diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..0b1dbb9553cc59c9cdd6fc1d2f85ffb2ecfb2f97 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +Checkpoint/variables/variables.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text +models/research/compression/image_encoder/example.png filter=lfs diff=lfs merge=lfs -text +models/research/deeplab/testing/pascal_voc_seg/val-00000-of-00001.tfrecord filter=lfs diff=lfs merge=lfs -text +models/research/lfads/synth_data/trained_itb/model-65000.meta filter=lfs diff=lfs merge=lfs -text +models/research/object_detection/g3doc/img/kites_with_segment_overlay.png filter=lfs diff=lfs merge=lfs -text +models/research/object_detection/test_images/image2.jpg filter=lfs diff=lfs merge=lfs -text diff --git a/CTH_CODE_MAP.csv b/CTH_CODE_MAP.csv new file mode 100644 index 0000000000000000000000000000000000000000..87f488998803b091d34f7e53c8336f00c63dae20 --- /dev/null +++ b/CTH_CODE_MAP.csv @@ -0,0 +1,7859 @@ +CTH,code +3011100,13 +3011900,14 +3019900,16 +4063000,51 +4069000,53 +6029020,75 +6029090,77 +6031900,79 +7131000,95 +7132000,96 +7133100,99 +7133110,100 +7134000,105 +7136000,107 +8013100,111 +8021100,115 +8028090,124 +8041020,126 +8041030,127 +8051000,133 +8061000,137 +8081000,140 +8083000,141 +8094000,146 +8105000,148 +8109090,150 +9024030,174 +9061910,191 +9071010,194 +9109100,214 +11041200,233 +12019000,246 +12093000,259 +12099110,260 +12099160,265 +12099190,266 +12099990,268 +12119029,270 +12119049,275 +12119099,283 +13012000,288 +13019022,293 +13021919,304 +13021990,305 +13023900,310 +14011000,311 +15079010,322 +15091000,324 +15099010,326 +15119020,333 +15159099,349 +17019990,370 +17021190,372 +17049010,387 +17049020,388 +17049030,389 +17049090,390 +18040000,393 +18050000,394 +18063100,397 +18063200,398 +18069010,399 +18069090,403 +19012000,405 +19019090,407 +19021900,408 +19023010,411 +19023090,412 +19042000,417 +19053100,421 +19053211,422 +19053290,424 +19059020,427 +19059090,430 +20057000,442 +20079990,452 +20081990,458 +20089999,469 +20098990,483 +21011190,487 +21039010,501 +21039020,502 +21039040,504 +21039090,505 +21050000,509 +21061000,510 +21069019,512 +21069060,516 +21069099,520 +22021090,525 +22029020,527 +22029090,528 +22029920,531 +22029990,533 +22030000,534 +22041000,535 +22042110,536 +22042120,537 +22042190,538 +22042990,541 +22083012,553 +22083019,555 +22083093,557 +22083099,558 +22084011,559 +22085011,562 +22085013,563 +22086000,565 +22087011,567 +22089099,572 +22090090,574 +23091000,589 +23099010,590 +23099020,591 +23099031,592 +23099039,594 +23099090,595 +24022090,599 +25010090,607 +25041090,613 +25070029,623 +25081090,625 +25151210,643 +25161100,647 +25199020,659 +25199040,661 +25199090,662 +25202010,665 +25202090,666 +25210090,668 +25232910,673 +25249021,679 +25309030,693 +25309099,695 +26060020,710 +26131000,717 +26201900,725 +27011200,731 +27011910,732 +27011920,733 +27075000,741 +27101960,759 +27101971,762 +27101979,767 +27101980,768 +27101990,773 +27111100,776 +27111200,777 +27111300,778 +27122000,782 +27129090,786 +27132000,791 +28030010,802 +28030090,804 +28046900,812 +28061000,821 +28092010,827 +28111990,832 +28112200,834 +28112990,838 +28181000,853 +28182010,854 +28182090,856 +28183000,857 +28211010,862 +28256020,879 +28259090,884 +28273990,895 +28276090,902 +28332990,928 
+28342990,935 +28352200,937 +28352990,945 +28353900,947 +28363000,951 +28364000,952 +28365000,953 +28369990,959 +28429090,978 +28439012,984 +28439019,985 +28444000,989 +28459090,994 +28469090,998 +28470000,999 +28492090,1002 +28500020,1007 +28539090,1016 +29011000,1017 +29012990,1023 +29021900,1025 +29021990,1026 +29023000,1028 +29029090,1037 +29033919,1046 +29039990,1058 +29041090,1061 +29049090,1066 +29049990,1067 +29051100,1068 +29051220,1070 +29051490,1072 +29051990,1076 +29053200,1083 +29053990,1085 +29054500,1090 +29054900,1091 +29061990,1098 +29062990,1102 +29071110,1103 +29071990,1116 +29072990,1121 +29091900,1125 +29093090,1130 +29094900,1134 +29094990,1135 +29095090,1137 +29096000,1138 +29109000,1142 +29110090,1144 +29121990,1150 +29122990,1153 +29124999,1157 +29130090,1160 +29141100,1161 +29141990,1165 +29142990,1171 +29143990,1174 +29145000,1176 +29146990,1179 +29147090,1181 +29147990,1184 +29151290,1187 +29152100,1189 +29152990,1192 +29153990,1200 +29153999,1201 +29155000,1205 +29156010,1206 +29157090,1212 +29159090,1217 +29159099,1220 +29161100,1221 +29161290,1223 +29161400,1226 +29161990,1232 +29163190,1239 +29163990,1243 +29171200,1246 +29171400,1249 +29171990,1255 +29173500,1260 +29173990,1266 +29181400,1271 +29181990,1277 +29182990,1286 +29183090,1290 +29189900,1291 +29189990,1292 +29199090,1293 +29209000,1296 +29209099,1299 +29211990,1302 +29212990,1306 +29213090,1308 +29214290,1313 +29214390,1315 +29214990,1322 +29215990,1327 +29221190,1329 +29221990,1336 +29222990,1344 +29224100,1346 +29224990,1352 +29225090,1354 +29232090,1357 +29239000,1359 +29241900,1360 +29242990,1366 +29251900,1368 +29252990,1369 +29269000,1373 +29270090,1375 +29280090,1376 +29291020,1378 +29291090,1379 +29304000,1383 +29309099,1391 +29319090,1396 +29321100,1397 +29321990,1402 +29322090,1405 +29329900,1407 +29329990,1408 +29331990,1410 +29331999,1411 +29332990,1415 +29333919,1422 +29333990,1424 +29334900,1425 +29335990,1428 +29336990,1431 +29337900,1433 +29339900,1437 +29339990,1438 +29341000,1439 +29349900,1442 +29349990,1444 +29350090,1445 +29359090,1448 +29362990,1465 +29369000,1466 +29371900,1468 +29372200,1470 +29372900,1472 +29379090,1476 +29389090,1479 +29400000,1491 +29419090,1511 +29420011,1512 +29420090,1519 +30019099,1524 +30021019,1526 +30021099,1529 +30021290,1532 +30021310,1534 +30021500,1537 +30021900,1538 +30022019,1541 +30022029,1542 +30023000,1543 +30024119,1544 +30024910,1546 +30024990,1547 +30029010,1549 +30029020,1550 +30029030,1551 +30029090,1553 +30039014,1557 +30041090,1562 +30043990,1571 +30049014,1579 +30049099,1598 +30051090,1600 +30059090,1606 +30061010,1607 +30062000,1609 +30063000,1610 +30064000,1611 +30069100,1615 +31010099,1616 +31052000,1628 +31059090,1635 +32012000,1637 +32021000,1640 +32029090,1644 +32041199,1658 +32041219,1666 +32041299,1671 +32041650,1689 +32041690,1692 +32041719,1694 +32041739,1696 +32041790,1704 +32041979,1723 +32041990,1725 +32042090,1727 +32049000,1728 +32050000,1729 +32061110,1730 +32061190,1731 +32061900,1732 +32064990,1739 +32071040,1744 +32071090,1745 +32072010,1746 +32073000,1748 +32074000,1749 +32081010,1750 +32081090,1753 +32082010,1754 +32082020,1755 +32082090,1757 +32089019,1759 +32089029,1762 +32089090,1767 +32091010,1768 +32091090,1769 +32099020,1771 +32099090,1772 +32100090,1778 +32121000,1780 +32129010,1781 +32129020,1782 +32129030,1783 +32129090,1784 +32131000,1785 +32139000,1786 +32141000,1787 +32149090,1790 +32151140,1793 +32151190,1794 +32151940,1797 +32151990,1798 +32159010,1799 +32159020,1800 +32159090,1803 +33011200,1804 
+33011300,1805 +33011990,1807 +33012990,1833 +33019090,1845 +33021010,1846 +33021090,1847 +33029011,1848 +33029012,1849 +33029019,1850 +33029090,1851 +33030010,1852 +33030050,1855 +33030060,1856 +33030090,1857 +33041000,1858 +33042000,1859 +33043000,1860 +33049110,1861 +33049190,1863 +33049910,1864 +33049920,1865 +33049930,1866 +33049950,1868 +33049990,1869 +33051090,1871 +33059040,1878 +33059090,1880 +33069000,1885 +33071090,1887 +33072000,1888 +33073090,1890 +33074100,1891 +33074900,1892 +33079090,1895 +34011190,1898 +34012000,1903 +34013019,1906 +34013090,1907 +34021190,1909 +34021200,1910 +34021300,1911 +34021900,1912 +34022010,1913 +34022090,1915 +34024200,1918 +34029011,1921 +34029019,1923 +34029059,1931 +34029092,1933 +34029099,1934 +34031100,1935 +34031900,1936 +34039100,1937 +34039900,1938 +34042000,1939 +34049020,1941 +34049039,1942 +34049090,1943 +34051000,1944 +34053000,1946 +34059010,1948 +34059090,1949 +34060010,1950 +34060090,1951 +34070010,1952 +34070090,1953 +35029000,1957 +35040010,1961 +35040099,1963 +35051090,1965 +35061000,1967 +35069110,1968 +35069190,1969 +35069999,1972 +35079010,1973 +35079069,1977 +35079079,1978 +35079091,1979 +35079099,1980 +37011010,1990 +37011090,1991 +37013000,1993 +37019990,1995 +37032010,2006 +37079010,2013 +37079090,2014 +38011000,2015 +38019000,2018 +38021000,2019 +38029019,2021 +38040010,2024 +38051020,2027 +38061010,2030 +38069090,2034 +38089199,2037 +38089290,2038 +38089340,2040 +38089390,2042 +38089990,2045 +38099110,2047 +38099190,2052 +38099200,2053 +38099390,2055 +38101010,2056 +38101090,2057 +38109090,2059 +38112100,2061 +38112900,2062 +38119000,2063 +38121000,2064 +38122090,2066 +38123090,2069 +38123990,2074 +38140010,2076 +38140020,2077 +38151290,2080 +38151900,2081 +38159000,2082 +38160000,2083 +38180090,2087 +38190090,2089 +38200000,2090 +38210000,2091 +38220019,2094 +38220090,2095 +38221990,2098 +38229010,2099 +38229090,2100 +38231900,2106 +38237090,2108 +38241000,2109 +38243000,2110 +38244010,2111 +38244090,2112 +38245090,2113 +38249021,2124 +38249090,2130 +38249100,2131 +38249900,2132 +38249921,2137 +38249925,2140 +38249990,2143 +39011010,2146 +39011020,2147 +39011090,2148 +39012000,2149 +39013000,2150 +39014000,2151 +39014010,2152 +39014090,2153 +39019000,2154 +39019090,2156 +39021000,2157 +39023000,2159 +39029000,2160 +39031990,2163 +39033000,2165 +39039090,2167 +39041010,2168 +39041020,2169 +39041090,2170 +39042110,2172 +39042200,2174 +39043090,2178 +39046100,2181 +39046990,2183 +39049000,2184 +39052100,2189 +39052900,2190 +39053000,2191 +39059990,2194 +39061090,2196 +39069010,2197 +39069090,2204 +39071000,2205 +39072010,2206 +39072090,2207 +39072990,2210 +39073010,2211 +39073090,2212 +39074000,2213 +39075000,2214 +39076090,2215 +39076990,2219 +39079120,2222 +39079190,2224 +39079900,2225 +39079920,2227 +39079990,2228 +39081019,2231 +39081090,2237 +39089000,2238 +39089090,2241 +39092010,2244 +39092090,2245 +39093090,2246 +39093100,2247 +39094090,2253 +39095000,2254 +39100010,2255 +39100020,2256 +39100090,2257 +39111090,2259 +39119090,2261 +39123100,2267 +39123919,2270 +39123929,2273 +39129090,2276 +39139090,2283 +39140090,2286 +39161090,2292 +39162019,2294 +39162099,2296 +39169010,2297 +39169050,2303 +39169090,2306 +39172110,2309 +39172190,2310 +39172200,2311 +39172390,2313 +39172990,2318 +39173100,2319 +39173210,2320 +39173290,2322 +39173300,2323 +39173910,2324 +39173990,2326 +39174000,2327 +39181010,2328 +39181090,2329 +39189090,2332 +39191000,2333 +39199010,2334 +39199020,2335 +39199090,2336 +39201012,2338 +39201019,2339 
+39201099,2342 +39202020,2344 +39202090,2345 +39203090,2348 +39204300,2349 +39204900,2350 +39205111,2351 +39205119,2352 +39205199,2353 +39205999,2356 +39206190,2359 +39206220,2361 +39206290,2362 +39206390,2364 +39206919,2367 +39206929,2368 +39206939,2369 +39206999,2371 +39207119,2373 +39207999,2381 +39209119,2384 +39209190,2385 +39209219,2388 +39209299,2389 +39209919,2393 +39209929,2395 +39209939,2397 +39209949,2399 +39209959,2400 +39209999,2404 +39211100,2405 +39211200,2406 +39211310,2407 +39211390,2408 +39211900,2410 +39219010,2411 +39219029,2416 +39219039,2418 +39219099,2424 +39221000,2425 +39222000,2426 +39229000,2427 +39231010,2428 +39231020,2429 +39231030,2430 +39231040,2431 +39231090,2432 +39232100,2433 +39232910,2434 +39232990,2435 +39233010,2436 +39233090,2437 +39234000,2438 +39235010,2439 +39235090,2440 +39239020,2442 +39239090,2443 +39241010,2444 +39241090,2445 +39249010,2446 +39249090,2448 +39252000,2450 +39253000,2451 +39259010,2452 +39259090,2453 +39261019,2455 +39261099,2457 +39262011,2458 +39262029,2461 +39262039,2462 +39262049,2464 +39262099,2466 +39263010,2467 +39263090,2468 +39264019,2470 +39264029,2471 +39264039,2473 +39264049,2475 +39264059,2476 +39264060,2477 +39264099,2479 +39269010,2480 +39269029,2482 +39269039,2484 +39269049,2486 +39269059,2488 +39269069,2490 +39269079,2492 +39269080,2493 +39269091,2494 +39269099,2495 +40012200,2498 +40021990,2505 +40022000,2506 +40024900,2510 +40025900,2512 +40027000,2514 +40029990,2518 +40040000,2520 +40051000,2521 +40059190,2525 +40059990,2527 +40069090,2530 +40070010,2531 +40070090,2533 +40081110,2534 +40081190,2535 +40081990,2537 +40082190,2540 +40082910,2541 +40082930,2543 +40082990,2545 +40091100,2546 +40091200,2547 +40092100,2548 +40092200,2549 +40093100,2550 +40093200,2551 +40094100,2552 +40094200,2553 +40101190,2555 +40101290,2557 +40101990,2559 +40103110,2560 +40103190,2561 +40103290,2563 +40103390,2565 +40103490,2566 +40103590,2568 +40103690,2569 +40103911,2570 +40103919,2571 +40103999,2574 +40111010,2575 +40111090,2576 +40112010,2577 +40113000,2579 +40114010,2580 +40114090,2582 +40115090,2584 +40118000,2588 +40119000,2589 +40119900,2593 +40121300,2594 +40129020,2598 +40129049,2600 +40132000,2605 +40139090,2610 +40149090,2615 +40151100,2616 +40151900,2618 +40159030,2621 +40159099,2623 +40161000,2624 +40169100,2625 +40169200,2626 +40169310,2627 +40169320,2628 +40169330,2629 +40169340,2630 +40169350,2631 +40169360,2632 +40169390,2633 +40169590,2636 +40169910,2637 +40169920,2638 +40169940,2640 +40169950,2641 +40169960,2642 +40169980,2644 +40169990,2645 +40170090,2650 +41019090,2656 +41041100,2666 +41041900,2667 +41044100,2668 +41044900,2669 +41051000,2670 +41053000,2671 +41071100,2677 +41071200,2678 +41071900,2679 +41079100,2680 +41079200,2681 +41079900,2682 +41120000,2683 +41131000,2684 +41139000,2686 +41142010,2688 +41151000,2690 +42010000,2692 +42021190,2697 +42021220,2699 +42021240,2701 +42021250,2702 +42021290,2706 +42021990,2711 +42022110,2712 +42022190,2714 +42022210,2715 +42022220,2716 +42022290,2719 +42022910,2720 +42022990,2721 +42023120,2723 +42023190,2724 +42023210,2725 +42023290,2726 +42023990,2728 +42029100,2729 +42029200,2730 +42029900,2731 +42031010,2732 +42031090,2733 +42032110,2734 +42032920,2737 +42033000,2739 +42034090,2741 +42050019,2743 +42050090,2745 +44032110,2759 +44032190,2760 +44032290,2762 +44034200,2764 +44034910,2765 +44039929,2774 +44039990,2775 +44071100,2780 +44071200,2781 +44072300,2785 +44072910,2789 +44072990,2790 +44079920,2794 +44079990,2795 +44081090,2798 +44083910,2800 +44083990,2802 
+44089010,2803 +44089090,2804 +44092990,2811 +44101110,2812 +44101190,2814 +44111200,2817 +44111300,2818 +44111400,2819 +44119229,2823 +44119429,2825 +44123190,2828 +44123390,2832 +44129990,2835 +44130000,2836 +44140000,2837 +44152000,2840 +44182090,2847 +44187500,2854 +44187900,2855 +44189900,2857 +44199090,2866 +44201000,2867 +44209090,2871 +44219090,2875 +44219990,2883 +46019900,2894 +46021100,2895 +46021990,2899 +46029000,2900 +47032100,2905 +47032900,2906 +47071000,2914 +47072000,2915 +47079000,2917 +48010090,2919 +48021010,2920 +48022090,2923 +48025450,2927 +48025590,2930 +48025690,2933 +48025790,2937 +48025890,2940 +48026190,2941 +48030090,2948 +48041100,2949 +48041900,2950 +48042900,2952 +48043900,2954 +48045900,2960 +48051900,2962 +48052500,2964 +48054000,2966 +48059100,2968 +48059200,2969 +48059300,2970 +48064010,2974 +48064090,2975 +48070090,2977 +48089000,2980 +48099000,2982 +48101320,2984 +48101390,2986 +48101490,2987 +48101920,2989 +48101990,2991 +48102200,2992 +48102900,2993 +48103200,2995 +48103910,2996 +48103990,2998 +48109200,2999 +48109900,3000 +48114100,3002 +48114900,3003 +48115190,3005 +48115910,3006 +48115990,3007 +48116000,3008 +48119015,3011 +48119099,3017 +48120000,3018 +48139090,3022 +48142000,3023 +48149000,3024 +48171000,3029 +48173090,3032 +48181000,3033 +48182000,3034 +48183000,3035 +48189000,3037 +48191010,3038 +48191090,3039 +48192010,3040 +48192020,3041 +48192090,3042 +48193000,3043 +48194000,3044 +48195090,3046 +48196000,3047 +48201090,3050 +48202000,3051 +48203000,3052 +48205000,3054 +48209090,3056 +48211010,3057 +48211020,3058 +48211090,3059 +48219010,3060 +48219090,3061 +48229090,3064 +48232000,3065 +48234000,3066 +48236900,3068 +48237090,3072 +48239012,3074 +48239013,3075 +48239015,3077 +48239030,3082 +48239090,3083 +49011010,3084 +49011020,3085 +49019900,3087 +49021010,3088 +49029020,3091 +49030010,3092 +49030020,3093 +49040000,3094 +49060000,3099 +49070020,3100 +49070030,3101 +49081000,3103 +49089000,3104 +49090090,3106 +49100010,3107 +49100090,3108 +49111010,3109 +49111020,3110 +49111090,3112 +49119100,3113 +49119910,3114 +49119920,3115 +49119990,3116 +50020010,3117 +50072090,3126 +50079090,3128 +51011900,3130 +51012900,3132 +51111130,3149 +51111190,3150 +51121110,3158 +51121130,3159 +51121190,3160 +51121990,3163 +51129090,3169 +52010020,3170 +52083190,3212 +52083290,3214 +52084230,3220 +52084290,3221 +52084990,3224 +52085290,3228 +52093190,3240 +52094200,3245 +52103190,3257 +53061090,3295 +53071010,3297 +53072000,3298 +53091910,3304 +53091990,3306 +53092990,3310 +53101012,3312 +53101013,3313 +54011000,3319 +54012000,3320 +54021990,3323 +54022090,3324 +54023100,3325 +54023300,3327 +54024400,3331 +54024500,3332 +54024700,3334 +54026990,3345 +54041990,3359 +54049010,3360 +54049090,3361 +54071019,3366 +54072090,3374 +54074290,3380 +54075290,3387 +54075300,3388 +54075490,3390 +54076190,3393 +54076900,3394 +54077200,3396 +54077300,3397 +54078290,3402 +54079200,3405 +54079300,3406 +54082290,3415 +54083290,3420 +55031900,3431 +55032000,3433 +55033000,3434 +55081000,3452 +55092100,3456 +55095100,3464 +55101110,3471 +55121910,3478 +55121990,3480 +55122990,3482 +55151130,3499 +55151190,3501 +55162200,3518 +56012190,3531 +56012200,3532 +56012900,3533 +56021000,3535 +56022990,3538 +56029090,3540 +56031100,3541 +56031200,3543 +56031300,3544 +56031400,3545 +56039100,3546 +56039200,3547 +56039300,3548 +56039400,3550 +56039490,3552 +56041000,3553 +56049000,3554 +56050090,3556 +56060090,3557 +56074900,3560 +56075090,3565 +56079090,3568 +56090090,3575 +57019090,3579 
+57024290,3593 +57032020,3607 +57032090,3608 +57032990,3612 +57033020,3614 +57033090,3615 +57033990,3619 +57039090,3621 +57050090,3635 +58013690,3646 +58013720,3650 +58041090,3655 +58042100,3656 +58042910,3657 +58042990,3658 +58061000,3661 +58062000,3662 +58063190,3663 +58063200,3664 +58063990,3668 +58071010,3670 +58071020,3671 +58071090,3672 +58079090,3674 +58081090,3676 +58089010,3677 +58089090,3681 +58109210,3685 +58109290,3686 +58109900,3687 +59019090,3696 +59021010,3697 +59021090,3698 +59022090,3700 +59031010,3702 +59031090,3703 +59032010,3704 +59032090,3705 +59039010,3706 +59039090,3708 +59050090,3712 +59061000,3713 +59069990,3717 +59070012,3719 +59070099,3721 +59090090,3726 +59100090,3731 +59111000,3732 +59112000,3733 +59113190,3738 +59113210,3739 +59113290,3743 +59119010,3745 +59119020,3746 +59119090,3747 +60012200,3752 +60019200,3755 +60024000,3757 +60029000,3758 +60041000,3762 +60049000,3763 +60053100,3765 +60053200,3766 +60053600,3770 +60053700,3771 +60053900,3774 +60059000,3777 +60062200,3780 +60063200,3784 +60063300,3785 +60063400,3786 +60064200,3788 +60069000,3791 +61012000,3792 +61013010,3793 +61013020,3794 +61022000,3797 +61023010,3798 +61023020,3799 +61031030,3802 +61032200,3804 +61033200,3808 +61033300,3809 +61033990,3811 +61034200,3813 +61034300,3814 +61034990,3816 +61042300,3821 +61043200,3825 +61043300,3826 +61043990,3828 +61044200,3830 +61044300,3831 +61044400,3832 +61044990,3834 +61045200,3836 +61045300,3837 +61046200,3842 +61046300,3843 +61046920,3845 +61046990,3846 +61051010,3847 +61051020,3848 +61051090,3849 +61052010,3850 +61059090,3853 +61061000,3854 +61062010,3855 +61062020,3856 +61069090,3859 +61071100,3860 +61071210,3861 +61072100,3864 +61082100,3875 +61082210,3876 +61082220,3877 +61082990,3879 +61083100,3880 +61083210,3881 +61083220,3882 +61083990,3883 +61089210,3885 +61091000,3888 +61099010,3889 +61099020,3890 +61099090,3893 +61101110,3894 +61101120,3895 +61101190,3896 +61101200,3897 +61102000,3899 +61103010,3900 +61103020,3901 +61109000,3902 +61112000,3903 +61113000,3904 +61119090,3906 +61121100,3907 +61121200,3908 +61123100,3914 +61124100,3917 +61130000,3920 +61142000,3921 +61143010,3922 +61143020,3923 +61149090,3924 +61151000,3925 +61152100,3926 +61152200,3927 +61152990,3931 +61159500,3934 +61159600,3935 +61159990,3937 +61161000,3938 +61169300,3941 +61169990,3943 +61171040,3947 +61171090,3948 +61178040,3952 +61178090,3953 +61179000,3954 +62011390,3959 +62014090,3966 +62019200,3969 +62019300,3970 +62019990,3971 +62021110,3972 +62021200,3974 +62021300,3975 +62021920,3976 +62024090,3983 +62029010,3984 +62029210,3988 +62029290,3989 +62029310,3990 +62029390,3991 +62031100,3993 +62033100,4001 +62033190,4002 +62033200,4003 +62033300,4004 +62033990,4007 +62034100,4008 +62034200,4009 +62034290,4011 +62034300,4012 +62034990,4014 +62042300,4022 +62043200,4028 +62043300,4030 +62043390,4031 +62043990,4033 +62044190,4036 +62044210,4037 +62044220,4038 +62044290,4039 +62044310,4040 +62044390,4041 +62044400,4042 +62044490,4043 +62044919,4045 +62044990,4046 +62044999,4047 +62045200,4049 +62045300,4051 +62045390,4052 +62045990,4055 +62046110,4057 +62046200,4059 +62046290,4061 +62046300,4062 +62046990,4065 +62052000,4066 +62052090,4068 +62053000,4069 +62059090,4073 +62061010,4074 +62061090,4075 +62062000,4076 +62063000,4077 +62063090,4079 +62064000,4080 +62069000,4081 +62071100,4082 +62072110,4085 +62072190,4086 +62081990,4093 +62082200,4096 +62082990,4098 +62089290,4102 +62092000,4105 +62092090,4107 +62093000,4108 +62099090,4110 +62101000,4111 +62104090,4120 +62105000,4121 
+62111100,4122 +62111200,4123 +62113200,4125 +62113300,4126 +62114290,4130 +62114300,4132 +62114390,4133 +62114990,4136 +62114999,4137 +62121000,4138 +62122000,4139 +62129010,4141 +62129090,4142 +62139090,4145 +62141090,4148 +62142020,4150 +62142090,4153 +62143000,4154 +62144000,4156 +62149090,4162 +62151000,4164 +62151090,4166 +62152000,4167 +62159090,4169 +62160090,4171 +62171010,4172 +62171020,4173 +62171090,4178 +62179090,4180 +63013000,4183 +63014000,4184 +63019090,4185 +63026090,4200 +63029900,4203 +63039200,4207 +63039990,4208 +63041910,4210 +63041930,4211 +63041990,4213 +63049239,4219 +63049249,4221 +63049250,4222 +63049260,4223 +63049270,4224 +63049289,4226 +63049299,4228 +63049999,4232 +63051040,4235 +63052000,4238 +63053900,4241 +63059000,4242 +63062990,4247 +63071090,4254 +63072090,4255 +63079019,4258 +63079090,4260 +63079099,4262 +63101020,4265 +63109040,4268 +64019990,4275 +64021290,4277 +64021990,4279 +64022010,4280 +64022090,4281 +64029190,4283 +64029910,4284 +64029990,4285 +64031990,4289 +64032011,4290 +64032090,4298 +64034000,4299 +64035910,4305 +64035920,4306 +64035990,4308 +64039110,4309 +64039190,4311 +64039910,4312 +64039920,4313 +64039990,4314 +64041110,4315 +64041190,4317 +64041910,4318 +64041990,4320 +64042000,4321 +64052000,4323 +64059000,4324 +64061090,4327 +64062000,4328 +64069090,4334 +65010090,4336 +65020090,4339 +65040000,4340 +65050010,4341 +65050090,4342 +65061010,4343 +65061090,4344 +65069100,4345 +65069900,4346 +65070000,4347 +66011000,4348 +66019900,4350 +66020000,4351 +66032000,4352 +66039090,4354 +67021010,4356 +67021090,4357 +67029090,4359 +67042010,4363 +67042090,4364 +67049000,4365 +68022190,4370 +68022390,4372 +68029100,4374 +68029900,4377 +68041000,4379 +68042110,4380 +68042190,4381 +68042210,4382 +68042220,4383 +68042290,4384 +68042310,4385 +68042390,4386 +68043010,4387 +68043020,4388 +68051010,4389 +68051090,4390 +68052010,4391 +68052020,4392 +68052030,4393 +68052040,4394 +68052090,4395 +68053000,4396 +68061000,4397 +68062000,4398 +68069000,4399 +68071090,4401 +68079090,4403 +68080000,4404 +68091100,4405 +68091900,4406 +68099000,4407 +68101990,4411 +68109100,4412 +68109990,4414 +68118200,4417 +68129922,4424 +68129990,4425 +68138100,4428 +68138900,4429 +68141090,4433 +68149090,4436 +68151010,4437 +68151090,4439 +68151900,4443 +68159100,4445 +68159990,4448 +69021010,4452 +69021040,4455 +69021090,4456 +69022090,4462 +69029090,4465 +69031090,4468 +69032090,4470 +69039090,4475 +69051000,4478 +69071010,4481 +69071090,4482 +69072100,4483 +69072200,4484 +69072300,4485 +69074010,4487 +69079010,4488 +69079090,4489 +69081090,4492 +69089090,4495 +69091200,4497 +69091990,4499 +69099000,4500 +69101000,4501 +69109000,4502 +69111011,4503 +69111019,4504 +69111021,4505 +69111029,4506 +69119090,4508 +69120010,4509 +69120020,4510 +69120030,4511 +69120040,4512 +69120090,4513 +69131000,4514 +69139000,4515 +69141000,4516 +69149000,4517 +70022090,4522 +70023100,4523 +70023200,4524 +70023900,4525 +70042019,4530 +70049099,4534 +70051010,4535 +70051090,4536 +70052190,4538 +70052990,4540 +70053090,4541 +70071100,4542 +70071900,4543 +70072190,4545 +70072900,4546 +70091010,4550 +70091090,4551 +70099100,4552 +70099200,4553 +70101000,4554 +70102000,4555 +70109000,4556 +70111090,4559 +70119090,4560 +70131000,4561 +70132800,4563 +70133700,4565 +70134200,4567 +70134900,4568 +70139100,4569 +70139900,4570 +70140020,4572 +70151010,4573 +70159010,4576 +70159090,4577 +70161000,4578 +70169000,4579 +70171000,4580 +70172000,4581 +70179010,4582 +70179090,4584 +70181020,4586 +70181090,4587 
+70182000,4588 +70189090,4590 +70191100,4591 +70191200,4592 +70191900,4595 +70193100,4596 +70193200,4597 +70193900,4598 +70194000,4599 +70195900,4602 +70199000,4611 +70199010,4612 +70199090,4613 +70200019,4615 +70200029,4617 +70200090,4618 +71012200,4622 +71023100,4626 +71023910,4627 +71031029,4634 +71031090,4645 +71039100,4646 +71039110,4647 +71039120,4648 +71039130,4649 +71039990,4666 +71042000,4667 +71042010,4668 +71042100,4670 +71049010,4673 +71049090,4674 +71049100,4675 +71049900,4676 +71051000,4677 +71069100,4680 +71069290,4682 +71081200,4684 +71101900,4689 +71131120,4697 +71131130,4698 +71131190,4699 +71131910,4700 +71131930,4702 +71131940,4703 +71131990,4706 +71141910,4709 +71142020,4711 +71159090,4713 +71162000,4715 +71171100,4716 +71171910,4717 +71171990,4719 +71179010,4720 +71179090,4721 +72022100,4728 +72024900,4732 +72026000,4733 +72041000,4745 +72042190,4746 +72042990,4748 +72044100,4750 +72044900,4751 +72051011,4752 +72051021,4754 +72052100,4757 +72052910,4758 +72052990,4759 +72072090,4769 +72082790,4777 +72085110,4791 +72091690,4804 +72091790,4807 +72101290,4819 +72103090,4821 +72104900,4824 +72105000,4827 +72106100,4828 +72106900,4829 +72107000,4830 +72109090,4832 +72112950,4842 +72112990,4843 +72124000,4850 +72125090,4851 +72126000,4852 +72149990,4865 +72155090,4868 +72159090,4871 +72163100,4874 +72163300,4876 +72166900,4880 +72171010,4885 +72172010,4888 +72173010,4891 +72179099,4897 +72191190,4903 +72191200,4904 +72191300,4905 +72192190,4912 +72193390,4928 +72193490,4931 +72193590,4934 +72199013,4937 +72199090,4938 +72202090,4946 +72209022,4947 +72209090,4949 +72210090,4952 +72221111,4953 +72221119,4954 +72221999,4958 +72222011,4959 +72222019,4961 +72222099,4964 +72223019,4967 +72223099,4969 +72224020,4971 +72230010,4972 +72230091,4973 +72230092,4974 +72230099,4975 +72249099,4981 +72251100,4982 +72253090,4986 +72254012,4988 +72254019,4990 +72255010,4993 +72259200,4996 +72259900,4999 +72269190,5010 +72269990,5018 +72279040,5024 +72279090,5025 +72281010,5026 +72281090,5027 +72283011,5029 +72283019,5030 +72283024,5032 +72283029,5033 +72284000,5034 +72285090,5036 +72286011,5037 +72286012,5038 +72286094,5040 +72286099,5041 +72288090,5046 +72299032,5051 +72299060,5055 +72299090,5056 +73049000,5085 +73064000,5095 +73065000,5096 +73069090,5100 +73071190,5103 +73071900,5104 +73072100,5105 +73072200,5106 +73072300,5107 +73072900,5108 +73079190,5110 +73079290,5112 +73079390,5113 +73079910,5114 +73079990,5115 +73083000,5118 +73084000,5119 +73089090,5122 +73102990,5130 +73121090,5135 +73129000,5136 +73151100,5149 +73151290,5151 +73158900,5155 +73159000,5156 +73170099,5163 +73181110,5164 +73181190,5165 +73181400,5168 +73181500,5169 +73181600,5170 +73181900,5171 +73182100,5172 +73182200,5173 +73182300,5174 +73182400,5175 +73182910,5176 +73182990,5177 +73194090,5179 +73199000,5180 +73201011,5181 +73201019,5183 +73202000,5185 +73209090,5188 +73219000,5195 +73239390,5203 +73239990,5208 +73241000,5209 +73242900,5211 +73249000,5212 +73259999,5218 +73261910,5220 +73261990,5221 +73262090,5223 +73269060,5229 +73269080,5231 +73269099,5233 +74032900,5242 +74040012,5243 +74040022,5245 +74061000,5249 +74071020,5252 +74071030,5253 +74071090,5255 +74072120,5257 +74072190,5258 +74072990,5262 +74081190,5263 +74081990,5266 +74082190,5268 +74082290,5270 +74082990,5272 +74091100,5273 +74091900,5274 +74092100,5275 +74092900,5276 +74093100,5277 +74099000,5280 +74101100,5281 +74102100,5283 +74102200,5284 +74111000,5285 +74112100,5286 +74112900,5288 +74121000,5289 +74122012,5291 +74122019,5292 +74122090,5293 
+74130000,5294 +74151000,5295 +74152100,5296 +74152900,5297 +74153310,5298 +74153390,5299 +74153910,5300 +74153990,5301 +74181021,5303 +74181039,5306 +74182010,5308 +74182020,5309 +74198090,5314 +74199930,5318 +74199990,5320 +75021000,5321 +75040000,5326 +75051220,5329 +75052200,5331 +75061000,5332 +75062000,5333 +75071200,5335 +75072000,5336 +75089090,5341 +76012010,5344 +76020010,5347 +76041031,5353 +76041039,5354 +76042100,5355 +76042910,5356 +76042930,5358 +76042990,5359 +76052990,5363 +76061190,5365 +76061200,5366 +76069190,5368 +76069210,5369 +76069290,5370 +76071190,5372 +76071991,5374 +76071994,5376 +76071999,5378 +76072090,5380 +76081000,5381 +76082000,5382 +76090000,5383 +76101000,5384 +76109010,5385 +76109020,5386 +76109030,5387 +76109090,5388 +76121090,5392 +76129090,5396 +76130099,5399 +76151021,5403 +76151029,5404 +76151030,5405 +76151090,5406 +76152090,5409 +76161000,5410 +76169100,5411 +76169910,5412 +76169990,5415 +78011000,5416 +78019990,5419 +78020010,5420 +78060090,5426 +79011100,5427 +79020010,5431 +79070010,5440 +79070090,5441 +80070090,5447 +81019400,5449 +81019600,5450 +81019910,5451 +81019990,5452 +81029590,5455 +81029900,5457 +81052030,5469 +81059000,5470 +81089010,5478 +81089090,5479 +81110010,5484 +81129900,5489 +81130090,5493 +82019000,5499 +82021090,5502 +82022000,5503 +82023100,5504 +82023900,5505 +82024000,5506 +82029110,5507 +82029910,5509 +82029990,5510 +82031000,5511 +82032000,5512 +82033000,5513 +82034010,5514 +82034090,5515 +82041110,5516 +82041120,5517 +82041210,5518 +82041220,5519 +82042000,5520 +82051000,5521 +82052000,5522 +82053000,5523 +82054000,5524 +82055110,5525 +82055190,5526 +82055910,5527 +82055920,5528 +82055930,5529 +82055990,5531 +82057000,5533 +82059090,5537 +82060010,5538 +82060090,5539 +82071300,5540 +82071900,5541 +82072000,5542 +82073000,5543 +82074010,5544 +82074090,5545 +82075000,5546 +82076010,5547 +82076090,5548 +82077010,5549 +82077090,5550 +82078000,5551 +82079010,5552 +82079020,5553 +82079030,5554 +82079090,5555 +82081000,5556 +82082000,5557 +82083000,5558 +82084000,5559 +82089010,5560 +82089090,5564 +82090010,5565 +82090090,5566 +82100000,5567 +82119100,5569 +82119200,5570 +82119390,5572 +82119400,5573 +82121090,5576 +82122019,5578 +82130000,5581 +82141010,5582 +82141090,5583 +82142010,5584 +82142090,5585 +82149090,5587 +82152000,5589 +82159900,5591 +83011000,5592 +83012000,5593 +83013000,5594 +83014010,5595 +83014090,5596 +83016000,5598 +83017000,5599 +83021010,5600 +83021090,5602 +83022000,5603 +83023010,5604 +83023090,5605 +83024110,5606 +83024190,5608 +83024200,5609 +83024900,5610 +83025000,5611 +83026000,5612 +83030000,5613 +83040000,5614 +83052000,5616 +83059020,5618 +83059090,5619 +83061000,5620 +83062120,5622 +83062190,5623 +83062920,5625 +83062990,5626 +83063000,5627 +83071000,5628 +83079000,5629 +83081010,5630 +83081021,5631 +83081029,5632 +83082000,5633 +83089011,5634 +83089019,5635 +83089039,5638 +83089040,5639 +83089091,5640 +83089099,5641 +83099020,5644 +83099030,5645 +83099090,5646 +83100090,5648 +83111000,5649 +83112000,5650 +83113090,5652 +83119000,5653 +84021990,5655 +84029090,5658 +84051090,5665 +84059000,5666 +84069000,5668 +84072100,5670 +84073410,5675 +84082020,5681 +84089090,5683 +84091000,5684 +84099111,5685 +84099112,5686 +84099113,5687 +84099114,5688 +84099120,5689 +84099191,5690 +84099192,5691 +84099194,5693 +84099199,5694 +84099911,5695 +84099912,5696 +84099913,5697 +84099914,5698 +84099920,5699 +84099930,5700 +84099941,5701 +84099942,5702 +84099949,5703 +84099990,5704 +84111200,5707 +84119100,5711 
+84119900,5712 +84122100,5714 +84122910,5715 +84122990,5716 +84123100,5717 +84123900,5718 +84128090,5722 +84129030,5725 +84129090,5726 +84131199,5729 +84131990,5731 +84132000,5732 +84133010,5733 +84133020,5734 +84133030,5735 +84133090,5736 +84135010,5738 +84135090,5741 +84136010,5742 +84136020,5743 +84136090,5744 +84137010,5745 +84137096,5750 +84137099,5751 +84138190,5755 +84139110,5757 +84139120,5758 +84139130,5759 +84139190,5761 +84139200,5762 +84141000,5763 +84142010,5764 +84142020,5765 +84142090,5766 +84143000,5767 +84144030,5770 +84144090,5771 +84145110,5772 +84145120,5773 +84145130,5774 +84145190,5776 +84145910,5777 +84145920,5778 +84145930,5779 +84145990,5780 +84146000,5781 +84148011,5782 +84148019,5783 +84148030,5785 +84148090,5786 +84149011,5787 +84149012,5788 +84149019,5789 +84149020,5790 +84149030,5791 +84149040,5792 +84149090,5793 +84151010,5794 +84151090,5795 +84152090,5797 +84158190,5799 +84158210,5800 +84158290,5801 +84158310,5802 +84158390,5803 +84159000,5804 +84161000,5805 +84162000,5806 +84169000,5808 +84172000,5810 +84178090,5812 +84179000,5813 +84181090,5815 +84182100,5816 +84182900,5817 +84183010,5818 +84184090,5821 +84185000,5822 +84186100,5823 +84186910,5824 +84186990,5828 +84189900,5830 +84191110,5831 +84191910,5834 +84191920,5835 +84192090,5837 +84193900,5840 +84194090,5842 +84195020,5844 +84195090,5846 +84198110,5848 +84198120,5849 +84198190,5850 +84198990,5858 +84199010,5859 +84199090,5860 +84209100,5862 +84209900,5863 +84211920,5867 +84211999,5873 +84212110,5874 +84212120,5875 +84212190,5876 +84212200,5877 +84212300,5878 +84212900,5879 +84213100,5880 +84213910,5882 +84213920,5883 +84213990,5884 +84219100,5885 +84219900,5886 +84221100,5887 +84221900,5888 +84222000,5889 +84223000,5890 +84224000,5891 +84229010,5892 +84229020,5893 +84229090,5894 +84231000,5895 +84233000,5897 +84238190,5899 +84238290,5900 +84238900,5901 +84239010,5902 +84239020,5903 +84241000,5904 +84242000,5905 +84243000,5906 +84244100,5907 +84244900,5908 +84248100,5909 +84248200,5910 +84248910,5911 +84248990,5913 +84249000,5914 +84251110,5915 +84251910,5917 +84251920,5918 +84253900,5920 +84254200,5922 +84254900,5923 +84264100,5929 +84271000,5933 +84279000,5935 +84281011,5936 +84281019,5937 +84282011,5939 +84282019,5940 +84283300,5944 +84283900,5945 +84284000,5946 +84289090,5951 +84292000,5955 +84295100,5959 +84295200,5960 +84295900,5961 +84304120,5968 +84304190,5970 +84304900,5971 +84306900,5975 +84311010,5976 +84311090,5977 +84312010,5978 +84312090,5979 +84313100,5980 +84313910,5981 +84313990,5982 +84314100,5983 +84314200,5984 +84314310,5985 +84314390,5986 +84314910,5987 +84314920,5988 +84314930,5989 +84314990,5991 +84322990,5994 +84328090,6000 +84329010,6001 +84329090,6002 +84335900,6011 +84339000,6014 +84349010,6017 +84349020,6018 +84361000,6021 +84362900,6023 +84368090,6024 +84369100,6025 +84369900,6026 +84371000,6027 +84378010,6028 +84378020,6029 +84378090,6030 +84379010,6031 +84379020,6032 +84379090,6033 +84381010,6034 +84382000,6036 +84383090,6038 +84385000,6040 +84386000,6041 +84388090,6046 +84389090,6048 +84393010,6051 +84393090,6052 +84399100,6053 +84399900,6054 +84401090,6056 +84409000,6057 +84411010,6058 +84411090,6059 +84413000,6061 +84418000,6063 +84419000,6064 +84423090,6065 +84424000,6066 +84425010,6067 +84425020,6068 +84425039,6070 +84425090,6071 +84431300,6074 +84431990,6084 +84433100,6085 +84433210,6086 +84433240,6088 +84433250,6089 +84433290,6090 +84433910,6091 +84433990,6095 +84439100,6096 +84439940,6099 +84439951,6100 +84439952,6101 +84439959,6102 +84439960,6103 +84439990,6104 
+84452019,6117 +84454090,6122 +84459000,6123 +84463019,6131 +84463090,6132 +84471119,6133 +84471190,6135 +84471290,6138 +84472090,6141 +84479020,6143 +84479030,6144 +84481190,6146 +84481900,6147 +84482000,6148 +84483100,6149 +84483210,6150 +84483290,6153 +84483310,6154 +84483390,6157 +84483990,6160 +84484290,6163 +84484910,6164 +84484950,6167 +84484990,6168 +84485110,6169 +84485190,6172 +84485900,6173 +84490090,6175 +84501100,6176 +84501900,6178 +84502000,6179 +84509010,6180 +84509090,6181 +84512900,6185 +84513090,6187 +84515000,6192 +84518090,6197 +84519000,6198 +84522110,6204 +84522190,6206 +84522900,6207 +84523090,6209 +84529011,6210 +84529019,6211 +84529091,6212 +84529099,6213 +84531000,6214 +84532000,6215 +84538000,6216 +84539010,6217 +84539090,6218 +84542020,6221 +84543010,6222 +84543090,6224 +84549000,6225 +84552200,6229 +84553000,6230 +84559000,6231 +84561000,6232 +84561100,6233 +84563000,6236 +84564000,6237 +84569090,6240 +84571010,6241 +84571020,6242 +84581100,6246 +84581990,6248 +84589990,6254 +84592990,6262 +84593990,6265 +84595990,6277 +84596190,6278 +84596990,6280 +84597020,6282 +84601900,6285 +84602990,6294 +84603910,6296 +84603990,6297 +84609010,6301 +84609090,6302 +84612019,6304 +84614029,6314 +84615019,6318 +84615029,6320 +84619000,6321 +84621019,6323 +84622900,6331 +84622910,6332 +84622990,6334 +84623990,6339 +84624990,6344 +84629000,6348 +84629190,6350 +84629990,6355 +84639090,6366 +84641090,6368 +84642000,6369 +84649000,6370 +84659100,6373 +84659200,6374 +84659300,6375 +84659400,6376 +84659500,6377 +84659990,6380 +84661010,6381 +84661020,6382 +84662000,6383 +84663010,6384 +84663020,6385 +84663090,6386 +84669100,6387 +84669200,6388 +84669310,6389 +84669390,6390 +84669400,6391 +84671110,6392 +84671120,6393 +84671190,6394 +84671900,6395 +84672100,6396 +84672200,6397 +84672900,6398 +84678100,6399 +84678990,6402 +84679100,6403 +84679200,6404 +84679900,6405 +84681000,6406 +84682010,6407 +84682090,6408 +84688000,6409 +84689000,6410 +84701000,6411 +84705010,6415 +84713010,6419 +84713090,6420 +84714110,6421 +84714190,6423 +84714900,6424 +84715000,6425 +84716040,6429 +84716050,6430 +84716060,6431 +84716090,6432 +84717020,6434 +84717030,6435 +84717040,6436 +84717050,6437 +84717070,6439 +84717090,6440 +84718000,6441 +84719000,6442 +84729010,6444 +84729030,6446 +84729090,6448 +84729099,6449 +84732100,6451 +84732900,6452 +84733010,6453 +84733020,6454 +84733030,6455 +84733091,6457 +84733092,6458 +84733099,6459 +84734010,6460 +84734090,6461 +84735000,6462 +84741090,6464 +84742090,6467 +84743900,6470 +84748090,6474 +84749000,6475 +84752900,6478 +84759000,6479 +84769090,6483 +84771000,6484 +84772000,6485 +84773000,6486 +84775900,6489 +84778010,6490 +84778090,6491 +84779000,6492 +84789000,6494 +84791000,6495 +84792090,6497 +84794000,6499 +84795000,6500 +84796000,6501 +84797900,6503 +84798100,6504 +84798200,6505 +84798920,6507 +84798940,6509 +84798970,6512 +84798999,6514 +84799010,6515 +84799020,6516 +84799030,6517 +84799040,6518 +84799090,6519 +84801000,6520 +84802000,6521 +84803000,6522 +84804100,6523 +84804900,6524 +84805000,6525 +84806000,6526 +84807100,6527 +84807900,6528 +84811000,6529 +84812000,6530 +84813000,6531 +84814000,6532 +84818010,6533 +84818020,6534 +84818030,6535 +84818049,6537 +84818050,6538 +84818090,6539 +84819010,6540 +84819090,6541 +84821011,6542 +84821012,6543 +84821013,6544 +84821020,6545 +84821030,6546 +84821040,6547 +84821051,6548 +84821052,6549 +84821053,6550 +84821090,6551 +84822011,6552 +84822012,6553 +84822013,6554 +84822090,6555 +84823000,6556 +84824000,6557 
+84825000,6558 +84825011,6559 +84825012,6560 +84825013,6561 +84825023,6564 +84828000,6565 +84829119,6570 +84829120,6571 +84829130,6572 +84829900,6573 +84831010,6574 +84831091,6575 +84831092,6576 +84831099,6577 +84832000,6578 +84833000,6579 +84834000,6580 +84835010,6581 +84835090,6582 +84836010,6583 +84836020,6584 +84836090,6585 +84839000,6586 +84841010,6587 +84841090,6588 +84842000,6589 +84849000,6590 +84869000,6596 +84871000,6597 +84879000,6598 +85011011,6599 +85011012,6600 +85011019,6602 +85011020,6603 +85012000,6604 +85013111,6605 +85013112,6606 +85013119,6608 +85013210,6610 +85014010,6616 +85014090,6617 +85015110,6618 +85015190,6620 +85015210,6621 +85015220,6622 +85015290,6623 +85015390,6626 +85016100,6627 +85023990,6642 +85030010,6644 +85030021,6645 +85030029,6646 +85030090,6647 +85041090,6650 +85042100,6651 +85043100,6654 +85043200,6655 +85043300,6656 +85043400,6657 +85044010,6658 +85044021,6659 +85044029,6660 +85044030,6661 +85044040,6662 +85044090,6663 +85045010,6664 +85045090,6665 +85049010,6666 +85049090,6667 +85051110,6668 +85051190,6669 +85051900,6670 +85052000,6671 +85059000,6672 +85065000,6675 +85068010,6677 +85068090,6678 +85069000,6679 +85071000,6680 +85072000,6681 +85073000,6682 +85075000,6684 +85076000,6685 +85078000,6686 +85079010,6687 +85079090,6688 +85081100,6689 +85081900,6690 +85086000,6691 +85087000,6692 +85094090,6694 +85098000,6695 +85099000,6696 +85101000,6697 +85102000,6698 +85103000,6699 +85109000,6700 +85111000,6701 +85112090,6703 +85113020,6705 +85114000,6706 +85115000,6707 +85118000,6708 +85119000,6709 +85121000,6710 +85122010,6711 +85122020,6712 +85122090,6713 +85123010,6714 +85123090,6715 +85124000,6716 +85129000,6717 +85131010,6718 +85131090,6722 +85139000,6723 +85141000,6724 +85143090,6728 +85144000,6730 +85149000,6731 +85151100,6732 +85151900,6733 +85152110,6734 +85152190,6736 +85152900,6737 +85153100,6738 +85153910,6739 +85153990,6741 +85158090,6743 +85159000,6744 +85161000,6745 +85162900,6747 +85163100,6748 +85163200,6749 +85163300,6750 +85164000,6751 +85165000,6752 +85166000,6753 +85167100,6754 +85167200,6755 +85167920,6757 +85167990,6758 +85168000,6759 +85169000,6760 +85171110,6761 +85171190,6762 +85171210,6763 +85171211,6764 +85171290,6766 +85171300,6767 +85171890,6770 +85176100,6771 +85176210,6772 +85176230,6774 +85176250,6776 +85176260,6777 +85176270,6778 +85176290,6779 +85176910,6780 +85176930,6782 +85176950,6783 +85176970,6785 +85176990,6786 +85177010,6787 +85177090,6788 +85177100,6789 +85177910,6790 +85177990,6791 +85181000,6792 +85182100,6793 +85182190,6795 +85182200,6796 +85182290,6798 +85182900,6799 +85182990,6801 +85183000,6802 +85183011,6803 +85183019,6804 +85183020,6805 +85183090,6806 +85184000,6807 +85185000,6808 +85189000,6809 +85198940,6814 +85198990,6815 +85211099,6819 +85219020,6821 +85219090,6822 +85229000,6824 +85232980,6828 +85232990,6829 +85234140,6831 +85234910,6834 +85234920,6835 +85234940,6837 +85234990,6839 +85235100,6840 +85235210,6841 +85235220,6842 +85235290,6843 +85235910,6844 +85235990,6845 +85238010,6846 +85238020,6847 +85238090,6849 +85249100,6853 +85249900,6855 +85255030,6858 +85256000,6861 +85256011,6862 +85258010,6867 +85258020,6868 +85258030,6869 +85258090,6870 +85258900,6873 +85261000,6874 +85269120,6876 +85269190,6878 +85269200,6879 +85271900,6882 +85272100,6883 +85272900,6884 +85279100,6885 +85279990,6889 +85284900,6892 +85285100,6893 +85285200,6894 +85285900,6895 +85286200,6897 +85286900,6898 +85287100,6899 +85287211,6900 +85287212,6901 +85287215,6904 +85287217,6906 +85287219,6908 +85287390,6910 +85291019,6912 
+85291029,6915 +85291099,6918 +85299030,6921 +85299090,6922 +85301010,6923 +85308000,6924 +85309000,6925 +85311010,6926 +85311020,6927 +85311090,6928 +85312000,6929 +85318000,6930 +85319000,6931 +85321000,6932 +85322100,6933 +85322200,6934 +85322300,6935 +85322400,6936 +85322500,6937 +85322990,6939 +85323000,6940 +85329000,6941 +85331000,6942 +85332111,6943 +85332119,6944 +85332129,6946 +85332911,6947 +85332919,6948 +85332929,6950 +85333110,6951 +85333190,6953 +85333910,6954 +85333990,6955 +85334010,6956 +85334030,6958 +85334090,6959 +85339000,6960 +85340000,6961 +85351090,6966 +85353090,6975 +85354010,6976 +85354020,6977 +85354030,6978 +85359090,6983 +85361010,6984 +85361020,6985 +85361040,6987 +85361050,6988 +85361060,6989 +85361090,6990 +85362010,6991 +85362020,6992 +85362030,6993 +85362040,6994 +85362090,6995 +85363000,6996 +85364100,6997 +85364900,6998 +85365010,6999 +85365020,7000 +85365090,7001 +85366110,7002 +85366190,7003 +85366910,7004 +85366990,7005 +85367000,7006 +85369010,7007 +85369030,7009 +85369090,7010 +85371000,7011 +85372000,7012 +85381010,7013 +85381090,7014 +85389000,7015 +85391000,7016 +85392120,7018 +85392190,7019 +85392930,7023 +85392940,7024 +85392990,7025 +85393110,7026 +85393190,7027 +85393230,7030 +85393990,7032 +85394100,7033 +85394900,7034 +85395000,7035 +85399090,7040 +85407100,7044 +85408900,7047 +85411000,7050 +85412100,7051 +85412900,7052 +85413010,7053 +85413090,7054 +85414011,7055 +85414012,7056 +85414019,7057 +85414020,7058 +85414090,7059 +85414100,7060 +85414200,7061 +85414300,7062 +85414900,7063 +85415000,7064 +85415900,7066 +85416000,7067 +85419000,7068 +85423100,7069 +85423200,7070 +85423300,7071 +85423900,7072 +85429000,7073 +85432090,7079 +85433000,7080 +85437012,7082 +85437019,7083 +85437022,7084 +85437029,7085 +85437039,7086 +85437049,7088 +85437069,7090 +85437093,7094 +85437099,7097 +85439000,7098 +85441110,7099 +85441190,7100 +85441920,7102 +85441990,7104 +85442010,7105 +85442090,7106 +85443000,7107 +85444210,7108 +85444220,7109 +85444230,7110 +85444290,7111 +85444292,7113 +85444299,7115 +85444920,7117 +85444930,7118 +85444992,7120 +85444993,7121 +85444999,7122 +85446010,7123 +85446020,7124 +85446090,7126 +85447090,7128 +85451900,7130 +85452000,7131 +85459090,7134 +85462090,7139 +85469010,7140 +85469090,7141 +85471090,7144 +85472000,7145 +85479020,7147 +85479090,7148 +85489000,7152 +86071990,7163 +86072100,7164 +86072900,7165 +86073090,7167 +86079100,7168 +86079910,7169 +86079990,7172 +86080090,7176 +86090000,7177 +87032291,7187 +87032391,7189 +87032491,7190 +87033291,7192 +87033391,7193 +87038030,7196 +87071000,7208 +87079000,7209 +87081010,7210 +87081090,7211 +87082100,7212 +87082200,7213 +87082900,7214 +87083000,7215 +87084000,7216 +87085000,7217 +87087000,7218 +87088000,7219 +87089100,7220 +87089200,7221 +87089300,7222 +87089400,7223 +87089500,7224 +87089900,7225 +87099000,7228 +87114090,7238 +87115000,7239 +87116020,7241 +87116090,7242 +87120010,7246 +87120090,7247 +87131010,7248 +87131090,7249 +87139010,7250 +87139090,7251 +87141090,7253 +87142020,7255 +87142090,7256 +87149100,7257 +87149210,7258 +87149220,7259 +87149290,7260 +87149310,7261 +87149400,7264 +87149510,7265 +87149600,7267 +87149910,7268 +87149990,7270 +87150010,7271 +87168010,7276 +87168090,7277 +87169010,7278 +87169090,7279 +88031000,7285 +88032000,7286 +88033000,7287 +88039000,7288 +88040010,7289 +88052900,7294 +88073000,7297 +89040000,7308 +89069000,7312 +89079000,7314 +89080000,7315 +90011000,7316 +90012000,7317 +90013000,7318 +90014090,7320 +90015000,7321 +90019010,7322 
+90019090,7323 +90021100,7324 +90021900,7325 +90022000,7326 +90029000,7327 +90031100,7328 +90031900,7329 +90039000,7330 +90041000,7331 +90049020,7333 +90049090,7334 +90051000,7335 +90058090,7338 +90059090,7340 +90063000,7341 +90066100,7345 +90066900,7346 +90069100,7347 +90069900,7348 +90071090,7349 +90072090,7350 +90079100,7351 +90079200,7352 +90106000,7358 +90109000,7359 +90118000,7362 +90119000,7363 +90121090,7365 +90129000,7366 +90132000,7370 +90138000,7371 +90138010,7372 +90138090,7373 +90139010,7375 +90139090,7376 +90142000,7378 +90148090,7380 +90149000,7381 +90152000,7383 +90153090,7385 +90158020,7388 +90158030,7389 +90158090,7390 +90159000,7391 +90160010,7392 +90160020,7393 +90160090,7394 +90172090,7399 +90173010,7400 +90173021,7401 +90173029,7404 +90178010,7405 +90178090,7406 +90179000,7407 +90181100,7408 +90181210,7409 +90181290,7410 +90181300,7411 +90181400,7412 +90181910,7413 +90181990,7415 +90183100,7417 +90183210,7418 +90183220,7419 +90183290,7421 +90183920,7423 +90183930,7424 +90183990,7425 +90184900,7427 +90185010,7428 +90185090,7431 +90189011,7432 +90189012,7433 +90189019,7434 +90189021,7435 +90189022,7436 +90189023,7437 +90189024,7438 +90189025,7439 +90189029,7440 +90189031,7441 +90189041,7444 +90189042,7445 +90189044,7447 +90189091,7448 +90189093,7450 +90189094,7451 +90189095,7452 +90189096,7453 +90189099,7456 +90191010,7457 +90191020,7458 +90191090,7459 +90192010,7460 +90192090,7461 +90200000,7462 +90211000,7463 +90212100,7464 +90212900,7465 +90213100,7466 +90213900,7467 +90214010,7468 +90214090,7469 +90215000,7470 +90219010,7471 +90219090,7472 +90221200,7473 +90221490,7477 +90221900,7478 +90222100,7479 +90222900,7480 +90223000,7481 +90229090,7486 +90230010,7487 +90230090,7488 +90241000,7489 +90248010,7490 +90248091,7491 +90248099,7492 +90249000,7493 +90251190,7495 +90251910,7496 +90251920,7497 +90251990,7498 +90258090,7501 +90259000,7502 +90261010,7503 +90261020,7504 +90261090,7505 +90262000,7506 +90268010,7507 +90268090,7508 +90269000,7509 +90271000,7510 +90272000,7511 +90273010,7512 +90273020,7513 +90273090,7514 +90275020,7516 +90275090,7518 +90278010,7519 +90278030,7521 +90278090,7523 +90278990,7528 +90279010,7529 +90279020,7530 +90279090,7531 +90281000,7532 +90282000,7533 +90283090,7535 +90289010,7536 +90289090,7537 +90291090,7539 +90292010,7540 +90292090,7543 +90299000,7544 +90301000,7545 +90302000,7546 +90303100,7547 +90303200,7548 +90303310,7549 +90303330,7551 +90303390,7554 +90303900,7555 +90304000,7556 +90308200,7557 +90308400,7558 +90308990,7560 +90309010,7561 +90309090,7562 +90311000,7563 +90312000,7564 +90314900,7566 +90318000,7567 +90319000,7568 +90321010,7569 +90321090,7570 +90322010,7571 +90322090,7572 +90328100,7573 +90328910,7574 +90328990,7575 +90329000,7576 +90330000,7577 +91011100,7578 +91011900,7579 +91012100,7580 +91012900,7581 +91019990,7584 +91021100,7585 +91021900,7587 +91022100,7588 +91022900,7589 +91029990,7595 +91039000,7597 +91051900,7600 +91052100,7601 +91052900,7602 +91059990,7605 +91069000,7607 +91070000,7608 +91081100,7609 +91081900,7610 +91089000,7612 +91101100,7616 +91109000,7617 +91111000,7618 +91112000,7619 +91118000,7620 +91119000,7621 +91131000,7624 +91132010,7625 +91132090,7626 +91139010,7627 +91139090,7628 +91141010,7629 +91143010,7631 +91144010,7633 +91149091,7635 +91149092,7636 +92029000,7641 +92059090,7645 +92060000,7646 +92071000,7647 +92079000,7648 +92093000,7651 +92099200,7653 +92099400,7654 +92099900,7655 +93033000,7661 +93040000,7663 +93052090,7665 +93059100,7666 +93059900,7667 +93062100,7668 +93069000,7671 +94011000,7673 
+94012000,7674 +94013000,7675 +94013900,7677 +94015900,7682 +94016100,7683 +94016900,7684 +94017100,7685 +94017900,7686 +94018000,7687 +94019000,7688 +94019900,7690 +94021010,7691 +94021090,7692 +94029010,7693 +94029020,7694 +94029090,7695 +94031010,7696 +94031090,7697 +94032010,7698 +94032090,7699 +94033010,7700 +94033090,7701 +94034000,7702 +94035010,7703 +94035090,7704 +94036000,7705 +94037000,7706 +94038100,7707 +94038900,7710 +94039000,7711 +94039100,7712 +94039900,7713 +94041000,7714 +94042110,7715 +94042190,7716 +94042910,7717 +94042990,7719 +94043090,7721 +94049019,7726 +94049099,7728 +94051010,7729 +94051020,7730 +94051090,7731 +94051100,7732 +94051900,7733 +94052010,7734 +94052090,7735 +94052900,7737 +94053000,7738 +94053100,7739 +94053900,7740 +94054010,7741 +94054090,7742 +94054200,7744 +94054900,7745 +94055000,7746 +94055039,7748 +94055040,7749 +94055059,7751 +94056090,7753 +94059100,7756 +94059200,7757 +94059900,7758 +94060099,7763 +94069090,7768 +95030010,7769 +95030020,7770 +95030030,7771 +95030090,7772 +95030091,7773 +95030099,7774 +95042000,7775 +95043000,7776 +95044000,7777 +95045000,7778 +95049090,7781 +95051000,7782 +95059010,7783 +95059090,7784 +95061900,7787 +95062900,7789 +95063100,7790 +95063200,7791 +95063900,7792 +95064000,7793 +95065100,7794 +95065910,7795 +95065990,7796 +95066210,7798 +95066290,7801 +95066990,7805 +95067000,7806 +95069190,7808 +95069990,7815 +95071000,7816 +95072000,7817 +95073000,7818 +95079090,7820 +95089000,7823 +97011090,7825 +97019099,7829 +97030090,7836 +98010011,7844 +98010013,7846 +98010019,7849 +98010020,7850 +98010030,7851 +98020000,7852 +98041000,7854 +98049000,7855 +98059000,7856 +98060000,7857 +1012990,1 +1061900,4 +1069000,5 +1012100,0 +1019090,2 +1051100,3 +2044200,11 +2031900,7 +2044300,12 +2032900,10 +2023000,6 +2032200,9 +2032100,8 +3061790,35 +3028990,20 +3021400,17 +3046200,27 +3061690,32 +3027200,18 +3049900,30 +3061719,33 +3036900,24 +3035300,23 +3031300,21 +3038910,25 +3048100,28 +3028910,19 +3074310,37 +3074920,39 +3061720,34 +3019100,15 +3089000,40 +3032500,22 +3074320,38 +3063620,36 +3049300,29 +3038940,26 +3054100,31 +4061000,50 +4090000,54 +4039090,45 +4015000,42 +4031000,44 +4021010,43 +4051000,48 +4041020,47 +4041010,46 +4064000,52 +4059020,49 +4012000,41 +5119999,66 +5021010,55 +5119911,64 +5080020,59 +5080050,61 +5080030,60 +5080090,62 +5029090,56 +5080010,58 +5111000,63 +5119991,65 +5051090,57 +6039000,80 +6031300,78 +6049000,81 +6021000,70 +6022090,73 +6012010,68 +6029030,76 +6011000,67 +6022020,72 +6012021,69 +6022010,71 +6029010,74 +7133190,101 +7133500,103 +7132020,98 +7133300,102 +7031010,83 +7099990,85 +7132010,97 +7133990,104 +7092000,84 +7139010,108 +7129090,94 +7123900,92 +7129050,93 +7119090,90 +7108090,87 +7122000,91 +7139090,109 +7112000,88 +7135000,106 +7115900,89 +7104000,86 +7019000,82 +8062010,138 +8021200,116 +8023100,118 +8025100,120 +8042090,129 +8131000,153 +8045090,132 +8025200,121 +8045010,131 +8091000,142 +8093000,145 +8134090,155 +8109020,149 +8052100,135 +8041090,128 +8054000,136 +8132000,154 +8028010,122 +8071900,139 +8013210,112 +8092100,143 +8119090,152 +8041010,125 +8023200,119 +8092900,144 +8052000,134 +8104000,147 +8112090,151 +8011100,110 +8044000,130 +8013220,113 +8013290,114 +8022200,117 +8028020,123 +9096119,205 +9041120,178 +9071020,195 +9103020,212 +9011190,160 +9024090,176 +9092110,202 +9024020,173 +9096139,206 +9083110,200 +9101190,209 +9023090,171 +9102090,211 +9012190,161 +9021020,164 +9071090,197 +9103030,213 +9042229,187 +9061190,190 +9022020,167 +9042211,184 
+9041190,181 +9071030,196 +9011119,156 +9062000,193 +9019090,162 +9023020,170 +9109990,217 +9024010,172 +9061990,192 +9042110,183 +9042219,186 +9023010,169 +9024040,175 +9082100,199 +9109914,215 +9041140,180 +9081120,198 +9041130,179 +9021010,163 +9042212,185 +9109929,216 +9022090,168 +9102010,210 +9041110,177 +9092190,203 +9083190,201 +9051000,188 +9021090,166 +9093129,204 +9011139,158 +9011129,157 +9041200,182 +9101120,208 +9061110,189 +9096149,207 +9011149,159 +9021030,165 +10063020,225 +10019910,218 +10063090,226 +10063010,224 +10051000,220 +10085000,227 +10059000,221 +10089090,228 +10059030,222 +10049000,219 +10061010,223 +11081200,240 +11081100,239 +11071000,237 +11081990,243 +11063090,236 +11042200,234 +11081300,241 +11072000,238 +11090000,245 +11022000,230 +11010000,229 +11052000,235 +11082000,244 +11081400,242 +11029090,231 +11031300,232 +12077090,253 +12074090,251 +12099120,261 +12119096,282 +12079100,254 +12092990,258 +12119039,272 +12099910,267 +12099130,262 +12102000,269 +12119032,271 +12077010,252 +12119092,280 +12079990,255 +12122110,284 +12099150,264 +12119094,281 +12130000,287 +12030000,248 +12129990,286 +12119091,279 +12060010,249 +12122910,285 +12071010,250 +12119045,274 +12119070,277 +12024210,247 +12119042,273 +12119080,278 +12099140,263 +12092500,257 +12091000,256 +12119050,276 +13019013,289 +13022000,306 +13023100,307 +13019039,295 +13019021,292 +13019099,297 +13021918,303 +13021300,299 +13023290,309 +13021915,302 +13021914,301 +13021200,298 +13019019,291 +13019029,294 +13023230,308 +13019049,296 +13019016,290 +13021911,300 +14049090,318 +14019010,313 +14019090,314 +14049030,315 +14049070,317 +14012000,312 +14049050,316 +15121110,335 +15200000,355 +15111000,331 +15071000,321 +15132110,340 +15211090,357 +15180039,354 +15162099,352 +15153090,345 +15121910,336 +15179090,353 +15119010,332 +15159091,348 +15099090,327 +15109010,330 +15155091,346 +15100091,328 +15050090,320 +15151990,343 +15100099,329 +15162091,351 +15155099,347 +15121990,337 +15162039,350 +15122990,338 +15132990,341 +15119090,334 +15211019,356 +15131900,339 +15092000,325 +15219090,358 +15042020,319 +15141920,342 +15079090,323 +15152990,344 +16024100,362 +16010000,359 +16023900,361 +16024900,364 +16023100,360 +16041410,366 +16024200,363 +16041310,365 +16052900,367 +17021110,371 +17021990,374 +17023039,378 +17029090,385 +17026090,381 +17021910,373 +17029030,383 +17025000,380 +17023010,376 +17041000,386 +17011490,368 +17029010,382 +17022090,375 +17019910,369 +17029040,384 +17023020,377 +17024039,379 +18062000,396 +18069020,400 +18010000,391 +18061000,395 +18069040,402 +18031000,392 +18069030,401 +19049000,418 +19041090,416 +19054000,425 +19053219,423 +19024090,413 +19011090,404 +19059040,429 +19059030,428 +19059010,426 +19022010,409 +19052000,420 +19022090,410 +19051000,419 +19030000,414 +19019010,406 +19041010,415 +20097900,480 +20019000,432 +20093900,474 +20098910,482 +20081910,454 +20081940,457 +20059900,445 +20082000,459 +20081930,456 +20049000,439 +20094900,476 +20031000,435 +20029000,434 +20089300,465 +20059100,444 +20091200,471 +20060000,446 +20083010,460 +20091100,470 +20081100,453 +20055900,441 +20091900,472 +20089991,468 +20079930,450 +20081920,455 +20079100,448 +20079940,451 +20097100,479 +20089919,467 +20021000,433 +20098100,481 +20083090,461 +20099000,484 +20086000,462 +20058000,443 +20094100,475 +20011000,431 +20095000,477 +20039090,437 +20041000,438 +20087000,463 +20071000,447 +20088000,464 +20096900,478 +20089700,466 +20039010,436 +20052000,440 +20079910,449 +20092900,473 +21021090,495 
+21012090,491 +21031000,498 +21033000,500 +21069092,519 +21039030,503 +21069050,515 +21069040,514 +21069091,518 +21012020,490 +21041010,506 +21011120,486 +21021010,493 +21011110,485 +21032000,499 +21021020,494 +21069080,517 +21022000,496 +21012010,489 +21011200,488 +21023000,497 +21041090,507 +21069011,511 +21042000,508 +21069030,513 +21013090,492 +22042910,540 +22089011,569 +22082019,549 +22083092,556 +22083011,552 +22089019,570 +22021010,524 +22011020,522 +22083013,554 +22011010,521 +22019090,523 +22082011,547 +22072000,546 +22051000,542 +22086093,566 +22029930,532 +22029100,529 +22042290,539 +22071090,545 +22060000,544 +22082099,551 +22029910,530 +22085091,564 +22029010,526 +22082012,548 +22084091,561 +22084012,560 +22082091,550 +22089091,571 +22090010,573 +22059000,543 +22087091,568 +23069012,586 +23024000,576 +23025000,577 +23066000,585 +23012011,575 +23099032,593 +23040030,579 +23069090,587 +23065010,583 +23031000,578 +23063090,582 +23061020,580 +23063020,581 +23080000,588 +23065020,584 +24029090,600 +24021010,598 +24012090,597 +24039990,604 +24039970,603 +24012010,596 +24031110,601 +24039100,602 +25201010,663 +25232100,672 +25210010,667 +25292200,689 +25221000,669 +25171090,651 +25301020,691 +25010020,606 +25201090,664 +25262000,684 +25084010,628 +25181000,654 +25084090,629 +25101010,634 +25232930,674 +25231000,671 +25083030,626 +25059000,618 +25292100,688 +25239020,677 +25120090,639 +25049090,614 +25252090,681 +25131000,640 +25151220,644 +25070022,622 +25152090,646 +25102010,635 +25291020,687 +25051020,617 +25152010,645 +25161200,648 +25169090,649 +25051019,616 +25199010,658 +25051011,615 +25085010,630 +25182000,655 +25253090,682 +25291010,686 +25041020,612 +25041010,611 +25232990,675 +25030010,609 +25301090,692 +25151100,642 +25293000,690 +25191000,657 +25085039,631 +25183000,656 +25030090,610 +25222000,670 +25251090,680 +25083090,627 +25239090,678 +25062090,620 +25081010,624 +25090000,633 +25120030,638 +25233000,676 +25086000,632 +25061020,619 +25070010,621 +25010010,605 +25020000,608 +25132090,641 +25111090,637 +25280090,685 +25111020,636 +25174900,653 +25199030,660 +25171020,650 +25174100,652 +25261090,683 +25309050,694 +26020040,703 +26020030,702 +26020020,701 +26020010,700 +26030000,708 +26151000,722 +26060090,711 +26140031,719 +26100020,713 +26020090,707 +26201990,727 +26140039,720 +26180000,724 +26140090,721 +26100030,714 +26020060,705 +26020070,706 +26020050,704 +26100090,716 +26201910,726 +26011290,699 +26011112,696 +26011150,698 +26171000,723 +26140010,718 +26011119,697 +26060010,709 +26070000,712 +26203090,729 +26203010,728 +26100040,715 +27040090,738 +27101950,757 +27090010,746 +27129010,783 +27092000,747 +27109900,775 +27131200,789 +27079900,742 +27101290,752 +27131100,787 +27090000,745 +27011100,730 +27073000,739 +27111900,779 +27129030,784 +27101989,772 +27131290,790 +27150090,796 +27129040,785 +27101981,769 +27101219,748 +27074000,740 +27121090,781 +27101930,755 +27030010,736 +27081090,743 +27101951,758 +27149010,793 +27101978,766 +27101969,761 +27101972,763 +27082000,744 +27139000,792 +27011990,734 +27101920,754 +27101910,753 +27149030,794 +27112900,780 +27101221,750 +27102000,774 +27101987,771 +27101983,770 +27149090,795 +27101974,765 +27101241,751 +27101220,749 +27012090,735 +27101949,756 +27101961,760 +27030090,737 +27131110,788 +27101973,764 +28352690,942 +28258000,882 +28070010,822 +28251020,871 +28492010,1001 +28170010,851 +28401900,968 +28012000,798 +28399090,966 +28275990,899 +28252000,874 +28151200,846 +28352400,938 +28321090,917 +28269000,889 
+28049000,815 +28362090,950 +28433000,983 +28092020,828 +28432900,982 +28500010,1006 +28421000,977 +28362010,948 +28141000,842 +28047030,813 +28491000,1000 +28331100,920 +28230010,867 +28332700,926 +28323090,919 +28500049,1008 +28372090,963 +28259040,883 +28419000,976 +28272000,891 +28129000,841 +28402090,969 +28362020,949 +28459010,993 +28271000,890 +28353100,946 +28152000,847 +28251090,873 +28499090,1005 +28142000,843 +28299010,908 +28351090,936 +28051200,817 +28151190,845 +28334000,930 +28461090,996 +28080010,824 +28251040,872 +28309010,912 +28273100,892 +28261990,887 +28051100,816 +28201000,860 +28341010,931 +28530099,1014 +28417090,974 +28413000,971 +28499010,1003 +28333090,929 +28352620,941 +28451000,992 +28369910,957 +28444300,990 +28080020,825 +28042990,806 +28369920,958 +28299030,910 +28254000,877 +28112190,833 +28416900,973 +28461010,995 +28112930,836 +28499020,1004 +28053000,819 +28043000,807 +28257090,881 +28299020,909 +28045020,810 +28209000,861 +28054000,820 +28291100,905 +28289090,904 +28500050,1009 +28530010,1012 +28211020,863 +28444400,991 +28352500,939 +28332500,925 +28539010,1015 +28431090,980 +28332100,922 +28371100,960 +28011000,797 +28529000,1011 +28273500,894 +28046100,811 +28042910,805 +28161010,849 +28013020,799 +28151110,844 +28044090,808 +28443090,988 +28199000,859 +28415090,972 +28030020,803 +28331990,921 +28121090,839 +28261910,886 +28051900,818 +28322090,918 +28401100,967 +28342100,933 +28332400,924 +28241010,869 +28391900,965 +28274900,896 +28369200,956 +28432100,981 +28301000,911 +28403000,970 +28291920,906 +28253090,876 +28263000,888 +28311020,915 +28275110,897 +28100020,830 +28371990,962 +28275120,898 +28521000,1010 +28255000,878 +28341090,932 +28212000,864 +28253010,875 +28182019,855 +28273200,893 +28309020,913 +28191000,858 +28111100,831 +28342920,934 +28289030,903 +28153000,848 +28530040,1013 +28443029,987 +28230090,868 +28291990,907 +28311010,914 +28070020,823 +28391100,964 +28020030,801 +28112940,837 +28220010,865 +28100010,829 +28369100,955 +28121990,840 +28045010,809 +28352940,944 +28261200,885 +28352610,940 +28332910,927 +28257010,880 +28164000,850 +28091000,826 +28431010,979 +28418090,975 +28276010,900 +28220020,866 +28112920,835 +28276020,901 +28352930,943 +28249000,870 +28321010,916 +28441000,986 +28366000,954 +28371910,961 +28020010,800 +28469010,997 +28048000,814 +28170020,852 +28332290,923 +29031200,1038 +29399900,1490 +29392030,1482 +29161310,1224 +29412090,1498 +29041040,1060 +29031990,1041 +29052290,1080 +29362800,1459 +29299090,1380 +29037900,1053 +29242190,1362 +29181590,1274 +29411030,1494 +29154010,1202 +29157020,1209 +29372300,1471 +29342000,1440 +29212100,1303 +29181690,1276 +29061200,1094 +29336910,1430 +29142390,1168 +29143930,1173 +29411050,1495 +29126000,1159 +29182910,1284 +29211190,1301 +29181190,1268 +29333100,1416 +29251100,1367 +29071520,1110 +29157050,1211 +29055900,1092 +29173920,1263 +29420034,1518 +29163200,1240 +29222160,1338 +29362700,1458 +29223900,1345 +29413090,1501 +29182190,1280 +29381000,1477 +29156020,1207 +29302000,1381 +29362310,1452 +29061390,1096 +29303000,1382 +29033990,1048 +29375000,1473 +29412010,1497 +29153910,1196 +29336100,1429 +29161210,1222 +29024300,1031 +29144000,1175 +29181390,1270 +29221310,1333 +29201990,1294 +29232010,1356 +29181520,1273 +29224220,1348 +29389010,1478 +29038900,1054 +29171950,1253 +29094300,1132 +29071190,1104 +29392090,1483 +29332910,1413 +29362100,1449 +29171190,1245 +29262000,1371 +29054300,1088 +29153200,1194 +29151300,1188 +29224910,1350 +29182390,1283 +29222933,1343 
+29037300,1052 +29092000,1127 +29103000,1141 +29415000,1503 +29089990,1123 +29042090,1065 +29321200,1398 +29393000,1484 +29071290,1107 +29411010,1492 +29411090,1496 +29153300,1195 +29212910,1305 +29053100,1082 +29052900,1081 +29391900,1481 +29151100,1185 +29391100,1480 +29032200,1042 +29419050,1509 +29142922,1170 +29031300,1039 +29054100,1086 +29339100,1436 +29309060,1386 +29012200,1019 +29322020,1404 +29419019,1505 +29362910,1460 +29032300,1043 +29051210,1069 +29221210,1331 +29163110,1235 +29081900,1122 +29072300,1119 +29362210,1450 +29420027,1517 +29161590,1228 +29162000,1233 +29291010,1377 +29141200,1162 +29372100,1469 +29211110,1300 +29025000,1033 +29214910,1321 +29242110,1361 +29061100,1093 +29163140,1237 +29032900,1044 +29029040,1036 +29094400,1133 +29321300,1399 +29054400,1089 +29221290,1332 +29062100,1099 +29072200,1118 +29222190,1339 +29151210,1186 +29153940,1199 +29153100,1193 +29222926,1342 +29122100,1151 +29102000,1140 +29161950,1230 +29061310,1095 +29173200,1257 +29157030,1210 +29172000,1256 +29332100,1412 +29214516,1318 +29212200,1304 +29371200,1467 +29173960,1265 +29124100,1154 +29321910,1401 +29221110,1328 +29173600,1261 +29214110,1309 +29051700,1075 +29309040,1385 +29215190,1326 +29051690,1074 +29209020,1298 +29141910,1164 +29031500,1040 +29333920,1423 +29309070,1387 +29051300,1071 +29214190,1310 +29173400,1259 +29222150,1337 +29215120,1324 +29072100,1117 +29181200,1269 +29261000,1370 +29094100,1131 +29309080,1388 +29396900,1486 +29419040,1508 +29419011,1504 +29214590,1320 +29142310,1167 +29214234,1312 +29163950,1242 +29224210,1347 +29362940,1463 +29362390,1453 +29333911,1418 +29012300,1020 +29022000,1027 +29362610,1456 +29071300,1108 +29071510,1109 +29159020,1214 +29071590,1111 +29332920,1414 +29012920,1022 +29329300,1406 +29052220,1078 +29093019,1128 +29110010,1143 +29214410,1316 +29309010,1384 +29413010,1499 +29033920,1047 +29214490,1317 +29419030,1507 +29053910,1084 +29337100,1432 +29152400,1190 +29224290,1349 +29182110,1278 +29124200,1155 +29214526,1319 +29171970,1254 +29362690,1457 +29161320,1225 +29333200,1417 +29029010,1035 +29095020,1136 +29181110,1267 +29213010,1307 +29333917,1420 +29181610,1275 +29153920,1197 +29309091,1389 +29121200,1146 +29312000,1392 +29054290,1087 +29362500,1455 +29124910,1156 +29419060,1510 +29362400,1454 +29147020,1180 +29121910,1147 +29411020,1493 +29335920,1427 +29413020,1500 +29333918,1421 +29159030,1215 +29093020,1129 +29420014,1514 +29159010,1213 +29420015,1515 +29121930,1149 +29121920,1148 +29173700,1262 +29419020,1506 +29091100,1124 +29379019,1474 +29071220,1106 +29173930,1264 +29071930,1113 +29157010,1208 +29153930,1198 +29141300,1163 +29242910,1363 +29420012,1513 +29062920,1101 +29026000,1034 +29209010,1297 +29021100,1024 +29362950,1464 +29071940,1114 +29183020,1288 +29071950,1115 +29052230,1079 +29146910,1178 +29121100,1145 +29154020,1203 +29379020,1475 +29171110,1244 +29142200,1166 +29042010,1062 +29171930,1252 +29171300,1247 +29331100,1409 +29182310,1282 +29052210,1077 +29122910,1152 +29182200,1281 +29183040,1289 +29182930,1285 +29171910,1250 +29051620,1073 +29214222,1311 +29343000,1441 +29012910,1021 +29231000,1355 +29242930,1365 +29414000,1502 +29071210,1105 +29024100,1029 +29215110,1323 +29154030,1204 +29362290,1451 +29181510,1272 +29362920,1461 +29222913,1340 +29061910,1097 +29062910,1100 +29395900,1485 +29183010,1287 +29420016,1516 +29024200,1030 +29224920,1351 +29214350,1314 +29012100,1018 +29146100,1177 +29163120,1236 +29161960,1231 +29362930,1462 +29143910,1172 +29125000,1158 +29161510,1227 +29161930,1229 +29333916,1419 
+29215130,1325 +29182120,1279 +29024400,1032 +29163910,1241 +29142910,1169 +29042050,1063 +29041010,1059 +29322010,1403 +29039940,1057 +29152910,1191 +29039120,1056 +29225011,1353 +29033100,1045 +29222922,1341 +29335400,1426 +29173300,1258 +29101000,1139 +29242920,1364 +29171920,1251 +29319010,1395 +29039110,1055 +29163160,1238 +29071910,1112 +29270010,1374 +29042060,1064 +29397900,1487 +29221200,1330 +29398000,1489 +29313900,1393 +29321400,1400 +29221910,1335 +29202990,1295 +29147100,1182 +29233000,1358 +29221500,1334 +29351000,1446 +29359011,1447 +29264000,1372 +29147920,1183 +29337990,1435 +29159092,1219 +29162090,1234 +29397990,1488 +29091990,1126 +29159070,1216 +29309098,1390 +29349910,1443 +29072920,1120 +29337910,1434 +29159091,1218 +29171390,1248 +29034500,1050 +29034200,1049 +29036900,1051 +29314990,1394 +30049015,1580 +30059040,1603 +30049049,1586 +30044090,1572 +30049039,1583 +30061020,1608 +30059050,1604 +30043110,1565 +30042099,1564 +30049063,1588 +30021091,1528 +30049069,1589 +30049055,1587 +30049085,1593 +30029040,1552 +30049029,1581 +30045090,1576 +30039011,1556 +30019010,1522 +30039090,1559 +30049032,1582 +30059030,1602 +30065000,1612 +30044990,1573 +30012090,1521 +30021020,1527 +30019091,1523 +30049011,1578 +30033900,1555 +30059010,1601 +30049091,1595 +30022012,1539 +30043200,1567 +30049079,1590 +30022014,1540 +30046000,1577 +30041010,1560 +30032000,1554 +30059060,1605 +30021011,1525 +30043912,1568 +30049092,1596 +30021100,1530 +30067000,1614 +30049095,1597 +30049047,1585 +30049081,1591 +30066010,1613 +30021210,1531 +30042019,1563 +30012030,1520 +30045010,1574 +30049087,1594 +30043921,1570 +30021410,1536 +30043190,1566 +30043919,1569 +30051020,1599 +30041030,1561 +30039034,1558 +30049044,1584 +30045020,1575 +30049082,1592 +30021300,1533 +30025900,1548 +30021400,1535 +30024200,1545 +31054000,1630 +31026000,1622 +31042000,1624 +31051000,1627 +31021000,1617 +31043000,1625 +31049000,1626 +31053000,1629 +31029090,1623 +31056000,1633 +31059010,1634 +31023000,1620 +31022990,1619 +31025000,1621 +31022100,1618 +31055900,1632 +31055100,1631 +32100020,1775 +32041929,1706 +32089030,1763 +32041559,1683 +32089049,1765 +32071020,1742 +32019010,1638 +32041911,1705 +32041159,1655 +32065000,1740 +32041761,1700 +32064940,1738 +32064200,1735 +32041139,1653 +32041680,1691 +32072020,1747 +32041978,1722 +32041551,1682 +32151130,1792 +32029030,1643 +32089050,1766 +32151910,1795 +32041720,1695 +32041490,1679 +32041759,1699 +32041630,1688 +32041610,1686 +32041399,1677 +32081030,1752 +32030090,1649 +32082030,1756 +32159030,1801 +32089041,1764 +32110000,1779 +32151110,1791 +32041213,1661 +32041329,1673 +32041119,1651 +32041519,1680 +32159040,1802 +32041211,1659 +32030010,1645 +32041967,1714 +32041751,1698 +32089011,1758 +32062000,1733 +32100030,1776 +32041259,1668 +32041539,1681 +32030030,1647 +32149010,1788 +32089022,1761 +32041359,1676 +32041973,1718 +32041969,1715 +32011000,1636 +32071010,1741 +32041339,1674 +32041975,1720 +32041964,1713 +32041711,1693 +32081020,1751 +32041740,1697 +32041310,1672 +32041214,1662 +32041269,1669 +32041949,1708 +32041196,1657 +32100019,1774 +32041217,1664 +32030020,1646 +32089021,1760 +32041935,1707 +32151930,1796 +32019090,1639 +32041780,1703 +32042010,1726 +32041218,1665 +32041971,1716 +32041193,1656 +32149020,1789 +32041215,1663 +32041770,1702 +32041955,1710 +32029020,1642 +32041111,1650 +32041670,1690 +32041769,1701 +32041989,1724 +32100040,1777 +32071030,1743 +32041620,1687 +32064100,1734 +32041129,1652 +32029010,1641 +32041972,1717 +32041149,1654 +32041959,1711 
+32041976,1721 +32099010,1770 +32100011,1773 +32041589,1684 +32064910,1736 +32041212,1660 +32041599,1685 +32041951,1709 +32041419,1678 +32041974,1719 +32041349,1675 +32041961,1712 +32041239,1667 +32030040,1648 +32041291,1670 +32064930,1737 +33012942,1828 +33013099,1835 +33071010,1886 +33012941,1827 +33062000,1884 +33061010,1881 +33019029,1840 +33012510,1809 +33052000,1872 +33019014,1837 +33012911,1811 +33012950,1832 +33051010,1870 +33073010,1889 +33012590,1810 +33012400,1808 +33019079,1844 +33012934,1822 +33049940,1867 +33053000,1873 +33012938,1826 +33012916,1813 +33049120,1862 +33019059,1842 +33030040,1854 +33079010,1893 +33012924,1818 +33059050,1879 +33012937,1825 +33061020,1882 +33012935,1823 +33012921,1816 +33012932,1821 +33019049,1841 +33012918,1815 +33059011,1874 +33059019,1875 +33012928,1820 +33019071,1843 +33013091,1834 +33030020,1853 +33061090,1883 +33059030,1877 +33019013,1836 +33079020,1894 +33012943,1829 +33012917,1814 +33012936,1824 +33019017,1838 +33019022,1839 +33012949,1831 +33012926,1819 +33011910,1806 +33012922,1817 +33012947,1830 +33012915,1812 +33059020,1876 +34011990,1902 +34022020,1914 +34029012,1922 +34029020,1924 +34054000,1947 +34049010,1940 +34052000,1945 +34029091,1932 +34021110,1908 +34013012,1905 +34029030,1925 +34011110,1896 +34011919,1899 +34029049,1928 +34011941,1901 +34011920,1900 +34029052,1930 +34029051,1929 +34011120,1897 +34029042,1927 +34013011,1904 +34029041,1926 +34024900,1919 +34023900,1916 +34025000,1920 +34024100,1917 +35030090,1960 +35052000,1966 +35022000,1956 +35051010,1964 +35019000,1955 +35069910,1970 +35040091,1962 +35079062,1976 +35030030,1959 +35079030,1974 +35069991,1971 +35030020,1958 +35011000,1954 +35079061,1975 +36020090,1982 +36020010,1981 +36030019,1983 +36049010,1987 +36030020,1984 +36061000,1989 +36049090,1988 +36030059,1986 +36030039,1985 +37024490,2001 +37024390,2000 +37029890,2004 +37023990,1998 +37050000,2010 +37071000,2012 +37040010,2008 +37021000,1996 +37040090,2009 +37012000,1992 +37031010,2005 +37059090,2011 +37039010,2007 +37023190,1997 +37024290,1999 +37019190,1994 +37029810,2003 +37025690,2002 +38123010,2067 +38249025,2127 +38012000,2016 +38249026,2128 +38249017,2123 +38249022,2125 +38029011,2020 +38130000,2075 +38231190,2103 +38231200,2104 +38099140,2049 +38151100,2078 +38249015,2121 +38099310,2054 +38180010,2086 +38089330,2039 +38089191,2036 +38220012,2093 +38111900,2060 +38062000,2032 +38099130,2048 +38249016,2122 +38089400,2043 +38040090,2025 +38260000,2145 +38220011,2092 +38051030,2028 +38190010,2088 +38061090,2031 +38249024,2126 +38085000,2035 +38013000,2017 +38091000,2046 +38231119,2102 +38170011,2084 +38237020,2107 +38249011,2119 +38231300,2105 +38151210,2079 +38089910,2044 +38059090,2029 +38259000,2144 +38099160,2050 +38249032,2129 +38099170,2051 +38123030,2068 +38109010,2058 +38247900,2116 +38170019,2085 +38122010,2065 +38089350,2041 +38247800,2115 +38029020,2022 +38249012,2120 +38030000,2023 +38063000,2033 +38246090,2114 +38051010,2026 +38123910,2071 +38123930,2073 +38123100,2070 +38231100,2101 +38248800,2118 +38123920,2072 +38249926,2141 +38249922,2138 +38249924,2139 +38248400,2117 +38249932,2142 +38249917,2136 +38249911,2133 +38249915,2135 +38249912,2134 +38221300,2097 +38221100,2096 +39042290,2176 +39079110,2221 +39123912,2269 +39042190,2173 +39262091,2465 +39261011,2454 +39202010,2343 +39209490,2391 +39209390,2390 +39201091,2340 +39262021,2460 +39123911,2268 +39169027,2300 +39171010,2307 +39269061,2489 +39219094,2421 +39211400,2409 +39122019,2265 +39169024,2299 +39042210,2175 +39081010,2229 +39219096,2423 
+39189010,2330 +39205919,2355 +39077000,2220 +39262019,2459 +39171020,2308 +39159042,2288 +39169028,2301 +39139019,2280 +39045090,2180 +39032000,2164 +39051290,2187 +39209960,2401 +39169060,2304 +39089020,2240 +39089010,2239 +39206110,2357 +39219091,2419 +39206912,2366 +39161010,2290 +39269041,2485 +39119010,2260 +39059910,2193 +39039010,2166 +39140010,2284 +39209932,2396 +39079910,2226 +39159090,2289 +39022000,2158 +39201011,2337 +39121290,2264 +39201092,2341 +39139030,2282 +39059100,2192 +39031910,2162 +39046910,2182 +39251000,2449 +39031100,2161 +39069020,2198 +39051990,2188 +39172950,2317 +39131090,2278 +39264011,2469 +39239010,2441 +39091090,2243 +39209111,2383 +39269021,2481 +39044000,2179 +39129020,2275 +39207329,2377 +39019010,2155 +39205911,2354 +39123922,2272 +39209991,2402 +39122029,2266 +39139011,2279 +39043010,2177 +39121190,2263 +39206210,2360 +39219095,2422 +39129010,2274 +39261091,2456 +39207111,2372 +39207399,2378 +39173220,2321 +39140020,2285 +39219093,2420 +39079150,2223 +39219021,2412 +39172910,2314 +39219026,2415 +39169023,2298 +39061010,2195 +39264041,2474 +39249020,2447 +39094020,2250 +39131010,2277 +39094030,2251 +39207911,2379 +39111010,2258 +39207319,2376 +39172310,2312 +39209911,2392 +39207199,2374 +39121110,2262 +39206310,2363 +39069030,2199 +39189020,2331 +39203010,2346 +39262041,2463 +39209212,2387 +39169080,2305 +39209211,2386 +39173920,2325 +39169031,2302 +39206992,2370 +39269031,2483 +39264091,2478 +39123921,2271 +39159030,2287 +39139020,2281 +39161020,2291 +39091010,2242 +39209942,2398 +39219022,2413 +39206120,2358 +39094040,2252 +39094010,2249 +39207919,2380 +39269051,2487 +39203020,2347 +39172920,2315 +39207311,2375 +39269071,2491 +39209921,2394 +39209992,2403 +39206911,2365 +39264031,2472 +39162091,2295 +39093990,2248 +39076100,2216 +39172930,2316 +39162011,2293 +39042100,2171 +39219025,2414 +39219036,2417 +39076190,2218 +39049090,2186 +39081041,2234 +39081011,2230 +39081049,2235 +39069060,2202 +39081079,2236 +39069050,2201 +39209110,2382 +39049010,2185 +39069070,2203 +39076110,2217 +39081039,2233 +39081029,2232 +39069040,2200 +39072910,2209 +39072100,2208 +40149010,2612 +40023900,2508 +40116900,2586 +40131020,2604 +40082110,2538 +40029910,2517 +40103991,2572 +40170010,2646 +40119400,2592 +40012100,2497 +40139010,2606 +40169930,2639 +40159020,2620 +40021910,2502 +40159010,2619 +40023100,2507 +40061000,2528 +40101910,2558 +40169400,2634 +40103992,2573 +40122020,2595 +40028090,2515 +40103510,2567 +40112090,2578 +40139020,2607 +40011020,2496 +40115010,2583 +40052090,2523 +40021100,2501 +40029100,2516 +40012990,2499 +40139030,2608 +40169510,2635 +40170030,2648 +40081910,2536 +40025100,2511 +40159091,2622 +40129090,2602 +40052010,2522 +40021920,2503 +40030000,2519 +40119200,2590 +40116100,2585 +40069010,2529 +40114020,2581 +40119300,2591 +40101110,2554 +40149020,2613 +40129050,2601 +40026000,2513 +40082120,2539 +40082920,2542 +40139049,2609 +40169970,2643 +40170040,2649 +40024100,2509 +40021930,2504 +40170020,2647 +40131010,2603 +40122090,2596 +40103310,2564 +40129030,2599 +40101210,2556 +40103210,2562 +40141010,2611 +40149030,2614 +40059910,2526 +40059110,2524 +40013000,2500 +40082940,2544 +40129010,2597 +40117000,2587 +40070020,2532 +40151200,2617 +41062100,2672 +41012090,2652 +41019010,2655 +41021010,2657 +41015010,2653 +41039000,2665 +41132000,2685 +41142020,2689 +41062200,2673 +41141000,2687 +41022110,2659 +41063200,2674 +41022920,2663 +41069200,2676 +41012010,2651 +41015090,2654 +41152090,2691 +41069100,2675 +41022910,2662 +41033000,2664 +41022130,2661 
+41021030,2658 +41022120,2660 +42050011,2742 +42032910,2736 +42021210,2698 +42021110,2693 +42022240,2718 +42021280,2705 +42060090,2746 +42021260,2703 +42023110,2722 +42021910,2707 +42021920,2708 +42022230,2717 +42023910,2727 +42021140,2695 +42032930,2738 +42021230,2700 +42021270,2704 +42021120,2694 +42021930,2709 +42021160,2696 +42021960,2710 +42034010,2740 +42022120,2713 +42050020,2744 +42032120,2735 +43040019,2755 +43040011,2754 +43021930,2750 +43013000,2747 +43021910,2748 +43021920,2749 +43040020,2756 +43021990,2751 +43031090,2752 +43039090,2753 +44034990,2766 +44149000,2838 +44190010,2858 +44189000,2856 +44211000,2872 +44151000,2839 +44071990,2783 +44191900,2862 +44119219,2821 +44083190,2799 +44079200,2792 +44123990,2833 +44091090,2807 +44071090,2779 +44187200,2853 +44071020,2778 +44219190,2879 +44034100,2763 +44072200,2784 +44219119,2877 +44039300,2768 +44181000,2844 +44121000,2826 +44182190,2848 +44191200,2861 +44039400,2769 +44083920,2801 +44190020,2859 +44109090,2816 +44160010,2841 +44119221,2822 +44050000,2776 +44201900,2869 +44182020,2846 +44209010,2870 +44079300,2793 +44123110,2827 +44219919,2880 +44072500,2786 +44170000,2843 +44101130,2813 +44081010,2796 +44182010,2845 +44199020,2865 +44071010,2777 +44123310,2831 +44072900,2788 +44123210,2829 +44039100,2767 +44039928,2773 +44219960,2881 +44092200,2809 +44182990,2849 +44039921,2771 +44201100,2868 +44119329,2824 +44184000,2850 +44219070,2874 +44029090,2758 +44119211,2820 +44219114,2876 +44129910,2834 +44191100,2860 +44032210,2761 +44079100,2791 +44219170,2878 +44039924,2772 +44101290,2815 +44091020,2806 +44186000,2851 +44012200,2757 +44219970,2882 +44091010,2805 +44187100,2852 +44160099,2842 +44199010,2864 +44192000,2863 +44081030,2797 +44039700,2770 +44071910,2782 +44072600,2787 +44092910,2810 +44219060,2873 +44123290,2830 +44092100,2808 +45031000,2885 +45039090,2886 +45041090,2887 +45049000,2888 +45019000,2884 +46021200,2896 +46012100,2889 +46012900,2891 +46021919,2898 +46012200,2890 +46021911,2897 +46019200,2892 +46019300,2893 +47050000,2908 +47073000,2916 +47020000,2902 +47031900,2904 +47063000,2911 +47031100,2903 +47062000,2910 +47010000,2901 +47042900,2907 +47069100,2912 +47061000,2909 +47069200,2913 +48201020,3049 +48081000,2978 +48092000,2981 +48204000,3053 +48239019,3081 +48195010,3045 +48030010,2947 +48239016,3078 +48111000,3001 +48026990,2946 +48062000,2972 +48185000,3036 +48042100,2951 +48119017,3013 +48052400,2963 +48162090,3026 +48201010,3048 +48221000,3062 +48239014,3076 +48115110,3004 +48021020,2921 +48131000,3019 +48025810,2938 +48169090,3028 +48043100,2953 +48101310,2983 +48103100,2994 +48209010,3055 +48025710,2934 +48132000,3020 +48229010,3063 +48044200,2956 +48010010,2918 +48026290,2943 +48172000,3030 +48239017,3079 +48061000,2971 +48051100,2961 +48239011,3073 +48044100,2955 +48101910,2988 +48103920,2997 +48119094,3016 +48119011,3009 +48044900,2957 +48237010,3069 +48237030,3071 +48084090,2979 +48053000,2965 +48025720,2935 +48025490,2928 +48025610,2931 +48025820,2939 +48119093,3015 +48119091,3014 +48063000,2973 +48055000,2967 +48169010,3027 +48236100,3067 +48101330,2985 +48101930,2990 +48025760,2936 +48173010,3031 +48022010,2922 +48119012,3010 +48162010,3025 +48024000,2924 +48139010,3021 +48025510,2929 +48239018,3080 +48025620,2932 +48026910,2944 +48070010,2976 +48045100,2958 +48045200,2959 +48026930,2945 +48026210,2942 +48237020,3070 +48025420,2925 +48025430,2926 +48119016,3012 +49059990,3098 +49090010,3105 +49021020,3089 +49111030,3111 +49059910,3097 +49070090,3102 +49019100,3086 +49051000,3095 +49029010,3090 
+49059090,3096 +50071000,3124 +50040090,3121 +50072010,3125 +50060090,3123 +50020030,3119 +50020020,3118 +50050011,3122 +50079010,3127 +50030090,3120 +51012100,3131 +51072090,3144 +51071090,3143 +51121930,3162 +51119030,3156 +51122090,3165 +51113030,3154 +51062090,3141 +51111990,3151 +51021190,3133 +51113090,3155 +51123030,3166 +51091090,3146 +51112030,3152 +51123090,3167 +51111110,3148 +51061090,3140 +51052990,3138 +51112090,3153 +51082000,3145 +51052910,3137 +51119090,3157 +51122030,3164 +51099000,3147 +51121920,3161 +51051000,3136 +51011100,3129 +51021990,3134 +51129030,3168 +51071010,3142 +51053900,3139 +51032090,3135 +52093290,3241 +52084320,3222 +52029900,3172 +52103990,3259 +52093990,3242 +52104190,3261 +52121200,3280 +52115190,3277 +52092990,3237 +52121300,3281 +52052490,3187 +52104130,3260 +52083130,3211 +52054290,3192 +52082290,3206 +52084130,3218 +52121400,3282 +52114990,3275 +52122300,3284 +52114200,3274 +52094990,3247 +52111190,3266 +52064200,3196 +52095990,3251 +52085990,3229 +52041130,3176 +52051110,3181 +52084190,3219 +52051210,3183 +52121500,3283 +52115120,3276 +52079000,3198 +52041190,3177 +52082990,3208 +52095190,3249 +52095290,3250 +52091220,3231 +52115990,3278 +52083129,3210 +52061200,3194 +52113990,3271 +52041110,3174 +52091190,3230 +52093170,3239 +52082390,3207 +52081190,3200 +52082230,3205 +52052390,3186 +52103290,3258 +52084121,3217 +52054890,3193 +52113190,3269 +52084390,3223 +52111900,3267 +52101900,3253 +52083390,3215 +52105190,3264 +52051190,3182 +52094190,3244 +52113290,3270 +52104990,3262 +52083230,3213 +52052890,3188 +52053430,3190 +52091250,3232 +52082190,3204 +52042090,3180 +52103110,3256 +52071000,3197 +52085230,3227 +52085190,3226 +52091290,3233 +52094130,3243 +52085130,3225 +52041120,3175 +52030000,3173 +52093130,3238 +52041900,3178 +52122400,3285 +52053290,3189 +52092190,3235 +52062300,3195 +52083110,3209 +52081130,3199 +52122500,3286 +52105110,3263 +52042030,3179 +52052290,3185 +52094390,3246 +52081990,3203 +52081230,3201 +52114130,3272 +52112099,3268 +52092290,3236 +52021000,3171 +52101190,3252 +52091900,3234 +52114190,3273 +52102990,3255 +52083990,3216 +52081290,3202 +52102190,3254 +52095119,3248 +52105990,3265 +52051290,3184 +52054210,3191 +52121100,3279 +53012900,3288 +53110019,3317 +53110013,3316 +53062090,3296 +53091110,3302 +53092910,3308 +53031010,3290 +53012100,3287 +53110029,3318 +53091920,3305 +53092920,3309 +53089090,3301 +53091120,3303 +53089010,3300 +53082000,3299 +53050090,3293 +53039010,3291 +53101011,3311 +53039090,3292 +53109020,3314 +53061010,3294 +53110012,3315 +53013000,3289 +53092120,3307 +54033100,3347 +54024600,3333 +54041100,3355 +54021110,3321 +54026200,3342 +54034911,3350 +54034912,3351 +54071049,3370 +54082219,3413 +54074230,3379 +54081000,3408 +54023400,3328 +54071029,3367 +54075210,3385 +54083490,3423 +54024800,3335 +54083300,3421 +54041200,3356 +54082490,3418 +54074119,3377 +54077400,3398 +54023200,3326 +54024900,3336 +54026950,3344 +54075119,3383 +54034990,3354 +54073090,3376 +54071099,3371 +54083419,3422 +54082300,3416 +54021910,3322 +54026100,3341 +54023990,3330 +54071039,3369 +54079400,3407 +54023910,3329 +54076110,3391 +54078119,3399 +54077110,3395 +54025100,3337 +54026300,3343 +54072010,3372 +54074490,3382 +54074300,3381 +54071011,3364 +54078300,3403 +54041910,3357 +54082419,3417 +54025910,3339 +54075220,3386 +54060010,3362 +54075129,3384 +54076120,3392 +54074129,3378 +54073030,3375 +54078490,3404 +54025990,3340 +54082120,3409 +54041920,3358 +54033910,3348 +54075420,3389 +54025200,3338 +54082211,3410 +54034919,3353 
+54031090,3346 +54078129,3400 +54071015,3365 +54083219,3419 +54072030,3373 +54033990,3349 +54082220,3414 +54082216,3411 +54060020,3363 +54082217,3412 +54034913,3352 +54078250,3401 +54071035,3368 +55093200,3459 +55132300,3488 +55041000,3438 +55013000,3426 +55049090,3442 +55092200,3457 +55051010,3443 +55142100,3494 +55039090,3437 +55109090,3475 +55052000,3445 +55151290,3503 +55034000,3436 +55169300,3529 +55162400,3520 +55132900,3489 +55041090,3441 +55142900,3497 +55131120,3486 +55162300,3519 +55129990,3485 +55101190,3472 +55151990,3507 +55021090,3429 +55161490,3516 +55093100,3458 +55011000,3424 +55133100,3490 +55133900,3491 +55121920,3479 +55031100,3430 +55062000,3447 +55069090,3449 +55094110,3460 +55151390,3505 +55095900,3467 +55159990,3511 +55132100,3487 +55161200,3514 +55070090,3451 +55091200,3455 +55091100,3454 +55109010,3474 +55142200,3495 +55129110,3483 +55094190,3462 +55121110,3476 +55151930,3506 +55099900,3470 +55082000,3453 +55152190,3508 +55121120,3477 +55096100,3468 +55095200,3465 +55041010,3439 +55070020,3450 +55152990,3509 +55122910,3481 +55061000,3446 +55094120,3461 +55163200,3522 +55051090,3444 +55143019,3498 +55161300,3515 +55134100,3492 +55164300,3526 +55151230,3502 +55161120,3513 +55162120,3517 +55151330,3504 +55169200,3528 +55096900,3469 +55012000,3425 +55151140,3500 +55020010,3427 +55033090,3435 +55041019,3440 +55063000,3448 +55161110,3512 +55163110,3521 +55094290,3463 +55021010,3428 +55095300,3466 +55164400,3527 +55164200,3525 +55142300,3496 +55129910,3484 +55159190,3510 +55163300,3523 +55101210,3473 +55031910,3432 +55141110,3493 +55164110,3524 +56013000,3534 +56081110,3569 +56012110,3530 +56022910,3537 +56075010,3561 +56089090,3572 +56075040,3564 +56050010,3555 +56081190,3570 +56039390,3549 +56081900,3571 +56079020,3567 +56090010,3573 +56022100,3536 +56039410,3551 +56075030,3563 +56075020,3562 +56029010,3539 +56072900,3559 +56079010,3566 +56031190,3542 +56072100,3558 +56090030,3574 +57032010,3606 +57033010,3613 +57024210,3591 +57049090,3624 +57011090,3578 +57024110,3589 +57029290,3601 +57023210,3586 +57023190,3585 +57023110,3582 +57022090,3581 +57031020,3604 +57050039,3631 +57031090,3605 +57029990,3602 +57050019,3625 +57029210,3599 +57032100,3609 +57050049,3634 +57011000,3576 +57011010,3577 +57033100,3616 +57031010,3603 +57021000,3580 +57050029,3629 +57032920,3611 +57024190,3590 +57023910,3588 +57023290,3587 +57024990,3595 +57033920,3618 +57050021,3626 +57050023,3628 +57039010,3620 +57041000,3622 +57032910,3610 +57050042,3633 +57033910,3617 +57049010,3623 +57024910,3594 +57023140,3584 +57050031,3630 +57025029,3597 +57029220,3600 +57025039,3598 +57025021,3596 +57050022,3627 +57050041,3632 +57024230,3592 +57023130,3583 +58081010,3675 +58013100,3642 +58013200,3643 +58013790,3651 +58110020,3690 +58110090,3691 +58013300,3644 +58012300,3639 +58079010,3673 +58041010,3654 +58063920,3666 +58109100,3684 +58043000,3659 +58089020,3678 +58013719,3649 +58012290,3638 +58064000,3669 +58012790,3641 +58063910,3665 +58030099,3653 +58012210,3637 +58090090,3682 +58013711,3648 +58019090,3652 +58089040,3680 +58011000,3636 +58063930,3667 +58013710,3647 +58050090,3660 +58012720,3640 +58101000,3683 +58089030,3679 +58013610,3645 +58110010,3689 +58109990,3688 +59022010,3699 +59080090,3723 +59113250,3742 +59100050,3729 +59114000,3744 +59113230,3741 +59080010,3722 +59069920,3716 +59070011,3718 +59100010,3727 +59039020,3707 +59011090,3694 +59113110,3734 +59100060,3730 +59041000,3709 +59069910,3715 +59049090,3710 +59050010,3711 +59090010,3724 +59090020,3725 +59011020,3693 +59019010,3695 +59113220,3740 
+59070019,3720 +59100030,3728 +59113130,3736 +59069190,3714 +59113150,3737 +59029090,3701 +59011010,3692 +59113120,3735 +60063100,3783 +60011020,3749 +60032000,3759 +60064400,3790 +60062100,3779 +60019990,3756 +60053800,3773 +60053790,3772 +60062400,3782 +60011090,3750 +60054200,3775 +60054400,3776 +60062300,3781 +60033000,3760 +60053300,3767 +60064300,3789 +60053500,3769 +60012900,3753 +60019100,3754 +60061000,3778 +60052200,3764 +60039000,3761 +60011010,3748 +60053400,3768 +60064100,3787 +60012100,3751 +61044100,3829 +61153000,3932 +61124990,3919 +61079990,3870 +61171030,3946 +61122040,3911 +61044910,3833 +61045990,3840 +61101900,3898 +61046910,3844 +61071220,3862 +61122030,3910 +61082910,3878 +61031020,3801 +61159400,3933 +61069010,3857 +61171020,3945 +61152930,3930 +61069020,3858 +61079190,3869 +61089220,3886 +61169200,3940 +61045910,3838 +61081110,3871 +61081990,3874 +61099040,3892 +61019090,3795 +61059010,3852 +61046100,3841 +61072990,3867 +61052020,3851 +61178030,3951 +61089990,3887 +61099030,3891 +61081920,3873 +61178010,3949 +61043920,3827 +61089100,3884 +61042200,3820 +61021000,3796 +61045920,3839 +61071990,3863 +61034920,3815 +61169100,3939 +61032300,3805 +61041300,3817 +61042990,3823 +61043100,3824 +61081120,3872 +61072210,3865 +61169910,3942 +61123990,3916 +61171010,3944 +61159910,3936 +61178020,3950 +61119020,3905 +61122050,3912 +61121990,3909 +61032990,3806 +61152910,3928 +61029090,3800 +61033100,3807 +61045100,3835 +61041990,3819 +61124920,3918 +61034100,3812 +61031090,3803 +61122090,3913 +61079110,3868 +61152920,3929 +61033920,3810 +61041920,3818 +61123920,3915 +61042920,3822 +61072220,3866 +62041200,4016 +62059010,4071 +62045910,4053 +62089190,4100 +62102010,4113 +62044911,4044 +62102020,4114 +62171070,4177 +62043100,4026 +62046919,4064 +62011990,3960 +62089990,4104 +62141020,4147 +62103090,4118 +62046190,4058 +62044110,4035 +62142010,4149 +62011310,3958 +62041919,4018 +62171040,4175 +62159010,4168 +62079990,4090 +62112000,4124 +62031910,3995 +62141010,4146 +62082100,4094 +62046911,4063 +62089920,4103 +62114210,4129 +62103010,4117 +62149050,4160 +62160010,4170 +62149040,4159 +62021990,3977 +62011100,3955 +62132000,4143 +62079190,4089 +62171060,4176 +62031990,3996 +62021190,3973 +62032900,3999 +62081910,4092 +62149010,4158 +62011290,3957 +62041100,4015 +62029190,3987 +62031200,3994 +62029990,3992 +62171030,4174 +62011210,3956 +62113900,4127 +62019100,3968 +62042290,4021 +62102090,4116 +62042990,4025 +62081100,4091 +62029110,3986 +62033910,4005 +62123000,4140 +62032300,3998 +62179010,4179 +62089210,4101 +62032200,3997 +62072200,4087 +62042210,4020 +62041300,4017 +62114910,4134 +62142030,4152 +62071910,4083 +62043919,4032 +62079110,4088 +62071990,4084 +62104010,4119 +62045100,4048 +62089110,4099 +62149060,4161 +62099010,4109 +62042919,4024 +62102030,4115 +62139010,4144 +62034910,4013 +62082910,4097 +62041990,4019 +62052010,4067 +62043190,4027 +62063010,4078 +62033919,4006 +62046210,4060 +62142029,4151 +62034210,4010 +62059019,4072 +62114929,4135 +62092010,4106 +62082190,4095 +62151010,4165 +62113990,4128 +62032990,4000 +62019090,3967 +62024010,3982 +62022010,3978 +62014010,3965 +62029090,3985 +62013090,3964 +62053090,4070 +62045999,4056 +62143090,4155 +62043999,4034 +62043290,4029 +62114299,4131 +62045290,4050 +62012090,3962 +62042390,4023 +62023090,3981 +62023010,3980 +62012010,3961 +62022090,3979 +62013010,3963 +62045919,4054 +62101090,4112 +62149099,4163 +62144090,4157 +63071010,4251 +63023100,4191 +63029190,4201 +63049190,4214 +63021010,4186 +63049300,4229 +63031200,4204 
+63090000,4264 +63022190,4188 +63109010,4267 +63080000,4263 +63053200,4239 +63024090,4195 +63079020,4259 +63071020,4252 +63029300,4202 +63025900,4198 +63049229,4217 +63023900,4193 +63079011,4256 +63069090,4250 +63021090,4187 +63049281,4225 +63053300,4240 +63031900,4205 +63071030,4253 +63061990,4245 +63062200,4246 +63061200,4243 +63079091,4261 +63022200,4189 +63101090,4266 +63049991,4231 +63049219,4215 +63023200,4192 +63039100,4206 +63049910,4230 +63049291,4227 +63041940,4212 +63051060,4236 +63051010,4233 +63063000,4248 +63025300,4197 +63109090,4269 +63011000,4181 +63064000,4249 +63051090,4237 +63012000,4182 +63049231,4218 +63025190,4196 +63061910,4244 +63022900,4190 +63026010,4199 +63024030,4194 +63051030,4234 +63041100,4209 +63079013,4257 +63049241,4220 +63049221,4216 +64051000,4322 +64039120,4310 +64041920,4319 +64035112,4301 +64021210,4276 +64061010,4325 +64035111,4300 +64032019,4293 +64035190,4304 +64035930,4307 +64032013,4292 +64019910,4274 +64031910,4287 +64011010,4270 +64041120,4316 +64032012,4291 +64069030,4331 +64032022,4294 +64031920,4288 +64019290,4273 +64035113,4302 +64032029,4296 +64011090,4271 +64069040,4332 +64035119,4303 +64032040,4297 +64069050,4333 +64061020,4326 +64031200,4286 +64029110,4282 +64021910,4278 +64019210,4272 +64032023,4295 +64069020,4330 +64069010,4329 +65020020,4338 +65010010,4335 +65020010,4337 +66039010,4353 +66019100,4349 +67041990,4362 +67010090,4355 +67030010,4360 +67041100,4361 +67029010,4358 +68151020,4438 +68132010,4426 +68101110,4408 +68022900,4373 +68022310,4371 +68022110,4368 +68159910,4446 +68149040,4435 +68029300,4376 +68079010,4402 +68071010,4400 +68118100,4416 +68021000,4367 +68141020,4431 +68030000,4378 +68159920,4447 +68114090,4415 +68118990,4418 +68152000,4444 +68101190,4409 +68109910,4413 +68141030,4432 +68129300,4421 +68129290,4420 +68129921,4423 +68101910,4410 +68010000,4366 +68141010,4430 +68132090,4427 +68129100,4419 +68029200,4375 +68149010,4434 +68129919,4422 +68022120,4369 +68151300,4442 +68151100,4440 +68151200,4441 +69022030,4459 +69021030,4454 +69022020,4458 +69091100,4496 +69059000,4479 +69119010,4507 +69081020,4491 +69081010,4490 +69031010,4467 +69010030,4450 +69073010,4486 +69089020,4494 +69010090,4451 +69022010,4457 +69091910,4498 +69032010,4469 +69049000,4477 +69039030,4473 +69029010,4463 +69022050,4461 +69039040,4474 +69039010,4471 +69021020,4453 +69060000,4480 +69010010,4449 +69089010,4493 +69022040,4460 +69029020,4464 +69039020,4472 +69041000,4476 +69031000,4466 +70072110,4544 +70195200,4601 +70134100,4566 +70031990,4528 +70133300,4564 +70111020,4558 +70052910,4539 +70140010,4571 +70052110,4537 +70010090,4519 +70080090,4549 +70049091,4533 +70080010,4547 +70151090,4575 +70189010,4589 +70195100,4600 +70031210,4526 +70021000,4520 +70049019,4532 +70111010,4557 +70033090,4529 +70132200,4562 +70151020,4574 +70181010,4585 +70179020,4583 +70031290,4527 +70010010,4518 +70200011,4614 +70042099,4531 +70200021,4616 +70080020,4548 +70022010,4521 +70198000,4610 +70196300,4605 +70197100,4609 +70196200,4604 +70196900,4608 +70196100,4603 +70196400,4606 +70191500,4594 +70191400,4593 +70196600,4607 +71012100,4621 +71131110,4696 +71031012,4630 +71049000,4672 +71022120,4625 +71031011,4629 +71171920,4718 +71181000,4722 +71031022,4632 +71141110,4708 +71131960,4705 +71039930,4656 +71031019,4631 +71031023,4633 +71142010,4710 +71023990,4628 +71011020,4620 +71161000,4714 +71021000,4623 +71102100,4690 +71090000,4686 +71104900,4694 +71101110,4687 +71022110,4624 +71104100,4693 +71061000,4679 +71039920,4653 +71081300,4685 +71069210,4681 +71159010,4712 
+71059000,4678 +71102900,4691 +71101120,4688 +71189000,4723 +71132000,4707 +71039910,4650 +71039940,4659 +71131920,4701 +71011010,4619 +71131950,4704 +71070000,4683 +71103100,4692 +71110000,4695 +71039929,4655 +71039949,4662 +71031051,4640 +71042090,4669 +71031069,4642 +71039951,4663 +71039921,4654 +71031039,4636 +71031049,4639 +71039952,4664 +71039942,4660 +71031041,4637 +71039931,4657 +71039939,4658 +71031042,4638 +71031031,4635 +71031071,4643 +71039919,4652 +71031059,4641 +71039944,4661 +71039959,4665 +71039911,4651 +71031072,4644 +71042900,4671 +72201290,4941 +72299021,5050 +72202029,4945 +72285010,5035 +72021900,4727 +72251990,4985 +72121090,4847 +72083610,4778 +72071290,4765 +72051019,4753 +72085190,4793 +72165000,4878 +72259100,4995 +72099000,4815 +72083930,4788 +72029300,4737 +72091530,4800 +72083890,4787 +72192390,4917 +72101210,4818 +72091590,4801 +72083690,4779 +72221919,4957 +72151000,4866 +72139990,4859 +72112350,4840 +72192212,4914 +72071990,4768 +72241000,4976 +72192111,4907 +72022900,4729 +72224010,4970 +72249010,4977 +72123090,4849 +72089000,4799 +72192112,4908 +72119090,4845 +72193320,4927 +72210019,4951 +72172020,4889 +72299011,5048 +72083990,4790 +72139120,4856 +72131090,4854 +72155010,4867 +72192299,4916 +72269910,5014 +72251910,4983 +72193290,4925 +72193310,4926 +72192490,4919 +72255030,4994 +72199011,4935 +72021100,4726 +72171030,4887 +72173030,4893 +72254013,4989 +72083790,4783 +72169990,4884 +72254011,4987 +72169100,4881 +72091720,4805 +72083710,4780 +72199012,4936 +72173020,4892 +72131010,4853 +72149190,4864 +72222092,4963 +72082690,4774 +72122090,4848 +72251920,4984 +72015090,4725 +72111940,4838 +72279030,5023 +72029922,4740 +72193510,4932 +72141090,4860 +72164000,4877 +72193420,4930 +72132090,4855 +72261990,5003 +72139190,4857 +72223011,4965 +72181000,4898 +72262011,5004 +72051090,4756 +72085290,4796 +72029931,4741 +72262012,5005 +72071910,4766 +72091820,4808 +72192219,4915 +72085120,4792 +72159020,4870 +72092620,4811 +72193190,4922 +72254030,4992 +72024100,4731 +72091890,4809 +72092690,4812 +72142090,4861 +72193410,4929 +72299040,5053 +72249091,4980 +72162100,4873 +72189990,4901 +72269230,5013 +72201229,4940 +72029100,4735 +72201190,4939 +72288010,5045 +72221191,4955 +72085210,4794 +72192122,4910 +72111440,4834 +72262022,5007 +72166100,4879 +72269952,5016 +72091620,4802 +72269110,5008 +72287011,5042 +72261100,5001 +72221199,4956 +72222091,4962 +72279010,5021 +72292000,5047 +72282000,5028 +72299016,5049 +72029990,4743 +72249020,4978 +72091630,4803 +72071190,4764 +72189100,4899 +72082630,4772 +72111990,4839 +72222012,4960 +72027000,4734 +72163200,4875 +72269210,5011 +72179092,4895 +72193121,4921 +72179093,4896 +72161000,4872 +72169930,4883 +72299059,5054 +72287012,5043 +72254020,4991 +72171020,4886 +72112390,4841 +72299034,5052 +72042920,4747 +72191400,4906 +72101190,4817 +72249040,4979 +72287022,5044 +72082590,4771 +72149110,4863 +72192121,4909 +72191111,4902 +72031000,4744 +72262021,5006 +72159010,4869 +72082730,4775 +72269120,5009 +72121010,4846 +72083830,4785 +72283022,5031 +72109010,4831 +72085390,4797 +72101110,4816 +72279020,5022 +72269220,5012 +72103010,4820 +72029911,4738 +72179019,4894 +72269930,5015 +72083730,4781 +72061090,4761 +72104100,4823 +72193210,4923 +72202021,4943 +72119011,4844 +72071920,4767 +72172030,4890 +72029932,4742 +72111910,4837 +72023000,4730 +72139920,4858 +72111490,4836 +72082740,4776 +72043000,4749 +72029921,4739 +72192211,4913 +72193111,4920 +72011000,4724 +72143000,4862 +72209029,4948 +72082510,4770 +72085430,4798 +72111450,4835 
+72269953,5017 +72029200,4736 +72202010,4942 +72083840,4786 +72193520,4933 +72223091,4968 +72286092,5039 +72189910,4900 +72071120,4763 +72085230,4795 +72092790,4813 +72111410,4833 +72202022,4944 +72210012,4950 +72083940,4789 +72192141,4911 +72272000,5020 +72223012,4966 +72069099,4762 +72051029,4755 +72192429,4918 +72271000,5019 +72169910,4882 +72193220,4924 +72083740,4782 +72092520,4810 +72061010,4760 +72092820,4814 +72261920,5002 +72083810,4784 +72091730,4806 +72082640,4773 +72104910,4825 +72259210,4997 +72259990,5000 +72104990,4826 +72259290,4998 +72103099,4822 +73239490,5205 +73209020,5187 +73239190,5200 +73239910,5206 +73045910,5082 +73141410,5137 +73141990,5140 +73201012,5182 +73041990,5067 +73044900,5079 +73160090,5158 +73059099,5086 +73089010,5120 +73063090,5094 +73041110,5064 +73221900,5196 +73201020,5184 +73239310,5202 +73043939,5077 +73061921,5089 +73141490,5138 +73043121,5072 +73021090,5060 +73231000,5198 +73045110,5080 +73102190,5127 +73121020,5133 +73239110,5199 +73259910,5214 +73194010,5178 +73041190,5065 +73170019,5160 +73144990,5147 +73042990,5070 +73181200,5166 +73211190,5191 +73043119,5071 +73269010,5224 +73160010,5157 +73181300,5167 +73045930,5084 +73209010,5186 +73110090,5131 +73151210,5150 +73239420,5204 +73041910,5066 +73012090,5058 +73043139,5074 +73261100,5219 +73090090,5125 +73151900,5152 +73061100,5087 +73079210,5111 +73211990,5193 +73141910,5139 +73144110,5143 +73079110,5109 +73121010,5132 +73029090,5062 +73044100,5078 +73063010,5093 +73082019,5117 +73269070,5230 +73089050,5121 +73144210,5145 +73170030,5161 +73259930,5216 +73239200,5201 +73061919,5088 +73101090,5126 +73269091,5232 +73170091,5162 +73262010,5222 +73242100,5210 +73142090,5141 +73043919,5075 +73158100,5153 +73066100,5097 +73239920,5207 +73043129,5073 +73090010,5123 +73211110,5189 +73062100,5091 +73045920,5083 +73069019,5099 +73030090,5063 +73218990,5194 +73061929,5090 +73229090,5197 +73144190,5144 +73023000,5061 +73269040,5227 +73062919,5092 +73158200,5154 +73211290,5192 +73043929,5076 +73211120,5190 +73269020,5225 +73042390,5068 +73071110,5101 +73045130,5081 +73090040,5124 +73021010,5059 +73121030,5134 +73143900,5142 +73251000,5213 +73144290,5146 +73259920,5215 +73066900,5098 +73170013,5159 +73102910,5128 +73145000,5148 +73269030,5226 +73102920,5129 +73042400,5069 +73269050,5228 +73011000,5057 +73081000,5116 +73071120,5102 +73259993,5217 +74191029,5311 +74199940,5319 +74040029,5247 +74093900,5278 +74199100,5315 +74072910,5259 +74020090,5235 +74181022,5304 +74101200,5282 +74032100,5239 +74072110,5256 +74094000,5279 +74062000,5250 +74032290,5241 +74081910,5264 +74050000,5248 +74081920,5265 +74181010,5302 +74031900,5238 +74082210,5269 +74020010,5234 +74199910,5316 +74071059,5254 +74122011,5290 +74181090,5307 +74112200,5287 +74040019,5244 +74082910,5271 +74191010,5310 +74071010,5251 +74199920,5317 +74031300,5237 +74082110,5267 +74031100,5236 +74072929,5261 +74181023,5305 +74032210,5240 +74072921,5260 +74198030,5313 +74192000,5312 +74040025,5246 +75051210,5328 +75089030,5340 +75030010,5324 +75051120,5327 +75022090,5323 +75030090,5325 +75022030,5322 +75071100,5334 +75052100,5330 +75081000,5337 +75089020,5339 +75089010,5338 +76071110,5371 +76012090,5346 +76071910,5373 +76032000,5350 +76149000,5401 +76130019,5397 +76151011,5402 +76121030,5391 +76041020,5352 +76110000,5389 +76012020,5345 +76061110,5364 +76169920,5413 +76042920,5357 +76031090,5349 +76052100,5362 +76129010,5393 +76051100,5360 +76152010,5407 +76041010,5351 +76031010,5348 +76121010,5390 +76071995,5377 +76169930,5414 +76011010,5342 +76152020,5408 
+76072010,5379 +76129030,5395 +76129020,5394 +76130029,5398 +76069110,5367 +76051999,5361 +76011090,5343 +76141000,5400 +76071993,5375 +78019920,5418 +78019100,5417 +78041990,5424 +78041120,5423 +78020090,5421 +78041110,5422 +78060010,5425 +79011200,5428 +79012010,5429 +79031000,5432 +79050030,5438 +79039000,5433 +79040030,5436 +79012090,5430 +79040019,5434 +79050040,5439 +79050020,5437 +79040029,5435 +80011090,5442 +80030010,5444 +80012000,5443 +80030040,5446 +80030020,5445 +81041100,5461 +81029600,5456 +81083000,5477 +81101000,5482 +81043010,5463 +81052010,5467 +81072000,5473 +81052020,5468 +81099000,5480 +81082000,5476 +81049010,5465 +81043020,5464 +81122100,5487 +81049090,5466 +81122900,5488 +81109000,5483 +81021000,5453 +81011000,5448 +81029400,5454 +81039000,5459 +81039900,5460 +81130010,5490 +81130030,5492 +81041900,5462 +81130020,5491 +81032090,5458 +81079090,5475 +81099900,5481 +81060010,5471 +81060090,5472 +81079010,5474 +81110090,5486 +81110030,5485 +82014000,5496 +82129000,5580 +82089040,5563 +82149010,5586 +82013000,5495 +82111000,5568 +82059010,5534 +82119500,5574 +82015000,5497 +82059020,5535 +82011000,5494 +82021010,5500 +82056000,5532 +82016000,5498 +82151000,5588 +82122020,5579 +82122011,5577 +82159100,5590 +82119310,5571 +82029120,5508 +82021020,5501 +82089030,5562 +82055940,5530 +82121010,5575 +82089020,5561 +82059030,5536 +83100010,5647 +83059010,5617 +83021020,5601 +83099010,5643 +83089020,5636 +83113010,5651 +83051000,5615 +83015000,5597 +83089031,5637 +83062110,5621 +83062910,5624 +83024120,5607 +83091000,5642 +84322910,5993 +84512100,6184 +84678910,6400 +84731000,6450 +84328020,5999 +84632000,6359 +84572090,6243 +84484940,6166 +84261100,5924 +84825022,6563 +84389010,6047 +84595190,6273 +84253100,5919 +84594090,6267 +84818041,6536 +84131191,5728 +84716029,6428 +84433930,6093 +84623100,6335 +84138110,5752 +84861000,6592 +84433220,6087 +84439920,6098 +84862000,6593 +84604019,6299 +84774000,6487 +84641010,6367 +84485120,6170 +84829112,6567 +84134000,5737 +84621020,6324 +84453090,6120 +84829111,6566 +84131110,5727 +84461019,6125 +84639010,6364 +84717010,6433 +84614019,6309 +84629919,6352 +84184010,5820 +84109000,5705 +84452013,6116 +84463011,6129 +84049000,5663 +84431949,6083 +84331110,6003 +84152010,5796 +84615021,6319 +84601100,6283 +84334000,6008 +84359000,6020 +84137091,5746 +84264900,5930 +84412000,6060 +84128020,5720 +84462990,6128 +84305090,5973 +84589100,6249 +84717060,6438 +84633020,6361 +84651000,6371 +84433920,6092 +84593910,6264 +84614025,6312 +84829113,6568 +84414000,6062 +84461090,6126 +84331190,6004 +84864000,6595 +84138130,5754 +84452090,6118 +84798910,6506 +84781090,6493 +84171000,5809 +84303900,5966 +84342000,6016 +84769010,6482 +84592100,6256 +84543020,6223 +84614024,6311 +84471120,6134 +84232000,5896 +84716010,6426 +84079010,5677 +84612011,6303 +84321090,5992 +84729040,6447 +84431100,6072 +84282020,5941 +84622100,6328 +84198940,5854 +84615011,6315 +84569010,6239 +84425031,6069 +84514029,6190 +84261900,5926 +84439910,6097 +84283200,5943 +84481110,6145 +84594010,6266 +84573090,6245 +84138200,5756 +84081093,5680 +84201000,5861 +84189100,5829 +84521029,6203 +84513010,6186 +84624910,6342 +84589920,6251 +84073490,5676 +84431910,6079 +84431930,6081 +84748020,6472 +84678920,6401 +84183090,5819 +84461011,6124 +84595120,6271 +84021200,5654 +84144010,5768 +84211910,5866 +84039000,5660 +84388030,6044 +84198910,5851 +84592930,6259 +84362100,6022 +84581919,6247 +84825021,6562 +84597010,6281 +84659600,6378 +84589932,6252 +84621013,6322 +84551000,6226 +84798960,6511 
+84304130,5969 +84483910,6158 +84702900,6413 +84431200,6073 +84071000,5669 +84286000,5947 +84484220,6162 +84186920,5825 +84511090,6183 +84262000,5927 +84158110,5798 +84514019,6188 +84624100,6340 +84614011,6308 +84099193,5692 +84621030,6325 +84518029,6196 +84269990,5932 +84148020,5784 +84589959,6253 +84112200,5709 +84341000,6015 +84392000,6050 +84431941,6082 +84716025,6427 +84518019,6193 +84336020,6013 +84029020,5657 +84451990,6114 +84595110,6270 +84624920,6343 +84748010,6471 +84196000,5847 +84703000,6414 +84471219,6136 +84451960,6113 +84139140,5760 +84454010,6121 +84331990,6005 +84514099,6191 +84595130,6272 +84042000,5662 +84293000,5956 +84483340,6156 +84128019,5719 +84193200,5839 +84193100,5838 +84072900,5671 +84281020,5938 +84381020,6035 +84129010,5723 +84135029,5740 +84272000,5934 +84522120,6205 +84192010,5836 +84762900,6480 +84351000,6019 +84451110,6107 +84523010,6208 +84079090,5678 +84623910,6337 +84484920,6165 +84186930,5826 +84306100,5974 +84629960,6354 +84798950,6510 +84254100,5921 +84131910,5730 +84440090,6106 +84602100,6286 +84521019,6201 +84743110,6468 +84552110,6227 +84137095,5749 +84431700,6078 +84511010,6182 +84431600,6077 +84111100,5706 +84248920,5912 +84384000,6039 +84128030,5721 +84323000,5995 +84302000,5963 +84501200,6177 +84631090,6358 +84121000,5713 +84195010,5843 +84186950,5827 +84332000,6006 +84592940,6260 +84521011,6199 +84269100,5931 +84595920,6275 +84614023,6310 +84622920,6333 +84768990,6481 +84592950,6261 +84251120,5916 +84484210,6161 +84602930,6292 +84081010,5679 +84073290,5672 +84602910,6290 +84742020,6466 +84191190,5832 +84211200,5865 +84614026,6313 +84194020,5841 +84593100,6263 +84336010,6012 +84314940,5990 +84596910,6279 +84705020,6416 +84623920,6338 +84479010,6142 +84775100,6488 +84562000,6235 +84181010,5814 +84261200,5925 +84631020,6357 +84521012,6200 +84291920,5954 +84451300,6111 +84472020,6139 +84303120,5964 +84483230,6152 +84440010,6105 +84163000,5807 +84198980,5857 +84198930,5853 +84595910,6274 +84388020,6043 +84729020,6445 +84733040,6456 +84592910,6257 +84401010,6055 +84453019,6119 +84068200,5667 +84518021,6194 +84451190,6108 +84709020,6418 +84521021,6202 +84659910,6379 +84138120,5753 +84144020,5769 +84798992,6513 +84451210,6109 +84592920,6258 +84462190,6127 +84514021,6189 +84137094,5748 +84709010,6417 +84742010,6465 +84629110,6349 +84613090,6307 +84751000,6476 +84198960,5855 +84615012,6316 +84793000,6498 +84798930,6508 +84602920,6291 +84629911,6351 +84615013,6317 +84490010,6174 +84089010,5682 +84829114,6569 +84301010,5962 +84451290,6110 +84383010,6037 +84388010,6042 +84211100,5864 +84051010,5664 +84631010,6356 +84263000,5928 +84129020,5724 +84335200,6010 +84211950,5870 +84303190,5965 +84304110,5967 +84483330,6155 +84431920,6080 +84452011,6115 +84431500,6076 +84723000,6443 +84073310,5673 +84573010,6244 +84112100,5708 +84591000,6255 +84541000,6219 +84211991,5872 +84629920,6353 +84552120,6228 +84595930,6276 +84198970,5856 +84291120,5953 +84483920,6159 +84612020,6305 +84211940,5869 +84604011,6298 +84633010,6360 +84602940,6293 +84283100,5942 +84431400,6075 +84031000,5659 +84294020,5958 +84118100,5710 +84391000,6049 +84603100,6295 +84741010,6463 +84542010,6220 +84198920,5852 +84388040,6045 +84633030,6362 +84613010,6306 +84639030,6365 +84451910,6112 +84471220,6137 +84211930,5868 +84289010,5949 +84041000,5661 +84433960,6094 +84335100,6009 +84748030,6473 +84743120,6469 +84022000,5656 +84518022,6195 +84211960,5871 +84195030,5845 +84294010,5957 +84073320,5674 +84633040,6363 +84463012,6130 +84289020,5950 +84589910,6250 +84752100,6477 +84178010,5811 +84305010,5972 
+84604020,6300 +84328010,5998 +84702100,6412 +84135021,5739 +84472030,6140 +84485130,6171 +84483220,6151 +84863000,6594 +84291110,5952 +84238110,5898 +84333000,6007 +84714120,6422 +84137093,5747 +84792010,6496 +84797100,6502 +84602400,6289 +84323100,5996 +84601200,6284 +84594190,6268 +84652000,6372 +84323900,5997 +84594990,6269 +84561200,6234 +84602300,6288 +84602200,6287 +84565000,6238 +84145150,5775 +84626900,6347 +84626100,6345 +84191200,5833 +84623900,6336 +84624900,6341 +84622300,6329 +84859000,6591 +84621900,6327 +84213200,5881 +84626200,6346 +84622600,6330 +84621100,6326 +84287000,5948 +85352190,6970 +85064000,6674 +85399010,7038 +85255050,6859 +85287218,6907 +85234930,6836 +85441930,7103 +85462019,7136 +85437094,7095 +85432020,7077 +85021200,6636 +85299010,6919 +85392910,7021 +85407900,7045 +85284100,6890 +85447010,7127 +85158010,6742 +85238030,6848 +85437042,7087 +85234190,6833 +85481010,7150 +85351010,6962 +85198100,6811 +85022090,6639 +85479010,7146 +85461000,7135 +85016410,6630 +85221000,6823 +85232100,6825 +85176960,6784 +85271300,6881 +85444291,7112 +85041020,6649 +85286100,6896 +85359010,6979 +85352990,6973 +85444293,7114 +85409900,7049 +85256099,6866 +85437092,7093 +85013220,6611 +85359040,6982 +85131020,6719 +85255090,6860 +85432010,7076 +85211019,6816 +85361030,6986 +85256092,6865 +85481020,7151 +85291021,6913 +85061000,6673 +85011013,6601 +85013120,6609 +85234110,6830 +85094010,6693 +85437091,7092 +85444991,7119 +85021100,6635 +85392110,7017 +85024000,6643 +85279919,6888 +85015310,6624 +85402000,7042 +85287213,6902 +85459020,7133 +85066000,6676 +85408100,7046 +85406000,7043 +85022010,6638 +85451100,7129 +85016200,6628 +85299020,6920 +85256019,6864 +85255010,6856 +85351040,6965 +85471010,7142 +85359030,6981 +85287310,6909 +85015120,6619 +85437011,7081 +85171810,6769 +85444910,7116 +85437071,7091 +85131030,6720 +85041010,6648 +85198910,6812 +85287214,6903 +85393910,7031 +85353010,6974 +85234160,6832 +85392200,7020 +85392920,7022 +85167910,6756 +85369020,7008 +85401190,7041 +85131040,6721 +85459010,7132 +85432030,7078 +85287216,6905 +85023100,6640 +85232920,6827 +85176240,6775 +85441910,7101 +85176920,6781 +85437095,7096 +85013113,6607 +85352119,6968 +85269110,6875 +85332121,6945 +85437062,7089 +85162100,6746 +85176220,6773 +85013310,6612 +85234950,6838 +85013410,6614 +85211029,6817 +85016300,6629 +85152120,6735 +85431090,7075 +85015330,6625 +85393220,7029 +85269130,6877 +85334020,6957 +85256012,6863 +85198920,6813 +85013320,6613 +85271200,6880 +85074000,6683 +85142000,6726 +85023910,6641 +85042200,6652 +85351020,6963 +85113010,6704 +85352919,6971 +85016480,6632 +85352111,6967 +85393210,7028 +85352129,6969 +85462024,7137 +85016420,6631 +85291091,6916 +85255020,6857 +85279200,6886 +85112010,6702 +85291011,6911 +85143010,6727 +85013420,6615 +85232910,6826 +85193000,6810 +85446030,7125 +85219010,6820 +85322910,6938 +85351030,6964 +85153920,6740 +85462029,7138 +85399020,7039 +85291092,6917 +85431010,7074 +85409100,7048 +85352929,6972 +85042310,6653 +85291022,6914 +85359020,6980 +85471040,7143 +85211092,6818 +85333120,6952 +85021340,6637 +85332921,6949 +85284200,6891 +85279900,6887 +85171219,6765 +85258300,6872 +85395200,7037 +85480000,7149 +85141900,6725 +85241900,6852 +85143900,6729 +85258100,6871 +85415100,7065 +85491100,7153 +85249200,6854 +85171400,6768 +85395100,7036 +85241200,6851 +85018000,6634 +85017100,6633 +85241100,6850 +85182210,6797 +85182110,6794 +85182910,6800 +86071930,7162 +86080010,7173 +86071920,7161 +86069900,7158 +86040000,7156 +86073010,7166 +86079920,7170 
+86071910,7160 +86080040,7175 +86079930,7171 +86080030,7174 +86011000,7154 +86071100,7159 +86069210,7157 +86021000,7155 +87149590,7266 +87041090,7202 +87149390,7263 +87031010,7183 +87119091,7244 +87091100,7226 +87039010,7200 +87112029,7232 +87031090,7184 +87141010,7252 +87164000,7275 +87150020,7272 +87042190,7203 +87119099,7245 +87149920,7269 +87113010,7234 +87100000,7229 +87114010,7237 +87091900,7227 +87019090,7179 +87059000,7206 +87113090,7236 +87149320,7262 +87112019,7231 +87113020,7235 +87049090,7205 +87142010,7254 +87012090,7178 +87112099,7233 +87111090,7230 +87033191,7191 +87060029,7207 +87042290,7204 +87163900,7274 +87041010,7201 +87032199,7186 +87032299,7188 +87162000,7273 +87032191,7185 +87038040,7197 +87119090,7243 +87116010,7240 +87019300,7182 +87034030,7194 +87019200,7181 +87036030,7195 +87038090,7198 +87019100,7180 +87039000,7199 +88052100,7293 +88051030,7292 +88023000,7283 +88026000,7284 +88040020,7290 +88021200,7281 +88022000,7282 +88021100,7280 +88051010,7291 +88071000,7295 +88079000,7298 +88072000,7296 +89011090,7300 +89019000,7302 +89059090,7310 +89031000,7303 +89071000,7313 +89039990,7307 +89051000,7309 +89039910,7306 +89039900,7305 +89039100,7304 +89061000,7311 +89011010,7299 +89012000,7301 +90184100,7426 +90229010,7482 +90221300,7474 +90101000,7356 +90131090,7369 +90221410,7475 +90189092,7449 +90105000,7357 +90058010,7336 +90183910,7422 +90065990,7344 +90275030,7517 +90303340,7552 +90111000,7360 +90258010,7499 +90059010,7339 +90189032,7442 +90141000,7377 +90182000,7416 +90065390,7343 +90148010,7379 +90058020,7337 +90278020,7520 +90292020,7541 +90158010,7387 +90014010,7319 +90275010,7515 +90172010,7396 +90089000,7355 +90308910,7559 +90173022,7402 +90153010,7384 +90049010,7332 +90292030,7542 +90189098,7455 +90151000,7382 +90283010,7534 +90154000,7386 +90229040,7485 +90189033,7443 +90314100,7565 +90172030,7398 +90112000,7361 +90229020,7483 +90121010,7364 +90303350,7553 +90189097,7454 +90181920,7414 +90085030,7353 +90221420,7476 +90185030,7430 +90189043,7446 +90185020,7429 +90258020,7500 +90278040,7522 +90172020,7397 +90251110,7494 +90131010,7367 +90291010,7538 +90303320,7550 +90171000,7395 +90173023,7403 +90183230,7420 +90064000,7342 +90085040,7354 +90131020,7368 +90229030,7484 +90139000,7374 +90278910,7525 +90278100,7524 +90278920,7526 +90278930,7527 +91061000,7606 +91029120,7591 +91051100,7599 +91091090,7614 +91029910,7593 +91059100,7603 +91099000,7615 +91040000,7598 +91149030,7634 +91029190,7592 +91021200,7586 +91029920,7594 +91141020,7630 +91122000,7622 +91059910,7604 +91129000,7623 +91029110,7590 +91143020,7632 +91019190,7583 +91082000,7611 +91031000,7596 +91091010,7613 +91019110,7582 +92021000,7640 +92099100,7652 +92019000,7639 +92089000,7650 +92059010,7643 +92011000,7637 +92012000,7638 +92051000,7642 +92081000,7649 +92059020,7644 +93020000,7659 +93063000,7670 +93039000,7662 +93062900,7669 +93032000,7660 +93011090,7656 +93051000,7664 +93019000,7658 +93070000,7672 +93012000,7657 +94056010,7752 +94015100,7680 +94060019,7760 +94049091,7727 +94055010,7747 +94049011,7725 +94060091,7761 +94043010,7720 +94014000,7678 +94060092,7762 +94060011,7759 +94055051,7750 +94042920,7718 +94038200,7708 +94038300,7709 +94061090,7765 +94069020,7767 +94015300,7681 +94069010,7766 +94061020,7764 +94056900,7755 +94013100,7676 +94052100,7736 +94049000,7724 +94019100,7689 +94044040,7723 +94054100,7743 +94056100,7754 +94014900,7679 +94044010,7722 +95049010,7779 +95066100,7797 +95069970,7813 +95069960,7812 +95069910,7809 +95066930,7804 +95069110,7807 +95066230,7800 +95061200,7786 +95066220,7799 
+95069940,7811 +95069980,7814 +95062100,7788 +95066920,7803 +95061100,7785 +95049020,7780 +95066910,7802 +95079010,7819 +95069920,7810 +95082900,7821 +95083000,7822 +97030010,7834 +97031090,7837 +97019900,7831 +97030020,7835 +97019100,7830 +97019091,7828 +97060000,7843 +97050090,7842 +97020000,7832 +97039090,7838 +97040090,7840 +97012100,7826 +97040010,7839 +97029000,7833 +97012900,7827 +97011010,7824 +97050010,7841 +98010014,7847 +98010012,7845 +98030000,7853 +98010015,7848 diff --git a/CTH_Description.csv b/CTH_Description.csv new file mode 100644 index 0000000000000000000000000000000000000000..1009ece4525001ddd49225a2261b7c23ea59cf41 --- /dev/null +++ b/CTH_Description.csv @@ -0,0 +1,16554 @@ +CTH Code,Concat Description +1008201,"BUCKWHEAT, MILLET AND CANARY SEED; OTHER CEREALS:Millet :Jawar (OLD tariff)" +1008202,"BUCKWHEAT, MILLET AND CANARY SEED; OTHER CEREALS:Millet :Bajra (OLD tariff)" +1008203,"BUCKWHEAT, MILLET AND CANARY SEED; OTHER CEREALS:Millet :Ragi (finger millet) (OLD tariff)" +1010000,live horses asses mules and hinnies horses +1012100,:Horses:Pure-bred bredding animals +1012900,live horses asses mules and hinnies horses >> other +1012910,:other:Horses for polo +1012990,:other:other +1013000,live horses asses mules and hinnies horses >> asses +1013010,:Asses:Pure-bred breeding animals +1013020,:Asses:livestock +1013090,:Asses:other +1019000,live horses asses mules and hinnies horses >> other +1019030,:Other :Mules and hinnies as livestock +1019090,:Other :Other +1020000,live bovine animals cattle +1022100,live bovine animals cattle >> breeding animals +1022110,LIVE BOVINE ANIMALS:Pure-bred breeding animals:Bulls +1022120,LIVE BOVINE ANIMALS:Pure-bred breeding animals:Cows +1022900,live bovine animals cattle >> other +1022910,LIVE BOVINE ANIMALS:Other:Bulls +1022990,LIVE BOVINE ANIMALS:Other:Other including calves +1023100,LIVE BOVINE ANIMALS:Buffalo:Pure-bred breeding animals +1023900,LIVE BOVINE ANIMALS::Others +1029000,live bovine animals cattle >> other +1029010,"LIVE BOVINE ANIMALS:Other :Bulls, adult" +1029090,LIVE BOVINE ANIMALS:Other :Other +1030000,live swine +1031000,LIVE SWINE::Pure-bred breeding animals +1039100,LIVE SWINE::Weighing less than 50 kg. +1039200,LIVE SWINE::Weighing 50 kg. 
or more +1040000,live sheep and goats +1041000,live sheep and goats >> sheep +1041010,LIVE SHEEP AND GOATS:Sheep:Sheep including lamb for breeding purpose +1041090,LIVE SHEEP AND GOATS:Sheep:Other +1042000,LIVE SHEEP AND GOATS::Goats +1050000,live poultry that is to say fowls of the species gallus domesticus ducks geese turkeys and guinea fowls weighing not more than 185 g +1051100,"LIVE POULTRY, THAT IS TO SAY, FOWLS OF THE SPECIES GALLUS DOMESTICUS, DUCKS, GEESE, TURKEYS AND GUINEA FOWLS::Fowls of the species Gallus domesticus" +1051200,"LIVE POULTRY, THAT IS TO SAY, FOWLS OF THE SPECIES GALLUS DOMESTICUS, DUCKS, GEESE, TURKEYS AND GUINEA FOWLS::Turkeys" +1051300,"LIVE POULTRY, THAT IS TO SAY, FOWLS OF THE SPECIES GALLUS DOMESTICUS, DUCKS, GEESE, TURKEYS AND GUINEA FOWLS::Ducks" +1051400,"LIVE POULTRY, THAT IS TO SAY, FOWLS OF THE SPECIES GALLUS DOMESTICUS, DUCKS, GEESE, TURKEYS AND GUINEA FOWLS::Geese" +1051500,"LIVE POULTRY, THAT IS TO SAY, FOWLS OF THE SPECIES GALLUS DOMESTICUS, DUCKS, GEESE, TURKEYS AND GUINEA FOWLS::Guinea fowls" +1059400,"LIVE POULTRY, THAT IS TO SAY, FOWLS OF THE SPECIES GALLUS DOMESTICUS, DUCKS, GEESE, TURKEYS AND GUINEA FOWLS::Fowls of the species Gallus domesticus" +1059900,"LIVE POULTRY, THAT IS TO SAY, FOWLS OF THE SPECIES GALLUS DOMESTICUS, DUCKS, GEESE, TURKEYS AND GUINEA FOWLS::Other" +1060000,other live animals mammals +1061100,OTHER LIVE ANIMALS::Primates +1061200,"OTHER LIVE ANIMALS::Whales, dolphins and porpoises (mammals of the order Cetacea); manatees and dugongs (mammals of the order Sirenia)" +1061300,OTHER LIVE ANIMALS::Camels and other camelids (Camelidae) +1061400,OTHER LIVE ANIMALS::Rabits and Hares +1061900,OTHER LIVE ANIMALS::Other +1062000,OTHER LIVE ANIMALS::Reptiles (including snakes and turtles) +1063100,OTHER LIVE ANIMALS::Birds of prey +1063200,"OTHER LIVE ANIMALS::Psittaciformes (including parrots, parakeets, macaws and cockatoos)" +1063300,OTHER LIVE ANIMALS::Ostriches; emus(Dromaius novaehollandiae) +1063900,OTHER LIVE ANIMALS::Other +1064100,other live animals mammals >> bees +1064110,OTHER LIVE ANIMALS:Bees:Pureline stock +1064190,OTHER LIVE ANIMALS:Bees:other +1064900,other live animals mammals >> other +1064910,OTHER LIVE ANIMALS:Other:Pureline Stock +1064990,OTHER LIVE ANIMALS:Other:Other +1069000,OTHER LIVE ANIMALS:Other:Other +1202101,"GROUND-NUTS, NOT ROASTED OR OTHERWISE COOKED, WHETHER OR NOT SHELLED OR BROKEN:In shell :H.P.S (OLD tariff)" +1202109,"GROUND-NUTS, NOT ROASTED OR OTHERWISE COOKED, WHETHER OR NOT SHELLED OR BROKEN:In shell :Other (OLD tariff)" +1211901,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Seeds (OLD tariff)" +1211902,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Leaves, powder, flowers and pods(OLD tariff)" +1211903,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Bark, husk and rind (OLD tariff)" +1211904,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR 
INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Roots and rhizomes (OLD tariff)" +1211909,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Other (OLD tariff)" +1301901,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Natural gums (OLD tariff)" +1301902,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Resins (OLD tariff)" +1301904,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Oleoresins (OLD tariff)" +1301909,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Other (OLD tariff)" +1302191,"VEGETABLE SAPS AND EXTRACTS; PECTIC SUBSTANCES, PECTINATES AND PECTATES; AGAR-AGAR AND OTHER MUCILAGES AND THICKENERS, WHETHER OR NOT MODIFIED, DERIVED FROM VEGETABLE PRODUCTS :Other :Extracts (OLD tariff)" +1404902,VEGETABLE PRODUCTS NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Soap-nuts (OLD tariff) +1504109,"FATS AND OILS AND THEIR FRACTIONS, OF FISH OR MARINE MAMMALS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Fish liver oils and their fractions:Other (OLD tariff)" +1508909,"GROUND-NUT OIL AND ITS FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Other (OLD tariff)" +1510009,"OTHER OILS AND THEIR FRACTIONS OBTAINED SOLELY FROM OLIVES, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED, INCLUDING BLENDS OF THESE OILS OR FRACTIONS WITH OILS OR FRACTIONS OF HEADING 1509:Other oils and their fractions obtained solely from olives, whether or not refined, but not chemically modified, including blends of these oils or fractions with oils or fractions of heading 1509:Other (OLD tariff)" +1515509,"OTHER FIXED VEGETABLE FATS AND OILS (INCLUDING JOJOBA OIL) AND THEIR FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Seasame oil and its fractions:Other(OLD tariff)" +1515909,"OTHER FIXED VEGETABLE FATS AND OILS (INCLUDING JOJOBA OIL) AND THEIR FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Other(OLD tariff)" +1516201,"ANIMAL OR VEGETABLE FATS AND OILS AND THEIR FRACTIONS, PARTLY OR WHOLLY HYDROGENATED, INTER-ESTERIFIED, RE-ESTERIFIED OR ELAIDINISED, WHETHER OR NOT REFINED, BUT NOT FURTHER PREPARED:Vegetable fats and oils and their fractions :Cotton Seed oil (OLD tariff)" +1516202,"ANIMAL OR VEGETABLE FATS AND OILS AND THEIR FRACTIONS, PARTLY OR WHOLLY HYDROGENATED, INTER-ESTERIFIED, RE-ESTERIFIED OR ELAIDINISED, WHETHER OR NOT REFINED, BUT NOT FURTHER PREPARED:Vegetable fats and oils and their fractions :Groundnut oil (OLD tariff)" +1516203,"ANIMAL OR VEGETABLE FATS AND OILS AND THEIR FRACTIONS, PARTLY OR WHOLLY HYDROGENATED, INTER-ESTERIFIED, RE-ESTERIFIED OR ELAIDINISED, WHETHER OR NOT REFINED, BUT NOT FURTHER PREPARED:Vegetable fats and oils and their fractions :Hydrogenated castor oil (opal-wax) (OLD tariff)" +1516209,"ANIMAL OR VEGETABLE FATS AND OILS AND THEIR FRACTIONS, PARTLY OR WHOLLY HYDROGENATED, INTER-ESTERIFIED, RE-ESTERIFIED OR ELAIDINISED, WHETHER OR NOT REFINED, BUT NOT FURTHER PREPARED:Vegetable fats and oils and their fractions :Other(OLD tariff)" +1517102,"MARGARINE; EDIBLE MIXTURE OR PREPARATIONS OF ANIMAL OR VEGETABLE FATS OR OILS OR OF FRACTIONS OF DIFFERENT FATS OR OILS OF THIS CHAPTER, OTHER THAN EDIBLE FATS OR OILS OR THEIR 
FRACTIONS OF HEADING 1516:Margarine, excluding liquid margarine:Of vegetable origin (OLD tariff)" +1518001,"ANIMAL OR VEGETABLE FATS AND OILS AND THEIR FRACTIONS, BOILED, OXIDISED, DEHYDRATED, SULPHURISED, BLOWN, POLYMERISED BY HEAT IN VACUUM OR IN INERT GAS OR OTHERWISE CHEMICALLY MODIFIED, EXCLUDING THOSE OF HEADING 1516 ; INEDIBLE MIXTURES OR PREPARATIONS OF ANIMAL OR VEGETABLE FATS OR OILS OR OF FRACTIONS OF DIFFERENT FATS OR OILS OF THIS CHAPTER, NOT ELSEWHERE SPECIFIED OR INCLUDED:Animal or vegetable fats and oils and their fractions, boiled, oxidised, dehydrated, sulphurised, blown, polymerised by heat in vacuum or in inert gas or otherwise chemically modified, excluding those of heading 1516 ; inedible mixtures or preparations of animal or vegetable fats or oils or of fractions of different fats or oils of this Chapter, not elsewhere specified or included:Linseed oil(OLD tariff)" +1518002,"ANIMAL OR VEGETABLE FATS AND OILS AND THEIR FRACTIONS, BOILED, OXIDISED, DEHYDRATED, SULPHURISED, BLOWN, POLYMERISED BY HEAT IN VACUUM OR IN INERT GAS OR OTHERWISE CHEMICALLY MODIFIED, EXCLUDING THOSE OF HEADING 1516 ; INEDIBLE MIXTURES OR PREPARATIONS OF ANIMAL OR VEGETABLE FATS OR OILS OR OF FRACTIONS OF DIFFERENT FATS OR OILS OF THIS CHAPTER, NOT ELSEWHERE SPECIFIED OR INCLUDED:Animal or vegetable fats and oils and their fractions, boiled, oxidised, dehydrated, sulphurised, blown, polymerised by heat in vacuum or in inert gas or otherwise chemically modified, excluding those of heading 1516 ; inedible mixtures or preparations of animal or vegetable fats or oils or of fractions of different fats or oils of this Chapter, not elsewhere specified or included:Castor oil, dehydrated (OLD tariff)" +1518003,"ANIMAL OR VEGETABLE FATS AND OILS AND THEIR FRACTIONS, BOILED, OXIDISED, DEHYDRATED, SULPHURISED, BLOWN, POLYMERISED BY HEAT IN VACUUM OR IN INERT GAS OR OTHERWISE CHEMICALLY MODIFIED, EXCLUDING THOSE OF HEADING 1516 ; INEDIBLE MIXTURES OR PREPARATIONS OF ANIMAL OR VEGETABLE FATS OR OILS OR OF FRACTIONS OF DIFFERENT FATS OR OILS OF THIS CHAPTER, NOT ELSEWHERE SPECIFIED OR INCLUDED:Animal or vegetable fats and oils and their fractions, boiled, oxidised, dehydrated, sulphurised, blown, polymerised by heat in vacuum or in inert gas or otherwise chemically modified, excluding those of heading 1516 ; inedible mixtures or preparations of animal or vegetable fats or oils or of fractions of different fats or oils of this Chapter, not elsewhere specified or included:Other Vegetable oil and its fats(OLD tariff)" +1521101,"VEGETABLE WAXES (OTHER THAN TRIGLYCERIDES), BEESWAX, OTHER INSECT WAXES AND SPERMACETI, WHETHER OR NOT REFINED OR COLOURED:Vegetable Waxes:Carnauba Waxes (OLD tariff)" +1702303,"OTHER SUGARS, INCLUDING CHEMICALLY PURE LACTOSE, MALTOSE, GLUCOSE AND FRUCTOSE, IN SOLID FORM; SUGAR SYRUPS NOT CONTAINING ADDED FLAVOURING OR COLOURING MATTER; ARTIFICIAL HONEY, WHETHER OR NOT MIXED WITH NATURAL HONEY; CARAMEL:Glucose and glucose syrup, not containing fructose or containing in the dry state less than 20% by weight of fructose:Dextrose(OLD tariff)" +1702403,"OTHER SUGARS, INCLUDING CHEMICALLY PURE LACTOSE, MALTOSE, GLUCOSE AND FRUCTOSE, IN SOLID FORM; SUGAR SYRUPS NOT CONTAINING ADDED FLAVOURING OR COLOURING MATTER; ARTIFICIAL HONEY, WHETHER OR NOT MIXED WITH NATURAL HONEY; CARAMEL:Glucose and glucose syrup, containing in the dry state at least 20% but less than 50% by weight of fructose, excluding invert sugar :Dextrose(OLD tariff)" +1905321,"BREAD, PASTRY, CAKES, BISCUITS AND OTHER BAKER'S WARES, 
WHETHER OR NOT CONTAINING COCOA; COMMUNION WAFERS, EMPTY CACHETS OF A KIND SUITABLE FOR PHARMACEUTICAL USE, SEALING WAFERS, RICE PAPER AND SIMILAR PRODUCTS:Waffles and wafers :Communion wafers (OLD tariff)" +2008991,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED :Other:Squash (OLD tariff)" +2008999,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED :Other:Other (OLD tariff)" +2010000,meat of bovine animals fresh or chilled +2011000,"MEAT OF BOVINE ANIMALS, FRESH or CHILLED::Carcasses and half-carcasses" +2012000,"MEAT OF BOVINE ANIMALS, FRESH or CHILLED::Other cuts with bone in" +2013000,"MEAT OF BOVINE ANIMALS, FRESH or CHILLED::Boneless" +2020000,meat of bovine animals frozen +2021000,"MEAT OF BOVINE ANIMALS, FROZEN::Carcasses and half-carcasses" +2022000,"MEAT OF BOVINE ANIMALS, FROZEN::Other cuts with bone in" +2023000,"MEAT OF BOVINE ANIMALS, FROZEN::Boneless" +2030000,meat of swine fresh chilled or frozen fresh or chilled +2031100,"MEAT OF SWINE, FRESH, CHILLED OR FROZEN::Carcasses and half-carcasses" +2031200,"MEAT OF SWINE, FRESH, CHILLED OR FROZEN::Hams, shoulders and cuts thereof, with bone in" +2031900,"MEAT OF SWINE, FRESH, CHILLED OR FROZEN::Other" +2032100,"MEAT OF SWINE, FRESH, CHILLED OR FROZEN::Carcasses and half-carcasses" +2032200,"MEAT OF SWINE, FRESH, CHILLED OR FROZEN::Hams, shoulders and cuts thereof, with bone in" +2032900,"MEAT OF SWINE, FRESH, CHILLED OR FROZEN::Other" +2040000,meat of sheep or goats fresh chilled or frozen +2041000,"MEAT OF SHEEP OR GOATS, FRESH, CHILLED OR FROZEN::Carcasses and half-carcasses of lamb, fresh or chilled" +2042100,"MEAT OF SHEEP OR GOATS, FRESH, CHILLED OR FROZEN::Carcasses and half-carcasses" +2042200,"MEAT OF SHEEP OR GOATS, FRESH, CHILLED OR FROZEN::Other cuts with bone in" +2042300,"MEAT OF SHEEP OR GOATS, FRESH, CHILLED OR FROZEN::Boneless" +2043000,"MEAT OF SHEEP OR GOATS, FRESH, CHILLED OR FROZEN::Carcasses and half-carcasses of lamb, frozen" +2044100,"MEAT OF SHEEP OR GOATS, FRESH, CHILLED OR FROZEN::Carcasses and half-carcasses" +2044200,"MEAT OF SHEEP OR GOATS, FRESH, CHILLED OR FROZEN::Other cuts with bone in" +2044300,"MEAT OF SHEEP OR GOATS, FRESH, CHILLED OR FROZEN::Boneless" +2045000,"MEAT OF SHEEP OR GOATS, FRESH, CHILLED OR FROZEN::Meat of goats" +2050000,"::MEAT OF HORSES, ASSES, MULES OR HINNIES, FRESH, CHILLED OR FROZEN" +2060000,edible offal of bovine animals swine sheep goats horses asses mules or hinnies fresh chilled or frozen +2061000,"EDIBLE OFFAL OF BOVINE ANIMALS, SWINE, SHEEP, GOATS, HORSES, ASSES, MULES OR HINNIES, FRESH, CHILLED OR FROZEN::Of bovine animals, fresh or chilled" +2062100,"EDIBLE OFFAL OF BOVINE ANIMALS, SWINE, SHEEP, GOATS, HORSES, ASSES, MULES OR HINNIES, FRESH, CHILLED OR FROZEN::Tongues" +2062200,"EDIBLE OFFAL OF BOVINE ANIMALS, SWINE, SHEEP, GOATS, HORSES, ASSES, MULES OR HINNIES, FRESH, CHILLED OR FROZEN::Livers" +2062900,"EDIBLE OFFAL OF BOVINE ANIMALS, SWINE, SHEEP, GOATS, HORSES, ASSES, MULES OR HINNIES, FRESH, CHILLED OR FROZEN::Other" +2063000,"EDIBLE OFFAL OF BOVINE ANIMALS, SWINE, SHEEP, GOATS, HORSES, ASSES, MULES OR HINNIES, FRESH, CHILLED OR FROZEN::Of swine, fresh or chilled" +2064100,"EDIBLE OFFAL OF BOVINE ANIMALS, SWINE, SHEEP, GOATS, HORSES, ASSES, MULES OR HINNIES, FRESH, CHILLED OR 
FROZEN::Livers" +2064900,"EDIBLE OFFAL OF BOVINE ANIMALS, SWINE, SHEEP, GOATS, HORSES, ASSES, MULES OR HINNIES, FRESH, CHILLED OR FROZEN::Other" +2068000,edible offal of bovine animals swine sheep goats horses asses mules or hinnies fresh chilled or frozen >> other fresh or chilled +2068010,"EDIBLE OFFAL OF BOVINE ANIMALS, SWINE, SHEEP, GOATS, HORSES, ASSES, MULES OR HINNIES, FRESH, CHILLED OR FROZEN:Other, fresh or chilled :Of sheep or goats" +2068090,"EDIBLE OFFAL OF BOVINE ANIMALS, SWINE, SHEEP, GOATS, HORSES, ASSES, MULES OR HINNIES, FRESH, CHILLED OR FROZEN:Other, fresh or chilled :Other" +2069000,edible offal of bovine animals swine sheep goats horses asses mules or hinnies fresh chilled or frozen >> other frozen +2069010,"EDIBLE OFFAL OF BOVINE ANIMALS, SWINE, SHEEP, GOATS, HORSES, ASSES, MULES OR HINNIES, FRESH, CHILLED OR FROZEN:Other, frozen :Of sheep or goats" +2069090,"EDIBLE OFFAL OF BOVINE ANIMALS, SWINE, SHEEP, GOATS, HORSES, ASSES, MULES OR HINNIES, FRESH, CHILLED OR FROZEN:Other, frozen :Other" +2070000,meat and edible offal of the poultry of heading 0105 fresh chilled or frozen of fowls of the species gallus domesticus +2071100,"MEAT AND EDIBLE OFFAL, OF THE POULTRY OF HEADING 0105, FRESH, CHILLED OR FROZEN::Not cut in pieces, fresh or chilled" +2071200,"MEAT AND EDIBLE OFFAL, OF THE POULTRY OF HEADING 0105, FRESH, CHILLED OR FROZEN::Not cut in pieces, frozen" +2071300,"MEAT AND EDIBLE OFFAL, OF THE POULTRY OF HEADING 0105, FRESH, CHILLED OR FROZEN::Cuts and offal, fresh or chilled" +2071400,"MEAT AND EDIBLE OFFAL, OF THE POULTRY OF HEADING 0105, FRESH, CHILLED OR FROZEN::Cuts and offal, frozen" +2072400,"MEAT AND EDIBLE OFFAL, OF THE POULTRY OF HEADING 0105, FRESH, CHILLED OR FROZEN::Not cut in pieces, fresh or chilled" +2072500,"MEAT AND EDIBLE OFFAL, OF THE POULTRY OF HEADING 0105, FRESH, CHILLED OR FROZEN::Not cut in pieces, frozen" +2072600,"MEAT AND EDIBLE OFFAL, OF THE POULTRY OF HEADING 0105, FRESH, CHILLED OR FROZEN::Cuts and offal, fresh or chilled" +2072700,"MEAT AND EDIBLE OFFAL, OF THE POULTRY OF HEADING 0105, FRESH, CHILLED OR FROZEN::Cuts and offal, frozen" +2074100,"MEAT AND EDIBLE OFFAL, OF THE POULTRY OF HEADING 0105, FRESH, CHILLED OR FROZEN::Not cut in pieces, fresh or chilled" +2074200,"MEAT AND EDIBLE OFFAL, OF THE POULTRY OF HEADING 0105, FRESH, CHILLED OR FROZEN::Not cut in pieces, frozen" +2074300,"MEAT AND EDIBLE OFFAL, OF THE POULTRY OF HEADING 0105, FRESH, CHILLED OR FROZEN::Fatty livers, fresh or chilled" +2074400,"MEAT AND EDIBLE OFFAL, OF THE POULTRY OF HEADING 0105, FRESH, CHILLED OR FROZEN::other, fresh or chiled" +2074500,"MEAT AND EDIBLE OFFAL, OF THE POULTRY OF HEADING 0105, FRESH, CHILLED OR FROZEN::Other, frozen" +2075100,"MEAT AND EDIBLE OFFAL, OF THE POULTRY OF HEADING 0105, FRESH, CHILLED OR FROZEN::Not cut in pieces, fresh or chilled" +2075200,"MEAT AND EDIBLE OFFAL, OF THE POULTRY OF HEADING 0105, FRESH, CHILLED OR FROZEN::Not cut in pieces, frozen" +2075300,"MEAT AND EDIBLE OFFAL, OF THE POULTRY OF HEADING 0105, FRESH, CHILLED OR FROZEN::Fatty livers, fresh or chilled" +2075400,"MEAT AND EDIBLE OFFAL, OF THE POULTRY OF HEADING 0105, FRESH, CHILLED OR FROZEN::Other, fresh or chilled" +2075500,"MEAT AND EDIBLE OFFAL, OF THE POULTRY OF HEADING 0105, FRESH, CHILLED OR FROZEN::Other, forzen" +2076000,"MEAT AND EDIBLE OFFAL, OF THE POULTRY OF HEADING 0105, FRESH, CHILLED OR FROZEN::of guinea fowls" +2080000,other meat and edible meat offal fresh chilled or frozen +2081000,"OTHER MEAT AND EDIBLE MEAT OFFAL, FRESH, CHILLED OR FROZEN::Of 
rabbits or hares" +2083000,"OTHER MEAT AND EDIBLE MEAT OFFAL, FRESH, CHILLED OR FROZEN::Of primates" +2084000,"OTHER MEAT AND EDIBLE MEAT OFFAL, FRESH, CHILLED OR FROZEN::Of whales, dolphines and porpoises (mammals of the order catacea); of manatees and dugongs (mammals of the order sirenia)" +2085000,"OTHER MEAT AND EDIBLE MEAT OFFAL, FRESH, CHILLED OR FROZEN::Of reptiles (including snakes and turtles)" +2086000,"OTHER MEAT AND EDIBLE MEAT OFFAL, FRESH, CHILLED OR FROZEN::of camels and other camelids (Camelidae)" +2089000,other meat and edible meat offal fresh chilled or frozen >> other +2089010,"OTHER MEAT AND EDIBLE MEAT OFFAL, FRESH, CHILLED OR FROZEN:Other :Of wild animals" +2089090,"OTHER MEAT AND EDIBLE MEAT OFFAL, FRESH, CHILLED OR FROZEN:Other :Other" +2090000,pig fat free of lean meat and poutry fat not rendered or otherwise extracted fresh chilled frozen salted in brine dried or smoked +2091000,"PIG FAT, FREE OF LEAN MEAT AND POUTRY FAT, NOT RENDERED OR OTHERWISE EXTRACTED, FRESH, CHILLED, FROZEN, SALTED, IN BRINE, DIRED OR SMOKED::Of pigs" +2099000,"PIG FAT, FREE OF LEAN MEAT AND POUTRY FAT, NOT RENDERED OR OTHERWISE EXTRACTED, FRESH, CHILLED, FROZEN, SALTED, IN BRINE, DIRED OR SMOKED::Other" +2100000,in meat and edible meat offal salted brine dried or smoked edible flours and meals of meat or meat offal meat of swine +2101100,"MEAT AND EDIBLE MEAT OFFAL, SALTED, IN BRINE, DRIED OR SMOKED; EDIBLE FLOURS AND MEALS OF MEAT OR MEAT OFFAL::Hams, shoulders and cuts thereof, with bone in" +2101200,"MEAT AND EDIBLE MEAT OFFAL, SALTED, IN BRINE, DRIED OR SMOKED; EDIBLE FLOURS AND MEALS OF MEAT OR MEAT OFFAL::Bellies (streaky) and cuts thereof" +2101900,"MEAT AND EDIBLE MEAT OFFAL, SALTED, IN BRINE, DRIED OR SMOKED; EDIBLE FLOURS AND MEALS OF MEAT OR MEAT OFFAL::Other" +2102000,"MEAT AND EDIBLE MEAT OFFAL, SALTED, IN BRINE, DRIED OR SMOKED; EDIBLE FLOURS AND MEALS OF MEAT OR MEAT OFFAL::Meat of bovine animals" +2106901,FOOD PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED:Other:Soft drink concentrates(OLD tariff) +2106909,FOOD PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED:Other:Other(OLD tariff) +2109100,"MEAT AND EDIBLE MEAT OFFAL, SALTED, IN BRINE, DRIED OR SMOKED; EDIBLE FLOURS AND MEALS OF MEAT OR MEAT OFFAL::Of primates" +2109200,"MEAT AND EDIBLE MEAT OFFAL, SALTED, IN BRINE, DRIED OR SMOKED; EDIBLE FLOURS AND MEALS OF MEAT OR MEAT OFFAL::Of whales, dolphins and porpoises (mammals of the order catecea); of manatees and dugongs (mammals of the order sirenia)" +2109300,"MEAT AND EDIBLE MEAT OFFAL, SALTED, IN BRINE, DRIED OR SMOKED; EDIBLE FLOURS AND MEALS OF MEAT OR MEAT OFFAL::Of reptiles (including snakes and turtles)" +2109900,"MEAT AND EDIBLE MEAT OFFAL, SALTED, IN BRINE, DRIED OR SMOKED; EDIBLE FLOURS AND MEALS OF MEAT OR MEAT OFFAL::Other" +2207101,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF 80% VOL. OR HIGHER; ETHYL ALCOHOL AND OTHER SPIRITS, DENATURED, OF ANY STRENGTH:Undenatured ethyl alcohol of an alcoholic strength by volume of 80% vol. 
or higher:Rectified spirit(OLD tariff)" +2208201,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Spirits obtained by distilling grape wine or grape marc :In containers holding 2 l or less(OLD tariff)" +2208209,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Spirits obtained by distilling grape wine or grape marc :Other(OLD tariff)" +2208301,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Whiskies l 182% -:in containers holding 2 l or less(OLD tariff)" +2208309,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Whiskies l 182% -:Other(OLD tariff)" +2208401,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Rum and other spirits obtained by distilling fermented sugarcane products:in containers holding 2 l or less(OLD tariff)" +2208409,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Rum and other spirits obtained by distilling fermented sugarcane products:Other(OLD tariff)" +2208501,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Gin and Geneva :in containers holding 2 l or less(OLD tariff)" +2208509,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Gin and Geneva :Other(OLD tariff)" +2208701,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Liqueurs and cordials :In containers holding 2 l or less(OLD tariff)" +2208709,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Liqueurs and cordials :Other(OLD tariff)" +2208901,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Other :In containers holding 2 l or less(OLD tariff)" +2208909,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Other :Other(OLD tariff)" +2301201,"FLOURS, MEALS AND PELLETS, OF MEAT OR MEAT OFFAL, OF FISH OR OF CRUSTACEANS, MOLLUSCS OR OTHER AQUATIC INVERTEBRATES, UNFIT FOR HUMAN CONSUMPTION; GREAVES:Flours, meals and pellets, of fish or of crustaceans, molluscs or other aquatic invertebrates :Fish meal, unfit for human consumption(OLD tariff)" +2306410,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305::Of rape or colza seeds(OLD tariff)" +2306901,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Other:Oil-cake and oil-cake meal, expeller variety(OLD tariff)" +2306902,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Other:Oil-cake and oil-cake meal, 
sovent extracted (defatted) variety(OLD tariff)" +2309903,PREPARATIONS OF A KIND USED IN ANIMAL FEEDING:Other :Feeds for fish (prawn etc.)(OLD tariff) +2403103,"OTHER MANUFACTURED TOBACCO AND MANUFACTURED TOBACCO SUBSTITUTES; HOMOGENISED OR RECONSTITUTED TOBACCO; TOBACCO EXTRACTS AND ESSENCES:Smoking tobacco, whether or not containing tobacco substitutes in any proportion :Biris(OLD tariff)" +2505101,"NATURAL SANDS OF ALL KINDS, WHETHER OR NOT COLOURED, OTHER THAN METAL-BEARING SANDS OF CHAPTER 26:Silica sands and quartz sands:Silica sands(OLD tariff)" +2507002,"KAOLIN AND OTHER KAOLINIC CLAYS, WHETHER OR NOT CALCINED:Kaolin and other kaolinic clays, whether or not calcined :Other(OLD tariff)" +2508502,"OTHER CLAYS (NOT INCLUDING EXPANDED CLAYS OF HEADING 6806), ANDALUSITE, KYANITE AND SILLIMANITE, WHETHER OR NOT CALCINED; MULLITE; CHAMOTTE OR DINAS EARTHS:Andalusite, kyanite and sillimanite:Kyanite(OLD tariff)" +2508503,"OTHER CLAYS (NOT INCLUDING EXPANDED CLAYS OF HEADING 6806), ANDALUSITE, KYANITE AND SILLIMANITE, WHETHER OR NOT CALCINED; MULLITE; CHAMOTTE OR DINAS EARTHS:Andalusite, kyanite and sillimanite:Sillimanite(OLD tariff)" +2524901,ASBESTOS:Other:In rock form(OLD tariff) +2524902,"ASBESTOS:Other:Fibre raw, beaten or washed or graded to length(OLD tariff)" +2524903,ASBESTOS:Other:Flakes or powder(OLD tariff) +2524909,ASBESTOS:Other:Other(OLD tariff) +2530909,MINERAL SUBSTANCES NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Other(OLD tariff) +2614003,TITANIUM ORES AND CONCENTRATES:Titanium ores and concentrates :Rutile(OLD tariff) +2710111,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Light oils and preparations :Motor Spirit(OLD tariff)" +2812102,HALIDES AND HALIDE OXIDES OF NON-METALS:Chlorides and chloride oxides :Phosphorus trichloride and Phosphorus pentachloride(OLD tariff) +2812104,"HALIDES AND HALIDE OXIDES OF NON-METALS:Chlorides and chloride oxides :Sulphur oxychloride, Sulphur monochloride, Sulphur dichloride and Thionyl chloride(OLD tariff)" +2828901,HYPOCHLORITES; COMMERCIAL CALCIUM HYPOCHLORITES; CHLORITES; HYPOBROMITES:Other :Sodium hypochlorites(OLD tariff) +2843901,"COLLOIDAL PRECIOUS METALS; INORGANIC OR ORGANIC COMPOUNDS OF PRECIOUS METALS, WHETHER OR NOT CHEMICALLY DEFINED; AMALGAMS OF PRECIOUS METALS:Other compounds; amalgams :Other compounds(OLD tariff)" +2844302,"RADIOACTIVE CHEMICAL ELEMENTS AND RADIOACTIVE ISOTOPES (INCLUDING THE FISSILE OR FERTILE CHEMICAL ELEMENTS AND ISOTOPES) AND THEIR COMPOUNDS; MIXTURES AND RESIDUES CONTAINING THESE PRODUCTS:Uranium depleted in U235 and its compounds; thorium and its compounds; alloys, dispersions (including cermets), ceramic products and mixtures containing uranium depleted in U235, thorium or compounds of these products :Compounds of thorium or of uranium depleted in U235(OLD tariff)" +2903391,HALOGENATED DERIVATIVES OF HYDROCARBONS:Other:Fluorinated derivatives(OLD tariff) +2909301,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Aromatic ethers and their halogenated, sulphonated, nitrated or nitrosated derivatives :Anisole and their derivatives (OLD tariff)" +2920904,"ESTERS OF OTHER 
INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Other(OLD tariff)" +2921191,"AMINE- FUNCTION COMPOUNDS:Other:2-Chloro N,N-Di-isopropyl ethylamine and Ethanamine, 2-Chloro-N, N-dimethyl(OLD tariff)" +2921421,"AMINE- FUNCTION COMPOUNDS:Aniline derivatives and their salts:Para chloroaniline, ortho chloro paranitroaniline, dichloroaniline, 2, 6-dichloro paranitroaniline, 2-4-5-trichloroaniline (OLD tariff)" +2921422,"AMINE- FUNCTION COMPOUNDS:Aniline derivatives and their salts:Benzyl ethyl aniline, ethyl aniline, diethylaniline, dimethylaniline, meta nitroaniline, Para nitroaniline (OLD tariff)" +2921423,"AMINE- FUNCTION COMPOUNDS:Aniline derivatives and their salts:2-amino 3, 5 xylne sulphonic acid, Benzyl ethyl aniline sulphuric acid, metanillic acid (meta amino benzene sulphonic acid), Sulphanillic acid (para aminobenzene sulphonic acid para aniline sulphonic acid), Ethyl hydroxy ethylaniline, Methyl dopa (1-alpha methyl-3, 4-dihydroxyphenylaniline) (OLD tariff)" +2921451,"AMINE- FUNCTION COMPOUNDS:1-Naphthylamine (alpha-naphthylamine), 2-Naphthylamine (betanaph thylamine) and their derivatives; salts thereof :Alpha naphthylamine, Phenyl alpha naphthylamine, Phenyl beta naphthylamine, Amino F-acid, Aminolineli-R-acid, Sodium naphthionate (OLD tariff)" +2921452,"AMINE- FUNCTION COMPOUNDS:1-Naphthylamine (alpha-naphthylamine), 2-Naphthylamine (betanaph thylamine) and their derivatives; salts thereof :Bronner's acid (2-naphthylamine-6-sulphonic acid), cleve's acid (1-naphthylamine-6-sulphonic acid), epsilon acid (1-naphthylamine-3,8-disulphonic acid), koch's acid (1-naphthylamine-3,6,8-trisulphonic acid), Laurent's acid (1-naphthylamine-5-sulphonic acid), tobias acid (2-naphthylamine-1-sulphonic acid) (OLD tariff)" +2921453,"AMINE- FUNCTION COMPOUNDS:1-Naphthylamine (alpha-naphthylamine), 2-Naphthylamine (betanaph thylamine) and their derivatives; salts thereof :Naphthionic acid (1-naphthylamine-4-sulphonic acid), Para tolyl peri acid (para tolyl-1-naphthylamine 8-sulphonic acid), phenyl peri acid (phenyl- 1-naphthylamine-8-sulphonic acid) (OLD tariff)" +2922111,"OXYGEN-FUNCTION AMINO-COMPOUNDS:Monoethanolamine and its salts;:2-Hydroxy N,N-Diisopropyl - Ethylamine, N,N-Diethyl Amino ethyl Chloride Hydrochloride, Di-ethyl Amino ethanethiol Hydrochloride, Di-Methyl Amino ethyl chloride Hydrochloride, Di-Methyl Amino ethanethiol, Di-Methyl Amino ethanethiol Hydrochloride(OLD tariff)" +2922121,OXYGEN-FUNCTION AMINO-COMPOUNDS:Diethanolamine and its salts:Ethyldiethanolamine and Methyldiethanolamine(OLD tariff) +2922291,"OXYGEN-FUNCTION AMINO-COMPOUNDS:Other:2-amino 4-nitrophenol, Meta aminophenol, Para aminophenol, Meta diethyl amino-phenol(OLD tariff)" +2922292,"OXYGEN-FUNCTION AMINO-COMPOUNDS:Other:2-amino-1-phenol-4-sulphonic acid, 6-nitro-O-aminophenol-4-s ulphonic acid, Phenyl gamma acid (phenyl 2-amino-naphthol-6-sulphonic acid), Phenyl J acid (phenyl-2-amino-8 naphthol-7-sulphonic acid), S acid, peri acid (1-amino-8-naphthol-4-4-sulphoxinic acid, 1-naphthylamine-8-sulphonic acid), Meta-phenylene diamine-4-sulphonic acid (OLD tariff)" +2922293,"OXYGEN-FUNCTION AMINO-COMPOUNDS:Other:N-methyl-para-aminophenol sulphate (motol),2, 5 dimethoxy aniline, Para acetyl aminophenol (paracetamol), Para cresidine, Picramic acid (T-grade)(OLD tariff)" +2922501,"OXYGEN-FUNCTION AMINO-COMPOUNDS:Amino-alcohol-phenols, amino-acid-phenols and other amino-compounds with oxygen 
function:Para-amino-salicylic acid, Methyl anthranilate, Procaine hydrochloride, Amino anisic acid anilide,L-tyrosine (p-hydroxyphenyl amine) (OLD tariff)" +2922502,"OXYGEN-FUNCTION AMINO-COMPOUNDS:Amino-alcohol-phenols, amino-acid-phenols and other amino-compounds with oxygen function:Frusemide, aminodial, N-acetyl anthranilic acid, domperidone(OLD tariff)" +2930909,ORGANO-SULPHUR COMPOUNDS:Other :Other(OLD tariff) +2933391,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Derivatives of pyridine (OLD tariff) +2935001,"SULPHONAMIDES:Sulphonamides :Sulphamethoxazole, sulphafurazole, sulphadiazine, sulphadimidine, sulphacetamide (OLD tariff)" +2935002,"SULPHONAMIDES:Sulphonamides :Sulphamethoxypyridarine, Sulphamethiazole, sulphamoxole, sulphamide (OLD tariff)" +2935902,"SULPHONAMIDES::Sulphamethoxypyridarine, sulphamethiazole, sulphamoxole,sulphamide(OLD tariff)" +2941901,ANTIBIOTICS:Other :Rifampicin and its salts (OLD tariff) +2942001,"OTHER ORGANIC COMPOUNDS:Other organic compounds:Cefadroxil and its salts, ibuprofane, nifedipine, ranitidine, danes salt of D(-) phenyl glycine, D(-) para hydroxy dane's salts (OLD tariff)" +2942002,"OTHER ORGANIC COMPOUNDS:Other organic compounds:Timolo maleate, terbutoline sulphate, D(-) phenyl glycin chloride HCl (DPGCH), imipramine HCl, amitryptyline HCl, cycteanune HCl, atenolol, propronalol(OLD tariff)" +2942003,"OTHER ORGANIC COMPOUNDS:Other organic compounds:Diloxanide furoate, cimetidine, oxyclozanide, famotidine (OLD tariff)" +3001909,"GLANDS AND OTHER ORGANS FOR ORGANOTHERAPEUTIC USES, DRIED,WHETHER OR NOT POWDERED; EXTRACTS OF GLANDS OR OTHER ORGANS OR OF THEIR SECRETIONS FOR ORGANOTHERAPEUTIC USES; HEPARIN AND ITS SALTS; OTHER HUMAN OR ANIMAL SUBSTANCES PREPARED FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Other (OLD tariff)" +3002101,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Antisera, other blood fractions and immunological products, whether or not modified or obtained by biotechnological processes:Antisera(OLD tariff)" +3002109,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Antisera, other blood fractions and immunological products, whether or not modified or obtained by biotechnological processes:Other (OLD tariff)" +3002201,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine :Single vaccines (OLD tariff)" +3002202,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine :Mixed vaccines (OLD tariff)" +3003901,"MEDICAMENTS (EXCLUDING GOODS OF 
HEADING 3002, 3005 OR 3006) CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Ayurvedic, Unani, Siddha, Homoeopathic or Bio-chemic systems medicaments (OLD tariff)" +3003902,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Menthol crystals and milk of magnesia(OLD tariff)" +3003903,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Bovine albumin and drugs of animal origin, merbromine national formulary XII (mercurochrome), calcium sennoside, anaesthetic agents used in human or veterinary medicine or surgery, aluminium hydroxide gel(OLD tariff)" +3004201,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Cephalosporins and their derivatives (OLD tariff)" +3004203,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Fluoroquinolones (OLD tariff)" +3004204,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Tetracyclines (OLD tariff)" +3004206,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Macrolide (OLD tariff)" +3004209,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Other (OLD tariff)" +3004391,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Pituitary hormones; prednisolone; dexamethasone; danazol; other progestogen and oestogen group hormones (OLD tariff)" +3004392,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Gonadotrophins and luteinising hormone (OLD tariff)" +3004503,"MEDICAMENTS 
(EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing vitamins or other products of heading 2936:Preparations of vitamins(OLD tariff)" +3004901,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Ayurvedic, Unani, Homoeopathic, Siddha or Bio-chemic systems medicaments, put up for retail sale (OLD tariff)" +3004902,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Anthelmintics drugs; antiamoebic and other antiprotozal drugs; antifungal drugs (OLD tariff)" +3004903,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Antihistaminics drugs; antacids preparations; antiulcer drugs; antiemitics and other gastrointestinal drugs (OLD tariff)" +3004904,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Anticancer drugs (OLD tariff)" +3004905,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Antitubercular drugs; antileprotic drugs; antimalarial drugs (OLD tariff)" +3004906,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Nonsteroidal antiinflammatory, analgesics and antipyratic drugs (OLD tariff)" +3004907,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Antihypertensive drugs(OLD tariff)" +3004908,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:AnAntiepiliptic drugs; sulfa drugs not elsewhere specified or included, preparations of enzymes; veterinary medicinal preparations, not for human use, not elsewhere specified or included; oral rehydration salts; antibacterial formulations not elsewhere specified or included, sedatives and tranquilizers(OLD tariff)" +3004909,"MEDICAMENTS (EXCLUDING 
GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Other(OLD tariff)" +3010000,live fish ornamental fish +3011000,LIVE FISH::Ornamental Fish(OLD tariff) +3011100,LIVE FISH:Freshwater:Freshwater +3011900,LIVE FISH:other:Other +3019100,"LIVE FISH::Trout (Salmo trutta, Oncorhynchus mykiss, Oncorhynchus clarki, Oncorhynchus aguabonita, Oncorhynchus gilae, Oncorhynchus apache and Oncorhynchus chrysogaster)" +3019200,LIVE FISH::Eels (Anguilla spp.) +3019300,LIVE FISH::Carp +3019400,LIVE FISH::Bluefin tunas (Thunnus thynnus) +3019500,LIVE FISH::Southern bluefin tunas (Thunnus maccoyii) +3019900,LIVE FISH::Other +3020000,fish fresh or chilled excluding fish fillets and other fish meatof heading 0304 salmonidae excluding edible fish offal of sub headings 0302 91to 0302 99 +3021100,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Trout (Salmo trutta, Oncorhynchus mykiss, Oncorhynchus clarki, Oncorhynchus aguabonita, Oncorhynchus gilae, Oncorhynchus apache and Oncorhynchus chrysogaster)" +3021300,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Pacific salmon:Pacific Salmon (Oncorhynchus nerka, Oncorhynchus gorbuscha, Oncorhynhus keta, Onchrhynchus masou and Oncorhynchus rhodurus)" +3021400,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Atlantic salmon:Atlantic salmon (Salmo salar) and Danube salmon (Hucho hucho)" +3021900,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Other" +3022100,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Halibut (Reinhardtius hippoglossoides, Hippoglossus hippoglossus, Hippoglossus stenolepis)" +3022200,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Plaice (Pleuronectes platessa)" +3022300,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Sole (Solea spp.)" +3022400,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Turbots:Turbots (Psetta maxima)" +3022900,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Other" +3023100,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Albacore or longfinned tunas (Thunnus alalunga)" +3023200,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Yellowfin tunas (Thunnus albacares)" +3023300,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Skipjack or stripe-bellied bonito" +3023400,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Bigeye tunas (Thunnus obesus)" +3023500,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Atlantic and Pacific Bluefin tunas (Thunnus thynnus, Thunnus orientalis)" +3023600,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Southern bluefin tunas (Thunnus maccoyii)" +3023900,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Other" +3024000,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Herrings (Clupea harengus, Clupea pallasii) excluding livers and roes(OLD tariff)" +3024100,"FISH, FRESH OR CHILLED, EXCLUDING FISH 
FILLETS AND OTHER FISH MEAT OF HEADING 0304:Herrings:Herrings (Clupea harengus, Clupea pallasii)" +3024200,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Anchovies (engraulis spp.):Anchovies (Engraulis spp.)" +3024300,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:sardines:Sardines, (Sardina pilchardus, sardinops spp.) sardinella (Sardinella spp.) brisling or sprats (Sprattus sprattus)" +3024400,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Mackerel:Mackerel (Scomber scombrus, Scomber australasicus, Scomber japonicus)" +3024500,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:jack and horses:Jack and horse mackerel (Trachurus spp.)" +3024600,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:cobia:Cobia (Rachycentron canadum)" +3024700,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:swordfish:Swordfish (Xiphias gladius)" +3024900,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Other" +3025100,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Fish of the families Bregmacerotidae, Euclichthyidae, Gadidae, Macrouridae, Melanonidae, Merlucciidae, Moridae and Muraenolepididae, excluding edible fish offal of sub-headings 0302 91 to 0302 99:Cod (Gadus morhua, Gadus ogac, Gadus macrocephalus)" +3025200,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Haddock:Haddock (Melanogrammus aegllefinus)" +3025300,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Coalfish:Coalfish (Pollachius virens)" +3025400,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Hake:Hake (Merluccius spp., Urophycis spp.)" +3025500,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:alaska pollack:Alaska Pollack (Theragra chalcogramma)" +3025600,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Blue whitings:Bluewhitings (Micromesistius poutassou, Micromesistius australis)" +3025900,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:other:Other" +3026300,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Coalfish (Pollachius virens)(OLD tariff)" +3027100,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Tilapias:Tilapias (Oreochromis spp.)" +3027200,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:catfish:Catfish (Pangasius spp., Sirurus spp., Clarias spp., Ictalurus spp)(OLD tariff)" +3027300,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Carp:Carp(Cyprinus carpio, Carassius carassius, Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus" +3027400,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Eels:Eels (Anguilla spp.)" +3027900,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Other:Other" +3028100,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Other fish, excluding livers and roes:Dogfish and other sharks" +3028200,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Rays and skates:Rays and skates (Rajidae)" +3028300,"FISH, FRESH OR 
CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Toothfish (Dissostichus spp.):Toothfish (Dissostichus spp.)" +3028400,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Seabass:Seabass (Dicentrarchus spp.)" +3028500,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Seabream (Sparidae):Seabream (Sparidae)" +3028900,fish fresh or chilled excluding fish fillets and other fish meatof heading 0304 salmonidae excluding edible fish offal of sub headings 0302 91to 0302 99 >> other +3028910,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Others:Hilsa ( Tenualosa ilisha)" +3028920,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:pomfret //////// Other:Dara" +3028930,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Others:Pomfret" +3028990,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:pomfret //////// Other:other" +3029000,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:livers and roes:livers and roes(OLD tariff)" +3029100,fish fresh or chilled excluding fish fillets and other fish meatof heading 0304 salmonidae excluding edible fish offal of sub headings 0302 91to 0302 99 >> livers roes and milt +3029110,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Livers, roes and milt:livers, roes and milt(OLD tariff)" +3029200,fish fresh or chilled excluding fish fillets and other fish meatof heading 0304 salmonidae excluding edible fish offal of sub headings 0302 91to 0302 99 >> shark fins +3029210,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Sharkfins:Shark fins(OLD tariff)" +3029900,fish fresh or chilled excluding fish fillets and other fish meatof heading 0304 salmonidae excluding edible fish offal of sub headings 0302 91to 0302 99 >> other +3029910,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Other:Fish fins other than shark fins; heads, tails and maws" +3029990,"FISH, FRESH OR CHILLED, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Other:Other edible fish offal" +3030000,fish frozen excluding fish fillets and other fish meat of heading 0304 salmonidae excluding edible fish offal of sub headings 0303 91 to 0303 99 +3031100,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Sockeye salmon (red-salmon) (oncorhynchus nerka)" +3031200,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:other:Other Pacific Salmon (Oncorhynchus gorbuscha, Oncorhynchus keta, Oncorhynchus tschawytscha, Oncorhynchus kisutch, Oncorhynchus masou and Oncorhynchus rhodurus)" +3031300,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Atlantic salmon:Atlantic salmon (Salmo salar and Danube salmon (Hucho hucho)" +3031400,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Trout:Trout (Salmo trutta, Oncorhynchus mykiss, Oncorhynchus clarki, Oncorhynchus aguabonita, Oncorhynchus gilae, Oncorhynchus apache and Oncorhynchus chrysogaster)" +3031900,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Other" +3032300,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Tilapias:Tilapias (Oreochromis spp.)" +3032400,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:catfish:Catfish (Pangasius spp., 
Silurus spp., Clarias spp., Ictalurus spp.)" +3032500,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Carp:Carp (Cyprinus spp, Carassius spp, Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.)" +3032600,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Eels (Anguilla spp.):Eels (Anguilla spp.)" +3032900,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Other" +3033100,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Halibut (Reinhardtius hippoglossoides, Hippoglossus hippoglossus, Hippoglossus stenolepis)" +3033200,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Plaice (Pleuronectes platessa)" +3033300,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Sole (Solea spp.)" +3033400,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:turbots:Turbots (Psetta maxima)" +3033900,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Other" +3034100,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Albacore or longfinned tunas (Thunnus alalunga)" +3034200,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Yellowfin tunas (Thunnus albacares)" +3034300,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Skipjack or stripe-bellied bonito" +3034400,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Bigeye tunas (thunnus obesus)" +3034500,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Atlantic and Pacific bluefin tunas:Bluefin tunas (thunnus thynnus)" +3034600,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Southern bluefin tunas (thunnus maccoyii)" +3034900,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::SCADS(DECAPTERUS SPP.)(OLD tariff)" +3035100,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Herrings (Clupea harengus, Clupea pallasii)," +3035300,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:sardines:Sardines (Sardina pilchardus, Sardinops spp.) sardinella (sardinella spp.) 
brisling or sprats (Sprattus sprattus)" +3035400,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Mackerel:Mackerel (Scomber scombrus, Scomber australasicus, Scomber japonicus)" +3035500,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Jack and harse mackerel (Trachurus spp.):jack and horse mackerel (Trachurus spp.)" +3035600,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Cobia:Cobia (Rachycentron canadum)" +3035700,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Swordfish:Swordfish (Xiphias gladius)" +3035900,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Others:Other" +3035910,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Others:Indian mackerels (Rastreliger spp.)" +3035990,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Others:other" +3036100,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Swordfish (Xiphias gladius)(OLD tariff)" +3036200,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304::Toothfish (Dissostichus spp.)(OLD tariff)" +3036300,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:cod:Cod (Gadus morhua, Gadus ogac, Gadus macrocephalus" +3036400,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Haddock:Haddock (Melangrammus aeglefinus)" +3036500,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:coalfish:Coalfish (Pollachius virens)" +3036600,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Hake:Hake (Merluccius spp., Urophycis spp.)" +3036700,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Alaska Pollack:Alaska Pollack (Theragra Chalcogramma)" +3036800,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Blue whitings:Blue whitings (Micromesistius poutassou, Micromesistius australis)" +3036900,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Other:Other" +3038100,fish frozen excluding fish fillets and other fish meat of heading 0304 salmonidae excluding edible fish offal of sub headings 0303 91 to 0303 99 >> dogfish and other sharks +3038110,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Dogfish and other sharks:Dogfish" +3038190,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Dogfish and other sharks:Other Sharks" +3038200,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Rays and skates:Rays and skates (Rajidae)" +3038300,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Toothfish (Dissostichus spp.):Toothfish (Dissostichus spp.)" +3038400,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Seabass:Seabass (Dicentrarchus spp.)" +3038900,fish frozen excluding fish fillets and other fish meat of heading 0304 salmonidae excluding edible fish offal of sub headings 0303 91 to 0303 99 >> other +3038910,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:other:Hilsa (Tenualosa ilisha)" +3038920,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:other:Dara" +3038930,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:other:Ribbon fish" +3038940,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:other:Seer" +3038950,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF 
HEADING 0304:other:Pomfret (White or silver or black)" +3038960,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:other:Ghol" +3038970,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:other:Threadfin" +3038980,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:other:Croakers, groupers, flounders" +3038990,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:other:Other" +3038991,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:other:Edible fishmaws of wild life(OLD tariff)" +3038992,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:other:Edible sharkfins of wild life(OLD tariff)" +3038999,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:other:Other(OLD tariff)" +3039010,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Livers and roes:Egg or Egg yolk of fish(OLD tariff)" +3039090,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Livers and roes:Other(OLD tariff)" +3039100,fish frozen excluding fish fillets and other fish meat of heading 0304 salmonidae excluding edible fish offal of sub headings 0303 91 to 0303 99 >> livers roes and milt +3039110,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:livers, roes and milt:Egg or Egg yolk of fish" +3039190,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:livers, roes and milt:Other" +3039200,fish frozen excluding fish fillets and other fish meat of heading 0304 salmonidae excluding edible fish offal of sub headings 0303 91 to 0303 99 >> shark fins +3039210,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Shark fins:Shark fins(OLD tariff)" +3039900,fish frozen excluding fish fillets and other fish meat of heading 0304 salmonidae excluding edible fish offal of sub headings 0303 91 to 0303 99 >> other +3039910,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Other:Fish fins other than shark fins; heads, tails and maws" +3039990,"FISH, FROZEN, EXCLUDING FISH FILLETS AND OTHER FISH MEAT OF HEADING 0304:Other:Other edible fish offal" +3040000,fish fillets and other fish meat whether or not minced fresh chilled or frozen fresh or chilled fillets of tilapias oreochromis spp catfish pangasius spp silurus spp clarias spp ictalurus spp carp cyprinus spp carassius spp not minced fresh chilled or frozen fresh or chilled fillets of tilapias oreochromis spp catfish pangasius spp silurus spp clarias spp ictalurus spp chthys spp cirrhinus spp mylopharyngodon piceus catla catla labeo spp osteochilus hasselti leptobarbus hoeveni megalobrama spp eels anguilla spp nile perch lates niloticus and snakeheads channa spp +3041100,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.)::Swordfish (Xiphias gladius)(OLD tariff)" +3041200,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, 
Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.)::Toothfish (Dissostichus spp.)(OLD tariff)" +3041900,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.)::other(OLD tariff)" +3042100,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.)::Swordfish (Xiphias gladius)(OLD tariff)" +3042200,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.)::Toothfish (Dissostichus spp.)(OLD tariff)" +3042910,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Other:Hilsa(OLD tariff)" +3042920,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Other:Shark(OLD tariff)" +3042930,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Other:Seer(OLD tariff)" +3042940,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Other:Tuna(OLD tariff)" +3042950,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., 
Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Other:Cuttlefish(OLD tariff)" +3042990,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Other:Other(OLD tariff)" +3043100,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Tilapias:Tilapias (Oreochromis spp.)(OLD tariff)" +3043200,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Catfish:Catfish (Pangasois s[[., Silurus spp., Clarias spp.,Ictalusus spp.)(OLD tariff)" +3043300,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Nile perch:Nile perch (Lates niloticus)(OLD tariff)" +3043900,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Other:Other(OLD tariff)" +3044100,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Fresh or chilled fillets of other fish:Pacific Salmon (Oncorhynchus nerka, Oncorhynchus gorbuscha, Oncorhynchus keta, Oncorhynchus tschawytscha, Oncorhynchus kisutch, Oncorhynchus massou and Oncorhynchus rhodurus), Atlantic salmon (Salmo salar and Danube salmon (Hucho hucho)" +3044200,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus 
spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Trout:Trout (Salmo trutta, Oncorhynchus mykiss, Onchorhynchus clarki, Oncorhynchus aguabonita, Oncorhynchus gilae, Oncorhynchus apache and Oncorhynchus chrysogaster)" +3044300,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Flat fish:Flat fish (Pleuronectidae, Bothidae, Cynoglossidae, Soleidae, Scophthalmidae and Citharidae)" +3044400,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Fish of the families Bregmacerotidae, Euclichthyidae, Gadidae,:Fish of the families Bregmacerotidae, Euclichthyidae, Gadidae, Macrouridae, Melanonidae, Merlucciidae, Moridae, and Muraenolepididae" +3044500,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Swordfish:Swordfish (Xiphias gladius)" +3044600,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Toothfish:Toothfish (Dissostichus spp.)" +3044700,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.)::Dogfish and other sharks" +3044800,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.)::Rays and skates (Rajidae)" +3044900,fish fillets and 
other fish meat whether or not minced fresh chilled or frozen fresh or chilled fillets of tilapias oreochromis spp catfish pangasius spp silurus spp clarias spp ictalurus spp carp cyprinus spp carassius spp not minced fresh chilled or frozen fresh or chilled fillets of tilapias oreochromis spp catfish pangasius spp silurus spp clarias spp ictalurus spp chthys spp cirrhinus spp mylopharyngodon piceus catla catla labeo spp osteochilus hasselti leptobarbus hoeveni megalobrama spp eels anguilla spp nile perch lates niloticus and snakeheads channa spp >> other +3044910,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Other:Hilsa (Tenualosa ilisha)" +3044920,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Other:Shark(OLD tariff)" +3044930,"FISH FILLETS AND OTHER FISH MEAT (WHETHER OR NOT MINCED), FRESH, CHILLED OR FROZEN:Other:Seer" +3044940,"FISH FILLETS AND OTHER FISH MEAT (WHETHER OR NOT MINCED), FRESH, CHILLED OR FROZEN:Other:Tuna" +3044990,"FISH FILLETS AND OTHER FISH MEAT (WHETHER OR NOT MINCED), FRESH, CHILLED OR FROZEN:Other:Other" +3045100,"FISH FILLETS AND OTHER FISH MEAT (WHETHER OR NOT MINCED), FRESH, CHILLED OR FROZEN:Other fresh or chilled:Tilapias (Oreochromis spp.) catfish (Pangasius spp.,Silurus spp., Clarias spp.,Ictalurus spp.,), carp (Cyprinus carpio, Carrassius carassius, Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylophayngodon piceus), eels (Anguilla spp.) 
Nile perch (Lates niloticus) and snake heads (channa spp.)" +3045200,"FISH FILLETS AND OTHER FISH MEAT (WHETHER OR NOT MINCED), FRESH, CHILLED OR FROZEN:Salmonidae:Salmonidae(OLD tariff)" +3045300,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Fish:Fish of the families Bregmacerotidae, Euclichthyidae, Gadidae, Macrouridae, Melanonidae, Merlucciidae, Moridae and Muraenolepididae(OLD tariff)" +3045400,"FISH FILLETS AND OTHER FISH MEAT (WHETHER OR NOT MINCED), FRESH, CHILLED OR FROZEN:Swordfish (xiphias gladis):Swordfish (Xiphias gladius)(OLD tariff)" +3045500,"FISH FILLETS AND OTHER FISH MEAT (WHETHER OR NOT MINCED), FRESH, CHILLED OR FROZEN:Tooth fish:Toothfish (Dissostichus spp.)(OLD tariff)" +3045600,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.)::Dogfish and other sharks(OLD tariff)" +3045700,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.)::Rays and skates (Rajidae)(OLD tariff)" +3045910,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Other:Hilsa (Tenualosa ilisha)(OLD tariff)" +3045920,"FISH FILLETS AND OTHER FISH MEAT (WHETHER OR NOT MINCED), FRESH, CHILLED OR FROZEN:Other:Shark(OLD tariff)" +3045930,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Other:Seer(OLD tariff)" +3045940,"FISH FILLETS AND OTHER FISH MEAT (WHETHER OR NOT MINCED), FRESH, CHILLED OR FROZEN:Other:Tuna(OLD tariff)" +3045990,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla 
spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Other:Other(OLD tariff)" +3046100,"FISH FILLETS AND OTHER FISH MEAT (WHETHER OR NOT MINCED), FRESH, CHILLED OR FROZEN:Frozen fillets of tilapias:Tilapias (Oreochromis spp.)" +3046200,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):catfish:Catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.)" +3046300,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Nile perch:Nile Perch (Lates niloticus)" +3046900,"FISH FILLETS AND OTHER FISH MEAT (WHETHER OR NOT MINCED), FRESH, CHILLED OR FROZEN:other:Other" +3047100,"FISH FILLETS AND OTHER FISH MEAT (WHETHER OR NOT MINCED), FRESH, CHILLED OR FROZEN:Cod:Cod (Gadus morhua, Gadus ogac, Gadus macrocephalus)" +3047200,"FISH FILLETS AND OTHER FISH MEAT (WHETHER OR NOT MINCED), FRESH, CHILLED OR FROZEN:Haddock:Haddock (Melanogrammus aeglefinus)" +3047300,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Coalfish:Coalfish (Pollachius virens)" +3047400,"FISH FILLETS AND OTHER FISH MEAT (WHETHER OR NOT MINCED), FRESH, CHILLED OR FROZEN:Hake:Hake (Merluccius spp.,Urophycis spp.)" +3047500,"FISH FILLETS AND OTHER FISH MEAT (WHETHER OR NOT MINCED), FRESH, CHILLED OR FROZEN:Alaska Pollack:Alaska Pollack (Theragra chalcogramma)" +3047900,"FISH FILLETS AND OTHER FISH MEAT (WHETHER OR NOT MINCED), FRESH, CHILLED OR FROZEN:Other:Other" +3048100,"FISH FILLETS AND OTHER FISH MEAT (WHETHER OR NOT MINCED), FRESH, CHILLED OR FROZEN:Frozen fillets of other fish:Pacific Salmon (Oncorhynchus nerka, Oncorhunchus gorbuscah, Oncorhynchus keta, Oncorhynchus tschawytscha, Oncorhynchus kisutch, Oncorhynchus masou and Oncorhynchus rhodurus), Atlantic salmon (Salmo salar) and Danube salmon (Hucho hucho)" +3048200,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Trout:Trout (salmo trutta, Oncorhynchus mykiss, Oncorhynchus clarki, Oncorhynchus aguabonita, Oncorhynchus gilae, Oncorhynchus apache and Oncorhynchus chrysogaster)" +3048300,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., 
Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Flat fish:Flat fish (Pleuronectidae, Bothidae, Cynoglossidae, Soledae, Scophthalmidae and Citharidae)" +3048400,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Swordfish:Swordfish (Xiphias gladius)" +3048500,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Toothfish:Toothfish (Dissostichus spp.)" +3048600,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Herrings:Herrings (Clupea harengus, Clupea pallasii)" +3048700,"FISH FILLETS AND OTHER FISH MEAT (WHETHER OR NOT MINCED), FRESH, CHILLED OR FROZEN:Tunas:Tunas (of the genus Thunnus), skipjack or stripe-bellied bonito (Euthynnus (Katsowonus) pelamis)" +3048800,fish fillets and other fish meat whether or not minced fresh chilled or frozen fresh or chilled fillets of tilapias oreochromis spp catfish pangasius spp silurus spp clarias spp ictalurus spp carp cyprinus spp carassius spp not minced fresh chilled or frozen fresh or chilled fillets of tilapias oreochromis spp catfish pangasius spp silurus spp clarias spp ictalurus spp chthys spp cirrhinus spp mylopharyngodon piceus catla catla labeo spp osteochilus hasselti leptobarbus hoeveni megalobrama spp eels anguilla spp nile perch lates niloticus and snakeheads channa spp >> dogfish other sharks rays and skates rajidae +3048810,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Dogfish, other Sharks Rays and skates (Rajidae):Dogfish" +3048820,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates 
niloticus) and snakeheads (Channa spp.):Dogfish, other Sharks Rays and skates (Rajidae):Other Sharks" +3048830,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Dogfish, other Sharks Rays and skates (Rajidae):Rays and skates (Rajidae)" +3048900,fish fillets and other fish meat whether or not minced fresh chilled or frozen fresh or chilled fillets of tilapias oreochromis spp catfish pangasius spp silurus spp clarias spp ictalurus spp carp cyprinus spp carassius spp not minced fresh chilled or frozen fresh or chilled fillets of tilapias oreochromis spp catfish pangasius spp silurus spp clarias spp ictalurus spp chthys spp cirrhinus spp mylopharyngodon piceus catla catla labeo spp osteochilus hasselti leptobarbus hoeveni megalobrama spp eels anguilla spp nile perch lates niloticus and snakeheads channa spp >> other +3048910,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Other:Hilsa (Tenualosa ilisha)" +3048920,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Other:Shark(OLD tariff)" +3048930,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Other:Seer" +3048940,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):Other:Tuna" +3048990,"FISH FILLETS AND OTHER FISH MEAT (WHETHER OR NOT MINCED), FRESH, CHILLED OR FROZEN:Other:Other" +3049100,"FISH FILLETS AND OTHER FISH MEAT (WHETHER OR NOT MINCED), FRESH, CHILLED OR FROZEN::Swordfish (Xiphias gladius)" +3049200,"FISH FILLETS AND OTHER FISH MEAT (WHETHER OR NOT MINCED), FRESH, CHILLED OR FROZEN::Toothfish (Dissostichus spp.)" +3049300,"FISH FILLETS AND OTHER FISH MEAT (WHETHER OR NOT MINCED), FRESH, CHILLED OR FROZEN:Tilapias:Tilapias (Orechromis spp.), catfish (Pangasius spp., Silurus spp, Clarias spp., Ictalurus 
spp.) carp (Cyprinus carpio, Carassius carassius, Cenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, eels (Anguillaspp.) Nile perch (Lates niloticus) and snakeheads (Channa spp.)" +3049400,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.):alaska pollack:Alaska Pollack (Theragra chalcogramma)" +3049500,"FISH FILLETS AND OTHER FISH MEAT (WHETHER OR NOT MINCED), FRESH, CHILLED OR FROZEN:fish:Fish of the families Bregmacerotidae, Euclichthyidae, Gadidae, Macrouridae, Melanonidae, Merlucciidae, Moridae, and Muraeno-lepididae, other than Alaska Pollack (Theragra chalcogramma)" +3049600,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.)::Dogfish and other sharks" +3049700,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.)::Rays and skates (Rajidae)" +3049900,"Fresh or chilled fillets of tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.)::Other" +3050000,or not cooked before or during the smoking process +3051000,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION::Flours, meals and pellets, of fish fit for human consumption(OLD tariff)" +3052000,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION::Livers and roes of fish, dried, smoked, salted or in brine" +3053100,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION::Tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.)" +3053200,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED 
BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION:fish:fishof the families Bregmacerotidae, eulichthyidae, gadidae, macrouridae, melononidae, merlucciidae, moridae and muranelepididae" +3053900,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION:Other:Other" +3054100,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION::Pacific salmon (Oncorhynchus nerka, Oncorhynchus gorbuscha, Oncorhynchus keta, Oncorhynchus tschawytscha, Oncorhynchus kisutch, Oncorhynchus masou and Oncorhynchus rhodurus), Atlantic salmon (Salmo Salar) and Danube salmon (Hucho hucho)" +3054200,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION::Herrings (Clupea harengus, Clupea pallasii )" +3054300,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION:Trout:Trout (Salmo trutta, Onchohynchus mykiss, oncorhynchus clarki oncorhynchus aquabonita, oncorhunchus gilae, oncorhunchus apache and oncorhynchus chrysogaster" +3054400,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION::Tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.)" +3054900,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION::Other" +3055100,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION::Cod (Gadus morhua, Gadus ogac, Gadus macrocephalus)" +3055200,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION::Tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.)" +3055300,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION::Fish of the families Bregmacerotidae, Euclichthyidae, Gadidae, Macrouridae, Melanonidae, Merlucciidae, Moridae and Muraenolepididae, other than cod ( Gadus morhua, Gadus ogac, Gadus macrocephalus)" +3055400,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION::Herrings (Clupea harengus, Clupea 
pallasii), anchovies (Engraulis spp.), sardines (Sardina pilchardus, Sardinops spp.), sardinella (Sardinella spp.), brisling or sprats (Sprattus sprattus), mackerel (Scomber scombrus, Scomber australasicus, Scomber japonicus), Indian mackerels (Rastrelliger spp.), seerfishes (Scomberomorus spp.), jack and horse mackerel (Trachurus spp.), jacks, crevalles (Caranx spp.), cobia (Rachycentron canadum), silver pomfrets (Pampus spp.), Pacific saury (Cololabis saira), scads (Decapterus spp.), capelin (Mallotus villosus), Sword fish (Xiphias gladius), Kawakawa (Euthynnus affinis), bonitos (Sarda spp.), marlins, sailfishes, spearfish (Istiophoridae)" +3055900,or not cooked before or during the smoking process >> other +3055910,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION:Other :Mumbai Duck" +3055920,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION:Other :Seer without head" +3055930,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION:Other :Sprats" +3055990,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION:Other :Other" +3056100,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION::Herrings (Clupea harengus, Clupea pallasii )" +3056200,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION::Cod (Gadus morhua, Gadus ogac, Gadus macrocephalus)" +3056300,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION::Anchovies (Engraulis spp. 
)" +3056400,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION::Tilapias (Oreochromis spp.), catfish (Pangasius spp., Silurus spp., Clarias spp., Ictalurus spp.), carp (Cyprinus spp., Carassius spp., Ctenopharyngodon idellus, Hypophthalmichthys spp., Cirrhinus spp., Mylopharyngodon piceus, Catla catla, Labeo spp., Osteochilus hasselti, Leptobarbus hoeveni, Megalobrama spp.), eels (Anguilla spp.), Nile perch (Lates niloticus) and snakeheads (Channa spp.)(OLD tariff)" +3056910,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION:Other:Mumbai duck(OLD tariff)" +3056920,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION:Other:Seer without head(OLD tariff)" +3056930,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION:Other:Sprats(OLD tariff)" +3056990,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION:Other:Other(OLD tariff)" +3057100,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION:shark fins:Shark fins" +3057200,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION:fish heads:fish heads tails and maws" +3057900,"FISH, DRIED, SALTED OR IN BRINE; SMOKED FISH, WHETHER OR NOT COOKED BEFORE OR DURING THE SMOKING PROCESS; FLOURS, MEALS AND PELLETS, OF FISH FIT FOR HUMAN CONSUMPTION:other:other" +3060000,chilled frozen dried salted or in brine smoked crustaceans whether in shell or not whether or not cooked before or during the smoking process crustaceans in shell cooked by steaming or by boiling in water whether or not chilled frozen dried salted or in brine frozen +3061100,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,::Rock lobster and other sea craw fish (Palinurus spp., Panulirus spp., Jasus spp.)" +3061200,chilled frozen dried salted or in brine smoked crustaceans whether in shell or not whether or not cooked before or during the smoking process crustaceans in shell cooked by steaming or by boiling in water whether or not chilled frozen dried salted or in brine frozen >> lobsters homarus spp +3061210,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,:Lobsters (Homarus spp.) :Whole, cooked" +3061290,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,:Lobsters (Homarus spp.) 
:Other" +3061400,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,::Crabs" +3061500,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,:Norway lobsters:Norway lobsters (Nephrops norvegicus)" +3061600,chilled frozen dried salted or in brine smoked crustaceans whether in shell or not whether or not cooked before or during the smoking process crustaceans in shell cooked by steaming or by boiling in water whether or not chilled frozen dried salted or in brine frozen >> shrimps and prawns pandalus spp crangon crangon +3061610,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,:Cold-water shrimps and prawns (Pandalus spp., Crangon crangon):Accelerated Freeze Dried" +3061690,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,:Cold-water shrimps and prawns (Pandalus spp., Crangon crangon):Other" +3061700,chilled frozen dried salted or in brine smoked crustaceans whether in shell or not whether or not cooked before or during the smoking process crustaceans in shell cooked by steaming or by boiling in water whether or not chilled frozen dried salted or in brine frozen >> other shrimps and prawns scampi macrobrachium spp +3061711,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,:Other Shrimps and prawns Scampi (Macrobrachium spp.):Accelerated Freese Dried (AFD)" +3061719,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,:Other Shrimps and prawns Scampi (Macrobrachium spp.):other" +3061720,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,:Other Shrimps and prawns Scampi (Macrobrachium spp.):Vannamei Shrimp (LITOPENAEUS VANNAMEI)" +3061730,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,:Other Shrimps and prawns Scampi (Macrobrachium spp.):Indian White Shrimp (FENNEROPENAEUS INDICUS)" +3061740,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,:Other Shrimps and prawns Scampi (Macrobrachium spp.):Black tiger shrimp (Penaeusmonodon)" +3061750,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,:Other Shrimps and prawns Scampi (Macrobrachium spp.):Flower Shrimp (Penaeus semisulcalus)" +3061790,"CRUSTACEANS, WHETHER IN SHELL OR NOT, 
LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,:Other Shrimps and prawns Scampi (Macrobrachium spp.):other" +3061900,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,::Other, including flours, meals and pellets, of crustaceans, fit for human consumption" +3062100,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,::Rock lobster and other sea craw fish (Palinurus spp., Panulirus spp., Jasus spp.)(OLD tariff)" +3062200,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,::Lobsters (Homarus spp.)(OLD tariff)" +3062400,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,::Crabs(OLD tariff)" +3062500,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,:Norway lobsters:Norway lobsters (Nephrops norvegicus)(OLD tariff)" +3062600,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,:Cold-water shrimps:Cold-water shrimps and prawns (Pandalus spp., Crangon crangon)(OLD tariff)" +3062710,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,:Other shrimps and prawns:Powdered(OLD tariff)" +3062790,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,:Other shrimps and prawns:Other(OLD tariff)" +3062900,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,::Other, including flours, meals and pellets, of crustaceans, fit for human consumption(OLD tariff)" +3063100,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,::Rock lobster and other sea craw fish (Palinurus spp., Jasus spp.)" +3063200,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,::Lobsters (Homarus spp.)" +3063300,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,::Crabs" +3063400,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; 
CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,::Norway lobsters (Nephrops norvegicus)" +3063500,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,::Cold-water shrimps and prawns (Pandalus spp., Crangon crangon)" +3063600,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,::Other shrimps and prawns" +3063610,chilled frozen dried salted or in brine smoked crustaceans whether in shell or not whether or not cooked before or during the smoking process crustaceans in shell cooked by steaming or by boiling in water whether or not chilled frozen dried salted or in brine frozen >> other shrimps and prawns >> scampi macrobachium spp +3063620,chilled frozen dried salted or in brine smoked crustaceans whether in shell or not whether or not cooked before or during the smoking process crustaceans in shell cooked by steaming or by boiling in water whether or not chilled frozen dried salted or in brine frozen >> other shrimps and prawns >> vannamei shrimp litopenaeus vannamei +3063630,chilled frozen dried salted or in brine smoked crustaceans whether in shell or not whether or not cooked before or during the smoking process crustaceans in shell cooked by steaming or by boiling in water whether or not chilled frozen dried salted or in brine frozen >> other shrimps and prawns >> indian white shrimp fenneropenaeus indicus +3063640,chilled frozen dried salted or in brine smoked crustaceans whether in shell or not whether or not cooked before or during the smoking process crustaceans in shell cooked by steaming or by boiling in water whether or not chilled frozen dried salted or in brine frozen >> other shrimps and prawns >> black tiger shrimp penaeus monodon +3063650,chilled frozen dried salted or in brine smoked crustaceans whether in shell or not whether or not cooked before or during the smoking process crustaceans in shell cooked by steaming or by boiling in water whether or not chilled frozen dried salted or in brine frozen >> other shrimps and prawns >> flower shrimp penaeus semisulcatus +3063660,chilled frozen dried salted or in brine smoked crustaceans whether in shell or not whether or not cooked before or during the smoking process crustaceans in shell cooked by steaming or by boiling in water whether or not chilled frozen dried salted or in brine frozen >> other shrimps and prawns >> artemia +3063690,chilled frozen dried salted or in brine smoked crustaceans whether in shell or not whether or not cooked before or during the smoking process crustaceans in shell cooked by steaming or by boiling in water whether or not chilled frozen dried salted or in brine frozen >> other shrimps and prawns >> other +3063900,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,::Other, including flours, meals and pellets, of crustaceans, fit for human consumption" +3069100,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,::Rock lobster and other sea crawfish (Palinurus 
spp., Jasus spp.)" +3069200,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,::Lobsters ( Homarus spp.)" +3069300,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,::Crabs" +3069400,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,::Norway lobsters (Nephrops norvegicus)" +3069500,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,::Shrimps and prawns" +3069900,"CRUSTACEANS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; CRUSTACEANS, IN SHELL, COOKED BY STEAMING OR BY BOILING IN WATER, WHETHER OR NOT CHILLED, FROZEN, DRIED,::Other, including flours, meals and pellets, of crustaceans, fit for human consumption" +3070000,chilled frozen dried salted or in smoked molluscs whether in shell or not whether or not cooked before or during the smoking process oysters +3071100,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION:Oysters:Live, fresh or chilled" +3071200,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION:frozen:Frozen" +3071900,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION:other:Other" +3072100,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION::Live, fresh or chilled" +3072200,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION::Frozen" +3072900,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION::Other" +3073100,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN 
CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION::Live, fresh or chilled" +3073200,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION::Frozen" +3073900,chilled frozen dried salted or in smoked molluscs whether in shell or not whether or not cooked before or during the smoking process oysters >> other +3073910,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION:Other :Clams, clam meat (bivalves-Victorita, spp., Mertrix spp. and Katalysia spp.)" +3073990,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION:Other :Other" +3074110,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION:Live, fresh and chilled :Cuttle fish(OLD tariff)" +3074120,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION:Live, fresh and chilled :Squid(OLD tariff)" +3074200,chilled frozen dried salted or in smoked molluscs whether in shell or not whether or not cooked before or during the smoking process oysters >> live fresh or chilled +3074210,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION:Live, fresh or chilled:Cuttle fish" +3074220,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION:Live, fresh or chilled:Squid" +3074300,chilled frozen dried salted or in smoked molluscs whether in shell or not whether or not cooked before or during the smoking process oysters >> frozen +3074310,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION:Frozen:Cuttle fish" 
+3074320,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION:Frozen:Whole squids" +3074330,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION:Frozen:Squid tubes" +3074390,chilled frozen dried salted or in smoked molluscs whether in shell or not whether or not cooked before or during the smoking process oysters >> frozen >> other +3074900,chilled frozen dried salted or in smoked molluscs whether in shell or not whether or not cooked before or during the smoking process oysters >> other +3074910,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION:Other :Squid tubes, frozen" +3074920,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION:Other :Whole squids, frozen" +3074930,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION:Other :Dried squids" +3074940,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION:Other :Dried squids" +3074990,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION:Other :other" +3075100,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION::Live, fresh or chilled" +3075200,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION::Frozen" +3075900,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, 
FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION::Other" +3076000,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION::Snails, other than sea snails" +3077100,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION:Live, fresh or chilled:Live, fresh or chilled" +3077200,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION::Frozen" +3077900,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION:Other:Other" +3078100,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION:Abalone:Live, fresh or chilled abalone (Haliotis spp.)" +3078200,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION::Live, fresh or chilled stromboid conchs (Strombus spp.)" +3078300,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION::Frozen abalone (Haliotis spp.)" +3078400,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION::Frozen stromboid conchs (Strombus spp.)" +3078700,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION::Other abalone (Haliotis spp.)" +3078800,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, 
SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION::Other stromboid conchs (Strombus spp.)" +3078900,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION:Other:Other(OLD tariff)" +3079100,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION:Live, fresh or chilled:Live, fresh or chilled" +3079200,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION::Frozen" +3079900,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION:Other :Other" +3079910,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION:Other :Sea shell flesh(OLD tariff)" +3079920,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION:Other :Jelly fish (Rhopelina spp.), dried salted or frozen(OLD tariff)" +3079990,"MOLLUSCS, WHETHER IN SHELL OR NOT, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS AND MOLLUSCS, LIVE, FRESH, CHILLED, FROZEN, DRIED, SALTED OR IN BRINE; FLOURS, MEALS AND PELLETS OF AQUATIC INVERTEBRATES OTHER THAN CRUSTACEANS, FIT FOR HUMAN CONSUMPTION:Other :Other(OLD tariff)" +3080000,molluscs live fresh chilled frozen dried salted or in brine smoked aquatic invertebrates other than crustaceans and molluscs whether or not cooked before or during the smoking process sea cucumbers stichopus japonicus holothurioidea +3081100,"Aquatic invertebrates other than crustaceans and molluscs, live, fresh, chilled, frozen,dried, salted or in brine; smoked aquatic invertebrates other than crustaceans and molluscs, whether or not cooked before or duing the smoking process; flours, meals and pellets of aquatic intertebrates other than crustaceans and molluscs, fit for human consumption:Sea cucumbers:Live, fresh or chilled" +3081200,"Aquatic invertebrates other than crustaceans and molluscs, live, fresh, chilled, frozen,dried, salted or in brine; smoked aquatic invertebrates other than crustaceans and molluscs, whether or not cooked before or duing the smoking process; flours, meals and pellets of aquatic 
intertebrates other than crustaceans and molluscs, fit for human consumption::frozen" +3081900,"Aquatic invertebrates other than crustaceans and molluscs, live, fresh, chilled, frozen,dried, salted or in brine; smoked aquatic invertebrates other than crustaceans and molluscs, whether or not cooked before or duing the smoking process; flours, meals and pellets of aquatic intertebrates other than crustaceans and molluscs, fit for human consumption:Other:Other" +3082100,"Aquatic invertebrates other than crustaceans and molluscs, live, fresh, chilled, frozen,dried, salted or in brine; smoked aquatic invertebrates other than crustaceans and molluscs, whether or not cooked before or duing the smoking process; flours, meals and pellets of aquatic intertebrates other than crustaceans and molluscs, fit for human consumption:Sea urchins:Live, fresh or chilled" +3082200,"Aquatic invertebrates other than crustaceans and molluscs, live, fresh, chilled, frozen,dried, salted or in brine; smoked aquatic invertebrates other than crustaceans and molluscs, whether or not cooked before or duing the smoking process; flours, meals and pellets of aquatic intertebrates other than crustaceans and molluscs, fit for human consumption::frozen" +3082900,"Aquatic invertebrates other than crustaceans and molluscs, live, fresh, chilled, frozen,dried, salted or in brine; smoked aquatic invertebrates other than crustaceans and molluscs, whether or not cooked before or duing the smoking process; flours, meals and pellets of aquatic intertebrates other than crustaceans and molluscs, fit for human consumption:Other:Other" +3083000,molluscs live fresh chilled frozen dried salted or in brine smoked aquatic invertebrates other than crustaceans and molluscs whether or not cooked before or during the smoking process sea cucumbers stichopus japonicus holothurioidea >> jellyfish rhopilema spp jellyfish rhopilema spp +3083010,"Aquatic invertebrates other than crustaceans and molluscs, live, fresh, chilled, frozen,dried, salted or in brine; smoked aquatic invertebrates other than crustaceans and molluscs, whether or not cooked before or duing the smoking process; flours, meals and pellets of aquatic intertebrates other than crustaceans and molluscs, fit for human consumption:Jelly fish (Rhopilema spp.):Live, fresh or chilled" +3083020,"Aquatic invertebrates other than crustaceans and molluscs, live, fresh, chilled, frozen,dried, salted or in brine; smoked aquatic invertebrates other than crustaceans and molluscs, whether or not cooked before or duing the smoking process; flours, meals and pellets of aquatic intertebrates other than crustaceans and molluscs, fit for human consumption:Jelly fish (Rhopilema spp.):dried salted or frozen" +3083090,molluscs live fresh chilled frozen dried salted or in brine smoked aquatic invertebrates other than crustaceans and molluscs whether or not cooked before or during the smoking process sea cucumbers stichopus japonicus holothurioidea >> jellyfish rhopilema spp jellyfish rhopilema spp >> other +3089000,"Aquatic invertebrates other than crustaceans and molluscs, live, fresh, chilled, frozen,dried, salted or in brine; smoked aquatic invertebrates other than crustaceans and molluscs, whether or not cooked before or duing the smoking process; flours, meals and pellets of aquatic intertebrates other than crustaceans and molluscs, fit for human consumption:other:Other" +3090000,molluscs and other aquatic invertebrates fit for human consumption +3091000,molluscs and other aquatic invertebrates fit for 
human consumption >> of fish +3091010,"FLOURS, MEALS AND PELLETS OF FISH, CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, FIT FOR HUMAN CONSUMPTION:OF FISH:Fresh or chilled" +3091020,"FLOURS, MEALS AND PELLETS OF FISH, CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, FIT FOR HUMAN CONSUMPTION:OF FISH:Frozen" +3091030,"FLOURS, MEALS AND PELLETS OF FISH, CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, FIT FOR HUMAN CONSUMPTION:OF FISH:Salted , in brine, dried or smoked" +3091090,"FLOURS, MEALS AND PELLETS OF FISH, CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, FIT FOR HUMAN CONSUMPTION:OF FISH:Other" +3099000,molluscs and other aquatic invertebrates fit for human consumption >> other of crustaceans fresh or chilled +3099011,"FLOURS, MEALS AND PELLETS OF FISH, CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, FIT FOR HUMAN CONSUMPTION:Other:Vannamei shrimp (Litopenaeus vannamei)" +3099012,"FLOURS, MEALS AND PELLETS OF FISH, CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, FIT FOR HUMAN CONSUMPTION:Other:Indian white shrimp (Fenneropenaeus indicus)" +3099013,"FLOURS, MEALS AND PELLETS OF FISH, CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, FIT FOR HUMAN CONSUMPTION:Other: Black tiger shrimp (Penaeus monodon)" +3099014,"FLOURS, MEALS AND PELLETS OF FISH, CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, FIT FOR HUMAN CONSUMPTION:Other: Flower shrimp (Penaeus semisulcatus)" +3099019,"FLOURS, MEALS AND PELLETS OF FISH, CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, FIT FOR HUMAN CONSUMPTION:Other:Other" +3099021,"FLOURS, MEALS AND PELLETS OF FISH, CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, FIT FOR HUMAN CONSUMPTION:Other: Vannamei shrimp (Litopenaeus vannamei)" +3099022,"FLOURS, MEALS AND PELLETS OF FISH, CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, FIT FOR HUMAN CONSUMPTION:Other:Indian white shrimp (Fenneropenaeus indicus)" +3099023,"FLOURS, MEALS AND PELLETS OF FISH, CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, FIT FOR HUMAN CONSUMPTION:Other: Black tiger shrimp (Penaeus monodon)" +3099024,"FLOURS, MEALS AND PELLETS OF FISH, CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, FIT FOR HUMAN CONSUMPTION:Other: Flower shrimp (Penaeus semisulcatus)" +3099029,"FLOURS, MEALS AND PELLETS OF FISH, CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, FIT FOR HUMAN CONSUMPTION:Other:Other" +3099031,"FLOURS, MEALS AND PELLETS OF FISH, CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, FIT FOR HUMAN CONSUMPTION:Other:Vannamei shrimp (Litopenaeus vannamei)" +3099032,"FLOURS, MEALS AND PELLETS OF FISH, CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, FIT FOR HUMAN CONSUMPTION:Other:Indian white shrimp (Fenneropenaeus indicus)" +3099033,"FLOURS, MEALS AND PELLETS OF FISH, CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, FIT FOR HUMAN CONSUMPTION:Other: Black tiger shrimp (Penaeus monodon)" +3099034,"FLOURS, MEALS AND PELLETS OF FISH, CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, FIT FOR HUMAN CONSUMPTION:Other: Flower shrimp (Penaeus semisulcatus)" +3099039,"FLOURS, MEALS AND PELLETS OF FISH, CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, FIT FOR HUMAN CONSUMPTION:Other:Other" +3099040,"FLOURS, MEALS AND PELLETS OF FISH, CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, FIT FOR HUMAN CONSUMPTION:Other: Of crustaceans, other" +3099050,"FLOURS, MEALS AND PELLETS OF FISH, CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, FIT FOR HUMAN CONSUMPTION:Other: Of molluscs, fresh 
or chilled" +3099060,"FLOURS, MEALS AND PELLETS OF FISH, CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, FIT FOR HUMAN CONSUMPTION:Other: Of molluscs, frozen" +3099070,"FLOURS, MEALS AND PELLETS OF FISH, CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, FIT FOR HUMAN CONSUMPTION:Other: Of molluscs, salted, in brine, dried or smoked" +3099080,"FLOURS, MEALS AND PELLETS OF FISH, CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, FIT FOR HUMAN CONSUMPTION:Other:Of molluscs, other" +3099090,"FLOURS, MEALS AND PELLETS OF FISH, CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, FIT FOR HUMAN CONSUMPTION:Other:Other" +3101009,"ANIMAL OR VEGETABLE FERTILISERS, WHETHER OR NOT MIXED TOGETHER OR CHEMICALLY TREATED; FERTILISERS PRODUCED BY THE MIXING OR CHEMICAL TREATMENT OF ANIMAL OR VEGETABLE PRODUCTS:Animal or vegetable fertilisers, whether or not mixed together or chemically treated; fertilisers produced by the mixing or chemical treatment of animal or vegetable products :Other(OLD tariff)" +3204111,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Disperse yellow (OLD tariff)" +3204112,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Disperse orange (OLD tariff)" +3204113,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Disperse red (OLD tariff)" +3204114,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Disperse violet(OLD tariff)" +3204115,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Disperse blue(OLD tariff)" +3204119,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Other(OLD tariff)" +3204121,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING 
AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Azo dyes (OLD tariff)" +3204122,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid greens (non-azo) (OLD tariff)" +3204123,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid black (non-azo) (OLD tariff)" +3204124,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid Blues 2, 14, 23, 25, 45, 51, 52 and 78 (non-azo) (OLD tariff)" +3204125,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid blues 93, 112, 127, 138, 140 and others (non-azo) (OLD tariff)" +3204126,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Mordant dyes (OLD tariff)" +3204129,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Other non-azo acid dyes (OLD tariff)" +3204132,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Basic dyes and preparations based thereon :Basic yellow (non-azo)(OLD tariff)" +3204133,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON 
SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Basic dyes and preparations based thereon :Basic red (non-azo)(OLD tariff)" +3204134,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Basic dyes and preparations based thereon :Basic violet (non-azo)(OLD tariff)" +3204135,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Basic dyes and preparations based thereon :Basic blue (non-azo)(OLD tariff)" +3204136,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Basic dyes and preparations based thereon :Basic green (non-azo)(OLD tariff)" +3204139,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Basic dyes and preparations based thereon :Other non-azo basic dyes(OLD tariff)" +3204141,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Direct dyes and preparations based thereon :Direct yellow (azo) (OLD tariff)" +3204142,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Direct dyes and preparations based thereon :Direct red (azo) (OLD tariff)" +3204143,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Direct dyes and preparations based thereon :Direct blue (azo)(OLD tariff)" +3204148,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Direct dyes and preparations based thereon :Direct dyes (non-azo)(OLD tariff)" +3204151,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS 
FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat yellow (OLD tariff)" +3204152,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat orange (OLD tariff)" +3204153,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat red (OLD tariff)" +3204154,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat violet (OLD tariff)" +3204155,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat blue(OLD tariff)" +3204156,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat green(OLD tariff)" +3204157,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat brown(OLD tariff)" +3204158,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat black(OLD tariff)" +3204159,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based 
thereon:Other(OLD tariff)" +3204171,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Pigments and preparations based thereon :Pigment yellow (OLD tariff)" +3204173,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Pigments and preparations based thereon :Pigment red (OLD tariff)" +3204175,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Pigments and preparations based thereon :Pigment blues (OLD tariff)" +3204176,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Pigments and preparations based thereon :Pigment greens (OLD tariff)" +3204191,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic coupling components 2,4,5,7,8,13 (OLD tariff)" +3204192,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic coupling components 14,15,17,18,20 and others (OLD tariff)" +3204193,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic diazo component 1,2,3,4,5,6,10,11 (OLD tariff)" +3204194,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic diazo component 12,13,20,24,32,41,48 and others (OLD tariff)" +3204195,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON 
SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic colours (OLD tariff)" +3204196,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Sulphur based colouring matters (OLD tariff)" +3204197,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Solvent based colouring matters (OLD tariff)" +3204198,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Food colouring matters (OLD tariff)" +3208901,"PAINTS AND VARNISHES (INCLUDING ENAMELS AND LACQUERS) BASED ON SYNTHETIC POLYMERS OR CHEMICALLY MODIFIED NATURAL POLYMERS, DISPERSED OR DISSOLVED IN A NON-AQUEOUS MEDIUM; SOLUTIONS AS DEFINED IN NOTE 4 TO THIS CHAPTER:Other :Based on cellulose nitrate or other cellulose derivatives (OLD tariff)" +3208902,"PAINTS AND VARNISHES (INCLUDING ENAMELS AND LACQUERS) BASED ON SYNTHETIC POLYMERS OR CHEMICALLY MODIFIED NATURAL POLYMERS, DISPERSED OR DISSOLVED IN A NON-AQUEOUS MEDIUM; SOLUTIONS AS DEFINED IN NOTE 4 TO THIS CHAPTER:Other :Enamels (OLD tariff)" +3208904,"PAINTS AND VARNISHES (INCLUDING ENAMELS AND LACQUERS) BASED ON SYNTHETIC POLYMERS OR CHEMICALLY MODIFIED NATURAL POLYMERS, DISPERSED OR DISSOLVED IN A NON-AQUEOUS MEDIUM; SOLUTIONS AS DEFINED IN NOTE 4 TO THIS CHAPTER:Other :Varnishes (OLD tariff)" +3210001,"OTHER PAINTS AND VARNISHES (INCLUDING ENAMELS, LACQUERS AND DISTEMPERS); PREPARED WATER PIGMENTS OF A KIND USED FOR FINISHING LEATHER:Other paints and varnishes (including enamels, lacquers and distempers); prepared water pigments of a kind used for finishing leather :Distempers(OLD tariff)" +3301291,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Anise oil; cajeput oil; cananga oil; caraway oil; cassia oil; cedarwood oil; cinnamon bark oil; cinnamon leaf oil;(OLD tariff)" +3301292,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; 
AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Clove leaf or stem, oil; coriander seed oil; dill oil; eucalyptus oil; fennel seed oil; ginger oil; ginger grass oil; clove bud oil(OLD tariff)" +3301293,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Tuberose concentrate; nutmeg oil; palmarosa oil; patchouli oil; pepper oil; petitgrain oil; sandalwood oil; rose oil (OLD tariff)" +3301294,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Camphor oil; lemon grass oil; ylang ylang oil; davana oil; cumin oil; celery seed oil, garlic oil, paprika oil, turmeric oil (OLD tariff)" +3301309,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Resinoids :Other(OLD tariff)" +3301901,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Fenugreek,ginger,pepper, turmeric, cardamom, celery seed and nutmeg oleoresins(OLD tariff)" +3301902,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Clove, capsicum, coriander, cumin and fennel oleoreins and oleoresins of spices not elsewhere specified or included(OLD tariff)" +3301903,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Attars of all kinds in fixed oil base; mustard oil aroma essence of ambrettolide (ambrette seed oil essence)(OLD tariff)" +3301904,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Concentrates of essential oils in fats, in fixed oils or in waxes or the like, obtained by cold absorption or by maceration not elsewhere specified or included(OLD tariff)" +3301905,"ESSENTIAL 
OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Terpenic by-products of the deterpenation of essential oils (OLD tariff)" +3301907,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Aqueous solutions of essential oils (OLD tariff)" +3302901,"MIXTURES OF ODORIFEROUS SUBSTANCES AND MIXTURES (INCLUDING ALCOHOLIC SOLUTIONS) WITH A BASIS OF ONE OR MORE OF THESE SUBSTANCES, OF A KIND USED AS RAW MATERIALS IN INDUSTRY; OTHER PREPARATIONS BASED ON ODORIFEROUS SUBSTANCES, OF A KIND USED FOR THE MANUFACTURE OF BEVERAGES:Other:Mixtures of aromatic chemicals and essential oils as perfume base(OLD tariff)" +3305901,PREPARATIONS FOR USE ON THE HAIR:Other :Hair oil (OLD tariff) +3401191,"SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR USE AS SOAP, IN THE FORM OF BARS, CAKES, MOULDED PIECES OR SHAPES, WHETHER OR NOT CONTAINING SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR WASHING THE SKIN, IN THE FORM OF LIQUID OR CREAM AND PUT UP FOR RETAIL SALE, WHETHER OR NOT CONTAINING SOAP; PAPER, WADDING, FELT AND NONWOVENS, IMPREGNATED, COATED OR COVERED WITH SOAP OR DETERGENT :Other:Bars and blocks of not less than 500 gm in weight(OLD tariff)" +3401194,"SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR USE AS SOAP, IN THE FORM OF BARS, CAKES, MOULDED PIECES OR SHAPES, WHETHER OR NOT CONTAINING SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR WASHING THE SKIN, IN THE FORM OF LIQUID OR CREAM AND PUT UP FOR RETAIL SALE, WHETHER OR NOT CONTAINING SOAP; PAPER, WADDING, FELT AND NONWOVENS, IMPREGNATED, COATED OR COVERED WITH SOAP OR DETERGENT :Other:Household and laundry soaps not elsewhere specified or included(OLD tariff)" +3401301,"SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR USE AS SOAP, IN THE FORM OF BARS, CAKES, MOULDED PIECES OR SHAPES, WHETHER OR NOT CONTAINING SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR WASHING THE SKIN, IN THE FORM OF LIQUID OR CREAM AND PUT UP FOR RETAIL SALE, WHETHER OR NOT CONTAINING SOAP; PAPER, WADDING, FELT AND NONWOVENS, IMPREGNATED, COATED OR COVERED WITH SOAP OR DETERGENT :Organic surface-active products and preparations for washing the skin, in the form of liquid or cream and put up for retail sale, whether or not containing soap :For toilet use (including medicated products)(OLD tariff)" +3402901,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:Other;:Synthetic detergents(OLD tariff)" +3402904,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:Other;:Wetting agents(OLD tariff)" +3402905,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, 
WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:Other;:Washing preparations whether or not containing soap(OLD tariff)" +3402909,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:Other;:Other(OLD tariff)" +3404903,"ARTIFICIAL WAXES AND PREPARED WAXES:Other:Artificial waxes (including water soluble waxes) prepared waxes, not emulsified or containing solvents(OLD tariff)" +3504009,"PEPTONES AND THEIR DERIVATIVES; OTHER PROTEIN SUBSTANCES AND THEIR DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED; HIDE POWDER, WHETHER OR NOT CHROMED:Peptones and their derivatives; other protein substances and their derivatives, not elsewhere specified or included; hide powder, whether or not chromed:Other(OLD tariff)" +3506999,"PREPARED GLUES AND OTHER PREPARED ADHESIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED; PRODUCTS SUITABLE FOR USE AS GLUES OR ADHESIVES, PUT UP FOR RETAIL SALE AS GLUES OR ADHESIVES, NOT EXCEEDING A NET WEIGHT OF 1 KG:Other :Prepared glues and other prepared adhesives not elsewhere specified or included(OLD tariff)" +3507101,ENZYMES; PREPARED ENZYMES NOT ELSEWHERE SPECIFIED OR INCLUDED:Rennet and concentrates thereof:Microbial rennet(OLD tariff) +3507109,ENZYMES; PREPARED ENZYMES NOT ELSEWHERE SPECIFIED OR INCLUDED:Rennet and concentrates thereof:Other(OLD tariff) +3507906,ENZYMES; PREPARED ENZYMES NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Other enzymes of microbial origin(OLD tariff) +3507907,"ENZYMES; PREPARED ENZYMES NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Enzymes for pharmaceutical use, other than streptokinase(OLD tariff)" +3507909,ENZYMES; PREPARED ENZYMES NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Other(OLD tariff) +3603001,SAFETY FUSES; DETONATING FUSES; PERCUSSION OR DETONATING CAPS; IGNITERS; ELECTRIC DETONATORS:Safety fuses; detonating fuses; percussion or detonating caps; igniters; electric detonators:Safety fuses(OLD tariff) +3603003,SAFETY FUSES; DETONATING FUSES; PERCUSSION OR DETONATING CAPS; IGNITERS; ELECTRIC DETONATORS:Safety fuses; detonating fuses; percussion or detonating caps; igniters; electric detonators:Percussion and detonating caps(OLD tariff) +3603004,SAFETY FUSES; DETONATING FUSES; PERCUSSION OR DETONATING CAPS; IGNITERS; ELECTRIC DETONATORS:Safety fuses; detonating fuses; percussion or detonating caps; igniters; electric detonators:Igniters(OLD tariff) +3603005,SAFETY FUSES; DETONATING FUSES; PERCUSSION OR DETONATING CAPS; IGNITERS; ELECTRIC DETONATORS:Safety fuses; detonating fuses; percussion or detonating caps; igniters; electric detonators:Electric detonators(OLD tariff) +3606909,FERRO-CERIUM AND OTHER PYROPHORIC ALLOYS IN ALL FORMS; ARTICLES OF COMBUSTIBLE MATERIALS AS SPECIFIED IN NOTE 2 TO THIS CHAPTER:Other :Other(OLD tariff) +3706101,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Of a width of 35 mm or more :Feature films(OLD tariff)" +3706104,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Of a width of 35 mm or more :Advertisement shots and films(OLD tariff)" +3706105,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Of a width of 35 mm or more 
:Other children's film(OLD tariff)" +3706106,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Of a width of 35 mm or more :Educational shots, and films(OLD tariff)" +3706109,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Of a width of 35 mm or more :Other(OLD tariff)" +3706901,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Other :Feature films(OLD tariff)" +3706904,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Other :Advertisement shots and films(OLD tariff)" +3706905,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Other :Other children's film(OLD tariff)" +3706906,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Other :Educational shots, and films(OLD tariff)" +3706909,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Other :Other(OLD tariff)" +3802901,"ACTIVATED CARBON; ACTIVATED NATURAL MINERAL PRODUCTS; ANIMAL BLACK, INCLUDING SPENT ANIMAL BLACK:Other:Activated natural mineral products(OLD tariff)" +3808919,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Insecticides:Other(OLD tariff)" +3817001,"MIXED ALKYLBENZENES AND MIXED ALKYLNAPHTHALENES, OTHER THAN THOSE OF HEADING 2707 OR 2902:Mixed alkylbenzenes and mixed alkylnaphthalenes, other than those of heading 2707 or 2902:Mixed alkylbenzenes(OLD tariff)" +3822001,"DIAGNOSTIC OR LABORATORY REAGENTS ON A BACKING, PREPARED DIAGNOSTIC OR LABORATORY REAGENTS WHETHER OR NOT ON A BACKING, OTHER THAN THOSE OF HEADING 3002 OR 3006; CERTIFIED REFERENCE MATERIALS :Diagnostic or laboratory reagents on a backing, prepared diagnostic or laboratory reagents whether or not on a backing, other than those of heading 3002 or 3006; certified reference materials :For medical diagnosis(OLD tariff)" +3823111,INDUSTRIAL MONOCARBOXYLIC FATTY ACIDS; ACID OILS FROM REFINING; INDUSTRIAL FATTY ALCOHOLS:Stearic acid :Palm stearin(OLD tariff) +3824901,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Ammoniacal gas liquors and spent oxide produced in coal gas purification, case hardening compound, heat transfer salts; mixture of diphenyl and diphenyl oxide as heat transfer medium, mixed polyethylene glycols; salts for curring or salting, surface tension reducing agents(OLD tariff)" +3824902,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Electroplating salts; water treatment chemicals; ion exchanger; correcting fluid; precipitated silica and silica gel; oil well chemical(OLD tariff)" +3824903,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR 
ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Mixtures containing perhalogenated derivatives of acyclic hydrocarbons containing two or more different halogens other than chlorine and fluorine; ferrite powder; capacitor fluids - PCB type; dipping oil for treatment of grapes; Poly brominated biphenyls, poly chlorinated biphenyls, Poly chlorinated terphenyls, crocidolite; goods of a kind known as ""hazardous waste""; phosphogypsum(OLD tariff)" +3824991,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Ammoniacal gas liquors and spent oxide produced in coal gas purification, case hardening compound, heat transfer salts; mixture of diphenyl and diphenyl oxide as heat transfer medium, mixed polyethylene glycols; salts for curing or salting, surface tension reducing agents(OLD tariff)" +3824992,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Electroplating salts; water treatment chemicals; ion exchanger, correcting fluid; precipitated silica and silica gel; oil well chemical(OLD tariff)" +3824993,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Mixture containing perhalogenated derivatives of acyclic hydrocarbons containing two or more different halogens other than chlorine and fluorine; ferrite powder; capacitor fluids - PCB type; dipping oil for treatment of grapes; Poly brominated biphenyls, poly chlorinated biphenyls, Poly chlorinated terphenyls, crocidolite; goods of a kind known as ?hazardous waste?; phosphogypsum(OLD tariff)" +3912201,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED, IN PRIMARY FORMS:Cellulose nitrates (including collodions):Non-plasticised(OLD tariff)" +3912202,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED, IN PRIMARY FORMS:Cellulose nitrates (including collodions):Plasticised(OLD tariff)" +3912391,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED, IN PRIMARY FORMS:Other -:Non-plasticised(OLD tariff)" +3912392,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED, IN PRIMARY FORMS:Other -:Plasticised(OLD tariff)" +3913901,"NATURAL POLYMERS (FOR EXAMPLE, ALGINIC ACID) AND MODIFIED NATURAL POLYMERS (FOR EXAMPLE, HARDENED PROTEINS, CHEMICAL DERIVATIVES OF NATURAL RUBBER), NOT ELSEWHERE SPECIFIED OR:Other :Chemical derivatives of natural rubber(OLD tariff)" +3915902,"WASTE, PARINGS AND SCRAP, OF PLASTICS:Of other plastics :Of polymers of vinyl acetate(OLD tariff)" +3915904,"WASTE, PARINGS AND SCRAP, OF PLASTICS:Of other plastics :Of alkyds, polyesters and epoxide resins(OLD tariff)" +3915906,"WASTE, PARINGS AND SCRAP, OF PLASTICS:Of other plastics :Of amino resins; phenolic resins and polyurethanes(OLD tariff)" +3915907,"WASTE, PARINGS AND SCRAP, OF PLASTICS:Of other plastics :Of cellulose and its chemical derivatives(OLD tariff)" +3916201,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE 
WORKED, OF PLASTICS:Of polymers of vinyl chloride:Of poly (vinyl chloride) copolymers(OLD tariff)" +3916209,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of polymers of vinyl chloride:Other(OLD tariff)" +3916902,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of other plastics :Of phenoplast, aminoplast, alkyds and Polyesters, polyamides, polyurethanes, epoxide-resins (including waste and scrap), polypropylene and acrylic, methacrylic and acrylomethacrylic polymers(OLD tariff)" +3916903,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of other plastics :Of polymerisation and copolymerisation products of polystyrene and polymethyl methacrylate(OLD tariff)" +3920101,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of polymers of ethylene:Sheets of polyethylene(OLD tariff)" +3920109,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of polymers of ethylene:Other(OLD tariff)" +3920511,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of poly (methyl methacrylate):Sheets(OLD tariff)" +3920519,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of poly (methyl methacrylate):Other(OLD tariff)" +3920591,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Other :Polyacrylate sheets(OLD tariff)" +3920599,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Other :Other(OLD tariff)" +3920691,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other polyesters :Packaging film(OLD tariff)" +3920692,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other polyesters :Sun and/or dust control film(OLD tariff)" +3920693,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other polyesters :Other film(OLD tariff)" +3920699,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other polyesters :Other(OLD tariff)" +3920711,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of regenerated cellulose:Cellophane transparent(OLD tariff)" +3920712,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of regenerated cellulose:Sheets of
cellulose nitrate and celluloid, whether or not plasticized(OLD tariff)" +3920719,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of regenerated cellulose:Other(OLD tariff)" +3920731,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of cellulose acetate:Sheet of cellulose acetate, non-plasticized(OLD tariff)" +3920732,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of cellulose acetate:Sheets of cellulose acetate, plasticized(OLD tariff)" +3920739,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of cellulose acetate:Other(OLD tariff)" +3920791,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other cellulose derivatives :Sheets of cellulose nitrate and celluloid, whether or not plasticized(OLD tariff)" +3920799,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other cellulose derivatives :Other(OLD tariff)" +3920911,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Of poly (vinyl butyral)(OLD tariff)" +3920921,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of polyamides:Poly (amide fluoride) film(OLD tariff)" +3920929,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of polyamides:Other(OLD tariff)" +3920991,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Plates, sheets, film, foil and strip of poly (vinyl acetate) (OLD tariff)" +3920992,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Film, sheets, strip of vinyl plastics(OLD tariff)" +3920993,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Plates, sheets, strip, film or foil of copolymers of vinyl chloride and vinyl acetate(OLD tariff)" +3920994,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Sheet of poly (tetrafluoro - ethylene) PTFE) (OLD tariff)" +3920995,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Retro reflective sheeting(OLD tariff)" +3920999,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Other(OLD tariff)" +3921902,"OTHER PLATES, 
SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Other :Of polymers of vinyl chloride(OLD tariff)" +3921903,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Other :Of regenerated cellulose(OLD tariff)" +3921909,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Other :Other(OLD tariff)" +3926101,"OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Office or school supplies:Office supplies of a kind classified as stationery other than pins, clips, and writing instruments(OLD tariff)" +3926109,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Office or school supplies:Other(OLD tariff) +3926201,"OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Articles of apparel and clothing accessories (including gloves, mittens and mitts) :Gloves(OLD tariff)" +3926202,"OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Articles of apparel and clothing accessories (including gloves, mittens and mitts) :Aprons(OLD tariff)" +3926203,"OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Articles of apparel and clothing accessories (including gloves, mittens and mitts) :Plastic stickers for garments(OLD tariff)" +3926204,"OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Articles of apparel and clothing accessories (including gloves, mittens and mitts) :Collar stays, patties, butterfly, shoulder-pads and other stays(OLD tariff)" +3926209,"OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Articles of apparel and clothing accessories (including gloves, mittens and mitts) :Other(OLD tariff)" +3926401,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Statuettes and other ornamental articles :Bangles(OLD tariff) +3926402,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Statuettes and other ornamental articles :Beads(OLD tariff) +3926403,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Statuettes and other ornamental articles :Statuettes(OLD tariff) +3926404,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Statuettes and other ornamental articles :Table and other household articles (including hotel and restaurant) for decoration(OLD tariff) +3926405,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Statuettes and other ornamental articles :Decorative sheets(OLD tariff) +3926409,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Statuettes and other ornamental articles :Other(OLD tariff) +3926902,"OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Other :Couplers, packing rings, O rings and the like(OLD tariff)" +3926903,"OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Other :Lasts, with or without steel hinges; EVA and grape sheets for soles and heels; welts(OLD tariff)" +3926904,"OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Other :Rings, buckles, tacks, washers and other decorative fittings made of plastic used as trimmings and embellishments for leather products; patterns for leather footwear, leather garments and leather goods(OLD tariff)" +3926905,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Other :Retroreflective sheeting of 
other than of heading 3920(OLD tariff) +3926906,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Other :Hangers(OLD tariff) +3926907,"OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Other :Plastic or nylon tipped hammers; insulating liner of nylon, HDPE(OLD tariff)" +3926909,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Other :Other(OLD tariff) +4010000,milk and cream not concentrated nor containing added sugar or other sweetening matter +4010391,CONVEYOR OR TRANSMISSION BELTS OR BELTING OF VULCANISED RUBBER:Other :Where the rubber compound content is less than 25% by weight(OLD tariff) +4010399,CONVEYOR OR TRANSMISSION BELTS OR BELTING OF VULCANISED RUBBER:Other :Other(OLD tariff) +4011000,"MILK AND CREAM, NOT CONCENTRATED NOR CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER::Of a fat content, by weight, not exceeding 1%" +4012000,"MILK AND CREAM, NOT CONCENTRATED NOR CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER::Of a fat content, by weight, exceeding 1% but not exceeding 6%" +4012904,"RETREADED OR USED PNEUMATIC TYRES OF RUBBER, SOLID OR CUSHION TYRES, TYRE TREADS AND TYRE FLAPS, OF RUBBER:Other :Tyre flaps(OLD tariff)" +4013904,"INNER TUBES, OF RUBBER:Other :For tractors(OLD tariff)" +4014000,"MILK AND CREAM, NOT CONCENTRATED NOR CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER:of a fat content, by weight:Of a fat content, by weight, exceeding 6% but not exceeding 10%" +4015000,"MILK AND CREAM, NOT CONCENTRATED NOR CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER:of a fact content:Of a fat content, by weight, exceeding 10%" +4015909,"ARTICLES OF APPAREL AND CLOTHING ACCESSORIES (INCLUDING GLOVES, MITTENS AND MITTS) FOR ALL PURPOSES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Other :Other(OLD tariff)" +4020000,milk and cream concentrated or containing added sugar or other sweetening matter +4021000,milk and cream concentrated or containing added sugar or other sweetening matter >> in powder granules or other solid forms of a fat content by weight not exceeding +4021010,"MILK AND CREAM, CONCENTRATED OR CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER:In powder, granules or other solid forms, of a fat content, by weight not exceeding 1.5% :Skimmed Milk" +4021020,"MILK AND CREAM, CONCENTRATED OR CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER:In powder, granules or other solid forms, of a fat content, by weight not exceeding 1.5% :Milk food for babies" +4021090,"MILK AND CREAM, CONCENTRATED OR CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER:In powder, granules or other solid forms, of a fat content, by weight not exceeding 1.5% :Other" +4022100,"MILK AND CREAM, CONCENTRATED OR CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER::Not containing added sugar or other sweetening matter" +4022900,milk and cream concentrated or containing added sugar or other sweetening matter >> other +4022910,"MILK AND CREAM, CONCENTRATED OR CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER:Other :Whole milk" +4022920,"MILK AND CREAM, CONCENTRATED OR CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER:Other :Milk for babies" +4022990,"MILK AND CREAM, CONCENTRATED OR CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER:Other :Other" +4029100,milk and cream concentrated or containing added sugar or other sweetening matter >> not containing added sugar or other sweetening matter +4029110,"MILK AND CREAM, CONCENTRATED OR CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER:Not containing added sugar or 
other sweetening matter :Condensed milk" +4029190,"MILK AND CREAM, CONCENTRATED OR CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER:Not containing added sugar or other sweetening matter :Other" +4029900,milk and cream concentrated or containing added sugar or other sweetening matter >> other +4029910,"MILK AND CREAM, CONCENTRATED OR CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER:Other :Whole milk" +4029920,"MILK AND CREAM, CONCENTRATED OR CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER:Other :Condensed milk" +4029990,"MILK AND CREAM, CONCENTRATED OR CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER:Other :Other" +4030000,kephir and other fermented or acidified milk and cream whether or not concentrated or containing added sugar or other sweetening matter or flavoured or containing added fruit nuts or cocoa +4031000,"BUTTERMILK, CURDLED MILK AND CREAM, YOGURT, KEPHIR AND OTHER FERMENTED OR ACIDIFIED MILK AND CREAM, WHETHER OR NOT CONCENTRATED OR CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR FLAVOURED OR CONTAINING ADDED FRUIT, NUTS OR COCOA::Yogurt(OLD tariff)" +4032000,"BUTTERMILK, CURDLED MILK AND CREAM, YOGURT, KEPHIR AND OTHER FERMENTED OR ACIDIFIED MILK AND CREAM, WHETHER OR NOT CONCENTRATED OR CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR FLAVOURED OR CONTAINING ADDED FRUIT, NUTS OR COCOA:yoghurt:Yogurt" +4039000,kephir and other fermented or acidified milk and cream whether or not concentrated or containing added sugar or other sweetening matter or flavoured or containing added fruit nuts or cocoa >> other +4039010,"BUTTERMILK, CURDLED MILK AND CREAM, YOGURT, KEPHIR AND OTHER FERMENTED OR ACIDIFIED MILK AND CREAM, WHETHER OR NOT CONCENTRATED OR CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR FLAVOURED OR CONTAINING ADDED FRUIT, NUTS OR COCOA:Other :Butter milk" +4039090,"BUTTERMILK, CURDLED MILK AND CREAM, YOGURT, KEPHIR AND OTHER FERMENTED OR ACIDIFIED MILK AND CREAM, WHETHER OR NOT CONCENTRATED OR CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR FLAVOURED OR CONTAINING ADDED FRUIT, NUTS OR COCOA:Other :Other" +4040000,whey whether or not concentrated or containing added sugar or other sweetening matter products consisting of natural milk or not constituents whether containing added sugar or other sweetening matter not elsewhere specified or included +4041000,whey whether or not concentrated or containing added sugar or other sweetening matter products consisting of natural milk or not constituents whether containing added sugar or other sweetening matter not elsewhere specified or included >> whey and modified whey whether or not concentrated or containing added sugar or other sweetening matter +4041010,"WHEY, WHETHER OR NOT CONCENTRATED OR CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER; PRODUCTS CONSISTING OF NATURAL MILK CONSTITUENTS, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER, NOT ELSEWHERE SPECIFIED OR INCLUDED:Whey and modified whey, whether or not concentrated or containing added sugar or other sweetening matter :Whey, concentrated, evaporated or condensed, liquid or semi-solid" +4041020,"WHEY, WHETHER OR NOT CONCENTRATED OR CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER; PRODUCTS CONSISTING OF NATURAL MILK CONSTITUENTS, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER, NOT ELSEWHERE SPECIFIED OR INCLUDED:Whey and modified whey, whether or not concentrated or containing added sugar or other sweetening matter :Whey, dry, blocks and powdered" +4041090,"WHEY, WHETHER OR NOT CONCENTRATED OR CONTAINING ADDED 
SUGAR OR OTHER SWEETENING MATTER; PRODUCTS CONSISTING OF NATURAL MILK CONSTITUENTS, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER, NOT ELSEWHERE SPECIFIED OR INCLUDED:Whey and modified whey, whether or not concentrated or containing added sugar or other sweetening matter :Other" +4049000,"WHEY, WHETHER OR NOT CONCENTRATED OR CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER; PRODUCTS CONSISTING OF NATURAL MILK CONSTITUENTS, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER, NOT ELSEWHERE SPECIFIED OR INCLUDED::Other" +4050000,butter and other fats and oils derived from milk dairy spreads +4051000,BUTTER AND OTHER FATS AND OILS DERIVED FROM MILK; DAIRY SPREADS::Butter +4052000,BUTTER AND OTHER FATS AND OILS DERIVED FROM MILK; DAIRY SPREADS::Dairy spreads(OLD tariff) +4059010,BUTTER AND OTHER FATS AND OILS DERIVED FROM MILK; DAIRY SPREADS:Other :Butter oil(OLD tariff) +4059020,BUTTER AND OTHER FATS AND OILS DERIVED FROM MILK; DAIRY SPREADS:Other :Ghee(OLD tariff) +4059090,BUTTER AND OTHER FATS AND OILS DERIVED FROM MILK; DAIRY SPREADS:Other :Other(OLD tariff) +4061000,"CHEESE AND CURD::Fresh (unripened or uncured) cheese, including whey cheese and curd(OLD tariff)" +4062000,"CHEESE AND CURD::Grated or powdered cheese, of all kinds(OLD tariff)" +4063000,CHEESE AND CURD::Processed cheese not grated or powdered(OLD tariff) +4064000,CHEESE AND CURD::Blue-veined cheese and other cheese containing veins produced by Penicillium roqueforti(OLD tariff) +4069000,CHEESE AND CURD::Other cheese(OLD tariff) +4070000,birds eggs in shell fresh preserved or cooked fertilised eggs for incubation +4071100,"BIRD'S EGGS, IN SHELL, FRESH, PRESERVED OR COOKED:of fowls of the species:Of fowls of the species Gallus domesticus" +4071900,birds eggs in shell fresh preserved or cooked fertilised eggs for incubation >> other +4071910,"BIRD'S EGGS, IN SHELL, FRESH, PRESERVED OR COOKED:Other:Of ducks" +4071990,"BIRD'S EGGS, IN SHELL, FRESH, PRESERVED OR COOKED:Other:Other" +4072100,"BIRD'S EGGS, IN SHELL, FRESH, PRESERVED OR COOKED:Other fresh eggs:Of fowls of the species Gallus domesticus(OLD tariff)" +4072900,"BIRD'S EGGS, IN SHELL, FRESH, PRESERVED OR COOKED:other:Other" +4079000,"BIRD'S EGGS, IN SHELL, FRESH, PRESERVED OR COOKED:other:Other" +4080000,birds eggs not in shell and egg yolks fresh dried cooked by steaming or by boiling in water moulded frozen or otherwise preserved whether or not containing added sugar or other sweetening matter egg yolks +4081100,"BIRD'S EGGS, NOT IN SHELL, AND EGG YOLKS, FRESH, DRIED, COOKED BY STEAMING OR BY BOILING IN WATER, MOULDED, FROZEN OR OTHERWISE PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER::Dried" +4081900,"BIRD'S EGGS, NOT IN SHELL, AND EGG YOLKS, FRESH, DRIED, COOKED BY STEAMING OR BY BOILING IN WATER, MOULDED, FROZEN OR OTHERWISE PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER::Other" +4089100,"BIRD'S EGGS, NOT IN SHELL, AND EGG YOLKS, FRESH, DRIED, COOKED BY STEAMING OR BY BOILING IN WATER, MOULDED, FROZEN OR OTHERWISE PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER::Dried" +4089900,"BIRD'S EGGS, NOT IN SHELL, AND EGG YOLKS, FRESH, DRIED, COOKED BY STEAMING OR BY BOILING IN WATER, MOULDED, FROZEN OR OTHERWISE PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER::Other" +4090000,::NATURAL HONEY +4100000,not elsewhere specified or included +4100010,"EDIBLE PRODUCTS OF ANIMAL ORIGIN, NOT ELSEWHERE SPECIFIED OR INCLUDED:Edible products 
of animal origin, not:Of wild animals(OLD tariff)" +4100020,"EDIBLE PRODUCTS OF ANIMAL ORIGIN, NOT ELSEWHERE SPECIFIED OR INCLUDED:Edible products of animal origin, not:Turtle eggs and Salanganes' nests (""birds' Nests"")(OLD tariff)" +4100090,"EDIBLE PRODUCTS OF ANIMAL ORIGIN, NOT ELSEWHERE SPECIFIED OR INCLUDED:Edible products of animal origin, not:Other(OLD tariff)" +4101000,not elsewhere specified or included >> insects +4101010,"EDIBLE PRODUCTS OF ANIMAL ORIGIN, NOT ELSEWHERE SPECIFIED OR INCLUDED:insects:fresh, chilled or Frozen" +4101020,"EDIBLE PRODUCTS OF ANIMAL ORIGIN, NOT ELSEWHERE SPECIFIED OR INCLUDED:insects:Salted, in brine, dried or smoked" +4101090,"EDIBLE PRODUCTS OF ANIMAL ORIGIN, NOT ELSEWHERE SPECIFIED OR INCLUDED:insects:Other" +4109000,not elsewhere specified or included >> other +4109010,"EDIBLE PRODUCTS OF ANIMAL ORIGIN, NOT ELSEWHERE SPECIFIED OR INCLUDED:Other:of wild animals" +4109020,"EDIBLE PRODUCTS OF ANIMAL ORIGIN, NOT ELSEWHERE SPECIFIED OR INCLUDED:Other:Turtle eggs and Salanganes' nests (""birds' nests"")" +4109090,"EDIBLE PRODUCTS OF ANIMAL ORIGIN, NOT ELSEWHERE SPECIFIED OR INCLUDED:Other:other" +4205001,OTHER ARTICLES OF LEATHER OR OF COMPOSITION LEATHER:Other articles of leather or of composition leather :Straps other than for machinery or harness(OLD tariff) +4304001,ARTIFICIAL FUR AND ARTICLES THEREOF:Artificial fur and articles thereof:Artificial fur(OLD tariff) +4403991,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other :Andaman Padauk (Pterocarpus dalbergioides) Bonsum (Phoebe goalparensis) Gurgan (Dipterocarpus alatus) Khair (Acacia Catechu) Lampati (Duabanga grandiflora) Laurel (Terminalia alata) Paliwood (Palaquium Ellipticum) and Red Sanders (Pterocarpus Santalinus) and Rose wood (Dalbergia Latifolia)(OLD tariff)" +4403992,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other :Sal (Shorea robusta), Sandalwood (Santalum album), Semul (Bombax ceiba), Walnut wood (Juglans binata), Anjam (Hardwickia binata), Sisso (Dalbergia sisso) and White cedar (Dysoxylum spp.)
and the like(OLD tariff)" +4411921,"FIBRE BOARD OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT BONDED WITH RESINS OR OTHER ORGANIC SUBSTANCES:Of a density exceeding 0.8 gm/cm3 :Not mechanically worked or surface covered(OLD tariff)" +4411922,"FIBRE BOARD OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT BONDED WITH RESINS OR OTHER ORGANIC SUBSTANCES:Of a density exceeding 0.8 gm/cm3 :Other(OLD tariff)" +4411931,"FIBRE BOARD OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT BONDED WITH RESINS OR OTHER ORGANIC SUBSTANCES:Of a density exceeding 0.5 gm/cm 3 but not exceeding 0.8 gm/cm 3 :Not mechanically worked or surface covered(OLD tariff)" +4411932,"FIBRE BOARD OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT BONDED WITH RESINS OR OTHER ORGANIC SUBSTANCES:Of a density exceeding 0.5 gm/cm 3 but not exceeding 0.8 gm/cm 3 :Other(OLD tariff)" +4411941,"FIBRE BOARD OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT BONDED WITH RESINS OR OTHER ORGANIC SUBSTANCES:Of a density not exceeding 0.5 gm/cm 3 :Of a density not exceeding 0.5 gm/cm 3(OLD tariff)" +4411942,"FIBRE BOARD OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT BONDED WITH RESINS OR OTHER ORGANIC SUBSTANCES:Of a density not exceeding 0.5 gm/cm 3 :Other(OLD tariff)" +4416009,"CASKS, BARRELS, VATS, TUBS AND OTHER COOPERS PRODUCTS AND PARTS THEREOF, OF WOOD, INCLUDING STAVES:Casks, barrels, vats, tubs and other cooper's products and parts thereof, of wood, including staves:Parts (of wood)(OLD tariff)" +4421901,"OTHER ARTICLES OF WOOD:Other :Spools, cops, bobbins, sewing thread reels and the like, of turned wood(OLD tariff)" +4421911,"OTHER ARTICLES OF WOOD:Of Bamboo:Spools, cops, bobbins, sewing thread reels and the like of turned wood(OLD tariff)" +4421991,"OTHER ARTICLES OF WOOD:Other:Spools, cops, bobbins, sewing thread reels and the like of turned wood(OLD tariff)" +4602191,"BASKETWORK, WICKERWORK AND OTHER ARTICLES, MADE DIRECTLY TO SHAPE FROM PLAITING MATERIALS OR MADE UP FROM GOODS OF HEADING 4601; ARTICLES OF LOOFAH:Other :Of palm leaves(OLD tariff)" +4811901,"PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBRES, COATED, IMPREGNATED, COVERED, SURFACE-COLOURED, SURFACE-DECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE, OTHER THAN GOODS OF THE KIND DESCRIBED IN HEADING 4803, 4809 OR 4810:Other paper, paperboard, cellulose wadding and webs of cellulose fibres :Handmade paper and paperboard, rules lined or squared but not otherwise printed; chromo and art paper, coated, building board of paper or pulp, impregnated; chromo board; raw base paper for sensitising, coated; surface marbled paper; leather board and imitation leather board; and matrix board(OLD tariff)" +4823901,"OTHER PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBERS, CUT TO SIZE OR SHAPE; OTHER ARTICLES OF PAPERPULP, PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBERS:Other :Braille paper, cellulose in sole board or sheet; packing and wrapping paper; paper for cigarette filter tips; paper cone for loud speaker; patterns made of papers for leather footwear, leather garments and goods; patterns made of paper for articles of apparel and clothing accessories, products consisting of sheets of paper or paperboard, impregnated, coated or covered with plastics (including thermoset resins or mixtures thereof or chemical formulations, containing melamine phenol or urea formaldehyde with or without curing agents or catalysts), compressed together in one or more operations; decorative
laminates(OLD tariff)" +4823902,"OTHER PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBERS, CUT TO SIZE OR SHAPE; OTHER ARTICLES OF PAPERPULP, PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBERS:Other :Pre-punched cards; monotype and newstape paper in strips with perforated edges, not exceeding 15 cm in width; typerwritting paper cut to size and the like(OLD tariff)" +5005001,"YARN SPUN FROM SILK WASTE, NOT PUT UP FOR RETAIL SALE:Yarn spun from silk waste, not put up for retail sale :Containing 85% or more by weight of silk waste(OLD tariff)" +5005002,"YARN SPUN FROM SILK WASTE, NOT PUT UP FOR RETAIL SALE:Yarn spun from silk waste, not put up for retail sale :Containing less than 85% by weight of silk(OLD tariff)" +5006001,"SILK YARN AND YARN SPUN FROM SILK WASTE, PUT UP FOR RETAIL SALE; SILK-WORM GUT:Silk yarn and yarn spun from silk waste, put up for retail sale; silk worm gut :Silk yarn(OLD tariff)" +5006002,"SILK YARN AND YARN SPUN FROM SILK WASTE, PUT UP FOR RETAIL SALE; SILK-WORM GUT:Silk yarn and yarn spun from silk waste, put up for retail sale; silk worm gut :Yarn spun from silk waste containing 85% or more by weight of silk(OLD tariff)" +5006003,"SILK YARN AND YARN SPUN FROM SILK WASTE, PUT UP FOR RETAIL SALE; SILK-WORM GUT:Silk yarn and yarn spun from silk waste, put up for retail sale; silk worm gut :Yarn spun from silk waste containing 85% or less by weight of silk(OLD tariff)" +5010000,human hair unworked whether or not washed or scoured waste of human hair human hair unworked whether or not washed or scoured waste of human hair +5010010,"HUMAN HAIR, UNWORKED, WHETHER OR NOT WASHED OR SCOURED; WASTE OF HUMAN HAIR:Human hair, unworked, whether or not washed or scoured; Waste of human hair :Human hair, unworked, whether or not washed or scoured" +5010020,"HUMAN HAIR, UNWORKED, WHETHER OR NOT WASHED OR SCOURED; WASTE OF HUMAN HAIR:Human hair, unworked, whether or not washed or scoured; Waste of human hair :Waste of human hair" +5020000,pigs hogs or boars bristles and hair badger hair and other brush making hair waste of such bristles or hair +5021000,pigs hogs or boars bristles and hair badger hair and other brush making hair waste of such bristles or hair >> pigs hogs or boars bristles and hair and waste thereof +5021010,"PIGS', HOGS' OR BOARS' BRISTLES AND HAIR; BADGER HAIR AND OTHER BRUSH MAKING HAIR; WASTE OF SUCH BRISTLES OR HAIR:Pigs', hogs' or boars' bristles and hair and waste thereof:Pigs', hogs' or boars' bristles and hair" +5021020,"PIGS', HOGS' OR BOARS' BRISTLES AND HAIR; BADGER HAIR AND OTHER BRUSH MAKING HAIR; WASTE OF SUCH BRISTLES OR HAIR:Pigs', hogs' or boars' bristles and hair and waste thereof:Waste of pigs', hogs' or boars' bristles and hair" +5029000,pigs hogs or boars bristles and hair badger hair and other brush making hair waste of such bristles or hair >> other +5029010,"PIGS', HOGS' OR BOARS' BRISTLES AND HAIR; BADGER HAIR AND OTHER BRUSH MAKING HAIR; WASTE OF SUCH BRISTLES OR HAIR:Other:Badger hair and other brush making hair" +5029020,"PIGS', HOGS' OR BOARS' BRISTLES AND HAIR; BADGER HAIR AND OTHER BRUSH MAKING HAIR; WASTE OF SUCH BRISTLES OR HAIR:Other:Yak tail hair" +5029090,"PIGS', HOGS' OR BOARS' BRISTLES AND HAIR; BADGER HAIR AND OTHER BRUSH MAKING HAIR; WASTE OF SUCH BRISTLES OR HAIR:Other:Other" +5040000,guts bladders and stomachs of animals other than fish whole and pieces thereof fresh chilled frozen salted in brine dried or smoked stomachs of animals other guts bladders and than fish whole and pieces thereof fresh 
chilled frozen salted in brine dried or smoked +5040010,"GUTS, BLADDERS AND STOMACHS OF ANIMALS (OTHER THAN FISH), WHOLE AND PIECES THEREOF, FRESH, CHILLED, FROZEN, SALTED, IN BRINE, DRIED OR SMOKED:Guts, bladders and stomachs of animals (other than fish), whole and pieces thereof, fresh, chilled, frozen, salted, in brine, dried or smoked:Guts of cattle for natural food casings" +5040020,"GUTS, BLADDERS AND STOMACHS OF ANIMALS (OTHER THAN FISH), WHOLE AND PIECES THEREOF, FRESH, CHILLED, FROZEN, SALTED, IN BRINE, DRIED OR SMOKED:Guts, bladders and stomachs of animals (other than fish), whole and pieces thereof, fresh, chilled, frozen, salted, in brine, dried or smoked:Guts of sheep and goats for natural food casings Guts of other animals for natural food casings " +5040031,"GUTS, BLADDERS AND STOMACHS OF ANIMALS (OTHER THAN FISH), WHOLE AND PIECES THEREOF, FRESH, CHILLED, FROZEN, SALTED, IN BRINE, DRIED OR SMOKED:Guts, bladders and stomachs of animals (other than fish), whole and pieces thereof, fresh, chilled, frozen, salted, in brine, dried or smoked:Of wild animals" +5040039,"GUTS, BLADDERS AND STOMACHS OF ANIMALS (OTHER THAN FISH), WHOLE AND PIECES THEREOF, FRESH, CHILLED, FROZEN, SALTED, IN BRINE, DRIED OR SMOKED:Guts, bladders and stomachs of animals (other than fish), whole and pieces thereof, fresh, chilled, frozen, salted, in brine, dried or smoked:Other " +5040041,"GUTS, BLADDERS AND STOMACHS OF ANIMALS (OTHER THAN FISH), WHOLE AND PIECES THEREOF, FRESH, CHILLED, FROZEN, SALTED, IN BRINE, DRIED OR SMOKED:Guts, bladders and stomachs of animals (other than fish), whole and pieces thereof, fresh, chilled, frozen, salted, in brine, dried or smoked:Of wild animals" +5040049,"GUTS, BLADDERS AND STOMACHS OF ANIMALS (OTHER THAN FISH), WHOLE AND PIECES THEREOF, FRESH, CHILLED, FROZEN, SALTED, IN BRINE, DRIED OR SMOKED:Guts, bladders and stomachs of animals (other than fish), whole and pieces thereof, fresh, chilled, frozen, salted, in brine, dried or smoked:Other " +5040051,"GUTS, BLADDERS AND STOMACHS OF ANIMALS (OTHER THAN FISH), WHOLE AND PIECES THEREOF, FRESH, CHILLED, FROZEN, SALTED, IN BRINE, DRIED OR SMOKED:Guts, bladders and stomachs of animals (other than fish), whole and pieces thereof, fresh, chilled, frozen, salted, in brine, dried or smoked:Of wild animals" +5040059,"GUTS, BLADDERS AND STOMACHS OF ANIMALS (OTHER THAN FISH), WHOLE AND PIECES THEREOF, FRESH, CHILLED, FROZEN, SALTED, IN BRINE, DRIED OR SMOKED:Guts, bladders and stomachs of animals (other than fish), whole and pieces thereof, fresh, chilled, frozen, salted, in brine, dried or smoked:Other" +5050000,skins and other parts of birds with their feathers or down feathers and parts of feathers whether or not with trimmed edges and down not further worked than cleaned disinfected or treated for preservation powder and waste of feathers or parts of feathers +5051000,skins and other parts of birds with their feathers or down feathers and parts of feathers whether or not with trimmed edges and down not further worked than cleaned disinfected or treated for preservation powder and waste of feathers or parts of feathers >> feathers of a kind used for stuffing down +5051010,"SKINS AND OTHER PARTS OF BIRDS, WITH THEIR FEATHERS OR DOWN, FEATHERS AND PARTS OF FEATHERS (WHETHER OR NOT WITH TRIMMED EDGES) AND DOWN, NOT FURTHER WORKED THAN CLEANED, DISINFECTED OR TREATED FOR PRESERVATION; POWDER AND WASTE OF FEATHERS OR PARTS OF FEATHERS:Feathers of a kind used for stuffing; down :Of wild birds" +5051090,"SKINS AND OTHER PARTS OF 
BIRDS, WITH THEIR FEATHERS OR DOWN, FEATHERS AND PARTS OF FEATHERS (WHETHER OR NOT WITH TRIMMED EDGES) AND DOWN, NOT FURTHER WORKED THAN CLEANED, DISINFECTED OR TREATED FOR PRESERVATION; POWDER AND WASTE OF FEATHERS OR PARTS OF FEATHERS:Feathers of a kind used for stuffing; down :Other" +5059000,skins and other parts of birds with their feathers or down feathers and parts of feathers whether or not with trimmed edges and down not further worked than cleaned disinfected or treated for preservation powder and waste of feathers or parts of feathers >> other +5059010,"SKINS AND OTHER PARTS OF BIRDS, WITH THEIR FEATHERS OR DOWN, FEATHERS AND PARTS OF FEATHERS (WHETHER OR NOT WITH TRIMMED EDGES) AND DOWN, NOT FURTHER WORKED THAN CLEANED, DISINFECTED OR TREATED FOR PRESERVATION; POWDER AND WASTE OF FEATHERS OR PARTS OF FEATHERS:Other:Peacock tail and wing feather (trimmed or not) " +5059021,"SKINS AND OTHER PARTS OF BIRDS, WITH THEIR FEATHERS OR DOWN, FEATHERS AND PARTS OF FEATHERS (WHETHER OR NOT WITH TRIMMED EDGES) AND DOWN, NOT FURTHER WORKED THAN CLEANED, DISINFECTED OR TREATED FOR PRESERVATION; POWDER AND WASTE OF FEATHERS OR PARTS OF FEATHERS:Other:Of wild birds" +5059029,"SKINS AND OTHER PARTS OF BIRDS, WITH THEIR FEATHERS OR DOWN, FEATHERS AND PARTS OF FEATHERS (WHETHER OR NOT WITH TRIMMED EDGES) AND DOWN, NOT FURTHER WORKED THAN CLEANED, DISINFECTED OR TREATED FOR PRESERVATION; POWDER AND WASTE OF FEATHERS OR PARTS OF FEATHERS:Other:Other " +5059031,"SKINS AND OTHER PARTS OF BIRDS, WITH THEIR FEATHERS OR DOWN, FEATHERS AND PARTS OF FEATHERS (WHETHER OR NOT WITH TRIMMED EDGES) AND DOWN, NOT FURTHER WORKED THAN CLEANED, DISINFECTED OR TREATED FOR PRESERVATION; POWDER AND WASTE OF FEATHERS OR PARTS OF FEATHERS:Other:Of wild birds" +5059039,"SKINS AND OTHER PARTS OF BIRDS, WITH THEIR FEATHERS OR DOWN, FEATHERS AND PARTS OF FEATHERS (WHETHER OR NOT WITH TRIMMED EDGES) AND DOWN, NOT FURTHER WORKED THAN CLEANED, DISINFECTED OR TREATED FOR PRESERVATION; POWDER AND WASTE OF FEATHERS OR PARTS OF FEATHERS:Other:Other " +5059091,"SKINS AND OTHER PARTS OF BIRDS, WITH THEIR FEATHERS OR DOWN, FEATHERS AND PARTS OF FEATHERS (WHETHER OR NOT WITH TRIMMED EDGES) AND DOWN, NOT FURTHER WORKED THAN CLEANED, DISINFECTED OR TREATED FOR PRESERVATION; POWDER AND WASTE OF FEATHERS OR PARTS OF FEATHERS:Other:Of wild birds" +5059099,"SKINS AND OTHER PARTS OF BIRDS, WITH THEIR FEATHERS OR DOWN, FEATHERS AND PARTS OF FEATHERS (WHETHER OR NOT WITH TRIMMED EDGES) AND DOWN, NOT FURTHER WORKED THAN CLEANED, DISINFECTED OR TREATED FOR PRESERVATION; POWDER AND WASTE OF FEATHERS OR PARTS OF FEATHERS:Other:Other" +5060000,bones and unworked defatted simply prepared but not cut to shape treated with acid or degelatinised powder and waste of these products +5061000,bones and unworked defatted simply prepared but not cut to shape treated with acid or degelatinised powder and waste of these products >> ossein and bones treated with acid bones including crushed +5061011,"BONES AND HORN-CORES, UNWORKED, DEFATTED, SIMPLY PREPARED (BUT NOT CUT TO SHAPE), TREATED WITH ACID OR DEGELATINISED; POWDER AND WASTE OF THESE PRODUCTS:Ossein and bones treated with acid :Of wild animals" +5061019,"BONES AND HORN-CORES, UNWORKED, DEFATTED, SIMPLY PREPARED (BUT NOT CUT TO SHAPE), TREATED WITH ACID OR DEGELATINISED; POWDER AND WASTE OF THESE PRODUCTS:Ossein and bones treated with acid :Other " +5061021,"BONES AND HORN-CORES, UNWORKED, DEFATTED, SIMPLY PREPARED (BUT NOT CUT TO SHAPE), TREATED WITH ACID OR DEGELATINISED; POWDER AND WASTE OF THESE 
PRODUCTS:Ossein and bones treated with acid :Of wild animals" +5061029,"BONES AND HORN-CORES, UNWORKED, DEFATTED, SIMPLY PREPARED (BUT NOT CUT TO SHAPE), TREATED WITH ACID OR DEGELATINISED; POWDER AND WASTE OF THESE PRODUCTS:Ossein and bones treated with acid :Other" +5061031,"BONES AND HORN-CORES, UNWORKED, DEFATTED, SIMPLY PREPARED (BUT NOT CUT TO SHAPE), TREATED WITH ACID OR DEGELATINISED; POWDER AND WASTE OF THESE PRODUCTS:Ossein and bones treated with acid :Of wild animals" +5061039,"BONES AND HORN-CORES, UNWORKED, DEFATTED, SIMPLY PREPARED (BUT NOT CUT TO SHAPE), TREATED WITH ACID OR DEGELATINISED; POWDER AND WASTE OF THESE PRODUCTS:Ossein and bones treated with acid :Other" +5061041,"BONES AND HORN-CORES, UNWORKED, DEFATTED, SIMPLY PREPARED (BUT NOT CUT TO SHAPE), TREATED WITH ACID OR DEGELATINISED; POWDER AND WASTE OF THESE PRODUCTS:Ossein and bones treated with acid :Of wild animals" +5061049,"BONES AND HORN-CORES, UNWORKED, DEFATTED, SIMPLY PREPARED (BUT NOT CUT TO SHAPE), TREATED WITH ACID OR DEGELATINISED; POWDER AND WASTE OF THESE PRODUCTS:Ossein and bones treated with acid :Other" +5069000,bones and unworked defatted simply prepared but not cut to shape treated with acid or degelatinised powder and waste of these products >> other bone meal +5069011,"BONES AND HORN-CORES, UNWORKED, DEFATTED, SIMPLY PREPARED (BUT NOT CUT TO SHAPE), TREATED WITH ACID OR DEGELATINISED; POWDER AND WASTE OF THESE PRODUCTS:Other :Of wild animals" +5069019,"BONES AND HORN-CORES, UNWORKED, DEFATTED, SIMPLY PREPARED (BUT NOT CUT TO SHAPE), TREATED WITH ACID OR DEGELATINISED; POWDER AND WASTE OF THESE PRODUCTS:Other :Other " +5069091,"BONES AND HORN-CORES, UNWORKED, DEFATTED, SIMPLY PREPARED (BUT NOT CUT TO SHAPE), TREATED WITH ACID OR DEGELATINISED; POWDER AND WASTE OF THESE PRODUCTS:Other :Of wild animals" +5069099,"BONES AND HORN-CORES, UNWORKED, DEFATTED, SIMPLY PREPARED (BUT NOT CUT TO SHAPE), TREATED WITH ACID OR DEGELATINISED; POWDER AND WASTE OF THESE PRODUCTS:Other :Other" +5070000,ivory whalebone and whalebone hair horns antlers hooves nails claws and beaks unworked or simply prepared but not cut to shape powder and waste of these products +5071000,ivory whalebone and whalebone hair horns antlers hooves nails claws and beaks unworked or simply prepared but not cut to shape powder and waste of these products >> ivory ivory powder and waste +5071010,"IVORY, TORTOISE-SHELL, WHALEBONE AND WHALEBONE HAIR, HORNS, ANTLERS, HOOVES, NAILS, CLAWS AND BEAKS, UNWORKED OR SIMPLY PREPARED BUT NOT CUT TO SHAPE; POWDER AND WASTE OF THESE PRODUCTS:Ivory; ivory powder and waste :Ivory" +5071020,"IVORY, TORTOISE-SHELL, WHALEBONE AND WHALEBONE HAIR, HORNS, ANTLERS, HOOVES, NAILS, CLAWS AND BEAKS, UNWORKED OR SIMPLY PREPARED BUT NOT CUT TO SHAPE; POWDER AND WASTE OF THESE PRODUCTS:Ivory; ivory powder and waste :Ivory powder and waste" +5079000,ivory whalebone and whalebone hair horns antlers hooves nails claws and beaks unworked or simply prepared but not cut to shape powder and waste of these products >> other +5079010,"IVORY, TORTOISE-SHELL, WHALEBONE AND WHALEBONE HAIR, HORNS, ANTLERS, HOOVES, NAILS, CLAWS AND BEAKS, UNWORKED OR SIMPLY PREPARED BUT NOT CUT TO SHAPE; POWDER AND WASTE OF THESE PRODUCTS:Other :Hoof meal" +5079020,"IVORY, TORTOISE-SHELL, WHALEBONE AND WHALEBONE HAIR, HORNS, ANTLERS, HOOVES, NAILS, CLAWS AND BEAKS, UNWORKED OR SIMPLY PREPARED BUT NOT CUT TO SHAPE; POWDER AND WASTE OF THESE PRODUCTS:Other :Horn meal" +5079030,"IVORY, TORTOISE-SHELL, WHALEBONE AND WHALEBONE HAIR, HORNS, ANTLERS, 
HOOVES, NAILS, CLAWS AND BEAKS, UNWORKED OR SIMPLY PREPARED BUT NOT CUT TO SHAPE; POWDER AND WASTE OF THESE PRODUCTS:Other :Hooves, claws, nails and beaks" +5079040,"IVORY, TORTOISE-SHELL, WHALEBONE AND WHALEBONE HAIR, HORNS, ANTLERS, HOOVES, NAILS, CLAWS AND BEAKS, UNWORKED OR SIMPLY PREPARED BUT NOT CUT TO SHAPE; POWDER AND WASTE OF THESE PRODUCTS:Other :Antlers" +5079050,"IVORY, TORTOISE-SHELL, WHALEBONE AND WHALEBONE HAIR, HORNS, ANTLERS, HOOVES, NAILS, CLAWS AND BEAKS, UNWORKED OR SIMPLY PREPARED BUT NOT CUT TO SHAPE; POWDER AND WASTE OF THESE PRODUCTS:Other :Buffalo horns" +5079060,"IVORY, TORTOISE-SHELL, WHALEBONE AND WHALEBONE HAIR, HORNS, ANTLERS, HOOVES, NAILS, CLAWS AND BEAKS, UNWORKED OR SIMPLY PREPARED BUT NOT CUT TO SHAPE; POWDER AND WASTE OF THESE PRODUCTS:Other :Tortoise-shell" +5079070,"IVORY, TORTOISE-SHELL, WHALEBONE AND WHALEBONE HAIR, HORNS, ANTLERS, HOOVES, NAILS, CLAWS AND BEAKS, UNWORKED OR SIMPLY PREPARED BUT NOT CUT TO SHAPE; POWDER AND WASTE OF THESE PRODUCTS:Other :Claws and waste of tortoise shell" +5079090,"IVORY, TORTOISE-SHELL, WHALEBONE AND WHALEBONE HAIR, HORNS, ANTLERS, HOOVES, NAILS, CLAWS AND BEAKS, UNWORKED OR SIMPLY PREPARED BUT NOT CUT TO SHAPE; POWDER AND WASTE OF THESE PRODUCTS:Other :Other" +5080000,coral and similar materials unworked or simply prepared but not otherwise worked shells of crustaceans molluscs or echinoderms and unworked or simply prepared but not cut to shape powder and waste thereof coral and similar materials unworked or simply prepared but not otherwise worked shells of molluscs crustaceans or echinoderms and unworked or simply prepared but not cut to shape powder and waste thereof +5080010,"CORAL AND SIMILAR MATERIALS, UNWORKED OR SIMPLY PREPARED BUT NOT OTHERWISE WORKED; SHELLS OF MOLLUSCS, CRUSTACEANS OR ECHINODERMS AND CUTTLE-BONE, UNWORKED OR SIMPLY PREPARED BUT NOT CUT TO SHAPE, POWDER AND WASTE THEREOF:Coral and similar materials, unworked or simply prepared but not otherwise worked; shells of molluscs, crustaceans or echinoderms and cuttle-bone, unworked or simply prepared but not cut to shape, powder and waste thereof :Coral" +5080020,"CORAL AND SIMILAR MATERIALS, UNWORKED OR SIMPLY PREPARED BUT NOT OTHERWISE WORKED; SHELLS OF MOLLUSCS, CRUSTACEANS OR ECHINODERMS AND CUTTLE-BONE, UNWORKED OR SIMPLY PREPARED BUT NOT CUT TO SHAPE, POWDER AND WASTE THEREOF:Coral and similar materials, unworked or simply prepared but not otherwise worked; shells of molluscs, crustaceans or echinoderms and cuttle-bone, unworked or simply prepared but not cut to shape, powder and waste thereof :Chanks" +5080030,"CORAL AND SIMILAR MATERIALS, UNWORKED OR SIMPLY PREPARED BUT NOT OTHERWISE WORKED; SHELLS OF MOLLUSCS, CRUSTACEANS OR ECHINODERMS AND CUTTLE-BONE, UNWORKED OR SIMPLY PREPARED BUT NOT CUT TO SHAPE, POWDER AND WASTE THEREOF:Coral and similar materials, unworked or simply prepared but not otherwise worked; shells of molluscs, crustaceans or echinoderms and cuttle-bone, unworked or simply prepared but not cut to shape, powder and waste thereof :Cowries" +5080040,"CORAL AND SIMILAR MATERIALS, UNWORKED OR SIMPLY PREPARED BUT NOT OTHERWISE WORKED; SHELLS OF MOLLUSCS, CRUSTACEANS OR ECHINODERMS AND CUTTLE-BONE, UNWORKED OR SIMPLY PREPARED BUT NOT CUT TO SHAPE, POWDER AND WASTE THEREOF:Coral and similar materials, unworked or simply prepared but not otherwise worked; shells of molluscs, crustaceans or echinoderms and cuttle-bone, unworked or simply prepared but not cut to shape, powder and waste thereof :Cuttlefish bones" +5080050,"CORAL AND 
SIMILAR MATERIALS, UNWORKED OR SIMPLY PREPARED BUT NOT OTHERWISE WORKED; SHELLS OF MOLLUSCS, CRUSTACEANS OR ECHINODERMS AND CUTTLE-BONE, UNWORKED OR SIMPLY PREPARED BUT NOT CUT TO SHAPE, POWDER AND WASTE THEREOF:Coral and similar materials, unworked or simply prepared but not otherwise worked; shells of molluscs, crustaceans or echinoderms and cuttle-bone, unworked or simply prepared but not cut to shape, powder and waste thereof :Shells" +5080090,"CORAL AND SIMILAR MATERIALS, UNWORKED OR SIMPLY PREPARED BUT NOT OTHERWISE WORKED; SHELLS OF MOLLUSCS, CRUSTACEANS OR ECHINODERMS AND CUTTLE-BONE, UNWORKED OR SIMPLY PREPARED BUT NOT CUT TO SHAPE, POWDER AND WASTE THEREOF:Coral and similar materials, unworked or simply prepared but not otherwise worked; shells of molluscs, crustaceans or echinoderms and cuttle-bone, unworked or simply prepared but not cut to shape, powder and waste thereof :Other" +5100000,ambergris castoreum civet and musk cantharides bile whether or not dried glands and other animal products used in the preparation of pharmaceutical products fresh chilled frozen or otherwise provisionally preserved ambergris castoreum civet and musk cantharides bile whether or not dried glands and other animal products used in the preparation of pharmaceutical products fresh chilled frozen or otherwise provisionally preserved +5100010,"AMBERGRIS, CASTOREUM, CIVET AND MUSK; CANTHARIDES; BILE, WHETHER OR NOT DRIED; GLANDS AND OTHER ANIMAL PRODUCTS USED IN THE PREPARATION OF PHARMACEUTICAL PRODUCTS, FRESH, CHILLED, FROZEN OR OTHERWISE PROVISIONALLY PRESERVED:Ambergris, castoreum, civet and musk; cantharides; bile, whether or not dried; glands and other animal products used in the preparation of pharmaceutical products, fresh, chilled, frozen or otherwise provisionally preserved :Bezoar, cow (goolochan)" +5100020,"AMBERGRIS, CASTOREUM, CIVET AND MUSK; CANTHARIDES; BILE, WHETHER OR NOT DRIED; GLANDS AND OTHER ANIMAL PRODUCTS USED IN THE PREPARATION OF PHARMACEUTICAL PRODUCTS, FRESH, CHILLED, FROZEN OR OTHERWISE PROVISIONALLY PRESERVED:Ambergris, castoreum, civet and musk; cantharides; bile, whether or not dried; glands and other animal products used in the preparation of pharmaceutical products, fresh, chilled, frozen or otherwise provisionally preserved :Ox Gallstone" +5100030,"AMBERGRIS, CASTOREUM, CIVET AND MUSK; CANTHARIDES; BILE, WHETHER OR NOT DRIED; GLANDS AND OTHER ANIMAL PRODUCTS USED IN THE PREPARATION OF PHARMACEUTICAL PRODUCTS, FRESH, CHILLED, FROZEN OR OTHERWISE PROVISIONALLY PRESERVED:Ambergris, castoreum, civet and musk; cantharides; bile, whether or not dried; glands and other animal products used in the preparation of pharmaceutical products, fresh, chilled, frozen or otherwise provisionally preserved :Placenta, frozen " +5100091,"AMBERGRIS, CASTOREUM, CIVET AND MUSK; CANTHARIDES; BILE, WHETHER OR NOT DRIED; GLANDS AND OTHER ANIMAL PRODUCTS USED IN THE PREPARATION OF PHARMACEUTICAL PRODUCTS, FRESH, CHILLED, FROZEN OR OTHERWISE PROVISIONALLY PRESERVED:Ambergris, castoreum, civet and musk; cantharides; bile, whether or not dried; glands and other animal products used in the preparation of pharmaceutical products, fresh, chilled, frozen or otherwise provisionally preserved :Of wild animals" +5100099,"AMBERGRIS, CASTOREUM, CIVET AND MUSK; CANTHARIDES; BILE, WHETHER OR NOT DRIED; GLANDS AND OTHER ANIMAL PRODUCTS USED IN THE PREPARATION OF PHARMACEUTICAL PRODUCTS, FRESH, CHILLED, FROZEN OR OTHERWISE PROVISIONALLY PRESERVED:Ambergris, castoreum, civet and musk; cantharides; bile, whether 
or not dried; glands and other animal products used in the preparation of pharmaceutical products, fresh, chilled, frozen or otherwise provisionally preserved :Other" +5110000,animal products not elsewhere specified or included dead animals of chapter 1 or 3 unfit for human consumption +5111000,"ANIMAL PRODUCTS NOT ELSEWHERE SPECIFIED OR INCLUDED; DEAD ANIMALS OF CHAPTER 1 OR 3, UNFIT FOR HUMAN CONSUMPTION::Bovine semen" +5119100,animal products not elsewhere specified or included dead animals of chapter 1 or 3 unfit for human consumption >> products of fish or crustaceans molluscs or other aquatic invertebrates dead animals of chapter 3 +5119110,"ANIMAL PRODUCTS NOT ELSEWHERE SPECIFIED OR INCLUDED; DEAD ANIMALS OF CHAPTER 1 OR 3, UNFIT FOR HUMAN CONSUMPTION:Products of fish or crustaceans, molluscs or other aquatic invertebrates; dead animals of Chapter 3 :Fish nails" +5119120,"ANIMAL PRODUCTS NOT ELSEWHERE SPECIFIED OR INCLUDED; DEAD ANIMALS OF CHAPTER 1 OR 3, UNFIT FOR HUMAN CONSUMPTION:Products of fish or crustaceans, molluscs or other aquatic invertebrates; dead animals of Chapter 3 :Fish tails" +5119130,"ANIMAL PRODUCTS NOT ELSEWHERE SPECIFIED OR INCLUDED; DEAD ANIMALS OF CHAPTER 1 OR 3, UNFIT FOR HUMAN CONSUMPTION:Products of fish or crustaceans, molluscs or other aquatic invertebrates; dead animals of Chapter 3 :Other fish waste" +5119140,animal products not elsewhere specified or included dead animals of chapter 1 or 3 unfit for human consumption >> products of fish or crustaceans molluscs or other aquatic invertebrates dead animals of chapter 3 >> artemia cyst +5119190,"ANIMAL PRODUCTS NOT ELSEWHERE SPECIFIED OR INCLUDED; DEAD ANIMALS OF CHAPTER 1 OR 3, UNFIT FOR HUMAN CONSUMPTION:Products of fish or crustaceans, molluscs or other aquatic invertebrates; dead animals of Chapter 3 :Other" +5119900,animal products not elsewhere specified or included dead animals of chapter 1 or 3 unfit for human consumption >> other +5119910,animal products not elsewhere specified or included dead animals of chapter 1 or 3 unfit for human consumption >> other >> silkworm pupae sinews and tendons +5119911,"ANIMAL PRODUCTS NOT ELSEWHERE SPECIFIED OR INCLUDED; DEAD ANIMALS OF CHAPTER 1 OR 3, UNFIT FOR HUMAN CONSUMPTION:Other :Artemia(OLD tariff)" +5119919,"ANIMAL PRODUCTS NOT ELSEWHERE SPECIFIED OR INCLUDED; DEAD ANIMALS OF CHAPTER 1 OR 3, UNFIT FOR HUMAN CONSUMPTION:Other :Other (OLD tariff)" +5119921,"ANIMAL PRODUCTS NOT ELSEWHERE SPECIFIED OR INCLUDED; DEAD ANIMALS OF CHAPTER 1 OR 3, UNFIT FOR HUMAN CONSUMPTION:Other :Of wild life" +5119929,"ANIMAL PRODUCTS NOT ELSEWHERE SPECIFIED OR INCLUDED; DEAD ANIMALS OF CHAPTER 1 OR 3, UNFIT FOR HUMAN CONSUMPTION:Other :Other " +5119991,"ANIMAL PRODUCTS NOT ELSEWHERE SPECIFIED OR INCLUDED; DEAD ANIMALS OF CHAPTER 1 OR 3, UNFIT FOR HUMAN CONSUMPTION:Other :Frozen semen, other than bovine; bovine embryo" +5119992,"ANIMAL PRODUCTS NOT ELSEWHERE SPECIFIED OR INCLUDED; DEAD ANIMALS OF CHAPTER 1 OR 3, UNFIT FOR HUMAN CONSUMPTION:Other :Of wild life" +5119999,"ANIMAL PRODUCTS NOT ELSEWHERE SPECIFIED OR INCLUDED; DEAD ANIMALS OF CHAPTER 1 OR 3, UNFIT FOR HUMAN CONSUMPTION:Other :Other" +5201001,"COTTON, NOT CARDED OR COMBED:Cotton, not carded or combed :Indian cotton(OLD tariff)" +5209111,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave:Handloom(OLD tariff)" +5211209,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 
G/M2:Bleached :Other(OLD tariff)" +5310101,WOVEN FABRICS OF JUTE OR OF OTHER TEXTILE BAST FIBRES OF HEADING 5303:Unbleached :Containing 100% by weight of jute(OLD tariff) +5310109,WOVEN FABRICS OF JUTE OR OF OTHER TEXTILE BAST FIBRES OF HEADING 5303:Unbleached :Other(OLD tariff) +5310909,WOVEN FABRICS OF JUTE OR OF OTHER TEXTILE BAST FIBRES OF HEADING 5303:Other :Other(OLD tariff) +5311001,WOVEN FABRICS OF OTHER VEGETABLE TEXTILE FIBRES; WOVEN FABRICS OF PAPER YARN:Woven fabrics of other vegetable textile fibres; woven fabrics of paper yarn :Of other vegetable textile fibres(OLD tariff) +5311002,WOVEN FABRICS OF OTHER VEGETABLE TEXTILE FIBRES; WOVEN FABRICS OF PAPER YARN:Woven fabrics of other vegetable textile fibres; woven fabrics of paper yarn :Of paper yarn(OLD tariff) +5403491,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:Other :Cuprammonium filament yarn(OLD tariff)" +5407101,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Unbleached(OLD tariff)" +5407102,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Bleached(OLD tariff)" +5407103,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Dyed(OLD tariff)" +5407104,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Printed(OLD tariff)" +5407109,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Other(OLD tariff)" +5407411,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Unbleached(OLD tariff)" +5407412,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Bleached(OLD tariff)" +5407511,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Unbleached(OLD tariff)" +5407512,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Bleached(OLD tariff)" +5407811,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Unbleached(OLD tariff)" +5407812,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Bleached(OLD tariff)" +5408221,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Dyed :Fabrics of rayon(OLD tariff)" +5408241,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Printed :Of rayon(OLD tariff)" +5408321,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Dyed :Fabrics of rayon(OLD tariff)" +5408341,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Printed :Fabric of rayon(OLD tariff)" +5702502,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? 
and similar hand-woven rugs:Of man-made textile materials;:Of man-made textile materials(OLD tariff)" +5705001,"Other carpets and other textile floor coverings, whether or not made-up:Other carpets and other textile floor coverings whether or not made-up:Carpets(OLD tariff)" +5705002,"Other carpets and other textile floor coverings, whether or not made-up:Other carpets and other textile floor coverings whether or not made-up:Durries(OLD tariff)" +5705003,"Other carpets and other textile floor coverings, whether or not made-up:Other carpets and other textile floor coverings whether or not made-up:Of jute(OLD tariff)" +5705004,"Other carpets and other textile floor coverings, whether or not made-up:Other carpets and other textile floor coverings whether or not made-up:Carpets, carpetting, rugs, mats and mattings(OLD tariff)" +5803001,"GAUZE, OTHER THAN NARROW FABRICS OF HEADING 5806:Gauze, other than narrow fabrics of heading 5806:Of cotton(OLD tariff)" +5803009,"GAUZE, OTHER THAN NARROW FABRICS OF HEADING 5806:Gauze, other than narrow fabrics of heading 5806:Of other textile materials(OLD tariff)" +5907001,"TEXTILE FABRICS OTHERWISE IMPREGNATED, COATED OR COVERED; PAINTED CANVAS BEING THEATRICAL SCENERY, STUDIO BACK-CLOTHS OR THE LIKE:Textile fabrics otherwise impregnated, coated or covered; painted canvas being theatrical scenery, studio back-cloths or the like :cloths or the like - Fabrics covered partially or fully with textile flocks, or with preparation containing textile flocks(OLD tariff)" +5907009,"TEXTILE FABRICS OTHERWISE IMPREGNATED, COATED OR COVERED; PAINTED CANVAS BEING THEATRICAL SCENERY, STUDIO BACK-CLOTHS OR THE LIKE:Textile fabrics otherwise impregnated, coated or covered; painted canvas being theatrical scenery, studio back-cloths or the like :cloths or the like - Other(OLD tariff)" +6010000,bulbs tubers tuberous roots corms crowns and rhizomes dormant in growth or in flower chicory plants and roots other than roots of heading 1212 +6011000,"BULBS, TUBERS, TUBEROUS ROOTS, CORMS, CROWNS AND RHIZOMES, DORMANT, IN GROWTH OR IN FLOWER; CHICORY PLANTS AND ROOTS OTHER THAN ROOTS OF HEADING 1212::Bulbs, tubers, tuberous roots, corms, crowns and rhizomes, dormant" +6012000,bulbs tubers tuberous roots corms crowns and rhizomes dormant in growth or in flower chicory plants and roots other than roots of heading 1212 >> bulbs tubers tuberous roots corms crowns and rhizomes in growth or in flower chicory plants and roots +6012010,"BULBS, TUBERS, TUBEROUS ROOTS, CORMS, CROWNS AND RHIZOMES, DORMANT, IN GROWTH OR IN FLOWER; CHICORY PLANTS AND ROOTS OTHER THAN ROOTS OF HEADING 1212:Bulbs, tubers, tuberous roots, corms, crowns and rhizomes, in growth or in flower; chicory plants and roots :Bulbs, horticultural " +6012021,"BULBS, TUBERS, TUBEROUS ROOTS, CORMS, CROWNS AND RHIZOMES, DORMANT, IN GROWTH OR IN FLOWER; CHICORY PLANTS AND ROOTS OTHER THAN ROOTS OF HEADING 1212:Bulbs, tubers, tuberous roots, corms, crowns and rhizomes, in growth or in flower; chicory plants and roots :Plants" +6012022,"BULBS, TUBERS, TUBEROUS ROOTS, CORMS, CROWNS AND RHIZOMES, DORMANT, IN GROWTH OR IN FLOWER; CHICORY PLANTS AND ROOTS OTHER THAN ROOTS OF HEADING 1212:Bulbs, tubers, tuberous roots, corms, crowns and rhizomes, in growth or in flower; chicory plants and roots :Roots" +6012090,"BULBS, TUBERS, TUBEROUS ROOTS, CORMS, CROWNS AND RHIZOMES, DORMANT, IN GROWTH OR IN FLOWER; CHICORY PLANTS AND ROOTS OTHER THAN ROOTS OF HEADING 1212:Bulbs, tubers, tuberous roots, corms, crowns and rhizomes, in growth or in 
flower; chicory plants and roots :Other" +6020000,other live plants including their roots cuttings and slips mushroom spawn other live plants including their roots cuttings and slips mushroom spawn other live plants including their roots cuttings and slips mushroom spawn +6021000,"OTHER LIVE PLANTS (INCLUDING THEIR ROOTS), CUTTINGS AND SLIPS; MUSHROOM SPAWN::Unrooted cuttings and slips" +6022000,other live plants including their roots cuttings and slips mushroom spawn other live plants including their roots cuttings and slips mushroom spawn other live plants including their roots cuttings and slips mushroom spawn >> trees shrubs and bushes grafted or not of kinds which bear edible fruits or nuts trees shrubs and bushes grafted or not of kinds which bear edible fruits or nuts trees shrubs and bushes grafted or not of kinds which bear edible fruits or nuts +6022010,"OTHER LIVE PLANTS (INCLUDING THEIR ROOTS), CUTTINGS AND SLIPS; MUSHROOM SPAWN:Trees, shrubs and bushes, grafted or not, of kinds, which bear edible fruits or nuts :Edible fruit or nut trees, grafted or not" +6022020,"OTHER LIVE PLANTS (INCLUDING THEIR ROOTS), CUTTINGS AND SLIPS; MUSHROOM SPAWN:Trees, shrubs and bushes, grafted or not, of kinds, which bear edible fruits or nuts :Cactus" +6022090,"OTHER LIVE PLANTS (INCLUDING THEIR ROOTS), CUTTINGS AND SLIPS; MUSHROOM SPAWN:Trees, shrubs and bushes, grafted or not, of kinds, which bear edible fruits or nuts :Other" +6023000,"OTHER LIVE PLANTS (INCLUDING THEIR ROOTS), CUTTINGS AND SLIPS; MUSHROOM SPAWN::Rhododendrons and azaleas, grafted or not" +6024000,"OTHER LIVE PLANTS (INCLUDING THEIR ROOTS), CUTTINGS AND SLIPS; MUSHROOM SPAWN::Roses, grafted or not" +6029000,other live plants including their roots cuttings and slips mushroom spawn other live plants including their roots cuttings and slips mushroom spawn other live plants including their roots cuttings and slips mushroom spawn >> other other other +6029010,"OTHER LIVE PLANTS (INCLUDING THEIR ROOTS), CUTTINGS AND SLIPS; MUSHROOM SPAWN:Other:Mushroom spawn" +6029020,"OTHER LIVE PLANTS (INCLUDING THEIR ROOTS), CUTTINGS AND SLIPS; MUSHROOM SPAWN:Other:Flowering plants (excluding roses and rhododendrons)" +6029030,"OTHER LIVE PLANTS (INCLUDING THEIR ROOTS), CUTTINGS AND SLIPS; MUSHROOM SPAWN:Other:Tissue culture plant" +6029090,"OTHER LIVE PLANTS (INCLUDING THEIR ROOTS), CUTTINGS AND SLIPS; MUSHROOM SPAWN:Other:Other" +6030000,cut flowers and flower buds of a kind suitable for bouquets or for ornamental purposes fresh dried dyed bleached impregnated or otherwise prepared fresh cut flowers and flower buds of a kind suitable for bouquets or for ornamental purposes fresh dried dyed bleached impregnated or otherwise prepared fresh cut flowers and flower buds of a kind suitable for bouquets or for ornamental purposes fresh dried dyed bleached impregnated or otherwise prepared fresh +6031100,"CUT FLOWERS AND FLOWER BUDS OF A KIND SUITABLE FOR BOUQUETS OR FOR ORNAMENTAL PURPOSES, FRESH, DRIED, DYED, BLEACHED, IMPREGNATED OR OTHERWISE PREPARED::Roses" +6031200,"CUT FLOWERS AND FLOWER BUDS OF A KIND SUITABLE FOR BOUQUETS OR FOR ORNAMENTAL PURPOSES, FRESH, DRIED, DYED, BLEACHED, IMPREGNATED OR OTHERWISE PREPARED::Carnations" +6031300,"CUT FLOWERS AND FLOWER BUDS OF A KIND SUITABLE FOR BOUQUETS OR FOR ORNAMENTAL PURPOSES, FRESH, DRIED, DYED, BLEACHED, IMPREGNATED OR OTHERWISE PREPARED::Orchids" +6031400,"CUT FLOWERS AND FLOWER BUDS OF A KIND SUITABLE FOR BOUQUETS OR FOR ORNAMENTAL PURPOSES, FRESH, DRIED, DYED, BLEACHED, IMPREGNATED OR 
OTHERWISE PREPARED::Chrysanthemums" +6031500,"CUT FLOWERS AND FLOWER BUDS OF A KIND SUITABLE FOR BOUQUETS OR FOR ORNAMENTAL PURPOSES, FRESH, DRIED, DYED, BLEACHED, IMPREGNATED OR OTHERWISE PREPARED:Lilies:Lilies (Lilium spp.)" +6031900,"CUT FLOWERS AND FLOWER BUDS OF A KIND SUITABLE FOR BOUQUETS OR FOR ORNAMENTAL PURPOSES, FRESH, DRIED, DYED, BLEACHED, IMPREGNATED OR OTHERWISE PREPARED::Other" +6039000,"CUT FLOWERS AND FLOWER BUDS OF A KIND SUITABLE FOR BOUQUETS OR FOR ORNAMENTAL PURPOSES, FRESH, DRIED, DYED, BLEACHED, IMPREGNATED OR OTHERWISE PREPARED::Other" +6040000,foliage branches and other parts of plants without flowers or flower buds and grasses mosses and lichens being goods of a kind suitable for bouquets or for ornamental purposes fresh dried dyed bleached impregnated or otherwise prepared +6042000,"FOLIAGE, BRANCHES AND OTHER PARTS OF PLANTS, WITHOUT FLOWERS OR FLOWER BUDS, AND GRASSES, MOSSES AND LICHENS, BEING GOODS OF A KIND SUITABLE FOR BOUQUETS OR FOR ORNAMENTAL PURPOSES, FRESH, DRIED, DYED, BLEACHED, IMPREGNATED OR OTHERWISE PREPARED:fresh:Fresh" +6049000,"FOLIAGE, BRANCHES AND OTHER PARTS OF PLANTS, WITHOUT FLOWERS OR FLOWER BUDS, AND GRASSES, MOSSES AND LICHENS, BEING GOODS OF A KIND SUITABLE FOR BOUQUETS OR FOR ORNAMENTAL PURPOSES, FRESH, DRIED, DYED, BLEACHED, IMPREGNATED OR OTHERWISE PREPARED:Other:Other" +6202991,"WOMEN'S OR GIRLS' OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, OTHER THAN THOSE OF HEADING 6204:Of other textile materials :Of silk(OLD tariff)" +6204191,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Of silk(OLD tariff)" +6204291,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Of silk(OLD tariff)" +6204391,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Of silk(OLD tariff)" +6204491,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Of silk(OLD tariff)" +6204691,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Of silk(OLD tariff)" +6207991,"MEN'S OR BOY'S SINGLETS AND OTHER VESTS, UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of other textile materials :Of silk(OLD tariff)" +6207992,"MEN'S OR BOY'S SINGLETS AND OTHER VESTS, UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of other textile materials :Of wool(OLD tariff)" +6214902,"Shawls, scarves, mufflers, mantillas, veils and the like:Of other textile materials :Chadars, cotton(OLD tariff)" +6214903,"Shawls, scarves, mufflers, mantillas, veils and the like:Of other textile materials :Odhani, cotton(OLD tariff)" +6302210,"Bed linen, table linen, toilet linen and kitchen linen:of cotton:Other bed linen, printed(OLD tariff)" +6307901,"Other made-up articles, including dress patterns:Other :Dress materials hand printed(OLD tariff)" +6403201,"FOOTWEAR 
WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Footwear with outer soles of leather, and uppers which consist of leather straps across the instep and around the big toe :All leather, closed toe(OLD tariff)" +6403202,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Footwear with outer soles of leather, and uppers which consist of leather straps across the instep and around the big toe :All leather, open toe(OLD tariff)" +6403511,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Covering the ankle :All leather shoes(OLD tariff)" +6406910,"PARTS OF FOOTWEAR (INCLUDING UPPERS WHETHER OR NOT ATTACHED TO SOLES OTHER THAN OUTER SOLES); REMOVABLE IN-SOLES, HEEL CUSHIONS AND SIMILAR ARTICLES; GAITERS, LEGGINGS AND SIMILAR ARTICLES, AND PARTS THEREOF::Other(OLD tariff)" +6812921,"FABRICATED ASBESTOS FIBRES; MIXTURES WITH A BASIS OF ASBESTOS OR WITH A BASIS OF ASBESTOS AND MAGNESIUM CARBONATE; ARTICLES OF SUCH MIXTURES OR OF ASBESTOS (FOR EXAMPLE, THREAD, WOVEN FABRIC, CLOTHING, HEADGEAR, FOOTWEAR, GASKETS), WHETHER OR NOT REINFORCED, OTHER THAN GOODS OF HEADING 6811 OR 6813:Paper, millboard and felt:Millboard(OLD tariff)" +6812991,"FABRICATED ASBESTOS FIBRES; MIXTURES WITH A BASIS OF ASBESTOS OR WITH A BASIS OF ASBESTOS AND MAGNESIUM CARBONATE; ARTICLES OF SUCH MIXTURES OR OF ASBESTOS (FOR EXAMPLE, THREAD, WOVEN FABRIC, CLOTHING, HEADGEAR, FOOTWEAR, GASKETS), WHETHER OR NOT REINFORCED, OTHER THAN GOODS OF HEADING 6811 OR 6813:Other:Lagging compounds(OLD tariff)" +6812992,"FABRICATED ASBESTOS FIBRES; MIXTURES WITH A BASIS OF ASBESTOS OR WITH A BASIS OF ASBESTOS AND MAGNESIUM CARBONATE; ARTICLES OF SUCH MIXTURES OR OF ASBESTOS (FOR EXAMPLE, THREAD, WOVEN FABRIC, CLOTHING, HEADGEAR, FOOTWEAR, GASKETS), WHETHER OR NOT REINFORCED, OTHER THAN GOODS OF HEADING 6811 OR 6813:Other:Asbestos packing joints and gaskets(OLD tariff)" +6911101,"TABLEWARE, KITCHENWARE, OTHER HOUSEHOLD ARTICLES AND TOILET ARTICLES, OF PORCELAIN OR CHINA:Tableware and kitchenware:Tableware(OLD tariff)" +6911102,"TABLEWARE, KITCHENWARE, OTHER HOUSEHOLD ARTICLES AND TOILET ARTICLES, OF PORCELAIN OR CHINA:Tableware and kitchenware:Kitchenware(OLD tariff)" +7004201,"DRAWN GLASS AND BLOWN GLASS, IN SHEETS, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Glass, coloured throughout the mass (body tinted), opacified, flashed or having an absorbent, reflecting or non-reflecting layer :Window glass (sheet glass)(OLD tariff)" +7004209,"DRAWN GLASS AND BLOWN GLASS, IN SHEETS, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Glass, coloured throughout the mass (body tinted), opacified, flashed or having an absorbent, reflecting or non-reflecting layer :Other(OLD tariff)" +7004901,"DRAWN GLASS AND BLOWN GLASS, IN SHEETS, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Other glass :Window glass (sheet glass)(OLD tariff)" +7004909,"DRAWN GLASS AND BLOWN GLASS, IN SHEETS, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Other glass :Other(OLD tariff)" +7010000,potatoes fresh or chilled +7011000,"POTATOES, FRESH OR CHILLED::Seed" +7019000,"POTATOES, FRESH OR CHILLED::Other" +7020000,"::TOMATOES, FRESH OR CHILLED" +7020001,"OTHER ARTICLES OF GLASS:Other articles of glass:Glass shells, glass globes and glass founts(OLD 
tariff)" +7020002,OTHER ARTICLES OF GLASS:Other articles of glass:Glass chimneys(OLD tariff) +7030000,onions shallots garlic leeks and other alliaceous vegetables fresh or chilled +7031000,onions shallots garlic leeks and other alliaceous vegetables fresh or chilled >> onions and shallots onions +7031010,"ONIONS, SHALLOTS, GARLIC, LEEKS AND OTHER ALLIACEOUS VEGETABLES, FRESH OR CHILLED:Onions and shallots:Onions(OLD tariff)" +7031011,onions shallots garlic leeks and other alliaceous vegetables fresh or chilled >> onions and shallots onions >> rose onion +7031019,onions shallots garlic leeks and other alliaceous vegetables fresh or chilled >> onions and shallots onions >> other +7031020,"ONIONS, SHALLOTS, GARLIC, LEEKS AND OTHER ALLIACEOUS VEGETABLES, FRESH OR CHILLED:Onions and shallots:Shallots" +7031090,"ONIONS, SHALLOTS, GARLIC, LEEKS AND OTHER ALLIACEOUS VEGETABLES, FRESH OR CHILLED:Onions and shallots:Other(OLD tariff)" +7032000,"ONIONS, SHALLOTS, GARLIC, LEEKS AND OTHER ALLIACEOUS VEGETABLES, FRESH OR CHILLED::Garlic" +7039000,"ONIONS, SHALLOTS, GARLIC, LEEKS AND OTHER ALLIACEOUS VEGETABLES, FRESH OR CHILLED::Leeks and other alliaceous vegetables" +7040000,cabbages cauliflowers kohlrabi kale and similar edible brassicas fresh or chilled +7041000,"CABBAGES, CAULIFLOWERS, KOHLRABI, KALE AND SIMILAR EDIBLE BRASSICAS, FRESH OR CHILLED::Cauliflowers and headed broccoli" +7042000,"CABBAGES, CAULIFLOWERS, KOHLRABI, KALE AND SIMILAR EDIBLE BRASSICAS, FRESH OR CHILLED::Brussels sprouts" +7049000,"CABBAGES, CAULIFLOWERS, KOHLRABI, KALE AND SIMILAR EDIBLE BRASSICAS, FRESH OR CHILLED::Other" +7050000,lettuce lactucasativa and chicory cichorium spp fresh or chilled lettuce lettuce lactucasativa and chicory cichorium spp fresh or chilled lettuce +7051100,"LETTUCE (LACTUCAS SATIVA) AND CHICORY (CICHORIUM SPP. ), FRESH OR CHILLED::Cabbage lettuce (head lettuce)" +7051900,"LETTUCE (LACTUCAS SATIVA) AND CHICORY (CICHORIUM SPP. ), FRESH OR CHILLED::Other " +7052100,"LETTUCE (LACTUCAS SATIVA) AND CHICORY (CICHORIUM SPP. ), FRESH OR CHILLED::Witloof chicory (Cichorium intybus var. foliosum)" +7052900,"LETTUCE (LACTUCAS SATIVA) AND CHICORY (CICHORIUM SPP. 
), FRESH OR CHILLED::Other" +7060000,carrots turnips salad beetroot salsify celeriac radishes and similar edible roots fresh or chilled carrots turnips salad beetroot salsify celeriac radishes and similar edible roots fresh or chilled +7061000,"CARROTS, TURNIPS, SALAD BEETROOT, SALSIFY, CELERIAC, RADISHES AND SIMILAR EDIBLE ROOTS, FRESH OR CHILLED::Carrots and turnips" +7069000,carrots turnips salad beetroot salsify celeriac radishes and similar edible roots fresh or chilled carrots turnips salad beetroot salsify celeriac radishes and similar edible roots fresh or chilled >> other other +7069010,"CARROTS, TURNIPS, SALAD BEETROOT, SALSIFY, CELERIAC, RADISHES AND SIMILAR EDIBLE ROOTS, FRESH OR CHILLED:Other :Horse radish" +7069020,"CARROTS, TURNIPS, SALAD BEETROOT, SALSIFY, CELERIAC, RADISHES AND SIMILAR EDIBLE ROOTS, FRESH OR CHILLED:Other :Other Radish" +7069030,"CARROTS, TURNIPS, SALAD BEETROOT, SALSIFY, CELERIAC, RADISHES AND SIMILAR EDIBLE ROOTS, FRESH OR CHILLED:Other :Salad beetroot" +7069090,"CARROTS, TURNIPS, SALAD BEETROOT, SALSIFY, CELERIAC, RADISHES AND SIMILAR EDIBLE ROOTS, FRESH OR CHILLED:Other :Other" +7070000,"::CUCUMBERS OR GHERKINS, FRESH OR CHILLED" +7080000,leguminous vegetables shelled or unshelled fresh or chilled leguminous vegetables shelled or unshelled fresh or chilled +7081000,"LEGUMINOUS VEGETABLES, SHELLED OR UNSHELLED, FRESH OR CHILLED::Peas (Pisum sativum)" +7082000,"LEGUMINOUS VEGETABLES, SHELLED OR UNSHELLED, FRESH OR CHILLED::Beans (Vigna spp., Phaseolus spp.)" +7089000,"LEGUMINOUS VEGETABLES, SHELLED OR UNSHELLED, FRESH OR CHILLED::Other leguminous vegetables" +7090000,other vegetables fresh or chilled other vegetables fresh or chilled +7092000,"OTHER VEGETABLES, FRESH OR CHILLED::Asparagus" +7093000,"OTHER VEGETABLES, FRESH OR CHILLED::Aubergines (egg-plants)" +7094000,"OTHER VEGETABLES, FRESH OR CHILLED::Celery other than celeraic " +7095100,"OTHER VEGETABLES, FRESH OR CHILLED::Mushrooms of the genus agaricus" +7095200,"OTHER VEGETABLES, FRESH OR CHILLED:Mushrooms of the genus Boletus:Mushrooms of the genus Boletus" +7095300,"OTHER VEGETABLES, FRESH OR CHILLED:Mushroom:Mushrooms of the genus Cantharellus" +7095400,"OTHER VEGETABLES, FRESH OR CHILLED:Shiitake (Lentinus edodes):Shiitake (Lentinus edodes)" +7095500,"OTHER VEGETABLES, FRESH OR CHILLED:matsutake:Matsutake (Tricholoma matsutake, Tricholoma magnivelare, Tricholoma anatolicum, Tricholoma dulciolens, Tricholoma caligatum)" +7095600,"OTHER VEGETABLES, FRESH OR CHILLED:Truffles (Tuber spp.):Truffles (Tuber spp.)" +7095900,"OTHER VEGETABLES, FRESH OR CHILLED::Other" +7096000,other vegetables fresh or chilled other vegetables fresh or chilled >> fruits of the genus capsicum or of the genus pimenta fruits of the genus capsicum or of the genus pimenta +7096010,"OTHER VEGETABLES, FRESH OR CHILLED:Fruits of the genus Capsicum or of the genus pimenta :Green chilly" +7096090,"OTHER VEGETABLES, FRESH OR CHILLED:Fruits of the genus Capsicum or of the genus pimenta :Other" +7097000,"OTHER VEGETABLES, FRESH OR CHILLED::Spinach, New Zealand spinach and orache spinach (garden spinach)" +7099100,"OTHER VEGETABLES, FRESH OR CHILLED:other testing:Glove artichokes" +7099200,"OTHER VEGETABLES, FRESH OR CHILLED:other:Olives" +7099300,"OTHER VEGETABLES, FRESH OR CHILLED:Pumpkins, squash and guards (Cucurbita spp.):Pumpkins, squash and guards (Cucurbita spp.)" +7099310,"OTHER VEGETABLES, FRESH OR CHILLED:Pumpkins, squash and guards (Cucurbita spp.):pumpkins" +7099320,"OTHER VEGETABLES, FRESH OR CHILLED:Pumpkins, squash 
and guards (Cucurbita spp.):squash" +7099330,"OTHER VEGETABLES, FRESH OR CHILLED:Pumpkins, squash and guards (Cucurbita spp.):Bitter guard" +7099340,"OTHER VEGETABLES, FRESH OR CHILLED:Pumpkins, squash and guards (Cucurbita spp.):Bottle guard" +7099350,"OTHER VEGETABLES, FRESH OR CHILLED:Pumpkins, squash and guards (Cucurbita spp.):Snake guard" +7099360,"OTHER VEGETABLES, FRESH OR CHILLED:Pumpkins, squash and guards (Cucurbita spp.):Coccinia (Kundru)" +7099390,"OTHER VEGETABLES, FRESH OR CHILLED:Pumpkins, squash and guards (Cucurbita spp.):Other" +7099900,other vegetables fresh or chilled other vegetables fresh or chilled >> other other +7099910,"OTHER VEGETABLES, FRESH OR CHILLED:Other:Green Pepper" +7099920,"OTHER VEGETABLES, FRESH OR CHILLED:Other:Mixed Vegetables(OLD tariff)" +7099930,"OTHER VEGETABLES, FRESH OR CHILLED:Other:Okra/Lady finger (Bhindi)" +7099990,"OTHER VEGETABLES, FRESH OR CHILLED:Other:other" +7100000,vegetables uncooked or cooked by steaming or boiling in water frozen vegetables uncooked or cooked by steaming or boiling in water frozen +7101000,"VEGETABLES (UNCOOKED OR COOKED BY STEAMING OR BOILING IN WATER), FROZEN::Potatoes " +7102100,"VEGETABLES (UNCOOKED OR COOKED BY STEAMING OR BOILING IN WATER), FROZEN::Peas (Pisum sativum)" +7102200,"VEGETABLES (UNCOOKED OR COOKED BY STEAMING OR BOILING IN WATER), FROZEN::Beans (Vigna spp., Phaseolus spp.)" +7102900,"VEGETABLES (UNCOOKED OR COOKED BY STEAMING OR BOILING IN WATER), FROZEN::Other" +7103000,"VEGETABLES (UNCOOKED OR COOKED BY STEAMING OR BOILING IN WATER), FROZEN::Spinach, New Zealand spinach and orache spinach (garden spinach)" +7103101,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Precious stones(OLD tariff)" +7103102,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Semi-precious stones(OLD tariff)" +7104000,"VEGETABLES (UNCOOKED OR COOKED BY STEAMING OR BOILING IN WATER), FROZEN::Sweet corn" +7108000,vegetables uncooked or cooked by steaming or boiling in water frozen vegetables uncooked or cooked by steaming or boiling in water frozen >> other vegetables other vegetables +7108010,"VEGETABLES (UNCOOKED OR COOKED BY STEAMING OR BOILING IN WATER), FROZEN:Other vegetables:Terragon" +7108090,"VEGETABLES (UNCOOKED OR COOKED BY STEAMING OR BOILING IN WATER), FROZEN:Other vegetables:Other" +7109000,"VEGETABLES (UNCOOKED OR COOKED BY STEAMING OR BOILING IN WATER), FROZEN::Mixtures of vegetables" +7110000,unsuitable in that state for immediate consumption unsuitable in that state for immediate consumption +7112000,"VEGETABLES PROVISIONALLY PRESERVED (FOR EXAMPLE, BY SULPHUR DIOXIDE GAS, IN BRINE, IN SULPHUR WATER OR IN OTHER PRESERVATIVE SOLUTIONS), BUT UNSUITABLE IN THAT STATE FOR IMMEDIATE CONSUMPTION::Olives" +7114000,"VEGETABLES PROVISIONALLY PRESERVED (FOR EXAMPLE, BY SULPHUR DIOXIDE GAS, IN BRINE, IN SULPHUR WATER OR IN OTHER PRESERVATIVE SOLUTIONS), BUT UNSUITABLE IN THAT STATE FOR IMMEDIATE CONSUMPTION::Cucumbers and gherkins " +7115100,"VEGETABLES PROVISIONALLY PRESERVED (FOR EXAMPLE, BY SULPHUR DIOXIDE GAS, IN BRINE, IN SULPHUR WATER 
OR IN OTHER PRESERVATIVE SOLUTIONS), BUT UNSUITABLE IN THAT STATE FOR IMMEDIATE CONSUMPTION::Mushrooms of the gensus agaricus" +7115900,"VEGETABLES PROVISIONALLY PRESERVED (FOR EXAMPLE, BY SULPHUR DIOXIDE GAS, IN BRINE, IN SULPHUR WATER OR IN OTHER PRESERVATIVE SOLUTIONS), BUT UNSUITABLE IN THAT STATE FOR IMMEDIATE CONSUMPTION::Other" +7119000,unsuitable in that state for immediate consumption unsuitable in that state for immediate consumption >> other vegetables mixtures of vegetables other vegetables mixtures of vegetables +7119010,"VEGETABLES PROVISIONALLY PRESERVED (FOR EXAMPLE, BY SULPHUR DIOXIDE GAS, IN BRINE, IN SULPHUR WATER OR IN OTHER PRESERVATIVE SOLUTIONS), BUT UNSUITABLE IN THAT STATE FOR IMMEDIATE CONSUMPTION:Other vegetables; mixtures of vegetables:Green pepper in brine" +7119020,"VEGETABLES PROVISIONALLY PRESERVED (FOR EXAMPLE, BY SULPHUR DIOXIDE GAS, IN BRINE, IN SULPHUR WATER OR IN OTHER PRESERVATIVE SOLUTIONS), BUT UNSUITABLE IN THAT STATE FOR IMMEDIATE CONSUMPTION:Other vegetables; mixtures of vegetables:Assorted canned vegetables" +7119090,"VEGETABLES PROVISIONALLY PRESERVED (FOR EXAMPLE, BY SULPHUR DIOXIDE GAS, IN BRINE, IN SULPHUR WATER OR IN OTHER PRESERVATIVE SOLUTIONS), BUT UNSUITABLE IN THAT STATE FOR IMMEDIATE CONSUMPTION:Other vegetables; mixtures of vegetables:Other" +7120000,dried vegetables whole cut sliced broken or in powder but not further dried vegetables whole cut sliced broken or in powder but not further prepared +7122000,"DRIED VEGETABLES, WHOLE, CUT, SLICED, BROKEN OR IN POWDER, BUT NOT FURTHER PREPARED::Onions " +7123100,"DRIED VEGETABLES, WHOLE, CUT, SLICED, BROKEN OR IN POWDER, BUT NOT FURTHER PREPARED::Mushrooms of the genus lgaricus" +7123200,"DRIED VEGETABLES, WHOLE, CUT, SLICED, BROKEN OR IN POWDER, BUT NOT FURTHER PREPARED::Wood ears (Auricularia spp.)" +7123300,"DRIED VEGETABLES, WHOLE, CUT, SLICED, BROKEN OR IN POWDER, BUT NOT FURTHER PREPARED::Jelly fungi (Tremella spp.)" +7123400,"DRIED VEGETABLES, WHOLE, CUT, SLICED, BROKEN OR IN POWDER, BUT NOT FURTHER PREPARED:Shittake:Shiitake (Lentinus edodes)" +7123900,"DRIED VEGETABLES, WHOLE, CUT, SLICED, BROKEN OR IN POWDER, BUT NOT FURTHER PREPARED::Other" +7129000,dried vegetables whole cut sliced broken or in powder but not further dried vegetables whole cut sliced broken or in powder but not further prepared >> other vegetables mixtures of vegetables +7129010,"DRIED VEGETABLES, WHOLE, CUT, SLICED, BROKEN OR IN POWDER, BUT NOT FURTHER PREPARED:Other vegetables; mixtures of vegetables:Asparagus" +7129020,"DRIED VEGETABLES, WHOLE, CUT, SLICED, BROKEN OR IN POWDER, BUT NOT FURTHER PREPARED:Other vegetables; mixtures of vegetables:Dehydrated garlic powder" +7129030,"DRIED VEGETABLES, WHOLE, CUT, SLICED, BROKEN OR IN POWDER, BUT NOT FURTHER PREPARED:Other vegetables; mixtures of vegetables:Dehydrated garlic flakes" +7129040,"DRIED VEGETABLES, WHOLE, CUT, SLICED, BROKEN OR IN POWDER, BUT NOT FURTHER PREPARED:Other vegetables; mixtures of vegetables:Dried garlic" +7129050,"DRIED VEGETABLES, WHOLE, CUT, SLICED, BROKEN OR IN POWDER, BUT NOT FURTHER PREPARED:Other vegetables; mixtures of vegetables:Marjoram, Oregano" +7129060,"DRIED VEGETABLES, WHOLE, CUT, SLICED, BROKEN OR IN POWDER, BUT NOT FURTHER PREPARED:Other vegetables; mixtures of vegetables:Potatoes" +7129090,"DRIED VEGETABLES, WHOLE, CUT, SLICED, BROKEN OR IN POWDER, BUT NOT FURTHER PREPARED:Other vegetables; mixtures of vegetables:Other" +7130000,dried leguminous vegetables shelled whether or not skinned or split +7131000,"DRIED LEGUMINOUS 
VEGETABLES, SHELLED, WHETHER OR NOT SKINNED OR SPLIT:Peas (PISUM SATIVUM):Peas (Pisum sativum)" +7131010,"DRIED LEGUMINOUS VEGETABLES, SHELLED, WHETHER OR NOT SKINNED OR SPLIT:Peas (PISUM SATIVUM):Yellow Peas" +7131020,"DRIED LEGUMINOUS VEGETABLES, SHELLED, WHETHER OR NOT SKINNED OR SPLIT:Peas (PISUM SATIVUM):Green Peas" +7131090,"DRIED LEGUMINOUS VEGETABLES, SHELLED, WHETHER OR NOT SKINNED OR SPLIT:Peas (PISUM SATIVUM):Other" +7132000,"DRIED LEGUMINOUS VEGETABLES, SHELLED, WHETHER OR NOT SKINNED OR SPLIT:Chikpeas:Chickpeas (garbanzos) " +7132010,"DRIED LEGUMINOUS VEGETABLES, SHELLED, WHETHER OR NOT SKINNED OR SPLIT:Chikpeas:kabuli Channa" +7132020,"DRIED LEGUMINOUS VEGETABLES, SHELLED, WHETHER OR NOT SKINNED OR SPLIT:Chikpeas:Bengal gram (desi channa)" +7132090,"DRIED LEGUMINOUS VEGETABLES, SHELLED, WHETHER OR NOT SKINNED OR SPLIT:Chikpeas:Other" +7133100,"DRIED LEGUMINOUS VEGETABLES, SHELLED, WHETHER OR NOT SKINNED OR SPLIT:DRIED LEGUMINOUS VEGETABLES, SHELLED, WHETHER OR NOT SKINNED OR SPLIT:Beans of the species Vigna mungo (L.) Hepper or Vigna radiata (L.) Wilczek" +7133110,"DRIED LEGUMINOUS VEGETABLES, SHELLED, WHETHER OR NOT SKINNED OR SPLIT:DRIED LEGUMINOUS VEGETABLES, SHELLED, WHETHER OR NOT SKINNED OR SPLIT:BEANS OF THE SPECIES VIGNA MUNGO (L)HEPPER" +7133190,"DRIED LEGUMINOUS VEGETABLES, SHELLED, WHETHER OR NOT SKINNED OR SPLIT:DRIED LEGUMINOUS VEGETABLES, SHELLED, WHETHER OR NOT SKINNED OR SPLIT:BEANS OF THE SPECIES VIGNA RADIATA (L) WILCZEK" +7133200,"DRIED LEGUMINOUS VEGETABLES, SHELLED, WHETHER OR NOT SKINNED OR SPLIT::Small red (Adzuki) beans (Phaseolus or Vigna angularis)" +7133300,"DRIED LEGUMINOUS VEGETABLES, SHELLED, WHETHER OR NOT SKINNED OR SPLIT::Kidney beans, including white pea beans (Phaseolus vulgaris)" +7133400,"DRIED LEGUMINOUS VEGETABLES, SHELLED, WHETHER OR NOT SKINNED OR SPLIT:Bambara beans:Bambara beans (Vigna subterranea or Voandzeia subterranea)" +7133500,"DRIED LEGUMINOUS VEGETABLES, SHELLED, WHETHER OR NOT SKINNED OR SPLIT:Cow peas:Cow peas (Vigna unguiculata)" +7133900,dried leguminous vegetables shelled whether or not skinned or split >> other +7133910,"DRIED LEGUMINOUS VEGETABLES, SHELLED, WHETHER OR NOT SKINNED OR SPLIT:Other :Guar seeds" +7133990,"DRIED LEGUMINOUS VEGETABLES, SHELLED, WHETHER OR NOT SKINNED OR SPLIT:Other :Other" +7134000,"DRIED LEGUMINOUS VEGETABLES, SHELLED, WHETHER OR NOT SKINNED OR SPLIT::Lentils" +7135000,"DRIED LEGUMINOUS VEGETABLES, SHELLED, WHETHER OR NOT SKINNED OR SPLIT::Broad beans (Vicia faba var major) and horse beans (Vicia faba var equina, Vicia faba var minor)" +7136000,"DRIED LEGUMINOUS VEGETABLES, SHELLED, WHETHER OR NOT SKINNED OR SPLIT:Pigeon peas:Pigeon peas (Cajanus cajan)" +7139000,dried leguminous vegetables shelled whether or not skinned or split >> other +7139010,"DRIED LEGUMINOUS VEGETABLES, SHELLED, WHETHER OR NOT SKINNED OR SPLIT:Other :Split" +7139090,"DRIED LEGUMINOUS VEGETABLES, SHELLED, WHETHER OR NOT SKINNED OR SPLIT:Other :Other" +7140000,manioc arrowroot salep jerusalem artichokes sweet potatoes and similar roots and tubers with high starch or inulin content fresh chilled frozen or dried whether or not sliced or in the form of pellets sago pith +7141000,"MANIOC, ARROWROOT, SALEP, JERUSALEM ARTICHOKES, SWEET POTATOES AND SIMILAR ROOTS AND TUBERS WITH HIGH STARCH OR INULIN CONTENT, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT SLICED OR IN THE FORM OF PELLETS; SAGO PITH::Manioc (cassava)" +7142000,"MANIOC, ARROWROOT, SALEP, JERUSALEM ARTICHOKES, SWEET POTATOES AND SIMILAR ROOTS AND TUBERS WITH 
HIGH STARCH OR INULIN CONTENT, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT SLICED OR IN THE FORM OF PELLETS; SAGO PITH::Sweet potatoes" +7143000,"MANIOC, ARROWROOT, SALEP, JERUSALEM ARTICHOKES, SWEET POTATOES AND SIMILAR ROOTS AND TUBERS WITH HIGH STARCH OR INULIN CONTENT, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT SLICED OR IN THE FORM OF PELLETS; SAGO PITH:Yams:Yams (Dioscorea spp.)" +7144000,"MANIOC, ARROWROOT, SALEP, JERUSALEM ARTICHOKES, SWEET POTATOES AND SIMILAR ROOTS AND TUBERS WITH HIGH STARCH OR INULIN CONTENT, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT SLICED OR IN THE FORM OF PELLETS; SAGO PITH:Taro:Taro (Colocasia spp.)" +7145000,"MANIOC, ARROWROOT, SALEP, JERUSALEM ARTICHOKES, SWEET POTATOES AND SIMILAR ROOTS AND TUBERS WITH HIGH STARCH OR INULIN CONTENT, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT SLICED OR IN THE FORM OF PELLETS; SAGO PITH:Yautia:Yautia (Xanthosoma spp.)" +7149000,manioc arrowroot salep jerusalem artichokes sweet potatoes and similar roots and tubers with high starch or inulin content fresh chilled frozen or dried whether or not sliced or in the form of pellets sago pith >> other +7149010,"MANIOC, ARROWROOT, SALEP, JERUSALEM ARTICHOKES, SWEET POTATOES AND SIMILAR ROOTS AND TUBERS WITH HIGH STARCH OR INULIN CONTENT, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT SLICED OR IN THE FORM OF PELLETS; SAGO PITH:Other:Sago pith" +7149090,"MANIOC, ARROWROOT, SALEP, JERUSALEM ARTICHOKES, SWEET POTATOES AND SIMILAR ROOTS AND TUBERS WITH HIGH STARCH OR INULIN CONTENT, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT SLICED OR IN THE FORM OF PELLETS; SAGO PITH:Other:Other" +7202991,"FERRO-ALLOYS:Other :Ferro-phosphorus, Ferro-selenium, Ferro-cobalt, Ferro-columbium, Ferro-zirconium, Ferro-tantalum(OLD tariff)" +7202992,"FERRO-ALLOYS:Other :Ferro-silico-zirconium, Ferro-silico-magnesium(OLD tariff)" +7202993,"FERRO-ALLOYS:Other :Ferro-boron, charge-chrome(OLD tariff)" +7205101,"GRANULES AND POWDERS, OF PIG IRON, SPIEGELEISEN, IRON OR STEEL:Granules :Of iron(OLD tariff)" +7205102,"GRANULES AND POWDERS, OF PIG IRON, SPIEGELEISEN, IRON OR STEEL:Granules :Of alloy steel(OLD tariff)" +7206901,IRON AND NON-ALLOY STEEL IN INGOTS OR OTHER PRIMARY FORMS (EXCLUDING IRON OF HEADING 7203):Other :Of iron(OLD tariff) +7206909,IRON AND NON-ALLOY STEEL IN INGOTS OR OTHER PRIMARY FORMS (EXCLUDING IRON OF HEADING 7203):Other :Other(OLD tariff) +7211901,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Other :Universal plates(OLD tariff)" +7217901,WIRE OF IRON OR NON-ALLOY STEEL:Other :Shaped and profiled wire(OLD tariff) +7217909,WIRE OF IRON OR NON-ALLOY STEEL:Other :Other(OLD tariff) +7219111,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness exceeding 10 mm :Chromium type(OLD tariff)" +7219211,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness exceeding 10 mm :Universal plates of stainless steel or heat resisting steel and chromium type(OLD tariff)" +7219212,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness exceeding 10 mm :Universal plates of stainless steel heat resisting steel, nickel chromium austenitic type(OLD tariff)" +7219213,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness exceeding 10 mm :Other chromium type(OLD tariff)" +7219214,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness exceeding 10 mm :Other nickel chromium 
austenitic type(OLD tariff)" +7219221,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of 4.75 mm or more but not exceeding 10 mm :Universal plates of stainless steel or heat resisting steel(OLD tariff)" +7219229,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of 4.75 mm or more but not exceeding 10 mm :Other(OLD tariff)" +7219241,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of less than 3 mm :Chromium type, of a thickness(OLD tariff)" +7219242,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of less than 3 mm :Nickel chromium austenitic type, of a thickness(OLD tariff)" +7219311,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of 4.75 mm or more :Chromium type(OLD tariff)" +7219312,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of 4.75 mm or more :Nickel chromium austenitic type(OLD tariff)" +7219901,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Other :Sheets and plates(OLD tariff)" +7220122,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF LESS THAN 600 MM:Of a thickness of less than 4.75 mm :Strips for pipes and tubes (other than skelp)(OLD tariff)" +7220202,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF LESS THAN 600 MM:Not further worked than cold-rolled (cold- reduced) :rolled (cold- reduced) - Strips for pipes and tubes (other than skelp)(OLD tariff)" +7220902,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF LESS THAN 600 MM:Other :Strips for pipes and tubes (other than skelp)(OLD tariff)" +7222111,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Of circular cross section :Bright bars(OLD tariff)" +7222119,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Of circular cross section :Other(OLD tariff)" +7222191,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Other :Bright bars(OLD tariff)" +7222199,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Other :Other(OLD tariff)" +7222201,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Bars and rods, not further worked than cold-formed or cold-finished :Bright bars(OLD tariff)" +7222209,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Bars and rods, not further worked than cold-formed or cold-finished :Other(OLD tariff)" +7222301,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Other bars and rods :Bright bars(OLD tariff)" +7222309,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Other bars and rods :Other(OLD tariff)" +7223009,WIRE OF STAINLESS STEEL:Wire of stainless steel :Other(OLD tariff) +7224909,OTHER ALLOY STEEL IN INGOTS OR OTHER PRIMARY FORMS; SEMI-FINISHED PRODUCTS OF OTHER ALLOY STEEL:Other :Other(OLD tariff) +7225401,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE:Other, not further worked than hot-rolled, not in coils :Of a thickness of above 4.75 mm(OLD tariff)" +7226201,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Of high speed steel :Hot-rolled(OLD tariff)" +7226202,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Of high speed steel :Cold-rolled(OLD tariff)" 
+7226995,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Other :Hoops and strips(OLD tariff)" +7228301,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Other bars and rods, not further worked than hot-rolled, hot-drawn or extruded :Bright bars(OLD tariff)" +7228302,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Other bars and rods, not further worked than hot-rolled, hot-drawn or extruded :Other(OLD tariff)" +7228601,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Other bars and rods :Bright bars(OLD tariff)" +7228609,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Other bars and rods :Other(OLD tariff)" +7228701,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Angles, shapes and sections :Not further worked than hot-rolled, hot-drawn or extruded(OLD tariff)" +7228702,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Angles, shapes and sections :Not further worked than cold-formed or cold-finished(OLD tariff)" +7229901,"WIRE OF OTHER ALLOY STEEL:Other :Tinned wire, bronze coated wire, trapezoidal wire, half round wire, crimped wire and copper coated wire, not insulated(OLD tariff)" +7229902,"WIRE OF OTHER ALLOY STEEL:Other :Electrode, quality wire rope quality and ACSR quality not insulated(OLD tariff)" +7229903,"WIRE OF OTHER ALLOY STEEL:Other :Wire (excluding wire type lead), spring, high tensile, hardened and tempered wires, not insulated(OLD tariff)" +7229905,WIRE OF OTHER ALLOY STEEL:Other :Shaped and profiled wires of cross-section(OLD tariff) +7304311,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Cold-drawn or cold-rolled (cold-reduced):Up to 114.3 mm outer diameter(OLD tariff)" +7304312,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Cold-drawn or cold-rolled (cold-reduced):Above 114.3 mm but up to 219.1 mm outer diameter(OLD tariff)" +7304313,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Cold-drawn or cold-rolled (cold-reduced):Above 219.1 mm diameter(OLD tariff)" +7304391,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Other:Up to 114.3 mm outer diameter(OLD tariff)" +7304392,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Other:Above 114.3 mm but up to 219.1 mm outer diameter(OLD tariff)" +7304393,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Other:Above 219.1 mm diameter(OLD tariff)" +7305111,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Longitudinally submerged arc welded :Galvanised pipes(OLD tariff)" +7305112,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Longitudinally submerged 
arc welded :Non-galvanised pipes(OLD tariff)" +7305121,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Other, longitudinally welded :Galvanised pipes(OLD tariff)" +7305122,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Other, longitudinally welded :Non-galvanised pipes(OLD tariff)" +7305191,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Other:Galvanised pipes(OLD tariff)" +7305192,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Other:Non-galvanised (black) pipes(OLD tariff)" +7305902,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Other :ERW precision tubes(OLD tariff)" +7305909,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Other :Other(OLD tariff)" +7306191,"OTHER TUBES, PIPES AND HOLLOW PROFILES (FOR EXAMPLE, OPEN SEAM OR WELDED, RIVETED OR SIMILARLY CLOSED), OF IRON OR STEEL:Other:Galvanised pipes(OLD tariff)" +7306192,"OTHER TUBES, PIPES AND HOLLOW PROFILES (FOR EXAMPLE, OPEN SEAM OR WELDED, RIVETED OR SIMILARLY CLOSED), OF IRON OR STEEL:Other:Non-galvanised pipes(OLD tariff)" +7306901,"OTHER TUBES, PIPES AND HOLLOW PROFILES (FOR EXAMPLE, OPEN SEAM OR WELDED, RIVETED OR SIMILARLY CLOSED), OF IRON OR STEEL:Other :ERW precision tubes(OLD tariff)" +7308201,"STRUCTURES (EXCLUDING PREFABRICATED BUILDINGS OF HEADING 9406) AND PARTS OF STRUCTURES (FOR EXAMPLE, BRIDGES AND BRIDGE-SECTIONS, LOCK-GATES, TOWERS, LATTICE MASTS, ROOFS, ROOFING FRAME-WORKS, DOORS AND WINDOWS AND THEIR FRAMES AND THRESHOLDS FOR DOORS, SHUTTERS, BALUSTRADES, PILLARS AND COLUMNS), OF IRON OR STEEL; PLATES, RODS, ANGLES, SHAPES, SECT TUBES AND THE LIKE, PREPARED FOR USE IN STRUCTURES, OF IRON OR STEEL:Towers and lattice masts :Towers, whether or not assembled(OLD tariff)" +7317001,"NAILS, TACKS, DRAWING PINS, CORRUGATED NAILS,:Nails, tacks, drawing pins, corrugated nails, staples (other than those of heading 8305) and similar articles, of iron or steel, whether or not with heads of other material, but excluding such articles with heads of copper :Nails(OLD tariff)" +7317002,"NAILS, TACKS, DRAWING PINS, CORRUGATED NAILS,:Nails, tacks, drawing pins, corrugated nails, staples (other than those of heading 8305) and similar articles, of iron or steel, whether or not with heads of other material, but excluding such articles with heads of copper :Spikes(OLD tariff)" +7317009,"NAILS, TACKS, DRAWING PINS, CORRUGATED NAILS,:Nails, tacks, drawing pins, corrugated nails, staples (other than those of heading 8305) and similar articles, of iron or steel, whether or not with heads of other material, but excluding such articles with heads of copper :Other(OLD tariff)" +7320101,"SPRINGS AND LEAVES FOR SPRINGS, OF IRON OR STEEL:Leaf-springs and leaves therefor :Leaf springs(OLD tariff)" +7325999,OTHER CAST ARTICLES OF IRON OR STEEL:Other :Other(OLD tariff) +7326909,OTHER ARTICLES OF IRON OR 
STEEL:Other :Other(OLD tariff) +7404001,COPPER WASTE AND SCRAP:Copper waste and scrap:Of copper(OLD tariff) +7404002,COPPER WASTE AND SCRAP:Copper waste and scrap:Of copper alloys(OLD tariff) +7407105,"COPPER BARS, RODS AND PROFILES:Of refined copper :Profiles(OLD tariff)" +7407292,"COPPER BARS, RODS AND PROFILES:Other :Profiles(OLD tariff)" +7412201,"COPPER TUBE OR PIPE FITTINGS (FOR EXAMPLE, COUPLINGS, ELBOWS, SLEEVES):Of copper alloys :Brass(OLD tariff)" +7418192,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF COPPER; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF COPPER; SANITARY WARE AND PARTS THEREOF, OF COPPER:Other :Utensils(OLD tariff)" +7419102,OTHER ARTICLES OF COPPER:Chain and parts thereof :Parts(OLD tariff) +7604103,"ALUMINIUM BARS, RODS AND PROFILES:Of aluminium, not alloyed :Profiles(OLD tariff)" +7605199,ALUMINIUM WIRE:Other :Other(OLD tariff) +7607199,"ALUMINIUM FOIL (WHETHER OR NOT PRINTED OR BACKED WITH PAPER, PAPERBOARD, PLASTICS OR SIMILAR BACKING MATERIALS) OF A THICKNESS (EXCLUDING ANY BACKING) NOT EXCEEDING 0.2 MM:Other :Other(OLD tariff)" +7613001,ALUMINIUM CONTAINERS FOR COMPRESSED OR LIQUEFIED GAS:Aluminium containers for compressed or liquefied gas :Low pressure cylinders(OLD tariff) +7613002,ALUMINIUM CONTAINERS FOR COMPRESSED OR LIQUEFIED GAS:Aluminium containers for compressed or liquefied gas :High pressure cylinders(OLD tariff) +7613009,ALUMINIUM CONTAINERS FOR COMPRESSED OR LIQUEFIED GAS:Aluminium containers for compressed or liquefied gas :Other(OLD tariff) +7904001,"ZINC BARS, RODS, PROFILES AND WIRE:Zinc bars, rods, profiles and wire :Bars and rods(OLD tariff)" +7904002,"ZINC BARS, RODS, PROFILES AND WIRE:Zinc bars, rods, profiles and wire :Profiles(OLD tariff)" +8010000,coconuts brazil nuts and cashew nuts fresh or dried whether or not shelled or peeled coconuts +8011100,"COCONUTS, BRAZIL NUTS AND CASHEW NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED::Desiccated" +8011200,coconuts brazil nuts and cashew nuts fresh or dried whether or not shelled or peeled coconuts >> in the inner shell endocarp +8011210,"COCONUTS, BRAZIL NUTS AND CASHEW NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED:In the inner shell (Endocarp):Fresh" +8011220,"COCONUTS, BRAZIL NUTS AND CASHEW NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED:In the inner shell (Endocarp):Dried" +8011290,"COCONUTS, BRAZIL NUTS AND CASHEW NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED:In the inner shell (Endocarp):Other" +8011900,coconuts brazil nuts and cashew nuts fresh or dried whether or not shelled or peeled coconuts >> other +8011910,"COCONUTS, BRAZIL NUTS AND CASHEW NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED:Other:Fresh" +8011920,"COCONUTS, BRAZIL NUTS AND CASHEW NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED:Other:Dried" +8011990,"COCONUTS, BRAZIL NUTS AND CASHEW NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED:Other:Other" +8012100,"COCONUTS, BRAZIL NUTS AND CASHEW NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED::In shell" +8012200,"COCONUTS, BRAZIL NUTS AND CASHEW NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED::Shelled" +8013100,"COCONUTS, BRAZIL NUTS AND CASHEW NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED::In shell" +8013200,coconuts brazil nuts and cashew nuts fresh or dried whether or not shelled or peeled coconuts >> shelled +8013210,"COCONUTS, BRAZIL NUTS AND CASHEW NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED:Shelled :Cashew kernel, broken" 
+8013220,"COCONUTS, BRAZIL NUTS AND CASHEW NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED:Shelled :Cashew kernel, whole" +8013290,"COCONUTS, BRAZIL NUTS AND CASHEW NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED:Shelled :Other" +8020000,other nuts fresh or dried whether or not shelled or peeled almonds +8021100,"OTHER NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED::In shell" +8021200,"OTHER NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED::Shelled" +8022100,"OTHER NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED::In shell" +8022200,"OTHER NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED::Shelled" +8023100,"OTHER NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED::In shell" +8023200,"OTHER NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED::Shelled" +8024100,"OTHER NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED:Chestnuts:In shell" +8024200,"OTHER NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED:Shelled:Shelled" +8025100,"OTHER NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED:Pistachios:In shell" +8025200,"OTHER NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED:Shelled:Shelled" +8026100,"OTHER NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED:Macadamia nuts:In shell" +8026200,"OTHER NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED:Shelled:Shelled" +8027000,"OTHER NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED:Kola nuts:Kola nuts (Cola spp.)" +8028000,other nuts fresh or dried whether or not shelled or peeled almonds >> areca nuts +8028010,"OTHER NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED:Areca nuts:Whole" +8028020,"OTHER NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED:Areca nuts:Split" +8028030,"OTHER NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED:Areca nuts:Ground" +8028090,"OTHER NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED:Areca nuts:Other" +8029000,"OTHER NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED:Other :Other(OLD tariff)" +8029100,"OTHER NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED:pine nuts:Pine nuts, in shell" +8029200,"OTHER NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED:Pine nuts:pine nuts, shelled" +8029900,"OTHER NUTS, FRESH OR DRIED, WHETHER OR NOT SHELLED OR PEELED:other:other" +8030000,bananas including plantains fresh or dried +8031000,bananas including plantains fresh or dried >> plantains +8031010,"Bananas, including Plantains, Fresh:Plaintains:Curry Plantain" +8031090,"Bananas, including Plantains, Fresh:Plaintains:other" +8039000,bananas including plantains fresh or dried >> other +8039010,"Bananas, including Plantains, Fresh:other:Bananas, Fresh" +8039090,"Bananas, including Plantains, Fresh:other:other" +8040000,dates figs pineapples avocados guavas mangoes and mangosteens fresh or dried +8041000,dates figs pineapples avocados guavas mangoes and mangosteens fresh or dried >> dates +8041010,"DATES, FIGS, PINEAPPLES, AVOCADOS, GUAVAS, MANGOES, AND MANGOSTEENS, FRESH OR DRIED:Dates :Fresh (excluding wet dates)" +8041020,"DATES, FIGS, PINEAPPLES, AVOCADOS, GUAVAS, MANGOES, AND MANGOSTEENS, FRESH OR DRIED:Dates :Soft (khayzur or wet dates)" +8041030,"DATES, FIGS, PINEAPPLES, AVOCADOS, GUAVAS, MANGOES, AND MANGOSTEENS, FRESH OR DRIED:Dates :Hard (chhohara or kharek)" +8041090,"DATES, FIGS, PINEAPPLES, AVOCADOS, GUAVAS, MANGOES, AND MANGOSTEENS, FRESH OR DRIED:Dates :Other" +8042000,dates figs pineapples avocados guavas mangoes and mangosteens fresh or dried >> figs +8042010,"DATES, FIGS, PINEAPPLES, AVOCADOS, GUAVAS, 
MANGOES, AND MANGOSTEENS, FRESH OR DRIED:Figs :Fresh" +8042090,"DATES, FIGS, PINEAPPLES, AVOCADOS, GUAVAS, MANGOES, AND MANGOSTEENS, FRESH OR DRIED:Figs :Other" +8043000,"DATES, FIGS, PINEAPPLES, AVOCADOS, GUAVAS, MANGOES, AND MANGOSTEENS, FRESH OR DRIED::Pineapples" +8044000,"DATES, FIGS, PINEAPPLES, AVOCADOS, GUAVAS, MANGOES, AND MANGOSTEENS, FRESH OR DRIED::Avocados" +8045000,dates figs pineapples avocados guavas mangoes and mangosteens fresh or dried >> guavas mangoes and mangosteens +8045010,"DATES, FIGS, PINEAPPLES, AVOCADOS, GUAVAS, MANGOES, AND MANGOSTEENS, FRESH OR DRIED:Guavas, mangoes and mangosteens:Guavas, fresh or dried" +8045020,"DATES, FIGS, PINEAPPLES, AVOCADOS, GUAVAS, MANGOES, AND MANGOSTEENS, FRESH OR DRIED:Guavas, mangoes and mangosteens:Mangoes, fresh(OLD tariff)" +8045021,"DATES, FIGS, PINEAPPLES, AVOCADOS, GUAVAS, MANGOES, AND MANGOSTEENS, FRESH OR DRIED:Guavas, mangoes and mangosteens:Alphonso (Hapus)" +8045022,"DATES, FIGS, PINEAPPLES, AVOCADOS, GUAVAS, MANGOES, AND MANGOSTEENS, FRESH OR DRIED:Guavas, mangoes and mangosteens:Banganapalli" +8045023,"DATES, FIGS, PINEAPPLES, AVOCADOS, GUAVAS, MANGOES, AND MANGOSTEENS, FRESH OR DRIED:Guavas, mangoes and mangosteens:chausa" +8045024,"DATES, FIGS, PINEAPPLES, AVOCADOS, GUAVAS, MANGOES, AND MANGOSTEENS, FRESH OR DRIED:Guavas, mangoes and mangosteens:Dashaeri" +8045025,"DATES, FIGS, PINEAPPLES, AVOCADOS, GUAVAS, MANGOES, AND MANGOSTEENS, FRESH OR DRIED:Guavas, mangoes and mangosteens:Langla" +8045026,"DATES, FIGS, PINEAPPLES, AVOCADOS, GUAVAS, MANGOES, AND MANGOSTEENS, FRESH OR DRIED:Guavas, mangoes and mangosteens:Kesar" +8045027,"DATES, FIGS, PINEAPPLES, AVOCADOS, GUAVAS, MANGOES, AND MANGOSTEENS, FRESH OR DRIED:Guavas, mangoes and mangosteens:Totapuri" +8045028,"DATES, FIGS, PINEAPPLES, AVOCADOS, GUAVAS, MANGOES, AND MANGOSTEENS, FRESH OR DRIED:Guavas, mangoes and mangosteens:Mallika" +8045029,"DATES, FIGS, PINEAPPLES, AVOCADOS, GUAVAS, MANGOES, AND MANGOSTEENS, FRESH OR DRIED:Guavas, mangoes and mangosteens:other" +8045030,"DATES, FIGS, PINEAPPLES, AVOCADOS, GUAVAS, MANGOES, AND MANGOSTEENS, FRESH OR DRIED:Guavas, mangoes and mangosteens:Mangoes, sliced dried" +8045040,"DATES, FIGS, PINEAPPLES, AVOCADOS, GUAVAS, MANGOES, AND MANGOSTEENS, FRESH OR DRIED:Guavas, mangoes and mangosteens:Mango pulp" +8045090,"DATES, FIGS, PINEAPPLES, AVOCADOS, GUAVAS, MANGOES, AND MANGOSTEENS, FRESH OR DRIED:Guavas, mangoes and mangosteens:Other" +8050000,citrus fruit fresh or dried +8051000,"CITRUS FRUIT, FRESH OR DRIED::Oranges" +8052000,"CITRUS FRUIT, FRESH OR DRIED::Mandarins (including tangerines and satsumas); clementines, wilkings and similar citrus hybrids(OLD tariff)" +8052100,"CITRUS FRUIT, FRESH OR DRIED::Mandarins (including tangerines and satsumas)" +8052200,"CITRUS FRUIT, FRESH OR DRIED::Clementines" +8052900,"CITRUS FRUIT, FRESH OR DRIED::Other" +8054000,"CITRUS FRUIT, FRESH OR DRIED::Grapefruit, including pomelos" +8055000,"CITRUS FRUIT, FRESH OR DRIED::Lemon (Citrus limon, Citrus limonum) and limes (Citrus aurantifolia, Citrus latifolia)" +8059000,"CITRUS FRUIT, FRESH OR DRIED::Other" +8060000,grapes fresh or dried +8061000,"GRAPES, FRESH OR DRIED::Fresh" +8062000,grapes fresh or dried >> dried +8062010,"GRAPES, FRESH OR DRIED:Dried :Raisins" +8062090,"GRAPES, FRESH OR DRIED:Dried :Other" +8070000,melons including watermelons and papaws papayas fresh melons including watermelons +8071100,"MELONS (INCLUDING WATERMELONS) AND PAPAWS (PAPAYAS), FRESH::Water melons" +8071900,"MELONS (INCLUDING WATERMELONS) AND PAPAWS 
(PAPAYAS), FRESH:other:Other" +8071910,"MELONS (INCLUDING WATERMELONS) AND PAPAWS (PAPAYAS), FRESH:other:Musk melons" +8071990,"MELONS (INCLUDING WATERMELONS) AND PAPAWS (PAPAYAS), FRESH:other:Other" +8072000,"MELONS (INCLUDING WATERMELONS) AND PAPAWS (PAPAYAS), FRESH::Papaws (papayas)" +8080000,apples pears and quinces fresh +8081000,"APPLES, PEARS AND QUINCES, FRESH::Apples" +8083000,"APPLES, PEARS AND QUINCES, FRESH:Pears:Pears" +8084000,"APPLES, PEARS AND QUINCES, FRESH:Quinces:Quinces" +8090000,apricots cherries peaches including nectarines plums and sloes fresh +8091000,"APRICOTS, CHERRIES, PEACHES (INCLUDING NECTARINES), PLUMS AND SLOES, FRESH::Apricots" +8092100,"APRICOTS, CHERRIES, PEACHES (INCLUDING NECTARINES), PLUMS AND SLOES, FRESH:Sour Cherries:Sour Cherries (Prunus cerasus)" +8092900,"APRICOTS, CHERRIES, PEACHES (INCLUDING NECTARINES), PLUMS AND SLOES, FRESH:other:Other" +8093000,"APRICOTS, CHERRIES, PEACHES (INCLUDING NECTARINES), PLUMS AND SLOES, FRESH::Peaches, including nectarine" +8094000,"APRICOTS, CHERRIES, PEACHES (INCLUDING NECTARINES), PLUMS AND SLOES, FRESH::Plums and sloes" +8100000,other fruit fresh +8101000,"OTHER FRUIT, FRESH::Strawberries" +8102000,"OTHER FRUIT, FRESH::Raspberries, blackberries, mulberries and loganberries" +8103000,"OTHER FRUIT, FRESH:Blac, white or ret currants:Black, white or red currants and gooseberries" +8104000,"OTHER FRUIT, FRESH::Cranberries, bilberries and other fruits of the genus Vaccinium" +8105000,"OTHER FRUIT, FRESH::Kiwi fruit" +8106000,"OTHER FRUIT, FRESH::Durians" +8107000,"OTHER FRUIT, FRESH:Persimmons:Persimmons" +8109000,other fruit fresh >> other +8109010,"OTHER FRUIT, FRESH:Other :Pomegranates" +8109020,"OTHER FRUIT, FRESH:Other :Tamarind, fresh" +8109030,"OTHER FRUIT, FRESH:Other :Sapota (chico)" +8109040,"OTHER FRUIT, FRESH:Other :Custard-apple (Ata)" +8109050,"OTHER FRUIT, FRESH:Other :Bore" +8109060,"OTHER FRUIT, FRESH:Other :Lichi" +8109090,"OTHER FRUIT, FRESH:Other :Other" +8110000,fruit and nuts uncooked or cooked by steaming or boiling in water frozen whether or not containing added sugar or other sweetening matter +8111000,fruit and nuts uncooked or cooked by steaming or boiling in water frozen whether or not containing added sugar or other sweetening matter >> strawberries +8111010,"FRUIT AND NUTS, UNCOOKED OR COOKED BY STEAMING OR BOILING IN WATER, FROZEN, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER:Strawberries :Containing added sugar" +8111020,"FRUIT AND NUTS, UNCOOKED OR COOKED BY STEAMING OR BOILING IN WATER, FROZEN, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER:Strawberries :Not containing added sugar" +8111090,"FRUIT AND NUTS, UNCOOKED OR COOKED BY STEAMING OR BOILING IN WATER, FROZEN, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER:Strawberries :Other" +8112000,fruit and nuts uncooked or cooked by steaming or boiling in water frozen whether or not containing added sugar or other sweetening matter >> raspberries blackberries mulberries loganberries black white or red currants and gooseberries +8112010,"FRUIT AND NUTS, UNCOOKED OR COOKED BY STEAMING OR BOILING IN WATER, FROZEN, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER:Raspberries, blackberries, mulberries, loganberries, black, white or red currants and gooseberries :Containing added sugar" +8112020,"FRUIT AND NUTS, UNCOOKED OR COOKED BY STEAMING OR BOILING IN WATER, FROZEN, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER:Raspberries, blackberries, 
mulberries, loganberries, black, white or red currants and gooseberries :Not containing added sugar" +8112090,"FRUIT AND NUTS, UNCOOKED OR COOKED BY STEAMING OR BOILING IN WATER, FROZEN, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER:Raspberries, blackberries, mulberries, loganberries, black, white or red currants and gooseberries :Other" +8119000,fruit and nuts uncooked or cooked by steaming or boiling in water frozen whether or not containing added sugar or other sweetening matter >> other +8119010,"FRUIT AND NUTS, UNCOOKED OR COOKED BY STEAMING OR BOILING IN WATER, FROZEN, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER:Other :Containing added sugar" +8119090,"FRUIT AND NUTS, UNCOOKED OR COOKED BY STEAMING OR BOILING IN WATER, FROZEN, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER:Other :Other" +8120000,unsuitable in that state for immediate consumption +8121000,"FRUIT AND NUTS PROVISIONALLY PRESERVED (FOR EXAMPLE, BY SULPHUR DIOXIDE GAS, IN BRINE, IN SULPHUR WATER OR IN OTHER PRESERVATIVE SOLUTIONS), BUT UNSUITABLE IN THAT STATE FOR IMMEDIATE CONSUMPTION::Cherries" +8129000,unsuitable in that state for immediate consumption >> other +8129010,"FRUIT AND NUTS PROVISIONALLY PRESERVED (FOR EXAMPLE, BY SULPHUR DIOXIDE GAS, IN BRINE, IN SULPHUR WATER OR IN OTHER PRESERVATIVE SOLUTIONS), BUT UNSUITABLE IN THAT STATE FOR IMMEDIATE CONSUMPTION:Other :Mango slices in brine" +8129090,"FRUIT AND NUTS PROVISIONALLY PRESERVED (FOR EXAMPLE, BY SULPHUR DIOXIDE GAS, IN BRINE, IN SULPHUR WATER OR IN OTHER PRESERVATIVE SOLUTIONS), BUT UNSUITABLE IN THAT STATE FOR IMMEDIATE CONSUMPTION:Other :Other" +8130000,fruit dried other than that of headings 0801 to 0806 mixtures of nuts or driedfruits of this chapter +8131000,"FRUIT, DRIED, OTHER THAN THAT OF HEADINGS 0801 TO 0806; MIXTURES OF NUTS OR DRIED FRUITS OF THIS CHAPTER::Apricots" +8132000,"FRUIT, DRIED, OTHER THAN THAT OF HEADINGS 0801 TO 0806; MIXTURES OF NUTS OR DRIED FRUITS OF THIS CHAPTER::Prunes" +8133000,"FRUIT, DRIED, OTHER THAN THAT OF HEADINGS 0801 TO 0806; MIXTURES OF NUTS OR DRIED FRUITS OF THIS CHAPTER::Apples" +8134000,fruit dried other than that of headings 0801 to 0806 mixtures of nuts or driedfruits of this chapter >> other fruit +8134010,"FRUIT, DRIED, OTHER THAN THAT OF HEADINGS 0801 TO 0806; MIXTURES OF NUTS OR DRIED FRUITS OF THIS CHAPTER:Other fruit:Tamarind, dried" +8134020,"FRUIT, DRIED, OTHER THAN THAT OF HEADINGS 0801 TO 0806; MIXTURES OF NUTS OR DRIED FRUITS OF THIS CHAPTER:Other fruit:Singoda whole (water nut)" +8134090,"FRUIT, DRIED, OTHER THAN THAT OF HEADINGS 0801 TO 0806; MIXTURES OF NUTS OR DRIED FRUITS OF THIS CHAPTER:Other fruit:Other" +8135000,fruit dried other than that of headings 0801 to 0806 mixtures of nuts or driedfruits of this chapter >> mixtures of nuts or dried fruits of this chapter +8135010,"FRUIT, DRIED, OTHER THAN THAT OF HEADINGS 0801 TO 0806; MIXTURES OF NUTS OR DRIED FRUITS OF THIS CHAPTER:Mixtures of nuts or dried fruits of this Chapter:Mixtures of nuts" +8135020,"FRUIT, DRIED, OTHER THAN THAT OF HEADINGS 0801 TO 0806; MIXTURES OF NUTS OR DRIED FRUITS OF THIS CHAPTER:Mixtures of nuts or dried fruits of this Chapter:Mixtures of dried fruits" +8140000,"::PEEL OF CITRUS FRUIT OR MELONS (INCLUDING WATERMELONS), FRESH, FROZEN, DRIED OR PROVISIONALLY PRESERVED IN BRINE, IN SULPHUR WATER OR IN OTHER PRESERVATIVE SOLUTIONS" +8212201,"RAZORS AND RAZOR BLADES (INCLUDING RAZOR BLADE BLANKS IN STRIPS):Safety razor blades, including razor blade blanks in 
strips:Safety razor blades(OLD tariff)" +8308102,"CLASPS, FRAMES WITH CLASPS, BUCKLES, BUCKLE-CLASPS, HOOKS, EYES, EYELETS AND THE LIKE, OF BASE METAL, OF A KIND USED FOR CLOTHING, FOOTWEAR, AWNINGS, HANDBAGS, TRAVEL GOODS OR OTHER MADE UP ARTICLES; TUBULAR OR BIFURCATED RIVETS, OF BASE METAL; BEADS AND SPANGLES, OF BASE METALS:Hooks, eyes and eyelets:Eyelets(OLD tariff)" +8308901,"CLASPS, FRAMES WITH CLASPS, BUCKLES, BUCKLE-CLASPS, HOOKS, EYES, EYELETS AND THE LIKE, OF BASE METAL, OF A KIND USED FOR CLOTHING, FOOTWEAR, AWNINGS, HANDBAGS, TRAVEL GOODS OR OTHER MADE UP ARTICLES; TUBULAR OR BIFURCATED RIVETS, OF BASE METAL; BEADS AND SPANGLES, OF BASE METALS:Other, including parts:Buckles(OLD tariff)" +8308903,"CLASPS, FRAMES WITH CLASPS, BUCKLES, BUCKLE-CLASPS, HOOKS, EYES, EYELETS AND THE LIKE, OF BASE METAL, OF A KIND USED FOR CLOTHING, FOOTWEAR, AWNINGS, HANDBAGS, TRAVEL GOODS OR OTHER MADE UP ARTICLES; TUBULAR OR BIFURCATED RIVETS, OF BASE METAL; BEADS AND SPANGLES, OF BASE METALS:Other, including parts:Beads and spangles of base metal(OLD tariff)" +8308909,"CLASPS, FRAMES WITH CLASPS, BUCKLES, BUCKLE-CLASPS, HOOKS, EYES, EYELETS AND THE LIKE, OF BASE METAL, OF A KIND USED FOR CLOTHING, FOOTWEAR, AWNINGS, HANDBAGS, TRAVEL GOODS OR OTHER MADE UP ARTICLES; TUBULAR OR BIFURCATED RIVETS, OF BASE METAL; BEADS AND SPANGLES, OF BASE METALS:Other, including parts:Other(OLD tariff)" +8408109,COMPRESSION-IGNITION INTERNAL COMBUSTION PISTON ENGINES (DIESEL OR SEMI-DIESEL ENGINES):Marine propulsion engines:Other(OLD tariff) +8409911,"PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE ENGINES OF HEADING 8407 OR 8408:Suitable for use solely or principally with spark-ignition internal combustion piston engines:Valves, inlet and exhaust, piston piston rings, piston assemblies(OLD tariff)" +8409919,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE ENGINES OF HEADING 8407 OR 8408:Suitable for use solely or principally with spark-ignition internal combustion piston engines:Other(OLD tariff) +8409991,"PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE ENGINES OF HEADING 8407 OR 8408:Other :Valves, inlet and exhaust, piston, piston rings, piston assemblies(OLD tariff)" +8409994,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE ENGINES OF HEADING 8407 OR 8408:Other :Other parts of diesel engine(OLD tariff) +8412801,OTHER ENGINES AND MOTORS:Other :Steam or other vapour power(OLD tariff) +8413119,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Pumps for dispensing fuel or lubricants, of the type used in filling stations or in garages:Other(OLD tariff)" +8413502,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Other reciprocating positive displacement pumps :Primarily designed for handling water(OLD tariff)" +8413709,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Other centrifugal pumps:Other(OLD tariff)" +8414801,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Other :Gas compressors(OLD tariff)" +8414901,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Parts :Of air or vacuum pumps and comressors(OLD tariff)" +8421199,"CENTRIFUGES, INCLUDING CENTRIFUGAL DRYERS; FILTERING OR PURIFYING MACHINERY AND APPARATUS, FOR LIQUIDS OR GASES:Other :Other(OLD tariff)" +8428101,"OTHER LIFTING, HANDLING, 
LOADING OR UNLOADING MACHINERY (FOR EXAMPLE, LIFTS, ESCALATORS, CONVEYORS, TELEFERICS):Lifts and skip hoists:Lifts(OLD tariff)" +8428201,"OTHER LIFTING, HANDLING, LOADING OR UNLOADING MACHINERY (FOR EXAMPLE, LIFTS, ESCALATORS, CONVEYORS, TELEFERICS):Pneumatic elevators and conveyors:Conveyors(OLD tariff)" +8442503,"MACHINERY, APPARATUS AND EQUIPMENT (OTHER THAN THE MACHINES OF HEADINGS 8456 TO 8465) FOR PREPARING OR MAKING PLATES, PRINTING COMPONENTS; PLATES, CYLINDERS AND LITHOGRAPHIC STONES, PREPARED FOR PRINTING PURPOSES (FOR EXAMPLE, PLANED, GRAINED OR POLISHED):Plates, cylinders and other printing components; plates, cylinders and lithographic stones, prepared for printing purposes (for example, planed, grained or polished):Plate, cylinder and lithographic stones prepared for printing purposes(OLD tariff)" +8443194,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF:Other:Machinery for printing repetitive word or design or colour(OLD tariff)" +8443995,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF:Other :Parts and accessories of goods of sub-headings 8443 31, 8443 32(OLD tariff)" +8445201,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Textile spinning machines :Cotton spinning machines(OLD tariff)" +8445301,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Textile doubling or twisting machines :Cotton fibre doubling or twisting machines(OLD tariff)" +8446101,WEAVING MACHINES (LOOMS):For weaving fabrics of a width not exceeding 30 cm :Cotton weaving machines(OLD tariff) +8446301,"WEAVING MACHINES (LOOMS):For weaving fabrics of a width exceeding30 cm, shuttleless type:Cotton weaving machines(OLD tariff)" +8447111,"KNITTING MACHINES, STITCH-BONDING MACHINES AND MACHINES FOR MAKING GIMPED YARN, TULLE, LACE, EMBROIDERY, TRIMMINGS, BRAID OR NET AND MACHINES FOR TUFTING:With cylinder diameter not exceeding 165 mm:Wool knitting machines(OLD tariff)" +8447121,"KNITTING MACHINES, STITCH-BONDING MACHINES AND MACHINES FOR MAKING GIMPED YARN, TULLE, LACE, EMBROIDERY, TRIMMINGS, BRAID OR NET AND MACHINES FOR TUFTING:With cylinder diameter exceeding 165 mm:Wool knitting machines(OLD tariff)" +8451401,"MACHINERY (OTHER THAN MACHINES OF HEADING 8450) FOR WASHING, CLEANING, WRINGING, DRYING, IRONING, PRESSING (INCLUDING FUSING PRESSES), BLEACHING, DYEING, DRESSING, FINISHING, COATING OR IMPREGNATING TEXTILE YARNS, FABRICS OR MADE UP TEXTILE ARTICLES AND MACHINES FOR APPLYING THE PASTE TO THE BASE FABRIC OR OTHER SUPPORT USED IN THE MANUFACTURE OF FLOOR COVERINGS SUCH AS LINOLEUM; MACHINES FOR REELING, UNREELING, FOLDING, CUTTING OR PINKING TEXTILE FABRICS:Washing, bleaching or dyeing machines:Bleaching machine(OLD tariff)" +8451402,"MACHINERY (OTHER THAN MACHINES OF HEADING 8450) FOR WASHING, CLEANING, 
WRINGING, DRYING, IRONING, PRESSING (INCLUDING FUSING PRESSES), BLEACHING, DYEING, DRESSING, FINISHING, COATING OR IMPREGNATING TEXTILE YARNS, FABRICS OR MADE UP TEXTILE ARTICLES AND MACHINES FOR APPLYING THE PASTE TO THE BASE FABRIC OR OTHER SUPPORT USED IN THE MANUFACTURE OF FLOOR COVERINGS SUCH AS LINOLEUM; MACHINES FOR REELING, UNREELING, FOLDING, CUTTING OR PINKING TEXTILE FABRICS:Washing, bleaching or dyeing machines:Dyeing machines(OLD tariff)" +8451409,"MACHINERY (OTHER THAN MACHINES OF HEADING 8450) FOR WASHING, CLEANING, WRINGING, DRYING, IRONING, PRESSING (INCLUDING FUSING PRESSES), BLEACHING, DYEING, DRESSING, FINISHING, COATING OR IMPREGNATING TEXTILE YARNS, FABRICS OR MADE UP TEXTILE ARTICLES AND MACHINES FOR APPLYING THE PASTE TO THE BASE FABRIC OR OTHER SUPPORT USED IN THE MANUFACTURE OF FLOOR COVERINGS SUCH AS LINOLEUM; MACHINES FOR REELING, UNREELING, FOLDING, CUTTING OR PINKING TEXTILE FABRICS:Washing, bleaching or dyeing machines:Other(OLD tariff)" +8451801,"MACHINERY (OTHER THAN MACHINES OF HEADING 8450) FOR WASHING, CLEANING, WRINGING, DRYING, IRONING, PRESSING (INCLUDING FUSING PRESSES), BLEACHING, DYEING, DRESSING, FINISHING, COATING OR IMPREGNATING TEXTILE YARNS, FABRICS OR MADE UP TEXTILE ARTICLES AND MACHINES FOR APPLYING THE PASTE TO THE BASE FABRIC OR OTHER SUPPORT USED IN THE MANUFACTURE OF FLOOR COVERINGS SUCH AS LINOLEUM; MACHINES FOR REELING, UNREELING, FOLDING, CUTTING OR PINKING TEXTILE FABRICS:Other machinery :Sizing and dressing machines(OLD tariff)" +8451802,"MACHINERY (OTHER THAN MACHINES OF HEADING 8450) FOR WASHING, CLEANING, WRINGING, DRYING, IRONING, PRESSING (INCLUDING FUSING PRESSES), BLEACHING, DYEING, DRESSING, FINISHING, COATING OR IMPREGNATING TEXTILE YARNS, FABRICS OR MADE UP TEXTILE ARTICLES AND MACHINES FOR APPLYING THE PASTE TO THE BASE FABRIC OR OTHER SUPPORT USED IN THE MANUFACTURE OF FLOOR COVERINGS SUCH AS LINOLEUM; MACHINES FOR REELING, UNREELING, FOLDING, CUTTING OR PINKING TEXTILE FABRICS:Other machinery :Finishing machines(OLD tariff)" +8452101,"SEWING MACHINES, OTHER THAN BOOK-SEWING MACHINES OF HEADING 8440; FURNITURE, BASES AND COVERS SPECIALLY DESIGNED FOR SEWING MACHINES; SEWING MACHINE NEEDLES:Sewing machines of the household type :Complete, with stand or table(OLD tariff)" +8452102,"SEWING MACHINES, OTHER THAN BOOK-SEWING MACHINES OF HEADING 8440; FURNITURE, BASES AND COVERS SPECIALLY DESIGNED FOR SEWING MACHINES; SEWING MACHINE NEEDLES:Sewing machines of the household type :Without stand or table (heads)(OLD tariff)" +8458191,"LATHES (INCLUDING TURNING CENTRES) FOR REMOVING METAL:Other :Automatic, single spindle(OLD tariff)" +8458993,"LATHES (INCLUDING TURNING CENTRES) FOR REMOVING METAL:Other :Capstans, turrets, capstan and turrent combination, coping, multi tool and production lathes(OLD tariff)" +8458994,"LATHES (INCLUDING TURNING CENTRES) FOR REMOVING METAL:Other :Crankshaft, relieving, wheel and axle lathes(OLD tariff)" +8458995,LATHES (INCLUDING TURNING CENTRES) FOR REMOVING METAL:Other :Centre lathes(OLD tariff) +8460401,"MACHINE-TOOLS FOR DEBURRING, SHARPENING, GRINDING, HONING, LAPPING, POLISHING OR OTHERWISE FINISHING METAL, OR CERMETS BY MEANS OF GRINDING STONES, ABRASIVES OR POLISHING PRODUCTS, OTHER THAN GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING MACHINES OF HEADING 8461:Honing or lapping machines :Honing machines(OLD tariff)" +8461201,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING 
METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Shaping or slotting machines :Shaping machines(OLD tariff)" +8461401,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Gear cutting, gear grinding or gear finishing machines :Gear cutting machines(OLD tariff)" +8461402,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Gear cutting, gear grinding or gear finishing machines :Gear grinding or gear finishing machines(OLD tariff)" +8461501,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Sawing or cutting-off machines :Swing machines(OLD tariff)" +8461502,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Sawing or cutting-off machines :Cutting-off machines(OLD tariff)" +8462101,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Forging or die-stamping machines (including presses) and hammers:Hammers(OLD tariff)" +8462991,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Pneumatic, inclinable and vertical presses(OLD tariff)" +8471602,"AUTOMATIC DATA PROCESSING MACHINES AND UNITS:Input or output units, whether or not containing storage units in the same housing :Printer(OLD tariff)" +8479899,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Other :Other(OLD tariff)" +8481804,"TAPS, COCKS, VALVES AND SIMILAR APPLIANCES FOR PIPES, BOILER SHELLS, TANKS, VATS OR THE LIKE, INCLUDING PRESSURE-REDUCING VALVES AND THE RMOSTATICALLY CONTROLLED VALVES:Other appliances :Inner tube valves(OLD tariff)" +8482101,BALL OR ROLLER BEARINGS:Ball bearings :Adapter ball bearings (radial type)(OLD tariff) +8482105,BALL OR ROLLER BEARINGS:Ball bearings :Thrust ball bearings(OLD tariff) +8482201,"BALL OR ROLLER BEARINGS:Tapered roller bearings, including cone and tapered roller assemblies :Tapered roller bearings (radial type)(OLD tariff)" +8482501,BALL OR ROLLER BEARINGS:Other cylindrical roller bearings :Radial type(OLD tariff) +8482502,BALL OR ROLLER BEARINGS:Other cylindrical roller bearings :Thrust roller bearings(OLD tariff) +8482911,"BALL OR ROLLER BEARINGS:Balls, needles and rollers :Balls(OLD tariff)" +8483109,"TRANSMISSION SHAFTS (INCLUDING CAM SHAFTS AND CRANK SHAFTS) AND CRANKS; BEARING HOUSINGS AND PLAIN SHAFT BEARINGS; GEARS AND GEARING; BALL OR ROLLER SCREWS; GEAR BOXES AND OTHER SPEED CHANGERS, INCLUDING TORQUE CONVERTERS; FLYWHEELS AND PULLEYS, INCLUDING PULLEY BLOCKS; 
CLUTCHES AND SHAFT COUPLINGS (INCLUDING UNIVERSAL JOINTS):Transmission shafts (including cam shafts and crank shafts) and cranks :Other(OLD tariff)" +8501101,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Motors of an output not exceeding 37.5 W :DC motor(OLD tariff) +8501311,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output not exceeding 750 W:DC motors(OLD tariff) +8504402,"ELECTRICAL TRANSFORMERS, STATIC CONVERTERS (FOR EXAMPLE, RECTIFIERS) AND INDUCTORS:Static converters:Rectifier(OLD tariff)" +8521101,"VIDEO RECORDING OR REPRODUCING APPARATUS, WHETHER OR NOT INCORPORATING A VIDEO TUNER:Magnetic tape-type :type - Cassette tape-type(OLD tariff)" +8521102,"VIDEO RECORDING OR REPRODUCING APPARATUS, WHETHER OR NOT INCORPORATING A VIDEO TUNER:Magnetic tape-type :type - Spool type(OLD tariff)" +8521109,"VIDEO RECORDING OR REPRODUCING APPARATUS, WHETHER OR NOT INCORPORATING A VIDEO TUNER:Magnetic tape-type :type - Other(OLD tariff)" +8525601,"TRANSMISSION APPARATUS FOR RADIO-BROADCASTING OR TELEVISION, WHETHER OR NOT INCORPORATING RECEPTION APPARATUS OR SOUND RECORDING OR REPRODUCING APPARATUS; TELEVISION CAMERAS, DIGITAL CAMERAS AND VIDEO CAMERA RECORDERS:Transmission apparatus incorporating reception apparatus:Two way radio communication equipment(OLD tariff)" +8525609,"TRANSMISSION APPARATUS FOR RADIO-BROADCASTING OR TELEVISION, WHETHER OR NOT INCORPORATING RECEPTION APPARATUS OR SOUND RECORDING OR REPRODUCING APPARATUS; TELEVISION CAMERAS, DIGITAL CAMERAS AND VIDEO CAMERA RECORDERS:Transmission apparatus incorporating reception apparatus:Other(OLD tariff)" +8527991,"RECEPTION APPARATUS FOR RADIO-BROADCASTING WHETHER OR NOT COMBINED, IN THE SAME HOUSING, WITH SOUND RECORDING OR REPRODUCING APPARATUS OR A CLOCK:Other:Radio communication receivers(OLD tariff)" +8529101,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE APPARATUS OF HEADINGS 8525 TO 8528:Aerials and aerial reflectors of all kinds; parts suitable for use therewith:Dish antenna(OLD tariff) +8529102,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE APPARATUS OF HEADINGS 8525 TO 8528:Aerials and aerial reflectors of all kinds; parts suitable for use therewith:Other aerials or antenna(OLD tariff) +8529109,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE APPARATUS OF HEADINGS 8525 TO 8528:Aerials and aerial reflectors of all kinds; parts suitable for use therewith:Other(OLD tariff) +8533211,"ELECTRICAL RESISTORS (INCLUDING RHEOSTATS AND POTENTIOMETERS), OTHER THAN HEATING RESISTORS:For a power handling capacity not exceeding 20 W :Of bare wire(OLD tariff)" +8533291,"ELECTRICAL RESISTORS (INCLUDING RHEOSTATS AND POTENTIOMETERS), OTHER THAN HEATING RESISTORS:Other :Of bare wire(OLD tariff)" +8533292,"ELECTRICAL RESISTORS (INCLUDING RHEOSTATS AND POTENTIOMETERS), OTHER THAN HEATING RESISTORS:Other :Of insulated wire(OLD tariff)" +8535211,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:For a voltage of less than 72.5 kV:SF6 circuit breakers(OLD tariff)" +8535212,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 
VOLTS:For a voltage of less than 72.5 kV:Vacuum circuit breakers(OLD tariff)" +8535291,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Other :SF6 circuits breakers(OLD tariff)" +8535292,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Other :Vacuum circuit breakers(OLD tariff)" +8539211,"ELECTRIC FILAMENT OR DISCHARGE LAMPS INCLUDING SEALED BEAM LAMP UNITS AND ULTRA-VIOLET OR INFRARED LAMPS, ARC LAMPS; LIGHT-EMITTING DIODE (LED) LAMPS:Tungsten halogen :Other(OLD tariff)" +8541401,"DIODES, TRANSISTORS AND SIMILAR SEMI-CONDUCTOR DEVICES; PHOTOSENSITIVE SEMI-CONDUCTOR DEVICES; INCLUDING PHOTO VOLTAIC CELLS, WHETHER OR NOT ASSEMBLED IN MODULES OR MADE UP INTO PANELS; LIGHT-EMITTING DIODES (LED); MOUNTED PIEZOELECTRIC CRYSTALS:Photosensitive semi-conductor devices, including photo voltaic cells whether or not assembled in modules or made up into panels; light-emitting diodes (LED):Photocells(OLD tariff)" +8543702,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Audio special effect equipment(OLD tariff)" +8543703,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Video special effect equipments(OLD tariff)" +8543704,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Edit control Unit(OLD tariff)" +8543706,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Amplifier(OLD tariff)" +8543707,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Graphic equalizer and synthesized receivers(OLD tariff)" +8543709,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Other(OLD tariff)" +8544429,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Fitted with connectors:Other(OLD tariff)" +8544499,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Other :Other(OLD tariff)" +8546201,ELECTRICAL INSULATORS OF ANY MATERIAL:Of ceramics :Porcelain discs and strings(OLD tariff) +8546202,ELECTRICAL INSULATORS OF ANY MATERIAL:Of ceramics :Porcelain post insulators(OLD tariff) +8546203,ELECTRICAL INSULATORS OF ANY MATERIAL:Of ceramics :Porcelain pin insulators(OLD tariff) 
+8701301,TRACTORS (OTHER THAN TRACTORS OF HEADING 8709):Track-laying tractors :Garden tractors(OLD tariff) +8701309,TRACTORS (OTHER THAN TRACTORS OF HEADING 8709):Track-laying tractors :Other(OLD tariff) +8702101,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With only compression-ignition internal combustion piston engine (diesel or semi-diesel):Vehicles for transport of not more than 13 persons, including the driver(OLD tariff)" +8702102,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With only compression-ignition internal combustion piston engine (diesel or semi-diesel):Other(OLD tariff)" +8702109,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With only compression-ignition internal combustion piston engine (diesel or semi-diesel):Other(OLD tariff)" +8702201,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With both compression-ignition internal combustion piston engine (diesel or semi-diesel) and electric motor as motors for propulsion:Vehicles for transport of not more than 13 persons, including the driver(OLD tariff)" +8702202,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With both compression-ignition internal combustion piston engine (diesel or semi-diesel) and electric motor as motors for propulsion:Other(OLD tariff)" +8702301,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With both spark-ignition internal combustion reciprocating piston engine and electric motor as motors for propulsion:Vehicles for transport of not more than 13 persons, including the driver(OLD tariff)" +8702302,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With both spark-ignition internal combustion reciprocating piston engine and electric motor as motors for propulsion:Other(OLD tariff)" +8702401,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With only electric motor for propulsion:Vehicles for transport of not more than 13 persons, including the driver(OLD tariff)" +8702402,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With only electric motor for propulsion:Other(OLD tariff)" +8702901,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:Other :Vehicles for transport of not more than 13 persons, including the driver(OLD tariff)" +8702902,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:Other :Other(OLD tariff)" +8703219,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity not exceeding 1,000 cc :Other(OLD tariff)" +8703229,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 1,000 cc but not exceeding 1,500 cc :Other(OLD tariff)" +8703239,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 1,500 cc but not exceeding 3,000 cc :Other(OLD tariff)" +8703249,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 3,000 cc 
:Other(OLD tariff)" +8703319,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity not exceeding 1,500 cc :Other(OLD tariff)" +8703329,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 1,500 cc but not exceeding 2,500 cc :Other(OLD tariff)" +8703339,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 2,500 cc :Other(OLD tariff)" +8704221,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:g.v.w. exceeding 5 tonnes but not exceeding 20 tonnes :Lorries and trucks(OLD tariff) +8704231,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:g.v.w. exceeding 20 tonnes :Lorries and trucks(OLD tariff) +8704321,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:g.v.w. exceeding 5 tonnes:Lorries and trucks(OLD tariff) +8704901,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:Other :Lorries and trucks(OLD tariff) +8706001,"CHASSIS FITTED WITH ENGINES, FOR THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705:Chassis fitted with engines, for the motor vehicles of headings 8701 to 8705:For the tractors of heading 8701(OLD tariff)" +8706002,"CHASSIS FITTED WITH ENGINES, FOR THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705:Chassis fitted with engines, for the motor vehicles of headings 8701 to 8705:For the vehicles of heading 8702(OLD tariff)" +8706003,"CHASSIS FITTED WITH ENGINES, FOR THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705:Chassis fitted with engines, for the motor vehicles of headings 8701 to 8705:For the vehicles of heading 8703(OLD tariff)" +8706004,"CHASSIS FITTED WITH ENGINES, FOR THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705:Chassis fitted with engines, for the motor vehicles of headings 8701 to 8705:For the motor vehicles of heading 8704(OLD tariff)" +8711201,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:With reciprocating internal combustion piston engine of a cylinder capacity exceeding 50 cc but not exceeding 250 cc :Scooters(OLD tariff)" +8711202,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:With reciprocating internal combustion piston engine of a cylinder capacity exceeding 50 cc but not exceeding 250 cc :Motor cycles(OLD tariff)" +8711203,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:With reciprocating internal combustion piston engine of a cylinder capacity exceeding 50 cc but not exceeding 250 cc :Mopeds(OLD tariff)" +8711209,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:With reciprocating internal combustion piston engine of a cylinder capacity exceeding 50 cc but not exceeding 250 cc :Other(OLD tariff)" +8711909,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:Other :Other(OLD tariff)" +9010000,coffee whether or not roasted or dacaffeinated coffee substitutes coffee husks and skins containing coffee in any proportion coffee not roasted +9011100,coffee whether or not roasted or dacaffeinated coffee substitutes coffee husks and skins containing coffee in any proportion coffee not roasted >> not decaffeinated arabica plantation +9011111,"COFFEE, 
WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Not decaffeinated :A Grade" +9011112,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Not decaffeinated :B Grade" +9011113,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Not decaffeinated :C Grade" +9011119,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Not decaffeinated :Other " +9011121,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Not decaffeinated :AB Grade" +9011122,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Not decaffeinated :PB Grade" +9011123,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Not decaffeinated :C Grade" +9011124,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Not decaffeinated :B/B/B Grade" +9011129,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Not decaffeinated :Other " +9011131,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Not decaffeinated :AB Grade" +9011132,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Not decaffeinated :PB Grade" +9011133,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Not decaffeinated :C Grade" +9011139,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Not decaffeinated :Other " +9011141,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Not decaffeinated :AB Grade" +9011142,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Not decaffeinated :PB Grade" +9011143,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Not decaffeinated :C Grade" +9011144,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Not decaffeinated :B/B/B Grade" +9011145,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Not decaffeinated :Bulk" +9011149,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Not decaffeinated :Other" +9011190,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Not decaffeinated :Other" +9011200,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION ::Decaffeinated " +9012100,coffee whether or not roasted or dacaffeinated coffee 
substitutes coffee husks and skins containing coffee in any proportion coffee not roasted >> not decaffeinated +9012110,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Not decaffeinated :In bulk packing" +9012190,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Not decaffeinated :Other" +9012200,coffee whether or not roasted or dacaffeinated coffee substitutes coffee husks and skins containing coffee in any proportion coffee not roasted >> +9012210,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Decaffeinated :In bulk packing" +9012290,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Decaffeinated :Other" +9017302,"DRAWING, MARKING-OUT OR MATHEMATICAL CALCULATING INSTRUMENTS (FOR EXAMPLE, DRAFTING MACHINES, PANTOGRAPHS, PROTRACTORS, DRAWING SETS, SLIDE RULES, DISC CALCULATORS); INSTRUMENTS FOR MEASURING LENGTH, FOR USE IN THE HAND (FOR EXAMPLE, MEASURING RODS AND TAPES, MICROMETERS, CALLIPERS), NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Micrometers, calipers and gauges :Gauges(OLD tariff)" +9018901,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Diagnostic instruments and apparatus(OLD tariff)" +9018902,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Surgical tools(OLD tariff)" +9018903,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Other instruments and appliances Renal dialysis equipment, blood transfusion apparatus and haemofiltration instruments(OLD tariff)" +9018904,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Anesthetic apparatus and instruments, ENT precision instruments, acupuncture apparatus, and endoscopes(OLD tariff)" +9018909,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Other(OLD tariff)" +9019000,coffee whether or not roasted or dacaffeinated coffee substitutes coffee husks and skins containing coffee in any proportion coffee not roasted >> other other +9019010,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Other:Coffee husks and skins" +9019020,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Other:Coffee substitutes containing coffee" +9019090,"COFFEE, WHETHER OR NOT ROASTED OR DACAFFEINATED; COFFEE HUSKS AND SKINS; COFFEE SUBSTITUTES CONTAINING COFFEE IN ANY PROPORTION :Other:Other" +9020000, +9021000, >> +9021010,"TEA, WHETHER OR NOT 
FLAVOURED:Green tea (not fermented) in immediate packings of a content not exceeding 3 kg :Content not exceeding 25 g ." +9021020,"TEA, WHETHER OR NOT FLAVOURED:Green tea (not fermented) in immediate packings of a content not exceeding 3 kg :Content exceeding 25 g. but not exceeding 1 kg." +9021030,"TEA, WHETHER OR NOT FLAVOURED:Green tea (not fermented) in immediate packings of a content not exceeding 3 kg :Content exceeding 1 kg. but not exceeding 3 kg." +9021090,"TEA, WHETHER OR NOT FLAVOURED:Green tea (not fermented) in immediate packings of a content not exceeding 3 kg :Other" +9022000, >> +9022010,"TEA, WHETHER OR NOT FLAVOURED:Other green tea (not fermented):Green tea in packets with contents exceeding" +9022020,"TEA, WHETHER OR NOT FLAVOURED:Other green tea (not fermented):Green tea in bulk" +9022030,"TEA, WHETHER OR NOT FLAVOURED:Other green tea (not fermented):Green tea agglomerated in forms such as ball, brick and tablets" +9022040,"TEA, WHETHER OR NOT FLAVOURED:Other green tea (not fermented):Green tea waste" +9022090,"TEA, WHETHER OR NOT FLAVOURED:Other green tea (not fermented):Other" +9023000, >> +9023010,"TEA, WHETHER OR NOT FLAVOURED:Black tea (fermented) and partly fermented tea, in immediate packings of a content not exceeding 3 kg. :Content not exceeding 25 g." +9023020,"TEA, WHETHER OR NOT FLAVOURED:Black tea (fermented) and partly fermented tea, in immediate packings of a content not exceeding 3 kg. :Content exceeding 25 g. but not exceeding 1 kg." +9023030,"TEA, WHETHER OR NOT FLAVOURED:Black tea (fermented) and partly fermented tea, in immediate packings of a content not exceeding 3 kg. :Content exceeding 1 kg. but not exceeding 3 kg." +9023090,"TEA, WHETHER OR NOT FLAVOURED:Black tea (fermented) and partly fermented tea, in immediate packings of a content not exceeding 3 kg. :Other" +9024000, >> other black tea fermented and other partly fermented tea +9024010,"TEA, WHETHER OR NOT FLAVOURED:Other black tea (fermented) and other partly fermented tea :Content exceeding 3 kg. but not exceeding 20 kg." 
+9024020,"TEA, WHETHER OR NOT FLAVOURED:Other black tea (fermented) and other partly fermented tea :Black tea, leaf in bulk" +9024030,"TEA, WHETHER OR NOT FLAVOURED:Other black tea (fermented) and other partly fermented tea :Black tea, dust in bulk" +9024040,"TEA, WHETHER OR NOT FLAVOURED:Other black tea (fermented) and other partly fermented tea :Tea bags" +9024050,"TEA, WHETHER OR NOT FLAVOURED:Other black tea (fermented) and other partly fermented tea :Black tea, agglomerated in forms such as ball, brick and tablets" +9024060,"TEA, WHETHER OR NOT FLAVOURED:Other black tea (fermented) and other partly fermented tea :Black tea, waste" +9024090,"TEA, WHETHER OR NOT FLAVOURED:Other black tea (fermented) and other partly fermented tea :Other" +9024809,"MACHINES AND APPLIANCES FOR TESTING THE HARDNESS, STRENGTH, COMPRESSIBILITY, ELASTICITY OR OTHER MECHANICAL PROPERTIES OF MATERIALS (FOR EXAMPLE, METALS, WOOD, TEXTILES, PAPER, PLASTICS):Other machines and appliances :Other(OLD tariff)" +9030000,::MATE +9041110,"PEPPER OF THE GENUS PIPER; DRIED OR CRUSHED OR GROUND FRUITS OF THE GENUS CAPSICUM OR OF THE GENUS PIMENTA :Neither crushed nor ground:Pepper, long(OLD tariff)" +9041120,PEPPER OF THE GENUS PIPER; DRIED OR CRUSHED OR GROUND FRUITS OF THE GENUS CAPSICUM OR OF THE GENUS PIMENTA :Neither crushed nor ground:Light black pepper(OLD tariff) +9041130,"PEPPER OF THE GENUS PIPER; DRIED OR CRUSHED OR GROUND FRUITS OF THE GENUS CAPSICUM OR OF THE GENUS PIMENTA :Neither crushed nor ground:Black pepper, garbled(OLD tariff)" +9041140,PEPPER OF THE GENUS PIPER; DRIED OR CRUSHED OR GROUND FRUITS OF THE GENUS CAPSICUM OR OF THE GENUS PIMENTA :Neither crushed nor ground:Black pepper ungarbled(OLD tariff) +9041150,"PEPPER OF THE GENUS PIPER; DRIED OR CRUSHED OR GROUND FRUITS OF THE GENUS CAPSICUM OR OF THE GENUS PIMENTA :Neither crushed nor ground:Green pepper, dehydrated(OLD tariff)" +9041160,PEPPER OF THE GENUS PIPER; DRIED OR CRUSHED OR GROUND FRUITS OF THE GENUS CAPSICUM OR OF THE GENUS PIMENTA :Neither crushed nor ground:Pepper pinheads(OLD tariff) +9041170,"PEPPER OF THE GENUS PIPER; DRIED OR CRUSHED OR GROUND FRUITS OF THE GENUS CAPSICUM OR OF THE GENUS PIMENTA :Neither crushed nor ground:Green pepper, frozen or dried(OLD tariff)" +9041180,"PEPPER OF THE GENUS PIPER; DRIED OR CRUSHED OR GROUND FRUITS OF THE GENUS CAPSICUM OR OF THE GENUS PIMENTA :Neither crushed nor ground:Pepper other than green, frozen(OLD tariff)" +9041190,PEPPER OF THE GENUS PIPER; DRIED OR CRUSHED OR GROUND FRUITS OF THE GENUS CAPSICUM OR OF THE GENUS PIMENTA :Neither crushed nor ground:Other(OLD tariff) +9041200,PEPPER OF THE GENUS PIPER; DRIED OR CRUSHED OR GROUND FRUITS OF THE GENUS CAPSICUM OR OF THE GENUS PIMENTA ::Crushed or ground(OLD tariff) +9042110,"PEPPER OF THE GENUS PIPER; DRIED OR CRUSHED OR GROUND FRUITS OF THE GENUS CAPSICUM OR OF THE GENUS PIMENTA :Dried, neither crushed nor gorund:Of genus Capsicum(OLD tariff)" +9042120,"PEPPER OF THE GENUS PIPER; DRIED OR CRUSHED OR GROUND FRUITS OF THE GENUS CAPSICUM OR OF THE GENUS PIMENTA :Dried, neither crushed nor gorund:Of genus Pimenta(OLD tariff)" +9042211,PEPPER OF THE GENUS PIPER; DRIED OR CRUSHED OR GROUND FRUITS OF THE GENUS CAPSICUM OR OF THE GENUS PIMENTA :crushed or ground:Chilly power(OLD tariff) +9042212,PEPPER OF THE GENUS PIPER; DRIED OR CRUSHED OR GROUND FRUITS OF THE GENUS CAPSICUM OR OF THE GENUS PIMENTA :crushed or ground:Chilly seeds(OLD tariff) +9042219,PEPPER OF THE GENUS PIPER; DRIED OR CRUSHED OR GROUND FRUITS OF THE GENUS CAPSICUM OR OF THE GENUS 
PIMENTA :crushed or ground:other(OLD tariff) +9042221,PEPPER OF THE GENUS PIPER; DRIED OR CRUSHED OR GROUND FRUITS OF THE GENUS CAPSICUM OR OF THE GENUS PIMENTA :crushed or ground:Powder(OLD tariff) +9042229,PEPPER OF THE GENUS PIPER; DRIED OR CRUSHED OR GROUND FRUITS OF THE GENUS CAPSICUM OR OF THE GENUS PIMENTA :crushed or ground:other(OLD tariff) +9050000,vanilla +9051000,VANILLA:Vannila:Neither crushed nor ground +9052000,VANILLA:Crushed or ground:Crushed or ground +9060000,cinnamon and flowers neither crushed nor ground +9061100,cinnamon and flowers neither crushed nor ground >> cinnamon cinnamomum zeylanicum blume +9061110,CINNAMON AND CINNAMON-TREE FLOWERS :Cinnamon (Cinnamomum Zeylanicum Blume):Cinnamon bark +9061120,CINNAMON AND CINNAMON-TREE FLOWERS :Cinnamon (Cinnamomum Zeylanicum Blume):Cinnamon tree flowers +9061190,CINNAMON AND CINNAMON-TREE FLOWERS :Cinnamon (Cinnamomum Zeylanicum Blume):Other +9061900,cinnamon and flowers neither crushed nor ground >> other +9061910,CINNAMON AND CINNAMON-TREE FLOWERS :Other:cassia +9061990,CINNAMON AND CINNAMON-TREE FLOWERS :Other:other +9062000,CINNAMON AND CINNAMON-TREE FLOWERS ::Crushed or ground +9070000,cloves whole fruit cloves and stems +9071000,cloves whole fruit cloves and stems >> neither crushed nor ground +9071010,"CLOVES (WHOLE FRUIT, CLOVES AND STEMS) :Neither crushed or ground:Extracted" +9071020,"CLOVES (WHOLE FRUIT, CLOVES AND STEMS) :Neither crushed or ground:Not extracted (other than stem)" +9071030,"CLOVES (WHOLE FRUIT, CLOVES AND STEMS) :Neither crushed or ground:Stem" +9071090,"CLOVES (WHOLE FRUIT, CLOVES AND STEMS) :Neither crushed or ground:Other" +9072000,"CLOVES (WHOLE FRUIT, CLOVES AND STEMS) :crushed or ground:Crushed or ground" +9080000,nutmeg mace and cardamoms nutmeg +9081100,nutmeg mace and cardamoms nutmeg >> neither crushed nor ground +9081110,"NUTMEG, MACE AND CARDAMOMS:Neither crushed nor ground:In shell" +9081120,"NUTMEG, MACE AND CARDAMOMS:Neither crushed nor ground:Shelled" +9081200,"NUTMEG, MACE AND CARDAMOMS:crushed or gorund:Crushed or ground" +9082100,"NUTMEG, MACE AND CARDAMOMS:mace:Neither crushed nor ground" +9082200,"NUTMEG, MACE AND CARDAMOMS:crushed or ground:crushed or ground" +9083100,nutmeg mace and cardamoms nutmeg >> neither crushed nor ground +9083110,"NUTMEG, MACE AND CARDAMOMS:Cardomoms Neither crushed nor ground:Large (amomum)" +9083120,"NUTMEG, MACE AND CARDAMOMS:Cardomoms Neither crushed nor ground:Small (Ellettaria), alleppey green" +9083130,"NUTMEG, MACE AND CARDAMOMS:Cardomoms Neither crushed nor ground:Small, Coorg green" +9083140,"NUTMEG, MACE AND CARDAMOMS:Cardomoms Neither crushed nor ground:Small, bleached, half bleached or bleachable" +9083150,"NUTMEG, MACE AND CARDAMOMS:Cardomoms Neither crushed nor ground:Small, fixed" +9083190,"NUTMEG, MACE AND CARDAMOMS:Cardomoms Neither crushed nor ground:Other" +9083200,nutmeg mace and cardamoms nutmeg >> crushed or ground +9083210,"NUTMEG, MACE AND CARDAMOMS:crushed or ground:Powder" +9083220,"NUTMEG, MACE AND CARDAMOMS:crushed or ground:Small cardomom seeds" +9083230,"NUTMEG, MACE AND CARDAMOMS:crushed or ground:Cardomom husk" +9083290,"NUTMEG, MACE AND CARDAMOMS:crushed or ground:Other" +9090000,seeds of anise badian fennel coriander cumin or caraway juniper berries seeds of coriander +9092100,seeds of anise badian fennel coriander cumin or caraway juniper berries seeds of coriander >> neither crushed nor ground +9092110,"SEEDS OF ANISE, BADIAN, FENNEL, CORIANDER, CUMIN OR CARAWAY; JUNIPER BERRIES:Neither crushed nor ground:Of 
seed quality" +9092190,"SEEDS OF ANISE, BADIAN, FENNEL, CORIANDER, CUMIN OR CARAWAY; JUNIPER BERRIES:Neither crushed nor ground:other" +9092200,"SEEDS OF ANISE, BADIAN, FENNEL, CORIANDER, CUMIN OR CARAWAY; JUNIPER BERRIES:Crushed or ground:Crushed or ground" +9093100,seeds of anise badian fennel coriander cumin or caraway juniper berries seeds of coriander >> neither crushed nor ground cumin black +9093111,"SEEDS OF ANISE, BADIAN, FENNEL, CORIANDER, CUMIN OR CARAWAY; JUNIPER BERRIES:Seeds of cumin Neither crushed nor ground:Of seed quality" +9093119,"SEEDS OF ANISE, BADIAN, FENNEL, CORIANDER, CUMIN OR CARAWAY; JUNIPER BERRIES:Seeds of cumin Neither crushed nor ground:other" +9093121,"SEEDS OF ANISE, BADIAN, FENNEL, CORIANDER, CUMIN OR CARAWAY; JUNIPER BERRIES:Seeds of cumin Neither crushed nor ground:of seed quality" +9093129,"SEEDS OF ANISE, BADIAN, FENNEL, CORIANDER, CUMIN OR CARAWAY; JUNIPER BERRIES:Seeds of cumin Neither crushed nor ground:other" +9093200,"SEEDS OF ANISE, BADIAN, FENNEL, CORIANDER, CUMIN OR CARAWAY; JUNIPER BERRIES:Crushed or ground:Crushed or ground" +9096100,seeds of anise badian fennel coriander cumin or caraway juniper berries seeds of coriander >> neither crushed nor ground seeds of anise +9096111,"SEEDS OF ANISE, BADIAN, FENNEL, CORIANDER, CUMIN OR CARAWAY; JUNIPER BERRIES:Seeds of anise, badian, caraway or fennel juniper berries: neither crushed nor gorund:of Seed quality" +9096119,"SEEDS OF ANISE, BADIAN, FENNEL, CORIANDER, CUMIN OR CARAWAY; JUNIPER BERRIES:Seeds of anise, badian, caraway or fennel juniper berries: neither crushed nor gorund:Other" +9096121,"SEEDS OF ANISE, BADIAN, FENNEL, CORIANDER, CUMIN OR CARAWAY; JUNIPER BERRIES:Seeds of anise, badian, caraway or fennel juniper berries: neither crushed nor gorund:Of seed quality" +9096129,"SEEDS OF ANISE, BADIAN, FENNEL, CORIANDER, CUMIN OR CARAWAY; JUNIPER BERRIES:Seeds of anise, badian, caraway or fennel juniper berries: neither crushed nor gorund:Other" +9096131,"SEEDS OF ANISE, BADIAN, FENNEL, CORIANDER, CUMIN OR CARAWAY; JUNIPER BERRIES:Seeds of anise, badian, caraway or fennel juniper berries: neither crushed nor gorund:Of seed quality" +9096139,"SEEDS OF ANISE, BADIAN, FENNEL, CORIANDER, CUMIN OR CARAWAY; JUNIPER BERRIES:Seeds of anise, badian, caraway or fennel juniper berries: neither crushed nor gorund:other" +9096141,"SEEDS OF ANISE, BADIAN, FENNEL, CORIANDER, CUMIN OR CARAWAY; JUNIPER BERRIES:Seeds of anise, badian, caraway or fennel juniper berries: neither crushed nor gorund:Of seed quality" +9096149,"SEEDS OF ANISE, BADIAN, FENNEL, CORIANDER, CUMIN OR CARAWAY; JUNIPER BERRIES:Seeds of anise, badian, caraway or fennel juniper berries: neither crushed nor gorund:other" +9096200,seeds of anise badian fennel coriander cumin or caraway juniper berries seeds of coriander >> crushed or ground +9096210,"SEEDS OF ANISE, BADIAN, FENNEL, CORIANDER, CUMIN OR CARAWAY; JUNIPER BERRIES:Crushed or ground:Anise" +9096220,"SEEDS OF ANISE, BADIAN, FENNEL, CORIANDER, CUMIN OR CARAWAY; JUNIPER BERRIES:Crushed or ground:Badian" +9096230,"SEEDS OF ANISE, BADIAN, FENNEL, CORIANDER, CUMIN OR CARAWAY; JUNIPER BERRIES:Crushed or ground:Caraway or fennel" +9096240,"SEEDS OF ANISE, BADIAN, FENNEL, CORIANDER, CUMIN OR CARAWAY; JUNIPER BERRIES:Crushed or ground:Juniper berries" +9100000,ginger saffron turmeric curcuma thyme bay leaves curry and other spices ginger +9101100,ginger saffron turmeric curcuma thyme bay leaves curry and other spices ginger >> neither crushed nor ground +9101110,"GINGER, SAFFRON, TURMERIC 
(CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Ginger Neither crushed nor ground:Fresh" +9101120,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Ginger Neither crushed nor ground:Dried, unbleached" +9101130,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Ginger Neither crushed nor ground:Dried, bleached" +9101190,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Ginger Neither crushed nor ground:Other" +9101200,ginger saffron turmeric curcuma thyme bay leaves curry and other spices ginger >> crushed or ground +9101210,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Crushed or ground:Powder" +9101290,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Crushed or ground:Other" +9102000,ginger saffron turmeric curcuma thyme bay leaves curry and other spices ginger >> saffron +9102010,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Saffron :Saffron stigma" +9102020,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Saffron :Saffron stamen" +9102090,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Saffron :Other" +9103000,ginger saffron turmeric curcuma thyme bay leaves curry and other spices ginger >> turmeric curcuma +9103010,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Turmeric (Curcuma):Fresh" +9103020,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Turmeric (Curcuma):Dried" +9103030,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Turmeric (Curcuma):Powder" +9103090,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Turmeric (Curcuma):Other " +9109100,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES::Mixtures referred to in Note 1(b) to this Chapter" +9109900,ginger saffron turmeric curcuma thyme bay leaves curry and other spices ginger >> other seed +9109911,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Other :Celery" +9109912,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Other :Fenugreek" +9109913,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Other :Dill" +9109914,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Other :Ajwain" +9109915,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Other :Cassia torea" +9109919,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Other :Other " +9109921,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Other :Cassia" +9109923,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Other :Celery" +9109924,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Other :Fenugreek" +9109925,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Other :Dill" +9109926,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Other :Poppy" +9109927,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Other :Mustard" +9109929,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Other :Other " +9109930,ginger saffron turmeric curcuma thyme bay leaves curry and other spices ginger >> other seed >> 
husk +9109939,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Other :Other(OLD tariff)" +9109990,"GINGER, SAFFRON, TURMERIC (CURCUMA), THYME, BAY LEAVES, CURRY AND OTHER SPICES:Other :Other" +9404901,"MATTRESS SUPPORTS; ARTICLES OF BEDDING AND SIMILAR FURNISHING (FOR EXAMPLE, MATTRESSES, QUILTS, EIDERDOWNS, CUSHIONS, POUFFES AND PILLOWS) FITTED WITH SPRINGS OR STUFFED OR INTERNALLY FITTED WITH ANY MATERIAL OR OF CELLULAR RUBBER OR PLASTICS, WHETHER OR NOT COVERED:Other :Quilts(OLD tariff)" +9404909,"MATTRESS SUPPORTS; ARTICLES OF BEDDING AND SIMILAR FURNISHING (FOR EXAMPLE, MATTRESSES, QUILTS, EIDERDOWNS, CUSHIONS, POUFFES AND PILLOWS) FITTED WITH SPRINGS OR STUFFED OR INTERNALLY FITTED WITH ANY MATERIAL OR OF CELLULAR RUBBER OR PLASTICS, WHETHER OR NOT COVERED:Other :Other(OLD tariff)" +9405503,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED:Non-electrical lamps and lighting fittings :Oil pressure lamps(OLD tariff)" +9405505,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED:Non-electrical lamps and lighting fittings :Other oil lamps(OLD tariff)" +9406001,PREFABRICATED BUILDINGS:Prefabricated buildings :Green houses(OLD tariff) +9406009,PREFABRICATED BUILDINGS:Prefabricated buildings :Other(OLD tariff) +9608393,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Other :Other fountain pen(OLD tariff)" +9608399,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Other :Other(OLD tariff)" +9608919,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Pen nibs and nib points :Other(OLD tariff)" +9617001,"VACUUM FLASKS AND OTHER VACUUM VESSELS, COMPLETE WITH CASES; PARTS THEREOF OTHER THAN GLASS INNERS:Vacuum flasks and other vacuum vessels, complete with cases; parts thereof other than glass inners:Vacuum flasks and other vacuum vessels, complete with case(OLD tariff)" +10000000, +10010000, >> wheat and meslin durum wheat +10011100,WHEAT AND MESLIN:Durum Wheat:seed +10011900,WHEAT AND MESLIN:other:other +10019100,WHEAT AND MESLIN:other:seed +10019900, >> wheat and meslin durum wheat >> other +10019910,WHEAT AND MESLIN:other:wheat +10019920,WHEAT AND MESLIN:other:Meslin +10020000, >> rye +10021000,RYE:rye seed:Seed +10029000,RYE:Other:Other +10030000, >> barley +10031000,BARLEY:seed:Seed +10039000,BARLEY:Other:Other +10040000, >> oats +10041000,OATS:Oats 
seed:Seed +10049000,OATS:Other:Other +10050000, >> maize corn +10051000,MAIZE (CORN)::Seed +10059000,MAIZE (CORN):other:Other +10059011,MAIZE (CORN):other:Yellow +10059019,MAIZE (CORN):other:other +10059020,MAIZE (CORN):other:Flint corn (Zeamays var. Indurata) +10059030,"MAIZE (CORN):other:Popcorn (Zea mays ver,everta)" +10059090,MAIZE (CORN):other:Other +10060000, >> rice +10061000, >> rice >> rice in the husk paddy or rough +10061010,RICE:Rice in the husk (paddy or rough):Of seed quality +10061090,RICE:Rice in the husk (paddy or rough):Other +10062000,RICE::Husked (brown) rice +10063000, >> rice >> or rice whether or not polished or glazed +10063010,"RICE:Semi-milled or wholly-milled rice, whether or not polished or glazed :Rice, parboiled" +10063020,"RICE:Semi-milled or wholly-milled rice, whether or not polished or glazed :Basmati rice" +10063090,"RICE:Semi-milled or wholly-milled rice, whether or not polished or glazed :Other" +10064000,RICE::Broken rice +10070000, >> grain sorghum +10070010,GRAIN SORGHUM:Grain sorghum :Of seed quality(OLD tariff) +10070090,GRAIN SORGHUM:Grain sorghum :Other(OLD tariff) +10071000,GRAIN SORGHUM:Seed:Seed +10079000,GRAIN SORGHUM:Other:Other +10080000, >> buckwheat millet and canary seeds other cereals +10081000, >> buckwheat millet and canary seeds other cereals >> buckwheat +10081010,"BUCKWHEAT, MILLET AND CANARY SEED; OTHER CEREALS:Buckwheat :Of seed quality" +10081090,"BUCKWHEAT, MILLET AND CANARY SEED; OTHER CEREALS:Buckwheat :Other" +10082100, >> buckwheat millet and canary seeds other cereals >> seed +10082110,"BUCKWHEAT, MILLET AND CANARY SEED; OTHER CEREALS:Millet Seed:Jawar" +10082120,"BUCKWHEAT, MILLET AND CANARY SEED; OTHER CEREALS:Millet Seed:Bajra" +10082130,"BUCKWHEAT, MILLET AND CANARY SEED; OTHER CEREALS:Millet Seed:Ragi" +10082140, >> buckwheat millet and canary seeds other cereals >> seed >> barnyard echinochloa esculenta l +10082150, >> buckwheat millet and canary seeds other cereals >> seed >> proso panicum miliaceum l +10082160, >> buckwheat millet and canary seeds other cereals >> seed >> foxtail setaria italica l +10082170, >> buckwheat millet and canary seeds other cereals >> seed >> kodo paspalum scrobiculatum l +10082180, >> buckwheat millet and canary seeds other cereals >> seed >> little panicum sumatrense l other +10082199, >> buckwheat millet and canary seeds other cereals >> seed >> other +10082900, >> buckwheat millet and canary seeds other cereals >> other +10082910,"BUCKWHEAT, MILLET AND CANARY SEED; OTHER CEREALS:Other:Jawar" +10082920,"BUCKWHEAT, MILLET AND CANARY SEED; OTHER CEREALS:Other:Bajra" +10082930,"BUCKWHEAT, MILLET AND CANARY SEED; OTHER CEREALS:Other:Ragi" +10082940, >> buckwheat millet and canary seeds other cereals >> other >> barnyard echinochloa esculenta l +10082950, >> buckwheat millet and canary seeds other cereals >> other >> proso panicum miliaceum l +10082960, >> buckwheat millet and canary seeds other cereals >> other >> foxtail setaria italica l +10082970, >> buckwheat millet and canary seeds other cereals >> other >> kodo paspalum scrobiculatum l +10082980, >> buckwheat millet and canary seeds other cereals >> other >> little panicum sumatrense l other +10082999, >> buckwheat millet and canary seeds other cereals >> other >> other +10083000, >> buckwheat millet and canary seeds other cereals >> canary seeds +10083010,"BUCKWHEAT, MILLET AND CANARY SEED; OTHER CEREALS:Canary seed :Of seed quality" +10083090,"BUCKWHEAT, MILLET AND CANARY SEED; OTHER CEREALS:Canary seed :Other" 
+10084000,"BUCKWHEAT, MILLET AND CANARY SEED; OTHER CEREALS:Fonio:Fonio" +10085000,"BUCKWHEAT, MILLET AND CANARY SEED; OTHER CEREALS:Quinoa:Quinoa (Chenopodium quinoa)" +10086000,"BUCKWHEAT, MILLET AND CANARY SEED; OTHER CEREALS:Triticale:Triticale" +10089000, >> buckwheat millet and canary seeds other cereals >> other cereals +10089010,"BUCKWHEAT, MILLET AND CANARY SEED; OTHER CEREALS:Other cereals :Of seed quality" +10089090,"BUCKWHEAT, MILLET AND CANARY SEED; OTHER CEREALS:Other cereals :Other" +11010000,::WHEAT OR MESLIN FLOUR +11020000,cereal flours other than that of wheat or meslin +11022000,CEREAL FLOURS OTHER THAN THAT OF WHEAT OR MESLIN::Maize (corn) flour +11029000,cereal flours other than that of wheat or meslin >> other +11029010,CEREAL FLOURS OTHER THAN THAT OF WHEAT OR MESLIN:other:Rye flour +11029021,CEREAL FLOURS OTHER THAN THAT OF WHEAT OR MESLIN:other:Brown rice flour +11029022,CEREAL FLOURS OTHER THAN THAT OF WHEAT OR MESLIN:other:white rice flour +11029029,CEREAL FLOURS OTHER THAN THAT OF WHEAT OR MESLIN:other:other +11029090,CEREAL FLOURS OTHER THAN THAT OF WHEAT OR MESLIN:other:Other +11030000,cereal groats meal and pellets groats and meal +11031100,cereal groats meal and pellets groats and meal >> of wheat +11031110,"CEREAL GROATS, MEAL AND PELLETS :Of wheat :Groat" +11031120,"CEREAL GROATS, MEAL AND PELLETS :Of wheat :Meal" +11031300,"CEREAL GROATS, MEAL AND PELLETS ::Of maize (corn)" +11031900,"CEREAL GROATS, MEAL AND PELLETS ::Of other cereals" +11032000,"CEREAL GROATS, MEAL AND PELLETS ::Pellets" +11040000,cereal grains otherwise worked for example hulled rolled flaked pearled sliced or kibbled except rice of heading 1006 germ of cereals whole rolled flaked or ground rolled or flaked grains +11041200,"CEREAL GRAINS OTHERWISE WORKED (FOR EXAMPLE, HULLED, ROLLED, FLAKED, PEARLED, SLICED, OR KIBBLED), EXCEPT RICE OF HEADING 1006; GERM OF CEREALS, WHOLE, ROLLED, FLAKED OR GROUND ::Of oats" +11041900,"CEREAL GRAINS OTHERWISE WORKED (FOR EXAMPLE, HULLED, ROLLED, FLAKED, PEARLED, SLICED, OR KIBBLED), EXCEPT RICE OF HEADING 1006; GERM OF CEREALS, WHOLE, ROLLED, FLAKED OR GROUND ::Of other cereals " +11042200,"CEREAL GRAINS OTHERWISE WORKED (FOR EXAMPLE, HULLED, ROLLED, FLAKED, PEARLED, SLICED, OR KIBBLED), EXCEPT RICE OF HEADING 1006; GERM OF CEREALS, WHOLE, ROLLED, FLAKED OR GROUND ::Of oats" +11042300,"CEREAL GRAINS OTHERWISE WORKED (FOR EXAMPLE, HULLED, ROLLED, FLAKED, PEARLED, SLICED, OR KIBBLED), EXCEPT RICE OF HEADING 1006; GERM OF CEREALS, WHOLE, ROLLED, FLAKED OR GROUND ::Of maize (corn)" +11042900,"CEREAL GRAINS OTHERWISE WORKED (FOR EXAMPLE, HULLED, ROLLED, FLAKED, PEARLED, SLICED, OR KIBBLED), EXCEPT RICE OF HEADING 1006; GERM OF CEREALS, WHOLE, ROLLED, FLAKED OR GROUND ::Of other cereals" +11043000,"CEREAL GRAINS OTHERWISE WORKED (FOR EXAMPLE, HULLED, ROLLED, FLAKED, PEARLED, SLICED, OR KIBBLED), EXCEPT RICE OF HEADING 1006; GERM OF CEREALS, WHOLE, ROLLED, FLAKED OR GROUND ::Germ of cereals, whole, rolled, flaked or ground" +11050000,flour meal powder flakes granules and pellets of potatoes +11051000,"FLOUR, MEAL, POWDER, FLAKES, GRANULES AND PELLETS OF POTATOES::Flour, meal and powder" +11052000,"FLOUR, MEAL, POWDER, FLAKES, GRANULES AND PELLETS OF POTATOES::Flakes, granules and pellets" +11060000,flour meal and powder of the dried leguminous vegetables of heading 0713 of sago or of roots or tubers of heading 0714 or of the products of chapter 8 +11061000,of the dried:Of the dried leguminous vegetables of heading 0713:Of the dried leguminous vegetables of 
heading 0713 +11061010,of the dried:Of the dried leguminous vegetables of heading 0713:Guar Meal +11061090,of the dried:Of the dried leguminous vegetables of heading 0713:Others +11062000,flour meal and powder of the dried leguminous vegetables of heading 0713 of sago or of roots or tubers of heading 0714 or of the products of chapter 8 >> of sago or of roots or tubers of heading 0714 +11062010,of the dried:Of sago or of roots or tubers of heading 0714:Of sago +11062020,of the dried:Of sago or of roots or tubers of heading 0714:Of manioc (cassava) +11062090,of the dried:Of sago or of roots or tubers of heading 0714:Of other roots and tubers +11063000,flour meal and powder of the dried leguminous vegetables of heading 0713 of sago or of roots or tubers of heading 0714 or of the products of chapter 8 >> of the products of chapter 8 +11063010,of the dried:Of the products of Chapter 8 :Of tamarind +11063020,of the dried:Of the products of Chapter 8 :Of singoda +11063030,of the dried:Of the products of Chapter 8 :Mango flour +11063090,of the dried:Of the products of Chapter 8 :Other +11070000,malt whether or not roasted +11071000,"MALT, WHETHER OR NOT ROASTED::Not roasted" +11072000,"MALT, WHETHER OR NOT ROASTED::Roasted" +11080000,starches inulin starches +11081100,STARCHES; INULIN ::Wheat starch +11081200,STARCHES; INULIN ::Maize (corn) starch +11081300,STARCHES; INULIN ::Potato starch +11081400,STARCHES; INULIN ::Manioc (cassava) starch +11081900,starches inulin starches >> other +11081910,STARCHES; INULIN :Other :Sago +11081990,STARCHES; INULIN :Other :Other +11082000,STARCHES; INULIN ::Inulin +11090000,"::WHEAT GLUTEN, WHETHER OR NOT DRIED" +11500000,6 for the purposes of 4805 30 wrapping means paper of which more than 40 by weight of the total fibre content consists of wood fibres obtained by the chemical sulphite process having an ash content not exceeding 8 and having a mullen burst index of not less than 7 for the purposes of 4810 22 coated means paper coated on both sides of a total weight not exceeding 72 with a coating weight not exceeding 15 per side on a base of which not less than 50 by weight of the total fibre content consists of wood fibres obtained by a nothwithstanding anything contained in note 12 if paper and paper products of heading 4811 4816 or 4820 are printed with any character name logo motif or format they shall remain classified under the respective headings as long as such products are intended to be used for futher printing or writing +12010000,soya beans whether or not broken +12011000,"SOYA BEANS, WHETHER OR NOT BROKEN:seed:seed" +12019000,"SOYA BEANS, WHETHER OR NOT BROKEN:other:Other" +12020000,not roasted or otherwise cooked whether or not shelled or broken +12023000,not roasted or otherwise cooked whether or not shelled or broken >> seed +12023010,"GROUND-NUTS, NOT ROASTED OR OTHERWISE COOKED, WHETHER OR NOT SHELLED OR BROKEN:seed:H.P.S." +12023090,"GROUND-NUTS, NOT ROASTED OR OTHERWISE COOKED, WHETHER OR NOT SHELLED OR BROKEN:seed:Other" +12024100,not roasted or otherwise cooked whether or not shelled or broken >> in shell +12024110,"GROUND-NUTS, NOT ROASTED OR OTHERWISE COOKED, WHETHER OR NOT SHELLED OR BROKEN:In shell:H.P.S." 
+12024190,"GROUND-NUTS, NOT ROASTED OR OTHERWISE COOKED, WHETHER OR NOT SHELLED OR BROKEN:In shell:Other" +12024200,not roasted or otherwise cooked whether or not shelled or broken >> shelled whether or not broken +12024210,"GROUND-NUTS, NOT ROASTED OR OTHERWISE COOKED, WHETHER OR NOT SHELLED OR BROKEN:Shelled, whether or not broken:Kernels, H.P.S" +12024220,"GROUND-NUTS, NOT ROASTED OR OTHERWISE COOKED, WHETHER OR NOT SHELLED OR BROKEN:Shelled, whether or not broken:Kernelgs, Other" +12024290,"GROUND-NUTS, NOT ROASTED OR OTHERWISE COOKED, WHETHER OR NOT SHELLED OR BROKEN:Shelled, whether or not broken:Other" +12030000,::COPRA +12040000,linseed whether or not broken linseed whether or not broken +12040010,"LINSEED, WHETHER OR NOT BROKEN:Linseed, whether or not broken:Of seed quality" +12040090,"LINSEED, WHETHER OR NOT BROKEN:Linseed, whether or not broken:Other" +12050000,rape or colza seeds whether or not broken +12051000,"RAPE OR COLZA SEEDS, WHETHER OR NOT BROKEN::Low erucic acid rape or colza seeds" +12059000,"RAPE OR COLZA SEEDS, WHETHER OR NOT BROKEN::Other" +12060000,sunflower seeds whether or not broken sunflower seeds whether or not broken +12060010,"SUNFLOWER SEEDS, WHETHER OR NOT BROKEN:Sunflower seeds, whether or not broken:Of seed quality" +12060090,"SUNFLOWER SEEDS, WHETHER OR NOT BROKEN:Sunflower seeds, whether or not broken:Other" +12070000,other oil seeds and oleaginous fruits whether or not broken +12071000,other oil seeds and oleaginous fruits whether or not broken >> palm nuts and kernels +12071010,"OTHER OIL SEEDS AND OLEAGINOUS FRUITS, WHETHER OR NOT BROKEN:Palm nuts and kernels:Palm nuts" +12071090,"OTHER OIL SEEDS AND OLEAGINOUS FRUITS, WHETHER OR NOT BROKEN:Palm nuts and kernels:Palm kernels" +12072100,"OTHER OIL SEEDS AND OLEAGINOUS FRUITS, WHETHER OR NOT BROKEN:Cotton seeds:Seed" +12072900,"OTHER OIL SEEDS AND OLEAGINOUS FRUITS, WHETHER OR NOT BROKEN:Other:Other" +12073000,other oil seeds and oleaginous fruits whether or not broken >> castor oil seeds +12073010,"OTHER OIL SEEDS AND OLEAGINOUS FRUITS, WHETHER OR NOT BROKEN:Castor Oil seeds:Of seed quality" +12073090,"OTHER OIL SEEDS AND OLEAGINOUS FRUITS, WHETHER OR NOT BROKEN:Castor Oil seeds:Other" +12074000,other oil seeds and oleaginous fruits whether or not broken >> sesamum seeds +12074010,"OTHER OIL SEEDS AND OLEAGINOUS FRUITS, WHETHER OR NOT BROKEN:Sesamum seeds :Of seed quality" +12074090,"OTHER OIL SEEDS AND OLEAGINOUS FRUITS, WHETHER OR NOT BROKEN:Sesamum seeds :Other" +12075000,other oil seeds and oleaginous fruits whether or not broken >> mustard seeds +12075010,"OTHER OIL SEEDS AND OLEAGINOUS FRUITS, WHETHER OR NOT BROKEN:Mustard seeds :Of seed quality" +12075090,"OTHER OIL SEEDS AND OLEAGINOUS FRUITS, WHETHER OR NOT BROKEN:Mustard seeds :Other " +12076000,other oil seeds and oleaginous fruits whether or not broken >> safflower carthamus tinctorius seeds +12076010,"OTHER OIL SEEDS AND OLEAGINOUS FRUITS, WHETHER OR NOT BROKEN:Safflower (Carthamus tinctorius) seeds:Of seed quality" +12076090,"OTHER OIL SEEDS AND OLEAGINOUS FRUITS, WHETHER OR NOT BROKEN:Safflower (Carthamus tinctorius) seeds:Other" +12077000,other oil seeds and oleaginous fruits whether or not broken >> melon seeds +12077010,"OTHER OIL SEEDS AND OLEAGINOUS FRUITS, WHETHER OR NOT BROKEN:Melon seeds:Of seed quality" +12077090,"OTHER OIL SEEDS AND OLEAGINOUS FRUITS, WHETHER OR NOT BROKEN:Melon seeds:Other" +12079100,"OTHER OIL SEEDS AND OLEAGINOUS FRUITS, WHETHER OR NOT BROKEN::Poppy seeds" +12079900,other oil seeds and oleaginous fruits 
whether or not broken >> other +12079910,"OTHER OIL SEEDS AND OLEAGINOUS FRUITS, WHETHER OR NOT BROKEN:Other :Ajams" +12079920,"OTHER OIL SEEDS AND OLEAGINOUS FRUITS, WHETHER OR NOT BROKEN:Other :Mango kernel" +12079930,"OTHER OIL SEEDS AND OLEAGINOUS FRUITS, WHETHER OR NOT BROKEN:Other :Niger seed" +12079940,"OTHER OIL SEEDS AND OLEAGINOUS FRUITS, WHETHER OR NOT BROKEN:Other :Kokam" +12079990,"OTHER OIL SEEDS AND OLEAGINOUS FRUITS, WHETHER OR NOT BROKEN:Other :Other" +12080000,flours and meals of oil seeds or oleaginous fruits other than those of mustard +12081000,"FLOURS AND MEALS OF OIL SEEDS OR OLEAGINOUS FRUITS, OTHER THAN THOSE OF MUSTARD::Of soya beans" +12089000,"FLOURS AND MEALS OF OIL SEEDS OR OLEAGINOUS FRUITS, OTHER THAN THOSE OF MUSTARD::Other" +12090000,seeds fruit and spores of a kind used for sowing +12091000,"SEEDS, FRUIT AND SPORES, OF A KIND USED FOR SOWING::Sugar beet seed " +12092100,"SEEDS, FRUIT AND SPORES, OF A KIND USED FOR SOWING::Lucerne (alfalfa) seed" +12092200,"SEEDS, FRUIT AND SPORES, OF A KIND USED FOR SOWING::Clover (Trifolium spp.) seed" +12092300,"SEEDS, FRUIT AND SPORES, OF A KIND USED FOR SOWING::Fescue seed" +12092400,"SEEDS, FRUIT AND SPORES, OF A KIND USED FOR SOWING::Kentucky blue grass (Poa pratensis L.) seed" +12092500,"SEEDS, FRUIT AND SPORES, OF A KIND USED FOR SOWING::Rye grass (Lolium multiflorum Lam., Lolium perenne L.) seed" +12092900,seeds fruit and spores of a kind used for sowing >> other +12092910,"SEEDS, FRUIT AND SPORES, OF A KIND USED FOR SOWING:Other :Australian lupin seeds" +12092990,"SEEDS, FRUIT AND SPORES, OF A KIND USED FOR SOWING:Other :Other" +12093000,"SEEDS, FRUIT AND SPORES, OF A KIND USED FOR SOWING::Seeds of herbaceous plants cultivated principally for their flowers " +12099100,seeds fruit and spores of a kind used for sowing >> vegetable seeds +12099110,"SEEDS, FRUIT AND SPORES, OF A KIND USED FOR SOWING:Vegetable seeds :Of Cabbage" +12099120,"SEEDS, FRUIT AND SPORES, OF A KIND USED FOR SOWING:Vegetable seeds :Of Cauliflower" +12099130,"SEEDS, FRUIT AND SPORES, OF A KIND USED FOR SOWING:Vegetable seeds :Of Onion" +12099140,"SEEDS, FRUIT AND SPORES, OF A KIND USED FOR SOWING:Vegetable seeds :Of Pea" +12099150,"SEEDS, FRUIT AND SPORES, OF A KIND USED FOR SOWING:Vegetable seeds :Of Radish" +12099160,"SEEDS, FRUIT AND SPORES, OF A KIND USED FOR SOWING:Vegetable seeds :Of Tomato" +12099170,"SEEDS, FRUIT AND SPORES, OF A KIND USED FOR SOWING:Vegetable seeds :OF CHILLY OF GENUS CAPSICUM" +12099190,"SEEDS, FRUIT AND SPORES, OF A KIND USED FOR SOWING:Vegetable seeds :Other" +12099900,seeds fruit and spores of a kind used for sowing >> other +12099910,"SEEDS, FRUIT AND SPORES, OF A KIND USED FOR SOWING:Other :Fruit seeds for planting or sowing" +12099990,"SEEDS, FRUIT AND SPORES, OF A KIND USED FOR SOWING:Other :Other" +12100000,hop cones fresh or dried whether or not ground powdered or in the form of pellets lupulin +12101000,"HOP CONES, FRESH OR DRIED, WHETHER OR NOT GROUND, POWDERED OR IN THE FORM OF PELLETS; LUPULIN::Hop cones, neither ground nor powdered nor in the form of pellets" +12102000,"HOP CONES, FRESH OR DRIED, WHETHER OR NOT GROUND, POWDERED OR IN THE FORM OF PELLETS; LUPULIN::Hop cones, ground, powdered or in the form of pellets; lupulin" +12110000,plants and parts of plants including seeds and fruits of a kind used primarily in perfumery in pharmacy or for insecticidal fungicidal or similar purpose fresh chilled frozen or dried whether or not cut crushed or powdered +12112000,"PLANTS AND PARTS OF PLANTS (INCLUDING 
SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED::Ginseng roots" +12113000,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED::Coca leaf" +12114000,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED::Poppy straw" +12115000,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED::Ephedra" +12116000,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:bark:Bark of African cherry (Prunus africana)" +12119000,plants and parts of plants including seeds and fruits of a kind used primarily in perfumery in pharmacy or for insecticidal fungicidal or similar purpose fresh chilled frozen or dried whether or not cut crushed or powdered >> other seeds kernel aril fruit pericarp fruit rind endosperm mesocarp endocarp +12119011,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Ambrette seeds" +12119012,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Nuxvomica, dried ripe seeds" +12119013,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Psyllium seed (isobgul)" +12119014,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Neem seed" +12119015,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Jojoba seed" +12119016,plants and parts of plants including seeds and fruits of a kind used primarily in perfumery in pharmacy or for insecticidal fungicidal or similar purpose fresh chilled frozen or dried whether or not cut crushed or powdered >> other seeds kernel aril fruit pericarp fruit rind endosperm mesocarp endocarp >> garcinia +12119019,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Other " +12119021,"PLANTS AND PARTS OF PLANTS (INCLUDING 
SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Belladona leaves(OLD tariff)" +12119022,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Senna leaves and pods" +12119023,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Neem leaves, powder" +12119024,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Gymnema powder" +12119025,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Cubeb powder" +12119026,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Pyrethrum" +12119029,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Other " +12119031,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Cascara sagrada bark" +12119032,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Psyllium husk (isobgul husk)" +12119033,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Gamboge fruit rind" +12119034,plants and parts of plants including seeds and fruits of a kind used primarily in perfumery in pharmacy or for insecticidal fungicidal or similar purpose fresh chilled frozen or dried whether or not cut crushed or powdered >> other seeds kernel aril fruit pericarp fruit rind endosperm mesocarp endocarp >> ashoka saraca asoca +12119035,plants and parts of plants including seeds and fruits of a kind used primarily in perfumery in pharmacy or for insecticidal fungicidal or similar purpose fresh chilled frozen or dried whether or not cut crushed or powdered >> other seeds kernel aril fruit pericarp fruit rind endosperm mesocarp endocarp >> arjuna terminalia arjuna +12119039,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT 
CUT, CRUSHED OR POWDERED:Other :Other " +12119041,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Belladona roots(OLD tariff)" +12119042,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Galangal rhizomes and roots" +12119043,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Ipecac dried rhizome and roots" +12119044,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Serpentina roots (rowwalfia serpentina and other species of rowwalfias)" +12119045,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Zedovary roots" +12119046,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Kuth root" +12119047,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Sarasaparilla roots" +12119048,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Sweet flag rhizomes" +12119049,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Other" +12119050,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Sandalwood chips and dust(OLD tariff)" +12119051,plants and parts of plants including seeds and fruits of a kind used primarily in perfumery in pharmacy or for insecticidal fungicidal or similar purpose fresh chilled frozen or dried whether or not cut crushed or powdered >> other seeds kernel aril fruit pericarp fruit rind endosperm mesocarp endocarp >> sandalwood chips and dust +12119052,plants and parts of plants including seeds and fruits of a kind used primarily in perfumery in pharmacy or for insecticidal fungicidal or similar purpose fresh chilled frozen or dried whether or not cut crushed or powdered >> other seeds kernel aril fruit pericarp fruit rind endosperm mesocarp endocarp >> vinca rosea herbs +12119053,plants and parts of plants including 
seeds and fruits of a kind used primarily in perfumery in pharmacy or for insecticidal fungicidal or similar purpose fresh chilled frozen or dried whether or not cut crushed or powdered >> other seeds kernel aril fruit pericarp fruit rind endosperm mesocarp endocarp >> mint +12119054,plants and parts of plants including seeds and fruits of a kind used primarily in perfumery in pharmacy or for insecticidal fungicidal or similar purpose fresh chilled frozen or dried whether or not cut crushed or powdered >> other seeds kernel aril fruit pericarp fruit rind endosperm mesocarp endocarp >> agarwood +12119055,plants and parts of plants including seeds and fruits of a kind used primarily in perfumery in pharmacy or for insecticidal fungicidal or similar purpose fresh chilled frozen or dried whether or not cut crushed or powdered >> other seeds kernel aril fruit pericarp fruit rind endosperm mesocarp endocarp >> chirata +12119056,plants and parts of plants including seeds and fruits of a kind used primarily in perfumery in pharmacy or for insecticidal fungicidal or similar purpose fresh chilled frozen or dried whether or not cut crushed or powdered >> other seeds kernel aril fruit pericarp fruit rind endosperm mesocarp endocarp >> basil hyssop rosemary sage and savory +12119057,plants and parts of plants including seeds and fruits of a kind used primarily in perfumery in pharmacy or for insecticidal fungicidal or similar purpose fresh chilled frozen or dried whether or not cut crushed or powdered >> other seeds kernel aril fruit pericarp fruit rind endosperm mesocarp endocarp >> ashwagandha withania somnifera +12119058,plants and parts of plants including seeds and fruits of a kind used primarily in perfumery in pharmacy or for insecticidal fungicidal or similar purpose fresh chilled frozen or dried whether or not cut crushed or powdered >> other seeds kernel aril fruit pericarp fruit rind endosperm mesocarp endocarp >> giloy tinospora cordifolia +12119059,plants and parts of plants including seeds and fruits of a kind used primarily in perfumery in pharmacy or for insecticidal fungicidal or similar purpose fresh chilled frozen or dried whether or not cut crushed or powdered >> other seeds kernel aril fruit pericarp fruit rind endosperm mesocarp endocarp >> other +12119060,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Vinca rosea herbs(OLD tariff)" +12119070,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Mint including leaves (all species)(OLD tariff)" +12119080,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Agarwood (OLD tariff)" +12119090,plants and parts of plants including seeds and fruits of a kind used primarily in perfumery in pharmacy or for insecticidal fungicidal or similar purpose fresh chilled frozen or dried whether or not cut crushed or powdered >> other seeds kernel aril fruit pericarp fruit rind endosperm mesocarp endocarp >> other locust beans seaweeds and other algae sugar +12119091,"PLANTS AND PARTS OF 
PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Chirata(OLD tariff)" +12119092,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Tukmaria(OLD tariff)" +12119093,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Unab (Indian Jujuba or Chinese dates)(OLD tariff)" +12119094,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Basil, hyssop, rosemary, sage and savory(OLD tariff)" +12119095,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Lovage(OLD tariff)" +12119096,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Garcinia(OLD tariff)" +12119099,"PLANTS AND PARTS OF PLANTS (INCLUDING SEEDS AND FRUITS), OF A KIND USED PRIMARILY IN PERFUMERY, IN PHARMACY OR FOR INSECTICIDAL, FUNGICIDAL OR SIMILAR PURPOSE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT CUT, CRUSHED OR POWDERED:Other :Other(OLD tariff)" +12120000,beet and sugarcane fresh chilled frozen or dried whether or not ground fruit stones and kernels and other vegetable products including unroasted chicory roots of the variety cichorium intybus sativum of a kind used primarily for human consumption not elsewhere specified or included other +12122100,beet and sugarcane fresh chilled frozen or dried whether or not ground fruit stones and kernels and other vegetable products including unroasted chicory roots of the variety cichorium intybus sativum of a kind used primarily for human consumption not elsewhere specified or included other >> fit for human consumption +12122110,"LOCUST BEANS, SEAWEEDS AND OTHER ALGAE, SUGAR BEET AND SUGARCANE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT GROUND; FRUIT STONES AND KERNELS AND OTHER VEGETABLE PRODUCTS (INCLUDING UNROASTED CHICORY ROOTS OF THE VARIETY Cichorium intybus sativum) OF A KIND USED PRIMARILY FOR HUMAN CONSUMPTION,NOT ELSEWHERE SPECIFIED OR INCLUDED:Seaweeds and other algae Fit for human consumption:Seaweeds" +12122190,"LOCUST BEANS, SEAWEEDS AND OTHER ALGAE, SUGAR BEET AND SUGARCANE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT GROUND; FRUIT STONES AND KERNELS AND OTHER VEGETABLE PRODUCTS (INCLUDING UNROASTED CHICORY ROOTS OF THE VARIETY Cichorium intybus sativum) OF A KIND USED PRIMARILY FOR HUMAN CONSUMPTION,NOT ELSEWHERE SPECIFIED OR INCLUDED:Seaweeds and other algae Fit for human consumption:Other algae" +12122900,beet and sugarcane fresh chilled frozen or dried whether or not ground fruit stones and kernels and other vegetable products including unroasted chicory roots of 
the variety cichorium intybus sativum of a kind used primarily for human consumption not elsewhere specified or included other >> other +12122910,"LOCUST BEANS, SEAWEEDS AND OTHER ALGAE, SUGAR BEET AND SUGARCANE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT GROUND; FRUIT STONES AND KERNELS AND OTHER VEGETABLE PRODUCTS (INCLUDING UNROASTED CHICORY ROOTS OF THE VARIETY Cichorium intybus sativum) OF A KIND USED PRIMARILY FOR HUMAN CONSUMPTION,NOT ELSEWHERE SPECIFIED OR INCLUDED:Other:Seaweeds" +12122990,"LOCUST BEANS, SEAWEEDS AND OTHER ALGAE, SUGAR BEET AND SUGARCANE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT GROUND; FRUIT STONES AND KERNELS AND OTHER VEGETABLE PRODUCTS (INCLUDING UNROASTED CHICORY ROOTS OF THE VARIETY Cichorium intybus sativum) OF A KIND USED PRIMARILY FOR HUMAN CONSUMPTION,NOT ELSEWHERE SPECIFIED OR INCLUDED:Other:Other algae" +12129100,"LOCUST BEANS, SEAWEEDS AND OTHER ALGAE, SUGAR BEET AND SUGARCANE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT GROUND; FRUIT STONES AND KERNELS AND OTHER VEGETABLE PRODUCTS (INCLUDING UNROASTED CHICORY ROOTS OF THE VARIETY Cichorium intybus sativum) OF A KIND USED PRIMARILY FOR HUMAN CONSUMPTION,NOT ELSEWHERE SPECIFIED OR INCLUDED::Sugar beet" +12129200,"LOCUST BEANS, SEAWEEDS AND OTHER ALGAE, SUGAR BEET AND SUGARCANE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT GROUND; FRUIT STONES AND KERNELS AND OTHER VEGETABLE PRODUCTS (INCLUDING UNROASTED CHICORY ROOTS OF THE VARIETY Cichorium intybus sativum) OF A KIND USED PRIMARILY FOR HUMAN CONSUMPTION,NOT ELSEWHERE SPECIFIED OR INCLUDED:Locust beans:Locus beans" +12129300,"LOCUST BEANS, SEAWEEDS AND OTHER ALGAE, SUGAR BEET AND SUGARCANE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT GROUND; FRUIT STONES AND KERNELS AND OTHER VEGETABLE PRODUCTS (INCLUDING UNROASTED CHICORY ROOTS OF THE VARIETY Cichorium intybus sativum) OF A KIND USED PRIMARILY FOR HUMAN CONSUMPTION,NOT ELSEWHERE SPECIFIED OR INCLUDED:Sugar cane:Sugar cane" +12129400,"LOCUST BEANS, SEAWEEDS AND OTHER ALGAE, SUGAR BEET AND SUGARCANE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT GROUND; FRUIT STONES AND KERNELS AND OTHER VEGETABLE PRODUCTS (INCLUDING UNROASTED CHICORY ROOTS OF THE VARIETY Cichorium intybus sativum) OF A KIND USED PRIMARILY FOR HUMAN CONSUMPTION,NOT ELSEWHERE SPECIFIED OR INCLUDED:Chicory roots:Chicory roots" +12129900,beet and sugarcane fresh chilled frozen or dried whether or not ground fruit stones and kernels and other vegetable products including unroasted chicory roots of the variety cichorium intybus sativum of a kind used primarily for human consumption not elsewhere specified or included other >> other +12129910,"LOCUST BEANS, SEAWEEDS AND OTHER ALGAE, SUGAR BEET AND SUGARCANE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT GROUND; FRUIT STONES AND KERNELS AND OTHER VEGETABLE PRODUCTS (INCLUDING UNROASTED CHICORY ROOTS OF THE VARIETY Cichorium intybus sativum) OF A KIND USED PRIMARILY FOR HUMAN CONSUMPTION,NOT ELSEWHERE SPECIFIED OR INCLUDED:Other:Kokam (cocum) flowers" +12129920,"LOCUST BEANS, SEAWEEDS AND OTHER ALGAE, SUGAR BEET AND SUGARCANE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT GROUND; FRUIT STONES AND KERNELS AND OTHER VEGETABLE PRODUCTS (INCLUDING UNROASTED CHICORY ROOTS OF THE VARIETY Cichorium intybus sativum) OF A KIND USED PRIMARILY FOR HUMAN CONSUMPTION,NOT ELSEWHERE SPECIFIED OR INCLUDED:Other:Mohua flowers" +12129990,"LOCUST BEANS, SEAWEEDS AND OTHER ALGAE, SUGAR BEET AND SUGARCANE, FRESH, CHILLED, FROZEN OR DRIED, WHETHER OR NOT GROUND; FRUIT STONES AND KERNELS 
AND OTHER VEGETABLE PRODUCTS (INCLUDING UNROASTED CHICORY ROOTS OF THE VARIETY Cichorium intybus sativum) OF A KIND USED PRIMARILY FOR HUMAN CONSUMPTION,NOT ELSEWHERE SPECIFIED OR INCLUDED:Other:Other" +12130000,"::CEREAL STRAW AND HUSKS, UNPREPARED, WHETHER OR NOT CHOPPED, GROUND, PRESSED OR IN THE FORM OF PELLETS" +12140000,swedes mangolds fodder roots hay lucerne alfalfa clover sainfoin forage kale lupines vetches and similar forage products whether or not in the form of pellets +12141000,"SWEDES, MANGOLDS, FODDER ROOTS, HAY, LUCERNE (alfalfa), CLOVER, SAINFOIN, FORAGE KALE, LUPINES, VETCHES AND SIMILAR FORAGE PRODUCTS, WHETHER OR NOT IN THE FORM OF PELLETS::Lucerne (alfalfa) meal and pellets" +12149000,"SWEDES, MANGOLDS, FODDER ROOTS, HAY, LUCERNE (alfalfa), CLOVER, SAINFOIN, FORAGE KALE, LUPINES, VETCHES AND SIMILAR FORAGE PRODUCTS, WHETHER OR NOT IN THE FORM OF PELLETS::Other" +13010000,lac natural gums resins and oleoresins for example balsams +13012000,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS)::Gum Arabic" +13019000,lac natural gums resins and oleoresins for example balsams >> other natural gums +13019011,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Asian gum" +13019012,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :African gum" +13019013,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Asafoetida" +13019014,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Benjamin ras" +13019015,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Benjamin cowrie" +13019016,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Karaya gum (Indian tragacanth) hastab" +13019017,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Tragacanth (adraganth)" +13019018,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Storax" +13019019,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Other " +13019021,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Copal" +13019022,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Dammar batu" +13019029,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Other Gum resins " +13019031,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Myrrh" +13019032,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Oilbanum or frankincense" +13019033,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Mastic gum" +13019034,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Xanthium gum" +13019039,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Other " +13019041,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Of seeds" +13019042,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Of fruits" +13019043,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Of leaves" +13019044,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Of spices" +13019045,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Of flowers" +13019046,"LAC; NATURAL 
GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Of roots" +13019049,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Other" +13019099,"LAC; NATURAL GUMS, RESINS, GUM-RESINS AND OLEORESINS (FOR EXAMPLE, BALSAMS):Other :Other" +13020000,vegetable saps and extracts pectic substances pectinates and pectates and other mucilages and thickeners whether or not modified derived from vegetable products vegetable saps and extracts +13021100,"VEGETABLE SAPS AND EXTRACTS; PECTIC SUBSTANCES, PECTINATES AND PECTATES; AGAR-AGAR AND OTHER MUCILAGES AND THICKENERS, WHETHER OR NOT MODIFIED, DERIVED FROM VEGETABLE PRODUCTS ::Opium" +13021200,"VEGETABLE SAPS AND EXTRACTS; PECTIC SUBSTANCES, PECTINATES AND PECTATES; AGAR-AGAR AND OTHER MUCILAGES AND THICKENERS, WHETHER OR NOT MODIFIED, DERIVED FROM VEGETABLE PRODUCTS ::Of liquorice" +13021300,"VEGETABLE SAPS AND EXTRACTS; PECTIC SUBSTANCES, PECTINATES AND PECTATES; AGAR-AGAR AND OTHER MUCILAGES AND THICKENERS, WHETHER OR NOT MODIFIED, DERIVED FROM VEGETABLE PRODUCTS ::Of hops" +13021400,"VEGETABLE SAPS AND EXTRACTS; PECTIC SUBSTANCES, PECTINATES AND PECTATES; AGAR-AGAR AND OTHER MUCILAGES AND THICKENERS, WHETHER OR NOT MODIFIED, DERIVED FROM VEGETABLE PRODUCTS ::Of Ephedra" +13021900,vegetable saps and extracts pectic substances pectinates and pectates and other mucilages and thickeners whether or not modified derived from vegetable products vegetable saps and extracts >> other extracts +13021911,"VEGETABLE SAPS AND EXTRACTS; PECTIC SUBSTANCES, PECTINATES AND PECTATES; AGAR-AGAR AND OTHER MUCILAGES AND THICKENERS, WHETHER OR NOT MODIFIED, DERIVED FROM VEGETABLE PRODUCTS :Other :Of belladona" +13021912,"VEGETABLE SAPS AND EXTRACTS; PECTIC SUBSTANCES, PECTINATES AND PECTATES; AGAR-AGAR AND OTHER MUCILAGES AND THICKENERS, WHETHER OR NOT MODIFIED, DERIVED FROM VEGETABLE PRODUCTS :Other :Of cascara sagrada" +13021913,"VEGETABLE SAPS AND EXTRACTS; PECTIC SUBSTANCES, PECTINATES AND PECTATES; AGAR-AGAR AND OTHER MUCILAGES AND THICKENERS, WHETHER OR NOT MODIFIED, DERIVED FROM VEGETABLE PRODUCTS :Other :Of nuxvomica" +13021914,"VEGETABLE SAPS AND EXTRACTS; PECTIC SUBSTANCES, PECTINATES AND PECTATES; AGAR-AGAR AND OTHER MUCILAGES AND THICKENERS, WHETHER OR NOT MODIFIED, DERIVED FROM VEGETABLE PRODUCTS :Other :Of ginseng (including powder)" +13021915,"VEGETABLE SAPS AND EXTRACTS; PECTIC SUBSTANCES, PECTINATES AND PECTATES; AGAR-AGAR AND OTHER MUCILAGES AND THICKENERS, WHETHER OR NOT MODIFIED, DERIVED FROM VEGETABLE PRODUCTS :Other :Of agarose" +13021916,"VEGETABLE SAPS AND EXTRACTS; PECTIC SUBSTANCES, PECTINATES AND PECTATES; AGAR-AGAR AND OTHER MUCILAGES AND THICKENERS, WHETHER OR NOT MODIFIED, DERIVED FROM VEGETABLE PRODUCTS :Other :Of neem" +13021917,"VEGETABLE SAPS AND EXTRACTS; PECTIC SUBSTANCES, PECTINATES AND PECTATES; AGAR-AGAR AND OTHER MUCILAGES AND THICKENERS, WHETHER OR NOT MODIFIED, DERIVED FROM VEGETABLE PRODUCTS :Other :Of gymnema" +13021918,"VEGETABLE SAPS AND EXTRACTS; PECTIC SUBSTANCES, PECTINATES AND PECTATES; AGAR-AGAR AND OTHER MUCILAGES AND THICKENERS, WHETHER OR NOT MODIFIED, DERIVED FROM VEGETABLE PRODUCTS :Other :Of garcinia and gamboge" +13021919,"VEGETABLE SAPS AND EXTRACTS; PECTIC SUBSTANCES, PECTINATES AND PECTATES; AGAR-AGAR AND OTHER MUCILAGES AND THICKENERS, WHETHER OR NOT MODIFIED, DERIVED FROM VEGETABLE PRODUCTS :Other :Other" +13021920,"VEGETABLE SAPS AND EXTRACTS; PECTIC SUBSTANCES, PECTINATES AND PECTATES; AGAR-AGAR AND OTHER MUCILAGES AND THICKENERS, WHETHER OR NOT MODIFIED, 
DERIVED FROM VEGETABLE PRODUCTS :Other :Cashew shell liquid (CNSL), crude" +13021930,"VEGETABLE SAPS AND EXTRACTS; PECTIC SUBSTANCES, PECTINATES AND PECTATES; AGAR-AGAR AND OTHER MUCILAGES AND THICKENERS, WHETHER OR NOT MODIFIED, DERIVED FROM VEGETABLE PRODUCTS :Other :Purified and distilled CNSL (Cardanol)" +13021990,"VEGETABLE SAPS AND EXTRACTS; PECTIC SUBSTANCES, PECTINATES AND PECTATES; AGAR-AGAR AND OTHER MUCILAGES AND THICKENERS, WHETHER OR NOT MODIFIED, DERIVED FROM VEGETABLE PRODUCTS :Other :Other" +13022000,"VEGETABLE SAPS AND EXTRACTS; PECTIC SUBSTANCES, PECTINATES AND PECTATES; AGAR-AGAR AND OTHER MUCILAGES AND THICKENERS, WHETHER OR NOT MODIFIED, DERIVED FROM VEGETABLE PRODUCTS ::Pectic substances, pectinates and pectates " +13023100,"VEGETABLE SAPS AND EXTRACTS; PECTIC SUBSTANCES, PECTINATES AND PECTATES; AGAR-AGAR AND OTHER MUCILAGES AND THICKENERS, WHETHER OR NOT MODIFIED, DERIVED FROM VEGETABLE PRODUCTS ::Agar-agar" +13023200,vegetable saps and extracts pectic substances pectinates and pectates and other mucilages and thickeners whether or not modified derived from vegetable products vegetable saps and extracts >> mucilages and thickeners whether or not modified derived from locust beans locust bean seeds or guar seeds guargum +13023210,"VEGETABLE SAPS AND EXTRACTS; PECTIC SUBSTANCES, PECTINATES AND PECTATES; AGAR-AGAR AND OTHER MUCILAGES AND THICKENERS, WHETHER OR NOT MODIFIED, DERIVED FROM VEGETABLE PRODUCTS :Mucilages and thickeners, whether or not modified, derived from locust beans, locust bean seeds or guar seeds:Guar meal(OLD tariff)" +13023220,"VEGETABLE SAPS AND EXTRACTS; PECTIC SUBSTANCES, PECTINATES AND PECTATES; AGAR-AGAR AND OTHER MUCILAGES AND THICKENERS, WHETHER OR NOT MODIFIED, DERIVED FROM VEGETABLE PRODUCTS :Mucilages and thickeners, whether or not modified, derived from locust beans, locust bean seeds or guar seeds:Guargum refined split(OLD tariff)" +13023230,"VEGETABLE SAPS AND EXTRACTS; PECTIC SUBSTANCES, PECTINATES AND PECTATES; AGAR-AGAR AND OTHER MUCILAGES AND THICKENERS, WHETHER OR NOT MODIFIED, DERIVED FROM VEGETABLE PRODUCTS :Mucilages and thickeners, whether or not modified, derived from locust beans, locust bean seeds or guar seeds:Guargum treated and pulverised(OLD tariff)" +13023239,vegetable saps and extracts pectic substances pectinates and pectates and other mucilages and thickeners whether or not modified derived from vegetable products vegetable saps and extracts >> mucilages and thickeners whether or not modified derived from locust beans locust bean seeds or guar seeds guargum >> other +13023240,"VEGETABLE SAPS AND EXTRACTS; PECTIC SUBSTANCES, PECTINATES AND PECTATES; AGAR-AGAR AND OTHER MUCILAGES AND THICKENERS, WHETHER OR NOT MODIFIED, DERIVED FROM VEGETABLE PRODUCTS :Mucilages and thickeners, whether or not modified, derived from locust beans, locust bean seeds or guar seeds:Kappa carrageenan(OLD tariff)" +13023290,"VEGETABLE SAPS AND EXTRACTS; PECTIC SUBSTANCES, PECTINATES AND PECTATES; AGAR-AGAR AND OTHER MUCILAGES AND THICKENERS, WHETHER OR NOT MODIFIED, DERIVED FROM VEGETABLE PRODUCTS :Mucilages and thickeners, whether or not modified, derived from locust beans, locust bean seeds or guar seeds:Other" +13023900,"VEGETABLE SAPS AND EXTRACTS; PECTIC SUBSTANCES, PECTINATES AND PECTATES; AGAR-AGAR AND OTHER MUCILAGES AND THICKENERS, WHETHER OR NOT MODIFIED, DERIVED FROM VEGETABLE PRODUCTS ::Other" +13023910,vegetable saps and extracts pectic substances pectinates and pectates and other mucilages and thickeners whether or not modified 
derived from vegetable products vegetable saps and extracts >> other >> tamarind kernel powder +13023920,vegetable saps and extracts pectic substances pectinates and pectates and other mucilages and thickeners whether or not modified derived from vegetable products vegetable saps and extracts >> other >> kappa carrageenan +13023990,vegetable saps and extracts pectic substances pectinates and pectates and other mucilages and thickeners whether or not modified derived from vegetable products vegetable saps and extracts >> other >> other chapter 14 vegetable plaiting materials vegetable products not elsewhere specified or included 1 this chapter does not cover the following products which are to be classified in section xi vegetable materials or fibres of vegetable materials of a kind used primarily in the manufacture of textiles however prepared or other vegetable materials which have undergone treatment so as to render them suitable for use 2 heading 1401 applies inter alia to bamboos whether or not split sawn lengthwise cut to length rounded at the ends bleached rendered polished or dyed split osier reeds and the like to rattan cores and to drawn or split rattans the heading does not apply to chipwood heading 4404 3 heading 1404 does not apply to wood wool heading 4405 and prepared knots or tufts for broom or +14010000,vegetable materials of a kind used primarily for plaiting for example bamboos rattans reeds rushes osier raffia cleaned bleached or dyed cereal straw and lime bark +14011000,"VEGETABLE MATERIALS OF A KIND USED PRIMARILY FOR PLAITING (FOR EXAMPLE, BAMBOOS, RATTANS, REEDS, RUSHES, OSIER, RAFFIA, CLEANED, BLEACHED OR DYED CEREAL STRAW, AND LIME BARK)::Bamboos" +14012000,"VEGETABLE MATERIALS OF A KIND USED PRIMARILY FOR PLAITING (FOR EXAMPLE, BAMBOOS, RATTANS, REEDS, RUSHES, OSIER, RAFFIA, CLEANED, BLEACHED OR DYED CEREAL STRAW, AND LIME BARK)::Rattans" +14019000,vegetable materials of a kind used primarily for plaiting for example bamboos rattans reeds rushes osier raffia cleaned bleached or dyed cereal straw and lime bark >> other +14019010,"VEGETABLE MATERIALS OF A KIND USED PRIMARILY FOR PLAITING (FOR EXAMPLE, BAMBOOS, RATTANS, REEDS, RUSHES, OSIER, RAFFIA, CLEANED, BLEACHED OR DYED CEREAL STRAW, AND LIME BARK):Other :Canes" +14019090,"VEGETABLE MATERIALS OF A KIND USED PRIMARILY FOR PLAITING (FOR EXAMPLE, BAMBOOS, RATTANS, REEDS, RUSHES, OSIER, RAFFIA, CLEANED, BLEACHED OR DYED CEREAL STRAW, AND LIME BARK):Other :Other" +14040000,vegetable products not elsewhere specified or included +14042000,VEGETABLE PRODUCTS NOT ELSEWHERE SPECIFIED OR INCLUDED::Cotton linters +14049000,vegetable products not elsewhere specified or included >> other +14049010,VEGETABLE PRODUCTS NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Bidi wrapper leaves (tendu) +14049021,VEGETABLE PRODUCTS NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Powder +14049029,VEGETABLE PRODUCTS NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Other +14049030,"VEGETABLE PRODUCTS NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Hard seeds, pips, hulls and nuts, of a kind used primarily for carving" +14049040,VEGETABLE PRODUCTS NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Betel leaves +14049050,VEGETABLE PRODUCTS NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Indian katha +14049060,"VEGETABLE PRODUCTS NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Coconut shell, unworked" +14049070,VEGETABLE PRODUCTS NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Rudraksha seeds +14049090,VEGETABLE PRODUCTS NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Other +15010000,pig fats 
including lard and poultry fat other than that of heading 0209 or 1503 +15011000,"Pig fat (including Lard) and Polutry fat, other than that of heading 0209 or 1503:Lard:Lard(OLD tariff)" +15012000,"Pig fat (including Lard) and Polutry fat, other than that of heading 0209 or 1503:Other pig fat:Other pig fat" +15019000,"Pig fat (including Lard) and Polutry fat, other than that of heading 0209 or 1503:Other:Other" +15020000,other than those ofheading 1503 +15021000,other than those ofheading 1503 >> tallow +15021010,"FATS OF BOVINE ANIMALS, SHEEP OR GOATS, OTHER THAN THOSE OF HEADING 1503:Tallow:Mutton tallow" +15021090,"FATS OF BOVINE ANIMALS, SHEEP OR GOATS, OTHER THAN THOSE OF HEADING 1503:Tallow:Other" +15029000,other than those ofheading 1503 >> other +15029010,"FATS OF BOVINE ANIMALS, SHEEP OR GOATS, OTHER THAN THOSE OF HEADING 1503:Other:Unrendered fats" +15029020,"FATS OF BOVINE ANIMALS, SHEEP OR GOATS, OTHER THAN THOSE OF HEADING 1503:Other:Rendered fats or solvent extraction fats" +15029090,"FATS OF BOVINE ANIMALS, SHEEP OR GOATS, OTHER THAN THOSE OF HEADING 1503:Other:Other" +15030000,"::LARD STEARIN, LARD OIL, OLEOSTEARIN, OLEO-OIL AND TALLOW OIL, NOT EMULSIFIED OR MIXED OR OTHERWISE PREPARED(OLD tariff)" +15040000,fats and oils and their fractions of fish or marine or not mammals whether refined but not chemically modified +15041000,fats and oils and their fractions of fish or marine or not mammals whether refined but not chemically modified >> fish liver oils and their fractions +15041010,"FATS AND OILS AND THEIR FRACTIONS, OF FISH OR MARINE MAMMALS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Fish liver oils and their fractions:Cod liver oil " +15041091,"FATS AND OILS AND THEIR FRACTIONS, OF FISH OR MARINE MAMMALS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Fish liver oils and their fractions:Squid liver oil" +15041099,"FATS AND OILS AND THEIR FRACTIONS, OF FISH OR MARINE MAMMALS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Fish liver oils and their fractions:Other" +15042000,fats and oils and their fractions of fish or marine or not mammals whether refined but not chemically modified >> fats and oils and their fractions of fish other than liver oils +15042010,"FATS AND OILS AND THEIR FRACTIONS, OF FISH OR MARINE MAMMALS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Fats and oils and their fractions of fish, other than liver oils :Fish body oil" +15042020,"FATS AND OILS AND THEIR FRACTIONS, OF FISH OR MARINE MAMMALS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Fats and oils and their fractions of fish, other than liver oils :Fish lipid oil" +15042030,"FATS AND OILS AND THEIR FRACTIONS, OF FISH OR MARINE MAMMALS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Fats and oils and their fractions of fish, other than liver oils :Sperm oil" +15042090,"FATS AND OILS AND THEIR FRACTIONS, OF FISH OR MARINE MAMMALS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Fats and oils and their fractions of fish, other than liver oils :Other" +15043000,"FATS AND OILS AND THEIR FRACTIONS, OF FISH OR MARINE MAMMALS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED::Fats and oils and their fractions, of marine mammals" +15050000,wool grease and fatty substances derived therefrom including lanolin wool grease and fatty substances derived therefrom including lanolin +15050010,WOOL GREASE AND FATTY SUBSTANCES DERIVED THEREFROM (INCLUDING LANOLIN):Wool grease and fatty substances derived therefrom (including lanolin) :Wool alcohol (including lanolin 
alcohol) +15050020,"WOOL GREASE AND FATTY SUBSTANCES DERIVED THEREFROM (INCLUDING LANOLIN):Wool grease and fatty substances derived therefrom (including lanolin) :Wool grease, crude" +15050090,WOOL GREASE AND FATTY SUBSTANCES DERIVED THEREFROM (INCLUDING LANOLIN):Wool grease and fatty substances derived therefrom (including lanolin) :Other +15060000,their other animal fats and oils and fractions whether or not refined but not chemically modified other animal fats and oils and their fractions whether or not refined but not chemically modified +15060010,"OTHER ANIMAL FATS AND OILS AND THEIR FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other animal fats and oils and their fractions, whether or not refined, but not chemically modified :Neats Foot oil and fats from bone or waste" +15060090,"OTHER ANIMAL FATS AND OILS AND THEIR FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other animal fats and oils and their fractions, whether or not refined, but not chemically modified :Other" +15070000,oil and its fractions whether or not refined but not chemically modified +15071000,"SOYA-BEAN OIL AND ITS FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED::Crude oil, whether or not degummed" +15079000,oil and its fractions whether or not refined but not chemically modified >> other +15079010,"SOYA-BEAN OIL AND ITS FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Edible grade" +15079090,"SOYA-BEAN OIL AND ITS FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Other" +15080000,oil and its fractions whether or not refined but not chemically modified +15081000,"GROUND-NUT OIL AND ITS FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED::Crude oil" +15089000,oil and its fractions whether or not refined but not chemically modified >> other +15089010,"GROUND-NUT OIL AND ITS FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Deodorized (Salad Oil) " +15089091,"GROUND-NUT OIL AND ITS FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Edible grade" +15089099,"GROUND-NUT OIL AND ITS FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Other" +15090000,olive oil and its fractions whether or not refined but not chemically modified +15091000,"OLIVE OIL AND ITS FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED::Virgin(OLD tariff)" +15092000,"OLIVE OIL AND ITS FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:extra:Extra virgin olive oil" +15093000,"OLIVE OIL AND ITS FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:virgin:Virgin olive oil" +15094000,"OLIVE OIL AND ITS FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:other:Other Virgin olive oils" +15099000,olive oil and its fractions whether or not refined but not chemically modified >> other +15099010,"OLIVE OIL AND ITS FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Edible grade" +15099090,"OLIVE OIL AND ITS FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Other" +15100000,solely from olives whether or not refined but not chemically modified including blends of these oils or fractions with oils or fractions of heading 1509 +15100010,"OTHER OILS AND THEIR FRACTIONS OBTAINED SOLELY FROM OLIVES, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED, INCLUDING BLENDS OF THESE OILS OR FRACTIONS WITH OILS OR FRACTIONS OF HEADING 1509:Other oils and their fractions obtained solely from olives, whether or not refined, but not chemically 
modified, including blends of these oils or fractions with oils or fractions of heading 1509:Crude oil (OLD tariff)" +15100091,"OTHER OILS AND THEIR FRACTIONS OBTAINED SOLELY FROM OLIVES, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED, INCLUDING BLENDS OF THESE OILS OR FRACTIONS WITH OILS OR FRACTIONS OF HEADING 1509:Other oils and their fractions obtained solely from olives, whether or not refined, but not chemically modified, including blends of these oils or fractions with oils or fractions of heading 1509:Edible grade(OLD tariff)" +15100099,"OTHER OILS AND THEIR FRACTIONS OBTAINED SOLELY FROM OLIVES, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED, INCLUDING BLENDS OF THESE OILS OR FRACTIONS WITH OILS OR FRACTIONS OF HEADING 1509:Other oils and their fractions obtained solely from olives, whether or not refined, but not chemically modified, including blends of these oils or fractions with oils or fractions of heading 1509:Other(OLD tariff)" +15101000,"OTHER OILS AND THEIR FRACTIONS OBTAINED SOLELY FROM OLIVES, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED, INCLUDING BLENDS OF THESE OILS OR FRACTIONS WITH OILS OR FRACTIONS OF HEADING 1509:crude olive:Crude olive pomace oil" +15109000,solely from olives whether or not refined but not chemically modified including blends of these oils or fractions with oils or fractions of heading 1509 >> other +15109010,"OTHER OILS AND THEIR FRACTIONS OBTAINED SOLELY FROM OLIVES, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED, INCLUDING BLENDS OF THESE OILS OR FRACTIONS WITH OILS OR FRACTIONS OF HEADING 1509:Other:Refined Olive pomace oil" +15109090,"OTHER OILS AND THEIR FRACTIONS OBTAINED SOLELY FROM OLIVES, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED, INCLUDING BLENDS OF THESE OILS OR FRACTIONS WITH OILS OR FRACTIONS OF HEADING 1509:Other:other" +15110000,palm oil and its fractions whether or not refined but not chemically modified +15111000,"PALM OIL AND ITS FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED::Crude oil" +15119000,palm oil and its fractions whether or not refined but not chemically modified >> other +15119010,"PALM OIL AND ITS FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Refined bleached deodorised palm oil" +15119020,"PALM OIL AND ITS FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Refined bleached deodorised palmolein" +15119030,"PALM OIL AND ITS FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Refined bleached deodorised palm stearin" +15119090,"PALM OIL AND ITS FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Other" +15121110,"SUNFLOWER SEED, SAFFLOWER OR COTTON SEED OIL AND THEIR FRACTIONS THEREOF, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Crude oil:Sunflower seed oil(OLD tariff)" +15121120,"SUNFLOWER SEED, SAFFLOWER OR COTTON SEED OIL AND THEIR FRACTIONS THEREOF, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Crude oil:Safflower seed oil (kardi seed oil)(OLD tariff)" +15121910,"SUNFLOWER SEED, SAFFLOWER OR COTTON SEED OIL AND THEIR FRACTIONS THEREOF, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Sunflower oil, edible grade(OLD tariff)" +15121920,"SUNFLOWER SEED, SAFFLOWER OR COTTON SEED OIL AND THEIR FRACTIONS THEREOF, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Sunflower oil, non-edible grade (other than crude oil)(OLD tariff)" +15121930,"SUNFLOWER SEED, SAFFLOWER OR COTTON SEED OIL AND THEIR FRACTIONS THEREOF, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY
MODIFIED:Other :Safflower oil, edible grade(OLD tariff)" +15121940,"SUNFLOWER SEED, SAFFLOWER OR COTTON SEED OIL AND THEIR FRACTIONS THEREOF, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Safflower oil, non-edible grade(OLD tariff)" +15121990,"SUNFLOWER SEED, SAFFLOWER OR COTTON SEED OIL AND THEIR FRACTIONS THEREOF, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Other(OLD tariff)" +15122100,"SUNFLOWER SEED, SAFFLOWER OR COTTON SEED OIL AND THEIR FRACTIONS THEREOF, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED::Crude oil, whether or not gossypol has been removed(OLD tariff)" +15122910,"SUNFLOWER SEED, SAFFLOWER OR COTTON SEED OIL AND THEIR FRACTIONS THEREOF, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Edible grade(OLD tariff)" +15122990,"SUNFLOWER SEED, SAFFLOWER OR COTTON SEED OIL AND THEIR FRACTIONS THEREOF, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Other(OLD tariff)" +15130000,kernel or babassu coconut copra palm oil and fractions thereof whether or not refined but not chemically modified coconut copra oil and its fractions +15131100,"COCONUT (COPRA), PALM KERNEL OR BABASSU OIL AND FRACTIONS THEREOF, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED::Crude oil" +15131900,"COCONUT (COPRA), PALM KERNEL OR BABASSU OIL AND FRACTIONS THEREOF, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED::Other" +15132100,kernel or babassu coconut copra palm oil and fractions thereof whether or not refined but not chemically modified coconut copra oil and its fractions >> crude oil +15132110,"COCONUT (COPRA), PALM KERNEL OR BABASSU OIL AND FRACTIONS THEREOF, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Crude Oil :Palm kernel oil" +15132120,"COCONUT (COPRA), PALM KERNEL OR BABASSU OIL AND FRACTIONS THEREOF, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Crude Oil :Babassu oil" +15132900,kernel or babassu coconut copra palm oil and fractions thereof whether or not refined but not chemically modified coconut copra oil and its fractions >> other +15132910,"COCONUT (COPRA), PALM KERNEL OR BABASSU OIL AND FRACTIONS THEREOF, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other:Palm kernel oil and its fractions" +15132920,"COCONUT (COPRA), PALM KERNEL OR BABASSU OIL AND FRACTIONS THEREOF, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other:Babassu oil and its fractions edible grade" +15132930,"COCONUT (COPRA), PALM KERNEL OR BABASSU OIL AND FRACTIONS THEREOF, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other:Babassu oil and its fractions, other than edible grade" +15132990,"COCONUT (COPRA), PALM KERNEL OR BABASSU OIL AND FRACTIONS THEREOF, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other:Other" +15140000,rape colza or mustard oil and its fractions thereof whether or not refined but not chemically modifie low erucic acid rape or colza oil and its fractions +15141100,rape colza or mustard oil and its fractions thereof whether or not refined but not chemically modifie low erucic acid rape or colza oil and its fractions >> crude oil +15141110,"RAPE, COLZA OR MUSTARD OIL AND ITS FRACTIONS THEREOF, WHETHER OR NOT REFINED , BUT NOT CHEMICALLY MODIFIED:Crude oil:Colza oil" +15141120,"RAPE, COLZA OR MUSTARD OIL AND ITS FRACTIONS THEREOF, WHETHER OR NOT REFINED , BUT NOT CHEMICALLY MODIFIED:Crude oil:Rape oil" +15141190,"RAPE, COLZA OR MUSTARD OIL AND ITS FRACTIONS THEREOF, WHETHER OR NOT REFINED , BUT NOT CHEMICALLY MODIFIED:Crude oil:Other" +15141900,rape colza or mustard oil and its fractions thereof whether or not
refined but not chemically modifie low erucic acid rape or colza oil and its fractions >> other +15141910,"RAPE, COLZA OR MUSTARD OIL AND ITS FRACTIONS THEREOF, WHETHER OR NOT REFINED , BUT NOT CHEMICALLY MODIFIED:Other :Refined colza oil of edible grade" +15141920,"RAPE, COLZA OR MUSTARD OIL AND ITS FRACTIONS THEREOF, WHETHER OR NOT REFINED , BUT NOT CHEMICALLY MODIFIED:Other :Refined rapeseed oil of edible grade" +15141990,"RAPE, COLZA OR MUSTARD OIL AND ITS FRACTIONS THEREOF, WHETHER OR NOT REFINED , BUT NOT CHEMICALLY MODIFIED:Other :Other" +15149100,rape colza or mustard oil and its fractions thereof whether or not refined but not chemically modifie low erucic acid rape or colza oil and its fractions >> crude oil +15149110,"RAPE, COLZA OR MUSTARD OIL AND ITS FRACTIONS THEREOF, WHETHER OR NOT REFINED , BUT NOT CHEMICALLY MODIFIED:Crude oil :Colza oil" +15149120,"RAPE, COLZA OR MUSTARD OIL AND ITS FRACTIONS THEREOF, WHETHER OR NOT REFINED , BUT NOT CHEMICALLY MODIFIED:Crude oil :Mustard oil" +15149190,"RAPE, COLZA OR MUSTARD OIL AND ITS FRACTIONS THEREOF, WHETHER OR NOT REFINED , BUT NOT CHEMICALLY MODIFIED:Crude oil :Rapeseed oil" +15149900,rape colza or mustard oil and its fractions thereof whether or not refined but not chemically modifie low erucic acid rape or colza oil and its fractions >> other +15149910,"RAPE, COLZA OR MUSTARD OIL AND ITS FRACTIONS THEREOF, WHETHER OR NOT REFINED , BUT NOT CHEMICALLY MODIFIED:Other:Refined colza oil of edible grade" +15149920,"RAPE, COLZA OR MUSTARD OIL AND ITS FRACTIONS THEREOF, WHETHER OR NOT REFINED , BUT NOT CHEMICALLY MODIFIED:Other:Refined mustard oil of edible grade" +15149930,"RAPE, COLZA OR MUSTARD OIL AND ITS FRACTIONS THEREOF, WHETHER OR NOT REFINED , BUT NOT CHEMICALLY MODIFIED:Other:Refined rapeseed oil of edible grade" +15149990,"RAPE, COLZA OR MUSTARD OIL AND ITS FRACTIONS THEREOF, WHETHER OR NOT REFINED , BUT NOT CHEMICALLY MODIFIED:Other:Other" +15150000,other fixed vegetable or microbial fats and oils including jojoba oil and their fractions whether or not refined but not chemically modified linseed oil and its fractions +15151100,"OTHER FIXED VEGETABLE FATS AND OILS (INCLUDING JOJOBA OIL) AND THEIR FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED::Crude oil" +15151900,other fixed vegetable or microbial fats and oils including jojoba oil and their fractions whether or not refined but not chemically modified linseed oil and its fractions >> other +15151910,"OTHER FIXED VEGETABLE FATS AND OILS (INCLUDING JOJOBA OIL) AND THEIR FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other:Edible grade" +15151990,"OTHER FIXED VEGETABLE FATS AND OILS (INCLUDING JOJOBA OIL) AND THEIR FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other:Other" +15152100,"OTHER FIXED VEGETABLE FATS AND OILS (INCLUDING JOJOBA OIL) AND THEIR FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED::Crude oil" +15152900,other fixed vegetable or microbial fats and oils including jojoba oil and their fractions whether or not refined but not chemically modified linseed oil and its fractions >> other +15152910,"OTHER FIXED VEGETABLE FATS AND OILS (INCLUDING JOJOBA OIL) AND THEIR FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Edible grade" +15152990,"OTHER FIXED VEGETABLE FATS AND OILS (INCLUDING JOJOBA OIL) AND THEIR FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Other" +15153000,other fixed vegetable or microbial fats and oils including jojoba oil and their fractions 
whether or not refined but not chemically modified linseed oil and its fractions >> castor oil and its fractions +15153010,"OTHER FIXED VEGETABLE FATS AND OILS (INCLUDING JOJOBA OIL) AND THEIR FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Castor oil and its fractions:Edible grade" +15153090,"OTHER FIXED VEGETABLE FATS AND OILS (INCLUDING JOJOBA OIL) AND THEIR FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Castor oil and its fractions:Other" +15155000,other fixed vegetable or microbial fats and oils including jojoba oil and their fractions whether or not refined but not chemically modified linseed oil and its fractions >> sesame oil and its fractions +15155010,"OTHER FIXED VEGETABLE FATS AND OILS (INCLUDING JOJOBA OIL) AND THEIR FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Sesame oil and its fractions:Crude oil " +15155091,"OTHER FIXED VEGETABLE FATS AND OILS (INCLUDING JOJOBA OIL) AND THEIR FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Sesame oil and its fractions:Edible grade" +15155099,"OTHER FIXED VEGETABLE FATS AND OILS (INCLUDING JOJOBA OIL) AND THEIR FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Sesame oil and its fractions:Other" +15156000,"OTHER FIXED VEGETABLE FATS AND OILS (INCLUDING JOJOBA OIL) AND THEIR FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:microbial:Microbial fats and oils and other fractions" +15159000,other fixed vegetable or microbial fats and oils including jojoba oil and their fractions whether or not refined but not chemically modified linseed oil and its fractions >> other +15159010,"OTHER FIXED VEGETABLE FATS AND OILS (INCLUDING JOJOBA OIL) AND THEIR FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Fixed vegetable oils, namely the following chul moogra oil, mawra oil, kokam oil, tobacco seed oil, sal oil" +15159020,"OTHER FIXED VEGETABLE FATS AND OILS (INCLUDING JOJOBA OIL) AND THEIR FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Fixed vegetable oils, namely the following neem seed oil, karanj oil, silk cotton seed oil," +15159030,"OTHER FIXED VEGETABLE FATS AND OILS (INCLUDING JOJOBA OIL) AND THEIR FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Fixed vegetable oils, namely the following cardamom oil, chillies or capsicum oil, turmeric oil, ajwain seed oil, niger seed oil, garlic oil" +15159040,"OTHER FIXED VEGETABLE FATS AND OILS (INCLUDING JOJOBA OIL) AND THEIR FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Fixed vegetable oils of edible grade namely the following mango kernel oil, mahua oil, rice bran oil Other :" +15159091,"OTHER FIXED VEGETABLE FATS AND OILS (INCLUDING JOJOBA OIL) AND THEIR FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Edible grade" +15159099,"OTHER FIXED VEGETABLE FATS AND OILS (INCLUDING JOJOBA OIL) AND THEIR FRACTIONS, WHETHER OR NOT REFINED, BUT NOT CHEMICALLY MODIFIED:Other :Other" +15160000,animal vegetable or microbial fats and oils and their fractions partly or wholly hydrogenated or elaidinised whether or not refined but not further prepared animal vegetable or microbial fats and oils and sulphurised blown polymerised by heat in vacuum or in inert gas or otherwise chemically modified excluding those of heading 1516 inedible mixtures or preparations of animal vegetable or microbial fats or oils or of fractions of different fats or oils of this chapter not elsewhere specified or +15161000,"ANIMAL OR VEGETABLE FATS
AND OILS AND THEIR FRACTIONS, PARTLY OR WHOLLY HYDROGENATED, INTER-ESTERIFIED, RE-ESTERIFIED OR ELAIDINISED, WHETHER OR NOT REFINED, BUT NOT FURTHER PREPARED::Animal fats and oils and their fractions" +15162000,animal vegetable or microbial fats and oils and their fractions partly or wholly hydrogenated or elaidinised whether or not refined but not further prepared animal vegetable or microbial fats and oils and sulphurised blown polymerised by heat in vacuum or in inert gas or otherwise chemically modified excluding those of heading 1516 inedible mixtures or preparations of animal vegetable or microbial fats or oils or of fractions of different fats or oils of this chapter not elsewhere specified or >> vegetable fats and oils and their fractions cotton seed oil +15162011,"ANIMAL OR VEGETABLE FATS AND OILS AND THEIR FRACTIONS, PARTLY OR WHOLLY HYDROGENATED, INTER-ESTERIFIED, RE-ESTERIFIED OR ELAIDINISED, WHETHER OR NOT REFINED, BUT NOT FURTHER PREPARED:Vegetable fats and oils and their fractions :Edible grade" +15162019,"ANIMAL OR VEGETABLE FATS AND OILS AND THEIR FRACTIONS, PARTLY OR WHOLLY HYDROGENATED, INTER-ESTERIFIED, RE-ESTERIFIED OR ELAIDINISED, WHETHER OR NOT REFINED, BUT NOT FURTHER PREPARED:Vegetable fats and oils and their fractions :Other " +15162021,"ANIMAL OR VEGETABLE FATS AND OILS AND THEIR FRACTIONS, PARTLY OR WHOLLY HYDROGENATED, INTER-ESTERIFIED, RE-ESTERIFIED OR ELAIDINISED, WHETHER OR NOT REFINED, BUT NOT FURTHER PREPARED:Vegetable fats and oils and their fractions :Edible grade" +15162029,"ANIMAL OR VEGETABLE FATS AND OILS AND THEIR FRACTIONS, PARTLY OR WHOLLY HYDROGENATED, INTER-ESTERIFIED, RE-ESTERIFIED OR ELAIDINISED, WHETHER OR NOT REFINED, BUT NOT FURTHER PREPARED:Vegetable fats and oils and their fractions :Other " +15162031,"ANIMAL OR VEGETABLE FATS AND OILS AND THEIR FRACTIONS, PARTLY OR WHOLLY HYDROGENATED, INTER-ESTERIFIED, RE-ESTERIFIED OR ELAIDINISED, WHETHER OR NOT REFINED, BUT NOT FURTHER PREPARED:Vegetable fats and oils and their fractions :Edible grade" +15162039,"ANIMAL OR VEGETABLE FATS AND OILS AND THEIR FRACTIONS, PARTLY OR WHOLLY HYDROGENATED, INTER-ESTERIFIED, RE-ESTERIFIED OR ELAIDINISED, WHETHER OR NOT REFINED, BUT NOT FURTHER PREPARED:Vegetable fats and oils and their fractions :Other " +15162091,"ANIMAL OR VEGETABLE FATS AND OILS AND THEIR FRACTIONS, PARTLY OR WHOLLY HYDROGENATED, INTER-ESTERIFIED, RE-ESTERIFIED OR ELAIDINISED, WHETHER OR NOT REFINED, BUT NOT FURTHER PREPARED:Vegetable fats and oils and their fractions :Edible grade" +15162099,"ANIMAL OR VEGETABLE FATS AND OILS AND THEIR FRACTIONS, PARTLY OR WHOLLY HYDROGENATED, INTER-ESTERIFIED, RE-ESTERIFIED OR ELAIDINISED, WHETHER OR NOT REFINED, BUT NOT FURTHER PREPARED:Vegetable fats and oils and their fractions :Other" +15163000,"ANIMAL OR VEGETABLE FATS AND OILS AND THEIR FRACTIONS, PARTLY OR WHOLLY HYDROGENATED, INTER-ESTERIFIED, RE-ESTERIFIED OR ELAIDINISED, WHETHER OR NOT REFINED, BUT NOT FURTHER PREPARED:microbial: Microbial fats and oils and their fractions" +15170000,margarine edible mixture or preparations of animal vegetable o r m i c r o b i a l fats or oils or of fractions of different fats or oils of this chapter other than edible fats and o i l s or their fractions of heading 1516 +15171000,margarine edible mixture or preparations of animal vegetable o r m i c r o b i a l fats or oils or of fractions of different fats or oils of this chapter other than edible fats and o i l s or their fractions of heading 1516 >> margarine excluding liquid margarine +15171010,"MARGARINE; 
EDIBLE MIXTURE OR PREPARATIONS OF ANIMAL OR VEGETABLE FATS OR OILS OR OF FRACTIONS OF DIFFERENT FATS OR OILS OF THIS CHAPTER, OTHER THAN EDIBLE FATS OR OILS OR THEIR FRACTIONS OF HEADING 1516:Margarine, excluding liquid margarine:Of animal origin " +15171021,"MARGARINE; EDIBLE MIXTURE OR PREPARATIONS OF ANIMAL OR VEGETABLE FATS OR OILS OR OF FRACTIONS OF DIFFERENT FATS OR OILS OF THIS CHAPTER, OTHER THAN EDIBLE FATS OR OILS OR THEIR FRACTIONS OF HEADING 1516:Margarine, excluding liquid margarine:Edible grade" +15171022,"MARGARINE; EDIBLE MIXTURE OR PREPARATIONS OF ANIMAL OR VEGETABLE FATS OR OILS OR OF FRACTIONS OF DIFFERENT FATS OR OILS OF THIS CHAPTER, OTHER THAN EDIBLE FATS OR OILS OR THEIR FRACTIONS OF HEADING 1516:Margarine, excluding liquid margarine:Linoxyn" +15171029,"MARGARINE; EDIBLE MIXTURE OR PREPARATIONS OF ANIMAL OR VEGETABLE FATS OR OILS OR OF FRACTIONS OF DIFFERENT FATS OR OILS OF THIS CHAPTER, OTHER THAN EDIBLE FATS OR OILS OR THEIR FRACTIONS OF HEADING 1516:Margarine, excluding liquid margarine:Other" +15179000,margarine edible mixture or preparations of animal vegetable o r m i c r o b i a l fats or oils or of fractions of different fats or oils of this chapter other than edible fats and o i l s or their fractions of heading 1516 >> other +15179010,"MARGARINE; EDIBLE MIXTURE OR PREPARATIONS OF ANIMAL OR VEGETABLE FATS OR OILS OR OF FRACTIONS OF DIFFERENT FATS OR OILS OF THIS CHAPTER, OTHER THAN EDIBLE FATS OR OILS OR THEIR FRACTIONS OF HEADING 1516:Other :Sal fat (processed or refined)" +15179020,"MARGARINE; EDIBLE MIXTURE OR PREPARATIONS OF ANIMAL OR VEGETABLE FATS OR OILS OR OF FRACTIONS OF DIFFERENT FATS OR OILS OF THIS CHAPTER, OTHER THAN EDIBLE FATS OR OILS OR THEIR FRACTIONS OF HEADING 1516:Other :Peanut butter(OLD tariff)" +15179030,"MARGARINE; EDIBLE MIXTURE OR PREPARATIONS OF ANIMAL OR VEGETABLE FATS OR OILS OR OF FRACTIONS OF DIFFERENT FATS OR OILS OF THIS CHAPTER, OTHER THAN EDIBLE FATS OR OILS OR THEIR FRACTIONS OF HEADING 1516:Other :Imitation lard of animal origin" +15179040,"MARGARINE; EDIBLE MIXTURE OR PREPARATIONS OF ANIMAL OR VEGETABLE FATS OR OILS OR OF FRACTIONS OF DIFFERENT FATS OR OILS OF THIS CHAPTER, OTHER THAN EDIBLE FATS OR OILS OR THEIR FRACTIONS OF HEADING 1516:Other :Imitation lard of vegetable origin" +15179090,"MARGARINE; EDIBLE MIXTURE OR PREPARATIONS OF ANIMAL OR VEGETABLE FATS OR OILS OR OF FRACTIONS OF DIFFERENT FATS OR OILS OF THIS CHAPTER, OTHER THAN EDIBLE FATS OR OILS OR THEIR FRACTIONS OF HEADING 1516:Other :Other" +15180000,their fractions boiled oxidised dehydrated sulphurised blown polymerised by heat in vacuum or in inert gas or otherwise chemically modified excluding those of heading 1516 inedible mixtures or preparations of animal vegetable or microbial fats or oils or of fractions of different fats or oils of this chapter not elsewhere specified or included linseed oil +15180011,"ANIMAL OR VEGETABLE FATS AND OILS AND THEIR FRACTIONS, BOILED, OXIDISED, DEHYDRATED, SULPHURISED, BLOWN, POLYMERISED BY HEAT IN VACUUM OR IN INERT GAS OR OTHERWISE CHEMICALLY MODIFIED, EXCLUDING THOSE OF HEADING 1516 ; INEDIBLE MIXTURES OR PREPARATIONS OF ANIMAL OR VEGETABLE FATS OR OILS OR OF FRACTIONS OF DIFFERENT FATS OR OILS OF THIS CHAPTER, NOT ELSEWHERE SPECIFIED OR INCLUDED:Animal or vegetable fats and oils and their fractions, boiled, oxidised, dehydrated, sulphurised, blown, polymerised by heat in vacuum or in inert gas or otherwise chemically modified, excluding those of heading 1516 ; inedible mixtures or preparations of animal or 
vegetable fats or oils or of fractions of different fats or oils of this Chapter, not elsewhere specified or included:Edible grade" +15180019,"ANIMAL OR VEGETABLE FATS AND OILS AND THEIR FRACTIONS, BOILED, OXIDISED, DEHYDRATED, SULPHURISED, BLOWN, POLYMERISED BY HEAT IN VACUUM OR IN INERT GAS OR OTHERWISE CHEMICALLY MODIFIED, EXCLUDING THOSE OF HEADING 1516 ; INEDIBLE MIXTURES OR PREPARATIONS OF ANIMAL OR VEGETABLE FATS OR OILS OR OF FRACTIONS OF DIFFERENT FATS OR OILS OF THIS CHAPTER, NOT ELSEWHERE SPECIFIED OR INCLUDED:Animal or vegetable fats and oils and their fractions, boiled, oxidised, dehydrated, sulphurised, blown, polymerised by heat in vacuum or in inert gas or otherwise chemically modified, excluding those of heading 1516 ; inedible mixtures or preparations of animal or vegetable fats or oils or of fractions of different fats or oils of this Chapter, not elsewhere specified or included:Other " +15180021,"ANIMAL OR VEGETABLE FATS AND OILS AND THEIR FRACTIONS, BOILED, OXIDISED, DEHYDRATED, SULPHURISED, BLOWN, POLYMERISED BY HEAT IN VACUUM OR IN INERT GAS OR OTHERWISE CHEMICALLY MODIFIED, EXCLUDING THOSE OF HEADING 1516 ; INEDIBLE MIXTURES OR PREPARATIONS OF ANIMAL OR VEGETABLE FATS OR OILS OR OF FRACTIONS OF DIFFERENT FATS OR OILS OF THIS CHAPTER, NOT ELSEWHERE SPECIFIED OR INCLUDED:Animal or vegetable fats and oils and their fractions, boiled, oxidised, dehydrated, sulphurised, blown, polymerised by heat in vacuum or in inert gas or otherwise chemically modified, excluding those of heading 1516 ; inedible mixtures or preparations of animal or vegetable fats or oils or of fractions of different fats or oils of this Chapter, not elsewhere specified or included:Edible grade" +15180029,"ANIMAL OR VEGETABLE FATS AND OILS AND THEIR FRACTIONS, BOILED, OXIDISED, DEHYDRATED, SULPHURISED, BLOWN, POLYMERISED BY HEAT IN VACUUM OR IN INERT GAS OR OTHERWISE CHEMICALLY MODIFIED, EXCLUDING THOSE OF HEADING 1516 ; INEDIBLE MIXTURES OR PREPARATIONS OF ANIMAL OR VEGETABLE FATS OR OILS OR OF FRACTIONS OF DIFFERENT FATS OR OILS OF THIS CHAPTER, NOT ELSEWHERE SPECIFIED OR INCLUDED:Animal or vegetable fats and oils and their fractions, boiled, oxidised, dehydrated, sulphurised, blown, polymerised by heat in vacuum or in inert gas or otherwise chemically modified, excluding those of heading 1516 ; inedible mixtures or preparations of animal or vegetable fats or oils or of fractions of different fats or oils of this Chapter, not elsewhere specified or included:Other " +15180031,"ANIMAL OR VEGETABLE FATS AND OILS AND THEIR FRACTIONS, BOILED, OXIDISED, DEHYDRATED, SULPHURISED, BLOWN, POLYMERISED BY HEAT IN VACUUM OR IN INERT GAS OR OTHERWISE CHEMICALLY MODIFIED, EXCLUDING THOSE OF HEADING 1516 ; INEDIBLE MIXTURES OR PREPARATIONS OF ANIMAL OR VEGETABLE FATS OR OILS OR OF FRACTIONS OF DIFFERENT FATS OR OILS OF THIS CHAPTER, NOT ELSEWHERE SPECIFIED OR INCLUDED:Animal or vegetable fats and oils and their fractions, boiled, oxidised, dehydrated, sulphurised, blown, polymerised by heat in vacuum or in inert gas or otherwise chemically modified, excluding those of heading 1516 ; inedible mixtures or preparations of animal or vegetable fats or oils or of fractions of different fats or oils of this Chapter, not elsewhere specified or included:Edible grade" +15180039,"ANIMAL OR VEGETABLE FATS AND OILS AND THEIR FRACTIONS, BOILED, OXIDISED, DEHYDRATED, SULPHURISED, BLOWN, POLYMERISED BY HEAT IN VACUUM OR IN INERT GAS OR OTHERWISE CHEMICALLY MODIFIED, EXCLUDING THOSE OF HEADING 1516 ; INEDIBLE MIXTURES OR 
PREPARATIONS OF ANIMAL OR VEGETABLE FATS OR OILS OR OF FRACTIONS OF DIFFERENT FATS OR OILS OF THIS CHAPTER, NOT ELSEWHERE SPECIFIED OR INCLUDED:Animal or vegetable fats and oils and their fractions, boiled, oxidised, dehydrated, sulphurised, blown, polymerised by heat in vacuum or in inert gas or otherwise chemically modified, excluding those of heading 1516 ; inedible mixtures or preparations of animal or vegetable fats or oils or of fractions of different fats or oils of this Chapter, not elsewhere specified or included:Other" +15180040,"ANIMAL OR VEGETABLE FATS AND OILS AND THEIR FRACTIONS, BOILED, OXIDISED, DEHYDRATED, SULPHURISED, BLOWN, POLYMERISED BY HEAT IN VACUUM OR IN INERT GAS OR OTHERWISE CHEMICALLY MODIFIED, EXCLUDING THOSE OF HEADING 1516 ; INEDIBLE MIXTURES OR PREPARATIONS OF ANIMAL OR VEGETABLE FATS OR OILS OR OF FRACTIONS OF DIFFERENT FATS OR OILS OF THIS CHAPTER, NOT ELSEWHERE SPECIFIED OR INCLUDED:Animal or vegetable fats and oils and their fractions, boiled, oxidised, dehydrated, sulphurised, blown, polymerised by heat in vacuum or in inert gas or otherwise chemically modified, excluding those of heading 1516 ; inedible mixtures or preparations of animal or vegetable fats or oils or of fractions of different fats or oils of this Chapter, not elsewhere specified or included:Other(OLD tariff)" +15200000,"::GLYCEROL, CRUDE; GLYCEROL WATERS AND GLYCEROL LYES" +15210000,vegetable waxes other than triglycerides beeswax other insect waxes and spermaceti whether or not refined or coloured +15211000,vegetable waxes other than triglycerides beeswax other insect waxes and spermaceti whether or not refined or coloured >> vegetable waxes carnauba waxes +15211011,"VEGETABLE WAXES (OTHER THAN TRIGLYCERIDES), BEESWAX, OTHER INSECT WAXES AND SPERMACETI, WHETHER OR NOT REFINED OR COLOURED:Vegetable Waxes:Edible wax for waxing fresh fruits and vegetables" +15211019,"VEGETABLE WAXES (OTHER THAN TRIGLYCERIDES), BEESWAX, OTHER INSECT WAXES AND SPERMACETI, WHETHER OR NOT REFINED OR COLOURED:Vegetable Waxes:Other" +15211090,"VEGETABLE WAXES (OTHER THAN TRIGLYCERIDES), BEESWAX, OTHER INSECT WAXES AND SPERMACETI, WHETHER OR NOT REFINED OR COLOURED:Vegetable Waxes:Other" +15219000,vegetable waxes other than triglycerides beeswax other insect waxes and spermaceti whether or not refined or coloured >> other +15219010,"VEGETABLE WAXES (OTHER THAN TRIGLYCERIDES), BEESWAX, OTHER INSECT WAXES AND SPERMACETI, WHETHER OR NOT REFINED OR COLOURED:Other :Beeswax whether or not coloured" +15219020,"VEGETABLE WAXES (OTHER THAN TRIGLYCERIDES), BEESWAX, OTHER INSECT WAXES AND SPERMACETI, WHETHER OR NOT REFINED OR COLOURED:Other :Shellac wax whether or not coloured" +15219090,"VEGETABLE WAXES (OTHER THAN TRIGLYCERIDES), BEESWAX, OTHER INSECT WAXES AND SPERMACETI, WHETHER OR NOT REFINED OR COLOURED:Other :Other" +15220010,DEGRAS RESIDUES RESULTING FROM THE TREATMENT OF FATTY SUBSTANCES OR ANIMAL OR VEGETABLE WAXES:Degras residues resulting from the treatment of fatty substances or animal or vegetable waxes ::Degras(OLD tariff) +15220020,DEGRAS RESIDUES RESULTING FROM THE TREATMENT OF FATTY SUBSTANCES OR ANIMAL OR VEGETABLE WAXES:Degras residues resulting from the treatment of fatty substances or animal or vegetable waxes ::Soap stocks(OLD tariff) +15220090,DEGRAS RESIDUES RESULTING FROM THE TREATMENT OF FATTY SUBSTANCES OR ANIMAL OR VEGETABLE WAXES:Degras residues resulting from the treatment of fatty substances or animal or vegetable waxes ::Other(OLD tariff) +16010000,"::SAUSAGES AND SIMILAR PRODUCTS, OF MEAT,
MEAT OFFAL OR BLOOD; FOOD PREPARATIONS BASED ON THESE PRODUCTS(OLD tariff)" +16020000,other prepared or preserved meat meat offal blood or insects +16021000,"OTHER PREPARED OR PRESERVED MEAT, MEAT OFFAL OR BLOOD::Homogenised preparations" +16022000,"OTHER PREPARED OR PRESERVED MEAT, MEAT OFFAL OR BLOOD::Of liver of any animal" +16023100,"OTHER PREPARED OR PRESERVED MEAT, MEAT OFFAL OR BLOOD::Of turkeys" +16023200,"OTHER PREPARED OR PRESERVED MEAT, MEAT OFFAL OR BLOOD::Of fowls of the species Gallus domesticus" +16023900,"OTHER PREPARED OR PRESERVED MEAT, MEAT OFFAL OR BLOOD::Other" +16024100,"OTHER PREPARED OR PRESERVED MEAT, MEAT OFFAL OR BLOOD::Hams and cuts thereof" +16024200,"OTHER PREPARED OR PRESERVED MEAT, MEAT OFFAL OR BLOOD::Shoulders and cuts thereof" +16024900,"OTHER PREPARED OR PRESERVED MEAT, MEAT OFFAL OR BLOOD::Other, including mixtures" +16025000,"OTHER PREPARED OR PRESERVED MEAT, MEAT OFFAL OR BLOOD::Of bovine animals" +16029000,"OTHER PREPARED OR PRESERVED MEAT, MEAT OFFAL OR BLOOD::Other, including preparations of blood of any animal" +16030000,extracts and juices of meat fish or crustaceans molluscs or other aquatic invertebrates extracts and juices of meat fish or crustaceans molluscs or other aquatic invertebrates +16030010,"EXTRACTS AND JUICES OF MEAT, FISH OR CRUSTACEANS, MOLLUSCS OR OTHER AQUATIC INVERTEBRATES:Extracts and juices of meat, fish or crustaceans, molluscs or other aquatic invertebrates :Extracts and juices of meat" +16030020,"EXTRACTS AND JUICES OF MEAT, FISH OR CRUSTACEANS, MOLLUSCS OR OTHER AQUATIC INVERTEBRATES:Extracts and juices of meat, fish or crustaceans, molluscs or other aquatic invertebrates :Extracts of fish" +16030090,"EXTRACTS AND JUICES OF MEAT, FISH OR CRUSTACEANS, MOLLUSCS OR OTHER AQUATIC INVERTEBRATES:Extracts and juices of meat, fish or crustaceans, molluscs or other aquatic invertebrates :Other" +16040000,prepared or preserved fish caviar and caviar substitutes prepared from fish eggs fish whole or in pieces but not minced +16041100,PREPARED OR PRESERVED FISH; CAVIAR AND CAVIAR SUBSTITUTES PREPARED FROM FISH EGGS::Salmon +16041200,prepared or preserved fish caviar and caviar substitutes prepared from fish eggs fish whole or in pieces but not minced >> herrings +16041210,PREPARED OR PRESERVED FISH; CAVIAR AND CAVIAR SUBSTITUTES PREPARED FROM FISH EGGS:Herrings :Pickled +16041290,PREPARED OR PRESERVED FISH; CAVIAR AND CAVIAR SUBSTITUTES PREPARED FROM FISH EGGS:Herrings :Other +16041300,prepared or preserved fish caviar and caviar substitutes prepared from fish eggs fish whole or in pieces but not minced >> sardines sardinella and brisling or sprats +16041310,"PREPARED OR PRESERVED FISH; CAVIAR AND CAVIAR SUBSTITUTES PREPARED FROM FISH EGGS:Sardines, sardinella and brisling or sprats:Sardines, sardinella and brisling" +16041320,"PREPARED OR PRESERVED FISH; CAVIAR AND CAVIAR SUBSTITUTES PREPARED FROM FISH EGGS:Sardines, sardinella and brisling or sprats:Sprats" +16041400,prepared or preserved fish caviar and caviar substitutes prepared from fish eggs fish whole or in pieces but not minced >> tunas skipjack t una and bonito sarda spp +16041410,"PREPARED OR PRESERVED FISH; CAVIAR AND CAVIAR SUBSTITUTES PREPARED FROM FISH EGGS:Tunas, skipjack and bonito (Sarda spp.):Tunas" +16041490,"PREPARED OR PRESERVED FISH; CAVIAR AND CAVIAR SUBSTITUTES PREPARED FROM FISH EGGS:Tunas, skipjack and bonito (Sarda spp.):Other" +16041500,PREPARED OR PRESERVED FISH; CAVIAR AND CAVIAR SUBSTITUTES PREPARED FROM FISH EGGS::Mackerel +16041600,PREPARED OR 
PRESERVED FISH; CAVIAR AND CAVIAR SUBSTITUTES PREPARED FROM FISH EGGS::Anchovies +16041700,PREPARED OR PRESERVED FISH; CAVIAR AND CAVIAR SUBSTITUTES PREPARED FROM FISH EGGS:Eels:Eels +16041800,PREPARED OR PRESERVED FISH; CAVIAR AND CAVIAR SUBSTITUTES PREPARED FROM FISH EGGS::Shark fins +16041900,PREPARED OR PRESERVED FISH; CAVIAR AND CAVIAR SUBSTITUTES PREPARED FROM FISH EGGS::Other +16042000,PREPARED OR PRESERVED FISH; CAVIAR AND CAVIAR SUBSTITUTES PREPARED FROM FISH EGGS::Other prepared or preserved fish +16043100,PREPARED OR PRESERVED FISH; CAVIAR AND CAVIAR SUBSTITUTES PREPARED FROM FISH EGGS:Caviar:Caviar +16043200,PREPARED OR PRESERVED FISH; CAVIAR AND CAVIAR SUBSTITUTES PREPARED FROM FISH EGGS:caviar substitutes:Caviar substitutes +16050000,crustaceans molluscs and other aquatic invertebrates prepared or preserved +16051000,"CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, PREPARED OR PRESERVED::Crab" +16052100,"CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, PREPARED OR PRESERVED:Shrimps and prawns Not in airtight container:Shrimps and prawns not in airtight container" +16052900,"CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, PREPARED OR PRESERVED:Other:Other" +16053000,"CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, PREPARED OR PRESERVED::Lobster" +16054000,"CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, PREPARED OR PRESERVED::Other crustaceans" +16055100,"CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, PREPARED OR PRESERVED:Oysters:Oysters" +16055200,"CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, PREPARED OR PRESERVED:Scallops, including queen scallops:Scallops, including queen scallops" +16055300,"CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, PREPARED OR PRESERVED:Mussels:Mussels" +16055400,"CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, PREPARED OR PRESERVED:Cuttle fish and squid:Cuttle fish and squid" +16055500,"CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, PREPARED OR PRESERVED:Octopus:Octopus" +16055600,"CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, PREPARED OR PRESERVED:clams, cockles and arkshells:Clams, cockles and arkshells" +16055700,"CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, PREPARED OR PRESERVED:Abalone:Abalone" +16055800,"CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, PREPARED OR PRESERVED:Snails, other than sea snails:Snails, other than sea snails" +16055900,"CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, PREPARED OR PRESERVED:Other:Other" +16056100,"CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, PREPARED OR PRESERVED:Sea cucumbers:Sea cucumbers" +16056200,"CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, PREPARED OR PRESERVED:Sea urchins:Sea urchins" +16056300,"CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, PREPARED OR PRESERVED:Jellyfish:Jellyfish" +16056900,"CRUSTACEANS, MOLLUSCS AND OTHER AQUATIC INVERTEBRATES, PREPARED OR PRESERVED:Other:Other" +17010000,cane or beet sugar and chemically pure sucrose in solid form raw sugar not containing added flavouring or colouring matter +17011200,"CANE OR BEET SUGAR AND CHEMICALLY PURE SUCROSE, IN SOLID FORM ::Beet sugar " +17011300,cane or beet sugar and chemically pure sucrose in solid form raw sugar not containing added flavouring or colouring matter >> cane sugar specified in note 2 to this chapter +17011310,"CANE OR BEET SUGAR AND CHEMICALLY PURE SUCROSE, IN SOLID FORM :cane sugar specified in Sub-heading Note 2 to this chapter:Cane Jaggery" +17011320,"CANE OR BEET SUGAR AND 
CHEMICALLY PURE SUCROSE, IN SOLID FORM :cane sugar specified in Sub-heading Note 2 to this chapter:Khandsari sugar" +17011390,"CANE OR BEET SUGAR AND CHEMICALLY PURE SUCROSE, IN SOLID FORM :cane sugar specified in Sub-heading Note 2 to this chapter:Other" +17011400,cane or beet sugar and chemically pure sucrose in solid form raw sugar not containing added flavouring or colouring matter >> other cane sugar +17011410,"CANE OR BEET SUGAR AND CHEMICALLY PURE SUCROSE, IN SOLID FORM :Other cane sugar:cane Jaggery" +17011420,"CANE OR BEET SUGAR AND CHEMICALLY PURE SUCROSE, IN SOLID FORM :Other cane sugar:Khandsari sugar" +17011490,"CANE OR BEET SUGAR AND CHEMICALLY PURE SUCROSE, IN SOLID FORM :Other cane sugar:Other" +17019100,"CANE OR BEET SUGAR AND CHEMICALLY PURE SUCROSE, IN SOLID FORM ::Refined sugar containing added flavouring or colouring matter" +17019900,cane or beet sugar and chemically pure sucrose in solid form raw sugar not containing added flavouring or colouring matter >> other +17019910,"CANE OR BEET SUGAR AND CHEMICALLY PURE SUCROSE, IN SOLID FORM :Other :Sugar cubes" +17019990,"CANE OR BEET SUGAR AND CHEMICALLY PURE SUCROSE, IN SOLID FORM :Other :Other(OLD tariff)" +17021110,"OTHER SUGARS, INCLUDING CHEMICALLY PURE LACTOSE, MALTOSE, GLUCOSE AND FRUCTOSE, IN SOLID FORM; SUGAR SYRUPS NOT CONTAINING ADDED FLAVOURING OR COLOURING MATTER; ARTIFICIAL HONEY, WHETHER OR NOT MIXED WITH NATURAL HONEY; CARAMEL:Containing by weight 99% or more lactose, expressed as anhydrous lactose, calculated on the dry matter:In solid form(OLD tariff)" +17021190,"OTHER SUGARS, INCLUDING CHEMICALLY PURE LACTOSE, MALTOSE, GLUCOSE AND FRUCTOSE, IN SOLID FORM; SUGAR SYRUPS NOT CONTAINING ADDED FLAVOURING OR COLOURING MATTER; ARTIFICIAL HONEY, WHETHER OR NOT MIXED WITH NATURAL HONEY; CARAMEL:Containing by weight 99% or more lactose, expressed as anhydrous lactose, calculated on the dry matter:Other(OLD tariff)" +17021910,"OTHER SUGARS, INCLUDING CHEMICALLY PURE LACTOSE, MALTOSE, GLUCOSE AND FRUCTOSE, IN SOLID FORM; SUGAR SYRUPS NOT CONTAINING ADDED FLAVOURING OR COLOURING MATTER; ARTIFICIAL HONEY, WHETHER OR NOT MIXED WITH NATURAL HONEY; CARAMEL:Other :In solid form(OLD tariff)" +17021990,"OTHER SUGARS, INCLUDING CHEMICALLY PURE LACTOSE, MALTOSE, GLUCOSE AND FRUCTOSE, IN SOLID FORM; SUGAR SYRUPS NOT CONTAINING ADDED FLAVOURING OR COLOURING MATTER; ARTIFICIAL HONEY, WHETHER OR NOT MIXED WITH NATURAL HONEY; CARAMEL:Other :Other(OLD tariff)" +17022010,"OTHER SUGARS, INCLUDING CHEMICALLY PURE LACTOSE, MALTOSE, GLUCOSE AND FRUCTOSE, IN SOLID FORM; SUGAR SYRUPS NOT CONTAINING ADDED FLAVOURING OR COLOURING MATTER; ARTIFICIAL HONEY, WHETHER OR NOT MIXED WITH NATURAL HONEY; CARAMEL:Maple sugar and maple syrup:In solid form(OLD tariff)" +17022090,"OTHER SUGARS, INCLUDING CHEMICALLY PURE LACTOSE, MALTOSE, GLUCOSE AND FRUCTOSE, IN SOLID FORM; SUGAR SYRUPS NOT CONTAINING ADDED FLAVOURING OR COLOURING MATTER; ARTIFICIAL HONEY, WHETHER OR NOT MIXED WITH NATURAL HONEY; CARAMEL:Maple sugar and maple syrup:Other(OLD tariff)" +17023010,"OTHER SUGARS, INCLUDING CHEMICALLY PURE LACTOSE, MALTOSE, GLUCOSE AND FRUCTOSE, IN SOLID FORM; SUGAR SYRUPS NOT CONTAINING ADDED FLAVOURING OR COLOURING MATTER; ARTIFICIAL HONEY, WHETHER OR NOT MIXED WITH NATURAL HONEY; CARAMEL:Glucose and glucose syrup, not containing fructose or containing in the dry state less than 20% by weight of fructose:Glucose, liquid(OLD tariff)" +17023020,"OTHER SUGARS, INCLUDING CHEMICALLY PURE LACTOSE, MALTOSE, GLUCOSE AND FRUCTOSE, IN SOLID FORM; SUGAR SYRUPS NOT 
CONTAINING ADDED FLAVOURING OR COLOURING MATTER; ARTIFICIAL HONEY, WHETHER OR NOT MIXED WITH NATURAL HONEY; CARAMEL:Glucose and glucose syrup, not containing fructose or containing in the dry state less than 20% by weight of fructose:Glucose, solid(OLD tariff)" +17023031,"OTHER SUGARS, INCLUDING CHEMICALLY PURE LACTOSE, MALTOSE, GLUCOSE AND FRUCTOSE, IN SOLID FORM; SUGAR SYRUPS NOT CONTAINING ADDED FLAVOURING OR COLOURING MATTER; ARTIFICIAL HONEY, WHETHER OR NOT MIXED WITH NATURAL HONEY; CARAMEL:Glucose and glucose syrup, not containing fructose or containing in the dry state less than 20% by weight of fructose:In solid form(OLD tariff)" +17023039,"OTHER SUGARS, INCLUDING CHEMICALLY PURE LACTOSE, MALTOSE, GLUCOSE AND FRUCTOSE, IN SOLID FORM; SUGAR SYRUPS NOT CONTAINING ADDED FLAVOURING OR COLOURING MATTER; ARTIFICIAL HONEY, WHETHER OR NOT MIXED WITH NATURAL HONEY; CARAMEL:Glucose and glucose syrup, not containing fructose or containing in the dry state less than 20% by weight of fructose:Other(OLD tariff)" +17024010,"OTHER SUGARS, INCLUDING CHEMICALLY PURE LACTOSE, MALTOSE, GLUCOSE AND FRUCTOSE, IN SOLID FORM; SUGAR SYRUPS NOT CONTAINING ADDED FLAVOURING OR COLOURING MATTER; ARTIFICIAL HONEY, WHETHER OR NOT MIXED WITH NATURAL HONEY; CARAMEL:Glucose and glucose syrup, containing in the dry state at least 20% but less than 50% by weight of fructose, excluding invert sugar :Glucose, liquid(OLD tariff)" +17024020,"OTHER SUGARS, INCLUDING CHEMICALLY PURE LACTOSE, MALTOSE, GLUCOSE AND FRUCTOSE, IN SOLID FORM; SUGAR SYRUPS NOT CONTAINING ADDED FLAVOURING OR COLOURING MATTER; ARTIFICIAL HONEY, WHETHER OR NOT MIXED WITH NATURAL HONEY; CARAMEL:Glucose and glucose syrup, containing in the dry state at least 20% but less than 50% by weight of fructose, excluding invert sugar :Glucose, solid (OLD tariff)" +17024031,"OTHER SUGARS, INCLUDING CHEMICALLY PURE LACTOSE, MALTOSE, GLUCOSE AND FRUCTOSE, IN SOLID FORM; SUGAR SYRUPS NOT CONTAINING ADDED FLAVOURING OR COLOURING MATTER; ARTIFICIAL HONEY, WHETHER OR NOT MIXED WITH NATURAL HONEY; CARAMEL:Glucose and glucose syrup, containing in the dry state at least 20% but less than 50% by weight of fructose, excluding invert sugar :In solid form(OLD tariff)" +17024039,"OTHER SUGARS, INCLUDING CHEMICALLY PURE LACTOSE, MALTOSE, GLUCOSE AND FRUCTOSE, IN SOLID FORM; SUGAR SYRUPS NOT CONTAINING ADDED FLAVOURING OR COLOURING MATTER; ARTIFICIAL HONEY, WHETHER OR NOT MIXED WITH NATURAL HONEY; CARAMEL:Glucose and glucose syrup, containing in the dry state at least 20% but less than 50% by weight of fructose, excluding invert sugar :Other(OLD tariff)" +17025000,"OTHER SUGARS, INCLUDING CHEMICALLY PURE LACTOSE, MALTOSE, GLUCOSE AND FRUCTOSE, IN SOLID FORM; SUGAR SYRUPS NOT CONTAINING ADDED FLAVOURING OR COLOURING MATTER; ARTIFICIAL HONEY, WHETHER OR NOT MIXED WITH NATURAL HONEY; CARAMEL::Chemically pure fructose(OLD tariff)" +17026010,"OTHER SUGARS, INCLUDING CHEMICALLY PURE LACTOSE, MALTOSE, GLUCOSE AND FRUCTOSE, IN SOLID FORM; SUGAR SYRUPS NOT CONTAINING ADDED FLAVOURING OR COLOURING MATTER; ARTIFICIAL HONEY, WHETHER OR NOT MIXED WITH NATURAL HONEY; CARAMEL:Other fructose and fructose syrup, containing in the dry state more than 50% by weight of fructose, excluding invert sugar:In solid form(OLD tariff)" +17026090,"OTHER SUGARS, INCLUDING CHEMICALLY PURE LACTOSE, MALTOSE, GLUCOSE AND FRUCTOSE, IN SOLID FORM; SUGAR SYRUPS NOT CONTAINING ADDED FLAVOURING OR COLOURING MATTER; ARTIFICIAL HONEY, WHETHER OR NOT MIXED WITH NATURAL HONEY; CARAMEL:Other fructose and fructose syrup, 
containing in the dry state more than 50% by weight of fructose, excluding invert sugar:Other(OLD tariff)" +17029010,"OTHER SUGARS, INCLUDING CHEMICALLY PURE LACTOSE, MALTOSE, GLUCOSE AND FRUCTOSE, IN SOLID FORM; SUGAR SYRUPS NOT CONTAINING ADDED FLAVOURING OR COLOURING MATTER; ARTIFICIAL HONEY, WHETHER OR NOT MIXED WITH NATURAL HONEY; CARAMEL:Other, including invert sugar and other sugar and sugar syrup blends containing in the dry state 50% by weight of fructose :Palmyra sugar(OLD tariff)" +17029020,"OTHER SUGARS, INCLUDING CHEMICALLY PURE LACTOSE, MALTOSE, GLUCOSE AND FRUCTOSE, IN SOLID FORM; SUGAR SYRUPS NOT CONTAINING ADDED FLAVOURING OR COLOURING MATTER; ARTIFICIAL HONEY, WHETHER OR NOT MIXED WITH NATURAL HONEY; CARAMEL:Other, including invert sugar and other sugar and sugar syrup blends containing in the dry state 50% by weight of fructose :Chemically pure maltose(OLD tariff)" +17029030,"OTHER SUGARS, INCLUDING CHEMICALLY PURE LACTOSE, MALTOSE, GLUCOSE AND FRUCTOSE, IN SOLID FORM; SUGAR SYRUPS NOT CONTAINING ADDED FLAVOURING OR COLOURING MATTER; ARTIFICIAL HONEY, WHETHER OR NOT MIXED WITH NATURAL HONEY; CARAMEL:Other, including invert sugar and other sugar and sugar syrup blends containing in the dry state 50% by weight of fructose :Artificial honey, whether or not mixed with natural honey(OLD tariff)" +17029040,"OTHER SUGARS, INCLUDING CHEMICALLY PURE LACTOSE, MALTOSE, GLUCOSE AND FRUCTOSE, IN SOLID FORM; SUGAR SYRUPS NOT CONTAINING ADDED FLAVOURING OR COLOURING MATTER; ARTIFICIAL HONEY, WHETHER OR NOT MIXED WITH NATURAL HONEY; CARAMEL:Other, including invert sugar and other sugar and sugar syrup blends containing in the dry state 50% by weight of fructose :Caramel(OLD tariff)" +17029050,"OTHER SUGARS, INCLUDING CHEMICALLY PURE LACTOSE, MALTOSE, GLUCOSE AND FRUCTOSE, IN SOLID FORM; SUGAR SYRUPS NOT CONTAINING ADDED FLAVOURING OR COLOURING MATTER; ARTIFICIAL HONEY, WHETHER OR NOT MIXED WITH NATURAL HONEY; CARAMEL:Other, including invert sugar and other sugar and sugar syrup blends containing in the dry state 50% by weight of fructose :Insulin syrup(OLD tariff)" +17029090,"OTHER SUGARS, INCLUDING CHEMICALLY PURE LACTOSE, MALTOSE, GLUCOSE AND FRUCTOSE, IN SOLID FORM; SUGAR SYRUPS NOT CONTAINING ADDED FLAVOURING OR COLOURING MATTER; ARTIFICIAL HONEY, WHETHER OR NOT MIXED WITH NATURAL HONEY; CARAMEL:Other, including invert sugar and other sugar and sugar syrup blends containing in the dry state 50% by weight of fructose :Other(OLD tariff)" +17030000,molasses resulting from the extraction or refining of sugar +17031000,MOLASSES RESULTING FROM THE EXTRACTION OR REFINING OF SUGAR::Cane molasses +17039000,molasses resulting from the extraction or refining of sugar >> other +17039010,"MOLASSES RESULTING FROM THE EXTRACTION OR REFINING OF SUGAR:Other :Molasses, edible" +17039090,MOLASSES RESULTING FROM THE EXTRACTION OR REFINING OF SUGAR:Other :Other +17040000,sugar confectionery including white chocolate not containing cocoa +17041000,"SUGAR CONFECTIONERY (INCLUDING WHITE CHOCOLATE), NOT CONTAINING COCOA::Chewing gum, whether or not sugar coated" +17049000,sugar confectionery including white chocolate not containing cocoa >> other +17049010,"SUGAR CONFECTIONERY (INCLUDING WHITE CHOCOLATE), NOT CONTAINING COCOA:Other :Jelly confectionary" +17049020,"SUGAR CONFECTIONERY (INCLUDING WHITE CHOCOLATE), NOT CONTAINING COCOA:Other :Boiled sweets, whether or not filled" +17049030,"SUGAR CONFECTIONERY (INCLUDING WHITE CHOCOLATE), NOT CONTAINING COCOA:Other :Toffees, caramels and similar sweets" 
+17049090,"SUGAR CONFECTIONERY (INCLUDING WHITE CHOCOLATE), NOT CONTAINING COCOA:Other :Other" +18010000,"::COCOA BEANS, WHOLE OR BROKEN, RAW OR ROASTED" +18020000,"::COCOA SHELLS, HUSKS, SKINS AND OTHER COCOA WASTE" +18030000,cocoa paste whether or not defatted +18031000,"COCOA PASTE, WHETHER OR NOT DEFATTED::Not defatted" +18032000,"COCOA PASTE, WHETHER OR NOT DEFATTED::Wholly or partly defatted" +18040000,"::COCOA BUTTER, FAT AND OIL" +18050000,"::COCOA POWDER, NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER" +18060000,chocolate and other food preparations containing cocoa +18061000,"CHOCOLATE AND OTHER FOOD PREPARATIONS CONTAINING COCOA::Cocoa powder, containing added sugar or other sweetening matter" +18062000,"CHOCOLATE AND OTHER FOOD PREPARATIONS CONTAINING COCOA::Other preparations in blocks, slabs or bars weighing more than 2 kg. or in liquid, paste, powder, granular or other bulk form in containers or immediate packings, of a content exceeding 2 kg. " +18063100,CHOCOLATE AND OTHER FOOD PREPARATIONS CONTAINING COCOA::Filled +18063200,CHOCOLATE AND OTHER FOOD PREPARATIONS CONTAINING COCOA::Not filled +18069000,chocolate and other food preparations containing cocoa >> other +18069010,CHOCOLATE AND OTHER FOOD PREPARATIONS CONTAINING COCOA:Other :Chocolate and chocolate products +18069020,CHOCOLATE AND OTHER FOOD PREPARATIONS CONTAINING COCOA:Other :Sugar confectionary containing cocoa +18069030,CHOCOLATE AND OTHER FOOD PREPARATIONS CONTAINING COCOA:Other :Spreads containing cocoa +18069040,CHOCOLATE AND OTHER FOOD PREPARATIONS CONTAINING COCOA:Other :Preparations containing cocoa for making beverages +18069090,CHOCOLATE AND OTHER FOOD PREPARATIONS CONTAINING COCOA:Other :Other +19011010,"MALT EXTRACT; FOOD PREPARATIONS OF FLOUR, GROATS, MEAL, STARCH OR MALT EXTRACT, NOT CONTAINING COCOA OR CONTAINING LESS THAN 40% BY WEIGHT OF COCOA CALCULATED ON A TOTALLY DEFATTED BASIS, NOT ELSEWHERE SPECIFIED OR INCLUDED; FOOD PREPARATIONS OF GOODS OF HEADINGS 0401 TO 0404, NOT CONTAINING COCOA OR CONTAINING LESS THAN 5% BY WEIGHT OF COCOA CALCULATED ON A TOTALLY DEFATTED BASIS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Preparations suitable for infants or young children, put up for retail sale:Malted milk (including powder)(OLD tariff)" +19011090,"MALT EXTRACT; FOOD PREPARATIONS OF FLOUR, GROATS, MEAL, STARCH OR MALT EXTRACT, NOT CONTAINING COCOA OR CONTAINING LESS THAN 40% BY WEIGHT OF COCOA CALCULATED ON A TOTALLY DEFATTED BASIS, NOT ELSEWHERE SPECIFIED OR INCLUDED; FOOD PREPARATIONS OF GOODS OF HEADINGS 0401 TO 0404, NOT CONTAINING COCOA OR CONTAINING LESS THAN 5% BY WEIGHT OF COCOA CALCULATED ON A TOTALLY DEFATTED BASIS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Preparations suitable for infants or young children, put up for retail sale:Other(OLD tariff)" +19012000,"MALT EXTRACT; FOOD PREPARATIONS OF FLOUR, GROATS, MEAL, STARCH OR MALT EXTRACT, NOT CONTAINING COCOA OR CONTAINING LESS THAN 40% BY WEIGHT OF COCOA CALCULATED ON A TOTALLY DEFATTED BASIS, NOT ELSEWHERE SPECIFIED OR INCLUDED; FOOD PREPARATIONS OF GOODS OF HEADINGS 0401 TO 0404, NOT CONTAINING COCOA OR CONTAINING LESS THAN 5% BY WEIGHT OF COCOA CALCULATED ON A TOTALLY DEFATTED BASIS, NOT ELSEWHERE SPECIFIED OR INCLUDED::Mixes and doughs for the preparation of bakers wares of heading 1905(OLD tariff)" +19019010,"MALT EXTRACT; FOOD PREPARATIONS OF FLOUR, GROATS, MEAL, STARCH OR MALT EXTRACT, NOT CONTAINING COCOA OR CONTAINING LESS THAN 40% BY WEIGHT OF COCOA CALCULATED ON A TOTALLY DEFATTED BASIS, NOT ELSEWHERE SPECIFIED OR INCLUDED; FOOD 
PREPARATIONS OF GOODS OF HEADINGS 0401 TO 0404, NOT CONTAINING COCOA OR CONTAINING LESS THAN 5% BY WEIGHT OF COCOA CALCULATED ON A TOTALLY DEFATTED BASIS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Malt extract(OLD tariff)" +19019090,"MALT EXTRACT; FOOD PREPARATIONS OF FLOUR, GROATS, MEAL, STARCH OR MALT EXTRACT, NOT CONTAINING COCOA OR CONTAINING LESS THAN 40% BY WEIGHT OF COCOA CALCULATED ON A TOTALLY DEFATTED BASIS, NOT ELSEWHERE SPECIFIED OR INCLUDED; FOOD PREPARATIONS OF GOODS OF HEADINGS 0401 TO 0404, NOT CONTAINING COCOA OR CONTAINING LESS THAN 5% BY WEIGHT OF COCOA CALCULATED ON A TOTALLY DEFATTED BASIS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Other(OLD tariff)" +19020000,pasta whether or not cooked or stuffed with meat or other substances or otherwise prepared such as spaghetti macaroni noodles lasagne gnocchi ravioli cannelloni couscous whether or not prepared uncooked pasta not stuffed or otherwise prepared +19021100,"PASTA, WHETHER OR NOT COOKED OR STUFFED (WITH MEAT OR OTHER SUBSTANCES) OR OTHERWISE PREPARED, SUCH AS SPAGHETTI, MACARONI, NOODLES, LASAGNE, GNOCCHI, RAVIOLI, CANNELLONI; COUSCOUS, WHETHER OR NOT PREPARED::Containing eggs" +19021900,"PASTA, WHETHER OR NOT COOKED OR STUFFED (WITH MEAT OR OTHER SUBSTANCES) OR OTHERWISE PREPARED, SUCH AS SPAGHETTI, MACARONI, NOODLES, LASAGNE, GNOCCHI, RAVIOLI, CANNELLONI; COUSCOUS, WHETHER OR NOT PREPARED::Other" +19022000,pasta whether or not cooked or stuffed with meat or other substances or otherwise prepared such as spaghetti macaroni noodles lasagne gnocchi ravioli cannelloni couscous whether or not prepared uncooked pasta not stuffed or otherwise prepared >> stuffed pasta whether or not cooked or otherwise prepared +19022010,"PASTA, WHETHER OR NOT COOKED OR STUFFED (WITH MEAT OR OTHER SUBSTANCES) OR OTHERWISE PREPARED, SUCH AS SPAGHETTI, MACARONI, NOODLES, LASAGNE, GNOCCHI, RAVIOLI, CANNELLONI; COUSCOUS, WHETHER OR NOT PREPARED:Stuffed pasta, whether or not cooked or otherwise prepared:Cooked" +19022090,"PASTA, WHETHER OR NOT COOKED OR STUFFED (WITH MEAT OR OTHER SUBSTANCES) OR OTHERWISE PREPARED, SUCH AS SPAGHETTI, MACARONI, NOODLES, LASAGNE, GNOCCHI, RAVIOLI, CANNELLONI; COUSCOUS, WHETHER OR NOT PREPARED:Stuffed pasta, whether or not cooked or otherwise prepared:Other" +19023010,"PASTA, WHETHER OR NOT COOKED OR STUFFED (WITH MEAT OR OTHER SUBSTANCES) OR OTHERWISE PREPARED, SUCH AS SPAGHETTI, MACARONI, NOODLES, LASAGNE, GNOCCHI, RAVIOLI, CANNELLONI; COUSCOUS, WHETHER OR NOT PREPARED:Other Pasta :Dried(OLD tariff)" +19023090,"PASTA, WHETHER OR NOT COOKED OR STUFFED (WITH MEAT OR OTHER SUBSTANCES) OR OTHERWISE PREPARED, SUCH AS SPAGHETTI, MACARONI, NOODLES, LASAGNE, GNOCCHI, RAVIOLI, CANNELLONI; COUSCOUS, WHETHER OR NOT PREPARED:Other Pasta :Other(OLD tariff)" +19024010,"PASTA, WHETHER OR NOT COOKED OR STUFFED (WITH MEAT OR OTHER SUBSTANCES) OR OTHERWISE PREPARED, SUCH AS SPAGHETTI, MACARONI, NOODLES, LASAGNE, GNOCCHI, RAVIOLI, CANNELLONI; COUSCOUS, WHETHER OR NOT PREPARED:Couscous :Unprepared(OLD tariff)" +19024090,"PASTA, WHETHER OR NOT COOKED OR STUFFED (WITH MEAT OR OTHER SUBSTANCES) OR OTHERWISE PREPARED, SUCH AS SPAGHETTI, MACARONI, NOODLES, LASAGNE, GNOCCHI, RAVIOLI, CANNELLONI; COUSCOUS, WHETHER OR NOT PREPARED:Couscous :Other(OLD tariff)" +19030000,"::TAPIOCA AND SUBSTITUTES THEREFOR PREPARED FROM STARCH, IN THE FORM OF FLAKES, GRAINS, PEARLS, SIFTINGS OR IN SIMILAR FORMS(OLD tariff)" +19041010,"PREPARED FOODS OBTAINED BY THE SWELLING OR ROASTING OF CEREALS OR CEREAL PRODUCTS (FOR EXAMPLE, CORN FLAKES); CEREALS 
[OTHER THAN MAIZE (CORN) ] IN GRAIN FORM OR IN THE FORM OF FLAKES OR OTHER WORKED GRAINS (EXCEPT FLOUR, GROATS AND MEAL), PRE-COOKED OR OTHERWISE PREPARED, NOT ELSEWHERE SPECIFIED OR INCLUDED:Prepared foods obtained by the swelling or roasting of cereals or cereal products:Corn flakes(OLD tariff)" +19041020,"PREPARED FOODS OBTAINED BY THE SWELLING OR ROASTING OF CEREALS OR CEREAL PRODUCTS (FOR EXAMPLE, CORN FLAKES); CEREALS [OTHER THAN MAIZE (CORN) ] IN GRAIN FORM OR IN THE FORM OF FLAKES OR OTHER WORKED GRAINS (EXCEPT FLOUR, GROATS AND MEAL), PRE-COOKED OR OTHERWISE PREPARED, NOT ELSEWHERE SPECIFIED OR INCLUDED:Prepared foods obtained by the swelling or roasting of cereals or cereal products:Paws, Mudi and the like(OLD tariff)" +19041030,"PREPARED FOODS OBTAINED BY THE SWELLING OR ROASTING OF CEREALS OR CEREAL PRODUCTS (FOR EXAMPLE, CORN FLAKES); CEREALS [OTHER THAN MAIZE (CORN) ] IN GRAIN FORM OR IN THE FORM OF FLAKES OR OTHER WORKED GRAINS (EXCEPT FLOUR, GROATS AND MEAL), PRE-COOKED OR OTHERWISE PREPARED, NOT ELSEWHERE SPECIFIED OR INCLUDED:Prepared foods obtained by the swelling or roasting of cereals or cereal products:Bulgur wheat(OLD tariff)" +19041090,"PREPARED FOODS OBTAINED BY THE SWELLING OR ROASTING OF CEREALS OR CEREAL PRODUCTS (FOR EXAMPLE, CORN FLAKES); CEREALS [OTHER THAN MAIZE (CORN) ] IN GRAIN FORM OR IN THE FORM OF FLAKES OR OTHER WORKED GRAINS (EXCEPT FLOUR, GROATS AND MEAL), PRE-COOKED OR OTHERWISE PREPARED, NOT ELSEWHERE SPECIFIED OR INCLUDED:Prepared foods obtained by the swelling or roasting of cereals or cereal products:Other(OLD tariff)" +19042000,"PREPARED FOODS OBTAINED BY THE SWELLING OR ROASTING OF CEREALS OR CEREAL PRODUCTS (FOR EXAMPLE, CORN FLAKES); CEREALS [OTHER THAN MAIZE (CORN) ] IN GRAIN FORM OR IN THE FORM OF FLAKES OR OTHER WORKED GRAINS (EXCEPT FLOUR, GROATS AND MEAL), PRE-COOKED OR OTHERWISE PREPARED, NOT ELSEWHERE SPECIFIED OR INCLUDED::Prepared foods obtained from unroasted cereal flakes or from mixtures of unroasted cereal flakes and roasted cereal flakes or swelled cereals(OLD tariff)" +19043000,"PREPARED FOODS OBTAINED BY THE SWELLING OR ROASTING OF CEREALS OR CEREAL PRODUCTS (FOR EXAMPLE, CORN FLAKES); CEREALS [OTHER THAN MAIZE (CORN) ] IN GRAIN FORM OR IN THE FORM OF FLAKES OR OTHER WORKED GRAINS (EXCEPT FLOUR, GROATS AND MEAL), PRE-COOKED OR OTHERWISE PREPARED, NOT ELSEWHERE SPECIFIED OR INCLUDED::Bulgur wheat(OLD tariff)" +19049000,"PREPARED FOODS OBTAINED BY THE SWELLING OR ROASTING OF CEREALS OR CEREAL PRODUCTS (FOR EXAMPLE, CORN FLAKES); CEREALS [OTHER THAN MAIZE (CORN) ] IN GRAIN FORM OR IN THE FORM OF FLAKES OR OTHER WORKED GRAINS (EXCEPT FLOUR, GROATS AND MEAL), PRE-COOKED OR OTHERWISE PREPARED, NOT ELSEWHERE SPECIFIED OR INCLUDED::Other(OLD tariff)" +19050000,bread pastry cakes biscuits and other bakers wares whether or not containing cocoa communion wafers empty cachets of a kind suitable for pharmaceutical use sealing wafers rice paper and similar products +19051000,"BREAD, PASTRY, CAKES, BISCUITS AND OTHER BAKER'S WARES, WHETHER OR NOT CONTAINING COCOA; COMMUNION WAFERS, EMPTY CACHETS OF A KIND SUITABLE FOR PHARMACEUTICAL USE, SEALING WAFERS, RICE PAPER AND SIMILAR PRODUCTS::Crispbread" +19052000,"BREAD, PASTRY, CAKES, BISCUITS AND OTHER BAKER'S WARES, WHETHER OR NOT CONTAINING COCOA; COMMUNION WAFERS, EMPTY CACHETS OF A KIND SUITABLE FOR PHARMACEUTICAL USE, SEALING WAFERS, RICE PAPER AND SIMILAR PRODUCTS::Gingerbread and the like" +19053100,"BREAD, PASTRY, CAKES, BISCUITS AND OTHER BAKER'S WARES, WHETHER OR NOT CONTAINING COCOA; 
COMMUNION WAFERS, EMPTY CACHETS OF A KIND SUITABLE FOR PHARMACEUTICAL USE, SEALING WAFERS, RICE PAPER AND SIMILAR PRODUCTS::Sweet biscuits" +19053200,bread pastry cakes biscuits and other bakers wares whether or not containing cocoa communion wafers empty cachets of a kind suitable for pharmaceutical use sealing wafers rice paper and similar products >> waffles and wafers communion wafers +19053211,"BREAD, PASTRY, CAKES, BISCUITS AND OTHER BAKER'S WARES, WHETHER OR NOT CONTAINING COCOA; COMMUNION WAFERS, EMPTY CACHETS OF A KIND SUITABLE FOR PHARMACEUTICAL USE, SEALING WAFERS, RICE PAPER AND SIMILAR PRODUCTS:Waffles and wafers :Coated with chocolate or containing chocolate" +19053219,"BREAD, PASTRY, CAKES, BISCUITS AND OTHER BAKER'S WARES, WHETHER OR NOT CONTAINING COCOA; COMMUNION WAFERS, EMPTY CACHETS OF A KIND SUITABLE FOR PHARMACEUTICAL USE, SEALING WAFERS, RICE PAPER AND SIMILAR PRODUCTS:Waffles and wafers :Other" +19053290,"BREAD, PASTRY, CAKES, BISCUITS AND OTHER BAKER'S WARES, WHETHER OR NOT CONTAINING COCOA; COMMUNION WAFERS, EMPTY CACHETS OF A KIND SUITABLE FOR PHARMACEUTICAL USE, SEALING WAFERS, RICE PAPER AND SIMILAR PRODUCTS:Waffles and wafers :Other" +19054000,"BREAD, PASTRY, CAKES, BISCUITS AND OTHER BAKER'S WARES, WHETHER OR NOT CONTAINING COCOA; COMMUNION WAFERS, EMPTY CACHETS OF A KIND SUITABLE FOR PHARMACEUTICAL USE, SEALING WAFERS, RICE PAPER AND SIMILAR PRODUCTS::Rusks, toasted bread and similar toasted products" +19059000,bread pastry cakes biscuits and other bakers wares whether or not containing cocoa communion wafers empty cachets of a kind suitable for pharmaceutical use sealing wafers rice paper and similar products >> other +19059010,"BREAD, PASTRY, CAKES, BISCUITS AND OTHER BAKER'S WARES, WHETHER OR NOT CONTAINING COCOA; COMMUNION WAFERS, EMPTY CACHETS OF A KIND SUITABLE FOR PHARMACEUTICAL USE, SEALING WAFERS, RICE PAPER AND SIMILAR PRODUCTS:Other :Pastries and cakes" +19059020,"BREAD, PASTRY, CAKES, BISCUITS AND OTHER BAKER'S WARES, WHETHER OR NOT CONTAINING COCOA; COMMUNION WAFERS, EMPTY CACHETS OF A KIND SUITABLE FOR PHARMACEUTICAL USE, SEALING WAFERS, RICE PAPER AND SIMILAR PRODUCTS:Other :Biscuits not elsewhere specified or included" +19059030,"BREAD, PASTRY, CAKES, BISCUITS AND OTHER BAKER'S WARES, WHETHER OR NOT CONTAINING COCOA; COMMUNION WAFERS, EMPTY CACHETS OF A KIND SUITABLE FOR PHARMACEUTICAL USE, SEALING WAFERS, RICE PAPER AND SIMILAR PRODUCTS:Other :Extruded or expanded products, savoury or salted" +19059040,"BREAD, PASTRY, CAKES, BISCUITS AND OTHER BAKER'S WARES, WHETHER OR NOT CONTAINING COCOA; COMMUNION WAFERS, EMPTY CACHETS OF A KIND SUITABLE FOR PHARMACEUTICAL USE, SEALING WAFERS, RICE PAPER AND SIMILAR PRODUCTS:Other :Papad" +19059090,"BREAD, PASTRY, CAKES, BISCUITS AND OTHER BAKER'S WARES, WHETHER OR NOT CONTAINING COCOA; COMMUNION WAFERS, EMPTY CACHETS OF A KIND SUITABLE FOR PHARMACEUTICAL USE, SEALING WAFERS, RICE PAPER AND SIMILAR PRODUCTS:Other :Other" +20010000,vegetables fruit nuts and other edible parts of plants prepared or preserved by vinegar or acetic acid +20011000,"VEGETABLES, FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, PREPARED OR PRESERVED BY VINEGAR OR ACETIC ACID::Cucumbers and gherkins" +20019000,"VEGETABLES, FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, PREPARED OR PRESERVED BY VINEGAR OR ACETIC ACID::Other" +20020000,tomatoes prepared or preserved otherwise than by vinegar or acetic acid +20021000,"TOMATOES PREPARED OR PRESERVED OTHERWISE THAN BY VINEGAR OR ACETIC ACID::Tomatoes, whole or in pieces" +20029000,TOMATOES 
PREPARED OR PRESERVED OTHERWISE THAN BY VINEGAR OR ACETIC ACID::Other +20030000,mushrooms and truffles prepared or preserved otherwise than by vinegar or acetic acid +20031000,"MUSHROOMS AND TRUFFLES, PREPARED OR PRESERVED OTHERWISE THAN BY VINEGAR OR ACETIC ACID::Mushrooms of the genus Agaricus" +20039000,mushrooms and truffles prepared or preserved otherwise than by vinegar or acetic acid >> other +20039010,"MUSHROOMS AND TRUFFLES, PREPARED OR PRESERVED OTHERWISE THAN BY VINEGAR OR ACETIC ACID:Other:Truffles" +20039090,"MUSHROOMS AND TRUFFLES, PREPARED OR PRESERVED OTHERWISE THAN BY VINEGAR OR ACETIC ACID:Other:Other" +20040000,other vegetables prepared or preserved otherwise than by vinegar or acetic acid frozen other than products of heading 2006 +20041000,"OTHER VEGETABLES PREPARED OR PRESERVED OTHERWISE THAN BY VINEGAR OR ACETIC ACID, FROZEN, OTHER THAN PRODUCTS OF HEADING 2006::Potatoes" +20049000,"OTHER VEGETABLES PREPARED OR PRESERVED OTHERWISE THAN BY VINEGAR OR ACETIC ACID, FROZEN, OTHER THAN PRODUCTS OF HEADING 2006::Other vegetables and mixtures of vegetables" +20050000,other vegetables prepared or preserved otherwise than by vinegar or acetic acid not other than products of frozen heading 2006 +20051000,"OTHER VEGETABLES PREPARED OR PRESERVED OTHERWISE THAN BY VINEGAR OR ACETIC ACID, NOT FROZEN, OTHER THAN PRODUCTS OF HEADING 2006::Homogenised vegetables" +20052000,"OTHER VEGETABLES PREPARED OR PRESERVED OTHERWISE THAN BY VINEGAR OR ACETIC ACID, NOT FROZEN, OTHER THAN PRODUCTS OF HEADING 2006::Potatoes" +20054000,"OTHER VEGETABLES PREPARED OR PRESERVED OTHERWISE THAN BY VINEGAR OR ACETIC ACID, NOT FROZEN, OTHER THAN PRODUCTS OF HEADING 2006::Peas (pisum, sativum) " +20055100,"OTHER VEGETABLES PREPARED OR PRESERVED OTHERWISE THAN BY VINEGAR OR ACETIC ACID, NOT FROZEN, OTHER THAN PRODUCTS OF HEADING 2006::Beans, shelled" +20055900,"OTHER VEGETABLES PREPARED OR PRESERVED OTHERWISE THAN BY VINEGAR OR ACETIC ACID, NOT FROZEN, OTHER THAN PRODUCTS OF HEADING 2006::Other" +20056000,"OTHER VEGETABLES PREPARED OR PRESERVED OTHERWISE THAN BY VINEGAR OR ACETIC ACID, NOT FROZEN, OTHER THAN PRODUCTS OF HEADING 2006::Asparagus" +20057000,"OTHER VEGETABLES PREPARED OR PRESERVED OTHERWISE THAN BY VINEGAR OR ACETIC ACID, NOT FROZEN, OTHER THAN PRODUCTS OF HEADING 2006::Olives" +20058000,"OTHER VEGETABLES PREPARED OR PRESERVED OTHERWISE THAN BY VINEGAR OR ACETIC ACID, NOT FROZEN, OTHER THAN PRODUCTS OF HEADING 2006::Sweet corn (Zea mays var. 
saccharata) " +20059100,"OTHER VEGETABLES PREPARED OR PRESERVED OTHERWISE THAN BY VINEGAR OR ACETIC ACID, NOT FROZEN, OTHER THAN PRODUCTS OF HEADING 2006::Bamboo shoots" +20059900,"OTHER VEGETABLES PREPARED OR PRESERVED OTHERWISE THAN BY VINEGAR OR ACETIC ACID, NOT FROZEN, OTHER THAN PRODUCTS OF HEADING 2006::Other" +20060000,"::VEGETABLES, FRUITS, NUTS, FRUIT-PEEL AND OTHER PARTS OF PLANTS, PRESERVED BY SUGAR (DRAINED, GLACE OR CRYSTALLISED)" +20070000,jams fruit jellies marmalades fruitor nut puree and fruit or nut pastes obtained by cooking whether or not containing added sugar or other sweetening matter +20071000,"JAMS, FRUIT JELLIES, MARMALADES, FRUIT OR NUT PUREE AND FRUIT OR NUT PASTES, OBTAINED BY COOKING, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER::Homogenised preparations" +20079100,"JAMS, FRUIT JELLIES, MARMALADES, FRUIT OR NUT PUREE AND FRUIT OR NUT PASTES, OBTAINED BY COOKING, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER::Citrus fruit" +20079900,jams fruit jellies marmalades fruitor nut puree and fruit or nut pastes obtained by cooking whether or not containing added sugar or other sweetening matter >> other +20079910,"JAMS, FRUIT JELLIES, MARMALADES, FRUIT OR NUT PUREE AND FRUIT OR NUT PASTES, OBTAINED BY COOKING, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER:Other:Mango" +20079920,"JAMS, FRUIT JELLIES, MARMALADES, FRUIT OR NUT PUREE AND FRUIT OR NUT PASTES, OBTAINED BY COOKING, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER:Other:Guava" +20079930,"JAMS, FRUIT JELLIES, MARMALADES, FRUIT OR NUT PUREE AND FRUIT OR NUT PASTES, OBTAINED BY COOKING, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER:Other:Pine apple" +20079940,"JAMS, FRUIT JELLIES, MARMALADES, FRUIT OR NUT PUREE AND FRUIT OR NUT PASTES, OBTAINED BY COOKING, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER:Other:Apple" +20079990,"JAMS, FRUIT JELLIES, MARMALADES, FRUIT OR NUT PUREE AND FRUIT OR NUT PASTES, OBTAINED BY COOKING, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER:Other:Other" +20080000,fruit nuts and other edible parts of plants otherwise prepared or preserved whether or not containing added sugar or other sweetening matter or spirit not elsewhere specified or included nuts and other seeds whether or not mixed together +20081100,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED ::Ground-nuts" +20081900,fruit nuts and other edible parts of plants otherwise prepared or preserved whether or not containing added sugar or other sweetening matter or spirit not elsewhere specified or included nuts and other seeds whether or not mixed together >> other including mixtures +20081910,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED :Other, including mixtures:Cashew nut, roasted, salted or roasted and salted" +20081920,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED :Other, including mixtures:Other roasted nuts and seeds" +20081930,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING 
MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED :Other, including mixtures:Other nuts, otherwise prepared or preserved" +20081940,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED :Other, including mixtures:Other roasted and fried vegetable products" +20081990,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED :Other, including mixtures:Other" +20082000,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED ::Pineapples" +20083000,fruit nuts and other edible parts of plants otherwise prepared or preserved whether or not containing added sugar or other sweetening matter or spirit not elsewhere specified or included nuts and other seeds whether or not mixed together >> citrus fruit +20083010,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED :Citrus fruit :Orange" +20083090,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED :Citrus fruit :Other" +20084000,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED ::Pears" +20085000,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED ::Apricots" +20086000,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED ::Cherries" +20087000,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED ::Peaches, including nectarines" +20088000,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED ::Strawberries" +20089100,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED ::Palm hearts" +20089300,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED :Cranberries:Cranberries (Vaccinium macrocarpon, Vaccinium oxycoccos Vaccinium vitis-idaea)" +20089700,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED :Mixtures:Mixtures" +20089900,fruit nuts and other edible parts of plants otherwise prepared or preserved whether or not containing added sugar or other sweetening matter or spirit not 
elsewhere specified or included nuts and other seeds whether or not mixed together >> other squash +20089911,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED :Other:Mango" +20089912,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED :Other:Lemon" +20089913,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED :Other:Orange" +20089914,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED :Other:Pineapple" +20089919,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED :Other:Other" +20089991,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED :Other:Fruit cocktail" +20089992,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED :Other:Grapes" +20089993,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED :Other:Apples" +20089994,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED :Other:Guava" +20089999,"FRUIT, NUTS AND OTHER EDIBLE PARTS OF PLANTS, OTHERWISE PREPARED OR PRESERVED, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR SPIRIT, NOT ELSEWHERE SPECIFIED OR INCLUDED :Other:Other" +20090000,coconut water and vegetable juices unfermented and not containing added spirit whether or not containing added sugar or other sweetening matter orange juice +20091100,"FRUIT JUICES (INCLUDING GRAPE MUST) AND VEGETABLE JUICES, UNFERMENTED AND NOT CONTAINING ADDED SPIRIT, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER ::Frozen" +20091200,"FRUIT JUICES (INCLUDING GRAPE MUST) AND VEGETABLE JUICES, UNFERMENTED AND NOT CONTAINING ADDED SPIRIT, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER ::Not frozen, of a Brix value not exceeding 20" +20091900,"FRUIT JUICES (INCLUDING GRAPE MUST) AND VEGETABLE JUICES, UNFERMENTED AND NOT CONTAINING ADDED SPIRIT, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER ::Other " +20092100,"FRUIT JUICES (INCLUDING GRAPE MUST) AND VEGETABLE JUICES, UNFERMENTED AND NOT CONTAINING ADDED SPIRIT, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER ::Of a Brix value not exceeding 20" +20092900,"FRUIT JUICES (INCLUDING GRAPE MUST) AND VEGETABLE JUICES, UNFERMENTED AND NOT CONTAINING ADDED SPIRIT, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER ::Other " +20093100,"FRUIT JUICES (INCLUDING GRAPE MUST) AND VEGETABLE JUICES, UNFERMENTED AND NOT CONTAINING 
ADDED SPIRIT, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER ::Of a Brix value not exceeding 20" +20093900,"FRUIT JUICES (INCLUDING GRAPE MUST) AND VEGETABLE JUICES, UNFERMENTED AND NOT CONTAINING ADDED SPIRIT, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER ::Other" +20094100,"FRUIT JUICES (INCLUDING GRAPE MUST) AND VEGETABLE JUICES, UNFERMENTED AND NOT CONTAINING ADDED SPIRIT, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER ::Of a Brix value not exceeding 20" +20094900,"FRUIT JUICES (INCLUDING GRAPE MUST) AND VEGETABLE JUICES, UNFERMENTED AND NOT CONTAINING ADDED SPIRIT, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER ::Other" +20095000,"FRUIT JUICES (INCLUDING GRAPE MUST) AND VEGETABLE JUICES, UNFERMENTED AND NOT CONTAINING ADDED SPIRIT, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER ::Tomato juice " +20096100,"FRUIT JUICES (INCLUDING GRAPE MUST) AND VEGETABLE JUICES, UNFERMENTED AND NOT CONTAINING ADDED SPIRIT, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER ::Of a Brix value not exceeding 30" +20096900,"FRUIT JUICES (INCLUDING GRAPE MUST) AND VEGETABLE JUICES, UNFERMENTED AND NOT CONTAINING ADDED SPIRIT, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER ::Other " +20097100,"FRUIT JUICES (INCLUDING GRAPE MUST) AND VEGETABLE JUICES, UNFERMENTED AND NOT CONTAINING ADDED SPIRIT, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER ::Of a Brix value not exceeding 20" +20097900,"FRUIT JUICES (INCLUDING GRAPE MUST) AND VEGETABLE JUICES, UNFERMENTED AND NOT CONTAINING ADDED SPIRIT, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER ::Other" +20098100,"FRUIT JUICES (INCLUDING GRAPE MUST) AND VEGETABLE JUICES, UNFERMENTED AND NOT CONTAINING ADDED SPIRIT, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER :Cranberry:Cranberry (Vaccinium macrocarpon, Vaccinium Oxycoccos Vaccinium virtis-ieaea) juice" +20098900,coconut water and vegetable juices unfermented and not containing added spirit whether or not containing added sugar or other sweetening matter orange juice >> other +20098910,"FRUIT JUICES (INCLUDING GRAPE MUST) AND VEGETABLE JUICES, UNFERMENTED AND NOT CONTAINING ADDED SPIRIT, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER :Other:Mango Juice" +20098990,"FRUIT JUICES (INCLUDING GRAPE MUST) AND VEGETABLE JUICES, UNFERMENTED AND NOT CONTAINING ADDED SPIRIT, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER :Other:Other" +20099000,"FRUIT JUICES (INCLUDING GRAPE MUST) AND VEGETABLE JUICES, UNFERMENTED AND NOT CONTAINING ADDED SPIRIT, WHETHER OR NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER :Mixtures of juices:Mixtures of juices" +20900000,plates sticks tips and the like for tools unmounted of cermets +21010000,extracts essences and concentrates of coffee tea or mate and preparations with a basis of these products or with a basis of coffee tea or mate roasted chicory and other roasted coffee substitutes and extracts essences and concentrates thereof extracts essences and concentrates of coffee and preparations with a basis of these extracts essences or concentrates or with a basis of coffee +21011100,extracts essences and concentrates of coffee tea or mate and preparations with a basis of these products or with a basis of coffee tea or mate roasted chicory and other roasted coffee substitutes and extracts essences and concentrates thereof extracts essences and concentrates of coffee and 
preparations with a basis of these extracts essences or concentrates or with a basis of coffee >> extracts essences and concentrates +21011110,"EXTRACTS, ESSENCES AND CONCENTRATES, OF COFFEE, TEA OR MATE AND PREPARATIONS WITH A BASIS OF THESE PRODUCTS OR WITH A BASIS OF COFFEE, TEA OR MATE; ROASTED CHICORY AND OTHER ROASTED COFFEE SUBSTITUTES, AND EXTRACTS, ESSENCES AND CONCENTRATES THEREOF:Extracts, essences and concentrates:Instant coffee, flavoured" +21011120,"EXTRACTS, ESSENCES AND CONCENTRATES, OF COFFEE, TEA OR MATE AND PREPARATIONS WITH A BASIS OF THESE PRODUCTS OR WITH A BASIS OF COFFEE, TEA OR MATE; ROASTED CHICORY AND OTHER ROASTED COFFEE SUBSTITUTES, AND EXTRACTS, ESSENCES AND CONCENTRATES THEREOF:Extracts, essences and concentrates:Instant coffee, not flavoured" +21011130,"EXTRACTS, ESSENCES AND CONCENTRATES, OF COFFEE, TEA OR MATE AND PREPARATIONS WITH A BASIS OF THESE PRODUCTS OR WITH A BASIS OF COFFEE, TEA OR MATE; ROASTED CHICORY AND OTHER ROASTED COFFEE SUBSTITUTES, AND EXTRACTS, ESSENCES AND CONCENTRATES THEREOF:Extracts, essences and concentrates:Coffee aroma" +21011190,"EXTRACTS, ESSENCES AND CONCENTRATES, OF COFFEE, TEA OR MATE AND PREPARATIONS WITH A BASIS OF THESE PRODUCTS OR WITH A BASIS OF COFFEE, TEA OR MATE; ROASTED CHICORY AND OTHER ROASTED COFFEE SUBSTITUTES, AND EXTRACTS, ESSENCES AND CONCENTRATES THEREOF:Extracts, essences and concentrates:Other" +21011200,"EXTRACTS, ESSENCES AND CONCENTRATES, OF COFFEE, TEA OR MATE AND PREPARATIONS WITH A BASIS OF THESE PRODUCTS OR WITH A BASIS OF COFFEE, TEA OR MATE; ROASTED CHICORY AND OTHER ROASTED COFFEE SUBSTITUTES, AND EXTRACTS, ESSENCES AND CONCENTRATES THEREOF::Preparations with basis of extracts, essences, concentrates or with a basis of coffee" +21012000,extracts essences and concentrates of coffee tea or mate and preparations with a basis of these products or with a basis of coffee tea or mate roasted chicory and other roasted coffee substitutes and extracts essences and concentrates thereof extracts essences and concentrates of coffee and preparations with a basis of these extracts essences or concentrates or with a basis of coffee >> extracts essences and concentrates of tea or mate and preparations with a basis of these extracts essences or concentrates or with a basis of tea or mate +21012010,"EXTRACTS, ESSENCES AND CONCENTRATES, OF COFFEE, TEA OR MATE AND PREPARATIONS WITH A BASIS OF THESE PRODUCTS OR WITH A BASIS OF COFFEE, TEA OR MATE; ROASTED CHICORY AND OTHER ROASTED COFFEE SUBSTITUTES, AND EXTRACTS, ESSENCES AND CONCENTRATES THEREOF:Extracts, essences and concentrates, of tea or mate, and preparations with a basis of these extracts, essences or concentrates or with a basis of tea or mate :Instant tea" +21012020,"EXTRACTS, ESSENCES AND CONCENTRATES, OF COFFEE, TEA OR MATE AND PREPARATIONS WITH A BASIS OF THESE PRODUCTS OR WITH A BASIS OF COFFEE, TEA OR MATE; ROASTED CHICORY AND OTHER ROASTED COFFEE SUBSTITUTES, AND EXTRACTS, ESSENCES AND CONCENTRATES THEREOF:Extracts, essences and concentrates, of tea or mate, and preparations with a basis of these extracts, essences or concentrates or with a basis of tea or mate :Quick brewing black tea" +21012030,"EXTRACTS, ESSENCES AND CONCENTRATES, OF COFFEE, TEA OR MATE AND PREPARATIONS WITH A BASIS OF THESE PRODUCTS OR WITH A BASIS OF COFFEE, TEA OR MATE; ROASTED CHICORY AND OTHER ROASTED COFFEE SUBSTITUTES, AND EXTRACTS, ESSENCES AND CONCENTRATES THEREOF:Extracts, essences and concentrates, of tea or mate, and preparations with a basis of these extracts, essences or
concentrates or with a basis of tea or mate :Tea aroma" +21012090,"EXTRACTS, ESSENCES AND CONCENTRATES, OF COFFEE, TEA OR MATE AND PREPARATIONS WITH A BASIS OF THESE PRODUCTS OR WITH A BASIS OF COFFEE, TEA OR MATE; ROASTED CHICORY AND OTHER ROASTED COFFEE SUBSTITUTES, AND EXTRACTS, ESSENCES AND CONCENTRATES THEREOF:Extracts, essences and concentrates, of tea or mate, and preparations with a basis of these extracts, essences or concentrates or with a basis of tea or mate :Other" +21013000,extracts essences and concentrates of coffee tea or mate and preparations with a basis of these products or with a basis of coffee tea or mate roasted chicory and other roasted coffee substitutes and extracts essences and concentrates thereof extracts essences and concentrates of coffee and preparations with a basis of these extracts essences or concentrates or with a basis of coffee >> roasted chicory and other roasted coffee substitutes and extracts essences and concentrates thereof +21013010,"EXTRACTS, ESSENCES AND CONCENTRATES, OF COFFEE, TEA OR MATE AND PREPARATIONS WITH A BASIS OF THESE PRODUCTS OR WITH A BASIS OF COFFEE, TEA OR MATE; ROASTED CHICORY AND OTHER ROASTED COFFEE SUBSTITUTES, AND EXTRACTS, ESSENCES AND CONCENTRATES THEREOF:Roasted chicory and other roasted coffee substitutes, and extracts, essences and concentrates thereof :Roasted chicory" +21013020,"EXTRACTS, ESSENCES AND CONCENTRATES, OF COFFEE, TEA OR MATE AND PREPARATIONS WITH A BASIS OF THESE PRODUCTS OR WITH A BASIS OF COFFEE, TEA OR MATE; ROASTED CHICORY AND OTHER ROASTED COFFEE SUBSTITUTES, AND EXTRACTS, ESSENCES AND CONCENTRATES THEREOF:Roasted chicory and other roasted coffee substitutes, and extracts, essences and concentrates thereof :Roasted coffee substitutes" +21013090,"EXTRACTS, ESSENCES AND CONCENTRATES, OF COFFEE, TEA OR MATE AND PREPARATIONS WITH A BASIS OF THESE PRODUCTS OR WITH A BASIS OF COFFEE, TEA OR MATE; ROASTED CHICORY AND OTHER ROASTED COFFEE SUBSTITUTES, AND EXTRACTS, ESSENCES AND CONCENTRATES THEREOF:Roasted chicory and other roasted coffee substitutes, and extracts, essences and concentrates thereof :Other" +21020000,yeasts active or inactive other single cell dead but not including vaccines of heading 3002 prepared baking powders +21021000,yeasts active or inactive other single cell dead but not including vaccines of heading 3002 prepared baking powders >> active yeasts +21021010,"YEASTS (ACTIVE OR INACTIVE); OTHER SINGLE CELL MICRO-ORGANISMS, DEAD (BUT NOT INCLUDING VACCINES OF HEADING 3002); PREPARED BAKING POWDERS:Active yeasts :Culture yeast" +21021020,"YEASTS (ACTIVE OR INACTIVE); OTHER SINGLE CELL MICRO-ORGANISMS, DEAD (BUT NOT INCLUDING VACCINES OF HEADING 3002); PREPARED BAKING POWDERS:Active yeasts :Baker's yeast" +21021090,"YEASTS (ACTIVE OR INACTIVE); OTHER SINGLE CELL MICRO-ORGANISMS, DEAD (BUT NOT INCLUDING VACCINES OF HEADING 3002); PREPARED BAKING POWDERS:Active yeasts :Other" +21022000,"YEASTS (ACTIVE OR INACTIVE); OTHER SINGLE CELL MICRO-ORGANISMS, DEAD (BUT NOT INCLUDING VACCINES OF HEADING 3002); PREPARED BAKING POWDERS::Inactive yeasts, other single-cell micro-organisms, dead" +21023000,"YEASTS (ACTIVE OR INACTIVE); OTHER SINGLE CELL MICRO-ORGANISMS, DEAD (BUT NOT INCLUDING VACCINES OF HEADING 3002); PREPARED BAKING POWDERS::Prepared baking powders" +21030000,sauces and preparations therefor mixed condiments and mixed seasonings mustard flour and meal and prepared mustard +21031000,SAUCES AND PREPARATIONS THEREFOR; MIXED CONDIMENTS AND MIXED SEASONINGS; MUSTARD FLOUR AND MEAL AND 
PREPARED MUSTARD::Soya sauce +21032000,SAUCES AND PREPARATIONS THEREFOR; MIXED CONDIMENTS AND MIXED SEASONINGS; MUSTARD FLOUR AND MEAL AND PREPARED MUSTARD::Tomato ketchup and other tomato sauces +21033000,SAUCES AND PREPARATIONS THEREFOR; MIXED CONDIMENTS AND MIXED SEASONINGS; MUSTARD FLOUR AND MEAL AND PREPARED MUSTARD::Mustard flour and meal and prepared mustard +21039000,sauces and preparations therefor mixed condiments and mixed seasonings mustard flour and meal and prepared mustard >> other +21039010,SAUCES AND PREPARATIONS THEREFOR; MIXED CONDIMENTS AND MIXED SEASONINGS; MUSTARD FLOUR AND MEAL AND PREPARED MUSTARD:Other:Curry paste +21039020,SAUCES AND PREPARATIONS THEREFOR; MIXED CONDIMENTS AND MIXED SEASONINGS; MUSTARD FLOUR AND MEAL AND PREPARED MUSTARD:Other:Chilli sauce +21039030,SAUCES AND PREPARATIONS THEREFOR; MIXED CONDIMENTS AND MIXED SEASONINGS; MUSTARD FLOUR AND MEAL AND PREPARED MUSTARD:Other:Mayonnaise and salad dressings +21039040,"SAUCES AND PREPARATIONS THEREFOR; MIXED CONDIMENTS AND MIXED SEASONINGS; MUSTARD FLOUR AND MEAL AND PREPARED MUSTARD:Other:Mixed, condiments and mixed seasoning" +21039090,SAUCES AND PREPARATIONS THEREFOR; MIXED CONDIMENTS AND MIXED SEASONINGS; MUSTARD FLOUR AND MEAL AND PREPARED MUSTARD:Other:Other +21040000,soups and broths and preparations therefor homogenised composite food preparations +21041000,soups and broths and preparations therefor homogenised composite food preparations >> soups and broths and preparations therefor +21041010,SOUPS AND BROTHS AND PREPARATIONS THEREFOR; HOMOGENISED COMPOSITE FOOD PREPARATIONS:Soups and broths and preparations therefor:Dried +21041090,SOUPS AND BROTHS AND PREPARATIONS THEREFOR; HOMOGENISED COMPOSITE FOOD PREPARATIONS:Soups and broths and preparations therefor:Other +21042000,SOUPS AND BROTHS AND PREPARATIONS THEREFOR; HOMOGENISED COMPOSITE FOOD PREPARATIONS::Homogenised composite food preparations +21050000,"::ICECREAM AND OTHER EDIBLE ICE, WHETHER OR NOT CONTAINING COCOA" +21060000,food preparations not elsewhere specified or included +21061000,FOOD PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED::Protein concentrates and textured protein substances +21069000,food preparations not elsewhere specified or included >> other soft drink concentrates +21069011,FOOD PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED:Other:Sharbat +21069019,FOOD PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED:Other:Other +21069020,FOOD PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED:Other:Pan masala +21069030,FOOD PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED:Other:Betel nut product known as Supari +21069040,"FOOD PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED:Other:Sugar-syrups containing added flavouring or colouring matter, not elsewhere specified or included; lactose syrup; glucose syrup and malto dextrine syrup" +21069050,FOOD PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED:Other:Compound preparations for making non-alcoholic beverages +21069060,FOOD PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED:Other:Food flavouring material +21069070,FOOD PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED:Other:Churna for pan +21069080,FOOD PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED:Other:Custard powder +21069091,FOOD PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED:Other:Diabetic foods +21069092,FOOD PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED:Other:Sterilized or pasteurized millstone +21069099,FOOD PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED:Other:Other +21110000,sets of assorted articles other
+22010000,waters including natural or artificial mineral waters and aerated waters not containing added sugar or other sweetening matter nor flavoured ice and snow +22011000,waters including natural or artificial mineral waters and aerated waters not containing added sugar or other sweetening matter nor flavoured ice and snow >> mineral waters and aerated waters +22011010,"WATERS, INCLUDING NATURAL OR ARTIFICIAL MINERAL WATERS AND AERATED WATERS, NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER NOR FLAVOURED; ICE AND SNOW:Mineral waters and aerated waters :Mineral waters" +22011020,"WATERS, INCLUDING NATURAL OR ARTIFICIAL MINERAL WATERS AND AERATED WATERS, NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER NOR FLAVOURED; ICE AND SNOW:Mineral waters and aerated waters :Aerated waters" +22019000,waters including natural or artificial mineral waters and aerated waters not containing added sugar or other sweetening matter nor flavoured ice and snow >> other +22019010,"WATERS, INCLUDING NATURAL OR ARTIFICIAL MINERAL WATERS AND AERATED WATERS, NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER NOR FLAVOURED; ICE AND SNOW:Other :Ice and snow" +22019090,"WATERS, INCLUDING NATURAL OR ARTIFICIAL MINERAL WATERS AND AERATED WATERS, NOT CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER NOR FLAVOURED; ICE AND SNOW:Other :Other" +22020000,waters including mineral waters and aerated waters containing added sugar or other sweetening matter or flavoured and other beverages not including fruit nut or vegetable juices of heading 2009 +22021000,waters including mineral waters and aerated waters containing added sugar or other sweetening matter or flavoured and other beverages not including fruit nut or vegetable juices of heading 2009 >> waters including mineral waters and aerated waters containing added sugar or other sweetening matter or flavoured +22021010,"WATERS, INCLUDING MINERAL WATERS AND AERATED WATERS, CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR FLAVOURED, AND OTHER NON-ALCOHOLIC BEVERAGES, NOT INCLUDING FRUIT OR VEGETABLE JUICES OF HEADING 2009:Waters, including mineral waters and aerated waters, containing added sugar or other sweetening matter or flavoured :Aerated waters" +22021020,"WATERS, INCLUDING MINERAL WATERS AND AERATED WATERS, CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR FLAVOURED, AND OTHER NON-ALCOHOLIC BEVERAGES, NOT INCLUDING FRUIT OR VEGETABLE JUICES OF HEADING 2009:Waters, including mineral waters and aerated waters, containing added sugar or other sweetening matter or flavoured :Lemonade" +22021090,"WATERS, INCLUDING MINERAL WATERS AND AERATED WATERS, CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR FLAVOURED, AND OTHER NON-ALCOHOLIC BEVERAGES, NOT INCLUDING FRUIT OR VEGETABLE JUICES OF HEADING 2009:Waters, including mineral waters and aerated waters, containing added sugar or other sweetening matter or flavoured :Other" +22029010,"WATERS, INCLUDING MINERAL WATERS AND AERATED WATERS, CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR FLAVOURED, AND OTHER NON-ALCOHOLIC BEVERAGES, NOT INCLUDING FRUIT OR VEGETABLE JUICES OF HEADING 2009:Other :Soya milk drinks, whether or not sweetended or flavoured(OLD tariff)" +22029020,"WATERS, INCLUDING MINERAL WATERS AND AERATED WATERS, CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR FLAVOURED, AND OTHER NON-ALCOHOLIC BEVERAGES, NOT INCLUDING FRUIT OR VEGETABLE JUICES OF HEADING 2009:Other :Fruit pulp or fruit juice based drinks(OLD tariff)" +22029030,"WATERS, INCLUDING MINERAL WATERS AND AERATED WATERS, 
CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR FLAVOURED, AND OTHER NON-ALCOHOLIC BEVERAGES, NOT INCLUDING FRUIT OR VEGETABLE JUICES OF HEADING 2009:Other :Beverages containing milk(OLD tariff)" +22029090,"WATERS, INCLUDING MINERAL WATERS AND AERATED WATERS, CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR FLAVOURED, AND OTHER NON-ALCOHOLIC BEVERAGES, NOT INCLUDING FRUIT OR VEGETABLE JUICES OF HEADING 2009:Other :Other(OLD tariff)" +22029100,"WATERS, INCLUDING MINERAL WATERS AND AERATED WATERS, CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR FLAVOURED, AND OTHER NON-ALCOHOLIC BEVERAGES, NOT INCLUDING FRUIT OR VEGETABLE JUICES OF HEADING 2009::Non alcoholic beer" +22029900,waters including mineral waters and aerated waters containing added sugar or other sweetening matter or flavoured and other beverages not including fruit nut or vegetable juices of heading 2009 >> other +22029910,"WATERS, INCLUDING MINERAL WATERS AND AERATED WATERS, CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR FLAVOURED, AND OTHER NON-ALCOHOLIC BEVERAGES, NOT INCLUDING FRUIT OR VEGETABLE JUICES OF HEADING 2009:Other :Soya milk drinks, whether or not sweetended or flavoured" +22029920,"WATERS, INCLUDING MINERAL WATERS AND AERATED WATERS, CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR FLAVOURED, AND OTHER NON-ALCOHOLIC BEVERAGES, NOT INCLUDING FRUIT OR VEGETABLE JUICES OF HEADING 2009:Other :Fruit pulp or fruit juice based drinks" +22029930,"WATERS, INCLUDING MINERAL WATERS AND AERATED WATERS, CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR FLAVOURED, AND OTHER NON-ALCOHOLIC BEVERAGES, NOT INCLUDING FRUIT OR VEGETABLE JUICES OF HEADING 2009:Other :Beverages containing milk" +22029990,"WATERS, INCLUDING MINERAL WATERS AND AERATED WATERS, CONTAINING ADDED SUGAR OR OTHER SWEETENING MATTER OR FLAVOURED, AND OTHER NON-ALCOHOLIC BEVERAGES, NOT INCLUDING FRUIT OR VEGETABLE JUICES OF HEADING 2009:Other :Other" +22030000,::BEER MADE FROM MALT +22040000,wine of fresh grapes including fortified wines grape must other than that of heading 2009 +22041000,"WINE OF FRESH GRAPES, INCLUDING FORTIFIED WINES; GRAPE MUST OTHER THAN THAT OF HEADING 2009::Sparkling wine" +22042100,wine of fresh grapes including fortified wines grape must other than that of heading 2009 >> in containers holding 2 l or less +22042110,"WINE OF FRESH GRAPES, INCLUDING FORTIFIED WINES; GRAPE MUST OTHER THAN THAT OF HEADING 2009:In containers holding 2 l or less:Port and other red wines" +22042120,"WINE OF FRESH GRAPES, INCLUDING FORTIFIED WINES; GRAPE MUST OTHER THAN THAT OF HEADING 2009:In containers holding 2 l or less:Sherry and other white wines" +22042190,"WINE OF FRESH GRAPES, INCLUDING FORTIFIED WINES; GRAPE MUST OTHER THAN THAT OF HEADING 2009:In containers holding 2 l or less:Other" +22042200,wine of fresh grapes including fortified wines grape must other than that of heading 2009 >> in containers holding more than 2 l but not more than 10 l +22042210,"WINE OF FRESH GRAPES, INCLUDING FORTIFIED WINES; GRAPE MUST OTHER THAN THAT OF HEADING 2009:In containers holding more than 2 l but not more than 10 l:Port and other red wines" +22042220,"WINE OF FRESH GRAPES, INCLUDING FORTIFIED WINES; GRAPE MUST OTHER THAN THAT OF HEADING 2009:In containers holding more than 2 l but not more than 10 l:Sherry and other white wines" +22042290,"WINE OF FRESH GRAPES, INCLUDING FORTIFIED WINES; GRAPE MUST OTHER THAN THAT OF HEADING 2009:In containers holding more than 2 l but not more than 10 l:Other" +22042900,wine of fresh grapes including 
fortified wines grape must other than that of heading 2009 >> other +22042910,"WINE OF FRESH GRAPES, INCLUDING FORTIFIED WINES; GRAPE MUST OTHER THAN THAT OF HEADING 2009:Other :Port and other red wines" +22042920,"WINE OF FRESH GRAPES, INCLUDING FORTIFIED WINES; GRAPE MUST OTHER THAN THAT OF HEADING 2009:Other :Sherry and other white wines" +22042990,"WINE OF FRESH GRAPES, INCLUDING FORTIFIED WINES; GRAPE MUST OTHER THAN THAT OF HEADING 2009:Other :Other" +22043000,"WINE OF FRESH GRAPES, INCLUDING FORTIFIED WINES; GRAPE MUST OTHER THAN THAT OF HEADING 2009::Other grape must" +22050000,vermouth and other wine of fresh grapes flavoured with plants or aromatic substances +22051000,VERMOUTH AND OTHER WINE OF FRESH GRAPES FLAVOURED WITH PLANTS OR AROMATIC SUBSTANCES::In containers holding 2 l or less +22059000,VERMOUTH AND OTHER WINE OF FRESH GRAPES FLAVOURED WITH PLANTS OR AROMATIC SUBSTANCES::Other +22060000,"::OTHER FERMENTED BEVERAGES (FOR EXAMPLE, CIDER, PERRY, MEAD, SAKE); MIXTURES OF FERMENTED BEVERAGES AND MIXTURES OF FERMENTED BEVERAGES AND NON-ALCOHOLIC BEVERAGES, NOT ELSEWHERE SPECIFIED OR INCLUDED" +22070000,undenatured ethyl alcohol of an alcoholic strength by volume of 80 vol or higher ethyl alcohol and other spirits denatured of any strength +22071000,undenatured ethyl alcohol of an alcoholic strength by volume of 80 vol or higher ethyl alcohol and other spirits denatured of any strength >> undenatured ethyl alcohol of an alcoholic strength by volume of 80 vol or higher rectified spirit +22071011,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF 80% VOL. OR HIGHER; ETHYL ALCOHOL AND OTHER SPIRITS, DENATURED, OF ANY STRENGTH:Undenatured ethyl alcohol of an alcoholic strength by volume of 80% vol. or higher:Concentrates of alcoholic beverages" +22071019,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF 80% VOL. OR HIGHER; ETHYL ALCOHOL AND OTHER SPIRITS, DENATURED, OF ANY STRENGTH:Undenatured ethyl alcohol of an alcoholic strength by volume of 80% vol. or higher:Other" +22071090,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF 80% VOL. OR HIGHER; ETHYL ALCOHOL AND OTHER SPIRITS, DENATURED, OF ANY STRENGTH:Undenatured ethyl alcohol of an alcoholic strength by volume of 80% vol. or higher:Other" +22072000,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF 80% VOL. 
OR HIGHER; ETHYL ALCOHOL AND OTHER SPIRITS, DENATURED, OF ANY STRENGTH::Ethyl alcohol and other spirits, denatured, of any strength" +22080000,undenatured ethyl alcohol of an alcoholic strength by volume of less than 80 vol spirit liqueurs and other spiritnous beverages +22082000,undenatured ethyl alcohol of an alcoholic strength by volume of less than 80 vol spirit liqueurs and other spiritnous beverages >> spirits obtained by distilling grape wine or grape marc in containers holding 2 l or less +22082011,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Spirits obtained by distilling grape wine or grape marc :Brandy" +22082012,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Spirits obtained by distilling grape wine or grape marc :Liquors(OLD tariff)" +22082019,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Spirits obtained by distilling grape wine or grape marc :Other" +22082091,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Spirits obtained by distilling grape wine or grape marc :Brandy" +22082092,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Spirits obtained by distilling grape wine or grape marc :Liquors(OLD tariff)" +22082099,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Spirits obtained by distilling grape wine or grape marc :Other" +22083000,undenatured ethyl alcohol of an alcoholic strength by volume of less than 80 vol spirit liqueurs and other spiritnous beverages >> whiskies in containers holding 2 l or less +22083011,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Whiskies :Bourbon whiskey" +22083012,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Whiskies :Scotch" +22083013,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Whiskies :Blended" +22083019,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Whiskies :Other" +22083091,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Whiskies :Bourbon whiskey" +22083092,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Whiskies :Scotch" +22083093,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Whiskies :Blended" +22083099,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Whiskies :Other" +22084000,undenatured ethyl alcohol of an alcoholic strength by volume of less than 80 vol spirit liqueurs and other spiritnous beverages >> rum and other spirits obtained by distilling fermented sugarcane products in
containers holding 2 l or less +22084011,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Rum and other spirits obtained by distilling fermented sugarcane products:Rum" +22084012,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Rum and other spirits obtained by distilling fermented sugarcane products:Other" +22084091,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Rum and other spirits obtained by distilling fermented sugarcane products:Rum" +22084092,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Rum and other spirits obtained by distilling fermented sugarcane products:Other" +22085000,undenatured ethyl alcohol of an alcoholic strength by volume of less than 80 vol spirit liqueurs and other spiritnous beverages >> gin and geneva in containers holding 2 l or less +22085011,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Gin and Geneva :Gin(OLD tariff)" +22085012,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Gin and Geneva :Geneva" +22085013,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Gin and Geneva :Vodka(OLD tariff)" +22085091,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Gin and Geneva :Gin" +22085092,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Gin and Geneva :Geneva" +22086000,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Vodka:Vodka" +22086093,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Vodka:Vodka(OLD tariff)" +22087000,undenatured ethyl alcohol of an alcoholic strength by volume of less than 80 vol spirit liqueurs and other spiritnous beverages >> liqueurs and cordials in containers holding 2 l or less +22087011,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Liqueurs and cordials :Liqueurs" +22087012,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Liqueurs and cordials :Cordials" +22087091,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Liqueurs and cordials :Liqueurs" +22087092,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Liqueurs and cordials :Cordials" +22089000,undenatured ethyl alcohol of an alcoholic strength by volume of less than 80 vol spirit liqueurs and other spiritnous beverages >> other in containers holding 2 l or less +22089011,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Other :Tequila" +22089012,"UNDENATURED 
ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Other :Indenatured ethyl alcohol" +22089019,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Other :Other" +22089091,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Other :Tequila" +22089092,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Other :Indenatured ethyl alcohol" +22089099,"UNDENATURED ETHYL ALCOHOL OF AN ALCOHOLIC STRENGTH BY VOLUME OF LESS THAN 80% VOL.; SPIRIT, LIQUEURS AND OTHER SPIRITNOUS BEVERAGES:Other :Other" +22090000,vinegar and substitutes for vinegar obtained from acetic acid vinegar and substitutes for vinegar obtained from acetic acid +22090010,VINEGAR AND SUBSTITUTES FOR VINEGAR OBTAINED FROM ACETIC ACID:Vinegar and substitutes for vinegar obtained from acetic acid:Brewed vinegar +22090020,VINEGAR AND SUBSTITUTES FOR VINEGAR OBTAINED FROM ACETIC ACID:Vinegar and substitutes for vinegar obtained from acetic acid:Synthetic vinegar +22090090,VINEGAR AND SUBSTITUTES FOR VINEGAR OBTAINED FROM ACETIC ACID:Vinegar and substitutes for vinegar obtained from acetic acid:Other +23010000,flours meals and pellets of meat or meat offal of fish or of crustaceans molluscs or other aquatic invertebrates unfit for human consumption greaves +23011000,flours meals and pellets of meat or meat offal of fish or of crustaceans molluscs or other aquatic invertebrates unfit for human consumption greaves >> flours meals and pellets of meat or meat offal greaves +23011010,"FLOURS, MEALS AND PELLETS, OF MEAT OR MEAT OFFAL, OF FISH OR OF CRUSTACEANS, MOLLUSCS OR OTHER AQUATIC INVERTEBRATES, UNFIT FOR HUMAN CONSUMPTION; GREAVES:Flours, meals and pellets, of meat or meat offal; greaves :Meat meals and pellets (including tankage)" +23011090,"FLOURS, MEALS AND PELLETS, OF MEAT OR MEAT OFFAL, OF FISH OR OF CRUSTACEANS, MOLLUSCS OR OTHER AQUATIC INVERTEBRATES, UNFIT FOR HUMAN CONSUMPTION; GREAVES:Flours, meals and pellets, of meat or meat offal; greaves :Other (including greaves)" +23012000,flours meals and pellets of meat or meat offal of fish or of crustaceans molluscs or other aquatic invertebrates unfit for human consumption greaves >> flours meals and pellets of fish or of crustaceans molluscs or other aquatic invertebrates fish meal unfit for human consumption +23012011,"FLOURS, MEALS AND PELLETS, OF MEAT OR MEAT OFFAL, OF FISH OR OF CRUSTACEANS, MOLLUSCS OR OTHER AQUATIC INVERTEBRATES, UNFIT FOR HUMAN CONSUMPTION; GREAVES:Flours, meals and pellets, of fish or of crustaceans, molluscs or other aquatic invertebrates :In powder form" +23012019,"FLOURS, MEALS AND PELLETS, OF MEAT OR MEAT OFFAL, OF FISH OR OF CRUSTACEANS, MOLLUSCS OR OTHER AQUATIC INVERTEBRATES, UNFIT FOR HUMAN CONSUMPTION; GREAVES:Flours, meals and pellets, of fish or of crustaceans, molluscs or other aquatic invertebrates :Other" +23012090,"FLOURS, MEALS AND PELLETS, OF MEAT OR MEAT OFFAL, OF FISH OR OF CRUSTACEANS, MOLLUSCS OR OTHER AQUATIC INVERTEBRATES, UNFIT FOR HUMAN CONSUMPTION; GREAVES:Flours, meals and pellets, of fish or of crustaceans, molluscs or other aquatic invertebrates :Other" +23020000,bran sharps and other residues whether or not in the form of pellets derived from the sifting milling or other working of cereals or of leguminous plants 
+23021000,bran sharps and other residues whether or not in the form of pellets derived from the sifting milling or other working of cereals or of leguminous plants >> of maize corn +23021010,"BRAN, SHARPS AND OTHER RESIDUES, WHETHER OR NOT IN THE FORM OF PELLETS, DERIVED FROM THE SIFTING, MILLING OR OTHER WORKING OF CEREALS OR OF LEGUMINOUS PLANTS:Of maize (corn) :Maize bran" +23021090,"BRAN, SHARPS AND OTHER RESIDUES, WHETHER OR NOT IN THE FORM OF PELLETS, DERIVED FROM THE SIFTING, MILLING OR OTHER WORKING OF CEREALS OR OF LEGUMINOUS PLANTS:Of maize (corn) :Other" +23023000,"BRAN, SHARPS AND OTHER RESIDUES, WHETHER OR NOT IN THE FORM OF PELLETS, DERIVED FROM THE SIFTING, MILLING OR OTHER WORKING OF CEREALS OR OF LEGUMINOUS PLANTS::Of wheat" +23024000,"BRAN, SHARPS AND OTHER RESIDUES, WHETHER OR NOT IN THE FORM OF PELLETS, DERIVED FROM THE SIFTING, MILLING OR OTHER WORKING OF CEREALS OR OF LEGUMINOUS PLANTS::Of other cereals" +23025000,"BRAN, SHARPS AND OTHER RESIDUES, WHETHER OR NOT IN THE FORM OF PELLETS, DERIVED FROM THE SIFTING, MILLING OR OTHER WORKING OF CEREALS OR OF LEGUMINOUS PLANTS::Of leguminous plants" +23030000,residues of starch manufacture and similar residues bagasse and other waste of sugar manufacture brewing or distilling dregs and waste whether or not in the form of pellets +23031000,"RESIDUES OF STARCH MANUFACTURE AND SIMILAR RESIDUES, BEET-PULP, BAGASSE AND OTHER WASTE OF SUGAR MANUFACTURE, BREWING OR DISTILLING DREGS AND WASTE, WHETHER OR NOT IN THE FORM OF PELLETS::Residues of starch manufacture and similar residues" +23032000,"RESIDUES OF STARCH MANUFACTURE AND SIMILAR RESIDUES, BEET-PULP, BAGASSE AND OTHER WASTE OF SUGAR MANUFACTURE, BREWING OR DISTILLING DREGS AND WASTE, WHETHER OR NOT IN THE FORM OF PELLETS::Beet-pulp, bagasse and other waste of sugar manufacture" +23033000,"RESIDUES OF STARCH MANUFACTURE AND SIMILAR RESIDUES, BEET-PULP, BAGASSE AND OTHER WASTE OF SUGAR MANUFACTURE, BREWING OR DISTILLING DREGS AND WASTE, WHETHER OR NOT IN THE FORM OF PELLETS::Brewing or distilling dregs and waste" +23040000,and other solid residues whether or not ground or in the form of pellets resulting from the extraction of oil +23040010,"OIL-CAKE AND OTHER SOLID RESIDUES WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF SOYABEAN OIL:Oil-cake and other solid residues whether or not ground or in the form of pellets, resulting from the extraction of soyabean oil:Oil-cake and oil-cake meal of soyabean, expeller variety(OLD tariff)" +23040020,"OIL-CAKE AND OTHER SOLID RESIDUES WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF SOYABEAN OIL:Oil-cake and other solid residues whether or not ground or in the form of pellets, resulting from the extraction of soyabean oil:Oil-cake of soyabean, solvent extracted (defatted) variety(OLD tariff)" +23040030,"OIL-CAKE AND OTHER SOLID RESIDUES WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF SOYABEAN OIL:Oil-cake and other solid residues whether or not ground or in the form of pellets, resulting from the extraction of soyabean oil:Meal of soyabean, solvent extracted (defatted)(OLD tariff)" +23040090,"OIL-CAKE AND OTHER SOLID RESIDUES WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF SOYABEAN OIL:Oil-cake and other solid residues whether or not ground or in the form of pellets, resulting from the extraction of soyabean oil:Other(OLD tariff)" +23050000,and other solid residues whether or not ground or in the form of 
pellets resulting from the extraction of oil +23050010,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF GROUND-NUT OIL:Oil-cake and other solid residues, whether or not ground or in the form of pellets, resulting from the extraction of ground-nut oil:Oil-cake and oil-cake meal of ground-nut, expeller variety" +23050020,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF GROUND-NUT OIL:Oil-cake and other solid residues, whether or not ground or in the form of pellets, resulting from the extraction of ground-nut oil:Oil-cake and oil-cake meal of ground-nut, solvent extracted variety (defatted)" +23050090,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF GROUND-NUT OIL:Oil-cake and other solid residues, whether or not ground or in the form of pellets, resulting from the extraction of ground-nut oil:Other" +23060000,and other solid residues whether or not ground or in the form of pellets resulting from the extraction of vegetable or microbial fats or oils other than those of heading 2304 or 2305 +23061000,and other solid residues whether or not ground or in the form of pellets resulting from the extraction of vegetable or microbial fats or oils other than those of heading 2304 or 2305 >> of cotton seeds +23061010,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Of cotton seeds :Oil-cake and oil-cake meal, decorticated expeller variety" +23061020,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Of cotton seeds :Oil-cake and oil-cake meal, decorticated, solvent extracted (defatted) variety" +23061030,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Of cotton seeds :Oil-cake and oil-cake meal, undecorticated, expeller variety" +23061040,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Of cotton seeds :Oil-cake and oil-cake meal, undecorticated, solvent extracted (defatted) variety" +23061090,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Of cotton seeds :Other" +23062000,and other solid residues whether or not ground or in the form of pellets resulting from the extraction of vegetable or microbial fats or oils other than those of heading 2304 or 2305 >> of linseed +23062010,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Of linseed :Oil-cake and oil-cake meal, expeller variety" +23062020,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Of linseed :Oil-cake and oil-cake meal, solvent extracted (defatted) variety" +23062090,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT 
GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Of linseed :Other" +23063000,and other solid residues whether or not ground or in the form of pellets resulting from the extraction of vegetable or microbial fats or oils other than those of heading 2304 or 2305 >> of sunflower seeds +23063010,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Of sunflower seeds:Oil-cake and oil-cake meal, expeller variety" +23063020,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Of sunflower seeds:Oil-cake and oil-cake meal, solvent extracted (defatted) variety" +23063090,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Of sunflower seeds:Other" +23064100,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305::Of low erucic acid rape or colza seeds" +23064900,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305::Other" +23065000,and other solid residues whether or not ground or in the form of pellets resulting from the extraction of vegetable or microbial fats or oils other than those of heading 2304 or 2305 >> of coconut or copra +23065010,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Of coconut or copra :Oil-cake and oil-cake meal, expeller variety" +23065020,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Of coconut or copra :Oil-cake and oil-cake meal, solvent extracted (defatted) variety" +23065090,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Of coconut or copra :Other" +23066000,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305::Of palm nuts or kernels" +23069000,and other solid residues whether or not ground or in the form of pellets resulting from the extraction of vegetable or microbial fats or oils other than those of heading 2304 or 2305 >> other and meal expeller variety +23069011,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Other:Of mowra seeds" +23069012,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Other:Of mustard seeds" +23069013,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS 
OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Other:Of niger seeds" +23069014,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Other:Of seasamum seeds" +23069015,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Other:Of mango kernel" +23069016,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Other:Of sal (de-oiled)" +23069017,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Other:Of castor seeds" +23069018,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Other:Of neem seeds" +23069019,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Other:Of other seeds" +23069021,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Other:Of mustard seeds" +23069022,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Other:Of niger seeds" +23069023,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Other:Of cardi seeds" +23069024,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Other:Of seasamum seeds" +23069025,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Other:Of mango kernel" +23069026,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Other:Of sal (de-oiled)" +23069027,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Other:Of castor seeds" +23069028,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Other:Of neem seeds" +23069029,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Other:Of other seeds" +23069030,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Other:Residues babool seed extraction" 
+23069090,"OIL-CAKE AND OTHER SOLID RESIDUES, WHETHER OR NOT GROUND OR IN THE FORM OF PELLETS, RESULTING FROM THE EXTRACTION OF VEGETABLE FATS OR OILS, OTHER THAN THOSE OF HEADING 2304 OR 2305:Other:Other" +23070000,::WINE LEES; ARGOL +23080000,"::VEGETABLE MATERIALS AND VEGETABLE WASTE, VEGETABLE RESIDUES AND BY-PRODUCTS, WHETHER OR NOT IN THE FORM OF PELLETS, OF A KIND USED IN ANIMAL FEEDING, NOT ELSEWHERE SPECIFIED OR INCLUDED" +23090000,preparations of a kind used in animal feeding +23091000,"PREPARATIONS OF A KIND USED IN ANIMAL FEEDING::Dog or cat food, put up for retail sale" +23099000,preparations of a kind used in animal feeding >> other +23099010,PREPARATIONS OF A KIND USED IN ANIMAL FEEDING:Other :Compounded animal feed +23099020,PREPARATIONS OF A KIND USED IN ANIMAL FEEDING:Other :Concentrates for compound animal feed +23099031,PREPARATIONS OF A KIND USED IN ANIMAL FEEDING:Other :Prawn and shrimps feed +23099032,PREPARATIONS OF A KIND USED IN ANIMAL FEEDING:Other :Fish meal in powdered form +23099039,PREPARATIONS OF A KIND USED IN ANIMAL FEEDING:Other :Other +23099090,PREPARATIONS OF A KIND USED IN ANIMAL FEEDING:Other :Other +24010000,unmanufactured tobacco tobacco refuse +24011000,unmanufactured tobacco tobacco refuse >> tobacco not stemmed or stripped +24011010,"UNMANUFACTURED TOBACCO; TOBACCO REFUSE:Tobacco, not stemmed or stripped :Flue cured virginia tobacco" +24011020,"UNMANUFACTURED TOBACCO; TOBACCO REFUSE:Tobacco, not stemmed or stripped :Sun cured country (natu) tobacco" +24011030,"UNMANUFACTURED TOBACCO; TOBACCO REFUSE:Tobacco, not stemmed or stripped :Sun cured virginia tobacco" +24011040,"UNMANUFACTURED TOBACCO; TOBACCO REFUSE:Tobacco, not stemmed or stripped :Burley tobacco" +24011050,"UNMANUFACTURED TOBACCO; TOBACCO REFUSE:Tobacco, not stemmed or stripped :Tobacco for manufacture of biris, not stemmed" +24011060,"UNMANUFACTURED TOBACCO; TOBACCO REFUSE:Tobacco, not stemmed or stripped :Tobacco for manufacture of chewing tobacco" +24011070,"UNMANUFACTURED TOBACCO; TOBACCO REFUSE:Tobacco, not stemmed or stripped :Tobacco for manufacture of cigar and cheroot" +24011080,"UNMANUFACTURED TOBACCO; TOBACCO REFUSE:Tobacco, not stemmed or stripped :Tobacco for manufacture of hookah tobacco" +24011090,"UNMANUFACTURED TOBACCO; TOBACCO REFUSE:Tobacco, not stemmed or stripped :Other" +24012000,unmanufactured tobacco tobacco refuse >> tobacco partly or wholly stemmed or stripped +24012010,"UNMANUFACTURED TOBACCO; TOBACCO REFUSE:Tobacco, partly or wholly stemmed or stripped :Flue cured virginia tobacco" +24012020,"UNMANUFACTURED TOBACCO; TOBACCO REFUSE:Tobacco, partly or wholly stemmed or stripped :Sun cured country (natu) tobacco" +24012030,"UNMANUFACTURED TOBACCO; TOBACCO REFUSE:Tobacco, partly or wholly stemmed or stripped :Sun cured virginia tobacco" +24012040,"UNMANUFACTURED TOBACCO; TOBACCO REFUSE:Tobacco, partly or wholly stemmed or stripped :Burley tobacco" +24012050,"UNMANUFACTURED TOBACCO; TOBACCO REFUSE:Tobacco, partly or wholly stemmed or stripped :Tobacco for manufacture of biris" +24012060,"UNMANUFACTURED TOBACCO; TOBACCO REFUSE:Tobacco, partly or wholly stemmed or stripped :Tobacco for manufacture of chewing tobacco" +24012070,"UNMANUFACTURED TOBACCO; TOBACCO REFUSE:Tobacco, partly or wholly stemmed or stripped :Tobacco for manufacture of cigar and cheroot" +24012080,"UNMANUFACTURED TOBACCO; TOBACCO REFUSE:Tobacco, partly or wholly stemmed or stripped :Tobacco for manufacture of hookah tobacco" +24012090,"UNMANUFACTURED TOBACCO; TOBACCO REFUSE:Tobacco, partly or 
wholly stemmed or stripped :Other" +24013000,UNMANUFACTURED TOBACCO; TOBACCO REFUSE::Tobacco refuse +24020000,cigars cheroots cigarillos and cigarettes of tobacco or of tobacco substitutes +24021000,cigars cheroots cigarillos and cigarettes of tobacco or of tobacco substitutes >> cigars cheroots and cigarillos containing tobacco +24021010,"CIGARS, CHEROOTS, CIGARILLOS AND CIGARETTES, OF TOBACCO OR OF TOBACCO SUBSTITUTES:Cigars, cheroots and cigarillos, containing tobacco :Cigar and cheroots" +24021020,"CIGARS, CHEROOTS, CIGARILLOS AND CIGARETTES, OF TOBACCO OR OF TOBACCO SUBSTITUTES:Cigars, cheroots and cigarillos, containing tobacco :Cigarillos" +24022000,cigars cheroots cigarillos and cigarettes of tobacco or of tobacco substitutes >> cigarettes containing tobacco +24022010,"CIGARS, CHEROOTS, CIGARILLOS AND CIGARETTES, OF TOBACCO OR OF TOBACCO SUBSTITUTES:Cigarettes, containing tobacco :Other than filter cigarettes, of length not exceeding 60 millimetres" +24022020,"CIGARS, CHEROOTS, CIGARILLOS AND CIGARETTES, OF TOBACCO OR OF TOBACCO SUBSTITUTES:Cigarettes, containing tobacco :Other than filter cigarettes, of length exceeding 60 millimetres but not exceeding 70 millimetres" +24022030,"CIGARS, CHEROOTS, CIGARILLOS AND CIGARETTES, OF TOBACCO OR OF TOBACCO SUBSTITUTES:Cigarettes, containing tobacco :Filter cigarettes of length (including the length of the filter, the length of filter being 11 millimetres or its actual length, whichever is more) not exceeding 70 millimetres" +24022040,"CIGARS, CHEROOTS, CIGARILLOS AND CIGARETTES, OF TOBACCO OR OF TOBACCO SUBSTITUTES:Cigarettes, containing tobacco :Filter cigarettes of length (including the length of the filter, the length of filter being 11 millimetres or its actual length, whichever is more) exceeding 70 millimetres but not exceeding 75 millimetres" +24022050,"CIGARS, CHEROOTS, CIGARILLOS AND CIGARETTES, OF TOBACCO OR OF TOBACCO SUBSTITUTES:Cigarettes, containing tobacco :Filter cigarettes of length (including the length of the filter, the length of filter being 11 millimetres or its actual length, whichever is more) exceeding 75 millimetres but not exceeding 85 millimetres" +24022060,"CIGARS, CHEROOTS, CIGARILLOS AND CIGARETTES, OF TOBACCO OR OF TOBACCO SUBSTITUTES:Cigarettes, containing tobacco :Filter cigarettes of length (including the length of the filer, the length of the filter being 11 mmor its actual length whichever is more) exceeding 75 mm but not exceeding 85 mm(OLD tariff)" +24022090,"CIGARS, CHEROOTS, CIGARILLOS AND CIGARETTES, OF TOBACCO OR OF TOBACCO SUBSTITUTES:Cigarettes, containing tobacco :Other(OLD tariff)" +24029010,"CIGARS, CHEROOTS, CIGARILLOS AND CIGARETTES, OF TOBACCO OR OF TOBACCO SUBSTITUTES:Other :Cigarettes of tobacco substitutes(OLD tariff)" +24029020,"CIGARS, CHEROOTS, CIGARILLOS AND CIGARETTES, OF TOBACCO OR OF TOBACCO SUBSTITUTES:Other :Cigarillos of tobacco substitutes(OLD tariff)" +24029090,"CIGARS, CHEROOTS, CIGARILLOS AND CIGARETTES, OF TOBACCO OR OF TOBACCO SUBSTITUTES:Other :Other(OLD tariff)" +24031110,OTHER MANUFACTURED TOBACCO AND MANUFACTURED TOBACCO SUBSTITUTES; HOMOGENISED OR RECONSTITUTED TOBACCO; TOBACCO EXTRACTS AND ESSENCES:Water pipe tobacco specified in Sub-heading Note to this chapter:Hookah or gudaku tobacco(OLD tariff) +24031190,OTHER MANUFACTURED TOBACCO AND MANUFACTURED TOBACCO SUBSTITUTES; HOMOGENISED OR RECONSTITUTED TOBACCO; TOBACCO EXTRACTS AND ESSENCES:Water pipe tobacco specified in Sub-heading Note to this chapter:Other(OLD tariff) +24031910,OTHER MANUFACTURED TOBACCO AND 
MANUFACTURED TOBACCO SUBSTITUTES; HOMOGENISED OR RECONSTITUTED TOBACCO; TOBACCO EXTRACTS AND ESSENCES:Other:Smoking mixtures for pipes and cigarettes(OLD tariff) +24031921,"OTHER MANUFACTURED TOBACCO AND MANUFACTURED TOBACCO SUBSTITUTES; HOMOGENISED OR RECONSTITUTED TOBACCO; TOBACCO EXTRACTS AND ESSENCES:Other:Other than paper rolled biris, manufactured without the aid of machine(OLD tariff)" +24031929,OTHER MANUFACTURED TOBACCO AND MANUFACTURED TOBACCO SUBSTITUTES; HOMOGENISED OR RECONSTITUTED TOBACCO; TOBACCO EXTRACTS AND ESSENCES:Other:Other(OLD tariff) +24031990,OTHER MANUFACTURED TOBACCO AND MANUFACTURED TOBACCO SUBSTITUTES; HOMOGENISED OR RECONSTITUTED TOBACCO; TOBACCO EXTRACTS AND ESSENCES:Other:Other(OLD tariff) +24039100,"OTHER MANUFACTURED TOBACCO AND MANUFACTURED TOBACCO SUBSTITUTES; HOMOGENISED OR RECONSTITUTED TOBACCO; TOBACCO EXTRACTS AND ESSENCES::"" Homogenised"" or ""reconstituted"" Tobacco(OLD tariff)" +24039910,OTHER MANUFACTURED TOBACCO AND MANUFACTURED TOBACCO SUBSTITUTES; HOMOGENISED OR RECONSTITUTED TOBACCO; TOBACCO EXTRACTS AND ESSENCES:Other :Chewing tobacco(OLD tariff) +24039920,OTHER MANUFACTURED TOBACCO AND MANUFACTURED TOBACCO SUBSTITUTES; HOMOGENISED OR RECONSTITUTED TOBACCO; TOBACCO EXTRACTS AND ESSENCES:Other :Preparations containing chewing tobacco(OLD tariff) +24039930,OTHER MANUFACTURED TOBACCO AND MANUFACTURED TOBACCO SUBSTITUTES; HOMOGENISED OR RECONSTITUTED TOBACCO; TOBACCO EXTRACTS AND ESSENCES:Other :Jarda scented tobacco(OLD tariff) +24039940,OTHER MANUFACTURED TOBACCO AND MANUFACTURED TOBACCO SUBSTITUTES; HOMOGENISED OR RECONSTITUTED TOBACCO; TOBACCO EXTRACTS AND ESSENCES:Other :Snuff(OLD tariff) +24039950,OTHER MANUFACTURED TOBACCO AND MANUFACTURED TOBACCO SUBSTITUTES; HOMOGENISED OR RECONSTITUTED TOBACCO; TOBACCO EXTRACTS AND ESSENCES:Other :Preparations containing snuff(OLD tariff) +24039960,OTHER MANUFACTURED TOBACCO AND MANUFACTURED TOBACCO SUBSTITUTES; HOMOGENISED OR RECONSTITUTED TOBACCO; TOBACCO EXTRACTS AND ESSENCES:Other :Tobacco extracts and essence(OLD tariff) +24039970,OTHER MANUFACTURED TOBACCO AND MANUFACTURED TOBACCO SUBSTITUTES; HOMOGENISED OR RECONSTITUTED TOBACCO; TOBACCO EXTRACTS AND ESSENCES:Other :Cut-tobacco(OLD tariff) +24039990,OTHER MANUFACTURED TOBACCO AND MANUFACTURED TOBACCO SUBSTITUTES; HOMOGENISED OR RECONSTITUTED TOBACCO; TOBACCO EXTRACTS AND ESSENCES:Other :Other(OLD tariff) +24040000,tobacco nicotine or tobacco or nicotine substitutes intended for inhalation without combustion other nicotine containing products intended for the intake of nicotine into the human body products intended for inhalation without combustion +24041100,"PRODUCTS CONTAINING TOBACCO, RECONSTITUTED TOBACCO, NICOTINE, OR TOBACCO OR NICOTINE SUBSTITUTES, INTENDED FOR INHALATION WITHOUT COMBUSTION; OTHER NICOTINE CONTAINING PRODUCTS INTENDED FOR THE INTAKE OF NICOTINE INTO THE HUMAN BODY:containing: Containing tobacco or reconstituted tobacco" +24041200,"PRODUCTS CONTAINING TOBACCO, RECONSTITUTED TOBACCO, NICOTINE, OR TOBACCO OR NICOTINE SUBSTITUTES, INTENDED FOR INHALATION WITHOUT COMBUSTION; OTHER NICOTINE CONTAINING PRODUCTS INTENDED FOR THE INTAKE OF NICOTINE INTO THE HUMAN BODY:other:Other, containing nicotine" +24041900,"PRODUCTS CONTAINING TOBACCO, RECONSTITUTED TOBACCO, NICOTINE, OR TOBACCO OR NICOTINE SUBSTITUTES, INTENDED FOR INHALATION WITHOUT COMBUSTION; OTHER NICOTINE CONTAINING PRODUCTS INTENDED FOR THE INTAKE OF NICOTINE INTO THE HUMAN BODY:other:other" +24049100,"PRODUCTS CONTAINING TOBACCO, RECONSTITUTED TOBACCO, NICOTINE, OR 
TOBACCO OR NICOTINE SUBSTITUTES, INTENDED FOR INHALATION WITHOUT COMBUSTION; OTHER NICOTINE CONTAINING PRODUCTS INTENDED FOR THE INTAKE OF NICOTINE INTO THE HUMAN BODY:for oral:For oral application" +24049200,"PRODUCTS CONTAINING TOBACCO, RECONSTITUTED TOBACCO, NICOTINE, OR TOBACCO OR NICOTINE SUBSTITUTES, INTENDED FOR INHALATION WITHOUT COMBUSTION; OTHER NICOTINE CONTAINING PRODUCTS INTENDED FOR THE INTAKE OF NICOTINE INTO THE HUMAN BODY:for:For transdermal application" +24049900,"PRODUCTS CONTAINING TOBACCO, RECONSTITUTED TOBACCO, NICOTINE, OR TOBACCO OR NICOTINE SUBSTITUTES, INTENDED FOR INHALATION WITHOUT COMBUSTION; OTHER NICOTINE CONTAINING PRODUCTS INTENDED FOR THE INTAKE OF NICOTINE INTO THE HUMAN BODY:other:other" +25010000,salt including table salt and denatured salt and pure sodium chloride whether or not in aqueous solution or containing added or free flowing agents sea water salt including table salt and denatured salt and pure sodium chloride whether or not in aqueous solution or containing added anti caking or free flowing agents sea water +25010010,"SALT (INCLUDING TABLE SALT AND DENATURED SALT) AND PURE SODIUM CHLORIDE, WHETHER OR NOT IN AQUEOUS SOLUTION OR CONTAINING ADDED ANTI-CAKING OR FREE FLOWING AGENTS; SEA WATER:Salt (including table salt and denatured salt) and pure sodium chloride, whether or not in aqueous solution or containing added anti-caking or free flowing agents; Sea water :Common salt (including iodised salt)" +25010020,"SALT (INCLUDING TABLE SALT AND DENATURED SALT) AND PURE SODIUM CHLORIDE, WHETHER OR NOT IN AQUEOUS SOLUTION OR CONTAINING ADDED ANTI-CAKING OR FREE FLOWING AGENTS; SEA WATER:Salt (including table salt and denatured salt) and pure sodium chloride, whether or not in aqueous solution or containing added anti-caking or free flowing agents; Sea water :Rock salt" +25010090,"SALT (INCLUDING TABLE SALT AND DENATURED SALT) AND PURE SODIUM CHLORIDE, WHETHER OR NOT IN AQUEOUS SOLUTION OR CONTAINING ADDED ANTI-CAKING OR FREE FLOWING AGENTS; SEA WATER:Salt (including table salt and denatured salt) and pure sodium chloride, whether or not in aqueous solution or containing added anti-caking or free flowing agents; Sea water :Other" +25020000,::UNROASTED IRON PYRITES +25030000,sulphur of all kinds other than sublimed sulphur precipitated sulphur and colloidal sulphur sulphur of all kinds other than sublimed sulphur precipitated sulphur and colloidal sulphur +25030010,"SULPHUR OF ALL KINDS, OTHER THAN SUBLIMED SULPHUR, PRECIPITATED SULPHUR AND COLLOIDAL SULPHUR:Sulphur of all kinds, other than sublimed sulphur, precipitated sulphur and colloidal sulphur :Sulphur recovered as by-product in refining of crude oil" +25030090,"SULPHUR OF ALL KINDS, OTHER THAN SUBLIMED SULPHUR, PRECIPITATED SULPHUR AND COLLOIDAL SULPHUR:Sulphur of all kinds, other than sublimed sulphur, precipitated sulphur and colloidal sulphur :Other" +25040000,natural graphite +25041000,natural graphite >> in powder or in flakes +25041010,"NATURAL GRAPHITE:In powder or in flakes :Graphite, crystalline" +25041020,"NATURAL GRAPHITE:In powder or in flakes :Graphite, amorphous" +25041090,NATURAL GRAPHITE:In powder or in flakes :Other +25049000,natural graphite >> other +25049010,"NATURAL GRAPHITE:Other :Graphite, micronised" +25049090,NATURAL GRAPHITE:Other :Other +25050000,natural sands of all kinds whether or not coloured other than sands of chapter 26 +25051000,natural sands of all kinds whether or not coloured other than sands of chapter 26 >> silica sands and quartz sands silica sands 
+25051011,"NATURAL SANDS OF ALL KINDS, WHETHER OR NOT COLOURED, OTHER THAN METAL-BEARING SANDS OF CHAPTER 26:Silica sands and quartz sands:Processed (white)" +25051012,"NATURAL SANDS OF ALL KINDS, WHETHER OR NOT COLOURED, OTHER THAN METAL-BEARING SANDS OF CHAPTER 26:Silica sands and quartz sands:Processed (brown)" +25051019,"NATURAL SANDS OF ALL KINDS, WHETHER OR NOT COLOURED, OTHER THAN METAL-BEARING SANDS OF CHAPTER 26:Silica sands and quartz sands:Other" +25051020,"NATURAL SANDS OF ALL KINDS, WHETHER OR NOT COLOURED, OTHER THAN METAL-BEARING SANDS OF CHAPTER 26:Silica sands and quartz sands:Quartz sands" +25059000,"NATURAL SANDS OF ALL KINDS, WHETHER OR NOT COLOURED, OTHER THAN METAL-BEARING SANDS OF CHAPTER 26::Other" +25060000,quartz other than natural sands quartzite whether or not roughly trimmed or merely cut by sawing or into blocks or slabs of a otherwise rectangular including square shape +25061000,quartz other than natural sands quartzite whether or not roughly trimmed or merely cut by sawing or into blocks or slabs of a otherwise rectangular including square shape >> quartz +25061010,"QUARTZ (OTHER THAN NATURAL SANDS); QUARTZITE, WHETHER OR NOT ROUGHLY TRIMMED OR MERELY CUT, BY SAWING OR OTHERWISE, INTO BLOCKS OR SLABS OF A RECTANGULAR (INCLUDING SQUARE) SHAPE:Quartz:In lumps" +25061020,"QUARTZ (OTHER THAN NATURAL SANDS); QUARTZITE, WHETHER OR NOT ROUGHLY TRIMMED OR MERELY CUT, BY SAWING OR OTHERWISE, INTO BLOCKS OR SLABS OF A RECTANGULAR (INCLUDING SQUARE) SHAPE:Quartz:In powder" +25062000,quartz other than natural sands quartzite whether or not roughly trimmed or merely cut by sawing or into blocks or slabs of a otherwise rectangular including square shape >> quartzite +25062010,"QUARTZ (OTHER THAN NATURAL SANDS); QUARTZITE, WHETHER OR NOT ROUGHLY TRIMMED OR MERELY CUT, BY SAWING OR OTHERWISE, INTO BLOCKS OR SLABS OF A RECTANGULAR (INCLUDING SQUARE) SHAPE:Quartzite :In lumps" +25062020,"QUARTZ (OTHER THAN NATURAL SANDS); QUARTZITE, WHETHER OR NOT ROUGHLY TRIMMED OR MERELY CUT, BY SAWING OR OTHERWISE, INTO BLOCKS OR SLABS OF A RECTANGULAR (INCLUDING SQUARE) SHAPE:Quartzite :In powder" +25062090,"QUARTZ (OTHER THAN NATURAL SANDS); QUARTZITE, WHETHER OR NOT ROUGHLY TRIMMED OR MERELY CUT, BY SAWING OR OTHERWISE, INTO BLOCKS OR SLABS OF A RECTANGULAR (INCLUDING SQUARE) SHAPE:Quartzite :Other" +25070000,kaolin and other kaolinic clays whether or not calcined kaolin and other kaolinic clays whether or not calcined +25070010,"KAOLIN AND OTHER KAOLINIC CLAYS, WHETHER OR NOT CALCINED:Kaolin and other kaolinic clays, whether or not calcined :Crude" +25070021,"KAOLIN AND OTHER KAOLINIC CLAYS, WHETHER OR NOT CALCINED:Kaolin and other kaolinic clays, whether or not calcined :Pharmaceutical grade" +25070022,"KAOLIN AND OTHER KAOLINIC CLAYS, WHETHER OR NOT CALCINED:Kaolin and other kaolinic clays, whether or not calcined :Ceramic grade" +25070029,"KAOLIN AND OTHER KAOLINIC CLAYS, WHETHER OR NOT CALCINED:Kaolin and other kaolinic clays, whether or not calcined :Other" +25080000,other clays not including expanded clays of heading 6806 andalusite kyanite and sillimanite whether or not calcined mullite chamotte or dinas earths +25081000,other clays not including expanded clays of heading 6806 andalusite kyanite and sillimanite whether or not calcined mullite chamotte or dinas earths >> bentonite +25081010,"OTHER CLAYS (NOT INCLUDING EXPANDED CLAYS OF HEADING 6806), ANDALUSITE, KYANITE AND SILLIMANITE, WHETHER OR NOT CALCINED; MULLITE; CHAMOTTE OR DINAS EARTHS:Bentonite :Crude" +25081090,"OTHER 
CLAYS (NOT INCLUDING EXPANDED CLAYS OF HEADING 6806), ANDALUSITE, KYANITE AND SILLIMANITE, WHETHER OR NOT CALCINED; MULLITE; CHAMOTTE OR DINAS EARTHS:Bentonite :Other (includes processed and ground)" +25083000,other clays not including expanded clays of heading 6806 andalusite kyanite and sillimanite whether or not calcined mullite chamotte or dinas earths >> fire clay +25083010,"OTHER CLAYS (NOT INCLUDING EXPANDED CLAYS OF HEADING 6806), ANDALUSITE, KYANITE AND SILLIMANITE, WHETHER OR NOT CALCINED; MULLITE; CHAMOTTE OR DINAS EARTHS:Fire clay :Non-plastic" +25083020,"OTHER CLAYS (NOT INCLUDING EXPANDED CLAYS OF HEADING 6806), ANDALUSITE, KYANITE AND SILLIMANITE, WHETHER OR NOT CALCINED; MULLITE; CHAMOTTE OR DINAS EARTHS:Fire clay :Semi-plastic" +25083030,"OTHER CLAYS (NOT INCLUDING EXPANDED CLAYS OF HEADING 6806), ANDALUSITE, KYANITE AND SILLIMANITE, WHETHER OR NOT CALCINED; MULLITE; CHAMOTTE OR DINAS EARTHS:Fire clay :Plastic" +25083090,"OTHER CLAYS (NOT INCLUDING EXPANDED CLAYS OF HEADING 6806), ANDALUSITE, KYANITE AND SILLIMANITE, WHETHER OR NOT CALCINED; MULLITE; CHAMOTTE OR DINAS EARTHS:Fire clay :Other" +25084000,other clays not including expanded clays of heading 6806 andalusite kyanite and sillimanite whether or not calcined mullite chamotte or dinas earths >> other clays other clays +25084010,"OTHER CLAYS (NOT INCLUDING EXPANDED CLAYS OF HEADING 6806), ANDALUSITE, KYANITE AND SILLIMANITE, WHETHER OR NOT CALCINED; MULLITE; CHAMOTTE OR DINAS EARTHS:Other clays :Ball clay" +25084020,"OTHER CLAYS (NOT INCLUDING EXPANDED CLAYS OF HEADING 6806), ANDALUSITE, KYANITE AND SILLIMANITE, WHETHER OR NOT CALCINED; MULLITE; CHAMOTTE OR DINAS EARTHS:Other clays :Earth clay" +25084090,"OTHER CLAYS (NOT INCLUDING EXPANDED CLAYS OF HEADING 6806), ANDALUSITE, KYANITE AND SILLIMANITE, WHETHER OR NOT CALCINED; MULLITE; CHAMOTTE OR DINAS EARTHS:Other clays :Other" +25085000,other clays not including expanded clays of heading 6806 andalusite kyanite and sillimanite whether or not calcined mullite chamotte or dinas earths >> andalusite kyanite and sillimanite andalusite kyanite and sillimanite +25085010,"OTHER CLAYS (NOT INCLUDING EXPANDED CLAYS OF HEADING 6806), ANDALUSITE, KYANITE AND SILLIMANITE, WHETHER OR NOT CALCINED; MULLITE; CHAMOTTE OR DINAS EARTHS:Andalusite, kyanite and sillimanite:Andalusite" +25085021,"OTHER CLAYS (NOT INCLUDING EXPANDED CLAYS OF HEADING 6806), ANDALUSITE, KYANITE AND SILLIMANITE, WHETHER OR NOT CALCINED; MULLITE; CHAMOTTE OR DINAS EARTHS:Andalusite, kyanite and sillimanite:Crude, other than calcined" +25085022,"OTHER CLAYS (NOT INCLUDING EXPANDED CLAYS OF HEADING 6806), ANDALUSITE, KYANITE AND SILLIMANITE, WHETHER OR NOT CALCINED; MULLITE; CHAMOTTE OR DINAS EARTHS:Andalusite, kyanite and sillimanite:Processed, other than calcined (washed or ground or screened or beneficiated)" +25085023,"OTHER CLAYS (NOT INCLUDING EXPANDED CLAYS OF HEADING 6806), ANDALUSITE, KYANITE AND SILLIMANITE, WHETHER OR NOT CALCINED; MULLITE; CHAMOTTE OR DINAS EARTHS:Andalusite, kyanite and sillimanite:Calcined" +25085031,"OTHER CLAYS (NOT INCLUDING EXPANDED CLAYS OF HEADING 6806), ANDALUSITE, KYANITE AND SILLIMANITE, WHETHER OR NOT CALCINED; MULLITE; CHAMOTTE OR DINAS EARTHS:Andalusite, kyanite and sillimanite:Lumps" +25085032,"OTHER CLAYS (NOT INCLUDING EXPANDED CLAYS OF HEADING 6806), ANDALUSITE, KYANITE AND SILLIMANITE, WHETHER OR NOT CALCINED; MULLITE; CHAMOTTE OR DINAS EARTHS:Andalusite, kyanite and sillimanite:Fines (including sand)" +25085039,"OTHER CLAYS (NOT INCLUDING EXPANDED CLAYS OF 
HEADING 6806), ANDALUSITE, KYANITE AND SILLIMANITE, WHETHER OR NOT CALCINED; MULLITE; CHAMOTTE OR DINAS EARTHS:Andalusite, kyanite and sillimanite:Other" +25086000,"OTHER CLAYS (NOT INCLUDING EXPANDED CLAYS OF HEADING 6806), ANDALUSITE, KYANITE AND SILLIMANITE, WHETHER OR NOT CALCINED; MULLITE; CHAMOTTE OR DINAS EARTHS::Mullite" +25087000,"OTHER CLAYS (NOT INCLUDING EXPANDED CLAYS OF HEADING 6806), ANDALUSITE, KYANITE AND SILLIMANITE, WHETHER OR NOT CALCINED; MULLITE; CHAMOTTE OR DINAS EARTHS::Chamotte or dinas earths" +25090000,::CHALK +25100000,natural calcium phosphates natural aluminium calcium phosphates and phosphatic chalk natural calcium phosphates natural aluminium calcium phosphates and phosphatic chalk +25101000,natural calcium phosphates natural aluminium calcium phosphates and phosphatic chalk natural calcium phosphates natural aluminium calcium phosphates and phosphatic chalk >> unground unground +25101010,"NATURAL CALCIUM PHOSPHATES, NATURAL ALUMINIUM CALCIUM PHOSPHATES AND PHOSPHATIC CHALK:Unground :Natural calcium phosphate" +25101020,"NATURAL CALCIUM PHOSPHATES, NATURAL ALUMINIUM CALCIUM PHOSPHATES AND PHOSPHATIC CHALK:Unground :Natural aluminium calcium phosphate" +25101030,"NATURAL CALCIUM PHOSPHATES, NATURAL ALUMINIUM CALCIUM PHOSPHATES AND PHOSPHATIC CHALK:Unground :Natural calcium phosphate apatite" +25101090,"NATURAL CALCIUM PHOSPHATES, NATURAL ALUMINIUM CALCIUM PHOSPHATES AND PHOSPHATIC CHALK:Unground :Other" +25102000,natural calcium phosphates natural aluminium calcium phosphates and phosphatic chalk natural calcium phosphates natural aluminium calcium phosphates and phosphatic chalk >> ground ground +25102010,"NATURAL CALCIUM PHOSPHATES, NATURAL ALUMINIUM CALCIUM PHOSPHATES AND PHOSPHATIC CHALK:Ground :Natural calcium phosphates" +25102020,"NATURAL CALCIUM PHOSPHATES, NATURAL ALUMINIUM CALCIUM PHOSPHATES AND PHOSPHATIC CHALK:Ground :Natural aluminium calcium phosphate" +25102030,"NATURAL CALCIUM PHOSPHATES, NATURAL ALUMINIUM CALCIUM PHOSPHATES AND PHOSPHATIC CHALK:Ground :Natural calcium phosphates apatite" +25102090,"NATURAL CALCIUM PHOSPHATES, NATURAL ALUMINIUM CALCIUM PHOSPHATES AND PHOSPHATIC CHALK:Ground :Other" +25110000,natural barium sulphate barytes natural barium carbonate witherite whether or not calcined other than barium oxide of heading 2816 natural barium sulphate barytes natural barium carbonate witherite whether or not calcined other than barium oxide of heading 2816 +25111000,natural barium sulphate barytes natural barium carbonate witherite whether or not calcined other than barium oxide of heading 2816 natural barium sulphate barytes natural barium carbonate witherite whether or not calcined other than barium oxide of heading 2816 >> natural barium sulphate barytes natural barium sulphate barytes +25111010,"NATURAL BARIUM SULPHATE (BARYTES); NATURAL BARIUM CARBONATE (WITHERITE), WHETHER OR NOT CALCINED, OTHER THAN BARIUM OXIDE OF HEADING 2816:Natural barium sulphate (barytes):Lumps" +25111020,"NATURAL BARIUM SULPHATE (BARYTES); NATURAL BARIUM CARBONATE (WITHERITE), WHETHER OR NOT CALCINED, OTHER THAN BARIUM OXIDE OF HEADING 2816:Natural barium sulphate (barytes):Powder" +25111090,"NATURAL BARIUM SULPHATE (BARYTES); NATURAL BARIUM CARBONATE (WITHERITE), WHETHER OR NOT CALCINED, OTHER THAN BARIUM OXIDE OF HEADING 2816:Natural barium sulphate (barytes):Other" +25112000,"NATURAL BARIUM SULPHATE (BARYTES); NATURAL BARIUM CARBONATE (WITHERITE), WHETHER OR NOT CALCINED, OTHER THAN BARIUM OXIDE OF HEADING 2816::Natural barium carbonate (witherite)" 
+25120000,siliceous fossil meals for example kieselguhr tripolite and diatomite and similar siliceous earths whether or not calcined of an apparent specific gravity of 1 or less siliceous fossil meals for example kieselguhr tripolite and diatomite and similar siliceous earths whether or not calcined of an apparent specific gravity of 1 or less +25120010,"SILICEOUS FOSSIL MEALS (FOR EXAMPLE, KIESELGUHR, TRIPOLITE AND DIATOMITE) AND SIMILAR SILICEOUS EARTHS, WHETHER OR NOT CALCINED, OF AN APPARENT SPECIFIC GRAVITY OF 1 OR LESS:Siliceous fossil meals (for example, kieselguhr, tripolite and diatomite) and similar siliceous earths, whether or not calcined, of an apparent specific gravity of 1 or less :Kieselguhr" +25120020,"SILICEOUS FOSSIL MEALS (FOR EXAMPLE, KIESELGUHR, TRIPOLITE AND DIATOMITE) AND SIMILAR SILICEOUS EARTHS, WHETHER OR NOT CALCINED, OF AN APPARENT SPECIFIC GRAVITY OF 1 OR LESS:Siliceous fossil meals (for example, kieselguhr, tripolite and diatomite) and similar siliceous earths, whether or not calcined, of an apparent specific gravity of 1 or less :Tripolite" +25120030,"SILICEOUS FOSSIL MEALS (FOR EXAMPLE, KIESELGUHR, TRIPOLITE AND DIATOMITE) AND SIMILAR SILICEOUS EARTHS, WHETHER OR NOT CALCINED, OF AN APPARENT SPECIFIC GRAVITY OF 1 OR LESS:Siliceous fossil meals (for example, kieselguhr, tripolite and diatomite) and similar siliceous earths, whether or not calcined, of an apparent specific gravity of 1 or less :Diatomite" +25120090,"SILICEOUS FOSSIL MEALS (FOR EXAMPLE, KIESELGUHR, TRIPOLITE AND DIATOMITE) AND SIMILAR SILICEOUS EARTHS, WHETHER OR NOT CALCINED, OF AN APPARENT SPECIFIC GRAVITY OF 1 OR LESS:Siliceous fossil meals (for example, kieselguhr, tripolite and diatomite) and similar siliceous earths, whether or not calcined, of an apparent specific gravity of 1 or less :Other" +25130000,pumice stones emery natural corundum natural garnet and other natural abrasives whether or not +25131000,"PUMICE STONES; EMERY; NATURAL CORUNDUM, NATURAL GARNET AND OTHER NATURAL ABRASIVES, WHETHER OR NOT HEAT-TREATED::Pumice stone" +25132000,pumice stones emery natural corundum natural garnet and other natural abrasives whether or not >> emery natural corundum natural garnet and other natural abrasives +25132010,"PUMICE STONES; EMERY; NATURAL CORUNDUM, NATURAL GARNET AND OTHER NATURAL ABRASIVES, WHETHER OR NOT HEAT-TREATED:Emery, natural corundum, natural garnet and other natural abrasives :Emery" +25132020,"PUMICE STONES; EMERY; NATURAL CORUNDUM, NATURAL GARNET AND OTHER NATURAL ABRASIVES, WHETHER OR NOT HEAT-TREATED:Emery, natural corundum, natural garnet and other natural abrasives :Natural corundum" +25132030,"PUMICE STONES; EMERY; NATURAL CORUNDUM, NATURAL GARNET AND OTHER NATURAL ABRASIVES, WHETHER OR NOT HEAT-TREATED:Emery, natural corundum, natural garnet and other natural abrasives :Natural garnet" +25132090,"PUMICE STONES; EMERY; NATURAL CORUNDUM, NATURAL GARNET AND OTHER NATURAL ABRASIVES, WHETHER OR NOT HEAT-TREATED:Emery, natural corundum, natural garnet and other natural abrasives :Other" +25140000,"::SLATE, WHETHER OR NOT ROUGHLY TRIMMED OR MERELY CUT, BY SAWING OR OTHERWISE, INTO BLOCKS OR SLABS OF A RECTANGULAR (INCLUDING SQUARE) SHAPE" +25150000,marble travertine ecaussine and other calcareous monumental or building stone of an apparent specific gravity of or more and alabaster whether or not roughly trimmed or merely cut by sawing or otherwise into blocks or slabs of a rectangular including square shape marble and travertine +25151100,"MARBLE, TRAVERTINE, ECAUSSINE AND OTHER 
CALCAREOUS MONUMENTAL OR BUILDING STONE OF AN APPARENT SPECIFIC GRAVITY OF 2.5 OR MORE, AND ALABASTER, WHETHER OR NOT ROUGHLY TRIMMED OR MERELY CUT, BY SAWING OR OTHERWISE, INTO BLOCKS OR SLABS OF A RECTANGULAR (INCLUDING SQUARE) SHAPE ::Crude or roughly trimmed" +25151200,marble travertine ecaussine and other calcareous monumental or building stone of an apparent specific gravity of or more and alabaster whether or not roughly trimmed or merely cut by sawing or otherwise into blocks or slabs of a rectangular including square shape marble and travertine >> merely cut by sawing or otherwise into blocks or slabs of a rectangular including square shape +25151210,"MARBLE, TRAVERTINE, ECAUSSINE AND OTHER CALCAREOUS MONUMENTAL OR BUILDING STONE OF AN APPARENT SPECIFIC GRAVITY OF 2.5 OR MORE, AND ALABASTER, WHETHER OR NOT ROUGHLY TRIMMED OR MERELY CUT, BY SAWING OR OTHERWISE, INTO BLOCKS OR SLABS OF A RECTANGULAR (INCLUDING SQUARE) SHAPE :Merely cut, by sawing or otherwise, into blocks or slabs of a rectangular (including square) shape:Blocks" +25151220,"MARBLE, TRAVERTINE, ECAUSSINE AND OTHER CALCAREOUS MONUMENTAL OR BUILDING STONE OF AN APPARENT SPECIFIC GRAVITY OF 2.5 OR MORE, AND ALABASTER, WHETHER OR NOT ROUGHLY TRIMMED OR MERELY CUT, BY SAWING OR OTHERWISE, INTO BLOCKS OR SLABS OF A RECTANGULAR (INCLUDING SQUARE) SHAPE :Merely cut, by sawing or otherwise, into blocks or slabs of a rectangular (including square) shape:Slabs(OLD tariff)" +25151290,"MARBLE, TRAVERTINE, ECAUSSINE AND OTHER CALCAREOUS MONUMENTAL OR BUILDING STONE OF AN APPARENT SPECIFIC GRAVITY OF 2.5 OR MORE, AND ALABASTER, WHETHER OR NOT ROUGHLY TRIMMED OR MERELY CUT, BY SAWING OR OTHERWISE, INTO BLOCKS OR SLABS OF A RECTANGULAR (INCLUDING SQUARE) SHAPE :Merely cut, by sawing or otherwise, into blocks or slabs of a rectangular (including square) shape:Other" +25152000,marble travertine ecaussine and other calcareous monumental or building stone of an apparent specific gravity of or more and alabaster whether or not roughly trimmed or merely cut by sawing or otherwise into blocks or slabs of a rectangular including square shape marble and travertine >> ecaussine and other calcareous monumental or building stone alabaster +25152010,"MARBLE, TRAVERTINE, ECAUSSINE AND OTHER CALCAREOUS MONUMENTAL OR BUILDING STONE OF AN APPARENT SPECIFIC GRAVITY OF 2.5 OR MORE, AND ALABASTER, WHETHER OR NOT ROUGHLY TRIMMED OR MERELY CUT, BY SAWING OR OTHERWISE, INTO BLOCKS OR SLABS OF A RECTANGULAR (INCLUDING SQUARE) SHAPE :Ecaussine and other calcareous monumental or building stone; alabaster :Alabaster" +25152090,"MARBLE, TRAVERTINE, ECAUSSINE AND OTHER CALCAREOUS MONUMENTAL OR BUILDING STONE OF AN APPARENT SPECIFIC GRAVITY OF 2.5 OR MORE, AND ALABASTER, WHETHER OR NOT ROUGHLY TRIMMED OR MERELY CUT, BY SAWING OR OTHERWISE, INTO BLOCKS OR SLABS OF A RECTANGULAR (INCLUDING SQUARE) SHAPE :Ecaussine and other calcareous monumental or building stone; alabaster :Other" +25160000,granite porphyry basalt sandstone and other monumental or building stone whether or not roughly trimmed or merely cut by sawing or otherwise into blocks or slabs of a rectangular including square shape granite +25161100,"GRANITE, PORPHYRY, BASALT, SANDSTONE AND OTHER MONUMENTAL OR BUILDING STONE, WHETHER OR NOT ROUGHLY TRIMMED OR MERELY CUT, BY SAWING OR OTHERWISE, INTO BLOCKS OR SLABS OF A RECTANGULAR (INCLUDING SQUARE) SHAPE::Crude or roughly trimmed" +25161200,"GRANITE, PORPHYRY, BASALT, SANDSTONE AND OTHER MONUMENTAL OR BUILDING STONE, WHETHER OR NOT ROUGHLY TRIMMED OR 
MERELY CUT, BY SAWING OR OTHERWISE, INTO BLOCKS OR SLABS OF A RECTANGULAR (INCLUDING SQUARE) SHAPE::Merely cut, by sawing or otherwise, into blocks or slabs of a rectangular (including square) shape" +25162000,"GRANITE, PORPHYRY, BASALT, SANDSTONE AND OTHER MONUMENTAL OR BUILDING STONE, WHETHER OR NOT ROUGHLY TRIMMED OR MERELY CUT, BY SAWING OR OTHERWISE, INTO BLOCKS OR SLABS OF A RECTANGULAR (INCLUDING SQUARE) SHAPE::Sandstone" +25169000,granite porphyry basalt sandstone and other monumental or building stone whether or not roughly trimmed or merely cut by sawing or otherwise into blocks or slabs of a rectangular including square shape granite >> other monumental or building stone +25169010,"GRANITE, PORPHYRY, BASALT, SANDSTONE AND OTHER MONUMENTAL OR BUILDING STONE, WHETHER OR NOT ROUGHLY TRIMMED OR MERELY CUT, BY SAWING OR OTHERWISE, INTO BLOCKS OR SLABS OF A RECTANGULAR (INCLUDING SQUARE) SHAPE:Other monumental or building stone :Pakur stone" +25169020,"GRANITE, PORPHYRY, BASALT, SANDSTONE AND OTHER MONUMENTAL OR BUILDING STONE, WHETHER OR NOT ROUGHLY TRIMMED OR MERELY CUT, BY SAWING OR OTHERWISE, INTO BLOCKS OR SLABS OF A RECTANGULAR (INCLUDING SQUARE) SHAPE:Other monumental or building stone :Stone boulders" +25169090,"GRANITE, PORPHYRY, BASALT, SANDSTONE AND OTHER MONUMENTAL OR BUILDING STONE, WHETHER OR NOT ROUGHLY TRIMMED OR MERELY CUT, BY SAWING OR OTHERWISE, INTO BLOCKS OR SLABS OF A RECTANGULAR (INCLUDING SQUARE) SHAPE:Other monumental or building stone :Other" +25170000,pebbles gravel broken or crushed stone of a kind commonly used for concrete aggregates for road metalling or for railway or other ballast shingle and flint whether or not macadam of slag dross or similar industrial waste whether or not incorporating the materials cited in the first part of the heading tarred macadam granules chippings and powder of stones of heading 2515 or 2516 whether or not heat treated +25171000,pebbles gravel broken or crushed stone of a kind commonly used for concrete aggregates for road metalling or for railway or other ballast shingle and flint whether or not macadam of slag dross or similar industrial waste whether or not incorporating the materials cited in the first part of the heading tarred macadam granules chippings and powder of stones of heading 2515 or 2516 whether or not heat treated >> pebbles gravel broken or crushed stone of a kind commonly used for concrete aggregates for road metalling or for railway or other ballast shingle and flint whether or not +25171010,"PEBBLES, GRAVEL, BROKEN OR CRUSHED STONE, OF A KIND COMMONLY USED FOR CONCRETE AGGREGATES, FOR ROAD METALLING OR FOR RAILWAY OR OTHER BALLAST, SHINGLE AND FLINT, WHETHER OR NOT HEAT-TREATED; MACADAM OF SLAG, DROSS OR SIMILAR INDUSTRIAL WASTE, WHETHER OR NOT INCORPORATING THE MATERIALS CITED IN THE FIRST PART OF THE HEADING; TARRED MACADAM; GRANULES, CHIPPINGS AND POWDER, OF STONES OF HEADING 2515 OR 2516, WHETHER OR NOT HEAT-TREATED:Pebbles, gravel, broken or crushed stone, of a kind commonly used for concrete aggregates, for road metalling or for railway or other ballast, shingle and flint, whether or not heat-treated :Pakur stone, crushed or broken" +25171020,"PEBBLES, GRAVEL, BROKEN OR CRUSHED STONE, OF A KIND COMMONLY USED FOR CONCRETE AGGREGATES, FOR ROAD METALLING OR FOR RAILWAY OR OTHER BALLAST, SHINGLE AND FLINT, WHETHER OR NOT HEAT-TREATED; MACADAM OF SLAG, DROSS OR SIMILAR INDUSTRIAL WASTE, WHETHER OR NOT INCORPORATING THE MATERIALS CITED IN THE FIRST PART OF THE HEADING; TARRED MACADAM; GRANULES, 
CHIPPINGS AND POWDER, OF STONES OF HEADING 2515 OR 2516, WHETHER OR NOT HEAT-TREATED:Pebbles, gravel, broken or crushed stone, of a kind commonly used for concrete aggregates, for road metalling or for railway or other ballast, shingle and flint, whether or not heat-treated :Flint" +25171090,"PEBBLES, GRAVEL, BROKEN OR CRUSHED STONE, OF A KIND COMMONLY USED FOR CONCRETE AGGREGATES, FOR ROAD METALLING OR FOR RAILWAY OR OTHER BALLAST, SHINGLE AND FLINT, WHETHER OR NOT HEAT-TREATED; MACADAM OF SLAG, DROSS OR SIMILAR INDUSTRIAL WASTE, WHETHER OR NOT INCORPORATING THE MATERIALS CITED IN THE FIRST PART OF THE HEADING; TARRED MACADAM; GRANULES, CHIPPINGS AND POWDER, OF STONES OF HEADING 2515 OR 2516, WHETHER OR NOT HEAT-TREATED:Pebbles, gravel, broken or crushed stone, of a kind commonly used for concrete aggregates, for road metalling or for railway or other ballast, shingle and flint, whether or not heat-treated :Other" +25172000,"PEBBLES, GRAVEL, BROKEN OR CRUSHED STONE, OF A KIND COMMONLY USED FOR CONCRETE AGGREGATES, FOR ROAD METALLING OR FOR RAILWAY OR OTHER BALLAST, SHINGLE AND FLINT, WHETHER OR NOT HEAT-TREATED; MACADAM OF SLAG, DROSS OR SIMILAR INDUSTRIAL WASTE, WHETHER OR NOT INCORPORATING THE MATERIALS CITED IN THE FIRST PART OF THE HEADING; TARRED MACADAM; GRANULES, CHIPPINGS AND POWDER, OF STONES OF HEADING 2515 OR 2516, WHETHER OR NOT HEAT-TREATED::Macadam of slag, dross or similar industrial waste, whether or not incorporating the materials cited in sub-heading 2517 10" +25173000,"PEBBLES, GRAVEL, BROKEN OR CRUSHED STONE, OF A KIND COMMONLY USED FOR CONCRETE AGGREGATES, FOR ROAD METALLING OR FOR RAILWAY OR OTHER BALLAST, SHINGLE AND FLINT, WHETHER OR NOT HEAT-TREATED; MACADAM OF SLAG, DROSS OR SIMILAR INDUSTRIAL WASTE, WHETHER OR NOT INCORPORATING THE MATERIALS CITED IN THE FIRST PART OF THE HEADING; TARRED MACADAM; GRANULES, CHIPPINGS AND POWDER, OF STONES OF HEADING 2515 OR 2516, WHETHER OR NOT HEAT-TREATED::Tarred macadam" +25174100,"PEBBLES, GRAVEL, BROKEN OR CRUSHED STONE, OF A KIND COMMONLY USED FOR CONCRETE AGGREGATES, FOR ROAD METALLING OR FOR RAILWAY OR OTHER BALLAST, SHINGLE AND FLINT, WHETHER OR NOT HEAT-TREATED; MACADAM OF SLAG, DROSS OR SIMILAR INDUSTRIAL WASTE, WHETHER OR NOT INCORPORATING THE MATERIALS CITED IN THE FIRST PART OF THE HEADING; TARRED MACADAM; GRANULES, CHIPPINGS AND POWDER, OF STONES OF HEADING 2515 OR 2516, WHETHER OR NOT HEAT-TREATED::Of marble" +25174900,"PEBBLES, GRAVEL, BROKEN OR CRUSHED STONE, OF A KIND COMMONLY USED FOR CONCRETE AGGREGATES, FOR ROAD METALLING OR FOR RAILWAY OR OTHER BALLAST, SHINGLE AND FLINT, WHETHER OR NOT HEAT-TREATED; MACADAM OF SLAG, DROSS OR SIMILAR INDUSTRIAL WASTE, WHETHER OR NOT INCORPORATING THE MATERIALS CITED IN THE FIRST PART OF THE HEADING; TARRED MACADAM; GRANULES, CHIPPINGS AND POWDER, OF STONES OF HEADING 2515 OR 2516, WHETHER OR NOT HEAT-TREATED::Other" +25180000,dolomite whether or not calcined or including dolomite roughly sintered trimmed or merely cut by sawing or into blocks or slabs of a otherwise rectangular including square shape +25181000,"DOLOMITE, WHETHER OR NOT CALCINED OR SINTERED, INCLUDING DOLOMITE ROUGHLY::Dolomite not calcined or sintered" +25182000,"DOLOMITE, WHETHER OR NOT CALCINED OR SINTERED, INCLUDING DOLOMITE ROUGHLY::Calcined or sintered dolomite" +25183000,"DOLOMITE, WHETHER OR NOT CALCINED OR SINTERED, INCLUDING DOLOMITE ROUGHLY::Dolomite ramming mix(OLD tariff)" +25190000,natural magnesium carbonate magnesite fused magnesia sintered magnesia whether or not containing small quantities 
of other oxides added before sintering other magnesium oxide whether or not pure +25191000,"NATURAL MAGNESIUM CARBONATE (MAGNESITE); FUSED MAGNESIA; DEAD-BURNED (SINTERED) MAGNESIA, WHETHER OR NOT CONTAINING SMALL QUANTITIES OF OTHER OXIDES ADDED BEFORE SINTERING; OTHER MAGNESIUM OXIDE, WHETHER OR NOT PURE::Natural magnesium carbonate (magnesite)" +25199000,natural magnesium carbonate magnesite fused magnesia sintered magnesia whether or not containing small quantities of other oxides added before sintering other magnesium oxide whether or not pure >> other +25199010,"NATURAL MAGNESIUM CARBONATE (MAGNESITE); FUSED MAGNESIA; DEAD-BURNED (SINTERED) MAGNESIA, WHETHER OR NOT CONTAINING SMALL QUANTITIES OF OTHER OXIDES ADDED BEFORE SINTERING; OTHER MAGNESIUM OXIDE, WHETHER OR NOT PURE:Other:Fused magnesia (natural)" +25199020,"NATURAL MAGNESIUM CARBONATE (MAGNESITE); FUSED MAGNESIA; DEAD-BURNED (SINTERED) MAGNESIA, WHETHER OR NOT CONTAINING SMALL QUANTITIES OF OTHER OXIDES ADDED BEFORE SINTERING; OTHER MAGNESIUM OXIDE, WHETHER OR NOT PURE:Other:Dead-burnt (sintered) magnesia" +25199030,"NATURAL MAGNESIUM CARBONATE (MAGNESITE); FUSED MAGNESIA; DEAD-BURNED (SINTERED) MAGNESIA, WHETHER OR NOT CONTAINING SMALL QUANTITIES OF OTHER OXIDES ADDED BEFORE SINTERING; OTHER MAGNESIUM OXIDE, WHETHER OR NOT PURE:Other:Magnesium calcined (other than dead-burnt) not elsewhere specified or included" +25199040,"NATURAL MAGNESIUM CARBONATE (MAGNESITE); FUSED MAGNESIA; DEAD-BURNED (SINTERED) MAGNESIA, WHETHER OR NOT CONTAINING SMALL QUANTITIES OF OTHER OXIDES ADDED BEFORE SINTERING; OTHER MAGNESIUM OXIDE, WHETHER OR NOT PURE:Other:Magnesium oxide" +25199090,"NATURAL MAGNESIUM CARBONATE (MAGNESITE); FUSED MAGNESIA; DEAD-BURNED (SINTERED) MAGNESIA, WHETHER OR NOT CONTAINING SMALL QUANTITIES OF OTHER OXIDES ADDED BEFORE SINTERING; OTHER MAGNESIUM OXIDE, WHETHER OR NOT PURE:Other:Other" +25200000,plasters gypsum anhydrite consisting of calcined gypsum or calcium sulphate whether or not coloured with or without small quantities of accelerators or retarders +25201000,plasters gypsum anhydrite consisting of calcined gypsum or calcium sulphate whether or not coloured with or without small quantities of accelerators or retarders >> gypsum anhydrite +25201010,"GYPSUM; ANHYDRITE; PLASTERS (CONSISTING OF CALCINED GYPSUM OR CALCIUM SULPHATE) WHETHER OR NOT COLOURED, WITH OR WITHOUT SMALL QUANTITIES OF ACCELERATORS OR RETARDERS:Gypsum; anhydrite :Natural" +25201020,"GYPSUM; ANHYDRITE; PLASTERS (CONSISTING OF CALCINED GYPSUM OR CALCIUM SULPHATE) WHETHER OR NOT COLOURED, WITH OR WITHOUT SMALL QUANTITIES OF ACCELERATORS OR RETARDERS:Gypsum; anhydrite :Marine" +25201090,"GYPSUM; ANHYDRITE; PLASTERS (CONSISTING OF CALCINED GYPSUM OR CALCIUM SULPHATE) WHETHER OR NOT COLOURED, WITH OR WITHOUT SMALL QUANTITIES OF ACCELERATORS OR RETARDERS:Gypsum; anhydrite :Other" +25202000,plasters gypsum anhydrite consisting of calcined gypsum or calcium sulphate whether or not coloured with or without small quantities of accelerators or retarders >> plasters +25202010,"GYPSUM; ANHYDRITE; PLASTERS (CONSISTING OF CALCINED GYPSUM OR CALCIUM SULPHATE) WHETHER OR NOT COLOURED, WITH OR WITHOUT SMALL QUANTITIES OF ACCELERATORS OR RETARDERS:Plasters:Calcined" +25202090,"GYPSUM; ANHYDRITE; PLASTERS (CONSISTING OF CALCINED GYPSUM OR CALCIUM SULPHATE) WHETHER OR NOT COLOURED, WITH OR WITHOUT SMALL QUANTITIES OF ACCELERATORS OR RETARDERS:Plasters:Other" +25210000,limestone flux limestone and other calcareous stones of a kind used for the manufacture of lime or 
cement limestone flux limestone and other calcareous stones of a kind used for the manufacture of lime or cement +25210010,"LIMESTONE FLUX; LIMESTONE AND OTHER CALCAREOUS STONES, OF A KIND USED FOR THE MANUFACTURE OF LIME OR CEMENT:Limestone flux; limestone and other calcareous stones, of a kind used for the manufacture of lime or cement:Limestone flux (L.D., below 1% SiO2)" +25210090,"LIMESTONE FLUX; LIMESTONE AND OTHER CALCAREOUS STONES, OF A KIND USED FOR THE MANUFACTURE OF LIME OR CEMENT:Limestone flux; limestone and other calcareous stones, of a kind used for the manufacture of lime or cement:Other" +25220000,quicklime slaked lime and hydraulic lime other than calcium oxide and hydroxide of heading 2825 +25221000,"QUICKLIME, SLAKED LIME AND HYDRAULIC LIME, OTHER THAN CALCIUM OXIDE AND HYDROXIDE OF HEADING 2825::Quicklime" +25222000,"QUICKLIME, SLAKED LIME AND HYDRAULIC LIME, OTHER THAN CALCIUM OXIDE AND HYDROXIDE OF HEADING 2825::Slaked lime" +25223000,"QUICKLIME, SLAKED LIME AND HYDRAULIC LIME, OTHER THAN CALCIUM OXIDE AND HYDROXIDE OF HEADING 2825::Hydraulic lime" +25230000,portland cement aluminous cement slag cement supersulphate cement and or similar hydraulic cements whether not coloured or in the form of clinkers +25231000,"PORTLAND CEMENT, ALUMINOUS CEMENT, SLAG CEMENT, SUPERSULPHATE CEMENT AND SIMILAR HYDRAULIC CEMENTS, WHETHER OR NOT COLOURED OR IN THE FORM OF CLINKERS::Cement clinkers" +25232100,"PORTLAND CEMENT, ALUMINOUS CEMENT, SLAG CEMENT, SUPERSULPHATE CEMENT AND SIMILAR HYDRAULIC CEMENTS, WHETHER OR NOT COLOURED OR IN THE FORM OF CLINKERS::White cement, whether or not artificially coloured" +25232900,portland cement aluminous cement slag cement supersulphate cement and or similar hydraulic cements whether not coloured or in the form of clinkers >> other +25232910,"PORTLAND CEMENT, ALUMINOUS CEMENT, SLAG CEMENT, SUPERSULPHATE CEMENT AND SIMILAR HYDRAULIC CEMENTS, WHETHER OR NOT COLOURED OR IN THE FORM OF CLINKERS:Other :Ordinary portland cement, dry" +25232920,"PORTLAND CEMENT, ALUMINOUS CEMENT, SLAG CEMENT, SUPERSULPHATE CEMENT AND SIMILAR HYDRAULIC CEMENTS, WHETHER OR NOT COLOURED OR IN THE FORM OF CLINKERS:Other :Ordinary portland cement, coloured" +25232930,"PORTLAND CEMENT, ALUMINOUS CEMENT, SLAG CEMENT, SUPERSULPHATE CEMENT AND SIMILAR HYDRAULIC CEMENTS, WHETHER OR NOT COLOURED OR IN THE FORM OF CLINKERS:Other :Portland pozzolana Cement" +25232940,"PORTLAND CEMENT, ALUMINOUS CEMENT, SLAG CEMENT, SUPERSULPHATE CEMENT AND SIMILAR HYDRAULIC CEMENTS, WHETHER OR NOT COLOURED OR IN THE FORM OF CLINKERS:Other :Portland slag cement" +25232990,"PORTLAND CEMENT, ALUMINOUS CEMENT, SLAG CEMENT, SUPERSULPHATE CEMENT AND SIMILAR HYDRAULIC CEMENTS, WHETHER OR NOT COLOURED OR IN THE FORM OF CLINKERS:Other :Other" +25233000,"PORTLAND CEMENT, ALUMINOUS CEMENT, SLAG CEMENT, SUPERSULPHATE CEMENT AND SIMILAR HYDRAULIC CEMENTS, WHETHER OR NOT COLOURED OR IN THE FORM OF CLINKERS::Aluminous cement" +25239000,portland cement aluminous cement slag cement supersulphate cement and or similar hydraulic cements whether not coloured or in the form of clinkers >> other hydraulic cements +25239010,"PORTLAND CEMENT, ALUMINOUS CEMENT, SLAG CEMENT, SUPERSULPHATE CEMENT AND SIMILAR HYDRAULIC CEMENTS, WHETHER OR NOT COLOURED OR IN THE FORM OF CLINKERS:Other hydraulic cements:Sagol; ashmoh" +25239020,"PORTLAND CEMENT, ALUMINOUS CEMENT, SLAG CEMENT, SUPERSULPHATE CEMENT AND SIMILAR HYDRAULIC CEMENTS, WHETHER OR NOT COLOURED OR IN THE FORM OF CLINKERS:Other hydraulic cements:High alumina refractory 
cement" +25239090,"PORTLAND CEMENT, ALUMINOUS CEMENT, SLAG CEMENT, SUPERSULPHATE CEMENT AND SIMILAR HYDRAULIC CEMENTS, WHETHER OR NOT COLOURED OR IN THE FORM OF CLINKERS:Other hydraulic cements:Other" +25240000,asbestos +25241010,ASBESTOS::Crocidolite(OLD tariff) +25249000,asbestos >> other in rock form +25249011,ASBESTOS:Other:Chrysotile +25249012,ASBESTOS:Other:Amphibole +25249013,ASBESTOS:Other:Crysolite +25249014,ASBESTOS:Other:Amosite +25249019,ASBESTOS:Other:Other +25249021,ASBESTOS:Other:Chrysotile +25249022,ASBESTOS:Other:Amphibole +25249023,ASBESTOS:Other:Crysolite +25249024,ASBESTOS:Other:Amosite +25249029,ASBESTOS:Other:Other +25249031,ASBESTOS:Other:Chrysotile +25249032,ASBESTOS:Other:Amphibole +25249033,ASBESTOS:Other:Crysolite +25249034,ASBESTOS:Other:Amosite +25249039,ASBESTOS:Other:Other +25249091,ASBESTOS:Other:Waste +25249099,ASBESTOS:Other:Other +25250000,mica including splittings mica waste +25251000,mica including splittings mica waste >> crude mica and mica rifted into sheets or splittings +25251010,"MICA, INCLUDING SPLITTINGS; MICA WASTE:Crude mica and mica rifted into sheets or splittings:Mica blocks" +25251020,"MICA, INCLUDING SPLITTINGS; MICA WASTE:Crude mica and mica rifted into sheets or splittings:Condensor films trimmed but not cut to shape" +25251030,"MICA, INCLUDING SPLITTINGS; MICA WASTE:Crude mica and mica rifted into sheets or splittings:Mica splittings, book form" +25251040,"MICA, INCLUDING SPLITTINGS; MICA WASTE:Crude mica and mica rifted into sheets or splittings:Mica splittings, loose" +25251090,"MICA, INCLUDING SPLITTINGS; MICA WASTE:Crude mica and mica rifted into sheets or splittings:Other" +25252000,mica including splittings mica waste >> mica powder mica powder +25252010,"MICA, INCLUDING SPLITTINGS; MICA WASTE:Mica powder :Mica flakes, 2.20 mesh" +25252020,"MICA, INCLUDING SPLITTINGS; MICA WASTE:Mica powder :Mica powder, dry ground" +25252030,"MICA, INCLUDING SPLITTINGS; MICA WASTE:Mica powder :Mica powder, micronised" +25252040,"MICA, INCLUDING SPLITTINGS; MICA WASTE:Mica powder :Mica powder, wet ground" +25252050,"MICA, INCLUDING SPLITTINGS; MICA WASTE:Mica powder :Mica powder, calcined" +25252090,"MICA, INCLUDING SPLITTINGS; MICA WASTE:Mica powder :Other" +25253000,mica including splittings mica waste >> mica waste mica waste +25253010,"MICA, INCLUDING SPLITTINGS; MICA WASTE:Mica waste :Mica mine scrap and waste" +25253020,"MICA, INCLUDING SPLITTINGS; MICA WASTE:Mica waste :Mica factory scrap" +25253030,"MICA, INCLUDING SPLITTINGS; MICA WASTE:Mica waste :Mica cuttings book form" +25253090,"MICA, INCLUDING SPLITTINGS; MICA WASTE:Mica waste :Other" +25260000,natural steatite whether or not roughly trimmed or merely cut by sawing or into blocks or slabs of a otherwise rectangular including square shape talc +25261000,natural steatite whether or not roughly trimmed or merely cut by sawing or into blocks or slabs of a otherwise rectangular including square shape talc >> not crushed not powdered +25261010,"NATURAL STEATITE, WHETHER OR NOT ROUGHLY TRIMMED OR MERELY CUT, BY SAWING OR OTHERWISE, INTO BLOCKS OR SLABS OF A RECTANGULAR (INCLUDING SQUARE) SHAPE; TALC:Not crushed, not powdered :Steatite (soap stone, etc.) block" +25261020,"NATURAL STEATITE, WHETHER OR NOT ROUGHLY TRIMMED OR MERELY CUT, BY SAWING OR OTHERWISE, INTO BLOCKS OR SLABS OF A RECTANGULAR (INCLUDING SQUARE) SHAPE; TALC:Not crushed, not powdered :Steatite (soap stone, etc.) 
lumps" +25261090,"NATURAL STEATITE, WHETHER OR NOT ROUGHLY TRIMMED OR MERELY CUT, BY SAWING OR OTHERWISE, INTO BLOCKS OR SLABS OF A RECTANGULAR (INCLUDING SQUARE) SHAPE; TALC:Not crushed, not powdered :Other" +25262000,"NATURAL STEATITE, WHETHER OR NOT ROUGHLY TRIMMED OR MERELY CUT, BY SAWING OR OTHERWISE, INTO BLOCKS OR SLABS OF A RECTANGULAR (INCLUDING SQUARE) SHAPE; TALC::Crushed or powdered" +25280000,natural borates and concentrates thereof whether or not calcined but not including b o rat e s s e pa r at e d f rom n atur al b rin e natural boric acid containing not more than 85 of h3bo3 calculated on the dry weight natural borates and concentrates thereof whether or not calcined but not including borates separated from natural brine natural boric acid containing not more than 85 of h3bo3 calculated on the dry weight +25280010,"NATURAL BORATES AND CONCENTRATES THEREOF (WHETHER OR NOT CALCINED), BUT NOT INCLUDING BORATES SEPARATED FROM NATURAL BRINE; NATURAL BORIC ACID CONTAINING NOT MORE THAN 85% OF H3BO3 CALCULATED ON THE DRY WEIGHT:Natural borates and concentrates thereof (Whether or not calcined), but not including borates separated from natural brine; natural boric acid containing not more than 85% of H3BO3 Calculated on the dry weight:Natural sodium borates and concentrates thereof (Whether or not calcined)" +25280020,"NATURAL BORATES AND CONCENTRATES THEREOF (WHETHER OR NOT CALCINED), BUT NOT INCLUDING BORATES SEPARATED FROM NATURAL BRINE; NATURAL BORIC ACID CONTAINING NOT MORE THAN 85% OF H3BO3 CALCULATED ON THE DRY WEIGHT:Natural borates and concentrates thereof (Whether or not calcined), but not including borates separated from natural brine; natural boric acid containing not more than 85% of H3BO3 Calculated on the dry weight:Natural boric acid containing not more than 85% of H3BO3 (Calculated on the dry weight)" +25280030,"NATURAL BORATES AND CONCENTRATES THEREOF (WHETHER OR NOT CALCINED), BUT NOT INCLUDING BORATES SEPARATED FROM NATURAL BRINE; NATURAL BORIC ACID CONTAINING NOT MORE THAN 85% OF H3BO3 CALCULATED ON THE DRY WEIGHT:Natural borates and concentrates thereof (Whether or not calcined), but not including borates separated from natural brine; natural boric acid containing not more than 85% of H3BO3 Calculated on the dry weight:Natural calcium borates and concentrates thereof (Whether or not calcined)" +25280090,"NATURAL BORATES AND CONCENTRATES THEREOF (WHETHER OR NOT CALCINED), BUT NOT INCLUDING BORATES SEPARATED FROM NATURAL BRINE; NATURAL BORIC ACID CONTAINING NOT MORE THAN 85% OF H3BO3 CALCULATED ON THE DRY WEIGHT:Natural borates and concentrates thereof (Whether or not calcined), but not including borates separated from natural brine; natural boric acid containing not more than 85% of H3BO3 Calculated on the dry weight:Other" +25290000,feldspar leucite nepheline and nepheline syenite fluorspar +25291000,feldspar leucite nepheline and nepheline syenite fluorspar >> feldspar +25291010,FELDSPAR; LEUCITE; NEPHELINE AND NEPHELINE SYENITE; FLUORSPAR:Feldspar :Lumps +25291020,FELDSPAR; LEUCITE; NEPHELINE AND NEPHELINE SYENITE; FLUORSPAR:Feldspar :Powder +25292100,FELDSPAR; LEUCITE; NEPHELINE AND NEPHELINE SYENITE; FLUORSPAR::Containing by weight 97% or less of calcium fluoride +25292200,FELDSPAR; LEUCITE; NEPHELINE AND NEPHELINE SYENITE; FLUORSPAR::Containing by weight more than 97% of calcium fluoride +25293000,FELDSPAR; LEUCITE; NEPHELINE AND NEPHELINE SYENITE; FLUORSPAR::Leucite; nepheline and nepheline syenite +25300000,mineral substances not elsewhere specified 
or included +25301000,mineral substances not elsewhere specified or included >> vermiculite perlite and chlorites unexpanded +25301010,"MINERAL SUBSTANCES NOT ELSEWHERE SPECIFIED OR INCLUDED:Vermiculite, perlite and chlorites, unexpanded :Vermiculite" +25301020,"MINERAL SUBSTANCES NOT ELSEWHERE SPECIFIED OR INCLUDED:Vermiculite, perlite and chlorites, unexpanded :Perlite" +25301090,"MINERAL SUBSTANCES NOT ELSEWHERE SPECIFIED OR INCLUDED:Vermiculite, perlite and chlorites, unexpanded :Others (including powder)" +25302000,"MINERAL SUBSTANCES NOT ELSEWHERE SPECIFIED OR INCLUDED::Kieserite, epsomite (natural magnesium sulphates)" +25309000,mineral substances not elsewhere specified or included >> other +25309010,"MINERAL SUBSTANCES NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Meerschaum (whether or not in polished pieces) and amber agglomerated; meerschaum and agglomerated amber in plates, rods, etc., not worked after moulding jet" +25309020,MINERAL SUBSTANCES NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Natural arsenic sulphides (such as orpiment) +25309030,MINERAL SUBSTANCES NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Calcite +25309040,MINERAL SUBSTANCES NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Ores and concentrates of rare earth metals +25309050,MINERAL SUBSTANCES NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Wollastonite +25309060,"MINERAL SUBSTANCES NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Earth colour ochre, crude" +25309070,MINERAL SUBSTANCES NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Other processed earth colour ochre +25309091,MINERAL SUBSTANCES NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Strontium sulphate (natural ore) +25309099,MINERAL SUBSTANCES NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Other +26010000,iron ores and concentrates including roasted iron pyrites iron ores and concentrates other than roasted iron pyrites +26011100,iron ores and concentrates including roasted iron pyrites iron ores and concentrates other than roasted iron pyrites >> iron ore lumps 60 fe or more +26011110,"IRON ORES AND CONCENTRATES, INCLUDING ROASTED IRON PYRITES:Non-agglomerated :Iron ore lumps (60% Fe or more)(OLD tariff)" +26011111,"IRON ORES AND CONCENTRATES, INCLUDING ROASTED IRON PYRITES:Non-agglomerated :60% Fe or more but below 62% Fe" +26011112,"IRON ORES AND CONCENTRATES, INCLUDING ROASTED IRON PYRITES:Non-agglomerated :62% Fe or more but below 65% Fe" +26011119,"IRON ORES AND CONCENTRATES, INCLUDING ROASTED IRON PYRITES:Non-agglomerated :65% Fe and above(OLD tariff)" +26011120,"IRON ORES AND CONCENTRATES, INCLUDING ROASTED IRON PYRITES:Non-agglomerated :Iron ore lumps (below 60% Fe, including black iron ore containing up to 10% Mn)(OLD tariff)" +26011121,"IRON ORES AND CONCENTRATES, INCLUDING ROASTED IRON PYRITES:Non-agglomerated :below 55% Fe(OLD tariff)" +26011122,"IRON ORES AND CONCENTRATES, INCLUDING ROASTED IRON PYRITES:Non-agglomerated :55% Fe or more but below 58% Fe" +26011129,"IRON ORES AND CONCENTRATES, INCLUDING ROASTED IRON PYRITES:Non-agglomerated :58% Fe and above(OLD tariff)" +26011130,"IRON ORES AND CONCENTRATES, INCLUDING ROASTED IRON PYRITES:Non-agglomerated :Iron ore fines (62% Fe or more)(OLD tariff)" +26011131,"IRON ORES AND CONCENTRATES, INCLUDING ROASTED IRON PYRITES:Non-agglomerated :62% Fe or more but below 65% Fe(OLD tariff)" +26011139,"IRON ORES AND CONCENTRATES, INCLUDING ROASTED IRON PYRITES:Non-agglomerated :65% Fe and above(OLD tariff)" +26011140,"IRON ORES AND CONCENTRATES, INCLUDING ROASTED IRON PYRITES:Non-agglomerated :Iron ore fines (below 62% Fe)(OLD 
tariff)" +26011141,"IRON ORES AND CONCENTRATES, INCLUDING ROASTED IRON PYRITES:Non-agglomerated :below 55% Fe(OLD tariff)" +26011142,"IRON ORES AND CONCENTRATES, INCLUDING ROASTED IRON PYRITES:Non-agglomerated :55% Fe or more but below 58% Fe" +26011143,"IRON ORES AND CONCENTRATES, INCLUDING ROASTED IRON PYRITES:Non-agglomerated :58% Fe or more but below 60% Fe" +26011149,"IRON ORES AND CONCENTRATES, INCLUDING ROASTED IRON PYRITES:Non-agglomerated :60% Fe or more but below 62% Fe" +26011150,"IRON ORES AND CONCENTRATES, INCLUDING ROASTED IRON PYRITES:Non-agglomerated :Iron ore concentrates" +26011190,"IRON ORES AND CONCENTRATES, INCLUDING ROASTED IRON PYRITES:Non-agglomerated :Other" +26011200,iron ores and concentrates including roasted iron pyrites iron ores and concentrates other than roasted iron pyrites >> agglomerated +26011210,"IRON ORES AND CONCENTRATES, INCLUDING ROASTED IRON PYRITES:Agglomerated :Iron ore pellets" +26011290,"IRON ORES AND CONCENTRATES, INCLUDING ROASTED IRON PYRITES:Agglomerated :Other" +26012000,"IRON ORES AND CONCENTRATES, INCLUDING ROASTED IRON PYRITES::Roasted iron pyrites" +26020000,manganese ores and concentrates including ferruginous manganese ores and concentrates with a manganese content of 20 or more calculated on the dry weight manganese ores and concentrates including ferruginous manganese ores and concentrates with a manganese content of 20 or more calculated on the dry weight +26020010,"MANGANESE ORES AND CONCENTRATES, INCLUDING FERRUGINOUS MANGANESE ORES AND CONCENTRATES WITH A MANGANESE CONTENT OF 20% OR MORE, CALCULATED ON THE DRY WEIGHT:Manganese ores and concentrates, including ferruginous manganese ores and concentrates with a manganese content of 20% or more, calculated on the dry weight :Manganese ore (46% or more)(OLD tariff)" +26020020,"MANGANESE ORES AND CONCENTRATES, INCLUDING FERRUGINOUS MANGANESE ORES AND CONCENTRATES WITH A MANGANESE CONTENT OF 20% OR MORE, CALCULATED ON THE DRY WEIGHT:Manganese ores and concentrates, including ferruginous manganese ores and concentrates with a manganese content of 20% or more, calculated on the dry weight :Manganese ore (44% or more but below 46%)" +26020030,"MANGANESE ORES AND CONCENTRATES, INCLUDING FERRUGINOUS MANGANESE ORES AND CONCENTRATES WITH A MANGANESE CONTENT OF 20% OR MORE, CALCULATED ON THE DRY WEIGHT:Manganese ores and concentrates, including ferruginous manganese ores and concentrates with a manganese content of 20% or more, calculated on the dry weight :Manganese ore (40% or more but below 44%)" +26020040,"MANGANESE ORES AND CONCENTRATES, INCLUDING FERRUGINOUS MANGANESE ORES AND CONCENTRATES WITH A MANGANESE CONTENT OF 20% OR MORE, CALCULATED ON THE DRY WEIGHT:Manganese ores and concentrates, including ferruginous manganese ores and concentrates with a manganese content of 20% or more, calculated on the dry weight :Manganese ore (35% or more but below 40%)" +26020050,"MANGANESE ORES AND CONCENTRATES, INCLUDING FERRUGINOUS MANGANESE ORES AND CONCENTRATES WITH A MANGANESE CONTENT OF 20% OR MORE, CALCULATED ON THE DRY WEIGHT:Manganese ores and concentrates, including ferruginous manganese ores and concentrates with a manganese content of 20% or more, calculated on the dry weight :Manganese ore (30% or more but below 35%)" +26020060,"MANGANESE ORES AND CONCENTRATES, INCLUDING FERRUGINOUS MANGANESE ORES AND CONCENTRATES WITH A MANGANESE CONTENT OF 20% OR MORE, CALCULATED ON THE DRY WEIGHT:Manganese ores and concentrates, including ferruginous manganese ores and concentrates with a manganese 
content of 20% or more, calculated on the dry weight :Ferruginous (10% or more but below 30%)" +26020070,"MANGANESE ORES AND CONCENTRATES, INCLUDING FERRUGINOUS MANGANESE ORES AND CONCENTRATES WITH A MANGANESE CONTENT OF 20% OR MORE, CALCULATED ON THE DRY WEIGHT:Manganese ores and concentrates, including ferruginous manganese ores and concentrates with a manganese content of 20% or more, calculated on the dry weight :Manganese ore sinters, agglomerated" +26020090,"MANGANESE ORES AND CONCENTRATES, INCLUDING FERRUGINOUS MANGANESE ORES AND CONCENTRATES WITH A MANGANESE CONTENT OF 20% OR MORE, CALCULATED ON THE DRY WEIGHT:Manganese ores and concentrates, including ferruginous manganese ores and concentrates with a manganese content of 20% or more, calculated on the dry weight :Other" +26030000,::COPPER ORES AND CONCENTRATES +26040000,::NICKEL ORES AND CONCENTRATES +26050000,::COBALT ORES AND CONCENTRATES +26060000,aluminium ores and concentrates aluminium ores and concentrates aluminium ores and concentrates aluminium ores and concentrates +26060010,"ALUMINIUM ORES AND CONCENTRATES:Aluminium ores and concentrates :Bauxite (natural), not calcined" +26060020,"ALUMINIUM ORES AND CONCENTRATES:Aluminium ores and concentrates :Bauxite (natural), calcined" +26060090,ALUMINIUM ORES AND CONCENTRATES:Aluminium ores and concentrates :Other aluminium ores and concentrates +26070000,::LEAD ORES AND CONCENTRATES +26080000,::ZINC ORES AND CONCENTRATES +26090000,::TIN ORES AND CONCENTRATES +26100000,chromium ores and concentrates chromium ores and concentrates chromium ores and concentrates chromium ores and concentrates +26100010,"CHROMIUM ORES AND CONCENTRATES:Chromium ores and concentrates :Chrome ore lumps, containing 47% Cr2O3 and above" +26100020,"CHROMIUM ORES AND CONCENTRATES:Chromium ores and concentrates :Chrome ore lumps, containing 40% or more but less than 47% Cr2O3" +26100030,CHROMIUM ORES AND CONCENTRATES:Chromium ores and concentrates :Chrome ore lumps below 40% Cr2O3 +26100040,CHROMIUM ORES AND CONCENTRATES:Chromium ores and concentrates :Chrome ore friable and concentrates fixes containing 47% Cr2O3 and above +26100090,CHROMIUM ORES AND CONCENTRATES:Chromium ores and concentrates :Other +26110000,::TUNGSTEN ORES AND CONCENTRATES +26120000,uranium or thorium ores and concentrates uranium or thorium ores and concentrates +26121000,URANIUM OR THORIUM ORES AND CONCENTRATES::Uranium ores and concentrates +26122000,URANIUM OR THORIUM ORES AND CONCENTRATES::Thorium ores and concentrates +26130000,molybdenum ores and concentrates molybdenum ores and concentrates +26131000,MOLYBDENUM ORES AND CONCENTRATES::Roasted +26139000,MOLYBDENUM ORES AND CONCENTRATES::Other +26140000,titanium ores and concentrates titanium ores and concentrates titanium ores and concentrates titanium ores and concentrates +26140010,"TITANIUM ORES AND CONCENTRATES:Titanium ores and concentrates :Ilmenite, unprocessed" +26140020,"TITANIUM ORES AND CONCENTRATES:Titanium ores and concentrates :Ilmenite, upgraded (beneficiated ilmenite including ilmenite ground) " +26140031,TITANIUM ORES AND CONCENTRATES:Titanium ores and concentrates :Rare earth oxides including rutile sand +26140039,TITANIUM ORES AND CONCENTRATES:Titanium ores and concentrates :Other +26140090,TITANIUM ORES AND CONCENTRATES:Titanium ores and concentrates :Other +26150000,niobium tantalum vanadium or zirconium ores and concentrates niobium tantalum vanadium or zirconium ores and concentrates +26151000,"NIOBIUM, TANTALUM, VANADIUM OR ZIRCONIUM ORES AND
CONCENTRATES::Zirconium ores and concentrates" +26159000,niobium tantalum vanadium or zirconium ores and concentrates niobium tantalum vanadium or zirconium ores and concentrates >> other other +26159010,"NIOBIUM, TANTALUM, VANADIUM OR ZIRCONIUM ORES AND CONCENTRATES:Other :Vanadium ores and concentrates" +26159020,"NIOBIUM, TANTALUM, VANADIUM OR ZIRCONIUM ORES AND CONCENTRATES:Other :Niobium or tantalum ores and concentrates" +26160000,precious metal ores and concentrates precious metal ores and concentrates +26161000,PRECIOUS METAL ORES AND CONCENTRATES::Silver ores and concentrates +26169000,precious metal ores and concentrates precious metal ores and concentrates >> other other +26169010,PRECIOUS METAL ORES AND CONCENTRATES:Other :Gold ores and concentrates +26169090,PRECIOUS METAL ORES AND CONCENTRATES:Other :Other +26170000,other ores and concentrates other ores and concentrates +26171000,OTHER ORES AND CONCENTRATES::Antimony ores and concentrates +26179000,OTHER ORES AND CONCENTRATES::Other +26180000,::GRANULATED SLAG (SLAG SAND) FROM THE MANUFACTURE OF IRON OR STEEL +26190000,slag dross other than granulated slag scalings and other waste from the slag dross other than granulated slag scalings and other waste from the manufacture of iron or steel slag dross other than granulated slag scalings and other waste from the manufacture of iron or steel +26190010,"SLAG, DROSS (OTHER THAN GRANULATED SLAG), SCALINGS AND OTHER WASTE FROM THE MANUFACTURE OF IRON OR STEEL:Slag, dross (other than granulated slag), scalings and other waste from the manufacture of iron or steel :Converted slag (scull) of blast furnace" +26190090,"SLAG, DROSS (OTHER THAN GRANULATED SLAG), SCALINGS AND OTHER WASTE FROM THE MANUFACTURE OF IRON OR STEEL:Slag, dross (other than granulated slag), scalings and other waste from the manufacture of iron or steel :Other" +26200000,slag ash and residues other than from the manufacture of iron or steel containing arsenic metals or their compounds containing mainly zinc +26201100,"SLAG, ASH AND RESIDUES (OTHER THAN FROM THE MANUFACTURE OF IRON OR STEEL), CONTAINING ARSENIC, METALS OR THEIR COMPOUNDS ::Hard zinc spelter" +26201900,"SLAG, ASH AND RESIDUES (OTHER THAN FROM THE MANUFACTURE OF IRON OR STEEL), CONTAINING ARSENIC, METALS OR THEIR COMPOUNDS :Other:Other " +26201910,"SLAG, ASH AND RESIDUES (OTHER THAN FROM THE MANUFACTURE OF IRON OR STEEL), CONTAINING ARSENIC, METALS OR THEIR COMPOUNDS :Other:zinc dross" +26201990,"SLAG, ASH AND RESIDUES (OTHER THAN FROM THE MANUFACTURE OF IRON OR STEEL), CONTAINING ARSENIC, METALS OR THEIR COMPOUNDS :Other:Other" +26202100,"SLAG, ASH AND RESIDUES (OTHER THAN FROM THE MANUFACTURE OF IRON OR STEEL), CONTAINING ARSENIC, METALS OR THEIR COMPOUNDS ::Leaded gasoline sludges and leaded anti-knock compound sludges" +26202900,"SLAG, ASH AND RESIDUES (OTHER THAN FROM THE MANUFACTURE OF IRON OR STEEL), CONTAINING ARSENIC, METALS OR THEIR COMPOUNDS :Other:Other" +26202910,"SLAG, ASH AND RESIDUES (OTHER THAN FROM THE MANUFACTURE OF IRON OR STEEL), CONTAINING ARSENIC, METALS OR THEIR COMPOUNDS :Other:Lead dross" +26202990,"SLAG, ASH AND RESIDUES (OTHER THAN FROM THE MANUFACTURE OF IRON OR STEEL), CONTAINING ARSENIC, METALS OR THEIR COMPOUNDS :Other:Other" +26203000,slag ash and residues other than from the manufacture of iron or steel containing arsenic metals or their compounds containing mainly zinc >> containing mainly copper +26203010,"SLAG, ASH AND RESIDUES (OTHER THAN FROM THE MANUFACTURE OF IRON OR STEEL), CONTAINING ARSENIC, METALS OR THEIR 
COMPOUNDS :Containing mainly copper :Brass dross" +26203090,"SLAG, ASH AND RESIDUES (OTHER THAN FROM THE MANUFACTURE OF IRON OR STEEL), CONTAINING ARSENIC, METALS OR THEIR COMPOUNDS :Containing mainly copper :Other" +26204000,slag ash and residues other than from the manufacture of iron or steel containing arsenic metals or their compounds containing mainly zinc >> containing mainly aluminium +26204010,"SLAG, ASH AND RESIDUES (OTHER THAN FROM THE MANUFACTURE OF IRON OR STEEL), CONTAINING ARSENIC, METALS OR THEIR COMPOUNDS :Containing mainly aluminium:Aluminium dross" +26204090,"SLAG, ASH AND RESIDUES (OTHER THAN FROM THE MANUFACTURE OF IRON OR STEEL), CONTAINING ARSENIC, METALS OR THEIR COMPOUNDS :Containing mainly aluminium:Other" +26206000,"SLAG, ASH AND RESIDUES (OTHER THAN FROM THE MANUFACTURE OF IRON OR STEEL), CONTAINING ARSENIC, METALS OR THEIR COMPOUNDS ::Containing arsenic, mercury, thallium or their mixtures, of a kind used for the extraction of arsenic or those metals or for the manufacture of their chemical compounds " +26209100,"SLAG, ASH AND RESIDUES (OTHER THAN FROM THE MANUFACTURE OF IRON OR STEEL), CONTAINING ARSENIC, METALS OR THEIR COMPOUNDS ::Containing antimony , beryllium, cadmium, chromium or their mixtures" +26209900,"SLAG, ASH AND RESIDUES (OTHER THAN FROM THE MANUFACTURE OF IRON OR STEEL), CONTAINING ARSENIC, METALS OR THEIR COMPOUNDS ::Other" +26210000,other slag and ash inlcuding seaweed ash kelp ash and residues from the incineration of municipal waste +26211000,"OTHER SLAG AND ASH, INLCUDING SEAWEED ASH (KELP); ASH AND RESIDUES FROM THE INCINERATION OF MUNICIPAL WASTE::Ash and residues from the incineration of municipal waste" +26219000,"OTHER SLAG AND ASH, INLCUDING SEAWEED ASH (KELP); ASH AND RESIDUES FROM THE INCINERATION OF MUNICIPAL WASTE::Other" +27010000,coal briquettes ovoids and similar solid fuels manufactured from coal coal whether or not pulverised but not agglomerated +27011100,"COAL; BRIQUETTES, OVOIDS AND SIMILAR SOLID FUELS MANUFACTURED FROM COAL::Anthracite" +27011200,"COAL; BRIQUETTES, OVOIDS AND SIMILAR SOLID FUELS MANUFACTURED FROM COAL::Bituminous coal" +27011210,coal briquettes ovoids and similar solid fuels manufactured from coal coal whether or not pulverised but not agglomerated >> bituminous coal >> coking coal +27011290,coal briquettes ovoids and similar solid fuels manufactured from coal coal whether or not pulverised but not agglomerated >> bituminous coal >> other +27011900,coal briquettes ovoids and similar solid fuels manufactured from coal coal whether or not pulverised but not agglomerated >> other coal +27011910,"COAL; BRIQUETTES, OVOIDS AND SIMILAR SOLID FUELS MANUFACTURED FROM COAL:Other coal:Coking coal" +27011920,"COAL; BRIQUETTES, OVOIDS AND SIMILAR SOLID FUELS MANUFACTURED FROM COAL:Other coal:Steam coal" +27011990,"COAL; BRIQUETTES, OVOIDS AND SIMILAR SOLID FUELS MANUFACTURED FROM COAL:Other coal:Other" +27012000,coal briquettes ovoids and similar solid fuels manufactured from coal coal whether or not pulverised but not agglomerated >> briquettes ovoids and similar solid fuels manufactured from coal +27012010,"COAL; BRIQUETTES, OVOIDS AND SIMILAR SOLID FUELS MANUFACTURED FROM COAL:Briquettes, ovoids and similar solid fuels manufactured from coal :Anthracite agglomerated" +27012090,"COAL; BRIQUETTES, OVOIDS AND SIMILAR SOLID FUELS MANUFACTURED FROM COAL:Briquettes, ovoids and similar solid fuels manufactured from coal :Other" +27020000,lignite whether or not agglomerated excluding jet +27021000,"LIGNITE, WHETHER OR NOT 
AGGLOMERATED, EXCLUDING JET::Lignite, whether or not pulverised, but not agglomerated" +27022000,"LIGNITE, WHETHER OR NOT AGGLOMERATED, EXCLUDING JET::Agglomerated lignite" +27030000,peat including peat litter whether or not agglomerated peat including peat litter whether or not agglomerated +27030010,"PEAT (INCLUDING PEAT LITTER), WHETHER OR NOT AGGLOMERATED:Peat (including peat litter), whether or not agglomerated :Peat whether or not compressed into bales, but not agglomerated" +27030090,"PEAT (INCLUDING PEAT LITTER), WHETHER OR NOT AGGLOMERATED:Peat (including peat litter), whether or not agglomerated :Other" +27040000,coke and of coal of lignite or of peat whether or not agglomerated retort carbon coke and of coal of lignite or of peat whether or not agglomerated retort carbon +27040010,"COKE AND SEMI-COKE OF COAL, OF LIGNITE OR OF PEAT, WHETHER OR NOT AGGLOMERATED; RETORT CARBON:Coke and semi-coke of coal, of lignite or of peat, whether or not agglomerated; retort carbon :Retort carbon (gas carbon)" +27040020,"COKE AND SEMI-COKE OF COAL, OF LIGNITE OR OF PEAT, WHETHER OR NOT AGGLOMERATED; RETORT CARBON:Coke and semi-coke of coal, of lignite or of peat, whether or not agglomerated; retort carbon :Coke and semi-coke of lignite or of peat" +27040030,"COKE AND SEMI-COKE OF COAL, OF LIGNITE OR OF PEAT, WHETHER OR NOT AGGLOMERATED; RETORT CARBON:Coke and semi-coke of coal, of lignite or of peat, whether or not agglomerated; retort carbon :Hard coke of coal" +27040040,"COKE AND SEMI-COKE OF COAL, OF LIGNITE OR OF PEAT, WHETHER OR NOT AGGLOMERATED; RETORT CARBON:Coke and semi-coke of coal, of lignite or of peat, whether or not agglomerated; retort carbon :Soft coke of coal" +27040090,"COKE AND SEMI-COKE OF COAL, OF LIGNITE OR OF PEAT, WHETHER OR NOT AGGLOMERATED; RETORT CARBON:Coke and semi-coke of coal, of lignite or of peat, whether or not agglomerated; retort carbon :Other" +27050000,"::COAL GAS, WATER GAS, PRODUCER GAS AND SIMILAR GASES, OTHER THAN PETROLEUM GASES AND OTHER GASEOUS HYDROCARBONS" +27060000,tar distilled from coal from lignite or from peat and other mineral tars whether or not dehydrated or partially distilled including reconstituted tars +27060010,"TAR DISTILLED FROM COAL, FROM LIGNITE OR FROM PEAT AND OTHER MINERAL TARS, WHETHER OR NOT DEHYDRATED OR PARTIALLY DISTILLED, INCLUDING RECONSTITUTED TARS:Tar distilled from coal, from lignite or from peat and other mineral tars, whether or not dehydrated or partially distilled, including reconstituted tars :Coal tar" +27060090,"TAR DISTILLED FROM COAL, FROM LIGNITE OR FROM PEAT AND OTHER MINERAL TARS, WHETHER OR NOT DEHYDRATED OR PARTIALLY DISTILLED, INCLUDING RECONSTITUTED TARS:Tar distilled from coal, from lignite or from peat and other mineral tars, whether or not dehydrated or partially distilled, including reconstituted tars :Other" +27070000,oils and other products of the distillation of high temperature coal tar similar products in which the weight of the aromatic constituents exceeds that of the constituents +27071000,OILS AND OTHER PRODUCTS OF THE DISTILLATION OF HIGH TEMPERATURE COAL TAR; SIMILAR PRODUCTS IN WHICH THE WEIGHT OF THE AROMATIC CONSTITUENTS EXCEEDS THAT OF THE NON-AROMATIC CONSTITUENTS::Benzol (benzene) +27072000,OILS AND OTHER PRODUCTS OF THE DISTILLATION OF HIGH TEMPERATURE COAL TAR; SIMILAR PRODUCTS IN WHICH THE WEIGHT OF THE AROMATIC CONSTITUENTS EXCEEDS THAT OF THE NON-AROMATIC CONSTITUENTS::Toluol (toluene) +27073000,OILS AND OTHER PRODUCTS OF THE DISTILLATION OF HIGH TEMPERATURE COAL TAR; SIMILAR 
PRODUCTS IN WHICH THE WEIGHT OF THE AROMATIC CONSTITUENTS EXCEEDS THAT OF THE NON-AROMATIC CONSTITUENTS::Xylol (xylenes) +27074000,OILS AND OTHER PRODUCTS OF THE DISTILLATION OF HIGH TEMPERATURE COAL TAR; SIMILAR PRODUCTS IN WHICH THE WEIGHT OF THE AROMATIC CONSTITUENTS EXCEEDS THAT OF THE NON-AROMATIC CONSTITUENTS::Naphthalene +27075000,OILS AND OTHER PRODUCTS OF THE DISTILLATION OF HIGH TEMPERATURE COAL TAR; SIMILAR PRODUCTS IN WHICH THE WEIGHT OF THE AROMATIC CONSTITUENTS EXCEEDS THAT OF THE NON-AROMATIC CONSTITUENTS::Other aromatic hydrocarbon mixtures of which 65% or more by volume (including losses) distils at 250 deg C by the ASTM D 86 method +27079100,OILS AND OTHER PRODUCTS OF THE DISTILLATION OF HIGH TEMPERATURE COAL TAR; SIMILAR PRODUCTS IN WHICH THE WEIGHT OF THE AROMATIC CONSTITUENTS EXCEEDS THAT OF THE NON-AROMATIC CONSTITUENTS::Creosote oils +27079900,OILS AND OTHER PRODUCTS OF THE DISTILLATION OF HIGH TEMPERATURE COAL TAR; SIMILAR PRODUCTS IN WHICH THE WEIGHT OF THE AROMATIC CONSTITUENTS EXCEEDS THAT OF THE NON-AROMATIC CONSTITUENTS::Other +27080000,pitch and pitch coke obtained from coal tar or from other mineral tars +27081000,pitch and pitch coke obtained from coal tar or from other mineral tars >> pitch +27081010,"PITCH AND PITCH COKE, OBTAINED FROM COAL TAR OR FROM OTHER MINERAL TARS:Pitch :Obtained by blending with creosote oil or other coal tar distillates" +27081090,"PITCH AND PITCH COKE, OBTAINED FROM COAL TAR OR FROM OTHER MINERAL TARS:Pitch :Other" +27082000,"PITCH AND PITCH COKE, OBTAINED FROM COAL TAR OR FROM OTHER MINERAL TARS::Pitch coke" +27090000,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, CRUDE:PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, CRUDE:PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, CRUDE(OLD tariff)" +27090010,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, CRUDE:PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, CRUDE:PETROLEUM CRUDE(OLD tariff)" +27090090,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, CRUDE:PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, CRUDE:OTHER(OLD tariff)" +27091000,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, CRUDE:petroleum oil:Petroleum oils and oils obtained from bituminous minerals(OLD tariff)" +27092000,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, CRUDE:petroleum crude:Petroleum crude(OLD tariff)" +27100000,petroleum oils and oils obtained from bituminous minerals other than crude preparations not elsewhere specified or included containing by weight 70 or more of petroleum oils or of oils obtained from bituminous minerals these oils being the basic constituents of the preparations waste oils petroleum oils and oils obtained from bituminous minerals other than crude and preparations not elsewhere specified or included containing by weight 70 or more of petroleum oils or of oils obtained from bituminous minerals these oils being the basic constituents of the preparations other than those containing biodiesel and other than waste oils +27101200,petroleum oils and oils obtained from bituminous minerals other than crude preparations not elsewhere specified or included containing by weight 70 or more of petroleum oils or of oils obtained from bituminous minerals these oils being the basic constituents of the preparations waste oils petroleum oils and oils obtained from bituminous minerals other than crude and preparations not elsewhere specified or included containing by weight 70 or more of petroleum oils or of
oils obtained from bituminous minerals these oils being the basic constituents of the preparations other than those containing biodiesel and other than waste oils >> light oils and preparations naphtha +27101211,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Light Oils and Preparations:Special boiling point spirits (other than benzene, toluol) with nominal boiling point range 55 - 115 deg C(OLD tariff)" +27101212,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Light Oils and Preparations:Special boiling point spirits (other than benzene, bonzole, toluene and toluol) with nominal boiling point range 63- 70 deg C(OLD tariff)" +27101213,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Light Oils and Preparations:Other Special boiling point spirits (other than benzene, benzol tolune and toluol)(OLD tariff)" +27101219,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Light Oils and Preparations:Other(OLD tariff)" +27101220,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Light Oils and Preparations:Natural Gasoline liquid (NGL)(OLD tariff)" +27101221,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Light Oils and Preparations:Light Naptha" +27101222,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Light Oils and Preparations:Heavy Naptha" +27101229,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Light Oils and Preparations:Full orange naphtha" +27101231,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF 
OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Light Oils and Preparations:Solvent 60/80" +27101232,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Light Oils and Preparations:solvent 50/120" +27101239,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Light Oils and Preparations:Solvent 145/2015" +27101241,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Light Oils and Preparations:Motor gasoline conforming to standard IS 2796," +27101242,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Light Oils and Preparations:e20 fuel conforming to standard IS 17021" +27101243,petroleum oils and oils obtained from bituminous minerals other than crude preparations not elsewhere specified or included containing by weight 70 or more of petroleum oils or of oils obtained from bituminous minerals these oils being the basic constituents of the preparations waste oils petroleum oils and oils obtained from bituminous minerals other than crude and preparations not elsewhere specified or included containing by weight 70 or more of petroleum oils or of oils obtained from bituminous minerals these oils being the basic constituents of the preparations other than those containing biodiesel and other than waste oils >> light oils and preparations naphtha >> e 12 fuel conforming to standard is 17586 +27101244,petroleum oils and oils obtained from bituminous minerals other than crude preparations not elsewhere specified or included containing by weight 70 or more of petroleum oils or of oils obtained from bituminous minerals these oils being the basic constituents of the preparations waste oils petroleum oils and oils obtained from bituminous minerals other than crude and preparations not elsewhere specified or included containing by weight 70 or more of petroleum oils or of oils obtained from bituminous minerals these oils being the basic constituents of the preparations other than those containing biodiesel and other than waste oils >> light oils and preparations naphtha >> e 15 fuel conforming to standard is 17586 +27101249,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Light Oils and Preparations:M15 fuel conforming to standard IS 17076" +27101250,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; 
PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Light Oils and Preparations:Aviation gasoline conforming to standard IS 1604(OLD tariff)" +27101290,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Light Oils and Preparations:Other(OLD tariff)" +27101910,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Superior kerosine oil (SKO)(OLD tariff)" +27101920,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :AVIATION TURBINE FUEL (ATF) TO BE READ AS ""SOLVENT 125/240(PETROLEUM HYDROCARBON SOLVENT AS SPECIFIED UNDER STANDARD IS 1745""(OLD tariff)" +27101930,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :High speed diesel (HSD)(OLD tariff)" +27101931,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Kerosene intermediate(OLD tariff)" +27101932,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Kerosene conforming to standard IS1459(OLD tariff)" +27101939,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Aviation turbine fuels, kerosine type conforming to standard IS 1571(OLD tariff)" +27101940,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Light diesel oil (LDO)(OLD tariff)" +27101941,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE 
BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Gas Oil(OLD tariff)" +27101942,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Vaccum gas oil(OLD tariff)" +27101943,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Light diesel oil conforming to standard IS 15770(OLD tariff)" +27101944,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Automotive diesel fuel not containing biodiesel CONFORMING TO STANDARD IS 1460(OLD tariff)" +27101949,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :High flash high speed diesel fuel conformiing TO STANDARD IS 16861(OLD tariff)" +27101950,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Fuel oil(OLD tariff)" +27101951,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Grade LV(OLD tariff)" +27101952,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Grade MV1(OLD tariff)" +27101953,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Grade MV2(OLD tariff)" +27101959,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Grade HV(OLD tariff)" +27101960,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC 
CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Base oil(OLD tariff)" +27101961,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Distillate oil(OLD tariff)" +27101969,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Residual oil(OLD tariff)" +27101970,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Jute batching oil and textile oil(OLD tariff)" +27101971,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Base oil(OLD tariff)" +27101972,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Engine Oil (internal combustion engine crankcase oils)conforming to standard IS 13656(OLD tariff)" +27101973,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Engine oil conforming to standard IS 14234(OLD tariff)" +27101974,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :automotive gear oil conforming to standard IS 1118(OLD tariff)" +27101975,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Industrial gear oil conforming to standard IS 8406(OLD tariff)" +27101976,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :General purpose machine and spindle oils conforming to standard IS 493(OLD tariff)" +27101977,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY 
WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Turbine lubricating oil conforming to standard IS 1012(OLD tariff)" +27101978,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Other Lubricating oil, conforming to anyother BIS standard(OLD tariff)" +27101979,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Other Lubricating oil not conforming to any BIS standard(OLD tariff)" +27101980,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Lubricating oil(OLD tariff)" +27101981,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Cutting oil conforming to standard IS 1115(OLD tariff)" +27101982,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Cutting oil (neat) conforming to standard IS 3065(OLD tariff)" +27101983,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Hydraulic oil conforming to standard IS 3098 or IS 11656(OLD tariff)" +27101984,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Industrial white oil conforming to standard IS 1083(OLD tariff)" +27101985,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Insulating oil for transformer and circuit breaker (transformer and circuit breaker oils) conforming to standard IS 335 or IS 12463(OLD tariff)" +27101986,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS 
MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Minseral oil for cosmetic industry conforming to standard IS 7299(OLD tariff)" +27101987,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Jute batching oil conforming to standard IS 1758(OLD tariff)" +27101988,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :other cutting oil, hydraulic oil, industrial white oil, jute batching oil, mineral oil for cosmetic industry, transformer oil conforming to any other BIS standard(OLD tariff)" +27101989,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :other cutting oil, hydraulic oil, industrial white oil, jute batching oil, mineral oil for cosmetic industry, transformer oil not conforming to any other BIS standard(OLD tariff)" +27101990,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Other :Other(OLD tariff)" +27102000,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Petroleum Oils:Petroleum Oils and oils obtained from bituminous minerals (other than crude) and preparations not elsewhere specified or included, containing by weight 70% or more of petroleum oils or of oils obtained from bituminous minerals, these oils being the basic constituents of the preparations, containing biodiesel, other than waste oils." 
+27102010,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Petroleum Oils:Automotive diesel fuel, containing biodiesel conforming to standard IS 1460" +27102020,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Petroleum Oils:Diesel fuel blend (B6 to B20) conforming to standard IS 16531" +27102090,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS :Petroleum Oils:Other" +27109100,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS ::Containing polychlorinated biphenyls (PCBs), polychlorinated terphenyls (PCTs) or polybrominated biphenyls (PBBs)(OLD tariff)" +27109900,"PETROLEUM OILS AND OILS OBTAINED FROM BITUMINOUS MINERALS, OTHER THAN CRUDE; PREPARATIONS NOT ELSEWHERE SPECIFIED OR INCLUDED, CONTAINING BY WEIGHT 70% OR MORE OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS, THESE OILS BEING THE BASIC CONSTITUENTS OF THE PREPARATIONS; WASTE OILS ::Other"";(OLD tariff)" +27110000,petroleum gases and other gaseous hydrocarbons liquified +27111100,PETROLEUM GASES AND OTHER GASEOUS HYDROCARBONS ::Natural gas +27111200,PETROLEUM GASES AND OTHER GASEOUS HYDROCARBONS ::Propane +27111300,PETROLEUM GASES AND OTHER GASEOUS HYDROCARBONS ::Butanes +27111400,"PETROLEUM GASES AND OTHER GASEOUS HYDROCARBONS ::Ethylene, propylene, butylene and butadiene" +27111900,PETROLEUM GASES AND OTHER GASEOUS HYDROCARBONS :Other:Other +27111910,PETROLEUM GASES AND OTHER GASEOUS HYDROCARBONS :Other:LPG (for non-automotive purposes) conforming to standard IS 4576 +27111920,PETROLEUM GASES AND OTHER GASEOUS HYDROCARBONS :Other:LPG (for automotive purposes) conforming to standard IS 14861 +27111990,PETROLEUM GASES AND OTHER GASEOUS HYDROCARBONS :Other:Other +27112100,PETROLEUM GASES AND OTHER GASEOUS HYDROCARBONS ::Natural gas +27112900,PETROLEUM GASES AND OTHER GASEOUS HYDROCARBONS ::Other +27120000,petroleum jelly paraffin wax microcrystalline petroleum wax slack wax ozokerite lignite wax peat wax other mineral waxes and similar products obtained by synthesis or by other processes whether or not coloured +27121000,petroleum jelly paraffin wax microcrystalline petroleum wax slack wax ozokerite lignite wax peat wax other mineral waxes and similar products obtained by synthesis or by other processes whether or not coloured >> petroleum jelly +27121010,"PETROLEUM JELLY, PARAFFIN WAX, MICROCRYSTALLINE PETROLEUM WAX, SLACK WAX, OZOKERITE, LIGNITE WAX, PEAT WAX, OTHER MINERAL WAXES, AND SIMILAR PRODUCTS OBTAINED BY SYNTHESIS OR BY OTHER PROCESSES, WHETHER OR NOT COLOURED:Petroleum jelly :Crude" +27121090,"PETROLEUM JELLY, PARAFFIN WAX, 
MICROCRYSTALLINE PETROLEUM WAX, SLACK WAX, OZOKERITE, LIGNITE WAX, PEAT WAX, OTHER MINERAL WAXES, AND SIMILAR PRODUCTS OBTAINED BY SYNTHESIS OR BY OTHER PROCESSES, WHETHER OR NOT COLOURED:Petroleum jelly :Other" +27122000,"PETROLEUM JELLY, PARAFFIN WAX, MICROCRYSTALLINE PETROLEUM WAX, SLACK WAX, OZOKERITE, LIGNITE WAX, PEAT WAX, OTHER MINERAL WAXES, AND SIMILAR PRODUCTS OBTAINED BY SYNTHESIS OR BY OTHER PROCESSES, WHETHER OR NOT COLOURED:Paraffin wax containing by weight less than 0.75 % of oil:Paraffin wax containing by weight less that 0.75% of oil" +27122010,"PETROLEUM JELLY, PARAFFIN WAX, MICROCRYSTALLINE PETROLEUM WAX, SLACK WAX, OZOKERITE, LIGNITE WAX, PEAT WAX, OTHER MINERAL WAXES, AND SIMILAR PRODUCTS OBTAINED BY SYNTHESIS OR BY OTHER PROCESSES, WHETHER OR NOT COLOURED:Paraffin wax containing by weight less than 0.75 % of oil:Chlorinated paraffin wax(OLD tariff)" +27122090,"PETROLEUM JELLY, PARAFFIN WAX, MICROCRYSTALLINE PETROLEUM WAX, SLACK WAX, OZOKERITE, LIGNITE WAX, PEAT WAX, OTHER MINERAL WAXES, AND SIMILAR PRODUCTS OBTAINED BY SYNTHESIS OR BY OTHER PROCESSES, WHETHER OR NOT COLOURED:Paraffin wax containing by weight less than 0.75 % of oil:Other(OLD tariff)" +27129000,petroleum jelly paraffin wax microcrystalline petroleum wax slack wax ozokerite lignite wax peat wax other mineral waxes and similar products obtained by synthesis or by other processes whether or not coloured >> other +27129010,"PETROLEUM JELLY, PARAFFIN WAX, MICROCRYSTALLINE PETROLEUM WAX, SLACK WAX, OZOKERITE, LIGNITE WAX, PEAT WAX, OTHER MINERAL WAXES, AND SIMILAR PRODUCTS OBTAINED BY SYNTHESIS OR BY OTHER PROCESSES, WHETHER OR NOT COLOURED:Other :Micro-crystalline petroleum wax" +27129020,"PETROLEUM JELLY, PARAFFIN WAX, MICROCRYSTALLINE PETROLEUM WAX, SLACK WAX, OZOKERITE, LIGNITE WAX, PEAT WAX, OTHER MINERAL WAXES, AND SIMILAR PRODUCTS OBTAINED BY SYNTHESIS OR BY OTHER PROCESSES, WHETHER OR NOT COLOURED:Other :Lignite wax" +27129030,"PETROLEUM JELLY, PARAFFIN WAX, MICROCRYSTALLINE PETROLEUM WAX, SLACK WAX, OZOKERITE, LIGNITE WAX, PEAT WAX, OTHER MINERAL WAXES, AND SIMILAR PRODUCTS OBTAINED BY SYNTHESIS OR BY OTHER PROCESSES, WHETHER OR NOT COLOURED:Other :Slack wax" +27129040,"PETROLEUM JELLY, PARAFFIN WAX, MICROCRYSTALLINE PETROLEUM WAX, SLACK WAX, OZOKERITE, LIGNITE WAX, PEAT WAX, OTHER MINERAL WAXES, AND SIMILAR PRODUCTS OBTAINED BY SYNTHESIS OR BY OTHER PROCESSES, WHETHER OR NOT COLOURED:Other :Parafinwax containing by weight 0.75% or more of oil" +27129090,"PETROLEUM JELLY, PARAFFIN WAX, MICROCRYSTALLINE PETROLEUM WAX, SLACK WAX, OZOKERITE, LIGNITE WAX, PEAT WAX, OTHER MINERAL WAXES, AND SIMILAR PRODUCTS OBTAINED BY SYNTHESIS OR BY OTHER PROCESSES, WHETHER OR NOT COLOURED:Other :Other" +27130000,petroleum coke petroleum bitumen and other residues of petroleum oils or of oils obtained from bituminous minerals petroleum coke +27131100,"PETROLEUM COKE, PETROLEUM BITUMEN AND OTHER RESIDUES OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS :Not calcined:Not calcined" +27131110,"PETROLEUM COKE, PETROLEUM BITUMEN AND OTHER RESIDUES OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS :Not calcined:Raw Petroleum coke for anode making in aluminium industry conforming to standard IS 17049" +27131190,"PETROLEUM COKE, PETROLEUM BITUMEN AND OTHER RESIDUES OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS :Not calcined:Other" +27131200,"PETROLEUM COKE, PETROLEUM BITUMEN AND OTHER RESIDUES OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS :Calcined:Calcined" 
+27131210,"PETROLEUM COKE, PETROLEUM BITUMEN AND OTHER RESIDUES OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS :Calcined:calcined petroleum code for anode making in aluminium industry conforming to standard IS 17049" +27131290,"PETROLEUM COKE, PETROLEUM BITUMEN AND OTHER RESIDUES OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS :Calcined:other" +27132000,"PETROLEUM COKE, PETROLEUM BITUMEN AND OTHER RESIDUES OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS ::Petroleum bitumen" +27139000,"PETROLEUM COKE, PETROLEUM BITUMEN AND OTHER RESIDUES OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS ::Other residues of petroleum oils or of oils obtained from bituminous minerals" +27140000,bitumen and asphalt natural bituminous or oil shale and tar sands asphaltites and asphaltic rocks +27141000,"BITUMEN AND ASPHALT, NATURAL; BITUMINOUS OR OIL SHALE AND TAR SANDS; ASPHALTITES AND ASPHALTIC ROCKS::Bituminous or oil shale and tar sands" +27149000,bitumen and asphalt natural bituminous or oil shale and tar sands asphaltites and asphaltic rocks >> other +27149010,"BITUMEN AND ASPHALT, NATURAL; BITUMINOUS OR OIL SHALE AND TAR SANDS; ASPHALTITES AND ASPHALTIC ROCKS:Other :Asphalt, natural" +27149020,"BITUMEN AND ASPHALT, NATURAL; BITUMINOUS OR OIL SHALE AND TAR SANDS; ASPHALTITES AND ASPHALTIC ROCKS:Other :Bitumen, natural(OLD tariff)" +27149030,"BITUMEN AND ASPHALT, NATURAL; BITUMINOUS OR OIL SHALE AND TAR SANDS; ASPHALTITES AND ASPHALTIC ROCKS:Other :Gilsonete" +27149090,"BITUMEN AND ASPHALT, NATURAL; BITUMINOUS OR OIL SHALE AND TAR SANDS; ASPHALTITES AND ASPHALTIC ROCKS:Other :Other" +27150000,bituminous mixtures based on natural on natural bitumen on petroleum bitumen on mineral tar or on mineral tar pitch for example bituminous mastics cut backs bituminous mixtures based on natural asphalt on natural bitumen on petroleum bitumen on mineral tar or on mineral tar pitch for example bituminous mastics cut backs +27150010,"BITUMINOUS MIXTURES BASED ON NATURAL ASPHALT, ON NATURAL BITUMEN, ON PETROLEUM BITUMEN, ON MINERAL TAR OR ON MINERAL TAR PITCH (FOR EXAMPLE, BITUMINOUS MASTICS, CUT BACKS):Bituminous mixtures based on natural asphalt, on natural bitumen, on petroleum bitumen, on mineral tar or on mineral tar pitch (for example, bituminous mastics, cut backs) :Cut backs, bituminous or asphalt" +27150090,"BITUMINOUS MIXTURES BASED ON NATURAL ASPHALT, ON NATURAL BITUMEN, ON PETROLEUM BITUMEN, ON MINERAL TAR OR ON MINERAL TAR PITCH (FOR EXAMPLE, BITUMINOUS MASTICS, CUT BACKS):Bituminous mixtures based on natural asphalt, on natural bitumen, on petroleum bitumen, on mineral tar or on mineral tar pitch (for example, bituminous mastics, cut backs) :Other" +27160000,::ELECTRICAL ENERGY +28010000,fluorine chlorine bromine and iodine +28011000,"FLUORINE, CHLORINE, BROMINE AND IODINE::Chlorine" +28012000,"FLUORINE, CHLORINE, BROMINE AND IODINE::Iodine" +28013000,fluorine chlorine bromine and iodine >> fluorine bromine +28013010,"FLUORINE, CHLORINE, BROMINE AND IODINE:Fluorine; bromine :Fluorine" +28013020,"FLUORINE, CHLORINE, BROMINE AND IODINE:Fluorine; bromine :Bromine" +28020000,sulphur sublimed or precipitated colloidal sulphur sulphur sublimed or precipitated colloidal sulphur +28020010,"SULPHUR, SUBLIMED OR PRECIPITATED; COLLOIDAL SULPHUR:Sulphur, sublimed or precipitated; colloidal sulphur :Sublimed sulphur" +28020020,"SULPHUR, SUBLIMED OR PRECIPITATED; COLLOIDAL SULPHUR:Sulphur, sublimed or precipitated; colloidal sulphur :Precipitated sulphur" 
+28020030,"SULPHUR, SUBLIMED OR PRECIPITATED; COLLOIDAL SULPHUR:Sulphur, sublimed or precipitated; colloidal sulphur :Colloidal sulphur" +28030000,carbon carbon blacks and other forms of carbon not elsewhere specified or included carbon carbon blacks and other forms of carbon not elsewhere specified or included +28030010,CARBON (CARBON BLACKS AND OTHER FORMS OF CARBON NOT ELSEWHERE SPECIFIED OR INCLUDED):Carbon (carbon blacks and other forms of carbon not elsewhere specified or included) :Carbon blacks +28030020,CARBON (CARBON BLACKS AND OTHER FORMS OF CARBON NOT ELSEWHERE SPECIFIED OR INCLUDED):Carbon (carbon blacks and other forms of carbon not elsewhere specified or included) :Acetylene black +28030090,CARBON (CARBON BLACKS AND OTHER FORMS OF CARBON NOT ELSEWHERE SPECIFIED OR INCLUDED):Carbon (carbon blacks and other forms of carbon not elsewhere specified or included) :Other +28040000,hydrogen rare gases and other +28041000,"HYDROGEN, RARE GASES AND OTHER NON-METALS::Hydrogen" +28042100,"HYDROGEN, RARE GASES AND OTHER NON-METALS::Argon" +28042900,hydrogen rare gases and other >> other +28042910,"HYDROGEN, RARE GASES AND OTHER NON-METALS:Other :Helium" +28042990,"HYDROGEN, RARE GASES AND OTHER NON-METALS:Other :Other" +28043000,"HYDROGEN, RARE GASES AND OTHER NON-METALS::Nitrogen" +28044000,hydrogen rare gases and other >> oxygen +28044010,"HYDROGEN, RARE GASES AND OTHER NON-METALS:Oxygen :Medicinal grade" +28044090,"HYDROGEN, RARE GASES AND OTHER NON-METALS:Oxygen :Other" +28045000,hydrogen rare gases and other >> boron tellurium +28045010,"HYDROGEN, RARE GASES AND OTHER NON-METALS:Boron; tellurium:Boron" +28045020,"HYDROGEN, RARE GASES AND OTHER NON-METALS:Boron; tellurium:Tellurium" +28046100,"HYDROGEN, RARE GASES AND OTHER NON-METALS::Containing by weight not less than 99.99% of silicon" +28046900,"HYDROGEN, RARE GASES AND OTHER NON-METALS::Other" +28047000,hydrogen rare gases and other >> phosphorus +28047010,"HYDROGEN, RARE GASES AND OTHER NON-METALS:Phosphorus :Phosphorus, black" +28047020,"HYDROGEN, RARE GASES AND OTHER NON-METALS:Phosphorus :Phosphorus, red" +28047030,"HYDROGEN, RARE GASES AND OTHER NON-METALS:Phosphorus :Phosphorus, white or yellow" +28048000,"HYDROGEN, RARE GASES AND OTHER NON-METALS::Arsenic" +28049000,"HYDROGEN, RARE GASES AND OTHER NON-METALS::Selenium" +28050000,alkali or metals metals scandium and yttrium whether or not +28051100,"ALKALI OR ALKALINE-EARTH METALS; RARE-EARTH METALS,SCANDIUM AND YTTRIUM, WHETHER OR NOT INTERMIXED OR INTERALLOYED; MERCURY::Sodium" +28051200,"ALKALI OR ALKALINE-EARTH METALS; RARE-EARTH METALS,SCANDIUM AND YTTRIUM, WHETHER OR NOT INTERMIXED OR INTERALLOYED; MERCURY::Calcium" +28051900,"ALKALI OR ALKALINE-EARTH METALS; RARE-EARTH METALS,SCANDIUM AND YTTRIUM, WHETHER OR NOT INTERMIXED OR INTERALLOYED; MERCURY::Other" +28053000,"ALKALI OR ALKALINE-EARTH METALS; RARE-EARTH METALS,SCANDIUM AND YTTRIUM, WHETHER OR NOT INTERMIXED OR INTERALLOYED; MERCURY::Rare-earth metals, scandium and yttrium, whether or not intermixed or interalloyed" +28054000,"ALKALI OR ALKALINE-EARTH METALS; RARE-EARTH METALS,SCANDIUM AND YTTRIUM, WHETHER OR NOT INTERMIXED OR INTERALLOYED; MERCURY::Mercury" +28060000,hydrogen chloride hydrochloric acid chlorosulphuric acid +28061000,HYDROGEN CHLORIDE (HYDROCHLORIC ACID); CHLOROSULPHURIC ACID::Hydrogen chloride (hydrochloric acid) +28062000,HYDROGEN CHLORIDE (HYDROCHLORIC ACID); CHLOROSULPHURIC ACID::Chlorosulphuric acid +28070000,sulphuric acid oleum sulphuric acid oleum +28070010,SULPHURIC ACID; 
OLEUM:Sulphuric acid; oleum :Sulphuric acid +28070020,SULPHURIC ACID; OLEUM:Sulphuric acid; oleum :Oleum +28080000,nitric acid sulphonitric acids nitric acid sulphonitric acids +28080010,NITRIC ACID; SULPHONITRIC ACIDS:Nitric acid; sulphonitric acids :Nitric acid +28080020,NITRIC ACID; SULPHONITRIC ACIDS:Nitric acid; sulphonitric acids :Sulphonitric acids +28090000,diphosphorus pentaoxide phosphoric acid polyphosphoric acids whether or not chemically defined +28091000,"DIPHOSPHORUS PENTAOXIDE; PHOSPHORIC ACID; POLYPHOSPHORIC ACIDS, WHETHER OR NOT CHEMICALLY DEFINED::Diphosphorus pentaoxide" +28092000,diphosphorus pentaoxide phosphoric acid polyphosphoric acids whether or not chemically defined >> phosphoric acid and polyphosphoric acids +28092010,"DIPHOSPHORUS PENTAOXIDE; PHOSPHORIC ACID; POLYPHOSPHORIC ACIDS, WHETHER OR NOT CHEMICALLY DEFINED:Phosphoric acid and polyphosphoric acids :Phosphoric acid" +28092020,"DIPHOSPHORUS PENTAOXIDE; PHOSPHORIC ACID; POLYPHOSPHORIC ACIDS, WHETHER OR NOT CHEMICALLY DEFINED:Phosphoric acid and polyphosphoric acids :Polyphosphoric acids" +28100000,oxides of boron boric acids oxides of boron boric acids +28100010,OXIDES OF BORON; BORIC ACIDS::Oxides of boron +28100020,OXIDES OF BORON; BORIC ACIDS::Boric acids +28110000,other inorganic acids and other inorganic oxygen compounds of other inorganic acids +28111100,OTHER INORGANIC ACIDS AND OTHER INORGANIC OXYGEN COMPOUNDS OF NON-METALS::Hydrogen fluoride (hydrofluoric acid) +28111200,OTHER INORGANIC ACIDS AND OTHER INORGANIC OXYGEN COMPOUNDS OF NON-METALS::Hydrogen cyanide (hydrocyanic acid) +28111900,other inorganic acids and other inorganic oxygen compounds of other inorganic acids >> other +28111910,"OTHER INORGANIC ACIDS AND OTHER INORGANIC OXYGEN COMPOUNDS OF NON-METALS:Other :Hydrocyanic acid (hydrogen cyanide, prussic acid)(OLD tariff)" +28111920,OTHER INORGANIC ACIDS AND OTHER INORGANIC OXYGEN COMPOUNDS OF NON-METALS:Other :Hypophosphorus acid (phosphinic acid) +28111930,OTHER INORGANIC ACIDS AND OTHER INORGANIC OXYGEN COMPOUNDS OF NON-METALS:Other :Acids of arsenic +28111940,OTHER INORGANIC ACIDS AND OTHER INORGANIC OXYGEN COMPOUNDS OF NON-METALS:Other :Sulphonic acid +28111990,OTHER INORGANIC ACIDS AND OTHER INORGANIC OXYGEN COMPOUNDS OF NON-METALS:Other :Other +28112100,other inorganic acids and other inorganic oxygen compounds of other inorganic acids >> carbon dioxide +28112110,OTHER INORGANIC ACIDS AND OTHER INORGANIC OXYGEN COMPOUNDS OF NON-METALS:Carbon dioxide :Dry ice +28112190,OTHER INORGANIC ACIDS AND OTHER INORGANIC OXYGEN COMPOUNDS OF NON-METALS:Carbon dioxide :Other +28112200,OTHER INORGANIC ACIDS AND OTHER INORGANIC OXYGEN COMPOUNDS OF NON-METALS::Silicon dioxide +28112900,other inorganic acids and other inorganic oxygen compounds of other inorganic acids >> other +28112910,OTHER INORGANIC ACIDS AND OTHER INORGANIC OXYGEN COMPOUNDS OF NON-METALS:Other :Arsenic pentaoxide +28112920,OTHER INORGANIC ACIDS AND OTHER INORGANIC OXYGEN COMPOUNDS OF NON-METALS:Other :Arsenic trioxide +28112930,OTHER INORGANIC ACIDS AND OTHER INORGANIC OXYGEN COMPOUNDS OF NON-METALS:Other :Nitrous oxide +28112940,OTHER INORGANIC ACIDS AND OTHER INORGANIC OXYGEN COMPOUNDS OF NON-METALS:Other :Carbon monoxide +28112950,OTHER INORGANIC ACIDS AND OTHER INORGANIC OXYGEN COMPOUNDS OF NON-METALS:Other :Sulphur trioxide (sulphuric anhydride) +28112990,OTHER INORGANIC ACIDS AND OTHER INORGANIC OXYGEN COMPOUNDS OF NON-METALS:Other :Other +28120000,halides and halide oxides of chlorides and chloride oxides 
+28121010,"HALIDES AND HALIDE OXIDES OF NON-METALS:Chlorides and chloride oxides :Phosgene (carbonyl chloride, carbonyl dichloride, carbon oxy-chloride, chloroformyl chloride)(OLD tariff)" +28121021,HALIDES AND HALIDE OXIDES OF NON-METALS:Chlorides and chloride oxides :Phosphorus trichloride(OLD tariff) +28121022,HALIDES AND HALIDE OXIDES OF NON-METALS:Chlorides and chloride oxides :Phosphorous pentachloride(OLD tariff) +28121030,HALIDES AND HALIDE OXIDES OF NON-METALS:Chlorides and chloride oxides :Phosphorus oxychloride(OLD tariff) +28121041,HALIDES AND HALIDE OXIDES OF NON-METALS:Chlorides and chloride oxides :Sulphur oxychloride(OLD tariff) +28121042,HALIDES AND HALIDE OXIDES OF NON-METALS:Chlorides and chloride oxides :Sulphur monochloride(OLD tariff) +28121043,HALIDES AND HALIDE OXIDES OF NON-METALS:Chlorides and chloride oxides :Sulphur dichloride(OLD tariff) +28121047,HALIDES AND HALIDE OXIDES OF NON-METALS:Chlorides and chloride oxides :Thionyl chloride(OLD tariff) +28121050,HALIDES AND HALIDE OXIDES OF NON-METALS:Chlorides and chloride oxides :Silicon tetrachloride(OLD tariff) +28121060,HALIDES AND HALIDE OXIDES OF NON-METALS:Chlorides and chloride oxides :Arsenous trichloride(OLD tariff) +28121090,HALIDES AND HALIDE OXIDES OF NON-METALS:Chlorides and chloride oxides :Other(OLD tariff) +28121100,HALIDES AND HALIDE OXIDES OF NON-METALS::Carbonyl dichloride (phosgene) +28121200,HALIDES AND HALIDE OXIDES OF NON-METALS::Phosphorous oxychloride +28121300,HALIDES AND HALIDE OXIDES OF NON-METALS::Phosphorous trichloride +28121400,HALIDES AND HALIDE OXIDES OF NON-METALS::Phosphorous pentachloride +28121500,HALIDES AND HALIDE OXIDES OF NON-METALS::Sulphur monochloride +28121600,HALIDES AND HALIDE OXIDES OF NON-METALS::Sulphur dichloride +28121700,HALIDES AND HALIDE OXIDES OF NON-METALS::Thionyl chloride +28121900,halides and halide oxides of chlorides and chloride oxides >> other +28121910,HALIDES AND HALIDE OXIDES OF NON-METALS::Sulphur oxychloride +28121920,HALIDES AND HALIDE OXIDES OF NON-METALS::Silicon tetrachloride +28121930,HALIDES AND HALIDE OXIDES OF NON-METALS::Arsenous trichloride +28121990,HALIDES AND HALIDE OXIDES OF NON-METALS::other +28129000,HALIDES AND HALIDE OXIDES OF NON-METALS::Other +28130000,sulphides of commercial phosphorus trisulphide +28131000,SULPHIDES OF NON-METALS; COMMERCIAL PHOSPHORUS TRISULPHIDE::Carbon disulphide +28139000,sulphides of commercial phosphorus trisulphide >> other +28139010,SULPHIDES OF NON-METALS; COMMERCIAL PHOSPHORUS TRISULPHIDE:Other :Arsenic disulphide (artificial) +28139020,SULPHIDES OF NON-METALS; COMMERCIAL PHOSPHORUS TRISULPHIDE:Other :Commercial phosphorus trisulphide +28139090,SULPHIDES OF NON-METALS; COMMERCIAL PHOSPHORUS TRISULPHIDE:Other :Other +28140000,ammonia anhydrous or in aqueous solution +28141000,"AMMONIA, ANHYDROUS OR IN AQUEOUS SOLUTION::Anhydrous ammonia" +28142000,"AMMONIA, ANHYDROUS OR IN AQUEOUS SOLUTION::Ammonia in aqueous solution" +28150000,sodium hydroxide caustic soda potassium hydroxide caustic potash peroxides of sodium or potassium sodium hydroxide caustic soda +28151100,sodium hydroxide caustic soda potassium hydroxide caustic potash peroxides of sodium or potassium sodium hydroxide caustic soda >> solid +28151110,SODIUM HYDROXIDE (CAUSTIC SODA); POTASSIUM HYDROXIDE (CAUSTIC POTASH); PEROXIDES OF SODIUM OR POTASSIUM:Solid :Flakes +28151190,SODIUM HYDROXIDE (CAUSTIC SODA); POTASSIUM HYDROXIDE (CAUSTIC POTASH); PEROXIDES OF SODIUM OR POTASSIUM:Solid :Other +28151200,SODIUM HYDROXIDE (CAUSTIC SODA); POTASSIUM 
HYDROXIDE (CAUSTIC POTASH); PEROXIDES OF SODIUM OR POTASSIUM::In aqueous solution (soda lye or liquid soda) +28152000,SODIUM HYDROXIDE (CAUSTIC SODA); POTASSIUM HYDROXIDE (CAUSTIC POTASH); PEROXIDES OF SODIUM OR POTASSIUM::Potassium hydroxide (caustic potash) +28153000,SODIUM HYDROXIDE (CAUSTIC SODA); POTASSIUM HYDROXIDE (CAUSTIC POTASH); PEROXIDES OF SODIUM OR POTASSIUM::Peroxides of sodium or potassium +28160000,hydroxide and peroxide of magnesium oxides hydroxides and strontium or peroxides of barium +28161000,hydroxide and peroxide of magnesium oxides hydroxides and strontium or peroxides of barium >> hydroxide and peroxide of magnesium +28161010,"HYDROXIDE AND PEROXIDE OF MAGNESIUM; OXIDES, HYDROXIDES AND PEROXIDES, OF STRONTIUM OR BARIUM:Hydroxide and peroxide of magnesium :Hydroxide of magnesium" +28161020,"HYDROXIDE AND PEROXIDE OF MAGNESIUM; OXIDES, HYDROXIDES AND PEROXIDES, OF STRONTIUM OR BARIUM:Hydroxide and peroxide of magnesium :Peroxide of magnesium" +28164000,"HYDROXIDE AND PEROXIDE OF MAGNESIUM; OXIDES, HYDROXIDES AND PEROXIDES, OF STRONTIUM OR BARIUM::Oxides, hydroxides and peroxides, of strontium or barium" +28170000,zinc oxide zinc peroxide zinc oxide zinc peroxide +28170010,ZINC OXIDE; ZINC PEROXIDE:Zinc oxide; zinc peroxide :Zinc oxide +28170020,ZINC OXIDE; ZINC PEROXIDE:Zinc oxide; zinc peroxide :Zinc peroxide +28180000,artificial corundum whether or chemically defined aluminium oxide aluminium hydroxide +28181000,"ARTIFICIAL CORUNDUM, WHETHER OR NOT CHEMICALLY DEFINED; ALUMINIUM OXIDE; ALUMINIUM HYDROXIDE::Artificial corundum, whether or not chemically defined" +28182000,artificial corundum whether or chemically defined aluminium oxide aluminium hydroxide >> aluminium oxide other than artificial corundum alumina calcined +28182010,"ARTIFICIAL CORUNDUM, WHETHER OR NOT CHEMICALLY DEFINED; ALUMINIUM OXIDE; ALUMINIUM HYDROXIDE:Aluminium oxide, other than artificial corundum :Alumina, calcined(OLD tariff)" +28182011,artificial corundum whether or chemically defined aluminium oxide aluminium hydroxide >> aluminium oxide other than artificial corundum alumina calcined >> metallurgical grade conforming to is 17441 +28182019,artificial corundum whether or chemically defined aluminium oxide aluminium hydroxide >> aluminium oxide other than artificial corundum alumina calcined >> non metallurgical grade conforming to is 17441 +28182090,"ARTIFICIAL CORUNDUM, WHETHER OR NOT CHEMICALLY DEFINED; ALUMINIUM OXIDE; ALUMINIUM HYDROXIDE:Aluminium oxide, other than artificial corundum :Other" +28183000,"ARTIFICIAL CORUNDUM, WHETHER OR NOT CHEMICALLY DEFINED; ALUMINIUM OXIDE; ALUMINIUM HYDROXIDE::Aluminium hydroxide" +28190000,chromium oxides and hydroxides +28191000,CHROMIUM OXIDES AND HYDROXIDES::Chromium trioxide +28199000,CHROMIUM OXIDES AND HYDROXIDES::Other +28200000,manganese oxides +28201000,MANGANESE OXIDES::Manganese dioxide +28209000,MANGANESE OXIDES::Other +28210000,iron oxides and hydroxides earth colours containing or more 70 by weight of combined iron evaluated as fe2o3 +28211000,iron oxides and hydroxides earth colours containing or more 70 by weight of combined iron evaluated as fe2o3 >> iron oxides and hydroxides +28211010,IRON OXIDES AND HYDROXIDES; EARTH COLOURS CONTAINING 70% OR MORE BY WEIGHT OF COMBINED IRON EVALUATED AS Fe 2 O 3:Iron oxides and hydroxides :Iron oxides +28211020,IRON OXIDES AND HYDROXIDES; EARTH COLOURS CONTAINING 70% OR MORE BY WEIGHT OF COMBINED IRON EVALUATED AS Fe 2 O 3:Iron oxides and hydroxides :Iron hydroxides +28212000,IRON OXIDES AND 
HYDROXIDES; EARTH COLOURS CONTAINING 70% OR MORE BY WEIGHT OF COMBINED IRON EVALUATED AS Fe 2 O 3::Earth colours +28220000,cobalt oxides and hydroxides commercial cobalt oxides cobalt oxides and hydroxides commercial cobalt oxides +28220010,COBALT OXIDES AND HYDROXIDES; COMMERCIAL COBALT OXIDES:Cobalt oxides and hydroxides; commercial cobalt oxides :Cobalt oxides +28220020,COBALT OXIDES AND HYDROXIDES; COMMERCIAL COBALT OXIDES:Cobalt oxides and hydroxides; commercial cobalt oxides :Cobalt hydroxides +28220030,COBALT OXIDES AND HYDROXIDES; COMMERCIAL COBALT OXIDES:Cobalt oxides and hydroxides; commercial cobalt oxides :Commercial cobalt oxides +28230000,titanium oxides titanium oxides +28230010,TITANIUM OXIDES:Titanium oxides :Titanium dioxide +28230090,TITANIUM OXIDES:Titanium oxides :Other +28240000,lead oxides red lead and orange lead +28241000,lead oxides red lead and orange lead >> lead monoxide litharge massicot +28241010,"LEAD OXIDES; RED LEAD AND ORANGE LEAD:Lead monoxide (litharge, massicot) :Litharge" +28241020,"LEAD OXIDES; RED LEAD AND ORANGE LEAD:Lead monoxide (litharge, massicot) :Massicot" +28249000,LEAD OXIDES; RED LEAD AND ORANGE LEAD::Other +28250000,hydrazine and hydroxylamine and their inorganic salts other inorganic bases other metal oxides hydroxides and peroxides +28251000,hydrazine and hydroxylamine and their inorganic salts other inorganic bases other metal oxides hydroxides and peroxides >> and hydrazine hydroxylamine and their inorganic salts +28251010,"HYDRAZINE AND HYDROXYLAMINE AND THEIR INORGANIC SALTS; OTHER INORGANIC BASES; OTHER METAL OXIDES, HYDROXIDES AND PEROXIDES:Hydrazine and hydroxylamine and their inorganic salts :Hydrazine anhydrous" +28251020,"HYDRAZINE AND HYDROXYLAMINE AND THEIR INORGANIC SALTS; OTHER INORGANIC BASES; OTHER METAL OXIDES, HYDROXIDES AND PEROXIDES:Hydrazine and hydroxylamine and their inorganic salts :Hydrazine hydrate" +28251030,"HYDRAZINE AND HYDROXYLAMINE AND THEIR INORGANIC SALTS; OTHER INORGANIC BASES; OTHER METAL OXIDES, HYDROXIDES AND PEROXIDES:Hydrazine and hydroxylamine and their inorganic salts :Hydrazine sulphate" +28251040,"HYDRAZINE AND HYDROXYLAMINE AND THEIR INORGANIC SALTS; OTHER INORGANIC BASES; OTHER METAL OXIDES, HYDROXIDES AND PEROXIDES:Hydrazine and hydroxylamine and their inorganic salts :Hydroxylamine sulphate" +28251090,"HYDRAZINE AND HYDROXYLAMINE AND THEIR INORGANIC SALTS; OTHER INORGANIC BASES; OTHER METAL OXIDES, HYDROXIDES AND PEROXIDES:Hydrazine and hydroxylamine and their inorganic salts :Other" +28252000,"HYDRAZINE AND HYDROXYLAMINE AND THEIR INORGANIC SALTS; OTHER INORGANIC BASES; OTHER METAL OXIDES, HYDROXIDES AND PEROXIDES::Lithium oxide and hydroxide" +28253000,hydrazine and hydroxylamine and their inorganic salts other inorganic bases other metal oxides hydroxides and peroxides >> vanadium oxides and hydroxides +28253010,"HYDRAZINE AND HYDROXYLAMINE AND THEIR INORGANIC SALTS; OTHER INORGANIC BASES; OTHER METAL OXIDES, HYDROXIDES AND PEROXIDES:Vanadium oxides and hydroxides :Vanadium pentaoxide flakes" +28253090,"HYDRAZINE AND HYDROXYLAMINE AND THEIR INORGANIC SALTS; OTHER INORGANIC BASES; OTHER METAL OXIDES, HYDROXIDES AND PEROXIDES:Vanadium oxides and hydroxides :Other" +28254000,"HYDRAZINE AND HYDROXYLAMINE AND THEIR INORGANIC SALTS; OTHER INORGANIC BASES; OTHER METAL OXIDES, HYDROXIDES AND PEROXIDES::Nickel oxides and hydroxides" +28255000,"HYDRAZINE AND HYDROXYLAMINE AND THEIR INORGANIC SALTS; OTHER INORGANIC BASES; OTHER METAL OXIDES, HYDROXIDES AND PEROXIDES::Copper oxides and hydroxides" 
+28256000,hydrazine and hydroxylamine and their inorganic salts other inorganic bases other metal oxides hydroxides and peroxides >> germanium oxides and zirconium dioxide +28256010,"HYDRAZINE AND HYDROXYLAMINE AND THEIR INORGANIC SALTS; OTHER INORGANIC BASES; OTHER METAL OXIDES, HYDROXIDES AND PEROXIDES:Germanium oxides and zirconium dioxide :Germanium oxides" +28256020,"HYDRAZINE AND HYDROXYLAMINE AND THEIR INORGANIC SALTS; OTHER INORGANIC BASES; OTHER METAL OXIDES, HYDROXIDES AND PEROXIDES:Germanium oxides and zirconium dioxide :Zirconium dioxide" +28257000,hydrazine and hydroxylamine and their inorganic salts other inorganic bases other metal oxides hydroxides and peroxides >> molybdenum oxides and hydroxides +28257010,"HYDRAZINE AND HYDROXYLAMINE AND THEIR INORGANIC SALTS; OTHER INORGANIC BASES; OTHER METAL OXIDES, HYDROXIDES AND PEROXIDES:Molybdenum oxides and hydroxides :Molybdenum trioxide" +28257020,"HYDRAZINE AND HYDROXYLAMINE AND THEIR INORGANIC SALTS; OTHER INORGANIC BASES; OTHER METAL OXIDES, HYDROXIDES AND PEROXIDES:Molybdenum oxides and hydroxides :Molybdic acid" +28257090,"HYDRAZINE AND HYDROXYLAMINE AND THEIR INORGANIC SALTS; OTHER INORGANIC BASES; OTHER METAL OXIDES, HYDROXIDES AND PEROXIDES:Molybdenum oxides and hydroxides :Other" +28258000,"HYDRAZINE AND HYDROXYLAMINE AND THEIR INORGANIC SALTS; OTHER INORGANIC BASES; OTHER METAL OXIDES, HYDROXIDES AND PEROXIDES::Antimony oxides" +28259000,hydrazine and hydroxylamine and their inorganic salts other inorganic bases other metal oxides hydroxides and peroxides >> other +28259010,"HYDRAZINE AND HYDROXYLAMINE AND THEIR INORGANIC SALTS; OTHER INORGANIC BASES; OTHER METAL OXIDES, HYDROXIDES AND PEROXIDES:Other :Tin oxide" +28259020,"HYDRAZINE AND HYDROXYLAMINE AND THEIR INORGANIC SALTS; OTHER INORGANIC BASES; OTHER METAL OXIDES, HYDROXIDES AND PEROXIDES:Other :Cadmium oxide" +28259040,"HYDRAZINE AND HYDROXYLAMINE AND THEIR INORGANIC SALTS; OTHER INORGANIC BASES; OTHER METAL OXIDES, HYDROXIDES AND PEROXIDES:Other :Calcium hydroxide" +28259050,"HYDRAZINE AND HYDROXYLAMINE AND THEIR INORGANIC SALTS; OTHER INORGANIC BASES; OTHER METAL OXIDES, HYDROXIDES AND PEROXIDES:Other :Ammonium hydroxide" +28259090,"HYDRAZINE AND HYDROXYLAMINE AND THEIR INORGANIC SALTS; OTHER INORGANIC BASES; OTHER METAL OXIDES, HYDROXIDES AND PEROXIDES:Other :Other" +28260000,fluorides fluorosilicates fluoroaluminates and other complex fluorine salts fluorides fluorides fluorosilicates fluoroaluminates and other complex fluorine salts fluorides +28261200,"FLUORIDES; FLUOROSILICATES, FLUOROALUMINATES AND OTHER COMPLEX FLUORINE SALTS::Of aluminium" +28261900,fluorides fluorosilicates fluoroaluminates and other complex fluorine salts fluorides fluorides fluorosilicates fluoroaluminates and other complex fluorine salts fluorides >> other other +28261910,"FLUORIDES; FLUOROSILICATES, FLUOROALUMINATES AND OTHER COMPLEX FLUORINE SALTS:Other :Magnesium fluoride" +28261990,"FLUORIDES; FLUOROSILICATES, FLUOROALUMINATES AND OTHER COMPLEX FLUORINE SALTS:Other :Other" +28263000,"FLUORIDES; FLUOROSILICATES, FLUOROALUMINATES AND OTHER COMPLEX FLUORINE SALTS::Sodium hexafluoroaluminate (synthetic cryolite)" +28269000,"FLUORIDES; FLUOROSILICATES, FLUOROALUMINATES AND OTHER COMPLEX FLUORINE SALTS::Other" +28270000,chlorides chloride oxides and chloride hydroxides bromides and bromide oxides iodides and iodide oxides chlorides chloride oxides and chloride hydroxides bromides and bromide oxides iodides and iodide oxides +28271000,"CHLORIDES, CHLORIDE OXIDES AND CHLORIDE 
HYDROXIDES; BROMIDES AND BROMIDE OXIDES; IODIDES AND IODIDE OXIDES::Ammonium chloride" +28272000,"CHLORIDES, CHLORIDE OXIDES AND CHLORIDE HYDROXIDES; BROMIDES AND BROMIDE OXIDES; IODIDES AND IODIDE OXIDES::Calcium chloride" +28273100,"CHLORIDES, CHLORIDE OXIDES AND CHLORIDE HYDROXIDES; BROMIDES AND BROMIDE OXIDES; IODIDES AND IODIDE OXIDES::Of magnesium" +28273200,"CHLORIDES, CHLORIDE OXIDES AND CHLORIDE HYDROXIDES; BROMIDES AND BROMIDE OXIDES; IODIDES AND IODIDE OXIDES::Of aluminium" +28273500,"CHLORIDES, CHLORIDE OXIDES AND CHLORIDE HYDROXIDES; BROMIDES AND BROMIDE OXIDES; IODIDES AND IODIDE OXIDES::Of nickel" +28273900,chlorides chloride oxides and chloride hydroxides bromides and bromide oxides iodides and iodide oxides chlorides chloride oxides and chloride hydroxides bromides and bromide oxides iodides and iodide oxides >> other other +28273920,"CHLORIDES, CHLORIDE OXIDES AND CHLORIDE HYDROXIDES; BROMIDES AND BROMIDE OXIDES; IODIDES AND IODIDE OXIDES:Other :Mercurous chloride" +28273930,"CHLORIDES, CHLORIDE OXIDES AND CHLORIDE HYDROXIDES; BROMIDES AND BROMIDE OXIDES; IODIDES AND IODIDE OXIDES:Other :Strontium chloride" +28273940,"CHLORIDES, CHLORIDE OXIDES AND CHLORIDE HYDROXIDES; BROMIDES AND BROMIDE OXIDES; IODIDES AND IODIDE OXIDES:Other :Cuprous chloride" +28273990,"CHLORIDES, CHLORIDE OXIDES AND CHLORIDE HYDROXIDES; BROMIDES AND BROMIDE OXIDES; IODIDES AND IODIDE OXIDES:Other :Other" +28274100,chlorides chloride oxides and chloride hydroxides bromides and bromide oxides iodides and iodide oxides chlorides chloride oxides and chloride hydroxides bromides and bromide oxides iodides and iodide oxides >> of copper +28274110,"CHLORIDES, CHLORIDE OXIDES AND CHLORIDE HYDROXIDES; BROMIDES AND BROMIDE OXIDES; IODIDES AND IODIDE OXIDES:Of copper :Copper oxychloride" +28274190,"CHLORIDES, CHLORIDE OXIDES AND CHLORIDE HYDROXIDES; BROMIDES AND BROMIDE OXIDES; IODIDES AND IODIDE OXIDES:Of copper :Other" +28274900,"CHLORIDES, CHLORIDE OXIDES AND CHLORIDE HYDROXIDES; BROMIDES AND BROMIDE OXIDES; IODIDES AND IODIDE OXIDES::Other" +28275100,chlorides chloride oxides and chloride hydroxides bromides and bromide oxides iodides and iodide oxides chlorides chloride oxides and chloride hydroxides bromides and bromide oxides iodides and iodide oxides >> bromides of sodium or of potassium +28275110,"CHLORIDES, CHLORIDE OXIDES AND CHLORIDE HYDROXIDES; BROMIDES AND BROMIDE OXIDES; IODIDES AND IODIDE OXIDES:Bromides of sodium or of potassium :Bromides of sodium" +28275120,"CHLORIDES, CHLORIDE OXIDES AND CHLORIDE HYDROXIDES; BROMIDES AND BROMIDE OXIDES; IODIDES AND IODIDE OXIDES:Bromides of sodium or of potassium :Bromides of potassium" +28275900,chlorides chloride oxides and chloride hydroxides bromides and bromide oxides iodides and iodide oxides chlorides chloride oxides and chloride hydroxides bromides and bromide oxides iodides and iodide oxides >> other +28275910,"CHLORIDES, CHLORIDE OXIDES AND CHLORIDE HYDROXIDES; BROMIDES AND BROMIDE OXIDES; IODIDES AND IODIDE OXIDES:Other :Magnesium bromide" +28275990,"CHLORIDES, CHLORIDE OXIDES AND CHLORIDE HYDROXIDES; BROMIDES AND BROMIDE OXIDES; IODIDES AND IODIDE OXIDES:Other :Other" +28276000,chlorides chloride oxides and chloride hydroxides bromides and bromide oxides iodides and iodide oxides chlorides chloride oxides and chloride hydroxides bromides and bromide oxides iodides and iodide oxides >> iodides and iodide oxides +28276010,"CHLORIDES, CHLORIDE OXIDES AND CHLORIDE HYDROXIDES; BROMIDES AND BROMIDE OXIDES; IODIDES AND IODIDE OXIDES:Iodides and iodide 
oxides :Potassium iodide" +28276020,"CHLORIDES, CHLORIDE OXIDES AND CHLORIDE HYDROXIDES; BROMIDES AND BROMIDE OXIDES; IODIDES AND IODIDE OXIDES:Iodides and iodide oxides :Sodium iodide" +28276090,"CHLORIDES, CHLORIDE OXIDES AND CHLORIDE HYDROXIDES; BROMIDES AND BROMIDE OXIDES; IODIDES AND IODIDE OXIDES:Iodides and iodide oxides :Other" +28280000,hy p och l o ri t es c om m erc ia l c alc i um hypochlorites chlorites hypobromites +28281000,hy p och l o ri t es c om m erc ia l c alc i um hypochlorites chlorites hypobromites >> commercial calcium hypochlorite and other calcium hypochlorites +28281010,HYPOCHLORITES; COMMERCIAL CALCIUM HYPOCHLORITES; CHLORITES; HYPOBROMITES:Commercial calcium hypochlorite and other calcium hypochlorites :Commercial calcium hypochlorite (bleaching paste or powder) +28281090,HYPOCHLORITES; COMMERCIAL CALCIUM HYPOCHLORITES; CHLORITES; HYPOBROMITES:Commercial calcium hypochlorite and other calcium hypochlorites :Other +28289000,hy p och l o ri t es c om m erc ia l c alc i um hypochlorites chlorites hypobromites >> other sodium hypochlorites +28289011,HYPOCHLORITES; COMMERCIAL CALCIUM HYPOCHLORITES; CHLORITES; HYPOBROMITES:Other :Bleaching paste or powder +28289019,HYPOCHLORITES; COMMERCIAL CALCIUM HYPOCHLORITES; CHLORITES; HYPOBROMITES:Other :Other +28289020,HYPOCHLORITES; COMMERCIAL CALCIUM HYPOCHLORITES; CHLORITES; HYPOBROMITES:Other :Potassium hypochlorites +28289030,HYPOCHLORITES; COMMERCIAL CALCIUM HYPOCHLORITES; CHLORITES; HYPOBROMITES:Other :Sodium chlorite +28289040,HYPOCHLORITES; COMMERCIAL CALCIUM HYPOCHLORITES; CHLORITES; HYPOBROMITES:Other :Aluminium chlorite +28289050,HYPOCHLORITES; COMMERCIAL CALCIUM HYPOCHLORITES; CHLORITES; HYPOBROMITES:Other :Hypobromites +28289060,HYPOCHLORITES; COMMERCIAL CALCIUM HYPOCHLORITES; CHLORITES; HYPOBROMITES:Other :Bleaching paste or powder of other hypochlorites +28289090,HYPOCHLORITES; COMMERCIAL CALCIUM HYPOCHLORITES; CHLORITES; HYPOBROMITES:Other :Other +28290000,chlorates and perchlorates bromates and perbromates iodates and periodates chlorates +28291100,CHLORATES AND PERCHLORATES; BROMATES AND PERBROMATES; IODATES AND PERIODATES::Of sodium +28291900,chlorates and perchlorates bromates and perbromates iodates and periodates chlorates >> other +28291910,CHLORATES AND PERCHLORATES; BROMATES AND PERBROMATES; IODATES AND PERIODATES:Other :Barium chlorate +28291920,CHLORATES AND PERCHLORATES; BROMATES AND PERBROMATES; IODATES AND PERIODATES:Other :Potassium chlorate +28291930,CHLORATES AND PERCHLORATES; BROMATES AND PERBROMATES; IODATES AND PERIODATES:Other :Magnesium chlorate +28291990,CHLORATES AND PERCHLORATES; BROMATES AND PERBROMATES; IODATES AND PERIODATES:Other :Other +28299000,chlorates and perchlorates bromates and perbromates iodates and periodates chlorates >> other +28299010,CHLORATES AND PERCHLORATES; BROMATES AND PERBROMATES; IODATES AND PERIODATES:Other :Perchlorates +28299020,CHLORATES AND PERCHLORATES; BROMATES AND PERBROMATES; IODATES AND PERIODATES:Other :Bromates and perbromates +28299030,CHLORATES AND PERCHLORATES; BROMATES AND PERBROMATES; IODATES AND PERIODATES:Other :Iodates and periodates +28300000,sulphides polysulphides whether or not chemically defined +28301000,"SULPHIDES; POLYSULPHIDES, WHETHER OR NOT CHEMICALLY DEFINED::Sodium sulphides" +28309000,sulphides polysulphides whether or not chemically defined >> other +28309010,"SULPHIDES; POLYSULPHIDES, WHETHER OR NOT CHEMICALLY DEFINED:Other :Sulphides" +28309020,"SULPHIDES; POLYSULPHIDES, WHETHER OR NOT CHEMICALLY DEFINED:Other 
:Polysulphides" +28310000,dithionites and sulphoxylates +28311000,dithionites and sulphoxylates >> of sodium +28311010,DITHIONITES AND SULPHOXYLATES:Of sodium :Sodium dithionites (sodium hydrosulphite) +28311020,DITHIONITES AND SULPHOXYLATES:Of sodium :Sodium sulphoxylates (including sodium formaldehyde sulphoxylate) +28319000,dithionites and sulphoxylates >> other +28319010,DITHIONITES AND SULPHOXYLATES:Other :Dithionites +28319020,DITHIONITES AND SULPHOXYLATES:Other :Sulphoxylates +28320000,sulphites thiosulphates +28321000,sulphites thiosulphates >> sodium sulphites +28321010,SULPHITES; THIOSULPHATES:Sodium sulphites :Sodium bisulphite +28321020,SULPHITES; THIOSULPHATES:Sodium sulphites :Sodium hydrosulphite +28321090,SULPHITES; THIOSULPHATES:Sodium sulphites :Other +28322000,sulphites thiosulphates >> other sulphites +28322010,SULPHITES; THIOSULPHATES:Other sulphites :Potassium metabisulphite +28322020,SULPHITES; THIOSULPHATES:Other sulphites :Magnesium sulphite +28322090,SULPHITES; THIOSULPHATES:Other sulphites :Other +28323000,sulphites thiosulphates >> thiosulphates +28323010,SULPHITES; THIOSULPHATES:Thiosulphates :Sodium thiosulphate (hypo) +28323020,SULPHITES; THIOSULPHATES:Thiosulphates :Magnesium thiosulphate +28323090,SULPHITES; THIOSULPHATES:Thiosulphates :Other +28330000,sulphates alums peroxosulphates persulphates sodium sulphates +28331100,SULPHATES; ALUMS; PEROXOSULPHATES (PERSULPHATES)::Disodium sulphate +28331900,sulphates alums peroxosulphates persulphates sodium sulphates >> other +28331910,SULPHATES; ALUMS; PEROXOSULPHATES (PERSULPHATES):Other :Sodium hydrogen sulphate (acid sulphate) +28331920,SULPHATES; ALUMS; PEROXOSULPHATES (PERSULPHATES):Other :Sodium pyrosulphate +28331990,SULPHATES; ALUMS; PEROXOSULPHATES (PERSULPHATES):Other :Other +28332100,SULPHATES; ALUMS; PEROXOSULPHATES (PERSULPHATES)::Of magnesium +28332200,sulphates alums peroxosulphates persulphates sodium sulphates >> of aluminium +28332210,SULPHATES; ALUMS; PEROXOSULPHATES (PERSULPHATES):Of aluminium :Aluminium sulphate (iron free) +28332290,SULPHATES; ALUMS; PEROXOSULPHATES (PERSULPHATES):Of aluminium :Other +28332400,SULPHATES; ALUMS; PEROXOSULPHATES (PERSULPHATES)::Of nickel +28332500,SULPHATES; ALUMS; PEROXOSULPHATES (PERSULPHATES)::Of copper +28332700,SULPHATES; ALUMS; PEROXOSULPHATES (PERSULPHATES)::Of barium +28332900,sulphates alums peroxosulphates persulphates sodium sulphates >> other +28332910,SULPHATES; ALUMS; PEROXOSULPHATES (PERSULPHATES):Other :Ferrous sulphate +28332930,SULPHATES; ALUMS; PEROXOSULPHATES (PERSULPHATES):Other :Quinidine sulphate +28332940,SULPHATES; ALUMS; PEROXOSULPHATES (PERSULPHATES):Other :Manganese sulphate +28332950,SULPHATES; ALUMS; PEROXOSULPHATES (PERSULPHATES):Other :Strontium sulphate +28332990,SULPHATES; ALUMS; PEROXOSULPHATES (PERSULPHATES):Other :Other +28333000,sulphates alums peroxosulphates persulphates sodium sulphates >> alums +28333010,SULPHATES; ALUMS; PEROXOSULPHATES (PERSULPHATES):Alums :Ammonium alum +28333020,SULPHATES; ALUMS; PEROXOSULPHATES (PERSULPHATES):Alums :Ferric ammonium alum +28333030,SULPHATES; ALUMS; PEROXOSULPHATES (PERSULPHATES):Alums :Potash alum +28333090,SULPHATES; ALUMS; PEROXOSULPHATES (PERSULPHATES):Alums :Other +28334000,SULPHATES; ALUMS; PEROXOSULPHATES (PERSULPHATES)::Peroxosulphates (persulphates) +28340000,nitrites nitrates +28341000,nitrites nitrates >> nitrites +28341010,NITRITES; NITRATES:Nitrites :Sodium nitrite +28341090,NITRITES; NITRATES:Nitrites :Other +28342100,NITRITES; NITRATES::Of potassium +28342900,nitrites 
nitrates >> other +28342910,NITRITES; NITRATES:Other :Strontium nitrate +28342920,NITRITES; NITRATES:Other :Magnesium nitrate +28342930,NITRITES; NITRATES:Other :Barium nitrate +28342990,NITRITES; NITRATES:Other :Other +28350000,phosphinates hypophosphites phosphonates phosphites and phosphates polyphosphates whether or not chemically defined +28351000,phosphinates hypophosphites phosphonates phosphites and phosphates polyphosphates whether or not chemically defined >> phosphinates hypophosphites and phosphonates phosphites +28351010,"PHOSPHINATES (HYPOPHOSPHITES), PHOSPHONATES (PHOSPHITES) AND PHOSPHATES; POLYPHOSPHATES, WHETHER OR NOT CHEMICALLY DEFINED:Phosphinates (hypophosphites) and phosphonates (phosphites) :Calcium hypophosphite" +28351020,"PHOSPHINATES (HYPOPHOSPHITES), PHOSPHONATES (PHOSPHITES) AND PHOSPHATES; POLYPHOSPHATES, WHETHER OR NOT CHEMICALLY DEFINED:Phosphinates (hypophosphites) and phosphonates (phosphites) :Magnesium hypophosphite" +28351090,"PHOSPHINATES (HYPOPHOSPHITES), PHOSPHONATES (PHOSPHITES) AND PHOSPHATES; POLYPHOSPHATES, WHETHER OR NOT CHEMICALLY DEFINED:Phosphinates (hypophosphites) and phosphonates (phosphites) :Other" +28352200,"PHOSPHINATES (HYPOPHOSPHITES), PHOSPHONATES (PHOSPHITES) AND PHOSPHATES; POLYPHOSPHATES, WHETHER OR NOT CHEMICALLY DEFINED::Of mono-or disodium" +28352400,"PHOSPHINATES (HYPOPHOSPHITES), PHOSPHONATES (PHOSPHITES) AND PHOSPHATES; POLYPHOSPHATES, WHETHER OR NOT CHEMICALLY DEFINED::Of potassium" +28352500,"PHOSPHINATES (HYPOPHOSPHITES), PHOSPHONATES (PHOSPHITES) AND PHOSPHATES; POLYPHOSPHATES, WHETHER OR NOT CHEMICALLY DEFINED::Calcium hydrogenorthophosphate (""dicalcium phosphate"")" +28352600,phosphinates hypophosphites phosphonates phosphites and phosphates polyphosphates whether or not chemically defined >> other phosphates of calcium +28352610,"PHOSPHINATES (HYPOPHOSPHITES), PHOSPHONATES (PHOSPHITES) AND PHOSPHATES; POLYPHOSPHATES, WHETHER OR NOT CHEMICALLY DEFINED:Other phosphates of calcium :Calcium monobasic phosphate" +28352620,"PHOSPHINATES (HYPOPHOSPHITES), PHOSPHONATES (PHOSPHITES) AND PHOSPHATES; POLYPHOSPHATES, WHETHER OR NOT CHEMICALLY DEFINED:Other phosphates of calcium :Calcium tribasic phosphate" +28352690,"PHOSPHINATES (HYPOPHOSPHITES), PHOSPHONATES (PHOSPHITES) AND PHOSPHATES; POLYPHOSPHATES, WHETHER OR NOT CHEMICALLY DEFINED:Other phosphates of calcium :Other" +28352900,phosphinates hypophosphites phosphonates phosphites and phosphates polyphosphates whether or not chemically defined >> other other +28352910,"PHOSPHINATES (HYPOPHOSPHITES), PHOSPHONATES (PHOSPHITES) AND PHOSPHATES; POLYPHOSPHATES, WHETHER OR NOT CHEMICALLY DEFINED:Other :Magnesium phosphate, monobasic" +28352920,"PHOSPHINATES (HYPOPHOSPHITES), PHOSPHONATES (PHOSPHITES) AND PHOSPHATES; POLYPHOSPHATES, WHETHER OR NOT CHEMICALLY DEFINED:Other :Magnesium phosphate, dibasic" +28352930,"PHOSPHINATES (HYPOPHOSPHITES), PHOSPHONATES (PHOSPHITES) AND PHOSPHATES; POLYPHOSPHATES, WHETHER OR NOT CHEMICALLY DEFINED:Other :Magnesium phosphate, tribasic" +28352940,"PHOSPHINATES (HYPOPHOSPHITES), PHOSPHONATES (PHOSPHITES) AND PHOSPHATES; POLYPHOSPHATES, WHETHER OR NOT CHEMICALLY DEFINED:Other :Sodium hexametaphosphate" +28352990,"PHOSPHINATES (HYPOPHOSPHITES), PHOSPHONATES (PHOSPHITES) AND PHOSPHATES; POLYPHOSPHATES, WHETHER OR NOT CHEMICALLY DEFINED:Other :Other" +28353100,"PHOSPHINATES (HYPOPHOSPHITES), PHOSPHONATES (PHOSPHITES) AND PHOSPHATES; POLYPHOSPHATES, WHETHER OR NOT CHEMICALLY DEFINED::Sodium triphosphate (sodium tripoly-phosphate)" 
+28353900,"PHOSPHINATES (HYPOPHOSPHITES), PHOSPHONATES (PHOSPHITES) AND PHOSPHATES; POLYPHOSPHATES, WHETHER OR NOT CHEMICALLY DEFINED::Other" +28360000,carbonates peroxocarbonates percarbonates commercial ammonium carbonate containing ammonium carbamate carbonates peroxocarbonates percarbonates commercial ammonium carbonate containing ammonium carbamate +28362000,carbonates peroxocarbonates percarbonates commercial ammonium carbonate containing ammonium carbamate carbonates peroxocarbonates percarbonates commercial ammonium carbonate containing ammonium carbamate >> disodium carbonate disodium carbonate +28362010,"CARBONATES; PEROXOCARBONATES (PERCARBONATES); COMMERCIAL AMMONIUM CARBONATE CONTAINING AMMONIUM CARBAMATE:Disodium carbonate :Disodium carbonate, dense" +28362020,"CARBONATES; PEROXOCARBONATES (PERCARBONATES); COMMERCIAL AMMONIUM CARBONATE CONTAINING AMMONIUM CARBAMATE:Disodium carbonate :Disodium carbonate, light" +28362090,CARBONATES; PEROXOCARBONATES (PERCARBONATES); COMMERCIAL AMMONIUM CARBONATE CONTAINING AMMONIUM CARBAMATE:Disodium carbonate :Other +28363000,CARBONATES; PEROXOCARBONATES (PERCARBONATES); COMMERCIAL AMMONIUM CARBONATE CONTAINING AMMONIUM CARBAMATE::Sodium hydrogencarbonate (sodium bicarbonate); +28364000,CARBONATES; PEROXOCARBONATES (PERCARBONATES); COMMERCIAL AMMONIUM CARBONATE CONTAINING AMMONIUM CARBAMATE::Potassium carbonates +28365000,CARBONATES; PEROXOCARBONATES (PERCARBONATES); COMMERCIAL AMMONIUM CARBONATE CONTAINING AMMONIUM CARBAMATE::Calcium carbonate +28366000,CARBONATES; PEROXOCARBONATES (PERCARBONATES); COMMERCIAL AMMONIUM CARBONATE CONTAINING AMMONIUM CARBAMATE::Barium carbonate +28369100,CARBONATES; PEROXOCARBONATES (PERCARBONATES); COMMERCIAL AMMONIUM CARBONATE CONTAINING AMMONIUM CARBAMATE::Lithium carbonates +28369200,CARBONATES; PEROXOCARBONATES (PERCARBONATES); COMMERCIAL AMMONIUM CARBONATE CONTAINING AMMONIUM CARBAMATE::Strontium carbonate +28369900,carbonates peroxocarbonates percarbonates commercial ammonium carbonate containing ammonium carbamate carbonates peroxocarbonates percarbonates commercial ammonium carbonate containing ammonium carbamate >> other other +28369910,CARBONATES; PEROXOCARBONATES (PERCARBONATES); COMMERCIAL AMMONIUM CARBONATE CONTAINING AMMONIUM CARBAMATE:Other :Percarbonates +28369920,CARBONATES; PEROXOCARBONATES (PERCARBONATES); COMMERCIAL AMMONIUM CARBONATE CONTAINING AMMONIUM CARBAMATE:Other :Magnesium carbonate +28369930,CARBONATES; PEROXOCARBONATES (PERCARBONATES); COMMERCIAL AMMONIUM CARBONATE CONTAINING AMMONIUM CARBAMATE:Other :Aluminium bicarbonate +28369990,CARBONATES; PEROXOCARBONATES (PERCARBONATES); COMMERCIAL AMMONIUM CARBONATE CONTAINING AMMONIUM CARBAMATE:Other :Other +28370000,cyanides cyanide oxides and complex cyanides cyanides and cyanide oxides cyanides cyanide oxides and complex cyanides cyanides and cyanide oxides +28371100,"CYANIDES, CYANIDE OXIDES AND COMPLEX CYANIDES::Of sodium" +28371900,cyanides cyanide oxides and complex cyanides cyanides and cyanide oxides cyanides cyanide oxides and complex cyanides cyanides and cyanide oxides >> other other +28371910,"CYANIDES, CYANIDE OXIDES AND COMPLEX CYANIDES:Other :Potassium cyanide" +28371920,"CYANIDES, CYANIDE OXIDES AND COMPLEX CYANIDES:Other :Double cyanide of potassium and sodium" +28371990,"CYANIDES, CYANIDE OXIDES AND COMPLEX CYANIDES:Other :Other" +28372000,cyanides cyanide oxides and complex cyanides cyanides and cyanide oxides cyanides cyanide oxides and complex cyanides cyanides and cyanide oxides >> complex cyanides complex cyanides 
+28372010,"CYANIDES, CYANIDE OXIDES AND COMPLEX CYANIDES:Complex cyanides :Ammonium sulphocyanide" +28372020,"CYANIDES, CYANIDE OXIDES AND COMPLEX CYANIDES:Complex cyanides :Potassium ferricyanide" +28372030,"CYANIDES, CYANIDE OXIDES AND COMPLEX CYANIDES:Complex cyanides :Potassium ferrocyanide" +28372040,"CYANIDES, CYANIDE OXIDES AND COMPLEX CYANIDES:Complex cyanides :Sodium ferrocyanide" +28372050,"CYANIDES, CYANIDE OXIDES AND COMPLEX CYANIDES:Complex cyanides :Sodium nitroprusside (sodium nitroferricyanide)" +28372090,"CYANIDES, CYANIDE OXIDES AND COMPLEX CYANIDES:Complex cyanides :Other" +28390000,silicates commercial alkali metal silicates of sodium +28391100,SILICATES; COMMERCIAL ALKALI METAL SILICATES::Sodium metasilicates +28391900,SILICATES; COMMERCIAL ALKALI METAL SILICATES::Other +28399000,silicates commercial alkali metal silicates of sodium >> other +28399010,SILICATES; COMMERCIAL ALKALI METAL SILICATES:Other :Magnesium trisilicate +28399090,SILICATES; COMMERCIAL ALKALI METAL SILICATES:Other :Other +28400000,borates peroxoborates perborates disodium tetraborate refined borax +28401100,BORATES; PEROXOBORATES (PERBORATES)::Anhydrous +28401900,BORATES; PEROXOBORATES (PERBORATES)::Other +28402000,borates peroxoborates perborates disodium tetraborate refined borax >> other borates +28402010,BORATES; PEROXOBORATES (PERBORATES):Other borates :Magnesium borate +28402090,BORATES; PEROXOBORATES (PERBORATES):Other borates :Other +28403000,BORATES; PEROXOBORATES (PERBORATES)::Peroxoborates (perborates) +28410000,salts of oxometallic or peroxometallic acids +28413000,SALTS OF OXOMETALLIC OR PEROXOMETALLIC ACIDS::Sodium dichromate +28415000,salts of oxometallic or peroxometallic acids >> other chromates and dichromates peroxochromates +28415010,SALTS OF OXOMETALLIC OR PEROXOMETALLIC ACIDS:Other chromates and dichromates; peroxochromates :Sodium chromate +28415090,SALTS OF OXOMETALLIC OR PEROXOMETALLIC ACIDS:Other chromates and dichromates; peroxochromates :Other +28416100,SALTS OF OXOMETALLIC OR PEROXOMETALLIC ACIDS::Potassium permanganate +28416900,SALTS OF OXOMETALLIC OR PEROXOMETALLIC ACIDS::Other +28417000,salts of oxometallic or peroxometallic acids >> molybdates +28417010,SALTS OF OXOMETALLIC OR PEROXOMETALLIC ACIDS:Molybdates :Aluminium molybdate +28417020,SALTS OF OXOMETALLIC OR PEROXOMETALLIC ACIDS:Molybdates :Sodium molybdate +28417090,SALTS OF OXOMETALLIC OR PEROXOMETALLIC ACIDS:Molybdates :Other +28418000,salts of oxometallic or peroxometallic acids >> tungstates wolframates +28418010,SALTS OF OXOMETALLIC OR PEROXOMETALLIC ACIDS:Tungstates (wolframates) :Sodium tungstate +28418020,SALTS OF OXOMETALLIC OR PEROXOMETALLIC ACIDS:Tungstates (wolframates) :Magnesium tungstate +28418090,SALTS OF OXOMETALLIC OR PEROXOMETALLIC ACIDS:Tungstates (wolframates) :Other +28419000,SALTS OF OXOMETALLIC OR PEROXOMETALLIC ACIDS::Other +28420000,other salts of inorganic acids or peroxoacids including aluminosilicates whether or not chemically defined other than azides +28421000,"OTHER SALTS OF INORGANIC ACIDS OR PEROXOACIDS, (INCLUDING ALUMINOSILICATES, WHETHER OR NOT CHEMICALLY DEFINED), OTHER THAN AZIDES::Double or complex silicates, including aluminosilicates, including aluminosilicates, whether or not chemically defined" +28429000,other salts of inorganic acids or peroxoacids including aluminosilicates whether or not chemically defined other than azides >> other +28429010,"OTHER SALTS OF INORGANIC ACIDS OR PEROXOACIDS, (INCLUDING ALUMINOSILICATES, WHETHER OR NOT CHEMICALLY DEFINED), OTHER THAN 
AZIDES:Other :Arsenites and arsenates" +28429020,"OTHER SALTS OF INORGANIC ACIDS OR PEROXOACIDS, (INCLUDING ALUMINOSILICATES, WHETHER OR NOT CHEMICALLY DEFINED), OTHER THAN AZIDES:Other :Bichromates and dichromates" +28429090,"OTHER SALTS OF INORGANIC ACIDS OR PEROXOACIDS, (INCLUDING ALUMINOSILICATES, WHETHER OR NOT CHEMICALLY DEFINED), OTHER THAN AZIDES:Other :Other" +28430000,colloidal precious metals inorganic or organic compounds of precious metals whether or not chemically defined amalgams of precious metals +28431000,colloidal precious metals inorganic or organic compounds of precious metals whether or not chemically defined amalgams of precious metals >> colloidal precious metals +28431010,"COLLOIDAL PRECIOUS METALS; INORGANIC OR ORGANIC COMPOUNDS OF PRECIOUS METALS, WHETHER OR NOT CHEMICALLY DEFINED; AMALGAMS OF PRECIOUS METALS:Colloidal precious metals :Of gold" +28431020,"COLLOIDAL PRECIOUS METALS; INORGANIC OR ORGANIC COMPOUNDS OF PRECIOUS METALS, WHETHER OR NOT CHEMICALLY DEFINED; AMALGAMS OF PRECIOUS METALS:Colloidal precious metals :Of silver" +28431090,"COLLOIDAL PRECIOUS METALS; INORGANIC OR ORGANIC COMPOUNDS OF PRECIOUS METALS, WHETHER OR NOT CHEMICALLY DEFINED; AMALGAMS OF PRECIOUS METALS:Colloidal precious metals :Other" +28432100,"COLLOIDAL PRECIOUS METALS; INORGANIC OR ORGANIC COMPOUNDS OF PRECIOUS METALS, WHETHER OR NOT CHEMICALLY DEFINED; AMALGAMS OF PRECIOUS METALS::Silver nitrate" +28432900,"COLLOIDAL PRECIOUS METALS; INORGANIC OR ORGANIC COMPOUNDS OF PRECIOUS METALS, WHETHER OR NOT CHEMICALLY DEFINED; AMALGAMS OF PRECIOUS METALS::Other" +28433000,"COLLOIDAL PRECIOUS METALS; INORGANIC OR ORGANIC COMPOUNDS OF PRECIOUS METALS, WHETHER OR NOT CHEMICALLY DEFINED; AMALGAMS OF PRECIOUS METALS::Gold compounds" +28439000,colloidal precious metals inorganic or organic compounds of precious metals whether or not chemically defined amalgams of precious metals >> other compounds amalgams other compounds +28439011,"COLLOIDAL PRECIOUS METALS; INORGANIC OR ORGANIC COMPOUNDS OF PRECIOUS METALS, WHETHER OR NOT CHEMICALLY DEFINED; AMALGAMS OF PRECIOUS METALS:Other compounds; amalgams :Sodium aurous thiosulphate" +28439012,"COLLOIDAL PRECIOUS METALS; INORGANIC OR ORGANIC COMPOUNDS OF PRECIOUS METALS, WHETHER OR NOT CHEMICALLY DEFINED; AMALGAMS OF PRECIOUS METALS:Other compounds; amalgams :Noble metal solutions of platinum, rhodium and palladium" +28439019,"COLLOIDAL PRECIOUS METALS; INORGANIC OR ORGANIC COMPOUNDS OF PRECIOUS METALS, WHETHER OR NOT CHEMICALLY DEFINED; AMALGAMS OF PRECIOUS METALS:Other compounds; amalgams :Other" +28439020,"COLLOIDAL PRECIOUS METALS; INORGANIC OR ORGANIC COMPOUNDS OF PRECIOUS METALS, WHETHER OR NOT CHEMICALLY DEFINED; AMALGAMS OF PRECIOUS METALS:Other compounds; amalgams :Amalgams" +28440000,radioactive chemical elements and radioactive isotopes including the fissile or fertile chemical elements and isotopes and their compounds mixtures and residues containing these products +28441000,"RADIOACTIVE CHEMICAL ELEMENTS AND RADIOACTIVE ISOTOPES (INCLUDING THE FISSILE OR FERTILE CHEMICAL ELEMENTS AND ISOTOPES) AND THEIR COMPOUNDS; MIXTURES AND RESIDUES CONTAINING THESE PRODUCTS::Natural uranium and its compounds; alloys, dispersions (including cermets), ceramic products and mixtures containing natural uranium or natural uranium compounds" +28442000,"RADIOACTIVE CHEMICAL ELEMENTS AND RADIOACTIVE ISOTOPES (INCLUDING THE FISSILE OR FERTILE CHEMICAL ELEMENTS AND ISOTOPES) AND THEIR COMPOUNDS; MIXTURES AND RESIDUES CONTAINING THESE PRODUCTS::Uranium enriched in 
U235 and its compounds; plutonium and its compounds; alloys, dispersions (including cermets), ceramic products and mixtures containing uranium enriched in U235, plutonium or compounds of these products" +28443000,radioactive chemical elements and radioactive isotopes including the fissile or fertile chemical elements and isotopes and their compounds mixtures and residues containing these products >> uranium depleted and in u235 its compounds thorium and its compounds alloys dispersions +28443010,"RADIOACTIVE CHEMICAL ELEMENTS AND RADIOACTIVE ISOTOPES (INCLUDING THE FISSILE OR FERTILE CHEMICAL ELEMENTS AND ISOTOPES) AND THEIR COMPOUNDS; MIXTURES AND RESIDUES CONTAINING THESE PRODUCTS:Uranium depleted in U235 and its compounds; thorium and its compounds; alloys, dispersions (including cermets), ceramic products and mixtures containing uranium depleted in U235, thorium or compounds of these products :Uranium depleted in U235 and thorium and their alloys, unwrought or wrought and compounds thereof" +28443021,"RADIOACTIVE CHEMICAL ELEMENTS AND RADIOACTIVE ISOTOPES (INCLUDING THE FISSILE OR FERTILE CHEMICAL ELEMENTS AND ISOTOPES) AND THEIR COMPOUNDS; MIXTURES AND RESIDUES CONTAINING THESE PRODUCTS:Uranium depleted in U235 and its compounds; thorium and its compounds; alloys, dispersions (including cermets), ceramic products and mixtures containing uranium depleted in U235, thorium or compounds of these products :Thorium oxide" +28443022,"RADIOACTIVE CHEMICAL ELEMENTS AND RADIOACTIVE ISOTOPES (INCLUDING THE FISSILE OR FERTILE CHEMICAL ELEMENTS AND ISOTOPES) AND THEIR COMPOUNDS; MIXTURES AND RESIDUES CONTAINING THESE PRODUCTS:Uranium depleted in U235 and its compounds; thorium and its compounds; alloys, dispersions (including cermets), ceramic products and mixtures containing uranium depleted in U235, thorium or compounds of these products :Thorium hydroxide" +28443023,"RADIOACTIVE CHEMICAL ELEMENTS AND RADIOACTIVE ISOTOPES (INCLUDING THE FISSILE OR FERTILE CHEMICAL ELEMENTS AND ISOTOPES) AND THEIR COMPOUNDS; MIXTURES AND RESIDUES CONTAINING THESE PRODUCTS:Uranium depleted in U235 and its compounds; thorium and its compounds; alloys, dispersions (including cermets), ceramic products and mixtures containing uranium depleted in U235, thorium or compounds of these products :Thorium nitrate" +28443029,"RADIOACTIVE CHEMICAL ELEMENTS AND RADIOACTIVE ISOTOPES (INCLUDING THE FISSILE OR FERTILE CHEMICAL ELEMENTS AND ISOTOPES) AND THEIR COMPOUNDS; MIXTURES AND RESIDUES CONTAINING THESE PRODUCTS:Uranium depleted in U235 and its compounds; thorium and its compounds; alloys, dispersions (including cermets), ceramic products and mixtures containing uranium depleted in U235, thorium or compounds of these products :Other" +28443030,"RADIOACTIVE CHEMICAL ELEMENTS AND RADIOACTIVE ISOTOPES (INCLUDING THE FISSILE OR FERTILE CHEMICAL ELEMENTS AND ISOTOPES) AND THEIR COMPOUNDS; MIXTURES AND RESIDUES CONTAINING THESE PRODUCTS:Uranium depleted in U235 and its compounds; thorium and its compounds; alloys, dispersions (including cermets), ceramic products and mixtures containing uranium depleted in U235, thorium or compounds of these products :Waste and scrap of uranium depleted in U235 or of thorium" +28443090,"RADIOACTIVE CHEMICAL ELEMENTS AND RADIOACTIVE ISOTOPES (INCLUDING THE FISSILE OR FERTILE CHEMICAL ELEMENTS AND ISOTOPES) AND THEIR COMPOUNDS; MIXTURES AND RESIDUES CONTAINING THESE PRODUCTS:Uranium depleted in U235 and its compounds; thorium and its compounds; alloys, dispersions (including cermets), ceramic 
products and mixtures containing uranium depleted in U235, thorium or compounds of these products :Other" +28444000,"RADIOACTIVE CHEMICAL ELEMENTS AND RADIOACTIVE ISOTOPES (INCLUDING THE FISSILE OR FERTILE CHEMICAL ELEMENTS AND ISOTOPES) AND THEIR COMPOUNDS; MIXTURES AND RESIDUES CONTAINING THESE PRODUCTS::Radioactive elements and isotopes and compounds other than kg. 12.5% - those of sub-headings 2844 10, 2844 20 or 2844 30; alloys, dispersions (including cermets), ceramic products and mixtures containing these elements, isotopes or compounds; radioactive residues(OLD tariff)" +28444100,"RADIOACTIVE CHEMICAL ELEMENTS AND RADIOACTIVE ISOTOPES (INCLUDING THE FISSILE OR FERTILE CHEMICAL ELEMENTS AND ISOTOPES) AND THEIR COMPOUNDS; MIXTURES AND RESIDUES CONTAINING THESE PRODUCTS:tritium:Tritium and its compounds; alloys, dispersions (including cermets), ceramic products and mixtures containing tritium or its compounds" +28444200,"RADIOACTIVE CHEMICAL ELEMENTS AND RADIOACTIVE ISOTOPES (INCLUDING THE FISSILE OR FERTILE CHEMICAL ELEMENTS AND ISOTOPES) AND THEIR COMPOUNDS; MIXTURES AND RESIDUES CONTAINING THESE PRODUCTS:actinium:Actinium-225, actinium-227, californium-253, curium-240, curium-241, curium-242, curium243, curium-244, einsteinium-253, einsteinium-254, gadolinium-148, polonium-208, polonium-209, polonium-210, radium-223, uranium230 or uranium-232, and their compounds; alloys, dispersions (including cermets), ceramic products and mixtures containing these elements or compounds" +28444300,"RADIOACTIVE CHEMICAL ELEMENTS AND RADIOACTIVE ISOTOPES (INCLUDING THE FISSILE OR FERTILE CHEMICAL ELEMENTS AND ISOTOPES) AND THEIR COMPOUNDS; MIXTURES AND RESIDUES CONTAINING THESE PRODUCTS:other:Other radioactive elements and isotopes and compounds; other alloys, dispersions (including cermets), ceramic products and mixtures containing these elements, isotopes or compounds" +28444400,RADIOACTIVE CHEMICAL ELEMENTS AND RADIOACTIVE ISOTOPES (INCLUDING THE FISSILE OR FERTILE CHEMICAL ELEMENTS AND ISOTOPES) AND THEIR COMPOUNDS; MIXTURES AND RESIDUES CONTAINING THESE PRODUCTS:radioactive:Radioactive residues +28445000,RADIOACTIVE CHEMICAL ELEMENTS AND RADIOACTIVE ISOTOPES (INCLUDING THE FISSILE OR FERTILE CHEMICAL ELEMENTS AND ISOTOPES) AND THEIR COMPOUNDS; MIXTURES AND RESIDUES CONTAINING THESE PRODUCTS::Spent (irradiated) fuel elements (cartridges) of nuclear reactors +28450000,isotopes other than those of heading 2844 compounds inorganic or organic of such isotopes whether or not chemically defined +28451000,"ISOTOPES OTHER THAN THOSE OF HEADING 2844; COMPOUNDS, INORGANIC OR ORGANIC, OF SUCH ISOTOPES, WHETHER OR NOT CHEMICALLY DEFINED::Heavy water (deuterium oxide)" +28452000,"ISOTOPES OTHER THAN THOSE OF HEADING 2844; COMPOUNDS, INORGANIC OR ORGANIC, OF SUCH ISOTOPES, WHETHER OR NOT CHEMICALLY DEFINED:boron:Boron enriched in boron-10 and its compounds" +28453000,"ISOTOPES OTHER THAN THOSE OF HEADING 2844; COMPOUNDS, INORGANIC OR ORGANIC, OF SUCH ISOTOPES, WHETHER OR NOT CHEMICALLY DEFINED:lithium:Lithium enriched in lithium-6 and its compounds(OLD tariff)" +28454000,"ISOTOPES OTHER THAN THOSE OF HEADING 2844; COMPOUNDS, INORGANIC OR ORGANIC, OF SUCH ISOTOPES, WHETHER OR NOT CHEMICALLY DEFINED:Helium-3:Helium -3(OLD tariff)" +28459010,"ISOTOPES OTHER THAN THOSE OF HEADING 2844; COMPOUNDS, INORGANIC OR ORGANIC, OF SUCH ISOTOPES, WHETHER OR NOT CHEMICALLY DEFINED:Other :Nuclear fuels not elsewhere included or specified(OLD tariff)" +28459090,"ISOTOPES OTHER THAN THOSE OF HEADING 2844; COMPOUNDS, INORGANIC 
OR ORGANIC, OF SUCH ISOTOPES, WHETHER OR NOT CHEMICALLY DEFINED:Other :Other(OLD tariff)" +28460000,compounds inorganic or organic of metals of yttrium or of scandium or of mixtures of these metals +28461000,compounds inorganic or organic of metals of yttrium or of scandium or of mixtures of these metals >> cerium compounds +28461010,"COMPOUNDS, INORGANIC OR ORGANIC, OF RARE-EARTH METALS, OF YTTRIUM OR OF SCANDIUM OR OF MIXTURES OF THESE METALS:Cerium compounds :Cerium oxides" +28461090,"COMPOUNDS, INORGANIC OR ORGANIC, OF RARE-EARTH METALS, OF YTTRIUM OR OF SCANDIUM OR OF MIXTURES OF THESE METALS:Cerium compounds :Other" +28469000,compounds inorganic or organic of metals of yttrium or of scandium or of mixtures of these metals >> other +28469010,"COMPOUNDS, INORGANIC OR ORGANIC, OF RARE-EARTH METALS, OF YTTRIUM OR OF SCANDIUM OR OF MIXTURES OF THESE METALS:Other :Rare-earth oxides not elsewhere included or specified" +28469020,"COMPOUNDS, INORGANIC OR ORGANIC, OF RARE-EARTH METALS, OF YTTRIUM OR OF SCANDIUM OR OF MIXTURES OF THESE METALS:Other :Rare-earth fluorides not elsewhere included or specified" +28469030,"COMPOUNDS, INORGANIC OR ORGANIC, OF RARE-EARTH METALS, OF YTTRIUM OR OF SCANDIUM OR OF MIXTURES OF THESE METALS:Other :Rare-earth chlorides not elsewhere included or specified" +28469090,"COMPOUNDS, INORGANIC OR ORGANIC, OF RARE-EARTH METALS, OF YTTRIUM OR OF SCANDIUM OR OF MIXTURES OF THESE METALS:Other :Other" +28470000,"::HYDROGEN PEROXIDE, WHETHER OR NOT SOLIDIFIED WITH UREA" +28480010,"PHOSPHIDES, WHETHER OR NOT CHEMICALLY DEFINED, EXCLUDING FERROPHOSPHORUS:Phosphides, whether or not chemically defined, excluding ferrophosphorus :Of copper (phosphor copper), containing more than 15% by weight of phosphorus(OLD tariff)" +28480020,"PHOSPHIDES, WHETHER OR NOT CHEMICALLY DEFINED, EXCLUDING FERROPHOSPHORUS:Phosphides, whether or not chemically defined, excluding ferrophosphorus :Of zinc(OLD tariff)" +28480090,"PHOSPHIDES, WHETHER OR NOT CHEMICALLY DEFINED, EXCLUDING FERROPHOSPHORUS:Phosphides, whether or not chemically defined, excluding ferrophosphorus :Other(OLD tariff)" +28490000,carbides whether or not chemically defined +28491000,"CARBIDES, WHETHER OR NOT CHEMICALLY DEFINED::Of calcium" +28492000,carbides whether or not chemically defined >> of silicon +28492010,"CARBIDES, WHETHER OR NOT CHEMICALLY DEFINED:Of silicon :Carborundum" +28492090,"CARBIDES, WHETHER OR NOT CHEMICALLY DEFINED:Of silicon :Other" +28499000,carbides whether or not chemically defined >> other +28499010,"CARBIDES, WHETHER OR NOT CHEMICALLY DEFINED:Other :Boron carbide" +28499020,"CARBIDES, WHETHER OR NOT CHEMICALLY DEFINED:Other :Tungsten carbide" +28499090,"CARBIDES, WHETHER OR NOT CHEMICALLY DEFINED:Other :Other" +28500000,hydrides nitrides azides silicides and borides whether or not chemically defined other than compounds which are also carbides of heading 2849 hydrides nitrides azides silicides and borides whether or not chemically defined other than compounds which are also carbides of heading 2849 +28500010,"HYDRIDES, NITRIDES, AZIDES, SILICIDES AND BORIDES, WHETHER OR NOT CHEMICALLY DEFINED, OTHER THAN COMPOUNDS WHICH ARE ALSO CARBIDES OF HEADING 2849:Hydrides, nitrides, azides, silicides and borides, whether or not chemically defined, other than compounds which are also carbides of heading 2849 :Hydrides" +28500020,"HYDRIDES, NITRIDES, AZIDES, SILICIDES AND BORIDES, WHETHER OR NOT CHEMICALLY DEFINED, OTHER THAN COMPOUNDS WHICH ARE ALSO CARBIDES OF HEADING 2849:Hydrides, nitrides, azides, silicides 
and borides, whether or not chemically defined, other than compounds which are also carbides of heading 2849 :Nitrides" +28500030,"HYDRIDES, NITRIDES, AZIDES, SILICIDES AND BORIDES, WHETHER OR NOT CHEMICALLY DEFINED, OTHER THAN COMPOUNDS WHICH ARE ALSO CARBIDES OF HEADING 2849:Hydrides, nitrides, azides, silicides and borides, whether or not chemically defined, other than compounds which are also carbides of heading 2849 :Azides" +28500041,"HYDRIDES, NITRIDES, AZIDES, SILICIDES AND BORIDES, WHETHER OR NOT CHEMICALLY DEFINED, OTHER THAN COMPOUNDS WHICH ARE ALSO CARBIDES OF HEADING 2849:Hydrides, nitrides, azides, silicides and borides, whether or not chemically defined, other than compounds which are also carbides of heading 2849 :Of calcium" +28500049,"HYDRIDES, NITRIDES, AZIDES, SILICIDES AND BORIDES, WHETHER OR NOT CHEMICALLY DEFINED, OTHER THAN COMPOUNDS WHICH ARE ALSO CARBIDES OF HEADING 2849:Hydrides, nitrides, azides, silicides and borides, whether or not chemically defined, other than compounds which are also carbides of heading 2849 :Other" +28500050,"HYDRIDES, NITRIDES, AZIDES, SILICIDES AND BORIDES, WHETHER OR NOT CHEMICALLY DEFINED, OTHER THAN COMPOUNDS WHICH ARE ALSO CARBIDES OF HEADING 2849:Hydrides, nitrides, azides, silicides and borides, whether or not chemically defined, other than compounds which are also carbides of heading 2849 :Borides" +28520000,inorganic or organic compounds of mercury whether or not chemically defined excluding amalgams +28521000,"Inorganic or organic compounds of mercury, whether or not chemically defined, excluding amalgams:Chemically defined:chemically defined" +28529000,"Inorganic or organic compounds of mercury, whether or not chemically defined, excluding amalgams:Other:Other" +28530000,phosphides whether or not chemically defined excluding ferrophosphorus other compounds including distilled or conductivity water and water of similar purity liquid air whether or not rare gases have been removed compressed air amalgams other than amalgams of precious metals +28530010,"PHOSPHIDES, WHETHER OR NOT CHEMICALLY DEFINED, EXCLUDING FERROPHOSPHORUS; OTHER INORGANIC COMPOUNDS (INCLUDING DISTILLED OR CONDUCTIVITY WATER AND WATER OF SIMILAR PURITY); LIQUID AIR (WHETHER OR NOT RARE GASES HAVE BEEN REMOVED); COMPRESSED AIR; AMALGAMS, OTHER THAN AMALGAMS OF PRECIOUS METALS:Other inorganic compounds (including distilled or conductivity water and water of similar purity); liquid air (whether or not rare gases have been removed); compressed air; amalgams, other than amalgams of precious metals :Distilled or conductivity water and water of similar purity(OLD tariff)" +28530020,"PHOSPHIDES, WHETHER OR NOT CHEMICALLY DEFINED, EXCLUDING FERROPHOSPHORUS; OTHER INORGANIC COMPOUNDS (INCLUDING DISTILLED OR CONDUCTIVITY WATER AND WATER OF SIMILAR PURITY); LIQUID AIR (WHETHER OR NOT RARE GASES HAVE BEEN REMOVED); COMPRESSED AIR; AMALGAMS, OTHER THAN AMALGAMS OF PRECIOUS METALS:Other inorganic compounds (including distilled or conductivity water and water of similar purity); liquid air (whether or not rare gases have been removed); compressed air; amalgams, other than amalgams of precious metals :Liquid air, whether or not any fraction of rare gases has been removed(OLD tariff)" +28530030,"PHOSPHIDES, WHETHER OR NOT CHEMICALLY DEFINED, EXCLUDING FERROPHOSPHORUS; OTHER INORGANIC COMPOUNDS (INCLUDING DISTILLED OR CONDUCTIVITY WATER AND WATER OF SIMILAR PURITY); LIQUID AIR (WHETHER OR NOT RARE GASES HAVE BEEN REMOVED); COMPRESSED AIR; AMALGAMS, OTHER THAN AMALGAMS OF PRECIOUS 
METALS:Other inorganic compounds (including distilled or conductivity water and water of similar purity); liquid air (whether or not rare gases have been removed); compressed air; amalgams, other than amalgams of precious metals :Compressed air(OLD tariff)" +28530040,"OTHER INORGANIC COMPOUNDS (INCLUDING DISTILLED OR CONDUCTIVITY WATER AND WATER OF SIMILAR PURITY); LIQUID AIR (WHETHER OR NOT RARE GASES HAVE BEEN REMOVED); COMPRESSED AIR; AMALGAMS, OTHER THAN AMALGAMS OF PRECIOUS METALS:Other inorganic compounds (including distilled or conductivity water and water of similar purity); liquid air (whether or not rare gases have been removed); compressed air; amalgams, other than amalgams of precious metals :Amalgams, other than of precious metals(OLD tariff)" +28530091,"PHOSPHIDES, WHETHER OR NOT CHEMICALLY DEFINED, EXCLUDING FERROPHOSPHORUS; OTHER INORGANIC COMPOUNDS (INCLUDING DISTILLED OR CONDUCTIVITY WATER AND WATER OF SIMILAR PURITY); LIQUID AIR (WHETHER OR NOT RARE GASES HAVE BEEN REMOVED); COMPRESSED AIR; AMALGAMS, OTHER THAN AMALGAMS OF PRECIOUS METALS:Other inorganic compounds (including distilled or conductivity water and water of similar purity); liquid air (whether or not rare gases have been removed); compressed air; amalgams, other than amalgams of precious metals :Cyanogen chloride [(CN) Cl](OLD tariff)" +28530099,"OTHER INORGANIC COMPOUNDS (INCLUDING DISTILLED OR CONDUCTIVITY WATER AND WATER OF SIMILAR PURITY); LIQUID AIR (WHETHER OR NOT RARE GASES HAVE BEEN REMOVED); COMPRESSED AIR; AMALGAMS, OTHER THAN AMALGAMS OF PRECIOUS METALS:Other inorganic compounds (including distilled or conductivity water and water of similar purity); liquid air (whether or not rare gases have been removed); compressed air; amalgams, other than amalgams of precious metals :Other(OLD tariff)" +28531000,"PHOSPHIDES, WHETHER OR NOT CHEMICALLY DEFINED, EXCLUDING FERROPHOSPHORUS; OTHER INORGANIC COMPOUNDS (INCLUDING DISTILLED OR CONDUCTIVITY WATER AND WATER OF SIMILAR PURITY); LIQUID AIR (WHETHER OR NOT RARE GASES HAVE BEEN REMOVED); COMPRESSED AIR; AMALGAMS, OTHER THAN AMALGAMS OF PRECIOUS METALS::Cyanogen chloride (Chlorcyan)" +28539000,phosphides whether or not chemically defined excluding ferrophosphorus other compounds including distilled or conductivity water and water of similar purity liquid air whether or not rare gases have been removed compressed air amalgams other than amalgams of precious metals >> other +28539010,"PHOSPHIDES, WHETHER OR NOT CHEMICALLY DEFINED, EXCLUDING FERROPHOSPHORUS; OTHER INORGANIC COMPOUNDS (INCLUDING DISTILLED OR CONDUCTIVITY WATER AND WATER OF SIMILAR PURITY); LIQUID AIR (WHETHER OR NOT RARE GASES HAVE BEEN REMOVED); COMPRESSED AIR; AMALGAMS, OTHER THAN AMALGAMS OF PRECIOUS METALS::Distilled or conductivity water and water of similar purity" +28539020,"PHOSPHIDES, WHETHER OR NOT CHEMICALLY DEFINED, EXCLUDING FERROPHOSPHORUS; OTHER INORGANIC COMPOUNDS (INCLUDING DISTILLED OR CONDUCTIVITY WATER AND WATER OF SIMILAR PURITY); LIQUID AIR (WHETHER OR NOT RARE GASES HAVE BEEN REMOVED); COMPRESSED AIR; AMALGAMS, OTHER THAN AMALGAMS OF PRECIOUS METALS::Liquid air, whether or not rare gases has been removed" +28539030,"PHOSPHIDES, WHETHER OR NOT CHEMICALLY DEFINED, EXCLUDING FERROPHOSPHORUS; OTHER INORGANIC COMPOUNDS (INCLUDING DISTILLED OR CONDUCTIVITY WATER AND WATER OF SIMILAR PURITY); LIQUID AIR (WHETHER OR NOT RARE GASES HAVE BEEN REMOVED); COMPRESSED AIR; AMALGAMS, OTHER THAN AMALGAMS OF PRECIOUS METALS::Compressed air" +28539040,"PHOSPHIDES, WHETHER OR NOT CHEMICALLY 
DEFINED, EXCLUDING FERROPHOSPHORUS; OTHER INORGANIC COMPOUNDS (INCLUDING DISTILLED OR CONDUCTIVITY WATER AND WATER OF SIMILAR PURITY); LIQUID AIR (WHETHER OR NOT RARE GASES HAVE BEEN REMOVED); COMPRESSED AIR; AMALGAMS, OTHER THAN AMALGAMS OF PRECIOUS METALS::Amalgams, other than of precious metals" +28539090,"PHOSPHIDES, WHETHER OR NOT CHEMICALLY DEFINED, EXCLUDING FERROPHOSPHORUS; OTHER INORGANIC COMPOUNDS (INCLUDING DISTILLED OR CONDUCTIVITY WATER AND WATER OF SIMILAR PURITY); LIQUID AIR (WHETHER OR NOT RARE GASES HAVE BEEN REMOVED); COMPRESSED AIR; AMALGAMS, OTHER THAN AMALGAMS OF PRECIOUS METALS::Other" +29010000,acyclic hydrocarbons +29011000,ACYCLIC HYDROCARBONS::saturated +29012100,ACYCLIC HYDROCARBONS::Ethylene +29012200,ACYCLIC HYDROCARBONS::Propene (propylene) +29012300,ACYCLIC HYDROCARBONS::Butene (butylene) and isomers thereof +29012400,"ACYCLIC HYDROCARBONS::Buta-1,3-diene and isoprene" +29012900,acyclic hydrocarbons >> other +29012910,"ACYCLIC HYDROCARBONS:Other :Acetylene, whether or not in dissolved condition" +29012920,ACYCLIC HYDROCARBONS:Other :Heptene (Heptylene) +29012930,ACYCLIC HYDROCARBONS:Other :Dihydromyrcene +29012940,ACYCLIC HYDROCARBONS:Other :Tetradecene +29012990,ACYCLIC HYDROCARBONS:Other :Other +29020000,cyclic hydrocarbons cyclanes cyclenes and cycloterpenes +29021100,CYCLIC HYDROCARBONS::Cyclohexane +29021900,CYCLIC HYDROCARBONS:Other:Other +29021910,CYCLIC HYDROCARBONS:Other:Cyclopropyl acetetylene +29021990,CYCLIC HYDROCARBONS:Other:Other +29022000,CYCLIC HYDROCARBONS::Benzene +29023000,CYCLIC HYDROCARBONS::Toluene +29024100,CYCLIC HYDROCARBONS::o-Xylene +29024200,CYCLIC HYDROCARBONS::m-Xylene +29024300,CYCLIC HYDROCARBONS::p-Xylene +29024400,CYCLIC HYDROCARBONS::Mixed xylene isomers +29025000,CYCLIC HYDROCARBONS::Styrene +29026000,CYCLIC HYDROCARBONS::Ethylbenzene +29027000,CYCLIC HYDROCARBONS::Cumene +29029000,cyclic hydrocarbons cyclanes cyclenes and cycloterpenes >> other other +29029010,CYCLIC HYDROCARBONS:Other :Dipentene +29029020,CYCLIC HYDROCARBONS:Other :Diphenyl methane +29029030,CYCLIC HYDROCARBONS:Other :Dodecyclic benzenes (excluding mixed alkylarenes) +29029040,"CYCLIC HYDROCARBONS:Other :Napthalene, pure" +29029050,CYCLIC HYDROCARBONS:Other :Isobutyl benzene +29029060,CYCLIC HYDROCARBONS:Other :N-propyl benzene +29029090,CYCLIC HYDROCARBONS:Other :Other +29030000,halogenated derivatives of hydrocarbons saturated chlorinated derivatives of acyclic hydrocarbons halogenated derivatives of hydrocarbons saturated chlorinated derivatives of acyclic hydrocarbons +29031100,halogenated derivatives of hydrocarbons saturated chlorinated derivatives of acyclic hydrocarbons halogenated derivatives of hydrocarbons saturated chlorinated derivatives of acyclic hydrocarbons >> chloromethane methyl chloride and chloroethane ethyl chloride chloromethane methyl chloride and chloroethane ethyl chloride +29031110,HALOGENATED DERIVATIVES OF HYDROCARBONS:Chloromethane (methyl chloride) and chloroethane (ethyl chloride) :Chloromethane (methyl chloride) +29031120,HALOGENATED DERIVATIVES OF HYDROCARBONS:Chloromethane (methyl chloride) and chloroethane (ethyl chloride) :Chloroethane (ethyl chloride) +29031200,HALOGENATED DERIVATIVES OF HYDROCARBONS::Dichloromethane (methylene chloride) +29031300,HALOGENATED DERIVATIVES OF HYDROCARBONS::Chloroform (trichloro methane) +29031400,HALOGENATED DERIVATIVES OF HYDROCARBONS::Carbon tetrachloride +29031500,"HALOGENATED DERIVATIVES OF HYDROCARBONS::Ethylene dichloride (ISO) (1, 2-dichloroethane)" +29031900,halogenated 
derivatives of hydrocarbons saturated chlorinated derivatives of acyclic hydrocarbons halogenated derivatives of hydrocarbons saturated chlorinated derivatives of acyclic hydrocarbons >> other other +29031910,HALOGENATED DERIVATIVES OF HYDROCARBONS:Other :Tetrachloroethane +29031920,HALOGENATED DERIVATIVES OF HYDROCARBONS:Other :Trichloroethane +29031990,HALOGENATED DERIVATIVES OF HYDROCARBONS:Other :Other +29032100,HALOGENATED DERIVATIVES OF HYDROCARBONS::Vinyl chloride (chloroethylene) +29032200,HALOGENATED DERIVATIVES OF HYDROCARBONS::Trichloroethylene +29032300,HALOGENATED DERIVATIVES OF HYDROCARBONS::Tetrachloroethylene (perchloroethylene) +29032900,HALOGENATED DERIVATIVES OF HYDROCARBONS::Other +29033100,"HALOGENATED DERIVATIVES OF HYDROCARBONS::Ethylene dibromide (ISO) (1, 2-dibromoethane)(OLD tariff)" +29033911,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Other:1-Propene, 1, 1,3,3,3,-Pentafluoro -2-(trifluoromethyl) (PFIB)(OLD tariff)" +29033919,HALOGENATED DERIVATIVES OF HYDROCARBONS:Other:Other(OLD tariff) +29033920,HALOGENATED DERIVATIVES OF HYDROCARBONS:Other:Brominated derivative(OLD tariff) +29033930,HALOGENATED DERIVATIVES OF HYDROCARBONS:Other:Iodinated derivatives(OLD tariff) +29033990,HALOGENATED DERIVATIVES OF HYDROCARBONS:Other:Other(OLD tariff) +29034100,HALOGENATED DERIVATIVES OF HYDROCARBONS:Trifluoromethane (HFC-23):Trifluoromethane (HFC-23) +29034200,HALOGENATED DERIVATIVES OF HYDROCARBONS:fluoro:Difluoromethane (HFC-32) +29034300,"HALOGENATED DERIVATIVES OF HYDROCARBONS:flurome:Fluoromethane (HFC-41), 1,2difluoroethane (HFC-152) and 1,1difluoroethane (HFC-152a)" +29034400,"HALOGENATED DERIVATIVES OF HYDROCARBONS:penta:Pentafluoroethane (HFC-125), 1,1,1trifluoroethane (HFC-143a) and 1,1,2-trifluoroethane (HFC-143)" +29034500,"HALOGENATED DERIVATIVES OF HYDROCARBONS:1,1,1:1,1,1,2-Tetrafluoroethane (HFC134a) and 1,1,2,2-tetrafluoroethane (HFC-134)" +29034600,"HALOGENATED DERIVATIVES OF HYDROCARBONS:1,1:1,1,1,2,3,3,3-Heptafluoropropane (HFC-227ea), 1,1,1,2,2,3hexafluoropropane (HFC-236cb), 1,1,1,2,3,3-hexafluoropropane (HFC236ea) and 1,1,1,3,3,3hexafluoropropane (HFC-236fa)" +29034700,"HALOGENATED DERIVATIVES OF HYDROCARBONS:1,1,1:1,1,1,3,3-Pentafluoropropane (HFC245fa) and 1,1,2,2,3pentafluoropropane (HFC-245ca)" +29034800,"HALOGENATED DERIVATIVES OF HYDROCARBONS:1.1.1.:1,1,1,3,3-Pentafluorobutane (HFC365mfc) and 1,1,1,2,2,3,4,5,5,5decafluoropentane (HFC-43-10mee)" +29034900,HALOGENATED DERIVATIVES OF HYDROCARBONS:other:Other +29035100,halogenated derivatives of hydrocarbons saturated chlorinated derivatives of acyclic hydrocarbons halogenated derivatives of hydrocarbons saturated chlorinated derivatives of acyclic hydrocarbons >> tetrafluoropropene and z hfo 1336mzz +29035900,HALOGENATED DERIVATIVES OF HYDROCARBONS:other:other +29035910,"HALOGENATED DERIVATIVES OF HYDROCARBONS:other:1,1,3,3,3-pentafluoro-2(trifluoromethyl)prop-1-ene [Perfluoroisobutene (PFIB)]" +29035990,HALOGENATED DERIVATIVES OF HYDROCARBONS:other:other +29036100,HALOGENATED DERIVATIVES OF HYDROCARBONS:methyl:Methyl bromide (bromomethane) +29036200,"HALOGENATED DERIVATIVES OF HYDROCARBONS:ethylene:Ethylene dibromide (ISO) (1,2dibromoethane)" +29036900,HALOGENATED DERIVATIVES OF HYDROCARBONS:other:others +29037100,HALOGENATED DERIVATIVES OF HYDROCARBONS:Halogenated derivatives of acyclic hydrocarons containing two or more different halogens:Chlorodifluoromethane +29037200,HALOGENATED DERIVATIVES OF HYDROCARBONS:Hologenated derivatives of acyclic hydrocarbons containing two or more different 
hologens:Dichlorotrifluoroethanes +29037300,HALOGENATED DERIVATIVES OF HYDROCARBONS:Hologenated derivatives of acyclic hydrocarbons containing two or more different hologens:Dichlorofluoroethanes +29037400,HALOGENATED DERIVATIVES OF HYDROCARBONS:Hologenated derivatives of acyclic hydrocarbons containing two or more different hologens:Chlorodifluoroethanes +29037500,HALOGENATED DERIVATIVES OF HYDROCARBONS:Hologenated derivatives of acyclic hydrocarbons containing two or more different hologens:Dichloropentafluoropropanes +29037600,halogenated derivatives of hydrocarbons saturated chlorinated derivatives of acyclic hydrocarbons halogenated derivatives of hydrocarbons saturated chlorinated derivatives of acyclic hydrocarbons >> bromochlorodifluoromethane bromotrifluoromethane and dibromotetrafluoroethanes +29037610,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Bromochlorodifluoromethane, bromotrifluoromethane and dibromotetrafluoroethanes:Bromochlorodifluoromethane" +29037620,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Bromochlorodifluoromethane, bromotrifluoromethane and dibromotetrafluoroethanes:Bromotrifluoromethane" +29037630,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Bromochlorodifluoromethane, bromotrifluoromethane and dibromotetrafluoroethanes:Dibromotetrafluoroethanes" +29037700,halogenated derivatives of hydrocarbons saturated chlorinated derivatives of acyclic hydrocarbons halogenated derivatives of hydrocarbons saturated chlorinated derivatives of acyclic hydrocarbons >> other perhalogenated only with fluorine and chlorine chlorofluoromethanes +29037711,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Other, perhalogenated only with fluorine and chlorine:Chlorotrifluoromethane" +29037712,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Other, perhalogenated only with fluorine and chlorine:Dichlorodifluoromethane" +29037713,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Other, perhalogenated only with fluorine and chlorine:Trichlorofluoromethane" +29037721,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Other, perhalogenated only with fluorine and chlorine:Chloropentafluoroethane" +29037722,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Other, perhalogenated only with fluorine and chlorine:1,2-Dichlorotetrafluoroethane" +29037723,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Other, perhalogenated only with fluorine and chlorine:Trichlorotrifluoroethane" +29037724,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Other, perhalogenated only with fluorine and chlorine:Tetrachlorodifluoroethane" +29037725,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Other, perhalogenated only with fluorine and chlorine:Pentachlorofluoroethane" +29037731,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Other, perhalogenated only with fluorine and chlorine:Chloroheptafluoropropane" +29037732,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Other, perhalogenated only with fluorine and chlorine:Dichlorohexafluoropropane" +29037733,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Other, perhalogenated only with fluorine and chlorine:Trichloropentafluoropropane" +29037734,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Other, perhalogenated only with fluorine and chlorine:Tetrachlorotetrafluoropropane" +29037735,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Other, perhalogenated only with fluorine and chlorine:Pentachlorotrifluoropropane" +29037736,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Other, perhalogenated only with fluorine and chlorine:Hexachlorodifluoropropane" +29037737,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Other, perhalogenated only with fluorine and chlorine:Heptachlorofluoropropane" 
+29037790,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Other, perhalogenated only with fluorine and chlorine:other derivatives, perhalogenated only wityh fluorine and chlorine" +29037800,HALOGENATED DERIVATIVES OF HYDROCARBONS:other:other perhalogenated derivatives +29037900,HALOGENATED DERIVATIVES OF HYDROCARBONS:other:other +29038100,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Halogenated derivatives of cyclanic, cyclenic or cycloterpenic hydrocarbons:1,2,3,4,5,6- Hexachlorocyclohexame (HCH(ISO)) including lindane (ISO,INN)" +29038200,"HALOGENATED DERIVATIVES OF HYDROCARBONS:aldrin (ISO), chlordane (ISO):Aldrin (ISO), chlordane(ISO)" +29038300,HALOGENATED DERIVATIVES OF HYDROCARBONS::Mirex (ISO) +29038900,HALOGENATED DERIVATIVES OF HYDROCARBONS:other:other Halogenated derivatives of aromatic hydrocarbons +29039100,halogenated derivatives of hydrocarbons saturated chlorinated derivatives of acyclic hydrocarbons halogenated derivatives of hydrocarbons saturated chlorinated derivatives of acyclic hydrocarbons >> chlorobenzene and benzene chlorobenzene and benzene +29039110,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Chlorobenzene, o-dichlorobenzene and p-dichlorobenzene:Chlorobenzene (monochloro)" +29039120,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Chlorobenzene, o-dichlorobenzene and p-dichlorobenzene:o-dichlorobenzene (Orthodichlorobenzene)" +29039130,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Chlorobenzene, o-dichlorobenzene and p-dichlorobenzene:p-dichlorobenzene (Paradichlorobenzene)" +29039200,halogenated derivatives of hydrocarbons saturated chlorinated derivatives of acyclic hydrocarbons halogenated derivatives of hydrocarbons saturated chlorinated derivatives of acyclic hydrocarbons >> hexachlorobenzene iso and ddt iso clofenotane inn p chlorophenyl ethane hexachlorobenzene iso and ddt iso clofenotane inn p chlorophenyl ethane +29039210,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Hexachlorobenzene (ISO) and DDT (ISO) (chlofenotane (INN), 1,1,1-trichloro-2,2-bis (p-chlorophenyl) ethane:Hexachlorobenzene (ISO)" +29039221,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Hexachlorobenzene (ISO) and DDT (ISO) (chlofenotane (INN), 1,1,1-trichloro-2,2-bis (p-chlorophenyl) ethane:DDT Technical 75 Wdp" +29039229,"HALOGENATED DERIVATIVES OF HYDROCARBONS:Hexachlorobenzene (ISO) and DDT (ISO) (chlofenotane (INN), 1,1,1-trichloro-2,2-bis (p-chlorophenyl) ethane:other" +29039300,HALOGENATED DERIVATIVES OF HYDROCARBONS::Pentachlorobenzene (ISO) +29039400,HALOGENATED DERIVATIVES OF HYDROCARBONS::Hexabromobiphenyls +29039900,halogenated derivatives of hydrocarbons saturated chlorinated derivatives of acyclic hydrocarbons halogenated derivatives of hydrocarbons saturated chlorinated derivatives of acyclic hydrocarbons >> other other +29039910,HALOGENATED DERIVATIVES OF HYDROCARBONS:other:Chlorofluorobenzene +29039920,HALOGENATED DERIVATIVES OF HYDROCARBONS:other:Benzalchloride (Benzyl dichloride) +29039930,HALOGENATED DERIVATIVES OF HYDROCARBONS:other:Benzotrichloride +29039940,HALOGENATED DERIVATIVES OF HYDROCARBONS:other:Benzylchloride +29039950,HALOGENATED DERIVATIVES OF HYDROCARBONS:other:Parachlorotoluene (4-chloromethyl benzene) +29039960,"HALOGENATED DERIVATIVES OF HYDROCARBONS:other:Napthalene, chlorinated" +29039970,HALOGENATED DERIVATIVES OF HYDROCARBONS:other:Chlorofluoro aniline +29039990,HALOGENATED DERIVATIVES OF HYDROCARBONS:other:other +29040000,sulphonated nitrated or nitrosated derivatives of hydrocarbons whether or not halogenated +29041000,sulphonated nitrated or nitrosated derivatives of hydrocarbons 
whether or not halogenated >> derivatives containing only sulpho groups their salts and ethyl esters +29041010,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED:Derivatives containing only sulpho groups, their salts and ethyl esters :Benzene sulphonic acid" +29041020,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED:Derivatives containing only sulpho groups, their salts and ethyl esters :1,5 Napthelene disulphonic acid (Armstrong's acid)" +29041030,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED:Derivatives containing only sulpho groups, their salts and ethyl esters :Napthelene sulphonic acid" +29041040,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED:Derivatives containing only sulpho groups, their salts and ethyl esters :Vinyl sulphone(OLD tariff)" +29041090,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED:Derivatives containing only sulpho groups, their salts and ethyl esters :Other" +29042000,sulphonated nitrated or nitrosated derivatives of hydrocarbons whether or not halogenated >> derivatives containing only nitro or only nitroso groups +29042010,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED:Derivatives containing only nitro or only nitroso groups :Nitrobenzene" +29042020,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED:Derivatives containing only nitro or only nitroso groups :Meta dinitrobenzene" +29042030,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED:Derivatives containing only nitro or only nitroso groups :Meta nitrotoluene" +29042040,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED:Derivatives containing only nitro or only nitroso groups :Ortho nitrotoluene" +29042050,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED:Derivatives containing only nitro or only nitroso groups :Para nitrotoluene" +29042060,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED:Derivatives containing only nitro or only nitroso groups :Dinitrotoluene" +29042090,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED:Derivatives containing only nitro or only nitroso groups :Other" +29043100,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED::Perfluorooctane sulphonic acid" +29043200,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED::Ammonium perfluorooctane sulphonate" +29043300,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED::Lithium perfluorooctane sulphonate" +29043400,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED::Lithium perfluorooctane sulphonate" +29043500,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED::Other salts of perfluorooctane sulphonic acid" +29043600,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED::Perfluorooctane sulphonyl fluoride" +29049010,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED:Other :2,5 dichloro nitrobenzene(OLD tariff)" +29049020,"SULPHONATED, NITRATED OR 
NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED:Other :Dinitrochlorobenzene(OLD tariff)" +29049030,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED:Other :Meta nitrochlorobenzene(OLD tariff)" +29049040,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED:Other :Ortho nitrochlorobenzene(OLD tariff)" +29049050,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED:Other :Para nitrochlorobenzene(OLD tariff)" +29049060,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED:Other :2-nitrochlorotoluene(OLD tariff)" +29049070,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED:Other :Sodium meta nitrobenzene sulphonate(OLD tariff)" +29049080,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED:Other :Chloropicrin (Trichloronitro-Methane)(OLD tariff)" +29049090,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED:Other :Other(OLD tariff)" +29049100,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED::Trichloronitromethane (chloropicrin)" +29049900,sulphonated nitrated or nitrosated derivatives of hydrocarbons whether or not halogenated >> other +29049910,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED::2, 5 dichloronitrobenzene" +29049920,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED::Dinitrochlorebenzene" +29049930,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED::Meta nitrochlorobenzene" +29049940,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED::Ortho nitrochlorobenzene" +29049950,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED::Para nitrochlorobenzene" +29049960,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED::2-nitrochlorotoluene" +29049970,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED::Sodium meta nitrochlorobenzene sulphonate" +29049990,"SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF HYDROCARBONS, WHETHER OR NOT HALOGENATED::Other" +29050000,acyclic alcohols and their halogenated sulphonated nitrated or nitrosated derivatives saturated monohydric alcohols +29051100,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Methanol (methyl alcohol)" +29051200,acyclic alcohols and their halogenated sulphonated nitrated or nitrosated derivatives saturated monohydric alcohols >> propyl alcohol and isopropyl alcohol +29051210,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Propan-1-o1 (propyl alcohol) and propan-2-ol (isopropyl alcohol) :Propyl alcohol" +29051220,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Propan-1-o1 (propyl alcohol) and propan-2-ol (isopropyl alcohol) :Isopropyl alcohol" +29051300,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Butan-1-ol (n-butyl alcohol)" +29051400,acyclic alcohols and their halogenated sulphonated nitrated or nitrosated derivatives saturated monohydric alcohols >> other butanols +29051410,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED 
OR NITROSATED DERIVATIVES:Other butanols :Ethambutol, ethambutol Hcl" +29051420,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other butanols :Salbutamol sulphate" +29051430,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other butanols :Amino butanol" +29051490,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other butanols :Other" +29051600,acyclic alcohols and their halogenated sulphonated nitrated or nitrosated derivatives saturated monohydric alcohols >> octanol octyl alcohol and isomers thereof +29051610,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Octanol (octyl alcohol) and isomers thereof :Dimethyl octanol" +29051620,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Octanol (octyl alcohol) and isomers thereof :2-ethyl hexanol" +29051690,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Octanol (octyl alcohol) and isomers thereof :Other" +29051700,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Dodecan-1-ol (lauryl alcohol),hexadecan-1-ol (cetyl alcohol) and octadecan-1-ol (stearyl alcohol)" +29051900,acyclic alcohols and their halogenated sulphonated nitrated or nitrosated derivatives saturated monohydric alcohols >> other +29051910,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:2-Butanol, 3, 3-dimethyl-" +29051990,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Other" +29052200,acyclic alcohols and their halogenated sulphonated nitrated or nitrosated derivatives saturated monohydric alcohols >> acyclic terpene alcohols +29052210,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Acyclic terpene alcohols :Citranellol" +29052220,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Acyclic terpene alcohols :Geraniol" +29052230,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Acyclic terpene alcohols :Linalool" +29052240,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Acyclic terpene alcohols :Rhodinol" +29052250,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Acyclic terpene alcohols :Dihydromyrcenol" +29052290,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Acyclic terpene alcohols :Other" +29052900,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Other" +29053100,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Ethylene glycol (ethanediol)" +29053200,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Propylene glycol (propane-1,2-diol)" +29053900,acyclic alcohols and their halogenated sulphonated nitrated or nitrosated derivatives saturated monohydric alcohols >> other +29053910,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :1,4/1,3/2,3-butylene glycol" +29053920,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Hexylene glycol" +29053990,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Other" +29054100,"ACYCLIC 
ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::2- Ethyl-2-(hydroxymethyl) propane-1,3-diol (trimethylolropane)" +29054200,acyclic alcohols and their halogenated sulphonated nitrated or nitrosated derivatives saturated monohydric alcohols >> pentaerythritol +29054210,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Pentaerythritol :Dipentaerythritol" +29054290,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Pentaerythritol :Other" +29054300,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Mannitol" +29054400,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::D-glucitol (Sorbitol)" +29054500,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Glycerol" +29054900,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Other" +29055100,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Ethchlorvynol (INN)" +29055900,"ACYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Other" +29061100,"CYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Menthol(OLD tariff)" +29061200,"CYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Cyclohexanol, methylcyclohexanols and dimethylcyclohexanols(OLD tariff)" +29061310,"CYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Sterols and inositols :Cholesterol(OLD tariff)" +29061390,"CYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Sterols and inositols :Other(OLD tariff)" +29061910,"CYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Borneol(OLD tariff)" +29061990,"CYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Other(OLD tariff)" +29062100,"CYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Benzyl alcohol(OLD tariff)" +29062910,"CYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Cinnamic alcohol(OLD tariff)" +29062920,"CYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Phenylethyl alcohol(OLD tariff)" +29062990,"CYCLIC ALCOHOLS AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Other(OLD tariff)" +29070000,phenols monophenols +29071100,phenols monophenols >> phenol hydroxybenzene and its salts +29071110,"PHENOLS; PHENOL-ALCOHOLS:Phenol (hydroxybenzene) and its salts :Phenol, as pure carbolic acid" +29071190,PHENOLS; PHENOL-ALCOHOLS:Phenol (hydroxybenzene) and its salts :Other +29071200,phenols monophenols >> cresols and their salts +29071210,PHENOLS; PHENOL-ALCOHOLS:Cresols and their salts :Para cresols (p-cresols) +29071220,PHENOLS; PHENOL-ALCOHOLS:Cresols and their salts :Cresylic acid +29071290,PHENOLS; PHENOL-ALCOHOLS:Cresols and their salts :Other +29071300,"PHENOLS; PHENOL-ALCOHOLS::Octylphenol, nonylphenol and their isomers; salts thereof" +29071500,phenols monophenols >> naphthols and their salts +29071510,PHENOLS; PHENOL-ALCOHOLS:Naphthols and their salts :Alpha naphthols +29071520,PHENOLS; PHENOL-ALCOHOLS:Naphthols and their salts :Beta naphthols +29071590,PHENOLS; PHENOL-ALCOHOLS:Naphthols and their salts :Other +29071900,phenols monophenols 
>> other +29071910,PHENOLS; PHENOL-ALCOHOLS:Other :o-Phenyl phenols +29071920,PHENOLS; PHENOL-ALCOHOLS:Other :p-Phenyl phenols +29071930,PHENOLS; PHENOL-ALCOHOLS:Other :Thymol +29071940,PHENOLS; PHENOL-ALCOHOLS:Other :Para tertiary butyl phenol +29071950,PHENOLS; PHENOL-ALCOHOLS:Other :Alkyl phenols +29071990,PHENOLS; PHENOL-ALCOHOLS:Other :Other +29072100,PHENOLS; PHENOL-ALCOHOLS::Resorcinol and its salts +29072200,PHENOLS; PHENOL-ALCOHOLS::Hydroquinone (quinol) and its salts +29072300,"PHENOLS; PHENOL-ALCOHOLS::4,4 -isopropylidenediphenol (bis-phenol A, diphenylolpropane) and its salts" +29072900,phenols monophenols >> other +29072910,"PHENOLS; PHENOL-ALCOHOLS:Other :1,5- Dihydroxy naphthalene" +29072920,PHENOLS; PHENOL-ALCOHOLS:Other :Tris (p-hydroxy phenyl) ethane +29072930,PHENOLS; PHENOL-ALCOHOLS:Other :Tertiary butyl hydroquinone +29072990,PHENOLS; PHENOL-ALCOHOLS:Other :Other +29080000,halogenated sulphonated nitrated or nitrosated derivatives of phenols or phenol alcohols derivatives containing only halogen substituents and their salts +29081100,"HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF PHENOLS OR PHENOL-ALCOHOLS::Pentachlorophenol (ISO)" +29081900,"HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF PHENOLS OR PHENOL-ALCOHOLS::Other" +29089100,"HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF PHENOLS OR PHENOL-ALCOHOLS::Dinoseb (ISO) and its salts" +29089200,"HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF PHENOLS OR PHENOL-ALCOHOLS:4,6-D initro-o-cresol:4,6-Dinitro-o-cresol [DNOC(ISO)] and it salts" +29089900,halogenated sulphonated nitrated or nitrosated derivatives of phenols or phenol alcohols derivatives containing only halogen substituents and their salts >> other +29089910,"HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF PHENOLS OR PHENOL-ALCOHOLS:Other:Para nitrophenol" +29089920,"HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF PHENOLS OR PHENOL-ALCOHOLS:Other:Musk xylol" +29089990,"HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF PHENOLS OR PHENOL-ALCOHOLS:Other:Other" +29090000,ethers ether alcohol peroxides ether peroxides acetal and hemiacetal peroxides ketone peroxides whether or not chemically defined and their halogenated sulphonated nitrated or nitrosated derivatives acyclic ethers and their halogenated sulphonated nitrated or nitrosated derivatives +29091100,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Diethyl ether" +29091900,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Other" +29091910,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Tertiary amyl methyl ether(OLD tariff)" +29091920,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Methyl tertiary butyl ether (MTBE)(OLD tariff)" +29091990,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, 
ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Other(OLD tariff)" +29092000,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Cyclanic, cyclenic or cycloterpenic ethers and their halogenated, sulphonated, nitrated or nitrosated derivatives" +29093000,ethers ether alcohol peroxides ether peroxides acetal and hemiacetal peroxides ketone peroxides whether or not chemically defined and their halogenated sulphonated nitrated or nitrosated derivatives acyclic ethers and their halogenated sulphonated nitrated or nitrosated derivatives >> aromatic ethers and their halogenated sulphonated nitrated or nitrosated derivatives anisole and their derivatives +29093011,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Aromatic ethers and their halogenated, sulphonated, nitrated or nitrosated derivatives :4-chloro-2-nitro anisole" +29093012,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Aromatic ethers and their halogenated, sulphonated, nitrated or nitrosated derivatives :Ortho nitro anisole" +29093019,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Aromatic ethers and their halogenated, sulphonated, nitrated or nitrosated derivatives :Other" +29093020,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Aromatic ethers and their halogenated, sulphonated, nitrated or nitrosated derivatives :Diphenyl oxide" +29093030,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Aromatic ethers and their halogenated, sulphonated, nitrated or nitrosated derivatives :Musk ambrette" +29093090,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Aromatic ethers and their halogenated, sulphonated, nitrated or nitrosated derivatives :Other" +29094100,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::2,2-Oxydiethanol (diethylene glycol, digol);" +29094300,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Monobutyl ethers of ethylene glycol or of 
diethylene glycol" +29094400,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Other monoalkylethers of ethylene glycol or of diethylene glycol" +29094900,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Other" +29094910,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Phenoxy ethanol" +29094920,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:1-(4-phenoxyphenoxy) propan-2-ol" +29094930,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Meta phenoxy benzyl alcohol (MPBA)" +29094990,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Other" +29095000,ethers ether alcohol peroxides ether peroxides acetal and hemiacetal peroxides ketone peroxides whether or not chemically defined and their halogenated sulphonated nitrated or nitrosated derivatives acyclic ethers and their halogenated sulphonated nitrated or nitrosated derivatives >> and their halogenated sulphonated nitrated or nitrosated derivatives +29095010,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Ether-phenols, ether-alcohol-phenols and their halogenated, sulphonated, nitrated or nitrosated derivatives :Guaiacol" +29095020,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Ether-phenols, ether-alcohol-phenols and their halogenated, sulphonated, nitrated or nitrosated derivatives :Isoeugenol" +29095030,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Ether-phenols, ether-alcohol-phenols and their halogenated, sulphonated, nitrated or nitrosated derivatives :Potassium guaiacol sulphonate" +29095040,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Ether-phenols, ether-alcohol-phenols and their halogenated, sulphonated, nitrated or nitrosated derivatives :4-methoxy phenol (mono methyl ether of hydroquinone)" +29095050,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL 
PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Ether-phenols, ether-alcohol-phenols and their halogenated, sulphonated, nitrated or nitrosated derivatives :Butylated hydorxyanisole (BHA)" +29095090,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Ether-phenols, ether-alcohol-phenols and their halogenated, sulphonated, nitrated or nitrosated derivatives :Other" +29096000,"ETHERS, ETHER-ALCOHOLS, ETHER-PHENOLS, ETHER-ALCOHOL-PHENOLS, ALCOHOL PEROXIDES, ETHER PEROXIDES, KETONE PEROXIDES (WHETHER OR NOT CHEMICALLY DEFINED), AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Alcohol peroxides, ether peroxides, ketone peroxides and their halogenated, sulphonated, nitrated or nitrosated derivatives" +29100000,epoxides epoxyalcohols epoxyphenols and expoxyethers with a ring and their halogenated sulphonated nitrated or nitrosated derivatives +29101000,"EPOXIDES, EPOXYALCOHOLS, EPOXYPHENOLS AND EXPOXYETHERS, WITH A THREE-MEMBERED RING, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Oxirane (ethylene oxide)" +29102000,"EPOXIDES, EPOXYALCOHOLS, EPOXYPHENOLS AND EXPOXYETHERS, WITH A THREE-MEMBERED RING, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Methyloxirane (propylene oxide)" +29103000,"EPOXIDES, EPOXYALCOHOLS, EPOXYPHENOLS AND EXPOXYETHERS, WITH A THREE-MEMBERED RING, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::1-chloro-2,3-expoxypropane (epichlorohydrin)" +29104000,"EPOXIDES, EPOXYALCOHOLS, EPOXYPHENOLS AND EXPOXYETHERS, WITH A THREE-MEMBERED RING, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Dieldrin (ISO, INN)" +29105000,"EPOXIDES, EPOXYALCOHOLS, EPOXYPHENOLS AND EXPOXYETHERS, WITH A THREE-MEMBERED RING, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Endrin (ISO)" +29109000,"EPOXIDES, EPOXYALCOHOLS, EPOXYPHENOLS AND EXPOXYETHERS, WITH A THREE-MEMBERED RING, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Other" +29110000,acetals and hemiacetals whether or not with other oxygen function and their halogenated sulphonated nitrated or nitrosated derivatives acetals and hemiacetals whether or not with other oxygen function and their halogenated sulphonated nitrated or nitrosated derivatives +29110010,"ACETALS AND HEMIACETALS, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Acetals and hemiacetals, whether or not with other oxygen function, and their halogenated, sulphonated, nitrated or nitrosated derivatives :Acetals and hemiacetals, whether or not with other oxygen function" +29110090,"ACETALS AND HEMIACETALS, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Acetals and hemiacetals, whether or not with other oxygen function, and their halogenated, sulphonated, nitrated or nitrosated derivatives :Other(OLD tariff)" +29121100,"ALDEHYDES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION; CYCLIC POLYMERS OF ALDEHYDES; PARAFORMALDEHYDE::Methanal (formaldehyde)(OLD tariff)" +29121200,"ALDEHYDES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION; CYCLIC POLYMERS OF ALDEHYDES; PARAFORMALDEHYDE::Ethanal (acetaldehyde)(OLD tariff)" 
+29121910,"ALDEHYDES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION; CYCLIC POLYMERS OF ALDEHYDES; PARAFORMALDEHYDE:Other :Crotonaldehyde(OLD tariff)" +29121920,"ALDEHYDES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION; CYCLIC POLYMERS OF ALDEHYDES; PARAFORMALDEHYDE:Other :Heptaldehyde (heptanal)(OLD tariff)" +29121930,"ALDEHYDES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION; CYCLIC POLYMERS OF ALDEHYDES; PARAFORMALDEHYDE:Other :Glyoxal(OLD tariff)" +29121990,"ALDEHYDES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION; CYCLIC POLYMERS OF ALDEHYDES; PARAFORMALDEHYDE:Other :Other(OLD tariff)" +29122100,"ALDEHYDES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION; CYCLIC POLYMERS OF ALDEHYDES; PARAFORMALDEHYDE::Benzaldehyde(OLD tariff)" +29122910,"ALDEHYDES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION; CYCLIC POLYMERS OF ALDEHYDES; PARAFORMALDEHYDE:Other :Cinnamic aldehyde(OLD tariff)" +29122920,"ALDEHYDES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION; CYCLIC POLYMERS OF ALDEHYDES; PARAFORMALDEHYDE:Other :Phenyl acetaldehyde(OLD tariff)" +29122930,"ALDEHYDES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION; CYCLIC POLYMERS OF ALDEHYDES; PARAFORMALDEHYDE:Other :Hexyl cinnamic aldehyde(OLD tariff)" +29122990,"ALDEHYDES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION; CYCLIC POLYMERS OF ALDEHYDES; PARAFORMALDEHYDE:Other :Other(OLD tariff)" +29124100,"ALDEHYDES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION; CYCLIC POLYMERS OF ALDEHYDES; PARAFORMALDEHYDE::Vanillin (4-hydroxy-3-methoxybenzaldehyde)(OLD tariff)" +29124200,"ALDEHYDES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION; CYCLIC POLYMERS OF ALDEHYDES; PARAFORMALDEHYDE::Ethylvanillin (3-ethoxy-4-hydroxy-benzaldehyde)(OLD tariff)" +29124910,"ALDEHYDES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION; CYCLIC POLYMERS OF ALDEHYDES; PARAFORMALDEHYDE:Other :Anisic aldehyde (Anisaldehyde)(OLD tariff)" +29124920,"ALDEHYDES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION; CYCLIC POLYMERS OF ALDEHYDES; PARAFORMALDEHYDE:Other :Heliotropin (piperonyl aldehyde)(OLD tariff)" +29124930,"ALDEHYDES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION; CYCLIC POLYMERS OF ALDEHYDES; PARAFORMALDEHYDE:Other :Thiacetazone(OLD tariff)" +29124940,"ALDEHYDES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION; CYCLIC POLYMERS OF ALDEHYDES; PARAFORMALDEHYDE:Other :3,4,5-trimethoxy-benzaldehyde(OLD tariff)" +29124991,"ALDEHYDES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION; CYCLIC POLYMERS OF ALDEHYDES; PARAFORMALDEHYDE:Other :Aldehyde - alcohols(OLD tariff)" +29124999,"ALDEHYDES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION; CYCLIC POLYMERS OF ALDEHYDES; PARAFORMALDEHYDE:Other :Other(OLD tariff)" +29125000,"ALDEHYDES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION; CYCLIC POLYMERS OF ALDEHYDES; PARAFORMALDEHYDE::Cyclic polymers of aldehydes(OLD tariff)" +29126000,"ALDEHYDES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION; CYCLIC POLYMERS OF ALDEHYDES; PARAFORMALDEHYDE::Paraformaldehyde(OLD tariff)" +29130000,halogenated sulphonated nitrated or nitrosated derivatives of products of heading 2912 halogenated sulphonated nitrated or nitrosated derivatives of products of heading 2912 +29130010,"HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF PRODUCTS OF HEADING 2912:Halogenated, sulphonated, nitrated or nitrosated derivatives of products of heading 2912 :Ortho-chloro-benzaldehyde" +29130090,"HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES OF PRODUCTS OF HEADING 2912:Halogenated, sulphonated, nitrated or nitrosated derivatives of products of heading 2912 :Other" +29140000,ketones and quinones whether or not with other oxygen function 
and their halogenated sulphonated nitrated or nitrosated derivatives acyclic ketones without other oxygen function +29141100,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Acetone" +29141200,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Butanone (methyl ethyl ketone)" +29141300,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::4-methylpentan-2-one (methyl isobutyl ketone)" +29141900,ketones and quinones whether or not with other oxygen function and their halogenated sulphonated nitrated or nitrosated derivatives acyclic ketones without other oxygen function >> other other +29141910,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Isophoron" +29141990,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Other" +29142200,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Cyclohexanone and methyl-cyclohexanones" +29142300,ketones and quinones whether or not with other oxygen function and their halogenated sulphonated nitrated or nitrosated derivatives acyclic ketones without other oxygen function >> ionones and methylionones ionones and methylionones +29142310,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Ionones and methylionones :Beta-ionone" +29142320,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Ionones and methylionones :Pseudo ionone" +29142390,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Ionones and methylionones :Other" +29142900,ketones and quinones whether or not with other oxygen function and their halogenated sulphonated nitrated or nitrosated derivatives acyclic ketones without other oxygen function >> other other +29142910,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :L-caravone" +29142921,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :NATURAL" +29142922,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Camphor Synthetic" +29142930,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Pentyl-2-cyclopenten - 1 -one" +29142940,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Cyclohexane dione" +29142950,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :7-acetyl, 1,2,3,4,5,6,7,8-octahydro,1,6.7 -tetra methyl Napthalene/ 1-(2,3,8,8-tetramethyl - 1,2,3,4,5,6,7,8-octahydronaphthalene-2-yl) ethanone" +29142990,"KETONES AND 
QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Other" +29143100,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Phenylacetone (phenylpropan-2-one)" +29143900,ketones and quinones whether or not with other oxygen function and their halogenated sulphonated nitrated or nitrosated derivatives acyclic ketones without other oxygen function >> other +29143910,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Aceto phenone" +29143920,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Benzanthrone" +29143930,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Benzophenone" +29143940,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Dibenzanthrone (violanthrone)" +29143990,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Other" +29144000,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Ketone-alcohols and ketone-aldehydes" +29145000,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Ketone-phenols and ketones with other oxygen function" +29146100,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Anthraquinone" +29146200,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Coenzyme Q10 (ubidecarenone (INN))" +29146900,ketones and quinones whether or not with other oxygen function and their halogenated sulphonated nitrated or nitrosated derivatives acyclic ketones without other oxygen function >> other +29146910,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :1,4- dihydroxy anthraquinone (quinizarin)" +29146920,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Methyl anthraquinone" +29146990,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Other" +29147010,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Halogenated, sulphonated, nitrated or nitrosated derivatives :1-chloro anthra quinone(OLD tariff)" +29147020,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Halogenated, sulphonated, nitrated or nitrosated derivatives :Musk ketone(OLD tariff)" +29147090,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Halogenated, sulphonated, nitrated or nitrosated derivatives :Other(OLD tariff)" 
+29147100,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Chlordecone (ISO)" +29147900,ketones and quinones whether or not with other oxygen function and their halogenated sulphonated nitrated or nitrosated derivatives acyclic ketones without other oxygen function >> other +29147910,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:1-chloro anthraquinone" +29147920,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Musk ketone" +29147930,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Tri fluro methyl acetophenone" +29147940,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Chloro-4-(4-chloro phenoxy) acetophenone" +29147950,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Dichloroacetophenone" +29147990,"KETONES AND QUINONES, WHETHER OR NOT WITH OTHER OXYGEN FUNCTION, AND THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Other" +29150000,saturated acyclic monocarboxylic acids and their anhydrides halides peroxides and peroxyacids their halogenated sulphonated nitrated or nitrosated derivatives formic acid its salts and esters +29151100,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Formic acid" +29151200,saturated acyclic monocarboxylic acids and their anhydrides halides peroxides and peroxyacids their halogenated sulphonated nitrated or nitrosated derivatives formic acid its salts and esters >> salts of formic acid +29151210,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Salts of formic acid :Sodium formate" +29151290,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Salts of formic acid :Other" +29151300,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Esters of formic acid" +29152100,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Acetic acid" +29152400,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Acetic anhydride" +29152900,saturated acyclic monocarboxylic acids and their anhydrides halides peroxides and peroxyacids their halogenated sulphonated nitrated or nitrosated derivatives formic acid its salts and esters >> other +29152910,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Calcium acetate" +29152920,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; 
THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Magnesium acetate" +29152930,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Manganese acetate" +29152990,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Other" +29153100,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Ethyl acetate" +29153200,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Vinyl acetate" +29153300,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::n-Butyl acetate" +29153600,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Dinoseb (ISO) acetate" +29153900,saturated acyclic monocarboxylic acids and their anhydrides halides peroxides and peroxyacids their halogenated sulphonated nitrated or nitrosated derivatives formic acid its salts and esters >> other +29153910,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Benzyl acetate" +29153920,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Bornyl acetate and iso bornyl acetate" +29153930,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Linalyl acetate" +29153940,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Methyl acetate" +29153950,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Phenyl propyl acetate" +29153960,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Terpinyl acetate" +29153970,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Ortho tertiary butyl cyclohexyl acetate" +29153980,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Para tertiary butyl cyclohexyl acetate" +29153990,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Other(OLD tariff)" +29153991,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Methyl cyclohexyl acetate" 
+29153992,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Ethelene glycol mono ethyl ether acetate" +29153999,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Other" +29154000,saturated acyclic monocarboxylic acids and their anhydrides halides peroxides and peroxyacids their halogenated sulphonated nitrated or nitrosated derivatives formic acid its salts and esters >> mono di or trichloroacetic acids their salts and esters +29154010,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Mono-, di- or trichloroacetic acids, their salts and esters :Monochloroacetic acid, their salts and esters" +29154020,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Mono-, di- or trichloroacetic acids, their salts and esters :Dichloroacetic acid, their salts and esters" +29154030,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Mono-, di- or trichloroacetic acids, their salts and esters :Trichloroacetic acid, their salts and esters" +29155000,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Propionic acid, its salts and esters" +29156000,saturated acyclic monocarboxylic acids and their anhydrides halides peroxides and peroxyacids their halogenated sulphonated nitrated or nitrosated derivatives formic acid its salts and esters >> butanoic acids pentanoic acids their salts and esters +29156010,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Butanoic acids, pentanoic acids, their salts and esters :Butanoic acids, their salts and esters" +29156020,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Butanoic acids, pentanoic acids, their salts and esters :Pentanoic acids, their salts and esters" +29157000,saturated acyclic monocarboxylic acids and their anhydrides halides peroxides and peroxyacids their halogenated sulphonated nitrated or nitrosated derivatives formic acid its salts and esters >> palmitic acid stearic acid their salts and esters +29157010,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Palmitic acid, stearic acid, their salts and esters :Palmitic acid" +29157020,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Palmitic acid, stearic acid, their salts and esters :Stearic acid" +29157030,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Palmitic acid, stearic acid, their salts and esters :Glycerol monostearate" +29157040,"SATURATED 
ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Palmitic acid, stearic acid, their salts and esters :H.C.O.Fatty acid (including 12-hydroxy stearic acid)" +29157050,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Palmitic acid, stearic acid, their salts and esters :D.C.O. Fatty acid" +29157090,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Palmitic acid, stearic acid, their salts and esters :Other" +29159000,saturated acyclic monocarboxylic acids and their anhydrides halides peroxides and peroxyacids their halogenated sulphonated nitrated or nitrosated derivatives formic acid its salts and esters >> other +29159010,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Acetyl chloride" +29159020,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Octoic acid (caprylic acid)(OLD tariff)" +29159030,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Hexoic acid (caproic acid)(OLD tariff)" +29159040,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Pivoloyl chloride" +29159050,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :N-valeryl chloride" +29159060,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :N-Octanoyl chloride" +29159070,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Neodecanoyl chloride" +29159090,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Other(OLD tariff)" +29159091,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Hexoic acid (caproic acid)" +29159092,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Octoic acid (caprylic acid)" +29159093,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Trifluoro acetic acid" +29159094,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Ethyl difluoro acetate" +29159095,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR 
HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Ethyl trifluoro acetate" +29159099,"SATURATED ACYCLIC MONOCARBOXYLIC ACIDS AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Other" +29160000,unsaturated acyclic monocarboxylic acids cyclic monocarboxylic acids their anhydrides halides peroxides and peroxyacids their nitrated halogenated sulphonated or nitrosated derivatives unsaturated acyclic monocarboxylic acids their anhydrides halides peroxides peroxyacids their derivatives +29161100,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Acrylic acid and its salts" +29161200,unsaturated acyclic monocarboxylic acids cyclic monocarboxylic acids their anhydrides halides peroxides and peroxyacids their nitrated halogenated sulphonated or nitrosated derivatives unsaturated acyclic monocarboxylic acids their anhydrides halides peroxides peroxyacids their derivatives >> esters of acrylic acid +29161210,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Esters of acrylic acid :Butyl acrylate" +29161290,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Esters of acrylic acid :Other" +29161300,unsaturated acyclic monocarboxylic acids cyclic monocarboxylic acids their anhydrides halides peroxides and peroxyacids their nitrated halogenated sulphonated or nitrosated derivatives unsaturated acyclic monocarboxylic acids their anhydrides halides peroxides peroxyacids their derivatives >> methacrylic acid and its salts +29161310,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Methacrylic acid and its salts :Methacrylic acid" +29161320,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Methacrylic acid and its salts :Salts of methacrylic acid" +29161400,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Esters of methacrylic acid" +29161500,unsaturated acyclic monocarboxylic acids cyclic monocarboxylic acids their anhydrides halides peroxides and peroxyacids their nitrated halogenated sulphonated or nitrosated derivatives unsaturated acyclic monocarboxylic acids their anhydrides halides peroxides peroxyacids their derivatives >> oleic linoleic or linolenic acids their salts and esters +29161510,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Oleic, linoleic or linolenic acids, their salts and esters :Oleic acid" +29161590,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Oleic, 
linoleic or linolenic acids, their salts and esters :Other" +29161600,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Binapacryl (ISO):Binapacryl (ISO)" +29161900,unsaturated acyclic monocarboxylic acids cyclic monocarboxylic acids their anhydrides halides peroxides and peroxyacids their nitrated halogenated sulphonated or nitrosated derivatives unsaturated acyclic monocarboxylic acids their anhydrides halides peroxides peroxyacids their derivatives >> other +29161910,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Undecylenic acid" +29161920,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Bismuth compounds of unsaturated acyclic monoacids" +29161930,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Potassium compounds of unsaturated acyclic monoacids" +29161940,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Sodium compounds of unsaturated acyclic monoacids" +29161950,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Esters of unsaturated acyclic monoacids not elsewhere specified" +29161960,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Sorbic acid" +29161970,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Erucic acid" +29161990,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Other" +29162000,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Cyclanic, cyclenic or cycloterpenic, mono carboxylic acids, their anhydrides, halides, peroxides, peroxyacids and their derivatives:Cyclanic, cyclenic or cycloterpenic monocarboxylic acids, their anhydrides, halides, peroxides, peroxyacids and their derivatives" +29162010,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Cyclanic, cyclenic or cycloterpenic, mono carboxylic acids, their anhydrides, halides, peroxides, peroxyacids and their derivatives:D.V acid chloride/cypermethric acid chloride" +29162020,unsaturated acyclic monocarboxylic acids cyclic monocarboxylic acids their anhydrides halides peroxides and peroxyacids their nitrated 
halogenated sulphonated or nitrosated derivatives unsaturated acyclic monocarboxylic acids their anhydrides halides peroxides peroxyacids their derivatives >> cyclanic cyclenic or cycloterpenic monocarboxylic acids their anhydrides halides peroxides peroxyacids and their derivatives >> bifenthrin iso +29162090,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Cyclanic, cyclenic or cycloterpenic, mono carboxylic acids, their anhydrides, halides, peroxides, peroxyacids and their derivatives:Other" +29163100,unsaturated acyclic monocarboxylic acids cyclic monocarboxylic acids their anhydrides halides peroxides and peroxyacids their nitrated halogenated sulphonated or nitrosated derivatives unsaturated acyclic monocarboxylic acids their anhydrides halides peroxides peroxyacids their derivatives >> benzoic acid its salts and esters +29163110,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Benzoic acid, its salts and esters :Benzoic acid" +29163120,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Benzoic acid, its salts and esters :Benzyl benzoate" +29163130,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Benzoic acid, its salts and esters :Methyl benzoate" +29163140,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Benzoic acid, its salts and esters :Sodium benzoate" +29163150,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Benzoic acid, its salts and esters :Benzocaine (ethylpara-amino benzoate)" +29163160,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Benzoic acid, its salts and esters :Orthochloro benzoic acid" +29163190,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Benzoic acid, its salts and esters :Other" +29163200,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Benzoyl peroxide and benzoyl chloride" +29163400,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Phenylacetic acid and its salts" +29163900,unsaturated acyclic monocarboxylic acids cyclic monocarboxylic acids their anhydrides halides peroxides and peroxyacids their nitrated halogenated sulphonated or nitrosated derivatives unsaturated acyclic monocarboxylic acids their anhydrides halides peroxides peroxyacids 
their derivatives >> other +29163910,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Cinnamic acid" +29163920,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Bismuth compounds of aromatic monoacids" +29163930,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Potassium compounds of aromatic monoacids" +29163940,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Sodium compounds of aromatic monoacids" +29163950,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Esters of aromatic monoacids not elsewhere specified" +29163960,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Dichlorophenyl acetyl chloride" +29163990,"UNSATURATED ACYCLIC MONOCARBOXYLIC ACIDS, CYCLIC MONOCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Other" +29170000,polycarboxylic acids their anhydrides halides peroxides and peroxyacids their halogenated sulphonated nitrated or nitrosated derivatives acyclic polycarboxylic acids their anhydrides halides peroxides peroxyacids and their derivatives +29171100,polycarboxylic acids their anhydrides halides peroxides and peroxyacids their halogenated sulphonated nitrated or nitrosated derivatives acyclic polycarboxylic acids their anhydrides halides peroxides peroxyacids and their derivatives >> oxalic acid its salts and esters +29171110,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Oxalic acid, its salts and esters :Oxalic acid" +29171120,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Oxalic acid, its salts and esters :Calcium oxalate" +29171130,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Oxalic acid, its salts and esters :Strontium oxalate" +29171140,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Oxalic acid, its salts and esters :Diethyl oxalate" +29171190,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Oxalic acid, its salts and esters :Other" +29171200,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Adipic acid, its salts and esters" +29171300,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; 
THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Azelaic acid, sebacic acid, their salts and esters:Azelaic acid, sebacic acid, their salts and esters" +29171310,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Azelaic acid, sebacic acid, their salts and esters:Sebacic acid" +29171390,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Azelaic acid, sebacic acid, their salts and esters:Other" +29171400,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Maleic anhydride" +29171900,polycarboxylic acids their anhydrides halides peroxides and peroxyacids their halogenated sulphonated nitrated or nitrosated derivatives acyclic polycarboxylic acids their anhydrides halides peroxides peroxyacids and their derivatives >> other +29171910,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Maleic acid" +29171920,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Malonic acid" +29171930,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Succinic acid" +29171940,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Ferrous fumarate" +29171950,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Fumaric acid" +29171960,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Itaconic acid" +29171970,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Ethoxy methylene malonate, diethyl malonate" +29171990,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Other" +29172000,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Cyclanic, cyclenic or cycloterpenic, polycarboxylic acids, their anhydrides, halides, peroxides, peroxyacids and their derivatives" +29173200,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Dioctyl orthophthalates" +29173300,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Dinonyl or didecyl orthophthalates" +29173400,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Other esters of orthophthalic acid" +29173500,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Phthalic anhydride" +29173600,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND 
PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Terephthalic acid and its salts" +29173700,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Dimethyl terephthalate" +29173900,polycarboxylic acids their anhydrides halides peroxides and peroxyacids their halogenated sulphonated nitrated or nitrosated derivatives acyclic polycarboxylic acids their anhydrides halides peroxides peroxyacids and their derivatives >> other +29173910,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Dibutyl phthalate" +29173920,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Dioctyl phthalate" +29173930,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Phthalic acid" +29173940,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Dimethyl phthalate" +29173950,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Trimellitic anhydride" +29173960,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Isophthalic acid" +29173990,"POLYCARBOXYLIC ACIDS, THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Other" +29180000,carboxylic acids with additional oxygen function and their anhydrides halides +29181110,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Lactic acid, its salts and esters :Lactic acid(OLD tariff)" +29181120,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Lactic acid, its salts and esters :Calcium lactate(OLD tariff)" +29181190,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Lactic acid, its salts and esters :Other(OLD tariff)" +29181200,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Tartaric acid" +29181300,carboxylic acids with additional oxygen function and their anhydrides halides >> salts and esters of tartaric acid +29181310,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Salts and esters of tartaric acid :Potassium bitartrate" +29181320,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Salts and esters of tartaric acid :Metroprolol tartrate" +29181390,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR 
HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Salts and esters of tartaric acid :Other" +29181400,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Citric acid" +29181500,carboxylic acids with additional oxygen function and their anhydrides halides >> salts and esters of citric acid +29181510,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Salts and esters of citric acid :Potassium citrate" +29181520,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Salts and esters of citric acid :Sodium citrate" +29181530,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Salts and esters of citric acid :Bismuth citrate" +29181540,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Salts and esters of citric acid :Disodium hydrogen citrate" +29181550,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Salts and esters of citric acid :Ferric ammonium citrate" +29181590,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Salts and esters of citric acid :Other" +29181600,carboxylic acids with additional oxygen function and their anhydrides halides >> gluconic acid its salts and esters +29181610,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Gluconic acid, its salts and esters :Calcium gluconate" +29181620,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Gluconic acid, its salts and esters :Ferrous gluconate" +29181690,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Gluconic acid, its salts and esters :Other" +29181700,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::2, 2-Diphenyl-2-hydroxyacetic acid (benzilic acid)" +29181800,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Chlorobenzilate (ISO)" +29181900,carboxylic acids with additional oxygen function and their anhydrides halides >> other +29181910,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other;:Benzeneacetic acid, alpha-hydroxy-alpha-phenyl-" +29181920,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, 
HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other;:Cholic acid" +29181930,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other;:Ricinoleic acid" +29181990,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other;:Other" +29182100,carboxylic acids with additional oxygen function and their anhydrides halides >> salicylic acid and its salts +29182110,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Salicylic acid and its salts :Salicylic acid" +29182120,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Salicylic acid and its salts :Sodium salicylate" +29182190,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Salicylic acid and its salts :Other" +29182200,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::O-Acetylsalicylic acid, its salts and esters" +29182300,carboxylic acids with additional oxygen function and their anhydrides halides >> other esters of salicylic acid and their salts +29182310,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other esters of salicylic acid and their salts :Methyl salicylate" +29182320,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other esters of salicylic acid and their salts :Amino salicylate" +29182330,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other esters of salicylic acid and their salts :Salicylamide" +29182340,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other esters of salicylic acid and their salts :Benzyl salicylate" +29182390,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other esters of salicylic acid and their salts :Other" +29182900,carboxylic acids with additional oxygen function and their anhydrides halides >> other +29182910,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Gallic acid" +29182920,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Beta hydroxy napthoic acid" +29182930,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, 
HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Propyl gallate" +29182990,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Other" +29183000,carboxylic acids with additional oxygen function and their anhydrides halides >> carboxylic acids with aldehyde or ketone function but without other oxygen function their anhydrides halides peroxides peroxyacids and their derivatives +29183010,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Carboxylic acids with aldehyde or ketone function but without other oxygen function, their anhydrides, halides, peroxides, peroxyacids and their derivatives:Levulinic acid" +29183020,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Carboxylic acids with aldehyde or ketone function but without other oxygen function, their anhydrides, halides, peroxides, peroxyacids and their derivatives:Ethyl aceto acetate (acetoacetic ester)" +29183030,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Carboxylic acids with aldehyde or ketone function but without other oxygen function, their anhydrides, halides, peroxides, peroxyacids and their derivatives:Nalidixic acid" +29183040,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Carboxylic acids with aldehyde or ketone function but without other oxygen function, their anhydrides, halides, peroxides, peroxyacids and their derivatives:Methyl aceto acetate" +29183050,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Carboxylic acids with aldehyde or ketone function but without other oxygen function, their anhydrides, halides, peroxides, peroxyacids and their derivatives:Fluoro benzoyl butyric acid" +29183090,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Carboxylic acids with aldehyde or ketone function but without other oxygen function, their anhydrides, halides, peroxides, peroxyacids and their derivatives:Other" +29189100,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::2, 4, 5-T(ISO) (2, 4, 5-trichlorophenoxyacetic acid), its salts and esters" +29189900,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Other" +29189910,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Sodium phenoxy acetate" +29189920,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR 
HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Methyl (E)-2-(2-chloro methyl) phenyl)-3-methoxyacrylate" +29189990,"CARBOXYLIC ACIDS WITH ADDITIONAL OXYGEN FUNCTION AND THEIR ANHYDRIDES, HALIDES, PEROXIDES AND PEROXYACIDS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Other" +29190000,lactophosphates their halogenated sulphonated nitrated or nitrosated derivatives +29191000,"PHOSPHORIC ESTERS AND THEIR SALTS, INCLUDING LACTOPHOSPHATES; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Tris(2,3-dibromopropyl) phosphate" +29199000,lactophosphates their halogenated sulphonated nitrated or nitrosated derivatives >> other +29199010,"PHOSPHORIC ESTERS AND THEIR SALTS, INCLUDING LACTOPHOSPHATES; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Glycerophosphoric acid" +29199020,"PHOSPHORIC ESTERS AND THEIR SALTS, INCLUDING LACTOPHOSPHATES; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Calcium glycerophosphate" +29199030,"PHOSPHORIC ESTERS AND THEIR SALTS, INCLUDING LACTOPHOSPHATES; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Iron glycerophosphate" +29199040,"PHOSPHORIC ESTERS AND THEIR SALTS, INCLUDING LACTOPHOSPHATES; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Sodium glycerophosphate" +29199050,"PHOSPHORIC ESTERS AND THEIR SALTS, INCLUDING LACTOPHOSPHATES; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Tricresyl phosphate" +29199090,"PHOSPHORIC ESTERS AND THEIR SALTS, INCLUDING LACTOPHOSPHATES; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Other" +29200000,metals excluding esters of hydrogen halides and their salts their halogenated sulphonated nitrated or nitrosated derivatives thiophosphoric esters phosphorothioates and their salts their halogenated sulphonated nitrated or nitrosated derivatives +29201100,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Parathion (ISO) and parathion-methyl (ISO)" +29201900,metals excluding esters of hydrogen halides and their salts their halogenated sulphonated nitrated or nitrosated derivatives thiophosphoric esters phosphorothioates and their salts their halogenated sulphonated nitrated or nitrosated derivatives >> other +29201910,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Phosphorothioic acid, S[2 -(diethyl amino) ethyl] O, O-Diethyl ester(OLD tariff)" +29201920,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Thiophosphoric ester (phosphorothioates) and their salts, their halogenated, sulphonated, nitrated or nitrosated derivatives" +29201990,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other:Other" +29202100,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Dimethyl phosphite" +29202200,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED 
OR NITROSATED DERIVATIVES::Diethyl phosphite" +29202300,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Trimethyl phosphite" +29202400,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Triethyl phosphite" +29202900,metals excluding esters of hydrogen halides and their salts their halogenated sulphonated nitrated or nitrosated derivatives thiophosphoric esters phosphorothioates and their salts their halogenated sulphonated nitrated or nitrosated derivatives >> other +29202910,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Dimethyl sulphate" +29202920,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Diethyl sulphate" +29202930,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Tris (2, 3 Dibromopropyl) phosphate" +29202990,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Other" +29203000,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES::Endosulfan (ISO)" +29209000,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Other" +29209010,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Diethyl sulphate(OLD tariff)" +29209020,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Dimethyl sulphate(OLD tariff)" +29209030,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Tris (2,3 Di-bromopropyl) phosphate(OLD tariff)" +29209041,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Trimethyl Phosphite(OLD tariff)" +29209042,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Triethyl Phosphite(OLD tariff)" +29209043,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Dimethyl Phosphite(OLD tariff)" +29209044,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Diethyl Phosphite(OLD tariff)" +29209045,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR 
HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :O, O, Dimethyl Methyl Phosphonate(OLD tariff)" +29209047,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Phosphonic Acid, Methyl-compound with (aminoimino methyl) urea (11)(OLD tariff)" +29209048,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :1-Propanaminium N, N, N-trimethyl 3-[1-oxo-9-octadecenyl) amino]-,(Z)- methyl methylphosphonate(OLD tariff)" +29209051,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Phosphonic acid, [methyl-bis (5-ethyl-2-methyl-2- oxido-1, 3,2-dioxaphosphorinan-5-yl) methyl] ester(OLD tariff)" +29209052,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Phosphonic acid, [methyl- (5-ethyl-2-methyl 2-oxido- 1,3,2 dioxaphosphorinan-5-yl) methyl] ester(OLD tariff)" +29209053,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Phosphonic acid, propyl-dimethyl ester(OLD tariff)" +29209054,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Phosphonous acid, methyl-diethyl ester(OLD tariff)" +29209055,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Phosphonic acid, ethyl-(OLD tariff)" +29209056,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Phosphonic acid, propyl-(OLD tariff)" +29209057,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Phosphinic acid, methyl-(OLD tariff)" +29209058,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Phosphonochloridic acid, methyl-, methyl ester(OLD tariff)" +29209061,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Phosphonothioic dichloride, ethyl-(OLD tariff)" +29209062,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Phosphonic acid, methyl-(OLD tariff)" +29209063,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Phosphonic acid, methyl-, dimethyl ester(OLD tariff)" +29209064,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Phosphonic 
dichloride, methyl-(OLD tariff)" +29209065,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Phosphonous dichloride, methyl-(OLD tariff)" +29209066,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Phosphonic acid, ethyl-, diethyl ester(OLD tariff)" +29209099,"ESTERS OF OTHER INORGANICACIDS OF NON-METALS(EXCLUDING ESTERS OF HYDROGEN HALIDES) AND THEIR SALTS; THEIR HALOGENATED, SULPHONATED, NITRATED OR NITROSATED DERIVATIVES:Other :Other(OLD tariff)" +29210000,amine function compounds acyclic monoamines and their derivatives salts thereof +29211100,amine function compounds acyclic monoamines and their derivatives salts thereof >> methylamine di or trimethylamine and their salts +29211110,"AMINE- FUNCTION COMPOUNDS:Methylamine, di- or trimethylamine and their salts :Dimethyl formide" +29211190,"AMINE- FUNCTION COMPOUNDS:Methylamine, di- or trimethylamine and their salts :Other" +29211200,"AMINE- FUNCTION COMPOUNDS::2-(N, N-Dimethylamino)ethylchloride hydrochloride" +29211300,"AMINE- FUNCTION COMPOUNDS::2-(N, N-Diethylamino)ethylchloride hydrochloride" +29211400,"AMINE- FUNCTION COMPOUNDS::2-(N, N-Diisopropylamino)ethylchloride hydrochloride" +29211900,amine function compounds acyclic monoamines and their derivatives salts thereof >> other +29211910,"AMINE- FUNCTION COMPOUNDS:Other:2-Chloro N, N-Diisopropyl ethylamine" +29211911,"AMINE- FUNCTION COMPOUNDS:Other:2-Chloro N,N-Di-isopropyl ethylamine(OLD tariff)" +29211914,"AMINE- FUNCTION COMPOUNDS:Other:Ethanamine, 2-Chloro-N, N-dimethyl(OLD tariff)" +29211920,"AMINE- FUNCTION COMPOUNDS:Other:2-Chloro N, N-Dimethyl ethanamine" +29211990,AMINE- FUNCTION COMPOUNDS:Other:Other +29212100,AMINE- FUNCTION COMPOUNDS::Ethylenediamine and its salts +29212200,AMINE- FUNCTION COMPOUNDS::Hexamethylenediamine and its salts +29212900,amine function compounds acyclic monoamines and their derivatives salts thereof >> other +29212910,AMINE- FUNCTION COMPOUNDS:Other :Hexamethylene tetramine (hexamine) not put +29212920,AMINE- FUNCTION COMPOUNDS:Other :Trimethylene triniframine +29212940,AMINE- FUNCTION COMPOUNDS:Other :Tetradecene(OLD tariff) +29212990,AMINE- FUNCTION COMPOUNDS:Other :Other +29213000,amine function compounds acyclic monoamines and their derivatives salts thereof >> cyclanic cyclenic or cycloterpenic mono or polyamines and their derivatives salts thereof +29213010,"AMINE- FUNCTION COMPOUNDS:Cyclanic, cyclenic or cycloterpenic mono or polyamines, and their derivatives; salts thereof :Cyclohexylamine" +29213090,"AMINE- FUNCTION COMPOUNDS:Cyclanic, cyclenic or cycloterpenic mono or polyamines, and their derivatives; salts thereof :Other" +29214100,amine function compounds acyclic monoamines and their derivatives salts thereof >> aniline and its salts +29214110,AMINE- FUNCTION COMPOUNDS:Aniline and its salts :Aniline +29214120,AMINE- FUNCTION COMPOUNDS:Aniline and its salts :Aniline hydrochloride +29214190,AMINE- FUNCTION COMPOUNDS:Aniline and its salts :Other +29214200,amine function compounds acyclic monoamines and their derivatives salts thereof >> aniline derivatives and their salts para chloroaniline ortho chloro paranitroaniline dichloroaniline 2 paranitroaniline +29214211,AMINE- FUNCTION COMPOUNDS:Aniline derivatives and their salts:Para chloroaniline +29214212,AMINE- FUNCTION COMPOUNDS:Aniline 
derivatives and their salts:Ortho chloro paranitroaniline +29214213,AMINE- FUNCTION COMPOUNDS:Aniline derivatives and their salts:Dichloroaniline +29214214,"AMINE- FUNCTION COMPOUNDS:Aniline derivatives and their salts:2, 6-dichloro paranitroaniline" +29214215,AMINE- FUNCTION COMPOUNDS:Aniline derivatives and their salts:2-4-5-trichloroaniline +29214221,AMINE- FUNCTION COMPOUNDS:Aniline derivatives and their salts:N-Benzyl-N-ethylaniline +29214222,"AMINE- FUNCTION COMPOUNDS:Aniline derivatives and their salts:N,N-Diethylaniline" +29214223,"AMINE- FUNCTION COMPOUNDS:Aniline derivatives and their salts:N,N-Dimethylaniline" +29214224,AMINE- FUNCTION COMPOUNDS:Aniline derivatives and their salts:N-Ethyl aniline +29214225,AMINE- FUNCTION COMPOUNDS:Aniline derivatives and their salts:Meta nitroaniline +29214226,AMINE- FUNCTION COMPOUNDS:Aniline derivatives and their salts:Para nitroaniline +29214231,"AMINE- FUNCTION COMPOUNDS:Aniline derivatives and their salts:2-amino 3, 5 xylne sulphonic acid(OLD tariff)" +29214232,AMINE- FUNCTION COMPOUNDS:Aniline derivatives and their salts:Benzyl ethyl aniline sulphonic acid(OLD tariff) +29214233,AMINE- FUNCTION COMPOUNDS:Aniline derivatives and their salts:Metanillic acid (meta amino benzene sulphonic acid)(OLD tariff) +29214234,AMINE- FUNCTION COMPOUNDS:Aniline derivatives and their salts:Sulphanillic acid (para aminobenzene sulphonic acid para aniline sulphonic acid)(OLD tariff) +29214235,AMINE- FUNCTION COMPOUNDS:Aniline derivatives and their salts:Ethyl hydroxy ethylaniline(OLD tariff) +29214236,"AMINE- FUNCTION COMPOUNDS:Aniline derivatives and their salts:Methyl dopa (1-alpha methyl-3, 4-dihydroxyphenylaniline)(OLD tariff)" +29214290,AMINE- FUNCTION COMPOUNDS:Aniline derivatives and their salts:Other (OLD tariff) +29214310,"AMINE- FUNCTION COMPOUNDS::N,N-Diethyl toluidine(OLD tariff)" +29214320,"AMINE- FUNCTION COMPOUNDS::N,N-Dimethyl toluidine(OLD tariff)" +29214330,AMINE- FUNCTION COMPOUNDS::Ortho toluidine(OLD tariff) +29214340,AMINE- FUNCTION COMPOUNDS::Meta toluidine(OLD tariff) +29214350,AMINE- FUNCTION COMPOUNDS::Para toluidine(OLD tariff) +29214360,AMINE- FUNCTION COMPOUNDS::2-Chloro-5-toluidine-4-sulphonic acid(OLD tariff) +29214370,AMINE- FUNCTION COMPOUNDS::2-Chloro-4-toluidine-5-sulphonic acid (sodium salt)(OLD tariff) +29214380,AMINE- FUNCTION COMPOUNDS::4-Toluidine-3-sulphonic acid(OLD tariff) +29214390,AMINE- FUNCTION COMPOUNDS::Other(OLD tariff) +29214410,AMINE- FUNCTION COMPOUNDS:Diphenylamine and its derivatives; salts thereof :Diphenylamine(OLD tariff) +29214490,AMINE- FUNCTION COMPOUNDS:Diphenylamine and its derivatives; salts thereof :Other(OLD tariff) +29214500,amine function compounds acyclic monoamines and their derivatives salts thereof >> 2 naphthylamine betanaph thylamine and their derivatives salts thereof alpha naphthylamine phenyl naphthylamine phenyl beta naphthylamine amino sodium naphthionate +29214511,"AMINE- FUNCTION COMPOUNDS:1-Naphthylamine (alpha-naphthylamine), 2-Naphthylamine (betanaph thylamine) and their derivatives; salts thereof :Alpha naphthylamine" +29214512,"AMINE- FUNCTION COMPOUNDS:1-Naphthylamine (alpha-naphthylamine), 2-Naphthylamine (betanaph thylamine) and their derivatives; salts thereof :Phenyl alpha naphthylamine" +29214513,"AMINE- FUNCTION COMPOUNDS:1-Naphthylamine (alpha-naphthylamine), 2-Naphthylamine (betanaph thylamine) and their derivatives; salts thereof :Phenyl beta naphthylamine" +29214514,"AMINE- FUNCTION COMPOUNDS:1-Naphthylamine (alpha-naphthylamine), 2-Naphthylamine (betanaph 
thylamine) and their derivatives; salts thereof :Amino F-acid" +29214515,"AMINE- FUNCTION COMPOUNDS:1-Naphthylamine (alpha-naphthylamine), 2-Naphthylamine (betanaph thylamine) and their derivatives; salts thereof :Aminolineli-R-acid" +29214516,"AMINE- FUNCTION COMPOUNDS:1-Naphthylamine (alpha-naphthylamine), 2-Naphthylamine (betanaph thylamine) and their derivatives; salts thereof :Sodium naphthionate" +29214521,"AMINE- FUNCTION COMPOUNDS:1-Naphthylamine (alpha-naphthylamine), 2-Naphthylamine (betanaph thylamine) and their derivatives; salts thereof :Bronner's acid (2-naphthylamine-6-sulphonic acid)" +29214522,"AMINE- FUNCTION COMPOUNDS:1-Naphthylamine (alpha-naphthylamine), 2-Naphthylamine (betanaph thylamine) and their derivatives; salts thereof :Cleve's acid (1-naphthylamine-6-sulphonic acid)" +29214523,"AMINE- FUNCTION COMPOUNDS:1-Naphthylamine (alpha-naphthylamine), 2-Naphthylamine (betanaph thylamine) and their derivatives; salts thereof :Epsilon acid (1-naphthylamine-3,8-disulphonic acid)" +29214524,"AMINE- FUNCTION COMPOUNDS:1-Naphthylamine (alpha-naphthylamine), 2-Naphthylamine (betanaph thylamine) and their derivatives; salts thereof :Koch's acid (1-naphthylamine-3,6,8-trisulphonic acid)" +29214525,"AMINE- FUNCTION COMPOUNDS:1-Naphthylamine (alpha-naphthylamine), 2-Naphthylamine (betanaph thylamine) and their derivatives; salts thereof :Laurent's acid (1-naphthylamine-5-sulphonic acid)" +29214526,"AMINE- FUNCTION COMPOUNDS:1-Naphthylamine (alpha-naphthylamine), 2-Naphthylamine (betanaph thylamine) and their derivatives; salts thereof :Tobias acid (2-naphthylamine-1-sulphonic acid) " +29214531,"AMINE- FUNCTION COMPOUNDS:1-Naphthylamine (alpha-naphthylamine), 2-Naphthylamine (betanaph thylamine) and their derivatives; salts thereof :Naphthionic acid (1-naphthylamine-4-sulphonic acid)" +29214532,"AMINE- FUNCTION COMPOUNDS:1-Naphthylamine (alpha-naphthylamine), 2-Naphthylamine (betanaph thylamine) and their derivatives; salts thereof :Para tolyl peri acid (para tolyl-1-naphthylamine- 8-sulphonic acid)" +29214533,"AMINE- FUNCTION COMPOUNDS:1-Naphthylamine (alpha-naphthylamine), 2-Naphthylamine (betanaph thylamine) and their derivatives; salts thereof :Phenyl peri acid (phenyl-1-naphthylamine- 8-sulphonic acid)" +29214590,"AMINE- FUNCTION COMPOUNDS:1-Naphthylamine (alpha-naphthylamine), 2-Naphthylamine (betanaph thylamine) and their derivatives; salts thereof :Other" +29214600,"AMINE- FUNCTION COMPOUNDS::Amfetamine (INN), benzfetamine (INN) dexamfetamine (INN), etilamfetamine (INN) fencamfamin(INN), lefetamine (INN), levamfetamine (INN), mefenorex (INN) and phentermine (INN); salts thereof" +29214900,amine function compounds acyclic monoamines and their derivatives salts thereof >> other +29214910,AMINE- FUNCTION COMPOUNDS:Other :Xylidines +29214920,AMINE- FUNCTION COMPOUNDS:Other :Para cumidine +29214990,AMINE- FUNCTION COMPOUNDS:Other :Other +29215100,amine function compounds acyclic monoamines and their derivatives salts thereof >> o m diaminotoluenes and their derivatives salts thereof +29215110,"AMINE- FUNCTION COMPOUNDS:o-, m-, p-Phenylenediamine, diaminotoluenes, and their derivatives; salts thereof :O-phenylenediamine" +29215120,"AMINE- FUNCTION COMPOUNDS:o-, m-, p-Phenylenediamine, diaminotoluenes, and their derivatives; salts thereof :M-phenylenediamine (m-di aminobenzene)" +29215130,"AMINE- FUNCTION COMPOUNDS:o-, m-, p-Phenylenediamine, diaminotoluenes, and their derivatives; salts thereof :P-phenylenediamine" +29215140,"AMINE- FUNCTION COMPOUNDS:o-, m-, p-Phenylenediamine, 
diaminotoluenes, and their derivatives; salts thereof :O-diaminotoluene" +29215150,"AMINE- FUNCTION COMPOUNDS:o-, m-, p-Phenylenediamine, diaminotoluenes, and their derivatives; salts thereof :M-diaminotoluene" +29215160,"AMINE- FUNCTION COMPOUNDS:o-, m-, p-Phenylenediamine, diaminotoluenes, and their derivatives; salts thereof :P-diaminotoluene" +29215170,"AMINE- FUNCTION COMPOUNDS:o-, m-, p-Phenylenediamine, diaminotoluenes, and their derivatives; salts thereof :Para-amino acetanilide" +29215180,"AMINE- FUNCTION COMPOUNDS:o-, m-, p-Phenylenediamine, diaminotoluenes, and their derivatives; salts thereof :Meta toluylene diamine" +29215190,"AMINE- FUNCTION COMPOUNDS:o-, m-, p-Phenylenediamine, diaminotoluenes, and their derivatives; salts thereof :Other" +29215900,amine function compounds acyclic monoamines and their derivatives salts thereof >> other +29215910,AMINE- FUNCTION COMPOUNDS:Other :Benzidine +29215920,AMINE- FUNCTION COMPOUNDS:Other :Benzidine dihydrochloride +29215930,"AMINE- FUNCTION COMPOUNDS:Other :3, 3 dichlorobenzidine dihydrochloride sulphate" +29215940,"AMINE- FUNCTION COMPOUNDS:Other :Diaminostibene 2,2-disulphonic acid (Dasda)" +29215990,AMINE- FUNCTION COMPOUNDS:Other :Other +29220000,other than those containing more than one kind of oxygen function their ethers and esters salts thereof +29221100,other than those containing more than one kind of oxygen function their ethers and esters salts thereof >> monoethanolamine and its salts +29221110,OXYGEN-FUNCTION AMINO-COMPOUNDS:Monoethanolamine and its salts;:Monoethanolamine +29221111,"OXYGEN-FUNCTION AMINO-COMPOUNDS:Monoethanolamine and its salts;:2-Hydroxy N,N-Diisopropyl Ethylamine(OLD tariff)" +29221112,"OXYGEN-FUNCTION AMINO-COMPOUNDS:Monoethanolamine and its salts;:N,N-Diethyl Amino ethyl Chloride Hydrochloride(OLD tariff)" +29221113,OXYGEN-FUNCTION AMINO-COMPOUNDS:Monoethanolamine and its salts;:Di-ethyl Amino ethanethiol Hydrochloride(OLD tariff) +29221114,OXYGEN-FUNCTION AMINO-COMPOUNDS:Monoethanolamine and its salts;:Di-Methyl Amino ethyl chloride Hydrochloride(OLD tariff) +29221115,OXYGEN-FUNCTION AMINO-COMPOUNDS:Monoethanolamine and its salts;:Di-Methyl Amino ethanethiol(OLD tariff) +29221116,OXYGEN-FUNCTION AMINO-COMPOUNDS:Monoethanolamine and its salts;:Di-Methyl Amino ethanethiol Hydrochloride(OLD tariff) +29221190,OXYGEN-FUNCTION AMINO-COMPOUNDS:Monoethanolamine and its salts;:Other +29221200,OXYGEN-FUNCTION AMINO-COMPOUNDS:Diethanolamine and its salts:Diethanolamine and its salts +29221211,OXYGEN-FUNCTION AMINO-COMPOUNDS:Diethanolamine and its salts:Ethyldiethanolamine(OLD tariff) +29221212,OXYGEN-FUNCTION AMINO-COMPOUNDS:Diethanolamine and its salts:Methyldiethanolamine(OLD tariff) +29221290,OXYGEN-FUNCTION AMINO-COMPOUNDS:Diethanolamine and its salts:Other(OLD tariff) +29221300,OXYGEN-FUNCTION AMINO-COMPOUNDS::Triethanolamine and its salts(OLD tariff) +29221400,OXYGEN-FUNCTION AMINO-COMPOUNDS::Dextropropoxyphene (INN) and its salts +29221500,OXYGEN-FUNCTION AMINO-COMPOUNDS::Triethanolamine +29221600,OXYGEN-FUNCTION AMINO-COMPOUNDS::Diethanolammonium perfluorooctane sulphonate +29221700,other than those containing more than one kind of oxygen function their ethers and esters salts thereof >> methyldiethanolamine and ethyldiethanolamine +29221710,OXYGEN-FUNCTION AMINO-COMPOUNDS::Methyldiethanolamine +29221720,OXYGEN-FUNCTION AMINO-COMPOUNDS::Ethyldiethanolamine +29221800,"OXYGEN-FUNCTION AMINO-COMPOUNDS::2-(N, N-Diisopropylamino) ethanol" +29221900,other than those containing more than one kind of oxygen 
function their ethers and esters salts thereof >> other +29221910,"OXYGEN-FUNCTION AMINO-COMPOUNDS:Other:3 Hydroxy N, N-Dulsopopylethylamine" +29221920,"OXYGEN-FUNCTION AMINO-COMPOUNDS:Other:Ethanol, 2- [bis(1-methylethyl) amino]-(OLD tariff)" +29221930,"OXYGEN-FUNCTION AMINO-COMPOUNDS:Other:Ethanethiol, 2-(diethylamino)-(OLD tariff)" +29221990,OXYGEN-FUNCTION AMINO-COMPOUNDS:Other:other +29222100,other than those containing more than one kind of oxygen function their ethers and esters salts thereof >> aminohydroxynaphthalene sulphonic acids and their salts +29222110,OXYGEN-FUNCTION AMINO-COMPOUNDS:Aminohydroxynaphthalene sulphonic acids and their salts :Amino-g-acid +29222120,OXYGEN-FUNCTION AMINO-COMPOUNDS:Aminohydroxynaphthalene sulphonic acids and their salts :Amino-j-acid +29222130,OXYGEN-FUNCTION AMINO-COMPOUNDS:Aminohydroxynaphthalene sulphonic acids and their salts :1-amino-2-naphthol-4-sulphonic acid +29222140,OXYGEN-FUNCTION AMINO-COMPOUNDS:Aminohydroxynaphthalene sulphonic acids and their salts :Gamma acid +29222150,OXYGEN-FUNCTION AMINO-COMPOUNDS:Aminohydroxynaphthalene sulphonic acids and their salts :J acid (2-amino-5-naphthol-7-sulphonic acid) +29222160,OXYGEN-FUNCTION AMINO-COMPOUNDS:Aminohydroxynaphthalene sulphonic acids and their salts :H acid +29222170,OXYGEN-FUNCTION AMINO-COMPOUNDS:Aminohydroxynaphthalene sulphonic acids and their salts :Ortho phenyl sulphonyl H-acid +29222180,OXYGEN-FUNCTION AMINO-COMPOUNDS:Aminohydroxynaphthalene sulphonic acids and their salts :Chicago acid +29222190,OXYGEN-FUNCTION AMINO-COMPOUNDS:Aminohydroxynaphthalene sulphonic acids and their salts :Other +29222900,other than those containing more than one kind of oxygen function their ethers and esters salts thereof >> other meta aminophenol para aminophenol meta diethyl +29222911,OXYGEN-FUNCTION AMINO-COMPOUNDS:Other:2-amino 4-nitrophenol +29222912,OXYGEN-FUNCTION AMINO-COMPOUNDS:Other:Meta aminophenol +29222913,OXYGEN-FUNCTION AMINO-COMPOUNDS:Other:Para aminophenol +29222914,OXYGEN-FUNCTION AMINO-COMPOUNDS:Other:Meta diethyl amino-phenol +29222921,OXYGEN-FUNCTION AMINO-COMPOUNDS:Other:2-amino-1-phenol-4-sulphonic acid +29222922,OXYGEN-FUNCTION AMINO-COMPOUNDS:Other:6-nitro-O-aminophenol-4-sulphonic acid +29222923,OXYGEN-FUNCTION AMINO-COMPOUNDS:Other:Phenyl gamma acid (phenyl 2-amino-naphtho l-6-sulphonic acid) +29222924,OXYGEN-FUNCTION AMINO-COMPOUNDS:Other:Phenyl J acid (phenyl-2-amino-8 naphtho +29222925,"OXYGEN-FUNCTION AMINO-COMPOUNDS:Other:S acid, peri acid (1-amino-8-naphtho l-4-4-sulphoxinic acid, 1-naphthylamine-8-sulphonic acid)" +29222926,OXYGEN-FUNCTION AMINO-COMPOUNDS:Other:Meta-phenylene diamine-4-sulphonic acid +29222931,OXYGEN-FUNCTION AMINO-COMPOUNDS:Other:N-methyl-para-aminophenol sulphate (motol) +29222932,"OXYGEN-FUNCTION AMINO-COMPOUNDS:Other:2, 5 dimethoxy aniline" +29222933,OXYGEN-FUNCTION AMINO-COMPOUNDS:Other:Para acetyl aminophenol (paracetamol) +29222934,OXYGEN-FUNCTION AMINO-COMPOUNDS:Other:Para cresidine +29222935,OXYGEN-FUNCTION AMINO-COMPOUNDS:Other:Picramic acid (T-grade) +29222936,OXYGEN-FUNCTION AMINO-COMPOUNDS:Other:Para cresidine ortho sulphonic acid +29222990,OXYGEN-FUNCTION AMINO-COMPOUNDS:Other:Other +29223100,"OXYGEN-FUNCTION AMINO-COMPOUNDS::Amfepramone (INN), methadone (INN) and normethadone (INN); salts thereof" +29223900,OXYGEN-FUNCTION AMINO-COMPOUNDS::Other +29224100,OXYGEN-FUNCTION AMINO-COMPOUNDS::Lysine and its esters; salts thereof +29224200,other than those containing more than one kind of oxygen function their ethers and esters salts 
thereof >> glutamic acid and its salts +29224210,OXYGEN-FUNCTION AMINO-COMPOUNDS:Glutamic acid and its salts :Glutamic acid +29224220,OXYGEN-FUNCTION AMINO-COMPOUNDS:Glutamic acid and its salts :Monosodium glutamate +29224290,OXYGEN-FUNCTION AMINO-COMPOUNDS:Glutamic acid and its salts :Other +29224300,OXYGEN-FUNCTION AMINO-COMPOUNDS::Anthranilic acid and its salts +29224400,OXYGEN-FUNCTION AMINO-COMPOUNDS::Tilidine (INN) and its salts +29224900,other than those containing more than one kind of oxygen function their ethers and esters salts thereof >> other +29224910,OXYGEN-FUNCTION AMINO-COMPOUNDS:Other :Amino acetic acid (glycine) +29224920,OXYGEN-FUNCTION AMINO-COMPOUNDS:Other :N-methyl taurine +29224990,OXYGEN-FUNCTION AMINO-COMPOUNDS:Other :Other +29225000,other than those containing more than one kind of oxygen function their ethers and esters salts thereof >> and other with oxygen function acid methyl anthranilate procaine hydrochloride amino anisic acid anilide amine +29225011,"OXYGEN-FUNCTION AMINO-COMPOUNDS:Amino-alcohol-phenols, amino-acid-phenols and other amino-compounds with oxygen function:Para-amino-salicylic acid" +29225012,"OXYGEN-FUNCTION AMINO-COMPOUNDS:Amino-alcohol-phenols, amino-acid-phenols and other amino-compounds with oxygen function:Methyl anthranilate" +29225013,"OXYGEN-FUNCTION AMINO-COMPOUNDS:Amino-alcohol-phenols, amino-acid-phenols and other amino-compounds with oxygen function:Procaine hydrochloride" +29225014,"OXYGEN-FUNCTION AMINO-COMPOUNDS:Amino-alcohol-phenols, amino-acid-phenols and other amino-compounds with oxygen function:Amino anisic acid anilide" +29225015,"OXYGEN-FUNCTION AMINO-COMPOUNDS:Amino-alcohol-phenols, amino-acid-phenols and other amino-compounds with oxygen function:L-tyrosine (p-hydroxyphenyl amine) " +29225021,"OXYGEN-FUNCTION AMINO-COMPOUNDS:Amino-alcohol-phenols, amino-acid-phenols and other amino-compounds with oxygen function:Frusemide" +29225022,"OXYGEN-FUNCTION AMINO-COMPOUNDS:Amino-alcohol-phenols, amino-acid-phenols and other amino-compounds with oxygen function:Aminodial" +29225023,"OXYGEN-FUNCTION AMINO-COMPOUNDS:Amino-alcohol-phenols, amino-acid-phenols and other amino-compounds with oxygen function:N-acetyl anthranilic acid" +29225024,"OXYGEN-FUNCTION AMINO-COMPOUNDS:Amino-alcohol-phenols, amino-acid-phenols and other amino-compounds with oxygen function:Domperidone" +29225090,"OXYGEN-FUNCTION AMINO-COMPOUNDS:Amino-alcohol-phenols, amino-acid-phenols and other amino-compounds with oxygen function:Other" +29230000,quaternary ammonium salts and hydroxides lecithins and other phosphoaminolipids whether or not chemically defined +29231000,"QUATERNARY AMMONIUM SALTS AND HYDROXIDES; LECITHINS AND OTHER PHOSPHOAMINOLIPIDS, WHETHER OR NOT CHEMICALLY DEFINED::Choline and its salts" +29232000,quaternary ammonium salts and hydroxides lecithins and other phosphoaminolipids whether or not chemically defined >> lecithins and other phosphoaminolipids +29232010,"QUATERNARY AMMONIUM SALTS AND HYDROXIDES; LECITHINS AND OTHER PHOSPHOAMINOLIPIDS, WHETHER OR NOT CHEMICALLY DEFINED:Lecithins and other phosphoaminolipids :Lecithins" +29232090,"QUATERNARY AMMONIUM SALTS AND HYDROXIDES; LECITHINS AND OTHER PHOSPHOAMINOLIPIDS, WHETHER OR NOT CHEMICALLY DEFINED:Lecithins and other phosphoaminolipids :Other" +29233000,"QUATERNARY AMMONIUM SALTS AND HYDROXIDES; LECITHINS AND OTHER PHOSPHOAMINOLIPIDS, WHETHER OR NOT CHEMICALLY DEFINED::Tetraethylammonium perfluorooctane sulphonate" +29234000,"QUATERNARY AMMONIUM SALTS AND HYDROXIDES; LECITHINS AND OTHER 
PHOSPHOAMINOLIPIDS, WHETHER OR NOT CHEMICALLY DEFINED::Didecyldimethylammonium perfluorooctane sulphonate" +29239000,"QUATERNARY AMMONIUM SALTS AND HYDROXIDES; LECITHINS AND OTHER PHOSPHOAMINOLIPIDS, WHETHER OR NOT CHEMICALLY DEFINED::Other" +29240000,compounds compounds of onic acid acyclic amides including acyclic carbamates and their derivatives salts thereof +29241100,CARBOXYAMIDE-FUNCTION COMPOUNDS; AMIDE-FUNCTION COMPOUNDS OF CARBONIC ACID::Meprobamate (INN) +29241200,"CARBOXYAMIDE-FUNCTION COMPOUNDS; AMIDE-FUNCTION COMPOUNDS OF CARBONIC ACID::Fluoroacetamide (ISO), monocrotophos (ISO) and phosphamidon (ISO)" +29241900,CARBOXYAMIDE-FUNCTION COMPOUNDS; AMIDE-FUNCTION COMPOUNDS OF CARBONIC ACID::Other +29242100,compounds compounds of onic acid acyclic amides including acyclic carbamates and their derivatives salts thereof >> ureines and their derivatives salts thereof +29242110,CARBOXYAMIDE-FUNCTION COMPOUNDS; AMIDE-FUNCTION COMPOUNDS OF CARBONIC ACID:Ureines and their derivatives; salts thereof :Diethyl diphenyl urea +29242120,CARBOXYAMIDE-FUNCTION COMPOUNDS; AMIDE-FUNCTION COMPOUNDS OF CARBONIC ACID:Ureines and their derivatives; salts thereof :Dimethyl diphenyl urea (zentralin) +29242130,CARBOXYAMIDE-FUNCTION COMPOUNDS; AMIDE-FUNCTION COMPOUNDS OF CARBONIC ACID:Ureines and their derivatives; salts thereof :Parachloro benzene sulphonyl urea +29242190,CARBOXYAMIDE-FUNCTION COMPOUNDS; AMIDE-FUNCTION COMPOUNDS OF CARBONIC ACID:Ureines and their derivatives; salts thereof :Other +29242300,CARBOXYAMIDE-FUNCTION COMPOUNDS; AMIDE-FUNCTION COMPOUNDS OF CARBONIC ACID::2-Acetamidobenzoic acid (N-acetylanthranilic acid) and its salts +29242400,CARBOXYAMIDE-FUNCTION COMPOUNDS; AMIDE-FUNCTION COMPOUNDS OF CARBONIC ACID::Ethinamate (INN) +29242500,CARBOXYAMIDE-FUNCTION COMPOUNDS; AMIDE-FUNCTION COMPOUNDS OF CARBONIC ACID::Alachlor (ISO) +29242900,compounds compounds of onic acid acyclic amides including acyclic carbamates and their derivatives salts thereof >> other +29242910,CARBOXYAMIDE-FUNCTION COMPOUNDS; AMIDE-FUNCTION COMPOUNDS OF CARBONIC ACID:Other :Acetanilide +29242920,CARBOXYAMIDE-FUNCTION COMPOUNDS; AMIDE-FUNCTION COMPOUNDS OF CARBONIC ACID:Other :Aceto acetanilide +29242921,CARBOXYAMIDE-FUNCTION COMPOUNDS; AMIDE-FUNCTION COMPOUNDS OF CARBONIC ACID:Other :Camphor Natural(OLD tariff) +29242930,CARBOXYAMIDE-FUNCTION COMPOUNDS; AMIDE-FUNCTION COMPOUNDS OF CARBONIC ACID:Other :Aceto acetic ortho chloranilide +29242940,CARBOXYAMIDE-FUNCTION COMPOUNDS; AMIDE-FUNCTION COMPOUNDS OF CARBONIC ACID:Other :Aceto acetic para chloranilide +29242950,CARBOXYAMIDE-FUNCTION COMPOUNDS; AMIDE-FUNCTION COMPOUNDS OF CARBONIC ACID:Other :Phenyl acetamide +29242960,CARBOXYAMIDE-FUNCTION COMPOUNDS; AMIDE-FUNCTION COMPOUNDS OF CARBONIC ACID:Other :Pyrazinamide (pyrazine carboxamide)(OLD tariff) +29242990,CARBOXYAMIDE-FUNCTION COMPOUNDS; AMIDE-FUNCTION COMPOUNDS OF CARBONIC ACID:Other :Other +29250000,compounds including saccharin and its salts and compounds imides and their derivatives salts thereof +29251100,CARBOXYIMIDE-FUNCTION COMPOUNDS (INCLUDING SACCHARIN AND ITS SALTS) AND IMINE-FUNCTION COMPOUNDS::Saccharin and its salts +29251200,CARBOXYIMIDE-FUNCTION COMPOUNDS (INCLUDING SACCHARIN AND ITS SALTS) AND IMINE-FUNCTION COMPOUNDS::Glutethimide (INN) +29251900,CARBOXYIMIDE-FUNCTION COMPOUNDS (INCLUDING SACCHARIN AND ITS SALTS) AND IMINE-FUNCTION COMPOUNDS::Other +29252100,CARBOXYIMIDE-FUNCTION COMPOUNDS (INCLUDING SACCHARIN AND ITS SALTS) AND IMINE-FUNCTION COMPOUNDS::Chlordimeform (ISO) +29252900,compounds 
including saccharin and its salts and compounds imides and their derivatives salts thereof >> other +29252910,CARBOXYIMIDE-FUNCTION COMPOUNDS (INCLUDING SACCHARIN AND ITS SALTS) AND IMINE-FUNCTION COMPOUNDS:Other :Guanidine nitrate +29252990,CARBOXYIMIDE-FUNCTION COMPOUNDS (INCLUDING SACCHARIN AND ITS SALTS) AND IMINE-FUNCTION COMPOUNDS:Other :Other +29260000,compounds +29261000,NITRILE-FUNCTION COMPOUNDS::Acrylonitrile +29262000,NITRILE-FUNCTION COMPOUNDS::1-Cyanoguanidine (dicyandiamide) +29263000,"NITRILE-FUNCTION COMPOUNDS::Fenproporex (INN) and its salts; methadone (INN) intermediate (4-cyano-2-Dimethy lamino-4, 4-diphenylbutane)" +29264000,NITRILE-FUNCTION COMPOUNDS::Alpha-phenylacetoacetonitrile +29269000,NITRILE-FUNCTION COMPOUNDS::Other +29270000,diazo azo or diazo azo or azoxy compounds +29270010,"DIAZO-, AZO- OR AZOXY-COMPOUNDS:Diazo-, azo- or azoxy- compounds :Para amino-azo-benzene" +29270090,"DIAZO-, AZO- OR AZOXY-COMPOUNDS:Diazo-, azo- or azoxy- compounds :Other" +29280000,organic derivatives of hydrazine or of hydroxylamine organic derivatives of hydrazine or of hydroxylamine +29280010,ORGANIC DERIVATIVES OF HYDRAZINE OR OF HYDROXYLAMINE:Organic derivatives of hydrazine or of hydroxylamine :Isoniazid +29280090,ORGANIC DERIVATIVES OF HYDRAZINE OR OF HYDROXYLAMINE:Organic derivatives of hydrazine or of hydroxylamine :Other +29290000,compounds with other nitrogen function +29291000,compounds with other nitrogen function >> isocyanates +29291010,COMPOUNDS WITH OTHER NITROGEN FUNCTION:Isocyanates :Phenyl isocyanate +29291020,COMPOUNDS WITH OTHER NITROGEN FUNCTION:Isocyanates :Toluene di-isocyanate +29291090,COMPOUNDS WITH OTHER NITROGEN FUNCTION:Isocyanates :Other +29299000,COMPOUNDS WITH OTHER NITROGEN FUNCTION:Other:Other +29299010,COMPOUNDS WITH OTHER NITROGEN FUNCTION:Other:N.N. Diethylphosphoramidic dichloride +29299020,COMPOUNDS WITH OTHER NITROGEN FUNCTION:Other:N.N. Diusopropylphosphoramidic dichloride +29299030,COMPOUNDS WITH OTHER NITROGEN FUNCTION:Other:N.N. Diproprophylphosphoramidic dichloride +29299040,COMPOUNDS WITH OTHER NITROGEN FUNCTION:Other:N.N. 
Dimethylphosphoramidic dichloride +29299050,"COMPOUNDS WITH OTHER NITROGEN FUNCTION:Other:Diethyl N,N-Dimethylphosphoramidate" +29299060,"COMPOUNDS WITH OTHER NITROGEN FUNCTION:Other:Phosphoramidic acid, diethyl, dimethylester" +29299090,COMPOUNDS WITH OTHER NITROGEN FUNCTION:Other:Other +29300000,compounds +29301000,"ORGANO-SULPHUR COMPOUNDS:nn: 2-(N,N-Dimethylamino) ethanethiol" +29302000,ORGANO-SULPHUR COMPOUNDS::Thiocarbamates and dithiocarbamates +29302010,compounds >> thiocarbamates and dithiocarbamates >> cartap hydrochloride iso +29302090,compounds >> thiocarbamates and dithiocarbamates >> other +29303000,"ORGANO-SULPHUR COMPOUNDS::Thiuram mono-, di or tetrasulphides" +29304000,ORGANO-SULPHUR COMPOUNDS::Methionine +29305000,ORGANO-SULPHUR COMPOUNDS::Captafol (ISO) and methamidophos (ISO)(OLD tariff) +29306000,"ORGANO-SULPHUR COMPOUNDS::2-(N, N-Diethylamino)ethanethiol" +29307000,ORGANO-SULPHUR COMPOUNDS::Bis(2-hydroxyethyl)sulfide (thiodiglycol (INN)) +29308000,"ORGANO-SULPHUR COMPOUNDS::Aldicarb (ISO), captafol (ISO) and methamidophos (ISO)" +29309000,compounds >> other +29309010,ORGANO-SULPHUR COMPOUNDS:Other :Thiourea (sulphourea) +29309020,ORGANO-SULPHUR COMPOUNDS:Other :Calcium salts of methionine +29309030,ORGANO-SULPHUR COMPOUNDS:Other :Thio sulphonic acid +29309040,ORGANO-SULPHUR COMPOUNDS:Other :L-cystine (alpha-amino beta-thiopropionic acid)-sulphur containing amino acid +29309050,ORGANO-SULPHUR COMPOUNDS:Other :Sulphinic acid +29309060,ORGANO-SULPHUR COMPOUNDS:Other :Sulphoxide +29309070,ORGANO-SULPHUR COMPOUNDS:Other :Mercaptan +29309080,ORGANO-SULPHUR COMPOUNDS:Other :Allyl isothiocyanate +29309091,"ORGANO-SULPHUR COMPOUNDS:Other :Ethanol, 2,2'-thiobis-" +29309092,ORGANO-SULPHUR COMPOUNDS:Other :Di-Methyl Amino Ethanethiol +29309093,ORGANO-SULPHUR COMPOUNDS:Other :Di-Methyl Amino ethanethiol Hydro chloride +29309094,ORGANO-SULPHUR COMPOUNDS:Other :Diethyl amino ethanethiol hydrochloride(OLD tariff) +29309095,ORGANO-SULPHUR COMPOUNDS:Other :Di-ethyl Amino ethanethiol hydrochloride +29309096,ORGANO-SULPHUR COMPOUNDS:Other :O-Ethyl -phenyl ethylphosphonothiolothionate (fonofos) +29309097,"ORGANO-SULPHUR COMPOUNDS:Other :Phosphorothioic acid, S2 (diethyl amino) ethyl] O, O-Diethyl ester" +29309098,ORGANO-SULPHUR COMPOUNDS:Other :Dichloro diphenyl sulphone +29309099,ORGANO-SULPHUR COMPOUNDS:Other :Other +29311010,OTHER ORGANO-INORGANIC COMPOUNDS:Tetramethyl lead and tetraethyl lead:Tetramethyl lead(OLD tariff) +29311020,OTHER ORGANO-INORGANIC COMPOUNDS:Tetramethyl lead and tetraethyl lead:Tetraethyl lead(OLD tariff) +29311090,OTHER ORGANO-INORGANIC COMPOUNDS:Tetramethyl lead and tetraethyl lead:Tetraethyl lead(OLD tariff) +29312000,OTHER ORGANO-INORGANIC COMPOUNDS:Tributyltin compounds:Tributylin compounds(OLD tariff) +29313100,OTHER ORGANO-INORGANIC COMPOUNDS::Dimethyl methylphosphonate(OLD tariff) +29313200,OTHER ORGANO-INORGANIC COMPOUNDS::Dimethyl propylphosphonate(OLD tariff) +29313300,OTHER ORGANO-INORGANIC COMPOUNDS::Diethyl ethylphosphonate(OLD tariff) +29313400,OTHER ORGANO-INORGANIC COMPOUNDS::Sodium 3-(trihydroxysilyl)propyl methylphosphonate(OLD tariff) +29313500,"OTHER ORGANO-INORGANIC COMPOUNDS::2, 4, 6-Tripropyl-1, 3, 5, 2, 4, 6-trioxatriphosphinane 2, 4, 6-trioxide(OLD tariff)" +29313600,"OTHER ORGANO-INORGANIC COMPOUNDS::(5-Ethyl-2-methyl-2-oxido-1, 3, 2-dioxaphosphinan-5-yl)methyl methylphosphonate(OLD tariff)" +29313700,"OTHER ORGANO-INORGANIC COMPOUNDS::Bis[(5-ethyl-2-methyl-2-oxido-1, 3, 2-dioxaphosphinan-5-yl) methyl] methylphosphonate(OLD tariff)" 
+29313800,OTHER ORGANO-INORGANIC COMPOUNDS::Salt of methylphosphonic acid and (aminoiminomethyl)urea (1 1)(OLD tariff) +29313900,OTHER ORGANO-INORGANIC COMPOUNDS::Other(OLD tariff) +29314100,OTHER ORGANO-INORGANIC COMPOUNDS:dimethyl:Dimethyl methylphosphonate(OLD tariff) +29314200,OTHER ORGANO-INORGANIC COMPOUNDS:dimethyl:Dimethyl propylphosphonate(OLD tariff) +29314300,OTHER ORGANO-INORGANIC COMPOUNDS:diethyl:Diethyl ethylphosphonate(OLD tariff) +29314400,OTHER ORGANO-INORGANIC COMPOUNDS:METHYL:Methylphosphonic acid(OLD tariff) +29314500,OTHER ORGANO-INORGANIC COMPOUNDS:salt:Salt of methylphosphonic acid and (aminoiminomethyl)urea (1 : 1)(OLD tariff) +29314600,"OTHER ORGANO-INORGANIC COMPOUNDS:2,4,6:2,4,6-Tripropyl-1,3,5,2,4,6trioxatriphosphinane 2,4,6-trioxide(OLD tariff)" +29314700,"OTHER ORGANO-INORGANIC COMPOUNDS:ethyl:(5-Ethyl-2-methyl-2-oxido-1,3,2dioxaphosphinan-5-yl) methyl methyl methylphosphonate(OLD tariff)" +29314800,"OTHER ORGANO-INORGANIC COMPOUNDS:dimethyl:3,9-Dimethyl-2,4,8,10-tetraoxa-3,9diphosphaspiro[5.5] undecane 3,9dioxide(OLD tariff)" +29314910,OTHER ORGANO-INORGANIC COMPOUNDS:other:Sodium 3-(trihydroxysilyl)propyl methylphosphonate(OLD tariff) +29314920,"OTHER ORGANO-INORGANIC COMPOUNDS:other:Bis[(5-ethy1-2-methy1-2-oxido-1,3,2dioxaphosphinan-5-yl)methyl] methylphosphonate(OLD tariff)" +29314990,OTHER ORGANO-INORGANIC COMPOUNDS:other:Other(OLD tariff) +29315100,OTHER ORGANO-INORGANIC COMPOUNDS:methyl:Methylphosphonic dichloride(OLD tariff) +29315200,OTHER ORGANO-INORGANIC COMPOUNDS:propyl:Propylphosphonic dichloride(OLD tariff) +29315300,OTHER ORGANO-INORGANIC COMPOUNDS:chloro:O-(3-chloropropyl) O-[4-nitro-3(trifluoromethyl)phenyl] methylphosphonothionate(OLD tariff) +29315400,OTHER ORGANO-INORGANIC COMPOUNDS:trichloro:Trichlorfon (ISO)(OLD tariff) +29315900,OTHER ORGANO-INORGANIC COMPOUNDS:other:Other(OLD tariff) +29319010,OTHER ORGANO-INORGANIC COMPOUNDS:other:Organo arsenic compounds(OLD tariff) +29319011,OTHER ORGANO-INORGANIC COMPOUNDS:other:Methylarsonic acid and its salt(OLD tariff) +29319012,OTHER ORGANO-INORGANIC COMPOUNDS:other:Cacodylic acid and its salt(OLD tariff) +29319013,OTHER ORGANO-INORGANIC COMPOUNDS:other:p-Aminophenylarsonic acid and its salt(OLD tariff) +29319014,"OTHER ORGANO-INORGANIC COMPOUNDS:other:Amino-hydroxyphenylarsonic acids, their formyl and acetyl derivatives and their salts(OLD tariff)" +29319015,OTHER ORGANO-INORGANIC COMPOUNDS:other:Arsenobenzene and its derivative(OLD tariff) +29319019,OTHER ORGANO-INORGANIC COMPOUNDS:other:other(OLD tariff) +29319020,OTHER ORGANO-INORGANIC COMPOUNDS:other:Organo-silicon compounds(OLD tariff) +29319030,OTHER ORGANO-INORGANIC COMPOUNDS:other:o-Iodosobenzoic acid(OLD tariff) +29319090,OTHER ORGANO-INORGANIC COMPOUNDS:other:other(OLD tariff) +29320000,heterocyclic compounds with oxygen hetero atom s only compounds containing an unfused furan ring whether or not hydrogenated in the structure heterocyclic compounds with oxygen hetero atom s only compounds containing an unfused furan ring whether or not hydrogenated in the structure +29321100,HETEROCYCLIC COMPOUNDS WITH OXYGEN HETERO-ATOM (S) ONLY::Tetrahydrofuran +29321200,HETEROCYCLIC COMPOUNDS WITH OXYGEN HETERO-ATOM (S) ONLY::2-Furaldehyde (furfuraldehyde) +29321300,HETEROCYCLIC COMPOUNDS WITH OXYGEN HETERO-ATOM (S) ONLY::Furfuryl alcohol and tetrahydrofurfuryl alcohol +29321400,HETEROCYCLIC COMPOUNDS WITH OXYGEN HETERO-ATOM (S) ONLY::Sucralose +29321900,heterocyclic compounds with oxygen hetero atom s only compounds containing an unfused furan 
ring whether or not hydrogenated in the structure heterocyclic compounds with oxygen hetero atom s only compounds containing an unfused furan ring whether or not hydrogenated in the structure >> other +29321910,HETEROCYCLIC COMPOUNDS WITH OXYGEN HETERO-ATOM (S) ONLY:Other :Hydroxy dibenzfuran carboxylic acid +29321990,HETEROCYCLIC COMPOUNDS WITH OXYGEN HETERO-ATOM (S) ONLY:Other :Other +29322000,heterocyclic compounds with oxygen hetero atom s only compounds containing an unfused furan ring whether or not hydrogenated in the structure heterocyclic compounds with oxygen hetero atom s only compounds containing an unfused furan ring whether or not hydrogenated in the structure >> lactones lactones +29322010,"HETEROCYCLIC COMPOUNDS WITH OXYGEN HETERO-ATOM (S) ONLY:Lactones:Coumarin, methylcoumarins and ethylcoumarins" +29322020,HETEROCYCLIC COMPOUNDS WITH OXYGEN HETERO-ATOM (S) ONLY:Lactones:Phenolphthalein +29322090,HETEROCYCLIC COMPOUNDS WITH OXYGEN HETERO-ATOM (S) ONLY:Lactones:other +29329100,HETEROCYCLIC COMPOUNDS WITH OXYGEN HETERO-ATOM (S) ONLY::Isosafrole +29329200,"HETEROCYCLIC COMPOUNDS WITH OXYGEN HETERO-ATOM (S) ONLY::1-(1,3-Benzodioxol-5-yl) propan-2-one" +29329300,HETEROCYCLIC COMPOUNDS WITH OXYGEN HETERO-ATOM (S) ONLY::Piperonal +29329400,HETEROCYCLIC COMPOUNDS WITH OXYGEN HETERO-ATOM (S) ONLY::Safrole +29329500,HETEROCYCLIC COMPOUNDS WITH OXYGEN HETERO-ATOM (S) ONLY::Tetrahydrocannabinols (all isomers) +29329600,HETEROCYCLIC COMPOUNDS WITH OXYGEN HETERO-ATOM (S) ONLY:carbofuran:Carbofuran (ISO) +29329900,HETEROCYCLIC COMPOUNDS WITH OXYGEN HETERO-ATOM (S) ONLY:Other:Other +29329910,HETEROCYCLIC COMPOUNDS WITH OXYGEN HETERO-ATOM (S) ONLY:Other:Cineole +29329920,heterocyclic compounds with oxygen hetero atom s only compounds containing an unfused furan ring whether or not hydrogenated in the structure heterocyclic compounds with oxygen hetero atom s only compounds containing an unfused furan ring whether or not hydrogenated in the structure >> other other >> emamectin benzoate iso emamectin benzoate iso +29329990,HETEROCYCLIC COMPOUNDS WITH OXYGEN HETERO-ATOM (S) ONLY:Other:other +29330000,heterocyclic compounds with nitrogen s only compounds containing an unfused pyrazole ring whether or not hydrogenated in the structure heterocyclic compounds with nitrogen s only compounds containing an unfused pyrazole ring whether or not hydrogenated in the structure +29331100,HETEROCYCLIC COMPOUNDS WITH NITROGEN::Phenazone (antipyrin) and its derivates +29331900,heterocyclic compounds with nitrogen s only compounds containing an unfused pyrazole ring whether or not hydrogenated in the structure heterocyclic compounds with nitrogen s only compounds containing an unfused pyrazole ring whether or not hydrogenated in the structure >> other other +29331910,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :3-carboxy (para sulpho-phenyl)-5- pyrazolone +29331920,"HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :1 (2,5- dichloro-4-sulpho phenyl)-3-methyl- 5-pyrazolone" +29331930,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :3-methyl-1(4-sulpho-O-toluyl-5-pyrazolone) +29331940,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Phenylmethylpyrazolone +29331950,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :1-phenyl-5-pyrazolone-3-carboxylic acid ethylester +29331960,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :1-(m-sulphophenyl)-3-pyrazolone +29331970,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Analgin +29331980,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Oxyphenbutazone +29331990,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other 
:Other(OLD tariff) +29331991,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Fluoro-3-(difluoromethyl)-1-methyl-1h-pyrazole-4-carbonyl fluoride +29331999,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Other +29332100,HETEROCYCLIC COMPOUNDS WITH NITROGEN::Hydantoin and its derivatives +29332900,heterocyclic compounds with nitrogen s only compounds containing an unfused pyrazole ring whether or not hydrogenated in the structure heterocyclic compounds with nitrogen s only compounds containing an unfused pyrazole ring whether or not hydrogenated in the structure >> other other +29332910,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Tinidazole +29332920,"HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Metronidazole, metronidiazole benzoate" +29332930,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Mebendazole +29332940,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Dimetridazole +29332950,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Albendazole +29332960,heterocyclic compounds with nitrogen s only compounds containing an unfused pyrazole ring whether or not hydrogenated in the structure heterocyclic compounds with nitrogen s only compounds containing an unfused pyrazole ring whether or not hydrogenated in the structure >> other other >> imidacloprid iso imidacloprid iso +29332990,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Other +29333100,HETEROCYCLIC COMPOUNDS WITH NITROGEN::Pyridine and its salts +29333200,HETEROCYCLIC COMPOUNDS WITH NITROGEN::Piperidine and its salts +29333300,"HETEROCYCLIC COMPOUNDS WITH NITROGEN::Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN), fentanyl (INN), keto bemidone (INN), methylphenidate (INN), pentazocine (INN), pethidine (INN), pethidine intermediate A, phencyclindine (INN) (PCP), phenoperidine (INN), pipradrol (INN), piritramide (INN), propiram (INN) and trimeperidine (INN); salts thereof" +29333311,"HETEROCYCLIC COMPOUNDS WITH NITROGEN:Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN), fentanyl (INN), ketobemidone (INN), methylphenidate (INN), pentazocine (INN), pethidine (INN), pethidine (INN) intermediate A, phencyclidine (INN) (PCP), phenoperidine (INN), pipradrol (INN), piritramide (INN), propiram (INN), remifentanil (INN) and trimeperidine (INN); salts thereof: - - - Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN); salts thereof::Alfentanil (INN) and its salt" +29333312,"HETEROCYCLIC COMPOUNDS WITH NITROGEN:Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN), fentanyl (INN), ketobemidone (INN), methylphenidate (INN), pentazocine (INN), pethidine (INN), pethidine (INN) intermediate A, phencyclidine (INN) (PCP), phenoperidine (INN), pipradrol (INN), piritramide (INN), propiram (INN), remifentanil (INN) and trimeperidine (INN); salts thereof: - - - Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN); salts thereof::Anileridine (INN) and its salt" +29333313,"HETEROCYCLIC COMPOUNDS WITH NITROGEN:Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN), fentanyl (INN), ketobemidone (INN), methylphenidate (INN), pentazocine (INN), 
pethidine (INN), pethidine (INN) intermediate A, phencyclidine (INN) (PCP), phenoperidine (INN), pipradrol (INN), piritramide (INN), propiram (INN), remifentanil (INN) and trimeperidine (INN); salts thereof: - - - Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN); salts thereof::Bezitramide (INN) and its salt" +29333314,"HETEROCYCLIC COMPOUNDS WITH NITROGEN:Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN), fentanyl (INN), ketobemidone (INN), methylphenidate (INN), pentazocine (INN), pethidine (INN), pethidine (INN) intermediate A, phencyclidine (INN) (PCP), phenoperidine (INN), pipradrol (INN), piritramide (INN), propiram (INN), remifentanil (INN) and trimeperidine (INN); salts thereof: - - - Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN); salts thereof::Bromazepam (INN) and its salt" +29333315,"HETEROCYCLIC COMPOUNDS WITH NITROGEN:Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN), fentanyl (INN), ketobemidone (INN), methylphenidate (INN), pentazocine (INN), pethidine (INN), pethidine (INN) intermediate A, phencyclidine (INN) (PCP), phenoperidine (INN), pipradrol (INN), piritramide (INN), propiram (INN), remifentanil (INN) and trimeperidine (INN); salts thereof: - - - Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN); salts thereof::Carfentanil (INN) and its salt" +29333316,"HETEROCYCLIC COMPOUNDS WITH NITROGEN:Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN), fentanyl (INN), ketobemidone (INN), methylphenidate (INN), pentazocine (INN), pethidine (INN), pethidine (INN) intermediate A, phencyclidine (INN) (PCP), phenoperidine (INN), pipradrol (INN), piritramide (INN), propiram (INN), remifentanil (INN) and trimeperidine (INN); salts thereof: - - - Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN); salts thereof::Difenoxin (INN) and its salt" +29333317,"HETEROCYCLIC COMPOUNDS WITH NITROGEN:Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN), fentanyl (INN), ketobemidone (INN), methylphenidate (INN), pentazocine (INN), pethidine (INN), pethidine (INN) intermediate A, phencyclidine (INN) (PCP), phenoperidine (INN), pipradrol (INN), piritramide (INN), propiram (INN), remifentanil (INN) and trimeperidine (INN); salts thereof: - - - Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN); salts thereof::Diphenoxylate (INN) and its salt" +29333318,"HETEROCYCLIC COMPOUNDS WITH NITROGEN:Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN), fentanyl (INN), ketobemidone (INN), methylphenidate (INN), pentazocine (INN), pethidine (INN), pethidine (INN) intermediate A, phencyclidine (INN) (PCP), phenoperidine (INN), pipradrol (INN), piritramide 
(INN), propiram (INN), remifentanil (INN) and trimeperidine (INN); salts thereof: - - - Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN); salts thereof::Fentanyl (INN), ketobemidone (INN), methylphenidate (INN), pentazocine (INN), pethidine (INN), pethidine (INN) intermediate A, phencyclidine (INN) (PCP), phenoperidine (INN); salts thereof" +29333321,"HETEROCYCLIC COMPOUNDS WITH NITROGEN:Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN), fentanyl (INN), ketobemidone (INN), methylphenidate (INN), pentazocine (INN), pethidine (INN), pethidine (INN) intermediate A, phencyclidine (INN) (PCP), phenoperidine (INN), pipradrol (INN), piritramide (INN), propiram (INN), remifentanil (INN) and trimeperidine (INN); salts thereof: - - - Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN); salts thereof::Fentanyl (INN) and its salt" +29333322,"HETEROCYCLIC COMPOUNDS WITH NITROGEN:Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN), fentanyl (INN), ketobemidone (INN), methylphenidate (INN), pentazocine (INN), pethidine (INN), pethidine (INN) intermediate A, phencyclidine (INN) (PCP), phenoperidine (INN), pipradrol (INN), piritramide (INN), propiram (INN), remifentanil (INN) and trimeperidine (INN); salts thereof: - - - Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN); salts thereof::Ketobemidone (INN) and its salt" +29333323,"HETEROCYCLIC COMPOUNDS WITH NITROGEN:Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN), fentanyl (INN), ketobemidone (INN), methylphenidate (INN), pentazocine (INN), pethidine (INN), pethidine (INN) intermediate A, phencyclidine (INN) (PCP), phenoperidine (INN), pipradrol (INN), piritramide (INN), propiram (INN), remifentanil (INN) and trimeperidine (INN); salts thereof: - - - Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN); salts thereof::Methylphenidate (INN) and its salt" +29333324,"HETEROCYCLIC COMPOUNDS WITH NITROGEN:Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN), fentanyl (INN), ketobemidone (INN), methylphenidate (INN), pentazocine (INN), pethidine (INN), pethidine (INN) intermediate A, phencyclidine (INN) (PCP), phenoperidine (INN), pipradrol (INN), piritramide (INN), propiram (INN), remifentanil (INN) and trimeperidine (INN); salts thereof: - - - Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN); salts thereof::Pentazocine (INN) and its salt" +29333325,"HETEROCYCLIC COMPOUNDS WITH NITROGEN:Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN), fentanyl (INN), ketobemidone (INN), methylphenidate (INN), pentazocine (INN), pethidine (INN), pethidine (INN) intermediate A, phencyclidine (INN) (PCP), phenoperidine 
(INN), pipradrol (INN), piritramide (INN), propiram (INN), remifentanil (INN) and trimeperidine (INN); salts thereof: - - - Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN); salts thereof::Pethidine (INN) and its salt" +29333326,"HETEROCYCLIC COMPOUNDS WITH NITROGEN:Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN), fentanyl (INN), ketobemidone (INN), methylphenidate (INN), pentazocine (INN), pethidine (INN), pethidine (INN) intermediate A, phencyclidine (INN) (PCP), phenoperidine (INN), pipradrol (INN), piritramide (INN), propiram (INN), remifentanil (INN) and trimeperidine (INN); salts thereof: - - - Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN); salts thereof::Pethidine (INN) intermediate A and its salt" +29333327,"HETEROCYCLIC COMPOUNDS WITH NITROGEN:Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN), fentanyl (INN), ketobemidone (INN), methylphenidate (INN), pentazocine (INN), pethidine (INN), pethidine (INN) intermediate A, phencyclidine (INN) (PCP), phenoperidine (INN), pipradrol (INN), piritramide (INN), propiram (INN), remifentanil (INN) and trimeperidine (INN); salts thereof: - - - Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN); salts thereof:: Phencyclidine (INN) (PCP) and its salt" +29333328,"HETEROCYCLIC COMPOUNDS WITH NITROGEN:Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN), fentanyl (INN), ketobemidone (INN), methylphenidate (INN), pentazocine (INN), pethidine (INN), pethidine (INN) intermediate A, phencyclidine (INN) (PCP), phenoperidine (INN), pipradrol (INN), piritramide (INN), propiram (INN), remifentanil (INN) and trimeperidine (INN); salts thereof: - - - Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN); salts thereof::Phenoperidine (INN) and its salt Pipradrol (INN), piritramide (INN), propiram (INN), remifentanil (INN) and trimeperidine (INN); salts thereof:" +29333331,"HETEROCYCLIC COMPOUNDS WITH NITROGEN:Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN), fentanyl (INN), ketobemidone (INN), methylphenidate (INN), pentazocine (INN), pethidine (INN), pethidine (INN) intermediate A, phencyclidine (INN) (PCP), phenoperidine (INN), pipradrol (INN), piritramide (INN), propiram (INN), remifentanil (INN) and trimeperidine (INN); salts thereof: - - - Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN); salts thereof::Pipradrol (INN) and its salt" +29333332,"HETEROCYCLIC COMPOUNDS WITH NITROGEN:Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN), fentanyl (INN), ketobemidone (INN), methylphenidate (INN), pentazocine (INN), pethidine (INN), pethidine (INN) intermediate A, phencyclidine (INN) (PCP), 
phenoperidine (INN), pipradrol (INN), piritramide (INN), propiram (INN), remifentanil (INN) and trimeperidine (INN); salts thereof: - - - Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN); salts thereof::Piritramide (INN) and its salt" +29333333,"HETEROCYCLIC COMPOUNDS WITH NITROGEN:Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN), fentanyl (INN), ketobemidone (INN), methylphenidate (INN), pentazocine (INN), pethidine (INN), pethidine (INN) intermediate A, phencyclidine (INN) (PCP), phenoperidine (INN), pipradrol (INN), piritramide (INN), propiram (INN), remifentanil (INN) and trimeperidine (INN); salts thereof: - - - Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN); salts thereof::Propiram (INN) and its salt" +29333334,"HETEROCYCLIC COMPOUNDS WITH NITROGEN:Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN), fentanyl (INN), ketobemidone (INN), methylphenidate (INN), pentazocine (INN), pethidine (INN), pethidine (INN) intermediate A, phencyclidine (INN) (PCP), phenoperidine (INN), pipradrol (INN), piritramide (INN), propiram (INN), remifentanil (INN) and trimeperidine (INN); salts thereof: - - - Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN); salts thereof::Remifentanil (INN) and its salt" +29333335,"HETEROCYCLIC COMPOUNDS WITH NITROGEN:Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN), fentanyl (INN), ketobemidone (INN), methylphenidate (INN), pentazocine (INN), pethidine (INN), pethidine (INN) intermediate A, phencyclidine (INN) (PCP), phenoperidine (INN), pipradrol (INN), piritramide (INN), propiram (INN), remifentanil (INN) and trimeperidine (INN); salts thereof: - - - Alfentanil (INN), anileridine (INN), bezitramide (INN), bromazepam (INN), carfentanil (INN), difenoxin (INN), diphenoxylate (INN), dipipanone (INN); salts thereof::Trimeperidine (INN) and its salt" +29333400,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other fentanyls and their derivatives:Other fentanyls and their derivatives +29333500,HETEROCYCLIC COMPOUNDS WITH NITROGEN:3-quinuclidinol:3-Quinuclidinol +29333600,HETEROCYCLIC COMPOUNDS WITH NITROGEN:anilino:4-Anilino-N-phenethylpiperidine (ANPP) +29333700,HETEROCYCLIC COMPOUNDS WITH NITROGEN:phenethyl:N-Phenethyl-4-piperidone (NPP) +29333900,heterocyclic compounds with nitrogen s only compounds containing an unfused pyrazole ring whether or not hydrogenated in the structure heterocyclic compounds with nitrogen s only compounds containing an unfused pyrazole ring whether or not hydrogenated in the structure >> other derivatives of pyridine +29333911,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Amino pyridine +29333912,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Alpha picoline (2-methyl pyridine) +29333913,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Gamma picoline (4-methyl pyridine) +29333914,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Chloropheniramine maleate +29333915,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Diphenoxylate hydrochloride +29333916,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Beta 
picoline (3-methyl pyridine) +29333917,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Morpholine +29333918,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Lutidine (Dimethyl Pyridine) +29333919,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Other(OLD tariff) +29333920,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Piperidine and its derivatives(OLD tariff) +29333921,heterocyclic compounds with nitrogen s only compounds containing an unfused pyrazole ring whether or not hydrogenated in the structure heterocyclic compounds with nitrogen s only compounds containing an unfused pyrazole ring whether or not hydrogenated in the structure >> other derivatives of pyridine >> acetamiprid iso +29333922,heterocyclic compounds with nitrogen s only compounds containing an unfused pyrazole ring whether or not hydrogenated in the structure heterocyclic compounds with nitrogen s only compounds containing an unfused pyrazole ring whether or not hydrogenated in the structure >> other derivatives of pyridine >> imazethapyr iso +29333929,heterocyclic compounds with nitrogen s only compounds containing an unfused pyrazole ring whether or not hydrogenated in the structure heterocyclic compounds with nitrogen s only compounds containing an unfused pyrazole ring whether or not hydrogenated in the structure >> other derivatives of pyridine >> other other +29333930,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :1-Azabicyclo (2.2.2.) octan-3-ol(OLD tariff) +29333940,"HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :BENSENE ACETIC ACID, ALPHA -HYDROXY - ALPHA-PHENYL, I -AZABICYCLO-AZABICYCLO (2.2.2. OCT3-YL ESTER" +29333990,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Other +29334100,HETEROCYCLIC COMPOUNDS WITH NITROGEN::Levorphanol (INN) and its salts +29334900,HETEROCYCLIC COMPOUNDS WITH NITROGEN::Other +29335200,HETEROCYCLIC COMPOUNDS WITH NITROGEN::Malonylurea (barbituric acid) and its salts +29335300,"HETEROCYCLIC COMPOUNDS WITH NITROGEN::Allobarbital (INN), amobarbital (INN), barbital (INN), butalbital (INN), butobarbital (INN), cyclobarbital (INN), methylphenobarbital (INN), pentobarbital (INN)," +29335400,HETEROCYCLIC COMPOUNDS WITH NITROGEN::Other derivatives of malonylurea (barbituric acid); salts thereof +29335500,"HETEROCYCLIC COMPOUNDS WITH NITROGEN::Loprazolam (INN), mecloqualone (INN), methaqualone (INN) and zipeprol (INN); salts thereof" +29335900,heterocyclic compounds with nitrogen s only compounds containing an unfused pyrazole ring whether or not hydrogenated in the structure heterocyclic compounds with nitrogen s only compounds containing an unfused pyrazole ring whether or not hydrogenated in the structure >> other other +29335910,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Aminophylline (cordophylin) +29335920,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Trimethoprim +29335930,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Diethyl carbamazine citrate +29335940,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :1-Amino-4-Methyl piperazine +29335950,heterocyclic compounds with nitrogen s only compounds containing an unfused pyrazole ring whether or not hydrogenated in the structure heterocyclic compounds with nitrogen s only compounds containing an unfused pyrazole ring whether or not hydrogenated in the structure >> other other >> iso iso +29335990,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Other +29336100,HETEROCYCLIC COMPOUNDS WITH NITROGEN::Melamine +29336900,heterocyclic compounds with nitrogen s only compounds containing an unfused pyrazole ring whether or not hydrogenated in the structure heterocyclic compounds with 
nitrogen s only compounds containing an unfused pyrazole ring whether or not hydrogenated in the structure >> other other +29336910,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Cyanuric acid and its salts +29336920,"HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :4-[4,6-bis(2,4-dimethylphenyl)-1,3,5-triazine-2-yl]-1,3-benzenediol" +29336930,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Tris(2-hydroxyethyl) isocyanurate +29336940,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Ethylhexyltriazone +29336950,"HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :2,4,6-tri(2,4-dygydroxyl-3-methylphenyl)-1,3,5-triazine" +29336990,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other :Other +29337100,HETEROCYCLIC COMPOUNDS WITH NITROGEN::6-Hexanelactam (epsilon-caprolactam) +29337200,HETEROCYCLIC COMPOUNDS WITH NITROGEN::Clobazam (INN) and methyprylon (INN) +29337900,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other lactams:Other lactams +29337910,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other lactams:N-methyl-2-pyrrolidone +29337920,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other lactams:N-ethyl-2-pyrrolidone +29337990,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other lactams:Other +29339100,"HETEROCYCLIC COMPOUNDS WITH NITROGEN::Alprazolam (INN), camazepam (INN) chloridiazepoxide (INN), clonazepam (INN), clorazepate, delorazepam (INN), diazepam (INN), estazolam (INN), ethyl loflazepate (INN), fludiazepam (INN), flunitrazepam (INN), flurazepam (INN), halazaepam (INN), lorazepam (INN), lormetazepam (INN), mazindol (INN), medazepam (INN), midazolam (INN), nimetazepam (INN), nitrazepam (INN), nordazepam (INN), oxazepam (INN), pinazepam (INN), prazepam (INN), pyrovalerone (INN), tamazepam (INN), tetrazepam (INN) and triazolam (INN); salts thereof" +29339200,HETEROCYCLIC COMPOUNDS WITH NITROGEN::Azinphos-methyl (ISO) +29339900,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other:Other +29339910,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other:Imidazo pyridine methyl amine +29339920,heterocyclic compounds with nitrogen s only compounds containing an unfused pyrazole ring whether or not hydrogenated in the structure heterocyclic compounds with nitrogen s only compounds containing an unfused pyrazole ring whether or not hydrogenated in the structure >> other other >> carbendazim iso carbendazim iso +29339990,HETEROCYCLIC COMPOUNDS WITH NITROGEN:Other:Other +29340000,chemically defined other heterocyclic compounds chemically defined other heterocyclic compounds +29341000,NUCLEIC ACIDS AND THEIR SALTS; WHETHER OR NOT CHEMICALLY DEFINED; OTHER HETEROCYCLIC COMPOUNDS::Compounds containing an unfused thiazole ring (whether or not hydrogenated) in the structure +29342000,NUCLEIC ACIDS AND THEIR SALTS; WHETHER OR NOT CHEMICALLY DEFINED; OTHER HETEROCYCLIC COMPOUNDS::Compounds containing in the structure a benzothiazole ring-system (whether or not hydrogenated) not further fused(OLD tariff) +29343000,NUCLEIC ACIDS AND THEIR SALTS; WHETHER OR NOT CHEMICALLY DEFINED; OTHER HETEROCYCLIC COMPOUNDS::Compounds containing in the structure a phenothiazine ring-system (whether or not hydrogenated) not further fused +29349100,"NUCLEIC ACIDS AND THEIR SALTS; WHETHER OR NOT CHEMICALLY DEFINED; OTHER HETEROCYCLIC COMPOUNDS::Aminorex (INN), brotizolam (INN), clotiazepam (INN), cloxazolam (INN), dextromoramide (INN), haloxazolam (INN), ketazolam (INN), mesocarb (INN), oxazolam (INN), pemoline (INN), phendimetrazine (INN), phenmetrazine (INN) and sufentanil (INN); salts thereof" +29349200,NUCLEIC ACIDS AND THEIR SALTS; WHETHER OR NOT CHEMICALLY DEFINED; OTHER HETEROCYCLIC COMPOUNDS:other:Other 
fentanyls and their derivatives +29349900,NUCLEIC ACIDS AND THEIR SALTS; WHETHER OR NOT CHEMICALLY DEFINED; OTHER HETEROCYCLIC COMPOUNDS:Other:Other +29349910,NUCLEIC ACIDS AND THEIR SALTS; WHETHER OR NOT CHEMICALLY DEFINED; OTHER HETEROCYCLIC COMPOUNDS:Other:Chloro thiophene-2-carboxylic acid +29349920,NUCLEIC ACIDS AND THEIR SALTS; WHETHER OR NOT CHEMICALLY DEFINED; OTHER HETEROCYCLIC COMPOUNDS:Other:Morpholine +29349930,chemically defined other heterocyclic compounds chemically defined other heterocyclic compounds >> other >> buprofezin iso +29349990,NUCLEIC ACIDS AND THEIR SALTS; WHETHER OR NOT CHEMICALLY DEFINED; OTHER HETEROCYCLIC COMPOUNDS:Other:Other +29350000,sulphonamides +29350011,SULPHONAMIDES:Sulphonamides :Sulphamethoxazole(OLD tariff) +29350012,SULPHONAMIDES:Sulphonamides :Sulphafurazole(OLD tariff) +29350013,SULPHONAMIDES:Sulphonamides :Sulphadiazine(OLD tariff) +29350014,SULPHONAMIDES:Sulphonamides :Sulphadimidine(OLD tariff) +29350015,SULPHONAMIDES:Sulphonamides :Sulphacetamide(OLD tariff) +29350021,SULPHONAMIDES:Sulphonamides :Sulphamethoxypyridarine(OLD tariff) +29350022,SULPHONAMIDES:Sulphonamides :Sulphamethiazole(OLD tariff) +29350023,SULPHONAMIDES:Sulphonamides :Sulphamoxole(OLD tariff) +29350024,SULPHONAMIDES:Sulphonamides :Sulphamide(OLD tariff) +29350090,SULPHONAMIDES:Sulphonamides :Other(OLD tariff) +29351000,SULPHONAMIDES::N-Methylperfluorooctane sulphonamide +29352000,SULPHONAMIDES::N-Ethylperfluorooctane sulphonamide +29353000,SULPHONAMIDES::N-Ethyl-N-(2-hydroxyethyl) perfluorooctane sulphonamide +29354000,SULPHONAMIDES::N-(2-Hydroxyethyl)-N-methylperfluorooctane sulphonamide +29355000,SULPHONAMIDES::Other perfluorooctane sulphonamides +29355010,sulphonamides >> other perfluorooctane sulphonamides >> flubendiamide iso +29355090,sulphonamides >> other perfluorooctane sulphonamides >> other +29359000,sulphonamides >> other sulphamethoxazole sulphafurazole sulphadiazine sulphadimidine sulphacetamide +29359011,SULPHONAMIDES::Sulphamethoxazole +29359012,SULPHONAMIDES::Sulphafurazole +29359013,SULPHONAMIDES::Sulphadiazine(OLD tariff) +29359014,SULPHONAMIDES::Sulphadimidine(OLD tariff) +29359015,SULPHONAMIDES::Sulphacetamide +29359021,SULPHONAMIDES::Sulphamethoxypyridarine +29359022,SULPHONAMIDES::Sulphamethoxypyridarine +29359023,SULPHONAMIDES::Sulphamoxole +29359024,SULPHONAMIDES::Sulphamide +29359090,SULPHONAMIDES::Other +29360000,hormonesprovitamins and vitamins natural or reproduced by synthesis including natural concentrates derivatives thereof used primarily as vitamins and intermixtures of the foregoing whether or not in any solvent vitamins and their derivatives unmixed concentrates derivatives thereof used primarily as vitamins and intermixtures of the foregoing whether or not in any solvent vitamins and their derivatives unmixed +29362100,"PROVITAMINS AND VITAMINS, NATURAL OR REPRODUCED BY SYNTHESIS (INCLUDING NATURAL CONCENTRATES), DERIVATIVES THEREOF USED PRIMARILY AS VITAMINS, AND INTERMIXTURES OF THE FOREGOING, WHETHER OR NOT IN ANY SOLVENT::Vitamin A and their derivatives" +29362200,hormonesprovitamins and vitamins natural or reproduced by synthesis including natural concentrates derivatives thereof used primarily as vitamins and intermixtures of the foregoing whether or not in any solvent vitamins and their derivatives unmixed concentrates derivatives thereof used primarily as vitamins and intermixtures of the foregoing whether or not in any solvent vitamins and their derivatives unmixed >> vitamin b and its derivatives +29362210,"PROVITAMINS AND 
VITAMINS, NATURAL OR REPRODUCED BY SYNTHESIS (INCLUDING NATURAL CONCENTRATES), DERIVATIVES THEREOF USED PRIMARILY AS VITAMINS, AND INTERMIXTURES OF THE FOREGOING, WHETHER OR NOT IN ANY SOLVENT:Vitamin B1 and its derivatives :Vitamin B1 [Thiamine (INN), aneurine] and its salts" +29362290,"PROVITAMINS AND VITAMINS, NATURAL OR REPRODUCED BY SYNTHESIS (INCLUDING NATURAL CONCENTRATES), DERIVATIVES THEREOF USED PRIMARILY AS VITAMINS, AND INTERMIXTURES OF THE FOREGOING, WHETHER OR NOT IN ANY SOLVENT:Vitamin B1 and its derivatives :Other" +29362300,hormonesprovitamins and vitamins natural or reproduced by synthesis including natural concentrates derivatives thereof used primarily as vitamins and intermixtures of the foregoing whether or not in any solvent vitamins and their derivatives unmixed concentrates derivatives thereof used primarily as vitamins and intermixtures of the foregoing whether or not in any solvent vitamins and their derivatives unmixed >> vitamin b2 and its derivatives +29362310,"PROVITAMINS AND VITAMINS, NATURAL OR REPRODUCED BY SYNTHESIS (INCLUDING NATURAL CONCENTRATES), DERIVATIVES THEREOF USED PRIMARILY AS VITAMINS, AND INTERMIXTURES OF THE FOREGOING, WHETHER OR NOT IN ANY SOLVENT:Vitamin B2 and its derivatives :Vitamin B2 [Riboflavin(INN), lactoflavin] and its salts" +29362390,"PROVITAMINS AND VITAMINS, NATURAL OR REPRODUCED BY SYNTHESIS (INCLUDING NATURAL CONCENTRATES), DERIVATIVES THEREOF USED PRIMARILY AS VITAMINS, AND INTERMIXTURES OF THE FOREGOING, WHETHER OR NOT IN ANY SOLVENT:Vitamin B2 and its derivatives :Other" +29362400,"PROVITAMINS AND VITAMINS, NATURAL OR REPRODUCED BY SYNTHESIS (INCLUDING NATURAL CONCENTRATES), DERIVATIVES THEREOF USED PRIMARILY AS VITAMINS, AND INTERMIXTURES OF THE FOREGOING, WHETHER OR NOT IN ANY SOLVENT::D- or DL-Pantothenic acid (Vitamin B3 or Vitamin B5) and its derivatives" +29362500,"PROVITAMINS AND VITAMINS, NATURAL OR REPRODUCED BY SYNTHESIS (INCLUDING NATURAL CONCENTRATES), DERIVATIVES THEREOF USED PRIMARILY AS VITAMINS, AND INTERMIXTURES OF THE FOREGOING, WHETHER OR NOT IN ANY SOLVENT::Vitamin B6 and its derivatives" +29362600,hormonesprovitamins and vitamins natural or reproduced by synthesis including natural concentrates derivatives thereof used primarily as vitamins and intermixtures of the foregoing whether or not in any solvent vitamins and their derivatives unmixed concentrates derivatives thereof used primarily as vitamins and intermixtures of the foregoing whether or not in any solvent vitamins and their derivatives unmixed >> vitamin b12 and its derivatives +29362610,"PROVITAMINS AND VITAMINS, NATURAL OR REPRODUCED BY SYNTHESIS (INCLUDING NATURAL CONCENTRATES), DERIVATIVES THEREOF USED PRIMARILY AS VITAMINS, AND INTERMIXTURES OF THE FOREGOING, WHETHER OR NOT IN ANY SOLVENT:Vitamin B12 and its derivatives :Vitamin B12 (Cyanocobalamin (INN))" +29362690,"PROVITAMINS AND VITAMINS, NATURAL OR REPRODUCED BY SYNTHESIS (INCLUDING NATURAL CONCENTRATES), DERIVATIVES THEREOF USED PRIMARILY AS VITAMINS, AND INTERMIXTURES OF THE FOREGOING, WHETHER OR NOT IN ANY SOLVENT:Vitamin B12 and its derivatives :Other" +29362700,"PROVITAMINS AND VITAMINS, NATURAL OR REPRODUCED BY SYNTHESIS (INCLUDING NATURAL CONCENTRATES), DERIVATIVES THEREOF USED PRIMARILY AS VITAMINS, AND INTERMIXTURES OF THE FOREGOING, WHETHER OR NOT IN ANY SOLVENT::Vitamin C (Ascorbic acid) and its derivatives" +29362800,"PROVITAMINS AND VITAMINS, NATURAL OR REPRODUCED BY SYNTHESIS (INCLUDING NATURAL CONCENTRATES), DERIVATIVES THEREOF USED PRIMARILY AS VITAMINS, AND 
INTERMIXTURES OF THE FOREGOING, WHETHER OR NOT IN ANY SOLVENT::Vitamin E and its derivatives" +29362900,hormonesprovitamins and vitamins natural or reproduced by synthesis including natural concentrates derivatives thereof used primarily as vitamins and intermixtures of the foregoing whether or not in any solvent vitamins and their derivatives unmixed concentrates derivatives thereof used primarily as vitamins and intermixtures of the foregoing whether or not in any solvent vitamins and their derivatives unmixed >> other vitamins and their derivatives +29362910,"PROVITAMINS AND VITAMINS, NATURAL OR REPRODUCED BY SYNTHESIS (INCLUDING NATURAL CONCENTRATES), DERIVATIVES THEREOF USED PRIMARILY AS VITAMINS, AND INTERMIXTURES OF THE FOREGOING, WHETHER OR NOT IN ANY SOLVENT:Other vitamins and their derivatives :Folic acid (Vitamin B9)" +29362920,"PROVITAMINS AND VITAMINS, NATURAL OR REPRODUCED BY SYNTHESIS (INCLUDING NATURAL CONCENTRATES), DERIVATIVES THEREOF USED PRIMARILY AS VITAMINS, AND INTERMIXTURES OF THE FOREGOING, WHETHER OR NOT IN ANY SOLVENT:Other vitamins and their derivatives :Nicotinic acid and nicotinamide (niacinamide or niacine)" +29362930,"PROVITAMINS AND VITAMINS, NATURAL OR REPRODUCED BY SYNTHESIS (INCLUDING NATURAL CONCENTRATES), DERIVATIVES THEREOF USED PRIMARILY AS VITAMINS, AND INTERMIXTURES OF THE FOREGOING, WHETHER OR NOT IN ANY SOLVENT:Other vitamins and their derivatives :Vitamin K (menaphthonum BP)" +29362940,"PROVITAMINS AND VITAMINS, NATURAL OR REPRODUCED BY SYNTHESIS (INCLUDING NATURAL CONCENTRATES), DERIVATIVES THEREOF USED PRIMARILY AS VITAMINS, AND INTERMIXTURES OF THE FOREGOING, WHETHER OR NOT IN ANY SOLVENT:Other vitamins and their derivatives :Vitamin D" +29362950,"PROVITAMINS AND VITAMINS, NATURAL OR REPRODUCED BY SYNTHESIS (INCLUDING NATURAL CONCENTRATES), DERIVATIVES THEREOF USED PRIMARILY AS VITAMINS, AND INTERMIXTURES OF THE FOREGOING, WHETHER OR NOT IN ANY SOLVENT:Other vitamins and their derivatives :Vitamin H (Biotin)" +29362990,"PROVITAMINS AND VITAMINS, NATURAL OR REPRODUCED BY SYNTHESIS (INCLUDING NATURAL CONCENTRATES), DERIVATIVES THEREOF USED PRIMARILY AS VITAMINS, AND INTERMIXTURES OF THE FOREGOING, WHETHER OR NOT IN ANY SOLVENT:Other vitamins and their derivatives :Other" +29369000,"PROVITAMINS AND VITAMINS, NATURAL OR REPRODUCED BY SYNTHESIS (INCLUDING NATURAL CONCENTRATES), DERIVATIVES THEREOF USED PRIMARILY AS VITAMINS, AND INTERMIXTURES OF THE FOREGOING, WHETHER OR NOT IN ANY SOLVENT::Other, including natural concentrates" +29370000,hormones prostaglandins thromboxanes and +29371100,"HORMONES, PROSTAGLANDINS, THROMBOXANES AND LEUKOTRIENES, NATURAL OR REPRODUCED BY SYNTHESIS; DERIVATIVES AND STRUCTURAL ANALOGUES THEREOF, INCLUDING CHAIN MODIFIED POLYPEPTIDES, USED PRIMARILY AS HORMONES::Somatotropin, its derivatives and structural analogues(OLD tariff)" +29371200,"HORMONES, PROSTAGLANDINS, THROMBOXANES AND LEUKOTRIENES, NATURAL OR REPRODUCED BY SYNTHESIS; DERIVATIVES AND STRUCTURAL ANALOGUES THEREOF, INCLUDING CHAIN MODIFIED POLYPEPTIDES, USED PRIMARILY AS HORMONES::Insulin and its salts(OLD tariff)" +29371900,"HORMONES, PROSTAGLANDINS, THROMBOXANES AND LEUKOTRIENES, NATURAL OR REPRODUCED BY SYNTHESIS; DERIVATIVES AND STRUCTURAL ANALOGUES THEREOF, INCLUDING CHAIN MODIFIED POLYPEPTIDES, USED PRIMARILY AS HORMONES::Other(OLD tariff)" +29372100,"HORMONES, PROSTAGLANDINS, THROMBOXANES AND LEUKOTRIENES, NATURAL OR REPRODUCED BY SYNTHESIS; DERIVATIVES AND STRUCTURAL ANALOGUES THEREOF, INCLUDING CHAIN MODIFIED POLYPEPTIDES, USED PRIMARILY AS 
HORMONES::Cortisone, hydrocortisone, prednisone (dehydrocortisone) and prednisolone (dehydrohydrocortisone)(OLD tariff)" +29372200,"HORMONES, PROSTAGLANDINS, THROMBOXANES AND LEUKOTRIENES, NATURAL OR REPRODUCED BY SYNTHESIS; DERIVATIVES AND STRUCTURAL ANALOGUES THEREOF, INCLUDING CHAIN MODIFIED POLYPEPTIDES, USED PRIMARILY AS HORMONES::Halogenated derivatives of corticosteroidal harmones(OLD tariff)" +29372300,"HORMONES, PROSTAGLANDINS, THROMBOXANES AND LEUKOTRIENES, NATURAL OR REPRODUCED BY SYNTHESIS; DERIVATIVES AND STRUCTURAL ANALOGUES THEREOF, INCLUDING CHAIN MODIFIED POLYPEPTIDES, USED PRIMARILY AS HORMONES::Oestrogens and progestogens(OLD tariff)" +29372900,"HORMONES, PROSTAGLANDINS, THROMBOXANES AND LEUKOTRIENES, NATURAL OR REPRODUCED BY SYNTHESIS; DERIVATIVES AND STRUCTURAL ANALOGUES THEREOF, INCLUDING CHAIN MODIFIED POLYPEPTIDES, USED PRIMARILY AS HORMONES::Other" +29373100,"HORMONES, PROSTAGLANDINS, THROMBOXANES AND LEUKOTRIENES, NATURAL OR REPRODUCED BY SYNTHESIS; DERIVATIVES AND STRUCTURAL ANALOGUES THEREOF, INCLUDING CHAIN MODIFIED POLYPEPTIDES, USED PRIMARILY AS HORMONES::Epinephrine(OLD tariff)" +29375000,"HORMONES, PROSTAGLANDINS, THROMBOXANES AND LEUKOTRIENES, NATURAL OR REPRODUCED BY SYNTHESIS; DERIVATIVES AND STRUCTURAL ANALOGUES THEREOF, INCLUDING CHAIN MODIFIED POLYPEPTIDES, USED PRIMARILY AS HORMONES::Prostaglandins, thromboxanes and leukotrienes, their derivatives and structural analogues" +29379000,hormones prostaglandins thromboxanes and >> other catecholamine hormones their derivatives and structural analogues +29379011,"HORMONES, PROSTAGLANDINS, THROMBOXANES AND LEUKOTRIENES, NATURAL OR REPRODUCED BY SYNTHESIS; DERIVATIVES AND STRUCTURAL ANALOGUES THEREOF, INCLUDING CHAIN MODIFIED POLYPEPTIDES, USED PRIMARILY AS HORMONES:Other Catecholamine harmones, their derivatives and structural analogues:Epinethrine" +29379019,"HORMONES, PROSTAGLANDINS, THROMBOXANES AND LEUKOTRIENES, NATURAL OR REPRODUCED BY SYNTHESIS; DERIVATIVES AND STRUCTURAL ANALOGUES THEREOF, INCLUDING CHAIN MODIFIED POLYPEPTIDES, USED PRIMARILY AS HORMONES:Other Catecholamine harmones, their derivatives and structural analogues:Other" +29379020,"HORMONES, PROSTAGLANDINS, THROMBOXANES AND LEUKOTRIENES, NATURAL OR REPRODUCED BY SYNTHESIS; DERIVATIVES AND STRUCTURAL ANALOGUES THEREOF, INCLUDING CHAIN MODIFIED POLYPEPTIDES, USED PRIMARILY AS HORMONES:Other Catecholamine harmones, their derivatives and structural analogues:Amino-acid derivatives" +29379090,"HORMONES, PROSTAGLANDINS, THROMBOXANES AND LEUKOTRIENES, NATURAL OR REPRODUCED BY SYNTHESIS; DERIVATIVES AND STRUCTURAL ANALOGUES THEREOF, INCLUDING CHAIN MODIFIED POLYPEPTIDES, USED PRIMARILY AS HORMONES:Other Catecholamine harmones, their derivatives and structural analogues:Other" +29380000,glycosides natural or reproduced by synthesis and their salts ethers esters and other derivatives +29381000,"GLYCOSIDES, NATURAL OR REPRODUCED BY SYNTHESIS AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES::Rutoside (rutin) and its derivatives" +29389000,glycosides natural or reproduced by synthesis and their salts ethers esters and other derivatives >> other +29389010,"GLYCOSIDES, NATURAL OR REPRODUCED BY SYNTHESIS AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES:Other :Digoxin" +29389020,"GLYCOSIDES, NATURAL OR REPRODUCED BY SYNTHESIS AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES:Other :Digitalis glycosides" +29389090,"GLYCOSIDES, NATURAL OR REPRODUCED BY SYNTHESIS AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES:Other :Other" 
+29390000,alkaloids natural or reproduced by synthesis and their salts ethers esters and other derivatives alkaloids of opium and their derivatives salts thereof +29391100,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES::Concentrates of poppy straw; buprenorphine (INN), codeine, dihydrocodeine (INN), ethylmorphine, etorphine (INN), heroin, hydrocodone (INN), hydromorphone (INN), morphine, nicomorphine (INN), oxycodone (INN), oxymorphone (INN), pholcodine (INN), thebacon (INN) and thebaine; salts thereof" +29391900,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES::Other;" +29392000,alkaloids natural or reproduced by synthesis and their salts ethers esters and other derivatives alkaloids of opium and their derivatives salts thereof >> alkaloid of cinchona and their derivatives salts thereof +29392010,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES:Alkaloid of cinchona and their derivatives; salts thereof :Quinine alkaloids" +29392020,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES:Alkaloid of cinchona and their derivatives; salts thereof :Quinine hydrochloride" +29392030,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES:Alkaloid of cinchona and their derivatives; salts thereof :Quinine sulphate" +29392040,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES:Alkaloid of cinchona and their derivatives; salts thereof :Chloroquine phosphate" +29392050,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES:Alkaloid of cinchona and their derivatives; salts thereof :Benzeneacetic acid, alpha hydroxy-alpha-phenyl, 1- azabicyclo[2.2.2.]oct-3-yl ester(OLD tariff)" +29392090,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES:Alkaloid of cinchona and their derivatives; salts thereof :Other" +29393000,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES::Caffeine and its salts" +29394100,"VEGETABLE ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES:Ephedrine and its salts :Ephedrine and its salt" +29394110,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES:Ephedrine and its salts :Ephedrine alkaloids(OLD tariff)" +29394120,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES:Ephedrine and its salts :Ephedrine hydrochloride(OLD tariff)" +29394190,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES:Ephedrine and its salts :Other(OLD tariff)" +29394200,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES::Pseudoephedrine (INN) and its salts" +29394300,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES::Cathine (INN) and its salts" +29394400,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES:Norephedirine and its salts:Norephedrine and its salts" +29394500,"VEGETABLE ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES:levo:Levometamfetamine, metamfetamine (INN), 
metamfetamine racemate and their salts" +29394900,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES::Other" +29395100,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES::Fenetylline (INN) and its salts" +29395900,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES::Other" +29396100,alkaloids natural or reproduced by synthesis and their salts ethers esters and other derivatives alkaloids of opium and their derivatives salts thereof >> ergometrine inn and its salts +29396110,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES:Ergometrine (INN) and its salts :Ergometrine (INN)" +29396190,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES:Ergometrine (INN) and its salts :Other" +29396200,alkaloids natural or reproduced by synthesis and their salts ethers esters and other derivatives alkaloids of opium and their derivatives salts thereof >> ergotamine inn and its salts +29396210,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES:Ergotamine (INN) and its salts :Ergotamine tartrate" +29396290,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES:Ergotamine (INN) and its salts :Other" +29396300,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES::Lysergic acid and its salts" +29396900,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES::Other" +29397100,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES::Cocaine, ecgonine, levometamfetamine, metamfetamine (INN), metamfetamine racemate; salts, esters and other derivatives thereof(OLD tariff)" +29397200,"VEGETABLE ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES:cocaine:Cocaine, ecgonine; salts, esters and other derivatives thereof" +29397900,"VEGETABLE ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES:Other:Other" +29397910,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES:Other:Nicotine(OLD tariff)" +29397990,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES:Other:Other(OLD tariff)" +29398000,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES::Other" +29399100,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES::Cocaine, ecgonine, levometamfetamine, metamfetamine(INN), metamfetamine racemate; salts, esters and other derivatives thereof(OLD tariff)" +29399900,"ALKALOIDS, NATURAL OR REPRODUCED BY SYNTHESIS, AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES::Other(OLD tariff)" +29400000,"::SUGARS, CHEMICALLY PURE, OTHER THAN SUCROSE, LACTOSE, MALTOSE, GLUCOSE AND FRUCTOSE; SUGAR ETHERS, SUGAR ACETALS AND SUGAR ESTERS AND THEIR SALTS, OTHER THAN PRODUCTS OF HEADINGS 2937, 2938 OR 2939" +29410000,antibiotics +29411000,antibiotics >> penicillins and their derivative with a penicillanic acid structure salts thereof +29411010,"ANTIBIOTICS:Penicillins and their derivative with a penicillanic acid structure; salts thereof :Penicillins and its salts ( e.g. 
procaine penicillin, penicillin G-potassium)" +29411020,ANTIBIOTICS:Penicillins and their derivative with a penicillanic acid structure; salts thereof :Ampicilline and its salts +29411030,ANTIBIOTICS:Penicillins and their derivative with a penicillanic acid structure; salts thereof :Amoxycilline and its salts +29411040,ANTIBIOTICS:Penicillins and their derivative with a penicillanic acid structure; salts thereof :Cioxacilline and its salts +29411050,ANTIBIOTICS:Penicillins and their derivative with a penicillanic acid structure; salts thereof :6-APA +29411090,ANTIBIOTICS:Penicillins and their derivative with a penicillanic acid structure; salts thereof :Other +29412000,antibiotics >> streptomycins and their derivatives salts thereof +29412010,ANTIBIOTICS:Streptomycins and their derivatives; salts thereof :Streptomycin +29412090,ANTIBIOTICS:Streptomycins and their derivatives; salts thereof :Other +29413000,antibiotics >> tetracyclines and their derivatives salts thereof +29413010,"ANTIBIOTICS:Tetracyclines and their derivatives, salts thereof :Doxycycline and its salts" +29413020,"ANTIBIOTICS:Tetracyclines and their derivatives, salts thereof :Tetracycline/oxytetra-cycline and their salts" +29413090,"ANTIBIOTICS:Tetracyclines and their derivatives, salts thereof :Other" +29414000,ANTIBIOTICS::Chloramphenicol and its derivatives; salts thereof +29415000,ANTIBIOTICS::Erythromycin and its derivatives; salts thereof +29419000,antibiotics >> other rifampicin and its salts +29419011,ANTIBIOTICS:Other :Rifampicin +29419012,ANTIBIOTICS:Other :3 Formyl Rifa S V(Rifa int) +29419013,ANTIBIOTICS:Other :Rifa S or Rifa S Sodium (Rifaint) +29419014,ANTIBIOTICS:Other :1-Amino-4-Methyl piperazine (Rifaint) +29419019,ANTIBIOTICS:Other :Other +29419020,ANTIBIOTICS:Other :Cephalexin and its salts +29419030,ANTIBIOTICS:Other :Ciprofloxacine and its salts +29419040,ANTIBIOTICS:Other :Gentamycin and its salts +29419050,ANTIBIOTICS:Other :Neomycin +29419060,ANTIBIOTICS:Other :Norfloxacin and its salts +29419090,ANTIBIOTICS:Other :Other +29420000,other organic compounds other organic compounds cefadroxil and its salts ibuprofane nifedipine ranitidine danes salt of d phenyl glycine d para hydroxy dane s salts +29420011,OTHER ORGANIC COMPOUNDS:Other organic compounds:Cefadroxil and its salts +29420012,OTHER ORGANIC COMPOUNDS:Other organic compounds:Ibuprofane +29420013,OTHER ORGANIC COMPOUNDS:Other organic compounds:Nifedipine +29420014,OTHER ORGANIC COMPOUNDS:Other organic compounds:Ranitidine +29420015,OTHER ORGANIC COMPOUNDS:Other organic compounds:Danes salt of D(-) phenyl glycine +29420016,OTHER ORGANIC COMPOUNDS:Other organic compounds:D(-) para hydroxy dane s salts +29420021,OTHER ORGANIC COMPOUNDS:Other organic compounds:Timolo maleate +29420022,OTHER ORGANIC COMPOUNDS:Other organic compounds:Terbutoline sulphate +29420023,OTHER ORGANIC COMPOUNDS:Other organic compounds:D(-) phenyl glycin chloride HCL (DPGCH) +29420024,OTHER ORGANIC COMPOUNDS:Other organic compounds:Imipramine HCl +29420025,OTHER ORGANIC COMPOUNDS:Other organic compounds:Amitryptyline HCl +29420026,OTHER ORGANIC COMPOUNDS:Other organic compounds:Cysteanune HCl +29420027,"OTHER ORGANIC COMPOUNDS:Other organic compounds:Atenolol, propronalol" +29420031,OTHER ORGANIC COMPOUNDS:Other organic compounds:Diloxanide furoate +29420032,OTHER ORGANIC COMPOUNDS:Other organic compounds:Cimetidine +29420033,OTHER ORGANIC COMPOUNDS:Other organic compounds:Oxyclozanide +29420034,OTHER ORGANIC COMPOUNDS:Other organic compounds:Famotidine +29420090,OTHER 
ORGANIC COMPOUNDS:Other organic compounds:Other +30012010,"GLANDS AND OTHER ORGANS FOR ORGANOTHERAPEUTIC USES, DRIED,WHETHER OR NOT POWDERED; EXTRACTS OF GLANDS OR OTHER ORGANS OR OF THEIR SECRETIONS FOR ORGANOTHERAPEUTIC USES; HEPARIN AND ITS SALTS; OTHER HUMAN OR ANIMAL SUBSTANCES PREPARED FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT ELSEWHERE SPECIFIED OR INCLUDED:Extracts of glands or other organs or of their secretions :Liquid extracts of liver(OLD tariff)" +30012020,"GLANDS AND OTHER ORGANS FOR ORGANOTHERAPEUTIC USES, DRIED,WHETHER OR NOT POWDERED; EXTRACTS OF GLANDS OR OTHER ORGANS OR OF THEIR SECRETIONS FOR ORGANOTHERAPEUTIC USES; HEPARIN AND ITS SALTS; OTHER HUMAN OR ANIMAL SUBSTANCES PREPARED FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT ELSEWHERE SPECIFIED OR INCLUDED:Extracts of glands or other organs or of their secretions :Liver extracts, dry(OLD tariff)" +30012030,"GLANDS AND OTHER ORGANS FOR ORGANOTHERAPEUTIC USES, DRIED,WHETHER OR NOT POWDERED; EXTRACTS OF GLANDS OR OTHER ORGANS OR OF THEIR SECRETIONS FOR ORGANOTHERAPEUTIC USES; HEPARIN AND ITS SALTS; OTHER HUMAN OR ANIMAL SUBSTANCES PREPARED FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT ELSEWHERE SPECIFIED OR INCLUDED:Extracts of glands or other organs or of their secretions :Snake venom(OLD tariff)" +30012090,"GLANDS AND OTHER ORGANS FOR ORGANOTHERAPEUTIC USES, DRIED,WHETHER OR NOT POWDERED; EXTRACTS OF GLANDS OR OTHER ORGANS OR OF THEIR SECRETIONS FOR ORGANOTHERAPEUTIC USES; HEPARIN AND ITS SALTS; OTHER HUMAN OR ANIMAL SUBSTANCES PREPARED FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT ELSEWHERE SPECIFIED OR INCLUDED:Extracts of glands or other organs or of their secretions :Other(OLD tariff)" +30019010,"GLANDS AND OTHER ORGANS FOR ORGANOTHERAPEUTIC USES, DRIED,WHETHER OR NOT POWDERED; EXTRACTS OF GLANDS OR OTHER ORGANS OR OF THEIR SECRETIONS FOR ORGANOTHERAPEUTIC USES; HEPARIN AND ITS SALTS; OTHER HUMAN OR ANIMAL SUBSTANCES PREPARED FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Of human origin (OLD tariff)" +30019091,"GLANDS AND OTHER ORGANS FOR ORGANOTHERAPEUTIC USES, DRIED,WHETHER OR NOT POWDERED; EXTRACTS OF GLANDS OR OTHER ORGANS OR OF THEIR SECRETIONS FOR ORGANOTHERAPEUTIC USES; HEPARIN AND ITS SALTS; OTHER HUMAN OR ANIMAL SUBSTANCES PREPARED FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Heparin and its salts(OLD tariff)" +30019099,"GLANDS AND OTHER ORGANS FOR ORGANOTHERAPEUTIC USES, DRIED,WHETHER OR NOT POWDERED; EXTRACTS OF GLANDS OR OTHER ORGANS OR OF THEIR SECRETIONS FOR ORGANOTHERAPEUTIC USES; HEPARIN AND ITS SALTS; OTHER HUMAN OR ANIMAL SUBSTANCES PREPARED FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Other(OLD tariff)" +30020000,fortherapeutic prophylactic or diagnostic uses antisera other blood fractions and immunological products whether or not modified or obtained by means of biotechnological processes vaccines toxins cultures of excluding yeasts and other similar products cell cultures whether or not modified antisera other blood fractions and products immunological whether or not modified or obtained by biotechnologicalprocesses +30021011,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Antisera, other blood fractions and immunological products, whether or not 
modified or obtained by biotechnological processes:For diptheria(OLD tariff)" +30021012,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Antisera, other blood fractions and immunological products, whether or not modified or obtained by biotechnological processes:For tetanus(OLD tariff)" +30021013,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Antisera and other blood fractions and modified immunological products, whether or not obtained by means of biotechnological processes :For rabies(OLD tariff)" +30021014,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Antisera and other blood fractions and modified immunological products, whether or not obtained by means of biotechnological processes :For snake venom(OLD tariff)" +30021019,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Antisera and other blood fractions and modified immunological products, whether or not obtained by means of biotechnological processes :Other(OLD tariff)" +30021020,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Antisera and other blood fractions and modified immunological products, whether or not obtained by means of biotechnological processes :Hemoglobin blood globulins and serum globulins (OLD tariff)" +30021091,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Antisera, other blood fractions and immunological products, whether or not modified or obtained by biotechnological processes:Of human origin(OLD tariff)" +30021099,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Antisera, other blood fractions and immunological products, whether or not modified or obtained by biotechnological processes:Other(OLD tariff)" +30021100,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD 
FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS::Malaria diagnostic test kits(OLD tariff)" +30021200,fortherapeutic prophylactic or diagnostic uses antisera other blood fractions and immunological products whether or not modified or obtained by means of biotechnological processes vaccines toxins cultures of excluding yeasts and other similar products cell cultures whether or not modified antisera other blood fractions and products immunological whether or not modified or obtained by biotechnologicalprocesses >> antisera and other blood fractions +30021210,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Antisera and other blood fractions:For diphtheria" +30021220,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Antisera and other blood fractions:For tetanus" +30021230,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Antisera and other blood fractions:For rabies" +30021240,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Antisera and other blood fractions:For snake venom" +30021290,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Antisera and other blood fractions:Other" +30021300,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Immunological products, unmixed, not put up in measured doses or in forms or packings for retail sale:Immunological products, unmixed, not put up in measured doses or in forms or packings for retail sale" +30021310,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Immunological products, unmixed, not put up in measured doses or in forms or packings for retail sale:Immunological products, unmixed, not put up in measured doses or in forms or 
packings for retail sale(OLD tariff)" +30021400,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Immunological products, mixed, not put up in measured doses or in forms or packings for retail sale:Immunological products, mixed, not put up in measured doses or in forms or packings for retail sale" +30021410,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Immunological products, mixed, not put up in measured doses or in forms or packings for retail sale:Immunological products, mixed, not put up in measured doses or in forms or packings for retail sale(OLD tariff)" +30021500,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS::Immunological products, put up in measured doses or in forms or packings for retail sale" +30021900,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS::Other(OLD tariff)" +30022011,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine :For cholera and typhoid(OLD tariff)" +30022012,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine :For hepatitis(OLD tariff)" +30022013,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine :For tetanus(OLD tariff)" +30022014,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine :For polio(OLD tariff)" +30022015,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF 
BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine :For tuberculosis(OLD tariff)" +30022016,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine :For rabies(OLD tariff)" +30022017,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine :For Japanese encephalitis(OLD tariff)" +30022018,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine :For whopping cough (pertusis)(OLD tariff)" +30022019,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine :Other (OLD tariff)" +30022021,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine :For diphtheria, pertusis and tetanus (DPT)(OLD tariff)" +30022022,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine :For diphtheria and tetanus (DT)(OLD tariff)" +30022023,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine :For measles, mumps and rubella (MMR)(OLD tariff)" +30022024,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine :For typhoid-paratyphoid (TAB) or typhoid- paratyphoid-cholera (TABC)(OLD tariff)" +30022029,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, 
TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine :Other(OLD tariff)" +30023000,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS::Vaccines for veterinary medicine(OLD tariff)" +30024100,fortherapeutic prophylactic or diagnostic uses antisera other blood fractions and immunological products whether or not modified or obtained by means of biotechnological processes vaccines toxins cultures of excluding yeasts and other similar products cell cultures whether or not modified antisera other blood fractions and products immunological whether or not modified or obtained by biotechnologicalprocesses >> vaccines for human medicine single vaccines for +30024111,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine::Cholera and typhoid" +30024112,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine::Hepatitis" +30024113,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine::Tetanus" +30024114,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine::Polio" +30024115,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine::Tuberclosis" +30024116,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine::Rabies" +30024117,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine::Japanese encephalitis" +30024118,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, 
PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine::Whopping cough (pertussis)" +30024119,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine::Other" +30024121,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine::Diphtheria, pertussis and tetanus (DPT)" +30024122,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine::Diphtheria and tetanus (DT)" +30024123,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine::Measles, mumps and rubella (MMR)" +30024124,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine::Typhoid-paratyphoid (TAB)" +30024125,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine::Typhoid- paratyphoid-cholera (TABC)" +30024129,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Vaccines for human medicine::other" +30024200,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:vaccines:Vaccines for veterinary medicine" +30024900,fortherapeutic prophylactic or diagnostic uses antisera other blood fractions and immunological products whether or not modified or obtained by means of biotechnological processes vaccines toxins cultures of excluding yeasts and other similar products cell 
cultures whether or not modified antisera other blood fractions and products immunological whether or not modified or obtained by biotechnologicalprocesses >> other +30024910,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Other:Cultures of micro-organisms (excluding yeast)" +30024920,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Other:Toxins" +30024990,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Other:other" +30025100,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:cell:Cell therapy products" +30025900,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:other:Other" +30029000,fortherapeutic prophylactic or diagnostic uses antisera other blood fractions and immunological products whether or not modified or obtained by means of biotechnological processes vaccines toxins cultures of excluding yeasts and other similar products cell cultures whether or not modified antisera other blood fractions and products immunological whether or not modified or obtained by biotechnologicalprocesses >> other +30029010,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Other :Human blood" +30029020,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Other :Animal blood prepared for therapeutic, prophylactic or diagnostic uses" +30029030,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Other :Cultures of micro-organisms (excluding yeast)(OLD tariff)" +30029040,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD 
FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Other :Toxins(OLD tariff)" +30029090,"HUMAN BLOOD; ANIMAL BLOOD PREPARED FOR THERAPEUTIC, PROPHYLACTIC OR DIAGNOSTIC USES; ANTISERA AND OTHER BLOOD FRACTIONS AND MODIFIED IMMUNOLOGICAL PRODUCTS, WHETHER OR NOT OBTAINED BY MEANS OF BIOTECHNOLOGICAL PROCESSES; VACCINES, TOXINS, CULTURES OF MICRO-ORGANISMS (EXCLUDING YEASTS) AND SIMILAR PRODUCTS:Other :Other" +30030000,medicaments excluding goods of heading 3002 3005 or 3006 consisting of two or more constituents which have been mixed together for therapeutic or prophylactic uses not put up in measured doses or in forms or packings for retail sale +30031000,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE::Containing penicillins or derivatives thereof, with a penicillanic acid structure, or streptomycins or their derivatives" +30032000,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE::Other, containing antibiotics" +30033100,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE::Containing insulin" +30033900,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE::Other" +30034000,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE::Containing alkaloids or derivatives thereof but not containing hormones or other products of heading 2937 or antibiotics(OLD tariff)" +30034100,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE::Containing ephedrine or its salts" +30034200,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE::Containing pseudoephedrine (INN) or its salts" +30034300,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE::Containing norephedrine or its salts" +30034900,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE::Other" +30036000,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) 
CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE::Other, containing antimalarial active principles described in Sub-heading Note 2 to this Chapter" +30039000,medicaments excluding goods of heading 3002 3005 or 3006 consisting of two or more constituents which have been mixed together for therapeutic or prophylactic uses not put up in measured doses or in forms or packings for retail sale >> other ayurvedic unani siddha homoeopathic or systems medicaments +30039011,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Of Ayurvedic system" +30039012,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Of Unani system" +30039013,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Of Siddha system" +30039014,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Of Homoeopathic system" +30039015,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Of Bio-chemic system" +30039021,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Menthol crystals" +30039022,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Milk of magnesia " +30039031,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Bovine albumin and drugs of animal origin" +30039032,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Merbromine national formulary XII (mercurochrome)" +30039033,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Calcium sennoside" +30039034,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR 
PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Anaesthetic agents used in human or veterinary medicine or surgery" +30039035,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Aluminium hydroxide gel" +30039036,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Ketamine" +30039090,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF TWO OR MORE CONSTITUENTS WHICH HAVE BEEN MIXED TOGETHER FOR THERAPEUTIC OR PROPHYLACTIC USES, NOT PUT UP IN MEASURED DOSES OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Other" +30040000,medicaments excluding goods of heading 3002 3005 or 3006 consisting of mixed or unmixed products for therapeutic or prophylactic uses put up in measured doses including those in the form of transdermal administration or in systems forms or packings for retail sale +30041000,medicaments excluding goods of heading 3002 3005 or 3006 consisting of mixed or unmixed products for therapeutic or prophylactic uses put up in measured doses including those in the form of transdermal administration or in systems forms or packings for retail sale >> containing penicillins or derivatives thereof with a penicillanic acid structure or streptomycins or their derivatives +30041010,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Containing penicillins or derivatives thereof, with a penicillanic acid structure, or streptomycins or their derivatives :Penicillins" +30041020,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Containing penicillins or derivatives thereof, with a penicillanic acid structure, or streptomycins or their derivatives :Ampicillin" +30041030,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Containing penicillins or derivatives thereof, with a penicillanic acid structure, or streptomycins or their derivatives :Amoxycillin" +30041040,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Containing penicillins or derivatives thereof, with a penicillanic acid structure, or streptomycins or their derivatives :Becampicillin" +30041050,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) 
OR IN FORMS OR PACKINGS FOR RETAIL SALE:Containing penicillins or derivatives thereof, with a penicillanic acid structure, or streptomycins or their derivatives :Cloxacillin" +30041060,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Containing penicillins or derivatives thereof, with a penicillanic acid structure, or streptomycins or their derivatives :Ampicillin and cloxacillin combinations" +30041070,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Containing penicillins or derivatives thereof, with a penicillanic acid structure, or streptomycins or their derivatives :Streptomycin" +30041090,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Containing penicillins or derivatives thereof, with a penicillanic acid structure, or streptomycins or their derivatives :Other" +30042000,medicaments excluding goods of heading 3002 3005 or 3006 consisting of mixed or unmixed products for therapeutic or prophylactic uses put up in measured doses including those in the form of transdermal administration or in systems forms or packings for retail sale >> other containing antibiotics cephalosporins and their derivatives +30042011,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Cefazolin" +30042012,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Cephalexin" +30042013,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Ciprofloxacin" +30042014,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Cefoxitin" +30042019,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Other" +30042020,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC 
USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Sulfonamides and cotrimoxazole " +30042031,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Norfloxacin" +30042032,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Nalidixic acid" +30042033,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Ciprofloxacin" +30042034,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Ofloxacin" +30042039,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Other " +30042041,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Chlortetracycline" +30042042,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Oxytetracycline" +30042049,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Other" +30042050,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Chloramphenicol" +30042061,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Erythromycin" +30042062,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR 
UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Roxithromycin" +30042063,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Clarithromycin" +30042064,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Azithromycin" +30042069,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Other" +30042070,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Cefadroxil " +30042091,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Isoniazid" +30042092,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Rifampicin" +30042093,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Pyrazinamide" +30042094,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Ethambutol" +30042095,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Clindamycin" +30042096,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Vancomycin" +30042097,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) 
CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing antibiotics:Polymyxin B and colistin" +30042099,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Containing other antibiotics:Other" +30043100,medicaments excluding goods of heading 3002 3005 or 3006 consisting of mixed or unmixed products for therapeutic or prophylactic uses put up in measured doses including those in the form of transdermal administration or in systems forms or packings for retail sale >> containing insulin +30043110,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Containing insulin :Insulin injection" +30043190,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Containing insulin :Other" +30043200,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE::Containing corticosteroid hormones, their derivatives or structural analogues" +30043900,medicaments excluding goods of heading 3002 3005 or 3006 consisting of mixed or unmixed products for therapeutic or prophylactic uses put up in measured doses including those in the form of transdermal administration or in systems forms or packings for retail sale >> other pituitary hormones prednisolone dexamethasone danazol other progestogen and oestogen group hormones +30043911,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Pituitary hormones" +30043912,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Prednisolone" +30043913,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Dexamethasone" +30043914,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Danazol" +30043919,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING 
OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Other progestogen and oestogen group hormones" +30043921,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Gonadotrophins" +30043922,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Luteinising hormone" +30043990,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Other" +30044010,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Containing alkaloids or derivatives thereof but not containing hormones, other products of heading 2937 or antibiotics :Atropin and salts thereof(OLD tariff)" +30044020,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Containing alkaloids or derivatives thereof but not containing hormones, other products of heading 2937 or antibiotics :Caffein and salts thereof(OLD tariff)" +30044030,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Containing alkaloids or derivatives thereof but not containing hormones, other products of heading 2937 or antibiotics :Codeine and its derivatives, with or without ephedrine hydrochloride(OLD tariff)" +30044040,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Containing alkaloids or derivatives thereof but not containing hormones, other products of heading 2937 or antibiotics :Ergot preperations, ergotamine and salts thereof(OLD tariff)" +30044050,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Containing alkaloids or derivatives thereof but not containing hormones, other products of heading 2937 or antibiotics :Papaverine hydrochloride(OLD tariff)" +30044060,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR 
THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Containing alkaloids or derivatives thereof but not containing hormones, other products of heading 2937 or antibiotics :Bromohexin and solbutamol(OLD tariff)" +30044070,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Containing alkaloids or derivatives thereof but not containing hormones, other products of heading 2937 or antibiotics :Theophylline and ephedrine(OLD tariff)" +30044090,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Containing alkaloids or derivatives thereof but not containing hormones, other products of heading 2937 or antibiotics :Other(OLD tariff)" +30044100,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE::Containing ephedrine or its salts" +30044200,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE::Containing pseudoephedrine (INN) or its salts" +30044300,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE::Containing norephedrine or its salts" +30044900,medicaments excluding goods of heading 3002 3005 or 3006 consisting of mixed or unmixed products for therapeutic or prophylactic uses put up in measured doses including those in the form of transdermal administration or in systems forms or packings for retail sale >> other +30044910,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Atropin and salts thereof" +30044920,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Caffein and salts thereof" +30044930,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Codeine and derivatives, with or without ephidrine hydrochloride" +30044940,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC 
OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Ergot preparations, ergotamine and salts thereof" +30044950,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Papaverine hydrochloride" +30044960,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Bromohexin and solbutamol" +30044970,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Theophylline and salts thereof" +30044990,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Other" +30045000,medicaments excluding goods of heading 3002 3005 or 3006 consisting of mixed or unmixed products for therapeutic or prophylactic uses put up in measured doses including those in the form of transdermal administration or in systems forms or packings for retail sale >> other containing vitamins or other products of heading 2936 +30045010,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing vitamins or other products of heading 2936:Haematinics and erythropoietin preparations" +30045020,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing vitamins or other products of heading 2936:Preparations of minerals and their supplements " +30045031,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing vitamins or other products of heading 2936:Of vitamin A" +30045032,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing vitamins or other products of heading 2936:Of vitamin B1 and B2 and salts thereof" +30045033,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) 
OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing vitamins or other products of heading 2936:Of vitamin B9" +30045034,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing vitamins or other products of heading 2936:Of vitamin B12" +30045035,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing vitamins or other products of heading 2936:Of vitamin C" +30045036,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing vitamins or other products of heading 2936:Of vitamin D" +30045037,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing vitamins or other products of heading 2936:Of vitamin E" +30045039,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing vitamins or other products of heading 2936:Other" +30045090,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other, containing vitamins or other products of heading 2936:Other" +30046000,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE::Other, containing antimalarial active principles described in Sub-heading Note 2 to this Chapter" +30049000,medicaments excluding goods of heading 3002 3005 or 3006 consisting of mixed or unmixed products for therapeutic or prophylactic uses put up in measured doses including those in the form of transdermal administration or in systems forms or packings for retail sale >> other ayurvedic unani homoeopathic siddha or systems medicaments put up for retail sale +30049011,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Of Ayurvedic system" +30049012,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN 
FORMS OR PACKINGS FOR RETAIL SALE:Other:Of Unani system" +30049013,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Of Siddha system" +30049014,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Of Homoeopathic system" +30049015,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Of Bio-chemic system " +30049021,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Anthelmintics and preparations thereof" +30049022,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Metronidazole" +30049023,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Tinidazole" +30049024,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Secnidazole" +30049025,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Diluxamide furoate" +30049026,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Sodium stibogluconate" +30049027,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Pentamidine" +30049029,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Other " +30049031,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR 
THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Promethazine, chlorpheniramine, astemizole and cetirizine" +30049032,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Sodium bicarbonate, magnesium hydroxide (milk of magnesia), magnesium carbonate, magnesium trisilicate, aluminium hydroxide gel, magaldarate and combinations thereof" +30049033,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Cimetidine, ranitidine, nizatidine and roxatidine" +30049034,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Omeprazole and lansoprazole" +30049035,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Dicyclomine, metoclopramide and dexamethasone and ondansetron" +30049036,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Chenodiol and ursodiol" +30049039,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Other " +30049041,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Cyclophosphamide" +30049042,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Methotrexate, 5-fluorouracil(5-FU) and ftorafur" +30049043,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Vincristine and vinblastine" +30049044,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL 
SALE:Other:Paclitaxel and docetaxel" +30049045,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Etoposide" +30049046,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Actinomycin D Dactinomycin and doxorubicin" +30049047,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:L-Asparaginase, cisplatin and carboplatin" +30049048,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Tamoxifen" +30049049,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Other " +30049051,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Isoniazid" +30049052,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Rifampicin" +30049053,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Pyrazinamide and ethambutol" +30049054,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Streptomycin" +30049055,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Dapsone (DDS), acedapsone (DADDS), solopsone and clofazimine" +30049056,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Chloroquine, amodiaquine, mefloquine, quinine, chloroguamide, pyrimethamine" +30049057,"MEDICAMENTS 
(EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Other antitubercular drugs" +30049058,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Other antileprotic drugs" +30049059,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Other antimalarial drugs " +30049061,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Analgin with or without other compounds such as paracetamol" +30049062,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Acetyl salicylic acid (aspirin) and formulations thereof" +30049063,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Ibuprofen with or without paracetamol or other compounds" +30049064,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Oxyphen butazone, phenyl butazone and formulations thereof" +30049065,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Indomethacin" +30049066,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Mephenamic acid, dactofenac sodium, piroxicam, tenoxicam and meloxicam" +30049067,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Ketorolac, nimesulide, nabumetone and nefopam" +30049069,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR 
IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Other " +30049071,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Captopril, enalapril, lisinopril, perindopril and ramipril" +30049072,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Verapamil, nifedipine, amlodipine and lacidipine" +30049073,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Losartan" +30049074,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Propranolol, metoprolol, atenolol and labetalol" +30049075,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Prazosin, terazosin, phentolamine and phenoxybenzamine" +30049076,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Clonidine, methyldopa" +30049077,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Hydralazine, minoxidil and diazoxide" +30049079,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Other " +30049081,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Phenobarbitone, mephobarbitone, primidone, phenytoin, carbamazepine, ethosuximide, valproic acid (sodium valproate), diazepam, lamotrigine, gabapentin, vigabatrin, phenacemide, trimethadione and acetazolamide" +30049082,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Other antiepileptic drugs" +30049083,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED 
OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Sulpha drugs not elsewhere specified or included" +30049084,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Preparations of enzymes" +30049085,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Veterinary medicinal preparations, not for human use, not elsewhere specified or included" +30049086,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Oral rehydration salts" +30049087,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Antibacterial formulations, not elsewhere specified or included" +30049088,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Sedatives" +30049089,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Tranquilizers " +30049091,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Salbutamol, terbutaline, ephedrine, salmeterol and methyl xanthines" +30049092,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Plasma expanders" +30049093,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Chlorpheniramine maleate, with or without other compounds (excluding steroids and alkaloids)" +30049094,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Theophylline, 
aminophylline and other broncho dilators" +30049095,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Carcino-chemotherapeutic drugs not elsewhere specified or included" +30049096,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Ketamine" +30049099,"MEDICAMENTS (EXCLUDING GOODS OF HEADING 3002, 3005 OR 3006) CONSISTING OF MIXED OR UNMIXED PRODUCTS FOR THERAPEUTIC OR PROPHYLACTIC USES, PUT UP IN MEASURED DOSES (INCLUDING THOSE IN THE FORM OF TRANSDERMAL ADMINISTRATION SYSTEMS) OR IN FORMS OR PACKINGS FOR RETAIL SALE:Other:Other" +30050000,wadding gauze bandages and similar articles for example dressings adhesive plasters impregnated or poultices coated with pharmaceutical substances or put up in forms or packings for retail sale for medical surgical dental or veterinary purposes +30051000,wadding gauze bandages and similar articles for example dressings adhesive plasters impregnated or poultices coated with pharmaceutical substances or put up in forms or packings for retail sale for medical surgical dental or veterinary purposes >> adhesive dressings and other articles having an adhesive layer +30051010,"WADDING, GAUZE, BANDAGES AND SIMILAR ARTICLES (FOR EXAMPLE, DRESSINGS, ADHESIVE PLASTERS, POULTICES), IMPREGNATED OR COATED WITH PHARMACEUTICAL SUBSTANCES OR PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE FOR MEDICAL, SURGICAL, DENTAL OR VETERINARY PURPOSES:Adhesive dressings and other articles having an adhesive layer :Adhesive guaze" +30051020,"WADDING, GAUZE, BANDAGES AND SIMILAR ARTICLES (FOR EXAMPLE, DRESSINGS, ADHESIVE PLASTERS, POULTICES), IMPREGNATED OR COATED WITH PHARMACEUTICAL SUBSTANCES OR PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE FOR MEDICAL, SURGICAL, DENTAL OR VETERINARY PURPOSES:Adhesive dressings and other articles having an adhesive layer :Adhesive tape" +30051090,"WADDING, GAUZE, BANDAGES AND SIMILAR ARTICLES (FOR EXAMPLE, DRESSINGS, ADHESIVE PLASTERS, POULTICES), IMPREGNATED OR COATED WITH PHARMACEUTICAL SUBSTANCES OR PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE FOR MEDICAL, SURGICAL, DENTAL OR VETERINARY PURPOSES:Adhesive dressings and other articles having an adhesive layer :Other" +30059000,wadding gauze bandages and similar articles for example dressings adhesive plasters impregnated or poultices coated with pharmaceutical substances or put up in forms or packings for retail sale for medical surgical dental or veterinary purposes >> other +30059010,"WADDING, GAUZE, BANDAGES AND SIMILAR ARTICLES (FOR EXAMPLE, DRESSINGS, ADHESIVE PLASTERS, POULTICES), IMPREGNATED OR COATED WITH PHARMACEUTICAL SUBSTANCES OR PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE FOR MEDICAL, SURGICAL, DENTAL OR VETERINARY PURPOSES:Other :Cotton wool, medicated" +30059020,"WADDING, GAUZE, BANDAGES AND SIMILAR ARTICLES (FOR EXAMPLE, DRESSINGS, ADHESIVE PLASTERS, POULTICES), IMPREGNATED OR COATED WITH PHARMACEUTICAL SUBSTANCES OR PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE FOR MEDICAL, SURGICAL, DENTAL OR VETERINARY PURPOSES:Other :Poultice of kaolin" +30059030,"WADDING, GAUZE, BANDAGES AND SIMILAR ARTICLES (FOR EXAMPLE, DRESSINGS, ADHESIVE PLASTERS, 
POULTICES), IMPREGNATED OR COATED WITH PHARMACEUTICAL SUBSTANCES OR PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE FOR MEDICAL, SURGICAL, DENTAL OR VETERINARY PURPOSES:Other :Lint, medicated" +30059040,"WADDING, GAUZE, BANDAGES AND SIMILAR ARTICLES (FOR EXAMPLE, DRESSINGS, ADHESIVE PLASTERS, POULTICES), IMPREGNATED OR COATED WITH PHARMACEUTICAL SUBSTANCES OR PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE FOR MEDICAL, SURGICAL, DENTAL OR VETERINARY PURPOSES:Other :Bandages" +30059050,"WADDING, GAUZE, BANDAGES AND SIMILAR ARTICLES (FOR EXAMPLE, DRESSINGS, ADHESIVE PLASTERS, POULTICES), IMPREGNATED OR COATED WITH PHARMACEUTICAL SUBSTANCES OR PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE FOR MEDICAL, SURGICAL, DENTAL OR VETERINARY PURPOSES:Other :Burn therapy dressing soaked in protective gel" +30059060,"WADDING, GAUZE, BANDAGES AND SIMILAR ARTICLES (FOR EXAMPLE, DRESSINGS, ADHESIVE PLASTERS, POULTICES), IMPREGNATED OR COATED WITH PHARMACEUTICAL SUBSTANCES OR PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE FOR MEDICAL, SURGICAL, DENTAL OR VETERINARY PURPOSES:Other :Micro pores surgical tapes" +30059070,"WADDING, GAUZE, BANDAGES AND SIMILAR ARTICLES (FOR EXAMPLE, DRESSINGS, ADHESIVE PLASTERS, POULTICES), IMPREGNATED OR COATED WITH PHARMACEUTICAL SUBSTANCES OR PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE FOR MEDICAL, SURGICAL, DENTAL OR VETERINARY PURPOSES:Other :Corn removers and callous removers" +30059090,"WADDING, GAUZE, BANDAGES AND SIMILAR ARTICLES (FOR EXAMPLE, DRESSINGS, ADHESIVE PLASTERS, POULTICES), IMPREGNATED OR COATED WITH PHARMACEUTICAL SUBSTANCES OR PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE FOR MEDICAL, SURGICAL, DENTAL OR VETERINARY PURPOSES:Other :Other" +30060000,pharmaceutical goods specified in note 4 to this chapter +30061000,pharmaceutical goods specified in note 4 to this chapter >> sterile surgical catgut similar sterile suture materials including sterile absorbable +30061010,"PHARMACEUTICAL GOODS SPECIFIED IN NOTE 4 TO THIS CHAPTER:Sterile surgical catgut, similar sterile suture materials (including sterile absorbable surgical or dental yarns) and sterile tissue adhesives for surgical wound closure; sterile laminaria and sterile laminaria tents; sterile absorbable surgical or dental haemostatics; sterile surgical or dental adhesion barriers, whether or not absorbable :Sterile, surgical catgut and similar sterile suture materials (including sterile absorbable surgical or dental yarns) and sterile tissue adhesives for wound closure(OLD tariff)" +30061020,"PHARMACEUTICAL GOODS SPECIFIED IN NOTE 4 TO THIS CHAPTER:Sterile surgical catgut, similar sterile suture materials (including sterile absorbable surgical or dental yarns) and sterile tissue adhesives for surgical wound closure; sterile laminaria and sterile laminaria tents; sterile absorbable surgical or dental haemostatics; sterile surgical or dental adhesion barriers, whether or not absorbable :Sterile laminaria and sterile laminaria tents, sterile absorbable surgical or dental haemostatics, sterile surgical or dental adhesion barriers, whether or not absorbable(OLD tariff)" +30062000,PHARMACEUTICAL GOODS SPECIFIED IN NOTE 4 TO THIS CHAPTER::Blood grouping reagents(OLD tariff) +30063000,PHARMACEUTICAL GOODS SPECIFIED IN NOTE 4 TO THIS CHAPTER::Opacifying preprations for X-ray examinations; diagnostic reagents designed to be administered to the patient(OLD tariff) +30064000,PHARMACEUTICAL GOODS SPECIFIED IN NOTE 4 TO THIS CHAPTER::Dental cements and other dental fillings; bone reconstruction cements(OLD tariff) 
+30065000,PHARMACEUTICAL GOODS SPECIFIED IN NOTE 4 TO THIS CHAPTER::First-aid boxes and kits(OLD tariff) +30066000,pharmaceutical goods specified in note 4 to this chapter >> chemical contraceptive preparations based on hormones on other products of heading 2937 or on spermicides +30066010,"PHARMACEUTICAL GOODS SPECIFIED IN NOTE 4 TO THIS CHAPTER:Chemical contraceptive preparations based on hormones, on other products of heading 2937 or on spermicides:Based on hormones" +30066020,"PHARMACEUTICAL GOODS SPECIFIED IN NOTE 4 TO THIS CHAPTER:Chemical contraceptive preparations based on hormones, on other products of heading 2937 or on spermicides:Based on other products of heading 2937" +30066030,"PHARMACEUTICAL GOODS SPECIFIED IN NOTE 4 TO THIS CHAPTER:Chemical contraceptive preparations based on hormones, on other products of heading 2937 or on spermicides:Based on spermicides" +30067000,PHARMACEUTICAL GOODS SPECIFIED IN NOTE 4 TO THIS CHAPTER::Gel preparations designed to be used in human or veterinary medicine as a lubricant for parts of the body for surgical operations or physical examinations or as a coupling agent between the body and medical instruments +30069100,PHARMACEUTICAL GOODS SPECIFIED IN NOTE 4 TO THIS CHAPTER::Appliances identifiable for ostomy use +30069200,PHARMACEUTICAL GOODS SPECIFIED IN NOTE 4 TO THIS CHAPTER::Waste pharmaceuticals +30069300,"PHARMACEUTICAL GOODS SPECIFIED IN NOTE 4 TO THIS CHAPTER:placebos:Placebos and blinded (or doubleblinded) clinical trial kits for a recognised clinical trial, put up in measured doses?" +31010000,animal or vegetable fertilisers whether or not mixed together or chemically treated fertilisers produced by the mixing or chemical treatment of animal or vegetable products animal or vegetable fertilisers whether or not mixed together or chemically treated fertilisers produced by the mixing or chemical treatment of animal or vegetable products +31010010,"ANIMAL OR VEGETABLE FERTILISERS, WHETHER OR NOT MIXED TOGETHER OR CHEMICALLY TREATED; FERTILISERS PRODUCED BY THE MIXING OR CHEMICAL TREATMENT OF ANIMAL OR VEGETABLE PRODUCTS:Animal or vegetable fertilisers, whether or not mixed together or chemically treated; fertilisers produced by the mixing or chemical treatment of animal or vegetable products :Guano " +31010091,"ANIMAL OR VEGETABLE FERTILISERS, WHETHER OR NOT MIXED TOGETHER OR CHEMICALLY TREATED; FERTILISERS PRODUCED BY THE MIXING OR CHEMICAL TREATMENT OF ANIMAL OR VEGETABLE PRODUCTS:Animal or vegetable fertilisers, whether or not mixed together or chemically treated; fertilisers produced by the mixing or chemical treatment of animal or vegetable products :Animal dung" +31010092,"ANIMAL OR VEGETABLE FERTILISERS, WHETHER OR NOT MIXED TOGETHER OR CHEMICALLY TREATED; FERTILISERS PRODUCED BY THE MIXING OR CHEMICAL TREATMENT OF ANIMAL OR VEGETABLE PRODUCTS:Animal or vegetable fertilisers, whether or not mixed together or chemically treated; fertilisers produced by the mixing or chemical treatment of animal or vegetable products :Animal excreta" +31010099,"ANIMAL OR VEGETABLE FERTILISERS, WHETHER OR NOT MIXED TOGETHER OR CHEMICALLY TREATED; FERTILISERS PRODUCED BY THE MIXING OR CHEMICAL TREATMENT OF ANIMAL OR VEGETABLE PRODUCTS:Animal or vegetable fertilisers, whether or not mixed together or chemically treated; fertilisers produced by the mixing or chemical treatment of animal or vegetable products :Other" +31020000,mineral or chemical fertilisers nitrogenous +31021000,"MINERAL OR CHEMICAL FERTILISERS, NITROGENOUS::Urea, whether or not in 
aqueous solution" +31021010,mineral or chemical fertilisers nitrogenous >> urea whether or not in aqueous solution >> fertilizer grade conforming to standard is 5406 +31021090,mineral or chemical fertilisers nitrogenous >> urea whether or not in aqueous solution >> other ammonium sulphate double salts and mixtures of ammonium sulphate and ammonium nitrate +31022100,"MINERAL OR CHEMICAL FERTILISERS, NITROGENOUS::Ammonium sulphate" +31022900,mineral or chemical fertilisers nitrogenous >> other +31022910,"MINERAL OR CHEMICAL FERTILISERS, NITROGENOUS:Other :Ammonium sulphonitrate" +31022990,"MINERAL OR CHEMICAL FERTILISERS, NITROGENOUS:Other :Other" +31023000,"MINERAL OR CHEMICAL FERTILISERS, NITROGENOUS::Ammonium nitrate, whether or not in aqueous solution" +31024000,"MINERAL OR CHEMICAL FERTILISERS, NITROGENOUS::Mixtures of ammonium nitrate with calcium carbonate or other inorganic non-fertilising substances" +31025000,"MINERAL OR CHEMICAL FERTILISERS, NITROGENOUS::Sodium nitrate" +31026000,"MINERAL OR CHEMICAL FERTILISERS, NITROGENOUS::Double salts and mixtures of calcium nitrate and ammonium nitrate" +31028000,"MINERAL OR CHEMICAL FERTILISERS, NITROGENOUS::Mixtures of urea and ammonium nitrate in aqueous or ammoniacal solution" +31029000,mineral or chemical fertilisers nitrogenous >> other including mixtures not specified in the foregoing +31029010,"MINERAL OR CHEMICAL FERTILISERS, NITROGENOUS:Other, including mixtures not specified in the foregoing sub-headings :Double salts or mixtures of calcium nitrate and magnesium nitrate" +31029090,"MINERAL OR CHEMICAL FERTILISERS, NITROGENOUS:Other, including mixtures not specified in the foregoing sub-headings :Other" +31030000,mineral or chemical fertilisers phosphatic superphosphates +31031000,"MINERAL OR CHEMICAL FERTILISERS, PHOSPHATIC::Superphosphates(OLD tariff)" +31031100,"MINERAL OR CHEMICAL FERTILISERS, PHOSPHATIC::Containing by weight 35 % or more of diphosphorus pentaoxide (P2O5)" +31031900,"MINERAL OR CHEMICAL FERTILISERS, PHOSPHATIC::Other" +31039000,"MINERAL OR CHEMICAL FERTILISERS, PHOSPHATIC::Other" +31040000,mineral or chemical fertilisers potassic +31042000,"MINERAL OR CHEMICAL FERTILISERS, POTASSIC::Potassium chloride" +31043000,"MINERAL OR CHEMICAL FERTILISERS, POTASSIC::Potassium sulphate" +31049000,"MINERAL OR CHEMICAL FERTILISERS, POTASSIC::Other" +31050000,mineral or chemical fertilisers containing two or three of the fertilising elements nitrogen phosphorus and potassium other fertilisers goods of this chapter in tablets or similar forms or in packages of a gross weight not exceeding 10 kg +31051000,"MINERAL OR CHEMICAL FERTILISERS CONTAINING TWO OR THREE OF THE FERTILISING ELEMENTS NITROGEN, PHOSPHORUS AND POTASSIUM; OTHER FERTILISERS; GOODS OF THIS CHAPTER IN TABLETS OR SIMILAR FORMS OR IN PACKAGES OF A GROSS WEIGHT NOT EXCEEDING 10 KG::Goods of this Chapter in tablets or similar forms or in packages of a gross weight not exceeding 10 kg." 
+31052000,"MINERAL OR CHEMICAL FERTILISERS CONTAINING TWO OR THREE OF THE FERTILISING ELEMENTS NITROGEN, PHOSPHORUS AND POTASSIUM; OTHER FERTILISERS; GOODS OF THIS CHAPTER IN TABLETS OR SIMILAR FORMS OR IN PACKAGES OF A GROSS WEIGHT NOT EXCEEDING 10 KG::Mineral or chemical fertilisers containing the three fertilising elements nitrogen, phosphorus and potassium" +31053000,"MINERAL OR CHEMICAL FERTILISERS CONTAINING TWO OR THREE OF THE FERTILISING ELEMENTS NITROGEN, PHOSPHORUS AND POTASSIUM; OTHER FERTILISERS; GOODS OF THIS CHAPTER IN TABLETS OR SIMILAR FORMS OR IN PACKAGES OF A GROSS WEIGHT NOT EXCEEDING 10 KG::Diammonium hydrogen ortho phosphate (diammonium phosphate)" +31054000,"MINERAL OR CHEMICAL FERTILISERS CONTAINING TWO OR THREE OF THE FERTILISING ELEMENTS NITROGEN, PHOSPHORUS AND POTASSIUM; OTHER FERTILISERS; GOODS OF THIS CHAPTER IN TABLETS OR SIMILAR FORMS OR IN PACKAGES OF A GROSS WEIGHT NOT EXCEEDING 10 KG::Ammonium dihydrogen ortho phosphate (monoammonium phosphate) and mixtures thereof with diammonium hydrogen orthophosphate (diammonium phosphate)" +31055100,"MINERAL OR CHEMICAL FERTILISERS CONTAINING TWO OR THREE OF THE FERTILISING ELEMENTS NITROGEN, PHOSPHORUS AND POTASSIUM; OTHER FERTILISERS; GOODS OF THIS CHAPTER IN TABLETS OR SIMILAR FORMS OR IN PACKAGES OF A GROSS WEIGHT NOT EXCEEDING 10 KG::Containing nitrates and phosphates" +31055900,"MINERAL OR CHEMICAL FERTILISERS CONTAINING TWO OR THREE OF THE FERTILISING ELEMENTS NITROGEN, PHOSPHORUS AND POTASSIUM; OTHER FERTILISERS; GOODS OF THIS CHAPTER IN TABLETS OR SIMILAR FORMS OR IN PACKAGES OF A GROSS WEIGHT NOT EXCEEDING 10 KG::Other" +31056000,"MINERAL OR CHEMICAL FERTILISERS CONTAINING TWO OR THREE OF THE FERTILISING ELEMENTS NITROGEN, PHOSPHORUS AND POTASSIUM; OTHER FERTILISERS; GOODS OF THIS CHAPTER IN TABLETS OR SIMILAR FORMS OR IN PACKAGES OF A GROSS WEIGHT NOT EXCEEDING 10 KG::Mineral or chemical fertilisers containing the two fertilising elements phosphorus and potassium" +31059000,mineral or chemical fertilisers containing two or three of the fertilising elements nitrogen phosphorus and potassium other fertilisers goods of this chapter in tablets or similar forms or in packages of a gross weight not exceeding 10 kg >> other +31059010,"MINERAL OR CHEMICAL FERTILISERS CONTAINING TWO OR THREE OF THE FERTILISING ELEMENTS NITROGEN, PHOSPHORUS AND POTASSIUM; OTHER FERTILISERS; GOODS OF THIS CHAPTER IN TABLETS OR SIMILAR FORMS OR IN PACKAGES OF A GROSS WEIGHT NOT EXCEEDING 10 KG:Other :Mineral or chemical fertilisers containing two fertilising elements namely nitrogen and potassium" +31059090,"MINERAL OR CHEMICAL FERTILISERS CONTAINING TWO OR THREE OF THE FERTILISING ELEMENTS NITROGEN, PHOSPHORUS AND POTASSIUM; OTHER FERTILISERS; GOODS OF THIS CHAPTER IN TABLETS OR SIMILAR FORMS OR IN PACKAGES OF A GROSS WEIGHT NOT EXCEEDING 10 KG:Other :Other" +32011000,"TANNING EXTRACTS OF VEGETABLE ORIGIN; TANNINS AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES::Quebracho extract(OLD tariff)" +32012000,"TANNING EXTRACTS OF VEGETABLE ORIGIN; TANNINS AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES::Wattle extract(OLD tariff)" +32019010,"TANNING EXTRACTS OF VEGETABLE ORIGIN; TANNINS AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES:Other :Gambier extracts(OLD tariff)" +32019020,"TANNING EXTRACTS OF VEGETABLE ORIGIN; TANNINS AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES:Other :Myrobalan fruit extract(OLD tariff)" +32019030,"TANNING EXTRACTS OF VEGETABLE ORIGIN; TANNINS AND THEIR SALTS, ETHERS, ESTERS AND OTHER 
DERIVATIVES:Other :Gallotannic acid (tannin, digallic acid)(OLD tariff)" +32019090,"TANNING EXTRACTS OF VEGETABLE ORIGIN; TANNINS AND THEIR SALTS, ETHERS, ESTERS AND OTHER DERIVATIVES:Other :Other(OLD tariff)" +32020000,inorganic tanning substances tanning preparations whether or not containing natural tanning substances enzymatic preparations for +32021000,"SYNTHETIC ORGANIC TANNING SUBSTANCES; INORGANIC TANNING SUBSTANCES; TANNING PREPARATIONS, WHETHER OR NOT CONTAINING NATURAL TANNING SUBSTANCES; ENZYMATIC PREPARATIONS FOR PRE-TANNING::Synthetic organic tanning substances" +32029000,inorganic tanning substances tanning preparations whether or not containing natural tanning substances enzymatic preparations for >> other +32029010,"SYNTHETIC ORGANIC TANNING SUBSTANCES; INORGANIC TANNING SUBSTANCES; TANNING PREPARATIONS, WHETHER OR NOT CONTAINING NATURAL TANNING SUBSTANCES; ENZYMATIC PREPARATIONS FOR PRE-TANNING:Other :Inorganic tanning substances" +32029020,"SYNTHETIC ORGANIC TANNING SUBSTANCES; INORGANIC TANNING SUBSTANCES; TANNING PREPARATIONS, WHETHER OR NOT CONTAINING NATURAL TANNING SUBSTANCES; ENZYMATIC PREPARATIONS FOR PRE-TANNING:Other :Tanning preparations, whether or not containing natural tanning substances" +32029030,"SYNTHETIC ORGANIC TANNING SUBSTANCES; INORGANIC TANNING SUBSTANCES; TANNING PREPARATIONS, WHETHER OR NOT CONTAINING NATURAL TANNING SUBSTANCES; ENZYMATIC PREPARATIONS FOR PRE-TANNING:Other :Enzymatic preparations for pre-tanning" +32029090,"SYNTHETIC ORGANIC TANNING SUBSTANCES; INORGANIC TANNING SUBSTANCES; TANNING PREPARATIONS, WHETHER OR NOT CONTAINING NATURAL TANNING SUBSTANCES; ENZYMATIC PREPARATIONS FOR PRE-TANNING:Other :Other" +32030000,colouring matter of vegetable or animal origin including dyeing extracts but excluding animal black whether or not chemically defined preparations as specified in note 3 to this chapter based on colouring matter of vegetable or animal origin colouring matter of vegetable or animal origin including dyeing extracts but excluding animal black whether or not chemically defined preparations as specified in note 3 to this chapter based on colouring matter of vegetable or animal origin +32030010,"COLOURING MATTER OF VEGETABLE OR ANIMAL ORIGIN (INCLUDING DYEING EXTRACTS BUT EXCLUDING ANIMAL BLACK), WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON COLOURING MATTER OF VEGETABLE OR ANIMAL ORIGIN:Colouring matter of vegetable or animal origin (including dyeing extracts but excluding animal black), whether or not chemically defined; preparations as specified in Note 3 to this Chapter based on colouring matter of vegetable or animal origin :Cutch (Catechu) extracts" +32030020,"COLOURING MATTER OF VEGETABLE OR ANIMAL ORIGIN (INCLUDING DYEING EXTRACTS BUT EXCLUDING ANIMAL BLACK), WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON COLOURING MATTER OF VEGETABLE OR ANIMAL ORIGIN:Colouring matter of vegetable or animal origin (including dyeing extracts but excluding animal black), whether or not chemically defined; preparations as specified in Note 3 to this Chapter based on colouring matter of vegetable or animal origin :Food colours other than synthetic" +32030030,"COLOURING MATTER OF VEGETABLE OR ANIMAL ORIGIN (INCLUDING DYEING EXTRACTS BUT EXCLUDING ANIMAL BLACK), WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON COLOURING MATTER OF VEGETABLE OR ANIMAL ORIGIN:Colouring matter of vegetable or animal 
origin (including dyeing extracts but excluding animal black), whether or not chemically defined; preparations as specified in Note 3 to this Chapter based on colouring matter of vegetable or animal origin :Lac-dye" +32030040,"COLOURING MATTER OF VEGETABLE OR ANIMAL ORIGIN (INCLUDING DYEING EXTRACTS BUT EXCLUDING ANIMAL BLACK), WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON COLOURING MATTER OF VEGETABLE OR ANIMAL ORIGIN:Colouring matter of vegetable or animal origin (including dyeing extracts but excluding animal black), whether or not chemically defined; preparations as specified in Note 3 to this Chapter based on colouring matter of vegetable or animal origin :Natural indigo" +32030090,"COLOURING MATTER OF VEGETABLE OR ANIMAL ORIGIN (INCLUDING DYEING EXTRACTS BUT EXCLUDING ANIMAL BLACK), WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON COLOURING MATTER OF VEGETABLE OR ANIMAL ORIGIN:Colouring matter of vegetable or animal origin (including dyeing extracts but excluding animal black), whether or not chemically defined; preparations as specified in Note 3 to this Chapter based on colouring matter of vegetable or animal origin :Other" +32040000,synthetic organic colouring matter whether or not chemically defined preparations as specified in note 3 to this chapter based on synthetic organic colouring matter synthetic organic products of a kind used as fluorescent brightening agents or as luminophores whether or not chemically defined synthetic organic colouring matter and preparations based thereon as specified in note 3 to this chapter +32041100,synthetic organic colouring matter whether or not chemically defined preparations as specified in note 3 to this chapter based on synthetic organic colouring matter synthetic organic products of a kind used as fluorescent brightening agents or as luminophores whether or not chemically defined synthetic organic colouring matter and preparations based thereon as specified in note 3 to this chapter >> disperse dyes and preparations based thereon disperse yellow +32041111,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Disperse yellow 13 (duranol brill yellow 6 G)" +32041119,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Other" +32041121,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Disperse orange 11 (duranol orange G)" +32041129,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS 
LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Other" +32041131,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Disperse red 3 (serisol fast pink B)" +32041132,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Disperse red 4 (celliton fast pink RF)" +32041133,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Disperse red 9 (duranol red GN)" +32041139,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Other" +32041141,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Disperse violet 1 (duranol violet 2R)" +32041142,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Disperse violet 4 (duranol brill violet B)" +32041143,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Disperse violet 8 (duranol brill violet BR)" +32041149,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Other " +32041151,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon 
:Disperse blue 1 (duranol brill blue CB)" +32041152,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Disperse blue 3 (duranol brill blue BBN)" +32041153,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Disperse blue 5 (celliton fast blue FFB)" +32041154,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Disperse blue 6 (celliton fast blue FFG)" +32041155,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Disperse blue 14 (duranol brill blue G)" +32041156,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Disperse blue 24 (duranol blue 2G)" +32041159,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Other " +32041191,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Disperse greens" +32041192,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Disperse browns" +32041193,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Disperse blacks" +32041194,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER 
OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Disperse brown mixtures" +32041195,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Disperse grey mixtures" +32041196,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Disperse black mixtures" +32041199,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Disperse dyes and preparations based thereon :Other" +32041200,synthetic organic colouring matter whether or not chemically defined preparations as specified in note 3 to this chapter based on synthetic organic colouring matter synthetic organic products of a kind used as fluorescent brightening agents or as luminophores whether or not chemically defined synthetic organic colouring matter and preparations based thereon as specified in note 3 to this chapter >> acid dyes whether or not premetallised and preparations based thereon mordant dyes and preparations based thereon azo dyes +32041211,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid yellows" +32041212,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid oranges" +32041213,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid red" +32041214,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS 
LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid violets" +32041215,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid blues" +32041216,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid greens" +32041217,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid brown" +32041218,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid blacks" +32041219,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Other " +32041221,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid green 17 (solacet fast green 2G)" +32041222,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid green 27 (carbolan green G)" +32041223,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS 
LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid green 28 (carbolan brill green 5G)" +32041224,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid green 38 (alizarine cyanine green 3G)" +32041225,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid green 44 (alizarine cyanine green GWA)" +32041229,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Other " +32041231,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid black 2 (nigrosine)" +32041232,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid black 48 (coomasie fast grey 3G)" +32041239,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Other" +32041241,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid blue 2 (alizarine brill blue PFN)" +32041242,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC 
COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid blue 14 (solacet fast blue 4 G1)" +32041243,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid blue 23 (alizarine light blue 4 G1)" +32041244,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid blue 25 (solway ultra blue B)" +32041245,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid blue 45 (solway blue RN)" +32041246,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid blue 51 (alizarine sky blue FFB)" +32041247,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid blue 52 (alizarine light - 5GL)" +32041248,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid blue 78 (solway sky blue B) " +32041251,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid blue 93 (ink blue)" +32041252,"SYNTHETIC ORGANIC 
COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid blue 112 (coomasie ultra sky SE)" +32041253,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid blue 127 (brill alizarine milling blue G)" +32041254,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid blue 138 (carbolan blue B)" +32041255,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid blue 140 (carbolan brill blue 2R)" +32041259,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Other " +32041261,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Yellows" +32041262,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Oranges" +32041263,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based 
thereon:Violets" +32041264,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Blues" +32041265,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Greens" +32041266,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Browns" +32041267,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Blacks" +32041268,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Red II (alizarine red)" +32041269,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Other " +32041291,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid yellows" +32041292,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid oranges" +32041293,"SYNTHETIC ORGANIC COLOURING MATTER, 
WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid red" +32041294,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid violets" +32041295,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Acid browns" +32041299,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Acid dyes, whether or not premetallised, and preparations based thereon;mordant dyes and preparations based thereon:Other" +32041300,synthetic organic colouring matter whether or not chemically defined preparations as specified in note 3 to this chapter based on synthetic organic colouring matter synthetic organic products of a kind used as fluorescent brightening agents or as luminophores whether or not chemically defined synthetic organic colouring matter and preparations based thereon as specified in note 3 to this chapter >> basic dyes and preparations based thereon +32041310,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Basic dyes and preparations based thereon :Basic azo dyes " +32041321,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Basic dyes and preparations based thereon :Yellow 2 (auramine O)" +32041329,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Basic dyes and preparations based thereon :Other " +32041331,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT 
CHEMICALLY DEFINED:Basic dyes and preparations based thereon :Red 1 (rhodamine 6 G)" +32041339,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Basic dyes and preparations based thereon :Other" +32041341,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Basic dyes and preparations based thereon :Violet 1 (methyl Violet)" +32041342,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Basic dyes and preparations based thereon :Violet 10 (rhodamine B)" +32041343,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Basic dyes and preparations based thereon :Violet 14 (magenta)" +32041349,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Basic dyes and preparations based thereon :Other " +32041351,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Basic dyes and preparations based thereon :Blue 9 (methylene Blue)" +32041352,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Basic dyes and preparations based thereon :Blue 16 (victoria Blue B)" +32041359,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Basic dyes and preparations based thereon :Other " +32041361,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Basic dyes and preparations based thereon :Green 4 (malachite green)" +32041369,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS 
CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Basic dyes and preparations based thereon :Other " +32041391,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Basic dyes and preparations based thereon :Basic oranges" +32041392,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Basic dyes and preparations based thereon :Basic browns" +32041393,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Basic dyes and preparations based thereon :Basic black" +32041399,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Basic dyes and preparations based thereon :Other" +32041400,synthetic organic colouring matter whether or not chemically defined preparations as specified in note 3 to this chapter based on synthetic organic colouring matter synthetic organic products of a kind used as fluorescent brightening agents or as luminophores whether or not chemically defined synthetic organic colouring matter and preparations based thereon as specified in note 3 to this chapter >> direct dyes and preparations based thereon direct yellow azo +32041411,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Direct dyes and preparations based thereon :Yellow 12 (chrysophenine G)" +32041419,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Direct dyes and preparations based thereon :Other" +32041421,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Direct dyes and preparations based thereon :Congo red" +32041429,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER 
OR NOT CHEMICALLY DEFINED:Direct dyes and preparations based thereon :Other " +32041431,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Direct dyes and preparations based thereon :Blue 1 (sky blue FF)" +32041439,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Direct dyes and preparations based thereon :Other" +32041440,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Direct dyes and preparations based thereon :Direct oranges (azo)" +32041450,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Direct dyes and preparations based thereon :Direct greens (azo)" +32041460,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Direct dyes and preparations based thereon :Direct browns (azo)" +32041470,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Direct dyes and preparations based thereon :Direct blacks (azo) " +32041481,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Direct dyes and preparations based thereon :Yellows" +32041482,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Direct dyes and preparations based thereon :Oranges" +32041483,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Direct dyes and preparations based thereon :Reds" +32041484,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC 
COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Direct dyes and preparations based thereon :Violets" +32041485,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Direct dyes and preparations based thereon :Blues" +32041486,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Direct dyes and preparations based thereon :Greens" +32041487,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Direct dyes and preparations based thereon :Browns" +32041488,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Direct dyes and preparations based thereon :Blacks" +32041489,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Direct dyes and preparations based thereon :Other" +32041490,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Direct dyes and preparations based thereon :Other" +32041500,synthetic organic colouring matter whether or not chemically defined preparations as specified in note 3 to this chapter based on synthetic organic colouring matter synthetic organic products of a kind used as fluorescent brightening agents or as luminophores whether or not chemically defined synthetic organic colouring matter and preparations based thereon as specified in note 3 to this chapter >> vat dyes including those usable in that state as pigments and preparations based thereon vat yellow +32041511,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat yellow 2 (GC)" +32041512,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR 
AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat yellow 4 (indathrene golden yellow GK)" +32041519,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Other " +32041521,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat oranges 3 (brill orange RK)" +32041522,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat oranges 15 (golden orange 3G)" +32041529,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Other" +32041531,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat red (brill pink)" +32041539,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Other " +32041541,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat violet 1 (brill violet 2R)" +32041542,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat violet 3 (magenta 
B)" +32041549,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Other " +32041551,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat blue 1 (synthetic indigo)" +32041552,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat blue 4" +32041553,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat blue 5 (blue 2B)" +32041554,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat blue 6 (blue BC)" +32041555,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat blue 20 (dark blue 30)" +32041556,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat blue 29 (indanthrene brill blue 4G)" +32041557,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat blue 43 (carbazole blue)" +32041558,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC 
ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Reduced vat blues" +32041559,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Other " +32041561,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat green 1 (indanthrene brill green BFFB)" +32041562,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat green 2 (indanthrene brill green GG)" +32041563,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat green 4 (indanthrene brill green 3B)" +32041564,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat green 9 (black BB)" +32041569,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Other " +32041571,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat brown 1 (brown BR)" +32041572,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT 
CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat brown 3 (brown RGR)" +32041573,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat brown 5 (brown RRD,G)" +32041579,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Other " +32041581,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat black 9 (black RB)" +32041582,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat black 25 (olive T)" +32041583,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat back 27 (olive R)" +32041584,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Vat black 29 (grey BG)" +32041589,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Other " +32041591,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Solubilised vat yellows" +32041592,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER 
OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Solubilised vat oranges" +32041593,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Solubilised vat reds" +32041594,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Solubilised vat violets" +32041595,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Solubilised vat blues" +32041596,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Solubilised vat greens" +32041597,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Solubilised vat blacks" +32041599,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Vat dyes (including those usable in that state as pigments) and preparations based thereon:Other" +32041600,synthetic organic colouring matter whether or not chemically defined preparations as specified in note 3 to this chapter based on synthetic organic colouring matter synthetic organic products of a kind used as fluorescent brightening agents or as luminophores whether or not chemically defined synthetic organic colouring matter and preparations based thereon as specified in note 3 to this chapter >> reactive dyes and preparations based thereon +32041610,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC 
PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Reactive dyes and preparations based thereon :Yellows" +32041620,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Reactive dyes and preparations based thereon :Oranges" +32041630,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Reactive dyes and preparations based thereon :Reds" +32041640,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Reactive dyes and preparations based thereon :Violets" +32041650,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Reactive dyes and preparations based thereon :Blues" +32041660,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Reactive dyes and preparations based thereon :Greens" +32041670,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Reactive dyes and preparations based thereon :Browns" +32041680,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Reactive dyes and preparations based thereon :Blacks" +32041690,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Reactive dyes and preparations based thereon :Other" +32041700,synthetic organic colouring matter whether or not chemically defined preparations as specified in note 3 to this chapter based on synthetic organic colouring matter synthetic organic products of a kind used as fluorescent brightening agents or as luminophores whether or not chemically defined synthetic organic colouring matter and preparations based thereon as specified in note 3 to this chapter >> pigments and preparations based thereon pigment yellow 
+32041711,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Pigments and preparations based thereon :Yellow 1 (hansa yellow)" +32041719,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Pigments and preparations based thereon :Other" +32041720,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Pigments and preparations based thereon :Pigment oranges " +32041731,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Pigments and preparations based thereon :Toluidine red" +32041739,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Pigments and preparations based thereon :Other" +32041740,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Pigments and preparations based thereon :Pigment violets" +32041751,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Pigments and preparations based thereon :Blue 15 (pathalocyanine blue)" +32041759,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Pigments and preparations based thereon :Other " +32041761,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Pigments and preparations based thereon :Green 7 (pathalovyanine green)" +32041769,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT 
BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Pigments and preparations based thereon :Other" +32041770,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Pigments and preparations based thereon :Pigment browns" +32041780,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Pigments and preparations based thereon :Pigment blacks" +32041790,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Pigments and preparations based thereon :Other" +32041800,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:carotenoid:Carotenoid colouring matters and preparations based thereon" +32041900,synthetic organic colouring matter whether or not chemically defined preparations as specified in note 3 to this chapter based on synthetic organic colouring matter synthetic organic products of a kind used as fluorescent brightening agents or as luminophores whether or not chemically defined synthetic organic colouring matter and preparations based thereon as specified in note 3 to this chapter >> other including mixtures of colouring matter of two or more of the 3204 11 to 3204 19 azoic coupling components +32041911,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic coupling component 2 (naphthol AS)" +32041912,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic coupling component 4 (naphthol As-BO)" +32041913,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic coupling component 5 (naphthol ASG)" +32041914,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY 
DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic coupling component 7 (naphthol ASSW)" +32041915,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic coupling component 8 (naphthol ASTR)" +32041916,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic coupling component 13 (naphthol ASSG) " +32041921,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic coupling component 14 (naphthol ASPH)" +32041922,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic coupling component 15 (naphthol ASLB)" +32041923,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic coupling component 17 (naphthol ASBS)" +32041924,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic coupling component 18 (naphthol ASD)" +32041925,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic coupling component 20 
(naphthol ASOL)" +32041929,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Other " +32041931,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic diazo component 1 (fast bordeaux GP base)" +32041932,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic diazo component 2 (fast orange G/GC base)" +32041933,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic diazo component 3 (fast scarlet GGIGGS base)" +32041934,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic diazo component 4 (fast garment GBC base)" +32041935,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic diazo component 5 (fast red B base)" +32041936,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic diazo component 6 (fast orange GR base)" +32041937,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more 
of the sub-headings 3204 11 to 3204 19 :Azoic diazo component 10 (fast red R base)" +32041938,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic diazo component 11 (fast red TR base)" +32041941,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic diazo component 12 (fast scarlet G base)" +32041942,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic diazo component 13 (fast scarlet R base)" +32041943,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic diazo component 20 (fast blue BB base)" +32041944,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic diazo component 24 (fast blue RR base)" +32041945,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic diazo component 32 (fast red KB base)" +32041946,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic diazo component 41 (fast violet B base)" +32041947,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, 
WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Azoic diazo component 48 (fast blue B base)" +32041949,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Other" +32041951,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Yellows" +32041952,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Oranges" +32041953,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Reds" +32041954,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Violets" +32041955,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Blues" +32041956,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Greens" +32041957,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Browns" +32041958,"SYNTHETIC ORGANIC 
COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Blacks" +32041959,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Other" +32041961,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Yellows" +32041962,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Oranges" +32041963,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Reds" +32041964,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Blues" +32041965,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Greens" +32041966,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Browns" +32041967,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT 
BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Blacks" +32041969,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Other " +32041971,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Yellows" +32041972,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Oranges" +32041973,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Reds" +32041974,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Violets" +32041975,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Blues" +32041976,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Greens" +32041977,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Browns" +32041978,"SYNTHETIC ORGANIC 
COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Blacks" +32041979,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Other" +32041981,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Yellow 3 (sunset yellow)" +32041982,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Yellow 4 (tartrazine)" +32041983,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Reds 5 to 8 (poncean)" +32041984,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Red 9 (amaranth)" +32041985,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Oranges" +32041986,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Violets" +32041987,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; 
SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Greens" +32041988,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Browns" +32041989,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Other" +32041990,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other, including mixtures of colouring matter of two or more of the sub-headings 3204 11 to 3204 19 :Other" +32042000,synthetic organic colouring matter whether or not chemically defined preparations as specified in note 3 to this chapter based on synthetic organic colouring matter synthetic organic products of a kind used as fluorescent brightening agents or as luminophores whether or not chemically defined synthetic organic colouring matter and preparations based thereon as specified in note 3 to this chapter >> synthetic organic products of a kind used as fluorescent brightening agents +32042010,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Synthetic organic products of a kind used as fluorescent brightening agents :Optical whitening agents" +32042090,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Synthetic organic products of a kind used as fluorescent brightening agents :Other" +32049000,"SYNTHETIC ORGANIC COLOURING MATTER, WHETHER OR NOT CHEMICALLY DEFINED; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON SYNTHETIC ORGANIC COLOURING MATTER; SYNTHETIC ORGANIC PRODUCTS OF A KIND USED AS FLUORESCENT BRIGHTENING AGENTS OR AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED::Other" +32050000,::COLOUR LAKES; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER BASED ON COLOUR LAKES +32060000,other colouring matter preparations as specified in note 3 to this chapter other than those of headings 3203 3204 or 3205 inorganic products of a kind used as luminophores whether or not chemically defined pigments and preparations based on titanium dioxide +32061100,other colouring matter preparations as specified in note 3 to this chapter 
other than those of headings 3203 3204 or 3205 inorganic products of a kind used as luminophores whether or not chemically defined pigments and preparations based on titanium dioxide >> containing 80 or more by weight of titanium dioxide calculated on the dry matter +32061110,"OTHER COLOURING MATTER; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER, OTHER THAN THOSE OF HEADINGS 3203, 3204 OR 3205; INORGANIC PRODUCTS OF A KIND USED AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Containing 80% or more by weight of titanium dioxide calculated on the dry matter :Pearlsent pigment (titanium dioxide, coated micananeous and lustres pearl pigment)" +32061190,"OTHER COLOURING MATTER; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER, OTHER THAN THOSE OF HEADINGS 3203, 3204 OR 3205; INORGANIC PRODUCTS OF A KIND USED AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Containing 80% or more by weight of titanium dioxide calculated on the dry matter :Other" +32061900,"OTHER COLOURING MATTER; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER, OTHER THAN THOSE OF HEADINGS 3203, 3204 OR 3205; INORGANIC PRODUCTS OF A KIND USED AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED::Other" +32062000,"OTHER COLOURING MATTER; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER, OTHER THAN THOSE OF HEADINGS 3203, 3204 OR 3205; INORGANIC PRODUCTS OF A KIND USED AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED::Pigments and preparations based on chromium compounds" +32064100,"OTHER COLOURING MATTER; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER, OTHER THAN THOSE OF HEADINGS 3203, 3204 OR 3205; INORGANIC PRODUCTS OF A KIND USED AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED::Utramarine and preparations based thereon" +32064200,"OTHER COLOURING MATTER; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER, OTHER THAN THOSE OF HEADINGS 3203, 3204 OR 3205; INORGANIC PRODUCTS OF A KIND USED AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED::Lithopone and other pigments and preparations based on zinc sulphide" +32064900,other colouring matter preparations as specified in note 3 to this chapter other than those of headings 3203 3204 or 3205 inorganic products of a kind used as luminophores whether or not chemically defined pigments and preparations based on titanium dioxide >> other +32064910,"OTHER COLOURING MATTER; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER, OTHER THAN THOSE OF HEADINGS 3203, 3204 OR 3205; INORGANIC PRODUCTS OF A KIND USED AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other :Red oxide" +32064920,"OTHER COLOURING MATTER; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER, OTHER THAN THOSE OF HEADINGS 3203, 3204 OR 3205; INORGANIC PRODUCTS OF A KIND USED AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other :Persian red" +32064930,"OTHER COLOURING MATTER; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER, OTHER THAN THOSE OF HEADINGS 3203, 3204 OR 3205; INORGANIC PRODUCTS OF A KIND USED AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other :Yellow ochre" +32064940,"OTHER COLOURING MATTER; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER, OTHER THAN THOSE OF HEADINGS 3203, 3204 OR 3205; INORGANIC PRODUCTS OF A KIND USED AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other :Bronze powder" +32064990,"OTHER COLOURING MATTER; PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER, OTHER THAN THOSE OF HEADINGS 3203, 3204 OR 3205; INORGANIC PRODUCTS OF A KIND USED AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED:Other :Other" +32065000,"OTHER COLOURING MATTER; 
PREPARATIONS AS SPECIFIED IN NOTE 3 TO THIS CHAPTER, OTHER THAN THOSE OF HEADINGS 3203, 3204 OR 3205; INORGANIC PRODUCTS OF A KIND USED AS LUMINOPHORES, WHETHER OR NOT CHEMICALLY DEFINED::Inorganic products of a kind used as luminophores" +32070000,prepared pigments prepared opacifiers and prepared colours vitrifiable enamels and glazes engobes slips liquid lustres and similar preparations of a kind used in the ceramic enamelling or glass industry glass frit and other glass in the form of powder granules or flakes +32071000,prepared pigments prepared opacifiers and prepared colours vitrifiable enamels and glazes engobes slips liquid lustres and similar preparations of a kind used in the ceramic enamelling or glass industry glass frit and other glass in the form of powder granules or flakes >> prepared pigments prepared opacifiers prepared colours and similar preparations +32071010,"PREPARED PIGMENTS, PREPARED OPACIFIERS AND PREPARED COLOURS, VITRIFIABLE ENAMELS AND GLAZES, ENGOBES (SLIPS), LIQUID LUSTRES AND SIMILAR PREPARATIONS, OF A KIND USED IN THE CERAMIC, ENAMELLING OR GLASS INDUSTRY; GLASS FRIT AND OTHER GLASS, IN THE FORM OF POWDER, GRANULES OR FLAKES:Prepared pigments, prepared opacifiers, prepared colours and similar preparations :Prepared organic dye-stuff pigments, dry" +32071020,"PREPARED PIGMENTS, PREPARED OPACIFIERS AND PREPARED COLOURS, VITRIFIABLE ENAMELS AND GLAZES, ENGOBES (SLIPS), LIQUID LUSTRES AND SIMILAR PREPARATIONS, OF A KIND USED IN THE CERAMIC, ENAMELLING OR GLASS INDUSTRY; GLASS FRIT AND OTHER GLASS, IN THE FORM OF POWDER, GRANULES OR FLAKES:Prepared pigments, prepared opacifiers, prepared colours and similar preparations :Prepared organic dye-stuff pigments, paste" +32071030,"PREPARED PIGMENTS, PREPARED OPACIFIERS AND PREPARED COLOURS, VITRIFIABLE ENAMELS AND GLAZES, ENGOBES (SLIPS), LIQUID LUSTRES AND SIMILAR PREPARATIONS, OF A KIND USED IN THE CERAMIC, ENAMELLING OR GLASS INDUSTRY; GLASS FRIT AND OTHER GLASS, IN THE FORM OF POWDER, GRANULES OR FLAKES:Prepared pigments, prepared opacifiers, prepared colours and similar preparations :Prepared inorganic pigments" +32071040,"PREPARED PIGMENTS, PREPARED OPACIFIERS AND PREPARED COLOURS, VITRIFIABLE ENAMELS AND GLAZES, ENGOBES (SLIPS), LIQUID LUSTRES AND SIMILAR PREPARATIONS, OF A KIND USED IN THE CERAMIC, ENAMELLING OR GLASS INDUSTRY; GLASS FRIT AND OTHER GLASS, IN THE FORM OF POWDER, GRANULES OR FLAKES:Prepared pigments, prepared opacifiers, prepared colours and similar preparations :Prepared opacifiers prepared colours and similar preparations" +32071090,"PREPARED PIGMENTS, PREPARED OPACIFIERS AND PREPARED COLOURS, VITRIFIABLE ENAMELS AND GLAZES, ENGOBES (SLIPS), LIQUID LUSTRES AND SIMILAR PREPARATIONS, OF A KIND USED IN THE CERAMIC, ENAMELLING OR GLASS INDUSTRY; GLASS FRIT AND OTHER GLASS, IN THE FORM OF POWDER, GRANULES OR FLAKES:Prepared pigments, prepared opacifiers, prepared colours and similar preparations :Other" +32072000,prepared pigments prepared opacifiers and prepared colours vitrifiable enamels and glazes engobes slips liquid lustres and similar preparations of a kind used in the ceramic enamelling or glass industry glass frit and other glass in the form of powder granules or flakes >> vitrifiable enamels and glazes engobes slips and similar preparations +32072010,"PREPARED PIGMENTS, PREPARED OPACIFIERS AND PREPARED COLOURS, VITRIFIABLE ENAMELS AND GLAZES, ENGOBES (SLIPS), LIQUID LUSTRES AND SIMILAR PREPARATIONS, OF A KIND USED IN THE CERAMIC, ENAMELLING OR GLASS INDUSTRY; GLASS FRIT AND OTHER GLASS, 
IN THE FORM OF POWDER, GRANULES OR FLAKES:Vitrifiable enamels and glazes, engobes (slips) and similar preparations :Vitrifiable enamels and glazes" +32072020,"PREPARED PIGMENTS, PREPARED OPACIFIERS AND PREPARED COLOURS, VITRIFIABLE ENAMELS AND GLAZES, ENGOBES (SLIPS), LIQUID LUSTRES AND SIMILAR PREPARATIONS, OF A KIND USED IN THE CERAMIC, ENAMELLING OR GLASS INDUSTRY; GLASS FRIT AND OTHER GLASS, IN THE FORM OF POWDER, GRANULES OR FLAKES:Vitrifiable enamels and glazes, engobes (slips) and similar preparations :Engobes (slips) and similar preparations" +32073000,"PREPARED PIGMENTS, PREPARED OPACIFIERS AND PREPARED COLOURS, VITRIFIABLE ENAMELS AND GLAZES, ENGOBES (SLIPS), LIQUID LUSTRES AND SIMILAR PREPARATIONS, OF A KIND USED IN THE CERAMIC, ENAMELLING OR GLASS INDUSTRY; GLASS FRIT AND OTHER GLASS, IN THE FORM OF POWDER, GRANULES OR FLAKES::Liquid lustres and similar preparations" +32074000,"PREPARED PIGMENTS, PREPARED OPACIFIERS AND PREPARED COLOURS, VITRIFIABLE ENAMELS AND GLAZES, ENGOBES (SLIPS), LIQUID LUSTRES AND SIMILAR PREPARATIONS, OF A KIND USED IN THE CERAMIC, ENAMELLING OR GLASS INDUSTRY; GLASS FRIT AND OTHER GLASS, IN THE FORM OF POWDER, GRANULES OR FLAKES::Glass frit and other glass, in the form of powder, granules or flakes" +32080000,paints and varnishes including enamels and lacquers based on synthetic polymers or chemically modified natural dispersed or dissolved in a medium solutions as defined in note 4 to this chapter +32081000,paints and varnishes including enamels and lacquers based on synthetic polymers or chemically modified natural dispersed or dissolved in a medium solutions as defined in note 4 to this chapter >> based on polyesters +32081010,"PAINTS AND VARNISHES (INCLUDING ENAMELS AND LACQUERS) BASED ON SYNTHETIC POLYMERS OR CHEMICALLY MODIFIED NATURAL POLYMERS, DISPERSED OR DISSOLVED IN A NON-AQUEOUS MEDIUM; SOLUTIONS AS DEFINED IN NOTE 4 TO THIS CHAPTER:Based on polyesters :Enamels" +32081020,"PAINTS AND VARNISHES (INCLUDING ENAMELS AND LACQUERS) BASED ON SYNTHETIC POLYMERS OR CHEMICALLY MODIFIED NATURAL POLYMERS, DISPERSED OR DISSOLVED IN A NON-AQUEOUS MEDIUM; SOLUTIONS AS DEFINED IN NOTE 4 TO THIS CHAPTER:Based on polyesters :Lacquers" +32081030,"PAINTS AND VARNISHES (INCLUDING ENAMELS AND LACQUERS) BASED ON SYNTHETIC POLYMERS OR CHEMICALLY MODIFIED NATURAL POLYMERS, DISPERSED OR DISSOLVED IN A NON-AQUEOUS MEDIUM; SOLUTIONS AS DEFINED IN NOTE 4 TO THIS CHAPTER:Based on polyesters :Varnishes" +32081090,"PAINTS AND VARNISHES (INCLUDING ENAMELS AND LACQUERS) BASED ON SYNTHETIC POLYMERS OR CHEMICALLY MODIFIED NATURAL POLYMERS, DISPERSED OR DISSOLVED IN A NON-AQUEOUS MEDIUM; SOLUTIONS AS DEFINED IN NOTE 4 TO THIS CHAPTER:Based on polyesters :Other" +32082000,paints and varnishes including enamels and lacquers based on synthetic polymers or chemically modified natural dispersed or dissolved in a medium solutions as defined in note 4 to this chapter >> based on acrylic or vinyl polymers +32082010,"PAINTS AND VARNISHES (INCLUDING ENAMELS AND LACQUERS) BASED ON SYNTHETIC POLYMERS OR CHEMICALLY MODIFIED NATURAL POLYMERS, DISPERSED OR DISSOLVED IN A NON-AQUEOUS MEDIUM; SOLUTIONS AS DEFINED IN NOTE 4 TO THIS CHAPTER:Based on acrylic or vinyl polymers :Enamels" +32082020,"PAINTS AND VARNISHES (INCLUDING ENAMELS AND LACQUERS) BASED ON SYNTHETIC POLYMERS OR CHEMICALLY MODIFIED NATURAL POLYMERS, DISPERSED OR DISSOLVED IN A NON-AQUEOUS MEDIUM; SOLUTIONS AS DEFINED IN NOTE 4 TO THIS CHAPTER:Based on acrylic or vinyl polymers :Lacquers" +32082030,"PAINTS AND VARNISHES 
(INCLUDING ENAMELS AND LACQUERS) BASED ON SYNTHETIC POLYMERS OR CHEMICALLY MODIFIED NATURAL POLYMERS, DISPERSED OR DISSOLVED IN A NON-AQUEOUS MEDIUM; SOLUTIONS AS DEFINED IN NOTE 4 TO THIS CHAPTER:Based on acrylic or vinyl polymers :Varnishes" +32082090,"PAINTS AND VARNISHES (INCLUDING ENAMELS AND LACQUERS) BASED ON SYNTHETIC POLYMERS OR CHEMICALLY MODIFIED NATURAL POLYMERS, DISPERSED OR DISSOLVED IN A NON-AQUEOUS MEDIUM; SOLUTIONS AS DEFINED IN NOTE 4 TO THIS CHAPTER:Based on acrylic or vinyl polymers :Other" +32089000,paints and varnishes including enamels and lacquers based on synthetic polymers or chemically modified natural dispersed or dissolved in a medium solutions as defined in note 4 to this chapter >> other based on cellulose nitrate or other cellulose derivatives other based on cellulose nitrate or other cellulose derivatives +32089011,"PAINTS AND VARNISHES (INCLUDING ENAMELS AND LACQUERS) BASED ON SYNTHETIC POLYMERS OR CHEMICALLY MODIFIED NATURAL POLYMERS, DISPERSED OR DISSOLVED IN A NON-AQUEOUS MEDIUM; SOLUTIONS AS DEFINED IN NOTE 4 TO THIS CHAPTER:Other :Nitrocellulose lacquers" +32089019,"PAINTS AND VARNISHES (INCLUDING ENAMELS AND LACQUERS) BASED ON SYNTHETIC POLYMERS OR CHEMICALLY MODIFIED NATURAL POLYMERS, DISPERSED OR DISSOLVED IN A NON-AQUEOUS MEDIUM; SOLUTIONS AS DEFINED IN NOTE 4 TO THIS CHAPTER:Other :Other" +32089021,"PAINTS AND VARNISHES (INCLUDING ENAMELS AND LACQUERS) BASED ON SYNTHETIC POLYMERS OR CHEMICALLY MODIFIED NATURAL POLYMERS, DISPERSED OR DISSOLVED IN A NON-AQUEOUS MEDIUM; SOLUTIONS AS DEFINED IN NOTE 4 TO THIS CHAPTER:Other :Synthetic enamel, ultra white paints" +32089022,"PAINTS AND VARNISHES (INCLUDING ENAMELS AND LACQUERS) BASED ON SYNTHETIC POLYMERS OR CHEMICALLY MODIFIED NATURAL POLYMERS, DISPERSED OR DISSOLVED IN A NON-AQUEOUS MEDIUM; SOLUTIONS AS DEFINED IN NOTE 4 TO THIS CHAPTER:Other :Synthetic enamel, other colours" +32089029,"PAINTS AND VARNISHES (INCLUDING ENAMELS AND LACQUERS) BASED ON SYNTHETIC POLYMERS OR CHEMICALLY MODIFIED NATURAL POLYMERS, DISPERSED OR DISSOLVED IN A NON-AQUEOUS MEDIUM; SOLUTIONS AS DEFINED IN NOTE 4 TO THIS CHAPTER:Other :Other" +32089030,"PAINTS AND VARNISHES (INCLUDING ENAMELS AND LACQUERS) BASED ON SYNTHETIC POLYMERS OR CHEMICALLY MODIFIED NATURAL POLYMERS, DISPERSED OR DISSOLVED IN A NON-AQUEOUS MEDIUM; SOLUTIONS AS DEFINED IN NOTE 4 TO THIS CHAPTER:Other :Lacquers " +32089041,"PAINTS AND VARNISHES (INCLUDING ENAMELS AND LACQUERS) BASED ON SYNTHETIC POLYMERS OR CHEMICALLY MODIFIED NATURAL POLYMERS, DISPERSED OR DISSOLVED IN A NON-AQUEOUS MEDIUM; SOLUTIONS AS DEFINED IN NOTE 4 TO THIS CHAPTER:Other :Insulating varnish" +32089049,"PAINTS AND VARNISHES (INCLUDING ENAMELS AND LACQUERS) BASED ON SYNTHETIC POLYMERS OR CHEMICALLY MODIFIED NATURAL POLYMERS, DISPERSED OR DISSOLVED IN A NON-AQUEOUS MEDIUM; SOLUTIONS AS DEFINED IN NOTE 4 TO THIS CHAPTER:Other :Other" +32089050,"PAINTS AND VARNISHES (INCLUDING ENAMELS AND LACQUERS) BASED ON SYNTHETIC POLYMERS OR CHEMICALLY MODIFIED NATURAL POLYMERS, DISPERSED OR DISSOLVED IN A NON-AQUEOUS MEDIUM; SOLUTIONS AS DEFINED IN NOTE 4 TO THIS CHAPTER:Other :Slip agents" +32089090,"PAINTS AND VARNISHES (INCLUDING ENAMELS AND LACQUERS) BASED ON SYNTHETIC POLYMERS OR CHEMICALLY MODIFIED NATURAL POLYMERS, DISPERSED OR DISSOLVED IN A NON-AQUEOUS MEDIUM; SOLUTIONS AS DEFINED IN NOTE 4 TO THIS CHAPTER:Other :Other" +32090000,paints and varnishes including enamels and lacquers based on synthetic polymers or +32091000,paints and varnishes including enamels and lacquers based on synthetic 
polymers or >> based on acrylic or vinyl polymers +32091010,"PAINTS AND VARNISHES (INCLUDING ENAMELS AND LACQUERS) BASED ON SYNTHETIC POLYMERS OR CHEMICALLY MODIFIED NATURAL POLYMERS, DISPERSED OR DISSOLVED IN AN AQUEOUS MEDIUM:Based on acrylic or vinyl polymers :Acrylic emulsion" +32091090,"PAINTS AND VARNISHES (INCLUDING ENAMELS AND LACQUERS) BASED ON SYNTHETIC POLYMERS OR CHEMICALLY MODIFIED NATURAL POLYMERS, DISPERSED OR DISSOLVED IN AN AQUEOUS MEDIUM:Based on acrylic or vinyl polymers :Other" +32099000,paints and varnishes including enamels and lacquers based on synthetic polymers or >> other +32099010,"PAINTS AND VARNISHES (INCLUDING ENAMELS AND LACQUERS) BASED ON SYNTHETIC POLYMERS OR CHEMICALLY MODIFIED NATURAL POLYMERS, DISPERSED OR DISSOLVED IN AN AQUEOUS MEDIUM:Other :Dispersion paints" +32099020,"PAINTS AND VARNISHES (INCLUDING ENAMELS AND LACQUERS) BASED ON SYNTHETIC POLYMERS OR CHEMICALLY MODIFIED NATURAL POLYMERS, DISPERSED OR DISSOLVED IN AN AQUEOUS MEDIUM:Other :Emulsion paints not elsewhere specified or included" +32099090,"PAINTS AND VARNISHES (INCLUDING ENAMELS AND LACQUERS) BASED ON SYNTHETIC POLYMERS OR CHEMICALLY MODIFIED NATURAL POLYMERS, DISPERSED OR DISSOLVED IN AN AQUEOUS MEDIUM:Other :Other(OLD tariff)" +32100000,other paints and varnishes including enamels lacquers and distempers prepared water pigments of a kind used for finishing leather other paints and varnishes including enamels lacquers and distempers prepared water pigments of a kind used for finishing leather distempers +32100011,"OTHER PAINTS AND VARNISHES (INCLUDING ENAMELS, LACQUERS AND DISTEMPERS); PREPARED WATER PIGMENTS OF A KIND USED FOR FINISHING LEATHER:Other paints and varnishes (including enamels, lacquers and distempers); prepared water pigments of a kind used for finishing leather :Dry distemper, including cement based water paints" +32100012,"OTHER PAINTS AND VARNISHES (INCLUDING ENAMELS, LACQUERS AND DISTEMPERS); PREPARED WATER PIGMENTS OF A KIND USED FOR FINISHING LEATHER:Other paints and varnishes (including enamels, lacquers and distempers); prepared water pigments of a kind used for finishing leather :Oil bound distemper" +32100019,"OTHER PAINTS AND VARNISHES (INCLUDING ENAMELS, LACQUERS AND DISTEMPERS); PREPARED WATER PIGMENTS OF A KIND USED FOR FINISHING LEATHER:Other paints and varnishes (including enamels, lacquers and distempers); prepared water pigments of a kind used for finishing leather :Other" +32100020,"OTHER PAINTS AND VARNISHES (INCLUDING ENAMELS, LACQUERS AND DISTEMPERS); PREPARED WATER PIGMENTS OF A KIND USED FOR FINISHING LEATHER:Other paints and varnishes (including enamels, lacquers and distempers); prepared water pigments of a kind used for finishing leather :Prepared water pigments of a kind used for finishing leather" +32100030,"OTHER PAINTS AND VARNISHES (INCLUDING ENAMELS, LACQUERS AND DISTEMPERS); PREPARED WATER PIGMENTS OF A KIND USED FOR FINISHING LEATHER:Other paints and varnishes (including enamels, lacquers and distempers); prepared water pigments of a kind used for finishing leather :Metallic powder or flakes prepared as paints(OLD tariff)" +32100040,"OTHER PAINTS AND VARNISHES (INCLUDING ENAMELS, LACQUERS AND DISTEMPERS); PREPARED WATER PIGMENTS OF A KIND USED FOR FINISHING LEATHER:Other paints and varnishes (including enamels, lacquers and distempers); prepared water pigments of a kind used for finishing leather :Poly tetra fluoro ethylene (PTFE) or silicon resin based coating materials" +32100090,"OTHER PAINTS AND VARNISHES (INCLUDING ENAMELS, LACQUERS 
AND DISTEMPERS); PREPARED WATER PIGMENTS OF A KIND USED FOR FINISHING LEATHER:Other paints and varnishes (including enamels, lacquers and distempers); prepared water pigments of a kind used for finishing leather :Other(OLD tariff)" +32110000,::PREPARED DRIERS +32120000,pigments including metallic powders and flakes dispersed in media in liquid or paste form of a kind used in the manufacture of paints including enamels stamping foils dyes and other colouring matter put up in forms or packings for retail sale +32121000,"PIGMENTS (INCLUDING METALLIC POWDERS AND FLAKES) DISPERSED IN NON-AQUEOUS MEDIA, IN LIQUID OR PASTE FORM, OF A KIND USED IN THE MANUFACTURE OF PAINTS (INCLUDING ENAMELS); STAMPING FOILS; DYES AND OTHER COLOURING MATTER PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE::Stamping foils" +32129000,pigments including metallic powders and flakes dispersed in media in liquid or paste form of a kind used in the manufacture of paints including enamels stamping foils dyes and other colouring matter put up in forms or packings for retail sale >> other turpentine varnish and other paints or enamel media not elsewhere specified or included or packings for retail sale +32129010,"PIGMENTS (INCLUDING METALLIC POWDERS AND FLAKES) DISPERSED IN NON-AQUEOUS MEDIA, IN LIQUID OR PASTE FORM, OF A KIND USED IN THE MANUFACTURE OF PAINTS (INCLUDING ENAMELS); STAMPING FOILS; DYES AND OTHER COLOURING MATTER PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Pigments in linseed oil, white spirit, spirit of turpentine, varnish and other paints or enamel media not elsewhere specified or included(OLD tariff)" +32129020,"PIGMENTS (INCLUDING METALLIC POWDERS AND FLAKES) DISPERSED IN NON-AQUEOUS MEDIA, IN LIQUID OR PASTE FORM, OF A KIND USED IN THE MANUFACTURE OF PAINTS (INCLUDING ENAMELS); STAMPING FOILS; DYES AND OTHER COLOURING MATTER PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Dyes and other colouring matter put up in forms or packings for retail sale(OLD tariff)" +32129030,"PIGMENTS (INCLUDING METALLIC POWDERS AND FLAKES) DISPERSED IN NON-AQUEOUS MEDIA, IN LIQUID OR PASTE FORM, OF A KIND USED IN THE MANUFACTURE OF PAINTS (INCLUDING ENAMELS); STAMPING FOILS; DYES AND OTHER COLOURING MATTER PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Aluminium paste" +32129090,"PIGMENTS (INCLUDING METALLIC POWDERS AND FLAKES) DISPERSED IN NON-AQUEOUS MEDIA, IN LIQUID OR PASTE FORM, OF A KIND USED IN THE MANUFACTURE OF PAINTS (INCLUDING ENAMELS); STAMPING FOILS; DYES AND OTHER COLOURING MATTER PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE:Other :Other" +32130000,artists students or signboard painters colours modifiying tints amusement colours and the like in tablets tubes jars bottles pans or in similar forms or packings +32131000,"ARTISTS, STUDENTS OR SIGNBOARD PAINTERS COLOURS, MODIFIYING TINTS, AMUSEMENT COLOURS AND THE LIKE, IN TABLETS, TUBES, JARS, BOTTLES, PANS OR IN SIMILAR FORMS OR PACKINGS::Colours in sets" +32139000,"ARTISTS, STUDENTS OR SIGNBOARD PAINTERS COLOURS, MODIFIYING TINTS, AMUSEMENT COLOURS AND THE LIKE, IN TABLETS, TUBES, JARS, BOTTLES, PANS OR IN SIMILAR FORMS OR PACKINGS::Other" +32140000,glaziers putty grafting putty resin cements caulking compounds and other mastics painters fillings surfacing prepartions for facades indoor walls floors ceilings or the like +32141000,"GLAZIERS PUTTY, GRAFTING PUTTY, RESIN CEMENTS, CAULKING COMPOUNDS AND OTHER MASTICS; PAINTERS FILLINGS; NON-REFRACTORY SURFACING PREPARTIONS FOR FACADES, INDOOR WALLS, FLOORS, CEILINGS OR THE LIKE::Glaziers putty, grafting putty, 
resin cements, caulking compounds and other mastics; painters fillings" +32149000,glaziers putty grafting putty resin cements caulking compounds and other mastics painters fillings surfacing prepartions for facades indoor walls floors ceilings or the like >> other +32149010,"GLAZIERS PUTTY, GRAFTING PUTTY, RESIN CEMENTS, CAULKING COMPOUNDS AND OTHER MASTICS; PAINTERS FILLINGS; NON-REFRACTORY SURFACING PREPARTIONS FOR FACADES, INDOOR WALLS, FLOORS, CEILINGS OR THE LIKE:Other :Non-refractory surfacing preparations" +32149020,"GLAZIERS PUTTY, GRAFTING PUTTY, RESIN CEMENTS, CAULKING COMPOUNDS AND OTHER MASTICS; PAINTERS FILLINGS; NON-REFRACTORY SURFACING PREPARTIONS FOR FACADES, INDOOR WALLS, FLOORS, CEILINGS OR THE LIKE:Other :Resin cement" +32149090,"GLAZIERS PUTTY, GRAFTING PUTTY, RESIN CEMENTS, CAULKING COMPOUNDS AND OTHER MASTICS; PAINTERS FILLINGS; NON-REFRACTORY SURFACING PREPARTIONS FOR FACADES, INDOOR WALLS, FLOORS, CEILINGS OR THE LIKE:Other :Other" +32150000,printing ink writing or drawing ink and other inks whether or not concentrated or solid printing ink +32151100,printing ink writing or drawing ink and other inks whether or not concentrated or solid printing ink >> black +32151110,"PRINTING INK, WRITING OR DRAWING INK AND OTHER INKS, WHETHER OR NOT CONCENTRATED OR SOLID:Black :Lithographic ink and jelly" +32151120,"PRINTING INK, WRITING OR DRAWING INK AND OTHER INKS, WHETHER OR NOT CONCENTRATED OR SOLID:Black :Newspaper ink" +32151130,"PRINTING INK, WRITING OR DRAWING INK AND OTHER INKS, WHETHER OR NOT CONCENTRATED OR SOLID:Black :Rotary ink" +32151140,"PRINTING INK, WRITING OR DRAWING INK AND OTHER INKS, WHETHER OR NOT CONCENTRATED OR SOLID:Black :Screen printing ink" +32151190,"PRINTING INK, WRITING OR DRAWING INK AND OTHER INKS, WHETHER OR NOT CONCENTRATED OR SOLID:Black :Other" +32151900,printing ink writing or drawing ink and other inks whether or not concentrated or solid printing ink >> other +32151910,"PRINTING INK, WRITING OR DRAWING INK AND OTHER INKS, WHETHER OR NOT CONCENTRATED OR SOLID:Other :Lithographic ink and jelly" +32151920,"PRINTING INK, WRITING OR DRAWING INK AND OTHER INKS, WHETHER OR NOT CONCENTRATED OR SOLID:Other :Newspaper ink" +32151930,"PRINTING INK, WRITING OR DRAWING INK AND OTHER INKS, WHETHER OR NOT CONCENTRATED OR SOLID:Other :Rotary ink" +32151940,"PRINTING INK, WRITING OR DRAWING INK AND OTHER INKS, WHETHER OR NOT CONCENTRATED OR SOLID:Other :Screen printing ink" +32151990,"PRINTING INK, WRITING OR DRAWING INK AND OTHER INKS, WHETHER OR NOT CONCENTRATED OR SOLID:Other :Other" +32159000,printing ink writing or drawing ink and other inks whether or not concentrated or solid printing ink >> other +32159010,"PRINTING INK, WRITING OR DRAWING INK AND OTHER INKS, WHETHER OR NOT CONCENTRATED OR SOLID:Other :Fountain pen ink" +32159020,"PRINTING INK, WRITING OR DRAWING INK AND OTHER INKS, WHETHER OR NOT CONCENTRATED OR SOLID:Other :Ball pen ink" +32159030,"PRINTING INK, WRITING OR DRAWING INK AND OTHER INKS, WHETHER OR NOT CONCENTRATED OR SOLID:Other :Indelible ink" +32159040,"PRINTING INK, WRITING OR DRAWING INK AND OTHER INKS, WHETHER OR NOT CONCENTRATED OR SOLID:Other :Drawing ink" +32159090,"PRINTING INK, WRITING OR DRAWING INK AND OTHER INKS, WHETHER OR NOT CONCENTRATED OR SOLID:Other :Other" +33010000,essential oils terpeneless or not including concretes and absolutes resinoids extracted oleoresins concentrates of essential oils in fats in fixed oils in waxes or the like obtained by enfleurage or maceration terpenic by products of the 
deterpenation of essential oils aqueous distillates and aqueous solutions of essential oils essential oils of citrus fruit +33011200,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS ::Of orange" +33011300,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS ::Of lemon" +33011900,essential oils terpeneless or not including concretes and absolutes resinoids extracted oleoresins concentrates of essential oils in fats in fixed oils in waxes or the like obtained by enfleurage or maceration terpenic by products of the deterpenation of essential oils aqueous distillates and aqueous solutions of essential oils essential oils of citrus fruit >> other +33011910,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Citronella oil" +33011990,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Other" +33012400,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS ::Of peppermint (Mentha piperita)" +33012500,essential oils terpeneless or not including concretes and absolutes resinoids extracted oleoresins concentrates of essential oils in fats in fixed oils in waxes or the like obtained by enfleurage or maceration terpenic by products of the deterpenation of essential oils aqueous distillates and aqueous solutions of essential oils essential oils of citrus fruit >> of other mints +33012510,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Of other mints :Spearmint oil (ex-mentha spicata)" +33012520,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Of other mints :Water mint-oil (ex-mentha 
aquatic)" +33012530,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Of other mints :Horsemint oil (ex-mentha sylvestries)" +33012540,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Of other mints :Bergament oil (ex-mentha citrate)" +33012590,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Of other mints :Other" +33012900,essential oils terpeneless or not including concretes and absolutes resinoids extracted oleoresins concentrates of essential oils in fats in fixed oils in waxes or the like obtained by enfleurage or maceration terpenic by products of the deterpenation of essential oils aqueous distillates and aqueous solutions of essential oils essential oils of citrus fruit >> other anise oil cajeput oil cananga oil caraway oil cassia oil cedarwood oil cinnamon bark oil cinnamon leaf oil +33012911,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Anise oil (aniseed oil)" +33012912,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Cajeput oil" +33012913,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Cananga oil" +33012914,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Caraway oil" +33012915,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Cassia 
oil" +33012916,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Cedarwood oil" +33012917,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Cinnamon bark oil" +33012918,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Cinnamon leaf oil " +33012921,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Clove leaf or stem, oil" +33012922,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Coriander seed oil" +33012923,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Dill oil (anethum oil)" +33012924,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Eucalyptus oil" +33012925,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Fennel seed oil" +33012926,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Ginger oil" +33012927,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN 
FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Ginger grass oil" +33012928,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Clove bud oil " +33012931,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Tuberose concentrate" +33012932,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Nutmeg oil" +33012933,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Palmarosa oil" +33012934,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Patchouli oil" +33012935,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Pepper oil" +33012936,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Petitgrain oil" +33012937,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Sandalwood oil" +33012938,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS 
SOLUTIONS OF ESSENTIAL OILS :Other:Rose oil " +33012941,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Camphor oil" +33012942,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Lemon grass oil" +33012943,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Ylang ylang oil" +33012944,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Davana oil" +33012945,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Cumin oil" +33012946,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Celery seed oil" +33012947,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Garlic oil" +33012948,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Paprika oil" +33012949,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Turmeric oil" +33012950,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN 
FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Spices oils not elsewhere specified or included" +33012990,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other:Other" +33013000,essential oils terpeneless or not including concretes and absolutes resinoids extracted oleoresins concentrates of essential oils in fats in fixed oils in waxes or the like obtained by enfleurage or maceration terpenic by products of the deterpenation of essential oils aqueous distillates and aqueous solutions of essential oils essential oils of citrus fruit >> resinoids +33013010,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Resinoids :Other;" +33013091,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Resinoids :Flavouring essences, all types, including those for liquors" +33013099,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Resinoids :Other" +33019000,essential oils terpeneless or not including concretes and absolutes resinoids extracted oleoresins concentrates of essential oils in fats in fixed oils in waxes or the like obtained by enfleurage or maceration terpenic by products of the deterpenation of essential oils aqueous distillates and aqueous solutions of essential oils essential oils of citrus fruit >> other fenugreek ginger pepper turmeric cardamom celery seed and nutmeg oleoresins +33019011,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Fenugreek oleoresins" +33019012,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Ginger oleoresins" +33019013,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN 
FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Pepper oleoresins" +33019014,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Turmeric oleoresins" +33019015,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Cardamom oleoresins" +33019016,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Celery seed oleoresins" +33019017,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Nutmeg oleoresins " +33019021,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Clove oleoresins" +33019022,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Capsicum oleoresins" +33019023,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Coriander oleoresins" +33019024,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Cumin oleoresins" +33019025,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL 
OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Fennel oleoresins" +33019029,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Oleoresins of spices not elsewhere specified or included " +33019031,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Attars of all kinds in fixed oil base" +33019032,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Mustard oil aroma" +33019033,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Essence of ambrettolide (ambrette seed oil essence) " +33019041,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Flavouring essences, all types, including those for liquors" +33019049,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Other " +33019051,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Flavouring essences, all types, including those for liquors" +33019059,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Other" +33019060,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION 
OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Aqueous distillates of essential oils, not elsewhere specified or included " +33019071,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Flavouring essences, all types, including those for liquors" +33019079,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Other" +33019090,"ESSENTIAL OILS (TERPENELESS OR NOT), INCLUDING CONCRETES AND ABSOLUTES; RESINOIDS; EXTRACTED OLEORESINS; CONCENTRATES OF ESSENTIAL OILS IN FATS, IN FIXED OILS, IN WAXES OR THE LIKE, OBTAINED BY ENFLEURAGE OR MACERATION; TERPENIC BY-PRODUCTS OF THE DETERPENATION OF ESSENTIAL OILS; AQUEOUS DISTILLATES AND AQUEOUS SOLUTIONS OF ESSENTIAL OILS :Other :Other" +33020000,mixtures of odoriferous substances and mixtures including alcoholic solutions with a basis of one or more of these substances of a kind used as raw materials in industry other preparations based on odoriferous substances of a kind used for the manufacture of beverages +33021000,mixtures of odoriferous substances and mixtures including alcoholic solutions with a basis of one or more of these substances of a kind used as raw materials in industry other preparations based on odoriferous substances of a kind used for the manufacture of beverages >> of a kind used in the food or drink industries +33021010,"MIXTURES OF ODORIFEROUS SUBSTANCES AND MIXTURES (INCLUDING ALCOHOLIC SOLUTIONS) WITH A BASIS OF ONE OR MORE OF THESE SUBSTANCES, OF A KIND USED AS RAW MATERIALS IN INDUSTRY; OTHER PREPARATIONS BASED ON ODORIFEROUS SUBSTANCES, OF A KIND USED FOR THE MANUFACTURE OF BEVERAGES:Of a kind used in the food or drink industries:Synthetic flavouring essences(OLD tariff)" +33021090,"MIXTURES OF ODORIFEROUS SUBSTANCES AND MIXTURES (INCLUDING ALCOHOLIC SOLUTIONS) WITH A BASIS OF ONE OR MORE OF THESE SUBSTANCES, OF A KIND USED AS RAW MATERIALS IN INDUSTRY; OTHER PREPARATIONS BASED ON ODORIFEROUS SUBSTANCES, OF A KIND USED FOR THE MANUFACTURE OF BEVERAGES:Of a kind used in the food or drink industries:Other(OLD tariff)" +33029011,"MIXTURES OF ODORIFEROUS SUBSTANCES AND MIXTURES (INCLUDING ALCOHOLIC SOLUTIONS) WITH A BASIS OF ONE OR MORE OF THESE SUBSTANCES, OF A KIND USED AS RAW MATERIALS IN INDUSTRY; OTHER PREPARATIONS BASED ON ODORIFEROUS SUBSTANCES, OF A KIND USED FOR THE MANUFACTURE OF BEVERAGES:Other:Synthetic perfumery compounds(OLD tariff)" +33029012,"MIXTURES OF ODORIFEROUS SUBSTANCES AND MIXTURES (INCLUDING ALCOHOLIC SOLUTIONS) WITH A BASIS OF ONE OR MORE OF THESE SUBSTANCES, OF A KIND USED AS RAW MATERIALS IN INDUSTRY; OTHER PREPARATIONS BASED ON ODORIFEROUS SUBSTANCES, OF A KIND USED FOR THE MANUFACTURE OF BEVERAGES:Other:Synthetic essential oil(OLD tariff)" +33029019,"MIXTURES OF ODORIFEROUS SUBSTANCES AND MIXTURES (INCLUDING ALCOHOLIC SOLUTIONS) WITH A BASIS OF ONE OR MORE OF THESE SUBSTANCES, OF A KIND USED AS RAW MATERIALS IN INDUSTRY; OTHER PREPARATIONS BASED ON ODORIFEROUS SUBSTANCES, OF 
A KIND USED FOR THE MANUFACTURE OF BEVERAGES:Other:Other(OLD tariff)" +33029020,"MIXTURES OF ODORIFEROUS SUBSTANCES AND MIXTURES (INCLUDING ALCOHOLIC SOLUTIONS) WITH A BASIS OF ONE OR MORE OF THESE SUBSTANCES, OF A KIND USED AS RAW MATERIALS IN INDUSTRY; OTHER PREPARATIONS BASED ON ODORIFEROUS SUBSTANCES, OF A KIND USED FOR THE MANUFACTURE OF BEVERAGES:Other:Aleuritic acid(OLD tariff)" +33029090,"MIXTURES OF ODORIFEROUS SUBSTANCES AND MIXTURES (INCLUDING ALCOHOLIC SOLUTIONS) WITH A BASIS OF ONE OR MORE OF THESE SUBSTANCES, OF A KIND USED AS RAW MATERIALS IN INDUSTRY; OTHER PREPARATIONS BASED ON ODORIFEROUS SUBSTANCES, OF A KIND USED FOR THE MANUFACTURE OF BEVERAGES:Other:Other(OLD tariff)" +33030010,PERFUMES AND TOILET WATERS:Perfumes and toilet waters:Eau-de-cologne(OLD tariff) +33030020,PERFUMES AND TOILET WATERS:Perfumes and toilet waters:Rose water(OLD tariff) +33030030,PERFUMES AND TOILET WATERS:Perfumes and toilet waters:Keora water(OLD tariff) +33030040,PERFUMES AND TOILET WATERS:Perfumes and toilet waters:Perfumes and perfumery compounds not containing spirit (excluding aqueous distillates)(OLD tariff) +33030050,PERFUMES AND TOILET WATERS:Perfumes and toilet waters:Perfumes containing spirit(OLD tariff) +33030060,PERFUMES AND TOILET WATERS:Perfumes and toilet waters:Spirituous toilet preparations not elsewhere or included(OLD tariff) +33030090,PERFUMES AND TOILET WATERS:Perfumes and toilet waters:Other(OLD tariff) +33040000,beauty or preparations and preparations for the care of the skin other than medicaments including sunscreen or suntan preparations manicure or pedicure preparations +33041000,"BEAUTY OR MAKE-UP PREPARATIONS AND PREPARATIONS FOR THE CARE OF THE SKIN (OTHER THAN MEDICAMENTS), INCLUDING SUNSCREEN OR SUNTAN PREPARATIONS; MANICURE OR PEDICURE PREPARATIONS::Lip make-up preparations" +33042000,"BEAUTY OR MAKE-UP PREPARATIONS AND PREPARATIONS FOR THE CARE OF THE SKIN (OTHER THAN MEDICAMENTS), INCLUDING SUNSCREEN OR SUNTAN PREPARATIONS; MANICURE OR PEDICURE PREPARATIONS::Eye make-up preparations" +33043000,"BEAUTY OR MAKE-UP PREPARATIONS AND PREPARATIONS FOR THE CARE OF THE SKIN (OTHER THAN MEDICAMENTS), INCLUDING SUNSCREEN OR SUNTAN PREPARATIONS; MANICURE OR PEDICURE PREPARATIONS::Manicure or pedicure preparations(OLD tariff)" +33049110,"BEAUTY OR MAKE-UP PREPARATIONS AND PREPARATIONS FOR THE CARE OF THE SKIN (OTHER THAN MEDICAMENTS), INCLUDING SUNSCREEN OR SUNTAN PREPARATIONS; MANICURE OR PEDICURE PREPARATIONS:Powders, whether or not compressed:Face powders(OLD tariff)" +33049120,"BEAUTY OR MAKE-UP PREPARATIONS AND PREPARATIONS FOR THE CARE OF THE SKIN (OTHER THAN MEDICAMENTS), INCLUDING SUNSCREEN OR SUNTAN PREPARATIONS; MANICURE OR PEDICURE PREPARATIONS:Powders, whether or not compressed:Talcum powders(OLD tariff)" +33049190,"BEAUTY OR MAKE-UP PREPARATIONS AND PREPARATIONS FOR THE CARE OF THE SKIN (OTHER THAN MEDICAMENTS), INCLUDING SUNSCREEN OR SUNTAN PREPARATIONS; MANICURE OR PEDICURE PREPARATIONS:Powders, whether or not compressed:Other(OLD tariff)" +33049900,beauty or preparations and preparations for the care of the skin other than medicaments including sunscreen or suntan preparations manicure or pedicure preparations >> other +33049910,"BEAUTY OR MAKE-UP PREPARATIONS AND PREPARATIONS FOR THE CARE OF THE SKIN (OTHER THAN MEDICAMENTS), INCLUDING SUNSCREEN OR SUNTAN PREPARATIONS; MANICURE OR PEDICURE PREPARATIONS:Other:Face creams" +33049920,"BEAUTY OR MAKE-UP PREPARATIONS AND PREPARATIONS FOR THE CARE OF THE SKIN (OTHER THAN MEDICAMENTS), INCLUDING SUNSCREEN OR 
SUNTAN PREPARATIONS; MANICURE OR PEDICURE PREPARATIONS:Other:Nail polish or lacquers" +33049930,"BEAUTY OR MAKE-UP PREPARATIONS AND PREPARATIONS FOR THE CARE OF THE SKIN (OTHER THAN MEDICAMENTS), INCLUDING SUNSCREEN OR SUNTAN PREPARATIONS; MANICURE OR PEDICURE PREPARATIONS:Other:Moisturising lotion" +33049940,"BEAUTY OR MAKE-UP PREPARATIONS AND PREPARATIONS FOR THE CARE OF THE SKIN (OTHER THAN MEDICAMENTS), INCLUDING SUNSCREEN OR SUNTAN PREPARATIONS; MANICURE OR PEDICURE PREPARATIONS:Other:Sindur, bindi, kumkum" +33049950,"BEAUTY OR MAKE-UP PREPARATIONS AND PREPARATIONS FOR THE CARE OF THE SKIN (OTHER THAN MEDICAMENTS), INCLUDING SUNSCREEN OR SUNTAN PREPARATIONS; MANICURE OR PEDICURE PREPARATIONS:Other:Turmeric preparations" +33049990,"BEAUTY OR MAKE-UP PREPARATIONS AND PREPARATIONS FOR THE CARE OF THE SKIN (OTHER THAN MEDICAMENTS), INCLUDING SUNSCREEN OR SUNTAN PREPARATIONS; MANICURE OR PEDICURE PREPARATIONS:Other:Other" +33050000,preparations for use on the hair +33051000,preparations for use on the hair >> shampoos +33051010,PREPARATIONS FOR USE ON THE HAIR:Shampoos:Containing spirit +33051090,PREPARATIONS FOR USE ON THE HAIR:Shampoos:Other +33052000,PREPARATIONS FOR USE ON THE HAIR::Preparations for permanent waving or straightening +33053000,PREPARATIONS FOR USE ON THE HAIR::Hair lacquers(OLD tariff) +33059011,PREPARATIONS FOR USE ON THE HAIR:Other :Perfumed(OLD tariff) +33059019,PREPARATIONS FOR USE ON THE HAIR:Other :Other(OLD tariff) +33059020,PREPARATIONS FOR USE ON THE HAIR:Other :Brilliantines (spirituous)(OLD tariff) +33059030,PREPARATIONS FOR USE ON THE HAIR:Other :Hair cream(OLD tariff) +33059040,"PREPARATIONS FOR USE ON THE HAIR:Other :Hair dyes (natural, herbal or synthetic)(OLD tariff)" +33059050,PREPARATIONS FOR USE ON THE HAIR:Other :Hair fixers(OLD tariff) +33059090,PREPARATIONS FOR USE ON THE HAIR:Other :Other(OLD tariff) +33060000,preparations for oral or dental hygiene including denture fixative pastes and powders yarn used to clean between the teeth in dental floss individual retail packages +33061000,preparations for oral or dental hygiene including denture fixative pastes and powders yarn used to clean between the teeth in dental floss individual retail packages >> dentifrices +33061010,"PREPARATIONS FOR ORAL OR DENTAL HYGIENE, INCLUDING DENTURE FIXATIVE PASTES AND POWDERS; YARN USED TO CLEAN BETWEEN THE TEETH (DENTAL FLOSS), IN INDIVIDUAL RETAIL PACKAGES:Dentifrices :In powder" +33061020,"PREPARATIONS FOR ORAL OR DENTAL HYGIENE, INCLUDING DENTURE FIXATIVE PASTES AND POWDERS; YARN USED TO CLEAN BETWEEN THE TEETH (DENTAL FLOSS), IN INDIVIDUAL RETAIL PACKAGES:Dentifrices :In paste" +33061090,"PREPARATIONS FOR ORAL OR DENTAL HYGIENE, INCLUDING DENTURE FIXATIVE PASTES AND POWDERS; YARN USED TO CLEAN BETWEEN THE TEETH (DENTAL FLOSS), IN INDIVIDUAL RETAIL PACKAGES:Dentifrices :Other" +33062000,"PREPARATIONS FOR ORAL OR DENTAL HYGIENE, INCLUDING DENTURE FIXATIVE PASTES AND POWDERS; YARN USED TO CLEAN BETWEEN THE TEETH (DENTAL FLOSS), IN INDIVIDUAL RETAIL PACKAGES::Yarn used to clean between the teeth (dental floss)" +33069000,"PREPARATIONS FOR ORAL OR DENTAL HYGIENE, INCLUDING DENTURE FIXATIVE PASTES AND POWDERS; YARN USED TO CLEAN BETWEEN THE TEETH (DENTAL FLOSS), IN INDIVIDUAL RETAIL PACKAGES::Other" +33070000,shaving or preparations personal deodorants bath preparations depilatories and other perfumery cosmetic or toilet preparations not elsewhere specified or included prepared room deodorisers whether or not perfumed or having disinfectant properties 
+33071000,shaving or preparations personal deodorants bath preparations depilatories and other perfumery cosmetic or toilet preparations not elsewhere specified or included prepared room deodorisers whether or not perfumed or having disinfectant properties >> shaving or preparations +33071010,"PRE-SHAVE, SHAVING OR AFTER-SHAVE PREPARATIONS, PERSONAL DEODORANTS, BATH:Pre-shave, shaving or after-shave preparations:Shaving cream" +33071090,"PRE-SHAVE, SHAVING OR AFTER-SHAVE PREPARATIONS, PERSONAL DEODORANTS, BATH:Pre-shave, shaving or after-shave preparations:Other" +33072000,"PRE-SHAVE, SHAVING OR AFTER-SHAVE PREPARATIONS, PERSONAL DEODORANTS, BATH::Personal deodorants and anti-perspirants" +33073000,shaving or preparations personal deodorants bath preparations depilatories and other perfumery cosmetic or toilet preparations not elsewhere specified or included prepared room deodorisers whether or not perfumed or having disinfectant properties >> perfumed bath salts and other bath preparations +33073010,"PRE-SHAVE, SHAVING OR AFTER-SHAVE PREPARATIONS, PERSONAL DEODORANTS, BATH:Perfumed bath salts and other bath preparations:Bath oil (thailam)" +33073090,"PRE-SHAVE, SHAVING OR AFTER-SHAVE PREPARATIONS, PERSONAL DEODORANTS, BATH:Perfumed bath salts and other bath preparations:Other" +33074100,"PRE-SHAVE, SHAVING OR AFTER-SHAVE PREPARATIONS, PERSONAL DEODORANTS, BATH::""Agarbatti"" and other odoriferous preparations which operate by burning" +33074900,"PRE-SHAVE, SHAVING OR AFTER-SHAVE PREPARATIONS, PERSONAL DEODORANTS, BATH::Other" +33079000,shaving or preparations personal deodorants bath preparations depilatories and other perfumery cosmetic or toilet preparations not elsewhere specified or included prepared room deodorisers whether or not perfumed or having disinfectant properties >> other soap organic products and +33079010,"PRE-SHAVE, SHAVING OR AFTER-SHAVE PREPARATIONS, PERSONAL DEODORANTS, BATH:Other:Depilatories(OLD tariff)" +33079020,"PRE-SHAVE, SHAVING OR AFTER-SHAVE PREPARATIONS, PERSONAL DEODORANTS, BATH:Other:Sterile contact lens care solution(OLD tariff)" +33079090,"PRE-SHAVE, SHAVING OR AFTER-SHAVE PREPARATIONS, PERSONAL DEODORANTS, BATH:Other:Other(OLD tariff)" +34010000,preparations for use as soap in the form of pieces or bars cakes moulded shapes whether or not containing soap organic products and preparations for washing the skin in the form of liquid or cream and put up for retail sale whether or not containing soap paper wadding felt and nonwovens impregnated coated or covered with soap or detergent soap and organic products and preparations in the form of bars cakes moulded pieces or shapes and paper wad ding felt and nonwovens impregnated coated or covered with soap or detergent for toilet use including medicated products +34011100,preparations for use as soap in the form of pieces or bars cakes moulded shapes whether or not containing soap organic products and preparations for washing the skin in the form of liquid or cream and put up for retail sale whether or not containing soap paper wadding felt and nonwovens impregnated coated or covered with soap or detergent soap and organic products and preparations in the form of bars cakes moulded pieces or shapes and paper wad ding felt and nonwovens impregnated coated or covered with soap or detergent for toilet use including medicated products >> +34011110,"SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR USE AS SOAP, IN THE FORM OF BARS, CAKES, MOULDED PIECES OR SHAPES, WHETHER OR NOT CONTAINING SOAP; ORGANIC 
SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR WASHING THE SKIN, IN THE FORM OF LIQUID OR CREAM AND PUT UP FOR RETAIL SALE, WHETHER OR NOT CONTAINING SOAP; PAPER, WADDING, FELT AND NONWOVENS, IMPREGNATED, COATED OR COVERED WITH SOAP OR DETERGENT :For toilet use (including medicated products):Medicated toilet soaps" +34011120,"SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR USE AS SOAP, IN THE FORM OF BARS, CAKES, MOULDED PIECES OR SHAPES, WHETHER OR NOT CONTAINING SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR WASHING THE SKIN, IN THE FORM OF LIQUID OR CREAM AND PUT UP FOR RETAIL SALE, WHETHER OR NOT CONTAINING SOAP; PAPER, WADDING, FELT AND NONWOVENS, IMPREGNATED, COATED OR COVERED WITH SOAP OR DETERGENT :For toilet use (including medicated products):Shaving soaps other than shaving cream" +34011190,"SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR USE AS SOAP, IN THE FORM OF BARS, CAKES, MOULDED PIECES OR SHAPES, WHETHER OR NOT CONTAINING SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR WASHING THE SKIN, IN THE FORM OF LIQUID OR CREAM AND PUT UP FOR RETAIL SALE, WHETHER OR NOT CONTAINING SOAP; PAPER, WADDING, FELT AND NONWOVENS, IMPREGNATED, COATED OR COVERED WITH SOAP OR DETERGENT :For toilet use (including medicated products):Other" +34011900,preparations for use as soap in the form of pieces or bars cakes moulded shapes whether or not containing soap organic products and preparations for washing the skin in the form of liquid or cream and put up for retail sale whether or not containing soap paper wadding felt and nonwovens impregnated coated or covered with soap or detergent soap and organic products and preparations in the form of bars cakes moulded pieces or shapes and paper wad ding felt and nonwovens impregnated coated or covered with soap or detergent for toilet use including medicated products >> other bars and blocks of not less than 500 gm in weight +34011911,"SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR USE AS SOAP, IN THE FORM OF BARS, CAKES, MOULDED PIECES OR SHAPES, WHETHER OR NOT CONTAINING SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR WASHING THE SKIN, IN THE FORM OF LIQUID OR CREAM AND PUT UP FOR RETAIL SALE, WHETHER OR NOT CONTAINING SOAP; PAPER, WADDING, FELT AND NONWOVENS, IMPREGNATED, COATED OR COVERED WITH SOAP OR DETERGENT :Other:Industrial soap" +34011919,"SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR USE AS SOAP, IN THE FORM OF BARS, CAKES, MOULDED PIECES OR SHAPES, WHETHER OR NOT CONTAINING SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR WASHING THE SKIN, IN THE FORM OF LIQUID OR CREAM AND PUT UP FOR RETAIL SALE, WHETHER OR NOT CONTAINING SOAP; PAPER, WADDING, FELT AND NONWOVENS, IMPREGNATED, COATED OR COVERED WITH SOAP OR DETERGENT :Other:Other" +34011920,"SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR USE AS SOAP, IN THE FORM OF BARS, CAKES, MOULDED PIECES OR SHAPES, WHETHER OR NOT CONTAINING SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR WASHING THE SKIN, IN THE FORM OF LIQUID OR CREAM AND PUT UP FOR RETAIL SALE, WHETHER OR NOT CONTAINING SOAP; PAPER, WADDING, FELT AND NONWOVENS, IMPREGNATED, COATED OR COVERED WITH SOAP OR DETERGENT :Other:Flakes, chips and powder" +34011930,"SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR USE AS SOAP, IN THE FORM OF BARS, CAKES, MOULDED PIECES OR SHAPES, WHETHER OR NOT CONTAINING SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR WASHING THE SKIN, IN THE FORM OF LIQUID OR CREAM AND PUT UP 
FOR RETAIL SALE, WHETHER OR NOT CONTAINING SOAP; PAPER, WADDING, FELT AND NONWOVENS, IMPREGNATED, COATED OR COVERED WITH SOAP OR DETERGENT :Other:Tablets and cakes " +34011941,"SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR USE AS SOAP, IN THE FORM OF BARS, CAKES, MOULDED PIECES OR SHAPES, WHETHER OR NOT CONTAINING SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR WASHING THE SKIN, IN THE FORM OF LIQUID OR CREAM AND PUT UP FOR RETAIL SALE, WHETHER OR NOT CONTAINING SOAP; PAPER, WADDING, FELT AND NONWOVENS, IMPREGNATED, COATED OR COVERED WITH SOAP OR DETERGENT :Other:Household soaps" +34011942,"SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR USE AS SOAP, IN THE FORM OF BARS, CAKES, MOULDED PIECES OR SHAPES, WHETHER OR NOT CONTAINING SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR WASHING THE SKIN, IN THE FORM OF LIQUID OR CREAM AND PUT UP FOR RETAIL SALE, WHETHER OR NOT CONTAINING SOAP; PAPER, WADDING, FELT AND NONWOVENS, IMPREGNATED, COATED OR COVERED WITH SOAP OR DETERGENT :Other:Laundry soaps" +34011990,"SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR USE AS SOAP, IN THE FORM OF BARS, CAKES, MOULDED PIECES OR SHAPES, WHETHER OR NOT CONTAINING SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR WASHING THE SKIN, IN THE FORM OF LIQUID OR CREAM AND PUT UP FOR RETAIL SALE, WHETHER OR NOT CONTAINING SOAP; PAPER, WADDING, FELT AND NONWOVENS, IMPREGNATED, COATED OR COVERED WITH SOAP OR DETERGENT :Other:Other" +34012000,"SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR USE AS SOAP, IN THE FORM OF BARS, CAKES, MOULDED PIECES OR SHAPES, WHETHER OR NOT CONTAINING SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR WASHING THE SKIN, IN THE FORM OF LIQUID OR CREAM AND PUT UP FOR RETAIL SALE, WHETHER OR NOT CONTAINING SOAP; PAPER, WADDING, FELT AND NONWOVENS, IMPREGNATED, COATED OR COVERED WITH SOAP OR DETERGENT ::Soap in other forms" +34013000,preparations for use as soap in the form of pieces or bars cakes moulded shapes whether or not containing soap organic products and preparations for washing the skin in the form of liquid or cream and put up for retail sale whether or not containing soap paper wadding felt and nonwovens impregnated coated or covered with soap or detergent soap and organic products and preparations in the form of bars cakes moulded pieces or shapes and paper wad ding felt and nonwovens impregnated coated or covered with soap or detergent for toilet use including medicated products >> organic products and preparations for washing the skin in the form of liquid or cream and put up for retail sal e whether or not containing soap for toilet use including medicated products +34013011,"SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR USE AS SOAP, IN THE FORM OF BARS, CAKES, MOULDED PIECES OR SHAPES, WHETHER OR NOT CONTAINING SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR WASHING THE SKIN, IN THE FORM OF LIQUID OR CREAM AND PUT UP FOR RETAIL SALE, WHETHER OR NOT CONTAINING SOAP; PAPER, WADDING, FELT AND NONWOVENS, IMPREGNATED, COATED OR COVERED WITH SOAP OR DETERGENT :Organic surface-active products and preparations for washing the skin, in the form of liquid or cream and put up for retail sale, whether or not containing soap :Medicated toilet soaps" +34013012,"SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR USE AS SOAP, IN THE FORM OF BARS, CAKES, MOULDED PIECES OR SHAPES, WHETHER OR NOT CONTAINING SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR WASHING THE SKIN, 
IN THE FORM OF LIQUID OR CREAM AND PUT UP FOR RETAIL SALE, WHETHER OR NOT CONTAINING SOAP; PAPER, WADDING, FELT AND NONWOVENS, IMPREGNATED, COATED OR COVERED WITH SOAP OR DETERGENT :Organic surface-active products and preparations for washing the skin, in the form of liquid or cream and put up for retail sale, whether or not containing soap :Shaving cream and shaving gel" +34013019,"SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR USE AS SOAP, IN THE FORM OF BARS, CAKES, MOULDED PIECES OR SHAPES, WHETHER OR NOT CONTAINING SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR WASHING THE SKIN, IN THE FORM OF LIQUID OR CREAM AND PUT UP FOR RETAIL SALE, WHETHER OR NOT CONTAINING SOAP; PAPER, WADDING, FELT AND NONWOVENS, IMPREGNATED, COATED OR COVERED WITH SOAP OR DETERGENT :Organic surface-active products and preparations for washing the skin, in the form of liquid or cream and put up for retail sale, whether or not containing soap :Other" +34013090,"SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR USE AS SOAP, IN THE FORM OF BARS, CAKES, MOULDED PIECES OR SHAPES, WHETHER OR NOT CONTAINING SOAP; ORGANIC SURFACE-ACTIVE PRODUCTS AND PREPARATIONS FOR WASHING THE SKIN, IN THE FORM OF LIQUID OR CREAM AND PUT UP FOR RETAIL SALE, WHETHER OR NOT CONTAINING SOAP; PAPER, WADDING, FELT AND NONWOVENS, IMPREGNATED, COATED OR COVERED WITH SOAP OR DETERGENT :Organic surface-active products and preparations for washing the skin, in the form of liquid or cream and put up for retail sale, whether or not containing soap :Other" +34020000,organic agents other than soap preparations washing preparations including auxiliary washing and cleaning preparations preparations whether or not containing soap other than those of heading 3401 anionic organic agents whether or not put up for retail sale +34021110,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:Anionic:Silicone surfactant(OLD tariff)" +34021190,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:Anionic:Other(OLD tariff)" +34021200,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401::Cationinc(OLD tariff)" +34021300,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401::Non-ionic(OLD tariff)" +34021900,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401::Other(OLD tariff)" +34022010,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:Preparations put up for retail sale:Washing preparations (including auxili ary washing preparations) and cleaning 
preparations, having a basis of soap or other organic surface active agents(OLD tariff)" +34022020,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:Preparations put up for retail sale:Cleaning or degreasing preparations not having a basis of soap or other organic surface active agents(OLD tariff)" +34022090,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:Preparations put up for retail sale:Other(OLD tariff)" +34023100,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:linear:Linear alkylbenzene sulphonic acids and their salts" +34023900,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:other:Other" +34024100,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:cationic:Cationic" +34024200,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:Non-ionic:Non-ionic" +34024900,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:other:Other" +34025000,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:preparations:Preparations put up for retail sale" +34029000,organic agents other than soap preparations washing preparations including auxiliary washing and cleaning preparations preparations whether or not containing soap other than those of heading 3401 anionic organic agents whether or not put up for retail sale >> other synthetic detergents +34029011,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:Other;:Washing preparations (including auxiliary washing preparations) and cleaning preparations, having a basis of soap or other organic surface active agents" +34029012,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:Other;:Cleaning or degreasing preparations not having a basis of soap or other organic surface active agents" +34029019,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); 
SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:Other;:Other" +34029020,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:Other;:Sulphonated or sulphated or oxidized or chlorinated castor oil; sulphonated or sulphated or oxidized or chlorinated fish oil; sulphonated or sulphated or oxidized or chlorinated sperm oil; sulphonated or sulphated or oxidized or chlorinated neats foot oil" +34029030,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:Other;:Penetrators " +34029041,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:Other;:Washing preparations (including auxiliary washing preparations) and cleaning preparations, having a basis of soap or other organic surface active agents" +34029042,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:Other;:Cleaning or degreasing preparations not having a basis of soap or other organic surface active agents" +34029049,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:Other;:Other " +34029051,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:Other;:Washing preparations (including auxiliary washing preparations) and cleaning preparations, having a basis of soap or other organic surface active agents" +34029052,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:Other;:Cleaning or degreasing preparations not having a basis of soap or other organic surface active agents" +34029059,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:Other;:Other " +34029091,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:Other;:Washing preparations (including auxiliary washing preparations) and cleaning preparations, having a basis of soap or other organic surface active agents" +34029092,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY 
WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:Other;:Cleaning or degreasing preparations not hav- ing a basis of soap or other organic surface active agents" +34029099,"ORGANIC SURFACE-ACTIVE AGENTS (OTHER THAN SOAP); SURFACE-ACTIVE PREPARATIONS, WASHING PREPARATIONS (INCLUDING AUXILIARY WASHING PREPARATIONS) AND CLEANING PREPARATIONS, WHETHER OR NOT CONTAINING SOAP, OTHER THAN THOSE OF HEADING 3401:Other;:Other" +34030000,lubricating preparations including cutting oil bolt or nut release preparations preparations or preparations and mould release preparations based on lubricants and preparations of a kind used for the oil or grease treatment of textile materials leather furskins or other materials but excluding preparations containing as basic constituents 70 or more by weight of petroleum oils or of oils obtained from bituminous minerals containing petroleum oils or oils obtained frombituminous minerals +34031100,"LUBRICATING PREPARATIONS (INCLUDING CUTTINGOIL PREPARATIONS, BOLT OR NUT RELEASE PREPARATIONS, ANTI-RUST OR ANTI-CORROSION PREPARATIONS AND MOULD RELEASE PREPARATIONS, BASED ON LUBRICANTS) AND PREPARATIONS OF A KIND USED FOR THE OIL OR GREASE TREATMENT OF TEXTILE MATERIALS, LEATHER, FURSKINS OR OTHER MATERIALS, BUT EXCLUDING PREPARATIONS CONTAINING, AS BASIC CONSTITUENTS, 70 % OR MORE BY WEIGHT OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS::Preparations for the treatment of textile materials, leather, furskins or other materials" +34031900,"LUBRICATING PREPARATIONS (INCLUDING CUTTINGOIL PREPARATIONS, BOLT OR NUT RELEASE PREPARATIONS, ANTI-RUST OR ANTI-CORROSION PREPARATIONS AND MOULD RELEASE PREPARATIONS, BASED ON LUBRICANTS) AND PREPARATIONS OF A KIND USED FOR THE OIL OR GREASE TREATMENT OF TEXTILE MATERIALS, LEATHER, FURSKINS OR OTHER MATERIALS, BUT EXCLUDING PREPARATIONS CONTAINING, AS BASIC CONSTITUENTS, 70 % OR MORE BY WEIGHT OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS::Other Other(OLD tariff)" +34039100,"LUBRICATING PREPARATIONS (INCLUDING CUTTINGOIL PREPARATIONS, BOLT OR NUT RELEASE PREPARATIONS, ANTI-RUST OR ANTI-CORROSION PREPARATIONS AND MOULD RELEASE PREPARATIONS, BASED ON LUBRICANTS) AND PREPARATIONS OF A KIND USED FOR THE OIL OR GREASE TREATMENT OF TEXTILE MATERIALS, LEATHER, FURSKINS OR OTHER MATERIALS, BUT EXCLUDING PREPARATIONS CONTAINING, AS BASIC CONSTITUENTS, 70 % OR MORE BY WEIGHT OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS::Preparations for the treatment of textile materials, leather, furskins or other materials" +34039900,"LUBRICATING PREPARATIONS (INCLUDING CUTTINGOIL PREPARATIONS, BOLT OR NUT RELEASE PREPARATIONS, ANTI-RUST OR ANTI-CORROSION PREPARATIONS AND MOULD RELEASE PREPARATIONS, BASED ON LUBRICANTS) AND PREPARATIONS OF A KIND USED FOR THE OIL OR GREASE TREATMENT OF TEXTILE MATERIALS, LEATHER, FURSKINS OR OTHER MATERIALS, BUT EXCLUDING PREPARATIONS CONTAINING, AS BASIC CONSTITUENTS, 70 % OR MORE BY WEIGHT OF PETROLEUM OILS OR OF OILS OBTAINED FROM BITUMINOUS MINERALS::Other" +34040000,artificial waxes and prepared waxes +34042000,ARTIFICIAL WAXES AND PREPARED WAXES::Of poly (oxyethylene) (polyethylene glycol) +34049000,artificial waxes and prepared waxes >> other +34049010,"ARTIFICIAL WAXES AND PREPARED WAXES:Other:Sealing wax (including bottle sealing wax) in sticks, cakes or similar forms" +34049020,ARTIFICIAL WAXES AND PREPARED WAXES:Other:Polyethylene wax (OLD tariff) +34049031,ARTIFICIAL WAXES AND PREPARED WAXES:Other:Poly 
brominated biphenyls(OLD tariff) +34049032,ARTIFICIAL WAXES AND PREPARED WAXES:Other:Poly chlorinated biphenyls +34049033,ARTIFICIAL WAXES AND PREPARED WAXES:Other:Poly chlorinated terphenyls +34049039,ARTIFICIAL WAXES AND PREPARED WAXES:Other:Other +34049090,ARTIFICIAL WAXES AND PREPARED WAXES:Other:Other +34050000,polishes and creams for footwear furniture floors coachwork glass or metal scouring pastes and powders and similar preparations whether or not in the form of paper wadding cellular plastics or cellular felt nonwovens such excluding waxes of preparations heading 3404 +34051000,"POLISHES AND CREAMS, FOR FOOTWEAR, FURNITURE, FLOORS, COACHWORK, GLASS OR METAL, SCOURING PASTES AND POWDERS AND SIMILAR PREPARATIONS (WHETHER OR NOT IN THE FORM OF PAPER, WADDING, FELT, NONWOVENS, CELLULAR PLASTICS OR CELLULAR RUBBER, IMPREGNATED, COATED OR COVERED WITH SUCH PREPARATIONS), EXCLUDING WAXES OF HEADING 3404::Polishes, creams and similar preparations for footwear or leather(OLD tariff)" +34052000,"POLISHES AND CREAMS, FOR FOOTWEAR, FURNITURE, FLOORS, COACHWORK, GLASS OR METAL, SCOURING PASTES AND POWDERS AND SIMILAR PREPARATIONS (WHETHER OR NOT IN THE FORM OF PAPER, WADDING, FELT, NONWOVENS, CELLULAR PLASTICS OR CELLULAR RUBBER, IMPREGNATED, COATED OR COVERED WITH SUCH PREPARATIONS), EXCLUDING WAXES OF HEADING 3404::Polishes, creams and similar preparations for the maintenance of wooden furniture, floors or other wood work(OLD tariff)" +34053000,"POLISHES AND CREAMS, FOR FOOTWEAR, FURNITURE, FLOORS, COACHWORK, GLASS OR METAL, SCOURING PASTES AND POWDERS AND SIMILAR PREPARATIONS (WHETHER OR NOT IN THE FORM OF PAPER, WADDING, FELT, NONWOVENS, CELLULAR PLASTICS OR CELLULAR RUBBER, IMPREGNATED, COATED OR COVERED WITH SUCH PREPARATIONS), EXCLUDING WAXES OF HEADING 3404::Polishes and similar preparations for coach-work, other than metal polishes(OLD tariff)" +34054000,"POLISHES AND CREAMS, FOR FOOTWEAR, FURNITURE, FLOORS, COACHWORK, GLASS OR METAL, SCOURING PASTES AND POWDERS AND SIMILAR PREPARATIONS (WHETHER OR NOT IN THE FORM OF PAPER, WADDING, FELT, NONWOVENS, CELLULAR PLASTICS OR CELLULAR RUBBER, IMPREGNATED, COATED OR COVERED WITH SUCH PREPARATIONS), EXCLUDING WAXES OF HEADING 3404::Scouring pastes and powders and other scouring preparations(OLD tariff)" +34059010,"POLISHES AND CREAMS, FOR FOOTWEAR, FURNITURE, FLOORS, COACHWORK, GLASS OR METAL, SCOURING PASTES AND POWDERS AND SIMILAR PREPARATIONS (WHETHER OR NOT IN THE FORM OF PAPER, WADDING, FELT, NONWOVENS, CELLULAR PLASTICS OR CELLULAR RUBBER, IMPREGNATED, COATED OR COVERED WITH SUCH PREPARATIONS), EXCLUDING WAXES OF HEADING 3404:Other:Polishes and compositions for application to metal including diamond polishing powder or paste(OLD tariff)" +34059090,"POLISHES AND CREAMS, FOR FOOTWEAR, FURNITURE, FLOORS, COACHWORK, GLASS OR METAL, SCOURING PASTES AND POWDERS AND SIMILAR PREPARATIONS (WHETHER OR NOT IN THE FORM OF PAPER, WADDING, FELT, NONWOVENS, CELLULAR PLASTICS OR CELLULAR RUBBER, IMPREGNATED, COATED OR COVERED WITH SUCH PREPARATIONS), EXCLUDING WAXES OF HEADING 3404:Other:Other(OLD tariff)" +34060000,candles tapers and the like candles tapers and the like +34060010,"CANDLES, TAPERS AND THE LIKE:Candles, tapers and the like:Candles(OLD tariff)" +34060090,"CANDLES, TAPERS AND THE LIKE:Candles, tapers and the like:Other" +34070000,modelling pastes including those put up for children s amusement preparations known as or as impression put up in sets in packings for retail sale or horseshoe in plates shapes preparations for sticks or similar 
forms other use in dentistry with a basis of plaster of calcined gypsum or calcium sulphate modelling pastes including those put up for children s amusement preparations known as dental wax or as dental impression compounds put up in sets in packings for retail sale or in plates horseshoe shapes sticks or similar forms other preparations for use in dentistry with a basis of plaster of calcined gypsum or calcium sulphate +34070010,"MODELLING PASTES, INCLUDING THOSE PUT UP FOR CHILDREN'S AMUSEMENT; PREPARATIONS KNOWN AS DENTAL WAX OR AS DENTAL IMPRESSION COMPOUNDS, PUT UP IN SETS, IN PACKINGS FOR RETAIL SALE OR IN PLATES, HORSESHOE SHAPES, STICKS OR SIMILAR FORMS; OTHER PREPARATIONS FOR USE IN DENTISTRY, WITH A BASIS OF PLASTER (OF CALCINED GYPSUM OR CALCIUM SULPHATE):Modelling pastes, including those put up for children's amusement; preparations known as dental wax or as dental impression compounds, put up in sets, in packings for retail sale or in plates, horseshoe shapes, sticks or similar forms; other preparations for use in dentistry, with a basis of plaster (of calcined gypsum or calcium sulphate):Modelling pastes, including those put up for children's amusement" +34070090,"MODELLING PASTES, INCLUDING THOSE PUT UP FOR CHILDREN'S AMUSEMENT; PREPARATIONS KNOWN AS DENTAL WAX OR AS DENTAL IMPRESSION COMPOUNDS, PUT UP IN SETS, IN PACKINGS FOR RETAIL SALE OR IN PLATES, HORSESHOE SHAPES, STICKS OR SIMILAR FORMS; OTHER PREPARATIONS FOR USE IN DENTISTRY, WITH A BASIS OF PLASTER (OF CALCINED GYPSUM OR CALCIUM SULPHATE):Modelling pastes, including those put up for children's amusement; preparations known as dental wax or as dental impression compounds, put up in sets, in packings for retail sale or in plates, horseshoe shapes, sticks or similar forms; other preparations for use in dentistry, with a basis of plaster (of calcined gypsum or calcium sulphate):Other" +35010000,casein caseinates and other casein derivatives casein glues +35011000,"CASEIN, CASEINATES AND OTHER CASEIN DERIVATIVES; CASEIN GLUES::Casein" +35019000,"CASEIN, CASEINATES AND OTHER CASEIN DERIVATIVES; CASEIN GLUES::Other" +35020000,albumins including concentrates of two or more whey proteins containing by weight more than 80 whey proteins calculated on the dry matter albuminates and other albumin derivatives egg albumin +35021100,"ALBUMINS (INCLUDING CONCENTRATES OF TWO OR MORE WHEY PROTEINS, CONTAINING BY WEIGHT MORE THAN 80% WHEY PROTEINS, CALCULATED ON THE DRY MATTER), ALBUMINATES AND OTHER ALBUMIN DERIVATIVES::Dried" +35021900,"ALBUMINS (INCLUDING CONCENTRATES OF TWO OR MORE WHEY PROTEINS, CONTAINING BY WEIGHT MORE THAN 80% WHEY PROTEINS, CALCULATED ON THE DRY MATTER), ALBUMINATES AND OTHER ALBUMIN DERIVATIVES::Other" +35022000,"ALBUMINS (INCLUDING CONCENTRATES OF TWO OR MORE WHEY PROTEINS, CONTAINING BY WEIGHT MORE THAN 80% WHEY PROTEINS, CALCULATED ON THE DRY MATTER), ALBUMINATES AND OTHER ALBUMIN DERIVATIVES::Milk albumin, including concentrates of two or more whey proteins" +35029000,"ALBUMINS (INCLUDING CONCENTRATES OF TWO OR MORE WHEY PROTEINS, CONTAINING BY WEIGHT MORE THAN 80% WHEY PROTEINS, CALCULATED ON THE DRY MATTER), ALBUMINATES AND OTHER ALBUMIN DERIVATIVES::Other" +35030000,gelatin including gelatin in rectangular including square sheets whether or or coloured and gelatin other derivatives isinglass glues of animal origin excluding casein glues of heading 3501 gelatin including gelatin in rectangular including square sheets whether or not or coloured and gelatin derivatives isinglass other glues of animal 
origin excluding casein glues of heading 3501 +35030010,"GELATIN [INCLUDING GELATIN IN RECTANGULAR (INCLUDING SQUARE) SHEETS, WHETHER OR NOTSURFACE-WORKED OR COLOURED] AND GELATIN DERIVATIVES; ISINGLASS; OTHER GLUES OF ANIMAL ORIGIN, EXCLUDING CASEIN GLUES OF HEADING 3501:Gelatin [including gelatin in rectangular (including square) sheets, whether or not surface-worked or coloured] and gelatin derivatives; isinglass; other glues of animal origin, excluding casein glues of heading 3501:Isinglass" +35030020,"GELATIN [INCLUDING GELATIN IN RECTANGULAR (INCLUDING SQUARE) SHEETS, WHETHER OR NOTSURFACE-WORKED OR COLOURED] AND GELATIN DERIVATIVES; ISINGLASS; OTHER GLUES OF ANIMAL ORIGIN, EXCLUDING CASEIN GLUES OF HEADING 3501:Gelatin [including gelatin in rectangular (including square) sheets, whether or not surface-worked or coloured] and gelatin derivatives; isinglass; other glues of animal origin, excluding casein glues of heading 3501:Gelatin, edible grade and not elsewhere specified or included" +35030030,"GELATIN [INCLUDING GELATIN IN RECTANGULAR (INCLUDING SQUARE) SHEETS, WHETHER OR NOTSURFACE-WORKED OR COLOURED] AND GELATIN DERIVATIVES; ISINGLASS; OTHER GLUES OF ANIMAL ORIGIN, EXCLUDING CASEIN GLUES OF HEADING 3501:Gelatin [including gelatin in rectangular (including square) sheets, whether or not surface-worked or coloured] and gelatin derivatives; isinglass; other glues of animal origin, excluding casein glues of heading 3501:Glues derived from bones, hides and similar items; fish glues" +35030090,"GELATIN [INCLUDING GELATIN IN RECTANGULAR (INCLUDING SQUARE) SHEETS, WHETHER OR NOTSURFACE-WORKED OR COLOURED] AND GELATIN DERIVATIVES; ISINGLASS; OTHER GLUES OF ANIMAL ORIGIN, EXCLUDING CASEIN GLUES OF HEADING 3501:Gelatin [including gelatin in rectangular (including square) sheets, whether or not surface-worked or coloured] and gelatin derivatives; isinglass; other glues of animal origin, excluding casein glues of heading 3501:Other" +35040000,peptones and their derivatives other protein substances and their derivatives not elsewhere specified or included hide powder whether or not chromed peptones and their derivatives other protein substances and their derivatives not elsewhere specified or included hide powder whether or not chromed +35040010,"PEPTONES AND THEIR DERIVATIVES; OTHER PROTEIN SUBSTANCES AND THEIR DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED; HIDE POWDER, WHETHER OR NOT CHROMED:Peptones and their derivatives; other protein substances and their derivatives, not elsewhere specified or included; hide powder, whether or not chromed:Peptones" +35040091,"PEPTONES AND THEIR DERIVATIVES; OTHER PROTEIN SUBSTANCES AND THEIR DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED; HIDE POWDER, WHETHER OR NOT CHROMED:Peptones and their derivatives; other protein substances and their derivatives, not elsewhere specified or included; hide powder, whether or not chromed:Isolated soya protein" +35040099,"PEPTONES AND THEIR DERIVATIVES; OTHER PROTEIN SUBSTANCES AND THEIR DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED; HIDE POWDER, WHETHER OR NOT CHROMED:Peptones and their derivatives; other protein substances and their derivatives, not elsewhere specified or included; hide powder, whether or not chromed:Others" +35050000,dextrins and other modified starches for example pregelatinised or esterified starches glues based on starches or on dextrins or other modified starches +35051000,dextrins and other modified starches for example pregelatinised or esterified starches glues based on starches or 
on dextrins or other modified starches >> dextrins and other modified starches +35051010,"DEXTRINS AND OTHER MODIFIED STARCHES (FOR EXAMPLE, PREGELATINISED OR ESTERIFIED STARCHES); GLUES BASED ON STARCHES, OR ON DEXTRINS OR OTHER MODIFIED STARCHES:Dextrins and other modified starches:Esterified starches" +35051090,"DEXTRINS AND OTHER MODIFIED STARCHES (FOR EXAMPLE, PREGELATINISED OR ESTERIFIED STARCHES); GLUES BASED ON STARCHES, OR ON DEXTRINS OR OTHER MODIFIED STARCHES:Dextrins and other modified starches:Other" +35052000,"DEXTRINS AND OTHER MODIFIED STARCHES (FOR EXAMPLE, PREGELATINISED OR ESTERIFIED STARCHES); GLUES BASED ON STARCHES, OR ON DEXTRINS OR OTHER MODIFIED STARCHES::Glues" +35060000,prepared glues and other prepared adhesives not elsewhere specified or included products suitable for use as glues or adhesives put up for retail sale as glues or not adhesives exceeding a net weight of 1kg +35061000,"PREPARED GLUES AND OTHER PREPARED ADHESIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED; PRODUCTS SUITABLE FOR USE AS GLUES OR ADHESIVES, PUT UP FOR RETAIL SALE AS GLUES OR ADHESIVES, NOT EXCEEDING A NET WEIGHT OF 1 KG::Products suitable for use as glues or adhesives, put up for retail sale as glues or adhesives, not exceeding a net weight of 1[1 kg)" +35069100,prepared glues and other prepared adhesives not elsewhere specified or included products suitable for use as glues or adhesives put up for retail sale as glues or not adhesives exceeding a net weight of 1kg >> adhesives based on polymers of headings 3901 to 3913 or on rubber +35069110,"PREPARED GLUES AND OTHER PREPARED ADHESIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED; PRODUCTS SUITABLE FOR USE AS GLUES OR ADHESIVES, PUT UP FOR RETAIL SALE AS GLUES OR ADHESIVES, NOT EXCEEDING A NET WEIGHT OF 1 KG:Adhesives based on polymers of headings 3901 to 3913 or on rubber:Based on latex, phenol formaldehyde (PF), urea formaldehyde (UF) and polyvinyl alcohol (PVA)" +35069190,"PREPARED GLUES AND OTHER PREPARED ADHESIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED; PRODUCTS SUITABLE FOR USE AS GLUES OR ADHESIVES, PUT UP FOR RETAIL SALE AS GLUES OR ADHESIVES, NOT EXCEEDING A NET WEIGHT OF 1 KG:Adhesives based on polymers of headings 3901 to 3913 or on rubber:Other" +35069900,prepared glues and other prepared adhesives not elsewhere specified or included products suitable for use as glues or adhesives put up for retail sale as glues or not adhesives exceeding a net weight of 1kg >> other +35069910,"PREPARED GLUES AND OTHER PREPARED ADHESIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED; PRODUCTS SUITABLE FOR USE AS GLUES OR ADHESIVES, PUT UP FOR RETAIL SALE AS GLUES OR ADHESIVES, NOT EXCEEDING A NET WEIGHT OF 1 KG:Other :Synthetic glue with phenol urea or cresol (with formaldehyde) as the main component" +35069991,"PREPARED GLUES AND OTHER PREPARED ADHESIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED; PRODUCTS SUITABLE FOR USE AS GLUES OR ADHESIVES, PUT UP FOR RETAIL SALE AS GLUES OR ADHESIVES, NOT EXCEEDING A NET WEIGHT OF 1 KG:Other :Based on starch, gum, latex, PF, UF and PVA" +35069999,"PREPARED GLUES AND OTHER PREPARED ADHESIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED; PRODUCTS SUITABLE FOR USE AS GLUES OR ADHESIVES, PUT UP FOR RETAIL SALE AS GLUES OR ADHESIVES, NOT EXCEEDING A NET WEIGHT OF 1 KG:Other :Other" +35070000,enzymes prepared enzymes not elsewhere specified or included +35071000,enzymes prepared enzymes not elsewhere specified or included >> rennet and concentrates thereof microbial rennet +35071011,ENZYMES; PREPARED ENZYMES NOT ELSEWHERE SPECIFIED OR 
INCLUDED:Rennet and concentrates thereof:Animal rennet +35071019,ENZYMES; PREPARED ENZYMES NOT ELSEWHERE SPECIFIED OR INCLUDED:Rennet and concentrates thereof:Other +35071091,ENZYMES; PREPARED ENZYMES NOT ELSEWHERE SPECIFIED OR INCLUDED:Rennet and concentrates thereof:Animal rennet +35071099,ENZYMES; PREPARED ENZYMES NOT ELSEWHERE SPECIFIED OR INCLUDED:Rennet and concentrates thereof:Other +35079000,enzymes prepared enzymes not elsewhere specified or included >> other +35079010,ENZYMES; PREPARED ENZYMES NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Industrial enzymes (textile assistant) +35079020,ENZYMES; PREPARED ENZYMES NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Pancretin pure (excluding medicament) +35079030,ENZYMES; PREPARED ENZYMES NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Pepsin (excluding medicament) +35079040,ENZYMES; PREPARED ENZYMES NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Pectin esterases pure +35079050,ENZYMES; PREPARED ENZYMES NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Pectolytic enzyme (pectimase) +35079061,ENZYMES; PREPARED ENZYMES NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Streptokinase +35079062,ENZYMES; PREPARED ENZYMES NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Amylases enzymes +35079069,ENZYMES; PREPARED ENZYMES NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Other +35079071,"ENZYMES; PREPARED ENZYMES NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Papain, pure, of pharmaceutical grade" +35079079,ENZYMES; PREPARED ENZYMES NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Other +35079091,ENZYMES; PREPARED ENZYMES NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Enzymatic preparations containing food stuffs +35079099,ENZYMES; PREPARED ENZYMES NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Other +36010000,propellant powders propellant powders +36010010,PROPELLANT POWDERS:Propellant powders :Blasting powder +36010020,PROPELLANT POWDERS:Propellant powders :Gun powder +36010090,PROPELLANT POWDERS:Propellant powders :Other +36020000,prepared explosives other than propellant powders prepared explosives other than propellant powders +36020010,"PREPARED EXPLOSIVES, OTHER THAN PROPELLANT POWDERS:Prepared explosives, other than propellant powders :Industrial explosives" +36020090,"PREPARED EXPLOSIVES, OTHER THAN PROPELLANT POWDERS:Prepared explosives, other than propellant powders :Other" +36030000,detonating caps igniters electric detonators +36030011,SAFETY FUSES; DETONATING FUSES; PERCUSSION OR DETONATING CAPS; IGNITERS; ELECTRIC DETONATORS:Safety fuses; detonating fuses; percussion or detonating caps; igniters; electric detonators:For mine blasting(OLD tariff) +36030019,SAFETY FUSES; DETONATING FUSES; PERCUSSION OR DETONATING CAPS; IGNITERS; ELECTRIC DETONATORS:Safety fuses; detonating fuses; percussion or detonating caps; igniters; electric detonators:Other(OLD tariff) +36030020,SAFETY FUSES; DETONATING FUSES; PERCUSSION OR DETONATING CAPS; IGNITERS; ELECTRIC DETONATORS:Safety fuses; detonating fuses; percussion or detonating caps; igniters; electric detonators:Detonating fuses(OLD tariff) +36030031,SAFETY FUSES; DETONATING FUSES; PERCUSSION OR DETONATING CAPS; IGNITERS; ELECTRIC DETONATORS:Safety fuses; detonating fuses; percussion or detonating caps; igniters; electric detonators:Non-ordnance(OLD tariff) +36030039,SAFETY FUSES; DETONATING FUSES; PERCUSSION OR DETONATING CAPS; IGNITERS; ELECTRIC DETONATORS:Safety fuses; detonating fuses; percussion or detonating caps; igniters; electric detonators:Other(OLD tariff) +36030041,SAFETY FUSES; DETONATING FUSES; PERCUSSION OR DETONATING CAPS; IGNITERS; 
ELECTRIC DETONATORS:Safety fuses; detonating fuses; percussion or detonating caps; igniters; electric detonators:Non-ordnance(OLD tariff) +36030049,SAFETY FUSES; DETONATING FUSES; PERCUSSION OR DETONATING CAPS; IGNITERS; ELECTRIC DETONATORS:Safety fuses; detonating fuses; percussion or detonating caps; igniters; electric detonators:Other(OLD tariff) +36030051,"SAFETY FUSES; DETONATING FUSES; PERCUSSION OR DETONATING CAPS; IGNITERS; ELECTRIC DETONATORS:Safety fuses; detonating fuses; percussion or detonating caps; igniters; electric detonators:Containing explosives electrically ignited, non-ordnance(OLD tariff)" +36030059,SAFETY FUSES; DETONATING FUSES; PERCUSSION OR DETONATING CAPS; IGNITERS; ELECTRIC DETONATORS:Safety fuses; detonating fuses; percussion or detonating caps; igniters; electric detonators:Other(OLD tariff) +36031000,SAFETY FUSES; DETONATING FUSES; PERCUSSION OR DETONATING CAPS; IGNITERS; ELECTRIC DETONATORS:Safety fuses:Safety fuses(OLD tariff) +36032000,SAFETY FUSES; DETONATING FUSES; PERCUSSION OR DETONATING CAPS; IGNITERS; ELECTRIC DETONATORS:cords:detonating cords(OLD tariff) +36033000,SAFETY FUSES; DETONATING FUSES; PERCUSSION OR DETONATING CAPS; IGNITERS; ELECTRIC DETONATORS:caps:Percussion caps(OLD tariff) +36034000,SAFETY FUSES; DETONATING FUSES; PERCUSSION OR DETONATING CAPS; IGNITERS; ELECTRIC DETONATORS:detonating caps:Detonating caps +36035000,SAFETY FUSES; DETONATING FUSES; PERCUSSION OR DETONATING CAPS; IGNITERS; ELECTRIC DETONATORS:igniters:Igniters +36036000,SAFETY FUSES; DETONATING FUSES; PERCUSSION OR DETONATING CAPS; IGNITERS; ELECTRIC DETONATORS:electric:Electric detonators +36040000,fireworks signalling flares rain rock ets fog signals and other pyrotechnic articles +36041000,"FIREWORKS, SIGNALLING FLARES, RAIN ROCKETS, FOG SIGNALS AND OTHER PYROTECHNIC ARTICLES::Fireworks" +36049000,fireworks signalling flares rain rock ets fog signals and other pyrotechnic articles >> other +36049010,"FIREWORKS, SIGNALLING FLARES, RAIN ROCKETS, FOG SIGNALS AND OTHER PYROTECHNIC ARTICLES:Other:Ship signals" +36049090,"FIREWORKS, SIGNALLING FLARES, RAIN ROCKETS, FOG SIGNALS AND OTHER PYROTECHNIC ARTICLES:Other:Other" +36050000,matches other than pyrotechnic articles of heading 3604 matches other than pyrotechnic articles of heading 3604 +36050010,"MATCHES, OTHER THAN PYROTECHNIC ARTICLES OF HEADING 3604:Matches, other than pyrotechnic articles of heading 3604 :Safety matches" +36050090,"MATCHES, OTHER THAN PYROTECHNIC ARTICLES OF HEADING 3604:Matches, other than pyrotechnic articles of heading 3604 :Other" +36060000,and other pyrophoric alloys in all forms articles of combustible materials as specified in note 2 to this chapter +36061000,FERRO-CERIUM AND OTHER PYROPHORIC ALLOYS IN ALL FORMS; ARTICLES OF COMBUSTIBLE MATERIALS AS SPECIFIED IN NOTE 2 TO THIS CHAPTER::Liquid or liquefied-gas fuels in containers of a kind used for filling or refilling cigarette or similar lighters and of a capacity not exceeding 300 cm3 +36069000,and other pyrophoric alloys in all forms articles of combustible materials as specified in note 2 to this chapter >> other +36069010,FERRO-CERIUM AND OTHER PYROPHORIC ALLOYS IN ALL FORMS; ARTICLES OF COMBUSTIBLE MATERIALS AS SPECIFIED IN NOTE 2 TO THIS CHAPTER:Other :Combustible preparations +36069091,"FERRO-CERIUM AND OTHER PYROPHORIC ALLOYS IN ALL FORMS; ARTICLES OF COMBUSTIBLE MATERIALS AS SPECIFIED IN NOTE 2 TO THIS CHAPTER:Other :Ferro-cerium, in all forms" +36069092,"FERRO-CERIUM AND OTHER PYROPHORIC ALLOYS IN ALL FORMS; ARTICLES OF COMBUSTIBLE 
MATERIALS AS SPECIFIED IN NOTE 2 TO THIS CHAPTER:Other :Pyrophoric alloys, in all forms" +36069093,FERRO-CERIUM AND OTHER PYROPHORIC ALLOYS IN ALL FORMS; ARTICLES OF COMBUSTIBLE MATERIALS AS SPECIFIED IN NOTE 2 TO THIS CHAPTER:Other :DNPT (dinitroso-penta-methylene tetramine) +36069099,FERRO-CERIUM AND OTHER PYROPHORIC ALLOYS IN ALL FORMS; ARTICLES OF COMBUSTIBLE MATERIALS AS SPECIFIED IN NOTE 2 TO THIS CHAPTER:Other :Others +37010000,photographic plates and film in the flat any material sensitised unexposed of other than paper paperboard or textiles instant in the flat sensitised in packs unexposed whether or not +37011000,photographic plates and film in the flat any material sensitised unexposed of other than paper paperboard or textiles instant in the flat sensitised in packs unexposed whether or not >> for +37011010,"PHOTOGRAPHIC PLATES AND FILM IN THE FLAT, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPERBOARD OR TEXTILES; INSTANT PRINT-FILM IN THE FLAT, SENSITISED, UNEXPOSED, WHETHER OR NOT IN PACKS:For X-ray :Medical" +37011090,"PHOTOGRAPHIC PLATES AND FILM IN THE FLAT, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPERBOARD OR TEXTILES; INSTANT PRINT-FILM IN THE FLAT, SENSITISED, UNEXPOSED, WHETHER OR NOT IN PACKS:For X-ray :Other" +37012000,"PHOTOGRAPHIC PLATES AND FILM IN THE FLAT, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPERBOARD OR TEXTILES; INSTANT PRINT-FILM IN THE FLAT, SENSITISED, UNEXPOSED, WHETHER OR NOT IN PACKS::Instant print film" +37013000,"PHOTOGRAPHIC PLATES AND FILM IN THE FLAT, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPERBOARD OR TEXTILES; INSTANT PRINT-FILM IN THE FLAT, SENSITISED, UNEXPOSED, WHETHER OR NOT IN PACKS::Other plates and film, with any side exceeding 255 mm" +37019100,photographic plates and film in the flat any material sensitised unexposed of other than paper paperboard or textiles instant in the flat sensitised in packs unexposed whether or not >> for colour photography polychrome +37019110,"PHOTOGRAPHIC PLATES AND FILM IN THE FLAT, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPERBOARD OR TEXTILES; INSTANT PRINT-FILM IN THE FLAT, SENSITISED, UNEXPOSED, WHETHER OR NOT IN PACKS:For colour photography (Polychrome):Cinematographic film" +37019190,"PHOTOGRAPHIC PLATES AND FILM IN THE FLAT, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPERBOARD OR TEXTILES; INSTANT PRINT-FILM IN THE FLAT, SENSITISED, UNEXPOSED, WHETHER OR NOT IN PACKS:For colour photography (Polychrome):Other" +37019900,photographic plates and film in the flat any material sensitised unexposed of other than paper paperboard or textiles instant in the flat sensitised in packs unexposed whether or not >> other +37019910,"PHOTOGRAPHIC PLATES AND FILM IN THE FLAT, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPERBOARD OR TEXTILES; INSTANT PRINT-FILM IN THE FLAT, SENSITISED, UNEXPOSED, WHETHER OR NOT IN PACKS:Other :Cinematographic film" +37019990,"PHOTOGRAPHIC PLATES AND FILM IN THE FLAT, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPERBOARD OR TEXTILES; INSTANT PRINT-FILM IN THE FLAT, SENSITISED, UNEXPOSED, WHETHER OR NOT IN PACKS:Other :Other" +37021000,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED::For X-ray(OLD tariff)" +37023110,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM 
IN ROLLS, SENSITISED, UNEXPOSED:For colour photography (Polychrome):Cinematographic film(OLD tariff)" +37023190,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:For colour photography (Polychrome):Other(OLD tariff)" +37023210,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Other, with silver halide emulsion:Cinematographic film(OLD tariff)" +37023290,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Other, with silver halide emulsion:Other(OLD tariff)" +37023910,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Other :Cinematographic film(OLD tariff)" +37023990,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Other :Other(OLD tariff)" +37024110,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width exceeding 610 mm and of a length:Cinematographic film(OLD tariff)" +37024190,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width exceeding 610 mm and of a length:Other(OLD tariff)" +37024210,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width exceeding 610 mm and of a length exceeding 200 m, other than for colour photography :Photographic film of a width 620 mm in rolls(OLD tariff)" +37024220,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width exceeding 610 mm and of a length exceeding 200 m, other than for colour photography :Cinematographic film(OLD tariff)" +37024290,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width exceeding 610 mm and of a length exceeding 200 m, other than for colour photography :Other(OLD tariff)" +37024310,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width exceeding 610 mm and of a length not exceeding 200 m:Photographic films (black and white) of a width 620 mm(OLD tariff)" +37024320,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width exceeding 610 mm and of a length not exceeding 200 m:Cinematographic film(OLD tariff)" +37024390,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width exceeding 610 mm and of a length not exceeding 200 m:Other(OLD tariff)" +37024410,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR 
TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width exceeding 105 mm but not exceeding 610 mm:Photographic films of a width 120 mm in rolls(OLD tariff)" +37024420,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width exceeding 105 mm but not exceeding 610 mm:Cinematographic film(OLD tariff)" +37024490,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width exceeding 105 mm but not exceeding 610 mm:Other(OLD tariff)" +37025210,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width not exceeding 16 mm and of a length exceeding 14 m :Finished rolles of cinematographic positive(OLD tariff)" +37025220,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width not exceeding 16 mm and of a length exceeding 14 m :Other Cinematographic film(OLD tariff)" +37025290,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width not exceeding 16 mm and of a length exceeding 14 m :Other(OLD tariff)" +37025300,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED::Of a width exceeding 16 mm but not exceeding 35 mm and of a length not exceeding 30 m, for slides(OLD tariff)" +37025410,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width exceeding 16 mm but not exceeding 35 mm and of a length not exceeding 30 m, other than for slides:Finished rolls of cinematographic positive(OLD tariff)" +37025420,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width exceeding 16 mm but not exceeding 35 mm and of a length not exceeding 30 m, other than for slides:Other cinematographic film(OLD tariff)" +37025490,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width exceeding 16 mm but not exceeding 35 mm and of a length not exceeding 30 m, other than for slides:Other(OLD tariff)" +37025510,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width exceeding 16 mm but not exceeding 35 mm and of a length exceeding 30 m :Finished rolls of cinematographic positive(OLD tariff)" +37025520,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width exceeding 16 mm but not exceeding 35 mm and of a length exceeding 30 m :Other cinematographic film(OLD tariff)" +37025590,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width exceeding 16 mm but not 
exceeding 35 mm and of a length exceeding 30 m :Other(OLD tariff)" +37025610,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width exceeding 35 mm :Finished rolls of cinematographic positive(OLD tariff)" +37025620,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width exceeding 35 mm :Other cinematographic film(OLD tariff)" +37025690,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width exceeding 35 mm :Other(OLD tariff)" +37029611,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width not exceeding 35 mm and of a length not exceeding 30 m:Cinematographic film Not exceeding 16 mm(OLD tariff)" +37029619,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width not exceeding 35 mm and of a length not exceeding 30 m:Other(OLD tariff)" +37029711,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width not exceeding 35 mm and of a length exceeding 30 m Cinematographic film:Not exceeding 16 mm(OLD tariff)" +37029719,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width not exceeding 35 mm and of a length exceeding 30 m Cinematographic film:Other(OLD tariff)" +37029810,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width exceeding 35 mm:Cinematographic film(OLD tariff)" +37029890,"PHOTOGRAPHIC FILM IN ROLLS, SENSITISED, UNEXPOSED, OF ANY MATERIAL OTHER THAN PAPER, PAPER-BOARD OR TEXTILES; INSTANT PRINT FILM IN ROLLS, SENSITISED, UNEXPOSED:Of a width exceeding 35 mm:Other(OLD tariff)" +37030000,photographic paper paperboard and textiles sensitised unexposed +37031000,photographic paper paperboard and textiles sensitised unexposed >> in rolls of a width exceeding 610 mm +37031010,"PHOTOGRAPHIC PAPER, PAPERBOARD AND TEXTILES, SENSITISED, UNEXPOSED:In rolls of a width exceeding 610 mm:Photographic paper or paperboard" +37031020,"PHOTOGRAPHIC PAPER, PAPERBOARD AND TEXTILES, SENSITISED, UNEXPOSED:In rolls of a width exceeding 610 mm:Textiles" +37032000,photographic paper paperboard and textiles sensitised unexposed >> other for colour photography polychrome +37032010,"PHOTOGRAPHIC PAPER, PAPERBOARD AND TEXTILES, SENSITISED, UNEXPOSED:Other, for colour photography (polychrome):Photographic paper or paperboard" +37032020,"PHOTOGRAPHIC PAPER, PAPERBOARD AND TEXTILES, SENSITISED, UNEXPOSED:Other, for colour photography (polychrome):Textiles" +37039000,photographic paper paperboard and textiles sensitised unexposed >> other +37039010,"PHOTOGRAPHIC PAPER, PAPERBOARD AND TEXTILES, SENSITISED, UNEXPOSED:Other :Photographic paper or paperboard" +37039020,"PHOTOGRAPHIC PAPER, PAPERBOARD AND TEXTILES, SENSITISED, UNEXPOSED:Other :Textiles" +37040000,photographic plates film paper paper 
board and textiles exposed but not developed photographic plates film paper paper board and textiles exposed but not developed +37040010,"PHOTOGRAPHIC PLATES, FILM, PAPER, PAPER BOARD AND TEXTILES, EXPOSED BUT NOT DEVELOPED:Photographic plates, film, paper, paper board and textiles, exposed but not developed :Photographic paper, or paperboard" +37040020,"PHOTOGRAPHIC PLATES, FILM, PAPER, PAPER BOARD AND TEXTILES, EXPOSED BUT NOT DEVELOPED:Photographic plates, film, paper, paper board and textiles, exposed but not developed :Cinematographic plates and film" +37040030,"PHOTOGRAPHIC PLATES, FILM, PAPER, PAPER BOARD AND TEXTILES, EXPOSED BUT NOT DEVELOPED:Photographic plates, film, paper, paper board and textiles, exposed but not developed :Sensitised textiles" +37040090,"PHOTOGRAPHIC PLATES, FILM, PAPER, PAPER BOARD AND TEXTILES, EXPOSED BUT NOT DEVELOPED:Photographic plates, film, paper, paper board and textiles, exposed but not developed :Other" +37050000,"PHOTOGRAPHIC PLATES AND FILM, EXPOSED AND DEVELOPED, OTHER THAN CINEMATOGRAPHIC FILM::PHOTOGRAPHIC PLATES AND FILM, EXPOSED AND DEVELOPED, OTHER THAN CINEMATOGRAPHIC FILM" +37051000,"PHOTOGRAPHIC PLATES AND FILM, EXPOSED AND DEVELOPED, OTHER THAN CINEMATOGRAPHIC FILM::For offset reproduction(OLD tariff)" +37059010,"PHOTOGRAPHIC PLATES AND FILM, EXPOSED AND DEVELOPED, OTHER THAN CINEMATOGRAPHIC FILM:Other :Microfiches(OLD tariff)" +37059090,"PHOTOGRAPHIC PLATES AND FILM, EXPOSED AND DEVELOPED, OTHER THAN CINEMATOGRAPHIC FILM:Other :Other(OLD tariff)" +37060000,cinematographic film exposed and developed whether or not incorporating sound track or consisting only of sound track +37061000,cinematographic film exposed and developed whether or not incorporating sound track or consisting only of sound track >> of a width of 35 mm or more feature films +37061011,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Of a width of 35 mm or more :Made wholly in black and white and of a length not exceeding 4,000 m" +37061012,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Of a width of 35 mm or more :Made wholly in black and white and of a length exceeding 4,000 m" +37061013,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Of a width of 35 mm or more :Made wholly or partly in colour and of a length not exceeding 4,000 m" +37061014,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Of a width of 35 mm or more :Made wholly or partly in colour and of a length exceeding 4,000 m" +37061015,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Of a width of 35 mm or more :Children's films certified by the Central Board of Film Certification to be Children's Film" +37061020,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Of a width of 35 mm or more :Documentary shorts, and films certified as such by the Central Board of Film Certification" +37061030,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Of a width of 35 mm or more :News Reels and clippings" +37061041,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF 
SOUND TRACK:Of a width of 35 mm or more :Made wholly in black and white" +37061042,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Of a width of 35 mm or more :Made wholly or partly in colour" +37061051,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Of a width of 35 mm or more :Patch prints, including Logos intended" +37061052,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Of a width of 35 mm or more :Children's film certified by the Central Board of Films Certification to be ""Children's Film""" +37061059,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Of a width of 35 mm or more :Other" +37061061,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Of a width of 35 mm or more :Certified as predominantly educational by the Central Board of Film Certification" +37061062,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Of a width of 35 mm or more :Patch prints, including logos intended exclusively for educational purposes" +37061063,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Of a width of 35 mm or more :Teaching aids including film strips of educational nature" +37061069,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Of a width of 35 mm or more :Other" +37061070,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Of a width of 35 mm or more :Short films not elsewhere specified or included" +37061091,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Of a width of 35 mm or more :Audio-visual news or audio-visual views materials including news clippings" +37061092,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Of a width of 35 mm or more :Master positives, exposed negatives, dupes and rush prints as are not cleared for public exhibitions" +37061099,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Of a width of 35 mm or more :Other" +37069000,cinematographic film exposed and developed whether or not incorporating sound track or consisting only of sound track >> other feature films +37069011,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Other :Made wholly in black and white and of a length not exceeding 4,000 m" +37069012,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Other :Made wholly in black and white and of a length exceeding 4,000 m" +37069013,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Other :Made wholly or partly in colour and of a length not exceeding 4,000 m" +37069014,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Other :Made wholly or 
partly in colour and of a length exceeding 4,000 m" +37069015,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Other :Children's films certified by the Central Board of Film Certification to be Children's Film" +37069020,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Other :Documentary shots, and films certified as such by the Central Board of Film Certification" +37069030,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Other :News reels and clippings" +37069041,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Other :Made wholly in black and white" +37069042,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Other :Made wholly or partly in colour" +37069051,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Other :Patch prints, including logos intended exclusively for the entertainment of children" +37069052,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Other :Children's film certified by the Central Board of Films Certification to be ""Children's film""" +37069059,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Other :Other" +37069061,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Other :Certified as predominantly educational by the Central Board of Film Certification" +37069062,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Other :Patch prints, including logos intended exclusively for educational purposes" +37069063,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Other :Teaching aids including film strips of educational nature" +37069064,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Other :Certified as predominantly educational, by Central Board of Film Certification, of width below 30mm" +37069069,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Other :Other" +37069070,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Other :Short film not elsewhere specified" +37069091,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Other :Audio-visual news or audio-visual views materials including news clippings" +37069092,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Other :Master positives, exposed negatives, dupes and rush prints as are not cleared for public exhibitions" +37069099,"CINEMATOGRAPHIC FILM, EXPOSED AND DEVELOPED, WHETHER OR NOT INCORPORATING SOUND TRACK OR CONSISTING ONLY OF SOUND TRACK:Other :Other" +37070000,chemical preparations for photographic uses other than varnishes glues adhesives and similar preparations unmixed products for 
photographic uses put up in measured portions or put up for retail sale in a form ready use +37071000,"CHEMICAL PREPARATIONS FOR PHOTOGRAPHIC USES (OTHER THAN VARNISHES, GLUES, ADHESIVES AND SIMILAR PREPARATIONS); UNMIXED PRODUCTS FOR PHOTOGRAPHIC USES, PUT UP IN MEASURED PORTIONS OR PUT UP FOR RETAIL SALE IN A FORM READY FOR USE::Sensitizing emulsions" +37079000,chemical preparations for photographic uses other than varnishes glues adhesives and similar preparations unmixed products for photographic uses put up in measured portions or put up for retail sale in a form ready use >> other +37079010,"CHEMICAL PREPARATIONS FOR PHOTOGRAPHIC USES (OTHER THAN VARNISHES, GLUES, ADHESIVES AND SIMILAR PREPARATIONS); UNMIXED PRODUCTS FOR PHOTOGRAPHIC USES, PUT UP IN MEASURED PORTIONS OR PUT UP FOR RETAIL SALE IN A FORM READY FOR USE:Other:Chemical products mixed or compounded for Photographic uses (for example, developers and fixers), whether or not in bulk" +37079090,"CHEMICAL PREPARATIONS FOR PHOTOGRAPHIC USES (OTHER THAN VARNISHES, GLUES, ADHESIVES AND SIMILAR PREPARATIONS); UNMIXED PRODUCTS FOR PHOTOGRAPHIC USES, PUT UP IN MEASURED PORTIONS OR PUT UP FOR RETAIL SALE IN A FORM READY FOR USE:Other:Other" +38010000,artificial graphite colloidal or semi colloidal graphite preparations based on graphite or other carbon in the form of pastes blocks plates or other semi manufactures +38011000,"ARTIFICIAL GRAPHITE; COLLOIDAL OR SEMI-COLLOIDAL GRAPHITE; PREPARATIONS BASED ON GRAPHITE OR OTHER CARBON IN THE FORM OF PASTES, BLOCKS, PLATES OR OTHER SEMI-MANUFACTURES::Artificial graphite" +38012000,"ARTIFICIAL GRAPHITE; COLLOIDAL OR SEMI-COLLOIDAL GRAPHITE; PREPARATIONS BASED ON GRAPHITE OR OTHER CARBON IN THE FORM OF PASTES, BLOCKS, PLATES OR OTHER SEMI-MANUFACTURES::Colloidal or semi-colloidal graphite" +38013000,"ARTIFICIAL GRAPHITE; COLLOIDAL OR SEMI-COLLOIDAL GRAPHITE; PREPARATIONS BASED ON GRAPHITE OR OTHER CARBON IN THE FORM OF PASTES, BLOCKS, PLATES OR OTHER SEMI-MANUFACTURES::Carbonaceous pastes for electrodes and similar pastes for furnace linings" +38019000,"ARTIFICIAL GRAPHITE; COLLOIDAL OR SEMI-COLLOIDAL GRAPHITE; PREPARATIONS BASED ON GRAPHITE OR OTHER CARBON IN THE FORM OF PASTES, BLOCKS, PLATES OR OTHER SEMI-MANUFACTURES::Other" +38020000,activated natural activated carbon mineral animal including products black spent animal black +38021000,"ACTIVATED CARBON; ACTIVATED NATURAL MINERAL PRODUCTS; ANIMAL BLACK, INCLUDING SPENT ANIMAL BLACK::Activated carbon" +38029000,activated natural activated carbon mineral animal including products black spent animal black >> other activated natural mineral products +38029011,"ACTIVATED CARBON; ACTIVATED NATURAL MINERAL PRODUCTS; ANIMAL BLACK, INCLUDING SPENT ANIMAL BLACK:Other:Activated alumina" +38029012,"ACTIVATED CARBON; ACTIVATED NATURAL MINERAL PRODUCTS; ANIMAL BLACK, INCLUDING SPENT ANIMAL BLACK:Other:Activated bauxite" +38029019,"ACTIVATED CARBON; ACTIVATED NATURAL MINERAL PRODUCTS; ANIMAL BLACK, INCLUDING SPENT ANIMAL BLACK:Other:Other" +38029020,"ACTIVATED CARBON; ACTIVATED NATURAL MINERAL PRODUCTS; ANIMAL BLACK, INCLUDING SPENT ANIMAL BLACK:Other:Animal black (for example bone black, ivory black), including spent animal black" +38030000,"::TALL OIL, WHETHER OR NOT REFINED" +38040000,residual lyes from the manufacture of terpenic oils produced by the distillation or other treatment of coniferous woods crude dipentene sulphite turpentine and other crude para cymene pine oil containing alpha terpineol as the main constituent +38040010,"RESIDUAL LYES 
FROM THE MANUFACTURE OF WOOD PULP, WHETHER OR NOT CONCENTRATED, DESUGARED OR CHEMICALLY TREATED, INCLUDING LIGNIN SULPHONATES, BUT EXCLUDING TALL OIL OF HEADING 3803:Residual lyes from the manufacture of wood pulp, whether or not concentrated, desugared or chemically treated, including lignin sulphonates, but excluding tall oil of heading 3803:Lignin sulphonates(OLD tariff)" +38040020,"RESIDUAL LYES FROM THE MANUFACTURE OF WOOD PULP, WHETHER OR NOT CONCENTRATED, DESUGARED OR CHEMICALLY TREATED, INCLUDING LIGNIN SULPHONATES, BUT EXCLUDING TALL OIL OF HEADING 3803:Residual lyes from the manufacture of wood pulp, whether or not concentrated, desugared or chemically treated, including lignin sulphonates, but excluding tall oil of heading 3803:Concentrated sulphate lye(OLD tariff)" +38040090,"RESIDUAL LYES FROM THE MANUFACTURE OF WOOD PULP, WHETHER OR NOT CONCENTRATED, DESUGARED OR CHEMICALLY TREATED, INCLUDING LIGNIN SULPHONATES, BUT EXCLUDING TALL OIL OF HEADING 3803:Residual lyes from the manufacture of wood pulp, whether or not concentrated, desugared or chemically treated, including lignin sulphonates, but excluding tall oil of heading 3803:Other(OLD tariff)" +38051010,"GUM, WOOD OR SULPHATE TURPENTINE AND OTHER TERPENIC OILS PRODUCED BY THE DISTILLATION OR OTHER TREATMENT OF CONIFEROUS WOODS; CRUDE DIPENTENE; SULPHITE TURPENTINE AND OTHER CRUDE PARA-CYMENE;:Gum, wood or sulphate turpentine oils:Wood turpentine oil and spirit of turpentine(OLD tariff)" +38051020,"GUM, WOOD OR SULPHATE TURPENTINE AND OTHER TERPENIC OILS PRODUCED BY THE DISTILLATION OR OTHER TREATMENT OF CONIFEROUS WOODS; CRUDE DIPENTENE; SULPHITE TURPENTINE AND OTHER CRUDE PARA-CYMENE;:Gum, wood or sulphate turpentine oils:Gum turpentine oil(OLD tariff)" +38051030,"GUM, WOOD OR SULPHATE TURPENTINE AND OTHER TERPENIC OILS PRODUCED BY THE DISTILLATION OR OTHER TREATMENT OF CONIFEROUS WOODS; CRUDE DIPENTENE; SULPHITE TURPENTINE AND OTHER CRUDE PARA-CYMENE;:Gum, wood or sulphate turpentine oils:Sulphate turpentine oil(OLD tariff)" +38059010,"GUM, WOOD OR SULPHATE TURPENTINE AND OTHER TERPENIC OILS PRODUCED BY THE DISTILLATION OR OTHER TREATMENT OF CONIFEROUS WOODS; CRUDE DIPENTENE; SULPHITE TURPENTINE AND OTHER CRUDE PARA-CYMENE;:Other:Terpenic oils produced by the distillation or other treatment of coniferous woods(OLD tariff)" +38059020,"GUM, WOOD OR SULPHATE TURPENTINE AND OTHER TERPENIC OILS PRODUCED BY THE DISTILLATION OR OTHER TREATMENT OF CONIFEROUS WOODS; CRUDE DIPENTENE; SULPHITE TURPENTINE AND OTHER CRUDE PARA-CYMENE;:Other:Crude dipentene(OLD tariff)" +38059030,"GUM, WOOD OR SULPHATE TURPENTINE AND OTHER TERPENIC OILS PRODUCED BY THE DISTILLATION OR OTHER TREATMENT OF CONIFEROUS WOODS; CRUDE DIPENTENE; SULPHITE TURPENTINE AND OTHER CRUDE PARA-CYMENE;:Other:Sulphite turpentine(OLD tariff)" +38059090,"GUM, WOOD OR SULPHATE TURPENTINE AND OTHER TERPENIC OILS PRODUCED BY THE DISTILLATION OR OTHER TREATMENT OF CONIFEROUS WOODS; CRUDE DIPENTENE; SULPHITE TURPENTINE AND OTHER CRUDE PARA-CYMENE;:Other:Other(OLD tariff)" +38060000,and resin and derivatives rosin acids thereof rosin spirit and rosin oils run gums +38061000,and resin and derivatives rosin acids thereof rosin spirit and rosin oils run gums >> rosin and resin acids +38061010,"ROSIN AND RESIN ACIDS, AND DERIVATIVES THEREOF; ROSIN SPIRIT AND ROSIN OILS; RUN GUMS:Rosin and resin acids:Gum rosin" +38061090,"ROSIN AND RESIN ACIDS, AND DERIVATIVES THEREOF; ROSIN SPIRIT AND ROSIN OILS; RUN GUMS:Rosin and resin acids:Other" +38062000,"ROSIN AND RESIN ACIDS, AND 
DERIVATIVES THEREOF; ROSIN SPIRIT AND ROSIN OILS; RUN GUMS::Salts of rosin, of resin acids or of derivatives of rosin or resin acids, other than salts of rosin adducts" +38063000,"ROSIN AND RESIN ACIDS, AND DERIVATIVES THEREOF; ROSIN SPIRIT AND ROSIN OILS; RUN GUMS::Ester gums" +38069000,and resin and derivatives rosin acids thereof rosin spirit and rosin oils run gums >> other +38069010,"ROSIN AND RESIN ACIDS, AND DERIVATIVES THEREOF; ROSIN SPIRIT AND ROSIN OILS; RUN GUMS:Other:Run gums" +38069090,"ROSIN AND RESIN ACIDS, AND DERIVATIVES THEREOF; ROSIN SPIRIT AND ROSIN OILS; RUN GUMS:Other:Other" +38070000,wood tar wood tar oils wood creosote wood naphtha vegetable pitch brewers pitch and similar preparations based on rosin resin acids or on vegetable pitch wood tar wood tar oils wood creosote wood naphtha vegetable pitch brewers pitch and similar preparations based on rosin resin acids or on vegetable pitch +38070010,"WOOD TAR; WOOD TAR OILS; WOOD CREOSOTE; WOOD NAPHTHA; VEGETABLE PITCH; BREWERS PITCH AND SIMILAR PREPARATIONS BASED ON ROSIN, RESIN ACIDS OR ON VEGETABLE PITCH:Wood tar; wood tar oils; wood creosote; wood naphtha; vegetable pitch; brewers pitch and similar preparations based on rosin, resin acids or on vegetable pitch:Wood tar" +38070020,"WOOD TAR; WOOD TAR OILS; WOOD CREOSOTE; WOOD NAPHTHA; VEGETABLE PITCH; BREWERS PITCH AND SIMILAR PREPARATIONS BASED ON ROSIN, RESIN ACIDS OR ON VEGETABLE PITCH:Wood tar; wood tar oils; wood creosote; wood naphtha; vegetable pitch; brewers pitch and similar preparations based on rosin, resin acids or on vegetable pitch:Wood Tar oils, wood creosote, wood naphtha" +38070030,"WOOD TAR; WOOD TAR OILS; WOOD CREOSOTE; WOOD NAPHTHA; VEGETABLE PITCH; BREWERS PITCH AND SIMILAR PREPARATIONS BASED ON ROSIN, RESIN ACIDS OR ON VEGETABLE PITCH:Wood tar; wood tar oils; wood creosote; wood naphtha; vegetable pitch; brewers pitch and similar preparations based on rosin, resin acids or on vegetable pitch:Vegetable pitch, brewers pitch and similar preparations based on rosin, resin acids or vegetable pitch" +38085000,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):aldrin (ISO); binapacryl (ISO); camphechlor(ISO) (toxaphene); captafol (ISO); chlordane (ISO); chlordimeform (ISO); chlorobenzilate (ISO); DDT (ISO) (clofenotane (INN), 1, 1, 1-trichloro-2,2- bis(p-chlorphenyl) ethane); dieldrin (ISO,INN), dinoseb (ISO), its salts or its esters; ethylene dibromide (ISO) (1,2-dibromoethane); ethylene dichloride (ISO); (1,2dicholoroethane);fluroacetamide (ISO); heptachlor(ISO); hexachlorobenzene (ISO); 1,2,3,4,5,6,-hexachlorocyclohexane (HCH (ISO)), including lindane (ISO, INN); mercury compounds; methamidophos (ISO; monocrotophos (ISO); oxirane parathion-methyl (ISO) (methyl-parathion (ISO); pentachlorophenol (ISO); phosphamidon (ISO); 2,4,5-T ((ISO) (2,4,5-trichlorophenoxyacetic acid, its salts or its esters;:aldrin (ISO); binapacryl (ISO);camphechlor (ISO) (toxaphene); captafol (ISO); chlordane (ISO); chlordimeform (ISO); chlorobenzilate (ISO); DDT(ISO) (clofenotane (INN); 1,1,1-trichloro-2,2-bis (p-chlorophenyl) ethane); dieldrin (ISO,INN), dinoseb (ISO), its salts or itsi esters; ethylene dibromide (ISO) (1,2-dibromoethane); ethylene dichloride(ISO); (1,2-dicholoroethane); fluoroacetamide (ISO) heptachlor (ISO), hexachlorobenzene 
(ISO); 1,2,3,4,5,6,-hexa- chlorocyclohexane (HCH (ISO)), including lindane (ISO, INN); mercury compounds; methamidophos (ISO; monocrotophos (ISO); oxirane parathion-methyl (ISO) (methyl-parathion (ISO); pentachlorophenol (ISO); phosphamidon (ISO); 2,4,5-T ((ISO) (2,4,5-trichlorophenoxyacetic acid, its salts or its esters; (OLD tariff)" +38085200,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS)::DDT (ISO) (clofenotane (INN)), in packings of a net weight content not exceeding 300 g.(OLD tariff)" +38085900,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS)::Other(OLD tariff)" +38086100,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS)::In packings of a net weight content not exceeding 300g(OLD tariff)" +38086200,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS)::In packings of a net weight content exceeding 300 g but not exceeding 7.5 kg(OLD tariff)" +38086900,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS)::Other(OLD tariff)" +38089111,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Insecticides:Aluminium phosphate (for example phostoxin)(OLD tariff)" +38089112,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Insecticides:Calcium cyanide(OLD tariff)" +38089113,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Insecticides:D.D.V.P. 
(Dimethyle-dichlorovinyl-phosphate)(OLD tariff)" +38089121,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Insecticides:Diaginal(OLD tariff)" +38089122,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Insecticides:Methyl bromide(OLD tariff)" +38089123,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Insecticides:Dimethoate, technical grade(OLD tariff)" +38089124,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Insecticides:Melathion(OLD tariff)" +38089131,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Insecticides:Endosulphan, technical grade(OLD tariff)" +38089132,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Insecticides:Quinol phos(OLD tariff)" +38089133,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Insecticides:Isoproturon (OLD tariff)" +38089134,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Insecticides:Fenthion(OLD tariff)" +38089135,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Insecticides:Cipermethrin, technical grade(OLD tariff)" +38089136,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Insecticides:Allethrin(OLD tariff)" +38089137,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH 
REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Insecticides:Synthetic pyrethrum(OLD tariff)" +38089191,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Insecticides:Repellants for insects such as flies, mosquito(OLD tariff)" +38089192,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Insecticides:Paper impregnated or coated with insecticides such as D.D.T. coated paper(OLD tariff)" +38089199,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Insecticides:Other(OLD tariff)" +38089210,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Fungicides;:Maneb(OLD tariff)" +38089220,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Fungicides;:Sodium penta chlorophenate (santrobrite)(OLD tariff)" +38089230,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Fungicides;:Thiram (tetra methyl thiuram disulphide)(OLD tariff)" +38089240,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Fungicides;:Zineb(OLD tariff)" +38089250,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Fungicides;:Copper oxychloride(OLD tariff)" +38089290,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Fungicides;:Other(OLD tariff)" +38089310,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS 
OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Herbicides, anti-sprouting products and plant-growth regulators:Chloromethyl phenozy acetic acid (M.C.P.A.)(OLD tariff)" +38089320,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Herbicides, anti-sprouting products and plant-growth regulators:24 Dichloro phenozy acetic acid and its esters(OLD tariff)" +38089330,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Herbicides, anti-sprouting products and plant-growth regulators:Gibberellic acid(OLD tariff)" +38089340,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Herbicides, anti-sprouting products and plant-growth regulators:Plant growth regulators(OLD tariff)" +38089350,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Herbicides, anti-sprouting products and plant-growth regulators:Weedicides and weed killing agent(OLD tariff)" +38089390,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Herbicides, anti-sprouting products and plant-growth regulators:Other(OLD tariff)" +38089400,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS)::Disinfectants(OLD tariff)" +38089910,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Other:Pesticides, not else where specified or included(OLD tariff)" +38089990,"INSECTICIDES, RODENTICIDES, FUNGICIDES, HERBICIDES, ANTI-SPROUTING PRODUCTS AND PLANT-GROWTH REGULATORS, DISINFECTANTS AND SIMILAR PRODUCTS, PUT UP IN FORMS OR PACKINGS FOR RETAIL SALE OR AS PREPARATIONS OR ARTICLES (FOR EXAMPLE, SULPHURTREATE BANDS, WICKS AND CANDLES, AND FLY-PAPERS):Other:Other(OLD tariff)" +38090000,dye carriers to finishing agents accelerate the dyeing or fixing of dye stuffs and other products and preparations for example dressings and mordants of a kind used leather or like in the textile paper not elsewhere specified or industries included +38091000,"FINISHING AGENTS, DYE 
CARRIERS TO ACCELERATE THE DYEING OR FIXING OF DYE-STUFFS AND OTHER PRODUCTS AND PREPARATIONS (FOR EXAMPLE, DRESSINGS AND MORDANTS), OF A KIND USED IN THE TEXTILE, PAPER, LEATHER OR LIKE INDUSTRIES, NOT ELSEWHERE SPECIFIED OR INCLUDED::With a basis of amylaceous substances" +38099100,dye carriers to finishing agents accelerate the dyeing or fixing of dye stuffs and other products and preparations for example dressings and mordants of a kind used leather or like in the textile paper not elsewhere specified or industries included >> of a kind used in the textile or like industries +38099110,"FINISHING AGENTS, DYE CARRIERS TO ACCELERATE THE DYEING OR FIXING OF DYE-STUFFS AND OTHER PRODUCTS AND PREPARATIONS (FOR EXAMPLE, DRESSINGS AND MORDANTS), OF A KIND USED IN THE TEXTILE, PAPER, LEATHER OR LIKE INDUSTRIES, NOT ELSEWHERE SPECIFIED OR INCLUDED:Of a kind used in the textile or like industries:Textile assistants mordanting agents" +38099120,"FINISHING AGENTS, DYE CARRIERS TO ACCELERATE THE DYEING OR FIXING OF DYE-STUFFS AND OTHER PRODUCTS AND PREPARATIONS (FOR EXAMPLE, DRESSINGS AND MORDANTS), OF A KIND USED IN THE TEXTILE, PAPER, LEATHER OR LIKE INDUSTRIES, NOT ELSEWHERE SPECIFIED OR INCLUDED:Of a kind used in the textile or like industries:Textile assistants desizing agents" +38099130,"FINISHING AGENTS, DYE CARRIERS TO ACCELERATE THE DYEING OR FIXING OF DYE-STUFFS AND OTHER PRODUCTS AND PREPARATIONS (FOR EXAMPLE, DRESSINGS AND MORDANTS), OF A KIND USED IN THE TEXTILE, PAPER, LEATHER OR LIKE INDUSTRIES, NOT ELSEWHERE SPECIFIED OR INCLUDED:Of a kind used in the textile or like industries:Textile assistants dispersing agents" +38099140,"FINISHING AGENTS, DYE CARRIERS TO ACCELERATE THE DYEING OR FIXING OF DYE-STUFFS AND OTHER PRODUCTS AND PREPARATIONS (FOR EXAMPLE, DRESSINGS AND MORDANTS), OF A KIND USED IN THE TEXTILE, PAPER, LEATHER OR LIKE INDUSTRIES, NOT ELSEWHERE SPECIFIED OR INCLUDED:Of a kind used in the textile or like industries:Textile assistants emulsifying agents" +38099150,"FINISHING AGENTS, DYE CARRIERS TO ACCELERATE THE DYEING OR FIXING OF DYE-STUFFS AND OTHER PRODUCTS AND PREPARATIONS (FOR EXAMPLE, DRESSINGS AND MORDANTS), OF A KIND USED IN THE TEXTILE, PAPER, LEATHER OR LIKE INDUSTRIES, NOT ELSEWHERE SPECIFIED OR INCLUDED:Of a kind used in the textile or like industries:Textile assistants hydro sulphite formaldehyde compound (rongalite or formusul)" +38099160,"FINISHING AGENTS, DYE CARRIERS TO ACCELERATE THE DYEING OR FIXING OF DYE-STUFFS AND OTHER PRODUCTS AND PREPARATIONS (FOR EXAMPLE, DRESSINGS AND MORDANTS), OF A KIND USED IN THE TEXTILE, PAPER, LEATHER OR LIKE INDUSTRIES, NOT ELSEWHERE SPECIFIED OR INCLUDED:Of a kind used in the textile or like industries:Textile assistants textile preservatives" +38099170,"FINISHING AGENTS, DYE CARRIERS TO ACCELERATE THE DYEING OR FIXING OF DYE-STUFFS AND OTHER PRODUCTS AND PREPARATIONS (FOR EXAMPLE, DRESSINGS AND MORDANTS), OF A KIND USED IN THE TEXTILE, PAPER, LEATHER OR LIKE INDUSTRIES, NOT ELSEWHERE SPECIFIED OR INCLUDED:Of a kind used in the textile or like industries:Textile assistants water proofing agents" +38099180,"FINISHING AGENTS, DYE CARRIERS TO ACCELERATE THE DYEING OR FIXING OF DYE-STUFFS AND OTHER PRODUCTS AND PREPARATIONS (FOR EXAMPLE, DRESSINGS AND MORDANTS), OF A KIND USED IN THE TEXTILE, PAPER, LEATHER OR LIKE INDUSTRIES, NOT ELSEWHERE SPECIFIED OR INCLUDED:Of a kind used in the textile or like industries:Prepared textile glazings, dressings and mordants" +38099190,"FINISHING AGENTS, DYE CARRIERS TO ACCELERATE THE 
DYEING OR FIXING OF DYE-STUFFS AND OTHER PRODUCTS AND PREPARATIONS (FOR EXAMPLE, DRESSINGS AND MORDANTS), OF A KIND USED IN THE TEXTILE, PAPER, LEATHER OR LIKE INDUSTRIES, NOT ELSEWHERE SPECIFIED OR INCLUDED:Of a kind used in the textile or like industries:Other" +38099200,"FINISHING AGENTS, DYE CARRIERS TO ACCELERATE THE DYEING OR FIXING OF DYE-STUFFS AND OTHER PRODUCTS AND PREPARATIONS (FOR EXAMPLE, DRESSINGS AND MORDANTS), OF A KIND USED IN THE TEXTILE, PAPER, LEATHER OR LIKE INDUSTRIES, NOT ELSEWHERE SPECIFIED OR INCLUDED::Of a kind used in the paper or like industries" +38099300,dye carriers to finishing agents accelerate the dyeing or fixing of dye stuffs and other products and preparations for example dressings and mordants of a kind used leather or like in the textile paper not elsewhere specified or industries included >> of a kind used in the leather or like industries +38099310,"FINISHING AGENTS, DYE CARRIERS TO ACCELERATE THE DYEING OR FIXING OF DYE-STUFFS AND OTHER PRODUCTS AND PREPARATIONS (FOR EXAMPLE, DRESSINGS AND MORDANTS), OF A KIND USED IN THE TEXTILE, PAPER, LEATHER OR LIKE INDUSTRIES, NOT ELSEWHERE SPECIFIED OR INCLUDED:Of a kind used in the leather or like industries :Fatty oil or pull up oil" +38099390,"FINISHING AGENTS, DYE CARRIERS TO ACCELERATE THE DYEING OR FIXING OF DYE-STUFFS AND OTHER PRODUCTS AND PREPARATIONS (FOR EXAMPLE, DRESSINGS AND MORDANTS), OF A KIND USED IN THE TEXTILE, PAPER, LEATHER OR LIKE INDUSTRIES, NOT ELSEWHERE SPECIFIED OR INCLUDED:Of a kind used in the leather or like industries :Other" +38101010,"PICKLING PREPARATIONS FOR METAL SURFACES; FLUXES AND OTHER AUXILIARY PREPARATIONS FOR SOLDERING, BRAZING OR WELDING; SOLDERING, BRAZING OR WELDING POWDERS AND PASTES CONSISTING OF METAL AND OTHER MATERIALS; PREPARATIONS OF A KIND USED AS CORES OR COATINGS FOR WELDING ELECTRODES OR RODS:Pickling preparations for metal surfaces; soldering, brazing or welding powders and pastes consisting of metal and other materials:Pickling preparations and other soldering, brazing or welding powders or pastes(OLD tariff)" +38101020,"PICKLING PREPARATIONS FOR METAL SURFACES; FLUXES AND OTHER AUXILIARY PREPARATIONS FOR SOLDERING, BRAZING OR WELDING; SOLDERING, BRAZING OR WELDING POWDERS AND PASTES CONSISTING OF METAL AND OTHER MATERIALS; PREPARATIONS OF A KIND USED AS CORES OR COATINGS FOR WELDING ELECTRODES OR RODS:Pickling preparations for metal surfaces; soldering, brazing or welding powders and pastes consisting of metal and other materials:Thermite portion for welding (alumina thermic heat generators)(OLD tariff)" +38101090,"PICKLING PREPARATIONS FOR METAL SURFACES; FLUXES AND OTHER AUXILIARY PREPARATIONS FOR SOLDERING, BRAZING OR WELDING; SOLDERING, BRAZING OR WELDING POWDERS AND PASTES CONSISTING OF METAL AND OTHER MATERIALS; PREPARATIONS OF A KIND USED AS CORES OR COATINGS FOR WELDING ELECTRODES OR RODS:Pickling preparations for metal surfaces; soldering, brazing or welding powders and pastes consisting of metal and other materials:Other(OLD tariff)" +38109010,"PICKLING PREPARATIONS FOR METAL SURFACES; FLUXES AND OTHER AUXILIARY PREPARATIONS FOR SOLDERING, BRAZING OR WELDING; SOLDERING, BRAZING OR WELDING POWDERS AND PASTES CONSISTING OF METAL AND OTHER MATERIALS; PREPARATIONS OF A KIND USED AS CORES OR COATINGS FOR WELDING ELECTRODES OR RODS:Other:Preparations of a kind used as cores or coatings for welding electrodes and rods(OLD tariff)" +38109090,"PICKLING PREPARATIONS FOR METAL SURFACES; FLUXES AND OTHER AUXILIARY PREPARATIONS FOR SOLDERING, BRAZING OR 
WELDING; SOLDERING, BRAZING OR WELDING POWDERS AND PASTES CONSISTING OF METAL AND OTHER MATERIALS; PREPARATIONS OF A KIND USED AS CORES OR COATINGS FOR WELDING ELECTRODES OR RODS:Other:Other(OLD tariff)" +38110000,gum inhibitors viscosity improvers anti corrosive preparations and other prepared additives for mineral oils including gasoline used for the same or for other liquids purposes as mineral oils preparations +38111100,"ANTI-KNOCK PREPARATIONS, OXIDATION INHIBITORS, GUM INHIBITORS, VISCOSITY IMPROVERS, ANTI-CORROSIVE PREPARATIONS AND OTHER PREPARED ADDITIVES, FOR MINERAL OILS (INCLUDING GASOLINE) OR FOR OTHER LIQUIDS USED FOR THE SAME PURPOSES AS MINERAL OILS::Based on lead compounds" +38111900,"ANTI-KNOCK PREPARATIONS, OXIDATION INHIBITORS, GUM INHIBITORS, VISCOSITY IMPROVERS, ANTI-CORROSIVE PREPARATIONS AND OTHER PREPARED ADDITIVES, FOR MINERAL OILS (INCLUDING GASOLINE) OR FOR OTHER LIQUIDS USED FOR THE SAME PURPOSES AS MINERAL OILS::Other(OLD tariff)" +38112100,"ANTI-KNOCK PREPARATIONS, OXIDATION INHIBITORS, GUM INHIBITORS, VISCOSITY IMPROVERS, ANTI-CORROSIVE PREPARATIONS AND OTHER PREPARED ADDITIVES, FOR MINERAL OILS (INCLUDING GASOLINE) OR FOR OTHER LIQUIDS USED FOR THE SAME PURPOSES AS MINERAL OILS::Containing petroleum oils or oils obtained from bituminous minerals(OLD tariff)" +38112900,"ANTI-KNOCK PREPARATIONS, OXIDATION INHIBITORS, GUM INHIBITORS, VISCOSITY IMPROVERS, ANTI-CORROSIVE PREPARATIONS AND OTHER PREPARED ADDITIVES, FOR MINERAL OILS (INCLUDING GASOLINE) OR FOR OTHER LIQUIDS USED FOR THE SAME PURPOSES AS MINERAL OILS::Other(OLD tariff)" +38119000,"ANTI-KNOCK PREPARATIONS, OXIDATION INHIBITORS, GUM INHIBITORS, VISCOSITY IMPROVERS, ANTI-CORROSIVE PREPARATIONS AND OTHER PREPARED ADDITIVES, FOR MINERAL OILS (INCLUDING GASOLINE) OR FOR OTHER LIQUIDS USED FOR THE SAME PURPOSES AS MINERAL OILS::Other" +38120000,prepared rubber accelerators compound plasticisers for rubber or plastics not elsewhere specified or included anti oxidising preparations and other compound stabilisers for rubber or plastics +38121000,"PREPARED RUBBER ACCELERATORS; COMPOUND PLASTICISERS FOR RUBBER OR PLASTICS, NOT ELSEWHERE SPECIFIED OR INCLUDED; ANTI-OXIDISING PREPARATIONS AND OTHER COMPOUND STABILISERS FOR RUBBER OR PLASTICS::Prepared rubber accelerators" +38122000,prepared rubber accelerators compound plasticisers for rubber or plastics not elsewhere specified or included anti oxidising preparations and other compound stabilisers for rubber or plastics >> compound plasticisers for rubber or plastics +38122010,"PREPARED RUBBER ACCELERATORS; COMPOUND PLASTICISERS FOR RUBBER OR PLASTICS, NOT ELSEWHERE SPECIFIED OR INCLUDED; ANTI-OXIDISING PREPARATIONS AND OTHER COMPOUND STABILISERS FOR RUBBER OR PLASTICS:Compound plasticisers for rubber or plastics:Phthalate plasticisers" +38122090,"PREPARED RUBBER ACCELERATORS; COMPOUND PLASTICISERS FOR RUBBER OR PLASTICS, NOT ELSEWHERE SPECIFIED OR INCLUDED; ANTI-OXIDISING PREPARATIONS AND OTHER COMPOUND STABILISERS FOR RUBBER OR PLASTICS:Compound plasticisers for rubber or plastics:Other" +38123010,"PREPARED RUBBER ACCELERATORS; COMPOUND PLASTICISERS FOR RUBBER OR PLASTICS, NOT ELSEWHERE SPECIFIED OR INCLUDED; ANTI-OXIDISING PREPARATIONS AND OTHER COMPOUND STABILISERS FOR RUBBER OR PLASTICS:Anti-oxidising preparations and other compound stabilisers for rubber or plastics:Anti-oxidants for rubber(OLD tariff)" +38123020,"PREPARED RUBBER ACCELERATORS; COMPOUND PLASTICISERS FOR RUBBER OR PLASTICS, NOT ELSEWHERE SPECIFIED OR INCLUDED; ANTI-OXIDISING PREPARATIONS AND 
OTHER COMPOUND STABILISERS FOR RUBBER OR PLASTICS:Anti-oxidising preparations and other compound stabilisers for rubber or plastics:Softeners for rubber(OLD tariff)" +38123030,"PREPARED RUBBER ACCELERATORS; COMPOUND PLASTICISERS FOR RUBBER OR PLASTICS, NOT ELSEWHERE SPECIFIED OR INCLUDED; ANTI-OXIDISING PREPARATIONS AND OTHER COMPOUND STABILISERS FOR RUBBER OR PLASTICS:Anti-oxidising preparations and other compound stabilisers for rubber or plastics:Vulcanising agents for rubber(OLD tariff)" +38123090,"PREPARED RUBBER ACCELERATORS; COMPOUND PLASTICISERS FOR RUBBER OR PLASTICS, NOT ELSEWHERE SPECIFIED OR INCLUDED; ANTI-OXIDISING PREPARATIONS AND OTHER COMPOUND STABILISERS FOR RUBBER OR PLASTICS:Anti-oxidising preparations and other compound stabilisers for rubber or plastics:Other(OLD tariff)" +38123100,"PREPARED RUBBER ACCELERATORS; COMPOUND PLASTICISERS FOR RUBBER OR PLASTICS, NOT ELSEWHERE SPECIFIED OR INCLUDED; ANTI-OXIDISING PREPARATIONS AND OTHER COMPOUND STABILISERS FOR RUBBER OR PLASTICS::Mixtures of oligomers of 2, 2, 4-trimethyl-1, 2-dihydroquinoline (TMQ)" +38123900,prepared rubber accelerators compound plasticisers for rubber or plastics not elsewhere specified or included anti oxidising preparations and other compound stabilisers for rubber or plastics >> other +38123910,"PREPARED RUBBER ACCELERATORS; COMPOUND PLASTICISERS FOR RUBBER OR PLASTICS, NOT ELSEWHERE SPECIFIED OR INCLUDED; ANTI-OXIDISING PREPARATIONS AND OTHER COMPOUND STABILISERS FOR RUBBER OR PLASTICS:Other:Anti-oxidants for rubber" +38123920,"PREPARED RUBBER ACCELERATORS; COMPOUND PLASTICISERS FOR RUBBER OR PLASTICS, NOT ELSEWHERE SPECIFIED OR INCLUDED; ANTI-OXIDISING PREPARATIONS AND OTHER COMPOUND STABILISERS FOR RUBBER OR PLASTICS:Other:Softeners for rubber" +38123930,"PREPARED RUBBER ACCELERATORS; COMPOUND PLASTICISERS FOR RUBBER OR PLASTICS, NOT ELSEWHERE SPECIFIED OR INCLUDED; ANTI-OXIDISING PREPARATIONS AND OTHER COMPOUND STABILISERS FOR RUBBER OR PLASTICS:Other:Vulcanizing agents for rubber" +38123990,"PREPARED RUBBER ACCELERATORS; COMPOUND PLASTICISERS FOR RUBBER OR PLASTICS, NOT ELSEWHERE SPECIFIED OR INCLUDED; ANTI-OXIDISING PREPARATIONS AND OTHER COMPOUND STABILISERS FOR RUBBER OR PLASTICS:Other:Other" +38130000,::PREPARATIONS AND CHARGES FOR FIRE-EXTINGUISHERS; CHARGED FIRE-EXTINGUISHING GRENADES +38140000,organic composite solvents and thinners not elsewhere specified or included prepared paint or varnish removers organic composite solvents and thinners not elsewhere specified or included prepared paint or varnish removers +38140010,"ORGANIC COMPOSITE SOLVENTS AND THINNERS, NOT ELSEWHERE SPECIFIED OR INCLUDED; PREPARED PAINT OR VARNISH REMOVERS:Organic composite solvents and thinners, not elsewhere specified or included; prepared paint or varnish removers:Organic composite solvents and thinners, not elsewhere specified or included" +38140020,"ORGANIC COMPOSITE SOLVENTS AND THINNERS, NOT ELSEWHERE SPECIFIED OR INCLUDED; PREPARED PAINT OR VARNISH REMOVERS:Organic composite solvents and thinners, not elsewhere specified or included; prepared paint or varnish removers:Prepared paint or varnish removers" +38150000,reaction initiators reaction accelerators and catalytic preparations not elsewhere specified or included supported catalysts +38151100,"REACTION INITIATORS, REACTION ACCELERATORS AND CATAL PREPARATIONS, NOT ELSEWHERE SPECIFIED OR INCLUDED::With nickel or nickel compounds as the active substance" +38151200,reaction initiators reaction accelerators and catalytic preparations not elsewhere specified 
or included supported catalysts >> with precious metal or precious metal compounds as the active substance +38151210,"REACTION INITIATORS, REACTION ACCELERATORS AND CATAL PREPARATIONS, NOT ELSEWHERE SPECIFIED OR INCLUDED:With precious metal or precious metal compounds as the active substance:Platinum or palladium catalysts with a base of activated carbon" +38151290,"REACTION INITIATORS, REACTION ACCELERATORS AND CATAL PREPARATIONS, NOT ELSEWHERE SPECIFIED OR INCLUDED:With precious metal or precious metal compounds as the active substance:Other" +38151900,"REACTION INITIATORS, REACTION ACCELERATORS AND CATAL PREPARATIONS, NOT ELSEWHERE SPECIFIED OR INCLUDED::Other" +38159000,"REACTION INITIATORS, REACTION ACCELERATORS AND CATAL PREPARATIONS, NOT ELSEWHERE SPECIFIED OR INCLUDED::Other" +38160000,"::REFRACTORY CEMENTS, MORTARS, CONCRETES AND SIMILAR COMPOSITIONS, OTHER THAN PRODUCTS OF HEADING 3801" +38170000,mixed alkylbenzenes and mixed alkylnaphthalenes other than those of heading 2707 or 2902 mixed alkylbenzenes and mixed alkylnaphthalenes other than those of heading 2707 or 2902 mixed alkylbenzenes +38170011,"MIXED ALKYLBENZENES AND MIXED ALKYLNAPHTHALENES, OTHER THAN THOSE OF HEADING 2707 OR 2902:Mixed alkylbenzenes and mixed alkylnaphthalenes, other than those of heading 2707 or 2902:Linear alkylbenzenes" +38170019,"MIXED ALKYLBENZENES AND MIXED ALKYLNAPHTHALENES, OTHER THAN THOSE OF HEADING 2707 OR 2902:Mixed alkylbenzenes and mixed alkylnaphthalenes, other than those of heading 2707 or 2902:Other" +38170020,"MIXED ALKYLBENZENES AND MIXED ALKYLNAPHTHALENES, OTHER THAN THOSE OF HEADING 2707 OR 2902:Mixed alkylbenzenes and mixed alkylnaphthalenes, other than those of heading 2707 or 2902:Mixed alkylnaphthalenes" +38180000,chemical elements doped for use in electronics in the form of discs wafers or similar forms chemical compounds doped for use in electronics chemical elements doped for use in electronics in the form of discs wafers or similar forms chemical compounds doped for use in electronics +38180010,"CHEMICAL ELEMENTS DOPED FOR USE IN ELECTRONICS, IN THE FORM OF DISCS, WAFERS OR SIMILAR FORMS; CHEMICAL COMPOUNDS DOPED FOR USE IN ELECTRONICS:Chemical elements doped for use in electronics, in the form of discs, wafers or similar forms; chemical compounds doped for use in electronics:Undefused silicon wafers" +38180090,"CHEMICAL ELEMENTS DOPED FOR USE IN ELECTRONICS, IN THE FORM OF DISCS, WAFERS OR SIMILAR FORMS; CHEMICAL COMPOUNDS DOPED FOR USE IN ELECTRONICS:Chemical elements doped for use in electronics, in the form of discs, wafers or similar forms; chemical compounds doped for use in electronics:Other" +38190000,hydraulic brake fluids and other prepared liquids for hydraulic transmission not containing or containing less than 70 by weight of petroleum oils or oils obtained from bituminous minerals +38190010,"HYDRAULIC BRAKE FLUIDS AND OTHER PREPARED LIQUIDS FOR HYDRAULIC TRANSMISSION, NOT CONTAINING OR CONTAINING LESS THAN 70% BY WEIGHT OF PETROLEUM OILS OR OILS OBTAINED FROM BITUMINOUS MINERALS::Hydraulic brake fluids" +38190090,"HYDRAULIC BRAKE FLUIDS AND OTHER PREPARED LIQUIDS FOR HYDRAULIC TRANSMISSION, NOT CONTAINING OR CONTAINING LESS THAN 70% BY WEIGHT OF PETROLEUM OILS OR OILS OBTAINED FROM BITUMINOUS MINERALS::Other" +38200000,::ANTI-FREEZING PREPARATIONS AND PREPARED DE-ICING FLUIDS +38210000,::PREPARED CULTURE MEDIA FOR DEVELOPMENT OR MAINTENANCE OF MICRO-ORGANISM (INCLUDING VIRUSES AND THE LIKE) OR OF PLANT HUMAN OR ANIMAL CELLS +38220000,diagnostic or laboratory 
reagents on a backing prepared diagnostic or laboratory reagents whether or not on a backing whether or not put up in the form of kits other than those of heading 3006 certified reference materials laboratory reagents on a backing diagnostic or prepared diagnostic or laboratory reagents whether or not on a backing whether or not put up in the form of kits +38220011,"DIAGNOSTIC OR LABORATORY REAGENTS ON A BACKING, PREPARED DIAGNOSTIC OR LABORATORY REAGENTS WHETHER OR NOT ON A BACKING, OTHER THAN THOSE OF HEADING 3002 OR 3006; CERTIFIED REFERENCE MATERIALS :Diagnostic or laboratory reagents on a backing, prepared diagnostic or laboratory reagents whether or not on a backing, other than those of heading 3002 or 3006; certified reference materials :Pregnancy confirmation reagents(OLD tariff)" +38220012,"DIAGNOSTIC OR LABORATORY REAGENTS ON A BACKING, PREPARED DIAGNOSTIC OR LABORATORY REAGENTS WHETHER OR NOT ON A BACKING, OTHER THAN THOSE OF HEADING 3002 OR 3006; CERTIFIED REFERENCE MATERIALS :Diagnostic or laboratory reagents on a backing, prepared diagnostic or laboratory reagents whether or not on a backing, other than those of heading 3002 or 3006; certified reference materials :Reagents for diagnosing AIDS(OLD tariff)" +38220019,"DIAGNOSTIC OR LABORATORY REAGENTS ON A BACKING, PREPARED DIAGNOSTIC OR LABORATORY REAGENTS WHETHER OR NOT ON A BACKING, OTHER THAN THOSE OF HEADING 3002 OR 3006; CERTIFIED REFERENCE MATERIALS :Diagnostic or laboratory reagents on a backing, prepared diagnostic or laboratory reagents whether or not on a backing, other than those of heading 3002 or 3006; certified reference materials :Other(OLD tariff)" +38220090,"DIAGNOSTIC OR LABORATORY REAGENTS ON A BACKING, PREPARED DIAGNOSTIC OR LABORATORY REAGENTS WHETHER OR NOT ON A BACKING, OTHER THAN THOSE OF HEADING 3002 OR 3006; CERTIFIED REFERENCE MATERIALS :Diagnostic or laboratory reagents on a backing, prepared diagnostic or laboratory reagents whether or not on a backing, other than those of heading 3002 or 3006; certified reference materials :Other(OLD tariff)" +38221100,"DIAGNOSTIC OR LABORATORY REAGENTS ON A BACKING, PREPARED DIAGNOSTIC OR LABORATORY REAGENTS WHETHER OR NOT ON A BACKING, OTHER THAN THOSE OF HEADING 3002 OR 3006; CERTIFIED REFERENCE MATERIALS :for malaria:For malaria" +38221200,"DIAGNOSTIC OR LABORATORY REAGENTS ON A BACKING, PREPARED DIAGNOSTIC OR LABORATORY REAGENTS WHETHER OR NOT ON A BACKING, OTHER THAN THOSE OF HEADING 3002 OR 3006; CERTIFIED REFERENCE MATERIALS :zika:For Zika and other diseases transmitted by mosquitoes of the genus Aedes" +38221300,"DIAGNOSTIC OR LABORATORY REAGENTS ON A BACKING, PREPARED DIAGNOSTIC OR LABORATORY REAGENTS WHETHER OR NOT ON A BACKING, OTHER THAN THOSE OF HEADING 3002 OR 3006; CERTIFIED REFERENCE MATERIALS :blood:For blood-grouping" +38221900,diagnostic or laboratory reagents on a backing prepared diagnostic or laboratory reagents whether or not on a backing whether or not put up in the form of kits other than those of heading 3006 certified reference materials laboratory reagents on a backing diagnostic or prepared diagnostic or laboratory reagents whether or not on a backing whether or not put up in the form of kits >> other +38221910,"DIAGNOSTIC OR LABORATORY REAGENTS ON A BACKING, PREPARED DIAGNOSTIC OR LABORATORY REAGENTS WHETHER OR NOT ON A BACKING, OTHER THAN THOSE OF HEADING 3002 OR 3006; CERTIFIED REFERENCE MATERIALS :Other:Pregnancy test kit" +38221990,"DIAGNOSTIC OR LABORATORY REAGENTS ON A BACKING, PREPARED DIAGNOSTIC OR LABORATORY REAGENTS WHETHER 
OR NOT ON A BACKING, OTHER THAN THOSE OF HEADING 3002 OR 3006; CERTIFIED REFERENCE MATERIALS :Other:Other" +38229000,diagnostic or laboratory reagents on a backing prepared diagnostic or laboratory reagents whether or not on a backing whether or not put up in the form of kits other than those of heading 3006 certified reference materials laboratory reagents on a backing diagnostic or prepared diagnostic or laboratory reagents whether or not on a backing whether or not put up in the form of kits >> other +38229010,"DIAGNOSTIC OR LABORATORY REAGENTS ON A BACKING, PREPARED DIAGNOSTIC OR LABORATORY REAGENTS WHETHER OR NOT ON A BACKING, OTHER THAN THOSE OF HEADING 3002 OR 3006; CERTIFIED REFERENCE MATERIALS :Other:Certified reference materials" +38229090,"DIAGNOSTIC OR LABORATORY REAGENTS ON A BACKING, PREPARED DIAGNOSTIC OR LABORATORY REAGENTS WHETHER OR NOT ON A BACKING, OTHER THAN THOSE OF HEADING 3002 OR 3006; CERTIFIED REFERENCE MATERIALS :Other:Other" +38231100,INDUSTRIAL MONOCARBOXYLIC FATTY ACIDS; ACID OILS FROM REFINING; INDUSTRIAL FATTY ALCOHOLS:Stearic acid :Stearic Acid(OLD tariff) +38231111,INDUSTRIAL MONOCARBOXYLIC FATTY ACIDS; ACID OILS FROM REFINING; INDUSTRIAL FATTY ALCOHOLS:Stearic acid :Crude(OLD tariff) +38231112,INDUSTRIAL MONOCARBOXYLIC FATTY ACIDS; ACID OILS FROM REFINING; INDUSTRIAL FATTY ALCOHOLS:Stearic acid :RBD(OLD tariff) +38231119,INDUSTRIAL MONOCARBOXYLIC FATTY ACIDS; ACID OILS FROM REFINING; INDUSTRIAL FATTY ALCOHOLS:Stearic acid :Other(OLD tariff) +38231190,INDUSTRIAL MONOCARBOXYLIC FATTY ACIDS; ACID OILS FROM REFINING; INDUSTRIAL FATTY ALCOHOLS:Stearic acid :Other stearic acid or stearin(OLD tariff) +38231200,INDUSTRIAL MONOCARBOXYLIC FATTY ACIDS; ACID OILS FROM REFINING; INDUSTRIAL FATTY ALCOHOLS::Oleic acid(OLD tariff) +38231300,INDUSTRIAL MONOCARBOXYLIC FATTY ACIDS; ACID OILS FROM REFINING; INDUSTRIAL FATTY ALCOHOLS::Tall oil fatty acids(OLD tariff) +38231900,INDUSTRIAL MONOCARBOXYLIC FATTY ACIDS; ACID OILS FROM REFINING; INDUSTRIAL FATTY ALCOHOLS::Other(OLD tariff) +38237010,INDUSTRIAL MONOCARBOXYLIC FATTY ACIDS; ACID OILS FROM REFINING; INDUSTRIAL FATTY ALCOHOLS:Industrial fatty alcohols:Cetyl alcohol(OLD tariff) +38237020,INDUSTRIAL MONOCARBOXYLIC FATTY ACIDS; ACID OILS FROM REFINING; INDUSTRIAL FATTY ALCOHOLS:Industrial fatty alcohols:Lauryl alcohol(OLD tariff) +38237030,INDUSTRIAL MONOCARBOXYLIC FATTY ACIDS; ACID OILS FROM REFINING; INDUSTRIAL FATTY ALCOHOLS:Industrial fatty alcohols:Oleyl alcohol(OLD tariff) +38237040,INDUSTRIAL MONOCARBOXYLIC FATTY ACIDS; ACID OILS FROM REFINING; INDUSTRIAL FATTY ALCOHOLS:Industrial fatty alcohols:Stearyl alcohol(OLD tariff) +38237090,INDUSTRIAL MONOCARBOXYLIC FATTY ACIDS; ACID OILS FROM REFINING; INDUSTRIAL FATTY ALCOHOLS:Industrial fatty alcohols:Other(OLD tariff) +38240000,prepared binders for foundry moulds or cores chemical products and preparations of chemical or allied industries including consisting of mixtures of natural products not elsewhere specified or included +38241000,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED::Prepared binders for foundry moulds or cores" +38243000,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED::Non-agglomerated metal carbides mixed together or with 
metallic binders" +38244000,prepared binders for foundry moulds or cores chemical products and preparations of chemical or allied industries including consisting of mixtures of natural products not elsewhere specified or included >> prepared additives for cements mortars or concretes +38244010,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Prepared additives for cements, mortars or concretes:Damp proof or water proof compounds" +38244090,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Prepared additives for cements, mortars or concretes:Other" +38245000,prepared binders for foundry moulds or cores chemical products and preparations of chemical or allied industries including consisting of mixtures of natural products not elsewhere specified or included >> mortars and concretes +38245010,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Non-refractory mortars and concretes:Concretes ready to use known as Ready- mix Concrete (RMC)" +38245090,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Non-refractory mortars and concretes:Other" +38246000,prepared binders for foundry moulds or cores chemical products and preparations of chemical or allied industries including consisting of mixtures of natural products not elsewhere specified or included >> sorbitol other than that of 2905 44 +38246010,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Sorbitol other than that of sub-heading 2905 44:In aqueous solution" +38246090,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Sorbitol other than that of sub-heading 2905 44:Other" +38247100,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED::Containing chloroflurocarbons,(CFCs),whether or not containing hydrochlorofluro-carbons (HCFCs), perfluorocarbons (PFCs) or hydroflurocarbons (HFCs)(OLD tariff)" +38247200,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED::Containing bromochlorodifluromethane, bromotrifluro- methane, or dibromotertrafluoro-ethanes(OLD tariff)" +38247300,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED::Containing hydrobromofluorocarbons (HBFCs)(OLD tariff)" 
+38247400,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED::Containing hydrochlorofluorocarbons(HCFCs),whether or not containing perfluorocarbons (PFCs) or hydrofluoro-carbons (HFCs), but not containing chlorofluorocarbons (CFCs).(OLD tariff)" +38247500,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED::Containing carbon tetrachloride(OLD tariff)" +38247600,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED::Containing 1,1,1-trichloroethane(methyl chloroform)(OLD tariff)" +38247700,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED::Containing bromomethane (methyl bromide) or bromochloromethane(OLD tariff)" +38247800,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED::Containing perfluorocarbons (PFCs) or hydrofluorocarbons (HFCs), but not containing chlorofluorocarbons (CFCs) or hydrochlorofluorocarbons (HCFCs)(OLD tariff)" +38247900,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED::Other(OLD tariff)" +38248100,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED::Containing oxirane (ethylene oxide)" +38248200,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED::Containing polychlorinated biphenyls(PCBs), polychlor-inated terphenyls (PCTs) or polybrominated biphenyls (PBBs)" +38248300,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED::Containing tris(2, 3-dibromopropyl) phosphate" +38248400,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED::Containing aldrin (ISO), camphechlor (ISO) (toxaphene), chlordane (ISO), chlordecone (ISO), DDT (ISO) (clofenotane (INN), 1, 1, 1- trichloro-2, 2-bis(p-chlorophenyl)ethane), dieldrin (ISO, INN), endosulfan (ISO), endrin (ISO), heptachlor (ISO) or mirex (ISO)" +38248500,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED::Containing 1, 2, 3, 4, 5, 6-hexachlorocyclohexane (HCH 
(ISO)), including lindane (ISO, INN)" +38248600,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED::Containing pentachlorobenzene (ISO) or hexachlorobenzene (ISO)" +38248700,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED::Containing perfluorooctane sulphonic acid, its salts, perfluorooctane sulphonamides, or perfluorooctane sulphonyl fluoride" +38248800,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED::Containing tetra-, penta-, hexa, hepta- or octabromodiphenyl ethers" +38248900,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:containing:Containing short-chain chlorinated paraffins" +38249011,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Ammoniacal gas liquors and spent oxide produced in coal gas purification(OLD tariff)" +38249012,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Case hardening compound(OLD tariff)" +38249013,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Heat transfer salts(OLD tariff)" +38249014,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Mixture of diphenyl and diphenyl oxide as heat transfer medium(OLD tariff)" +38249015,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Mixed polyethylene glycols(OLD tariff)" +38249016,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Salts for curing or salting(OLD tariff)" +38249017,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Surface tension reducing agents(OLD tariff)" +38249021,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Electroplating salts(OLD tariff)" +38249022,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; 
CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Water treatment chemicals, ion exchanger (INN) such as permiutits, zeolites)(OLD tariff)" +38249023,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Gramophone records making material(OLD tariff)" +38249024,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Correcting fluid(OLD tariff)" +38249025,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Precipitated silica and silica gel(OLD tariff)" +38249026,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Oil well chemicals(OLD tariff)" +38249031,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Mixtures containing perhalogenated derivatives of acyclic hydrocarbons containing two or more different halogens other than chlorine and fluorine(OLD tariff)" +38249032,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Ferrite powder(OLD tariff)" +38249033,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Capacitor fluids PCB type(OLD tariff)" +38249034,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Dipping oil for treatment of grapes(OLD tariff)" +38249035,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Poly brominated biphenyls, poly chlorinated biphenyls, poly chlorinated terphenyls, crocidolite(OLD tariff)" +38249036,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Goods of a kind known as hazardous waste(OLD tariff)" +38249037,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Phosphogypsum(OLD tariff)" +38249090,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED 
INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Other(OLD tariff)" +38249100,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED::Mixtures and preparations consisting mainly of (5-ethyl-2-methyl-2-oxido-1, 3, 2-dioxaphosphinan-5-yl)methyl methyl methylphosphonate and bis[(5-ethyl-2-methyl-2-oxido-1, 3, 2- dioxaphosphinan-5-yl) methyl] methylphosphonate" +38249200,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:polyglycol:Polyglycol esters of methylphosphonic acid" +38249900,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Other" +38249911,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Ammoniacal gas liquors and spent oxide produced in coal gas purification(OLD tariff)" +38249912,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Case hardening compound(OLD tariff)" +38249913,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Heat transfer salts(OLD tariff)" +38249914,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Mixture of diphenyl and diphenyl oxide as heat transfer medium(OLD tariff)" +38249915,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Mixed polyethylene glycols(OLD tariff)" +38249916,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Salts for curing or salting(OLD tariff)" +38249917,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Surface tension reducing agents(OLD tariff)" +38249921,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Electroplating salts(OLD tariff)" +38249922,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI 
OR INCLUDED:Other:Water treatment chemicals; ion exchanger (INN) such as permiutits, zero-lites(OLD tariff)" +38249923,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Gramaphone records making material(OLD tariff)" +38249924,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Correcting fluid(OLD tariff)" +38249925,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Precipitated silica and silica gel(OLD tariff)" +38249926,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Oil well chemical(OLD tariff)" +38249931,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Mixture containing perhalogenated derivatives of acyclic hydrocarbons containing two or more different halogens other than chlorine and fluorine(OLD tariff)" +38249932,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Ferrite powder(OLD tariff)" +38249933,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Capacitor fluids - PCB type(OLD tariff)" +38249934,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Dipping oil for treatment of grapes(OLD tariff)" +38249935,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Poly brominated biphenyls, poly chlorinated biphenyls, Poly chlorinated terphenyls, crocidolite(OLD tariff)" +38249936,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Goods of a kind known as 'hazardous waste'(OLD tariff)" +38249937,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Phosphogypsum(OLD tariff)" +38249938,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Phosphonic Acid, Methyl-compound with
(aminoimino methyl) urea (1 1)(OLD tariff)" +38249990,"PREPARED BINDERS FOR FOUNDRY MOULDS OR CORES; CHEMICAL PRODUCTS AND PREPARATIONS OF THE CHEMICAL OR ALLIED INDUSTRIES (INCLUDING THOSE CONSISTING OF MIXTURES OF NATURAL PRODUCTS), NOT ELSEWHERE SPECIFI OR INCLUDED:Other:Other(OLD tariff)" +38250000,residual products of the chemical or allied industries not elsewhere specified or included municipal waste sewage sludge other wastes specified in note 6 to this chapter +38251000,"RESIDUAL PRODUCTS OF THE CHEMICAL OR ALLIED INDUSTRIES, NOT ELSEWHERE SPECIFIED OR INCLUDED; MUNICIPAL WASTE; SEWAGE SLUDGE; OTHER WASTES SPECIFIED IN NOTE 6 TO THIS CHAPTER::Municipal waste" +38252000,"RESIDUAL PRODUCTS OF THE CHEMICAL OR ALLIED INDUSTRIES, NOT ELSEWHERE SPECIFIED OR INCLUDED; MUNICIPAL WASTE; SEWAGE SLUDGE; OTHER WASTES SPECIFIED IN NOTE 6 TO THIS CHAPTER::Sewage sludge" +38253000,"RESIDUAL PRODUCTS OF THE CHEMICAL OR ALLIED INDUSTRIES, NOT ELSEWHERE SPECIFIED OR INCLUDED; MUNICIPAL WASTE; SEWAGE SLUDGE; OTHER WASTES SPECIFIED IN NOTE 6 TO THIS CHAPTER::Clinical waste" +38254100,"RESIDUAL PRODUCTS OF THE CHEMICAL OR ALLIED INDUSTRIES, NOT ELSEWHERE SPECIFIED OR INCLUDED; MUNICIPAL WASTE; SEWAGE SLUDGE; OTHER WASTES SPECIFIED IN NOTE 6 TO THIS CHAPTER::Halogenated" +38254900,"RESIDUAL PRODUCTS OF THE CHEMICAL OR ALLIED INDUSTRIES, NOT ELSEWHERE SPECIFIED OR INCLUDED; MUNICIPAL WASTE; SEWAGE SLUDGE; OTHER WASTES SPECIFIED IN NOTE 6 TO THIS CHAPTER::Other" +38255000,"RESIDUAL PRODUCTS OF THE CHEMICAL OR ALLIED INDUSTRIES, NOT ELSEWHERE SPECIFIED OR INCLUDED; MUNICIPAL WASTE; SEWAGE SLUDGE; OTHER WASTES SPECIFIED IN NOTE 6 TO THIS CHAPTER::Wastes of metal pickling liquors, hydraulic fluids, brake fluids and anti-freeze fluids" +38256100,"RESIDUAL PRODUCTS OF THE CHEMICAL OR ALLIED INDUSTRIES, NOT ELSEWHERE SPECIFIED OR INCLUDED; MUNICIPAL WASTE; SEWAGE SLUDGE; OTHER WASTES SPECIFIED IN NOTE 6 TO THIS CHAPTER::Mainly containing organic constituents" +38256900,"RESIDUAL PRODUCTS OF THE CHEMICAL OR ALLIED INDUSTRIES, NOT ELSEWHERE SPECIFIED OR INCLUDED; MUNICIPAL WASTE; SEWAGE SLUDGE; OTHER WASTES SPECIFIED IN NOTE 6 TO THIS CHAPTER::Other" +38259000,"RESIDUAL PRODUCTS OF THE CHEMICAL OR ALLIED INDUSTRIES, NOT ELSEWHERE SPECIFIED OR INCLUDED; MUNICIPAL WASTE; SEWAGE SLUDGE; OTHER WASTES SPECIFIED IN NOTE 6 TO THIS CHAPTER::Other" +38260000,"Bio diesel and mixtures thereof:Biodiesel and mintrues thereof:Biodiesel and mixtures thereof, not containing or containing less than 70% by weight of petroleum oils and oils obtained from bituminous minerals" +38270000,mixtures containing halogenated derivatives of methane ethane or propane not elsewhere +38271100,"MIXTURES CONTAINING HALOGENATED DERIVATIVES OF METHANE, ETHANE OR PROPANE, NOT ELSEWHERE SPECIFIED OR INCLUDED:cfc:Containing chlorofluorocarbons (CFCs), whether or not containing hydrochlorofluorocarbons (HCFCs), perfluorocarbons (PFCs) or hydrofluorocarbons (HFCs)(OLD tariff)" +38271200,"MIXTURES CONTAINING HALOGENATED DERIVATIVES OF METHANE, ETHANE OR PROPANE, NOT ELSEWHERE SPECIFIED OR INCLUDED:hbfc:Containing hydrobromofluorocarbons (HBFCs)(OLD tariff)" +38271300,"MIXTURES CONTAINING HALOGENATED DERIVATIVES OF METHANE, ETHANE OR PROPANE, NOT ELSEWHERE SPECIFIED OR INCLUDED:carbon:Containing carbon tetrachloride(OLD tariff)" +38271400,"MIXTURES CONTAINING HALOGENATED DERIVATIVES OF METHANE, ETHANE OR PROPANE, NOT ELSEWHERE SPECIFIED OR INCLUDED:methyl:Containing 1,1,1-trichloroethane (methyl chloroform)(OLD tariff)" +38272000,"MIXTURES CONTAINING 
HALOGENATED DERIVATIVES OF METHANE, ETHANE OR PROPANE, NOT ELSEWHERE SPECIFIED OR INCLUDED:halon:Containing bromochlorodifluoromethane (Halon1211), bromotrifluoromethane (Halon1301) or dibromotetrafluoroethanes (Halon-2402)(OLD tariff)" +38273100,"MIXTURES CONTAINING HALOGENATED DERIVATIVES OF METHANE, ETHANE OR PROPANE, NOT ELSEWHERE SPECIFIED OR INCLUDED:substances:Containing substances of subheadings 2903.41 to 2903.48(OLD tariff)" +38273200,"MIXTURES CONTAINING HALOGENATED DERIVATIVES OF METHANE, ETHANE OR PROPANE, NOT ELSEWHERE SPECIFIED OR INCLUDED:other:Other, containing substances of subheadings 2903.71 to 2903.75" +38273900,"MIXTURES CONTAINING HALOGENATED DERIVATIVES OF METHANE, ETHANE OR PROPANE, NOT ELSEWHERE SPECIFIED OR INCLUDED:other:Other" +38274000,"MIXTURES CONTAINING HALOGENATED DERIVATIVES OF METHANE, ETHANE OR PROPANE, NOT ELSEWHERE SPECIFIED OR INCLUDED:methyl:Containing methyl bromide (bromomethane) or bromochloromethane" +38275100,"MIXTURES CONTAINING HALOGENATED DERIVATIVES OF METHANE, ETHANE OR PROPANE, NOT ELSEWHERE SPECIFIED OR INCLUDED:hfc:Containing trifluoromethane (HFC-23)" +38275900,"MIXTURES CONTAINING HALOGENATED DERIVATIVES OF METHANE, ETHANE OR PROPANE, NOT ELSEWHERE SPECIFIED OR INCLUDED:Other:Other" +38276100,"MIXTURES CONTAINING HALOGENATED DERIVATIVES OF METHANE, ETHANE OR PROPANE, NOT ELSEWHERE SPECIFIED OR INCLUDED:hfc:Containing 15 % or more by mass of 1,1,1-trifluoroethane (HFC-143a)" +38276200,"MIXTURES CONTAINING HALOGENATED DERIVATIVES OF METHANE, ETHANE OR PROPANE, NOT ELSEWHERE SPECIFIED OR INCLUDED:hfo:Other, not included in the subheading above, containing 55 % or more by mass of pentafluoroethane (HFC- 125) but not containing unsaturated fluorinated derivatives of acyclic hydrocarbons (HFOs)" +38276300,"MIXTURES CONTAINING HALOGENATED DERIVATIVES OF METHANE, ETHANE OR PROPANE, NOT ELSEWHERE SPECIFIED OR INCLUDED:hfc:Other, not included in the subheadings above, containing 40 % or more by mass of pentafluoroethane (HFC-125)" +38276400,"MIXTURES CONTAINING HALOGENATED DERIVATIVES OF METHANE, ETHANE OR PROPANE, NOT ELSEWHERE SPECIFIED OR INCLUDED:hfo:Other, not included in the subheadings above, containing 30 % or more by mass of 1,1,1,2-tetrafluoroethane (HFC-134a) but not containing unsaturated fluorinated derivatives of acyclic hydrocarbons (HFOs)" +38276500,"MIXTURES CONTAINING HALOGENATED DERIVATIVES OF METHANE, ETHANE OR PROPANE, NOT ELSEWHERE SPECIFIED OR INCLUDED:hfc:Other, not included in the subheadings above, containing 20 % or more by mass of difluoromethane (HFC-32) and 20 % or more by mass of pentafluoroethane (HFC-125)" +38276800,"MIXTURES CONTAINING HALOGENATED DERIVATIVES OF METHANE, ETHANE OR PROPANE, NOT ELSEWHERE SPECIFIED OR INCLUDED:other:Other, not included in the subheadings above, containing substances of subheadings 2903 41 to 2903 48" +38276900,"MIXTURES CONTAINING HALOGENATED DERIVATIVES OF METHANE, ETHANE OR PROPANE, NOT ELSEWHERE SPECIFIED OR INCLUDED:other:Other" +38279000,"MIXTURES CONTAINING HALOGENATED DERIVATIVES OF METHANE, ETHANE OR PROPANE, NOT ELSEWHERE SPECIFIED OR INCLUDED:other:Other" +39011010,"I. PRIMARY FORMS POLYMERS OF ETHYLENE, IN PRIMARY FORMS:Polyethylene having a specific gravity of less than 0.94 :Linear low density polyethylene (LLDPE) in which ethelyne monomer unit contributes 95% above(OLD tariff)" +39011020,"I. PRIMARY FORMS POLYMERS OF ETHYLENE, IN PRIMARY FORMS:Polyethylene having a specific gravity of less than 0.94 :Low density polyethylene (LDPE)(OLD tariff)" +39011090,"I. 
PRIMARY FORMS POLYMERS OF ETHYLENE, IN PRIMARY FORMS:Polyethylene having a specific gravity of less than 0.94 :Other(OLD tariff)" +39012000,"I. PRIMARY FORMS POLYMERS OF ETHYLENE, IN PRIMARY FORMS::Polyethylene having a specific gravity of 0.94 or more(OLD tariff)" +39013000,"I. PRIMARY FORMS POLYMERS OF ETHYLENE, IN PRIMARY FORMS::Ethylene-vinyl acetate copolymers(OLD tariff)" +39014000,"I. PRIMARY FORMS POLYMERS OF ETHYLENE, IN PRIMARY FORMS:Ethylene-alpha-olefin copolymers, having a specific gravity of less than 0.94:Ethylene-alpha-olefin copolymers, having a specific gravity of less than 0.94(OLD tariff)" +39014010,"I. PRIMARY FORMS POLYMERS OF ETHYLENE, IN PRIMARY FORMS:Ethylene-alpha-olefin copolymers, having a specific gravity of less than 0.94:Linear low density polyethylene (LLDPE), in which ethylene monomer unit contributes less than 95% by weight of the total polymer content(OLD tariff)" +39014090,"I. PRIMARY FORMS POLYMERS OF ETHYLENE, IN PRIMARY FORMS:Ethylene-alpha-olefin copolymers, having a specific gravity of less than 0.94:Other(OLD tariff)" +39019000,"I. PRIMARY FORMS POLYMERS OF ETHYLENE, IN PRIMARY FORMS:Other:Other(OLD tariff)" +39019010,"I. PRIMARY FORMS POLYMERS OF ETHYLENE, IN PRIMARY FORMS:Other:Linear medium density polyethylene (LMDPE)(OLD tariff)" +39019090,"I. PRIMARY FORMS POLYMERS OF ETHYLENE, IN PRIMARY FORMS:Other:Other(OLD tariff)" +39020000,polymers of propylene or of other olefins in primary forms +39021000,"POLYMERS OF PROPYLENE OR OF OTHER OLEFINS, IN PRIMARY FORMS::Polypropylene" +39022000,"POLYMERS OF PROPYLENE OR OF OTHER OLEFINS, IN PRIMARY FORMS::Poly iso butylene" +39023000,"POLYMERS OF PROPYLENE OR OF OTHER OLEFINS, IN PRIMARY FORMS::Propylene copolymers" +39029000,"POLYMERS OF PROPYLENE OR OF OTHER OLEFINS, IN PRIMARY FORMS::Other" +39030000,polymers of styrene in primary forms polystyrene +39031100,"POLYMERS OF STYRENE, IN PRIMARY FORMS::Expansible" +39031900,polymers of styrene in primary forms polystyrene >> other +39031910,"POLYMERS OF STYRENE, IN PRIMARY FORMS:Other :Moulding Powder" +39031990,"POLYMERS OF STYRENE, IN PRIMARY FORMS:Other :Other" +39032000,"POLYMERS OF STYRENE, IN PRIMARY FORMS::Styrene-acrylonitrile (SAN) copolymers" +39033000,"POLYMERS OF STYRENE, IN PRIMARY FORMS::Acrylonitrile-butadine-styrene (ABS)copolymers" +39039010,"POLYMERS OF STYRENE, IN PRIMARY FORMS::Copolymers, solely of styrene with allyl alcohol, of any acetyl value of 175 or more(OLD tariff)" +39039020,"POLYMERS OF STYRENE, IN PRIMARY FORMS::Brominated polystyrene, containing by weight 58% or more but not more than 71% of bromine, in one of the forms mentioned in Note 6(b) to this Chapter(OLD tariff)" +39039090,"POLYMERS OF STYRENE, IN PRIMARY FORMS::Other(OLD tariff)" +39040000,polymers of vinyl chloride or of other in primary forms halogenated olefins +39041000,polymers of vinyl chloride or of other in primary forms halogenated olefins >> poly vinyl chloride not mixed with any other substances +39041010,"POLYMERS OF VINYL CHLORIDE OR OF OTHER HALOGENATED OLEFINS, IN PRIMARY FORMS:Poly (Vinyl Chlodide) not mixed with any other substances:Binders for pigments" +39041020,"POLYMERS OF VINYL CHLORIDE OR OF OTHER HALOGENATED OLEFINS, IN PRIMARY FORMS:Poly (Vinyl Chlodide) not mixed with any other substances:Suspension grade PVC resin" +39041090,"POLYMERS OF VINYL CHLORIDE OR OF OTHER HALOGENATED OLEFINS, IN PRIMARY FORMS:Poly (Vinyl Chlodide) not mixed with any other substances:Other" +39042100,"POLYMERS OF VINYL CHLORIDE OR OF OTHER HALOGENATED OLEFINS, IN 
PRIMARY FORMS:NON-PLASTICISED:Non-Plasticised" +39042110,"POLYMERS OF VINYL CHLORIDE OR OF OTHER HALOGENATED OLEFINS, IN PRIMARY FORMS:NON-PLASTICISED:POLY(VINYL CHLORIDE) (PVC) RESINS(OLD tariff)" +39042190,"POLYMERS OF VINYL CHLORIDE OR OF OTHER HALOGENATED OLEFINS, IN PRIMARY FORMS:NON-PLASTICISED:OTHER(OLD tariff)" +39042200,"POLYMERS OF VINYL CHLORIDE OR OF OTHER HALOGENATED OLEFINS, IN PRIMARY FORMS:Plasticised:Plasticised" +39042210,"POLYMERS OF VINYL CHLORIDE OR OF OTHER HALOGENATED OLEFINS, IN PRIMARY FORMS:Plasticised:POLY (VINYL CHLORIDE) (PVC) RESINS (EMULSION GRADE)(OLD tariff)" +39042290,"POLYMERS OF VINYL CHLORIDE OR OF OTHER HALOGENATED OLEFINS, IN PRIMARY FORMS:Plasticised:OTHER(OLD tariff)" +39043000,polymers of vinyl chloride or of other in primary forms halogenated olefins >> vinyl acetate copolymers +39043010,"POLYMERS OF VINYL CHLORIDE OR OF OTHER HALOGENATED OLEFINS, IN PRIMARY FORMS:Vinyl chloride-vinyl acetate copolymers:Poly (vinyl derivatives)" +39043090,"POLYMERS OF VINYL CHLORIDE OR OF OTHER HALOGENATED OLEFINS, IN PRIMARY FORMS:Vinyl chloride-vinyl acetate copolymers:Other" +39044000,"POLYMERS OF VINYL CHLORIDE OR OF OTHER HALOGENATED OLEFINS, IN PRIMARY FORMS::Other Vinyl chloride copolymers" +39045000,polymers of vinyl chloride or of other in primary forms halogenated olefins >> vinylidene chloride polymers +39045010,"POLYMERS OF VINYL CHLORIDE OR OF OTHER HALOGENATED OLEFINS, IN PRIMARY FORMS:Vinylidene chloride polymers:Copolymer of vinylidene chloride with acrylonitrite, in the form of expansible beads of a diameter of 4 micrometers or more but not more than 20 micrometers" +39045090,"POLYMERS OF VINYL CHLORIDE OR OF OTHER HALOGENATED OLEFINS, IN PRIMARY FORMS:Vinylidene chloride polymers:Other" +39046100,"POLYMERS OF VINYL CHLORIDE OR OF OTHER HALOGENATED OLEFINS, IN PRIMARY FORMS::Polytetrafluroethylene" +39046910,"POLYMERS OF VINYL CHLORIDE OR OF OTHER HALOGENATED OLEFINS, IN PRIMARY FORMS::Poly (vinyl fluoride), in one of the forms mentioned in Note 6(b) to this Chapter(OLD tariff)" +39046990,"POLYMERS OF VINYL CHLORIDE OR OF OTHER HALOGENATED OLEFINS, IN PRIMARY FORMS::Other(OLD tariff)" +39049000,"POLYMERS OF VINYL CHLORIDE OR OF OTHER HALOGENATED OLEFINS, IN PRIMARY FORMS:Other:Other" +39049010,"POLYMERS OF VINYL CHLORIDE OR OF OTHER HALOGENATED OLEFINS, IN PRIMARY FORMS:Other:Chlorinated poly vinyl chloride (CPVC) resin" +39049090,"POLYMERS OF VINYL CHLORIDE OR OF OTHER HALOGENATED OLEFINS, IN PRIMARY FORMS:Other:Other" +39050000,polymers of vinyl acetate or of othervinyl esters in primary forms other vinyl polymers in primary forms poly vinyl acetate +39051200,polymers of vinyl acetate or of othervinyl esters in primary forms other vinyl polymers in primary forms poly vinyl acetate >> in aqueous dispersion +39051210,"POLYMERS OF VINYL ACETATE OR OF OTHER VINYL ESTERS, IN PRIMARY FORMS; OTHER VINYL POLYMERS IN PRIMARY FORMS:In aqueous dispersion:Poly (vinyl acetate) (PVA), moulding material" +39051220,"POLYMERS OF VINYL ACETATE OR OF OTHER VINYL ESTERS, IN PRIMARY FORMS; OTHER VINYL POLYMERS IN PRIMARY FORMS:In aqueous dispersion:Poly (vinyl acetate) resins" +39051290,"POLYMERS OF VINYL ACETATE OR OF OTHER VINYL ESTERS, IN PRIMARY FORMS; OTHER VINYL POLYMERS IN PRIMARY FORMS:In aqueous dispersion:Other" +39051900,polymers of vinyl acetate or of othervinyl esters in primary forms other vinyl polymers in primary forms poly vinyl acetate >> other +39051910,"POLYMERS OF VINYL ACETATE OR OF OTHER VINYL ESTERS, IN PRIMARY FORMS; OTHER VINYL POLYMERS IN 
PRIMARY FORMS:Other :Poly (vinyl acetate) (PVA) moulding material" +39051920,"POLYMERS OF VINYL ACETATE OR OF OTHER VINYL ESTERS, IN PRIMARY FORMS; OTHER VINYL POLYMERS IN PRIMARY FORMS:Other :Poly (vinyl acetate) and resins" +39051990,"POLYMERS OF VINYL ACETATE OR OF OTHER VINYL ESTERS, IN PRIMARY FORMS; OTHER VINYL POLYMERS IN PRIMARY FORMS:Other :Other" +39052100,"POLYMERS OF VINYL ACETATE OR OF OTHER VINYL ESTERS, IN PRIMARY FORMS; OTHER VINYL POLYMERS IN PRIMARY FORMS::In aqueous dispersion" +39052900,"POLYMERS OF VINYL ACETATE OR OF OTHER VINYL ESTERS, IN PRIMARY FORMS; OTHER VINYL POLYMERS IN PRIMARY FORMS::Other" +39053000,"POLYMERS OF VINYL ACETATE OR OF OTHER VINYL ESTERS, IN PRIMARY FORMS; OTHER VINYL POLYMERS IN PRIMARY FORMS::Poly (vinyl alcohol), whether or not containing unhydrolysed acetate groups" +39059100,"POLYMERS OF VINYL ACETATE OR OF OTHER VINYL ESTERS, IN PRIMARY FORMS; OTHER VINYL POLYMERS IN PRIMARY FORMS::Copolymers" +39059900,polymers of vinyl acetate or of othervinyl esters in primary forms other vinyl polymers in primary forms poly vinyl acetate >> other +39059910,"POLYMERS OF VINYL ACETATE OR OF OTHER VINYL ESTERS, IN PRIMARY FORMS; OTHER VINYL POLYMERS IN PRIMARY FORMS:Other :Poly (vinyl pirolidone) (p alcohol)" +39059990,"POLYMERS OF VINYL ACETATE OR OF OTHER VINYL ESTERS, IN PRIMARY FORMS; OTHER VINYL POLYMERS IN PRIMARY FORMS:Other :Other" +39060000,acrylic polymers in primary forms poly methyl methacrylate acrylic polymers in primary forms poly methyl methacrylate +39061010,ACRYLIC POLYMERS IN PRIMARY FORMS::Binders for pigments or inks(OLD tariff) +39061090,ACRYLIC POLYMERS IN PRIMARY FORMS::Other(OLD tariff) +39069000,acrylic polymers in primary forms poly methyl methacrylate acrylic polymers in primary forms poly methyl methacrylate >> other other +39069010,ACRYLIC POLYMERS IN PRIMARY FORMS:Other :Acrylic resins(OLD tariff) +39069020,ACRYLIC POLYMERS IN PRIMARY FORMS:Other :Polyacrylate moulding powder(OLD tariff) +39069030,ACRYLIC POLYMERS IN PRIMARY FORMS:Other :Copolymers of acrylonitrile(OLD tariff) +39069040,ACRYLIC POLYMERS IN PRIMARY FORMS:Other :Poly (acrylic acid) +39069050,ACRYLIC POLYMERS IN PRIMARY FORMS:Other :Polyacrylonitrile (PAN) +39069060,ACRYLIC POLYMERS IN PRIMARY FORMS:Other :Copolymers of acrylonitrile +39069070,ACRYLIC POLYMERS IN PRIMARY FORMS:Other :Sodium polyacrylate +39069090,ACRYLIC POLYMERS IN PRIMARY FORMS:Other :Other +39070000,polyacetals other polyethers and epoxide resins in primary forms polycarbonates alkyd and other resins polyallylesters polyesters in primary forms polyacetals other polyethers and epoxide resins in primary forms polycarbonates alkyd and other resins polyallylesters polyesters in primary forms +39071000,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,::Polyacetals" +39072010,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,:Other polyethers:Poly (ether alcohols)(OLD tariff)" +39072090,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,:Other polyethers:Other(OLD tariff)" +39072100,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,::Bis (polyoxyethylene) methylphosphonate" +39072900,polyacetals other polyethers and epoxide resins in primary forms polycarbonates alkyd and other resins polyallylesters polyesters in primary forms polyacetals other polyethers and epoxide resins in primary forms polycarbonates alkyd and other resins polyallylesters polyesters in primary forms >> other other +39072910,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,:Other:Poly (ether alcohols)" 
+39072990,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,:Other:Other" +39073000,polyacetals other polyethers and epoxide resins in primary forms polycarbonates alkyd and other resins polyallylesters polyesters in primary forms polyacetals other polyethers and epoxide resins in primary forms polycarbonates alkyd and other resins polyallylesters polyesters in primary forms >> epoxide resins epoxide resins +39073010,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,:Epoxide resins :Epoxy resins" +39073090,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,:Epoxide resins :Other" +39074000,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,::Polycarbonates" +39075000,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,::Alkyd resins" +39076010,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,:Poly (ethylene terepthalate):Having an intrinsic viscosity of less than 0.64 dl/g(OLD tariff)" +39076020,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,:Poly (ethylene terepthalate):Having an intrinsic viscosity of not less than 0.64 dl/g and not greater than 0.72 dl/g(OLD tariff)" +39076090,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,:Poly (ethylene terepthalate):Other (including clean, clourless grades)(OLD tariff)" +39076100,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,:Having a viscosity number of 78 ml/g or higher:Having a viscosity number of 78 ml/g or higher" +39076110,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,:Having a viscosity number of 78 ml/g or higher:PET flake (chip)" +39076190,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,:Having a viscosity number of 78 ml/g or higher:Other primary form" +39076900,polyacetals other polyethers and epoxide resins in primary forms polycarbonates alkyd and other resins polyallylesters polyesters in primary forms polyacetals other polyethers and epoxide resins in primary forms polycarbonates alkyd and other resins polyallylesters polyesters in primary forms >> other other +39076910,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,:Other:Having a viscosity number less than 78 ml/g but not less than 72 ml/g(OLD tariff)" +39076920,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,:Other:Having a viscosity number less than 72 ml/g but not less than 64 ml/g(OLD tariff)" +39076930,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,:Other:PET flake (chip)" +39076990,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,:Other:Other primary form" +39077000,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,::Poly (lactic acid)" +39079100,polyacetals other polyethers and epoxide resins in primary forms polycarbonates alkyd and other resins polyallylesters polyesters in primary forms polyacetals other polyethers and epoxide resins in primary forms polycarbonates alkyd and other resins polyallylesters polyesters in primary forms >> unsaturated unsaturated +39079110,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,:Unsaturated :Maleic resins" +39079120,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,:Unsaturated :Polyester or contract resins" +39079130,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,:Unsaturated :Fumeric resins" +39079140,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,:Unsaturated :Diallylphthalate resins" +39079150,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,:Unsaturated :Poly (butylene terepthalate)" +39079190,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,:Unsaturated :Other" +39079900,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,:Other :Other" +39079910,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,:Other 
:Diallyl phthalate resins(OLD tariff)" +39079920,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,:Other :Poly(butylene terepthalate)(OLD tariff)" +39079990,"POLYACETALS, OTHER POLYETHERS AND EPOXIDE RESINS,:Other :Other primary form(OLD tariff)" +39080000,polyamides in primary forms polyamides in primary forms +39081000,polyamides in primary forms polyamides in primary forms >> polyamide 6 11 12 6 6 6 9 6 10 or polyamide 6 polyamide 6 11 12 6 6 6 9 6 10 or polyamide 6 +39081010,"POLYAMIDES IN PRIMARY FORMS:Polyamide -6, -11,-12, -6, 6, -6, 9, -6, 10 or:Nylon moulding powder(OLD tariff)" +39081011,"POLYAMIDES IN PRIMARY FORMS:Polyamide -6, -11,-12, -6, 6, -6, 9, -6, 10 or:Flake (chip)" +39081019,"POLYAMIDES IN PRIMARY FORMS:Polyamide -6, -11,-12, -6, 6, -6, 9, -6, 10 or:Other primary form" +39081021,"POLYAMIDES IN PRIMARY FORMS:Polyamide -6, -11,-12, -6, 6, -6, 9, -6, 10 or:Flake (chip)" +39081029,"POLYAMIDES IN PRIMARY FORMS:Polyamide -6, -11,-12, -6, 6, -6, 9, -6, 10 or:Other primary form" +39081031,"POLYAMIDES IN PRIMARY FORMS:Polyamide -6, -11,-12, -6, 6, -6, 9, -6, 10 or:Flake (chip)" +39081039,"POLYAMIDES IN PRIMARY FORMS:Polyamide -6, -11,-12, -6, 6, -6, 9, -6, 10 or:Other primary form" +39081041,"POLYAMIDES IN PRIMARY FORMS:Polyamide -6, -11,-12, -6, 6, -6, 9, -6, 10 or:Flake (chip)" +39081049,"POLYAMIDES IN PRIMARY FORMS:Polyamide -6, -11,-12, -6, 6, -6, 9, -6, 10 or:Other primary form" +39081051,"POLYAMIDES IN PRIMARY FORMS:Polyamide -6, -11,-12, -6, 6, -6, 9, -6, 10 or:Flake (chip)" +39081059,"POLYAMIDES IN PRIMARY FORMS:Polyamide -6, -11,-12, -6, 6, -6, 9, -6, 10 or:Other primary form" +39081061,"POLYAMIDES IN PRIMARY FORMS:Polyamide -6, -11,-12, -6, 6, -6, 9, -6, 10 or:Flake (chip)" +39081069,"POLYAMIDES IN PRIMARY FORMS:Polyamide -6, -11,-12, -6, 6, -6, 9, -6, 10 or:Other primary form" +39081071,"POLYAMIDES IN PRIMARY FORMS:Polyamide -6, -11,-12, -6, 6, -6, 9, -6, 10 or:Flake (chip)" +39081079,"POLYAMIDES IN PRIMARY FORMS:Polyamide -6, -11,-12, -6, 6, -6, 9, -6, 10 or:Other primary form" +39081090,"POLYAMIDES IN PRIMARY FORMS:Polyamide -6, -11,-12, -6, 6, -6, 9, -6, 10 or:Other(OLD tariff)" +39089000,POLYAMIDES IN PRIMARY FORMS:Other :Other +39089010,POLYAMIDES IN PRIMARY FORMS:Other :Nylon moulding powder(OLD tariff) +39089020,POLYAMIDES IN PRIMARY FORMS:Other :Nylon in other primary forms(OLD tariff) +39089090,POLYAMIDES IN PRIMARY FORMS:Other :Other(OLD tariff) +39090000,phenolic resins and polyurethanes in primary forms phenolic resins and polyurethanes in primary forms +39091000,phenolic resins and polyurethanes in primary forms phenolic resins and polyurethanes in primary forms >> urea resins thiourea resins urea resins thiourea resins +39091010,"AMINO-RESINS, PHENOLIC RESINS AND POLYURETHANES,:Urea resins; thiourea resins :Urea formaldehyde resins" +39091090,"AMINO-RESINS, PHENOLIC RESINS AND POLYURETHANES,:Urea resins; thiourea resins :Other" +39092000,phenolic resins and polyurethanes in primary forms phenolic resins and polyurethanes in primary forms >> melamine resins +39092010,"AMINO-RESINS, PHENOLIC RESINS AND POLYURETHANES,:Melamine resins:Melamine formaldehyde resins" +39092090,"AMINO-RESINS, PHENOLIC RESINS AND POLYURETHANES,:Melamine resins:Other" +39093010,"AMINO-RESINS, PHENOLIC RESINS AND POLYURETHANES,:Other amino-resins:Poly (phenylene oxide)(OLD tariff)" +39093090,"AMINO-RESINS, PHENOLIC RESINS AND POLYURETHANES,:Other amino-resins:Other(OLD tariff)" +39093100,"AMINO-RESINS, PHENOLIC RESINS AND POLYURETHANES,::Poly(methylene phenyl isocyanate) (crude 
MDI, polymeric MDI)" +39093900,phenolic resins and polyurethanes in primary forms phenolic resins and polyurethanes in primary forms >> other +39093910,"AMINO-RESINS, PHENOLIC RESINS AND POLYURETHANES,:Other:Poly(phenylene oxide)" +39093990,"AMINO-RESINS, PHENOLIC RESINS AND POLYURETHANES,:Other:Other" +39094000,phenolic resins and polyurethanes in primary forms phenolic resins and polyurethanes in primary forms >> phenolic resins +39094010,"AMINO-RESINS, PHENOLIC RESINS AND POLYURETHANES,:Phenolic resins :Cresol formaldehyde oxide" +39094020,"AMINO-RESINS, PHENOLIC RESINS AND POLYURETHANES,:Phenolic resins :Phenol formaldehyde resins" +39094030,"AMINO-RESINS, PHENOLIC RESINS AND POLYURETHANES,:Phenolic resins :Alkyl phenol-formaldehyde resins" +39094040,"AMINO-RESINS, PHENOLIC RESINS AND POLYURETHANES,:Phenolic resins :Ketonic resins" +39094050,"AMINO-RESINS, PHENOLIC RESINS AND POLYURETHANES,:Phenolic resins :Phenoxi resins" +39094060,"AMINO-RESINS, PHENOLIC RESINS AND POLYURETHANES,:Phenolic resins :Terpene phenolic resins" +39094090,"AMINO-RESINS, PHENOLIC RESINS AND POLYURETHANES,:Phenolic resins :Other" +39095000,"AMINO-RESINS, PHENOLIC RESINS AND POLYURETHANES,::Polyurethanes" +39100000,silicones in primary forms silicones in primary forms +39100010,SILICONES IN PRIMARY FORMS:Silicones in primary forms:Silicone resins +39100020,SILICONES IN PRIMARY FORMS:Silicones in primary forms:Silicone oil +39100090,SILICONES IN PRIMARY FORMS:Silicones in primary forms:Other +39110000,petroleum resins resins polyterpenes polysulphides polysulphones and other products specified in note 3 to this chapter not elsewhere specified or included in primary forms +39111000,petroleum resins resins polyterpenes polysulphides polysulphones and other products specified in note 3 to this chapter not elsewhere specified or included in primary forms >> petroleum resins or coumarone indene resins and polyterpenes +39111010,"PETROLEUM RESINS, COUMARONE-INDENE RESINS, POLYTERPENES, POLYSULPHIDES, POLYSULPHONES AND OTHER PRODUCTS SPECIFIED IN NOTE 3 TO THIS:Petroleum resins, coumarone, indene or coumarone-indene resins and polyterpenes:Coumarone-indene resins " +39111090,"PETROLEUM RESINS, COUMARONE-INDENE RESINS, POLYTERPENES, POLYSULPHIDES, POLYSULPHONES AND OTHER PRODUCTS SPECIFIED IN NOTE 3 TO THIS:Petroleum resins, coumarone, indene or coumarone-indene resins and polyterpenes:Other " +39112000,"PETROLEUM RESINS, COUMARONE-INDENE RESINS, POLYTERPENES, POLYSULPHIDES, POLYSULPHONES AND OTHER PRODUCTS SPECIFIED IN NOTE 3 TO THIS::Poly (1,3-phenylene methylphosphonate)" +39119000,petroleum resins resins polyterpenes polysulphides polysulphones and other products specified in note 3 to this chapter not elsewhere specified or included in primary forms >> other +39119010,"PETROLEUM RESINS, COUMARONE-INDENE RESINS, POLYTERPENES, POLYSULPHIDES, POLYSULPHONES AND OTHER PRODUCTS SPECIFIED IN NOTE 3 TO THIS:Other :Polysulphones " +39119090,"PETROLEUM RESINS, COUMARONE-INDENE RESINS, POLYTERPENES, POLYSULPHIDES, POLYSULPHONES AND OTHER PRODUCTS SPECIFIED IN NOTE 3 TO THIS:Other :Other " +39120000,cellulose and its chemical derivatives not elsewhere specified or included in primary forms cellulose acetates +39121100,cellulose and its chemical derivatives not elsewhere specified or included in primary forms cellulose acetates >> +39121110,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED, IN PRIMARY FORMS:Non-plasticised:Cellulose acetate flakes " +39121120,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, 
NOT ELSEWHERE SPECIFIED OR INCLUDED, IN PRIMARY FORMS:Non-plasticised:Cellulose acetate moulding powder " +39121130,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED, IN PRIMARY FORMS:Non-plasticised:Cellulose acetobutyrate moulding powder " +39121140,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED, IN PRIMARY FORMS:Non-plasticised:Cellulose nitrate, dynamic grade " +39121190,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED, IN PRIMARY FORMS:Non-plasticised:Other " +39121200,cellulose and its chemical derivatives not elsewhere specified or included in primary forms cellulose acetates >> plasticised +39121210,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED, IN PRIMARY FORMS:Plasticised :Cellulose acetate flakes " +39121220,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED, IN PRIMARY FORMS:Plasticised :Cellulose acetate moulding powder " +39121230,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED, IN PRIMARY FORMS:Plasticised :Cellulose acetobutyrate moulding powder " +39121290,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED, IN PRIMARY FORMS:Plasticised :Other " +39122000,cellulose and its chemical derivatives not elsewhere specified or included in primary forms cellulose acetates >> cellulose nitrates including collodions +39122011,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED, IN PRIMARY FORMS:Cellulose nitrates (including collodions):Moulding powders " +39122019,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED, IN PRIMARY FORMS:Cellulose nitrates (including collodions):Other " +39122021,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED, IN PRIMARY FORMS:Cellulose nitrates (including collodions):Moulding powders " +39122029,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED, IN PRIMARY FORMS:Cellulose nitrates (including collodions):Other " +39123100,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED, IN PRIMARY FORMS::Carboxymethyl cellulose and its salts " +39123900,cellulose and its chemical derivatives not elsewhere specified or included in primary forms cellulose acetates >> other +39123911,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED, IN PRIMARY FORMS:Other -:Ethylcellulose " +39123912,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED, IN PRIMARY FORMS:Other -:Methylcellulose " +39123919,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED, IN PRIMARY FORMS:Other -:Other cellulose ethers " +39123921,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED, IN PRIMARY FORMS:Other -:Ethyl cellulose " +39123922,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED, IN PRIMARY FORMS:Other -:Methyl cellulose " +39123929,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED, IN PRIMARY FORMS:Other -:Other cellulose ether " +39129000,cellulose and its chemical derivatives not elsewhere specified or included in primary forms cellulose acetates >> other +39129010,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED, IN PRIMARY FORMS:Other:Cellulose propionate and aceto propionate, non-plasticised" +39129020,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED, IN 
PRIMARY FORMS:Other:Viscose sponge" +39129090,"CELLULOSE AND ITS CHEMICAL DERIVATIVES, NOT ELSEWHERE SPECIFIED OR INCLUDED, IN PRIMARY FORMS:Other:Other" +39130000,natural polymers for example alginic acid and modified natural polymers for example hardened proteins chemical derivatives of natural elsewhere specified rubber not or included in primary forms +39131000,natural polymers for example alginic acid and modified natural polymers for example hardened proteins chemical derivatives of natural elsewhere specified rubber not or included in primary forms >> alginic acid its salts and esters +39131010,"NATURAL POLYMERS (FOR EXAMPLE, ALGINIC ACID) AND MODIFIED NATURAL POLYMERS (FOR EXAMPLE, HARDENED PROTEINS, CHEMICAL DERIVATIVES OF NATURAL RUBBER), NOT ELSEWHERE SPECIFIED OR:Alginic acid, its salts and esters :Sodium alginate" +39131090,"NATURAL POLYMERS (FOR EXAMPLE, ALGINIC ACID) AND MODIFIED NATURAL POLYMERS (FOR EXAMPLE, HARDENED PROTEINS, CHEMICAL DERIVATIVES OF NATURAL RUBBER), NOT ELSEWHERE SPECIFIED OR:Alginic acid, its salts and esters :Other" +39139000,natural polymers for example alginic acid and modified natural polymers for example hardened proteins chemical derivatives of natural elsewhere specified rubber not or included in primary forms >> other r of natural ubber chemical derivatives +39139011,"NATURAL POLYMERS (FOR EXAMPLE, ALGINIC ACID) AND MODIFIED NATURAL POLYMERS (FOR EXAMPLE, HARDENED PROTEINS, CHEMICAL DERIVATIVES OF NATURAL RUBBER), NOT ELSEWHERE SPECIFIED OR:Other :Chlorinated rubber" +39139019,"NATURAL POLYMERS (FOR EXAMPLE, ALGINIC ACID) AND MODIFIED NATURAL POLYMERS (FOR EXAMPLE, HARDENED PROTEINS, CHEMICAL DERIVATIVES OF NATURAL RUBBER), NOT ELSEWHERE SPECIFIED OR:Other :Other" +39139020,"NATURAL POLYMERS (FOR EXAMPLE, ALGINIC ACID) AND MODIFIED NATURAL POLYMERS (FOR EXAMPLE, HARDENED PROTEINS, CHEMICAL DERIVATIVES OF NATURAL RUBBER), NOT ELSEWHERE SPECIFIED OR:Other :Hardened proteins (such as hardened casein, gelatin)" +39139030,"NATURAL POLYMERS (FOR EXAMPLE, ALGINIC ACID) AND MODIFIED NATURAL POLYMERS (FOR EXAMPLE, HARDENED PROTEINS, CHEMICAL DERIVATIVES OF NATURAL RUBBER), NOT ELSEWHERE SPECIFIED OR:Other :Dextran" +39139090,"NATURAL POLYMERS (FOR EXAMPLE, ALGINIC ACID) AND MODIFIED NATURAL POLYMERS (FOR EXAMPLE, HARDENED PROTEINS, CHEMICAL DERIVATIVES OF NATURAL RUBBER), NOT ELSEWHERE SPECIFIED OR:Other :Other" +39140000,based on polymers of in primary forms headings 3901 to 3913 based on polymers of headings 3901 to 3913 in primary forms +39140010,"ION EXCHANGERS BASED ON POLYMERS OF HEADINGS 3901 TO 3913, IN PRIMARY FORMS:Ion exchangers based on polymers of headings 3901 to 3913, in primary forms:Ion-exchangers of the condensation, polycondensation or polyaddition type" +39140020,"ION EXCHANGERS BASED ON POLYMERS OF HEADINGS 3901 TO 3913, IN PRIMARY FORMS:Ion exchangers based on polymers of headings 3901 to 3913, in primary forms:Ion-exchangers of polymerisation or co-polymerisation type" +39140090,"ION EXCHANGERS BASED ON POLYMERS OF HEADINGS 3901 TO 3913, IN PRIMARY FORMS:Ion exchangers based on polymers of headings 3901 to 3913, in primary forms:Other" +39150000,ii parings and scrap articles waste parings and scrap of plastics +39151000,"WASTE, PARINGS AND SCRAP, OF PLASTICS::Of polymers of ethylene" +39152000,"WASTE, PARINGS AND SCRAP, OF PLASTICS::Of polymers of styrene" +39153000,ii parings and scrap articles waste parings and scrap of plastics >> of polymers of vinyl chloride +39153010,"WASTE, PARINGS AND SCRAP, OF PLASTICS:Of polymers of vinyl 
chloride:Of copolymers of vinyl chloride" +39153090,"WASTE, PARINGS AND SCRAP, OF PLASTICS:Of polymers of vinyl chloride:Other" +39159000,ii parings and scrap articles waste parings and scrap of plastics >> of other plastics +39159010,"WASTE, PARINGS AND SCRAP, OF PLASTICS:Of other plastics :Of polypropylene" +39159021,"WASTE, PARINGS AND SCRAP, OF PLASTICS:Of other plastics :Of copolymers of vinyl acetate" +39159029,"WASTE, PARINGS AND SCRAP, OF PLASTICS:Of other plastics :Other" +39159030,"WASTE, PARINGS AND SCRAP, OF PLASTICS:Of other plastics :Of acrylic polymers and methlyacrylic copolymers" +39159041,"WASTE, PARINGS AND SCRAP, OF PLASTICS:Of other plastics :Of alkyds and polyesters" +39159042,"WASTE, PARINGS AND SCRAP, OF PLASTICS:Of other plastics :Of pet bottles" +39159049,"WASTE, PARINGS AND SCRAP, OF PLASTICS:Of other plastics :Of epoxide resins" +39159050,"WASTE, PARINGS AND SCRAP, OF PLASTICS:Of other plastics :Of polyamides" +39159061,"WASTE, PARINGS AND SCRAP, OF PLASTICS:Of other plastics :Of phenoplast" +39159062,"WASTE, PARINGS AND SCRAP, OF PLASTICS:Of other plastics :Of aminoplast" +39159063,"WASTE, PARINGS AND SCRAP, OF PLASTICS:Of other plastics :Of polyurethanes" +39159071,"WASTE, PARINGS AND SCRAP, OF PLASTICS:Of other plastics :Of regenerated cellulose" +39159072,"WASTE, PARINGS AND SCRAP, OF PLASTICS:Of other plastics :Cellulose plastic waste such as cellulose nitrate film scrap non-plasticised" +39159073,"WASTE, PARINGS AND SCRAP, OF PLASTICS:Of other plastics :Cellulose plastic waste such as cellulose nitrate film scrap plasticised" +39159074,"WASTE, PARINGS AND SCRAP, OF PLASTICS:Of other plastics :Cellulose plastic waste such as cellulose acetatc film scrap non-plasticised" +39159075,"WASTE, PARINGS AND SCRAP, OF PLASTICS:Of other plastics :Cellulose plastic waste such as cellulose acetatc film scrap plasticised" +39159079,ii parings and scrap articles waste parings and scrap of plastics >> of other plastics >> others +39159090,"WASTE, PARINGS AND SCRAP, OF PLASTICS:Of other plastics :Other" +39160000,monofilament of which any cross sectional dimension exceeds 1mm rods sticks and profile shapes whether or not but not otherwise worked of plastics +39161000,monofilament of which any cross sectional dimension exceeds 1mm rods sticks and profile shapes whether or not but not otherwise worked of plastics >> of polymers of ethylene +39161010,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of polymers of ethylene:Rods of polyethylene" +39161020,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of polymers of ethylene:Canes" +39161090,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of polymers of ethylene:Other" +39162000,monofilament of which any cross sectional dimension exceeds 1mm rods sticks and profile shapes whether or not but not otherwise worked of plastics >> of polymers of vinyl chloride of poly vinyl chloride copolymers +39162011,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of polymers of vinyl chloride:Canes" +39162019,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, 
RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of polymers of vinyl chloride:Other" +39162091,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of polymers of vinyl chloride:Canes" +39162099,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of polymers of vinyl chloride:Other" +39169000,monofilament of which any cross sectional dimension exceeds 1mm rods sticks and profile shapes whether or not but not otherwise worked of plastics >> of other plastics +39169010,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of other plastics :Canes" +39169021,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of other plastics :Of phenoplast" +39169022,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of other plastics :Of aminoplast" +39169023,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of other plastics :Of alkyds and polysters" +39169024,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of other plastics :Of polyamides" +39169025,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of other plastics :Of polyurethanes" +39169026,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of other plastics :Of epoxide-resins (including waste and scrap)" +39169027,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of other plastics :Of polypropylene" +39169028,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of other plastics :Of acrylicmethacrylic and acrylomethacrylic polymers" +39169031,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of other plastics :Of polymerisation and copolymerisation products of polystyrene" +39169032,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of other plastics :Of polymethyl methacrylate" +39169040,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of other plastics :Of regenerated cellulose" +39169050,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT 
SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of other plastics :Of cellulose nitrate and celluloid, whether or not plasticized" +39169060,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of other plastics :Of vulcanized fibre(OLD tariff)" +39169070,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of other plastics :Of cellulose acetate and acetate butyrate, whether or not plasticized" +39169080,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of other plastics :Of vinyl plastic(OLD tariff)" +39169090,"MONOFILAMENT OF WHICH ANY CROSS-SECTIONAL DIMENSION EXCEEDS 1MM, RODS, STICKS AND PROFILE SHAPES, WHETHER OR NOT SURFACE-WORKED BUT NOT OTHERWISE WORKED, OF PLASTICS:Of other plastics :Of other polymerisation and copolymerisation products" +39170000,tubes pipes and hoses and fittings therefor for example joints elbows flanges of plastics artificial guts sausage casings of hardened protein or of cellulosic materials +39171010,"TUBES, PIPES AND HOSES, AND FITTINGS THEREFOR (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES), OF PLASTICS::Of hardened protein(OLD tariff)" +39171020,"TUBES, PIPES AND HOSES, AND FITTINGS THEREFOR (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES), OF PLASTICS::Of cellulosic materials(OLD tariff)" +39172100,tubes pipes and hoses and fittings therefor for example joints elbows flanges of plastics artificial guts sausage casings of hardened protein or of cellulosic materials >> of polymers of ethylene +39172110,"TUBES, PIPES AND HOSES, AND FITTINGS THEREFOR (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES), OF PLASTICS:Of polymers of ethylene:Tubes of polyethylene" +39172190,"TUBES, PIPES AND HOSES, AND FITTINGS THEREFOR (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES), OF PLASTICS:Of polymers of ethylene:Other" +39172200,"TUBES, PIPES AND HOSES, AND FITTINGS THEREFOR (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES), OF PLASTICS::Of polymers of propylene" +39172300,tubes pipes and hoses and fittings therefor for example joints elbows flanges of plastics artificial guts sausage casings of hardened protein or of cellulosic materials >> of polymers of vinyl chloride +39172310,"TUBES, PIPES AND HOSES, AND FITTINGS THEREFOR (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES), OF PLASTICS:Of polymers of vinyl chloride :Seamless tubes" +39172390,"TUBES, PIPES AND HOSES, AND FITTINGS THEREFOR (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES), OF PLASTICS:Of polymers of vinyl chloride :Other" +39172900,tubes pipes and hoses and fittings therefor for example joints elbows flanges of plastics artificial guts sausage casings of hardened protein or of cellulosic materials >> of other plastics +39172910,"TUBES, PIPES AND HOSES, AND FITTINGS THEREFOR (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES), OF PLASTICS:Of other plastics :Seamless tubes of copolymers of vinyl acetate and vinyl chloride" +39172920,"TUBES, PIPES AND HOSES, AND FITTINGS THEREFOR (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES), OF PLASTICS:Of other plastics :Seamless tubes of polymers and copolymers of polystyrene" +39172930,"TUBES, PIPES AND HOSES, AND FITTINGS THEREFOR (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES), OF PLASTICS:Of other plastics :Tubes of cellulose nitrate and celluloid, whether or not plasticised(OLD tariff)" +39172940,"TUBES, PIPES AND 
HOSES, AND FITTINGS THEREFOR (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES), OF PLASTICS:Of other plastics :Tubes of cellulose acetate or acetate butyrate(OLD tariff)" +39172950,"TUBES, PIPES AND HOSES, AND FITTINGS THEREFOR (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES), OF PLASTICS:Of other plastics :Tubes of vinyl plastics" +39172990,"TUBES, PIPES AND HOSES, AND FITTINGS THEREFOR (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES), OF PLASTICS:Of other plastics :Other" +39173100,"TUBES, PIPES AND HOSES, AND FITTINGS THEREFOR (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES), OF PLASTICS::Flexible tubes, pipes and hoses, having a minimum burst pressure of 27.6 MPa" +39173210,"TUBES, PIPES AND HOSES, AND FITTINGS THEREFOR (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES), OF PLASTICS::Of condensation or rearrangement polymerization products, whether or not chemically modified(OLD tariff)" +39173220,"TUBES, PIPES AND HOSES, AND FITTINGS THEREFOR (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES), OF PLASTICS::Of addition polymerisation products(OLD tariff)" +39173290,"TUBES, PIPES AND HOSES, AND FITTINGS THEREFOR (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES), OF PLASTICS::Other(OLD tariff)" +39173300,"TUBES, PIPES AND HOSES, AND FITTINGS THEREFOR (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES), OF PLASTICS::Other, not reinforced or otherwise combined with other materials, with fittings" +39173900,tubes pipes and hoses and fittings therefor for example joints elbows flanges of plastics artificial guts sausage casings of hardened protein or of cellulosic materials >> other +39173910,"TUBES, PIPES AND HOSES, AND FITTINGS THEREFOR (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES), OF PLASTICS:Other:Of condensation or rearrangement polymerization products, whether or not chemically modified" +39173920,"TUBES, PIPES AND HOSES, AND FITTINGS THEREFOR (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES), OF PLASTICS:Other:Of addition polymerisation products" +39173990,"TUBES, PIPES AND HOSES, AND FITTINGS THEREFOR (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES), OF PLASTICS:Other:Other" +39174000,"TUBES, PIPES AND HOSES, AND FITTINGS THEREFOR (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES), OF PLASTICS::Fittings" +39180000,floor coverings of plastics whether or not in rolls or in the form of tiles wall or ceiling coverings of plastics as defined in note 9 to this chapter +39181000,floor coverings of plastics whether or not in rolls or in the form of tiles wall or ceiling coverings of plastics as defined in note 9 to this chapter >> of polymers of vinyl chloride +39181010,"FLOOR COVERINGS OF PLASTICS, WHETHER OR NOT SELF-ADHESIVE, IN ROLLS OR IN THE FORM OF TILES; WALL OR CEILING COVERINGS OF PLASTICS, AS DEFINED IN NOTE 9 TO THIS CHAPTER:Of polymers of vinyl chloride :Wall or ceiling coverings combined with knitted or woven fabrics, nonwovens or felts" +39181090,"FLOOR COVERINGS OF PLASTICS, WHETHER OR NOT SELF-ADHESIVE, IN ROLLS OR IN THE FORM OF TILES; WALL OR CEILING COVERINGS OF PLASTICS, AS DEFINED IN NOTE 9 TO THIS CHAPTER:Of polymers of vinyl chloride :Other" +39189000,floor coverings of plastics whether or not in rolls or in the form of tiles wall or ceiling coverings of plastics as defined in note 9 to this chapter >> of other plastics +39189010,"FLOOR COVERINGS OF PLASTICS, WHETHER OR NOT SELF-ADHESIVE, IN ROLLS OR IN THE FORM OF TILES; WALL OR CEILING COVERINGS OF PLASTICS, AS DEFINED IN NOTE 9 TO THIS CHAPTER:Of other plastics :Floor coverings of linoxyne" +39189020,"FLOOR COVERINGS OF PLASTICS, WHETHER OR NOT SELF-ADHESIVE, IN ROLLS OR IN THE FORM OF TILES; WALL OR CEILING COVERINGS OF PLASTICS, AS 
DEFINED IN NOTE 9 TO THIS CHAPTER:Of other plastics :Wall or ceiling coverings combined with knitted or woven fabrics, nonwovens or felts" +39189090,"FLOOR COVERINGS OF PLASTICS, WHETHER OR NOT SELF-ADHESIVE, IN ROLLS OR IN THE FORM OF TILES; WALL OR CEILING COVERINGS OF PLASTICS, AS DEFINED IN NOTE 9 TO THIS CHAPTER:Of other plastics :Other" +39190000,plates sheets film foil tape strip and other flat shapes of in rolls plastics whether or not +39191000,"SELF-ADHESIVE PLATES, SHEETS, FILM, FOIL, TAPE, STRIP AND OTHER FLAT SHAPES, OF PLASTICS, WHETHER OR NOT IN ROLLS::In rolls of width not exceeding 20 cm" +39199000,plates sheets film foil tape strip and other flat shapes of in rolls plastics whether or not >> other +39199010,"SELF-ADHESIVE PLATES, SHEETS, FILM, FOIL, TAPE, STRIP AND OTHER FLAT SHAPES, OF PLASTICS, WHETHER OR NOT IN ROLLS:Other :Plastic stickers, whether or not printed, embossed, or impregnated" +39199020,"SELF-ADHESIVE PLATES, SHEETS, FILM, FOIL, TAPE, STRIP AND OTHER FLAT SHAPES, OF PLASTICS, WHETHER OR NOT IN ROLLS:Other :Cellulose adhesive tape" +39199090,"SELF-ADHESIVE PLATES, SHEETS, FILM, FOIL, TAPE, STRIP AND OTHER FLAT SHAPES, OF PLASTICS, WHETHER OR NOT IN ROLLS:Other :Other" +39200000,other plates sheets film foil and strip of plastics and not reinforced laminated supported or similarly combined with other materials +39201000,other plates sheets film foil and strip of plastics and not reinforced laminated supported or similarly combined with other materials >> of polymers of ethylene sheets of polyethylene +39201011,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of polymers of ethylene:Rigid, plain" +39201012,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of polymers of ethylene:Flexible, plain" +39201013,other plates sheets film foil and strip of plastics and not reinforced laminated supported or similarly combined with other materials >> of polymers of ethylene sheets of polyethylene >> geomembrane conforming to IS 16352 +39201019,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of polymers of ethylene:Other" +39201091,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of polymers of ethylene:Rigid, plain" +39201092,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of polymers of ethylene:Flexible, plain" +39201099,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of polymers of ethylene:Other" +39202000,other plates sheets film foil and strip of plastics and not reinforced laminated supported or similarly combined with other materials >> of polymers of propylene +39202010,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of polymers of propylene :Rigid, plain" +39202020,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of polymers
of propylene :Flexible, plain" +39202090,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of polymers of propylene :Other" +39203000,other plates sheets film foil and strip of plastics and not reinforced laminated supported or similarly combined with other materials >> of polymers of styrene +39203010,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of polymers of styrene :Rigid, plain" +39203020,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of polymers of styrene :Flexible, plain" +39203090,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of polymers of styrene :Other" +39204300,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS::Containing by weight not less than 6% of plasticizers" +39204900,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS::Other(OLD tariff)" +39205111,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of poly (methyl methacrylate):Rigid, plain(OLD tariff)" +39205112,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of poly (methyl methacrylate):Flexible, plain(OLD tariff)" +39205119,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of poly (methyl methacrylate):Other(OLD tariff)" +39205191,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of poly (methyl methacrylate):Rigid, plain(OLD tariff)" +39205192,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of poly (methyl methacrylate):Flexible, plain(OLD tariff)" +39205199,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of poly (methyl methacrylate):Other(OLD tariff)" +39205900,other plates sheets film foil and strip of plastics and not reinforced laminated supported or similarly combined with other materials >> other polyacrylate sheets +39205911,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Other :Rigid, plain" +39205912,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Other :Flexible, plain" +39205919,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Other :Other" +39205991,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT 
REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Other :Rigid, plain" +39205992,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Other :Flexible, plain" +39205999,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Other :Other" +39206100,other plates sheets film foil and strip of plastics and not reinforced laminated supported or similarly combined with other materials >> of polycarbonates +39206110,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of polycarbonates :Rigid, plain" +39206120,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of polycarbonates :Flexible, plain" +39206190,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of polycarbonates :Other" +39206200,other plates sheets film foil and strip of plastics and not reinforced laminated supported or similarly combined with other materials >> of poly ethylene terephthalate +39206210,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of poly (ethylene terephthalate):Rigid, plain" +39206220,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of poly (ethylene terephthalate):Flexible, plain" +39206290,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of poly (ethylene terephthalate):Other" +39206300,other plates sheets film foil and strip of plastics and not reinforced laminated supported or similarly combined with other materials >> of unsaturated polyesters +39206310,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of unsaturated polyesters :Rigid, plain" +39206320,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of unsaturated polyesters :Flexible, plain" +39206390,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of unsaturated polyesters :Other" +39206900,other plates sheets film foil and strip of plastics and not reinforced laminated supported or similarly combined with other materials >> of other polyesters packaging film +39206911,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other polyesters :Rigid, plain" +39206912,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other polyesters :Flexible, plain" +39206919,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH 
OTHER MATERIALS:Of other polyesters :Other" +39206921,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other polyesters :Rigid, plain" +39206922,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other polyesters :Flexible, plain" +39206929,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other polyesters :Other" +39206931,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other polyesters :Rigid, plain" +39206932,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other polyesters :Flexible, plain" +39206939,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other polyesters :Other" +39206991,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other polyesters :Rigid, plain" +39206992,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other polyesters :Flexible, plain" +39206999,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other polyesters :Other" +39207100,other plates sheets film foil and strip of plastics and not reinforced laminated supported or similarly combined with other materials >> of regenerated cellulose cello phane transparent +39207111,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of regenerated cellulose:Film" +39207119,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of regenerated cellulose:Other" +39207121,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of regenerated cellulose:Plain" +39207129,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of regenerated cellulose:Other" +39207191,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of regenerated cellulose:Rigid, plain" +39207192,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of regenerated cellulose:Flexible, plain" +39207199,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of regenerated cellulose:Other" +39207300,other plates sheets film foil and strip of plastics and not reinforced laminated supported or similarly combined with 
other materials >> of cellulose acetate sheet of cellulose acetate +39207311,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of cellulose acetate:Rigid, plain" +39207312,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of cellulose acetate:Flexible, plain" +39207319,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of cellulose acetate:Other" +39207321,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of cellulose acetate:Rigid, plain" +39207322,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of cellulose acetate:Flexible, plain" +39207329,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of cellulose acetate:Other" +39207391,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of cellulose acetate:Rigid, plain" +39207392,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of cellulose acetate:Flexible, plain" +39207399,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of cellulose acetate:Other" +39207900,other plates sheets film foil and strip of plastics and not reinforced laminated supported or similarly combined with other materials >> of other cellulose derivatives sheets of cellulose nitrate and celluloid whether or not plasticized +39207911,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other cellulose derivatives :Rigid, plain" +39207912,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other cellulose derivatives :Flexible, plain" +39207919,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other cellulose derivatives :Other" +39207991,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other cellulose derivatives :Rigid, plain" +39207992,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other cellulose derivatives :Flexible, plain" +39207999,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other cellulose derivatives :Other" +39209100,other plates sheets film foil and strip of plastics and not reinforced laminated supported or similarly combined with other materials >> of poly vinyl butyral +39209110,"OTHER PLATES, 
SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Rigid, plain" +39209111,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Rigid, plain(OLD tariff)" +39209112,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Flexible, plain(OLD tariff)" +39209119,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Other(OLD tariff)" +39209120,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Flexible, plain" +39209190,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Other" +39209200,other plates sheets film foil and strip of plastics and not reinforced laminated supported or similarly combined with other materials >> of polyamides poly amide fluoride film +39209211,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of polyamides:Rigid, plain" +39209212,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of polyamides:Flexible, plain" +39209219,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of polyamides:Other" +39209291,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of polyamides:Rigid, plain" +39209292,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of polyamides:Flexible, plain" +39209299,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of polyamides:Other" +39209300,other plates sheets film foil and strip of plastics and not reinforced laminated supported or similarly combined with other materials >> of +39209310,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of amino-resins :Rigid, plain" +39209320,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of amino-resins :Flexible, plain" +39209390,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of amino-resins :Other" +39209400,other plates sheets film foil and strip of plastics and not reinforced laminated supported or similarly combined with other materials >> of phenolic resins +39209410,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR 
SIMILARLY COMBINED WITH OTHER MATERIALS:Of phenolic resins :Rigid, plain" +39209420,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of phenolic resins :Flexible, plain" +39209490,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of phenolic resins :Other" +39209900,other plates sheets film foil and strip of plastics and not reinforced laminated supported or similarly combined with other materials >> of other plastics plates sheets film foil and strip of poly vinyl acetate +39209911,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Rigid, plain" +39209912,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Flexible, plain" +39209919,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Other" +39209921,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Rigid, plain" +39209922,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Flexible, plain" +39209929,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Other" +39209931,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Rigid, plain" +39209932,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Flexible, plain" +39209939,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Other" +39209941,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Rigid, plain" +39209942,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Flexible, plain" +39209949,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Other" +39209951,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Rigid, plain" +39209952,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Flexible, plain" +39209959,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, 
SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Other" +39209960,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Clicking boards for leather machinery" +39209991,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Rigid, plain" +39209992,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Flexible, plain" +39209999,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS, NON-CELLULAR AND NOT REINFORCED, LAMINATED, SUPPORTED OR SIMILARLY COMBINED WITH OTHER MATERIALS:Of other plastics:Other" +39210000,other plates sheets film foil and strip of plastics cellular +39211100,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS::Of polymers of styrene" +39211200,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS::Of polymers of vinyl chloride" +39211300,other plates sheets film foil and strip of plastics cellular >> of polyurethanes +39211310,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Of polyurethanes:Flexible" +39211390,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Of polyurethanes:Other" +39211400,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS::Of regenerated cellulose" +39211900,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS::Of other plastics" +39219000,other plates sheets film foil and strip of plastics cellular >> other +39219010,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Other :Thermocol" +39219021,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Other :Rigid, lacquered" +39219022,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Other :Flexible, lacquered" +39219023,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Other :Rigid, metallised" +39219024,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Other :Flexible, metallised" +39219025,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Other :Rigid, laminated" +39219026,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Other :Flexible, laminated" +39219029,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Other :Other" +39219031,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Other :Rigid, lacquered" +39219032,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Other :Flexible, lacquered" +39219033,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Other :Rigid, metallised" +39219034,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Other :Flexible, metallised" +39219035,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Other :Rigid, laminated" +39219036,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Other :Flexible, laminated" +39219039,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Other :Other" +39219091,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Other :Rigid, lacquered" +39219092,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Other :Flexible, lacquered" +39219093,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Other :Rigid, metallised" +39219094,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Other :Flexible, metallised" +39219095,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Other :Rigid, laminated" +39219096,"OTHER PLATES, 
SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Other :Flexible, laminated" +39219099,"OTHER PLATES, SHEETS, FILM, FOIL AND STRIP, OF PLASTICS:Other :Other" +39220000,baths sinks bidets lavatory pans seats and covers flushing cisterns and similar sanitary ware of plastics +39221000,"BATHS, SHOWER-BATHS, SINKS, WASH-BASINS, BIDETS, LAVATORY PANS, SEATS AND COVERS, FLUSHING CISTERNS AND SIMILAR SANITARY WARE, OF PLASTICS::Baths, shower-baths, sinks and wash basins" +39222000,"BATHS, SHOWER-BATHS, SINKS, WASH-BASINS, BIDETS, LAVATORY PANS, SEATS AND COVERS, FLUSHING CISTERNS AND SIMILAR SANITARY WARE, OF PLASTICS::Lavatory seats and covers" +39229000,"BATHS, SHOWER-BATHS, SINKS, WASH-BASINS, BIDETS, LAVATORY PANS, SEATS AND COVERS, FLUSHING CISTERNS AND SIMILAR SANITARY WARE, OF PLASTICS::Other" +39230000,articles for the conveyance or packing of goods of plastics stoppers lids caps and other closures of plastics +39231000,articles for the conveyance or packing of goods of plastics stoppers lids caps and other closures of plastics >> boxes cases crates and similar articles +39231010,"ARTICLES FOR THE CONVEYANCE OR PACKING OF GOODS, OF PLASTICS; STOPPERS, LIDS, CAPS AND OTHER CLOSURES, OF PLASTICS:Boxes, cases, crates and similar articles:Plastic containers for audio or video cassettes, cassette tapes, floppy disk and similar articles" +39231020,"ARTICLES FOR THE CONVEYANCE OR PACKING OF GOODS, OF PLASTICS; STOPPERS, LIDS, CAPS AND OTHER CLOSURES, OF PLASTICS:Boxes, cases, crates and similar articles:Watch-box, jewellery box and similar containers of plastics" +39231030,"ARTICLES FOR THE CONVEYANCE OR PACKING OF GOODS, OF PLASTICS; STOPPERS, LIDS, CAPS AND OTHER CLOSURES, OF PLASTICS:Boxes, cases, crates and similar articles:Insulated ware" +39231040,"ARTICLES FOR THE CONVEYANCE OR PACKING OF GOODS, OF PLASTICS; STOPPERS, LIDS, CAPS AND OTHER CLOSURES, OF PLASTICS:Boxes, cases, crates and similar articles:Packing for accommodating connectors" +39231090,"ARTICLES FOR THE CONVEYANCE OR PACKING OF GOODS, OF PLASTICS; STOPPERS, LIDS, CAPS AND OTHER CLOSURES, OF PLASTICS:Boxes, cases, crates and similar articles:Other" +39232100,"ARTICLES FOR THE CONVEYANCE OR PACKING OF GOODS, OF PLASTICS; STOPPERS, LIDS, CAPS AND OTHER CLOSURES, OF PLASTICS::Of polymers of ethylene" +39232900,articles for the conveyance or packing of goods of plastics stoppers lids caps and other closures of plastics >> of other plastics +39232910,"ARTICLES FOR THE CONVEYANCE OR PACKING OF GOODS, OF PLASTICS; STOPPERS, LIDS, CAPS AND OTHER CLOSURES, OF PLASTICS:Of other plastics:Of poly (vinyl chloride)" +39232990,"ARTICLES FOR THE CONVEYANCE OR PACKING OF GOODS, OF PLASTICS; STOPPERS, LIDS, CAPS AND OTHER CLOSURES, OF PLASTICS:Of other plastics:Other" +39233000,articles for the conveyance or packing of goods of plastics stoppers lids caps and other closures of plastics >> carboys bottles flasks and similar articles +39233010,"ARTICLES FOR THE CONVEYANCE OR PACKING OF GOODS, OF PLASTICS; STOPPERS, LIDS, CAPS AND OTHER CLOSURES, OF PLASTICS:Carboys, bottles, flasks and similar articles:Insulated ware" +39233090,"ARTICLES FOR THE CONVEYANCE OR PACKING OF GOODS, OF PLASTICS; STOPPERS, LIDS, CAPS AND OTHER CLOSURES, OF PLASTICS:Carboys, bottles, flasks and similar articles:Other" +39234000,"ARTICLES FOR THE CONVEYANCE OR PACKING OF GOODS, OF PLASTICS; STOPPERS, LIDS, CAPS AND OTHER CLOSURES, OF PLASTICS::Spools, cops, bobbins and similar supports" +39235000,articles for the conveyance or packing of goods of plastics stoppers lids caps and 
other closures of plastics >> stoppers lids caps and other closures +39235010,"ARTICLES FOR THE CONVEYANCE OR PACKING OF GOODS, OF PLASTICS; STOPPERS, LIDS, CAPS AND OTHER CLOSURES, OF PLASTICS:Stoppers, lids, caps and other closures :Caps and closures for bottles" +39235090,"ARTICLES FOR THE CONVEYANCE OR PACKING OF GOODS, OF PLASTICS; STOPPERS, LIDS, CAPS AND OTHER CLOSURES, OF PLASTICS:Stoppers, lids, caps and other closures :Other" +39239000,articles for the conveyance or packing of goods of plastics stoppers lids caps and other closures of plastics >> other +39239010,"ARTICLES FOR THE CONVEYANCE OR PACKING OF GOODS, OF PLASTICS; STOPPERS, LIDS, CAPS AND OTHER CLOSURES, OF PLASTICS:Other :Insulated ware" +39239020,"ARTICLES FOR THE CONVEYANCE OR PACKING OF GOODS, OF PLASTICS; STOPPERS, LIDS, CAPS AND OTHER CLOSURES, OF PLASTICS:Other :Aseptic bags" +39239090,"ARTICLES FOR THE CONVEYANCE OR PACKING OF GOODS, OF PLASTICS; STOPPERS, LIDS, CAPS AND OTHER CLOSURES, OF PLASTICS:Other :Other" +39240000,tableware kitchenware other household articles and hygienic or toilet articles of plastics +39241000,tableware kitchenware other household articles and hygienic or toilet articles of plastics >> tableware and kitchenware +39241010,"TABLEWARE, KITCHENWARE, OTHER HOUSEHOLD ARTICLES AND TOILET ARTICLES, OF PLASTICS:Tableware and kitchenware :Insulated ware" +39241090,"TABLEWARE, KITCHENWARE, OTHER HOUSEHOLD ARTICLES AND TOILET ARTICLES, OF PLASTICS:Tableware and kitchenware :Other" +39249000,tableware kitchenware other household articles and hygienic or toilet articles of plastics >> other +39249010,"TABLEWARE, KITCHENWARE, OTHER HOUSEHOLD ARTICLES AND TOILET ARTICLES, OF PLASTICS:Other :Toilet articles" +39249020,"TABLEWARE, KITCHENWARE, OTHER HOUSEHOLD ARTICLES AND TOILET ARTICLES, OF PLASTICS:Other :Insulated ware" +39249090,"TABLEWARE, KITCHENWARE, OTHER HOUSEHOLD ARTICLES AND TOILET ARTICLES, OF PLASTICS:Other :Other" +39250000,builders ware of plastics not elsewhere specified or included +39251000,"BUILDERS WARE OF PLASTICS, NOT ELSEWHERE SPECIFIED OR INCLUDED::Reservoirs, tanks, vats and similar containers, of a capacity exceeding 300 l" +39252000,"BUILDERS WARE OF PLASTICS, NOT ELSEWHERE SPECIFIED OR INCLUDED::Doors, windows and their frames and" +39253000,"BUILDERS WARE OF PLASTICS, NOT ELSEWHERE SPECIFIED OR INCLUDED::Shutters, blinds (including venetian blinds) and similar articles and parts thereof" +39259000,builders ware of plastics not elsewhere specified or included >> other +39259010,"BUILDERS WARE OF PLASTICS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Of polyurethane" +39259090,"BUILDERS WARE OF PLASTICS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Other" +39260000,other articles of plastics and articles of other materials of headings 3901 to 3914 +39261000,other articles of plastics and articles of other materials of headings 3901 to 3914 >> office or school supplies office supplies of a kind classified as stationery other than pins clips and writing instruments +39261011,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Office or school supplies:Of polyurethane foam +39261019,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Office or school supplies:Other +39261091,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Office or school supplies:Of polyurethane foam +39261099,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Office or school 
supplies:Other +39262000,other articles of plastics and articles of other materials of headings 3901 to 3914 >> articles of apparel and clothing accessories including gloves mittens and mitts gloves +39262011,"OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Articles of apparel and clothing accessories (including gloves, mittens and mitts) :Disposable" +39262019,"OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Articles of apparel and clothing accessories (including gloves, mittens and mitts) :Non-disposable" +39262021,"OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Articles of apparel and clothing accessories (including gloves, mittens and mitts) :Of polyurethane foam" +39262029,"OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Articles of apparel and clothing accessories (including gloves, mittens and mitts) :Other" +39262031,"OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Articles of apparel and clothing accessories (including gloves, mittens and mitts) :Of polyurethane foam" +39262039,"OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Articles of apparel and clothing accessories (including gloves, mittens and mitts) :Other" +39262041,"OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Articles of apparel and clothing accessories (including gloves, mittens and mitts) :Of polyurethane foam" +39262049,"OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Articles of apparel and clothing accessories (including gloves, mittens and mitts) :Other" +39262091,"OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Articles of apparel and clothing accessories (including gloves, mittens and mitts) :Of polyurethane foam" +39262099,"OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Articles of apparel and clothing accessories (including gloves, mittens and mitts) :Other" +39263000,other articles of plastics and articles of other materials of headings 3901 to 3914 >> fittings for furniture coach work or the like +39263010,"OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Fittings for furniture, coach work or the like :Of polyurethane foam" +39263090,"OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Fittings for furniture, coach work or the like :Other" +39264000,other articles of plastics and articles of other materials of headings 3901 to 3914 >> statuettes and other ornamental articles bangles +39264011,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Statuettes and other ornamental articles :Of polyurethane foam +39264019,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Statuettes and other ornamental articles :Other +39264021,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Statuettes and other ornamental articles :Of polyurethane foam +39264029,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Statuettes and other ornamental articles :Other +39264031,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Statuettes and other ornamental articles :Of polyurethane foam +39264039,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER 
MATERIALS OF HEADINGS 3901 TO 3914:Statuettes and other ornamental articles :Other +39264041,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Statuettes and other ornamental articles :Of polyurethane foam +39264049,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Statuettes and other ornamental articles :Other +39264051,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Statuettes and other ornamental articles :Of polyurethane foam +39264059,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Statuettes and other ornamental articles :Other +39264060,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Statuettes and other ornamental articles :Sequine +39264091,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Statuettes and other ornamental articles :Of polyurethane foam +39264099,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Statuettes and other ornamental articles :Other +39269000,other articles of plastics and articles of other materials of headings 3901 to 3914 >> other +39269010,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Other :PVC belt conveyor +39269021,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Other :Of polyurethane foam +39269029,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Other :Other +39269031,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Other :Of polyurethane foam +39269039,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Other :Other +39269041,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Other :Of polyurethane foam +39269049,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Other :Other +39269051,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Other :Of polyurethane foam +39269059,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Other :Other +39269061,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Other :Of polyurethane foam +39269069,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Other :Other +39269071,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Other :Of polyurethane foam +39269079,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Other :Other +39269080,"OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Other :Polypropylene articles, not elsewhere specified or included" +39269091,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Other :Of polyurethane foam +39269099,OTHER ARTICLES OF PLASTICS AND ARTICLES OF OTHER MATERIALS OF HEADINGS 3901 TO 3914:Other :Other +40010000,natural rubber balata guayule chicle and similar natural gums in primary forms or in plates sheets or strip +40011000,natural rubber balata guayule chicle and similar natural gums in primary forms or in plates sheets or strip >> latex whether or not pre natural rubber vulcanised +40011010,"NATURAL RUBBER, BALATA, GUTTA-PERCHA, GUAYULE, CHICLE AND SIMILAR NATURAL GUMS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP:Natural rubber 
latex, whether or not pre-vulcanised :Prevulcanised" +40011020,"NATURAL RUBBER, BALATA, GUTTA-PERCHA, GUAYULE, CHICLE AND SIMILAR NATURAL GUMS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP:Natural rubber latex, whether or not pre-vulcanised :Other than prevulcanised" +40012100,"NATURAL RUBBER, BALATA, GUTTA-PERCHA, GUAYULE, CHICLE AND SIMILAR NATURAL GUMS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP::Smoked sheets" +40012200,"NATURAL RUBBER, BALATA, GUTTA-PERCHA, GUAYULE, CHICLE AND SIMILAR NATURAL GUMS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP::Technically specified natural rubber (TSNR)" +40012900,natural rubber balata guayule chicle and similar natural gums in primary forms or in plates sheets or strip >> other +40012910,"NATURAL RUBBER, BALATA, GUTTA-PERCHA, GUAYULE, CHICLE AND SIMILAR NATURAL GUMS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP:Other :Hevea" +40012920,"NATURAL RUBBER, BALATA, GUTTA-PERCHA, GUAYULE, CHICLE AND SIMILAR NATURAL GUMS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP:Other :Pale crepe" +40012930,"NATURAL RUBBER, BALATA, GUTTA-PERCHA, GUAYULE, CHICLE AND SIMILAR NATURAL GUMS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP:Other :Estate brown crepe" +40012940,"NATURAL RUBBER, BALATA, GUTTA-PERCHA, GUAYULE, CHICLE AND SIMILAR NATURAL GUMS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP:Other :Oil extended natural rubber" +40012990,"NATURAL RUBBER, BALATA, GUTTA-PERCHA, GUAYULE, CHICLE AND SIMILAR NATURAL GUMS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP:Other :Other" +40013000,"NATURAL RUBBER, BALATA, GUTTA-PERCHA, GUAYULE, CHICLE AND SIMILAR NATURAL GUMS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP::Balata, gutta-percha, guayule, chicle and similar natural gums" +40021100,"SYNTHETIC RUBBER AND FACTICE DERIVED FROM OILS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP; MIXTURES OF ANY PRODUCT OF HEADING 4001 WITH ANY PRODUCT OF THIS HEADING, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP::Latex(OLD tariff)" +40021910,"SYNTHETIC RUBBER AND FACTICE DERIVED FROM OILS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP; MIXTURES OF ANY PRODUCT OF HEADING 4001 WITH ANY PRODUCT OF THIS HEADING, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP:Other :Oil extended styrene butadiene rubber(OLD tariff)" +40021920,"SYNTHETIC RUBBER AND FACTICE DERIVED FROM OILS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP; MIXTURES OF ANY PRODUCT OF HEADING 4001 WITH ANY PRODUCT OF THIS HEADING, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP:Other :Styrene butadiene rubber with styrene content exceeding 50%(OLD tariff)" +40021930,"SYNTHETIC RUBBER AND FACTICE DERIVED FROM OILS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP; MIXTURES OF ANY PRODUCT OF HEADING 4001 WITH ANY PRODUCT OF THIS HEADING, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP:Other :Styrene butadiene styrene oil bound copolymer(OLD tariff)" +40021990,"SYNTHETIC RUBBER AND FACTICE DERIVED FROM OILS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP; MIXTURES OF ANY PRODUCT OF HEADING 4001 WITH ANY PRODUCT OF THIS HEADING, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP:Other :Other(OLD tariff)" +40022000,"SYNTHETIC RUBBER AND FACTICE DERIVED FROM OILS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP; MIXTURES OF ANY PRODUCT OF HEADING 4001 WITH ANY PRODUCT OF THIS HEADING, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP::Butadiene rubber (BR)(OLD tariff)" +40023100,"SYNTHETIC RUBBER AND FACTICE DERIVED FROM OILS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP; MIXTURES OF ANY PRODUCT OF HEADING 4001 WITH ANY
PRODUCT OF THIS HEADING, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP::Isobutene-isoprene (butyl) rubber (IIR)(OLD tariff)" +40023900,"SYNTHETIC RUBBER AND FACTICE DERIVED FROM OILS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP; MIXTURES OF ANY PRODUCT OF HEADING 4001 WITH ANY PRODUCT OF THIS HEADING, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP::Other(OLD tariff)" +40024100,"SYNTHETIC RUBBER AND FACTICE DERIVED FROM OILS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP; MIXTURES OF ANY PRODUCT OF HEADING 4001 WITH ANY PRODUCT OF THIS HEADING, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP::Latex(OLD tariff)" +40024900,"SYNTHETIC RUBBER AND FACTICE DERIVED FROM OILS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP; MIXTURES OF ANY PRODUCT OF HEADING 4001 WITH ANY PRODUCT OF THIS HEADING, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP::Other(OLD tariff)" +40025100,"SYNTHETIC RUBBER AND FACTICE DERIVED FROM OILS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP; MIXTURES OF ANY PRODUCT OF HEADING 4001 WITH ANY PRODUCT OF THIS HEADING, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP::Latex(OLD tariff)" +40025900,"SYNTHETIC RUBBER AND FACTICE DERIVED FROM OILS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP; MIXTURES OF ANY PRODUCT OF HEADING 4001 WITH ANY PRODUCT OF THIS HEADING, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP::Other(OLD tariff)" +40026000,"SYNTHETIC RUBBER AND FACTICE DERIVED FROM OILS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP; MIXTURES OF ANY PRODUCT OF HEADING 4001 WITH ANY PRODUCT OF THIS HEADING, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP::Isoprene rubber (IR)(OLD tariff)" +40027000,"SYNTHETIC RUBBER AND FACTICE DERIVED FROM OILS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP; MIXTURES OF ANY PRODUCT OF HEADING 4001 WITH ANY PRODUCT OF THIS HEADING, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP::Ethylene-propylene-non-conjugated diene(OLD tariff)" +40028010,"SYNTHETIC RUBBER AND FACTICE DERIVED FROM OILS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP; MIXTURES OF ANY PRODUCT OF HEADING 4001 WITH ANY PRODUCT OF THIS HEADING, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP:Mixtures of any product of heading 40 01 with any product of this heading :Latex(OLD tariff)" +40028020,"SYNTHETIC RUBBER AND FACTICE DERIVED FROM OILS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP; MIXTURES OF ANY PRODUCT OF HEADING 4001 WITH ANY PRODUCT OF THIS HEADING, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP:Mixtures of any product of heading 40 01 with any product of this heading :Chemically modified form of natural rubber including graft rubber(OLD tariff)" +40028090,"SYNTHETIC RUBBER AND FACTICE DERIVED FROM OILS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP; MIXTURES OF ANY PRODUCT OF HEADING 4001 WITH ANY PRODUCT OF THIS HEADING, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP:Mixtures of any product of heading 40 01 with any product of this heading :Other(OLD tariff)" +40029100,"SYNTHETIC RUBBER AND FACTICE DERIVED FROM OILS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP; MIXTURES OF ANY PRODUCT OF HEADING 4001 WITH ANY PRODUCT OF THIS HEADING, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP::Latex(OLD tariff)" +40029910,"SYNTHETIC RUBBER AND FACTICE DERIVED FROM OILS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP; MIXTURES OF ANY PRODUCT OF HEADING 4001 WITH ANY PRODUCT OF THIS HEADING, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP:Other :Factice (rubber substitute derived from oil)(OLD tariff)" +40029920,"SYNTHETIC RUBBER AND FACTICE DERIVED FROM OILS, IN PRIMARY FORMS
OR IN PLATES, SHEETS OR STRIP; MIXTURES OF ANY PRODUCT OF HEADING 4001 WITH ANY PRODUCT OF THIS HEADING, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP:Other :Tread rubber compound, cushion compound, cushion gum and tread gum for resoling or repairing or retreading rubber tyres(OLD tariff)" +40029990,"SYNTHETIC RUBBER AND FACTICE DERIVED FROM OILS, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP; MIXTURES OF ANY PRODUCT OF HEADING 4001 WITH ANY PRODUCT OF THIS HEADING, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP:Other :Other(OLD tariff)" +40030000,"::RECLAIMED RUBBER IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP" +40040000,"::WASTE, PARINGS AND SCRAP OF RUBBER (OTHER THAN HARD RUBBER) AND POWDERS AND GRANULES OBTAINED THEREFROM" +40050000,compounded rubber unvulcanised in primary forms or in plates sheets or strip +40051000,"COMPOUNDED RUBBER, UNVULCANISED, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP::Compounded with carbon black or silica" +40052000,compounded rubber unvulcanised in primary forms or in plates sheets or strip >> solutions other than those of dispersions sub heading 4005 10 +40052010,"COMPOUNDED RUBBER, UNVULCANISED, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP:Solutions; dispersions other than those of sub-heading 4005 10 :Can sealing compound" +40052090,"COMPOUNDED RUBBER, UNVULCANISED, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP:Solutions; dispersions other than those of sub-heading 4005 10 :Other" +40059100,compounded rubber unvulcanised in primary forms or in plates sheets or strip >> plates sheets and strip +40059110,"COMPOUNDED RUBBER, UNVULCANISED, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP:Plates, sheets and strip:Hospital sheeting" +40059190,"COMPOUNDED RUBBER, UNVULCANISED, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP:Plates, sheets and strip:Other(OLD tariff)" +40059900,compounded rubber unvulcanised in primary forms or in plates sheets or strip >> other +40059910,"COMPOUNDED RUBBER, UNVULCANISED, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP:Other :Granules of unvulcanised natural or synthetic rubber, compounded, ready for vulcanisation" +40059990,"COMPOUNDED RUBBER, UNVULCANISED, IN PRIMARY FORMS OR IN PLATES, SHEETS OR STRIP:Other :Other(OLD tariff)" +40060000,other forms for example rods tubes and profile shapes and articles for example discs and rings of unvulcanised rubber +40061000,"OTHER FORMS (FOR EXAMPLE, RODS, TUBES AND PROFILE SHAPES) AND ARTICLES (FOR EXAMPLE, DISCS AND RINGS), OF UNVULCANISED RUBBER::""Camel-back"" strips for retreading rubber tyres" +40069000,other forms for example rods tubes and profile shapes and articles for example discs and rings of unvulcanised rubber >> other +40069010,"OTHER FORMS (FOR EXAMPLE, RODS, TUBES AND PROFILE SHAPES) AND ARTICLES (FOR EXAMPLE, DISCS AND RINGS), OF UNVULCANISED RUBBER:Other :Thread, not covered" +40069090,"OTHER FORMS (FOR EXAMPLE, RODS, TUBES AND PROFILE SHAPES) AND ARTICLES (FOR EXAMPLE, DISCS AND RINGS), OF UNVULCANISED RUBBER:Other :Other" +40070000,vulcanised rubber thread and cord vulcanised rubber thread and cord +40070010,"VULCANISED RUBBER THREAD AND CORD:Vulcanised rubber thread and cord:Thread, not covered" +40070020,"VULCANISED RUBBER THREAD AND CORD:Vulcanised rubber thread and cord:Cord, not covered" +40070090,VULCANISED RUBBER THREAD AND CORD:Vulcanised rubber thread and cord:Other +40080000,vulcanised rubber other than hard rubber of cellular rubber 29 latex foam sponge 29 tread rubber and tread packing strip for resoling or repairing or retreading rubber tyres 29
other +40081100,vulcanised rubber other than hard rubber of cellular rubber 29 latex foam sponge 29 tread rubber and tread packing strip for resoling or repairing or retreading rubber tyres 29 other >> plates sheets and strip +40081110,"PLATES, SHEETS, STRIP, RODS AND PROFILE SHAPES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Plates, sheets and strip :Of micro-cellular rubber" +40081190,"PLATES, SHEETS, STRIP, RODS AND PROFILE SHAPES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Plates, sheets and strip :Other" +40081900,vulcanised rubber other than hard rubber of cellular rubber 29 latex foam sponge 29 tread rubber and tread packing strip for resoling or repairing or retreading rubber tyres 29 other >> other +40081910,"PLATES, SHEETS, STRIP, RODS AND PROFILE SHAPES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Other :Blocks of micro-cellular rubber but not of latex foam sponge, used in the manufacture of soles, heels or soles and heels combined, for footwear" +40081990,"PLATES, SHEETS, STRIP, RODS AND PROFILE SHAPES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Other :Other" +40082100,vulcanised rubber other than hard rubber of cellular rubber 29 latex foam sponge 29 tread rubber and tread packing strip for resoling or repairing or retreading rubber tyres 29 other >> plates sheets and strip +40082110,"PLATES, SHEETS, STRIP, RODS AND PROFILE SHAPES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Plates, sheets and strip :Used in the manufacture of soles, heels or soles and heels combined, for footwear" +40082120,"PLATES, SHEETS, STRIP, RODS AND PROFILE SHAPES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Plates, sheets and strip :For resoling or repairing or retreading rubber tyres" +40082190,"PLATES, SHEETS, STRIP, RODS AND PROFILE SHAPES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Plates, sheets and strip :Other" +40082900,vulcanised rubber other than hard rubber of cellular rubber 29 latex foam sponge 29 tread rubber and tread packing strip for resoling or repairing or retreading rubber tyres 29 other >> other +40082910,"PLATES, SHEETS, STRIP, RODS AND PROFILE SHAPES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Other :Rubber sheets and resin rubber sheets for soles and heels" +40082920,"PLATES, SHEETS, STRIP, RODS AND PROFILE SHAPES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Other :Blocks used in the manufacture of soles, heels or soles and heels combined, for footwear" +40082930,"PLATES, SHEETS, STRIP, RODS AND PROFILE SHAPES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Other :Latex foam sponge(OLD tariff)" +40082940,"PLATES, SHEETS, STRIP, RODS AND PROFILE SHAPES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Other :Tread rubber and tread packing strip for resoling or repairing or retreading rubber tyres(OLD tariff)" +40082990,"PLATES, SHEETS, STRIP, RODS AND PROFILE SHAPES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Other :Other(OLD tariff)" +40090000,tubes pipes and hoses of vulcanised rubber other than hard rubber with or without their fittings for example joints elbows flanges not reinforced or otherwise combined with other materials 11 without fittings 12 with fittings reinforced or otherwise combined only with metal 21 without fittings 22 with fittings o reinforced or otherwise combined nly with textile materials 31 without fittings 32 with fittings w reinforced or otherwise combined ith other materials 41 without fittings 4009 42 00 with fittings +40091100,"TUBES, PIPES AND HOSES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER, WITH OR WITHOUT THEIR FITTINGS (FOR EXAMPLE, JOINTS, 
ELBOWS, FLANGES)::Without fittings(OLD tariff)" +40091200,"TUBES, PIPES AND HOSES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER, WITH OR WITHOUT THEIR FITTINGS (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES)::With fittings(OLD tariff)" +40092100,"TUBES, PIPES AND HOSES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER, WITH OR WITHOUT THEIR FITTINGS (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES)::Without fittings(OLD tariff)" +40092200,"TUBES, PIPES AND HOSES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER, WITH OR WITHOUT THEIR FITTINGS (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES)::With fittings(OLD tariff)" +40093100,"TUBES, PIPES AND HOSES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER, WITH OR WITHOUT THEIR FITTINGS (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES)::Without fittings(OLD tariff)" +40093200,"TUBES, PIPES AND HOSES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER, WITH OR WITHOUT THEIR FITTINGS (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES)::With fittings(OLD tariff)" +40094100,"TUBES, PIPES AND HOSES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER, WITH OR WITHOUT THEIR FITTINGS (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES)::Without fittings(OLD tariff)" +40094200,"TUBES, PIPES AND HOSES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER, WITH OR WITHOUT THEIR FITTINGS (FOR EXAMPLE, JOINTS, ELBOWS, FLANGES)::With fittings(OLD tariff)" +40100000,conveyor or transmission belts or belting of vulcanised rubber conveyor belts or belting 4010 11 reinforced only with metal 11 where the rubber compound content is less than 25 by weight 11 other 4010 12 reinforced only with textile materials 12 where the rubber compound content is less than 25 by weight 12 other 4010 19 other 19 where the rubber compound content is less than 25 by weight 19 other transmission belts or belting +40101110,CONVEYOR OR TRANSMISSION BELTS OR BELTING OF VULCANISED RUBBER:Reinforced only with metal :Where the rubber compound content is less than 25% by weight(OLD tariff) +40101190,CONVEYOR OR TRANSMISSION BELTS OR BELTING OF VULCANISED RUBBER:Reinforced only with metal :Other(OLD tariff) +40101210,CONVEYOR OR TRANSMISSION BELTS OR BELTING OF VULCANISED RUBBER:Reinforced only with textile materials :Where the rubber compound content is less than 25% by weight(OLD tariff) +40101290,CONVEYOR OR TRANSMISSION BELTS OR BELTING OF VULCANISED RUBBER:Reinforced only with textile materials :Other(OLD tariff) +40101910,CONVEYOR OR TRANSMISSION BELTS OR BELTING OF VULCANISED RUBBER:Other :Where the rubber compound content is less than 25% by weight(OLD tariff) +40101990,CONVEYOR OR TRANSMISSION BELTS OR BELTING OF VULCANISED RUBBER:Other :Other(OLD tariff) +40103100,conveyor or transmission belts or belting of vulcanised rubber conveyor belts or belting 4010 11 reinforced only with metal 11 where the rubber compound content is less than 25 by weight 11 other 4010 12 reinforced only with textile materials 12 where the rubber compound content is less than 25 by weight 12 other 4010 19 other 19 where the rubber compound content is less than 25 by weight 19 other transmission belts or belting >> endless transmission belts of trapezoidal of an outside circumference exceeding 60 cm but not exceeding 180 cm +40103110,"CONVEYOR OR TRANSMISSION BELTS OR BELTING OF VULCANISED RUBBER:Endless transmission belts of trapezoidal cross-section (V-belts), V-ribbed, of an outside circumference exceeding 60 cm but not exceeding 180 cm:Where the rubber compound content is less than 25% by weight" +40103190,"CONVEYOR OR TRANSMISSION BELTS OR BELTING OF VULCANISED RUBBER:Endless transmission belts of 
trapezoidal cross-section (V-belts), V-ribbed, of an outside circumference exceeding 60 cm but not exceeding 180 cm:Other" +40103200,conveyor or transmission belts or belting of vulcanised rubber conveyor belts or belting 4010 11 reinforced only with metal 11 where the rubber compound content is less than 25 by weight 11 other 4010 12 reinforced only with textile materials 12 where the rubber compound content is less than 25 by weight 12 other 4010 19 other 19 where the rubber compound content is less than 25 by weight 19 other transmission belts or belting >> endless transmission belts of trapezoidal cross section other than of an outside circumference exceeding 60 cm but not exceeding 180 cm +40103210,"CONVEYOR OR TRANSMISSION BELTS OR BELTING OF VULCANISED RUBBER:Endless transmission belts of trapezoidal crosssection (V-belts), other than V-ribbed, of an outside circumference exceeding 60 cm but not exceeding 180 cm :Where the rubber compound content is less than 25% by weight" +40103290,"CONVEYOR OR TRANSMISSION BELTS OR BELTING OF VULCANISED RUBBER:Endless transmission belts of trapezoidal crosssection (V-belts), other than V-ribbed, of an outside circumference exceeding 60 cm but not exceeding 180 cm :Other" +40103300,conveyor or transmission belts or belting of vulcanised rubber conveyor belts or belting 4010 11 reinforced only with metal 11 where the rubber compound content is less than 25 by weight 11 other 4010 12 reinforced only with textile materials 12 where the rubber compound content is less than 25 by weight 12 other 4010 19 other 19 where the rubber compound content is less than 25 by weight 19 other transmission belts or belting >> endless transmission belts of trapezoidal crosssection of an outside circumference exceeding 180 cm but not exceeding 240 cm +40103310,"CONVEYOR OR TRANSMISSION BELTS OR BELTING OF VULCANISED RUBBER:Endless transmission belts of trapezoidal cross-section (V-belts), V-ribbed, of an outside circumference exceeding 180 cm but not exceeding 240 cm:Where the rubber compound content is less than 25% by weight" +40103390,"CONVEYOR OR TRANSMISSION BELTS OR BELTING OF VULCANISED RUBBER:Endless transmission belts of trapezoidal cross-section (V-belts), V-ribbed, of an outside circumference exceeding 180 cm but not exceeding 240 cm:Other" +40103400,conveyor or transmission belts or belting of vulcanised rubber conveyor belts or belting 4010 11 reinforced only with metal 11 where the rubber compound content is less than 25 by weight 11 other 4010 12 reinforced only with textile materials 12 where the rubber compound content is less than 25 by weight 12 other 4010 19 other 19 where the rubber compound content is less than 25 by weight 19 other transmission belts or belting >> endless transmission belts of trapezoidal cros s section other than of an outside circumference exceeding 180 cm but not exceeding 240 cm +40103410,"CONVEYOR OR TRANSMISSION BELTS OR BELTING OF VULCANISED RUBBER:Endless transmission belts of trapezoidal crosssection (V-belts), other than V-ribbed, of an outside circumference exceeding 180 cm but not exceeding 240 cm :Where the rubber compound content is less than 25% by weight" +40103490,"CONVEYOR OR TRANSMISSION BELTS OR BELTING OF VULCANISED RUBBER:Endless transmission belts of trapezoidal crosssection (V-belts), other than V-ribbed, of an outside circumference exceeding 180 cm but not exceeding 240 cm :Other" +40103500,conveyor or transmission belts or belting of vulcanised rubber conveyor belts or belting 4010 11 reinforced only 
with metal 11 where the rubber compound content is less than 25 by weight 11 other 4010 12 reinforced only with textile materials 12 where the rubber compound content is less than 25 by weight 12 other 4010 19 other 19 where the rubber compound content is less than 25 by weight 19 other transmission belts or belting >> endless synchronous belts of an outside circumference exceeding 60 cm but not exceeding 150 cm +40103510,CONVEYOR OR TRANSMISSION BELTS OR BELTING OF VULCANISED RUBBER:Endless synchronous belts of an outside circumference exceeding 60 cm but not exceeding 150 cm :Where the rubber compound content is less than 25% by weight +40103590,CONVEYOR OR TRANSMISSION BELTS OR BELTING OF VULCANISED RUBBER:Endless synchronous belts of an outside circumference exceeding 60 cm but not exceeding 150 cm :Other +40103600,conveyor or transmission belts or belting of vulcanised rubber conveyor belts or belting 4010 11 reinforced only with metal 11 where the rubber compound content is less than 25 by weight 11 other 4010 12 reinforced only with textile materials 12 where the rubber compound content is less than 25 by weight 12 other 4010 19 other 19 where the rubber compound content is less than 25 by weight 19 other transmission belts or belting >> endless synchronous belts of an outside circumference exceeding 150 cm but not exceeding 198 cm +40103610,CONVEYOR OR TRANSMISSION BELTS OR BELTING OF VULCANISED RUBBER:Endless synchronous belts of an outside circumference exceeding 150 cm but not exceeding 198 cm :Where the rubber compound content is less than 25% by weight +40103690,CONVEYOR OR TRANSMISSION BELTS OR BELTING OF VULCANISED RUBBER:Endless synchronous belts of an outside circumference exceeding 150 cm but not exceeding 198 cm :Other +40103900,conveyor or transmission belts or belting of vulcanised rubber conveyor belts or belting 4010 11 reinforced only with metal 11 where the rubber compound content is less than 25 by weight 11 other 4010 12 reinforced only with textile materials 12 where the rubber compound content is less than 25 by weight 12 other 4010 19 other 19 where the rubber compound content is less than 25 by weight 19 other transmission belts or belting >> other where the rubber compound content is less than 25 by weight +40103911,CONVEYOR OR TRANSMISSION BELTS OR BELTING OF VULCANISED RUBBER:Other :Endless flat belt +40103912,CONVEYOR OR TRANSMISSION BELTS OR BELTING OF VULCANISED RUBBER:Other :Ply belting +40103919,CONVEYOR OR TRANSMISSION BELTS OR BELTING OF VULCANISED RUBBER:Other :Other +40103991,CONVEYOR OR TRANSMISSION BELTS OR BELTING OF VULCANISED RUBBER:Other :Endless flat belt +40103992,CONVEYOR OR TRANSMISSION BELTS OR BELTING OF VULCANISED RUBBER:Other :Ply belting +40103999,CONVEYOR OR TRANSMISSION BELTS OR BELTING OF VULCANISED RUBBER:Other :Other +40110000,new pneumatic tyres of rubber +40111000,new pneumatic tyres of rubber >> of a kind used on motor cars including station wagons and racing cars +40111010,"NEW PNEUMATIC TYRES, OF RUBBER:Of a kind used on motor cars (including station wagons and racing cars) :Radials" +40111090,"NEW PNEUMATIC TYRES, OF RUBBER:Of a kind used on motor cars (including station wagons and racing cars) :Other" +40112000,new pneumatic tyres of rubber >> of a kind used on buses or lorries +40112010,"NEW PNEUMATIC TYRES, OF RUBBER:Of a kind used on buses or lorries :Radials" +40112090,"NEW PNEUMATIC TYRES, OF RUBBER:Of a kind used on buses or lorries :Other" +40113000,"NEW PNEUMATIC TYRES, OF RUBBER::Of a kind used on aircraft" 
+40114000,new pneumatic tyres of rubber >> of a kind used on motor cycles +40114010,"NEW PNEUMATIC TYRES, OF RUBBER:Of a kind used on motor cycles :For motor cycles" +40114020,"NEW PNEUMATIC TYRES, OF RUBBER:Of a kind used on motor cycles :For motor scooters" +40114090,"NEW PNEUMATIC TYRES, OF RUBBER:Of a kind used on motor cycles :Other" +40115000,new pneumatic tyres of rubber >> of a kind used on bicycles +40115010,"NEW PNEUMATIC TYRES, OF RUBBER:Of a kind used on bicycles :Multi-cellular polyurethane (MCP) tubeless tyres" +40115090,"NEW PNEUMATIC TYRES, OF RUBBER:Of a kind used on bicycles :Other" +40116100,"NEW PNEUMATIC TYRES, OF RUBBER::Of a kind used on agricultural or forestry vehicles and machines(OLD tariff)" +40116200,"NEW PNEUMATIC TYRES, OF RUBBER::Of a kind used on construction or industrial handling vehicles and machines and having a rim size not exceeding 61 cm(OLD tariff)" +40116300,"NEW PNEUMATIC TYRES, OF RUBBER::Of a kind used on construction or industrial handling vehicles and machines and having a rim size exceeding 61 cm(OLD tariff)" +40116900,"NEW PNEUMATIC TYRES, OF RUBBER::Other(OLD tariff)" +40117000,"NEW PNEUMATIC TYRES, OF RUBBER::Of a kind used on agricultural or forestry vehicles and machines" +40118000,"NEW PNEUMATIC TYRES, OF RUBBER::Of a kind used on construction, mining or industrial handling vehicles and machines" +40119000,"NEW PNEUMATIC TYRES, OF RUBBER::Other" +40119200,"NEW PNEUMATIC TYRES, OF RUBBER::Of a kind used on agricultural or forestry vehicles and machines(OLD tariff)" +40119300,"NEW PNEUMATIC TYRES, OF RUBBER::Of a kind used on construction or industrial handling vehicles and machines and having a rim size not exceeding 61 cm(OLD tariff)" +40119400,"NEW PNEUMATIC TYRES, OF RUBBER::Of a kind used on construction or industrial handling vehicles and machines and having a rim size exceeding 61 cm(OLD tariff)" +40119900,"NEW PNEUMATIC TYRES, OF RUBBER::Other(OLD tariff)" +40120000,retreaded or used pneumatic tyres of rubber solid or cushion tyres tyre treads and tyre flaps of rubber retreaded tyres +40121100,"RETREADED OR USED PNEUMATIC TYRES OF RUBBER, SOLID OR CUSHION TYRES, TYRE TREADS AND TYRE FLAPS, OF RUBBER::Of a kind used on motor cars (including station wagons and racing cars)" +40121200,"RETREADED OR USED PNEUMATIC TYRES OF RUBBER, SOLID OR CUSHION TYRES, TYRE TREADS AND TYRE FLAPS, OF RUBBER::Of a kind used on buses or lorries" +40121300,"RETREADED OR USED PNEUMATIC TYRES OF RUBBER, SOLID OR CUSHION TYRES, TYRE TREADS AND TYRE FLAPS, OF RUBBER::Of a kind used on aircraft" +40121900,retreaded or used pneumatic tyres of rubber solid or cushion tyres tyre treads and tyre flaps of rubber retreaded tyres >> other +40121910,"RETREADED OR USED PNEUMATIC TYRES OF RUBBER, SOLID OR CUSHION TYRES, TYRE TREADS AND TYRE FLAPS, OF RUBBER:Other :For two wheelers" +40121990,"RETREADED OR USED PNEUMATIC TYRES OF RUBBER, SOLID OR CUSHION TYRES, TYRE TREADS AND TYRE FLAPS, OF RUBBER:Other :Other" +40122000,retreaded or used pneumatic tyres of rubber solid or cushion tyres tyre treads and tyre flaps of rubber retreaded tyres >> used pneumatic tyres +40122010,"RETREADED OR USED PNEUMATIC TYRES OF RUBBER, SOLID OR CUSHION TYRES, TYRE TREADS AND TYRE FLAPS, OF RUBBER:Used pneumatic tyres :For buses, lorries and earth moving equipments including light commercial vehicles" +40122020,"RETREADED OR USED PNEUMATIC TYRES OF RUBBER, SOLID OR CUSHION TYRES, TYRE TREADS AND TYRE FLAPS, OF RUBBER:Used pneumatic tyres :For passenger automobile vehicles, including two 
wheelers, three wheelers and personal type vehicles" +40122090,"RETREADED OR USED PNEUMATIC TYRES OF RUBBER, SOLID OR CUSHION TYRES, TYRE TREADS AND TYRE FLAPS, OF RUBBER:Used pneumatic tyres :Other" +40129000,retreaded or used pneumatic tyres of rubber solid or cushion tyres tyre treads and tyre flaps of rubber retreaded tyres >> other +40129010,"RETREADED OR USED PNEUMATIC TYRES OF RUBBER, SOLID OR CUSHION TYRES, TYRE TREADS AND TYRE FLAPS, OF RUBBER:Other :Solid rubber tyres for motor vehicles" +40129020,"RETREADED OR USED PNEUMATIC TYRES OF RUBBER, SOLID OR CUSHION TYRES, TYRE TREADS AND TYRE FLAPS, OF RUBBER:Other :Solid rubber tyres for other vehicles" +40129030,"RETREADED OR USED PNEUMATIC TYRES OF RUBBER, SOLID OR CUSHION TYRES, TYRE TREADS AND TYRE FLAPS, OF RUBBER:Other :Tyres with metal framework" +40129041,"RETREADED OR USED PNEUMATIC TYRES OF RUBBER, SOLID OR CUSHION TYRES, TYRE TREADS AND TYRE FLAPS, OF RUBBER:Other :Of a kind used in two-wheeled and three- wheeled motor vehicles" +40129049,"RETREADED OR USED PNEUMATIC TYRES OF RUBBER, SOLID OR CUSHION TYRES, TYRE TREADS AND TYRE FLAPS, OF RUBBER:Other :Other" +40129050,"RETREADED OR USED PNEUMATIC TYRES OF RUBBER, SOLID OR CUSHION TYRES, TYRE TREADS AND TYRE FLAPS, OF RUBBER:Other :Tyre treads, interchangeable" +40129090,"RETREADED OR USED PNEUMATIC TYRES OF RUBBER, SOLID OR CUSHION TYRES, TYRE TREADS AND TYRE FLAPS, OF RUBBER:Other :Other" +40130000,inner tubes of rubber +40131000,inner tubes of rubber >> of a kind used on motor cars including stati wagons and racing cars buses or lorries +40131010,"INNER TUBES, OF RUBBER:Of a kind used on motor cars (including station wagons and racing cars), buses or lorries :For motor cars" +40131020,"INNER TUBES, OF RUBBER:Of a kind used on motor cars (including station wagons and racing cars), buses or lorries :For lorries and buses" +40132000,"INNER TUBES, OF RUBBER::Of a kind used on bicycles" +40139000,inner tubes of rubber >> other +40139010,"INNER TUBES, OF RUBBER:Other :For aircraft" +40139020,"INNER TUBES, OF RUBBER:Other :For motor cycle" +40139030,"INNER TUBES, OF RUBBER:Other :For off the road vehicles, not elsewhere specified or included" +40139041,"INNER TUBES, OF RUBBER:Other :Rear tyres" +40139049,"INNER TUBES, OF RUBBER:Other :Other" +40139050,"INNER TUBES, OF RUBBER:Other :Of a kind used in tyres of cycle rickshaws and three-wheeled powered cycle-rickshaws" +40139090,"INNER TUBES, OF RUBBER:Other :Other" +40140000,hygienic or pharmaceutical articles including teats of vulcanised rubber other than hard rubber with or without fittings of hard rubber +40141000,hygienic or pharmaceutical articles including teats of vulcanised rubber other than hard rubber with or without fittings of hard rubber >> sheath contraceptives +40141010,"HYGIENIC OR PHARMACEUTICAL ARTICLES (INCLUDING TEATS), OF VULCANISED RUBBER OTHER THAN HARD RUBBER, WITH OR WITHOUT FITTINGS OF HARD RUBBER:Sheath contraceptives :Rubber contraceptives, male (condoms)" +40141020,"HYGIENIC OR PHARMACEUTICAL ARTICLES (INCLUDING TEATS), OF VULCANISED RUBBER OTHER THAN HARD RUBBER, WITH OR WITHOUT FITTINGS OF HARD RUBBER:Sheath contraceptives :Rubber contraceptives, female (diaphragms), such as cervical caps" +40149000,hygienic or pharmaceutical articles including teats of vulcanised rubber other than hard rubber with or without fittings of hard rubber >> other +40149010,"HYGIENIC OR PHARMACEUTICAL ARTICLES (INCLUDING TEATS), OF VULCANISED RUBBER OTHER THAN HARD RUBBER, WITH OR WITHOUT FITTINGS OF HARD RUBBER:Other 
:Hot water bottles" +40149020,"HYGIENIC OR PHARMACEUTICAL ARTICLES (INCLUDING TEATS), OF VULCANISED RUBBER OTHER THAN HARD RUBBER, WITH OR WITHOUT FITTINGS OF HARD RUBBER:Other :Ice bags" +40149030,"HYGIENIC OR PHARMACEUTICAL ARTICLES (INCLUDING TEATS), OF VULCANISED RUBBER OTHER THAN HARD RUBBER, WITH OR WITHOUT FITTINGS OF HARD RUBBER:Other :Feeding bottle nipples" +40149090,"HYGIENIC OR PHARMACEUTICAL ARTICLES (INCLUDING TEATS), OF VULCANISED RUBBER OTHER THAN HARD RUBBER, WITH OR WITHOUT FITTINGS OF HARD RUBBER:Other :Other" +40151100,"ARTICLES OF APPAREL AND CLOTHING ACCESSORIES (INCLUDING GLOVES, MITTENS AND MITTS) FOR ALL PURPOSES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER::Surgical(OLD tariff)" +40151200,"ARTICLES OF APPAREL AND CLOTHING ACCESSORIES (INCLUDING GLOVES, MITTENS AND MITTS) FOR ALL PURPOSES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER::Of a kind used for medical, surgical, dental or veterinary purposes(OLD tariff)" +40151900,"ARTICLES OF APPAREL AND CLOTHING ACCESSORIES (INCLUDING GLOVES, MITTENS AND MITTS) FOR ALL PURPOSES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER::Other(OLD tariff)" +40159010,"ARTICLES OF APPAREL AND CLOTHING ACCESSORIES (INCLUDING GLOVES, MITTENS AND MITTS) FOR ALL PURPOSES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Other :Rubber apron(OLD tariff)" +40159020,"ARTICLES OF APPAREL AND CLOTHING ACCESSORIES (INCLUDING GLOVES, MITTENS AND MITTS) FOR ALL PURPOSES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Other :Labels(OLD tariff)" +40159030,"ARTICLES OF APPAREL AND CLOTHING ACCESSORIES (INCLUDING GLOVES, MITTENS AND MITTS) FOR ALL PURPOSES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Other :Industrial gloves(OLD tariff)" +40159091,"ARTICLES OF APPAREL AND CLOTHING ACCESSORIES (INCLUDING GLOVES, MITTENS AND MITTS) FOR ALL PURPOSES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Other :Diving suits(OLD tariff)" +40159099,"ARTICLES OF APPAREL AND CLOTHING ACCESSORIES (INCLUDING GLOVES, MITTENS AND MITTS) FOR ALL PURPOSES, OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Other :Other(OLD tariff)" +40160000,other articles of vulcanised rubber other than hard rubber other +40161000,OTHER ARTICLES OF VULCANISED RUBBER OTHER THAN HARD RUBBER::Of cellular rubber(OLD tariff) +40169100,OTHER ARTICLES OF VULCANISED RUBBER OTHER THAN HARD RUBBER::Floor coverings and mats(OLD tariff) +40169200,OTHER ARTICLES OF VULCANISED RUBBER OTHER THAN HARD RUBBER::Erasers +40169300,other articles of vulcanised rubber other than hard rubber other >> gaskets washers and other seals +40169310,"OTHER ARTICLES OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Gaskets, washers and other seals :Patches for puncture repair of self- vulcanising rubber or a rubber backing" +40169320,"OTHER ARTICLES OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Gaskets, washers and other seals :Rubber rings (O-ring)(OLD tariff)" +40169330,"OTHER ARTICLES OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Gaskets, washers and other seals :Rubber seals (Oil seals and the like)" +40169340,"OTHER ARTICLES OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Gaskets, washers and other seals :Gaskets" +40169350,"OTHER ARTICLES OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Gaskets, washers and other seals :Washers" +40169360,"OTHER ARTICLES OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Gaskets, washers and other seals :Plugs" +40169390,"OTHER ARTICLES OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Gaskets, washers and other seals :Other" +40169400,"OTHER ARTICLES OF VULCANISED RUBBER OTHER THAN HARD RUBBER::Boat or dock fenders, whether or not 
inflatable" +40169500,other articles of vulcanised rubber other than hard rubber other >> other inflatable articles +40169510,OTHER ARTICLES OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Other inflatable articles:Air mattresses +40169590,OTHER ARTICLES OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Other inflatable articles:Other +40169900,other articles of vulcanised rubber other than hard rubber other >> other +40169910,OTHER ARTICLES OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Other :Rubber cots for textile industry +40169920,OTHER ARTICLES OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Other :Rubber bands +40169930,OTHER ARTICLES OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Other :Rubber threads +40169940,OTHER ARTICLES OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Other :Rubber blankets +40169950,OTHER ARTICLES OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Other :Rubber cushions +40169960,OTHER ARTICLES OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Other :Rubber bushes +40169970,OTHER ARTICLES OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Other :Ear plug +40169980,OTHER ARTICLES OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Other :Stoppers +40169990,OTHER ARTICLES OF VULCANISED RUBBER OTHER THAN HARD RUBBER:Other :Other +40170000,hard rubber for example ebonite in all forms including waste and scrap articles of hard rubber hard rubber for example ebonite in all forms including waste and scrap articles of hard rubber +40170010,"HARD RUBBER (FOR EXAMPLE, EBONITE) IN ALL FORMS, INCLUDING WASTE AND SCRAP; ARTICLES OF HARD RUBBER:Hard rubber (for example, ebonite) in all forms, including waste and scrap; articles of hard rubber:Plates, sheets, rods and tubes of ebonite and vulcanite" +40170020,"HARD RUBBER (FOR EXAMPLE, EBONITE) IN ALL FORMS, INCLUDING WASTE AND SCRAP; ARTICLES OF HARD RUBBER:Hard rubber (for example, ebonite) in all forms, including waste and scrap; articles of hard rubber:Scrap, waste and powder of hardened rubber (ebonite and vulcanite)" +40170030,"HARD RUBBER (FOR EXAMPLE, EBONITE) IN ALL FORMS, INCLUDING WASTE AND SCRAP; ARTICLES OF HARD RUBBER:Hard rubber (for example, ebonite) in all forms, including waste and scrap; articles of hard rubber:Printers' rollers" +40170040,"HARD RUBBER (FOR EXAMPLE, EBONITE) IN ALL FORMS, INCLUDING WASTE AND SCRAP; ARTICLES OF HARD RUBBER:Hard rubber (for example, ebonite) in all forms, including waste and scrap; articles of hard rubber:Textile rollers" +40170050,"HARD RUBBER (FOR EXAMPLE, EBONITE) IN ALL FORMS, INCLUDING WASTE AND SCRAP; ARTICLES OF HARD RUBBER:Hard rubber (for example, ebonite) in all forms, including waste and scrap; articles of hard rubber:Typewriters and cyclostyling rollers" +40170090,"HARD RUBBER (FOR EXAMPLE, EBONITE) IN ALL FORMS, INCLUDING WASTE AND SCRAP; ARTICLES OF HARD RUBBER:Hard rubber (for example, ebonite) in all forms, including waste and scrap; articles of hard rubber:Other" +40510000,polishes creams and similar preparations kg for footwear or leather for the maintenance of wooden furniture floors +40530000,polishes and similar preparations for kg other than metal polishes scouring preparations +41012010,"RAW HIDES AND SKINS OF BOVINE (INCLUDING BUFFALO) OR EQUINE ANIMALS (FRESH OR SALTED, DRIED, LIMED, PICKLED OR OTHERWISE PRESERVED, BUT NOT TANNED, PARCHMENT-DRESSED OR FURTHER PREPARED), WHETHER OR NOT DEHAIRED OR SPLIT:Whole hides and skins of a weight per skin not exceeding 8 kg. when simply dried, 10 kg. when dry-salted, or 16 kg. 
when fresh, wet-salted or otherwise preserved:Of cow, including cow calf(OLD tariff)" +41012020,"RAW HIDES AND SKINS OF BOVINE (INCLUDING BUFFALO) OR EQUINE ANIMALS (FRESH OR SALTED, DRIED, LIMED, PICKLED OR OTHERWISE PRESERVED, BUT NOT TANNED, PARCHMENT-DRESSED OR FURTHER PREPARED), WHETHER OR NOT DEHAIRED OR SPLIT:Whole hides and skins of a weight per skin not exceeding 8 kg. when simply dried, 10 kg. when dry-salted, or 16 kg. when fresh, wet-salted or otherwise preserved:Of buffalo, including buffalo calf(OLD tariff)" +41012090,"RAW HIDES AND SKINS OF BOVINE (INCLUDING BUFFALO) OR EQUINE ANIMALS (FRESH OR SALTED, DRIED, LIMED, PICKLED OR OTHERWISE PRESERVED, BUT NOT TANNED, PARCHMENT-DRESSED OR FURTHER PREPARED), WHETHER OR NOT DEHAIRED OR SPLIT:Whole hides and skins of a weight per skin not exceeding 8 kg. when simply dried, 10 kg. when dry-salted, or 16 kg. when fresh, wet-salted or otherwise preserved:Other(OLD tariff)" +41015010,"RAW HIDES AND SKINS OF BOVINE (INCLUDING BUFFALO) OR EQUINE ANIMALS (FRESH OR SALTED, DRIED, LIMED, PICKLED OR OTHERWISE PRESERVED, BUT NOT TANNED, PARCHMENT-DRESSED OR FURTHER PREPARED), WHETHER OR NOT DEHAIRED OR SPLIT:Whole hides and skins, of a weight exceeding 16 kg. :Of cow, including cow calf(OLD tariff)" +41015020,"RAW HIDES AND SKINS OF BOVINE (INCLUDING BUFFALO) OR EQUINE ANIMALS (FRESH OR SALTED, DRIED, LIMED, PICKLED OR OTHERWISE PRESERVED, BUT NOT TANNED, PARCHMENT-DRESSED OR FURTHER PREPARED), WHETHER OR NOT DEHAIRED OR SPLIT:Whole hides and skins, of a weight exceeding 16 kg. :Of buffalo, including buffalo calf(OLD tariff)" +41015090,"RAW HIDES AND SKINS OF BOVINE (INCLUDING BUFFALO) OR EQUINE ANIMALS (FRESH OR SALTED, DRIED, LIMED, PICKLED OR OTHERWISE PRESERVED, BUT NOT TANNED, PARCHMENT-DRESSED OR FURTHER PREPARED), WHETHER OR NOT DEHAIRED OR SPLIT:Whole hides and skins, of a weight exceeding 16 kg. 
:Other(OLD tariff)" +41019010,"RAW HIDES AND SKINS OF BOVINE (INCLUDING BUFFALO) OR EQUINE ANIMALS (FRESH OR SALTED, DRIED, LIMED, PICKLED OR OTHERWISE PRESERVED, BUT NOT TANNED, PARCHMENT-DRESSED OR FURTHER PREPARED), WHETHER OR NOT DEHAIRED OR SPLIT:Other, including butts, bends and bellies:Of cow, including cow calf(OLD tariff)" +41019020,"RAW HIDES AND SKINS OF BOVINE (INCLUDING BUFFALO) OR EQUINE ANIMALS (FRESH OR SALTED, DRIED, LIMED, PICKLED OR OTHERWISE PRESERVED, BUT NOT TANNED, PARCHMENT-DRESSED OR FURTHER PREPARED), WHETHER OR NOT DEHAIRED OR SPLIT:Other, including butts, bends and bellies:Of buffalo, including buffalo calf(OLD tariff)" +41019090,"RAW HIDES AND SKINS OF BOVINE (INCLUDING BUFFALO) OR EQUINE ANIMALS (FRESH OR SALTED, DRIED, LIMED, PICKLED OR OTHERWISE PRESERVED, BUT NOT TANNED, PARCHMENT-DRESSED OR FURTHER PREPARED), WHETHER OR NOT DEHAIRED OR SPLIT:Other, including butts, bends and bellies:Other(OLD tariff)" +41020000,raw skins of sheep or lambs fresh or salted dried limed pickled or otherwise preserved but not tanned parchment dressed or further prepared whether or not with wool on or split other than those excluded by note 1 c to this chapter raw skins of sheep or lambs fresh or salted dried limed pickled or otherwise preserved but not tanned parchment dressed or further prepared whether or not with wool on or split other than those excluded by note 1 c to this chapter +41021000,raw skins of sheep or lambs fresh or salted dried limed pickled or otherwise preserved but not tanned parchment dressed or further prepared whether or not with wool on or split other than those excluded by note 1 c to this chapter raw skins of sheep or lambs fresh or salted dried limed pickled or otherwise preserved but not tanned parchment dressed or further prepared whether or not with wool on or split other than those excluded by note 1 c to this chapter >> with wool on with wool on +41021010,"RAW SKINS OF SHEEP OR LAMBS (FRESH, OR SALTED, DRIED, LIMED, PICKLED OR OTHERWISE PRESERVED, BUT NOT TANNED, PARCHMENT-DRESSED OR FURTHER PREPARED), WHETHER OR NOT WITH WOOL ON OR SPLIT, OTHER THAN THOSE EXCLUDED BY NOTE 1 ( C ) TO THIS CHAPTER:With wool on :Sheep skins" +41021020,"RAW SKINS OF SHEEP OR LAMBS (FRESH, OR SALTED, DRIED, LIMED, PICKLED OR OTHERWISE PRESERVED, BUT NOT TANNED, PARCHMENT-DRESSED OR FURTHER PREPARED), WHETHER OR NOT WITH WOOL ON OR SPLIT, OTHER THAN THOSE EXCLUDED BY NOTE 1 ( C ) TO THIS CHAPTER:With wool on :Pelts of baby lambs" +41021030,"RAW SKINS OF SHEEP OR LAMBS (FRESH, OR SALTED, DRIED, LIMED, PICKLED OR OTHERWISE PRESERVED, BUT NOT TANNED, PARCHMENT-DRESSED OR FURTHER PREPARED), WHETHER OR NOT WITH WOOL ON OR SPLIT, OTHER THAN THOSE EXCLUDED BY NOTE 1 ( C ) TO THIS CHAPTER:With wool on :Lamb skins, other than pelts" +41022100,raw skins of sheep or lambs fresh or salted dried limed pickled or otherwise preserved but not tanned parchment dressed or further prepared whether or not with wool on or split other than those excluded by note 1 c to this chapter raw skins of sheep or lambs fresh or salted dried limed pickled or otherwise preserved but not tanned parchment dressed or further prepared whether or not with wool on or split other than those excluded by note 1 c to this chapter >> pickled +41022110,"RAW SKINS OF SHEEP OR LAMBS (FRESH, OR SALTED, DRIED, LIMED, PICKLED OR OTHERWISE PRESERVED, BUT NOT TANNED, PARCHMENT-DRESSED OR FURTHER PREPARED), WHETHER OR NOT WITH WOOL ON OR SPLIT, OTHER THAN THOSE EXCLUDED BY NOTE 1 ( C ) TO THIS CHAPTER:Pickled :Sheep 
skins" +41022120,"RAW SKINS OF SHEEP OR LAMBS (FRESH, OR SALTED, DRIED, LIMED, PICKLED OR OTHERWISE PRESERVED, BUT NOT TANNED, PARCHMENT-DRESSED OR FURTHER PREPARED), WHETHER OR NOT WITH WOOL ON OR SPLIT, OTHER THAN THOSE EXCLUDED BY NOTE 1 ( C ) TO THIS CHAPTER:Pickled :Lamb skins" +41022130,"RAW SKINS OF SHEEP OR LAMBS (FRESH, OR SALTED, DRIED, LIMED, PICKLED OR OTHERWISE PRESERVED, BUT NOT TANNED, PARCHMENT-DRESSED OR FURTHER PREPARED), WHETHER OR NOT WITH WOOL ON OR SPLIT, OTHER THAN THOSE EXCLUDED BY NOTE 1 ( C ) TO THIS CHAPTER:Pickled :Lamb pelt" +41022900,raw skins of sheep or lambs fresh or salted dried limed pickled or otherwise preserved but not tanned parchment dressed or further prepared whether or not with wool on or split other than those excluded by note 1 c to this chapter raw skins of sheep or lambs fresh or salted dried limed pickled or otherwise preserved but not tanned parchment dressed or further prepared whether or not with wool on or split other than those excluded by note 1 c to this chapter >> other +41022910,"RAW SKINS OF SHEEP OR LAMBS (FRESH, OR SALTED, DRIED, LIMED, PICKLED OR OTHERWISE PRESERVED, BUT NOT TANNED, PARCHMENT-DRESSED OR FURTHER PREPARED), WHETHER OR NOT WITH WOOL ON OR SPLIT, OTHER THAN THOSE EXCLUDED BY NOTE 1 ( C ) TO THIS CHAPTER:Other :Sheep skins" +41022920,"RAW SKINS OF SHEEP OR LAMBS (FRESH, OR SALTED, DRIED, LIMED, PICKLED OR OTHERWISE PRESERVED, BUT NOT TANNED, PARCHMENT-DRESSED OR FURTHER PREPARED), WHETHER OR NOT WITH WOOL ON OR SPLIT, OTHER THAN THOSE EXCLUDED BY NOTE 1 ( C ) TO THIS CHAPTER:Other :Lamb skins" +41030000,other raw hides and skins fresh or salted dried limed pickled or otherwise preserved but not tanned parchment dressed or further prepared whether or not dehaired or split other than those excluded by note 1 b or 1 c to this chapter +41032000,"OTHER RAW HIDES AND SKINS (FRESH, OR SALTED, DRIED, LIMED, PICKLED OR OTHERWISE PRESERVED, BUT NOT TANNED, PARCHMENT-DRESSED OR FURTHER PREPARED), WHETHER OR NOT DEHAIRED OR SPLIT, OTHER THAN THOSE EXCLUDED BY NOTE 1(B) OR 1(C) TO THIS::Of reptiles" +41033000,"OTHER RAW HIDES AND SKINS (FRESH, OR SALTED, DRIED, LIMED, PICKLED OR OTHERWISE PRESERVED, BUT NOT TANNED, PARCHMENT-DRESSED OR FURTHER PREPARED), WHETHER OR NOT DEHAIRED OR SPLIT, OTHER THAN THOSE EXCLUDED BY NOTE 1(B) OR 1(C) TO THIS::Of swine" +41039000,"OTHER RAW HIDES AND SKINS (FRESH, OR SALTED, DRIED, LIMED, PICKLED OR OTHERWISE PRESERVED, BUT NOT TANNED, PARCHMENT-DRESSED OR FURTHER PREPARED), WHETHER OR NOT DEHAIRED OR SPLIT, OTHER THAN THOSE EXCLUDED BY NOTE 1(B) OR 1(C) TO THIS::Other" +41040000,tanned or crust hides and skins of bovine including buffalo or equine animals without hair on whether or not split but not further prepared in the wet state including +41041100,"TANNED OR CRUST HIDES AND SKINS OF BOVINE (INCLUDING BUFFALO) OR EQUINE ANIMALS, WITHOUT HAIR ON, WHETHER OR NOT SPLIT, BUT NOT FURTHER PREPARED::Full grains, unsplit; grain splits" +41041900,"TANNED OR CRUST HIDES AND SKINS OF BOVINE (INCLUDING BUFFALO) OR EQUINE ANIMALS, WITHOUT HAIR ON, WHETHER OR NOT SPLIT, BUT NOT FURTHER PREPARED::Other" +41044100,"TANNED OR CRUST HIDES AND SKINS OF BOVINE (INCLUDING BUFFALO) OR EQUINE ANIMALS, WITHOUT HAIR ON, WHETHER OR NOT SPLIT, BUT NOT FURTHER PREPARED::Full grains, unsplit; grain splits(OLD tariff)" +41044900,"TANNED OR CRUST HIDES AND SKINS OF BOVINE (INCLUDING BUFFALO) OR EQUINE ANIMALS, WITHOUT HAIR ON, WHETHER OR NOT SPLIT, BUT NOT FURTHER PREPARED::Other" +41050000,tanned or crust skins of sheep or 
lambs without wool on whether or not split but not further prepared +41051000,"TANNED OR CRUST SKINS OF SHEEP OR LAMBS, WITHOUT WOOL ON, WHETHER OR NOT SPLIT, BUT NOT FURTHER PREPARED::In the wet state (including wet-blue)" +41053000,"TANNED OR CRUST SKINS OF SHEEP OR LAMBS, WITHOUT WOOL ON, WHETHER OR NOT SPLIT, BUT NOT FURTHER PREPARED::In the dry state (crust)" +41060000,tanned or crust hides and skins of other animals without wool or hair on whether or not split but not further prepared of goats or kids +41062100,"TANNED OR CRUST HIDES AND SKINS OF OTHER ANIMALS, WITHOUT WOOL OR HAIR ON, WHETHER OR NOT SPLIT BUT NOT FURTHER PREPARED::In the wet state (including wet-blue)" +41062200,"TANNED OR CRUST HIDES AND SKINS OF OTHER ANIMALS, WITHOUT WOOL OR HAIR ON, WHETHER OR NOT SPLIT BUT NOT FURTHER PREPARED::In the dry state (crust)" +41063100,"TANNED OR CRUST HIDES AND SKINS OF OTHER ANIMALS, WITHOUT WOOL OR HAIR ON, WHETHER OR NOT SPLIT BUT NOT FURTHER PREPARED::In the wet state (including wet-blue)(OLD tariff)" +41063200,"TANNED OR CRUST HIDES AND SKINS OF OTHER ANIMALS, WITHOUT WOOL OR HAIR ON, WHETHER OR NOT SPLIT BUT NOT FURTHER PREPARED::In the dry state (crust)" +41064000,"TANNED OR CRUST HIDES AND SKINS OF OTHER ANIMALS, WITHOUT WOOL OR HAIR ON, WHETHER OR NOT SPLIT BUT NOT FURTHER PREPARED::Of reptiles" +41069100,"TANNED OR CRUST HIDES AND SKINS OF OTHER ANIMALS, WITHOUT WOOL OR HAIR ON, WHETHER OR NOT SPLIT BUT NOT FURTHER PREPARED::In the wet state (including wet-blue)(OLD tariff)" +41069200,"TANNED OR CRUST HIDES AND SKINS OF OTHER ANIMALS, WITHOUT WOOL OR HAIR ON, WHETHER OR NOT SPLIT BUT NOT FURTHER PREPARED::In the dry state (crust)" +41070000,leather further prepared after tanning or crusting including leather of bovine including buffalo or equine animals without hair on whether or not split other than leather of heading 4114 whole hides and skins +41071100,"LEATHER FURTHER PREPARED AFTER TANNING OR CRUSTING, INCLUDING PARCHMENT-DRESSED LEATHER, OF BOVINE (INCLUDING BUFFALO) OR EQUINE ANIMALS, WITHOUT HAIR ON, WHETHER OR NOT SPLIT, OTHER THAN LEATHER OF HEADING 4114::Full grains, unsplit" +41071200,"LEATHER FURTHER PREPARED AFTER TANNING OR CRUSTING, INCLUDING PARCHMENT-DRESSED LEATHER, OF BOVINE (INCLUDING BUFFALO) OR EQUINE ANIMALS, WITHOUT HAIR ON, WHETHER OR NOT SPLIT, OTHER THAN LEATHER OF HEADING 4114::Grain splits" +41071900,"LEATHER FURTHER PREPARED AFTER TANNING OR CRUSTING, INCLUDING PARCHMENT-DRESSED LEATHER, OF BOVINE (INCLUDING BUFFALO) OR EQUINE ANIMALS, WITHOUT HAIR ON, WHETHER OR NOT SPLIT, OTHER THAN LEATHER OF HEADING 4114::Other" +41079100,"LEATHER FURTHER PREPARED AFTER TANNING OR CRUSTING, INCLUDING PARCHMENT-DRESSED LEATHER, OF BOVINE (INCLUDING BUFFALO) OR EQUINE ANIMALS, WITHOUT HAIR ON, WHETHER OR NOT SPLIT, OTHER THAN LEATHER OF HEADING 4114::Full grains, unsplit(OLD tariff)" +41079200,"LEATHER FURTHER PREPARED AFTER TANNING OR CRUSTING, INCLUDING PARCHMENT-DRESSED LEATHER, OF BOVINE (INCLUDING BUFFALO) OR EQUINE ANIMALS, WITHOUT HAIR ON, WHETHER OR NOT SPLIT, OTHER THAN LEATHER OF HEADING 4114::Grain splits" +41079900,"LEATHER FURTHER PREPARED AFTER TANNING OR CRUSTING, INCLUDING PARCHMENT-DRESSED LEATHER, OF BOVINE (INCLUDING BUFFALO) OR EQUINE ANIMALS, WITHOUT HAIR ON, WHETHER OR NOT SPLIT, OTHER THAN LEATHER OF HEADING 4114::Other" +41120000,"::LEATHER FURTHER PREPARED AFTER TANNING OR CRUSTING, INCLUDING PARCHMENT-DRESSED LEATHER, OF SHEEP OR LAMB, WITHOUT WOOL ON, WHETHER OR NOT SPLIT, OTHER THAN LEATHER OF HEADING 4114" +41130000,leather 
further prepared after tanning or crusting including leather of other animals without wool or hair on whether or not split other than leather of heading 4114 +41131000,"LEATHER FURTHER PREPARED AFTER TANNING OR CRUSTING, INCLUDING PARCHMENT-DRESSED LEATHER, OF OTHER ANIMALS, WITHOUT WOOL OR HAIR ON, WHETHER OR NOT SPLIT, OTHER THAN LEATHER OF HEADING 4114::Of goats or kids" +41132000,"LEATHER FURTHER PREPARED AFTER TANNING OR CRUSTING, INCLUDING PARCHMENT-DRESSED LEATHER, OF OTHER ANIMALS, WITHOUT WOOL OR HAIR ON, WHETHER OR NOT SPLIT, OTHER THAN LEATHER OF HEADING 4114::Of swine" +41133000,"LEATHER FURTHER PREPARED AFTER TANNING OR CRUSTING, INCLUDING PARCHMENT-DRESSED LEATHER, OF OTHER ANIMALS, WITHOUT WOOL OR HAIR ON, WHETHER OR NOT SPLIT, OTHER THAN LEATHER OF HEADING 4114::Of reptiles" +41139000,"LEATHER FURTHER PREPARED AFTER TANNING OR CRUSTING, INCLUDING PARCHMENT-DRESSED LEATHER, OF OTHER ANIMALS, WITHOUT WOOL OR HAIR ON, WHETHER OR NOT SPLIT, OTHER THAN LEATHER OF HEADING 4114::Other" +41140000,chamois including combination chamois leather patent leather and patent laminated leather metallised leather +41141000,CHAMOIS (INCLUDING COMBINATION CHAMOIS) LEATHER; PATENT LEATHER AND PATENT LAMINATED LEATHER ; METALLISED LEATHER::Chamois (including combination chamois) leather +41142000,chamois including combination chamois leather patent leather and patent laminated leather metallised leather >> patent leather and patent laminated leather metallised leather +41142010,CHAMOIS (INCLUDING COMBINATION CHAMOIS) LEATHER; PATENT LEATHER AND PATENT LAMINATED LEATHER ; METALLISED LEATHER:Patent leather and patent laminated leather; metallised leather :Patent leather and patent laminated leather +41142020,CHAMOIS (INCLUDING COMBINATION CHAMOIS) LEATHER; PATENT LEATHER AND PATENT LAMINATED LEATHER ; METALLISED LEATHER:Patent leather and patent laminated leather; metallised leather :Metallised leather +41150000,composition leather with a basis of leather or leather fiber in slabs sheets or strip whether or not in rolls parings and other waste of leather or of composition not suitable for the leather manufacture of leather leather articles dust powder and flour +41151000,"COMPOSITION LEATHER WITH A BASIS OF LEATHER OR LEATHER FIBER, IN SLABS, SHEETS OR STRIP, WHETHER OR NOT IN ROLLS; PARINGS AND OTHER WASTE OF LEATHER OR OF COMPOSITION LEATHER, NOT SUITABLE FOR THE MANUFACTURE OF LEATHER ARTICLES; LEATHER DUST, POWDER AND FLOUR::Composition leather with a basis of leather or leather fiber, in slabs, sheets or strip, whether or not in rolls" +41152000,composition leather with a basis of leather or leather fiber in slabs sheets or strip whether or not in rolls parings and other waste of leather or of composition not suitable for the leather manufacture of leather leather articles dust powder and flour >> parings and other waste of leather or of composition leather not suitable for the manufacture of leather articles leather dust powder and flour +41152010,"COMPOSITION LEATHER WITH A BASIS OF LEATHER OR LEATHER FIBER, IN SLABS, SHEETS OR STRIP, WHETHER OR NOT IN ROLLS; PARINGS AND OTHER WASTE OF LEATHER OR OF COMPOSITION LEATHER, NOT SUITABLE FOR THE MANUFACTURE OF LEATHER ARTICLES; LEATHER DUST, POWDER AND FLOUR:Parings and other waste of leather or of composition leather, not suitable for the manufacture of leather articles ; leather dust, powder and flour:Cuttings of leather" +41152090,"COMPOSITION LEATHER WITH A BASIS OF LEATHER OR LEATHER FIBER, IN SLABS, SHEETS OR STRIP, WHETHER OR NOT IN 
ROLLS; PARINGS AND OTHER WASTE OF LEATHER OR OF COMPOSITION LEATHER, NOT SUITABLE FOR THE MANUFACTURE OF LEATHER ARTICLES; LEATHER DUST, POWDER AND FLOUR:Parings and other waste of leather or of composition leather, not suitable for the manufacture of leather articles ; leather dust, powder and flour:Other" +42010000,"::SADDLERY AND HARNESS FOR ANY ANIMAL (INCLUDING TRACES, LEADS, KNEE PADS, MUZZLES, SADDLE CLOTHS, SADDLE BAGS, DOG COATS AND THE LIKE), OF ANY MATERIAL" +42020000,trunks executive c a s e s b r i e f c a s e s s c h o o l s at c h e l s spectacle cases binocular cases camera cases musical instrument cases gun cases h o l s t e r s a n d s i m i l a r c o n ta i n e r s t r av e l l i n g b a g s i n s u l at e d f o o d o r beverages bags toilet bags rucksacks handbags wallets purses m a p c a s e s c i g a r e t t e c a s e s t o b a c c o pouches tool bags sports bags bottle c a s e s j e we l l ery b o x e s p o w d e r b o xe s beverages bags toilet bags rucksacks handbags wallets purses m a p c a s e s c i g a r e t t e c a s e s t o b a c c o pouches tool bags sports bags bottle c a s e s j e we l l ery b o x e s p o w d e r b o xe s cutlery cases and similar containers of l e at h e r o r o f c om p o s i t io n l e at h e r o f sheeting of plastics of textile materials of vulcanised fibre or of paperboard or w h o l ly o r m a i n ly c o v e r e d w i t h s u c h materials or with paper executive cases school satchels and similar containers +42021100,trunks executive c a s e s b r i e f c a s e s s c h o o l s at c h e l s spectacle cases binocular cases camera cases musical instrument cases gun cases h o l s t e r s a n d s i m i l a r c o n ta i n e r s t r av e l l i n g b a g s i n s u l at e d f o o d o r beverages bags toilet bags rucksacks handbags wallets purses m a p c a s e s c i g a r e t t e c a s e s t o b a c c o pouches tool bags sports bags bottle c a s e s j e we l l ery b o x e s p o w d e r b o xe s beverages bags toilet bags rucksacks handbags wallets purses m a p c a s e s c i g a r e t t e c a s e s t o b a c c o pouches tool bags sports bags bottle c a s e s j e we l l ery b o x e s p o w d e r b o xe s cutlery cases and similar containers of l e at h e r o r o f c om p o s i t io n l e at h e r o f sheeting of plastics of textile materials of vulcanised fibre or of paperboard or w h o l ly o r m a i n ly c o v e r e d w i t h s u c h materials or with paper executive cases school satchels and similar containers >> with outer surface of leather or of composition leather +42021110,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of leather, of composition leather or of patent leather :Travel goods (trunks, suit-cases, sports bags and other similar items ) of leather" +42021120,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN 
CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of leather, of composition leather or of patent leather :Toilet-bags and cases, of leather" +42021130,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of leather, of composition leather or of patent leather :Satchels" +42021140,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of leather, of composition leather or of patent leather :Brief-cases" +42021150,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of leather, of composition leather or of patent leather :Executive-cases" +42021160,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of leather, 
of composition leather or of patent leather :Vanity-cases" +42021170,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of leather, of composition leather or of patent leather :Attache-cases" +42021190,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of leather, of composition leather or of patent leather :Other" +42021200,trunks executive c a s e s b r i e f c a s e s s c h o o l s at c h e l s spectacle cases binocular cases camera cases musical instrument cases gun cases h o l s t e r s a n d s i m i l a r c o n ta i n e r s t r av e l l i n g b a g s i n s u l at e d f o o d o r beverages bags toilet bags rucksacks handbags wallets purses m a p c a s e s c i g a r e t t e c a s e s t o b a c c o pouches tool bags sports bags bottle c a s e s j e we l l ery b o x e s p o w d e r b o xe s beverages bags toilet bags rucksacks handbags wallets purses m a p c a s e s c i g a r e t t e c a s e s t o b a c c o pouches tool bags sports bags bottle c a s e s j e we l l ery b o x e s p o w d e r b o xe s cutlery cases and similar containers of l e at h e r o r o f c om p o s i t io n l e at h e r o f sheeting of plastics of textile materials of vulcanised fibre or of paperboard or w h o l ly o r m a i n ly c o v e r e d w i t h s u c h materials or with paper executive cases school satchels and similar containers >> with outer surface of plastic or of textile materials +42021210,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of plastic or of textile materials :Toilet-cases" +42021220,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND 
SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of plastic or of textile materials :Plastic moulded suit-cases" +42021230,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of plastic or of textile materials :Plastic moulded brief-cases" +42021240,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of plastic or of textile materials :Satchels" +42021250,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of plastic or of textile materials :Other travel-goods" +42021260,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of plastic or of textile materials :Brief-cases" +42021270,"TRUNKS, SUIT-CASES, VANITY-CASES, 
EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of plastic or of textile materials :Executive-cases other than plastic moulded" +42021280,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of plastic or of textile materials :Vanity-cases" +42021290,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of plastic or of textile materials :Other" +42021900,trunks executive c a s e s b r i e f c a s e s s c h o o l s at c h e l s spectacle cases binocular cases camera cases musical instrument cases gun cases h o l s t e r s a n d s i m i l a r c o n ta i n e r s t r av e l l i n g b a g s i n s u l at e d f o o d o r beverages bags toilet bags rucksacks handbags wallets purses m a p c a s e s c i g a r e t t e c a s e s t o b a c c o pouches tool bags sports bags bottle c a s e s j e we l l ery b o x e s p o w d e r b o xe s beverages bags toilet bags rucksacks handbags wallets purses m a p c a s e s c i g a r e t t e c a s e s t o b a c c o pouches tool bags sports bags bottle c a s e s j e we l l ery b o x e s p o w d e r b o xe s cutlery cases and similar containers of l e at h e r o r o f c om p o s i t io n l e at h e r o f sheeting of plastics of textile materials of vulcanised fibre or of paperboard or w h o l ly o r m a i n ly c o v e r e d w i t h s u c h materials or with paper executive cases school satchels and similar containers >> other +42021910,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, 
TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:Other :Travel goods (trunks, suit-cases, sports bags, and other similar items) of leather" +42021920,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:Other :Toilet-cases" +42021930,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:Other :Satchels" +42021940,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:Other :Brief-cases (other than plastic moulded)" +42021950,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:Other :Executive-cases" +42021960,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS 
BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:Other :Vanity-cases" +42021990,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:Other :Other" +42022110,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of leather, of composition leather or of patent leather :Hand-bags for ladies(OLD tariff)" +42022120,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of leather, of composition leather or of patent leather :Vanity-bags(OLD tariff)" +42022190,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of leather, of composition leather or of patent leather :Other(OLD tariff)" +42022200,trunks executive c a s e s b r i e f c a s e s s c h o o l s at c h e l s spectacle cases binocular cases camera cases musical instrument cases gun cases h o l s t e r s a n d s i m i l a r c o n ta i n e r s t r av e l l i n g b a g s i n s u l at e d f o o d o r beverages 
bags toilet bags rucksacks handbags wallets purses m a p c a s e s c i g a r e t t e c a s e s t o b a c c o pouches tool bags sports bags bottle c a s e s j e we l l ery b o x e s p o w d e r b o xe s beverages bags toilet bags rucksacks handbags wallets purses m a p c a s e s c i g a r e t t e c a s e s t o b a c c o pouches tool bags sports bags bottle c a s e s j e we l l ery b o x e s p o w d e r b o xe s cutlery cases and similar containers of l e at h e r o r o f c om p o s i t io n l e at h e r o f sheeting of plastics of textile materials of vulcanised fibre or of paperboard or w h o l ly o r m a i n ly c o v e r e d w i t h s u c h materials or with paper executive cases school satchels and similar containers >> with outer surface of sheeting of plastics or of textile materials +42022210,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of sheeting of plastics or of textile materials:Hand-bags and shopping bags, of artificial plastic material" +42022220,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of sheeting of plastics or of textile materials:Hand-bags and shopping bags, of cotton(OLD tariff)" +42022230,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of sheeting of plastics or of textile materials:Hand-bags and shopping bags, of Jute" +42022240,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, 
JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of sheeting of plastics or of textile materials:Vanity-bags" +42022290,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of sheeting of plastics or of textile materials:Other" +42022900,trunks executive c a s e s b r i e f c a s e s s c h o o l s at c h e l s spectacle cases binocular cases camera cases musical instrument cases gun cases h o l s t e r s a n d s i m i l a r c o n ta i n e r s t r av e l l i n g b a g s i n s u l at e d f o o d o r beverages bags toilet bags rucksacks handbags wallets purses m a p c a s e s c i g a r e t t e c a s e s t o b a c c o pouches tool bags sports bags bottle c a s e s j e we l l ery b o x e s p o w d e r b o xe s beverages bags toilet bags rucksacks handbags wallets purses m a p c a s e s c i g a r e t t e c a s e s t o b a c c o pouches tool bags sports bags bottle c a s e s j e we l l ery b o x e s p o w d e r b o xe s cutlery cases and similar containers of l e at h e r o r o f c om p o s i t io n l e at h e r o f sheeting of plastics of textile materials of vulcanised fibre or of paperboard or w h o l ly o r m a i n ly c o v e r e d w i t h s u c h materials or with paper executive cases school satchels and similar containers >> other +42022910,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:Other :Hand bags of other materials excluding wicker-work or basket work" +42022990,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:Other :Other" +42023110,"TRUNKS, SUIT-CASES, VANITY-CASES, 
EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of leather, of composition leather or of patent leather :Jewellery box(OLD tariff)" +42023120,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of leather, of composition leather or of patent leather :Wallets and purses, of leather(OLD tariff)" +42023190,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of leather, of composition leather or of patent leather :Other(OLD tariff)" +42023200,trunks executive c a s e s b r i e f c a s e s s c h o o l s at c h e l s spectacle cases binocular cases camera cases musical instrument cases gun cases h o l s t e r s a n d s i m i l a r c o n ta i n e r s t r av e l l i n g b a g s i n s u l at e d f o o d o r beverages bags toilet bags rucksacks handbags wallets purses m a p c a s e s c i g a r e t t e c a s e s t o b a c c o pouches tool bags sports bags bottle c a s e s j e we l l ery b o x e s p o w d e r b o xe s beverages bags toilet bags rucksacks handbags wallets purses m a p c a s e s c i g a r e t t e c a s e s t o b a c c o pouches tool bags sports bags bottle c a s e s j e we l l ery b o x e s p o w d e r b o xe s cutlery cases and similar containers of l e at h e r o r o f c om p o s i t io n l e at h e r o f sheeting of plastics of textile materials of vulcanised fibre or of paperboard or w h o l ly o r m a i n ly c o v e r e d w i t h s u c h materials or with paper executive cases school satchels and similar containers >> with outer surface of sheeting of plastics or of textile materials +42023210,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR 
CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of sheeting of plastics or of textile materials:Jewellery box" +42023290,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surface of sheeting of plastics or of textile materials:Other" +42023900,trunks executive c a s e s b r i e f c a s e s s c h o o l s at c h e l s spectacle cases binocular cases camera cases musical instrument cases gun cases h o l s t e r s a n d s i m i l a r c o n ta i n e r s t r av e l l i n g b a g s i n s u l at e d f o o d o r beverages bags toilet bags rucksacks handbags wallets purses m a p c a s e s c i g a r e t t e c a s e s t o b a c c o pouches tool bags sports bags bottle c a s e s j e we l l ery b o x e s p o w d e r b o xe s beverages bags toilet bags rucksacks handbags wallets purses m a p c a s e s c i g a r e t t e c a s e s t o b a c c o pouches tool bags sports bags bottle c a s e s j e we l l ery b o x e s p o w d e r b o xe s cutlery cases and similar containers of l e at h e r o r o f c om p o s i t io n l e at h e r o f sheeting of plastics of textile materials of vulcanised fibre or of paperboard or w h o l ly o r m a i n ly c o v e r e d w i t h s u c h materials or with paper executive cases school satchels and similar containers >> other +42023910,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:Other :Jewellery box" +42023990,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, 
OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:Other :Other" +42029100,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER:With outer surfce of leather or of composition leather:With outer surface of leather, of composition leather or of patent leather" +42029200,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER::With outer surface of plastic sheeting or of textile materials" +42029900,"TRUNKS, SUIT-CASES, VANITY-CASES, EXECUTIVE-CASES, BRIEF-CASES, SCHOOL SATCHELS, SPECTACLE CASES, BINOCULAR CASES, CAMERA CASES, MUSICAL INSTRUMENT CASES, GUN CASES, HOLSTERS AND SIMILAR CONTAINERS; TRAVELLING-BAGS, INSULATED FOOD OR BEVERAGES BAGS, TOILET BAGS, RUCKSACKS, HANDBAGS, SHOPPINGBAGS, WALLETS, PURSES, MAP-CASES, CIGARETTE-CASES, TOBACCO- POUCHES, TOOL BAGS, SPORTS BAGS, BOTTLECASES, JEWELLERY BOXES, POWDER-BOXES, CUTLERY CASES AND SIMILAR CONTAINERS, OF LEATHER OR OF COMPOSITION LEATHER, OF SHEETING OF PLASTICS, OF TEXTILE MATERIALS, OF VULCANISED FIBRE OR OF PAPERBOARD, OR WHOLLY OR MAINLY COVERED WITH SUCH MATERIALS OR WITH PAPER::Other" +42030000,articles of apparel and clothing accessories of leather or of composition leather +42031000,articles of apparel and clothing accessories of leather or of composition leather >> articles of apparel +42031010,"ARTICLES OF APPAREL AND CLOTHING ACCESSORIES, OF LEATHER OR OF COMPOSITION LEATHER:Articles of apparel:Jackets and jerseys" +42031090,"ARTICLES OF APPAREL AND CLOTHING ACCESSORIES, OF LEATHER OR OF COMPOSITION LEATHER:Articles of apparel:Other" +42032100,articles of apparel and clothing accessories of leather or of composition leather >> specially designed for use in sports +42032110,"ARTICLES OF APPAREL AND CLOTHING ACCESSORIES, OF LEATHER OR OF COMPOSITION LEATHER:Specially designed for use in sports:Gloves" +42032120,"ARTICLES OF APPAREL AND CLOTHING ACCESSORIES, OF LEATHER OR OF COMPOSITION LEATHER:Specially designed for use in sports:Mittens and mitts" +42032900,articles of apparel and clothing accessories of leather or of composition leather >> other +42032910,"ARTICLES OF APPAREL AND CLOTHING ACCESSORIES, OF LEATHER OR OF COMPOSITION LEATHER:Other :Gloves for use in industry" +42032920,"ARTICLES OF APPAREL AND CLOTHING ACCESSORIES, OF LEATHER OR OF COMPOSITION LEATHER:Other :Other gloves" 
+42032930,"ARTICLES OF APPAREL AND CLOTHING ACCESSORIES, OF LEATHER OR OF COMPOSITION LEATHER:Other :Mittens and mitts" +42033000,"ARTICLES OF APPAREL AND CLOTHING ACCESSORIES, OF LEATHER OR OF COMPOSITION LEATHER::Belts and bandoliers" +42034000,articles of apparel and clothing accessories of leather or of composition leather >> other clothing accessories +42034010,"ARTICLES OF APPAREL AND CLOTHING ACCESSORIES, OF LEATHER OR OF COMPOSITION LEATHER:Other clothing accessories :Aprons" +42034020,"ARTICLES OF APPAREL AND CLOTHING ACCESSORIES, OF LEATHER OR OF COMPOSITION LEATHER:Other clothing accessories :Semi-chrome grain garments" +42034090,"ARTICLES OF APPAREL AND CLOTHING ACCESSORIES, OF LEATHER OR OF COMPOSITION LEATHER:Other clothing accessories :Other" +42050000,other articles of leather or of composition leather other articles of leather or of composition leather straps other than for machinery or harness +42050011,OTHER ARTICLES OF LEATHER OR OF COMPOSITION LEATHER:Other articles of leather or of composition leather :Welt +42050019,OTHER ARTICLES OF LEATHER OR OF COMPOSITION LEATHER:Other articles of leather or of composition leather :Other +42050020,OTHER ARTICLES OF LEATHER OR OF COMPOSITION LEATHER:Other articles of leather or of composition leather :Leather sofa cover +42050090,OTHER ARTICLES OF LEATHER OR OF COMPOSITION LEATHER:Other articles of leather or of composition leather :Other +42060000,articles of gut other than gut of goldbeater s skin of bladders or of tendons +42060010,"ARTICLES OF GUT (OTHER THAN SILK-WORM GUT), OF GOLDBEATER'S SKIN, OF BLADDERS OR OF TENDONS::For rackets" +42060090,"ARTICLES OF GUT (OTHER THAN SILK-WORM GUT), OF GOLDBEATER'S SKIN, OF BLADDERS OR OF TENDONS::Other" +43011000,"RAW FURSKINS (INCLUDING HEADS, TAILS, PAWS AND OTHER PIECES OR CUTTINGS, SUITABLE FOR FURRIERS USE), OTHER THAN RAW HIDES AND SKINS OF HEADING 4101, 4102 OR 4103::Of mink, whole, with or without head, tail or paws(OLD tariff)" +43013000,"RAW FURSKINS (INCLUDING HEADS, TAILS, PAWS AND OTHER PIECES OR CUTTINGS, SUITABLE FOR FURRIERS USE), OTHER THAN RAW HIDES AND SKINS OF HEADING 4101, 4102 OR 4103::Of lamb, the following Astrakhan, Broadtail, Caracul, Persian and similar lamb, Indian, Chinese, Mongolian or Tibetan lamb, whole, with or without head, tail or paws(OLD tariff)" +43016000,"RAW FURSKINS (INCLUDING HEADS, TAILS, PAWS AND OTHER PIECES OR CUTTINGS, SUITABLE FOR FURRIERS USE), OTHER THAN RAW HIDES AND SKINS OF HEADING 4101, 4102 OR 4103::Of fox, whole, with or without head, tail or paws(OLD tariff)" +43018000,"RAW FURSKINS (INCLUDING HEADS, TAILS, PAWS AND OTHER PIECES OR CUTTINGS, SUITABLE FOR FURRIERS USE), OTHER THAN RAW HIDES AND SKINS OF HEADING 4101, 4102 OR 4103::Other furskins, whole, with or without head, tail or paws(OLD tariff)" +43019000,"RAW FURSKINS (INCLUDING HEADS, TAILS, PAWS AND OTHER PIECES OR CUTTINGS, SUITABLE FOR FURRIERS USE), OTHER THAN RAW HIDES AND SKINS OF HEADING 4101, 4102 OR 4103::Heads, tails, paws and other pieces or cuttings, suitable for furriers use(OLD tariff)" +43020000,tanned or dressed furskins including heads tails paws and other pieces or cuttings the unassembled or assembled without addition of other materials other than those of heading 4303 whole skins with or without head tail or paws not assembled +43021100,"TANNED OR DRESSED FURSKINS (INCLUDING HEADS, TAILS, PAWS AND OTHER PIECES OR CUTTINGS), UNASSEMBLED, OR ASSEMBLED (WITHOUT THE ADDITION OF OTHER MATERIALS) OTHER THAN THOSE OF HEADING 4303::Of mink" +43021900,tanned or 
dressed furskins including heads tails paws and other pieces or cuttings the unassembled or assembled without addition of other materials other than those of heading 4303 whole skins with or without head tail or paws not assembled >> other +43021910,"TANNED OR DRESSED FURSKINS (INCLUDING HEADS, TAILS, PAWS AND OTHER PIECES OR CUTTINGS), UNASSEMBLED, OR ASSEMBLED (WITHOUT THE ADDITION OF OTHER MATERIALS) OTHER THAN THOSE OF HEADING 4303:Other:Calf skins, with hair on, tanned or dressed" +43021920,"TANNED OR DRESSED FURSKINS (INCLUDING HEADS, TAILS, PAWS AND OTHER PIECES OR CUTTINGS), UNASSEMBLED, OR ASSEMBLED (WITHOUT THE ADDITION OF OTHER MATERIALS) OTHER THAN THOSE OF HEADING 4303:Other:Hides or skins of other bovine and equine animals with hair on, tanned or dressed" +43021930,"TANNED OR DRESSED FURSKINS (INCLUDING HEADS, TAILS, PAWS AND OTHER PIECES OR CUTTINGS), UNASSEMBLED, OR ASSEMBLED (WITHOUT THE ADDITION OF OTHER MATERIALS) OTHER THAN THOSE OF HEADING 4303:Other:Goat (Common) and kid skins with hair on, tanned or dressed" +43021940,"TANNED OR DRESSED FURSKINS (INCLUDING HEADS, TAILS, PAWS AND OTHER PIECES OR CUTTINGS), UNASSEMBLED, OR ASSEMBLED (WITHOUT THE ADDITION OF OTHER MATERIALS) OTHER THAN THOSE OF HEADING 4303:Other:Tiger-cat skins" +43021990,"TANNED OR DRESSED FURSKINS (INCLUDING HEADS, TAILS, PAWS AND OTHER PIECES OR CUTTINGS), UNASSEMBLED, OR ASSEMBLED (WITHOUT THE ADDITION OF OTHER MATERIALS) OTHER THAN THOSE OF HEADING 4303:Other:Hides and skins of oher animals with hair on, tanned or dressed" +43022000,"TANNED OR DRESSED FURSKINS (INCLUDING HEADS, TAILS, PAWS AND OTHER PIECES OR CUTTINGS), UNASSEMBLED, OR ASSEMBLED (WITHOUT THE ADDITION OF OTHER MATERIALS) OTHER THAN THOSE OF HEADING 4303::Heads, tails, paws and other pieces or cuttings, not assembled" +43023000,"TANNED OR DRESSED FURSKINS (INCLUDING HEADS, TAILS, PAWS AND OTHER PIECES OR CUTTINGS), UNASSEMBLED, OR ASSEMBLED (WITHOUT THE ADDITION OF OTHER MATERIALS) OTHER THAN THOSE OF HEADING 4303::Whole skins and pieces or cuttings thereof, assembled" +43030000,articles of apparel clothing accessories and other articles of furskin +43031000,articles of apparel clothing accessories and other articles of furskin >> articles of apparel and clothing accessories +43031010,"ARTICLES OF APPAREL, CLOTHING ACCESSORIES AND OTHER ARTICLES OF FURSKIN:Articles of apparel and clothing accessories:Of wild animals covered under the Wild Life (Protection) Act, 1972" +43031020,"ARTICLES OF APPAREL, CLOTHING ACCESSORIES AND OTHER ARTICLES OF FURSKIN:Articles of apparel and clothing accessories:Of animals covered under Convention on International Trade of Endangered Species (CITES), other than those of Tariff Item 4303 10 10" +43031090,"ARTICLES OF APPAREL, CLOTHING ACCESSORIES AND OTHER ARTICLES OF FURSKIN:Articles of apparel and clothing accessories:Other" +43039000,articles of apparel clothing accessories and other articles of furskin >> other +43039010,"ARTICLES OF APPAREL, CLOTHING ACCESSORIES AND OTHER ARTICLES OF FURSKIN:Other:Of wild animals covered under the Wild Life (Protection) Act, 1972" +43039020,"ARTICLES OF APPAREL, CLOTHING ACCESSORIES AND OTHER ARTICLES OF FURSKIN:Other:Of animals covered under Convention on International Trade of Endangered Species (CITES), other than those of Tariff Item 4303 90 10" +43039090,"ARTICLES OF APPAREL, CLOTHING ACCESSORIES AND OTHER ARTICLES OF FURSKIN:Other:Other" +43040000,artificial fur and articles thereof artificial fur and articles thereof artificial fur +43040011,"ARTIFICIAL FUR 
AND ARTICLES THEREOF:Artificial fur and articles thereof:Artificial fur as trimmings and embellishments for garments, made ups, knitwear, plastic and leather goods" +43040019,ARTIFICIAL FUR AND ARTICLES THEREOF:Artificial fur and articles thereof:Other +43040020,ARTIFICIAL FUR AND ARTICLES THEREOF:Artificial fur and articles thereof:Articles of artificial fur +44011010,"FUEL WOOD, IN LOGS, IN BILLETS, IN TWIGS, IN FAGGOTS OR IN SIMILAR FORMS; WOOD IN CHIPS OR PARTICLES; SAWDUST AND WOOD WASTE AND SCRAP, WHETHER OR NOT AGGLOMERATED IN LOGS, BRIQUETTES, PELLETS OR SIMILAR FORMS:Fuel wood, in logs, in billets, in twigs, in faggots or in similar forms :In logs(OLD tariff)" +44011090,"FUEL WOOD, IN LOGS, IN BILLETS, IN TWIGS, IN FAGGOTS OR IN SIMILAR FORMS; WOOD IN CHIPS OR PARTICLES; SAWDUST AND WOOD WASTE AND SCRAP, WHETHER OR NOT AGGLOMERATED IN LOGS, BRIQUETTES, PELLETS OR SIMILAR FORMS:Fuel wood, in logs, in billets, in twigs, in faggots or in similar forms :Other(OLD tariff)" +44011110,"FUEL WOOD, IN LOGS, IN BILLETS, IN TWIGS, IN FAGGOTS OR IN SIMILAR FORMS; WOOD IN CHIPS OR PARTICLES; SAWDUST AND WOOD WASTE AND SCRAP, WHETHER OR NOT AGGLOMERATED IN LOGS, BRIQUETTES, PELLETS OR SIMILAR FORMS:Coniferous:In logs(OLD tariff)" +44011190,"FUEL WOOD, IN LOGS, IN BILLETS, IN TWIGS, IN FAGGOTS OR IN SIMILAR FORMS; WOOD IN CHIPS OR PARTICLES; SAWDUST AND WOOD WASTE AND SCRAP, WHETHER OR NOT AGGLOMERATED IN LOGS, BRIQUETTES, PELLETS OR SIMILAR FORMS:Coniferous:Other(OLD tariff)" +44011210,"FUEL WOOD, IN LOGS, IN BILLETS, IN TWIGS, IN FAGGOTS OR IN SIMILAR FORMS; WOOD IN CHIPS OR PARTICLES; SAWDUST AND WOOD WASTE AND SCRAP, WHETHER OR NOT AGGLOMERATED IN LOGS, BRIQUETTES, PELLETS OR SIMILAR FORMS:Non-Coniferous:In logs(OLD tariff)" +44011290,"FUEL WOOD, IN LOGS, IN BILLETS, IN TWIGS, IN FAGGOTS OR IN SIMILAR FORMS; WOOD IN CHIPS OR PARTICLES; SAWDUST AND WOOD WASTE AND SCRAP, WHETHER OR NOT AGGLOMERATED IN LOGS, BRIQUETTES, PELLETS OR SIMILAR FORMS:Non-Coniferous:Other(OLD tariff)" +44012100,"FUEL WOOD, IN LOGS, IN BILLETS, IN TWIGS, IN FAGGOTS OR IN SIMILAR FORMS; WOOD IN CHIPS OR PARTICLES; SAWDUST AND WOOD WASTE AND SCRAP, WHETHER OR NOT AGGLOMERATED IN LOGS, BRIQUETTES, PELLETS OR SIMILAR FORMS::Coniferous(OLD tariff)" +44012200,"FUEL WOOD, IN LOGS, IN BILLETS, IN TWIGS, IN FAGGOTS OR IN SIMILAR FORMS; WOOD IN CHIPS OR PARTICLES; SAWDUST AND WOOD WASTE AND SCRAP, WHETHER OR NOT AGGLOMERATED IN LOGS, BRIQUETTES, PELLETS OR SIMILAR FORMS::Non-coniferous(OLD tariff)" +44013100,"FUEL WOOD, IN LOGS, IN BILLETS, IN TWIGS, IN FAGGOTS OR IN SIMILAR FORMS; WOOD IN CHIPS OR PARTICLES; SAWDUST AND WOOD WASTE AND SCRAP, WHETHER OR NOT AGGLOMERATED IN LOGS, BRIQUETTES, PELLETS OR SIMILAR FORMS:wood pellets:Wood pellets(OLD tariff)" +44013200,"FUEL WOOD, IN LOGS, IN BILLETS, IN TWIGS, IN FAGGOTS OR IN SIMILAR FORMS; WOOD IN CHIPS OR PARTICLES; SAWDUST AND WOOD WASTE AND SCRAP, WHETHER OR NOT AGGLOMERATED IN LOGS, BRIQUETTES, PELLETS OR SIMILAR FORMS::Wood briquettes(OLD tariff)" +44013900,"FUEL WOOD, IN LOGS, IN BILLETS, IN TWIGS, IN FAGGOTS OR IN SIMILAR FORMS; WOOD IN CHIPS OR PARTICLES; SAWDUST AND WOOD WASTE AND SCRAP, WHETHER OR NOT AGGLOMERATED IN LOGS, BRIQUETTES, PELLETS OR SIMILAR FORMS:Other:Other(OLD tariff)" +44014000,"FUEL WOOD, IN LOGS, IN BILLETS, IN TWIGS, IN FAGGOTS OR IN SIMILAR FORMS; WOOD IN CHIPS OR PARTICLES; SAWDUST AND WOOD WASTE AND SCRAP, WHETHER OR NOT AGGLOMERATED IN LOGS, BRIQUETTES, PELLETS OR SIMILAR FORMS::Sawdust and wood waste and scrap, not agglomerated(OLD tariff)" 
+44014100,"FUEL WOOD, IN LOGS, IN BILLETS, IN TWIGS, IN FAGGOTS OR IN SIMILAR FORMS; WOOD IN CHIPS OR PARTICLES; SAWDUST AND WOOD WASTE AND SCRAP, WHETHER OR NOT AGGLOMERATED IN LOGS, BRIQUETTES, PELLETS OR SIMILAR FORMS::Sawdust(OLD tariff)" +44014900,"FUEL WOOD, IN LOGS, IN BILLETS, IN TWIGS, IN FAGGOTS OR IN SIMILAR FORMS; WOOD IN CHIPS OR PARTICLES; SAWDUST AND WOOD WASTE AND SCRAP, WHETHER OR NOT AGGLOMERATED IN LOGS, BRIQUETTES, PELLETS OR SIMILAR FORMS::Other(OLD tariff)" +44020000,wood charcoal including shell or nut charcoal whether or not agglomerated wood charcoal including shell or nut charcoal whether or not agglomerated +44021000,"WOOD CHARCOAL (INCLUDING SHELL OR NUT CHARCOAL), WHETHER OR NOT AGGLOMERATED:Wood charcoal (including shell or nut charcoal), whether or not agglomerated:Of bamboo" +44021010,"WOOD CHARCOAL (INCLUDING SHELL OR NUT CHARCOAL), WHETHER OR NOT AGGLOMERATED:Wood charcoal (including shell or nut charcoal), whether or not agglomerated:Of bamboo(OLD tariff)" +44022000,wood charcoal including shell or nut charcoal whether or not agglomerated wood charcoal including shell or nut charcoal whether or not agglomerated >> of shell or nut of shell or nut +44022010,"WOOD CHARCOAL (INCLUDING SHELL OR NUT CHARCOAL), WHETHER OR NOT AGGLOMERATED:Of Shell or nut:of coconut shell" +44022090,"WOOD CHARCOAL (INCLUDING SHELL OR NUT CHARCOAL), WHETHER OR NOT AGGLOMERATED:Of Shell or nut:Other" +44029000,"WOOD CHARCOAL (INCLUDING SHELL OR NUT CHARCOAL), WHETHER OR NOT AGGLOMERATED:Other :Other" +44029010,"WOOD CHARCOAL (INCLUDING SHELL OR NUT CHARCOAL), WHETHER OR NOT AGGLOMERATED:Other :Of coconut shell(OLD tariff)" +44029090,"WOOD CHARCOAL (INCLUDING SHELL OR NUT CHARCOAL), WHETHER OR NOT AGGLOMERATED:Other :Other(OLD tariff)" +44030000,wood in the rough whether or not stripped of bark or sapwood or roughly squared treated with paint stains creosote or other preservatives wood in the rough whether or not stripped of bark or sapwood or roughly squared treated with paint stains creosote or other preservatives +44031000,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED::Treated with paint, stains, creosote or other preservatives(OLD tariff)" +44031100,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED::Coniferous" +44031200,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED::Non-Coniferous" +44032010,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other, coniferous :Sawlogs and veneerlogs(OLD tariff)" +44032020,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other, coniferous :Poles, pilling and posts(OLD tariff)" +44032090,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other, coniferous :Other(OLD tariff)" +44032100,wood in the rough whether or not stripped of bark or sapwood or roughly squared treated with paint stains creosote or other preservatives wood in the rough whether or not stripped of bark or sapwood or roughly squared treated with paint stains creosote or other preservatives >> of pine pinus spp of which the smallest dimension is 15 cm or more of pine pinus spp of which the smallest dimension is 15 cm or more +44032110,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Of pine (Pinus spp.), of which any cross-sectional dimension is 15 cm or more:Saw logs and veneer logs" +44032120,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK 
OR SAPWOOD, OR ROUGHLY SQUARED:Of pine (Pinus spp.), of which any cross-sectional dimension is 15 cm or more:Poles, pilling and posts" +44032190,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Of pine (Pinus spp.), of which any cross-sectional dimension is 15 cm or more:other" +44032200,wood in the rough whether or not stripped of bark or sapwood or roughly squared treated with paint stains creosote or other preservatives wood in the rough whether or not stripped of bark or sapwood or roughly squared treated with paint stains creosote or other preservatives >> of pine pinus spp other of pine pinus spp other +44032210,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED::Saw logs and veneer logs" +44032220,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED::Poles, pilling and posts" +44032290,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED::Other" +44032300,wood in the rough whether or not stripped of bark or sapwood or roughly squared treated with paint stains creosote or other preservatives wood in the rough whether or not stripped of bark or sapwood or roughly squared treated with paint stains creosote or other preservatives >> of fir abies spp and spruce picea spp of which the smallest crosssectional dimension is 15 cm or more of fir abies spp and spruce picea spp of which the smallest crosssectional dimension is 15 cm or more +44032310,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Of fir (Abies spp.) and spruce (Picea spp.), of which any cross-sectional dimension is 15 cm or more:Saw logs and veneer logs" +44032320,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Of fir (Abies spp.) and spruce (Picea spp.), of which any cross-sectional dimension is 15 cm or more:Poles, pilling and posts" +44032390,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Of fir (Abies spp.) and spruce (Picea spp.), of which any cross-sectional dimension is 15 cm or more:Other" +44032400,wood in the rough whether or not stripped of bark or sapwood or roughly squared treated with paint stains creosote or other preservatives wood in the rough whether or not stripped of bark or sapwood or roughly squared treated with paint stains creosote or other preservatives >> of fir abies spp and spruce picea spp other of fir abies spp and spruce picea spp other +44032410,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Of fir (Abies spp.) and spruce (Picea spp.), other:Saw logs and veneer logs" +44032420,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Of fir (Abies spp.) and spruce (Picea spp.), other:Poles, pilling and posts" +44032490,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Of fir (Abies spp.) 
and spruce (Picea spp.), other:Other" +44032500,wood in the rough whether or not stripped of bark or sapwood or roughly squared treated with paint stains creosote or other preservatives wood in the rough whether or not stripped of bark or sapwood or roughly squared treated with paint stains creosote or other preservatives >> other of which the smallest dimension is 15 cm or more +44032510,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other, of which any cross-sectional dimension is 15 cm or more:Saw logs and veneer logs" +44032520,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other, of which any cross-sectional dimension is 15 cm or more:Poles, pilling and posts" +44032590,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other, of which any cross-sectional dimension is 15 cm or more:Other" +44032600,wood in the rough whether or not stripped of bark or sapwood or roughly squared treated with paint stains creosote or other preservatives wood in the rough whether or not stripped of bark or sapwood or roughly squared treated with paint stains creosote or other preservatives >> other +44032610,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other:Saw logs and veneer logs" +44032620,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other:Poles, pilling and posts" +44032690,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other:Other" +44034100,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED::Dark Red Meranti, Light Red Meranti and Meranti Bakau" +44034200,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED::Teak" +44034900,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other :Other" +44034910,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other :Teak wood in rough(OLD tariff)" +44034990,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other :Other(OLD tariff)" +44039100,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED::Of oak (Quercus Spp.)(OLD tariff)" +44039200,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED::Of beech (Fagus Spp.)(OLD tariff)" +44039300,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED::Of beech (Fagus spp.), of which any cross-sectional dimension is 15 cm or more" +44039400,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED::Of beech (Fagus spp.), other" +44039500,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED::Of birch (Betula spp.), of which any cross-sectional dimension is 15 cm or more" +44039600,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED::Of birch (Betula spp.), other" +44039700,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED::Of poplar and aspen (Populus spp.)" +44039800,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED::Of eucalyptus (Eucalyptus spp.)" +44039900,wood in the rough whether or not stripped of bark or sapwood or roughly squared treated with paint stains creosote or other preservatives wood in the rough whether or not stripped of bark or sapwood or roughly squared treated with paint stains creosote or other 
preservatives >> other andaman padauk pterocarous terminalia alata paliwood palaquium elliplicum and red sanders pterocar pus sautaninus and rose wood dalbergea latifolio +44039911,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other :Andaman Padauk (Pterocarous dalbaergiodes)" +44039912,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other :Bonsum (Phoebe goalparensis)" +44039913,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other :Gurgan (Dipterocarpus alatus)" +44039914,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other :Khair (Acacia Catechu)" +44039915,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other :Lampati (Duabanga grandiflora)" +44039916,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other :Laurel (Terminalia alata)" +44039917,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other :Paliwood (Palaquium Elliplicum)" +44039918,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other :Red Sanders(Pterocar pus Sautatinus)" +44039919,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other :Rose Wood (Dalbergea Latifolio)" +44039921,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other :Sal (Chorea robusta)" +44039922,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other :Sandal wood (Santalum alburn)" +44039923,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other :Semul (Bombax ceiba)" +44039924,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other :Walnut wood (Juglans binata)" +44039925,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other :Anjam (Hardwickia binata)" +44039926,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other :Birch (Betula Spp.)(OLD tariff)" +44039927,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other :Sissoo (Dalbergia sisso)" +44039928,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other :White cedar (Dysozylum malabaricum)" +44039929,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other :Other(OLD tariff)" +44039990,"WOOD IN THE ROUGH, WHETHER OR NOT STRIPPED OF BARK OR SAPWOOD, OR ROUGHLY SQUARED:Other :Other" +44040000,hoopwood split poles piles pickets and stakes pointed but of wood not sawn lengthwise wooden sticks roughly trimmed but not turned bent or otherwise worked suitable for the manufacture of walking sticks umbrellas tool handles or the like chipwood and the like +44041000,"HOOPWOOD; SPLIT POLES; PILES, PICKETS AND STAKES OF WOOD, POINTED BUT NOT SAWN LENGTHWISE; WOODEN STICKS, ROUGHLY TRIMMED BUT NOT TURNED, BENT OR OTHERWISE WORKED, SUITABLE FOR THE MANUFACTURE OF WALKING STICKS, UMBRELLAS, TOOL HANDLES OR THE LIKE; CHIPWOOD AND THE LIKE::Coniferous" +44042000,hoopwood split poles piles pickets and stakes pointed but of wood not sawn lengthwise wooden sticks roughly trimmed but not turned bent or otherwise worked suitable for the manufacture of walking sticks umbrellas tool handles or the like chipwood and the like >> +44042010,"HOOPWOOD; SPLIT POLES; PILES, PICKETS AND STAKES OF WOOD, POINTED BUT NOT SAWN 
LENGTHWISE; WOODEN STICKS, ROUGHLY TRIMMED BUT NOT TURNED, BENT OR OTHERWISE WORKED, SUITABLE FOR THE MANUFACTURE OF WALKING STICKS, UMBRELLAS, TOOL HANDLES OR THE LIKE; CHIPWOOD AND THE LIKE:Non-coniferous :Wooden sticks, roughly trimmed but not turned, bent or otherwise worked, suitable for the manufacture of walking sticks, tool handles, split poles, piles, pickets, stakes and the like" +44042020,"HOOPWOOD; SPLIT POLES; PILES, PICKETS AND STAKES OF WOOD, POINTED BUT NOT SAWN LENGTHWISE; WOODEN STICKS, ROUGHLY TRIMMED BUT NOT TURNED, BENT OR OTHERWISE WORKED, SUITABLE FOR THE MANUFACTURE OF WALKING STICKS, UMBRELLAS, TOOL HANDLES OR THE LIKE; CHIPWOOD AND THE LIKE:Non-coniferous :Drawn Wood" +44042090,"HOOPWOOD; SPLIT POLES; PILES, PICKETS AND STAKES OF WOOD, POINTED BUT NOT SAWN LENGTHWISE; WOODEN STICKS, ROUGHLY TRIMMED BUT NOT TURNED, BENT OR OTHERWISE WORKED, SUITABLE FOR THE MANUFACTURE OF WALKING STICKS, UMBRELLAS, TOOL HANDLES OR THE LIKE; CHIPWOOD AND THE LIKE:Non-coniferous :Other" +44050000,::WOOD WOOL; WOOD FLOUR +44061000,RAILWAY OR TRAMWAY SLEEPERS (CROSSTIES) OF WOOD::Not impregnated(OLD tariff) +44061100,RAILWAY OR TRAMWAY SLEEPERS (CROSSTIES) OF WOOD::Coniferous(OLD tariff) +44061200,RAILWAY OR TRAMWAY SLEEPERS (CROSSTIES) OF WOOD::Non-coniferous(OLD tariff) +44069000,RAILWAY OR TRAMWAY SLEEPERS (CROSSTIES) OF WOOD::Other(OLD tariff) +44069100,RAILWAY OR TRAMWAY SLEEPERS (CROSSTIES) OF WOOD::Coniferous(OLD tariff) +44069200,RAILWAY OR TRAMWAY SLEEPERS (CROSSTIES) OF WOOD::Non-coniferous(OLD tariff) +44070000,wood sawn or chipped lengthwise sliced or peeled whether or not planed sanded or of a thickness exceeding 6 mm coniferous +44071010,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM:Coniferrous:Doglas fir (Pscudotsuga Menziesie)(OLD tariff)" +44071020,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM:Coniferrous:Pine (Pinus Spp.)(OLD tariff)" +44071090,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM:Coniferrous:Other(OLD tariff)" +44071100,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM::Of pine (Pinus spp.)" +44071200,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM::Of fir (Abies spp.) and Spruce ( Picea spp.)" +44071300,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM:of spf:Of S-P-F (spruce (Picea spp.), pine (Pinus spp.) 
and fir (Abies spp.))" +44071400,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM::Of Hem-fir (Western hemlock (Tsuga heterophylla) and fir (Abies spp.))" +44071900,wood sawn or chipped lengthwise sliced or peeled whether or not planed sanded or of a thickness exceeding 6 mm coniferous >> other +44071910,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM:Other:Douglas fir (Pseudotsuga menziesii)" +44071990,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM:Other:Other" +44072100,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM::Mahogany (Swietenia spp." +44072200,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM::Virola, Imbuia and balsa" +44072300,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM::Teak" +44072500,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM::Dark Red Meranti, Light Red Meranti and Meranti Bakau" +44072600,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM::White Lauan, White Meranti, White Seraya, Yellow Meranti and Alan(OLD tariff)" +44072700,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM::Sapelli(OLD tariff)" +44072800,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM::Iroko" +44072900,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM:Other :Other" +44072910,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM:Other :Teak wood(OLD tariff)" +44072990,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM:Other :Other(OLD tariff)" +44079100,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM::Of oak (Quercus Spp.)" +44079200,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM::Of beech (Fagus Spp.)" +44079300,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM::Of maple (Acer spp.)" +44079400,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM::Of cherry (Prunus spp.)" +44079500,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM::Of ash (Fraxinus spp.)" +44079600,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM::Of birch (Betula spp.)" +44079700,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM::Of poplar and aspen (Populus spp.)" +44079900,wood sawn or chipped 
lengthwise sliced or peeled whether or not planed sanded or of a thickness exceeding 6 mm coniferous >> other +44079910,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM:Other :Of Birch (Betula Spp.)(OLD tariff)" +44079920,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM:Other :Willow" +44079990,"WOOD SAWN OR CHIPPED LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED OR ENDJOINTED, OF A THICKNESS EXCEEDING 6 MM:Other :Other" +44081010,"SHEETS FOR VENEERING (INCLUDING THOSE OBTAINED BY SLICING LAMINATED WOOD), FOR PLYWOOD OR FOR SIMILAR LAMINATED WOOD AND OTHER WOOD, SAWN LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED, SPLICED OR END-JOINTED, OF A THICKNESS NOT EXCEEDING 6 MM:Coniferous:Sheets for plywood(OLD tariff)" +44081020,"SHEETS FOR VENEERING (INCLUDING THOSE OBTAINED BY SLICING LAMINATED WOOD), FOR PLYWOOD OR FOR SIMILAR LAMINATED WOOD AND OTHER WOOD, SAWN LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED, SPLICED OR END-JOINTED, OF A THICKNESS NOT EXCEEDING 6 MM:Coniferous:Oak wood veneer(OLD tariff)" +44081030,"SHEETS FOR VENEERING (INCLUDING THOSE OBTAINED BY SLICING LAMINATED WOOD), FOR PLYWOOD OR FOR SIMILAR LAMINATED WOOD AND OTHER WOOD, SAWN LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED, SPLICED OR END-JOINTED, OF A THICKNESS NOT EXCEEDING 6 MM:Coniferous:Veneer sheets, for match boxes and match splints(OLD tariff)" +44081090,"SHEETS FOR VENEERING (INCLUDING THOSE OBTAINED BY SLICING LAMINATED WOOD), FOR PLYWOOD OR FOR SIMILAR LAMINATED WOOD AND OTHER WOOD, SAWN LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED, SPLICED OR END-JOINTED, OF A THICKNESS NOT EXCEEDING 6 MM:Coniferous:Other(OLD tariff)" +44083110,"SHEETS FOR VENEERING (INCLUDING THOSE OBTAINED BY SLICING LAMINATED WOOD), FOR PLYWOOD OR FOR SIMILAR LAMINATED WOOD AND OTHER WOOD, SAWN LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED, SPLICED OR END-JOINTED, OF A THICKNESS NOT EXCEEDING 6 MM:Of Dark Red Meranti, Light Red Meranti and Meranti Bakau:Sheets for plywood(OLD tariff)" +44083120,"SHEETS FOR VENEERING (INCLUDING THOSE OBTAINED BY SLICING LAMINATED WOOD), FOR PLYWOOD OR FOR SIMILAR LAMINATED WOOD AND OTHER WOOD, SAWN LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED, SPLICED OR END-JOINTED, OF A THICKNESS NOT EXCEEDING 6 MM:Of Dark Red Meranti, Light Red Meranti and Meranti Bakau:Veneer sheets (of Rose wood)(OLD tariff)" +44083130,"SHEETS FOR VENEERING (INCLUDING THOSE OBTAINED BY SLICING LAMINATED WOOD), FOR PLYWOOD OR FOR SIMILAR LAMINATED WOOD AND OTHER WOOD, SAWN LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED, SPLICED OR END-JOINTED, OF A THICKNESS NOT EXCEEDING 6 MM:Of Dark Red Meranti, Light Red Meranti and Meranti Bakau:Veneer sheets, for match boxes and match splints(OLD tariff)" +44083190,"SHEETS FOR VENEERING (INCLUDING THOSE OBTAINED BY SLICING LAMINATED WOOD), FOR PLYWOOD OR FOR SIMILAR LAMINATED WOOD AND OTHER WOOD, SAWN LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED, SPLICED OR END-JOINTED, OF A THICKNESS NOT EXCEEDING 6 MM:Of Dark Red Meranti, Light Red Meranti and Meranti Bakau:Other(OLD tariff)" +44083910,"SHEETS FOR VENEERING (INCLUDING THOSE OBTAINED BY SLICING LAMINATED WOOD), FOR PLYWOOD OR FOR SIMILAR LAMINATED WOOD AND OTHER WOOD, SAWN LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED, SPLICED OR END-JOINTED, OF A THICKNESS 
NOT EXCEEDING 6 MM:Other :Sheets for plywoods(OLD tariff)" +44083920,"SHEETS FOR VENEERING (INCLUDING THOSE OBTAINED BY SLICING LAMINATED WOOD), FOR PLYWOOD OR FOR SIMILAR LAMINATED WOOD AND OTHER WOOD, SAWN LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED, SPLICED OR END-JOINTED, OF A THICKNESS NOT EXCEEDING 6 MM:Other :Veneer sheets (of Rose wood)(OLD tariff)" +44083930,"SHEETS FOR VENEERING (INCLUDING THOSE OBTAINED BY SLICING LAMINATED WOOD), FOR PLYWOOD OR FOR SIMILAR LAMINATED WOOD AND OTHER WOOD, SAWN LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED, SPLICED OR END-JOINTED, OF A THICKNESS NOT EXCEEDING 6 MM:Other :Veneer sheets, for match boxes and match splints(OLD tariff)" +44083990,"SHEETS FOR VENEERING (INCLUDING THOSE OBTAINED BY SLICING LAMINATED WOOD), FOR PLYWOOD OR FOR SIMILAR LAMINATED WOOD AND OTHER WOOD, SAWN LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED, SPLICED OR END-JOINTED, OF A THICKNESS NOT EXCEEDING 6 MM:Other :Other(OLD tariff)" +44089010,"SHEETS FOR VENEERING (INCLUDING THOSE OBTAINED BY SLICING LAMINATED WOOD), FOR PLYWOOD OR FOR SIMILAR LAMINATED WOOD AND OTHER WOOD, SAWN LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED, SPLICED OR END-JOINTED, OF A THICKNESS NOT EXCEEDING 6 MM:Other :Sheets for plywoods(OLD tariff)" +44089020,"SHEETS FOR VENEERING (INCLUDING THOSE OBTAINED BY SLICING LAMINATED WOOD), FOR PLYWOOD OR FOR SIMILAR LAMINATED WOOD AND OTHER WOOD, SAWN LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED, SPLICED OR END-JOINTED, OF A THICKNESS NOT EXCEEDING 6 MM:Other :Veneer sheets, for match boxes and match splints(OLD tariff)" +44089090,"SHEETS FOR VENEERING (INCLUDING THOSE OBTAINED BY SLICING LAMINATED WOOD), FOR PLYWOOD OR FOR SIMILAR LAMINATED WOOD AND OTHER WOOD, SAWN LENGTHWISE, SLICED OR PEELED, WHETHER OR NOT PLANED, SANDED, SPLICED OR END-JOINTED, OF A THICKNESS NOT EXCEEDING 6 MM:Other :Other(OLD tariff)" +44090000,wood including strips and friezes for parquet flooring not assembled continuously shaped tongued grooved rebated chamfered beaded moulded rounded or the like along any of its edges or faces whether or not planed sanded or +44091000,wood including strips and friezes for parquet flooring not assembled continuously shaped tongued grooved rebated chamfered beaded moulded rounded or the like along any of its edges or faces whether or not planed sanded or >> coniferous +44091010,"WOOD (INCLUDING STRIPS AND FRIEZES FOR PARQUET FLOORING, NOT ASSEMBLED) CONTINUOUSLY SHAPED (TONGUED, GROOVED, REBATED, CHAMFERED, V-JOINTED, BEADED, MOULDED, ROUNDED OR THE LIKE) ALONG ANY OF ITS EDGES OR FACES, WHETHER OR NOT PLANED, SANDED OR END-JOINTED:Coniferous :Planed, tongued, grooved, rebated, chamfered, V-jointed, and the like but not further moulded" +44091020,"WOOD (INCLUDING STRIPS AND FRIEZES FOR PARQUET FLOORING, NOT ASSEMBLED) CONTINUOUSLY SHAPED (TONGUED, GROOVED, REBATED, CHAMFERED, V-JOINTED, BEADED, MOULDED, ROUNDED OR THE LIKE) ALONG ANY OF ITS EDGES OR FACES, WHETHER OR NOT PLANED, SANDED OR END-JOINTED:Coniferous :Beadings, and mouldings (including moulded, skirting and other moulded boards)" +44091090,"WOOD (INCLUDING STRIPS AND FRIEZES FOR PARQUET FLOORING, NOT ASSEMBLED) CONTINUOUSLY SHAPED (TONGUED, GROOVED, REBATED, CHAMFERED, V-JOINTED, BEADED, MOULDED, ROUNDED OR THE LIKE) ALONG ANY OF ITS EDGES OR FACES, WHETHER OR NOT PLANED, SANDED OR END-JOINTED:Coniferous :Other" +44092100,"WOOD (INCLUDING STRIPS AND FRIEZES FOR PARQUET FLOORING, NOT ASSEMBLED) CONTINUOUSLY SHAPED 
(TONGUED, GROOVED, REBATED, CHAMFERED, V-JOINTED, BEADED, MOULDED, ROUNDED OR THE LIKE) ALONG ANY OF ITS EDGES OR FACES, WHETHER OR NOT PLANED, SANDED OR END-JOINTED::Of bamboo" +44092200,"WOOD (INCLUDING STRIPS AND FRIEZES FOR PARQUET FLOORING, NOT ASSEMBLED) CONTINUOUSLY SHAPED (TONGUED, GROOVED, REBATED, CHAMFERED, V-JOINTED, BEADED, MOULDED, ROUNDED OR THE LIKE) ALONG ANY OF ITS EDGES OR FACES, WHETHER OR NOT PLANED, SANDED OR END-JOINTED::Of tropical wood" +44092900,wood including strips and friezes for parquet flooring not assembled continuously shaped tongued grooved rebated chamfered beaded moulded rounded or the like along any of its edges or faces whether or not planed sanded or >> other +44092910,"WOOD (INCLUDING STRIPS AND FRIEZES FOR PARQUET FLOORING, NOT ASSEMBLED) CONTINUOUSLY SHAPED (TONGUED, GROOVED, REBATED, CHAMFERED, V-JOINTED, BEADED, MOULDED, ROUNDED OR THE LIKE) ALONG ANY OF ITS EDGES OR FACES, WHETHER OR NOT PLANED, SANDED OR END-JOINTED:Other:Planed, tongued, grooved, rebated, chamfered, V-jointed, and the like but not further moulded" +44092920,"WOOD (INCLUDING STRIPS AND FRIEZES FOR PARQUET FLOORING, NOT ASSEMBLED) CONTINUOUSLY SHAPED (TONGUED, GROOVED, REBATED, CHAMFERED, V-JOINTED, BEADED, MOULDED, ROUNDED OR THE LIKE) ALONG ANY OF ITS EDGES OR FACES, WHETHER OR NOT PLANED, SANDED OR END-JOINTED:Other:Beadings and mouldings (including moulded skirting and other moulded boards)" +44092990,"WOOD (INCLUDING STRIPS AND FRIEZES FOR PARQUET FLOORING, NOT ASSEMBLED) CONTINUOUSLY SHAPED (TONGUED, GROOVED, REBATED, CHAMFERED, V-JOINTED, BEADED, MOULDED, ROUNDED OR THE LIKE) ALONG ANY OF ITS EDGES OR FACES, WHETHER OR NOT PLANED, SANDED OR END-JOINTED:Other:Other" +44100000,particle board oriented strand board osb and similar board for example waferboard of wood or other ligneous materials whether or not agglomerated with resins or other organic binding substances of wood +44101100,particle board oriented strand board osb and similar board for example waferboard of wood or other ligneous materials whether or not agglomerated with resins or other organic binding substances of wood >> particle board +44101110,"PARTICLE BOARD, ORIENTED STRAND BOARD (OSB) AND SIMILAR BOARD (FOR EXAMPLE, WAFERBOARD) OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT AGGLOMERATED WITH RESINS OR OTHER ORGANIC BINDING SUBSTANCES:Particle board:Plain particle boards" +44101120,"PARTICLE BOARD, ORIENTED STRAND BOARD (OSB) AND SIMILAR BOARD (FOR EXAMPLE, WAFERBOARD) OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT AGGLOMERATED WITH RESINS OR OTHER ORGANIC BINDING SUBSTANCES:Particle board:Insulation board and hardboard" +44101130,"PARTICLE BOARD, ORIENTED STRAND BOARD (OSB) AND SIMILAR BOARD (FOR EXAMPLE, WAFERBOARD) OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT AGGLOMERATED WITH RESINS OR OTHER ORGANIC BINDING SUBSTANCES:Particle board:Veneered particle board, not having decorative veneers on any face" +44101190,"PARTICLE BOARD, ORIENTED STRAND BOARD (OSB) AND SIMILAR BOARD (FOR EXAMPLE, WAFERBOARD) OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT AGGLOMERATED WITH RESINS OR OTHER ORGANIC BINDING SUBSTANCES:Particle board:Others" +44101210,"PARTICLE BOARD, ORIENTED STRAND BOARD (OSB) AND SIMILAR BOARD (FOR EXAMPLE, WAFERBOARD) OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT AGGLOMERATED WITH RESINS OR OTHER ORGANIC BINDING SUBSTANCES:Oriented strand board (OSB):Unworked or not further worked than sanded(OLD tariff)" +44101290,"PARTICLE BOARD, ORIENTED STRAND BOARD (OSB) AND 
SIMILAR BOARD (FOR EXAMPLE, WAFERBOARD) OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT AGGLOMERATED WITH RESINS OR OTHER ORGANIC BINDING SUBSTANCES:Oriented strand board (OSB):Other(OLD tariff)" +44101900,"PARTICLE BOARD, ORIENTED STRAND BOARD (OSB) AND SIMILAR BOARD (FOR EXAMPLE, WAFERBOARD) OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT AGGLOMERATED WITH RESINS OR OTHER ORGANIC BINDING SUBSTANCES::Other(OLD tariff)" +44109010,"PARTICLE BOARD, ORIENTED STRAND BOARD (OSB) AND SIMILAR BOARD (FOR EXAMPLE, WAFERBOARD) OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT AGGLOMERATED WITH RESINS OR OTHER ORGANIC BINDING SUBSTANCES:Other:Plain particle board(OLD tariff)" +44109020,"PARTICLE BOARD, ORIENTED STRAND BOARD (OSB) AND SIMILAR BOARD (FOR EXAMPLE, WAFERBOARD) OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT AGGLOMERATED WITH RESINS OR OTHER ORGANIC BINDING SUBSTANCES:Other:Insulation board and hard board(OLD tariff)" +44109030,"PARTICLE BOARD, ORIENTED STRAND BOARD (OSB) AND SIMILAR BOARD (FOR EXAMPLE, WAFERBOARD) OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT AGGLOMERATED WITH RESINS OR OTHER ORGANIC BINDING SUBSTANCES:Other:Veneered particle board, not having decorative veneers on any face(OLD tariff)" +44109040,"PARTICLE BOARD, ORIENTED STRAND BOARD (OSB) AND SIMILAR BOARD (FOR EXAMPLE, WAFERBOARD) OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT AGGLOMERATED WITH RESINS OR OTHER ORGANIC BINDING SUBSTANCES:Other:of Coir(OLD tariff)" +44109050,"PARTICLE BOARD, ORIENTED STRAND BOARD (OSB) AND SIMILAR BOARD (FOR EXAMPLE, WAFERBOARD) OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT AGGLOMERATED WITH RESINS OR OTHER ORGANIC BINDING SUBSTANCES:Other:of jute fibre(OLD tariff)" +44109090,"PARTICLE BOARD, ORIENTED STRAND BOARD (OSB) AND SIMILAR BOARD (FOR EXAMPLE, WAFERBOARD) OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT AGGLOMERATED WITH RESINS OR OTHER ORGANIC BINDING SUBSTANCES:Other:Other(OLD tariff)" +44111200,"FIBRE BOARD OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT BONDED WITH RESINS OR OTHER ORGANIC SUBSTANCES::Of a thickness not exceeding 5mm(OLD tariff)" +44111300,"FIBRE BOARD OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT BONDED WITH RESINS OR OTHER ORGANIC SUBSTANCES::Of a thickness exceeding 5mm but not exceeding 9mm(OLD tariff)" +44111400,"FIBRE BOARD OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT BONDED WITH RESINS OR OTHER ORGANIC SUBSTANCES::Of a thickness exceeding 9mm(OLD tariff)" +44119211,"FIBRE BOARD OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT BONDED WITH RESINS OR OTHER ORGANIC SUBSTANCES:Of a density exceeding 0.8 gm/cm3 :Hardboard(OLD tariff)" +44119219,"FIBRE BOARD OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT BONDED WITH RESINS OR OTHER ORGANIC SUBSTANCES:Of a density exceeding 0.8 gm/cm3 :Other(OLD tariff)" +44119221,"FIBRE BOARD OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT BONDED WITH RESINS OR OTHER ORGANIC SUBSTANCES:Of a density exceeding 0.8 gm/cm3 :Hardboard(OLD tariff)" +44119229,"FIBRE BOARD OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT BONDED WITH RESINS OR OTHER ORGANIC SUBSTANCES:Of a density exceeding 0.8 gm/cm3 :Other(OLD tariff)" +44119311,"FIBRE BOARD OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT BONDED WITH RESINS OR OTHER ORGANIC SUBSTANCES:Of a density exceeding 0.5 gm/cm 3 but not exceeding 0.8 gm/cm 3 :Insulation board(OLD tariff)" +44119319,"FIBRE BOARD OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT BONDED WITH RESINS OR OTHER ORGANIC SUBSTANCES:Of a density 
exceeding 0.5 gm/cm 3 but not exceeding 0.8 gm/cm 3 :Other(OLD tariff)" +44119321,"FIBRE BOARD OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT BONDED WITH RESINS OR OTHER ORGANIC SUBSTANCES:Of a density exceeding 0.5 gm/cm 3 but not exceeding 0.8 gm/cm 3 :Insulation board(OLD tariff)" +44119329,"FIBRE BOARD OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT BONDED WITH RESINS OR OTHER ORGANIC SUBSTANCES:Of a density exceeding 0.5 gm/cm 3 but not exceeding 0.8 gm/cm 3 :Other(OLD tariff)" +44119411,"FIBRE BOARD OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT BONDED WITH RESINS OR OTHER ORGANIC SUBSTANCES:Of a density not exceeding 0.5 gm/cm 3 :Insulation board(OLD tariff)" +44119419,"FIBRE BOARD OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT BONDED WITH RESINS OR OTHER ORGANIC SUBSTANCES:Of a density not exceeding 0.5 gm/cm 3 :Other(OLD tariff)" +44119421,"FIBRE BOARD OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT BONDED WITH RESINS OR OTHER ORGANIC SUBSTANCES:Of a density not exceeding 0.5 gm/cm 3 :Insulation board(OLD tariff)" +44119422,"FIBRE BOARD OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT BONDED WITH RESINS OR OTHER ORGANIC SUBSTANCES:Of a density not exceeding 0.5 gm/cm 3 :Of coir(OLD tariff)" +44119423,"FIBRE BOARD OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT BONDED WITH RESINS OR OTHER ORGANIC SUBSTANCES:Of a density not exceeding 0.5 gm/cm 3 :Of jute fibre(OLD tariff)" +44119429,"FIBRE BOARD OF WOOD OR OTHER LIGNEOUS MATERIALS, WHETHER OR NOT BONDED WITH RESINS OR OTHER ORGANIC SUBSTANCES:Of a density not exceeding 0.5 gm/cm 3 :Other(OLD tariff)" +44120000,plywood veneered panels and similar laminated wood +44121000,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD::Of bamboo" +44123100,plywood veneered panels and similar laminated wood >> with at least one outer ply of tropical wood +44123110,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:With at least one outer ply of tropical wood:Decorative plywood" +44123120,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:With at least one outer ply of tropical wood:Tea chest panels or shooks, packed in sets" +44123130,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:With at least one outer ply of tropical wood:Other tea chest panels" +44123140,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:With at least one outer ply of tropical wood:Marine and aircraft plywood" +44123150,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:With at least one outer ply of tropical wood:Cuttings and trimmings of plywood of width not exceeding 5cm" +44123190,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:With at least one outer ply of tropical wood:Other" +44123210,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other, with at least one outer ply of non-coniferous wood:Decorative plywood(OLD tariff)" +44123220,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other, with at least one outer ply of non-coniferous wood:Tea chest panels or shooks, packed in sets(OLD tariff)" +44123230,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other, with at least one outer ply of non-coniferous wood:Marine and aircraft plywood(OLD tariff)" +44123240,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other, with at least one outer ply of non-coniferous wood:Cuttings and trimmings of plywood of width not exceeding 5cm(OLD tariff)" +44123290,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other, with at least one outer ply of non-coniferous wood:Other(OLD tariff)" +44123300,plywood veneered panels 
and similar laminated wood >> other with at least one outer ply of non coniferous wood of the species alder alnus spp ash fraxinus spp beech fagus spp birch betula spp cherry prunus spp chestnut castanea spp elm ulmus spp eucalyptus eucalyptus spp hickory carya spp horse chestnut aesculus spp lime tilia spp maple acer spp oak quercus spp plane tree platanus spp poplar and aspen populus spp robinia robinia spp tulipwood liriodendron spp or walnut juglans spp +44123310,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other, with at least one outer ply of non-coniferous wood of the species alder (Alnus spp.), ash (Fraxinus spp.), beech (Fagus spp.), birch (Betula spp.), cherry (Prunus spp.), chestnut (Castanea spp.), elm (Ulmus spp.), eucalyptus (Eucalyptus spp.), hickory (Carya spp.), horse chestnut (Aesculus spp.), lime (Tilia spp.), maple (Acer spp.), oak (Quercus spp.), plane tree (Platanus spp.), poplar and aspen (Populus spp.), robinia (Robinia spp.), tulipwood (Liriodendron spp.) or walnut (Juglans spp.):Decorative plywood" +44123320,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other, with at least one outer ply of non-coniferous wood of the species alder (Alnus spp.), ash (Fraxinus spp.), beech (Fagus spp.), birch (Betula spp.), cherry (Prunus spp.), chestnut (Castanea spp.), elm (Ulmus spp.), eucalyptus (Eucalyptus spp.), hickory (Carya spp.), horse chestnut (Aesculus spp.), lime (Tilia spp.), maple (Acer spp.), oak (Quercus spp.), plane tree (Platanus spp.), poplar and aspen (Populus spp.), robinia (Robinia spp.), tulipwood (Liriodendron spp.) or walnut (Juglans spp.):Tea chest panels, shooks whether or not packed in sets" +44123330,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other, with at least one outer ply of non-coniferous wood of the species alder (Alnus spp.), ash (Fraxinus spp.), beech (Fagus spp.), birch (Betula spp.), cherry (Prunus spp.), chestnut (Castanea spp.), elm (Ulmus spp.), eucalyptus (Eucalyptus spp.), hickory (Carya spp.), horse chestnut (Aesculus spp.), lime (Tilia spp.), maple (Acer spp.), oak (Quercus spp.), plane tree (Platanus spp.), poplar and aspen (Populus spp.), robinia (Robinia spp.), tulipwood (Liriodendron spp.) or walnut (Juglans spp.):Marine and aircraft plywood" +44123340,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other, with at least one outer ply of non-coniferous wood of the species alder (Alnus spp.), ash (Fraxinus spp.), beech (Fagus spp.), birch (Betula spp.), cherry (Prunus spp.), chestnut (Castanea spp.), elm (Ulmus spp.), eucalyptus (Eucalyptus spp.), hickory (Carya spp.), horse chestnut (Aesculus spp.), lime (Tilia spp.), maple (Acer spp.), oak (Quercus spp.), plane tree (Platanus spp.), poplar and aspen (Populus spp.), robinia (Robinia spp.), tulipwood (Liriodendron spp.) or walnut (Juglans spp.):Cutting and trimmings of plywood of width not exceeding 5 cm" +44123390,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other, with at least one outer ply of non-coniferous wood of the species alder (Alnus spp.), ash (Fraxinus spp.), beech (Fagus spp.), birch (Betula spp.), cherry (Prunus spp.), chestnut (Castanea spp.), elm (Ulmus spp.), eucalyptus (Eucalyptus spp.), hickory (Carya spp.), horse chestnut (Aesculus spp.), lime (Tilia spp.), maple (Acer spp.), oak (Quercus spp.), plane tree (Platanus spp.), poplar and aspen (Populus spp.), robinia (Robinia spp.), tulipwood (Liriodendron spp.) 
or walnut (Juglans spp.):Other" +44123400,plywood veneered panels and similar laminated wood >> other with at least one outer ply of non coniferous wood not specified under 4412 33 +44123410,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other, with at least one outer ply of non-coniferous wood not specified under sub-heading 4412 33:Decorative plywood" +44123420,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other, with at least one outer ply of non-coniferous wood not specified under sub-heading 4412 33:Tea chest panels, shooks whether or not packed in sets" +44123430,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other, with at least one outer ply of non-coniferous wood not specified under sub-heading 4412 33:Marine and aircraft plywood" +44123440,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other, with at least one outer ply of non-coniferous wood not specified under sub-heading 4412 33:Cutting and trimmings of plywood of width not exceeding 5 cm" +44123490,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other, with at least one outer ply of non-coniferous wood not specified under sub-heading 4412 33:Other" +44123900,plywood veneered panels and similar laminated wood >> other with both outer plies of coniferous wood +44123910,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other:Decorative plywood" +44123920,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other with both outer plies of coniferous wood::Tea chest panels, shooks whether or not packed in sets" +44123930,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other:Marine and aircraft plywood" +44123940,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other with both outer plies of coniferous wood::Cutting and trimmings of plywood of width not exceeding 5 cm" +44123990,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other:Other" +44124100,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD::With at least one outer ply of tropical wood(OLD tariff)" +44124200,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD::Other, with at least one outer ply of nonconiferous wood" +44124900,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD::Other, with both outer plies of coniferous wood" +44125100,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD::With at least one outer ply of tropical wood" +44125200,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD::Other, with at least one outer ply of nonconiferous wood" +44125900,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD::Other, with both outer plies of coniferous wood" +44129100,plywood veneered panels and similar laminated wood >> with at least one outer ply of tropical wood +44129110,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:With at least one outer ply of tropical wood::Decorative plywood" +44129120,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:With at least one outer ply of tropical wood::Tea chest panel or shooks, packed in sets" +44129130,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:With at least one outer ply of tropical wood::Marine and aircraft plywood" +44129140,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:With at least one outer ply of tropical wood::Cuttings and trimmings of plywood of which not exceeding 5cm" +44129190,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:With at least one outer ply of tropical wood::Other" +44129200,plywood veneered panels and similar laminated wood >> other with at least one outer ply of wood +44129210,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED 
WOOD:Other, with at least one outer ply of nonconiferous wood::Decorative plywood" +44129220,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other, with at least one outer ply of nonconiferous wood::Tea chest panel or shooks, packed in sets" +44129230,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other, with at least one outer ply of nonconiferous wood::Marine and aircraft plywood" +44129240,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other, with at least one outer ply of nonconiferous wood::Cuttings and trimmings of plywood of width not exceeding 5cm" +44129290,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other, with at least one outer ply of nonconiferous wood::Other" +44129400,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD::Blockboard, laminboard and battenboard(OLD tariff)" +44129900,plywood veneered panels and similar laminated wood >> other with at least one outer ply of coniferous wood +44129910,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other:Decorative plywood" +44129920,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other:Tea chest panel or shooks, packed in sets" +44129930,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other:Marine and aircraft plywood" +44129940,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other:Cuttings and trimmings of plywood of width not exceeding 5cm" +44129990,"PLYWOOD, VENEERED PANELS AND SIMILAR LAMINATED WOOD:Other:Other" +44130000,"::DENSIFIED WOOD, IN BLOCKS, PLATES, STRIPS, OR PROFILE SHAPES" +44140000,"::WOODEN FRAMES FOR PAINTINGS, PHOTOGRAPHS, MIRRORS OR SIMILAR OBJECTS" +44141000,"WOODEN FRAMES FOR PAINTINGS, PHOTOGRAPHS, MIRRORS OR SIMILAR OBJECTS::Of tropical wood(OLD tariff)" +44149000,"WOODEN FRAMES FOR PAINTINGS, PHOTOGRAPHS, MIRRORS OR SIMILAR OBJECTS::Other(OLD tariff)" +44150000,packing cases boxes crates drums and similar packings of wood of wood pallets box pallets and other load boards of wood pallet collars of wood +44151000,"PACKING CASES, BOXES, CRATES, DRUMS AND SIMILAR PACKINGS, OF WOOD; CABLE-DRUMS OF WOOD; PALLETS, BOX PALLETS AND OTHER LOAD BOARDS, OF WOOD; PALLET COLLARS OF WOOD::Cases, boxes, crates, drums and similar packings; cable-drums" +44152000,"PACKING CASES, BOXES, CRATES, DRUMS AND SIMILAR PACKINGS, OF WOOD; CABLE-DRUMS OF WOOD; PALLETS, BOX PALLETS AND OTHER LOAD BOARDS, OF WOOD; PALLET COLLARS OF WOOD::Pallets, box pallets and other load boards; pallet collars" +44160000,casks barrels vats tubs and other coopers products and parts thereof of including staves wood casks barrels vats tubs and other coopers products and parts thereof of wood including staves +44160010,"CASKS, BARRELS, VATS, TUBS AND OTHER COOPERS PRODUCTS AND PARTS THEREOF, OF WOOD, INCLUDING STAVES:Casks, barrels, vats, tubs and other cooper's products and parts thereof, of wood, including staves:Casks, barrels, vats and tubs" +44160020,"CASKS, BARRELS, VATS, TUBS AND OTHER COOPERS PRODUCTS AND PARTS THEREOF, OF WOOD, INCLUDING STAVES:Casks, barrels, vats, tubs and other cooper's products and parts thereof, of wood, including staves:Other cooper's products" +44160091,"CASKS, BARRELS, VATS, TUBS AND OTHER COOPERS PRODUCTS AND PARTS THEREOF, OF WOOD, INCLUDING STAVES:Casks, barrels, vats, tubs and other cooper's products and parts thereof, of wood, including staves:Riven or sawn staves of wood not further prepared" +44160099,"CASKS, BARRELS, VATS, TUBS AND OTHER COOPERS PRODUCTS AND PARTS THEREOF, OF WOOD, INCLUDING STAVES:Casks, barrels, vats, tubs and other cooper's products and parts
thereof, of wood, including staves:Other" +44170000,"::TOOLS, TOOL BODIES, TOOL HANDLES, BROOM OR BRUSH BODIES AND HANDLES, OF WOOD; BOOT OR SHOE LASTS AND TREES, OF WOOD" +44180000,builders joinery and carpentry of wood including cellular wood panels assembled flooring panels shingles and shakes windows and their frames +44181000,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES::Windows, french-windows and their frames(OLD tariff)" +44181100,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES::Of tropical wood" +44181900,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES::Other" +44182010,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES:Doors and their frames and thresholds:Flush doors(OLD tariff)" +44182020,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES:Doors and their frames and thresholds:Frames and thresholds of flush doors(OLD tariff)" +44182090,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES:Doors and their frames and thresholds:Other(OLD tariff)" +44182100,builders joinery and carpentry of wood including cellular wood panels assembled flooring panels shingles and shakes windows and their frames >> of tropical wood +44182110,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES:Doors and their frames and thresholds Of tropical wood::Flush doors" +44182120,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES:Doors and their frames and thresholds Of tropical wood::Frames and thresholds of flush doors" +44182190,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES:Doors and their frames and thresholds Of tropical wood::Other" +44182900,builders joinery and carpentry of wood including cellular wood panels assembled flooring panels shingles and shakes windows and their frames >> other +44182910,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES:Other:Flush doors" +44182920,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES:Other:Frames and thresholds of flush doors" +44182990,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES:Other:Other" +44183000,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES::Posts and beams other than products of sub-headings 4418 81 to 4418 89" +44184000,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES::Shuttering for concrete constructional work" +44185000,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES::Shingles and shakes" +44186000,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES::Posts and beams(OLD tariff)" +44187100,"BUILDER'S JOINERY AND
CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES::For mosaic floors(OLD tariff)" +44187200,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES::Other, multilayer(OLD tariff)" +44187300,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES::Of bamboo or with at least the top layer (wear layer) of bamboo" +44187400,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES::Other, for mosaic floors" +44187500,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES::Other, multilayer" +44187900,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES::Other" +44188100,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES::Glue-laminated timber (glulam)" +44188200,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES::Cross-laminated timber (CLT or X-lam)" +44188300,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES::I beams" +44188900,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES::Other" +44189000,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES::Other(OLD tariff)" +44189100,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES::Of bamboo" +44189200,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES::Cellular wood panels" +44189900,"BUILDER'S JOINERY AND CARPENTRY OF WOOD, INCLUDING CELLULAR WOOD PANELS, ASSEMBLED FLOORING PANELS, SHINGLES AND SHAKES::Other" +44190000,tableware and kitchenware of wood of bamboo +44190010,"TABLEWARE AND KITCHENWARE, OF WOOD:Tableware and kitchenware, of wood:Tableware(OLD tariff)" +44190020,"TABLEWARE AND KITCHENWARE, OF WOOD:Tableware and kitchenware, of wood:Kitchenware(OLD tariff)" +44191100,"TABLEWARE AND KITCHENWARE, OF WOOD::Bread boards, chopping boards and similar boards" +44191200,"TABLEWARE AND KITCHENWARE, OF WOOD::Chopsticks" +44191900,"TABLEWARE AND KITCHENWARE, OF WOOD::Other" +44192000,"TABLEWARE AND KITCHENWARE, OF WOOD::of tropical wood" +44199000,tableware and kitchenware of wood of bamboo >> other +44199010,"TABLEWARE AND KITCHENWARE, OF WOOD:Other:Bread boards, chopping boards and similar boards" +44199020,"TABLEWARE AND KITCHENWARE, OF WOOD:Other:Chopsticks" +44199090,"TABLEWARE AND KITCHENWARE, OF WOOD:Other:other" +44201000,"WOOD MARQUETRY AND INLAID WOOD; CASKETS AND CASES FOR JEWELLERY OR CUTLERY, AND SIMILAR ARTICLES, OF WOOD; STATUETTES AND OTHER ORNAMENTS, OF WOOD; WOODEN ARTICLES OF FURNITURE NOT FALLING IN::Statuettes and other ornaments, of wood(OLD tariff)" +44201100,"WOOD MARQUETRY AND INLAID WOOD; CASKETS AND CASES FOR JEWELLERY OR CUTLERY, AND SIMILAR ARTICLES, OF WOOD; STATUETTES AND OTHER ORNAMENTS, OF WOOD; WOODEN ARTICLES OF FURNITURE NOT FALLING IN::of tropical wood(OLD tariff)" +44201900,"WOOD MARQUETRY AND INLAID WOOD; CASKETS AND CASES FOR JEWELLERY OR CUTLERY, AND 
SIMILAR ARTICLES, OF WOOD; STATUETTES AND OTHER ORNAMENTS, OF WOOD; WOODEN ARTICLES OF FURNITURE NOT FALLING IN::Other(OLD tariff)" +44209010,"WOOD MARQUETRY AND INLAID WOOD; CASKETS AND CASES FOR JEWELLERY OR CUTLERY, AND SIMILAR ARTICLES, OF WOOD; STATUETTES AND OTHER ORNAMENTS, OF WOOD; WOODEN ARTICLES OF FURNITURE NOT FALLING IN:Other :Wood marquetry and inlaid wood(OLD tariff)" +44209090,"WOOD MARQUETRY AND INLAID WOOD; CASKETS AND CASES FOR JEWELLERY OR CUTLERY, AND SIMILAR ARTICLES, OF WOOD; STATUETTES AND OTHER ORNAMENTS, OF WOOD; WOODEN ARTICLES OF FURNITURE NOT FALLING IN:Other :Other(OLD tariff)" +44211000,OTHER ARTICLES OF WOOD::Clothes hangers(OLD tariff) +44212000,OTHER ARTICLES OF WOOD::Coffins(OLD tariff) +44219011,OTHER ARTICLES OF WOOD:Other :For cotton machinery(OLD tariff) +44219012,OTHER ARTICLES OF WOOD:Other :For jute machinery(OLD tariff) +44219013,OTHER ARTICLES OF WOOD:Other :For silk regenerated and synthetic fibres machinery(OLD tariff) +44219014,OTHER ARTICLES OF WOOD:Other :For other machinery(OLD tariff) +44219019,OTHER ARTICLES OF WOOD:Other :Other(OLD tariff) +44219020,OTHER ARTICLES OF WOOD:Other :Wood paving blocks(OLD tariff) +44219030,OTHER ARTICLES OF WOOD:Other :Match splints(OLD tariff) +44219040,OTHER ARTICLES OF WOOD:Other :Pencil slates(OLD tariff) +44219050,"OTHER ARTICLES OF WOOD:Other :Parts of wood, namely oars, paddles and rudders for ships, boats and other similar floating structures(OLD tariff)" +44219060,OTHER ARTICLES OF WOOD:Other :Parts of domestic decorative articles used as tableware and kitchenware(OLD tariff) +44219070,OTHER ARTICLES OF WOOD:Other :Articles of densified wood not elsewhere included or specified(OLD tariff) +44219090,OTHER ARTICLES OF WOOD:Other :Other(OLD tariff) +44219111,OTHER ARTICLES OF WOOD:Of Bamboo:For cotton machinery(OLD tariff) +44219112,OTHER ARTICLES OF WOOD:Of Bamboo:FOR JUTE MACHINERY(OLD tariff) +44219113,OTHER ARTICLES OF WOOD:Of Bamboo:FOR SILK REGENERATED AND SYNTHETIC FIBRE MACHINERY(OLD tariff) +44219114,OTHER ARTICLES OF WOOD:Of Bamboo:FOR OTHER MACHINERY(OLD tariff) +44219119,OTHER ARTICLES OF WOOD:Of Bamboo:Other(OLD tariff) +44219120,OTHER ARTICLES OF WOOD:Of Bamboo:Wood Paving Blocks(OLD tariff) +44219130,OTHER ARTICLES OF WOOD:Of Bamboo:Match splints(OLD tariff) +44219140,OTHER ARTICLES OF WOOD:Of Bamboo:Pencil slats(OLD tariff) +44219150,"OTHER ARTICLES OF WOOD:Of Bamboo:Parts of wood, namely oars, paddles and rudders for ships, boats and other similarfloating structures(OLD tariff)" +44219160,OTHER ARTICLES OF WOOD:Of Bamboo:Parts of domestic decorative articles used as tableware and kitchenware(OLD tariff) +44219170,OTHER ARTICLES OF WOOD:Of Bamboo:Articles of densified wood not included or specified elsewhere(OLD tariff) +44219190,OTHER ARTICLES OF WOOD:Of Bamboo:Other(OLD tariff) +44219911,OTHER ARTICLES OF WOOD:Other:For cotton machinery(OLD tariff) +44219912,OTHER ARTICLES OF WOOD:Other:For jute machinery(OLD tariff) +44219913,OTHER ARTICLES OF WOOD:Other:For silk regenerated and synthetic fibre machinery(OLD tariff) +44219914,OTHER ARTICLES OF WOOD:Other:For other machinery(OLD tariff) +44219919,OTHER ARTICLES OF WOOD:Other:other(OLD tariff) +44219920,OTHER ARTICLES OF WOOD:Other:Wood paving blocks(OLD tariff) +44219930,OTHER ARTICLES OF WOOD:Other:Match splints(OLD tariff) +44219940,OTHER ARTICLES OF WOOD:Other:Pencil slats(OLD tariff) +44219950,"OTHER ARTICLES OF WOOD:Other:Parts of wood, namely oars, paddles and rudders for ships, boats and other similar floating structures(OLD 
tariff)" +44219960,OTHER ARTICLES OF WOOD:Other:Parts of domestic decorative articles used as tableware and kitchenware(OLD tariff) +44219970,OTHER ARTICLES OF WOOD:Other:Articles of densified wood not included or specified elsewhere(OLD tariff) +44219990,OTHER ARTICLES OF WOOD:Other:other(OLD tariff) +44229112,::For jute machinery(OLD tariff) +44239113,::For silk regenerated and synthetic fibre machinery(OLD tariff) +44249114,::For other machinery(OLD tariff) +45010000,natural cork raw or simply prepared waste cork crushed granulated or ground cork +45011000,"NATURAL CORK, RAW OR SIMPLY SDF KSDF JKLSD PREPARED; WASTE CORK; CRUSHED, GRANULATED OR GROUND CORK::Natural cork, raw or simply prepared" +45019000,"NATURAL CORK, RAW OR SIMPLY SDF KSDF JKLSD PREPARED; WASTE CORK; CRUSHED, GRANULATED OR GROUND CORK::Other" +45020000,"::NATURAL CORK, DEBACKED OR ROUGHLY SQUARED, OR IN RECTANGULAR (INCLUDING SQUARE) BLOCKS, PLATES, SHEETS OR STRIP (INCLUDING SHARP-EDGED BLANKS FOR CORKS OR STOPPERS)" +45030000,articles of natural cork +45031000,ARTICLES OF NATURAL CORK::Corks and stoppers +45039000,articles of natural cork >> other +45039010,ARTICLES OF NATURAL CORK:Other :Shuttlecock cork bottom +45039090,ARTICLES OF NATURAL CORK:Other :Other +45040000,agglomerated cork with or without a binding substance and articles of agglomerated cork +45041000,agglomerated cork with or without a binding substance and articles of agglomerated cork >> blocks plates sheets and strip tiles of any shape solid cylinders including discs +45041010,"AGGLOMERATED CORK (WITH OR WITHOUT A BINDING SUBSTANCE) AND ARTICLES OF AGGLOMERATED CORK:Blocks, plates, sheets and strip; tiles of any shape; solid cylinders, including discs :Sheets" +45041020,"AGGLOMERATED CORK (WITH OR WITHOUT A BINDING SUBSTANCE) AND ARTICLES OF AGGLOMERATED CORK:Blocks, plates, sheets and strip; tiles of any shape; solid cylinders, including discs :Slabs" +45041090,"AGGLOMERATED CORK (WITH OR WITHOUT A BINDING SUBSTANCE) AND ARTICLES OF AGGLOMERATED CORK:Blocks, plates, sheets and strip; tiles of any shape; solid cylinders, including discs :Other" +45049000,AGGLOMERATED CORK (WITH OR WITHOUT A BINDING SUBSTANCE) AND ARTICLES OF AGGLOMERATED CORK::Other +46010000,plaits and similar products of plaiting materials whether or not assembled into strips plaiting materials plaits and similar products of plaiting together in materials bound parallel strands or woven in sheet form whether or not being finished articles for example mats matting screens mats matting and screens of vegetable materials parallel strands or woven in sheet form whether or not being finished articles for example mats matting screens mats matting and screens of vegetable materials +46012100,"PLAITS AND SIMILAR PRODUCTS OF PLAITING MATERIALS, WHETHER OR NOT ASSEMBLED INTO STRIPS ; PLAITING MATERIALS, PLAITS AND SIMILAR PRODUCTS OF PLAITING MATERIALS, BOUND TOGETHER IN PARALLEL STRANDS OR WOVEN, IN SHEET FORM, WHETHER OR NOT BEING FINISHED ARTICLES (FOR EXAMPLE, MATS, MATTING, SCREENS)::Of bamboo" +46012200,"PLAITS AND SIMILAR PRODUCTS OF PLAITING MATERIALS, WHETHER OR NOT ASSEMBLED INTO STRIPS ; PLAITING MATERIALS, PLAITS AND SIMILAR PRODUCTS OF PLAITING MATERIALS, BOUND TOGETHER IN PARALLEL STRANDS OR WOVEN, IN SHEET FORM, WHETHER OR NOT BEING FINISHED ARTICLES (FOR EXAMPLE, MATS, MATTING, SCREENS)::Of rattan" +46012900,"PLAITS AND SIMILAR PRODUCTS OF PLAITING MATERIALS, WHETHER OR NOT ASSEMBLED INTO STRIPS ; PLAITING MATERIALS, PLAITS AND SIMILAR PRODUCTS OF PLAITING MATERIALS, BOUND 
TOGETHER IN PARALLEL STRANDS OR WOVEN, IN SHEET FORM, WHETHER OR NOT BEING FINISHED ARTICLES (FOR EXAMPLE, MATS, MATTING, SCREENS)::Other;" +46019200,"PLAITS AND SIMILAR PRODUCTS OF PLAITING MATERIALS, WHETHER OR NOT ASSEMBLED INTO STRIPS ; PLAITING MATERIALS, PLAITS AND SIMILAR PRODUCTS OF PLAITING MATERIALS, BOUND TOGETHER IN PARALLEL STRANDS OR WOVEN, IN SHEET FORM, WHETHER OR NOT BEING FINISHED ARTICLES (FOR EXAMPLE, MATS, MATTING, SCREENS)::Of bamboo" +46019300,"PLAITS AND SIMILAR PRODUCTS OF PLAITING MATERIALS, WHETHER OR NOT ASSEMBLED INTO STRIPS ; PLAITING MATERIALS, PLAITS AND SIMILAR PRODUCTS OF PLAITING MATERIALS, BOUND TOGETHER IN PARALLEL STRANDS OR WOVEN, IN SHEET FORM, WHETHER OR NOT BEING FINISHED ARTICLES (FOR EXAMPLE, MATS, MATTING, SCREENS)::Of rattan" +46019400,"PLAITS AND SIMILAR PRODUCTS OF PLAITING MATERIALS, WHETHER OR NOT ASSEMBLED INTO STRIPS ; PLAITING MATERIALS, PLAITS AND SIMILAR PRODUCTS OF PLAITING MATERIALS, BOUND TOGETHER IN PARALLEL STRANDS OR WOVEN, IN SHEET FORM, WHETHER OR NOT BEING FINISHED ARTICLES (FOR EXAMPLE, MATS, MATTING, SCREENS)::Of other vegetable materials" +46019900,"PLAITS AND SIMILAR PRODUCTS OF PLAITING MATERIALS, WHETHER OR NOT ASSEMBLED INTO STRIPS ; PLAITING MATERIALS, PLAITS AND SIMILAR PRODUCTS OF PLAITING MATERIALS, BOUND TOGETHER IN PARALLEL STRANDS OR WOVEN, IN SHEET FORM, WHETHER OR NOT BEING FINISHED ARTICLES (FOR EXAMPLE, MATS, MATTING, SCREENS)::Other" +46020000,basketwork wickerwork and other articles made directly to shape from plaiting materials or made up from goods of heading 4601 articles of loofah of vegetable materials +46021100,"BASKETWORK, WICKERWORK AND OTHER ARTICLES, MADE DIRECTLY TO SHAPE FROM PLAITING MATERIALS OR MADE UP FROM GOODS OF HEADING 46 01; ARTICLES OF LOOFAH::Of bamboo" +46021200,"BASKETWORK, WICKERWORK AND OTHER ARTICLES, MADE DIRECTLY TO SHAPE FROM PLAITING MATERIALS OR MADE UP FROM GOODS OF HEADING 46 01; ARTICLES OF LOOFAH::Of rattan" +46021900,basketwork wickerwork and other articles made directly to shape from plaiting materials or made up from goods of heading 4601 articles of loofah of vegetable materials >> other of palm leaves +46021911,"BASKETWORK, WICKERWORK AND OTHER ARTICLES, MADE DIRECTLY TO SHAPE FROM PLAITING MATERIALS OR MADE UP FROM GOODS OF HEADING 46 01; ARTICLES OF LOOFAH:Other :Baskets" +46021919,"BASKETWORK, WICKERWORK AND OTHER ARTICLES, MADE DIRECTLY TO SHAPE FROM PLAITING MATERIALS OR MADE UP FROM GOODS OF HEADING 46 01; ARTICLES OF LOOFAH:Other :Other" +46021990,"BASKETWORK, WICKERWORK AND OTHER ARTICLES, MADE DIRECTLY TO SHAPE FROM PLAITING MATERIALS OR MADE UP FROM GOODS OF HEADING 46 01; ARTICLES OF LOOFAH:Other :Other" +46029000,"BASKETWORK, WICKERWORK AND OTHER ARTICLES, MADE DIRECTLY TO SHAPE FROM PLAITING MATERIALS OR MADE UP FROM GOODS OF HEADING 46 01; ARTICLES OF LOOFAH::Other" +47010000,::MECHANICAL WOOD PULP +47020000,"::CHEMICAL WOOD PULP, DISSOLVING GRADES" +47030000,chemical wood pulp soda or sulphate other than dissolving grades unbleached +47031100,"CHEMICAL WOOD PULP, SODA OR SULPHATE, OTHER THAN DISSOLVING GRADES::Coniferous" +47031900,"CHEMICAL WOOD PULP, SODA OR SULPHATE, OTHER THAN DISSOLVING GRADES::Non-coniferous" +47032100,"CHEMICAL WOOD PULP, SODA OR SULPHATE, OTHER THAN DISSOLVING GRADES::Coniferous" +47032900,"CHEMICAL WOOD PULP, SODA OR SULPHATE, OTHER THAN DISSOLVING GRADES::Non-coniferous" +47040000,chemical wood pulp sulphite other than dissolving grades unbleached +47041100,"CHEMICAL WOOD PULP, SULPHITE, OTHER THAN DISSOLVING 
GRADES::Coniferous" +47041900,"CHEMICAL WOOD PULP, SULPHITE, OTHER THAN DISSOLVING GRADES::Non-coniferous" +47042100,"CHEMICAL WOOD PULP, SULPHITE, OTHER THAN DISSOLVING GRADES::Coniferous" +47042900,"CHEMICAL WOOD PULP, SULPHITE, OTHER THAN DISSOLVING GRADES::Non-coniferous" +47050000,::WOOD PULP OBTAINED BY A COMBINATION OF MECHANICAL AND CHEMICAL PULPING PROCESSES +47060000,pulps of fibres derived from recovered waste and scrap paper or paperboard or of other fibrous cellulosic material +47061000,PULPS OF FIBRES DERIVED FROM RECOVERED (WASTE AND SCRAP) PAPER OR PAPERBOARD OR OF OTHER FIBROUS CELLULOSIC MATERIAL::Cotton linters pulp +47062000,PULPS OF FIBRES DERIVED FROM RECOVERED (WASTE AND SCRAP) PAPER OR PAPERBOARD OR OF OTHER FIBROUS CELLULOSIC MATERIAL::Pulps of fibres derived from recovered (waste and scrap) paper or paperboard +47063000,"PULPS OF FIBRES DERIVED FROM RECOVERED (WASTE AND SCRAP) PAPER OR PAPERBOARD OR OF OTHER FIBROUS CELLULOSIC MATERIAL::Other, of bamboo" +47069100,PULPS OF FIBRES DERIVED FROM RECOVERED (WASTE AND SCRAP) PAPER OR PAPERBOARD OR OF OTHER FIBROUS CELLULOSIC MATERIAL::Mechanical +47069200,PULPS OF FIBRES DERIVED FROM RECOVERED (WASTE AND SCRAP) PAPER OR PAPERBOARD OR OF OTHER FIBROUS CELLULOSIC MATERIAL::Chemical +47069300,PULPS OF FIBRES DERIVED FROM RECOVERED (WASTE AND SCRAP) PAPER OR PAPERBOARD OR OF OTHER FIBROUS CELLULOSIC MATERIAL:Obtained by a combination of mechanical and chemical processes:Obtained by a combination of mechanical and chemical processess +47070000,recovered waste and scrap paper or paperboard +47071000,RECOVERED (WASTE AND SCRAP) PAPER OR PAPERBOARD::Unbleached kraft paper or paperboard or corrugated paper or paperboard +47072000,"RECOVERED (WASTE AND SCRAP) PAPER OR PAPERBOARD::Other paper or paperboard made mainly of bleached chemical pulp, not coloured in the mass" +47073000,"RECOVERED (WASTE AND SCRAP) PAPER OR PAPERBOARD::Paper or paperboard made mainly of mechanical pulp (for example, newspapers, journals and similar printed matter)" +47079000,"RECOVERED (WASTE AND SCRAP) PAPER OR PAPERBOARD::Other, including unsorted waste and scrap" +48010000,newsprint in rolls or sheets newsprint in rolls or sheets +48010010,"NEWSPRINT, IN ROLLS OR SHEETS:Newsprint, in rolls or sheets :Glazed" +48010090,"NEWSPRINT, IN ROLLS OR SHEETS:Newsprint, in rolls or sheets :Other" +48020000,uncoated paper and paperboard of a kind used f or wri ting p rinting or o ther grap hic purposes and punch card and punch tape paper in rolls or rectangular including square sheets of any size other than paper of heading 4801 or 4803 hand made paper and paperboard +48021000,uncoated paper and paperboard of a kind used f or wri ting p rinting or o ther grap hic purposes and punch card and punch tape paper in rolls or rectangular including square sheets of any size other than paper of heading 4801 or 4803 hand made paper and paperboard >> paper and paperboard +48021010,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Hand-made paper and paperboard :Paper" +48021020,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER 
AND PAPERBOARD:Hand-made paper and paperboard :Paperboard" +48022000,uncoated paper and paperboard of a kind used f or wri ting p rinting or o ther grap hic purposes and punch card and punch tape paper in rolls or rectangular including square sheets of any size other than paper of heading 4801 or 4803 hand made paper and paperboard >> paper and paperboard of a kind used as a base for or electro sensitive paper or paperboard +48022010,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Paper and paperboard of a kind used as a base for photo-sensitive, heat-sensitive or electro-sensitive paper or paperboard :Photographic base paper, uncoated" +48022090,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Paper and paperboard of a kind used as a base for photo-sensitive, heat-sensitive or electro-sensitive paper or paperboard :Other" +48024000,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD::Wall paper base" +48025400,uncoated paper and paperboard of a kind used f or wri ting p rinting or o ther grap hic purposes and punch card and punch tape paper in rolls or rectangular including square sheets of any size other than paper of heading 4801 or 4803 hand made paper and paperboard >> weighing less than 40 +48025410,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Weighing less than 40 g/m2 :India Paper" +48025420,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Weighing less than 40 g/m2 :Litho and offset paper" +48025430,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Weighing less than 40 g/m2 :Duplicating paper" +48025440,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Weighing less than 40 g/m2 :Airmail paper" +48025450,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE 
PAPER AND PAPERBOARD:Weighing less than 40 g/m2 :Tissue paper" +48025490,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Weighing less than 40 g/m2 :Others" +48025500,uncoated paper and paperboard of a kind used f or wri ting p rinting or o ther grap hic purposes and punch card and punch tape paper in rolls or rectangular including square sheets of any size other than paper of heading 4801 or 4803 hand made paper and paperboard >> weighing 40 or more but not more than 150 in rolls +48025510,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Wieghing 40 g/m2 or more but less than 150 g/m2, in rolls:Litho and offset paper" +48025520,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Wieghing 40 g/m2 or more but less than 150 g/m2, in rolls:Drawing paper" +48025530,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Wieghing 40 g/m2 or more but less than 150 g/m2, in rolls:Duplicating paper" +48025540,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Wieghing 40 g/m2 or more but less than 150 g/m2, in rolls:Account book paper" +48025550,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Wieghing 40 g/m2 or more but less than 150 g/m2, in rolls:Bank, bond and cheque paper" +48025560,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Wieghing 40 g/m2 or more but less than 150 g/m2, in rolls:Currency note paper" +48025570,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Wieghing 40 g/m2 or more but less than 150 g/m2, in rolls:Paper for security printing, currency paper, stamp paper" +48025590,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR 
(INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Wieghing 40 g/m2 or more but less than 150 g/m2, in rolls:Other" +48025600,uncoated paper and paperboard of a kind used f or wri ting p rinting or o ther grap hic purposes and punch card and punch tape paper in rolls or rectangular including square sheets of any size other than paper of heading 4801 or 4803 hand made paper and paperboard >> weighing 40 or more but not more than 150 in sheets with one side not exceeding 435 mm and the other side not exceeding 297 mm in the unfolded state +48025610,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Weighing 40 g/m2 or more but not more than 150 g/m2, in sheets with one side not exceeding 435 mm and the other side not exceeding 297 mm in the unfolded state :Litho and offset paper" +48025620,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Weighing 40 g/m2 or more but not more than 150 g/m2, in sheets with one side not exceeding 435 mm and the other side not exceeding 297 mm in the unfolded state :Drawing paper" +48025630,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Weighing 40 g/m2 or more but not more than 150 g/m2, in sheets with one side not exceeding 435 mm and the other side not exceeding 297 mm in the unfolded state :Duplicating paper" +48025640,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Weighing 40 g/m2 or more but not more than 150 g/m2, in sheets with one side not exceeding 435 mm and the other side not exceeding 297 mm in the unfolded state :Account book paper" +48025650,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Weighing 40 g/m2 or more but not more than 150 g/m2, in sheets with one side not exceeding 435 mm and the other side not exceeding 297 mm in the unfolded state :Bank, bond and cheque paper" +48025660,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Weighing 40 g/m2 or more but not more than 150 g/m2, in sheets with one side not exceeding 435 mm and the other side not exceeding 297 mm in the unfolded state :Currency note paper" +48025670,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER 
GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Weighing 40 g/m2 or more but not more than 150 g/m2, in sheets with one side not exceeding 435 mm and the other side not exceeding 297 mm in the unfolded state :Paper for security printing, currency paper, stamp paper" +48025690,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Weighing 40 g/m2 or more but not more than 150 g/m2, in sheets with one side not exceeding 435 mm and the other side not exceeding 297 mm in the unfolded state :Other" +48025700,uncoated paper and paperboard of a kind used f or wri ting p rinting or o ther grap hic purposes and punch card and punch tape paper in rolls or rectangular including square sheets of any size other than paper of heading 4801 or 4803 hand made paper and paperboard >> other weighing 40 or more but not more than 150 +48025710,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Other, weighing 40 g/m2 or more but not more than 150 g/m2 :Litho and offset paper" +48025720,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Other, weighing 40 g/m2 or more but not more than 150 g/m2 :Drawing paper" +48025730,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Other, weighing 40 g/m2 or more but not more than 150 g/m2 :Duplicating paper" +48025740,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Other, weighing 40 g/m2 or more but not more than 150 g/m2 :Account book paper" +48025750,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Other, weighing 40 g/m2 or more but not more than 150 g/m2 :Bank, bond and cheque paper" +48025760,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Other, weighing 40 g/m2 or more but not more than 150 g/m2 :Currency note paper" +48025770,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER 
GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Other, weighing 40 g/m2 or more but not more than 150 g/m2 :Paper for security printing, currency paper, stamp paper" +48025790,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Other, weighing 40 g/m2 or more but not more than 150 g/m2 :Other" +48025800,uncoated paper and paperboard of a kind used f or wri ting p rinting or o ther grap hic purposes and punch card and punch tape paper in rolls or rectangular including square sheets of any size other than paper of heading 4801 or 4803 hand made paper and paperboard >> weighing more than 150 +48025810,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Weighing more than 150 g/m2 :Litho and offset paper" +48025820,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Weighing more than 150 g/m2 :Drawing paper" +48025830,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Weighing more than 150 g/m2 :Duplicating paper" +48025840,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Weighing more than 150 g/m2 :Bank, bond and cheque paper" +48025850,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Weighing more than 150 g/m2 :Paper for security printing, currency paper, stamp paper" +48025890,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Weighing more than 150 g/m2 :Other" +48026100,uncoated paper and paperboard of a kind used f or wri ting p rinting or o ther grap hic purposes and punch card and punch tape paper in rolls or rectangular including square sheets of any size other than paper of heading 4801 or 4803 hand made paper and paperboard >> in rolls +48026110,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR 
(INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:In rolls :Drawing paper" +48026120,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:In rolls :Poster paper" +48026130,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:In rolls :Printing paper dyed or marbled in mass" +48026140,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:In rolls :Account book paper" +48026150,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:In rolls :Automatic data processing machine paper" +48026160,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:In rolls :Paper for security printing, currency paper, stamp paper" +48026190,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:In rolls :Other" +48026200,uncoated paper and paperboard of a kind used f or wri ting p rinting or o ther grap hic purposes and punch card and punch tape paper in rolls or rectangular including square sheets of any size other than paper of heading 4801 or 4803 hand made paper and paperboard >> in sheets with one side not exceeding 435 mm and the other side not exceeding 297 mm in the unfolded state +48026210,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:In sheets with one side not exceeding 435 mm and the other side not exceeding 297 mm in the unfolded state :Drawing paper" +48026220,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:In sheets with one side not exceeding 435 mm and the other side not exceeding 297 mm in the unfolded state :Poster paper" +48026230,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING 
SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:In sheets with one side not exceeding 435 mm and the other side not exceeding 297 mm in the unfolded state :Printing paper dyed or marbled in mass" +48026240,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:In sheets with one side not exceeding 435 mm and the other side not exceeding 297 mm in the unfolded state :Account book paper" +48026250,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:In sheets with one side not exceeding 435 mm and the other side not exceeding 297 mm in the unfolded state :Automatic data processing machine paper" +48026260,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:In sheets with one side not exceeding 435 mm and the other side not exceeding 297 mm in the unfolded state :Paper for security printing, currency paper, stamp paper" +48026290,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:In sheets with one side not exceeding 435 mm and the other side not exceeding 297 mm in the unfolded state :Other" +48026900,uncoated paper and paperboard of a kind used f or wri ting p rinting or o ther grap hic purposes and punch card and punch tape paper in rolls or rectangular including square sheets of any size other than paper of heading 4801 or 4803 hand made paper and paperboard >> other +48026910,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Other :Drawing paper" +48026920,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Other :Poster paper" +48026930,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Other :Printing paper dyed or marbled in mass" +48026940,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Other :Account book 
paper" +48026950,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Other :Automatic data processing machine paper" +48026960,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Other :Paper for security printing, currency paper, stamp paper" +48026990,"UNCOATED PAPER AND PAPERBOARD, OF A KIND USED FOR WRITING, PRINTING OR OTHER GRAPHIC PURPOSES, AND NON-PERFORATED PUNCH CARD AND PUNCH TAPE PAPER, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS OF ANY SIZE, OTHER THAN PAPER OF HEADING 4801 OR 4803; HAND-MADE PAPER AND PAPERBOARD:Other :Other" +48030000,toilet or facial tissue stock towel or napkin stock and similar paper of a kind used for household or sanitary purposes cellulose wadding and webs of cellulose fibres whether or not creped crinkled embossed perforated surface coloured surface decorated or printed in rolls or sheets toilet or facial tissue stock towel or napkin stock and similar paper of a kind used for or not creped crinkled embossed perforated surface coloured or printed in rolls or sheets +48030010,"TOILET OR FACIAL TISSUE STOCK, TOWEL OR NAPKIN STOCK AND SIMILAR PAPER OF A KIND USED FOR HOUSEHOLD OR SANITARY PURPOSES, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBRES, WHETHER OR NOT CREPED, CRINKLED, EMBOSSED, PERFORATED, SURFACE-COLOURED, SURFACEDECORATED OR PRINTED, IN ROLLS OR SHEETS:Toilet or facial tissue stock, towel or napkin stock and similar paper of a kind used for household or sanitary purposes, cellulose wadding and webs of cellulose fibres, whether or not creped, crinkled, embossed, perforated, surface-coloured, surface-decorated or printed, in rolls or sheets :In commercial size rolls of a width 36 cm and above" +48030090,"TOILET OR FACIAL TISSUE STOCK, TOWEL OR NAPKIN STOCK AND SIMILAR PAPER OF A KIND USED FOR HOUSEHOLD OR SANITARY PURPOSES, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBRES, WHETHER OR NOT CREPED, CRINKLED, EMBOSSED, PERFORATED, SURFACE-COLOURED, SURFACEDECORATED OR PRINTED, IN ROLLS OR SHEETS:Toilet or facial tissue stock, towel or napkin stock and similar paper of a kind used for household or sanitary purposes, cellulose wadding and webs of cellulose fibres, whether or not creped, crinkled, embossed, perforated, surface-coloured, surface-decorated or printed, in rolls or sheets :Other" +48040000,uncoated kraft paper and paperboard in rolls or sheets other than that of heading 4802 or 4803 kraftliner +48041100,"UNCOATED KRAFT PAPER AND PAPERBOARD, IN ROLLS OR SHEETS, OTHER THAN THAT OF HEADING 4802 OR 4803::Unbleached" +48041900,"UNCOATED KRAFT PAPER AND PAPERBOARD, IN ROLLS OR SHEETS, OTHER THAN THAT OF HEADING 4802 OR 4803::Other" +48042100,"UNCOATED KRAFT PAPER AND PAPERBOARD, IN ROLLS OR SHEETS, OTHER THAN THAT OF HEADING 4802 OR 4803::Unbleached" +48042900,"UNCOATED KRAFT PAPER AND PAPERBOARD, IN ROLLS OR SHEETS, OTHER THAN THAT OF HEADING 4802 OR 4803::Other" +48043100,"UNCOATED KRAFT PAPER AND PAPERBOARD, IN ROLLS OR SHEETS, OTHER THAN THAT OF HEADING 4802 OR 4803::Unbleached" +48043900,"UNCOATED KRAFT PAPER AND PAPERBOARD, IN ROLLS OR SHEETS, OTHER THAN THAT OF
HEADING 4802 OR 4803::Other" +48044100,"UNCOATED KRAFT PAPER AND PAPERBOARD, IN ROLLS OR SHEETS, OTHER THAN THAT OF HEADING 4802 OR 4803::Unbleached" +48044200,"UNCOATED KRAFT PAPER AND PAPERBOARD, IN ROLLS OR SHEETS, OTHER THAN THAT OF HEADING 4802 OR 4803::Bleached uniformly throughout the mass and of which more than 95% by weight of the total fibre content consists of wood fibres obtained by a chemical process" +48044900,"UNCOATED KRAFT PAPER AND PAPERBOARD, IN ROLLS OR SHEETS, OTHER THAN THAT OF HEADING 4802 OR 4803::Other" +48045100,"UNCOATED KRAFT PAPER AND PAPERBOARD, IN ROLLS OR SHEETS, OTHER THAN THAT OF HEADING 4802 OR 4803::Unbleached" +48045200,"UNCOATED KRAFT PAPER AND PAPERBOARD, IN ROLLS OR SHEETS, OTHER THAN THAT OF HEADING 4802 OR 4803::Bleached uniformly throughout the mass and of which more than 95% by weight of the total fibre content consists of wood fibres obtained by a chemical process" +48045900,"UNCOATED KRAFT PAPER AND PAPERBOARD, IN ROLLS OR SHEETS, OTHER THAN THAT OF HEADING 4802 OR 4803::Other" +48050000,other uncoated paper and paperboard in rolls or sheets not further worked or processed than as specified in note 3 to this chapter fluting paper +48051100,"OTHER UNCOATED PAPER AND PAPERBOARD, IN ROLLS OR SHEETS, NOT FURTHER WORKED OR PROCESSED THAN AS SPECIFIED IN NOTE 3 TO THIS CHAPTER::Semi-chemical fluting paper" +48051200,"OTHER UNCOATED PAPER AND PAPERBOARD, IN ROLLS OR SHEETS, NOT FURTHER WORKED OR PROCESSED THAN AS SPECIFIED IN NOTE 3 TO THIS CHAPTER::Straw fluting paper" +48051900,"OTHER UNCOATED PAPER AND PAPERBOARD, IN ROLLS OR SHEETS, NOT FURTHER WORKED OR PROCESSED THAN AS SPECIFIED IN NOTE 3 TO THIS CHAPTER::Other" +48052400,"OTHER UNCOATED PAPER AND PAPERBOARD, IN ROLLS OR SHEETS, NOT FURTHER WORKED OR PROCESSED THAN AS SPECIFIED IN NOTE 3 TO THIS CHAPTER::Weighing 150 g/m 2 or less" +48052500,"OTHER UNCOATED PAPER AND PAPERBOARD, IN ROLLS OR SHEETS, NOT FURTHER WORKED OR PROCESSED THAN AS SPECIFIED IN NOTE 3 TO THIS CHAPTER::Weighing more than 150 g/m 2 " +48053000,"OTHER UNCOATED PAPER AND PAPERBOARD, IN ROLLS OR SHEETS, NOT FURTHER WORKED OR PROCESSED THAN AS SPECIFIED IN NOTE 3 TO THIS CHAPTER::Sulphite wrapping paper(OLD tariff)" +48054000,"OTHER UNCOATED PAPER AND PAPERBOARD, IN ROLLS OR SHEETS, NOT FURTHER WORKED OR PROCESSED THAN AS SPECIFIED IN NOTE 3 TO THIS CHAPTER::Filter paper and paperboard" +48055000,"OTHER UNCOATED PAPER AND PAPERBOARD, IN ROLLS OR SHEETS, NOT FURTHER WORKED OR PROCESSED THAN AS SPECIFIED IN NOTE 3 TO THIS CHAPTER::Felt paper and paperboard" +48059100,"OTHER UNCOATED PAPER AND PAPERBOARD, IN ROLLS OR SHEETS, NOT FURTHER WORKED OR PROCESSED THAN AS SPECIFIED IN NOTE 3 TO THIS CHAPTER::Weighing 150 g/m 2 or less" +48059200,"OTHER UNCOATED PAPER AND PAPERBOARD, IN ROLLS OR SHEETS, NOT FURTHER WORKED OR PROCESSED THAN AS SPECIFIED IN NOTE 3 TO THIS CHAPTER::Weighing more than 150 g/m 2 but less than 225 g/m 2" +48059300,"OTHER UNCOATED PAPER AND PAPERBOARD, IN ROLLS OR SHEETS, NOT FURTHER WORKED OR PROCESSED THAN AS SPECIFIED IN NOTE 3 TO THIS CHAPTER::Weighing 225 g/m 2 or more" +48060000,vegetable parchment greaseproof papers tracing papers and glassine and other glazed transparent or translucent papers in rolls or sheets +48061000,"VEGETABLE PARCHMENT, GREASEPROOF PAPERS, TRACING PAPERS AND GLASSINE AND OTHER GLAZED TRANSPARENT OR TRANSLUCENT PAPERS, IN ROLLS OR SHEETS::Vegetable parchment" +48062000,"VEGETABLE PARCHMENT, GREASEPROOF PAPERS, TRACING PAPERS AND GLASSINE AND OTHER GLAZED TRANSPARENT OR TRANSLUCENT 
PAPERS, IN ROLLS OR SHEETS::Greaseproof papers" +48063000,"VEGETABLE PARCHMENT, GREASEPROOF PAPERS, TRACING PAPERS AND GLASSINE AND OTHER GLAZED TRANSPARENT OR TRANSLUCENT PAPERS, IN ROLLS OR SHEETS::Tracing papers" +48064000,vegetable parchment greaseproof papers tracing papers and glassine and other glazed transparent or translucent papers in rolls or sheets >> glassine and other glazed transparent or translucent papers +48064010,"VEGETABLE PARCHMENT, GREASEPROOF PAPERS, TRACING PAPERS AND GLASSINE AND OTHER GLAZED TRANSPARENT OR TRANSLUCENT PAPERS, IN ROLLS OR SHEETS:Glassine and other glazed transparent or translucent papers :Glassine papers" +48064090,"VEGETABLE PARCHMENT, GREASEPROOF PAPERS, TRACING PAPERS AND GLASSINE AND OTHER GLAZED TRANSPARENT OR TRANSLUCENT PAPERS, IN ROLLS OR SHEETS:Glassine and other glazed transparent or translucent papers :Other" +48070000,composite paper and paperboard made by sticking flat layers of paper or paperboard together with an adhesive not surface coated or impregnated whether or not internally reinforced in rolls or sheets composite paper and paperboard made by sticking flat layers of paper or paperboard together with an adhesive not or impregnated whether or not internally reinforced in rolls or sheets +48070010,"COMPOSITE PAPER AND PAPERBOARD (MADE BY STICKING FLAT LAYERS OF PAPER OR PAPERBOARD TOGETHER WITH AN ADHESIVE), NOT SURFACE-COATED OR IMPREGNATED, WHETHER OR NOT INTERNALLY REINFORCED, IN ROLLS OR SHEETS:Composite paper and paperboard (made by sticking flat layers of paper or paperboard together with an adhesive), not surface-coated or impregnated, whether or not internally reinforced, in rolls or sheets :Straw paper and other straw board, whether or not covered with paper other than straw paper" +48070090,"COMPOSITE PAPER AND PAPERBOARD (MADE BY STICKING FLAT LAYERS OF PAPER OR PAPERBOARD TOGETHER WITH AN ADHESIVE), NOT SURFACE-COATED OR IMPREGNATED, WHETHER OR NOT INTERNALLY REINFORCED, IN ROLLS OR SHEETS:Composite paper and paperboard (made by sticking flat layers of paper or paperboard together with an adhesive), not surface-coated or impregnated, whether or not internally reinforced, in rolls or sheets :Other" +48080000,paper and paperboard corrugated with or without glued flat surface sheets creped crinkled embossed or perforated in rolls or sheets other than paper of the kind described in heading 4803 +48081000,"PAPER AND PAPERBOARD, CORRUGATED (WITH OR WITHOUT GLUED FLAT SURFACE SHEETS), CREPED, CRINKLED, EMBOSSED OR PERFORATED, IN ROLLS OR SHEETS, OTHER THAN PAPER OF THE KIND DESCRIBED IN HEADING 4803:Kraft paper, creped or crinkled, whether or not embossed or Perforated:Corrugated paper and paperboard, whether or not perforated" +48084000,paper and paperboard corrugated with or without glued flat surface sheets creped crinkled embossed or perforated in rolls or sheets other than paper of the kind described in heading 4803 >> kraft paper creped or crinkled whether or not embossed or perforated +48084010,"PAPER AND PAPERBOARD, CORRUGATED (WITH OR WITHOUT GLUED FLAT SURFACE SHEETS), CREPED, CRINKLED, EMBOSSED OR PERFORATED, IN ROLLS OR SHEETS, OTHER THAN PAPER OF THE KIND DESCRIBED IN HEADING 4803:Kraft paper, creped or crinkled, whether or not embossed or perforated:Sack kraft paper, creped or crinkled, whether or not embossed or perforated" +48084090,"PAPER AND PAPERBOARD, CORRUGATED (WITH OR WITHOUT GLUED FLAT SURFACE SHEETS), CREPED, CRINKLED, EMBOSSED OR PERFORATED, IN ROLLS OR SHEETS, OTHER THAN PAPER OF THE KIND DESCRIBED 
IN HEADING 4803:Kraft paper, creped or crinkled, whether or not embossed or perforated:Other Kraft paper, creped or crinkled, whether or nor embossed or perforated" +48089000,"PAPER AND PAPERBOARD, CORRUGATED (WITH OR WITHOUT GLUED FLAT SURFACE SHEETS), CREPED, CRINKLED, EMBOSSED OR PERFORATED, IN ROLLS OR SHEETS, OTHER THAN PAPER OF THE KIND DESCRIBED IN HEADING 4803::Other" +48090000,carbon paper paper and other copying or transfer papers including coated or impregnated paper for duplicator stencils or offset plates whether or not printed in rolls or sheets +48092000,"CARBON PAPER, SELF-COPY PAPER AND OTHER COPYING OR TRANSFER PAPERS (INCLUDING COATED OR IMPREGNATED PAPER FOR DUPLICATOR STENCILS OR OFFSET PLATES), WHETHER OR NOT PRINTED, IN ROLLS OR SHEETS::Self-copy paper" +48099000,"CARBON PAPER, SELF-COPY PAPER AND OTHER COPYING OR TRANSFER PAPERS (INCLUDING COATED OR IMPREGNATED PAPER FOR DUPLICATOR STENCILS OR OFFSET PLATES), WHETHER OR NOT PRINTED, IN ROLLS OR SHEETS::Other" +48100000,paper and paperboard coated on one or both sides with kaolin china clay or other inorganic substances with or without a binder and with no other coating whether or not surface coloured or printed in rolls or rectangular including square sheets of any size paper and paperboard of a kind used for writing printing or other graphic purposes not containing fibres obtained by a mechanical or process or of which not more than 10 by weight of the total fibre content consists of such fibres +48101300,paper and paperboard coated on one or both sides with kaolin china clay or other inorganic substances with or without a binder and with no other coating whether or not surface coloured or printed in rolls or rectangular including square sheets of any size paper and paperboard of a kind used for writing printing or other graphic purposes not containing fibres obtained by a mechanical or process or of which not more than 10 by weight of the total fibre content consists of such fibres >> in rolls +48101310,"PAPER AND PAPERBOARD, COATED ON ONE OR BOTH SIDES WITH KAOLIN (CHINA CLAY) OR OTHER INORGANIC SUBSTANCES, WITH OR WITHOUT A BINDER, AND WITH NO OTHER COATING, WHETHER OR NOT SURFACE - COLOURED, SURFACEDECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE:In rolls :Imitation art paper" +48101320,"PAPER AND PAPERBOARD, COATED ON ONE OR BOTH SIDES WITH KAOLIN (CHINA CLAY) OR OTHER INORGANIC SUBSTANCES, WITH OR WITHOUT A BINDER, AND WITH NO OTHER COATING, WHETHER OR NOT SURFACE - COLOURED, SURFACEDECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE:In rolls :Art paper" +48101330,"PAPER AND PAPERBOARD, COATED ON ONE OR BOTH SIDES WITH KAOLIN (CHINA CLAY) OR OTHER INORGANIC SUBSTANCES, WITH OR WITHOUT A BINDER, AND WITH NO OTHER COATING, WHETHER OR NOT SURFACE - COLOURED, SURFACEDECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE:In rolls :Chrome paper or paperboard" +48101390,"PAPER AND PAPERBOARD, COATED ON ONE OR BOTH SIDES WITH KAOLIN (CHINA CLAY) OR OTHER INORGANIC SUBSTANCES, WITH OR WITHOUT A BINDER, AND WITH NO OTHER COATING, WHETHER OR NOT SURFACE - COLOURED, SURFACEDECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE:In rolls :Other" +48101400,paper and paperboard coated on one or both sides with kaolin china clay or other inorganic substances with or without a binder and with no other coating whether or not surface coloured or printed in rolls or rectangular including square sheets 
of any size paper and paperboard of a kind used for writing printing or other graphic purposes not containing fibres obtained by a mechanical or process or of which not more than 10 by weight of the total fibre content consists of such fibres >> in sheets with one side not exceeding 435 mm and the other side not exceeding 297 mm in the unfolded state +48101410,"PAPER AND PAPERBOARD, COATED ON ONE OR BOTH SIDES WITH KAOLIN (CHINA CLAY) OR OTHER INORGANIC SUBSTANCES, WITH OR WITHOUT A BINDER, AND WITH NO OTHER COATING, WHETHER OR NOT SURFACE - COLOURED, SURFACEDECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE:In sheets with one side not exceeding 435 mm and the other side not exceeding 297 mm in the unfolded state :Imitation art paper" +48101420,"PAPER AND PAPERBOARD, COATED ON ONE OR BOTH SIDES WITH KAOLIN (CHINA CLAY) OR OTHER INORGANIC SUBSTANCES, WITH OR WITHOUT A BINDER, AND WITH NO OTHER COATING, WHETHER OR NOT SURFACE - COLOURED, SURFACEDECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE:In sheets with one side not exceeding 435 mm and the other side not exceeding 297 mm in the unfolded state :Art paper" +48101430,"PAPER AND PAPERBOARD, COATED ON ONE OR BOTH SIDES WITH KAOLIN (CHINA CLAY) OR OTHER INORGANIC SUBSTANCES, WITH OR WITHOUT A BINDER, AND WITH NO OTHER COATING, WHETHER OR NOT SURFACE - COLOURED, SURFACEDECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE:In sheets with one side not exceeding 435 mm and the other side not exceeding 297 mm in the unfolded state :Chrome paper or paperboard" +48101490,"PAPER AND PAPERBOARD, COATED ON ONE OR BOTH SIDES WITH KAOLIN (CHINA CLAY) OR OTHER INORGANIC SUBSTANCES, WITH OR WITHOUT A BINDER, AND WITH NO OTHER COATING, WHETHER OR NOT SURFACE - COLOURED, SURFACEDECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE:In sheets with one side not exceeding 435 mm and the other side not exceeding 297 mm in the unfolded state :Other" +48101900,paper and paperboard coated on one or both sides with kaolin china clay or other inorganic substances with or without a binder and with no other coating whether or not surface coloured or printed in rolls or rectangular including square sheets of any size paper and paperboard of a kind used for writing printing or other graphic purposes not containing fibres obtained by a mechanical or process or of which not more than 10 by weight of the total fibre content consists of such fibres >> other +48101910,"PAPER AND PAPERBOARD, COATED ON ONE OR BOTH SIDES WITH KAOLIN (CHINA CLAY) OR OTHER INORGANIC SUBSTANCES, WITH OR WITHOUT A BINDER, AND WITH NO OTHER COATING, WHETHER OR NOT SURFACE - COLOURED, SURFACEDECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE:Other :Imitation art paper" +48101920,"PAPER AND PAPERBOARD, COATED ON ONE OR BOTH SIDES WITH KAOLIN (CHINA CLAY) OR OTHER INORGANIC SUBSTANCES, WITH OR WITHOUT A BINDER, AND WITH NO OTHER COATING, WHETHER OR NOT SURFACE - COLOURED, SURFACEDECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE:Other :Art paper" +48101930,"PAPER AND PAPERBOARD, COATED ON ONE OR BOTH SIDES WITH KAOLIN (CHINA CLAY) OR OTHER INORGANIC SUBSTANCES, WITH OR WITHOUT A BINDER, AND WITH NO OTHER COATING, WHETHER OR NOT SURFACE - COLOURED, SURFACEDECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE:Other :Chrome paper or paperboard" +48101990,"PAPER AND PAPERBOARD, 
COATED ON ONE OR BOTH SIDES WITH KAOLIN (CHINA CLAY) OR OTHER INORGANIC SUBSTANCES, WITH OR WITHOUT A BINDER, AND WITH NO OTHER COATING, WHETHER OR NOT SURFACE - COLOURED, SURFACEDECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE:Other :Other" +48102200,"PAPER AND PAPERBOARD, COATED ON ONE OR BOTH SIDES WITH KAOLIN (CHINA CLAY) OR OTHER INORGANIC SUBSTANCES, WITH OR WITHOUT A BINDER, AND WITH NO OTHER COATING, WHETHER OR NOT SURFACE - COLOURED, SURFACEDECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE::Light-weight coated paper" +48102900,"PAPER AND PAPERBOARD, COATED ON ONE OR BOTH SIDES WITH KAOLIN (CHINA CLAY) OR OTHER INORGANIC SUBSTANCES, WITH OR WITHOUT A BINDER, AND WITH NO OTHER COATING, WHETHER OR NOT SURFACE - COLOURED, SURFACEDECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE::Other" +48103100,"PAPER AND PAPERBOARD, COATED ON ONE OR BOTH SIDES WITH KAOLIN (CHINA CLAY) OR OTHER INORGANIC SUBSTANCES, WITH OR WITHOUT A BINDER, AND WITH NO OTHER COATING, WHETHER OR NOT SURFACE - COLOURED, SURFACEDECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE::Bleached uniformly throughout the mass and of which more than 95% by weight of the total fibre content consists of wood fibres obtained by a chemical process, and weighing 150 g/m2 or less" +48103200,"PAPER AND PAPERBOARD, COATED ON ONE OR BOTH SIDES WITH KAOLIN (CHINA CLAY) OR OTHER INORGANIC SUBSTANCES, WITH OR WITHOUT A BINDER, AND WITH NO OTHER COATING, WHETHER OR NOT SURFACE - COLOURED, SURFACEDECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE::Bleached uniformly throughout the mass and of which more than 95% by weight of the total fibre content consists of wood fibres obtained by a chemical process, and weighing more than 150 g/m2" +48103900,paper and paperboard coated on one or both sides with kaolin china clay or other inorganic substances with or without a binder and with no other coating whether or not surface coloured or printed in rolls or rectangular including square sheets of any size paper and paperboard of a kind used for writing printing or other graphic purposes not containing fibres obtained by a mechanical or process or of which not more than 10 by weight of the total fibre content consists of such fibres >> other +48103910,"PAPER AND PAPERBOARD, COATED ON ONE OR BOTH SIDES WITH KAOLIN (CHINA CLAY) OR OTHER INORGANIC SUBSTANCES, WITH OR WITHOUT A BINDER, AND WITH NO OTHER COATING, WHETHER OR NOT SURFACE - COLOURED, SURFACEDECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE:Other :Insulating paper" +48103920,"PAPER AND PAPERBOARD, COATED ON ONE OR BOTH SIDES WITH KAOLIN (CHINA CLAY) OR OTHER INORGANIC SUBSTANCES, WITH OR WITHOUT A BINDER, AND WITH NO OTHER COATING, WHETHER OR NOT SURFACE - COLOURED, SURFACEDECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE:Other :Electric insulating press board" +48103930,"PAPER AND PAPERBOARD, COATED ON ONE OR BOTH SIDES WITH KAOLIN (CHINA CLAY) OR OTHER INORGANIC SUBSTANCES, WITH OR WITHOUT A BINDER, AND WITH NO OTHER COATING, WHETHER OR NOT SURFACE - COLOURED, SURFACEDECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE:Other :Insulation boards (homogenous)" +48103990,"PAPER AND PAPERBOARD, COATED ON ONE OR BOTH SIDES WITH KAOLIN (CHINA CLAY) OR OTHER INORGANIC SUBSTANCES, WITH OR WITHOUT A BINDER, AND WITH NO OTHER 
COATING, WHETHER OR NOT SURFACE - COLOURED, SURFACEDECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE:Other :Other" +48109200,"PAPER AND PAPERBOARD, COATED ON ONE OR BOTH SIDES WITH KAOLIN (CHINA CLAY) OR OTHER INORGANIC SUBSTANCES, WITH OR WITHOUT A BINDER, AND WITH NO OTHER COATING, WHETHER OR NOT SURFACE - COLOURED, SURFACEDECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE::Multi-ply" +48109900,"PAPER AND PAPERBOARD, COATED ON ONE OR BOTH SIDES WITH KAOLIN (CHINA CLAY) OR OTHER INORGANIC SUBSTANCES, WITH OR WITHOUT A BINDER, AND WITH NO OTHER COATING, WHETHER OR NOT SURFACE - COLOURED, SURFACEDECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE::Other" +48111000,"PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBRES, COATED, IMPREGNATED, COVERED, SURFACE-COLOURED, SURFACE-DECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE, OTHER THAN GOODS OF THE KIND DESCRIBED IN HEADING 4803, 4809 OR 4810::Tarred, bituminised or asphalted paper and paperboard(OLD tariff)" +48114100,"PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBRES, COATED, IMPREGNATED, COVERED, SURFACE-COLOURED, SURFACE-DECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE, OTHER THAN GOODS OF THE KIND DESCRIBED IN HEADING 4803, 4809 OR 4810::Self-adhesive(OLD tariff)" +48114900,"PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBRES, COATED, IMPREGNATED, COVERED, SURFACE-COLOURED, SURFACE-DECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE, OTHER THAN GOODS OF THE KIND DESCRIBED IN HEADING 4803, 4809 OR 4810::Other(OLD tariff)" +48115110,"PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBRES, COATED, IMPREGNATED, COVERED, SURFACE-COLOURED, SURFACE-DECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE, OTHER THAN GOODS OF THE KIND DESCRIBED IN HEADING 4803, 4809 OR 4810:Bleached weighing more than 150 g/sqm:Aseptic packaging paper(OLD tariff)" +48115190,"PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBRES, COATED, IMPREGNATED, COVERED, SURFACE-COLOURED, SURFACE-DECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE, OTHER THAN GOODS OF THE KIND DESCRIBED IN HEADING 4803, 4809 OR 4810:Bleached weighing more than 150 g/sqm:Other(OLD tariff)" +48115910,"PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBRES, COATED, IMPREGNATED, COVERED, SURFACE-COLOURED, SURFACE-DECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE, OTHER THAN GOODS OF THE KIND DESCRIBED IN HEADING 4803, 4809 OR 4810:Other:Aceptic packing paper(OLD tariff)" +48115990,"PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBRES, COATED, IMPREGNATED, COVERED, SURFACE-COLOURED, SURFACE-DECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE, OTHER THAN GOODS OF THE KIND DESCRIBED IN HEADING 4803, 4809 OR 4810:Other:Others(OLD tariff)" +48116000,"PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBRES, COATED, IMPREGNATED, COVERED, SURFACE-COLOURED, SURFACE-DECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE, OTHER THAN GOODS OF THE KIND DESCRIBED IN HEADING 4803, 4809 OR 4810::Paper and paperboard, coated, impregnated or covered with wax, paraffin wax, stearine, oil or glycerol(OLD tariff)" +48119011,"PAPER, PAPERBOARD, CELLULOSE 
WADDING AND WEBS OF CELLULOSE FIBRES, COATED, IMPREGNATED, COVERED, SURFACE-COLOURED, SURFACE-DECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE, OTHER THAN GOODS OF THE KIND DESCRIBED IN HEADING 4803, 4809 OR 4810:Other paper, paperboard, cellulose wadding and webs of cellulose fibres :Handmade paper and paperboard, rules, lined or squared but not otherwise printed(OLD tariff)" +48119012,"PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBRES, COATED, IMPREGNATED, COVERED, SURFACE-COLOURED, SURFACE-DECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE, OTHER THAN GOODS OF THE KIND DESCRIBED IN HEADING 4803, 4809 OR 4810:Other paper, paperboard, cellulose wadding and webs of cellulose fibres :Chromo and art paper, coated(OLD tariff)" +48119013,"PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBRES, COATED, IMPREGNATED, COVERED, SURFACE-COLOURED, SURFACE-DECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE, OTHER THAN GOODS OF THE KIND DESCRIBED IN HEADING 4803, 4809 OR 4810:Other paper, paperboard, cellulose wadding and webs of cellulose fibres :Building board of paper or pulp, impregnated(OLD tariff)" +48119014,"PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBRES, COATED, IMPREGNATED, COVERED, SURFACE-COLOURED, SURFACE-DECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE, OTHER THAN GOODS OF THE KIND DESCRIBED IN HEADING 4803, 4809 OR 4810:Other paper, paperboard, cellulose wadding and webs of cellulose fibres :Chromo board(OLD tariff)" +48119015,"PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBRES, COATED, IMPREGNATED, COVERED, SURFACE-COLOURED, SURFACE-DECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE, OTHER THAN GOODS OF THE KIND DESCRIBED IN HEADING 4803, 4809 OR 4810:Other paper, paperboard, cellulose wadding and webs of cellulose fibres :Raw base paper for sensitising, coated(OLD tariff)" +48119016,"PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBRES, COATED, IMPREGNATED, COVERED, SURFACE-COLOURED, SURFACE-DECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE, OTHER THAN GOODS OF THE KIND DESCRIBED IN HEADING 4803, 4809 OR 4810:Other paper, paperboard, cellulose wadding and webs of cellulose fibres :Surface marbled paper(OLD tariff)" +48119017,"PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBRES, COATED, IMPREGNATED, COVERED, SURFACE-COLOURED, SURFACE-DECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE, OTHER THAN GOODS OF THE KIND DESCRIBED IN HEADING 4803, 4809 OR 4810:Other paper, paperboard, cellulose wadding and webs of cellulose fibres :Leather board and imitation leather board(OLD tariff)" +48119018,"PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBRES, COATED, IMPREGNATED, COVERED, SURFACE-COLOURED, SURFACE-DECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE, OTHER THAN GOODS OF THE KIND DESCRIBED IN HEADING 4803, 4809 OR 4810:Other paper, paperboard, cellulose wadding and webs of cellulose fibres :Matrix board(OLD tariff)" +48119091,"PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBRES, COATED, IMPREGNATED, COVERED, SURFACE-COLOURED, SURFACE-DECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE, OTHER THAN GOODS OF THE KIND DESCRIBED IN HEADING 4803, 4809 OR 4810:Other paper, 
paperboard, cellulose wadding and webs of cellulose fibres :Grape guard paper(OLD tariff)" +48119093,"PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBRES, COATED, IMPREGNATED, COVERED, SURFACE-COLOURED, SURFACE-DECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE, OTHER THAN GOODS OF THE KIND DESCRIBED IN HEADING 4803, 4809 OR 4810:Other paper, paperboard, cellulose wadding and webs of cellulose fibres :Thermal paper for fax machines(OLD tariff)" +48119094,"PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBRES, COATED, IMPREGNATED, COVERED, SURFACE-COLOURED, SURFACE-DECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE, OTHER THAN GOODS OF THE KIND DESCRIBED IN HEADING 4803, 4809 OR 4810:Other paper, paperboard, cellulose wadding and webs of cellulose fibres :Thermal paper in jumbo rolls (size 1 mt and above in width and 5,000 mt and above in length)(OLD tariff)" +48119099,"PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBRES, COATED, IMPREGNATED, COVERED, SURFACE-COLOURED, SURFACE-DECORATED OR PRINTED, IN ROLLS OR RECTANGULAR (INCLUDING SQUARE) SHEETS, OF ANY SIZE, OTHER THAN GOODS OF THE KIND DESCRIBED IN HEADING 4803, 4809 OR 4810:Other paper, paperboard, cellulose wadding and webs of cellulose fibres :Other(OLD tariff)" +48120000,"::FILTER BLOCKS, SLABS AND PLATES, OF PAPER PULP" +48130000,cigarette paper whether or not cut to size or in the form of booklets or tubes +48131000,"CIGARETTE PAPER, WHETHER OR NOT CUT TO SIZE OR IN THE FORM OF BOOKLETS OR TUBES::In the form of booklets or tubes" +48132000,"CIGARETTE PAPER, WHETHER OR NOT CUT TO SIZE OR IN THE FORM OF BOOKLETS OR TUBES::In rolls of a width not exceeding 5 cm" +48139000,cigarette paper whether or not cut to size or in the form of booklets or tubes >> other +48139010,"CIGARETTE PAPER, WHETHER OR NOT CUT TO SIZE OR IN THE FORM OF BOOKLETS OR TUBES:Other :Cigarette paper in bulk, or in sheets(OLD tariff)" +48139090,"CIGARETTE PAPER, WHETHER OR NOT CUT TO SIZE OR IN THE FORM OF BOOKLETS OR TUBES:Other :Other" +48140000,wallpaper and similar wall coverings window transparencies of paper +48142000,"WALLPAPER AND SIMILAR WALL COVERINGS; WINDOW TRANSPARENCIES OF PAPER::Wallpaper and similar wall coverings, consisting of paper coated or covered, on the face side, with a grained, embossed, coloured, design-printed or otherwise decorated layer of plastics" +48149000,WALLPAPER AND SIMILAR WALL COVERINGS; WINDOW TRANSPARENCIES OF PAPER::Other +48160000,paper and other copying or transfer papers other than those of heading 4809 duplicator stencils and offset plates of paper whether or not put up in boxes +48162000,paper and other copying or transfer papers other than those of heading 4809 duplicator stencils and offset plates of paper whether or not put up in boxes >> paper +48162010,"CARBON-PAPER, SELF-COPY PAPER AND OTHER COPYING OR TRANSFER PAPERS (OTHER THAN THOSE OF HEADING 4809), DUPLICATOR STENCILS AND OFFSET PLATES, OF PAPER, WHETHER OR NOT PUT UP IN BOXES:Self-copy paper :Duplicating paper, cut to size" +48162020,"CARBON-PAPER, SELF-COPY PAPER AND OTHER COPYING OR TRANSFER PAPERS (OTHER THAN THOSE OF HEADING 4809), DUPLICATOR STENCILS AND OFFSET PLATES, OF PAPER, WHETHER OR NOT PUT UP IN BOXES:Self-copy paper :Paper for fax machine" +48162090,"CARBON-PAPER, SELF-COPY PAPER AND OTHER COPYING OR TRANSFER PAPERS (OTHER THAN THOSE OF HEADING 4809), DUPLICATOR STENCILS AND OFFSET PLATES, OF PAPER, WHETHER OR NOT PUT UP IN 
BOXES:Self-copy paper :Other" +48169000,paper and other copying or transfer papers other than those of heading 4809 duplicator stencils and offset plates of paper whether or not put up in boxes >> other +48169010,"CARBON-PAPER, SELF-COPY PAPER AND OTHER COPYING OR TRANSFER PAPERS (OTHER THAN THOSE OF HEADING 4809), DUPLICATOR STENCILS AND OFFSET PLATES, OF PAPER, WHETHER OR NOT PUT UP IN BOXES:Other :Other copying or transfer papers" +48169020,"CARBON-PAPER, SELF-COPY PAPER AND OTHER COPYING OR TRANSFER PAPERS (OTHER THAN THOSE OF HEADING 4809), DUPLICATOR STENCILS AND OFFSET PLATES, OF PAPER, WHETHER OR NOT PUT UP IN BOXES:Other :Calculating machine paper in rolls and strips not exceeding 15 cm in width" +48169090,"CARBON-PAPER, SELF-COPY PAPER AND OTHER COPYING OR TRANSFER PAPERS (OTHER THAN THOSE OF HEADING 4809), DUPLICATOR STENCILS AND OFFSET PLATES, OF PAPER, WHETHER OR NOT PUT UP IN BOXES:Other :Other" +48170000,envelopes letter cards plain postcards and correspondence cards of paper or paperboard boxes pouches wallets and writing compendiums of paper or paperboard containing an assortment of paper stationery +48171000,"ENVELOPES, LETTER CARDS, PLAIN POSTCARDS AND CORRESPONDENCE CARDS, OF PAPER OR PAPERBOARD; BOXES, POUCHES, WALLETS AND WRITING COMPENDIUMS, OF PAPER OR PAPERBOARD, CONTAINING AN ASSORTMENT OF PAPER STATIONERY::Envelopes" +48172000,"ENVELOPES, LETTER CARDS, PLAIN POSTCARDS AND CORRESPONDENCE CARDS, OF PAPER OR PAPERBOARD; BOXES, POUCHES, WALLETS AND WRITING COMPENDIUMS, OF PAPER OR PAPERBOARD, CONTAINING AN ASSORTMENT OF PAPER STATIONERY::Letter cards, plain postcards and correspondence cards" +48173000,envelopes letter cards plain postcards and correspondence cards of paper or paperboard boxes pouches wallets and writing compendiums of paper or paperboard containing an assortment of paper stationery >> boxes pouches wallets and writing compendiums of paper or paperboard containing an assortment of paper stationery +48173010,"ENVELOPES, LETTER CARDS, PLAIN POSTCARDS AND CORRESPONDENCE CARDS, OF PAPER OR PAPERBOARD; BOXES, POUCHES, WALLETS AND WRITING COMPENDIUMS, OF PAPER OR PAPERBOARD, CONTAINING AN ASSORTMENT OF PAPER STATIONERY:Boxes, pouches, wallets and writing compendiums, of paper or paperboard, containing an assortment of paper stationery:Writing blocks" +48173090,"ENVELOPES, LETTER CARDS, PLAIN POSTCARDS AND CORRESPONDENCE CARDS, OF PAPER OR PAPERBOARD; BOXES, POUCHES, WALLETS AND WRITING COMPENDIUMS, OF PAPER OR PAPERBOARD, CONTAINING AN ASSORTMENT OF PAPER STATIONERY:Boxes, pouches, wallets and writing compendiums, of paper or paperboard, containing an assortment of paper stationery:Other" +48180000,toilet paper and similar paper cellulose wadding or webs of cellulose fibres of a kind used for household or sanitary purposes in rolls of a width not exceeding 36 cm or cut to size or shape handkerchiefs cleansing tissues towels table cloths serviettes bed sheets and similar household sanitary or hospital articles articles of apparel and clothing accessories of paper pulp paper cellulose wadding or webs of cellulose fibres +48181000,"TOILET PAPER AND SIMILAR PAPER, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBRES, OF A KIND USED FOR HOUSEHOLD OR SANITARY PURPOSES, IN ROLLS OF A WIDTH NOT EXCEEDING 36 CM, OR CUT TO SIZE OR SHAPE; HANDKERCHIEFS, CLEANSING TISSUES, TOWELS, TABLE CLOTHS, SERVIETTES, NAPKINS FOR BABIES, TAMPONS, BED SHEETS AND SIMILAR HOUSEHOLD, SANITARY OR HOSPITAL ARTICLES, ARTICLES OF APPAREL AND CLOTHING ACCESSORIES, OF
PAPER PULP, PAPER, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBRES::Toilet paper" +48182000,"TOILET PAPER AND SIMILAR PAPER, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBRES, OF A KIND USED FOR HOUSEHOLD OR SANITARY PURPOSES, IN ROLLS OF A WIDTH NOT EXCEEDING 36 CM, OR CUT TO SIZE OR SHAPE; HANDKERCHIEFS, CLEANSING TISSUES, TOWELS, TABLE CLOTHS, SERVIETTES, NAPKINS FOR BABIES, TAMPONS, BED SHEETS AND SIMILAR HOUSEHOLD, SANITARY OR HOSPITAL ARTICLES, ARTICLES OF APPAREL AND CLOTHING ACCESSORIES, OF PAPER PULP, PAPER, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBRES::Handkerchiefs, cleansing or facial tissues and towels" +48183000,"TOILET PAPER AND SIMILAR PAPER, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBRES, OF A KIND USED FOR HOUSEHOLD OR SANITARY PURPOSES, IN ROLLS OF A WIDTH NOT EXCEEDING 36 CM, OR CUT TO SIZE OR SHAPE; HANDKERCHIEFS, CLEANSING TISSUES, TOWELS, TABLE CLOTHS, SERVIETTES, NAPKINS FOR BABIES, TAMPONS, BED SHEETS AND SIMILAR HOUSEHOLD, SANITARY OR HOSPITAL ARTICLES, ARTICLES OF APPAREL AND CLOTHING ACCESSORIES, OF PAPER PULP, PAPER, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBRES::Tablecloths and serviettes(OLD tariff)" +48185000,"TOILET PAPER AND SIMILAR PAPER, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBRES, OF A KIND USED FOR HOUSEHOLD OR SANITARY PURPOSES, IN ROLLS OF A WIDTH NOT EXCEEDING 36 CM, OR CUT TO SIZE OR SHAPE; HANDKERCHIEFS, CLEANSING TISSUES, TOWELS, TABLE CLOTHS, SERVIETTES, NAPKINS FOR BABIES, TAMPONS, BED SHEETS AND SIMILAR HOUSEHOLD, SANITARY OR HOSPITAL ARTICLES, ARTICLES OF APPAREL AND CLOTHING ACCESSORIES, OF PAPER PULP, PAPER, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBRES::Articles of apparel and clothing accessories" +48189000,"TOILET PAPER AND SIMILAR PAPER, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBRES, OF A KIND USED FOR HOUSEHOLD OR SANITARY PURPOSES, IN ROLLS OF A WIDTH NOT EXCEEDING 36 CM, OR CUT TO SIZE OR SHAPE; HANDKERCHIEFS, CLEANSING TISSUES, TOWELS, TABLE CLOTHS, SERVIETTES, NAPKINS FOR BABIES, TAMPONS, BED SHEETS AND SIMILAR HOUSEHOLD, SANITARY OR HOSPITAL ARTICLES, ARTICLES OF APPAREL AND CLOTHING ACCESSORIES, OF PAPER PULP, PAPER, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBRES::Other" +48190000,cartons boxes cases bags and other packing containers of paper paperboard cellulose wadding or webs of cellulose fibres box files letter trays and similar articles of paper or paperboard of a kind used in offices shops or the like +48191000,cartons boxes cases bags and other packing containers of paper paperboard cellulose wadding or webs of cellulose fibres box files letter trays and similar articles of paper or paperboard of a kind used in offices shops or the like >> cartons boxes and cases of corrugated paper or paperboard +48191010,"CARTONS, BOXES, CASES, BAGS AND OTHER PACKING CONTAINERS, OF PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBRES; BOX FILES, LETTER TRAYS, AND SIMILAR ARTICLES, OF PAPER OR PAPERBOARD OF A KIND USED IN OFFICES, SHOPS OR THE LIKE:Cartons, boxes and cases, of corrugated paper or paperboard :Boxes" +48191090,"CARTONS, BOXES, CASES, BAGS AND OTHER PACKING CONTAINERS, OF PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBRES; BOX FILES, LETTER TRAYS, AND SIMILAR ARTICLES, OF PAPER OR PAPERBOARD OF A KIND USED IN OFFICES, SHOPS OR THE LIKE:Cartons, boxes and cases, of corrugated paper or paperboard :Other" +48192000,cartons boxes cases bags and other packing containers of paper paperboard cellulose wadding or webs of cellulose fibres box files letter trays and similar articles of paper or paperboard of a 
kind used in offices shops or the like >> folding cartons boxes and cases of non corrugated paper and paperboard +48192010,"CARTONS, BOXES, CASES, BAGS AND OTHER PACKING CONTAINERS, OF PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBRES; BOX FILES, LETTER TRAYS, AND SIMILAR ARTICLES, OF PAPER OR PAPERBOARD OF A KIND USED IN OFFICES, SHOPS OR THE LIKE:Folding cartons, boxes and cases, of non-corrugated paper and paperboard :Cartons, boxes, cases, intended for the packing of match sticks" +48192020,"CARTONS, BOXES, CASES, BAGS AND OTHER PACKING CONTAINERS, OF PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBRES; BOX FILES, LETTER TRAYS, AND SIMILAR ARTICLES, OF PAPER OR PAPERBOARD OF A KIND USED IN OFFICES, SHOPS OR THE LIKE:Folding cartons, boxes and cases, of non-corrugated paper and paperboard :Boxes(OLD tariff)" +48192090,"CARTONS, BOXES, CASES, BAGS AND OTHER PACKING CONTAINERS, OF PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBRES; BOX FILES, LETTER TRAYS, AND SIMILAR ARTICLES, OF PAPER OR PAPERBOARD OF A KIND USED IN OFFICES, SHOPS OR THE LIKE:Folding cartons, boxes and cases, of non-corrugated paper and paperboard :Other(OLD tariff)" +48193000,"CARTONS, BOXES, CASES, BAGS AND OTHER PACKING CONTAINERS, OF PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBRES; BOX FILES, LETTER TRAYS, AND SIMILAR ARTICLES, OF PAPER OR PAPERBOARD OF A KIND USED IN OFFICES, SHOPS OR THE LIKE::Sacks and bags, having a base of a width of 40 cm or more(OLD tariff)" +48194000,"CARTONS, BOXES, CASES, BAGS AND OTHER PACKING CONTAINERS, OF PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBRES; BOX FILES, LETTER TRAYS, AND SIMILAR ARTICLES, OF PAPER OR PAPERBOARD OF A KIND USED IN OFFICES, SHOPS OR THE LIKE::Other sacks and bags, including cones(OLD tariff)" +48195010,"CARTONS, BOXES, CASES, BAGS AND OTHER PACKING CONTAINERS, OF PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBRES; BOX FILES, LETTER TRAYS, AND SIMILAR ARTICLES, OF PAPER OR PAPERBOARD OF A KIND USED IN OFFICES, SHOPS OR THE LIKE:Other packing containers, including record sleeves :Made of corrugated paper or paperboard(OLD tariff)" +48195090,"CARTONS, BOXES, CASES, BAGS AND OTHER PACKING CONTAINERS, OF PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBRES; BOX FILES, LETTER TRAYS, AND SIMILAR ARTICLES, OF PAPER OR PAPERBOARD OF A KIND USED IN OFFICES, SHOPS OR THE LIKE:Other packing containers, including record sleeves :Other(OLD tariff)" +48196000,"CARTONS, BOXES, CASES, BAGS AND OTHER PACKING CONTAINERS, OF PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBRES; BOX FILES, LETTER TRAYS, AND SIMILAR ARTICLES, OF PAPER OR PAPERBOARD OF A KIND USED IN OFFICES, SHOPS OR THE LIKE::Box files, letter trays, storage boxes and similar articles, of a kind used in offices, shops or the like(OLD tariff)" +48201010,"REGISTERS, ACCOUNT BOOKS, NOTE BOOKS, ORDER BOOKS, RECEIPT BOOKS, LETTER PADS, MEMORANDUM PADS, DIARIES AND SIMILAR ARTICLES, EXERCISE BOOKS, BLOTTINGPADS, BINDERS (LOOSE-LEAF OR OTHER), FOLDERS, FILE COVERS, MANIFOLD BUSINESS FORMS, INTERLEAVED CARBON SETS AND OTHER ARTICLES OF STATIONERY, OF PAPER OR PAPERBOARD; ALBUMS FOR SAMPLES OR FOR COLLECTIONS AND BOOK COVERS, OF PAPER OR PAPERBOARD:Registers, account books, note books, order books, receipt books, letter pads, memorandum pads, diaries and similar articles :Registers, account books(OLD tariff)" +48201020,"REGISTERS, ACCOUNT BOOKS, NOTE BOOKS, ORDER BOOKS, RECEIPT BOOKS, LETTER PADS, MEMORANDUM 
PADS, DIARIES AND SIMILAR ARTICLES, EXERCISE BOOKS, BLOTTINGPADS, BINDERS (LOOSE-LEAF OR OTHER), FOLDERS, FILE COVERS, MANIFOLD BUSINESS FORMS, INTERLEAVED CARBON SETS AND OTHER ARTICLES OF STATIONERY, OF PAPER OR PAPERBOARD; ALBUMS FOR SAMPLES OR FOR COLLECTIONS AND BOOK COVERS, OF PAPER OR PAPERBOARD:Registers, account books, note books, order books, receipt books, letter pads, memorandum pads, diaries and similar articles :Letter pads(OLD tariff)" +48201090,"REGISTERS, ACCOUNT BOOKS, NOTE BOOKS, ORDER BOOKS, RECEIPT BOOKS, LETTER PADS, MEMORANDUM PADS, DIARIES AND SIMILAR ARTICLES, EXERCISE BOOKS, BLOTTINGPADS, BINDERS (LOOSE-LEAF OR OTHER), FOLDERS, FILE COVERS, MANIFOLD BUSINESS FORMS, INTERLEAVED CARBON SETS AND OTHER ARTICLES OF STATIONERY, OF PAPER OR PAPERBOARD; ALBUMS FOR SAMPLES OR FOR COLLECTIONS AND BOOK COVERS, OF PAPER OR PAPERBOARD:Registers, account books, note books, order books, receipt books, letter pads, memorandum pads, diaries and similar articles :Other(OLD tariff)" +48202000,"REGISTERS, ACCOUNT BOOKS, NOTE BOOKS, ORDER BOOKS, RECEIPT BOOKS, LETTER PADS, MEMORANDUM PADS, DIARIES AND SIMILAR ARTICLES, EXERCISE BOOKS, BLOTTINGPADS, BINDERS (LOOSE-LEAF OR OTHER), FOLDERS, FILE COVERS, MANIFOLD BUSINESS FORMS, INTERLEAVED CARBON SETS AND OTHER ARTICLES OF STATIONERY, OF PAPER OR PAPERBOARD; ALBUMS FOR SAMPLES OR FOR COLLECTIONS AND BOOK COVERS, OF PAPER OR PAPERBOARD::Exercise books(OLD tariff)" +48203000,"REGISTERS, ACCOUNT BOOKS, NOTE BOOKS, ORDER BOOKS, RECEIPT BOOKS, LETTER PADS, MEMORANDUM PADS, DIARIES AND SIMILAR ARTICLES, EXERCISE BOOKS, BLOTTINGPADS, BINDERS (LOOSE-LEAF OR OTHER), FOLDERS, FILE COVERS, MANIFOLD BUSINESS FORMS, INTERLEAVED CARBON SETS AND OTHER ARTICLES OF STATIONERY, OF PAPER OR PAPERBOARD; ALBUMS FOR SAMPLES OR FOR COLLECTIONS AND BOOK COVERS, OF PAPER OR PAPERBOARD::Binders (other than book covers), folders and file covers(OLD tariff)" +48204000,"REGISTERS, ACCOUNT BOOKS, NOTE BOOKS, ORDER BOOKS, RECEIPT BOOKS, LETTER PADS, MEMORANDUM PADS, DIARIES AND SIMILAR ARTICLES, EXERCISE BOOKS, BLOTTINGPADS, BINDERS (LOOSE-LEAF OR OTHER), FOLDERS, FILE COVERS, MANIFOLD BUSINESS FORMS, INTERLEAVED CARBON SETS AND OTHER ARTICLES OF STATIONERY, OF PAPER OR PAPERBOARD; ALBUMS FOR SAMPLES OR FOR COLLECTIONS AND BOOK COVERS, OF PAPER OR PAPERBOARD::Manifold business forms and interleaved carbon sets(OLD tariff)" +48205000,"REGISTERS, ACCOUNT BOOKS, NOTE BOOKS, ORDER BOOKS, RECEIPT BOOKS, LETTER PADS, MEMORANDUM PADS, DIARIES AND SIMILAR ARTICLES, EXERCISE BOOKS, BLOTTINGPADS, BINDERS (LOOSE-LEAF OR OTHER), FOLDERS, FILE COVERS, MANIFOLD BUSINESS FORMS, INTERLEAVED CARBON SETS AND OTHER ARTICLES OF STATIONERY, OF PAPER OR PAPERBOARD; ALBUMS FOR SAMPLES OR FOR COLLECTIONS AND BOOK COVERS, OF PAPER OR PAPERBOARD::Albums for samples or for collections(OLD tariff)" +48209010,"REGISTERS, ACCOUNT BOOKS, NOTE BOOKS, ORDER BOOKS, RECEIPT BOOKS, LETTER PADS, MEMORANDUM PADS, DIARIES AND SIMILAR ARTICLES, EXERCISE BOOKS, BLOTTINGPADS, BINDERS (LOOSE-LEAF OR OTHER), FOLDERS, FILE COVERS, MANIFOLD BUSINESS FORMS, INTERLEAVED CARBON SETS AND OTHER ARTICLES OF STATIONERY, OF PAPER OR PAPERBOARD; ALBUMS FOR SAMPLES OR FOR COLLECTIONS AND BOOK COVERS, OF PAPER OR PAPERBOARD:Other :Blotting papers cut to size(OLD tariff)" +48209090,"REGISTERS, ACCOUNT BOOKS, NOTE BOOKS, ORDER BOOKS, RECEIPT BOOKS, LETTER PADS, MEMORANDUM PADS, DIARIES AND SIMILAR ARTICLES, EXERCISE BOOKS, BLOTTINGPADS, BINDERS (LOOSE-LEAF OR OTHER), FOLDERS, FILE COVERS, MANIFOLD BUSINESS FORMS, INTERLEAVED 
CARBON SETS AND OTHER ARTICLES OF STATIONERY, OF PAPER OR PAPERBOARD; ALBUMS FOR SAMPLES OR FOR COLLECTIONS AND BOOK COVERS, OF PAPER OR PAPERBOARD:Other :Other(OLD tariff)" +48210000,paper or paperboard labels of all kinds whether or not printed +48211000,paper or paperboard labels of all kinds whether or not printed >> printed +48211010,"PAPER OR PAPERBOARD LABELS OF ALL KINDS, WHETHER OR NOT PRINTED:Printed :Paper tags" +48211020,"PAPER OR PAPERBOARD LABELS OF ALL KINDS, WHETHER OR NOT PRINTED:Printed :Labels" +48211090,"PAPER OR PAPERBOARD LABELS OF ALL KINDS, WHETHER OR NOT PRINTED:Printed :Other" +48219000,paper or paperboard labels of all kinds whether or not printed >> other +48219010,"PAPER OR PAPERBOARD LABELS OF ALL KINDS, WHETHER OR NOT PRINTED:Other :Labels" +48219090,"PAPER OR PAPERBOARD LABELS OF ALL KINDS, WHETHER OR NOT PRINTED:Other :Other" +48220000,bobbins spools cops and similar supports of paper pulp paper or paperboard whether or not perforated or hardened +48221000,"BOBBINS, SPOOLS, COPS AND SIMILAR SUPPORTS OF PAPER PULP, PAPER OR PAPERBOARD (WHETHER OR NOT PERFORATED OR HARDENED)::Of a kind used for winding textile yarn" +48229000,bobbins spools cops and similar supports of paper pulp paper or paperboard whether or not perforated or hardened >> other +48229010,"BOBBINS, SPOOLS, COPS AND SIMILAR SUPPORTS OF PAPER PULP, PAPER OR PAPERBOARD (WHETHER OR NOT PERFORATED OR HARDENED):Other :Paper tubes" +48229090,"BOBBINS, SPOOLS, COPS AND SIMILAR SUPPORTS OF PAPER PULP, PAPER OR PAPERBOARD (WHETHER OR NOT PERFORATED OR HARDENED):Other :Other" +48230000,other paper paperboard cellulose wadding and webs of cellulose fibers cut to size or shape +48232000,"OTHER PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBERS, CUT TO SIZE OR SHAPE; OTHER ARTICLES OF PAPERPULP, PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBERS::Filter paper and paperboard" +48234000,"OTHER PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBERS, CUT TO SIZE OR SHAPE; OTHER ARTICLES OF PAPERPULP, PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBERS::Rolls, sheets and dials, printed for self- recording apparatus" +48236100,"OTHER PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBERS, CUT TO SIZE OR SHAPE; OTHER ARTICLES OF PAPERPULP, PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBERS::Of bamboo" +48236900,"OTHER PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBERS, CUT TO SIZE OR SHAPE; OTHER ARTICLES OF PAPERPULP, PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBERS::Other" +48237000,other paper paperboard cellulose wadding and webs of cellulose fibers cut to size or shape >> moulded or pressed article of paper pulp +48237010,"OTHER PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBERS, CUT TO SIZE OR SHAPE; OTHER ARTICLES OF PAPERPULP, PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBERS:Moulded or pressed article of paper pulp :Paper pulp moulded trays" +48237020,"OTHER PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBERS, CUT TO SIZE OR SHAPE; OTHER ARTICLES OF PAPERPULP, PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBERS:Moulded or pressed article of paper pulp :Wood pulp board" +48237030,"OTHER PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBERS, CUT TO SIZE OR SHAPE; OTHER ARTICLES OF PAPERPULP, PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBERS:Moulded or pressed article of paper pulp :Articles made of paper mache other 
than" +48237090,"OTHER PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBERS, CUT TO SIZE OR SHAPE; OTHER ARTICLES OF PAPERPULP, PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBERS:Moulded or pressed article of paper pulp :Other" +48239000,other paper paperboard cellulose wadding and webs of cellulose fibers cut to size or shape >> other braille paper cellulose in sole board or sheet packing and wrapping paper paper for cigarette filter tips paper cone for loud speaker patterns made of papers for leather footwear leather garments and goods patterns made of paper for articles of apparel and clothing accessories products consisting of sheets of paper or paper board impregnated coated or covered with plastics including thermoset resins or mixtures thereof or chemical formulations melamine phenol or urea formaldehyde with or without curing agents or catalysts compressed together in one or more operations decorative laminates +48239011,"OTHER PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBERS, CUT TO SIZE OR SHAPE; OTHER ARTICLES OF PAPERPULP, PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBERS:Other :Braille paper" +48239012,"OTHER PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBERS, CUT TO SIZE OR SHAPE; OTHER ARTICLES OF PAPERPULP, PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBERS:Other :Cellulose in sole board or sheet" +48239013,"OTHER PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBERS, CUT TO SIZE OR SHAPE; OTHER ARTICLES OF PAPERPULP, PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBERS:Other :Packing and wrapping paper" +48239014,"OTHER PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBERS, CUT TO SIZE OR SHAPE; OTHER ARTICLES OF PAPERPULP, PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBERS:Other :Paper for cigarette filter tips" +48239015,"OTHER PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBERS, CUT TO SIZE OR SHAPE; OTHER ARTICLES OF PAPERPULP, PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBERS:Other :Paper cone for loud speaker" +48239016,"OTHER PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBERS, CUT TO SIZE OR SHAPE; OTHER ARTICLES OF PAPERPULP, PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBERS:Other :Patterns made of papers for leather footwear, leather garments and goods" +48239017,"OTHER PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBERS, CUT TO SIZE OR SHAPE; OTHER ARTICLES OF PAPERPULP, PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBERS:Other :Patterns made of paper for articles of apparel and clothing accessories" +48239018,"OTHER PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBERS, CUT TO SIZE OR SHAPE; OTHER ARTICLES OF PAPERPULP, PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBERS:Other :Products consisting of sheets of paper or paperboard, impregnated, coated or covered with plastics (including thermoset resins or mixtures thereof or chemical formulations containing melamine, phenol or urea formaldehyde with or without curing agents or catalysts), compressed together in one or more operations" +48239019,"OTHER PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBERS, CUT TO SIZE OR SHAPE; OTHER ARTICLES OF PAPERPULP, PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBERS:Other :Decorative laminates" +48239021,"OTHER PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBERS, CUT TO SIZE OR SHAPE; 
OTHER ARTICLES OF PAPERPULP, PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBERS:Other :Pre-punched cards" +48239022,"OTHER PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBERS, CUT TO SIZE OR SHAPE; OTHER ARTICLES OF PAPERPULP, PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBERS:Other :Monotype and newstape paper in strips with perforated edges, not exceeding 15cm in width" +48239023,"OTHER PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBERS, CUT TO SIZE OR SHAPE; OTHER ARTICLES OF PAPERPULP, PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBERS:Other :Typewriting paper cut to size" +48239030,"OTHER PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBERS, CUT TO SIZE OR SHAPE; OTHER ARTICLES OF PAPERPULP, PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBERS:Other :Plain or embossed seals made of paper, laminated paper or paper gaskets" +48239090,"OTHER PAPER, PAPERBOARD, CELLULOSE WADDING AND WEBS OF CELLULOSE FIBERS, CUT TO SIZE OR SHAPE; OTHER ARTICLES OF PAPERPULP, PAPER, PAPERBOARD, CELLULOSE WADDING OR WEBS OF CELLULOSE FIBERS:Other :Other" +49010000,printed books brochures leaflets and similar printed matter whether or not in single sheets +49011000,printed books brochures leaflets and similar printed matter whether or not in single sheets >> in single sheets whether or not folded +49011010,"PRINTED BOOKS, BROCHURES, LEAFLETS AND SIMILAR PRINTED MATTER, WHETHER OR NOT IN SINGLE SHEETS:In single sheets, whether or not folded :Printed books" +49011020,"PRINTED BOOKS, BROCHURES, LEAFLETS AND SIMILAR PRINTED MATTER, WHETHER OR NOT IN SINGLE SHEETS:In single sheets, whether or not folded :Pamphlets, booklets, brochures, leaflets and similar printed matter" +49019100,"PRINTED BOOKS, BROCHURES, LEAFLETS AND SIMILAR PRINTED MATTER, WHETHER OR NOT IN SINGLE SHEETS::Dictionaries and encyclopaedias, and serial instalments thereof" +49019900,"PRINTED BOOKS, BROCHURES, LEAFLETS AND SIMILAR PRINTED MATTER, WHETHER OR NOT IN SINGLE SHEETS::Other" +49020000,newspapers journals and periodicals whether or not illustrated or containing advertising material +49021000,newspapers journals and periodicals whether or not illustrated or containing advertising material >> appearing at least four times a week +49021010,"NEWSPAPERS, JOURNALS AND PERIODICALS, WHETHER OR NOT ILLUSTRATED OR CONTAINING ADVERTISING MATERIAL:Appearing at least four times a week :Newspapers" +49021020,"NEWSPAPERS, JOURNALS AND PERIODICALS, WHETHER OR NOT ILLUSTRATED OR CONTAINING ADVERTISING MATERIAL:Appearing at least four times a week :Journals and periodicals" +49029000,newspapers journals and periodicals whether or not illustrated or containing advertising material >> other +49029010,"NEWSPAPERS, JOURNALS AND PERIODICALS, WHETHER OR NOT ILLUSTRATED OR CONTAINING ADVERTISING MATERIAL:Other :Newspapers" +49029020,"NEWSPAPERS, JOURNALS AND PERIODICALS, WHETHER OR NOT ILLUSTRATED OR CONTAINING ADVERTISING MATERIAL:Other :Journals and periodicals" +49030000,children picture drawing or colouring books children picture drawing or colouring books +49030010,"CHILDREN'S PICTURE, DRAWING OR COLOURING BOOKS:Children's picture, drawing or colouring books :Picture books" +49030020,"CHILDREN'S PICTURE, DRAWING OR COLOURING BOOKS:Children's picture, drawing or colouring books :Drawing or colouring books" +49040000,"::MUSIC, PRINTED OR IN MANUSCRIPT, WHETHER OR NOT BOUND OR ILLUSTRATED" +49050000,maps and hydrographic or similar charts of all including wall
kinds atlases maps topographical plans and globes printed +49051000,"MAPS AND HYDROGRAPHIC OR SIMILAR CHARTS OF ALL KINDS, INCLUDING ATLASES, WALL MAPS, TOPOGRAPHICAL PLANS AND GLOBES, PRINTED::Globes(OLD tariff)" +49052000,"MAPS AND HYDROGRAPHIC OR SIMILAR CHARTS OF ALL KINDS, INCLUDING ATLASES, WALL MAPS, TOPOGRAPHICAL PLANS AND GLOBES, PRINTED::In book form" +49059000,maps and hydrographic or similar charts of all including wall kinds atlases maps topographical plans and globes printed >> other +49059010,"MAPS AND HYDROGRAPHIC OR SIMILAR CHARTS OF ALL KINDS, INCLUDING ATLASES, WALL MAPS, TOPOGRAPHICAL PLANS AND GLOBES, PRINTED:Other:Geographical, hydrological, astronomical maps or chart" +49059020,"MAPS AND HYDROGRAPHIC OR SIMILAR CHARTS OF ALL KINDS, INCLUDING ATLASES, WALL MAPS, TOPOGRAPHICAL PLANS AND GLOBES, PRINTED:Other:Globe" +49059090,"MAPS AND HYDROGRAPHIC OR SIMILAR CHARTS OF ALL KINDS, INCLUDING ATLASES, WALL MAPS, TOPOGRAPHICAL PLANS AND GLOBES, PRINTED:Other:Other" +49059100,"MAPS AND HYDROGRAPHIC OR SIMILAR CHARTS OF ALL KINDS, INCLUDING ATLASES, WALL MAPS, TOPOGRAPHICAL PLANS AND GLOBES, PRINTED::In book form(OLD tariff)" +49059910,"MAPS AND HYDROGRAPHIC OR SIMILAR CHARTS OF ALL KINDS, INCLUDING ATLASES, WALL MAPS, TOPOGRAPHICAL PLANS AND GLOBES, PRINTED:Other:Geographical, hydrological, astronomical maps or charts(OLD tariff)" +49059990,"MAPS AND HYDROGRAPHIC OR SIMILAR CHARTS OF ALL KINDS, INCLUDING ATLASES, WALL MAPS, TOPOGRAPHICAL PLANS AND GLOBES, PRINTED:Other:Other(OLD tariff)" +49060000,"::PLANS AND DRAWINGS FOR ARCHITECTURAL, ENGINEERING, INDUSTRIAL, COMMERCIAL, TOPOGRAPHICAL OR SIMILAR PURPOSES, BEING ORIGINALS DRAWN BY HAND; HAND-WRITTEN TEXTS; PHOTOGRAPHIC REPRODUCTIONS ON SENSITISED PAPER AND CARBON COPIES OF THE FOREGOING" +49070000,unused postage revenue or similar stamps of current or new issue in the country in which they have or will have a recognized face value paper bank notes cheque forms stock share or bond certificates and similar documents of title unused postage revenue or similar stamps of current or new issue in the country in which they have or will have a recognized face value paper bank notes cheque forms stock share or bond certificates and similar documents of title +49070010,"UNUSED POSTAGE, REVENUE OR SIMILAR STAMPS OF CURRENT OR NEW ISSUE IN THE COUNTRY IN WHICH THEY HAVE, OR WILL HAVE, A RECOGNIZED FACE VALUE; STAMP-IMPRESSED PAPER; BANK NOTES; CHEQUE FORMS; STOCK, SHARE OR BOND CERTIFICATES AND SIMILAR DOCUMENTS OF TITLE:Unused postage, revenue or similar stamps of current or new issue in the country in which they have, or will have, a recognized face value; stamp-impressed paper; bank notes; cheque forms; stock, share or bond certificates and similar documents of title :Unused postage, revenue or similar stamps" +49070020,"UNUSED POSTAGE, REVENUE OR SIMILAR STAMPS OF CURRENT OR NEW ISSUE IN THE COUNTRY IN WHICH THEY HAVE, OR WILL HAVE, A RECOGNIZED FACE VALUE; STAMP-IMPRESSED PAPER; BANK NOTES; CHEQUE FORMS; STOCK, SHARE OR BOND CERTIFICATES AND SIMILAR DOCUMENTS OF TITLE:Unused postage, revenue or similar stamps of current or new issue in the country in which they have, or will have, a recognized face value; stamp-impressed paper; bank notes; cheque forms; stock, share or bond certificates and similar documents of title :Bank notes" +49070030,"UNUSED POSTAGE, REVENUE OR SIMILAR STAMPS OF CURRENT OR NEW ISSUE IN THE COUNTRY IN WHICH THEY HAVE, OR WILL HAVE, A RECOGNIZED FACE VALUE; STAMP-IMPRESSED PAPER; BANK NOTES; CHEQUE FORMS; 
STOCK, SHARE OR BOND CERTIFICATES AND SIMILAR DOCUMENTS OF TITLE:Unused postage, revenue or similar stamps of current or new issue in the country in which they have, or will have, a recognized face value; stamp-impressed paper; bank notes; cheque forms; stock, share or bond certificates and similar documents of title :Documents of title conveying the right to use Information Technology software" +49070090,"UNUSED POSTAGE, REVENUE OR SIMILAR STAMPS OF CURRENT OR NEW ISSUE IN THE COUNTRY IN WHICH THEY HAVE, OR WILL HAVE, A RECOGNIZED FACE VALUE; STAMP-IMPRESSED PAPER; BANK NOTES; CHEQUE FORMS; STOCK, SHARE OR BOND CERTIFICATES AND SIMILAR DOCUMENTS OF TITLE:Unused postage, revenue or similar stamps of current or new issue in the country in which they have, or will have, a recognized face value; stamp-impressed paper; bank notes; cheque forms; stock, share or bond certificates and similar documents of title :Other" +49080000,transfers decalcomanias +49081000,"TRANSFERS (DECALCOMANIAS)::Transfers (decalcomanias), vitrifiable" +49089000,TRANSFERS (DECALCOMANIAS)::Other +49090000,printed or illustrated postcards printed cards bearing personal greetings messages or announcements whether or not illustrated with or without envelopes or trimmings printed or illustrated postcards printed cards bearing personal greetings messages or announcements whether or not illustrated with or without envelopes or trimmings +49090010,"PRINTED OR ILLUSTRATED POSTCARDS; PRINTED CARDS BEARING PERSONAL GREETINGS, MESSAGES OR ANNOUNCEMENTS, WHETHER OR NOT ILLUSTRATED, WITH OR WITHOUT ENVELOPES OR TRIMMINGS:Printed or illustrated postcards; printed cards bearing personal greetings, messages or announcements, whether or not illustrated, with or without envelopes or trimmings :GREETING OR WEDDING CARDS" +49090090,"PRINTED OR ILLUSTRATED POSTCARDS; PRINTED CARDS BEARING PERSONAL GREETINGS, MESSAGES OR ANNOUNCEMENTS, WHETHER OR NOT ILLUSTRATED, WITH OR WITHOUT ENVELOPES OR TRIMMINGS:Printed or illustrated postcards; printed cards bearing personal greetings, messages or announcements, whether or not illustrated, with or without envelopes or trimmings :Other" +49100000,calendars of any kind printed including calendar blocks calendars of any kind printed including calendar blocks +49100010,"CALENDARS OF ANY KIND, PRINTED, INCLUDING CALENDAR BLOCKS:Calendars of any kind, printed, including calendar blocks :Advertising calendar" +49100090,"CALENDARS OF ANY KIND, PRINTED, INCLUDING CALENDAR BLOCKS:Calendars of any kind, printed, including calendar blocks :Other" +49110000,other printed matter including printed pictures and photographs +49111000,other printed matter including printed pictures and photographs >> trade advertising material commercial catalogues and the like +49111010,"OTHER PRINTED MATTER, INCLUDING PRINTED PICTURES AND PHOTOGRAPHS:Trade advertising material, commercial catalogues and the like :Posters, printed" +49111020,"OTHER PRINTED MATTER, INCLUDING PRINTED PICTURES AND PHOTOGRAPHS:Trade advertising material, commercial catalogues and the like :Commercial catalogues" +49111030,"OTHER PRINTED MATTER, INCLUDING PRINTED PICTURES AND PHOTOGRAPHS:Trade advertising material, commercial catalogues and the like :Printed inlay cards" +49111090,"OTHER PRINTED MATTER, INCLUDING PRINTED PICTURES AND PHOTOGRAPHS:Trade advertising material, commercial catalogues and the like :Other" +49119100,"OTHER PRINTED MATTER, INCLUDING PRINTED PICTURES AND PHOTOGRAPHS::Pictures, designs and photographs" +49119900,other printed matter 
including printed pictures and photographs >> other +49119910,"OTHER PRINTED MATTER, INCLUDING PRINTED PICTURES AND PHOTOGRAPHS:Other :Hard copy (printed) of computer software" +49119920,"OTHER PRINTED MATTER, INCLUDING PRINTED PICTURES AND PHOTOGRAPHS:Other :Plan and drawings for architectural engineering, industrial, commercial, topographical or similar purposes reproduced with the aid of computer or any other devices" +49119990,"OTHER PRINTED MATTER, INCLUDING PRINTED PICTURES AND PHOTOGRAPHS:Other :Other" +50010000,::SILK-WORM COCOONS SUITABLE FOR REELING +50020000,raw silk not thrown raw silk not thrown +50020010,RAW SILK (NOT THROWN):Raw silk (not thrown) :Mulberry raw silk +50020020,RAW SILK (NOT THROWN):Raw silk (not thrown) :Mulberry dupion silk +50020030,RAW SILK (NOT THROWN):Raw silk (not thrown) :Non-mulberry silk +50030000,silk waste including cocoons unsuitable for reeling yarn waste and garnetted stock silk waste including cocoons unsuitable for reeling yarn waste and garneted stock +50030010,"SILK WASTE (INCLUDING COCOONS UNSUITABLE FOR REELING, YARN WASTE AND GARNETTED STOCK):Silk waste (including cocoons unsuitable for reeling, yarn waste and garneted stock):Mulberry silk waste" +50030020,"SILK WASTE (INCLUDING COCOONS UNSUITABLE FOR REELING, YARN WASTE AND GARNETTED STOCK):Silk waste (including cocoons unsuitable for reeling, yarn waste and garneted stock):Tussar silk waste" +50030030,"SILK WASTE (INCLUDING COCOONS UNSUITABLE FOR REELING, YARN WASTE AND GARNETTED STOCK):Silk waste (including cocoons unsuitable for reeling, yarn waste and garneted stock):Eri waste" +50030040,"SILK WASTE (INCLUDING COCOONS UNSUITABLE FOR REELING, YARN WASTE AND GARNETTED STOCK):Silk waste (including cocoons unsuitable for reeling, yarn waste and garneted stock):Munga waste" +50030090,"SILK WASTE (INCLUDING COCOONS UNSUITABLE FOR REELING, YARN WASTE AND GARNETTED STOCK):Silk waste (including cocoons unsuitable for reeling, yarn waste and garneted stock):Other" +50040000,silk yarn other than yarn spun from silk waste not put up for retail sale silk yarn other than yarn spun from silk waste not put up for retail sale +50040010,SILK YARN (OTHER THAN YARN SPUN FROM SILK WASTE) NOT PUT UP FOR RETAIL SALE:Silk yarn (other than yarn spun from silk waste) not put up for retail sale :100% mulberry dupion silk yarn +50040090,SILK YARN (OTHER THAN YARN SPUN FROM SILK WASTE) NOT PUT UP FOR RETAIL SALE:Silk yarn (other than yarn spun from silk waste) not put up for retail sale :Other +50050000,yarn spun from silk waste not put up for retail sale yarn spun from silk waste not put up for retail sale containing 85 or more by weight of silk waste +50050011,"YARN SPUN FROM SILK WASTE, NOT PUT UP FOR RETAIL SALE:Yarn spun from silk waste, not put up for retail sale :Other than noil silk" +50050012,"YARN SPUN FROM SILK WASTE, NOT PUT UP FOR RETAIL SALE:Yarn spun from silk waste, not put up for retail sale :From noil silk" +50050021,"YARN SPUN FROM SILK WASTE, NOT PUT UP FOR RETAIL SALE:Yarn spun from silk waste, not put up for retail sale :Other than noil silk" +50050022,"YARN SPUN FROM SILK WASTE, NOT PUT UP FOR RETAIL SALE:Yarn spun from silk waste, not put up for retail sale :From noil silk" +50060000,silk yarn and yarn spun from silk waste put up for retail sale gut silk yarn and yarn spun from silk waste put up for retail sale silk worm gut silk yarn +50060011,"SILK YARN AND YARN SPUN FROM SILK WASTE, PUT UP FOR RETAIL SALE; SILK-WORM GUT:Silk yarn and yarn spun from silk waste, put up for retail sale; 
silk worm gut :Silk embroidery thread" +50060019,"SILK YARN AND YARN SPUN FROM SILK WASTE, PUT UP FOR RETAIL SALE; SILK-WORM GUT:Silk yarn and yarn spun from silk waste, put up for retail sale; silk worm gut :Other" +50060021,"SILK YARN AND YARN SPUN FROM SILK WASTE, PUT UP FOR RETAIL SALE; SILK-WORM GUT:Silk yarn and yarn spun from silk waste, put up for retail sale; silk worm gut :Silk embroidery thread" +50060029,"SILK YARN AND YARN SPUN FROM SILK WASTE, PUT UP FOR RETAIL SALE; SILK-WORM GUT:Silk yarn and yarn spun from silk waste, put up for retail sale; silk worm gut :Other" +50060031,"SILK YARN AND YARN SPUN FROM SILK WASTE, PUT UP FOR RETAIL SALE; SILK-WORM GUT:Silk yarn and yarn spun from silk waste, put up for retail sale; silk worm gut :Spun from silk waste other than noil silk" +50060032,"SILK YARN AND YARN SPUN FROM SILK WASTE, PUT UP FOR RETAIL SALE; SILK-WORM GUT:Silk yarn and yarn spun from silk waste, put up for retail sale; silk worm gut :Yarn spun from noil silk" +50060033,"SILK YARN AND YARN SPUN FROM SILK WASTE, PUT UP FOR RETAIL SALE; SILK-WORM GUT:Silk yarn and yarn spun from silk waste, put up for retail sale; silk worm gut :Silk embroidery thread" +50060039,"SILK YARN AND YARN SPUN FROM SILK WASTE, PUT UP FOR RETAIL SALE; SILK-WORM GUT:Silk yarn and yarn spun from silk waste, put up for retail sale; silk worm gut :Other" +50060090,"SILK YARN AND YARN SPUN FROM SILK WASTE, PUT UP FOR RETAIL SALE; SILK-WORM GUT:Silk yarn and yarn spun from silk waste, put up for retail sale; silk worm gut :Other" +50070000,woven fabrics of silk or of silk waste +50071000,Woven fabrics of silk or of silk waste::Fabrics of noil silk +50072000,woven fabrics of silk or of silk waste >> other fabrics containing 85 or more by weight of silk or of silk waste other than noil silk +50072010,"Woven fabrics of silk or of silk waste:Other fabrics, containing 85% or more by weight of silk or of silk waste other than noil silk :Sarees" +50072090,"Woven fabrics of silk or of silk waste:Other fabrics, containing 85% or more by weight of silk or of silk waste other than noil silk :Other" +50079000,Woven fabrics of silk or of silk waste:Other:Other fabrics +50079010,Woven fabrics of silk or of silk waste:Other:Of Handloom woven +50079090,Woven fabrics of silk or of silk waste:Other:Other Fabrics +50110000,lard +51010000,wool not carded or combed greasy including wool +51011100,"WOOL, NOT CARDED OR COMBED ::Shorn wool" +51011900,"WOOL, NOT CARDED OR COMBED ::Other" +51012100,"WOOL, NOT CARDED OR COMBED ::Shorn wool" +51012900,"WOOL, NOT CARDED OR COMBED ::Other" +51013000,"WOOL, NOT CARDED OR COMBED ::Carbonised" +51020000,fine or coarse animal hair not carded or combed fine animal hair +51021100,fine or coarse animal hair not carded or combed fine animal hair >> of kashmir cashmere goats +51021110,"FINE OR COARSE ANIMAL HAIR, NOT CARDED OR COMBED:Of Kashmir (cashmere) goats :Marine Angora" +51021190,"FINE OR COARSE ANIMAL HAIR, NOT CARDED OR COMBED:Of Kashmir (cashmere) goats :Other" +51021900,fine or coarse animal hair not carded or combed fine animal hair >> other +51021910,"FINE OR COARSE ANIMAL HAIR, NOT CARDED OR COMBED:Other :Marine Angora" +51021990,"FINE OR COARSE ANIMAL HAIR, NOT CARDED OR COMBED:Other :Other" +51022000,fine or coarse animal hair not carded or combed fine animal hair >> coarse animal hair +51022010,"FINE OR COARSE ANIMAL HAIR, NOT CARDED OR COMBED:Coarse animal hair :Goat hair (other than Angora)" +51022090,"FINE OR COARSE ANIMAL HAIR, NOT CARDED OR COMBED:Coarse animal
hair :Other (excluding pig and boar bristles)" +51030000,waste of wool or of fine or coarse animal hair including yarn waste but excluding garnetted stock +51031000,waste of wool or of fine or coarse animal hair including yarn waste but excluding garnetted stock >> noils of wool or of fine animal hair +51031010,"WASTE OF WOOL OR OF FINE OR COARSE ANIMAL HAIR, INCLUDING YARN WASTE BUT EXCLUDING GARNETTED STOCK:Noils of wool or of fine animal hair :Noils of wool" +51031090,"WASTE OF WOOL OR OF FINE OR COARSE ANIMAL HAIR, INCLUDING YARN WASTE BUT EXCLUDING GARNETTED STOCK:Noils of wool or of fine animal hair :Other" +51032000,waste of wool or of fine or coarse animal hair including yarn waste but excluding garnetted stock >> other waste of wool or of fine animal hair +51032010,"WASTE OF WOOL OR OF FINE OR COARSE ANIMAL HAIR, INCLUDING YARN WASTE BUT EXCLUDING GARNETTED STOCK:Other waste of wool or of fine animal hair :Waste of sheep's and lamb's wool" +51032020,"WASTE OF WOOL OR OF FINE OR COARSE ANIMAL HAIR, INCLUDING YARN WASTE BUT EXCLUDING GARNETTED STOCK:Other waste of wool or of fine animal hair :Yarn waste" +51032090,"WASTE OF WOOL OR OF FINE OR COARSE ANIMAL HAIR, INCLUDING YARN WASTE BUT EXCLUDING GARNETTED STOCK:Other waste of wool or of fine animal hair :Other" +51033000,"WASTE OF WOOL OR OF FINE OR COARSE ANIMAL HAIR, INCLUDING YARN WASTE BUT EXCLUDING GARNETTED STOCK::Waste of coarse animal hair" +51040000,garnetted stock of wool or of fine or coarse animal hair garnetted stock of wool or of fine or coarse animal hair +51040010,GARNETTED STOCK OF WOOL OR OF FINE OR COARSE ANIMAL HAIR:Garnetted stock of wool or of fine or coarse animal hair :Shoddy wool +51040090,GARNETTED STOCK OF WOOL OR OF FINE OR COARSE ANIMAL HAIR:Garnetted stock of wool or of fine or coarse animal hair :Other +51050000,wool and fine or coarse animal hair carded or combed including combed wool in fragments +51051000,"WOOL AND FINE OR COARSE ANIMAL HAIR, CARDED OR COMBED (INCLUDING COMBED WOOL IN FRAGMENTS)::Carded wool" +51052100,"WOOL AND FINE OR COARSE ANIMAL HAIR, CARDED OR COMBED (INCLUDING COMBED WOOL IN FRAGMENTS)::Combed wool in fragments" +51052900,wool and fine or coarse animal hair carded or combed including combed wool in fragments >> other +51052910,"WOOL AND FINE OR COARSE ANIMAL HAIR, CARDED OR COMBED (INCLUDING COMBED WOOL IN FRAGMENTS):Other :Wool tops" +51052990,"WOOL AND FINE OR COARSE ANIMAL HAIR, CARDED OR COMBED (INCLUDING COMBED WOOL IN FRAGMENTS):Other :Other" +51053100,"WOOL AND FINE OR COARSE ANIMAL HAIR, CARDED OR COMBED (INCLUDING COMBED WOOL IN FRAGMENTS)::Of Kashmir (cashmere) goats" +51053900,"WOOL AND FINE OR COARSE ANIMAL HAIR, CARDED OR COMBED (INCLUDING COMBED WOOL IN FRAGMENTS)::Other" +51054000,"WOOL AND FINE OR COARSE ANIMAL HAIR, CARDED OR COMBED (INCLUDING COMBED WOOL IN FRAGMENTS)::Coarse animal hair, carded or combed" +51060000,yarn of carded wool not put up for retail sale +51061000,yarn of carded wool not put up for retail sale >> containing 85 or more by weight of wool +51061010,"YARN OF CARDED WOOL, NOT PUT UP FOR RETAIL SALE:Containing 85% or more by weight of wool :Shoddy woollen yarn" +51061020,"YARN OF CARDED WOOL, NOT PUT UP FOR RETAIL SALE:Containing 85% or more by weight of wool :Munga woollen yarn" +51061090,"YARN OF CARDED WOOL, NOT PUT UP FOR RETAIL SALE:Containing 85% or more by weight of wool :Other" +51062000,yarn of carded wool not put up for retail sale >> containing less than 85 by weight of wool +51062010,"YARN OF CARDED WOOL, NOT PUT UP FOR RETAIL 
SALE:Containing less than 85% by weight of wool :Shoddy woollen yarn" +51062020,"YARN OF CARDED WOOL, NOT PUT UP FOR RETAIL SALE:Containing less than 85% by weight of wool :Munga woollen yarn" +51062090,"YARN OF CARDED WOOL, NOT PUT UP FOR RETAIL SALE:Containing less than 85% by weight of wool :Other" +51070000,yarn of combed wool not put up for retail sale +51071000,yarn of combed wool not put up for retail sale >> containing 85 or more by weight of wool +51071010,"YARN OF COMBED WOOL, NOT PUT UP FOR RETAIL SALE:CONTAINING 85% OR MORE BY WEIGHT OF WOOL :Worsted hosiery yarn" +51071020,"YARN OF COMBED WOOL, NOT PUT UP FOR RETAIL SALE:CONTAINING 85% OR MORE BY WEIGHT OF WOOL :Worsted knitted yarn" +51071030,"YARN OF COMBED WOOL, NOT PUT UP FOR RETAIL SALE:CONTAINING 85% OR MORE BY WEIGHT OF WOOL :Worsted weaving yarn" +51071040,"YARN OF COMBED WOOL, NOT PUT UP FOR RETAIL SALE:CONTAINING 85% OR MORE BY WEIGHT OF WOOL :Woollen carpet yarn" +51071090,"YARN OF COMBED WOOL, NOT PUT UP FOR RETAIL SALE:CONTAINING 85% OR MORE BY WEIGHT OF WOOL :Other" +51072000,yarn of combed wool not put up for retail sale >> +51072010,"YARN OF COMBED WOOL, NOT PUT UP FOR RETAIL SALE:Containing less than 85% by weight of wool :Worsted hosiery yarn" +51072020,"YARN OF COMBED WOOL, NOT PUT UP FOR RETAIL SALE:Containing less than 85% by weight of wool :Worsted knitted yarn" +51072030,"YARN OF COMBED WOOL, NOT PUT UP FOR RETAIL SALE:Containing less than 85% by weight of wool :Worsted weaving yarn" +51072040,"YARN OF COMBED WOOL, NOT PUT UP FOR RETAIL SALE:Containing less than 85% by weight of wool :Woollen carpet yarn" +51072090,"YARN OF COMBED WOOL, NOT PUT UP FOR RETAIL SALE:Containing less than 85% by weight of wool :Other" +51080000,yarn of fine animal hair carded or combed not put up for retail sale +51081000,"YARN OF FINE ANIMAL HAIR (CARDED OR COMBED), NOT PUT UP FOR RETAIL SALE::Carded" +51082000,"YARN OF FINE ANIMAL HAIR (CARDED OR COMBED), NOT PUT UP FOR RETAIL SALE::Combed" +51090000,yarn of wool or fine animal hair put up for retail sale +51091000,yarn of wool or fine animal hair put up for retail sale >> containing 85 or more by weight of wool or of fine animal hair +51091010,"YARN OF WOOL OR FINE ANIMAL HAIR, PUT UP FOR RETAIL SALE:Containing 85% or more by weight of wool or of fine animal hair :Hoisery wool" +51091090,"YARN OF WOOL OR FINE ANIMAL HAIR, PUT UP FOR RETAIL SALE:Containing 85% or more by weight of wool or of fine animal hair :Other" +51099000,"YARN OF WOOL OR FINE ANIMAL HAIR, PUT UP FOR RETAIL SALE::Other" +51100000,yarn of coarse animal hair or of horse hair including gimped horsehair yarn whether or not put up for retail sale yarn of coarse animal hair or of horse hair including gimped horsehair yarn whether or not put up for retail sale +51100010,"YARN OF COARSE ANIMAL HAIR OR OF HORSE HAIR (INCLUDING GIMPED HORSEHAIR YARN), WHETHER OR NOT PUT UP FOR RETAIL SALE:Yarn of coarse animal hair or of horsehair (including gimped horsehair yarn), whether or not put up for retail sale :Put up for retail sale" +51100020,"YARN OF COARSE ANIMAL HAIR OR OF HORSE HAIR (INCLUDING GIMPED HORSEHAIR YARN), WHETHER OR NOT PUT UP FOR RETAIL SALE:Yarn of coarse animal hair or of horsehair (including gimped horsehair yarn), whether or not put up for retail sale :Other" +51110000,woven fabrics of carded wool or of carded fine animal hair containing 85 or more by weight of wool or of fine animal hair +51111100,woven fabrics of carded wool or of carded fine animal hair containing 85 or more by weight of wool or 
of fine animal hair >> of a weight not exceeding 300 g m2 +51111110,WOVEN FABRICS OF CARDED WOOL OR OF CARDED FINE ANIMAL HAIR:Of a weight not exceeding 300 g/ m2 :Unbleached +51111120,WOVEN FABRICS OF CARDED WOOL OR OF CARDED FINE ANIMAL HAIR:Of a weight not exceeding 300 g/ m2 :Bleached +51111130,WOVEN FABRICS OF CARDED WOOL OR OF CARDED FINE ANIMAL HAIR:Of a weight not exceeding 300 g/ m2 :Dyed +51111140,WOVEN FABRICS OF CARDED WOOL OR OF CARDED FINE ANIMAL HAIR:Of a weight not exceeding 300 g/ m2 :Printed +51111190,WOVEN FABRICS OF CARDED WOOL OR OF CARDED FINE ANIMAL HAIR:Of a weight not exceeding 300 g/ m2 :Other +51111910,WOVEN FABRICS OF CARDED WOOL OR OF CARDED FINE ANIMAL HAIR:Other :Unbleached(OLD tariff) +51111920,WOVEN FABRICS OF CARDED WOOL OR OF CARDED FINE ANIMAL HAIR:Other :Bleached(OLD tariff) +51111930,WOVEN FABRICS OF CARDED WOOL OR OF CARDED FINE ANIMAL HAIR:Other :Dyed(OLD tariff) +51111940,WOVEN FABRICS OF CARDED WOOL OR OF CARDED FINE ANIMAL HAIR:Other :Printed(OLD tariff) +51111990,WOVEN FABRICS OF CARDED WOOL OR OF CARDED FINE ANIMAL HAIR:Other :Other(OLD tariff) +51112000,woven fabrics of carded wool or of carded fine animal hair containing 85 or more by weight of wool or of fine animal hair >> other mixed mainly or solely with filaments +51112010,"WOVEN FABRICS OF CARDED WOOL OR OF CARDED FINE ANIMAL HAIR:Other, mixed mainly or solely with man-made filaments :Unbleached" +51112020,"WOVEN FABRICS OF CARDED WOOL OR OF CARDED FINE ANIMAL HAIR:Other, mixed mainly or solely with man-made filaments :Bleached" +51112030,"WOVEN FABRICS OF CARDED WOOL OR OF CARDED FINE ANIMAL HAIR:Other, mixed mainly or solely with man-made filaments :Dyed" +51112040,"WOVEN FABRICS OF CARDED WOOL OR OF CARDED FINE ANIMAL HAIR:Other, mixed mainly or solely with man-made filaments :Printed" +51112090,"WOVEN FABRICS OF CARDED WOOL OR OF CARDED FINE ANIMAL HAIR:Other, mixed mainly or solely with man-made filaments :Other" +51113000,woven fabrics of carded wool or of carded fine animal hair containing 85 or more by weight of wool or of fine animal hair >> other mixed mainly or solely with staple fibres +51113010,"WOVEN FABRICS OF CARDED WOOL OR OF CARDED FINE ANIMAL HAIR:Other, mixed mainly or solely with man-made staple fibres :Unbleached" +51113020,"WOVEN FABRICS OF CARDED WOOL OR OF CARDED FINE ANIMAL HAIR:Other, mixed mainly or solely with man-made staple fibres :Bleached" +51113030,"WOVEN FABRICS OF CARDED WOOL OR OF CARDED FINE ANIMAL HAIR:Other, mixed mainly or solely with man-made staple fibres :Dyed" +51113040,"WOVEN FABRICS OF CARDED WOOL OR OF CARDED FINE ANIMAL HAIR:Other, mixed mainly or solely with man-made staple fibres :Printed" +51113090,"WOVEN FABRICS OF CARDED WOOL OR OF CARDED FINE ANIMAL HAIR:Other, mixed mainly or solely with man-made staple fibres :Other" +51119010,WOVEN FABRICS OF CARDED WOOL OR OF CARDED FINE ANIMAL HAIR:Other :Unbleached(OLD tariff) +51119020,WOVEN FABRICS OF CARDED WOOL OR OF CARDED FINE ANIMAL HAIR:Other :Bleached(OLD tariff) +51119030,WOVEN FABRICS OF CARDED WOOL OR OF CARDED FINE ANIMAL HAIR:Other :Dyed(OLD tariff) +51119040,WOVEN FABRICS OF CARDED WOOL OR OF CARDED FINE ANIMAL HAIR:Other :Printed(OLD tariff) +51119090,WOVEN FABRICS OF CARDED WOOL OR OF CARDED FINE ANIMAL HAIR:Other :Other(OLD tariff) +51120000,woven fabrics of combed wool or of combed fine animal hair containing 85 or more by weight of wool or of fine animal hair +51121100,woven fabrics of combed wool or of combed fine animal hair containing 85 or more by weight of wool or of 
fine animal hair >> of a weight not exceeding 200 +51121110,Woven fabrics of combed wool or of combed fine animal hair:Of a weight not exceeding 200 g/m2 :Unbleached +51121120,Woven fabrics of combed wool or of combed fine animal hair:Of a weight not exceeding 200 g/m2 :Bleached +51121130,Woven fabrics of combed wool or of combed fine animal hair:Of a weight not exceeding 200 g/m2 :Dyed +51121140,Woven fabrics of combed wool or of combed fine animal hair:Of a weight not exceeding 200 g/m2 :Printed +51121190,Woven fabrics of combed wool or of combed fine animal hair:Of a weight not exceeding 200 g/m2 :Other +51121900,woven fabrics of combed wool or of combed fine animal hair containing 85 or more by weight of wool or of fine animal hair >> other +51121910,Woven fabrics of combed wool or of combed fine animal hair:Other :Unbleached +51121920,Woven fabrics of combed wool or of combed fine animal hair:Other :Bleached +51121930,Woven fabrics of combed wool or of combed fine animal hair:Other :Dyed +51121940,Woven fabrics of combed wool or of combed fine animal hair:Other :Printed +51121990,Woven fabrics of combed wool or of combed fine animal hair:Other :Other +51122000,woven fabrics of combed wool or of combed fine animal hair containing 85 or more by weight of wool or of fine animal hair >> other mixed mainly or solely with +51122010,"Woven fabrics of combed wool or of combed fine animal hair:Other, mixed mainly or solely with man-made filaments :Unbleached" +51122020,"Woven fabrics of combed wool or of combed fine animal hair:Other, mixed mainly or solely with man-made filaments :Bleached" +51122030,"Woven fabrics of combed wool or of combed fine animal hair:Other, mixed mainly or solely with man-made filaments :Dyed" +51122040,"Woven fabrics of combed wool or of combed fine animal hair:Other, mixed mainly or solely with man-made filaments :Printed" +51122090,"Woven fabrics of combed wool or of combed fine animal hair:Other, mixed mainly or solely with man-made filaments :Other" +51123000,woven fabrics of combed wool or of combed fine animal hair containing 85 or more by weight of wool or of fine animal hair >> other mixed mainly or solely with staple fibres +51123010,"Woven fabrics of combed wool or of combed fine animal hair:Other, mixed mainly or solely with man-made staple fibres :Unbleached" +51123020,"Woven fabrics of combed wool or of combed fine animal hair:Other, mixed mainly or solely with man-made staple fibres :Bleached" +51123030,"Woven fabrics of combed wool or of combed fine animal hair:Other, mixed mainly or solely with man-made staple fibres :Dyed" +51123040,"Woven fabrics of combed wool or of combed fine animal hair:Other, mixed mainly or solely with man-made staple fibres :Printed" +51123090,"Woven fabrics of combed wool or of combed fine animal hair:Other, mixed mainly or solely with man-made staple fibres :Other" +51129000,woven fabrics of combed wool or of combed fine animal hair containing 85 or more by weight of wool or of fine animal hair >> other +51129010,Woven fabrics of combed wool or of combed fine animal hair:Other:Unbleached +51129020,Woven fabrics of combed wool or of combed fine animal hair:Other:Bleached +51129030,Woven fabrics of combed wool or of combed fine animal hair:Other:Dyed +51129040,Woven fabrics of combed wool or of combed fine animal hair:Other:Printed +51129050,Woven fabrics of combed wool or of combed fine animal hair:Other:Of Handloom +51129090,Woven fabrics of combed wool or of combed fine animal hair:Other:Other +51130000,woven fabrics of
coarse animal hair or of horse hair woven fabrics of coarse animal hair or of horse hair +51130010,WOVEN FABRICS OF COARSE ANIMAL HAIR OR of horse hair:Woven fabrics of coarse animal hair or of horse hair :Unbleached +51130020,WOVEN FABRICS OF COARSE ANIMAL HAIR OR of horse hair:Woven fabrics of coarse animal hair or of horse hair :Bleached +51130030,WOVEN FABRICS OF COARSE ANIMAL HAIR OR of horse hair:Woven fabrics of coarse animal hair or of horse hair :Dyed +51130040,WOVEN FABRICS OF COARSE ANIMAL HAIR OR of horse hair:Woven fabrics of coarse animal hair or of horse hair :Printed +51130090,WOVEN FABRICS OF COARSE ANIMAL HAIR OR of horse hair:Woven fabrics of coarse animal hair or of horse hair :Other +52010000,cotton not carded or combed cotton not carded or combed indian cotton +52010011,"COTTON, NOT CARDED OR COMBED:Cotton, not carded or combed :Bengal deshi" +52010012,"COTTON, NOT CARDED OR COMBED:Cotton, not carded or combed :Indian cotton of staple lengths 20.5 mm (25/32"") and below (e. g. oomras, yellow picking, Assam comillas)" +52010013,"COTTON, NOT CARDED OR COMBED:Cotton, not carded or combed :Indian cotton of staple length exceeding 20.5mm (26/32"") but not exceeding 24.5mm (30/32"")" +52010014,"COTTON, NOT CARDED OR COMBED:Cotton, not carded or combed :Indian cotton of staple length over 24.5 mm (31/32"") to 28 mm" +52010015,"COTTON, NOT CARDED OR COMBED:Cotton, not carded or combed :Indian cotton of staple length 28.5 mm (14/32"") and above but below 34.5 mm" +52010019,"COTTON, NOT CARDED OR COMBED:Cotton, not carded or combed :Indian cotton of all staple length 34.5 mm and above (112/32"")" +52010020,"COTTON, NOT CARDED OR COMBED:Cotton, not carded or combed :Cotton, other than Indian, of all staple lengths(OLD tariff)" +52010021,cotton not carded or combed cotton not carded or combed indian cotton >> of staple length not exceeding mm kg +52010022,cotton not carded or combed cotton not carded or combed indian cotton >> of staple length exceeding mm but not kg exceeding mm +52010023,cotton not carded or combed cotton not carded or combed indian cotton >> of staple length exceeding mm but not kg exceeding mm +52010024,cotton not carded or combed cotton not carded or combed indian cotton >> of staple length exceeding mm but not kg exceeding mm +52010025,cotton not carded or combed cotton not carded or combed indian cotton >> of staple length exceeding mm kg +52020000,cotton waste including yarn waste and garnetted stock +52021000,COTTON WASTE (INCLUDING YARN WASTE AND GARNETTED STOCK)::Yarn waste (including thread waste) +52029100,COTTON WASTE (INCLUDING YARN WASTE AND GARNETTED STOCK)::Garnetted stock +52029900,COTTON WASTE (INCLUDING YARN WASTE AND GARNETTED STOCK)::Other +52030000,"::COTTON, CARDED OR COMBED" +52040000,cotton sewing thread whether or not put up f or retail sale not put up for retail sale +52041100,cotton sewing thread whether or not put up f or retail sale not put up for retail sale >> containing 85 or more by weight of cotton +52041110,"COTTON SEWING THREAD, WHETHER OR NOT PUT UP FOR RETAIL SALE:Containing 85% or more by weight of cotton :Cotton thread, sewing,containing any synthetic staple fibre" +52041120,"COTTON SEWING THREAD, WHETHER OR NOT PUT UP FOR RETAIL SALE:Containing 85% or more by weight of cotton :Cotton thread, darning" +52041130,"COTTON SEWING THREAD, WHETHER OR NOT PUT UP FOR RETAIL SALE:Containing 85% or more by weight of cotton :Embroidery cotton thread(OLD tariff)" +52041140,"COTTON SEWING THREAD, WHETHER OR NOT PUT UP FOR RETAIL 
SALE:Containing 85% or more by weight of cotton :Cotton sewing thread, not containing any synthetic staple fibre(OLD tariff)" +52041190,"COTTON SEWING THREAD, WHETHER OR NOT PUT UP FOR RETAIL SALE:Containing 85% or more by weight of cotton :Other" +52041900,"COTTON SEWING THREAD, WHETHER OR NOT PUT UP FOR RETAIL SALE::Other" +52042000,cotton sewing thread whether or not put up f or retail sale not put up for retail sale >> put up for retail sale put up for retail sale +52042010,"COTTON SEWING THREAD, WHETHER OR NOT PUT UP FOR RETAIL SALE:Put up for retail sale :Cotton thread, sewing, containing any synthetic staple fibre" +52042020,"COTTON SEWING THREAD, WHETHER OR NOT PUT UP FOR RETAIL SALE:Put up for retail sale :Cotton thread, darning" +52042030,"COTTON SEWING THREAD, WHETHER OR NOT PUT UP FOR RETAIL SALE:Put up for retail sale :Embroidery cotton thread" +52042040,"COTTON SEWING THREAD, WHETHER OR NOT PUT UP FOR RETAIL SALE:Put up for retail sale :Cotton sewing thread, not containing any synthetic staple fibre" +52042090,"COTTON SEWING THREAD, WHETHER OR NOT PUT UP FOR RETAIL SALE:Put up for retail sale :Other" +52050000,cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres +52051100,cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres >> measuring decitex or more not exceeding 14 metric number measuring decitex or more not exceeding 14 metric number +52051110,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring 714.29 decitex or more (not exceeding 14 metric number) :Grey" +52051120,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring 714.29 decitex or more (not exceeding 14 metric number) :Bleached" +52051130,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring 714.29 decitex or more (not exceeding 14 metric number) :Dyed" +52051190,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring 714.29 decitex or more (not exceeding 14 metric number) :Other" +52051200,cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres >> measuring less than decitex but not less than decitex exceeding 14 metric number but not exceeding 43 metric number measuring less than decitex but not less than decitex exceeding 14 metric number but not exceeding 43 metric number +52051210,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 714.29 decitex but not less than 232.56 decitex (exceeding 14 metric number but not exceeding 43 metric number) :Grey" +52051220,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 714.29 decitex but not less 
than 232.56 decitex (exceeding 14 metric number but not exceeding 43 metric number) :Bleached" +52051230,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 714.29 decitex but not less than 232.56 decitex (exceeding 14 metric number but not exceeding 43 metric number) :Dyed" +52051290,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 714.29 decitex but not less than 232.56 decitex (exceeding 14 metric number but not exceeding 43 metric number) :Other" +52051300,cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres >> measuring less than decitex but not less than 192 31 decitex exceeding 43 metric number but not exceeding 52 metric number measuring less than decitex but not less than 192 31 decitex exceeding 43 metric number but not exceeding 52 metric number +52051310,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 232.56 decitex but not less than 192 .31 decitex (exceeding 43 metric number but not exceeding 52 metric number) :Grey" +52051320,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 232.56 decitex but not less than 192 .31 decitex (exceeding 43 metric number but not exceeding 52 metric number) :Bleached" +52051330,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 232.56 decitex but not less than 192 .31 decitex (exceeding 43 metric number but not exceeding 52 metric number) :Dyed" +52051390,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 232.56 decitex but not less than 192 .31 decitex (exceeding 43 metric number but not exceeding 52 metric number) :Other" +52051400,cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres >> measuring less than decitex but not less than 125 decitex exceeding 52 metric synthetic staple fibre measuring less than decitex but not less than 125 decitex exceeding 52 metric number but not exceeding 80 metric number +52051410,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 192.31 decitex but not less than 125 decitex (exceeding 52 metric number but not exceeding 80 metric number) :Grey" +52051420,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 192.31 decitex but not less than 125 decitex (exceeding 52 metric number but not exceeding 80 metric number) :Bleached" +52051430,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 192.31 decitex but not less than 125 decitex (exceeding 52 metric number but not exceeding 80 metric number) :Dyed" +52051490,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 
85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 192.31 decitex but not less than 125 decitex (exceeding 52 metric number but not exceeding 80 metric number) :Other" +52051500,cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres >> measuring less than 125 decitex exceeding 80 metric number +52051510,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 125 decitex (exceeding 80 metric number) :Grey" +52051520,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 125 decitex (exceeding 80 metric number) :Bleached" +52051530,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 125 decitex (exceeding 80 metric number) :Dyed" +52051590,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 125 decitex (exceeding 80 metric number) :Other" +52052100,cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres >> measuring decitex or more not exceeding 14 metric number +52052110,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring 714.29 decitex or more (not exceeding 14 metric number) :Grey" +52052120,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring 714.29 decitex or more (not exceeding 14 metric number) :Bleached" +52052130,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring 714.29 decitex or more (not exceeding 14 metric number) :Dyed" +52052190,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring 714.29 decitex or more (not exceeding 14 metric number) :Other" +52052200,cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres >> measuring less than decitex but not less than decitex exceeding 14 metric number but not exceeding 43 metric number +52052210,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 714.29 decitex but not less than 232.56 decitex (exceeding 14 metric number but not exceeding 43 metric number) :Grey" +52052220,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 714.29 decitex but not less than 232.56 decitex (exceeding 14 metric number but not exceeding 43 metric number) :Bleached" +52052290,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 714.29 decitex but not less than 232.56 
decitex (exceeding 14 metric number but not exceeding 43 metric number) :Other" +52052300,cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres >> measuring less than decitex but not less than decitex exceeding 43 metric number but not exceeding 52 metric number +52052310,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 232.56 decitex but not less than 192.31 decitex (exceeding 43 metric number but not exceeding 52 metric number) :Grey" +52052320,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 232.56 decitex but not less than 192.31 decitex (exceeding 43 metric number but not exceeding 52 metric number) :Bleached" +52052390,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 232.56 decitex but not less than 192.31 decitex (exceeding 43 metric number but not exceeding 52 metric number) :Other" +52052400,cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres >> measuring less than decitex but not less than 125 decitex exceeding 52 metric number but not exceeding 80 metric number +52052410,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 192.31 decitex but not less than 125 decitex (exceeding 52 metric number but not exceeding 80 metric number) :Grey" +52052420,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 192.31 decitex but not less than 125 decitex (exceeding 52 metric number but not exceeding 80 metric number) :Bleached" +52052490,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 192.31 decitex but not less than 125 decitex (exceeding 52 metric number but not exceeding 80 metric number) :Other" +52052600,cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres >> measuring less than 125 decitex but not less than decitex exceeding 80 metric number but not exceeding 94 metric number +52052610,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 125 decitex but not less than 106.38 decitex (exceeding 80 metric number but not exceeding 94 metric number) :Dyed" +52052620,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 125 decitex but not less than 106.38 decitex (exceeding 80 metric number but not exceeding 94 metric number) :Bleached" +52052690,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 125 decitex but not less 
than 106.38 decitex (exceeding 80 metric number but not exceeding 94 metric number) :Other" +52052700,cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres >> measuring less than decitex but not less than decitex exceeding 94 metric number but not exceeding 120 metric number +52052710,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 106.38 decitex but not less than 83.33 decitex (exceeding 94 metric number but not exceeding 120 metric number) :Dyed" +52052720,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 106.38 decitex but not less than 83.33 decitex (exceeding 94 metric number but not exceeding 120 metric number) :Bleached" +52052790,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 106.38 decitex but not less than 83.33 decitex (exceeding 94 metric number but not exceeding 120 metric number) :Other" +52052800,cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres >> measuring less than decitex exceeding 120 metric number +52052810,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 83.33 decitex (exceeding 120 metric number) :Dyed" +52052820,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 83.33 decitex (exceeding 120 metric number) :Bleached" +52052890,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring less than 83.33 decitex (exceeding 120 metric number) :Other" +52053100,cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres >> measuring per single yarn decitex or more not exceeding 14 metric number per single yarn +52053110,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn 714.29 decitex or more (not exceeding 14 metric number per single yarn) :Grey" +52053120,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn 714.29 decitex or more (not exceeding 14 metric number per single yarn) :Bleached" +52053130,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn 714.29 decitex or more (not exceeding 14 metric number per single yarn) :Dyed" +52053190,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn 714.29 decitex or more (not exceeding 14 metric number per single yarn) :Other" +52053200,cotton yarn other than sewing thread 
containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres >> measuring per single yarn less than decitex but not less than decitex exceeding 14 metric number but not exceeding 43 metric number per single yarn +52053210,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 714.29 decitex but not less than 232.56 decitex (exceeding 14 metric number but not exceeding 43 metric number per single yarn) :Grey" +52053220,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 714.29 decitex but not less than 232.56 decitex (exceeding 14 metric number but not exceeding 43 metric number per single yarn) :Bleached" +52053290,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 714.29 decitex but not less than 232.56 decitex (exceeding 14 metric number but not exceeding 43 metric number per single yarn) :Other" +52053300,cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres >> measuring per single yarn less than decitex but not less than decitex exceeding 43 metric number but not exceeding 52 metric number per single yarn +52053310,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 232.56 decitex but not less than 192.31 decitex (exceeding 43 metric number but not exceeding 52 metric number per single yarn) :Grey" +52053320,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 232.56 decitex but not less than 192.31 decitex (exceeding 43 metric number but not exceeding 52 metric number per single yarn) :Bleached" +52053330,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 232.56 decitex but not less than 192.31 decitex (exceeding 43 metric number but not exceeding 52 metric number per single yarn) :Dyed" +52053390,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 232.56 decitex but not less than 192.31 decitex (exceeding 43 metric number but not exceeding 52 metric number per single yarn) :Other" +52053400,cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres >> measuring per single yarn less than decitex but not less than 125 decitex exceeding 52 metric number but not exceeding 80 metric number per single yarn +52053410,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 192.31 decitex but not less than 125 decitex (exceeding 52 metric number 
but not exceeding 80 metric number per single yarn) :Grey" +52053420,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 192.31 decitex but not less than 125 decitex (exceeding 52 metric number but not exceeding 80 metric number per single yarn) :Bleached" +52053430,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 192.31 decitex but not less than 125 decitex (exceeding 52 metric number but not exceeding 80 metric number per single yarn) :Dyed" +52053490,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 192.31 decitex but not less than 125 decitex (exceeding 52 metric number but not exceeding 80 metric number per single yarn) :Other" +52053500,cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres >> measuring per single yarn less than 125 decitex exceeding 80 metric number per single yarn +52053510,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 125 decitex (exceeding 80 metric number per single yarn) :Grey" +52053590,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 125 decitex (exceeding 80 metric number per single yarn) :Other" +52054100,cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres >> measuring per single yarn decitex or more not exceeding 14 metric number per single yarn +52054110,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn 714.29 decitex or more (not exceeding 14 metric number per single yarn) :Grey" +52054120,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn 714.29 decitex or more (not exceeding 14 metric number per single yarn) :Bleached" +52054130,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn 714.29 decitex or more (not exceeding 14 metric number per single yarn) :Dyed" +52054190,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn 714.29 decitex or more (not exceeding 14 metric number per single yarn) :Other" +52054200,cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres >> measuring per single yarn less than decitex but not less than decitex exceeding 14 metric number but not exceeding 43 metric number per single yarn +52054210,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE 
BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 714.29 decitex but not less than 232.56 decitex (exceeding 14 metric number but not exceeding 43 metric number per single yarn) :Grey" +52054290,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 714.29 decitex but not less than 232.56 decitex (exceeding 14 metric number but not exceeding 43 metric number per single yarn) :Other" +52054300,cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres >> measuring per single yarn less than decitex but not less decitex exceeding 43 metric number but not exceeding 52 metric number per single yarn +52054310,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 232.56 decitex but not less than192.31 decitex (exceeding 43 metric number but not exceeding 52 metric number per single yarn) :Grey" +52054320,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 232.56 decitex but not less than192.31 decitex (exceeding 43 metric number but not exceeding 52 metric number per single yarn) :Bleached" +52054390,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 232.56 decitex but not less than192.31 decitex (exceeding 43 metric number but not exceeding 52 metric number per single yarn) :Other" +52054400,cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres >> measuring per single yarn less than decitex but not less than 125 decitex exceeding 52 metric number but not exceeding 80 metric number per single yarn +52054410,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 192.31 decitex but not less than 125 decitex (exceeding 52 metric number but not exceeding 80 metric number per single yarn) :Grey" +52054420,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 192.31 decitex but not less than 125 decitex (exceeding 52 metric number but not exceeding 80 metric number per single yarn) :Bleached" +52054490,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 192.31 decitex but not less than 125 decitex (exceeding 52 metric number but not exceeding 80 metric number per single yarn) :Other" +52054600,cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres >> measuring per single yarn less than 125 decitex but not less than decitex exceeding 80 metric number but not exceeding 94 metric 
number per single yarn +52054610,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 125 decitex but not less than 106.38 decitex (exceeding 80 metric number but not exceeding 94 metric number per single yarn) :Grey" +52054620,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 125 decitex but not less than 106.38 decitex (exceeding 80 metric number but not exceeding 94 metric number per single yarn) :Bleached" +52054630,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 125 decitex but not less than 106.38 decitex (exceeding 80 metric number but not exceeding 94 metric number per single yarn) :Dyed" +52054690,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 125 decitex but not less than 106.38 decitex (exceeding 80 metric number but not exceeding 94 metric number per single yarn) :Other" +52054700,cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres >> measuring per single yarn less than decitex but not less than decitex exceeding 94 metric number but not exceeding 120 metric number per single yarn +52054710,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 106.38 decitex but not less than 83.33 decitex (exceeding 94 metric number but not exceeding 120 metric number per single yarn) :Grey" +52054720,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 106.38 decitex but not less than 83.33 decitex (exceeding 94 metric number but not exceeding 120 metric number per single yarn) :Bleached" +52054730,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 106.38 decitex but not less than 83.33 decitex (exceeding 94 metric number but not exceeding 120 metric number per single yarn) :Dyed" +52054790,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 106.38 decitex but not less than 83.33 decitex (exceeding 94 metric number but not exceeding 120 metric number per single yarn) :Other" +52054800,cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres cotton yarn other than sewing thread containing 85 or more by weight of cotton not put up for retail sale single yarn of uncombed fibres >> measuring per single yarn less than decitex exceeding 120 metric number per single yarn +52054810,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 83.33 decitex (exceeding 120 metric number per single yarn) :Grey" +52054820,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less 
than 83.33 decitex (exceeding 120 metric number per single yarn) :Bleached" +52054830,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 83.33 decitex (exceeding 120 metric number per single yarn) :Dyed" +52054890,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 85% OR MORE BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE:Measuring per single yarn less than 83.33 decitex (exceeding 120 metric number per single yarn) :Other" +52060000,cotton yarn other than sewing thread containing less than 85 by weight of cotton not put up for retail sale single yarn of uncombed fibres +52061100,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE::Measuring 714.29 decitex or more (not exceeding 14 metric number)" +52061200,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE::Measuring less than 714.29 decitex but not less than 232.56 decitex (exceeding 14 metric number but not exceeding 43 metric number)" +52061300,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE::Measuring less than 232.56 decitex but not less than 192.31 decitex (exceeding 43 metric number but not exceeding 52 metric number)" +52061400,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE::Measuring less than 192.31 decitex but not less than 125 decitex (exceeding 52 metric number but not exceeding 80 metric number)" +52061500,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE::Measuring less than 125 decitex (exceeding 80 metric number)" +52062100,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE::Measuring 714.29 decitex or more (not exceeding 14 metric number)" +52062200,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE::Measuring less than 714.29 decitex but not less than 232.56 decitex (exceeding 14 metric number but not exceeding 43 metric number)" +52062300,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE::Measuring less than 232.56 decitex but not less than 192.31 decitex (exceeding 43 metric number but not exceeding 52 metric number)" +52062400,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE::Measuring less than 192.31 decitex but not less than 125 decitex (exceeding 52 metric number but not exceeding 80 metric number)" +52062500,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE::Measuring less than 125 decitex (exceeding 80 metric number)" +52063100,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE::Measuring per single yarn 714.29 decitex or more (not exceeding 14 metric number per single yarn)" +52063200,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE::Measuring per single yarn less than 714.29 decitex but not less than 232.56 decitex (exceeding 14 metric number but not exceeding 43 metric number per single yarn)" +52063300,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING 
LESS THAN 85% BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE::Measuring per single yarn less than 232.56 decitex but not less than 192.31 decitex (exceeding 43 metric number but not exceeding 52 metric number per single yarn)" +52063400,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE::Measuring per single yarn less than 192.31 decitex but not less than 125 decitex (exceeding 52 metric number but not exceeding 80 metric number per single yarn)" +52063500,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE::Measuring per single yarn less than 125 decitex (exceeding 80 metric number per single yarn)" +52064100,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE::Measuring per single yarn 714.29 decitex or more (not exceeding 14 metric number per single yarn)" +52064200,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE::Measuring per single yarn less than 714.29 decitex but not less than 232.56 decitex (exceeding 14 metric number but not exceeding 43 metric number per single yarn)" +52064300,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE::Measuring per single yarn less than 232.56 decitex but not less than 192.31 decitex (exceeding 43 metric number but not exceeding 52 metric number per single yarn)" +52064400,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE::Measuring per single yarn less than 192.31 decitex but not less than 125 decitex (exceeding 52 metric number but not exceeding 80 metric number per single yarn)" +52064500,"COTTON YARN (OTHER THAN SEWING THREAD), CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, NOT PUT UP FOR RETAIL SALE::Measuring per single yarn less than 125 decitex (exceeding 80 metric number per single yarn)" +52070000,cotton yarn other than sewing thread put up for retail sale +52071000,COTTON YARN (OTHER THAN SEWING THREAD) PUT UP FOR RETAIL SALE::Containing 85% or more by weight of cotton +52079000,COTTON YARN (OTHER THAN SEWING THREAD) PUT UP FOR RETAIL SALE::Other +52080000,woven fabrics of cotton containing 85 or more by weight of cotton weighing not more than 200 unbleached 2 +52081100,woven fabrics of cotton containing 85 or more by weight of cotton weighing not more than 200 unbleached 2 >> plain weave weighing not more than 100 +52081110,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Dhoti" +52081120,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Saree" +52081130,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Shirting fabrics" +52081140,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Casement" +52081190,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Other" +52081200,woven fabrics of cotton containing 85 or more by weight of cotton weighing not more than 200 unbleached 2 >> plain weave weighing 
more than 100 +52081210,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Dhoti" +52081220,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Saree" +52081230,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Shirting fabrics" +52081240,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Casement" +52081250,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Sheeting(takia, leopard fabrics, other than furnishing fabrics)" +52081260,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Voils" +52081290,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Other" +52081300,woven fabrics of cotton containing 85 or more by weight of cotton weighing not more than 200 unbleached 2 >> or twill including cross twill +52081310,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:3-thread or 4-thread twill, including cross twill :Shirting fabrics" +52081320,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:3-thread or 4-thread twill, including cross twill :Dobby fabrics" +52081390,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:3-thread or 4-thread twill, including cross twill :Other" +52081900,woven fabrics of cotton containing 85 or more by weight of cotton weighing not more than 200 unbleached 2 >> other fabrics +52081910,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Other fabrics :Dedsuti, dosuti fabrics" +52081990,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Other fabrics :Other" +52082100,woven fabrics of cotton containing 85 or more by weight of cotton weighing not more than 200 unbleached 2 >> plain weave weighing not more than 100 +52082110,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Dhoti" +52082120,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Saree" +52082130,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Casement" +52082140,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Shirting fabrics" +52082150,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Cambrics (including madapollam and jaconet)" +52082160,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Mulls (including limbric and willaya)" +52082170,"Woven fabrics of cotton, 
containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Muslin (including lawn, mulmul and organdi)" +52082180,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Voils (excluding leno fabrics)" +52082190,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Other" +52082200,woven fabrics of cotton containing 85 or more by weight of cotton weighing not more than 200 unbleached 2 >> plain weave weighing more than 100 +52082210,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Dhoti" +52082220,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Saree" +52082230,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Shirting fabrics" +52082240,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Casement" +52082250,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Cambrics (including madapollam and jaconet)" +52082260,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Long cloth (including calico)" +52082270,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Sheeting (takia and the like)" +52082280,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Voils (excluding leno fabrics)" +52082290,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Other" +52082300,woven fabrics of cotton containing 85 or more by weight of cotton weighing not more than 200 unbleached 2 >> or twill includingcross twill +52082310,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:3-thread or 4-thread twill, including cross twill :Shirting fabrics" +52082320,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:3-thread or 4-thread twill, including cross twill :Parmatta fabrics (including ilesia, pocketing, Italian twill)" +52082330,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:3-thread or 4-thread twill, including cross twill :Shirting fabrics" +52082390,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:3-thread or 4-thread twill, including cross twill :Other" +52082900,woven fabrics of cotton containing 85 or more by weight of cotton weighing not more than 200 unbleached 2 >> other fabrics +52082910,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Other fabrics :Dhoti and saree, zari bordered" +52082920,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Other fabrics :Dedsuti, 
dosuti fabrics, ceretonnes and osamburge" +52082990,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Other fabrics :Other(OLD tariff)" +52083100,woven fabrics of cotton containing 85 or more by weight of cotton weighing not more than 200 unbleached 2 >> plain weave weighing not more than 100 plain weave weighing not more than 100 +52083110,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Lungi" +52083120,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Saree(OLD tariff)" +52083121,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Sarees, of Handloom" +52083129,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Other" +52083130,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Shirting fabrics" +52083140,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Casement" +52083150,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Cambrics (including madapollam and jaconet)" +52083160,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Mull (including limbric and willaya)" +52083170,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Muslin (including lawn mulmul and organdi) of carded or combed yarn" +52083180,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Voils (excluding leno fabrics)" +52083190,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Other" +52083200,woven fabrics of cotton containing 85 or more by weight of cotton weighing not more than 200 unbleached 2 >> plain weave weighing more than 100 plain weave weighing more than 100 +52083210,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Lungi" +52083220,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Saree" +52083230,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Shirting fabrics" +52083240,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Casement" +52083250,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Bedticking, domestic" +52083260,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Cambrics 
(including madapollam and jaconet), longcloth(including calico) and voils (excluding leno fabrics)" +52083270,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Coating (including suiting)" +52083280,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Furnishing fabrics( excluding pile and chenille fabrics)" +52083290,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Other" +52083300,woven fabrics of cotton containing 85 or more by weight of cotton weighing not more than 200 unbleached 2 >> or 4 thread twill including cross twill or 4 thread twill including cross twill +52083310,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:3-thread or 4- thread twill, including cross twill :Shirting fabrics" +52083320,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:3-thread or 4- thread twill, including cross twill :Coating (including suiting)" +52083330,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:3-thread or 4- thread twill, including cross twill :Shirting (including mazri)" +52083390,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:3-thread or 4- thread twill, including cross twill :Other" +52083900,woven fabrics of cotton containing 85 or more by weight of cotton weighing not more than 200 unbleached 2 >> other fabrics +52083910,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Other fabrics :Zari bordered sarees" +52083990,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Other fabrics :Other" +52084100,woven fabrics of cotton containing 85 or more by weight of cotton weighing not more than 200 unbleached 2 >> plain weave weighing not more than 100 +52084110,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Bleeding Madras" +52084120,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Saree(OLD tariff)" +52084121,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Sarees, of Handloom" +52084129,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Other" +52084130,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Shirting fabrics" +52084140,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Bed ticking, domestic" +52084150,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2:Furnishing fabrics (excluding pile and chenille fabrics)" +52084190,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more 
than 100 g/m2:Other" +52084200,woven fabrics of cotton containing 85 or more by weight of cotton weighing not more than 200 unbleached 2 >> plain weave weighing more than 100 +52084210,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Bleeding Madras" +52084220,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Saree" +52084230,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Shirting fabrics" +52084240,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Casement" +52084250,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Bed ticking, domestic" +52084260,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Furnishing fabrics, other than pile and chenille fabrics" +52084290,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Other" +52084300,woven fabrics of cotton containing 85 or more by weight of cotton weighing not more than 200 unbleached 2 >> or 4 thread twill including cross twill +52084310,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:3-thread or 4- thread twill, including cross twill :Bleading Madras" +52084320,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:3-thread or 4- thread twill, including cross twill :Shirting fabrics" +52084330,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:3-thread or 4- thread twill, including cross twill :Bedticking, damask" +52084340,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:3-thread or 4- thread twill, including cross twill :Flannelette" +52084390,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:3-thread or 4- thread twill, including cross twill :Other" +52084900,woven fabrics of cotton containing 85 or more by weight of cotton weighing not more than 200 unbleached 2 >> +52084910,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Other fabrics:Zari bordered sarees" +52084920,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Other fabrics:Real Madras handkerchiefs(OLD tariff)" +52084921,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Other fabrics:Real Madras Handkerchiefs, of Handloom" +52084929,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Other fabrics:Other" +52084990,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Other fabrics:Others" +52085100,woven fabrics of cotton containing 85 or more by weight of cotton weighing not more than 200 unbleached 2 >> +52085110,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 
100 g/m2 :Lungi" +52085120,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2 :Saree(OLD tariff)" +52085130,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2 :Shirting fabrics" +52085140,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2 :Casement" +52085150,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2 :Cambrics (including madapollam and jaconet)" +52085160,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2 :Mull (including limbric and willaya)" +52085170,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2 :Muslin (including lawn mulmul and organdi) of carded or combed yarn" +52085180,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2 :Voils (excluding leno fabrics)" +52085190,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing not more than 100 g/m2 :Other" +52085200,woven fabrics of cotton containing 85 or more by weight of cotton weighing not more than 200 unbleached 2 >> +52085210,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Lungi" +52085220,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Saree r" +52085230,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Shirting fabrics" +52085240,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Casement" +52085250,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Cambrics (including madapollam and jaconet)" +52085260,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:Mull (including limbric and willaya)" +52085270,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:MUSLIN (INCLUDING LAWN MULMUL AND ORGANDI)" +52085280,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:VOILS (EXCLUDING LENO FABRICS)" +52085290,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Plain weave, weighing more than 100 g/m2:OTHER" +52085900,woven fabrics of cotton containing 85 or more by weight of cotton weighing not more than 200 unbleached 2 >> other fabrics +52085910,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Others fabrics:Zari bordered sarees" +52085920,"Woven fabrics of cotton, containing 85% or more by 
weight of cotton, weighing not more than 200g/m2:Others fabrics:Zari bordered sarees, handloom" +52085990,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing not more than 200g/m2:Others fabrics:Other" +52090000,woven fabrics of cotton containing 85 or more by weight of cotton weighing more 2 than 200 unbleached +52091100,woven fabrics of cotton containing 85 or more by weight of cotton weighing more 2 than 200 unbleached >> plain weave handloom +52091111,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave:Dhoti" +52091112,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave:Saree" +52091113,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave:Casement" +52091114,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave:Sheeting (Takia, leaopord cloth and other than furnishing)" +52091119,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave:Other" +52091190,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave:Other" +52091200,woven fabrics of cotton containing 85 or more by weight of cotton weighing more 2 than 200 unbleached >> or includingcrosstwill +52091210,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:3-thread or 4-thread twill, including cross twill :Saree" +52091220,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:3-thread or 4-thread twill, including cross twill :Shirting fabrics" +52091230,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:3-thread or 4-thread twill, including cross twill :Furnishing fabrics (excluding pile and chenille fabrics)" +52091240,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:3-thread or 4-thread twill, including cross twill :Seersucker" +52091250,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:3-thread or 4-thread twill, including cross twill :Canvas, including duck - carded or combed yarn" +52091260,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:3-thread or 4-thread twill, including cross twill :Flannelette" +52091270,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:3-thread or 4-thread twill, including cross twill :Shetting (takia, leopard cloth)" +52091290,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:3-thread or 4-thread twill, including cross twill :Other" +52091900,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2::Other fabrics" +52092100,woven fabrics of cotton containing 85 or more by weight of cotton weighing more 2 than 200 unbleached >> plain weave +52092110,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave :Saree" +52092120,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave :Shirting fabrics" +52092130,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave :Furnishing fabrics (excluding 
pile and chenille fabrics)" +52092140,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave :Seersucker" +52092150,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave :Canvas (including duck) of carded or combed yarn" +52092160,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave :Dhoti" +52092170,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave :Flannellete" +52092180,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave :Sheeting( takia, leopardcloth)" +52092190,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave :Other" +52092200,woven fabrics of cotton containing 85 or more by weight of cotton weighing more 2 than 200 unbleached >> or twill including cross twill +52092210,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:3-thread or 4-thread twill, including cross twill :Shirting fabrics" +52092220,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:3-thread or 4-thread twill, including cross twill :Furnishing fabrics (excluding pile and chenille fabrics)" +52092230,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:3-thread or 4-thread twill, including cross twill :Drill" +52092290,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:3-thread or 4-thread twill, including cross twill :Other" +52092900,woven fabrics of cotton containing 85 or more by weight of cotton weighing more 2 than 200 unbleached >> other fabrics +52092910,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Other fabrics :Dhoti and saree, zari bordered" +52092920,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Other fabrics :Dedsuti, dosuti fabrics, ceretonnes and osamburge" +52092990,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Other fabrics :Other" +52093100,woven fabrics of cotton containing 85 or more by weight of cotton weighing more 2 than 200 unbleached >> plain weave +52093110,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave :Lungi" +52093120,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave :Saree" +52093130,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave :Shirting fabrics" +52093140,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave :Furnishing fabrics(excluding pile and chenille fabrics)" +52093150,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave :Seersucker" +52093160,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave :Bedticking, domestic(other than hand dyed)" +52093170,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave :Canvas (including duck), of carded or combed yarn" +52093180,"Woven fabrics of cotton, containing 85% or more by weight 
of cotton, weighing more than 200g/m2:Plain weave :Flannellete" +52093190,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave :Other" +52093200,woven fabrics of cotton containing 85 or more by weight of cotton weighing more 2 than 200 unbleached >> or twill including cross twill +52093210,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:3-thread or 4-thread twill, including cross twill :Shirting fabrics" +52093220,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:3-thread or 4-thread twill, including cross twill :Furnishing fabrics(excluding pile and chenille fabrics)" +52093230,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:3-thread or 4-thread twill, including cross twill :Drill" +52093290,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:3-thread or 4-thread twill, including cross twill :Other" +52093900,woven fabrics of cotton containing 85 or more by weight of cotton weighing more 2 than 200 unbleached >> other fabrics +52093910,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Other fabrics :Zari bordered sarees" +52093990,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Other fabrics :Other" +52094100,woven fabrics of cotton containing 85 or more by weight of cotton weighing more 2 than 200 unbleached >> plain weave +52094110,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave :Bleeding Madras" +52094120,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave :Saree" +52094130,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave :Shirting fabrics" +52094140,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave :Furnishing fabrics (excluding pile chenille fabrics)" +52094150,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave :Seersucker" +52094160,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave :Bedticking, domestic (other than hand dyed)" +52094170,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave :Flannelette" +52094190,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave :Other" +52094200,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2::Denim" +52094300,woven fabrics of cotton containing 85 or more by weight of cotton weighing more 2 than 200 unbleached >> other fabrics of or twill including cross twill +52094310,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Other fabrics of 3-thread or 4-thread twill, including cross twill :Bleeding Madras" +52094320,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Other fabrics of 3-thread or 4-thread twill, including cross twill :Shirting fabrics" +52094330,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Other fabrics of 3-thread or 4-thread twill, including cross 
twill :Furnishing fabrics (excluding pile and chenille fabrics)" +52094340,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Other fabrics of 3-thread or 4-thread twill, including cross twill :Coating (including suiting)" +52094390,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Other fabrics of 3-thread or 4-thread twill, including cross twill :Other" +52094900,woven fabrics of cotton containing 85 or more by weight of cotton weighing more 2 than 200 unbleached >> other fabrics +52094910,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Other fabrics :Zari bordered sari" +52094990,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Other fabrics :Other" +52095100,woven fabrics of cotton containing 85 or more by weight of cotton weighing more 2 than 200 unbleached >> plain weave lungis +52095110,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave:Lungi(OLD tariff)" +52095111,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave:Lungi, of Handloom" +52095119,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave:Other" +52095120,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave:Saree" +52095130,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave:Shirting fabrics" +52095140,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave:Furnishing fabrics (excluding pile and chenille fabrics)" +52095150,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave:Seersucker" +52095160,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave:Bedticking, domestic" +52095170,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave:Flannelette" +52095190,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Plain weave:Other" +52095200,woven fabrics of cotton containing 85 or more by weight of cotton weighing more 2 than 200 unbleached >> or twill including cross twill +52095210,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:3-thread or 4-thread twill, including cross twill :Shirting fabrics" +52095220,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:3-thread or 4-thread twill, including cross twill :Furnishing fabrics (excluding pile and chenille fabrics)" +52095290,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:3-thread or 4-thread twill, including cross twill :Other" +52095900,woven fabrics of cotton containing 85 or more by weight of cotton weighing more 2 than 200 unbleached >> other fabrics +52095910,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Other fabrics :Zari bordered saree" +52095990,"Woven fabrics of cotton, containing 85% or more by weight of cotton, weighing more than 200g/m2:Other fabrics :Other" +52100000,woven fabrics of cotton containing less than 85 by weight of cotton mixed mainly 
or solely with fibres 2 weighing not more than 200 +52101100,woven fabrics of cotton containing less than 85 by weight of cotton mixed mainly or solely with fibres 2 weighing not more than 200 >> plain weave +52101110,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Shirting fabrics" +52101120,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Saree" +52101190,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Other" +52101900,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2::Other fabrics" +52102100,woven fabrics of cotton containing less than 85 by weight of cotton mixed mainly or solely with fibres 2 weighing not more than 200 >> plain weave +52102110,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Shirting fabrics" +52102120,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Poplin and broad fabrics" +52102130,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Saree" +52102140,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Shirting (including mazri)" +52102150,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Voile" +52102190,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Other" +52102900,woven fabrics of cotton containing less than 85 by weight of cotton mixed mainly or solely with fibres 2 weighing not more than 200 >> other fabrics +52102910,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Other fabrics :Dhoti and saree, zari bordered" +52102920,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Other fabrics :Dedsuti, Dosuti, ceretonnes and osamburge" +52102990,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Other fabrics :Other" +52103100,woven fabrics of cotton containing less than 85 by weight of cotton mixed mainly or solely with fibres 2 weighing not more than 200 >> plain weave +52103110,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Shirting fabrics" +52103120,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Coating (including suitings)" +52103130,"WOVEN FABRICS OF 
COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Furnishing fabrics (excluding pile and chenille fabrics)" +52103140,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Poplin and broad fabrics" +52103150,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Saree" +52103160,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Voils" +52103190,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Other" +52103200,woven fabrics of cotton containing less than 85 by weight of cotton mixed mainly or solely with fibres 2 weighing not more than 200 >> or twill including cross twill +52103210,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:3-thread or 4-thread twill, including cross twill :Crepe fabrics including crepe checks" +52103220,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:3-thread or 4-thread twill, including cross twill :Shirting fabrics" +52103230,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:3-thread or 4-thread twill, including cross twill :Bedticking, damask" +52103290,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:3-thread or 4-thread twill, including cross twill :Other" +52103900,woven fabrics of cotton containing less than 85 by weight of cotton mixed mainly or solely with fibres 2 weighing not more than 200 >> other fabrics +52103910,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Other fabrics :Zari bordered saree" +52103990,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Other fabrics :Other" +52104100,woven fabrics of cotton containing less than 85 by weight of cotton mixed mainly or solely with fibres 2 weighing not more than 200 >> plain weave +52104110,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Bleeding Madras" +52104120,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Crepe fabrics (excluding crepe checks)" +52104130,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Shirting fabrics" +52104140,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Suitings" +52104150,"WOVEN FABRICS OF COTTON, CONTAINING LESS 
THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Poplin and broad fabrics" +52104160,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Saree" +52104170,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Voils" +52104190,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Other" +52104900,woven fabrics of cotton containing less than 85 by weight of cotton mixed mainly or solely with fibres 2 weighing not more than 200 >> other fabrics other fabrics +52104910,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Other fabrics :Zari bordered saree" +52104990,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Other fabrics :Other" +52105100,woven fabrics of cotton containing less than 85 by weight of cotton mixed mainly or solely with fibres 2 weighing not more than 200 >> plain weave plain weave +52105110,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Shirting fabrics" +52105120,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Casement" +52105130,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Saree" +52105140,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Poplin and broad fabrics" +52105150,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Voils" +52105190,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Plain weave :Other" +52105900,woven fabrics of cotton containing less than 85 by weight of cotton mixed mainly or solely with fibres 2 weighing not more than 200 >> other fabrics other fabrics +52105910,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Other fabrics :Zari bordered saree" +52105990,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING NOT MORE THAN 200 G/M2:Other fabrics :Other" +52110000,woven fabrics of cotton containing less than unbleached +52111100,woven fabrics of cotton containing less than unbleached >> plain weave +52111110,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Plain weave :Shirting fabrics" +52111120,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Plain 
weave :Saree" +52111190,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Plain weave :Other" +52111200,woven fabrics of cotton containing less than unbleached >> or twill including cross twill +52111210,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:3-thread or 4-thread twill, including cross twill :Shirting fabrics" +52111220,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:3-thread or 4-thread twill, including cross twill :Twill, not elsewhere specified (including gaberdine)" +52111230,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:3-thread or 4-thread twill, including cross twill :Damask" +52111290,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:3-thread or 4-thread twill, including cross twill :Other" +52111900,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2::Other fabrics" +52112000,woven fabrics of cotton containing less than unbleached >> bleached +52112010,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Bleached :Shirting fabrics 12.5% -" +52112020,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Bleached :Canvas (including duck) of carded or combed yarn" +52112030,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Bleached :Flannelettee" +52112040,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Bleached :Saree" +52112050,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Bleached :Crepe fabric including Crepe checks" +52112060,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Bleached :Twill fabrics" +52112091,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Bleached :Zari bordered sari" +52112092,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Bleached :Dedsuti, dosuti, ceretonnes and osamburge 12.5% -" +52112099,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Bleached :Other" +52113100,woven fabrics of cotton containing less than unbleached >> plain weave +52113110,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Plain weave :Shirting fabrics" +52113120,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, 
WEIGHING MORE THAN 200 G/M2:Plain weave :Canvas (including duck) of carded or combed yarn" +52113130,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Plain weave :Coating (including suitings)" +52113140,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Plain weave :Flannellette" +52113150,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Plain weave :Saree" +52113190,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Plain weave :Other m2 12.5% or Rs. 150" +52113200,woven fabrics of cotton containing less than unbleached >> or twill including cross twill +52113210,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:3-thread or 4-thread twill, including cross twill :Crepe fabrics including crepe checks" +52113220,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:3-thread or 4-thread twill, including cross twill :Shirting fabrics" +52113230,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:3-thread or 4-thread twill, including cross twill :Twill, not elsewhere specified (including gaberdine)" +52113240,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:3-thread or 4-thread twill, including cross twill :Trousers or pant fabrics (excluding jeans and crepe)" +52113290,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:3-thread or 4-thread twill, including cross twill :Other" +52113900,woven fabrics of cotton containing less than unbleached >> other fabrics +52113910,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Other fabrics :Zari bordered sarees" +52113990,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Other fabrics :Other" +52114100,woven fabrics of cotton containing less than unbleached >> plain weave +52114110,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Plain weave :Bleeding Madras r" +52114120,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Plain weave :Check shirting (excluding crepe checks)" +52114130,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Plain weave :Shirting" +52114140,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Plain weave :Suitings" +52114150,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, 
WEIGHING MORE THAN 200 G/M2:Plain weave :Flannelette" +52114160,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Plain weave :Saree" +52114170,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Plain weave :Parachute fabrics" +52114190,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Plain weave :Other" +52114200,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2::Denim" +52114300,woven fabrics of cotton containing less than unbleached >> other fabrics of or twill including cross twill +52114310,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Other fabrics of 3-thread or 4-thread twill, including cross twill :Bleeding Madras" +52114320,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Other fabrics of 3-thread or 4-thread twill, including cross twill :Crepe fabrics" +52114330,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Other fabrics of 3-thread or 4-thread twill, including cross twill :Shirting fabrics" +52114340,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Other fabrics of 3-thread or 4-thread twill, including cross twill :Suitings" +52114390,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Other fabrics of 3-thread or 4-thread twill, including cross twill :Other" +52114900,woven fabrics of cotton containing less than unbleached >> other fabrics +52114910,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Other fabrics :Zari bordered sarees" +52114990,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Other fabrics :Other" +52115100,woven fabrics of cotton containing less than unbleached >> plain weave +52115110,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Plain weave :Shirting fabrics" +52115120,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Plain weave :Furnishing fabrics (excluding pile and chenille fabrics)" +52115130,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Plain weave :Flannelette" +52115140,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Plain weave :Long cloth (chintz)" +52115150,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Plain weave :Saree" 
+52115190,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Plain weave :Other" +52115200,woven fabrics of cotton containing less than unbleached >> or twill including cross twill +52115210,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:3-thread or 4-thread twill, including cross twill :Crepe fabrics including crepe checks" +52115220,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:3-thread or 4-thread twill, including cross twill :Shirting fabrics" +52115230,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:3-thread or 4-thread twill, including cross twill :Twill, not elsewhere specified (including gaberdine)" +52115290,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:3-thread or 4-thread twill, including cross twill :Other" +52115900,woven fabrics of cotton containing less than unbleached >> other fabrics +52115910,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Other fabrics :Zari bordered saree" +52115990,"WOVEN FABRICS OF COTTON, CONTAINING LESS THAN 85% BY WEIGHT OF COTTON, MIXED MAINLY OR SOLELY WITH MAN-MADE FIBRES, WEIGHING MORE THAN 200 G/M2:Other fabrics :Other" +52120000,other woven fabrics of cotton 2 weighing more than 200 +52121100,OTHER WOVEN FABRICS OF COTTON::Unbleached +52121200,OTHER WOVEN FABRICS OF COTTON::Bleached +52121300,OTHER WOVEN FABRICS OF COTTON::Dyed +52121400,OTHER WOVEN FABRICS OF COTTON::Of yarns of different colours +52121500,OTHER WOVEN FABRICS OF COTTON::Printed +52122100,OTHER WOVEN FABRICS OF COTTON::Unbleached +52122200,OTHER WOVEN FABRICS OF COTTON::Bleached +52122300,OTHER WOVEN FABRICS OF COTTON::Dyed +52122400,OTHER WOVEN FABRICS OF COTTON::Of yarns of different colours +52122500,OTHER WOVEN FABRICS OF COTTON::Printed +53010000,flax raw or processed but not spun flax tow and waste including yarn waste and garnetted stock +53011000,"FLAX, RAW OR PROCESSED BUT NOT SPUN; FLAX TOW AND WASTE (INCLUDING YARN WASTE AND GARNETTED STOCK)::Flax, raw or retted" +53012100,"FLAX, RAW OR PROCESSED BUT NOT SPUN; FLAX TOW AND WASTE (INCLUDING YARN WASTE AND GARNETTED STOCK)::Broken or scutched" +53012900,"FLAX, RAW OR PROCESSED BUT NOT SPUN; FLAX TOW AND WASTE (INCLUDING YARN WASTE AND GARNETTED STOCK)::Other" +53013000,"FLAX, RAW OR PROCESSED BUT NOT SPUN; FLAX TOW AND WASTE (INCLUDING YARN WASTE AND GARNETTED STOCK)::Flax tow and waste" +53020000,true hemp cannabis sativa l raw or processed but not spun tow and waste of true hemp including yarn waste and garnetted stock +53021000,"TRUE HEMP (CANNABIS SATIVA L ), RAW OR PROCESSED BUT NOT SPUN; TOW AND WASTE OF TRUE HEMP (INCLUDING YARN WASTE AND GARNETTED STOCK)::True hemp, raw or retted" +53029000,"TRUE HEMP (CANNABIS SATIVA L ), RAW OR PROCESSED BUT NOT SPUN; TOW AND WASTE OF TRUE HEMP (INCLUDING YARN WASTE AND GARNETTED STOCK)::Other" +53030000,jute and other textile bast fibres excluding flax true hemp and ramie raw or processed but not spun tow and waste of these fibres including yarn waste and garnetted stock +53031000,jute and other 
textile bast fibres excluding flax true hemp and ramie raw or processed but not spun tow and waste of these fibres including yarn waste and garnetted stock >> jute and other textile bast fibres raw or retted +53031010,"JUTE AND OTHER TEXTILE BAST FIBRES (EXCLUDING FLAX, TRUE HEMP AND RAMIE), RAW OR PROCESSED BUT NOT SPUN; TOW AND WASTE OF THESE FIBRES (INCLUDING YARN WASTE AND GARNETTED STOCK):Jute and other textile bast fibres, raw or retted :Jute, raw or retted" +53031090,"JUTE AND OTHER TEXTILE BAST FIBRES (EXCLUDING FLAX, TRUE HEMP AND RAMIE), RAW OR PROCESSED BUT NOT SPUN; TOW AND WASTE OF THESE FIBRES (INCLUDING YARN WASTE AND GARNETTED STOCK):Jute and other textile bast fibres, raw or retted :Other" +53039000,jute and other textile bast fibres excluding flax true hemp and ramie raw or processed but not spun tow and waste of these fibres including yarn waste and garnetted stock >> other +53039010,"JUTE AND OTHER TEXTILE BAST FIBRES (EXCLUDING FLAX, TRUE HEMP AND RAMIE), RAW OR PROCESSED BUT NOT SPUN; TOW AND WASTE OF THESE FIBRES (INCLUDING YARN WASTE AND GARNETTED STOCK):Other :Jute cutting" +53039090,"JUTE AND OTHER TEXTILE BAST FIBRES (EXCLUDING FLAX, TRUE HEMP AND RAMIE), RAW OR PROCESSED BUT NOT SPUN; TOW AND WASTE OF THESE FIBRES (INCLUDING YARN WASTE AND GARNETTED STOCK):Other :Other" +53050000,coconut abaca textiles nee ramie and other vegetable coconut abaca manila hemp or musa textilis nee ramie and other vegetable textile fibres not elsewhere specified or included raw or processed but not spun tow noils and waste of these fibres including yarn waste and garneted stock +53050010,"COCONUT, ABACA (MANILA HEMP OR MUSA TEXTILIS NEE), RAMIE AND OTHER VEGETABLE TEXTILE FIBRES, NOT ELSEWHERE SPECIFIED OR INCLUDED,RAW OR PROCESSED BUT NOT SPUN; TOW NOILS AND WASTE OF THESE FIBRES (INCLUDING YARN WASTE AND GARNETED STOCK):Coconut, abaca (Manila hemp or Musa textils Nec), ramie and other vegetable textile fibres, not elsewhere specified or included, raw or processed but not spun, tow noils and waste of these fibres (including yarn waste and garneted stock):Coir bristles fibre" +53050020,"COCONUT, ABACA (MANILA HEMP OR MUSA TEXTILIS NEE), RAMIE AND OTHER VEGETABLE TEXTILE FIBRES, NOT ELSEWHERE SPECIFIED OR INCLUDED,RAW OR PROCESSED BUT NOT SPUN; TOW NOILS AND WASTE OF THESE FIBRES (INCLUDING YARN WASTE AND GARNETED STOCK):Coconut, abaca (Manila hemp or Musa textils Nec), ramie and other vegetable textile fibres, not elsewhere specified or included, raw or processed but not spun, tow noils and waste of these fibres (including yarn waste and garneted stock):Coir mattress fibre(OLD tariff)" +53050030,"COCONUT, ABACA (MANILA HEMP OR MUSA TEXTILIS NEE), RAMIE AND OTHER VEGETABLE TEXTILE FIBRES, NOT ELSEWHERE SPECIFIED OR INCLUDED,RAW OR PROCESSED BUT NOT SPUN; TOW NOILS AND WASTE OF THESE FIBRES (INCLUDING YARN WASTE AND GARNETED STOCK):Coconut, abaca (Manila hemp or Musa textils Nec), ramie and other vegetable textile fibres, not elsewhere specified or included, raw or processed but not spun, tow noils and waste of these fibres (including yarn waste and garneted stock):Curled or machine twisted coir fibre" +53050040,"COCONUT, ABACA (MANILA HEMP OR MUSA TEXTILIS NEE), RAMIE AND OTHER VEGETABLE TEXTILE FIBRES, NOT ELSEWHERE SPECIFIED OR INCLUDED,RAW OR PROCESSED BUT NOT SPUN; TOW NOILS AND WASTE OF THESE FIBRES (INCLUDING YARN WASTE AND GARNETED STOCK):Coconut, abaca (Manila hemp or Musa textils Nec), ramie and other vegetable textile fibres, not elsewhere specified or included, raw or 
processed but not spun, tow noils and waste of these fibres (including yarn waste and garneted stock):Coir pith" +53050050,"COCONUT, ABACA (MANILA HEMP OR MUSA TEXTILIS NEE), RAMIE AND OTHER VEGETABLE TEXTILE FIBRES, NOT ELSEWHERE SPECIFIED OR INCLUDED,RAW OR PROCESSED BUT NOT SPUN; TOW NOILS AND WASTE OF THESE FIBRES (INCLUDING YARN WASTE AND GARNETED STOCK):Coconut, abaca (Manila hemp or Musa textils Nec), ramie and other vegetable textile fibres, not elsewhere specified or included, raw or processed but not spun, tow noils and waste of these fibres (including yarn waste and garneted stock):of Abaca" +53050090,"COCONUT, ABACA (MANILA HEMP OR MUSA TEXTILIS NEE), RAMIE AND OTHER VEGETABLE TEXTILE FIBRES, NOT ELSEWHERE SPECIFIED OR INCLUDED,RAW OR PROCESSED BUT NOT SPUN; TOW NOILS AND WASTE OF THESE FIBRES (INCLUDING YARN WASTE AND GARNETED STOCK):Coconut, abaca (Manila hemp or Musa textils Nec), ramie and other vegetable textile fibres, not elsewhere specified or included, raw or processed but not spun, tow noils and waste of these fibres (including yarn waste and garneted stock):Of others" +53060000,flax yarn +53061000,flax yarn >> single +53061010,FLAX YARN:Single :Put up for retail sale +53061090,FLAX YARN:Single :Other +53062000,flax yarn >> multiple folded or cabled +53062010,FLAX YARN:Multiple (folded) or cabled :Put up for retail sale +53062090,FLAX YARN:Multiple (folded) or cabled :Other +53070000,yarn of jute or of other textile bast fibres of heading 5303 +53071000,yarn of jute or of other textile bast fibres of heading 5303 >> single +53071010,YARN OF JUTE OR OF OTHER TEXTILE BAST FIBRES OF HEADING 5303:Single :Of jute +53071090,YARN OF JUTE OR OF OTHER TEXTILE BAST FIBRES OF HEADING 5303:Single :Other +53072000,YARN OF JUTE OR OF OTHER TEXTILE BAST FIBRES OF HEADING 5303::Multiple (folded) or cabled +53080000,yarn of other vegetable textile fibres paper yarn +53081000,yarn of other vegetable textile fibres paper yarn >> coir yarn +53081010,YARN OF OTHER VEGETABLE TEXTILE FIBRES; PAPER YARN:Coir yarn :Baled +53081020,YARN OF OTHER VEGETABLE TEXTILE FIBRES; PAPER YARN:Coir yarn :SPOOLED HANKS +53081090,YARN OF OTHER VEGETABLE TEXTILE FIBRES; PAPER YARN:Coir yarn :Other +53082000,YARN OF OTHER VEGETABLE TEXTILE FIBRES; PAPER YARN::True hemp yarn +53089000,yarn of other vegetable textile fibres paper yarn >> other +53089010,YARN OF OTHER VEGETABLE TEXTILE FIBRES; PAPER YARN:Other :Ramie yarn +53089090,YARN OF OTHER VEGETABLE TEXTILE FIBRES; PAPER YARN:Other :Other +53090000,woven fabrics of flax containing 85 or more by weight of flax +53091100,woven fabrics of flax containing 85 or more by weight of flax >> unbleached or bleached +53091110,WOVEN FABRICS OF FLAX:Unbleached or bleached :Unbleached +53091120,WOVEN FABRICS OF FLAX:Unbleached or bleached :Bleached +53091900,woven fabrics of flax containing 85 or more by weight of flax >> other +53091910,WOVEN FABRICS OF FLAX:Other :Dyed +53091920,WOVEN FABRICS OF FLAX:Other :Printed +53091990,WOVEN FABRICS OF FLAX:Other :Other +53092100,woven fabrics of flax containing 85 or more by weight of flax >> unbleached or bleached +53092110,WOVEN FABRICS OF FLAX:Unbleached or bleached :Unbleached +53092120,WOVEN FABRICS OF FLAX:Unbleached or bleached :Bleached +53092900,woven fabrics of flax containing 85 or more by weight of flax >> other +53092910,WOVEN FABRICS OF FLAX:Other :Dyed +53092920,WOVEN FABRICS OF FLAX:Other :Printed +53092990,WOVEN FABRICS OF FLAX:Other :Other +53100000,woven fabrics of jute or of other textile bast fibres of 
heading 5303 +53101000,woven fabrics of jute or of other textile bast fibres of heading 5303 >> unbleached containing 100 by weight of jute +53101011,WOVEN FABRICS OF JUTE OR OF OTHER TEXTILE BAST FIBRES OF HEADING 5303:Unbleached :Carpet backing fabrics +53101012,WOVEN FABRICS OF JUTE OR OF OTHER TEXTILE BAST FIBRES OF HEADING 5303:Unbleached :Sacking fabrics +53101013,WOVEN FABRICS OF JUTE OR OF OTHER TEXTILE BAST FIBRES OF HEADING 5303:Unbleached :Hessian fabrics +53101014,WOVEN FABRICS OF JUTE OR OF OTHER TEXTILE BAST FIBRES OF HEADING 5303:Unbleached :Jute canvas +53101019,WOVEN FABRICS OF JUTE OR OF OTHER TEXTILE BAST FIBRES OF HEADING 5303:Unbleached :Other +53101091,WOVEN FABRICS OF JUTE OR OF OTHER TEXTILE BAST FIBRES OF HEADING 5303:Unbleached :Woven blended fabrics containing more than 50% by weight of jute +53101092,WOVEN FABRICS OF JUTE OR OF OTHER TEXTILE BAST FIBRES OF HEADING 5303:Unbleached :Stranded woven fabrics of jute containing 50% or more by weight of jute +53101093,WOVEN FABRICS OF JUTE OR OF OTHER TEXTILE BAST FIBRES OF HEADING 5303:Unbleached :Jute swim fabrics +53101099,WOVEN FABRICS OF JUTE OR OF OTHER TEXTILE BAST FIBRES OF HEADING 5303:Unbleached :Other +53109000,woven fabrics of jute or of other textile bast fibres of heading 5303 >> other +53109010,WOVEN FABRICS OF JUTE OR OF OTHER TEXTILE BAST FIBRES OF HEADING 5303:Other :Bleached +53109020,WOVEN FABRICS OF JUTE OR OF OTHER TEXTILE BAST FIBRES OF HEADING 5303:Other :Decorative fabrics +53109091,WOVEN FABRICS OF JUTE OR OF OTHER TEXTILE BAST FIBRES OF HEADING 5303:Other :Bleached +53109092,WOVEN FABRICS OF JUTE OR OF OTHER TEXTILE BAST FIBRES OF HEADING 5303:Other :Dyed +53109093,WOVEN FABRICS OF JUTE OR OF OTHER TEXTILE BAST FIBRES OF HEADING 5303:Other :Printed +53109099,WOVEN FABRICS OF JUTE OR OF OTHER TEXTILE BAST FIBRES OF HEADING 5303:Other :Other +53110000,woven fabrics of other vegetable textile fibres woven fabrics of paper yarn woven fabrics of other vegetable textile fibres woven fabrics of paper yarn of other vegetable textile fibres +53110011,WOVEN FABRICS OF OTHER VEGETABLE TEXTILE FIBRES; WOVEN FABRICS OF PAPER YARN:Woven fabrics of other vegetable textile fibres; woven fabrics of paper yarn :Unbleached +53110012,WOVEN FABRICS OF OTHER VEGETABLE TEXTILE FIBRES; WOVEN FABRICS OF PAPER YARN:Woven fabrics of other vegetable textile fibres; woven fabrics of paper yarn :Bleached +53110013,WOVEN FABRICS OF OTHER VEGETABLE TEXTILE FIBRES; WOVEN FABRICS OF PAPER YARN:Woven fabrics of other vegetable textile fibres; woven fabrics of paper yarn :Dyed +53110014,WOVEN FABRICS OF OTHER VEGETABLE TEXTILE FIBRES; WOVEN FABRICS OF PAPER YARN:Woven fabrics of other vegetable textile fibres; woven fabrics of paper yarn :Printed +53110015,WOVEN FABRICS OF OTHER VEGETABLE TEXTILE FIBRES; WOVEN FABRICS OF PAPER YARN:Woven fabrics of other vegetable textile fibres; woven fabrics of paper yarn :Of coir including log form and geotextiles +53110019,WOVEN FABRICS OF OTHER VEGETABLE TEXTILE FIBRES; WOVEN FABRICS OF PAPER YARN:Woven fabrics of other vegetable textile fibres; woven fabrics of paper yarn :Other +53110021,WOVEN FABRICS OF OTHER VEGETABLE TEXTILE FIBRES; WOVEN FABRICS OF PAPER YARN:Woven fabrics of other vegetable textile fibres; woven fabrics of paper yarn :Unbleached +53110022,WOVEN FABRICS OF OTHER VEGETABLE TEXTILE FIBRES; WOVEN FABRICS OF PAPER YARN:Woven fabrics of other vegetable textile fibres; woven fabrics of paper yarn :Bleached +53110023,WOVEN FABRICS OF OTHER VEGETABLE TEXTILE FIBRES; WOVEN 
FABRICS OF PAPER YARN:Woven fabrics of other vegetable textile fibres; woven fabrics of paper yarn :Dyed +53110024,WOVEN FABRICS OF OTHER VEGETABLE TEXTILE FIBRES; WOVEN FABRICS OF PAPER YARN:Woven fabrics of other vegetable textile fibres; woven fabrics of paper yarn :Printed +53110029,WOVEN FABRICS OF OTHER VEGETABLE TEXTILE FIBRES; WOVEN FABRICS OF PAPER YARN:Woven fabrics of other vegetable textile fibres; woven fabrics of paper yarn :Other +54011000,"SEWING THREAD OF MAN-MADE FILAMENTS, WHETHER OR NOT PUT UP FOR RETAIL SALE::Of synthetic filaments(OLD tariff)" +54012000,"SEWING THREAD OF MAN-MADE FILAMENTS, WHETHER OR NOT PUT UP FOR RETAIL SALE::Of artificial filaments(OLD tariff)" +54021110,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX::Of aramids(OLD tariff)" +54021910,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX:Other;:Nylon tyre yarn(OLD tariff)" +54021990,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX:Other;:Other(OLD tariff)" +54022010,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX:High tenacity yarn of polyesters, whether or not textured:Of terylene dacron(OLD tariff)" +54022090,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX:High tenacity yarn of polyesters, whether or not textured:Other(OLD tariff)" +54023100,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX::Of nylon or other polyamides, measuring per single yarn not more than 50 tex(OLD tariff)" +54023200,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX::Of nylon or other polyamides, measuring per single yarn more than 50 tex(OLD tariff)" +54023300,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX::Of polyesters(OLD tariff)" +54023400,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX::Of polypropylene(OLD tariff)" +54023910,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX:Other :Polypropylene filament yarn(OLD tariff)" +54023920,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX:Other :Acrylic filament yarn(OLD tariff)" +54023990,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX:Other :Other(OLD tariff)" +54024400,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX::Elastomeric(OLD tariff)" +54024500,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX::Other, of nylon or other polyamides(OLD tariff)" +54024600,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING 
SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX::Other, of polyesters, partially oriented(OLD tariff)" +54024700,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX::Other, of polyesters(OLD tariff)" +54024800,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX::Other, of polypropylene(OLD tariff)" +54024900,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX::Other(OLD tariff)" +54025100,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX::Of nylon or other polyamides(OLD tariff)" +54025200,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX::Of polyesters(OLD tariff)" +54025300,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX::Of polypropylene(OLD tariff)" +54025910,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX:Other :Polypropylene filament yarn(OLD tariff)" +54025990,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX:Other :Other(OLD tariff)" +54026100,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX::Of nylon or other polyamides(OLD tariff)" +54026200,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX::Of polyesters(OLD tariff)" +54026300,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX::Of polypropylene(OLD tariff)" +54026910,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX:Other :Polyvinyl acetate filament yarn(OLD tariff)" +54026920,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX:Other :Polyvinyl chloride filament yarn(OLD tariff)" +54026930,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX:Other :Polypropylene filament yarn(OLD tariff)" +54026940,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX:Other :Acrylic filament yarn(OLD tariff)" +54026950,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX:Other :Polytetrafluoroethylene yarn(OLD tariff)" +54026990,"SYNTHETIC FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT UP FOR RETAIL SALE, INCLUDING SYNTHETIC MONOFILAMENT OF LESS THAN 67 DECITEX:Other :Other(OLD tariff)" +54030000,artificial filament yarn other than sewing thread not put for retail sale including artificial mono filament of less than 67 decitex +54031000,artificial filament yarn other than sewing thread not put for retail sale including artificial mono filament of less than 67 decitex >> high 
tenacity yarn of viscose rayon +54031010,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:High tenacity yarn of viscose rayon :Viscose rayon tyre yarn 1,233 decitex" +54031020,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:High tenacity yarn of viscose rayon :Viscose rayon tyre yarn 1,833 decitex" +54031090,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:High tenacity yarn of viscose rayon :Other" +54033100,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX::Of viscose rayon, untwisted or with a twist not exceeding 120 turns per metre" +54033200,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX::Of viscose rayon, with a twist exceeding 120turns per metre" +54033300,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX::Of cellulose acetate" +54033900,artificial filament yarn other than sewing thread not put for retail sale including artificial mono filament of less than 67 decitex >> other +54033910,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:Other :Cuprammonium rayon" +54033990,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:Other :Other" +54034100,artificial filament yarn other than sewing thread not put for retail sale including artificial mono filament of less than 67 decitex >> of viscose rayon +54034110,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:Of viscose rayon :Up to 67 decitex" +54034120,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:Of viscose rayon :Of 83 decitex" +54034130,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:Of viscose rayon :Of 111 decitex, bright" +54034140,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:Of viscose rayon :Of 111 decitex, dull" +54034150,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:Of viscose rayon :Of 133 decitex, bright" +54034160,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:Of viscose rayon :Of 133 decitex, dull" +54034170,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:Of viscose rayon :Of 167 decitex, bright" +54034180,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:Of viscose rayon :Of 167 decitex, dull" +54034190,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT 
OF LESS THAN 67 DECITEX:Of viscose rayon :Other" +54034200,artificial filament yarn other than sewing thread not put for retail sale including artificial mono filament of less than 67 decitex >> of cellulose acetate +54034210,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:Of cellulose acetate :Acetate rayon filament yarn, 83 decitex" +54034220,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:Of cellulose acetate :Acetate rayon filament yarn, 111 decitex" +54034230,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:Of cellulose acetate :Acetate rayon filament yarn, 133 decitex" +54034240,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:Of cellulose acetate :Acetate rayon filament yarn, 167 decitex" +54034250,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:Of cellulose acetate :Acetate rayon filament yarn, 333 decitex" +54034290,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:Of cellulose acetate :Other" +54034900,artificial filament yarn other than sewing thread not put for retail sale including artificial mono filament of less than 67 decitex >> other cuprammonium filament yarn +54034911,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:Other :Of 33 decitex" +54034912,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:Other :Of 44 decitex" +54034913,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:Other :Of 67 decitex" +54034914,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:Other :Of 83 decitex" +54034915,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:Other :Of 89 decitex" +54034919,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:Other :Other" +54034990,"ARTIFICIAL FILAMENT YARN (OTHER THAN SEWING THREAD), NOT PUT FOR RETAIL SALE, INCLUDING ARTIFICIAL MONO FILAMENT OF LESS THAN 67 DECITEX:Other :Other" +54040000,synthetic monofilament of 67 decitex or more and of which no cross sectional dimension exceeds 1 mm strip and the like for example artificial of straw of synthetictextile materials an apparent width not exceeding 5 mm monofilament +54041100,"SYNTHETIC MONOFILAMENT OF 67 DECITEX OR MORE AND OF WHICH NO CROSS-SECTIONAL DIMENSION EXCEEDS 1 MM; STRIP AND THE LIKE (FOR EXAMPLE, ARTIFICIAL STRAW) OF SYNTHETIC TEXTILE MATERIALS OF AN APPARENT WIDTH NOT EXCEEDING 5 MM::Elastomeric" +54041200,"SYNTHETIC MONOFILAMENT OF 67 DECITEX OR MORE AND OF WHICH NO CROSS-SECTIONAL DIMENSION EXCEEDS 1 MM; STRIP AND THE LIKE (FOR EXAMPLE, ARTIFICIAL STRAW) OF SYNTHETIC TEXTILE MATERIALS OF AN APPARENT WIDTH NOT EXCEEDING 5 MM::Other, of polypropylene" 
+54041900,synthetic monofilament of 67 decitex or more and of which no cross sectional dimension exceeds 1 mm strip and the like for example artificial of straw of synthetictextile materials an apparent width not exceeding 5 mm monofilament >> other +54041910,"SYNTHETIC MONOFILAMENT OF 67 DECITEX OR MORE AND OF WHICH NO CROSS-SECTIONAL DIMENSION EXCEEDS 1 MM; STRIP AND THE LIKE (FOR EXAMPLE, ARTIFICIAL STRAW) OF SYNTHETIC TEXTILE MATERIALS OF AN APPARENT WIDTH NOT EXCEEDING 5 MM:Other:Catgut imitation of synthetic yarn, non-sterile" +54041920,"SYNTHETIC MONOFILAMENT OF 67 DECITEX OR MORE AND OF WHICH NO CROSS-SECTIONAL DIMENSION EXCEEDS 1 MM; STRIP AND THE LIKE (FOR EXAMPLE, ARTIFICIAL STRAW) OF SYNTHETIC TEXTILE MATERIALS OF AN APPARENT WIDTH NOT EXCEEDING 5 MM:Other:Strip and the like of synthetic fibre material" +54041990,"SYNTHETIC MONOFILAMENT OF 67 DECITEX OR MORE AND OF WHICH NO CROSS-SECTIONAL DIMENSION EXCEEDS 1 MM; STRIP AND THE LIKE (FOR EXAMPLE, ARTIFICIAL STRAW) OF SYNTHETIC TEXTILE MATERIALS OF AN APPARENT WIDTH NOT EXCEEDING 5 MM:Other:Other" +54049000,synthetic monofilament of 67 decitex or more and of which no cross sectional dimension exceeds 1 mm strip and the like for example artificial of straw of synthetictextile materials an apparent width not exceeding 5 mm monofilament >> other +54049010,"SYNTHETIC MONOFILAMENT OF 67 DECITEX OR MORE AND OF WHICH NO CROSS-SECTIONAL DIMENSION EXCEEDS 1 MM; STRIP AND THE LIKE (FOR EXAMPLE, ARTIFICIAL STRAW) OF SYNTHETIC TEXTILE MATERIALS OF AN APPARENT WIDTH NOT EXCEEDING 5 MM:Other :Catgut imitation of synthetic yarn, non-sterile" +54049020,"SYNTHETIC MONOFILAMENT OF 67 DECITEX OR MORE AND OF WHICH NO CROSS-SECTIONAL DIMENSION EXCEEDS 1 MM; STRIP AND THE LIKE (FOR EXAMPLE, ARTIFICIAL STRAW) OF SYNTHETIC TEXTILE MATERIALS OF AN APPARENT WIDTH NOT EXCEEDING 5 MM:Other :Strip and the like of synthetic fibre materials" +54049090,"SYNTHETIC MONOFILAMENT OF 67 DECITEX OR MORE AND OF WHICH NO CROSS-SECTIONAL DIMENSION EXCEEDS 1 MM; STRIP AND THE LIKE (FOR EXAMPLE, ARTIFICIAL STRAW) OF SYNTHETIC TEXTILE MATERIALS OF AN APPARENT WIDTH NOT EXCEEDING 5 MM:Other :Other" +54050000,"::ARTIFICIAL MONOFILAMENT OF 67 DECITEX OR MORE AND OF WHICH NO CROSS-SECTIONAL DIMENSION EXCEEDS 1 MM; STRIP AND THE LIKE (FOR EXAMPLE, ARTIFICIAL STRAW) OF ARTIFICIAL TEXTILE MATERIALS OF AN APPARENT WIDTH NOT EXCEEDING 5 MM" +54060000,filament yarn other than sewing thread put up for retail sale +54060010,"MAN-MADE FILAMENT YARN (OTHER THAN SEWING THREAD), PUT UP FOR RETAIL SALE:Man-made filament yarn (other than sewing thread), put for retail sale:Synthetic filament yarn" +54060020,"MAN-MADE FILAMENT YARN (OTHER THAN SEWING THREAD), PUT UP FOR RETAIL SALE:Man-made filament yarn (other than sewing thread), put for retail sale:Artificial filament yarn" +54071011,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Parachute fabrics(OLD tariff)" +54071012,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Tent fabrics(OLD tariff)" +54071013,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Nylon furnishing fabrics(OLD tariff)" +54071014,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Umbrella cloth panel fabrics(OLD tariff)" 
+54071015,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Other nylon and polyamide fabrics (filament)(OLD tariff)" +54071016,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Polyester suitings(OLD tariff)" +54071019,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Other polyester fabrics(OLD tariff)" +54071021,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Parachute fabrics(OLD tariff)" +54071022,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Tent fabrics(OLD tariff)" +54071023,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Nylon furnishing fabrics(OLD tariff)" +54071024,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Umbrella cloth panel fabrics(OLD tariff)" +54071025,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Other nylon and polyamide fabrics of filament yarn(OLD tariff)" +54071026,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Polyester suitings(OLD tariff)" +54071029,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Other(OLD tariff)" +54071031,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Parachute fabrics(OLD tariff)" +54071032,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Tent fabrics(OLD tariff)" +54071033,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Nylon furnishing fabrics(OLD tariff)" +54071034,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Umbrella cloth panel fabrics(OLD tariff)" +54071035,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Other nylon and polyamide fabrics (filament)(OLD tariff)" +54071036,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Polyester suitings(OLD tariff)" +54071039,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Other(OLD tariff)" +54071041,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Parachute fabrics(OLD tariff)" +54071042,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Tent fabrics(OLD tariff)" +54071043,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained 
from high tenacity yarn of nylon or other polyamides or of polyesters :Nylon furnishing fabrics(OLD tariff)" +54071044,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Umbrella cloth panel fabrics(OLD tariff)" +54071045,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Other nylon and polyamide fabrics (filament)(OLD tariff)" +54071046,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Polyester suitings(OLD tariff)" +54071049,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Other(OLD tariff)" +54071091,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Parachute fabrics(OLD tariff)" +54071092,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Tent fabrics(OLD tariff)" +54071093,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Nylon furnishing fabrics(OLD tariff)" +54071094,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Umbrella cloth panel fabrics(OLD tariff)" +54071095,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Other nylon and polyamide fabrics of(OLD tariff)" +54071096,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Polyester suitings(OLD tariff)" +54071099,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from high tenacity yarn of nylon or other polyamides or of polyesters :Other(OLD tariff)" +54072010,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from strip or the like :Unbleached(OLD tariff)" +54072020,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from strip or the like :Bleached(OLD tariff)" +54072030,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from strip or the like :Dyed(OLD tariff)" +54072040,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from strip or the like :Printed(OLD tariff)" +54072090,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Woven fabrics obtained from strip or the like :Other(OLD tariff)" +54073010,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Fabrics specified in Note 9 to Section XI :Unbleached(OLD tariff)" +54073020,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Fabrics specified in Note 9 to Section XI :Bleached(OLD tariff)" +54073030,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Fabrics specified in Note 9 to Section XI :Dyed(OLD tariff)" +54073040,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Fabrics specified in Note 9 to Section XI :Printed(OLD tariff)" +54073090,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Fabrics specified in Note 9 to Section XI :Other(OLD tariff)" +54074111,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Nylon brasso(OLD tariff)" +54074112,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Nylon georgette(OLD tariff)" +54074113,"WOVEN FABRICS OF SYNTHETIC 
FILAMENT YARN,:Unbleached or bleached :Nylon tafetta(OLD tariff)" +54074114,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Nylon sarees(OLD tariff)" +54074119,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Other(OLD tariff)" +54074121,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Nylon brasso(OLD tariff)" +54074122,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Nylon georgette(OLD tariff)" +54074123,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Nylon tafetta(OLD tariff)" +54074124,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Nylon sarees(OLD tariff)" +54074129,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Other(OLD tariff)" +54074210,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Dyed :Nylon brasso(OLD tariff)" +54074220,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Dyed :Nylon georgette(OLD tariff)" +54074230,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Dyed :Nylon tafetta(OLD tariff)" +54074240,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Dyed :Nylon sarees(OLD tariff)" +54074290,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Dyed :Other(OLD tariff)" +54074300,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,::Of yarn of different colours(OLD tariff)" +54074410,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Printed :Nylon brasso(OLD tariff)" +54074420,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Printed :Nylon georgette(OLD tariff)" +54074430,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Printed :Nylon tafetta(OLD tariff)" +54074440,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Printed :Nylon sarees,(OLD tariff)" +54074490,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Printed :Other(OLD tariff)" +54075111,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Polyester shirtings(OLD tariff)" +54075119,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Other(OLD tariff)" +54075121,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Polyester shirtings(OLD tariff)" +54075129,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Other(OLD tariff)" +54075210,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Dyed :Polyester shirtings(OLD tariff)" +54075220,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Dyed :Polyester suitings(OLD tariff)" +54075230,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Dyed :Terylene and dacron sarees(OLD tariff)" +54075240,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Dyed :Polyester sarees(OLD tariff)" +54075290,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Dyed :Other(OLD tariff)" +54075300,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,::Of yarns of different colours(OLD tariff)" +54075410,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Printed :Terylene and dacron sarees(OLD tariff)" +54075420,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Printed :Polyester shirtings(OLD tariff)" +54075430,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Printed :Polyester sarees(OLD tariff)" +54075490,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Printed :Other(OLD tariff)" +54076110,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Containing 85% or more by weight of nontextured polyester filaments :Polyester shirtings(OLD tariff)" +54076120,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Containing 85% or more by weight of nontextured polyester filaments :Polyester suitings(OLD tariff)" +54076190,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Containing 85% or more by weight of nontextured polyester filaments :Other(OLD tariff)" +54076900,"WOVEN FABRICS OF SYNTHETIC 
FILAMENT YARN,::Other(OLD tariff)" +54077110,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Unbleached(OLD tariff)" +54077120,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Bleached(OLD tariff)" +54077200,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,::Dyed(OLD tariff)" +54077300,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,::Of yarns of different colours(OLD tariff)" +54077400,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,::Printed(OLD tariff)" +54078111,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Nylon georgette(OLD tariff)" +54078112,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Nylon sarees(OLD tariff)" +54078113,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Polyester shirtings(OLD tariff)" +54078114,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Polyester suitings(OLD tariff)" +54078115,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Terylene and dacron sarees(OLD tariff)" +54078116,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Polyester dhoti(OLD tariff)" +54078119,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Other(OLD tariff)" +54078121,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Nylon georgette(OLD tariff)" +54078122,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Nylon sarees(OLD tariff)" +54078123,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Polyester shirtings(OLD tariff)" +54078124,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Polyester suitings(OLD tariff)" +54078125,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Terylene and dacron sarees(OLD tariff)" +54078126,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Polyester dhoti(OLD tariff)" +54078129,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Other(OLD tariff)" +54078210,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Dyed :Nylon georgette(OLD tariff)" +54078220,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Dyed :Nylon sarees(OLD tariff)" +54078230,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Dyed :Polyester shirtings(OLD tariff)" +54078240,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Dyed :Polyester suitings(OLD tariff)" +54078250,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Dyed :Terylene and dacron sarees(OLD tariff)" +54078260,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Dyed :Lungies(OLD tariff)" +54078290,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Dyed :Other(OLD tariff)" +54078300,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,::Of yarns of different colours(OLD tariff)" +54078410,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Printed :Nylon georgette(OLD tariff)" +54078420,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Printed :Nylon sarees(OLD tariff)" +54078430,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Printed :Polyester shirtings(OLD tariff)" +54078440,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Printed :Polyester suitings(OLD tariff)" +54078450,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Printed :Terylene and dacron sarees(OLD tariff)" +54078460,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Printed :Lungies(OLD tariff)" +54078470,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Printed :Polyester sarees(OLD tariff)" +54078490,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Printed :Other(OLD tariff)" +54079110,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,:Unbleached or bleached :Unbleached(OLD tariff)" +54079120,"WOVEN FABRICS OF SYNTHETIC 
FILAMENT YARN,:Unbleached or bleached :Bleached(OLD tariff)" +54079200,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,::Dyed(OLD tariff)" +54079300,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,::Of yarns of different colours(OLD tariff)" +54079400,"WOVEN FABRICS OF SYNTHETIC FILAMENT YARN,::Printed(OLD tariff)" +54080000,woven fabrics of artificial filament yarn including woven fabrics obtained from materials of heading 5405 +54081000,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405::Woven fabrics obtained from high tenacity yarn of viscose rayon" +54082100,woven fabrics of artificial filament yarn including woven fabrics obtained from materials of heading 5405 >> unbleached or bleached +54082110,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Unbleached or bleached :Unbleached" +54082120,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Unbleached or bleached :Bleached" +54082200,woven fabrics of artificial filament yarn including woven fabrics obtained from materials of heading 5405 >> dyed fabrics of rayon +54082211,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Dyed :Rayon crepe fabrics" +54082212,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Dyed :Rayon jacquards" +54082213,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Dyed :Rayon brocades" +54082214,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Dyed :Rayon georgette" +54082215,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Dyed :Rayon tafetta" +54082216,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Dyed :Rayon suitings" +54082217,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Dyed :Rayon shirtings" +54082218,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Dyed :Rayon sarees" +54082219,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Dyed :Other" +54082220,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Dyed :Fabrics of continuous filament, other than rayon" +54082290,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Dyed :Other" +54082300,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405::Of yarns of different colours" +54082400,woven fabrics of artificial filament yarn including woven fabrics obtained from materials of heading 5405 >> printed of rayon +54082411,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Printed :Rayon crepe fabrics" +54082412,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Printed :Rayon jacquards" +54082413,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Printed :Rayon brocades" +54082414,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS 
OBTAINED FROM MATERIALS OF HEADING 5405:Printed :Rayon georgette" +54082415,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Printed :Rayon tafetta" +54082416,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Printed :Rayon suitings" +54082417,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Printed :Rayon shirtings" +54082418,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Printed :Rayon sarees" +54082419,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Printed :Other" +54082490,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Printed :Other" +54083100,woven fabrics of artificial filament yarn including woven fabrics obtained from materials of heading 5405 >> unbleached or bleached +54083110,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Unbleached or bleached :Unbleached" +54083120,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Unbleached or bleached :Bleached" +54083200,woven fabrics of artificial filament yarn including woven fabrics obtained from materials of heading 5405 >> dyed fabrics of rayon +54083211,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Dyed :Rayon brocades" +54083212,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Dyed :Rayon georgette" +54083213,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Dyed :Rayon tafetta" +54083214,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Dyed :Rayon suitings" +54083215,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Dyed :Rayon shirtings" +54083219,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Dyed :Other" +54083290,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Dyed :Other" +54083300,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405::Of yarns of different colours" +54083400,woven fabrics of artificial filament yarn including woven fabrics obtained from materials of heading 5405 >> printed fabric of rayon +54083411,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Printed :Rayon crepe fabrics" +54083412,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Printed :Rayon jacquards" +54083413,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Printed :Rayon brocades" +54083414,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Printed :Rayon georgette" +54083415,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Printed :Rayon tafetta" +54083416,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS 
OBTAINED FROM MATERIALS OF HEADING 5405:Printed :Rayon suitings" +54083417,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Printed :Rayon shirtings" +54083418,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Printed :Rayon sarees" +54083419,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Printed :Other" +54083420,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Printed :Fabrics of continuous filament, other than rayon" +54083490,"WOVEN FABRICS OF ARTIFICIAL FILAMENT YARN, INCLUDING WOVEN FABRICS OBTAINED FROM MATERIALS OF HEADING 5405:Printed :Other" +55010000,synthetic filament tow of nylon or other polyamides +55011000,"WOOL, NOT CARDED OR COMBED SYNTHETIC FILAMENT TOW::Of nylon or other polyamides(OLD tariff)" +55011100,"WOOL, NOT CARDED OR COMBED SYNTHETIC FILAMENT TOW::Of aramids" +55011900,"WOOL, NOT CARDED OR COMBED SYNTHETIC FILAMENT TOW::Other" +55012000,"WOOL, NOT CARDED OR COMBED SYNTHETIC FILAMENT TOW::Of polyesters" +55013000,"WOOL, NOT CARDED OR COMBED SYNTHETIC FILAMENT TOW::Acrylic or modacrylic" +55014000,"WOOL, NOT CARDED OR COMBED SYNTHETIC FILAMENT TOW::Of polypropylene" +55019000,synthetic filament tow of nylon or other polyamides >> other +55019010,"WOOL, NOT CARDED OR COMBED SYNTHETIC FILAMENT TOW:Other :Of polypropylene" +55019090,"WOOL, NOT CARDED OR COMBED SYNTHETIC FILAMENT TOW:Other :Other" +55020000,artificial filament tow +55020010,ARTIFICIAL FILAMENT TOW:Artificial filament tow :Acetate rayon tow(OLD tariff) +55020020,ARTIFICIAL FILAMENT TOW:Artificial filament tow :Viscose rayon tow(OLD tariff) +55020090,ARTIFICIAL FILAMENT TOW:Artificial filament tow :Other(OLD tariff) +55021000,artificial filament tow >> of cellulose acetate +55021010,ARTIFICIAL FILAMENT TOW:Of cellulose acetate;:Viscose rayon tow +55021090,ARTIFICIAL FILAMENT TOW:Of cellulose acetate;:Other +55029000,artificial filament tow >> other +55029010,ARTIFICIAL FILAMENT TOW:Other;:Viscose rayon tow +55029090,ARTIFICIAL FILAMENT TOW:Other;:other +55030000,synthetic staple fibres not carded combed or otherwise processed for spinning of nylon or other polyamides +55031100,"SYNTHETIC STAPLE FIBRES, NOT CARDED, COMBED OR OTHERWISE PROCESSED FOR SPINNING::Of aramids" +55031110,synthetic staple fibres not carded combed or otherwise processed for spinning of nylon or other polyamides >> of aramids >> aramid flame retardant fibre kg +55031120,synthetic staple fibres not carded combed or otherwise processed for spinning of nylon or other polyamides >> of aramids >> fibre kg +55031190,synthetic staple fibres not carded combed or otherwise processed for spinning of nylon or other polyamides >> of aramids >> other kg +55031900,"SYNTHETIC STAPLE FIBRES, NOT CARDED, COMBED OR OTHERWISE PROCESSED FOR SPINNING::Other" +55031910,synthetic staple fibres not carded combed or otherwise processed for spinning of nylon or other polyamides >> other >> nylon staple fibre kg +55031920,synthetic staple fibres not carded combed or otherwise processed for spinning of nylon or other polyamides >> other >> nylon anti static staple fibre kg +55031930,synthetic staple fibres not carded combed or otherwise processed for spinning of nylon or other polyamides >> other >> nylon 66 fibre conforming to is 13464 kg +55031990,synthetic staple fibres not carded combed or otherwise processed for spinning 
of nylon or other polyamides >> other >> other kg +55032000,"SYNTHETIC STAPLE FIBRES, NOT CARDED, COMBED OR OTHERWISE PROCESSED FOR SPINNING::Of polyesters" +55033000,"SYNTHETIC STAPLE FIBRES, NOT CARDED, COMBED OR OTHERWISE PROCESSED FOR SPINNING::Acrylic or modacrylic" +55033010,synthetic staple fibres not carded combed or otherwise processed for spinning of nylon or other polyamides >> acrylic or modacrylic >> kg pre oxidised fibre conforming to is 17308 +55033090,synthetic staple fibres not carded combed or otherwise processed for spinning of nylon or other polyamides >> acrylic or modacrylic >> kg other +55034000,"SYNTHETIC STAPLE FIBRES, NOT CARDED, COMBED OR OTHERWISE PROCESSED FOR SPINNING::Of polypropylene" +55039000,synthetic staple fibres not carded combed or otherwise processed for spinning of nylon or other polyamides >> other +55039010,"SYNTHETIC STAPLE FIBRES, NOT CARDED, COMBED OR OTHERWISE PROCESSED FOR SPINNING:Other :Polyvinyl staple fibre" +55039020,"SYNTHETIC STAPLE FIBRES, NOT CARDED, COMBED OR OTHERWISE PROCESSED FOR SPINNING:Other :Polyvinyl chloride staple fibre" +55039030,synthetic staple fibres not carded combed or otherwise processed for spinning of nylon or other polyamides >> other >> ultra high molecular weight poly ethylene kg staple fibre conforming to astm f2848 +55039090,"SYNTHETIC STAPLE FIBRES, NOT CARDED, COMBED OR OTHERWISE PROCESSED FOR SPINNING:Other :Other" +55040000,artificial staple fibres not carded combed or otherwise processed for spinning +55041000,"ARTIFICIAL STAPLE FIBRES, NOT CARDED, COMBED OR OTHERWISE PROCESSED FOR:Of viscose rayon:Of viscose rayon" +55041010,"ARTIFICIAL STAPLE FIBRES, NOT CARDED, COMBED OR OTHERWISE PROCESSED FOR:Of viscose rayon:Obtained from wood other than bamboo(OLD tariff)" +55041011,artificial staple fibres not carded combed or otherwise processed for spinning >> of viscose rayon obtained from wood other than bamboo >> flame retaradant viscose rayon fibre kg +55041019,artificial staple fibres not carded combed or otherwise processed for spinning >> of viscose rayon obtained from wood other than bamboo >> other kg obtained from bamboo +55041020,"ARTIFICIAL STAPLE FIBRES, NOT CARDED, COMBED OR OTHERWISE PROCESSED FOR:Of viscose rayon:Obtained from bamboo(OLD tariff)" +55041021,artificial staple fibres not carded combed or otherwise processed for spinning >> of viscose rayon obtained from wood other than bamboo >> flame retaradant viscose rayon fibre kg +55041029,artificial staple fibres not carded combed or otherwise processed for spinning >> of viscose rayon obtained from wood other than bamboo >> other kg +55041090,"ARTIFICIAL STAPLE FIBRES, NOT CARDED, COMBED OR OTHERWISE PROCESSED FOR:Of viscose rayon:Other" +55049000,artificial staple fibres not carded combed or otherwise processed for spinning >> other +55049010,"ARTIFICIAL STAPLE FIBRES, NOT CARDED, COMBED OR OTHERWISE PROCESSED FOR:Other :Acetate rayon staple fibre" +55049020,"ARTIFICIAL STAPLE FIBRES, NOT CARDED, COMBED OR OTHERWISE PROCESSED FOR:Other :Polynosic staple fibre" +55049030,"ARTIFICIAL STAPLE FIBRES, NOT CARDED, COMBED OR OTHERWISE PROCESSED FOR:Other :High wet modulus staple fibre" +55049090,"ARTIFICIAL STAPLE FIBRES, NOT CARDED, COMBED OR OTHERWISE PROCESSED FOR:Other :Other" +55050000,waste including noils yarn waste and garnetted stock of fibres +55051000,waste including noils yarn waste and garnetted stock of fibres >> of synthetic fibers +55051010,"WASTE (INCLUDING NOILS, YARN WASTE AND GARNETTED STOCK) OF MAN-MADE FIBRES:Of synthetic 
fibers :Of acrylic" +55051090,"WASTE (INCLUDING NOILS, YARN WASTE AND GARNETTED STOCK) OF MAN-MADE FIBRES:Of synthetic fibers :Other" +55052000,"WASTE (INCLUDING NOILS, YARN WASTE AND GARNETTED STOCK) OF MAN-MADE FIBRES::Of artificial fibres" +55060000,synthetic staple fibres carded combed or otherwise processed for spinning +55061000,"SYNTHETIC STAPLE FIBRES, CARDED COMBED OR OTHERWISE PROCESSED FOR SPINNING::Of nylon or other polyamides" +55062000,"SYNTHETIC STAPLE FIBRES, CARDED COMBED OR OTHERWISE PROCESSED FOR SPINNING::Of polyesters" +55063000,"SYNTHETIC STAPLE FIBRES, CARDED COMBED OR OTHERWISE PROCESSED FOR SPINNING::Acrylic or modacrylic" +55064000,"SYNTHETIC STAPLE FIBRES, CARDED COMBED OR OTHERWISE PROCESSED FOR SPINNING::Of polypropylene" +55069000,synthetic staple fibres carded combed or otherwise processed for spinning >> other +55069010,"SYNTHETIC STAPLE FIBRES, CARDED COMBED OR OTHERWISE PROCESSED FOR SPINNING:Other :Polypropylene tops" +55069090,"SYNTHETIC STAPLE FIBRES, CARDED COMBED OR OTHERWISE PROCESSED FOR SPINNING:Other :Other" +55070000,artificial staple fibres carded combed or otherwise processed for spinning artificial staple fibres carded combed or otherwise processedfor spinning +55070010,"ARTIFICIAL STAPLE FIBRES, CARDED, COMBED OR OTHERWISE PROCESSED FOR SPINNING:Artificial staple fibres, carded, combed or otherwise processed for spinning :Acetate rayon tops" +55070020,"ARTIFICIAL STAPLE FIBRES, CARDED, COMBED OR OTHERWISE PROCESSED FOR SPINNING:Artificial staple fibres, carded, combed or otherwise processed for spinning :Viscose tops" +55070030,"ARTIFICIAL STAPLE FIBRES, CARDED, COMBED OR OTHERWISE PROCESSED FOR SPINNING:Artificial staple fibres, carded, combed or otherwise processed for spinning :Polynosic tops" +55070040,"ARTIFICIAL STAPLE FIBRES, CARDED, COMBED OR OTHERWISE PROCESSED FOR SPINNING:Artificial staple fibres, carded, combed or otherwise processed for spinning :High wet modulus tops" +55070090,"ARTIFICIAL STAPLE FIBRES, CARDED, COMBED OR OTHERWISE PROCESSED FOR SPINNING:Artificial staple fibres, carded, combed or otherwise processed for spinning :Other" +55080000,sewing thread of staple fibres whether or not put up for retail sale +55081000,"SEWING THREAD OF MAN-MADE STAPLE FIBRES, WHETHER OR NOT PUT UP FOR RETAIL SALE::Of synthetic staple fibres" +55082000,"SEWING THREAD OF MAN-MADE STAPLE FIBRES, WHETHER OR NOT PUT UP FOR RETAIL SALE::Of artificial staple fibres" +55090000,yarn other than sewing thread of synthetic staple fibres not put up for retail sale containing 85 or more by weight of staple fibres of nylon or other polyamides +55091100,"YARN (OTHER THAN SEWING THREAD) OF SYNTHETIC STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE::Single yarn" +55091200,"YARN (OTHER THAN SEWING THREAD) OF SYNTHETIC STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE::Multiple (folded) or cabled yarn" +55092100,"YARN (OTHER THAN SEWING THREAD) OF SYNTHETIC STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE::Single yarn" +55092200,"YARN (OTHER THAN SEWING THREAD) OF SYNTHETIC STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE::Multiple (folded) or cabled yarn" +55093100,"YARN (OTHER THAN SEWING THREAD) OF SYNTHETIC STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE::Single yarn" +55093200,"YARN (OTHER THAN SEWING THREAD) OF SYNTHETIC STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE::Multiple (folded) or cabled yarn" +55094100,yarn other than sewing thread of synthetic staple fibres not put up for retail sale containing 85 or more by weight of staple fibres of nylon or other polyamides >> single yarn 
+55094110,"YARN (OTHER THAN SEWING THREAD) OF SYNTHETIC STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE:Single yarn :Polypropylene spun yarn" +55094120,"YARN (OTHER THAN SEWING THREAD) OF SYNTHETIC STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE:Single yarn :Polyvinyl acetate spun yarn" +55094130,"YARN (OTHER THAN SEWING THREAD) OF SYNTHETIC STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE:Single yarn :Polyvinyl chloride spun yarn" +55094190,"YARN (OTHER THAN SEWING THREAD) OF SYNTHETIC STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE:Single yarn :Other" +55094200,yarn other than sewing thread of synthetic staple fibres not put up for retail sale containing 85 or more by weight of staple fibres of nylon or other polyamides >> multiple folded or cabled yarn +55094210,"YARN (OTHER THAN SEWING THREAD) OF SYNTHETIC STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE:Multiple (folded) or cabled yarn :Polypropylene spun yarn" +55094220,"YARN (OTHER THAN SEWING THREAD) OF SYNTHETIC STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE:Multiple (folded) or cabled yarn :Polyvinyl acetate (PVA) spun yarn" +55094230,"YARN (OTHER THAN SEWING THREAD) OF SYNTHETIC STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE:Multiple (folded) or cabled yarn :Polyvinyl chloride (PVC) spun yarn" +55094290,"YARN (OTHER THAN SEWING THREAD) OF SYNTHETIC STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE:Multiple (folded) or cabled yarn :Other" +55095100,"YARN (OTHER THAN SEWING THREAD) OF SYNTHETIC STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE::Mixed mainly or solely with artificial staple fibres" +55095200,"YARN (OTHER THAN SEWING THREAD) OF SYNTHETIC STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE::Mixed mainly or solely with wool or fine animal hair" +55095300,"YARN (OTHER THAN SEWING THREAD) OF SYNTHETIC STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE::Mixed mainly or solely with cotton" +55095900,"YARN (OTHER THAN SEWING THREAD) OF SYNTHETIC STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE::Other" +55096100,"YARN (OTHER THAN SEWING THREAD) OF SYNTHETIC STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE::Mixed mainly or solely with wool or fine animal hair" +55096200,"YARN (OTHER THAN SEWING THREAD) OF SYNTHETIC STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE::Mixed mainly or solely with cotton" +55096900,"YARN (OTHER THAN SEWING THREAD) OF SYNTHETIC STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE::Other" +55099100,"YARN (OTHER THAN SEWING THREAD) OF SYNTHETIC STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE::Mixed mainly or solely with wool or fine animal hair" +55099200,"YARN (OTHER THAN SEWING THREAD) OF SYNTHETIC STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE::Mixed mainly or solely with cotton" +55099900,"YARN (OTHER THAN SEWING THREAD) OF SYNTHETIC STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE::Other" +55099910,yarn other than sewing thread of synthetic staple fibres not put up for retail sale containing 85 or more by weight of staple fibres of nylon or other polyamides >> other >> yarn made of 100 inherent fr synthetic fibre +55099990,yarn other than sewing thread of synthetic staple fibres not put up for retail sale containing 85 or more by weight of staple fibres of nylon or other polyamides >> other >> other +55100000,yarn other than sewing of artificial staple thread fibres not put up for retail sale containing 85 or more by weight of artificial staple fibres +55101100,yarn other than sewing of artificial staple thread fibres not put up for retail sale containing 85 or more by weight of artificial staple fibres >> single yarn +55101110,"YARN (OTHER THAN SEWING THREAD) OF ARTIFICIAL STAPLE FIBRES, NOT PUT UP FOR RETAIL 
SALE:Single yarn :Viscose rayon spun yarn" +55101120,"YARN (OTHER THAN SEWING THREAD) OF ARTIFICIAL STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE:Single yarn :Acetate rayon spun yarn" +55101190,"YARN (OTHER THAN SEWING THREAD) OF ARTIFICIAL STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE:Single yarn :Other" +55101200,yarn other than sewing of artificial staple thread fibres not put up for retail sale containing 85 or more by weight of artificial staple fibres >> multiple folded or cabled yarn +55101210,"YARN (OTHER THAN SEWING THREAD) OF ARTIFICIAL STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE:Multiple (folded) or cabled yarn :Viscose rayon spun yarn" +55101220,"YARN (OTHER THAN SEWING THREAD) OF ARTIFICIAL STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE:Multiple (folded) or cabled yarn :Acetate rayon spun yarn" +55101290,"YARN (OTHER THAN SEWING THREAD) OF ARTIFICIAL STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE:Multiple (folded) or cabled yarn :Other" +55102000,yarn other than sewing of artificial staple thread fibres not put up for retail sale containing 85 or more by weight of artificial staple fibres >> other yarn mixed mainly or solely with wool or fine animal hair +55102010,"YARN (OTHER THAN SEWING THREAD) OF ARTIFICIAL STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE:Other yarn, mixed mainly or solely with wool or fine animal hair :Viscose rayon spun yarn" +55102020,"YARN (OTHER THAN SEWING THREAD) OF ARTIFICIAL STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE:Other yarn, mixed mainly or solely with wool or fine animal hair :Acetate rayon spun yarn" +55102090,"YARN (OTHER THAN SEWING THREAD) OF ARTIFICIAL STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE:Other yarn, mixed mainly or solely with wool or fine animal hair :Other" +55103000,yarn other than sewing of artificial staple thread fibres not put up for retail sale containing 85 or more by weight of artificial staple fibres >> other yarn mixed mainly or solely with cotton +55103010,"YARN (OTHER THAN SEWING THREAD) OF ARTIFICIAL STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE:Other yarn, mixed mainly or solely with cotton :Viscose rayon spun yarn" +55103020,"YARN (OTHER THAN SEWING THREAD) OF ARTIFICIAL STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE:Other yarn, mixed mainly or solely with cotton :Acetate rayon spun yarn" +55103090,"YARN (OTHER THAN SEWING THREAD) OF ARTIFICIAL STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE:Other yarn, mixed mainly or solely with cotton :Other" +55109000,yarn other than sewing of artificial staple thread fibres not put up for retail sale containing 85 or more by weight of artificial staple fibres >> other yarn +55109010,"YARN (OTHER THAN SEWING THREAD) OF ARTIFICIAL STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE:Other yarn :Viscose rayon spun yarn" +55109020,"YARN (OTHER THAN SEWING THREAD) OF ARTIFICIAL STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE:Other yarn :Acetate rayon spun yarn" +55109030,yarn other than sewing of artificial staple thread fibres not put up for retail sale containing 85 or more by weight of artificial staple fibres >> other yarn >> yarn made of 100 inherent fr artificial fibre +55109090,"YARN (OTHER THAN SEWING THREAD) OF ARTIFICIAL STAPLE FIBRES, NOT PUT UP FOR RETAIL SALE:Other yarn :Other" +55110000,yarn other than sewing staple thread of fibres put up for retail sale +55111000,"YARN (OTHER THAN SEWING THREAD) OF MAN-MADE STAPLE FIBRES, PUT UP FOR RETAIL SALE::Of synthetic staple fibres, containing 85% or more by weight of such fibres" +55112000,"YARN (OTHER THAN SEWING THREAD) OF MAN-MADE STAPLE FIBRES, PUT UP FOR RETAIL SALE::Of synthetic 
staple fibres, containing less than 85% by weight of such fibres" +55113000,yarn other than sewing staple thread of fibres put up for retail sale >> of artificial staple fibres +55113010,"YARN (OTHER THAN SEWING THREAD) OF MAN-MADE STAPLE FIBRES, PUT UP FOR RETAIL SALE:Of artificial staple fibres :Containing more than 85% by weight of staple fibre whichever is higher" +55113090,"YARN (OTHER THAN SEWING THREAD) OF MAN-MADE STAPLE FIBRES, PUT UP FOR RETAIL SALE:Of artificial staple fibres :Other" +55120000,staple fibres containing 85 or more by weight of +55121100,staple fibres containing 85 or more by weight of >> unbleached or bleached +55121110,"Woven fabrics of synthetic staple fibres, CONTAINING 85% OR MORE BY WEIGHT OF SYNTHETIC STAPLE FIBRES:Unbleached or bleached :Unbleached" +55121120,"Woven fabrics of synthetic staple fibres, CONTAINING 85% OR MORE BY WEIGHT OF SYNTHETIC STAPLE FIBRES:Unbleached or bleached :Bleached" +55121900,staple fibres containing 85 or more by weight of >> other +55121910,"Woven fabrics of synthetic staple fibres, CONTAINING 85% OR MORE BY WEIGHT OF SYNTHETIC STAPLE FIBRES:Other :Dyed" +55121920,"Woven fabrics of synthetic staple fibres, CONTAINING 85% OR MORE BY WEIGHT OF SYNTHETIC STAPLE FIBRES:Other :Printed" +55121990,"Woven fabrics of synthetic staple fibres, CONTAINING 85% OR MORE BY WEIGHT OF SYNTHETIC STAPLE FIBRES:Other :Other" +55122100,staple fibres containing 85 or more by weight of >> unbleached or bleached +55122110,"Woven fabrics of synthetic staple fibres, CONTAINING 85% OR MORE BY WEIGHT OF SYNTHETIC STAPLE FIBRES:Unbleached or bleached :Unbleached" +55122120,"Woven fabrics of synthetic staple fibres, CONTAINING 85% OR MORE BY WEIGHT OF SYNTHETIC STAPLE FIBRES:Unbleached or bleached :Bleached" +55122900,staple fibres containing 85 or more by weight of >> other +55122910,"Woven fabrics of synthetic staple fibres, CONTAINING 85% OR MORE BY WEIGHT OF SYNTHETIC STAPLE FIBRES:Other :Dyed" +55122920,"Woven fabrics of synthetic staple fibres, CONTAINING 85% OR MORE BY WEIGHT OF SYNTHETIC STAPLE FIBRES:Other :Printed" +55122990,"Woven fabrics of synthetic staple fibres, CONTAINING 85% OR MORE BY WEIGHT OF SYNTHETIC STAPLE FIBRES:Other :Other" +55129100,staple fibres containing 85 or more by weight of >> unbleached or bleached +55129110,"Woven fabrics of synthetic staple fibres, CONTAINING 85% OR MORE BY WEIGHT OF SYNTHETIC STAPLE FIBRES:Unbleached or bleached :Unbleached" +55129120,"Woven fabrics of synthetic staple fibres, CONTAINING 85% OR MORE BY WEIGHT OF SYNTHETIC STAPLE FIBRES:Unbleached or bleached :Bleached" +55129900,staple fibres containing 85 or more by weight of >> other +55129910,"Woven fabrics of synthetic staple fibres, CONTAINING 85% OR MORE BY WEIGHT OF SYNTHETIC STAPLE FIBRES:Other :Dyed" +55129920,"Woven fabrics of synthetic staple fibres, CONTAINING 85% OR MORE BY WEIGHT OF SYNTHETIC STAPLE FIBRES:Other :Printed" +55129990,"Woven fabrics of synthetic staple fibres, CONTAINING 85% OR MORE BY WEIGHT OF SYNTHETIC STAPLE FIBRES:Other :Other" +55130000,woven fabrics of synthetic staple fibres containing less +55131100,woven fabrics of synthetic staple fibres containing less >> of polyester staple fibres plain weave +55131110,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT NOT EXCEEDING 170 g/m2:Of polyester staple fibres, plain weave :Unbleached" +55131120,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH 
FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT NOT EXCEEDING 170 g/m2:Of polyester staple fibres, plain weave :Bleached" +55131200,woven fabrics of synthetic staple fibres containing less >> or twill including cross twill of polyester staple fibres +55131210,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT NOT EXCEEDING 170 g/m2:3-thread or 4-thread twill, including cross twill, of polyester staple fibres :Unbleached" +55131220,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT NOT EXCEEDING 170 g/m2:3-thread or 4-thread twill, including cross twill, of polyester staple fibres :Bleached" +55131300,woven fabrics of synthetic staple fibres containing less >> other woven fabrics of polyester staple fibres +55131310,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT NOT EXCEEDING 170 g/m2:Other woven fabrics of polyester staple fibres :Unbleached" +55131320,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT NOT EXCEEDING 170 g/m2:Other woven fabrics of polyester staple fibres :Bleached" +55131900,woven fabrics of synthetic staple fibres containing less >> other woven fabrics +55131910,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT NOT EXCEEDING 170 g/m2:Other woven fabrics :Unbleached" +55131920,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT NOT EXCEEDING 170 g/m2:Other woven fabrics :Bleached" +55132100,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT NOT EXCEEDING 170 g/m2::Of polyester staple fibres, plain weave" +55132300,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT NOT EXCEEDING 170 g/m2::Other woven fabrics of polyester staple fibres" +55132900,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT NOT EXCEEDING 170 g/m2::Other woven fabrics" +55133100,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT NOT EXCEEDING 170 g/m2::Of polyester staple fibres, plain weave" +55133900,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT NOT EXCEEDING 170 g/m2::Other woven fabrics" +55134100,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT NOT EXCEEDING 170 g/m2::Of polyester staple fibres, plain weave" +55134900,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT NOT EXCEEDING 170 g/m2::Other woven fabrics" +55140000,woven fabrics of synthetic staple fibres containing less than 85 by weight of such fibres mixed mainly or solely with cotton of a weight exceeding 170 unbleached or bleached +55141100,woven fabrics of 
synthetic staple fibres containing less than 85 by weight of such fibres mixed mainly or solely with cotton of a weight exceeding 170 unbleached or bleached >> of polyester staple fibres plain weave +55141110,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT EXCEEDING 170 G/M2 2:Of polyester staple fibres, plain weave :Unbleached" +55141120,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT EXCEEDING 170 G/M2 2:Of polyester staple fibres, plain weave :Bleached" +55141200,woven fabrics of synthetic staple fibres containing less than 85 by weight of such fibres mixed mainly or solely with cotton of a weight exceeding 170 unbleached or bleached >> or twill including cross twill of polyester staple fibres +55141210,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT EXCEEDING 170 G/M2 2:3-thread or 4-thread twill, including cross twill, of polyester staple fibres :Unbleached" +55141220,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT EXCEEDING 170 G/M2 2:3-thread or 4-thread twill, including cross twill, of polyester staple fibres :Bleached" +55141900,woven fabrics of synthetic staple fibres containing less than 85 by weight of such fibres mixed mainly or solely with cotton of a weight exceeding 170 unbleached or bleached >> other +55141910,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT EXCEEDING 170 G/M2 2:Other :Unbleached" +55141920,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT EXCEEDING 170 G/M2 2:Other :Bleached" +55142100,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT EXCEEDING 170 G/M2 2::Of polyester staple fibres, plain weave" +55142200,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT EXCEEDING 170 G/M2 2::3-thread or 4-thread twill, including cross twill, of polyester staple fibres" +55142300,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT EXCEEDING 170 G/M2 2::Other woven fabrics of polyester staple fibres" +55142900,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT EXCEEDING 170 G/M2 2::Other woven fabrics" +55143000,woven fabrics of synthetic staple fibres containing less than 85 by weight of such fibres mixed mainly or solely with cotton of a weight exceeding 170 unbleached or bleached >> of yarns of different colours +55143011,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT EXCEEDING 170 G/M2 2:Of yarns of different colours :Of polyester staple fibres, plain weave" +55143012,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT EXCEEDING 170 G/M2 2:Of yarns of different colours :3-thread 
or 4-thread twill, including cross twill, of polyester staple fibres metre," +55143013,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT EXCEEDING 170 G/M2 2:Of yarns of different colours :Other woven fabrics of polyester staple fibres" +55143019,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT EXCEEDING 170 G/M2 2:Of yarns of different colours :Other woven fabrics" +55144100,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT EXCEEDING 170 G/M2 2::Of polyester staple fibres, plain weave" +55144200,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT EXCEEDING 170 G/M2 2::3-thread or 4-thread twill, including cross twill, of polyester staple fibres" +55144300,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT EXCEEDING 170 G/M2 2::Other woven fabrics of polyester staple fibres" +55144900,"WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES, CONTAINING LESS THAN 85% BY WEIGHT OF SUCH FIBRES, MIXED MAINLY OR SOLELY WITH COTTON, OF A WEIGHT EXCEEDING 170 G/M2 2::Other woven fabrics" +55150000,other woven fabrics of synthetic staple fibres of polyester staple fibres +55151100,other woven fabrics of synthetic staple fibres of polyester staple fibres >> mixed mainly or solely with viscos rayon staple fibres +55151110,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with viscos rayon staple fibres :Unbleached +55151120,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with viscos rayon staple fibres :Bleached +55151130,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with viscos rayon staple fibres :Dyed +55151140,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with viscos rayon staple fibres :Printed +55151190,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with viscos rayon staple fibres :Other +55151200,other woven fabrics of synthetic staple fibres of polyester staple fibres >> mixed mainly or solely with filaments +55151210,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with man-made filaments :Unbleached +55151220,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with man-made filaments :Bleached +55151230,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with man-made filaments :Dyed +55151240,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with man-made filaments :Printed +55151290,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with man-made filaments :Other +55151300,other woven fabrics of synthetic staple fibres of polyester staple fibres >> mixed mainly or solely with wool or fine animal hair +55151310,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with wool or fine animal hair :Unbleached +55151320,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with wool or fine animal hair :Bleached +55151330,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with wool or fine animal hair :Dyed +55151340,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with wool or fine 
animal hair :Printed +55151390,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with wool or fine animal hair :Other +55151900,other woven fabrics of synthetic staple fibres of polyester staple fibres >> other +55151910,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Other:Unbleached +55151920,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Other:Bleached +55151930,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Other:Dyed +55151940,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Other:Printed +55151990,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Other:Other +55152100,other woven fabrics of synthetic staple fibres of polyester staple fibres >> mixed mainly or solely with filaments +55152110,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with man-made filaments :Unbleached +55152120,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with man-made filaments :Bleached +55152130,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with man-made filaments :Dyed +55152140,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with man-made filaments :Printed +55152190,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with man-made filaments :Other +55152200,other woven fabrics of synthetic staple fibres of polyester staple fibres >> mixed mainly or solely with wool or fine animal hair +55152210,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with wool or fine animal hair :Unbleached +55152220,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with wool or fine animal hair :Bleached +55152230,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with wool or fine animal hair :Dyed +55152240,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with wool or fine animal hair :Printed +55152290,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with wool or fine animal hair :Other +55152900,other woven fabrics of synthetic staple fibres of polyester staple fibres >> other +55152910,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Other :Unbleached +55152920,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Other :Bleached +55152930,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Other :Dyed +55152940,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Other :Printed +55152990,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Other :Other +55159100,other woven fabrics of synthetic staple fibres of polyester staple fibres >> mixed mainly or solely with filaments +55159110,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with man-made filaments :Unbleached +55159120,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with man-made filaments :Bleached +55159130,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with man-made filaments :Dyed +55159140,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with man-made filaments :Printed +55159190,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Mixed mainly or solely with man-made filaments :Other +55159900,other woven fabrics of synthetic staple fibres of polyester staple fibres >> other +55159910,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Other :Unbleached +55159920,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Other :Bleached +55159930,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Other :Dyed +55159940,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Other :Printed +55159950,other woven 
fabrics of synthetic staple fibres of polyester staple fibres >> other >> fabrics made of 100 inherent fr synthetic fibre +55159990,OTHER WOVEN FABRICS OF SYNTHETIC STAPLE FIBRES:Other :Other +55160000,woven fabrics of artificial staple fibres containing 85 or more by weight of artificial staple fibres +55161100,woven fabrics of artificial staple fibres containing 85 or more by weight of artificial staple fibres >> unbleached or bleached +55161110,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES:Unbleached or bleached :Unbleached +55161120,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES:Unbleached or bleached :Bleached +55161200,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES::Dyed +55161300,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES::Of yarns of different colours +55161400,woven fabrics of artificial staple fibres containing 85 or more by weight of artificial staple fibres >> printed +55161410,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES:Printed :Spun rayon printed shantung +55161420,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES:Printed :Spun rayon printed linen +55161490,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES:Printed :Other +55162100,woven fabrics of artificial staple fibres containing 85 or more by weight of artificial staple fibres >> +55162110,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES:Unbleached or bleached :Unbleached +55162120,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES:Unbleached or bleached :Bleached +55162200,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES::Dyed +55162300,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES::Of yarns of different colours +55162400,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES::Printed +55163100,woven fabrics of artificial staple fibres containing 85 or more by weight of artificial staple fibres >> +55163110,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES:Unbleached or bleached :Unbleached +55163120,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES:Unbleached or bleached :Bleached +55163200,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES::Dyed +55163300,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES::Of yarns of different colours +55163400,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES::Printed +55164100,woven fabrics of artificial staple fibres containing 85 or more by weight of artificial staple fibres >> unbleached or bleached +55164110,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES:Unbleached or bleached :Unbleached +55164120,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES:Unbleached or bleached :Bleached +55164200,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES::Dyed +55164300,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES::Of yarns of different colours +55164400,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES::Printed +55169100,woven fabrics of artificial staple fibres containing 85 or more by weight of artificial staple fibres >> unbleached or bleached +55169110,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES:Unbleached or bleached :Unbleached +55169120,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES:Unbleached or bleached :Bleached +55169200,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES::Dyed +55169300,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES::Of yarns of different colours +55169400,WOVEN FABRICS OF ARTIFICIAL STAPLE FIBRES::Printed +56010000,wadding of textile materials and articles thereof textile fibres not exceeding 5 mm in length flock textile dust and mill neps wadding of textile materials and articles thereof +56012100,wadding of textile materials and articles thereof textile fibres not exceeding 5 mm in length flock textile dust and mill neps wadding of textile materials and articles thereof >> of cotton +56012110,"WADDING OF TEXTILE MATERIALS AND 
ARTICLES THEREOF; TEXTILE FIBRES, NOT EXCEEDING 5 MM IN LENGTH (FLOCK), TEXTILE DUST AND MILL NEPS:Of cotton :Absorbent cotton wool" +56012190,"WADDING OF TEXTILE MATERIALS AND ARTICLES THEREOF; TEXTILE FIBRES, NOT EXCEEDING 5 MM IN LENGTH (FLOCK), TEXTILE DUST AND MILL NEPS:Of cotton :Other" +56012200,"WADDING OF TEXTILE MATERIALS AND ARTICLES THEREOF; TEXTILE FIBRES, NOT EXCEEDING 5 MM IN LENGTH (FLOCK), TEXTILE DUST AND MILL NEPS::Of man-made fibres" +56012900,"WADDING OF TEXTILE MATERIALS AND ARTICLES THEREOF; TEXTILE FIBRES, NOT EXCEEDING 5 MM IN LENGTH (FLOCK), TEXTILE DUST AND MILL NEPS::Other" +56013000,"WADDING OF TEXTILE MATERIALS AND ARTICLES THEREOF; TEXTILE FIBRES, NOT EXCEEDING 5 MM IN LENGTH (FLOCK), TEXTILE DUST AND MILL NEPS::Textile flock and dust and mill neps" +56020000,felt whether or not impregnated coated covered or laminated +56021000,"FELT, WHETHER OR NOT IMPREGNATED, COATED, COVERED OR LAMINATED::Needleloom felt and stitch-bonded fibre fabrics" +56022100,"FELT, WHETHER OR NOT IMPREGNATED, COATED, COVERED OR LAMINATED::Of wool or fine animal hair" +56022900,felt whether or not impregnated coated covered or laminated >> of other textile materials +56022910,"FELT, WHETHER OR NOT IMPREGNATED, COATED, COVERED OR LAMINATED:Of other textile materials :For machines other than cotton machinery" +56022920,"FELT, WHETHER OR NOT IMPREGNATED, COATED, COVERED OR LAMINATED:Of other textile materials :Of jute (including blended or union jute), other than for machinery" +56022990,"FELT, WHETHER OR NOT IMPREGNATED, COATED, COVERED OR LAMINATED:Of other textile materials :Other" +56029000,"FELT, WHETHER OR NOT IMPREGNATED, COATED, COVERED OR LAMINATED:other:Other" +56029010,"FELT, WHETHER OR NOT IMPREGNATED, COATED, COVERED OR LAMINATED:other:of rubberised coir, needled felt" +56029090,"FELT, WHETHER OR NOT IMPREGNATED, COATED, COVERED OR LAMINATED:other:Other" +56030000,nonwovens whether or not impregnated coated covered or laminated of filaments +56031100,"NONWOVENS, WHETHER OR NOT IMPREGNATED, COATED, COVERED OR LAMINATED::Weighing not more than 25 g/m2" +56031110,nonwovens whether or not impregnated coated covered or laminated of filaments >> weighing not more than 25 >> crop covers conforming to is 16718 +56031190,nonwovens whether or not impregnated coated covered or laminated of filaments >> weighing not more than 25 >> other +56031200,"NONWOVENS, WHETHER OR NOT IMPREGNATED, COATED, COVERED OR LAMINATED::Weighing more than 25 g/m2 but not more than 70 g/m2" +56031300,"NONWOVENS, WHETHER OR NOT IMPREGNATED, COATED, COVERED OR LAMINATED::Weighing more than 70 g/m2 but not more than 150 g/m2" +56031400,"NONWOVENS, WHETHER OR NOT IMPREGNATED, COATED, COVERED OR LAMINATED::Weighing more than 150 g/m2" +56039100,"NONWOVENS, WHETHER OR NOT IMPREGNATED, COATED, COVERED OR LAMINATED::Weighing not more than 25 g/m2" +56039200,"NONWOVENS, WHETHER OR NOT IMPREGNATED, COATED, COVERED OR LAMINATED::Weighing more than 25 g/m2 but not more than 70 g/m2" +56039300,"NONWOVENS, WHETHER OR NOT IMPREGNATED, COATED, COVERED OR LAMINATED::Weighing more than 70 g/m2 but not more than 150 g/m2" +56039310,nonwovens whether or not impregnated coated covered or laminated of filaments >> weighing more than 70 but not more than 150 >> mulch mats conforming to is 17355 +56039390,nonwovens whether or not impregnated coated covered or laminated of filaments >> weighing more than 70 but not more than 150 >> other +56039400,"NONWOVENS, WHETHER OR NOT IMPREGNATED, COATED, COVERED OR LAMINATED::Weighing more 
than 150 g/m2" +56039410,nonwovens whether or not impregnated coated covered or laminated of filaments >> weighing more than 150 >> geotextile and articles thereof conforming to is 16391 is 16392 +56039420,nonwovens whether or not impregnated coated covered or laminated of filaments >> weighing more than 150 >> mulch mats conforming to is 17355 +56039490,nonwovens whether or not impregnated coated covered or laminated of filaments >> weighing more than 150 >> other +56040000,rubber thread and cord textile covered textile yarn and strip and the like of heading 5404 or 5405 impregnated coated covered or sheathed with rubber or plastics +56041000,"RUBBER THREAD AND CORD, TEXTILE COVERED; TEXTILE YARN, AND STRIP AND THE LIKE OF HEADING 5404 OR 5405, IMPREGNATED, COATED, COVERED OR SHEATHED WITH RUBBER OR PLASTICS::Rubber thread and cord, textile covered" +56049000,"RUBBER THREAD AND CORD, TEXTILE COVERED; TEXTILE YARN, AND STRIP AND THE LIKE OF HEADING 5404 OR 5405, IMPREGNATED, COATED, COVERED OR SHEATHED WITH RUBBER OR PLASTICS::Other" +56050000,metallised yarn whether or not gimped being textile yarn or strip or the like of heading 5404 or 5405 combined withmetal in the form of thread strip or powder or covered with metal metallised yarn whether or not gimped being textile yarn or strip or the like of heading 5404 or 5405 combined with metal in the form of thread strip or powder or covered with metal +56050010,"METALLISED YARN, WHETHER OR NOT GIMPED, BEING TEXTILE YARN, OR STRIP OR THE LIKE OF HEADING 5404 OR 5405, COMBINED WITH METAL IN THE FORM OF THREAD, STRIP OR POWDER OR COVERED WITH METAL:Metallised yarn, whether or not gimped, being textile yarn, or strip or the like of heading 5404 or 5405, combined with metal in the form of thread, strip or powder or covered with metal :Real zari thread (gold) and silver thread combined with textile thread" +56050020,"METALLISED YARN, WHETHER OR NOT GIMPED, BEING TEXTILE YARN, OR STRIP OR THE LIKE OF HEADING 5404 OR 5405, COMBINED WITH METAL IN THE FORM OF THREAD, STRIP OR POWDER OR COVERED WITH METAL:Metallised yarn, whether or not gimped, being textile yarn, or strip or the like of heading 5404 or 5405, combined with metal in the form of thread, strip or powder or covered with metal :Imitation zari thread" +56050090,"METALLISED YARN, WHETHER OR NOT GIMPED, BEING TEXTILE YARN, OR STRIP OR THE LIKE OF HEADING 5404 OR 5405, COMBINED WITH METAL IN THE FORM OF THREAD, STRIP OR POWDER OR COVERED WITH METAL:Metallised yarn, whether or not gimped, being textile yarn, or strip or the like of heading 5404 or 5405, combined with metal in the form of thread, strip or powder or covered with metal :Other" +56060000,the like of strip and gimped yarn and heading 5404 or 5405 gimped other than those of heading 5605 and gimped horsehair chenille yarn flock yarn including chenille yarn loop gimped yarn and strip and the like of heading 5404 or 5405 gimped other than those of +56060010,"GIMPED YARN, AND STRIP AND THE LIKE OF HEADING 5404 OR 5405, GIMPED (OTHER THAN THOSE OF HEADING 5605 AND GIMPED HORSEHAIR YARN); CHENILLE YARN (INCLUDING FLOCK CHENILLE YARN); LOOP WALE-YARN:Gimped yarn, and strip and the like of heading 5404 or 5405, gimped (other than those of heading 5605 and gimped horsehair yarn); chenille yarn (including flock chenille yarn); loop wale-yarn :Trimmings, of cotton" +56060020,"GIMPED YARN, AND STRIP AND THE LIKE OF HEADING 5404 OR 5405, GIMPED (OTHER THAN THOSE OF HEADING 5605 AND GIMPED HORSEHAIR YARN); CHENILLE YARN (INCLUDING FLOCK CHENILLE 
YARN); LOOP WALE-YARN:Gimped yarn, and strip and the like of heading 5404 or 5405, gimped (other than those of heading 5605 and gimped horsehair yarn); chenille yarn (including flock chenille yarn); loop wale-yarn :Trimmings, of man-made fibres" +56060030,"GIMPED YARN, AND STRIP AND THE LIKE OF HEADING 5404 OR 5405, GIMPED (OTHER THAN THOSE OF HEADING 5605 AND GIMPED HORSEHAIR YARN); CHENILLE YARN (INCLUDING FLOCK CHENILLE YARN); LOOP WALE-YARN:Gimped yarn, and strip and the like of heading 5404 or 5405, gimped (other than those of heading 5605 and gimped horsehair yarn); chenille yarn (including flock chenille yarn); loop wale-yarn :Trimmings, of zari" +56060090,"GIMPED YARN, AND STRIP AND THE LIKE OF HEADING 5404 OR 5405, GIMPED (OTHER THAN THOSE OF HEADING 5605 AND GIMPED HORSEHAIR YARN); CHENILLE YARN (INCLUDING FLOCK CHENILLE YARN); LOOP WALE-YARN:Gimped yarn, and strip and the like of heading 5404 or 5405, gimped (other than those of heading 5605 and gimped horsehair yarn); chenille yarn (including flock chenille yarn); loop wale-yarn :Other" +56070000,twine cordage ropes and cables whether or not plaited or braided and whether or not impregnated coated covered or sheathed with rubber or plastics of sisal or other textile fibres of the genus agave +56072100,"TWINE, CORDAGE, ROPES AND CABLES, WHETHER OR NOT PLAITED OR BRAIDED AND WHETHER OR NOT IMPREGNATED, COATED, COVERED OR SHEATHED WITH RUBBER OR PLASTICS::Binder or baler twine" +56072900,"TWINE, CORDAGE, ROPES AND CABLES, WHETHER OR NOT PLAITED OR BRAIDED AND WHETHER OR NOT IMPREGNATED, COATED, COVERED OR SHEATHED WITH RUBBER OR PLASTICS::Other" +56074100,"TWINE, CORDAGE, ROPES AND CABLES, WHETHER OR NOT PLAITED OR BRAIDED AND WHETHER OR NOT IMPREGNATED, COATED, COVERED OR SHEATHED WITH RUBBER OR PLASTICS::Binder or baler twine" +56074900,"TWINE, CORDAGE, ROPES AND CABLES, WHETHER OR NOT PLAITED OR BRAIDED AND WHETHER OR NOT IMPREGNATED, COATED, COVERED OR SHEATHED WITH RUBBER OR PLASTICS::Other" +56075000,twine cordage ropes and cables whether or not plaited or braided and whether or not impregnated coated covered or sheathed with rubber or plastics of sisal or other textile fibres of the genus agave >> of other synthetic fibres +56075010,"TWINE, CORDAGE, ROPES AND CABLES, WHETHER OR NOT PLAITED OR BRAIDED AND WHETHER OR NOT IMPREGNATED, COATED, COVERED OR SHEATHED WITH RUBBER OR PLASTICS:Of other synthetic fibres :Nylon fish net twine" +56075020,"TWINE, CORDAGE, ROPES AND CABLES, WHETHER OR NOT PLAITED OR BRAIDED AND WHETHER OR NOT IMPREGNATED, COATED, COVERED OR SHEATHED WITH RUBBER OR PLASTICS:Of other synthetic fibres :Nylon tyre cord" +56075030,"TWINE, CORDAGE, ROPES AND CABLES, WHETHER OR NOT PLAITED OR BRAIDED AND WHETHER OR NOT IMPREGNATED, COATED, COVERED OR SHEATHED WITH RUBBER OR PLASTICS:Of other synthetic fibres :Viscose tyre cord" +56075040,"TWINE, CORDAGE, ROPES AND CABLES, WHETHER OR NOT PLAITED OR BRAIDED AND WHETHER OR NOT IMPREGNATED, COATED, COVERED OR SHEATHED WITH RUBBER OR PLASTICS:Of other synthetic fibres :Nylon rope" +56075090,"TWINE, CORDAGE, ROPES AND CABLES, WHETHER OR NOT PLAITED OR BRAIDED AND WHETHER OR NOT IMPREGNATED, COATED, COVERED OR SHEATHED WITH RUBBER OR PLASTICS:Of other synthetic fibres :Other" +56079000,twine cordage ropes and cables whether or not plaited or braided and whether or not impregnated coated covered or sheathed with rubber or plastics of sisal or other textile fibres of the genus agave >> other +56079010,"TWINE, CORDAGE, ROPES AND CABLES, WHETHER OR NOT PLAITED OR BRAIDED 
AND WHETHER OR NOT IMPREGNATED, COATED, COVERED OR SHEATHED WITH RUBBER OR PLASTICS:Other :Coir, cordage and ropes, other than of cotton" +56079020,"TWINE, CORDAGE, ROPES AND CABLES, WHETHER OR NOT PLAITED OR BRAIDED AND WHETHER OR NOT IMPREGNATED, COATED, COVERED OR SHEATHED WITH RUBBER OR PLASTICS:Other :Cordage, cable, ropes and twine, of cotton" +56079090,"TWINE, CORDAGE, ROPES AND CABLES, WHETHER OR NOT PLAITED OR BRAIDED AND WHETHER OR NOT IMPREGNATED, COATED, COVERED OR SHEATHED WITH RUBBER OR PLASTICS:Other :Other" +56080000,knotted netting of twine cordage or rope made up fishing nets and other made up nets of textile materials of textile materials +56081100,knotted netting of twine cordage or rope made up fishing nets and other made up nets of textile materials of textile materials >> made up fishing nets +56081110,"KNOTTED NETTING OF TWINE, CORDAGE OR ROPE; MADE UP FISHING NETS AND OTHER MADE UP NETS, OF TEXTILE MATERIALS:Made up fishing nets :Made up fishing nets of nylon" +56081190,"KNOTTED NETTING OF TWINE, CORDAGE OR ROPE; MADE UP FISHING NETS AND OTHER MADE UP NETS, OF TEXTILE MATERIALS:Made up fishing nets :Other" +56081900,"KNOTTED NETTING OF TWINE, CORDAGE OR ROPE; MADE UP FISHING NETS AND OTHER MADE UP NETS, OF TEXTILE MATERIALS::Other" +56089000,knotted netting of twine cordage or rope made up fishing nets and other made up nets of textile materials of textile materials >> other +56089010,"KNOTTED NETTING OF TWINE, CORDAGE OR ROPE; MADE UP FISHING NETS AND OTHER MADE UP NETS, OF TEXTILE MATERIALS:Other :Of cotton" +56089020,"KNOTTED NETTING OF TWINE, CORDAGE OR ROPE; MADE UP FISHING NETS AND OTHER MADE UP NETS, OF TEXTILE MATERIALS:Other :Of jute" +56089090,"KNOTTED NETTING OF TWINE, CORDAGE OR ROPE; MADE UP FISHING NETS AND OTHER MADE UP NETS, OF TEXTILE MATERIALS:Other :Other" +56090000,articles of yarn strip or the like of heading 5404 or 5405 twine cordage rope or cables not elsewhere specified or included articles of yarn strip or the like of heading 5404 or 5405 twine cordage rope or cables not elsewhere specified or included +56090010,"ARTICLES OF YARN, STRIP OR THE LIKE OF HEADING 5404 OR 5405, TWINE, CORDAGE, ROPE OR CABLES, NOT ELSEWHERE SPECIFIED OR INCLUDED:Articles of yarn, strip or the like of heading 5404 or 5405, twine, cordage, rope or cables, not elsewhere specified or included:Products of coir" +56090020,"ARTICLES OF YARN, STRIP OR THE LIKE OF HEADING 5404 OR 5405, TWINE, CORDAGE, ROPE OR CABLES, NOT ELSEWHERE SPECIFIED OR INCLUDED:Articles of yarn, strip or the like of heading 5404 or 5405, twine, cordage, rope or cables, not elsewhere specified or included:Articles made up of cotton yarn" +56090030,"ARTICLES OF YARN, STRIP OR THE LIKE OF HEADING 5404 OR 5405, TWINE, CORDAGE, ROPE OR CABLES, NOT ELSEWHERE SPECIFIED OR INCLUDED:Articles of yarn, strip or the like of heading 5404 or 5405, twine, cordage, rope or cables, not elsewhere specified or included:Articles made up of jute" +56090090,"ARTICLES OF YARN, STRIP OR THE LIKE OF HEADING 5404 OR 5405, TWINE, CORDAGE, ROPE OR CABLES, NOT ELSEWHERE SPECIFIED OR INCLUDED:Articles of yarn, strip or the like of heading 5404 or 5405, twine, cordage, rope or cables, not elsewhere specified or included:Other" +57010000,carpets and other textile floor coverings knotted whether or not made up +57011000,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, KNOTTED, WHETHER OR NOT MADE UP:Of wool or fine animal hair:Of wool or fine animal hair" +57011010,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, KNOTTED, WHETHER OR NOT 
MADE UP:Of wool or fine animal hair:Hand-made" +57011090,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, KNOTTED, WHETHER OR NOT MADE UP:Of wool or fine animal hair:Other" +57019000,carpets and other textile floor coverings knotted whether or not made up >> of other textile materials of cotton +57019010,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, KNOTTED, WHETHER OR NOT MADE UP:Of other textile materials;:Of cotton(OLD tariff)" +57019011,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, KNOTTED, WHETHER OR NOT MADE UP:Of other textile materials;:Hand-made" +57019019,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, KNOTTED, WHETHER OR NOT MADE UP:Of other textile materials;:Other" +57019020,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, KNOTTED, WHETHER OR NOT MADE UP:Of other textile materials;:OF COIR INCLUDING GEO TEXTILE" +57019031,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, KNOTTED, WHETHER OR NOT MADE UP:Of other textile materials;:Hand-made" +57019039,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, KNOTTED, WHETHER OR NOT MADE UP:Of other textile materials;:Other" +57019090,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, KNOTTED, WHETHER OR NOT MADE UP:Of other textile materials;:Other" +57020000,and other carpets coverings woven not tufted whether and similar rugs +57021000,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs::""Kelem"", ""Schumacks"", ""Karamanie"" and similar hand-woven rugs" +57022000,and other carpets coverings woven not tufted whether and similar rugs >> floor coverings of coconut fibres coir +57022010,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Floor coverings of coconut fibres (coir):Coir matting, woven" +57022020,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Floor coverings of coconut fibres (coir):Coir carpets and other rugs" +57022090,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Floor coverings of coconut fibres (coir):Other" +57023100,and other carpets coverings woven not tufted whether and similar rugs >> of wool or fine animal hair +57023110,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of wool or fine animal hair :Carpets" +57023120,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of wool or fine animal hair :Druggets" +57023130,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of wool or fine animal hair :Mats and matting" +57023140,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of wool or fine animal hair :Carpeting , floor rugs and the like" +57023190,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? 
and similar hand-woven rugs:Of wool or fine animal hair :Other" +57023200,and other carpets coverings woven not tufted whether and similar rugs >> of textile material +57023210,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of man-made textile material :Carpets, carpeting and rugs and the like" +57023220,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of man-made textile material :Mats and matting" +57023290,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of man-made textile material :Other" +57023900,and other carpets coverings woven not tufted whether and similar rugs >> of other textile materials +57023910,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of other textile materials:Carpets and other floor coverings other than durries of cotton" +57023920,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of other textile materials:Carpets and other floor coverings, of silk" +57023990,and other carpets coverings woven not tufted whether and similar rugs >> of other textile materials >> other other of pile construction made up +57024100,and other carpets coverings woven not tufted whether and similar rugs >> of wool or fine animal hair +57024110,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of wool or fine animal hair :Carpets" +57024120,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of wool or fine animal hair :Druggets" +57024130,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of wool or fine animal hair :Mats and matting" +57024190,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of wool or fine animal hair :Other" +57024200,and other carpets coverings woven not tufted whether and similar rugs >> of man made textile materials +57024210,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of man-made textile materials:Carpets, carpeting and rugs" +57024220,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of man-made textile materials:Mats and mattings" +57024230,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? 
and similar hand-woven rugs:Of man-made textile materials:Carpets, rugs and mats of handloom" +57024290,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of man-made textile materials:Other" +57024900,and other carpets coverings woven not tufted whether and similar rugs >> of other textile materials +57024910,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of other textile materials :Carpets and other floor coverings other than durries of cotton" +57024920,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of other textile materials :Carpets and other floor coverings, of silk" +57024990,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of other textile materials :Other" +57025000,and other carpets coverings woven not tufted whether and similar rugs >> other not of pile construction not made up of textile materials +57025021,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of man-made textile materials;:Carpets, Carpeting and rugs" +57025022,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of man-made textile materials;:Mats and matting" +57025029,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of man-made textile materials;:Of other textile materials;" +57025031,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of man-made textile materials;:Carpets and other floor coverings, of cotton other than durries" +57025032,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of man-made textile materials;:Carpets and other floor coverings, of silk" +57025033,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of man-made textile materials;:Place mat and other similar goods" +57025039,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of man-made textile materials;:Other" +57029100,and other carpets coverings woven not tufted whether and similar rugs >> of wool or fine animal hair +57029110,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? and similar hand-woven rugs:Of wool or fine animal hair :Carpets" +57029120,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ?kelem?, ?Schumacks?, ?Karamanie? 
and similar hand-woven rugs:Of wool or fine animal hair :Druggets" +57029130,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ""kelem"", ""Schumacks"", ""Karamanie"" and similar hand-woven rugs:Of wool or fine animal hair :Mats and matting" +57029190,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ""kelem"", ""Schumacks"", ""Karamanie"" and similar hand-woven rugs:Of wool or fine animal hair :Other" +57029210,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ""kelem"", ""Schumacks"", ""Karamanie"" and similar hand-woven rugs:Of man-made textile materials :Carpets, carpeting and rugs(OLD tariff)" +57029220,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ""kelem"", ""Schumacks"", ""Karamanie"" and similar hand-woven rugs:Of man-made textile materials :Mats and mattings(OLD tariff)" +57029290,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ""kelem"", ""Schumacks"", ""Karamanie"" and similar hand-woven rugs:Of man-made textile materials :Other(OLD tariff)" +57029900,and other carpets coverings woven not tufted whether and similar rugs >> of other textile materials +57029910,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ""kelem"", ""Schumacks"", ""Karamanie"" and similar hand-woven rugs:Of other textile materials :Carpets and other floor coverings of cotton," +57029920,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ""kelem"", ""Schumacks"", ""Karamanie"" and similar hand-woven rugs:Of other textile materials :Carpets and other floor coverings, of silk(OLD tariff)" +57029990,"Carpets and other textile floor coverings, woven, not tufted or flocked, whether or not made-up, including ""kelem"", ""Schumacks"", ""Karamanie""
and similar hand-woven rugs:Of other textile materials :Other" +57030000,carpets and other textile floor coverings tufted whether or not made up +57031000, >> of wool or fine animal hair +57031010,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, TUFTED, WHETHER OR NOT MADE UP:Of wool or fine animal hair :Carpets" +57031020,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, TUFTED, WHETHER OR NOT MADE UP:Of wool or fine animal hair :Mats and matting" +57031090,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, TUFTED, WHETHER OR NOT MADE UP:Of wool or fine animal hair :Other" +57032010,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, TUFTED, WHETHER OR NOT MADE UP:Of nylon or other polyamides :Carpets, carpeting and rugs(OLD tariff)" +57032020,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, TUFTED, WHETHER OR NOT MADE UP:Of nylon or other polyamides :100% polyamide tufted velour, or cut pile loop pile carpet mats with jute, rubber latex or PU foam backing(OLD tariff)" +57032090,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, TUFTED, WHETHER OR NOT MADE UP:Of nylon or other polyamides :Other(OLD tariff)" +57032100,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, TUFTED, WHETHER OR NOT MADE UP::Turf" +57032900, >> other +57032910,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, TUFTED, WHETHER OR NOT MADE UP:Other:Carpets, carpeting and rugs" +57032920,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, TUFTED, WHETHER OR NOT MADE UP:Other:100% polyamide tufted velour, cut pile or loop pile carpet mats with jute, rubber latex or PU foam backing" +57032990,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, TUFTED, WHETHER OR NOT MADE UP:Other:Other" +57033010,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, TUFTED, WHETHER OR NOT MADE UP:Of other man-made textile materials:Carpets, carpeting and rugs(OLD tariff)" +57033020,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, TUFTED, WHETHER OR NOT MADE UP:Of other man-made textile materials:100% polypropylene carpet mats with jute, rubber, latex or PU foam backing(OLD tariff)" +57033090,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, TUFTED, WHETHER OR NOT MADE UP:Of other man-made textile materials:Other(OLD tariff)" +57033100,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, TUFTED, WHETHER OR NOT MADE UP::Turf" +57033900, >> other +57033910,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, TUFTED, WHETHER OR NOT MADE UP:Other:Carpets, carpeting and rugs" +57033920,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, TUFTED, WHETHER OR NOT MADE UP:Other:100% polypropylene carpet mats with jute, rubber, latex or PU foam backing" +57033990,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, TUFTED, WHETHER OR NOT MADE UP:Other:Other" +57039000, >> of other textile materials +57039010,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, TUFTED, WHETHER OR NOT MADE UP:Of other textile materials:Carpets and other floor coverings, of cotton, other than durries" +57039020,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, TUFTED, WHETHER OR NOT MADE UP:Of other textile materials:CARPETS AND FLOOR COVERINGS OF COIR" +57039090,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, TUFTED, WHETHER OR NOT MADE UP:Of other textile materials:Other" +57040000,carpets and other textile floor coverings of felt not tufted or flocked whether or not made up +57041000,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, OF FELT, NOT TUFTED OR FLOCKED, WHETHER OR NOT MADE UP::Tiles, having a maximum surface area of 0.3 m2" +57042000, >> tiles having a maximum surface area exceeding 0.3 m2 but not exceeding 1 m2 +57042010,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, OF FELT, NOT TUFTED OR FLOCKED, WHETHER OR NOT MADE UP:Tiles, having a maximum surface area exceeding 0.3 m2
but not exceeding 1 m2:Cotton" +57042020,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, OF FELT, NOT TUFTED OR FLOCKED, WHETHER OR NOT MADE UP:Tiles, having a maximum surface area exceeding 0.3 m2 but not exceeding 1 m2:woollen, other than artware" +57042090,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, OF FELT, NOT TUFTED OR FLOCKED, WHETHER OR NOT MADE UP:Tiles, having a maximum surface area exceeding 0.3 m2 but not exceeding 1 m2:Other" +57049000, >> other +57049010,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, OF FELT, NOT TUFTED OR FLOCKED, WHETHER OR NOT MADE UP:Other :Cotton" +57049020,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, OF FELT, NOT TUFTED OR FLOCKED, WHETHER OR NOT MADE UP:Other :woollen, other than artware" +57049090,"CARPETS AND OTHER TEXTILE FLOOR COVERINGS, OF FELT, NOT TUFTED OR FLOCKED, WHETHER OR NOT MADE UP:Other :Other" +57050000,other carpets and other textile floor coverings whether or not made up other carpets and other textile floor coverings whether or not made up carpets +57050011,"Other carpets and other textile floor coverings, whether or not made-up:Other carpets and other textile floor coverings whether or not made-up:Of silk" +57050019,"Other carpets and other textile floor coverings, whether or not made-up:Other carpets and other textile floor coverings whether or not made-up:Other" +57050021,"Other carpets and other textile floor coverings, whether or not made-up:Other carpets and other textile floor coverings whether or not made-up:Durries cotton" +57050022,"Other carpets and other textile floor coverings, whether or not made-up:Other carpets and other textile floor coverings whether or not made-up:Durries of man-made fibres" +57050023,"Other carpets and other textile floor coverings, whether or not made-up:Other carpets and other textile floor coverings whether or not made-up:Durries of wool" +57050024,"Other carpets and other textile floor coverings, whether or not made-up:Other carpets and other textile floor coverings whether or not made-up:Cotton durries of handloom (including chindi durries, cotton chenille durries, Rag Rug durries, printed durries, druggets);" +57050029,"Other carpets and other textile floor coverings, whether or not made-up:Other carpets and other textile floor coverings whether or not made-up:Other" +57050031,"Other carpets and other textile floor coverings, whether or not made-up:Other carpets and other textile floor coverings whether or not made-up:Of blended jute" +57050032,"Other carpets and other textile floor coverings, whether or not made-up:Other carpets and other textile floor coverings whether or not made-up:Of coir jute" +57050039,"Other carpets and other textile floor coverings, whether or not made-up:Other carpets and other textile floor coverings whether or not made-up:Other" +57050041,"Other carpets and other textile floor coverings, whether or not made-up:Other carpets and other textile floor coverings whether or not made-up:knitted" +57050042,"Other carpets and other textile floor coverings, whether or not made-up:Other carpets and other textile floor coverings whether or not made-up:Mats and mattings including bath mats, where cotton predominates by weight, of handloom, cotton rugs of handloom" +57050049,"Other carpets and other textile floor coverings, whether or not made-up:Other carpets and other textile floor coverings whether or not made-up:Other" +57050090,"Other carpets and other textile floor coverings, whether or not made-up:Other carpets and other textile floor coverings whether or not made-up:Other"
+58011000,"WOVEN PILE FABRICS AND CHENILLE FABRICS, OTHER THAN FABRICS OF HEADING 5802 OR 5806::Of wool or fine animal hair(OLD tariff)" +58012100,"WOVEN PILE FABRICS AND CHENILLE FABRICS, OTHER THAN FABRICS OF HEADING 5802 OR 5806::Uncut weft pile fabrics(OLD tariff)" +58012210,"WOVEN PILE FABRICS AND CHENILLE FABRICS, OTHER THAN FABRICS OF HEADING 5802 OR 5806:Cut corduroy :Solely of cotton(OLD tariff)" +58012290,"WOVEN PILE FABRICS AND CHENILLE FABRICS, OTHER THAN FABRICS OF HEADING 5802 OR 5806:Cut corduroy :Other(OLD tariff)" +58012300,"WOVEN PILE FABRICS AND CHENILLE FABRICS, OTHER THAN FABRICS OF HEADING 5802 OR 5806::Other weft pile fabrics(OLD tariff)" +58012600,"WOVEN PILE FABRICS AND CHENILLE FABRICS, OTHER THAN FABRICS OF HEADING 5802 OR 5806::Chenille fabrics(OLD tariff)" +58012710,"WOVEN PILE FABRICS AND CHENILLE FABRICS, OTHER THAN FABRICS OF HEADING 5802 OR 5806:Warp pile fabrics:Warp pile fabrics, 'epingle' (uncut)(OLD tariff)" +58012720,"WOVEN PILE FABRICS AND CHENILLE FABRICS, OTHER THAN FABRICS OF HEADING 5802 OR 5806:Warp pile fabrics:Warp pile fabrics, cut(OLD tariff)" +58012790,"WOVEN PILE FABRICS AND CHENILLE FABRICS, OTHER THAN FABRICS OF HEADING 5802 OR 5806:Warp pile fabrics:Other(OLD tariff)" +58013100,"WOVEN PILE FABRICS AND CHENILLE FABRICS, OTHER THAN FABRICS OF HEADING 5802 OR 5806::Uncut weft pile fabrics(OLD tariff)" +58013200,"WOVEN PILE FABRICS AND CHENILLE FABRICS, OTHER THAN FABRICS OF HEADING 5802 OR 5806::Cut corduroy(OLD tariff)" +58013300,"WOVEN PILE FABRICS AND CHENILLE FABRICS, OTHER THAN FABRICS OF HEADING 5802 OR 5806::Other weft pile fabrics(OLD tariff)" +58013610,"WOVEN PILE FABRICS AND CHENILLE FABRICS, OTHER THAN FABRICS OF HEADING 5802 OR 5806:Chenille fabrics:Carduroys(OLD tariff)" +58013690,"WOVEN PILE FABRICS AND CHENILLE FABRICS, OTHER THAN FABRICS OF HEADING 5802 OR 5806:Chenille fabrics:Other(OLD tariff)" +58013710,"WOVEN PILE FABRICS AND CHENILLE FABRICS, OTHER THAN FABRICS OF HEADING 5802 OR 5806:Warp pile fabrics:WARP PILE FABRICS UNCUT(OLD tariff)" +58013711,"WOVEN PILE FABRICS AND CHENILLE FABRICS, OTHER THAN FABRICS OF HEADING 5802 OR 5806:Warp pile fabrics:Velvet(OLD tariff)" +58013719,"WOVEN PILE FABRICS AND CHENILLE FABRICS, OTHER THAN FABRICS OF HEADING 5802 OR 5806:Warp pile fabrics:Other(OLD tariff)" +58013720,"WOVEN PILE FABRICS AND CHENILLE FABRICS, OTHER THAN FABRICS OF HEADING 5802 OR 5806:Warp pile fabrics:Warp pile fabrics,cut(OLD tariff)" +58013790,"WOVEN PILE FABRICS AND CHENILLE FABRICS, OTHER THAN FABRICS OF HEADING 5802 OR 5806:Warp pile fabrics:Other(OLD tariff)" +58019010,"WOVEN PILE FABRICS AND CHENILLE FABRICS, OTHER THAN FABRICS OF HEADING 5802 OR 5806:Of other textile materials:Pile fabrics and chenille fabrics of silk containing more than 50% by weight of silk, but not containing wool or hair per sq. 
metre,(OLD tariff)" +58019090,"WOVEN PILE FABRICS AND CHENILLE FABRICS, OTHER THAN FABRICS OF HEADING 5802 OR 5806:Of other textile materials:Pile fabrics and chenille fabrics not elsewhere specified or included(OLD tariff)" +58020000,terry towelling similar and woven terry fabrics other than arrow fabrics of heading 5806 tufted textile fabrics other than products of heading 5703 +58021000,terry towelling similar and woven terry fabrics other than arrow fabrics of heading 5806 tufted textile fabrics other than products of heading 5703 >> terry towelling and similarwoven terry fabrics of cotton terry towelling and similarwoven terry fabrics of cotton +58021010,"Terry toweling and similar woven terry fabrics, other than narrow fabrics of heading 5806; tufted textile fabrics, other than products of heading 5703:Terry towelling and similar woven terry fabrics, of cotton::Unbleached" +58021020,"Terry toweling and similar woven terry fabrics, other than narrow fabrics of heading 5806; tufted textile fabrics, other than products of heading 5703:Terry towelling and similar woven terry fabrics, of cotton::bleached" +58021030,"Terry toweling and similar woven terry fabrics, other than narrow fabrics of heading 5806; tufted textile fabrics, other than products of heading 5703:Terry towelling and similar woven terry fabrics, of cotton::Piece dyed" +58021040,"Terry toweling and similar woven terry fabrics, other than narrow fabrics of heading 5806; tufted textile fabrics, other than products of heading 5703:Terry towelling and similar woven terry fabrics, of cotton::Yarn dyed" +58021050,"Terry toweling and similar woven terry fabrics, other than narrow fabrics of heading 5806; tufted textile fabrics, other than products of heading 5703:Terry towelling and similar woven terry fabrics, of cotton::Printed" +58021060,"Terry toweling and similar woven terry fabrics, other than narrow fabrics of heading 5806; tufted textile fabrics, other than products of heading 5703:Terry towelling and similar woven terry fabrics, of cotton::Of Handloom" +58021090,"Terry toweling and similar woven terry fabrics, other than narrow fabrics of heading 5806; tufted textile fabrics, other than products of heading 5703:Terry towelling and similar woven terry fabrics, of cotton::Other" +58021100,"Terry toweling and similar woven terry fabrics, other than narrow fabrics of heading 5806; tufted textile fabrics, other than products of heading 5703::Unbleached(OLD tariff)" +58021910,"Terry toweling and similar woven terry fabrics, other than narrow fabrics of heading 5806; tufted textile fabrics, other than products of heading 5703:Other:Bleached(OLD tariff)" +58021920,"Terry toweling and similar woven terry fabrics, other than narrow fabrics of heading 5806; tufted textile fabrics, other than products of heading 5703:Other:Piece dyed(OLD tariff)" +58021930,"Terry toweling and similar woven terry fabrics, other than narrow fabrics of heading 5806; tufted textile fabrics, other than products of heading 5703:Other:Yarn dyed(OLD tariff)" +58021940,"Terry toweling and similar woven terry fabrics, other than narrow fabrics of heading 5806; tufted textile fabrics, other than products of heading 5703:Other:Printed(OLD tariff)" +58021950,"Terry toweling and similar woven terry fabrics, other than narrow fabrics of heading 5806; tufted textile fabrics, other than products of heading 5703:Other:Of handloom(OLD tariff)" +58021990,"Terry toweling and similar woven terry fabrics, other than narrow fabrics of heading 5806; tufted textile fabrics, 
other than products of heading 5703:Other:Other(OLD tariff)" +58022000,"Terry toweling and similar woven terry fabrics, other than narrow fabrics of heading 5806; tufted textile fabrics, other than products of heading 5703::Terry towelling and similar woven terry fabrics, of other textile materials" +58023000,"Terry toweling and similar woven terry fabrics, other than narrow fabrics of heading 5806; tufted textile fabrics, other than products of heading 5703::Tufted textile fabrics" +58030000,gauze other than n arrowfabrics of heading 5806 of cotton gauze other than n arrowfabrics of heading 5806 of cotton +58030011,"GAUZE, OTHER THAN NARROW FABRICS OF HEADING 5806:Gauze, other than narrow fabrics of heading 5806:Unbleached" +58030012,"GAUZE, OTHER THAN NARROW FABRICS OF HEADING 5806:Gauze, other than narrow fabrics of heading 5806:Bleached" +58030013,"GAUZE, OTHER THAN NARROW FABRICS OF HEADING 5806:Gauze, other than narrow fabrics of heading 5806:Piece dyed" +58030014,"GAUZE, OTHER THAN NARROW FABRICS OF HEADING 5806:Gauze, other than narrow fabrics of heading 5806:Yarn dyed" +58030015,"GAUZE, OTHER THAN NARROW FABRICS OF HEADING 5806:Gauze, other than narrow fabrics of heading 5806:Printed" +58030019,"GAUZE, OTHER THAN NARROW FABRICS OF HEADING 5806:Gauze, other than narrow fabrics of heading 5806:other" +58030091,"GAUZE, OTHER THAN NARROW FABRICS OF HEADING 5806:Gauze, other than narrow fabrics of heading 5806:Of silk or silk waste" +58030092,"GAUZE, OTHER THAN NARROW FABRICS OF HEADING 5806:Gauze, other than narrow fabrics of heading 5806:Of synthetic fibre" +58030093,"GAUZE, OTHER THAN NARROW FABRICS OF HEADING 5806:Gauze, other than narrow fabrics of heading 5806:Of artificial fibre" +58030099,"GAUZE, OTHER THAN NARROW FABRICS OF HEADING 5806:Gauze, other than narrow fabrics of heading 5806:Other" +58040000,tulles and other net fabrics not including woven knitted or crocheted in fabrics lace in the piece strips or in motifs other than fabrics of headings 6002 to 6006 +58041000,tulles and other net fabrics not including woven knitted or crocheted in fabrics lace in the piece strips or in motifs other than fabrics of headings 6002 to 6006 >> tulles and other net fabrics +58041010,"TULLES AND OTHER NET FABRICS, NOT INCLUDING WOVEN, KNITTED OR CROCHETED FABRICS; LACE IN THE PIECE, IN STRIPS OR IN MOTIFS, OTHER THAN FABRICS OF HEADINGS 6002 TO 6006:Tulles and other net fabrics:Of cotton" +58041090,"TULLES AND OTHER NET FABRICS, NOT INCLUDING WOVEN, KNITTED OR CROCHETED FABRICS; LACE IN THE PIECE, IN STRIPS OR IN MOTIFS, OTHER THAN FABRICS OF HEADINGS 6002 TO 6006:Tulles and other net fabrics:Other" +58042100,"TULLES AND OTHER NET FABRICS, NOT INCLUDING WOVEN, KNITTED OR CROCHETED FABRICS; LACE IN THE PIECE, IN STRIPS OR IN MOTIFS, OTHER THAN FABRICS OF HEADINGS 6002 TO 6006::Of man-made fibres" +58042900,tulles and other net fabrics not including woven knitted or crocheted in fabrics lace in the piece strips or in motifs other than fabrics of headings 6002 to 6006 >> of other textile materials +58042910,"TULLES AND OTHER NET FABRICS, NOT INCLUDING WOVEN, KNITTED OR CROCHETED FABRICS; LACE IN THE PIECE, IN STRIPS OR IN MOTIFS, OTHER THAN FABRICS OF HEADINGS 6002 TO 6006:Of other textile materials:Of cotton" +58042990,"TULLES AND OTHER NET FABRICS, NOT INCLUDING WOVEN, KNITTED OR CROCHETED FABRICS; LACE IN THE PIECE, IN STRIPS OR IN MOTIFS, OTHER THAN FABRICS OF HEADINGS 6002 TO 6006:Of other textile materials:Other" +58043000,"TULLES AND OTHER NET FABRICS, NOT INCLUDING WOVEN, KNITTED OR 
CROCHETED FABRICS; LACE IN THE PIECE, IN STRIPS OR IN MOTIFS, OTHER THAN FABRICS OF HEADINGS 6002 TO 6006::Hand-made lace" +58050000,tapestries of the type gobelins flanders aubusson beauvais and the like and tapestries for example petit point cross stitch whether or not made up tapestries of the type gobelins flanders aubusson beauvais and the like and tapestries for example petit point cross stitch whether or not made up +58050010,"HAND-WOVEN TAPESTRIES OF THE TYPE GOBELINS, FLANDERS, AUBUSSON, BEAUVAIS AND THE LIKE, AND NEEDLE-WORKED TAPESTRIES (FOR EXAMPLE, PETIT POINT, CROSS STITCH), WHETHER OR NOT MADE UP:Hand-woven tapestries of the type gobelins, flanders, aubusson, beauvais and the like, and needle-worked tapestries (for example, petit point, cross stitch), whether or not made up:Tapestries hand made or needle worked by hand, of cotton" +58050020,"HAND-WOVEN TAPESTRIES OF THE TYPE GOBELINS, FLANDERS, AUBUSSON, BEAUVAIS AND THE LIKE, AND NEEDLE-WORKED TAPESTRIES (FOR EXAMPLE, PETIT POINT, CROSS STITCH), WHETHER OR NOT MADE UP:Hand-woven tapestries of the type gobelins, flanders, aubusson, beauvais and the like, and needle-worked tapestries (for example, petit point, cross stitch), whether or not made up:Tapestries of jute" +58050090,"HAND-WOVEN TAPESTRIES OF THE TYPE GOBELINS, FLANDERS, AUBUSSON, BEAUVAIS AND THE LIKE, AND NEEDLE-WORKED TAPESTRIES (FOR EXAMPLE, PETIT POINT, CROSS STITCH), WHETHER OR NOT MADE UP:Hand-woven tapestries of the type gobelins, flanders, aubusson, beauvais and the like, and needle-worked tapestries (for example, petit point, cross stitch), whether or not made up:Other" +58060000,goods of heading 5807 narrow fabrics consisting of warp without weft assembled by means of an adhesive bolducs goods of heading 5807 narrow fabrics consisting of warp without weft assembled by means of an adhesive bolducs +58061000,NARROW WOVEN FABRICS OTHER THAN GOODS OF HEADING 5807; NARROW FABRICS CONSISTING OF WARP WITHOUT WEFT ASSEMBLED BY MEANS OF AN ADHESIVE (BOLDUCS)::Woven pile fabrics (including terry towelling and similar terry fabrics) and chenille fabrics +58062000,"NARROW WOVEN FABRICS OTHER THAN GOODS OF HEADING 5807; NARROW FABRICS CONSISTING OF WARP WITHOUT WEFT ASSEMBLED BY MEANS OF AN ADHESIVE (BOLDUCS)::Other woven fabrics, containing by weight 5% or more of elastomeric yarn or rubber thread" +58063100,goods of heading 5807 narrow fabrics consisting of warp without weft assembled by means of an adhesive bolducs goods of heading 5807 narrow fabrics consisting of warp without weft assembled by means of an adhesive bolducs >> of cotton of cotton +58063110,NARROW WOVEN FABRICS OTHER THAN GOODS OF HEADING 5807; NARROW FABRICS CONSISTING OF WARP WITHOUT WEFT ASSEMBLED BY MEANS OF AN ADHESIVE (BOLDUCS):Of cotton:Typewriter ribbon cloth +58063120,NARROW WOVEN FABRICS OTHER THAN GOODS OF HEADING 5807; NARROW FABRICS CONSISTING OF WARP WITHOUT WEFT ASSEMBLED BY MEANS OF AN ADHESIVE (BOLDUCS):Of cotton:Newar cotton +58063190,NARROW WOVEN FABRICS OTHER THAN GOODS OF HEADING 5807; NARROW FABRICS CONSISTING OF WARP WITHOUT WEFT ASSEMBLED BY MEANS OF AN ADHESIVE (BOLDUCS):Of cotton:Other +58063200,NARROW WOVEN FABRICS OTHER THAN GOODS OF HEADING 5807; NARROW FABRICS CONSISTING OF WARP WITHOUT WEFT ASSEMBLED BY MEANS OF AN ADHESIVE (BOLDUCS)::Of man-made fibres +58063900,goods of heading 5807 narrow fabrics consisting of warp without weft assembled by means of an adhesive bolducs goods of heading 5807 narrow fabrics consisting of warp without weft assembled by means of an 
adhesive bolducs >> of other textile materials of other textile materials +58063910,NARROW WOVEN FABRICS OTHER THAN GOODS OF HEADING 5807; NARROW FABRICS CONSISTING OF WARP WITHOUT WEFT ASSEMBLED BY MEANS OF AN ADHESIVE (BOLDUCS):Of other textile materials:Goat hair puttis tape +58063920,NARROW WOVEN FABRICS OTHER THAN GOODS OF HEADING 5807; NARROW FABRICS CONSISTING OF WARP WITHOUT WEFT ASSEMBLED BY MEANS OF AN ADHESIVE (BOLDUCS):Of other textile materials:Jute webbing +58063930,NARROW WOVEN FABRICS OTHER THAN GOODS OF HEADING 5807; NARROW FABRICS CONSISTING OF WARP WITHOUT WEFT ASSEMBLED BY MEANS OF AN ADHESIVE (BOLDUCS):Of other textile materials:Other narrow fabrics of jute +58063990,NARROW WOVEN FABRICS OTHER THAN GOODS OF HEADING 5807; NARROW FABRICS CONSISTING OF WARP WITHOUT WEFT ASSEMBLED BY MEANS OF AN ADHESIVE (BOLDUCS):Of other textile materials:Other +58064000,NARROW WOVEN FABRICS OTHER THAN GOODS OF HEADING 5807; NARROW FABRICS CONSISTING OF WARP WITHOUT WEFT ASSEMBLED BY MEANS OF AN ADHESIVE (BOLDUCS)::Fabrics consisting of warp without weft assembled by means of an adhesive (bolducs) +58070000,similar labels badges and articles of textile materials in the piece in strips or cut to shape or size not embroidered +58071000,similar labels badges and articles of textile materials in the piece in strips or cut to shape or size not embroidered >> woven +58071010,"LABELS, BADGES AND SIMILAR ARTICLES OF TEXTILE MATERIALS, IN THE PIECE, IN STRIPS OR CUT TO SHAPE OR SIZE, NOT EMBROIDERED:Woven:Of cotton" +58071020,"LABELS, BADGES AND SIMILAR ARTICLES OF TEXTILE MATERIALS, IN THE PIECE, IN STRIPS OR CUT TO SHAPE OR SIZE, NOT EMBROIDERED:Woven:Of man-made fibre" +58071090,"LABELS, BADGES AND SIMILAR ARTICLES OF TEXTILE MATERIALS, IN THE PIECE, IN STRIPS OR CUT TO SHAPE OR SIZE, NOT EMBROIDERED:Woven:Other" +58079000,similar labels badges and articles of textile materials in the piece in strips or cut to shape or size not embroidered >> other +58079010,"LABELS, BADGES AND SIMILAR ARTICLES OF TEXTILE MATERIALS, IN THE PIECE, IN STRIPS OR CUT TO SHAPE OR SIZE, NOT EMBROIDERED:Other:Felt or non-woven" +58079090,"LABELS, BADGES AND SIMILAR ARTICLES OF TEXTILE MATERIALS, IN THE PIECE, IN STRIPS OR CUT TO SHAPE OR SIZE, NOT EMBROIDERED:Other:Other" +58080000,braids in the piece ornamental trimmings in the piece without +58081000,braids in the piece ornamental trimmings in the piece without >> braids in the piece +58081010,"BRAIDS IN THE PIECE; ORNAMENTAL TRIMMINGS IN THE PIECE, WITHOUT EMBROIDERY, OTHER THAN KNITTED OR CROCHETED; TASSELS, POMPONS AND SIMILAR ARTICLES:Braids, in the piece:Of cotton" +58081090,"BRAIDS IN THE PIECE; ORNAMENTAL TRIMMINGS IN THE PIECE, WITHOUT EMBROIDERY, OTHER THAN KNITTED OR CROCHETED; TASSELS, POMPONS AND SIMILAR ARTICLES:Braids, in the piece:Other" +58089000,braids in the piece ornamental trimmings in the piece without >> other +58089010,"BRAIDS IN THE PIECE; ORNAMENTAL TRIMMINGS IN THE PIECE, WITHOUT EMBROIDERY, OTHER THAN KNITTED OR CROCHETED; TASSELS, POMPONS AND SIMILAR ARTICLES:Other:Tapes, ornamental or cotton" +58089020,"BRAIDS IN THE PIECE; ORNAMENTAL TRIMMINGS IN THE PIECE, WITHOUT EMBROIDERY, OTHER THAN KNITTED OR CROCHETED; TASSELS, POMPONS AND SIMILAR ARTICLES:Other:Hair band of narrow fabrics" +58089030,"BRAIDS IN THE PIECE; ORNAMENTAL TRIMMINGS IN THE PIECE, WITHOUT EMBROIDERY, OTHER THAN KNITTED OR CROCHETED; TASSELS, POMPONS AND SIMILAR ARTICLES:Other:Other braids" +58089040,"BRAIDS IN THE PIECE; ORNAMENTAL TRIMMINGS IN THE PIECE, WITHOUT 
EMBROIDERY, OTHER THAN KNITTED OR CROCHETED; TASSELS, POMPONS AND SIMILAR ARTICLES:Other:Ribbons of rayon with ornamental trimmings" +58089050,"BRAIDS IN THE PIECE; ORNAMENTAL TRIMMINGS IN THE PIECE, WITHOUT EMBROIDERY, OTHER THAN KNITTED OR CROCHETED; TASSELS, POMPONS AND SIMILAR ARTICLES:Other:Saree falls, borders (other than zari), fringes of cotton" +58089060,"BRAIDS IN THE PIECE; ORNAMENTAL TRIMMINGS IN THE PIECE, WITHOUT EMBROIDERY, OTHER THAN KNITTED OR CROCHETED; TASSELS, POMPONS AND SIMILAR ARTICLES:Other:Saree falls, borders (other than zari), fringes" +58089090,"BRAIDS IN THE PIECE; ORNAMENTAL TRIMMINGS IN THE PIECE, WITHOUT EMBROIDERY, OTHER THAN KNITTED OR CROCHETED; TASSELS, POMPONS AND SIMILAR ARTICLES:Other:Other" +58090000,woven fabrics of metal thread and woven fabrics of metallised yarn of heading 5605 of a kind used in apparel as furnishing fabrics or for similar purposes not elsewhere specified or included woven fabrics of metal thread and woven fabrics of metallised yarn of heading 5605 of a kind used in apparel as furnishing fabrics or for similar purposes not elsewhere specified or included +58090010,"WOVEN FABRICS OF METAL THREAD AND WOVEN FABRICS OF METALLISED YARN OF HEADING 5605, OF A KIND USED IN APPAREL, AS FURNISHING FABRICS OR FOR SIMILAR PURPOSES, NOT ELSEWHERE SPECIFIED OR:Woven fabrics of metal thread and woven fabrics of metallised yarn of heading 5605, of a kind used in apparel, as furnishing fabrics or for similar purposes, not elsewhere specified or included:Zari border" +58090090,"WOVEN FABRICS OF METAL THREAD AND WOVEN FABRICS OF METALLISED YARN OF HEADING 5605, OF A KIND USED IN APPAREL, AS FURNISHING FABRICS OR FOR SIMILAR PURPOSES, NOT ELSEWHERE SPECIFIED OR:Woven fabrics of metal thread and woven fabrics of metallised yarn of heading 5605, of a kind used in apparel, as furnishing fabrics or for similar purposes, not elsewhere specified or included:Other" +58100000,embroidery in the piece in strips or in motifs +58101000,"EMBROIDERY IN THE PIECE, IN STRIPS OR IN MOTIFS::Embroidery without visible ground" +58109100,"EMBROIDERY IN THE PIECE, IN STRIPS OR IN MOTIFS::Of cotton" +58109110,embroidery in the piece in strips or in motifs >> of cotton >> embroidered with lucknow chikan craft +58109190,embroidery in the piece in strips or in motifs >> of cotton >> other +58109200,embroidery in the piece in strips or in motifs >> of fibres +58109210,"EMBROIDERY IN THE PIECE, IN STRIPS OR IN MOTIFS:Of man-made fibres :Embroidered badges, motifs and the like" +58109220,embroidery in the piece in strips or in motifs >> of fibres >> chikan craft +58109290,"EMBROIDERY IN THE PIECE, IN STRIPS OR IN MOTIFS:Of man-made fibres :Other" +58109900,"EMBROIDERY IN THE PIECE, IN STRIPS OR IN MOTIFS::Of other textile materials" +58109910,embroidery in the piece in strips or in motifs >> of other textile materials >> embroidered with lucknow kg chikan craft +58109990,embroidery in the piece in strips or in motifs >> of other textile materials >> other kg +58110000,quilted textile products in the piece composed of one or more layers of textile materials assembled with padding by stitching or than otherwise other embroidery of heading 5810 quilted textile products in the piece composed of one or more layers of textile materials assembled with padding by stitching or otherwise other than embroidery of heading 5810 +58110010,"QUILTED TEXTILE PRODUCTS IN THE PIECE, COMPOSED OF ONE OR MORE LAYERS OF TEXTILE MATERIALS ASSEMBLED WITH PADDING BY STITCHING OR OTHERWISE, OTHER THAN
EMBROIDERY OF HEADING 5810:Quilted textile products in the piece, composed of one or more layers of textile materials assembled with padding by stitching or otherwise, other than embroidery of heading 5810:Kantha (multilayer stitched textile fabrics in piece used for bedding, mattress pads or clothing)" +58110020,"QUILTED TEXTILE PRODUCTS IN THE PIECE, COMPOSED OF ONE OR MORE LAYERS OF TEXTILE MATERIALS ASSEMBLED WITH PADDING BY STITCHING OR OTHERWISE, OTHER THAN EMBROIDERY OF HEADING 5810:Quilted textile products in the piece, composed of one or more layers of textile materials assembled with padding by stitching or otherwise, other than embroidery of heading 5810:Quilted wadding" +58110090,"QUILTED TEXTILE PRODUCTS IN THE PIECE, COMPOSED OF ONE OR MORE LAYERS OF TEXTILE MATERIALS ASSEMBLED WITH PADDING BY STITCHING OR OTHERWISE, OTHER THAN EMBROIDERY OF HEADING 5810:Quilted textile products in the piece, composed of one or more layers of textile materials assembled with padding by stitching or otherwise, other than embroidery of heading 5810:Other(OLD tariff)" +59010000,or textile fabrics coated with gum amylaceous substances of a kind used forthe outer covers of books or the like tracing cloth prepared painting canvas buckram and similar stiffened textile fabrics of a kind used for hat foundations +59011000,or textile fabrics coated with gum amylaceous substances of a kind used forthe outer covers of books or the like tracing cloth prepared painting canvas buckram and similar stiffened textile fabrics of a kind used for hat foundations >> textile fabrics coated with gum or amylaceous substances of a kind used for the outer covers of books or the like +59011010,"TEXTILE FABRICS COATED WITH GUM OR AMYLACEOUS SUBSTANCES, OF A KIND USED FOR THE OUTER COVERS OF BOOKS OR THE LIKE; TRACING CLOTH; PREPARED PAINTING CANVAS; BUCKRAM AND SIMILAR STIFFENED TEXTILE FABRICS OF A KIND USED FOR HAT FOUNDATIONS:Textile fabrics coated with gum or amylaceous substances, of a kind used for the outer covers of books or the like :Of cotton" +59011020,"TEXTILE FABRICS COATED WITH GUM OR AMYLACEOUS SUBSTANCES, OF A KIND USED FOR THE OUTER COVERS OF BOOKS OR THE LIKE; TRACING CLOTH; PREPARED PAINTING CANVAS; BUCKRAM AND SIMILAR STIFFENED TEXTILE FABRICS OF A KIND USED FOR HAT FOUNDATIONS:Textile fabrics coated with gum or amylaceous substances, of a kind used for the outer covers of books or the like :Prepared painting canvas" +59011090,"TEXTILE FABRICS COATED WITH GUM OR AMYLACEOUS SUBSTANCES, OF A KIND USED FOR THE OUTER COVERS OF BOOKS OR THE LIKE; TRACING CLOTH; PREPARED PAINTING CANVAS; BUCKRAM AND SIMILAR STIFFENED TEXTILE FABRICS OF A KIND USED FOR HAT FOUNDATIONS:Textile fabrics coated with gum or amylaceous substances, of a kind used for the outer covers of books or the like :Other" +59019000,or textile fabrics coated with gum amylaceous substances of a kind used forthe outer covers of books or the like tracing cloth prepared painting canvas buckram and similar stiffened textile fabrics of a kind used for hat foundations >> other +59019010,"TEXTILE FABRICS COATED WITH GUM OR AMYLACEOUS SUBSTANCES, OF A KIND USED FOR THE OUTER COVERS OF BOOKS OR THE LIKE; TRACING CLOTH; PREPARED PAINTING CANVAS; BUCKRAM AND SIMILAR STIFFENED TEXTILE FABRICS OF A KIND USED FOR HAT FOUNDATIONS:Other :Tracing cloth of cotton" +59019020,"TEXTILE FABRICS COATED WITH GUM OR AMYLACEOUS SUBSTANCES, OF A KIND USED FOR THE OUTER COVERS OF BOOKS OR THE LIKE; TRACING CLOTH; PREPARED PAINTING CANVAS; BUCKRAM AND SIMILAR STIFFENED 
TEXTILE FABRICS OF A KIND USED FOR HAT FOUNDATIONS:Other :Varnished cambric fabrics (Empire fabrics) tapes" +59019090,"TEXTILE FABRICS COATED WITH GUM OR AMYLACEOUS SUBSTANCES, OF A KIND USED FOR THE OUTER COVERS OF BOOKS OR THE LIKE; TRACING CLOTH; PREPARED PAINTING CANVAS; BUCKRAM AND SIMILAR STIFFENED TEXTILE FABRICS OF A KIND USED FOR HAT FOUNDATIONS:Other :Other(OLD tariff)" +59020000,tyre cord fabric of high tenacity yarn of nylon or other polyamides polyesters or viscose rayon +59021000,tyre cord fabric of high tenacity yarn of nylon or other polyamides polyesters or viscose rayon >> of nylon or other polyamides +59021010,"TYRE CORD FABRIC OF HIGH TENACITY YARN OF NYLON OR OTHER POLYAMIDES, POLYESTERS OR VISCOSE RAYON:Of nylon or other polyamides :Impregnated with rubber" +59021090,"TYRE CORD FABRIC OF HIGH TENACITY YARN OF NYLON OR OTHER POLYAMIDES, POLYESTERS OR VISCOSE RAYON:Of nylon or other polyamides :Other" +59022000,tyre cord fabric of high tenacity yarn of nylon or other polyamides polyesters or viscose rayon >> of polyesters +59022010,"TYRE CORD FABRIC OF HIGH TENACITY YARN OF NYLON OR OTHER POLYAMIDES, POLYESTERS OR VISCOSE RAYON:Of polyesters :Impregnated with rubber" +59022090,"TYRE CORD FABRIC OF HIGH TENACITY YARN OF NYLON OR OTHER POLYAMIDES, POLYESTERS OR VISCOSE RAYON:Of polyesters :Other" +59029000,tyre cord fabric of high tenacity yarn of nylon or other polyamides polyesters or viscose rayon >> other +59029010,"TYRE CORD FABRIC OF HIGH TENACITY YARN OF NYLON OR OTHER POLYAMIDES, POLYESTERS OR VISCOSE RAYON:Other :Impregnated with rubber" +59029090,"TYRE CORD FABRIC OF HIGH TENACITY YARN OF NYLON OR OTHER POLYAMIDES, POLYESTERS OR VISCOSE RAYON:Other :Other" +59030000,textile fabrics impregnated coated covered or laminated with plastics other than those of heading 5902 +59031000,textile fabrics impregnated coated covered or laminated with plastics other than those of heading 5902 >> with polyvinyl chloride +59031010,"TEXTILE FABRICS, IMPREGNATED, COATED, COVERED OR LAMINATED WITH PLASTICS, OTHER THAN THOSE OF HEADING 5902:With polyvinyl chloride :Imitation leather fabrics of cotton" +59031090,"TEXTILE FABRICS, IMPREGNATED, COATED, COVERED OR LAMINATED WITH PLASTICS, OTHER THAN THOSE OF HEADING 5902:With polyvinyl chloride :Other" +59032000,textile fabrics impregnated coated covered or laminated with plastics other than those of heading 5902 >> with polyurethane +59032010,"TEXTILE FABRICS, IMPREGNATED, COATED, COVERED OR LAMINATED WITH PLASTICS, OTHER THAN THOSE OF HEADING 5902:With polyurethane :Imitation leather fabrics, of cotton" +59032090,"TEXTILE FABRICS, IMPREGNATED, COATED, COVERED OR LAMINATED WITH PLASTICS, OTHER THAN THOSE OF HEADING 5902:With polyurethane :Other" +59039000,textile fabrics impregnated coated covered or laminated with plastics other than those of heading 5902 >> other +59039010,"TEXTILE FABRICS, IMPREGNATED, COATED, COVERED OR LAMINATED WITH PLASTICS, OTHER THAN THOSE OF HEADING 5902:Other:Of cotton" +59039020,"TEXTILE FABRICS, IMPREGNATED, COATED, COVERED OR LAMINATED WITH PLASTICS, OTHER THAN THOSE OF HEADING 5902:Other:Polyethylene laminated jute fabrics" +59039090,"TEXTILE FABRICS, IMPREGNATED, COATED, COVERED OR LAMINATED WITH PLASTICS, OTHER THAN THOSE OF HEADING 5902:Other:Other" +59040000,linoleum whether or not cut to shape floor coverings consisting of a coating or covering applied on a textile backing whether or not cut to shape +59041000,"LINOLEUM, WHETHER OR NOT CUT TO SHAPE; FLOOR COVERINGS CONSISTING OF A COATING OR 
COVERING APPLIED ON A TEXTILE BACKING, WHETHER OR NOT CUT TO SHAPE::Linoleum" +59049000,linoleum whether or not cut to shape floor coverings consisting of a coating or covering applied on a textile backing whether or not cut to shape >> other +59049010,"LINOLEUM, WHETHER OR NOT CUT TO SHAPE; FLOOR COVERINGS CONSISTING OF A COATING OR COVERING APPLIED ON A TEXTILE BACKING, WHETHER OR NOT CUT TO SHAPE:Other :Floor coverings with jute base" +59049090,"LINOLEUM, WHETHER OR NOT CUT TO SHAPE; FLOOR COVERINGS CONSISTING OF A COATING OR COVERING APPLIED ON A TEXTILE BACKING, WHETHER OR NOT CUT TO SHAPE:Other :Other" +59050000,textile wall coverings textile wall coverings +59050010,TEXTILE WALL COVERINGS:Textile wall coverings :Fixed on the backing of any material +59050090,TEXTILE WALL COVERINGS:Textile wall coverings :Other +59060000,rubberised textile fabrics other than those of heading 5902 +59061000,"RUBBERISED TEXTILE FABRICS, OTHER THAN THOSE OF HEADING 5902::Adhesive tape of a width not exceeding 20 cm" +59069100,rubberised textile fabrics other than those of heading 5902 >> knitted or crocheted +59069110,"RUBBERISED TEXTILE FABRICS, OTHER THAN THOSE OF HEADING 5902:Knitted or crocheted :Of cotton" +59069190,"RUBBERISED TEXTILE FABRICS, OTHER THAN THOSE OF HEADING 5902:Knitted or crocheted :Of other textile materials" +59069900,rubberised textile fabrics other than those of heading 5902 >> other +59069910,"RUBBERISED TEXTILE FABRICS, OTHER THAN THOSE OF HEADING 5902:Other :Insulating tape, electrical of cotton" +59069920,"RUBBERISED TEXTILE FABRICS, OTHER THAN THOSE OF HEADING 5902:Other :Rubberised cotton fabrics, other than knitted or crocheted" +59069990,"RUBBERISED TEXTILE FABRICS, OTHER THAN THOSE OF HEADING 5902:Other :Other" +59070000,textile fabrics otherwise impregnated coated or covered painted canvas being theatrical scenery studio or the like textile fabrics otherwise impregnated coated or covered painted canvas being theatrical scenery studio or the like fabrics covered partially or fully with textile flocks or with preparation containing textile flocks +59070011,"TEXTILE FABRICS OTHERWISE IMPREGNATED, COATED OR COVERED; PAINTED CANVAS BEING THEATRICAL SCENERY, STUDIO BACK-CLOTHS OR THE LIKE:Textile fabrics otherwise impregnated, coated or covered; painted canvas being theatrical scenery, studio back-cloths or the like :On the base fabrics of cotton" +59070012,"TEXTILE FABRICS OTHERWISE IMPREGNATED, COATED OR COVERED; PAINTED CANVAS BEING THEATRICAL SCENERY, STUDIO BACK-CLOTHS OR THE LIKE:Textile fabrics otherwise impregnated, coated or covered; painted canvas being theatrical scenery, studio back-cloths or the like :On the base fabrics of man-made textile material" +59070019,"TEXTILE FABRICS OTHERWISE IMPREGNATED, COATED OR COVERED; PAINTED CANVAS BEING THEATRICAL SCENERY, STUDIO BACK-CLOTHS OR THE LIKE:Textile fabrics otherwise impregnated, coated or covered; painted canvas being theatrical scenery, studio back-cloths or the like :Other;" +59070091,"TEXTILE FABRICS OTHERWISE IMPREGNATED, COATED OR COVERED; PAINTED CANVAS BEING THEATRICAL SCENERY, STUDIO BACK-CLOTHS OR THE LIKE:Textile fabrics otherwise impregnated, coated or covered; painted canvas being theatrical scenery, studio back-cloths or the like :Cotton fabrics coated or impregnated with oil or preparations with basis of drying oil" +59070092,"TEXTILE FABRICS OTHERWISE IMPREGNATED, COATED OR COVERED; PAINTED CANVAS BEING THEATRICAL SCENERY, STUDIO BACK-CLOTHS OR THE LIKE:Textile fabrics otherwise impregnated, coated 
or covered; painted canvas being theatrical scenery, studio back-cloths or the like :Other textile fabrics coated or impregnated with oil or oil preparations" +59070093,"TEXTILE FABRICS OTHERWISE IMPREGNATED, COATED OR COVERED; PAINTED CANVAS BEING THEATRICAL SCENERY, STUDIO BACK-CLOTHS OR THE LIKE:Textile fabrics otherwise impregnated, coated or covered; painted canvas being theatrical scenery, studio back-cloths or the like :Jute fabrics otherwise impregnated or coated" +59070099,"TEXTILE FABRICS OTHERWISE IMPREGNATED, COATED OR COVERED; PAINTED CANVAS BEING THEATRICAL SCENERY, STUDIO BACK-CLOTHS OR THE LIKE:Textile fabrics otherwise impregnated, coated or covered; painted canvas being theatrical scenery, studio back-cloths or the like :Other" +59080000,textile wicks woven plaited or knitted for lamps stoves lighters candles or the like incandescent gas mantles and tubular knitted gas mantle fabric or not impregnated therefor whether textile wicks woven plaited or knitted for lamps stoves lighters candles or the like incandescent gas mantles and tubular knitted gas mantle fabric therefor whether or not impregnated +59080010,"TEXTILE WICKS, WOVEN, PLAITED OR KNITTED, FOR LAMPS, STOVES, LIGHTERS, CANDLES OR THE LIKE; INCANDESCENT GAS MANTLES AND TUBULAR KNITTED GAS MANTLE FABRIC THEREFOR, WHETHER OR NOT IMPREGNATED:Textile wicks, woven, plaited or knitted, for:Wicks and gas mantle fabrics, of cotton" +59080020,"TEXTILE WICKS, WOVEN, PLAITED OR KNITTED, FOR LAMPS, STOVES, LIGHTERS, CANDLES OR THE LIKE; INCANDESCENT GAS MANTLES AND TUBULAR KNITTED GAS MANTLE FABRIC THEREFOR, WHETHER OR NOT IMPREGNATED:Textile wicks, woven, plaited or knitted, for:Gas mantles of rayon" +59080090,"TEXTILE WICKS, WOVEN, PLAITED OR KNITTED, FOR LAMPS, STOVES, LIGHTERS, CANDLES OR THE LIKE; INCANDESCENT GAS MANTLES AND TUBULAR KNITTED GAS MANTLE FABRIC THEREFOR, WHETHER OR NOT IMPREGNATED:Textile wicks, woven, plaited or knitted, for:Other" +59090000,textile hose piping and similar textile tubing with or without lining armour or accessories of other materials textile hose piping and similar textile tubing with or without lining armour or accessories of other materials +59090010,"TEXTILE HOSE PIPING AND SIMILAR TEXTILE TUBING, WITH OR WITHOUT LINING, ARMOUR OR ACCESSORIES OF OTHER MATERIALS:Textile hose piping and similar textile tubing, with or without lining, armour or accessories of other materials :Of cotton" +59090020,"TEXTILE HOSE PIPING AND SIMILAR TEXTILE TUBING, WITH OR WITHOUT LINING, ARMOUR OR ACCESSORIES OF OTHER MATERIALS:Textile hose piping and similar textile tubing, with or without lining, armour or accessories of other materials :Of man-made fibre" +59090090,"TEXTILE HOSE PIPING AND SIMILAR TEXTILE TUBING, WITH OR WITHOUT LINING, ARMOUR OR ACCESSORIES OF OTHER MATERIALS:Textile hose piping and similar textile tubing, with or without lining, armour or accessories of other materials :Of other textile materials" +59100000,transmission or conveyor belts or belting of textile material whether or not impregnated coated covered or laminatedwith plastics or reinforced with metal or other material transmission or conveyor belts or belting of textile material whether or not impregnated coated covered or laminated with plastics or reinforced with metal or other material +59100010,"TRANSMISSION OR CONVEYOR BELTS OR BELTING, OF TEXTILE MATERIAL, WHETHER OR NOT IMPREGNATED, COATED, COVERED OR LAMINATED WITH PLASTICS, OR REINFORCED WITH METAL OR OTHER MATERIAL:Transmission or conveyor belts or belting, of 
textile material, whether or not impregnated, coated, covered or laminated with plastics, or reinforced with metal or other material :Cotton canvas ply belting" +59100020,"TRANSMISSION OR CONVEYOR BELTS OR BELTING, OF TEXTILE MATERIAL, WHETHER OR NOT IMPREGNATED, COATED, COVERED OR LAMINATED WITH PLASTICS, OR REINFORCED WITH METAL OR OTHER MATERIAL:Transmission or conveyor belts or belting, of textile material, whether or not impregnated, coated, covered or laminated with plastics, or reinforced with metal or other material :Rubberised cotton belting" +59100030,"TRANSMISSION OR CONVEYOR BELTS OR BELTING, OF TEXTILE MATERIAL, WHETHER OR NOT IMPREGNATED, COATED, COVERED OR LAMINATED WITH PLASTICS, OR REINFORCED WITH METAL OR OTHER MATERIAL:Transmission or conveyor belts or belting, of textile material, whether or not impregnated, coated, covered or laminated with plastics, or reinforced with metal or other material :Other transmission, conveyer or elevator belts or belting of cotton" +59100040,"TRANSMISSION OR CONVEYOR BELTS OR BELTING, OF TEXTILE MATERIAL, WHETHER OR NOT IMPREGNATED, COATED, COVERED OR LAMINATED WITH PLASTICS, OR REINFORCED WITH METAL OR OTHER MATERIAL:Transmission or conveyor belts or belting, of textile material, whether or not impregnated, coated, covered or laminated with plastics, or reinforced with metal or other material :Hair belting" +59100050,"TRANSMISSION OR CONVEYOR BELTS OR BELTING, OF TEXTILE MATERIAL, WHETHER OR NOT IMPREGNATED, COATED, COVERED OR LAMINATED WITH PLASTICS, OR REINFORCED WITH METAL OR OTHER MATERIAL:Transmission or conveyor belts or belting, of textile material, whether or not impregnated, coated, covered or laminated with plastics, or reinforced with metal or other material :Flax canvas ply belting" +59100060,"TRANSMISSION OR CONVEYOR BELTS OR BELTING, OF TEXTILE MATERIAL, WHETHER OR NOT IMPREGNATED, COATED, COVERED OR LAMINATED WITH PLASTICS, OR REINFORCED WITH METAL OR OTHER MATERIAL:Transmission or conveyor belts or belting, of textile material, whether or not impregnated, coated, covered or laminated with plastics, or reinforced with metal or other material :Fibre belt conveyor" +59100090,"TRANSMISSION OR CONVEYOR BELTS OR BELTING, OF TEXTILE MATERIAL, WHETHER OR NOT IMPREGNATED, COATED, COVERED OR LAMINATED WITH PLASTICS, OR REINFORCED WITH METAL OR OTHER MATERIAL:Transmission or conveyor belts or belting, of textile material, whether or not impregnated, coated, covered or laminated with plastics, or reinforced with metal or other material :Other" +59110000,textile products and articles for technical uses specified in note 8 to this chapter +59111000,"TEXTILE PRODUCTS AND ARTICLES, FOR TECHNICAL USES, SPECIFIED IN NOTE 7 TO THIS CHAPTER::Textile fabrics, felt and felt-lined woven fabrics, coated, covered or laminated with rubber, leather or other material, of a kind used for card clothing, and similar fabrics of a kind used for other technical purposes, including narrow fabrics made of velvet impregnated with rubber, for covering weaving spindles (weaving beams)" +59112000,"TEXTILE PRODUCTS AND ARTICLES, FOR TECHNICAL USES, SPECIFIED IN NOTE 7 TO THIS CHAPTER::Bolting cloth, whether or not made up 12.5% -" +59113100,textile products and articles for technical uses specified in note 8 to this chapter >> weighing less than 650 +59113110,"TEXTILE PRODUCTS AND ARTICLES, FOR TECHNICAL USES, SPECIFIED IN NOTE 7 TO THIS CHAPTER:Weighing less than 650 g/m2 :Felt for cotton textile industries, woven" +59113120,"TEXTILE PRODUCTS AND ARTICLES, FOR 
TECHNICAL USES, SPECIFIED IN NOTE 7 TO THIS CHAPTER:Weighing less than 650 g/m2 :Woven textiles felt, whether or not impregnated or coated, of a kind commonly used in other machines" +59113130,"TEXTILE PRODUCTS AND ARTICLES, FOR TECHNICAL USES, SPECIFIED IN NOTE 7 TO THIS CHAPTER:Weighing less than 650 g/m2 :Cotton fabrics and articles used in machinery and plant" +59113140,"TEXTILE PRODUCTS AND ARTICLES, FOR TECHNICAL USES, SPECIFIED IN NOTE 7 TO THIS CHAPTER:Weighing less than 650 g/m2 :Jute fabrics and articles used in machinery or plant" +59113150,"TEXTILE PRODUCTS AND ARTICLES, FOR TECHNICAL USES, SPECIFIED IN NOTE 7 TO THIS CHAPTER:Weighing less than 650 g/m2 :Textile fabrics of metalised yarn of a kind commonly used in paper making or other machinery" +59113190,"TEXTILE PRODUCTS AND ARTICLES, FOR TECHNICAL USES, SPECIFIED IN NOTE 7 TO THIS CHAPTER:Weighing less than 650 g/m2 :Other" +59113200,textile products and articles for technical uses specified in note 8 to this chapter >> weighing 650 or more +59113210,"TEXTILE PRODUCTS AND ARTICLES, FOR TECHNICAL USES, SPECIFIED IN NOTE 7 TO THIS CHAPTER:Weighing 650 g/m2 or more:Felt for cotton textile industries, woven" +59113220,"TEXTILE PRODUCTS AND ARTICLES, FOR TECHNICAL USES, SPECIFIED IN NOTE 7 TO THIS CHAPTER:Weighing 650 g/m2 or more:Woven textiles felt, whether or not impregnated or coated, of a kind commonly used in other machines" +59113230,"TEXTILE PRODUCTS AND ARTICLES, FOR TECHNICAL USES, SPECIFIED IN NOTE 7 TO THIS CHAPTER:Weighing 650 g/m2 or more:Cotton fabrics and articles used in machinery and plant" +59113240,"TEXTILE PRODUCTS AND ARTICLES, FOR TECHNICAL USES, SPECIFIED IN NOTE 7 TO THIS CHAPTER:Weighing 650 g/m2 or more:Jute fabrics and articles used in machinery or plant" +59113250,"TEXTILE PRODUCTS AND ARTICLES, FOR TECHNICAL USES, SPECIFIED IN NOTE 7 TO THIS CHAPTER:Weighing 650 g/m2 or more:Textile fabrics of metalised yarn of a kind commonly used in paper making or other machinery" +59113290,"TEXTILE PRODUCTS AND ARTICLES, FOR TECHNICAL USES, SPECIFIED IN NOTE 7 TO THIS CHAPTER:Weighing 650 g/m2 or more:Other" +59114000,"TEXTILE PRODUCTS AND ARTICLES, FOR TECHNICAL USES, SPECIFIED IN NOTE 7 TO THIS CHAPTER::Straining cloth of a kind used in oil presses or the like, including that of human hair" +59119000,textile products and articles for technical uses specified in note 8 to this chapter >> other +59119010,"TEXTILE PRODUCTS AND ARTICLES, FOR TECHNICAL USES, SPECIFIED IN NOTE 7 TO THIS CHAPTER:Other :Paper maker's felt, woven" +59119020,"TEXTILE PRODUCTS AND ARTICLES, FOR TECHNICAL USES, SPECIFIED IN NOTE 7 TO THIS CHAPTER:Other :Gaskets, washers, polishing discs and other machinery parts of textile articles" +59119031,textile products and articles for technical uses specified in note 8 to this chapter >> other >> geogrid conforming to is 17373 +59119032,textile products and articles for technical uses specified in note 8 to this chapter >> other >> geotextile conforming to is 16391 is 16392 +59119039,textile products and articles for technical uses specified in note 8 to this chapter >> other >> other +59119040,textile products and articles for technical uses specified in note 8 to this chapter >> other >> mulch mats conforming to is 16202 +59119090,"TEXTILE PRODUCTS AND ARTICLES, FOR TECHNICAL USES, SPECIFIED IN NOTE 7 TO THIS CHAPTER:Other :Other" +60000000,700 +60010000,700 >> pile fabrics including fabrics and terry fabrics knitted or crocheted +60011000,700 >> pile fabrics including fabrics and terry fabrics 
knitted or crocheted >> long pile fabrics +60011010,"PILE FABRICS, INCLUDING ""LONG PILE"" FABRICS AND TERRY FABRICS, KNITTED OR CROCHETED:""Long pile"" fabrics :Of cotton" +60011020,"PILE FABRICS, INCLUDING ""LONG PILE"" FABRICS AND TERRY FABRICS, KNITTED OR CROCHETED:""Long pile"" fabrics :Of man-made fibres" +60011090,"PILE FABRICS, INCLUDING ""LONG PILE"" FABRICS AND TERRY FABRICS, KNITTED OR CROCHETED:""Long pile"" fabrics :Of other textile materials" +60012100,"PILE FABRICS, INCLUDING ""LONG PILE"" FABRICS AND TERRY FABRICS, KNITTED OR CROCHETED::Of cotton" +60012200,"PILE FABRICS, INCLUDING ""LONG PILE"" FABRICS AND TERRY FABRICS, KNITTED OR CROCHETED::Of man-made fibres" +60012900,"PILE FABRICS, INCLUDING ""LONG PILE"" FABRICS AND TERRY FABRICS, KNITTED OR CROCHETED::Of other textile materials" +60019100,"PILE FABRICS, INCLUDING ""LONG PILE"" FABRICS AND TERRY FABRICS, KNITTED OR CROCHETED::Of cotton" +60019200,"PILE FABRICS, INCLUDING ""LONG PILE"" FABRICS AND TERRY FABRICS, KNITTED OR CROCHETED::Of man-made fibres" +60019900,700 >> pile fabrics including fabrics and terry fabrics knitted or crocheted >> of other textile materials +60019910,"PILE FABRICS, INCLUDING ""LONG PILE"" FABRICS AND TERRY FABRICS, KNITTED OR CROCHETED:Of other textile materials :Of wool or fine animal hair" +60019990,"PILE FABRICS, INCLUDING ""LONG PILE"" FABRICS AND TERRY FABRICS, KNITTED OR CROCHETED:Of other textile materials :Other" +60020000,700 >> knitted or crocheted fabrics of a width not exceeding 30 cm containing by weight 5 or more of elastomeric yarn or rubber thread other than those of heading 6001 +60024000,"KNITTED OR CROCHETED FABRICS OF A WIDTH NOT EXCEEDING 30 CM, CONTAINING BY WEIGHT 5% OR MORE OF ELASTOMERIC YARN OR RUBBER THREAD, OTHER THAN THOSE OF HEADING 6001::Containing by weight 5% or more of elastomeric yarn but not containing rubber thread" +60029000,"KNITTED OR CROCHETED FABRICS OF A WIDTH NOT EXCEEDING 30 CM, CONTAINING BY WEIGHT 5% OR MORE OF ELASTOMERIC YARN OR RUBBER THREAD, OTHER THAN THOSE OF HEADING 6001::Other" +60030000,700 >> knitted or crocheted fabrics of a width not exceeding 30 cm other than those of heading 6001 or 6002 +60031000,"KNITTED OR CROCHETED FABRICS OF A WIDTH NOT EXCEEDING 30 CM, OTHER THAN THOSE OF HEADING 6001 OR 6002::Of wool or fine animal hair" +60032000,"KNITTED OR CROCHETED FABRICS OF A WIDTH NOT EXCEEDING 30 CM, OTHER THAN THOSE OF HEADING 6001 OR 6002::Of cotton" +60033000,"KNITTED OR CROCHETED FABRICS OF A WIDTH NOT EXCEEDING 30 CM, OTHER THAN THOSE OF HEADING 6001 OR 6002::Of synthetic fibres" +60034000,"KNITTED OR CROCHETED FABRICS OF A WIDTH NOT EXCEEDING 30 CM, OTHER THAN THOSE OF HEADING 6001 OR 6002::Of artificial fibres" +60039000,"KNITTED OR CROCHETED FABRICS OF A WIDTH NOT EXCEEDING 30 CM, OTHER THAN THOSE OF HEADING 6001 OR 6002::Other" +60040000,700 >> knitted or crocheted fabrics of a width exceeding 30 cm containing by weight 5 or more of elastomeric yarn or rubber thread other than those of heading 6001 +60041000,"KNITTED OR CROCHETED FABRICS OF A WIDTH EXCEEDING 30 CM, CONTAINING BY WEIGHT 5% OR MORE OF ELASTOMERIC YARN OR RUBBER THREAD, OTHER THAN THOSE OF HEADING 6001::Containing by weight 5% or more of elastomeric yarn but not containing rubber thread" +60049000,"KNITTED OR CROCHETED FABRICS OF A WIDTH EXCEEDING 30 CM, CONTAINING BY WEIGHT 5% OR MORE OF ELASTOMERIC YARN OR RUBBER THREAD, OTHER THAN THOSE OF HEADING 6001::Other" +60050000,700 >> warp knit fabrics including those made on galloon knitting machines other than 
those of headings 6001 to 6004 of cotton +60052100,"WARP KNIT FABRICS (INCLUDING THOSE MADE ON GALLOON KNITTING MACHINES), OTHER THAN THOSE OF HEADINGS 6001 TO 6004::Unbleached or bleached" +60052200,"WARP KNIT FABRICS (INCLUDING THOSE MADE ON GALLOON KNITTING MACHINES), OTHER THAN THOSE OF HEADINGS 6001 TO 6004::Dyed" +60052300,"WARP KNIT FABRICS (INCLUDING THOSE MADE ON GALLOON KNITTING MACHINES), OTHER THAN THOSE OF HEADINGS 6001 TO 6004::Of yarns of different colours" +60052400,"WARP KNIT FABRICS (INCLUDING THOSE MADE ON GALLOON KNITTING MACHINES), OTHER THAN THOSE OF HEADINGS 6001 TO 6004::Printed" +60053100,"WARP KNIT FABRICS (INCLUDING THOSE MADE ON GALLOON KNITTING MACHINES), OTHER THAN THOSE OF HEADINGS 6001 TO 6004::Unbleached or bleached(OLD tariff)" +60053200,"WARP KNIT FABRICS (INCLUDING THOSE MADE ON GALLOON KNITTING MACHINES), OTHER THAN THOSE OF HEADINGS 6001 TO 6004::Dyed(OLD tariff)" +60053300,"WARP KNIT FABRICS (INCLUDING THOSE MADE ON GALLOON KNITTING MACHINES), OTHER THAN THOSE OF HEADINGS 6001 TO 6004::Of yarns of different colours(OLD tariff)" +60053400,"WARP KNIT FABRICS (INCLUDING THOSE MADE ON GALLOON KNITTING MACHINES), OTHER THAN THOSE OF HEADINGS 6001 TO 6004::Printed(OLD tariff)" +60053500,"WARP KNIT FABRICS (INCLUDING THOSE MADE ON GALLOON KNITTING MACHINES), OTHER THAN THOSE OF HEADINGS 6001 TO 6004::Fabrics specified in Sub-heading Note 1 to this Chapter" +60053600,"WARP KNIT FABRICS (INCLUDING THOSE MADE ON GALLOON KNITTING MACHINES), OTHER THAN THOSE OF HEADINGS 6001 TO 6004::Other, unbleached or bleached" +60053700,"WARP KNIT FABRICS (INCLUDING THOSE MADE ON GALLOON KNITTING MACHINES), OTHER THAN THOSE OF HEADINGS 6001 TO 6004::Other, dyed" +60053710,700 >> warp knit fabrics including those made on galloon knitting machines other than those of headings 6001 to 6004 of cotton >> other dyed >> shade nets conforming to is 16008 +60053790,700 >> warp knit fabrics including those made on galloon knitting machines other than those of headings 6001 to 6004 of cotton >> other dyed >> other +60053800,"WARP KNIT FABRICS (INCLUDING THOSE MADE ON GALLOON KNITTING MACHINES), OTHER THAN THOSE OF HEADINGS 6001 TO 6004::Other, of yarns of different colours" +60053900,"WARP KNIT FABRICS (INCLUDING THOSE MADE ON GALLOON KNITTING MACHINES), OTHER THAN THOSE OF HEADINGS 6001 TO 6004::Other, printed" +60054100,"WARP KNIT FABRICS (INCLUDING THOSE MADE ON GALLOON KNITTING MACHINES), OTHER THAN THOSE OF HEADINGS 6001 TO 6004::Unbleached or bleached" +60054200,"WARP KNIT FABRICS (INCLUDING THOSE MADE ON GALLOON KNITTING MACHINES), OTHER THAN THOSE OF HEADINGS 6001 TO 6004::Dyed" +60054300,"WARP KNIT FABRICS (INCLUDING THOSE MADE ON GALLOON KNITTING MACHINES), OTHER THAN THOSE OF HEADINGS 6001 TO 6004::Of yarns of different colours" +60054400,"WARP KNIT FABRICS (INCLUDING THOSE MADE ON GALLOON KNITTING MACHINES), OTHER THAN THOSE OF HEADINGS 6001 TO 6004::Printed" +60059000,"WARP KNIT FABRICS (INCLUDING THOSE MADE ON GALLOON KNITTING MACHINES), OTHER THAN THOSE OF HEADINGS 6001 TO 6004::Other" +60060000,700 >> other knitted or crocheted fabrics +60061000,OTHER KNITTED OR CROCHETED FABRICS::Of wool or fine animal hair +60062100,OTHER KNITTED OR CROCHETED FABRICS::Unbleached or bleached +60062200,OTHER KNITTED OR CROCHETED FABRICS::Dyed +60062300,OTHER KNITTED OR CROCHETED FABRICS::Of yarns of different colours +60062400,OTHER KNITTED OR CROCHETED FABRICS::Printed +60063100,OTHER KNITTED OR CROCHETED FABRICS::Unbleached or bleached +60063200,OTHER KNITTED OR CROCHETED FABRICS::Dyed 
+60063300,OTHER KNITTED OR CROCHETED FABRICS::Of yarns of different colours +60063400,OTHER KNITTED OR CROCHETED FABRICS::Printed +60064100,OTHER KNITTED OR CROCHETED FABRICS::Unbleached or bleached +60064200,OTHER KNITTED OR CROCHETED FABRICS::Dyed +60064300,OTHER KNITTED OR CROCHETED FABRICS::Of yarns of different colours +60064400,OTHER KNITTED OR CROCHETED FABRICS::Printed +60069000,OTHER KNITTED OR CROCHETED FABRICS::Other +61010000,men s or carcoats anoraks +61012000,"MEN'S OR BOY'S OVERCOATS, CARCOATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, KNITTED OR CROCHETED, OTHER THAN THOSE OF HEADING 6103::Of cotton(OLD tariff)" +61013010,"MEN'S OR BOY'S OVERCOATS, CARCOATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, KNITTED OR CROCHETED, OTHER THAN THOSE OF HEADING 6103:Of man-made fibres :Of synthetic fibres(OLD tariff)" +61013020,"MEN'S OR BOY'S OVERCOATS, CARCOATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, KNITTED OR CROCHETED, OTHER THAN THOSE OF HEADING 6103:Of man-made fibres :Of artificial fibres(OLD tariff)" +61019010,"MEN'S OR BOY'S OVERCOATS, CARCOATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, KNITTED OR CROCHETED, OTHER THAN THOSE OF HEADING 6103:Other :Of silk(OLD tariff)" +61019090,"MEN'S OR BOY'S OVERCOATS, CARCOATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, KNITTED OR CROCHETED, OTHER THAN THOSE OF HEADING 6103:Other :Other(OLD tariff)" +61021000,"WOMEN'S OR GIRL'S OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKIJACKETS), WIND-CHEATERS, WIND-JACKETS AND SIMILAR ARTICLES, KNITTED OR CROCHETED, OTHER THAN THOSE OF HEADING 6104::Of wool or fine animal hair(OLD tariff)" +61022000,"WOMEN'S OR GIRL'S OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKIJACKETS), WIND-CHEATERS, WIND-JACKETS AND SIMILAR ARTICLES, KNITTED OR CROCHETED, OTHER THAN THOSE OF HEADING 6104::Of cotton(OLD tariff)" +61023010,"WOMEN'S OR GIRL'S OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKIJACKETS), WIND-CHEATERS, WIND-JACKETS AND SIMILAR ARTICLES, KNITTED OR CROCHETED, OTHER THAN THOSE OF HEADING 6104:Of man-made fibres :Of synthetic fibres(OLD tariff)" +61023020,"WOMEN'S OR GIRL'S OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKIJACKETS), WIND-CHEATERS, WIND-JACKETS AND SIMILAR ARTICLES, KNITTED OR CROCHETED, OTHER THAN THOSE OF HEADING 6104:Of man-made fibres :Of artificial fibres(OLD tariff)" +61029010,"WOMEN'S OR GIRL'S OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKIJACKETS), WIND-CHEATERS, WIND-JACKETS AND SIMILAR ARTICLES, KNITTED OR CROCHETED, OTHER THAN THOSE OF HEADING 6104:Other :Of silk(OLD tariff)" +61029090,"WOMEN'S OR GIRL'S OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKIJACKETS), WIND-CHEATERS, WIND-JACKETS AND SIMILAR ARTICLES, KNITTED OR CROCHETED, OTHER THAN THOSE OF HEADING 6104:Other :Other(OLD tariff)" +61030000,men or boys suits ensembles jackets blazers trousers biband brace overalls breeches and shorts other than swim wear knitted or crocheted +61031000,men or boys suits ensembles jackets blazers trousers biband brace overalls breeches and shorts other than swim wear knitted or crocheted >> suits +61031010,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIBAND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR 
CROCHETED:Suits :Of silk" +61031020,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIBAND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Suits :Of cotton" +61031030,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIBAND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Suits :Of artificial fibres" +61031090,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIBAND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Suits :Other" +61032200,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIBAND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED::Of cotton" +61032300,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIBAND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED::Of synthetic fibres" +61032900,men or boys suits ensembles jackets blazers trousers biband brace overalls breeches and shorts other than swim wear knitted or crocheted >> of other textile materials +61032910,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIBAND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Of other textile materials :Of silk" +61032920,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIBAND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Of other textile materials :Of artificial fibres" +61032990,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIBAND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Of other textile materials :Other" +61033100,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIBAND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED::Of wool or fine animal hair" +61033200,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIBAND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED::Of cotton" +61033300,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIBAND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED::Of synthetic fibres" +61033900,men or boys suits ensembles jackets blazers trousers biband brace overalls breeches and shorts other than swim wear knitted or crocheted >> of other textile materials +61033910,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIBAND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Of other textile materials :Of silk" +61033920,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIBAND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Of other textile materials :Of artificial fibres" +61033990,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIBAND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Of other textile materials :Other" +61034100,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIBAND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED::Of wool or fine animal hair" +61034200,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIBAND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED::Of cotton" +61034300,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIBAND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED::Of 
synthetic fibres" +61034900,men or boys suits ensembles jackets blazers trousers biband brace overalls breeches and shorts other than swim wear knitted or crocheted >> of other textile materials +61034910,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIBAND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Of other textile materials :Of silk" +61034920,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIBAND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Of other textile materials :Of artificial fibres" +61034990,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIBAND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Of other textile materials :Other" +61040000,or women s girls suits ensembles jackets blazers suits +61041300,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED::Of synthetic fibres" +61041900,or women s girls suits ensembles jackets blazers suits >> of other textile materials +61041910,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Of other textile materials :Of silk" +61041920,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Of other textile materials :Of artificial fibres" +61041990,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Of other textile materials :Other" +61042200,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED::Of cotton" +61042300,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED::Of synthetic fibres" +61042900,or women s girls suits ensembles jackets blazers suits >> of other textile materials +61042910,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Of other textile materials :Of silk" +61042920,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Of other textile materials :Of artificial fibres" +61042990,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Of other textile materials :Other" +61043100,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED::Of wool or fine animal hair" +61043200,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED::Of 
cotton" +61043300,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED::Of synthetic fibres" +61043900,or women s girls suits ensembles jackets blazers suits >> of other textile materials +61043910,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Of other textile materials :Of silk" +61043920,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Of other textile materials :Of artificial fibres" +61043990,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Of other textile materials :Other" +61044100,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED::Of wool or fine animal hair" +61044200,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED::Of cotton" +61044300,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED::Of synthetic fibres" +61044400,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED::Of artificial fibres which ever is higher" +61044900,or women s girls suits ensembles jackets blazers suits >> of other textile materials +61044910,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Of other textile materials :Of silk" +61044990,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Of other textile materials :Of other fibres" +61045100,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED::Of wool or fine animal hair" +61045200,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED::Of cotton" +61045300,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED::Of synthetic fibres which ever is higher" +61045900,or women s girls suits ensembles jackets blazers suits >> of other textile materials +61045910,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Of other textile materials :Of silk" +61045920,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, 
JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Of other textile materials :Of artificial fibres" +61045990,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Of other textile materials :Of other fibres" +61046100,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED::Of wool or fine animal hair" +61046200,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED::Of cotton" +61046300,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED::Of synthetic fibres" +61046900,or women s girls suits ensembles jackets blazers suits >> of other textile materials +61046910,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Of other textile materials :Of silk" +61046920,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Of other textile materials :Of artificial fibres" +61046990,"WOMEN'S OR GIRLS' SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIM WEAR), KNITTED OR CROCHETED:Of other textile materials :Other" +61050000,men s or boys shirts knitted or crocheted +61051000,men s or boys shirts knitted or crocheted >> of cotton +61051010,"MEN'S OR BOYS SHIRTS, KNITTED OR CROCHETED:Of cotton :Shirts, hand crocheted" +61051020,"MEN'S OR BOYS SHIRTS, KNITTED OR CROCHETED:Of cotton :Knit shirts (other than T-shirts) and sweat shirts, other than hand crocheted" +61051090,"MEN'S OR BOYS SHIRTS, KNITTED OR CROCHETED:Of cotton :Other" +61052000,men s or boys shirts knitted or crocheted >> of fibres +61052010,"MEN'S OR BOYS SHIRTS, KNITTED OR CROCHETED:Of man-made fibres :Of synthetic fibres" +61052020,"MEN'S OR BOYS SHIRTS, KNITTED OR CROCHETED:Of man-made fibres :Of artificial fibres" +61059000,men s or boys shirts knitted or crocheted >> of other textile materials +61059010,"MEN'S OR BOYS SHIRTS, KNITTED OR CROCHETED:Of other textile materials :Of silk" +61059090,"MEN'S OR BOYS SHIRTS, KNITTED OR CROCHETED:Of other textile materials :Other" +61060000,women s or girls blouses shirts and knitted or crocheted +61061000,"WOMEN'S OR GIRL'S BLOUSES, SHIRTS AND SHIRT-BLOUSES, KNITTED OR CROCHETED::Of cotton" +61062000,women s or girls blouses shirts and knitted or crocheted >> of fibres +61062010,"WOMEN'S OR GIRL'S BLOUSES, SHIRTS AND SHIRT-BLOUSES, KNITTED OR CROCHETED:Of man-made fibres :Of synthetic fibres" +61062020,"WOMEN'S OR GIRL'S BLOUSES, SHIRTS AND SHIRT-BLOUSES, KNITTED OR CROCHETED:Of man-made fibres :Of artificial fibres" +61069000,women s or girls blouses shirts and knitted or crocheted >> of other textile materials +61069010,"WOMEN'S OR GIRL'S BLOUSES, SHIRTS AND SHIRT-BLOUSES, KNITTED OR CROCHETED:Of other textile materials :Of 
silk" +61069020,"WOMEN'S OR GIRL'S BLOUSES, SHIRTS AND SHIRT-BLOUSES, KNITTED OR CROCHETED:Of other textile materials :Of wool or fine animal hair" +61069090,"WOMEN'S OR GIRL'S BLOUSES, SHIRTS AND SHIRT-BLOUSES, KNITTED OR CROCHETED:Of other textile materials :Other" +61070000,men s or boys underpants briefs nightshirts pyjamas bathrobes dressing gowns and similar articles knitted or crocheted underpants and briefs +61071100,"MEN'S OR BOYS' UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED::Of cotton" +61071200,men s or boys underpants briefs nightshirts pyjamas bathrobes dressing gowns and similar articles knitted or crocheted underpants and briefs >> of fibres +61071210,"MEN'S OR BOYS' UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of man-made fibres :Of synthetic fibres" +61071220,"MEN'S OR BOYS' UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of man-made fibres :Of artificial fibres" +61071900,men s or boys underpants briefs nightshirts pyjamas bathrobes dressing gowns and similar articles knitted or crocheted underpants and briefs >> of other textile materials +61071910,"MEN'S OR BOYS' UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of other textile materials :Of silk" +61071990,"MEN'S OR BOYS' UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of other textile materials :Other" +61072100,"MEN'S OR BOYS' UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED::Of cotton" +61072200,men s or boys underpants briefs nightshirts pyjamas bathrobes dressing gowns and similar articles knitted or crocheted underpants and briefs >> of fibres +61072210,"MEN'S OR BOYS' UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of man-made fibres :Of synthetic fibres" +61072220,"MEN'S OR BOYS' UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of man-made fibres :Of artificial fibres" +61072900,men s or boys underpants briefs nightshirts pyjamas bathrobes dressing gowns and similar articles knitted or crocheted underpants and briefs >> of other textile materials +61072910,"MEN'S OR BOYS' UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of other textile materials :Of silk" +61072920,"MEN'S OR BOYS' UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of other textile materials :Of wool or fine animal hair" +61072990,"MEN'S OR BOYS' UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of other textile materials :Other" +61079100,men s or boys underpants briefs nightshirts pyjamas bathrobes dressing gowns and similar articles knitted or crocheted underpants and briefs >> of cotton +61079110,"MEN'S OR BOYS' UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of cotton :Gengis (Vests), other than hand crocheted" +61079190,"MEN'S OR BOYS' UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of cotton :Other" +61079900,men s or boys underpants 
briefs nightshirts pyjamas bathrobes dressing gowns and similar articles knitted or crocheted underpants and briefs >> of other textile materials +61079910,"MEN'S OR BOYS' UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of other textile materials :Of silk" +61079920,"MEN'S OR BOYS' UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of other textile materials :Of wool or fine animal hair" +61079990,"MEN'S OR BOYS' UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of other textile materials :Other" +61080000,or women s girls slips petticoats briefs panties night dresses pyjamas negligees bathrobes dressing gowns and similar articles knitted or crocheted slips and petticoats +61081100,or women s girls slips petticoats briefs panties night dresses pyjamas negligees bathrobes dressing gowns and similar articles knitted or crocheted slips and petticoats >> of fibres +61081110,"WOMEN'S OR GIRLS' SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHT DRESSES, PYJAMAS, NEGLIGEES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of man-made fibres :Of synthetic fibres" +61081120,"WOMEN'S OR GIRLS' SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHT DRESSES, PYJAMAS, NEGLIGEES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of man-made fibres :Of artificial fibres" +61081900,or women s girls slips petticoats briefs panties night dresses pyjamas negligees bathrobes dressing gowns and similar articles knitted or crocheted slips and petticoats >> of other textile materials +61081910,"WOMEN'S OR GIRLS' SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHT DRESSES, PYJAMAS, NEGLIGEES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of other textile materials :Of silk" +61081920,"WOMEN'S OR GIRLS' SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHT DRESSES, PYJAMAS, NEGLIGEES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of other textile materials :Of cotton" +61081990,"WOMEN'S OR GIRLS' SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHT DRESSES, PYJAMAS, NEGLIGEES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of other textile materials :Of other fibres" +61082100,"WOMEN'S OR GIRLS' SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHT DRESSES, PYJAMAS, NEGLIGEES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED::Of cotton" +61082200,or women s girls slips petticoats briefs panties night dresses pyjamas negligees bathrobes dressing gowns and similar articles knitted or crocheted slips and petticoats >> of fibres +61082210,"WOMEN'S OR GIRLS' SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHT DRESSES, PYJAMAS, NEGLIGEES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of man-made fibres :Of synthetic fibres" +61082220,"WOMEN'S OR GIRLS' SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHT DRESSES, PYJAMAS, NEGLIGEES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of man-made fibres :Of artificial fibres" +61082900,or women s girls slips petticoats briefs panties night dresses pyjamas negligees bathrobes dressing gowns and similar articles knitted or crocheted slips and petticoats >> of other textile materials +61082910,"WOMEN'S OR GIRLS' SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHT DRESSES, PYJAMAS, NEGLIGEES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of other textile materials :Of silk" +61082990,"WOMEN'S OR GIRLS' 
SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHT DRESSES, PYJAMAS, NEGLIGEES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of other textile materials :Other" +61083100,"WOMEN'S OR GIRLS' SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHT DRESSES, PYJAMAS, NEGLIGEES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED::Of cotton" +61083200,or women s girls slips petticoats briefs panties night dresses pyjamas negligees bathrobes dressing gowns and similar articles knitted or crocheted slips and petticoats >> of fibres +61083210,"WOMEN'S OR GIRLS' SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHT DRESSES, PYJAMAS, NEGLIGEES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of man-made fibres :Of synthetic fibres" +61083220,"WOMEN'S OR GIRLS' SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHT DRESSES, PYJAMAS, NEGLIGEES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of man-made fibres :Of artificial fibres" +61083900,or women s girls slips petticoats briefs panties night dresses pyjamas negligees bathrobes dressing gowns and similar articles knitted or crocheted slips and petticoats >> of other textile materials +61083910,"WOMEN'S OR GIRLS' SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHT DRESSES, PYJAMAS, NEGLIGEES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of other textile materials :Of silk" +61083990,"WOMEN'S OR GIRLS' SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHT DRESSES, PYJAMAS, NEGLIGEES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of other textile materials :Other" +61089100,"WOMEN'S OR GIRLS' SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHT DRESSES, PYJAMAS, NEGLIGEES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED::Of cotton" +61089210,"WOMEN'S OR GIRLS' SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHT DRESSES, PYJAMAS, NEGLIGEES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of man-made fibres :Of synthetic fibres(OLD tariff)" +61089220,"WOMEN'S OR GIRLS' SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHT DRESSES, PYJAMAS, NEGLIGEES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of man-made fibres :Of artificial fibres(OLD tariff)" +61089900,or women s girls slips petticoats briefs panties night dresses pyjamas negligees bathrobes dressing gowns and similar articles knitted or crocheted slips and petticoats >> of other textile materials +61089910,"WOMEN'S OR GIRLS' SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHT DRESSES, PYJAMAS, NEGLIGEES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of other textile materials :Of silk" +61089920,"WOMEN'S OR GIRLS' SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHT DRESSES, PYJAMAS, NEGLIGEES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of other textile materials :Of wool or fine animal hair" +61089990,"WOMEN'S OR GIRLS' SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHT DRESSES, PYJAMAS, NEGLIGEES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of other textile materials :Of other textile materials" +61090000,singlets and other vests knitted or crocheted +61091000,"T-SHIRTS, SINGLETS AND OTHER VESTS, KNITTED OR CROCHETED::Of cotton" +61099000,singlets and other vests knitted or crocheted >> of other textile materials +61099010,"T-SHIRTS, SINGLETS AND OTHER VESTS, KNITTED OR CROCHETED:Of other textile materials :Of synthetic fibres" +61099020,"T-SHIRTS, SINGLETS AND OTHER VESTS, KNITTED OR CROCHETED:Of other textile materials :Of artificial fibres" 
+61099030,"T-SHIRTS, SINGLETS AND OTHER VESTS, KNITTED OR CROCHETED:Of other textile materials :Of silk" +61099040,"T-SHIRTS, SINGLETS AND OTHER VESTS, KNITTED OR CROCHETED:Of other textile materials :Of wool or fine animal hair" +61099090,"T-SHIRTS, SINGLETS AND OTHER VESTS, KNITTED OR CROCHETED:Of other textile materials :Other" +61100000,jerseys pullovers cardigans waistcoats and similar articles knitted or crocheted of wool or fine animal hair +61101100,jerseys pullovers cardigans waistcoats and similar articles knitted or crocheted of wool or fine animal hair >> of wool +61101110,"JERSEYS, PULLOVERS, CARDIGANS, WAISTCOATS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of wool :Jerseys" +61101120,"JERSEYS, PULLOVERS, CARDIGANS, WAISTCOATS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of wool :Sweaters and cardigans" +61101190,"JERSEYS, PULLOVERS, CARDIGANS, WAISTCOATS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of wool :Other" +61101200,"JERSEYS, PULLOVERS, CARDIGANS, WAISTCOATS AND SIMILAR ARTICLES, KNITTED OR CROCHETED::Of Kashmir (cashmere) goats" +61101900,"JERSEYS, PULLOVERS, CARDIGANS, WAISTCOATS AND SIMILAR ARTICLES, KNITTED OR CROCHETED::Other" +61102000,"JERSEYS, PULLOVERS, CARDIGANS, WAISTCOATS AND SIMILAR ARTICLES, KNITTED OR CROCHETED::Of cotton" +61103000,jerseys pullovers cardigans waistcoats and similar articles knitted or crocheted of wool or fine animal hair >> of fibres +61103010,"JERSEYS, PULLOVERS, CARDIGANS, WAISTCOATS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of man-made fibres :Of synthetic fibres" +61103020,"JERSEYS, PULLOVERS, CARDIGANS, WAISTCOATS AND SIMILAR ARTICLES, KNITTED OR CROCHETED:Of man-made fibres :Of artificial fibres" +61109000,"JERSEYS, PULLOVERS, CARDIGANS, WAISTCOATS AND SIMILAR ARTICLES, KNITTED OR CROCHETED::Of other textile materials" +61110000,babies garments and clothing accessories knitted or crocheted +61112000,"BABIES' GARMENTS AND CLOTHING ACCESSORIES, KNITTED OR CROCHETED::Of cotton" +61113000,"BABIES' GARMENTS AND CLOTHING ACCESSORIES, KNITTED OR CROCHETED::Of synthetic fibres" +61119000,babies garments and clothing accessories knitted or crocheted >> of other textile materials +61119010,"BABIES' GARMENTS AND CLOTHING ACCESSORIES, KNITTED OR CROCHETED:Of other textile materials :Of silk" +61119020,"BABIES' GARMENTS AND CLOTHING ACCESSORIES, KNITTED OR CROCHETED:Of other textile materials :Of artificial fibres" +61119090,"BABIES' GARMENTS AND CLOTHING ACCESSORIES, KNITTED OR CROCHETED:Of other textile materials :Other" +61120000,track suits ski suits and swimwear knitted or crocheted track suits +61121100,"TRACK SUITS, SKI SUITS AND SWIMWEAR, KNITTED OR CROCHETED::Of cotton" +61121200,"TRACK SUITS, SKI SUITS AND SWIMWEAR, KNITTED OR CROCHETED::Of synthetic fibres" +61121900,track suits ski suits and swimwear knitted or crocheted track suits >> of other textile materials +61121910,"TRACK SUITS, SKI SUITS AND SWIMWEAR, KNITTED OR CROCHETED:Of other textile materials :Of silk" +61121920,"TRACK SUITS, SKI SUITS AND SWIMWEAR, KNITTED OR CROCHETED:Of other textile materials :Of wool or fine animal hair" +61121930,"TRACK SUITS, SKI SUITS AND SWIMWEAR, KNITTED OR CROCHETED:Of other textile materials :Of artificial fibres" +61121990,"TRACK SUITS, SKI SUITS AND SWIMWEAR, KNITTED OR CROCHETED:Of other textile materials :Other" +61122000,track suits ski suits and swimwear knitted or crocheted track suits >> ski suits +61122010,"TRACK SUITS, SKI SUITS AND SWIMWEAR, KNITTED OR CROCHETED:Ski suits :Of silk" +61122020,"TRACK SUITS, SKI SUITS AND SWIMWEAR, 
KNITTED OR CROCHETED:Ski suits :Of wool or fine animal hair" +61122030,"TRACK SUITS, SKI SUITS AND SWIMWEAR, KNITTED OR CROCHETED:Ski suits :Of cotton" +61122040,"TRACK SUITS, SKI SUITS AND SWIMWEAR, KNITTED OR CROCHETED:Ski suits :Of synthetic fibres" +61122050,"TRACK SUITS, SKI SUITS AND SWIMWEAR, KNITTED OR CROCHETED:Ski suits :Of artificial fibres" +61122090,"TRACK SUITS, SKI SUITS AND SWIMWEAR, KNITTED OR CROCHETED:Ski suits :Other" +61123100,"TRACK SUITS, SKI SUITS AND SWIMWEAR, KNITTED OR CROCHETED::Of synthetic fibres" +61123900,track suits ski suits and swimwear knitted or crocheted track suits >> of other textile materials +61123910,"TRACK SUITS, SKI SUITS AND SWIMWEAR, KNITTED OR CROCHETED:Of other textile materials :Of silk" +61123920,"TRACK SUITS, SKI SUITS AND SWIMWEAR, KNITTED OR CROCHETED:Of other textile materials :Of artificial fibres" +61123990,"TRACK SUITS, SKI SUITS AND SWIMWEAR, KNITTED OR CROCHETED:Of other textile materials :Other" +61124100,"TRACK SUITS, SKI SUITS AND SWIMWEAR, KNITTED OR CROCHETED::Of synthetic fibre" +61124900,track suits ski suits and swimwear knitted or crocheted track suits >> of other textile materials +61124910,"TRACK SUITS, SKI SUITS AND SWIMWEAR, KNITTED OR CROCHETED:Of other textile materials :Of silk" +61124920,"TRACK SUITS, SKI SUITS AND SWIMWEAR, KNITTED OR CROCHETED:Of other textile materials :Of artificial fibres" +61124990,"TRACK SUITS, SKI SUITS AND SWIMWEAR, KNITTED OR CROCHETED:Of other textile materials :Other" +61130000,"::GARMENTS, MADE UP OF KNITTED OR CROCHETED FABRICS OF HEADING 5903, 5906 OR 5907" +61140000,other garments knitted or crocheted +61142000,"OTHER GARMENTS, KNITTED OR CROCHETED::Of cotton" +61143000,other garments knitted or crocheted >> of fibres +61143010,"OTHER GARMENTS, KNITTED OR CROCHETED:Of man-made fibres :Of synthetic fibres" +61143020,"OTHER GARMENTS, KNITTED OR CROCHETED:Of man-made fibres :Of artificial fibres" +61149000,other garments knitted or crocheted >> of other textile materials +61149010,"OTHER GARMENTS, KNITTED OR CROCHETED:Of other textile materials :Of silk" +61149090,"OTHER GARMENTS, KNITTED OR CROCHETED:Of other textile materials :Other" +61150000,pantyhose tights stockings socks and other hosiery including graduated compre ssion hosiery for example stockings for varicose veins and footwear without applied soles knitted or crocheted +61151000,"PANTYHOSE, TIGHTS, STOCKINGS, SOCKS AND OTHER HOSIERY, INCLUDING GRADUATED COMPRE SSION HOSIERY (FOR EXAMPLE, STOCKINGS FOR VARICOSE VEINS) AND FOOTWEAR WITHOUT APPLIED SOLES, KNITTED OR CROCHETED::graduated compression hosiery for example, (stockings for varicose veins)" +61152100,"PANTYHOSE, TIGHTS, STOCKINGS, SOCKS AND OTHER HOSIERY, INCLUDING GRADUATED COMPRE SSION HOSIERY (FOR EXAMPLE, STOCKINGS FOR VARICOSE VEINS) AND FOOTWEAR WITHOUT APPLIED SOLES, KNITTED OR CROCHETED:Other panty hose and tights:Of synthetic fibres, measuring per single yarn less than 67 decitex" +61152200,"PANTYHOSE, TIGHTS, STOCKINGS, SOCKS AND OTHER HOSIERY, INCLUDING GRADUATED COMPRE SSION HOSIERY (FOR EXAMPLE, STOCKINGS FOR VARICOSE VEINS) AND FOOTWEAR WITHOUT APPLIED SOLES, KNITTED OR CROCHETED::Of synthetic fibres, measuring per single yarn less than 67 decitex or more" +61152900,pantyhose tights stockings socks and other hosiery including graduated compre ssion hosiery for example stockings for varicose veins and footwear without applied soles knitted or crocheted >> of other textile materials +61152910,"PANTYHOSE, TIGHTS, STOCKINGS, SOCKS AND OTHER HOSIERY, 
INCLUDING GRADUATED COMPRE SSION HOSIERY (FOR EXAMPLE, STOCKINGS FOR VARICOSE VEINS) AND FOOTWEAR WITHOUT APPLIED SOLES, KNITTED OR CROCHETED:Of other textile materials:Of silk" +61152920,"PANTYHOSE, TIGHTS, STOCKINGS, SOCKS AND OTHER HOSIERY, INCLUDING GRADUATED COMPRE SSION HOSIERY (FOR EXAMPLE, STOCKINGS FOR VARICOSE VEINS) AND FOOTWEAR WITHOUT APPLIED SOLES, KNITTED OR CROCHETED:Of other textile materials:Of wool or fine animal hair" +61152930,"PANTYHOSE, TIGHTS, STOCKINGS, SOCKS AND OTHER HOSIERY, INCLUDING GRADUATED COMPRE SSION HOSIERY (FOR EXAMPLE, STOCKINGS FOR VARICOSE VEINS) AND FOOTWEAR WITHOUT APPLIED SOLES, KNITTED OR CROCHETED:Of other textile materials:Of artificial fibres" +61152990,"PANTYHOSE, TIGHTS, STOCKINGS, SOCKS AND OTHER HOSIERY, INCLUDING GRADUATED COMPRE SSION HOSIERY (FOR EXAMPLE, STOCKINGS FOR VARICOSE VEINS) AND FOOTWEAR WITHOUT APPLIED SOLES, KNITTED OR CROCHETED:Of other textile materials:Other" +61153000,"PANTYHOSE, TIGHTS, STOCKINGS, SOCKS AND OTHER HOSIERY, INCLUDING GRADUATED COMPRE SSION HOSIERY (FOR EXAMPLE, STOCKINGS FOR VARICOSE VEINS) AND FOOTWEAR WITHOUT APPLIED SOLES, KNITTED OR CROCHETED::Other" +61159400,"PANTYHOSE, TIGHTS, STOCKINGS, SOCKS AND OTHER HOSIERY, INCLUDING GRADUATED COMPRE SSION HOSIERY (FOR EXAMPLE, STOCKINGS FOR VARICOSE VEINS) AND FOOTWEAR WITHOUT APPLIED SOLES, KNITTED OR CROCHETED::Of wool or fine animal hair" +61159500,"PANTYHOSE, TIGHTS, STOCKINGS, SOCKS AND OTHER HOSIERY, INCLUDING GRADUATED COMPRE SSION HOSIERY (FOR EXAMPLE, STOCKINGS FOR VARICOSE VEINS) AND FOOTWEAR WITHOUT APPLIED SOLES, KNITTED OR CROCHETED::Of cotton" +61159600,"PANTYHOSE, TIGHTS, STOCKINGS, SOCKS AND OTHER HOSIERY, INCLUDING GRADUATED COMPRE SSION HOSIERY (FOR EXAMPLE, STOCKINGS FOR VARICOSE VEINS) AND FOOTWEAR WITHOUT APPLIED SOLES, KNITTED OR CROCHETED::Of synthetic fibres" +61159900,pantyhose tights stockings socks and other hosiery including graduated compre ssion hosiery for example stockings for varicose veins and footwear without applied soles knitted or crocheted >> of other textile materials +61159910,"PANTYHOSE, TIGHTS, STOCKINGS, SOCKS AND OTHER HOSIERY, INCLUDING GRADUATED COMPRE SSION HOSIERY (FOR EXAMPLE, STOCKINGS FOR VARICOSE VEINS) AND FOOTWEAR WITHOUT APPLIED SOLES, KNITTED OR CROCHETED:Of other textile materials :Of artificial fibres" +61159990,"PANTYHOSE, TIGHTS, STOCKINGS, SOCKS AND OTHER HOSIERY, INCLUDING GRADUATED COMPRE SSION HOSIERY (FOR EXAMPLE, STOCKINGS FOR VARICOSE VEINS) AND FOOTWEAR WITHOUT APPLIED SOLES, KNITTED OR CROCHETED:Of other textile materials :Other" +61160000,gloves mittens and mitts knitted or crocheted +61161000,"GLOVES, MITTENS AND MITTS, KNITTED OR CROCHETED::Impregnated, coated or covered with plastics or rubber" +61169100,"GLOVES, MITTENS AND MITTS, KNITTED OR CROCHETED::Of wool or fine animal hair" +61169200,"GLOVES, MITTENS AND MITTS, KNITTED OR CROCHETED::Of cotton" +61169300,"GLOVES, MITTENS AND MITTS, KNITTED OR CROCHETED::Of synthetic fibres" +61169900,gloves mittens and mitts knitted or crocheted >> of other textile materials +61169910,"GLOVES, MITTENS AND MITTS, KNITTED OR CROCHETED:Of other textile materials :Of artificial fibres" +61169990,"GLOVES, MITTENS AND MITTS, KNITTED OR CROCHETED:Of other textile materials :Other" +61170000,made up clothing other or accessories knitted or crocheted knitted crocheted parts of garments or of clothing accessories +61171000,made up clothing other or accessories knitted or crocheted knitted crocheted parts of garments or of clothing accessories >> shawls 
scarves mufflers mantillas veils and the like +61171010,"OTHER MADE UP CLOTHING ACCESSORIES, KNITTED OR CROCHETED; KNITTED OR CROCHETED PARTS OF GARMENTS OR OF CLOTHING ACCESSORIES:Shawls, scarves, mufflers, mantillas, veils and the like :Of silk" +61171020,"OTHER MADE UP CLOTHING ACCESSORIES, KNITTED OR CROCHETED; KNITTED OR CROCHETED PARTS OF GARMENTS OR OF CLOTHING ACCESSORIES:Shawls, scarves, mufflers, mantillas, veils and the like :Of wool" +61171030,"OTHER MADE UP CLOTHING ACCESSORIES, KNITTED OR CROCHETED; KNITTED OR CROCHETED PARTS OF GARMENTS OR OF CLOTHING ACCESSORIES:Shawls, scarves, mufflers, mantillas, veils and the like :Of cotton" +61171040,"OTHER MADE UP CLOTHING ACCESSORIES, KNITTED OR CROCHETED; KNITTED OR CROCHETED PARTS OF GARMENTS OR OF CLOTHING ACCESSORIES:Shawls, scarves, mufflers, mantillas, veils and the like :Of man-made fibres" +61171090,"OTHER MADE UP CLOTHING ACCESSORIES, KNITTED OR CROCHETED; KNITTED OR CROCHETED PARTS OF GARMENTS OR OF CLOTHING ACCESSORIES:Shawls, scarves, mufflers, mantillas, veils and the like :Other" +61178000,made up clothing other or accessories knitted or crocheted knitted crocheted parts of garments or of clothing accessories >> other accessories +61178010,"OTHER MADE UP CLOTHING ACCESSORIES, KNITTED OR CROCHETED; KNITTED OR CROCHETED PARTS OF GARMENTS OR OF CLOTHING ACCESSORIES:Other accessories :Of silk" +61178020,"OTHER MADE UP CLOTHING ACCESSORIES, KNITTED OR CROCHETED; KNITTED OR CROCHETED PARTS OF GARMENTS OR OF CLOTHING ACCESSORIES:Other accessories :Of wool" +61178030,"OTHER MADE UP CLOTHING ACCESSORIES, KNITTED OR CROCHETED; KNITTED OR CROCHETED PARTS OF GARMENTS OR OF CLOTHING ACCESSORIES:Other accessories :Of cotton" +61178040,"OTHER MADE UP CLOTHING ACCESSORIES, KNITTED OR CROCHETED; KNITTED OR CROCHETED PARTS OF GARMENTS OR OF CLOTHING ACCESSORIES:Other accessories :Of man-made fibres" +61178090,"OTHER MADE UP CLOTHING ACCESSORIES, KNITTED OR CROCHETED; KNITTED OR CROCHETED PARTS OF GARMENTS OR OF CLOTHING ACCESSORIES:Other accessories :Other" +61179000,"OTHER MADE UP CLOTHING ACCESSORIES, KNITTED OR CROCHETED; KNITTED OR CROCHETED PARTS OF GARMENTS OR OF CLOTHING ACCESSORIES::Parts" +62010000,anoraks coats cloaks including wind cheaters and similar articles other than those of heading 6203 +62011100,"MEN'S OR BOYS OVERCOATS, CAR-COATS, CLOAKS, ANORAKS (INCLUDING SKIJACKETS), WIND- CHEATERS, WIND-JACKETS AND SIMILAR ARTICLES OTHER THAN THOSE OF HEADING 6203::Of wool and fine animal hair(OLD tariff)" +62011210,"MEN'S OR BOYS OVERCOATS, CAR-COATS, CLOAKS, ANORAKS (INCLUDING SKIJACKETS), WIND- CHEATERS, WIND-JACKETS AND SIMILAR ARTICLES OTHER THAN THOSE OF HEADING 6203:Of cotton :Raincoats(OLD tariff)" +62011290,"MEN'S OR BOYS OVERCOATS, CAR-COATS, CLOAKS, ANORAKS (INCLUDING SKIJACKETS), WIND- CHEATERS, WIND-JACKETS AND SIMILAR ARTICLES OTHER THAN THOSE OF HEADING 6203:Of cotton :Other(OLD tariff)" +62011310,"MEN'S OR BOYS OVERCOATS, CAR-COATS, CLOAKS, ANORAKS (INCLUDING SKIJACKETS), WIND- CHEATERS, WIND-JACKETS AND SIMILAR ARTICLES OTHER THAN THOSE OF HEADING 6203:Of man-made fibres :Raincoats(OLD tariff)" +62011390,"MEN'S OR BOYS OVERCOATS, CAR-COATS, CLOAKS, ANORAKS (INCLUDING SKIJACKETS), WIND- CHEATERS, WIND-JACKETS AND SIMILAR ARTICLES OTHER THAN THOSE OF HEADING 6203:Of man-made fibres :Other(OLD tariff)" +62011910,"MEN'S OR BOYS OVERCOATS, CAR-COATS, CLOAKS, ANORAKS (INCLUDING SKIJACKETS), WIND- CHEATERS, WIND-JACKETS AND SIMILAR ARTICLES OTHER THAN THOSE OF HEADING 6203:Of other textile materials :Of silk(OLD 
tariff)" +62011990,"MEN'S OR BOYS OVERCOATS, CAR-COATS, CLOAKS, ANORAKS (INCLUDING SKIJACKETS), WIND- CHEATERS, WIND-JACKETS AND SIMILAR ARTICLES OTHER THAN THOSE OF HEADING 6203:Of other textile materials :Of other textile fibres(OLD tariff)" +62012000,anoraks coats cloaks including wind cheaters and similar articles other than those of heading 6203 >> of wool and fine animal hair overcoats raincoats car coats +62012010,"MEN'S OR BOYS OVERCOATS, CAR-COATS, CLOAKS, ANORAKS (INCLUDING SKIJACKETS), WIND- CHEATERS, WIND-JACKETS AND SIMILAR ARTICLES OTHER THAN THOSE OF HEADING 6203:Of wool or fine animal hair ::Overcoats, raincoats, car-coats, capes, cloaks and similar article" +62012090,"MEN'S OR BOYS OVERCOATS, CAR-COATS, CLOAKS, ANORAKS (INCLUDING SKIJACKETS), WIND- CHEATERS, WIND-JACKETS AND SIMILAR ARTICLES OTHER THAN THOSE OF HEADING 6203:Of wool or fine animal hair ::other" +62013000,anoraks coats cloaks including wind cheaters and similar articles other than those of heading 6203 >> of cotton +62013010,"MEN'S OR BOYS OVERCOATS, CAR-COATS, CLOAKS, ANORAKS (INCLUDING SKIJACKETS), WIND- CHEATERS, WIND-JACKETS AND SIMILAR ARTICLES OTHER THAN THOSE OF HEADING 6203:Of Cotton:Overcoats, raincoats, car-coats, capes, cloaks and similar articles" +62013090,"MEN'S OR BOYS OVERCOATS, CAR-COATS, CLOAKS, ANORAKS (INCLUDING SKIJACKETS), WIND- CHEATERS, WIND-JACKETS AND SIMILAR ARTICLES OTHER THAN THOSE OF HEADING 6203:Of Cotton:Other" +62014000,anoraks coats cloaks including wind cheaters and similar articles other than those of heading 6203 >> of fibres +62014010,"MEN'S OR BOYS OVERCOATS, CAR-COATS, CLOAKS, ANORAKS (INCLUDING SKIJACKETS), WIND- CHEATERS, WIND-JACKETS AND SIMILAR ARTICLES OTHER THAN THOSE OF HEADING 6203:Of man-made fibres ::Overcoats, raincoats, car-coats, capes, cloaks and similar articles" +62014090,"MEN'S OR BOYS OVERCOATS, CAR-COATS, CLOAKS, ANORAKS (INCLUDING SKIJACKETS), WIND- CHEATERS, WIND-JACKETS AND SIMILAR ARTICLES OTHER THAN THOSE OF HEADING 6203:Of man-made fibres ::Other" +62019000,anoraks coats cloaks including wind cheaters and similar articles other than those of heading 6203 >> of other textile materials +62019010,"MEN'S OR BOYS OVERCOATS, CAR-COATS, CLOAKS, ANORAKS (INCLUDING SKIJACKETS), WIND- CHEATERS, WIND-JACKETS AND SIMILAR ARTICLES OTHER THAN THOSE OF HEADING 6203:Of other textile materials ::Overcoats, raincoats, car-coats, capes, cloaks and similar articles" +62019090,"MEN'S OR BOYS OVERCOATS, CAR-COATS, CLOAKS, ANORAKS (INCLUDING SKIJACKETS), WIND- CHEATERS, WIND-JACKETS AND SIMILAR ARTICLES OTHER THAN THOSE OF HEADING 6203:Of other textile materials ::Other" +62019100,"MEN'S OR BOYS OVERCOATS, CAR-COATS, CLOAKS, ANORAKS (INCLUDING SKIJACKETS), WIND- CHEATERS, WIND-JACKETS AND SIMILAR ARTICLES OTHER THAN THOSE OF HEADING 6203::Of wool or fine animal hair(OLD tariff)" +62019200,"MEN'S OR BOYS OVERCOATS, CAR-COATS, CLOAKS, ANORAKS (INCLUDING SKIJACKETS), WIND- CHEATERS, WIND-JACKETS AND SIMILAR ARTICLES OTHER THAN THOSE OF HEADING 6203::Of cotton(OLD tariff)" +62019300,"MEN'S OR BOYS OVERCOATS, CAR-COATS, CLOAKS, ANORAKS (INCLUDING SKIJACKETS), WIND- CHEATERS, WIND-JACKETS AND SIMILAR ARTICLES OTHER THAN THOSE OF HEADING 6203::Of man-made fibres(OLD tariff)" +62019910,"MEN'S OR BOYS OVERCOATS, CAR-COATS, CLOAKS, ANORAKS (INCLUDING SKIJACKETS), WIND- CHEATERS, WIND-JACKETS AND SIMILAR ARTICLES OTHER THAN THOSE OF HEADING 6203:Of other textile materials :Of silk(OLD tariff)" +62019990,"MEN'S OR BOYS OVERCOATS, CAR-COATS, CLOAKS, ANORAKS (INCLUDING 
SKIJACKETS), WIND- CHEATERS, WIND-JACKETS AND SIMILAR ARTICLES OTHER THAN THOSE OF HEADING 6203:Of other textile materials :Other(OLD tariff)" +62020000,women s or girls overcoats capes cloaks anoraks including ski jackets wind cheaters and similar articles other than those of heading 6204 +62021110,"WOMEN'S OR GIRLS' OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, OTHER THAN THOSE OF HEADING 6204:Of wool or fine animal hair :Coats(OLD tariff)" +62021190,"WOMEN'S OR GIRLS' OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, OTHER THAN THOSE OF HEADING 6204:Of wool or fine animal hair :Other(OLD tariff)" +62021200,"WOMEN'S OR GIRLS' OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, OTHER THAN THOSE OF HEADING 6204::Of cotton(OLD tariff)" +62021300,"WOMEN'S OR GIRLS' OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, OTHER THAN THOSE OF HEADING 6204::Of man-made fibres(OLD tariff)" +62021910,"WOMEN'S OR GIRLS' OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, OTHER THAN THOSE OF HEADING 6204:Of other textile materials :Coats of silk(OLD tariff)" +62021920,"WOMEN'S OR GIRLS' OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, OTHER THAN THOSE OF HEADING 6204:Of other textile materials :Coats of all other fibres(OLD tariff)" +62021990,"WOMEN'S OR GIRLS' OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, OTHER THAN THOSE OF HEADING 6204:Of other textile materials :Other(OLD tariff)" +62022000,women s or girls overcoats capes cloaks anoraks including ski jackets wind cheaters and similar articles other than those of heading 6204 >> of wool or fine animal hair u +62022010,"WOMEN'S OR GIRLS' OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, OTHER THAN THOSE OF HEADING 6204:Of wool or fine animal hair ::Overcoats, raincoats, car-coats, capes, cloaks and similar articles" +62022090,"WOMEN'S OR GIRLS' OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, OTHER THAN THOSE OF HEADING 6204:Of wool or fine animal hair ::Other" +62023000,women s or girls overcoats capes cloaks anoraks including ski jackets wind cheaters and similar articles other than those of heading 6204 >> of cotton +62023010,"WOMEN'S OR GIRLS' OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, OTHER THAN THOSE OF HEADING 6204:Of Cotton:Overcoats, raincoats, car-coats, capes, cloaks and similar articles" +62023090,"WOMEN'S OR GIRLS' OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, OTHER THAN THOSE OF HEADING 6204:Of Cotton:Other" +62024000,women s or girls overcoats capes cloaks anoraks including ski jackets wind cheaters and similar articles other than those of heading 6204 >> of fibres +62024010,"WOMEN'S OR GIRLS' OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, OTHER THAN THOSE OF HEADING 6204:Of man-made fibres ::Overcoats, 
raincoats, car-coats, capes, cloaks and similar articles" +62024090,"WOMEN'S OR GIRLS' OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, OTHER THAN THOSE OF HEADING 6204:Of man-made fibres ::Other" +62029000,women s or girls overcoats capes cloaks anoraks including ski jackets wind cheaters and similar articles other than those of heading 6204 >> of other textile materials +62029010,"WOMEN'S OR GIRLS' OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, OTHER THAN THOSE OF HEADING 6204:Of other textile materials ::Overcoats, raincoats, car-coats, capes, cloaks and similar articles" +62029090,"WOMEN'S OR GIRLS' OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, OTHER THAN THOSE OF HEADING 6204:Of other textile materials ::Other" +62029110,"WOMEN'S OR GIRLS' OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, OTHER THAN THOSE OF HEADING 6204:Of wool or fine animal hair :Wind and ski-jackets, wind- cheaters(OLD tariff)" +62029190,"WOMEN'S OR GIRLS' OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, OTHER THAN THOSE OF HEADING 6204:Of wool or fine animal hair :Other(OLD tariff)" +62029210,"WOMEN'S OR GIRLS' OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, OTHER THAN THOSE OF HEADING 6204:Of cotton :Wind and ski-jackets, wind-cheaters(OLD tariff)" +62029290,"WOMEN'S OR GIRLS' OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, OTHER THAN THOSE OF HEADING 6204:Of cotton :Other(OLD tariff)" +62029310,"WOMEN'S OR GIRLS' OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, OTHER THAN THOSE OF HEADING 6204:Of man-made fibres :Wind and ski-jackets, wind-cheaters(OLD tariff)" +62029390,"WOMEN'S OR GIRLS' OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, OTHER THAN THOSE OF HEADING 6204:Of man-made fibres :Other(OLD tariff)" +62029911,"WOMEN'S OR GIRLS' OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, OTHER THAN THOSE OF HEADING 6204:Of other textile materials :Wind and ski-jackets(OLD tariff)" +62029919,"WOMEN'S OR GIRLS' OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, OTHER THAN THOSE OF HEADING 6204:Of other textile materials :Other(OLD tariff)" +62029990,"WOMEN'S OR GIRLS' OVERCOATS, CAR-COATS, CAPES, CLOAKS, ANORAKS (INCLUDING SKI-JACKETS), WIND-CHEATERS, WINDJACKETS AND SIMILAR ARTICLES, OTHER THAN THOSE OF HEADING 6204:Of other textile materials :Other(OLD tariff)" +62030000,men s or boys suits ensembles jackets blazers trousers bib and brace and oveb ralls breeches shorts other than swimwear suits +62031100,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIB AND BRACE OVEB RALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR)::Of wool or fine animal hair" +62031200,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIB AND BRACE OVEB RALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR)::Of synthetic fibres" +62031900,men s or boys suits 
ensembles jackets blazers trousers bib and brace and oveb ralls breeches shorts other than swimwear suits >> of other textile materials +62031910,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIB AND BRACE OVEB RALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Of cotton" +62031990,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIB AND BRACE OVEB RALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Other" +62032200,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIB AND BRACE OVEB RALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR)::Of cotton" +62032300,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIB AND BRACE OVEB RALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR)::Of synthetic fibres" +62032900,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIB AND BRACE OVEB RALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials:Of other textile materials" +62032911,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIB AND BRACE OVEB RALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials:Khadi" +62032919,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIB AND BRACE OVEB RALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials:Other" +62032990,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIB AND BRACE OVEB RALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials:Other" +62033100,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIB AND BRACE OVEB RALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of wool or fine animal hair:Of wool or fine animal hair" +62033110,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIB AND BRACE OVEB RALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of wool or fine animal hair:Khadi" +62033190,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIB AND BRACE OVEB RALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of wool or fine animal hair:Oher" +62033200,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIB AND BRACE OVEB RALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR)::Of cotton" +62033300,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIB AND BRACE OVEB RALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR)::Of synthetic fibres" +62033900,men s or boys suits ensembles jackets blazers trousers bib and brace and oveb ralls breeches shorts other than swimwear suits >> of other textile materials of silk +62033910,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIB AND BRACE OVEB RALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Of silk(OLD tariff)" +62033911,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIB AND BRACE OVEB RALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Khadi" +62033919,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIB AND BRACE OVEB RALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Other" +62033990,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIB AND BRACE OVEB RALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Other" +62034100,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIB AND BRACE OVEB RALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR)::Of wool or fine animal hair" +62034200,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIB AND BRACE OVEB RALLS, BREECHES AND 
SHORTS (OTHER THAN SWIMWEAR):Of cotton:Of cotton" +62034210,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIB AND BRACE OVEB RALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of cotton:Handloom" +62034290,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIB AND BRACE OVEB RALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of cotton:Other" +62034300,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIB AND BRACE OVEB RALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR)::Of synthetic fibres" +62034900,men s or boys suits ensembles jackets blazers trousers bib and brace and oveb ralls breeches shorts other than swimwear suits >> of other textile materials +62034910,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIB AND BRACE OVEB RALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Of silk" +62034990,"MEN'S OR BOYS' SUITS, ENSEMBLES, JACKETS, BLAZERS, TROUSERS, BIB AND BRACE OVEB RALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Other" +62040000,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits +62041100,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR)::Of wool of fine animal hair" +62041200,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR)::Of cotton" +62041210,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of cotton >> embroidered with lucknow chikan craft +62041290,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of cotton >> other +62041300,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR)::Of synthetic fibres" +62041310,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of synthetic fibres of synthetic fibres >> embroidered with lucknow chikan craft embroidered with lucknow chikan craft +62041390,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of synthetic fibres of synthetic fibres >> other other +62041900,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of other textile materials of other textile materials of silk +62041911,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Sequinned or beaded with chattons or embroidered" +62041912,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of other textile materials of other textile materials of silk >> embroidered with lucknow chikan craft +62041919,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, 
SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Other" +62041990,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Of all other fibres(OLD tariff)" +62041991,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of other textile materials of other textile materials of silk >> embroidered with lucknow chikan craft +62041999,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of other textile materials of other textile materials of silk >> other ensembles +62042100,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR)::Of wool or fine animal hair" +62042200,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of cotton +62042210,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of cotton :Blouses combined with skirts, trousers or shorts" +62042220,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of cotton >> embroidered with lucknow chikan 20 craft embroidered with lucknow chikan 20 craft +62042290,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of cotton :Other" +62042300,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR)::Of synthetic fibres" +62042310,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of synthetic fibres of synthetic fibres >> embroidered with lucknow chikan craft embroidered with lucknow chikan craft +62042390,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of synthetic fibres of synthetic fibres >> other other +62042900,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of other textile materials of silk of other textile materials of silk +62042911,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Sequinned or beaded(OLD tariff)" +62042912,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Khadi" +62042913,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of other textile 
materials of silk of other textile materials of silk >> embroidered with lucknow chikan craft embroidered with lucknow chikan craft +62042919,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Other" +62042990,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Other(OLD tariff)" +62042991,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of other textile materials of silk of other textile materials of silk >> embroidered with lucknow chikan craft embroidered with lucknow chikan craft +62042999,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of other textile materials of silk of other textile materials of silk >> other jackets and blazers other jackets and blazers +62043100,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of wool or fine animal hair:Of wool or fine animal hair" +62043110,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of wool or fine animal hair:Khadi" +62043190,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of wool or fine animal hair:Other" +62043200,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR)::Of cotton" +62043210,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of cotton of cotton >> embroidered with lucknow chikan craft embroidered with lucknow chikan craft +62043290,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of cotton of cotton >> other other +62043300,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR)::Of synthetic fibres" +62043310,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of synthetic fibre of synthetic fibre >> embroidered with lucknow chikan craft embroidered with lucknow chikan craft +62043390,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of synthetic fibre of synthetic fibre >> other other +62043900,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of other textile materials of silk of other textile materials of silk +62043911,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE 
OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Sequinned or beaded with chattons or embroidered(OLD tariff)" +62043912,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Khadi" +62043913,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of other textile materials of silk of other textile materials of silk >> embroidered with lucknow chikan craft embroidered with lucknow chikan craft +62043919,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Other" +62043990,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Other(OLD tariff)" +62043991,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of other textile materials of silk of other textile materials of silk >> embroidered with lucknow chikan craft embroidered with lucknow chikan craft +62043999,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of other textile materials of silk of other textile materials of silk >> other dresses other dresses +62044100,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of wool or fine animal hair of wool or fine animal hair +62044110,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of wool or fine animal hair :House coats and like dresses" +62044120,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of wool or fine animal hair :Blazers" +62044190,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of wool or fine animal hair :Other" +62044200,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of cotton of cotton +62044210,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of cotton :House coats and the like dresses" +62044220,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of cotton :Handloom;" +62044230,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of cotton of cotton >> embroidered with lucknow chikan craft +62044290,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND 
SHORTS (OTHER THAN SWIMWEAR):Of cotton :Other" +62044300,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of synthetic fibres +62044310,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of synthetic fibres :House coats and the like" +62044320,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of synthetic fibres >> embroidered with lucknow chikan craft +62044390,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of synthetic fibres :Other" +62044400,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR)::Of artificial fibres" +62044410,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of artificial fibres >> embroidered with lucknow chikan craft +62044490,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of artificial fibres >> other +62044900,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of other textile materials of silk +62044911,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :House coats and the like dresses" +62044912,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of other textile materials of silk >> embroidered with lucknow chikan craft +62044919,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Other" +62044990,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Other(OLD tariff)" +62044991,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of other textile materials of silk >> embroidered with lucknow chikan craft +62044999,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of other textile materials of silk >> other skirts and divided skirts +62045100,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR)::Of wool or fine animal hair" +62045200,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR)::Of cotton" +62045210,women s or girls suits 
ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of cotton >> embroidered with lucknow chikan craft +62045290,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of cotton >> other +62045300,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR)::Of synthetic fibres" +62045310,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of synthetic fibre >> embroidered with lucknow chikan craft +62045390,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of synthetic fibre >> other +62045900,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of other textile materials of silk +62045910,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Of silk(OLD tariff)" +62045911,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of other textile materials of silk >> embroidered with lucknow chikan craft +62045919,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of other textile materials of silk >> other other +62045990,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Other(OLD tariff)" +62045991,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of other textile materials of silk >> embroidered with lucknow chikan craft +62045999,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> of other textile materials of silk >> other trouser bib and brace overalls +62046100,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> +62046110,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of wool or fine animal hair :Trousers and shorts" +62046190,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of wool or fine animal hair :Other" +62046200,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of cotton:Of cotton" +62046210,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND 
SHORTS (OTHER THAN SWIMWEAR):Of cotton:Handloom" +62046290,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of cotton:Other" +62046300,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR)::Of synthetic fibres" +62046900,women s or girls suits ensembles jackets blazers divided dresses skirts skirts trousers bib and brace overalls breeches and shorts other than swimwear suits >> +62046911,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Sequinned or beaded or embroidered" +62046919,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Other" +62046990,"WOMEN'S OR GIRL'S SUITS, ENSEMBLES, JACKETS, BLAZERS, DRESSES, SKIRTS, DIVIDED SKIRTS, TROUSERS, BIB AND BRACE OVERALLS, BREECHES AND SHORTS (OTHER THAN SWIMWEAR):Of other textile materials :Other" +62050000,men s or boys shirts +62052000,MEN'S OR BOYS SHIRTS:Of cotton:Of cotton +62052010,MEN'S OR BOYS SHIRTS:Of cotton:Handloom +62052020,men s or boys shirts >> of cotton >> embroidered with lucknow chikan craft +62052090,MEN'S OR BOYS SHIRTS:Of cotton:Other +62053000,MEN'S OR BOYS SHIRTS::Of man-made fibres +62053010,men s or boys shirts >> of fibres >> embroidered with lucknow chikan craft +62053090,men s or boys shirts >> of fibres >> other +62059000,men s or boys shirts >> of other textile materials of silk +62059010,MEN'S OR BOYS SHIRTS:Of other textile materials :Of silk(OLD tariff) +62059011,MEN'S OR BOYS SHIRTS:Of other textile materials :Khadi +62059012,men s or boys shirts >> of other textile materials of silk >> embroidered with lucknow chikan craft +62059019,MEN'S OR BOYS SHIRTS:Of other textile materials :Other +62059090,MEN'S OR BOYS SHIRTS:Of other textile materials :Other +62060000,women s or girls blouses shirts and +62061000,women s or girls blouses shirts and >> of silk or silk waste +62061010,"WOMEN'S OR GIRL'S BLOUSES, SHIRTS AND SHIRT-BLOUSES:Of silk or silk waste :Of silk" +62061090,"WOMEN'S OR GIRL'S BLOUSES, SHIRTS AND SHIRT-BLOUSES:Of silk or silk waste :Other" +62062000,"WOMEN'S OR GIRL'S BLOUSES, SHIRTS AND SHIRT-BLOUSES::Of wool or fine animal hair" +62063000,"WOMEN'S OR GIRL'S BLOUSES, SHIRTS AND SHIRT-BLOUSES:Of cotton:Of cotton" +62063010,"WOMEN'S OR GIRL'S BLOUSES, SHIRTS AND SHIRT-BLOUSES:Of cotton:Handloom" +62063090,"WOMEN'S OR GIRL'S BLOUSES, SHIRTS AND SHIRT-BLOUSES:Of cotton:Other" +62064000,"WOMEN'S OR GIRL'S BLOUSES, SHIRTS AND SHIRT-BLOUSES::Of man-made fibres" +62069000,"WOMEN'S OR GIRL'S BLOUSES, SHIRTS AND SHIRT-BLOUSES::Of other textile materials" +62071100,"MEN'S OR BOY'S SINGLETS AND OTHER VESTS, UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES::Of cotton(OLD tariff)" +62071910,"MEN'S OR BOY'S SINGLETS AND OTHER VESTS, UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of other textile materials :Of synthetic fibres(OLD tariff)" +62071920,"MEN'S OR BOY'S SINGLETS AND OTHER VESTS, UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of other textile materials :Of wool(OLD tariff)" 
+62071930,"MEN'S OR BOY'S SINGLETS AND OTHER VESTS, UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of other textile materials :Of silk(OLD tariff)" +62071990,"MEN'S OR BOY'S SINGLETS AND OTHER VESTS, UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of other textile materials :Other(OLD tariff)" +62072110,"MEN'S OR BOY'S SINGLETS AND OTHER VESTS, UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of cotton:Handloom(OLD tariff)" +62072190,"MEN'S OR BOY'S SINGLETS AND OTHER VESTS, UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of cotton:Other(OLD tariff)" +62072200,"MEN'S OR BOY'S SINGLETS AND OTHER VESTS, UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES::Of man-made fibres(OLD tariff)" +62072900,"MEN'S OR BOY'S SINGLETS AND OTHER VESTS, UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES::Of other textile materials(OLD tariff)" +62079110,"MEN'S OR BOY'S SINGLETS AND OTHER VESTS, UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of cotton :Dressing gowns and bathrobes(OLD tariff)" +62079120,"MEN'S OR BOY'S SINGLETS AND OTHER VESTS, UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of cotton :Under shirts other than hand printed(OLD tariff)" +62079190,"MEN'S OR BOY'S SINGLETS AND OTHER VESTS, UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of cotton :Other(OLD tariff)" +62079911,"MEN'S OR BOY'S SINGLETS AND OTHER VESTS, UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of other textile materials :Dressing gowns and bathrobes(OLD tariff)" +62079919,"MEN'S OR BOY'S SINGLETS AND OTHER VESTS, UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of other textile materials :Other(OLD tariff)" +62079921,"MEN'S OR BOY'S SINGLETS AND OTHER VESTS, UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of other textile materials :Dressing gowns and bathrobes(OLD tariff)" +62079929,"MEN'S OR BOY'S SINGLETS AND OTHER VESTS, UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of other textile materials :Other(OLD tariff)" +62079990,"MEN'S OR BOY'S SINGLETS AND OTHER VESTS, UNDERPANTS, BRIEFS, NIGHTSHIRTS, PYJAMAS, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of other textile materials :Other(OLD tariff)" +62080000,women s or girls singlets and other vests slips petticoats briefs panties nightdresses pyjamas negliges bathrobes dressing gowns and similar articles slips and petticoats +62081100,"WOMEN'S OR GIRLS' SINGLETS AND OTHER VESTS, SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHTDRESSES, PYJAMAS, NEGLIGES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES::Of man-made fibres" +62081900,women s or girls singlets and other vests slips petticoats briefs panties nightdresses pyjamas negliges bathrobes dressing gowns and similar articles slips and petticoats >> of other textile materials +62081910,"WOMEN'S OR GIRLS' SINGLETS AND OTHER VESTS, SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHTDRESSES, PYJAMAS, NEGLIGES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of other textile materials :Of cotton other than hand printed" +62081990,"WOMEN'S OR GIRLS' SINGLETS AND OTHER VESTS, SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHTDRESSES, 
PYJAMAS, NEGLIGES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of other textile materials :Other" +62082100,"WOMEN'S OR GIRLS' SINGLETS AND OTHER VESTS, SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHTDRESSES, PYJAMAS, NEGLIGES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of cotton:Of cotton" +62082110,"WOMEN'S OR GIRLS' SINGLETS AND OTHER VESTS, SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHTDRESSES, PYJAMAS, NEGLIGES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of cotton:Handloom" +62082190,"WOMEN'S OR GIRLS' SINGLETS AND OTHER VESTS, SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHTDRESSES, PYJAMAS, NEGLIGES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of cotton:Other" +62082200,"WOMEN'S OR GIRLS' SINGLETS AND OTHER VESTS, SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHTDRESSES, PYJAMAS, NEGLIGES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES::Of man-made fibres" +62082900,women s or girls singlets and other vests slips petticoats briefs panties nightdresses pyjamas negliges bathrobes dressing gowns and similar articles slips and petticoats >> of other textile materials +62082910,"WOMEN'S OR GIRLS' SINGLETS AND OTHER VESTS, SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHTDRESSES, PYJAMAS, NEGLIGES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of other textile materials :Of silk" +62082920,"WOMEN'S OR GIRLS' SINGLETS AND OTHER VESTS, SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHTDRESSES, PYJAMAS, NEGLIGES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of other textile materials :Of wool" +62082990,"WOMEN'S OR GIRLS' SINGLETS AND OTHER VESTS, SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHTDRESSES, PYJAMAS, NEGLIGES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of other textile materials :Other" +62089100,women s or girls singlets and other vests slips petticoats briefs panties nightdresses pyjamas negliges bathrobes dressing gowns and similar articles slips and petticoats >> of cotton +62089110,"WOMEN'S OR GIRLS' SINGLETS AND OTHER VESTS, SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHTDRESSES, PYJAMAS, NEGLIGES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of cotton :Dressing gowns and bathrobes" +62089190,"WOMEN'S OR GIRLS' SINGLETS AND OTHER VESTS, SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHTDRESSES, PYJAMAS, NEGLIGES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of cotton :Other" +62089200,women s or girls singlets and other vests slips petticoats briefs panties nightdresses pyjamas negliges bathrobes dressing gowns and similar articles slips and petticoats >> of fibres +62089210,"WOMEN'S OR GIRLS' SINGLETS AND OTHER VESTS, SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHTDRESSES, PYJAMAS, NEGLIGES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of man-made fibres :Dressing gowns and bathrobes" +62089290,"WOMEN'S OR GIRLS' SINGLETS AND OTHER VESTS, SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHTDRESSES, PYJAMAS, NEGLIGES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of man-made fibres :Other" +62089900,women s or girls singlets and other vests slips petticoats briefs panties nightdresses pyjamas negliges bathrobes dressing gowns and similar articles slips and petticoats >> of other textile materials +62089910,"WOMEN'S OR GIRLS' SINGLETS AND OTHER VESTS, SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHTDRESSES, PYJAMAS, NEGLIGES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of other textile materials :Dressing gowns and bathrobes of wool" +62089920,"WOMEN'S OR GIRLS' SINGLETS AND OTHER VESTS, SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHTDRESSES, PYJAMAS, NEGLIGES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of other textile materials 
:Dressing gowns and bathrobes of silk" +62089990,"WOMEN'S OR GIRLS' SINGLETS AND OTHER VESTS, SLIPS, PETTICOATS, BRIEFS, PANTIES, NIGHTDRESSES, PYJAMAS, NEGLIGES, BATHROBES, DRESSING GOWNS AND SIMILAR ARTICLES:Of other textile materials :Other" +62090000,babies garments and clothing accessories +62092000,BABIE'S GARMENTS AND CLOTHING ACCESSORIES:Of cotton:Of cotton +62092010,BABIE'S GARMENTS AND CLOTHING ACCESSORIES:Of cotton:Handloom +62092090,BABIE'S GARMENTS AND CLOTHING ACCESSORIES:Of cotton:Other +62093000,BABIE'S GARMENTS AND CLOTHING ACCESSORIES::Of synthetic fibres +62099000,babies garments and clothing accessories >> of other textile materials +62099010,BABIE'S GARMENTS AND CLOTHING ACCESSORIES:Of other textile materials :Of silk +62099090,BABIE'S GARMENTS AND CLOTHING ACCESSORIES:Of other textile materials :Other +62100000,garments made up of fabrics of heading 5602 5603 5903 5906 or 5907 +62101000,"GARMENTS, MADE UP OF FABRICS OF HEADING 5602, 5603, 5903, 5906 OR 5907::Of fabrics of heading 5602 or 5603" +62101010,garments made up of fabrics of heading 5602 5603 5903 5906 or 5907 >> of fabrics of heading 5602 or 5603 >> personal protective garments for u use felt or non woven conforming to is 17423 +62101020,garments made up of fabrics of heading 5602 5603 5903 5906 or 5907 >> of fabrics of heading 5602 or 5603 >> surgical gowns and drapes u conforming to is 17334 +62101090,garments made up of fabrics of heading 5602 5603 5903 5906 or 5907 >> of fabrics of heading 5602 or 5603 >> other u +62102000,garments made up of fabrics of heading 5602 5603 5903 5906 or 5907 >> other garments of the type described in +62102010,"GARMENTS, MADE UP OF FABRICS OF HEADING 5602, 5603, 5903, 5906 OR 5907:Other garments, of the type described in sub-headings 6201 11 to 6201 19 :Outer garments, of rubberised textile fabrics" +62102020,"GARMENTS, MADE UP OF FABRICS OF HEADING 5602, 5603, 5903, 5906 OR 5907:Other garments, of the type described in sub-headings 6201 11 to 6201 19 :Outer garments, of fabrics impregnated, coated, covered or laminated with prepara- tions of cellulose derivatives and other artificial plastic materials" +62102030,"GARMENTS, MADE UP OF FABRICS OF HEADING 5602, 5603, 5903, 5906 OR 5907:Other garments, of the type described in sub-headings 6201 11 to 6201 19 :Outer garments, of fabrics otherwise impregnated or coated" +62102090,"GARMENTS, MADE UP OF FABRICS OF HEADING 5602, 5603, 5903, 5906 OR 5907:Other garments, of the type described in sub-headings 6201 11 to 6201 19 :Other" +62103000,garments made up of fabrics of heading 5602 5603 5903 5906 or 5907 >> other garments of the type described in impregnated coated covered or laminated with preparations of cellulose derivatives and other artificial plastic materials +62103010,"GARMENTS, MADE UP OF FABRICS OF HEADING 5602, 5603, 5903, 5906 OR 5907:Other garments, of the type described in sub-headings 6202 11 to 6202 19 :Outer garments, of textiles impregnated, coated, covered or laminated with preparations of cellulose derivatives and other artificial plastic materials(OLD tariff)" +62103020,"GARMENTS, MADE UP OF FABRICS OF HEADING 5602, 5603, 5903, 5906 OR 5907:Other garments, of the type described in sub-headings 6202 11 to 6202 19 :Outer garments, of rubberised textile fabrics" +62103030,"GARMENTS, MADE UP OF FABRICS OF HEADING 5602, 5603, 5903, 5906 OR 5907:Other garments, of the type described in sub-headings 6202 11 to 6202 19 :Outer garments, of fabrics otherwise impregnated" +62103090,"GARMENTS, MADE UP OF FABRICS OF 
HEADING 5602, 5603, 5903, 5906 OR 5907:Other garments, of the type described in sub-headings 6202 11 to 6202 19 :Other" +62104000,garments made up of fabrics of heading 5602 5603 5903 5906 or 5907 >> other men s or boys garments other men s or boys garments +62104010,"GARMENTS, MADE UP OF FABRICS OF HEADING 5602, 5603, 5903, 5906 OR 5907:Other men's or boy's garments :Bullet proof jacket, bomb disposal jacket and the like" +62104020,garments made up of fabrics of heading 5602 5603 5903 5906 or 5907 >> other men s or boys garments other men s or boys garments >> nbc warfare suits and the like u nbc warfare suits and the like conforming to is 17377 +62104030,garments made up of fabrics of heading 5602 5603 5903 5906 or 5907 >> other men s or boys garments other men s or boys garments >> high visibility warning clothes and the like conforming to is 15809 +62104040,garments made up of fabrics of heading 5602 5603 5903 5906 or 5907 >> other men s or boys garments other men s or boys garments >> high altitude clothes conforming to is 5866 +62104050,garments made up of fabrics of heading 5602 5603 5903 5906 or 5907 >> other men s or boys garments other men s or boys garments >> fighter aircraft clothing conforming to is 11871 +62104060,garments made up of fabrics of heading 5602 5603 5903 5906 or 5907 >> other men s or boys garments other men s or boys garments >> personal protective garments for surgical medical use felt or +62104080,garments made up of fabrics of heading 5602 5603 5903 5906 or 5907 >> other men s or boys garments other men s or boys garments >> clothing for special use such as fr chemical u is electrical is 11871 is 16655 and industrial protection is 17466 +62104090,"GARMENTS, MADE UP OF FABRICS OF HEADING 5602, 5603, 5903, 5906 OR 5907:Other men's or boy's garments :Other" +62105000,"GARMENTS, MADE UP OF FABRICS OF HEADING 5602, 5603, 5903, 5906 OR 5907::Other women's or girl's garments" +62110000,track suits ski suits and swimwear other garments swimwear +62111100,"TRACK SUITS, SKI SUITS AND SWIMWEAR; OTHER GARMENTS::Men's or boys'" +62111200,"TRACK SUITS, SKI SUITS AND SWIMWEAR; OTHER GARMENTS::Women's or girls'" +62112000,"TRACK SUITS, SKI SUITS AND SWIMWEAR; OTHER GARMENTS::Ski suits" +62113200,"TRACK SUITS, SKI SUITS AND SWIMWEAR; OTHER GARMENTS::Of cotton" +62113300,"TRACK SUITS, SKI SUITS AND SWIMWEAR; OTHER GARMENTS::Of man-made fibres" +62113900,"TRACK SUITS, SKI SUITS AND SWIMWEAR; OTHER GARMENTS:Of other textile materials:Of other textile materials" +62113911,"TRACK SUITS, SKI SUITS AND SWIMWEAR; OTHER GARMENTS:Of other textile materials:Handloom" +62113919,"TRACK SUITS, SKI SUITS AND SWIMWEAR; OTHER GARMENTS:Of other textile materials:Other" +62113990,"TRACK SUITS, SKI SUITS AND SWIMWEAR; OTHER GARMENTS:Of other textile materials:Other" +62114200,track suits ski suits and swimwear other garments swimwear >> of cotton of cotton kurta or salwar with or without dupatta +62114210,"TRACK SUITS, SKI SUITS AND SWIMWEAR; OTHER GARMENTS:Of cotton :Kurta and salwar with or without duppatta(OLD tariff)" +62114211,track suits ski suits and swimwear other garments swimwear >> of cotton of cotton kurta or salwar with or without dupatta >> embroidered with lucknow chikan craft +62114219,track suits ski suits and swimwear other garments swimwear >> of cotton of cotton kurta or salwar with or without dupatta >> other other +62114290,"TRACK SUITS, SKI SUITS AND SWIMWEAR; OTHER GARMENTS:Of cotton :Other(OLD tariff)" +62114291,track suits ski suits and swimwear other garments swimwear 
>> of cotton of cotton kurta or salwar with or without dupatta >> embroidered with lucknow chikan craft +62114299,track suits ski suits and swimwear other garments swimwear >> of cotton of cotton kurta or salwar with or without dupatta >> other +62114300,"TRACK SUITS, SKI SUITS AND SWIMWEAR; OTHER GARMENTS::Of man-made fibres" +62114310,track suits ski suits and swimwear other garments swimwear >> of man made fibre >> embroidered with lucknow chikan craft +62114390,track suits ski suits and swimwear other garments swimwear >> of man made fibre >> other +62114900,track suits ski suits and swimwear other garments swimwear >> of other textile materials +62114910,"TRACK SUITS, SKI SUITS AND SWIMWEAR; OTHER GARMENTS:Of other textile materials:Of wool or fine animal hair" +62114921,"TRACK SUITS, SKI SUITS AND SWIMWEAR; OTHER GARMENTS:Of other textile materials:Khadi" +62114922,track suits ski suits and swimwear other garments swimwear >> of other textile materials >> embroidered with lucknow chikan craft +62114929,"TRACK SUITS, SKI SUITS AND SWIMWEAR; OTHER GARMENTS:Of other textile materials:Other" +62114990,"TRACK SUITS, SKI SUITS AND SWIMWEAR; OTHER GARMENTS:Of other textile materials:Other(OLD tariff)" +62114991,track suits ski suits and swimwear other garments swimwear >> of other textile materials >> embroidered with lucknow chikan craft +62114999,track suits ski suits and swimwear other garments swimwear >> of other textile materials >> other +62120000,brassieres girdles corsets braces suspenders garters and similar articles and parts thereof whether or not knitted or crocheted +62121000,"BRASSIERES, GIRDLES, CORSETS, BRACES, SUSPENDERS, GARTERS AND SIMILAR ARTICLES AND PARTS THEREOF, WHETHER OR NOT KNITTED OR CROCHETED::Brassieres" +62122000,"BRASSIERES, GIRDLES, CORSETS, BRACES, SUSPENDERS, GARTERS AND SIMILAR ARTICLES AND PARTS THEREOF, WHETHER OR NOT KNITTED OR CROCHETED::Girdles and panty-girdles" +62123000,"BRASSIERES, GIRDLES, CORSETS, BRACES, SUSPENDERS, GARTERS AND SIMILAR ARTICLES AND PARTS THEREOF, WHETHER OR NOT KNITTED OR CROCHETED::Corselettes" +62129000,brassieres girdles corsets braces suspenders garters and similar articles and parts thereof whether or not knitted or crocheted >> other +62129010,"BRASSIERES, GIRDLES, CORSETS, BRACES, SUSPENDERS, GARTERS AND SIMILAR ARTICLES AND PARTS THEREOF, WHETHER OR NOT KNITTED OR CROCHETED:Other :Suspender belts, braces, suspender garters" +62129090,"BRASSIERES, GIRDLES, CORSETS, BRACES, SUSPENDERS, GARTERS AND SIMILAR ARTICLES AND PARTS THEREOF, WHETHER OR NOT KNITTED OR CROCHETED:Other :Other" +62130000,handkerchiefs +62132000,HANDKERCHIEFS::Of cotton +62139000,handkerchiefs >> of other textile materials +62139010,HANDKERCHIEFS:Other :Of man-made fibres +62139090,HANDKERCHIEFS:Other :Of other textile materials +62140000,shawls scarves mufflers mantillas veils and the like +62141000,shawls scarves mufflers mantillas veils and the like >> of silk or silk waste +62141010,"Shawls, scarves, mufflers, mantillas, veils and the like:Of silk or silk waste:Scarves of silk measuring 60 cms or less" +62141020,"Shawls, scarves, mufflers, mantillas, veils and the like:Of silk or silk waste:Shawls, scarves (exceeding 60 cms) and the like" +62141030,"Shawls, scarves, mufflers, mantillas, veils and the like:Of silk or silk waste:Of handloom" +62141040,shawls scarves mufflers mantillas veils and the like >> of silk or silk waste >> embroidered with lucknow chikan u craft +62141090,"Shawls, scarves, mufflers, mantillas, veils and the like:Of silk or 
silk waste:Other" +62142000,shawls scarves mufflers mantillas veils and the like >> of wool or fine animal hair +62142010,"Shawls, scarves, mufflers, mantillas, veils and the like:Of wool or fine animal hair :Shawls" +62142020,"Shawls, scarves, mufflers, mantillas, veils and the like:Of wool or fine animal hair :Scarves(OLD tariff)" +62142021,"Shawls, scarves, mufflers, mantillas, veils and the like:Of wool or fine animal hair :Khadi" +62142029,"Shawls, scarves, mufflers, mantillas, veils and the like:Of wool or fine animal hair :Other" +62142030,"Shawls, scarves, mufflers, mantillas, veils and the like:Of wool or fine animal hair :Mufflers(OLD tariff)" +62142031,"Shawls, scarves, mufflers, mantillas, veils and the like:Of wool or fine animal hair :Khadi" +62142039,"Shawls, scarves, mufflers, mantillas, veils and the like:Of wool or fine animal hair :Other" +62142090,"Shawls, scarves, mufflers, mantillas, veils and the like:Of wool or fine animal hair :Other" +62143000,"Shawls, scarves, mufflers, mantillas, veils and the like::Of synthetic fibres" +62143010,shawls scarves mufflers mantillas veils and the like >> of synthetic fibres >> embroidered with lucknow chikan u craft +62143090,shawls scarves mufflers mantillas veils and the like >> of synthetic fibres >> other +62144000,"Shawls, scarves, mufflers, mantillas, veils and the like::Of artificial fibres" +62144010,shawls scarves mufflers mantillas veils and the like >> of artificial fibres >> embroidered with lucknow chikan craft +62144090,shawls scarves mufflers mantillas veils and the like >> of artificial fibres >> other +62149000,shawls scarves mufflers mantillas veils and the like >> of other textile materials +62149010,"Shawls, scarves, mufflers, mantillas, veils and the like:Of other textile materials :Abrabroomal, cotton" +62149021,"Shawls, scarves, mufflers, mantillas, veils and the like:Of other textile materials :Grey" +62149022,"Shawls, scarves, mufflers, mantillas, veils and the like:Of other textile materials :White bleached" +62149029,"Shawls, scarves, mufflers, mantillas, veils and the like:Of other textile materials :Other" +62149031,"Shawls, scarves, mufflers, mantillas, veils and the like:Of other textile materials :Grey" +62149032,"Shawls, scarves, mufflers, mantillas, veils and the like:Of other textile materials :White bleached" +62149039,"Shawls, scarves, mufflers, mantillas, veils and the like:Of other textile materials :Other" +62149040,"Shawls, scarves, mufflers, mantillas, veils and the like:Of other textile materials :Scarves, cotton(OLD tariff)" +62149041,shawls scarves mufflers mantillas veils and the like >> of other textile materials >> embroidered with lucknow chikan craft +62149050,"Shawls, scarves, mufflers, mantillas, veils and the like:Of other textile materials :Shawls, mufflers and the like, of cotton(OLD tariff)" +62149060,"Shawls, scarves, mufflers, mantillas, veils and the like:Of other textile materials :Shawls, mufflers and the like of man-made fibres(OLD tariff)" +62149090,"Shawls, scarves, mufflers, mantillas, veils and the like:Of other textile materials :Other(OLD tariff)" +62150000,ties bow ties and cravats +62151000,"TIES, BOW TIES AND CRAVATS:Of silk or silk waste:Of silk or silk waste" +62151010,"TIES, BOW TIES AND CRAVATS:Of silk or silk waste:Khadi" +62151090,"TIES, BOW TIES AND CRAVATS:Of silk or silk waste:Other" +62152000,"TIES, BOW TIES AND CRAVATS::Of man-made fibres" +62159000,ties bow ties and cravats >> of other textile materials +62159010,"TIES, BOW TIES AND CRAVATS:Of other 
textile materials :Of cotton" +62159090,"TIES, BOW TIES AND CRAVATS:Of other textile materials :Other" +62160000,gloves mittens and mitts gloves mittens and mitts +62160010,"Gloves, mittens, and mitts:Gloves, mittens and mitts:Of cotton" +62160020,"Gloves, mittens, and mitts:Gloves, mittens and mitts:Of handloom" +62160090,"Gloves, mittens, and mitts:Gloves, mittens and mitts:Other" +62170000,clothing other made up accessories parts of garments or of clothing accessories other than those of heading 6212 +62171000,clothing other made up accessories parts of garments or of clothing accessories other than those of heading 6212 >> accessories +62171010,"OTHER MADE UP CLOTHING ACCESSORIES; PARTS OF GARMENTS OR OF CLOTHING ACCESSORIES, OTHER:Accessories :For articles of apparel of cotton" +62171020,"OTHER MADE UP CLOTHING ACCESSORIES; PARTS OF GARMENTS OR OF CLOTHING ACCESSORIES, OTHER:Accessories :For articles of apparel of synthetic fibres" +62171030,"OTHER MADE UP CLOTHING ACCESSORIES; PARTS OF GARMENTS OR OF CLOTHING ACCESSORIES, OTHER:Accessories :For articles of apparel of wool" +62171040,"OTHER MADE UP CLOTHING ACCESSORIES; PARTS OF GARMENTS OR OF CLOTHING ACCESSORIES, OTHER:Accessories :For articles of apparel of silk" +62171050,"OTHER MADE UP CLOTHING ACCESSORIES; PARTS OF GARMENTS OR OF CLOTHING ACCESSORIES, OTHER:Accessories :For articles of apparel of regenerated fibre" +62171060,"OTHER MADE UP CLOTHING ACCESSORIES; PARTS OF GARMENTS OR OF CLOTHING ACCESSORIES, OTHER:Accessories :For articles of apparel of other fibres" +62171070,"OTHER MADE UP CLOTHING ACCESSORIES; PARTS OF GARMENTS OR OF CLOTHING ACCESSORIES, OTHER:Accessories :Stockings, socks sockettes and the like of cotton" +62171090,"OTHER MADE UP CLOTHING ACCESSORIES; PARTS OF GARMENTS OR OF CLOTHING ACCESSORIES, OTHER:Accessories :Other" +62179000,clothing other made up accessories parts of garments or of clothing accessories other than those of heading 6212 >> parts +62179010,"OTHER MADE UP CLOTHING ACCESSORIES; PARTS OF GARMENTS OR OF CLOTHING ACCESSORIES, OTHER:Parts :Collar cuffs and the like of cotton" +62179020,"OTHER MADE UP CLOTHING ACCESSORIES; PARTS OF GARMENTS OR OF CLOTHING ACCESSORIES, OTHER:Parts :Of silk" +62179030,"OTHER MADE UP CLOTHING ACCESSORIES; PARTS OF GARMENTS OR OF CLOTHING ACCESSORIES, OTHER:Parts :Of wool" +62179040,"OTHER MADE UP CLOTHING ACCESSORIES; PARTS OF GARMENTS OR OF CLOTHING ACCESSORIES, OTHER:Parts :Separately presented removable linings for raincoats and other" +62179090,"OTHER MADE UP CLOTHING ACCESSORIES; PARTS OF GARMENTS OR OF CLOTHING ACCESSORIES, OTHER:Parts :Other" +63010000,blankets and travelling rugs +63011000,BLANKETS AND TRAVELLING RUGS::Electric blankets +63012000,"BLANKETS AND TRAVELLING RUGS::Blankets (other than electric blankets) and travelling rugs, of wool or fine animal hair" +63013000,"BLANKETS AND TRAVELLING RUGS::Blankets (other than electric blankets) and travelling rugs, of cotton" +63014000,"BLANKETS AND TRAVELLING RUGS::Blankets (other than electric blankets) and travelling rugs, of synthetic fibres" +63019010,BLANKETS AND TRAVELLING RUGS:Other blankets and travelling rugs :Jute blankets including blankets of blended jute(OLD tariff) +63019090,BLANKETS AND TRAVELLING RUGS:Other blankets and travelling rugs :Other(OLD tariff) +63020000,bed linen table linen toilet linen and kitchen linen +63021000,bed linen table linen toilet linen and kitchen linen >> bed linen knitted or crocheted +63021010,"Bed linen, table linen, toilet linen and kitchen linen:Bed linen, 
knitted or crocheted :Of cotton" +63021090,"Bed linen, table linen, toilet linen and kitchen linen:Bed linen, knitted or crocheted :Other" +63022100,"Bed linen, table linen, toilet linen and kitchen linen:of cotton:Of cotton" +63022110,"Bed linen, table linen, toilet linen and kitchen linen:of cotton:Handloom" +63022190,"Bed linen, table linen, toilet linen and kitchen linen:of cotton:OTHER" +63022200,"Bed linen, table linen, toilet linen and kitchen linen::Of man-made fibres" +63022900,"Bed linen, table linen, toilet linen and kitchen linen::Of other textile materials" +63023100,"Bed linen, table linen, toilet linen and kitchen linen::Of cotton" +63023200,"Bed linen, table linen, toilet linen and kitchen linen::Of man-made fibres" +63023900,"Bed linen, table linen, toilet linen and kitchen linen::Of other textile materials" +63024000,bed linen table linen toilet linen and kitchen linen >> table linen knitted or crocheted +63024010,"Bed linen, table linen, toilet linen and kitchen linen:Table linen, knitted or crocheted :Of silk" +63024020,"Bed linen, table linen, toilet linen and kitchen linen:Table linen, knitted or crocheted :Of wool or fine animal hair" +63024030,"Bed linen, table linen, toilet linen and kitchen linen:Table linen, knitted or crocheted :Of cotton" +63024040,"Bed linen, table linen, toilet linen and kitchen linen:Table linen, knitted or crocheted :Of man-made fibres" +63024090,"Bed linen, table linen, toilet linen and kitchen linen:Table linen, knitted or crocheted :Other" +63025100,"Bed linen, table linen, toilet linen and kitchen linen:OF COTTON:Of cotton" +63025110,"Bed linen, table linen, toilet linen and kitchen linen:OF COTTON:Handloom" +63025190,"Bed linen, table linen, toilet linen and kitchen linen:OF COTTON:OTHER" +63025300,"Bed linen, table linen, toilet linen and kitchen linen::Of man-made fibres" +63025900,"Bed linen, table linen, toilet linen and kitchen linen::Of other textile materials" +63026000,bed linen table linen toilet linen and kitchen linen >> toilet linen and kitchen linen of terry towelling or similar terry fabrics of cotton +63026010,"Bed linen, table linen, toilet linen and kitchen linen::Handloom" +63026090,"Bed linen, table linen, toilet linen and kitchen linen::Other" +63029100,"Bed linen, table linen, toilet linen and kitchen linen:other:Of cotton" +63029110,"Bed linen, table linen, toilet linen and kitchen linen:other:Handloom" +63029190,"Bed linen, table linen, toilet linen and kitchen linen:other:Other" +63029300,"Bed linen, table linen, toilet linen and kitchen linen::Of man-made fibres" +63029900,"Bed linen, table linen, toilet linen and kitchen linen::Of other textile materials" +63030000,curtains including drapes and interior blinds curtain or bed valances knitted or crocheted +63031200,CURTAINS (INCLUDING DRAPES) AND INTERIOR BLINDS; CURTAIN OR BED VALANCES::Of synthetic fibres +63031900,CURTAINS (INCLUDING DRAPES) AND INTERIOR BLINDS; CURTAIN OR BED VALANCES::Of other textile materials +63039100,CURTAINS (INCLUDING DRAPES) AND INTERIOR BLINDS; CURTAIN OR BED VALANCES::Of cotton +63039200,CURTAINS (INCLUDING DRAPES) AND INTERIOR BLINDS; CURTAIN OR BED VALANCES::Of synthetic fibres +63039900,curtains including drapes and interior blinds curtain or bed valances knitted or crocheted >> of other textile materials +63039910,CURTAINS (INCLUDING DRAPES) AND INTERIOR BLINDS; CURTAIN OR BED VALANCES:Of other textile materials :Silk shower curtains +63039990,CURTAINS (INCLUDING DRAPES) AND INTERIOR BLINDS; CURTAIN OR BED VALANCES:Of other 
textile materials :Other +63040000,other furnishing articles excluding those of heading 9404 bedspreads +63041100,"Other furnishing articles, excluding those of heading 9404::Knitted or crocheted" +63041900,other furnishing articles excluding those of heading 9404 bedspreads >> other +63041910,"Other furnishing articles, excluding those of heading 9404:Other:Bedsheets and bedcovers, of cotton" +63041920,"Other furnishing articles, excluding those of heading 9404:Other:Bedspreads of silk" +63041930,"Other furnishing articles, excluding those of heading 9404:Other:Bedsheets and bedcovers of man-made fibres" +63041940,"Other furnishing articles, excluding those of heading 9404:Other:Bed sheets and bed covers, of cotton, Handloom" +63041990,"Other furnishing articles, excluding those of heading 9404:Other:Other" +63042000,"Other furnishing articles, excluding those of heading 9404::Bed nets, of warp knit fabrics specified in Sub-heading Note 1 to this Chapter" +63049100,other furnishing articles excluding those of heading 9404 bedspreads >> knitted or crocheted +63049110,"Other furnishing articles, excluding those of heading 9404:Knitted or crocheted :Silk belt" +63049120,"Other furnishing articles, excluding those of heading 9404:Knitted or crocheted :Woollen cushion cover" +63049190,"Other furnishing articles, excluding those of heading 9404:Knitted or crocheted :Other" +63049200,other furnishing articles excluding those of heading 9404 bedspreads >> not knitted or crocheted of cotton counterpanes +63049210,"Other furnishing articles, excluding those of heading 9404:Not knitted or crochetd, of cotton:Counterpanes(OLD tariff)" +63049211,"Other furnishing articles, excluding those of heading 9404:Not knitted or crochetd, of cotton:Counterpanes, of Handloom" +63049219,"Other furnishing articles, excluding those of heading 9404:Not knitted or crochetd, of cotton:OTHER" +63049220,"Other furnishing articles, excluding those of heading 9404:Not knitted or crochetd, of cotton:Napkins(OLD tariff)" +63049221,"Other furnishing articles, excluding those of heading 9404:Not knitted or crochetd, of cotton:Napkins, of Handloom" +63049229,"Other furnishing articles, excluding those of heading 9404:Not knitted or crochetd, of cotton:Other" +63049230,"Other furnishing articles, excluding those of heading 9404:Not knitted or crochetd, of cotton:Pillow case and pillow slip(OLD tariff)" +63049231,"Other furnishing articles, excluding those of heading 9404:Not knitted or crochetd, of cotton:Pillow cases and pillow slips, of handloom" +63049239,"Other furnishing articles, excluding those of heading 9404:Not knitted or crochetd, of cotton:others" +63049240,"Other furnishing articles, excluding those of heading 9404:Not knitted or crochetd, of cotton:Tablecloth and table cover(OLD tariff)" +63049241,"Other furnishing articles, excluding those of heading 9404:Not knitted or crochetd, of cotton:Table cloth and table covers, of handloom" +63049249,"Other furnishing articles, excluding those of heading 9404:Not knitted or crochetd, of cotton:OTHER" +63049250,"Other furnishing articles, excluding those of heading 9404:Not knitted or crochetd, of cotton:Terry towel" +63049260,"Other furnishing articles, excluding those of heading 9404:Not knitted or crochetd, of cotton:Towels, other than terry towel" +63049270,"Other furnishing articles, excluding those of heading 9404:Not knitted or crochetd, of cotton:Mosquito nets" +63049280,"Other furnishing articles, excluding those of heading 9404:Not knitted or crochetd, of 
cotton:Cushion covers(OLD tariff)" +63049281,"Other furnishing articles, excluding those of heading 9404:Not knitted or crochetd, of cotton:Cushion covers, of handloom" +63049289,"Other furnishing articles, excluding those of heading 9404:Not knitted or crochetd, of cotton:OTHER" +63049290,"Other furnishing articles, excluding those of heading 9404:Not knitted or crochetd, of cotton:Other furnishing articles(OLD tariff)" +63049291,"Other furnishing articles, excluding those of heading 9404:Not knitted or crochetd, of cotton:Other furnishing articles, of handloom" +63049299,"Other furnishing articles, excluding those of heading 9404:Not knitted or crochetd, of cotton:OTHER" +63049300,"Other furnishing articles, excluding those of heading 9404::Not knitted or crocheted, of synthetic fibres" +63049900,other furnishing articles excluding those of heading 9404 bedspreads >> not knitted or crocheted of other textile material +63049910,"Other furnishing articles, excluding those of heading 9404:Not knitted or crocheted, of other textile material:Silk cushion covers" +63049990,"Other furnishing articles, excluding those of heading 9404:Not knitted or crocheted, of other textile material:Other(OLD tariff)" +63049991,"Other furnishing articles, excluding those of heading 9404:Not knitted or crocheted, of other textile material:Furnishing articles of silk, Handloom" +63049992,"Other furnishing articles, excluding those of heading 9404:Not knitted or crocheted, of other textile material:Furnishing articles of wool, Handloom" +63049999,"Other furnishing articles, excluding those of heading 9404:Not knitted or crocheted, of other textile material:OTHER" +63050000,sacks and bags of a kind used for the packing of goods +63051000,sacks and bags of a kind used for the packing of goods >> of jute or of other textile bast fibres of heading 5303 +63051010,"SACKS AND BAGS, OF A KIND USED FOR THE PACKING OF GOODS:Of jute or of other textile bast fibres of heading 5303 :Jute bagging for raw cotton" +63051020,"SACKS AND BAGS, OF A KIND USED FOR THE PACKING OF GOODS:Of jute or of other textile bast fibres of heading 5303 :Jute corn (grains) sacks" +63051030,"SACKS AND BAGS, OF A KIND USED FOR THE PACKING OF GOODS:Of jute or of other textile bast fibres of heading 5303 :Jute hessian bags" +63051040,"SACKS AND BAGS, OF A KIND USED FOR THE PACKING OF GOODS:Of jute or of other textile bast fibres of heading 5303 :Jute sacking bags" +63051050,"SACKS AND BAGS, OF A KIND USED FOR THE PACKING OF GOODS:Of jute or of other textile bast fibres of heading 5303 :Jute wool sacks" +63051060,"SACKS AND BAGS, OF A KIND USED FOR THE PACKING OF GOODS:Of jute or of other textile bast fibres of heading 5303 :Plastic coated or paper cum polythene lined jute bags and sacks" +63051070,"SACKS AND BAGS, OF A KIND USED FOR THE PACKING OF GOODS:Of jute or of other textile bast fibres of heading 5303 :Paper laminated hessian jute" +63051080,"SACKS AND BAGS, OF A KIND USED FOR THE PACKING OF GOODS:Of jute or of other textile bast fibres of heading 5303 :Jute soil savers" +63051090,"SACKS AND BAGS, OF A KIND USED FOR THE PACKING OF GOODS:Of jute or of other textile bast fibres of heading 5303 :Other" +63052000,"SACKS AND BAGS, OF A KIND USED FOR THE PACKING OF GOODS::Of cotton" +63053200,"SACKS AND BAGS, OF A KIND USED FOR THE PACKING OF GOODS::Flexible intermediate bulk containers" +63053300,"SACKS AND BAGS, OF A KIND USED FOR THE PACKING OF GOODS::Other, of polyethylene or polypropylene strip or the like" +63053900,"SACKS AND BAGS, OF A KIND 
USED FOR THE PACKING OF GOODS::Other" +63059000,"SACKS AND BAGS, OF A KIND USED FOR THE PACKING OF GOODS::Of other textile materials" +63060000,tarpaulins awnings and sunblinds tents including temporary canopies and similar articles sails for boats sailboards or landcraft camping goods tarpaulins awnings and sunblinds +63061200,"TARPAULINS, AWNINGS AND SUNBLINDS; TENTS; SAILS FOR BOATS, SAILBOARDS OR LANDCRAFT; CAMPING GOODS::Of synthetic fibres" +63061900,tarpaulins awnings and sunblinds tents including temporary canopies and similar articles sails for boats sailboards or landcraft camping goods tarpaulins awnings and sunblinds >> of other textile materials +63061910,"TARPAULINS, AWNINGS AND SUNBLINDS; TENTS; SAILS FOR BOATS, SAILBOARDS OR LANDCRAFT; CAMPING GOODS:Of other textile materials :Jute tarpaulins (including DW tarpaulin)" +63061920,"TARPAULINS, AWNINGS AND SUNBLINDS; TENTS; SAILS FOR BOATS, SAILBOARDS OR LANDCRAFT; CAMPING GOODS:Of other textile materials :Blinds or awnings of coir" +63061930,"TARPAULINS, AWNINGS AND SUNBLINDS; TENTS; SAILS FOR BOATS, SAILBOARDS OR LANDCRAFT; CAMPING GOODS:Of other textile materials :Venetian or Austrian blinds" +63061990,"TARPAULINS, AWNINGS AND SUNBLINDS; TENTS; SAILS FOR BOATS, SAILBOARDS OR LANDCRAFT; CAMPING GOODS:Of other textile materials :Other" +63062200,"TARPAULINS, AWNINGS AND SUNBLINDS; TENTS; SAILS FOR BOATS, SAILBOARDS OR LANDCRAFT; CAMPING GOODS::Of synthetic fibres" +63062900,tarpaulins awnings and sunblinds tents including temporary canopies and similar articles sails for boats sailboards or landcraft camping goods tarpaulins awnings and sunblinds >> of other textile materials +63062910,"TARPAULINS, AWNINGS AND SUNBLINDS; TENTS; SAILS FOR BOATS, SAILBOARDS OR LANDCRAFT; CAMPING GOODS:Of other textile materials :Of jute" +63062990,"TARPAULINS, AWNINGS AND SUNBLINDS; TENTS; SAILS FOR BOATS, SAILBOARDS OR LANDCRAFT; CAMPING GOODS:Of other textile materials :Other" +63063000,"TARPAULINS, AWNINGS AND SUNBLINDS; TENTS; SAILS FOR BOATS, SAILBOARDS OR LANDCRAFT; CAMPING GOODS::Sails" +63064000,"TARPAULINS, AWNINGS AND SUNBLINDS; TENTS; SAILS FOR BOATS, SAILBOARDS OR LANDCRAFT; CAMPING GOODS:Pneumatic mattresses:Pneumatic mattresses" +63069000,tarpaulins awnings and sunblinds tents including temporary canopies and similar articles sails for boats sailboards or landcraft camping goods tarpaulins awnings and sunblinds >> other +63069010,"TARPAULINS, AWNINGS AND SUNBLINDS; TENTS; SAILS FOR BOATS, SAILBOARDS OR LANDCRAFT; CAMPING GOODS:Other:Of cotton" +63069090,"TARPAULINS, AWNINGS AND SUNBLINDS; TENTS; SAILS FOR BOATS, SAILBOARDS OR LANDCRAFT; CAMPING GOODS:Other:Of other textile materials" +63070000,other made up articles including dress patterns +63071000,other made up articles including dress patterns >> dusters and similar cleaning cloths +63071010,"Other made-up articles, including dress patterns:Floor-cloths, dish cloths, dusters and similar cleaning cloths:Of cotton" +63071020,"Other made-up articles, including dress patterns:Floor-cloths, dish cloths, dusters and similar cleaning cloths:Of man-made fibres" +63071030,"Other made-up articles, including dress patterns:Floor-cloths, dish cloths, dusters and similar cleaning cloths:Of cotton, Handloom" +63071090,"Other made-up articles, including dress patterns:Floor-cloths, dish cloths, dusters and similar cleaning cloths:Other" +63072000,other made up articles including dress patterns >> and +63072010,"Other made-up articles, including dress patterns:Life-jackets and life-belts :Of 
cotton" +63072090,"Other made-up articles, including dress patterns:Life-jackets and life-belts :Other" +63079000,other made up articles including dress patterns >> other dress materials hand printed +63079011,"Other made-up articles, including dress patterns:Other :Of cotton" +63079012,"Other made-up articles, including dress patterns:Other :Of silk" +63079013,"Other made-up articles, including dress patterns:Other :Of man-made fibres" +63079019,"Other made-up articles, including dress patterns:Other :Other" +63079020,"Other made-up articles, including dress patterns:Other :Made up articles of cotton" +63079090,"Other made-up articles, including dress patterns:Other :Other(OLD tariff)" +63079091,other made up articles including dress patterns >> other dress materials hand printed >> textile face masks without a replaceable filter or mechanical parts including surgical mask and disposable face mask made of non woven textile +63079099,other made up articles including dress patterns >> other dress materials hand printed >> other +63080000,"::SETS CONSISTING OF WOVEN FABRIC AND YARN, WHETHER OR NOT WITH ACCESSORIES, FOR MAKING UP INTO RUGS, TAPESTRIES, EMBROIDERED TABLE CLOTHS OR SERVIETTES, OR SIMILAR TEXTILE ARTICLES, PUT UP IN PACKINGSFOR RETAIL SALE" +63090000,::WORN CLOTHING AND OTHER WORN ARTICLES +63100000,used or new rags scrap twine cordage rope and cables and worn out articles of twine cordage rope or cables of textile materials +63101000,used or new rags scrap twine cordage rope and cables and worn out articles of twine cordage rope or cables of textile materials >> sorted +63101010,"USED OR NEW RAGS, SCRAP TWINE, CORDAGE, ROPE AND CABLES AND WORN OUT ARTICLES OF TWINE, CORDAGE, ROPE OR CABLES, OF TEXTILE MATERIALS:Sorted :Woollen rags" +63101020,"USED OR NEW RAGS, SCRAP TWINE, CORDAGE, ROPE AND CABLES AND WORN OUT ARTICLES OF TWINE, CORDAGE, ROPE OR CABLES, OF TEXTILE MATERIALS:Sorted :Cotton rags" +63101030,"USED OR NEW RAGS, SCRAP TWINE, CORDAGE, ROPE AND CABLES AND WORN OUT ARTICLES OF TWINE, CORDAGE, ROPE OR CABLES, OF TEXTILE MATERIALS:Sorted :Gunny cuttings" +63101090,"USED OR NEW RAGS, SCRAP TWINE, CORDAGE, ROPE AND CABLES AND WORN OUT ARTICLES OF TWINE, CORDAGE, ROPE OR CABLES, OF TEXTILE MATERIALS:Sorted :Other" +63109000,used or new rags scrap twine cordage rope and cables and worn out articles of twine cordage rope or cables of textile materials >> other +63109010,"USED OR NEW RAGS, SCRAP TWINE, CORDAGE, ROPE AND CABLES AND WORN OUT ARTICLES OF TWINE, CORDAGE, ROPE OR CABLES, OF TEXTILE MATERIALS::Woolen rags" +63109020,"USED OR NEW RAGS, SCRAP TWINE, CORDAGE, ROPE AND CABLES AND WORN OUT ARTICLES OF TWINE, CORDAGE, ROPE OR CABLES, OF TEXTILE MATERIALS::Cotton rags" +63109030,"USED OR NEW RAGS, SCRAP TWINE, CORDAGE, ROPE AND CABLES AND WORN OUT ARTICLES OF TWINE, CORDAGE, ROPE OR CABLES, OF TEXTILE MATERIALS::Gunny cuttings" +63109040,"USED OR NEW RAGS, SCRAP TWINE, CORDAGE, ROPE AND CABLES AND WORN OUT ARTICLES OF TWINE, CORDAGE, ROPE OR CABLES, OF TEXTILE MATERIALS::Synthetic rags" +63109090,"USED OR NEW RAGS, SCRAP TWINE, CORDAGE, ROPE AND CABLES AND WORN OUT ARTICLES OF TWINE, CORDAGE, ROPE OR CABLES, OF TEXTILE MATERIALS::Other" +64010000,waterproof footwear with outer soles and uppers of rubber or of plastics the uppers of which are neither fixed to the sole nor assembled by stitching riveting nailing screwing plugging or similar processes +64011000,waterproof footwear with outer soles and uppers of rubber or of plastics the uppers of which are neither fixed to 
the sole nor assembled by stitching riveting nailing screwing plugging or similar processes >> footwear incorporating a protective metal +64011010,"WATERPROOF FOOTWEAR WITH OUTER SOLES AND UPPERS OF RUBBER OR OF PLASTICS, THE UPPERS OF WHICH ARE NEITHER FIXED TO THE SOLE NOR ASSEMBLED BY STITCHING, RIVETING, NAILING, SCREWING, PLUGGING OR SIMILAR PROCESSES:Footwear incorporating a protective metal toe-cap :Of rubber" +64011090,"WATERPROOF FOOTWEAR WITH OUTER SOLES AND UPPERS OF RUBBER OR OF PLASTICS, THE UPPERS OF WHICH ARE NEITHER FIXED TO THE SOLE NOR ASSEMBLED BY STITCHING, RIVETING, NAILING, SCREWING, PLUGGING OR SIMILAR PROCESSES:Footwear incorporating a protective metal toe-cap :Other" +64019200,waterproof footwear with outer soles and uppers of rubber or of plastics the uppers of which are neither fixed to the sole nor assembled by stitching riveting nailing screwing plugging or similar processes >> covering the ankle but not covering the knee +64019210,"WATERPROOF FOOTWEAR WITH OUTER SOLES AND UPPERS OF RUBBER OR OF PLASTICS, THE UPPERS OF WHICH ARE NEITHER FIXED TO THE SOLE NOR ASSEMBLED BY STITCHING, RIVETING, NAILING, SCREWING, PLUGGING OR SIMILAR PROCESSES:Covering the ankle but not covering the knee :Of rubber" +64019290,"WATERPROOF FOOTWEAR WITH OUTER SOLES AND UPPERS OF RUBBER OR OF PLASTICS, THE UPPERS OF WHICH ARE NEITHER FIXED TO THE SOLE NOR ASSEMBLED BY STITCHING, RIVETING, NAILING, SCREWING, PLUGGING OR SIMILAR PROCESSES:Covering the ankle but not covering the knee :Other" +64019900,waterproof footwear with outer soles and uppers of rubber or of plastics the uppers of which are neither fixed to the sole nor assembled by stitching riveting nailing screwing plugging or similar processes >> other +64019910,"WATERPROOF FOOTWEAR WITH OUTER SOLES AND UPPERS OF RUBBER OR OF PLASTICS, THE UPPERS OF WHICH ARE NEITHER FIXED TO THE SOLE NOR ASSEMBLED BY STITCHING, RIVETING, NAILING, SCREWING, PLUGGING OR SIMILAR PROCESSES:Other :Of rubber" +64019990,"WATERPROOF FOOTWEAR WITH OUTER SOLES AND UPPERS OF RUBBER OR OF PLASTICS, THE UPPERS OF WHICH ARE NEITHER FIXED TO THE SOLE NOR ASSEMBLED BY STITCHING, RIVETING, NAILING, SCREWING, PLUGGING OR SIMILAR PROCESSES:Other :Other" +64020000,other footwear with outer soles and uppers of rubber or plastics sports footwear +64021200,other footwear with outer soles and uppers of rubber or plastics sports footwear >> ski footwear and snowboard boots +64021210,"OTHER FOOTWEAR WITH OUTER SOLES AND UPPERS OF RUBBER OR PLASTICS:Ski-boots, cross-country ski footwear and snowboard boots :Of rubber" +64021290,"OTHER FOOTWEAR WITH OUTER SOLES AND UPPERS OF RUBBER OR PLASTICS:Ski-boots, cross-country ski footwear and snowboard boots :Other" +64021900,other footwear with outer soles and uppers of rubber or plastics sports footwear >> other +64021910,OTHER FOOTWEAR WITH OUTER SOLES AND UPPERS OF RUBBER OR PLASTICS:Other :Of rubber +64021990,OTHER FOOTWEAR WITH OUTER SOLES AND UPPERS OF RUBBER OR PLASTICS:Other :Other +64022000,other footwear with outer soles and uppers of rubber or plastics sports footwear >> footwear with upper straps or thongs assembled to the sole by means of plugs +64022010,OTHER FOOTWEAR WITH OUTER SOLES AND UPPERS OF RUBBER OR PLASTICS:Footwear with upper straps or thongs assembled to the sole by means of plugs :Of rubber +64022090,OTHER FOOTWEAR WITH OUTER SOLES AND UPPERS OF RUBBER OR PLASTICS:Footwear with upper straps or thongs assembled to the sole by means of plugs :Other +64029100,other footwear with outer soles and uppers of 
rubber or plastics sports footwear >> covering the ankle +64029110,OTHER FOOTWEAR WITH OUTER SOLES AND UPPERS OF RUBBER OR PLASTICS:Covering the ankle :Of rubber +64029190,OTHER FOOTWEAR WITH OUTER SOLES AND UPPERS OF RUBBER OR PLASTICS:Covering the ankle :Other +64029900,other footwear with outer soles and uppers of rubber or plastics sports footwear >> other +64029910,OTHER FOOTWEAR WITH OUTER SOLES AND UPPERS OF RUBBER OR PLASTICS:Other :Of rubber +64029990,OTHER FOOTWEAR WITH OUTER SOLES AND UPPERS OF RUBBER OR PLASTICS:Other :Other +64030000,footwear with outer soles of rubber plastics leather or composition leather and uppers of leather sports footwear +64031200,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER::Ski-boots, cross-country ski footwear and snowboard boots" +64031900,footwear with outer soles of rubber plastics leather or composition leather and uppers of leather sports footwear >> other +64031910,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Other :With outer soles of leather" +64031920,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Other :With outer soles of rubber" +64031990,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Other :Other" +64032000,footwear with outer soles of rubber plastics leather or composition leather and uppers of leather sports footwear >> footwear with outer soles of leather and uppers which consist of leather straps across the instep and around the big toe all leather closed toe +64032011,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Footwear with outer soles of leather, and uppers which consist of leather straps across the instep and around the big toe :For men" +64032012,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Footwear with outer soles of leather, and uppers which consist of leather straps across the instep and around the big toe :For women" +64032013,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Footwear with outer soles of leather, and uppers which consist of leather straps across the instep and around the big toe :For children" +64032019,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Footwear with outer soles of leather, and uppers which consist of leather straps across the instep and around the big toe :Other" +64032021,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Footwear with outer soles of leather, and uppers which consist of leather straps across the instep and around the big toe :For men" +64032022,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Footwear with outer soles of leather, and uppers which consist of leather straps across the instep and around the big toe :For women" +64032023,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Footwear with outer soles of leather, and uppers which consist of leather straps across the instep and around the big toe :For children" +64032029,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Footwear with outer soles of leather, and uppers which consist of 
leather straps across the instep and around the big toe :Others" +64032030,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Footwear with outer soles of leather, and uppers which consist of leather straps across the instep and around the big toe :Of leather soles with embroidered uppers" +64032040,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Footwear with outer soles of leather, and uppers which consist of leather straps across the instep and around the big toe :Kolapuri chappals and similar footwear" +64032090,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Footwear with outer soles of leather, and uppers which consist of leather straps across the instep and around the big toe :Other" +64034000,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER::Other footwear, incorporating a protective metal toe-cap" +64035100,footwear with outer soles of rubber plastics leather or composition leather and uppers of leather sports footwear >> covering the ankle all leather shoes +64035111,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Covering the ankle :For men" +64035112,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Covering the ankle :For women" +64035113,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Covering the ankle :For children" +64035119,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Covering the ankle :Other" +64035190,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Covering the ankle :Other" +64035900,footwear with outer soles of rubber plastics leather or composition leather and uppers of leather sports footwear >> other +64035910,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Other :For men" +64035920,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Other :For women" +64035930,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Other :For children" +64035990,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Other :Other" +64039100,footwear with outer soles of rubber plastics leather or composition leather and uppers of leather sports footwear >> covering the ankle +64039110,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Covering the ankle :Leather boots and other footwear with rubber sole" +64039120,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Covering the ankle :Leather footwear with plastic and synthetic sole" +64039190,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Covering the ankle :Other" +64039900,footwear with outer soles of rubber plastics leather or composition leather and uppers of leather sports footwear >> other +64039910,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Other :Leather sandals with rubber sole" +64039920,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION 
LEATHER AND UPPERS OF LEATHER:Other :Leather sandals with plastic or synthetic sole" +64039990,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF LEATHER:Other :Other" +64040000,footwear with outer soles of rubber plastics leather or composition leather and uppers of textile materials footwear with outer soles of rubber or plastics +64041100,footwear with outer soles of rubber plastics leather or composition leather and uppers of textile materials footwear with outer soles of rubber or plastics >> sports footwear tennis shoes basketball shoes gym shoes training shoes and the like +64041110,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF TEXTILE MATERIALS:Sports footwear; tennis shoes, basketball shoes, gym shoes, training shoes and the like :Of rubber sole with canvas upper" +64041120,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF TEXTILE MATERIALS:Sports footwear; tennis shoes, basketball shoes, gym shoes, training shoes and the like :Of rubber sole with leather cloth uppers" +64041190,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF TEXTILE MATERIALS:Sports footwear; tennis shoes, basketball shoes, gym shoes, training shoes and the like :Other" +64041900,footwear with outer soles of rubber plastics leather or composition leather and uppers of textile materials footwear with outer soles of rubber or plastics >> other +64041910,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF TEXTILE MATERIALS:Other :Of rubber sole with canvas upper" +64041920,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF TEXTILE MATERIALS:Other :Of rubber sole with leather cloth uppers" +64041990,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF TEXTILE MATERIALS:Other :Other" +64042000,"FOOTWEAR WITH OUTER SOLES OF RUBBER, PLASTICS, LEATHER OR COMPOSITION LEATHER AND UPPERS OF TEXTILE MATERIALS::Footwear with outer soles of leather or composition leather" +64050000,other footwear +64051000,OTHER FOOTWEAR::With uppers of leather or composition leather +64052000,OTHER FOOTWEAR::With uppers of textile materials +64059000,OTHER FOOTWEAR::Other +64060000,parts of footwear including uppers whether or not attached to soles other than outer soles removable heel cushions and similar articles gaiters leggings and similar articles and parts thereof +64061000,parts of footwear including uppers whether or not attached to soles other than outer soles removable heel cushions and similar articles gaiters leggings and similar articles and parts thereof >> uppers and parts thereof other than stiffeners +64061010,"PARTS OF FOOTWEAR (INCLUDING UPPERS WHETHER OR NOT ATTACHED TO SOLES OTHER THAN OUTER SOLES); REMOVABLE IN-SOLES, HEEL CUSHIONS AND SIMILAR ARTICLES; GAITERS, LEGGINGS AND SIMILAR ARTICLES, AND PARTS THEREOF:Uppers and parts thereof, other:Embroidered uppers of textile materials" +64061020,"PARTS OF FOOTWEAR (INCLUDING UPPERS WHETHER OR NOT ATTACHED TO SOLES OTHER THAN OUTER SOLES); REMOVABLE IN-SOLES, HEEL CUSHIONS AND SIMILAR ARTICLES; GAITERS, LEGGINGS AND SIMILAR ARTICLES, AND PARTS THEREOF:Uppers and parts thereof, other:Leather uppers (prepared)" +64061030,"PARTS OF FOOTWEAR (INCLUDING UPPERS WHETHER OR NOT ATTACHED TO SOLES OTHER THAN OUTER SOLES); REMOVABLE IN-SOLES, HEEL CUSHIONS AND SIMILAR ARTICLES; GAITERS, 
LEGGINGS AND SIMILAR ARTICLES, AND PARTS THEREOF:Uppers and parts thereof, other:Goat lining" +64061040,"PARTS OF FOOTWEAR (INCLUDING UPPERS WHETHER OR NOT ATTACHED TO SOLES OTHER THAN OUTER SOLES); REMOVABLE IN-SOLES, HEEL CUSHIONS AND SIMILAR ARTICLES; GAITERS, LEGGINGS AND SIMILAR ARTICLES, AND PARTS THEREOF:Uppers and parts thereof, other:Sheep lining" +64061090,"PARTS OF FOOTWEAR (INCLUDING UPPERS WHETHER OR NOT ATTACHED TO SOLES OTHER THAN OUTER SOLES); REMOVABLE IN-SOLES, HEEL CUSHIONS AND SIMILAR ARTICLES; GAITERS, LEGGINGS AND SIMILAR ARTICLES, AND PARTS THEREOF:Uppers and parts thereof, other:Other" +64062000,"PARTS OF FOOTWEAR (INCLUDING UPPERS WHETHER OR NOT ATTACHED TO SOLES OTHER THAN OUTER SOLES); REMOVABLE IN-SOLES, HEEL CUSHIONS AND SIMILAR ARTICLES; GAITERS, LEGGINGS AND SIMILAR ARTICLES, AND PARTS THEREOF:Outer soles and heels, of rubber and plastics:Outer soles and heels of rubber or plastics" +64069000,parts of footwear including uppers whether or not attached to soles other than outer soles removable heel cushions and similar articles gaiters leggings and similar articles and parts thereof >> other +64069010,"PARTS OF FOOTWEAR (INCLUDING UPPERS WHETHER OR NOT ATTACHED TO SOLES OTHER THAN OUTER SOLES); REMOVABLE IN-SOLES, HEEL CUSHIONS AND SIMILAR ARTICLES; GAITERS, LEGGINGS AND SIMILAR ARTICLES, AND PARTS THEREOF:Other:Of wood" +64069020,"PARTS OF FOOTWEAR (INCLUDING UPPERS WHETHER OR NOT ATTACHED TO SOLES OTHER THAN OUTER SOLES); REMOVABLE IN-SOLES, HEEL CUSHIONS AND SIMILAR ARTICLES; GAITERS, LEGGINGS AND SIMILAR ARTICLES, AND PARTS THEREOF:Other:Leather parts other than soles and prepared uppers(OLD tariff)" +64069030,"PARTS OF FOOTWEAR (INCLUDING UPPERS WHETHER OR NOT ATTACHED TO SOLES OTHER THAN OUTER SOLES); REMOVABLE IN-SOLES, HEEL CUSHIONS AND SIMILAR ARTICLES; GAITERS, LEGGINGS AND SIMILAR ARTICLES, AND PARTS THEREOF:Other:Leather soles(OLD tariff)" +64069040,"PARTS OF FOOTWEAR (INCLUDING UPPERS WHETHER OR NOT ATTACHED TO SOLES OTHER THAN OUTER SOLES); REMOVABLE IN-SOLES, HEEL CUSHIONS AND SIMILAR ARTICLES; GAITERS, LEGGINGS AND SIMILAR ARTICLES, AND PARTS THEREOF:Other:Gaiters, leggings and similar articles(OLD tariff)" +64069050,"PARTS OF FOOTWEAR (INCLUDING UPPERS WHETHER OR NOT ATTACHED TO SOLES OTHER THAN OUTER SOLES); REMOVABLE IN-SOLES, HEEL CUSHIONS AND SIMILAR ARTICLES; GAITERS, LEGGINGS AND SIMILAR ARTICLES, AND PARTS THEREOF:Other:Parts of gaiters, leggings and similar articles(OLD tariff)" +64069090,"PARTS OF FOOTWEAR (INCLUDING UPPERS WHETHER OR NOT ATTACHED TO SOLES OTHER THAN OUTER SOLES); REMOVABLE IN-SOLES, HEEL CUSHIONS AND SIMILAR ARTICLES; GAITERS, LEGGINGS AND SIMILAR ARTICLES, AND PARTS THEREOF:Other:Other(OLD tariff)" +65010000,felt neither blocked to shape nor with made brims plateaux and manchons including slit manchons of felt hat bodies and hoods of felt neither blocked to shape nor with made brims plateaux and manchons including slit manchons of felt of cotton of wool and fur felt other +65010010,"HAT-FORMS, HAT BODIES AND HOODS OF FELT, NEITHER BLOCKED TO SHAPE NOR WITH MADE BRIMS; PLATEAUX AND MANCHONS (INCLUDING SLIT MANCHONS), OF FELT:Hat-forms, hat bodies and hoods of felt, neither blocked to shape nor with made brims; plateaux and manchons (including slit manchons), of felt:Hair nets(OLD tariff)" +65010090,"HAT-FORMS, HAT BODIES AND HOODS OF FELT, NEITHER BLOCKED TO SHAPE NOR WITH MADE BRIMS; PLATEAUX AND MANCHONS (INCLUDING SLIT MANCHONS), OF FELT:Hat-forms, hat bodies and hoods of felt, neither blocked to shape nor with 
made brims; plateaux and manchons (including slit manchons), of felt:Other(OLD tariff)" +65020000,plaited or made by assembling strips of any material neither blocked to shape nor with made brims nor lined nor trimmed plaited or made by assembling strips of any material neither blocked to shape nor with made brims nor lined nor trimmed +65020010,"HAT-SHAPES, PLAITED OR MADE BY ASSEMBLING STRIPS OF ANY MATERIAL, NEITHER BLOCKED TO SHAPE, NOR WITH MADE BRIMS, NOR LINED, NOR TRIMMED:Hat-shapes, plaited or made by assembling strips of any material, neither blocked to shape, nor with made brims, nor lined, nor trimmed:of Cotton" +65020020,"HAT-SHAPES, PLAITED OR MADE BY ASSEMBLING STRIPS OF ANY MATERIAL, NEITHER BLOCKED TO SHAPE, NOR WITH MADE BRIMS, NOR LINED, NOR TRIMMED:Hat-shapes, plaited or made by assembling strips of any material, neither blocked to shape, nor with made brims, nor lined, nor trimmed:Of wool(OLD tariff)" +65020090,"HAT-SHAPES, PLAITED OR MADE BY ASSEMBLING STRIPS OF ANY MATERIAL, NEITHER BLOCKED TO SHAPE, NOR WITH MADE BRIMS, NOR LINED, NOR TRIMMED:Hat-shapes, plaited or made by assembling strips of any material, neither blocked to shape, nor with made brims, nor lined, nor trimmed:Other(OLD tariff)" +65040000,"Hats and other Head gear:Hats and other head gear:Hats and other Headgear, platted or made by assembling strips of any material whether or not lined or trimmed" +65050000,hats and other headgear knitted or crocheted or made up from lace felt or other textile fabric in the piece but not in strips whether or not lined or trimmed hair nets of any material whether or not lined or trimmed hats and other headgear knitted or crocheted or made up from lace felt or other textile fabric in the piece but not in strips whether material whether or not lined or trimmed +65050010,"HATS AND OTHER HEADGEAR, KNITTED OR CROCHETED, OR MADE UP FROM LACE, FELT OR OTHER TEXTILE FABRIC, IN THE PIECE (BUT NOT IN STRIPS), WHETHER OR NOT LINED OR TRIMMED; HAIR-NETS OF ANY MATERIAL, WHETHER OR NOT LINED OR TRIMMED:Hats and other headgear, knitted or crocheted, or made up from lace, felt or other textile fabric, in the piece (but not in strips), whether or not lined or trimmed; hair-nets of any material, whether or not lined or trimmed:Hair nets(OLD tariff)" +65050090,"HATS AND OTHER HEADGEAR, KNITTED OR CROCHETED, OR MADE UP FROM LACE, FELT OR OTHER TEXTILE FABRIC, IN THE PIECE (BUT NOT IN STRIPS), WHETHER OR NOT LINED OR TRIMMED; HAIR-NETS OF ANY MATERIAL, WHETHER OR NOT LINED OR TRIMMED:Hats and other headgear, knitted or crocheted, or made up from lace, felt or other textile fabric, in the piece (but not in strips), whether or not lined or trimmed; hair-nets of any material, whether or not lined or trimmed:Other" +65060000,other headgear whether or not lined or trimmed +65061000,other headgear whether or not lined or trimmed >> safety headgear +65061010,"OTHER HEADGEAR, WHETHER OR NOT LINED OR TRIMMED:Safety headgear:Speed glass welding helmets or other helmets meant for industrial use" +65061090,"OTHER HEADGEAR, WHETHER OR NOT LINED OR TRIMMED:Safety headgear:Other" +65069100,"OTHER HEADGEAR, WHETHER OR NOT LINED OR TRIMMED::Of rubber or of plastics" +65069900,"OTHER HEADGEAR, WHETHER OR NOT LINED OR TRIMMED::Of other materials" +65070000,"::HEAD-BANDS, LININGS, COVERS, HAT FOUNDATIONS, HAT FRAMES, PEAKS AND CHINSTRAPS, FOR HEADGEAR" +66010000,umbrellas and sun umbrellas garden umbrellas and similar umbrellas +66011000,"UMBRELLAS AND SUN UMBRELLAS (INCLUDING WALKING-STICK UMBRELLAS, 
GARDEN UMBRELLAS AND SIMILAR UMBRELLAS)::Garden or similar umbrellas" +66019100,"UMBRELLAS AND SUN UMBRELLAS (INCLUDING WALKING-STICK UMBRELLAS, GARDEN UMBRELLAS AND SIMILAR UMBRELLAS)::Having a telescopic shaft" +66019900,"UMBRELLAS AND SUN UMBRELLAS (INCLUDING WALKING-STICK UMBRELLAS, GARDEN UMBRELLAS AND SIMILAR UMBRELLAS)::Other" +66020000,"::WALKING-STICKS, SEAT-STICKS, WHIPS, RIDING CROPS AND THE LIKE" +66030000,parts trimmings and accessories of articles of heading 6601 to 6602 +66032000,"PARTS, TRIMMINGS AND ACCESSORIES OF ARTICLES OF HEADING 6601 TO 6602::Umbrella frames, including frames mounted on shafts (sticks)" +66039000,parts trimmings and accessories of articles of heading 6601 to 6602 >> other +66039010,"PARTS, TRIMMINGS AND ACCESSORIES OF ARTICLES OF HEADING 6601 TO 6602:Other:Umbrella ribs" +66039090,"PARTS, TRIMMINGS AND ACCESSORIES OF ARTICLES OF HEADING 6601 TO 6602:Other:Other" +67010010,"SKINS AND OTHER PARTS OF BIRDSWITH THEIR FEATHERS OR DOWN, FEATHERS, PARTS OF FEATHERS, DOWN AND ARTICLES THEREOF (OTHER THAN GOODS OF HEADING 0505 AND WORKED QUILLS AND SCAPES):Skins and other parts of birds with their feathers or down, feathers, parts of feathers, down and articles thereof (other than goods of heading 0505 and worked quills and scapes):Feather dusters(OLD tariff)" +67010090,"SKINS AND OTHER PARTS OF BIRDSWITH THEIR FEATHERS OR DOWN, FEATHERS, PARTS OF FEATHERS, DOWN AND ARTICLES THEREOF (OTHER THAN GOODS OF HEADING 0505 AND WORKED QUILLS AND SCAPES):Skins and other parts of birds with their feathers or down, feathers, parts of feathers, down and articles thereof (other than goods of heading 0505 and worked quills and scapes):Other(OLD tariff)" +67020000,artificial flowers foliage and fruit and parts thereof articles made of artificial flowers foliage or fruit +67021000,artificial flowers foliage and fruit and parts thereof articles made of artificial flowers foliage or fruit >> of plastics +67021010,"ARTIFICIAL FLOWERS, FOLIAGE AND FRUIT AND PARTS THEREOF; ARTICLES MADE OF ARTIFICIAL FLOWERS,:Of plastics :Decorative plants" +67021090,"ARTIFICIAL FLOWERS, FOLIAGE AND FRUIT AND PARTS THEREOF; ARTICLES MADE OF ARTIFICIAL FLOWERS,:Of plastics :Other" +67029000,artificial flowers foliage and fruit and parts thereof articles made of artificial flowers foliage or fruit >> of other materials +67029010,"ARTIFICIAL FLOWERS, FOLIAGE AND FRUIT AND PARTS THEREOF; ARTICLES MADE OF ARTIFICIAL FLOWERS,:Of other materials :Of jute" +67029090,"ARTIFICIAL FLOWERS, FOLIAGE AND FRUIT AND PARTS THEREOF; ARTICLES MADE OF ARTIFICIAL FLOWERS,:Of other materials :Other" +67030000,human hair dressed thinned bleached or otherwise worked wool or other animal hair or other textile materials prepared for use in making wigs or the like human hair dressed thinned bleached or otherwise worked wool or other animal hair or other textile materials prepared for use in making wigs or the like +67030010,"HUMAN HAIR, DRESSED, THINNED, BLEACHED OR OTHERWISE WORKED; WOOL OR OTHER ANIMAL HAIR OR OTHER TEXTILE MATERIALS, PREPARED FOR USE IN MAKING WIGS OR THE LIKE:Human hair, dressed, thinned, bleached or otherwise worked; wool or other animal hair or other textile materials, prepared for use in making wigs or the like :Human hair, dressed, thinned, bleached or otherwise worked" +67030020,"HUMAN HAIR, DRESSED, THINNED, BLEACHED OR OTHERWISE WORKED; WOOL OR OTHER ANIMAL HAIR OR OTHER TEXTILE MATERIALS, PREPARED FOR USE IN MAKING WIGS OR THE LIKE:Human hair, dressed, thinned, bleached or otherwise worked; wool 
or other animal hair or other textile materials, prepared for use in making wigs or the like :Wool or other animal hair or other textile materials, prepared for use in making wigs or the like" +67040000,wigs false beards eyebrows and eyelashes switches and the like of human or animal hair or of textile materials articles of human h air not elsew here specif ied o r included of synthetic textile materials +67041100,"WIGS, FALSE BEARDS, EYEBROWS AND EYELASHES, SWITCHES AND THE LIKE, OF HUMAN OR ANIMAL HAIR OR OF TEXTILE MATERIALS; ARTICLES OF HUMAN HAIR NOT ELSEWHERE SPECIFIED OR INCLUDED::Complete wigs" +67041900,wigs false beards eyebrows and eyelashes switches and the like of human or animal hair or of textile materials articles of human h air not elsew here specif ied o r included of synthetic textile materials >> other +67041910,"WIGS, FALSE BEARDS, EYEBROWS AND EYELASHES, SWITCHES AND THE LIKE, OF HUMAN OR ANIMAL HAIR OR OF TEXTILE MATERIALS; ARTICLES OF HUMAN HAIR NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Hair nets" +67041990,"WIGS, FALSE BEARDS, EYEBROWS AND EYELASHES, SWITCHES AND THE LIKE, OF HUMAN OR ANIMAL HAIR OR OF TEXTILE MATERIALS; ARTICLES OF HUMAN HAIR NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Other" +67042000,wigs false beards eyebrows and eyelashes switches and the like of human or animal hair or of textile materials articles of human h air not elsew here specif ied o r included of synthetic textile materials >> of human hair +67042010,"WIGS, FALSE BEARDS, EYEBROWS AND EYELASHES, SWITCHES AND THE LIKE, OF HUMAN OR ANIMAL HAIR OR OF TEXTILE MATERIALS; ARTICLES OF HUMAN HAIR NOT ELSEWHERE SPECIFIED OR INCLUDED:Of human hair :Wigs" +67042020,"WIGS, FALSE BEARDS, EYEBROWS AND EYELASHES, SWITCHES AND THE LIKE, OF HUMAN OR ANIMAL HAIR OR OF TEXTILE MATERIALS; ARTICLES OF HUMAN HAIR NOT ELSEWHERE SPECIFIED OR INCLUDED:Of human hair :Hair nets" +67042090,"WIGS, FALSE BEARDS, EYEBROWS AND EYELASHES, SWITCHES AND THE LIKE, OF HUMAN OR ANIMAL HAIR OR OF TEXTILE MATERIALS; ARTICLES OF HUMAN HAIR NOT ELSEWHERE SPECIFIED OR INCLUDED:Of human hair :Other" +67049000,"WIGS, FALSE BEARDS, EYEBROWS AND EYELASHES, SWITCHES AND THE LIKE, OF HUMAN OR ANIMAL HAIR OR OF TEXTILE MATERIALS; ARTICLES OF HUMAN HAIR NOT ELSEWHERE SPECIFIED OR INCLUDED::Of other materials" +68010000,"::SETTS, CURBSTONES AND FLAGSTONES, OF NATURAL STONE (EXCEPT SLATE)(OLD tariff)" +68020000,worked monumental or building stone except slate and articles thereof other than goods of heading 6801 mosaiccubes and the like of natural stone including slate whether or not on a backing artificially coloured granules chippings and powder of natural stone including slate tiles cubes and similar articles whether or not +68021000,"WORKED MONUMENTAL OR BUILDING STONE (EXCEPT SLATE) AND ARTICLES THEREOF, OTHER THAN GOODS OF HEADING 6801; MOSAIC CUBES AND THE LIKE, OF NATURAL STONE (INCLUDING SLATE), WHETHER OR NOT ON A BACKING;::Tiles, cubes and similar articles, whether or not rectangular (including square), the largest surface area of which is capable of being enclosed in a square the side of which is less than 7 cm; artificially coloured granules,chippings and powder" +68022100,worked monumental or building stone except slate and articles thereof other than goods of heading 6801 mosaiccubes and the like of natural stone including slate whether or not on a backing artificially coloured granules chippings and powder of natural stone including slate tiles cubes and similar articles whether or not >> marble travertine and alabaster 
+68022110,"WORKED MONUMENTAL OR BUILDING STONE (EXCEPT SLATE) AND ARTICLES THEREOF, OTHER THAN GOODS OF HEADING 6801; MOSAIC CUBES AND THE LIKE, OF NATURAL STONE (INCLUDING SLATE), WHETHER OR NOT ON A BACKING;:Marble, travertine and alabaster :Marble blocks or tiles(OLD tariff)" +68022120,"WORKED MONUMENTAL OR BUILDING STONE (EXCEPT SLATE) AND ARTICLES THEREOF, OTHER THAN GOODS OF HEADING 6801; MOSAIC CUBES AND THE LIKE, OF NATURAL STONE (INCLUDING SLATE), WHETHER OR NOT ON A BACKING;:Marble, travertine and alabaster :Marble monumental stone" +68022190,"WORKED MONUMENTAL OR BUILDING STONE (EXCEPT SLATE) AND ARTICLES THEREOF, OTHER THAN GOODS OF HEADING 6801; MOSAIC CUBES AND THE LIKE, OF NATURAL STONE (INCLUDING SLATE), WHETHER OR NOT ON A BACKING;:Marble, travertine and alabaster :Other" +68022300,worked monumental or building stone except slate and articles thereof other than goods of heading 6801 mosaiccubes and the like of natural stone including slate whether or not on a backing artificially coloured granules chippings and powder of natural stone including slate tiles cubes and similar articles whether or not >> granite +68022310,"WORKED MONUMENTAL OR BUILDING STONE (EXCEPT SLATE) AND ARTICLES THEREOF, OTHER THAN GOODS OF HEADING 6801; MOSAIC CUBES AND THE LIKE, OF NATURAL STONE (INCLUDING SLATE), WHETHER OR NOT ON A BACKING;:Granite :Granite blocks or tiles" +68022390,"WORKED MONUMENTAL OR BUILDING STONE (EXCEPT SLATE) AND ARTICLES THEREOF, OTHER THAN GOODS OF HEADING 6801; MOSAIC CUBES AND THE LIKE, OF NATURAL STONE (INCLUDING SLATE), WHETHER OR NOT ON A BACKING;:Granite :Other" +68022900,"WORKED MONUMENTAL OR BUILDING STONE (EXCEPT SLATE) AND ARTICLES THEREOF, OTHER THAN GOODS OF HEADING 6801; MOSAIC CUBES AND THE LIKE, OF NATURAL STONE (INCLUDING SLATE), WHETHER OR NOT ON A BACKING;::Other stone" +68029100,"WORKED MONUMENTAL OR BUILDING STONE (EXCEPT SLATE) AND ARTICLES THEREOF, OTHER THAN GOODS OF HEADING 6801; MOSAIC CUBES AND THE LIKE, OF NATURAL STONE (INCLUDING SLATE), WHETHER OR NOT ON A BACKING;::Marble, travertine and alabaster" +68029200,"WORKED MONUMENTAL OR BUILDING STONE (EXCEPT SLATE) AND ARTICLES THEREOF, OTHER THAN GOODS OF HEADING 6801; MOSAIC CUBES AND THE LIKE, OF NATURAL STONE (INCLUDING SLATE), WHETHER OR NOT ON A BACKING;::Other calcareous stone" +68029300,"WORKED MONUMENTAL OR BUILDING STONE (EXCEPT SLATE) AND ARTICLES THEREOF, OTHER THAN GOODS OF HEADING 6801; MOSAIC CUBES AND THE LIKE, OF NATURAL STONE (INCLUDING SLATE), WHETHER OR NOT ON A BACKING;::Granite" +68029900,"WORKED MONUMENTAL OR BUILDING STONE (EXCEPT SLATE) AND ARTICLES THEREOF, OTHER THAN GOODS OF HEADING 6801; MOSAIC CUBES AND THE LIKE, OF NATURAL STONE (INCLUDING SLATE), WHETHER OR NOT ON A BACKING;::Other stone" +68030000,::WORKED SLATE AND ARTICLES OF SLATE OR OF AGGLOMERATED SLATE +68040000,millstones grindstones grinding wheels and the like without frameworks for grinding sharpening polishing trueing or cutting hand sharpening or polishing stones and parts thereof of natural stone of agglomerated natural or artificial abrasives or of ceramics with or without parts of other materials +68041000,"MILLSTONES, GRINDSTONES, GRINDING WHEELS AND THE LIKE, WITHOUT FRAMEWORKS, FOR GRINDING, SHARPENING, POLISHING, TRUEING OR CUTTING,HAND SHARPENING OR POLISHING STONES, AND PARTS THEREOF, OF NATURAL STONE, OF AGGLOMERATED NATURAL OR ARTIFICIAL ABRASIVES, OR OF CERAMICS, WITH OR WITHOUT PARTS OF OTHER MATERIALS::Millstones and grindstones for milling, grinding or pulping" +68042110,"MILLSTONES, 
GRINDSTONES, GRINDING WHEELS AND THE LIKE, WITHOUT FRAMEWORKS, FOR GRINDING, SHARPENING, POLISHING, TRUEING OR CUTTING,HAND SHARPENING OR POLISHING STONES, AND PARTS THEREOF, OF NATURAL STONE, OF AGGLOMERATED NATURAL OR ARTIFICIAL ABRASIVES, OR OF CERAMICS, WITH OR WITHOUT PARTS OF OTHER MATERIALS:Of agglomerated synthetic or natural diamond:Diamond impregnated wheels(OLD tariff)" +68042190,"MILLSTONES, GRINDSTONES, GRINDING WHEELS AND THE LIKE, WITHOUT FRAMEWORKS, FOR GRINDING, SHARPENING, POLISHING, TRUEING OR CUTTING,HAND SHARPENING OR POLISHING STONES, AND PARTS THEREOF, OF NATURAL STONE, OF AGGLOMERATED NATURAL OR ARTIFICIAL ABRASIVES, OR OF CERAMICS, WITH OR WITHOUT PARTS OF OTHER MATERIALS:Of agglomerated synthetic or natural diamond:Other(OLD tariff)" +68042200,millstones grindstones grinding wheels and the like without frameworks for grinding sharpening polishing trueing or cutting hand sharpening or polishing stones and parts thereof of natural stone of agglomerated natural or artificial abrasives or of ceramics with or without parts of other materials >> of other agglomerated abrasives or of ceramics +68042210,"MILLSTONES, GRINDSTONES, GRINDING WHEELS AND THE LIKE, WITHOUT FRAMEWORKS, FOR GRINDING, SHARPENING, POLISHING, TRUEING OR CUTTING,HAND SHARPENING OR POLISHING STONES, AND PARTS THEREOF, OF NATURAL STONE, OF AGGLOMERATED NATURAL OR ARTIFICIAL ABRASIVES, OR OF CERAMICS, WITH OR WITHOUT PARTS OF OTHER MATERIALS:Of other agglomerated abrasives or of ceramics :Grinding wheels of synthetic abrasives" +68042220,"MILLSTONES, GRINDSTONES, GRINDING WHEELS AND THE LIKE, WITHOUT FRAMEWORKS, FOR GRINDING, SHARPENING, POLISHING, TRUEING OR CUTTING,HAND SHARPENING OR POLISHING STONES, AND PARTS THEREOF, OF NATURAL STONE, OF AGGLOMERATED NATURAL OR ARTIFICIAL ABRASIVES, OR OF CERAMICS, WITH OR WITHOUT PARTS OF OTHER MATERIALS:Of other agglomerated abrasives or of ceramics :Grinding wheels of other materials" +68042290,"MILLSTONES, GRINDSTONES, GRINDING WHEELS AND THE LIKE, WITHOUT FRAMEWORKS, FOR GRINDING, SHARPENING, POLISHING, TRUEING OR CUTTING,HAND SHARPENING OR POLISHING STONES, AND PARTS THEREOF, OF NATURAL STONE, OF AGGLOMERATED NATURAL OR ARTIFICIAL ABRASIVES, OR OF CERAMICS, WITH OR WITHOUT PARTS OF OTHER MATERIALS:Of other agglomerated abrasives or of ceramics :Other" +68042300,millstones grindstones grinding wheels and the like without frameworks for grinding sharpening polishing trueing or cutting hand sharpening or polishing stones and parts thereof of natural stone of agglomerated natural or artificial abrasives or of ceramics with or without parts of other materials >> of natural stone +68042310,"MILLSTONES, GRINDSTONES, GRINDING WHEELS AND THE LIKE, WITHOUT FRAMEWORKS, FOR GRINDING, SHARPENING, POLISHING, TRUEING OR CUTTING,HAND SHARPENING OR POLISHING STONES, AND PARTS THEREOF, OF NATURAL STONE, OF AGGLOMERATED NATURAL OR ARTIFICIAL ABRASIVES, OR OF CERAMICS, WITH OR WITHOUT PARTS OF OTHER MATERIALS:Of natural stone :Grinding wheels made of natural stone" +68042390,"MILLSTONES, GRINDSTONES, GRINDING WHEELS AND THE LIKE, WITHOUT FRAMEWORKS, FOR GRINDING, SHARPENING, POLISHING, TRUEING OR CUTTING,HAND SHARPENING OR POLISHING STONES, AND PARTS THEREOF, OF NATURAL STONE, OF AGGLOMERATED NATURAL OR ARTIFICIAL ABRASIVES, OR OF CERAMICS, WITH OR WITHOUT PARTS OF OTHER MATERIALS:Of natural stone :Other" +68043000,millstones grindstones grinding wheels and the like without frameworks for grinding sharpening polishing trueing or cutting hand sharpening or polishing stones and 
parts thereof of natural stone of agglomerated natural or artificial abrasives or of ceramics with or without parts of other materials >> hand sharpening or polishing stones +68043010,"MILLSTONES, GRINDSTONES, GRINDING WHEELS AND THE LIKE, WITHOUT FRAMEWORKS, FOR GRINDING, SHARPENING, POLISHING, TRUEING OR CUTTING,HAND SHARPENING OR POLISHING STONES, AND PARTS THEREOF, OF NATURAL STONE, OF AGGLOMERATED NATURAL OR ARTIFICIAL ABRASIVES, OR OF CERAMICS, WITH OR WITHOUT PARTS OF OTHER MATERIALS:Hand sharpening or polishing stones:Polishing stones" +68043020,"MILLSTONES, GRINDSTONES, GRINDING WHEELS AND THE LIKE, WITHOUT FRAMEWORKS, FOR GRINDING, SHARPENING, POLISHING, TRUEING OR CUTTING,HAND SHARPENING OR POLISHING STONES, AND PARTS THEREOF, OF NATURAL STONE, OF AGGLOMERATED NATURAL OR ARTIFICIAL ABRASIVES, OR OF CERAMICS, WITH OR WITHOUT PARTS OF OTHER MATERIALS:Hand sharpening or polishing stones:Sharpening stones" +68050000,natural or artificial abrasive powder or grain on a base of textile material of paper of paperboard or of other materials whether or not cut to shape or sewn or otherwise made up +68051000,natural or artificial abrasive powder or grain on a base of textile material of paper of paperboard or of other materials whether or not cut to shape or sewn or otherwise made up >> on a base of woven textile fabric only +68051010,"NATURAL OR ARTIFICIAL ABRASIVE POWDER OR GRAIN, ON A BASE OF TEXTILE MATERIAL, OF PAPER, OF PAPERBOARD OR OF OTHER MATERIALS, WHETHER OR NOT CUT TO SHAPE OR SEWN OR OTHERWISE MADE UP:On a base of woven textile fabric only :Abrasive cloth" +68051090,"NATURAL OR ARTIFICIAL ABRASIVE POWDER OR GRAIN, ON A BASE OF TEXTILE MATERIAL, OF PAPER, OF PAPERBOARD OR OF OTHER MATERIALS, WHETHER OR NOT CUT TO SHAPE OR SEWN OR OTHERWISE MADE UP:On a base of woven textile fabric only :Other" +68052000,natural or artificial abrasive powder or grain on a base of textile material of paper of paperboard or of other materials whether or not cut to shape or sewn or otherwise made up >> on a base of paper or paperboard only +68052010,"NATURAL OR ARTIFICIAL ABRASIVE POWDER OR GRAIN, ON A BASE OF TEXTILE MATERIAL, OF PAPER, OF PAPERBOARD OR OF OTHER MATERIALS, WHETHER OR NOT CUT TO SHAPE OR SEWN OR OTHERWISE MADE UP:On a base of paper or paperboard only :Emery or corundum coated paper" +68052020,"NATURAL OR ARTIFICIAL ABRASIVE POWDER OR GRAIN, ON A BASE OF TEXTILE MATERIAL, OF PAPER, OF PAPERBOARD OR OF OTHER MATERIALS, WHETHER OR NOT CUT TO SHAPE OR SEWN OR OTHERWISE MADE UP:On a base of paper or paperboard only :Flint coated paper" +68052030,"NATURAL OR ARTIFICIAL ABRASIVE POWDER OR GRAIN, ON A BASE OF TEXTILE MATERIAL, OF PAPER, OF PAPERBOARD OR OF OTHER MATERIALS, WHETHER OR NOT CUT TO SHAPE OR SEWN OR OTHERWISE MADE UP:On a base of paper or paperboard only :Glass or sand coated paper" +68052040,"NATURAL OR ARTIFICIAL ABRASIVE POWDER OR GRAIN, ON A BASE OF TEXTILE MATERIAL, OF PAPER, OF PAPERBOARD OR OF OTHER MATERIALS, WHETHER OR NOT CUT TO SHAPE OR SEWN OR OTHERWISE MADE UP:On a base of paper or paperboard only :Other abrasive paper" +68052090,"NATURAL OR ARTIFICIAL ABRASIVE POWDER OR GRAIN, ON A BASE OF TEXTILE MATERIAL, OF PAPER, OF PAPERBOARD OR OF OTHER MATERIALS, WHETHER OR NOT CUT TO SHAPE OR SEWN OR OTHERWISE MADE UP:On a base of paper or paperboard only :Other" +68053000,"NATURAL OR ARTIFICIAL ABRASIVE POWDER OR GRAIN, ON A BASE OF TEXTILE MATERIAL, OF PAPER, OF PAPERBOARD OR OF OTHER MATERIALS, WHETHER OR NOT CUT TO SHAPE OR SEWN OR OTHERWISE MADE UP::On a base of 
other materials" +68061000,"SLAG WOOL, ROCK WOOL AND SIMILAR MINERAL WOOLS; EXFOLIATED VERMICULITE, EXPANDED CLAYS, FOAMED SLAG AND SIMILAR EXPANDED MINERAL MATERIALS; MIXTURES AND ARTICLES OF HEAT-INSULATING, SOUND-INSULATING OR SOUND-ABSORBING MINERAL MATERIALS, OTHER THAN THOSE OF HEADING 6811 OR 6812 OR OF CHAPTER 69::Slag wool, rock wool and similar mineral wools (including intermixtures thereof), in bulk, sheets or rolls(OLD tariff)" +68062000,"SLAG WOOL, ROCK WOOL AND SIMILAR MINERAL WOOLS; EXFOLIATED VERMICULITE, EXPANDED CLAYS, FOAMED SLAG AND SIMILAR EXPANDED MINERAL MATERIALS; MIXTURES AND ARTICLES OF HEAT-INSULATING, SOUND-INSULATING OR SOUND-ABSORBING MINERAL MATERIALS, OTHER THAN THOSE OF HEADING 6811 OR 6812 OR OF CHAPTER 69::Exfoliated vermiculite, expanded clays, foamed slag and similar expanded mineral materials (including intermixtures thereof)(OLD tariff)" +68069000,"SLAG WOOL, ROCK WOOL AND SIMILAR MINERAL WOOLS; EXFOLIATED VERMICULITE, EXPANDED CLAYS, FOAMED SLAG AND SIMILAR EXPANDED MINERAL MATERIALS; MIXTURES AND ARTICLES OF HEAT-INSULATING, SOUND-INSULATING OR SOUND-ABSORBING MINERAL MATERIALS, OTHER THAN THOSE OF HEADING 6811 OR 6812 OR OF CHAPTER 69::Other(OLD tariff)" +68070000,articles of asphalt or of similar material for example petroleum bitumen orcoal tar pitch +68071000,articles of asphalt or of similar material for example petroleum bitumen orcoal tar pitch >> +68071010,"ARTICLES OF ASPHALT OR OF SIMILAR MATERIAL (FOR EXAMPLE, PETROLEUM BITUMEN OR COAL TAR PITCH):In-rolls :Tarfelt roofing" +68071090,"ARTICLES OF ASPHALT OR OF SIMILAR MATERIAL (FOR EXAMPLE, PETROLEUM BITUMEN OR COAL TAR PITCH):In-rolls :Other" +68079000,articles of asphalt or of similar material for example petroleum bitumen orcoal tar pitch >> other +68079010,"ARTICLES OF ASPHALT OR OF SIMILAR MATERIAL (FOR EXAMPLE, PETROLEUM BITUMEN OR COAL TAR PITCH):Other:Tarfelt roofing" +68079090,"ARTICLES OF ASPHALT OR OF SIMILAR MATERIAL (FOR EXAMPLE, PETROLEUM BITUMEN OR COAL TAR PITCH):Other:Other" +68080000,"::PANELS, BOARDS, TILES, BLOCKS AND SIMILAR ARTICLES OF VEGETABLE FIBRE, OF STRAW OR OF SHAVINGS, CHIPS, PARTICLES, SAWDUST OR OTHER WASTE, OF WOOD, AGGLOMERATED WITH CEMENT, PLASTER OR OTHER MINERAL BINDERS" +68090000,articles of plaster or of compositions based on plaster boards sheets panels tiles and similar articles not ornamented articles of plaster or of compositions based on plaster boards sheets panels tiles and similar articles not ornamented +68091100,ARTICLES OF PLASTER OR OF COMPOSITIONS BASED ON PLASTER::Faced or reinforced with paper or paperboard only +68091900,ARTICLES OF PLASTER OR OF COMPOSITIONS BASED ON PLASTER::Other +68099000,ARTICLES OF PLASTER OR OF COMPOSITIONS BASED ON PLASTER::Other articles +68100000,articles of cement of concrete or of artificial stone whether or not reinforced tiles flagstones bricks and similar articles articles of cement of concrete or of artificial stone whether or not reinforced tiles flagstones bricks and similar articles +68101100,articles of cement of concrete or of artificial stone whether or not reinforced tiles flagstones bricks and similar articles articles of cement of concrete or of artificial stone whether or not reinforced tiles flagstones bricks and similar articles >> building blocks and bricks building blocks and bricks +68101110,"ARTICLES OF CEMENT, OF CONCRETE OR OF ARTIFICIAL STONE, WHETHER OR NOT REINFORCED:Building blocks and bricks :Cement bricks" +68101190,"ARTICLES OF CEMENT, OF CONCRETE OR OF ARTIFICIAL STONE, WHETHER 
OR NOT REINFORCED:Building blocks and bricks :Other" +68101900,articles of cement of concrete or of artificial stone whether or not reinforced tiles flagstones bricks and similar articles articles of cement of concrete or of artificial stone whether or not reinforced tiles flagstones bricks and similar articles >> other other +68101910,"ARTICLES OF CEMENT, OF CONCRETE OR OF ARTIFICIAL STONE, WHETHER OR NOT REINFORCED:Other :Cement tiles for mosaic" +68101990,"ARTICLES OF CEMENT, OF CONCRETE OR OF ARTIFICIAL STONE, WHETHER OR NOT REINFORCED:Other :Other" +68109100,"ARTICLES OF CEMENT, OF CONCRETE OR OF ARTIFICIAL STONE, WHETHER OR NOT REINFORCED::Prefabricated structural components for building or civil engineering" +68109900,articles of cement of concrete or of artificial stone whether or not reinforced tiles flagstones bricks and similar articles articles of cement of concrete or of artificial stone whether or not reinforced tiles flagstones bricks and similar articles >> other other +68109910,"ARTICLES OF CEMENT, OF CONCRETE OR OF ARTIFICIAL STONE, WHETHER OR NOT REINFORCED:Other :Concrete boulder" +68109990,"ARTICLES OF CEMENT, OF CONCRETE OR OF ARTIFICIAL STONE, WHETHER OR NOT REINFORCED:Other :Other" +68110000,articles of of cellulose or the like +68114000,articles of of cellulose or the like >> containing asbestos +68114010,"ARTICLES OF ASBESTOS-CEMENT, OF CELLULOSE FIBRE-CEMENT OR THE LIKE:Containing asbestos:Asbestos - cement sheets" +68114020,"ARTICLES OF ASBESTOS-CEMENT, OF CELLULOSE FIBRE-CEMENT OR THE LIKE:Containing asbestos:Asbestos - cement tiles(OLD tariff)" +68114090,"ARTICLES OF ASBESTOS-CEMENT, OF CELLULOSE FIBRE-CEMENT OR THE LIKE:Containing asbestos:Other" +68118100,"ARTICLES OF ASBESTOS-CEMENT, OF CELLULOSE FIBRE-CEMENT OR THE LIKE::Corrugated sheets" +68118200,"ARTICLES OF ASBESTOS-CEMENT, OF CELLULOSE FIBRE-CEMENT OR THE LIKE::Other sheets, panels, tiles and similar articles" +68118900,articles of of cellulose or the like >> other articles +68118910,"ARTICLES OF ASBESTOS-CEMENT, OF CELLULOSE FIBRE-CEMENT OR THE LIKE:other articles:Tubes, pipes and tube or pipe fittings" +68118990,"ARTICLES OF ASBESTOS-CEMENT, OF CELLULOSE FIBRE-CEMENT OR THE LIKE:other articles:Other" +68120000,fabricated asbestos fibres mixtures with a basis of asbestos or with a basis of asbestos and magnesium carbonate articles of such mixtures or of asbestos for example thread woven fabric clothing headgear footwear gaskets whether or notreinforced other than goods of heading 6811or 6813 +68128000,"FABRICATED ASBESTOS FIBRES; MIXTURES WITH A BASIS OF ASBESTOS OR WITH A BASIS OF ASBESTOS AND MAGNESIUM CARBONATE; ARTICLES OF SUCH MIXTURES OR OF ASBESTOS (FOR EXAMPLE, THREAD, WOVEN FABRIC, CLOTHING, HEADGEAR, FOOTWEAR, GASKETS), WHETHER OR NOT REINFORCED, OTHER THAN GOODS OF HEADING 6811 OR 6813::Of crocidolite" +68129100,"FABRICATED ASBESTOS FIBRES; MIXTURES WITH A BASIS OF ASBESTOS OR WITH A BASIS OF ASBESTOS AND MAGNESIUM CARBONATE; ARTICLES OF SUCH MIXTURES OR OF ASBESTOS (FOR EXAMPLE, THREAD, WOVEN FABRIC, CLOTHING, HEADGEAR, FOOTWEAR, GASKETS), WHETHER OR NOT REINFORCED, OTHER THAN GOODS OF HEADING 6811 OR 6813::Clothing, clothing accessories, footwear and headgear" +68129211,"FABRICATED ASBESTOS FIBRES; MIXTURES WITH A BASIS OF ASBESTOS OR WITH A BASIS OF ASBESTOS AND MAGNESIUM CARBONATE; ARTICLES OF SUCH MIXTURES OR OF ASBESTOS (FOR EXAMPLE, THREAD, WOVEN FABRIC, CLOTHING, HEADGEAR, FOOTWEAR, GASKETS), WHETHER OR NOT REINFORCED, OTHER THAN GOODS OF HEADING 6811 OR 6813:Paper, millboard and 
felt:Asbestos(OLD tariff)" +68129219,"FABRICATED ASBESTOS FIBRES; MIXTURES WITH A BASIS OF ASBESTOS OR WITH A BASIS OF ASBESTOS AND MAGNESIUM CARBONATE; ARTICLES OF SUCH MIXTURES OR OF ASBESTOS (FOR EXAMPLE, THREAD, WOVEN FABRIC, CLOTHING, HEADGEAR, FOOTWEAR, GASKETS), WHETHER OR NOT REINFORCED, OTHER THAN GOODS OF HEADING 6811 OR 6813:Paper, millboard and felt:Other(OLD tariff)" +68129290,"FABRICATED ASBESTOS FIBRES; MIXTURES WITH A BASIS OF ASBESTOS OR WITH A BASIS OF ASBESTOS AND MAGNESIUM CARBONATE; ARTICLES OF SUCH MIXTURES OR OF ASBESTOS (FOR EXAMPLE, THREAD, WOVEN FABRIC, CLOTHING, HEADGEAR, FOOTWEAR, GASKETS), WHETHER OR NOT REINFORCED, OTHER THAN GOODS OF HEADING 6811 OR 6813:Paper, millboard and felt:Other(OLD tariff)" +68129300,"FABRICATED ASBESTOS FIBRES; MIXTURES WITH A BASIS OF ASBESTOS OR WITH A BASIS OF ASBESTOS AND MAGNESIUM CARBONATE; ARTICLES OF SUCH MIXTURES OR OF ASBESTOS (FOR EXAMPLE, THREAD, WOVEN FABRIC, CLOTHING, HEADGEAR, FOOTWEAR, GASKETS), WHETHER OR NOT REINFORCED, OTHER THAN GOODS OF HEADING 6811 OR 6813::Compressed asbestos fibre jointing, in sheets or rolls(OLD tariff)" +68129911,"FABRICATED ASBESTOS FIBRES; MIXTURES WITH A BASIS OF ASBESTOS OR WITH A BASIS OF ASBESTOS AND MAGNESIUM CARBONATE; ARTICLES OF SUCH MIXTURES OR OF ASBESTOS (FOR EXAMPLE, THREAD, WOVEN FABRIC, CLOTHING, HEADGEAR, FOOTWEAR, GASKETS), WHETHER OR NOT REINFORCED, OTHER THAN GOODS OF HEADING 6811 OR 6813:Other:Asbestos(OLD tariff)" +68129919,"FABRICATED ASBESTOS FIBRES; MIXTURES WITH A BASIS OF ASBESTOS OR WITH A BASIS OF ASBESTOS AND MAGNESIUM CARBONATE; ARTICLES OF SUCH MIXTURES OR OF ASBESTOS (FOR EXAMPLE, THREAD, WOVEN FABRIC, CLOTHING, HEADGEAR, FOOTWEAR, GASKETS), WHETHER OR NOT REINFORCED, OTHER THAN GOODS OF HEADING 6811 OR 6813:Other:Other(OLD tariff)" +68129921,"FABRICATED ASBESTOS FIBRES; MIXTURES WITH A BASIS OF ASBESTOS OR WITH A BASIS OF ASBESTOS AND MAGNESIUM CARBONATE; ARTICLES OF SUCH MIXTURES OR OF ASBESTOS (FOR EXAMPLE, THREAD, WOVEN FABRIC, CLOTHING, HEADGEAR, FOOTWEAR, GASKETS), WHETHER OR NOT REINFORCED, OTHER THAN GOODS OF HEADING 6811 OR 6813:Other:Packing joints(OLD tariff)" +68129922,"FABRICATED ASBESTOS FIBRES; MIXTURES WITH A BASIS OF ASBESTOS OR WITH A BASIS OF ASBESTOS AND MAGNESIUM CARBONATE; ARTICLES OF SUCH MIXTURES OR OF ASBESTOS (FOR EXAMPLE, THREAD, WOVEN FABRIC, CLOTHING, HEADGEAR, FOOTWEAR, GASKETS), WHETHER OR NOT REINFORCED, OTHER THAN GOODS OF HEADING 6811 OR 6813:Other:Gaskets(OLD tariff)" +68129990,"FABRICATED ASBESTOS FIBRES; MIXTURES WITH A BASIS OF ASBESTOS OR WITH A BASIS OF ASBESTOS AND MAGNESIUM CARBONATE; ARTICLES OF SUCH MIXTURES OR OF ASBESTOS (FOR EXAMPLE, THREAD, WOVEN FABRIC, CLOTHING, HEADGEAR, FOOTWEAR, GASKETS), WHETHER OR NOT REINFORCED, OTHER THAN GOODS OF HEADING 6811 OR 6813:Other:Other(OLD tariff)" +68130000,friction material and articles thereof for example sheets rolls strips segments discs washers pads not mounted for brakes for clutches or the like with a basis of asbestos of other mineral substances or of cellulose whether or not combined with textile or other materials +68132000,friction material and articles thereof for example sheets rolls strips segments discs washers pads not mounted for brakes for clutches or the like with a basis of asbestos of other mineral substances or of cellulose whether or not combined with textile or other materials >> containing asbestos +68132010,"FRICTION MATERIAL AND ARTICLES THEREOF (FOR EXAMPLE, SHEETS, ROLLS, STRIPS, SEGMENTS, DISCS, WASHERS, PADS), NOT MOUNTED, FOR BRAKES, FOR 
CLUTCHES OR THE LIKE, WITH A BASIS OF ASBESTOS, OF OTHER MINERAL SUBSTANCES OR OF CELLULOSE, WHETHER OR NOT COMBINED WITH TEXTILE OR OTHER MATERIALS:Containing asbestos:Brake lining and pads" +68132090,"FRICTION MATERIAL AND ARTICLES THEREOF (FOR EXAMPLE, SHEETS, ROLLS, STRIPS, SEGMENTS, DISCS, WASHERS, PADS), NOT MOUNTED, FOR BRAKES, FOR CLUTCHES OR THE LIKE, WITH A BASIS OF ASBESTOS, OF OTHER MINERAL SUBSTANCES OR OF CELLULOSE, WHETHER OR NOT COMBINED WITH TEXTILE OR OTHER MATERIALS:Containing asbestos:Asbestos friction materials" +68138100,"FRICTION MATERIAL AND ARTICLES THEREOF (FOR EXAMPLE, SHEETS, ROLLS, STRIPS, SEGMENTS, DISCS, WASHERS, PADS), NOT MOUNTED, FOR BRAKES, FOR CLUTCHES OR THE LIKE, WITH A BASIS OF ASBESTOS, OF OTHER MINERAL SUBSTANCES OR OF CELLULOSE, WHETHER OR NOT COMBINED WITH TEXTILE OR OTHER MATERIALS::Brake linings and pads" +68138900,"FRICTION MATERIAL AND ARTICLES THEREOF (FOR EXAMPLE, SHEETS, ROLLS, STRIPS, SEGMENTS, DISCS, WASHERS, PADS), NOT MOUNTED, FOR BRAKES, FOR CLUTCHES OR THE LIKE, WITH A BASIS OF ASBESTOS, OF OTHER MINERAL SUBSTANCES OR OF CELLULOSE, WHETHER OR NOT COMBINED WITH TEXTILE OR OTHER MATERIALS::Other" +68140000,worked mica and articles of mica including agglomerated or reconstituted mica whether or not on a support of paper paperboard or other materials +68141000,worked mica and articles of mica including agglomerated or reconstituted mica whether or not on a support of paper paperboard or other materials >> plates sheets and strips of agglomerated or reconstituted mica whether or not on a support +68141010,"WORKED MICA AND ARTICLES OF MICA, INCLUDING AGGLOMERATED OR RECONSTITUTED MICA, WHETHER OR NOT ON A SUPPORT OF PAPER, PAPERBOARD OR OTHER MATERIALS:Plates, sheets and strips of agglomerated or reconstituted mica, whether or not on a support :Cut mica condenser films or plates" +68141020,"WORKED MICA AND ARTICLES OF MICA, INCLUDING AGGLOMERATED OR RECONSTITUTED MICA, WHETHER OR NOT ON A SUPPORT OF PAPER, PAPERBOARD OR OTHER MATERIALS:Plates, sheets and strips of agglomerated or reconstituted mica, whether or not on a support :Sheets and strips cut to shape" +68141030,"WORKED MICA AND ARTICLES OF MICA, INCLUDING AGGLOMERATED OR RECONSTITUTED MICA, WHETHER OR NOT ON A SUPPORT OF PAPER, PAPERBOARD OR OTHER MATERIALS:Plates, sheets and strips of agglomerated or reconstituted mica, whether or not on a support :Washers and discs" +68141090,"WORKED MICA AND ARTICLES OF MICA, INCLUDING AGGLOMERATED OR RECONSTITUTED MICA, WHETHER OR NOT ON A SUPPORT OF PAPER, PAPERBOARD OR OTHER MATERIALS:Plates, sheets and strips of agglomerated or reconstituted mica, whether or not on a support :Other" +68149000,worked mica and articles of mica including agglomerated or reconstituted mica whether or not on a support of paper paperboard or other materials >> other +68149010,"WORKED MICA AND ARTICLES OF MICA, INCLUDING AGGLOMERATED OR RECONSTITUTED MICA, WHETHER OR NOT ON A SUPPORT OF PAPER, PAPERBOARD OR OTHER MATERIALS:Other :Mica stacked units" +68149020,"WORKED MICA AND ARTICLES OF MICA, INCLUDING AGGLOMERATED OR RECONSTITUTED MICA, WHETHER OR NOT ON A SUPPORT OF PAPER, PAPERBOARD OR OTHER MATERIALS:Other :Silvered mica, capacitor plates or silvered mica plates" +68149030,"WORKED MICA AND ARTICLES OF MICA, INCLUDING AGGLOMERATED OR RECONSTITUTED MICA, WHETHER OR NOT ON A SUPPORT OF PAPER, PAPERBOARD OR OTHER MATERIALS:Other :Micanite and all sorts of built up mica" +68149040,"WORKED MICA AND ARTICLES OF MICA, INCLUDING AGGLOMERATED OR RECONSTITUTED MICA, 
WHETHER OR NOT ON A SUPPORT OF PAPER, PAPERBOARD OR OTHER MATERIALS:Other :Micapaper or reconstituted mica paper" +68149050,"WORKED MICA AND ARTICLES OF MICA, INCLUDING AGGLOMERATED OR RECONSTITUTED MICA, WHETHER OR NOT ON A SUPPORT OF PAPER, PAPERBOARD OR OTHER MATERIALS:Other :Moulded glass bonded or glass bonded mica" +68149060,"WORKED MICA AND ARTICLES OF MICA, INCLUDING AGGLOMERATED OR RECONSTITUTED MICA, WHETHER OR NOT ON A SUPPORT OF PAPER, PAPERBOARD OR OTHER MATERIALS:Other :Mica bricks" +68149090,"WORKED MICA AND ARTICLES OF MICA, INCLUDING AGGLOMERATED OR RECONSTITUTED MICA, WHETHER OR NOT ON A SUPPORT OF PAPER, PAPERBOARD OR OTHER MATERIALS:Other :Other" +68150000,articles of stone or of other mineral substances including carbon fibres articles of carbon fibres and articles of peat not elsewhere specified or included carbon fibres articles of carbon fibres for non electrical uses other articles of graphite or other carbon for uses articles of stone or of other mineral substances including carbon fibres articles of carbon fibres and articles of peat not elsewhere specified or included carbon fibres articles of carbon fibres for non electrical uses other articles of graphite or other carbon for uses +68151010,"ARTICLES OF STONE OR OF OTHER MINERAL SUBSTANCES (INCLUDING CARBON FIBRES, ARTICLES OF CARBON FIBRES AND ARTICLES OF PEAT), NOT ELSEWHERE SPECIFIED OR INCLUDED:Non-electrical articles of graphite or other carbon :Graphite filter candle(OLD tariff)" +68151020,"ARTICLES OF STONE OR OF OTHER MINERAL SUBSTANCES (INCLUDING CARBON FIBRES, ARTICLES OF CARBON FIBRES AND ARTICLES OF PEAT), NOT ELSEWHERE SPECIFIED OR INCLUDED:Non-electrical articles of graphite or other carbon :Non-electrical articles of graphite(OLD tariff)" +68151090,"ARTICLES OF STONE OR OF OTHER MINERAL SUBSTANCES (INCLUDING CARBON FIBRES, ARTICLES OF CARBON FIBRES AND ARTICLES OF PEAT), NOT ELSEWHERE SPECIFIED OR INCLUDED:Non-electrical articles of graphite or other carbon :Other(OLD tariff)" +68151100,"ARTICLES OF STONE OR OF OTHER MINERAL SUBSTANCES (INCLUDING CARBON FIBRES, ARTICLES OF CARBON FIBRES AND ARTICLES OF PEAT), NOT ELSEWHERE SPECIFIED OR INCLUDED::Carbon fibres" +68151200,"ARTICLES OF STONE OR OF OTHER MINERAL SUBSTANCES (INCLUDING CARBON FIBRES, ARTICLES OF CARBON FIBRES AND ARTICLES OF PEAT), NOT ELSEWHERE SPECIFIED OR INCLUDED::Fabrics of carbon fibres" +68151300,"ARTICLES OF STONE OR OF OTHER MINERAL SUBSTANCES (INCLUDING CARBON FIBRES, ARTICLES OF CARBON FIBRES AND ARTICLES OF PEAT), NOT ELSEWHERE SPECIFIED OR INCLUDED::Other articles of carbon fibres" +68151900,"ARTICLES OF STONE OR OF OTHER MINERAL SUBSTANCES (INCLUDING CARBON FIBRES, ARTICLES OF CARBON FIBRES AND ARTICLES OF PEAT), NOT ELSEWHERE SPECIFIED OR INCLUDED::Other" +68152000,"ARTICLES OF STONE OR OF OTHER MINERAL SUBSTANCES (INCLUDING CARBON FIBRES, ARTICLES OF CARBON FIBRES AND ARTICLES OF PEAT), NOT ELSEWHERE SPECIFIED OR INCLUDED::Articles of peat" +68159100,"ARTICLES OF STONE OR OF OTHER MINERAL SUBSTANCES (INCLUDING CARBON FIBRES, ARTICLES OF CARBON FIBRES AND ARTICLES OF PEAT), NOT ELSEWHERE SPECIFIED OR INCLUDED::Containing magnesite, dolomite or chromite" +68159900,articles of stone or of other mineral substances including carbon fibres articles of carbon fibres and articles of peat not elsewhere specified or included carbon fibres articles of carbon fibres for non electrical uses other articles of graphite or other carbon for uses articles of stone or of other mineral substances including carbon fibres articles of carbon 
fibres and articles of peat not elsewhere specified or included carbon fibres articles of carbon fibres for non electrical uses other articles of graphite or other carbon for uses >> other other +68159910,"ARTICLES OF STONE OR OF OTHER MINERAL SUBSTANCES (INCLUDING CARBON FIBRES, ARTICLES OF CARBON FIBRES AND ARTICLES OF PEAT), NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Bricks and tiles of fly ash" +68159920,"ARTICLES OF STONE OR OF OTHER MINERAL SUBSTANCES (INCLUDING CARBON FIBRES, ARTICLES OF CARBON FIBRES AND ARTICLES OF PEAT), NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Sanitary wares, kitchen wares and other made up articles of fly ash" +68159930,articles of stone or of other mineral substances including carbon fibres articles of carbon fibres and articles of peat not elsewhere specified or included carbon fibres articles of carbon fibres for non electrical uses other articles of graphite or other carbon for uses articles of stone or of other mineral substances including carbon fibres articles of carbon fibres and articles of peat not elsewhere specified or included carbon fibres articles of carbon fibres for non electrical uses other articles of graphite or other carbon for uses >> other other >> basalt fibre filament and articles thereof conforming to astm d3039 c1185 +68159990,"ARTICLES OF STONE OR OF OTHER MINERAL SUBSTANCES (INCLUDING CARBON FIBRES, ARTICLES OF CARBON FIBRES AND ARTICLES OF PEAT), NOT ELSEWHERE SPECIFIED OR INCLUDED:Other :Other" +69010010,"BRICKS, BLOCKS, TILES AND OTHER CERAMIC GOODS OF SILICEOUS FOSSIL MEALS (FOR EXAMPLE, KIESELGUHR, TRIPOLITE OR DIATOMITE) OR OF SIMILAR SILICEOUS EARTHS:Bricks, blocks, tiles and other ceramic goods of siliceous fossil meals (for example, kieselguhr, tripolite or diatomite) or of similar siliceous earths :Bricks(OLD tariff)" +69010020,"BRICKS, BLOCKS, TILES AND OTHER CERAMIC GOODS OF SILICEOUS FOSSIL MEALS (FOR EXAMPLE, KIESELGUHR, TRIPOLITE OR DIATOMITE) OR OF SIMILAR SILICEOUS EARTHS:Bricks, blocks, tiles and other ceramic goods of siliceous fossil meals (for example, kieselguhr, tripolite or diatomite) or of similar siliceous earths :Blocks(OLD tariff)" +69010030,"BRICKS, BLOCKS, TILES AND OTHER CERAMIC GOODS OF SILICEOUS FOSSIL MEALS (FOR EXAMPLE, KIESELGUHR, TRIPOLITE OR DIATOMITE) OR OF SIMILAR SILICEOUS EARTHS:Bricks, blocks, tiles and other ceramic goods of siliceous fossil meals (for example, kieselguhr, tripolite or diatomite) or of similar siliceous earths :Tiles(OLD tariff)" +69010090,"BRICKS, BLOCKS, TILES AND OTHER CERAMIC GOODS OF SILICEOUS FOSSIL MEALS (FOR EXAMPLE, KIESELGUHR, TRIPOLITE OR DIATOMITE) OR OF SIMILAR SILICEOUS EARTHS:Bricks, blocks, tiles and other ceramic goods of siliceous fossil meals (for example, kieselguhr, tripolite or diatomite) or of similar siliceous earths :Other(OLD tariff)" +69021010,"REFRACTORY BRICKS, BLOCKS, TILES AND SIMILAR REFRACTORY CERAMIC CONSTRUCTIONAL GOODS, OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR SIMILAR SILICEOUS EARTHS:Containing by weight, singly or together, more than 50% of the elements Mg, Ca or Cr, expressed as MgO, CaO or Cr2O3 :Magnesite bricks and shapes(OLD tariff)" +69021020,"REFRACTORY BRICKS, BLOCKS, TILES AND SIMILAR REFRACTORY CERAMIC CONSTRUCTIONAL GOODS, OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR SIMILAR SILICEOUS EARTHS:Containing by weight, singly or together, more than 50% of the elements Mg, Ca or Cr, expressed as MgO, CaO or Cr2O3 :Chrome magnesite bricks(OLD tariff)" +69021030,"REFRACTORY BRICKS, BLOCKS, TILES AND SIMILAR REFRACTORY CERAMIC 
CONSTRUCTIONAL GOODS, OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR SIMILAR SILICEOUS EARTHS:Containing by weight, singly or together, more than 50% of the elements Mg, Ca or Cr, expressed as MgO, CaO or Cr2O3 :Magnesite chrome bricks and shapes(OLD tariff)" +69021040,"REFRACTORY BRICKS, BLOCKS, TILES AND SIMILAR REFRACTORY CERAMIC CONSTRUCTIONAL GOODS, OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR SIMILAR SILICEOUS EARTHS:Containing by weight, singly or together, more than 50% of the elements Mg, Ca or Cr, expressed as MgO, CaO or Cr2O3 :Magnesia carbon bricks and shapes(OLD tariff)" +69021050,"REFRACTORY BRICKS, BLOCKS, TILES AND SIMILAR REFRACTORY CERAMIC CONSTRUCTIONAL GOODS, OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR SIMILAR SILICEOUS EARTHS:Containing by weight, singly or together, more than 50% of the elements Mg, Ca or Cr, expressed as MgO, CaO or Cr2O3 :Direct bonded basic bricks and shapes(OLD tariff)" +69021090,"REFRACTORY BRICKS, BLOCKS, TILES AND SIMILAR REFRACTORY CERAMIC CONSTRUCTIONAL GOODS, OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR SIMILAR SILICEOUS EARTHS:Containing by weight, singly or together, more than 50% of the elements Mg, Ca or Cr, expressed as MgO, CaO or Cr2O3 :Other(OLD tariff)" +69022010,"REFRACTORY BRICKS, BLOCKS, TILES AND SIMILAR REFRACTORY CERAMIC CONSTRUCTIONAL GOODS, OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR SIMILAR SILICEOUS EARTHS:Containing by weight more than 50% of alumina (Al2O3), of silica (SiO2) or of a mixture or compound of these products :Silica bricks and shapes(OLD tariff)" +69022020,"REFRACTORY BRICKS, BLOCKS, TILES AND SIMILAR REFRACTORY CERAMIC CONSTRUCTIONAL GOODS, OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR SIMILAR SILICEOUS EARTHS:Containing by weight more than 50% of alumina (Al2O3), of silica (SiO2) or of a mixture or compound of these products :High alumina bricks and shapes(OLD tariff)" +69022030,"REFRACTORY BRICKS, BLOCKS, TILES AND SIMILAR REFRACTORY CERAMIC CONSTRUCTIONAL GOODS, OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR SIMILAR SILICEOUS EARTHS:Containing by weight more than 50% of alumina (Al2O3), of silica (SiO2) or of a mixture or compound of these products :Alumina carbon bricks and shapes(OLD tariff)" +69022040,"REFRACTORY BRICKS, BLOCKS, TILES AND SIMILAR REFRACTORY CERAMIC CONSTRUCTIONAL GOODS, OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR SIMILAR SILICEOUS EARTHS:Containing by weight more than 50% of alumina (Al2O3), of silica (SiO2) or of a mixture or compound of these products :Silicon Carbide bricks and shapes(OLD tariff)" +69022050,"REFRACTORY BRICKS, BLOCKS, TILES AND SIMILAR REFRACTORY CERAMIC CONSTRUCTIONAL GOODS, OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR SIMILAR SILICEOUS EARTHS:Containing by weight more than 50% of alumina (Al2O3), of silica (SiO2) or of a mixture or compound of these products :Mullite bricks(OLD tariff)" +69022090,"REFRACTORY BRICKS, BLOCKS, TILES AND SIMILAR REFRACTORY CERAMIC CONSTRUCTIONAL GOODS, OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR SIMILAR SILICEOUS EARTHS:Containing by weight more than 50% of alumina (Al2O3), of silica (SiO2) or of a mixture or compound of these products :Other(OLD tariff)" +69029010,"REFRACTORY BRICKS, BLOCKS, TILES AND SIMILAR REFRACTORY CERAMIC CONSTRUCTIONAL GOODS, OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR SIMILAR SILICEOUS EARTHS:Other :Fire clay bricks and shapes(OLD tariff)" +69029020,"REFRACTORY BRICKS, BLOCKS, TILES AND SIMILAR REFRACTORY CERAMIC CONSTRUCTIONAL GOODS, OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR SIMILAR SILICEOUS 
EARTHS:Other :Graphite bricks and shapes(OLD tariff)" +69029030,"REFRACTORY BRICKS, BLOCKS, TILES AND SIMILAR REFRACTORY CERAMIC CONSTRUCTIONAL GOODS, OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR SIMILAR SILICEOUS EARTHS:Other :Vermiculite insulation bricks(OLD tariff)" +69029040,"REFRACTORY BRICKS, BLOCKS, TILES AND SIMILAR REFRACTORY CERAMIC CONSTRUCTIONAL GOODS, OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR SIMILAR SILICEOUS EARTHS:Other :Clay graphite stopper heads(OLD tariff)" +69029090,"REFRACTORY BRICKS, BLOCKS, TILES AND SIMILAR REFRACTORY CERAMIC CONSTRUCTIONAL GOODS, OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR SIMILAR SILICEOUS EARTHS:Other :Other(OLD tariff)" +69031000,"OTHER REFRACTORY CERAMIC GOODS (FOR EXAMPLE, RETORTS, CRUCIBLES, MUFFLES, NOZZLES, PLUGS, SUPPORTS, CUPELS, TUBES, PIPES, SHEATHS AND RODS), OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR OF SIMILAR SILICEOUS EARTHS:Containing by weight more than 50% of graphite or other carbon or of a mixture of these products :Containing by wieighot, more than 50% free carbon(OLD tariff)" +69031010,"OTHER REFRACTORY CERAMIC GOODS (FOR EXAMPLE, RETORTS, CRUCIBLES, MUFFLES, NOZZLES, PLUGS, SUPPORTS, CUPELS, TUBES, PIPES, SHEATHS AND RODS), OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR OF SIMILAR SILICEOUS EARTHS:Containing by weight more than 50% of graphite or other carbon or of a mixture of these products :Magnesia carbon bricks, shapes and graphetised alumina(OLD tariff)" +69031090,"OTHER REFRACTORY CERAMIC GOODS (FOR EXAMPLE, RETORTS, CRUCIBLES, MUFFLES, NOZZLES, PLUGS, SUPPORTS, CUPELS, TUBES, PIPES, SHEATHS AND RODS), OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR OF SIMILAR SILICEOUS EARTHS:Containing by weight more than 50% of graphite or other carbon or of a mixture of these products :Other(OLD tariff)" +69032010,"OTHER REFRACTORY CERAMIC GOODS (FOR EXAMPLE, RETORTS, CRUCIBLES, MUFFLES, NOZZLES, PLUGS, SUPPORTS, CUPELS, TUBES, PIPES, SHEATHS AND RODS), OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR OF SIMILAR SILICEOUS EARTHS:Containing by weight more than 50% of alumina (Al2O3) or of a mixture or compound of alumina and of silica (SiO2) :Silicon carbide crucibles(OLD tariff)" +69032090,"OTHER REFRACTORY CERAMIC GOODS (FOR EXAMPLE, RETORTS, CRUCIBLES, MUFFLES, NOZZLES, PLUGS, SUPPORTS, CUPELS, TUBES, PIPES, SHEATHS AND RODS), OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR OF SIMILAR SILICEOUS EARTHS:Containing by weight more than 50% of alumina (Al2O3) or of a mixture or compound of alumina and of silica (SiO2) :Other(OLD tariff)" +69039010,"OTHER REFRACTORY CERAMIC GOODS (FOR EXAMPLE, RETORTS, CRUCIBLES, MUFFLES, NOZZLES, PLUGS, SUPPORTS, CUPELS, TUBES, PIPES, SHEATHS AND RODS), OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR OF SIMILAR SILICEOUS EARTHS:Other :Zircon or zircon-mullite refractories(OLD tariff)" +69039020,"OTHER REFRACTORY CERAMIC GOODS (FOR EXAMPLE, RETORTS, CRUCIBLES, MUFFLES, NOZZLES, PLUGS, SUPPORTS, CUPELS, TUBES, PIPES, SHEATHS AND RODS), OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR OF SIMILAR SILICEOUS EARTHS:Other :Basalt tiles(OLD tariff)" +69039030,"OTHER REFRACTORY CERAMIC GOODS (FOR EXAMPLE, RETORTS, CRUCIBLES, MUFFLES, NOZZLES, PLUGS, SUPPORTS, CUPELS, TUBES, PIPES, SHEATHS AND RODS), OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR OF SIMILAR SILICEOUS EARTHS:Other :Ceramic fibres(OLD tariff)" +69039040,"OTHER REFRACTORY CERAMIC GOODS (FOR EXAMPLE, RETORTS, CRUCIBLES, MUFFLES, NOZZLES, PLUGS, SUPPORTS, CUPELS, TUBES, PIPES, SHEATHS AND RODS), OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR OF 
SIMILAR SILICEOUS EARTHS:Other :Monolithics or castables (fire-clay, basic, silica, high alumina, insulating)(OLD tariff)" +69039090,"OTHER REFRACTORY CERAMIC GOODS (FOR EXAMPLE, RETORTS, CRUCIBLES, MUFFLES, NOZZLES, PLUGS, SUPPORTS, CUPELS, TUBES, PIPES, SHEATHS AND RODS), OTHER THAN THOSE OF SILICEOUS FOSSIL MEALS OR OF SIMILAR SILICEOUS EARTHS:Other :Other(OLD tariff)" +69040000,ceramic building bricks flooring blocks support or filler tiles and the like +69041000,"CERAMIC BUILDING BRICKS, FLOORING BLOCKS, SUPPORT OR FILLER TILES AND THE LIKE::Building bricks" +69049000,"CERAMIC BUILDING BRICKS, FLOORING BLOCKS, SUPPORT OR FILLER TILES AND THE LIKE::Other" +69050000,roofing tiles cowls chimney liners architectural ornaments and other ceramic constructional goods +69051000,"ROOFING TILES, CHIMNEY-POTS, COWLS, CHIMNEY LINERS, ARCHITECTURAL ORNAMENTS AND OTHER CERAMIC CONSTRUCTIONAL GOODS::Roofing tiles" +69059000,"ROOFING TILES, CHIMNEY-POTS, COWLS, CHIMNEY LINERS, ARCHITECTURAL ORNAMENTS AND OTHER CERAMIC CONSTRUCTIONAL GOODS::Other" +69060000,"::CERAMIC PIPES, CONDUITS, GUTTERING AND PIPE FITTINGS" +69070000,ceramic flags and paving hearth or wall tiles ceramic mosaic cubes and the like whether or not on a backing finishing ceramics flags and paving hearth or wall tiles other than those of 6907 30 and 6907 40 +69071010,"UNGLAZED CERAMIC FLAGS AND PAVING, HEARTH OR WALL TILES; UNGLAZED CERAMIC MOSAIC CUBES AND THE LIKE, WHETHER OR NOT ON A BACKING:Tiles, cubes and similar articles, whether or not rectangular, the largest surface area of which is capable of being enclosed in a square the side of which is less than 7 cm :Vitrified tiles, whether polished or not(OLD tariff)" +69071090,"CERAMIC FLAGS AND PAVING, HEARTH OR WALL TILES; CERAMIC MOSAIC CUBES AND THE LIKE, WHETHER OR NOT ON A BACKING; FINISHING CERAMICS:Tiles, cubes and similar articles, whether or not rectangular, the largest surface area of which is capable of being enclosed in a square the side of which is less than 7 cm :Other(OLD tariff)" +69072100,"CERAMIC FLAGS AND PAVING, HEARTH OR WALL TILES; CERAMIC MOSAIC CUBES AND THE LIKE, WHETHER OR NOT ON A BACKING; FINISHING CERAMICS::Of a water absorption coefficient by weight not exceeding 0.5%" +69072200,"CERAMIC FLAGS AND PAVING, HEARTH OR WALL TILES; CERAMIC MOSAIC CUBES AND THE LIKE, WHETHER OR NOT ON A BACKING; FINISHING CERAMICS::Of a water absorption coefficient by weight exceeding 0.5% but not exceeding 10%" +69072300,"CERAMIC FLAGS AND PAVING, HEARTH OR WALL TILES; CERAMIC MOSAIC CUBES AND THE LIKE, WHETHER OR NOT ON A BACKING; FINISHING CERAMICS::Of a water absorption coefficient by weight exceeding 10%" +69073000,ceramic flags and paving hearth or wall tiles ceramic mosaic cubes and the like whether or not on a backing finishing ceramics flags and paving hearth or wall tiles other than those of 6907 30 and 6907 40 >> mosaic cubes and the like other than those of +69073010,"CERAMIC FLAGS AND PAVING, HEARTH OR WALL TILES; CERAMIC MOSAIC CUBES AND THE LIKE, WHETHER OR NOT ON A BACKING; FINISHING CERAMICS:Mosaic cubes and the like, other than those of sub-heading 6907 40:MOSAIC CUBES AND THE LIKE, OTHER THAN THOSE OF SUB HEADING 690740(OLD tariff)" +69074010,"CERAMIC FLAGS AND PAVING, HEARTH OR WALL TILES; CERAMIC MOSAIC CUBES AND THE LIKE, WHETHER OR NOT ON A BACKING; FINISHING CERAMICS:Finishing ceramics:Finishing ceramics(OLD tariff)" +69079010,"CERAMIC FLAGS AND PAVING, HEARTH OR WALL TILES; CERAMIC MOSAIC CUBES AND THE LIKE, WHETHER OR NOT ON A BACKING; FINISHING 
CERAMICS:Other :Vitrified tiles, whether polished or not(OLD tariff)" +69079090,"UNGLAZED CERAMIC FLAGS AND PAVING, HEARTH OR WALL TILES; UNGLAZED CERAMIC MOSAIC CUBES AND THE LIKE, WHETHER OR NOT ON A BACKING:Other :Other(OLD tariff)" +69081010,"GLAZED CERAMIC FLAGS AND PAVING, HEARTH OR WALL TILES; GLAZED CERAMIC MOSAIC CUBES AND THE LIKE, WHETHER OR NOT ON A BACKING:Tiles, cubes and similar articles, whether or not rectangular, the largest surface area of which is capable of being enclosed in a square the side of which is less than 7 cm :Ceramic mosaic cubes(OLD tariff)" +69081020,"GLAZED CERAMIC FLAGS AND PAVING, HEARTH OR WALL TILES; GLAZED CERAMIC MOSAIC CUBES AND THE LIKE, WHETHER OR NOT ON A BACKING:Tiles, cubes and similar articles, whether or not rectangular, the largest surface area of which is capable of being enclosed in a square the side of which is less than 7 cm :Ceramic mosaic tiles(OLD tariff)" +69081090,"GLAZED CERAMIC FLAGS AND PAVING, HEARTH OR WALL TILES; GLAZED CERAMIC MOSAIC CUBES AND THE LIKE, WHETHER OR NOT ON A BACKING:Tiles, cubes and similar articles, whether or not rectangular, the largest surface area of which is capable of being enclosed in a square the side of which is less than 7 cm :Other(OLD tariff)" +69089010,"GLAZED CERAMIC FLAGS AND PAVING, HEARTH OR WALL TILES; GLAZED CERAMIC MOSAIC CUBES AND THE LIKE, WHETHER OR NOT ON A BACKING:Other:Ceramic mosaic cubes(OLD tariff)" +69089020,"GLAZED CERAMIC FLAGS AND PAVING, HEARTH OR WALL TILES; GLAZED CERAMIC MOSAIC CUBES AND THE LIKE, WHETHER OR NOT ON A BACKING:Other:Ceramic mosaic tiles(OLD tariff)" +69089090,"GLAZED CERAMIC FLAGS AND PAVING, HEARTH OR WALL TILES; GLAZED CERAMIC MOSAIC CUBES AND THE LIKE, WHETHER OR NOT ON A BACKING:Other:Other(OLD tariff)" +69090000,ceramic wares for laboratory chemical or other technical uses ceramic troughs tubs and similar receptacles of a kind used in agriculture ceramic pots jars and similar articles of a kind used for the conveyance or packing of goods ceramic wares for laboratory chemical or other technical uses +69091100,"CERAMIC WARES FOR LABORATORY, CHEMICAL OR OTHER TECHNICAL USES; CERAMIC TROUGHS, TUBS AND SIMILAR RECEPTACLES OF A KIND USED IN AGRICULTURE; CERAMIC POTS, JARS AND SIMILAR ARTICLES OF A KIND USED FOR THE CONVEYANCE OR PACKING OF GOODS::Of porcelain or china" +69091200,"CERAMIC WARES FOR LABORATORY, CHEMICAL OR OTHER TECHNICAL USES; CERAMIC TROUGHS, TUBS AND SIMILAR RECEPTACLES OF A KIND USED IN AGRICULTURE; CERAMIC POTS, JARS AND SIMILAR ARTICLES OF A KIND USED FOR THE CONVEYANCE OR PACKING OF GOODS::Articles having a hardness equivalent to 9 or more on the Mohs scale" +69091900,ceramic wares for laboratory chemical or other technical uses ceramic troughs tubs and similar receptacles of a kind used in agriculture ceramic pots jars and similar articles of a kind used for the conveyance or packing of goods ceramic wares for laboratory chemical or other technical uses >> other +69091910,"CERAMIC WARES FOR LABORATORY, CHEMICAL OR OTHER TECHNICAL USES; CERAMIC TROUGHS, TUBS AND SIMILAR RECEPTACLES OF A KIND USED IN AGRICULTURE; CERAMIC POTS, JARS AND SIMILAR ARTICLES OF A KIND USED FOR THE CONVEYANCE OR PACKING OF GOODS:Other :Ceramic filter candle" +69091990,"CERAMIC WARES FOR LABORATORY, CHEMICAL OR OTHER TECHNICAL USES; CERAMIC TROUGHS, TUBS AND SIMILAR RECEPTACLES OF A KIND USED IN AGRICULTURE; CERAMIC POTS, JARS AND SIMILAR ARTICLES OF A KIND USED FOR THE CONVEYANCE OR PACKING OF GOODS:Other :Other" +69099000,"CERAMIC WARES FOR LABORATORY, CHEMICAL 
OR OTHER TECHNICAL USES; CERAMIC TROUGHS, TUBS AND SIMILAR RECEPTACLES OF A KIND USED IN AGRICULTURE; CERAMIC POTS, JARS AND SIMILAR ARTICLES OF A KIND USED FOR THE CONVEYANCE OR PACKING OF GOODS::Other" +69100000,ceramic sinks wash basins wash basin pedestals baths bidets water closet pans flushing cisterns urinals and similar sanitary fixtures +69101000,"CERAMIC SINKS, WASH BASINS, WASH BASIN PEDESTALS, BATHS, BIDETS, WATER CLOSET PANS, FLUSHING CISTERNS, URINALS AND SIMILAR SANITARY FIXTURES::Of porcelain or china" +69109000,"CERAMIC SINKS, WASH BASINS, WASH BASIN PEDESTALS, BATHS, BIDETS, WATER CLOSET PANS, FLUSHING CISTERNS, URINALS AND SIMILAR SANITARY FIXTURES::Other" +69110000,tableware kitchenware other household articles and toilet articles of porcelain or china +69111000,tableware kitchenware other household articles and toilet articles of porcelain or china >> tableware and kitchenware tableware +69111011,"TABLEWARE, KITCHENWARE, OTHER HOUSEHOLD ARTICLES AND TOILET ARTICLES, OF PORCELAIN OR CHINA:Tableware and kitchenware:Of bone china and soft porcelain" +69111019,"TABLEWARE, KITCHENWARE, OTHER HOUSEHOLD ARTICLES AND TOILET ARTICLES, OF PORCELAIN OR CHINA:Tableware and kitchenware:Other" +69111021,"TABLEWARE, KITCHENWARE, OTHER HOUSEHOLD ARTICLES AND TOILET ARTICLES, OF PORCELAIN OR CHINA:Tableware and kitchenware:Of Bone china and soft porcelain" +69111029,"TABLEWARE, KITCHENWARE, OTHER HOUSEHOLD ARTICLES AND TOILET ARTICLES, OF PORCELAIN OR CHINA:Tableware and kitchenware:Other" +69119000,tableware kitchenware other household articles and toilet articles of porcelain or china >> other +69119010,"TABLEWARE, KITCHENWARE, OTHER HOUSEHOLD ARTICLES AND TOILET ARTICLES, OF PORCELAIN OR CHINA:Other :Toilet articles" +69119020,"TABLEWARE, KITCHENWARE, OTHER HOUSEHOLD ARTICLES AND TOILET ARTICLES, OF PORCELAIN OR CHINA:Other :Water filters of a capacity not exceeding 40 litres" +69119090,"TABLEWARE, KITCHENWARE, OTHER HOUSEHOLD ARTICLES AND TOILET ARTICLES, OF PORCELAIN OR CHINA:Other :Other" +69120000,ceramic tableware kitchenware other household articles and toilet articles other than of porcelain or china ceramic tableware kitchenware other household articles and toilet articles other than of porcelain or china +69120010,"CERAMIC TABLEWARE, KITCHENWARE, OTHER HOUSEHOLD ARTICLES AND TOILET ARTICLES, OTHER THAN OF PORCELAIN OR CHINA:Ceramic tableware, kitchenware, other household articles and toilet articles, other than of porcelain or china :Tableware" +69120020,"CERAMIC TABLEWARE, KITCHENWARE, OTHER HOUSEHOLD ARTICLES AND TOILET ARTICLES, OTHER THAN OF PORCELAIN OR CHINA:Ceramic tableware, kitchenware, other household articles and toilet articles, other than of porcelain or china :Kitchenware" +69120030,"CERAMIC TABLEWARE, KITCHENWARE, OTHER HOUSEHOLD ARTICLES AND TOILET ARTICLES, OTHER THAN OF PORCELAIN OR CHINA:Ceramic tableware, kitchenware, other household articles and toilet articles, other than of porcelain or china :Toilet articles" +69120040,"CERAMIC TABLEWARE, KITCHENWARE, OTHER HOUSEHOLD ARTICLES AND TOILET ARTICLES, OTHER THAN OF PORCELAIN OR CHINA:Ceramic tableware, kitchenware, other household articles and toilet articles, other than of porcelain or china :Clay articles" +69120090,"CERAMIC TABLEWARE, KITCHENWARE, OTHER HOUSEHOLD ARTICLES AND TOILET ARTICLES, OTHER THAN OF PORCELAIN OR CHINA:Ceramic tableware, kitchenware, other household articles and toilet articles, other than of porcelain or china :Other" +69130000,statuettes and other ornamental ceramic articles 
+69131000,STATUETTES AND OTHER ORNAMENTAL CERAMIC ARTICLES::Of porcelain or china +69139000,STATUETTES AND OTHER ORNAMENTAL CERAMIC ARTICLES::Other +69140000,other ceramic articles +69141000,OTHER CERAMIC ARTICLES::Of porcelain or china +69149000,OTHER CERAMIC ARTICLES::Other +70000000,830 +70010000,830 >> excluding glass from tubes or other activated glass of heading 8549 glass in the mass cullet and other waste and scrap of glass excluding glass from cathode ray tubes or other activated glass of heading 8549 glass in the mass +70010010,CULLET AND OTHER WASTE AND SCRAP OF GLASS; GLASS IN THE MASS:Cullet and other waste and scrap of glass; glass in the mass :Cullet and other waste and scrap of glass +70010020,CULLET AND OTHER WASTE AND SCRAP OF GLASS; GLASS IN THE MASS:Cullet and other waste and scrap of glass; glass in the mass :Enamel glass in the mass +70010090,CULLET AND OTHER WASTE AND SCRAP OF GLASS; GLASS IN THE MASS:Cullet and other waste and scrap of glass; glass in the mass :Other +70020000,830 >> glass in balls other than microspheres of heading 7018 rods or tubes unworked +70021000,"GLASS IN BALLS (OTHER THAN MICROSPHERES OF HEADING 7018), RODS OR TUBES, UNWORKED::Balls" +70022000,830 >> glass in balls other than microspheres of heading 7018 rods or tubes unworked >> rods +70022010,"GLASS IN BALLS (OTHER THAN MICROSPHERES OF HEADING 7018), RODS OR TUBES, UNWORKED:Rods :Enamel glass rods" +70022090,"GLASS IN BALLS (OTHER THAN MICROSPHERES OF HEADING 7018), RODS OR TUBES, UNWORKED:Rods :Other" +70023100,"GLASS IN BALLS (OTHER THAN MICROSPHERES OF HEADING 7018), RODS OR TUBES, UNWORKED::Of fused quartz or other fused silica" +70023200,"GLASS IN BALLS (OTHER THAN MICROSPHERES OF HEADING 7018), RODS OR TUBES, UNWORKED::Of other glass having a linear coefficient of expansion not exceeding 5 x 10-6 per Kelvin within a temperature range of 00C to 3000C" +70023900,"GLASS IN BALLS (OTHER THAN MICROSPHERES OF HEADING 7018), RODS OR TUBES, UNWORKED::Other" +70030000,830 >> cast glass and rolled glass in sheets or profiles whether or not having an absorbent reflecting or layer but not otherwise worked sheets +70031200,830 >> cast glass and rolled glass in sheets or profiles whether or not having an absorbent reflecting or layer but not otherwise worked sheets >> coloured throughout the mass opacified flashed or having an absorbent reflecting or layer +70031210,"CAST GLASS AND ROLLED GLASS, IN SHEETS OR PROFILES, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Coloured throughout the mass (body-tinted), opacified, flashed or having an absorbent, reflecting or non-reflecting layer :Tinted" +70031290,"CAST GLASS AND ROLLED GLASS, IN SHEETS OR PROFILES, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Coloured throughout the mass (body-tinted), opacified, flashed or having an absorbent, reflecting or non-reflecting layer :Other" +70031900,830 >> cast glass and rolled glass in sheets or profiles whether or not having an absorbent reflecting or layer but not otherwise worked sheets >> other +70031910,"CAST GLASS AND ROLLED GLASS, IN SHEETS OR PROFILES, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Other :Tinted" +70031990,"CAST GLASS AND ROLLED GLASS, IN SHEETS OR PROFILES, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Other :Other" +70032000,830 >> cast glass and rolled glass in sheets or profiles 
whether or not having an absorbent reflecting or layer but not otherwise worked sheets >> wired sheets +70032010,"CAST GLASS AND ROLLED GLASS, IN SHEETS OR PROFILES, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Wired sheets :Tinted" +70032090,"CAST GLASS AND ROLLED GLASS, IN SHEETS OR PROFILES, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Wired sheets :Other" +70033000,830 >> cast glass and rolled glass in sheets or profiles whether or not having an absorbent reflecting or layer but not otherwise worked sheets >> profiles +70033010,"CAST GLASS AND ROLLED GLASS, IN SHEETS OR PROFILES, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Profiles :Tinted" +70033090,"CAST GLASS AND ROLLED GLASS, IN SHEETS OR PROFILES, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Profiles :Other" +70040000,830 >> drawn glass and blown glass in sheets whether or not having an absorbent reflecting or layer but not otherwise worked +70042000,830 >> drawn glass and blown glass in sheets whether or not having an absorbent reflecting or layer but not otherwise worked >> glass coloured throughout the mass body tinted opacified flashed or having an absorbent reflecting or layer window glass sheet glass +70042011,"DRAWN GLASS AND BLOWN GLASS, IN SHEETS, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Glass, coloured throughout the mass (body tinted), opacified, flashed or having an absorbent, reflecting or non-reflecting layer :Tinted" +70042019,"DRAWN GLASS AND BLOWN GLASS, IN SHEETS, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Glass, coloured throughout the mass (body tinted), opacified, flashed or having an absorbent, reflecting or non-reflecting layer :Other" +70042091,"DRAWN GLASS AND BLOWN GLASS, IN SHEETS, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Glass, coloured throughout the mass (body tinted), opacified, flashed or having an absorbent, reflecting or non-reflecting layer :Tinted" +70042099,"DRAWN GLASS AND BLOWN GLASS, IN SHEETS, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Glass, coloured throughout the mass (body tinted), opacified, flashed or having an absorbent, reflecting or non-reflecting layer :Other" +70049000,830 >> drawn glass and blown glass in sheets whether or not having an absorbent reflecting or layer but not otherwise worked >> other glass window glass sheet glass +70049011,"DRAWN GLASS AND BLOWN GLASS, IN SHEETS, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Other glass :Tinted" +70049019,"DRAWN GLASS AND BLOWN GLASS, IN SHEETS, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Other glass :Other" +70049091,"DRAWN GLASS AND BLOWN GLASS, IN SHEETS, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Other glass :Tinted" +70049099,"DRAWN GLASS AND BLOWN GLASS, IN SHEETS, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Other glass :Other" +70050000,830 >> float glass and surface ground or polished glass in sheets whether or not having an reflecting or absorbent layer but not otherwise worked +70051000,830 
>> float glass and surface ground or polished glass in sheets whether or not having an reflecting or absorbent layer but not otherwise worked >> glass having an absorbent reflecting or layer +70051010,"FLOAT GLASS AND SURFACE GROUND OR POLISHED GLASS, IN SHEETS, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Non-wired glass, having an absorbent, reflecting or non-reflecting layer :Tinted" +70051090,"FLOAT GLASS AND SURFACE GROUND OR POLISHED GLASS, IN SHEETS, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Non-wired glass, having an absorbent, reflecting or non-reflecting layer :Other" +70052100,830 >> float glass and surface ground or polished glass in sheets whether or not having an reflecting or absorbent layer but not otherwise worked >> coloured throughout the mass body tinted opacified flashed or merely surface ground +70052110,"FLOAT GLASS AND SURFACE GROUND OR POLISHED GLASS, IN SHEETS, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Coloured throughout the mass (body tinted),opacified, flashed or merely surface ground:Tinted" +70052190,"FLOAT GLASS AND SURFACE GROUND OR POLISHED GLASS, IN SHEETS, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Coloured throughout the mass (body tinted),opacified, flashed or merely surface ground:Other" +70052900,830 >> float glass and surface ground or polished glass in sheets whether or not having an reflecting or absorbent layer but not otherwise worked >> other +70052910,"FLOAT GLASS AND SURFACE GROUND OR POLISHED GLASS, IN SHEETS, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Other :Tinted" +70052990,"FLOAT GLASS AND SURFACE GROUND OR POLISHED GLASS, IN SHEETS, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Other :Other" +70053000,830 >> float glass and surface ground or polished glass in sheets whether or not having an reflecting or absorbent layer but not otherwise worked >> wired glass +70053010,"FLOAT GLASS AND SURFACE GROUND OR POLISHED GLASS, IN SHEETS, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Wired glass :Tinted" +70053090,"FLOAT GLASS AND SURFACE GROUND OR POLISHED GLASS, IN SHEETS, WHETHER OR NOT HAVING AN ABSORBENT, REFLECTING OR NON-REFLECTING LAYER, BUT NOT OTHERWISE WORKED:Wired glass :Other" +70060000,830 >> glass of heading 7003 7004 or 7005 bent edge worked engraved drilled enameled or otherwise +70070000,830 >> safety glass consisting of toughened tempered or laminated glass toughened tempered safety glass +70071100,"SAFETY GLASS, CONSISTING OF TOUGHENED (TEMPERED) OR LAMINATED GLASS::Of size and shape suitable for incorporation in vehicles, aircraft, spacecraft or vessels" +70071900,"SAFETY GLASS, CONSISTING OF TOUGHENED (TEMPERED) OR LAMINATED GLASS::Other" +70072100,830 >> safety glass consisting of toughened tempered or laminated glass toughened tempered safety glass >> of size and shape suitable for incorporation in vehicles aircraft spacecraft or vessels +70072110,"SAFETY GLASS, CONSISTING OF TOUGHENED (TEMPERED) OR LAMINATED GLASS:Of size and shape suitable for incorporation in vehicles, aircraft, spacecraft or vessels :Bullet proof glass" +70072190,"SAFETY GLASS, CONSISTING OF TOUGHENED (TEMPERED) OR LAMINATED GLASS:Of size and shape suitable for incorporation in 
vehicles, aircraft, spacecraft or vessels :Other" +70072900,"SAFETY GLASS, CONSISTING OF TOUGHENED (TEMPERED) OR LAMINATED GLASS::Other" +70080000,830 >> insulating units of glass insulating units of glass +70080010,"MULTIPLE-WALLED INSULATING UNITS OF GLASS:Multiple-walled insulating units of glass :Glazed glass, double walled" +70080020,"MULTIPLE-WALLED INSULATING UNITS OF GLASS:Multiple-walled insulating units of glass :Glazed glass, multiple walled" +70080090,MULTIPLE-WALLED INSULATING UNITS OF GLASS:Multiple-walled insulating units of glass :Other +70090000,830 >> glass mirrors whether or not framed including mirrors +70091000,830 >> glass mirrors whether or not framed including mirrors >> mirrors for vehicles +70091010,"GLASS MIRRORS, WHETHER OR NOT FRAMED, INCLUDING REAR-VIEW MIRRORS:Rear-view mirrors for vehicles :Prismatic rear-view mirror for vehicles" +70091090,"GLASS MIRRORS, WHETHER OR NOT FRAMED, INCLUDING REAR-VIEW MIRRORS:Rear-view mirrors for vehicles :Other" +70099100,"GLASS MIRRORS, WHETHER OR NOT FRAMED, INCLUDING REAR-VIEW MIRRORS::Unframed" +70099200,"GLASS MIRRORS, WHETHER OR NOT FRAMED, INCLUDING REAR-VIEW MIRRORS::Framed" +70100000,830 >> carboys bottles flasks jars pots phials ampoules and other containers of glass of a kind used for the conveyance or packing of goods preserving jars of glass stoppers lids and other closures of glass +70101000,"CARBOYS, BOTTLES, FLASKS, JARS, POTS, PHIALS, AMPOULES AND OTHER CONTAINERS, OF GLASS, OF A KIND USED FOR THE CONVEYANCE OR PACKING OF GOODS; PRESERVING JARS OF GLASS; STOPPERS, LIDS AND OTHER CLOSURES, OF GLASS::Ampoules" +70102000,"CARBOYS, BOTTLES, FLASKS, JARS, POTS, PHIALS, AMPOULES AND OTHER CONTAINERS, OF GLASS, OF A KIND USED FOR THE CONVEYANCE OR PACKING OF GOODS; PRESERVING JARS OF GLASS; STOPPERS, LIDS AND OTHER CLOSURES, OF GLASS::Stoppers, lids and other closures" +70109000,"CARBOYS, BOTTLES, FLASKS, JARS, POTS, PHIALS, AMPOULES AND OTHER CONTAINERS, OF GLASS, OF A KIND USED FOR THE CONVEYANCE OR PACKING OF GOODS; PRESERVING JARS OF GLASS; STOPPERS, LIDS AND OTHER CLOSURES, OF GLASS::Other" +70110000,830 >> glass envelopes including bulbs and tubes open and glass parts thereof without fittings for electric lamps and light sources cathode ray tubes or the like +70111000,830 >> glass envelopes including bulbs and tubes open and glass parts thereof without fittings for electric lamps and light sources cathode ray tubes or the like >> for electric lighting +70111010,"GLASS ENVELOPES (INCLUDING BULBS AND TUBES), OPEN, AND GLASS PARTS THEREOF, WITHOUT FITTINGS, FOR ELECTRIC LAMPS, CATHODE-RAY TUBES OR THE LIKE:For electric lighting :Glass envelopes for fluorescent lamps" +70111020,"GLASS ENVELOPES (INCLUDING BULBS AND TUBES), OPEN, AND GLASS PARTS THEREOF, WITHOUT FITTINGS, FOR ELECTRIC LAMPS, CATHODE-RAY TUBES OR THE LIKE:For electric lighting :Glass envelopes for filament lamps" +70111090,"GLASS ENVELOPES (INCLUDING BULBS AND TUBES), OPEN, AND GLASS PARTS THEREOF, WITHOUT FITTINGS, FOR ELECTRIC LAMPS, CATHODE-RAY TUBES OR THE LIKE:For electric lighting :Other" +70112000,"GLASS ENVELOPES (INCLUDING BULBS AND TUBES), OPEN, AND GLASS PARTS THEREOF, WITHOUT FITTINGS, FOR ELECTRIC LAMPS, CATHODE-RAY TUBES OR THE LIKE::For cathode-ray tubes" +70119000,830 >> glass envelopes including bulbs and tubes open and glass parts thereof without fittings for electric lamps and light sources cathode ray tubes or the like >> other +70119010,"GLASS ENVELOPES (INCLUDING BULBS AND TUBES), OPEN, AND GLASS PARTS THEREOF, WITHOUT 
FITTINGS, FOR ELECTRIC LAMPS, CATHODE-RAY TUBES OR THE LIKE:Other :Glass envelopes for electronic valves" +70119090,"GLASS ENVELOPES (INCLUDING BULBS AND TUBES), OPEN, AND GLASS PARTS THEREOF, WITHOUT FITTINGS, FOR ELECTRIC LAMPS, CATHODE-RAY TUBES OR THE LIKE:Other :Other" +70130000,830 >> glassware of a kind used for table kitchen toilet office indoor decoration or similar purposes other than that of heading 7010 or 7018 +70131000,"GLASSWARE OF A KIND USED FOR TABLE, KITCHEN, TOILET, OFFICE, INDOOR DECORATION OR SIMILAR PURPOSES (OTHER THAN THAT OF HEADING 7010 OR 7018)::Of glass-ceramics" +70132200,"GLASSWARE OF A KIND USED FOR TABLE, KITCHEN, TOILET, OFFICE, INDOOR DECORATION OR SIMILAR PURPOSES (OTHER THAN THAT OF HEADING 7010 OR 7018)::Of lead crystal" +70132800,"GLASSWARE OF A KIND USED FOR TABLE, KITCHEN, TOILET, OFFICE, INDOOR DECORATION OR SIMILAR PURPOSES (OTHER THAN THAT OF HEADING 7010 OR 7018)::Other" +70133300,"GLASSWARE OF A KIND USED FOR TABLE, KITCHEN, TOILET, OFFICE, INDOOR DECORATION OR SIMILAR PURPOSES (OTHER THAN THAT OF HEADING 7010 OR 7018)::Of lead crystal" +70133700,"GLASSWARE OF A KIND USED FOR TABLE, KITCHEN, TOILET, OFFICE, INDOOR DECORATION OR SIMILAR PURPOSES (OTHER THAN THAT OF HEADING 7010 OR 7018)::Other" +70134100,"GLASSWARE OF A KIND USED FOR TABLE, KITCHEN, TOILET, OFFICE, INDOOR DECORATION OR SIMILAR PURPOSES (OTHER THAN THAT OF HEADING 7010 OR 7018)::Of lead crystal" +70134200,"GLASSWARE OF A KIND USED FOR TABLE, KITCHEN, TOILET, OFFICE, INDOOR DECORATION OR SIMILAR PURPOSES (OTHER THAN THAT OF HEADING 7010 OR 7018)::Of glass having a linear coefficient of expansion not exceeding 5 x 10 -6 per Kelvin within a temperature range of 0 C to 300 C," +70134900,"GLASSWARE OF A KIND USED FOR TABLE, KITCHEN, TOILET, OFFICE, INDOOR DECORATION OR SIMILAR PURPOSES (OTHER THAN THAT OF HEADING 7010 OR 7018)::Other" +70139100,"GLASSWARE OF A KIND USED FOR TABLE, KITCHEN, TOILET, OFFICE, INDOOR DECORATION OR SIMILAR PURPOSES (OTHER THAN THAT OF HEADING 7010 OR 7018)::Of lead crystal" +70139900,"GLASSWARE OF A KIND USED FOR TABLE, KITCHEN, TOILET, OFFICE, INDOOR DECORATION OR SIMILAR PURPOSES (OTHER THAN THAT OF HEADING 7010 OR 7018)::Other" +70140000,830 >> signalling glassware and optical elements of glass other than those of heading 7015 not optically worked signalling glassware and optical elements of glass other than those of heading 7015 not optically worked +70140010,"SIGNALLING GLASSWARE AND OPTICAL ELEMENTS OF GLASS (OTHER THAN THOSE OF HEADING 7012.5), NOT OPTICALLY WORKED:Signalling glassware and optical elements of glass (other than those of heading 7012.5), not optically worked :Signalling glassware" +70140020,"SIGNALLING GLASSWARE AND OPTICAL ELEMENTS OF GLASS (OTHER THAN THOSE OF HEADING 7012.5), NOT OPTICALLY WORKED:Signalling glassware and optical elements of glass (other than those of heading 7012.5), not optically worked :Optical elements" +70150000,830 >> clock or watch glasses and similar glasses glasses for or corrective spectacles curved bent hollowed or the like not optically worked hollow glass spheres and their segments for the manufacture of such glasses +70151000,830 >> clock or watch glasses and similar glasses glasses for or corrective spectacles curved bent hollowed or the like not optically worked hollow glass spheres and their segments for the manufacture of such glasses >> glasses for corrective spectacles +70151010,"CLOCK OR WATCH GLASSES AND SIMILAR GLASSES, GLASSES FOR NON-CORRECTIVE OR CORRECTIVE SPECTACLES, CURVED, BENT, HOLLOWED 
OR THE LIKE; NOT OPTICALLY WORKED; HOLLOW GLASS SPHERES AND THEIR SEGMENTS, FOR THE MANUFACTURE OF SUCH GLASSES:Glasses for corrective spectacles :Ophthalmic rough blanks" +70151020,"CLOCK OR WATCH GLASSES AND SIMILAR GLASSES, GLASSES FOR NON-CORRECTIVE OR CORRECTIVE SPECTACLES, CURVED, BENT, HOLLOWED OR THE LIKE; NOT OPTICALLY WORKED; HOLLOW GLASS SPHERES AND THEIR SEGMENTS, FOR THE MANUFACTURE OF SUCH GLASSES:Glasses for corrective spectacles :Flint button" +70151090,"CLOCK OR WATCH GLASSES AND SIMILAR GLASSES, GLASSES FOR NON-CORRECTIVE OR CORRECTIVE SPECTACLES, CURVED, BENT, HOLLOWED OR THE LIKE; NOT OPTICALLY WORKED; HOLLOW GLASS SPHERES AND THEIR SEGMENTS, FOR THE MANUFACTURE OF SUCH GLASSES:Glasses for corrective spectacles :Other" +70159000,830 >> clock or watch glasses and similar glasses glasses for or corrective spectacles curved bent hollowed or the like not optically worked hollow glass spheres and their segments for the manufacture of such glasses >> other +70159010,"CLOCK OR WATCH GLASSES AND SIMILAR GLASSES, GLASSES FOR NON-CORRECTIVE OR CORRECTIVE SPECTACLES, CURVED, BENT, HOLLOWED OR THE LIKE; NOT OPTICALLY WORKED; HOLLOW GLASS SPHERES AND THEIR SEGMENTS, FOR THE MANUFACTURE OF SUCH GLASSES:Other :Clock and watch glasses and similar glasses, curved, bent, hollowed and the like, glass spheres and segments of spheres for the manufacture of such glasses" +70159020,"CLOCK OR WATCH GLASSES AND SIMILAR GLASSES, GLASSES FOR NON-CORRECTIVE OR CORRECTIVE SPECTACLES, CURVED, BENT, HOLLOWED OR THE LIKE; NOT OPTICALLY WORKED; HOLLOW GLASS SPHERES AND THEIR SEGMENTS, FOR THE MANUFACTURE OF SUCH GLASSES:Other :Glass for sun glasses" +70159090,"CLOCK OR WATCH GLASSES AND SIMILAR GLASSES, GLASSES FOR NON-CORRECTIVE OR CORRECTIVE SPECTACLES, CURVED, BENT, HOLLOWED OR THE LIKE; NOT OPTICALLY WORKED; HOLLOW GLASS SPHERES AND THEIR SEGMENTS, FOR THE MANUFACTURE OF SUCH GLASSES:Other :Other" +70160000,830 >> paving blocks slabs bricks squares tiles and other articles of pressed or mouldedglass whether or not wired of a kind usedfor building or construction purposes glass cubes and other glass small wares whether or not on a backing for mosaics or similar decorative purposes leaded lights and the like or foam glass in blocks panels plates shells or similar forms +70161000,"PAVING BLOCKS, SLABS, BRICKS, SQUARES, TILES AND OTHER ARTICLES OF PRESSED OR MOULDED GLASS, WHETHER OR NOT WIRED, OF A KIND USED FOR BUILDING OR CONSTRUCTION PURPOSES; GLASS CUBES AND OTHER GLASS SMALL WARES, WHETHER OR NOT ON A BACKING, FOR MOSAICS OR SIMILAR DECORATIVE PURPOSES; LEADED LIGHTS AND THE LIKE; MULTI-CELLULAR OR FOAM GLASS IN BLOCKS, PANELS, PLATES, SHELLS OR SIMILAR FORMS::Glass cubes and other glass smallwares, whether or not on a backing, for mosaics or similar decorative purposes" +70169000,"PAVING BLOCKS, SLABS, BRICKS, SQUARES, TILES AND OTHER ARTICLES OF PRESSED OR MOULDED GLASS, WHETHER OR NOT WIRED, OF A KIND USED FOR BUILDING OR CONSTRUCTION PURPOSES; GLASS CUBES AND OTHER GLASS SMALL WARES, WHETHER OR NOT ON A BACKING, FOR MOSAICS OR SIMILAR DECORATIVE PURPOSES; LEADED LIGHTS AND THE LIKE; MULTI-CELLULAR OR FOAM GLASS IN BLOCKS, PANELS, PLATES, SHELLS OR SIMILAR FORMS::Other" +70170000,830 >> laboratory hygienic or pharmaceutical glassware whether or not graduated or calibrated +70171000,"LABORATORY, HYGIENIC OR PHARMACEUTICAL GLASSWARE, WHETHER OR NOT GRADUATED OR CALIBRATED::Of fused quartz or other fused silica" +70172000,"LABORATORY, HYGIENIC OR PHARMACEUTICAL GLASSWARE, WHETHER OR NOT GRADUATED 
OR CALIBRATED::Of other glass having a linear coefficient of expansion not exceeding 5 x 10-6 per Kelvin within a temperature range of 0C to 300C" +70179000,830 >> laboratory hygienic or pharmaceutical glassware whether or not graduated or calibrated >> other +70179010,"LABORATORY, HYGIENIC OR PHARMACEUTICAL GLASSWARE, WHETHER OR NOT GRADUATED OR CALIBRATED:Other :Graduated or calibrated laboratory glassware" +70179020,"LABORATORY, HYGIENIC OR PHARMACEUTICAL GLASSWARE, WHETHER OR NOT GRADUATED OR CALIBRATED:Other :Pharmaceutical glassware(OLD tariff)" +70179030,"LABORATORY, HYGIENIC OR PHARMACEUTICAL GLASSWARE, WHETHER OR NOT GRADUATED OR CALIBRATED:Other :Hygienic glassware(OLD tariff)" +70179090,"LABORATORY, HYGIENIC OR PHARMACEUTICAL GLASSWARE, WHETHER OR NOT GRADUATED OR CALIBRATED:Other :Other(OLD tariff)" +70180000,830 >> glass beads imitation pearls imitation precious or stones and similar glass smallwares and articles thereof other than imitation jewellery glass eyes other than prosthetic articles statuettes and other ornaments of glass other than imitation jewellery glass in microspheres not exceeding 1 mm diameter +70181000,830 >> glass beads imitation pearls imitation precious or stones and similar glass smallwares and articles thereof other than imitation jewellery glass eyes other than prosthetic articles statuettes and other ornaments of glass other than imitation jewellery glass in microspheres not exceeding 1 mm diameter >> glass beads imitation pearls imitation precious or stones and similar glass smallwares +70181010,"GLASS BEADS, IMITATION PEARLS, IMITATION PRECIOUS OR SEMI-PRECIOUS STONES AND SIMILAR GLASS SMALLWARES, AND ARTICLES THEREOF OTHER THAN IMITATION JEWELLERY; GLASS EYES OTHER THAN PROSTHETIC ARTICLES; STATUETTES AND OTHER ORNAMENTS OF LAMP-WORKED GLASS, OTHER THAN IMITATION JEWELLERY; GLASS MICROSPHERES NOT EXCEEDING 1 MM IN DIAMETER:Glass beads, imitation pearls, imitation precious or:Bangles" +70181020,"GLASS BEADS, IMITATION PEARLS, IMITATION PRECIOUS OR SEMI-PRECIOUS STONES AND SIMILAR GLASS SMALLWARES, AND ARTICLES THEREOF OTHER THAN IMITATION JEWELLERY; GLASS EYES OTHER THAN PROSTHETIC ARTICLES; STATUETTES AND OTHER ORNAMENTS OF LAMP-WORKED GLASS, OTHER THAN IMITATION JEWELLERY; GLASS MICROSPHERES NOT EXCEEDING 1 MM IN DIAMETER:Glass beads, imitation pearls, imitation precious or:Beads" +70181090,"GLASS BEADS, IMITATION PEARLS, IMITATION PRECIOUS OR SEMI-PRECIOUS STONES AND SIMILAR GLASS SMALLWARES, AND ARTICLES THEREOF OTHER THAN IMITATION JEWELLERY; GLASS EYES OTHER THAN PROSTHETIC ARTICLES; STATUETTES AND OTHER ORNAMENTS OF LAMP-WORKED GLASS, OTHER THAN IMITATION JEWELLERY; GLASS MICROSPHERES NOT EXCEEDING 1 MM IN DIAMETER:Glass beads, imitation pearls, imitation precious or:Other" +70182000,"GLASS BEADS, IMITATION PEARLS, IMITATION PRECIOUS OR SEMI-PRECIOUS STONES AND SIMILAR GLASS SMALLWARES, AND ARTICLES THEREOF OTHER THAN IMITATION JEWELLERY; GLASS EYES OTHER THAN PROSTHETIC ARTICLES; STATUETTES AND OTHER ORNAMENTS OF LAMP-WORKED GLASS, OTHER THAN IMITATION JEWELLERY; GLASS MICROSPHERES NOT EXCEEDING 1 MM IN DIAMETER::Glass microspheres not exceeding 1 mm in diameter" +70189000,830 >> glass beads imitation pearls imitation precious or stones and similar glass smallwares and articles thereof other than imitation jewellery glass eyes other than prosthetic articles statuettes and other ornaments of glass other than imitation jewellery glass in microspheres not exceeding 1 mm diameter >> other +70189010,"GLASS BEADS, IMITATION PEARLS, IMITATION 
PRECIOUS OR SEMI-PRECIOUS STONES AND SIMILAR GLASS SMALLWARES, AND ARTICLES THEREOF OTHER THAN IMITATION JEWELLERY; GLASS EYES OTHER THAN PROSTHETIC ARTICLES; STATUETTES AND OTHER ORNAMENTS OF LAMP-WORKED GLASS, OTHER THAN IMITATION JEWELLERY; GLASS MICROSPHERES NOT EXCEEDING 1 MM IN DIAMETER:Other :Glass statues" +70189090,"GLASS BEADS, IMITATION PEARLS, IMITATION PRECIOUS OR SEMI-PRECIOUS STONES AND SIMILAR GLASS SMALLWARES, AND ARTICLES THEREOF OTHER THAN IMITATION JEWELLERY; GLASS EYES OTHER THAN PROSTHETIC ARTICLES; STATUETTES AND OTHER ORNAMENTS OF LAMP-WORKED GLASS, OTHER THAN IMITATION JEWELLERY; GLASS MICROSPHERES NOT EXCEEDING 1 MM IN DIAMETER:Other :Other" +70190000,830 >> glass fibres including glass wool and articles thereof for example yarn rovings woven fabrics slivers rovings yarn and chopped strands and mats thereof +70191100,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS)::Chopped strands, of a length of not more than 50 mm" +70191200,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS)::Rovings" +70191300,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS)::Other yarn, slivers" +70191400,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS)::Mechanically bonded mats" +70191500,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS)::Chemically bonded mats" +70191900,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS)::Other" +70193100,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS)::Mats(OLD tariff)" +70193200,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS)::Thin sheets (voiles)(OLD tariff)" +70193900,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS)::Other(OLD tariff)" +70194000,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS)::Woven fabrics of rovings(OLD tariff)" +70195100,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS)::Of a width not exceeding 30 cm(OLD tariff)" +70195200,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS)::Of a width exceeding 30 cm, plain weave, weighing less than 250 g/sq. 
metre, of filaments measuring per single yarn not more than 136 tex(OLD tariff)" +70195900,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS)::Other(OLD tariff)" +70196100,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS)::Closed woven fabrics of rovings" +70196200,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS)::Other closed fabrics of rovings" +70196300,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS)::Closed woven fabrics, plain weave, of yarns, not coated or laminated" +70196400,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS)::Closed woven fabrics, plain weave, of yarns, coated or laminate" +70196500,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS)::Open woven fabrics of a width not exceeding 30 cm" +70196600,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS):: Open woven fabrics of a width exceeding 30 cm" +70196900,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS)::other" +70197100,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS)::Veils (thin sheets)" +70197200,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS)::Other closed fabrics" +70197300,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS)::Other open fabrics" +70198000,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS)::Glass wool and articles of glass wool" +70199000,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS):Other:Other(OLD tariff)" +70199010,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS):Other:Glass wool or glass fibre(OLD tariff)" +70199090,"GLASS FIBRES (INCLUDING GLASS WOOL) AND ARTICLES THEREOF (FOR EXAMPLE, YARN, WOVEN FABRICS):Other:Other(OLD tariff)" +70200000,830 >> other articles of glass other articles of glass glass shells glass globes and glass founts +70200011,OTHER ARTICLES OF GLASS:Other articles of glass:Globes for lamps and lanterns +70200012,OTHER ARTICLES OF GLASS:Other articles of glass:Founts for kerosene wick lamps +70200019,OTHER ARTICLES OF GLASS:Other articles of glass:Other +70200021,OTHER ARTICLES OF GLASS:Other articles of glass:For lamps and lanterns +70200029,OTHER ARTICLES OF GLASS:Other articles of glass:Other +70200090,OTHER ARTICLES OF GLASS:Other articles of glass:Other +71010000,pearls natural or cultured whether or not worked or graded but not strung mounted or set pearls natural or cultured temporarily strung for convenience of transport +71011000,pearls natural or cultured whether or not worked or graded but not strung mounted or set pearls natural or cultured temporarily strung for convenience of transport >> natural pearls +71011010,"PEARLS, NATURAL OR CULTURED, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; PEARLS, NATURAL OR CULTURED, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Natural pearls :Unworked" +71011020,"PEARLS, NATURAL OR CULTURED, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; PEARLS, NATURAL OR CULTURED, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Natural pearls :Worked" +71012100,"PEARLS, NATURAL OR CULTURED, WHETHER OR NOT 
WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; PEARLS, NATURAL OR CULTURED, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT::Unworked" +71012200,"PEARLS, NATURAL OR CULTURED, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; PEARLS, NATURAL OR CULTURED, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT::Worked" +71020000,diamonds whether or not worked but not mounted or set +71021000,"DIAMONDS, WHETHER OR NOT WORKED, BUT NOT MOUNTED OR SET::Unsorted" +71022100,diamonds whether or not worked but not mounted or set >> unworked or simply sawn cleaved or bruted +71022110,"DIAMONDS, WHETHER OR NOT WORKED, BUT NOT MOUNTED OR SET:Unworked or simply sawn, cleaved or bruted:Sorted" +71022120,"DIAMONDS, WHETHER OR NOT WORKED, BUT NOT MOUNTED OR SET:Unworked or simply sawn, cleaved or bruted:Unsorted" +71022900,diamonds whether or not worked but not mounted or set >> other +71022910,"DIAMONDS, WHETHER OR NOT WORKED, BUT NOT MOUNTED OR SET:Other :Crushed" +71022990,"DIAMONDS, WHETHER OR NOT WORKED, BUT NOT MOUNTED OR SET:Other :Other" +71023100,"DIAMONDS, WHETHER OR NOT WORKED, BUT NOT MOUNTED OR SET::Unworked or simply sawn, cleaved or bruted" +71023900,diamonds whether or not worked but not mounted or set >> others +71023910,"DIAMONDS, WHETHER OR NOT WORKED, BUT NOT MOUNTED OR SET:Others :Diamond, cut or otherwise worked but not mounted or set" +71023990,"DIAMONDS, WHETHER OR NOT WORKED, BUT NOT MOUNTED OR SET:Others :Other" +71030000,precious stones other than diamonds and semi precious stones whether or not worked or graded but not strung mounted or set ungraded precious stones than other diamonds and stones temporarily strung for convenience of transport +71031000,precious stones other than diamonds and semi precious stones whether or not worked or graded but not strung mounted or set ungraded precious stones than other diamonds and stones temporarily strung for convenience of transport >> unworked or simply sawn or roughly shaped precious or stones of and mineralogical species unworked or simply sawn or roughly shaped precious or stones of and mineralogical species +71031011,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Emerald(OLD tariff)" +71031012,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Ruby and sapphire(OLD tariff)" +71031019,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Other(OLD tariff)" +71031021,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Feldspar (Moon stone)(OLD tariff)" +71031022,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR 
SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Garnet(OLD tariff)" +71031023,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Agate(OLD tariff)" +71031024,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Green aventurine(OLD tariff)" +71031029,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Other(OLD tariff)" +71031031,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Emerald" +71031032,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Yellow/golden/pink/red/green beryl" +71031033,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Chrysoberyl (including chrysoberyl cat's eye)" +71031034,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Alexandrite (including alexandrite cat's eye)" +71031039,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Other" +71031041,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Ruby" +71031042,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Sapphire" +71031043,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, 
MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Moonstone" +71031049,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Other" +71031051,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Garnet" +71031052,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Lapis-lazuli" +71031059,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Other" +71031061,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Prehnite" +71031062,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Agate" +71031063,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Aventurine" +71031064,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Chalcedony" +71031069,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Other" +71031071,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Tourmaline" +71031072,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF 
TRANSPORT:Unworked or simply sawn or roughly shaped:Tanzanite" +71031079,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Other" +71031090,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Unworked or simply sawn or roughly shaped:Other" +71039100,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Ruby, sapphire and emeralds:Ruby, sapphire and emeralds" +71039110,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Ruby, sapphire and emeralds:Ruby" +71039120,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Ruby, sapphire and emeralds:Sapphire" +71039130,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Ruby, sapphire and emeralds:Emeralds" +71039900,precious stones other than diamonds and semi precious stones whether or not worked or graded but not strung mounted or set ungraded precious stones than other diamonds and stones temporarily strung for convenience of transport >> other precious or stones of and mineralogical species other than +71039910,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Other :Feldspar (Moon stone)(OLD tariff)" +71039911,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Other :Yellow/golden/pink/red/green beryl" +71039912,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Other :Chrysoberyl (including chrysoberyl cat's eye)" +71039913,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Other :Alexandrite (including alexandrite cat's eye)" +71039919,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; 
UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Other :Other" +71039920,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Other :Garnet(OLD tariff)" +71039921,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Other :Moonstone" +71039929,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Other :Other" +71039930,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Other :Agate(OLD tariff)" +71039931,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Other :Garnet" +71039932,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Other :Lapis-lazuli" +71039939,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Other :Other" +71039940,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Other :Chalcedony(OLD tariff)" +71039941,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Other :Prehnite" +71039942,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Other :Agate" +71039943,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Other :Aventurine" +71039944,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Other :Chalcedony" +71039949,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, 
WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Other :Other" +71039951,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Other :Tourmaline" +71039952,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Other :Tanzanite" +71039959,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Other :Other" +71039990,"PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED PRECIOUS STONES (OTHER THAN DIAMONDS) AND SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Other :Other" +71040000,synthetic or reconstructed precious or semi precious stones whether or not worked or graded but not strung mounted or set ungraded synthetic or reconstructed precious or stones temporarily strung for convenience of transport +71041000,"SYNTHETIC OR RECONSTRUCTED PRECIOUS OR SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED SYNTHETIC OR RECONSTRUCTED PRECIOUS OR SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT::Piezo-electric quartz" +71042000,"SYNTHETIC OR RECONSTRUCTED PRECIOUS OR SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED SYNTHETIC OR RECONSTRUCTED PRECIOUS OR SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Other, unworked or simply sawn or roughly shaped:Other, unworked or simply sawn or roughly shaped(OLD tariff)" +71042010,"SYNTHETIC OR RECONSTRUCTED PRECIOUS OR SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED SYNTHETIC OR RECONSTRUCTED PRECIOUS OR SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Other, unworked or simply sawn or roughly shaped:Laboratory-created or laboratory grown or manmade or cultured or synthetic diamonds(OLD tariff)" +71042090,"SYNTHETIC OR RECONSTRUCTED PRECIOUS OR SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED SYNTHETIC OR RECONSTRUCTED PRECIOUS OR SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Other, unworked or simply sawn or roughly shaped:Other(OLD tariff)" +71042100,"SYNTHETIC OR RECONSTRUCTED PRECIOUS OR SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED SYNTHETIC OR RECONSTRUCTED PRECIOUS OR SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:diamonds:Diamonds" +71042110,synthetic or reconstructed precious or semi precious stones whether or not worked or graded but not strung mounted or set ungraded synthetic or reconstructed precious or stones temporarily strung for convenience of transport >> diamonds >> industrial +71042120,synthetic or reconstructed precious or semi precious stones whether or not worked or graded but not strung mounted 
or set ungraded synthetic or reconstructed precious or stones temporarily strung for convenience of transport >> diamonds >> +71042900,"SYNTHETIC OR RECONSTRUCTED PRECIOUS OR SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED SYNTHETIC OR RECONSTRUCTED PRECIOUS OR SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:Other:Other" +71049000,"SYNTHETIC OR RECONSTRUCTED PRECIOUS OR SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED SYNTHETIC OR RECONSTRUCTED PRECIOUS OR SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:other:Other(OLD tariff)" +71049010,"SYNTHETIC OR RECONSTRUCTED PRECIOUS OR SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED SYNTHETIC OR RECONSTRUCTED PRECIOUS OR SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:other:Laboratory created or laboratory grown or manmade or cultured or ssynthetic diamonds(OLD tariff)" +71049090,"SYNTHETIC OR RECONSTRUCTED PRECIOUS OR SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED SYNTHETIC OR RECONSTRUCTED PRECIOUS OR SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:other:OTHER(OLD tariff)" +71049100,"SYNTHETIC OR RECONSTRUCTED PRECIOUS OR SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED SYNTHETIC OR RECONSTRUCTED PRECIOUS OR SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:diamond:Diamonds" +71049110,synthetic or reconstructed precious or semi precious stones whether or not worked or graded but not strung mounted or set ungraded synthetic or reconstructed precious or stones temporarily strung for convenience of transport >> diamonds >> industrial +71049120,synthetic or reconstructed precious or semi precious stones whether or not worked or graded but not strung mounted or set ungraded synthetic or reconstructed precious or stones temporarily strung for convenience of transport >> diamonds >> +71049900,"SYNTHETIC OR RECONSTRUCTED PRECIOUS OR SEMIPRECIOUS STONES, WHETHER OR NOT WORKED OR GRADED BUT NOT STRUNG, MOUNTED OR SET; UNGRADED SYNTHETIC OR RECONSTRUCTED PRECIOUS OR SEMI-PRECIOUS STONES, TEMPORARILY STRUNG FOR CONVENIENCE OF TRANSPORT:other:other" +71050000,dust and powder of natural or synthetic precious or stones +71051000,DUST AND POWDER OF NATURAL OR SYNTHETIC PRECIOUS OR SEMI-PRECIOUS STONES::Of diamond +71051010,dust and powder of natural or synthetic precious or stones >> of diamonds >> of heading 7102 +71051020,dust and powder of natural or synthetic precious or stones >> of diamonds >> of heading 7104 +71059000,DUST AND POWDER OF NATURAL OR SYNTHETIC PRECIOUS OR SEMI-PRECIOUS STONES::Other +71060000,silver including silver plated with gold or platinum unwrought or in forms or in powder form +71061000,"SILVER (INCLUDING SILVER PLATED WITH GOLD OR PLATINUM), UNWROUGHT OR IN SEMI-MANUFACTURED FORMS, OR IN POWDER FORM::Powder" +71069100,"SILVER (INCLUDING SILVER PLATED WITH GOLD OR PLATINUM), UNWROUGHT OR IN SEMI-MANUFACTURED FORMS, OR IN POWDER FORM:Unwrought:Unwrought" +71069110,"SILVER (INCLUDING SILVER PLATED WITH GOLD OR PLATINUM), UNWROUGHT OR IN SEMI-MANUFACTURED FORMS, OR IN POWDER FORM:Unwrought:Grains" +71069190,"SILVER (INCLUDING SILVER PLATED WITH GOLD OR PLATINUM), UNWROUGHT OR IN SEMI-MANUFACTURED FORMS, OR IN POWDER FORM:Unwrought:Other" +71069200,silver including silver plated with gold or platinum 
unwrought or in forms or in powder form >> +71069210,"SILVER (INCLUDING SILVER PLATED WITH GOLD OR PLATINUM), UNWROUGHT OR IN SEMI-MANUFACTURED FORMS, OR IN POWDER FORM:Semi-manufactured :Sheets, plates, strips, tubes and pipes" +71069220,"SILVER (INCLUDING SILVER PLATED WITH GOLD OR PLATINUM), UNWROUGHT OR IN SEMI-MANUFACTURED FORMS, OR IN POWDER FORM:Semi-manufactured :Bar" +71069290,"SILVER (INCLUDING SILVER PLATED WITH GOLD OR PLATINUM), UNWROUGHT OR IN SEMI-MANUFACTURED FORMS, OR IN POWDER FORM:Semi-manufactured :Other" +71070000,"::BASE METALS CLAD WITH SILVER, NOT FURTHER WORKED THAN SEMI-MANUFACTURED" +71080000,gold including gold plated with platinum unwrought or in forms or in powder form +71081100,"GOLD (INCLUDING GOLD PLATED WITH PLATINUM) UNWROUGHT OR IN SEMI-MANUFACTURED FORMS, OR IN POWDER FORM::Powder" +71081200,"GOLD (INCLUDING GOLD PLATED WITH PLATINUM) UNWROUGHT OR IN SEMI-MANUFACTURED FORMS, OR IN POWDER FORM::Other unwrought forms" +71081300,"GOLD (INCLUDING GOLD PLATED WITH PLATINUM) UNWROUGHT OR IN SEMI-MANUFACTURED FORMS, OR IN POWDER FORM::Other semi-manufactured forms" +71082000,"GOLD (INCLUDING GOLD PLATED WITH PLATINUM) UNWROUGHT OR IN SEMI-MANUFACTURED FORMS, OR IN POWDER FORM::Monetary" +71090000,"::BASE METALS OR SILVER, CLAD WITH GOLD, NOT FURTHER WORKED THAN SEMI-MANUFACTURED" +71100000,platinum unwrought or in form or in powder form platinum +71101100,platinum unwrought or in form or in powder form platinum >> unwrought or in powder form +71101110,"PLATINUM, UNWROUGHT OR IN SEMI-MANUFACTURED FORM, OR IN POWDER FORM:Unwrought or in powder form:Unwrought form" +71101120,"PLATINUM, UNWROUGHT OR IN SEMI-MANUFACTURED FORM, OR IN POWDER FORM:Unwrought or in powder form:In powder form" +71101900,"PLATINUM, UNWROUGHT OR IN SEMI-MANUFACTURED FORM, OR IN POWDER FORM::Other" +71102100,"PLATINUM, UNWROUGHT OR IN SEMI-MANUFACTURED FORM, OR IN POWDER FORM::Unwrought or in powder form" +71102900,"PLATINUM, UNWROUGHT OR IN SEMI-MANUFACTURED FORM, OR IN POWDER FORM::Other" +71103100,"PLATINUM, UNWROUGHT OR IN SEMI-MANUFACTURED FORM, OR IN POWDER FORM::Unwrought or in powder from" +71103900,"PLATINUM, UNWROUGHT OR IN SEMI-MANUFACTURED FORM, OR IN POWDER FORM::Other" +71104100,"PLATINUM, UNWROUGHT OR IN SEMI-MANUFACTURED FORM, OR IN POWDER FORM::Unwrought or in powder from" +71104900,"PLATINUM, UNWROUGHT OR IN SEMI-MANUFACTURED FORM, OR IN POWDER FORM::Other" +71110000,"::BASE METALS, SILVER OR GOLD, CLAD WITH PLATINUM, NOT FURTHER WORKED THAN SEMI-MANUFACTURED" +71120000,waste and scrap of precious metal or of metal clad with precious metal other waste and scrap containing precious metal or precious metal compounds of a kind used principally for the recovery of precious metal other than goods of heading 8549 +71123000,"WASTE AND SCRAP OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL; OTHER WASTE AND SCRAP CONTAINING PRECIOUS METAL OR PRECIOUS METAL COMPOUNDS, OF A KIND USED PRINCIPALLY FOR THE RECOVERY OF PRECIOUS METAL::Ash containing precious metal or precious metal compounds" +71129100,"WASTE AND SCRAP OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL; OTHER WASTE AND SCRAP CONTAINING PRECIOUS METAL OR PRECIOUS METAL COMPOUNDS, OF A KIND USED PRINCIPALLY FOR THE RECOVERY OF PRECIOUS METAL::Of gold, including metal clad with gold but excluding sweepings containing other precious metals" +71129200,"WASTE AND SCRAP OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL; OTHER WASTE AND SCRAP CONTAINING PRECIOUS METAL OR PRECIOUS METAL COMPOUNDS, OF A KIND USED 
PRINCIPALLY FOR THE RECOVERY OF PRECIOUS METAL::Of platinum, including metal clad with platinum but excluding sweepings containing other precious metals" +71129900,waste and scrap of precious metal or of metal clad with precious metal other waste and scrap containing precious metal or precious metal compounds of a kind used principally for the recovery of precious metal other than goods of heading 8549 >> other +71129910,"WASTE AND SCRAP OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL; OTHER WASTE AND SCRAP CONTAINING PRECIOUS METAL OR PRECIOUS METAL COMPOUNDS, OF A KIND USED PRINCIPALLY FOR THE RECOVERY OF PRECIOUS METAL:Other :Of silver, including metal clad with silver but excluding sweepings containing other precious metals" +71129920,"WASTE AND SCRAP OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL; OTHER WASTE AND SCRAP CONTAINING PRECIOUS METAL OR PRECIOUS METAL COMPOUNDS, OF A KIND USED PRINCIPALLY FOR THE RECOVERY OF PRECIOUS METAL:Other :Sweepings containing gold or silver" +71129990,"WASTE AND SCRAP OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL; OTHER WASTE AND SCRAP CONTAINING PRECIOUS METAL OR PRECIOUS METAL COMPOUNDS, OF A KIND USED PRINCIPALLY FOR THE RECOVERY OF PRECIOUS METAL:Other :Other" +71130000,articles of jewellery and parts thereof of precious metal or of metal clad with precious metal of precious metal whether or not plated or clad with precious metal +71131100,articles of jewellery and parts thereof of precious metal or of metal clad with precious metal of precious metal whether or not plated or clad with precious metal >> of silver whether or not plated or clad with other precious metal of silver whether or not plated or clad with other precious metal +71131110,"ARTICLES OF JEWELLERY AND PARTS THEREOF, OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Of silver, whether or not plated or clad with other precious metal:Jewellery with filigree work" +71131120,"ARTICLES OF JEWELLERY AND PARTS THEREOF, OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Of silver, whether or not plated or clad with other precious metal:Jewellery studded with gems(OLD tariff)" +71131130,"ARTICLES OF JEWELLERY AND PARTS THEREOF, OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Of silver, whether or not plated or clad with other precious metal:Other articles of Jewellery(OLD tariff)" +71131141,articles of jewellery and parts thereof of precious metal or of metal clad with precious metal of precious metal whether or not plated or clad with precious metal >> of silver whether or not plated or clad with other precious metal of silver whether or not plated or clad with other precious metal >> unstudded unstudded +71131142,articles of jewellery and parts thereof of precious metal or of metal clad with precious metal of precious metal whether or not plated or clad with precious metal >> of silver whether or not plated or clad with other precious metal of silver whether or not plated or clad with other precious metal >> studded with pearls studded with pearls +71131143,articles of jewellery and parts thereof of precious metal or of metal clad with precious metal of precious metal whether or not plated or clad with precious metal >> of silver whether or not plated or clad with other precious metal of silver whether or not plated or clad with other precious metal >> studded with diamonds of heading 7102 studded with diamonds of heading 7102 +71131144,articles of jewellery and parts thereof of precious metal or of metal clad with precious metal of precious metal 
whether or not plated or clad with precious metal >> of silver whether or not plated or clad with other precious metal of silver whether or not plated or clad with other precious metal >> studded with diamonds of heading 7104 studded with diamonds of heading 7104 +71131145,articles of jewellery and parts thereof of precious metal or of metal clad with precious metal of precious metal whether or not plated or clad with precious metal >> of silver whether or not plated or clad with other precious metal of silver whether or not plated or clad with other precious metal >> studded with other precious and stones studded with other precious and stones +71131149,articles of jewellery and parts thereof of precious metal or of metal clad with precious metal of precious metal whether or not plated or clad with precious metal >> of silver whether or not plated or clad with other precious metal of silver whether or not plated or clad with other precious metal >> other other +71131190,"ARTICLES OF JEWELLERY AND PARTS THEREOF, OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Of silver, whether or not plated or clad with other precious metal:Parts" +71131900,articles of jewellery and parts thereof of precious metal or of metal clad with precious metal of precious metal whether or not plated or clad with precious metal >> of other precious metal whether or not plated or clad with precious metal of gold of other precious metal whether or not plated or clad with precious metal of gold +71131910,"ARTICLES OF JEWELLERY AND PARTS THEREOF, OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Of other precious metal, whether or not plated:Of gold, unstudded(OLD tariff)" +71131912,articles of jewellery and parts thereof of precious metal or of metal clad with precious metal of precious metal whether or not plated or clad with precious metal >> of other precious metal whether or not plated or clad with precious metal of gold of other precious metal whether or not plated or clad with precious metal of gold >> studded with pearls studded with pearls +71131913,articles of jewellery and parts thereof of precious metal or of metal clad with precious metal of precious metal whether or not plated or clad with precious metal >> of other precious metal whether or not plated or clad with precious metal of gold of other precious metal whether or not plated or clad with precious metal of gold >> studded with diamonds of heading 7102 studded with diamonds of heading 7102 +71131914,articles of jewellery and parts thereof of precious metal or of metal clad with precious metal of precious metal whether or not plated or clad with precious metal >> of other precious metal whether or not plated or clad with precious metal of gold of other precious metal whether or not plated or clad with precious metal of gold >> studded with diamonds of heading 7104 studded with diamonds of heading 7104 +71131915,articles of jewellery and parts thereof of precious metal or of metal clad with precious metal of precious metal whether or not plated or clad with precious metal >> of other precious metal whether or not plated or clad with precious metal of gold of other precious metal whether or not plated or clad with precious metal of gold >> studded with other precious and stones studded with other precious and stones +71131919,articles of jewellery and parts thereof of precious metal or of metal clad with precious metal of precious metal whether or not plated or clad with precious metal >> of other precious metal whether or not plated or clad 
with precious metal of gold of other precious metal whether or not plated or clad with precious metal of gold >> other of platinum other of platinum +71131920,"ARTICLES OF JEWELLERY AND PARTS THEREOF, OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Of other precious metal, whether or not plated:Of gold, set with pearls(OLD tariff)" +71131921,articles of jewellery and parts thereof of precious metal or of metal clad with precious metal of precious metal whether or not plated or clad with precious metal >> of other precious metal whether or not plated or clad with precious metal of gold of other precious metal whether or not plated or clad with precious metal of gold >> unstudded unstudded +71131922,articles of jewellery and parts thereof of precious metal or of metal clad with precious metal of precious metal whether or not plated or clad with precious metal >> of other precious metal whether or not plated or clad with precious metal of gold of other precious metal whether or not plated or clad with precious metal of gold >> studded with pearls studded with pearls +71131923,articles of jewellery and parts thereof of precious metal or of metal clad with precious metal of precious metal whether or not plated or clad with precious metal >> of other precious metal whether or not plated or clad with precious metal of gold of other precious metal whether or not plated or clad with precious metal of gold >> studded with diamonds of heading 7102 studded with diamonds of heading 7102 +71131924,articles of jewellery and parts thereof of precious metal or of metal clad with precious metal of precious metal whether or not plated or clad with precious metal >> of other precious metal whether or not plated or clad with precious metal of gold of other precious metal whether or not plated or clad with precious metal of gold >> studded with diamonds of heading 7104 studded with diamonds of heading 7104 +71131925,articles of jewellery and parts thereof of precious metal or of metal clad with precious metal of precious metal whether or not plated or clad with precious metal >> of other precious metal whether or not plated or clad with precious metal of gold of other precious metal whether or not plated or clad with precious metal of gold >> studded with other precious and stones studded with other precious and stones +71131929,articles of jewellery and parts thereof of precious metal or of metal clad with precious metal of precious metal whether or not plated or clad with precious metal >> of other precious metal whether or not plated or clad with precious metal of gold of other precious metal whether or not plated or clad with precious metal of gold >> other other +71131930,"ARTICLES OF JEWELLERY AND PARTS THEREOF, OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Of other precious metal, whether or not plated:Of gold, set with diamonds(OLD tariff)" +71131940,"ARTICLES OF JEWELLERY AND PARTS THEREOF, OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Of other precious metal, whether or not plated:Of gold, set with other precious and semi- precious stones(OLD tariff)" +71131950,"ARTICLES OF JEWELLERY AND PARTS THEREOF, OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Of other precious metal, whether or not plated:Of platinum, unstudded(OLD tariff)" +71131960,"ARTICLES OF JEWELLERY AND PARTS THEREOF, OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Of other precious metal, whether or not plated:Parts" +71131990,"ARTICLES OF JEWELLERY AND PARTS THEREOF, OF PRECIOUS METAL OR OF METAL 
CLAD WITH PRECIOUS METAL:Of other precious metal, whether or not plated:Other" +71132000,"ARTICLES OF JEWELLERY AND PARTS THEREOF, OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL::Of base metal clad with precious metal" +71141110,"ARTICLES OF GOLDSMITH'S OR SILVERSMITH'S WARES AND PARTS THEREOF, OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Of silver, whether or not plated or clad with precious metal :Articles(OLD tariff)" +71141120,"ARTICLES OF GOLDSMITH'S OR SILVERSMITH'S WARES AND PARTS THEREOF, OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Of silver, whether or not plated or clad with precious metal :Parts(OLD tariff)" +71141910,"ARTICLES OF GOLDSMITH'S OR SILVERSMITH'S WARES AND PARTS THEREOF, OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Of other precious metal, whether or not plated or clad with precious metal :Articles of gold(OLD tariff)" +71141920,"ARTICLES OF GOLDSMITH'S OR SILVERSMITH'S WARES AND PARTS THEREOF, OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Of other precious metal, whether or not plated or clad with precious metal :Articles of platinum(OLD tariff)" +71141930,"ARTICLES OF GOLDSMITH'S OR SILVERSMITH'S WARES AND PARTS THEREOF, OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Of other precious metal, whether or not plated or clad with precious metal :Parts(OLD tariff)" +71142010,"ARTICLES OF GOLDSMITH'S OR SILVERSMITH'S WARES AND PARTS THEREOF, OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Of base metal clad with precious metal :Articles clad with gold(OLD tariff)" +71142020,"ARTICLES OF GOLDSMITH'S OR SILVERSMITH'S WARES AND PARTS THEREOF, OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Of base metal clad with precious metal :Other articles(OLD tariff)" +71142030,"ARTICLES OF GOLDSMITH'S OR SILVERSMITH'S WARES AND PARTS THEREOF, OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Of base metal clad with precious metal :Parts(OLD tariff)" +71150000,other articles of precious metal or of metal clad with precious metal +71151000,"OTHER ARTICLES OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL::Catalysts in the from of wire cloth or grill," +71159000,other articles of precious metal or of metal clad with precious metal >> other +71159010,OTHER ARTICLES OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Other:Laboratory and industrial articles of +71159020,OTHER ARTICLES OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Other:Spinneret's made mainly of gold +71159090,OTHER ARTICLES OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Other:Other +71160000,articles of natural or cultured pearls precious or stones natural synthetic or reconstructed +71161000,"ARTICLES OF NATURAL OR CULTURED PEARLS, PRECIOUS OR SEMI-PRECIOUS STONES(NATURAL, SYNTHETIC OR RECONSTRUCTED)::Of natural or cultured pearls" +71162000,"ARTICLES OF NATURAL OR CULTURED PEARLS, PRECIOUS OR SEMI-PRECIOUS STONES(NATURAL, SYNTHETIC OR RECONSTRUCTED)::Of precious or semi-precious stones (natural, synthetic or reconstructed)" +71170000,imitation jewellery of base metal whether or not plated with precious metal +71171100,IMITATION JEWELLERY::Cuff-links and studs +71171900,imitation jewellery of base metal whether or not plated with precious metal >> other +71171910,IMITATION JEWELLERY:Other :Bangles +71171920,IMITATION JEWELLERY:Other :German silver jewellery +71171990,IMITATION JEWELLERY:Other :Other +71179000,imitation jewellery of base metal whether or not plated with precious metal >> other +71179010,IMITATION 
JEWELLERY:Other :Jewellery studded with imitation pearls or imitation or synthetic stones +71179090,IMITATION JEWELLERY:Other :Other +71180000,coin +71181000,"COIN::Coin (other than gold coin), not being legal tender" +71189000,COIN::Other +72010000,pig iron and spiegeleisen in pigs blocks or other primary forms +72011000,"PIG IRON AND SPIEGELEISEN IN PIGS, BLOCKS OR OTHER PRIMARY FORMS::Non-alloy pig iron containing by weight 0.5% or less of phosphorus" +72012000,"PIG IRON AND SPIEGELEISEN IN PIGS, BLOCKS OR OTHER PRIMARY FORMS::Non-alloy pig iron containing by weight more than 0.5% of phosphorus" +72015000,pig iron and spiegeleisen in pigs blocks or other primary forms >> alloy pig iron spiegeleisen +72015010,"PIG IRON AND SPIEGELEISEN IN PIGS, BLOCKS OR OTHER PRIMARY FORMS:Alloy pig iron; spiegeleisen :Cast iron" +72015090,"PIG IRON AND SPIEGELEISEN IN PIGS, BLOCKS OR OTHER PRIMARY FORMS:Alloy pig iron; spiegeleisen :Other" +72020000, +72021100,FERRO-ALLOYS::Containing by weight more than 2% of carbon +72021900,FERRO-ALLOYS::Other +72022100,FERRO-ALLOYS::Containing by weight more than 55% of silicon +72022900,FERRO-ALLOYS::Other +72023000,FERRO-ALLOYS::Ferro-silico-manganese +72024100,FERRO-ALLOYS::Containing by weight more than 4% of carbon +72024900,FERRO-ALLOYS::Other +72025000,FERRO-ALLOYS::Ferro-silico-chromium +72026000,FERRO-ALLOYS::Ferro-nickel +72027000,FERRO-ALLOYS::Ferro-molybdenum +72028000,FERRO-ALLOYS::Ferro tungsten and ferro-silico-tungsten +72029100,FERRO-ALLOYS::Ferro-titanium and Ferro-silico-titanium +72029200,FERRO-ALLOYS::Ferro-vanadium +72029300,FERRO-ALLOYS::Ferro-niobium +72029900, >> other ferro cobalt +72029911,FERRO-ALLOYS:Other :Ferro-phosphorus +72029912,FERRO-ALLOYS:Other :Ferro-selenium +72029913,FERRO-ALLOYS:Other :Ferro-cobalt +72029914,FERRO-ALLOYS:Other :Ferro-columbium +72029915,FERRO-ALLOYS:Other :Ferro-zirconium +72029916,FERRO-ALLOYS:Other :Ferro-tantalum +72029921,FERRO-ALLOYS:Other :Ferro-silico-zirconium +72029922,FERRO-ALLOYS:Other :Ferro-silico-magnesium +72029931,FERRO-ALLOYS:Other :Ferro-boron +72029932,FERRO-ALLOYS:Other :Charge-chrome +72029990,FERRO-ALLOYS:Other :Other +72030000,and other spongy ferrous products in lumps pellets or similar forms iron having minimum purity by weight of in lumps pellets or similar forms +72031000,"FERROUS PRODUCTS OBTAINED BY DIRECT REDUCTION OF IRON ORE AND OTHER SPONGY FERROUS PRODUCTS, IN LUMPS, PELLETS OR SIMILAR FORMS; IRON HAVING MINIMUM PURITY BY WEIGHT OF 99.94%, IN LUMPS, PELLETS OR SIMILAR FORMS::Ferrous products obtained by direct reduction of iron ore" +72039000,"FERROUS PRODUCTS OBTAINED BY DIRECT REDUCTION OF IRON ORE AND OTHER SPONGY FERROUS PRODUCTS, IN LUMPS, PELLETS OR SIMILAR FORMS; IRON HAVING MINIMUM PURITY BY WEIGHT OF 99.94%, IN LUMPS, PELLETS OR SIMILAR FORMS::Other" +72040000,ferrous waste and scrap remelting scrap ingots of iron or steel +72041000,FERROUS WASTE AND SCRAP; REMELTING SCRAP INGOTS::Waste and scrap of cast iron +72042100,ferrous waste and scrap remelting scrap ingots of iron or steel >> of stainless steel +72042110,FERROUS WASTE AND SCRAP; REMELTING SCRAP INGOTS:Of stainless steel :Empty or discharged cartridges of all bores and sizes +72042190,FERROUS WASTE AND SCRAP; REMELTING SCRAP INGOTS:Of stainless steel :Other +72042900,ferrous waste and scrap remelting scrap ingots of iron or steel >> other +72042910,FERROUS WASTE AND SCRAP; REMELTING SCRAP INGOTS:Other :Empty or discharged cartridges of all bores and sizes +72042920,FERROUS WASTE AND SCRAP; REMELTING SCRAP 
INGOTS:Other :Of high speed steel +72042990,FERROUS WASTE AND SCRAP; REMELTING SCRAP INGOTS:Other :Other +72043000,FERROUS WASTE AND SCRAP; REMELTING SCRAP INGOTS::Waste and scrap of tinned iron or steel +72044100,"FERROUS WASTE AND SCRAP; REMELTING SCRAP INGOTS::Turnings, shavings, chips, milling waste, saw dust, fillings, trimmings and stampings, whether or not in bundles" +72044900,FERROUS WASTE AND SCRAP; REMELTING SCRAP INGOTS::Other +72045000,FERROUS WASTE AND SCRAP; REMELTING SCRAP INGOTS::Remelting scrap ingots +72050000, +72051000, >> granules of iron +72051011,"GRANULES AND POWDERS, OF PIG IRON, SPIEGELEISEN, IRON OR STEEL:Granules :Shot and angular grit" +72051012,"GRANULES AND POWDERS, OF PIG IRON, SPIEGELEISEN, IRON OR STEEL:Granules :Wire pellets" +72051019,"GRANULES AND POWDERS, OF PIG IRON, SPIEGELEISEN, IRON OR STEEL:Granules :Other" +72051021,"GRANULES AND POWDERS, OF PIG IRON, SPIEGELEISEN, IRON OR STEEL:Granules :Shot and angular grit" +72051022,"GRANULES AND POWDERS, OF PIG IRON, SPIEGELEISEN, IRON OR STEEL:Granules :Wire pellets" +72051029,"GRANULES AND POWDERS, OF PIG IRON, SPIEGELEISEN, IRON OR STEEL:Granules :Other" +72051090,"GRANULES AND POWDERS, OF PIG IRON, SPIEGELEISEN, IRON OR STEEL:Granules :Other" +72052100,"GRANULES AND POWDERS, OF PIG IRON, SPIEGELEISEN, IRON OR STEEL::Of alloy steel" +72052900, >> other +72052910,"GRANULES AND POWDERS, OF PIG IRON, SPIEGELEISEN, IRON OR STEEL:Other :Of iron" +72052990,"GRANULES AND POWDERS, OF PIG IRON, SPIEGELEISEN, IRON OR STEEL:Other :Other" +72060000,iron and steel in ingots or other primary forms excluding iron of heading 7203 +72061000,iron and steel in ingots or other primary forms excluding iron of heading 7203 >> ingots +72061010,IRON AND NON-ALLOY STEEL IN INGOTS OR OTHER PRIMARY FORMS (EXCLUDING IRON OF HEADING 7203):Ingots :Of iron +72061020,IRON AND NON-ALLOY STEEL IN INGOTS OR OTHER PRIMARY FORMS (EXCLUDING IRON OF HEADING 7203):Ingots :Of high carbon steel +72061090,IRON AND NON-ALLOY STEEL IN INGOTS OR OTHER PRIMARY FORMS (EXCLUDING IRON OF HEADING 7203):Ingots :Other +72069000,iron and steel in ingots or other primary forms excluding iron of heading 7203 >> other of iron +72069011,IRON AND NON-ALLOY STEEL IN INGOTS OR OTHER PRIMARY FORMS (EXCLUDING IRON OF HEADING 7203):Other :Puddled bars and pilings +72069012,"IRON AND NON-ALLOY STEEL IN INGOTS OR OTHER PRIMARY FORMS (EXCLUDING IRON OF HEADING 7203):Other :Blocks, lumps and similar forms" +72069019,IRON AND NON-ALLOY STEEL IN INGOTS OR OTHER PRIMARY FORMS (EXCLUDING IRON OF HEADING 7203):Other :Other +72069091,IRON AND NON-ALLOY STEEL IN INGOTS OR OTHER PRIMARY FORMS (EXCLUDING IRON OF HEADING 7203):Other :Puddled bars and pilings +72069092,"IRON AND NON-ALLOY STEEL IN INGOTS OR OTHER PRIMARY FORMS (EXCLUDING IRON OF HEADING 7203):Other :Blocks, lumps and similar forms" +72069099,IRON AND NON-ALLOY STEEL IN INGOTS OR OTHER PRIMARY FORMS (EXCLUDING IRON OF HEADING 7203):Other :Other +72070000,products of iron or steel containing by weight less than of carbon +72071100,products of iron or steel containing by weight less than of carbon >> of rectangular including square cross section the width measuring less than twice the thickness +72071110,"SEMI-FINISHED PRODUCTS OF IRON OR NON-ALLOY STEEL:Of rectangular (including square) cross-section, the width measuring less than twice the thickness :Electrical quality" +72071120,"SEMI-FINISHED PRODUCTS OF IRON OR NON-ALLOY STEEL:Of rectangular (including square) cross-section, the width measuring less than twice 
the thickness :Forging quality" +72071130,"SEMI-FINISHED PRODUCTS OF IRON OR NON-ALLOY STEEL:Of rectangular (including square) cross-section, the width measuring less than twice the thickness :Seamless steel tube quality" +72071190,"SEMI-FINISHED PRODUCTS OF IRON OR NON-ALLOY STEEL:Of rectangular (including square) cross-section, the width measuring less than twice the thickness :Other" +72071200,products of iron or steel containing by weight less than of carbon >> other of rectangular other than square +72071210,"SEMI-FINISHED PRODUCTS OF IRON OR NON-ALLOY STEEL:Other, of rectangular (other than square) cross-section :Electrical quality" +72071220,"SEMI-FINISHED PRODUCTS OF IRON OR NON-ALLOY STEEL:Other, of rectangular (other than square) cross-section :Forging quality" +72071230,"SEMI-FINISHED PRODUCTS OF IRON OR NON-ALLOY STEEL:Other, of rectangular (other than square) cross-section :Seamless steel tube quality" +72071290,"SEMI-FINISHED PRODUCTS OF IRON OR NON-ALLOY STEEL:Other, of rectangular (other than square) cross-section :Other" +72071900,products of iron or steel containing by weight less than of carbon >> other +72071910,SEMI-FINISHED PRODUCTS OF IRON OR NON-ALLOY STEEL:Other :Forged blanks of non-alloy steel +72071920,SEMI-FINISHED PRODUCTS OF IRON OR NON-ALLOY STEEL:Other :Mild steel billets +72071990,SEMI-FINISHED PRODUCTS OF IRON OR NON-ALLOY STEEL:Other :Other +72072000,products of iron or steel containing by weight less than of carbon >> containing by weight or more of carbon +72072010,SEMI-FINISHED PRODUCTS OF IRON OR NON-ALLOY STEEL:Containing by weight 0.20% or more of carbon :Forging quality +72072020,SEMI-FINISHED PRODUCTS OF IRON OR NON-ALLOY STEEL:Containing by weight 0.20% or more of carbon :Spring steel quality +72072030,SEMI-FINISHED PRODUCTS OF IRON OR NON-ALLOY STEEL:Containing by weight 0.20% or more of carbon :Seamless steel tube quality +72072090,SEMI-FINISHED PRODUCTS OF IRON OR NON-ALLOY STEEL:Containing by weight 0.20% or more of carbon :Other +72080000,products of iron or steel of a width of 600 mm or more hot rolled not clad plated or coated +72081000,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED::In coils, not further worked than hot-rolled, with patterns in relief" +72082500,products of iron or steel of a width of 600 mm or more hot rolled not clad plated or coated >> of a thickness of mm or more +72082510,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 4.75 mm or more :Plates" +72082520,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 4.75 mm or more :Universal plates" +72082530,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 4.75 mm or more :Sheets" +72082540,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 4.75 mm or more :Strip" +72082590,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 4.75 mm or more :Other" +72082600,products of iron or steel of a width of 600 mm or more hot rolled not clad plated or coated >> of a thickness of 3 mm or more but less than mm +72082610,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR 
MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 3 mm or more but less than 4.75 mm :Plates" +72082620,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 3 mm or more but less than 4.75 mm :Universal plates" +72082630,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 3 mm or more but less than 4.75 mm :Sheets" +72082640,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 3 mm or more but less than 4.75 mm :Strip" +72082690,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 3 mm or more but less than 4.75 mm :Other" +72082700,products of iron or steel of a width of 600 mm or more hot rolled not clad plated or coated >> of a thickness of less than 3 mm +72082710,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of less than 3 mm :Plates" +72082720,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of less than 3 mm :Universal plates" +72082730,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of less than 3 mm :Sheets" +72082740,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of less than 3 mm :Strip" +72082790,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of less than 3 mm :Other" +72083600,products of iron or steel of a width of 600 mm or more hot rolled not clad plated or coated >> of a thickness exceeding 10 mm +72083610,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness exceeding 10 mm :Plates" +72083620,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness exceeding 10 mm :Universal plates" +72083630,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness exceeding 10 mm :Sheets" +72083640,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness exceeding 10 mm :Strip" +72083690,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness exceeding 10 mm :Other" +72083700,products of iron or steel of a width of 600 mm or more hot rolled not clad plated or coated >> of a thickness of mm or more but not exceeding 10 mm +72083710,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 4.75 mm or more but not exceeding 10 mm :Plates" +72083720,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 4.75 mm or more but not exceeding 10 mm :Universal plates" +72083730,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR 
COATED:Of a thickness of 4.75 mm or more but not exceeding 10 mm :Sheets" +72083740,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 4.75 mm or more but not exceeding 10 mm :Strip" +72083790,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 4.75 mm or more but not exceeding 10 mm :Other" +72083800,products of iron or steel of a width of 600 mm or more hot rolled not clad plated or coated >> of a thickness of 3 mm or more but less than mm +72083810,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 3 mm or more but less than 4.75 mm :Plates" +72083820,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 3 mm or more but less than 4.75 mm :Universal plates" +72083830,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 3 mm or more but less than 4.75 mm :Sheets" +72083840,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 3 mm or more but less than 4.75 mm :Strip" +72083890,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 3 mm or more but less than 4.75 mm :Other" +72083900,products of iron or steel of a width of 600 mm or more hot rolled not clad plated or coated >> of a thickness of less than 3 mm +72083910,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of less than 3 mm :Plates" +72083920,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of less than 3 mm :Universal plates" +72083930,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of less than 3 mm :Sheets" +72083940,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of less than 3 mm :Strip" +72083990,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of less than 3 mm :Other" +72084000,products of iron or steel of a width of 600 mm or more hot rolled not clad plated or coated >> not in coils not further worked than with patterns in relief +72084010,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Not in coils, not further worked than hot-rolled, with patterns in relief :Plates" +72084020,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Not in coils, not further worked than hot-rolled, with patterns in relief :Universal plates" +72084030,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Not in coils, not further worked than hot-rolled, with patterns in relief :Sheets" +72084040,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Not in coils, not further worked 
than hot-rolled, with patterns in relief :Strip" +72084090,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Not in coils, not further worked than hot-rolled, with patterns in relief :Other" +72085100,products of iron or steel of a width of 600 mm or more hot rolled not clad plated or coated >> of a thickness exceeding 10 mm +72085110,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness exceeding 10 mm :Plates" +72085120,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness exceeding 10 mm :Universal plates" +72085130,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness exceeding 10 mm :Sheets" +72085140,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness exceeding 10 mm :Strip" +72085190,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness exceeding 10 mm :Other" +72085200,products of iron or steel of a width of 600 mm or more hot rolled not clad plated or coated >> of a thickness of mm or more but not exceeding 10 mm +72085210,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 4.75 mm or more but not exceeding 10 mm :Plates" +72085220,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 4.75 mm or more but not exceeding 10 mm :Universal plates" +72085230,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 4.75 mm or more but not exceeding 10 mm :Sheets" +72085240,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 4.75 mm or more but not exceeding 10 mm :Strip" +72085290,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 4.75 mm or more but not exceeding 10 mm :Other" +72085300,products of iron or steel of a width of 600 mm or more hot rolled not clad plated or coated >> of a thickness of 3 mm or more but less than mm +72085310,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 3 mm or more but less than 4.75 mm :Plates" +72085320,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 3 mm or more but less than 4.75 mm :Universal plates" +72085330,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 3 mm or more but less than 4.75 mm :Sheets" +72085340,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 3 mm or more but less than 4.75 mm :Strip" +72085390,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of 3 mm or more but less than 4.75 mm :Other" +72085400,products of iron or steel of a 
width of 600 mm or more hot rolled not clad plated or coated >> of a thickness of less than 3 mm +72085410,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of less than 3 mm :Plates" +72085420,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of less than 3 mm :Universal plates" +72085430,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of less than 3 mm :Sheets" +72085440,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of less than 3 mm :Strip" +72085490,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED:Of a thickness of less than 3 mm :Other" +72089000,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, HOT-ROLLED, NOT CLAD, PLATED OR COATED::Other" +72090000,products of iron or steel of a width of 600 mm or more cold rolled not clad plated or coated in coils not further worked than +72091500,products of iron or steel of a width of 600 mm or more cold rolled not clad plated or coated in coils not further worked than >> of a thickness of 3 mm or more +72091510,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness of 3 mm or more :Plates" +72091520,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness of 3 mm or more :Sheets" +72091530,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness of 3 mm or more :Strip" +72091590,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness of 3 mm or more :Other" +72091600,products of iron or steel of a width of 600 mm or more cold rolled not clad plated or coated in coils not further worked than >> of a thickness exceeding 1 mm but less than 3 mm +72091610,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness exceeding 1 mm but less than 3 mm :Plates" +72091620,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness exceeding 1 mm but less than 3 mm :Sheets" +72091630,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness exceeding 1 mm but less than 3 mm :Strip" +72091690,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness exceeding 1 mm but less than 3 mm :Other" +72091700,products of iron or steel of a width of 600 mm or more cold rolled not clad plated or coated in coils not further worked than >> of a thickness of mm or more but not exceeding 1 mm +72091710,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness of 0.5 mm or more but not exceeding 1 mm :Plates" 
+72091720,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness of 0.5 mm or more but not exceeding 1 mm :Sheets" +72091730,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness of 0.5 mm or more but not exceeding 1 mm :Strip" +72091790,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness of 0.5 mm or more but not exceeding 1 mm :Other" +72091800,products of iron or steel of a width of 600 mm or more cold rolled not clad plated or coated in coils not further worked than >> of a thickness of less than mm +72091810,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness of less than 0.5 mm :Plates" +72091820,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness of less than 0.5 mm :Sheets" +72091830,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness of less than 0.5 mm :Strip" +72091890,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness of less than 0.5 mm :Other" +72092500,products of iron or steel of a width of 600 mm or more cold rolled not clad plated or coated in coils not further worked than >> of a thickness of 3 mm or more +72092510,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness of 3 mm or more :Plates" +72092520,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness of 3 mm or more :Sheets" +72092530,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness of 3 mm or more :Strip" +72092590,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness of 3 mm or more :Other" +72092600,products of iron or steel of a width of 600 mm or more cold rolled not clad plated or coated in coils not further worked than >> of a thickness exceeding 1mm but less than 3 mm +72092610,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness excddeing 1 mm but less than 3 mm:Plate" +72092620,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness excddeing 1 mm but less than 3 mm:Sheets" +72092630,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness excddeing 1 mm but less than 3 mm:Strip" +72092690,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness excddeing 1 mm but less than 3 mm:Other" +72092700,products of iron or steel of a width of 600 mm or more cold rolled not clad plated or coated 
in coils not further worked than >> of a thickness of mm or more but not exceeding 1 mm +72092710,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness of 0.5 mm or more but not exceeding 1 mm :Plates" +72092720,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness of 0.5 mm or more but not exceeding 1 mm :Sheets" +72092730,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness of 0.5 mm or more but not exceeding 1 mm :Strip" +72092790,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness of 0.5 mm or more but not exceeding 1 mm :Other" +72092800,products of iron or steel of a width of 600 mm or more cold rolled not clad plated or coated in coils not further worked than >> of a thickness of less than mm +72092810,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness of less than 0.5 mm :Plates" +72092820,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness of less than 0.5 mm :Sheets" +72092830,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness of less than 0.5 mm :Strip" +72092890,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED:Of a thickness of less than 0.5 mm :Other" +72099000,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, COLD-ROLLED (COLD-REDUCED), NOT CLAD, PLATED OR COATED::Other" +72100000,products of iron or steel of width of 600 mm or more clad plated or coated plated or coated with tin +72101100,products of iron or steel of width of 600 mm or more clad plated or coated plated or coated with tin >> of a thickness of mm or more +72101110,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, CLAD, PLATED OR COATED:Of a thickness of 0.5 mm or more :OTS/MR Type" +72101190,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, CLAD, PLATED OR COATED:Of a thickness of 0.5 mm or more :Other" +72101200,products of iron or steel of width of 600 mm or more clad plated or coated plated or coated with tin >> of a thickness of less than mm +72101210,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, CLAD, PLATED OR COATED:Of a thickness of less than 0.5 mm:OTS/MR Type" +72101290,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, CLAD, PLATED OR COATED:Of a thickness of less than 0.5 mm:Other" +72102000,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, CLAD, PLATED OR COATED::Plated or coated with lead, including terne-plate" +72103000,products of iron or steel of width of 600 mm or more clad plated or coated plated or coated with tin >> electrolytically plated or coated with zinc +72103010,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, CLAD, PLATED OR COATED:Electrolytically plated or coated with zinc :Corrugated" +72103090,"FLAT-ROLLED 
PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, CLAD, PLATED OR COATED:Electrolytically plated or coated with zinc :Other(OLD tariff)" +72103091,products of iron or steel of width of 600 mm or more clad plated or coated plated or coated with tin >> electrolytically plated or coated with zinc >> galvannealed +72103099,products of iron or steel of width of 600 mm or more clad plated or coated plated or coated with tin >> electrolytically plated or coated with zinc >> other otherwise plated or coated with zinc +72104100,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, CLAD, PLATED OR COATED::Corrugated" +72104900,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, CLAD, PLATED OR COATED::Other" +72104910,products of iron or steel of width of 600 mm or more clad plated or coated plated or coated with tin >> other >> galvannealed +72104990,products of iron or steel of width of 600 mm or more clad plated or coated plated or coated with tin >> other >> other +72105000,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, CLAD, PLATED OR COATED::Plated or coated with chromium oxides or with chromium and chromium oxides" +72106100,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, CLAD, PLATED OR COATED::Plated or coated with aluminium-zinc alloys" +72106900,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, CLAD, PLATED OR COATED::Other" +72107000,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, CLAD, PLATED OR COATED::Painted, varnished or coated with plastics" +72109000,products of iron or steel of width of 600 mm or more clad plated or coated plated or coated with tin >> other +72109010,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, CLAD, PLATED OR COATED:Other :Lacquered" +72109090,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE, CLAD, PLATED OR COATED:Other :Other" +72110000,products of iron or steel of a width of less than 600 mm not clad plated or coated not further worked than +72111300,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED::Rolled on four faces or in a closed box pass, of a width exceeding 150 mm and a thickness of not less than 4 mm, not in coils and without patterns in relief" +72111400,products of iron or steel of a width of less than 600 mm not clad plated or coated not further worked than >> other of a thickness of mm or more +72111410,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Other, of a thickness of 4.75 mm or more :Flats" +72111420,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Other, of a thickness of 4.75 mm or more :Universal plates" +72111430,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Other, of a thickness of 4.75 mm or more :Hoops" +72111440,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Other, of a thickness of 4.75 mm or more :Sheets" +72111450,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Other, of a thickness of 4.75 mm or more :Strip" +72111460,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, 
PLATED OR COATED:Other, of a thickness of 4.75 mm or more :Skelp" +72111490,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Other, of a thickness of 4.75 mm or more :Other" +72111900,products of iron or steel of a width of less than 600 mm not clad plated or coated not further worked than >> other +72111910,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Other :Flats" +72111920,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Other :Universal plates" +72111930,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Other :Hoops" +72111940,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Other :Sheets" +72111950,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Other :Strip" +72111960,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Other :Skelp" +72111990,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Other :Other" +72112300,products of iron or steel of a width of less than 600 mm not clad plated or coated not further worked than >> containing by weight less than of carbon +72112310,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Containing by weight less than 0.20% of carbon:Flats" +72112320,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Containing by weight less than 0.20% of carbon:Universal plates" +72112330,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Containing by weight less than 0.20% of carbon:Hoops" +72112340,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Containing by weight less than 0.20% of carbon:Sheets" +72112350,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Containing by weight less than 0.20% of carbon:Strip" +72112390,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Containing by weight less than 0.20% of carbon:Other" +72112900,products of iron or steel of a width of less than 600 mm not clad plated or coated not further worked than >> other +72112910,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Other :Flats" +72112920,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Other :Universal plates" +72112930,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Other :Hoops" +72112940,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Other :Sheets" +72112950,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Other :Strip" +72112960,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Other :Skelp" +72112990,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, 
PLATED OR COATED:Other :Other" +72119000,products of iron or steel of a width of less than 600 mm not clad plated or coated not further worked than >> other universal plates +72119011,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Other :Of boiler quality" +72119012,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Other :Of high tensile quality" +72119013,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Other :Of ship building quality" +72119090,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, NOT CLAD, PLATED OR COATED:Other :Other" +72120000,products of iron or steel of a width of less than 600 mm clad plated or coated +72121000,products of iron or steel of a width of less than 600 mm clad plated or coated >> plated or coated with tin +72121010,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, CLAD, PLATED OR COATED:Plated or coated with tin :OTS or MR type" +72121090,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, CLAD, PLATED OR COATED:Plated or coated with tin :Other" +72122000,products of iron or steel of a width of less than 600 mm clad plated or coated >> electrolytically plated or coated with zinc +72122010,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, CLAD, PLATED OR COATED:Electrolytically plated or coated with zinc :Corrugated" +72122090,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, CLAD, PLATED OR COATED:Electrolytically plated or coated with zinc :Other(OLD tariff)" +72122091,products of iron or steel of a width of less than 600 mm clad plated or coated >> electrolytically plated or coated with zinc >> galvannealed +72123000,products of iron or steel of a width of less than 600 mm clad plated or coated >> otherwise plated or coated with zinc +72123010,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, CLAD, PLATED OR COATED:Otherwise plated or coated with zinc :Corrugated" +72123090,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, CLAD, PLATED OR COATED:Otherwise plated or coated with zinc :Other(OLD tariff)" +72123091,products of iron or steel of a width of less than 600 mm clad plated or coated >> otherwise plated or coated with zinc >> galvannealed +72123099,products of iron or steel of a width of less than 600 mm clad plated or coated >> otherwise plated or coated with zinc >> other +72124000,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, CLAD, PLATED OR COATED::Painted, varnished or coated with plastics" +72125000,products of iron or steel of a width of less than 600 mm clad plated or coated >> otherwise plated or coated +72125010,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, CLAD, PLATED OR COATED:Otherwise plated or coated :Plated or coated with lead" +72125020,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, CLAD, PLATED OR COATED:Otherwise plated or coated :Lacquered" +72125030,products of iron or steel of a width of less than 600 mm clad plated or coated >> otherwise plated or coated >> plated or coated with aluminium +72125040,products of iron or steel of a width of less than 600 mm clad plated or coated >> otherwise plated or coated >> plated or coated 
with alloys +72125090,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, CLAD, PLATED OR COATED:Otherwise plated or coated :Other" +72126000,"FLAT-ROLLED PRODUCTS OF IRON OR NON-ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM, CLAD, PLATED OR COATED::Clad" +72130000,bars and rods in irregularly wound coils of iron or steel +72131000,bars and rods in irregularly wound coils of iron or steel >> containing indentations ribs grooves or other deformations produced during the rolling process +72131010,"BARS AND RODS, HOT-ROLLED, IN IRREGULARLY WOUND COILS, OF IRON OR NON-ALLOY STEEL:Containing indentations, ribs, grooves or other deformations produced during the rolling process :Of free cutting steel" +72131090,"BARS AND RODS, HOT-ROLLED, IN IRREGULARLY WOUND COILS, OF IRON OR NON-ALLOY STEEL:Containing indentations, ribs, grooves or other deformations produced during the rolling process :Other" +72132000,bars and rods in irregularly wound coils of iron or steel >> other of steel +72132010,"BARS AND RODS, HOT-ROLLED, IN IRREGULARLY WOUND COILS, OF IRON OR NON-ALLOY STEEL:Other, of free-cutting steel :Electrode quality" +72132020,"BARS AND RODS, HOT-ROLLED, IN IRREGULARLY WOUND COILS, OF IRON OR NON-ALLOY STEEL:Other, of free-cutting steel :Cold heading quality" +72132090,"BARS AND RODS, HOT-ROLLED, IN IRREGULARLY WOUND COILS, OF IRON OR NON-ALLOY STEEL:Other, of free-cutting steel :Other" +72139100,bars and rods in irregularly wound coils of iron or steel >> of circular measuring less than 14 mm in diameter +72139110,"BARS AND RODS, HOT-ROLLED, IN IRREGULARLY WOUND COILS, OF IRON OR NON-ALLOY STEEL:Of circular cross-section measuring less than 14 mm in diameter :Electrode quality" +72139120,"BARS AND RODS, HOT-ROLLED, IN IRREGULARLY WOUND COILS, OF IRON OR NON-ALLOY STEEL:Of circular cross-section measuring less than 14 mm in diameter :Cold heading quality" +72139190,"BARS AND RODS, HOT-ROLLED, IN IRREGULARLY WOUND COILS, OF IRON OR NON-ALLOY STEEL:Of circular cross-section measuring less than 14 mm in diameter :Other" +72139900,bars and rods in irregularly wound coils of iron or steel >> other +72139910,"BARS AND RODS, HOT-ROLLED, IN IRREGULARLY WOUND COILS, OF IRON OR NON-ALLOY STEEL:Other :Electrode quality" +72139920,"BARS AND RODS, HOT-ROLLED, IN IRREGULARLY WOUND COILS, OF IRON OR NON-ALLOY STEEL:Other :Cold heading quality" +72139990,"BARS AND RODS, HOT-ROLLED, IN IRREGULARLY WOUND COILS, OF IRON OR NON-ALLOY STEEL:Other :Other" +72140000,other bars and rods of iron or steel not further worked than forged hot rolled or but including those twisted after rolling +72141000,other bars and rods of iron or steel not further worked than forged hot rolled or but including those twisted after rolling >> forged +72141010,"OTHER BARS AND RODS OF IRON OR NON-ALLOY STEEL, NOT FURTHER WORKED THAN FORGED, HOT-ROLLED, HOT-DRAWN OR HOT-EXTRUDED, BUT INCLUDING THOSE TWISTED AFTER ROLLING:Forged :Spring steel quality" +72141090,"OTHER BARS AND RODS OF IRON OR NON-ALLOY STEEL, NOT FURTHER WORKED THAN FORGED, HOT-ROLLED, HOT-DRAWN OR HOT-EXTRUDED, BUT INCLUDING THOSE TWISTED AFTER ROLLING:Forged :Other" +72142000,other bars and rods of iron or steel not further worked than forged hot rolled or but including those twisted after rolling >> containing indentations ribs grooves or other deformations produced during the rolling process or twisted after rolling +72142010,"OTHER BARS AND RODS OF IRON OR NON-ALLOY STEEL, NOT FURTHER WORKED THAN FORGED, HOT-ROLLED, HOT-DRAWN OR HOT-EXTRUDED, 
BUT INCLUDING THOSE TWISTED AFTER ROLLING:Containing indentations, ribs, grooves or other deformations produced during the rolling process or twisted after rolling :Spring steel quality" +72142090,"OTHER BARS AND RODS OF IRON OR NON-ALLOY STEEL, NOT FURTHER WORKED THAN FORGED, HOT-ROLLED, HOT-DRAWN OR HOT-EXTRUDED, BUT INCLUDING THOSE TWISTED AFTER ROLLING:Containing indentations, ribs, grooves or other deformations produced during the rolling process or twisted after rolling :Other" +72143000,"OTHER BARS AND RODS OF IRON OR NON-ALLOY STEEL, NOT FURTHER WORKED THAN FORGED, HOT-ROLLED, HOT-DRAWN OR HOT-EXTRUDED, BUT INCLUDING THOSE TWISTED AFTER ROLLING::Other, of free cutting steel" +72149100,other bars and rods of iron or steel not further worked than forged hot rolled or but including those twisted after rolling >> of rectangular other than square +72149110,"OTHER BARS AND RODS OF IRON OR NON-ALLOY STEEL, NOT FURTHER WORKED THAN FORGED, HOT-ROLLED, HOT-DRAWN OR HOT-EXTRUDED, BUT INCLUDING THOSE TWISTED AFTER ROLLING:Of rectangular (other than square) cross-section :Mild steel bright bar" +72149190,"OTHER BARS AND RODS OF IRON OR NON-ALLOY STEEL, NOT FURTHER WORKED THAN FORGED, HOT-ROLLED, HOT-DRAWN OR HOT-EXTRUDED, BUT INCLUDING THOSE TWISTED AFTER ROLLING:Of rectangular (other than square) cross-section :Other" +72149900,other bars and rods of iron or steel not further worked than forged hot rolled or but including those twisted after rolling >> other +72149910,"OTHER BARS AND RODS OF IRON OR NON-ALLOY STEEL, NOT FURTHER WORKED THAN FORGED, HOT-ROLLED, HOT-DRAWN OR HOT-EXTRUDED, BUT INCLUDING THOSE TWISTED AFTER ROLLING:Other :Of spring steel quality" +72149990,"OTHER BARS AND RODS OF IRON OR NON-ALLOY STEEL, NOT FURTHER WORKED THAN FORGED, HOT-ROLLED, HOT-DRAWN OR HOT-EXTRUDED, BUT INCLUDING THOSE TWISTED AFTER ROLLING:Other :Other" +72150000,other bars and rods of iron or steel +72151000,"OTHER BARS AND RODS OF IRON OR NON-ALLOY STEEL::Of free-cutting steel, not further worked than cold-formed or cold-finished" +72155000,other bars and rods of iron or steel >> other not further worked than or +72155010,"OTHER BARS AND RODS OF IRON OR NON-ALLOY STEEL:Other, not further worked than cold-formed or cold-finished :Mild steel bright bar" +72155090,"OTHER BARS AND RODS OF IRON OR NON-ALLOY STEEL:Other, not further worked than cold-formed or cold-finished :Other" +72159000,other bars and rods of iron or steel >> other +72159010,OTHER BARS AND RODS OF IRON OR NON-ALLOY STEEL:Other :Plated or coated with zinc +72159020,OTHER BARS AND RODS OF IRON OR NON-ALLOY STEEL:Other :Plated or coated with other base metals +72159090,OTHER BARS AND RODS OF IRON OR NON-ALLOY STEEL:Other :Other +72160000,angles shapes and sections of iron or non alloy steel +72161000,"ANGLES, SHAPES AND SECTIONS OF IRON OR NON-ALLOY STEEL::U, I or H sections, not further worked than hot-rolled, hot-drawn or extruded, of a height of less than 80 mm" +72162100,"ANGLES, SHAPES AND SECTIONS OF IRON OR NON-ALLOY STEEL::L sections" +72162200,"ANGLES, SHAPES AND SECTIONS OF IRON OR NON-ALLOY STEEL::T sections" +72163100,"ANGLES, SHAPES AND SECTIONS OF IRON OR NON-ALLOY STEEL::U sections" +72163200,"ANGLES, SHAPES AND SECTIONS OF IRON OR NON-ALLOY STEEL::I sections" +72163300,"ANGLES, SHAPES AND SECTIONS OF IRON OR NON-ALLOY STEEL::H sections" +72164000,"ANGLES, SHAPES AND SECTIONS OF IRON OR NON-ALLOY STEEL::L or T sections, not further worked than hot-rolled, hot-drawn or extruded, of a height of 80 mm or more" +72165000,"ANGLES, 
SHAPES AND SECTIONS OF IRON OR NON-ALLOY STEEL::Other angles, shapes and sections, not further worked than hot-rolled, hot-drawn or extruded" +72166100,"ANGLES, SHAPES AND SECTIONS OF IRON OR NON-ALLOY STEEL::Obtained from flat-rolled products" +72166900,"ANGLES, SHAPES AND SECTIONS OF IRON OR NON-ALLOY STEEL::Other" +72169100,"ANGLES, SHAPES AND SECTIONS OF IRON OR NON-ALLOY STEEL::Cold-formed or cold-finished from flat rolled products" +72169900,angles shapes and sections of iron or non alloy steel >> other other +72169910,"ANGLES, SHAPES AND SECTIONS OF IRON OR NON-ALLOY STEEL:Other :Plated or coated with zinc" +72169920,"ANGLES, SHAPES AND SECTIONS OF IRON OR NON-ALLOY STEEL:Other :Plated or coated with base metals other than zinc" +72169930,"ANGLES, SHAPES AND SECTIONS OF IRON OR NON-ALLOY STEEL:Other :Slotted angles and slotted channels" +72169940,"ANGLES, SHAPES AND SECTIONS OF IRON OR NON-ALLOY STEEL:Other :Forged" +72169990,"ANGLES, SHAPES AND SECTIONS OF IRON OR NON-ALLOY STEEL:Other :Other" +72170000,wire of iron or steel wire of iron or steel +72171000,wire of iron or steel wire of iron or steel >> not plated or coated whether or not polished not plated or coated whether or not polished +72171010,"WIRE OF IRON OR NON-ALLOY STEEL:Not plated or coated, whether or not polished :Of a thickness of 18 SWG and below" +72171020,"WIRE OF IRON OR NON-ALLOY STEEL:Not plated or coated, whether or not polished :Of a thickness above 18 SWG but up to 26 SWG" +72171030,"WIRE OF IRON OR NON-ALLOY STEEL:Not plated or coated, whether or not polished :Of a thickness above 26 SWG" +72172000,wire of iron or steel wire of iron or steel >> plated or coated with zinc plated or coated with zinc +72172010,WIRE OF IRON OR NON-ALLOY STEEL:Plated or coated with zinc :Of a thickness of 18 SWG and below +72172020,WIRE OF IRON OR NON-ALLOY STEEL:Plated or coated with zinc :Of a thickness above 18 SWG but up to 26 SWG +72172030,WIRE OF IRON OR NON-ALLOY STEEL:Plated or coated with zinc :Of a thickness above 26 SWG +72173000,wire of iron or steel wire of iron or steel >> plated or coated with other base metals plated or coated with other base metals +72173010,WIRE OF IRON OR NON-ALLOY STEEL:Plated or coated with other base metals :Of a thickness of 18 SWG and below +72173020,WIRE OF IRON OR NON-ALLOY STEEL:Plated or coated with other base metals :Of a thickness above 18 SWG but up to 26 SWG +72173030,WIRE OF IRON OR NON-ALLOY STEEL:Plated or coated with other base metals :Of a thickness above 26 SWG +72179000,wire of iron or steel wire of iron or steel >> other than 80 mm l or t sections not further worked than hot rolled or extruded of a height of less than 80 mm other shaped and profiled wire +72179011,WIRE OF IRON OR NON-ALLOY STEEL:Other :Of cross section - half round +72179012,WIRE OF IRON OR NON-ALLOY STEEL:Other :Of cross section - flat and rectangular +72179013,WIRE OF IRON OR NON-ALLOY STEEL:Other :Of cross section - z'shaped +72179019,WIRE OF IRON OR NON-ALLOY STEEL:Other :Of cross section - other shapes +72179091,WIRE OF IRON OR NON-ALLOY STEEL:Other :High tensile quality +72179092,WIRE OF IRON OR NON-ALLOY STEEL:Other :Electrode quality +72179093,WIRE OF IRON OR NON-ALLOY STEEL:Other :Electric resistance wire (including electric resistance heating wire) +72179099,WIRE OF IRON OR NON-ALLOY STEEL:Other :Other +72180000,stainless steel in ingots or other primary forms products of stainless steel stainless steel in ingots or other primary forms products of stainless steel +72181000,STAINLESS STEEL IN 
INGOTS OR OTHER PRIMARY FORMS; SEMI-FINISHED PRODUCTS OF STAINLESS STEEL::Ingots and other primary forms +72189100,STAINLESS STEEL IN INGOTS OR OTHER PRIMARY FORMS; SEMI-FINISHED PRODUCTS OF STAINLESS STEEL::Of rectangular (other than square) cross-section +72189900,stainless steel in ingots or other primary forms products of stainless steel stainless steel in ingots or other primary forms products of stainless steel >> other other +72189910,STAINLESS STEEL IN INGOTS OR OTHER PRIMARY FORMS; SEMI-FINISHED PRODUCTS OF STAINLESS STEEL:Other :Billets +72189990,STAINLESS STEEL IN INGOTS OR OTHER PRIMARY FORMS; SEMI-FINISHED PRODUCTS OF STAINLESS STEEL:Other :Other +72190000,products of stainless steel of a width of 600 mm or more not further worked than in coils products of stainless steel of a width of 600 mm or more not further worked than in coils +72191100,products of stainless steel of a width of 600 mm or more not further worked than in coils products of stainless steel of a width of 600 mm or more not further worked than in coils >> of a thickness exceeding 10 mm chromium type of a thickness exceeding 10 mm chromium type +72191111,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness exceeding 10 mm :Not exceeding 14 mm" +72191112,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness exceeding 10 mm :Exceeding 14 mm" +72191190,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness exceeding 10 mm :Other" +72191200,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE::Of a thickness of 4.75 mm or more but not exceeding 10 mm" +72191300,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE::Of a thickness of 3 mm or more but less than 4.75 mm" +72191400,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE::Of a thickness of less than 3 mm" +72192100,products of stainless steel of a width of 600 mm or more not further worked than in coils products of stainless steel of a width of 600 mm or more not further worked than in coils >> of a thickness exceeding 10 mm universal plates of stainless steel or heat resisting steel and chromium type of a thickness exceeding 10 mm universal plates of stainless steel or heat resisting steel and chromium type +72192111,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness exceeding 10 mm :Not exceeding 14 mm" +72192112,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness exceeding 10 mm :Exceeding 14 mm" +72192121,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness exceeding 10 mm :Not exceeding 14 mm" +72192122,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness exceeding 10 mm :Exceeding 14 mm" +72192131,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness exceeding 10 mm :Not exceeding 14 mm" +72192132,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness exceeding 10 mm :Exceeding 14 mm" +72192141,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness exceeding 10 mm :Not exceeding 14 mm" +72192142,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness exceeding 10 mm :Exceeding 14 mm" +72192190,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness exceeding 10 mm :Other" +72192200,products of stainless steel of a width 
of 600 mm or more not further worked than in coils products of stainless steel of a width of 600 mm or more not further worked than in coils >> of a thickness of mm or more but not exceeding 10 mm universal plates of stainless steel or heat resisting steel +72192211,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of 4.75 mm or more but not exceeding 10 mm :Chromium type" +72192212,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of 4.75 mm or more but not exceeding 10 mm :Nickel chromium austenitic type" +72192219,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of 4.75 mm or more but not exceeding 10 mm :Other" +72192291,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of 4.75 mm or more but not exceeding 10 mm :Chromium type" +72192292,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of 4.75 mm or more but not exceeding 10 mm :Nickel chromium austenitic type" +72192299,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of 4.75 mm or more but not exceeding 10 mm :Other" +72192300,products of stainless steel of a width of 600 mm or more not further worked than in coils products of stainless steel of a width of 600 mm or more not further worked than in coils >> of a thickness of 3 mm or more but less than mm +72192310,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of 3 mm or more but less than 4.75 mm :Chromium type" +72192320,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of 3 mm or more but less than 4.75 mm :Nickel chromium austenitic type" +72192390,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of 3 mm or more but less than 4.75 mm :Other" +72192400,products of stainless steel of a width of 600 mm or more not further worked than in coils products of stainless steel of a width of 600 mm or more not further worked than in coils >> of a thickness of less than 3 mm chromium type of a thickness +72192411,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of less than 3 mm :Below 0.35 mm" +72192412,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of less than 3 mm :0.35 mm and above but below 0.56 mm" +72192413,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of less than 3 mm :0.56 mm and above but below 0.90 mm" +72192419,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of less than 3 mm :Other" +72192421,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of less than 3 mm :Below 0.35 mm" +72192422,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of less than 3 mm :0.35 mm and above but below 0.56 mm" +72192423,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of less than 3 mm :0.56 mm and above but below 0.90 mm" +72192429,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of less than 3 mm :Other" +72192490,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of less than 3 mm :Other" +72193100,products of stainless steel of a width of 600 mm or more not further worked than in coils products of stainless steel of a width of 600 mm or more 
not further worked than in coils >> of a thickness of mm or more chromium type +72193111,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of 4.75 mm or more :Not exceeding 14 mm" +72193112,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of 4.75 mm or more :Exceeding 14 mm" +72193121,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of 4.75 mm or more :Not exceeding 14 mm" +72193122,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of 4.75 mm or more :Exceeding 14 mm" +72193190,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of 4.75 mm or more :Other" +72193200,products of stainless steel of a width of 600 mm or more not further worked than in coils products of stainless steel of a width of 600 mm or more not further worked than in coils >> of a thickness of 3 mm or more but less than mm +72193210,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of 3 mm or more but less than 4.75 mm :Chromium type" +72193220,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of 3 mm or more but less than 4.75 mm :Nickel chromium austenitic type" +72193290,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of 3 mm or more but less than 4.75 mm :Other" +72193300,products of stainless steel of a width of 600 mm or more not further worked than in coils products of stainless steel of a width of 600 mm or more not further worked than in coils >> of a thickness exceeding 1 mm but less than 3 mm +72193310,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness exceeding 1 mm but less than 3 mm :Chromium type" +72193320,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness exceeding 1 mm but less than 3 mm :Nickel chromium austenitic type" +72193390,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness exceeding 1 mm but less than 3 mm :Other" +72193400,products of stainless steel of a width of 600 mm or more not further worked than in coils products of stainless steel of a width of 600 mm or more not further worked than in coils >> of a thickness of mm or more but not exceeding 1 mm +72193410,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of 0.5 mm or more but not exceeding 1 mm :Chromium type" +72193420,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of 0.5 mm or more but not exceeding 1 mm :Nickel chromium austenitic type" +72193490,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of 0.5 mm or more but not exceeding 1 mm :Other" +72193500,products of stainless steel of a width of 600 mm or more not further worked than in coils products of stainless steel of a width of 600 mm or more not further worked than in coils >> of a thickness of less than mm +72193510,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of less than 0.5 mm :Chromium type" +72193520,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of less than 0.5 mm :Nickel chromium austenitic type" +72193590,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Of a thickness of less than 0.5 mm :Other" +72199000,products of stainless steel of a width of 600 mm or more not 
further worked than in coils products of stainless steel of a width of 600 mm or more not further worked than in coils >> other sheets and plates +72199011,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Other :Thickness more than 4.75 mm" +72199012,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Other :Thickness 3 mm to 4.75 mm" +72199013,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Other :Thickness less than 3 mm" +72199090,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF 600 MM OR MORE:Other :Other" +72200000,products of stainless steel of a width of less than 600 mm not further worked than +72201100,products of stainless steel of a width of less than 600 mm not further worked than >> of a thickness of mm or more +72201110,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF LESS THAN 600 MM:Of a thickness of 4.75 mm or more :Skelp for pipes and tubes" +72201121,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF LESS THAN 600 MM:Of a thickness of 4.75 mm or more :Chromium type" +72201122,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF LESS THAN 600 MM:Of a thickness of 4.75 mm or more :Nickel chromium austenitic type" +72201129,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF LESS THAN 600 MM:Of a thickness of 4.75 mm or more :Other" +72201190,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF LESS THAN 600 MM:Of a thickness of 4.75 mm or more :Other" +72201200,products of stainless steel of a width of less than 600 mm not further worked than >> of a thickness of less than mm +72201210,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF LESS THAN 600 MM:Of a thickness of less than 4.75 mm :Skelp for pipes and tubes" +72201221,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF LESS THAN 600 MM:Of a thickness of less than 4.75 mm :Chromium type" +72201222,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF LESS THAN 600 MM:Of a thickness of less than 4.75 mm :Nickel chromium austenitic type" +72201229,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF LESS THAN 600 MM:Of a thickness of less than 4.75 mm :Other" +72201290,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF LESS THAN 600 MM:Of a thickness of less than 4.75 mm :Other" +72202000,products of stainless steel of a width of less than 600 mm not further worked than >> not further worked than cold reduced +72202010,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF LESS THAN 600 MM:Not further worked than cold-rolled (cold- reduced) :Skelp for pipes and tubes" +72202021,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF LESS THAN 600 MM:Not further worked than cold-rolled (cold- reduced) :Chromium type" +72202022,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF LESS THAN 600 MM:Not further worked than cold-rolled (cold- reduced) :Nickel chromium austenitic type" +72202029,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF LESS THAN 600 MM:Not further worked than cold-rolled (cold- reduced) :Other" +72202090,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF LESS THAN 600 MM:Not further worked than cold-rolled (cold- reduced) :Other" +72209000,products of stainless steel of a width of less than 600 mm not further worked than >> other +72209010,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF LESS THAN 600 MM:Other :Skelp (strips for pipes and tubes)" +72209021,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF LESS THAN 600 MM:Other :Chromium type" 
+72209022,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF LESS THAN 600 MM:Other :Nickel chromium austenitic type" +72209029,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF LESS THAN 600 MM:Other :Other" +72209090,"FLAT-ROLLED PRODUCTS OF STAINLESS STEEL, OF A WIDTH OF LESS THAN 600 MM:Other :Other" +72210000,bars and rods in irregularlywound coils of stainless steel of stainless steel bright bars +72210011,"BARS AND RODS, HOT-ROLLED, IN IRREGULARLY WOUND COILS, OF STAINLESS STEEL:Bars and rods, hot-rolled, in irregularly wound coils, of stainless steel :Chromium type" +72210012,"BARS AND RODS, HOT-ROLLED, IN IRREGULARLY WOUND COILS, OF STAINLESS STEEL:Bars and rods, hot-rolled, in irregularly wound coils, of stainless steel :Nickel chromium austenitic type" +72210019,"BARS AND RODS, HOT-ROLLED, IN IRREGULARLY WOUND COILS, OF STAINLESS STEEL:Bars and rods, hot-rolled, in irregularly wound coils, of stainless steel :Other" +72210090,"BARS AND RODS, HOT-ROLLED, IN IRREGULARLY WOUND COILS, OF STAINLESS STEEL:Bars and rods, hot-rolled, in irregularly wound coils, of stainless steel :Other" +72220000,other bars and rods of stainless steel angles shapes and sections of stainless steel bars and rods not further worked than or extruded +72221100,other bars and rods of stainless steel angles shapes and sections of stainless steel bars and rods not further worked than or extruded >> of circular cross section bright bars +72221111,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Of circular cross section :Chromium type" +72221112,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Of circular cross section :Nickel chromium austenitic type" +72221119,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Of circular cross section :Other" +72221191,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Of circular cross section :Chromium type" +72221192,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Of circular cross section :Nickel chromium austenitic type" +72221199,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Of circular cross section :Other" +72221900,other bars and rods of stainless steel angles shapes and sections of stainless steel bars and rods not further worked than or extruded >> other bright bars +72221911,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Other :Chromium type" +72221912,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Other :Nickel chromium austenitic type" +72221919,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Other :Other" +72221991,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Other :Chromium type" +72221992,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Other :Nickel chromium austenitic type" +72221999,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Other :Other" +72222000,other bars and rods of stainless steel angles shapes and sections of stainless steel bars and rods not further worked than or extruded >> bars and rods not further worked than or bright bars +72222011,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Bars and rods, not further worked than 
cold-formed or cold-finished :Chromium type" +72222012,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Bars and rods, not further worked than cold-formed or cold-finished :Nickel chromium austenitic type" +72222019,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Bars and rods, not further worked than cold-formed or cold-finished :Other" +72222091,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Bars and rods, not further worked than cold-formed or cold-finished :Chromium type" +72222092,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Bars and rods, not further worked than cold-formed or cold-finished :Nickel chromium austenitic type" +72222099,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Bars and rods, not further worked than cold-formed or cold-finished :Other" +72223000,other bars and rods of stainless steel angles shapes and sections of stainless steel bars and rods not further worked than or extruded >> other bars and rods bright bars +72223011,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Other bars and rods :Chromium type" +72223012,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Other bars and rods :Nickel chromium austenitic type" +72223019,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Other bars and rods :Other" +72223091,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Other bars and rods :Chromium type" +72223092,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Other bars and rods :Nickel chromium austenitic type" +72223099,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Other bars and rods :Other" +72224000,other bars and rods of stainless steel angles shapes and sections of stainless steel bars and rods not further worked than or extruded >> angles shapes and sections +72224010,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Angles, shapes and sections :Of thickness of 80 mm and above" +72224020,"OTHER BARS AND RODS OF STAINLESS STEEL; ANGLES, SHAPES AND SECTIONS OF STAINLESS STEEL:Angles, shapes and sections :Of below 80 mm" +72230000,wire of stainless steel wire of stainless steel +72230010,WIRE OF STAINLESS STEEL:Wire of stainless steel :Electrode quality +72230091,WIRE OF STAINLESS STEEL:Wire of stainless steel :Of thickness of above 1.5 mm +72230092,WIRE OF STAINLESS STEEL:Wire of stainless steel :Of thickness of 0.46 mm and above but not exceeding 1.5 mm +72230099,WIRE OF STAINLESS STEEL:Wire of stainless steel :Of thickness of below 0.46 mm +72240000,other alloy steel in ingots or other primary forms products of other alloy steel +72241000,OTHER ALLOY STEEL IN INGOTS OR OTHER PRIMARY FORMS; SEMI-FINISHED PRODUCTS OF OTHER ALLOY STEEL::Ingots and other primary forms +72249000,other alloy steel in ingots or other primary forms products of other alloy steel >> other +72249010,OTHER ALLOY STEEL IN INGOTS OR OTHER PRIMARY FORMS; SEMI-FINISHED PRODUCTS OF OTHER ALLOY STEEL:Other :Of tool steel quality +72249020,OTHER ALLOY STEEL IN INGOTS OR OTHER PRIMARY FORMS; SEMI-FINISHED PRODUCTS OF OTHER ALLOY STEEL:Other :Of die steel quality +72249030,OTHER ALLOY STEEL IN INGOTS OR OTHER PRIMARY FORMS; 
SEMI-FINISHED PRODUCTS OF OTHER ALLOY STEEL:Other :Of cobalt bearing high speed steel quality +72249040,OTHER ALLOY STEEL IN INGOTS OR OTHER PRIMARY FORMS; SEMI-FINISHED PRODUCTS OF OTHER ALLOY STEEL:Other :Forged blanks of alloy steel +72249091,OTHER ALLOY STEEL IN INGOTS OR OTHER PRIMARY FORMS; SEMI-FINISHED PRODUCTS OF OTHER ALLOY STEEL:Other :Billets +72249099,OTHER ALLOY STEEL IN INGOTS OR OTHER PRIMARY FORMS; SEMI-FINISHED PRODUCTS OF OTHER ALLOY STEEL:Other :Other +72250000,products of other alloy steel of a width of 600 mm or more of steel +72251100,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE::Grain-oriented" +72251900,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE:Other:Other" +72251910,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE:Other:Hot rolled" +72251920,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE:Other:Cold rolled" +72251990,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE:Other:other" +72253000,products of other alloy steel of a width of 600 mm or more of steel >> other not further worked than in coils +72253010,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE:Other, not further worked than hot-rolled, in coils :Of spring steel quality" +72253090,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE:Other, not further worked than hot-rolled, in coils :Other" +72254000,products of other alloy steel of a width of 600 mm or more of steel >> other not further worked than not in coils of a thickness of above mm +72254011,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE:Other, not further worked than hot-rolled, not in coils :Boiler quality" +72254012,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE:Other, not further worked than hot-rolled, not in coils :Pressure vessel quality" +72254013,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE:Other, not further worked than hot-rolled, not in coils :High tensile quality" +72254019,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE:Other, not further worked than hot-rolled, not in coils :Other" +72254020,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE:Other, not further worked than hot-rolled, not in coils :Of a thickness of 3 mm and above but not exceeding 4.75 mm" +72254030,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE:Other, not further worked than hot-rolled, not in coils :Of a thickness of below 3 mm" +72255000,products of other alloy steel of a width of 600 mm or more of steel >> other not further worked than +72255010,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE:Other, not further worked than cold-rolled (cold-reduced) :Of a thickness of less than 3 mm" +72255020,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE:Other, not further worked than cold-rolled (cold-reduced) :Of a thickness of 3 mm to 4.75 mm" +72255030,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE:Other, not further worked than cold-rolled (cold-reduced) :Of a thickness of above 4.75 mm" +72259100,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE::Electrolytically plated or coated with zinc" +72259110,products of other alloy steel of a width of 600 mm or more of steel >> electrolytically plated or coated with zinc >> galvannealed +72259190,products of 
other alloy steel of a width of 600 mm or more of steel >> electrolytically plated or coated with zinc >> other +72259200,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE::Otherwise plated or coated with zinc" +72259210,products of other alloy steel of a width of 600 mm or more of steel >> otherwise plated or coated with zinc >> galvannealed +72259290,products of other alloy steel of a width of 600 mm or more of steel >> otherwise plated or coated with zinc >> other +72259900,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF 600 MM OR MORE::Other" +72259910,products of other alloy steel of a width of 600 mm or more of steel >> other >> plated or coated with aluminium +72259920,products of other alloy steel of a width of 600 mm or more of steel >> other >> plated or coated with alloys +72259930,products of other alloy steel of a width of 600 mm or more of steel >> other >> painted coloured or coated with plastics +72259990,products of other alloy steel of a width of 600 mm or more of steel >> other >> other +72260000,products of other alloy steel of a width of less than 600 mm of steel +72261100,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM::Grain-oriented" +72261900,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Other:Other" +72261910,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Other:Hot Rolled" +72261920,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Other:Cold rolled" +72261990,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Other:other" +72262000,products of other alloy steel of a width of less than 600 mm of steel >> of high speed steel +72262011,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Of high speed steel :In coils" +72262012,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Of high speed steel :Other" +72262021,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Of high speed steel :In coils" +72262022,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Of high speed steel :Other" +72262030,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Of high speed steel :Hoops and strips" +72269100,products of other alloy steel of a width of less than 600 mm of steel >> not further worked than not further worked than +72269110,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Not further worked than hot-rolled :Of a thickness of below 3 mm" +72269120,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Not further worked than hot-rolled :Of a thickness of 3 mm and above but not exceeding 4.75 mm" +72269130,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Not further worked than hot-rolled :High tensile quality of a thickness of above 4.75 mm" +72269190,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Not further worked than hot-rolled :Other" +72269200,products of other alloy steel of a width of less than 600 mm of steel >> not further worked than cold reduced not further worked than cold reduced +72269210,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Not further worked than cold-rolled (cold- reduced) :Of a thickness of below 3 mm" +72269220,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Not further worked than cold-rolled (cold- reduced) 
:Of a thickness of 3 mm and above but not exceeding 4.75 mm" +72269230,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Not further worked than cold-rolled (cold- reduced) :Of a thickness of above 4.75 mm" +72269900,products of other alloy steel of a width of less than 600 mm of steel >> other other +72269910,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Other :Of a thickness of above 4.75 mm" +72269920,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Other :Of a thickness of above 3 mm and but not exceeding 4.75 mm" +72269930,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Other :Of a thickness of below 3 mm" +72269940,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Other :Skelp (strips for pipes and tubes)" +72269951,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Other :Hot rolled" +72269952,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Other :Cold rolled" +72269953,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Other :Of spring steel, other than skelp" +72269960,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Other :Skelps and strips for razor blades and saw blades" +72269971,products of other alloy steel of a width of less than 600 mm of steel >> other other >> plain and corrugated kg plain and corrugated kg +72269972,products of other alloy steel of a width of less than 600 mm of steel >> other other >> electrolytically plain and corrugated kg electrolytically plain and corrugated kg +72269973,products of other alloy steel of a width of less than 600 mm of steel >> other other >> galvannealed kg galvannealed kg +72269979,products of other alloy steel of a width of less than 600 mm of steel >> other other >> other kg otherwise coated or plated other kg otherwise coated or plated +72269981,products of other alloy steel of a width of less than 600 mm of steel >> other other >> with aluminium kg with aluminium kg +72269982,products of other alloy steel of a width of less than 600 mm of steel >> other other >> with alloys kg with alloys kg +72269983,products of other alloy steel of a width of less than 600 mm of steel >> other other >> painted coloured or coated with plastics kg painted coloured or coated with plastics kg +72269989,products of other alloy steel of a width of less than 600 mm of steel >> other other >> other kg other kg +72269990,"FLAT-ROLLED PRODUCTS OF OTHER ALLOY STEEL, OF A WIDTH OF LESS THAN 600 MM:Other :Other" +72270000,bars and rods in irregularly wound coils of other alloy steel bars and rods in irregularly wound coils of other alloy steel +72271000,"BARS AND RODS, HOT-ROLLED, IN IRREGULARLY WOUND COILS, OF OTHER ALLOY STEEL::Of high speed steel" +72272000,"BARS AND RODS, HOT-ROLLED, IN IRREGULARLY WOUND COILS, OF OTHER ALLOY STEEL::Of silico-manganese steel" +72279000,bars and rods in irregularly wound coils of other alloy steel bars and rods in irregularly wound coils of other alloy steel >> other other +72279010,"BARS AND RODS, HOT-ROLLED, IN IRREGULARLY WOUND COILS, OF OTHER ALLOY STEEL:Other :Valve spring steel quality" +72279020,"BARS AND RODS, HOT-ROLLED, IN IRREGULARLY WOUND COILS, OF OTHER ALLOY STEEL:Other :Other spring steel quality" +72279030,"BARS AND RODS, HOT-ROLLED, IN IRREGULARLY WOUND COILS, OF OTHER ALLOY STEEL:Other :Ball bearing quality" +72279040,"BARS AND RODS, HOT-ROLLED, IN IRREGULARLY WOUND COILS, 
OF OTHER ALLOY STEEL:Other :Cold heading quality" +72279050,"BARS AND RODS, HOT-ROLLED, IN IRREGULARLY WOUND COILS, OF OTHER ALLOY STEEL:Other :Lead-free cutting quality" +72279060,"BARS AND RODS, HOT-ROLLED, IN IRREGULARLY WOUND COILS, OF OTHER ALLOY STEEL:Other :Sulphur free cutting quality" +72279090,"BARS AND RODS, HOT-ROLLED, IN IRREGULARLY WOUND COILS, OF OTHER ALLOY STEEL:Other :Other" +72280000,other bars and rods of other alloy steel angles shapes and sections of other alloy steel hollow drill bars and rods of alloy or steel +72281000,other bars and rods of other alloy steel angles shapes and sections of other alloy steel hollow drill bars and rods of alloy or steel >> bars and rods of high speed steel +72281010,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Bars and rods, of high speed steel :Bright bars" +72281090,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Bars and rods, of high speed steel :Other" +72282000,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL::Bars and rods, of silico-manganese steel" +72283000,other bars and rods of other alloy steel angles shapes and sections of other alloy steel hollow drill bars and rods of alloy or steel >> other bars and rods not further worked than or extruded bright bars +72283011,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Other bars and rods, not further worked than hot-rolled, hot-drawn or extruded :Of alloy tool steel" +72283019,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Other bars and rods, not further worked than hot-rolled, hot-drawn or extruded :Other" +72283021,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Other bars and rods, not further worked than hot-rolled, hot-drawn or extruded :Lead bearing steel" +72283022,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Other bars and rods, not further worked than hot-rolled, hot-drawn or extruded :Spring steel" +72283023,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Other bars and rods, not further worked than hot-rolled, hot-drawn or extruded :Sulphur bearing steel" +72283024,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Other bars and rods, not further worked than hot-rolled, hot-drawn or extruded :Tool and die steel" +72283029,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Other bars and rods, not further worked than hot-rolled, hot-drawn or extruded :Other" +72284000,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL::Other bars and rods, not further worked than 
forged" +72285000,other bars and rods of other alloy steel angles shapes and sections of other alloy steel hollow drill bars and rods of alloy or steel >> other bars and rods not further worked than or +72285010,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Other bars and rods, not further worked than cold-formed or cold-finished :Of engine valves and cold heading steel" +72285090,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Other bars and rods, not further worked than cold-formed or cold-finished :Other" +72286000,other bars and rods of other alloy steel angles shapes and sections of other alloy steel hollow drill bars and rods of alloy or steel >> other bars and rods bright bars +72286011,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Other bars and rods :Of alloy tool steel" +72286012,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Other bars and rods :Other" +72286091,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Other bars and rods :Lead bearing steel" +72286092,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Other bars and rods :Spring steel" +72286093,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Other bars and rods :Sulphur bearing steel" +72286094,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Other bars and rods :Tool and die steel" +72286099,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Other bars and rods :Other" +72287000,other bars and rods of other alloy steel angles shapes and sections of other alloy steel hollow drill bars and rods of alloy or steel >> angles shapes and sections not further worked than or extruded +72287011,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Angles, shapes and sections :Of 80 mm or more" +72287012,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Angles, shapes and sections :Of less than 80 mm" +72287021,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Angles, shapes and sections :Of 80 mm or more" +72287022,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Angles, shapes and sections :Of less than 80 mm" +72288000,other bars and rods of other alloy steel angles shapes and sections of other alloy steel hollow drill bars and rods of alloy or steel >> hollow drill bars and rods +72288010,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, 
SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Hollow drill bars and rods :Of alloy steel" +72288020,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Hollow drill bars and rods :Of non alloy steel, forged" +72288090,"OTHER BARS AND RODS OF OTHER ALLOY STEEL; ANGLES, SHAPES AND SECTIONS, OF OTHER ALLOY STEEL; HOLLOW DRILL BARS AND RODS, OF ALLOY OR NON-ALLOY STEEL:Hollow drill bars and rods :Other" +72290000,wire of other alloy steel +72292000,WIRE OF OTHER ALLOY STEEL::Of silico-manganese steel +72299000,wire of other alloy steel >> other tinned wire bronze coated wire trapezoidal wire half round wire crimped wire and copper coated wire not insulated +72299011,WIRE OF OTHER ALLOY STEEL:Other :Tinned wire +72299012,WIRE OF OTHER ALLOY STEEL:Other :Bronze coated wire +72299013,WIRE OF OTHER ALLOY STEEL:Other :Trapezoidal wire +72299014,WIRE OF OTHER ALLOY STEEL:Other :Half round wire +72299015,WIRE OF OTHER ALLOY STEEL:Other :Crimped wire +72299016,WIRE OF OTHER ALLOY STEEL:Other :Copper coated wire +72299021,WIRE OF OTHER ALLOY STEEL:Other :Electrode quality +72299022,WIRE OF OTHER ALLOY STEEL:Other :Wire rope quality +72299023,WIRE OF OTHER ALLOY STEEL:Other :ACSR quality +72299031,WIRE OF OTHER ALLOY STEEL:Other :Wire (excluding wire type lead) +72299032,WIRE OF OTHER ALLOY STEEL:Other :Spring wire +72299033,WIRE OF OTHER ALLOY STEEL:Other :High tensile wire +72299034,WIRE OF OTHER ALLOY STEEL:Other :Hardened and tempered wire +72299040,WIRE OF OTHER ALLOY STEEL:Other :Other wire +72299051,WIRE OF OTHER ALLOY STEEL:Other :Half round +72299052,WIRE OF OTHER ALLOY STEEL:Other :Flat and rectangular +72299053,WIRE OF OTHER ALLOY STEEL:Other :`L' shape +72299054,WIRE OF OTHER ALLOY STEEL:Other :`Z' shape +72299059,WIRE OF OTHER ALLOY STEEL:Other :Other +72299060,WIRE OF OTHER ALLOY STEEL:Other :Electric resistance wire (including electric resistance heating wire) +72299070,WIRE OF OTHER ALLOY STEEL:Other :Crimped wire +72299090,WIRE OF OTHER ALLOY STEEL:Other :Other +73010000,sheet piling of iron or steel whether or not drilled punched or made from assembled elements welded angles shapes and sections of iron or steel +73011000,"SHEET PILING OF IRON OR STEEL, WHETHER OR NOT DRILLED, PUNCHED OR MADE FROM ASSEMBLED ELEMENTS; WELDED ANGLES, SHAPES AND SECTIONS, OF IRON OR STEEL::Sheet piling" +73012000,sheet piling of iron or steel whether or not drilled punched or made from assembled elements welded angles shapes and sections of iron or steel >> angles shapes and sections +73012010,"SHEET PILING OF IRON OR STEEL, WHETHER OR NOT DRILLED, PUNCHED OR MADE FROM ASSEMBLED ELEMENTS; WELDED ANGLES, SHAPES AND SECTIONS, OF IRON OR STEEL:Angles, shapes and sections :Steel slotted angles" +73012090,"SHEET PILING OF IRON OR STEEL, WHETHER OR NOT DRILLED, PUNCHED OR MADE FROM ASSEMBLED ELEMENTS; WELDED ANGLES, SHAPES AND SECTIONS, OF IRON OR STEEL:Angles, shapes and sections :Other" +73020000,railway or tramway track construction material of iron or steel the following rails and rack rails switch blades crossing frogs point rods and other crossing pieces sleepers fish plates chairs chair wedges sole plates base plates rail clips bedplates ties and other material specialized for jointing or fixing rails +73021000,railway or tramway track construction material of iron or steel the following rails and rack rails switch blades crossing frogs point rods and other 
crossing pieces sleepers fish plates chairs chair wedges sole plates base plates rail clips bedplates ties and other material specialized for jointing or fixing rails >> rails +73021010,"RAILWAY OR TRAMWAY TRACK CONSTRUCTION MATERIAL OF IRON OR STEEL, THE FOLLOWING RAILS, CHECK-RAILS AND RACK RAILS, SWITCH BLADES, CROSSING FROGS, POINT RODS AND OTHER CROSSING PIECES, SLEEPERS (CROSS-TIES), FISH- PLATES, CHAIRS, CHAIR WEDGES, SOLE PLATES (BASE PLATES), RAIL CLIPS, BEDPLATES, TIES AND OTHER MATERIAL SPECIALIZED FOR JOINTING OR FIXING RAILS:Rails :For railways(OLD tariff)" +73021020,"RAILWAY OR TRAMWAY TRACK CONSTRUCTION MATERIAL OF IRON OR STEEL, THE FOLLOWING RAILS, CHECK-RAILS AND RACK RAILS, SWITCH BLADES, CROSSING FROGS, POINT RODS AND OTHER CROSSING PIECES, SLEEPERS (CROSS-TIES), FISH- PLATES, CHAIRS, CHAIR WEDGES, SOLE PLATES (BASE PLATES), RAIL CLIPS, BEDPLATES, TIES AND OTHER MATERIAL SPECIALIZED FOR JOINTING OR FIXING RAILS:Rails :For tramways(OLD tariff)" +73021090,"RAILWAY OR TRAMWAY TRACK CONSTRUCTION MATERIAL OF IRON OR STEEL, THE FOLLOWING RAILS, CHECK-RAILS AND RACK RAILS, SWITCH BLADES, CROSSING FROGS, POINT RODS AND OTHER CROSSING PIECES, SLEEPERS (CROSS-TIES), FISH- PLATES, CHAIRS, CHAIR WEDGES, SOLE PLATES (BASE PLATES), RAIL CLIPS, BEDPLATES, TIES AND OTHER MATERIAL SPECIALIZED FOR JOINTING OR FIXING RAILS:Rails :Other(OLD tariff)" +73023000,"RAILWAY OR TRAMWAY TRACK CONSTRUCTION MATERIAL OF IRON OR STEEL, THE FOLLOWING RAILS, CHECK-RAILS AND RACK RAILS, SWITCH BLADES, CROSSING FROGS, POINT RODS AND OTHER CROSSING PIECES, SLEEPERS (CROSS-TIES), FISH- PLATES, CHAIRS, CHAIR WEDGES, SOLE PLATES (BASE PLATES), RAIL CLIPS, BEDPLATES, TIES AND OTHER MATERIAL SPECIALIZED FOR JOINTING OR FIXING RAILS::Switch blades, crossing frogs, point rods and other crossing pieces(OLD tariff)" +73024000,"RAILWAY OR TRAMWAY TRACK CONSTRUCTION MATERIAL OF IRON OR STEEL, THE FOLLOWING RAILS, CHECK-RAILS AND RACK RAILS, SWITCH BLADES, CROSSING FROGS, POINT RODS AND OTHER CROSSING PIECES, SLEEPERS (CROSS-TIES), FISH- PLATES, CHAIRS, CHAIR WEDGES, SOLE PLATES (BASE PLATES), RAIL CLIPS, BEDPLATES, TIES AND OTHER MATERIAL SPECIALIZED FOR JOINTING OR FIXING RAILS::Fish-plates and sole plates(OLD tariff)" +73029010,"RAILWAY OR TRAMWAY TRACK CONSTRUCTION MATERIAL OF IRON OR STEEL, THE FOLLOWING RAILS, CHECK-RAILS AND RACK RAILS, SWITCH BLADES, CROSSING FROGS, POINT RODS AND OTHER CROSSING PIECES, SLEEPERS (CROSS-TIES), FISH- PLATES, CHAIRS, CHAIR WEDGES, SOLE PLATES (BASE PLATES), RAIL CLIPS, BEDPLATES, TIES AND OTHER MATERIAL SPECIALIZED FOR JOINTING OR FIXING RAILS:Other :Material for jointing or fixing rails(OLD tariff)" +73029090,"RAILWAY OR TRAMWAY TRACK CONSTRUCTION MATERIAL OF IRON OR STEEL, THE FOLLOWING RAILS, CHECK-RAILS AND RACK RAILS, SWITCH BLADES, CROSSING FROGS, POINT RODS AND OTHER CROSSING PIECES, SLEEPERS (CROSS-TIES), FISH- PLATES, CHAIRS, CHAIR WEDGES, SOLE PLATES (BASE PLATES), RAIL CLIPS, BEDPLATES, TIES AND OTHER MATERIAL SPECIALIZED FOR JOINTING OR FIXING RAILS:Other :Other(OLD tariff)" +73030000,tubes pipes and hollow profiles of cast iron tubes pipes and hollow profiles of cast iron +73030010,"TUBES, PIPES AND HOLLOW PROFILES, OF CAST IRON:Tubes, pipes and hollow profiles, of cast iron:Rain water pipe" +73030020,"TUBES, PIPES AND HOLLOW PROFILES, OF CAST IRON:Tubes, pipes and hollow profiles, of cast iron:Soil pipe" +73030030,"TUBES, PIPES AND HOLLOW PROFILES, OF CAST IRON:Tubes, pipes and hollow profiles, of cast iron:Spun pipe" +73030090,"TUBES, PIPES AND HOLLOW PROFILES, OF 
CAST IRON:Tubes, pipes and hollow profiles, of cast iron:Other" +73040000,tubes pipes and hollow profiles seamless of iron other than cast iron or steel line pipe of a kind used for oil and gas pipelines +73041100,tubes pipes and hollow profiles seamless of iron other than cast iron or steel line pipe of a kind used for oil and gas pipelines >> of stainless steel +73041110,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Of stainless steel:Tubes and pipes" +73041120,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Of stainless steel:Blanks for tubes and pipes" +73041190,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Of stainless steel:Other" +73041900,tubes pipes and hollow profiles seamless of iron other than cast iron or steel line pipe of a kind used for oil and gas pipelines >> other +73041910,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Other:Tubes and pipes" +73041920,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Other:Blanks for tubes and pipes" +73041990,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Other:Other" +73042200,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL::Drill pipe of stainless steel;" +73042300,tubes pipes and hollow profiles seamless of iron other than cast iron or steel line pipe of a kind used for oil and gas pipelines >> other drill pipe +73042310,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Other drill pipe:Of iron" +73042390,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Other drill pipe:Other;" +73042400,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL::Other, of stainless steel" +73042900,tubes pipes and hollow profiles seamless of iron other than cast iron or steel line pipe of a kind used for oil and gas pipelines >> other +73042910,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Other;:Of iron" +73042990,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Other;:Other" +73043100,tubes pipes and hollow profiles seamless of iron other than cast iron or steel line pipe of a kind used for oil and gas pipelines >> or up to mm outer diameter +73043111,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Cold-drawn or cold-rolled (cold-reduced):OF IRON" +73043119,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Cold-drawn or cold-rolled (cold-reduced):Other" +73043121,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Cold-drawn or cold-rolled (cold-reduced):Of iron" +73043129,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Cold-drawn or cold-rolled (cold-reduced):Other(OLD tariff)" +73043131,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Cold-drawn or cold-rolled (cold-reduced):Of iron" +73043139,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Cold-drawn or cold-rolled (cold-reduced):Other" +73043900,tubes pipes and hollow profiles seamless of iron other than cast iron or steel line pipe of a kind used for oil and gas pipelines >> other up to mm outer diameter +73043911,"TUBES, PIPES AND HOLLOW PROFILES, 
SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Other:Of iron" +73043919,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Other:Other" +73043921,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Other:Of iron" +73043929,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Other:Other" +73043931,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Other:Of iron" +73043939,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Other:Other" +73044100,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL::Cold drawn or cold rolled (cold reduced)" +73044900,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL::Other" +73045100,tubes pipes and hollow profiles seamless of iron other than cast iron or steel line pipe of a kind used for oil and gas pipelines >> or cold rolled +73045110,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Cold-drawn or cold rolled (cold-reduced):Upto 114.3 mm diameter" +73045120,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Cold-drawn or cold rolled (cold-reduced):Above 114.3 mm but upto 219.1 mm outer diameter" +73045130,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Cold-drawn or cold rolled (cold-reduced):Above 219.1 mm outer diameter" +73045900,tubes pipes and hollow profiles seamless of iron other than cast iron or steel line pipe of a kind used for oil and gas pipelines >> other +73045910,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Other:Upto 114.3 mm diameter" +73045920,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Other:Above 114.3 mm but upto 219.1 mm outer diameter" +73045930,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL:Other:Above 219.1 mm outer diameter" +73049000,"TUBES, PIPES AND HOLLOW PROFILES, SEAMLESS, OF IRON (OTHER THAN CAST IRON) OR STEEL::Other" +73051111,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Longitudinally submerged arc welded :Of iron(OLD tariff)" +73051119,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Longitudinally submerged arc welded :Other(OLD tariff)" +73051121,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Longitudinally submerged arc welded :Of iron(OLD tariff)" +73051129,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Longitudinally submerged arc welded :Other(OLD tariff)" +73051211,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Other, longitudinally welded :Of iron(OLD tariff)" +73051219,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR 
STEEL:Other, longitudinally welded :Other(OLD tariff)" +73051221,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Other, longitudinally welded :Of iron(OLD tariff)" +73051229,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Other, longitudinally welded :Other(OLD tariff)" +73051911,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Other:Of iron(OLD tariff)" +73051919,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Other:Other(OLD tariff)" +73051921,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Other:Of iron(OLD tariff)" +73051929,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Other:Other(OLD tariff)" +73052010,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Casing of a kind used in drilling for oil or gas :Of iron(OLD tariff)" +73052090,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Casing of a kind used in drilling for oil or gas :Other(OLD tariff)" +73053110,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Longitudinally welded:Of iron(OLD tariff)" +73053190,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Longitudinally welded:Other(OLD tariff)" +73053910,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Other :Of iron(OLD tariff)" +73053990,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Other :Other(OLD tariff)" +73059010,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Other :High pressure hydroelectric conduits of steel(OLD tariff)" +73059021,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Other :Of iron(OLD tariff)" +73059029,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Other :Other(OLD tariff)" +73059091,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH 
EXCEEDS 406.4 MM, OF IRON OR STEEL:Other :Of iron(OLD tariff)" +73059099,"OTHER TUBES AND PIPES (FOR EXAMPLE, WELDED, RIVETED OR SIMILARLY CLOSED), HAVING CIRCULAR CROSS-SECTIONS, THE EXTERNAL DIAMETER OF WHICH EXCEEDS 406.4 MM, OF IRON OR STEEL:Other :Other(OLD tariff)" +73060000,other tubes pipes and hollow profiles for example open seam or welded riveted or similarly closed of iron or steel line pipe of a kind used for oil or gas pipelines +73061100,"OTHER TUBES, PIPES AND HOLLOW PROFILES (FOR EXAMPLE, OPEN SEAM OR WELDED, RIVETED OR SIMILARLY CLOSED), OF IRON OR STEEL::Welded, of stainless steel" +73061900,other tubes pipes and hollow profiles for example open seam or welded riveted or similarly closed of iron or steel line pipe of a kind used for oil or gas pipelines >> other galvanised pipes +73061911,"OTHER TUBES, PIPES AND HOLLOW PROFILES (FOR EXAMPLE, OPEN SEAM OR WELDED, RIVETED OR SIMILARLY CLOSED), OF IRON OR STEEL:Other:Of iron" +73061919,"OTHER TUBES, PIPES AND HOLLOW PROFILES (FOR EXAMPLE, OPEN SEAM OR WELDED, RIVETED OR SIMILARLY CLOSED), OF IRON OR STEEL:Other:Other" +73061921,"OTHER TUBES, PIPES AND HOLLOW PROFILES (FOR EXAMPLE, OPEN SEAM OR WELDED, RIVETED OR SIMILARLY CLOSED), OF IRON OR STEEL:Other:Of iron" +73061929,"OTHER TUBES, PIPES AND HOLLOW PROFILES (FOR EXAMPLE, OPEN SEAM OR WELDED, RIVETED OR SIMILARLY CLOSED), OF IRON OR STEEL:Other:Other" +73062100,"OTHER TUBES, PIPES AND HOLLOW PROFILES (FOR EXAMPLE, OPEN SEAM OR WELDED, RIVETED OR SIMILARLY CLOSED), OF IRON OR STEEL::Welded, of stainless steel" +73062900,other tubes pipes and hollow profiles for example open seam or welded riveted or similarly closed of iron or steel line pipe of a kind used for oil or gas pipelines >> other +73062911,"OTHER TUBES, PIPES AND HOLLOW PROFILES (FOR EXAMPLE, OPEN SEAM OR WELDED, RIVETED OR SIMILARLY CLOSED), OF IRON OR STEEL:Other:Of iron" +73062919,"OTHER TUBES, PIPES AND HOLLOW PROFILES (FOR EXAMPLE, OPEN SEAM OR WELDED, RIVETED OR SIMILARLY CLOSED), OF IRON OR STEEL:Other:Other" +73063000,other tubes pipes and hollow profiles for example open seam or welded riveted or similarly closed of iron or steel line pipe of a kind used for oil or gas pipelines >> other welded of circular of iron or steel +73063010,"OTHER TUBES, PIPES AND HOLLOW PROFILES (FOR EXAMPLE, OPEN SEAM OR WELDED, RIVETED OR SIMILARLY CLOSED), OF IRON OR STEEL:Other, welded, of circular cross-section, of iron or non-alloy steel:Of iron" +73063090,"OTHER TUBES, PIPES AND HOLLOW PROFILES (FOR EXAMPLE, OPEN SEAM OR WELDED, RIVETED OR SIMILARLY CLOSED), OF IRON OR STEEL:Other, welded, of circular cross-section, of iron or non-alloy steel:Other" +73064000,"OTHER TUBES, PIPES AND HOLLOW PROFILES (FOR EXAMPLE, OPEN SEAM OR WELDED, RIVETED OR SIMILARLY CLOSED), OF IRON OR STEEL::Other, welded, of circular cross-section, of stainless steel" +73065000,"OTHER TUBES, PIPES AND HOLLOW PROFILES (FOR EXAMPLE, OPEN SEAM OR WELDED, RIVETED OR SIMILARLY CLOSED), OF IRON OR STEEL::Other, welded, of circular cross-section, of other alloy steel" +73066100,"OTHER TUBES, PIPES AND HOLLOW PROFILES (FOR EXAMPLE, OPEN SEAM OR WELDED, RIVETED OR SIMILARLY CLOSED), OF IRON OR STEEL::Of square or rectangular cross-section" +73066900,"OTHER TUBES, PIPES AND HOLLOW PROFILES (FOR EXAMPLE, OPEN SEAM OR WELDED, RIVETED OR SIMILARLY CLOSED), OF IRON OR STEEL::Of other non-circular cross-section" +73069000,other tubes pipes and hollow profiles for example open seam or welded riveted or similarly closed of iron or steel line pipe of a kind used 
for oil or gas pipelines >> other erw precision tubes +73069011,"OTHER TUBES, PIPES AND HOLLOW PROFILES (FOR EXAMPLE, OPEN SEAM OR WELDED, RIVETED OR SIMILARLY CLOSED), OF IRON OR STEEL:Other :Of iron" +73069019,"OTHER TUBES, PIPES AND HOLLOW PROFILES (FOR EXAMPLE, OPEN SEAM OR WELDED, RIVETED OR SIMILARLY CLOSED), OF IRON OR STEEL:Other :Other" +73069090,"OTHER TUBES, PIPES AND HOLLOW PROFILES (FOR EXAMPLE, OPEN SEAM OR WELDED, RIVETED OR SIMILARLY CLOSED), OF IRON OR STEEL:Other :Other" +73070000,tube or pipe fittings for example couplings elbows sleeves of iron or steel cast fittings +73071100,tube or pipe fittings for example couplings elbows sleeves of iron or steel cast fittings >> of cast iron +73071110,"TUBE OR PIPE FITTINGS (FOR EXAMPLE, COUPLINGS, ELBOW SLEEVES), OF IRON OR STEEL:Of non-malleable cast iron :Sponge iron cast fittings" +73071120,"TUBE OR PIPE FITTINGS (FOR EXAMPLE, COUPLINGS, ELBOW SLEEVES), OF IRON OR STEEL:Of non-malleable cast iron :SG iron cast fittings" +73071190,"TUBE OR PIPE FITTINGS (FOR EXAMPLE, COUPLINGS, ELBOW SLEEVES), OF IRON OR STEEL:Of non-malleable cast iron :Other" +73071900,"TUBE OR PIPE FITTINGS (FOR EXAMPLE, COUPLINGS, ELBOW SLEEVES), OF IRON OR STEEL::Other" +73072100,"TUBE OR PIPE FITTINGS (FOR EXAMPLE, COUPLINGS, ELBOW SLEEVES), OF IRON OR STEEL::Flanges" +73072200,"TUBE OR PIPE FITTINGS (FOR EXAMPLE, COUPLINGS, ELBOW SLEEVES), OF IRON OR STEEL::Threaded elbows, bends and sleeves" +73072300,"TUBE OR PIPE FITTINGS (FOR EXAMPLE, COUPLINGS, ELBOW SLEEVES), OF IRON OR STEEL::Butt welding fittings" +73072900,"TUBE OR PIPE FITTINGS (FOR EXAMPLE, COUPLINGS, ELBOW SLEEVES), OF IRON OR STEEL::Other" +73079100,tube or pipe fittings for example couplings elbows sleeves of iron or steel cast fittings >> flanges +73079110,"TUBE OR PIPE FITTINGS (FOR EXAMPLE, COUPLINGS, ELBOW SLEEVES), OF IRON OR STEEL:Flanges :Galvanised" +73079190,"TUBE OR PIPE FITTINGS (FOR EXAMPLE, COUPLINGS, ELBOW SLEEVES), OF IRON OR STEEL:Flanges :Other" +73079200,tube or pipe fittings for example couplings elbows sleeves of iron or steel cast fittings >> threaded elbows bends and sleeves +73079210,"TUBE OR PIPE FITTINGS (FOR EXAMPLE, COUPLINGS, ELBOW SLEEVES), OF IRON OR STEEL:Threaded elbows, bends and sleeves :Galvanised" +73079290,"TUBE OR PIPE FITTINGS (FOR EXAMPLE, COUPLINGS, ELBOW SLEEVES), OF IRON OR STEEL:Threaded elbows, bends and sleeves :Other" +73079300,tube or pipe fittings for example couplings elbows sleeves of iron or steel cast fittings >> butt welding fittings +73079310,"TUBE OR PIPE FITTINGS (FOR EXAMPLE, COUPLINGS, ELBOW SLEEVES), OF IRON OR STEEL:Butt welding fittings :Galvanised" +73079390,"TUBE OR PIPE FITTINGS (FOR EXAMPLE, COUPLINGS, ELBOW SLEEVES), OF IRON OR STEEL:Butt welding fittings :Other" +73079900,tube or pipe fittings for example couplings elbows sleeves of iron or steel cast fittings >> other +73079910,"TUBE OR PIPE FITTINGS (FOR EXAMPLE, COUPLINGS, ELBOW SLEEVES), OF IRON OR STEEL:Other :Galvanised" +73079990,"TUBE OR PIPE FITTINGS (FOR EXAMPLE, COUPLINGS, ELBOW SLEEVES), OF IRON OR STEEL:Other :Other" +73080000,structures excluding prefabricated buildings of heading 9406 and parts of structures for gates towers lattice masts roofs roofing doors and windows and their frames and thresholds for doors shutters balustrades pillars and columns of iron or steel plates rods angles shapes sections tubes and the like prepared for use in structures of iron or steel +73081000,"STRUCTURES (EXCLUDING PREFABRICATED BUILDINGS OF HEADING 9406) AND PARTS OF 
STRUCTURES (FOR EXAMPLE, BRIDGES AND BRIDGE-SECTIONS, LOCK-GATES, TOWERS, LATTICE MASTS, ROOFS, ROOFING FRAME-WORKS, DOORS AND WINDOWS AND THEIR FRAMES AND THRESHOLDS FOR DOORS, SHUTTERS, BALUSTRADES, PILLARS AND COLUMNS), OF IRON OR STEEL; PLATES, RODS, ANGLES, SHAPES, SECT TUBES AND THE LIKE, PREPARED FOR USE IN STRUCTURES, OF IRON OR STEEL::Bridges and bridge-sections" +73082000,structures excluding prefabricated buildings of heading 9406 and parts of structures for gates towers lattice masts roofs roofing doors and windows and their frames and thresholds for doors shutters balustrades pillars and columns of iron or steel plates rods angles shapes sections tubes and the like prepared for use in structures of iron or steel >> towers and lattice masts towers whether or not assembled +73082011,"STRUCTURES (EXCLUDING PREFABRICATED BUILDINGS OF HEADING 9406) AND PARTS OF STRUCTURES (FOR EXAMPLE, BRIDGES AND BRIDGE-SECTIONS, LOCK-GATES, TOWERS, LATTICE MASTS, ROOFS, ROOFING FRAME-WORKS, DOORS AND WINDOWS AND THEIR FRAMES AND THRESHOLDS FOR DOORS, SHUTTERS, BALUSTRADES, PILLARS AND COLUMNS), OF IRON OR STEEL; PLATES, RODS, ANGLES, SHAPES, SECT TUBES AND THE LIKE, PREPARED FOR USE IN STRUCTURES, OF IRON OR STEEL:Towers and lattice masts :For transmission line" +73082019,"STRUCTURES (EXCLUDING PREFABRICATED BUILDINGS OF HEADING 9406) AND PARTS OF STRUCTURES (FOR EXAMPLE, BRIDGES AND BRIDGE-SECTIONS, LOCK-GATES, TOWERS, LATTICE MASTS, ROOFS, ROOFING FRAME-WORKS, DOORS AND WINDOWS AND THEIR FRAMES AND THRESHOLDS FOR DOORS, SHUTTERS, BALUSTRADES, PILLARS AND COLUMNS), OF IRON OR STEEL; PLATES, RODS, ANGLES, SHAPES, SECT TUBES AND THE LIKE, PREPARED FOR USE IN STRUCTURES, OF IRON OR STEEL:Towers and lattice masts :Other" +73082020,"STRUCTURES (EXCLUDING PREFABRICATED BUILDINGS OF HEADING 9406) AND PARTS OF STRUCTURES (FOR EXAMPLE, BRIDGES AND BRIDGE-SECTIONS, LOCK-GATES, TOWERS, LATTICE MASTS, ROOFS, ROOFING FRAME-WORKS, DOORS AND WINDOWS AND THEIR FRAMES AND THRESHOLDS FOR DOORS, SHUTTERS, BALUSTRADES, PILLARS AND COLUMNS), OF IRON OR STEEL; PLATES, RODS, ANGLES, SHAPES, SECT TUBES AND THE LIKE, PREPARED FOR USE IN STRUCTURES, OF IRON OR STEEL:Towers and lattice masts :Lattice masts" +73083000,"STRUCTURES (EXCLUDING PREFABRICATED BUILDINGS OF HEADING 9406) AND PARTS OF STRUCTURES (FOR EXAMPLE, BRIDGES AND BRIDGE-SECTIONS, LOCK-GATES, TOWERS, LATTICE MASTS, ROOFS, ROOFING FRAME-WORKS, DOORS AND WINDOWS AND THEIR FRAMES AND THRESHOLDS FOR DOORS, SHUTTERS, BALUSTRADES, PILLARS AND COLUMNS), OF IRON OR STEEL; PLATES, RODS, ANGLES, SHAPES, SECT TUBES AND THE LIKE, PREPARED FOR USE IN STRUCTURES, OF IRON OR STEEL::Doors, windows and their frames and thresholds for doors" +73084000,"STRUCTURES (EXCLUDING PREFABRICATED BUILDINGS OF HEADING 9406) AND PARTS OF STRUCTURES (FOR EXAMPLE, BRIDGES AND BRIDGE-SECTIONS, LOCK-GATES, TOWERS, LATTICE MASTS, ROOFS, ROOFING FRAME-WORKS, DOORS AND WINDOWS AND THEIR FRAMES AND THRESHOLDS FOR DOORS, SHUTTERS, BALUSTRADES, PILLARS AND COLUMNS), OF IRON OR STEEL; PLATES, RODS, ANGLES, SHAPES, SECT TUBES AND THE LIKE, PREPARED FOR USE IN STRUCTURES, OF IRON OR STEEL::Equipment for scaffolding, shuttering, propping or pit-propping" +73089000,structures excluding prefabricated buildings of heading 9406 and parts of structures for gates towers lattice masts roofs roofing doors and windows and their frames and thresholds for doors shutters balustrades pillars and columns of iron or steel plates rods angles shapes sections tubes and the like prepared for use in structures of iron 
or steel >> other +73089010,"STRUCTURES (EXCLUDING PREFABRICATED BUILDINGS OF HEADING 9406) AND PARTS OF STRUCTURES (FOR EXAMPLE, BRIDGES AND BRIDGE-SECTIONS, LOCK-GATES, TOWERS, LATTICE MASTS, ROOFS, ROOFING FRAME-WORKS, DOORS AND WINDOWS AND THEIR FRAMES AND THRESHOLDS FOR DOORS, SHUTTERS, BALUSTRADES, PILLARS AND COLUMNS), OF IRON OR STEEL; PLATES, RODS, ANGLES, SHAPES, SECT TUBES AND THE LIKE, PREPARED FOR USE IN STRUCTURES, OF IRON OR STEEL:Other :Beams, channels, pillars and girders prepared for use in structures" +73089020,"STRUCTURES (EXCLUDING PREFABRICATED BUILDINGS OF HEADING 9406) AND PARTS OF STRUCTURES (FOR EXAMPLE, BRIDGES AND BRIDGE-SECTIONS, LOCK-GATES, TOWERS, LATTICE MASTS, ROOFS, ROOFING FRAME-WORKS, DOORS AND WINDOWS AND THEIR FRAMES AND THRESHOLDS FOR DOORS, SHUTTERS, BALUSTRADES, PILLARS AND COLUMNS), OF IRON OR STEEL; PLATES, RODS, ANGLES, SHAPES, SECT TUBES AND THE LIKE, PREPARED FOR USE IN STRUCTURES, OF IRON OR STEEL:Other :Drop rods" +73089030,"STRUCTURES (EXCLUDING PREFABRICATED BUILDINGS OF HEADING 9406) AND PARTS OF STRUCTURES (FOR EXAMPLE, BRIDGES AND BRIDGE-SECTIONS, LOCK-GATES, TOWERS, LATTICE MASTS, ROOFS, ROOFING FRAME-WORKS, DOORS AND WINDOWS AND THEIR FRAMES AND THRESHOLDS FOR DOORS, SHUTTERS, BALUSTRADES, PILLARS AND COLUMNS), OF IRON OR STEEL; PLATES, RODS, ANGLES, SHAPES, SECT TUBES AND THE LIKE, PREPARED FOR USE IN STRUCTURES, OF IRON OR STEEL:Other :Hatchway, rails and bulkheads for ships or boats and parts of hull" +73089040,"STRUCTURES (EXCLUDING PREFABRICATED BUILDINGS OF HEADING 9406) AND PARTS OF STRUCTURES (FOR EXAMPLE, BRIDGES AND BRIDGE-SECTIONS, LOCK-GATES, TOWERS, LATTICE MASTS, ROOFS, ROOFING FRAME-WORKS, DOORS AND WINDOWS AND THEIR FRAMES AND THRESHOLDS FOR DOORS, SHUTTERS, BALUSTRADES, PILLARS AND COLUMNS), OF IRON OR STEEL; PLATES, RODS, ANGLES, SHAPES, SECT TUBES AND THE LIKE, PREPARED FOR USE IN STRUCTURES, OF IRON OR STEEL:Other :Galvanised tension bars" +73089050,"STRUCTURES (EXCLUDING PREFABRICATED BUILDINGS OF HEADING 9406) AND PARTS OF STRUCTURES (FOR EXAMPLE, BRIDGES AND BRIDGE-SECTIONS, LOCK-GATES, TOWERS, LATTICE MASTS, ROOFS, ROOFING FRAME-WORKS, DOORS AND WINDOWS AND THEIR FRAMES AND THRESHOLDS FOR DOORS, SHUTTERS, BALUSTRADES, PILLARS AND COLUMNS), OF IRON OR STEEL; PLATES, RODS, ANGLES, SHAPES, SECT TUBES AND THE LIKE, PREPARED FOR USE IN STRUCTURES, OF IRON OR STEEL:Other :Structures and super structures for mining" +73089060,"STRUCTURES (EXCLUDING PREFABRICATED BUILDINGS OF HEADING 9406) AND PARTS OF STRUCTURES (FOR EXAMPLE, BRIDGES AND BRIDGE-SECTIONS, LOCK-GATES, TOWERS, LATTICE MASTS, ROOFS, ROOFING FRAME-WORKS, DOORS AND WINDOWS AND THEIR FRAMES AND THRESHOLDS FOR DOORS, SHUTTERS, BALUSTRADES, PILLARS AND COLUMNS), OF IRON OR STEEL; PLATES, RODS, ANGLES, SHAPES, SECT TUBES AND THE LIKE, PREPARED FOR USE IN STRUCTURES, OF IRON OR STEEL:Other :Truss rods" +73089070,"STRUCTURES (EXCLUDING PREFABRICATED BUILDINGS OF HEADING 9406) AND PARTS OF STRUCTURES (FOR EXAMPLE, BRIDGES AND BRIDGE-SECTIONS, LOCK-GATES, TOWERS, LATTICE MASTS, ROOFS, ROOFING FRAME-WORKS, DOORS AND WINDOWS AND THEIR FRAMES AND THRESHOLDS FOR DOORS, SHUTTERS, BALUSTRADES, PILLARS AND COLUMNS), OF IRON OR STEEL; PLATES, RODS, ANGLES, SHAPES, SECT TUBES AND THE LIKE, PREPARED FOR USE IN STRUCTURES, OF IRON OR STEEL:Other :Tubular steel poles for electric transmission and distribution lines" +73089090,"STRUCTURES (EXCLUDING PREFABRICATED BUILDINGS OF HEADING 9406) AND PARTS OF STRUCTURES (FOR EXAMPLE, BRIDGES AND BRIDGE-SECTIONS, LOCK-GATES, TOWERS, 
LATTICE MASTS, ROOFS, ROOFING FRAME-WORKS, DOORS AND WINDOWS AND THEIR FRAMES AND THRESHOLDS FOR DOORS, SHUTTERS, BALUSTRADES, PILLARS AND COLUMNS), OF IRON OR STEEL; PLATES, RODS, ANGLES, SHAPES, SECT TUBES AND THE LIKE, PREPARED FOR USE IN STRUCTURES, OF IRON OR STEEL:Other :Other" +73090000,reservoirs tanks vats and similar containers for any material other than compressed or liquefied gas of iron or steel of a capacity exceeding 300 l whether or not lined or heat insulated but not fitted with mechanical or thermal equipment reservoirs tanks vats and similar containers for any material other than compressed or liquefied gas of iron or steel of a capacity exceeding 300 l whether or not lined or but not fitted with mechanical or thermal equipment +73090010,"RESERVOIRS, TANKS, VATS AND SIMILAR CONTAINERS FOR ANY MATERIAL (OTHER THAN COMPRESSED OR LIQUEFIED GAS), OF IRON OR STEEL, OF A CAPACITY EXCEEDING 300 WHETHER OR NOT LINED OR HEAT-INSULATED, BUT NOT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT:Reservoirs, tanks, vats and similar containers for any material (other than compressed or liquefied gas), of iron or steel, of a capacity exceeding 300 l, whether or not lined or heat-insulated, but not fitted with mechanical or thermal equipment :Galvanized iron tanks" +73090020,"RESERVOIRS, TANKS, VATS AND SIMILAR CONTAINERS FOR ANY MATERIAL (OTHER THAN COMPRESSED OR LIQUEFIED GAS), OF IRON OR STEEL, OF A CAPACITY EXCEEDING 300 WHETHER OR NOT LINED OR HEAT-INSULATED, BUT NOT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT:Reservoirs, tanks, vats and similar containers for any material (other than compressed or liquefied gas), of iron or steel, of a capacity exceeding 300 l, whether or not lined or heat-insulated, but not fitted with mechanical or thermal equipment :Galvanized iron barrels and drums" +73090030,"RESERVOIRS, TANKS, VATS AND SIMILAR CONTAINERS FOR ANY MATERIAL (OTHER THAN COMPRESSED OR LIQUEFIED GAS), OF IRON OR STEEL, OF A CAPACITY EXCEEDING 300 WHETHER OR NOT LINED OR HEAT-INSULATED, BUT NOT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT:Reservoirs, tanks, vats and similar containers for any material (other than compressed or liquefied gas), of iron or steel, of a capacity exceeding 300 l, whether or not lined or heat-insulated, but not fitted with mechanical or thermal equipment :Pressed steel tanks" +73090040,"RESERVOIRS, TANKS, VATS AND SIMILAR CONTAINERS FOR ANY MATERIAL (OTHER THAN COMPRESSED OR LIQUEFIED GAS), OF IRON OR STEEL, OF A CAPACITY EXCEEDING 300 WHETHER OR NOT LINED OR HEAT-INSULATED, BUT NOT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT:Reservoirs, tanks, vats and similar containers for any material (other than compressed or liquefied gas), of iron or steel, of a capacity exceeding 300 l, whether or not lined or heat-insulated, but not fitted with mechanical or thermal equipment :Pressure vessels" +73090090,"RESERVOIRS, TANKS, VATS AND SIMILAR CONTAINERS FOR ANY MATERIAL (OTHER THAN COMPRESSED OR LIQUEFIED GAS), OF IRON OR STEEL, OF A CAPACITY EXCEEDING 300 WHETHER OR NOT LINED OR HEAT-INSULATED, BUT NOT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT:Reservoirs, tanks, vats and similar containers for any material (other than compressed or liquefied gas), of iron or steel, of a capacity exceeding 300 l, whether or not lined or heat-insulated, but not fitted with mechanical or thermal equipment :Other" +73100000,tanks casks drums cans boxes and similar containers for any material other than compressed or liquefied gas of iron or steel of a capacity not exceeding 300 l whether 
or not lined or but not fitted with mechanical or thermal equipment +73101000,tanks casks drums cans boxes and similar containers for any material other than compressed or liquefied gas of iron or steel of a capacity not exceeding 300 l whether or not lined or but not fitted with mechanical or thermal equipment >> of a capacity of 50 l or more +73101010,"TANKS, CASKS, DRUMS, CANS, BOXES AND SIMILAR CONTAINERS, FOR ANY MATERIAL (OTHER THAN COMPRESSED OR LIQUEFIED GAS), OF IRON OR STEEL, OF A CAPACITY NOT EXCEEDING 300 I, WHETHER OR NOT LINED OR HEAT-INSULATED, BUT NOT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT:Of a capacity of 50 l or more :Tin plate containers" +73101020,"TANKS, CASKS, DRUMS, CANS, BOXES AND SIMILAR CONTAINERS, FOR ANY MATERIAL (OTHER THAN COMPRESSED OR LIQUEFIED GAS), OF IRON OR STEEL, OF A CAPACITY NOT EXCEEDING 300 I, WHETHER OR NOT LINED OR HEAT-INSULATED, BUT NOT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT:Of a capacity of 50 l or more :Trunks and cases" +73101090,"TANKS, CASKS, DRUMS, CANS, BOXES AND SIMILAR CONTAINERS, FOR ANY MATERIAL (OTHER THAN COMPRESSED OR LIQUEFIED GAS), OF IRON OR STEEL, OF A CAPACITY NOT EXCEEDING 300 I, WHETHER OR NOT LINED OR HEAT-INSULATED, BUT NOT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT:Of a capacity of 50 l or more :Other" +73102100,tanks casks drums cans boxes and similar containers for any material other than compressed or liquefied gas of iron or steel of a capacity not exceeding 300 l whether or not lined or but not fitted with mechanical or thermal equipment >> cans which are to be closed by soldering or crimping +73102110,"TANKS, CASKS, DRUMS, CANS, BOXES AND SIMILAR CONTAINERS, FOR ANY MATERIAL (OTHER THAN COMPRESSED OR LIQUEFIED GAS), OF IRON OR STEEL, OF A CAPACITY NOT EXCEEDING 300 I, WHETHER OR NOT LINED OR HEAT-INSULATED, BUT NOT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT:Cans which are to be closed by soldering or crimping :Tin plate containers" +73102190,"TANKS, CASKS, DRUMS, CANS, BOXES AND SIMILAR CONTAINERS, FOR ANY MATERIAL (OTHER THAN COMPRESSED OR LIQUEFIED GAS), OF IRON OR STEEL, OF A CAPACITY NOT EXCEEDING 300 I, WHETHER OR NOT LINED OR HEAT-INSULATED, BUT NOT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT:Cans which are to be closed by soldering or crimping :Other" +73102900,tanks casks drums cans boxes and similar containers for any material other than compressed or liquefied gas of iron or steel of a capacity not exceeding 300 l whether or not lined or but not fitted with mechanical or thermal equipment >> other +73102910,"TANKS, CASKS, DRUMS, CANS, BOXES AND SIMILAR CONTAINERS, FOR ANY MATERIAL (OTHER THAN COMPRESSED OR LIQUEFIED GAS), OF IRON OR STEEL, OF A CAPACITY NOT EXCEEDING 300 I, WHETHER OR NOT LINED OR HEAT-INSULATED, BUT NOT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT:Other :Tin plate containers" +73102920,"TANKS, CASKS, DRUMS, CANS, BOXES AND SIMILAR CONTAINERS, FOR ANY MATERIAL (OTHER THAN COMPRESSED OR LIQUEFIED GAS), OF IRON OR STEEL, OF A CAPACITY NOT EXCEEDING 300 I, WHETHER OR NOT LINED OR HEAT-INSULATED, BUT NOT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT:Other :Trunks and cases" +73102990,"TANKS, CASKS, DRUMS, CANS, BOXES AND SIMILAR CONTAINERS, FOR ANY MATERIAL (OTHER THAN COMPRESSED OR LIQUEFIED GAS), OF IRON OR STEEL, OF A CAPACITY NOT EXCEEDING 300 I, WHETHER OR NOT LINED OR HEAT-INSULATED, BUT NOT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT:Other :Other" +73110000,containers for compressed or liquefied gas of iron or steel containers for compressed or liquefied gas of iron or steel 
+73110010,"CONTAINERS FOR COMPRESSED OR LIQUEFIED GAS, OF IRON OR STEEL:Containers for compressed or liquefied gas, of iron or steel :Liquefied petroleum gas (LPG) cylinder" +73110020,"CONTAINERS FOR COMPRESSED OR LIQUEFIED GAS, OF IRON OR STEEL:Containers for compressed or liquefied gas, of iron or steel :Low pressure cylinder (working pressure up to 35.2 kg/sq.cm other than LPG)" +73110030,"CONTAINERS FOR COMPRESSED OR LIQUEFIED GAS, OF IRON OR STEEL:Containers for compressed or liquefied gas, of iron or steel :High pressure cylinder (working pressure exceeding 35.2 kg/sq.cm)" +73110090,"CONTAINERS FOR COMPRESSED OR LIQUEFIED GAS, OF IRON OR STEEL:Containers for compressed or liquefied gas, of iron or steel :Other" +73120000,stranded wire ropes cables plaited bands slings and the like of iron or steel not electrically insulated +73121000,stranded wire ropes cables plaited bands slings and the like of iron or steel not electrically insulated >> stranded wire ropes and cables +73121010,"STRANDED WIRE, ROPES, CABLES, PLAITED BANDS, SLINGS AND THE LIKE, OF IRON OR STEEL, NOT ELECTRICALLY INSULATED:Stranded wire, ropes and cables :Wire ropes, black" +73121020,"STRANDED WIRE, ROPES, CABLES, PLAITED BANDS, SLINGS AND THE LIKE, OF IRON OR STEEL, NOT ELECTRICALLY INSULATED:Stranded wire, ropes and cables :Wire ropes, galvanised(OLD tariff)" +73121030,"STRANDED WIRE, ROPES, CABLES, PLAITED BANDS, SLINGS AND THE LIKE, OF IRON OR STEEL, NOT ELECTRICALLY INSULATED:Stranded wire, ropes and cables :Stranded wire(OLD tariff)" +73121090,"STRANDED WIRE, ROPES, CABLES, PLAITED BANDS, SLINGS AND THE LIKE, OF IRON OR STEEL, NOT ELECTRICALLY INSULATED:Stranded wire, ropes and cables :Other(OLD tariff)" +73129000,"STRANDED WIRE, ROPES, CABLES, PLAITED BANDS, SLINGS AND THE LIKE, OF IRON OR STEEL, NOT ELECTRICALLY INSULATED::Other(OLD tariff)" +73130000,barbed wire of iron or steel twisted hoop or single flat wire barbed or not and loosely twisted double wire of a kind used for fencing of iron or steel barbed wire of iron or steel twisted hoop or single flat wire barbed or not and loosely twisted double wire of a kind used for fencing of iron or steel +73130010,"BARBED WIRE OF IRON OR STEEL; TWISTED HOOP OR SINGLE FLAT WIRE, BARBED OR NOT, AND LOOSELY TWISTED DOUBLE WIRE, OF A KIND USED FOR FENCING, OF IRON OR STEEL:Barbed wire of iron or steel; twisted hoop or single flat wire, barbed or not, and loosely twisted double wire, of a kind used for fencing, of iron or steel :Barbed wire" +73130020,"BARBED WIRE OF IRON OR STEEL; TWISTED HOOP OR SINGLE FLAT WIRE, BARBED OR NOT, AND LOOSELY TWISTED DOUBLE WIRE, OF A KIND USED FOR FENCING, OF IRON OR STEEL:Barbed wire of iron or steel; twisted hoop or single flat wire, barbed or not, and loosely twisted double wire, of a kind used for fencing, of iron or steel :Twisted hoop or single flat wire, barbed or not, and loosely twisted double wire, of a kind used for fencing" +73140000,cloth including endless bands grill netting and fencing of iron or steel wire expanded metal of iron or steel woven cloth +73141200,"CLOTH (INCLUDING ENDLESS BANDS), GRILL, NETTING AND FENCING, OF IRON OR STEEL WIRE; EXPANDED METAL OF IRON OR STEEL::Endless bands for machinery, of stainless steel" +73141400,cloth including endless bands grill netting and fencing of iron or steel wire expanded metal of iron or steel woven cloth >> other woven cloth of stainless steel +73141410,"CLOTH (INCLUDING ENDLESS BANDS), GRILL, NETTING AND FENCING, OF IRON OR STEEL WIRE; EXPANDED METAL OF IRON OR 
STEEL:Other woven cloth, of stainless steel :Wire gauze (wire cloth, wire mesh)" +73141490,"CLOTH (INCLUDING ENDLESS BANDS), GRILL, NETTING AND FENCING, OF IRON OR STEEL WIRE; EXPANDED METAL OF IRON OR STEEL:Other woven cloth, of stainless steel :Other" +73141900,cloth including endless bands grill netting and fencing of iron or steel wire expanded metal of iron or steel woven cloth >> other +73141910,"CLOTH (INCLUDING ENDLESS BANDS), GRILL, NETTING AND FENCING, OF IRON OR STEEL WIRE; EXPANDED METAL OF IRON OR STEEL:Other :Wire gauze (wire cloth, wire mesh)" +73141990,"CLOTH (INCLUDING ENDLESS BANDS), GRILL, NETTING AND FENCING, OF IRON OR STEEL WIRE; EXPANDED METAL OF IRON OR STEEL:Other :Other" +73142000,cloth including endless bands grill netting and fencing of iron or steel wire expanded metal of iron or steel woven cloth >> grill netting and fencing welded at the intersection of wire with a maximum dimension of 3 mm or more and having a mesh size of 100 cm2 or more +73142010,"CLOTH (INCLUDING ENDLESS BANDS), GRILL, NETTING AND FENCING, OF IRON OR STEEL WIRE; EXPANDED METAL OF IRON OR STEEL:Grill, netting and fencing, welded at the intersection, of wire with a maximum crosssectional dimension of 3 mm or more and having a mesh size of 100 cm2 or more :Wire netting" +73142090,"CLOTH (INCLUDING ENDLESS BANDS), GRILL, NETTING AND FENCING, OF IRON OR STEEL WIRE; EXPANDED METAL OF IRON OR STEEL:Grill, netting and fencing, welded at the intersection, of wire with a maximum crosssectional dimension of 3 mm or more and having a mesh size of 100 cm2 or more :Other" +73143100,"CLOTH (INCLUDING ENDLESS BANDS), GRILL, NETTING AND FENCING, OF IRON OR STEEL WIRE; EXPANDED METAL OF IRON OR STEEL::Plated or coated with zinc" +73143900,"CLOTH (INCLUDING ENDLESS BANDS), GRILL, NETTING AND FENCING, OF IRON OR STEEL WIRE; EXPANDED METAL OF IRON OR STEEL::Other" +73144100,cloth including endless bands grill netting and fencing of iron or steel wire expanded metal of iron or steel woven cloth >> plated or coated with zinc +73144110,"CLOTH (INCLUDING ENDLESS BANDS), GRILL, NETTING AND FENCING, OF IRON OR STEEL WIRE; EXPANDED METAL OF IRON OR STEEL:Plated or coated with zinc :Wire netting" +73144190,"CLOTH (INCLUDING ENDLESS BANDS), GRILL, NETTING AND FENCING, OF IRON OR STEEL WIRE; EXPANDED METAL OF IRON OR STEEL:Plated or coated with zinc :Other" +73144200,cloth including endless bands grill netting and fencing of iron or steel wire expanded metal of iron or steel woven cloth >> coated with plastics +73144210,"CLOTH (INCLUDING ENDLESS BANDS), GRILL, NETTING AND FENCING, OF IRON OR STEEL WIRE; EXPANDED METAL OF IRON OR STEEL:Coated with plastics :Wire netting" +73144290,"CLOTH (INCLUDING ENDLESS BANDS), GRILL, NETTING AND FENCING, OF IRON OR STEEL WIRE; EXPANDED METAL OF IRON OR STEEL:Coated with plastics :Other" +73144900,cloth including endless bands grill netting and fencing of iron or steel wire expanded metal of iron or steel woven cloth >> other +73144910,"CLOTH (INCLUDING ENDLESS BANDS), GRILL, NETTING AND FENCING, OF IRON OR STEEL WIRE; EXPANDED METAL OF IRON OR STEEL:Other :Wire netting" +73144990,"CLOTH (INCLUDING ENDLESS BANDS), GRILL, NETTING AND FENCING, OF IRON OR STEEL WIRE; EXPANDED METAL OF IRON OR STEEL:Other :Other" +73145000,"CLOTH (INCLUDING ENDLESS BANDS), GRILL, NETTING AND FENCING, OF IRON OR STEEL WIRE; EXPANDED METAL OF IRON OR STEEL::Expanded metal" +73150000,chain and parts thereof of iron or steel articulated link chain and parts thereof +73151100,"CHAIN AND PARTS THEREOF, OF IRON 
OR STEEL::Roller chain" +73151200,chain and parts thereof of iron or steel articulated link chain and parts thereof >> other chain +73151210,"CHAIN AND PARTS THEREOF, OF IRON OR STEEL:Other chain :Lifting and hoisting chain" +73151220,"CHAIN AND PARTS THEREOF, OF IRON OR STEEL:Other chain :Ship chain" +73151290,"CHAIN AND PARTS THEREOF, OF IRON OR STEEL:Other chain :Other" +73151900,"CHAIN AND PARTS THEREOF, OF IRON OR STEEL::Parts" +73152000,"CHAIN AND PARTS THEREOF, OF IRON OR STEEL::Skid chain" +73158100,"CHAIN AND PARTS THEREOF, OF IRON OR STEEL::Stud-link" +73158200,"CHAIN AND PARTS THEREOF, OF IRON OR STEEL::Other, welded link" +73158900,"CHAIN AND PARTS THEREOF, OF IRON OR STEEL::Other" +73159000,"CHAIN AND PARTS THEREOF, OF IRON OR STEEL::Other parts" +73160000,anchors grapnels and parts thereof of iron or steel anchors grapnels and parts thereof of iron or steel +73160010,"ANCHORS, GRAPNELS AND PARTS THEREOF, OF IRON OR STEEL:Anchors, grapnels and parts thereof, of iron or steel :Anchors and grapnels" +73160090,"ANCHORS, GRAPNELS AND PARTS THEREOF, OF IRON OR STEEL:Anchors, grapnels and parts thereof, of iron or steel :Parts" +73170000,nails tacks drawing pins corrugated nails staples other than those of heading 8305 and similar articles of iron or steel whether or not with heads of other material but excluding such articles with heads of copper nails tacks drawing pins corrugated nails staples other than those of heading 8305 and similar articles of iron or steel whether or not with heads of other material but excluding such articles with heads of copper nails +73170011,"NAILS, TACKS, DRAWING PINS, CORRUGATED NAILS,:Nails, tacks, drawing pins, corrugated nails, staples (other than those of heading 8305) and similar articles, of iron or steel, whether or not with heads of other material, but excluding such articles with heads of copper :For animal shoes" +73170012,"NAILS, TACKS, DRAWING PINS, CORRUGATED NAILS,:Nails, tacks, drawing pins, corrugated nails, staples (other than those of heading 8305) and similar articles, of iron or steel, whether or not with heads of other material, but excluding such articles with heads of copper :For fixing heel strips and toe plates" +73170013,"NAILS, TACKS, DRAWING PINS, CORRUGATED NAILS,:Nails, tacks, drawing pins, corrugated nails, staples (other than those of heading 8305) and similar articles, of iron or steel, whether or not with heads of other material, but excluding such articles with heads of copper :Wire nails" +73170019,"NAILS, TACKS, DRAWING PINS, CORRUGATED NAILS,:Nails, tacks, drawing pins, corrugated nails, staples (other than those of heading 8305) and similar articles, of iron or steel, whether or not with heads of other material, but excluding such articles with heads of copper :Other" +73170021,"NAILS, TACKS, DRAWING PINS, CORRUGATED NAILS,:Nails, tacks, drawing pins, corrugated nails, staples (other than those of heading 8305) and similar articles, of iron or steel, whether or not with heads of other material, but excluding such articles with heads of copper :Dog spikes" +73170029,"NAILS, TACKS, DRAWING PINS, CORRUGATED NAILS,:Nails, tacks, drawing pins, corrugated nails, staples (other than those of heading 8305) and similar articles, of iron or steel, whether or not with heads of other material, but excluding such articles with heads of copper :Other" +73170030,"NAILS, TACKS, DRAWING PINS, CORRUGATED NAILS,:Nails, tacks, drawing pins, corrugated nails, staples (other than those of heading 8305) and similar articles, of iron 
or steel, whether or not with heads of other material, but excluding such articles with heads of copper :Tacks" +73170091,"NAILS, TACKS, DRAWING PINS, CORRUGATED NAILS,:Nails, tacks, drawing pins, corrugated nails, staples (other than those of heading 8305) and similar articles, of iron or steel, whether or not with heads of other material, but excluding such articles with heads of copper :Staples other than in strips, and drawing pins" +73170099,"NAILS, TACKS, DRAWING PINS, CORRUGATED NAILS,:Nails, tacks, drawing pins, corrugated nails, staples (other than those of heading 8305) and similar articles, of iron or steel, whether or not with heads of other material, but excluding such articles with heads of copper :Other" +73180000,screws bolts nuts screw hooks rivets cotters washers including spring washers and similar articles of iron or steel threaded articles +73181100,screws bolts nuts screw hooks rivets cotters washers including spring washers and similar articles of iron or steel threaded articles >> coach screws +73181110,"SCREWS, BOLTS, NUTS, COACH-SCREWS, SCREW HOOKS, RIVETS, COTTERS, COTTER-PINS, WASHERS (INCLUDING SPRING WASHERS) AND SIMILAR ARTICLES, OF IRON OR STEEL:Coach screws :Machine screws" +73181190,"SCREWS, BOLTS, NUTS, COACH-SCREWS, SCREW HOOKS, RIVETS, COTTERS, COTTER-PINS, WASHERS (INCLUDING SPRING WASHERS) AND SIMILAR ARTICLES, OF IRON OR STEEL:Coach screws :Other" +73181200,"SCREWS, BOLTS, NUTS, COACH-SCREWS, SCREW HOOKS, RIVETS, COTTERS, COTTER-PINS, WASHERS (INCLUDING SPRING WASHERS) AND SIMILAR ARTICLES, OF IRON OR STEEL::Other wood screws" +73181300,"SCREWS, BOLTS, NUTS, COACH-SCREWS, SCREW HOOKS, RIVETS, COTTERS, COTTER-PINS, WASHERS (INCLUDING SPRING WASHERS) AND SIMILAR ARTICLES, OF IRON OR STEEL::Screw hooks and screw rings" +73181400,"SCREWS, BOLTS, NUTS, COACH-SCREWS, SCREW HOOKS, RIVETS, COTTERS, COTTER-PINS, WASHERS (INCLUDING SPRING WASHERS) AND SIMILAR ARTICLES, OF IRON OR STEEL::Self-tapping screws" +73181500,"SCREWS, BOLTS, NUTS, COACH-SCREWS, SCREW HOOKS, RIVETS, COTTERS, COTTER-PINS, WASHERS (INCLUDING SPRING WASHERS) AND SIMILAR ARTICLES, OF IRON OR STEEL::Other screws and bolts, whether or not with their nuts or washers" +73181600,"SCREWS, BOLTS, NUTS, COACH-SCREWS, SCREW HOOKS, RIVETS, COTTERS, COTTER-PINS, WASHERS (INCLUDING SPRING WASHERS) AND SIMILAR ARTICLES, OF IRON OR STEEL::Nuts" +73181900,"SCREWS, BOLTS, NUTS, COACH-SCREWS, SCREW HOOKS, RIVETS, COTTERS, COTTER-PINS, WASHERS (INCLUDING SPRING WASHERS) AND SIMILAR ARTICLES, OF IRON OR STEEL::Other" +73182100,"SCREWS, BOLTS, NUTS, COACH-SCREWS, SCREW HOOKS, RIVETS, COTTERS, COTTER-PINS, WASHERS (INCLUDING SPRING WASHERS) AND SIMILAR ARTICLES, OF IRON OR STEEL::Spring washers and other lock washers" +73182200,"SCREWS, BOLTS, NUTS, COACH-SCREWS, SCREW HOOKS, RIVETS, COTTERS, COTTER-PINS, WASHERS (INCLUDING SPRING WASHERS) AND SIMILAR ARTICLES, OF IRON OR STEEL::Other washers" +73182300,"SCREWS, BOLTS, NUTS, COACH-SCREWS, SCREW HOOKS, RIVETS, COTTERS, COTTER-PINS, WASHERS (INCLUDING SPRING WASHERS) AND SIMILAR ARTICLES, OF IRON OR STEEL::Rivets" +73182400,"SCREWS, BOLTS, NUTS, COACH-SCREWS, SCREW HOOKS, RIVETS, COTTERS, COTTER-PINS, WASHERS (INCLUDING SPRING WASHERS) AND SIMILAR ARTICLES, OF IRON OR STEEL::Cotters and cotter-pins" +73182900,screws bolts nuts screw hooks rivets cotters washers including spring washers and similar articles of iron or steel threaded articles >> other +73182910,"SCREWS, BOLTS, NUTS, COACH-SCREWS, SCREW HOOKS, RIVETS, COTTERS, COTTER-PINS, WASHERS (INCLUDING SPRING 
WASHERS) AND SIMILAR ARTICLES, OF IRON OR STEEL:Other :Circlips" +73182990,"SCREWS, BOLTS, NUTS, COACH-SCREWS, SCREW HOOKS, RIVETS, COTTERS, COTTER-PINS, WASHERS (INCLUDING SPRING WASHERS) AND SIMILAR ARTICLES, OF IRON OR STEEL:Other :Other" +73190000,sewing needles knitting needles bodkins crochet hooks embroidery stilettos and similar articles for use in the hand of iron or steel safety pins and other pins of iron or steel not elsewhere specified or included +73194000,sewing needles knitting needles bodkins crochet hooks embroidery stilettos and similar articles for use in the hand of iron or steel safety pins and other pins of iron or steel not elsewhere specified or included >> safety pins and other pins +73194010,"SEWING NEEDLES, KNITTING NEEDLES, BODKINS, CROCHET HOOKS, EMBROIDERY STILETTOS AND SIMILAR ARTICLES, FOR USE IN THE HAND, OF IRON OR STEEL; SAFETY PINS AND OTHER PINS, OF IRON OR STEEL, NOT ELSEWHERE SPECIFIED OR INCLUDED:Safety pins and other pins:Safety pins" +73194090,"SEWING NEEDLES, KNITTING NEEDLES, BODKINS, CROCHET HOOKS, EMBROIDERY STILETTOS AND SIMILAR ARTICLES, FOR USE IN THE HAND, OF IRON OR STEEL; SAFETY PINS AND OTHER PINS, OF IRON OR STEEL, NOT ELSEWHERE SPECIFIED OR INCLUDED:Safety pins and other pins:Other pins" +73199000,"SEWING NEEDLES, KNITTING NEEDLES, BODKINS, CROCHET HOOKS, EMBROIDERY STILETTOS AND SIMILAR ARTICLES, FOR USE IN THE HAND, OF IRON OR STEEL; SAFETY PINS AND OTHER PINS, OF IRON OR STEEL, NOT ELSEWHERE SPECIFIED OR INCLUDED::Other" +73200000,springs and leaves for springs of iron or steel +73201000,springs and leaves for springs of iron or steel >> and leaves therefor +73201011,"SPRINGS AND LEAVES FOR SPRINGS, OF IRON OR STEEL:Leaf-springs and leaves therefor :For motor vehicles" +73201012,"SPRINGS AND LEAVES FOR SPRINGS, OF IRON OR STEEL:Leaf-springs and leaves therefor :For railways and tramways" +73201019,"SPRINGS AND LEAVES FOR SPRINGS, OF IRON OR STEEL:Leaf-springs and leaves therefor :Other" +73201020,"SPRINGS AND LEAVES FOR SPRINGS, OF IRON OR STEEL:Leaf-springs and leaves therefor :Leaves for springs" +73202000,"SPRINGS AND LEAVES FOR SPRINGS, OF IRON OR STEEL::Helical springs" +73209000,springs and leaves for springs of iron or steel >> other +73209010,"SPRINGS AND LEAVES FOR SPRINGS, OF IRON OR STEEL:Other :Coil spring for railways, tramways" +73209020,"SPRINGS AND LEAVES FOR SPRINGS, OF IRON OR STEEL:Other :Spring pins" +73209090,"SPRINGS AND LEAVES FOR SPRINGS, OF IRON OR STEEL:Other :Other" +73210000,stoves ranges grates cookers including those with subsidiary boilers for central heating barbecues braziers plate warmers and similar domestic appliances and parts thereof of iron or steel cooking appliances and plate warmers +73211100,stoves ranges grates cookers including those with subsidiary boilers for central heating barbecues braziers plate warmers and similar domestic appliances and parts thereof of iron or steel cooking appliances and plate warmers >> for gas fuel or for both gas and other fuels for gas fuel or for both gas and other fuels +73211110,"STOVES, RANGES, GRATES, COOKERS (INCLUDING THOSE WITH SUBSIDIARY BOILERS FOR CENTRAL HEATING), BARBECUES, BRAZIERS, GAS-RINGS, PLATE WARMERS AND SIMILAR NON-ELECTRIC DOMESTIC APPLIANCES, AND PARTS THEREOF, OF IRON OR STEEL:For gas fuel or for both gas and other fuels :Cookers and kitchen stoves" +73211120,"STOVES, RANGES, GRATES, COOKERS (INCLUDING THOSE WITH SUBSIDIARY BOILERS FOR CENTRAL HEATING), BARBECUES, BRAZIERS, GAS-RINGS, PLATE WARMERS AND SIMILAR NON-ELECTRIC DOMESTIC 
APPLIANCES, AND PARTS THEREOF, OF IRON OR STEEL:For gas fuel or for both gas and other fuels :Other stoves" +73211190,"STOVES, RANGES, GRATES, COOKERS (INCLUDING THOSE WITH SUBSIDIARY BOILERS FOR CENTRAL HEATING), BARBECUES, BRAZIERS, GAS-RINGS, PLATE WARMERS AND SIMILAR NON-ELECTRIC DOMESTIC APPLIANCES, AND PARTS THEREOF, OF IRON OR STEEL:For gas fuel or for both gas and other fuels :Other" +73211200,stoves ranges grates cookers including those with subsidiary boilers for central heating barbecues braziers plate warmers and similar domestic appliances and parts thereof of iron or steel cooking appliances and plate warmers >> for liquid fuel for liquid fuel +73211210,"STOVES, RANGES, GRATES, COOKERS (INCLUDING THOSE WITH SUBSIDIARY BOILERS FOR CENTRAL HEATING), BARBECUES, BRAZIERS, GAS-RINGS, PLATE WARMERS AND SIMILAR NON-ELECTRIC DOMESTIC APPLIANCES, AND PARTS THEREOF, OF IRON OR STEEL:For liquid fuel:Cookers and kitchen stoves" +73211220,"STOVES, RANGES, GRATES, COOKERS (INCLUDING THOSE WITH SUBSIDIARY BOILERS FOR CENTRAL HEATING), BARBECUES, BRAZIERS, GAS-RINGS, PLATE WARMERS AND SIMILAR NON-ELECTRIC DOMESTIC APPLIANCES, AND PARTS THEREOF, OF IRON OR STEEL:For liquid fuel:Other stoves" +73211290,"STOVES, RANGES, GRATES, COOKERS (INCLUDING THOSE WITH SUBSIDIARY BOILERS FOR CENTRAL HEATING), BARBECUES, BRAZIERS, GAS-RINGS, PLATE WARMERS AND SIMILAR NON-ELECTRIC DOMESTIC APPLIANCES, AND PARTS THEREOF, OF IRON OR STEEL:For liquid fuel:Other" +73211900,stoves ranges grates cookers including those with subsidiary boilers for central heating barbecues braziers plate warmers and similar domestic appliances and parts thereof of iron or steel cooking appliances and plate warmers >> other including appliances for solid fuel other including appliances for solid fuel +73211910,"STOVES, RANGES, GRATES, COOKERS (INCLUDING THOSE WITH SUBSIDIARY BOILERS FOR CENTRAL HEATING), BARBECUES, BRAZIERS, GAS-RINGS, PLATE WARMERS AND SIMILAR NON-ELECTRIC DOMESTIC APPLIANCES, AND PARTS THEREOF, OF IRON OR STEEL:Other, including appliances for solid fuel:Cookers and kitchen stoves" +73211990,"STOVES, RANGES, GRATES, COOKERS (INCLUDING THOSE WITH SUBSIDIARY BOILERS FOR CENTRAL HEATING), BARBECUES, BRAZIERS, GAS-RINGS, PLATE WARMERS AND SIMILAR NON-ELECTRIC DOMESTIC APPLIANCES, AND PARTS THEREOF, OF IRON OR STEEL:Other, including appliances for solid fuel:Other stoves and appliances" +73218100,"STOVES, RANGES, GRATES, COOKERS (INCLUDING THOSE WITH SUBSIDIARY BOILERS FOR CENTRAL HEATING), BARBECUES, BRAZIERS, GAS-RINGS, PLATE WARMERS AND SIMILAR NON-ELECTRIC DOMESTIC APPLIANCES, AND PARTS THEREOF, OF IRON OR STEEL::For gas fuel or for both gas and other fuels" +73218200,"STOVES, RANGES, GRATES, COOKERS (INCLUDING THOSE WITH SUBSIDIARY BOILERS FOR CENTRAL HEATING), BARBECUES, BRAZIERS, GAS-RINGS, PLATE WARMERS AND SIMILAR NON-ELECTRIC DOMESTIC APPLIANCES, AND PARTS THEREOF, OF IRON OR STEEL::For liquid fuel" +73218900,stoves ranges grates cookers including those with subsidiary boilers for central heating barbecues braziers plate warmers and similar domestic appliances and parts thereof of iron or steel cooking appliances and plate warmers >> other including appliances for solid fuel other including appliances for solid fuel +73218910,"STOVES, RANGES, GRATES, COOKERS (INCLUDING THOSE WITH SUBSIDIARY BOILERS FOR CENTRAL HEATING), BARBECUES, BRAZIERS, GAS-RINGS, PLATE WARMERS AND SIMILAR NON-ELECTRIC DOMESTIC APPLIANCES, AND PARTS THEREOF, OF IRON OR STEEL:Other, including appliances for solid fuel:Clay tandoor (oven 
with iron or steel body and earthen grates)" +73218990,"STOVES, RANGES, GRATES, COOKERS (INCLUDING THOSE WITH SUBSIDIARY BOILERS FOR CENTRAL HEATING), BARBECUES, BRAZIERS, GAS-RINGS, PLATE WARMERS AND SIMILAR NON-ELECTRIC DOMESTIC APPLIANCES, AND PARTS THEREOF, OF IRON OR STEEL:Other, including appliances for solid fuel:Other" +73219000,"STOVES, RANGES, GRATES, COOKERS (INCLUDING THOSE WITH SUBSIDIARY BOILERS FOR CENTRAL HEATING), BARBECUES, BRAZIERS, GAS-RINGS, PLATE WARMERS AND SIMILAR NON-ELECTRIC DOMESTIC APPLIANCES, AND PARTS THEREOF, OF IRON OR STEEL::Parts" +73220000,radiators for central heating not electrically heated and parts thereof of iron +73221100,"RADIATORS FOR CENTRAL HEATING, NOT ELECTRICALLY HEATED, AND PARTS THEREOF, OF IRON OR STEEL; AIR HEATERS AND HOT AIR DISTRIBUTORS (INCLUDING DISTRIBUTORS WHICH CAN ALSO DISTRIBUTE FRESH OR CONDITIONED AIR), NOT ELECTRICALLY HEATED, INCORPORATING A MOTOR-DRIVEN FAN OR BLOWER, AND PARTS THEREOF, OF IRON OR STEEL::Of cast iron(OLD tariff)" +73221900,"RADIATORS FOR CENTRAL HEATING, NOT ELECTRICALLY HEATED, AND PARTS THEREOF, OF IRON OR STEEL; AIR HEATERS AND HOT AIR DISTRIBUTORS (INCLUDING DISTRIBUTORS WHICH CAN ALSO DISTRIBUTE FRESH OR CONDITIONED AIR), NOT ELECTRICALLY HEATED, INCORPORATING A MOTOR-DRIVEN FAN OR BLOWER, AND PARTS THEREOF, OF IRON OR STEEL::Other(OLD tariff)" +73229010,"RADIATORS FOR CENTRAL HEATING, NOT ELECTRICALLY HEATED, AND PARTS THEREOF, OF IRON OR STEEL; AIR HEATERS AND HOT AIR DISTRIBUTORS (INCLUDING DISTRIBUTORS WHICH CAN ALSO DISTRIBUTE FRESH OR CONDITIONED AIR), NOT ELECTRICALLY HEATED, INCORPORATING A MOTOR-DRIVEN FAN OR BLOWER, AND PARTS THEREOF, OF IRON OR STEEL:Other :Air heaters and hot air distributors(OLD tariff)" +73229090,"RADIATORS FOR CENTRAL HEATING, NOT ELECTRICALLY HEATED, AND PARTS THEREOF, OF IRON OR STEEL; AIR HEATERS AND HOT AIR DISTRIBUTORS (INCLUDING DISTRIBUTORS WHICH CAN ALSO DISTRIBUTE FRESH OR CONDITIONED AIR), NOT ELECTRICALLY HEATED, INCORPORATING A MOTOR-DRIVEN FAN OR BLOWER, AND PARTS THEREOF, OF IRON OR STEEL:Other :Parts of air heaters and hot air distributors(OLD tariff)" +73230000,table kitchen or other household articles and parts thereof of iron or steel iron or steel wool pot scourers and scouring or polishing pads gloves and the like of iron or steel table kitchen or other household articles and parts thereof of iron or steel iron or steel wool pot scourers and scouring or polishing pads gloves and the like of iron or steel +73231000,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF IRON OR STEEL; IRON OR STEEL WOOL; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF IRON OR STEEL::Iron or steel wool; pot scourers and scouring or polishing pads, gloves and the like" +73239100,table kitchen or other household articles and parts thereof of iron or steel iron or steel wool pot scourers and scouring or polishing pads gloves and the like of iron or steel table kitchen or other household articles and parts thereof of iron or steel iron or steel wool pot scourers and scouring or polishing pads gloves and the like of iron or steel >> of cast iron not enamelled of cast iron not enamelled +73239110,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF IRON OR STEEL; IRON OR STEEL WOOL; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF IRON OR STEEL:Of cast iron, not enamelled :Pans" +73239190,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF IRON OR STEEL; IRON OR STEEL WOOL; POT SCOURERS AND SCOURING 
OR POLISHING PADS, GLOVES AND THE LIKE, OF IRON OR STEEL:Of cast iron, not enamelled :Other" +73239200,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF IRON OR STEEL; IRON OR STEEL WOOL; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF IRON OR STEEL::Of cast iron, enamelled" +73239300,table kitchen or other household articles and parts thereof of iron or steel iron or steel wool pot scourers and scouring or polishing pads gloves and the like of iron or steel table kitchen or other household articles and parts thereof of iron or steel iron or steel wool pot scourers and scouring or polishing pads gloves and the like of iron or steel >> of stainless steel of stainless steel +73239310,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF IRON OR STEEL; IRON OR STEEL WOOL; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF IRON OR STEEL:Of stainless steel :Pressure cookers" +73239390,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF IRON OR STEEL; IRON OR STEEL WOOL; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF IRON OR STEEL:Of stainless steel :Other" +73239400,table kitchen or other household articles and parts thereof of iron or steel iron or steel wool pot scourers and scouring or polishing pads gloves and the like of iron or steel table kitchen or other household articles and parts thereof of iron or steel iron or steel wool pot scourers and scouring or polishing pads gloves and the like of iron or steel >> of iron other than cast iron or steel enamelled of iron other than cast iron or steel enamelled +73239410,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF IRON OR STEEL; IRON OR STEEL WOOL; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF IRON OR STEEL:Of iron (other than cast iron) or steel, enamelled :Ghamellas" +73239420,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF IRON OR STEEL; IRON OR STEEL WOOL; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF IRON OR STEEL:Of iron (other than cast iron) or steel, enamelled :Utensils" +73239490,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF IRON OR STEEL; IRON OR STEEL WOOL; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF IRON OR STEEL:Of iron (other than cast iron) or steel, enamelled :Other" +73239900,table kitchen or other household articles and parts thereof of iron or steel iron or steel wool pot scourers and scouring or polishing pads gloves and the like of iron or steel table kitchen or other household articles and parts thereof of iron or steel iron or steel wool pot scourers and scouring or polishing pads gloves and the like of iron or steel >> other other +73239910,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF IRON OR STEEL; IRON OR STEEL WOOL; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF IRON OR STEEL:Other :Utensils of galvanised iron" +73239920,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF IRON OR STEEL; IRON OR STEEL WOOL; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF IRON OR STEEL:Other :Other utensils" +73239990,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF IRON OR STEEL; IRON OR STEEL WOOL; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF IRON OR STEEL:Other :Other" +73240000,sanitary ware and parts thereof of iron or steel sanitary ware and parts thereof of iron or steel 
+73241000,"SANITARY WARE AND PARTS THEREOF, OF IRON OR STEEL::Sinks and wash basins, of stainless steel" +73242100,"SANITARY WARE AND PARTS THEREOF, OF IRON OR STEEL::Of cast iron, whether or not enamelled" +73242900,"SANITARY WARE AND PARTS THEREOF, OF IRON OR STEEL::Other" +73249000,"SANITARY WARE AND PARTS THEREOF, OF IRON OR STEEL::Other, including parts" +73250000,other cast articles of iron or steel other cast articles of iron or steel +73251000,OTHER CAST ARTICLES OF IRON OR STEEL::Of non-malleable cast iron +73259100,OTHER CAST ARTICLES OF IRON OR STEEL::Grinding balls and similar articles for mills +73259900,other cast articles of iron or steel other cast articles of iron or steel >> other other +73259910,OTHER CAST ARTICLES OF IRON OR STEEL:Other :Of iron +73259920,OTHER CAST ARTICLES OF IRON OR STEEL:Other :Of alloy steel +73259930,OTHER CAST ARTICLES OF IRON OR STEEL:Other :Of stainless steel +73259991,OTHER CAST ARTICLES OF IRON OR STEEL:Other :Rudders for ships or boats +73259992,OTHER CAST ARTICLES OF IRON OR STEEL:Other :Drain covers +73259993,OTHER CAST ARTICLES OF IRON OR STEEL:Other :Plates and frames for sewage water or similar system +73259999,OTHER CAST ARTICLES OF IRON OR STEEL:Other :Other +73260000,other articles of iron or steel forged or stamped but not further worked +73261100,OTHER ARTICLES OF IRON OR STEEL::Grinding balls and similar articles for mills +73261900,other articles of iron or steel forged or stamped but not further worked >> other +73261910,OTHER ARTICLES OF IRON OR STEEL:Other :For automobiles and earth moving equipments +73261990,OTHER ARTICLES OF IRON OR STEEL:Other :Other +73262000,other articles of iron or steel forged or stamped but not further worked >> articles of iron or steel wire +73262010,OTHER ARTICLES OF IRON OR STEEL:Articles of iron or steel wire :Tyre bead wire rings intended for use in the manufacture of tyres for cycles and cyclerickshaws +73262090,OTHER ARTICLES OF IRON OR STEEL:Articles of iron or steel wire :Other +73269000,other articles of iron or steel forged or stamped but not further worked >> other +73269010,OTHER ARTICLES OF IRON OR STEEL:Other :Belt lacing of steel +73269020,OTHER ARTICLES OF IRON OR STEEL:Other :Belt fasteners for machinery belts +73269030,"OTHER ARTICLES OF IRON OR STEEL:Other :Drain covers, plates, and frames for sewages, water or similar system" +73269040,OTHER ARTICLES OF IRON OR STEEL:Other :Enamelled iron ware +73269050,OTHER ARTICLES OF IRON OR STEEL:Other :Grinding media balls and cylpebs +73269060,OTHER ARTICLES OF IRON OR STEEL:Other :Manufactures of stainless steel +73269070,OTHER ARTICLES OF IRON OR STEEL:Other :Articles of clad metal +73269080,"OTHER ARTICLES OF IRON OR STEEL:Other :Parts of ships, floating structure and vessels (excluding hull, propellers and paddle-wheels)" +73269091,OTHER ARTICLES OF IRON OR STEEL:Other :Shanks +73269099,OTHER ARTICLES OF IRON OR STEEL:Other :Other +74010010,COPPER MATTES; CEMENT COPPER (PRECIPITATED COPPER):Copper mattes; Cement copper (precipitated copper):Copper mattes(OLD tariff) +74010090,COPPER MATTES; CEMENT COPPER (PRECIPITATED COPPER):Copper mattes; Cement copper (precipitated copper):Cement copper (precipitated copper)(OLD tariff) +74020010,UNREFINED COPPER; COPPER ANODES FOR ELECTROLYTIC REFINING:Unrefined copper; copper anodes for electrolytic refining :Blister copper(OLD tariff) +74020090,UNREFINED COPPER; COPPER ANODES FOR ELECTROLYTIC REFINING:Unrefined copper; copper anodes for electrolytic refining :Other(OLD tariff) +74031100,"REFINED 
COPPER AND COPPER ALLOYS, UNWROUGHT::Cathodes and sections of cathodes(OLD tariff)" +74031200,"REFINED COPPER AND COPPER ALLOYS, UNWROUGHT::Wire-bars(OLD tariff)" +74031300,"REFINED COPPER AND COPPER ALLOYS, UNWROUGHT::Billets(OLD tariff)" +74031900,"REFINED COPPER AND COPPER ALLOYS, UNWROUGHT::Other(OLD tariff)" +74032100,"REFINED COPPER AND COPPER ALLOYS, UNWROUGHT::Copper-zinc base alloys (brass)(OLD tariff)" +74032210,"REFINED COPPER AND COPPER ALLOYS, UNWROUGHT:Copper-tin base alloys (bronze) :Phosphor bronze(OLD tariff)" +74032290,"REFINED COPPER AND COPPER ALLOYS, UNWROUGHT:Copper-tin base alloys (bronze) :Other(OLD tariff)" +74032900,"REFINED COPPER AND COPPER ALLOYS, UNWROUGHT::Other copper alloys (other than master alloys of heading 7405)(OLD tariff)" +74040000,copper waste and scrap of copper +74040011,COPPER WASTE AND SCRAP:Copper waste and scrap:Empty or discharged cartridges of all bores and sizes +74040012,"COPPER WASTE AND SCRAP:Copper waste and scrap:Copper scrap, namely the following copper wire scrap covered by ISRI code words Barley, Berry and Birch; heavy copper scrap covered by ISRI code word Candy;unalloyed copper scrap covered by ISRI code word Cliff; copper wire nodules scrap covered by ISRI code words Clove, Cobra and Cocoa; light copper scrap covered by ISRI Code word Dream; muntz metal tubes covered by ISRI code word Palms" +74040019,COPPER WASTE AND SCRAP:Copper waste and scrap:Other +74040021,"COPPER WASTE AND SCRAP:Copper waste and scrap:Empty or discharged cartridges of all bores and sizes, including the following: clean fired 70/30 brass shells free of bullets, iron and any other foreign material covered by ISRI code word 'Lake'; clean muffled (popped) 70/30 brass shells free of bullets, iron and any other foreign material covered by ISRI code word 'Lamb'" +74040022,"COPPER WASTE AND SCRAP:Copper waste and scrap:Brass scrap, namely the following refinery brass scrap covered by ISRI code word Drink; composition of red brass scrap covered by ISRI code word Ebony; red brass composition turnings scrap covered by ISRI code word Enerv; genuine babbit-lined brass bushings scrap covered by ISRI code word Elder; machinery or hard brass solids scrap covered by ISRI code word Engel; machinery or hand brass solids scrap covered by ISRI code word Erin; cocks and faucets scrap covered by ISRI code word Grape; yellow brass scrap covered by ISRI code word Honey; yellow brass castings covered by ISRI code word Ivory; new brass clippings covered by ISRI code word Label; yellow brass primer covered by ISRI code word Lark; brass pipe covered by ISRI code word Melon; yellow brass rod turnings covered by ISRI code word Night; new yellow brass turnigs covered by ISRI code word Normad; mixed unsweated auto radiators covered by ISRI code word Ocean; admiralty brass condenser tubes covered by ISRI code word Pales; aluminium brass condenser tubes covered by ISRI code word Pallu;" +74040023,COPPER WASTE AND SCRAP:Copper waste and scrap:Nickel silver scrap name the following mixed new nickel silver clippings covered by ISRI Code word 'Maize'; new nickel silver clippings covered by ISRI code word 'Major';(OLD tariff) +74040024,"COPPER WASTE AND SCRAP:Copper waste and scrap:Bronze scrap, including the following:manganese bronze soilds covered ISRI code word 'Parch';High lead bronze solids and borings covered by ISRI code word 'Elias'" +74040025,"COPPER WASTE AND SCRAP:Copper waste and scrap:Copper nickel scrap, including the following: new cupro nickel clips and solids covered by ISRI 
code word 'Dandy'; cupro nickel solids covered by ISRI code word 'Daunt'; soldered cupro-nickel solids covered by ISRI code word 'Delta'; cupro nickel spinnings, turnings, borings covered by ISRI code word 'Decoy';" +74040029,COPPER WASTE AND SCRAP:Copper waste and scrap:Other +74050000,::MASTER ALLOYS OF COPPER +74060000,copper powders and flakes copper powders and flakes +74061000,COPPER POWDERS AND FLAKES::Powders of non-lamellar structure +74062000,COPPER POWDERS AND FLAKES::Powders of lamellar structure; flakes +74070000,copper bars rods and profiles copper bars rods and profiles +74071000,copper bars rods and profiles copper bars rods and profiles >> of refined copper of refined copper +74071010,"COPPER BARS, RODS AND PROFILES:Of refined copper :Electrolytic copper rods or black copper rods" +74071020,"COPPER BARS, RODS AND PROFILES:Of refined copper :Other copper rods" +74071030,"COPPER BARS, RODS AND PROFILES:Of refined copper :Copper bars (excluding hollow bars)" +74071040,"COPPER BARS, RODS AND PROFILES:Of refined copper :Hollow bars of copper" +74071051,"COPPER BARS, RODS AND PROFILES:Of refined copper :Hollow profiles" +74071059,"COPPER BARS, RODS AND PROFILES:Of refined copper :Other" +74071090,"COPPER BARS, RODS AND PROFILES:Of refined copper :Other" +74072100,copper bars rods and profiles copper bars rods and profiles >> of base alloys brass of base alloys brass +74072110,"COPPER BARS, RODS AND PROFILES:Of copper-zinc base alloys (brass) :Bars" +74072120,"COPPER BARS, RODS AND PROFILES:Of copper-zinc base alloys (brass) :Rods" +74072130,"COPPER BARS, RODS AND PROFILES:Of copper-zinc base alloys (brass) :Hollow bars" +74072190,"COPPER BARS, RODS AND PROFILES:Of copper-zinc base alloys (brass) :Other" +74072900,copper bars rods and profiles copper bars rods and profiles >> other other +74072910,"COPPER BARS, RODS AND PROFILES:Other :Rods of bronze and similar alloys" +74072921,"COPPER BARS, RODS AND PROFILES:Other :Hollow" +74072929,"COPPER BARS, RODS AND PROFILES:Other :Other" +74072990,"COPPER BARS, RODS AND PROFILES:Other :Other" +74080000,copper wire of refined copper +74081100,copper wire of refined copper >> of which the maximum dimension exceeds 6 mm +74081110,COPPER WIRE:Of which the maximum cross-sectional dimension exceeds 6 mm :Copper weld wire +74081190,COPPER WIRE:Of which the maximum cross-sectional dimension exceeds 6 mm :Other +74081900,copper wire of refined copper >> other +74081910,COPPER WIRE:Other :Copper weld wire +74081920,COPPER WIRE:Other :Welding wire +74081990,COPPER WIRE:Other :Other +74082100,copper wire of refined copper >> of base alloys brass +74082110,COPPER WIRE:Of copper-zinc base alloys (brass) :Of which the maximum cross-sectional dimension exceeds 6 mm +74082190,COPPER WIRE:Of copper-zinc base alloys (brass) :Other +74082200,copper wire of refined copper >> of base alloys or base alloys nickel silver +74082210,COPPER WIRE:Of copper-nickel base alloys (cupro-nickel) or copper-nickel-zinc base alloys (nickel silver) :Silver plated flattened wire of copper (lametta) +74082220,COPPER WIRE:Of copper-nickel base alloys (cupro-nickel) or copper-nickel-zinc base alloys (nickel silver) :Other of which the maximum cross-sectional dimension exceeds 6 mm +74082290,COPPER WIRE:Of copper-nickel base alloys (cupro-nickel) or copper-nickel-zinc base alloys (nickel silver) :Other +74082900,copper wire of refined copper >> other +74082910,COPPER WIRE:Other :Wire of bronze and similar alloys +74082990,COPPER WIRE:Other :Other +74090000,copper plates sheets 
and strip of a thickness exceeding mm of refined copper +74091100,"COPPER PLATES, SHEETS AND STRIP, OF A THICKNESS EXCEEDING 0.15 MM::In coils" +74091900,"COPPER PLATES, SHEETS AND STRIP, OF A THICKNESS EXCEEDING 0.15 MM::Other" +74092100,"COPPER PLATES, SHEETS AND STRIP, OF A THICKNESS EXCEEDING 0.15 MM::In coils" +74092900,"COPPER PLATES, SHEETS AND STRIP, OF A THICKNESS EXCEEDING 0.15 MM::Other" +74093100,"COPPER PLATES, SHEETS AND STRIP, OF A THICKNESS EXCEEDING 0.15 MM::In coils" +74093900,"COPPER PLATES, SHEETS AND STRIP, OF A THICKNESS EXCEEDING 0.15 MM::Other" +74094000,"COPPER PLATES, SHEETS AND STRIP, OF A THICKNESS EXCEEDING 0.15 MM::Of copper-nickel base alloys (cupro-nickel) or copper-nickel-zinc base alloys (nickel silver)" +74099000,"COPPER PLATES, SHEETS AND STRIP, OF A THICKNESS EXCEEDING 0.15 MM::Of other copper alloys" +74101100,"COPPER FOIL (WHETHER OR NOT PRINTED OR BACKED WITH PAPER, PAPERBOARD, PLASTICS OR SIMILAR BACKING MATERIALS) OF A THICKNESS (EXCLUDING ANY BACKING) NOT EXCEEDING 0.15 MM::Of refined copper(OLD tariff)" +74101200,"COPPER FOIL (WHETHER OR NOT PRINTED OR BACKED WITH PAPER, PAPERBOARD, PLASTICS OR SIMILAR BACKING MATERIALS) OF A THICKNESS (EXCLUDING ANY BACKING) NOT EXCEEDING 0.15 MM::Of copper alloys(OLD tariff)" +74102100,"COPPER FOIL (WHETHER OR NOT PRINTED OR BACKED WITH PAPER, PAPERBOARD, PLASTICS OR SIMILAR BACKING MATERIALS) OF A THICKNESS (EXCLUDING ANY BACKING) NOT EXCEEDING 0.15 MM::Of refined copper(OLD tariff)" +74102200,"COPPER FOIL (WHETHER OR NOT PRINTED OR BACKED WITH PAPER, PAPERBOARD, PLASTICS OR SIMILAR BACKING MATERIALS) OF A THICKNESS (EXCLUDING ANY BACKING) NOT EXCEEDING 0.15 MM::Of copper alloys(OLD tariff)" +74110000,copper tubes and pipes +74111000,COPPER TUBES AND PIPES::Of refined copper +74112100,COPPER TUBES AND PIPES::Of copper-zinc base alloys (brass) +74112200,COPPER TUBES AND PIPES::Of copper-nickel base alloys (cupro-nickel) or copper-nickel-zinc base alloys (nickel silver) +74112900,COPPER TUBES AND PIPES::Other +74120000,copper tube or pipe fittings for example couplings elbows sleeves +74121000,"COPPER TUBE OR PIPE FITTINGS (FOR EXAMPLE, COUPLINGS, ELBOWS, SLEEVES)::Of refined copper" +74122000,copper tube or pipe fittings for example couplings elbows sleeves >> of copper alloys brass +74122011,"COPPER TUBE OR PIPE FITTINGS (FOR EXAMPLE, COUPLINGS, ELBOWS, SLEEVES):Of copper alloys :Tube-well strainer" +74122012,"COPPER TUBE OR PIPE FITTINGS (FOR EXAMPLE, COUPLINGS, ELBOWS, SLEEVES):Of copper alloys :Hose connectors" +74122019,"COPPER TUBE OR PIPE FITTINGS (FOR EXAMPLE, COUPLINGS, ELBOWS, SLEEVES):Of copper alloys :Other" +74122090,"COPPER TUBE OR PIPE FITTINGS (FOR EXAMPLE, COUPLINGS, ELBOWS, SLEEVES):Of copper alloys :Fittings of bronze or other alloys of copper" +74130000,"::STRANDED WIRE, CABLES, PLAITED BANDS AND THE LIKE, OF COPPER, NOT ELECTRICALLY INSULATED" +74151000,"NAILS, TACKS, DRAWING PINS, STAPLES (OTHER THAN THOSE OF HEADING 8305) AND SIMILAR ARTICLES, OF COPPER OR OF IRON OR STEEL WITH HEADS OF COPPER; SCREWS, BOLTS, NUTS, SCREW HOOKS, RIVETS, COTTERS, COTTER-PINS, WASHERS (INCLUDING SPRING WASHERS)::Nails and tacks, drawing pins, staples and similar articles(OLD tariff)" +74152100,"NAILS, TACKS, DRAWING PINS, STAPLES (OTHER THAN THOSE OF HEADING 8305) AND SIMILAR ARTICLES, OF COPPER OR OF IRON OR STEEL WITH HEADS OF COPPER; SCREWS, BOLTS, NUTS, SCREW HOOKS, RIVETS, COTTERS, COTTER-PINS, WASHERS (INCLUDING SPRING WASHERS)::Washers (including spring washers)(OLD tariff)" 
+74152900,"NAILS, TACKS, DRAWING PINS, STAPLES (OTHER THAN THOSE OF HEADING 8305) AND SIMILAR ARTICLES, OF COPPER OR OF IRON OR STEEL WITH HEADS OF COPPER; SCREWS, BOLTS, NUTS, SCREW HOOKS, RIVETS, COTTERS, COTTER-PINS, WASHERS (INCLUDING SPRING WASHERS)::Other(OLD tariff)" +74153310,"NAILS, TACKS, DRAWING PINS, STAPLES (OTHER THAN THOSE OF HEADING 8305) AND SIMILAR ARTICLES, OF COPPER OR OF IRON OR STEEL WITH HEADS OF COPPER; SCREWS, BOLTS, NUTS, SCREW HOOKS, RIVETS, COTTERS, COTTER-PINS, WASHERS (INCLUDING SPRING WASHERS):Screws; bolts and nuts :Screws for wood(OLD tariff)" +74153390,"NAILS, TACKS, DRAWING PINS, STAPLES (OTHER THAN THOSE OF HEADING 8305) AND SIMILAR ARTICLES, OF COPPER OR OF IRON OR STEEL WITH HEADS OF COPPER; SCREWS, BOLTS, NUTS, SCREW HOOKS, RIVETS, COTTERS, COTTER-PINS, WASHERS (INCLUDING SPRING WASHERS):Screws; bolts and nuts :Other(OLD tariff)" +74153910,"NAILS, TACKS, DRAWING PINS, STAPLES (OTHER THAN THOSE OF HEADING 8305) AND SIMILAR ARTICLES, OF COPPER OR OF IRON OR STEEL WITH HEADS OF COPPER; SCREWS, BOLTS, NUTS, SCREW HOOKS, RIVETS, COTTERS, COTTER-PINS, WASHERS (INCLUDING SPRING WASHERS):Other :Rivets (excluding tubular or bifurcated)(OLD tariff)" +74153990,"NAILS, TACKS, DRAWING PINS, STAPLES (OTHER THAN THOSE OF HEADING 8305) AND SIMILAR ARTICLES, OF COPPER OR OF IRON OR STEEL WITH HEADS OF COPPER; SCREWS, BOLTS, NUTS, SCREW HOOKS, RIVETS, COTTERS, COTTER-PINS, WASHERS (INCLUDING SPRING WASHERS):Other :Other(OLD tariff)" +74180000,table kitchen or other household articles and parts thereof of copper pot scourers and scouring or polishing pads gloves and the like of copper sanitary ware and parts thereof of copper +74181000,table kitchen or other household articles and parts thereof of copper pot scourers and scouring or polishing pads gloves and the like of copper sanitary ware and parts thereof of copper >> table kitchen or other household articles and parts thereof pot scourers and scouring or polishing pads gloves and the like +74181010,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF COPPER; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF COPPER; SANITARY WARE AND PARTS THEREOF, OF COPPER:Table, kitchen or other household articles and parts thereof Pot scourers and scouring or polishing pads, gloves and the like:Pot scourers and scouring or polishing pads, 
gloves and the like" +74181021,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF COPPER; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF COPPER; SANITARY WARE AND PARTS THEREOF, OF COPPER:Table, kitcehn or other house hold articles and parts thereof Pot scourers and scouring orpolishing pads, gloves and the like:Utensils of Brass" +74181022,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF COPPER; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF COPPER; SANITARY WARE AND PARTS THEREOF, OF COPPER:Table, kitcehn or other house hold articles and parts thereof Pot scourers and scouring orpolishing pads, gloves and the like:Utensils of Copper" +74181023,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF COPPER; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF COPPER; SANITARY WARE AND PARTS THEREOF, OF COPPER:Table, kitcehn or other house hold articles and parts thereof Pot scourers and scouring orpolishing pads, gloves and the like:Utensils of other copper alloys" +74181024,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF COPPER; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF COPPER; SANITARY WARE AND PARTS THEREOF, OF COPPER:Table, kitcehn or other house hold articles and parts thereof Pot scourers and scouring orpolishing pads, gloves and the like:E.P.N.S. WARE" +74181031,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF COPPER; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF COPPER; SANITARY WARE AND PARTS THEREOF, OF COPPER:Table, kitcehn or other house hold articles and parts thereof Pot scourers and scouring orpolishing pads, gloves and the like:Other of EPNS" +74181039,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF COPPER; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF COPPER; SANITARY WARE AND PARTS THEREOF, OF COPPER:Table, kitcehn or other house hold articles and parts thereof Pot scourers and scouring orpolishing pads, gloves and the like:Other" +74181090,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF COPPER; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF COPPER; SANITARY WARE AND PARTS THEREOF, OF COPPER:Table, kitcehn or other house hold articles and parts thereof Pot scourers and scouring orpolishing pads, gloves and the like:Parts" +74182000,table kitchen or other household articles and parts thereof of copper pot scourers and scouring or polishing pads gloves and the of sanitary ware like copper and parts thereof of copper >> sanitary ware and parts thereof +74182010,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF COPPER; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF COPPER; SANITARY WARE AND PARTS THEREOF, OF COPPER:Sanitary ware and parts thereof :Sanitary ware" +74182020,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF COPPER; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF COPPER; SANITARY WARE AND PARTS THEREOF, OF COPPER:Sanitary ware and parts thereof :Parts of sanitary ware" +74190000,other articles of copper +74191010,OTHER ARTICLES OF COPPER:Chain and parts thereof :Chain(OLD tariff) +74191021,OTHER ARTICLES OF COPPER:Chain and parts thereof :Of copper chains(OLD tariff) +74191029,OTHER ARTICLES OF COPPER:Chain and parts thereof :Other(OLD tariff) +74192000,"OTHER ARTICLES OF COPPER:cast:Cast, moulded, stamped or forged, but not 
further worked" +74198000,other articles of copper >> other +74198010,"OTHER ARTICLES OF COPPER:OTHER:Reservoirs, tanks, vats and similar containers" +74198020,OTHER ARTICLES OF COPPER:OTHER:Articles of copper alloys electro-plated with nickel-silver +74198030,OTHER ARTICLES OF COPPER:OTHER:Articlers of brass +74198040,OTHER ARTICLES OF COPPER:OTHER:Copper worked articles +74198050,OTHER ARTICLES OF COPPER:OTHER:Copper chain +74198090,OTHER ARTICLES OF COPPER:OTHER:Other articles of copper(OLD tariff) +74199100,"OTHER ARTICLES OF COPPER::Cast, moulded, stamped or forged, but not further worked(OLD tariff)" +74199910,"OTHER ARTICLES OF COPPER:Other :Reservoirs, tanks, vats and similar containers of a capcity above 300 l(OLD tariff)" +74199920,OTHER ARTICLES OF COPPER:Other :Articles of copper alloys electro-plated with nickel-silver(OLD tariff) +74199930,OTHER ARTICLES OF COPPER:Other :Articles of brass(OLD tariff) +74199940,OTHER ARTICLES OF COPPER:Other :Copper worked articles(OLD tariff) +74199990,OTHER ARTICLES OF COPPER:Other :Other articles of copper(OLD tariff) +75011000,"NICKEL MATTES, NICKEL OXIDE SINTERS AND OTHER INTERMEDIATE PRODUCTS OF NICKEL METALLURGY::Nickel mattes(OLD tariff)" +75012000,"NICKEL MATTES, NICKEL OXIDE SINTERS AND OTHER INTERMEDIATE PRODUCTS OF NICKEL METALLURGY::Nickel oxide sinters and other intermediate products of nickel metallurgy(OLD tariff)" +75020000,unwrought nickel +75021000,"UNWROUGHT NICKEL::Nickel, not alloyed" +75022000,unwrought nickel >> nickel alloys +75022010,UNWROUGHT NICKEL:Nickel alloys :Cupro-nickel containing more than 40% +75022020,UNWROUGHT NICKEL:Nickel alloys :Monel metal including `K' monel +75022030,UNWROUGHT NICKEL:Nickel alloys :Nickel alloys containing more than 40% by weight of nickel +75022040,UNWROUGHT NICKEL:Nickel alloys :Nickel alloys containing more than 10 % but not more than 40 % by weight of nickel +75022090,UNWROUGHT NICKEL:Nickel alloys :Other +75030000,nickel waste and scrap nickel waste and scrap +75030010,NICKEL WASTE AND SCRAP:Nickel waste and scrap :Nickel scrap +75030090,NICKEL WASTE AND SCRAP:Nickel waste and scrap :Other +75040000,::NICKEL POWDERS AND FLAKES +75050000,nickel bars rods profiles and wire bars rods and profiles +75051110,"NICKEL BARS, RODS, PROFILES AND WIRE:Of nickel, not alloyed :Hollow bars(OLD tariff)" +75051120,"NICKEL BARS, RODS, PROFILES AND WIRE:Of nickel, not alloyed :Other bars; rods and profiles(OLD tariff)" +75051210,"NICKEL BARS, RODS, PROFILES AND WIRE:Of nickel alloys :Hollow bars(OLD tariff)" +75051220,"NICKEL BARS, RODS, PROFILES AND WIRE:Of nickel alloys :Other bars; rods and profiles(OLD tariff)" +75052100,"NICKEL BARS, RODS, PROFILES AND WIRE::Of nickel, not alloyed" +75052200,"NICKEL BARS, RODS, PROFILES AND WIRE::Of nickel alloys" +75060000,nickel plates sheets strip and foil +75061000,"NICKEL PLATES, SHEETS, STRIP AND FOIL::Of nickel, not alloyed" +75062000,"NICKEL PLATES, SHEETS, STRIP AND FOIL::Of nickel alloys" +75070000,nickel tubes pipes and tube or pipe fittings for example couplings elbows sleeves tubes and pipes +75071100,"NICKEL TUBES, PIPES AND TUBE OR PIPE FITTINGS (FOR EXAMPLE, COUPLINGS, ELBOWS, SLEEVES)::Of nickel, not alloyed" +75071200,"NICKEL TUBES, PIPES AND TUBE OR PIPE FITTINGS (FOR EXAMPLE, COUPLINGS, ELBOWS, SLEEVES)::Of nickel alloys" +75072000,"NICKEL TUBES, PIPES AND TUBE OR PIPE FITTINGS (FOR EXAMPLE, COUPLINGS, ELBOWS, SLEEVES)::Tube or pipe fittings" +75080000,other articles of nickel +75081000,"OTHER ARTICLES OF NICKEL::Cloth, grill and netting, 
of nickel wire" +75089000,other articles of nickel >> other +75089010,OTHER ARTICLES OF NICKEL:Other :Electroplating anodes of nickel +75089020,OTHER ARTICLES OF NICKEL:Other :Blanks ordinarily used for manufacturing tubes and pipes of nickel +75089030,OTHER ARTICLES OF NICKEL:Other :Nickel screen +75089090,OTHER ARTICLES OF NICKEL:Other :Other articles of nickel and nickel alloy +76010000,unwrought aluminium +76011000,unwrought aluminium >> aluminium not alloyed +76011010,"UNWROUGHT ALUMINIUM:Aluminium, not alloyed :Ingots" +76011020,"UNWROUGHT ALUMINIUM:Aluminium, not alloyed :Billets" +76011030,"UNWROUGHT ALUMINIUM:Aluminium, not alloyed :Wire bars" +76011040,"UNWROUGHT ALUMINIUM:Aluminium, not alloyed :Wire rods" +76011090,"UNWROUGHT ALUMINIUM:Aluminium, not alloyed :Other" +76012000,unwrought aluminium >> aluminium alloys +76012010,UNWROUGHT ALUMINIUM:Aluminium alloys :Ingots +76012020,UNWROUGHT ALUMINIUM:Aluminium alloys :Billets +76012030,UNWROUGHT ALUMINIUM:Aluminium alloys :Wire bars +76012040,UNWROUGHT ALUMINIUM:Aluminium alloys :Wire rods +76012090,UNWROUGHT ALUMINIUM:Aluminium alloys :Other +76020000,aluminium waste and scrap aluminium waste and scrap +76020010,"ALUMINIUM WASTE AND SCRAP:Aluminium waste and scrap :Aluminium scrap, namely the following clean aluminium lithographic sheets covered by ISRI code word `Tablet'; new, clean aluminium lithographic sheets covered by ISRI code word `Tabloid'; mixed low copper aluminium clippings and solids covered by ISRI code word `Taboo'; clean mixed old alloy sheet aluminium covered by ISRI code word `Taint'/`Tabor'; new aluminium can stock covered by ISRI code word `Take'; old can stock covered by ISRI code word `Talap'; shredded aluminium used beverages can (U) scrap covered by ISRI code word `Talcred'; densified aluminium used beverages can (UBC) scrap covered by ISRI code word `Taldack'; baled aluminium used beverage can (UBC) scrap covered by ISRI code word `Taldon'; briquetted aluminium used beverage can (UBC) scrap covered by ISRI code word `Taldork'; painted siding covered by ISRI code word `Tale'; coated scrap covered by ISRI code word `Talent'; aluminium scrap radiators covered by ISRI code word `Talk'; E.C. 
aluminium nodules covered by ISRI code word `Tall'; new pure aluminium wire and cable covered by ISRI code word `Talon'; new mixed aluminium wire and cable covered by ISRI code word `Tanri'; Old pure aluminium wire and cable covered by ISRI code word `Taste'; old mixed aluminium wire and cable covered by ISRI code word `Tassel'; aluminium pistons covered by ISRI code word `Tarry'; segregated aluminium borings and turnings covered by ISRI code word `Teens'; mixed aluminium castings covered by ISRI code word `Telic'; mixed aluminium castings covered by ISRI code word `Tense'; wrecked airplane sheet aluminium covered by ISRI code word `Tepid';" +76020090,ALUMINIUM WASTE AND SCRAP:Aluminium waste and scrap :Other waste and scrap +76030000,aluminium powders and flakes +76031000,aluminium powders and flakes >> powders of structure +76031010,ALUMINIUM POWDERS AND FLAKES:Powders of non-lamellar structure :Aluminium powder for thermit process +76031090,ALUMINIUM POWDERS AND FLAKES:Powders of non-lamellar structure :Other +76032000,ALUMINIUM POWDERS AND FLAKES::Powders of lamellar structure; flakes +76040000,aluminium bars rods and profiles +76041000,aluminium bars rods and profiles >> of aluminium not alloyed +76041010,"ALUMINIUM BARS, RODS AND PROFILES:Of aluminium, not alloyed :Wire rods" +76041020,"ALUMINIUM BARS, RODS AND PROFILES:Of aluminium, not alloyed :Bars and rods, other than wire rods" +76041031,"ALUMINIUM BARS, RODS AND PROFILES:Of aluminium, not alloyed :Hollow" +76041039,"ALUMINIUM BARS, RODS AND PROFILES:Of aluminium, not alloyed :Other" +76042100,"ALUMINIUM BARS, RODS AND PROFILES::Hollow profiles" +76042900,aluminium bars rods and profiles >> other +76042910,"ALUMINIUM BARS, RODS AND PROFILES:Other :Hard drawn bare aluminium conductors steel re-inforced (A.C.S.R.)" +76042920,"ALUMINIUM BARS, RODS AND PROFILES:Other :Wire rods" +76042930,"ALUMINIUM BARS, RODS AND PROFILES:Other :Bars and rods, other than wire rods" +76042990,"ALUMINIUM BARS, RODS AND PROFILES:Other :Other" +76050000,aluminium wire of aluminium not alloyed +76051100,ALUMINIUM WIRE::Of which the maximum cross-sectional dimension exceeds 7 mm +76051900,aluminium wire of aluminium not alloyed >> other +76051910,ALUMINIUM WIRE:Other :Of which the maximum cross-sectional dimension exceeds 6 mm but does not exceed 7 mm +76051991,ALUMINIUM WIRE:Other :Hard drawn bare-solid +76051999,ALUMINIUM WIRE:Other :Other +76052100,ALUMINIUM WIRE::Of which the maximum cross-sectional dimension exceeds 7 mm +76052900,aluminium wire of aluminium not alloyed >> other +76052910,ALUMINIUM WIRE:Other :Of which the maximum cross-sectional dimension exceeds 6 mm but does not exceed 7 mm +76052990,ALUMINIUM WIRE:Other :Other +76060000,aluminium plates sheets and strip of a thickness exceeding mm rectangular including square +76061100,aluminium plates sheets and strip of a thickness exceeding mm rectangular including square >> of aluminium not alloyed +76061110,"ALUMINIUM PLATES, SHEETS AND STRIP, OF A THICKNESS EXCEEDING 0.2 mm:Of aluminium, not alloyed :Electrolytic plates or sheets" +76061190,"ALUMINIUM PLATES, SHEETS AND STRIP, OF A THICKNESS EXCEEDING 0.2 mm:Of aluminium, not alloyed :Other" +76061200,"ALUMINIUM PLATES, SHEETS AND STRIP, OF A THICKNESS EXCEEDING 0.2 mm::Of aluminium alloys" +76069100,aluminium plates sheets and strip of a thickness exceeding mm rectangular including square >> of aluminium not alloyed +76069110,"ALUMINIUM PLATES, SHEETS AND STRIP, OF A THICKNESS EXCEEDING 0.2 mm:Of aluminium, not alloyed :Circles" 
+76069120,"ALUMINIUM PLATES, SHEETS AND STRIP, OF A THICKNESS EXCEEDING 0.2 mm:Of aluminium, not alloyed :Electrolytic plates or sheets" +76069190,"ALUMINIUM PLATES, SHEETS AND STRIP, OF A THICKNESS EXCEEDING 0.2 mm:Of aluminium, not alloyed :Other" +76069200,aluminium plates sheets and strip of a thickness exceeding mm rectangular including square >> of aluminium alloys +76069210,"ALUMINIUM PLATES, SHEETS AND STRIP, OF A THICKNESS EXCEEDING 0.2 mm:Of aluminium alloys :Circles" +76069290,"ALUMINIUM PLATES, SHEETS AND STRIP, OF A THICKNESS EXCEEDING 0.2 mm:Of aluminium alloys :Other" +76071110,"ALUMINIUM FOIL (WHETHER OR NOT PRINTED OR BACKED WITH PAPER, PAPERBOARD, PLASTICS OR SIMILAR BACKING MATERIALS) OF A THICKNESS (EXCLUDING ANY BACKING) NOT EXCEEDING 0.2 MM:Rolled but not further worked :Ordinarily used for tea chest lining(OLD tariff)" +76071190,"ALUMINIUM FOIL (WHETHER OR NOT PRINTED OR BACKED WITH PAPER, PAPERBOARD, PLASTICS OR SIMILAR BACKING MATERIALS) OF A THICKNESS (EXCLUDING ANY BACKING) NOT EXCEEDING 0.2 MM:Rolled but not further worked :Other(OLD tariff)" +76071910,"ALUMINIUM FOIL (WHETHER OR NOT PRINTED OR BACKED WITH PAPER, PAPERBOARD, PLASTICS OR SIMILAR BACKING MATERIALS) OF A THICKNESS (EXCLUDING ANY BACKING) NOT EXCEEDING 0.2 MM:Other :Ordinarily used for tea chest lining(OLD tariff)" +76071991,"ALUMINIUM FOIL (WHETHER OR NOT PRINTED OR BACKED WITH PAPER, PAPERBOARD, PLASTICS OR SIMILAR BACKING MATERIALS) OF A THICKNESS (EXCLUDING ANY BACKING) NOT EXCEEDING 0.2 MM:Other :Plain(OLD tariff)" +76071992,"ALUMINIUM FOIL (WHETHER OR NOT PRINTED OR BACKED WITH PAPER, PAPERBOARD, PLASTICS OR SIMILAR BACKING MATERIALS) OF A THICKNESS (EXCLUDING ANY BACKING) NOT EXCEEDING 0.2 MM:Other :Embossed(OLD tariff)" +76071993,"ALUMINIUM FOIL (WHETHER OR NOT PRINTED OR BACKED WITH PAPER, PAPERBOARD, PLASTICS OR SIMILAR BACKING MATERIALS) OF A THICKNESS (EXCLUDING ANY BACKING) NOT EXCEEDING 0.2 MM:Other :Perforated or cut-to-shape(OLD tariff)" +76071994,"ALUMINIUM FOIL (WHETHER OR NOT PRINTED OR BACKED WITH PAPER, PAPERBOARD, PLASTICS OR SIMILAR BACKING MATERIALS) OF A THICKNESS (EXCLUDING ANY BACKING) NOT EXCEEDING 0.2 MM:Other :Coated(OLD tariff)" +76071995,"ALUMINIUM FOIL (WHETHER OR NOT PRINTED OR BACKED WITH PAPER, PAPERBOARD, PLASTICS OR SIMILAR BACKING MATERIALS) OF A THICKNESS (EXCLUDING ANY BACKING) NOT EXCEEDING 0.2 MM:Other :Printed(OLD tariff)" +76071999,"ALUMINIUM FOIL (WHETHER OR NOT PRINTED OR BACKED WITH PAPER, PAPERBOARD, PLASTICS OR SIMILAR BACKING MATERIALS) OF A THICKNESS (EXCLUDING ANY BACKING) NOT EXCEEDING 0.2 MM:Other :Other(OLD tariff)" +76072010,"ALUMINIUM FOIL (WHETHER OR NOT PRINTED OR BACKED WITH PAPER, PAPERBOARD, PLASTICS OR SIMILAR BACKING MATERIALS) OF A THICKNESS (EXCLUDING ANY BACKING) NOT EXCEEDING 0.2 MM:Backed :Ordinarily used for tea chest lining(OLD tariff)" +76072090,"ALUMINIUM FOIL (WHETHER OR NOT PRINTED OR BACKED WITH PAPER, PAPERBOARD, PLASTICS OR SIMILAR BACKING MATERIALS) OF A THICKNESS (EXCLUDING ANY BACKING) NOT EXCEEDING 0.2 MM:Backed :Other(OLD tariff)" +76080000,aluminium tubes and pipes +76081000,"ALUMINIUM TUBES AND PIPES::Of aluminium, not alloyed" +76082000,ALUMINIUM TUBES AND PIPES::Of aluminium alloys +76090000,"::ALUMINIUM TUBE OR PIPE FITTINGS (FOR EXAMPLE, COUPLINGS, ELBOWS, SLEEVES)" +76100000,aluminium structures excluding prefabricated buildings of heading 9406 and parts of structures for example bridges and towers lattice masts roofs roofing frameworks doors and windows and their frames and thresholds for doors balustrades 
pillars and columns aluminium plates rods profiles tubes and the like prepared for use in structures +76101000,"ALUMINIUM STRUCTURES (EXCLUDING PREFABRICATED BUILDINGS OF HEADING 9406) AND PARTS OF STRUCTURES (FOR EXAMPLE, BRIDGES AND BRIDGE-SECTIONS, TOWERS, LATTICE MASTS, ROOFS, ROOFING FRAMEWORKS, DOORS AND WINDOWS AND THEIR FRAMES AND THRESHOLDS FOR DOORS, BALUSTRADES, PILLARS AND COLUMNS); ALUMINIUM PLATES, RODS, PROFILES, TUBES AND THE LIKE, PREPARED FOR USE IN STRUCTURES::Doors, windows and their frames and thresholds for doors" +76109000,aluminium structures excluding prefabricated buildings of heading 9406 and parts of structures for example bridges and towers lattice masts roofs roofing frameworks doors and windows and their frames and thresholds for doors balustrades pillars and columns aluminium plates rods profiles tubes and the like prepared for use in structures >> other +76109010,"ALUMINIUM STRUCTURES (EXCLUDING PREFABRICATED BUILDINGS OF HEADING 9406) AND PARTS OF STRUCTURES (FOR EXAMPLE, BRIDGES AND BRIDGE-SECTIONS, TOWERS, LATTICE MASTS, ROOFS, ROOFING FRAMEWORKS, DOORS AND WINDOWS AND THEIR FRAMES AND THRESHOLDS FOR DOORS, BALUSTRADES, PILLARS AND COLUMNS); ALUMINIUM PLATES, RODS, PROFILES, TUBES AND THE LIKE, PREPARED FOR USE IN STRUCTURES:Other :Structures" +76109020,"ALUMINIUM STRUCTURES (EXCLUDING PREFABRICATED BUILDINGS OF HEADING 9406) AND PARTS OF STRUCTURES (FOR EXAMPLE, BRIDGES AND BRIDGE-SECTIONS, TOWERS, LATTICE MASTS, ROOFS, ROOFING FRAMEWORKS, DOORS AND WINDOWS AND THEIR FRAMES AND THRESHOLDS FOR DOORS, BALUSTRADES, PILLARS AND COLUMNS); ALUMINIUM PLATES, RODS, PROFILES, TUBES AND THE LIKE, PREPARED FOR USE IN STRUCTURES:Other :Parts of structures, not elsewhere specified" +76109030,"ALUMINIUM STRUCTURES (EXCLUDING PREFABRICATED BUILDINGS OF HEADING 9406) AND PARTS OF STRUCTURES (FOR EXAMPLE, BRIDGES AND BRIDGE-SECTIONS, TOWERS, LATTICE MASTS, ROOFS, ROOFING FRAMEWORKS, DOORS AND WINDOWS AND THEIR FRAMES AND THRESHOLDS FOR DOORS, BALUSTRADES, PILLARS AND COLUMNS); ALUMINIUM PLATES, RODS, PROFILES, TUBES AND THE LIKE, PREPARED FOR USE IN STRUCTURES:Other :Aluminium plates , rods, profiles, tubes and the like, prepared for use in structure" +76109090,"ALUMINIUM STRUCTURES (EXCLUDING PREFABRICATED BUILDINGS OF HEADING 9406) AND PARTS OF STRUCTURES (FOR EXAMPLE, BRIDGES AND BRIDGE-SECTIONS, TOWERS, LATTICE MASTS, ROOFS, ROOFING FRAMEWORKS, DOORS AND WINDOWS AND THEIR FRAMES AND THRESHOLDS FOR DOORS, BALUSTRADES, PILLARS AND COLUMNS); ALUMINIUM PLATES, RODS, PROFILES, TUBES AND THE LIKE, PREPARED FOR USE IN STRUCTURES:Other :Other" +76110000,"::ALUMINIUM RESERVOIRS, TANKS, VATS AND SIMILAR CONTAINERS, FOR ANY MATERIAL (OTHER THAN COMPRESSED OR LIQUEFIED GAS), OF A CAPACITY EXCEEDING 300 L, WHETHER OR NOT LINED OR HEAT-INSULATED, BUT NOT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT" +76120000,boxes and aluminium casks drums cans similar containers rigid or including collapsible tubular for any containers material other than compressed or liquefied of a capacity not exceeding gas 300 l whether or not lined or but not fitted with mechanical or thermal equipment +76121000,boxes and aluminium casks drums cans similar containers rigid or including collapsible tubular for any containers material other than compressed or liquefied of a capacity not exceeding gas 300 l whether or not lined or but not fitted with mechanical or thermal equipment >> collapsible tubular containers +76121010,"ALUMINIUM CASKS, DRUMS, CANS, BOXES AND SIMILAR CONTAINERS (INCLUDING RIGID OR 
COLLAPSIBLE TUBULAR CONTAINERS), FOR ANY MATERIAL (OTHER THAN COMPRESSED OR LIQUEFIED GAS), OF A CAPACITY NOT EXCEEDING 300 l, WHETHER OR NOT LINED OR HEATINSULATED, BUT NOT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT:Collapsible tubular containers :Plain(OLD tariff)" +76121020,"ALUMINIUM CASKS, DRUMS, CANS, BOXES AND SIMILAR CONTAINERS (INCLUDING RIGID OR COLLAPSIBLE TUBULAR CONTAINERS), FOR ANY MATERIAL (OTHER THAN COMPRESSED OR LIQUEFIED GAS), OF A CAPACITY NOT EXCEEDING 300 l, WHETHER OR NOT LINED OR HEATINSULATED, BUT NOT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT:Collapsible tubular containers :Lacquered(OLD tariff)" +76121030,"ALUMINIUM CASKS, DRUMS, CANS, BOXES AND SIMILAR CONTAINERS (INCLUDING RIGID OR COLLAPSIBLE TUBULAR CONTAINERS), FOR ANY MATERIAL (OTHER THAN COMPRESSED OR LIQUEFIED GAS), OF A CAPACITY NOT EXCEEDING 300 l, WHETHER OR NOT LINED OR HEATINSULATED, BUT NOT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT:Collapsible tubular containers :Printed(OLD tariff)" +76121090,"ALUMINIUM CASKS, DRUMS, CANS, BOXES AND SIMILAR CONTAINERS (INCLUDING RIGID OR COLLAPSIBLE TUBULAR CONTAINERS), FOR ANY MATERIAL (OTHER THAN COMPRESSED OR LIQUEFIED GAS), OF A CAPACITY NOT EXCEEDING 300 l, WHETHER OR NOT LINED OR HEATINSULATED, BUT NOT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT:Collapsible tubular containers :Other(OLD tariff)" +76129000,boxes and aluminium casks drums cans similar containers rigid or including collapsible tubular for any containers material other than compressed or liquefied of a capacity not exceeding gas 300 l whether or not lined or but not fitted with mechanical or thermal equipment >> other +76129010,"ALUMINIUM CASKS, DRUMS, CANS, BOXES AND SIMILAR CONTAINERS (INCLUDING RIGID OR COLLAPSIBLE TUBULAR CONTAINERS), FOR ANY MATERIAL (OTHER THAN COMPRESSED OR LIQUEFIED GAS), OF A CAPACITY NOT EXCEEDING 300 l, WHETHER OR NOT LINED OR HEATINSULATED, BUT NOT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT:Other :Plain" +76129020,"ALUMINIUM CASKS, DRUMS, CANS, BOXES AND SIMILAR CONTAINERS (INCLUDING RIGID OR COLLAPSIBLE TUBULAR CONTAINERS), FOR ANY MATERIAL (OTHER THAN COMPRESSED OR LIQUEFIED GAS), OF A CAPACITY NOT EXCEEDING 300 l, WHETHER OR NOT LINED OR HEATINSULATED, BUT NOT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT:Other :Lacquered" +76129030,"ALUMINIUM CASKS, DRUMS, CANS, BOXES AND SIMILAR CONTAINERS (INCLUDING RIGID OR COLLAPSIBLE TUBULAR CONTAINERS), FOR ANY MATERIAL (OTHER THAN COMPRESSED OR LIQUEFIED GAS), OF A CAPACITY NOT EXCEEDING 300 l, WHETHER OR NOT LINED OR HEATINSULATED, BUT NOT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT:Other :Printed" +76129090,"ALUMINIUM CASKS, DRUMS, CANS, BOXES AND SIMILAR CONTAINERS (INCLUDING RIGID OR COLLAPSIBLE TUBULAR CONTAINERS), FOR ANY MATERIAL (OTHER THAN COMPRESSED OR LIQUEFIED GAS), OF A CAPACITY NOT EXCEEDING 300 l, WHETHER OR NOT LINED OR HEATINSULATED, BUT NOT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT:Other :Other" +76130000,aluminium containers for compressed or liquefied gas aluminium containers for compressed or liquefied gas low pressure cylinders +76130011,ALUMINIUM CONTAINERS FOR COMPRESSED OR LIQUEFIED GAS:Aluminium containers for compressed or liquefied gas :Plain +76130012,ALUMINIUM CONTAINERS FOR COMPRESSED OR LIQUEFIED GAS:Aluminium containers for compressed or liquefied gas :Lacquered +76130013,ALUMINIUM CONTAINERS FOR COMPRESSED OR LIQUEFIED GAS:Aluminium containers for compressed or liquefied gas :Printed +76130019,ALUMINIUM CONTAINERS FOR COMPRESSED OR LIQUEFIED GAS:Aluminium containers for compressed or 
liquefied gas :Other +76130021,ALUMINIUM CONTAINERS FOR COMPRESSED OR LIQUEFIED GAS:Aluminium containers for compressed or liquefied gas :Plain +76130022,ALUMINIUM CONTAINERS FOR COMPRESSED OR LIQUEFIED GAS:Aluminium containers for compressed or liquefied gas :Lacquered +76130023,ALUMINIUM CONTAINERS FOR COMPRESSED OR LIQUEFIED GAS:Aluminium containers for compressed or liquefied gas :Printed +76130029,ALUMINIUM CONTAINERS FOR COMPRESSED OR LIQUEFIED GAS:Aluminium containers for compressed or liquefied gas :Other +76130091,ALUMINIUM CONTAINERS FOR COMPRESSED OR LIQUEFIED GAS:Aluminium containers for compressed or liquefied gas :Plain +76130092,ALUMINIUM CONTAINERS FOR COMPRESSED OR LIQUEFIED GAS:Aluminium containers for compressed or liquefied gas :Lacquered +76130093,ALUMINIUM CONTAINERS FOR COMPRESSED OR LIQUEFIED GAS:Aluminium containers for compressed or liquefied gas :Printed +76130099,ALUMINIUM CONTAINERS FOR COMPRESSED OR LIQUEFIED GAS:Aluminium containers for compressed or liquefied gas :Other +76140000,stranded wire cables plaited bands and the like of aluminium not electrically insulated +76141000,"STRANDED WIRE, CABLES, PLAITED BANDS AND THE LIKE, OF ALUMINIUM, NOT ELECTRICALLY INSULATED::With steel core" +76149000,"STRANDED WIRE, CABLES, PLAITED BANDS AND THE LIKE, OF ALUMINIUM, NOT ELECTRICALLY INSULATED::Other" +76150000,table kitchen or other household articles and parts thereof of aluminium pot scourers and scouring or polishing pads gloves and the like of aluminium sanitary ware and parts thereof of aluminium +76151000,table kitchen or other household articles and parts thereof of aluminium pot scourers and scouring or polishing pads gloves and the like of aluminium sanitary ware and parts thereof of aluminium >> table kitchen or other household articles and parts thereof pot scourers and scouring or polishing pads gloves and the like pressure cookers solar collectors +76151011,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF ALUMINIUM; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF ALUMINIUM; SANITARY WARE AND PARTS THEREOF, OF ALUMINIUM:Table, kitchen or other household articles and parts thereof; pot scourers and scouring or polishing pads, gloves and the like:Pressure cookers" +76151012,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF ALUMINIUM; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF ALUMINIUM; SANITARY WARE AND PARTS THEREOF, OF ALUMINIUM:Table, kitchen or other household articles and parts thereof; pot scourers and scouring or polishing pads, gloves and the like:Solar collectors" +76151021,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF ALUMINIUM; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF ALUMINIUM; SANITARY WARE AND PARTS THEREOF, OF ALUMINIUM:Table, kitchen or other household articles and parts thereof; pot scourers and scouring or polishing pads, gloves and the like:Non-Stick(OLD tariff)" +76151029,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF ALUMINIUM; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF ALUMINIUM; SANITARY WARE AND PARTS THEREOF, OF ALUMINIUM:Table, kitchen or other household articles and parts thereof; pot scourers and scouring or polishing pads, gloves and the like:Other" +76151030,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF ALUMINIUM; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF ALUMINIUM; SANITARY WARE AND PARTS THEREOF, OF 
ALUMINIUM:Table, kitchen or other household articles and parts thereof; pot scourers and scouring or polishing pads, gloves and the like:Other table, kitchen or household articles" +76151040,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF ALUMINIUM; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF ALUMINIUM; SANITARY WARE AND PARTS THEREOF, OF ALUMINIUM:Table, kitchen or other household articles and parts thereof; pot scourers and scouring or polishing pads, gloves and the like:Pot scourers and scouring or polishing pads, gloves and the like" +76151090,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF ALUMINIUM; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF ALUMINIUM; SANITARY WARE AND PARTS THEREOF, OF ALUMINIUM:Table, kitchen or other household articles and parts thereof; pot scourers and scouring or polishing pads, gloves and the like:Parts" +76152000,table kitchen or other household articles and parts thereof of aluminium pot scourers and scouring or polishing pads gloves and the like of aluminium sanitary ware and parts thereof of aluminium >> sanitary ware and parts thereof +76152010,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF ALUMINIUM; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF ALUMINIUM; SANITARY WARE AND PARTS THEREOF, OF ALUMINIUM:Sanitary ware and parts thereof :Sanitary ware of aluminium and aluminium alloys for indoor use" +76152020,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF ALUMINIUM; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF ALUMINIUM; SANITARY WARE AND PARTS THEREOF, OF ALUMINIUM:Sanitary ware and parts thereof :Parts" +76152090,"TABLE, KITCHEN OR OTHER HOUSEHOLD ARTICLES AND PARTS THEREOF, OF ALUMINIUM; POT SCOURERS AND SCOURING OR POLISHING PADS, GLOVES AND THE LIKE, OF ALUMINIUM; SANITARY WARE AND PARTS THEREOF, OF ALUMINIUM:Sanitary ware and parts thereof :Other" +76160000,other articles of aluminium +76161000,"OTHER ARTICLES OF ALUMINIUM::Nails, tacks, staples (other than those of heading 8305), screws, bolts, nuts, screw hooks, rivets, cotters, cotter-pins, washers and similar articles" +76169100,"OTHER ARTICLES OF ALUMINIUM::Cloth, grill, netting and fencing, of aluminium wire" +76169900,other articles of aluminium >> other +76169910,OTHER ARTICLES OF ALUMINIUM:Other :Expanded metal of aluminium and aluminium alloys +76169920,OTHER ARTICLES OF ALUMINIUM:Other :Chains +76169930,OTHER ARTICLES OF ALUMINIUM:Other :Bobbins +76169990,OTHER ARTICLES OF ALUMINIUM:Other :Other +78011000,UNWROUGHT LEAD::Refined lead(OLD tariff) +78019100,UNWROUGHT LEAD::Containing by weight antimony as the principal other element(OLD tariff) +78019910,UNWROUGHT LEAD:Other :Pig lead(OLD tariff) +78019920,UNWROUGHT LEAD:Other :Unrefined lead(OLD tariff) +78019930,UNWROUGHT LEAD:Other :Unrefined lead alloys(OLD tariff) +78019990,UNWROUGHT LEAD:Other :Other(OLD tariff) +78020010,"LEAD WASTE AND SCRAP:Lead waste and scrap:Lead scrap, namely the following scrap lead-soft covered by ISRI code word `Racks'; mixed hard or soft scrap lead covered by ISRI code word `Radio'; wheel weights covered by ISRI code word `Ropes'; mixed common babbit covered by ISRI code word `Roses'(OLD tariff)" +78020090,LEAD WASTE AND SCRAP:Lead waste and scrap:Other(OLD tariff) +78041110,"LEAD PLATES, SHEETS, STRIP AND FOIL; LEAD POWDERS AND FLAKES:Sheets, strip and foil of a thickness (excluding any backing) not exceeding 0.2 mm :Sheets and strip(OLD 
tariff)" +78041120,"LEAD PLATES, SHEETS, STRIP AND FOIL; LEAD POWDERS AND FLAKES:Sheets, strip and foil of a thickness (excluding any backing) not exceeding 0.2 mm :Foil(OLD tariff)" +78041910,"LEAD PLATES, SHEETS, STRIP AND FOIL; LEAD POWDERS AND FLAKES:Other :Plates(OLD tariff)" +78041990,"LEAD PLATES, SHEETS, STRIP AND FOIL; LEAD POWDERS AND FLAKES:Other :Other(OLD tariff)" +78042000,"LEAD PLATES, SHEETS, STRIP AND FOIL; LEAD POWDERS AND FLAKES::Powders and flakes(OLD tariff)" +78060010,OTHER ARTICLES OF LEAD:Other articles of lead:Sanitary fixtures(OLD tariff) +78060020,OTHER ARTICLES OF LEAD:Other articles of lead:Indian lead seals(OLD tariff) +78060030,OTHER ARTICLES OF LEAD:Other articles of lead:Blanks(OLD tariff) +78060090,OTHER ARTICLES OF LEAD:Other articles of lead:Other(OLD tariff) +79010000,unwrought zinc zinc not alloyed +79011100,UNWROUGHT ZINC::Containing by weight 99.99% or more of zinc +79011200,UNWROUGHT ZINC::Containing by weight less than 99.99% of zinc +79012000,unwrought zinc zinc not alloyed >> zinc alloys +79012010,UNWROUGHT ZINC:Zinc alloys :Mozak or alloys of zinc and aluminium containing not less than 94% by weight of zinc +79012090,UNWROUGHT ZINC:Zinc alloys :Other +79020000,zinc waste and scrap zinc waste and scrap +79020010,"ZINC WASTE AND SCRAP:Zinc waste and scrap :Zinc scrap, namely the following old zinc die cast scrap covered by ISRI code word `Saves';New zinc die cast scrap covered by ISRI code word 'Scabs'; New plated zinc die cast scrap covered by ISRI code word 'Scope'; Zinc die cast automotive grills covered by ISRI code word 'Scoot'; Old scrap zinc covered by ISRI code word 'Score'; New zinc clippings covered by ISRI code word 'Screen'; Crushed clean sorted fragmentizers die cast scrap, as produced from automobile fragmentizers covered by ISRI code word 'Scribe';" +79020090,ZINC WASTE AND SCRAP:Zinc waste and scrap :Other +79030000,zinc dust powders and flakes +79031000,"ZINC DUST, POWDERS AND FLAKES::Zinc dust" +79039000,"ZINC DUST, POWDERS AND FLAKES::Other" +79040000,zinc bars rods profiles and wire zinc bars rods profiles and wire bars and rods +79040011,"ZINC BARS, RODS, PROFILES AND WIRE:Zinc bars, rods, profiles and wire :Hollow bars" +79040012,"ZINC BARS, RODS, PROFILES AND WIRE:Zinc bars, rods, profiles and wire :Rods, including wire rods" +79040019,"ZINC BARS, RODS, PROFILES AND WIRE:Zinc bars, rods, profiles and wire :Other" +79040021,"ZINC BARS, RODS, PROFILES AND WIRE:Zinc bars, rods, profiles and wire :Hollow" +79040022,"ZINC BARS, RODS, PROFILES AND WIRE:Zinc bars, rods, profiles and wire :Angles, shapes and sections" +79040029,"ZINC BARS, RODS, PROFILES AND WIRE:Zinc bars, rods, profiles and wire :Other" +79040030,"ZINC BARS, RODS, PROFILES AND WIRE:Zinc bars, rods, profiles and wire :Wire" +79050000,zinc plates sheets strip and foil zinc plates sheets strip and foil +79050010,"ZINC PLATES, SHEETS, STRIP AND FOIL:Zinc plates, sheets, strip and foil :Calots" +79050020,"ZINC PLATES, SHEETS, STRIP AND FOIL:Zinc plates, sheets, strip and foil :Plates" +79050030,"ZINC PLATES, SHEETS, STRIP AND FOIL:Zinc plates, sheets, strip and foil :Sheets, strip and circles" +79050040,"ZINC PLATES, SHEETS, STRIP AND FOIL:Zinc plates, sheets, strip and foil :Foil" +79070000,other articles of zinc other articles of zinc +79070010,OTHER ARTICLES OF ZINC:Other articles of zinc :Sanitary fixtures +79070090,OTHER ARTICLES OF ZINC:Other articles of zinc :Other +80000000,965 +80010000,965 >> unwrought tin +80011000,965 >> unwrought tin >> tin not alloyed 
+80011010,"UNWROUGHT TIN:Tin, not alloyed :Blocks" +80011090,"UNWROUGHT TIN:Tin, not alloyed :Ingots, pigs, slabs and other primary forms of tin" +80012000,UNWROUGHT TIN::Tin alloys +80020000,965 >> tin waste and scrap tin waste and scrap +80020010,"TIN WASTE AND SCRAP:Tin waste and scrap :Tin scrap, namely the following block tin covered by ISRI code word `Ranch'; high tin base babbit covered by ISRI code word `Raves'; pewter covered by ISRI code word `Ranks'" +80020090,TIN WASTE AND SCRAP:Tin waste and scrap :Other +80030000,965 >> tin bars rods profiles and wire tin bars rods profiles and wire +80030010,"TIN BARS, RODS, PROFILES AND WIRE:Tin bars, rods, profiles and wire :Hollow bars" +80030020,"TIN BARS, RODS, PROFILES AND WIRE:Tin bars, rods, profiles and wire :Bars, other than hollow bars; rods" +80030030,"TIN BARS, RODS, PROFILES AND WIRE:Tin bars, rods, profiles and wire :Profiles" +80030040,"TIN BARS, RODS, PROFILES AND WIRE:Tin bars, rods, profiles and wire :Wire(OLD tariff)" +80070010,OTHER ARTICLES OF TIN:Other articles of tin :Blanks(OLD tariff) +80070090,OTHER ARTICLES OF TIN:Other articles of tin :Other(OLD tariff) +80790000,965 >> other +81010000,tungsten wolfram and articles thereof including waste and scrap +81011000,"TUNGSTEN (WOLFRAM) AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP::Powders" +81019400,"TUNGSTEN (WOLFRAM) AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP::Unwrought tungsten, including bars and rods obtained simply by sintering" +81019600,"TUNGSTEN (WOLFRAM) AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP::Wire" +81019700,"TUNGSTEN (WOLFRAM) AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP::Waste and scrap" +81019900,tungsten wolfram and articles thereof including waste and scrap >> other +81019910,"TUNGSTEN (WOLFRAM) AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Other :Tungsten filament" +81019990,"TUNGSTEN (WOLFRAM) AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Other :Other" +81020000,molybdenum and articles thereof including waste and scrap +81021000,"MOLYBDENUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP::Powders" +81029400,"MOLYBDENUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP::Unwrought molybdenum, including bars and rods obtained simply by sintering" +81029500,molybdenum and articles thereof including waste and scrap >> bars and rods other than those obtained simply by sintering profiles plates sheets strip and foil +81029510,"MOLYBDENUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Bars and rods, other than those obtained simply by sintering, profiles, plates, sheets, strip and foil :Hollow bars" +81029590,"MOLYBDENUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Bars and rods, other than those obtained simply by sintering, profiles, plates, sheets, strip and foil :Other" +81029600,"MOLYBDENUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP::Wire" +81029700,"MOLYBDENUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP::Waste and scrap" +81029900,"MOLYBDENUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP::Other" +81030000,tantalum and articles thereof including waste and scrap +81032000,tantalum and articles thereof including waste and scrap >> unwrought tantalum including bars and rods obtained simply by sintering powders +81032010,"TANTALUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Unwrought tantalum, including bars and rods obtained simply by sintering; powders :Hollow bars" +81032090,"TANTALUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Unwrought tantalum, including bars and rods obtained simply by sintering; powders 
:Other" +81033000,"TANTALUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP::Waste and scrap" +81039000,"TANTALUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP::Other(OLD tariff)" +81039100,"TANTALUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:crucibles:Crucibles" +81039900,"TANTALUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:other:Other" +81040000,waste and scrap unwrought magnesium +81041100,"MAGNESIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP::Containing at least 99.8% by weight of magnesium" +81041900,"MAGNESIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP::Other" +81042000,waste and scrap unwrought magnesium >> waste and scrap +81042010,"MAGNESIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Waste and scrap :Magnesium scrap, namely the following magnesium clips covered by ISRI code word `Wafer'; magnesium scrap covered by ISRI code word `Walnut'; magnesium engraver plates covered by ISRI code word `Wine'; magnesium dock boards covered by ISRI code word `Wood'; magnesium turnings covered by ISRI code word world " +81042090,"MAGNESIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Waste and scrap :Other" +81043000,waste and scrap unwrought magnesium >> raspings turnings and granules graded according to size powders +81043010,"MAGNESIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Raspings, turnings and granules, graded according to size; powders :Raspings, turnings and granules, graded according to size" +81043020,"MAGNESIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Raspings, turnings and granules, graded according to size; powders :Powders" +81049000,waste and scrap unwrought magnesium >> other +81049010,"MAGNESIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Other :Other magnesium and magnesium base alloys, wrought" +81049020,"MAGNESIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Other :Flakes" +81049030,"MAGNESIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Other :Wire" +81049090,"MAGNESIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Other :Other" +81050000,cobalt mattes and other intermediate products of cobalt metallurgy cobalt and articles thereof including waste and scrap +81052000,cobalt mattes and other intermediate products of cobalt metallurgy cobalt and articles thereof including waste and scrap >> cobalt mattes and other intermediate products of cobalt metallurgy unwrought cobalt powders +81052010,"COBALT MATTES AND OTHER INTERMEDIATE PRODUCTS OF COBALT METALLURGY; COBALT AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Cobalt mattes and other intermediate products of cobalt metallurgy; unwrought cobalt; powders :Cobalt mattes and other intermediate products of cobalt metallurgy" +81052020,"COBALT MATTES AND OTHER INTERMEDIATE PRODUCTS OF COBALT METALLURGY; COBALT AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Cobalt mattes and other intermediate products of cobalt metallurgy; unwrought cobalt; powders :Cobalt unwrought" +81052030,"COBALT MATTES AND OTHER INTERMEDIATE PRODUCTS OF COBALT METALLURGY; COBALT AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Cobalt mattes and other intermediate products of cobalt metallurgy; unwrought cobalt; powders :Powders" +81053000,"COBALT MATTES AND OTHER INTERMEDIATE PRODUCTS OF COBALT METALLURGY; COBALT AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP::Waste and scrap" +81059000,"COBALT MATTES AND OTHER INTERMEDIATE PRODUCTS OF COBALT METALLURGY; COBALT AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP::Other" +81060000,bismuth and articles thereof including waste and scrap +81060010,"BISMUTH 
AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Bismuth and articles thereof, including waste and scrap:Bismuth, unwrought(OLD tariff)" +81060020,"BISMUTH AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Bismuth and articles thereof, including waste and scrap:Waste and scrap of bismuth and bismuth alloys(OLD tariff)" +81060030,"BISMUTH AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Bismuth and articles thereof, including waste and scrap:Bismuth, wrought(OLD tariff)" +81060090,"BISMUTH AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Bismuth and articles thereof, including waste and scrap:Other(OLD tariff)" +81061000,bismuth and articles thereof including waste and scrap >> containing more than of bismuth by weight +81061010,"BISMUTH AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Containing more than 99.99 % of bismuth, by weight:Bismuth, unwrought" +81061020,"BISMUTH AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Containing more than 99.99 % of bismuth, by weight: Article of bismuth" +81061090,"BISMUTH AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Containing more than 99.99 % of bismuth, by weight:Other" +81069000,bismuth and articles thereof including waste and scrap >> other +81069010,"BISMUTH AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Other:Waste and scrap of bismuth and bismuth alloys" +81069090,"BISMUTH AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Other:Other" +81072000,"CADMIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP::Unwrought cadmium; powders(OLD tariff)" +81073000,"CADMIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP::Waste and scrap(OLD tariff)" +81079010,"CADMIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Other :Cadmium, wrought(OLD tariff)" +81079090,"CADMIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Other :Other(OLD tariff)" +81080000,titanium and articles thereof including waste and scrap +81082000,"TITANIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP::Unwrought titanium; powders" +81083000,"TITANIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP::Waste and scrap" +81089000,titanium and articles thereof including waste and scrap >> other +81089010,"TITANIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Other :Titanium, wrought" +81089090,"TITANIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Other :Other" +81090000,zirconium and articles thereof including waste and scrap unwrought zirconium powders +81092000,"ZIRCONIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP::Unwrought zirconium; powders(OLD tariff)" +81092100,"ZIRCONIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Containing less than 1 part hafnium to 500 parts zirconium by weight:Containing less than 1 part hafnium to 500 parts zirconium by weight" +81092900,"ZIRCONIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:other:Other" +81093000,"ZIRCONIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP::Waste and scrap(OLD tariff)" +81093100,"ZIRCONIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:zirconium:Containing less than 1 part hafnium to 500 parts zirconium by weight" +81093900,"ZIRCONIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:other:Other" +81099000,"ZIRCONIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP::Other(OLD tariff)" +81099100,"ZIRCONIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:zirconium:Containing less than 1 part hafnium to 500 parts zirconium by weight" +81099900,"ZIRCONIUM AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:other:Other" +81100000,antimony and articles thereof including waste and scrap +81101000,"ANTIMONY AND ARTICLES
THEREOF, INCLUDING WASTE AND SCRAP::Unwrought antimony; powders" +81102000,"ANTIMONY AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP::Waste and scrap" +81109000,"ANTIMONY AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP::Other" +81110000,manganese and articles thereof including waste and scrap manganese and articles thereof including waste and scrap +81110010,"MANGANESE AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Manganese and articles thereof, including waste and scrap:Unwrought manganese and manganese base alloys" +81110020,"MANGANESE AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Manganese and articles thereof, including waste and scrap:Waste and scrap of manganese base alloys" +81110030,"MANGANESE AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Manganese and articles thereof, including waste and scrap:Wrought manganese" +81110090,"MANGANESE AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Manganese and articles thereof, including waste and scrap:Other" +81120000,beryllium chromium hafnium rhenium thallium cadmium germanium vanadium gallium indium and niobium columbium and and articles of these metals including waste and scrap beryllium +81121200,"BERYLLIUM, CHROMIUM, GERMANIUM, VANADIUM, GALLIUM, HAFNIUM, INDIUM, NIOBIUM (COLUMBIUM), RHENIUM AND THALLIUM, AND ARTICLES OF THESE METALS, INCLUDING WASTE AND SCRAP::Unwrought; powders" +81121300,"BERYLLIUM, CHROMIUM, GERMANIUM, VANADIUM, GALLIUM, HAFNIUM, INDIUM, NIOBIUM (COLUMBIUM), RHENIUM AND THALLIUM, AND ARTICLES OF THESE METALS, INCLUDING WASTE AND SCRAP::Waste and scrap" +81121900,"BERYLLIUM, CHROMIUM, GERMANIUM, VANADIUM, GALLIUM, HAFNIUM, INDIUM, NIOBIUM (COLUMBIUM), RHENIUM AND THALLIUM, AND ARTICLES OF THESE METALS, INCLUDING WASTE AND SCRAP::Other" +81122100,"BERYLLIUM, CHROMIUM, GERMANIUM, VANADIUM, GALLIUM, HAFNIUM, INDIUM, NIOBIUM (COLUMBIUM), RHENIUM AND THALLIUM, AND ARTICLES OF THESE METALS, INCLUDING WASTE AND SCRAP::Unwrought; powders" +81122200,"BERYLLIUM, CHROMIUM, GERMANIUM, VANADIUM, GALLIUM, HAFNIUM, INDIUM, NIOBIUM (COLUMBIUM), RHENIUM AND THALLIUM, AND ARTICLES OF THESE METALS, INCLUDING WASTE AND SCRAP::Waste and scrap" +81122900,"BERYLLIUM, CHROMIUM, GERMANIUM, VANADIUM, GALLIUM, HAFNIUM, INDIUM, NIOBIUM (COLUMBIUM), RHENIUM AND THALLIUM, AND ARTICLES OF THESE METALS, INCLUDING WASTE AND SCRAP::Other" +81123100,beryllium chromium hafnium rhenium thallium cadmium germanium vanadium gallium indium and niobium columbium and and articles of these metals including waste and scrap beryllium >> unwrought waste and scrap powders +81123110,"BERYLLIUM, CHROMIUM, GERMANIUM, VANADIUM, GALLIUM, HAFNIUM, INDIUM, NIOBIUM (COLUMBIUM), RHENIUM AND THALLIUM, AND ARTICLES OF THESE METALS, INCLUDING WASTE AND SCRAP:Unwrought; waste and scrap; powders::Unwrought" +81123120,"BERYLLIUM, CHROMIUM, GERMANIUM, VANADIUM, GALLIUM, HAFNIUM, INDIUM, NIOBIUM (COLUMBIUM), RHENIUM AND THALLIUM, AND ARTICLES OF THESE METALS, INCLUDING WASTE AND SCRAP:Unwrought; waste and scrap; powders::Waste and scrap" +81123130,"BERYLLIUM, CHROMIUM, GERMANIUM, VANADIUM, GALLIUM, HAFNIUM, INDIUM, NIOBIUM (COLUMBIUM), RHENIUM AND THALLIUM, AND ARTICLES OF THESE METALS, INCLUDING WASTE AND SCRAP:Unwrought; waste and scrap; powders::Powders" +81123900,"BERYLLIUM, CHROMIUM, GERMANIUM, VANADIUM, GALLIUM, HAFNIUM, INDIUM, NIOBIUM (COLUMBIUM), RHENIUM AND THALLIUM, AND ARTICLES OF THESE METALS, INCLUDING WASTE AND SCRAP:other:Other" +81124100,beryllium chromium hafnium rhenium thallium cadmium germanium vanadium gallium indium and niobium columbium and and articles 
of these metals including waste and scrap beryllium >> unwrought waste and scrap powders +81124110,"BERYLLIUM, CHROMIUM, GERMANIUM, VANADIUM, GALLIUM, HAFNIUM, INDIUM, NIOBIUM (COLUMBIUM), RHENIUM AND THALLIUM, AND ARTICLES OF THESE METALS, INCLUDING WASTE AND SCRAP:Unwrought; waste and scrap; powders:Unwrought" +81124120,"BERYLLIUM, CHROMIUM, GERMANIUM, VANADIUM, GALLIUM, HAFNIUM, INDIUM, NIOBIUM (COLUMBIUM), RHENIUM AND THALLIUM, AND ARTICLES OF THESE METALS, INCLUDING WASTE AND SCRAP:Unwrought; waste and scrap; powders:Waste and scrap" +81124130,"BERYLLIUM, CHROMIUM, GERMANIUM, VANADIUM, GALLIUM, HAFNIUM, INDIUM, NIOBIUM (COLUMBIUM), RHENIUM AND THALLIUM, AND ARTICLES OF THESE METALS, INCLUDING WASTE AND SCRAP:Unwrought; waste and scrap; powders:Powders" +81124900,"BERYLLIUM, CHROMIUM, GERMANIUM, VANADIUM, GALLIUM, HAFNIUM, INDIUM, NIOBIUM (COLUMBIUM), RHENIUM AND THALLIUM, AND ARTICLES OF THESE METALS, INCLUDING WASTE AND SCRAP:other:Other" +81125100,"BERYLLIUM, CHROMIUM, GERMANIUM, VANADIUM, GALLIUM, HAFNIUM, INDIUM, NIOBIUM (COLUMBIUM), RHENIUM AND THALLIUM, AND ARTICLES OF THESE METALS, INCLUDING WASTE AND SCRAP::Unwrought; powders" +81125200,"BERYLLIUM, CHROMIUM, GERMANIUM, VANADIUM, GALLIUM, HAFNIUM, INDIUM, NIOBIUM (COLUMBIUM), RHENIUM AND THALLIUM, AND ARTICLES OF THESE METALS, INCLUDING WASTE AND SCRAP::Waste and scrap" +81125900,"BERYLLIUM, CHROMIUM, GERMANIUM, VANADIUM, GALLIUM, HAFNIUM, INDIUM, NIOBIUM (COLUMBIUM), RHENIUM AND THALLIUM, AND ARTICLES OF THESE METALS, INCLUDING WASTE AND SCRAP::Other" +81126100,"BERYLLIUM, CHROMIUM, GERMANIUM, VANADIUM, GALLIUM, HAFNIUM, INDIUM, NIOBIUM (COLUMBIUM), RHENIUM AND THALLIUM, AND ARTICLES OF THESE METALS, INCLUDING WASTE AND SCRAP:waste and scrap:Waste and scrap" +81126900,"BERYLLIUM, CHROMIUM, GERMANIUM, VANADIUM, GALLIUM, HAFNIUM, INDIUM, NIOBIUM (COLUMBIUM), RHENIUM AND THALLIUM, AND ARTICLES OF THESE METALS, INCLUDING WASTE AND SCRAP:other:Other" +81126910,beryllium chromium hafnium rhenium thallium cadmium germanium vanadium gallium indium and niobium columbium and and articles of these metals including waste and scrap beryllium >> other >> cadmium unwrought powders +81126920,beryllium chromium hafnium rhenium thallium cadmium germanium vanadium gallium indium and niobium columbium and and articles of these metals including waste and scrap beryllium >> other >> cadmium wrought +81126990,beryllium chromium hafnium rhenium thallium cadmium germanium vanadium gallium indium and niobium columbium and and articles of these metals including waste and scrap beryllium >> other >> other other +81129200,"BERYLLIUM, CHROMIUM, GERMANIUM, VANADIUM, GALLIUM, HAFNIUM, INDIUM, NIOBIUM (COLUMBIUM), RHENIUM AND THALLIUM, AND ARTICLES OF THESE METALS, INCLUDING WASTE AND SCRAP::Unwrought; waste and scrap; powders" +81129900,"BERYLLIUM, CHROMIUM, GERMANIUM, VANADIUM, GALLIUM, HAFNIUM, INDIUM, NIOBIUM (COLUMBIUM), RHENIUM AND THALLIUM, AND ARTICLES OF THESE METALS, INCLUDING WASTE AND SCRAP::Other" +81130000,cermets and articles thereof including waste and scrap cermets and articles thereof including waste andscrap +81130010,"CERMETS AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Cermets and articles thereof, including waste and scrap :Unwrought cermets" +81130020,"CERMETS AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Cermets and articles thereof, including waste and scrap :Waste and scrap of cermets" +81130030,"CERMETS AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Cermets and articles thereof, including waste and scrap :Articles of cermets" 
+81130090,"CERMETS AND ARTICLES THEREOF, INCLUDING WASTE AND SCRAP:Cermets and articles thereof, including waste and scrap :Other" +82011000,"HAND TOOLS, THE FOLLOWING SPADES, SHOVELS, MATTOCKS, PICKS, HOES, FORKS AND RAKES; AXES, BILL HOOKS AND SIMILAR HEWING TOOLS; SECATEURS AND PRUNERS OF ANY KIND; SCYTHES, SICKLES, HAY KNIVES, HEDGE SHEARS, TIMBER WEDGES AND OTHER TOOLS OF A KIND USED IN AGRICULTURE, HORTICULTURE OR FORESTRY.::Spades and shovels(OLD tariff)" +82013000,"HAND TOOLS, THE FOLLOWING SPADES, SHOVELS, MATTOCKS, PICKS, HOES, FORKS AND RAKES; AXES, BILL HOOKS AND SIMILAR HEWING TOOLS; SECATEURS AND PRUNERS OF ANY KIND; SCYTHES, SICKLES, HAY KNIVES, HEDGE SHEARS, TIMBER WEDGES AND OTHER TOOLS OF A KIND USED IN AGRICULTURE, HORTICULTURE OR FORESTRY.::Mattocks, picks, hoes and rakes(OLD tariff)" +82014000,"HAND TOOLS, THE FOLLOWING SPADES, SHOVELS, MATTOCKS, PICKS, HOES, FORKS AND RAKES; AXES, BILL HOOKS AND SIMILAR HEWING TOOLS; SECATEURS AND PRUNERS OF ANY KIND; SCYTHES, SICKLES, HAY KNIVES, HEDGE SHEARS, TIMBER WEDGES AND OTHER TOOLS OF A KIND USED IN AGRICULTURE, HORTICULTURE OR FORESTRY.::Axes, bill hooks and similar hewing tools(OLD tariff)" +82015000,"HAND TOOLS, THE FOLLOWING SPADES, SHOVELS, MATTOCKS, PICKS, HOES, FORKS AND RAKES; AXES, BILL HOOKS AND SIMILAR HEWING TOOLS; SECATEURS AND PRUNERS OF ANY KIND; SCYTHES, SICKLES, HAY KNIVES, HEDGE SHEARS, TIMBER WEDGES AND OTHER TOOLS OF A KIND USED IN AGRICULTURE, HORTICULTURE OR FORESTRY.::Secateurs and similar one-handed pruners and shears (including poultry shears)(OLD tariff)" +82016000,"HAND TOOLS, THE FOLLOWING SPADES, SHOVELS, MATTOCKS, PICKS, HOES, FORKS AND RAKES; AXES, BILL HOOKS AND SIMILAR HEWING TOOLS; SECATEURS AND PRUNERS OF ANY KIND; SCYTHES, SICKLES, HAY KNIVES, HEDGE SHEARS, TIMBER WEDGES AND OTHER TOOLS OF A KIND USED IN AGRICULTURE, HORTICULTURE OR FORESTRY.::Hedge shears, two-handed pruning shears and similar two-handed shears(OLD tariff)" +82019000,"HAND TOOLS, THE FOLLOWING SPADES, SHOVELS, MATTOCKS, PICKS, HOES, FORKS AND RAKES; AXES, BILL HOOKS AND SIMILAR HEWING TOOLS; SECATEURS AND PRUNERS OF ANY KIND; SCYTHES, SICKLES, HAY KNIVES, HEDGE SHEARS, TIMBER WEDGES AND OTHER TOOLS OF A KIND USED IN AGRICULTURE, HORTICULTURE OR FORESTRY.::Other hand tools of a kind used in agriculture, horticulture or forestry(OLD tariff)" +82020000,hand saws blades for saws of all kinds including slitting slotting or toothless saw blades +82021000,hand saws blades for saws of all kinds including slitting slotting or toothless saw blades >> hand saws +82021010,"HAND SAWS; BLADES FOR SAWS OF ALL KINDS (INCLUDING SLITTING, SLOTTING OR TOOTHLESS SAW BLADES):Hand saws :Metal working hand saws" +82021020,"HAND SAWS; BLADES FOR SAWS OF ALL KINDS (INCLUDING SLITTING, SLOTTING OR TOOTHLESS SAW BLADES):Hand saws :Wood working and similar hand saws" +82021090,"HAND SAWS; BLADES FOR SAWS OF ALL KINDS (INCLUDING SLITTING, SLOTTING OR TOOTHLESS SAW BLADES):Hand saws :Other" +82022000,"HAND SAWS; BLADES FOR SAWS OF ALL KINDS (INCLUDING SLITTING, SLOTTING OR TOOTHLESS SAW BLADES)::Band saw blades" +82023100,"HAND SAWS; BLADES FOR SAWS OF ALL KINDS (INCLUDING SLITTING, SLOTTING OR TOOTHLESS SAW BLADES)::With working part of steel" +82023900,"HAND SAWS; BLADES FOR SAWS OF ALL KINDS (INCLUDING SLITTING, SLOTTING OR TOOTHLESS SAW BLADES)::Other, including parts" +82024000,"HAND SAWS; BLADES FOR SAWS OF ALL KINDS (INCLUDING SLITTING, SLOTTING OR TOOTHLESS SAW BLADES)::Chain saw blades" +82029100,hand saws blades for saws of all kinds 
including slitting slotting or toothless saw blades >> straight saw blades for working metal +82029110,"HAND SAWS; BLADES FOR SAWS OF ALL KINDS (INCLUDING SLITTING, SLOTTING OR TOOTHLESS SAW BLADES):Straight saw blades, for working metal:Machine operated" +82029120,"HAND SAWS; BLADES FOR SAWS OF ALL KINDS (INCLUDING SLITTING, SLOTTING OR TOOTHLESS SAW BLADES):Straight saw blades, for working metal:Hand operated" +82029900,hand saws blades for saws of all kinds including slitting slotting or toothless saw blades >> other +82029910,"HAND SAWS; BLADES FOR SAWS OF ALL KINDS (INCLUDING SLITTING, SLOTTING OR TOOTHLESS SAW BLADES):Other:Hacksaw frames" +82029990,"HAND SAWS; BLADES FOR SAWS OF ALL KINDS (INCLUDING SLITTING, SLOTTING OR TOOTHLESS SAW BLADES):Other:Other" +82030000,files rasps pliers including cutting pliers pincers tweezers metal cutting shears pipe cutters bolt croppers perforating punches and similar hand tools +82031000,"FILES, RASPS, PLIERS (INCLUDING CUTTING PLIERS), PINCERS, TWEEZERS, METAL CUTTING SHEARS, PIPE-CUTTERS, BOLT CROPPERS, PERFORATING PUNCHES AND SIMILAR HAND TOOLS::Files, rasps and similar tools" +82032000,"FILES, RASPS, PLIERS (INCLUDING CUTTING PLIERS), PINCERS, TWEEZERS, METAL CUTTING SHEARS, PIPE-CUTTERS, BOLT CROPPERS, PERFORATING PUNCHES AND SIMILAR HAND TOOLS::Pliers (including cutting pliers), pincers, tweezers and similar tools" +82033000,"FILES, RASPS, PLIERS (INCLUDING CUTTING PLIERS), PINCERS, TWEEZERS, METAL CUTTING SHEARS, PIPE-CUTTERS, BOLT CROPPERS, PERFORATING PUNCHES AND SIMILAR HAND TOOLS::Metal cutting shears and similar tools" +82034000,files rasps pliers including cutting pliers pincers tweezers metal cutting shears pipe cutters bolt croppers perforating punches and similar hand tools >> bolt croppers perforating punches and similar tools +82034010,"FILES, RASPS, PLIERS (INCLUDING CUTTING PLIERS), PINCERS, TWEEZERS, METAL CUTTING SHEARS, PIPE-CUTTERS, BOLT CROPPERS, PERFORATING PUNCHES AND SIMILAR HAND TOOLS:Pipe-cutters, bolt croppers, perforating punches and similar tools :Perforating punches and pipe cutters" +82034090,"FILES, RASPS, PLIERS (INCLUDING CUTTING PLIERS), PINCERS, TWEEZERS, METAL CUTTING SHEARS, PIPE-CUTTERS, BOLT CROPPERS, PERFORATING PUNCHES AND SIMILAR HAND TOOLS:Pipe-cutters, bolt croppers, perforating punches and similar tools :Other" +82040000,hand operated spanners and wrenches including torque meter wrenches but not including tap wrenches interchangeable spanner sockets with or without handles spanners and wrenches +82041100,hand operated spanners and wrenches including torque meter wrenches but not including tap wrenches interchangeable spanner sockets with or without handles spanners and wrenches >> +82041110,"HAND-OPERATED SPANNERS AND WRENCHES (INCLUDING TORQUE METER WRENCHES BUT NOT INCLUDING TAP WRENCHES); INTERCHANGEABLE SPANNER SOCKETS, WITH OR WITHOUT HANDLES:Non-adjustable :Spanners" +82041120,"HAND-OPERATED SPANNERS AND WRENCHES (INCLUDING TORQUE METER WRENCHES BUT NOT INCLUDING TAP WRENCHES); INTERCHANGEABLE SPANNER SOCKETS, WITH OR WITHOUT HANDLES:Non-adjustable :Wrenches" +82041200,hand operated spanners and wrenches including torque meter wrenches but not including tap wrenches interchangeable spanner sockets with or without handles spanners and wrenches >> adjustable +82041210,"HAND-OPERATED SPANNERS AND WRENCHES (INCLUDING TORQUE METER WRENCHES BUT NOT INCLUDING TAP WRENCHES); INTERCHANGEABLE SPANNER SOCKETS, WITH OR WITHOUT HANDLES:Adjustable :Spanners"
+82041220,"HAND-OPERATED SPANNERS AND WRENCHES (INCLUDING TORQUE METER WRENCHES BUT NOT INCLUDING TAP WRENCHES); INTERCHANGEABLE SPANNER SOCKETS, WITH OR WITHOUT HANDLES:Adjustable :Wrenches" +82042000,"HAND-OPERATED SPANNERS AND WRENCHES (INCLUDING TORQUE METER WRENCHES BUT NOT INCLUDING TAP WRENCHES); INTERCHANGEABLE SPANNER SOCKETS, WITH OR WITHOUT HANDLES::Interchangeable spanner sockets, with or without handles" +82050000,hand tools including glaziers diamonds not elsewhere specified or included blow lamps vices clamps and the like other than accessories for and parts of o r wat et cu tting m a chin e s a nvi ls portable forges grinding wheels with frameworks +82051000,"HAND TOOLS (INCLUDING GLAZIER'S DIAMONDS), NOT ELSEWHERE SPECIFIED OR INCLUDED; BLOW LAMPS; VICES; CLAMPS AND THE LIKE, OTHER THAN ACCESSORIES FOR AND PARTS OF, MACHINE TOOLS; ANVILS; PORTABLE FORGES; HAND-OR PEDAL-OPERATED GRINDING WHEELS WITH FRAMEWORKS::Drilling, threading or tapping tools" +82052000,"HAND TOOLS (INCLUDING GLAZIER'S DIAMONDS), NOT ELSEWHERE SPECIFIED OR INCLUDED; BLOW LAMPS; VICES; CLAMPS AND THE LIKE, OTHER THAN ACCESSORIES FOR AND PARTS OF, MACHINE TOOLS; ANVILS; PORTABLE FORGES; HAND-OR PEDAL-OPERATED GRINDING WHEELS WITH FRAMEWORKS::Hammers and sledge hammers" +82053000,"HAND TOOLS (INCLUDING GLAZIER'S DIAMONDS), NOT ELSEWHERE SPECIFIED OR INCLUDED; BLOW LAMPS; VICES; CLAMPS AND THE LIKE, OTHER THAN ACCESSORIES FOR AND PARTS OF, MACHINE TOOLS; ANVILS; PORTABLE FORGES; HAND-OR PEDAL-OPERATED GRINDING WHEELS WITH FRAMEWORKS::Planes, chisels, gouges and similar cutting tools for working wood" +82054000,"HAND TOOLS (INCLUDING GLAZIER'S DIAMONDS), NOT ELSEWHERE SPECIFIED OR INCLUDED; BLOW LAMPS; VICES; CLAMPS AND THE LIKE, OTHER THAN ACCESSORIES FOR AND PARTS OF, MACHINE TOOLS; ANVILS; PORTABLE FORGES; HAND-OR PEDAL-OPERATED GRINDING WHEELS WITH FRAMEWORKS::Screwdrivers(OLD tariff)" +82055100,hand tools including glaziers diamonds not elsewhere specified or included blow lamps vices clamps and the like other than accessories for and parts of o r wat et cu tting m a chin e s a nvi ls portable forges grinding wheels with frameworks >> household tools +82055110,"HAND TOOLS (INCLUDING GLAZIER'S DIAMONDS), NOT ELSEWHERE SPECIFIED OR INCLUDED; BLOW LAMPS; VICES; CLAMPS AND THE LIKE, OTHER THAN ACCESSORIES FOR AND PARTS OF, MACHINE TOOLS; ANVILS; PORTABLE FORGES; HAND-OR PEDAL-OPERATED GRINDING WHEELS WITH FRAMEWORKS:Household tools :Can or cork openers" +82055190,"HAND TOOLS (INCLUDING GLAZIER'S DIAMONDS), NOT ELSEWHERE SPECIFIED OR INCLUDED; BLOW LAMPS; VICES; CLAMPS AND THE LIKE, OTHER THAN ACCESSORIES FOR AND PARTS OF, MACHINE TOOLS; ANVILS; PORTABLE FORGES; HAND-OR PEDAL-OPERATED GRINDING WHEELS WITH FRAMEWORKS:Household tools :Other" +82055900,hand tools including glaziers diamonds not elsewhere specified or included blow lamps vices clamps and the like other than accessories for and parts of o r wat et cu tting m a chin e s a nvi ls portable forges grinding wheels with frameworks >> other +82055910,"HAND TOOLS (INCLUDING GLAZIER'S DIAMONDS), NOT ELSEWHERE SPECIFIED OR INCLUDED; BLOW LAMPS; VICES; CLAMPS AND THE LIKE, OTHER THAN ACCESSORIES FOR AND PARTS OF, MACHINE TOOLS; ANVILS; PORTABLE FORGES; HAND-OR PEDAL-OPERATED GRINDING WHEELS WITH FRAMEWORKS:Other:Grease guns (excluding compressed air type)" +82055920,"HAND TOOLS (INCLUDING GLAZIER'S DIAMONDS), NOT ELSEWHERE SPECIFIED OR INCLUDED; BLOW LAMPS; VICES; CLAMPS AND THE LIKE, OTHER THAN ACCESSORIES FOR AND PARTS OF, MACHINE TOOLS; ANVILS; PORTABLE 
FORGES; HAND-OR PEDAL-OPERATED GRINDING WHEELS WITH FRAMEWORKS:Other:Metal working hand tools" +82055930,"HAND TOOLS (INCLUDING GLAZIER'S DIAMONDS), NOT ELSEWHERE SPECIFIED OR INCLUDED; BLOW LAMPS; VICES; CLAMPS AND THE LIKE, OTHER THAN ACCESSORIES FOR AND PARTS OF, MACHINE TOOLS; ANVILS; PORTABLE FORGES; HAND-OR PEDAL-OPERATED GRINDING WHEELS WITH FRAMEWORKS:Other:Hand tools for specified uses, such as watch making tools, goldsmith tools" +82055940,"HAND TOOLS (INCLUDING GLAZIER'S DIAMONDS), NOT ELSEWHERE SPECIFIED OR INCLUDED; BLOW LAMPS; VICES; CLAMPS AND THE LIKE, OTHER THAN ACCESSORIES FOR AND PARTS OF, MACHINE TOOLS; ANVILS; PORTABLE FORGES; HAND-OR PEDAL-OPERATED GRINDING WHEELS WITH FRAMEWORKS:Other:Forks other than those of headings 8201 and 8215(OLD tariff)" +82055990,"HAND TOOLS (INCLUDING GLAZIER'S DIAMONDS), NOT ELSEWHERE SPECIFIED OR INCLUDED; BLOW LAMPS; VICES; CLAMPS AND THE LIKE, OTHER THAN ACCESSORIES FOR AND PARTS OF, MACHINE TOOLS; ANVILS; PORTABLE FORGES; HAND-OR PEDAL-OPERATED GRINDING WHEELS WITH FRAMEWORKS:Other:Other" +82056000,"HAND TOOLS (INCLUDING GLAZIER'S DIAMONDS), NOT ELSEWHERE SPECIFIED OR INCLUDED; BLOW LAMPS; VICES; CLAMPS AND THE LIKE, OTHER THAN ACCESSORIES FOR AND PARTS OF, MACHINE TOOLS; ANVILS; PORTABLE FORGES; HAND-OR PEDAL-OPERATED GRINDING WHEELS WITH FRAMEWORKS::Blow lamps" +82057000,"HAND TOOLS (INCLUDING GLAZIER'S DIAMONDS), NOT ELSEWHERE SPECIFIED OR INCLUDED; BLOW LAMPS; VICES; CLAMPS AND THE LIKE, OTHER THAN ACCESSORIES FOR AND PARTS OF, MACHINE TOOLS; ANVILS; PORTABLE FORGES; HAND-OR PEDAL-OPERATED GRINDING WHEELS WITH FRAMEWORKS::Vices, clamps and the like" +82059000,hand tools including glaziers diamonds not elsewhere specified or included blow lamps vices clamps and the like other than accessories for and parts of o r wat et cu tting m a chin e s a nvi ls portable forges grinding wheels with frameworks >> other including sets of articles of two or more of this heading +82059010,"HAND TOOLS (INCLUDING GLAZIER'S DIAMONDS), NOT ELSEWHERE SPECIFIED OR INCLUDED; BLOW LAMPS; VICES; CLAMPS AND THE LIKE, OTHER THAN ACCESSORIES FOR AND PARTS OF, MACHINE TOOLS; ANVILS; PORTABLE FORGES; HAND-OR PEDAL-OPERATED GRINDING WHEELS WITH FRAMEWORKS:Other, including sets of articles of two or more subheadins of this heading:ANVILS AND PORTABLE FORGES(OLD tariff)" +82059020,"HAND TOOLS (INCLUDING GLAZIER'S DIAMONDS), NOT ELSEWHERE SPECIFIED OR INCLUDED; BLOW LAMPS; VICES; CLAMPS AND THE LIKE, OTHER THAN ACCESSORIES FOR AND PARTS OF, MACHINE TOOLS; ANVILS; PORTABLE FORGES; HAND-OR PEDAL-OPERATED GRINDING WHEELS WITH FRAMEWORKS:Other, including sets of articles of two or more subheadins of this heading:GRINDING WHEELS WITH FRAME, HAND OR PEDAL OPERATED" +82059030,"HAND TOOLS (INCLUDING GLAZIER'S DIAMONDS), NOT ELSEWHERE SPECIFIED OR INCLUDED; BLOW LAMPS; VICES; CLAMPS AND THE LIKE, OTHER THAN ACCESSORIES FOR AND PARTS OF, MACHINE TOOLS; ANVILS; PORTABLE FORGES; HAND-OR PEDAL-OPERATED GRINDING WHEELS WITH FRAMEWORKS:Other, including sets of articles of two or more subheadins of this heading:SETS OF ARTICLES OF TWO OR MORE OF THE FOREGOING SUB HEADING" +82059090,"HAND TOOLS (INCLUDING GLAZIER'S DIAMONDS), NOT ELSEWHERE SPECIFIED OR INCLUDED; BLOW LAMPS; VICES; CLAMPS AND THE LIKE, OTHER THAN ACCESSORIES FOR AND PARTS OF, MACHINE TOOLS; ANVILS; PORTABLE FORGES; HAND-OR PEDAL-OPERATED GRINDING WHEELS WITH FRAMEWORKS:Other, including sets of articles of two or more subheadins of this heading:OTHER(OLD tariff)" +82060000,tools of two or more of the headings 8202 to 
8205 put up in sets for retail sale tools of two or more of the headings 8202 to 8205 put up in sets for retail sale including dies f or drawing or driving extruding metal and rock drilling or earth boring tools rock drilling or earth boring tools +82060010,"TOOLS OF TWO OR MORE OF THE HEADINGS 8202 TO 8205, PUT UP IN SETS FOR RETAIL SALE:Tools of two or more of the headings 8202 to 8205, put up in sets for retail sale :Garage tools in sets(OLD tariff)" +82060090,"TOOLS OF TWO OR MORE OF THE HEADINGS 8202 TO 8205, PUT UP IN SETS FOR RETAIL SALE:Tools of two or more of the headings 8202 to 8205, put up in sets for retail sale :Other(OLD tariff)" +82071300,"INTERCHANGEABLE TOOLS FOR HAND TOOLS, WHETHER OR NOT POWER-OPERATED, OR FOR MACHINE-TOOLS (FOR EXAMPLE, FOR PRESSING, STAMPING, PUNCHING, TAPPING, THREADING, DRILLING, BORING, BROACHING, MILLING, TURNING OR SCREW DRIVING), INCLUDING DIES FOR DRAWING OR EXTRUDING METAL, AND ROCK DRILLING OR EARTH BORING TOOLS::With working part of cermets(OLD tariff)" +82071900,"INTERCHANGEABLE TOOLS FOR HAND TOOLS, WHETHER OR NOT POWER-OPERATED, OR FOR MACHINE-TOOLS (FOR EXAMPLE, FOR PRESSING, STAMPING, PUNCHING, TAPPING, THREADING, DRILLING, BORING, BROACHING, MILLING, TURNING OR SCREW DRIVING), INCLUDING DIES FOR DRAWING OR EXTRUDING METAL, AND ROCK DRILLING OR EARTH BORING TOOLS::Other, including parts(OLD tariff)" +82072000,"INTERCHANGEABLE TOOLS FOR HAND TOOLS, WHETHER OR NOT POWER-OPERATED, OR FOR MACHINE-TOOLS (FOR EXAMPLE, FOR PRESSING, STAMPING, PUNCHING, TAPPING, THREADING, DRILLING, BORING, BROACHING, MILLING, TURNING OR SCREW DRIVING), INCLUDING DIES FOR DRAWING OR EXTRUDING METAL, AND ROCK DRILLING OR EARTH BORING TOOLS::Dies for drawing or extruding metal(OLD tariff)" +82073000,"INTERCHANGEABLE TOOLS FOR HAND TOOLS, WHETHER OR NOT POWER-OPERATED, OR FOR MACHINE-TOOLS (FOR EXAMPLE, FOR PRESSING, STAMPING, PUNCHING, TAPPING, THREADING, DRILLING, BORING, BROACHING, MILLING, TURNING OR SCREW DRIVING), INCLUDING DIES FOR DRAWING OR EXTRUDING METAL, AND ROCK DRILLING OR EARTH BORING TOOLS::Tools for pressing, stamping or punching(OLD tariff)" +82074010,"INTERCHANGEABLE TOOLS FOR HAND TOOLS, WHETHER OR NOT POWER-OPERATED, OR FOR MACHINE-TOOLS (FOR EXAMPLE, FOR PRESSING, STAMPING, PUNCHING, TAPPING, THREADING, DRILLING, BORING, BROACHING, MILLING, TURNING OR SCREW DRIVING), INCLUDING DIES FOR DRAWING OR EXTRUDING METAL, AND ROCK DRILLING OR EARTH BORING TOOLS:Tools for tapping or threading :Chasers(OLD tariff)" +82074090,"INTERCHANGEABLE TOOLS FOR HAND TOOLS, WHETHER OR NOT POWER-OPERATED, OR FOR MACHINE-TOOLS (FOR EXAMPLE, FOR PRESSING, STAMPING, PUNCHING, TAPPING, THREADING, DRILLING, BORING, BROACHING, MILLING, TURNING OR SCREW DRIVING), INCLUDING DIES FOR DRAWING OR EXTRUDING METAL, AND ROCK DRILLING OR EARTH BORING TOOLS:Tools for tapping or threading :Other(OLD tariff)" +82075000,"INTERCHANGEABLE TOOLS FOR HAND TOOLS, WHETHER OR NOT POWER-OPERATED, OR FOR MACHINE-TOOLS (FOR EXAMPLE, FOR PRESSING, STAMPING, PUNCHING, TAPPING, THREADING, DRILLING, BORING, BROACHING, MILLING, TURNING OR SCREW DRIVING), INCLUDING DIES FOR DRAWING OR EXTRUDING METAL, AND ROCK DRILLING OR EARTH BORING TOOLS::Tools for drilling, other than for rock drilling(OLD tariff)" +82076010,"INTERCHANGEABLE TOOLS FOR HAND TOOLS, WHETHER OR NOT POWER-OPERATED, OR FOR MACHINE-TOOLS (FOR EXAMPLE, FOR PRESSING, STAMPING, PUNCHING, TAPPING, THREADING, DRILLING, BORING, BROACHING, MILLING, TURNING OR SCREW DRIVING), INCLUDING DIES FOR DRAWING OR EXTRUDING METAL, AND ROCK DRILLING OR 
EARTH BORING TOOLS:Tools for boring or broaching:Reamers(OLD tariff)" +82076090,"INTERCHANGEABLE TOOLS FOR HAND TOOLS, WHETHER OR NOT POWER-OPERATED, OR FOR MACHINE-TOOLS (FOR EXAMPLE, FOR PRESSING, STAMPING, PUNCHING, TAPPING, THREADING, DRILLING, BORING, BROACHING, MILLING, TURNING OR SCREW DRIVING), INCLUDING DIES FOR DRAWING OR EXTRUDING METAL, AND ROCK DRILLING OR EARTH BORING TOOLS:Tools for boring or broaching:Other(OLD tariff)" +82077010,"INTERCHANGEABLE TOOLS FOR HAND TOOLS, WHETHER OR NOT POWER-OPERATED, OR FOR MACHINE-TOOLS (FOR EXAMPLE, FOR PRESSING, STAMPING, PUNCHING, TAPPING, THREADING, DRILLING, BORING, BROACHING, MILLING, TURNING OR SCREW DRIVING), INCLUDING DIES FOR DRAWING OR EXTRUDING METAL, AND ROCK DRILLING OR EARTH BORING TOOLS:Tools for milling :Cutters(OLD tariff)" +82077090,"INTERCHANGEABLE TOOLS FOR HAND TOOLS, WHETHER OR NOT POWER-OPERATED, OR FOR MACHINE-TOOLS (FOR EXAMPLE, FOR PRESSING, STAMPING, PUNCHING, TAPPING, THREADING, DRILLING, BORING, BROACHING, MILLING, TURNING OR SCREW DRIVING), INCLUDING DIES FOR DRAWING OR EXTRUDING METAL, AND ROCK DRILLING OR EARTH BORING TOOLS:Tools for milling :Other(OLD tariff)" +82078000,"INTERCHANGEABLE TOOLS FOR HAND TOOLS, WHETHER OR NOT POWER-OPERATED, OR FOR MACHINE-TOOLS (FOR EXAMPLE, FOR PRESSING, STAMPING, PUNCHING, TAPPING, THREADING, DRILLING, BORING, BROACHING, MILLING, TURNING OR SCREW DRIVING), INCLUDING DIES FOR DRAWING OR EXTRUDING METAL, AND ROCK DRILLING OR EARTH BORING TOOLS::Tools for turning(OLD tariff)" +82079010,"INTERCHANGEABLE TOOLS FOR HAND TOOLS, WHETHER OR NOT POWER-OPERATED, OR FOR MACHINE-TOOLS (FOR EXAMPLE, FOR PRESSING, STAMPING, PUNCHING, TAPPING, THREADING, DRILLING, BORING, BROACHING, MILLING, TURNING OR SCREW DRIVING), INCLUDING DIES FOR DRAWING OR EXTRUDING METAL, AND ROCK DRILLING OR EARTH BORING TOOLS:Other interchangeable tools:For metal working hand tools(OLD tariff)" +82079020,"INTERCHANGEABLE TOOLS FOR HAND TOOLS, WHETHER OR NOT POWER-OPERATED, OR FOR MACHINE-TOOLS (FOR EXAMPLE, FOR PRESSING, STAMPING, PUNCHING, TAPPING, THREADING, DRILLING, BORING, BROACHING, MILLING, TURNING OR SCREW DRIVING), INCLUDING DIES FOR DRAWING OR EXTRUDING METAL, AND ROCK DRILLING OR EARTH BORING TOOLS:Other interchangeable tools:For wood working hand tools(OLD tariff)" +82079030,"INTERCHANGEABLE TOOLS FOR HAND TOOLS, WHETHER OR NOT POWER-OPERATED, OR FOR MACHINE-TOOLS (FOR EXAMPLE, FOR PRESSING, STAMPING, PUNCHING, TAPPING, THREADING, DRILLING, BORING, BROACHING, MILLING, TURNING OR SCREW DRIVING), INCLUDING DIES FOR DRAWING OR EXTRUDING METAL, AND ROCK DRILLING OR EARTH BORING TOOLS:Other interchangeable tools:Lathe tools and tool belts(OLD tariff)" +82079090,"INTERCHANGEABLE TOOLS FOR HAND TOOLS, WHETHER OR NOT POWER-OPERATED, OR FOR MACHINE-TOOLS (FOR EXAMPLE, FOR PRESSING, STAMPING, PUNCHING, TAPPING, THREADING, DRILLING, BORING, BROACHING, MILLING, TURNING OR SCREW DRIVING), INCLUDING DIES FOR DRAWING OR EXTRUDING METAL, AND ROCK DRILLING OR EARTH BORING TOOLS:Other interchangeable tools:Other(OLD tariff)" +82080000,knives and cutting blades for machines or for mechanical appliances +82081000,"KNIVES AND CUTTING BLADES, FOR MACHINES OR FOR MECHANICAL APPLIANCES::For metal working" +82082000,"KNIVES AND CUTTING BLADES, FOR MACHINES OR FOR MECHANICAL APPLIANCES::For wood working" +82083000,"KNIVES AND CUTTING BLADES, FOR MACHINES OR FOR MECHANICAL APPLIANCES::For kitchen appliances or for machines used by the food industry" +82084000,"KNIVES AND CUTTING BLADES, FOR MACHINES OR FOR MECHANICAL 
APPLIANCES::For agricultural, horticultural or forestry machines" +82089000,knives and cutting blades for machines or for mechanical appliances >> other +82089010,"KNIVES AND CUTTING BLADES, FOR MACHINES OR FOR MECHANICAL APPLIANCES:Other :Knives and cutting blades for paper cutting machines" +82089020,"KNIVES AND CUTTING BLADES, FOR MACHINES OR FOR MECHANICAL APPLIANCES:Other :Bell skiving knives" +82089030,"KNIVES AND CUTTING BLADES, FOR MACHINES OR FOR MECHANICAL APPLIANCES:Other :Band knives for splitting machine" +82089040,"KNIVES AND CUTTING BLADES, FOR MACHINES OR FOR MECHANICAL APPLIANCES:Other :Cutting and clicking dies" +82089090,"KNIVES AND CUTTING BLADES, FOR MACHINES OR FOR MECHANICAL APPLIANCES:Other :Other" +82090000,plates sticks tips and the like for tools +82090010,"PLATES, STICKS, TIPS AND THE LIKE FOR TOOLS, UNMOUNTED, OF CERMETS:Plates, sticks, tips and the like for tools, unmounted, of cermets:Tungsten carbide tips(OLD tariff)" +82090090,"PLATES, STICKS, TIPS AND THE LIKE FOR TOOLS, UNMOUNTED, OF CERMETS:Plates, sticks, tips and the like for tools, unmounted, of cermets:Other" +82100000,"::HAND-OPERATED MECHANICAL APPLIANCES, WEIGHING 10 KG. OR LESS, USED IN THE PREPARATION, CONDITIONING OR SERVING OF FOOD OR DRINK" +82110000,knives with cutting blades serrated or not including pruning knives other than knives +82111000,"KNIVES WITH CUTTING BLADES, SERRATED OR NOT (INCLUDING PRUNING KNIVES), OTHER THAN KNIVES OF HEADING 8208, AND BLADES THEREFOR::Sets of assorted articles(OLD tariff)" +82119100,"KNIVES WITH CUTTING BLADES, SERRATED OR NOT (INCLUDING PRUNING KNIVES), OTHER THAN KNIVES OF HEADING 8208, AND BLADES THEREFOR::Table knives having fixed blades" +82119200,"KNIVES WITH CUTTING BLADES, SERRATED OR NOT (INCLUDING PRUNING KNIVES), OTHER THAN KNIVES OF HEADING 8208, AND BLADES THEREFOR::Other knives having fixed blades" +82119300,knives with cutting blades serrated or not including pruning knives other than knives >> knives having other than fixed blades +82119310,"KNIVES WITH CUTTING BLADES, SERRATED OR NOT (INCLUDING PRUNING KNIVES), OTHER THAN KNIVES OF HEADING 8208, AND BLADES THEREFOR:Knives having other than fixed blades:Pocket knives" +82119390,"KNIVES WITH CUTTING BLADES, SERRATED OR NOT (INCLUDING PRUNING KNIVES), OTHER THAN KNIVES OF HEADING 8208, AND BLADES THEREFOR:Knives having other than fixed blades:Other" +82119400,"KNIVES WITH CUTTING BLADES, SERRATED OR NOT (INCLUDING PRUNING KNIVES), OTHER THAN KNIVES OF HEADING 8208, AND BLADES THEREFOR::Blades" +82119500,"KNIVES WITH CUTTING BLADES, SERRATED OR NOT (INCLUDING PRUNING KNIVES), OTHER THAN KNIVES OF HEADING 8208, AND BLADES THEREFOR::Handles of base metal" +82120000,razors and razor blades including razorblade blanks in strips +82121000,razors and razor blades including razorblade blanks in strips >> razors +82121010,RAZORS AND RAZOR BLADES (INCLUDING RAZOR BLADE BLANKS IN STRIPS):Razors :Twin type shaving +82121090,RAZORS AND RAZOR BLADES (INCLUDING RAZOR BLADE BLANKS IN STRIPS):Razors :Other +82122000,razors and razor blades including razorblade blanks in strips >> safety razor blades including razor blade blanks in strips safety razor blades +82122011,"RAZORS AND RAZOR BLADES (INCLUDING RAZOR BLADE BLANKS IN STRIPS):Safety razor blades, including razor blade blanks in strips:Disposable catridge blades" +82122019,"RAZORS AND RAZOR BLADES (INCLUDING RAZOR BLADE BLANKS IN STRIPS):Safety razor blades, including razor blade blanks in strips:Other" +82122020,"RAZORS AND RAZOR BLADES (INCLUDING RAZOR 
BLADE BLANKS IN STRIPS):Safety razor blades, including razor blade blanks in strips:Safety razor blade blanks, in strips" +82129000,RAZORS AND RAZOR BLADES (INCLUDING RAZOR BLADE BLANKS IN STRIPS)::Other parts +82130000,"::SCISSORS, TAILOR'S SHEARS AND SIMILAR SHEARS, AND BLADES THEREFOR" +82141010,"OTHER ARTICLES OF CUTLERY (FOR EXAMPLE, HAIR CLIPPERS, BUTCHER'S OR KITCHEN CLEAVERS, CHOPPERS AND MINCING KNIVES, PAPER KNIVES); MANICURE OR PEDICURE SETS AND INSTRUMENTS (INCLUDING NAIL FILES):Paper knives, letter openers, erasing knives, pencil sharpeners and blades therefor:Paper knives, letter openers, erasing knives, pencil sharpeners(OLD tariff)" +82141090,"OTHER ARTICLES OF CUTLERY (FOR EXAMPLE, HAIR CLIPPERS, BUTCHER'S OR KITCHEN CLEAVERS, CHOPPERS AND MINCING KNIVES, PAPER KNIVES); MANICURE OR PEDICURE SETS AND INSTRUMENTS (INCLUDING NAIL FILES):Paper knives, letter openers, erasing knives, pencil sharpeners and blades therefor:Blades(OLD tariff)" +82142010,"OTHER ARTICLES OF CUTLERY (FOR EXAMPLE, HAIR CLIPPERS, BUTCHER'S OR KITCHEN CLEAVERS, CHOPPERS AND MINCING KNIVES, PAPER KNIVES); MANICURE OR PEDICURE SETS AND INSTRUMENTS (INCLUDING NAIL FILES):Manicure or pedicure sets and instruments (including nail files):Nail cutters(OLD tariff)" +82142090,"OTHER ARTICLES OF CUTLERY (FOR EXAMPLE, HAIR CLIPPERS, BUTCHER'S OR KITCHEN CLEAVERS, CHOPPERS AND MINCING KNIVES, PAPER KNIVES); MANICURE OR PEDICURE SETS AND INSTRUMENTS (INCLUDING NAIL FILES):Manicure or pedicure sets and instruments (including nail files):Other(OLD tariff)" +82149010,"OTHER ARTICLES OF CUTLERY (FOR EXAMPLE, HAIR CLIPPERS, BUTCHER'S OR KITCHEN CLEAVERS, CHOPPERS AND MINCING KNIVES, PAPER KNIVES); MANICURE OR PEDICURE SETS AND INSTRUMENTS (INCLUDING NAIL FILES):Other :Handles of cutlery of base metal(OLD tariff)" +82149090,"OTHER ARTICLES OF CUTLERY (FOR EXAMPLE, HAIR CLIPPERS, BUTCHER'S OR KITCHEN CLEAVERS, CHOPPERS AND MINCING KNIVES, PAPER KNIVES); MANICURE OR PEDICURE SETS AND INSTRUMENTS (INCLUDING NAIL FILES):Other :Other(OLD tariff)" +82150000,spoons forks ladles skimmers sugar tongs and similar kitchen or tableware +82151000,"SPOONS, FORKS, LADLES, SKIMMERS, CAKE-SERVERS, FISHKNIVES, BUTTER-KNIVES, SUGAR TONGS AND SIMILAR KITCHEN OR TABLEWARE::Sets of assorted articles containing at least one article plated with precious metal" +82152000,"SPOONS, FORKS, LADLES, SKIMMERS, CAKE-SERVERS, FISHKNIVES, BUTTER-KNIVES, SUGAR TONGS AND SIMILAR KITCHEN OR TABLEWARE::Other sets of assorted articles" +82159100,"SPOONS, FORKS, LADLES, SKIMMERS, CAKE-SERVERS, FISHKNIVES, BUTTER-KNIVES, SUGAR TONGS AND SIMILAR KITCHEN OR TABLEWARE::Plated with precious metal" +82159900,"SPOONS, FORKS, LADLES, SKIMMERS, CAKE-SERVERS, FISHKNIVES, BUTTER-KNIVES, SUGAR TONGS AND SIMILAR KITCHEN OR TABLEWARE::Other" +83011000,"PADLOCKS AND LOCKS (KEY, COMBINATION OR ELECTRICALLY OPERATED), OF BASE METAL; CLASPS AND FRAMES WITH CLASPS, INCORPORATING LOCKS, OF BASE METAL; KEYS FOR ANY OF THE FOREGOING ARTICLES, OF BASE METAL::Padlocks(OLD tariff)" +83012000,"PADLOCKS AND LOCKS (KEY, COMBINATION OR ELECTRICALLY OPERATED), OF BASE METAL; CLASPS AND FRAMES WITH CLASPS, INCORPORATING LOCKS, OF BASE METAL; KEYS FOR ANY OF THE FOREGOING ARTICLES, OF BASE METAL::Locks of a kind used for motor vehicles(OLD tariff)" +83013000,"PADLOCKS AND LOCKS (KEY, COMBINATION OR ELECTRICALLY OPERATED), OF BASE METAL; CLASPS AND FRAMES WITH CLASPS, INCORPORATING LOCKS, OF BASE METAL; KEYS FOR ANY OF THE FOREGOING ARTICLES, OF BASE METAL::Locks of a kind used for 
furniture(OLD tariff)" +83014010,"PADLOCKS AND LOCKS (KEY, COMBINATION OR ELECTRICALLY OPERATED), OF BASE METAL; CLASPS AND FRAMES WITH CLASPS, INCORPORATING LOCKS, OF BASE METAL; KEYS FOR ANY OF THE FOREGOING ARTICLES, OF BASE METAL:Other locks :Combination locks(OLD tariff)" +83014090,"PADLOCKS AND LOCKS (KEY, COMBINATION OR ELECTRICALLY OPERATED), OF BASE METAL; CLASPS AND FRAMES WITH CLASPS, INCORPORATING LOCKS, OF BASE METAL; KEYS FOR ANY OF THE FOREGOING ARTICLES, OF BASE METAL:Other locks :Other(OLD tariff)" +83015000,"PADLOCKS AND LOCKS (KEY, COMBINATION OR ELECTRICALLY OPERATED), OF BASE METAL; CLASPS AND FRAMES WITH CLASPS, INCORPORATING LOCKS, OF BASE METAL; KEYS FOR ANY OF THE FOREGOING ARTICLES, OF BASE METAL::Clasps and frames with clasps, incorporating locks(OLD tariff)" +83016000,"PADLOCKS AND LOCKS (KEY, COMBINATION OR ELECTRICALLY OPERATED), OF BASE METAL; CLASPS AND FRAMES WITH CLASPS, INCORPORATING LOCKS, OF BASE METAL; KEYS FOR ANY OF THE FOREGOING ARTICLES, OF BASE METAL::Parts(OLD tariff)" +83017000,"PADLOCKS AND LOCKS (KEY, COMBINATION OR ELECTRICALLY OPERATED), OF BASE METAL; CLASPS AND FRAMES WITH CLASPS, INCORPORATING LOCKS, OF BASE METAL; KEYS FOR ANY OF THE FOREGOING ARTICLES, OF BASE METAL::Keys presented separately(OLD tariff)" +83021010,"BASE METAL MOUNTINGS, FITTINGS AND SIMILAR ARTICLES SUITABLE FOR FURNITURE, DOORS, STAIRCASES, WINDOWS, BLINDS, COACHWORK, SADDLERY, TRUNKS, CHESTS, CASKETS OR THE LIKE; BASE METAL HAT-RACKS, HAT-PEGS, BRACKETS AND SIMILAR FIXTURES; CASTORS WITH MOUNTINGS OF BASE METAL; AUTOMATIC DOOR CLOSERS OF BASE METAL:Hinges:Of steel(OLD tariff)" +83021020,"BASE METAL MOUNTINGS, FITTINGS AND SIMILAR ARTICLES SUITABLE FOR FURNITURE, DOORS, STAIRCASES, WINDOWS, BLINDS, COACHWORK, SADDLERY, TRUNKS, CHESTS, CASKETS OR THE LIKE; BASE METAL HAT-RACKS, HAT-PEGS, BRACKETS AND SIMILAR FIXTURES; CASTORS WITH MOUNTINGS OF BASE METAL; AUTOMATIC DOOR CLOSERS OF BASE METAL:Hinges:Of brass(OLD tariff)" +83021090,"BASE METAL MOUNTINGS, FITTINGS AND SIMILAR ARTICLES SUITABLE FOR FURNITURE, DOORS, STAIRCASES, WINDOWS, BLINDS, COACHWORK, SADDLERY, TRUNKS, CHESTS, CASKETS OR THE LIKE; BASE METAL HAT-RACKS, HAT-PEGS, BRACKETS AND SIMILAR FIXTURES; CASTORS WITH MOUNTINGS OF BASE METAL; AUTOMATIC DOOR CLOSERS OF BASE METAL:Hinges:Other(OLD tariff)" +83022000,"BASE METAL MOUNTINGS, FITTINGS AND SIMILAR ARTICLES SUITABLE FOR FURNITURE, DOORS, STAIRCASES, WINDOWS, BLINDS, COACHWORK, SADDLERY, TRUNKS, CHESTS, CASKETS OR THE LIKE; BASE METAL HAT-RACKS, HAT-PEGS, BRACKETS AND SIMILAR FIXTURES; CASTORS WITH MOUNTINGS OF BASE METAL; AUTOMATIC DOOR CLOSERS OF BASE METAL::Castors(OLD tariff)" +83023010,"BASE METAL MOUNTINGS, FITTINGS AND SIMILAR ARTICLES SUITABLE FOR FURNITURE, DOORS, STAIRCASES, WINDOWS, BLINDS, COACHWORK, SADDLERY, TRUNKS, CHESTS, CASKETS OR THE LIKE; BASE METAL HAT-RACKS, HAT-PEGS, BRACKETS AND SIMILAR FIXTURES; CASTORS WITH MOUNTINGS OF BASE METAL; AUTOMATIC DOOR CLOSERS OF BASE METAL:Other mountings, fittings and similar articles suitable for motor vehicles:Curve drive stakes(OLD tariff)" +83023090,"BASE METAL MOUNTINGS, FITTINGS AND SIMILAR ARTICLES SUITABLE FOR FURNITURE, DOORS, STAIRCASES, WINDOWS, BLINDS, COACHWORK, SADDLERY, TRUNKS, CHESTS, CASKETS OR THE LIKE; BASE METAL HAT-RACKS, HAT-PEGS, BRACKETS AND SIMILAR FIXTURES; CASTORS WITH MOUNTINGS OF BASE METAL; AUTOMATIC DOOR CLOSERS OF BASE METAL:Other mountings, fittings and similar articles suitable for motor vehicles:Other(OLD tariff)" +83024110,"BASE METAL MOUNTINGS, FITTINGS AND SIMILAR 
ARTICLES SUITABLE FOR FURNITURE, DOORS, STAIRCASES, WINDOWS, BLINDS, COACHWORK, SADDLERY, TRUNKS, CHESTS, CASKETS OR THE LIKE; BASE METAL HAT-RACKS, HAT-PEGS, BRACKETS AND SIMILAR FIXTURES; CASTORS WITH MOUNTINGS OF BASE METAL; AUTOMATIC DOOR CLOSERS OF BASE METAL:Suitable for buildings:Fittings for doors and windows(OLD tariff)" +83024120,"BASE METAL MOUNTINGS, FITTINGS AND SIMILAR ARTICLES SUITABLE FOR FURNITURE, DOORS, STAIRCASES, WINDOWS, BLINDS, COACHWORK, SADDLERY, TRUNKS, CHESTS, CASKETS OR THE LIKE; BASE METAL HAT-RACKS, HAT-PEGS, BRACKETS AND SIMILAR FIXTURES; CASTORS WITH MOUNTINGS OF BASE METAL; AUTOMATIC DOOR CLOSERS OF BASE METAL:Suitable for buildings:Tower bolts(OLD tariff)" +83024190,"BASE METAL MOUNTINGS, FITTINGS AND SIMILAR ARTICLES SUITABLE FOR FURNITURE, DOORS, STAIRCASES, WINDOWS, BLINDS, COACHWORK, SADDLERY, TRUNKS, CHESTS, CASKETS OR THE LIKE; BASE METAL HAT-RACKS, HAT-PEGS, BRACKETS AND SIMILAR FIXTURES; CASTORS WITH MOUNTINGS OF BASE METAL; AUTOMATIC DOOR CLOSERS OF BASE METAL:Suitable for buildings:Other(OLD tariff)" +83024200,"BASE METAL MOUNTINGS, FITTINGS AND SIMILAR ARTICLES SUITABLE FOR FURNITURE, DOORS, STAIRCASES, WINDOWS, BLINDS, COACHWORK, SADDLERY, TRUNKS, CHESTS, CASKETS OR THE LIKE; BASE METAL HAT-RACKS, HAT-PEGS, BRACKETS AND SIMILAR FIXTURES; CASTORS WITH MOUNTINGS OF BASE METAL; AUTOMATIC DOOR CLOSERS OF BASE METAL::Other, suitable for furniture(OLD tariff)" +83024900,"BASE METAL MOUNTINGS, FITTINGS AND SIMILAR ARTICLES SUITABLE FOR FURNITURE, DOORS, STAIRCASES, WINDOWS, BLINDS, COACHWORK, SADDLERY, TRUNKS, CHESTS, CASKETS OR THE LIKE; BASE METAL HAT-RACKS, HAT-PEGS, BRACKETS AND SIMILAR FIXTURES; CASTORS WITH MOUNTINGS OF BASE METAL; AUTOMATIC DOOR CLOSERS OF BASE METAL::Other(OLD tariff)" +83025000,"BASE METAL MOUNTINGS, FITTINGS AND SIMILAR ARTICLES SUITABLE FOR FURNITURE, DOORS, STAIRCASES, WINDOWS, BLINDS, COACHWORK, SADDLERY, TRUNKS, CHESTS, CASKETS OR THE LIKE; BASE METAL HAT-RACKS, HAT-PEGS, BRACKETS AND SIMILAR FIXTURES; CASTORS WITH MOUNTINGS OF BASE METAL; AUTOMATIC DOOR CLOSERS OF BASE METAL::Hat-racks, hat-pegs, brackets and similar fixtures(OLD tariff)" +83026000,"BASE METAL MOUNTINGS, FITTINGS AND SIMILAR ARTICLES SUITABLE FOR FURNITURE, DOORS, STAIRCASES, WINDOWS, BLINDS, COACHWORK, SADDLERY, TRUNKS, CHESTS, CASKETS OR THE LIKE; BASE METAL HAT-RACKS, HAT-PEGS, BRACKETS AND SIMILAR FIXTURES; CASTORS WITH MOUNTINGS OF BASE METAL; AUTOMATIC DOOR CLOSERS OF BASE METAL::Automatic door closers(OLD tariff)" +83030000,"::ARMOURED OR REINFORCED SAFES, STRONG-BOXES AND DOORS AND SAFE DEPOSIT LOCKERS FOR STRONG-ROOMS, CASH OR DEED BOXES AND THE LIKE, OF BASE METAL" +83040000,"::FILING, CABINETS, CARD-INDEX CABINETS, PAPER TRAYS, PAPER RESTS, PEN TRAYS, OFFICE-STAMP STANDS AND SIMILAR OFFICE OR DESK EQUIPMENT, OF BASE METAL, OTHER THAN OFFICE FURNITURE OF HEADING 9403" +83050000,fittings for binders or files letter clips letter corners paper clips indexing tags and similar office articles of base metal staples in strips for example for offices upholstery packaging of base +83051000,"FITTINGS FOR LOOSE-LEAF BINDERS OR FILES, LETTER CLIPS, LETTER CORNERS, PAPER CLIPS, INDEXING TAGS AND SIMILAR OFFICE ARTICLES, OF BASE METAL; STAPLES IN STRIPS (FOR EXAMPLE, FOR OFFICES, UPHOLSTERY, PACKAGING), OF BASE METAL::Fittings for loose-leaf binders or files(OLD tariff)" +83052000,"FITTINGS FOR LOOSE-LEAF BINDERS OR FILES, LETTER CLIPS, LETTER CORNERS, PAPER CLIPS, INDEXING TAGS AND SIMILAR OFFICE ARTICLES, OF BASE METAL; STAPLES IN STRIPS (FOR EXAMPLE, FOR 
OFFICES, UPHOLSTERY, PACKAGING), OF BASE METAL::Staples in strips(OLD tariff)" +83059010,"FITTINGS FOR LOOSE-LEAF BINDERS OR FILES, LETTER CLIPS, LETTER CORNERS, PAPER CLIPS, INDEXING TAGS AND SIMILAR OFFICE ARTICLES, OF BASE METAL; STAPLES IN STRIPS (FOR EXAMPLE, FOR OFFICES, UPHOLSTERY, PACKAGING), OF BASE METAL:Other, including parts:Pins (other than those of heading 7317)(OLD tariff)" +83059020,"FITTINGS FOR LOOSE-LEAF BINDERS OR FILES, LETTER CLIPS, LETTER CORNERS, PAPER CLIPS, INDEXING TAGS AND SIMILAR OFFICE ARTICLES, OF BASE METAL; STAPLES IN STRIPS (FOR EXAMPLE, FOR OFFICES, UPHOLSTERY, PACKAGING), OF BASE METAL:Other, including parts:Clips(OLD tariff)" +83059090,"FITTINGS FOR LOOSE-LEAF BINDERS OR FILES, LETTER CLIPS, LETTER CORNERS, PAPER CLIPS, INDEXING TAGS AND SIMILAR OFFICE ARTICLES, OF BASE METAL; STAPLES IN STRIPS (FOR EXAMPLE, FOR OFFICES, UPHOLSTERY, PACKAGING), OF BASE METAL:Other, including parts:Other(OLD tariff)" +83061000,"BELLS, GONGS AND THE LIKE, NON-ELECTRIC, OF BASE METAL; STATUETTES AND OTHER ORNAMENTS, OF BASE METAL; PHOTOGRAPH, PICTURE OR SIMILAR FRAMES, OF BASE METAL; MIRRORS OF BASE METAL::Bells, gongs and the like(OLD tariff)" +83062110,"BELLS, GONGS AND THE LIKE, NON-ELECTRIC, OF BASE METAL; STATUETTES AND OTHER ORNAMENTS, OF BASE METAL; PHOTOGRAPH, PICTURE OR SIMILAR FRAMES, OF BASE METAL; MIRRORS OF BASE METAL:Plated with precious metal:Statuettes(OLD tariff)" +83062120,"BELLS, GONGS AND THE LIKE, NON-ELECTRIC, OF BASE METAL; STATUETTES AND OTHER ORNAMENTS, OF BASE METAL; PHOTOGRAPH, PICTURE OR SIMILAR FRAMES, OF BASE METAL; MIRRORS OF BASE METAL:Plated with precious metal:Trophies(OLD tariff)" +83062190,"BELLS, GONGS AND THE LIKE, NON-ELECTRIC, OF BASE METAL; STATUETTES AND OTHER ORNAMENTS, OF BASE METAL; PHOTOGRAPH, PICTURE OR SIMILAR FRAMES, OF BASE METAL; MIRRORS OF BASE METAL:Plated with precious metal:Other(OLD tariff)" +83062910,"BELLS, GONGS AND THE LIKE, NON-ELECTRIC, OF BASE METAL; STATUETTES AND OTHER ORNAMENTS, OF BASE METAL; PHOTOGRAPH, PICTURE OR SIMILAR FRAMES, OF BASE METAL; MIRRORS OF BASE METAL:Other:Statuettes(OLD tariff)" +83062920,"BELLS, GONGS AND THE LIKE, NON-ELECTRIC, OF BASE METAL; STATUETTES AND OTHER ORNAMENTS, OF BASE METAL; PHOTOGRAPH, PICTURE OR SIMILAR FRAMES, OF BASE METAL; MIRRORS OF BASE METAL:Other:Trophies(OLD tariff)" +83062990,"BELLS, GONGS AND THE LIKE, NON-ELECTRIC, OF BASE METAL; STATUETTES AND OTHER ORNAMENTS, OF BASE METAL; PHOTOGRAPH, PICTURE OR SIMILAR FRAMES, OF BASE METAL; MIRRORS OF BASE METAL:Other:Other(OLD tariff)" +83063000,"BELLS, GONGS AND THE LIKE, NON-ELECTRIC, OF BASE METAL; STATUETTES AND OTHER ORNAMENTS, OF BASE METAL; PHOTOGRAPH, PICTURE OR SIMILAR FRAMES, OF BASE METAL; MIRRORS OF BASE METAL::Photograph, picture or similar frames; mirrors(OLD tariff)" +83070000,flexible tubing of base metal with or without fittings +83071000,"FLEXIBLE TUBING OF BASE METAL, WITH OR WITHOUT FITTINGS::Of iron or steel" +83079000,"FLEXIBLE TUBING OF BASE METAL, WITH OR WITHOUT FITTINGS::Of other base metal" +83080000,clasps frames with clasps buckles buckle clasps hooks eyes eyelets and the like of +83081010,"CLASPS, FRAMES WITH CLASPS, BUCKLES, BUCKLE-CLASPS, HOOKS, EYES, EYELETS AND THE LIKE, OF BASE METAL, OF A KIND USED FOR CLOTHING, FOOTWEAR, AWNINGS, HANDBAGS, TRAVEL GOODS OR OTHER MADE UP ARTICLES; TUBULAR OR BIFURCATED RIVETS, OF BASE METAL; BEADS AND SPANGLES, OF BASE METALS:Hooks, eyes and eyelets:Hooks and eyes(OLD tariff)" +83081021,"CLASPS, FRAMES WITH CLASPS, BUCKLES, BUCKLE-CLASPS, HOOKS, EYES, 
EYELETS AND THE LIKE, OF BASE METAL, OF A KIND USED FOR CLOTHING, FOOTWEAR, AWNINGS, HANDBAGS, TRAVEL GOODS OR OTHER MADE UP ARTICLES; TUBULAR OR BIFURCATED RIVETS, OF BASE METAL; BEADS AND SPANGLES, OF BASE METALS:Hooks, eyes and eyelets:For footwear(OLD tariff)" +83081029,"CLASPS, FRAMES WITH CLASPS, BUCKLES, BUCKLE-CLASPS, HOOKS, EYES, EYELETS AND THE LIKE, OF BASE METAL, OF A KIND USED FOR CLOTHING, FOOTWEAR, AWNINGS, HANDBAGS, TRAVEL GOODS OR OTHER MADE UP ARTICLES; TUBULAR OR BIFURCATED RIVETS, OF BASE METAL; BEADS AND SPANGLES, OF BASE METALS:Hooks, eyes and eyelets:Other(OLD tariff)" +83082000,"CLASPS, FRAMES WITH CLASPS, BUCKLES, BUCKLE-CLASPS, HOOKS, EYES, EYELETS AND THE LIKE, OF BASE METAL, OF A KIND USED FOR CLOTHING, FOOTWEAR, AWNINGS, HANDBAGS, TRAVEL GOODS OR OTHER MADE UP ARTICLES; TUBULAR OR BIFURCATED RIVETS, OF BASE METAL; BEADS AND SPANGLES, OF BASE METALS::Tubular or bifurcated rivets(OLD tariff)" +83089011,"CLASPS, FRAMES WITH CLASPS, BUCKLES, BUCKLE-CLASPS, HOOKS, EYES, EYELETS AND THE LIKE, OF BASE METAL, OF A KIND USED FOR CLOTHING, FOOTWEAR, AWNINGS, HANDBAGS, TRAVEL GOODS OR OTHER MADE UP ARTICLES; TUBULAR OR BIFURCATED RIVETS, OF BASE METAL; BEADS AND SPANGLES, OF BASE METALS:Other, including parts:For footwear(OLD tariff)" +83089019,"CLASPS, FRAMES WITH CLASPS, BUCKLES, BUCKLE-CLASPS, HOOKS, EYES, EYELETS AND THE LIKE, OF BASE METAL, OF A KIND USED FOR CLOTHING, FOOTWEAR, AWNINGS, HANDBAGS, TRAVEL GOODS OR OTHER MADE UP ARTICLES; TUBULAR OR BIFURCATED RIVETS, OF BASE METAL; BEADS AND SPANGLES, OF BASE METALS:Other, including parts:Other(OLD tariff)" +83089020,"CLASPS, FRAMES WITH CLASPS, BUCKLES, BUCKLE-CLASPS, HOOKS, EYES, EYELETS AND THE LIKE, OF BASE METAL, OF A KIND USED FOR CLOTHING, FOOTWEAR, AWNINGS, HANDBAGS, TRAVEL GOODS OR OTHER MADE UP ARTICLES; TUBULAR OR BIFURCATED RIVETS, OF BASE METAL; BEADS AND SPANGLES, OF BASE METALS:Other, including parts:Imitation zari spangles(OLD tariff)" +83089031,"CLASPS, FRAMES WITH CLASPS, BUCKLES, BUCKLE-CLASPS, HOOKS, EYES, EYELETS AND THE LIKE, OF BASE METAL, OF A KIND USED FOR CLOTHING, FOOTWEAR, AWNINGS, HANDBAGS, TRAVEL GOODS OR OTHER MADE UP ARTICLES; TUBULAR OR BIFURCATED RIVETS, OF BASE METAL; BEADS AND SPANGLES, OF BASE METALS:Other, including parts:For garments, made ups, knitwear, plastic and leather goods(OLD tariff)" +83089039,"CLASPS, FRAMES WITH CLASPS, BUCKLES, BUCKLE-CLASPS, HOOKS, EYES, EYELETS AND THE LIKE, OF BASE METAL, OF A KIND USED FOR CLOTHING, FOOTWEAR, AWNINGS, HANDBAGS, TRAVEL GOODS OR OTHER MADE UP ARTICLES; TUBULAR OR BIFURCATED RIVETS, OF BASE METAL; BEADS AND SPANGLES, OF BASE METALS:Other, including parts:Other(OLD tariff)" +83089040,"CLASPS, FRAMES WITH CLASPS, BUCKLES, BUCKLE-CLASPS, HOOKS, EYES, EYELETS AND THE LIKE, OF BASE METAL, OF A KIND USED FOR CLOTHING, FOOTWEAR, AWNINGS, HANDBAGS, TRAVEL GOODS OR OTHER MADE UP ARTICLES; TUBULAR OR BIFURCATED RIVETS, OF BASE METAL; BEADS AND SPANGLES, OF BASE METALS:Other, including parts:Fittings for travel requisites and leather goods(OLD tariff)" +83089091,"CLASPS, FRAMES WITH CLASPS, BUCKLES, BUCKLE-CLASPS, HOOKS, EYES, EYELETS AND THE LIKE, OF BASE METAL, OF A KIND USED FOR CLOTHING, FOOTWEAR, AWNINGS, HANDBAGS, TRAVEL GOODS OR OTHER MADE UP ARTICLES; TUBULAR OR BIFURCATED RIVETS, OF BASE METAL; BEADS AND SPANGLES, OF BASE METALS:Other, including parts:For garments, made ups, knitwear, plastic and leather goods(OLD tariff)" +83089099,"CLASPS, FRAMES WITH CLASPS, BUCKLES, BUCKLE-CLASPS, HOOKS, EYES, EYELETS AND THE LIKE, OF BASE METAL, 
OF A KIND USED FOR CLOTHING, FOOTWEAR, AWNINGS, HANDBAGS, TRAVEL GOODS OR OTHER MADE UP ARTICLES; TUBULAR OR BIFURCATED RIVETS, OF BASE METAL; BEADS AND SPANGLES, OF BASE METALS:Other, including parts:Other(OLD tariff)" +83090000,stoppers caps and lids including crown corks screw caps and pouring stoppers capsules for bottles threaded bungs bung covers seals and other packing accessories of base metal +83091000,"STOPPERS, CAPS AND LIDS (INCLUDING CROWN CORKS, SCREW CAPS AND POURING STOPPERS), CAPSULES FOR BOTTLES, THREADED BUNGS, BUNG COVERS, SEALS AND OTHER PACKING ACCESSORIES, OF BASE METAL::Crown corks" +83099000,stoppers caps and lids including crown corks screw caps and pouring stoppers capsules for bottles threaded bungs bung covers seals and other packing accessories of base metal >> other +83099010,"STOPPERS, CAPS AND LIDS (INCLUDING CROWN CORKS, SCREW CAPS AND POURING STOPPERS), CAPSULES FOR BOTTLES, THREADED BUNGS, BUNG COVERS, SEALS AND OTHER PACKING ACCESSORIES, OF BASE METAL:Other:Pilfer proof caps for packaging, all sorts, with or without washers or other fittings, of cork, rubber, polyethylene or any other material" +83099020,"STOPPERS, CAPS AND LIDS (INCLUDING CROWN CORKS, SCREW CAPS AND POURING STOPPERS), CAPSULES FOR BOTTLES, THREADED BUNGS, BUNG COVERS, SEALS AND OTHER PACKING ACCESSORIES, OF BASE METAL:Other:Aluminium caps, seals, capsules and closers" +83099030,"STOPPERS, CAPS AND LIDS (INCLUDING CROWN CORKS, SCREW CAPS AND POURING STOPPERS), CAPSULES FOR BOTTLES, THREADED BUNGS, BUNG COVERS, SEALS AND OTHER PACKING ACCESSORIES, OF BASE METAL:Other:Other seals" +83099090,"STOPPERS, CAPS AND LIDS (INCLUDING CROWN CORKS, SCREW CAPS AND POURING STOPPERS), CAPSULES FOR BOTTLES, THREADED BUNGS, BUNG COVERS, SEALS AND OTHER PACKING ACCESSORIES, OF BASE METAL:Other:Other" +83100000,and similar plates numbers letters and other symbols of base metal excluding those of heading 9405 and similar plates numbers letters and other symbols of base metal excluding those of heading 9405 +83100010,"SIGN-PLATES, NAME-PLATES, ADDRESS-PLATES AND SIMILAR PLATES, NUMBERS, LETTERS AND OTHER SYMBOLS, OF BASE METAL, EXCLUDING THOSE OF HEADING 9405:Sign-plates, name-plates, address-plates and similar plates, numbers, letters and other symbols, of base metal, excluding those of heading 9405:Enamel iron signboard" +83100090,"SIGN-PLATES, NAME-PLATES, ADDRESS-PLATES AND SIMILAR PLATES, NUMBERS, LETTERS AND OTHER SYMBOLS, OF BASE METAL, EXCLUDING THOSE OF HEADING 9405:Sign-plates, name-plates, address-plates and similar plates, numbers, letters and other symbols, of base metal, excluding those of heading 9405:Other" +83110000,electrodes wire rods tubes plates and similar products of base metal or of metal coated or cored with carbides flux of a kind used for material soldering brazing welding or deposition of metal or of metal wire and carbides rods of agglomerated base metal powder used for metal spraying +83111000,"WIRE, RODS, TUBES, PLATES, ELECTRODES AND SIMILAR PRODUCTS, OF BASE METAL OR OF METAL CARBIDES, COATED OR CORED WITH FLUX MATERIAL, OF A KIND USED FOR SOLDERING, BRAZING, WELDING OR DEPOSITION OF METAL OR OF METAL CARBIDES; WIRE AND RODS, OF AGGLOMERATED BASE METAL POWDER, USED FOR METAL SPRAYING::Coated electrodes of base metal, for electric arc-welding" +83112000,"WIRE, RODS, TUBES, PLATES, ELECTRODES AND SIMILAR PRODUCTS, OF BASE METAL OR OF METAL CARBIDES, COATED OR CORED WITH FLUX MATERIAL, OF A KIND USED FOR SOLDERING, BRAZING, WELDING OR DEPOSITION OF METAL OR OF METAL CARBIDES; 
WIRE AND RODS, OF AGGLOMERATED BASE METAL POWDER, USED FOR METAL SPRAYING::Cored wire of base metal, for electric arc- welding" +83113000,electrodes wire rods tubes plates and similar products of base metal or of metal coated or cored with carbides flux of a kind used for material soldering brazing welding or deposition of metal or of metal wire and carbides rods of agglomerated base metal powder used for metal spraying >> coated rods and cored wire of base metal for soldering brazing or welding by flame +83113010,"WIRE, RODS, TUBES, PLATES, ELECTRODES AND SIMILAR PRODUCTS, OF BASE METAL OR OF METAL CARBIDES, COATED OR CORED WITH FLUX MATERIAL, OF A KIND USED FOR SOLDERING, BRAZING, WELDING OR DEPOSITION OF METAL OR OF METAL CARBIDES; WIRE AND RODS, OF AGGLOMERATED BASE METAL POWDER, USED FOR METAL SPRAYING:Coated rods and cored wire, of base metal, for soldering, brazing or welding by flame:Wire and rods of agglomerated base metal" +83113090,"WIRE, RODS, TUBES, PLATES, ELECTRODES AND SIMILAR PRODUCTS, OF BASE METAL OR OF METAL CARBIDES, COATED OR CORED WITH FLUX MATERIAL, OF A KIND USED FOR SOLDERING, BRAZING, WELDING OR DEPOSITION OF METAL OR OF METAL CARBIDES; WIRE AND RODS, OF AGGLOMERATED BASE METAL POWDER, USED FOR METAL SPRAYING:Coated rods and cored wire, of base metal, for soldering, brazing or welding by flame:Other" +83119000,"WIRE, RODS, TUBES, PLATES, ELECTRODES AND SIMILAR PRODUCTS, OF BASE METAL OR OF METAL CARBIDES, COATED OR CORED WITH FLUX MATERIAL, OF A KIND USED FOR SOLDERING, BRAZING, WELDING OR DEPOSITION OF METAL OR OF METAL CARBIDES; WIRE AND RODS, OF AGGLOMERATED BASE METAL POWDER, USED FOR METAL SPRAYING::Other(OLD tariff)" +84010000,for nuclear reactors machinery and apparatus for isotopic separation +84011000,"NUCLEAR REACTORS; FUEL ELEMENTS (CARTRIDGES), NON-IRRADIATED, FOR NUCLEAR REACTORS; MACHINERY AND APPARATUS FOR ISOTOPIC SEPARATION::Nuclear reactors" +84012000,"NUCLEAR REACTORS; FUEL ELEMENTS (CARTRIDGES), NON-IRRADIATED, FOR NUCLEAR REACTORS; MACHINERY AND APPARATUS FOR ISOTOPIC SEPARATION::Machinery and apparatus for isotopic separation, and parts thereof" +84013000,"NUCLEAR REACTORS; FUEL ELEMENTS (CARTRIDGES), NON-IRRADIATED, FOR NUCLEAR REACTORS; MACHINERY AND APPARATUS FOR ISOTOPIC SEPARATION::Fuel elements (cartridges), non-irradiated" +84014000,"NUCLEAR REACTORS; FUEL ELEMENTS (CARTRIDGES), NON-IRRADIATED, FOR NUCLEAR REACTORS; MACHINERY AND APPARATUS FOR ISOTOPIC SEPARATION::Parts of nuclear reactors" +84020000,steam or other vapour generating boilers other than central heating hot water b oi le rs c apable al s o of p ro duc in g lo w pressure steam water boilers steam or other vapour generating boilers +84021100,STEAM OR OTHER VAPOUR GENERATING BOILERS (OTHER THAN CENTRAL HEATING HOT WATER BOILERS CAPABLE ALSO OF PRODUCING LOW PRESSURE STEAM); SUPERHEATED WATER BOILERS::Watertube boilers with a steam production exceeding +84021200,STEAM OR OTHER VAPOUR GENERATING BOILERS (OTHER THAN CENTRAL HEATING HOT WATER BOILERS CAPABLE ALSO OF PRODUCING LOW PRESSURE STEAM); SUPERHEATED WATER BOILERS::Water tube boilers with a steam production not exceeding 45 t per hour +84021900,steam or other vapour generating boilers other than central heating hot water b oi le rs c apable al s o of p ro duc in g lo w pressure steam water boilers steam or other vapour generating boilers >> other vapour generating boilers including +84021910,"STEAM OR OTHER VAPOUR GENERATING BOILERS (OTHER THAN CENTRAL HEATING HOT WATER BOILERS CAPABLE ALSO OF PRODUCING LOW PRESSURE 
STEAM); SUPERHEATED WATER BOILERS:Other vapour generating boilers, including hybrid boilers:Fire tube horizontal (lancashire) boilers" +84021920,"STEAM OR OTHER VAPOUR GENERATING BOILERS (OTHER THAN CENTRAL HEATING HOT WATER BOILERS CAPABLE ALSO OF PRODUCING LOW PRESSURE STEAM); SUPERHEATED WATER BOILERS:Other vapour generating boilers, including hybrid boilers:Fire tube boilers vertical" +84021990,"STEAM OR OTHER VAPOUR GENERATING BOILERS (OTHER THAN CENTRAL HEATING HOT WATER BOILERS CAPABLE ALSO OF PRODUCING LOW PRESSURE STEAM); SUPERHEATED WATER BOILERS:Other vapour generating boilers, including hybrid boilers:Other" +84022000,STEAM OR OTHER VAPOUR GENERATING BOILERS (OTHER THAN CENTRAL HEATING HOT WATER BOILERS CAPABLE ALSO OF PRODUCING LOW PRESSURE STEAM); SUPERHEATED WATER BOILERS::Super-heated water boilers +84029000,steam or other vapour generating boilers other than central heating hot water b oi le rs c apable al s o of p ro duc in g lo w pressure steam water boilers steam or other vapour generating boilers >> parts +84029010,STEAM OR OTHER VAPOUR GENERATING BOILERS (OTHER THAN CENTRAL HEATING HOT WATER BOILERS CAPABLE ALSO OF PRODUCING LOW PRESSURE STEAM); SUPERHEATED WATER BOILERS:Parts:Parts of fire tube boilers +84029020,STEAM OR OTHER VAPOUR GENERATING BOILERS (OTHER THAN CENTRAL HEATING HOT WATER BOILERS CAPABLE ALSO OF PRODUCING LOW PRESSURE STEAM); SUPERHEATED WATER BOILERS:Parts:Parts of watertube boilers +84029090,STEAM OR OTHER VAPOUR GENERATING BOILERS (OTHER THAN CENTRAL HEATING HOT WATER BOILERS CAPABLE ALSO OF PRODUCING LOW PRESSURE STEAM); SUPERHEATED WATER BOILERS:Parts:Other +84030000,central heating boilers other than thoseof heading 8402 +84031000,CENTRAL HEATING BOILERS OTHER THAN THOSEOF HEADING 8402::Boilers +84039000,CENTRAL HEATING BOILERS OTHER THAN THOSEOF HEADING 8402::Parts +84040000,auxiliary plant for use with boilers of heading or 8402 8403 for example economisers soot removers gas recoverers condensers for steam or other vapour power units +84041000,"AUXILIARY PLANT FOR USE WITH BOILERS OF HEADING 8402 OR 8403 (FOR EXAMPLE, ECONOMISERS, SUPERHEATERS, SOOT REMOVERS, GAS RECOVERERS); CONDENSERS FOR STEAM OR OTHER VAPOUR POWER UNITS::Auxiliary plants for use with boilers of heading 8402 or 8403" +84042000,"AUXILIARY PLANT FOR USE WITH BOILERS OF HEADING 8402 OR 8403 (FOR EXAMPLE, ECONOMISERS, SUPERHEATERS, SOOT REMOVERS, GAS RECOVERERS); CONDENSERS FOR STEAM OR OTHER VAPOUR POWER UNITS::Condensers for steam or other vapour power units" +84049000,"AUXILIARY PLANT FOR USE WITH BOILERS OF HEADING 8402 OR 8403 (FOR EXAMPLE, ECONOMISERS, SUPERHEATERS, SOOT REMOVERS, GAS RECOVERERS); CONDENSERS FOR STEAM OR OTHER VAPOUR POWER UNITS::Parts" +84050000,producer gas or water gas generators with or without their acetylene gas purifiers generators and similar water process gas generators with or without their purifiers +84051000,producer gas or water gas generators with or without their acetylene gas purifiers generators and similar water process gas generators with or without their purifiers >> producer gas or water gas generators with or without their purifiers acetylene gas generators and similar water process gas generators with or without their purifiers +84051010,"PRODUCER GAS OR WATER GAS GENERATORS, WITH OR WITHOUT THEIR PURIFIERS; ACETYLENE GAS GENERATORS AND SIMILAR WATER PROCESS GAS GENERATORS, WITH OR WITHOUT THEIR PURIFIERS:Producer gas or water gas generators, with or without their purifiers; acetylene gas generators and similar water process gas 
generators, with or without their purifiers :Producer gas or water gas generators" +84051020,"PRODUCER GAS OR WATER GAS GENERATORS, WITH OR WITHOUT THEIR PURIFIERS; ACETYLENE GAS GENERATORS AND SIMILAR WATER PROCESS GAS GENERATORS, WITH OR WITHOUT THEIR PURIFIERS:Producer gas or water gas generators, with or without their purifiers; acetylene gas generators and similar water process gas generators, with or without their purifiers :Acetylene gas generators" +84051090,"PRODUCER GAS OR WATER GAS GENERATORS, WITH OR WITHOUT THEIR PURIFIERS; ACETYLENE GAS GENERATORS AND SIMILAR WATER PROCESS GAS GENERATORS, WITH OR WITHOUT THEIR PURIFIERS:Producer gas or water gas generators, with or without their purifiers; acetylene gas generators and similar water process gas generators, with or without their purifiers :Other" +84059000,"PRODUCER GAS OR WATER GAS GENERATORS, WITH OR WITHOUT THEIR PURIFIERS; ACETYLENE GAS GENERATORS AND SIMILAR WATER PROCESS GAS GENERATORS, WITH OR WITHOUT THEIR PURIFIERS:parts:PARTS" +84060000,steam turbines and other vapour turbines +84061000,STEAM TURBINES AND OTHER VAPOUR TURBINES::Turbines for marine propulsion +84068100,STEAM TURBINES AND OTHER VAPOUR TURBINES::Of an output exceeding 40 MW +84068200,STEAM TURBINES AND OTHER VAPOUR TURBINES::Of an output not exceeding 40 MW +84069000,STEAM TURBINES AND OTHER VAPOUR TURBINES::Parts +84070000,reciprocating or rotary internal combustion piston engines +84071000,SPARK-IGNITION RECIPROCATING OR ROTARY INTERNAL COMBUSTION PISTON ENGINES::Aircraft engines +84072100,SPARK-IGNITION RECIPROCATING OR ROTARY INTERNAL COMBUSTION PISTON ENGINES::Outboard motors +84072900,SPARK-IGNITION RECIPROCATING OR ROTARY INTERNAL COMBUSTION PISTON ENGINES::Other +84073100,reciprocating or rotary internal combustion piston engines >> of a cylinder capacity not exceeding 50 cc +84073110,SPARK-IGNITION RECIPROCATING OR ROTARY INTERNAL COMBUSTION PISTON ENGINES:Of a cylinder capacity not exceeding 50 cc :For motor cycles +84073190,SPARK-IGNITION RECIPROCATING OR ROTARY INTERNAL COMBUSTION PISTON ENGINES:Of a cylinder capacity not exceeding 50 cc :Other +84073200,reciprocating or rotary internal combustion piston engines >> of a cylinder capacity exceeding 50 cc but not exceeding 250 cc +84073210,SPARK-IGNITION RECIPROCATING OR ROTARY INTERNAL COMBUSTION PISTON ENGINES:Of a cylinder capacity exceeding 50 cc but not exceeding 250 cc:For motor cycles +84073290,SPARK-IGNITION RECIPROCATING OR ROTARY INTERNAL COMBUSTION PISTON ENGINES:Of a cylinder capacity exceeding 50 cc but not exceeding 250 cc:Other +84073300,reciprocating or rotary internal combustion piston engines >> of a cylinder capacity exceeding 250 cc but not exceeding cc +84073310,SPARK-IGNITION RECIPROCATING OR ROTARY INTERNAL COMBUSTION PISTON ENGINES:Of a cylinder capacity exceeding 250 cc but not exceeding:For motor cars +84073320,SPARK-IGNITION RECIPROCATING OR ROTARY INTERNAL COMBUSTION PISTON ENGINES:Of a cylinder capacity exceeding 250 cc but not exceeding:For motor cycles +84073390,SPARK-IGNITION RECIPROCATING OR ROTARY INTERNAL COMBUSTION PISTON ENGINES:Of a cylinder capacity exceeding 250 cc but not exceeding:Other +84073400,reciprocating or rotary internal combustion piston engines >> of a cylinder capacity exceeding cc +84073410,"SPARK-IGNITION RECIPROCATING OR ROTARY INTERNAL COMBUSTION PISTON ENGINES:Of a cylinder capacity exceeding 1,000 cc:For motor cars" +84073490,"SPARK-IGNITION RECIPROCATING OR ROTARY INTERNAL COMBUSTION PISTON ENGINES:Of a cylinder capacity exceeding 
1,000 cc:Other" +84079000,reciprocating or rotary internal combustion piston engines >> other engines +84079010,SPARK-IGNITION RECIPROCATING OR ROTARY INTERNAL COMBUSTION PISTON ENGINES:Other engines:Petrol engines +84079020,SPARK-IGNITION RECIPROCATING OR ROTARY INTERNAL COMBUSTION PISTON ENGINES:Other engines:Kerosene engines +84079090,SPARK-IGNITION RECIPROCATING OR ROTARY INTERNAL COMBUSTION PISTON ENGINES:Other engines:Other +84080000,internal combustion piston engines diesel or engines +84081000,internal combustion piston engines diesel or engines >> marine propulsion engines +84081010,COMPRESSION-IGNITION INTERNAL COMBUSTION PISTON ENGINES (DIESEL OR SEMI-DIESEL ENGINES):Marine propulsion engines:Outboard engines +84081091,COMPRESSION-IGNITION INTERNAL COMBUSTION PISTON ENGINES (DIESEL OR SEMI-DIESEL ENGINES):Marine propulsion engines:Of a cylinder capacity not exceeding 100 cc +84081092,COMPRESSION-IGNITION INTERNAL COMBUSTION PISTON ENGINES (DIESEL OR SEMI-DIESEL ENGINES):Marine propulsion engines:Of a cylinder capacity exceeding 100 cc but not exceeding 250 cc +84081093,COMPRESSION-IGNITION INTERNAL COMBUSTION PISTON ENGINES (DIESEL OR SEMI-DIESEL ENGINES):Marine propulsion engines:Of a cylinder capacity exceeding 250 cc +84082000,internal combustion piston engines diesel or engines >> engines of a kind used for the propulsion of vehicles of chapter 87 +84082010,COMPRESSION-IGNITION INTERNAL COMBUSTION PISTON ENGINES (DIESEL OR SEMI-DIESEL ENGINES):Engines of a kind used for the propulsion of vehicles of Chapter 87:Of cylinder capacity not exceeding 250 cc +84082020,COMPRESSION-IGNITION INTERNAL COMBUSTION PISTON ENGINES (DIESEL OR SEMI-DIESEL ENGINES):Engines of a kind used for the propulsion of vehicles of Chapter 87:Engines of cylinder capacity exceeding 250 cc +84089000,internal combustion piston engines diesel or engines >> other engines +84089010,COMPRESSION-IGNITION INTERNAL COMBUSTION PISTON ENGINES (DIESEL OR SEMI-DIESEL ENGINES):Other engines:Stationary engines of cylinder capacity exceeding 50 cc +84089090,COMPRESSION-IGNITION INTERNAL COMBUSTION PISTON ENGINES (DIESEL OR SEMI-DIESEL ENGINES):Other engines:Other +84090000,parts suitable for use solely or p rincipally with the engines of heading 8407 or 8408 +84091000,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE ENGINES OF HEADING 8407 OR 8408::For aircraft engines +84099100,parts suitable for use solely or p rincipally with the engines of heading 8407 or 8408 >> suitable for use solely or principally with spark ignition internal combustion piston engines valves inlet and exhaust piston piston rings piston assemblies +84099111,"PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE ENGINES OF HEADING 8407 OR 8408:Suitable for use solely or principally with spark-ignition internal combustion piston engines:Valves, inlet and exhaust" +84099112,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE ENGINES OF HEADING 8407 OR 8408:Suitable for use solely or principally with spark-ignition internal combustion piston engines:Pistons +84099113,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE ENGINES OF HEADING 8407 OR 8408:Suitable for use solely or principally with spark-ignition internal combustion piston engines:Piston rings +84099114,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE ENGINES OF HEADING 8407 OR 8408:Suitable for use solely or principally with spark-ignition internal combustion piston engines:Piston assemblies +84099120,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE ENGINES OF HEADING 
8407 OR 8408:Suitable for use solely or principally with spark-ignition internal combustion piston engines:Fuel injection equipment excluding injection pumps +84099191,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE ENGINES OF HEADING 8407 OR 8408:Suitable for use solely or principally with spark-ignition internal combustion piston engines:Of petrol engines for motor vehicles +84099192,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE ENGINES OF HEADING 8407 OR 8408:Suitable for use solely or principally with spark-ignition internal combustion piston engines:Of other petrol engines +84099193,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE ENGINES OF HEADING 8407 OR 8408:Suitable for use solely or principally with spark-ignition internal combustion piston engines:Of kerosene engines +84099194,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE ENGINES OF HEADING 8407 OR 8408:Suitable for use solely or principally with spark-ignition internal combustion piston engines:Of gas engines +84099199,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE ENGINES OF HEADING 8407 OR 8408:Suitable for use solely or principally with spark-ignition internal combustion piston engines:Other +84099900,parts suitable for use solely or p rincipally with the engines of heading 8407 or 8408 >> other valves inlet and exhaust piston piston rings piston assemblies +84099911,"PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE ENGINES OF HEADING 8407 OR 8408:Other :Valve, inlet and exhaust" +84099912,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE ENGINES OF HEADING 8407 OR 8408:Other :Pistons +84099913,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE ENGINES OF HEADING 8407 OR 8408:Other :Piston rings +84099914,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE ENGINES OF HEADING 8407 OR 8408:Other :Piston assemblies +84099920,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE ENGINES OF HEADING 8407 OR 8408:Other :Fuel nozzles +84099930,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE ENGINES OF HEADING 8407 OR 8408:Other :Fuel injection equipment excluding injection pumps +84099941,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE ENGINES OF HEADING 8407 OR 8408:Other :Of diesel engines for motor vehicles +84099942,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE ENGINES OF HEADING 8407 OR 8408:Other :Of outboard engine +84099949,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE ENGINES OF HEADING 8407 OR 8408:Other :Other +84099990,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE ENGINES OF HEADING 8407 OR 8408:Other :Other +84100000,hydraulic turbines water wheels and regulators therefor hydraulic turbines and water wheels +84101100,"HYDRAULIC TURBINES, WATER WHEELS, ANDREGULATORS THEREFOR::Of a power not exceeding 1,000 kW" +84101200,hydraulic turbines water wheels and regulators therefor hydraulic turbines and water wheels >> of a power exceeding kw but not exceeding kw +84101210,"HYDRAULIC TURBINES, WATER WHEELS, ANDREGULATORS THEREFOR:Of a power exceeding 1,000 kW but not exceeding 10,000 kW:Of power exceeding 1,000 kW but not exceeding 5,000 kW" +84101220,"HYDRAULIC TURBINES, WATER WHEELS, ANDREGULATORS THEREFOR:Of a power exceeding 1,000 kW but not exceeding 10,000 kW:Of power exceeding 5,000 kW but not exceeding 10,000 kW" +84101300,hydraulic turbines water wheels and regulators therefor hydraulic turbines and water wheels >> of a power exceeding kw +84101310,"HYDRAULIC TURBINES, WATER WHEELS, ANDREGULATORS THEREFOR:Of a power exceeding 10,000 
kW:Of power exceeding 10,000 kW but not exceeding 30,000 kW" +84101320,"HYDRAULIC TURBINES, WATER WHEELS, ANDREGULATORS THEREFOR:Of a power exceeding 10,000 kW:Of power exceeding 30,000 kW but not exceeding 80,000 kW" +84101390,"HYDRAULIC TURBINES, WATER WHEELS, ANDREGULATORS THEREFOR:Of a power exceeding 10,000 kW:Of power exceeding 80,000 kW" +84109000,"HYDRAULIC TURBINES, WATER WHEELS, ANDREGULATORS THEREFOR::Parts, including regulators" +84110000,and other gas turbines +84111100,"TURBO-JETS, TURBO-PROPELLERS AND OTHER GAS TURBINES::Of a thrust not exceeding 25kN" +84111200,"TURBO-JETS, TURBO-PROPELLERS AND OTHER GAS TURBINES::Of a thrust exceeding 25 kN" +84112100,"TURBO-JETS, TURBO-PROPELLERS AND OTHER GAS TURBINES::Of a power not exceeding 1,100 kW" +84112200,"TURBO-JETS, TURBO-PROPELLERS AND OTHER GAS TURBINES::Of a power exceeding 1,100 kW" +84118100,"TURBO-JETS, TURBO-PROPELLERS AND OTHER GAS TURBINES::Of a power not exceeding 5,000 kW" +84118200,and other gas turbines >> of a power exceeding kw +84118210,"TURBO-JETS, TURBO-PROPELLERS AND OTHER GAS TURBINES:Of a power exceeding 5,000 kW:Of power exceeding 5,000 kW but not exceeding 15,000 kW" +84118220,"TURBO-JETS, TURBO-PROPELLERS AND OTHER GAS TURBINES:Of a power exceeding 5,000 kW:Of power exceeding 15,000 kW but not exceeding 30,000 kW" +84118230,"TURBO-JETS, TURBO-PROPELLERS AND OTHER GAS TURBINES:Of a power exceeding 5,000 kW:Of power exceeding 30,000 kW but not exceeding 60,000 kW" +84118240,"TURBO-JETS, TURBO-PROPELLERS AND OTHER GAS TURBINES:Of a power exceeding 5,000 kW:Of power exceeding 60,000 kW but not exceeding 90,000 kW" +84118250,"TURBO-JETS, TURBO-PROPELLERS AND OTHER GAS TURBINES:Of a power exceeding 5,000 kW:Of power exceeding 90,000 kW but not exceeding 1,12.5,000 kW" +84118260,"TURBO-JETS, TURBO-PROPELLERS AND OTHER GAS TURBINES:Of a power exceeding 5,000 kW:Of power exceeding 1,15,000 kW" +84119100,"TURBO-JETS, TURBO-PROPELLERS AND OTHER GAS TURBINES::Of turbo-jets or turbo-propellers" +84119900,"TURBO-JETS, TURBO-PROPELLERS AND OTHER GAS TURBINES::Other" +84120000,other engines and motors +84121000,OTHER ENGINES AND MOTORS::Reaction engines other than turbo-jets +84122100,OTHER ENGINES AND MOTORS::Linear acting (cylinders) +84122900,other engines and motors >> other +84122910,OTHER ENGINES AND MOTORS:Other :Hydrojet (hydraulic jet engines) +84122990,OTHER ENGINES AND MOTORS:Other :Other +84123100,OTHER ENGINES AND MOTORS::Linear acting (cylinders) +84123900,OTHER ENGINES AND MOTORS::Other +84128000,other engines and motors >> other steam or other vapour power +84128011,OTHER ENGINES AND MOTORS:Other :Stationary +84128019,OTHER ENGINES AND MOTORS:Other :Other +84128020,"OTHER ENGINES AND MOTORS:Other :Motors, spring operated excluding clock and watch movements" +84128030,OTHER ENGINES AND MOTORS:Other :Wind turbine or engine +84128090,OTHER ENGINES AND MOTORS:Other :Other +84129000,other engines and motors >> parts +84129010,OTHER ENGINES AND MOTORS:Parts :Of steam engines incorporating boilers +84129020,OTHER ENGINES AND MOTORS:Parts :Of other steam engines and other vapour power units not incorporating boilers +84129030,OTHER ENGINES AND MOTORS:Parts :Of hydraulic engines and motors +84129090,OTHER ENGINES AND MOTORS:Parts :Other +84130000,pumps for liquids whether or not fitted with a measuring device liquid elevators pumps fitted or designed to be fitted with a measuring device +84131100,pumps for liquids whether or not fitted with a measuring device liquid elevators pumps fitted or designed to be fitted 
with a measuring device >> pumps for dispensing fuel or lubricants of the type used in filling stations or in garages +84131110,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Pumps for dispensing fuel or lubricants, of the type used in filling stations or in garages:Hand pumps" +84131191,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Pumps for dispensing fuel or lubricants, of the type used in filling stations or in garages:Pumps for dispensing fuel" +84131199,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Pumps for dispensing fuel or lubricants, of the type used in filling stations or in garages:Other" +84131900,pumps for liquids whether or not fitted with a measuring device liquid elevators pumps fitted or designed to be fitted with a measuring device >> other +84131910,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Other :Hand pumps" +84131990,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Other :Other" +84132000,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS::Hand pumps, other than those of sub-heading 8413 11 or 8413 19" +84133000,pumps for liquids whether or not fitted with a measuring device liquid elevators pumps fitted or designed to be fitted with a measuring device >> fuel lubricating or cooling medium pumps for internal combustion piston engines +84133010,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Fuel, lubricating or cooling medium pumps for internal combustion piston engines:Injection pumps for diesel engines" +84133020,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Fuel, lubricating or cooling medium pumps for internal combustion piston engines:Oil pump" +84133030,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Fuel, lubricating or cooling medium pumps for internal combustion piston engines:Water pump" +84133090,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Fuel, lubricating or cooling medium pumps for internal combustion piston engines:Other" +84134000,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS::Concrete pumps" +84135000,pumps for liquids whether or not fitted with a measuring device liquid elevators pumps fitted or designed to be fitted with a measuring device >> other reciprocating positive displacement pumps +84135010,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Other reciprocating positive displacement pumps :Metering and dosing pumps" +84135021,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Other reciprocating positive displacement pumps :Deep tube well turbine pump" +84135029,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Other reciprocating positive displacement pumps :Other" +84135090,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Other reciprocating positive displacement pumps :Other" +84136000,pumps for liquids whether or not fitted with a measuring device liquid elevators pumps fitted or designed to be fitted with a measuring device >> other rotary positive displacement pumps +84136010,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Other rotary positive displacement 
pumps:Gear type pumps" +84136020,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Other rotary positive displacement pumps:Screw type pumps" +84136090,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Other rotary positive displacement pumps:Other" +84137000,pumps for liquids whether or not fitted with a measuring device liquid elevators pumps fitted or designed to be fitted with a measuring device >> other centrifugal pumps +84137010,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Other centrifugal pumps:Primarily designed to handle water" +84137091,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Other centrifugal pumps:Single and multistage chemical process pumps" +84137092,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Other centrifugal pumps:Horizontal split casing pumps" +84137093,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Other centrifugal pumps:Horizontal self priming pumps" +84137094,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Other centrifugal pumps:Vertical turbine driven pumps" +84137095,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Other centrifugal pumps:Boiler feed pumps" +84137096,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Other centrifugal pumps:Slurry pumps" +84137097,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Other centrifugal pumps:Dredger pumps" +84137099,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Other centrifugal pumps:Other" +84138100,pumps for liquids whether or not fitted with a measuring device liquid elevators pumps fitted or designed to be fitted with a measuring device >> pumps +84138110,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Pumps :Gas pumps" +84138120,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Pumps :Hydraulic ram" +84138130,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Pumps :Axial flow and mixed flow vertical pump designed primarily for handling water" +84138190,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Pumps :Other" +84138200,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS::Liquid elevators" +84139100,pumps for liquids whether or not fitted with a measuring device liquid elevators pumps fitted or designed to be fitted with a measuring device >> of pumps +84139110,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Of pumps :Of reciprocating pumps" +84139120,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Of pumps :Of centrifugal pumps" +84139130,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Of pumps :Of deep well turbine pumps and of other rotary pumps" +84139140,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Of pumps :Of hand pump for handling water" +84139190,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS:Of pumps :Other" +84139200,"PUMPS FOR LIQUIDS, WHETHER OR NOT FITTED WITH A MEASURING DEVICE; LIQUID ELEVATORS::Of liquid elevators" +84141000,"AIR OR VACUUM PUMPS, 
AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS::Vacuum pumps(OLD tariff)" +84142010,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Hand or foot-operated air pumps :Bicycle pumps(OLD tariff)" +84142020,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Hand or foot-operated air pumps :Other hand pumps(OLD tariff)" +84142090,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Hand or foot-operated air pumps :Other(OLD tariff)" +84143000,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS::Compressors of a kind used in refrigerating equipment(OLD tariff)" +84144010,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Air compressors mounted on a wheeled chassis for towing :Reciprocating air compressors(OLD tariff)" +84144020,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Air compressors mounted on a wheeled chassis for towing :Centrifugal air compressors(OLD tariff)" +84144030,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Air compressors mounted on a wheeled chassis for towing :Screw air compressors(OLD tariff)" +84144090,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Air compressors mounted on a wheeled chassis for towing :Other(OLD tariff)" +84145110,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Table, floor, wall, window, ceiling or roof fans, with a self-contained electric motor of an output not exceeding 125 W:Table fans(OLD tariff)" +84145120,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Table, floor, wall, window, ceiling or roof fans, with a self-contained electric motor of an output not exceeding 125 W:Ceiling fans(OLD tariff)" +84145130,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Table, floor, wall, window, ceiling or roof fans, with a self-contained electric motor of an output not exceeding 125 W:Pedestal fans(OLD tariff)" +84145140,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Table, floor, wall, window, ceiling or roof fans, with a self-contained electric motor of an output not exceeding 125 W:Railway carriage fans(OLD tariff)" +84145150,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Table, floor, wall, window, ceiling or roof fans, with a self-contained electric motor of an output not exceeding 125 W:Wall fans(OLD tariff)" 
+84145190,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Table, floor, wall, window, ceiling or roof fans, with a self-contained electric motor of an output not exceeding 125 W:Other(OLD tariff)" +84145910,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Other :Air circulator(OLD tariff)" +84145920,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Other :Blowers, portable(OLD tariff)" +84145930,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Other :Industrial fans and blowers(OLD tariff)" +84145990,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Other :Other(OLD tariff)" +84146000,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS::Hoods having a maximum horizontal side not exceeding 120 cm(OLD tariff)" +84147000,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:gas:Gas-tight biological safety cabinet(OLD tariff)" +84148011,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Other :Of a kind used in air-conditioning equipment(OLD tariff)" +84148019,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Other :Other(OLD tariff)" +84148020,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Other :Free-piston generators for gas turbine(OLD tariff)" +84148030,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Other :Turbo charger(OLD tariff)" +84148090,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Other :Other(OLD tariff)" +84149011,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Parts :Of gas compressors of a kind used in refrigerating and air conditioning appliances and machinery(OLD tariff)" +84149012,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Parts :Of bicycle pumps(OLD tariff)" +84149019,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Parts :Other(OLD tariff)" +84149020,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Parts :Of free piston generators(OLD tariff)" +84149030,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Parts :Of electric 
fans(OLD tariff)" +84149040,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Parts :Of Industrial fans, blowers(OLD tariff)" +84149090,"AIR OR VACUUM PUMPS, AIR OR OTHER GAS COMPRESSORS AND FANS; VENTILATING OR RECYCLING HOODS INCORPORATING A FAN, WHETHER OR NOT FITTED WITH FILTERS:Parts :Other(OLD tariff)" +84150000,air conditioning machines comprising a motor driven fan and elements for changing the temperature and those humidity including machines in which the humidity can not be separately regulated +84151000,air conditioning machines comprising a motor driven fan and elements for changing the temperature and those humidity including machines in which the humidity can not be separately regulated >> of a kind designed to be fixed to a window wall ceiling or floor or +84151010,"AIR CONDITIONING MACHINES, COMPRISING A MOTORDRIVEN FAN AND ELEMENTS FOR CHANGING THE TEMPERATURE AND HUMIDITY, INCLUDING THOSE MACHINES IN WHICH THE HUMIDITY CANNOT BE SEPARATELY REGULATED:Of a kind designed to be fixed to a window, wall, ceiling or floor, self-contained or ?split-system?;?;:Split system" +84151090,"AIR CONDITIONING MACHINES, COMPRISING A MOTORDRIVEN FAN AND ELEMENTS FOR CHANGING THE TEMPERATURE AND HUMIDITY, INCLUDING THOSE MACHINES IN WHICH THE HUMIDITY CANNOT BE SEPARATELY REGULATED:Of a kind designed to be fixed to a window, wall, ceiling or floor, self-contained or ?split-system?;?;:Other" +84152000,air conditioning machines comprising a motor driven fan and elements for changing the temperature and those humidity including machines in which the humidity can not be separately regulated >> of a kind used for persons in motor vehicles +84152010,"AIR CONDITIONING MACHINES, COMPRISING A MOTORDRIVEN FAN AND ELEMENTS FOR CHANGING THE TEMPERATURE AND HUMIDITY, INCLUDING THOSE MACHINES IN WHICH THE HUMIDITY CANNOT BE SEPARATELY REGULATED:Of a kind used for persons in motor vehicles :For buses" +84152090,"AIR CONDITIONING MACHINES, COMPRISING A MOTORDRIVEN FAN AND ELEMENTS FOR CHANGING THE TEMPERATURE AND HUMIDITY, INCLUDING THOSE MACHINES IN WHICH THE HUMIDITY CANNOT BE SEPARATELY REGULATED:Of a kind used for persons in motor vehicles :Other" +84158100,air conditioning machines comprising a motor driven fan and elements for changing the temperature and those humidity including machines in which the humidity can not be separately regulated >> incorporating a refrigerating unit and a valve for reversal of the cooling or heat cycle reversible heat pumps +84158110,"AIR CONDITIONING MACHINES, COMPRISING A MOTORDRIVEN FAN AND ELEMENTS FOR CHANGING THE TEMPERATURE AND HUMIDITY, INCLUDING THOSE MACHINES IN WHICH THE HUMIDITY CANNOT BE SEPARATELY REGULATED:Incorporating a refrigerating unit and a valve for reversal of the cooling or heat cycle (reversible heat pumps):Split air-conditioner two tonnes and above" +84158190,"AIR CONDITIONING MACHINES, COMPRISING A MOTORDRIVEN FAN AND ELEMENTS FOR CHANGING THE TEMPERATURE AND HUMIDITY, INCLUDING THOSE MACHINES IN WHICH THE HUMIDITY CANNOT BE SEPARATELY REGULATED:Incorporating a refrigerating unit and a valve for reversal of the cooling or heat cycle (reversible heat pumps):Other" +84158200,air conditioning machines comprising a motor driven fan and elements for changing the temperature and those humidity including machines in which the humidity can not be separately regulated >> other incorporating a refrigerating unit +84158210,"AIR CONDITIONING MACHINES, 
COMPRISING A MOTORDRIVEN FAN AND ELEMENTS FOR CHANGING THE TEMPERATURE AND HUMIDITY, INCLUDING THOSE MACHINES IN WHICH THE HUMIDITY CANNOT BE SEPARATELY REGULATED:Other, incorporating a refrigerating unit:Split air-conditioner two tonnes and above" +84158290,"AIR CONDITIONING MACHINES, COMPRISING A MOTORDRIVEN FAN AND ELEMENTS FOR CHANGING THE TEMPERATURE AND HUMIDITY, INCLUDING THOSE MACHINES IN WHICH THE HUMIDITY CANNOT BE SEPARATELY REGULATED:Other, incorporating a refrigerating unit:Other" +84158300,air conditioning machines comprising a motor driven fan and elements for changing the temperature and those humidity including machines in which the humidity can not be separately regulated >> not incorporating a refrigerating unit +84158310,"AIR CONDITIONING MACHINES, COMPRISING A MOTORDRIVEN FAN AND ELEMENTS FOR CHANGING THE TEMPERATURE AND HUMIDITY, INCLUDING THOSE MACHINES IN WHICH THE HUMIDITY CANNOT BE SEPARATELY REGULATED:Not incorporating a refrigerating unit :Split air-conditioner two tonnes and above" +84158390,"AIR CONDITIONING MACHINES, COMPRISING A MOTORDRIVEN FAN AND ELEMENTS FOR CHANGING THE TEMPERATURE AND HUMIDITY, INCLUDING THOSE MACHINES IN WHICH THE HUMIDITY CANNOT BE SEPARATELY REGULATED:Not incorporating a refrigerating unit :Other" +84159000,"AIR CONDITIONING MACHINES, COMPRISING A MOTORDRIVEN FAN AND ELEMENTS FOR CHANGING THE TEMPERATURE AND HUMIDITY, INCLUDING THOSE MACHINES IN WHICH THE HUMIDITY CANNOT BE SEPARATELY REGULATED::Parts" +84160000,furnace burners for liquid fuel for pulverised solid fuel or for gas mechanical stokers including their mechanical grates mechanical ash dischargers and similar appliances +84161000,"FURNACE BURNERS FOR LIQUID FUEL, FOR PULVERISED SOLID FUEL OR FOR GAS; MECHANICAL STOKERS, INCLUDING THEIR MECHANICAL GRATES, MECHANICAL ASH DISCHARGERS AND SIMILAR APPLIANCES::Furnace burners for liquid fuel" +84162000,"FURNACE BURNERS FOR LIQUID FUEL, FOR PULVERISED SOLID FUEL OR FOR GAS; MECHANICAL STOKERS, INCLUDING THEIR MECHANICAL GRATES, MECHANICAL ASH DISCHARGERS AND SIMILAR APPLIANCES::Other furnace burners, including combination burners" +84163000,"FURNACE BURNERS FOR LIQUID FUEL, FOR PULVERISED SOLID FUEL OR FOR GAS; MECHANICAL STOKERS, INCLUDING THEIR MECHANICAL GRATES, MECHANICAL ASH DISCHARGERS AND SIMILAR APPLIANCES::Mechanical stokers, mechanical grates, mechanical ash dischargers and similar appliances" +84169000,"FURNACE BURNERS FOR LIQUID FUEL, FOR PULVERISED SOLID FUEL OR FOR GAS; MECHANICAL STOKERS, INCLUDING THEIR MECHANICAL GRATES, MECHANICAL ASH DISCHARGERS AND SIMILAR APPLIANCES::Parts" +84170000,industrial or laboratory furnaces and ovens including incinerators +84171000,"INDUSTRIAL OR LABORATORY FURNACES AND OVENS, INCLUDING INCINERATORS, NON-ELECTRIC::Furnaces and ovens for the roasting, melting or other heat-treatment of ores, pyrites or of metals" +84172000,"INDUSTRIAL OR LABORATORY FURNACES AND OVENS, INCLUDING INCINERATORS, NON-ELECTRIC::Bakery ovens, including biscuit ovens" +84178000,industrial or laboratory furnaces and ovens including incinerators >> other +84178010,"INDUSTRIAL OR LABORATORY FURNACES AND OVENS, INCLUDING INCINERATORS, NON-ELECTRIC:Other :For cement industry" +84178090,"INDUSTRIAL OR LABORATORY FURNACES AND OVENS, INCLUDING INCINERATORS, NON-ELECTRIC:Other :Other" +84179000,"INDUSTRIAL OR LABORATORY FURNACES AND OVENS, INCLUDING INCINERATORS, NON-ELECTRIC::Parts" +84180000,freezers refrigerators and other refrigerating or freezing equipment electric or other heat pumps other than 
air conditioning machines of heading +84181000,freezers refrigerators and other refrigerating or freezing equipment electric or other heat pumps other than air conditioning machines of heading >> combined fitted with separate external doors or drawers or combinations thereof +84181010,"REFRIGERATORS, FREEZERS AND OTHER REFRIGERATING OR FREEZING EQUIPMENT, ELECTRIC OR OTHER; HEAT PUMPS OTHER THAN AIR CONDITIONING MACHINES OF HEADING 8415:Combined refrigerator-freezers, fitted with separate external doors :Commercial type" +84181090,"REFRIGERATORS, FREEZERS AND OTHER REFRIGERATING OR FREEZING EQUIPMENT, ELECTRIC OR OTHER; HEAT PUMPS OTHER THAN AIR CONDITIONING MACHINES OF HEADING 8415:Combined refrigerator-freezers, fitted with separate external doors :Other" +84182100,"REFRIGERATORS, FREEZERS AND OTHER REFRIGERATING OR FREEZING EQUIPMENT, ELECTRIC OR OTHER; HEAT PUMPS OTHER THAN AIR CONDITIONING MACHINES OF HEADING 8415::Compression-type" +84182900,"REFRIGERATORS, FREEZERS AND OTHER REFRIGERATING OR FREEZING EQUIPMENT, ELECTRIC OR OTHER; HEAT PUMPS OTHER THAN AIR CONDITIONING MACHINES OF HEADING 8415::Other" +84183000,freezers refrigerators and other refrigerating or freezing equipment electric or other heat pumps other than air conditioning machines of heading >> freezers of the chest type not exceeding 800l capacity +84183010,"REFRIGERATORS, FREEZERS AND OTHER REFRIGERATING OR FREEZING EQUIPMENT, ELECTRIC OR OTHER; HEAT PUMPS OTHER THAN AIR CONDITIONING MACHINES OF HEADING 8415:Freezers of the chest type, not exceeding 800 l capacity :Commercial type electrical" +84183090,"REFRIGERATORS, FREEZERS AND OTHER REFRIGERATING OR FREEZING EQUIPMENT, ELECTRIC OR OTHER; HEAT PUMPS OTHER THAN AIR CONDITIONING MACHINES OF HEADING 8415:Freezers of the chest type, not exceeding 800 l capacity :Other" +84184000,freezers refrigerators and other refrigerating or freezing equipment electric or other heat pumps other than air conditioning machines of heading >> freezers of the upright type not exceeding 900l capacity +84184010,"REFRIGERATORS, FREEZERS AND OTHER REFRIGERATING OR FREEZING EQUIPMENT, ELECTRIC OR OTHER; HEAT PUMPS OTHER THAN AIR CONDITIONING MACHINES OF HEADING 8415:Freezers of the upright type, not exceeding 900 l capacity :Electrical" +84184090,"REFRIGERATORS, FREEZERS AND OTHER REFRIGERATING OR FREEZING EQUIPMENT, ELECTRIC OR OTHER; HEAT PUMPS OTHER THAN AIR CONDITIONING MACHINES OF HEADING 8415:Freezers of the upright type, not exceeding 900 l capacity :Other" +84185000,"REFRIGERATORS, FREEZERS AND OTHER REFRIGERATING OR FREEZING EQUIPMENT, ELECTRIC OR OTHER; HEAT PUMPS OTHER THAN AIR CONDITIONING MACHINES OF HEADING 8415::Other furniture (chests, cabinets, display counters, show-cases and the like) for storage and display, incorporating refrigerating or freezing equipment" +84186100,"REFRIGERATORS, FREEZERS AND OTHER REFRIGERATING OR FREEZING EQUIPMENT, ELECTRIC OR OTHER; HEAT PUMPS OTHER THAN AIR CONDITIONING MACHINES OF HEADING 8415::Heat pumps other than air-conditioning machines of heading 8415" +84186900,freezers refrigerators and other refrigerating or freezing equipment electric or other heat pumps other than air conditioning machines of heading >> other +84186910,"REFRIGERATORS, FREEZERS AND OTHER REFRIGERATING OR FREEZING EQUIPMENT, ELECTRIC OR OTHER; HEAT PUMPS OTHER THAN AIR CONDITIONING MACHINES OF HEADING 8415:Other :Ice making machinery" +84186920,"REFRIGERATORS, FREEZERS AND OTHER REFRIGERATING OR FREEZING EQUIPMENT, ELECTRIC OR 
OTHER; HEAT PUMPS OTHER THAN AIR CONDITIONING MACHINES OF HEADING 8415:Other :Water cooler" +84186930,"REFRIGERATORS, FREEZERS AND OTHER REFRIGERATING OR FREEZING EQUIPMENT, ELECTRIC OR OTHER; HEAT PUMPS OTHER THAN AIR CONDITIONING MACHINES OF HEADING 8415:Other :Vending machine, other than automatic vending machine" +84186940,"REFRIGERATORS, FREEZERS AND OTHER REFRIGERATING OR FREEZING EQUIPMENT, ELECTRIC OR OTHER; HEAT PUMPS OTHER THAN AIR CONDITIONING MACHINES OF HEADING 8415:Other :Refrigeration equipment or devices specially used in leather industries for manufacturing of leather articles" +84186950,"REFRIGERATORS, FREEZERS AND OTHER REFRIGERATING OR FREEZING EQUIPMENT, ELECTRIC OR OTHER; HEAT PUMPS OTHER THAN AIR CONDITIONING MACHINES OF HEADING 8415:Other :Refrigerated farm tanks, industrial ice cream freezer" +84186990,"REFRIGERATORS, FREEZERS AND OTHER REFRIGERATING OR FREEZING EQUIPMENT, ELECTRIC OR OTHER; HEAT PUMPS OTHER THAN AIR CONDITIONING MACHINES OF HEADING 8415:Other :Other" +84189100,"REFRIGERATORS, FREEZERS AND OTHER REFRIGERATING OR FREEZING EQUIPMENT, ELECTRIC OR OTHER; HEAT PUMPS OTHER THAN AIR CONDITIONING MACHINES OF HEADING 8415::Furniture designed to receive refrigerating or freezing equipment" +84189900,"REFRIGERATORS, FREEZERS AND OTHER REFRIGERATING OR FREEZING EQUIPMENT, ELECTRIC OR OTHER; HEAT PUMPS OTHER THAN AIR CONDITIONING MACHINES OF HEADING 8415::Other" +84190000,machinery plant or laboratory equipment whether or not electrically heated excluding furnaces ovens and other equipment of heading for the treatment of 8514 materials by a process involving a change of temperature such as heating cooking roasting distilling rectifying sterilising pasteurising steaming drying evaporating vaporising condensing or cooling other than machinery or plant of a kind used for domestic purposes instantaneous or storage water heaters non electric +84191110,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:Instantaneous gas water heaters :Domestic type(OLD tariff)" +84191190,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:Instantaneous gas water heaters :Other(OLD tariff)" +84191200,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC::Solar water heaters" 
+84191900,machinery plant or laboratory equipment whether or not electrically heated excluding furnaces ovens and other equipment of heading for the treatment of 8514 materials by a process involving a change of temperature such as heating cooking roasting distilling rectifying sterilising pasteurising steaming drying evaporating vaporising condensing or cooling other than machinery or plant of a kind used for domestic purposes instantaneous or storage water heaters non electric >> other +84191910,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:Other :Domestic type" +84191920,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:Other :Other" +84192000,machinery plant or laboratory equipment whether or not electrically heated excluding furnaces ovens and other equipment of heading for the treatment of 8514 materials by a process involving a change of temperature such as heating cooking roasting distilling rectifying sterilising pasteurising steaming drying evaporating vaporising condensing or cooling other than machinery or plant of a kind used for domestic purposes instantaneous or storage water heaters non electric >> medical surgical or laboratory sterilisers +84192010,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:Medical, surgical or laboratory sterilisers:Auto claves" +84192090,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:Medical, surgical or laboratory sterilisers:Other" +84193100,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, 
CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC::For agricultural products(OLD tariff)" +84193200,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC::For wood, paper pulp, paper or paper, board(OLD tariff)" +84193300,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:Luophi:Lyophilisation apparatus, freeze drying units and spray dryer" +84193400,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:other:Other, for agricultural products" +84193500,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:Other, for wood, paper pulp, paper or paperboard:Other, for wood, paper pulp, paper or paperboard" +84193900,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC::Other" +84194000,machinery plant or laboratory equipment whether or not electrically heated excluding furnaces ovens and other equipment of heading for the treatment of 8514 materials by a process involving a change of temperature such as heating cooking roasting distilling rectifying sterilising pasteurising steaming drying evaporating vaporising condensing or cooling other than machinery or plant of a kind used for domestic purposes instantaneous or storage water heaters non electric >> distilling or rectifying plant +84194010,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, 
WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:Distilling or rectifying plant:For petroleum refining" +84194020,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:Distilling or rectifying plant:Other distilling equipment" +84194090,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:Distilling or rectifying plant:Other" +84195000,machinery plant or laboratory equipment whether or not electrically heated excluding furnaces ovens and other equipment of heading for the treatment of 8514 materials by a process involving a change of temperature such as heating cooking roasting distilling rectifying sterilising pasteurising steaming drying evaporating vaporising condensing or cooling other than machinery or plant of a kind used for domestic purposes instantaneous or storage water heaters non electric >> heat exchange units with a heat transfer surface area of greater than and less than 20 +84195010,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:Heat exchange units:Shell and tube type(OLD tariff)" +84195011,machinery plant or laboratory equipment whether or not electrically heated excluding furnaces ovens and other equipment of heading for the treatment of 8514 materials by a process involving a change of temperature such as heating cooking roasting distilling rectifying sterilising pasteurising steaming drying evaporating vaporising condensing or cooling other than machinery or plant of a kind used for domestic purposes instantaneous or storage water heaters non electric >> heat exchange units with a heat transfer surface area of greater than and less than 20 >> shell and tube type +84195012,machinery plant or laboratory equipment whether or not electrically heated excluding furnaces ovens and other equipment of heading for the treatment of 8514 materials by a process involving a change of 
temperature such as heating cooking roasting distilling rectifying sterilising pasteurising steaming drying evaporating vaporising condensing or cooling other than machinery or plant of a kind used for domestic purposes instantaneous or storage water heaters non electric >> heat exchange units with a heat transfer surface area of greater than and less than 20 >> plate type +84195013,machinery plant or laboratory equipment whether or not electrically heated excluding furnaces ovens and other equipment of heading for the treatment of 8514 materials by a process involving a change of temperature such as heating cooking roasting distilling rectifying sterilising pasteurising steaming drying evaporating vaporising condensing or cooling other than machinery or plant of a kind used for domestic purposes instantaneous or storage water heaters non electric >> heat exchange units with a heat transfer surface area of greater than and less than 20 >> spiral type +84195019,machinery plant or laboratory equipment whether or not electrically heated excluding furnaces ovens and other equipment of heading for the treatment of 8514 materials by a process involving a change of temperature such as heating cooking roasting distilling rectifying sterilising pasteurising steaming drying evaporating vaporising condensing or cooling other than machinery or plant of a kind used for domestic purposes instantaneous or storage water heaters non electric >> heat exchange units with a heat transfer surface area of greater than and less than 20 >> other +84195020,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:Heat exchange units:Plate type(OLD tariff)" +84195030,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:Heat exchange units:Spiral type(OLD tariff)" +84195090,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:Heat exchange units:Other(OLD tariff)" +84195092,machinery plant or laboratory equipment whether or not electrically heated excluding furnaces ovens and other equipment of heading for the treatment of 8514 materials by a process involving a change of temperature such as heating cooking roasting distilling rectifying sterilising pasteurising steaming drying evaporating vaporising condensing or 
cooling other than machinery or plant of a kind used for domestic purposes instantaneous or storage water heaters non electric >> heat exchange units with a heat transfer surface area of greater than and less than 20 >> plate type +84195093,machinery plant or laboratory equipment whether or not electrically heated excluding furnaces ovens and other equipment of heading for the treatment of 8514 materials by a process involving a change of temperature such as heating cooking roasting distilling rectifying sterilising pasteurising steaming drying evaporating vaporising condensing or cooling other than machinery or plant of a kind used for domestic purposes instantaneous or storage water heaters non electric >> heat exchange units with a heat transfer surface area of greater than and less than 20 >> spiral type +84195099,machinery plant or laboratory equipment whether or not electrically heated excluding furnaces ovens and other equipment of heading for the treatment of 8514 materials by a process involving a change of temperature such as heating cooking roasting distilling rectifying sterilising pasteurising steaming drying evaporating vaporising condensing or cooling other than machinery or plant of a kind used for domestic purposes instantaneous or storage water heaters non electric >> heat exchange units with a heat transfer surface area of greater than and less than 20 >> other +84196000,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC::Machinery for liquefying air or other gases" +84198100,machinery plant or laboratory equipment whether or not electrically heated excluding furnaces ovens and other equipment of heading for the treatment of 8514 materials by a process involving a change of temperature such as heating cooking roasting distilling rectifying sterilising pasteurising steaming drying evaporating vaporising condensing or cooling other than machinery or plant of a kind used for domestic purposes instantaneous or storage water heaters non electric >> for making hot drinks or for cooking heating food +84198110,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:For making hot drinks or for cooking or heating food :Friers(OLD tariff)" +84198120,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER 
HEATERS, NON-ELECTRIC:For making hot drinks or for cooking or heating food :Other kitchen machines" +84198190,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:For making hot drinks or for cooking or heating food :Other" +84198900,machinery plant or laboratory equipment whether or not electrically heated excluding furnaces ovens and other equipment of heading for the treatment of 8514 materials by a process involving a change of temperature such as heating cooking roasting distilling rectifying sterilising pasteurising steaming drying evaporating vaporising condensing or cooling other than machinery or plant of a kind used for domestic purposes instantaneous or storage water heaters non electric >> other pressure vessels reactors columns or towers or +84198910,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:Other :Pressure vessels, reactors, columns or towers or chemical storage tanks(OLD tariff)" +84198912,machinery plant or laboratory equipment whether or not electrically heated excluding furnaces ovens and other equipment of heading for the treatment of 8514 materials by a process involving a change of temperature such as heating cooking roasting distilling rectifying sterilising pasteurising steaming drying evaporating vaporising condensing or cooling other than machinery or plant of a kind used for domestic purposes instantaneous or storage water heaters non electric >> other pressure vessels reactors columns or towers or >> reactors with total internal geometric volume greater than 100 l and less than 20 +84198914,machinery plant or laboratory equipment whether or not electrically heated excluding furnaces ovens and other equipment of heading for the treatment of 8514 materials by a process involving a change of temperature such as heating cooking roasting distilling rectifying sterilising pasteurising steaming drying evaporating vaporising condensing or cooling other than machinery or plant of a kind used for domestic purposes instantaneous or storage water heaters non electric >> other pressure vessels reactors columns or towers or >> distillation or absorption columns of internal diameter greater than m +84198916,machinery plant or laboratory equipment whether or not electrically heated excluding furnaces ovens and other equipment of heading for the treatment of 8514 materials by a process involving a change of temperature such as heating cooking roasting distilling rectifying sterilising pasteurising steaming drying evaporating vaporising condensing or cooling other than machinery or plant of a kind used for domestic purposes instantaneous or storage water heaters non electric >> other pressure vessels 
reactors columns or towers or >> chemical storage tanks with a total internal geometric volume greater than 100 l +84198919,machinery plant or laboratory equipment whether or not electrically heated excluding furnaces ovens and other equipment of heading for the treatment of 8514 materials by a process involving a change of temperature such as heating cooking roasting distilling rectifying sterilising pasteurising steaming drying evaporating vaporising condensing or cooling other than machinery or plant of a kind used for domestic purposes instantaneous or storage water heaters non electric >> other pressure vessels reactors columns or towers or >> other +84198920,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:Other :Glass lined equipment" +84198930,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:Other :Auto claves other than for cooking or heating food, not elsewhere specified or included" +84198940,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:Other :Cooling towers and similar plants for direct cooling (without a separating wall) by means of recirculated water" +84198950,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:Other :Pasteurizers" +84198960,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:Other :Plant growth chambers and rooms and tissue culture chambers and rooms having temperature, 
humidity or light control" +84198970,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:Other :Apparatus for rapid heating of semi-conductor devices; apparatus for chemical or physical vapour deposition on semi-conductor wafers; apparatus for chemical vapour deposition on LCD substratus" +84198980,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:Other :Vacuum-vapour plant for deposition of metals" +84198990,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:Other :Other" +84199000,machinery plant or laboratory equipment whether or not electrically heated excluding furnaces ovens and other equipment of heading for the treatment of 8514 materials by a process involving a change of temperature such as heating cooking roasting distilling rectifying sterilising pasteurising steaming drying evaporating vaporising condensing or cooling other than machinery or plant of a kind used for domestic purposes instantaneous or storage water heaters non electric >> parts +84199010,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:Parts :Parts of instantaneous or storage water heaters (domestic type)" +84199090,"MACHINERY, PLANT OR LABORATORY EQUIPMENT, WHETHER OR NOT ELECTRICALLY HEATED (EXCLUDING FURNACES, OVENS AND OTHER EQUIPMENT OF HEADING 8514), FOR THE TREATMENT OF MATERIALS BY A PROCESS INVOLVING A CHANGE OF TEMPERATURE SUCH AS HEATING, COOKING, ROASTING, DISTILLING, RECTIFYING, STERILISING, PASTEURISING, STEAMING, DRYING, EVAPORATING, VAPORISING, CONDENSING OR COOLING, OTHER THAN MACHINERY OR PLANT OF A KIND USED FOR DOMESTIC PURPOSES; INSTANTANEOUS OR STORAGE WATER HEATERS, NON-ELECTRIC:Parts :Other" +84200000,calendering or other rolling machines other than for metals or glass and cylinders therefor +84201000,"CALENDERING OR OTHER ROLLING MACHINES, 
OTHER THAN FOR METALS OR GLASS, AND CYLINDERS THEREFOR::Calendering or other rolling machines" +84209100,"CALENDERING OR OTHER ROLLING MACHINES, OTHER THAN FOR METALS OR GLASS, AND CYLINDERS THEREFOR::Cylinders" +84209900,"CALENDERING OR OTHER ROLLING MACHINES, OTHER THAN FOR METALS OR GLASS, AND CYLINDERS THEREFOR::Other" +84210000,filtering or purifying machinery and apparatus for liquids or gases centrifuges including centrifugal dryers +84211100,"CENTRIFUGES, INCLUDING CENTRIFUGAL DRYERS; FILTERING OR PURIFYING MACHINERY AND APPARATUS, FOR LIQUIDS OR GASES::Cream separators" +84211200,"CENTRIFUGES, INCLUDING CENTRIFUGAL DRYERS; FILTERING OR PURIFYING MACHINERY AND APPARATUS, FOR LIQUIDS OR GASES::Clothes-dryers" +84211900,filtering or purifying machinery and apparatus for liquids or gases centrifuges including centrifugal dryers >> other +84211910,"CENTRIFUGES, INCLUDING CENTRIFUGAL DRYERS; FILTERING OR PURIFYING MACHINERY AND APPARATUS, FOR LIQUIDS OR GASES:Other :Bowl centrifuges" +84211920,"CENTRIFUGES, INCLUDING CENTRIFUGAL DRYERS; FILTERING OR PURIFYING MACHINERY AND APPARATUS, FOR LIQUIDS OR GASES:Other :Basket centrifuges" +84211930,"CENTRIFUGES, INCLUDING CENTRIFUGAL DRYERS; FILTERING OR PURIFYING MACHINERY AND APPARATUS, FOR LIQUIDS OR GASES:Other :Continuous automatic centrifuges" +84211940,"CENTRIFUGES, INCLUDING CENTRIFUGAL DRYERS; FILTERING OR PURIFYING MACHINERY AND APPARATUS, FOR LIQUIDS OR GASES:Other :Self cleaning centrifuges" +84211950,"CENTRIFUGES, INCLUDING CENTRIFUGAL DRYERS; FILTERING OR PURIFYING MACHINERY AND APPARATUS, FOR LIQUIDS OR GASES:Other :Decanter centrifuges horizontal bowl" +84211960,"CENTRIFUGES, INCLUDING CENTRIFUGAL DRYERS; FILTERING OR PURIFYING MACHINERY AND APPARATUS, FOR LIQUIDS OR GASES:Other :Screw conveyor centrifuges" +84211991,"CENTRIFUGES, INCLUDING CENTRIFUGAL DRYERS; FILTERING OR PURIFYING MACHINERY AND APPARATUS, FOR LIQUIDS OR GASES:Other :For chemical industries" +84211999,"CENTRIFUGES, INCLUDING CENTRIFUGAL DRYERS; FILTERING OR PURIFYING MACHINERY AND APPARATUS, FOR LIQUIDS OR GASES:Other :Other" +84212100,filtering or purifying machinery and apparatus for liquids or gases centrifuges including centrifugal dryers >> for filtering or purifying water +84212110,"CENTRIFUGES, INCLUDING CENTRIFUGAL DRYERS; FILTERING OR PURIFYING MACHINERY AND APPARATUS, FOR LIQUIDS OR GASES:For filtering or purifying water :Ion exchanger plant or apparatus" +84212120,"CENTRIFUGES, INCLUDING CENTRIFUGAL DRYERS; FILTERING OR PURIFYING MACHINERY AND APPARATUS, FOR LIQUIDS OR GASES:For filtering or purifying water :Household type filters" +84212190,"CENTRIFUGES, INCLUDING CENTRIFUGAL DRYERS; FILTERING OR PURIFYING MACHINERY AND APPARATUS, FOR LIQUIDS OR GASES:For filtering or purifying water :Other" +84212200,"CENTRIFUGES, INCLUDING CENTRIFUGAL DRYERS; FILTERING OR PURIFYING MACHINERY AND APPARATUS, FOR LIQUIDS OR GASES::For filtering or purifying beverages other than water" +84212300,"CENTRIFUGES, INCLUDING CENTRIFUGAL DRYERS; FILTERING OR PURIFYING MACHINERY AND APPARATUS, FOR LIQUIDS OR GASES::Oil or petrol-filters for internal combustion engines" +84212900,"CENTRIFUGES, INCLUDING CENTRIFUGAL DRYERS; FILTERING OR PURIFYING MACHINERY AND APPARATUS, FOR LIQUIDS OR GASES::Other" +84213100,"CENTRIFUGES, INCLUDING CENTRIFUGAL DRYERS; FILTERING OR PURIFYING MACHINERY AND APPARATUS, FOR LIQUIDS OR GASES::Intake air filters for internal combustion engines" +84213200,"CENTRIFUGES, INCLUDING CENTRIFUGAL DRYERS; FILTERING OR PURIFYING MACHINERY AND APPARATUS, FOR 
LIQUIDS OR GASES:catalytic:Catalytic converters or particulate filters, whether or not combined, for purifying or filtering exhaust gases from internal combustion engines" +84213900,filtering or purifying machinery and apparatus for liquids or gases centrifuges including centrifugal dryers >> other +84213910,"CENTRIFUGES, INCLUDING CENTRIFUGAL DRYERS; FILTERING OR PURIFYING MACHINERY AND APPARATUS, FOR LIQUIDS OR GASES:Other :Air separators to be employed in the processing, smelting or refining of minerals, ores or metals; air strippers" +84213920,"CENTRIFUGES, INCLUDING CENTRIFUGAL DRYERS; FILTERING OR PURIFYING MACHINERY AND APPARATUS, FOR LIQUIDS OR GASES:Other :Air purifiers or cleaners" +84213990,"CENTRIFUGES, INCLUDING CENTRIFUGAL DRYERS; FILTERING OR PURIFYING MACHINERY AND APPARATUS, FOR LIQUIDS OR GASES:Other :Other" +84219100,"CENTRIFUGES, INCLUDING CENTRIFUGAL DRYERS; FILTERING OR PURIFYING MACHINERY AND APPARATUS, FOR LIQUIDS OR GASES::Of centrifuges, including centrifugal dryers" +84219900,"CENTRIFUGES, INCLUDING CENTRIFUGAL DRYERS; FILTERING OR PURIFYING MACHINERY AND APPARATUS, FOR LIQUIDS OR GASES::Other" +84221100,"DISH WASHING MACHINES; MACHINERY FOR CLEANING OR DRYING BOTTLES OR OTHER CONTAINERS; MACHINERY FOR FILLING, CLOSING, SEALING OR LABELLING BOTTLES, CANS, BOXES, BAGS OR OTHER CONTAINERS; MACHINERY FOR CAPSULING BOTTLES, JARS, TUBES AND SIMILAR CONTAINERS; OTHER PACKING OR WRAPPING MACHINERY (INCLUDING HEAT-SHRINK WRAPPING MACHINERY); MACHINERY FOR AERATING BEVERAGES::Of the household type(OLD tariff)" +84221900,"DISH WASHING MACHINES; MACHINERY FOR CLEANING OR DRYING BOTTLES OR OTHER CONTAINERS; MACHINERY FOR FILLING, CLOSING, SEALING OR LABELLING BOTTLES, CANS, BOXES, BAGS OR OTHER CONTAINERS; MACHINERY FOR CAPSULING BOTTLES, JARS, TUBES AND SIMILAR CONTAINERS; OTHER PACKING OR WRAPPING MACHINERY (INCLUDING HEAT-SHRINK WRAPPING MACHINERY); MACHINERY FOR AERATING BEVERAGES::Other(OLD tariff)" +84222000,"DISH WASHING MACHINES; MACHINERY FOR CLEANING OR DRYING BOTTLES OR OTHER CONTAINERS; MACHINERY FOR FILLING, CLOSING, SEALING OR LABELLING BOTTLES, CANS, BOXES, BAGS OR OTHER CONTAINERS; MACHINERY FOR CAPSULING BOTTLES, JARS, TUBES AND SIMILAR CONTAINERS; OTHER PACKING OR WRAPPING MACHINERY (INCLUDING HEAT-SHRINK WRAPPING MACHINERY); MACHINERY FOR AERATING BEVERAGES::Machinery for cleaning or drying bottles or other containers(OLD tariff)" +84223000,"DISH WASHING MACHINES; MACHINERY FOR CLEANING OR DRYING BOTTLES OR OTHER CONTAINERS; MACHINERY FOR FILLING, CLOSING, SEALING OR LABELLING BOTTLES, CANS, BOXES, BAGS OR OTHER CONTAINERS; MACHINERY FOR CAPSULING BOTTLES, JARS, TUBES AND SIMILAR CONTAINERS; OTHER PACKING OR WRAPPING MACHINERY (INCLUDING HEAT-SHRINK WRAPPING MACHINERY); MACHINERY FOR AERATING BEVERAGES::Machinery for filling, closing, sealing or labelling bottles, cans, boxes, bags or othercontainers; machinery for capsuling bottles, jars, tubes and similar containers; machinery for aerating beverages(OLD tariff)" +84224000,"DISH WASHING MACHINES; MACHINERY FOR CLEANING OR DRYING BOTTLES OR OTHER CONTAINERS; MACHINERY FOR FILLING, CLOSING, SEALING OR LABELLING BOTTLES, CANS, BOXES, BAGS OR OTHER CONTAINERS; MACHINERY FOR CAPSULING BOTTLES, JARS, TUBES AND SIMILAR CONTAINERS; OTHER PACKING OR WRAPPING MACHINERY (INCLUDING HEAT-SHRINK WRAPPING MACHINERY); MACHINERY FOR AERATING BEVERAGES::Other packing or wrapping machinery (including heat-shrink wrapping machinery)(OLD tariff)" +84229010,"DISH WASHING MACHINES; MACHINERY FOR CLEANING OR DRYING BOTTLES OR 
OTHER CONTAINERS; MACHINERY FOR FILLING, CLOSING, SEALING OR LABELLING BOTTLES, CANS, BOXES, BAGS OR OTHER CONTAINERS; MACHINERY FOR CAPSULING BOTTLES, JARS, TUBES AND SIMILAR CONTAINERS; OTHER PACKING OR WRAPPING MACHINERY (INCLUDING HEAT-SHRINK WRAPPING MACHINERY); MACHINERY FOR AERATING BEVERAGES:Parts:Of machinery for cleaning or drying bottles or other containers(OLD tariff)" +84229020,"DISH WASHING MACHINES; MACHINERY FOR CLEANING OR DRYING BOTTLES OR OTHER CONTAINERS; MACHINERY FOR FILLING, CLOSING, SEALING OR LABELLING BOTTLES, CANS, BOXES, BAGS OR OTHER CONTAINERS; MACHINERY FOR CAPSULING BOTTLES, JARS, TUBES AND SIMILAR CONTAINERS; OTHER PACKING OR WRAPPING MACHINERY (INCLUDING HEAT-SHRINK WRAPPING MACHINERY); MACHINERY FOR AERATING BEVERAGES:Parts:Of dish washing machines of household type(OLD tariff)" +84229090,"DISH WASHING MACHINES; MACHINERY FOR CLEANING OR DRYING BOTTLES OR OTHER CONTAINERS; MACHINERY FOR FILLING, CLOSING, SEALING OR LABELLING BOTTLES, CANS, BOXES, BAGS OR OTHER CONTAINERS; MACHINERY FOR CAPSULING BOTTLES, JARS, TUBES AND SIMILAR CONTAINERS; OTHER PACKING OR WRAPPING MACHINERY (INCLUDING HEAT-SHRINK WRAPPING MACHINERY); MACHINERY FOR AERATING BEVERAGES:Parts:Of other machinery(OLD tariff)" +84230000,weighing machinery excluding balances of a sensitivity of 5 centigrams or better including weight operated counting or checking machines weighing machine weights of all kinds +84231000,"WEIGHING MACHINERY (EXCLUDING BALANCES OF A SENSITIVITY OF 5 CENTIGRAMS OR BETTER), INCLUDING WEIGHT OPERATED COUNTING OR CHECKING MACHINES; WEIGHING MACHINE WEIGHTS OF ALL KINDS::Personal weighing machines, including baby scales; household scales" +84232000,"WEIGHING MACHINERY (EXCLUDING BALANCES OF A SENSITIVITY OF 5 CENTIGRAMS OR BETTER), INCLUDING WEIGHT OPERATED COUNTING OR CHECKING MACHINES; WEIGHING MACHINE WEIGHTS OF ALL KINDS::Scales for continuous weighing of goods on conveyors" +84233000,"WEIGHING MACHINERY (EXCLUDING BALANCES OF A SENSITIVITY OF 5 CENTIGRAMS OR BETTER), INCLUDING WEIGHT OPERATED COUNTING OR CHECKING MACHINES; WEIGHING MACHINE WEIGHTS OF ALL KINDS::Constant weight scales and scales for discharging a predetermined weight of material into a bag or container, including hopper scales" +84238100,weighing machinery excluding balances of a sensitivity of 5 centigrams or better including weight operated counting or checking machines weighing machine weights of all kinds >> having a maximum weighing capacity not exceeding 30 kg +84238110,"WEIGHING MACHINERY (EXCLUDING BALANCES OF A SENSITIVITY OF 5 CENTIGRAMS OR BETTER), INCLUDING WEIGHT OPERATED COUNTING OR CHECKING MACHINES; WEIGHING MACHINE WEIGHTS OF ALL KINDS:Having a maximum weighing capacity not exceeding 30 kg :Beam scale" +84238190,"WEIGHING MACHINERY (EXCLUDING BALANCES OF A SENSITIVITY OF 5 CENTIGRAMS OR BETTER), INCLUDING WEIGHT OPERATED COUNTING OR CHECKING MACHINES; WEIGHING MACHINE WEIGHTS OF ALL KINDS:Having a maximum weighing capacity not exceeding 30 kg :Other" +84238200,weighing machinery excluding balances of a sensitivity of 5 centigrams or better including weight operated counting or checking machines weighing machine weights of all kinds >> having a maximum weighing capacity exceeding 30 kg but not exceeding 5000 kg +84238210,"WEIGHING MACHINERY (EXCLUDING BALANCES OF A SENSITIVITY OF 5 CENTIGRAMS OR BETTER), INCLUDING WEIGHT OPERATED COUNTING OR CHECKING MACHINES; WEIGHING MACHINE WEIGHTS OF ALL KINDS:Having a maximum weighing capacity exceeding 30 kg but 
not exceeding 5,000 kg :Beam scale" +84238290,"WEIGHING MACHINERY (EXCLUDING BALANCES OF A SENSITIVITY OF 5 CENTIGRAMS OR BETTER), INCLUDING WEIGHT OPERATED COUNTING OR CHECKING MACHINES; WEIGHING MACHINE WEIGHTS OF ALL KINDS:Having a maximum weighing capacity exceeding 30 kg but not exceeding 5,000 kg :Other" +84238900,"WEIGHING MACHINERY (EXCLUDING BALANCES OF A SENSITIVITY OF 5 CENTIGRAMS OR BETTER), INCLUDING WEIGHT OPERATED COUNTING OR CHECKING MACHINES; WEIGHING MACHINE WEIGHTS OF ALL KINDS::Other" +84239000,weighing machinery excluding balances of a sensitivity of 5 centigrams or better including weight operated counting or checking machines weighing machine weights of all kinds >> weighing machine weights of all kinds parts of weighing machinery +84239010,"WEIGHING MACHINERY (EXCLUDING BALANCES OF A SENSITIVITY OF 5 CENTIGRAMS OR BETTER), INCLUDING WEIGHT OPERATED COUNTING OR CHECKING MACHINES; WEIGHING MACHINE WEIGHTS OF ALL KINDS:Weighing machine weights of all kinds; parts of weighing machinery :Weighing machine weights of all kinds" +84239020,"WEIGHING MACHINERY (EXCLUDING BALANCES OF A SENSITIVITY OF 5 CENTIGRAMS OR BETTER), INCLUDING WEIGHT OPERATED COUNTING OR CHECKING MACHINES; WEIGHING MACHINE WEIGHTS OF ALL KINDS:Weighing machine weights of all kinds; parts of weighing machinery :Parts of weighing machinery" +84240000,mechanical appliances whether or not hand operated for projecting dispersing or spraying liquids or powders fire extinguishers whether or not charged spray +84241000,"MECHANICAL APPLIANCES (WHETHER OR NOT HANDOPERATED) FOR PROJECTING, DISPERSING OR SPRAYING LIQUIDS OR POWDERS; FIRE EXTINGUISHERS, WHETHER OR NOT CHARGED; SPRAY GUNS AND SIMILAR APPLIANCES; STEAM OR SAND BLASTING MACHINES AND SIMILAR JET PROJECTING MACHINES::Fire extinguishers, whether or not charged" +84242000,"MECHANICAL APPLIANCES (WHETHER OR NOT HANDOPERATED) FOR PROJECTING, DISPERSING OR SPRAYING LIQUIDS OR POWDERS; FIRE EXTINGUISHERS, WHETHER OR NOT CHARGED; SPRAY GUNS AND SIMILAR APPLIANCES; STEAM OR SAND BLASTING MACHINES AND SIMILAR JET PROJECTING MACHINES::Spray guns and similar appliances" +84243000,"MECHANICAL APPLIANCES (WHETHER OR NOT HANDOPERATED) FOR PROJECTING, DISPERSING OR SPRAYING LIQUIDS OR POWDERS; FIRE EXTINGUISHERS, WHETHER OR NOT CHARGED; SPRAY GUNS AND SIMILAR APPLIANCES; STEAM OR SAND BLASTING MACHINES AND SIMILAR JET PROJECTING MACHINES::Steam or sand blasting machines and similar jet projecting machines" +84244100,"MECHANICAL APPLIANCES (WHETHER OR NOT HANDOPERATED) FOR PROJECTING, DISPERSING OR SPRAYING LIQUIDS OR POWDERS; FIRE EXTINGUISHERS, WHETHER OR NOT CHARGED; SPRAY GUNS AND SIMILAR APPLIANCES; STEAM OR SAND BLASTING MACHINES AND SIMILAR JET PROJECTING MACHINES::Portable sprayers" +84244900,"MECHANICAL APPLIANCES (WHETHER OR NOT HANDOPERATED) FOR PROJECTING, DISPERSING OR SPRAYING LIQUIDS OR POWDERS; FIRE EXTINGUISHERS, WHETHER OR NOT CHARGED; SPRAY GUNS AND SIMILAR APPLIANCES; STEAM OR SAND BLASTING MACHINES AND SIMILAR JET PROJECTING MACHINES::Other" +84248100,"MECHANICAL APPLIANCES (WHETHER OR NOT HANDOPERATED) FOR PROJECTING, DISPERSING OR SPRAYING LIQUIDS OR POWDERS; FIRE EXTINGUISHERS, WHETHER OR NOT CHARGED; SPRAY GUNS AND SIMILAR APPLIANCES; STEAM OR SAND BLASTING MACHINES AND SIMILAR JET PROJECTING MACHINES::Agricultural or horticultural(OLD tariff)" +84248200,"MECHANICAL APPLIANCES (WHETHER OR NOT HANDOPERATED) FOR PROJECTING, DISPERSING OR SPRAYING LIQUIDS OR 
POWDERS; FIRE EXTINGUISHERS, WHETHER OR NOT CHARGED; SPRAY GUNS AND SIMILAR APPLIANCES; STEAM OR SAND BLASTING MACHINES AND SIMILAR JET PROJECTING MACHINES::Agricultural or horticultural" +84248900,mechanical appliances whether or not hand operated for projecting dispersing or spraying liquids or powders fire extinguishers whether or not charged spray >> other +84248910,"MECHANICAL APPLIANCES (WHETHER OR NOT HANDOPERATED) FOR PROJECTING, DISPERSING OR SPRAYING LIQUIDS OR POWDERS; FIRE EXTINGUISHERS, WHETHER OR NOT CHARGED; SPRAY GUNS AND SIMILAR APPLIANCES; STEAM OR SAND BLASTING MACHINES AND SIMILAR JET PROJECTING MACHINES:Other :Painting equipment, including electrostatic phosphating and powder coating equipment" +84248920,"MECHANICAL APPLIANCES (WHETHER OR NOT HANDOPERATED) FOR PROJECTING, DISPERSING OR SPRAYING LIQUIDS OR POWDERS; FIRE EXTINGUISHERS, WHETHER OR NOT CHARGED; SPRAY GUNS AND SIMILAR APPLIANCES; STEAM OR SAND BLASTING MACHINES AND SIMILAR JET PROJECTING MACHINES:Other :Industrial bellows" +84248990,"MECHANICAL APPLIANCES (WHETHER OR NOT HANDOPERATED) FOR PROJECTING, DISPERSING OR SPRAYING LIQUIDS OR POWDERS; FIRE EXTINGUISHERS, WHETHER OR NOT CHARGED; SPRAY GUNS AND SIMILAR APPLIANCES; STEAM OR SAND BLASTING MACHINES AND SIMILAR JET PROJECTING MACHINES:Other :Other" +84249000,"MECHANICAL APPLIANCES (WHETHER OR NOT HANDOPERATED) FOR PROJECTING, DISPERSING OR SPRAYING LIQUIDS OR POWDERS; FIRE EXTINGUISHERS, WHETHER OR NOT CHARGED; SPRAY GUNS AND SIMILAR APPLIANCES; STEAM OR SAND BLASTING MACHINES AND SIMILAR JET PROJECTING MACHINES::Parts" +84250000,pulley tackle and hoists other than skip hoists winches and capstans jacks pulley tackles and hoists other than skip hoists or hoists of a kind used for raising vehicles +84251100,pulley tackle and hoists other than skip hoists winches and capstans jacks pulley tackles and hoists other than skip hoists or hoists of a kind used for raising vehicles >> powered by electric motor +84251110,PULLEY TACKLE AND HOISTS OTHER THAN SKIP HOISTS; WINCHES AND CAPSTANS; JACKS:Powered by electric motor:Hoists +84251120,PULLEY TACKLE AND HOISTS OTHER THAN SKIP HOISTS; WINCHES AND CAPSTANS; JACKS:Powered by electric motor:Pulley tackle +84251900,pulley tackle and hoists other than skip hoists winches and capstans jacks pulley tackles and hoists other than skip hoists or hoists of a kind used for raising vehicles >> other +84251910,PULLEY TACKLE AND HOISTS OTHER THAN SKIP HOISTS; WINCHES AND CAPSTANS; JACKS:Other :Hoists machine +84251920,PULLEY TACKLE AND HOISTS OTHER THAN SKIP HOISTS; WINCHES AND CAPSTANS; JACKS:Other :Pulley tackle Other winches; capstans +84253100,PULLEY TACKLE AND HOISTS OTHER THAN SKIP HOISTS; WINCHES AND CAPSTANS; JACKS::Powered by electric motor +84253900,PULLEY TACKLE AND HOISTS OTHER THAN SKIP HOISTS; WINCHES AND CAPSTANS; JACKS::Other +84254100,PULLEY TACKLE AND HOISTS OTHER THAN SKIP HOISTS; WINCHES AND CAPSTANS; JACKS::Built-in jacking system of a type used in garages +84254200,"PULLEY TACKLE AND HOISTS OTHER THAN SKIP HOISTS; WINCHES AND CAPSTANS; JACKS::Other jacks and hoists, hydraulic" +84254900,PULLEY TACKLE AND HOISTS OTHER THAN SKIP HOISTS; WINCHES AND CAPSTANS; JACKS::Other +84260000,ship s derricks cranes including cable cranes mobile lifting frames straddle carriers and works trucks fitted with a crane overhead travelling cranes transporter cranes gantry cranes bridge cranes mobile lifting frames and straddle carriers +84261100,"SHIP'S DERRICKS; 
CRANES INCLUDING CABLE CRANES; MOBILE LIFTING FRAMES, STRADDLE CARRIERS AND WORKS TRUCKS FITTED WITH A CRANE Overhead travelling cranes, transporter cranes, gantry cranes, bridge cranes, mobile lifting frames and straddle carriers ::Overhead travelling cranes on fixed support" +84261200,"SHIP'S DERRICKS; CRANES INCLUDING CABLE CRANES; MOBILE LIFTING FRAMES, STRADDLE CARRIERS AND WORKS TRUCKS FITTED WITH A CRANE Overhead travelling cranes, transporter cranes, gantry cranes, bridge cranes, mobile lifting frames and straddle carriers ::Mobile lifting frames on tyres and straddle carriers" +84261900,"SHIP'S DERRICKS; CRANES INCLUDING CABLE CRANES; MOBILE LIFTING FRAMES, STRADDLE CARRIERS AND WORKS TRUCKS FITTED WITH A CRANE Overhead travelling cranes, transporter cranes, gantry cranes, bridge cranes, mobile lifting frames and straddle carriers ::Other" +84262000,"SHIP'S DERRICKS; CRANES INCLUDING CABLE CRANES; MOBILE LIFTING FRAMES, STRADDLE CARRIERS AND WORKS TRUCKS FITTED WITH A CRANE Overhead travelling cranes, transporter cranes, gantry cranes, bridge cranes, mobile lifting frames and straddle carriers ::Tower cranes" +84263000,"SHIP'S DERRICKS; CRANES INCLUDING CABLE CRANES; MOBILE LIFTING FRAMES, STRADDLE CARRIERS AND WORKS TRUCKS FITTED WITH A CRANE Overhead travelling cranes, transporter cranes, gantry cranes, bridge cranes, mobile lifting frames and straddle carriers ::Portal or pedestal jib cranes" +84264100,"SHIP'S DERRICKS; CRANES INCLUDING CABLE CRANES; MOBILE LIFTING FRAMES, STRADDLE CARRIERS AND WORKS TRUCKS FITTED WITH A CRANE Overhead travelling cranes, transporter cranes, gantry cranes, bridge cranes, mobile lifting frames and straddle carriers ::On tyres" +84264900,"SHIP'S DERRICKS; CRANES INCLUDING CABLE CRANES; MOBILE LIFTING FRAMES, STRADDLE CARRIERS AND WORKS TRUCKS FITTED WITH A CRANE Overhead travelling cranes, transporter cranes, gantry cranes, bridge cranes, mobile lifting frames and straddle carriers ::Other" +84269100,"SHIP'S DERRICKS; CRANES INCLUDING CABLE CRANES; MOBILE LIFTING FRAMES, STRADDLE CARRIERS AND WORKS TRUCKS FITTED WITH A CRANE Overhead travelling cranes, transporter cranes, gantry cranes, bridge cranes, mobile lifting frames and straddle carriers ::Designed for mounting on road vehicles" +84269900,ship s derricks cranes including cable cranes mobile lifting frames straddle carriers and works trucks fitted with a crane overhead travelling cranes transporter cranes gantry cranes bridge cranes mobile lifting frames and straddle carriers >> other +84269910,"SHIP'S DERRICKS; CRANES INCLUDING CABLE CRANES; MOBILE LIFTING FRAMES, STRADDLE CARRIERS AND WORKS TRUCKS FITTED WITH A CRANE Overhead travelling cranes, transporter cranes, gantry cranes, bridge cranes, mobile lifting frames and straddle carriers :Other :Ropeway and telphers" +84269990,"SHIP'S DERRICKS; CRANES INCLUDING CABLE CRANES; MOBILE LIFTING FRAMES, STRADDLE CARRIERS AND WORKS TRUCKS FITTED WITH A CRANE Overhead travelling cranes, transporter cranes, gantry cranes, bridge cranes, mobile lifting frames and straddle carriers :Other :Other" +84270000,trucks other works trucks fitted with lifting or handling equipment +84271000,FORK-LIFT TRUCKS; OTHER WORKS TRUCKS FITTED WITH LIFTING OR HANDLING EQUIPMENT::Self-propelled trucks powered by an electric motor +84272000,FORK-LIFT TRUCKS; OTHER WORKS TRUCKS FITTED WITH LIFTING OR HANDLING EQUIPMENT::Other self-propelled trucks +84279000,FORK-LIFT TRUCKS; OTHER WORKS TRUCKS FITTED WITH LIFTING OR HANDLING EQUIPMENT::Other trucks +84280000,other lifting 
handling loading or unloading machinery for example lifts escalators conveyors teleferics +84281000,other lifting handling loading or unloading machinery for example lifts escalators conveyors teleferics >> lifts and skip hoists lifts +84281011,"OTHER LIFTING, HANDLING, LOADING OR UNLOADING MACHINERY (FOR EXAMPLE, LIFTS, ESCALATORS, CONVEYORS, TELEFERICS):Lifts and skip hoists:Lifts of a kind used in buildings" +84281019,"OTHER LIFTING, HANDLING, LOADING OR UNLOADING MACHINERY (FOR EXAMPLE, LIFTS, ESCALATORS, CONVEYORS, TELEFERICS):Lifts and skip hoists:Other" +84281020,"OTHER LIFTING, HANDLING, LOADING OR UNLOADING MACHINERY (FOR EXAMPLE, LIFTS, ESCALATORS, CONVEYORS, TELEFERICS):Lifts and skip hoists:Skip hoists" +84282000,other lifting handling loading or unloading machinery for example lifts escalators conveyors teleferics >> pneumatic elevators and conveyors conveyors +84282011,"OTHER LIFTING, HANDLING, LOADING OR UNLOADING MACHINERY (FOR EXAMPLE, LIFTS, ESCALATORS, CONVEYORS, TELEFERICS):Pneumatic elevators and conveyors:Belt conveyors" +84282019,"OTHER LIFTING, HANDLING, LOADING OR UNLOADING MACHINERY (FOR EXAMPLE, LIFTS, ESCALATORS, CONVEYORS, TELEFERICS):Pneumatic elevators and conveyors:Other" +84282020,"OTHER LIFTING, HANDLING, LOADING OR UNLOADING MACHINERY (FOR EXAMPLE, LIFTS, ESCALATORS, CONVEYORS, TELEFERICS):Pneumatic elevators and conveyors:Pneumatic elevators" +84283100,"OTHER LIFTING, HANDLING, LOADING OR UNLOADING MACHINERY (FOR EXAMPLE, LIFTS, ESCALATORS, CONVEYORS, TELEFERICS)::Specially designed for underground use" +84283200,"OTHER LIFTING, HANDLING, LOADING OR UNLOADING MACHINERY (FOR EXAMPLE, LIFTS, ESCALATORS, CONVEYORS, TELEFERICS)::Other, bucket type" +84283300,"OTHER LIFTING, HANDLING, LOADING OR UNLOADING MACHINERY (FOR EXAMPLE, LIFTS, ESCALATORS, CONVEYORS, TELEFERICS)::Other, belt type" +84283900,"OTHER LIFTING, HANDLING, LOADING OR UNLOADING MACHINERY (FOR EXAMPLE, LIFTS, ESCALATORS, CONVEYORS, TELEFERICS)::Other" +84284000,"OTHER LIFTING, HANDLING, LOADING OR UNLOADING MACHINERY (FOR EXAMPLE, LIFTS, ESCALATORS, CONVEYORS, TELEFERICS)::Escalators and moving walkways" +84286000,"OTHER LIFTING, HANDLING, LOADING OR UNLOADING MACHINERY (FOR EXAMPLE, LIFTS, ESCALATORS, CONVEYORS, TELEFERICS)::Teleferics, chair-lifts, ski-raglines, traction mechanisms for funiculars" +84287000,"OTHER LIFTING, HANDLING, LOADING OR UNLOADING MACHINERY (FOR EXAMPLE, LIFTS, ESCALATORS, CONVEYORS, TELEFERICS):robot:Industrial robots" +84289000,other lifting handling loading or unloading machinery for example lifts escalators conveyors teleferics >> other machinery +84289010,"OTHER LIFTING, HANDLING, LOADING OR UNLOADING MACHINERY (FOR EXAMPLE, LIFTS, ESCALATORS, CONVEYORS, TELEFERICS):Other machinery :For coal handling" +84289020,"OTHER LIFTING, HANDLING, LOADING OR UNLOADING MACHINERY (FOR EXAMPLE, LIFTS, ESCALATORS, CONVEYORS, TELEFERICS):Other machinery :For ash handling" +84289090,"OTHER LIFTING, HANDLING, LOADING OR UNLOADING MACHINERY (FOR EXAMPLE, LIFTS, ESCALATORS, CONVEYORS, TELEFERICS):Other machinery :Other" +84290000,bulldozers angledozers graders levellers scrapers mechanical shovels excavators shovel loaders tamping machines and road rollers bulldozers and angledozers +84291100,bulldozers angledozers graders levellers scrapers mechanical shovels excavators shovel loaders tamping machines and road rollers bulldozers and angledozers >> track laying +84291110,"SELF-PROPELLED BULLDOZERS, ANGLEDOZERS, GRADERS, LEVELLERS, SCRAPERS, MECHANICAL SHOVELS, EXCAVATORS, SHOVEL 
LOADERS, TAMPING MACHINES AND ROAD ROLLERS:Track laying :Angledozers" +84291120,"SELF-PROPELLED BULLDOZERS, ANGLEDOZERS, GRADERS, LEVELLERS, SCRAPERS, MECHANICAL SHOVELS, EXCAVATORS, SHOVEL LOADERS, TAMPING MACHINES AND ROAD ROLLERS:Track laying :Bulldozers" +84291900,bulldozers angledozers graders levellers scrapers mechanical shovels excavators shovel loaders tamping machines and road rollers bulldozers and angledozers >> other +84291910,"SELF-PROPELLED BULLDOZERS, ANGLEDOZERS, GRADERS, LEVELLERS, SCRAPERS, MECHANICAL SHOVELS, EXCAVATORS, SHOVEL LOADERS, TAMPING MACHINES AND ROAD ROLLERS:Other :Angledozers" +84291920,"SELF-PROPELLED BULLDOZERS, ANGLEDOZERS, GRADERS, LEVELLERS, SCRAPERS, MECHANICAL SHOVELS, EXCAVATORS, SHOVEL LOADERS, TAMPING MACHINES AND ROAD ROLLERS:Other :Bulldozers" +84292000,"SELF-PROPELLED BULLDOZERS, ANGLEDOZERS, GRADERS, LEVELLERS, SCRAPERS, MECHANICAL SHOVELS, EXCAVATORS, SHOVEL LOADERS, TAMPING MACHINES AND ROAD ROLLERS::Graders and levellers" +84293000,"SELF-PROPELLED BULLDOZERS, ANGLEDOZERS, GRADERS, LEVELLERS, SCRAPERS, MECHANICAL SHOVELS, EXCAVATORS, SHOVEL LOADERS, TAMPING MACHINES AND ROAD ROLLERS::Scrappers" +84294000,bulldozers angledozers graders levellers scrapers mechanical shovels excavators shovel loaders tamping machines and road rollers bulldozers and angledozers >> tamping machines and road rollers +84294010,"SELF-PROPELLED BULLDOZERS, ANGLEDOZERS, GRADERS, LEVELLERS, SCRAPERS, MECHANICAL SHOVELS, EXCAVATORS, SHOVEL LOADERS, TAMPING MACHINES AND ROAD ROLLERS:Tamping machines and road rollers :Road rollers upto 5 tons capacity" +84294020,"SELF-PROPELLED BULLDOZERS, ANGLEDOZERS, GRADERS, LEVELLERS, SCRAPERS, MECHANICAL SHOVELS, EXCAVATORS, SHOVEL LOADERS, TAMPING MACHINES AND ROAD ROLLERS:Tamping machines and road rollers :Road rollers above 5 tons capacity" +84294030,"SELF-PROPELLED BULLDOZERS, ANGLEDOZERS, GRADERS, LEVELLERS, SCRAPERS, MECHANICAL SHOVELS, EXCAVATORS, SHOVEL LOADERS, TAMPING MACHINES AND ROAD ROLLERS:Tamping machines and road rollers :Tamping machines" +84295100,"SELF-PROPELLED BULLDOZERS, ANGLEDOZERS, GRADERS, LEVELLERS, SCRAPERS, MECHANICAL SHOVELS, EXCAVATORS, SHOVEL LOADERS, TAMPING MACHINES AND ROAD ROLLERS::Front-end shovel loaders" +84295200,"SELF-PROPELLED BULLDOZERS, ANGLEDOZERS, GRADERS, LEVELLERS, SCRAPERS, MECHANICAL SHOVELS, EXCAVATORS, SHOVEL LOADERS, TAMPING MACHINES AND ROAD ROLLERS::Machinery with a 360 degrees revolving superstructure" +84295900,"SELF-PROPELLED BULLDOZERS, ANGLEDOZERS, GRADERS, LEVELLERS, SCRAPERS, MECHANICAL SHOVELS, EXCAVATORS, SHOVEL LOADERS, TAMPING MACHINES AND ROAD ROLLERS::Other" +84300000,other moving grading levelling scraping excavating tamping compacting extracting or boring machinery for earth minerals or ores p and p snow ploughs and +84301000,other moving grading levelling scraping excavating tamping compacting extracting or boring machinery for earth minerals or ores p and p snow ploughs and >> and +84301010,"OTHER MOVING, GRADING, LEVELLING, SCRAPING, EXCAVATING, TAMPING, COMPACTING, EXTRACTING OR BORING MACHINERY, FOR EARTH, MINERALS OR ORES; PILEDRIVERS AND PILE-EXTRACTORS; SNOW-PLOUGHS AND SNOW-BLOWERS:Pile-drivers and pile-extractors :Pile-drivers" +84301020,"OTHER MOVING, GRADING, LEVELLING, SCRAPING, EXCAVATING, TAMPING, COMPACTING, EXTRACTING OR BORING MACHINERY, FOR EARTH, MINERALS OR ORES; PILEDRIVERS AND PILE-EXTRACTORS; SNOW-PLOUGHS AND SNOW-BLOWERS:Pile-drivers and pile-extractors :Pile-extractors" +84302000,"OTHER MOVING, GRADING, LEVELLING, SCRAPING, EXCAVATING, 
TAMPING, COMPACTING, EXTRACTING OR BORING MACHINERY, FOR EARTH, MINERALS OR ORES; PILEDRIVERS AND PILE-EXTRACTORS; SNOW-PLOUGHS AND SNOW-BLOWERS::Snow-ploughs and snow-blowers" +84303100,other moving grading levelling scraping excavating tamping compacting extracting or boring machinery for earth minerals or ores p and p snow ploughs and >> +84303110,"OTHER MOVING, GRADING, LEVELLING, SCRAPING, EXCAVATING, TAMPING, COMPACTING, EXTRACTING OR BORING MACHINERY, FOR EARTH, MINERALS OR ORES; PILEDRIVERS AND PILE-EXTRACTORS; SNOW-PLOUGHS AND SNOW-BLOWERS:Self-propelled:Coal cutters" +84303120,"OTHER MOVING, GRADING, LEVELLING, SCRAPING, EXCAVATING, TAMPING, COMPACTING, EXTRACTING OR BORING MACHINERY, FOR EARTH, MINERALS OR ORES; PILEDRIVERS AND PILE-EXTRACTORS; SNOW-PLOUGHS AND SNOW-BLOWERS:Self-propelled:Tunneling machinery" +84303190,"OTHER MOVING, GRADING, LEVELLING, SCRAPING, EXCAVATING, TAMPING, COMPACTING, EXTRACTING OR BORING MACHINERY, FOR EARTH, MINERALS OR ORES; PILEDRIVERS AND PILE-EXTRACTORS; SNOW-PLOUGHS AND SNOW-BLOWERS:Self-propelled:Other" +84303900,"OTHER MOVING, GRADING, LEVELLING, SCRAPING, EXCAVATING, TAMPING, COMPACTING, EXTRACTING OR BORING MACHINERY, FOR EARTH, MINERALS OR ORES; PILEDRIVERS AND PILE-EXTRACTORS; SNOW-PLOUGHS AND SNOW-BLOWERS::Other" +84304100,other moving grading levelling scraping excavating tamping compacting extracting or boring machinery for earth minerals or ores p and p snow ploughs and >> +84304110,"OTHER MOVING, GRADING, LEVELLING, SCRAPING, EXCAVATING, TAMPING, COMPACTING, EXTRACTING OR BORING MACHINERY, FOR EARTH, MINERALS OR ORES; PILEDRIVERS AND PILE-EXTRACTORS; SNOW-PLOUGHS AND SNOW-BLOWERS:Self-propelled :Tube well drilling and core drilling machinery" +84304120,"OTHER MOVING, GRADING, LEVELLING, SCRAPING, EXCAVATING, TAMPING, COMPACTING, EXTRACTING OR BORING MACHINERY, FOR EARTH, MINERALS OR ORES; PILEDRIVERS AND PILE-EXTRACTORS; SNOW-PLOUGHS AND SNOW-BLOWERS:Self-propelled :Petroleum and gas well drilling machinery" +84304130,"OTHER MOVING, GRADING, LEVELLING, SCRAPING, EXCAVATING, TAMPING, COMPACTING, EXTRACTING OR BORING MACHINERY, FOR EARTH, MINERALS OR ORES; PILEDRIVERS AND PILE-EXTRACTORS; SNOW-PLOUGHS AND SNOW-BLOWERS:Self-propelled :Rock drilling machinery" +84304190,"OTHER MOVING, GRADING, LEVELLING, SCRAPING, EXCAVATING, TAMPING, COMPACTING, EXTRACTING OR BORING MACHINERY, FOR EARTH, MINERALS OR ORES; PILEDRIVERS AND PILE-EXTRACTORS; SNOW-PLOUGHS AND SNOW-BLOWERS:Self-propelled :Other" +84304900,"OTHER MOVING, GRADING, LEVELLING, SCRAPING, EXCAVATING, TAMPING, COMPACTING, EXTRACTING OR BORING MACHINERY, FOR EARTH, MINERALS OR ORES; PILEDRIVERS AND PILE-EXTRACTORS; SNOW-PLOUGHS AND SNOW-BLOWERS::Other" +84305000,other moving grading levelling scraping excavating tamping compacting extracting or boring machinery for earth minerals or ores p and p snow ploughs and >> other machinery +84305010,"OTHER MOVING, GRADING, LEVELLING, SCRAPING, EXCAVATING, TAMPING, COMPACTING, EXTRACTING OR BORING MACHINERY, FOR EARTH, MINERALS OR ORES; PILEDRIVERS AND PILE-EXTRACTORS; SNOW-PLOUGHS AND SNOW-BLOWERS:Other machinery, self-propelled :Mining machinery (excluding coal mining)" +84305090,"OTHER MOVING, GRADING, LEVELLING, SCRAPING, EXCAVATING, TAMPING, COMPACTING, EXTRACTING OR BORING MACHINERY, FOR EARTH, MINERALS OR ORES; PILEDRIVERS AND PILE-EXTRACTORS; SNOW-PLOUGHS AND SNOW-BLOWERS:Other machinery, self-propelled :Other" +84306100,"OTHER MOVING, GRADING, LEVELLING, SCRAPING, EXCAVATING, TAMPING, COMPACTING, EXTRACTING OR BORING MACHINERY, FOR 
EARTH, MINERALS OR ORES; PILEDRIVERS AND PILE-EXTRACTORS; SNOW-PLOUGHS AND SNOW-BLOWERS::Tamping or compacting machinery" +84306900,"OTHER MOVING, GRADING, LEVELLING, SCRAPING, EXCAVATING, TAMPING, COMPACTING, EXTRACTING OR BORING MACHINERY, FOR EARTH, MINERALS OR ORES; PILEDRIVERS AND PILE-EXTRACTORS; SNOW-PLOUGHS AND SNOW-BLOWERS::Other" +84310000,parts suitable for use solely or principally with the machinery of headings 8425 to 8430 +84311000,parts suitable for use solely or principally with the machinery of headings 8425 to 8430 >> of machinery of heading 8425 +84311010,"PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINERY OF HEADINGS 8425 TO 8430:Of machinery of heading 8425 :Of pulley tackle and hoists, other than ship hoists, winches or capstans" +84311090,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINERY OF HEADINGS 8425 TO 8430:Of machinery of heading 8425 :Other +84312000,parts suitable for use solely or principally with the machinery of headings 8425 to 8430 >> of machinery of heading 8427 +84312010,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINERY OF HEADINGS 8425 TO 8430:Of machinery of heading 8427 :Of fork lift trucks +84312090,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINERY OF HEADINGS 8425 TO 8430:Of machinery of heading 8427 :Other +84313100,"PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINERY OF HEADINGS 8425 TO 8430::Of lifts, skip hoists or escalators" +84313900,parts suitable for use solely or principally with the machinery of headings 8425 to 8430 >> other +84313910,"PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINERY OF HEADINGS 8425 TO 8430:Other :Of elevators, conveyors and moving equipments" +84313990,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINERY OF HEADINGS 8425 TO 8430:Other :Other +84314100,"PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINERY OF HEADINGS 8425 TO 8430::Buckets, shovels, grabs and grips" +84314200,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINERY OF HEADINGS 8425 TO 8430::Bulldozers or angledozer blades +84314300,parts suitable for use solely or principally with the machinery of headings 8425 to 8430 >> parts of boring or sinking machinery of sub heading 8430 41 or 8430 49 +84314310,"PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINERY OF HEADINGS 8425 TO 8430:Parts of boring or sinking machinery of sub-heading 8430 41 or 8430 49 :Of boring or sinking machinery, self-propelled" +84314390,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINERY OF HEADINGS 8425 TO 8430:Parts of boring or sinking machinery of sub-heading 8430 41 or 8430 49 :Other +84314900,parts suitable for use solely or principally with the machinery of headings 8425 to 8430 >> other +84314910,"PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINERY OF HEADINGS 8425 TO 8430:Other :Of road rollers, mechanically propelled" +84314920,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINERY OF HEADINGS 8425 TO 8430:Other :Of ships derricks and cranes +84314930,"PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINERY OF HEADINGS 8425 TO 8430:Other :Of other excavating, levelling, tamping or excavating machinery for earth, mineral or ores" +84314940,"PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINERY OF HEADINGS 8425 TO 8430:Other :Of pile driver, snow plough, not self-propelled" +84314990,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINERY OF HEADINGS 8425 TO 8430:Other :Other 
+84320000,agricultural horticultural or forestry machinery for soil preparation or cultivation lawn or sports ground rollers +84321000,agricultural horticultural or forestry machinery for soil preparation or cultivation lawn or sports ground rollers >> ploughs +84321010,"AGRICULTURAL, HORTICULTURAL OR FORESTRY MACHINERY FOR SOIL PREPARATION OR CULTIVATION; LAWN OR SPORTS- GROUND ROLLERS:Ploughs:Disc ploughs" +84321020,"AGRICULTURAL, HORTICULTURAL OR FORESTRY MACHINERY FOR SOIL PREPARATION OR CULTIVATION; LAWN OR SPORTS- GROUND ROLLERS:Ploughs:Other tractor ploughs" +84321090,"AGRICULTURAL, HORTICULTURAL OR FORESTRY MACHINERY FOR SOIL PREPARATION OR CULTIVATION; LAWN OR SPORTS- GROUND ROLLERS:Ploughs:Other" +84322100,"AGRICULTURAL, HORTICULTURAL OR FORESTRY MACHINERY FOR SOIL PREPARATION OR CULTIVATION; LAWN OR SPORTS- GROUND ROLLERS::Disc harrows" +84322900,agricultural horticultural or forestry machinery for soil preparation or cultivation lawn or sports ground rollers >> other +84322910,"AGRICULTURAL, HORTICULTURAL OR FORESTRY MACHINERY FOR SOIL PREPARATION OR CULTIVATION; LAWN OR SPORTS- GROUND ROLLERS:Other :Rotary hoes" +84322990,"AGRICULTURAL, HORTICULTURAL OR FORESTRY MACHINERY FOR SOIL PREPARATION OR CULTIVATION; LAWN OR SPORTS- GROUND ROLLERS:Other :Other" +84323000,"AGRICULTURAL, HORTICULTURAL OR FORESTRY MACHINERY FOR SOIL PREPARATION OR CULTIVATION; LAWN OR SPORTS- GROUND ROLLERS::Seeders, planters and transplanters(OLD tariff)" +84323100,"AGRICULTURAL, HORTICULTURAL OR FORESTRY MACHINERY FOR SOIL PREPARATION OR CULTIVATION; LAWN OR SPORTS- GROUND ROLLERS::No-till direct seeders, planters and transplanters" +84323900,"AGRICULTURAL, HORTICULTURAL OR FORESTRY MACHINERY FOR SOIL PREPARATION OR CULTIVATION; LAWN OR SPORTS- GROUND ROLLERS::Other" +84324000,"AGRICULTURAL, HORTICULTURAL OR FORESTRY MACHINERY FOR SOIL PREPARATION OR CULTIVATION; LAWN OR SPORTS- GROUND ROLLERS::Manure spreaders and fertiliser distributors(OLD tariff)" +84324100,"AGRICULTURAL, HORTICULTURAL OR FORESTRY MACHINERY FOR SOIL PREPARATION OR CULTIVATION; LAWN OR SPORTS- GROUND ROLLERS::Manure spreaders" +84324200,"AGRICULTURAL, HORTICULTURAL OR FORESTRY MACHINERY FOR SOIL PREPARATION OR CULTIVATION; LAWN OR SPORTS- GROUND ROLLERS::Fertiliser distributors" +84328000,agricultural horticultural or forestry machinery for soil preparation or cultivation lawn or sports ground rollers >> other machinery +84328010,"AGRICULTURAL, HORTICULTURAL OR FORESTRY MACHINERY FOR SOIL PREPARATION OR CULTIVATION; LAWN OR SPORTS- GROUND ROLLERS:Other machinery :Lawn or sports ground rollers" +84328020,"AGRICULTURAL, HORTICULTURAL OR FORESTRY MACHINERY FOR SOIL PREPARATION OR CULTIVATION; LAWN OR SPORTS- GROUND ROLLERS:Other machinery :Rotary tiller" +84328090,"AGRICULTURAL, HORTICULTURAL OR FORESTRY MACHINERY FOR SOIL PREPARATION OR CULTIVATION; LAWN OR SPORTS- GROUND ROLLERS:Other machinery :Other" +84329000,agricultural horticultural or forestry machinery for soil preparation or cultivation lawn or sports ground rollers >> parts +84329010,"AGRICULTURAL, HORTICULTURAL OR FORESTRY MACHINERY FOR SOIL PREPARATION OR CULTIVATION; LAWN OR SPORTS- GROUND ROLLERS:Parts :Parts of agricultural machinery falling within headings 8432 10, 8432 21, 8432 29, 8432 30 and 8432 40" +84329090,"AGRICULTURAL, HORTICULTURAL OR FORESTRY MACHINERY FOR SOIL PREPARATION OR CULTIVATION; LAWN OR SPORTS- GROUND ROLLERS:Parts :Other" +84330000,harvesting or threshing machinery including straw or fodder balers grass or hay mowers machines for cleaning sorting 
or grading eggs fruit or other agricultural produce other than machinery of heading 8437 mowers for lawns parks or +84331100,harvesting or threshing machinery including straw or fodder balers grass or hay mowers machines for cleaning sorting or grading eggs fruit or other agricultural produce other than machinery of heading 8437 mowers for lawns parks or >> powered with the cutting device rotating in a horizontal plane +84331110,"HARVESTING OR THRESHING MACHINERY, INCLUDING STRAW OR FODDER BALERS; GRASS OR HAY MOWERS; MACHINES FOR CLEANING, SORTING OR GRADING EGGS, FRUIT OR OTHER AGRICULTURAL PRODUCE, OTHER THAN MACHINERY OF HEADING 8437:Powered with the cutting device rotating in a horizontal plane:Powered with 3 HP or more" +84331190,"HARVESTING OR THRESHING MACHINERY, INCLUDING STRAW OR FODDER BALERS; GRASS OR HAY MOWERS; MACHINES FOR CLEANING, SORTING OR GRADING EGGS, FRUIT OR OTHER AGRICULTURAL PRODUCE, OTHER THAN MACHINERY OF HEADING 8437:Powered with the cutting device rotating in a horizontal plane:Other" +84331900,harvesting or threshing machinery including straw or fodder balers grass or hay mowers machines for cleaning sorting or grading eggs fruit or other agricultural produce other than machinery of heading 8437 mowers for lawns parks or >> other +84331910,"HARVESTING OR THRESHING MACHINERY, INCLUDING STRAW OR FODDER BALERS; GRASS OR HAY MOWERS; MACHINES FOR CLEANING, SORTING OR GRADING EGGS, FRUIT OR OTHER AGRICULTURAL PRODUCE, OTHER THAN MACHINERY OF HEADING 8437:Other :Non-powered mowers, having width of 75 cm or more" +84331990,"HARVESTING OR THRESHING MACHINERY, INCLUDING STRAW OR FODDER BALERS; GRASS OR HAY MOWERS; MACHINES FOR CLEANING, SORTING OR GRADING EGGS, FRUIT OR OTHER AGRICULTURAL PRODUCE, OTHER THAN MACHINERY OF HEADING 8437:Other :Other" +84332000,"HARVESTING OR THRESHING MACHINERY, INCLUDING STRAW OR FODDER BALERS; GRASS OR HAY MOWERS; MACHINES FOR CLEANING, SORTING OR GRADING EGGS, FRUIT OR OTHER AGRICULTURAL PRODUCE, OTHER THAN MACHINERY OF HEADING 8437::Other mowers, including cutter bars for tractor mounting" +84333000,"HARVESTING OR THRESHING MACHINERY, INCLUDING STRAW OR FODDER BALERS; GRASS OR HAY MOWERS; MACHINES FOR CLEANING, SORTING OR GRADING EGGS, FRUIT OR OTHER AGRICULTURAL PRODUCE, OTHER THAN MACHINERY OF HEADING 8437::Other haymaking machinery" +84334000,"HARVESTING OR THRESHING MACHINERY, INCLUDING STRAW OR FODDER BALERS; GRASS OR HAY MOWERS; MACHINES FOR CLEANING, SORTING OR GRADING EGGS, FRUIT OR OTHER AGRICULTURAL PRODUCE, OTHER THAN MACHINERY OF HEADING 8437::Straw or fodder balers, including pick-up balers" +84335100,"HARVESTING OR THRESHING MACHINERY, INCLUDING STRAW OR FODDER BALERS; GRASS OR HAY MOWERS; MACHINES FOR CLEANING, SORTING OR GRADING EGGS, FRUIT OR OTHER AGRICULTURAL PRODUCE, OTHER THAN MACHINERY OF HEADING 8437::Combine harvester-threshers" +84335200,"HARVESTING OR THRESHING MACHINERY, INCLUDING STRAW OR FODDER BALERS; GRASS OR HAY MOWERS; MACHINES FOR CLEANING, SORTING OR GRADING EGGS, FRUIT OR OTHER AGRICULTURAL PRODUCE, OTHER THAN MACHINERY OF HEADING 8437::Other threshing machinery" +84335300,"HARVESTING OR THRESHING MACHINERY, INCLUDING STRAW OR FODDER BALERS; GRASS OR HAY MOWERS; MACHINES FOR CLEANING, SORTING OR GRADING EGGS, FRUIT OR OTHER AGRICULTURAL PRODUCE, OTHER THAN MACHINERY OF HEADING 8437::Root or tuber harvesting machines" +84335900,"HARVESTING OR THRESHING MACHINERY, INCLUDING STRAW OR FODDER BALERS; GRASS OR HAY MOWERS; MACHINES FOR CLEANING, SORTING OR GRADING EGGS, FRUIT OR OTHER AGRICULTURAL PRODUCE, 
OTHER THAN MACHINERY OF HEADING 8437::Other" +84336000,harvesting or threshing machinery including straw or fodder balers grass or hay mowers machines for cleaning sorting or grading eggs fruit or other agricultural produce other than machinery of heading 8437 mowers for lawns parks or >> machines for cleaning sorting or grading eggs fruit or other agricultural produce +84336010,"HARVESTING OR THRESHING MACHINERY, INCLUDING STRAW OR FODDER BALERS; GRASS OR HAY MOWERS; MACHINES FOR CLEANING, SORTING OR GRADING EGGS, FRUIT OR OTHER AGRICULTURAL PRODUCE, OTHER THAN MACHINERY OF HEADING 8437:Machines for cleaning, sorting or grading eggs, fruit or other agricultural produce :Machines for cleaning" +84336020,"HARVESTING OR THRESHING MACHINERY, INCLUDING STRAW OR FODDER BALERS; GRASS OR HAY MOWERS; MACHINES FOR CLEANING, SORTING OR GRADING EGGS, FRUIT OR OTHER AGRICULTURAL PRODUCE, OTHER THAN MACHINERY OF HEADING 8437:Machines for cleaning, sorting or grading eggs, fruit or other agricultural produce :Machines for sorting or grading" +84339000,"HARVESTING OR THRESHING MACHINERY, INCLUDING STRAW OR FODDER BALERS; GRASS OR HAY MOWERS; MACHINES FOR CLEANING, SORTING OR GRADING EGGS, FRUIT OR OTHER AGRICULTURAL PRODUCE, OTHER THAN MACHINERY OF HEADING 8437::Parts" +84340000,milking machines and dairy machinery +84341000,MILKING MACHINES AND DAIRY MACHINERY::Milking machines +84342000,MILKING MACHINES AND DAIRY MACHINERY::Dairy machinery +84349000,milking machines and dairy machinery >> parts +84349010,MILKING MACHINES AND DAIRY MACHINERY:Parts :Of milking machinery +84349020,MILKING MACHINES AND DAIRY MACHINERY:Parts :Of dairy machinery +84350000,presses crushers and similar machinery used in the manufacture of wine cider fruit juices or similar beverages +84351000,"PRESSES, CRUSHERS AND SIMILAR MACHINERY USED IN THE MANUFACTURE OF WINE, CIDER, FRUIT JUICES OR SIMILAR BEVERAGES::Machinery" +84359000,"PRESSES, CRUSHERS AND SIMILAR MACHINERY USED IN THE MANUFACTURE OF WINE, CIDER, FRUIT JUICES OR SIMILAR BEVERAGES::Parts" +84360000,other agricultural horticultural forestry or machinery including germination plant fitted with mechanical or thermal equipment poultry incubators and brooders +84361000,"OTHER AGRICULTURAL, HORTICULTURAL, FORESTRY, POULTRY-KEEPING OR BEE-KEEPING MACHINERY, INCLUDING GERMINATION PLANT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT; POULTRY INCUBATORS AND BROODERS::Machinery for preparing animal feeding stuffs" +84362100,"OTHER AGRICULTURAL, HORTICULTURAL, FORESTRY, POULTRY-KEEPING OR BEE-KEEPING MACHINERY, INCLUDING GERMINATION PLANT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT; POULTRY INCUBATORS AND BROODERS::Poultry incubators and brooders" +84362900,"OTHER AGRICULTURAL, HORTICULTURAL, FORESTRY, POULTRY-KEEPING OR BEE-KEEPING MACHINERY, INCLUDING GERMINATION PLANT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT; POULTRY INCUBATORS AND BROODERS::Other" +84368000,other agricultural horticultural forestry or machinery including germination plant fitted with mechanical or thermal equipment poultry incubators and brooders >> other machinery +84368010,"OTHER AGRICULTURAL, HORTICULTURAL, FORESTRY, POULTRY-KEEPING OR BEE-KEEPING MACHINERY, INCLUDING GERMINATION PLANT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT; POULTRY INCUBATORS AND BROODERS:Other machinery:Germination plant fitted with mechanical and thermal equipment" +84368090,"OTHER AGRICULTURAL, HORTICULTURAL, FORESTRY, POULTRY-KEEPING OR BEE-KEEPING MACHINERY, INCLUDING GERMINATION PLANT FITTED WITH MECHANICAL OR THERMAL 
EQUIPMENT; POULTRY INCUBATORS AND BROODERS:Other machinery:Other" +84369100,"OTHER AGRICULTURAL, HORTICULTURAL, FORESTRY, POULTRY-KEEPING OR BEE-KEEPING MACHINERY, INCLUDING GERMINATION PLANT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT; POULTRY INCUBATORS AND BROODERS::Of poultry-keeping machinery or poultry incubators and brooders" +84369900,"OTHER AGRICULTURAL, HORTICULTURAL, FORESTRY, POULTRY-KEEPING OR BEE-KEEPING MACHINERY, INCLUDING GERMINATION PLANT FITTED WITH MECHANICAL OR THERMAL EQUIPMENT; POULTRY INCUBATORS AND BROODERS::Other" +84370000,machines for cleaning sorting or grading seed grain or dried leguminous vegetables machinery used in the milling industry or for the working of cereals or dried leguminous vegetables other than machinery +84371000,"MACHINES FOR CLEANING, SORTING OR GRADING SEED, GRAIN OR DRIED LEGUMINOUS VEGETABLES; MACHINERY USED IN THE MILLING INDUSTRY OR FOR THE WORKING OF CEREALS OR DRIED LEGUMINOUS VEGETABLES, OTHER THAN FARM-TYPE MACHINERY::Machines for cleaning, sorting or grading seed, grain or dried leguminous vegetables" +84378000,machines for cleaning sorting or grading seed grain or dried leguminous vegetables machinery used in the milling industry or for the working of cereals or dried leguminous vegetables other than machinery >> other machinery +84378010,"MACHINES FOR CLEANING, SORTING OR GRADING SEED, GRAIN OR DRIED LEGUMINOUS VEGETABLES; MACHINERY USED IN THE MILLING INDUSTRY OR FOR THE WORKING OF CEREALS OR DRIED LEGUMINOUS VEGETABLES, OTHER THAN FARM-TYPE MACHINERY:Other machinery :Flour mill machinery(OLD tariff)" +84378020,"MACHINES FOR CLEANING, SORTING OR GRADING SEED, GRAIN OR DRIED LEGUMINOUS VEGETABLES; MACHINERY USED IN THE MILLING INDUSTRY OR FOR THE WORKING OF CEREALS OR DRIED LEGUMINOUS VEGETABLES, OTHER THAN FARM-TYPE MACHINERY:Other machinery :Rice mill machinery(OLD tariff)" +84378090,"MACHINES FOR CLEANING, SORTING OR GRADING SEED, GRAIN OR DRIED LEGUMINOUS VEGETABLES; MACHINERY USED IN THE MILLING INDUSTRY OR FOR THE WORKING OF CEREALS OR DRIED LEGUMINOUS VEGETABLES, OTHER THAN FARM-TYPE MACHINERY:Other machinery :Other(OLD tariff)" +84379010,"MACHINES FOR CLEANING, SORTING OR GRADING SEED, GRAIN OR DRIED LEGUMINOUS VEGETABLES; MACHINERY USED IN THE MILLING INDUSTRY OR FOR THE WORKING OF CEREALS OR DRIED LEGUMINOUS VEGETABLES, OTHER THAN FARM-TYPE MACHINERY:Parts :Of flour mill machinery(OLD tariff)" +84379020,"MACHINES FOR CLEANING, SORTING OR GRADING SEED, GRAIN OR DRIED LEGUMINOUS VEGETABLES; MACHINERY USED IN THE MILLING INDUSTRY OR FOR THE WORKING OF CEREALS OR DRIED LEGUMINOUS VEGETABLES, OTHER THAN FARM-TYPE MACHINERY:Parts :Of rice mill machinery(OLD tariff)" +84379090,"MACHINES FOR CLEANING, SORTING OR GRADING SEED, GRAIN OR DRIED LEGUMINOUS VEGETABLES; MACHINERY USED IN THE MILLING INDUSTRY OR FOR THE WORKING OF CEREALS OR DRIED LEGUMINOUS VEGETABLES, OTHER THAN FARM-TYPE MACHINERY:Parts :Other(OLD tariff)" +84380000,machinery not specified or included elsewhere in this chapter for the industrial preparation or manufacture of food or drink other than machinery for the extraction or preparation of animal or fixed vegetable or microbial fats or oils +84381000,machinery not specified or included elsewhere in this chapter for the industrial preparation or manufacture of food or drink other than machinery for the extraction or preparation of animal or fixed vegetable or microbial fats or oils >> bakery machinery and machinery for the manufacture of macaroni spaghetti or similar products
+84381010,"MACHINERY, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER, FOR THE INDUSTRIAL PREPARATION OR MANUFACTURE OF FOOD OR DRINK, OTHER THAN MACHINERY FOR THE EXTRACTION OR PREPARATION OF ANIMAL OR FIXED VEGETABLE FATS OR OILS:Bakery machinery and machinery for the manufacture of macaroni, spaghetti or similar products :Bakery machinery" +84381020,"MACHINERY, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER, FOR THE INDUSTRIAL PREPARATION OR MANUFACTURE OF FOOD OR DRINK, OTHER THAN MACHINERY FOR THE EXTRACTION OR PREPARATION OF ANIMAL OR FIXED VEGETABLE FATS OR OILS:Bakery machinery and machinery for the manufacture of macaroni, spaghetti or similar products :Machinery for manufacture of macaroni or spaghetti or similar products" +84382000,"MACHINERY, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER, FOR THE INDUSTRIAL PREPARATION OR MANUFACTURE OF FOOD OR DRINK, OTHER THAN MACHINERY FOR THE EXTRACTION OR PREPARATION OF ANIMAL OR FIXED VEGETABLE FATS OR OILS::Machinery for the manufacture of confectionery, cocoa or chocolate" +84383000,o r i nc lu de d ma chi ne ry n ot s p ec if ie d elsewhere for the industrial in this chapter preparation or manufacture of food or drink other than machinery for the extraction or preparation of animal or fixed vegetable or microbial fats or oils >> machinery for sugar manufacture +84383010,"MACHINERY, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER, FOR THE INDUSTRIAL PREPARATION OR MANUFACTURE OF FOOD OR DRINK, OTHER THAN MACHINERY FOR THE EXTRACTION OR PREPARATION OF ANIMAL OR FIXED VEGETABLE FATS OR OILS:Machinery for sugar manufacture :Sugar cane crushers" +84383090,"MACHINERY, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER, FOR THE INDUSTRIAL PREPARATION OR MANUFACTURE OF FOOD OR DRINK, OTHER THAN MACHINERY FOR THE EXTRACTION OR PREPARATION OF ANIMAL OR FIXED VEGETABLE FATS OR OILS:Machinery for sugar manufacture :Other" +84384000,"MACHINERY, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER, FOR THE INDUSTRIAL PREPARATION OR MANUFACTURE OF FOOD OR DRINK, OTHER THAN MACHINERY FOR THE EXTRACTION OR PREPARATION OF ANIMAL OR FIXED VEGETABLE FATS OR OILS::Brewery machinery" +84385000,"MACHINERY, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER, FOR THE INDUSTRIAL PREPARATION OR MANUFACTURE OF FOOD OR DRINK, OTHER THAN MACHINERY FOR THE EXTRACTION OR PREPARATION OF ANIMAL OR FIXED VEGETABLE FATS OR OILS::Machinery for the preparation of meat or poultry" +84386000,"MACHINERY, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER, FOR THE INDUSTRIAL PREPARATION OR MANUFACTURE OF FOOD OR DRINK, OTHER THAN MACHINERY FOR THE EXTRACTION OR PREPARATION OF ANIMAL OR FIXED VEGETABLE FATS OR OILS::Machinery for the preparation of fruits, nuts or vegetables" +84388000,o r i nc lu de d ma chi ne ry n ot s p ec if ie d elsewhere for the industrial in this chapter preparation or manufacture of food or drink other than machinery for the extraction or preparation of animal or fixed vegetable or microbial fats or oils >> other machinery +84388010,"MACHINERY, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER, FOR THE INDUSTRIAL PREPARATION OR MANUFACTURE OF FOOD OR DRINK, OTHER THAN MACHINERY FOR THE EXTRACTION OR PREPARATION OF ANIMAL OR FIXED VEGETABLE FATS OR OILS:Other machinery :Auxiliary equipment for extrusion cooking plant" +84388020,"MACHINERY, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER, FOR THE INDUSTRIAL PREPARATION OR MANUFACTURE OF FOOD OR DRINK, OTHER THAN MACHINERY FOR THE EXTRACTION OR PREPARATION OF ANIMAL OR FIXED 
VEGETABLE FATS OR OILS:Other machinery :For production of soya milk or other soya products (other than soya oil)" +84388030,"MACHINERY, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER, FOR THE INDUSTRIAL PREPARATION OR MANUFACTURE OF FOOD OR DRINK, OTHER THAN MACHINERY FOR THE EXTRACTION OR PREPARATION OF ANIMAL OR FIXED VEGETABLE FATS OR OILS:Other machinery :Diffusing machines (diffusers)" +84388040,"MACHINERY, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER, FOR THE INDUSTRIAL PREPARATION OR MANUFACTURE OF FOOD OR DRINK, OTHER THAN MACHINERY FOR THE EXTRACTION OR PREPARATION OF ANIMAL OR FIXED VEGETABLE FATS OR OILS:Other machinery :Tea leaf rolling or cutting machine" +84388090,"MACHINERY, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER, FOR THE INDUSTRIAL PREPARATION OR MANUFACTURE OF FOOD OR DRINK, OTHER THAN MACHINERY FOR THE EXTRACTION OR PREPARATION OF ANIMAL OR FIXED VEGETABLE FATS OR OILS:Other machinery :Other" +84389000,machinery not specified or included elsewhere in this chapter for the industrial preparation or manufacture of food or drink other than machinery for the extraction or preparation of animal or fixed vegetable or microbial fats or oils >> parts +84389010,"MACHINERY, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER, FOR THE INDUSTRIAL PREPARATION OR MANUFACTURE OF FOOD OR DRINK, OTHER THAN MACHINERY FOR THE EXTRACTION OR PREPARATION OF ANIMAL OR FIXED VEGETABLE FATS OR OILS:Parts :Of sugar manufacturing machinery" +84389090,"MACHINERY, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER, FOR THE INDUSTRIAL PREPARATION OR MANUFACTURE OF FOOD OR DRINK, OTHER THAN MACHINERY FOR THE EXTRACTION OR PREPARATION OF ANIMAL OR FIXED VEGETABLE FATS OR OILS:Parts :Of other machinery" +84390000,machinery for making pulp of fibrous cellulosic material or for making or finishing paper or paperboard +84391000,MACHINERY FOR MAKING PULP OF FIBROUS CELLULOSIC MATERIAL OR FOR MAKING OR FINISHING PAPER OR PAPERBOARD::Machinery for making pulp of fibrous cellulosic material +84392000,MACHINERY FOR MAKING PULP OF FIBROUS CELLULOSIC MATERIAL OR FOR MAKING OR FINISHING PAPER OR PAPERBOARD::Machinery for making paper or paperboard +84393000,machinery for making pulp of fibrous cellulosic material or for making or finishing paper or paperboard >> machinery for finishing paper or paperboard +84393010,MACHINERY FOR MAKING PULP OF FIBROUS CELLULOSIC MATERIAL OR FOR MAKING OR FINISHING PAPER OR PAPERBOARD:Machinery for finishing paper or paperboard:Paper laminating machine +84393090,MACHINERY FOR MAKING PULP OF FIBROUS CELLULOSIC MATERIAL OR FOR MAKING OR FINISHING PAPER OR PAPERBOARD:Machinery for finishing paper or paperboard:Other +84399100,MACHINERY FOR MAKING PULP OF FIBROUS CELLULOSIC MATERIAL OR FOR MAKING OR FINISHING PAPER OR PAPERBOARD::Of machinery for making pulp of fibrous cellulosic material +84399900,MACHINERY FOR MAKING PULP OF FIBROUS CELLULOSIC MATERIAL OR FOR MAKING OR FINISHING PAPER OR PAPERBOARD::Other +84400000,book binding machinery including book sewing machines +84401000,book binding machinery including book sewing machines >> machinery +84401010,"BOOK-BINDING MACHINERY, INCLUDING BOOK-SEWING MACHINES:Machinery :Wire stitching machinery, single headed" +84401090,"BOOK-BINDING MACHINERY, INCLUDING BOOK-SEWING MACHINES:Machinery :Other" +84409000,"BOOK-BINDING MACHINERY, INCLUDING BOOK-SEWING MACHINES::Parts" +84410000,other machinery for making up paper pulp paper or paperboard including cutting machines of all kinds
+84411000,other machinery for making up paper pulp paper or paperboard including cutting machines of all kinds >> cutting machines +84411010,"OTHER MACHINERY FOR MAKING UP PAPER PULP, PAPER OR PAPERBOARD, INCLUDING CUTTING MACHINES OF ALL KINDS:Cutting machines :Paper cutting machines, excluding machines with devices such as automatic programme cutting or three knife trimmers" +84411090,"OTHER MACHINERY FOR MAKING UP PAPER PULP, PAPER OR PAPERBOARD, INCLUDING CUTTING MACHINES OF ALL KINDS:Cutting machines :Other" +84412000,"OTHER MACHINERY FOR MAKING UP PAPER PULP, PAPER OR PAPERBOARD, INCLUDING CUTTING MACHINES OF ALL KINDS::Machines for making bags, sacks or envelopes" +84413000,"OTHER MACHINERY FOR MAKING UP PAPER PULP, PAPER OR PAPERBOARD, INCLUDING CUTTING MACHINES OF ALL KINDS::Machines for making cartons, boxes, cases, tubes, drums or similar containers, other than by moulding" +84414000,"OTHER MACHINERY FOR MAKING UP PAPER PULP, PAPER OR PAPERBOARD, INCLUDING CUTTING MACHINES OF ALL KINDS::Machines for moulding articles in paper pulp, paper or paperboard" +84418000,"OTHER MACHINERY FOR MAKING UP PAPER PULP, PAPER OR PAPERBOARD, INCLUDING CUTTING MACHINES OF ALL KINDS::Other machinery" +84419000,"OTHER MACHINERY FOR MAKING UP PAPER PULP, PAPER OR PAPERBOARD, INCLUDING CUTTING MACHINES OF ALL KINDS::Parts" +84420000,machinery apparatus and equipment other than the machines of headings 8456 to 8465 for preparing or making plates printing components plates cylinders and lithographic stones prepared for printing purposes for example planed grained or polished +84423000,machinery apparatus and equipment other than the machines of headings 8456 to 8465 for preparing or making plates printing components plates cylinders and lithographic stones prepared for printing purposes for example planed grained or polished >> machinery apparatus and equipment +84423010,"MACHINERY, APPARATUS AND EQUIPMENT (OTHER THAN THE MACHINES OF HEADINGS 8456 TO 8465) FOR PREPARING OR MAKING PLATES, PRINTING COMPONENTS; PLATES, CYLINDERS AND LITHOGRAPHIC STONES, PREPARED FOR PRINTING PURPOSES (FOR EXAMPLE, PLANED, GRAINED OR POLISHED):machinery, apparatus and equipment:Brass rules" +84423020,"MACHINERY, APPARATUS AND EQUIPMENT (OTHER THAN THE MACHINES OF HEADINGS 8456 TO 8465) FOR PREPARING OR MAKING PLATES, PRINTING COMPONENTS; PLATES, CYLINDERS AND LITHOGRAPHIC STONES, PREPARED FOR PRINTING PURPOSES (FOR EXAMPLE, PLANED, GRAINED OR POLISHED):machinery, apparatus and equipment:Chases" +84423090,"MACHINERY, APPARATUS AND EQUIPMENT (OTHER THAN THE MACHINES OF HEADINGS 8456 TO 8465) FOR PREPARING OR MAKING PLATES, PRINTING COMPONENTS; PLATES, CYLINDERS AND LITHOGRAPHIC STONES, PREPARED FOR PRINTING PURPOSES (FOR EXAMPLE, PLANED, GRAINED OR POLISHED):machinery, apparatus and equipment:Other" +84424000,"MACHINERY, APPARATUS AND EQUIPMENT (OTHER THAN THE MACHINES OF HEADINGS 8456 TO 8465) FOR PREPARING OR MAKING PLATES, PRINTING COMPONENTS; PLATES, CYLINDERS AND LITHOGRAPHIC STONES, PREPARED FOR PRINTING PURPOSES (FOR EXAMPLE, PLANED, GRAINED OR POLISHED)::Parts of the foregoing machinery, apparatus or equipment" +84425000,machinery apparatus and equipment other than the machines of headings 8456 to 8465 for preparing or making plates printing components plates cylinders and lithographic stones prepared for printing purposes for example planed grained or polished >> plates cylinders and other printing components plates cylinders and lithographic stones prepared for printing purposes for example
+84425010,"MACHINERY, APPARATUS AND EQUIPMENT (OTHER THAN THE MACHINES OF HEADINGS 8456 TO 8465) FOR PREPARING OR MAKING PLATES, PRINTING COMPONENTS; PLATES, CYLINDERS AND LITHOGRAPHIC STONES, PREPARED FOR PRINTING PURPOSES (FOR EXAMPLE, PLANED, GRAINED OR POLISHED):Plates, cylinders and other printing components; plates, cylinders and lithographic stones, prepared for printing purposes (for example, planed, grained or polished):Plates and cylinders" +84425020,"MACHINERY, APPARATUS AND EQUIPMENT (OTHER THAN THE MACHINES OF HEADINGS 8456 TO 8465) FOR PREPARING OR MAKING PLATES, PRINTING COMPONENTS; PLATES, CYLINDERS AND LITHOGRAPHIC STONES, PREPARED FOR PRINTING PURPOSES (FOR EXAMPLE, PLANED, GRAINED OR POLISHED):Plates, cylinders and other printing components; plates, cylinders and lithographic stones, prepared for printing purposes (for example, planed, grained or polished):Lithographic plates" +84425031,"MACHINERY, APPARATUS AND EQUIPMENT (OTHER THAN THE MACHINES OF HEADINGS 8456 TO 8465) FOR PREPARING OR MAKING PLATES, PRINTING COMPONENTS; PLATES, CYLINDERS AND LITHOGRAPHIC STONES, PREPARED FOR PRINTING PURPOSES (FOR EXAMPLE, PLANED, GRAINED OR POLISHED):Plates, cylinders and other printing components; plates, cylinders and lithographic stones, prepared for printing purposes (for example, planed, grained or polished):Plate and cylinder for textile printing machine" +84425039,"MACHINERY, APPARATUS AND EQUIPMENT (OTHER THAN THE MACHINES OF HEADINGS 8456 TO 8465) FOR PREPARING OR MAKING PLATES, PRINTING COMPONENTS; PLATES, CYLINDERS AND LITHOGRAPHIC STONES, PREPARED FOR PRINTING PURPOSES (FOR EXAMPLE, PLANED, GRAINED OR POLISHED):Plates, cylinders and other printing components; plates, cylinders and lithographic stones, prepared for printing purposes (for example, planed, grained or polished):Other" +84425040,"MACHINERY, APPARATUS AND EQUIPMENT (OTHER THAN THE MACHINES OF HEADINGS 8456 TO 8465) FOR PREPARING OR MAKING PLATES, PRINTING COMPONENTS; PLATES, CYLINDERS AND LITHOGRAPHIC STONES, PREPARED FOR PRINTING PURPOSES (FOR EXAMPLE, PLANED, GRAINED OR POLISHED):Plates, cylinders and other printing components; plates, cylinders and lithographic stones, prepared for printing purposes (for example, planed, grained or polished):Highly polished copper sheets for making blocks" +84425050,"MACHINERY, APPARATUS AND EQUIPMENT (OTHER THAN THE MACHINES OF HEADINGS 8456 TO 8465) FOR PREPARING OR MAKING PLATES, PRINTING COMPONENTS; PLATES, CYLINDERS AND LITHOGRAPHIC STONES, PREPARED FOR PRINTING PURPOSES (FOR EXAMPLE, PLANED, GRAINED OR POLISHED):Plates, cylinders and other printing components; plates, cylinders and lithographic stones, prepared for printing purposes (for example, planed, grained or polished):Highly polished zinc sheets for making process blocks" +84425090,"MACHINERY, APPARATUS AND EQUIPMENT (OTHER THAN THE MACHINES OF HEADINGS 8456 TO 8465) FOR PREPARING OR MAKING PLATES, PRINTING COMPONENTS; PLATES, CYLINDERS AND LITHOGRAPHIC STONES, PREPARED FOR PRINTING PURPOSES (FOR EXAMPLE, PLANED, GRAINED OR POLISHED):Plates, cylinders and other printing components; plates, cylinders and lithographic stones, prepared for printing purposes (for example, planed, grained or polished):Other" +84430000,printing machinery used for printing by means of plates cylinders and other printing components of heading 8442 other printers copying machines and facsimile machines w h ether o r not combined parts and accessories thereof printing machinery used for printing by means of plates cylinders and other 
printing components of heading 8442 +84431100,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF::Off set printing machinery, reel fed" +84431200,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF::Offset printing machinery, sheet- fed, office type (using sheets With one side not exceeding 22 cm and the other side not exceeding 36 cm in the unfolded state)" +84431300,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF::Other offset printing machinery" +84431400,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF::Letterpress printing machinery, reel fed, excluding flexography printing" +84431500,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF::Letterpress printing machinery, other than reel fed, excluding flexographic printing" +84431600,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF::Flexographic printing machinery" +84431700,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF::Gravure printing machinery" +84431900,printing machinery used for printing by means of plates cylinders and other printing components of heading 8442 other printers copying machines and facsimile machines w h ether o r not combined parts and accessories thereof printing machinery used for printing by means of plates cylinders and other printing components of heading 8442 >> other +84431910,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF:Other:Flat bed prianting presses" +84431920,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF:Other:Platen printing presses" +84431930,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF:Other:Proof presses" +84431941,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF:Other:On 
cotton textile" +84431949,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF:Other:Other" +84431990,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF:Other:Other" +84433100,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF::Machines which perform two or more of the functions of printing, copying or facsimile transmission, capable of connecting to an automatic data processing machine or to a network" +84433200,printing machinery used for printing by means of plates cylinders and other printing components of heading 8442 other printers copying machines and facsimile machines w h ether o r not combined parts and accessories thereof printing machinery used for printing by means of plates cylinders and other printing components of heading 8442 >> other capable of connecting to an automatic data processing machine or to a network other capable of connecting to an automatic data processing machine or to a network +84433210,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF:Other, capable of connecting to an automatic data processing machine or to a network:Line printer" +84433220,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF:Other, capable of connecting to an automatic data processing machine or to a network:Dot matrix printer" +84433230,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF:Other, capable of connecting to an automatic data processing machine or to a network:Letter quality daisy wheel printer" +84433240,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF:Other, capable of connecting to an automatic data processing machine or to a network:Laser jet printer" +84433250,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF:Other, capable of connecting to an automatic data processing machine or to a network:Ink jet printer" +84433260,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF:Other, capable of connecting to an automatic data processing machine or to a network:Facsimile machine" +84433290,"PRINTING MACHINERY USED FOR PRINTING 
BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF:Other, capable of connecting to an automatic data processing machine or to a network:Other" +84433900,printing machinery used for printing by means of plates cylinders and other printing components of heading 8442 other printers copying machines and facsimile machines w h ether o r not combined parts and accessories thereof printing machinery used for printing by means of plates cylinders and other printing components of heading 8442 >> other other +84433910,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF::Ink-jet printing machine" +84433920,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF::Electrostatic photocopying apparatus operated by reproducing the original image directly onto the copy (direct process)" +84433930,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF::Electrostatic photocopying apparatus operated by reproducing the original image via and intermediate onto the copy (indirect process)" +84433940,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF::Other photocopying apparatus incorporating an optical system" +84433950,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF::Other photocopying apparatus of contact type" +84433960,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF::Thermo-copying apparatus" +84433970,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF::Facsimile machine not capable of getting connected to automatic data processing machine" +84433990,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF::Other" +84439100,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF::Parts and accessories of printing machinery used for printing by means of plates, cylinders and other printing components of heading 8442" +84439900,printing machinery used for printing by means of plates cylinders and other printing components of heading 
8442 other printers copying machines and facsimile machines w h ether o r not combined parts and accessories thereof printing machinery used for printing by means of plates cylinders and other printing components of heading 8442 >> other other +84439910,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF:Other :Automatic documents feeders of copying machines" +84439920,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF:Other :Paper feeders of copying machines" +84439930,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF:Other :Sorters of copying machines" +84439940,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF:Other :Other parts of copying machines" +84439951,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF:Other :Ink cartridges, with print head assembly" +84439952,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF:Other :Ink spray nozzle" +84439953,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF:Other :INK SPRAY NOZZLE" +84439959,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF:Other :Other" +84439960,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF:Other :Parts and accessories of goods of sub-heading 8443 39" +84439990,"PRINTING MACHINERY USED FOR PRINTING BY MEANS OF PLATES, CYLINDERS AND OTHER PRINTING COMPONENTS OF HEADING 8442; OTHER PRINTERS, COPYING MACHINES AND FACSIMILE MACHINES, WHETHER OR NOT COMBINED; PARTS AND ACCESSORIES THEREOF:Other :Other" +84440010,"MACHINES FOR EXTRUDING, DRAWING, TEXTURING OR CUTTING MAN-MADE TEXTILE MATERIALS:Machines for extruding, drawing, texturing or cutting man-made textile materials :Machines for extruding man-made textile materials(OLD tariff)" +84440090,"MACHINES FOR EXTRUDING, DRAWING, TEXTURING OR CUTTING MAN-MADE TEXTILE MATERIALS:Machines for extruding, drawing, texturing or cutting man-made textile materials :Other(OLD tariff)" +84451110,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE 
REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Carding machines :Cotton carding machines(OLD tariff)" +84451190,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Carding machines :Other(OLD tariff)" +84451210,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Combing machines:Cotton combing machines(OLD tariff)" +84451290,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Combing machines:Other(OLD tariff)" +84451300,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447::Drawing or roving machines(OLD tariff)" +84451910,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Other :Cotton processing machines (including cotton ginning machine)(OLD tariff)" +84451920,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Other :Jute fibre processing machines(OLD tariff)" +84451930,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Other :Regenerated fibres and synthetic fibres processing machines(OLD tariff)" +84451940,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Other :Silk processing machines(OLD tariff)" +84451950,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Other :Wool processing machines(OLD tariff)" +84451960,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 
8447:Other :Blowroom machines(OLD tariff)" +84451990,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Other :Other(OLD tariff)" +84452011,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Textile spinning machines :Drawing frames(OLD tariff)" +84452012,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Textile spinning machines :Intermediate frames(OLD tariff)" +84452013,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Textile spinning machines :Ring frames(OLD tariff)" +84452014,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Textile spinning machines :Roving frames(OLD tariff)" +84452019,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Textile spinning machines :Other(OLD tariff)" +84452020,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Textile spinning machines :Jute fibres spinning machines(OLD tariff)" +84452030,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Textile spinning machines :Regenerated fibres and synthetic fibres spinning machines(OLD tariff)" +84452040,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Textile spinning machines :Silk fibres spinning machines(OLD tariff)" +84452050,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Textile spinning machines :Wool spinning machines(OLD tariff)" +84452090,"MACHINES FOR 
PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Textile spinning machines :Other(OLD tariff)" +84453011,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Textile doubling or twisting machines :Doubling frames(OLD tariff)" +84453019,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Textile doubling or twisting machines :Other(OLD tariff)" +84453020,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Textile doubling or twisting machines :Jute fibre doubling or twisting machines(OLD tariff)" +84453030,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Textile doubling or twisting machines :Regenerated fibre and synthetic fibre yarn doubling or twisting machines(OLD tariff)" +84453040,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Textile doubling or twisting machines :Silk fibre doubling or twisting machines(OLD tariff)" +84453050,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Textile doubling or twisting machines :Wool fibre doubling or twisting machines(OLD tariff)" +84453090,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Textile doubling or twisting machines :Other fibre doubling or twisting machines(OLD tariff)" +84454010,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Textile winding (including weft-winding) or reeling machines :Cotton fibre winding (including weft-winding) or reeling machines, automatic or otherwise(OLD tariff)" +84454020,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; 
TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Textile winding (including weft-winding) or reeling machines :Jute fibre reeling (including weft-winding) machines(OLD tariff)" +84454030,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Textile winding (including weft-winding) or reeling machines :Regenerated fibres yarn and synthetic fibres yarn reeling (including weft-winding) machines(OLD tariff)" +84454040,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Textile winding (including weft-winding) or reeling machines :Silk fibre reeling (including weft-winding) machines(OLD tariff)" +84454050,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Textile winding (including weft-winding) or reeling machines :Wool fibre reeling (including weft-winding) machines(OLD tariff)" +84454090,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447:Textile winding (including weft-winding) or reeling machines :Other(OLD tariff)" +84459000,"MACHINES FOR PREPARING TEXTILE FIBRES; SPINNING, DOUBLING OR TWISTING MACHINES AND OTHER MACHINERY FOR PRODUCING TEXTILE YARNS; TEXTILE REELING OR WINDING (INCLUDING WEFT-WINDING) MACHINES AND MACHINES FOR PREPARING TEXTILE YARNS FOR USE ON THE MACHINES OF HEADING 8446 OR 8447::Other(OLD tariff)" +84460000,weaving machines looms +84461000,weaving machines looms >> for weaving fabrics of a width not exceeding 30 cotton weaving machines +84461011,"WEAVING MACHINES (LOOMS):For weaving fabrics of a width not exceeding 30 cm :Automatic, powerloom" +84461012,"WEAVING MACHINES (LOOMS):For weaving fabrics of a width not exceeding 30 cm :Plain, powerloom" +84461019,WEAVING MACHINES (LOOMS):For weaving fabrics of a width not exceeding 30 cm :Other +84461090,WEAVING MACHINES (LOOMS):For weaving fabrics of a width not exceeding 30 cm :Other +84462100,weaving machines looms >> power looms +84462110,"WEAVING MACHINES (LOOMS):Power looms:Cotton weaving machines, automatic" +84462190,WEAVING MACHINES (LOOMS):Power looms:Other +84462900,weaving machines looms >> other +84462910,WEAVING MACHINES (LOOMS):Other :Cotton weaving machines +84462990,WEAVING MACHINES (LOOMS):Other :Other +84463000,weaving machines looms >> for weaving fabrics of a width exceeding 30 cm shuttleless type cotton weaving machines +84463011,"WEAVING MACHINES (LOOMS):For weaving fabrics of a width exceeding30 cm, shuttleless type:Automatic, powerloom" +84463012,"WEAVING MACHINES (LOOMS):For weaving fabrics of a width exceeding30 cm, shuttleless type:Plain, powerloom" +84463019,"WEAVING MACHINES (LOOMS):For weaving fabrics of 
a width exceeding30 cm, shuttleless type:Other" +84463090,"WEAVING MACHINES (LOOMS):For weaving fabrics of a width exceeding30 cm, shuttleless type:Other" +84470000,knitting machines machines and machines for making gimped yarn tulle lace embroidery trimmings braid or net and machines for tufting circular knitting machines +84471100,knitting machines machines and machines for making gimped yarn tulle lace embroidery trimmings braid or net and machines for tufting circular knitting machines >> with cylinder diameter not exceeding 165 mm wool knitting machines +84471111,"KNITTING MACHINES, STITCH-BONDING MACHINES AND MACHINES FOR MAKING GIMPED YARN, TULLE, LACE, EMBROIDERY, TRIMMINGS, BRAID OR NET AND MACHINES FOR TUFTING:With cylinder diameter not exceeding 165 mm:Hand knitting machines" +84471119,"KNITTING MACHINES, STITCH-BONDING MACHINES AND MACHINES FOR MAKING GIMPED YARN, TULLE, LACE, EMBROIDERY, TRIMMINGS, BRAID OR NET AND MACHINES FOR TUFTING:With cylinder diameter not exceeding 165 mm:Other" +84471120,"KNITTING MACHINES, STITCH-BONDING MACHINES AND MACHINES FOR MAKING GIMPED YARN, TULLE, LACE, EMBROIDERY, TRIMMINGS, BRAID OR NET AND MACHINES FOR TUFTING:With cylinder diameter not exceeding 165 mm:Cotton hosiery machines" +84471190,"KNITTING MACHINES, STITCH-BONDING MACHINES AND MACHINES FOR MAKING GIMPED YARN, TULLE, LACE, EMBROIDERY, TRIMMINGS, BRAID OR NET AND MACHINES FOR TUFTING:With cylinder diameter not exceeding 165 mm:Other" +84471200,knitting machines machines and machines for making gimped yarn tulle lace embroidery trimmings braid or net and machines for tufting circular knitting machines >> with cylinder diameter exceeding 165 mm wool knitting machines +84471211,"KNITTING MACHINES, STITCH-BONDING MACHINES AND MACHINES FOR MAKING GIMPED YARN, TULLE, LACE, EMBROIDERY, TRIMMINGS, BRAID OR NET AND MACHINES FOR TUFTING:With cylinder diameter exceeding 165 mm:Hand knitting machines" +84471219,"KNITTING MACHINES, STITCH-BONDING MACHINES AND MACHINES FOR MAKING GIMPED YARN, TULLE, LACE, EMBROIDERY, TRIMMINGS, BRAID OR NET AND MACHINES FOR TUFTING:With cylinder diameter exceeding 165 mm:Other" +84471220,"KNITTING MACHINES, STITCH-BONDING MACHINES AND MACHINES FOR MAKING GIMPED YARN, TULLE, LACE, EMBROIDERY, TRIMMINGS, BRAID OR NET AND MACHINES FOR TUFTING:With cylinder diameter exceeding 165 mm:Cotton hosiery machines" +84471290,"KNITTING MACHINES, STITCH-BONDING MACHINES AND MACHINES FOR MAKING GIMPED YARN, TULLE, LACE, EMBROIDERY, TRIMMINGS, BRAID OR NET AND MACHINES FOR TUFTING:With cylinder diameter exceeding 165 mm:Other" +84472000,knitting machines machines and machines for making gimped yarn tulle lace embroidery trimmings braid or net and machines for tufting circular knitting machines >> flat kniting machines machines +84472010,"KNITTING MACHINES, STITCH-BONDING MACHINES AND MACHINES FOR MAKING GIMPED YARN, TULLE, LACE, EMBROIDERY, TRIMMINGS, BRAID OR NET AND MACHINES FOR TUFTING:Flat kniting machines; stitch-bonding machines:Hand knitting machines for wool" +84472020,"KNITTING MACHINES, STITCH-BONDING MACHINES AND MACHINES FOR MAKING GIMPED YARN, TULLE, LACE, EMBROIDERY, TRIMMINGS, BRAID OR NET AND MACHINES FOR TUFTING:Flat kniting machines; stitch-bonding machines:Other knitting machines for wool" +84472030,"KNITTING MACHINES, STITCH-BONDING MACHINES AND MACHINES FOR MAKING GIMPED YARN, TULLE, LACE, EMBROIDERY, TRIMMINGS, BRAID OR NET AND MACHINES FOR TUFTING:Flat kniting machines; stitch-bonding machines:Cotton hosiery machines" +84472090,"KNITTING MACHINES, 
STITCH-BONDING MACHINES AND MACHINES FOR MAKING GIMPED YARN, TULLE, LACE, EMBROIDERY, TRIMMINGS, BRAID OR NET AND MACHINES FOR TUFTING:Flat kniting machines; stitch-bonding machines:Other" +84479000,knitting machines machines and machines for making gimped yarn tulle lace embroidery trimmings braid or net and machines for tufting circular knitting machines >> other +84479010,"KNITTING MACHINES, STITCH-BONDING MACHINES AND MACHINES FOR MAKING GIMPED YARN, TULLE, LACE, EMBROIDERY, TRIMMINGS, BRAID OR NET AND MACHINES FOR TUFTING:Other :Machines for making of tulle and lace" +84479020,"KNITTING MACHINES, STITCH-BONDING MACHINES AND MACHINES FOR MAKING GIMPED YARN, TULLE, LACE, EMBROIDERY, TRIMMINGS, BRAID OR NET AND MACHINES FOR TUFTING:Other :Machines for making embroidery" +84479030,"KNITTING MACHINES, STITCH-BONDING MACHINES AND MACHINES FOR MAKING GIMPED YARN, TULLE, LACE, EMBROIDERY, TRIMMINGS, BRAID OR NET AND MACHINES FOR TUFTING:Other :Other" +84480000,auxiliary machinery for use with machines of heading 8444 8445 8446 or 8447 for +84481110,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Dobbies and jacquards; card reducing, copying, punching or:Jacquards and harness liner cards for cotton textile machinery(OLD tariff)" +84481190,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Dobbies and jacquards; card reducing, copying, punching or:Other(OLD tariff)" +84481900,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES)::Other" +84482000,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES)::Parts and accessories of machines of heading 8444 or of their auxiliary machinery" +84483100,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, 
HEALDS AND HEALD FRAMES, HOSIERY NEEDLES)::Card clothing" +84483200,auxiliary machinery for use with machines of heading 8444 8445 8446 or 8447 for >> of machines for preparing textile fibres other than card clothing +84483210,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Of machines for preparing textile fibres, other than card clothing:For cotton processing machines" +84483220,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Of machines for preparing textile fibres, other than card clothing:For jute processing machines" +84483230,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Of machines for preparing textile fibres, other than card clothing:For silk and manmade (regenerated and synthetic fibres processing machines)" +84483240,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Of machines for preparing textile fibres, other than card clothing:For wool processing machines" +84483290,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Of machines for preparing textile fibres, other than card clothing:Other" +84483300,auxiliary machinery for use with machines of heading 8444 8445 8446 or 8447 for >> spindles spindle flyers spinning rings and ring travellers +84483310,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY 
NEEDLES):Spindles, spindle flyers, spinning rings and ring travellers :For cotton spinning machines" +84483320,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Spindles, spindle flyers, spinning rings and ring travellers :For jute spinning machines" +84483330,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Spindles, spindle flyers, spinning rings and ring travellers :For silk and man-made (regenerated and synthetic) fibre spinning machines" +84483340,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Spindles, spindle flyers, spinning rings and ring travellers :For wool spinning machines" +84483390,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Spindles, spindle flyers, spinning rings and ring travellers :For other textile fibre spinning machines" +84483900,auxiliary machinery for use with machines of heading 8444 8445 8446 or 8447 for >> other +84483910,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Other :Combs for cotton textile machinery" +84483920,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Other :Gills for gill boxes" +84483990,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); 
PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Other :Other" +84484200,auxiliary machinery for use with machines of heading 8444 8445 8446 or 8447 for >> reeds for looms healds and +84484210,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Reeds for looms, healds and heald-frames:Healds (excluding wire healds) and reeds for cotton machinery" +84484220,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Reeds for looms, healds and heald-frames:Healds, wire" +84484290,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Reeds for looms, healds and heald-frames:Other" +84484900,auxiliary machinery for use with machines of heading 8444 8445 8446 or 8447 for >> other +84484910,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Other :Parts of cotton weaving machinery" +84484920,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Other :Parts of jute weaving machinery" +84484930,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Other :Parts of silk and man-made fibres weaving machinery" +84484940,"AUXILIARY 
MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Other :Parts of wool weaving machinery" +84484950,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Other :Parts of other textile weaving machinery" +84484990,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Other :Other" +84485100,auxiliary machinery for use with machines of heading 8444 8445 8446 or 8447 for >> sinkers needles and other articles used in forming stitches +84485110,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Sinkers, needles and other articles used in forming stitches :Of cotton hosiery machine" +84485120,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Sinkers, needles and other articles used in forming stitches :Of wool knitting machines" +84485130,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Sinkers, needles and other articles used in forming stitches :Of machines for tulle, lace" +84485190,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, 
COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES):Sinkers, needles and other articles used in forming stitches :Other" +84485900,"AUXILIARY MACHINERY FOR USE WITH MACHINES OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, DOBBIES, JACQUARDS, AUTOMATIC STOP MOTIONS, SHUTTLE CHANGING MECHANISMS); PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF THIS HEADING OR OF HEADING 8444, 8445, 8446 OR 8447 (FOR EXAMPLE, SPINDLES AND SPINDLE FLYERS, CARD CLOTHING, COMBS, EXTRUDING NIPPLES, SHUTTLES, HEALDS AND HEALD FRAMES, HOSIERY NEEDLES)::Other" +84490000,machinery for the manufacture or finishing of felt or nonwovens in the piece or in shapes including machinery for making felt hats blocks for making hats machinery for the manufacture or finishing of felt or nonwovens in the piece or in shapes including machinery for making felt hats blocks for making hats +84490010,"MACHINERY FOR THE MANUFACTURE OR FINISHING OF FELT OR NONWOVENS IN THE PIECE OR IN SHAPES, INCLUDING MACHINERY FOR MAKING FELT HATS; BLOCKS FOR MAKING HATS:Machinery for the manufacture or finishing of felt or nonwovens in the piece or in shapes, including machinery for making felt hats; blocks for making hats :Machinery for manufacture or finishing of felt in piece or inshapes (including felt hat-making machines and hat making blocks)" +84490090,"MACHINERY FOR THE MANUFACTURE OR FINISHING OF FELT OR NONWOVENS IN THE PIECE OR IN SHAPES, INCLUDING MACHINERY FOR MAKING FELT HATS; BLOCKS FOR MAKING HATS:Machinery for the manufacture or finishing of felt or nonwovens in the piece or in shapes, including machinery for making felt hats; blocks for making hats :Other" +84500000,household or washing machines including machines which both wash and dry machines each of a dry linen capacity not exceeding 10 kg +84501100,"HOUSEHOLD OR LAUNDRY-TYPE WASHING MACHINES, INCLUDING MACHINES WHICH BOTH WASH AND DRY::Fully-automatic machines" +84501200,"HOUSEHOLD OR LAUNDRY-TYPE WASHING MACHINES, INCLUDING MACHINES WHICH BOTH WASH AND DRY::Other machines, with built-in centrifugal drier" +84501900,"HOUSEHOLD OR LAUNDRY-TYPE WASHING MACHINES, INCLUDING MACHINES WHICH BOTH WASH AND DRY::Other" +84502000,"HOUSEHOLD OR LAUNDRY-TYPE WASHING MACHINES, INCLUDING MACHINES WHICH BOTH WASH AND DRY::Machines, each of a dry linen capacity exceeding 10 kg" +84509000,household or washing machines including machines which both wash and dry machines each of a dry linen capacity not exceeding 10 kg >> parts +84509010,"HOUSEHOLD OR LAUNDRY-TYPE WASHING MACHINES, INCLUDING MACHINES WHICH BOTH WASH AND DRY:Parts:Parts of household type machines" +84509090,"HOUSEHOLD OR LAUNDRY-TYPE WASHING MACHINES, INCLUDING MACHINES WHICH BOTH WASH AND DRY:Parts:Other" +84510000,machinery other than machines of heading 8450 for washing cleaning wringing drying ironing pressing including fusing presses bleaching dyeing dressing finishing coating or impregnating textile yarns fabrics or made up textile articles and machines for applying the paste to the base fabric or other support used in the manufacture of floor coverings such as linoleum machines for reeling unreeling folding cutting or pinking textile fabrics +84511000,machinery other than machines of heading 8450 for washing cleaning wringing drying ironing pressing including fusing presses bleaching dyeing dressing finishing coating or impregnating textile yarns fabrics or made up textile articles and machines for applying the paste to the base fabric or other support used in 
the manufacture of floor coverings such as linoleum machines for reeling unreeling folding cutting or pinking textile fabrics >> machines +84511010,"MACHINERY (OTHER THAN MACHINES OF HEADING 8450) FOR WASHING, CLEANING, WRINGING, DRYING, IRONING, PRESSING (INCLUDING FUSING PRESSES), BLEACHING, DYEING, DRESSING, FINISHING, COATING OR IMPREGNATING TEXTILE YARNS, FABRICS OR MADE UP TEXTILE ARTICLES AND MACHINES FOR APPLYING THE PASTE TO THE BASE FABRIC OR OTHER SUPPORT USED IN THE MANUFACTURE OF FLOOR COVERINGS SUCH AS LINOLEUM; MACHINES FOR REELING, UNREELING, FOLDING, CUTTING OR PINKING TEXTILE FABRICS:Dry-cleaning machines :Dry cleaning machines for cotton textile" +84511090,"MACHINERY (OTHER THAN MACHINES OF HEADING 8450) FOR WASHING, CLEANING, WRINGING, DRYING, IRONING, PRESSING (INCLUDING FUSING PRESSES), BLEACHING, DYEING, DRESSING, FINISHING, COATING OR IMPREGNATING TEXTILE YARNS, FABRICS OR MADE UP TEXTILE ARTICLES AND MACHINES FOR APPLYING THE PASTE TO THE BASE FABRIC OR OTHER SUPPORT USED IN THE MANUFACTURE OF FLOOR COVERINGS SUCH AS LINOLEUM; MACHINES FOR REELING, UNREELING, FOLDING, CUTTING OR PINKING TEXTILE FABRICS:Dry-cleaning machines :Other" +84512100,"MACHINERY (OTHER THAN MACHINES OF HEADING 8450) FOR WASHING, CLEANING, WRINGING, DRYING, IRONING, PRESSING (INCLUDING FUSING PRESSES), BLEACHING, DYEING, DRESSING, FINISHING, COATING OR IMPREGNATING TEXTILE YARNS, FABRICS OR MADE UP TEXTILE ARTICLES AND MACHINES FOR APPLYING THE PASTE TO THE BASE FABRIC OR OTHER SUPPORT USED IN THE MANUFACTURE OF FLOOR COVERINGS SUCH AS LINOLEUM; MACHINES FOR REELING, UNREELING, FOLDING, CUTTING OR PINKING TEXTILE FABRICS::Each of a dry linen capacity not exceeding 10 kg" +84512900,"MACHINERY (OTHER THAN MACHINES OF HEADING 8450) FOR WASHING, CLEANING, WRINGING, DRYING, IRONING, PRESSING (INCLUDING FUSING PRESSES), BLEACHING, DYEING, DRESSING, FINISHING, COATING OR IMPREGNATING TEXTILE YARNS, FABRICS OR MADE UP TEXTILE ARTICLES AND MACHINES FOR APPLYING THE PASTE TO THE BASE FABRIC OR OTHER SUPPORT USED IN THE MANUFACTURE OF FLOOR COVERINGS SUCH AS LINOLEUM; MACHINES FOR REELING, UNREELING, FOLDING, CUTTING OR PINKING TEXTILE FABRICS::Other" +84513000,machinery other than machines of heading 8450 for washing cleaning wringing drying ironing pressing including fusing presses bleaching dyeing dressing finishing coating or impregnating textile yarns fabrics or made up textile articles and machines for applying the paste to the base fabric or other support used in the manufacture of floor coverings such as linoleum machines for reeling unreeling folding cutting or pinking textile fabrics >> ironing machines and presses including fusing presses +84513010,"MACHINERY (OTHER THAN MACHINES OF HEADING 8450) FOR WASHING, CLEANING, WRINGING, DRYING, IRONING, PRESSING (INCLUDING FUSING PRESSES), BLEACHING, DYEING, DRESSING, FINISHING, COATING OR IMPREGNATING TEXTILE YARNS, FABRICS OR MADE UP TEXTILE ARTICLES AND MACHINES FOR APPLYING THE PASTE TO THE BASE FABRIC OR OTHER SUPPORT USED IN THE MANUFACTURE OF FLOOR COVERINGS SUCH AS LINOLEUM; MACHINES FOR REELING, UNREELING, FOLDING, CUTTING OR PINKING TEXTILE FABRICS:Ironing machines and presses (including fusing presses):Hand ironing press" +84513090,"MACHINERY (OTHER THAN MACHINES OF HEADING 8450) FOR WASHING, CLEANING, WRINGING, DRYING, IRONING, PRESSING (INCLUDING FUSING PRESSES), BLEACHING, DYEING, DRESSING, FINISHING, COATING OR IMPREGNATING TEXTILE YARNS, FABRICS OR MADE UP TEXTILE ARTICLES AND MACHINES FOR APPLYING THE PASTE TO THE BASE FABRIC OR 
OTHER SUPPORT USED IN THE MANUFACTURE OF FLOOR COVERINGS SUCH AS LINOLEUM; MACHINES FOR REELING, UNREELING, FOLDING, CUTTING OR PINKING TEXTILE FABRICS:Ironing machines and presses (including fusing presses):Other" +84514000,machinery other than machines of heading 8450 for washing cleaning wringing drying ironing pressing including fusing presses bleaching dyeing dressing finishing coating or impregnating textile yarns fabrics or made up textile articles and machines for applying the paste to the base fabric or other support used in the manufacture of floor coverings such as linoleum machines for reeling unreeling folding cutting or pinking textile fabrics >> washing bleaching or dyeing machines bleaching machine +84514011,"MACHINERY (OTHER THAN MACHINES OF HEADING 8450) FOR WASHING, CLEANING, WRINGING, DRYING, IRONING, PRESSING (INCLUDING FUSING PRESSES), BLEACHING, DYEING, DRESSING, FINISHING, COATING OR IMPREGNATING TEXTILE YARNS, FABRICS OR MADE UP TEXTILE ARTICLES AND MACHINES FOR APPLYING THE PASTE TO THE BASE FABRIC OR OTHER SUPPORT USED IN THE MANUFACTURE OF FLOOR COVERINGS SUCH AS LINOLEUM; MACHINES FOR REELING, UNREELING, FOLDING, CUTTING OR PINKING TEXTILE FABRICS:Washing, bleaching or dyeing machines:For cotton textile" +84514019,"MACHINERY (OTHER THAN MACHINES OF HEADING 8450) FOR WASHING, CLEANING, WRINGING, DRYING, IRONING, PRESSING (INCLUDING FUSING PRESSES), BLEACHING, DYEING, DRESSING, FINISHING, COATING OR IMPREGNATING TEXTILE YARNS, FABRICS OR MADE UP TEXTILE ARTICLES AND MACHINES FOR APPLYING THE PASTE TO THE BASE FABRIC OR OTHER SUPPORT USED IN THE MANUFACTURE OF FLOOR COVERINGS SUCH AS LINOLEUM; MACHINES FOR REELING, UNREELING, FOLDING, CUTTING OR PINKING TEXTILE FABRICS:Washing, bleaching or dyeing machines:Other" +84514021,"MACHINERY (OTHER THAN MACHINES OF HEADING 8450) FOR WASHING, CLEANING, WRINGING, DRYING, IRONING, PRESSING (INCLUDING FUSING PRESSES), BLEACHING, DYEING, DRESSING, FINISHING, COATING OR IMPREGNATING TEXTILE YARNS, FABRICS OR MADE UP TEXTILE ARTICLES AND MACHINES FOR APPLYING THE PASTE TO THE BASE FABRIC OR OTHER SUPPORT USED IN THE MANUFACTURE OF FLOOR COVERINGS SUCH AS LINOLEUM; MACHINES FOR REELING, UNREELING, FOLDING, CUTTING OR PINKING TEXTILE FABRICS:Washing, bleaching or dyeing machines:For cotton textile" +84514029,"MACHINERY (OTHER THAN MACHINES OF HEADING 8450) FOR WASHING, CLEANING, WRINGING, DRYING, IRONING, PRESSING (INCLUDING FUSING PRESSES), BLEACHING, DYEING, DRESSING, FINISHING, COATING OR IMPREGNATING TEXTILE YARNS, FABRICS OR MADE UP TEXTILE ARTICLES AND MACHINES FOR APPLYING THE PASTE TO THE BASE FABRIC OR OTHER SUPPORT USED IN THE MANUFACTURE OF FLOOR COVERINGS SUCH AS LINOLEUM; MACHINES FOR REELING, UNREELING, FOLDING, CUTTING OR PINKING TEXTILE FABRICS:Washing, bleaching or dyeing machines:For other textile" +84514091,"MACHINERY (OTHER THAN MACHINES OF HEADING 8450) FOR WASHING, CLEANING, WRINGING, DRYING, IRONING, PRESSING (INCLUDING FUSING PRESSES), BLEACHING, DYEING, DRESSING, FINISHING, COATING OR IMPREGNATING TEXTILE YARNS, FABRICS OR MADE UP TEXTILE ARTICLES AND MACHINES FOR APPLYING THE PASTE TO THE BASE FABRIC OR OTHER SUPPORT USED IN THE MANUFACTURE OF FLOOR COVERINGS SUCH AS LINOLEUM; MACHINES FOR REELING, UNREELING, FOLDING, CUTTING OR PINKING TEXTILE FABRICS:Washing, bleaching or dyeing machines:For washing and cleaning for woollen textile" +84514099,"MACHINERY (OTHER THAN MACHINES OF HEADING 8450) FOR WASHING, CLEANING, WRINGING, DRYING, IRONING, PRESSING (INCLUDING FUSING PRESSES), BLEACHING, DYEING, 
DRESSING, FINISHING, COATING OR IMPREGNATING TEXTILE YARNS, FABRICS OR MADE UP TEXTILE ARTICLES AND MACHINES FOR APPLYING THE PASTE TO THE BASE FABRIC OR OTHER SUPPORT USED IN THE MANUFACTURE OF FLOOR COVERINGS SUCH AS LINOLEUM; MACHINES FOR REELING, UNREELING, FOLDING, CUTTING OR PINKING TEXTILE FABRICS:Washing, bleaching or dyeing machines:Other" +84515000,"MACHINERY (OTHER THAN MACHINES OF HEADING 8450) FOR WASHING, CLEANING, WRINGING, DRYING, IRONING, PRESSING (INCLUDING FUSING PRESSES), BLEACHING, DYEING, DRESSING, FINISHING, COATING OR IMPREGNATING TEXTILE YARNS, FABRICS OR MADE UP TEXTILE ARTICLES AND MACHINES FOR APPLYING THE PASTE TO THE BASE FABRIC OR OTHER SUPPORT USED IN THE MANUFACTURE OF FLOOR COVERINGS SUCH AS LINOLEUM; MACHINES FOR REELING, UNREELING, FOLDING, CUTTING OR PINKING TEXTILE FABRICS::Machines for reeling, unreeling, folding, cutting or pinking textile fabrics" +84518000,machinery other than machines of heading 8450 for washing cleaning wringing drying ironing pressing including fusing presses bleaching dyeing dressing finishing coating or impregnating textile yarns fabrics or made up textile articles and machines for applying the paste to the base fabric or other support used in the manufacture of floor coverings such as linoleum machines for reeling unreeling folding cutting or pinking textile fabrics >> other machinery sizing and dressing machines +84518011,"MACHINERY (OTHER THAN MACHINES OF HEADING 8450) FOR WASHING, CLEANING, WRINGING, DRYING, IRONING, PRESSING (INCLUDING FUSING PRESSES), BLEACHING, DYEING, DRESSING, FINISHING, COATING OR IMPREGNATING TEXTILE YARNS, FABRICS OR MADE UP TEXTILE ARTICLES AND MACHINES FOR APPLYING THE PASTE TO THE BASE FABRIC OR OTHER SUPPORT USED IN THE MANUFACTURE OF FLOOR COVERINGS SUCH AS LINOLEUM; MACHINES FOR REELING, UNREELING, FOLDING, CUTTING OR PINKING TEXTILE FABRICS:Other machinery :For cotton textile" +84518019,"MACHINERY (OTHER THAN MACHINES OF HEADING 8450) FOR WASHING, CLEANING, WRINGING, DRYING, IRONING, PRESSING (INCLUDING FUSING PRESSES), BLEACHING, DYEING, DRESSING, FINISHING, COATING OR IMPREGNATING TEXTILE YARNS, FABRICS OR MADE UP TEXTILE ARTICLES AND MACHINES FOR APPLYING THE PASTE TO THE BASE FABRIC OR OTHER SUPPORT USED IN THE MANUFACTURE OF FLOOR COVERINGS SUCH AS LINOLEUM; MACHINES FOR REELING, UNREELING, FOLDING, CUTTING OR PINKING TEXTILE FABRICS:Other machinery :Other" +84518021,"MACHINERY (OTHER THAN MACHINES OF HEADING 8450) FOR WASHING, CLEANING, WRINGING, DRYING, IRONING, PRESSING (INCLUDING FUSING PRESSES), BLEACHING, DYEING, DRESSING, FINISHING, COATING OR IMPREGNATING TEXTILE YARNS, FABRICS OR MADE UP TEXTILE ARTICLES AND MACHINES FOR APPLYING THE PASTE TO THE BASE FABRIC OR OTHER SUPPORT USED IN THE MANUFACTURE OF FLOOR COVERINGS SUCH AS LINOLEUM; MACHINES FOR REELING, UNREELING, FOLDING, CUTTING OR PINKING TEXTILE FABRICS:Other machinery :For coating or impregnating yarn or fabrics" +84518022,"MACHINERY (OTHER THAN MACHINES OF HEADING 8450) FOR WASHING, CLEANING, WRINGING, DRYING, IRONING, PRESSING (INCLUDING FUSING PRESSES), BLEACHING, DYEING, DRESSING, FINISHING, COATING OR IMPREGNATING TEXTILE YARNS, FABRICS OR MADE UP TEXTILE ARTICLES AND MACHINES FOR APPLYING THE PASTE TO THE BASE FABRIC OR OTHER SUPPORT USED IN THE MANUFACTURE OF FLOOR COVERINGS SUCH AS LINOLEUM; MACHINES FOR REELING, UNREELING, FOLDING, CUTTING OR PINKING TEXTILE FABRICS:Other machinery :Other finishing processes for cotton textile" +84518029,"MACHINERY (OTHER THAN MACHINES OF HEADING 8450) FOR WASHING, CLEANING, 
WRINGING, DRYING, IRONING, PRESSING (INCLUDING FUSING PRESSES), BLEACHING, DYEING, DRESSING, FINISHING, COATING OR IMPREGNATING TEXTILE YARNS, FABRICS OR MADE UP TEXTILE ARTICLES AND MACHINES FOR APPLYING THE PASTE TO THE BASE FABRIC OR OTHER SUPPORT USED IN THE MANUFACTURE OF FLOOR COVERINGS SUCH AS LINOLEUM; MACHINES FOR REELING, UNREELING, FOLDING, CUTTING OR PINKING TEXTILE FABRICS:Other machinery :Other" +84518090,"MACHINERY (OTHER THAN MACHINES OF HEADING 8450) FOR WASHING, CLEANING, WRINGING, DRYING, IRONING, PRESSING (INCLUDING FUSING PRESSES), BLEACHING, DYEING, DRESSING, FINISHING, COATING OR IMPREGNATING TEXTILE YARNS, FABRICS OR MADE UP TEXTILE ARTICLES AND MACHINES FOR APPLYING THE PASTE TO THE BASE FABRIC OR OTHER SUPPORT USED IN THE MANUFACTURE OF FLOOR COVERINGS SUCH AS LINOLEUM; MACHINES FOR REELING, UNREELING, FOLDING, CUTTING OR PINKING TEXTILE FABRICS:Other machinery :Other" +84519000,"MACHINERY (OTHER THAN MACHINES OF HEADING 8450) FOR WASHING, CLEANING, WRINGING, DRYING, IRONING, PRESSING (INCLUDING FUSING PRESSES), BLEACHING, DYEING, DRESSING, FINISHING, COATING OR IMPREGNATING TEXTILE YARNS, FABRICS OR MADE UP TEXTILE ARTICLES AND MACHINES FOR APPLYING THE PASTE TO THE BASE FABRIC OR OTHER SUPPORT USED IN THE MANUFACTURE OF FLOOR COVERINGS SUCH AS LINOLEUM; MACHINES FOR REELING, UNREELING, FOLDING, CUTTING OR PINKING TEXTILE FABRICS::Parts" +84520000,sewing machines other than machines of heading 8440 furniture bases and covers specially designed for sewing machines sewing machine needles +84521000,sewing machines other than machines of heading 8440 furniture bases and covers specially designed for sewing machines sewing machine needles >> sewing machines of the household type complete with stand or table +84521011,"SEWING MACHINES, OTHER THAN BOOK-SEWING MACHINES OF HEADING 8440; FURNITURE, BASES AND COVERS SPECIALLY DESIGNED FOR SEWING MACHINES; SEWING MACHINE NEEDLES:Sewing machines of the household type :With electronic controls or electric motors" +84521012,"SEWING MACHINES, OTHER THAN BOOK-SEWING MACHINES OF HEADING 8440; FURNITURE, BASES AND COVERS SPECIALLY DESIGNED FOR SEWING MACHINES; SEWING MACHINE NEEDLES:Sewing machines of the household type :Hand operated" +84521019,"SEWING MACHINES, OTHER THAN BOOK-SEWING MACHINES OF HEADING 8440; FURNITURE, BASES AND COVERS SPECIALLY DESIGNED FOR SEWING MACHINES; SEWING MACHINE NEEDLES:Sewing machines of the household type :Other" +84521021,"SEWING MACHINES, OTHER THAN BOOK-SEWING MACHINES OF HEADING 8440; FURNITURE, BASES AND COVERS SPECIALLY DESIGNED FOR SEWING MACHINES; SEWING MACHINE NEEDLES:Sewing machines of the household type :With electronic controls or electric motors" +84521022,"SEWING MACHINES, OTHER THAN BOOK-SEWING MACHINES OF HEADING 8440; FURNITURE, BASES AND COVERS SPECIALLY DESIGNED FOR SEWING MACHINES; SEWING MACHINE NEEDLES:Sewing machines of the household type :Hand operated" +84521029,"SEWING MACHINES, OTHER THAN BOOK-SEWING MACHINES OF HEADING 8440; FURNITURE, BASES AND COVERS SPECIALLY DESIGNED FOR SEWING MACHINES; SEWING MACHINE NEEDLES:Sewing machines of the household type :Other" +84522100,sewing machines other than machines of heading 8440 furniture bases and covers specially designed for sewing machines sewing machine needles >> automatic units +84522110,"SEWING MACHINES, OTHER THAN BOOK-SEWING MACHINES OF HEADING 8440; FURNITURE, BASES AND COVERS SPECIALLY DESIGNED FOR SEWING MACHINES; SEWING MACHINE NEEDLES:Automatic units :Industrial sewing machines having a motor of 150 watts capacity 
and above and having a speed of 1,500 stitches per minute or more" +84522120,"SEWING MACHINES, OTHER THAN BOOK-SEWING MACHINES OF HEADING 8440; FURNITURE, BASES AND COVERS SPECIALLY DESIGNED FOR SEWING MACHINES; SEWING MACHINE NEEDLES:Automatic units :Other with electronic controls or electric motors" +84522190,"SEWING MACHINES, OTHER THAN BOOK-SEWING MACHINES OF HEADING 8440; FURNITURE, BASES AND COVERS SPECIALLY DESIGNED FOR SEWING MACHINES; SEWING MACHINE NEEDLES:Automatic units :Other" +84522900,"SEWING MACHINES, OTHER THAN BOOK-SEWING MACHINES OF HEADING 8440; FURNITURE, BASES AND COVERS SPECIALLY DESIGNED FOR SEWING MACHINES; SEWING MACHINE NEEDLES::Other" +84523000,sewing machines other than machines of heading 8440 furniture bases and covers specially designed for sewing machines sewing machine needles >> sewing machine needles +84523010,"SEWING MACHINES, OTHER THAN BOOK-SEWING MACHINES OF HEADING 8440; FURNITURE, BASES AND COVERS SPECIALLY DESIGNED FOR SEWING MACHINES; SEWING MACHINE NEEDLES:Sewing machine needles :For household type sewing machines" +84523090,"SEWING MACHINES, OTHER THAN BOOK-SEWING MACHINES OF HEADING 8440; FURNITURE, BASES AND COVERS SPECIALLY DESIGNED FOR SEWING MACHINES; SEWING MACHINE NEEDLES:Sewing machine needles :Other" +84529000,sewing machines other than machines of heading 8440 furniture bases and covers specially designed for sewing machines sewing machine needles >> furniture bases and covers for sewing machines and parts thereof other parts of sewing machines furniture bases and covers for sewing machines and parts thereof +84529011,"SEWING MACHINES, OTHER THAN BOOK-SEWING MACHINES OF HEADING 8440; FURNITURE, BASES AND COVERS SPECIALLY DESIGNED FOR SEWING MACHINES; SEWING MACHINE NEEDLES:Other parts of sewing machines :Furniture, bases and covers" +84529019,"SEWING MACHINES, OTHER THAN BOOK-SEWING MACHINES OF HEADING 8440; FURNITURE, BASES AND COVERS SPECIALLY DESIGNED FOR SEWING MACHINES; SEWING MACHINE NEEDLES:Other parts of sewing machines :Parts if furniture, bases and covers of sewing machines" +84529091,"SEWING MACHINES, OTHER THAN BOOK-SEWING MACHINES OF HEADING 8440; FURNITURE, BASES AND COVERS SPECIALLY DESIGNED FOR SEWING MACHINES; SEWING MACHINE NEEDLES:Other parts of sewing machines :Of household sewing machines" +84529099,"SEWING MACHINES, OTHER THAN BOOK-SEWING MACHINES OF HEADING 8440; FURNITURE, BASES AND COVERS SPECIALLY DESIGNED FOR SEWING MACHINES; SEWING MACHINE NEEDLES:Other parts of sewing machines :Other" +84530000,machinery for preparing tanning or working hides skins or leather or for making or repairing footwear or other articles of hides skins or leather other than sewing machines +84531000,"MACHINERY FOR PREPARING, TANNING OR WORKING HIDES, SKINS OR LEATHER OR FOR MAKING OR REPAIRING FOOTWEAR OR OTHER ARTICLES OF HIDES, SKINS OR LEATHER, OTHER THAN SEWING MACHINES::Machinery for preparing, tanning or working hides, skins or leather" +84532000,"MACHINERY FOR PREPARING, TANNING OR WORKING HIDES, SKINS OR LEATHER OR FOR MAKING OR REPAIRING FOOTWEAR OR OTHER ARTICLES OF HIDES, SKINS OR LEATHER, OTHER THAN SEWING MACHINES::Machinery for making or repairing footwear" +84538000,"MACHINERY FOR PREPARING, TANNING OR WORKING HIDES, SKINS OR LEATHER OR FOR MAKING OR REPAIRING FOOTWEAR OR OTHER ARTICLES OF HIDES, SKINS OR LEATHER, OTHER THAN SEWING MACHINES::Other machinery" +84539000,machinery for preparing tanning or working hides skins or leather or for making or repairing footwear or other articles of hides skins or leather other 
than sewing machines >> parts +84539010,"MACHINERY FOR PREPARING, TANNING OR WORKING HIDES, SKINS OR LEATHER OR FOR MAKING OR REPAIRING FOOTWEAR OR OTHER ARTICLES OF HIDES, SKINS OR LEATHER, OTHER THAN SEWING MACHINES:Parts :Of boot and shoe manufacturing machinery" +84539090,"MACHINERY FOR PREPARING, TANNING OR WORKING HIDES, SKINS OR LEATHER OR FOR MAKING OR REPAIRING FOOTWEAR OR OTHER ARTICLES OF HIDES, SKINS OR LEATHER, OTHER THAN SEWING MACHINES:Parts :Other" +84540000,converters ladles ingot moulds and casting machines of a kind used in metallurgy or in metal foundries +84541000,"CONVERTERS, LADLES, INGOT MOULDS AND CASTING MACHINES, OF A KIND USED IN METALLURGY OR IN METAL FOUNDRIES::Converters" +84542000,converters ladles ingot moulds and casting machines of a kind used in metallurgy or in metal foundries >> ingot moulds and ladles +84542010,"CONVERTERS, LADLES, INGOT MOULDS AND CASTING MACHINES, OF A KIND USED IN METALLURGY OR IN METAL FOUNDRIES:Ingot moulds and ladles :Ladles" +84542020,"CONVERTERS, LADLES, INGOT MOULDS AND CASTING MACHINES, OF A KIND USED IN METALLURGY OR IN METAL FOUNDRIES:Ingot moulds and ladles :Ingot moulds" +84543000,converters ladles ingot moulds and casting machines of a kind used in metallurgy or in metal foundries >> casting machines +84543010,"CONVERTERS, LADLES, INGOT MOULDS AND CASTING MACHINES, OF A KIND USED IN METALLURGY OR IN METAL FOUNDRIES:Casting machines:Die-casting machines(OLD tariff)" +84543020,"CONVERTERS, LADLES, INGOT MOULDS AND CASTING MACHINES, OF A KIND USED IN METALLURGY OR IN METAL FOUNDRIES:Casting machines:Continuous casting machines(OLD tariff)" +84543090,"CONVERTERS, LADLES, INGOT MOULDS AND CASTING MACHINES, OF A KIND USED IN METALLURGY OR IN METAL FOUNDRIES:Casting machines:Other" +84549000,"CONVERTERS, LADLES, INGOT MOULDS AND CASTING MACHINES, OF A KIND USED IN METALLURGY OR IN METAL FOUNDRIES::Parts" +84550000,mills and rolls therefor +84551000,METAL-ROLLING MILLS AND ROLLS THEREFOR::Tube mills +84552100,mills and rolls therefor >> hot or combination hot and cold +84552110,METAL-ROLLING MILLS AND ROLLS THEREFOR:Hot or combination hot and cold:Hot +84552120,METAL-ROLLING MILLS AND ROLLS THEREFOR:Hot or combination hot and cold:Combination hot and cold +84552200,METAL-ROLLING MILLS AND ROLLS THEREFOR::Cold +84553000,METAL-ROLLING MILLS AND ROLLS THEREFOR::Rolls for rolling mills +84559000,METAL-ROLLING MILLS AND ROLLS THEREFOR::Other parts +84560000,for working any material by removal of material by laser or other light or photon beam ultrasonic electro discharge electron beam or plasma arc processes water jet cutting machines operated by laser or other light or photon beam processes +84561000,"MACHINE-TOOLS FOR WORKING ANY MATERIAL BY REMOVAL OF MATERIAL, BY LASER OR OTHER LIGHT OR PHOTON BEAM, ULTRA-SONIC, ELECTRO-DISCHARGE, ELECTRO-CHEMICAL, ELECTRON BEAM, IONIC-BEAM OR PLASMA ARC PROCESSES::Operated by laser or other light or photon beam processes(OLD tariff)" +84561100,"MACHINE-TOOLS FOR WORKING ANY MATERIAL BY REMOVAL OF MATERIAL, BY LASER OR OTHER LIGHT OR PHOTON BEAM, ULTRA-SONIC, ELECTRO-DISCHARGE, ELECTRO-CHEMICAL, ELECTRON BEAM, IONIC-BEAM OR PLASMA ARC PROCESSES::Operated by laser" +84561200,"MACHINE-TOOLS FOR WORKING ANY MATERIAL BY REMOVAL OF MATERIAL, BY LASER OR OTHER LIGHT OR PHOTON BEAM, ULTRA-SONIC, ELECTRO-DISCHARGE, ELECTRO-CHEMICAL, ELECTRON BEAM, IONIC-BEAM OR PLASMA ARC PROCESSES::Operated by other light or photon beam processes" +84562000,"MACHINE-TOOLS FOR WORKING ANY MATERIAL BY 
REMOVAL OF MATERIAL, BY LASER OR OTHER LIGHT OR PHOTON BEAM, ULTRA-SONIC, ELECTRO-DISCHARGE, ELECTRO-CHEMICAL, ELECTRON BEAM, IONIC-BEAM OR PLASMA ARC PROCESSES::Operated by ultrasonic processes" +84563000,"MACHINE-TOOLS FOR WORKING ANY MATERIAL BY REMOVAL OF MATERIAL, BY LASER OR OTHER LIGHT OR PHOTON BEAM, ULTRA-SONIC, ELECTRO-DISCHARGE, ELECTRO-CHEMICAL, ELECTRON BEAM, IONIC-BEAM OR PLASMA ARC PROCESSES::Operated by electro-discharge processes" +84564000,"MACHINE-TOOLS FOR WORKING ANY MATERIAL BY REMOVAL OF MATERIAL, BY LASER OR OTHER LIGHT OR PHOTON BEAM, ULTRA-SONIC, ELECTRO-DISCHARGE, ELECTRO-CHEMICAL, ELECTRON BEAM, IONIC-BEAM OR PLASMA ARC PROCESSES::Operated by plasma arc processes" +84565000,"MACHINE-TOOLS FOR WORKING ANY MATERIAL BY REMOVAL OF MATERIAL, BY LASER OR OTHER LIGHT OR PHOTON BEAM, ULTRA-SONIC, ELECTRO-DISCHARGE, ELECTRO-CHEMICAL, ELECTRON BEAM, IONIC-BEAM OR PLASMA ARC PROCESSES::Water-jet cutting machines" +84569000,for working any material by removal of material by laser or other light or photon beam ultrasonic electro discharge electron beam or plasma arc processes water jet cutting machines operated by laser or other light or photon beam processes >> other +84569010,"MACHINE-TOOLS FOR WORKING ANY MATERIAL BY REMOVAL OF MATERIAL, BY LASER OR OTHER LIGHT OR PHOTON BEAM, ULTRA-SONIC, ELECTRO-DISCHARGE, ELECTRO-CHEMICAL, ELECTRON BEAM, IONIC-BEAM OR PLASMA ARC PROCESSES:Other :For dry-etching patterns on semi-conductor materials" +84569020,"MACHINE-TOOLS FOR WORKING ANY MATERIAL BY REMOVAL OF MATERIAL, BY LASER OR OTHER LIGHT OR PHOTON BEAM, ULTRA-SONIC, ELECTRO-DISCHARGE, ELECTRO-CHEMICAL, ELECTRON BEAM, IONIC-BEAM OR PLASMA ARC PROCESSES:Other :Electro-chemical machines" +84569090,"MACHINE-TOOLS FOR WORKING ANY MATERIAL BY REMOVAL OF MATERIAL, BY LASER OR OTHER LIGHT OR PHOTON BEAM, ULTRA-SONIC, ELECTRO-DISCHARGE, ELECTRO-CHEMICAL, ELECTRON BEAM, IONIC-BEAM OR PLASMA ARC PROCESSES:Other :Other" +84570000,machining centres unit construction machines single station and transfer machines for working metal +84571000,machining centres unit construction machines single station and transfer machines for working metal >> machining centres +84571010,"MACHINING CENTRES, UNIT CONSTRUCTION MACHINES (SINGLE STATION) AND MULTI-STATION TRANSFER MACHINES FOR WORKING METAL:Machining centres:Horizontal(OLD tariff)" +84571020,"MACHINING CENTRES, UNIT CONSTRUCTION MACHINES (SINGLE STATION) AND MULTI-STATION TRANSFER MACHINES FOR WORKING METAL:Machining centres:Vertical(OLD tariff)" +84572010,"MACHINING CENTRES, UNIT CONSTRUCTION MACHINES (SINGLE STATION) AND MULTI-STATION TRANSFER MACHINES FOR WORKING METAL:Unit construction machines (single station):Unit head boring(OLD tariff)" +84572020,"MACHINING CENTRES, UNIT CONSTRUCTION MACHINES (SINGLE STATION) AND MULTI-STATION TRANSFER MACHINES FOR WORKING METAL:Unit construction machines (single station):Unit head drilling(OLD tariff)" +84572090,"MACHINING CENTRES, UNIT CONSTRUCTION MACHINES (SINGLE STATION) AND MULTI-STATION TRANSFER MACHINES FOR WORKING METAL:Unit construction machines (single station):Other(OLD tariff)" +84573010,"MACHINING CENTRES, UNIT CONSTRUCTION MACHINES (SINGLE STATION) AND MULTI-STATION TRANSFER MACHINES FOR WORKING METAL:Multi-station transfer machines:Rotary type(OLD tariff)" +84573020,"MACHINING CENTRES, UNIT CONSTRUCTION MACHINES (SINGLE STATION) AND MULTI-STATION TRANSFER MACHINES FOR WORKING METAL:Multi-station transfer machines:In-line type(OLD tariff)"
+84573090,"MACHINING CENTRES, UNIT CONSTRUCTION MACHINES (SINGLE STATION) AND MULTI-STATION TRANSFER MACHINES FOR WORKING METAL:Multi-station transfer machines:Other(OLD tariff)" +84580000,lathes including t urning c entr es f or removing metal horizontal lathes +84581100,LATHES (INCLUDING TURNING CENTRES) FOR REMOVING METAL::Numerically controlled +84581900,lathes including t urning c entr es f or removing metal horizontal lathes >> other automatic single spindle +84581911,"LATHES (INCLUDING TURNING CENTRES) FOR REMOVING METAL:Other :Horizontal bar, swiss type" +84581912,LATHES (INCLUDING TURNING CENTRES) FOR REMOVING METAL:Other :Base sliding head type +84581913,LATHES (INCLUDING TURNING CENTRES) FOR REMOVING METAL:Other :Horizontal chucking +84581919,LATHES (INCLUDING TURNING CENTRES) FOR REMOVING METAL:Other :Other +84581990,LATHES (INCLUDING TURNING CENTRES) FOR REMOVING METAL:Other :Other +84589100,LATHES (INCLUDING TURNING CENTRES) FOR REMOVING METAL::Numerically controlled +84589900,lathes including t urning c entr es f or removing metal horizontal lathes >> other +84589910,"LATHES (INCLUDING TURNING CENTRES) FOR REMOVING METAL:Other :Automatic, multi-spindle bar" +84589920,"LATHES (INCLUDING TURNING CENTRES) FOR REMOVING METAL:Other :Automatic, multi-spindle chucking" +84589931,LATHES (INCLUDING TURNING CENTRES) FOR REMOVING METAL:Other :Capstans lathes +84589932,LATHES (INCLUDING TURNING CENTRES) FOR REMOVING METAL:Other :Turrets lathes +84589933,LATHES (INCLUDING TURNING CENTRES) FOR REMOVING METAL:Other :Capstan and turret combination lathes +84589934,LATHES (INCLUDING TURNING CENTRES) FOR REMOVING METAL:Other :Copying lathes +84589935,LATHES (INCLUDING TURNING CENTRES) FOR REMOVING METAL:Other :Multi-tool and production lathes +84589941,LATHES (INCLUDING TURNING CENTRES) FOR REMOVING METAL:Other :Crankshaft lathes +84589942,LATHES (INCLUDING TURNING CENTRES) FOR REMOVING METAL:Other :Relieving lathes +84589943,LATHES (INCLUDING TURNING CENTRES) FOR REMOVING METAL:Other :Wheel and axle lathes +84589951,LATHES (INCLUDING TURNING CENTRES) FOR REMOVING METAL:Other :Tool-room type +84589959,LATHES (INCLUDING TURNING CENTRES) FOR REMOVING METAL:Other :Other +84589990,LATHES (INCLUDING TURNING CENTRES) FOR REMOVING METAL:Other :Other +84590000,including unit head machines for drilling boring milling treading or tapping by removing metal other than lathes including turning centres of heading 8458 +84591000,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458::Way-type unit head machines" +84592100,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458::Numerically controlled" +84592900,including unit head machines for drilling boring milling treading or tapping by removing metal other than lathes including turning centres of heading 8458 >> other +84592910,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other :Bench and pillar" +84592920,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other :Pillar or columns, multi-spindle" +84592930,"MACHINE-TOOLS 
(INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other :Radial" +84592940,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other :Deep hole" +84592950,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other :Multi head drilling machines" +84592990,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other :Other" +84593100,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458::Numerically controlled" +84593900,including unit head machines for drilling boring milling treading or tapping by removing metal other than lathes including turning centres of heading 8458 >> other +84593910,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other :Vertical turning or boring" +84593990,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other :Other" +84594010,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other boring machines :Jig boring machines horizontal(OLD tariff)" +84594020,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other boring machines :Fine boring machines, horizontal(OLD tariff)" +84594030,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other boring machines :Fine boring machines, vertical(OLD tariff)" +84594090,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other boring machines :Other(OLD tariff)" +84594100,including unit head machines for drilling boring milling treading or tapping by removing metal other than lathes including turning centres of heading 8458 >> numerically controlled +84594110,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Numerically controlled:Jig boring machines, horizontal" +84594120,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Numerically controlled:Fine boring machines, horizontal" +84594130,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, 
OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Numerically controlled:Fine boring machines, vertical" +84594190,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Numerically controlled:Other" +84594900,including unit head machines for drilling boring milling treading or tapping by removing metal other than lathes including turning centres of heading 8458 >> other +84594910,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other:Jig boring machines, horizontal" +84594920,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other:Fine boring machines, horizontal" +84594930,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other:Fine boring machines, vertical" +84594990,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other:Other" +84595100,including unit head machines for drilling boring milling treading or tapping by removing metal other than lathes including turning centres of heading 8458 >> numerically controlled +84595110,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Numerically controlled:Horizontal" +84595120,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Numerically controlled:Vertical" +84595130,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Numerically controlled:Universal" +84595190,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Numerically controlled:Other" +84595900,including unit head machines for drilling boring milling treading or tapping by removing metal other than lathes including turning centres of heading 8458 >> other +84595910,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other :Horizontal" +84595920,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other :Vertical" +84595930,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other :Universal" +84595940,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF 
HEADING 8458:Other :Ram type" +84595950,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other :Die-sinking or pantograph" +84595990,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other :Other" +84596100,including unit head machines for drilling boring milling treading or tapping by removing metal other than lathes including turning centres of heading 8458 >> numerically controlled +84596110,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Numerically controlled:Piano milling" +84596190,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Numerically controlled:Other" +84596900,including unit head machines for drilling boring milling treading or tapping by removing metal other than lathes including turning centres of heading 8458 >> other +84596910,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other :Bed type, horizontal" +84596920,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other :Bed type, vertical" +84596930,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other :Piano milling, single column" +84596940,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other :Piano milling, double column" +84596990,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other :Other" +84597000,including unit head machines for drilling boring milling treading or tapping by removing metal other than lathes including turning centres of heading 8458 >> other threading or tapping machines +84597010,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other threading or tapping machines :Threading machines" +84597020,"MACHINE-TOOLS (INCLUDING WAY-TYPE UNIT HEAD MACHINES) FOR DRILLING, BORING, MILLING, TREADING OR TAPPING BY REMOVING METAL, OTHER THAN LATHES (INCLUDING TURNING CENTRES) OF HEADING 8458:Other threading or tapping machines :Tapping machines(OLD tariff)" +84601100,"MACHINE-TOOLS FOR DEBURRING, SHARPENING, GRINDING, HONING, LAPPING, POLISHING OR OTHERWISE FINISHING METAL, OR CERMETS BY MEANS OF GRINDING STONES, ABRASIVES OR POLISHING PRODUCTS, OTHER THAN GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING MACHINES OF HEADING 8461::Numerically controlled(OLD tariff)" +84601200,"MACHINE-TOOLS FOR DEBURRING, SHARPENING, GRINDING, HONING, LAPPING, POLISHING OR 
OTHERWISE FINISHING METAL, OR CERMETS BY MEANS OF GRINDING STONES, ABRASIVES OR POLISHING PRODUCTS, OTHER THAN GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING MACHINES OF HEADING 8461::Numerically controlled(OLD tariff)" +84601900,"MACHINE-TOOLS FOR DEBURRING, SHARPENING, GRINDING, HONING, LAPPING, POLISHING OR OTHERWISE FINISHING METAL, OR CERMETS BY MEANS OF GRINDING STONES, ABRASIVES OR POLISHING PRODUCTS, OTHER THAN GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING MACHINES OF HEADING 8461::Other(OLD tariff)" +84602100,"MACHINE-TOOLS FOR DEBURRING, SHARPENING, GRINDING, HONING, LAPPING, POLISHING OR OTHERWISE FINISHING METAL, OR CERMETS BY MEANS OF GRINDING STONES, ABRASIVES OR POLISHING PRODUCTS, OTHER THAN GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING MACHINES OF HEADING 8461::Numerically controlled(OLD tariff)" +84602200,"MACHINE-TOOLS FOR DEBURRING, SHARPENING, GRINDING, HONING, LAPPING, POLISHING OR OTHERWISE FINISHING METAL, OR CERMETS BY MEANS OF GRINDING STONES, ABRASIVES OR POLISHING PRODUCTS, OTHER THAN GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING MACHINES OF HEADING 8461::Centreless grinding machines, numerically controlled(OLD tariff)" +84602300,"MACHINE-TOOLS FOR DEBURRING, SHARPENING, GRINDING, HONING, LAPPING, POLISHING OR OTHERWISE FINISHING METAL, OR CERMETS BY MEANS OF GRINDING STONES, ABRASIVES OR POLISHING PRODUCTS, OTHER THAN GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING MACHINES OF HEADING 8461::Other cylindrical grinding machines, numerically controlled(OLD tariff)" +84602400,"MACHINE-TOOLS FOR DEBURRING, SHARPENING, GRINDING, HONING, LAPPING, POLISHING OR OTHERWISE FINISHING METAL, OR CERMETS BY MEANS OF GRINDING STONES, ABRASIVES OR POLISHING PRODUCTS, OTHER THAN GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING MACHINES OF HEADING 8461::Other, numerically controlled(OLD tariff)" +84602910,"MACHINE-TOOLS FOR DEBURRING, SHARPENING, GRINDING, HONING, LAPPING, POLISHING OR OTHERWISE FINISHING METAL, OR CERMETS BY MEANS OF GRINDING STONES, ABRASIVES OR POLISHING PRODUCTS, OTHER THAN GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING MACHINES OF HEADING 8461:Other :Cylindrical grinders(OLD tariff)" +84602920,"MACHINE-TOOLS FOR DEBURRING, SHARPENING, GRINDING, HONING, LAPPING, POLISHING OR OTHERWISE FINISHING METAL, OR CERMETS BY MEANS OF GRINDING STONES, ABRASIVES OR POLISHING PRODUCTS, OTHER THAN GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING MACHINES OF HEADING 8461:Other :Internal grinders(OLD tariff)" +84602930,"MACHINE-TOOLS FOR DEBURRING, SHARPENING, GRINDING, HONING, LAPPING, POLISHING OR OTHERWISE FINISHING METAL, OR CERMETS BY MEANS OF GRINDING STONES, ABRASIVES OR POLISHING PRODUCTS, OTHER THAN GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING MACHINES OF HEADING 8461:Other :Centreless grinders(OLD tariff)" +84602940,"MACHINE-TOOLS FOR DEBURRING, SHARPENING, GRINDING, HONING, LAPPING, POLISHING OR OTHERWISE FINISHING METAL, OR CERMETS BY MEANS OF GRINDING STONES, ABRASIVES OR POLISHING PRODUCTS, OTHER THAN GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING MACHINES OF HEADING 8461:Other :Profile grinders(OLD tariff)" +84602990,"MACHINE-TOOLS FOR DEBURRING, SHARPENING, GRINDING, HONING, LAPPING, POLISHING OR OTHERWISE FINISHING METAL, OR CERMETS BY MEANS OF GRINDING STONES, ABRASIVES OR POLISHING PRODUCTS, OTHER THAN GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING MACHINES OF HEADING 8461:Other :Other(OLD tariff)" +84603100,"MACHINE-TOOLS FOR DEBURRING, SHARPENING, GRINDING, HONING, LAPPING, POLISHING OR OTHERWISE FINISHING METAL, OR CERMETS BY MEANS OF GRINDING STONES, ABRASIVES OR 
POLISHING PRODUCTS, OTHER THAN GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING MACHINES OF HEADING 8461::Numerically controlled(OLD tariff)" +84603910,"MACHINE-TOOLS FOR DEBURRING, SHARPENING, GRINDING, HONING, LAPPING, POLISHING OR OTHERWISE FINISHING METAL, OR CERMETS BY MEANS OF GRINDING STONES, ABRASIVES OR POLISHING PRODUCTS, OTHER THAN GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING MACHINES OF HEADING 8461:Other :Grinder, tool or cutter(OLD tariff)" +84603990,"MACHINE-TOOLS FOR DEBURRING, SHARPENING, GRINDING, HONING, LAPPING, POLISHING OR OTHERWISE FINISHING METAL, OR CERMETS BY MEANS OF GRINDING STONES, ABRASIVES OR POLISHING PRODUCTS, OTHER THAN GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING MACHINES OF HEADING 8461:Other :Other(OLD tariff)" +84604011,"MACHINE-TOOLS FOR DEBURRING, SHARPENING, GRINDING, HONING, LAPPING, POLISHING OR OTHERWISE FINISHING METAL, OR CERMETS BY MEANS OF GRINDING STONES, ABRASIVES OR POLISHING PRODUCTS, OTHER THAN GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING MACHINES OF HEADING 8461:Honing or lapping machines :Vertical, single spindle(OLD tariff)" +84604012,"MACHINE-TOOLS FOR DEBURRING, SHARPENING, GRINDING, HONING, LAPPING, POLISHING OR OTHERWISE FINISHING METAL, OR CERMETS BY MEANS OF GRINDING STONES, ABRASIVES OR POLISHING PRODUCTS, OTHER THAN GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING MACHINES OF HEADING 8461:Honing or lapping machines :Vertical, multi-spindle(OLD tariff)" +84604013,"MACHINE-TOOLS FOR DEBURRING, SHARPENING, GRINDING, HONING, LAPPING, POLISHING OR OTHERWISE FINISHING METAL, OR CERMETS BY MEANS OF GRINDING STONES, ABRASIVES OR POLISHING PRODUCTS, OTHER THAN GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING MACHINES OF HEADING 8461:Honing or lapping machines :Horizontal(OLD tariff)" +84604019,"MACHINE-TOOLS FOR DEBURRING, SHARPENING, GRINDING, HONING, LAPPING, POLISHING OR OTHERWISE FINISHING METAL, OR CERMETS BY MEANS OF GRINDING STONES, ABRASIVES OR POLISHING PRODUCTS, OTHER THAN GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING MACHINES OF HEADING 8461:Honing or lapping machines :Other(OLD tariff)" +84604020,"MACHINE-TOOLS FOR DEBURRING, SHARPENING, GRINDING, HONING, LAPPING, POLISHING OR OTHERWISE FINISHING METAL, OR CERMETS BY MEANS OF GRINDING STONES, ABRASIVES OR POLISHING PRODUCTS, OTHER THAN GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING MACHINES OF HEADING 8461:Honing or lapping machines :Lapping machines(OLD tariff)" +84609010,"MACHINE-TOOLS FOR DEBURRING, SHARPENING, GRINDING, HONING, LAPPING, POLISHING OR OTHERWISE FINISHING METAL, OR CERMETS BY MEANS OF GRINDING STONES, ABRASIVES OR POLISHING PRODUCTS, OTHER THAN GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING MACHINES OF HEADING 8461:Other :Polishing and buffing machines(OLD tariff)" +84609090,"MACHINE-TOOLS FOR DEBURRING, SHARPENING, GRINDING, HONING, LAPPING, POLISHING OR OTHERWISE FINISHING METAL, OR CERMETS BY MEANS OF GRINDING STONES, ABRASIVES OR POLISHING PRODUCTS, OTHER THAN GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING MACHINES OF HEADING 8461:Other :Other(OLD tariff)" +84612011,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Shaping or slotting machines :Die and punch shaping machines(OLD tariff)" +84612019,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT 
ELSEWHERE SPECIFIED OR INCLUDED:Shaping or slotting machines :Other(OLD tariff)" +84612020,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Shaping or slotting machines :Slotting machines(OLD tariff)" +84613010,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Broaching machines :Vertical(OLD tariff)" +84613020,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Broaching machines :Horizontal(OLD tariff)" +84613090,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Broaching machines :Other(OLD tariff)" +84614011,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Gear cutting, gear grinding or gear finishing machines :Bevel gear cutting(OLD tariff)" +84614012,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Gear cutting, gear grinding or gear finishing machines :Gear cutting spiral bevel and /or hypoid(OLD tariff)" +84614013,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Gear cutting, gear grinding or gear finishing machines :Gear slotter or planar formed cutter type(OLD tariff)" +84614014,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Gear cutting, gear grinding or gear finishing machines :Gear milling formed disc cutter type(OLD tariff)" +84614019,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Gear cutting, gear grinding or gear finishing machines :Other(OLD tariff)" +84614021,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Gear cutting, gear grinding or gear finishing machines :Single or double wheel disc type gear grinder(OLD tariff)" +84614022,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Gear cutting, gear grinding or gear finishing machines :Formed wheel gear grinder(OLD tariff)" 
+84614023,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Gear cutting, gear grinding or gear finishing machines :Gear shaver(OLD tariff)" +84614024,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Gear cutting, gear grinding or gear finishing machines :Gear tooth, rounding, chamfering or burring(OLD tariff)" +84614025,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Gear cutting, gear grinding or gear finishing machines :Gear shaper, spur and helical(OLD tariff)" +84614026,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Gear cutting, gear grinding or gear finishing machines :Gear hobber, spur and helical(OLD tariff)" +84614029,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Gear cutting, gear grinding or gear finishing machines :Other(OLD tariff)" +84615011,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Sawing or cutting-off machines :Band saw, horizontal(OLD tariff)" +84615012,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Sawing or cutting-off machines :Band saw, vertical(OLD tariff)" +84615013,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Sawing or cutting-off machines :Circular saw, cold(OLD tariff)" +84615014,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Sawing or cutting-off machines :Circular saw, hot(OLD tariff)" +84615015,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Sawing or cutting-off machines :Hack saw(OLD tariff)" +84615019,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Sawing or cutting-off machines :Other(OLD tariff)" +84615021,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER 
MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Sawing or cutting-off machines :Abrasive wheel cutting-off machines(OLD tariff)" +84615029,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED:Sawing or cutting-off machines :Other(OLD tariff)" +84619000,"MACHINE-TOOLS FOR PLANING, SHAPING, SLOTTING, BROACHING, GEAR CUTTING, GEAR GRINDING OR GEAR FINISHING, SAWING, CUTTING-OFF AND OTHER MACHINE TOOLS WORKING BY REMOVING METAL, OR CERMETS, NOT ELSEWHERE SPECIFIED OR INCLUDED::Other(OLD tariff)" +84621011,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Forging or die-stamping machines (including presses) and hammers:Steam or air, single frame(OLD tariff)" +84621012,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Forging or die-stamping machines (including presses) and hammers:Steam or air, double frame(OLD tariff)" +84621013,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Forging or die-stamping machines (including presses) and hammers:Headers and upsetters(OLD tariff)" +84621014,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Forging or die-stamping machines (including presses) and hammers:Double acting counter blow, air or steam(OLD tariff)" +84621019,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Forging or die-stamping machines (including presses) and hammers:Other(OLD tariff)" +84621020,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Forging or die-stamping machines (including presses) and hammers:Forging machines(OLD tariff)" +84621030,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Forging or die-stamping machines (including presses) and hammers:Die stamping machines(OLD tariff)" +84621100,"MACHINE-TOOLS (INCLUDING PRESSES) 
FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:closed:Closed die forging machines(OLD tariff)" +84621900,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other:Other(OLD tariff)" +84622100,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE::Numerically controlled(OLD tariff)" +84622200,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:profile:Profile forming machines(OLD tariff)" +84622300,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:press:Numerically controlled press brakes(OLD tariff)" +84622400,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:panel:Numerically controlled panel bender(OLD tariff)" +84622500,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:roll:Numerically controlled roll forming machines(OLD tariff)" +84622600,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:other:Other numerically controlled bending, folding, straightening or flattening machines(OLD tariff)" +84622900,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Other(OLD tariff)" +84622910,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Bending and straightening machines(OLD tariff)" +84622920,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR 
DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Press brakes(OLD tariff)" +84622930,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Other rotary head and ram type(OLD tariff)" +84622990,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Other(OLD tariff)" +84623100,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE::Numerically controlled(OLD tariff)" +84623200,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:line:Slitting lines and cut-to-length lines(OLD tariff)" +84623300,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:machines:Numerically controlled shearing machines(OLD tariff)" +84623900,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Other(OLD tariff)" +84623910,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Plate and sheet shears (guillotine)(OLD tariff)" +84623920,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Bar and angle shearing and cropping(OLD tariff)" +84623990,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Other(OLD tariff)" +84624100,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING 
OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE::Numerically controlled(OLD tariff)" +84624200,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Numerically controlled:Numerically controlled(OLD tariff)" +84624900,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Other(OLD tariff)" +84624910,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Punching machines (including turret)(OLD tariff)" +84624920,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Combination of punching, shearing and cropping machines(OLD tariff)" +84624930,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Nibbling machines(OLD tariff)" +84624990,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Other(OLD tariff)" +84625100,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Numerically controlled:Numerically controlled(OLD tariff)" +84625900,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other:Other(OLD tariff)" +84626100,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:presses:Hydraulic presses(OLD tariff)" +84626200,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:presses:Mechanical Presses(OLD tariff)"
+84626300,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:servo:Servo-presses(OLD tariff)" +84626900,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:other:Other(OLD tariff)" +84629000,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:other:Other(OLD tariff)" +84629110,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Hydraulic presses:Hydraulic extension(OLD tariff)" +84629190,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Hydraulic presses:Other(OLD tariff)" +84629911,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Pneumatic presses(OLD tariff)" +84629912,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Inclinable presses(OLD tariff)" +84629913,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Vertical gap of frame presses(OLD tariff)" +84629914,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Vertical straight presses(OLD tariff)" +84629915,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Vertical forging presses(OLD tariff)" +84629919,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, 
STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Other(OLD tariff)" +84629920,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Dieing or lobbing machine presses(OLD tariff)" +84629930,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Transfer and multiple presses(OLD tariff)" +84629940,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Horizontal presses(OLD tariff)" +84629950,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Friction screw presses(OLD tariff)" +84629960,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Knuckle joint presses(OLD tariff)" +84629970,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Coining joint presses(OLD tariff)" +84629990,"MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY FORGING, HAMMERING OR DIE-STAMPING; MACHINE-TOOLS (INCLUDING PRESSES) FOR WORKING METAL BY BENDING, FOLDING, STRAIGHTENING, FLATTENING, SHEARING, PUNCHING OR NOTCHING; PRESSES FOR WORKING METAL OR METAL CARBIDES, NOT SPECIFIED ABOVE:Other :Other(OLD tariff)" +84630000,other for working metal or cermets without removing material +84631000,other for working metal or cermets without removing material >> for bars tubes profiles wire or the like +84631010,"OTHER MACHINE-TOOLS FOR WORKING METAL, OR CERMETS, WITHOUT REMOVING MATERIAL:Draw-benches for bars, tubes, profiles, wire or the like:Wire and metal ribbon drawing machines" +84631020,"OTHER MACHINE-TOOLS FOR WORKING METAL, OR CERMETS, WITHOUT REMOVING MATERIAL:Draw-benches for bars, tubes, profiles, wire or the like:Other wire making machines" +84631030,"OTHER MACHINE-TOOLS FOR WORKING METAL, OR CERMETS, WITHOUT REMOVING MATERIAL:Draw-benches for bars, tubes, profiles, wire or the like:Tube drawing machines" +84631090,"OTHER MACHINE-TOOLS FOR WORKING METAL, OR CERMETS, WITHOUT REMOVING MATERIAL:Draw-benches for bars, tubes, profiles, wire or the like:Other" +84632000,"OTHER MACHINE-TOOLS FOR WORKING METAL, OR CERMETS, WITHOUT REMOVING MATERIAL::Thread
rolling machines" +84633000,other for working metal or other for working metal or cermets without removing material >> machines for working wire +84633010,"OTHER MACHINE-TOOLS FOR WORKING METAL, OR CERMETS, WITHOUT REMOVING MATERIAL:Machines for working wire :Wire grill or knitting machine" +84633020,"OTHER MACHINE-TOOLS FOR WORKING METAL, OR CERMETS, WITHOUT REMOVING MATERIAL:Machines for working wire :Spring coiling" +84633030,"OTHER MACHINE-TOOLS FOR WORKING METAL, OR CERMETS, WITHOUT REMOVING MATERIAL:Machines for working wire :Chain making" +84633040,"OTHER MACHINE-TOOLS FOR WORKING METAL, OR CERMETS, WITHOUT REMOVING MATERIAL:Machines for working wire :Nail-making machine" +84639000,other for working metal or other for working metal or cermets without removing material >> other +84639010,"OTHER MACHINE-TOOLS FOR WORKING METAL, OR CERMETS, WITHOUT REMOVING MATERIAL:Other:Riveting machines (excluding portable hand operated machine)" +84639020,"OTHER MACHINE-TOOLS FOR WORKING METAL, OR CERMETS, WITHOUT REMOVING MATERIAL:Other:Strip profiling" +84639030,"OTHER MACHINE-TOOLS FOR WORKING METAL, OR CERMETS, WITHOUT REMOVING MATERIAL:Other:Seaming machine for example for cans" +84639090,"OTHER MACHINE-TOOLS FOR WORKING METAL, OR CERMETS, WITHOUT REMOVING MATERIAL:Other:Other" +84640000,for working stone ceramics concrete or like mineral materials or for cold working glass +84641000,for working stone ceramics concrete or like mineral materials or for cold working glass >> sawing machine +84641010,"MACHINE-TOOLS FOR WORKING STONE, CERAMICS, CONCRETE, ASBESTOS-CEMENT OR LIKE MINERAL MATERIALS OR FOR COLD WORKING GLASS:Sawing machine:Granite cutting machines or equipment" +84641090,"MACHINE-TOOLS FOR WORKING STONE, CERAMICS, CONCRETE, ASBESTOS-CEMENT OR LIKE MINERAL MATERIALS OR FOR COLD WORKING GLASS:Sawing machine:Other" +84642000,"MACHINE-TOOLS FOR WORKING STONE, CERAMICS, CONCRETE, ASBESTOS-CEMENT OR LIKE MINERAL MATERIALS OR FOR COLD WORKING GLASS::Grinding or polishing machines" +84649000,"MACHINE-TOOLS FOR WORKING STONE, CERAMICS, CONCRETE, ASBESTOS-CEMENT OR LIKE MINERAL MATERIALS OR FOR COLD WORKING GLASS::Other" +84650000,mach incl uding m achines f or glueing or otherwise nailing stapling assembling for working wood cork bone hard plastics or similar rubber hard hard materials +84651000,"MACHINE-TOOLS (INCLUDING MACHINES FOR NAILING, STAPLING, GLUEING OR OTHERWISE ASSEMBLING) FOR WORKING WOOD, CORK, BONE, HARD RUBBER, HARD PLASTICS OR SIMILAR HARD MATERIALS::Machines which can carry out different types of machining operations without tool change between such operations" +84652000,"MACHINE-TOOLS (INCLUDING MACHINES FOR NAILING, STAPLING, GLUEING OR OTHERWISE ASSEMBLING) FOR WORKING WOOD, CORK, BONE, HARD RUBBER, HARD PLASTICS OR SIMILAR HARD MATERIALS::Machining centres" +84659100,"MACHINE-TOOLS (INCLUDING MACHINES FOR NAILING, STAPLING, GLUEING OR OTHERWISE ASSEMBLING) FOR WORKING WOOD, CORK, BONE, HARD RUBBER, HARD PLASTICS OR SIMILAR HARD MATERIALS::Sawing machines" +84659200,"MACHINE-TOOLS (INCLUDING MACHINES FOR NAILING, STAPLING, GLUEING OR OTHERWISE ASSEMBLING) FOR WORKING WOOD, CORK, BONE, HARD RUBBER, HARD PLASTICS OR SIMILAR HARD MATERIALS::Planing, milling or moulding (by cutting) machines" +84659300,"MACHINE-TOOLS (INCLUDING MACHINES FOR NAILING, STAPLING, GLUEING OR OTHERWISE ASSEMBLING) FOR WORKING WOOD, CORK, BONE, HARD RUBBER, HARD PLASTICS OR SIMILAR HARD MATERIALS::Grinding, sanding or polishing machines" +84659400,"MACHINE-TOOLS (INCLUDING MACHINES FOR NAILING, 
STAPLING, GLUEING OR OTHERWISE ASSEMBLING) FOR WORKING WOOD, CORK, BONE, HARD RUBBER, HARD PLASTICS OR SIMILAR HARD MATERIALS::Bending or assembling machines" +84659500,"MACHINE-TOOLS (INCLUDING MACHINES FOR NAILING, STAPLING, GLUEING OR OTHERWISE ASSEMBLING) FOR WORKING WOOD, CORK, BONE, HARD RUBBER, HARD PLASTICS OR SIMILAR HARD MATERIALS::Drilling or morticing machines" +84659600,"MACHINE-TOOLS (INCLUDING MACHINES FOR NAILING, STAPLING, GLUEING OR OTHERWISE ASSEMBLING) FOR WORKING WOOD, CORK, BONE, HARD RUBBER, HARD PLASTICS OR SIMILAR HARD MATERIALS::Splitting, slicing or paring machines" +84659900,machine tools including machines for nailing stapling glueing or otherwise assembling for working wood cork bone hard rubber hard plastics or similar hard materials >> other other +84659910,"MACHINE-TOOLS (INCLUDING MACHINES FOR NAILING, STAPLING, GLUEING OR OTHERWISE ASSEMBLING) FOR WORKING WOOD, CORK, BONE, HARD RUBBER, HARD PLASTICS OR SIMILAR HARD MATERIALS:Other :Lathes" +84659990,"MACHINE-TOOLS (INCLUDING MACHINES FOR NAILING, STAPLING, GLUEING OR OTHERWISE ASSEMBLING) FOR WORKING WOOD, CORK, BONE, HARD RUBBER, HARD PLASTICS OR SIMILAR HARD MATERIALS:Other :Other" +84660000,parts and accessories suitable for use solely or principally with the machines of headings 8456 to 8465 including work or tool holders self opening dieheads dividing heads and other special attachments for the machines tool holders for any type of tool for working in the hand +84661000,parts and accessories suitable for use solely or principally with the machines of headings 8456 to 8465 including work or tool holders self opening dieheads dividing heads and other special attachments for the machines tool holders for any type of tool for working in the hand >> tool holders and dieheads +84661010,"PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF HEADINGS 8456 TO 8465 INCLUDING WORK OR TOOL HOLDERS, SELF OPENING DIEHEADS, DIVIDING HEADS AND OTHER SPECIAL ATTACHMENTS FOR THE MACHINES; TOOL HOLDERS FOR ANY TYPE OF TOOL, FOR WORKING IN THE HAND:Tool holders and self-opening dieheads:Tool holders" +84661020,"PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF HEADINGS 8456 TO 8465 INCLUDING WORK OR TOOL HOLDERS, SELF OPENING DIEHEADS, DIVIDING HEADS AND OTHER SPECIAL ATTACHMENTS FOR THE MACHINES; TOOL HOLDERS FOR ANY TYPE OF TOOL, FOR WORKING IN THE HAND:Tool holders and self-opening dieheads:Self-opening dieheads" +84662000,"PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF HEADINGS 8456 TO 8465 INCLUDING WORK OR TOOL HOLDERS, SELF OPENING DIEHEADS, DIVIDING HEADS AND OTHER SPECIAL ATTACHMENTS FOR THE MACHINES; TOOL HOLDERS FOR ANY TYPE OF TOOL, FOR WORKING IN THE HAND::Work holders" +84663000,parts and accessories suitable for use solely or principally with the machines of headings 8456 to 8465 including work or tool holders self opening dieheads dividing heads and other special attachments for the machines tool holders for any type of tool for working in the hand >> dividing heads and other special attachments for machines +84663010,"PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF HEADINGS 8456 TO 8465 INCLUDING WORK OR TOOL HOLDERS, SELF OPENING DIEHEADS, DIVIDING HEADS AND OTHER SPECIAL ATTACHMENTS FOR THE MACHINES; TOOL HOLDERS FOR ANY TYPE OF TOOL, FOR WORKING IN THE HAND:Dividing heads and other special attachments for machines:Chucks" +84663020,"PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY
OR PRINCIPALLY WITH THE MACHINES OF HEADINGS 8456 TO 8465 INCLUDING WORK OR TOOL HOLDERS, SELF OPENING DIEHEADS, DIVIDING HEADS AND OTHER SPECIAL ATTACHMENTS FOR THE MACHINES; TOOL HOLDERS FOR ANY TYPE OF TOOL, FOR WORKING IN THE HAND:Dividing heads and other special attachments for machines:Jigs and fixtures" +84663090,"PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF HEADINGS 8456 TO 8465 INCLUDING WORK OR TOOL HOLDERS, SELF OPENING DIEHEADS, DIVIDING HEADS AND OTHER SPECIAL ATTACHMENTS FOR THE MACHINES; TOOL HOLDERS FOR ANY TYPE OF TOOL, FOR WORKING IN THE HAND:Dividing heads and other special attachments for machines:Other" +84669100,"PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF HEADINGS 8456 TO 8465 INCLUDING WORK OR TOOL HOLDERS, SELF OPENING DIEHEADS, DIVIDING HEADS AND OTHER SPECIAL ATTACHMENTS FOR THE MACHINES; TOOL HOLDERS FOR ANY TYPE OF TOOL, FOR WORKING IN THE HAND::For machines of heading 8464" +84669200,"PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF HEADINGS 8456 TO 8465 INCLUDING WORK OR TOOL HOLDERS, SELF OPENING DIEHEADS, DIVIDING HEADS AND OTHER SPECIAL ATTACHMENTS FOR THE MACHINES; TOOL HOLDERS FOR ANY TYPE OF TOOL, FOR WORKING IN THE HAND::For machines of heading 8465" +84669300,parts and accessories suitable for use solely or principally with the machines of headings8456 to 8465 including work or tool holders self opening dieheads dividing heads and other special attachm ents for the machines tool holders for any type of tool for working in the hand >> for machines of headings 8456 to 8461 +84669310,"PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF HEADINGS 8456 TO 8465 INCLUDING WORK OR TOOL HOLDERS, SELF OPENING DIEHEADS, DIVIDING HEADS AND OTHER SPECIAL ATTACHMENTS FOR THE MACHINES; TOOL HOLDERS FOR ANY TYPE OF TOOL, FOR WORKING IN THE HAND:For machines of headings 8456 to 8461:Parts and accessories of machine tools, for working metals" +84669390,"PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF HEADINGS 8456 TO 8465 INCLUDING WORK OR TOOL HOLDERS, SELF OPENING DIEHEADS, DIVIDING HEADS AND OTHER SPECIAL ATTACHMENTS FOR THE MACHINES; TOOL HOLDERS FOR ANY TYPE OF TOOL, FOR WORKING IN THE HAND:For machines of headings 8456 to 8461:Other" +84669400,"PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF HEADINGS 8456 TO 8465 INCLUDING WORK OR TOOL HOLDERS, SELF OPENING DIEHEADS, DIVIDING HEADS AND OTHER SPECIAL ATTACHMENTS FOR THE MACHINES; TOOL HOLDERS FOR ANY TYPE OF TOOL, FOR WORKING IN THE HAND::For machines of heading 8462 or 8463" +84670000,tools for working in the hand pneumatic hydraulic or with self contained electric or motor pneumatic +84671100,tools for working in the hand pneumatic hydraulic or with self contained electric or motor pneumatic >> rotary type including combined rotary percussion +84671110,"TOOLS FOR WORKING IN THE HAND, PNEUMATIC, HYDRAULIC OR WITH SELF- CONTAINED ELECTRIC OR NON-ELECTRIC MOTOR:Rotary type (including combined rotary percussion) :Drills" +84671120,"TOOLS FOR WORKING IN THE HAND, PNEUMATIC, HYDRAULIC OR WITH SELF- CONTAINED ELECTRIC OR NON-ELECTRIC MOTOR:Rotary type (including combined rotary percussion) :Hammers" +84671190,"TOOLS FOR WORKING IN THE HAND, PNEUMATIC, HYDRAULIC OR WITH SELF- CONTAINED ELECTRIC OR NON-ELECTRIC MOTOR:Rotary type (including combined rotary percussion) :Other" +84671900,"TOOLS FOR WORKING IN THE HAND, 
PNEUMATIC, HYDRAULIC OR WITH SELF- CONTAINED ELECTRIC OR NON-ELECTRIC MOTOR::Other" +84672100,"TOOLS FOR WORKING IN THE HAND, PNEUMATIC, HYDRAULIC OR WITH SELF- CONTAINED ELECTRIC OR NON-ELECTRIC MOTOR::Drills of all kinds" +84672200,"TOOLS FOR WORKING IN THE HAND, PNEUMATIC, HYDRAULIC OR WITH SELF- CONTAINED ELECTRIC OR NON-ELECTRIC MOTOR::Saws" +84672900,"TOOLS FOR WORKING IN THE HAND, PNEUMATIC, HYDRAULIC OR WITH SELF- CONTAINED ELECTRIC OR NON-ELECTRIC MOTOR::Other" +84678100,"TOOLS FOR WORKING IN THE HAND, PNEUMATIC, HYDRAULIC OR WITH SELF- CONTAINED ELECTRIC OR NON-ELECTRIC MOTOR::Chain saws" +84678900,tools for working in the hand pneumatic hydraulic or with self contained electric or motor pneumatic >> other +84678910,"TOOLS FOR WORKING IN THE HAND, PNEUMATIC, HYDRAULIC OR WITH SELF- CONTAINED ELECTRIC OR NON-ELECTRIC MOTOR:Other :Compressed air grease guns, lubricators and similar appliances" +84678920,"TOOLS FOR WORKING IN THE HAND, PNEUMATIC, HYDRAULIC OR WITH SELF- CONTAINED ELECTRIC OR NON-ELECTRIC MOTOR:Other :Vibrators" +84678990,"TOOLS FOR WORKING IN THE HAND, PNEUMATIC, HYDRAULIC OR WITH SELF- CONTAINED ELECTRIC OR NON-ELECTRIC MOTOR:Other :Other" +84679100,"TOOLS FOR WORKING IN THE HAND, PNEUMATIC, HYDRAULIC OR WITH SELF- CONTAINED ELECTRIC OR NON-ELECTRIC MOTOR::Of chain saws" +84679200,"TOOLS FOR WORKING IN THE HAND, PNEUMATIC, HYDRAULIC OR WITH SELF- CONTAINED ELECTRIC OR NON-ELECTRIC MOTOR::Of pneumatic tools" +84679900,"TOOLS FOR WORKING IN THE HAND, PNEUMATIC, HYDRAULIC OR WITH SELF- CONTAINED ELECTRIC OR NON-ELECTRIC MOTOR::Other" +84680000,machinery and apparatus for soldering brazing or welding whether or not capable of cutting other than those of heading gas operated surface tempering machines and appliances +84681000,"MACHINERY AND APPARATUS FOR SOLDERING, BRAZING OR WELDING, WHETHER OR NOT CAPABLE OF CUTTING, OTHER THAN THOSE OF HEADING 8512.5; GAS- OPERATED SURFACE TEMPERING MACHINES AND APPLIANCES::Hand-held blow pipes" +84682000,machinery and apparatus for soldering brazing or welding whether or not capable of cutting other than those of heading gas operated surface tempering machines and appliances >> other machinery and apparatus +84682010,"MACHINERY AND APPARATUS FOR SOLDERING, BRAZING OR WELDING, WHETHER OR NOT CAPABLE OF CUTTING, OTHER THAN THOSE OF HEADING 8512.5; GAS- OPERATED SURFACE TEMPERING MACHINES AND APPLIANCES:Other gas-operated machinery and apparatus:Welding or cutting machines" +84682090,"MACHINERY AND APPARATUS FOR SOLDERING, BRAZING OR WELDING, WHETHER OR NOT CAPABLE OF CUTTING, OTHER THAN THOSE OF HEADING 8512.5; GAS- OPERATED SURFACE TEMPERING MACHINES AND APPLIANCES:Other gas-operated machinery and apparatus:Other" +84688000,"MACHINERY AND APPARATUS FOR SOLDERING, BRAZING OR WELDING, WHETHER OR NOT CAPABLE OF CUTTING, OTHER THAN THOSE OF HEADING 8512.5; GAS- OPERATED SURFACE TEMPERING MACHINES AND APPLIANCES::Other machinery and apparatus" +84689000,"MACHINERY AND APPARATUS FOR SOLDERING, BRAZING OR WELDING, WHETHER OR NOT CAPABLE OF CUTTING, OTHER THAN THOSE OF HEADING 8512.5; GAS- OPERATED SURFACE TEMPERING MACHINES AND APPLIANCES::Parts" +84690010,TYPEWRITERS OTHER THAN PRINTERS OF HEADING 8443; WORD-PROCESSING MACHINES:Tyewriters other than printers of heading 8443 word-processing machines::Word processing machine(OLD tariff) +84690020,TYPEWRITERS OTHER THAN PRINTERS OF HEADING 8443; WORD-PROCESSING MACHINES:Tyewriters other than printers of heading 8443 word-processing machines::Automatic typewriters(OLD tariff) 
+84690030,"TYPEWRITERS OTHER THAN PRINTERS OF HEADING 8443; WORD-PROCESSING MACHINES:Typewriters other than printers of heading 8443 word-processing machines::Braille typewriters, electric(OLD tariff)" +84690040,"TYPEWRITERS OTHER THAN PRINTERS OF HEADING 8443; WORD-PROCESSING MACHINES:Typewriters other than printers of heading 8443 word-processing machines::Braille typewriters, non-electric(OLD tariff)" +84690090,"TYPEWRITERS OTHER THAN PRINTERS OF HEADING 8443; WORD-PROCESSING MACHINES:Typewriters other than printers of heading 8443 word-processing machines::Other typewriters, electric or non-electric(OLD tariff)" +84700000,calculating machines and data recording reproducing and displaying machines incorporating a calculating device cash registers +84701000,"CALCULATING MACHINES AND POCKET-SIZE DATA RECORDING, REPRODUCING AND DISPLAYING MACHINES WITH CALCULATING FUNCTIONS; ACCOUNTING MACHINES, POSTAGE-FRANKING MACHINES, TICKET-ISSUING MACHINES AND SIMILAR MACHINES, INCORPORATING A CALCULATING DEVICE; CASH REGISTERS::Electronic calculators capable of operation without an external source of electric power and pocket-size data recording, reproducing and displaying machines with calculating functions" +84702100,"CALCULATING MACHINES AND POCKET-SIZE DATA RECORDING, REPRODUCING AND DISPLAYING MACHINES WITH CALCULATING FUNCTIONS; ACCOUNTING MACHINES, POSTAGE-FRANKING MACHINES, TICKET-ISSUING MACHINES AND SIMILAR MACHINES, INCORPORATING A CALCULATING DEVICE; CASH REGISTERS::Incorporating a printing device" +84702900,"CALCULATING MACHINES AND POCKET-SIZE DATA RECORDING, REPRODUCING AND DISPLAYING MACHINES WITH CALCULATING FUNCTIONS; ACCOUNTING MACHINES, POSTAGE-FRANKING MACHINES, TICKET-ISSUING MACHINES AND SIMILAR MACHINES, INCORPORATING A CALCULATING DEVICE; CASH REGISTERS::Other" +84703000,"CALCULATING MACHINES AND POCKET-SIZE DATA RECORDING, REPRODUCING AND DISPLAYING MACHINES WITH CALCULATING FUNCTIONS; ACCOUNTING MACHINES, POSTAGE-FRANKING MACHINES, TICKET-ISSUING MACHINES AND SIMILAR MACHINES, INCORPORATING A CALCULATING DEVICE; CASH REGISTERS::Other calculating machines" +84705000,calculating machines and data recording reproducing and displaying machines incorporating a calculating device cash registers >> cash registers +84705010,"CALCULATING MACHINES AND POCKET-SIZE DATA RECORDING, REPRODUCING AND DISPLAYING MACHINES WITH CALCULATING FUNCTIONS; ACCOUNTING MACHINES, POSTAGE-FRANKING MACHINES, TICKET-ISSUING MACHINES AND SIMILAR MACHINES, INCORPORATING A CALCULATING DEVICE; CASH REGISTERS:Cash registers :Electrically operated" +84705020,"CALCULATING MACHINES AND POCKET-SIZE DATA RECORDING, REPRODUCING AND DISPLAYING MACHINES WITH CALCULATING FUNCTIONS; ACCOUNTING MACHINES, POSTAGE-FRANKING MACHINES, TICKET-ISSUING MACHINES AND SIMILAR MACHINES, INCORPORATING A CALCULATING DEVICE; CASH REGISTERS:Cash registers :Manually operated" +84709000,calculating machines and data recording reproducing and displaying machines incorporating a calculating device cash registers >> other +84709010,"CALCULATING MACHINES AND POCKET-SIZE DATA RECORDING, REPRODUCING AND DISPLAYING MACHINES WITH CALCULATING FUNCTIONS; ACCOUNTING MACHINES, POSTAGE-FRANKING MACHINES, TICKET-ISSUING MACHINES AND SIMILAR MACHINES, INCORPORATING A CALCULATING DEVICE; CASH REGISTERS:Other :Electrically operated" +84709020,"CALCULATING MACHINES AND POCKET-SIZE DATA RECORDING, REPRODUCING AND DISPLAYING MACHINES WITH CALCULATING FUNCTIONS; ACCOUNTING MACHINES, POSTAGE-FRANKING MACHINES, TICKET-ISSUING MACHINES AND SIMILAR MACHINES, INCORPORATING A CALCULATING DEVICE; CASH REGISTERS:Other :Manually operated" +84710000,automatic data processing machines and units thereof magnetic or optical readers machines for transcribing data on to data media in coded form and machines for processing such data not elsewhere specified or included +84713000,automatic data processing machines and units thereof magnetic or optical readers machines for transcribing data on to data media in coded form and machines for processing such data not elsewhere specified or included >> portable automatic data processing machines weighing not more than 10 kg consisting of at least a central processing unit a keyboard and a display +84713010,"AUTOMATIC DATA PROCESSING MACHINES AND UNITS:Portable automatic data processing machines, weighing not more than 10 kg, consisting of at least a central processing unit, a keyboard and a display:Personal computer" +84713090,"AUTOMATIC DATA PROCESSING MACHINES AND UNITS:Portable automatic data processing machines, weighing not more than 10 kg, consisting of at least a central processing unit, a keyboard and a display:Other" +84714100,automatic data processing machines and units thereof magnetic or optical readers machines for transcribing data on to data media in coded form and machines for processing such data not elsewhere specified or included >> comprising in the same housing at least a central processing unit and an input and output unit whether or not combined +84714110,"AUTOMATIC DATA PROCESSING MACHINES AND UNITS:Comprising in the same housing at least a central processing unit and an input and output unit, whether or not combined :Micro computer" +84714120,"AUTOMATIC DATA PROCESSING MACHINES AND UNITS:Comprising in the same housing at least a central processing unit and an input and output unit, whether or not combined :Large or main frame computer" +84714190,"AUTOMATIC DATA PROCESSING MACHINES AND UNITS:Comprising in the same housing at least a central processing unit and an input and output unit, whether or not combined :Other" +84714900,AUTOMATIC DATA PROCESSING MACHINES AND UNITS::Presented in the form of systems +84715000,"AUTOMATIC DATA PROCESSING MACHINES AND UNITS::Processing units other than those of sub-headings 8471 41 or 8471 49, whether or not containing in the same housing one or two of the following types of unit storage units, input units, output units" +84716000,automatic data processing machines and units thereof magnetic or optical readers machines for transcribing data on to data media in coded form and machines for processing such data not elsewhere specified or included >> input or output units whether or not containing storage units in the same housing +84716010,"AUTOMATIC DATA PROCESSING MACHINES AND UNITS:Input or output units, whether or not containing storage units in the same housing :Combined input or output units" +84716024,"AUTOMATIC DATA PROCESSING MACHINES AND UNITS:Input or output units, whether or not containing storage units in the same housing :Graphic printer" +84716025,"AUTOMATIC DATA PROCESSING MACHINES AND UNITS:Input or output units, whether or not containing storage units in the same housing :Plotter" +84716029,"AUTOMATIC DATA PROCESSING MACHINES AND UNITS:Input or output units, whether or not containing storage units in the same housing :Other" +84716040,"AUTOMATIC DATA PROCESSING MACHINES AND UNITS:Input or output units, whether or not containing 
storage units in the same housing :Keyboard" +84716050,"AUTOMATIC DATA PROCESSING MACHINES AND UNITS:Input or output units, whether or not containing storage units in the same housing :Scanners" +84716060,"AUTOMATIC DATA PROCESSING MACHINES AND UNITS:Input or output units, whether or not containing storage units in the same housing :Mouse" +84716090,"AUTOMATIC DATA PROCESSING MACHINES AND UNITS:Input or output units, whether or not containing storage units in the same housing :Other" +84717000,automatic data processing machines and units t h ere o f m ag ne t i c o r o p t ica l r ea de rs machines for transcribing data on to data media in coded form and machines for processing such data not elsewhere specified or included >> storage units +84717010,AUTOMATIC DATA PROCESSING MACHINES AND UNITS:Storage units :Floppy disc drives +84717020,AUTOMATIC DATA PROCESSING MACHINES AND UNITS:Storage units :Hard disc drives +84717030,AUTOMATIC DATA PROCESSING MACHINES AND UNITS:Storage units :Removable or exchangeable disc drives +84717040,AUTOMATIC DATA PROCESSING MACHINES AND UNITS:Storage units :Magnetic tape drives +84717050,AUTOMATIC DATA PROCESSING MACHINES AND UNITS:Storage units :Cartridge tape drive +84717060,AUTOMATIC DATA PROCESSING MACHINES AND UNITS:Storage units :CD-ROM drive +84717070,AUTOMATIC DATA PROCESSING MACHINES AND UNITS:Storage units :Digital video disc drive +84717090,AUTOMATIC DATA PROCESSING MACHINES AND UNITS:Storage units :Other +84718000,AUTOMATIC DATA PROCESSING MACHINES AND UNITS::Other units of automatic data processing machines +84719000,AUTOMATIC DATA PROCESSING MACHINES AND UNITS::Other +84720000,oth er office machines for example hectograph or stencil duplicating machines addressing machines automatic banknote dispensers coin sorting machines coin counting or wrapping machines pencil sharpening machines perforating or stapling machines +84721000,"OTHER OFFICE MACHINES (FOR EXAMPLE, HECTOGRAPH OR STENCIL DUPLICATING MACHINES, ADDRESSING MACHINES, AUTOMATIC BANKNOTE DISPENSERS, COIN SORTING MACHINES, COIN COUNTING OR WRAPPING MACHINES, PENCIL-SHARPENING MACHINES, PERFORATING OR STAPLING MACHINES)::Duplicating machines" +84723000,"OTHER OFFICE MACHINES (FOR EXAMPLE, HECTOGRAPH OR STENCIL DUPLICATING MACHINES, ADDRESSING MACHINES, AUTOMATIC BANKNOTE DISPENSERS, COIN SORTING MACHINES, COIN COUNTING OR WRAPPING MACHINES, PENCIL-SHARPENING MACHINES, PERFORATING OR STAPLING MACHINES)::Machines for sorting or folding mail or for inserting mail in envelopes or bands, machines for opening, closing or sealing mail and machines for affixing or cancelling postage stamps" +84729000,oth er office machines for example hectograph or stencil duplicating machines addressing machines automatic banknote dispensers coin sorting machines coin counting or wrapping machines pencil sharpening machines perforating or stapling machines >> other other +84729010,"OTHER OFFICE MACHINES (FOR EXAMPLE, HECTOGRAPH OR STENCIL DUPLICATING MACHINES, ADDRESSING MACHINES, AUTOMATIC BANKNOTE DISPENSERS, COIN SORTING MACHINES, COIN COUNTING OR WRAPPING MACHINES, PENCIL-SHARPENING MACHINES, PERFORATING OR STAPLING MACHINES):Other :Stapling machines (staplers)" +84729020,"OTHER OFFICE MACHINES (FOR EXAMPLE, HECTOGRAPH OR STENCIL DUPLICATING MACHINES, ADDRESSING MACHINES, AUTOMATIC BANKNOTE DISPENSERS, COIN SORTING MACHINES, COIN COUNTING OR WRAPPING MACHINES, PENCIL-SHARPENING MACHINES, PERFORATING OR STAPLING MACHINES):Other :Digital duplicator" +84729030,"OTHER OFFICE MACHINES (FOR EXAMPLE, HECTOGRAPH OR 
STENCIL DUPLICATING MACHINES, ADDRESSING MACHINES, AUTOMATIC BANKNOTE DISPENSERS, COIN SORTING MACHINES, COIN COUNTING OR WRAPPING MACHINES, PENCIL-SHARPENING MACHINES, PERFORATING OR STAPLING MACHINES):Other :Automatic bank note dispensers" +84729040,"OTHER OFFICE MACHINES (FOR EXAMPLE, HECTOGRAPH OR STENCIL DUPLICATING MACHINES, ADDRESSING MACHINES, AUTOMATIC BANKNOTE DISPENSERS, COIN SORTING MACHINES, COIN COUNTING OR WRAPPING MACHINES, PENCIL-SHARPENING MACHINES, PERFORATING OR STAPLING MACHINES):Other :Coin sorting machines, coin-counting or wrapping machines" +84729090,"OTHER OFFICE MACHINES (FOR EXAMPLE, HECTOGRAPH OR STENCIL DUPLICATING MACHINES, ADDRESSING MACHINES, AUTOMATIC BANKNOTE DISPENSERS, COIN SORTING MACHINES, COIN COUNTING OR WRAPPING MACHINES, PENCIL-SHARPENING MACHINES, PERFORATING OR STAPLING MACHINES):Other :Other(OLD tariff)" +84729091,"OTHER OFFICE MACHINES (FOR EXAMPLE, HECTOGRAPH OR STENCIL DUPLICATING MACHINES, ADDRESSING MACHINES, AUTOMATIC BANKNOTE DISPENSERS, COIN SORTING MACHINES, COIN COUNTING OR WRAPPING MACHINES, PENCIL-SHARPENING MACHINES, PERFORATING OR STAPLING MACHINES):Other :Word-processing machines" +84729092,"OTHER OFFICE MACHINES (FOR EXAMPLE, HECTOGRAPH OR STENCIL DUPLICATING MACHINES, ADDRESSING MACHINES, AUTOMATIC BANKNOTE DISPENSERS, COIN SORTING MACHINES, COIN COUNTING OR WRAPPING MACHINES, PENCIL-SHARPENING MACHINES, PERFORATING OR STAPLING MACHINES):Other :Automatic typewriters" +84729093,"OTHER OFFICE MACHINES (FOR EXAMPLE, HECTOGRAPH OR STENCIL DUPLICATING MACHINES, ADDRESSING MACHINES, AUTOMATIC BANKNOTE DISPENSERS, COIN SORTING MACHINES, COIN COUNTING OR WRAPPING MACHINES, PENCIL-SHARPENING MACHINES, PERFORATING OR STAPLING MACHINES):Other :Braille typewriters, electric" +84729094,"OTHER OFFICE MACHINES (FOR EXAMPLE, HECTOGRAPH OR STENCIL DUPLICATING MACHINES, ADDRESSING MACHINES, AUTOMATIC BANKNOTE DISPENSERS, COIN SORTING MACHINES, COIN COUNTING OR WRAPPING MACHINES, PENCIL-SHARPENING MACHINES, PERFORATING OR STAPLING MACHINES):Other :Braille typewriters, non-electric" +84729095,"OTHER OFFICE MACHINES (FOR EXAMPLE, HECTOGRAPH OR STENCIL DUPLICATING MACHINES, ADDRESSING MACHINES, AUTOMATIC BANKNOTE DISPENSERS, COIN SORTING MACHINES, COIN COUNTING OR WRAPPING MACHINES, PENCIL-SHARPENING MACHINES, PERFORATING OR STAPLING MACHINES):Other :Other typewriters, electric or non-electric" +84729099,"OTHER OFFICE MACHINES (FOR EXAMPLE, HECTOGRAPH OR STENCIL DUPLICATING MACHINES, ADDRESSING MACHINES, AUTOMATIC BANKNOTE DISPENSERS, COIN SORTING MACHINES, COIN COUNTING OR WRAPPING MACHINES, PENCIL-SHARPENING MACHINES, PERFORATING OR STAPLING MACHINES):Other :Other" +84730000,parts and accessories other than covers carrying cases and the like suitable for use parts and accessories of the machines of heading 8470 +84731000,"PARTS AND ACCESSORIES (OTHER THAN COVERS, CARRYING CASES AND THE LIKE) SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH MACHINES OF HEADINGS 8469 TO 8472::Parts and accessories of the machines of heading 8469(OLD tariff)" +84732100,"PARTS AND ACCESSORIES (OTHER THAN COVERS, CARRYING CASES AND THE LIKE) SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH MACHINES OF HEADINGS 8470 TO 8472::Of the electronic calculating machines of sub-heading 8470 10, 8470 21 or 8471 29" +84732900,"PARTS AND ACCESSORIES (OTHER THAN COVERS, CARRYING CASES AND THE LIKE) SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH MACHINES OF HEADINGS 8470 TO 8472::Other" +84733000,parts and accessories other than covers carrying cases and the like suitable for use parts and 
accessories of the machines of heading 8470 >> parts and accessories of the machines of heading 8471 +84733010,"PARTS AND ACCESSORIES (OTHER THAN COVERS, CARRYING CASES AND THE LIKE) SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH MACHINES OF HEADINGS 8470 TO 8472:Parts and accessories of the machines of heading 8471:Microprocessors" +84733020,"PARTS AND ACCESSORIES (OTHER THAN COVERS, CARRYING CASES AND THE LIKE) SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH MACHINES OF HEADINGS 8470 TO 8472:Parts and accessories of the machines of heading 8471:Motherboards" +84733030,"PARTS AND ACCESSORIES (OTHER THAN COVERS, CARRYING CASES AND THE LIKE) SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH MACHINES OF HEADINGS 8470 TO 8472:Parts and accessories of the machines of heading 8471:Other mounted printed circuit boards" +84733040,"PARTS AND ACCESSORIES (OTHER THAN COVERS, CARRYING CASES AND THE LIKE) SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH MACHINES OF HEADINGS 8470 TO 8472:Parts and accessories of the machines of heading 8471:Head stack" +84733091,"PARTS AND ACCESSORIES (OTHER THAN COVERS, CARRYING CASES AND THE LIKE) SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH MACHINES OF HEADINGS 8470 TO 8472:Parts and accessories of the machines of heading 8471:Network access controllers" +84733092,"PARTS AND ACCESSORIES (OTHER THAN COVERS, CARRYING CASES AND THE LIKE) SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH MACHINES OF HEADINGS 8470 TO 8472:Parts and accessories of the machines of heading 8471:Graphic and intelligence based script technology (GIST) cards for multilingual computers" +84733099,"PARTS AND ACCESSORIES (OTHER THAN COVERS, CARRYING CASES AND THE LIKE) SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH MACHINES OF HEADINGS 8470 TO 8472:Parts and accessories of the machines of heading 8471:Other" +84734000,parts and accessories other than covers carrying cases and the like suitable for use parts and accessories of the machines of heading 8470 >> parts and accessories of the machines of heading 8472 +84734010,"PARTS AND ACCESSORIES (OTHER THAN COVERS, CARRYING CASES AND THE LIKE) SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH MACHINES OF HEADINGS 8470 TO 8472:Parts and accessories of the machines of heading 8472:Parts of duplicating, hectograph or stencil machines" +84734090,"PARTS AND ACCESSORIES (OTHER THAN COVERS, CARRYING CASES AND THE LIKE) SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH MACHINES OF HEADINGS 8470 TO 8472:Parts and accessories of the machines of heading 8472:Other" +84735000,"PARTS AND ACCESSORIES (OTHER THAN COVERS, CARRYING CASES AND THE LIKE) SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH MACHINES OF HEADINGS 8469 TO 8472::Parts and accessories equally suitable for use with machines of two or more of the headings 8469 to 8472" +84740000,machinery for sorting screening separating washing crushing grinding mixing or kneading earth stone ores or other mineral substances in solid including powder or paste form machinery for agglomerating shaping or moulding solid mineral fuels ceramic paste unhardened cements plastering materials or other mineral products in powder or paste form machines for forming foundry moulds of sand +84741000,machinery for sorting screening separating washing crushing grinding mixing or kneading earth stone ores or other mineral substances in solid including powder or paste form machinery for agglomerating shaping or moulding solid mineral fuels ceramic paste unhardened cements plastering materials or other mineral 
products in powder or paste form machines for forming foundry moulds of sand >> sorting screening separating or washing machines +84741010,"MACHINERY FOR SORTING, SCREENING, SEPARATING, WASHING, CRUSHING, GRINDING, MIXING OR KNEADING EARTH, STONE, ORES OR OTHER MINERAL SUBSTANCES, IN SOLID (INCLUDING POWDER OR PASTE) FORM; MACHINERY FOR AGGLOMERATING, SHAPING OR MOULDING SOLID MINERAL FUELS, CERAMIC PASTE, UNHARDENED CEMENTS, PLASTERING MATERIALS OR OTHER MINERAL PRODUCTS IN POWDER OR PASTE FORM; MACHINES FOR FORMING FOUNDRY MOULDS OF SAND:Sorting, screening, separating or washing machines :For coal" +84741090,"MACHINERY FOR SORTING, SCREENING, SEPARATING, WASHING, CRUSHING, GRINDING, MIXING OR KNEADING EARTH, STONE, ORES OR OTHER MINERAL SUBSTANCES, IN SOLID (INCLUDING POWDER OR PASTE) FORM; MACHINERY FOR AGGLOMERATING, SHAPING OR MOULDING SOLID MINERAL FUELS, CERAMIC PASTE, UNHARDENED CEMENTS, PLASTERING MATERIALS OR OTHER MINERAL PRODUCTS IN POWDER OR PASTE FORM; MACHINES FOR FORMING FOUNDRY MOULDS OF SAND:Sorting, screening, separating or washing machines :Other" +84742000,machinery for sorting screening separating washing crushing grinding mixing or kneading e ar t h s t o ne o re s o r o t he r m in er a l substances in solid including powder or paste form machinery for agglomerating shaping or moulding solid mineral fuels ceramic pa s te u nh ard e ne d c em e n t s p l a st eri ng materials or other mineral products in powder or paste form machines for forming foundry moulds of sand >> crushing or grinding machines +84742010,"MACHINERY FOR SORTING, SCREENING, SEPARATING, WASHING, CRUSHING, GRINDING, MIXING OR KNEADING EARTH, STONE, ORES OR OTHER MINERAL SUBSTANCES, IN SOLID (INCLUDING POWDER OR PASTE) FORM; MACHINERY FOR AGGLOMERATING, SHAPING OR MOULDING SOLID MINERAL FUELS, CERAMIC PASTE, UNHARDENED CEMENTS, PLASTERING MATERIALS OR OTHER MINERAL PRODUCTS IN POWDER OR PASTE FORM; MACHINES FOR FORMING FOUNDRY MOULDS OF SAND:Crushing or grinding machines:For stone and mineral" +84742020,"MACHINERY FOR SORTING, SCREENING, SEPARATING, WASHING, CRUSHING, GRINDING, MIXING OR KNEADING EARTH, STONE, ORES OR OTHER MINERAL SUBSTANCES, IN SOLID (INCLUDING POWDER OR PASTE) FORM; MACHINERY FOR AGGLOMERATING, SHAPING OR MOULDING SOLID MINERAL FUELS, CERAMIC PASTE, UNHARDENED CEMENTS, PLASTERING MATERIALS OR OTHER MINERAL PRODUCTS IN POWDER OR PASTE FORM; MACHINES FOR FORMING FOUNDRY MOULDS OF SAND:Crushing or grinding machines:For coal" +84742090,"MACHINERY FOR SORTING, SCREENING, SEPARATING, WASHING, CRUSHING, GRINDING, MIXING OR KNEADING EARTH, STONE, ORES OR OTHER MINERAL SUBSTANCES, IN SOLID (INCLUDING POWDER OR PASTE) FORM; MACHINERY FOR AGGLOMERATING, SHAPING OR MOULDING SOLID MINERAL FUELS, CERAMIC PASTE, UNHARDENED CEMENTS, PLASTERING MATERIALS OR OTHER MINERAL PRODUCTS IN POWDER OR PASTE FORM; MACHINES FOR FORMING FOUNDRY MOULDS OF SAND:Crushing or grinding machines:Other" +84743100,machinery for sorting screening separating washing crushing grinding mixing or kneading e ar t h s t o ne o re s o r o t he r m in er a l substances in solid including powder or paste form machinery for agglomerating shaping or moulding solid mineral fuels ceramic pa s te u nh ard e ne d c em e n t s p l a st eri ng materials or other mineral products in powder or paste form machines for forming foundry moulds of sand >> concrete or mortar mixers +84743110,"MACHINERY FOR SORTING, SCREENING, SEPARATING, WASHING, CRUSHING, GRINDING, MIXING OR KNEADING EARTH, STONE, ORES OR OTHER MINERAL SUBSTANCES, IN SOLID 
(INCLUDING POWDER OR PASTE) FORM; MACHINERY FOR AGGLOMERATING, SHAPING OR MOULDING SOLID MINERAL FUELS, CERAMIC PASTE, UNHARDENED CEMENTS, PLASTERING MATERIALS OR OTHER MINERAL PRODUCTS IN POWDER OR PASTE FORM; MACHINES FOR FORMING FOUNDRY MOULDS OF SAND:Concrete or mortar mixers :Concrete mixers" +84743120,"MACHINERY FOR SORTING, SCREENING, SEPARATING, WASHING, CRUSHING, GRINDING, MIXING OR KNEADING EARTH, STONE, ORES OR OTHER MINERAL SUBSTANCES, IN SOLID (INCLUDING POWDER OR PASTE) FORM; MACHINERY FOR AGGLOMERATING, SHAPING OR MOULDING SOLID MINERAL FUELS, CERAMIC PASTE, UNHARDENED CEMENTS, PLASTERING MATERIALS OR OTHER MINERAL PRODUCTS IN POWDER OR PASTE FORM; MACHINES FOR FORMING FOUNDRY MOULDS OF SAND:Concrete or mortar mixers :Mortar mixers" +84743200,"MACHINERY FOR SORTING, SCREENING, SEPARATING, WASHING, CRUSHING, GRINDING, MIXING OR KNEADING EARTH, STONE, ORES OR OTHER MINERAL SUBSTANCES, IN SOLID (INCLUDING POWDER OR PASTE) FORM; MACHINERY FOR AGGLOMERATING, SHAPING OR MOULDING SOLID MINERAL FUELS, CERAMIC PASTE, UNHARDENED CEMENTS, PLASTERING MATERIALS OR OTHER MINERAL PRODUCTS IN POWDER OR PASTE FORM; MACHINES FOR FORMING FOUNDRY MOULDS OF SAND::Machines for mixing mineral substances with bitumen" +84743900,"MACHINERY FOR SORTING, SCREENING, SEPARATING, WASHING, CRUSHING, GRINDING, MIXING OR KNEADING EARTH, STONE, ORES OR OTHER MINERAL SUBSTANCES, IN SOLID (INCLUDING POWDER OR PASTE) FORM; MACHINERY FOR AGGLOMERATING, SHAPING OR MOULDING SOLID MINERAL FUELS, CERAMIC PASTE, UNHARDENED CEMENTS, PLASTERING MATERIALS OR OTHER MINERAL PRODUCTS IN POWDER OR PASTE FORM; MACHINES FOR FORMING FOUNDRY MOULDS OF SAND::Other" +84748000,machinery for sorting screening separating washing crushing grinding mixing or kneading e ar t h s t o ne o re s o r o t he r m in er a l substances in solid including powder or paste form machinery for agglomerating shaping or moulding solid mineral fuels ceramic pa s te u nh ard e ne d c em e n t s p l a st eri ng materials or other mineral products in powder or paste form machines for forming foundry moulds of sand >> other machinery +84748010,"MACHINERY FOR SORTING, SCREENING, SEPARATING, WASHING, CRUSHING, GRINDING, MIXING OR KNEADING EARTH, STONE, ORES OR OTHER MINERAL SUBSTANCES, IN SOLID (INCLUDING POWDER OR PASTE) FORM; MACHINERY FOR AGGLOMERATING, SHAPING OR MOULDING SOLID MINERAL FUELS, CERAMIC PASTE, UNHARDENED CEMENTS, PLASTERING MATERIALS OR OTHER MINERAL PRODUCTS IN POWDER OR PASTE FORM; MACHINES FOR FORMING FOUNDRY MOULDS OF SAND:Other machinery:Brick and tile making machinery" +84748020,"MACHINERY FOR SORTING, SCREENING, SEPARATING, WASHING, CRUSHING, GRINDING, MIXING OR KNEADING EARTH, STONE, ORES OR OTHER MINERAL SUBSTANCES, IN SOLID (INCLUDING POWDER OR PASTE) FORM; MACHINERY FOR AGGLOMERATING, SHAPING OR MOULDING SOLID MINERAL FUELS, CERAMIC PASTE, UNHARDENED CEMENTS, PLASTERING MATERIALS OR OTHER MINERAL PRODUCTS IN POWDER OR PASTE FORM; MACHINES FOR FORMING FOUNDRY MOULDS OF SAND:Other machinery:Ceramic and clay making machinery" +84748030,"MACHINERY FOR SORTING, SCREENING, SEPARATING, WASHING, CRUSHING, GRINDING, MIXING OR KNEADING EARTH, STONE, ORES OR OTHER MINERAL SUBSTANCES, IN SOLID (INCLUDING POWDER OR PASTE) FORM; MACHINERY FOR AGGLOMERATING, SHAPING OR MOULDING SOLID MINERAL FUELS, CERAMIC PASTE, UNHARDENED CEMENTS, PLASTERING MATERIALS OR OTHER MINERAL PRODUCTS IN POWDER OR PASTE FORM; MACHINES FOR FORMING FOUNDRY MOULDS OF SAND:Other machinery:Machinery for forming foundry moulds of sand" +84748090,"MACHINERY FOR SORTING, 
SCREENING, SEPARATING, WASHING, CRUSHING, GRINDING, MIXING OR KNEADING EARTH, STONE, ORES OR OTHER MINERAL SUBSTANCES, IN SOLID (INCLUDING POWDER OR PASTE) FORM; MACHINERY FOR AGGLOMERATING, SHAPING OR MOULDING SOLID MINERAL FUELS, CERAMIC PASTE, UNHARDENED CEMENTS, PLASTERING MATERIALS OR OTHER MINERAL PRODUCTS IN POWDER OR PASTE FORM; MACHINES FOR FORMING FOUNDRY MOULDS OF SAND:Other machinery:Other" +84749000,"MACHINERY FOR SORTING, SCREENING, SEPARATING, WASHING, CRUSHING, GRINDING, MIXING OR KNEADING EARTH, STONE, ORES OR OTHER MINERAL SUBSTANCES, IN SOLID (INCLUDING POWDER OR PASTE) FORM; MACHINERY FOR AGGLOMERATING, SHAPING OR MOULDING SOLID MINERAL FUELS, CERAMIC PASTE, UNHARDENED CEMENTS, PLASTERING MATERIALS OR OTHER MINERAL PRODUCTS IN POWDER OR PASTE FORM; MACHINES FOR FORMING FOUNDRY MOULDS OF SAND::Parts" +84750000,machines for assembling electric or electronic lamps tubes or valves or in glass envelopes machines or manufacturing or hot working glass orglassware +84751000,"MACHINES FOR ASSEMBLING ELECTRIC OR ELECTRONIC LAMPS, TUBES OR VALVES OR FLASH-BULBS, IN GLASS ENVELOPES; MACHINES OR MANUFACTURING OR HOT WORKING GLASS OR GLASSWARE::Machines for assembling electric or electronic lamps, tubes or valves or flash-bulbs, in glass envelopes" +84752100,"MACHINES FOR ASSEMBLING ELECTRIC OR ELECTRONIC LAMPS, TUBES OR VALVES OR FLASH-BULBS, IN GLASS ENVELOPES; MACHINES OR MANUFACTURING OR HOT WORKING GLASS OR GLASSWARE::Machines for making optical fibres and preforms thereof" +84752900,"MACHINES FOR ASSEMBLING ELECTRIC OR ELECTRONIC LAMPS, TUBES OR VALVES OR FLASH-BULBS, IN GLASS ENVELOPES; MACHINES OR MANUFACTURING OR HOT WORKING GLASS OR GLASSWARE::Other" +84759000,"MACHINES FOR ASSEMBLING ELECTRIC OR ELECTRONIC LAMPS, TUBES OR VALVES OR FLASH-BULBS, IN GLASS ENVELOPES; MACHINES OR MANUFACTURING OR HOT WORKING GLASS OR GLASSWARE::Parts" +84760000,automatic machines for example postage stamps cigarette food or beverage machines including money changing machines automatic machines +84762100,automatic machines for example postage stamps cigarette food or beverage machines including money changing machines automatic machines >> incorporating heating or refrigerating devices +84762110,"AUTOMATIC GOODS-VENDING MACHINES (FOR EXAMPLE, POSTAGE STAMPS, CIGARETTE, FOOD OR BEVERAGE MACHINES), INCLUDING MONEY CHANGING MACHINES:Incorporating heating or refrigerating devices:Incorporating refrigerating devices" +84762120,"AUTOMATIC GOODS-VENDING MACHINES (FOR EXAMPLE, POSTAGE STAMPS, CIGARETTE, FOOD OR BEVERAGE MACHINES), INCLUDING MONEY CHANGING MACHINES:Incorporating heating or refrigerating devices:Incorporating heating devices" +84762900,"AUTOMATIC GOODS-VENDING MACHINES (FOR EXAMPLE, POSTAGE STAMPS, CIGARETTE, FOOD OR BEVERAGE MACHINES), INCLUDING MONEY CHANGING MACHINES::Other" +84768100,automatic machines for example postage stamps cigarette food or beverage machines including money changing machines automatic machines >> incorporating heating or refrigerating devices +84768110,"AUTOMATIC GOODS-VENDING MACHINES (FOR EXAMPLE, POSTAGE STAMPS, CIGARETTE, FOOD OR BEVERAGE MACHINES), INCLUDING MONEY CHANGING MACHINES:Incorporating heating or refrigerating devices:Incorporating refrigerating devices" +84768120,"AUTOMATIC GOODS-VENDING MACHINES (FOR EXAMPLE, POSTAGE STAMPS, CIGARETTE, FOOD OR BEVERAGE MACHINES), INCLUDING MONEY CHANGING MACHINES:Incorporating heating or refrigerating devices:Incorporating heating devices" +84768900,automatic machines for example postage stamps cigarette food 
or beverage machines including money changing machines automatic machines >> other +84768910,"AUTOMATIC GOODS-VENDING MACHINES (FOR EXAMPLE, POSTAGE STAMPS, CIGARETTE, FOOD OR BEVERAGE MACHINES), INCLUDING MONEY CHANGING MACHINES:Other :Money changing machines" +84768920,"AUTOMATIC GOODS-VENDING MACHINES (FOR EXAMPLE, POSTAGE STAMPS, CIGARETTE, FOOD OR BEVERAGE MACHINES), INCLUDING MONEY CHANGING MACHINES:Other :Postage stamps vending machines" +84768930,"AUTOMATIC GOODS-VENDING MACHINES (FOR EXAMPLE, POSTAGE STAMPS, CIGARETTE, FOOD OR BEVERAGE MACHINES), INCLUDING MONEY CHANGING MACHINES:Other :Cigarette vending machines" +84768990,"AUTOMATIC GOODS-VENDING MACHINES (FOR EXAMPLE, POSTAGE STAMPS, CIGARETTE, FOOD OR BEVERAGE MACHINES), INCLUDING MONEY CHANGING MACHINES:Other :Other" +84769000,automatic machines for example postage stamps cigarette food or beverage machines including money changing machines automatic machines >> parts +84769010,"AUTOMATIC GOODS-VENDING MACHINES (FOR EXAMPLE, POSTAGE STAMPS, CIGARETTE, FOOD OR BEVERAGE MACHINES), INCLUDING MONEY CHANGING MACHINES:Parts :Of machines of Sub-heading 8476 21" +84769090,"AUTOMATIC GOODS-VENDING MACHINES (FOR EXAMPLE, POSTAGE STAMPS, CIGARETTE, FOOD OR BEVERAGE MACHINES), INCLUDING MONEY CHANGING MACHINES:Parts :Other" +84770000,machinery for working rubber or plastics or for the manufacture of products from or included these materials not specified elsewhere in this chapter +84771000,"MACHINERY FOR WORKING RUBBER OR PLASTICS OR FOR THE MANUFACTURE OF PRODUCTS FROM THESE MATERIALS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER::Injection-moulding machines" +84772000,"MACHINERY FOR WORKING RUBBER OR PLASTICS OR FOR THE MANUFACTURE OF PRODUCTS FROM THESE MATERIALS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER::Extruders" +84773000,"MACHINERY FOR WORKING RUBBER OR PLASTICS OR FOR THE MANUFACTURE OF PRODUCTS FROM THESE MATERIALS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER::Blow moulding machines" +84774000,"MACHINERY FOR WORKING RUBBER OR PLASTICS OR FOR THE MANUFACTURE OF PRODUCTS FROM THESE MATERIALS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER::Vacuum moulding machines and other thermoforming machines" +84775100,"MACHINERY FOR WORKING RUBBER OR PLASTICS OR FOR THE MANUFACTURE OF PRODUCTS FROM THESE MATERIALS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER::For moulding or retreading pneumatic tyres or for moulding or otherwise forming inner tubes" +84775900,"MACHINERY FOR WORKING RUBBER OR PLASTICS OR FOR THE MANUFACTURE OF PRODUCTS FROM THESE MATERIALS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER::Other" +84778000,machinery for working rubber or plastics or for the manufacture of products from or included these materials not specified elsewhere in this chapter >> other machinery +84778010,"MACHINERY FOR WORKING RUBBER OR PLASTICS OR FOR THE MANUFACTURE OF PRODUCTS FROM THESE MATERIALS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Other machinery :Machinery for making rubber goods" +84778090,"MACHINERY FOR WORKING RUBBER OR PLASTICS OR FOR THE MANUFACTURE OF PRODUCTS FROM THESE MATERIALS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Other machinery :Other" +84779000,"MACHINERY FOR WORKING RUBBER OR PLASTICS OR FOR THE MANUFACTURE OF PRODUCTS FROM THESE MATERIALS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER::Parts" +84780000,machinery for preparing or making up tobacco not specified or included elsewhere in this chapter +84781000,machinery for preparing or making up 
tobacco not specified or included elsewhere in this chapter >> machinery +84781010,"MACHINERY FOR PREPARING OR MAKING UP TOBACCO, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS:Machinery :Cigar making machinery" +84781020,"MACHINERY FOR PREPARING OR MAKING UP TOBACCO, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS:Machinery :Cigarette making machinery" +84781090,"MACHINERY FOR PREPARING OR MAKING UP TOBACCO, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS:Machinery :Other" +84789000,"MACHINERY FOR PREPARING OR MAKING UP TOBACCO, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS::Parts" +84790000,machines and mechanical appliances having individual functions not specified or included elsewhere in this chapter +84791000,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER::Machinery for public works, building or the like" +84792000,machines and mechanical appliances having individual functions not specified or included elsewhere in this chapter >> machinery for the extraction or preparation of animal or fixed vegetable or microbial fats or oils +84792010,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Machinery for the extraction or preparation of animal or fixed vegetable fats or oils :Oil-seed crushing or grinding machinery including purifying tanks" +84792090,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Machinery for the extraction or preparation of animal or fixed vegetable fats or oils :Other" +84793000,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER::Presses for the manufacture of particle board or fibre building board of wood or other ligneous materials and other machinery for treating wood or cork" +84794000,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER::Rope or cable-making machines" +84795000,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER::Industrial robots, not elsewhere specified or included" +84796000,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER::Evaporative air coolers" +84797100,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Of a kind use in airports:Passenger boarding bridges of a kind used in airports" +84797900,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:other:Other" +84798100,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER::For treating metal, including electric wire coil-winders" +84798200,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER::Mixing, kneading, crushing, grinding, screening, sifting, homogenising, emulsifying or stirring machines" +84798300,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:cold:Cold isostatic presses" +84798900,machines and mechanical appliances having individual functions not specified or included elsewhere in this chapter >> other +84798910,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE 
IN THIS CHAPTER:Other :Soap cutting or moulding machinery" +84798920,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Other :Air humidifiers or dehumidifiers (other than those falling under heading 8415 or 8424)" +84798930,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Other :Mechanical shifting machines" +84798940,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Other :Ultrasonic transducers" +84798950,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Other :Car washing machines and related appliances" +84798960,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Other :Coke oven plants" +84798970,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Other :Machinery for the manufacture of chemical and pharmaceuticals goods" +84798992,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Other :Briquetting plant and machinery intended for manufacture of briquettes from agricultural and municipal waste" +84798999,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Other :Other" +84799000,machines and mechanical appliances having individual functions not specified or included elsewhere in this chapter >> parts +84799010,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Parts :Of machines for public works, building or the like" +84799020,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Parts :Of machines for the extraction of animal or fruit and vegetable fats or oil" +84799030,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Parts :Of machines and mechanical appliances for treating wood" +84799040,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Parts :Of machinery used for manufacture of chemicals and pharmaceuticals" +84799090,"MACHINES AND MECHANICAL APPLIANCES HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Parts :Other" +84800000,moulding boxes for metal foundry mould bases moulding patterns moulds for metal other than ingot moulds metal carbides glass mineral materials rubber or plastics +84801000,"MOULDING BOXES FOR METAL FOUNDRY; MOULD BASES; MOULDING PATTERNS; MOULDS FOR METAL (OTHER THAN INGOT MOULDS), METAL CARBIDES, GLASS, MINERAL MATERIALS, RUBBER OR PLASTICS::Moulding boxes for metal foundry" +84802000,"MOULDING BOXES FOR METAL FOUNDRY; MOULD BASES; MOULDING PATTERNS; MOULDS FOR METAL (OTHER THAN INGOT MOULDS), METAL CARBIDES, GLASS, MINERAL MATERIALS, RUBBER OR PLASTICS::Mould bases" +84803000,"MOULDING BOXES FOR METAL FOUNDRY; MOULD BASES; MOULDING PATTERNS; MOULDS FOR METAL (OTHER THAN INGOT MOULDS), METAL CARBIDES, GLASS, MINERAL MATERIALS, RUBBER OR PLASTICS::Moulding patterns" +84804100,"MOULDING BOXES FOR METAL FOUNDRY; MOULD BASES; MOULDING PATTERNS; MOULDS FOR METAL (OTHER THAN INGOT MOULDS), METAL CARBIDES, GLASS, MINERAL MATERIALS, RUBBER OR 
PLASTICS::Injection or compression types" +84804900,"MOULDING BOXES FOR METAL FOUNDRY; MOULD BASES; MOULDING PATTERNS; MOULDS FOR METAL (OTHER THAN INGOT MOULDS), METAL CARBIDES, GLASS, MINERAL MATERIALS, RUBBER OR PLASTICS::Other" +84805000,"MOULDING BOXES FOR METAL FOUNDRY; MOULD BASES; MOULDING PATTERNS; MOULDS FOR METAL (OTHER THAN INGOT MOULDS), METAL CARBIDES, GLASS, MINERAL MATERIALS, RUBBER OR PLASTICS::Moulds for glass" +84806000,"MOULDING BOXES FOR METAL FOUNDRY; MOULD BASES; MOULDING PATTERNS; MOULDS FOR METAL (OTHER THAN INGOT MOULDS), METAL CARBIDES, GLASS, MINERAL MATERIALS, RUBBER OR PLASTICS::Moulds for mineral materials" +84807100,"MOULDING BOXES FOR METAL FOUNDRY; MOULD BASES; MOULDING PATTERNS; MOULDS FOR METAL (OTHER THAN INGOT MOULDS), METAL CARBIDES, GLASS, MINERAL MATERIALS, RUBBER OR PLASTICS::Injection or compression types" +84807900,"MOULDING BOXES FOR METAL FOUNDRY; MOULD BASES; MOULDING PATTERNS; MOULDS FOR METAL (OTHER THAN INGOT MOULDS), METAL CARBIDES, GLASS, MINERAL MATERIALS, RUBBER OR PLASTICS::Other" +84810000,taps cocks valves and similar appliances for pipes boiler shells tanks vats or the like including valves and thermostatically controlled valves +84811000,"TAPS, COCKS, VALVES AND SIMILAR APPLIANCES FOR PIPES, BOILER SHELLS, TANKS, VATS OR THE LIKE, INCLUDING PRESSURE-REDUCING VALVES AND THE RMOSTATICALLY CONTROLLED VALVES::Pressure-reducing valves" +84812000,"TAPS, COCKS, VALVES AND SIMILAR APPLIANCES FOR PIPES, BOILER SHELLS, TANKS, VATS OR THE LIKE, INCLUDING PRESSURE-REDUCING VALVES AND THE RMOSTATICALLY CONTROLLED VALVES::Valves for oleohydraulic or pneumatic transmissions" +84813000,"TAPS, COCKS, VALVES AND SIMILAR APPLIANCES FOR PIPES, BOILER SHELLS, TANKS, VATS OR THE LIKE, INCLUDING PRESSURE-REDUCING VALVES AND THE RMOSTATICALLY CONTROLLED VALVES::Check (non-return) valves" +84814000,"TAPS, COCKS, VALVES AND SIMILAR APPLIANCES FOR PIPES, BOILER SHELLS, TANKS, VATS OR THE LIKE, INCLUDING PRESSURE-REDUCING VALVES AND THE RMOSTATICALLY CONTROLLED VALVES::Safety or relief valves" +84818000,taps cocks valves and similar appliances for pipes boiler shells tanks vats or the like including valves and thermostatically controlled valves >> other appliances +84818010,"TAPS, COCKS, VALVES AND SIMILAR APPLIANCES FOR PIPES, BOILER SHELLS, TANKS, VATS OR THE LIKE, INCLUDING PRESSURE-REDUCING VALVES AND THE RMOSTATICALLY CONTROLLED VALVES:Other appliances :Taps, cocks and similar appliances of iron or steel" +84818020,"TAPS, COCKS, VALVES AND SIMILAR APPLIANCES FOR PIPES, BOILER SHELLS, TANKS, VATS OR THE LIKE, INCLUDING PRESSURE-REDUCING VALVES AND THE RMOSTATICALLY CONTROLLED VALVES:Other appliances :Taps, cocks and similar appliances of non-ferrous metal" +84818030,"TAPS, COCKS, VALVES AND SIMILAR APPLIANCES FOR PIPES, BOILER SHELLS, TANKS, VATS OR THE LIKE, INCLUDING PRESSURE-REDUCING VALVES AND THE RMOSTATICALLY CONTROLLED VALVES:Other appliances :Industrial valves (excluding pressure-reducing valves, and thermostatically controlled valves)" +84818041,"TAPS, COCKS, VALVES AND SIMILAR APPLIANCES FOR PIPES, BOILER SHELLS, TANKS, VATS OR THE LIKE, INCLUDING PRESSURE-REDUCING VALVES AND THE RMOSTATICALLY CONTROLLED VALVES:Other appliances :For bicycles" +84818049,"TAPS, COCKS, VALVES AND SIMILAR APPLIANCES FOR PIPES, BOILER SHELLS, TANKS, VATS OR THE LIKE, INCLUDING PRESSURE-REDUCING VALVES AND THE RMOSTATICALLY CONTROLLED VALVES:Other appliances :Other" +84818050,"TAPS, COCKS, VALVES AND SIMILAR APPLIANCES FOR PIPES, BOILER SHELLS, TANKS, VATS OR THE 
LIKE, INCLUDING PRESSURE-REDUCING VALVES AND THE RMOSTATICALLY CONTROLLED VALVES:Other appliances :Expansion valves and solenoid valves for refrigerating and air conditioning appliances and machinery" +84818090,"TAPS, COCKS, VALVES AND SIMILAR APPLIANCES FOR PIPES, BOILER SHELLS, TANKS, VATS OR THE LIKE, INCLUDING PRESSURE-REDUCING VALVES AND THE RMOSTATICALLY CONTROLLED VALVES:Other appliances :Other" +84819000,taps cocks valves and similar appliances for pipes boiler shells tanks vats or the like including valves and thermostatically controlled valves >> parts +84819010,"TAPS, COCKS, VALVES AND SIMILAR APPLIANCES FOR PIPES, BOILER SHELLS, TANKS, VATS OR THE LIKE, INCLUDING PRESSURE-REDUCING VALVES AND THE RMOSTATICALLY CONTROLLED VALVES:Parts :Bicycles valves" +84819090,"TAPS, COCKS, VALVES AND SIMILAR APPLIANCES FOR PIPES, BOILER SHELLS, TANKS, VATS OR THE LIKE, INCLUDING PRESSURE-REDUCING VALVES AND THE RMOSTATICALLY CONTROLLED VALVES:Parts :Other" +84820000,ball or roller bearings +84821000,ball or roller bearings >> ball bearings adapter ball bearings radial type +84821011,BALL OR ROLLER BEARINGS:Ball bearings :Not exceeding 50mm of bore diameter +84821012,BALL OR ROLLER BEARINGS:Ball bearings :Of bore diameter exceeding 50 mm but not exceeding 100 mm +84821013,BALL OR ROLLER BEARINGS:Ball bearings :Of bore diameter exceeding 100 mm +84821020,BALL OR ROLLER BEARINGS:Ball bearings :Other ball bearing (radial type) of bore diameter not exceeding 50 mm +84821030,BALL OR ROLLER BEARINGS:Ball bearings :Other ball bearing (radial type) of bore diameter exceeding 50 mm but not exceeding 100 mm +84821040,BALL OR ROLLER BEARINGS:Ball bearings :Of bore diameter exceeding 100 mm +84821051,BALL OR ROLLER BEARINGS:Ball bearings :Of bore diameter not exceeding 50 mm +84821052,BALL OR ROLLER BEARINGS:Ball bearings :Of bore diameter exceeding 50 mm but not exceeding 100 mm +84821053,BALL OR ROLLER BEARINGS:Ball bearings :Of bore diameter exceeding 100 mm +84821090,BALL OR ROLLER BEARINGS:Ball bearings :Other +84822000,ball or roller bearings >> tapered roller bearings including cone and tapered roller assemblies tapered roller bearings radial type +84822011,"BALL OR ROLLER BEARINGS:Tapered roller bearings, including cone and tapered roller assemblies :Of bore diameter not exceeding 50 mm" +84822012,"BALL OR ROLLER BEARINGS:Tapered roller bearings, including cone and tapered roller assemblies :Of bore diameter exceeding 50 mm but not exceeding 100 mm" +84822013,"BALL OR ROLLER BEARINGS:Tapered roller bearings, including cone and tapered roller assemblies :Of bore diameter exceeding 100 mm" +84822090,"BALL OR ROLLER BEARINGS:Tapered roller bearings, including cone and tapered roller assemblies :Other" +84823000,BALL OR ROLLER BEARINGS::Spherical roller bearings +84824000,BALL OR ROLLER BEARINGS::Needle roller bearings +84825000,"BALL OR ROLLER BEARINGS:Other cylindrical roller bearings :Other cylindrical roller bearings, including cage and roller assemblies" +84825011,BALL OR ROLLER BEARINGS:Other cylindrical roller bearings :Of bore diameter not exceeding 50 mm(OLD tariff) +84825012,BALL OR ROLLER BEARINGS:Other cylindrical roller bearings :Of bore diameter exceeding 50 mm not exceeding 100 mm(OLD tariff) +84825013,BALL OR ROLLER BEARINGS:Other cylindrical roller bearings :Of bore diameter exceeding 100 mm(OLD tariff) +84825021,BALL OR ROLLER BEARINGS:Other cylindrical roller bearings :Of bore diameter not exceeding 50 mm(OLD tariff) +84825022,BALL OR ROLLER BEARINGS:Other cylindrical roller bearings 
:Of bore diameter exceeding 50 mm but not exceeding 100 mm(OLD tariff) +84825023,BALL OR ROLLER BEARINGS:Other cylindrical roller bearings :Of bore diameter exceeding 100 mm(OLD tariff) +84828000,"BALL OR ROLLER BEARINGS::Other, including combined ball or roller bearings" +84829100,ball or roller bearings >> balls needles and rollers balls +84829111,"BALL OR ROLLER BEARINGS:Balls, needles and rollers :Of nickel alloys" +84829112,"BALL OR ROLLER BEARINGS:Balls, needles and rollers :Of tungsten carbide" +84829113,"BALL OR ROLLER BEARINGS:Balls, needles and rollers :Of special stainless steel" +84829114,"BALL OR ROLLER BEARINGS:Balls, needles and rollers :Of high speed steel" +84829119,"BALL OR ROLLER BEARINGS:Balls, needles and rollers :Other" +84829120,"BALL OR ROLLER BEARINGS:Balls, needles and rollers :Needles" +84829130,"BALL OR ROLLER BEARINGS:Balls, needles and rollers :Rollers" +84829900,BALL OR ROLLER BEARINGS::Other +84830000,transmission shafts including cam shafts and crank shafts and cranks bearing housings and +84831000,transmission shafts including cam shafts and crank shafts and cranks bearing housings and >> transmission shafts including cam shafts and crank shafts and cranks +84831010,"TRANSMISSION SHAFTS (INCLUDING CAM SHAFTS AND CRANK SHAFTS) AND CRANKS; BEARING HOUSINGS AND PLAIN SHAFT BEARINGS; GEARS AND GEARING; BALL OR ROLLER SCREWS; GEAR BOXES AND OTHER SPEED CHANGERS, INCLUDING TORQUE CONVERTERS; FLYWHEELS AND PULLEYS, INCLUDING PULLEY BLOCKS; CLUTCHES AND SHAFT COUPLINGS (INCLUDING UNIVERSAL JOINTS):Transmission shafts (including cam shafts and crank shafts) and cranks :Crank shafts for sewing machines" +84831091,"TRANSMISSION SHAFTS (INCLUDING CAM SHAFTS AND CRANK SHAFTS) AND CRANKS; BEARING HOUSINGS AND PLAIN SHAFT BEARINGS; GEARS AND GEARING; BALL OR ROLLER SCREWS; GEAR BOXES AND OTHER SPEED CHANGERS, INCLUDING TORQUE CONVERTERS; FLYWHEELS AND PULLEYS, INCLUDING PULLEY BLOCKS; CLUTCHES AND SHAFT COUPLINGS (INCLUDING UNIVERSAL JOINTS):Transmission shafts (including cam shafts and crank shafts) and cranks :Crank shaft for engines of heading 8407" +84831092,"TRANSMISSION SHAFTS (INCLUDING CAM SHAFTS AND CRANK SHAFTS) AND CRANKS; BEARING HOUSINGS AND PLAIN SHAFT BEARINGS; GEARS AND GEARING; BALL OR ROLLER SCREWS; GEAR BOXES AND OTHER SPEED CHANGERS, INCLUDING TORQUE CONVERTERS; FLYWHEELS AND PULLEYS, INCLUDING PULLEY BLOCKS; CLUTCHES AND SHAFT COUPLINGS (INCLUDING UNIVERSAL JOINTS):Transmission shafts (including cam shafts and crank shafts) and cranks :Crank shaft for engines of heading 8408" +84831099,"TRANSMISSION SHAFTS (INCLUDING CAM SHAFTS AND CRANK SHAFTS) AND CRANKS; BEARING HOUSINGS AND PLAIN SHAFT BEARINGS; GEARS AND GEARING; BALL OR ROLLER SCREWS; GEAR BOXES AND OTHER SPEED CHANGERS, INCLUDING TORQUE CONVERTERS; FLYWHEELS AND PULLEYS, INCLUDING PULLEY BLOCKS; CLUTCHES AND SHAFT COUPLINGS (INCLUDING UNIVERSAL JOINTS):Transmission shafts (including cam shafts and crank shafts) and cranks :Other" +84832000,"TRANSMISSION SHAFTS (INCLUDING CAM SHAFTS AND CRANK SHAFTS) AND CRANKS; BEARING HOUSINGS AND PLAIN SHAFT BEARINGS; GEARS AND GEARING; BALL OR ROLLER SCREWS; GEAR BOXES AND OTHER SPEED CHANGERS, INCLUDING TORQUE CONVERTERS; FLYWHEELS AND PULLEYS, INCLUDING PULLEY BLOCKS; CLUTCHES AND SHAFT COUPLINGS (INCLUDING UNIVERSAL JOINTS)::Bearing housings, incorporating ball or roller bearings" +84833000,"TRANSMISSION SHAFTS (INCLUDING CAM SHAFTS AND CRANK SHAFTS) AND CRANKS; BEARING HOUSINGS AND PLAIN SHAFT BEARINGS; GEARS AND GEARING; BALL OR ROLLER SCREWS; GEAR 
BOXES AND OTHER SPEED CHANGERS, INCLUDING TORQUE CONVERTERS; FLYWHEELS AND PULLEYS, INCLUDING PULLEY BLOCKS; CLUTCHES AND SHAFT COUPLINGS (INCLUDING UNIVERSAL JOINTS)::Bearing housings, not incorporating ball or roller bearings; plain shaft bearings" +84834000,"TRANSMISSION SHAFTS (INCLUDING CAM SHAFTS AND CRANK SHAFTS) AND CRANKS; BEARING HOUSINGS AND PLAIN SHAFT BEARINGS; GEARS AND GEARING; BALL OR ROLLER SCREWS; GEAR BOXES AND OTHER SPEED CHANGERS, INCLUDING TORQUE CONVERTERS; FLYWHEELS AND PULLEYS, INCLUDING PULLEY BLOCKS; CLUTCHES AND SHAFT COUPLINGS (INCLUDING UNIVERSAL JOINTS)::Gears and gearing, other than toothed wheels, chain spro ckets and other transmission elements presented separately; ball or roller screws; gear boxes and other speed changers, including torque converters" +84835000,transmission shafts including cam shafts and crank shafts and cranks bearing housings and >> flywheels and pulleys including pulley blocks +84835010,"TRANSMISSION SHAFTS (INCLUDING CAM SHAFTS AND CRANK SHAFTS) AND CRANKS; BEARING HOUSINGS AND PLAIN SHAFT BEARINGS; GEARS AND GEARING; BALL OR ROLLER SCREWS; GEAR BOXES AND OTHER SPEED CHANGERS, INCLUDING TORQUE CONVERTERS; FLYWHEELS AND PULLEYS, INCLUDING PULLEY BLOCKS; CLUTCHES AND SHAFT COUPLINGS (INCLUDING UNIVERSAL JOINTS):Flywheels and pulleys, including pulley blocks:Pulleys, power transmission" +84835090,"TRANSMISSION SHAFTS (INCLUDING CAM SHAFTS AND CRANK SHAFTS) AND CRANKS; BEARING HOUSINGS AND PLAIN SHAFT BEARINGS; GEARS AND GEARING; BALL OR ROLLER SCREWS; GEAR BOXES AND OTHER SPEED CHANGERS, INCLUDING TORQUE CONVERTERS; FLYWHEELS AND PULLEYS, INCLUDING PULLEY BLOCKS; CLUTCHES AND SHAFT COUPLINGS (INCLUDING UNIVERSAL JOINTS):Flywheels and pulleys, including pulley blocks:Other" +84836000,transmission shafts including cam shafts and crank shafts and cranks bearing housings and >> clutches and shaft couplings including universal joints +84836010,"TRANSMISSION SHAFTS (INCLUDING CAM SHAFTS AND CRANK SHAFTS) AND CRANKS; BEARING HOUSINGS AND PLAIN SHAFT BEARINGS; GEARS AND GEARING; BALL OR ROLLER SCREWS; GEAR BOXES AND OTHER SPEED CHANGERS, INCLUDING TORQUE CONVERTERS; FLYWHEELS AND PULLEYS, INCLUDING PULLEY BLOCKS; CLUTCHES AND SHAFT COUPLINGS (INCLUDING UNIVERSAL JOINTS):Clutches and shaft couplings (including universal joints):Flexible coupling" +84836020,"TRANSMISSION SHAFTS (INCLUDING CAM SHAFTS AND CRANK SHAFTS) AND CRANKS; BEARING HOUSINGS AND PLAIN SHAFT BEARINGS; GEARS AND GEARING; BALL OR ROLLER SCREWS; GEAR BOXES AND OTHER SPEED CHANGERS, INCLUDING TORQUE CONVERTERS; FLYWHEELS AND PULLEYS, INCLUDING PULLEY BLOCKS; CLUTCHES AND SHAFT COUPLINGS (INCLUDING UNIVERSAL JOINTS):Clutches and shaft couplings (including universal joints):Fluid coupling" +84836090,"TRANSMISSION SHAFTS (INCLUDING CAM SHAFTS AND CRANK SHAFTS) AND CRANKS; BEARING HOUSINGS AND PLAIN SHAFT BEARINGS; GEARS AND GEARING; BALL OR ROLLER SCREWS; GEAR BOXES AND OTHER SPEED CHANGERS, INCLUDING TORQUE CONVERTERS; FLYWHEELS AND PULLEYS, INCLUDING PULLEY BLOCKS; CLUTCHES AND SHAFT COUPLINGS (INCLUDING UNIVERSAL JOINTS):Clutches and shaft couplings (including universal joints):Other" +84839000,"TRANSMISSION SHAFTS (INCLUDING CAM SHAFTS AND CRANK SHAFTS) AND CRANKS; BEARING HOUSINGS AND PLAIN SHAFT BEARINGS; GEARS AND GEARING; BALL OR ROLLER SCREWS; GEAR BOXES AND OTHER SPEED CHANGERS, INCLUDING TORQUE CONVERTERS; FLYWHEELS AND PULLEYS, INCLUDING PULLEY BLOCKS; CLUTCHES AND SHAFT COUPLINGS (INCLUDING UNIVERSAL JOINTS)::Toothed wheels, chain sprockets and other transmission 
elements presented separately; parts" +84840000,gaskets and similar joints of metal sheeting combined with other material or of two or more layers of metal sets or assortments of gaskets and similar joints dissimilar in composition put up in pouches envelopes or similar packings mechanical seals +84841000,gaskets and similar joints of metal sheeting combined with other material or of two or more layers of metal sets or assortments of gaskets and similar joints dissimilar in composition put up in pouches envelopes or similar packings mechanical seals >> gaskets and similar joints of metal sheeting combined with other material or of two or more layers of metal +84841010,"GASKETS AND SIMILAR JOINTS OF METAL SHEETING COMBINED WITH OTHER MATERIAL OR OF TWO OR MORE LAYERS OF METAL; SETS OR ASSORTMENTS OF GASKETS AND SIMILAR JOINTS, DISSIMILAR IN COMPOSITION, PUT UP IN POUCHES, ENVELOPES OR SIMILAR PACKINGS; MECHANICAL SEALS:Gaskets and similar joints of metal sheeting combined with other material or of two or more layers of metal :Asbestos metallic packings and gaskets (excluding gaskets of asbestos board reinforced with metal gauze or wire)" +84841090,"GASKETS AND SIMILAR JOINTS OF METAL SHEETING COMBINED WITH OTHER MATERIAL OR OF TWO OR MORE LAYERS OF METAL; SETS OR ASSORTMENTS OF GASKETS AND SIMILAR JOINTS, DISSIMILAR IN COMPOSITION, PUT UP IN POUCHES, ENVELOPES OR SIMILAR PACKINGS; MECHANICAL SEALS:Gaskets and similar joints of metal sheeting combined with other material or of two or more layers of metal :Other" +84842000,"GASKETS AND SIMILAR JOINTS OF METAL SHEETING COMBINED WITH OTHER MATERIAL OR OF TWO OR MORE LAYERS OF METAL; SETS OR ASSORTMENTS OF GASKETS AND SIMILAR JOINTS, DISSIMILAR IN COMPOSITION, PUT UP IN POUCHES, ENVELOPES OR SIMILAR PACKINGS; MECHANICAL SEALS::Mechanical seals" +84849000,"GASKETS AND SIMILAR JOINTS OF METAL SHEETING COMBINED WITH OTHER MATERIAL OR OF TWO OR MORE LAYERS OF METAL; SETS OR ASSORTMENTS OF GASKETS AND SIMILAR JOINTS, DISSIMILAR IN COMPOSITION, PUT UP IN POUCHES, ENVELOPES OR SIMILAR PACKINGS; MECHANICAL SEALS::Other" +84850000,machines for additive manufacturing +84851000,MACHINES FOR ADDITIVE MANUFACTURING:by:By metal deposit +84852000,MACHINES FOR ADDITIVE MANUFACTURING:by:By plastics or rubber deposit +84853000,"MACHINES FOR ADDITIVE MANUFACTURING:by:By plaster, cement, ceramics or glass deposit" +84858000,MACHINES FOR ADDITIVE MANUFACTURING:other:Other +84859000,MACHINES FOR ADDITIVE MANUFACTURING:parts:Parts +84860000,machines and apparatus of a kind used solely or principally for the manufacture of semiconductor boules or wafers semiconductor devices electronic integrated circuits or flat panel displays machines and apparatus specified in note 11 c to this chapter parts and accessories +84861000,"MACHINES AND APPARATUS OF A KIND USED SOLELY OR PRINCIPALLY FOR THE MANUFACTURE OF SEMICONDUCTOR BOULES OR WAFERS, SEMICONDUCTOR DEVICES, ELECTRONIC INTEGRATED CIRCUITS OR FLAT PANEL DISPLAYS; MACHINES AND APPARATUS SPECIFIED IN NOTE 9(C)TO THIS CHAPTER; PARTS AND ACCESSORIES::Machines and apparatus for the manufacture of boules or wafers" +84862000,"MACHINES AND APPARATUS OF A KIND USED SOLELY OR PRINCIPALLY FOR THE MANUFACTURE OF SEMICONDUCTOR BOULES OR WAFERS, SEMICONDUCTOR DEVICES, ELECTRONIC INTEGRATED CIRCUITS OR FLAT PANEL DISPLAYS; MACHINES AND APPARATUS SPECIFIED IN NOTE 9(C)TO THIS CHAPTER; PARTS AND ACCESSORIES::Machines and apparatus for the manufacture of semiconductor devices or of electronic integrated circuits" +84863000,"MACHINES AND 
APPARATUS OF A KIND USED SOLELY OR PRINCIPALLY FOR THE MANUFACTURE OF SEMICONDUCTOR BOULES OR WAFERS, SEMICONDUCTOR DEVICES, ELECTRONIC INTEGRATED CIRCUITS OR FLAT PANEL DISPLAYS; MACHINES AND APPARATUS SPECIFIED IN NOTE 9(C)TO THIS CHAPTER; PARTS AND ACCESSORIES::Machines and apparatus for the manufacture of flat panel displays" +84864000,"MACHINES AND APPARATUS OF A KIND USED SOLELY OR PRINCIPALLY FOR THE MANUFACTURE OF SEMICONDUCTOR BOULES OR WAFERS, SEMICONDUCTOR DEVICES, ELECTRONIC INTEGRATED CIRCUITS OR FLAT PANEL DISPLAYS; MACHINES AND APPARATUS SPECIFIED IN NOTE 9(C)TO THIS CHAPTER; PARTS AND ACCESSORIES::Machines and apparatus specified in Note 9(c) to this Chapter" +84869000,"MACHINES AND APPARATUS OF A KIND USED SOLELY OR PRINCIPALLY FOR THE MANUFACTURE OF SEMICONDUCTOR BOULES OR WAFERS, SEMICONDUCTOR DEVICES, ELECTRONIC INTEGRATED CIRCUITS OR FLAT PANEL DISPLAYS; MACHINES AND APPARATUS SPECIFIED IN NOTE 9(C)TO THIS CHAPTER; PARTS AND ACCESSORIES::Parts and accessories" +84870000,machines parts not containing electrical connectors insulators coils contacts or other electrical features not specified or included elsewhere in this chapter +84871000,"MACHINES PARTS, NOT CONTAINING ELECTRICAL CONNECTORS, INSULATORS, COILS, CONTACTS OR OTHER ELECTRICAL FEATURES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER::Ship's or boat's propellers and blades therefore" +84879000,"MACHINES PARTS, NOT CONTAINING ELECTRICAL CONNECTORS, INSULATORS, COILS, CONTACTS OR OTHER ELECTRICAL FEATURES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER::Other" +85011011,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Motors of an output not exceeding 37.5 W :Micro motor(OLD tariff) +85011012,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Motors of an output not exceeding 37.5 W :Stepper motor(OLD tariff) +85011013,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Motors of an output not exceeding 37.5 W :Wiper motor(OLD tariff) +85011019,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Motors of an output not exceeding 37.5 W :Other(OLD tariff) +85011020,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Motors of an output not exceeding 37.5 W :AC motor(OLD tariff) +85012000,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS)::Universal AC or DC motors of an output exceeding 37.5 W(OLD tariff) +85013111,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output not exceeding 750 W:Micro motor(OLD tariff) +85013112,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output not exceeding 750 W:Stepper motor(OLD tariff) +85013113,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output not exceeding 750 W:Wiper motor(OLD tariff) +85013119,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output not exceeding 750 W:Other(OLD tariff) +85013120,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output not exceeding 750 W:DC generators(OLD tariff) +85013210,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output exceeding 750 W but not exceeding 75 kW :DC motor(OLD tariff) +85013220,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output exceeding 750 W but not exceeding 75 kW :DC generators(OLD tariff) +85013310,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output exceeding 75 kW but not exceeding 375 kW :DC motors(OLD tariff) +85013320,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output exceeding 75 kW but not 
exceeding 375 kW :DC generators(OLD tariff) +85013410,"ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output exceeding 375 kW:Of an output exceeding 375 kW but not exceeding 1,000 kW(OLD tariff)" +85013420,"ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output exceeding 375 kW:Of an output exceeding 1,000 kW but not exceeding 2,000 kW(OLD tariff)" +85013430,"ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output exceeding 375 kW:Of an output exceeding 2,000 kW(OLD tariff)" +85013440,"ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output exceeding 375 kW:Of an output exceeding 5,000 kW but not exceeding 10,000 kW(OLD tariff)" +85013450,"ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output exceeding 375 kW:Of an output exceeding 10,000 kW(OLD tariff)" +85014010,"ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Other AC motors, single-phase:Fractional horse power motor(OLD tariff)" +85014090,"ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Other AC motors, single-phase:Other(OLD tariff)" +85015110,"ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output not exceeding 750 W :Squirrel cage induction motor, 3 phase type(OLD tariff)" +85015120,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output not exceeding 750 W :Slipring motor(OLD tariff) +85015190,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output not exceeding 750 W :Other(OLD tariff) +85015210,"ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output exceeding 750 W but not exceeding 75 kW:Squirrel cage induction motor, 3 phase type(OLD tariff)" +85015220,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output exceeding 750 W but not exceeding 75 kW:Slipring motor(OLD tariff) +85015290,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output exceeding 750 W but not exceeding 75 kW:Other(OLD tariff) +85015310,"ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output exceeding 75 kW :Squirrel cage induction motor, 3 phase type(OLD tariff)" +85015320,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output exceeding 75 kW :Slipring motor(OLD tariff) +85015330,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output exceeding 75 kW :Traction motor(OLD tariff) +85015390,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output exceeding 75 kW :Other(OLD tariff) +85016100,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS)::Of an output not exceeding 75 kVA(OLD tariff) +85016200,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS)::Of an output exceeding 75 kVA but not exceeding 375 kVA(OLD tariff) +85016300,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS)::Of an output exceeding 375 kVA but not exceeding 750 kVA(OLD tariff) +85016410,"ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output exceeding 750 kVA:Of an output exceeding 750 kVA but not exceeding 2,000 kVA(OLD tariff)" +85016420,"ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output exceeding 750 kVA:Of an output exceeding 2,000 kVA but not exceeding 5,000 kVA(OLD tariff)" +85016430,"ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output exceeding 750 kVA:Of an output exceeding 5,000 kVA but not exceeding 15,000 kVA(OLD tariff)" +85016440,"ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output exceeding 750 kVA:Of an output 
exceeding 15,000 kVA but not exceeding 37,500 kVA(OLD tariff)" +85016450,"ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output exceeding 750 kVA:Of an output exceeding 37,500 kVA but not exceeding 75,000 kVA(OLD tariff)" +85016460,"ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output exceeding 750 kVA:Of an output exceeding 75,000 kVA but not exceeding 1,37,500 kVA(OLD tariff)" +85016470,"ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output exceeding 750 kVA:Of an output exceeding 1,37,500 kVA but not exceeding 3,12,500 kVA(OLD tariff)" +85016480,"ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an output exceeding 750 kVA:Of an output exceeding 3,12,500 kVA(OLD tariff)" +85017100,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):85017100:Of an output not exceeding 50 W(OLD tariff) +85017200,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):Of an: Of an output exceeding 50 W(OLD tariff) +85018000,ELECTRIC MOTORS AND GENERATORS (EXCLUDING GENERATING SETS):ac:Photovoltaic AC generators(OLD tariff) +85020000,electric generating sets and rotary converters generating sets with internal combustion piston engines diesel or engines +85021100,ELECTRIC GENERATING SETS AND ROTARY CONVERTERS::Of an output not exceeding 75 kVA +85021200,ELECTRIC GENERATING SETS AND ROTARY CONVERTERS::Of an output exceeding 75 kVA but not exceeding 375 kVA +85021300,electric generating sets and rotary converters generating sets with internal combustion piston engines diesel or engines >> of an output exceeding 375 kva +85021310,"ELECTRIC GENERATING SETS AND ROTARY CONVERTERS:Of an output exceeding 375 kVA :Of an output exceeding 375 kVA but not exceeding 1,000 kVA" +85021320,"ELECTRIC GENERATING SETS AND ROTARY CONVERTERS:Of an output exceeding 375 kVA :Of an output exceeding 1,000 kVA but not exceeding 1,500 kVA" +85021330,"ELECTRIC GENERATING SETS AND ROTARY CONVERTERS:Of an output exceeding 375 kVA :Of an output exceeding 1,500 kVA" +85021340,"ELECTRIC GENERATING SETS AND ROTARY CONVERTERS:Of an output exceeding 375 kVA :Of an output exceeding 2,000 kVA but not exceeding 5,000 kVA" +85021350,"ELECTRIC GENERATING SETS AND ROTARY CONVERTERS:Of an output exceeding 375 kVA :Of an output exceeding 5,000 kVA but not exceeding 10,000 kVA" +85021360,"ELECTRIC GENERATING SETS AND ROTARY CONVERTERS:Of an output exceeding 375 kVA :Of an output exceeding 10,000 kVA" +85022000,electric generating sets and rotary converters generating sets with internal combustion piston engines diesel or engines >> generating sets with internal combustion piston engines +85022010,ELECTRIC GENERATING SETS AND ROTARY CONVERTERS:Generating sets with spark-ignition internal combustion piston engines:Electric portable generators of an output not exceeding 3.5 kVA +85022090,ELECTRIC GENERATING SETS AND ROTARY CONVERTERS:Generating sets with spark-ignition internal combustion piston engines:Other +85023100,ELECTRIC GENERATING SETS AND ROTARY CONVERTERS::Wind-powered +85023900,electric generating sets and rotary converters generating sets with internal combustion piston engines diesel or engines >> other +85023910,ELECTRIC GENERATING SETS AND ROTARY CONVERTERS:Other :Powered by steam engine +85023920,ELECTRIC GENERATING SETS AND ROTARY CONVERTERS:Other :Powered by water turbine +85023990,ELECTRIC GENERATING SETS AND ROTARY CONVERTERS:Other :Other +85024000,ELECTRIC GENERATING SETS AND ROTARY CONVERTERS::Electric rotary converters +85030000,parts suitable for use solely 
or principally with the machines of heading 8501 or 8502 parts suitable for use solely or principally with the machines ofheading 8501 or 8502 +85030010,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF HEADING 8501 OR 8502:Parts suitable for use solely or principally with the machines of heading 8501 or 8502:Parts of generator (AC or DC) +85030021,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF HEADING 8501 OR 8502:Parts suitable for use solely or principally with the machines of heading 8501 or 8502:Of DC motor +85030029,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF HEADING 8501 OR 8502:Parts suitable for use solely or principally with the machines of heading 8501 or 8502:Other +85030090,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE MACHINES OF HEADING 8501 OR 8502:Parts suitable for use solely or principally with the machines of heading 8501 or 8502:Other +85040000,electrical transformers static converters for example rectifiers and inductors +85041000,electrical transformers static converters for example rectifiers and inductors >> ballasts for discharge lamps or tubes +85041010,"ELECTRICAL TRANSFORMERS, STATIC CONVERTERS (FOR EXAMPLE, RECTIFIERS) AND INDUCTORS:Ballasts for discharge lamps or tubes :Conventional type" +85041020,"ELECTRICAL TRANSFORMERS, STATIC CONVERTERS (FOR EXAMPLE, RECTIFIERS) AND INDUCTORS:Ballasts for discharge lamps or tubes :For compact fluorescent lamps" +85041090,"ELECTRICAL TRANSFORMERS, STATIC CONVERTERS (FOR EXAMPLE, RECTIFIERS) AND INDUCTORS:Ballasts for discharge lamps or tubes :Other" +85042100,"ELECTRICAL TRANSFORMERS, STATIC CONVERTERS (FOR EXAMPLE, RECTIFIERS) AND INDUCTORS::Having a power handling capacity" +85042200,"ELECTRICAL TRANSFORMERS, STATIC CONVERTERS (FOR EXAMPLE, RECTIFIERS) AND INDUCTORS::Having a power handling capacity exceeding 650 kVA but not exceeding 10,000 kVA" +85042300,electrical transformers static converters for example rectifiers and inductors >> having a power handling capacity exceeding kva +85042310,"ELECTRICAL TRANSFORMERS, STATIC CONVERTERS (FOR EXAMPLE, RECTIFIERS) AND INDUCTORS:Having a power handling capacity exceeding 10,000 kVA:Having a power handling capacity exceeding 10,000 kVA but not exceeding 50,000 kVA" +85042320,"ELECTRICAL TRANSFORMERS, STATIC CONVERTERS (FOR EXAMPLE, RECTIFIERS) AND INDUCTORS:Having a power handling capacity exceeding 10,000 kVA:Having a power handling capacity exceeding 50,000 kVA but not exceeding 1,00,000 kVA" +85042330,"ELECTRICAL TRANSFORMERS, STATIC CONVERTERS (FOR EXAMPLE, RECTIFIERS) AND INDUCTORS:Having a power handling capacity exceeding 10,000 kVA:Having a power handling capacity exceeding 1,00,000 kVA but not exceeding 2,50,000 kVA" +85042340,"ELECTRICAL TRANSFORMERS, STATIC CONVERTERS (FOR EXAMPLE, RECTIFIERS) AND INDUCTORS:Having a power handling capacity exceeding 10,000 kVA:Having a power handling capacity exceeding 2,50,000 kVA" +85043100,"ELECTRICAL TRANSFORMERS, STATIC CONVERTERS (FOR EXAMPLE, RECTIFIERS) AND INDUCTORS::Having a power handling capacity not exceeding 1 kVA" +85043200,"ELECTRICAL TRANSFORMERS, STATIC CONVERTERS (FOR EXAMPLE, RECTIFIERS) AND INDUCTORS::Having a power handling capacity exceeding 1 kVA but not exceeding 16 kVA" +85043300,"ELECTRICAL TRANSFORMERS, STATIC CONVERTERS (FOR EXAMPLE, RECTIFIERS) AND INDUCTORS::Having a power handling capacity exceeding 16 kVA but not exceeding 500 kVA" +85043400,"ELECTRICAL TRANSFORMERS, STATIC CONVERTERS (FOR EXAMPLE, RECTIFIERS) AND INDUCTORS::Having a 
power handling capacity exceeding" +85044000,electrical transformers static converters for example rectifiers and inductors >> static converters +85044010,"ELECTRICAL TRANSFORMERS, STATIC CONVERTERS (FOR EXAMPLE, RECTIFIERS) AND INDUCTORS:Static converters:Electric inverter" +85044021,"ELECTRICAL TRANSFORMERS, STATIC CONVERTERS (FOR EXAMPLE, RECTIFIERS) AND INDUCTORS:Static converters:Dip bridge rectifier" +85044029,"ELECTRICAL TRANSFORMERS, STATIC CONVERTERS (FOR EXAMPLE, RECTIFIERS) AND INDUCTORS:Static converters:Other" +85044030,"ELECTRICAL TRANSFORMERS, STATIC CONVERTERS (FOR EXAMPLE, RECTIFIERS) AND INDUCTORS:Static converters:Battery chargers" +85044040,"ELECTRICAL TRANSFORMERS, STATIC CONVERTERS (FOR EXAMPLE, RECTIFIERS) AND INDUCTORS:Static converters:Voltage regulator and stabilizers (other than automatic)" +85044090,"ELECTRICAL TRANSFORMERS, STATIC CONVERTERS (FOR EXAMPLE, RECTIFIERS) AND INDUCTORS:Static converters:Other" +85045000,electrical transformers static converters for example rectifiers and inductors >> other inductors +85045010,"ELECTRICAL TRANSFORMERS, STATIC CONVERTERS (FOR EXAMPLE, RECTIFIERS) AND INDUCTORS:Other inductors:Choke coils (chokes)" +85045090,"ELECTRICAL TRANSFORMERS, STATIC CONVERTERS (FOR EXAMPLE, RECTIFIERS) AND INDUCTORS:Other inductors:Other" +85049000,electrical transformers static converters for example rectifiers and inductors >> parts +85049010,"ELECTRICAL TRANSFORMERS, STATIC CONVERTERS (FOR EXAMPLE, RECTIFIERS) AND INDUCTORS:Parts :Of transformers" +85049090,"ELECTRICAL TRANSFORMERS, STATIC CONVERTERS (FOR EXAMPLE, RECTIFIERS) AND INDUCTORS:Parts :Other" +85050000,intended to become permanent magnets after magnetisation or permanent magnet chucks clamps and similar holding devices couplings clutches and brakes electro magnetic lifting heads permanent magnets and articles intended to become permanent magnets after magnetisation +85051100,intended to become permanent magnets after magnetisation or permanent magnet chucks clamps and similar holding devices couplings clutches and brakes electro magnetic lifting heads permanent magnets and articles intended to become permanent magnets after magnetisation >> of metal +85051110,"ELECTRO-MAGNETS; PERMANENT MAGNETS AND ARTICLES INTENDED TO BECOME PERMANENT MAGNETS AFTER MAGNETISATION; ELECTRO-MAGNETIC OR PERMANENT MAGNET CHUCKS, CLAMPS AND SIMILAR HOLDING DEVICES; ELECTRO-MAGNETIC COUPLINGS, CLUTCHES AND BRAKES; ELECTROMAGNETIC LIFTING HEADS:Of metal :Ferrite cores" +85051190,"ELECTRO-MAGNETS; PERMANENT MAGNETS AND ARTICLES INTENDED TO BECOME PERMANENT MAGNETS AFTER MAGNETISATION; ELECTRO-MAGNETIC OR PERMANENT MAGNET CHUCKS, CLAMPS AND SIMILAR HOLDING DEVICES; ELECTRO-MAGNETIC COUPLINGS, CLUTCHES AND BRAKES; ELECTROMAGNETIC LIFTING HEADS:Of metal :Other" +85051900,"ELECTRO-MAGNETS; PERMANENT MAGNETS AND ARTICLES INTENDED TO BECOME PERMANENT MAGNETS AFTER MAGNETISATION; ELECTRO-MAGNETIC OR PERMANENT MAGNET CHUCKS, CLAMPS AND SIMILAR HOLDING DEVICES; ELECTRO-MAGNETIC COUPLINGS, CLUTCHES AND BRAKES; ELECTROMAGNETIC LIFTING HEADS::Other" +85052000,"ELECTRO-MAGNETS; PERMANENT MAGNETS AND ARTICLES INTENDED TO BECOME PERMANENT MAGNETS AFTER MAGNETISATION; ELECTRO-MAGNETIC OR PERMANENT MAGNET CHUCKS, CLAMPS AND SIMILAR HOLDING DEVICES; ELECTRO-MAGNETIC COUPLINGS, CLUTCHES AND BRAKES; ELECTROMAGNETIC LIFTING HEADS::Electro-magnetic couplings, clutches and brakes" +85059000,"ELECTRO-MAGNETS; PERMANENT MAGNETS AND ARTICLES INTENDED TO BECOME PERMANENT MAGNETS AFTER MAGNETISATION; ELECTRO-MAGNETIC OR PERMANENT 
MAGNET CHUCKS, CLAMPS AND SIMILAR HOLDING DEVICES; ELECTRO-MAGNETIC COUPLINGS, CLUTCHES AND BRAKES; ELECTROMAGNETIC LIFTING HEADS::Other, including parts" +85060000,primary cells and primary batteries +85061000,PRIMARY CELLS AND PRIMARY BATTERIES::Manganese dioxide +85063000,PRIMARY CELLS AND PRIMARY BATTERIES::Mercuric oxide +85064000,PRIMARY CELLS AND PRIMARY BATTERIES::Silver oxide +85065000,PRIMARY CELLS AND PRIMARY BATTERIES::Lithium +85066000,PRIMARY CELLS AND PRIMARY BATTERIES::Air-zinc +85068000,primary cells and primary batteries >> other primary cells and primary batteries +85068010,PRIMARY CELLS AND PRIMARY BATTERIES:Other primary cells and primary batteries:Button Cells +85068090,PRIMARY CELLS AND PRIMARY BATTERIES:Other primary cells and primary batteries:Other +85069000,PRIMARY CELLS AND PRIMARY BATTERIES::Parts +85070000,electric accumulators including separators therefor whether or not rectangular including square +85071000,"ELECTRIC ACCUMULATORS, INCLUDING SEPARATORS THEREFOR, WHETHER OR NOT RECTANGULAR (INCLUDING SQUARE)::Lead-acid, of a kind used for starting piston engines" +85072000,"ELECTRIC ACCUMULATORS, INCLUDING SEPARATORS THEREFOR, WHETHER OR NOT RECTANGULAR (INCLUDING SQUARE)::Other lead-acid accumulators" +85073000,"ELECTRIC ACCUMULATORS, INCLUDING SEPARATORS THEREFOR, WHETHER OR NOT RECTANGULAR (INCLUDING SQUARE)::Nickel-cadmium" +85074000,"ELECTRIC ACCUMULATORS, INCLUDING SEPARATORS THEREFOR, WHETHER OR NOT RECTANGULAR (INCLUDING SQUARE)::Nickel-iron(OLD tariff)" +85075000,"ELECTRIC ACCUMULATORS, INCLUDING SEPARATORS THEREFOR, WHETHER OR NOT RECTANGULAR (INCLUDING SQUARE):Nickel-metal hydride:Nickel-metal hydride" +85076000,"ELECTRIC ACCUMULATORS, INCLUDING SEPARATORS THEREFOR, WHETHER OR NOT RECTANGULAR (INCLUDING SQUARE):Lithium-ion:Lithium-ion" +85078000,"ELECTRIC ACCUMULATORS, INCLUDING SEPARATORS THEREFOR, WHETHER OR NOT RECTANGULAR (INCLUDING SQUARE)::Other accumulators" +85079000,electric accumulators including separators therefor whether or not rectangular including square >> parts +85079010,"ELECTRIC ACCUMULATORS, INCLUDING SEPARATORS THEREFOR, WHETHER OR NOT RECTANGULAR (INCLUDING SQUARE):Parts :Accumulator cases made of hard rubber and separators" +85079090,"ELECTRIC ACCUMULATORS, INCLUDING SEPARATORS THEREFOR, WHETHER OR NOT RECTANGULAR (INCLUDING SQUARE):Parts :Other" +85080000,vacuum cleaners with electric motor +85081100,"VACUUM CLEANERS::Of a power not exceeding 1,500 W and having a dust bag or other receptacle capacity not exceeding 20 l" +85081900,VACUUM CLEANERS::Other +85086000,VACUUM CLEANERS::Other vacuum cleaners +85087000,VACUUM CLEANERS::Parts +85090000,domestic appliances with self contained electric motor other than +85094000,domestic appliances with self contained electric motor other than >> food grinders and mixers fruit or vegetable juice extractors +85094010,"ELECTRO-MECHANICAL DOMESTIC APPLIANCES, WITH SELF CONTAINED ELECTRIC MOTOR, OTHER THAN VACUUM CLEANERS OF HEADING 8508.:Food grinders and mixers; fruit or vegetable juice extractors :Food grinders" +85094090,"ELECTRO-MECHANICAL DOMESTIC APPLIANCES, WITH SELF CONTAINED ELECTRIC MOTOR, OTHER THAN VACUUM CLEANERS OF HEADING 8508.:Food grinders and mixers; fruit or vegetable juice extractors :Other" +85098000,"ELECTRO-MECHANICAL DOMESTIC APPLIANCES, WITH SELF CONTAINED ELECTRIC MOTOR, OTHER THAN VACUUM CLEANERS OF HEADING 8508.::Other appliances" +85099000,"ELECTRO-MECHANICAL DOMESTIC APPLIANCES, WITH SELF CONTAINED ELECTRIC MOTOR, OTHER THAN VACUUM CLEANERS OF HEADING 
8508.::Parts" +85100000,shavers hair clippers and appliances with electric motor +85101000,"SHAVERS, HAIR CLIPPERS AND HAIR-REMOVING APPLIANCES, WITH SELF-CONTAINED ELECTRIC MOTOR::Shavers" +85102000,"SHAVERS, HAIR CLIPPERS AND HAIR-REMOVING APPLIANCES, WITH SELF-CONTAINED ELECTRIC MOTOR::Hair clippers" +85103000,"SHAVERS, HAIR CLIPPERS AND HAIR-REMOVING APPLIANCES, WITH SELF-CONTAINED ELECTRIC MOTOR::Hair-removing appliances" +85109000,"SHAVERS, HAIR CLIPPERS AND HAIR-REMOVING APPLIANCES, WITH SELF-CONTAINED ELECTRIC MOTOR::Parts" +85110000,electrical ignition or starting equipment of a kind used for or compression ignition internal combustion engines for example ignition magnetos ignition coils sparking plugs and glow plugs starter motors generators for example dynamos alternators and cut outs of a kind used in conjunction with such engines +85111000,"ELECTRICAL IGNITION OR STARTING EQUIPMENT OF A KIND USED FOR SPARKIGNITION OR COMPRESSION-IGNITION INTERNAL COMBUSTION ENGINES (FOR EXAMPLE, IGNITION MAGNETOS, MAGNETODYNAMOS, IGNITION COILS, SPARKING PLUGS AND GLOW PLUGS, STARTER MOTORS); GENERATORS (FOR EXAMPLE, DYNAMOS, ALTERNATORS) AND CUT- OUTS OF A KIND USED IN CONJUNCTION WITH SUCH ENGINES::Sparking plugs" +85112000,electrical ignition or starting equipment of a kind used for or compression ignition internal combustion engines for example ignition magnetos ignition coils sparking plugs and glow plugs starter motors generators for example dynamos alternators and cut outs of a kind used in conjunction with such engines >> ignition magnetos magnetic flywheels +85112010,"ELECTRICAL IGNITION OR STARTING EQUIPMENT OF A KIND USED FOR SPARKIGNITION OR COMPRESSION-IGNITION INTERNAL COMBUSTION ENGINES (FOR EXAMPLE, IGNITION MAGNETOS, MAGNETODYNAMOS, IGNITION COILS, SPARKING PLUGS AND GLOW PLUGS, STARTER MOTORS); GENERATORS (FOR EXAMPLE, DYNAMOS, ALTERNATORS) AND CUT- OUTS OF A KIND USED IN CONJUNCTION WITH SUCH ENGINES:Ignition magnetos; magneto-dynamos; magnetic flywheels :Electronic ignition magnetos" +85112090,"ELECTRICAL IGNITION OR STARTING EQUIPMENT OF A KIND USED FOR SPARKIGNITION OR COMPRESSION-IGNITION INTERNAL COMBUSTION ENGINES (FOR EXAMPLE, IGNITION MAGNETOS, MAGNETODYNAMOS, IGNITION COILS, SPARKING PLUGS AND GLOW PLUGS, STARTER MOTORS); GENERATORS (FOR EXAMPLE, DYNAMOS, ALTERNATORS) AND CUT- OUTS OF A KIND USED IN CONJUNCTION WITH SUCH ENGINES:Ignition magnetos; magneto-dynamos; magnetic flywheels :Other" +85113000,electrical ignition or starting equipment of a kind used for or compression ignition internal combustion engines for example ignition magnetos ignition coils sparking plugs and glow plugs starter motors generators for example dynamos alternators and cut outs of a kind used in conjunction with such engines >> distributors ignition coils +85113010,"ELECTRICAL IGNITION OR STARTING EQUIPMENT OF A KIND USED FOR SPARKIGNITION OR COMPRESSION-IGNITION INTERNAL COMBUSTION ENGINES (FOR EXAMPLE, IGNITION MAGNETOS, MAGNETODYNAMOS, IGNITION COILS, SPARKING PLUGS AND GLOW PLUGS, STARTER MOTORS); GENERATORS (FOR EXAMPLE, DYNAMOS, ALTERNATORS) AND CUT- OUTS OF A KIND USED IN CONJUNCTION WITH SUCH ENGINES:Distributors; ignition coils :Distributors" +85113020,"ELECTRICAL IGNITION OR STARTING EQUIPMENT OF A KIND USED FOR SPARKIGNITION OR COMPRESSION-IGNITION INTERNAL COMBUSTION ENGINES (FOR EXAMPLE, IGNITION MAGNETOS, MAGNETODYNAMOS, IGNITION COILS, SPARKING PLUGS AND GLOW PLUGS, STARTER MOTORS); GENERATORS (FOR EXAMPLE, DYNAMOS, ALTERNATORS) AND CUT- OUTS OF A KIND USED IN CONJUNCTION 
WITH SUCH ENGINES:Distributors; ignition coils :Ignition coils" +85114000,"ELECTRICAL IGNITION OR STARTING EQUIPMENT OF A KIND USED FOR SPARKIGNITION OR COMPRESSION-IGNITION INTERNAL COMBUSTION ENGINES (FOR EXAMPLE, IGNITION MAGNETOS, MAGNETODYNAMOS, IGNITION COILS, SPARKING PLUGS AND GLOW PLUGS, STARTER MOTORS); GENERATORS (FOR EXAMPLE, DYNAMOS, ALTERNATORS) AND CUT- OUTS OF A KIND USED IN CONJUNCTION WITH SUCH ENGINES::Starter motors and dual purpose starter-generators" +85115000,"ELECTRICAL IGNITION OR STARTING EQUIPMENT OF A KIND USED FOR SPARKIGNITION OR COMPRESSION-IGNITION INTERNAL COMBUSTION ENGINES (FOR EXAMPLE, IGNITION MAGNETOS, MAGNETODYNAMOS, IGNITION COILS, SPARKING PLUGS AND GLOW PLUGS, STARTER MOTORS); GENERATORS (FOR EXAMPLE, DYNAMOS, ALTERNATORS) AND CUT- OUTS OF A KIND USED IN CONJUNCTION WITH SUCH ENGINES::Other generators" +85118000,"ELECTRICAL IGNITION OR STARTING EQUIPMENT OF A KIND USED FOR SPARKIGNITION OR COMPRESSION-IGNITION INTERNAL COMBUSTION ENGINES (FOR EXAMPLE, IGNITION MAGNETOS, MAGNETODYNAMOS, IGNITION COILS, SPARKING PLUGS AND GLOW PLUGS, STARTER MOTORS); GENERATORS (FOR EXAMPLE, DYNAMOS, ALTERNATORS) AND CUT- OUTS OF A KIND USED IN CONJUNCTION WITH SUCH ENGINES::Other equipment" +85119000,"ELECTRICAL IGNITION OR STARTING EQUIPMENT OF A KIND USED FOR SPARKIGNITION OR COMPRESSION-IGNITION INTERNAL COMBUSTION ENGINES (FOR EXAMPLE, IGNITION MAGNETOS, MAGNETODYNAMOS, IGNITION COILS, SPARKING PLUGS AND GLOW PLUGS, STARTER MOTORS); GENERATORS (FOR EXAMPLE, DYNAMOS, ALTERNATORS) AND CUT- OUTS OF A KIND USED IN CONJUNCTION WITH SUCH ENGINES::Parts" +85120000,electrical lighting or signalling equipment articles of heading excluding 8539 windscreen wipers defrosters and demisters of a kind used for cycles or motor vehicles +85121000,"ELECTRICAL LIGHTING OR SIGNALLING EQUIPMENT (EXCLUDING ARTICLES OF HEADING 8539), WINDSCREEN WIPERS, DEFROSTERS AND DEMISTERS, OF A KIND USED FOR CYCLES OR MOTOR VEHICLES::Lighting or visual signalling equipment of a kind used on bicycles" +85122000,electrical lighting or signalling equipment articles of heading excluding 8539 windscreen wipers defrosters and demisters of a kind used for cycles or motor vehicles >> other lighting or visual signalling equipment +85122010,"ELECTRICAL LIGHTING OR SIGNALLING EQUIPMENT (EXCLUDING ARTICLES OF HEADING 8539), WINDSCREEN WIPERS, DEFROSTERS AND DEMISTERS, OF A KIND USED FOR CYCLES OR MOTOR VEHICLES:Other lighting or visual signalling equipment :Head lamps, tail lamps, stop lamps, side lamps and blinkers" +85122020,"ELECTRICAL LIGHTING OR SIGNALLING EQUIPMENT (EXCLUDING ARTICLES OF HEADING 8539), WINDSCREEN WIPERS, DEFROSTERS AND DEMISTERS, OF A KIND USED FOR CYCLES OR MOTOR VEHICLES:Other lighting or visual signalling equipment :Other automobile lighting equipment" +85122090,"ELECTRICAL LIGHTING OR SIGNALLING EQUIPMENT (EXCLUDING ARTICLES OF HEADING 8539), WINDSCREEN WIPERS, DEFROSTERS AND DEMISTERS, OF A KIND USED FOR CYCLES OR MOTOR VEHICLES:Other lighting or visual signalling equipment :Other" +85123000,electrical lighting or signalling equipment articles of heading excluding 8539 windscreen wipers defrosters and demisters of a kind used for cycles or motor vehicles >> sound signalling equipment +85123010,"ELECTRICAL LIGHTING OR SIGNALLING EQUIPMENT (EXCLUDING ARTICLES OF HEADING 8539), WINDSCREEN WIPERS, DEFROSTERS AND DEMISTERS, OF A KIND USED FOR CYCLES OR MOTOR VEHICLES:Sound signalling equipment:Horns" +85123090,"ELECTRICAL LIGHTING OR SIGNALLING EQUIPMENT (EXCLUDING ARTICLES OF HEADING 
8539), WINDSCREEN WIPERS, DEFROSTERS AND DEMISTERS, OF A KIND USED FOR CYCLES OR MOTOR VEHICLES:Sound signalling equipment:Other" +85124000,"ELECTRICAL LIGHTING OR SIGNALLING EQUIPMENT (EXCLUDING ARTICLES OF HEADING 8539), WINDSCREEN WIPERS, DEFROSTERS AND DEMISTERS, OF A KIND USED FOR CYCLES OR MOTOR VEHICLES::Windscreen wipers, defrosters and demisters" +85129000,"ELECTRICAL LIGHTING OR SIGNALLING EQUIPMENT (EXCLUDING ARTICLES OF HEADING 8539), WINDSCREEN WIPERS, DEFROSTERS AND DEMISTERS, OF A KIND USED FOR CYCLES OR MOTOR VEHICLES::Parts" +85130000,portable electric lamps designed to function by their own source of energy for example dry batteries accumulators magnetos other than lighting equipment of heading 8512 portable electric lamps designed to function by their own source of energy for example dry batteries accumulators magnetos other than lighting equipment of heading 8512 +85131000,portable electric lamps designed to function by their own source of energy for example dry batteries accumulators magnetos other than lighting equipment of heading 8512 portable electric lamps designed to function by their own source of energy for example dry batteries accumulators magnetos other than lighting equipment of heading 8512 >> lamps lamps +85131010,"PORTABLE ELECTRIC LAMPS DESIGNED TO FUNCTION BY THEIR OWN SOURCE OF ENERGY (FOR EXAMPLE, DRY BATTERIES, ACCUMULATORS, MAGNETOS), OTHER THAN LIGHTING EQUIPMENT OF HEADING 8512:Lamps :Torch" +85131020,"PORTABLE ELECTRIC LAMPS DESIGNED TO FUNCTION BY THEIR OWN SOURCE OF ENERGY (FOR EXAMPLE, DRY BATTERIES, ACCUMULATORS, MAGNETOS), OTHER THAN LIGHTING EQUIPMENT OF HEADING 8512:Lamps :Other flash-lights excluding those for photographic purposes" +85131030,"PORTABLE ELECTRIC LAMPS DESIGNED TO FUNCTION BY THEIR OWN SOURCE OF ENERGY (FOR EXAMPLE, DRY BATTERIES, ACCUMULATORS, MAGNETOS), OTHER THAN LIGHTING EQUIPMENT OF HEADING 8512:Lamps :Miners safety lamps" +85131040,"PORTABLE ELECTRIC LAMPS DESIGNED TO FUNCTION BY THEIR OWN SOURCE OF ENERGY (FOR EXAMPLE, DRY BATTERIES, ACCUMULATORS, MAGNETOS), OTHER THAN LIGHTING EQUIPMENT OF HEADING 8512:Lamps :Magneto lamps" +85131090,"PORTABLE ELECTRIC LAMPS DESIGNED TO FUNCTION BY THEIR OWN SOURCE OF ENERGY (FOR EXAMPLE, DRY BATTERIES, ACCUMULATORS, MAGNETOS), OTHER THAN LIGHTING EQUIPMENT OF HEADING 8512:Lamps :Other" +85139000,"PORTABLE ELECTRIC LAMPS DESIGNED TO FUNCTION BY THEIR OWN SOURCE OF ENERGY (FOR EXAMPLE, DRY BATTERIES, ACCUMULATORS, MAGNETOS), OTHER THAN LIGHTING EQUIPMENT OF HEADING 8512::Parts" +85140000,industrial or laboratory electric furnaces and ovens including those functioning by induction or dielectric loss other industrial or laboratory equipment for the heat treatment of materials by induction or dielectric loss resistance heated furnaces and ovens industrial or laboratory electric furnaces and ovens including those functioning by induction or dielectric loss other industrial or laboratory equipment for the heat treatment of materials by induction or dielectric loss resistance heated furnaces and ovens +85141000,INDUSTRIAL OR LABORATORY ELECTRIC FURNACES AND OVENS (INCLUDING THOSE FUNCTIONING BY INDUCTION OR DIELECTRIC LOSS); OTHER INDUSTRIAL OR LABORATORY EQUIPMENT FOR THE HEAT TREATMENT OF MATERIALS BY INDUCTION OR DIELECTRIC LOSS::Resistance heated furnaces and ovens(OLD tariff) +85141100,INDUSTRIAL OR LABORATORY ELECTRIC FURNACES AND OVENS (INCLUDING THOSE FUNCTIONING BY INDUCTION OR DIELECTRIC LOSS); OTHER INDUSTRIAL OR LABORATORY EQUIPMENT FOR THE HEAT TREATMENT OF MATERIALS BY 
INDUCTION OR DIELECTRIC LOSS::Hot isostatic presses +85141900,INDUSTRIAL OR LABORATORY ELECTRIC FURNACES AND OVENS (INCLUDING THOSE FUNCTIONING BY INDUCTION OR DIELECTRIC LOSS); OTHER INDUSTRIAL OR LABORATORY EQUIPMENT FOR THE HEAT TREATMENT OF MATERIALS BY INDUCTION OR DIELECTRIC LOSS:oth:Other +85142000,INDUSTRIAL OR LABORATORY ELECTRIC FURNACES AND OVENS (INCLUDING THOSE FUNCTIONING BY INDUCTION OR DIELECTRIC LOSS); OTHER INDUSTRIAL OR LABORATORY EQUIPMENT FOR THE HEAT TREATMENT OF MATERIALS BY INDUCTION OR DIELECTRIC LOSS::Furnaces and ovens functioning by induction or dielectric loss +85143010,INDUSTRIAL OR LABORATORY ELECTRIC FURNACES AND OVENS (INCLUDING THOSE FUNCTIONING BY INDUCTION OR DIELECTRIC LOSS); OTHER INDUSTRIAL OR LABORATORY EQUIPMENT FOR THE HEAT TREATMENT OF MATERIALS BY INDUCTION OR DIELECTRIC LOSS:Other furnaces and ovens:For melting(OLD tariff) +85143090,INDUSTRIAL OR LABORATORY ELECTRIC FURNACES AND OVENS (INCLUDING THOSE FUNCTIONING BY INDUCTION OR DIELECTRIC LOSS); OTHER INDUSTRIAL OR LABORATORY EQUIPMENT FOR THE HEAT TREATMENT OF MATERIALS BY INDUCTION OR DIELECTRIC LOSS:Other furnaces and ovens:Other(OLD tariff) +85143100,INDUSTRIAL OR LABORATORY ELECTRIC FURNACES AND OVENS (INCLUDING THOSE FUNCTIONING BY INDUCTION OR DIELECTRIC LOSS); OTHER INDUSTRIAL OR LABORATORY EQUIPMENT FOR THE HEAT TREATMENT OF MATERIALS BY INDUCTION OR DIELECTRIC LOSS::Electron beam furnaces +85143200,INDUSTRIAL OR LABORATORY ELECTRIC FURNACES AND OVENS (INCLUDING THOSE FUNCTIONING BY INDUCTION OR DIELECTRIC LOSS); OTHER INDUSTRIAL OR LABORATORY EQUIPMENT FOR THE HEAT TREATMENT OF MATERIALS BY INDUCTION OR DIELECTRIC LOSS:plas:Plasma and vacuum arc furnaces +85143900,INDUSTRIAL OR LABORATORY ELECTRIC FURNACES AND OVENS (INCLUDING THOSE FUNCTIONING BY INDUCTION OR DIELECTRIC LOSS); OTHER INDUSTRIAL OR LABORATORY EQUIPMENT FOR THE HEAT TREATMENT OF MATERIALS BY INDUCTION OR DIELECTRIC LOSS::Other +85144000,INDUSTRIAL OR LABORATORY ELECTRIC FURNACES AND OVENS (INCLUDING THOSE FUNCTIONING BY INDUCTION OR DIELECTRIC LOSS); OTHER INDUSTRIAL OR LABORATORY EQUIPMENT FOR THE HEAT TREATMENT OF MATERIALS BY INDUCTION OR DIELECTRIC LOSS::Other equipment for the heat treatment of materials by induction or dielectric loss +85149000,INDUSTRIAL OR LABORATORY ELECTRIC FURNACES AND OVENS (INCLUDING THOSE FUNCTIONING BY INDUCTION OR DIELECTRIC LOSS); OTHER INDUSTRIAL OR LABORATORY EQUIPMENT FOR THE HEAT TREATMENT OF MATERIALS BY INDUCTION OR DIELECTRIC LOSS::Parts +85150000,electric including electrically heated gas laser or other light or photo beam electric including electrically heated gas laser or other light or photo beam ultrasonic electron beam magnetic pulse or plasma arc soldering brazing or welding machines and apparatus whether or not capable of cutting electric machines and apparatus for hot spraying of metals or cermets brazing or soldering machines and apparatus +85151100,"ELECTRIC (INCLUDING ELECTRICALLY HEATED GAS), LASER OR OTHER LIGHT OR PHOTO BEAM, ULTRASONIC, ELECTRON BEAM, MAGNETIC PULSE OR PLASMA ARC SOLDERING, BRAZING OR WELDING MACHINES AND APPARATUS, WHETHER OR NOT CAPABLE OF CUTTING; ELECTRIC MACHINES AND APPARATUS FOR HOT SPRAYING OF METALS OR CERMETS::Soldering irons and guns" +85151900,"ELECTRIC (INCLUDING ELECTRICALLY HEATED GAS), LASER OR OTHER LIGHT OR PHOTO BEAM, ULTRASONIC, ELECTRON BEAM, MAGNETIC PULSE OR PLASMA ARC SOLDERING, BRAZING OR WELDING MACHINES AND APPARATUS, WHETHER OR NOT CAPABLE OF CUTTING; ELECTRIC MACHINES AND APPARATUS FOR 
HOT SPRAYING OF METALS OR CERMETS::Other" +85152100,electric including electrically heated gas laser or other light or photo beam electric including electrically heated gas laser or other light or photo beam ultrasonic electron beam magnetic pulse or plasma arc soldering brazing or welding machines and apparatus whether or not capable of cutting electric machines and apparatus for hot spraying of metals or cermets brazing or soldering machines and apparatus >> fully or partly automatic +85152110,"ELECTRIC (INCLUDING ELECTRICALLY HEATED GAS), LASER OR OTHER LIGHT OR PHOTO BEAM, ULTRASONIC, ELECTRON BEAM, MAGNETIC PULSE OR PLASMA ARC SOLDERING, BRAZING OR WELDING MACHINES AND APPARATUS, WHETHER OR NOT CAPABLE OF CUTTING; ELECTRIC MACHINES AND APPARATUS FOR HOT SPRAYING OF METALS OR CERMETS:Fully or partly automatic:Automatic spot welding machinery" +85152120,"ELECTRIC (INCLUDING ELECTRICALLY HEATED GAS), LASER OR OTHER LIGHT OR PHOTO BEAM, ULTRASONIC, ELECTRON BEAM, MAGNETIC PULSE OR PLASMA ARC SOLDERING, BRAZING OR WELDING MACHINES AND APPARATUS, WHETHER OR NOT CAPABLE OF CUTTING; ELECTRIC MACHINES AND APPARATUS FOR HOT SPRAYING OF METALS OR CERMETS:Fully or partly automatic:Automatic butt welding machinery" +85152190,"ELECTRIC (INCLUDING ELECTRICALLY HEATED GAS), LASER OR OTHER LIGHT OR PHOTO BEAM, ULTRASONIC, ELECTRON BEAM, MAGNETIC PULSE OR PLASMA ARC SOLDERING, BRAZING OR WELDING MACHINES AND APPARATUS, WHETHER OR NOT CAPABLE OF CUTTING; ELECTRIC MACHINES AND APPARATUS FOR HOT SPRAYING OF METALS OR CERMETS:Fully or partly automatic:Other" +85152900,"ELECTRIC (INCLUDING ELECTRICALLY HEATED GAS), LASER OR OTHER LIGHT OR PHOTO BEAM, ULTRASONIC, ELECTRON BEAM, MAGNETIC PULSE OR PLASMA ARC SOLDERING, BRAZING OR WELDING MACHINES AND APPARATUS, WHETHER OR NOT CAPABLE OF CUTTING; ELECTRIC MACHINES AND APPARATUS FOR HOT SPRAYING OF METALS OR CERMETS::Other" +85153100,"ELECTRIC (INCLUDING ELECTRICALLY HEATED GAS), LASER OR OTHER LIGHT OR PHOTO BEAM, ULTRASONIC, ELECTRON BEAM, MAGNETIC PULSE OR PLASMA ARC SOLDERING, BRAZING OR WELDING MACHINES AND APPARATUS, WHETHER OR NOT CAPABLE OF CUTTING; ELECTRIC MACHINES AND APPARATUS FOR HOT SPRAYING OF METALS OR CERMETS::Fully or partly automatic" +85153900,electric including electrically heated gas laser or other light or photo beam electric including electrically heated gas laser or other light or photo beam ultrasonic electron beam magnetic pulse or plasma arc soldering brazing or welding machines and apparatus whether or not capable of cutting electric machines and apparatus for hot spraying of metals or cermets brazing or soldering machines and apparatus >> other +85153910,"ELECTRIC (INCLUDING ELECTRICALLY HEATED GAS), LASER OR OTHER LIGHT OR PHOTO BEAM, ULTRASONIC, ELECTRON BEAM, MAGNETIC PULSE OR PLASMA ARC SOLDERING, BRAZING OR WELDING MACHINES AND APPARATUS, WHETHER OR NOT CAPABLE OF CUTTING; ELECTRIC MACHINES AND APPARATUS FOR HOT SPRAYING OF METALS OR CERMETS:Other :AC arc welding machinery" +85153920,"ELECTRIC (INCLUDING ELECTRICALLY HEATED GAS), LASER OR OTHER LIGHT OR PHOTO BEAM, ULTRASONIC, ELECTRON BEAM, MAGNETIC PULSE OR PLASMA ARC SOLDERING, BRAZING OR WELDING MACHINES AND APPARATUS, WHETHER OR NOT CAPABLE OF CUTTING; ELECTRIC MACHINES AND APPARATUS FOR HOT SPRAYING OF METALS OR CERMETS:Other :Argon arc welding machinery" +85153990,"ELECTRIC (INCLUDING ELECTRICALLY HEATED GAS), LASER OR OTHER LIGHT OR PHOTO BEAM, ULTRASONIC, ELECTRON BEAM, MAGNETIC PULSE OR PLASMA ARC SOLDERING, 
BRAZING OR WELDING MACHINES AND APPARATUS, WHETHER OR NOT CAPABLE OF CUTTING; ELECTRIC MACHINES AND APPARATUS FOR HOT SPRAYING OF METALS OR CERMETS:Other :Other" +85158000,electric including electrically heated gas laser or other light or photo beam electric including electrically heated gas laser or other light or photo beam ultrasonic electron beam magnetic pulse or plasma arc soldering brazing or welding machines and apparatus whether or not capable of cutting electric machines and apparatus for hot spraying of metals or cermets brazing or soldering machines and apparatus >> other machines and apparatus +85158010,"ELECTRIC (INCLUDING ELECTRICALLY HEATED GAS), LASER OR OTHER LIGHT OR PHOTO BEAM, ULTRASONIC, ELECTRON BEAM, MAGNETIC PULSE OR PLASMA ARC SOLDERING, BRAZING OR WELDING MACHINES AND APPARATUS, WHETHER OR NOT CAPABLE OF CUTTING; ELECTRIC MACHINES AND APPARATUS FOR HOT SPRAYING OF METALS OR CERMETS:Other machines and apparatus:High-frequency plastic welding machine" +85158090,"ELECTRIC (INCLUDING ELECTRICALLY HEATED GAS), LASER OR OTHER LIGHT OR PHOTO BEAM, ULTRASONIC, ELECTRON BEAM, MAGNETIC PULSE OR PLASMA ARC SOLDERING, BRAZING OR WELDING MACHINES AND APPARATUS, WHETHER OR NOT CAPABLE OF CUTTING; ELECTRIC MACHINES AND APPARATUS FOR HOT SPRAYING OF METALS OR CERMETS:Other machines and apparatus:Other" +85159000,"ELECTRIC (INCLUDING ELECTRICALLY HEATED GAS), LASER OR OTHER LIGHT OR PHOTO BEAM, ULTRASONIC, ELECTRON BEAM, MAGNETIC PULSE OR PLASMA ARC SOLDERING, BRAZING OR WELDING MACHINES AND APPARATUS, WHETHER OR NOT CAPABLE OF CUTTING; ELECTRIC MACHINES AND APPARATUS FOR HOT SPRAYING OF METALS OR CERMETS::Parts" +85160000,electric instantaneous or storage water heaters and immersion heaters electric space heating other appliances +85161000,"ELECTRIC INSTANTANEOUS OR STORAGE WATER HEATERS AND IMMERSION HEATERS; ELECTRIC SPACE HEATING APPARATUS AND SOIL HEATING APPARATUS; ELECTRO-THERMIC HAIR-DRESSING APPARATUS (FOR EXAMPLE, HAIR DRYERS, HAIR CURLERS, CURLING TONG HEATERS) AND HAND DRYERS; ELECTRIC SMOOTHING IRONS; OTHER ELECTRO-THERMIC APPLIANCES OF A KIND USED FOR DOMESTIC PURPOSES; ELECTRIC HEATING RESISTORS, OTHER THAN THOSE OF HEADING 8545::Electric instantaneous or storage water heaters and immersion heaters(OLD tariff)" +85162100,"ELECTRIC INSTANTANEOUS OR STORAGE WATER HEATERS AND IMMERSION HEATERS; ELECTRIC SPACE HEATING APPARATUS AND SOIL HEATING APPARATUS; ELECTRO-THERMIC HAIR-DRESSING APPARATUS (FOR EXAMPLE, HAIR DRYERS, HAIR CURLERS, CURLING TONG HEATERS) AND HAND DRYERS; ELECTRIC SMOOTHING IRONS; OTHER ELECTRO-THERMIC APPLIANCES OF A KIND USED FOR DOMESTIC PURPOSES; ELECTRIC HEATING RESISTORS, OTHER THAN THOSE OF HEADING 8545::Storage heating radiators(OLD tariff)" +85162900,"ELECTRIC INSTANTANEOUS OR STORAGE WATER HEATERS AND IMMERSION HEATERS; ELECTRIC SPACE HEATING APPARATUS AND SOIL HEATING APPARATUS; ELECTRO-THERMIC HAIR-DRESSING APPARATUS (FOR EXAMPLE, HAIR DRYERS, HAIR CURLERS, CURLING TONG HEATERS) AND HAND DRYERS; ELECTRIC SMOOTHING IRONS; OTHER ELECTRO-THERMIC APPLIANCES OF A KIND USED FOR DOMESTIC PURPOSES; ELECTRIC HEATING RESISTORS, OTHER THAN THOSE OF HEADING 8545::Other(OLD tariff)" +85163100,"ELECTRIC INSTANTANEOUS OR STORAGE WATER HEATERS AND IMMERSION HEATERS; ELECTRIC SPACE HEATING APPARATUS AND SOIL HEATING APPARATUS; ELECTRO-THERMIC HAIR-DRESSING APPARATUS (FOR EXAMPLE, HAIR DRYERS, HAIR CURLERS, CURLING TONG HEATERS) AND HAND DRYERS; ELECTRIC SMOOTHING IRONS; OTHER ELECTRO-THERMIC APPLIANCES OF A KIND USED FOR 
DOMESTIC PURPOSES; ELECTRIC HEATING RESISTORS, OTHER THAN THOSE OF HEADING 8545::Hair dryers(OLD tariff)" +85163200,"ELECTRIC INSTANTANEOUS OR STORAGE WATER HEATERS AND IMMERSION HEATERS; ELECTRIC SPACE HEATING APPARATUS AND SOIL HEATING APPARATUS; ELECTRO-THERMIC HAIR-DRESSING APPARATUS (FOR EXAMPLE, HAIR DRYERS, HAIR CURLERS, CURLING TONG HEATERS) AND HAND DRYERS; ELECTRIC SMOOTHING IRONS; OTHER ELECTRO-THERMIC APPLIANCES OF A KIND USED FOR DOMESTIC PURPOSES; ELECTRIC HEATING RESISTORS, OTHER THAN THOSE OF HEADING 8545::Other hair-dressing apparatus(OLD tariff)" +85163300,"ELECTRIC INSTANTANEOUS OR STORAGE WATER HEATERS AND IMMERSION HEATERS; ELECTRIC SPACE HEATING APPARATUS AND SOIL HEATING APPARATUS; ELECTRO-THERMIC HAIR-DRESSING APPARATUS (FOR EXAMPLE, HAIR DRYERS, HAIR CURLERS, CURLING TONG HEATERS) AND HAND DRYERS; ELECTRIC SMOOTHING IRONS; OTHER ELECTRO-THERMIC APPLIANCES OF A KIND USED FOR DOMESTIC PURPOSES; ELECTRIC HEATING RESISTORS, OTHER THAN THOSE OF HEADING 8545::Hand-drying apparatus(OLD tariff)" +85164000,"ELECTRIC INSTANTANEOUS OR STORAGE WATER HEATERS AND IMMERSION HEATERS; ELECTRIC SPACE HEATING APPARATUS AND SOIL HEATING APPARATUS; ELECTRO-THERMIC HAIR-DRESSING APPARATUS (FOR EXAMPLE, HAIR DRYERS, HAIR CURLERS, CURLING TONG HEATERS) AND HAND DRYERS; ELECTRIC SMOOTHING IRONS; OTHER ELECTRO-THERMIC APPLIANCES OF A KIND USED FOR DOMESTIC PURPOSES; ELECTRIC HEATING RESISTORS, OTHER THAN THOSE OF HEADING 8545::Electric smoothing irons(OLD tariff)" +85165000,"ELECTRIC INSTANTANEOUS OR STORAGE WATER HEATERS AND IMMERSION HEATERS; ELECTRIC SPACE HEATING APPARATUS AND SOIL HEATING APPARATUS; ELECTRO-THERMIC HAIR-DRESSING APPARATUS (FOR EXAMPLE, HAIR DRYERS, HAIR CURLERS, CURLING TONG HEATERS) AND HAND DRYERS; ELECTRIC SMOOTHING IRONS; OTHER ELECTRO-THERMIC APPLIANCES OF A KIND USED FOR DOMESTIC PURPOSES; ELECTRIC HEATING RESISTORS, OTHER THAN THOSE OF HEADING 8545::Microwave ovens(OLD tariff)" +85166000,"ELECTRIC INSTANTANEOUS OR STORAGE WATER HEATERS AND IMMERSION HEATERS; ELECTRIC SPACE HEATING APPARATUS AND SOIL HEATING APPARATUS; ELECTRO-THERMIC HAIR-DRESSING APPARATUS (FOR EXAMPLE, HAIR DRYERS, HAIR CURLERS, CURLING TONG HEATERS) AND HAND DRYERS; ELECTRIC SMOOTHING IRONS; OTHER ELECTRO-THERMIC APPLIANCES OF A KIND USED FOR DOMESTIC PURPOSES; ELECTRIC HEATING RESISTORS, OTHER THAN THOSE OF HEADING 8545::Other ovens; cookers, cooking plates, boiling rings, grillers and roasters(OLD tariff)" +85167100,"ELECTRIC INSTANTANEOUS OR STORAGE WATER HEATERS AND IMMERSION HEATERS; ELECTRIC SPACE HEATING APPARATUS AND SOIL HEATING APPARATUS; ELECTRO-THERMIC HAIR-DRESSING APPARATUS (FOR EXAMPLE, HAIR DRYERS, HAIR CURLERS, CURLING TONG HEATERS) AND HAND DRYERS; ELECTRIC SMOOTHING IRONS; OTHER ELECTRO-THERMIC APPLIANCES OF A KIND USED FOR DOMESTIC PURPOSES; ELECTRIC HEATING RESISTORS, OTHER THAN THOSE OF HEADING 8545::Coffee or tea makers" +85167200,"ELECTRIC INSTANTANEOUS OR STORAGE WATER HEATERS AND IMMERSION HEATERS; ELECTRIC SPACE HEATING APPARATUS AND SOIL HEATING APPARATUS; ELECTRO-THERMIC HAIR-DRESSING APPARATUS (FOR EXAMPLE, HAIR DRYERS, HAIR CURLERS, CURLING TONG HEATERS) AND HAND DRYERS; ELECTRIC SMOOTHING IRONS; OTHER ELECTRO-THERMIC APPLIANCES OF A KIND USED FOR DOMESTIC PURPOSES; ELECTRIC HEATING RESISTORS, OTHER THAN THOSE OF HEADING 8545::Toasters" +85167900,electric instantaneous or storage water heaters and immersion heaters electric space heating other appliances >> other +85167910,"ELECTRIC INSTANTANEOUS OR STORAGE WATER HEATERS AND IMMERSION HEATERS; ELECTRIC SPACE 
HEATING APPARATUS AND SOIL HEATING APPARATUS; ELECTRO-THERMIC HAIR-DRESSING APPARATUS (FOR EXAMPLE, HAIR DRYERS, HAIR CURLERS, CURLING TONG HEATERS) AND HAND DRYERS; ELECTRIC SMOOTHING IRONS; OTHER ELECTRO-THERMIC APPLIANCES OF A KIND USED FOR DOMESTIC PURPOSES; ELECTRIC HEATING RESISTORS, OTHER THAN THOSE OF HEADING 8545:Other :Electro-thermic fluid heaters" +85167920,"ELECTRIC INSTANTANEOUS OR STORAGE WATER HEATERS AND IMMERSION HEATERS; ELECTRIC SPACE HEATING APPARATUS AND SOIL HEATING APPARATUS; ELECTRO-THERMIC HAIR-DRESSING APPARATUS (FOR EXAMPLE, HAIR DRYERS, HAIR CURLERS, CURLING TONG HEATERS) AND HAND DRYERS; ELECTRIC SMOOTHING IRONS; OTHER ELECTRO-THERMIC APPLIANCES OF A KIND USED FOR DOMESTIC PURPOSES; ELECTRIC HEATING RESISTORS, OTHER THAN THOSE OF HEADING 8545:Other :Electrical or electronic devices for repelling insects (for example, mosquitoes or other similar kind of inscets)" +85167990,"ELECTRIC INSTANTANEOUS OR STORAGE WATER HEATERS AND IMMERSION HEATERS; ELECTRIC SPACE HEATING APPARATUS AND SOIL HEATING APPARATUS; ELECTRO-THERMIC HAIR-DRESSING APPARATUS (FOR EXAMPLE, HAIR DRYERS, HAIR CURLERS, CURLING TONG HEATERS) AND HAND DRYERS; ELECTRIC SMOOTHING IRONS; OTHER ELECTRO-THERMIC APPLIANCES OF A KIND USED FOR DOMESTIC PURPOSES; ELECTRIC HEATING RESISTORS, OTHER THAN THOSE OF HEADING 8545:Other :Other" +85168000,"ELECTRIC INSTANTANEOUS OR STORAGE WATER HEATERS AND IMMERSION HEATERS; ELECTRIC SPACE HEATING APPARATUS AND SOIL HEATING APPARATUS; ELECTRO-THERMIC HAIR-DRESSING APPARATUS (FOR EXAMPLE, HAIR DRYERS, HAIR CURLERS, CURLING TONG HEATERS) AND HAND DRYERS; ELECTRIC SMOOTHING IRONS; OTHER ELECTRO-THERMIC APPLIANCES OF A KIND USED FOR DOMESTIC PURPOSES; ELECTRIC HEATING RESISTORS, OTHER THAN THOSE OF HEADING 8545::Electric heating resistors" +85169000,"ELECTRIC INSTANTANEOUS OR STORAGE WATER HEATERS AND IMMERSION HEATERS; ELECTRIC SPACE HEATING APPARATUS AND SOIL HEATING APPARATUS; ELECTRO-THERMIC HAIR-DRESSING APPARATUS (FOR EXAMPLE, HAIR DRYERS, HAIR CURLERS, CURLING TONG HEATERS) AND HAND DRYERS; ELECTRIC SMOOTHING IRONS; OTHER ELECTRO-THERMIC APPLIANCES OF A KIND USED FOR DOMESTIC PURPOSES; ELECTRIC HEATING RESISTORS, OTHER THAN THOSE OF HEADING 8545::Parts" +85171110,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Line telephone sets with cordless handsets:Push button type(OLD tariff)" +85171190,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Line telephone sets with cordless handsets:Other(OLD tariff)" +85171210,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Telephones for cellular networks or for other wireless networks:Push button 
type(OLD tariff)" +85171211,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Telephones for cellular networks or for other wireless networks:Mobils phone, other than push button type(OLD tariff)" +85171219,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Telephones for cellular networks or for other wireless networks:Mobile phone, push button type(OLD tariff)" +85171290,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Telephones for cellular networks or for other wireless networks:Telephones for other wireless networks(OLD tariff)" +85171300,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:smar:Smartphones(OLD tariff)" +85171400,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528::Other telephones for cellular networks or for other wireless networks(OLD tariff)" +85171810,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Other:Push button type(OLD tariff)" +85171890,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Other:Other(OLD tariff)" +85176100,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528::Base stations(OLD 
tariff)" +85176210,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Machines for the reception, conversion and transmission or regeneration of voice, images or other data, including switching and routing apparatus:PLCC equipment(OLD tariff)" +85176220,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Machines for the reception, conversion and transmission or regeneration of voice, images or other data, including switching and routing apparatus:Voice frequency telegraphy(OLD tariff)" +85176230,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Machines for the reception, conversion and transmission or regeneration of voice, images or other data, including switching and routing apparatus:Modems (modulators-demodulators)(OLD tariff)" +85176240,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Machines for the reception, conversion and transmission or regeneration of voice, images or other data, including switching and routing apparatus:High bit rate digital subscriber line system (HDSL)(OLD tariff)" +85176250,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Machines for the reception, conversion and transmission or regeneration of voice, images or other data, including switching and routing apparatus:Digital loop carrier system(DLC)(OLD tariff)" +85176260,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Machines for the reception, conversion and transmission or regeneration of voice, images or other data, including switching and routing apparatus:Synchronous digital hierarchy system(SDH)(OLD tariff)" +85176270,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR 
RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Machines for the reception, conversion and transmission or regeneration of voice, images or other data, including switching and routing apparatus:Multiplexers, statistical multiplexers(OLD tariff)" +85176290,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Machines for the reception, conversion and transmission or regeneration of voice, images or other data, including switching and routing apparatus:Other(OLD tariff)" +85176910,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Other:ISDN System(OLD tariff)" +85176920,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Other:ISDN terminal adaptor(OLD tariff)" +85176930,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Other:Routers(OLD tariff)" +85176940,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Other:X 25 Pads(OLD tariff)" +85176950,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Other:Subscriber end equipment(OLD tariff)" +85176960,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Other:Set top boxes for gaining access to internet(OLD tariff)" +85176970,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS 
OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Other:Attachments for telephones(OLD tariff)" +85176990,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Other:Other(OLD tariff)" +85177010,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Parts:Populated, loaded or stuffed printed circuit boards(OLD tariff)" +85177090,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Parts:Other(OLD tariff)" +85177100,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528::Aerials and aerial reflectors of all kinds; parts suitable for use therewith(OLD tariff)" +85177910,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Other:Populated, loaded or stuffed printed circuit boards(OLD tariff)" +85177990,"TELEPHONE SETS, INCLUDING TELEPHONES FOR CELLULAR NETWORKS OR FOR OTHER WIRELESS NETWORKS OTHER APPARATUS FOR THE TRANSMISSION OR RECEPTION OF VICE, IMAGES OR OTHER DATA, INCLUDING APPARATUS FOR COMMUNICATION IN A WIRED OR WIRELESS NETWORK (SUCH AS A LOCAL OR WIDE AREA NETWORK), OTHER THAN TRANSMISSION OR RECEPTION APPARATUS OF HEADING 8443, 8525, 8527 OR 8528:Other:Other(OLD tariff)" +85180000,microphones and stands therefor loudspeakers whether or not mounted in their enclosures headphones and earphones whether or not combined with a microphone and sets consisting of a microphone and one or more loudspeakers electric amplifiers electric sound amplifier sets +85181000,"MICROPHONES AND STANDS THEREFOR LOUDSPEAKERS, WHETHER OR NOT MOUNTED IN THEIR ENCLOSURES: HEADPHONES AND EARPHONES, WHETHER OR NOT COMBINED WITH A MICROPHONE, AND SETS CONSISTING OF A MICROPHONE AND ONE OR MORE LUUDSPEAKERS: AUDIO-FREQUENCY ELECTRIC AMPLIFIERS: ELECTRIC SOUND AMPLIFIER SETS::Microphones and stands therefor" +85182100,"MICROPHONES AND STANDS THEREFOR LOUDSPEAKERS, WHETHER 
OR NOT MOUNTED IN THEIR ENCLOSURES: HEADPHONES AND EARPHONES, WHETHER OR NOT COMBINED WITH A MICROPHONE, AND SETS CONSISTING OF A MICROPHONE AND ONE OR MORE LUUDSPEAKERS: AUDIO-FREQUENCY ELECTRIC AMPLIFIERS: ELECTRIC SOUND AMPLIFIER SETS::Single loudspeakers, mounted in their enclosures" +85182110,microphones and stands therefor loudspeakers whether or not mounted in their enclosures headphones and earphones whether or not combined with a microphone and sets consisting of a microphone and one or more loudspeakers electric amplifiers electric sound amplifier sets >> single loudspeakers mounted in their enclosures >> wireless kg +85182190,microphones and stands therefor loudspeakers whether or not mounted in their enclosures headphones and earphones whether or not combined with a microphone and sets consisting of a microphone and one or more loudspeakers electric amplifiers electric sound amplifier sets >> single loudspeakers mounted in their enclosures >> kg other +85182200,"MICROPHONES AND STANDS THEREFOR LOUDSPEAKERS, WHETHER OR NOT MOUNTED IN THEIR ENCLOSURES: HEADPHONES AND EARPHONES, WHETHER OR NOT COMBINED WITH A MICROPHONE, AND SETS CONSISTING OF A MICROPHONE AND ONE OR MORE LUUDSPEAKERS: AUDIO-FREQUENCY ELECTRIC AMPLIFIERS: ELECTRIC SOUND AMPLIFIER SETS::Multiple loudspeakers, mounted in the same enclosure" +85182210,microphones and stands therefor loudspeakers whether or not mounted in their enclosures headphones and earphones whether or not combined with a microphone and sets consisting of a microphone and one or more loudspeakers electric amplifiers electric sound amplifier sets >> multiple loudspeakers mounted in the same enclosure >> wireless kg +85182290,microphones and stands therefor loudspeakers whether or not mounted in their enclosures headphones and earphones whether or not combined with a microphone and sets consisting of a microphone and one or more loudspeakers electric amplifiers electric sound amplifier sets >> multiple loudspeakers mounted in the same enclosure >> kg other +85182900,"MICROPHONES AND STANDS THEREFOR LOUDSPEAKERS, WHETHER OR NOT MOUNTED IN THEIR ENCLOSURES: HEADPHONES AND EARPHONES, WHETHER OR NOT COMBINED WITH A MICROPHONE, AND SETS CONSISTING OF A MICROPHONE AND ONE OR MORE LUUDSPEAKERS: AUDIO-FREQUENCY ELECTRIC AMPLIFIERS: ELECTRIC SOUND AMPLIFIER SETS::Other" +85182910,microphones and stands therefor loudspeakers whether or not mounted in their enclosures headphones and earphones whether or not combined with a microphone and sets consisting of a microphone and one or more loudspeakers electric amplifiers electric sound amplifier sets >> other >> kg wireless +85182990,microphones and stands therefor loudspeakers whether or not mounted in their enclosures headphones and earphones whether or not combined with a microphone and sets consisting of a microphone and one or more loudspeakers electric amplifiers electric sound amplifier sets >> other >> kg other +85183000,"MICROPHONES AND STANDS THEREFOR LOUDSPEAKERS, WHETHER OR NOT MOUNTED IN THEIR ENCLOSURES: HEADPHONES AND EARPHONES, WHETHER OR NOT COMBINED WITH A MICROPHONE, AND SETS CONSISTING OF A MICROPHONE AND ONE OR MORE LUUDSPEAKERS: AUDIO-FREQUENCY ELECTRIC AMPLIFIERS: ELECTRIC SOUND AMPLIFIER SETS::Headphones and earphones, whether or not combined with a microphone, and sets consisting of a microphone and one or more loudspeakers" +85183011,microphones and stands therefor loudspeakers whether or not mounted in their enclosures headphones and earphones whether or not combined with a microphone 
and sets consisting of a microphone and one or more loudspeakers electric amplifiers electric sound amplifier sets >> headphones and earphones whether or not combined with a microphone and sets consisting of a microphone and one or more loudspeakers >> true wireless stereo tws sound channel not kg connected by wire +85183019,microphones and stands therefor loudspeakers whether or not mounted in their enclosures headphones and earphones whether or not combined with a microphone and sets consisting of a microphone and one or more loudspeakers electric amplifiers electric sound amplifier sets >> headphones and earphones whether or not combined with a microphone and sets consisting of a microphone and one or more loudspeakers >> kg other +85183020,microphones and stands therefor loudspeakers whether or not mounted in their enclosures headphones and earphones whether or not combined with a microphone and sets consisting of a microphone and one or more loudspeakers electric amplifiers electric sound amplifier sets >> headphones and earphones whether or not combined with a microphone and sets consisting of a microphone and one or more loudspeakers >> headphones and earphones whether or not kg combined with a microphone and capable of connecting only through wired medium +85183090,microphones and stands therefor loudspeakers whether or not mounted in their enclosures headphones and earphones whether or not combined with a microphone and sets consisting of a microphone and one or more loudspeakers electric amplifiers electric sound amplifier sets >> headphones and earphones whether or not combined with a microphone and sets consisting of a microphone and one or more loudspeakers >> other other +85184000,"MICROPHONES AND STANDS THEREFOR LOUDSPEAKERS, WHETHER OR NOT MOUNTED IN THEIR ENCLOSURES: HEADPHONES AND EARPHONES, WHETHER OR NOT COMBINED WITH A MICROPHONE, AND SETS CONSISTING OF A MICROPHONE AND ONE OR MORE LUUDSPEAKERS: AUDIO-FREQUENCY ELECTRIC AMPLIFIERS: ELECTRIC SOUND AMPLIFIER SETS::Audio-frequency electric amplifiers" +85185000,"MICROPHONES AND STANDS THEREFOR LOUDSPEAKERS, WHETHER OR NOT MOUNTED IN THEIR ENCLOSURES: HEADPHONES AND EARPHONES, WHETHER OR NOT COMBINED WITH A MICROPHONE, AND SETS CONSISTING OF A MICROPHONE AND ONE OR MORE LUUDSPEAKERS: AUDIO-FREQUENCY ELECTRIC AMPLIFIERS: ELECTRIC SOUND AMPLIFIER SETS::Electric sound amplifier sets" +85189000,"MICROPHONES AND STANDS THEREFOR LOUDSPEAKERS, WHETHER OR NOT MOUNTED IN THEIR ENCLOSURES: HEADPHONES AND EARPHONES, WHETHER OR NOT COMBINED WITH A MICROPHONE, AND SETS CONSISTING OF A MICROPHONE AND ONE OR MORE LUUDSPEAKERS: AUDIO-FREQUENCY ELECTRIC AMPLIFIERS: ELECTRIC SOUND AMPLIFIER SETS::Parts" +85190000, +85192000,"SOUND RECORDING OR REPRODUCING APPARATUS::Apparatus operated by coins, banknotes, bank cards, tokens or by other means of payment" +85193000,SOUND RECORDING OR REPRODUCING APPARATUS::Turntables (record-decks) +85195000,SOUND RECORDING OR REPRODUCING APPARATUS::Telephone answering machines(OLD tariff) +85198100,"SOUND RECORDING OR REPRODUCING APPARATUS::Using magnetic, optical or semiconductor media" +85198900, >> other other +85198910,SOUND RECORDING OR REPRODUCING APPARATUS:Other:Audio Compact disc player +85198920,SOUND RECORDING OR REPRODUCING APPARATUS:Other:Compact disc changer including mini disc player or laser disc player +85198930,SOUND RECORDING OR REPRODUCING APPARATUS:Other:Time Code recorder +85198940,SOUND RECORDING OR REPRODUCING APPARATUS:Other:MP-3 player +85198990,SOUND RECORDING OR REPRODUCING 
APPARATUS:Other:Others +85210000,video recording or reproducing apparatus whether or not incorporating a video tuner video recording or reproducing apparatus whether or not incorporating a video tuner +85211000,video recording or reproducing apparatus whether or not incorporating a video tuner video recording or reproducing apparatus whether or not incorporating a video tuner >> magnetic cassette magnetic cassette +85211011,"VIDEO RECORDING OR REPRODUCING APPARATUS, WHETHER OR NOT INCORPORATING A VIDEO TUNER:Magnetic tape-type :Professional video tape recorders with 3/4"" or 1"" tape" +85211012,"VIDEO RECORDING OR REPRODUCING APPARATUS, WHETHER OR NOT INCORPORATING A VIDEO TUNER:Magnetic tape-type :Video recorders betacam or betacam SP or digital betacam S-VHS or digital-S" +85211019,"VIDEO RECORDING OR REPRODUCING APPARATUS, WHETHER OR NOT INCORPORATING A VIDEO TUNER:Magnetic tape-type :Other" +85211021,"VIDEO RECORDING OR REPRODUCING APPARATUS, WHETHER OR NOT INCORPORATING A VIDEO TUNER:Magnetic tape-type :Professional video tape recorders with 3/4"" or 1"" tape" +85211022,"VIDEO RECORDING OR REPRODUCING APPARATUS, WHETHER OR NOT INCORPORATING A VIDEO TUNER:Magnetic tape-type :Video recorders betacam or betacam SP or digital betacam S-VHS or digital-S" +85211029,"VIDEO RECORDING OR REPRODUCING APPARATUS, WHETHER OR NOT INCORPORATING A VIDEO TUNER:Magnetic tape-type :Other" +85211091,"VIDEO RECORDING OR REPRODUCING APPARATUS, WHETHER OR NOT INCORPORATING A VIDEO TUNER:Magnetic tape-type :Professional video tape recorders with 3/4"" or 1"" tape solid state or otherwise" +85211092,"VIDEO RECORDING OR REPRODUCING APPARATUS, WHETHER OR NOT INCORPORATING A VIDEO TUNER:Magnetic tape-type :Video recorders betacam or betacam SP or digital betacam S-VHS or digital-S" +85211099,"VIDEO RECORDING OR REPRODUCING APPARATUS, WHETHER OR NOT INCORPORATING A VIDEO TUNER:Magnetic tape-type :Other" +85219000,video recording or reproducing apparatus whether or not incorporating a video tuner video recording or reproducing apparatus whether or not incorporating a video tuner >> other other +85219010,"VIDEO RECORDING OR REPRODUCING APPARATUS, WHETHER OR NOT INCORPORATING A VIDEO TUNER:Other :Video duplicating system with master and slave control" +85219020,"VIDEO RECORDING OR REPRODUCING APPARATUS, WHETHER OR NOT INCORPORATING A VIDEO TUNER:Other :DVD player" +85219090,"VIDEO RECORDING OR REPRODUCING APPARATUS, WHETHER OR NOT INCORPORATING A VIDEO TUNER:Other :Other" +85220000,parts and accessories suitable for use solely or principally with the apparatus of headings 8519 or 8521 +85221000,PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE APPARATUS OF HEADINGS 8519 TO 8521::Pick-up cartridges +85229000,PARTS AND ACCESSORIES SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE APPARATUS OF HEADINGS 8519 TO 8521::Other +85230000,discs tapes storage devices smart cards and other media for the recording of sound or of other whether or not phenomena including matrices and masters recorded for the production of discs but excluding products of chapter 37 magnetic media +85232100,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37::Cards incorporating a magnetic stripe" +85232900,discs tapes storage devices smart cards and other media for the recording of sound or of other whether or not phenomena 
including matrices and masters recorded for the production of discs but excluding products of chapter 37 magnetic media >> other +85232910,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Other:Audio cassettes" +85232920,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Other:Video cassettes" +85232930,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Other:Video magnetic tape including those in hubs and reels, rolls, pancakes and jumbo rolls" +85232940,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Other: 3/4"" and 1"" video cassettes" +85232950,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Other: 1/2"" video cassettes suitable to work with betacam, betacam SP/M II and VHS type VCR" +85232960,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Other:Other video cassettes and tapes 3/4"" and 1"" video cassettes" +85232970,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Other:All kinds of Magnetic discs" +85232980,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Other:Cartridge tape" +85232990,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Other:Other" +85234100,discs tapes storage devices smart cards and other media for the recording of sound or of other whether or not phenomena including matrices and masters recorded for the production of discs but excluding products of chapter 37 magnetic media >> unrecorded unrecorded +85234110,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Unrecorded:Compact Disk - 
Audio Video" +85234120,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Unrecorded:Compact Disc (Audio/Video)" +85234130,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Unrecorded:Matrices for production of records; prepared record blank" +85234140,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Unrecorded:Cartridge tape" +85234150,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Unrecorded:1/2"" Videocassette suitable to work with digital VCR" +85234160,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Unrecorded:DVD" +85234190,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Unrecorded:Other" +85234900,discs tapes storage devices smart cards and other media for the recording of sound or of other whether or not phenomena including matrices and masters recorded for the production of discs but excluding products of chapter 37 magnetic media >> other other +85234910,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Other:Compact Disc - Audio" +85234920,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Other:Compact Disc - Video" +85234930,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Other:Stamper for CD audio, CD video and CD-ROM" +85234940,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Other:Digital Video Disc" +85234950,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND 
MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Other:Matrices for production of records; prepared record blank" +85234960,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Other:Catridge tape" +85234970,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Other:1/2"" Videocassette sutiable to work with digital VCR" +85234990,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Other:Other" +85235100,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37::Solid-state non-volatile storage devices(OLD tariff)" +85235210,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Smart cards:SIM cards(OLD tariff)" +85235220,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Smart cards:Memory Cards(OLD tariff)" +85235290,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Smart cards:Other(OLD tariff)" +85235910,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Other:Proximity cards and tags(OLD tariff)" +85235990,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Other:Other(OLD tariff)" +85238010,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Other:Gramophone records(OLD tariff)" +85238020,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Other:Information technology software(OLD tariff)" +85238030,"DISCS, 
TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Other:Audio-visual news or audio visual views(OLD tariff)" +85238040,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Other:Children's video films(OLD tariff)" +85238050,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Other:Video tapes of educational nature(OLD tariff)" +85238060,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Other:2-D/ 3D computer graphics(OLD tariff)" +85238090,"DISCS, TAPES, SOLID-STATE NON-VOLATILE STORAGE DEVICES, ""SMART CARDS"" AND OTHER MEDIA FOR THE RECORDING OF SOUND OR OF OTHER PHENOMENA, WHETHER OR NOT RECORDED, INCLUDING MATRICES AND MASTERS FOR THE PRODUCTION OF DISCS, BUT EXCLUDING PRODUCTS OF CHAPTER 37:Other:Other(OLD tariff)" +85240000,incorporating screens without drivers or control circuits u u u u u u u u u +85241100,"FLAT PANEL DISPLAY MODULES, WHETHER OR NOT INCORPORATING TOUCH-SENSITIVE SCREENS: Of liquid crystals: Of liquid crystals" +85241110,incorporating screens without drivers or control circuits u u u u u u u u u >> of liquid crystals u >> for the goods of 8471 30 or 8471 41 u +85241120,incorporating screens without drivers or control circuits u u u u u u u u u >> of liquid crystals u >> for the goods of 8517 13 or 8517 14 u +85241130,incorporating screens without drivers or control circuits u u u u u u u u u >> of liquid crystals u >> for the goods of 8528 72 or 8528 73 u +85241190,incorporating screens without drivers or control circuits u u u u u u u u u >> of liquid crystals u >> other +85241200,"FLAT PANEL DISPLAY MODULES, WHETHER OR NOT INCORPORATING TOUCH-SENSITIVE SCREENS::Of organic light-emitting diodes (OLED)" +85241210,incorporating screens without drivers or control circuits u u u u u u u u u >> of organic diodes oled u >> for the goods of 8471 30 or 8471 41 u +85241220,incorporating screens without drivers or control circuits u u u u u u u u u >> of organic diodes oled u >> for the goods of 8517 13 or 8517 14 u +85241230,incorporating screens without drivers or control circuits u u u u u u u u u >> of organic diodes oled u >> for the goods of 8528 72 or 8528 73 u +85241290,incorporating screens without drivers or control circuits u u u u u u u u u >> of organic diodes oled u >> other +85241900,"FLAT PANEL DISPLAY MODULES, WHETHER OR NOT INCORPORATING TOUCH-SENSITIVE SCREENS:oth:Other" +85241910,incorporating screens without drivers or control circuits u u u u u u u u u >> other u >> for the goods of 8471 30 or 8471 41 u +85241920,incorporating screens without drivers or control circuits u u u u u u u u u >> other u >> for the goods of 8517 13 or 8517 14 u for the goods of 8528 72 or 8528 73 +85241930,incorporating screens without drivers or control 
circuits u u u u u u u u u >> other u >> u other u +85241990,incorporating screens without drivers or control circuits u u u u u u u u u >> other u >> other u u u u reproducing apparatus television cameras digital cameras and video camera recorders +85249100,"FLAT PANEL DISPLAY MODULES, WHETHER OR NOT INCORPORATING TOUCH-SENSITIVE SCREENS::Of liquid crystal" +85249110,incorporating screens without drivers or control circuits u u u u u u u u u >> of liquid crystals u >> for the goods of 8471 30 or 8471 41 u +85249120,incorporating screens without drivers or control circuits u u u u u u u u u >> of liquid crystals u >> for the goods of 8517 13 or 8517 14 u +85249130,incorporating screens without drivers or control circuits u u u u u u u u u >> of liquid crystals u >> for the goods of 8528 72 or 8528 73 u +85249190,incorporating screens without drivers or control circuits u u u u u u u u u >> of liquid crystals u >> other +85249200,"FLAT PANEL DISPLAY MODULES, WHETHER OR NOT INCORPORATING TOUCH-SENSITIVE SCREENS::Of organic light-emitting diodes (OLED)" +85249210,incorporating screens without drivers or control circuits u u u u u u u u u >> of organic diodes oled u >> for the goods of 8471 30 or 8471 41 u +85249220,incorporating screens without drivers or control circuits u u u u u u u u u >> of organic diodes oled u >> for the goods of 8517 13 or 8517 14 u +85249230,incorporating screens without drivers or control circuits u u u u u u u u u >> of organic diodes oled u >> for the goods of 8528 72 or 8528 73 u +85249290,incorporating screens without drivers or control circuits u u u u u u u u u >> of organic diodes oled u >> other +85249900,"FLAT PANEL DISPLAY MODULES, WHETHER OR NOT INCORPORATING TOUCH-SENSITIVE SCREENS::Other" +85249910,incorporating screens without drivers or control circuits u u u u u u u u u >> other u >> for the goods of 8471 30 or 8471 41 +85249920,incorporating screens without drivers or control circuits u u u u u u u u u >> other u >> for the goods of 8517 13 or 8517 14 u +85249930,incorporating screens without drivers or control circuits u u u u u u u u u >> other u >> for the goods of 8528 72 or 8528 73 u +85249990,incorporating screens without drivers or control circuits u u u u u u u u u >> other u >> other u +85250000,transmission apparatus for +85255000,transmission apparatus for >> transmission apparatus +85255010,"TRANSMISSION APPARATUS FOR RADIO-BROADCASTING OR TELEVISION, WHETHER OR NOT INCORPORATING RECEPTION APPARATUS OR SOUND RECORDING OR REPRODUCING APPARATUS; TELEVISION CAMERAS, DIGITAL CAMERAS AND VIDEO CAMERA RECORDERS:Transmission apparatus:Radio broadcast transmitter" +85255020,"TRANSMISSION APPARATUS FOR RADIO-BROADCASTING OR TELEVISION, WHETHER OR NOT INCORPORATING RECEPTION APPARATUS OR SOUND RECORDING OR REPRODUCING APPARATUS; TELEVISION CAMERAS, DIGITAL CAMERAS AND VIDEO CAMERA RECORDERS:Transmission apparatus:TV broadcast transmitter" +85255030,"TRANSMISSION APPARATUS FOR RADIO-BROADCASTING OR TELEVISION, WHETHER OR NOT INCORPORATING RECEPTION APPARATUS OR SOUND RECORDING OR REPRODUCING APPARATUS; TELEVISION CAMERAS, DIGITAL CAMERAS AND VIDEO CAMERA RECORDERS:Transmission apparatus:Broadcast equipment sub-system" +85255040,"TRANSMISSION APPARATUS FOR RADIO-BROADCASTING OR TELEVISION, WHETHER OR NOT INCORPORATING RECEPTION APPARATUS OR SOUND RECORDING OR REPRODUCING APPARATUS; TELEVISION CAMERAS, DIGITAL CAMERAS AND VIDEO CAMERA RECORDERS:Transmission apparatus:Communication jamming equipment" +85255050,"TRANSMISSION APPARATUS FOR 
RADIO-BROADCASTING OR TELEVISION, WHETHER OR NOT INCORPORATING RECEPTION APPARATUS OR SOUND RECORDING OR REPRODUCING APPARATUS; TELEVISION CAMERAS, DIGITAL CAMERAS AND VIDEO CAMERA RECORDERS:Transmission apparatus:Wireless microphone(OLD tariff)" +85255090,"TRANSMISSION APPARATUS FOR RADIO-BROADCASTING OR TELEVISION, WHETHER OR NOT INCORPORATING RECEPTION APPARATUS OR SOUND RECORDING OR REPRODUCING APPARATUS; TELEVISION CAMERAS, DIGITAL CAMERAS AND VIDEO CAMERA RECORDERS:Transmission apparatus:Other" +85256000,"TRANSMISSION APPARATUS FOR RADIO-BROADCASTING OR TELEVISION, WHETHER OR NOT INCORPORATING RECEPTION APPARATUS OR SOUND RECORDING OR REPRODUCING APPARATUS; TELEVISION CAMERAS, DIGITAL CAMERAS AND VIDEO CAMERA RECORDERS:Transmission apparatus incorporating reception apparatus:Transmission apparatus incorporating reception apparatus" +85256011,"TRANSMISSION APPARATUS FOR RADIO-BROADCASTING OR TELEVISION, WHETHER OR NOT INCORPORATING RECEPTION APPARATUS OR SOUND RECORDING OR REPRODUCING APPARATUS; TELEVISION CAMERAS, DIGITAL CAMERAS AND VIDEO CAMERA RECORDERS:Transmission apparatus incorporating reception apparatus:Walkie talkie set(OLD tariff)" +85256012,"TRANSMISSION APPARATUS FOR RADIO-BROADCASTING OR TELEVISION, WHETHER OR NOT INCORPORATING RECEPTION APPARATUS OR SOUND RECORDING OR REPRODUCING APPARATUS; TELEVISION CAMERAS, DIGITAL CAMERAS AND VIDEO CAMERA RECORDERS:Transmission apparatus incorporating reception apparatus:Marine radio communication equipment(OLD tariff)" +85256013,"TRANSMISSION APPARATUS FOR RADIO-BROADCASTING OR TELEVISION, WHETHER OR NOT INCORPORATING RECEPTION APPARATUS OR SOUND RECORDING OR REPRODUCING APPARATUS; TELEVISION CAMERAS, DIGITAL CAMERAS AND VIDEO CAMERA RECORDERS:Transmission apparatus incorporating reception apparatus:Amateur radio equipment(OLD tariff)" +85256019,"TRANSMISSION APPARATUS FOR RADIO-BROADCASTING OR TELEVISION, WHETHER OR NOT INCORPORATING RECEPTION APPARATUS OR SOUND RECORDING OR REPRODUCING APPARATUS; TELEVISION CAMERAS, DIGITAL CAMERAS AND VIDEO CAMERA RECORDERS:Transmission apparatus incorporating reception apparatus:Other(OLD tariff)" +85256091,"TRANSMISSION APPARATUS FOR RADIO-BROADCASTING OR TELEVISION, WHETHER OR NOT INCORPORATING RECEPTION APPARATUS OR SOUND RECORDING OR REPRODUCING APPARATUS; TELEVISION CAMERAS, DIGITAL CAMERAS AND VIDEO CAMERA RECORDERS:Transmission apparatus incorporating reception apparatus:VSAT terminals(OLD tariff)" +85256092,"TRANSMISSION APPARATUS FOR RADIO-BROADCASTING OR TELEVISION, WHETHER OR NOT INCORPORATING RECEPTION APPARATUS OR SOUND RECORDING OR REPRODUCING APPARATUS; TELEVISION CAMERAS, DIGITAL CAMERAS AND VIDEO CAMERA RECORDERS:Transmission apparatus incorporating reception apparatus:Other satellite communication equipment(OLD tariff)" +85256099,"TRANSMISSION APPARATUS FOR RADIO-BROADCASTING OR TELEVISION, WHETHER OR NOT INCORPORATING RECEPTION APPARATUS OR SOUND RECORDING OR REPRODUCING APPARATUS; TELEVISION CAMERAS, DIGITAL CAMERAS AND VIDEO CAMERA RECORDERS:Transmission apparatus incorporating reception apparatus:Other(OLD tariff)" +85258010,"TRANSMISSION APPARATUS FOR RADIO-BROADCASTING OR TELEVISION, WHETHER OR NOT INCORPORATING RECEPTION APPARATUS OR SOUND RECORDING OR REPRODUCING APPARATUS; TELEVISION CAMERAS, DIGITAL CAMERAS AND VIDEO CAMERA RECORDERS:Television cameras, digital cameras and video camera recorders:Television Cameras(OLD tariff)" +85258020,"TRANSMISSION APPARATUS FOR RADIO-BROADCASTING OR TELEVISION, WHETHER OR NOT INCORPORATING RECEPTION APPARATUS OR SOUND RECORDING 
OR REPRODUCING APPARATUS; TELEVISION CAMERAS, DIGITAL CAMERAS AND VIDEO CAMERA RECORDERS:Television cameras, digital cameras and video camera recorders:Digital cameras(OLD tariff)" +85258030,"TRANSMISSION APPARATUS FOR RADIO-BROADCASTING OR TELEVISION, WHETHER OR NOT INCORPORATING RECEPTION APPARATUS OR SOUND RECORDING OR REPRODUCING APPARATUS; TELEVISION CAMERAS, DIGITAL CAMERAS AND VIDEO CAMERA RECORDERS:Television cameras, digital cameras and video camera recorders:video camera recorders(OLD tariff)" +85258090,"TRANSMISSION APPARATUS FOR RADIO-BROADCASTING OR TELEVISION, WHETHER OR NOT INCORPORATING RECEPTION APPARATUS OR SOUND RECORDING OR REPRODUCING APPARATUS; TELEVISION CAMERAS, DIGITAL CAMERAS AND VIDEO CAMERA RECORDERS:Television cameras, digital cameras and video camera recorders:Other(OLD tariff)" +85258100,"TRANSMISSION APPARATUS FOR RADIO-BROADCASTING OR TELEVISION, WHETHER OR NOT INCORPORATING RECEPTION APPARATUS OR SOUND RECORDING OR REPRODUCING APPARATUS; TELEVISION CAMERAS, DIGITAL CAMERAS AND VIDEO CAMERA RECORDERS:high:High-speed goods as specified in Subheading Note 1 to this Chapter" +85258200,"TRANSMISSION APPARATUS FOR RADIO-BROADCASTING OR TELEVISION, WHETHER OR NOT INCORPORATING RECEPTION APPARATUS OR SOUND RECORDING OR REPRODUCING APPARATUS; TELEVISION CAMERAS, DIGITAL CAMERAS AND VIDEO CAMERA RECORDERS::Other, radiation-hardened or radiation-tolerant goods as specified in Sub-heading Note 2 to this Chapter" +85258300,"TRANSMISSION APPARATUS FOR RADIO-BROADCASTING OR TELEVISION, WHETHER OR NOT INCORPORATING RECEPTION APPARATUS OR SOUND RECORDING OR REPRODUCING APPARATUS; TELEVISION CAMERAS, DIGITAL CAMERAS AND VIDEO CAMERA RECORDERS:oth:Other, night vision goods as specified in Sub-heading Note 3 to this Chapter" +85258900,"TRANSMISSION APPARATUS FOR RADIO-BROADCASTING OR TELEVISION, WHETHER OR NOT INCORPORATING RECEPTION APPARATUS OR SOUND RECORDING OR REPRODUCING APPARATUS; TELEVISION CAMERAS, DIGITAL CAMERAS AND VIDEO CAMERA RECORDERS::Other" +85260000,radar apparatus radio navigational aid apparatus and radio remote control apparatus +85261000,"RADAR APPARATUS, RADIO NAVIGATIONAL AID APPARATUS AND RADIO REMOTE CONTROL APPARATUS::Radar apparatus" +85269100,radar apparatus radio navigational aid apparatus and radio remote control apparatus >> radio navigational aid apparatus +85269110,"RADAR APPARATUS, RADIO NAVIGATIONAL AID APPARATUS AND RADIO REMOTE CONTROL APPARATUS:Radio navigational aid apparatus:Direction measuring equipment" +85269120,"RADAR APPARATUS, RADIO NAVIGATIONAL AID APPARATUS AND RADIO REMOTE CONTROL APPARATUS:Radio navigational aid apparatus:Instrument landing system" +85269130,"RADAR APPARATUS, RADIO NAVIGATIONAL AID APPARATUS AND RADIO REMOTE CONTROL APPARATUS:Radio navigational aid apparatus:Direction finding equipment" +85269140,"RADAR APPARATUS, RADIO NAVIGATIONAL AID APPARATUS AND RADIO REMOTE CONTROL APPARATUS:Radio navigational aid apparatus:Non-directional beacon" +85269150,"RADAR APPARATUS, RADIO NAVIGATIONAL AID APPARATUS AND RADIO REMOTE CONTROL APPARATUS:Radio navigational aid apparatus:VHF omni range equipment" +85269190,"RADAR APPARATUS, RADIO NAVIGATIONAL AID APPARATUS AND RADIO REMOTE CONTROL APPARATUS:Radio navigational aid apparatus:Other" +85269200,"RADAR APPARATUS, RADIO NAVIGATIONAL AID APPARATUS AND RADIO REMOTE CONTROL APPARATUS::Radio remote control apparatus" +85270000,reception apparatus for whether or not combined in the same housing with sound recording or reproducing apparatus or a clock receivers capable of operating
without an external source of power +85271200,"RECEPTION APPARATUS FOR RADIO-BROADCASTING WHETHER OR NOT COMBINED, IN THE SAME HOUSING, WITH SOUND RECORDING OR REPRODUCING APPARATUS OR A CLOCK::Pocket-size radio cassette-players" +85271300,"RECEPTION APPARATUS FOR RADIO-BROADCASTING WHETHER OR NOT COMBINED, IN THE SAME HOUSING, WITH SOUND RECORDING OR REPRODUCING APPARATUS OR A CLOCK::Other apparatus combined with sound recording or reproducing apparatus" +85271900,"RECEPTION APPARATUS FOR RADIO-BROADCASTING WHETHER OR NOT COMBINED, IN THE SAME HOUSING, WITH SOUND RECORDING OR REPRODUCING APPARATUS OR A CLOCK::Other" +85272100,"RECEPTION APPARATUS FOR RADIO-BROADCASTING WHETHER OR NOT COMBINED, IN THE SAME HOUSING, WITH SOUND RECORDING OR REPRODUCING APPARATUS OR A CLOCK::Combined with sound recording or reproducing apparatus" +85272900,"RECEPTION APPARATUS FOR RADIO-BROADCASTING WHETHER OR NOT COMBINED, IN THE SAME HOUSING, WITH SOUND RECORDING OR REPRODUCING APPARATUS OR A CLOCK::Other" +85279100,"RECEPTION APPARATUS FOR RADIO-BROADCASTING WHETHER OR NOT COMBINED, IN THE SAME HOUSING, WITH SOUND RECORDING OR REPRODUCING APPARATUS OR A CLOCK::Combined with sound recording or reproducing apparatus" +85279200,"RECEPTION APPARATUS FOR RADIO-BROADCASTING WHETHER OR NOT COMBINED, IN THE SAME HOUSING, WITH SOUND RECORDING OR REPRODUCING APPARATUS OR A CLOCK::Not combined with sound recording or reproducing apparatus but combined with a clock" +85279900,"RECEPTION APPARATUS FOR RADIO-BROADCASTING WHETHER OR NOT COMBINED, IN THE SAME HOUSING, WITH SOUND RECORDING OR REPRODUCING APPARATUS OR A CLOCK:Other:Other" +85279911,"RECEPTION APPARATUS FOR RADIO-BROADCASTING WHETHER OR NOT COMBINED, IN THE SAME HOUSING, WITH SOUND RECORDING OR REPRODUCING APPARATUS OR A CLOCK:Other:Radio pagers(OLD tariff)" +85279912,"RECEPTION APPARATUS FOR RADIO-BROADCASTING WHETHER OR NOT COMBINED, IN THE SAME HOUSING, WITH SOUND RECORDING OR REPRODUCING APPARATUS OR A CLOCK:Other:Demodulators(OLD tariff)" +85279919,"RECEPTION APPARATUS FOR RADIO-BROADCASTING WHETHER OR NOT COMBINED, IN THE SAME HOUSING, WITH SOUND RECORDING OR REPRODUCING APPARATUS OR A CLOCK:Other:Other(OLD tariff)" +85279990,"RECEPTION APPARATUS FOR RADIO-BROADCASTING WHETHER OR NOT COMBINED, IN THE SAME HOUSING, WITH SOUND RECORDING OR REPRODUCING APPARATUS OR A CLOCK:Other:Other(OLD tariff)" +85280000,monitors and projectors not incorporating television reception reception apparatus apparatus for television whether or not incorporating or sound or video recording or reproducing apparatus tube monitors monitors and projectors not incorporating television reception reception apparatus apparatus for television whether or not incorporating or sound or video recording or reproducing apparatus tube monitors +85284100,"MONITORS AND PROJECTORS, NOT INCORPORATING TELEVISION RECEPTION APPARATUS, RECEPTION APPARATUS FOR TELEVISION, WHETHER OR NOT INCORPORATING RADIO-BROADCASTRECEIVERS OR SOUND OR VIDEO RECORDING OR REPRODUCING APPARATUS::Of a kind solely or principally used in an automatic data processing system of heading 8471(OLD tariff)" +85284200,"MONITORS AND PROJECTORS, NOT INCORPORATING TELEVISION RECEPTION APPARATUS, RECEPTION APPARATUS FOR TELEVISION, WHETHER OR NOT INCORPORATING RADIO-BROADCASTRECEIVERS OR SOUND OR VIDEO RECORDING OR REPRODUCING APPARATUS::Capable of directly connecting to and designed for use with an automatic data processing machine of heading 8471" +85284900,"MONITORS AND PROJECTORS, NOT INCORPORATING TELEVISION RECEPTION 
APPARATUS, RECEPTION APPARATUS FOR TELEVISION, WHETHER OR NOT INCORPORATING RADIO-BROADCASTRECEIVERS OR SOUND OR VIDEO RECORDING OR REPRODUCING APPARATUS::Other" +85285100,"MONITORS AND PROJECTORS, NOT INCORPORATING TELEVISION RECEPTION APPARATUS, RECEPTION APPARATUS FOR TELEVISION, WHETHER OR NOT INCORPORATING RADIO-BROADCASTRECEIVERS OR SOUND OR VIDEO RECORDING OR REPRODUCING APPARATUS::Of a kind solely or principally used in an automatic data processing system of heading 8471(OLD tariff)" +85285200,"MONITORS AND PROJECTORS, NOT INCORPORATING TELEVISION RECEPTION APPARATUS, RECEPTION APPARATUS FOR TELEVISION, WHETHER OR NOT INCORPORATING RADIO-BROADCASTRECEIVERS OR SOUND OR VIDEO RECORDING OR REPRODUCING APPARATUS::Capable of directly connecting to and designed for use with an automatic data processing machine of heading 8471" +85285900,"MONITORS AND PROJECTORS, NOT INCORPORATING TELEVISION RECEPTION APPARATUS, RECEPTION APPARATUS FOR TELEVISION, WHETHER OR NOT INCORPORATING RADIO-BROADCASTRECEIVERS OR SOUND OR VIDEO RECORDING OR REPRODUCING APPARATUS::Other" +85286100,"MONITORS AND PROJECTORS, NOT INCORPORATING TELEVISION RECEPTION APPARATUS, RECEPTION APPARATUS FOR TELEVISION, WHETHER OR NOT INCORPORATING RADIO-BROADCASTRECEIVERS OR SOUND OR VIDEO RECORDING OR REPRODUCING APPARATUS::Of a kind solely or principally used in an automatic data processing system of heading 8471(OLD tariff)" +85286200,"MONITORS AND PROJECTORS, NOT INCORPORATING TELEVISION RECEPTION APPARATUS, RECEPTION APPARATUS FOR TELEVISION, WHETHER OR NOT INCORPORATING RADIO-BROADCASTRECEIVERS OR SOUND OR VIDEO RECORDING OR REPRODUCING APPARATUS::Capable of directly connecting to and designed for use with an automatic data processing machine of heading 8471" +85286900,"MONITORS AND PROJECTORS, NOT INCORPORATING TELEVISION RECEPTION APPARATUS, RECEPTION APPARATUS FOR TELEVISION, WHETHER OR NOT INCORPORATING RADIO-BROADCASTRECEIVERS OR SOUND OR VIDEO RECORDING OR REPRODUCING APPARATUS::Other" +85287100,"MONITORS AND PROJECTORS, NOT INCORPORATING TELEVISION RECEPTION APPARATUS, RECEPTION APPARATUS FOR TELEVISION, WHETHER OR NOT INCORPORATING RADIO-BROADCASTRECEIVERS OR SOUND OR VIDEO RECORDING OR REPRODUCING APPARATUS::Not designed to incorporate a video display or screen" +85287200,monitors and projectors not incorporating television reception reception apparatus apparatus for television whether or not incorporating or sound or video recording or reproducing apparatus tube monitors monitors and projectors not incorporating television reception reception apparatus apparatus for television whether or not incorporating or sound or video recording or reproducing apparatus tube monitors >> other colour other colour +85287211,"MONITORS AND PROJECTORS, NOT INCORPORATING TELEVISION RECEPTION APPARATUS, RECEPTION APPARATUS FOR TELEVISION, WHETHER OR NOT INCORPORATING RADIO-BROADCASTRECEIVERS OR SOUND OR VIDEO RECORDING OR REPRODUCING APPARATUS:Other, colour:Television set of screen size upto 36 cm" +85287212,"MONITORS AND PROJECTORS, NOT INCORPORATING TELEVISION RECEPTION APPARATUS, RECEPTION APPARATUS FOR TELEVISION, WHETHER OR NOT INCORPORATING RADIO-BROADCASTRECEIVERS OR SOUND OR VIDEO RECORDING OR REPRODUCING APPARATUS:Other, colour:Television set of screen size exceeding 36 cm but not exceeding 54 cm" +85287213,"MONITORS AND PROJECTORS, NOT INCORPORATING TELEVISION RECEPTION APPARATUS, RECEPTION APPARATUS FOR TELEVISION, WHETHER OR NOT INCORPORATING RADIO-BROADCASTRECEIVERS OR SOUND OR VIDEO RECORDING OR REPRODUCING 
APPARATUS:Other, colour:Television set of screen size exceeding 54 cm but not exceeding 68 cm" +85287214,"MONITORS AND PROJECTORS, NOT INCORPORATING TELEVISION RECEPTION APPARATUS, RECEPTION APPARATUS FOR TELEVISION, WHETHER OR NOT INCORPORATING RADIO-BROADCASTRECEIVERS OR SOUND OR VIDEO RECORDING OR REPRODUCING APPARATUS:Other, colour:Television set of screen size exceeding 68 cm but not exceeding 74 cm" +85287215,"MONITORS AND PROJECTORS, NOT INCORPORATING TELEVISION RECEPTION APPARATUS, RECEPTION APPARATUS FOR TELEVISION, WHETHER OR NOT INCORPORATING RADIO-BROADCASTRECEIVERS OR SOUND OR VIDEO RECORDING OR REPRODUCING APPARATUS:Other, colour:Television set of screen size exceeding 74 cm but not exceeding 87 cm" +85287216,"MONITORS AND PROJECTORS, NOT INCORPORATING TELEVISION RECEPTION APPARATUS, RECEPTION APPARATUS FOR TELEVISION, WHETHER OR NOT INCORPORATING RADIO-BROADCASTRECEIVERS OR SOUND OR VIDEO RECORDING OR REPRODUCING APPARATUS:Other, colour:Television set of screen size exceeding 87 cm but not exceeding 105 cm" +85287217,"MONITORS AND PROJECTORS, NOT INCORPORATING TELEVISION RECEPTION APPARATUS, RECEPTION APPARATUS FOR TELEVISION, WHETHER OR NOT INCORPORATING RADIO-BROADCASTRECEIVERS OR SOUND OR VIDEO RECORDING OR REPRODUCING APPARATUS:Other, colour:Television set of screen size exceeding 105 cm" +85287218,"MONITORS AND PROJECTORS, NOT INCORPORATING TELEVISION RECEPTION APPARATUS, RECEPTION APPARATUS FOR TELEVISION, WHETHER OR NOT INCORPORATING RADIO-BROADCASTRECEIVERS OR SOUND OR VIDEO RECORDING OR REPRODUCING APPARATUS:Other, colour:Liquid crystal display television set of screen size below 63 cm" +85287219,"MONITORS AND PROJECTORS, NOT INCORPORATING TELEVISION RECEPTION APPARATUS, RECEPTION APPARATUS FOR TELEVISION, WHETHER OR NOT INCORPORATING RADIO-BROADCASTRECEIVERS OR SOUND OR VIDEO RECORDING OR REPRODUCING APPARATUS:Other, colour:Other" +85287300,monitors and projectors not incorporating television reception reception apparatus apparatus for television whether or not incorporating or sound or video recording or reproducing apparatus tube monitors monitors and projectors not incorporating television reception reception apparatus apparatus for television whether or not incorporating or sound or video recording or reproducing apparatus tube monitors >> other monochrome other monochrome +85287310,"MONITORS AND PROJECTORS, NOT INCORPORATING TELEVISION RECEPTION APPARATUS, RECEPTION APPARATUS FOR TELEVISION, WHETHER OR NOT INCORPORATING RADIO-BROADCASTRECEIVERS OR SOUND OR VIDEO RECORDING OR REPRODUCING APPARATUS:Other, black and white or other monochrome:Liquid crystal display television set of screen size below 25 cm" +85287390,"MONITORS AND PROJECTORS, NOT INCORPORATING TELEVISION RECEPTION APPARATUS, RECEPTION APPARATUS FOR TELEVISION, WHETHER OR NOT INCORPORATING RADIO-BROADCASTRECEIVERS OR SOUND OR VIDEO RECORDING OR REPRODUCING APPARATUS:Other, black and white or other monochrome:Other" +85290000,parts suitable for use solely or principally with the apparatus of headings 8524 to 8528 +85291000,parts suitable for use solely or principally with the apparatus of headings 8524 to 8528 >> aerials and aerial reflectors of all kinds parts suitable for use therewith dish antenna +85291011,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE APPARATUS OF HEADINGS 8525 TO 8528:Aerials and aerial reflectors of all kinds; parts suitable for use therewith:For communication jamming equipment +85291012,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE APPARATUS OF HEADINGS 8525 
TO 8528:Aerials and aerial reflectors of all kinds; parts suitable for use therewith:For amateur radio communication equipment +85291019,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE APPARATUS OF HEADINGS 8525 TO 8528:Aerials and aerial reflectors of all kinds; parts suitable for use therewith:Other +85291021,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE APPARATUS OF HEADINGS 8525 TO 8528:Aerials and aerial reflectors of all kinds; parts suitable for use therewith:For communication jamming equipment +85291022,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE APPARATUS OF HEADINGS 8525 TO 8528:Aerials and aerial reflectors of all kinds; parts suitable for use therewith:For amateur radio communication equipment +85291029,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE APPARATUS OF HEADINGS 8525 TO 8528:Aerials and aerial reflectors of all kinds; parts suitable for use therewith:Other +85291091,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE APPARATUS OF HEADINGS 8525 TO 8528:Aerials and aerial reflectors of all kinds; parts suitable for use therewith:For communication jamming equipment +85291092,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE APPARATUS OF HEADINGS 8525 TO 8528:Aerials and aerial reflectors of all kinds; parts suitable for use therewith:For amateur radio communication equipment +85291099,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE APPARATUS OF HEADINGS 8525 TO 8528:Aerials and aerial reflectors of all kinds; parts suitable for use therewith:Other +85299000,parts suitable for use solely or principally with the apparatus of headings 8524 to 8528 >> other +85299010,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE APPARATUS OF HEADINGS 8525 TO 8528:Other :For communication jamming equipment +85299020,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE APPARATUS OF HEADINGS 8525 TO 8528:Other :For amateur radio communication equipment +85299030,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE APPARATUS OF HEADINGS 8525 TO 8528:Other :Open cell for television set(OLD tariff) +85299090,PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE APPARATUS OF HEADINGS 8525 TO 8528:Other :Other +85300000,electrical signalling safety or traffic control equipment for railways tramways roads inland waterways parking facilities port installations or airfields other than those of heading 8608 +85301000,electrical signalling safety or traffic control equipment for railways tramways roads inland waterways parking facilities port installations or airfields other than those of heading 8608 >> equipment for railways or tramways +85301010,"ELECTRICAL SIGNALLING, SAFETY OR TRAFFIC CONTROL EQUIPMENT FOR RAILWAYS, TRAMWAYS, ROADS, INLAND WATERWAYS, PARKING FACILITIES, PORT INSTALLATIONS OR AIRFIELDS (OTHER THAN THOSE OF HEADING 8608):Equipment for railways or tramways:For railways" +85301020,"ELECTRICAL SIGNALLING, SAFETY OR TRAFFIC CONTROL EQUIPMENT FOR RAILWAYS, TRAMWAYS, ROADS, INLAND WATERWAYS, PARKING FACILITIES, PORT INSTALLATIONS OR AIRFIELDS (OTHER THAN THOSE OF HEADING 8608):Equipment for railways or tramways:For tramways" +85308000,"ELECTRICAL SIGNALLING, SAFETY OR TRAFFIC CONTROL EQUIPMENT FOR RAILWAYS, TRAMWAYS, ROADS, INLAND WATERWAYS, PARKING FACILITIES, PORT INSTALLATIONS OR AIRFIELDS (OTHER THAN THOSE OF HEADING 8608)::Other equipment" +85309000,"ELECTRICAL SIGNALLING, SAFETY OR TRAFFIC CONTROL EQUIPMENT FOR RAILWAYS, TRAMWAYS, ROADS, INLAND WATERWAYS, PARKING FACILITIES, PORT INSTALLATIONS OR AIRFIELDS (OTHER THAN THOSE OF 
HEADING 8608)::Parts" +85310000,electric sound or visual signalling apparatus for example bells sirens indicator panels burglar or fire alarms other than those of heading 8512 or 8530 +85311000,electric sound or visual signalling apparatus for example bells sirens indicator panels burglar or fire alarms other than those of heading 8512 or 8530 >> burglar or fire alarms and similar apparatus +85311010,"ELECTRIC SOUND OR VISUAL SIGNALLING APPARATUS (FOR EXAMPLE, BELLS, SIRENS, INDICATOR PANELS, BURGLAR OR FIRE ALARMS), OTHER THAN THOSE OF HEADING 8512 OR 8530:Burglar or fire alarms and similar apparatus :Burglar alarm" +85311020,"ELECTRIC SOUND OR VISUAL SIGNALLING APPARATUS (FOR EXAMPLE, BELLS, SIRENS, INDICATOR PANELS, BURGLAR OR FIRE ALARMS), OTHER THAN THOSE OF HEADING 8512 OR 8530:Burglar or fire alarms and similar apparatus :Fire alarm" +85311090,"ELECTRIC SOUND OR VISUAL SIGNALLING APPARATUS (FOR EXAMPLE, BELLS, SIRENS, INDICATOR PANELS, BURGLAR OR FIRE ALARMS), OTHER THAN THOSE OF HEADING 8512 OR 8530:Burglar or fire alarms and similar apparatus :Other" +85312000,"ELECTRIC SOUND OR VISUAL SIGNALLING APPARATUS (FOR EXAMPLE, BELLS, SIRENS, INDICATOR PANELS, BURGLAR OR FIRE ALARMS), OTHER THAN THOSE OF HEADING 8512 OR 8530::Indicator panels incorporating liquid crystal devices (LCD) or light emitting diodes (LED)" +85318000,"ELECTRIC SOUND OR VISUAL SIGNALLING APPARATUS (FOR EXAMPLE, BELLS, SIRENS, INDICATOR PANELS, BURGLAR OR FIRE ALARMS), OTHER THAN THOSE OF HEADING 8512 OR 8530::Other apparatus" +85319000,"ELECTRIC SOUND OR VISUAL SIGNALLING APPARATUS (FOR EXAMPLE, BELLS, SIRENS, INDICATOR PANELS, BURGLAR OR FIRE ALARMS), OTHER THAN THOSE OF HEADING 8512 OR 8530::Parts" +85320000,electrical capacitors fixed variable or adjustable +85321000,"ELECTRICAL CAPACITORS, FIXED, VARIABLE OR ADJUSTABLE (PRE-SET)::Fixed capacitors designed for use in 50 or 60 Hz circuits and having a reactive power handling capacity of not less than 0.5 kvar (power capacitors)" +85322100,"ELECTRICAL CAPACITORS, FIXED, VARIABLE OR ADJUSTABLE (PRE-SET)::Tantalum" +85322200,"ELECTRICAL CAPACITORS, FIXED, VARIABLE OR ADJUSTABLE (PRE-SET)::Aluminium electrolytic" +85322300,"ELECTRICAL CAPACITORS, FIXED, VARIABLE OR ADJUSTABLE (PRE-SET)::Ceramic dielectric, single layer" +85322400,"ELECTRICAL CAPACITORS, FIXED, VARIABLE OR ADJUSTABLE (PRE-SET)::Ceramic dielectric, multilayer" +85322500,"ELECTRICAL CAPACITORS, FIXED, VARIABLE OR ADJUSTABLE (PRE-SET)::Dielectric of paper or plastics" +85322900,electrical capacitors fixed variable or adjustable >> other other +85322910,"ELECTRICAL CAPACITORS, FIXED, VARIABLE OR ADJUSTABLE (PRE-SET):Other :Of dielectric of mica" +85322990,"ELECTRICAL CAPACITORS, FIXED, VARIABLE OR ADJUSTABLE (PRE-SET):Other :Other" +85323000,"ELECTRICAL CAPACITORS, FIXED, VARIABLE OR ADJUSTABLE (PRE-SET)::Variable or adjustable (pre-set) capacitors" +85329000,"ELECTRICAL CAPACITORS, FIXED, VARIABLE OR ADJUSTABLE (PRE-SET)::Parts" +85330000,electrical resistors including rheostats and potentiometers other than heating resistors electrical resistors including rheostats and potentiometers other than heating resistors +85331000,"ELECTRICAL RESISTORS (INCLUDING RHEOSTATS AND POTENTIOMETERS), OTHER THAN HEATING RESISTORS::Fixed carbon resistors, composition or film types" +85332100,electrical resistors including rheostats and potentiometers other than heating resistors electrical resistors including rheostats and potentiometers other than heating resistors >> for a power handling capacity not exceeding 20 w of 
bare wire for a power handling capacity not exceeding 20 w of bare wire +85332111,"ELECTRICAL RESISTORS (INCLUDING RHEOSTATS AND POTENTIOMETERS), OTHER THAN HEATING RESISTORS:For a power handling capacity not exceeding 20 W :Of nichrome" +85332119,"ELECTRICAL RESISTORS (INCLUDING RHEOSTATS AND POTENTIOMETERS), OTHER THAN HEATING RESISTORS:For a power handling capacity not exceeding 20 W :Other" +85332121,"ELECTRICAL RESISTORS (INCLUDING RHEOSTATS AND POTENTIOMETERS), OTHER THAN HEATING RESISTORS:For a power handling capacity not exceeding 20 W :Of nichrome" +85332129,"ELECTRICAL RESISTORS (INCLUDING RHEOSTATS AND POTENTIOMETERS), OTHER THAN HEATING RESISTORS:For a power handling capacity not exceeding 20 W :Other" +85332900,electrical resistors including rheostats and potentiometers other than heating resistors electrical resistors including rheostats and potentiometers other than heating resistors >> other of bare wire other of bare wire +85332911,"ELECTRICAL RESISTORS (INCLUDING RHEOSTATS AND POTENTIOMETERS), OTHER THAN HEATING RESISTORS:Other :Of nichrome" +85332919,"ELECTRICAL RESISTORS (INCLUDING RHEOSTATS AND POTENTIOMETERS), OTHER THAN HEATING RESISTORS:Other :Other" +85332921,"ELECTRICAL RESISTORS (INCLUDING RHEOSTATS AND POTENTIOMETERS), OTHER THAN HEATING RESISTORS:Other :Of nichrome" +85332929,"ELECTRICAL RESISTORS (INCLUDING RHEOSTATS AND POTENTIOMETERS), OTHER THAN HEATING RESISTORS:Other :Other" +85333100,electrical resistors including rheostats and potentiometers other than heating resistors electrical resistors including rheostats and potentiometers other than heating resistors >> for a power handling capacity not exceeding 20 w for a power handling capacity not exceeding 20 w +85333110,"ELECTRICAL RESISTORS (INCLUDING RHEOSTATS AND POTENTIOMETERS), OTHER THAN HEATING RESISTORS:For a power handling capacity not exceeding 20 W:Potentiometers" +85333120,"ELECTRICAL RESISTORS (INCLUDING RHEOSTATS AND POTENTIOMETERS), OTHER THAN HEATING RESISTORS:For a power handling capacity not exceeding 20 W:Rheostats" +85333190,"ELECTRICAL RESISTORS (INCLUDING RHEOSTATS AND POTENTIOMETERS), OTHER THAN HEATING RESISTORS:For a power handling capacity not exceeding 20 W:Other" +85333900,electrical resistors including rheostats and potentiometers other than heating resistors electrical resistors including rheostats and potentiometers other than heating resistors >> other other +85333910,"ELECTRICAL RESISTORS (INCLUDING RHEOSTATS AND POTENTIOMETERS), OTHER THAN HEATING RESISTORS:Other :Potentiometers" +85333920,"ELECTRICAL RESISTORS (INCLUDING RHEOSTATS AND POTENTIOMETERS), OTHER THAN HEATING RESISTORS:Other :Rheostats" +85333990,"ELECTRICAL RESISTORS (INCLUDING RHEOSTATS AND POTENTIOMETERS), OTHER THAN HEATING RESISTORS:Other :Other" +85334000,electrical resistors including rheostats and potentiometers other than heating resistors electrical resistors including rheostats and potentiometers other than heating resistors >> other variable resistors including rheostats and other fixed capacitors other variable resistors including rheostats and potentiometers +85334010,"ELECTRICAL RESISTORS (INCLUDING RHEOSTATS AND POTENTIOMETERS), OTHER THAN HEATING RESISTORS:Other variable resistors, including rheostats and potentiometers :Potentiometers" +85334020,"ELECTRICAL RESISTORS (INCLUDING RHEOSTATS AND POTENTIOMETERS), OTHER THAN HEATING RESISTORS:Other variable resistors, including rheostats and potentiometers :Rheostats" +85334030,"ELECTRICAL RESISTORS (INCLUDING RHEOSTATS AND POTENTIOMETERS), OTHER 
THAN HEATING RESISTORS:Other variable resistors, including rheostats and potentiometers :Thermistors"
+85334090,"ELECTRICAL RESISTORS (INCLUDING RHEOSTATS AND POTENTIOMETERS), OTHER THAN HEATING RESISTORS:Other variable resistors, including rheostats and potentiometers :Other"
+85339000,"ELECTRICAL RESISTORS (INCLUDING RHEOSTATS AND POTENTIOMETERS), OTHER THAN HEATING RESISTORS::Parts"
+85340000,::PRINTED CIRCUITS
+85350000,electrical apparatus for switching or protecting electrical circuits or for making connections to or in electrical circuits for example switches fuses lightning arresters voltage limiters surge suppressors plugs and other connectors junction boxes for a voltage exceeding 1000 volts
+85351000,electrical apparatus for switching or protecting electrical circuits or for making connections to or in electrical circuits for example switches fuses lightning arresters voltage limiters surge suppressors plugs and other connectors junction boxes for a voltage exceeding 1000 volts >> fuses
+85351010,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Fuses :For switches having rating upto 15 amps, rewireable"
+85351020,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Fuses :For switches having rating above 15 amps, high rupturing capacity or rewireable"
+85351030,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Fuses :Other rewireable fuses"
+85351040,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Fuses :Other high rupturing capacity fuses"
+85351050,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Fuses :Fuses gear"
+85351090,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Fuses :Other"
+85352100,electrical apparatus for switching or protecting electrical circuits or for making connections to or in electrical circuits for example switches fuses lightning arresters voltage limiters surge suppressors plugs and other connectors junction boxes for a voltage exceeding 1000 volts >> for a voltage of less than 72.5 kv sf6 circuit breakers
+85352111,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:For a voltage of less than 72.5 kV:For a voltage of 11 kV"
+85352112,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:For a voltage of less than 72.5 kV:For a voltage of 33 kV"
+85352113,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:For a voltage of less than 72.5 kV:For a voltage of 66 kV"
+85352119,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:For a voltage of less than 72.5 kV:Other"
+85352121,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:For a voltage of less than 72.5 kV:For a voltage of 11 kV"
+85352122,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:For a voltage of less than 72.5 kV:For a voltage of 33 kV"
+85352123,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:For a voltage of less than 72.5 kV:For a voltage of 66 kV"
+85352129,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:For a voltage of less than 72.5 kV:Other"
+85352190,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:For a voltage of less than 72.5 kV:Other"
+85352900,electrical apparatus for switching or protecting electrical circuits or for making connections to or in electrical circuits for example switches fuses lightning arresters voltage limiters surge suppressors plugs and other connectors junction boxes for a voltage exceeding 1000 volts >> other sf6 circuit breakers
+85352911,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Other :For a voltage of 132 kV"
+85352912,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Other :For a voltage of 220 kV"
+85352913,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Other :For a voltage of 400 kV"
+85352919,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Other :Other"
+85352921,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Other :For a voltage of 132 kV"
+85352922,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Other :For a voltage of 220 kV"
+85352923,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Other :For a voltage of 400 kV"
+85352929,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Other :Other"
+85352990,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Other :Other"
+85353000,electrical apparatus for switching or protecting electrical circuits or for making connections to or in electrical circuits for example switches fuses lightning arresters voltage limiters surge suppressors plugs and other connectors junction boxes for a voltage exceeding 1000 volts >> isolating switches and make and break switches
+85353010,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Isolating switches and make-and-break switches:Of plastic"
+85353090,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Isolating switches and make-and-break switches:Other"
+85354000,electrical apparatus for switching or protecting electrical circuits or for making connections to or in electrical circuits for example switches fuses lightning arresters voltage limiters surge suppressors plugs and other connectors junction boxes for a voltage exceeding 1000 volts >> lightning arresters voltage limiters and surge suppressors
+85354010,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Lightning arresters, voltage limiters and:Lightning arresters"
+85354020,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Lightning arresters, voltage limiters and:Voltage limiters"
+85354030,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Lightning arresters, voltage limiters and:Surge suppressors"
+85359000,electrical apparatus for switching or protecting electrical circuits or for making connections to or in electrical circuits for example switches fuses lightning arresters voltage limiters surge suppressors plugs and other connectors junction boxes for a voltage exceeding 1000 volts >> other
+85359010,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Other :Motor starters for AC motors"
+85359020,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Other :Control gear and starters for DC motors"
+85359030,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Other :Other control and switchgears"
+85359040,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Other :Junction boxes"
+85359090,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, FUSES, LIGHTNING ARRESTERS, VOLTAGE LIMITERS, SURGE SUPPRESSORS, PLUGS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE EXCEEDING 1,000 VOLTS:Other :Other"
+85360000,electrical apparatus for switching or protecting electrical circuits or for making
+85361010,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.:Fuses :For switches having rating upto 15 amps, rewireable(OLD tariff)"
+85361020,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.:Fuses :For switches having rating above 15 amps, high rupturing capacity or rewireable(OLD tariff)"
+85361030,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.:Fuses :Other rewireable fuses(OLD tariff)"
+85361040,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.:Fuses :Other high rupturing capacity fuses(OLD tariff)"
+85361050,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.:Fuses :Fuses gear(OLD tariff)"
+85361060,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.:Fuses :Electronic fuses(OLD tariff)"
+85361090,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.:Fuses :Other(OLD tariff)"
+85362010,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.:Automatic circuit breakers :Air circuit breakers(OLD tariff)"
+85362020,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.:Automatic circuit breakers :Moulded case circuit breakers(OLD tariff)"
+85362030,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.:Automatic circuit breakers :Miniature circuit breakers(OLD tariff)"
+85362040,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.:Automatic circuit breakers :Earth leak circuit breakers(OLD tariff)"
+85362090,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.:Automatic circuit breakers :Other(OLD tariff)"
+85363000,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.::Other apparatus for protecting electrical circuits"
+85364100,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.::For a voltage not exceeding 60 V"
+85364900,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.::Other"
+85365000,electrical apparatus for switching or protecting electrical circuits or for making >> other switches
+85365010,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.:Other switches :Control and switch gears"
+85365020,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.:Other switches :Other switches of plastic"
+85365090,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.:Other switches :Other"
+85366100,electrical apparatus for switching or protecting electrical circuits or for making >> lamp holders
+85366110,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.:Lamp-holders :Of plastic"
+85366190,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.:Lamp-holders :Of other materials"
+85366900,electrical apparatus for switching or protecting electrical circuits or for making >> other
+85366910,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.:Other :Of plastic"
+85366990,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.:Other :Of other materials"
+85367000,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.::Connectors for optical fibres, optical fibre bundles or cables"
+85369000,electrical apparatus for switching or protecting electrical circuits or for making >> other apparatus
+85369010,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A
VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.:Other apparatus :Motor starters for AC motors" +85369020,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.:Other apparatus :Motor starters for DC motors" +85369030,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.:Other apparatus :Junction boxes" +85369090,"ELECTRICAL APPARATUS FOR SWITCHING OR PROTECTING ELECTRICAL CIRCUITS, OR FOR MAKING CONNECTIONS TO OR IN ELECTRICAL CIRCUITS (FOR EXAMPLE, SWITCHES, RELAYS, FUSES, SURGE SUPPRESSORS, PLUGS, SOCKETS, LAMP-HOLDERS AND OTHER CONNECTORS, JUNCTION BOXES), FOR A VOLTAGE NOT EXCEEDING 1,000 VOLTS CONNECTORS FOR OPTICAL FIBRES, OPTICAL FIBER BUNDLES OR CABLES.:Other apparatus :Other" +85370000,boards panels consoles desks cabinets and other bases equipped with two or more for apparatus of heading 8535 or 8536 electric control or the distribution of electricity including those incorporating instruments or apparatus of chapter 90 and numerical control apparatus other than switching apparatus of heading 8517 +85371000,"BOARDS, PANELS, CONSOLES, DESKS, CABINETS AND OTHER BASES, EQUIPPED WITH TWO OR MORE APPARATUS OF HEADING 8535 OR 8536, FOR ELECTRIC CONTROL OR THE DISTRIBUTION OF ELECTRICITY, INCLUDING THOSE INCORPORATING INSTRUMENTS OR APPARATUS OF CHAPTER 90, AND NUMERICAL CONTROL APPARATUS, OTHER THAN SWITCHING APPARATUS OF HEADING 8517::For a voltage not exceeding 1,000 V" +85372000,"BOARDS, PANELS, CONSOLES, DESKS, CABINETS AND OTHER BASES, EQUIPPED WITH TWO OR MORE APPARATUS OF HEADING 8535 OR 8536, FOR ELECTRIC CONTROL OR THE DISTRIBUTION OF ELECTRICITY, INCLUDING THOSE INCORPORATING INSTRUMENTS OR APPARATUS OF CHAPTER 90, AND NUMERICAL CONTROL APPARATUS, OTHER THAN SWITCHING APPARATUS OF HEADING 8517::For a voltage exceeding 1,000 V" +85380000,parts suitable for use solely or principally with the apparatus of headings 8535 8536 or 8537 +85381000,parts suitable for use solely or principally with the apparatus of headings 8535 8536 or 8537 >> boards panels consoles desks cabinets and other bases for the goods of heading 8537 not equipped with their apparatus +85381010,"PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE APPARATUS OF HEADINGS 8535, 8536 OR 8537:Boards, panels, consoles, desks, cabinets and other bases for the goods of heading 8537, not equipped with their apparatus:For industrial use" +85381090,"PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE APPARATUS OF HEADINGS 8535, 8536 OR 8537:Boards, panels, consoles, desks, cabinets and other bases for the goods of heading 8537, not equipped with their apparatus:Other" +85389000,"PARTS SUITABLE FOR USE SOLELY OR PRINCIPALLY WITH THE APPARATUS OF HEADINGS 8535, 8536 OR 8537::Other(OLD tariff)" +85391000,"ELECTRIC FILAMENT OR DISCHARGE LAMPS INCLUDING SEALED BEAM LAMP UNITS AND ULTRA-VIOLET OR INFRARED LAMPS, ARC LAMPS; LIGHT-EMITTING DIODE (LED) LAMPS::Sealed beam lamp units(OLD 
tariff)" +85392110,"ELECTRIC FILAMENT OR DISCHARGE LAMPS INCLUDING SEALED BEAM LAMP UNITS AND ULTRA-VIOLET OR INFRARED LAMPS, ARC LAMPS; LIGHT-EMITTING DIODE (LED) LAMPS:Tungsten halogen :Miniature halogen lamps with fittings(OLD tariff)" +85392120,"ELECTRIC FILAMENT OR DISCHARGE LAMPS INCLUDING SEALED BEAM LAMP UNITS AND ULTRA-VIOLET OR INFRARED LAMPS, ARC LAMPS; LIGHT-EMITTING DIODE (LED) LAMPS:Tungsten halogen :Other for automobiles(OLD tariff)" +85392190,"ELECTRIC FILAMENT OR DISCHARGE LAMPS INCLUDING SEALED BEAM LAMP UNITS AND ULTRA-VIOLET OR INFRARED LAMPS, ARC LAMPS; LIGHT-EMITTING DIODE (LED) LAMPS:Tungsten halogen :Other(OLD tariff)" +85392200,"ELECTRIC FILAMENT OR DISCHARGE LAMPS INCLUDING SEALED BEAM LAMP UNITS AND ULTRA-VIOLET OR INFRARED LAMPS, ARC LAMPS; LIGHT-EMITTING DIODE (LED) LAMPS::Other, of a power not exceeding 200 W and for a voltage exceeding 100 V(OLD tariff)" +85392910,"ELECTRIC FILAMENT OR DISCHARGE LAMPS INCLUDING SEALED BEAM LAMP UNITS AND ULTRA-VIOLET OR INFRARED LAMPS, ARC LAMPS; LIGHT-EMITTING DIODE (LED) LAMPS:Other :Of retail sale price not exceeding rupees 20 per bulb(OLD tariff)" +85392920,"ELECTRIC FILAMENT OR DISCHARGE LAMPS INCLUDING SEALED BEAM LAMP UNITS AND ULTRA-VIOLET OR INFRARED LAMPS, ARC LAMPS; LIGHT-EMITTING DIODE (LED) LAMPS:Other :Bulb, for torches(OLD tariff)" +85392930,"ELECTRIC FILAMENT OR DISCHARGE LAMPS INCLUDING SEALED BEAM LAMP UNITS AND ULTRA-VIOLET OR INFRARED LAMPS, ARC LAMPS; LIGHT-EMITTING DIODE (LED) LAMPS:Other :Miniature bulbs(OLD tariff)" +85392940,"ELECTRIC FILAMENT OR DISCHARGE LAMPS INCLUDING SEALED BEAM LAMP UNITS AND ULTRA-VIOLET OR INFRARED LAMPS, ARC LAMPS; LIGHT-EMITTING DIODE (LED) LAMPS:Other :Other for automobile lamps(OLD tariff)" +85392990,"ELECTRIC FILAMENT OR DISCHARGE LAMPS INCLUDING SEALED BEAM LAMP UNITS AND ULTRA-VIOLET OR INFRARED LAMPS, ARC LAMPS; LIGHT-EMITTING DIODE (LED) LAMPS:Other :Other(OLD tariff)" +85393110,"ELECTRIC FILAMENT OR DISCHARGE LAMPS INCLUDING SEALED BEAM LAMP UNITS AND ULTRA-VIOLET OR INFRARED LAMPS, ARC LAMPS; LIGHT-EMITTING DIODE (LED) LAMPS:Fluorescent, hot cathode :Compact fluorescent lamps(OLD tariff)" +85393190,"ELECTRIC FILAMENT OR DISCHARGE LAMPS INCLUDING SEALED BEAM LAMP UNITS AND ULTRA-VIOLET OR INFRARED LAMPS, ARC LAMPS; LIGHT-EMITTING DIODE (LED) LAMPS:Fluorescent, hot cathode :Other(OLD tariff)" +85393210,"ELECTRIC FILAMENT OR DISCHARGE LAMPS INCLUDING SEALED BEAM LAMP UNITS AND ULTRA-VIOLET OR INFRARED LAMPS, ARC LAMPS; LIGHT-EMITTING DIODE (LED) LAMPS:Mercury or sodium vapour lamps; metal halide lamps:Mercury vapour lamps(OLD tariff)" +85393220,"ELECTRIC FILAMENT OR DISCHARGE LAMPS INCLUDING SEALED BEAM LAMP UNITS AND ULTRA-VIOLET OR INFRARED LAMPS, ARC LAMPS; LIGHT-EMITTING DIODE (LED) LAMPS:Mercury or sodium vapour lamps; metal halide lamps:Sodium vapour lamps(OLD tariff)" +85393230,"ELECTRIC FILAMENT OR DISCHARGE LAMPS INCLUDING SEALED BEAM LAMP UNITS AND ULTRA-VIOLET OR INFRARED LAMPS, ARC LAMPS; LIGHT-EMITTING DIODE (LED) LAMPS:Mercury or sodium vapour lamps; metal halide lamps:Metal halide lamps(OLD tariff)" +85393910,"ELECTRIC FILAMENT OR DISCHARGE LAMPS INCLUDING SEALED BEAM LAMP UNITS AND ULTRA-VIOLET OR INFRARED LAMPS, ARC LAMPS; LIGHT-EMITTING DIODE (LED) LAMPS:Other :Energy efficient triphosphor fluorescent lamps(OLD tariff)" +85393990,"ELECTRIC FILAMENT OR DISCHARGE LAMPS INCLUDING SEALED BEAM LAMP UNITS AND ULTRA-VIOLET OR INFRARED LAMPS, ARC LAMPS; LIGHT-EMITTING DIODE (LED) LAMPS:Other :Other(OLD tariff)" +85394100,"ELECTRIC FILAMENT OR DISCHARGE LAMPS 
INCLUDING SEALED BEAM LAMP UNITS AND ULTRA-VIOLET OR INFRARED LAMPS, ARC LAMPS; LIGHT-EMITTING DIODE (LED) LAMPS::Arc-lamps(OLD tariff)" +85394900,"ELECTRIC FILAMENT OR DISCHARGE LAMPS INCLUDING SEALED BEAM LAMP UNITS AND ULTRA-VIOLET OR INFRARED LAMPS, ARC LAMPS; LIGHT-EMITTING DIODE (LED) LAMPS::Other(OLD tariff)" +85395000,"ELECTRIC FILAMENT OR DISCHARGE LAMPS INCLUDING SEALED BEAM LAMP UNITS AND ULTRA-VIOLET OR INFRARED LAMPS, ARC LAMPS; LIGHT-EMITTING DIODE (LED) LAMPS::Light-emitting diode (LED) lamps(OLD tariff)" +85395100,"ELECTRIC FILAMENT OR DISCHARGE LAMPS, INCLUDING SEALED BEAM LAMP UNITS AND ULTRA-VIOLET OR INFRA-RED LAMPS; ARC-LAMPS:: Light-emitting diode (LED) modules(OLD tariff)" +85395200,"ELECTRIC FILAMENT OR DISCHARGE LAMPS, INCLUDING SEALED BEAM LAMP UNITS AND ULTRA-VIOLET OR INFRA-RED LAMPS; ARC-LAMPS:led:Light-emitting diode (LED) lamps(OLD tariff)" +85399010,"ELECTRIC FILAMENT OR DISCHARGE LAMPS INCLUDING SEALED BEAM LAMP UNITS AND ULTRA-VIOLET OR INFRARED LAMPS, ARC LAMPS; LIGHT-EMITTING DIODE (LED) LAMPS:Parts :Parts of fluorescent tube lamps(OLD tariff)" +85399020,"ELECTRIC FILAMENT OR DISCHARGE LAMPS INCLUDING SEALED BEAM LAMP UNITS AND ULTRA-VIOLET OR INFRARED LAMPS, ARC LAMPS; LIGHT-EMITTING DIODE (LED) LAMPS:Parts :Parts of arc-lamps(OLD tariff)" +85399090,"ELECTRIC FILAMENT OR DISCHARGE LAMPS INCLUDING SEALED BEAM LAMP UNITS AND ULTRA-VIOLET OR INFRARED LAMPS, ARC LAMPS; LIGHT-EMITTING DIODE (LED) LAMPS:Parts :Other(OLD tariff)" +85400000,thermionic cold cathode or valves and tubes for example vacuum or television picture tubes including video monitor tubes colour +85401100,thermionic cold cathode or valves and tubes for example vacuum or television picture tubes including video monitor tubes colour >> +85401110,"THERMIONIC, COLD CATHODE OR PHOTO-CATHODE VALVES AND TUBES (FOR EXAMPLE, VACUUM OR VAPOUR OR GAS FILLED VALVES AND TUBES, MERCURY ARC RECTIFYING VALVES AND TUBES, CATHODE-RAY TUBES, TELEVISION CAMERA TUBES):Colour :Television picture tubes of 20 and 21 size, except 21 flat and full square (F and FST) colour TV picture tubes" +85401120,"THERMIONIC, COLD CATHODE OR PHOTO-CATHODE VALVES AND TUBES (FOR EXAMPLE, VACUUM OR VAPOUR OR GAS FILLED VALVES AND TUBES, MERCURY ARC RECTIFYING VALVES AND TUBES, CATHODE-RAY TUBES, TELEVISION CAMERA TUBES):Colour :Video monitor cathode-ray tubes" +85401190,"THERMIONIC, COLD CATHODE OR PHOTO-CATHODE VALVES AND TUBES (FOR EXAMPLE, VACUUM OR VAPOUR OR GAS FILLED VALVES AND TUBES, MERCURY ARC RECTIFYING VALVES AND TUBES, CATHODE-RAY TUBES, TELEVISION CAMERA TUBES):Colour :Other" +85401200,"THERMIONIC, COLD CATHODE OR PHOTO-CATHODE VALVES AND TUBES (FOR EXAMPLE, VACUUM OR VAPOUR OR GAS FILLED VALVES AND TUBES, MERCURY ARC RECTIFYING VALVES AND TUBES, CATHODE-RAY TUBES, TELEVISION CAMERA TUBES):Monochrome:Monochrome" +85402000,"THERMIONIC, COLD CATHODE OR PHOTO-CATHODE VALVES AND TUBES (FOR EXAMPLE, VACUUM OR VAPOUR OR GAS FILLED VALVES AND TUBES, MERCURY ARC RECTIFYING VALVES AND TUBES, CATHODE-RAY TUBES, TELEVISION CAMERA TUBES)::Television camera tubes; image converters and intensifiers; other photos-cathode tubes" +85404000,thermionic cold cathode or valves and tubes for example vacuum or television picture tubes including video monitor tubes colour >> data or graphic display tubes monochrome data or graphic display tubes colour with a phosphor dot screen pitch smaller than mm +85404010,"THERMIONIC, COLD CATHODE OR PHOTO-CATHODE VALVES AND TUBES (FOR EXAMPLE, VACUUM OR VAPOUR OR GAS FILLED VALVES AND TUBES, MERCURY ARC 
RECTIFYING VALVES AND TUBES, CATHODE-RAY TUBES, TELEVISION CAMERA TUBES):Data or graphic display tubes, monochrine data or graphic display tubes, colour with a phosphor dot screen pitch smaller than 0.4 mm::Data or graphic display tubes, monochrome" +85404020,"THERMIONIC, COLD CATHODE OR PHOTO-CATHODE VALVES AND TUBES (FOR EXAMPLE, VACUUM OR VAPOUR OR GAS FILLED VALVES AND TUBES, MERCURY ARC RECTIFYING VALVES AND TUBES, CATHODE-RAY TUBES, TELEVISION CAMERA TUBES):Data or graphic display tubes, monochrine data or graphic display tubes, colour with a phosphor dot screen pitch smaller than 0.4 mm::Data or graphic display tubes, colour with a phosphor dot screen pitch smaller than 0.4 mm" +85406000,"THERMIONIC, COLD CATHODE OR PHOTO-CATHODE VALVES AND TUBES (FOR EXAMPLE, VACUUM OR VAPOUR OR GAS FILLED VALVES AND TUBES, MERCURY ARC RECTIFYING VALVES AND TUBES, CATHODE-RAY TUBES, TELEVISION CAMERA TUBES)::Other cathode-ray tubes" +85407100,"THERMIONIC, COLD CATHODE OR PHOTO-CATHODE VALVES AND TUBES (FOR EXAMPLE, VACUUM OR VAPOUR OR GAS FILLED VALVES AND TUBES, MERCURY ARC RECTIFYING VALVES AND TUBES, CATHODE-RAY TUBES, TELEVISION CAMERA TUBES)::Magnetrons" +85407200,"THERMIONIC, COLD CATHODE OR PHOTO-CATHODE VALVES AND TUBES (FOR EXAMPLE, VACUUM OR VAPOUR OR GAS FILLED VALVES AND TUBES, MERCURY ARC RECTIFYING VALVES AND TUBES, CATHODE-RAY TUBES, TELEVISION CAMERA TUBES)::Klystrons(OLD tariff)" +85407900,"THERMIONIC, COLD CATHODE OR PHOTO-CATHODE VALVES AND TUBES (FOR EXAMPLE, VACUUM OR VAPOUR OR GAS FILLED VALVES AND TUBES, MERCURY ARC RECTIFYING VALVES AND TUBES, CATHODE-RAY TUBES, TELEVISION CAMERA TUBES)::Other" +85408100,"THERMIONIC, COLD CATHODE OR PHOTO-CATHODE VALVES AND TUBES (FOR EXAMPLE, VACUUM OR VAPOUR OR GAS FILLED VALVES AND TUBES, MERCURY ARC RECTIFYING VALVES AND TUBES, CATHODE-RAY TUBES, TELEVISION CAMERA TUBES)::Receiver or amplifier valves and tubes" +85408900,"THERMIONIC, COLD CATHODE OR PHOTO-CATHODE VALVES AND TUBES (FOR EXAMPLE, VACUUM OR VAPOUR OR GAS FILLED VALVES AND TUBES, MERCURY ARC RECTIFYING VALVES AND TUBES, CATHODE-RAY TUBES, TELEVISION CAMERA TUBES)::Other" +85409100,"THERMIONIC, COLD CATHODE OR PHOTO-CATHODE VALVES AND TUBES (FOR EXAMPLE, VACUUM OR VAPOUR OR GAS FILLED VALVES AND TUBES, MERCURY ARC RECTIFYING VALVES AND TUBES, CATHODE-RAY TUBES, TELEVISION CAMERA TUBES)::Of cathode-ray tubes" +85409900,"THERMIONIC, COLD CATHODE OR PHOTO-CATHODE VALVES AND TUBES (FOR EXAMPLE, VACUUM OR VAPOUR OR GAS FILLED VALVES AND TUBES, MERCURY ARC RECTIFYING VALVES AND TUBES, CATHODE-RAY TUBES, TELEVISION CAMERA TUBES)::Other" +85410000,semiconductor devices for example diodes transistors transducers photosensitive semiconductor devices including photovoltaic cells whether or not assembled in modules or made up into panels diodes led whether or not assembled with other diodes led mounted crystals +85411000,"DIODES, TRANSISTORS AND SIMILAR SEMICONDUCTOR DEVICES; PHOTOSENSITIVE SEMICONDUCTOR DEVICES, INCLUDING PHOTOVOLTAIC CELLS WHETHER OR NOT ASSEMBLED IN MODULES OR MADE UP INTO PANELS; LIGHT EMITTING DIODES; MOUNTED PIEZO-ELECTRIC CRYSTALS::Diodes, other than photosensitive or light emitting diodes" +85412100,"DIODES, TRANSISTORS AND SIMILAR SEMI-CONDUCTOR DEVICES; PHOTOSENSITIVE SEMI-CONDUCTOR DEVICES; INCLUDING PHOTO VOLTAIC CELLS, WHETHER OR NOT ASSEMBLED IN MODULES OR MADE UP INTO PANELS; LIGHT-EMITTING DIODES (LED); MOUNTED PIEZOELECTRIC CRYSTALS::With a dissipation rate of less than 1 W" +85412900,"DIODES, TRANSISTORS AND SIMILAR SEMI-CONDUCTOR DEVICES; PHOTOSENSITIVE 
SEMI-CONDUCTOR DEVICES; INCLUDING PHOTO VOLTAIC CELLS, WHETHER OR NOT ASSEMBLED IN MODULES OR MADE UP INTO PANELS; LIGHT-EMITTING DIODES (LED); MOUNTED PIEZOELECTRIC CRYSTALS::Other" +85413000,semiconductor devices for example diodes transistors transducers photosensitive semiconductor devices including photovoltaic cells whether or not assembled in modules or made up into panels diodes led whether or not assembled with other diodes led mounted crystals >> thyristors diacs and triacs other than photosensitive devices +85413010,"DIODES, TRANSISTORS AND SIMILAR SEMI-CONDUCTOR DEVICES; PHOTOSENSITIVE SEMI-CONDUCTOR DEVICES; INCLUDING PHOTO VOLTAIC CELLS, WHETHER OR NOT ASSEMBLED IN MODULES OR MADE UP INTO PANELS; LIGHT-EMITTING DIODES (LED); MOUNTED PIEZOELECTRIC CRYSTALS:Thyristors, diacs and triacs, other than photosensitive devices :Thyristors" +85413090,"DIODES, TRANSISTORS AND SIMILAR SEMI-CONDUCTOR DEVICES; PHOTOSENSITIVE SEMI-CONDUCTOR DEVICES; INCLUDING PHOTO VOLTAIC CELLS, WHETHER OR NOT ASSEMBLED IN MODULES OR MADE UP INTO PANELS; LIGHT-EMITTING DIODES (LED); MOUNTED PIEZOELECTRIC CRYSTALS:Thyristors, diacs and triacs, other than photosensitive devices :Other" +85414011,"DIODES, TRANSISTORS AND SIMILAR SEMI-CONDUCTOR DEVICES; PHOTOSENSITIVE SEMI-CONDUCTOR DEVICES; INCLUDING PHOTO VOLTAIC CELLS, WHETHER OR NOT ASSEMBLED IN MODULES OR MADE UP INTO PANELS; LIGHT-EMITTING DIODES (LED); MOUNTED PIEZOELECTRIC CRYSTALS:Photosensitive semi-conductor devices, including photo voltaic cells whether or not assembled in modules or made up into panels; light-emitting diodes (LED):Solar cells, not assembled(OLD tariff)" +85414012,"DIODES, TRANSISTORS AND SIMILAR SEMI-CONDUCTOR DEVICES; PHOTOSENSITIVE SEMI-CONDUCTOR DEVICES; INCLUDING PHOTO VOLTAIC CELLS, WHETHER OR NOT ASSEMBLED IN MODULES OR MADE UP INTO PANELS; LIGHT-EMITTING DIODES (LED); MOUNTED PIEZOELECTRIC CRYSTALS:Photosensitive semi-conductor devices, including photo voltaic cells whether or not assembled in modules or made up into panels; light-emitting diodes (LED):Solar cells, assembled in modules or made up into panels(OLD tariff)" +85414019,"DIODES, TRANSISTORS AND SIMILAR SEMI-CONDUCTOR DEVICES; PHOTOSENSITIVE SEMI-CONDUCTOR DEVICES; INCLUDING PHOTO VOLTAIC CELLS, WHETHER OR NOT ASSEMBLED IN MODULES OR MADE UP INTO PANELS; LIGHT-EMITTING DIODES (LED); MOUNTED PIEZOELECTRIC CRYSTALS:Photosensitive semi-conductor devices, including photo voltaic cells whether or not assembled in modules or made up into panels; light-emitting diodes (LED):Other(OLD tariff)" +85414020,"DIODES, TRANSISTORS AND SIMILAR SEMI-CONDUCTOR DEVICES; PHOTOSENSITIVE SEMI-CONDUCTOR DEVICES; INCLUDING PHOTO VOLTAIC CELLS, WHETHER OR NOT ASSEMBLED IN MODULES OR MADE UP INTO PANELS; LIGHT-EMITTING DIODES (LED); MOUNTED PIEZOELECTRIC CRYSTALS:Photosensitive semi-conductor devices, including photo voltaic cells whether or not assembled in modules or made up into panels; light-emitting diodes (LED):Light emitting diodes (electro-luminescent)(OLD tariff)" +85414090,"DIODES, TRANSISTORS AND SIMILAR SEMI-CONDUCTOR DEVICES; PHOTOSENSITIVE SEMI-CONDUCTOR DEVICES; INCLUDING PHOTO VOLTAIC CELLS, WHETHER OR NOT ASSEMBLED IN MODULES OR MADE UP INTO PANELS; LIGHT-EMITTING DIODES (LED); MOUNTED PIEZOELECTRIC CRYSTALS:Photosensitive semi-conductor devices, including photo voltaic cells whether or not assembled in modules or made up into panels; light-emitting diodes (LED):Other(OLD tariff)" +85414100,"DIODES, TRANSISTORS AND SIMILAR SEMICONDUCTOR DEVICES; PHOTOSENSITIVE 
SEMICONDUCTOR DEVICES, INCLUDING PHOTOVOLTAIC CELLS WHETHER OR NOT ASSEMBLED IN MODULES OR MADE UP INTO PANELS; LIGHT EMITTING DIODES; MOUNTED PIEZO-ELECTRIC CRYSTALS::Light-emitting diodes (LED)" +85414200,"DIODES, TRANSISTORS AND SIMILAR SEMICONDUCTOR DEVICES; PHOTOSENSITIVE SEMICONDUCTOR DEVICES, INCLUDING PHOTOVOLTAIC CELLS WHETHER OR NOT ASSEMBLED IN MODULES OR MADE UP INTO PANELS; LIGHT EMITTING DIODES; MOUNTED PIEZO-ELECTRIC CRYSTALS::Photovoltaic cells not assembled in modules or made up into panels" +85414300,"DIODES, TRANSISTORS AND SIMILAR SEMICONDUCTOR DEVICES; PHOTOSENSITIVE SEMICONDUCTOR DEVICES, INCLUDING PHOTOVOLTAIC CELLS WHETHER OR NOT ASSEMBLED IN MODULES OR MADE UP INTO PANELS; LIGHT EMITTING DIODES; MOUNTED PIEZO-ELECTRIC CRYSTALS:phot:Photovoltaic cells assembled in modules or made up into panel" +85414900,"DIODES, TRANSISTORS AND SIMILAR SEMICONDUCTOR DEVICES; PHOTOSENSITIVE SEMICONDUCTOR DEVICES, INCLUDING PHOTOVOLTAIC CELLS WHETHER OR NOT ASSEMBLED IN MODULES OR MADE UP INTO PANELS; LIGHT EMITTING DIODES; MOUNTED PIEZO-ELECTRIC CRYSTALS::Other" +85415000,"DIODES, TRANSISTORS AND SIMILAR SEMI-CONDUCTOR DEVICES; PHOTOSENSITIVE SEMI-CONDUCTOR DEVICES; INCLUDING PHOTO VOLTAIC CELLS, WHETHER OR NOT ASSEMBLED IN MODULES OR MADE UP INTO PANELS; LIGHT-EMITTING DIODES (LED); MOUNTED PIEZOELECTRIC CRYSTALS::Other semi-conductors devices(OLD tariff)" +85415100,"DIODES, TRANSISTORS AND SIMILAR SEMICONDUCTOR DEVICES; PHOTOSENSITIVE SEMICONDUCTOR DEVICES, INCLUDING PHOTOVOLTAIC CELLS WHETHER OR NOT ASSEMBLED IN MODULES OR MADE UP INTO PANELS; LIGHT EMITTING DIODES; MOUNTED PIEZO-ELECTRIC CRYSTALS:semi:Semiconductor-based transducers" +85415900,"DIODES, TRANSISTORS AND SIMILAR SEMICONDUCTOR DEVICES; PHOTOSENSITIVE SEMICONDUCTOR DEVICES, INCLUDING PHOTOVOLTAIC CELLS WHETHER OR NOT ASSEMBLED IN MODULES OR MADE UP INTO PANELS; LIGHT EMITTING DIODES; MOUNTED PIEZO-ELECTRIC CRYSTALS::Other" +85416000,"DIODES, TRANSISTORS AND SIMILAR SEMI-CONDUCTOR DEVICES; PHOTOSENSITIVE SEMI-CONDUCTOR DEVICES; INCLUDING PHOTO VOLTAIC CELLS, WHETHER OR NOT ASSEMBLED IN MODULES OR MADE UP INTO PANELS; LIGHT-EMITTING DIODES (LED); MOUNTED PIEZOELECTRIC CRYSTALS::Mounted piezo-electric crystals" +85419000,"DIODES, TRANSISTORS AND SIMILAR SEMI-CONDUCTOR DEVICES; PHOTOSENSITIVE SEMI-CONDUCTOR DEVICES; INCLUDING PHOTO VOLTAIC CELLS, WHETHER OR NOT ASSEMBLED IN MODULES OR MADE UP INTO PANELS; LIGHT-EMITTING DIODES (LED); MOUNTED PIEZOELECTRIC CRYSTALS::Parts" +85420000,electronic integrated circuits electronic integrated circuits +85423100,"ELECTRONIC INTEGRATED CIRCUITS::Processors and controllers, whether or not combined with memories, converters, logic circuits, amplifiers, clock and timing circuits, or other circuits" +85423200,ELECTRONIC INTEGRATED CIRCUITS::Memories +85423300,ELECTRONIC INTEGRATED CIRCUITS::Amplifiers +85423900,ELECTRONIC INTEGRATED CIRCUITS::Other +85429000,ELECTRONIC INTEGRATED CIRCUITS::Parts +85430000,and apparatus having electrical machines individual functions not specified or including elsewhere in this chapter +85431000,and apparatus having electrical machines individual functions not specified or including elsewhere in this chapter >> particle accelerators +85431010,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Particle accelerators:Ion implanters for doping semi conductor material" +85431020,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS 
CHAPTER:Particle accelerators:Vane graff, cock-croft, Walton accelerators" +85431030,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Particle accelerators:Synchrocyclotrons, synchrotrons" +85431090,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Particle accelerators:Other including cyclotrons" +85432000,and apparatus having electrical machines individual functions not specified or including elsewhere in this chapter >> signal generators +85432010,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Signal generators :Sweep generators" +85432020,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Signal generators :Impulse generators" +85432030,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Signal generators :Tacho generators" +85432090,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Signal generators :Other" +85433000,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER::Machines and apparatus for electroplating, electrolysis or electrophoresis" +85434000,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:elec:Electronic cigarettes and similar personal electric vaporising device" +85437000,and apparatus having electrical machines individual functions not specified or including elsewhere in this chapter >> other machines and apparatus +85437011,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Proximity card and tags" +85437012,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Metal detector" +85437013,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Mine detector" +85437019,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Other" +85437021,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Digital reverberators" +85437022,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Mixing system or consoles" +85437029,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Other" +85437031,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Video mixing system or consoles" +85437032,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Video effect system" +85437033,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Digital layering machine" 
+85437034,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Paint box" +85437035,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Video typewriter" +85437036,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Video matting machine" +85437039,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Other" +85437041,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Computerised editing system controlling more than three video editing machines" +85437042,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Other video control unit" +85437049,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Other" +85437050,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Colour corrector" +85437061,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Broadcast amplifier" +85437062,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Limiting amplifier, video distribution amplifier and stabilizing amplifiers" +85437069,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Other" +85437071,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Graphic equalizer" +85437072,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Synthesised receivers" +85437091,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:RF(radio frequency) power amplifer and noise generators for communication jamming equipment, static and mobile or man-portable" +85437092,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Equipment gadgets based on solar energy" +85437093,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Professional beauty care equipment" +85437094,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Audio visual stereo encoders" +85437095,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and apparatus:Time code generator" +85437099,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER:Other machines and 
apparatus:Other" +85439000,"ELECTRICAL MACHINES AND APPARATUS HAVING INDIVIDUAL FUNCTIONS, NOT SPECIFIED OR INCLUDING ELSEWHERE IN THIS CHAPTER::Parts" +85440000,insulated including enamelled or anodised wire cable including cable and f ibres whe th er o r n ot a ss emb led wi th electric conductors or fitted with connectors winding wire +85441100,insulated including enamelled or anodised wire cable including cable and f ibres whe th er o r n ot a ss emb led wi th electric conductors or fitted with connectors winding wire >> of copper +85441110,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Of copper:Enamelled" +85441190,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Of copper:Other" +85441900,insulated including enamelled or anodised wire cable including cable and f ibres whe th er o r n ot a ss emb led wi th electric conductors or fitted with connectors winding wire >> other +85441910,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Other :Asbestos covered" +85441920,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Other :Plastic insulated" +85441930,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Other :Rubber insulated" +85441990,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Other :Other" +85442000,insulated including enamelled or anodised wire cable including cable and f ibres whe th er o r n ot a ss emb led wi th electric conductors or fitted with connectors winding wire >> cable and other electric conductors +85442010,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Co-axial cable and other co-axial electric conductors :Co-axial cable" +85442090,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, 
MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Co-axial cable and other co-axial electric conductors :Other" +85443000,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS::Ignition wiring sets and other wiring sets of a kind used in vehicles, aircraft or ships" +85444200,insulated including enamelled or anodised wire cable including cable and f ibres whe th er o r n ot a ss emb led wi th electric conductors or fitted with connectors winding wire >> fitted with connectors +85444210,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Fitted with connectors:Paper insulated" +85444220,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Fitted with connectors:Plastic insulated" +85444230,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Fitted with connectors:Rubber insulated" +85444290,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Fitted with connectors:OTHER(OLD tariff)" +85444291,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Fitted with connectors:Paper insulated, of a kind used in telecommunication" +85444292,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Fitted with connectors:Plastic insulated, of a kind used in telecommunication" +85444293,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Fitted with connectors:Rubber insulated, of a kind used in telecommunication" +85444299,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH 
CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Fitted with connectors:Other" +85444900,insulated including enamelled or anodised wire cable including cable and f ibres whe th er o r n ot a ss emb led wi th electric conductors or fitted with connectors winding wire >> other +85444910,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Other :paper insulated" +85444920,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Other :Plastic insulated" +85444930,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Other :Rubber insulated" +85444991,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Other :Paper insulated, of a kind used in telecommunication" +85444992,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Other :Plastic insulated, of a kind used in telecommunication" +85444993,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Other :Rubber insulated, of a kind used in telecommunication" +85444999,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Other :Other" +85446000,insulated including enamelled or anodised wire cable including cable and f ibres whe th er o r n ot a ss emb led wi th electric conductors or fitted with connectors winding wire >> other electric conductors for a voltage exceeding 1000 v +85446010,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Other electric conductors, for a voltage exceeding 1000 V :Paper insulated" +85446020,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE 
(INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Other electric conductors, for a voltage exceeding 1000 V :Plastic insulated" +85446030,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Other electric conductors, for a voltage exceeding 1000 V :Rubber insulated" +85446090,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Other electric conductors, for a voltage exceeding 1000 V :Other" +85447000,insulated including enamelled or anodised wire cable including cable and f ibres whe th er o r n ot a ss emb led wi th electric conductors or fitted with connectors winding wire >> optical fibre cables +85447010,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Optical fibre cables :Lead alloy sheathed cables for lighting purposes" +85447090,"INSULATED (INCLUDING ENAMELLED OR ANODISED) WIRE, CABLE (INCLUDING CO-AXIAL CABLE) AND OTHER INSULATED ELECTRIC CONDUCTORS, WHETHER OR NOT FITTED WITH CONNECTORS; OPTICAL FIBRE CABLES, MADE UP OF INDIVIDUALLY SHEATHED FIBRES, WHETHER OR NOT ASSEMBLED WITH ELECTRIC CONDUCTORS OR FITTED WITH CONNECTORS:Optical fibre cables :Other" +85450000,carbon electrodes carbon brushes lamp carbons battery carbons and other articles of graphite or other carbon with or without metal of a kind used for electrical purposes electrodes +85451100,"CARBON ELECTRODES, CARBON BRUSHES, LAMP CARBONS, BATTERY CARBONS AND OTHER ARTICLES OF GRAPHITE OR OTHER CARBON, WITH OR WITHOUT METAL, OF A KIND USED FOR ELECTRICAL PURPOSES::Of a kind used for furnaces" +85451900,"CARBON ELECTRODES, CARBON BRUSHES, LAMP CARBONS, BATTERY CARBONS AND OTHER ARTICLES OF GRAPHITE OR OTHER CARBON, WITH OR WITHOUT METAL, OF A KIND USED FOR ELECTRICAL PURPOSES::Other" +85452000,"CARBON ELECTRODES, CARBON BRUSHES, LAMP CARBONS, BATTERY CARBONS AND OTHER ARTICLES OF GRAPHITE OR OTHER CARBON, WITH OR WITHOUT METAL, OF A KIND USED FOR ELECTRICAL PURPOSES::Brushes" +85459000,carbon electrodes carbon brushes lamp carbons battery carbons and other articles of graphite or other carbon with or without metal of a kind used for electrical purposes electrodes >> other +85459010,"CARBON ELECTRODES, CARBON BRUSHES, LAMP CARBONS, BATTERY CARBONS AND OTHER ARTICLES OF GRAPHITE OR OTHER CARBON, WITH OR WITHOUT METAL, OF A KIND USED FOR ELECTRICAL PURPOSES:Other :Arc-lamp carbon" +85459020,"CARBON ELECTRODES, CARBON BRUSHES, LAMP CARBONS, BATTERY CARBONS AND OTHER ARTICLES OF GRAPHITE OR OTHER CARBON, WITH OR WITHOUT METAL, OF A KIND USED FOR ELECTRICAL PURPOSES:Other :Battery carbon" +85459090,"CARBON ELECTRODES, CARBON BRUSHES, LAMP CARBONS, BATTERY CARBONS AND OTHER ARTICLES OF GRAPHITE 
OR OTHER CARBON, WITH OR WITHOUT METAL, OF A KIND USED FOR ELECTRICAL PURPOSES:Other :Other" +85460000,electrical insulators of any material +85461000,ELECTRICAL INSULATORS OF ANY MATERIAL::Of glass +85462000,electrical insulators of any material >> of ceramics porcelain discs and strings +85462011,ELECTRICAL INSULATORS OF ANY MATERIAL:Of ceramics :Porcelain below 6.6 kV +85462019,ELECTRICAL INSULATORS OF ANY MATERIAL:Of ceramics :Other +85462021,ELECTRICAL INSULATORS OF ANY MATERIAL:Of ceramics :Below 6.6 kV +85462022,ELECTRICAL INSULATORS OF ANY MATERIAL:Of ceramics :6.6 kV or above but upto 11 kV +85462023,ELECTRICAL INSULATORS OF ANY MATERIAL:Of ceramics :Above 11 kV but upto 66 kV +85462024,ELECTRICAL INSULATORS OF ANY MATERIAL:Of ceramics :Above 66 kV but upto 132 kV +85462029,ELECTRICAL INSULATORS OF ANY MATERIAL:Of ceramics :Above 132 kV +85462031,ELECTRICAL INSULATORS OF ANY MATERIAL:Of ceramics :Below 6.6 kV +85462032,ELECTRICAL INSULATORS OF ANY MATERIAL:Of ceramics :6.6 kV or above but up to 11 kV +85462033,ELECTRICAL INSULATORS OF ANY MATERIAL:Of ceramics :Above 11 kV but upto 66 kV +85462039,ELECTRICAL INSULATORS OF ANY MATERIAL:Of ceramics :Above 66 kV +85462040,ELECTRICAL INSULATORS OF ANY MATERIAL:Of ceramics :Other high tension procelain solid core insulators +85462050,ELECTRICAL INSULATORS OF ANY MATERIAL:Of ceramics :Other low tension procelain insulators including telegraph and telephone insulators +85462090,ELECTRICAL INSULATORS OF ANY MATERIAL:Of ceramics :Other +85469000,electrical insulators of any material >> other +85469010,ELECTRICAL INSULATORS OF ANY MATERIAL:Other :Heat shrinkable components +85469090,ELECTRICAL INSULATORS OF ANY MATERIAL:Other :Other +85470000,insulating fittings for electrical machines appliances or equipment being fittings wholly of any insulating material apart from minorcomponents of metal for example threaded sockets incorporated during moulding solely for the purposes of assembly other than insulators of heading 8546 electrical conduit tubing and joints therefor of base metal lined with insulating material +85471000,insulating fittings for electrical machines appliances or equipment being fittings wholly of any insulating material apart from minorcomponents of metal for example threaded sockets incorporated during moulding solely for the purposes of assembly other than insulators of heading 8546 electrical conduit tubing and joints therefor of base metal lined with insulating material >> insulating fittings of ceramics material insulating fittings of ceramics +85471010,"INSULATING FITTINGS FOR ELECTRICAL MACHINES, APPLIANCES OR EQUIPMENT, BEING FITTINGS WHOLLY OF INSULATING MATERIAL APART FROM ANY MINOR COMPONENTS OF METAL (FOR EXAMPLE, THREADED SOCKETS) INCORPORATED DURING MOULDING SOLELY FOR THE PURPOSES OF ASSEMBLY, OTHER THAN INSULATORS OF HEADING 8546; ELECTRICAL CONDUIT TUBING AND JOINTS THEREFOR, OF BASE METAL LINED WITH INSULATING MATERIAL:Insulating fittings of ceramics :Porcelain bushing below 6.6 kV" +85471020,"INSULATING FITTINGS FOR ELECTRICAL MACHINES, APPLIANCES OR EQUIPMENT, BEING FITTINGS WHOLLY OF INSULATING MATERIAL APART FROM ANY MINOR COMPONENTS OF METAL (FOR EXAMPLE, THREADED SOCKETS) INCORPORATED DURING MOULDING SOLELY FOR THE PURPOSES OF ASSEMBLY, OTHER THAN INSULATORS OF HEADING 8546; ELECTRICAL CONDUIT TUBING AND JOINTS THEREFOR, OF BASE METAL LINED WITH INSULATING MATERIAL:Insulating fittings of ceramics :Porcelain bushings for voltage 6.6 kV or above but below 11 kV" +85471030,"INSULATING FITTINGS FOR 
ELECTRICAL MACHINES, APPLIANCES OR EQUIPMENT, BEING FITTINGS WHOLLY OF INSULATING MATERIAL APART FROM ANY MINOR COMPONENTS OF METAL (FOR EXAMPLE, THREADED SOCKETS) INCORPORATED DURING MOULDING SOLELY FOR THE PURPOSES OF ASSEMBLY, OTHER THAN INSULATORS OF HEADING 8546; ELECTRICAL CONDUIT TUBING AND JOINTS THEREFOR, OF BASE METAL LINED WITH INSULATING MATERIAL:Insulating fittings of ceramics :Porcelain bushings for voltage 11 kV or above but upto 66 kV" +85471040,"INSULATING FITTINGS FOR ELECTRICAL MACHINES, APPLIANCES OR EQUIPMENT, BEING FITTINGS WHOLLY OF INSULATING MATERIAL APART FROM ANY MINOR COMPONENTS OF METAL (FOR EXAMPLE, THREADED SOCKETS) INCORPORATED DURING MOULDING SOLELY FOR THE PURPOSES OF ASSEMBLY, OTHER THAN INSULATORS OF HEADING 8546; ELECTRICAL CONDUIT TUBING AND JOINTS THEREFOR, OF BASE METAL LINED WITH INSULATING MATERIAL:Insulating fittings of ceramics :Porcelain bushings for voltage 66 kV or above" +85471090,"INSULATING FITTINGS FOR ELECTRICAL MACHINES, APPLIANCES OR EQUIPMENT, BEING FITTINGS WHOLLY OF INSULATING MATERIAL APART FROM ANY MINOR COMPONENTS OF METAL (FOR EXAMPLE, THREADED SOCKETS) INCORPORATED DURING MOULDING SOLELY FOR THE PURPOSES OF ASSEMBLY, OTHER THAN INSULATORS OF HEADING 8546; ELECTRICAL CONDUIT TUBING AND JOINTS THEREFOR, OF BASE METAL LINED WITH INSULATING MATERIAL:Insulating fittings of ceramics :Other" +85472000,"INSULATING FITTINGS FOR ELECTRICAL MACHINES, APPLIANCES OR EQUIPMENT, BEING FITTINGS WHOLLY OF INSULATING MATERIAL APART FROM ANY MINOR COMPONENTS OF METAL (FOR EXAMPLE, THREADED SOCKETS) INCORPORATED DURING MOULDING SOLELY FOR THE PURPOSES OF ASSEMBLY, OTHER THAN INSULATORS OF HEADING 8546; ELECTRICAL CONDUIT TUBING AND JOINTS THEREFOR, OF BASE METAL LINED WITH INSULATING MATERIAL::Insulating fittings of plastics" +85479000,insulating fittings for electrical machines appliances or equipment being fittings wholly of any insulating material apart from minorcomponents of metal for example threaded sockets incorporated during moulding solely for the purposes of assembly other than insulators of heading 8546 electrical conduit tubing and joints therefor of base metal lined with insulating material >> other +85479010,"INSULATING FITTINGS FOR ELECTRICAL MACHINES, APPLIANCES OR EQUIPMENT, BEING FITTINGS WHOLLY OF INSULATING MATERIAL APART FROM ANY MINOR COMPONENTS OF METAL (FOR EXAMPLE, THREADED SOCKETS) INCORPORATED DURING MOULDING SOLELY FOR THE PURPOSES OF ASSEMBLY, OTHER THAN INSULATORS OF HEADING 8546; ELECTRICAL CONDUIT TUBING AND JOINTS THEREFOR, OF BASE METAL LINED WITH INSULATING MATERIAL:Other :Electrical insulating fittings of glass" +85479020,"INSULATING FITTINGS FOR ELECTRICAL MACHINES, APPLIANCES OR EQUIPMENT, BEING FITTINGS WHOLLY OF INSULATING MATERIAL APART FROM ANY MINOR COMPONENTS OF METAL (FOR EXAMPLE, THREADED SOCKETS) INCORPORATED DURING MOULDING SOLELY FOR THE PURPOSES OF ASSEMBLY, OTHER THAN INSULATORS OF HEADING 8546; ELECTRICAL CONDUIT TUBING AND JOINTS THEREFOR, OF BASE METAL LINED WITH INSULATING MATERIAL:Other :Electrical conduit tubing and joints therefor, of base metal lined with insulating material" +85479090,"INSULATING FITTINGS FOR ELECTRICAL MACHINES, APPLIANCES OR EQUIPMENT, BEING FITTINGS WHOLLY OF INSULATING MATERIAL APART FROM ANY MINOR COMPONENTS OF METAL (FOR EXAMPLE, THREADED SOCKETS) INCORPORATED DURING MOULDING SOLELY FOR THE PURPOSES OF ASSEMBLY, OTHER THAN INSULATORS OF HEADING 8546; ELECTRICAL CONDUIT TUBING AND JOINTS THEREFOR, OF BASE METAL LINED WITH INSULATING MATERIAL:Other :Other" 
+85480000,"WASTE AND SCRAP OF PRIMARY CELLS, PRIMARY BATTERIES AND ELECTRIC ACCUMULATORS; SPENT PRIMARY CELLS, SPENT PRIMARY BATTERIES AND SPENT ELECTRIC ACCUMULATORS; ELECTRICAL PARTS OF MACHINERY OR APPARATUS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS::electrical parts of machinery or apparatus, not specified or included elsewhere in this chapter(OLD tariff)" +85481010,"WASTE AND SCRAP OF PRIMARY CELLS, PRIMARY BATTERIES AND ELECTRIC ACCUMULATORS; SPENT PRIMARY CELLS, SPENT PRIMARY BATTERIES AND SPENT ELECTRIC ACCUMULATORS; ELECTRICAL PARTS OF MACHINERY OR APPARATUS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS:Waste and scrap of primary cells, primary batteries and electric accumulators; spent primary cells, spent primary batteries and spent electric accumulators :Battery scrap, namely the following lead battery plates covered by ISRI code word Rails; battery lugs covered by ISRI code word Rakes.(OLD tariff)" +85481020,"WASTE AND SCRAP OF PRIMARY CELLS, PRIMARY BATTERIES AND ELECTRIC ACCUMULATORS; SPENT PRIMARY CELLS, SPENT PRIMARY BATTERIES AND SPENT ELECTRIC ACCUMULATORS; ELECTRICAL PARTS OF MACHINERY OR APPARATUS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS:Waste and scrap of primary cells, primary batteries and electric accumulators; spent primary cells, spent primary batteries and spent electric accumulators :Battery waste, namely the following Scrap drained or dry while intact, lead batteries covered by ISRI code word Rains; scrap wet whole intact lead batteries covered by ISRI code word Rink ; scrap industrial intact lead cells covered by ISRI code word Rono ; scrap whole intact industrial lead batteries covered by ISRI code word Roper ; edison batteries covered by ISRI code word Vaunt(OLD tariff)" +85481090,"WASTE AND SCRAP OF PRIMARY CELLS, PRIMARY BATTERIES AND ELECTRIC ACCUMULATORS; SPENT PRIMARY CELLS, SPENT PRIMARY BATTERIES AND SPENT ELECTRIC ACCUMULATORS; ELECTRICAL PARTS OF MACHINERY OR APPARATUS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS:Waste and scrap of primary cells, primary batteries and electric accumulators; spent primary cells, spent primary batteries and spent electric accumulators :Other waste and scrap(OLD tariff)" +85489000,"WASTE AND SCRAP OF PRIMARY CELLS, PRIMARY BATTERIES AND ELECTRIC ACCUMULATORS; SPENT PRIMARY CELLS, SPENT PRIMARY BATTERIES AND SPENT ELECTRIC ACCUMULATORS; ELECTRICAL PARTS OF MACHINERY OR APPARATUS, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS::Other(OLD tariff)" +85491100,ELECTRICAL AND ELECTRONIC WASTE AND SCRAP::Waste and scrap of lead-acid accumulators; spent lead-acid accumulators(OLD tariff) +85491200,"ELECTRICAL AND ELECTRONIC WASTE AND SCRAP::Other, containing lead, cadmium or mercury(OLD tariff)" +85491400,"ELECTRICAL AND ELECTRONIC WASTE AND SCRAP::Unsorted and not containing lead, cadmium or mercury(OLD tariff)" +85491900,ELECTRICAL AND ELECTRONIC WASTE AND SCRAP::Other(OLD tariff) +85492100,"ELECTRICAL AND ELECTRONIC WASTE AND SCRAP::Containing primary cells, primary batteries, electric accumulators, mercuryswitches, glass from cathode ray tubes or other activated glass, or electrical or electronic components containing cadmium, mercury, lead or polychlorinated biphenyls (PCBs)(OLD tariff)" +85492900,ELECTRICAL AND ELECTRONIC WASTE AND SCRAP:oth:other(OLD tariff) +85493100,ELECTRICAL AND ELECTRONIC WASTE AND SCRAP::Other(OLD tariff) +85493900,ELECTRICAL AND ELECTRONIC WASTE AND SCRAP:Oth:Other(OLD tariff) +85499100,"ELECTRICAL AND ELECTRONIC WASTE AND SCRAP:cont:Containing primary cells, primary batteries, electric 
accumulators, mercury-switches, glass from cathode ray tubes or other activated glass, or electrical or electronic components containing cadmium, mercury, lead or polychlorinated biphenyls (PCBs)(OLD tariff)" +85499900,ELECTRICAL AND ELECTRONIC WASTE AND SCRAP::Other(OLD tariff) +86010000,source of electricity or accumulators +86011000,RAIL LOCOMOTIVES POWERED FROM AN EXTERNAL SOURCE OF ELECTRICITY OR BY ELECTRIC ACCUMULATORS::Powered from an external source of +86012000,RAIL LOCOMOTIVES POWERED FROM AN EXTERNAL SOURCE OF ELECTRICITY OR BY ELECTRIC ACCUMULATORS::Powered by electric accumulators +86020000,other rail locomotives locomotive tenders +86021000,OTHER RAIL LOCOMOTIVES; LOCOMOTIVE TENDERS::Diesel-electric locomotives +86029000,other rail locomotives locomotive tenders >> other +86029010,OTHER RAIL LOCOMOTIVES; LOCOMOTIVE TENDERS:Other :Steam locomotives and tenders thereof +86029090,OTHER RAIL LOCOMOTIVES; LOCOMOTIVE TENDERS:Other :Other +86030000,railway or tramway coaches vans and trucks other than those of heading 8604 +86031000,"SELF-PROPELLED RAILWAY OR TRAMWAY COACHES, VANS AND TRUCKS, OTHER THAN THOSE OF HEADING 8604::Powered from an external source of electricity" +86039000,"SELF-PROPELLED RAILWAY OR TRAMWAY COACHES, VANS AND TRUCKS, OTHER THAN THOSE OF HEADING 8604::Other" +86040000,"::RAILWAY OR TRAMWAY MAINTENANCE OR SERVICE VEHICLES WHETHER OR NOT SELF-PROPELLED (FOR EXAMPLE, WORKSHOPS, CRANES, BALLAST TAMPERS, TRACK-LINERS,TESTING COACHES AND TRACK INSPECTION VEHICLES)" +86050000,"::RAILWAY OR TRAMWAY PASSENGER COACHES, NOT SELF-PROPELLED; LUGGAGE VANS, POST OFFICE COACHES AND OTHER SPECIAL PURPOSE RAILWAY OR TRAMWAY COACHES, NOT SELF-PROPELLED (EXCLUDING THOSE OF HEADING 8604)" +86060000,railway or tramway goods vans and wagons not +86061000,railway or tramway goods vans and wagons not >> tank wagons and the like +86061010,"RAILWAY OR TRAMWAY GOODS VANS AND WAGONS, NOT SELF-PROPELLED:Tank wagons and the like :Four wheeler tank wagons of pay-load exceeding 23 tonnes" +86061020,"RAILWAY OR TRAMWAY GOODS VANS AND WAGONS, NOT SELF-PROPELLED:Tank wagons and the like :Eight wheeler tank wagons of pay-load not exceeding 60 tonnes" +86061090,"RAILWAY OR TRAMWAY GOODS VANS AND WAGONS, NOT SELF-PROPELLED:Tank wagons and the like :Other" +86063000,"RAILWAY OR TRAMWAY GOODS VANS AND WAGONS, NOT SELF-PROPELLED::Self-discharging vans and wagons, other than those of sub-heading 8606 10" +86069100,railway or tramway goods vans and wagons not >> covered and closed +86069110,"RAILWAY OR TRAMWAY GOODS VANS AND WAGONS, NOT SELF-PROPELLED:Covered and closed :Meter guage eight wheeler covered wagons of pay-load not exceeding 38 tonnes" +86069120,"RAILWAY OR TRAMWAY GOODS VANS AND WAGONS, NOT SELF-PROPELLED:Covered and closed :Broad guage eight wheeler covered wagons of pay-load not exceeding 60 tonnes" +86069190,"RAILWAY OR TRAMWAY GOODS VANS AND WAGONS, NOT SELF-PROPELLED:Covered and closed :Other" +86069200,railway or tramway goods vans and wagons not >> open with sides of a height exceeding 60 cms +86069210,"RAILWAY OR TRAMWAY GOODS VANS AND WAGONS, NOT SELF-PROPELLED:Open with non-removable sides of a height exceeding 60 cms :Bogie eight wheeler wagons of pay-load not exceeding 60 tonnes" +86069220,"RAILWAY OR TRAMWAY GOODS VANS AND WAGONS, NOT SELF-PROPELLED:Open with non-removable sides of a height exceeding 60 cms :Broad guage bogie eight wheeler wagons of pay-load exceeding 60 tonnes but not exceeding 67 tonnes" +86069290,"RAILWAY OR TRAMWAY GOODS VANS AND WAGONS, NOT 
SELF-PROPELLED:Open with non-removable sides of a height exceeding 60 cms :Other" +86069900,"RAILWAY OR TRAMWAY GOODS VANS AND WAGONS, NOT SELF-PROPELLED::Other" +86070000,parts of railway or tramway locomotives or bogies axles and wheels and parts thereof +86071100,PARTS OF RAILWAY OR TRAMWAY LOCOMOTIVES OR ROLLING-STOCK::Driving bogies and bissel-bogies +86071200,PARTS OF RAILWAY OR TRAMWAY LOCOMOTIVES OR ROLLING-STOCK::Other bogies and bissel-bogies +86071900,parts of railway or tramway locomotives or bogies axles and wheels and parts thereof >> other including parts +86071910,"PARTS OF RAILWAY OR TRAMWAY LOCOMOTIVES OR ROLLING-STOCK:Other including parts :Axles, wheels for coaches, van and wagons (rolling-stock)" +86071920,PARTS OF RAILWAY OR TRAMWAY LOCOMOTIVES OR ROLLING-STOCK:Other including parts :Axles and wheels for locomotives +86071930,PARTS OF RAILWAY OR TRAMWAY LOCOMOTIVES OR ROLLING-STOCK:Other including parts :Axle boxes (lubricating or grease box) +86071990,PARTS OF RAILWAY OR TRAMWAY LOCOMOTIVES OR ROLLING-STOCK:Other including parts :Other parts of axles and wheels +86072100,PARTS OF RAILWAY OR TRAMWAY LOCOMOTIVES OR ROLLING-STOCK::Air brakes and parts thereof +86072900,PARTS OF RAILWAY OR TRAMWAY LOCOMOTIVES OR ROLLING-STOCK::Other +86073000,parts of railway or tramway locomotives or bogies axles and wheels and parts thereof >> hooks and other coupling devices buffers and parts thereof +86073010,"PARTS OF RAILWAY OR TRAMWAY LOCOMOTIVES OR ROLLING-STOCK:Hooks and other coupling devices, buffers and parts thereof :Buffers and coupling devices" +86073090,"PARTS OF RAILWAY OR TRAMWAY LOCOMOTIVES OR ROLLING-STOCK:Hooks and other coupling devices, buffers and parts thereof :Other" +86079100,PARTS OF RAILWAY OR TRAMWAY LOCOMOTIVES OR ROLLING-STOCK::Of locomotives +86079900,parts of railway or tramway locomotives or bogies axles and wheels and parts thereof >> other +86079910,PARTS OF RAILWAY OR TRAMWAY LOCOMOTIVES OR ROLLING-STOCK:Other :Parts of coach work of railway running stock +86079920,"PARTS OF RAILWAY OR TRAMWAY LOCOMOTIVES OR ROLLING-STOCK:Other :Parts of tramway, locomotives and running stock" +86079930,PARTS OF RAILWAY OR TRAMWAY LOCOMOTIVES OR ROLLING-STOCK:Other :Hydraulic shock absorbers for railway bogies +86079990,PARTS OF RAILWAY OR TRAMWAY LOCOMOTIVES OR ROLLING-STOCK:Other :Other +86080000,railway or tramway track fixtures and fittings mechanical including electo mechanical signalling safety or traffic control equipment for railway tramways roads inland waterways parking facilities port installation or parts of the foregoing railway or tramway track fixtures and fittings mechanical including signalling safety or traffic control equipment for railway tramways roads inland waterways parking facilities port installation or parts of the foregoing +86080010,"RAILWAY OR TRAMWAY TRACK FIXTURES AND FITTINGS; MECHANICAL (INCLUDING ELECTO-MECHANICAL) SIGNALLING, SAFETY OR TRAFFIC CONTROL EQUIPMENT FOR RAILWAY, TRAMWAYS, ROADS, INLAND WATERWAYS, PARKING FACILITIES, PORT INSTALLATION OR AIRFIELDS; PARTS OF THE FOREGOING:Railway or tramway track fixtures and fittings; mechanical (including electomechanical) signalling, safety or traffic control equipment for railway, tramways, roads, inland waterways, parking facilities, port installation or air-fields; parts of the foregoing :Railway and tramway track fixtures and fittings" +86080020,"RAILWAY OR TRAMWAY TRACK FIXTURES AND FITTINGS; MECHANICAL (INCLUDING ELECTO-MECHANICAL) SIGNALLING, SAFETY OR TRAFFIC CONTROL EQUIPMENT 
FOR RAILWAY, TRAMWAYS, ROADS, INLAND WATERWAYS, PARKING FACILITIES, PORT INSTALLATION OR AIRFIELDS; PARTS OF THE FOREGOING:Railway or tramway track fixtures and fittings; mechanical (including electomechanical) signalling, safety or traffic control equipment for railway, tramways, roads, inland waterways, parking facilities, port installation or air-fields; parts of the foregoing :Mechanical equipment, not electrically powered for signalling to, or controlling, road rail or other vehicles, ships or aircraft" +86080030,"RAILWAY OR TRAMWAY TRACK FIXTURES AND FITTINGS; MECHANICAL (INCLUDING ELECTO-MECHANICAL) SIGNALLING, SAFETY OR TRAFFIC CONTROL EQUIPMENT FOR RAILWAY, TRAMWAYS, ROADS, INLAND WATERWAYS, PARKING FACILITIES, PORT INSTALLATION OR AIRFIELDS; PARTS OF THE FOREGOING:Railway or tramway track fixtures and fittings; mechanical (including electomechanical) signalling, safety or traffic control equipment for railway, tramways, roads, inland waterways, parking facilities, port installation or air-fields; parts of the foregoing :Other traffic control equipment for railways" +86080040,"RAILWAY OR TRAMWAY TRACK FIXTURES AND FITTINGS; MECHANICAL (INCLUDING ELECTO-MECHANICAL) SIGNALLING, SAFETY OR TRAFFIC CONTROL EQUIPMENT FOR RAILWAY, TRAMWAYS, ROADS, INLAND WATERWAYS, PARKING FACILITIES, PORT INSTALLATION OR AIRFIELDS; PARTS OF THE FOREGOING:Railway or tramway track fixtures and fittings; mechanical (including electomechanical) signalling, safety or traffic control equipment for railway, tramways, roads, inland waterways, parking facilities, port installation or air-fields; parts of the foregoing :Other traffic control equipment for roads or inland waterways including automatic traffic control equipment for use at ports and airports" +86080090,"RAILWAY OR TRAMWAY TRACK FIXTURES AND FITTINGS; MECHANICAL (INCLUDING ELECTO-MECHANICAL) SIGNALLING, SAFETY OR TRAFFIC CONTROL EQUIPMENT FOR RAILWAY, TRAMWAYS, ROADS, INLAND WATERWAYS, PARKING FACILITIES, PORT INSTALLATION OR AIRFIELDS; PARTS OF THE FOREGOING:Railway or tramway track fixtures and fittings; mechanical (including electomechanical) signalling, safety or traffic control equipment for railway, tramways, roads, inland waterways, parking facilities, port installation or air-fields; parts of the foregoing :Other" +86090000,::CONTAINERS (INCLUDING CONTAINERS FOR THE TRANSPORT OF FLUIDS) SPECIALLY DESIGNED AND EQUIPPED FOR CARRIAGE BY ONE OR MORE MODES OF TRANSPORT +87010000,tractors other than tractors of heading 8709 +87011000,TRACTORS (OTHER THAN TRACTORS OF HEADING 8709)::Pedestrian controlled tractors +87012010,"TRACTORS (OTHER THAN TRACTORS OF HEADING 8709):Road tractors for semi-trailers :Of engine capacity not exceeding 1,800 cc(OLD tariff)" +87012090,TRACTORS (OTHER THAN TRACTORS OF HEADING 8709):Road tractors for semi-trailers :Other(OLD tariff) +87012100,TRACTORS (OTHER THAN TRACTORS OF HEADING 8709)::With only compression-ignition internal combustion piston engine (diesel or semi-diesel) +87012200,TRACTORS (OTHER THAN TRACTORS OF HEADING 8709)::With both compression-ignition internal combustion piston engine (diesel or semi-diesel) and electric motor as motors for propulsion +87012300,TRACTORS (OTHER THAN TRACTORS OF HEADING 8709)::With both spark-ignition internal combustion piston engine and electric motor as motors for propulsion +87012400,TRACTORS (OTHER THAN TRACTORS OF HEADING 8709)::With only electric motor for propulsion +87012900,TRACTORS (OTHER THAN TRACTORS OF HEADING 8709)::Other +87013000,tractors other than tractors 
of heading 8709 >> tractors garden tractors +87013011,"TRACTORS (OTHER THAN TRACTORS OF HEADING 8709):Track-laying tractors :Of engine capacity not exceeding 1,800 cc" +87013019,TRACTORS (OTHER THAN TRACTORS OF HEADING 8709):Track-laying tractors :Other +87013091,"TRACTORS (OTHER THAN TRACTORS OF HEADING 8709):Track-laying tractors :Of engine capacity not exceeding 1,800 cc" +87013099,TRACTORS (OTHER THAN TRACTORS OF HEADING 8709):Track-laying tractors :Other +87019010,"TRACTORS (OTHER THAN TRACTORS OF HEADING 8709):Other :Of engine capacity not exceeding 1,800 cc(OLD tariff)" +87019090,TRACTORS (OTHER THAN TRACTORS OF HEADING 8709):Other :Other(OLD tariff) +87019100,TRACTORS (OTHER THAN TRACTORS OF HEADING 8709)::Not exceeding 18 kW +87019200,TRACTORS (OTHER THAN TRACTORS OF HEADING 8709)::Exceeding 18 kW but not exceeding 37 kW +87019300,TRACTORS (OTHER THAN TRACTORS OF HEADING 8709)::Exceeding 37 kW but not exceeding 75 kW +87019400,TRACTORS (OTHER THAN TRACTORS OF HEADING 8709)::Exceeding 75 kW but not exceeding 130 kW +87019500,TRACTORS (OTHER THAN TRACTORS OF HEADING 8709)::Exceeding 130 kW +87020000,motor vehicles for the transport of ten or more persons including the driver +87021000,motor vehicles for the transport of ten or more persons including the driver >> only internal with combustion piston engine diesel or vehicles for transport of not more than 13 persons including the driver +87021011,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With compression-ignition internal combustion piston engine (diesel or semi-diesel) :Integrated monocoque vehicle" +87021012,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With compression-ignition internal combustion piston engine (diesel or semi-diesel) :Air-conditioned vehicle" +87021018,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With only compression-ignition internal combustion piston engine (diesel or semi-diesel):Other, air-conditioned" +87021019,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With only compression-ignition internal combustion piston engine (diesel or semi-diesel):Other, non air-conditioned" +87021021,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With only compression-ignition internal combustion piston engine (diesel or semi-diesel):Integrated monocoque vehicle, air-conditioned" +87021022,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With only compression-ignition internal combustion piston engine (diesel or semi-diesel):Integrated monocoque vehicle, non air-conditioned" +87021028,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With only compression-ignition internal combustion piston engine (diesel or semi-diesel):Other, air-conditioned" +87021029,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With only compression-ignition internal combustion piston engine (diesel or semi-diesel):Other, non air-conditioned" +87021091,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With only compression-ignition internal combustion piston engine (diesel or semi-diesel):Integrated moncoque vehicle(OLD tariff)" +87021092,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With only compression-ignition internal combustion piston engine (diesel or semi-diesel):Air-conditioned vehicle(OLD tariff)" +87021099,"MOTOR VEHICLES FOR 
THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With compression-ignition internal combustion piston engine (diesel or semi-diesel) :Other(OLD tariff)" +87022000,motor vehicles for the transport of ten or more persons including the driver >> with both internal combustion piston engine diesel or semi diesel and electric motor as motors for propulsion vehicles for transport of not more than 13 persons including the driver +87022011,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With both compression-ignition internal combustion piston engine (diesel or semi-diesel) and electric motor as motors for propulsion:Integrated monocoque vehicle, air-conditioned" +87022012,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With both compression-ignition internal combustion piston engine (diesel or semi-diesel) and electric motor as motors for propulsion:Integrated monocoque vehicle, non air-conditioned" +87022018,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With both compression-ignition internal combustion piston engine (diesel or semi-diesel) and electric motor as motors for propulsion:Other, air-conditioned" +87022019,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With both compression-ignition internal combustion piston engine (diesel or semi-diesel) and electric motor as motors for propulsion:Other, non air-conditioned" +87022021,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With both compression-ignition internal combustion piston engine (diesel or semi-diesel) and electric motor as motors for propulsion:Integrated monocoque vehicle, air-conditioned" +87022022,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With both compression-ignition internal combustion piston engine (diesel or semi-diesel) and electric motor as motors for propulsion:Integrated monocoque vehicle, non air-conditioned" +87022028,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With both compression-ignition internal combustion piston engine (diesel or semi-diesel) and electric motor as motors for propulsion:Other, air-conditioned" +87022029,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With both compression-ignition internal combustion piston engine (diesel or semi-diesel) and electric motor as motors for propulsion:Other, non air-conditioned" +87023000,motor vehicles for the transport of ten or more persons including the driver >> with both internal combustion piston engine and electric motor as motors for propulsion vehicles for transport of not more than 13 persons including the driver +87023011,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With both spark-ignition internal combustion reciprocating piston engine and electric motor as motors for propulsion:Integrated monocoque vehicle, air-conditioned" +87023012,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With both spark-ignition internal combustion reciprocating piston engine and electric motor as motors for propulsion:Integrated monocoque vehicle, non air-conditioned" +87023018,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With both spark-ignition internal combustion reciprocating piston engine and electric motor as motors for propulsion:Other, air-conditioned" +87023019,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR 
MORE PERSONS, INCLUDING THE DRIVER:With both spark-ignition internal combustion reciprocating piston engine and electric motor as motors for propulsion:Other, non air-conditioned" +87023021,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With both spark-ignition internal combustion reciprocating piston engine and electric motor as motors for propulsion:Integrated monocoque vehicle, air-conditioned" +87023022,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With both spark-ignition internal combustion reciprocating piston engine and electric motor as motors for propulsion:Integrated monocoque vehicle, non air-conditioned" +87023028,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With both spark-ignition internal combustion reciprocating piston engine and electric motor as motors for propulsion:Other, air-conditioned" +87023029,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With both spark-ignition internal combustion reciprocating piston engine and electric motor as motors for propulsion:Other, non air-conditioned" +87024000,motor vehicles for the transport of ten or more persons including the driver >> with only electric motor for propulsion vehicles for transport of not more than 13persons including the driver +87024011,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With only electric motor for propulsion:Integrated monocoque vehicle, air-conditioned" +87024012,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With only electric motor for propulsion:Integrated monocoque vehicle, non air-conditioned" +87024018,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With only electric motor for propulsion:Other, air-conditioned" +87024019,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With only electric motor for propulsion:Other, non air-conditioned" +87024021,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With only electric motor for propulsion:Integrated monocoque vehicle, air-conditioned" +87024022,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With only electric motor for propulsion:Integrated monocoque vehicle, non air-conditioned" +87024028,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With only electric motor for propulsion:Other, air-conditioned" +87024029,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:With only electric motor for propulsion:Other, non air-conditioned" +87029000,motor vehicles for the transport of ten or more persons including the driver >> other vehicles for transport of not more than 13 persons including the driver +87029011,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:Other :Integrated monocoque vehicle" +87029012,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:Other :Integrated monocoque vehicle, non air-conditioned" +87029013,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:Other :Electrically operated(OLD tariff)" +87029018,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:Other :Other, air-conditioned" +87029019,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:Other :Other, non air-conditioned" +87029020,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN 
OR MORE PERSONS, INCLUDING THE DRIVER:Other :Electrically operated vehicles not elsewhere included or specified(OLD tariff)" +87029021,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:Other :Integrated monocoque vehicle, air-conditioned" +87029022,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:Other :Integrated monocoque vehicle, non air-conditioned" +87029028,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:Other :Other, air-conditioned" +87029029,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:Other :Other, non air-conditioned" +87029091,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:Other :Integrated monocoque vehicle(OLD tariff)" +87029092,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:Other :Air-conditioned vehicle(OLD tariff)" +87029099,"MOTOR VEHICLES FOR THE TRANSPORT OF TEN OR MORE PERSONS, INCLUDING THE DRIVER:Other :Other(OLD tariff)" +87030000,cars and other motor vehicles motor principally designed for the transport of persons other than those of heading 8702 including station wagons and racing cars +87031000,cars and other motor vehicles motor principally designed for the transport of persons other than those of heading 8702 including station wagons and racing cars >> vehicles specially designed for travelling on snow golf cars and similar vehicles +87031010,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Vehicles specially designed for travelling on snow; golf cars and similar vehicles :Electrically operated" +87031090,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Vehicles specially designed for travelling on snow; golf cars and similar vehicles :Other" +87032100,cars and other motor vehicles motor principally designed for the transport of persons other than those of heading 8702 including station wagons and racing cars >> of a cylinder capacity not exceeding cc +87032110,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity not exceeding 1,000 cc :Vehicles principally designed for the transport of more than seven persons, including the driver" +87032120,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity not exceeding 1,000 cc :Three-wheeled vehicles" +87032191,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity not exceeding 1,000 cc :Motor cars" +87032192,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity not exceeding 1,000 cc :Specialised transport vehicles such as ambulances, prison vans and the like" +87032199,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity not exceeding 1,000 cc :Other" 
+87032200,cars and other motor vehicles motor principally designed for the transport of persons other than those of heading 8702 including station wagons and racing cars >> of a cylinder capacity exceeding cc but not exceeding cc +87032210,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 1,000 cc but not exceeding 1,500 cc :Vehicles principally designed for the transport of more than seven persons, including the driver" +87032220,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 1,000 cc but not exceeding 1,500 cc :Specialised transport vehicles such as ambulances, prison vans and the like" +87032230,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 1,000 cc but not exceeding 1,500 cc :Three-wheeled vehicles" +87032291,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 1,000 cc but not exceeding 1,500 cc :Motor cars" +87032299,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 1,000 cc but not exceeding 1,500 cc :Other" +87032300,cars and other motor vehicles motor principally designed for the transport of persons other than those of heading 8702 including station wagons and racing cars >> of a cylinder capacity exceeding cc but not exceeding cc +87032310,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 1,500 cc but not exceeding 3,000 cc :Vehicles principally designed for the transport of more than seven persons including the driver" +87032320,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 1,500 cc but not exceeding 3,000 cc :Three-wheeled vehicles" +87032391,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 1,500 cc but not exceeding 3,000 cc :Motor cars" +87032392,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 1,500 cc but not exceeding 3,000 cc :Specialised transport vehicles such as ambulances, prison vans and the like" +87032399,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 1,500 cc but not exceeding 3,000 cc :Other" +87032400,cars and other motor vehicles motor principally designed for the transport of persons other than those of heading 8702 including station wagons and racing cars >> of a cylinder capacity exceeding cc 
+87032410,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 3,000 cc :Vehicles principally designed for the transport of more than seven persons, including the driver" +87032420,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 3,000 cc :Three-wheeled vehicles" +87032491,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 3,000 cc :Motor cars" +87032492,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 3,000 cc :Specialised transport vehicles such as ambulances, prison vans and the like" +87032499,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 3,000 cc :Other" +87033100,cars and other motor vehicles motor principally designed for the transport of persons other than those of heading 8702 including station wagons and racing cars >> of a cylinder capacity not exceeding cc +87033110,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity not exceeding 1,500 cc :Vehicles principally designed for the transport of more than seven persons, including the driver" +87033120,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity not exceeding 1,500 cc :Three-wheeled vehicles(OLD tariff)" +87033191,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity not exceeding 1,500 cc :Motor cars" +87033192,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity not exceeding 1,500 cc :Specialised transport vehicles such as ambulances, prison vans and the like" +87033199,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity not exceeding 1,500 cc :Other" +87033200,cars and other motor vehicles motor principally designed for the transport of persons other than those of heading 8702 including station wagons and racing cars >> of a cylinder capacity exceeding cc but not exceeding cc +87033210,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 1,500 cc but not exceeding 2,500 cc :Vehicles principally designed for the transport of more than seven persons, including the driver" +87033220,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 
8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 1,500 cc but not exceeding 2,500 cc :Three-wheeled vehicles(OLD tariff)" +87033291,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 1,500 cc but not exceeding 2,500 cc :Motor cars" +87033292,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 1,500 cc but not exceeding 2,500 cc :Specialized transport vehicles such as ambulances, prison vans and the like" +87033299,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 1,500 cc but not exceeding 2,500 cc :Other" +87033300,cars and other motor vehicles motor principally designed for the transport of persons other than those of heading 8702 including station wagons and racing cars >> of a cylinder capacity exceeding cc +87033310,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 2,500 cc :Vehicles principally designed for the transport of more than seven persons, including the driver" +87033320,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 2,500 cc :Three-wheeled vehicles" +87033391,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 2,500 cc :Motor cars" +87033392,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 2,500 cc :Specialised transport vehicles such as ambulances, prison vans and the like" +87033399,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Of a cylinder capacity exceeding 2,500 cc :Other" +87034000,cars and other motor vehicles motor principally designed for the transport of persons other than those of heading 8702 including station wagons and racing cars >> other vehicles with both internal combustion piston engine and electric motor as motors for propulsion other than those capable of being charged by plugging to external source of electric power +87034010,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other vehicles, with both spark-ignition internal combustion reciprocating piston engine and electric motor as motors for propulsion, other than those capable of being charged by plugging to external source of electric power:Vehicles principally designed for transport of more than seven persons, including driver" +87034020,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other vehicles, with both 
spark-ignition internal combustion reciprocating piston engine and electric motor as motors for propulsion, other than those capable of being charged by plugging to external source of electric power:Specialised transport vehicles such as ambulances, prison vans and the like" +87034030,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other vehicles, with both spark-ignition internal combustion reciprocating piston engine and electric motor as motors for propulsion, other than those capable of being charged by plugging to external source of electric power:Motor cars" +87034040,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other vehicles, with both spark-ignition internal combustion reciprocating piston engine and electric motor as motors for propulsion, other than those capable of being charged by plugging to external source of electric power:Three-wheeled vehicles" +87034090,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other vehicles, with both spark-ignition internal combustion reciprocating piston engine and electric motor as motors for propulsion, other than those capable of being charged by plugging to external source of electric power:Other" +87035000,cars and other motor vehicles motor principally designed for the transport of persons other than those of heading 8702 including station wagons and racing cars >> other vehicles with both internal combustion piston engine diesel or semi diesel and electric motor as motors for propulsion other than those capable of being charged by plugging to external source of electric power +87035010,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other vehicles, with both compression-ignition internal combustion piston engine (diesel or semi-diesel) and electric motor as motors for propulsion, other than those capable of being charged by plugging to external source of electric power:Vehicles principally designed for transport of more than seven persons, including driver" +87035020,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other vehicles, with both compression-ignition internal combustion piston engine (diesel or semi-diesel) and electric motor as motors for propulsion, other than those capable of being charged by plugging to external source of electric power:Specialised transport vehicles such as ambulances, prison vans and the like" +87035030,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other vehicles, with both compression-ignition internal combustion piston engine (diesel or semi-diesel) and electric motor as motors for propulsion, other than those capable of being charged by plugging to external source of electric power:Motor cars" +87035040,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other vehicles, with both compression-ignition 
internal combustion piston engine (diesel or semi-diesel) and electric motor as motors for propulsion, other than those capable of being charged by plugging to external source of electric power:Three-wheeled vehicles" +87035090,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other vehicles, with both compression-ignition internal combustion piston engine (diesel or semi-diesel) and electric motor as motors for propulsion, other than those capable of being charged by plugging to external source of electric power:Other" +87036000,cars and other motor vehicles motor principally designed for the transport of persons other than those of heading 8702 including station wagons and racing cars >> other vehicles with both internal combustion piston engine and electric motor as motors for propulsion capable of being charged by plugging to external source of electric power +87036010,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other vehicles, with both spark-ignition internal combustion reciprocating piston engine and electric motor as motors for propulsion, capable of being charged by plugging to external source of electric power:Vehicles principally designed for transport of more than seven persons, including driver" +87036020,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other vehicles, with both spark-ignition internal combustion reciprocating piston engine and electric motor as motors for propulsion, capable of being charged by plugging to external source of electric power:Specialised transport vehicles such as ambulances, prison vans and the like" +87036030,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other vehicles, with both spark-ignition internal combustion reciprocating piston engine and electric motor as motors for propulsion, capable of being charged by plugging to external source of electric power:Motor cars" +87036040,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other vehicles, with both spark-ignition internal combustion reciprocating piston engine and electric motor as motors for propulsion, capable of being charged by plugging to external source of electric power:Three-wheeled vehicles" +87036090,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other vehicles, with both spark-ignition internal combustion reciprocating piston engine and electric motor as motors for propulsion, capable of being charged by plugging to external source of electric power:other" +87037000,cars and other motor vehicles motor principally designed for the transport of persons other than those of heading 8702 including station wagons and racing cars >> other vehicles with both internal combustion piston engine diesel or and electric motor as motors for propulsion capable of being charged by plugging to external source of electric power +87037010,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE 
TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other vehicles, with both compression-ignition internal combustion piston engine (diesel or semi-diesel) and electric motor as motors for propulsion, capable of being charged by plugging to external source of electric power:Vehicles principally designed for transport of more than seven persons, including driver" +87037020,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other vehicles, with both compression-ignition internal combustion piston engine (diesel or semi-diesel) and electric motor as motors for propulsion, capable of being charged by plugging to external source of electric power:Specialised transport vehicles such as ambulances, prison vans and the like" +87037030,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other vehicles, with both compression-ignition internal combustion piston engine (diesel or semi-diesel) and electric motor as motors for propulsion, capable of being charged by plugging to external source of electric power:Motor cars" +87037040,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other vehicles, with both compression-ignition internal combustion piston engine (diesel or semi-diesel) and electric motor as motors for propulsion, capable of being charged by plugging to external source of electric power:Three wheeled vehicle" +87037090,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other vehicles, with both compression-ignition internal combustion piston engine (diesel or semi-diesel) and electric motor as motors for propulsion, capable of being charged by plugging to external source of electric power:Other" +87038000,cars and other motor vehicles motor principally designed for the transport of persons other than those of heading 8702 including station wagons and racing cars >> other vehicles with only electric motor for propulsion +87038010,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other vehicles, with only electric motor for propulsion:Vehicles principally designed for transport of more than seven persons, including driver" +87038020,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other vehicles, with only electric motor for propulsion:Specialised transport vehicles such as ambulances, prison vans and the like" +87038030,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other vehicles, with only electric motor for propulsion:Motor cars" +87038040,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other vehicles, with only electric motor for propulsion:Three wheeled vehicle" +87038090,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE 
TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other vehicles, with only electric motor for propulsion:Other" +87039000,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other :Other" +87039010,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other :Electrically operated(OLD tariff)" +87039090,"MOTOR CARS AND OTHER MOTOR VEHICLES PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS (OTHER THAN THOSE OF HEADING 8702), INCLUDING STATION WAGONS AND RACING CARS:Other :Other(OLD tariff)" +87040000,motor vehicles for the transport of goods +87041000,motor vehicles for the transport of goods >> dumpers designed for use +87041010,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:Dumpers designed for off-highway use :With net weight (excluding pay-load) exceeding 8 tonnes and maximum pay-load capacity not less than 10 tonnes +87041090,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:Dumpers designed for off-highway use :Other(OLD tariff) +87042100,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:g.v.w. not exceeding 5 tonnes :g.v.w. not exceeding 5 tonnes +87042110,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:g.v.w. not exceeding 5 tonnes :Refrigerated(OLD tariff) +87042120,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:g.v.w. not exceeding 5 tonnes :Three-wheeled motor vehicles(OLD tariff) +87042190,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:g.v.w. not exceeding 5 tonnes :Other(OLD tariff) +87042200,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:g.v.w. exceeding 5 tonnes but not exceeding 20 tonnes :g.v.w. exceeding 5 tonnes but not exceeding 20 tonne +87042211,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:g.v.w. exceeding 5 tonnes but not exceeding 20 tonnes :Refrigerated(OLD tariff) +87042219,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:g.v.w. exceeding 5 tonnes but not exceeding 20 tonnes :Other(OLD tariff) +87042290,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:g.v.w. exceeding 5 tonnes but not exceeding 20 tonnes :Other(OLD tariff) +87042300,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:g.v.w. exceeding 20 tonnes :g.v.w. exceeding 20 tonnes +87042311,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:g.v.w. exceeding 20 tonnes :Refrigerated(OLD tariff) +87042319,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:g.v.w. exceeding 20 tonnes :Other(OLD tariff) +87042390,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:g.v.w. exceeding 20 tonnes :Other(OLD tariff) +87043100,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:g.v.w. not exceeding 5 tonnes :g.v.w. not exceeding 5 tonnes +87043110,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:g.v.w. not exceeding 5 tonnes :Refrigerated(OLD tariff) +87043120,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:g.v.w. not exceeding 5 tonnes :Three-wheeled motor vehicles(OLD tariff) +87043190,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:g.v.w. not exceeding 5 tonnes :Other(OLD tariff) +87043200,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:g.v.w. exceeding 5 tonnes:g.v.w. exceeding 5 tonnes +87043211,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:g.v.w. exceeding 5 tonnes:Refrigerated(OLD tariff) +87043219,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:g.v.w. exceeding 5 tonnes:Other(OLD tariff) +87043290,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:g.v.w. exceeding 5 tonnes:Other(OLD tariff) +87044100,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS::g.v.w. 
not exceeding 5 tonnes +87044200,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS::g.v.w. exceeding 5 tonnes but not exceeding 20 tonnes +87044300,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:gvw:g.v.w. exceeding 20 tonnes +87045100,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:GVS:g.v.w. not exceeding 5 tonnes +87045200,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:gvs:g.v.w. exceeding 5 tonnes +87046000,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS::Other with only electric motor for propulsion +87049000,motor vehicles for the transport of goods >> other lorries and trucks +87049011,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:Other :Refrigerated +87049012,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:Other :Electrically operated +87049019,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:Other :Other +87049090,MOTOR VEHICLES FOR THE TRANSPORT OF GOODS:Other :Other +87050000,special purpose motor vehicles other than those principally designed for the transport of persons or goods for example lorries crane lorries fire fighting vehicles spraying lorries mobile workshops mobile radiological units +87051000,"SPECIAL PURPOSE MOTOR VEHICLES, OTHER THAN THOSE PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS OR GOODS (FOR EXAMPLE, BREAKDOWN LORRIES, CRANE LORRIES, FIRE FIGHTING VEHICLES, CONCRETE-MIXERS LORRIES, SPRAYING LORRIES, MOBILE WORKSHOPS, MOBILE RADIOLOGICAL UNITS)::Crane lorries" +87052000,"SPECIAL PURPOSE MOTOR VEHICLES, OTHER THAN THOSE PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS OR GOODS (FOR EXAMPLE, BREAKDOWN LORRIES, CRANE LORRIES, FIRE FIGHTING VEHICLES, CONCRETE-MIXERS LORRIES, SPRAYING LORRIES, MOBILE WORKSHOPS, MOBILE RADIOLOGICAL UNITS)::Mobile drilling derricks" +87053000,"SPECIAL PURPOSE MOTOR VEHICLES, OTHER THAN THOSE PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS OR GOODS (FOR EXAMPLE, BREAKDOWN LORRIES, CRANE LORRIES, FIRE FIGHTING VEHICLES, CONCRETE-MIXERS LORRIES, SPRAYING LORRIES, MOBILE WORKSHOPS, MOBILE RADIOLOGICAL UNITS)::Fire fighting vehicles" +87054000,"SPECIAL PURPOSE MOTOR VEHICLES, OTHER THAN THOSE PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS OR GOODS (FOR EXAMPLE, BREAKDOWN LORRIES, CRANE LORRIES, FIRE FIGHTING VEHICLES, CONCRETE-MIXERS LORRIES, SPRAYING LORRIES, MOBILE WORKSHOPS, MOBILE RADIOLOGICAL UNITS)::Concrete-mixer lorries" +87059000,"SPECIAL PURPOSE MOTOR VEHICLES, OTHER THAN THOSE PRINCIPALLY DESIGNED FOR THE TRANSPORT OF PERSONS OR GOODS (FOR EXAMPLE, BREAKDOWN LORRIES, CRANE LORRIES, FIRE FIGHTING VEHICLES, CONCRETE-MIXERS LORRIES, SPRAYING LORRIES, MOBILE WORKSHOPS, MOBILE RADIOLOGICAL UNITS)::Other" +87060000,chassis fitted with engines for the motor vehicles of headings 8701 to 8705 chassis fitted with engines for the motor vehicles of headings 8701 to 8705 for the tractors of heading 8701 +87060011,"CHASSIS FITTED WITH ENGINES, FOR THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705:Chassis fitted with engines, for the motor vehicles of headings 8701 to 8705:Of engine capacity not exceeding 1,800 cc" +87060019,"CHASSIS FITTED WITH ENGINES, FOR THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705:Chassis fitted with engines, for the motor vehicles of headings 8701 to 8705:Other" +87060021,"CHASSIS FITTED WITH ENGINES, FOR THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705:Chassis fitted with engines, for the motor vehicles of headings 8701 to 8705:For transport of not more than thirteen persons, including the driver" +87060029,"CHASSIS FITTED WITH ENGINES, FOR THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705:Chassis fitted with engines, for the motor vehicles of headings 8701 to 
8705:Other" +87060031,"CHASSIS FITTED WITH ENGINES, FOR THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705:Chassis fitted with engines, for the motor vehicles of headings 8701 to 8705:For three-wheeled vehicles" +87060039,"CHASSIS FITTED WITH ENGINES, FOR THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705:Chassis fitted with engines, for the motor vehicles of headings 8701 to 8705:Other" +87060041,"CHASSIS FITTED WITH ENGINES, FOR THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705:Chassis fitted with engines, for the motor vehicles of headings 8701 to 8705:For three-wheeled motor vehicle" +87060042,"CHASSIS FITTED WITH ENGINES, FOR THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705:Chassis fitted with engines, for the motor vehicles of headings 8701 to 8705:For vehicles, other than petrol driven" +87060043,"CHASSIS FITTED WITH ENGINES, FOR THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705:Chassis fitted with engines, for the motor vehicles of headings 8701 to 8705:For dumpers covered in the heading 8704" +87060049,"CHASSIS FITTED WITH ENGINES, FOR THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705:Chassis fitted with engines, for the motor vehicles of headings 8701 to 8705:Other" +87060050,"CHASSIS FITTED WITH ENGINES, FOR THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705:Chassis fitted with engines, for the motor vehicles of headings 8701 to 8705:For the motor vehicles of heading 8705" +87070000,bodies including cabs for the motor vehicles of headings 8701 to 8705 +87071000,"BODIES (INCLUDING CABS), FOR THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705::For the vehicles of heading 8703" +87079000,"BODIES (INCLUDING CABS), FOR THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705::Other" +87080000,parts and accessories of the motorvehicles of headings 8701 to 8705 +87081000,parts and accessories of the motorvehicles of headings 8701 to 8705 >> bumpers and parts thereof +87081010,PARTS AND ACCESSORIES OF THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705:Bumpers and parts thereof :For tractors +87081090,PARTS AND ACCESSORIES OF THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705:Bumpers and parts thereof :Other +87082100,PARTS AND ACCESSORIES OF THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705::Safety seat belts +87082200,"PARTS AND ACCESSORIES OF THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705:front:Front windscreens (windshields), rear windows and other windows specified in Subheading Note 1 to this Chapter" +87082900,PARTS AND ACCESSORIES OF THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705::Other +87083000,PARTS AND ACCESSORIES OF THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705::Brakes and servo-brakes; parts thereof +87084000,PARTS AND ACCESSORIES OF THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705::Gear boxes and parts thereof +87085000,"PARTS AND ACCESSORIES OF THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705::Drive-axles with differential, whether or not provided with other transmission components, non-driving axles; parts thereof" +87087000,PARTS AND ACCESSORIES OF THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705::Road wheels and parts and accessories thereof +87088000,PARTS AND ACCESSORIES OF THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705::Suspension systems and parts thereof (including shock absorbers) +87089100,PARTS AND ACCESSORIES OF THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705::Radiators and parts thereof +87089200,PARTS AND ACCESSORIES OF THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705::Silencers (mufflers) and exhaust pipes; parts thereof +87089300,PARTS AND ACCESSORIES OF THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705::Clutches and parts thereof +87089400,"PARTS AND ACCESSORIES OF THE MOTOR 
VEHICLES OF HEADINGS 8701 TO 8705::Steering wheels, steering columns and steering boxes; parts thereof" +87089500,PARTS AND ACCESSORIES OF THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705::Safety airbags with inflater system; parts thereof +87089900,PARTS AND ACCESSORIES OF THE MOTOR VEHICLES OF HEADINGS 8701 TO 8705::Other +87090000,works trucks not fitted with lifting or handling equipment of the type used in factories warehouses dock areas or airports for short distance transport of goods tractors of the type used on railway station platforms parts of the foregoing vehicles vehicles fitted with an auxiliary motor with or without +87091100,"WORKS TRUCKS, SELF-PROPELLED, NOT FITTED WITH LIFTING OR HANDLING EQUIPMENT, OF THE TYPE USED IN FACTORIES, WAREHOUSES, DOCK AREAS OR AIRPORTS FOR SHORT DISTANCE TRANSPORT OF GOODS; TRACTORS OF THE TYPE USED ON RAILWAY STATION PLATFORMS; PARTS OF THE FOREGOING VEHICLES::Electrical(OLD tariff)" +87091900,"WORKS TRUCKS, SELF-PROPELLED, NOT FITTED WITH LIFTING OR HANDLING EQUIPMENT, OF THE TYPE USED IN FACTORIES, WAREHOUSES, DOCK AREAS OR AIRPORTS FOR SHORT DISTANCE TRANSPORT OF GOODS; TRACTORS OF THE TYPE USED ON RAILWAY STATION PLATFORMS; PARTS OF THE FOREGOING VEHICLES::Other(OLD tariff)" +87099000,"WORKS TRUCKS, SELF-PROPELLED, NOT FITTED WITH LIFTING OR HANDLING EQUIPMENT, OF THE TYPE USED IN FACTORIES, WAREHOUSES, DOCK AREAS OR AIRPORTS FOR SHORT DISTANCE TRANSPORT OF GOODS; TRACTORS OF THE TYPE USED ON RAILWAY STATION PLATFORMS; PARTS OF THE FOREGOING VEHICLES::Parts(OLD tariff)" +87100000,"::TANKS AND OTHER ARMOURED FIGHTING VEHICLES, MOTORISED, WHETHER OR NOT FITTED WITH WEAPONS, AND PARTS OF SUCH VEHICLES(OLD tariff)" +87110000,motorcycles including mopeds and cycles fitted with an auxiliary motor with or without +87111000,motorcycles including mopeds and cycles fitted with an auxiliary motor with or without >> with internal combustion piston engine of a cylinder capacity not exceeding 50 cc with internal combustion piston engine of a cylinder capacity not exceeding 50 cc +87111010,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:With reciprocating internal combustion piston engine of a cylinder capacity not exceeding 50 cc:Mopeds" +87111020,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:With reciprocating internal combustion piston engine of a cylinder capacity not exceeding 50 cc:Motorised cycles" +87111090,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:With reciprocating internal combustion piston engine of a cylinder capacity not exceeding 50 cc:Other" +87112000,motorcycles including mopeds and cycles fitted with an auxiliary motor with or without >> with internal combustion piston engine of a cylinder capacity exceeding 50 cc but not exceeding 250 cc scooters with internal combustion piston engine of a cylinder capacity exceeding 50 cc but not exceeding 250 cc scooters +87112011,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:With reciprocating internal combustion piston engine of a cylinder capacity exceeding 50 cc but not exceeding 250 cc :Of cylinder capacity not exceeding 75 cc" +87112019,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:With reciprocating internal combustion piston engine of a cylinder capacity exceeding 50 cc but not exceeding 250 cc :Other" 
+87112021,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:With reciprocating internal combustion piston engine of a cylinder capacity exceeding 50 cc but not exceeding 250 cc :Of cylinder capacity not exceeding 75 cc" +87112029,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:With reciprocating internal combustion piston engine of a cylinder capacity exceeding 50 cc but not exceeding 250 cc :Other" +87112031,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:With reciprocating internal combustion piston engine of a cylinder capacity exceeding 50 cc but not exceeding 250 cc :Of cylinder capacity not exceeding 75 cc" +87112039,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:With reciprocating internal combustion piston engine of a cylinder capacity exceeding 50 cc but not exceeding 250 cc :Other" +87112091,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:With reciprocating internal combustion piston engine of a cylinder capacity exceeding 50 cc but not exceeding 250 cc :Of cylinder capacity not exceeding 75 cc" +87112099,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:With reciprocating internal combustion piston engine of a cylinder capacity exceeding 50 cc but not exceeding 250 cc :Other" +87113000,motorcycles including mopeds and cycles fitted with an auxiliary motor with or without >> with internal combustion piston engine of a cylinder capacity exceeding 250 cc but not exceeding 500 cc with internal combustion piston engine of a cylinder capacity exceeding 250 cc but not exceeding 500 cc +87113010,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:With reciprocating internal combustion piston engine of a cylinder capacity exceeding 250 cc but not exceeding 500 cc :Scooters" +87113020,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:With reciprocating internal combustion piston engine of a cylinder capacity exceeding 250 cc but not exceeding 500 cc :Motor-cycles" +87113090,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:With reciprocating internal combustion piston engine of a cylinder capacity exceeding 250 cc but not exceeding 500 cc :Other" +87114000,motorcycles including mopeds and cycles fitted with an auxiliary motor with or without >> internal combustion piston with engine of a cylinder capacity exceeding500 cc but not exceeding 800 cc internal combustion piston with engine of a cylinder capacity exceeding500 cc but not exceeding 800 cc +87114010,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:With reciprocating internal combustion piston engine of a cylinder capacity exceeding 500 cc but not exceeding 800 cc :Motor-cycles" +87114090,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:With reciprocating internal combustion piston engine of a cylinder capacity exceeding 500 cc but not exceeding 800 cc :Other" +87115000,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;::With reciprocating internal combustion piston engine of a cylinder capacity exceeding 800 cc" 
+87116000,motorcycles including mopeds and cycles fitted with an auxiliary motor with or without >> with electric motor for propulsion with electric motor for propulsion +87116010,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:With electric motor for propulsion:Motor cycles" +87116020,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:With electric motor for propulsion:Scooters" +87116030,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:With electric motor for propulsion:Mopeds" +87116090,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:With electric motor for propulsion:Others" +87119000,motorcycles including mopeds and cycles fitted with an auxiliary motor with or without >> other other +87119010,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:Other :Side-cars" +87119090,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:Other :Other" +87119091,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:Other :Electrically operated(OLD tariff)" +87119099,"MOTORCYCLES (INCLUDING MOPEDS) AND CYCLES FITTED WITH AN AUXILIARY MOTOR, WITH OR WITHOUT SIDE-CARS;:Other :Other(OLD tariff)" +87120000,bicycles and other cycles including delivery tricycles not motorised bicycles and other cycles including delivery tricycles not motorised +87120010,"BICYCLES AND OTHER CYCLES (INCLUDING DELIVERY TRICYCLES), NOT MOTORISED:Bicycles and other cycles (including delivery tricycles), not motorised :Bicycles" +87120090,"BICYCLES AND OTHER CYCLES (INCLUDING DELIVERY TRICYCLES), NOT MOTORISED:Bicycles and other cycles (including delivery tricycles), not motorised :Other" +87130000,motorised or otherwise propelled +87131000,motorised or otherwise propelled >> not mechanically propelled +87131010,:Not mechanically propelled :Wheel chairs for invalid +87131090,:Not mechanically propelled :Other +87139000,motorised or otherwise propelled >> other +87139010,:Other :Wheel chairs for invalid +87139090,:Other :Other +87140000,parts and accessories of vehicles of headings 8711 to 8713 +87141000,parts and accessories of vehicles of headings 8711 to 8713 >> of motorcycles including mopeds +87141010,PARTS AND ACCESSORIES OF VEHICLES OF HEADINGS 8711 TO 8713:Of motorcycles (including mopeds):Saddles +87141090,PARTS AND ACCESSORIES OF VEHICLES OF HEADINGS 8711 TO 8713:Of motorcycles (including mopeds):Other +87142000,parts and accessories of vehicles of headings 8711 to 8713 >> of carriages for disabled persons +87142010,PARTS AND ACCESSORIES OF VEHICLES OF HEADINGS 8711 TO 8713:Of carriages for disabled persons :Mechanically propelled +87142020,PARTS AND ACCESSORIES OF VEHICLES OF HEADINGS 8711 TO 8713:Of carriages for disabled persons :Non-mechanically propelled +87142090,PARTS AND ACCESSORIES OF VEHICLES OF HEADINGS 8711 TO 8713:Of carriages for disabled persons :Other +87149100,"PARTS AND ACCESSORIES OF VEHICLES OF HEADINGS 8711 TO 8713::Frames and forks, and parts thereof" +87149200,parts and accessories of vehicles of headings 8711 to 8713 >> wheel rims and spokes +87149210,PARTS AND ACCESSORIES OF VEHICLES OF HEADINGS 8711 TO 8713:Wheel rims and spokes :Bicycle rims +87149220,PARTS AND ACCESSORIES OF VEHICLES OF HEADINGS 8711 TO 8713:Wheel rims and spokes :Bicycle 
spokes +87149290,PARTS AND ACCESSORIES OF VEHICLES OF HEADINGS 8711 TO 8713:Wheel rims and spokes :Other +87149300,parts and accessories of vehicles of headings 8711 to 8713 >> hubs other than coaster braking hubs and hub brakes and +87149310,"PARTS AND ACCESSORIES OF VEHICLES OF HEADINGS 8711 TO 8713:Hubs, other than coaster braking hubs and hub brakes, and free-wheel sprocket-wheels :Bicycle hubs" +87149320,"PARTS AND ACCESSORIES OF VEHICLES OF HEADINGS 8711 TO 8713:Hubs, other than coaster braking hubs and hub brakes, and free-wheel sprocket-wheels :Bicycle free-wheels" +87149390,"PARTS AND ACCESSORIES OF VEHICLES OF HEADINGS 8711 TO 8713:Hubs, other than coaster braking hubs and hub brakes, and free-wheel sprocket-wheels :Other" +87149400,"PARTS AND ACCESSORIES OF VEHICLES OF HEADINGS 8711 TO 8713::Brakes, including coaster braking hubs and hub brakes, and parts thereof" +87149500,parts and accessories of vehicles of headings 8711 to 8713 >> saddles +87149510,PARTS AND ACCESSORIES OF VEHICLES OF HEADINGS 8711 TO 8713:Saddles :Bicycle saddles +87149590,PARTS AND ACCESSORIES OF VEHICLES OF HEADINGS 8711 TO 8713:Saddles :Other +87149600,"PARTS AND ACCESSORIES OF VEHICLES OF HEADINGS 8711 TO 8713::Pedals and crank-gear, and parts thereof" +87149900,parts and accessories of vehicles of headings 8711 to 8713 >> other +87149910,PARTS AND ACCESSORIES OF VEHICLES OF HEADINGS 8711 TO 8713:Other :Bicycle chains +87149920,PARTS AND ACCESSORIES OF VEHICLES OF HEADINGS 8711 TO 8713:Other :Bicycle wheels +87149990,PARTS AND ACCESSORIES OF VEHICLES OF HEADINGS 8711 TO 8713:Other :Other +87150000,baby carriages and parts thereof baby carriages and parts thereof +87150010,BABY CARRIAGES AND PARTS THEREOF:Baby carriages and parts thereof :Baby carriages +87150020,BABY CARRIAGES AND PARTS THEREOF:Baby carriages and parts thereof :Parts +87160000,trailers and other vehicles not mechanically propelled parts thereof +87161000,"TRAILERS AND SEMI-TRAILERS; OTHER VEHICLES, NOT MECHANICALLY PROPELLED; PARTS THEREOF::Trailers and semi-trailers of the caravan type, for housing or camping" +87162000,"TRAILERS AND SEMI-TRAILERS; OTHER VEHICLES, NOT MECHANICALLY PROPELLED; PARTS THEREOF::Self-loading or self-unloading trailers and semi-trailers for agricultural purposes" +87163100,"TRAILERS AND SEMI-TRAILERS; OTHER VEHICLES, NOT MECHANICALLY PROPELLED; PARTS THEREOF::Tanker trailers and tanker semi-trailers" +87163900,"TRAILERS AND SEMI-TRAILERS; OTHER VEHICLES, NOT MECHANICALLY PROPELLED; PARTS THEREOF::Other" +87164000,"TRAILERS AND SEMI-TRAILERS; OTHER VEHICLES, NOT MECHANICALLY PROPELLED; PARTS THEREOF::Other trailers and semi-trailers" +87168000,trailers and other vehicles not mechanically propelled parts thereof >> other vehicles +87168010,"TRAILERS AND SEMI-TRAILERS; OTHER VEHICLES, NOT MECHANICALLY PROPELLED; PARTS THEREOF:Other vehicles :Hand propelled vehicles (e.g. 
hand carts, rickshaws and the like)" +87168020,"TRAILERS AND SEMI-TRAILERS; OTHER VEHICLES, NOT MECHANICALLY PROPELLED; PARTS THEREOF:Other vehicles :Animal drawn vehicles" +87168090,"TRAILERS AND SEMI-TRAILERS; OTHER VEHICLES, NOT MECHANICALLY PROPELLED; PARTS THEREOF:Other vehicles :Other" +87169000,trailers and other vehicles not mechanically propelled parts thereof >> parts +87169010,"TRAILERS AND SEMI-TRAILERS; OTHER VEHICLES, NOT MECHANICALLY PROPELLED; PARTS THEREOF:Parts :Parts and accessories of trailers" +87169090,"TRAILERS AND SEMI-TRAILERS; OTHER VEHICLES, NOT MECHANICALLY PROPELLED; PARTS THEREOF:Parts :Other" +88010000,balloons and dirigibles gliders hand gliders and other aircraft balloons and dirigibles gliders hang gliders and other aircraft +88010010,"BALLOONS AND DIRIGIBLES, GLIDERS, HAND GLIDERS AND OTHER NON-POWERED AIRCRAFT:Balloons and dirigibles; gliders, hang gliders and other non-powered aircraft:Gliders and hang gliders" +88010020,"BALLOONS AND DIRIGIBLES, GLIDERS, HAND GLIDERS AND OTHER NON-POWERED AIRCRAFT:Balloons and dirigibles; gliders, hang gliders and other non-powered aircraft:Balloons" +88010090,"BALLOONS AND DIRIGIBLES, GLIDERS, HAND GLIDERS AND OTHER NON-POWERED AIRCRAFT:Balloons and dirigibles; gliders, hang gliders and other non-powered aircraft:Other" +88020000,other aircraft for example helicopters aeroplanes except unmanned aircraft of heading helicopters +88021100,"OTHER AIRCRAFT (FOR EXAMPLE, HELICOPTERS, AEROPLANES); SPACECRAFT (INCLUDING SATELLITES) AND SUBORBITAL AND SPACECRAFT LAUNCH VEHICLES::Of an unladen weight not exceeding 2,000 kg." +88021200,"OTHER AIRCRAFT (FOR EXAMPLE, HELICOPTERS, AEROPLANES); SPACECRAFT (INCLUDING SATELLITES) AND SUBORBITAL AND SPACECRAFT LAUNCH VEHICLES::Of an unladen weight exceeding 2,000 kg." +88022000,"OTHER AIRCRAFT (FOR EXAMPLE, HELICOPTERS, AEROPLANES); SPACECRAFT (INCLUDING SATELLITES) AND SUBORBITAL AND SPACECRAFT LAUNCH VEHICLES::Aeroplanes and other aircraft, of an unladen weight not exceeding 2,000 kg." +88023000,"OTHER AIRCRAFT (FOR EXAMPLE, HELICOPTERS, AEROPLANES); SPACECRAFT (INCLUDING SATELLITES) AND SUBORBITAL AND SPACECRAFT LAUNCH VEHICLES::Aeroplanes and other aircraft, of an unladen weight exceeding 2,000 kg. but not exceeding 15,000 kg." +88024000,"OTHER AIRCRAFT (FOR EXAMPLE, HELICOPTERS, AEROPLANES); SPACECRAFT (INCLUDING SATELLITES) AND SUBORBITAL AND SPACECRAFT LAUNCH VEHICLES::Aeroplanes and other aircraft, of an unladen weight exceeding 15,000 kg." 
+88026000,"OTHER AIRCRAFT (FOR EXAMPLE, HELICOPTERS, AEROPLANES); SPACECRAFT (INCLUDING SATELLITES) AND SUBORBITAL AND SPACECRAFT LAUNCH VEHICLES::Spacecraft (including satellites) and suborbital and spacecraft launch vehicles" +88031000,PARTS OF GOODS OF HEADING 8801 OR 8802::Propellers and rotors and parts thereof(OLD tariff) +88032000,PARTS OF GOODS OF HEADING 8801 OR 8802::Under-carriages and parts thereof(OLD tariff) +88033000,PARTS OF GOODS OF HEADING 8801 OR 8802::Other parts of aeroplanes or helicopters(OLD tariff) +88039000,PARTS OF GOODS OF HEADING 8801 OR 8802::Other(OLD tariff) +88040000,parachutes including dirigible parachutes and paragliders and rotochutes parts thereof and accessories thereto parachutes including dirigible parachutes and paragliders and rotochutes parts thereof and accessories thereto +88040010,PARACHUTES (INCLUDING DIRIGIBLE PARACHUTES AND PARAGLIDERS) AND ROTOCHUTES; PARTS THEREOF AND ACCESSORIES THERETO:Parachutes (including dirigible parachutes and paragliders) and rotochutes; parts thereof and accessories thereto :Parachutes (including dirigible parachutes and paragliders) and parts and accessories thereof +88040020,PARACHUTES (INCLUDING DIRIGIBLE PARACHUTES AND PARAGLIDERS) AND ROTOCHUTES; PARTS THEREOF AND ACCESSORIES THERETO:Parachutes (including dirigible parachutes and paragliders) and rotochutes; parts thereof and accessories thereto :Rotochutes; parts and accessories thereof +88050000,aircraft launching gear or similar gear ground flying trainers parts of the foregoing articles +88051000,aircraft launching gear or similar gear ground flying trainers parts of the foregoing articles >> aircraft launching gear and parts thereof deck arrestor or similar gear and parts thereof +88051010,AIRCRAFT LAUNCHING GEAR; DECK-ARRESTOR OR SIMILAR GEAR; GROUND FLYING TRAINERS; PARTS OF THE FOREGOING ARTICLES:Aircraft launching gear and parts thereof; deck arrestor or similar gear and parts thereof :Aircraft launching gear +88051020,AIRCRAFT LAUNCHING GEAR; DECK-ARRESTOR OR SIMILAR GEAR; GROUND FLYING TRAINERS; PARTS OF THE FOREGOING ARTICLES:Aircraft launching gear and parts thereof; deck arrestor or similar gear and parts thereof :Deck arrestor or similar gear +88051030,AIRCRAFT LAUNCHING GEAR; DECK-ARRESTOR OR SIMILAR GEAR; GROUND FLYING TRAINERS; PARTS OF THE FOREGOING ARTICLES:Aircraft launching gear and parts thereof; deck arrestor or similar gear and parts thereof :Part of aircraft launching gear and deck- arrestor or similar gear +88052100,AIRCRAFT LAUNCHING GEAR; DECK-ARRESTOR OR SIMILAR GEAR; GROUND FLYING TRAINERS; PARTS OF THE FOREGOING ARTICLES::Air combat simulators and parts thereof +88052900,AIRCRAFT LAUNCHING GEAR; DECK-ARRESTOR OR SIMILAR GEAR; GROUND FLYING TRAINERS; PARTS OF THE FOREGOING ARTICLES::Other +88060000,unmanned aircraft +88061000,UNMANNED AIRCRAFT::Designed for the carriage of passengers +88062100,UNMANNED AIRCRAFT::With maximum take-off weight not more than 250 g +88062200,UNMANNED AIRCRAFT:with:With maximum take-off weight more than 250 g but not more than 7 kg +88062300,UNMANNED AIRCRAFT::With maximum take-off weight more than 7 kg but not more than 25 kg +88062400,UNMANNED AIRCRAFT:with:With maximum take-off weight more than 25 kg but not more than 150 kg +88062900,UNMANNED AIRCRAFT:oth:Other +88069100,UNMANNED AIRCRAFT::With maximum take-off weight not more than 250 g +88069200,UNMANNED AIRCRAFT::With maximum take-off weight more than 250 g but not more than 7 kg +88069300,UNMANNED AIRCRAFT:with:With maximum take-off weight 
more than 7 kg but not more than 25 kg +88069400,UNMANNED AIRCRAFT:with:With maximum take-off weight more than 25 kg but not more than 150 kg +88069900,UNMANNED AIRCRAFT::Other +88070000,parts of goods of heading 8801 8802 or 8806 +88071000,"PARTS OF GOODS OF HEADING 8801, 8802 OR 8806::Propellers and rotors and parts thereof" +88072000,"PARTS OF GOODS OF HEADING 8801, 8802 OR 8806:under:Under-carriages and parts thereof" +88073000,"PARTS OF GOODS OF HEADING 8801, 8802 OR 8806::Other parts of airplanes, helicopters or unmanned aircraft" +88079000,"PARTS OF GOODS OF HEADING 8801, 8802 OR 8806::Other(OLD tariff)" +89010000,cruise ships excursion boats cargo ships barges and similar vessels for the transport of persons or goods +89011000,cruise ships excursion boats cargo ships barges and similar vessels for the transport of persons or goods >> cruise ships excursion boats and similar vessels principally designed for the transport of persons of all kinds +89011010,"CRUISE SHIPS, EXCURSION BOATS , FERRY-BOATS, CARGO SHIPS, BARGES AND SIMILAR VESSELS FOR THE TRANSPORT OF PERSONS OR GOODS:Cruise ships, excursion boats and similar vessels principally designed for the transport of persons; ferryboats of all kinds :Ships" +89011020,"CRUISE SHIPS, EXCURSION BOATS , FERRY-BOATS, CARGO SHIPS, BARGES AND SIMILAR VESSELS FOR THE TRANSPORT OF PERSONS OR GOODS:Cruise ships, excursion boats and similar vessels principally designed for the transport of persons; ferryboats of all kinds :Launches" +89011030,"CRUISE SHIPS, EXCURSION BOATS , FERRY-BOATS, CARGO SHIPS, BARGES AND SIMILAR VESSELS FOR THE TRANSPORT OF PERSONS OR GOODS:Cruise ships, excursion boats and similar vessels principally designed for the transport of persons; ferryboats of all kinds :Boats" +89011040,"CRUISE SHIPS, EXCURSION BOATS , FERRY-BOATS, CARGO SHIPS, BARGES AND SIMILAR VESSELS FOR THE TRANSPORT OF PERSONS OR GOODS:Cruise ships, excursion boats and similar vessels principally designed for the transport of persons; ferryboats of all kinds :Barges" +89011090,"CRUISE SHIPS, EXCURSION BOATS , FERRY-BOATS, CARGO SHIPS, BARGES AND SIMILAR VESSELS FOR THE TRANSPORT OF PERSONS OR GOODS:Cruise ships, excursion boats and similar vessels principally designed for the transport of persons; ferryboats of all kinds :Other" +89012000,"CRUISE SHIPS, EXCURSION BOATS , FERRY-BOATS, CARGO SHIPS, BARGES AND SIMILAR VESSELS FOR THE TRANSPORT OF PERSONS OR GOODS::Tankers" +89013000,"CRUISE SHIPS, EXCURSION BOATS , FERRY-BOATS, CARGO SHIPS, BARGES AND SIMILAR VESSELS FOR THE TRANSPORT OF PERSONS OR GOODS::Refrigerated vessels, other than those of Sub-heading 8901 20" +89019000,"CRUISE SHIPS, EXCURSION BOATS , FERRY-BOATS, CARGO SHIPS, BARGES AND SIMILAR VESSELS FOR THE TRANSPORT OF PERSONS OR GOODS::Other vessels for transport of the goods and other vessels for the transport of both persons and goods" +89020000,fishing vessels factory ships and other vessels for processing or preserving fishery products fishing vessels factory ships and other vessels for processing or preserving fishery products +89020010,FISHING VESSELS; FACTORY SHIPS AND OTHER VESSELS FOR PROCESSING OR PRESERVING FISHERY PRODUCTS:Fishing vessels; factory ships and other vessels for processing or preserving fishery products :Trawlers and other fishing vessels +89020090,FISHING VESSELS; FACTORY SHIPS AND OTHER VESSELS FOR PROCESSING OR PRESERVING FISHERY PRODUCTS:Fishing vessels; factory ships and other vessels for processing or preserving fishery products :Other +89030000,yachts and other 
vessels for pleasure or sports rowing boats and canoes inflatable including rigid hull inflatable boats +89031000,YACHTS AND OTHER VESSELS FOR PLEASURE OR SPORTS; ROWING BOATS AND CANOES::Inflatable(OLD tariff) +89031100,"YACHTS AND OTHER VESSELS FOR PLEASURE OR SPORTS; ROWING BOATS AND CANOES::Fitted or designed to be fitted with a motor, unladen (net) weight (excluding the motor) not exceeding 100 kg" +89031200,YACHTS AND OTHER VESSELS FOR PLEASURE OR SPORTS; ROWING BOATS AND CANOES:not:Not designed for use with a motor and unladen (net) weight not exceeding 100 kg +89031900,YACHTS AND OTHER VESSELS FOR PLEASURE OR SPORTS; ROWING BOATS AND CANOES:other:Other +89032100,YACHTS AND OTHER VESSELS FOR PLEASURE OR SPORTS; ROWING BOATS AND CANOES:of:Of a length not exceeding 7.5 m +89032200,YACHTS AND OTHER VESSELS FOR PLEASURE OR SPORTS; ROWING BOATS AND CANOES:of:Of a length exceeding 7.5 m but not exceeding 24 m +89032300,YACHTS AND OTHER VESSELS FOR PLEASURE OR SPORTS; ROWING BOATS AND CANOES:of:Of a length exceeding 24 m +89033100,YACHTS AND OTHER VESSELS FOR PLEASURE OR SPORTS; ROWING BOATS AND CANOES::Of a length not exceeding 7.5 m +89033200,YACHTS AND OTHER VESSELS FOR PLEASURE OR SPORTS; ROWING BOATS AND CANOES::Of a length exceeding 7.5 m but not exceeding 24 m +89033300,YACHTS AND OTHER VESSELS FOR PLEASURE OR SPORTS; ROWING BOATS AND CANOES:of:Of a length exceeding 24 m +89039100,"YACHTS AND OTHER VESSELS FOR PLEASURE OR SPORTS; ROWING BOATS AND CANOES::Sail boats, with or without auxiliary motor(OLD tariff)" +89039200,"YACHTS AND OTHER VESSELS FOR PLEASURE OR SPORTS; ROWING BOATS AND CANOES::Motorboats, other than outboard motorboats(OLD tariff)" +89039300,YACHTS AND OTHER VESSELS FOR PLEASURE OR SPORTS; ROWING BOATS AND CANOES::Of a length not exceeding 7.5 m +89039900,YACHTS AND OTHER VESSELS FOR PLEASURE OR SPORTS; ROWING BOATS AND CANOES:Other :Other +89039910,YACHTS AND OTHER VESSELS FOR PLEASURE OR SPORTS; ROWING BOATS AND CANOES:Other :Canoes(OLD tariff) +89039990,YACHTS AND OTHER VESSELS FOR PLEASURE OR SPORTS; ROWING BOATS AND CANOES:Other :Other(OLD tariff) +89040000,::TUGS AND PUSHER CRAFT +89050000,dredgers floating cranes and other vessels the navigability of which is subsidiary to their main function floating docks floating or submersible drilling or production platforms dredgers floating cranes and other vessels the navigability of which is subsidiary to their main function floating docks floating or submersible drilling or production platforms +89051000,"LIGHT-VESSELS, FIRE-FLOATS, DREDGERS, FLOATING CRANES, AND OTHER VESSELS THE NAVIGABILITY OF WHICH IS SUBSIDIARY TO THEIR::Dredgers" +89052000,"LIGHT-VESSELS, FIRE-FLOATS, DREDGERS, FLOATING CRANES, AND OTHER VESSELS THE NAVIGABILITY OF WHICH IS SUBSIDIARY TO THEIR::Floating or submersible drilling or production platforms" +89059000,dredgers floating cranes and other vessels the navigability of which is subsidiary to their main function floating docks floating or submersible drilling or production platforms dredgers floating cranes and other vessels the navigability of which is subsidiary to their main function floating docks floating or submersible drilling or production platforms >> other other +89059010,"LIGHT-VESSELS, FIRE-FLOATS, DREDGERS, FLOATING CRANES, AND OTHER VESSELS THE NAVIGABILITY OF WHICH IS SUBSIDIARY TO THEIR:Other :Floating docks" +89059090,"LIGHT-VESSELS, FIRE-FLOATS, DREDGERS, FLOATING CRANES, AND OTHER VESSELS THE NAVIGABILITY OF WHICH IS SUBSIDIARY TO THEIR:Other :Other" +89060000,other 
vessels including warships and lifeboats other than rowing boats other vessels including warships and lifeboats other than rowing boats +89061000,"OTHER VESSELS, INCLUDING WARSHIPS AND LIFEBOATS OTHER THAN ROWING BOATS::Warships" +89069000,"OTHER VESSELS, INCLUDING WARSHIPS AND LIFEBOATS OTHER THAN ROWING BOATS::Other" +89070000,other floating structures for example rafts tanks buoys and beacons other floating structures for example rafts tanks buoys and beacons +89071000,"OTHER FLOATING STRUCTURES ( FOR EXAMPLE, RAFTS, TANKS, COFFER-DAMS, LANDING-STAGES, BUOYS AND BEACONS)::Inflatable rafts" +89079000,"OTHER FLOATING STRUCTURES ( FOR EXAMPLE, RAFTS, TANKS, COFFER-DAMS, LANDING-STAGES, BUOYS AND BEACONS)::Other" +89080000,::VESSELS AND OTHER FLOATING STRUCTURES FOR BREAKING UP +90010000,optical fibres and optical fibre bundles optical fibre cables other than those of heading 8544 sheets and plates of polarising material lenses including contact lenses prisms mirrors and other optical elements of any material unmounted other than such elements of glass not optically worked +90011000,"OPTICAL FIBRES AND OPTICAL FIBRE BUNDLES; OPTICAL FIBRE CABLES OTHER THAN THOSE OF HEADING 8544; SHEETS AND PLATES OF POLARISING MATERIAL; LENSES (INCLUDING CONTACT LENSES), PRISMS, MIRRORS AND OTHER OPTICAL ELEMENTS, OF ANY MATERIAL, UNMOUNTED, OTHER THAN SUCH ELEMENTS OF GLASS NOT OPTICALLY WORKED::Optical fibres, optical fibres bundles and cables" +90012000,"OPTICAL FIBRES AND OPTICAL FIBRE BUNDLES; OPTICAL FIBRE CABLES OTHER THAN THOSE OF HEADING 8544; SHEETS AND PLATES OF POLARISING MATERIAL; LENSES (INCLUDING CONTACT LENSES), PRISMS, MIRRORS AND OTHER OPTICAL ELEMENTS, OF ANY MATERIAL, UNMOUNTED, OTHER THAN SUCH ELEMENTS OF GLASS NOT OPTICALLY WORKED::Sheets and plates of polarising material" +90013000,"OPTICAL FIBRES AND OPTICAL FIBRE BUNDLES; OPTICAL FIBRE CABLES OTHER THAN THOSE OF HEADING 8544; SHEETS AND PLATES OF POLARISING MATERIAL; LENSES (INCLUDING CONTACT LENSES), PRISMS, MIRRORS AND OTHER OPTICAL ELEMENTS, OF ANY MATERIAL, UNMOUNTED, OTHER THAN SUCH ELEMENTS OF GLASS NOT OPTICALLY WORKED::Contact lenses" +90014000,optical fibres and optical fibre bundles optical fibre cables other than those of heading 8544 sheets and plates of polarising material lenses including contact lenses prisms mirrors and other optical elements of any material unmounted other than such elements of glass not optically worked >> spectacle lenses of glass +90014010,"OPTICAL FIBRES AND OPTICAL FIBRE BUNDLES; OPTICAL FIBRE CABLES OTHER THAN THOSE OF HEADING 8544; SHEETS AND PLATES OF POLARISING MATERIAL; LENSES (INCLUDING CONTACT LENSES), PRISMS, MIRRORS AND OTHER OPTICAL ELEMENTS, OF ANY MATERIAL, UNMOUNTED, OTHER THAN SUCH ELEMENTS OF GLASS NOT OPTICALLY WORKED:Spectacle lenses of glass :Polarised glass" +90014090,"OPTICAL FIBRES AND OPTICAL FIBRE BUNDLES; OPTICAL FIBRE CABLES OTHER THAN THOSE OF HEADING 8544; SHEETS AND PLATES OF POLARISING MATERIAL; LENSES (INCLUDING CONTACT LENSES), PRISMS, MIRRORS AND OTHER OPTICAL ELEMENTS, OF ANY MATERIAL, UNMOUNTED, OTHER THAN SUCH ELEMENTS OF GLASS NOT OPTICALLY WORKED:Spectacle lenses of glass :Other" +90015000,"OPTICAL FIBRES AND OPTICAL FIBRE BUNDLES; OPTICAL FIBRE CABLES OTHER THAN THOSE OF HEADING 8544; SHEETS AND PLATES OF POLARISING MATERIAL; LENSES (INCLUDING CONTACT LENSES), PRISMS, MIRRORS AND OTHER OPTICAL ELEMENTS, OF ANY MATERIAL, UNMOUNTED, OTHER THAN SUCH ELEMENTS OF GLASS NOT OPTICALLY WORKED::Spectacle lenses of other materials" +90019000,optical fibres and 
optical fibre bundles optical fibre cables other than those of heading 8544 sheets and plates of polarising material lenses including contact lenses prisms mirrors and other optical elements of any material unmounted other than such elements of glass not optically worked >> other +90019010,"OPTICAL FIBRES AND OPTICAL FIBRE BUNDLES; OPTICAL FIBRE CABLES OTHER THAN THOSE OF HEADING 8544; SHEETS AND PLATES OF POLARISING MATERIAL; LENSES (INCLUDING CONTACT LENSES), PRISMS, MIRRORS AND OTHER OPTICAL ELEMENTS, OF ANY MATERIAL, UNMOUNTED, OTHER THAN SUCH ELEMENTS OF GLASS NOT OPTICALLY WORKED:Other :Optical calcite crystal" +90019090,"OPTICAL FIBRES AND OPTICAL FIBRE BUNDLES; OPTICAL FIBRE CABLES OTHER THAN THOSE OF HEADING 8544; SHEETS AND PLATES OF POLARISING MATERIAL; LENSES (INCLUDING CONTACT LENSES), PRISMS, MIRRORS AND OTHER OPTICAL ELEMENTS, OF ANY MATERIAL, UNMOUNTED, OTHER THAN SUCH ELEMENTS OF GLASS NOT OPTICALLY WORKED:Other :Other" +90021100,"LENSES, PRISMS, MIRRORS AND OTHER OPTICAL ELEMENTS, OF ANY MATERIAL, MOUNTED, BEING PARTS OF OR FITTINGS FOR INSTRUMENTS OR APPARATUS, OTHER THAN SUCH ELEMENTS OF GLASS NOT OPTICALLY WORKED::For cameras, projectors or photographic enlargers or reducers(OLD tariff)" +90021900,"LENSES, PRISMS, MIRRORS AND OTHER OPTICAL ELEMENTS, OF ANY MATERIAL, MOUNTED, BEING PARTS OF OR FITTINGS FOR INSTRUMENTS OR APPARATUS, OTHER THAN SUCH ELEMENTS OF GLASS NOT OPTICALLY WORKED::Other(OLD tariff)" +90022000,"LENSES, PRISMS, MIRRORS AND OTHER OPTICAL ELEMENTS, OF ANY MATERIAL, MOUNTED, BEING PARTS OF OR FITTINGS FOR INSTRUMENTS OR APPARATUS, OTHER THAN SUCH ELEMENTS OF GLASS NOT OPTICALLY WORKED::Filters(OLD tariff)" +90029000,"LENSES, PRISMS, MIRRORS AND OTHER OPTICAL ELEMENTS, OF ANY MATERIAL, MOUNTED, BEING PARTS OF OR FITTINGS FOR INSTRUMENTS OR APPARATUS, OTHER THAN SUCH ELEMENTS OF GLASS NOT OPTICALLY WORKED::Other(OLD tariff)" +90031100,"FRAMES AND MOUNTINGS FOR SPECTACLES, GOGGLES OR THE LIKE, AND PARTS THEREOF::Of plastics(OLD tariff)" +90031900,"FRAMES AND MOUNTINGS FOR SPECTACLES, GOGGLES OR THE LIKE, AND PARTS THEREOF::Of other materials(OLD tariff)" +90039000,"FRAMES AND MOUNTINGS FOR SPECTACLES, GOGGLES OR THE LIKE, AND PARTS THEREOF::Parts(OLD tariff)" +90041000,"SPECTACLES, GOGGLES AND THE LIKE, CORRECTIVE, PROTECTIVE OR OTHER::Sunglasses(OLD tariff)" +90049010,"SPECTACLES, GOGGLES AND THE LIKE, CORRECTIVE, PROTECTIVE OR OTHER:Other :Passive night vision goggles(OLD tariff)" +90049020,"SPECTACLES, GOGGLES AND THE LIKE, CORRECTIVE, PROTECTIVE OR OTHER:Other :Prismatic eyeglasses for reading(OLD tariff)" +90049090,"SPECTACLES, GOGGLES AND THE LIKE, CORRECTIVE, PROTECTIVE OR OTHER:Other :Other(OLD tariff)" +90050000,binocu lars monoculars other op tical telescopes and mountings therefor other astronomical instruments and mountings including instruments for therefor but not +90051000,"BINOCULARS, MONOCULARS, OTHER OPTICAL TELESCOPES, AND MOUNTINGS THEREFOR; OTHER ASTRONOMICAL INSTRUMENTS AND MOUNTINGS THEREFOR, BUT NOT INCLUDING INSTRUMENTS FOR RADIO-ASTRONOMY::Binoculars" +90058000,binocu lars monoculars other op tical telescopes and mountings therefor other astronomical instruments and mountings including instruments for therefor but not >> other instruments +90058010,"BINOCULARS, MONOCULARS, OTHER OPTICAL TELESCOPES, AND MOUNTINGS THEREFOR; OTHER ASTRONOMICAL INSTRUMENTS AND MOUNTINGS THEREFOR, BUT NOT INCLUDING INSTRUMENTS FOR RADIO-ASTRONOMY:Other instruments :Monocular and refracting telescopes" +90058020,"BINOCULARS, MONOCULARS, OTHER OPTICAL 
TELESCOPES, AND MOUNTINGS THEREFOR; OTHER ASTRONOMICAL INSTRUMENTS AND MOUNTINGS THEREFOR, BUT NOT INCLUDING INSTRUMENTS FOR RADIO-ASTRONOMY:Other instruments :Astronomical instruments" +90058090,"BINOCULARS, MONOCULARS, OTHER OPTICAL TELESCOPES, AND MOUNTINGS THEREFOR; OTHER ASTRONOMICAL INSTRUMENTS AND MOUNTINGS THEREFOR, BUT NOT INCLUDING INSTRUMENTS FOR RADIO-ASTRONOMY:Other instruments :Other" +90059000,binocu lars monoculars other op tical telescopes and mountings therefor other astronomical instruments and mountings including instruments for therefor but not >> parts and accessories including mountings +90059010,"BINOCULARS, MONOCULARS, OTHER OPTICAL TELESCOPES, AND MOUNTINGS THEREFOR; OTHER ASTRONOMICAL INSTRUMENTS AND MOUNTINGS THEREFOR, BUT NOT INCLUDING INSTRUMENTS FOR RADIO-ASTRONOMY:Parts and accessories (including mountings):Of binocular or telescopes of heading 9005, other than mountings" +90059020,"BINOCULARS, MONOCULARS, OTHER OPTICAL TELESCOPES, AND MOUNTINGS THEREFOR; OTHER ASTRONOMICAL INSTRUMENTS AND MOUNTINGS THEREFOR, BUT NOT INCLUDING INSTRUMENTS FOR RADIO-ASTRONOMY:Parts and accessories (including mountings):Mountings for astronomical instruments" +90059090,"BINOCULARS, MONOCULARS, OTHER OPTICAL TELESCOPES, AND MOUNTINGS THEREFOR; OTHER ASTRONOMICAL INSTRUMENTS AND MOUNTINGS THEREFOR, BUT NOT INCLUDING INSTRUMENTS FOR RADIO-ASTRONOMY:Parts and accessories (including mountings):Other" +90060000,photographic other than cinematographic cameras photographic flashlight apparatus and flash bulbs other than discharge lamps of heading 8539 +90061000,PHOTOGRAPHIC (OTHER THAN CINEMATOGRAPHIC) CAMERAS; PHOTOGRAPHIC FLASHLIGHT APPARATUS AND FLASH BULBS OTHER THAN DISCHARGE LAMPS OF HEADING 8539::Cameras of a kind used for preparing printing plates or cylinders(OLD tariff) +90063000,"PHOTOGRAPHIC (OTHER THAN CINEMATOGRAPHIC) CAMERAS; PHOTOGRAPHIC FLASHLIGHT APPARATUS AND FLASH BULBS OTHER THAN DISCHARGE LAMPS OF HEADING 8539::Cameras specially designed for underwater use, for aerial survey or for medical or surgical examination of internal organs; comparison cameras for forensic or criminological purposes" +90064000,PHOTOGRAPHIC (OTHER THAN CINEMATOGRAPHIC) CAMERAS; PHOTOGRAPHIC FLASHLIGHT APPARATUS AND FLASH BULBS OTHER THAN DISCHARGE LAMPS OF HEADING 8539::Instant print cameras +90065100,PHOTOGRAPHIC (OTHER THAN CINEMATOGRAPHIC) CAMERAS; PHOTOGRAPHIC FLASHLIGHT APPARATUS AND FLASH BULBS OTHER THAN DISCHARGE LAMPS OF HEADING 8539::With a through-the-lens view-finder [single lens reflex (SLR)] for roll film of a width not exceeding 35 mm(OLD tariff) +90065200,"PHOTOGRAPHIC (OTHER THAN CINEMATOGRAPHIC) CAMERAS; PHOTOGRAPHIC FLASHLIGHT APPARATUS AND FLASH BULBS OTHER THAN DISCHARGE LAMPS OF HEADING 8539::Other, for roll film of a width less than 35 mm(OLD tariff)" +90065300,photographic other than cinematographic cameras photographic flashlight apparatus and flash bulbs other than discharge lamps of heading 8539 >> for roll film of a width of 35 mm +90065310,"PHOTOGRAPHIC (OTHER THAN CINEMATOGRAPHIC) CAMERAS; PHOTOGRAPHIC FLASHLIGHT APPARATUS AND FLASH BULBS OTHER THAN DISCHARGE LAMPS OF HEADING 8539:Other, for roll film of a width of 35 mm:Fixed focus 35 mm cameras" +90065390,"PHOTOGRAPHIC (OTHER THAN CINEMATOGRAPHIC) CAMERAS; PHOTOGRAPHIC FLASHLIGHT APPARATUS AND FLASH BULBS OTHER THAN DISCHARGE LAMPS OF HEADING 8539:Other, for roll film of a width of 35 mm:Other" +90065900,photographic other than cinematographic cameras photographic flashlight apparatus and flash bulbs other 
than discharge lamps of heading 8539 >> other +90065910,PHOTOGRAPHIC (OTHER THAN CINEMATOGRAPHIC) CAMERAS; PHOTOGRAPHIC FLASHLIGHT APPARATUS AND FLASH BULBS OTHER THAN DISCHARGE LAMPS OF HEADING 8539:Other :Fixed focus 110 mm cameras +90065990,PHOTOGRAPHIC (OTHER THAN CINEMATOGRAPHIC) CAMERAS; PHOTOGRAPHIC FLASHLIGHT APPARATUS AND FLASH BULBS OTHER THAN DISCHARGE LAMPS OF HEADING 8539:Other :Other +90066100,"PHOTOGRAPHIC (OTHER THAN CINEMATOGRAPHIC) CAMERAS; PHOTOGRAPHIC FLASHLIGHT APPARATUS AND FLASH BULBS OTHER THAN DISCHARGE LAMPS OF HEADING 8539::Discharge lamp (""electronic"") flashlight apparatus" +90066900,PHOTOGRAPHIC (OTHER THAN CINEMATOGRAPHIC) CAMERAS; PHOTOGRAPHIC FLASHLIGHT APPARATUS AND FLASH BULBS OTHER THAN DISCHARGE LAMPS OF HEADING 8539::Other +90069100,PHOTOGRAPHIC (OTHER THAN CINEMATOGRAPHIC) CAMERAS; PHOTOGRAPHIC FLASHLIGHT APPARATUS AND FLASH BULBS OTHER THAN DISCHARGE LAMPS OF HEADING 8539::For cameras +90069900,PHOTOGRAPHIC (OTHER THAN CINEMATOGRAPHIC) CAMERAS; PHOTOGRAPHIC FLASHLIGHT APPARATUS AND FLASH BULBS OTHER THAN DISCHARGE LAMPS OF HEADING 8539::Other +90070000,cinematographic cameras and projectors whether or not incorporating sound recording or reproducing apparatus +90071000,cinematographic cameras and projectors whether or not incorporating sound recording or reproducing apparatus >> cameras +90071010,"CINEMATOGRAPHIC CAMERAS AND PROJECTORS, WHETHER OR NOT INCORPORATING SOUND RECORDING OR REPRODUCING APPARATUS:Cameras:For film of less than 16mm width or for double - 8 mm film" +90071090,"CINEMATOGRAPHIC CAMERAS AND PROJECTORS, WHETHER OR NOT INCORPORATING SOUND RECORDING OR REPRODUCING APPARATUS:Cameras:Other" +90072000,cinematographic cameras and projectors whether or not incorporating sound recording or reproducing apparatus >> projectors +90072010,"CINEMATOGRAPHIC CAMERAS AND PROJECTORS, WHETHER OR NOT INCORPORATING SOUND RECORDING OR REPRODUCING APPARATUS:Projectors :For film of less than 16 mm width" +90072090,"CINEMATOGRAPHIC CAMERAS AND PROJECTORS, WHETHER OR NOT INCORPORATING SOUND RECORDING OR REPRODUCING APPARATUS:Projectors :Other" +90079100,"CINEMATOGRAPHIC CAMERAS AND PROJECTORS, WHETHER OR NOT INCORPORATING SOUND RECORDING OR REPRODUCING APPARATUS::For cameras" +90079200,"CINEMATOGRAPHIC CAMERAS AND PROJECTORS, WHETHER OR NOT INCORPORATING SOUND RECORDING OR REPRODUCING APPARATUS::For projectors" +90080000,than image projectors other cinematographic photographic other than cinematographic enlargers and reducers +90084000,"IMAGE PROJECTORS, OTHER THAN CINEMATOGRAPHIC; PHOTOGRAPHIC (OTHER THAN CINEMATOGRAPHIC) ENLARGERS::Photographic (other than Cinematographic) enlargers and reducers(OLD tariff)" +90085000,than image projectors other cinematographic photographic other than cinematographic enlargers and reducers >> projectors enlargers and reducers +90085010,"IMAGE PROJECTORS, OTHER THAN CINEMATOGRAPHIC; PHOTOGRAPHIC (OTHER THAN CINEMATOGRAPHIC) ENLARGERS:Projectors, enlargers and reducers:Side Projectors" +90085020,"IMAGE PROJECTORS, OTHER THAN CINEMATOGRAPHIC; PHOTOGRAPHIC (OTHER THAN CINEMATOGRAPHIC) ENLARGERS:Projectors, enlargers and reducers:Microfilm, microfiche or other microform readers, whether or not capable of producing copies" +90085030,"IMAGE PROJECTORS, OTHER THAN CINEMATOGRAPHIC; PHOTOGRAPHIC (OTHER THAN CINEMATOGRAPHIC) ENLARGERS:Projectors, enlargers and reducers:Other image projectors" +90085040,"IMAGE PROJECTORS, OTHER THAN CINEMATOGRAPHIC; PHOTOGRAPHIC (OTHER THAN CINEMATOGRAPHIC) ENLARGERS:Projectors, enlargers and 
reducers:Photographic (other than cinematographic) enlargers and reducers" +90089000,"IMAGE PROJECTORS, OTHER THAN CINEMATOGRAPHIC; PHOTOGRAPHIC (OTHER THAN CINEMATOGRAPHIC) ENLARGERS::Parts and accessories" +90100000,apparatus and equipment for photographic including cinematographic laboratories not specified or included elsewhere in this chapter negatoscopes projection screens +90101000,APPARATUS AND EQUIPMENT FOR PHOTOGRAPHIC (INCLUDING CINEMATOGRAPHIC) LABORATORIES NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS::Apparatus and equipment for automatically developing photographic (including cinematographic) film or paper in rolls or for automatically exposing developed film to rolls of photographic paper +90105000,APPARATUS AND EQUIPMENT FOR PHOTOGRAPHIC (INCLUDING CINEMATOGRAPHIC) LABORATORIES NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS::Other apparatus and equipment for photographic (including cinematographic) laboratories; negatoscopes +90106000,APPARATUS AND EQUIPMENT FOR PHOTOGRAPHIC (INCLUDING CINEMATOGRAPHIC) LABORATORIES NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS::Projection screens +90109000,APPARATUS AND EQUIPMENT FOR PHOTOGRAPHIC (INCLUDING CINEMATOGRAPHIC) LABORATORIES NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS::Parts and accessories +90110000,compound optical microscopes includingthose for cinephotomicrography or microprojection +90111000,"COMPOUND OPTICAL MICROSCOPES, INCLUDING THOSE FOR PHOTOMICRO-GRAPHY, CINEPHOTOMICROGRAPHY OR MICROPROJECTION::Stereoscopic microscopes" +90112000,"COMPOUND OPTICAL MICROSCOPES, INCLUDING THOSE FOR PHOTOMICRO-GRAPHY, CINEPHOTOMICROGRAPHY OR MICROPROJECTION::Other microscopes, for photomicrography, cinephotomicrography or microprojection" +90118000,"COMPOUND OPTICAL MICROSCOPES, INCLUDING THOSE FOR PHOTOMICRO-GRAPHY, CINEPHOTOMICROGRAPHY OR MICROPROJECTION::Other microscopes" +90119000,"COMPOUND OPTICAL MICROSCOPES, INCLUDING THOSE FOR PHOTOMICRO-GRAPHY, CINEPHOTOMICROGRAPHY OR MICROPROJECTION::Parts and accessories" +90120000,microscopes other than optical microscopes diffraction apparatus +90121000,microscopes other than optical microscopes diffraction apparatus >> microscopes other than optical microscopes diffraction apparatus +90121010,MICROSCOPES OTHER THAN OPTICAL MICROSCOPES; DIFFRACTION APPARATUS:Microscopes other than optical microscopes; diffraction apparatus:Electron microscopes fitted with equipment specifically designed for the handling and transport of semiconductor wafers or reticles +90121090,MICROSCOPES OTHER THAN OPTICAL MICROSCOPES; DIFFRACTION APPARATUS:Microscopes other than optical microscopes; diffraction apparatus:Other +90129000,MICROSCOPES OTHER THAN OPTICAL MICROSCOPES; DIFFRACTION APPARATUS::Parts and accessories +90130000,lasers other than laser diodes other optical appliances and instruments not specified or included elsewhere in this chapter +90131000,lasers other than laser diodes other optical appliances and instruments not specified or included elsewhere in this chapter >> telescopic sights for fitting to arms periscopes telescopes designed to form parts of machines appliances instruments or apparatus of this chapter or section xvi telescopic sights for fitting to arms periscopes telescopes designed to form parts of machines appliances instruments or apparatus of this chapter or section xvi +90131010,"LIQUID CRYSTAL DEVICES NOT CONSTITUTING ARTICLES PROVIDED FOR MORE SPECIFICALLY IN OTHER HEADINGS; LASERS, OTHER THAN LASER DIODES; OTHER OPTICAL APPLIANCES AND INSTRUMENTS, NOT SPECIFIED OR INCLUDED ELSE WHERE 
IN THIS CHAPTER:Telescopic sights for fitting to arms; periscopes; telescopes designed to form parts of machines, appliances, instruments or apparatus of this Chapter or Section XVI :Telescopic sights for fitting to arms" +90131020,"LIQUID CRYSTAL DEVICES NOT CONSTITUTING ARTICLES PROVIDED FOR MORE SPECIFICALLY IN OTHER HEADINGS; LASERS, OTHER THAN LASER DIODES; OTHER OPTICAL APPLIANCES AND INSTRUMENTS, NOT SPECIFIED OR INCLUDED ELSE WHERE IN THIS CHAPTER:Telescopic sights for fitting to arms; periscopes; telescopes designed to form parts of machines, appliances, instruments or apparatus of this Chapter or Section XVI :Periscopes" +90131090,"LIQUID CRYSTAL DEVICES NOT CONSTITUTING ARTICLES PROVIDED FOR MORE SPECIFICALLY IN OTHER HEADINGS; LASERS, OTHER THAN LASER DIODES; OTHER OPTICAL APPLIANCES AND INSTRUMENTS, NOT SPECIFIED OR INCLUDED ELSE WHERE IN THIS CHAPTER:Telescopic sights for fitting to arms; periscopes; telescopes designed to form parts of machines, appliances, instruments or apparatus of this Chapter or Section XVI :Other" +90132000,"LIQUID CRYSTAL DEVICES NOT CONSTITUTING ARTICLES PROVIDED FOR MORE SPECIFICALLY IN OTHER HEADINGS; LASERS, OTHER THAN LASER DIODES; OTHER OPTICAL APPLIANCES AND INSTRUMENTS, NOT SPECIFIED OR INCLUDED ELSE WHERE IN THIS CHAPTER::Lasers, other than laser diodes" +90138000,"LIQUID CRYSTAL DEVICES NOT CONSTITUTING ARTICLES PROVIDED FOR MORE SPECIFICALLY IN OTHER HEADINGS; LASERS, OTHER THAN LASER DIODES; OTHER OPTICAL APPLIANCES AND INSTRUMENTS, NOT SPECIFIED OR INCLUDED ELSE WHERE IN THIS CHAPTER:Other devices, appliances and instruments :Other devices, appliances and instrument" +90138010,"LIQUID CRYSTAL DEVICES NOT CONSTITUTING ARTICLES PROVIDED FOR MORE SPECIFICALLY IN OTHER HEADINGS; LASERS, OTHER THAN LASER DIODES; OTHER OPTICAL APPLIANCES AND INSTRUMENTS, NOT SPECIFIED OR INCLUDED ELSE WHERE IN THIS CHAPTER:Other devices, appliances and instruments :Liquid crystal devices (LCD)(OLD tariff)" +90138090,"LIQUID CRYSTAL DEVICES NOT CONSTITUTING ARTICLES PROVIDED FOR MORE SPECIFICALLY IN OTHER HEADINGS; LASERS, OTHER THAN LASER DIODES; OTHER OPTICAL APPLIANCES AND INSTRUMENTS, NOT SPECIFIED OR INCLUDED ELSE WHERE IN THIS CHAPTER:Other devices, appliances and instruments :Other(OLD tariff)" +90139000,"LIQUID CRYSTAL DEVICES NOT CONSTITUTING ARTICLES PROVIDED FOR MORE SPECIFICALLY IN OTHER HEADINGS; LASERS, OTHER THAN LASER DIODES; OTHER OPTICAL APPLIANCES AND INSTRUMENTS, NOT SPECIFIED OR INCLUDED ELSE WHERE IN THIS CHAPTER:Parts and accessories:Parts and accessories" +90139010,"LIQUID CRYSTAL DEVICES NOT CONSTITUTING ARTICLES PROVIDED FOR MORE SPECIFICALLY IN OTHER HEADINGS; LASERS, OTHER THAN LASER DIODES; OTHER OPTICAL APPLIANCES AND INSTRUMENTS, NOT SPECIFIED OR INCLUDED ELSE WHERE IN THIS CHAPTER:Parts and accessories:For liquid crystal devices (LCD)(OLD tariff)" +90139090,"LIQUID CRYSTAL DEVICES NOT CONSTITUTING ARTICLES PROVIDED FOR MORE SPECIFICALLY IN OTHER HEADINGS; LASERS, OTHER THAN LASER DIODES; OTHER OPTICAL APPLIANCES AND INSTRUMENTS, NOT SPECIFIED OR INCLUDED ELSE WHERE IN THIS CHAPTER:Parts and accessories:Other(OLD tariff)" +90140000,direction finding compasses other navigational instruments and appliances +90141000,DIRECTION FINDING COMPASSES; OTHER NAVIGATIONAL INSTRUMENTS AND APPLIANCES::Direction finding compasses +90142000,DIRECTION FINDING COMPASSES; OTHER NAVIGATIONAL INSTRUMENTS AND APPLIANCES::Instruments and appliances for aeronautical or space navigation (other than compasses) +90148000,direction finding compasses other 
navigational instruments and appliances >> other instruments and appliances +90148010,DIRECTION FINDING COMPASSES; OTHER NAVIGATIONAL INSTRUMENTS AND APPLIANCES:Other instruments and appliances :Echo sounding instrument +90148020,DIRECTION FINDING COMPASSES; OTHER NAVIGATIONAL INSTRUMENTS AND APPLIANCES:Other instruments and appliances :Sextants +90148090,DIRECTION FINDING COMPASSES; OTHER NAVIGATIONAL INSTRUMENTS AND APPLIANCES:Other instruments and appliances :Other +90149000,DIRECTION FINDING COMPASSES; OTHER NAVIGATIONAL INSTRUMENTS AND APPLIANCES::Parts and accessories +90150000,photogrammetrical surveying including surveying hydrographic oceanographic hydrological meteorological or geophysical instruments and appliances excluding compasses rangefinders +90151000,"SURVEYING (INCLUDING PHOTOGRAMMETRICAL SURVEYING), HYDROGRAPHIC, OCEANOGRAPHIC, HYDROLOGICAL, METEOROLOGICAL OR GEOPHYSICAL INSTRUMENTS AND APPLIANCES, EXCLUDING COMPASSES; RANGEFINDERS::Rangefinders" +90152000,"SURVEYING (INCLUDING PHOTOGRAMMETRICAL SURVEYING), HYDROGRAPHIC, OCEANOGRAPHIC, HYDROLOGICAL, METEOROLOGICAL OR GEOPHYSICAL INSTRUMENTS AND APPLIANCES, EXCLUDING COMPASSES; RANGEFINDERS::Theodolities and tachymeters (tacheometers)" +90153000,photogrammetrical surveying including surveying hydrographic oceanographic hydrological meteorological or geophysical instruments and appliances excluding compasses rangefinders >> levels +90153010,"SURVEYING (INCLUDING PHOTOGRAMMETRICAL SURVEYING), HYDROGRAPHIC, OCEANOGRAPHIC, HYDROLOGICAL, METEOROLOGICAL OR GEOPHYSICAL INSTRUMENTS AND APPLIANCES, EXCLUDING COMPASSES; RANGEFINDERS:Levels :Dumpy levels or engineer's levels or builder's levels (not automatic) and quick set levels with or without horizontal circles" +90153090,"SURVEYING (INCLUDING PHOTOGRAMMETRICAL SURVEYING), HYDROGRAPHIC, OCEANOGRAPHIC, HYDROLOGICAL, METEOROLOGICAL OR GEOPHYSICAL INSTRUMENTS AND APPLIANCES, EXCLUDING COMPASSES; RANGEFINDERS:Levels :Other" +90154000,"SURVEYING (INCLUDING PHOTOGRAMMETRICAL SURVEYING), HYDROGRAPHIC, OCEANOGRAPHIC, HYDROLOGICAL, METEOROLOGICAL OR GEOPHYSICAL INSTRUMENTS AND APPLIANCES, EXCLUDING COMPASSES; RANGEFINDERS::Photogrammetrical surveying instruments and appliances" +90158000,photogrammetrical surveying including surveying hydrographic oceanographic hydrological meteorological or geophysical instruments and appliances excluding compasses rangefinders >> other instruments and appliances +90158010,"SURVEYING (INCLUDING PHOTOGRAMMETRICAL SURVEYING), HYDROGRAPHIC, OCEANOGRAPHIC, HYDROLOGICAL, METEOROLOGICAL OR GEOPHYSICAL INSTRUMENTS AND APPLIANCES, EXCLUDING COMPASSES; RANGEFINDERS:Other instruments and appliances:Hydrographic instruments" +90158020,"SURVEYING (INCLUDING PHOTOGRAMMETRICAL SURVEYING), HYDROGRAPHIC, OCEANOGRAPHIC, HYDROLOGICAL, METEOROLOGICAL OR GEOPHYSICAL INSTRUMENTS AND APPLIANCES, EXCLUDING COMPASSES; RANGEFINDERS:Other instruments and appliances:Meteorological instruments" +90158030,"SURVEYING (INCLUDING PHOTOGRAMMETRICAL SURVEYING), HYDROGRAPHIC, OCEANOGRAPHIC, HYDROLOGICAL, METEOROLOGICAL OR GEOPHYSICAL INSTRUMENTS AND APPLIANCES, EXCLUDING COMPASSES; RANGEFINDERS:Other instruments and appliances:Geophysical instruments" +90158090,"SURVEYING (INCLUDING PHOTOGRAMMETRICAL SURVEYING), HYDROGRAPHIC, OCEANOGRAPHIC, HYDROLOGICAL, METEOROLOGICAL OR GEOPHYSICAL INSTRUMENTS AND APPLIANCES, EXCLUDING COMPASSES; RANGEFINDERS:Other instruments and appliances:Other" +90159000,"SURVEYING (INCLUDING PHOTOGRAMMETRICAL SURVEYING), HYDROGRAPHIC, OCEANOGRAPHIC, HYDROLOGICAL, 
METEOROLOGICAL OR GEOPHYSICAL INSTRUMENTS AND APPLIANCES, EXCLUDING COMPASSES; RANGEFINDERS::Parts and accessories" +90160000,balances of a sensitivity of 5 cg or better with or without weights balances of a sensitivity of 5cg or better with or without weights +90160010,"BALANCES OF A SENSITIVITY OF 5 cg OR BETTER, WITH OR WITHOUT WEIGHTS:Balances of a sensitivity of 5cg or better, with or without weights :Electric balances" +90160020,"BALANCES OF A SENSITIVITY OF 5 cg OR BETTER, WITH OR WITHOUT WEIGHTS:Balances of a sensitivity of 5cg or better, with or without weights :Other balances" +90160090,"BALANCES OF A SENSITIVITY OF 5 cg OR BETTER, WITH OR WITHOUT WEIGHTS:Balances of a sensitivity of 5cg or better, with or without weights :Parts" +90170000,drawing or mathematical calculating instruments for example drafting machines pantographs protractors drawing sets slide rules disc calculators instruments for measuring length for use in the hand for example measuring rods and tapes micrometers callipers not specified or included elsewhere in this chapter +90171000,"DRAWING, MARKING-OUT OR MATHEMATICAL CALCULATING INSTRUMENTS (FOR EXAMPLE, DRAFTING MACHINES, PANTOGRAPHS, PROTRACTORS, DRAWING SETS, SLIDE RULES, DISC CALCULATORS); INSTRUMENTS FOR MEASURING LENGTH, FOR USE IN THE HAND (FOR EXAMPLE, MEASURING RODS AND TAPES, MICROMETERS, CALLIPERS), NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER::Drafting tables and machines, whether or not automatic" +90172000,drawing or mathematical calculating instruments for example drafting machines pantographs protractors drawing sets slide rules disc calculators instruments for measuring length for use in the hand for example measuring rods and tapes micrometers callipers not specified or included elsewhere in this chapter >> other drawing or mathematical calculating instruments +90172010,"DRAWING, MARKING-OUT OR MATHEMATICAL CALCULATING INSTRUMENTS (FOR EXAMPLE, DRAFTING MACHINES, PANTOGRAPHS, PROTRACTORS, DRAWING SETS, SLIDE RULES, DISC CALCULATORS); INSTRUMENTS FOR MEASURING LENGTH, FOR USE IN THE HAND (FOR EXAMPLE, MEASURING RODS AND TAPES, MICROMETERS, CALLIPERS), NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Other drawing, marking-out or mathematical calculating instruments :Drawing and marking-out instruments" +90172020,"DRAWING, MARKING-OUT OR MATHEMATICAL CALCULATING INSTRUMENTS (FOR EXAMPLE, DRAFTING MACHINES, PANTOGRAPHS, PROTRACTORS, DRAWING SETS, SLIDE RULES, DISC CALCULATORS); INSTRUMENTS FOR MEASURING LENGTH, FOR USE IN THE HAND (FOR EXAMPLE, MEASURING RODS AND TAPES, MICROMETERS, CALLIPERS), NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Other drawing, marking-out or mathematical calculating instruments :Mathematical calculating instruments" +90172030,"DRAWING, MARKING-OUT OR MATHEMATICAL CALCULATING INSTRUMENTS (FOR EXAMPLE, DRAFTING MACHINES, PANTOGRAPHS, PROTRACTORS, DRAWING SETS, SLIDE RULES, DISC CALCULATORS); INSTRUMENTS FOR MEASURING LENGTH, FOR USE IN THE HAND (FOR EXAMPLE, MEASURING RODS AND TAPES, MICROMETERS, CALLIPERS), NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Other drawing, marking-out or mathematical calculating instruments :Pantograph" +90172090,"DRAWING, MARKING-OUT OR MATHEMATICAL CALCULATING INSTRUMENTS (FOR EXAMPLE, DRAFTING MACHINES, PANTOGRAPHS, PROTRACTORS, DRAWING SETS, SLIDE RULES, DISC CALCULATORS); INSTRUMENTS FOR MEASURING LENGTH, FOR USE IN THE HAND (FOR EXAMPLE, MEASURING RODS AND TAPES, MICROMETERS, CALLIPERS), NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Other drawing, 
marking-out or mathematical calculating instruments :Other" +90173000,drawing or mathematical calculating instruments for example drafting machines pantographs protractors drawing sets slide rules disc calculators instruments for measuring length for use in the hand for example measuring rods and tapes micrometers callipers not specified or included elsewhere in this chapter >> micrometers calipers and gauges +90173010,"DRAWING, MARKING-OUT OR MATHEMATICAL CALCULATING INSTRUMENTS (FOR EXAMPLE, DRAFTING MACHINES, PANTOGRAPHS, PROTRACTORS, DRAWING SETS, SLIDE RULES, DISC CALCULATORS); INSTRUMENTS FOR MEASURING LENGTH, FOR USE IN THE HAND (FOR EXAMPLE, MEASURING RODS AND TAPES, MICROMETERS, CALLIPERS), NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Micrometers, calipers and gauges :Micro-meters and calipers" +90173021,"DRAWING, MARKING-OUT OR MATHEMATICAL CALCULATING INSTRUMENTS (FOR EXAMPLE, DRAFTING MACHINES, PANTOGRAPHS, PROTRACTORS, DRAWING SETS, SLIDE RULES, DISC CALCULATORS); INSTRUMENTS FOR MEASURING LENGTH, FOR USE IN THE HAND (FOR EXAMPLE, MEASURING RODS AND TAPES, MICROMETERS, CALLIPERS), NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Micrometers, calipers and gauges :Plug" +90173022,"DRAWING, MARKING-OUT OR MATHEMATICAL CALCULATING INSTRUMENTS (FOR EXAMPLE, DRAFTING MACHINES, PANTOGRAPHS, PROTRACTORS, DRAWING SETS, SLIDE RULES, DISC CALCULATORS); INSTRUMENTS FOR MEASURING LENGTH, FOR USE IN THE HAND (FOR EXAMPLE, MEASURING RODS AND TAPES, MICROMETERS, CALLIPERS), NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Micrometers, calipers and gauges :Ring" +90173023,"DRAWING, MARKING-OUT OR MATHEMATICAL CALCULATING INSTRUMENTS (FOR EXAMPLE, DRAFTING MACHINES, PANTOGRAPHS, PROTRACTORS, DRAWING SETS, SLIDE RULES, DISC CALCULATORS); INSTRUMENTS FOR MEASURING LENGTH, FOR USE IN THE HAND (FOR EXAMPLE, MEASURING RODS AND TAPES, MICROMETERS, CALLIPERS), NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Micrometers, calipers and gauges :Slip" +90173029,"DRAWING, MARKING-OUT OR MATHEMATICAL CALCULATING INSTRUMENTS (FOR EXAMPLE, DRAFTING MACHINES, PANTOGRAPHS, PROTRACTORS, DRAWING SETS, SLIDE RULES, DISC CALCULATORS); INSTRUMENTS FOR MEASURING LENGTH, FOR USE IN THE HAND (FOR EXAMPLE, MEASURING RODS AND TAPES, MICROMETERS, CALLIPERS), NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Micrometers, calipers and gauges :Other" +90178000,drawing or mathematical calculating instruments for example drafting machines pantographs protractors drawing sets slide rules disc calculators instruments for measuring length for use in the hand for example measuring rods and tapes micrometers callipers not specified or included elsewhere in this chapter >> other instruments +90178010,"DRAWING, MARKING-OUT OR MATHEMATICAL CALCULATING INSTRUMENTS (FOR EXAMPLE, DRAFTING MACHINES, PANTOGRAPHS, PROTRACTORS, DRAWING SETS, SLIDE RULES, DISC CALCULATORS); INSTRUMENTS FOR MEASURING LENGTH, FOR USE IN THE HAND (FOR EXAMPLE, MEASURING RODS AND TAPES, MICROMETERS, CALLIPERS), NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Other instruments :Measuring rods and tapes and divided scales" +90178090,"DRAWING, MARKING-OUT OR MATHEMATICAL CALCULATING INSTRUMENTS (FOR EXAMPLE, DRAFTING MACHINES, PANTOGRAPHS, PROTRACTORS, DRAWING SETS, SLIDE RULES, DISC CALCULATORS); INSTRUMENTS FOR MEASURING LENGTH, FOR USE IN THE HAND (FOR EXAMPLE, MEASURING RODS AND TAPES, MICROMETERS, CALLIPERS), NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER:Other instruments :Other" +90179000,"DRAWING, MARKING-OUT OR MATHEMATICAL 
CALCULATING INSTRUMENTS (FOR EXAMPLE, DRAFTING MACHINES, PANTOGRAPHS, PROTRACTORS, DRAWING SETS, SLIDE RULES, DISC CALCULATORS); INSTRUMENTS FOR MEASURING LENGTH, FOR USE IN THE HAND (FOR EXAMPLE, MEASURING RODS AND TAPES, MICROMETERS, CALLIPERS), NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER::Parts and accessories" +90180000,instruments and appliances used in medical dental or veterinary surgical sciences dental or veterinary surgical sciences including scientigraphic apparatus other electromedical apparatus and instruments apparatus including apparatus for functional exploratory examinations or for checking physiological parameters +90181100,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS ::Electro-cardiographs" +90181200,instruments and appliances used in medical dental or veterinary surgical sciences dental or veterinary surgical sciences including scientigraphic apparatus other electromedical apparatus and instruments apparatus including apparatus for functional exploratory examinations or for checking physiological parameters >> ultrasonic scanning apparatus +90181210,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Ultrasonic scanning apparatus :Linear ultrasound scanner" +90181290,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Ultrasonic scanning apparatus :Other" +90181300,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS ::Magnetic resonance imaging apparatus" +90181400,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS ::Scientigraphic apparatus" +90181900,instruments and appliances used in medical dental or veterinary surgical sciences dental or veterinary surgical sciences including scientigraphic apparatus other electromedical apparatus and instruments apparatus including apparatus for functional exploratory examinations or for checking physiological parameters >> other +90181910,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other :Electro encephalographs" +90181920,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other :Echo cardiograph(OLD tariff)" +90181990,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other :Other" +90182000,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS ::Ultra-violet or infra-red ray apparatus" +90183100,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL 
APPARATUS AND SIGHT-TESTING INSTRUMENTS ::Syringes, with or without needles" +90183200,instruments and appliances used in medical dental or veterinary surgical sciences dental or veterinary surgical sciences including scientigraphic apparatus other electromedical apparatus and instruments apparatus including apparatus for functional exploratory examinations or for checking physiological parameters >> tubular metal needles and needles for sutures +90183210,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Tubular metal needles and needles for sutures :Needles for suture" +90183220,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Tubular metal needles and needles for sutures :Hollow needles for injection, aspiration, biopsy and transfusion" +90183230,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Tubular metal needles and needles for sutures :Hilerio venus fistula needles" +90183290,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Tubular metal needles and needles for sutures :Other" +90183900,instruments and appliances used in medical dental or veterinary surgical sciences dental or veterinary surgical sciences including scientigraphic apparatus other electromedical apparatus and instruments apparatus including apparatus for functional exploratory examinations or for checking physiological parameters >> other +90183910,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other :Catheters (for urine, stool)" +90183920,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other :Cardiac catheters" +90183930,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other :Cannulae" +90183990,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other :Other" +90184100,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS ::Dental drill engines, whether or not combined on a single base with other dental equipment" +90184900,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS ::Other" +90185000,instruments and appliances used in medical dental or veterinary surgical sciences dental or veterinary surgical sciences including scientigraphic apparatus other electromedical apparatus and instruments apparatus including apparatus for functional exploratory examinations or for checking physiological parameters 
>> other ophthalmic instruments and appliances +90185010,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other ophthalmic instruments and appliances :Ophthalmoscopes" +90185020,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other ophthalmic instruments and appliances :Tonometers" +90185030,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other ophthalmic instruments and appliances :Ophthalmic lasers" +90185090,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other ophthalmic instruments and appliances :Other" +90189000,instruments and appliances used in medical dental or veterinary surgical sciences dental or veterinary surgical sciences including scientigraphic apparatus other electromedical apparatus and instruments apparatus including apparatus for functional exploratory examinations or for checking physiological parameters >> other instruments and appliances diagnostic instruments and apparatus +90189011,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Instrument and apparatus for measuring blood pressure" +90189012,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Stethoscopes" +90189019,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Other" +90189021,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Bone saws, drills and trephines" +90189022,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Knives, scissors and blades" +90189023,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Forceps, forcep clamps, clips, needle holders, introducers, cephalotribe bone holding and other holding instruments" +90189024,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Chisel, gauges, elevators, raspatones, osteotome, craniotome, bone cutters" +90189025,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND 
SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Retractors, spatulaprobes, hooks dialators, sounds, mallets" +90189029,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Other" +90189031,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Artificial kidney (dialysis) apparatus" +90189032,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Blood tranfusion apparatus" +90189033,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Haemofiltration instruments(OLD tariff)" +90189041,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Anesthetic apparatus and instruments" +90189042,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :ENT precision instruments" +90189043,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Acupuncture apparatus" +90189044,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Endoscopes" +90189091,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Hilerial or venous shunts" +90189092,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Baby incubators" +90189093,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Heart-lung machines" +90189094,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Defibrillators" +90189095,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Fibrescopes" +90189096,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL 
APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Laproscopes" +90189097,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Vetrasonic lithotripsy instruments" +90189098,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Apparatus for nerve stimulation" +90189099,"INSTRUMENTS AND APPLIANCES USED IN MEDICAL, SURGICAL, DENTAL OR VETERINARY SCIENCES, INCLUDING SCIENTIGRAPHIC APPARATUS, OTHER ELECTROMEDICAL APPARATUS AND SIGHT-TESTING INSTRUMENTS :Other instruments and appliances :Other" +90190000,appliances massage apparatus psychological apparatus ozone therapy oxygen therapy aerosol therapy artificial respiration or other therapeutic respiration apparatus +90191000,appliances massage apparatus psychological apparatus ozone therapy oxygen therapy aerosol therapy artificial respiration or other therapeutic respiration apparatus >> appliances massage apparatus psychological apparatus +90191010,"MECHANO-THERAPY APPLIANCES; MASSAGE APPARATUS; PSYCHOLOGICAL APTITUDETESTING APPARATUS; OZONE THERAPY, OXYGEN THERAPY, AEROSOL THERAPY, ARTIFICIAL RESPIRATION OR OTHER THERAPEUTIC RESPIRATION APPARATUS:Mechano-therapy appliances; massage apparatus; psychological aptitude-testing apparatus :Mechano-therapy appliances" +90191020,"MECHANO-THERAPY APPLIANCES; MASSAGE APPARATUS; PSYCHOLOGICAL APTITUDETESTING APPARATUS; OZONE THERAPY, OXYGEN THERAPY, AEROSOL THERAPY, ARTIFICIAL RESPIRATION OR OTHER THERAPEUTIC RESPIRATION APPARATUS:Mechano-therapy appliances; massage apparatus; psychological aptitude-testing apparatus :Massage apparatus" +90191090,"MECHANO-THERAPY APPLIANCES; MASSAGE APPARATUS; PSYCHOLOGICAL APTITUDETESTING APPARATUS; OZONE THERAPY, OXYGEN THERAPY, AEROSOL THERAPY, ARTIFICIAL RESPIRATION OR OTHER THERAPEUTIC RESPIRATION APPARATUS:Mechano-therapy appliances; massage apparatus; psychological aptitude-testing apparatus :Other" +90192000,appliances massage apparatus psychological apparatus ozone therapy oxygen therapy aerosol therapy artificial respiration or other therapeutic respiration apparatus >> ozone therapy oxygen therapy aerosol therapy artificial respiration or other therapeutic respiration apparatus +90192010,"MECHANO-THERAPY APPLIANCES; MASSAGE APPARATUS; PSYCHOLOGICAL APTITUDETESTING APPARATUS; OZONE THERAPY, OXYGEN THERAPY, AEROSOL THERAPY, ARTIFICIAL RESPIRATION OR OTHER THERAPEUTIC RESPIRATION APPARATUS:Ozone therapy, oxygen therapy, aerosol therapy, artificial respiration or other therapeutic respiration apparatus :Oxygen therapy apparatus" +90192090,"MECHANO-THERAPY APPLIANCES; MASSAGE APPARATUS; PSYCHOLOGICAL APTITUDETESTING APPARATUS; OZONE THERAPY, OXYGEN THERAPY, AEROSOL THERAPY, ARTIFICIAL RESPIRATION OR OTHER THERAPEUTIC RESPIRATION APPARATUS:Ozone therapy, oxygen therapy, aerosol therapy, artificial respiration or other therapeutic respiration apparatus :Other" +90200000,"::OTHER BREATHING APPLIANCES AND GAS MASKS, EXCLUDING PROTECTIVE MASKS HAVING NEITHER MECHANICAL PARTS NOR REPLACEABLE FILTERS" +90210000,orthopaedic appliances including crutches surgical belts and trusses splints and other fracture appliances artificial parts of the body hearing aids and other appliances which are worn in or carried or implanted the 
body to compensate for a defect or disability +90211000,"ORTHOPAEDIC APPLIANCES, INCLUDING CRUTCHES, SURGICAL BELTS AND TRUSSES; SPLINTS AND OTHER FRACTURE APPLIANCES; ARTIFICIAL PARTS OF THE BODY; HEARING AIDS AND OTHER APPLIANCES WHICH ARE WORN OR CARRIED, OR IMPLANTED IN THE BODY, TO COMPENSATE FOR A DEFECT OR DISABILITY::Orthopeaedic or fracture appliances" +90212100,"ORTHOPAEDIC APPLIANCES, INCLUDING CRUTCHES, SURGICAL BELTS AND TRUSSES; SPLINTS AND OTHER FRACTURE APPLIANCES; ARTIFICIAL PARTS OF THE BODY; HEARING AIDS AND OTHER APPLIANCES WHICH ARE WORN OR CARRIED, OR IMPLANTED IN THE BODY, TO COMPENSATE FOR A DEFECT OR DISABILITY::Artificial teeth" +90212900,"ORTHOPAEDIC APPLIANCES, INCLUDING CRUTCHES, SURGICAL BELTS AND TRUSSES; SPLINTS AND OTHER FRACTURE APPLIANCES; ARTIFICIAL PARTS OF THE BODY; HEARING AIDS AND OTHER APPLIANCES WHICH ARE WORN OR CARRIED, OR IMPLANTED IN THE BODY, TO COMPENSATE FOR A DEFECT OR DISABILITY::Other" +90213100,"ORTHOPAEDIC APPLIANCES, INCLUDING CRUTCHES, SURGICAL BELTS AND TRUSSES; SPLINTS AND OTHER FRACTURE APPLIANCES; ARTIFICIAL PARTS OF THE BODY; HEARING AIDS AND OTHER APPLIANCES WHICH ARE WORN OR CARRIED, OR IMPLANTED IN THE BODY, TO COMPENSATE FOR A DEFECT OR DISABILITY::Artificial joints" +90213900,"ORTHOPAEDIC APPLIANCES, INCLUDING CRUTCHES, SURGICAL BELTS AND TRUSSES; SPLINTS AND OTHER FRACTURE APPLIANCES; ARTIFICIAL PARTS OF THE BODY; HEARING AIDS AND OTHER APPLIANCES WHICH ARE WORN OR CARRIED, OR IMPLANTED IN THE BODY, TO COMPENSATE FOR A DEFECT OR DISABILITY::Other" +90214000,orthopaedic appliances including crutches surgical belts and trusses splints and other fracture appliances artificial parts of the body hearing aids and other appliances which are worn in or carried or implanted the body to compensate for a defect or disability >> hearing aids excluding parts and accessories +90214010,"ORTHOPAEDIC APPLIANCES, INCLUDING CRUTCHES, SURGICAL BELTS AND TRUSSES; SPLINTS AND OTHER FRACTURE APPLIANCES; ARTIFICIAL PARTS OF THE BODY; HEARING AIDS AND OTHER APPLIANCES WHICH ARE WORN OR CARRIED, OR IMPLANTED IN THE BODY, TO COMPENSATE FOR A DEFECT OR DISABILITY:Hearing aids, excluding parts and accessories:Frequency modulated hearing aid system used for hearing by handicapped persons in group situation" +90214090,"ORTHOPAEDIC APPLIANCES, INCLUDING CRUTCHES, SURGICAL BELTS AND TRUSSES; SPLINTS AND OTHER FRACTURE APPLIANCES; ARTIFICIAL PARTS OF THE BODY; HEARING AIDS AND OTHER APPLIANCES WHICH ARE WORN OR CARRIED, OR IMPLANTED IN THE BODY, TO COMPENSATE FOR A DEFECT OR DISABILITY:Hearing aids, excluding parts and accessories:Other" +90215000,"ORTHOPAEDIC APPLIANCES, INCLUDING CRUTCHES, SURGICAL BELTS AND TRUSSES; SPLINTS AND OTHER FRACTURE APPLIANCES; ARTIFICIAL PARTS OF THE BODY; HEARING AIDS AND OTHER APPLIANCES WHICH ARE WORN OR CARRIED, OR IMPLANTED IN THE BODY, TO COMPENSATE FOR A DEFECT OR DISABILITY::Pacemakers for stimulating heart muscles, excluding parts and accessories" +90219000,orthopaedic appliances including crutches surgical belts and trusses splints and other fracture appliances artificial parts of the body hearing aids and other appliances which are worn in or carried or implanted the body to compensate for a defect or disability >> other +90219010,"ORTHOPAEDIC APPLIANCES, INCLUDING CRUTCHES, SURGICAL BELTS AND TRUSSES; SPLINTS AND OTHER FRACTURE APPLIANCES; ARTIFICIAL PARTS OF THE BODY; HEARING AIDS AND OTHER APPLIANCES WHICH ARE WORN OR CARRIED, OR IMPLANTED IN THE BODY, TO COMPENSATE FOR A DEFECT OR DISABILITY:Other :Parts and 
accessories of hearing aids" +90219090,"ORTHOPAEDIC APPLIANCES, INCLUDING CRUTCHES, SURGICAL BELTS AND TRUSSES; SPLINTS AND OTHER FRACTURE APPLIANCES; ARTIFICIAL PARTS OF THE BODY; HEARING AIDS AND OTHER APPLIANCES WHICH ARE WORN OR CARRIED, OR IMPLANTED IN THE BODY, TO COMPENSATE FOR A DEFECT OR DISABILITY:Other :Other" +90220000,apparatus based on the use of or of alpha beta gamma or other ionising radiations whether or not for medical surgical dental radiography or veterinary uses including or radiotherapy apparatus tubes and high tension other generators generators control panels and desks screens examination or treatment tables chairs and the like apparatus based on the use of whether or not for medical surgical dental or veterinary uses including radiography or radiotherapy apparatus +90221200,"APPARATUS BASED ON THE USE OF X-RAYS OR OF ALPHA, BETA OR GAMMA RADIATIONS, WHETHER OR NOT FOR MEDICAL, SURGICAL, DENTAL OR VETERINARY USES, INCLUDING RADIOGRAPHY OR RADIOTHERAPY APPARATUS,X-RAY TUBES AND OTHER X-RAY GENERATORS, HIGH TENSION GENERATORS, CONTROL PANELS AND DESKS, SCREENS, EXAMINATION OR TREATMENT TABLES, CHAIRS AND THE LIKE::Computed tomography apparatus" +90221300,"APPARATUS BASED ON THE USE OF X-RAYS OR OF ALPHA, BETA OR GAMMA RADIATIONS, WHETHER OR NOT FOR MEDICAL, SURGICAL, DENTAL OR VETERINARY USES, INCLUDING RADIOGRAPHY OR RADIOTHERAPY APPARATUS,X-RAY TUBES AND OTHER X-RAY GENERATORS, HIGH TENSION GENERATORS, CONTROL PANELS AND DESKS, SCREENS, EXAMINATION OR TREATMENT TABLES, CHAIRS AND THE LIKE::Other, for dental uses" +90221400,apparatus based on the use of or of alpha beta gamma or other ionising radiations whether or not for medical surgical dental radiography or veterinary uses including or radiotherapy apparatus tubes and high tension other generators generators control panels and desks screens examination or treatment tables chairs and the like apparatus based on the use of whether or not for medical surgical dental or veterinary uses including radiography or radiotherapy apparatus >> other for medical surgical or veterinary uses +90221410,"APPARATUS BASED ON THE USE OF X-RAYS OR OF ALPHA, BETA OR GAMMA RADIATIONS, WHETHER OR NOT FOR MEDICAL, SURGICAL, DENTAL OR VETERINARY USES, INCLUDING RADIOGRAPHY OR RADIOTHERAPY APPARATUS,X-RAY TUBES AND OTHER X-RAY GENERATORS, HIGH TENSION GENERATORS, CONTROL PANELS AND DESKS, SCREENS, EXAMINATION OR TREATMENT TABLES, CHAIRS AND THE LIKE:Other, for medical, surgical or veterinary uses :X-ray generators and apparatus (non-portable)" +90221420,"APPARATUS BASED ON THE USE OF X-RAYS OR OF ALPHA, BETA OR GAMMA RADIATIONS, WHETHER OR NOT FOR MEDICAL, SURGICAL, DENTAL OR VETERINARY USES, INCLUDING RADIOGRAPHY OR RADIOTHERAPY APPARATUS,X-RAY TUBES AND OTHER X-RAY GENERATORS, HIGH TENSION GENERATORS, CONTROL PANELS AND DESKS, SCREENS, EXAMINATION OR TREATMENT TABLES, CHAIRS AND THE LIKE:Other, for medical, surgical or veterinary uses :Portable X-ray machine" +90221490,"APPARATUS BASED ON THE USE OF X-RAYS OR OF ALPHA, BETA OR GAMMA RADIATIONS, WHETHER OR NOT FOR MEDICAL, SURGICAL, DENTAL OR VETERINARY USES, INCLUDING RADIOGRAPHY OR RADIOTHERAPY APPARATUS,X-RAY TUBES AND OTHER X-RAY GENERATORS, HIGH TENSION GENERATORS, CONTROL PANELS AND DESKS, SCREENS, EXAMINATION OR TREATMENT TABLES, CHAIRS AND THE LIKE:Other, for medical, surgical or veterinary uses :Other" +90221900,"APPARATUS BASED ON THE USE OF X-RAYS OR OF ALPHA, BETA OR GAMMA RADIATIONS, WHETHER OR NOT FOR MEDICAL, SURGICAL, DENTAL OR VETERINARY USES, INCLUDING RADIOGRAPHY OR 
RADIOTHERAPY APPARATUS,X-RAY TUBES AND OTHER X-RAY GENERATORS, HIGH TENSION GENERATORS, CONTROL PANELS AND DESKS, SCREENS, EXAMINATION OR TREATMENT TABLES, CHAIRS AND THE LIKE::For other uses" +90222100,"APPARATUS BASED ON THE USE OF X-RAYS OR OF ALPHA, BETA OR GAMMA RADIATIONS, WHETHER OR NOT FOR MEDICAL, SURGICAL, DENTAL OR VETERINARY USES, INCLUDING RADIOGRAPHY OR RADIOTHERAPY APPARATUS,X-RAY TUBES AND OTHER X-RAY GENERATORS, HIGH TENSION GENERATORS, CONTROL PANELS AND DESKS, SCREENS, EXAMINATION OR TREATMENT TABLES, CHAIRS AND THE LIKE::For medical, surgical, dental or veterinary uses" +90222900,"APPARATUS BASED ON THE USE OF X-RAYS OR OF ALPHA, BETA OR GAMMA RADIATIONS, WHETHER OR NOT FOR MEDICAL, SURGICAL, DENTAL OR VETERINARY USES, INCLUDING RADIOGRAPHY OR RADIOTHERAPY APPARATUS,X-RAY TUBES AND OTHER X-RAY GENERATORS, HIGH TENSION GENERATORS, CONTROL PANELS AND DESKS, SCREENS, EXAMINATION OR TREATMENT TABLES, CHAIRS AND THE LIKE::For other uses" +90223000,"APPARATUS BASED ON THE USE OF X-RAYS OR OF ALPHA, BETA OR GAMMA RADIATIONS, WHETHER OR NOT FOR MEDICAL, SURGICAL, DENTAL OR VETERINARY USES, INCLUDING RADIOGRAPHY OR RADIOTHERAPY APPARATUS,X-RAY TUBES AND OTHER X-RAY GENERATORS, HIGH TENSION GENERATORS, CONTROL PANELS AND DESKS, SCREENS, EXAMINATION OR TREATMENT TABLES, CHAIRS AND THE LIKE::X-ray tubes(OLD tariff)" +90229010,"APPARATUS BASED ON THE USE OF X-RAYS OR OF ALPHA, BETA OR GAMMA RADIATIONS, WHETHER OR NOT FOR MEDICAL, SURGICAL, DENTAL OR VETERINARY USES, INCLUDING RADIOGRAPHY OR RADIOTHERAPY APPARATUS,X-RAY TUBES AND OTHER X-RAY GENERATORS, HIGH TENSION GENERATORS, CONTROL PANELS AND DESKS, SCREENS, EXAMINATION OR TREATMENT TABLES, CHAIRS AND THE LIKE:Other, including parts and accessories :X-ray valves(OLD tariff)" +90229020,"APPARATUS BASED ON THE USE OF X-RAYS OR OF ALPHA, BETA OR GAMMA RADIATIONS, WHETHER OR NOT FOR MEDICAL, SURGICAL, DENTAL OR VETERINARY USES, INCLUDING RADIOGRAPHY OR RADIOTHERAPY APPARATUS,X-RAY TUBES AND OTHER X-RAY GENERATORS, HIGH TENSION GENERATORS, CONTROL PANELS AND DESKS, SCREENS, EXAMINATION OR TREATMENT TABLES, CHAIRS AND THE LIKE:Other, including parts and accessories :Radiation generation units(OLD tariff)" +90229030,"APPARATUS BASED ON THE USE OF X-RAYS OR OF ALPHA, BETA OR GAMMA RADIATIONS, WHETHER OR NOT FOR MEDICAL, SURGICAL, DENTAL OR VETERINARY USES, INCLUDING RADIOGRAPHY OR RADIOTHERAPY APPARATUS,X-RAY TUBES AND OTHER X-RAY GENERATORS, HIGH TENSION GENERATORS, CONTROL PANELS AND DESKS, SCREENS, EXAMINATION OR TREATMENT TABLES, CHAIRS AND THE LIKE:Other, including parts and accessories :Radiation beam delivery units(OLD tariff)" +90229040,"APPARATUS BASED ON THE USE OF X-RAYS OR OF ALPHA, BETA OR GAMMA RADIATIONS, WHETHER OR NOT FOR MEDICAL, SURGICAL, DENTAL OR VETERINARY USES, INCLUDING RADIOGRAPHY OR RADIOTHERAPY APPARATUS,X-RAY TUBES AND OTHER X-RAY GENERATORS, HIGH TENSION GENERATORS, CONTROL PANELS AND DESKS, SCREENS, EXAMINATION OR TREATMENT TABLES, CHAIRS AND THE LIKE:Other, including parts and accessories :X-ray examination or treatment table, chairs and the like(OLD tariff)" +90229090,"APPARATUS BASED ON THE USE OF X-RAYS OR OF ALPHA, BETA OR GAMMA RADIATIONS, WHETHER OR NOT FOR MEDICAL, SURGICAL, DENTAL OR VETERINARY USES, INCLUDING RADIOGRAPHY OR RADIOTHERAPY APPARATUS,X-RAY TUBES AND OTHER X-RAY GENERATORS, HIGH TENSION GENERATORS, CONTROL PANELS AND DESKS, SCREENS, EXAMINATION OR TREATMENT TABLES, CHAIRS AND THE LIKE:Other, including parts and accessories :Other(OLD tariff)" +90230000,instruments apparatus and models 
designed for demonstrational purposes for example in education or exhibitions unsuitable for other uses instruments apparatus and models designed for demonstrational purposes for example in education or exhibitions unsuitable for other uses +90230010,"INSTRUMENTS, APPARATUS AND MODELS, DESIGNED FOR DEMONSTRATIONAL PURPOSES (FOR EXAMPLE, IN EDUCATION OR EXHIBITIONS), UNSUITABLE FOR OTHER USES:Instruments, apparatus and models, designed for demonstrational purposes (for example, in education or exhibitions), unsuitable for other uses :Teaching aids" +90230090,"INSTRUMENTS, APPARATUS AND MODELS, DESIGNED FOR DEMONSTRATIONAL PURPOSES (FOR EXAMPLE, IN EDUCATION OR EXHIBITIONS), UNSUITABLE FOR OTHER USES:Instruments, apparatus and models, designed for demonstrational purposes (for example, in education or exhibitions), unsuitable for other uses :Other" +90240000,machines and appliances for testing the hardness strength compressibility elasticity or other mechanical properties of materials for example metals wood textiles paper plastics +90241000,"MACHINES AND APPLIANCES FOR TESTING THE HARDNESS, STRENGTH, COMPRESSIBILITY, ELASTICITY OR OTHER MECHANICAL PROPERTIES OF MATERIALS (FOR EXAMPLE, METALS, WOOD, TEXTILES, PAPER, PLASTICS)::Machines and appliances for testing metals" +90248000,machines and appliances for testing the hardness strength compressibility elasticity or other mechanical properties of materials for example metals wood textiles paper plastics >> other machines and appliances +90248010,"MACHINES AND APPLIANCES FOR TESTING THE HARDNESS, STRENGTH, COMPRESSIBILITY, ELASTICITY OR OTHER MECHANICAL PROPERTIES OF MATERIALS (FOR EXAMPLE, METALS, WOOD, TEXTILES, PAPER, PLASTICS):Other machines and appliances :For testing textiles, paper and paperboard" +90248091,"MACHINES AND APPLIANCES FOR TESTING THE HARDNESS, STRENGTH, COMPRESSIBILITY, ELASTICITY OR OTHER MECHANICAL PROPERTIES OF MATERIALS (FOR EXAMPLE, METALS, WOOD, TEXTILES, PAPER, PLASTICS):Other machines and appliances :For testing hardness" +90248099,"MACHINES AND APPLIANCES FOR TESTING THE HARDNESS, STRENGTH, COMPRESSIBILITY, ELASTICITY OR OTHER MECHANICAL PROPERTIES OF MATERIALS (FOR EXAMPLE, METALS, WOOD, TEXTILES, PAPER, PLASTICS):Other machines and appliances :Other" +90249000,"MACHINES AND APPLIANCES FOR TESTING THE HARDNESS, STRENGTH, COMPRESSIBILITY, ELASTICITY OR OTHER MECHANICAL PROPERTIES OF MATERIALS (FOR EXAMPLE, METALS, WOOD, TEXTILES, PAPER, PLASTICS)::Parts and accessories" +90250000,hydrometers and similar floating instruments thermometers pyrometers barometers hygrometers and psychrometers recording or not and any combination of these instruments thermometers and pyrometers not combined with other instruments +90251100,hydrometers and similar floating instruments thermometers pyrometers barometers hygrometers and psychrometers recording or not and any combination of these instruments thermometers and pyrometers not combined with other instruments >> for direct reading +90251110,"HYDROMETERS AND SIMILAR FLOATING INSTRUMENTS, THERMOMETERS, PYROMETERS, BAROMETERS, HYGROMETERS AND PSYCHROMETERS, RECORDING OR NOT, AND ANY COMBINATION OF THESE INSTRUMENTS:Liquid-filled, for direct reading :Clinical thermometers" +90251190,"HYDROMETERS AND SIMILAR FLOATING INSTRUMENTS, THERMOMETERS, PYROMETERS, BAROMETERS, HYGROMETERS AND PSYCHROMETERS, RECORDING OR NOT, AND ANY COMBINATION OF THESE INSTRUMENTS:Liquid-filled, for direct reading :Other" +90251900,hydrometers and similar floating instruments thermometers pyrometers 
barometers hygrometers and psychrometers recording or not and any combination of these instruments thermometers and pyrometers not combined with other instruments >> other +90251910,"HYDROMETERS AND SIMILAR FLOATING INSTRUMENTS, THERMOMETERS, PYROMETERS, BAROMETERS, HYGROMETERS AND PSYCHROMETERS, RECORDING OR NOT, AND ANY COMBINATION OF THESE INSTRUMENTS:Other :Digital thermometers" +90251920,"HYDROMETERS AND SIMILAR FLOATING INSTRUMENTS, THERMOMETERS, PYROMETERS, BAROMETERS, HYGROMETERS AND PSYCHROMETERS, RECORDING OR NOT, AND ANY COMBINATION OF THESE INSTRUMENTS:Other :Pyrometers" +90251990,"HYDROMETERS AND SIMILAR FLOATING INSTRUMENTS, THERMOMETERS, PYROMETERS, BAROMETERS, HYGROMETERS AND PSYCHROMETERS, RECORDING OR NOT, AND ANY COMBINATION OF THESE INSTRUMENTS:Other :Other" +90258000,hydrometers and similar floating instruments thermometers pyrometers barometers hygrometers and psychrometers recording or not and any combination of these instruments thermometers and pyrometers not combined with other instruments >> other instruments +90258010,"HYDROMETERS AND SIMILAR FLOATING INSTRUMENTS, THERMOMETERS, PYROMETERS, BAROMETERS, HYGROMETERS AND PSYCHROMETERS, RECORDING OR NOT, AND ANY COMBINATION OF THESE INSTRUMENTS:Other instruments :Hydrometers and similar floating instruments" +90258020,"HYDROMETERS AND SIMILAR FLOATING INSTRUMENTS, THERMOMETERS, PYROMETERS, BAROMETERS, HYGROMETERS AND PSYCHROMETERS, RECORDING OR NOT, AND ANY COMBINATION OF THESE INSTRUMENTS:Other instruments :Barometers, not combined with other instruments" +90258030,"HYDROMETERS AND SIMILAR FLOATING INSTRUMENTS, THERMOMETERS, PYROMETERS, BAROMETERS, HYGROMETERS AND PSYCHROMETERS, RECORDING OR NOT, AND ANY COMBINATION OF THESE INSTRUMENTS:Other instruments :Lactometer" +90258090,"HYDROMETERS AND SIMILAR FLOATING INSTRUMENTS, THERMOMETERS, PYROMETERS, BAROMETERS, HYGROMETERS AND PSYCHROMETERS, RECORDING OR NOT, AND ANY COMBINATION OF THESE INSTRUMENTS:Other instruments :Other(OLD tariff)" +90259000,"HYDROMETERS AND SIMILAR FLOATING INSTRUMENTS, THERMOMETERS, PYROMETERS, BAROMETERS, HYGROMETERS AND PSYCHROMETERS, RECORDING OR NOT, AND ANY COMBINATION OF THESE INSTRUMENTS::Parts and accessories(OLD tariff)" +90261010,"INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING THE FLOW, LEVEL, PRESSURE OR OTHER VARIABLES OF LIQUIDS OR GASES (FOR EXAMPLE, FLOW METERS, LEVEL GAUGES, MANOMETERS, HEAT METERS), EXCLUDING INSTRUMENTS AND APPARATUS OF HEADING 9014, 9015, 9028 OR 9032:For measuring or checking the flow or level of liquids :Flow meters(OLD tariff)" +90261020,"INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING THE FLOW, LEVEL, PRESSURE OR OTHER VARIABLES OF LIQUIDS OR GASES (FOR EXAMPLE, FLOW METERS, LEVEL GAUGES, MANOMETERS, HEAT METERS), EXCLUDING INSTRUMENTS AND APPARATUS OF HEADING 9014, 9015, 9028 OR 9032:For measuring or checking the flow or level of liquids :Level gauges(OLD tariff)" +90261090,"INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING THE FLOW, LEVEL, PRESSURE OR OTHER VARIABLES OF LIQUIDS OR GASES (FOR EXAMPLE, FLOW METERS, LEVEL GAUGES, MANOMETERS, HEAT METERS), EXCLUDING INSTRUMENTS AND APPARATUS OF HEADING 9014, 9015, 9028 OR 9032:For measuring or checking the flow or level of liquids :Other(OLD tariff)" +90262000,"INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING THE FLOW, LEVEL, PRESSURE OR OTHER VARIABLES OF LIQUIDS OR GASES (FOR EXAMPLE, FLOW METERS, LEVEL GAUGES, MANOMETERS, HEAT METERS), EXCLUDING INSTRUMENTS AND APPARATUS OF HEADING 9014, 9015, 9028 OR 9032::For measuring or checking 
pressure(OLD tariff)" +90268010,"INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING THE FLOW, LEVEL, PRESSURE OR OTHER VARIABLES OF LIQUIDS OR GASES (FOR EXAMPLE, FLOW METERS, LEVEL GAUGES, MANOMETERS, HEAT METERS), EXCLUDING INSTRUMENTS AND APPARATUS OF HEADING 9014, 9015, 9028 OR 9032:Other instruments or apparatus :Heat meters(OLD tariff)" +90268090,"INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING THE FLOW, LEVEL, PRESSURE OR OTHER VARIABLES OF LIQUIDS OR GASES (FOR EXAMPLE, FLOW METERS, LEVEL GAUGES, MANOMETERS, HEAT METERS), EXCLUDING INSTRUMENTS AND APPARATUS OF HEADING 9014, 9015, 9028 OR 9032:Other instruments or apparatus :Other(OLD tariff)" +90269000,"INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING THE FLOW, LEVEL, PRESSURE OR OTHER VARIABLES OF LIQUIDS OR GASES (FOR EXAMPLE, FLOW METERS, LEVEL GAUGES, MANOMETERS, HEAT METERS), EXCLUDING INSTRUMENTS AND APPARATUS OF HEADING 9014, 9015, 9028 OR 9032::Parts and accessories(OLD tariff)" +90270000,instruments and apparatus for physical or chemical analysis for example polarimeters refractometers spectrometers gas or smoke analysis apparatus instruments and apparatus for measuring or checking viscosity porosity tension or the expansion surface like instruments and apparatus for measuring or checking quantities of heat sound or light including exposure meters microtomes +90271000,"INSTRUMENTS AND APPARATUS FOR PHYSICAL OR CHEMICAL ANALYSIS (FOR EXAMPLE, POLARIMETERS, REFRACTOMETERS, SPECTROMETERS, GAS OR SMOKE ANALYSIS APPARATUS); INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING VISCOSITY, POROSITY, EXPANSION, SURFACE TENSION OR THE LIKE; INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING QUANTITIES OF HEAT, SOUND OR LIGHT (INCLUDING EXPOSURE METERS); MICROTOMES::Gas or smoke analysis apparatus" +90272000,"INSTRUMENTS AND APPARATUS FOR PHYSICAL OR CHEMICAL ANALYSIS (FOR EXAMPLE, POLARIMETERS, REFRACTOMETERS, SPECTROMETERS, GAS OR SMOKE ANALYSIS APPARATUS); INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING VISCOSITY, POROSITY, EXPANSION, SURFACE TENSION OR THE LIKE; INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING QUANTITIES OF HEAT, SOUND OR LIGHT (INCLUDING EXPOSURE METERS); MICROTOMES::Chromatographs and electrophoresis instruments" +90273000,instruments and apparatus for physical or chemical analysis for example polarimeters refractometers spectrometers gas or smoke analysis apparatus instruments and apparatus for measuring or checking viscosity porosity tension or the expansion surface like instruments and apparatus for measuring or checking quantities of heat sound or light including exposure meters microtomes >> spectrometers spectrophotometers and spectrographs using optical radiations uv visible ir +90273010,"INSTRUMENTS AND APPARATUS FOR PHYSICAL OR CHEMICAL ANALYSIS (FOR EXAMPLE, POLARIMETERS, REFRACTOMETERS, SPECTROMETERS, GAS OR SMOKE ANALYSIS APPARATUS); INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING VISCOSITY, POROSITY, EXPANSION, SURFACE TENSION OR THE LIKE; INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING QUANTITIES OF HEAT, SOUND OR LIGHT (INCLUDING EXPOSURE METERS); MICROTOMES:Spectrometers, spectrophotometers and spectrographs using optical radiations (UV, visible, IR) :Spectrometers" +90273020,"INSTRUMENTS AND APPARATUS FOR PHYSICAL OR CHEMICAL ANALYSIS (FOR EXAMPLE, POLARIMETERS, REFRACTOMETERS, SPECTROMETERS, GAS OR SMOKE ANALYSIS APPARATUS); INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING VISCOSITY, POROSITY, EXPANSION, SURFACE TENSION OR THE LIKE; INSTRUMENTS AND APPARATUS FOR 
MEASURING OR CHECKING QUANTITIES OF HEAT, SOUND OR LIGHT (INCLUDING EXPOSURE METERS); MICROTOMES:Spectrometers, spectrophotometers and spectrographs using optical radiations (UV, visible, IR) :Spectrophotometers" +90273090,"INSTRUMENTS AND APPARATUS FOR PHYSICAL OR CHEMICAL ANALYSIS (FOR EXAMPLE, POLARIMETERS, REFRACTOMETERS, SPECTROMETERS, GAS OR SMOKE ANALYSIS APPARATUS); INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING VISCOSITY, POROSITY, EXPANSION, SURFACE TENSION OR THE LIKE; INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING QUANTITIES OF HEAT, SOUND OR LIGHT (INCLUDING EXPOSURE METERS); MICROTOMES:Spectrometers, spectrophotometers and spectrographs using optical radiations (UV, visible, IR) :Other" +90275000,instruments and apparatus for physical or chemical analysis for example polarimeters refractometers spectrometers gas or smoke analysis apparatus instruments and apparatus for measuring or checking viscosity porosity tension or the expansion surface like instruments and apparatus for measuring or checking quantities of heat sound or light including exposure meters microtomes >> other instruments and apparatus using optical radiations uv visible ir +90275010,"INSTRUMENTS AND APPARATUS FOR PHYSICAL OR CHEMICAL ANALYSIS (FOR EXAMPLE, POLARIMETERS, REFRACTOMETERS, SPECTROMETERS, GAS OR SMOKE ANALYSIS APPARATUS); INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING VISCOSITY, POROSITY, EXPANSION, SURFACE TENSION OR THE LIKE; INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING QUANTITIES OF HEAT, SOUND OR LIGHT (INCLUDING EXPOSURE METERS); MICROTOMES:Other instruments and apparatus using optical radiations (UV, visible, IR) :Photometers" +90275020,"INSTRUMENTS AND APPARATUS FOR PHYSICAL OR CHEMICAL ANALYSIS (FOR EXAMPLE, POLARIMETERS, REFRACTOMETERS, SPECTROMETERS, GAS OR SMOKE ANALYSIS APPARATUS); INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING VISCOSITY, POROSITY, EXPANSION, SURFACE TENSION OR THE LIKE; INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING QUANTITIES OF HEAT, SOUND OR LIGHT (INCLUDING EXPOSURE METERS); MICROTOMES:Other instruments and apparatus using optical radiations (UV, visible, IR) :Refractometers(OLD tariff)" +90275030,"INSTRUMENTS AND APPARATUS FOR PHYSICAL OR CHEMICAL ANALYSIS (FOR EXAMPLE, POLARIMETERS, REFRACTOMETERS, SPECTROMETERS, GAS OR SMOKE ANALYSIS APPARATUS); INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING VISCOSITY, POROSITY, EXPANSION, SURFACE TENSION OR THE LIKE; INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING QUANTITIES OF HEAT, SOUND OR LIGHT (INCLUDING EXPOSURE METERS); MICROTOMES:Other instruments and apparatus using optical radiations (UV, visible, IR) :Polarimeters(OLD tariff)" +90275090,"INSTRUMENTS AND APPARATUS FOR PHYSICAL OR CHEMICAL ANALYSIS (FOR EXAMPLE, POLARIMETERS, REFRACTOMETERS, SPECTROMETERS, GAS OR SMOKE ANALYSIS APPARATUS); INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING VISCOSITY, POROSITY, EXPANSION, SURFACE TENSION OR THE LIKE; INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING QUANTITIES OF HEAT, SOUND OR LIGHT (INCLUDING EXPOSURE METERS); MICROTOMES:Other instruments and apparatus using optical radiations (UV, visible, IR) :Other(OLD tariff)" +90278010,"INSTRUMENTS AND APPARATUS FOR PHYSICAL OR CHEMICAL ANALYSIS (FOR EXAMPLE, POLARIMETERS, REFRACTOMETERS, SPECTROMETERS, GAS OR SMOKE ANALYSIS APPARATUS); INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING VISCOSITY, POROSITY, EXPANSION, SURFACE TENSION OR THE LIKE; INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING QUANTITIES OF HEAT, SOUND OR LIGHT 
(INCLUDING EXPOSURE METERS); MICROTOMES:Other instruments and apparatus:Viscometers(OLD tariff)" +90278020,"INSTRUMENTS AND APPARATUS FOR PHYSICAL OR CHEMICAL ANALYSIS (FOR EXAMPLE, POLARIMETERS, REFRACTOMETERS, SPECTROMETERS, GAS OR SMOKE ANALYSIS APPARATUS); INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING VISCOSITY, POROSITY, EXPANSION, SURFACE TENSION OR THE LIKE; INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING QUANTITIES OF HEAT, SOUND OR LIGHT (INCLUDING EXPOSURE METERS); MICROTOMES:Other instruments and apparatus:Calorimeters(OLD tariff)" +90278030,"INSTRUMENTS AND APPARATUS FOR PHYSICAL OR CHEMICAL ANALYSIS (FOR EXAMPLE, POLARIMETERS, REFRACTOMETERS, SPECTROMETERS, GAS OR SMOKE ANALYSIS APPARATUS); INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING VISCOSITY, POROSITY, EXPANSION, SURFACE TENSION OR THE LIKE; INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING QUANTITIES OF HEAT, SOUND OR LIGHT (INCLUDING EXPOSURE METERS); MICROTOMES:Other instruments and apparatus:Instruments and apparatus for measuring the surface or interfocial tension of liquids(OLD tariff)" +90278040,"INSTRUMENTS AND APPARATUS FOR PHYSICAL OR CHEMICAL ANALYSIS (FOR EXAMPLE, POLARIMETERS, REFRACTOMETERS, SPECTROMETERS, GAS OR SMOKE ANALYSIS APPARATUS); INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING VISCOSITY, POROSITY, EXPANSION, SURFACE TENSION OR THE LIKE; INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING QUANTITIES OF HEAT, SOUND OR LIGHT (INCLUDING EXPOSURE METERS); MICROTOMES:Other instruments and apparatus:Nuclear magnetic resonance instruments(OLD tariff)" +90278090,"INSTRUMENTS AND APPARATUS FOR PHYSICAL OR CHEMICAL ANALYSIS (FOR EXAMPLE, POLARIMETERS, REFRACTOMETERS, SPECTROMETERS, GAS OR SMOKE ANALYSIS APPARATUS); INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING VISCOSITY, POROSITY, EXPANSION, SURFACE TENSION OR THE LIKE; INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING QUANTITIES OF HEAT, SOUND OR LIGHT (INCLUDING EXPOSURE METERS); MICROTOMES:Other instruments and apparatus:Other(OLD tariff)" +90278100,"INSTRUMENTS AND APPARATUS FOR PHYSICAL OR CHEMICAL ANALYSIS (FOR EXAMPLE, POLARIMETERS, REFRACTOMETERS, SPECTROMETERS, GAS OR SMOKE ANALYSIS APPARATUS); INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING VISCOSITY, POROSITY, EXPANSION, SURFACE TENSION OR THE LIKE; INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING QUANTITIES OF HEAT, SOUND OR LIGHT (INCLUDING EXPOSURE METERS); MICROTOMES::Mass spectrometers(OLD tariff)" +90278910,"INSTRUMENTS AND APPARATUS FOR PHYSICAL OR CHEMICAL ANALYSIS (FOR EXAMPLE, POLARIMETERS, REFRACTOMETERS, SPECTROMETERS, GAS OR SMOKE ANALYSIS APPARATUS); INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING VISCOSITY, POROSITY, EXPANSION, SURFACE TENSION OR THE LIKE; INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING QUANTITIES OF HEAT, SOUND OR LIGHT (INCLUDING EXPOSURE METERS); MICROTOMES:Other:Viscometers(OLD tariff)" +90278920,"INSTRUMENTS AND APPARATUS FOR PHYSICAL OR CHEMICAL ANALYSIS (FOR EXAMPLE, POLARIMETERS, REFRACTOMETERS, SPECTROMETERS, GAS OR SMOKE ANALYSIS APPARATUS); INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING VISCOSITY, POROSITY, EXPANSION, SURFACE TENSION OR THE LIKE; INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING QUANTITIES OF HEAT, SOUND OR LIGHT (INCLUDING EXPOSURE METERS); MICROTOMES:Other:Calorimeters(OLD tariff)" +90278930,"INSTRUMENTS AND APPARATUS FOR PHYSICAL OR CHEMICAL ANALYSIS (FOR EXAMPLE, POLARIMETERS, REFRACTOMETERS, SPECTROMETERS, GAS OR SMOKE ANALYSIS APPARATUS); INSTRUMENTS AND APPARATUS FOR MEASURING OR 
CHECKING VISCOSITY, POROSITY, EXPANSION, SURFACE TENSION OR THE LIKE; INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING QUANTITIES OF HEAT, SOUND OR LIGHT (INCLUDING EXPOSURE METERS); MICROTOMES:Other:Instruments and apparatus for measuring the surface or interfacial tension of liquids(OLD tariff)" +90279000,instruments and apparatus for physical or chemical analysis for example polarimeters refractometers spectrometers gas or smoke analysis apparatus instruments and apparatus for measuring or checking viscosity porosity tension or the expansion surface like instruments and apparatus for measuring or checking quantities of heat sound or light including exposure meters microtomes >> microtomes parts and accessories +90279010,"INSTRUMENTS AND APPARATUS FOR PHYSICAL OR CHEMICAL ANALYSIS (FOR EXAMPLE, POLARIMETERS, REFRACTOMETERS, SPECTROMETERS, GAS OR SMOKE ANALYSIS APPARATUS); INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING VISCOSITY, POROSITY, EXPANSION, SURFACE TENSION OR THE LIKE; INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING QUANTITIES OF HEAT, SOUND OR LIGHT (INCLUDING EXPOSURE METERS); MICROTOMES:Microtomes; parts and accessories :Microtomes, including parts and accessories thereof" +90279020,"INSTRUMENTS AND APPARATUS FOR PHYSICAL OR CHEMICAL ANALYSIS (FOR EXAMPLE, POLARIMETERS, REFRACTOMETERS, SPECTROMETERS, GAS OR SMOKE ANALYSIS APPARATUS); INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING VISCOSITY, POROSITY, EXPANSION, SURFACE TENSION OR THE LIKE; INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING QUANTITIES OF HEAT, SOUND OR LIGHT (INCLUDING EXPOSURE METERS); MICROTOMES:Microtomes; parts and accessories :Printed circuit assemblies for the goods of sub-heading 9027 80" +90279090,"INSTRUMENTS AND APPARATUS FOR PHYSICAL OR CHEMICAL ANALYSIS (FOR EXAMPLE, POLARIMETERS, REFRACTOMETERS, SPECTROMETERS, GAS OR SMOKE ANALYSIS APPARATUS); INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING VISCOSITY, POROSITY, EXPANSION, SURFACE TENSION OR THE LIKE; INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING QUANTITIES OF HEAT, SOUND OR LIGHT (INCLUDING EXPOSURE METERS); MICROTOMES:Microtomes; parts and accessories :Other" +90280000,gas liquid or electricity supply or production meters including calibrating meters therefor +90281000,"GAS, LIQUID OR ELECTRICITY SUPPLY OR PRODUCTION METERS, INCLUDING CALIBRATING METERS THEREFOR::Gas meters" +90282000,"GAS, LIQUID OR ELECTRICITY SUPPLY OR PRODUCTION METERS, INCLUDING CALIBRATING METERS THEREFOR::Liquid meters" +90283000,gas liquid or electricity supply or production meters including calibrating meters therefor >> electricity meters +90283010,"GAS, LIQUID OR ELECTRICITY SUPPLY OR PRODUCTION METERS, INCLUDING CALIBRATING METERS THEREFOR:Electricity meters :For alternating current" +90283090,"GAS, LIQUID OR ELECTRICITY SUPPLY OR PRODUCTION METERS, INCLUDING CALIBRATING METERS THEREFOR:Electricity meters :Other" +90289000,gas liquid or electricity supply or production meters including calibrating meters therefor >> parts and accessories +90289010,"GAS, LIQUID OR ELECTRICITY SUPPLY OR PRODUCTION METERS, INCLUDING CALIBRATING METERS THEREFOR:Parts and accessories :For electricity meters" +90289090,"GAS, LIQUID OR ELECTRICITY SUPPLY OR PRODUCTION METERS, INCLUDING CALIBRATING METERS THEREFOR:Parts and accessories :Other" +90290000,revolution counters production counters taximeters mileometers pedometers and the like speed indicators and tachometers other than those of heading 9014 or 9015 stroboscopes +90291000,revolution counters production 
counters taximeters mileometers pedometers and the like speed indicators and tachometers other than those of heading 9014 or 9015 stroboscopes >> revolution counters production counters taximeters mileometers pedometers and thelike +90291010,"REVOLUTION COUNTERS, PRODUCTION COUNTERS, TAXIMETERS, MILEOMETERS, PEDOMETERS AND THE LIKE; SPEED INDICATORS AND TACHOMETERS, OTHER THAN THOSE OF HEADING 9014 OR 9015; STROBOSCOPES:Revolution counters, production counters, taximeters, mileometers, pedometers and the like :Taximeters" +90291090,"REVOLUTION COUNTERS, PRODUCTION COUNTERS, TAXIMETERS, MILEOMETERS, PEDOMETERS AND THE LIKE; SPEED INDICATORS AND TACHOMETERS, OTHER THAN THOSE OF HEADING 9014 OR 9015; STROBOSCOPES:Revolution counters, production counters, taximeters, mileometers, pedometers and the like :Other" +90292000,revolution counters production counters taximeters mileometers pedometers and the like speed indicators and tachometers other than those of heading 9014 or 9015 stroboscopes >> speed indicators and tachometers stroboscopes +90292010,"REVOLUTION COUNTERS, PRODUCTION COUNTERS, TAXIMETERS, MILEOMETERS, PEDOMETERS AND THE LIKE; SPEED INDICATORS AND TACHOMETERS, OTHER THAN THOSE OF HEADING 9014 OR 9015; STROBOSCOPES:Speed indicators and tachometers; stroboscopes :Tachometers, non-electrical" +90292020,"REVOLUTION COUNTERS, PRODUCTION COUNTERS, TAXIMETERS, MILEOMETERS, PEDOMETERS AND THE LIKE; SPEED INDICATORS AND TACHOMETERS, OTHER THAN THOSE OF HEADING 9014 OR 9015; STROBOSCOPES:Speed indicators and tachometers; stroboscopes :Speedometers, non-electrical" +90292030,"REVOLUTION COUNTERS, PRODUCTION COUNTERS, TAXIMETERS, MILEOMETERS, PEDOMETERS AND THE LIKE; SPEED INDICATORS AND TACHOMETERS, OTHER THAN THOSE OF HEADING 9014 OR 9015; STROBOSCOPES:Speed indicators and tachometers; stroboscopes :Stroboscopes" +90292090,"REVOLUTION COUNTERS, PRODUCTION COUNTERS, TAXIMETERS, MILEOMETERS, PEDOMETERS AND THE LIKE; SPEED INDICATORS AND TACHOMETERS, OTHER THAN THOSE OF HEADING 9014 OR 9015; STROBOSCOPES:Speed indicators and tachometers; stroboscopes :Other" +90299000,"REVOLUTION COUNTERS, PRODUCTION COUNTERS, TAXIMETERS, MILEOMETERS, PEDOMETERS AND THE LIKE; SPEED INDICATORS AND TACHOMETERS, OTHER THAN THOSE OF HEADING 9014 OR 9015; STROBOSCOPES::Parts and accessories" +90300000,oscilloscopes spectrum analysers and other instruments and apparatus for measuring or +90301000,"OSCILLOSCOPES, SPECTRUM ANALYSERS AND OTHER INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING ELECTRICAL QUANTITIES, EXCLUDING METERS OF HEADING 9028; INSTRUMENTS AND APPARATUS FOR MEASURING OR DETECTING ALPHA, BETA, GAMMA, X-RAY, COSMIC OR OTHER IONISING RADIATIONS::Instruments and apparatus for measuring or detecting ionising radiations" +90302000,"OSCILLOSCOPES, SPECTRUM ANALYSERS AND OTHER INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING ELECTRICAL QUANTITIES, EXCLUDING METERS OF HEADING 9028; INSTRUMENTS AND APPARATUS FOR MEASURING OR DETECTING ALPHA, BETA, GAMMA, X-RAY, COSMIC OR OTHER IONISING RADIATIONS::Oscilloscopes and oscillographs" +90303100,"OSCILLOSCOPES, SPECTRUM ANALYSERS AND OTHER INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING ELECTRICAL QUANTITIES, EXCLUDING METERS OF HEADING 9028; INSTRUMENTS AND APPARATUS FOR MEASURING OR DETECTING ALPHA, BETA, GAMMA, X-RAY, COSMIC OR OTHER IONISING RADIATIONS::Multimeters without a recording device" +90303200,"OSCILLOSCOPES, SPECTRUM ANALYSERS AND OTHER INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING ELECTRICAL QUANTITIES, EXCLUDING METERS OF HEADING 
9028; INSTRUMENTS AND APPARATUS FOR MEASURING OR DETECTING ALPHA, BETA, GAMMA, X-RAY, COSMIC OR OTHER IONISING RADIATIONS::Multimeters with a recording device" +90303300,oscilloscopes spectrum analysers and other instruments and apparatus for measuring or >> other without a recording device +90303310,"OSCILLOSCOPES, SPECTRUM ANALYSERS AND OTHER INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING ELECTRICAL QUANTITIES, EXCLUDING METERS OF HEADING 9028; INSTRUMENTS AND APPARATUS FOR MEASURING OR DETECTING ALPHA, BETA, GAMMA, X-RAY, COSMIC OR OTHER IONISING RADIATIONS:Other, without a recording device:Ammeters, volt meters and watt meters" +90303320,"OSCILLOSCOPES, SPECTRUM ANALYSERS AND OTHER INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING ELECTRICAL QUANTITIES, EXCLUDING METERS OF HEADING 9028; INSTRUMENTS AND APPARATUS FOR MEASURING OR DETECTING ALPHA, BETA, GAMMA, X-RAY, COSMIC OR OTHER IONISING RADIATIONS:Other, without a recording device:Spectrum resistance meters" +90303330,"OSCILLOSCOPES, SPECTRUM ANALYSERS AND OTHER INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING ELECTRICAL QUANTITIES, EXCLUDING METERS OF HEADING 9028; INSTRUMENTS AND APPARATUS FOR MEASURING OR DETECTING ALPHA, BETA, GAMMA, X-RAY, COSMIC OR OTHER IONISING RADIATIONS:Other, without a recording device:Capacitance meter" +90303340,"OSCILLOSCOPES, SPECTRUM ANALYSERS AND OTHER INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING ELECTRICAL QUANTITIES, EXCLUDING METERS OF HEADING 9028; INSTRUMENTS AND APPARATUS FOR MEASURING OR DETECTING ALPHA, BETA, GAMMA, X-RAY, COSMIC OR OTHER IONISING RADIATIONS:Other, without a recording device:Frequency measuring apparatus" +90303350,"OSCILLOSCOPES, SPECTRUM ANALYSERS AND OTHER INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING ELECTRICAL QUANTITIES, EXCLUDING METERS OF HEADING 9028; INSTRUMENTS AND APPARATUS FOR MEASURING OR DETECTING ALPHA, BETA, GAMMA, X-RAY, COSMIC OR OTHER IONISING RADIATIONS:Other, without a recording device:Megar meters" +90303390,"OSCILLOSCOPES, SPECTRUM ANALYSERS AND OTHER INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING ELECTRICAL QUANTITIES, EXCLUDING METERS OF HEADING 9028; INSTRUMENTS AND APPARATUS FOR MEASURING OR DETECTING ALPHA, BETA, GAMMA, X-RAY, COSMIC OR OTHER IONISING RADIATIONS:Other, without a recording device:Other" +90303900,"OSCILLOSCOPES, SPECTRUM ANALYSERS AND OTHER INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING ELECTRICAL QUANTITIES, EXCLUDING METERS OF HEADING 9028; INSTRUMENTS AND APPARATUS FOR MEASURING OR DETECTING ALPHA, BETA, GAMMA, X-RAY, COSMIC OR OTHER IONISING RADIATIONS::Other, with a recording device" +90304000,"OSCILLOSCOPES, SPECTRUM ANALYSERS AND OTHER INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING ELECTRICAL QUANTITIES, EXCLUDING METERS OF HEADING 9028; INSTRUMENTS AND APPARATUS FOR MEASURING OR DETECTING ALPHA, BETA, GAMMA, X-RAY, COSMIC OR OTHER IONISING RADIATIONS::Other instruments and apparatus, specially designed for telecommunications (for example, cross-talk meters, gain measuring instruments, distortion factor meters, psophometers)" +90308200,"OSCILLOSCOPES, SPECTRUM ANALYSERS AND OTHER INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING ELECTRICAL QUANTITIES, EXCLUDING METERS OF HEADING 9028; INSTRUMENTS AND APPARATUS FOR MEASURING OR DETECTING ALPHA, BETA, GAMMA, X-RAY, COSMIC OR OTHER IONISING RADIATIONS::For measuring or checking semiconductor wafer or device" +90308400,"OSCILLOSCOPES, SPECTRUM ANALYSERS AND OTHER INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING ELECTRICAL QUANTITIES, 
EXCLUDING METERS OF HEADING 9028; INSTRUMENTS AND APPARATUS FOR MEASURING OR DETECTING ALPHA, BETA, GAMMA, X-RAY, COSMIC OR OTHER IONISING RADIATIONS::Other, with a recording device" +90308900,oscilloscopes spectrum analysers and other instruments and apparatus for measuring or >> other +90308910,"OSCILLOSCOPES, SPECTRUM ANALYSERS AND OTHER INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING ELECTRICAL QUANTITIES, EXCLUDING METERS OF HEADING 9028; INSTRUMENTS AND APPARATUS FOR MEASURING OR DETECTING ALPHA, BETA, GAMMA, X-RAY, COSMIC OR OTHER IONISING RADIATIONS:Other :Scintillator counters" +90308920,"OSCILLOSCOPES, SPECTRUM ANALYSERS AND OTHER INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING ELECTRICAL QUANTITIES, EXCLUDING METERS OF HEADING 9028; INSTRUMENTS AND APPARATUS FOR MEASURING OR DETECTING ALPHA, BETA, GAMMA, X-RAY, COSMIC OR OTHER IONISING RADIATIONS:Other :Vectroscope" +90308990,"OSCILLOSCOPES, SPECTRUM ANALYSERS AND OTHER INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING ELECTRICAL QUANTITIES, EXCLUDING METERS OF HEADING 9028; INSTRUMENTS AND APPARATUS FOR MEASURING OR DETECTING ALPHA, BETA, GAMMA, X-RAY, COSMIC OR OTHER IONISING RADIATIONS:Other :Other" +90309000,oscilloscopes spectrum analysers and other instruments and apparatus for measuring or >> parts and accessories +90309010,"OSCILLOSCOPES, SPECTRUM ANALYSERS AND OTHER INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING ELECTRICAL QUANTITIES, EXCLUDING METERS OF HEADING 9028; INSTRUMENTS AND APPARATUS FOR MEASURING OR DETECTING ALPHA, BETA, GAMMA, X-RAY, COSMIC OR OTHER IONISING RADIATIONS:Parts and accessories :Of meters and counters" +90309090,"OSCILLOSCOPES, SPECTRUM ANALYSERS AND OTHER INSTRUMENTS AND APPARATUS FOR MEASURING OR CHECKING ELECTRICAL QUANTITIES, EXCLUDING METERS OF HEADING 9028; INSTRUMENTS AND APPARATUS FOR MEASURING OR DETECTING ALPHA, BETA, GAMMA, X-RAY, COSMIC OR OTHER IONISING RADIATIONS:Parts and accessories :Other" +90310000,measuring or checking instruments appliances and machines not specified or included elsewhere in this chapter profile projectors +90311000,"MEASURING OR CHECKING INSTRUMENTS, APPLIANCES AND MACHINES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS::Machines for balancing mechanical parts" +90312000,"MEASURING OR CHECKING INSTRUMENTS, APPLIANCES AND MACHINES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS::Test benches" +90314100,"MEASURING OR CHECKING INSTRUMENTS, APPLIANCES AND MACHINES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS::For inspecting semiconductor wafers, or devices or for inspecting photo-masks or reticles used in manufacturing semiconductor devices" +90314900,"MEASURING OR CHECKING INSTRUMENTS, APPLIANCES AND MACHINES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS::Other" +90318000,"MEASURING OR CHECKING INSTRUMENTS, APPLIANCES AND MACHINES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS::Other instruments, appliances and machines" +90319000,"MEASURING OR CHECKING INSTRUMENTS, APPLIANCES AND MACHINES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS::Parts and accessories" +90320000,controlling automatic regulating or instruments and apparatus +90321000,controlling automatic regulating or instruments and apparatus >> thermostats +90321010,AUTOMATIC REGULATING OR CONTROLLING INSTRUMENTS AND APPARATUS:Thermostats :For refrigerating and air-conditioning appliances and machinery +90321090,AUTOMATIC REGULATING OR CONTROLLING INSTRUMENTS AND APPARATUS:Thermostats :Other +90322000,controlling automatic regulating or instruments and apparatus >> manostats +90322010,AUTOMATIC 
REGULATING OR CONTROLLING INSTRUMENTS AND APPARATUS:Manostats :For refrigerating and air-conditioning appliances and machinery +90322090,AUTOMATIC REGULATING OR CONTROLLING INSTRUMENTS AND APPARATUS:Manostats :Other +90328100,AUTOMATIC REGULATING OR CONTROLLING INSTRUMENTS AND APPARATUS::Hydraulic or pneumatic +90328900,controlling automatic regulating or instruments and apparatus >> other +90328910,AUTOMATIC REGULATING OR CONTROLLING INSTRUMENTS AND APPARATUS:Other :Electronic automatic regulators +90328990,AUTOMATIC REGULATING OR CONTROLLING INSTRUMENTS AND APPARATUS:Other :Other +90329000,AUTOMATIC REGULATING OR CONTROLLING INSTRUMENTS AND APPARATUS::Parts and accessories +90330000,"::PARTS AND ACCESSORIES (NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER) FOR MACHINES, APPLIANCES, INSTRUMENTS OR APPARATUS OF CHAPTER 90" +91011100,"WRIST-WATCHES, POCKET-WATCHES AND OTHER WATCHES, INCLUDING STOP-WATCHES, WITH CASE OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL::With mechanical display only(OLD tariff)" +91011900,"WRIST-WATCHES, POCKET-WATCHES AND OTHER WATCHES, INCLUDING STOP-WATCHES, WITH CASE OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL::Other(OLD tariff)" +91012100,"WRIST-WATCHES, POCKET-WATCHES AND OTHER WATCHES, INCLUDING STOP-WATCHES, WITH CASE OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL::With automatic winding(OLD tariff)" +91012900,"WRIST-WATCHES, POCKET-WATCHES AND OTHER WATCHES, INCLUDING STOP-WATCHES, WITH CASE OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL::Other(OLD tariff)" +91019110,"WRIST-WATCHES, POCKET-WATCHES AND OTHER WATCHES, INCLUDING STOP-WATCHES, WITH CASE OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Electrically operated :Pocket watches(OLD tariff)" +91019120,"WRIST-WATCHES, POCKET-WATCHES AND OTHER WATCHES, INCLUDING STOP-WATCHES, WITH CASE OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Electrically operated :Stop watches(OLD tariff)" +91019190,"WRIST-WATCHES, POCKET-WATCHES AND OTHER WATCHES, INCLUDING STOP-WATCHES, WITH CASE OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Electrically operated :Other(OLD tariff)" +91019910,"WRIST-WATCHES, POCKET-WATCHES AND OTHER WATCHES, INCLUDING STOP-WATCHES, WITH CASE OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Other :Pocket watches(OLD tariff)" +91019920,"WRIST-WATCHES, POCKET-WATCHES AND OTHER WATCHES, INCLUDING STOP-WATCHES, WITH CASE OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Other :Stop watches(OLD tariff)" +91019990,"WRIST-WATCHES, POCKET-WATCHES AND OTHER WATCHES, INCLUDING STOP-WATCHES, WITH CASE OF PRECIOUS METAL OR OF METAL CLAD WITH PRECIOUS METAL:Other :Other(OLD tariff)" +91020000,and other watches including stop watches other than those of heading electrically operated whether or not incorporating a facility +91021100,"WRIST-WATCHES, POCKET-WATCHES AND OTHER WATCHES, INCLUDING STOP WATCHES, OTHER THAN THOSE OF HEADING 9101 ::With mechanical display only" +91021200,"WRIST-WATCHES, POCKET-WATCHES AND OTHER WATCHES, INCLUDING STOP WATCHES, OTHER THAN THOSE OF HEADING 9101 ::With opto-electronic display only" +91021900,"WRIST-WATCHES, POCKET-WATCHES AND OTHER WATCHES, INCLUDING STOP WATCHES, OTHER THAN THOSE OF HEADING 9101 ::Other" +91022100,"WRIST-WATCHES, POCKET-WATCHES AND OTHER WATCHES, INCLUDING STOP WATCHES, OTHER THAN THOSE OF HEADING 9101 ::With automatic winding" +91022900,"WRIST-WATCHES, POCKET-WATCHES AND OTHER WATCHES, INCLUDING STOP WATCHES, OTHER THAN THOSE OF HEADING 9101 ::Other" +91029100,and other watches 
including stop watches other than those of heading electrically operated whether or not incorporating a facility >> electrically operated +91029110,"WRIST-WATCHES, POCKET-WATCHES AND OTHER WATCHES, INCLUDING STOP WATCHES, OTHER THAN THOSE OF HEADING 9101 :Electrically operated :Pocket watches" +91029120,"WRIST-WATCHES, POCKET-WATCHES AND OTHER WATCHES, INCLUDING STOP WATCHES, OTHER THAN THOSE OF HEADING 9101 :Electrically operated :Stop watches" +91029190,"WRIST-WATCHES, POCKET-WATCHES AND OTHER WATCHES, INCLUDING STOP WATCHES, OTHER THAN THOSE OF HEADING 9101 :Electrically operated :Other" +91029900,and other watches including stop watches other than those of heading electrically operated whether or not incorporating a facility >> other +91029910,"WRIST-WATCHES, POCKET-WATCHES AND OTHER WATCHES, INCLUDING STOP WATCHES, OTHER THAN THOSE OF HEADING 9101 :Other :Pocket watches" +91029920,"WRIST-WATCHES, POCKET-WATCHES AND OTHER WATCHES, INCLUDING STOP WATCHES, OTHER THAN THOSE OF HEADING 9101 :Other :Stop watches" +91029990,"WRIST-WATCHES, POCKET-WATCHES AND OTHER WATCHES, INCLUDING STOP WATCHES, OTHER THAN THOSE OF HEADING 9101 :Other :Other" +91030000,clocks with watch movements excluding clocks of heading 9104 +91031000,"CLOCKS WITH WATCH MOVEMENTS, EXCLUDING CLOCKS OF HEADING 9104::Electrically operated" +91039000,"CLOCKS WITH WATCH MOVEMENTS, EXCLUDING CLOCKS OF HEADING 9104::Other" +91040000,"::INSTRUMENT PANEL CLOCKS AND CLOCKS OF A SIMILAR TYPE FOR VEHICLES, AIRCRAFT, SPACECRAFT OR VESSELS" +91050000,other clocks alarm clocks +91051100,OTHER CLOCKS::Electrically operated +91051900,OTHER CLOCKS::Other +91052100,OTHER CLOCKS::Electrically operated +91052900,OTHER CLOCKS::Other +91059100,"OTHER CLOCKS::Battery, accumulator or mains powered" +91059900,other clocks alarm clocks >> other +91059910,OTHER CLOCKS:Other :Time pieces +91059990,OTHER CLOCKS:Other :Other +91060000,time of day recording apparatus and apparatus for recording or otherwise indicating measuring intervals of time with clock or watch movement or with synchronous motor for example time registers +91061000,"TIME OF DAY RECORDING APPARATUS AND APPARATUS FOR MEASURING, RECORDING OR OTHERWISE INDICATING INTERVALS OF TIME, WITH CLOCK OR WATCH MOVEMENT OR WITH SYNCHRONOUS MOTOR (FOR EXAMPLE, TIMEREGISTERS, TIME-RECORDERS)::Time-registers; time-recorders" +91069000,"TIME OF DAY RECORDING APPARATUS AND APPARATUS FOR MEASURING, RECORDING OR OTHERWISE INDICATING INTERVALS OF TIME, WITH CLOCK OR WATCH MOVEMENT OR WITH SYNCHRONOUS MOTOR (FOR EXAMPLE, TIMEREGISTERS, TIME-RECORDERS)::Other" +91070000,::TIME SWITCHES WITH CLOCK OR WATCH MOVEMENT OR WITH SYNCHRONOUS MOTOR +91080000,watch movements complete and assembled electrically operated +91081100,"WATCH MOVEMENTS, COMPLETE AND ASSEMBLED::With mechanical display only or with a device to which a mechanical display can be incorporated" +91081200,"WATCH MOVEMENTS, COMPLETE AND ASSEMBLED::With opto-electronic display only" +91081900,"WATCH MOVEMENTS, COMPLETE AND ASSEMBLED::Other" +91082000,"WATCH MOVEMENTS, COMPLETE AND ASSEMBLED::With automatic winding" +91089000,"WATCH MOVEMENTS, COMPLETE AND ASSEMBLED::Other" +91090000,clock movements complete and assembled +91091000,clock movements complete and assembled >> electrically operated +91091010,"CLOCK MOVEMENTS, COMPLETE AND ASSEMBLED:Electrically operated:of Alarm clocks" +91091090,"CLOCK MOVEMENTS, COMPLETE AND ASSEMBLED:Electrically operated:Other" +91099000,"CLOCK MOVEMENTS, COMPLETE AND ASSEMBLED::Other" +91100000,complete watch or 
clock movements unassembled or partly assembled movement sets +91101100,"COMPLETE WATCH OR CLOCK MOVEMENTS, UNASSEMBLED OR PARTLY ASSEMBLED (MOVEMENT SETS); INCOMPLETE WATCH OR CLOCK MOVEMENTS, ASSEMBLED; ROUGH WATCH OR CLOCK MOVEMENTS::Complete movements, unassembled or partly assembled (movement sets)" +91101200,"COMPLETE WATCH OR CLOCK MOVEMENTS, UNASSEMBLED OR PARTLY ASSEMBLED (MOVEMENT SETS); INCOMPLETE WATCH OR CLOCK MOVEMENTS, ASSEMBLED; ROUGH WATCH OR CLOCK MOVEMENTS::Incomplete movements, assembled" +91101900,"COMPLETE WATCH OR CLOCK MOVEMENTS, UNASSEMBLED OR PARTLY ASSEMBLED (MOVEMENT SETS); INCOMPLETE WATCH OR CLOCK MOVEMENTS, ASSEMBLED; ROUGH WATCH OR CLOCK MOVEMENTS::Rough movements" +91109000,"COMPLETE WATCH OR CLOCK MOVEMENTS, UNASSEMBLED OR PARTLY ASSEMBLED (MOVEMENT SETS); INCOMPLETE WATCH OR CLOCK MOVEMENTS, ASSEMBLED; ROUGH WATCH OR CLOCK MOVEMENTS::Other" +91110000,watch cases and parts thereof +91111000,WATCH CASES AND PARTS THEREOF::Cases of precious metal or of metal clad with precious metal +91112000,"WATCH CASES AND PARTS THEREOF::Cases of base metal, whether or not gold- or silver-plated" +91118000,WATCH CASES AND PARTS THEREOF::Other cases +91119000,WATCH CASES AND PARTS THEREOF::Parts +91120000,clock cases and cases of a similar type for other goods of this chapter and parts thereof +91122000,"CLOCK CASES AND CASES OF A SIMILAR TYPE FOR OTHER GOODS OF THIS CHAPTER, AND PARTS THEREOF::Cases" +91129000,"CLOCK CASES AND CASES OF A SIMILAR TYPE FOR OTHER GOODS OF THIS CHAPTER, AND PARTS THEREOF::Parts" +91130000,watch straps watch bands and watch bracelets and parts thereof +91131000,"WATCH STRAPS, WATCH BANDS AND WATCH BRACELETS, AND PARTS THEREOF::Of precious metal or of metal clad with precious metal" +91132000,watch straps watch bands and watch bracelets and parts thereof >> of base metal whether or not gold or +91132010,"WATCH STRAPS, WATCH BANDS AND WATCH BRACELETS, AND PARTS THEREOF:Of base metal, whether or not gold- or silver-plated:Parts" +91132090,"WATCH STRAPS, WATCH BANDS AND WATCH BRACELETS, AND PARTS THEREOF:Of base metal, whether or not gold- or silver-plated:Other" +91139000,watch straps watch bands and watch bracelets and parts thereof >> other +91139010,"WATCH STRAPS, WATCH BANDS AND WATCH BRACELETS, AND PARTS THEREOF:Other :Parts" +91139090,"WATCH STRAPS, WATCH BANDS AND WATCH BRACELETS, AND PARTS THEREOF:Other :Other" +91140000,other clock or watch parts +91141010,"OTHER CLOCK OR WATCH PARTS:Springs, including hair-springs :For watches(OLD tariff)" +91141020,"OTHER CLOCK OR WATCH PARTS:Springs, including hair-springs :For clocks(OLD tariff)" +91143000,other clock or watch parts >> dials +91143010,OTHER CLOCK OR WATCH PARTS:Dials :For watches +91143020,OTHER CLOCK OR WATCH PARTS:Dials :For clocks +91144000,other clock or watch parts >> plates and bridges +91144010,OTHER CLOCK OR WATCH PARTS:Plates and bridges :For watches +91144020,OTHER CLOCK OR WATCH PARTS:Plates and bridges :For clocks +91149000,other clock or watch parts >> other +91149030,OTHER CLOCK OR WATCH PARTS:Other :Jewels +91149040,"OTHER CLOCK OR WATCH PARTS:Other :Springs, including hair-spring" +91149091,OTHER CLOCK OR WATCH PARTS:Other :For watches +91149092,OTHER CLOCK OR WATCH PARTS:Other :For clocks +92010000,pianos including automatic pianos and other keyboard stringed instruments +92011000,"PIANOS, INCLUDING AUTOMATIC PIANOS; HARPSI-CHORDS AND OTHER KEYBOARD STRINGED INSTRUMENTS::Upright pianos" +92012000,"PIANOS, INCLUDING AUTOMATIC PIANOS; HARPSI-CHORDS AND OTHER KEYBOARD 
STRINGED INSTRUMENTS::Grand pianos" +92019000,"PIANOS, INCLUDING AUTOMATIC PIANOS; HARPSI-CHORDS AND OTHER KEYBOARD STRINGED INSTRUMENTS::Other" +92020000,other string musical instruments for example guitars violins harps +92021000,"OTHER STRING MUSICAL INSTRUMENTS (FOR EXAMPLE, GUITARS, VIOLINS, HARPS)::Played with a bow" +92029000,"OTHER STRING MUSICAL INSTRUMENTS (FOR EXAMPLE, GUITARS, VIOLINS, HARPS)::Other" +92050000,wind musical instruments for example keyboard pipe organs accordions clarinets trumpets bagpipes other than fairground organs and mechanical street organs +92051000,"OTHER WIND MUSICAL INSTRUMENTS (FOR EXAMPLE, CLARINETS, TRUMPETS, BAGPIPES)::Brass-wind instruments" +92059000,wind musical instruments for example keyboard pipe organs accordions clarinets trumpets bagpipes other than fairground organs and mechanical street organs >> other +92059010,"OTHER WIND MUSICAL INSTRUMENTS (FOR EXAMPLE, CLARINETS, TRUMPETS, BAGPIPES):Other :Flutes" +92059020,"OTHER WIND MUSICAL INSTRUMENTS (FOR EXAMPLE, CLARINETS, TRUMPETS, BAGPIPES):Other :Clarinets" +92059090,"OTHER WIND MUSICAL INSTRUMENTS (FOR EXAMPLE, CLARINETS, TRUMPETS, BAGPIPES):Other :Other" +92060000,"::PERCUSSION MUSICAL INSTRUMENTS (FOR EXAMPLE, DRUMS, XYLOPHONES, CYMBOLS, CASTANETS, MARACAS)" +92070000,musical instruments the sound of which is produced or must be amplified electrically for example organs guitars accordions +92071000,"MUSICAL INSTRUMENTS, THE SOUND OF WHICH IS PRODUCED, OR MUST BE AMPLIFIED, ELECTRICALLY (FOR EXAMPLE, ORGANS, GUITARS, ACCORDIONS)::Keyboard instruments, other than accordions" +92079000,"MUSICAL INSTRUMENTS, THE SOUND OF WHICH IS PRODUCED, OR MUST BE AMPLIFIED, ELECTRICALLY (FOR EXAMPLE, ORGANS, GUITARS, ACCORDIONS)::Other" +92080000,musical boxes fairground organs mechanical street organs mechanical singing birds musical saws and other musical instruments not falling within any other heading of this chapter decoy calls of all kinds whistles call horns and other sound signalling instruments +92081000,"MUSICAL BOXES, FAIRGROUND ORGANS, MECHANICAL STREET ORGANS, MECHANICAL SINGING BIRDS, MUSICAL SAWS AND OTHER MUSICAL INSTRUMENTS NOT FALLING WITHIN ANY OTHER HEADING OF THIS CHAPTER; DECOY CALLS OF ALL KINDS; WHISTLES, CALL HORNS AND OTHER MOUTH-BLOWN SOUND SIGNALLING INSTRUMENTS::Musical boxes" +92089000,"MUSICAL BOXES, FAIRGROUND ORGANS, MECHANICAL STREET ORGANS, MECHANICAL SINGING BIRDS, MUSICAL SAWS AND OTHER MUSICAL INSTRUMENTS NOT FALLING WITHIN ANY OTHER HEADING OF THIS CHAPTER; DECOY CALLS OF ALL KINDS; WHISTLES, CALL HORNS AND OTHER MOUTH-BLOWN SOUND SIGNALLING INSTRUMENTS::Other" +92090000,parts for example mechanisms for musical boxes and accessories for example cards discs and rolls for mechanical instruments of musical instruments metronomes tuning forks and pitch pipes of all kinds +92093000,"PARTS (FOR EXAMPLE, MECHANISMS FOR MUSICAL BOXES) AND ACCESSORIES (FOR EXAMPLE, CARDS, DISCS AND ROLLS FOR MECHANICAL INSTRUMENTS) OF MUSICAL INSTRUMENTS; METRONOMES, TUNING FORKS AND PITCH PIPES OF ALL KINDS::Musical instrument strings" +92099100,"PARTS (FOR EXAMPLE, MECHANISMS FOR MUSICAL BOXES) AND ACCESSORIES (FOR EXAMPLE, CARDS, DISCS AND ROLLS FOR MECHANICAL INSTRUMENTS) OF MUSICAL INSTRUMENTS; METRONOMES, TUNING FORKS AND PITCH PIPES OF ALL KINDS::Parts and accessories for pianos" +92099200,"PARTS (FOR EXAMPLE, MECHANISMS FOR MUSICAL BOXES) AND ACCESSORIES (FOR EXAMPLE, CARDS, DISCS AND ROLLS FOR MECHANICAL INSTRUMENTS) OF MUSICAL INSTRUMENTS; METRONOMES, TUNING FORKS AND PITCH PIPES 
OF ALL KINDS::Parts and accessories for the musical instruments of heading 9202" +92099400,"PARTS (FOR EXAMPLE, MECHANISMS FOR MUSICAL BOXES) AND ACCESSORIES (FOR EXAMPLE, CARDS, DISCS AND ROLLS FOR MECHANICAL INSTRUMENTS) OF MUSICAL INSTRUMENTS; METRONOMES, TUNING FORKS AND PITCH PIPES OF ALL KINDS::Parts and accessories for the musical instruments of heading 9207" +92099900,"PARTS (FOR EXAMPLE, MECHANISMS FOR MUSICAL BOXES) AND ACCESSORIES (FOR EXAMPLE, CARDS, DISCS AND ROLLS FOR MECHANICAL INSTRUMENTS) OF MUSICAL INSTRUMENTS; METRONOMES, TUNING FORKS AND PITCH PIPES OF ALL KINDS::Other" +93010000,military weapons other than revolvers pistols and the arms of heading +93011000,military weapons other than revolvers pistols and the arms of heading >> artillery weapons for example guns howitzers and mortars +93011010,"MILITARY WEAPONS, OTHER THAN REVOLVERS, PISTOLS AND THE ARMS OF HEADING:Artillery weapons (for exaple, gunds, howitzers and mortars):Self propelled" +93011090,"MILITARY WEAPONS, OTHER THAN REVOLVERS, PISTOLS AND THE ARMS OF HEADING:Artillery weapons (for exaple, gunds, howitzers and mortars):Other" +93012000,"MILITARY WEAPONS, OTHER THAN REVOLVERS, PISTOLS AND THE ARMS OF HEADING::Rocket launchers; flame- throwers; grenade launchers; torpedo tubes and similar projectors" +93019000,"MILITARY WEAPONS, OTHER THAN REVOLVERS, PISTOLS AND THE ARMS OF HEADING::Other" +93020000,"::REVOLVERS AND PISTOLS, OTHER THAN THOSE OF HEADING 9303 OR 9304" +93030000,other firearms and similar devices which operate by the firing of an explosive charge for example sporting shotguns and rifles firearms very pistols and other devices designed to project only signal flares pistols and revolvers for firing blank ammunition humane killers line throwing guns +93031000,"OTHER FIREARMS AND SIMILAR DEVICES WHICH OPERATE BY THE FIRING OF AN EXPLOSIVE CHARGE (FOR EXAMPLE, SPORTING SHOTGUNS AND RIFLES, MUZZLE-LOADING FIREARMS, VERY PISTOLS AND OTHER DEVICES DESIGNED TO PROJECT ONLY SIGNAL FLARES, PISTOLS AND REVOLVERS FOR FIRING BLANK AMMUNITION, CAPTIVE-BOLT HUMANE KILLERS, LINE-THROWING GUNS)::Muzzle-loading firearms" +93032000,"OTHER FIREARMS AND SIMILAR DEVICES WHICH OPERATE BY THE FIRING OF AN EXPLOSIVE CHARGE (FOR EXAMPLE, SPORTING SHOTGUNS AND RIFLES, MUZZLE-LOADING FIREARMS, VERY PISTOLS AND OTHER DEVICES DESIGNED TO PROJECT ONLY SIGNAL FLARES, PISTOLS AND REVOLVERS FOR FIRING BLANK AMMUNITION, CAPTIVE-BOLT HUMANE KILLERS, LINE-THROWING GUNS)::Other sporting, hunting or target-shooting shotguns, including combination shotgun-rifles" +93033000,"OTHER FIREARMS AND SIMILAR DEVICES WHICH OPERATE BY THE FIRING OF AN EXPLOSIVE CHARGE (FOR EXAMPLE, SPORTING SHOTGUNS AND RIFLES, MUZZLE-LOADING FIREARMS, VERY PISTOLS AND OTHER DEVICES DESIGNED TO PROJECT ONLY SIGNAL FLARES, PISTOLS AND REVOLVERS FOR FIRING BLANK AMMUNITION, CAPTIVE-BOLT HUMANE KILLERS, LINE-THROWING GUNS)::Other sporting, hunting or target-shooting rifles" +93039000,"OTHER FIREARMS AND SIMILAR DEVICES WHICH OPERATE BY THE FIRING OF AN EXPLOSIVE CHARGE (FOR EXAMPLE, SPORTING SHOTGUNS AND RIFLES, MUZZLE-LOADING FIREARMS, VERY PISTOLS AND OTHER DEVICES DESIGNED TO PROJECT ONLY SIGNAL FLARES, PISTOLS AND REVOLVERS FOR FIRING BLANK AMMUNITION, CAPTIVE-BOLT HUMANE KILLERS, LINE-THROWING GUNS)::Other" +93040000,"::OTHER ARMS (FOR EXAMPLE, SPRING, AIR OR GAS GUNS AND PISTOLS, TRUNCHEONS), EXCLUDING THOSE OF HEADING 9307" +93050000,parts and accessories of articles of headings 9301 to 9304 +93051000,PARTS AND ACCESSORIES OF ARTICLES OF HEADINGS 9301 TO 
9304::Of revolvers or pistols +93052000,parts and accessories of articles of headings 9301 to 9304 >> of shotguns or rifles of heading 9303 +93052010,PARTS AND ACCESSORIES OF ARTICLES OF HEADINGS 9301 TO 9304:Of shotgunds or rifles of heading 9303:Shotgund barrels +93052090,PARTS AND ACCESSORIES OF ARTICLES OF HEADINGS 9301 TO 9304:Of shotgunds or rifles of heading 9303:Other +93059100,PARTS AND ACCESSORIES OF ARTICLES OF HEADINGS 9301 TO 9304::Of military weapons of heading 9301 +93059900,PARTS AND ACCESSORIES OF ARTICLES OF HEADINGS 9301 TO 9304::Other +93060000,bombs grenades torpedoes mines missiles and similar munitions of war and parts thereof cartridges and other ammunition and projectiles and parts thereof including shot and cartridge wads shotgun cartridges and parts thereof air gun pellets +93062100,"BOMBS , GRENADES, TORPEDOES, MINES, MISSILES, AND SIMILAR MUNITIONS OF WAR AND PARTS THEREOF; CARTRIDGES AND OTHER AMMUNITION AND PROJECTILES AND PARTS THEREOF, INCLUDING SHOT AND CARTRIDGE WADS::Cartridges" +93062900,"BOMBS , GRENADES, TORPEDOES, MINES, MISSILES, AND SIMILAR MUNITIONS OF WAR AND PARTS THEREOF; CARTRIDGES AND OTHER AMMUNITION AND PROJECTILES AND PARTS THEREOF, INCLUDING SHOT AND CARTRIDGE WADS::Other" +93063000,"BOMBS , GRENADES, TORPEDOES, MINES, MISSILES, AND SIMILAR MUNITIONS OF WAR AND PARTS THEREOF; CARTRIDGES AND OTHER AMMUNITION AND PROJECTILES AND PARTS THEREOF, INCLUDING SHOT AND CARTRIDGE WADS::Other cartridges and parts thereof" +93069000,"BOMBS , GRENADES, TORPEDOES, MINES, MISSILES, AND SIMILAR MUNITIONS OF WAR AND PARTS THEREOF; CARTRIDGES AND OTHER AMMUNITION AND PROJECTILES AND PARTS THEREOF, INCLUDING SHOT AND CARTRIDGE WADS::Other" +93070000,"::SWORDS, CUT LASSES, BAYONETS, LANCES AND SIMILAR ARMS AND PARTS THEREOF AND SCABBARDS AND SHEATHS THEREFOF" +94010000,seats other than those of heading 9402 whether or not convertible into beds and parts thereof +94011000,"SEATS (OTHER THAN THOSE OF HEADING 9402), WHETHER OR NOT CONVERTIBLE INTO BEDS, AND PARTS THEREOF::Seats of a kind used for aircraft" +94012000,"SEATS (OTHER THAN THOSE OF HEADING 9402), WHETHER OR NOT CONVERTIBLE INTO BEDS, AND PARTS THEREOF::Seats of a kind used for motor vehicles" +94013000,"SEATS (OTHER THAN THOSE OF HEADING 9402), WHETHER OR NOT CONVERTIBLE INTO BEDS, AND PARTS THEREOF::Swivel seats and variable height adjustment(OLD tariff)" +94013100,"SEATS (OTHER THAN THOSE OF HEADING 9402), WHETHER OR NOT CONVERTIBLE INTO BEDS, AND PARTS THEREOF::Of wood" +94013900,"SEATS (OTHER THAN THOSE OF HEADING 9402), WHETHER OR NOT CONVERTIBLE INTO BEDS, AND PARTS THEREOF:oth:Other" +94014000,"SEATS (OTHER THAN THOSE OF HEADING 9402), WHETHER OR NOT CONVERTIBLE INTO BEDS, AND PARTS THEREOF::Seats other than garden seats or camping equipment, convertible into beds(OLD tariff)" +94014100,"SEATS (OTHER THAN THOSE OF HEADING 9402), WHETHER OR NOT CONVERTIBLE INTO BEDS, AND PARTS THEREOF:of:Of wood" +94014900,"SEATS (OTHER THAN THOSE OF HEADING 9402), WHETHER OR NOT CONVERTIBLE INTO BEDS, AND PARTS THEREOF:oth:Other" +94015100,"SEATS (OTHER THAN THOSE OF HEADING 9402), WHETHER OR NOT CONVERTIBLE INTO BEDS, AND PARTS THEREOF::Of bamboo or rattan(OLD tariff)" +94015200,"SEATS (OTHER THAN THOSE OF HEADING 9402), WHETHER OR NOT CONVERTIBLE INTO BEDS, AND PARTS THEREOF::Of Bamboo" +94015300,"SEATS (OTHER THAN THOSE OF HEADING 9402), WHETHER OR NOT CONVERTIBLE INTO BEDS, AND PARTS THEREOF::Of Rattan" +94015900,"SEATS (OTHER THAN THOSE OF HEADING 9402), WHETHER OR NOT CONVERTIBLE INTO BEDS, AND PARTS 
THEREOF::Other" +94016100,"SEATS (OTHER THAN THOSE OF HEADING 9402), WHETHER OR NOT CONVERTIBLE INTO BEDS, AND PARTS THEREOF::Upholstered" +94016900,"SEATS (OTHER THAN THOSE OF HEADING 9402), WHETHER OR NOT CONVERTIBLE INTO BEDS, AND PARTS THEREOF::Other" +94017100,"SEATS (OTHER THAN THOSE OF HEADING 9402), WHETHER OR NOT CONVERTIBLE INTO BEDS, AND PARTS THEREOF::Upholstered" +94017900,"SEATS (OTHER THAN THOSE OF HEADING 9402), WHETHER OR NOT CONVERTIBLE INTO BEDS, AND PARTS THEREOF::Other" +94018000,"SEATS (OTHER THAN THOSE OF HEADING 9402), WHETHER OR NOT CONVERTIBLE INTO BEDS, AND PARTS THEREOF::Other seats" +94018200,"SEATS (OTHER THAN THOSE OF HEADING 9402), WHETHER OR NOT CONVERTIBLE INTO BEDS, AND PARTS THEREOF::Of Bamboo(OLD tariff)" +94018300,"SEATS (OTHER THAN THOSE OF HEADING 9402), WHETHER OR NOT CONVERTIBLE INTO BEDS, AND PARTS THEREOF::Of Rattan(OLD tariff)" +94019000,"SEATS (OTHER THAN THOSE OF HEADING 9402), WHETHER OR NOT CONVERTIBLE INTO BEDS, AND PARTS THEREOF::Parts(OLD tariff)" +94019100,"SEATS (OTHER THAN THOSE OF HEADING 9402), WHETHER OR NOT CONVERTIBLE INTO BEDS, AND PARTS THEREOF::Of wood" +94019900,"SEATS (OTHER THAN THOSE OF HEADING 9402), WHETHER OR NOT CONVERTIBLE INTO BEDS, AND PARTS THEREOF::Other" +94020000,medical surgical dental or veterinary furniture for example operating tables examination tables hospital beds with mechanical fittings dentists chairs barbers chairs and similar chairs having rotating as well as both reclining and elevating movements parts of the foregoing articles medical surgical dental or veterinary furniture for example operating tables examination tables hospital beds with mechanical fittings dentists chairs barbers chairs and similar chairs having rotating as well as both reclining and elevating movements parts of the foregoing articles +94021000,medical surgical dental or veterinary furniture for example operating tables examination tables hospital beds with mechanical fittings dentists chairs barbers chairs and similar chairs having rotating as well as both reclining and elevating movements parts of the foregoing articles medical surgical dental or veterinary furniture for example operating tables examination tables hospital beds with mechanical fittings dentists chairs barbers chairs and similar chairs having rotating as well as both reclining and elevating movements parts of the foregoing articles >> dentists barbers or similar chairs and parts thereof dentists barbers or similar chairs and parts thereof +94021010,"MEDICAL, SURGICAL, DENTAL OR VETERINARY FURNITURE (FOR EXAMPLE, OPERATING TABLES, EXAMINATION TABLES, HOSPITAL BEDS WITH MECHANICAL FITTINGS, DENTISTS' CHAIRS); BARBERS' CHAIRS AND SIMILAR CHAIRS, HAVING ROTATING AS WELL AS BOTH RECLINING AND ELEVATING MOVEMENTS; PARTS OF THE FOREGOING ARTICLES:Dentist's, barber's or similar chairs and parts thereof :Dentist's chairs and parts thereof" +94021090,"MEDICAL, SURGICAL, DENTAL OR VETERINARY FURNITURE (FOR EXAMPLE, OPERATING TABLES, EXAMINATION TABLES, HOSPITAL BEDS WITH MECHANICAL FITTINGS, DENTISTS' CHAIRS); BARBERS' CHAIRS AND SIMILAR CHAIRS, HAVING ROTATING AS WELL AS BOTH RECLINING AND ELEVATING MOVEMENTS; PARTS OF THE FOREGOING ARTICLES:Dentist's, barber's or similar chairs and parts thereof :Other" +94029000,medical surgical dental or veterinary furniture for example operating tables examination tables hospital beds with mechanical fittings dentists chairs barbers chairs and similar chairs having rotating as well as both reclining and elevating movements parts of 
the foregoing articles medical surgical dental or veterinary furniture for example operating tables examination tables hospital beds with mechanical fittings dentists chairs barbers chairs and similar chairs having rotating as well as both reclining and elevating movements parts of the foregoing articles >> other other +94029010,"MEDICAL, SURGICAL, DENTAL OR VETERINARY FURNITURE (FOR EXAMPLE, OPERATING TABLES, EXAMINATION TABLES, HOSPITAL BEDS WITH MECHANICAL FITTINGS, DENTISTS' CHAIRS); BARBERS' CHAIRS AND SIMILAR CHAIRS, HAVING ROTATING AS WELL AS BOTH RECLINING AND ELEVATING MOVEMENTS; PARTS OF THE FOREGOING ARTICLES:Other :Hospital beds with mechanical fittings" +94029020,"MEDICAL, SURGICAL, DENTAL OR VETERINARY FURNITURE (FOR EXAMPLE, OPERATING TABLES, EXAMINATION TABLES, HOSPITAL BEDS WITH MECHANICAL FITTINGS, DENTISTS' CHAIRS); BARBERS' CHAIRS AND SIMILAR CHAIRS, HAVING ROTATING AS WELL AS BOTH RECLINING AND ELEVATING MOVEMENTS; PARTS OF THE FOREGOING ARTICLES:Other :Parts" +94029090,"MEDICAL, SURGICAL, DENTAL OR VETERINARY FURNITURE (FOR EXAMPLE, OPERATING TABLES, EXAMINATION TABLES, HOSPITAL BEDS WITH MECHANICAL FITTINGS, DENTISTS' CHAIRS); BARBERS' CHAIRS AND SIMILAR CHAIRS, HAVING ROTATING AS WELL AS BOTH RECLINING AND ELEVATING MOVEMENTS; PARTS OF THE FOREGOING ARTICLES:Other :Other" +94030000,other furniture and parts thereof other furniture and parts thereof +94031000,other furniture and parts thereof other furniture and parts thereof >> metal furniture of a kind used in offices metal furniture of a kind used in offices +94031010,OTHER FURNITURE AND PARTS THEREOF:Metal furniture of a kind used in offices :Of steel +94031090,OTHER FURNITURE AND PARTS THEREOF:Metal furniture of a kind used in offices :Other +94032000,other furniture and parts thereof other furniture and parts thereof >> other metal furniture other metal furniture +94032010,OTHER FURNITURE AND PARTS THEREOF:Other metal furniture :Of steel +94032090,OTHER FURNITURE AND PARTS THEREOF:Other metal furniture :Other +94033000,other furniture and parts thereof other furniture and parts thereof >> wooden furniture of a kind used in offices wooden furniture of a kind used in offices +94033010,OTHER FURNITURE AND PARTS THEREOF:Wooden furniture of a kind used in offices :Cabinetware +94033090,OTHER FURNITURE AND PARTS THEREOF:Wooden furniture of a kind used in offices :Other +94034000,OTHER FURNITURE AND PARTS THEREOF::Wooden furniture of a kind used in the kitchen +94035000,other furniture and parts thereof other furniture and parts thereof >> wooden furniture of a kind used in the bed room wooden furniture of a kind used in the bed room +94035010,OTHER FURNITURE AND PARTS THEREOF:Wooden furniture of a kind used in the bed room :Bed stead +94035090,OTHER FURNITURE AND PARTS THEREOF:Wooden furniture of a kind used in the bed room :Other +94036000,OTHER FURNITURE AND PARTS THEREOF::Other wooden furniture +94037000,OTHER FURNITURE AND PARTS THEREOF::Furniture of plastics +94038100,OTHER FURNITURE AND PARTS THEREOF::Of bamboo or rattan(OLD tariff) +94038200,OTHER FURNITURE AND PARTS THEREOF:OF BAMBOO:OF BAMBOO +94038300,OTHER FURNITURE AND PARTS THEREOF:OF RATTAN:OF RATTAN +94038900,OTHER FURNITURE AND PARTS THEREOF::Other +94039000,OTHER FURNITURE AND PARTS THEREOF::Parts(OLD tariff) +94039100,OTHER FURNITURE AND PARTS THEREOF:of:Of wood +94039900,OTHER FURNITURE AND PARTS THEREOF:oth:Other +94040000,mattress supports articles of bedding and similar furnishing for example mattresses quilts eiderdowns cushions pouffes and 
pillows fitted with springs or stuffed or internally fitted with any material or of cellular rubber or plastics whether or not covered +94041000,"MATTRESS SUPPORTS; ARTICLES OF BEDDING AND SIMILAR FURNISHING (FOR EXAMPLE, MATTRESSES, QUILTS, EIDERDOWNS, CUSHIONS, POUFFES AND PILLOWS) FITTED WITH SPRINGS OR STUFFED OR INTERNALLY FITTED WITH ANY MATERIAL OR OF CELLULAR RUBBER OR PLASTICS, WHETHER OR NOT COVERED::Mattress supports" +94042100,mattress supports articles of bedding and similar furnishing for example mattresses quilts eiderdowns cushions pouffes and pillows fitted with springs or stuffed or internally fitted with any material or of cellular rubber or plastics whether or not covered >> of cellular rubber or plastics whether or not covered +94042110,"MATTRESS SUPPORTS; ARTICLES OF BEDDING AND SIMILAR FURNISHING (FOR EXAMPLE, MATTRESSES, QUILTS, EIDERDOWNS, CUSHIONS, POUFFES AND PILLOWS) FITTED WITH SPRINGS OR STUFFED OR INTERNALLY FITTED WITH ANY MATERIAL OR OF CELLULAR RUBBER OR PLASTICS, WHETHER OR NOT COVERED:Of cellular rubber or plastics, whether or not covered :Of rubber" +94042190,"MATTRESS SUPPORTS; ARTICLES OF BEDDING AND SIMILAR FURNISHING (FOR EXAMPLE, MATTRESSES, QUILTS, EIDERDOWNS, CUSHIONS, POUFFES AND PILLOWS) FITTED WITH SPRINGS OR STUFFED OR INTERNALLY FITTED WITH ANY MATERIAL OR OF CELLULAR RUBBER OR PLASTICS, WHETHER OR NOT COVERED:Of cellular rubber or plastics, whether or not covered :Of plastic" +94042900,mattress supports articles of bedding and similar furnishing for example mattresses quilts eiderdowns cushions pouffes and pillows fitted with springs or stuffed or internally fitted with any material or of cellular rubber or plastics whether or not covered >> of other materials +94042910,"MATTRESS SUPPORTS; ARTICLES OF BEDDING AND SIMILAR FURNISHING (FOR EXAMPLE, MATTRESSES, QUILTS, EIDERDOWNS, CUSHIONS, POUFFES AND PILLOWS) FITTED WITH SPRINGS OR STUFFED OR INTERNALLY FITTED WITH ANY MATERIAL OR OF CELLULAR RUBBER OR PLASTICS, WHETHER OR NOT COVERED:Of other materials :Spring interior" +94042920,"MATTRESS SUPPORTS; ARTICLES OF BEDDING AND SIMILAR FURNISHING (FOR EXAMPLE, MATTRESSES, QUILTS, EIDERDOWNS, CUSHIONS, POUFFES AND PILLOWS) FITTED WITH SPRINGS OR STUFFED OR INTERNALLY FITTED WITH ANY MATERIAL OR OF CELLULAR RUBBER OR PLASTICS, WHETHER OR NOT COVERED:Of other materials :OF RUUBERISED COIR WITH OR WITHOUT COMBINATION of other materials whether or not with metallic springs" +94042990,"MATTRESS SUPPORTS; ARTICLES OF BEDDING AND SIMILAR FURNISHING (FOR EXAMPLE, MATTRESSES, QUILTS, EIDERDOWNS, CUSHIONS, POUFFES AND PILLOWS) FITTED WITH SPRINGS OR STUFFED OR INTERNALLY FITTED WITH ANY MATERIAL OR OF CELLULAR RUBBER OR PLASTICS, WHETHER OR NOT COVERED:Of other materials :other" +94043000,mattress supports articles of bedding and similar furnishing for example mattresses quilts eiderdowns cushions pouffes and pillows fitted with springs or stuffed or internally fitted with any material or of cellular rubber or plastics whether or not covered >> sleeping bags +94043010,"MATTRESS SUPPORTS; ARTICLES OF BEDDING AND SIMILAR FURNISHING (FOR EXAMPLE, MATTRESSES, QUILTS, EIDERDOWNS, CUSHIONS, POUFFES AND PILLOWS) FITTED WITH SPRINGS OR STUFFED OR INTERNALLY FITTED WITH ANY MATERIAL OR OF CELLULAR RUBBER OR PLASTICS, WHETHER OR NOT COVERED:Sleeping bags :Filled with feathers or down" +94043090,"MATTRESS SUPPORTS; ARTICLES OF BEDDING AND SIMILAR FURNISHING (FOR EXAMPLE, MATTRESSES, QUILTS, EIDERDOWNS, CUSHIONS, POUFFES AND PILLOWS) FITTED WITH SPRINGS OR STUFFED OR 
INTERNALLY FITTED WITH ANY MATERIAL OR OF CELLULAR RUBBER OR PLASTICS, WHETHER OR NOT COVERED:Sleeping bags :Other" +94044000,mattress supports articles of bedding and similar furnishing for example mattresses quilts eiderdowns cushions pouffes and pillows fitted with springs or stuffed or internally fitted with any material or of cellular rubber or plastics whether or not covered >> quilts bedspreads eiderdowns and comforters +94044010,"MATTRESS SUPPORTS; ARTICLES OF BEDDING AND SIMILAR FURNISHING (FOR EXAMPLE, MATTRESSES, QUILTS, EIDERDOWNS, CUSHIONS, POUFFES AND PILLOWS) FITTED WITH SPRINGS OR STUFFED OR INTERNALLY FITTED WITH ANY MATERIAL OR OF CELLULAR RUBBER OR PLASTICS, WHETHER OR NOT COVERED:Quilts, bedspreads, eiderdowns and duvets (comforters)::Quilts" +94044020,"MATTRESS SUPPORTS; ARTICLES OF BEDDING AND SIMILAR FURNISHING (FOR EXAMPLE, MATTRESSES, QUILTS, EIDERDOWNS, CUSHIONS, POUFFES AND PILLOWS) FITTED WITH SPRINGS OR STUFFED OR INTERNALLY FITTED WITH ANY MATERIAL OR OF CELLULAR RUBBER OR PLASTICS, WHETHER OR NOT COVERED:Quilts, bedspreads, eiderdowns and duvets (comforters)::Bedspreads" +94044030,"MATTRESS SUPPORTS; ARTICLES OF BEDDING AND SIMILAR FURNISHING (FOR EXAMPLE, MATTRESSES, QUILTS, EIDERDOWNS, CUSHIONS, POUFFES AND PILLOWS) FITTED WITH SPRINGS OR STUFFED OR INTERNALLY FITTED WITH ANY MATERIAL OR OF CELLULAR RUBBER OR PLASTICS, WHETHER OR NOT COVERED:Quilts, bedspreads, eiderdowns and duvets (comforters)::Eiderdowns" +94044040,"MATTRESS SUPPORTS; ARTICLES OF BEDDING AND SIMILAR FURNISHING (FOR EXAMPLE, MATTRESSES, QUILTS, EIDERDOWNS, CUSHIONS, POUFFES AND PILLOWS) FITTED WITH SPRINGS OR STUFFED OR INTERNALLY FITTED WITH ANY MATERIAL OR OF CELLULAR RUBBER OR PLASTICS, WHETHER OR NOT COVERED:Quilts, bedspreads, eiderdowns and duvets (comforters)::Duvets (comforters)" +94049000,"MATTRESS SUPPORTS; ARTICLES OF BEDDING AND SIMILAR FURNISHING (FOR EXAMPLE, MATTRESSES, QUILTS, EIDERDOWNS, CUSHIONS, POUFFES AND PILLOWS) FITTED WITH SPRINGS OR STUFFED OR INTERNALLY FITTED WITH ANY MATERIAL OR OF CELLULAR RUBBER OR PLASTICS, WHETHER OR NOT COVERED:Other :Other" +94049011,"MATTRESS SUPPORTS; ARTICLES OF BEDDING AND SIMILAR FURNISHING (FOR EXAMPLE, MATTRESSES, QUILTS, EIDERDOWNS, CUSHIONS, POUFFES AND PILLOWS) FITTED WITH SPRINGS OR STUFFED OR INTERNALLY FITTED WITH ANY MATERIAL OR OF CELLULAR RUBBER OR PLASTICS, WHETHER OR NOT COVERED:Other :Filled with feathers or down(OLD tariff)" +94049019,"MATTRESS SUPPORTS; ARTICLES OF BEDDING AND SIMILAR FURNISHING (FOR EXAMPLE, MATTRESSES, QUILTS, EIDERDOWNS, CUSHIONS, POUFFES AND PILLOWS) FITTED WITH SPRINGS OR STUFFED OR INTERNALLY FITTED WITH ANY MATERIAL OR OF CELLULAR RUBBER OR PLASTICS, WHETHER OR NOT COVERED:Other :Other(OLD tariff)" +94049091,"MATTRESS SUPPORTS; ARTICLES OF BEDDING AND SIMILAR FURNISHING (FOR EXAMPLE, MATTRESSES, QUILTS, EIDERDOWNS, CUSHIONS, POUFFES AND PILLOWS) FITTED WITH SPRINGS OR STUFFED OR INTERNALLY FITTED WITH ANY MATERIAL OR OF CELLULAR RUBBER OR PLASTICS, WHETHER OR NOT COVERED:Other :Filled with feathers or down(OLD tariff)" +94049099,"MATTRESS SUPPORTS; ARTICLES OF BEDDING AND SIMILAR FURNISHING (FOR EXAMPLE, MATTRESSES, QUILTS, EIDERDOWNS, CUSHIONS, POUFFES AND PILLOWS) FITTED WITH SPRINGS OR STUFFED OR INTERNALLY FITTED WITH ANY MATERIAL OR OF CELLULAR RUBBER OR PLASTICS, WHETHER OR NOT COVERED:Other :Other(OLD tariff)" +94050000,searchlights and spotlights and parts thereof not elsewhere specified or included illuminated signs illuminated and permanently fixed light the like having a parts 
thereof not elsewhere source and specified or included chandeliers and other electric ceiling or wall lighting fittings excluding those of a kind used for lighting public open spaces or thoroughfares +94051010,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED:Chandeliers and other electric ceiling or wall lighting fittings, excluding those of a kind used for lighting public open spaces or thorough fares :Hanging lamps, complete fittings(OLD tariff)" +94051020,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED:Chandeliers and other electric ceiling or wall lighting fittings, excluding those of a kind used for lighting public open spaces or thorough fares :Wall lamps(OLD tariff)" +94051090,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED:Chandeliers and other electric ceiling or wall lighting fittings, excluding those of a kind used for lighting public open spaces or thorough fares :Other(OLD tariff)" +94051100,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED::Designed for use solely with light-emitting diode (LED) light source" +94051900,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED:oth:Others" +94052010,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED:Electric table, desk, bedside or floor-standing lamps :Table lamps, complete fittings(OLD tariff)" +94052090,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED:Electric table, desk, bedside or floor-standing lamps :Other(OLD tariff)" +94052100,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED:Des:Designed for use solely with light-emitting diode (LED) light sources" +94052900,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY 
FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED::Other" +94053000,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED::Lighting sets of a kind used for Christmas trees(OLD tariff)" +94053100,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED:des:Designed for use solely with light-emitting diode (LED) light sources" +94053900,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED:oth:Other" +94054010,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED:Other electric lamps and lighting fittings :Searchlights and sportlights(OLD tariff)" +94054090,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED:Other electric lamps and lighting fittings :Other(OLD tariff)" +94054100,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED:photo:Photovoltaic, designed for use solely with lightemitting diode (LED) light sources" +94054200,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED:oth:Other, designed for use solely with light-emitting diode (LED) light sources" +94054900,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED:Oth:Other" +94055000,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED:Non-electrical lamps and lighting fittings :Non-electrical luminaires and lighting fittings" +94055010,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED:Non-electrical lamps and lighting fittings 
:Hurricane lanterns(OLD tariff)" +94055020,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED:Non-electrical lamps and lighting fittings :Miner's safety lamps(OLD tariff)" +94055031,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED:Non-electrical lamps and lighting fittings :Kerosene pressure lanterns(OLD tariff)" +94055039,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED:Non-electrical lamps and lighting fittings :Other(OLD tariff)" +94055040,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED:Non-electrical lamps and lighting fittings :Solar lanterns or lamps(OLD tariff)" +94055051,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED:Non-electrical lamps and lighting fittings :Metal(OLD tariff)" +94055059,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED:Non-electrical lamps and lighting fittings :Other(OLD tariff)" +94056010,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED:Illuminated signs, illuminated name-plates and the like :Of plastic(OLD tariff)" +94056090,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED:Illuminated signs, illuminated name-plates and the like :Of other materials(OLD tariff)" +94056100,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED::Designed for use solely with light-emitting diode (LED) light sources" +94056900,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR 
INCLUDED::Other" +94059100,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED::Of glass" +94059200,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED::Of plastics" +94059900,"LAMPS AND LIGHTING FITTINGS INCLUDING SEARCHLIGHTS AND SPOTLIGHTS AND PARTS THEREOF, NOT ELSEWHERE SPECIFIED OR INCLUDED; ILLUMINATED SIGNS, ILLUMINATED NAME-PLATES AND THE LIKE, HAVING A PERMANENTLY FIXED LIGHT SOURCE, AND PARTS THEREOF NOT ELSEWHERE SPECIFIED OR INCLUDED::Other" +94060011,PREFABRICATED BUILDINGS:Prefabricated buildings :Green house - in ready to assemble sets(OLD tariff) +94060019,PREFABRICATED BUILDINGS:Prefabricated buildings :Other(OLD tariff) +94060091,PREFABRICATED BUILDINGS:Prefabricated buildings :Prefabricated housing material(OLD tariff) +94060092,PREFABRICATED BUILDINGS:Prefabricated buildings :Prefabricated construction for cold storage(OLD tariff) +94060093,PREFABRICATED BUILDINGS:Prefabricated buildings :Silos for storing ensilage(OLD tariff) +94060099,PREFABRICATED BUILDINGS:Prefabricated buildings :Other(OLD tariff) +94061010,PREFABRICATED BUILDINGS:Of wood:Green-houses(OLD tariff) +94061020,PREFABRICATED BUILDINGS:Of wood:For cold storage(OLD tariff) +94061030,PREFABRICATED BUILDINGS:Of wood:Silos for storing ensilage(OLD tariff) +94061090,PREFABRICATED BUILDINGS:Of wood:Other(OLD tariff) +94062000,"PREFABRICATED BUILDINGS:mod:Modular building units, of steel(OLD tariff)" +94069010,PREFABRICATED BUILDINGS:Other:Green-houses(OLD tariff) +94069020,PREFABRICATED BUILDINGS:Other:For cold storage(OLD tariff) +94069030,PREFABRICATED BUILDINGS:Other:Silos for storing ensilage(OLD tariff) +94069090,PREFABRICATED BUILDINGS:Other:Other(OLD tariff) +95030000,tricycles scooters pedal cars and similar wheeled toys dolls carriages dolls other toys scale models and similar recreational models working or not puzzles of all kinds +95030010,"TRICYCLES, SCOOTERS, PEDAL CARS AND SIMILAR WHEELED TOYS; DOLLS' CARRIAGES; DOLLS; OTHER TOYS; REDUCED-SIZE (""SCALE"") MODELS AND SIMILAR RECREATIONAL MODELS, WORKING OR NOT; PUZZLES OF ALLKINDS:Tricycles, scooters, pedal cars and similar wheeled toys; doll's carriages; dolls; other toys; reduced-size (scale) models and similar recreational models, working or not; puzzles of all kinds:Of wood" +95030020,"TRICYCLES, SCOOTERS, PEDAL CARS AND SIMILAR WHEELED TOYS; DOLLS' CARRIAGES; DOLLS; OTHER TOYS; REDUCED-SIZE (""SCALE"") MODELS AND SIMILAR RECREATIONAL MODELS, WORKING OR NOT; PUZZLES OF ALLKINDS:Tricycles, scooters, pedal cars and similar wheeled toys; doll's carriages; dolls; other toys; reduced-size (scale) models and similar recreational models, working or not; puzzles of all kinds:Of metal" +95030030,"TRICYCLES, SCOOTERS, PEDAL CARS AND SIMILAR WHEELED TOYS; DOLLS' CARRIAGES; DOLLS; OTHER TOYS; REDUCED-SIZE (""SCALE"") MODELS AND SIMILAR RECREATIONAL MODELS, WORKING OR NOT; PUZZLES OF ALLKINDS:Tricycles, scooters, pedal cars and similar wheeled toys; doll's carriages; dolls; other toys; reduced-size (scale) models and similar recreational models, working or not; puzzles of all kinds:Of plastics(OLD tariff)" 
+95030090,"TRICYCLES, SCOOTERS, PEDAL CARS AND SIMILAR WHEELED TOYS; DOLLS' CARRIAGES; DOLLS; OTHER TOYS; REDUCED-SIZE (""SCALE"") MODELS AND SIMILAR RECREATIONAL MODELS, WORKING OR NOT; PUZZLES OF ALLKINDS:Tricycles, scooters, pedal cars and similar wheeled toys; doll's carriages; dolls; other toys; reduced-size (scale) models and similar recreational models, working or not; puzzles of all kinds:Other(OLD tariff)" +95030091,tricycles scooters pedal cars and similar wheeled toys dolls carriages dolls other toys scale models and similar recreational models working or not puzzles of all kinds >> of electronic toys +95030099,tricycles scooters pedal cars and similar wheeled toys dolls carriages dolls other toys scale models and similar recreational models working or not puzzles of all kinds >> other video game consoles and machines table or +95040000,parlour games including pintables billiards special tables for casino games and automatic bowling equipment amusement machines operated by coins banknotes bank cards tokens or by any other means of payment +95041000,"ARTICLES FOR FUNFAIR, TABLE OR PARLOUR GAMES, INCLUDING PINTABLES, BILLIARDS, SPECIAL TABLES FOR CASINO GAMES AND AUTOMATIC BOWLING ALLEY EQUIPMENT::Video games of a kind used with a television receiver(OLD tariff)" +95042000,"ARTICLES FOR FUNFAIR, TABLE OR PARLOUR GAMES, INCLUDING PINTABLES, BILLIARDS, SPECIAL TABLES FOR CASINO GAMES AND AUTOMATIC BOWLING ALLEY EQUIPMENT::Articles and accessories for billiards of all kinds" +95043000,"ARTICLES FOR FUNFAIR, TABLE OR PARLOUR GAMES, INCLUDING PINTABLES, BILLIARDS, SPECIAL TABLES FOR CASINO GAMES AND AUTOMATIC BOWLING ALLEY EQUIPMENT:Other games, operated by coins, bank notes, bank cards, tokens or by other means of payment, other than bowling alley equipment:Other games, operated by coins, banknotes, bank cards, tokens or by any other means of payment, other than automatic bowling alley equipment" +95044000,"ARTICLES FOR FUNFAIR, TABLE OR PARLOUR GAMES, INCLUDING PINTABLES, BILLIARDS, SPECIAL TABLES FOR CASINO GAMES AND AUTOMATIC BOWLING ALLEY EQUIPMENT::Playing cards" +95045000,"ARTICLES FOR FUNFAIR, TABLE OR PARLOUR GAMES, INCLUDING PINTABLES, BILLIARDS, SPECIAL TABLES FOR CASINO GAMES AND AUTOMATIC BOWLING ALLEY EQUIPMENT:Video game:Video game consoles and machines, other than those of sub-heading 950430" +95049000,parlour games including pintables billiards special tables for casino games and automatic bowling equipment amusement machines operated by coins banknotes bank cards tokens or by any other means of payment >> other +95049010,"ARTICLES FOR FUNFAIR, TABLE OR PARLOUR GAMES, INCLUDING PINTABLES, BILLIARDS, SPECIAL TABLES FOR CASINO GAMES AND AUTOMATIC BOWLING ALLEY EQUIPMENT:Other :Chess set, all types" +95049020,"ARTICLES FOR FUNFAIR, TABLE OR PARLOUR GAMES, INCLUDING PINTABLES, BILLIARDS, SPECIAL TABLES FOR CASINO GAMES AND AUTOMATIC BOWLING ALLEY EQUIPMENT:Other :Carrom Board, with or without coins and strikers" +95049090,"ARTICLES FOR FUNFAIR, TABLE OR PARLOUR GAMES, INCLUDING PINTABLES, BILLIARDS, SPECIAL TABLES FOR CASINO GAMES AND AUTOMATIC BOWLING ALLEY EQUIPMENT:Other :Other" +95050000,festive carnival or other entertainment articles including conjuring tricks and novelty jokes +95051000,"FESTIVE, CARNIVAL OR OTHER ENTERTAINMENT ARTICLES, INCLUDING CONJURING TRICKS AND NOVELTY JOKES::Articles for Christmas festivities" +95059000,festive carnival or other entertainment articles including conjuring tricks and novelty jokes >> other +95059010,"FESTIVE, CARNIVAL OR OTHER 
ENTERTAINMENT ARTICLES, INCLUDING CONJURING TRICKS AND NOVELTY JOKES:Other :Magical equipments" +95059090,"FESTIVE, CARNIVAL OR OTHER ENTERTAINMENT ARTICLES, INCLUDING CONJURING TRICKS AND NOVELTY JOKES:Other :Other" +95060000,articles and equipment for general physical exercise gymnastics athletics other sports including or games not specified or included elsewhere in this chapter swimming pools and paddling pools and other equipment +95061100,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS::Skis" +95061200,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS::Ski-fastenings (ski-bindings)" +95061900,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS::Other" +95062100,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS::Sailboards" +95062900,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS::Other" +95063100,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS::Clubs, complete" +95063200,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS::Balls" +95063900,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS::Other" +95064000,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS::Articles and equipment for table-tennis" +95065100,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS::Lawn-tennis rackets, whether or not strung" +95065900,articles and equipment for general physical exercise gymnastics athletics other sports including or games not specified or included elsewhere in this chapter swimming pools and paddling pools and other equipment >> other +95065910,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS:Other :Squash or racketball badminton rackets, whether or not strung" +95065990,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, 
ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS:Other :Other" +95066100,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS::Lawn-tennis balls" +95066200,articles and equipment for general physical exercise gymnastics athletics other sports including or games not specified or included elsewhere in this chapter swimming pools and paddling pools and other equipment >> inflatable +95066210,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS:Inflatable :Football" +95066220,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS:Inflatable :Volley ball" +95066230,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS:Inflatable :Basket ball" +95066290,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS:Inflatable :Other" +95066900,articles and equipment for general physical exercise gymnastics athletics other sports including or games not specified or included elsewhere in this chapter swimming pools and paddling pools and other equipment >> other +95066910,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS:Other :Hockey ball" +95066920,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS:Other :Cricket ball" +95066930,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS:Other :Golf ball" +95066940,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS:Other :Rugby ball" +95066990,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS:Other :Other" +95067000,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS::Ice skates and roller skates, including skating boots with skates attached" +95069100,articles and equipment for general physical exercise gymnastics athletics other sports 
including or games not specified or included elsewhere in this chapter swimming pools and paddling pools and other equipment >> articles and equipment for general physical exercise gymnastics or athletics +95069110,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS:Articles and equipment for general physical exercise, gymnastics or athletics :Boxing equipment" +95069190,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS:Articles and equipment for general physical exercise, gymnastics or athletics :Other" +95069900,articles and equipment for general physical exercise gymnastics athletics other sports including or games not specified or included elsewhere in this chapter swimming pools and paddling pools and other equipment >> other +95069910,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS:Other :Badminton shuttle cocks" +95069920,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS:Other :Leg pads and bats for cricket" +95069930,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS:Other :Shoulder pads for football" +95069940,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS:Other :Hockey sticks and blades" +95069950,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS:Other :Polo sticks including blades, shafts and heads" +95069960,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS:Other :Sports net" +95069970,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS:Other :Tennis and badminton racket pressures" +95069980,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS:Other :Shin-guards and elbow or shoulders pads excluding those for football; waist, thigh and hip protective equipment" +95069990,"ARTICLES AND EQUIPMENT FOR GENERAL PHYSICAL EXERCISE, GYMNASTICS, ATHLETICS, OTHER SPORTS (INCLUDING TABLETENNIS) OR OUT-DOOR GAMES, NOT SPECIFIED OR INCLUDED ELSEWHERE IN THIS CHAPTER; SWIMMING POOLS AND PADDLING POOLS:Other :Other" 
+95070000,fishing rods and other line fishing tackle fish landing nets butterfly nets and similar nets decoy other than those of heading 9208 or 9705 and similar hunting or shooting requisites +95071000,"FISHING RODS, FISH-HOOKS AND OTHER LINE FISHING TACKLE; FISH LANDING NETS, BUTTERFLY NETS AND SIMILAR NETS; DECOY ""BIRDS"" (OTHER THAN THOSE OF HEADING 9208 OR 9705) AND SIMILAR HUNTING OR SHOOTING REQUISITES::Fishing rods" +95072000,"FISHING RODS, FISH-HOOKS AND OTHER LINE FISHING TACKLE; FISH LANDING NETS, BUTTERFLY NETS AND SIMILAR NETS; DECOY ""BIRDS"" (OTHER THAN THOSE OF HEADING 9208 OR 9705) AND SIMILAR HUNTING OR SHOOTING REQUISITES::Fish-hooks, whether or not snelled" +95073000,"FISHING RODS, FISH-HOOKS AND OTHER LINE FISHING TACKLE; FISH LANDING NETS, BUTTERFLY NETS AND SIMILAR NETS; DECOY ""BIRDS"" (OTHER THAN THOSE OF HEADING 9208 OR 9705) AND SIMILAR HUNTING OR SHOOTING REQUISITES::Fishing reels" +95079000,fishing rods and other line fishing tackle fish landing nets butterfly nets and similar nets decoy other than those of heading 9208 or 9705 and similar hunting or shooting requisites >> other +95079010,"FISHING RODS, FISH-HOOKS AND OTHER LINE FISHING TACKLE; FISH LANDING NETS, BUTTERFLY NETS AND SIMILAR NETS; DECOY ""BIRDS"" (OTHER THAN THOSE OF HEADING 9208 OR 9705) AND SIMILAR HUNTING OR SHOOTING REQUISITES:Other :Fish landing and butterfly nets" +95079090,"FISHING RODS, FISH-HOOKS AND OTHER LINE FISHING TACKLE; FISH LANDING NETS, BUTTERFLY NETS AND SIMILAR NETS; DECOY ""BIRDS"" (OTHER THAN THOSE OF HEADING 9208 OR 9705) AND SIMILAR HUNTING OR SHOOTING REQUISITES:Other :Other" +95080000,menageries amusement park rides and water park amusements fairground amusements including shooting galleries travelling theatres +95081000,"ROUNDABOUTS, SWINGS, SHOOTING GALLERIES AND OTHER FAIRGROUND AMUSEMENTS; TRAVELLING CIRCUSES, TRAVELLING MENAGERIES AND TRAVELLING THEATRES::Travelling circuses and travelling menageries" +95082100,"ROUNDABOUTS, SWINGS, SHOOTING GALLERIES AND OTHER FAIRGROUND AMUSEMENTS; TRAVELLING CIRCUSES, TRAVELLING MENAGERIES AND TRAVELLING THEATRES:rol:Roller coasters" +95082200,"ROUNDABOUTS, SWINGS, SHOOTING GALLERIES AND OTHER FAIRGROUND AMUSEMENTS; TRAVELLING CIRCUSES, TRAVELLING MENAGERIES AND TRAVELLING THEATRES::Carousels, swings and roundabouts" +95082300,"ROUNDABOUTS, SWINGS, SHOOTING GALLERIES AND OTHER FAIRGROUND AMUSEMENTS; TRAVELLING CIRCUSES, TRAVELLING MENAGERIES AND TRAVELLING THEATRES::Dodge?em cars" +95082400,"ROUNDABOUTS, SWINGS, SHOOTING GALLERIES AND OTHER FAIRGROUND AMUSEMENTS; TRAVELLING CIRCUSES, TRAVELLING MENAGERIES AND TRAVELLING THEATRES::Motion simulators and moving theatres" +95082500,"ROUNDABOUTS, SWINGS, SHOOTING GALLERIES AND OTHER FAIRGROUND AMUSEMENTS; TRAVELLING CIRCUSES, TRAVELLING MENAGERIES AND TRAVELLING THEATRES::Water rides" +95082600,"ROUNDABOUTS, SWINGS, SHOOTING GALLERIES AND OTHER FAIRGROUND AMUSEMENTS; TRAVELLING CIRCUSES, TRAVELLING MENAGERIES AND TRAVELLING THEATRES::Water park amusements" +95082900,"ROUNDABOUTS, SWINGS, SHOOTING GALLERIES AND OTHER FAIRGROUND AMUSEMENTS; TRAVELLING CIRCUSES, TRAVELLING MENAGERIES AND TRAVELLING THEATRES::Other" +95083000,"ROUNDABOUTS, SWINGS, SHOOTING GALLERIES AND OTHER FAIRGROUND AMUSEMENTS; TRAVELLING CIRCUSES, TRAVELLING MENAGERIES AND TRAVELLING THEATRES::Fairground amusements" +95084000,"ROUNDABOUTS, SWINGS, SHOOTING GALLERIES AND OTHER FAIRGROUND AMUSEMENTS; TRAVELLING CIRCUSES, TRAVELLING MENAGERIES AND TRAVELLING THEATRES::Travelling theatres" +95089000,"ROUNDABOUTS, 
SWINGS, SHOOTING GALLERIES AND OTHER FAIRGROUND AMUSEMENTS; TRAVELLING CIRCUSES, TRAVELLING MENAGERIES AND TRAVELLING THEATRES::Other(OLD tariff)" +96010000,worked ivory bone horn antlers coral and other animal carving material and articles of these materials including articles obtained by moulding +96011000,"WORKED IVORY, BONE, TORTOISE-SHELL, HORN, ANTLERS, CORAL, MOTHER-OF-PEARL AND OTHER ANIMAL CARVING MATERIAL, AND ARTICLES OF THESE MATERIALS (INCLUDING ARTICLES OBTAINED BY MOULDING)::Worked ivory and articles of ivory" +96019000,worked ivory bone horn antlers coral and other animal carving material and articles of these materials including articles obtained by moulding >> other +96019010,"WORKED IVORY, BONE, TORTOISE-SHELL, HORN, ANTLERS, CORAL, MOTHER-OF-PEARL AND OTHER ANIMAL CARVING MATERIAL, AND ARTICLES OF THESE MATERIALS (INCLUDING ARTICLES OBTAINED BY MOULDING):Other :Worked tortoise-shell and articles thereof" +96019020,"WORKED IVORY, BONE, TORTOISE-SHELL, HORN, ANTLERS, CORAL, MOTHER-OF-PEARL AND OTHER ANIMAL CARVING MATERIAL, AND ARTICLES OF THESE MATERIALS (INCLUDING ARTICLES OBTAINED BY MOULDING):Other :Worked mother-of-pearl and articles thereof" +96019030,"WORKED IVORY, BONE, TORTOISE-SHELL, HORN, ANTLERS, CORAL, MOTHER-OF-PEARL AND OTHER ANIMAL CARVING MATERIAL, AND ARTICLES OF THESE MATERIALS (INCLUDING ARTICLES OBTAINED BY MOULDING):Other :Worked bone (excluding whale bone) and articles thereof" +96019040,"WORKED IVORY, BONE, TORTOISE-SHELL, HORN, ANTLERS, CORAL, MOTHER-OF-PEARL AND OTHER ANIMAL CARVING MATERIAL, AND ARTICLES OF THESE MATERIALS (INCLUDING ARTICLES OBTAINED BY MOULDING):Other :Worked horn, coral and other animal carving material and articles thereof" +96019090,"WORKED IVORY, BONE, TORTOISE-SHELL, HORN, ANTLERS, CORAL, MOTHER-OF-PEARL AND OTHER ANIMAL CARVING MATERIAL, AND ARTICLES OF THESE MATERIALS (INCLUDING ARTICLES OBTAINED BY MOULDING):Other :Other" +96020000,wo rk ed v eg e tab l e o r m in er a l c arvi n g material and articles of these materials moulded or carved articles of wax of stearin of natural gums or natural resins or of modelling and other moulded or pastes specified or carved articles not elsewhere included worked unhardened gelatin except gelatin of heading 3503 and articles of unhardened gelatin worked vegetable or mineral carving material and articles of these materials moulded or carved articles of wax of stearin of natural gums or natural resins or of modelling pastes and other moulded or carved articles not elsewhere specified or included worked unhardened gelatin except gelatin of heading 3503 and articles of unhardened gelatin +96020010,"WORKED VEGETABLE OR MINERAL CARVING MATERIAL AND ARTICLES OF THESE MATERIALS MOULDED OR CARVED ARTICLES OF WAX, OF STEARIN, OF NATURAL GUMS OR NATURAL RESINS OR OF MODELLING PASTES, AND OTHER MOULDED OR CARVED ARTICLES, NOT ELSEWHERE SPECIFIED OR INCLUDED; WORKED, UNHARDENED GELATIN (EXCEPT GELATIN OF HEADING 3503) AND ARTICLES OF UNHARDENED GELATIN:Worked vegetable or mineral carving material and articles of these materials moulded or carved articles of wax, of stearin, of natural gums or natural resins or of modelling pastes, and other moulded or carved articles, not elsewhere specified or included; worked, unhardened gelatin (except gelatin of heading 3503) and articles of unhardened gelatin:Worked vegetable carving material and articles thereof" +96020020,"WORKED VEGETABLE OR MINERAL CARVING MATERIAL AND ARTICLES OF THESE MATERIALS MOULDED OR CARVED ARTICLES OF WAX, OF STEARIN, OF 
NATURAL GUMS OR NATURAL RESINS OR OF MODELLING PASTES, AND OTHER MOULDED OR CARVED ARTICLES, NOT ELSEWHERE SPECIFIED OR INCLUDED; WORKED, UNHARDENED GELATIN (EXCEPT GELATIN OF HEADING 3503) AND ARTICLES OF UNHARDENED GELATIN:Worked vegetable or mineral carving material and articles of these materials moulded or carved articles of wax, of stearin, of natural gums or natural resins or of modelling pastes, and other moulded or carved articles, not elsewhere specified or included; worked, unhardened gelatin (except gelatin of heading 3503) and articles of unhardened gelatin:Moulded or carved articles of wax, stearin, natural gums and resins and other moulded or carved articles" +96020030,"WORKED VEGETABLE OR MINERAL CARVING MATERIAL AND ARTICLES OF THESE MATERIALS MOULDED OR CARVED ARTICLES OF WAX, OF STEARIN, OF NATURAL GUMS OR NATURAL RESINS OR OF MODELLING PASTES, AND OTHER MOULDED OR CARVED ARTICLES, NOT ELSEWHERE SPECIFIED OR INCLUDED; WORKED, UNHARDENED GELATIN (EXCEPT GELATIN OF HEADING 3503) AND ARTICLES OF UNHARDENED GELATIN:Worked vegetable or mineral carving material and articles of these materials moulded or carved articles of wax, of stearin, of natural gums or natural resins or of modelling pastes, and other moulded or carved articles, not elsewhere specified or included; worked, unhardened gelatin (except gelatin of heading 3503) and articles of unhardened gelatin:Gelatin capsules, empty" +96020040,"WORKED VEGETABLE OR MINERAL CARVING MATERIAL AND ARTICLES OF THESE MATERIALS MOULDED OR CARVED ARTICLES OF WAX, OF STEARIN, OF NATURAL GUMS OR NATURAL RESINS OR OF MODELLING PASTES, AND OTHER MOULDED OR CARVED ARTICLES, NOT ELSEWHERE SPECIFIED OR INCLUDED; WORKED, UNHARDENED GELATIN (EXCEPT GELATIN OF HEADING 3503) AND ARTICLES OF UNHARDENED GELATIN:Worked vegetable or mineral carving material and articles of these materials moulded or carved articles of wax, of stearin, of natural gums or natural resins or of modelling pastes, and other moulded or carved articles, not elsewhere specified or included; worked, unhardened gelatin (except gelatin of heading 3503) and articles of unhardened gelatin:Other articles of unhardened gelatin" +96020090,"WORKED VEGETABLE OR MINERAL CARVING MATERIAL AND ARTICLES OF THESE MATERIALS MOULDED OR CARVED ARTICLES OF WAX, OF STEARIN, OF NATURAL GUMS OR NATURAL RESINS OR OF MODELLING PASTES, AND OTHER MOULDED OR CARVED ARTICLES, NOT ELSEWHERE SPECIFIED OR INCLUDED; WORKED, UNHARDENED GELATIN (EXCEPT GELATIN OF HEADING 3503) AND ARTICLES OF UNHARDENED GELATIN:Worked vegetable or mineral carving material and articles of these materials moulded or carved articles of wax, of stearin, of natural gums or natural resins or of modelling pastes, and other moulded or carved articles, not elsewhere specified or included; worked, unhardened gelatin (except gelatin of heading 3503) and articles of unhardened gelatin:Other(OLD tariff)" +96030000,brooms brushes including brushes constituting parts of machines appliances or vehicles hand operated mechanical floor sweepers not motorised mops and feather dusters prepared knots and tufts for broom or brush making paint pads and rollers squeegees other than roller squeegees +96031000,"BROOMS, BRUSHES (INCLUDING BRUSHES CONSTITUTING PARTS OF MACHINES, APPLIANCES OR VEHICLES), HAND-OPERATED MECHANICAL FLOOR SWEEPERS, NOT MOTORISED, MOPS AND FEATHER DUSTERS; PREPARED KNOTS AND TUFTS FOR BROOM OR BRUSH MAKING; PAINT PADS AND ROLLERS; SQUEEGEES (OTHER THAN ROLLER SQUEEGEES)::Brooms and brushes, consisting of twigs or other 
vegetable materials, bound together, with or without handles" +96032100,"BROOMS, BRUSHES (INCLUDING BRUSHES CONSTITUTING PARTS OF MACHINES, APPLIANCES OR VEHICLES), HAND-OPERATED MECHANICAL FLOOR SWEEPERS, NOT MOTORISED, MOPS AND FEATHER DUSTERS; PREPARED KNOTS AND TUFTS FOR BROOM OR BRUSH MAKING; PAINT PADS AND ROLLERS; SQUEEGEES (OTHER THAN ROLLER SQUEEGEES)::Tooth brushes including dental-plate brushes" +96032900,"BROOMS, BRUSHES (INCLUDING BRUSHES CONSTITUTING PARTS OF MACHINES, APPLIANCES OR VEHICLES), HAND-OPERATED MECHANICAL FLOOR SWEEPERS, NOT MOTORISED, MOPS AND FEATHER DUSTERS; PREPARED KNOTS AND TUFTS FOR BROOM OR BRUSH MAKING; PAINT PADS AND ROLLERS; SQUEEGEES (OTHER THAN ROLLER SQUEEGEES)::Other" +96033000,brooms brushes including brushes constituting parts of machines appliances or vehicles hand operated mechanical floor sweepers not motorised mops and feather dusters prepared knots and tufts for broom or brush making paint pads and rollers squeegees other than roller squeegees >> artists brushes writing brushes and similar brushes for the application of cosmetics +96033010,"BROOMS, BRUSHES (INCLUDING BRUSHES CONSTITUTING PARTS OF MACHINES, APPLIANCES OR VEHICLES), HAND-OPERATED MECHANICAL FLOOR SWEEPERS, NOT MOTORISED, MOPS AND FEATHER DUSTERS; PREPARED KNOTS AND TUFTS FOR BROOM OR BRUSH MAKING; PAINT PADS AND ROLLERS; SQUEEGEES (OTHER THAN ROLLER SQUEEGEES):Artists' brushes, writing brushes and similar brushes for the application of cosmetics :Artist brushes" +96033020,"BROOMS, BRUSHES (INCLUDING BRUSHES CONSTITUTING PARTS OF MACHINES, APPLIANCES OR VEHICLES), HAND-OPERATED MECHANICAL FLOOR SWEEPERS, NOT MOTORISED, MOPS AND FEATHER DUSTERS; PREPARED KNOTS AND TUFTS FOR BROOM OR BRUSH MAKING; PAINT PADS AND ROLLERS; SQUEEGEES (OTHER THAN ROLLER SQUEEGEES):Artists' brushes, writing brushes and similar brushes for the application of cosmetics :Brushes for the application of cosmetics" +96033090,"BROOMS, BRUSHES (INCLUDING BRUSHES CONSTITUTING PARTS OF MACHINES, APPLIANCES OR VEHICLES), HAND-OPERATED MECHANICAL FLOOR SWEEPERS, NOT MOTORISED, MOPS AND FEATHER DUSTERS; PREPARED KNOTS AND TUFTS FOR BROOM OR BRUSH MAKING; PAINT PADS AND ROLLERS; SQUEEGEES (OTHER THAN ROLLER SQUEEGEES):Artists' brushes, writing brushes and similar brushes for the application of cosmetics :Other" +96034000,brooms brushes including brushes constituting parts of machines appliances or vehicles hand operated mechanical floor sweepers not motorised mops and feather dusters prepared knots and tufts for broom or brush making paint pads and rollers squeegees other than roller squeegees >> paint distemper varnish or similar brushes other than brushes of 9603 30 paint pads and rollers +96034010,"BROOMS, BRUSHES (INCLUDING BRUSHES CONSTITUTING PARTS OF MACHINES, APPLIANCES OR VEHICLES), HAND-OPERATED MECHANICAL FLOOR SWEEPERS, NOT MOTORISED, MOPS AND FEATHER DUSTERS; PREPARED KNOTS AND TUFTS FOR BROOM OR BRUSH MAKING; PAINT PADS AND ROLLERS; SQUEEGEES (OTHER THAN ROLLER SQUEEGEES):Paint, distemper, varnish or similar brushes (other than brushes of sub-heading 9603 30); paint pads and rollers :Paint, distemper, varnish or similar brushes (other than brushes of sub-heading 9603 30);" +96034020,"BROOMS, BRUSHES (INCLUDING BRUSHES CONSTITUTING PARTS OF MACHINES, APPLIANCES OR VEHICLES), HAND-OPERATED MECHANICAL FLOOR SWEEPERS, NOT MOTORISED, MOPS AND FEATHER DUSTERS; PREPARED KNOTS AND TUFTS FOR BROOM OR BRUSH MAKING; PAINT PADS AND ROLLERS; SQUEEGEES (OTHER THAN ROLLER SQUEEGEES):Paint, distemper, varnish or 
similar brushes (other than brushes of sub-heading 9603 30); paint pads and rollers :Paint pads and rollers" +96035000,"BROOMS, BRUSHES (INCLUDING BRUSHES CONSTITUTING PARTS OF MACHINES, APPLIANCES OR VEHICLES), HAND-OPERATED MECHANICAL FLOOR SWEEPERS, NOT MOTORISED, MOPS AND FEATHER DUSTERS; PREPARED KNOTS AND TUFTS FOR BROOM OR BRUSH MAKING; PAINT PADS AND ROLLERS; SQUEEGEES (OTHER THAN ROLLER SQUEEGEES)::Other brushes constituting parts of machines, appliances or vehicles" +96039000,"BROOMS, BRUSHES (INCLUDING BRUSHES CONSTITUTING PARTS OF MACHINES, APPLIANCES OR VEHICLES), HAND-OPERATED MECHANICAL FLOOR SWEEPERS, NOT MOTORISED, MOPS AND FEATHER DUSTERS; PREPARED KNOTS AND TUFTS FOR BROOM OR BRUSH MAKING; PAINT PADS AND ROLLERS; SQUEEGEES (OTHER THAN ROLLER SQUEEGEES)::Other" +96040000,::HAND SIEVES AND HAND RIDDLES +96050000,travel sets for personal toilet sewing or shoe or clothes cleaning travel sets for personal toilet sewing or shoe or clothes cleaning +96050010,"TRAVEL SETS FOR PERSONAL TOILET, SEWING OR SHOE OR CLOTHES CLEANING:Travel sets for personal toilet, sewing or shoe or clothes cleaning :For personal toilet" +96050090,"TRAVEL SETS FOR PERSONAL TOILET, SEWING OR SHOE OR CLOTHES CLEANING:Travel sets for personal toilet, sewing or shoe or clothes cleaning :Other(OLD tariff)" +96061010,"BUTTONS, PRESS-FASTENERS, SNAP-FASTENERS AND PRESS-STUDS, BUTTON MOULDS AND OTHER PARTS OF THESE ARTICLES; BUTTON BLANKS:Press-fasteners, snap-fasteners and press-studs and parts thereof :Press-fasteners, snap-fasteners and press-studs(OLD tariff)" +96061020,"BUTTONS, PRESS-FASTENERS, SNAP-FASTENERS AND PRESS-STUDS, BUTTON MOULDS AND OTHER PARTS OF THESE ARTICLES; BUTTON BLANKS:Press-fasteners, snap-fasteners and press-studs and parts thereof :Parts(OLD tariff)" +96062100,"BUTTONS, PRESS-FASTENERS, SNAP-FASTENERS AND PRESS-STUDS, BUTTON MOULDS AND OTHER PARTS OF THESE ARTICLES; BUTTON BLANKS::Of plastics, not covered with textile material(OLD tariff)" +96062200,"BUTTONS, PRESS-FASTENERS, SNAP-FASTENERS AND PRESS-STUDS, BUTTON MOULDS AND OTHER PARTS OF THESE ARTICLES; BUTTON BLANKS::Of base metals, not covered with textile material(OLD tariff)" +96062910,"BUTTONS, PRESS-FASTENERS, SNAP-FASTENERS AND PRESS-STUDS, BUTTON MOULDS AND OTHER PARTS OF THESE ARTICLES; BUTTON BLANKS:Other :Button of coconut shell or wood(OLD tariff)" +96062990,"BUTTONS, PRESS-FASTENERS, SNAP-FASTENERS AND PRESS-STUDS, BUTTON MOULDS AND OTHER PARTS OF THESE ARTICLES; BUTTON BLANKS:Other :Other(OLD tariff)" +96063010,"BUTTONS, PRESS-FASTENERS, SNAP-FASTENERS AND PRESS-STUDS, BUTTON MOULDS AND OTHER PARTS OF THESE ARTICLES; BUTTON BLANKS:Button moulds and other parts of buttons; button blanks :Button blanks(OLD tariff)" +96063090,"BUTTONS, PRESS-FASTENERS, SNAP-FASTENERS AND PRESS-STUDS, BUTTON MOULDS AND OTHER PARTS OF THESE ARTICLES; BUTTON BLANKS:Button moulds and other parts of buttons; button blanks :Other(OLD tariff)" +96070000,slide fasteners and parts thereof slide fasteners +96071100,slide fasteners and parts thereof slide fasteners >> fitted with chain scoops of base metal +96071110,SLIDE FASTENERS AND PARTS THEREOF:Fitted with chain scoops of base metal :Zip fasteners +96071190,SLIDE FASTENERS AND PARTS THEREOF:Fitted with chain scoops of base metal :Other +96071900,slide fasteners and parts thereof slide fasteners >> other +96071910,SLIDE FASTENERS AND PARTS THEREOF:Other :Zip fasteners +96071990,SLIDE FASTENERS AND PARTS THEREOF:Other :Other +96072000,SLIDE FASTENERS AND PARTS THEREOF::Parts +96081011,"BALL 
POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Ball point pens :High value ball point pens (US$100 and above cif per unit)(OLD tariff)" +96081012,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Ball point pens :Ball point pens with body or cap of precious metal or rolled precious metal(OLD tariff)" +96081019,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Ball point pens :Other(OLD tariff)" +96081091,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Ball point pens :High value ball point pens(OLD tariff)" +96081092,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Ball point pens :Ball point pens with body or cap of precious metal or rolled precious metal(OLD tariff)" +96081099,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Ball point pens :Other(OLD tariff)" +96082000,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:felt tipped pen:Felt tipped and other porous-tipped pens and markers(OLD tariff)" +96083011,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Fountain pens, stylograph pens and other pens fountain pens:High value fountain pens (US $100 and above cif per unit)(OLD tariff)" +96083012,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Fountain pens, stylograph 
pens and other pens fountain pens:With body or cap of precious metal or rolled precious metal(OLD tariff)" +96083019,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Fountain pens, stylograph pens and other pens fountain pens:Other(OLD tariff)" +96083021,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Fountain pens, stylograph pens and other pens fountain pens:High value pens(OLD tariff)" +96083022,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Fountain pens, stylograph pens and other pens fountain pens:With body or cap of precious metal or rolled precious metal(OLD tariff)" +96083029,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Fountain pens, stylograph pens and other pens fountain pens:Other(OLD tariff)" +96083091,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Fountain pens, stylograph pens and other pens fountain pens:High value pens (US $100 and above cif per unit)(OLD tariff)" +96083092,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Fountain pens, stylograph pens and other pens fountain pens:With body or cap of precious metal or rolled precious metal(OLD tariff)" +96083099,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Fountain pens, stylograph pens and other pens fountain pens:Other(OLD tariff)" +96084000,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609::Propelling or sliding pencils(OLD tariff)" +96085000,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS 
AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609::Sets of articles from two or more of the foregoing sub-headings(OLD tariff)" +96086010,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Refills for ball point pens, comprising the ball point and ink-reservoir :With liquid ink (for rolling ball-pen)(OLD tariff)" +96086090,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Refills for ball point pens, comprising the ball point and ink-reservoir :Other(OLD tariff)" +96089110,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Pen nibs and nib points :Nib points for pen(OLD tariff)" +96089120,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Pen nibs and nib points :Nibs of wool felt or plastics for use in the manufacture of porous tip pen or markers(OLD tariff)" +96089130,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Pen nibs and nib points :Other pen nibs(OLD tariff)" +96089191,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Pen nibs and nib points :Of metal(OLD tariff)" +96089199,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Pen nibs and nib points :Other(OLD tariff)" +96089910,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Other :Pen holders, pencil holders and similar holders(OLD tariff)" +96089990,"BALL POINT PENS; FELT TIPPED AND OTHER POROUS-TIPPED PENS AND 
MARKERS; FOUNTAIN PENS; STYLOGRAPH PENS AND OTHER PENS; DUPLICATING STYLOS; PROPELLING OR SLIDING PENCILS; PEN HOLDERS, PENCIL HOLDERS AND SIMILAR HOLDERS; PARTS (INCLUDING CAPS AND CLIPS) OF THE FOREGOING ARTICLES, OTHER THAN THOSE OF HEADING 9609:Other :Other(OLD tariff)" +96090000,pencils other than pencils of heading 9608 crayons pencil leads pastels drawing charcoals writing or drawing chalks and tailors chalks +96091000,"PENCILS (OTHER THAN PENCILS OF HEADING 9608 ), CRAYONS, PENCIL LEADS, PASTELS, DRAWING CHARCOALS, WRITING OR DRAWING CHALKS AND TAILORS' CHALKS::Pencils and crayons, with leads encased in a rigid sheath" +96092000,"PENCILS (OTHER THAN PENCILS OF HEADING 9608 ), CRAYONS, PENCIL LEADS, PASTELS, DRAWING CHARCOALS, WRITING OR DRAWING CHALKS AND TAILORS' CHALKS::Pencil leads, black or coloured" +96099000,pencils other than pencils of heading 9608 crayons pencil leads pastels drawing charcoals writing or drawing chalks and tailors chalks >> other +96099010,"PENCILS (OTHER THAN PENCILS OF HEADING 9608 ), CRAYONS, PENCIL LEADS, PASTELS, DRAWING CHARCOALS, WRITING OR DRAWING CHALKS AND TAILORS' CHALKS:Other :Slate pencils" +96099020,"PENCILS (OTHER THAN PENCILS OF HEADING 9608 ), CRAYONS, PENCIL LEADS, PASTELS, DRAWING CHARCOALS, WRITING OR DRAWING CHALKS AND TAILORS' CHALKS:Other :Other pencils" +96099030,"PENCILS (OTHER THAN PENCILS OF HEADING 9608 ), CRAYONS, PENCIL LEADS, PASTELS, DRAWING CHARCOALS, WRITING OR DRAWING CHALKS AND TAILORS' CHALKS:Other :Pastels, drawing charcoals and writing or drawing chalks and tailors chalks" +96099090,"PENCILS (OTHER THAN PENCILS OF HEADING 9608 ), CRAYONS, PENCIL LEADS, PASTELS, DRAWING CHARCOALS, WRITING OR DRAWING CHALKS AND TAILORS' CHALKS:Other :Other" +96100000,"::SLATES AND BOARDS, WITH WRITING OR DRAWING SURFACES, WHETHER OR NOT FRAMED" +96110000,"::DATE, SEALING OR NUMBERING STAMPS, AND THE LIKE (INCLUDING DEVICES FOR PRINTING OR EMBOSSING LABELS), DESIGNED FOR OPERATING IN THE HAND; HAND-OPERATED COMPOSING STICKS AND HAND PRINTING SETS INCORPORATING SUCH COMPOSING STICKS" +96120000,inked or typewriter or similar ribbons otherwise prepared for giving impressions whether or not on spools or in cartridges ink pads whether or not inked with or without boxes +96121000,inked or typewriter or similar ribbons otherwise prepared for giving impressions whether or not on spools or in cartridges ink pads whether or not inked with or without boxes >> ribbons +96121010,"TYPEWRITER OR SIMILAR RIBBONS, INKED OR OTHERWISE PREPARED FOR GIVING IMPRESSIONS, WHETHER OR NOT ON SPOOLS OR IN CARTRIDGES; INKPADS, WHETHER OR NOT INKED, WITH OR WITHOUT BOXES:Ribbons :Computer printer ribbon" +96121020,"TYPEWRITER OR SIMILAR RIBBONS, INKED OR OTHERWISE PREPARED FOR GIVING IMPRESSIONS, WHETHER OR NOT ON SPOOLS OR IN CARTRIDGES; INKPADS, WHETHER OR NOT INKED, WITH OR WITHOUT BOXES:Ribbons :Ribbon for typewriters, other than electronic and similar machines" +96121030,"TYPEWRITER OR SIMILAR RIBBONS, INKED OR OTHERWISE PREPARED FOR GIVING IMPRESSIONS, WHETHER OR NOT ON SPOOLS OR IN CARTRIDGES; INKPADS, WHETHER OR NOT INKED, WITH OR WITHOUT BOXES:Ribbons :Ribbon for electronic typewriter" +96121090,"TYPEWRITER OR SIMILAR RIBBONS, INKED OR OTHERWISE PREPARED FOR GIVING IMPRESSIONS, WHETHER OR NOT ON SPOOLS OR IN CARTRIDGES; INKPADS, WHETHER OR NOT INKED, WITH OR WITHOUT BOXES:Ribbons :Other" +96122000,"TYPEWRITER OR SIMILAR RIBBONS, INKED OR OTHERWISE PREPARED FOR GIVING IMPRESSIONS, WHETHER OR NOT ON SPOOLS OR IN CARTRIDGES; INKPADS, WHETHER OR NOT INKED, WITH 
OR WITHOUT BOXES::Ink-pads" +96130000,cigarette lighters and other lighters whether or not mechanical or electrical and parts thereof other than flints and wicks +96131000,"CIGARETTE LIGHTERS AND OTHER LIGHTERS, WHETHER OR NOT MECHANICAL OR ELECTRICAL, AND PARTS THEREOF OTHER THAN FLINTS AND WICKS::Pocket lighters, gas fuelled, non-refillable" +96132000,"CIGARETTE LIGHTERS AND OTHER LIGHTERS, WHETHER OR NOT MECHANICAL OR ELECTRICAL, AND PARTS THEREOF OTHER THAN FLINTS AND WICKS::Pocket lighters, gas fuelled, refillable(OLD tariff)" +96138010,"CIGARETTE LIGHTERS AND OTHER LIGHTERS, WHETHER OR NOT MECHANICAL OR ELECTRICAL, AND PARTS THEREOF OTHER THAN FLINTS AND WICKS:Other lighters :Electronic(OLD tariff)" +96138090,"CIGARETTE LIGHTERS AND OTHER LIGHTERS, WHETHER OR NOT MECHANICAL OR ELECTRICAL, AND PARTS THEREOF OTHER THAN FLINTS AND WICKS:Other lighters :Other(OLD tariff)" +96139000,"CIGARETTE LIGHTERS AND OTHER LIGHTERS, WHETHER OR NOT MECHANICAL OR ELECTRICAL, AND PARTS THEREOF OTHER THAN FLINTS AND WICKS::Parts(OLD tariff)" +96140000,::SMOKING PIPES (INCLUDING PIPE BOWLS) AND CIGAR OR CIGARETTE HOLDERS AND PARTS THEREOF +96150000,combs and the like hairpins curling pins curling grips and the like other than those of heading 8516 and parts thereof combs and the like +96151100,"COMBS, HAIR-SLIDES AND THE LIKE, HAIRPINS, CURLING PINS, CURLING GRIPS, HAIR-CURLERS AND THE LIKE, OTHER THAN THOSE OF HEADING 8516, AND PARTS THEREOF::Of hard rubber or plastics" +96151900,"COMBS, HAIR-SLIDES AND THE LIKE, HAIRPINS, CURLING PINS, CURLING GRIPS, HAIR-CURLERS AND THE LIKE, OTHER THAN THOSE OF HEADING 8516, AND PARTS THEREOF::Other" +96159000,"COMBS, HAIR-SLIDES AND THE LIKE, HAIRPINS, CURLING PINS, CURLING GRIPS, HAIR-CURLERS AND THE LIKE, OTHER THAN THOSE OF HEADING 8516, AND PARTS THEREOF::Other" +96160000,scent sprays and similar toilet sprays and mounts and heads therefor and pads for the application of cosmetics or toilet preparations +96161000,scent sprays and similar toilet sprays and mounts and heads therefor and pads for the application of cosmetics or toilet preparations >> scent sprays and similar toilet sprays and mounts and heads therefor +96161010,"SCENT SPRAYS AND SIMILAR TOILET SPRAYS, AND MOUNTS AND HEADS THEREFOR; POWDER-PUFFS AND PADS FOR THE APPLICATION OF COSMETICS OR TOILET PREPARATIONS:Scent sprays and similar toilet sprays, and mounts and heads therefor :Scent sprays and similar toilet sprays" +96161020,"SCENT SPRAYS AND SIMILAR TOILET SPRAYS, AND MOUNTS AND HEADS THEREFOR; POWDER-PUFFS AND PADS FOR THE APPLICATION OF COSMETICS OR TOILET PREPARATIONS:Scent sprays and similar toilet sprays, and mounts and heads therefor :Mounts and heads" +96162000,"SCENT SPRAYS AND SIMILAR TOILET SPRAYS, AND MOUNTS AND HEADS THEREFOR; POWDER-PUFFS AND PADS FOR THE APPLICATION OF COSMETICS OR TOILET PREPARATIONS::Powder-puffs and pads for the application of cosmetics or toilet preparations" +96170000,complete parts thereof other than glass inners vacuum flasks and other vacuum vessels complete parts thereof other than glass inners vacuum flasks and other vacuum vessels complete +96170011,"VACUUM FLASKS AND OTHER VACUUM VESSELS, COMPLETE WITH CASES; PARTS THEREOF OTHER THAN GLASS INNERS:Vacuum flasks and other vacuum vessels, complete with cases; parts thereof other than glass inners:Vacuum flasks having a capacity not exceeding 0.75 l" +96170012,"VACUUM FLASKS AND OTHER VACUUM VESSELS, COMPLETE WITH CASES; PARTS THEREOF OTHER THAN GLASS INNERS:Vacuum flasks and other vacuum vessels, complete 
with cases; parts thereof other than glass inners:Vacuum flasks having a capacity exceeding 0.75 l" +96170013,"VACUUM FLASKS AND OTHER VACUUM VESSELS, COMPLETE WITH CASES; PARTS THEREOF OTHER THAN GLASS INNERS:Vacuum flasks and other vacuum vessels, complete with cases; parts thereof other than glass inners:Casserol and other vacuum containers" +96170019,"VACUUM FLASKS AND OTHER VACUUM VESSELS, COMPLETE WITH CASES; PARTS THEREOF OTHER THAN GLASS INNERS:Vacuum flasks and other vacuum vessels, complete with cases; parts thereof other than glass inners:Other" +96170090,"VACUUM FLASKS AND OTHER VACUUM VESSELS, COMPLETE WITH CASES; PARTS THEREOF OTHER THAN GLASS INNERS:Vacuum flasks and other vacuum vessels, complete with cases; parts thereof other than glass inners:Parts (other than glass inners)" +96180000,"::TAILORS' DUMMIES AND OTHER LAY FIGURES; AUTOMATA AND OTHER ANIMATED DISPLAYS, USED FOR SHOP WINDOW DRESSING" +96190000,diapers napkin liners and similar articles of any material sanitary towels pads and tampons napkins diapers napkin liners and similar articles of any material +96190010,"Sanitary towels (Pads) and tampons, napkins and napkin liners for babies and similar articles, of any material:Sanitary towels (Pads) and tampons, napkins and napkin liners for babies and similar articles, of any material:Sanitary towels (pads) or sanitary napkins" +96190020,"Sanitary towels (Pads) and tampons, napkins and napkin liners for babies and similar articles, of any material:Sanitary towels (Pads) and tampons, napkins and napkin liners for babies and similar articles, of any material:Tampons(OLD tariff)" +96190030,"Sanitary towels (Pads) and tampons, napkins and napkin liners for babies and similar articles, of any material:Sanitary towels (Pads) and tampons, napkins and napkin liners for babies and similar articles, of any material:Napkins and napkin liners for babies(OLD tariff)" +96190040,"Sanitary towels (Pads) and tampons, napkins and napkin liners for babies and similar articles, of any material:Sanitary towels (Pads) and tampons, napkins and napkin liners for babies and similar articles, of any material:Clinical diapers(OLD tariff)" +96190090,"Sanitary towels (Pads) and tampons, napkins and napkin liners for babies and similar articles, of any material:Sanitary towels (Pads) and tampons, napkins and napkin liners for babies and similar articles, of any material:Other(OLD tariff)" +96200000,"::MONOPODS, BIPODS, TRIPODS AND SIMILAR ARTICLES(OLD tariff)" +97011010,"PAINTINGS, DRAWINGS AND PASTELS, EXECUTED ENTIRELY BY HAND, OTHER THAN DRAWINGS OF HEADING 4906 AND OTHER THAN HAND-PAINTED OR HAND-DECORATED MANUFACTURED ARTICLES; COLLAGES AND SIMILAR DECORATIVE PLAQUES:Paintings, drawings and pastels :Madhubani paintings (on textiles)(OLD tariff)" +97011020,"PAINTINGS, DRAWINGS AND PASTELS, EXECUTED ENTIRELY BY HAND, OTHER THAN DRAWINGS OF HEADING 4906 AND OTHER THAN HAND-PAINTED OR HAND-DECORATED MANUFACTURED ARTICLES; COLLAGES AND SIMILAR DECORATIVE PLAQUES:Paintings, drawings and pastels :Kalamkari paintings (on textiles)(OLD tariff)" +97011030,"PAINTINGS, DRAWINGS AND PASTELS, EXECUTED ENTIRELY BY HAND, OTHER THAN DRAWINGS OF HEADING 4906 AND OTHER THAN HAND-PAINTED OR HAND-DECORATED MANUFACTURED ARTICLES; COLLAGES AND SIMILAR DECORATIVE PLAQUES:Paintings, drawings and pastels :Rajasthani paintings (on textiles)(OLD tariff)" +97011090,"PAINTINGS, DRAWINGS AND PASTELS, EXECUTED ENTIRELY BY HAND, OTHER THAN DRAWINGS OF HEADING 4906 AND OTHER THAN HAND-PAINTED OR HAND-DECORATED MANUFACTURED 
ARTICLES; COLLAGES AND SIMILAR DECORATIVE PLAQUES:Paintings, drawings and pastels :Other(OLD tariff)" +97012100,"PAINTINGS, DRAWINGS AND PASTELS, EXECUTED ENTIRELY BY HAND, OTHER THAN DRAWINGS OF HEADING 4906 AND OTHER THAN HAND-PAINTED OR HAND-DECORATED MANUFACTURED ARTICLES; COLLAGES AND SIMILAR DECORATIVE PLAQUES::Paintings, drawings and pastels(OLD tariff)" +97012200,"PAINTINGS, DRAWINGS AND PASTELS, EXECUTED ENTIRELY BY HAND, OTHER THAN DRAWINGS OF HEADING 4906 AND OTHER THAN HAND-PAINTED OR HAND-DECORATED MANUFACTURED ARTICLES; COLLAGES AND SIMILAR DECORATIVE PLAQUES::Mosaics(OLD tariff)" +97012900,"PAINTINGS, DRAWINGS AND PASTELS, EXECUTED ENTIRELY BY HAND, OTHER THAN DRAWINGS OF HEADING 4906 AND OTHER THAN HAND-PAINTED OR HAND-DECORATED MANUFACTURED ARTICLES; COLLAGES AND SIMILAR DECORATIVE PLAQUES::Other(OLD tariff)" +97019091,"PAINTINGS, DRAWINGS AND PASTELS, EXECUTED ENTIRELY BY HAND, OTHER THAN DRAWINGS OF HEADING 4906 AND OTHER THAN HAND-PAINTED OR HAND-DECORATED MANUFACTURED ARTICLES; COLLAGES AND SIMILAR DECORATIVE PLAQUES:Other :Domestic articles of wood (hand decorated )(OLD tariff)" +97019092,"PAINTINGS, DRAWINGS AND PASTELS, EXECUTED ENTIRELY BY HAND, OTHER THAN DRAWINGS OF HEADING 4906 AND OTHER THAN HAND-PAINTED OR HAND-DECORATED MANUFACTURED ARTICLES; COLLAGES AND SIMILAR DECORATIVE PLAQUES:Other :Restaurant decoration of plastics(OLD tariff)" +97019099,"PAINTINGS, DRAWINGS AND PASTELS, EXECUTED ENTIRELY BY HAND, OTHER THAN DRAWINGS OF HEADING 4906 AND OTHER THAN HAND-PAINTED OR HAND-DECORATED MANUFACTURED ARTICLES; COLLAGES AND SIMILAR DECORATIVE PLAQUES:Other :Other(OLD tariff)" +97019100,"PAINTINGS, DRAWINGS AND PASTELS, EXECUTED ENTIRELY BY HAND, OTHER THAN DRAWINGS OF HEADING 4906 AND OTHER THAN HAND-PAINTED OR HAND-DECORATED MANUFACTURED ARTICLES; COLLAGES AND SIMILAR DECORATIVE PLAQUES::Paintings, drawings and pastels(OLD tariff)" +97019200,"PAINTINGS, DRAWINGS AND PASTELS, EXECUTED ENTIRELY BY HAND, OTHER THAN DRAWINGS OF HEADING 4906 AND OTHER THAN HAND-PAINTED OR HAND-DECORATED MANUFACTURED ARTICLES; COLLAGES AND SIMILAR DECORATIVE PLAQUES::Mosaics(OLD tariff)" +97019900,"PAINTINGS, DRAWINGS AND PASTELS, EXECUTED ENTIRELY BY HAND, OTHER THAN DRAWINGS OF HEADING 4906 AND OTHER THAN HAND-PAINTED OR HAND-DECORATED MANUFACTURED ARTICLES; COLLAGES AND SIMILAR DECORATIVE PLAQUES::Other(OLD tariff)" +97020000,"::ORIGINAL ENGRAVINGS, PRINTS AND LITHOGRAPHS" +97021000,"ORIGINAL ENGRAVINGS, PRINTS AND LITHOGRAPHS::Of an age exceeding 100 years" +97029000,"ORIGINAL ENGRAVINGS, PRINTS AND LITHOGRAPHS::Other" +97030000,original sculptures and statuary in any material +97030010,"ORIGINAL SCULPTURES AND STATUARY, IN ANY MATERIAL:Original sculptures and statuary, in any material:Original sculptures and statuary, in metal(OLD tariff)" +97030020,"ORIGINAL SCULPTURES AND STATUARY, IN ANY MATERIAL:Original sculptures and statuary, in any material:Original sculptures and statuary, in stone(OLD tariff)" +97030090,"ORIGINAL SCULPTURES AND STATUARY, IN ANY MATERIAL:Original sculptures and statuary, in any material:Original sculptures and statuary, in other materials(OLD tariff)" +97031000,original sculptures and statuary in any material >> of an age exceeding 100 years +97031010,"ORIGINAL SCULPTURES AND STATUARY, IN ANY MATERIAL:Of an age exceeding 100 years ::Of metal" +97031020,"ORIGINAL SCULPTURES AND STATUARY, IN ANY MATERIAL:Of an age exceeding 100 years ::Of stone" +97031090,"ORIGINAL SCULPTURES AND STATUARY, IN ANY MATERIAL:Of an age exceeding 100 years ::Other" 
+97039000,original sculptures and statuary in any material >> other +97039010,"ORIGINAL SCULPTURES AND STATUARY, IN ANY MATERIAL:Other:Of metal" +97039020,"ORIGINAL SCULPTURES AND STATUARY, IN ANY MATERIAL:Other:Of stone" +97039090,"ORIGINAL SCULPTURES AND STATUARY, IN ANY MATERIAL:Other:Other" +97040000,postage or revenue stamps marks covers postal stationery stamped paper and the like used or unused other than those of heading 4907 postage or revenue stamps marks covers postal stationery stamped paper and the like used or unused other than those of heading 4907 +97040010,"POSTAGE OR REVENUE STAMPS, STAMP-POST MARKS, FIRST-DAY COVERS, POSTAL STATIONERY (STAMPED PAPER), AND THE LIKE, USED OR UNUSED , OTHER THAN THOSE OF HEADING 4907:Postage or revenue stamps, stamp-post marks, first-day covers, postal stationery (stamped paper), and the like, used or unused, other than those of heading 4907 :Used postal stamp" +97040020,"POSTAGE OR REVENUE STAMPS, STAMP-POST MARKS, FIRST-DAY COVERS, POSTAL STATIONERY (STAMPED PAPER), AND THE LIKE, USED OR UNUSED , OTHER THAN THOSE OF HEADING 4907:Postage or revenue stamps, stamp-post marks, first-day covers, postal stationery (stamped paper), and the like, used or unused, other than those of heading 4907 :Used or unused first-day covers for philatelists" +97040090,"POSTAGE OR REVENUE STAMPS, STAMP-POST MARKS, FIRST-DAY COVERS, POSTAL STATIONERY (STAMPED PAPER), AND THE LIKE, USED OR UNUSED , OTHER THAN THOSE OF HEADING 4907:Postage or revenue stamps, stamp-post marks, first-day covers, postal stationery (stamped paper), and the like, used or unused, other than those of heading 4907 :Other" +97050000,archaeological ethnographic historical zoological botanical mineralogical anatomical paleontological or numismatic interest +97050010,"COLLECTIONS AND COLLECTORS' PIECES OF ZOOLOGICAL, BOTANICAL, MINERALOGICAL, ANATOMICAL, HISTORICAL, ARCHAEOLOGICAL, PALAEONTOLOGICAL, ETHNOGRAPHIC OR NUMISMATIC INTEREST:Collections and collectors' pieces of zoological, botanical, mineralogical, anatomical, historical, archaeological, palaeontological, ethnographic or numismatic interest:Stuffed animals and birds (taxidermy)(OLD tariff)" +97050090,"COLLECTIONS AND COLLECTORS' PIECES OF ZOOLOGICAL, BOTANICAL, MINERALOGICAL, ANATOMICAL, HISTORICAL, ARCHAEOLOGICAL, PALAEONTOLOGICAL, ETHNOGRAPHIC OR NUMISMATIC INTEREST:Collections and collectors' pieces of zoological, botanical, mineralogical, anatomical, historical, archaeological, palaeontological, ethnographic or numismatic interest:Other(OLD tariff)" +97051000,"COLLECTIONS AND COLLECTORS' PIECES OF ZOOLOGICAL, BOTANICAL, MINERALOGICAL, ANATOMICAL, HISTORICAL, ARCHAEOLOGICAL, PALAEONTOLOGICAL, ETHNOGRAPHIC OR NUMISMATIC INTEREST::Collections and collectors' pieces of archaeological, ethnographic or historical interest" +97052100,"COLLECTIONS AND COLLECTORS' PIECES OF ZOOLOGICAL, BOTANICAL, MINERALOGICAL, ANATOMICAL, HISTORICAL, ARCHAEOLOGICAL, PALAEONTOLOGICAL, ETHNOGRAPHIC OR NUMISMATIC INTEREST::Human specimens and parts thereof" +97052200,"COLLECTIONS AND COLLECTORS' PIECES OF ZOOLOGICAL, BOTANICAL, MINERALOGICAL, ANATOMICAL, HISTORICAL, ARCHAEOLOGICAL, PALAEONTOLOGICAL, ETHNOGRAPHIC OR NUMISMATIC INTEREST::Extinct or endangered species and parts thereof" +97052900,"COLLECTIONS AND COLLECTORS' PIECES OF ZOOLOGICAL, BOTANICAL, MINERALOGICAL, ANATOMICAL, HISTORICAL, ARCHAEOLOGICAL, PALAEONTOLOGICAL, ETHNOGRAPHIC OR NUMISMATIC INTEREST::Other" +97053100,"COLLECTIONS AND COLLECTORS' PIECES OF ZOOLOGICAL, BOTANICAL, MINERALOGICAL, 
ANATOMICAL, HISTORICAL, ARCHAEOLOGICAL, PALAEONTOLOGICAL, ETHNOGRAPHIC OR NUMISMATIC INTEREST::Of an age exceeding 100 years" +97053900,"COLLECTIONS AND COLLECTORS' PIECES OF ZOOLOGICAL, BOTANICAL, MINERALOGICAL, ANATOMICAL, HISTORICAL, ARCHAEOLOGICAL, PALAEONTOLOGICAL, ETHNOGRAPHIC OR NUMISMATIC INTEREST::Other" +97060000,::ANTIQUES OF AN AGE EXCEEDING ONE HUNDRED YEARS(OLD tariff) +97061000,ANTIQUES OF AN AGE EXCEEDING 100 YEARS::Of an age exceeding 250 years(OLD tariff) +97069000,ANTIQUES OF AN AGE EXCEEDING 100 YEARS::Other(OLD tariff) +98010000,all items of machinery including prime movers instruments apparatus and appliances control gear and transmission equipment auxiliary equipment including those required for research and development purposes testing and quality control as well as all components whether finished or not or raw materials for the manufacture of the aforesaid items and their components required for the initial setting up of a unit or the substantial expansion of an existing unit of a specified 1 industrial plant 2 irrigation project 3 power project other than solar power plant or solar power project 4 mining project 5 project for the exploration for oil or other minerals and 6 such other projects other than solar power plant or solar power project as the central government may having regard to the economic development of the country notify in the official gazette in this behalf and spare parts other raw materials including semi finished materials of consumable stores not exceeding 10 of the value of the goods specified above provided that such spare parts raw materials or consumable stores are essential for the maintenance of the plant or project mentioned in 1 to 6 above machinery +98010011,"ALL ITEMS OF MACHINERY INCLUDING PRIME MOVERS, INSTRUMENTS, APPARATUS AND APPLIANCES, CONTROL GEAR AND TRANSMISSION EQUIPMENT, AUXILIARY EQUIPMENT (INCLUDING THOSE REQUIRED FOR RESEARCH AND DEVELOPMENT PURPOSES, TESTING AND QUALITY CONTROL), AS WELL AS ALL COMPONENTS (WHETHER FINISHED OR NOT) OR RAW MATERIALS FOR THE MANUFACTURE OF THE AFORESAID ITEMS AND THEIR COMPONENTS, REQUIRED FOR THE INITIAL SETTING UP OF A UNIT, OR THE SUBSTANTIAL EXPANSION OF AN EXISTING UNIT, OF A SPECIFIED (1) INDUSTRIAL PLANT, (2) IRRIGATION PROJECT, (3) POWER PROJECT, (4) MINING PROJECT, (5) PROJECT FOR THE EXPLORATION FOR OIL OR OTHER MINERALS, AND (6) SUCH OTHER PROJECTS AS THE CENTRAL GOVERNMENT may, having regard to the economic development of the country notify in the Official Gazette in this behalf; and spare parts, other raw materials (including semi-finished material) or consumable stores not exceeding 10% of the value of the goods specified above provided that such spare parts, raw materials or consumable stores are essential for the maintenance of the plant or project mentioned in (1) to 6 above ::For industrial plant project" +98010012,"ALL ITEMS OF MACHINERY INCLUDING PRIME MOVERS, INSTRUMENTS, APPARATUS AND APPLIANCES, CONTROL GEAR AND TRANSMISSION EQUIPMENT, AUXILIARY EQUIPMENT (INCLUDING THOSE REQUIRED FOR RESEARCH AND DEVELOPMENT PURPOSES, TESTING AND QUALITY CONTROL), AS WELL AS ALL COMPONENTS (WHETHER FINISHED OR NOT) OR RAW MATERIALS FOR THE MANUFACTURE OF THE AFORESAID ITEMS AND THEIR COMPONENTS, REQUIRED FOR THE INITIAL SETTING UP OF A UNIT, OR THE SUBSTANTIAL EXPANSION OF AN EXISTING UNIT, OF A SPECIFIED (1) INDUSTRIAL PLANT, (2) IRRIGATION PROJECT, (3) POWER PROJECT, (4) MINING PROJECT, (5) PROJECT FOR THE EXPLORATION FOR OIL OR OTHER MINERALS, AND (6) SUCH OTHER PROJECTS AS THE 
CENTRAL GOVERNMENT may, having regard to the economic development of the country notify in the Official Gazette in this behalf; and spare parts, other raw materials (including semi-finished material) or consumable stores not exceeding 10% of the value of the goods specified above provided that such spare parts, raw materials or consumable stores are essential for the maintenance of the plant or project mentioned in (1) to 6 above ::For irrigation plant" +98010013,"ALL ITEMS OF MACHINERY INCLUDING PRIME MOVERS, INSTRUMENTS, APPARATUS AND APPLIANCES, CONTROL GEAR AND TRANSMISSION EQUIPMENT, AUXILIARY EQUIPMENT (INCLUDING THOSE REQUIRED FOR RESEARCH AND DEVELOPMENT PURPOSES, TESTING AND QUALITY CONTROL), AS WELL AS ALL COMPONENTS (WHETHER FINISHED OR NOT) OR RAW MATERIALS FOR THE MANUFACTURE OF THE AFORESAID ITEMS AND THEIR COMPONENTS, REQUIRED FOR THE INITIAL SETTING UP OF A UNIT, OR THE SUBSTANTIAL EXPANSION OF AN EXISTING UNIT, OF A SPECIFIED (1) INDUSTRIAL PLANT, (2) IRRIGATION PROJECT, (3) POWER PROJECT, (4) MINING PROJECT, (5) PROJECT FOR THE EXPLORATION FOR OIL OR OTHER MINERALS, AND (6) SUCH OTHER PROJECTS AS THE CENTRAL GOVERNMENT may, having regard to the economic development of the country notify in the Official Gazette in this behalf; and spare parts, other raw materials (including semi-finished material) or consumable stores not exceeding 10% of the value of the goods specified above provided that such spare parts, raw materials or consumable stores are essential for the maintenance of the plant or project mentioned in (1) to 6 above ::For power project" +98010014,"ALL ITEMS OF MACHINERY INCLUDING PRIME MOVERS, INSTRUMENTS, APPARATUS AND APPLIANCES, CONTROL GEAR AND TRANSMISSION EQUIPMENT, AUXILIARY EQUIPMENT (INCLUDING THOSE REQUIRED FOR RESEARCH AND DEVELOPMENT PURPOSES, TESTING AND QUALITY CONTROL), AS WELL AS ALL COMPONENTS (WHETHER FINISHED OR NOT) OR RAW MATERIALS FOR THE MANUFACTURE OF THE AFORESAID ITEMS AND THEIR COMPONENTS, REQUIRED FOR THE INITIAL SETTING UP OF A UNIT, OR THE SUBSTANTIAL EXPANSION OF AN EXISTING UNIT, OF A SPECIFIED (1) INDUSTRIAL PLANT, (2) IRRIGATION PROJECT, (3) POWER PROJECT, (4) MINING PROJECT, (5) PROJECT FOR THE EXPLORATION FOR OIL OR OTHER MINERALS, AND (6) SUCH OTHER PROJECTS AS THE CENTRAL GOVERNMENT may, having regard to the economic development of the country notify in the Official Gazette in this behalf; and spare parts, other raw materials (including semi-finished material) or consumable stores not exceeding 10% of the value of the goods specified above provided that such spare parts, raw materials or consumable stores are essential for the maintenance of the plant or project mentioned in (1) to 6 above ::For mining project" +98010015,"ALL ITEMS OF MACHINERY INCLUDING PRIME MOVERS, INSTRUMENTS, APPARATUS AND APPLIANCES, CONTROL GEAR AND TRANSMISSION EQUIPMENT, AUXILIARY EQUIPMENT (INCLUDING THOSE REQUIRED FOR RESEARCH AND DEVELOPMENT PURPOSES, TESTING AND QUALITY CONTROL), AS WELL AS ALL COMPONENTS (WHETHER FINISHED OR NOT) OR RAW MATERIALS FOR THE MANUFACTURE OF THE AFORESAID ITEMS AND THEIR COMPONENTS, REQUIRED FOR THE INITIAL SETTING UP OF A UNIT, OR THE SUBSTANTIAL EXPANSION OF AN EXISTING UNIT, OF A SPECIFIED (1) INDUSTRIAL PLANT, (2) IRRIGATION PROJECT, (3) POWER PROJECT, (4) MINING PROJECT, (5) PROJECT FOR THE EXPLORATION FOR OIL OR OTHER MINERALS, AND (6) SUCH OTHER PROJECTS AS THE CENTRAL GOVERNMENT may, having regard to the economic development of the country notify in the Official Gazette in this behalf; and spare parts, other 
raw materials (including semi-finished material) or consumable stores not exceeding 10% of the value of the goods specified above provided that such spare parts, raw materials or consumable stores are essential for the maintenance of the plant or project mentioned in (1) to 6 above ::Project for exploration of oil or other minerals" +98010019,"ALL ITEMS OF MACHINERY INCLUDING PRIME MOVERS, INSTRUMENTS, APPARATUS AND APPLIANCES, CONTROL GEAR AND TRANSMISSION EQUIPMENT, AUXILIARY EQUIPMENT (INCLUDING THOSE REQUIRED FOR RESEARCH AND DEVELOPMENT PURPOSES, TESTING AND QUALITY CONTROL), AS WELL AS ALL COMPONENTS (WHETHER FINISHED OR NOT) OR RAW MATERIALS FOR THE MANUFACTURE OF THE AFORESAID ITEMS AND THEIR COMPONENTS, REQUIRED FOR THE INITIAL SETTING UP OF A UNIT, OR THE SUBSTANTIAL EXPANSION OF AN EXISTING UNIT, OF A SPECIFIED (1) INDUSTRIAL PLANT, (2) IRRIGATION PROJECT, (3) POWER PROJECT, (4) MINING PROJECT, (5) PROJECT FOR THE EXPLORATION FOR OIL OR OTHER MINERALS, AND (6) SUCH OTHER PROJECTS AS THE CENTRAL GOVERNMENT may, having regard to the economic development of the country notify in the Official Gazette in this behalf; and spare parts, other raw materials (including semi-finished material) or consumable stores not exceeding 10% of the value of the goods specified above provided that such spare parts, raw materials or consumable stores are essential for the maintenance of the plant or project mentioned in (1) to 6 above ::For other projects" +98010020,"ALL ITEMS OF MACHINERY INCLUDING PRIME MOVERS, INSTRUMENTS, APPARATUS AND APPLIANCES, CONTROL GEAR AND TRANSMISSION EQUIPMENT, AUXILIARY EQUIPMENT (INCLUDING THOSE REQUIRED FOR RESEARCH AND DEVELOPMENT PURPOSES, TESTING AND QUALITY CONTROL), AS WELL AS ALL COMPONENTS (WHETHER FINISHED OR NOT) OR RAW MATERIALS FOR THE MANUFACTURE OF THE AFORESAID ITEMS AND THEIR COMPONENTS, REQUIRED FOR THE INITIAL SETTING UP OF A UNIT, OR THE SUBSTANTIAL EXPANSION OF AN EXISTING UNIT, OF A SPECIFIED (1) INDUSTRIAL PLANT, (2) IRRIGATION PROJECT, (3) POWER PROJECT, (4) MINING PROJECT, (5) PROJECT FOR THE EXPLORATION FOR OIL OR OTHER MINERALS, AND (6) SUCH OTHER PROJECTS AS THE CENTRAL GOVERNMENT may, having regard to the economic development of the country notify in the Official Gazette in this behalf; and spare parts, other raw materials (including semi-finished material) or consumable stores not exceeding 10% of the value of the goods specified above provided that such spare parts, raw materials or consumable stores are essential for the maintenance of the plant or project mentioned in (1) to 6 above ::Components (whether or not finished or not) or raw materials for the manufacture of aforesaid items required for the initial setting up of a unit or the substantial expansion of a unit" +98010030,"ALL ITEMS OF MACHINERY INCLUDING PRIME MOVERS, INSTRUMENTS, APPARATUS AND APPLIANCES, CONTROL GEAR AND TRANSMISSION EQUIPMENT, AUXILIARY EQUIPMENT (INCLUDING THOSE REQUIRED FOR RESEARCH AND DEVELOPMENT PURPOSES, TESTING AND QUALITY CONTROL), AS WELL AS ALL COMPONENTS (WHETHER FINISHED OR NOT) OR RAW MATERIALS FOR THE MANUFACTURE OF THE AFORESAID ITEMS AND THEIR COMPONENTS, REQUIRED FOR THE INITIAL SETTING UP OF A UNIT, OR THE SUBSTANTIAL EXPANSION OF AN EXISTING UNIT, OF A SPECIFIED (1) INDUSTRIAL PLANT, (2) IRRIGATION PROJECT, (3) POWER PROJECT, (4) MINING PROJECT, (5) PROJECT FOR THE EXPLORATION FOR OIL OR OTHER MINERALS, AND (6) SUCH OTHER PROJECTS AS THE CENTRAL GOVERNMENT may, having regard to the economic development of the country notify in the Official 
Gazette in this behalf; and spare parts, other raw materials (including semi-finished material) or consumable stores not exceeding 10% of the value of the goods specified above provided that such spare parts, raw materials or consumable stores are essential for the maintenance of the plant or project mentioned in (1) to 6 above ::Spare parts and other raw materials (including semi-finished materials or consumable stores for the maintenance of plant or project" +98020000,::LABORATORY CHEMICALS +98030000,"::ALL DUTIABLE ARTICLES, IMPORTED BY A PASSENGER OR A MEMBER OF A CREW IN HIS BAGGAGE" +98040000,all dutiable goods imported for personal use +98041000,"ALL DUTIABLE ARTICLES, INTENDED FOR PERSONAL USE, IMPORTED BY POST OR AIR, AND EXEMPTED FROM ANY PROHIBITION IN RESPECT OF THE IMPORTS THEREOF UNDER THE FOREIGN TRADE (DEVELOPMENT AND REGULATION) ACT, 1992 (22 OF 1992) BUT EXCLUDING ARTICLES FALLING UNDER::Drugs and medicines" +98049000,"ALL DUTIABLE ARTICLES, INTENDED FOR PERSONAL USE, IMPORTED BY POST OR AIR, AND EXEMPTED FROM ANY PROHIBITION IN RESPECT OF THE IMPORTS THEREOF UNDER THE FOREIGN TRADE (DEVELOPMENT AND REGULATION) ACT, 1992 (22 OF 1992) BUT EXCLUDING ARTICLES FALLING UNDER::Other" +98050000,the following articles of stores on board of a vessel or aircraft on which duty is leviable under the customs act 1962 52 of 1962 namely +98051000,"THE FOLLOWING ARTICLES OF STORES ON BOARD OF A VESSEL OR AIRCRAFT ON WHICH DUTY IS LEVIABLE UNDER THE CUSTOMS ACT, 1962 (52 OF 1962), NAMELY::Prepared or preserved meat, fish and vegetables; dairy products; soup; lard; fresh fruits" +98059000,"THE FOLLOWING ARTICLES OF STORES ON BOARD OF A VESSEL OR AIRCRAFT ON WHICH DUTY IS LEVIABLE UNDER THE CUSTOMS ACT, 1962 (52 OF 1962), NAMELY::All other consumable stores excluding fuel, lubricating oil, alcoholic drinks and tobacco products" +98060000,All goods:all goods:All goods originating in or exported from the Islamic republic of Pakistan(OLD tariff) diff --git a/CTH_WISE_DUTY_RATE.csv b/CTH_WISE_DUTY_RATE.csv new file mode 100644 index 0000000000000000000000000000000000000000..00a97d48eff65fb24a917768db77e305af8b99dd --- /dev/null +++ b/CTH_WISE_DUTY_RATE.csv @@ -0,0 +1,7936 @@ +CTH,DUTY_RATE +01012100,0 +01012990,30 +01061900,30 +01063900,5 +01064910,30 +01064990,30 +01069000,30 +02023000,30 +02031900,30 +02032100,30 +02032200,30 +02032900,30 +02044200,30 +02044300,30 +02062900,30 +02071200,30 +02072500,30 +02072700,30 +02074200,0 +02074500,0 +03011100,30 +03011900,30 +03019900,30 +03021400,30 +03027200,30 +03027300,30 +03027400,30 +03027900,30 +03028910,5 +03028930,30 +03028990,30 +03031200,30 +03034300,0 +03034400,30 +03034500,30 +03034900,0 +03036300,30 +03036900,30 +03038300,30 +03038400,30 +03038910,0 +03038930,30 +03038950,30 +03038980,30 +03043300,30 +03044100,30 +03044940,30 +03046200,30 +03047100,30 +03047900,30 +03048100,30 +03048300,30 +03048700,30 +03049300,30 +03049400,30 +03049900,30 +03054100,30 +03055990,30 +03061400,30 +03061719,30 +03061720,30 +03061740,30 +03061790,30 +03061900,30 +03063620,10 +03063640,10 +03063660,5 +03072200,30 +03074320,15 +03089000,30 +04015000,30 +04021010,60 +04022100,60 +04041020,40 +04041090,40 +04051000,40 +04052000,40 +04059020,0 +04061000,30 +04063000,30 +04064000,30 +04069000,40 +04090000,60 +04109090,30 +05021010,30 +05051090,30 +05069099,30 +05079090,30 +05080010,0 +05080020,30 +05080030,30 +05080050,30 +05080090,30 +05119140,5 +05119190,30 +06011000,5 +06012010,10 +06021000,0 +06022020,5 +06022090,10 +06024000,5 +06029010,10 +06029020,5 
+06029030,10 +06029090,10 +06031900,60 +06039000,60 +06042000,30 +06049000,30 +07019000,0 +07032000,100 +07061000,30 +07092000,30 +07095900,30 +07099390,30 +07099990,30 +07101000,0 +07102100,0 +07104000,5 +07108090,30 +07115900,30 +07119020,0 +07119090,30 +07122000,30 +07123100,30 +07123900,0 +07129050,30 +07129090,30 +07132020,10 +07133110,30 +07133300,5 +07133500,0 +07133990,0 +07134000,0 +07135000,0 +07136000,30 +08011100,70 +08013100,2.5 +08013220,0 +08013290,30 +08021100,0 +08021200,0 +08022200,30 +08023100,120 +08023200,100 +08025100,10 +08025200,10 +08026200,30 +08027000,30 +08028010,100 +08028020,100 +08028090,100 +08029100,100 +08029200,100 +08029900,100 +08041010,30 +08041020,20 +08041030,20 +08041090,30 +08042090,30 +08044000,30 +08045010,30 +08045029,30 +08045090,30 +08051000,30 +08052100,30 +08054000,25 +08055000,30 +08061000,30 +08062010,105 +08062090,105 +08071910,30 +08071990,30 +08081000,75 +08083000,30 +08091000,30 +08092900,30 +08093000,30 +08094000,25 +08101000,30 +08102000,30 +08104000,30 +08105000,30 +08107000,30 +08109010,30 +08109020,30 +08109060,30 +08109090,30 +08111090,30 +08112020,30 +08112090,30 +08119090,30 +08131000,30 +08132000,25 +08133000,30 +08135020,30 +09011129,100 +09011139,100 +09011145,100 +09011149,100 +09011190,100 +09012190,100 +09021010,100 +09021020,100 +09021030,0 +09021090,100 +09022020,100 +09023010,100 +09024020,100 +09024030,100 +09024040,100 +09024060,100 +09024090,100 +09041110,30 +09041120,70 +09041130,70 +09041140,0 +09041190,70 +09041200,70 +09042110,70 +09042120,70 +09042211,70 +09042219,70 +09051000,30 +09061110,30 +09061190,30 +09061910,30 +09062000,30 +09071010,35 +09071020,35 +09071030,35 +09071090,35 +09081110,30 +09081120,30 +09081200,30 +09082100,30 +09082200,5 +09083110,0 +09083120,70 +09083190,70 +09092110,5 +09092190,30 +09092200,30 +09093129,30 +09096119,0 +09096139,30 +09096149,30 +09101110,0 +09101120,30 +09101190,30 +09101210,30 +09102010,30 +09102090,30 +09103020,30 +09103030,30 +09109100,30 +09109919,0 +09109929,30 +09109990,30 +10039000,0 +10049000,0 +10051000,50 +10059030,50 +10061010,80 +10063010,70 +10063020,70 +10063090,70 +10071000,5 +10082920,50 +11010000,30 +11022000,30 +11029090,30 +11041200,30 +11042200,15 +11043000,30 +11052000,30 +11063010,30 +11063090,30 +11071000,30 +11072000,30 +11081100,30 +11081200,30 +11081300,30 +11081400,50 +11081990,50 +11082000,30 +11090000,30 +12011000,5 +12019000,45 +12024210,30 +12030000,70 +12060010,30 +12060090,30 +12071010,5 +12074090,30 +12075090,30 +12077010,5 +12077090,30 +12079100,20 +12079930,30 +12079990,30 +12091000,15 +12092500,15 +12093000,15 +12099110,5 +12099120,5 +12099130,5 +12099140,5 +12099150,5 +12099160,5 +12099190,5 +12099910,5 +12099990,5 +12102000,30 +12112000,30 +12119011,30 +12119019,15 +12119021,15 +12119026,15 +12119029,30 +12119032,30 +12119039,30 +12119042,15 +12119045,0 +12119049,30 +12119050,15 +12119070,0 +12119080,30 +12119091,0 +12119092,30 +12119094,30 +12119096,30 +12119099,30 +12122110,30 +12122190,30 +12122910,0 +12129990,30 +12130000,30 +13012000,30 +13019011,30 +13019013,5 +13019016,30 +13019018,0 +13019019,30 +13019021,30 +13019022,30 +13019029,0 +13019033,30 +13019039,30 +13019043,30 +13019045,30 +13019049,30 +13019099,30 +13021200,30 +13021300,30 +13021911,30 +13021914,15 +13021918,30 +13021919,30 +13021920,15 +13021990,30 +13022000,15 +13023100,30 +13023230,30 +13023240,30 +13023290,30 +13023900,30 +14011000,25 +14012000,30 +14049029,30 +14049030,0 +14049050,0 +14049070,0 +14049090,30 +15042020,30 +15050010,15 +15050020,15 
+15050090,15 +15071000,0 +15079010,32.5 +15079090,45 +15089091,45 +15089099,45 +15092000,35 +15093000,40 +15099010,40 +15099090,40 +15109010,45 +15109090,45 +15111000,0 +15119010,32.5 +15119020,32.5 +15119030,12.5 +15119090,12.5 +15121110,0 +15121120,35 +15121910,32.5 +15121990,100 +15122910,35 +15122990,100 +15131100,100 +15131900,45 +15132110,35 +15132910,35 +15132990,45 +15141120,35 +15141920,45 +15141990,45 +15151990,100 +15152910,45 +15152990,100 +15153090,100 +15155010,100 +15155091,45 +15155099,100 +15156000,45 +15159040,100 +15159091,45 +15159099,100 +15162019,80 +15162031,45 +15162039,80 +15162091,80 +15162099,80 +15171021,45 +15179090,100 +15180019,30 +15180029,30 +15180031,45 +15180039,100 +15200000,30 +15211019,30 +15211090,30 +15219010,30 +15219090,30 +16010000,100 +16023900,30 +16024100,30 +16024200,30 +16024900,30 +16030090,30 +16041100,30 +16041310,30 +16041410,30 +16041500,30 +16041700,30 +16042000,30 +16043200,30 +16051000,30 +16055500,30 +16056900,30 +17011490,100 +17019910,100 +17019990,100 +17021110,25 +17021190,25 +17021910,25 +17021990,25 +17022090,30 +17023010,30 +17023020,30 +17023031,30 +17023039,30 +17025000,30 +17026090,30 +17029010,0 +17029020,30 +17029030,30 +17029040,30 +17029090,30 +17031000,0 +17039090,30 +17041000,45 +17049010,30 +17049020,30 +17049030,30 +17049090,30 +18010000,15 +18031000,30 +18032000,30 +18040000,30 +18050000,30 +18061000,0 +18062000,30 +18063100,30 +18063200,30 +18069010,30 +18069020,30 +18069030,30 +18069040,30 +18069090,30 +19011090,0 +19012000,30 +19019010,30 +19019090,30 +19021900,30 +19022010,0 +19022090,30 +19023010,30 +19023090,30 +19024090,30 +19030000,50 +19041010,30 +19041090,30 +19042000,30 +19049000,30 +19052000,30 +19053100,30 +19053211,30 +19053219,30 +19053290,30 +19054000,30 +19059010,30 +19059020,30 +19059030,30 +19059040,30 +19059090,30 +20011000,30 +20019000,30 +20021000,30 +20029000,30 +20031000,30 +20039010,30 +20039090,30 +20049000,30 +20052000,30 +20054000,30 +20055900,30 +20056000,30 +20057000,30 +20058000,30 +20059900,30 +20060000,30 +20079100,0 +20079910,30 +20079920,0 +20079930,30 +20079940,30 +20079990,30 +20081100,30 +20081910,45 +20081920,30 +20081930,30 +20081940,30 +20081990,30 +20082000,30 +20083090,30 +20085000,30 +20086000,30 +20087000,30 +20088000,30 +20089100,30 +20089300,10 +20089911,0 +20089913,0 +20089914,0 +20089919,30 +20089991,30 +20089993,30 +20089999,30 +20091100,35 +20091200,35 +20091900,35 +20093100,50 +20093900,50 +20095000,50 +20096900,50 +20097100,50 +20097900,50 +20098100,50 +20098990,50 +20099000,50 +21011110,30 +21011120,30 +21011190,30 +21011200,30 +21012010,30 +21012090,30 +21021010,30 +21021020,30 +21021090,30 +21022000,30 +21023000,30 +21031000,30 +21032000,30 +21033000,30 +21039010,30 +21039020,30 +21039030,30 +21039040,30 +21039090,30 +21041090,30 +21042000,30 +21050000,30 +21061000,40 +21069011,50 +21069019,150 +21069030,50 +21069040,50 +21069050,150 +21069060,150 +21069091,0 +21069092,150 +21069099,150 +22011010,30 +22011020,30 +22019090,30 +22021010,30 +22021090,30 +22029100,30 +22029910,30 +22029920,30 +22029930,30 +22029990,30 +22030000,100 +22041000,150 +22042110,150 +22042120,150 +22042190,150 +22042210,50 +22042290,150 +22042910,50 +22042920,50 +22042990,150 +22043000,50 +22051000,50 +22060000,50 +22071019,150 +22071090,150 +22072000,5 +22082011,50 +22082019,50 +22082091,50 +22082099,50 +22083011,150 +22083012,50 +22083013,50 +22083019,150 +22083091,50 +22083092,50 +22083093,50 +22083099,150 +22084011,50 +22084012,50 +22084091,50 +22084092,150 +22085011,150 +22085012,50 
+22085091,150 +22086000,150 +22087011,150 +22087091,50 +22089011,50 +22089019,50 +22089099,150 +22090010,30 +22090020,30 +22090090,30 +23012011,15 +23012019,15 +23021090,15 +23023000,15 +23024000,15 +23025000,15 +23031000,15 +23033000,15 +23040010,0 +23040030,15 +23040090,15 +23061020,15 +23061030,15 +23063020,15 +23064900,0 +23065010,15 +23065020,15 +23066000,15 +23069012,15 +23069017,15 +23069021,0 +23069022,0 +23069090,15 +23080000,15 +23091000,20 +23099020,15 +23099031,15 +23099039,15 +23099090,15 +24011010,30 +24012010,30 +24012090,30 +24021010,60 +24022090,30 +24031110,30 +24039970,30 +24039990,30 +25010010,5 +25010020,5 +25010090,5 +25020000,5 +25030010,2.5 +25030090,5 +25041010,5 +25041020,5 +25041090,5 +25049090,5 +25051011,5 +25051019,5 +25051020,5 +25061010,5 +25061020,5 +25062010,0 +25062090,5 +25070010,5 +25070022,5 +25070029,5 +25081010,5 +25081090,5 +25083090,5 +25084010,5 +25084090,5 +25085010,5 +25085022,5 +25085023,5 +25085039,5 +25086000,5 +25101010,2.5 +25102010,2.5 +25102030,2.5 +25111020,5 +25111090,5 +25120010,5 +25120030,5 +25120090,5 +25131000,5 +25132090,5 +25151210,40 +25151220,40 +25151290,40 +25152010,10 +25161100,40 +25161200,40 +25162000,10 +25169090,5 +25171020,5 +25171090,5 +25174900,5 +25181000,10 +25182000,10 +25191000,5 +25199010,5 +25199020,5 +25199030,5 +25199040,5 +25199090,5 +25201010,2.5 +25201090,2.5 +25202010,5 +25202090,5 +25210010,5 +25210090,5 +25221000,5 +25222000,5 +25223000,5 +25231000,10 +25232100,10 +25232910,0 +25232930,0 +25232940,0 +25233000,10 +25239020,10 +25239090,10 +25249021,10 +25249099,10 +25251010,5 +25251090,5 +25252050,5 +25252090,5 +25261020,0 +25261090,5 +25262000,5 +25280030,2.5 +25280090,2.5 +25291020,5 +25292100,5 +25292200,5 +25293000,5 +25301010,5 +25301020,5 +25301090,5 +25309030,5 +25309050,5 +25309070,5 +25309099,5 +26011112,2.5 +26011119,2.5 +26011150,2.5 +26011290,2.5 +26020010,2.5 +26020020,2.5 +26020030,2.5 +26020040,2.5 +26020050,2.5 +26020060,2.5 +26020070,2.5 +26020090,2.5 +26030000,2.5 +26060010,2.5 +26060020,2.5 +26060090,2.5 +26070000,2.5 +26080000,2.5 +26090000,2.5 +26100010,2.5 +26100020,2.5 +26100030,2.5 +26100040,2.5 +26100090,2.5 +26131000,2.5 +26140010,2.5 +26140020,2.5 +26140031,2.5 +26140039,2.5 +26140090,2.5 +26151000,2.5 +26159010,2.5 +26169010,0 +26171000,2.5 +26180000,5 +26201910,5 +26201990,5 +26202910,5 +26203010,5 +26203090,5 +26204010,5 +27011100,5 +27011200,1 +27011910,1 +27011920,1 +27011990,1 +27030010,5 +27030090,5 +27040010,5 +27040020,5 +27040030,5 +27040090,5 +27060010,5 +27072000,2.5 +27073000,2.5 +27074000,2.5 +27075000,2.5 +27079900,2.5 +27081010,5 +27081090,5 +27082000,5 +27090010,5 +27101221,2.5 +27101222,2.5 +27101229,2.5 +27101232,5 +27101239,5 +27101241,2.5 +27101249,2.5 +27101250,0 +27101290,5 +27101939,5 +27101941,5 +27101943,5 +27101944,2.5 +27101949,2.5 +27101951,5 +27101952,2.5 +27101953,5 +27101959,5 +27101961,5 +27101969,5 +27101971,5 +27101972,5 +27101973,5 +27101974,5 +27101976,5 +27101978,5 +27101979,5 +27101981,5 +27101983,5 +27101985,5 +27101987,5 +27101988,5 +27101989,5 +27101990,5 +27102020,2.5 +27109900,5 +27111100,2.5 +27111200,2.5 +27111300,2.5 +27111400,5 +27111910,5 +27111990,5 +27112100,5 +27112900,5 +27121090,5 +27122000,5 +27129010,5 +27129030,5 +27129040,5 +27129090,5 +27131110,10 +27131190,10 +27131210,7.5 +27131290,7.5 +27132000,5 +27139000,5 +27149010,5 +27149020,5 +27149030,5 +27149090,5 +27150010,5 +27150090,5 +28011000,5 +28012000,2.5 +28013010,5 +28013020,5 +28020010,5 +28020020,5 +28030010,7.5 +28030020,5 +28030090,5 +28042100,5 +28042910,5 
+28042990,5 +28043000,5 +28044090,5 +28045010,5 +28045020,5 +28046100,5 +28046900,5 +28047030,5 +28048000,5 +28049000,5 +28051100,5 +28051200,5 +28051900,5 +28053000,5 +28054000,0 +28061000,7.5 +28070010,7.5 +28080010,7.5 +28080020,7.5 +28091000,7.5 +28092010,20 +28092020,7.5 +28100010,7.5 +28100020,27.5 +28111100,7.5 +28111920,7.5 +28111990,7.5 +28112110,5 +28112190,7.5 +28112200,7.5 +28112920,7.5 +28112930,7.5 +28112940,7.5 +28112990,7.5 +28121920,7.5 +28121990,7.5 +28129000,7.5 +28131000,7.5 +28141000,5 +28142000,5 +28151110,7.5 +28151190,7.5 +28151200,7.5 +28152000,7.5 +28153000,7.5 +28161010,7.5 +28164000,7.5 +28170010,7.5 +28170020,7.5 +28181000,7.5 +28182011,5 +28182019,5 +28182090,7.5 +28183000,7.5 +28191000,7.5 +28199000,7.5 +28201000,7.5 +28209000,7.5 +28211010,7.5 +28211020,7.5 +28212000,7.5 +28220020,7.5 +28230010,10 +28230090,7.5 +28251020,7.5 +28251040,7.5 +28251090,7.5 +28252000,7.5 +28253010,0 +28254000,0 +28255000,7.5 +28256010,7.5 +28256020,7.5 +28257090,7.5 +28258000,7.5 +28259010,7.5 +28259020,7.5 +28259040,7.5 +28259090,7.5 +28261200,7.5 +28261910,7.5 +28261990,7.5 +28263000,7.5 +28269000,7.5 +28271000,7.5 +28272000,7.5 +28273100,7.5 +28273200,7.5 +28273500,0 +28273920,7.5 +28273930,7.5 +28273990,7.5 +28274900,7.5 +28275120,7.5 +28275990,7.5 +28276010,7.5 +28276020,7.5 +28276090,7.5 +28281010,7.5 +28289019,7.5 +28289030,7.5 +28289090,7.5 +28291100,7.5 +28299010,7.5 +28299030,7.5 +28301000,7.5 +28309010,7.5 +28309020,7.5 +28311010,7.5 +28311020,7.5 +28319020,7.5 +28321010,7.5 +28321090,7.5 +28322010,7.5 +28322090,7.5 +28323010,7.5 +28323090,7.5 +28331100,7.5 +28331910,7.5 +28331990,7.5 +28332100,7.5 +28332210,7.5 +28332290,7.5 +28332400,7.5 +28332500,7.5 +28332700,7.5 +28332910,7.5 +28332990,7.5 +28333090,7.5 +28334000,7.5 +28341010,7.5 +28342100,7.5 +28342910,7.5 +28342920,7.5 +28342990,7.5 +28351090,7.5 +28352200,7.5 +28352400,7.5 +28352500,7.5 +28352610,7.5 +28352620,7.5 +28352690,7.5 +28352930,7.5 +28352940,7.5 +28352990,7.5 +28353100,7.5 +28353900,7.5 +28362010,7.5 +28362020,7.5 +28362090,7.5 +28363000,7.5 +28364000,7.5 +28365000,7.5 +28369100,7.5 +28369200,7.5 +28369910,7.5 +28369920,7.5 +28369930,7.5 +28369990,7.5 +28371100,10 +28371910,0 +28371990,7.5 +28372010,7.5 +28372030,7.5 +28372040,7.5 +28372090,7.5 +28391100,7.5 +28391900,7.5 +28399090,7.5 +28401100,7.5 +28401900,7.5 +28402090,7.5 +28403000,7.5 +28413000,7.5 +28415010,7.5 +28415090,7.5 +28416900,7.5 +28417020,7.5 +28417090,7.5 +28419000,7.5 +28421000,7.5 +28429090,7.5 +28431020,0 +28431090,0 +28432100,10 +28432900,10 +28433000,10 +28439012,10 +28439019,10 +28443090,7.5 +28444100,5 +28444300,7.5 +28444400,7.5 +28451000,7.5 +28459090,7.5 +28461010,7.5 +28461090,7.5 +28469010,7.5 +28469090,7.5 +28470000,7.5 +28491000,7.5 +28492010,7.5 +28492090,7.5 +28499010,7.5 +28499020,7.5 +28499090,7.5 +28500010,7.5 +28500020,7.5 +28500030,7.5 +28500041,7.5 +28500049,7.5 +28500050,7.5 +28521000,7.5 +28539010,7.5 +28539020,7.5 +28539090,7.5 +29011000,2.5 +29012100,2.5 +29012200,2.5 +29012300,2.5 +29012400,2.5 +29012910,2.5 +29012920,2.5 +29012930,2.5 +29012940,2.5 +29012990,2.5 +29021100,2.5 +29021990,2.5 +29022000,2.5 +29023000,2.5 +29024100,0 +29024200,2.5 +29024300,0 +29024400,2.5 +29025000,2 +29026000,2.5 +29027000,2.5 +29029030,2.5 +29029040,2.5 +29029090,2.5 +29031110,7.5 +29031200,7.5 +29031300,7.5 +29031400,5 +29031500,0 +29031920,5 +29031990,5 +29032100,2 +29032200,7.5 +29032300,5 +29032900,5 +29034200,7.5 +29034300,7.5 +29034400,7.5 +29034500,7.5 +29034600,7.5 +29034700,7.5 +29034800,7.5 +29034900,7.5 
+29035100,7.5 +29035990,7.5 +29036100,7.5 +29036200,7.5 +29036900,7.5 +29037900,7.5 +29038900,7.5 +29039110,7.5 +29039120,7.5 +29039910,7.5 +29039930,7.5 +29039940,7.5 +29039950,7.5 +29039960,7.5 +29039990,7.5 +29041010,5 +29041090,5 +29042050,5 +29042060,5 +29042090,5 +29049910,5 +29049930,5 +29049950,5 +29049990,5 +29051100,2.5 +29051210,7.5 +29051220,7.5 +29051300,7.5 +29051490,7.5 +29051620,7.5 +29051690,7.5 +29051700,7.5 +29051990,7.5 +29052210,7.5 +29052220,7.5 +29052230,7.5 +29052290,7.5 +29052900,7.5 +29053100,5 +29053200,7.5 +29053910,7.5 +29053920,7.5 +29053990,7.5 +29054100,7.5 +29054290,7.5 +29054300,20 +29054400,20 +29054500,7.5 +29054900,7.5 +29055900,7.5 +29061100,7.5 +29061200,7.5 +29061310,7.5 +29061390,7.5 +29061990,7.5 +29062100,7.5 +29062910,7.5 +29062920,7.5 +29062990,7.5 +29071110,7.5 +29071190,7.5 +29071210,7.5 +29071220,7.5 +29071290,7.5 +29071300,0 +29071510,7.5 +29071520,7.5 +29071590,7.5 +29071910,7.5 +29071920,7.5 +29071930,7.5 +29071940,7.5 +29071950,7.5 +29071990,7.5 +29072100,7.5 +29072200,7.5 +29072300,7.5 +29072910,7.5 +29072920,7.5 +29072990,7.5 +29081900,7.5 +29089920,7.5 +29089990,7.5 +29091100,7.5 +29091990,7.5 +29092000,7.5 +29093019,7.5 +29093020,7.5 +29093030,7.5 +29093090,7.5 +29094100,7.5 +29094300,7.5 +29094400,7.5 +29094910,7.5 +29094990,7.5 +29095010,7.5 +29095020,0 +29095090,7.5 +29096000,7.5 +29101000,7.5 +29102000,5 +29103000,7.5 +29109000,7.5 +29110010,7.5 +29110090,7.5 +29121100,7.5 +29121200,7.5 +29121910,7.5 +29121920,7.5 +29121930,7.5 +29121990,7.5 +29122100,7.5 +29122920,7.5 +29122930,7.5 +29122990,7.5 +29124100,7.5 +29124200,7.5 +29124910,7.5 +29124920,7.5 +29124991,7.5 +29124999,7.5 +29126000,7.5 +29130010,7.5 +29130090,7.5 +29141100,7.5 +29141200,7.5 +29141300,7.5 +29141910,7.5 +29141990,7.5 +29142200,7.5 +29142310,7.5 +29142320,7.5 +29142390,7.5 +29142910,7.5 +29142921,7.5 +29142922,7.5 +29142950,7.5 +29142990,7.5 +29143910,7.5 +29143920,7.5 +29143930,7.5 +29143990,7.5 +29144000,7.5 +29145000,7.5 +29146100,7.5 +29146200,7.5 +29146910,7.5 +29146990,7.5 +29147910,7.5 +29147920,7.5 +29147990,7.5 +29151100,7.5 +29151210,7.5 +29151290,7.5 +29151300,7.5 +29152100,5 +29152400,7.5 +29152920,7.5 +29152990,7.5 +29153100,7.5 +29153200,7.5 +29153300,7.5 +29153910,7.5 +29153920,7.5 +29153930,7.5 +29153940,7.5 +29153960,7.5 +29153980,7.5 +29153991,7.5 +29153999,7.5 +29154010,7.5 +29154030,7.5 +29155000,7.5 +29156010,7.5 +29156020,7.5 +29157010,7.5 +29157020,7.5 +29157030,7.5 +29157050,7.5 +29157090,7.5 +29159010,7.5 +29159040,7.5 +29159070,7.5 +29159091,7.5 +29159092,7.5 +29159093,7.5 +29159095,7.5 +29159099,7.5 +29161100,7.5 +29161210,7.5 +29161290,7.5 +29161310,7.5 +29161320,7.5 +29161400,7.5 +29161510,7.5 +29161590,7.5 +29161930,7.5 +29161950,7.5 +29161960,7.5 +29161970,7.5 +29161990,7.5 +29162010,7.5 +29162090,7.5 +29163110,7.5 +29163120,7.5 +29163140,7.5 +29163160,7.5 +29163190,7.5 +29163200,7.5 +29163910,7.5 +29163950,7.5 +29163990,7.5 +29171110,7.5 +29171140,7.5 +29171190,7.5 +29171200,7.5 +29171310,7.5 +29171390,7.5 +29171400,7.5 +29171910,7.5 +29171920,7.5 +29171930,7.5 +29171950,7.5 +29171960,7.5 +29171970,7.5 +29171990,7.5 +29172000,7.5 +29173200,7.5 +29173300,7.5 +29173400,7.5 +29173500,7.5 +29173600,5 +29173700,5 +29173920,7.5 +29173930,7.5 +29173940,7.5 +29173950,7.5 +29173960,7.5 +29173990,7.5 +29181110,7.5 +29181120,7.5 +29181190,7.5 +29181200,7.5 +29181310,7.5 +29181390,7.5 +29181400,7.5 +29181510,7.5 +29181520,7.5 +29181590,7.5 +29181610,7.5 +29181690,7.5 +29181700,7.5 +29181920,7.5 +29181990,7.5 +29182110,7.5 +29182120,7.5 
+29182190,7.5 +29182310,7.5 +29182330,7.5 +29182340,7.5 +29182390,7.5 +29182920,7.5 +29182990,7.5 +29183020,7.5 +29183040,7.5 +29183090,7.5 +29189990,7.5 +29191000,7.5 +29199020,7.5 +29199050,7.5 +29199090,7.5 +29201920,7.5 +29201990,7.5 +29202300,7.5 +29202910,7.5 +29202920,7.5 +29202990,7.5 +29209000,7.5 +29211110,7.5 +29211190,7.5 +29211990,7.5 +29212100,7.5 +29212200,7.5 +29212910,7.5 +29212990,7.5 +29213010,7.5 +29213090,7.5 +29214110,7.5 +29214190,7.5 +29214212,7.5 +29214222,7.5 +29214223,7.5 +29214224,7.5 +29214234,7.5 +29214236,7.5 +29214290,7.5 +29214340,7.5 +29214350,7.5 +29214380,7.5 +29214390,7.5 +29214410,7.5 +29214490,7.5 +29214516,7.5 +29214524,7.5 +29214526,7.5 +29214531,7.5 +29214590,7.5 +29214910,7.5 +29214990,7.5 +29215110,7.5 +29215120,7.5 +29215130,7.5 +29215190,7.5 +29215940,7.5 +29215990,7.5 +29221110,7.5 +29221190,7.5 +29221200,7.5 +29221500,7.5 +29221600,7.5 +29221990,7.5 +29222150,7.5 +29222160,7.5 +29222190,7.5 +29222913,7.5 +29222922,7.5 +29222926,7.5 +29222933,7.5 +29222934,7.5 +29222990,7.5 +29223900,7.5 +29224100,7.5 +29224210,7.5 +29224220,7.5 +29224290,7.5 +29224910,7.5 +29224920,7.5 +29224990,7.5 +29225014,7.5 +29225015,7.5 +29225090,7.5 +29231000,7.5 +29232010,7.5 +29232090,7.5 +29233000,7.5 +29239000,7.5 +29241900,7.5 +29242110,7.5 +29242190,7.5 +29242910,7.5 +29242920,7.5 +29242930,7.5 +29242990,7.5 +29251100,7.5 +29251900,7.5 +29252990,7.5 +29261000,2.5 +29262000,7.5 +29264000,7.5 +29269000,7.5 +29270010,7.5 +29270090,7.5 +29280090,7.5 +29291010,7.5 +29291020,7.5 +29291090,7.5 +29299010,7.5 +29299090,7.5 +29302000,7.5 +29303000,7.5 +29304000,7.5 +29309010,7.5 +29309040,7.5 +29309050,7.5 +29309060,7.5 +29309070,7.5 +29309080,7.5 +29309091,7.5 +29309098,7.5 +29309099,7.5 +29311090,7.5 +29312000,7.5 +29314990,7.5 +29315900,7.5 +29319019,7.5 +29319020,7.5 +29319090,7.5 +29321100,7.5 +29321200,7.5 +29321300,7.5 +29321400,7.5 +29321910,7.5 +29321990,7.5 +29322010,7.5 +29322090,7.5 +29329300,7.5 +29329910,7.5 +29329990,7.5 +29331100,7.5 +29331920,7.5 +29331940,7.5 +29331970,7.5 +29331999,7.5 +29332100,7.5 +29332920,7.5 +29332990,7.5 +29333100,7.5 +29333200,7.5 +29333314,7.5 +29333325,7.5 +29333915,7.5 +29333916,7.5 +29333919,7.5 +29333990,7.5 +29334900,7.5 +29335200,7.5 +29335400,7.5 +29335920,10 +29335990,10 +29336100,7.5 +29336910,7.5 +29336930,7.5 +29336940,7.5 +29336950,7.5 +29336990,7.5 +29337100,5 +29337200,7.5 +29337910,7.5 +29337920,7.5 +29337990,7.5 +29339100,7.5 +29339910,7.5 +29339990,7.5 +29341000,7.5 +29342000,7.5 +29343000,7.5 +29349910,7.5 +29349920,7.5 +29349990,7.5 +29359013,7.5 +29359014,7.5 +29359024,7.5 +29359090,7.5 +29362100,7.5 +29362210,7.5 +29362290,7.5 +29362310,7.5 +29362390,7.5 +29362400,7.5 +29362500,7.5 +29362610,7.5 +29362690,7.5 +29362700,7.5 +29362800,7.5 +29362910,7.5 +29362920,7.5 +29362930,7.5 +29362940,7.5 +29362950,7.5 +29362990,7.5 +29369000,7.5 +29371200,7.5 +29371900,7.5 +29372100,7.5 +29372200,7.5 +29372300,7.5 +29372900,7.5 +29375000,7.5 +29379019,7.5 +29379020,7.5 +29379090,7.5 +29381000,7.5 +29389090,7.5 +29391100,7.5 +29391900,7.5 +29392030,7.5 +29392090,7.5 +29393000,7.5 +29394200,7.5 +29395900,7.5 +29396210,7.5 +29396900,7.5 +29397900,7.5 +29398000,7.5 +29400000,7.5 +29411010,7.5 +29411020,7.5 +29411030,7.5 +29411050,7.5 +29411090,7.5 +29412010,7.5 +29412090,7.5 +29413010,7.5 +29413020,7.5 +29413090,7.5 +29414000,7.5 +29415000,7.5 +29419011,7.5 +29419019,7.5 +29419020,7.5 +29419030,7.5 +29419040,7.5 +29419050,7.5 +29419060,7.5 +29419090,7.5 +29420011,7.5 +29420012,7.5 +29420014,7.5 +29420015,7.5 +29420016,7.5 
+29420021,7.5 +29420026,7.5 +29420027,7.5 +29420032,7.5 +29420034,7.5 +29420090,7.5 +30012030,10 +30012090,10 +30019010,10 +30019091,10 +30019099,10 +30021220,5 +30021230,0 +30021290,10 +30021300,10 +30021400,10 +30021500,10 +30024112,10 +30024113,10 +30024114,10 +30024118,10 +30024119,10 +30024121,10 +30024123,10 +30024129,10 +30024200,10 +30024910,10 +30024920,10 +30024990,10 +30025900,10 +30029010,10 +30029020,10 +30029090,10 +30032000,10 +30033900,10 +30039011,0 +30039034,10 +30039090,10 +30041010,10 +30041030,10 +30041090,10 +30042019,10 +30042020,10 +30042039,10 +30042049,10 +30042061,10 +30042063,10 +30042095,5 +30042099,10 +30043110,10 +30043190,10 +30043200,10 +30043912,10 +30043919,10 +30043990,10 +30044990,10 +30045010,5 +30045036,10 +30045090,10 +30049011,10 +30049012,10 +30049014,10 +30049015,10 +30049022,10 +30049029,10 +30049032,10 +30049039,10 +30049044,10 +30049046,10 +30049047,10 +30049049,10 +30049056,10 +30049057,10 +30049059,10 +30049061,10 +30049063,10 +30049065,10 +30049069,10 +30049071,5 +30049072,5 +30049073,10 +30049074,10 +30049075,5 +30049079,10 +30049081,10 +30049082,10 +30049085,10 +30049087,10 +30049091,10 +30049092,10 +30049095,10 +30049099,10 +30051020,10 +30051090,10 +30059010,10 +30059030,10 +30059040,10 +30059050,10 +30059060,10 +30059090,10 +30061010,10 +30061020,10 +30063000,10 +30064000,10 +30065000,10 +30066010,0 +30067000,10 +30069100,10 +30069300,10 +31010099,7.5 +31021000,10 +31022100,5 +31022990,7.5 +31023000,2.5 +31025000,0 +31026000,7.5 +31029010,7.5 +31029090,7.5 +31031900,7.5 +31042000,7.5 +31043000,5 +31049000,7.5 +31051000,7.5 +31052000,5 +31053000,5 +31054000,5 +31055100,5 +31055900,5 +31056000,5 +31059010,5 +31059090,5 +32011000,7.5 +32012000,2.5 +32019010,7.5 +32019090,7.5 +32021000,7.5 +32029010,7.5 +32029020,7.5 +32029030,7.5 +32029090,7.5 +32030010,7.5 +32030020,7.5 +32030090,7.5 +32041119,7.5 +32041129,7.5 +32041133,7.5 +32041139,7.5 +32041149,7.5 +32041152,7.5 +32041159,7.5 +32041192,7.5 +32041193,7.5 +32041196,7.5 +32041199,7.5 +32041211,7.5 +32041212,7.5 +32041213,7.5 +32041214,7.5 +32041215,7.5 +32041217,7.5 +32041218,7.5 +32041219,7.5 +32041245,7.5 +32041259,7.5 +32041291,7.5 +32041293,7.5 +32041294,7.5 +32041299,7.5 +32041329,7.5 +32041339,7.5 +32041349,7.5 +32041351,7.5 +32041359,7.5 +32041369,5 +32041399,7.5 +32041419,7.5 +32041429,7.5 +32041439,7.5 +32041460,7.5 +32041483,7.5 +32041485,7.5 +32041490,7.5 +32041539,7.5 +32041551,7.5 +32041552,7.5 +32041555,7.5 +32041559,7.5 +32041569,7.5 +32041589,7.5 +32041599,7.5 +32041610,7.5 +32041620,7.5 +32041630,7.5 +32041650,7.5 +32041670,7.5 +32041680,7.5 +32041690,7.5 +32041711,7.5 +32041719,7.5 +32041720,7.5 +32041739,7.5 +32041740,7.5 +32041751,7.5 +32041759,7.5 +32041761,7.5 +32041769,7.5 +32041770,7.5 +32041780,7.5 +32041790,7.5 +32041800,7.5 +32041911,7.5 +32041916,7.5 +32041921,7.5 +32041929,7.5 +32041935,7.5 +32041951,7.5 +32041952,7.5 +32041953,7.5 +32041954,7.5 +32041955,7.5 +32041956,7.5 +32041958,7.5 +32041959,7.5 +32041961,7.5 +32041963,7.5 +32041964,7.5 +32041965,7.5 +32041966,7.5 +32041967,7.5 +32041969,7.5 +32041971,7.5 +32041972,7.5 +32041973,7.5 +32041974,7.5 +32041975,7.5 +32041976,7.5 +32041978,7.5 +32041979,7.5 +32041983,7.5 +32041990,7.5 +32042010,7.5 +32042090,7.5 +32049000,7.5 +32050000,7.5 +32061110,10 +32061190,10 +32061900,10 +32062000,7.5 +32064100,7.5 +32064200,7.5 +32064910,7.5 +32064940,7.5 +32064990,7.5 +32065000,7.5 +32071010,7.5 +32071020,7.5 +32071030,7.5 +32071040,7.5 +32071090,7.5 +32072010,7.5 +32072020,7.5 +32073000,7.5 +32074000,7.5 +32081010,10 
+32081020,10 +32081030,10 +32081090,10 +32082010,10 +32082020,10 +32082030,10 +32082090,10 +32089011,10 +32089019,10 +32089022,10 +32089029,10 +32089030,10 +32089041,10 +32089049,10 +32089050,10 +32089090,10 +32091010,10 +32091090,10 +32099010,10 +32099020,10 +32099090,10 +32100011,10 +32100019,10 +32100020,10 +32100030,10 +32100040,10 +32100090,10 +32110000,10 +32121000,10 +32129010,10 +32129020,10 +32129030,10 +32129090,10 +32131000,10 +32139000,10 +32141000,10 +32149010,10 +32149020,5 +32149090,10 +32151110,10 +32151130,10 +32151140,10 +32151190,10 +32151910,10 +32151930,10 +32151940,10 +32151990,10 +32159010,10 +32159020,10 +32159040,10 +32159090,10 +33011200,20 +33011300,20 +33011910,20 +33011990,20 +33012400,20 +33012510,20 +33012590,20 +33012911,20 +33012912,20 +33012913,5 +33012914,20 +33012915,20 +33012916,20 +33012917,20 +33012918,20 +33012921,20 +33012922,20 +33012923,20 +33012924,20 +33012925,20 +33012928,20 +33012932,5 +33012933,20 +33012934,20 +33012936,20 +33012937,20 +33012938,20 +33012941,20 +33012942,20 +33012943,20 +33012945,20 +33012946,20 +33012947,20 +33012949,20 +33012950,20 +33012990,20 +33013010,20 +33013091,20 +33013099,20 +33019012,20 +33019013,20 +33019022,20 +33019029,20 +33019041,20 +33019049,20 +33019059,20 +33019071,20 +33019079,20 +33019090,20 +33021010,100 +33021090,100 +33029011,10 +33029012,10 +33029019,10 +33029020,10 +33029090,10 +33030010,20 +33030050,20 +33030060,20 +33030090,20 +33041000,20 +33042000,20 +33043000,20 +33049110,20 +33049120,20 +33049190,20 +33049910,20 +33049920,20 +33049930,20 +33049940,20 +33049950,20 +33049990,20 +33051010,20 +33051090,20 +33052000,20 +33053000,20 +33059011,20 +33059019,20 +33059020,0 +33059030,20 +33059040,20 +33059050,20 +33059090,20 +33061010,0 +33061020,20 +33061090,20 +33062000,20 +33069000,20 +33071010,20 +33071090,20 +33072000,20 +33073090,20 +33074100,20 +33074900,20 +33079010,20 +33079090,20 +34011110,10 +34011120,10 +34011190,10 +34011919,10 +34011920,5 +34011941,10 +34011942,0 +34011990,10 +34012000,10 +34013011,10 +34013012,10 +34013019,10 +34013090,10 +34023100,10 +34023900,10 +34024100,10 +34024200,10 +34024900,10 +34025000,10 +34029011,10 +34029012,10 +34029019,10 +34029020,10 +34029030,10 +34029042,10 +34029049,10 +34029051,10 +34029052,10 +34029059,10 +34029091,10 +34029092,10 +34029099,10 +34031100,7.5 +34031900,7.5 +34039100,7.5 +34039900,7.5 +34042000,10 +34049010,10 +34049020,10 +34049039,10 +34049090,10 +34051000,10 +34052000,10 +34053000,10 +34054000,10 +34059010,10 +34059090,10 +34060010,25 +34060090,25 +34070010,10 +34070090,10 +35011000,20 +35019000,20 +35022000,20 +35029000,20 +35030020,20 +35030030,20 +35030090,20 +35040010,20 +35040091,20 +35040099,20 +35051010,20 +35051090,20 +35052000,20 +35061000,10 +35069110,10 +35069190,10 +35069910,10 +35069991,10 +35069999,10 +35071099,10 +35079010,10 +35079030,10 +35079061,10 +35079062,10 +35079069,10 +35079079,10 +35079091,10 +35079099,10 +36020010,10 +36020090,10 +36031000,10 +36032000,10 +36034000,10 +36035000,10 +36036000,10 +36061000,10 +37011010,10 +37011090,10 +37012000,10 +37013000,10 +37019190,10 +37019990,10 +37021000,10 +37023990,10 +37024290,10 +37024390,10 +37024490,10 +37029890,10 +37031010,10 +37032010,10 +37039010,10 +37039020,0 +37040010,10 +37050000,10 +37061051,10 +37071000,10 +37079010,10 +37079090,10 +38011000,7.5 +38012000,7.5 +38013000,7.5 +38019000,7.5 +38021000,7.5 +38029011,7.5 +38029019,7.5 +38029020,7.5 +38030000,7.5 +38040010,7.5 +38040090,7.5 +38051010,7.5 +38051020,7.5 +38051030,7.5 +38059090,7.5 +38061010,7.5 
+38061090,7.5 +38062000,7.5 +38063000,7.5 +38069090,7.5 +38070010,7.5 +38070020,7.5 +38070030,7.5 +38086200,10 +38089111,10 +38089191,10 +38089199,10 +38089290,10 +38089320,10 +38089330,5 +38089340,10 +38089350,10 +38089390,10 +38089400,10 +38089910,10 +38089990,10 +38091000,0 +38099110,7.5 +38099120,7.5 +38099130,7.5 +38099140,7.5 +38099160,7.5 +38099170,7.5 +38099180,7.5 +38099190,7.5 +38099200,7.5 +38099310,7.5 +38099390,7.5 +38101010,7.5 +38101020,7.5 +38101090,7.5 +38109010,7.5 +38109090,7.5 +38111100,10 +38111900,10 +38112100,10 +38112900,10 +38119000,10 +38121000,7.5 +38122010,7.5 +38122090,7.5 +38123100,7.5 +38123910,7.5 +38123920,7.5 +38123930,7.5 +38123990,7.5 +38130000,10 +38140010,10 +38140020,10 +38151100,7.5 +38151210,7.5 +38151290,7.5 +38151900,7.5 +38159000,7.5 +38160000,7.5 +38170011,7.5 +38170019,7.5 +38170020,7.5 +38180010,0 +38180090,0 +38190010,10 +38190090,10 +38200000,10 +38210000,7.5 +38221200,0 +38221300,10 +38221910,10 +38221990,10 +38229010,30 +38229090,30 +38231100,7.5 +38231200,7.5 +38231300,7.5 +38231900,7.5 +38237020,7.5 +38237090,7.5 +38241000,7.5 +38243000,7.5 +38244010,7.5 +38244090,7.5 +38245090,7.5 +38246010,30 +38246090,30 +38248900,7.5 +38249100,7.5 +38249200,7.5 +38249900,17.5 +38259000,0 +38276100,7.5 +38276300,7.5 +38276400,7.5 +38276500,7.5 +38276800,7.5 +38279000,7.5 +39011010,7.5 +39011020,7.5 +39011090,7.5 +39012000,7.5 +39013000,7.5 +39014010,7.5 +39014090,7.5 +39019000,7.5 +39021000,7.5 +39022000,7.5 +39023000,7.5 +39029000,7.5 +39031100,7.5 +39031910,7.5 +39031990,7.5 +39032000,7.5 +39033000,7.5 +39039010,7.5 +39039090,7.5 +39041010,10 +39041020,10 +39041090,10 +39042100,10 +39042200,10 +39043010,5 +39043090,10 +39044000,10 +39045090,10 +39046100,10 +39046910,10 +39046990,10 +39049010,10 +39049090,10 +39051220,7.5 +39051290,7.5 +39051920,7.5 +39051990,7.5 +39052100,7.5 +39052900,7.5 +39053000,7.5 +39059100,7.5 +39059910,7.5 +39059990,7.5 +39061010,7.5 +39061090,7.5 +39069040,7.5 +39069050,7.5 +39069060,7.5 +39069070,5 +39069090,7.5 +39071000,7.5 +39072100,7.5 +39072910,7.5 +39072990,7.5 +39073010,7.5 +39073090,7.5 +39074000,7.5 +39075000,7.5 +39076110,7.5 +39076190,7.5 +39076930,7.5 +39076990,7.5 +39077000,7.5 +39079110,7.5 +39079120,7.5 +39079150,7.5 +39079190,7.5 +39079900,7.5 +39081011,10 +39081019,10 +39081021,10 +39081029,10 +39081031,5 +39081039,10 +39081041,10 +39081049,10 +39081071,10 +39081079,10 +39089000,10 +39091010,7.5 +39091090,7.5 +39092010,7.5 +39092090,7.5 +39093100,7.5 +39093990,7.5 +39094010,7.5 +39094020,7.5 +39094030,7.5 +39094040,7.5 +39094060,7.5 +39094090,7.5 +39095000,7.5 +39100010,7.5 +39100020,7.5 +39100090,7.5 +39111010,7.5 +39111090,7.5 +39119010,7.5 +39119090,7.5 +39121110,7.5 +39121190,7.5 +39121220,7.5 +39121290,7.5 +39122019,7.5 +39122029,7.5 +39123100,7.5 +39123911,7.5 +39123912,7.5 +39123919,7.5 +39123921,7.5 +39123922,7.5 +39123929,7.5 +39129010,7.5 +39129020,7.5 +39129090,7.5 +39131010,7.5 +39131090,7.5 +39139011,7.5 +39139019,7.5 +39139020,7.5 +39139030,7.5 +39139090,7.5 +39140010,7.5 +39140020,7.5 +39140090,7.5 +39159090,7.5 +39161010,10 +39161020,10 +39161090,10 +39162011,10 +39162019,10 +39162099,10 +39169010,10 +39169023,10 +39169024,10 +39169025,10 +39169027,10 +39169031,10 +39169050,10 +39169060,10 +39169080,10 +39169090,10 +39171010,10 +39171020,10 +39172110,10 +39172190,10 +39172200,10 +39172310,10 +39172390,10 +39172910,10 +39172920,10 +39172950,10 +39172990,10 +39173100,10 +39173210,10 +39173220,10 +39173290,10 +39173300,10 +39173910,10 +39173920,10 +39173990,10 +39174000,10 +39181010,15 
+39181090,15 +39189010,15 +39189020,15 +39189090,15 +39191000,10 +39199010,10 +39199020,10 +39199090,15 +39201011,10 +39201012,10 +39201019,10 +39201091,10 +39201092,10 +39201099,10 +39202010,10 +39202020,10 +39202090,10 +39203010,10 +39203020,10 +39203090,10 +39204300,10 +39204900,10 +39205111,10 +39205119,10 +39205199,10 +39205911,10 +39205919,10 +39205999,10 +39206110,10 +39206120,10 +39206190,10 +39206210,10 +39206220,10 +39206290,10 +39206320,10 +39206390,10 +39206911,10 +39206912,10 +39206919,10 +39206929,10 +39206939,10 +39206992,10 +39206999,10 +39207111,10 +39207119,10 +39207129,10 +39207199,10 +39207311,10 +39207319,10 +39207329,10 +39207399,10 +39207911,10 +39207919,10 +39207999,10 +39209110,10 +39209190,10 +39209211,0 +39209219,10 +39209292,10 +39209299,10 +39209390,10 +39209410,10 +39209490,10 +39209911,10 +39209912,10 +39209919,10 +39209921,10 +39209922,10 +39209929,10 +39209932,10 +39209939,10 +39209941,10 +39209949,10 +39209959,10 +39209991,10 +39209992,10 +39209999,15 +39211100,10 +39211200,10 +39211310,10 +39211390,10 +39211400,10 +39211900,10 +39219010,10 +39219021,10 +39219022,10 +39219023,10 +39219024,10 +39219025,10 +39219026,10 +39219029,10 +39219039,10 +39219091,10 +39219092,10 +39219094,10 +39219095,10 +39219096,10 +39219099,10 +39221000,15 +39222000,15 +39229000,15 +39231010,15 +39231020,15 +39231030,15 +39231040,15 +39231090,15 +39232100,15 +39232910,15 +39232990,15 +39233010,15 +39233090,15 +39234000,15 +39235010,15 +39235090,15 +39239010,15 +39239020,15 +39239090,15 +39241010,15 +39241090,15 +39249010,15 +39249020,15 +39249090,15 +39251000,15 +39252000,15 +39253000,15 +39259010,15 +39259090,15 +39261011,15 +39261019,15 +39261091,15 +39261099,15 +39262011,15 +39262019,15 +39262021,15 +39262029,15 +39262039,15 +39262041,15 +39262049,15 +39262099,15 +39263010,15 +39263090,15 +39264011,15 +39264019,15 +39264021,15 +39264029,15 +39264039,15 +39264041,15 +39264049,15 +39264051,15 +39264059,15 +39264060,15 +39264091,15 +39264099,15 +39269010,15 +39269021,15 +39269029,15 +39269031,15 +39269039,15 +39269041,15 +39269049,15 +39269051,15 +39269059,15 +39269061,15 +39269069,15 +39269071,15 +39269079,15 +39269080,15 +39269091,15 +39269099,15 +40012100,25 +40012200,25 +40012920,25 +40012930,25 +40012990,25 +40013000,10 +40021100,10 +40021910,10 +40021920,10 +40021930,10 +40021990,10 +40022000,10 +40023100,10 +40023900,10 +40024100,10 +40024900,10 +40025100,10 +40025900,10 +40026000,10 +40027000,10 +40028010,10 +40028020,10 +40028090,10 +40029100,10 +40029910,10 +40029990,10 +40030000,10 +40040000,10 +40051000,10 +40052010,10 +40052090,10 +40059110,5 +40059190,10 +40059910,10 +40059990,10 +40061000,10 +40069010,10 +40069090,10 +40070010,10 +40070090,10 +40081110,10 +40081190,10 +40081910,10 +40081990,10 +40082110,10 +40082120,10 +40082190,10 +40082910,10 +40082920,10 +40082930,10 +40082940,10 +40082990,10 +40091100,10 +40091200,10 +40092100,10 +40092200,10 +40093100,10 +40093200,10 +40094100,10 +40094200,10 +40101110,10 +40101190,10 +40101210,10 +40101290,10 +40101910,10 +40101990,10 +40103110,10 +40103190,10 +40103210,10 +40103290,10 +40103390,10 +40103410,10 +40103490,10 +40103510,10 +40103590,10 +40103610,10 +40103690,10 +40103911,10 +40103919,10 +40103991,10 +40103992,10 +40103999,10 +40111010,15 +40111090,10 +40112010,15 +40112090,10 +40113000,3 +40114010,10 +40114020,10 +40114090,10 +40115090,10 +40117000,10 +40118000,10 +40119000,10 +40121300,10 +40122090,10 +40129010,10 +40129020,10 +40129049,10 +40129050,10 +40129090,10 +40131020,10 +40132000,10 +40139010,10 
+40139020,10 +40139030,10 +40139041,0 +40139090,10 +40141010,0 +40149010,10 +40149020,10 +40149030,10 +40149090,10 +40151200,10 +40151900,10 +40159010,10 +40159020,10 +40159030,10 +40159091,10 +40159099,10 +40161000,10 +40169100,10 +40169200,10 +40169310,10 +40169320,10 +40169330,10 +40169340,10 +40169350,10 +40169360,10 +40169390,10 +40169400,10 +40169510,10 +40169590,20 +40169910,10 +40169920,10 +40169930,10 +40169940,10 +40169950,10 +40169960,10 +40169970,10 +40169980,10 +40169990,20 +40170010,10 +40170020,10 +40170030,10 +40170040,10 +40170090,10 +41012010,0 +41015010,0 +41015090,0 +41019010,0 +41019090,0 +41021010,0 +41022110,0 +41022120,0 +41022910,0 +41039000,0 +41041100,10 +41041900,10 +41044100,10 +41044900,10 +41051000,10 +41053000,10 +41062200,10 +41071100,10 +41071200,10 +41071900,10 +41079100,10 +41079200,10 +41079900,10 +41120000,10 +41131000,10 +41132000,10 +41139000,10 +41141000,10 +41142010,10 +41142020,10 +41151000,10 +41152090,10 +42010000,10 +42021110,15 +42021140,15 +42021190,15 +42021210,15 +42021220,15 +42021240,15 +42021250,15 +42021260,15 +42021270,15 +42021280,15 +42021290,15 +42021910,15 +42021920,15 +42021960,15 +42021990,15 +42022110,15 +42022190,15 +42022210,15 +42022220,15 +42022230,15 +42022240,15 +42022290,15 +42022910,15 +42022990,15 +42023110,15 +42023120,15 +42023190,15 +42023210,15 +42023290,15 +42023910,15 +42023990,15 +42029100,15 +42029200,15 +42029900,15 +42031010,10 +42031090,10 +42032110,10 +42032910,10 +42032920,10 +42033000,10 +42034010,10 +42034020,10 +42034090,10 +42050011,10 +42050019,10 +42050020,10 +42050090,10 +43013000,0 +43021910,10 +43021920,10 +43021930,10 +43021990,10 +43039090,10 +43040011,10 +43040019,10 +43040020,10 +44011190,5 +44012100,5 +44012200,5 +44013900,5 +44014100,0 +44014900,5 +44021000,5 +44022010,5 +44029000,5 +44032110,5 +44032190,5 +44032210,5 +44032290,5 +44032390,5 +44032410,5 +44032590,5 +44032690,5 +44034100,5 +44034200,5 +44034900,5 +44039100,5 +44039300,5 +44039400,5 +44039700,5 +44039924,5 +44039990,5 +44042090,10 +44050000,10 +44071100,10 +44071200,10 +44071910,10 +44071990,10 +44072100,10 +44072200,10 +44072300,10 +44072500,0 +44072600,0 +44072900,10 +44079100,10 +44079200,10 +44079300,10 +44079400,10 +44079500,10 +44079600,10 +44079920,10 +44079990,10 +44081010,0 +44081020,10 +44081030,0 +44081090,10 +44083190,10 +44083910,10 +44083920,0 +44083990,10 +44089010,10 +44089090,10 +44091010,10 +44091020,10 +44091090,10 +44092100,10 +44092200,10 +44092910,10 +44092920,10 +44092990,10 +44101110,10 +44101130,10 +44101190,10 +44101290,0 +44101900,10 +44109010,10 +44109050,10 +44109090,10 +44111200,10 +44111300,10 +44111400,10 +44119211,10 +44119219,10 +44119221,0 +44119229,10 +44119329,10 +44119429,10 +44123110,10 +44123140,0 +44123190,10 +44123310,10 +44123330,10 +44123390,10 +44123410,10 +44123490,10 +44123910,0 +44123990,10 +44124200,10 +44124900,10 +44129290,10 +44129990,10 +44130000,10 +44141000,10 +44149000,10 +44151000,10 +44152000,10 +44160010,10 +44170000,10 +44182110,0 +44182190,10 +44182990,10 +44183000,10 +44187300,10 +44187400,10 +44187500,10 +44187900,10 +44188900,10 +44189100,10 +44189900,10 +44191100,10 +44191200,10 +44191900,10 +44192000,10 +44199010,10 +44199020,10 +44199090,10 +44201100,10 +44201900,10 +44209090,10 +44211000,10 +44219119,10 +44219140,10 +44219160,10 +44219170,10 +44219190,10 +44219919,10 +44219920,10 +44219930,10 +44219960,10 +44219970,10 +44219990,10 +45011000,10 +45019000,10 +45020000,10 +45031000,10 +45039090,10 +45041010,10 +45041090,10 +45049000,10 +46012100,10 +46012200,10 
+46012900,10 +46019200,10 +46019300,10 +46019900,10 +46021100,10 +46021200,10 +46021911,10 +46021919,10 +46021990,10 +47020000,5 +47031100,5 +47031900,5 +47032100,5 +47032900,5 +47042900,5 +47050000,5 +47061000,5 +47062000,5 +47069100,5 +47069200,5 +47069300,5 +47071000,10 +47072000,10 +47073000,10 +47079000,10 +48010010,10 +48010090,10 +48021010,10 +48021020,10 +48022010,10 +48022090,10 +48024000,10 +48025410,10 +48025420,10 +48025430,10 +48025510,10 +48025550,10 +48025590,10 +48025620,10 +48025690,10 +48025710,10 +48025770,10 +48025790,10 +48025810,10 +48025820,10 +48025890,10 +48026110,10 +48026190,10 +48026290,10 +48026930,10 +48026990,10 +48030010,10 +48030090,10 +48041100,10 +48041900,10 +48042100,10 +48042900,10 +48043100,10 +48043900,10 +48044100,10 +48044200,10 +48044900,10 +48045100,10 +48045200,10 +48045900,10 +48051100,10 +48051900,10 +48052400,10 +48052500,10 +48053000,10 +48054000,10 +48055000,10 +48059100,10 +48059200,10 +48059300,10 +48061000,10 +48062000,10 +48063000,10 +48064010,10 +48064090,10 +48070010,10 +48070090,10 +48081000,10 +48084090,10 +48089000,10 +48092000,10 +48099000,10 +48101310,10 +48101320,10 +48101330,10 +48101390,10 +48101410,10 +48101490,10 +48101910,10 +48101920,10 +48101930,10 +48101990,10 +48102200,10 +48102900,10 +48103100,10 +48103200,10 +48103910,10 +48103990,10 +48109200,10 +48109900,10 +48111000,10 +48114100,10 +48114900,10 +48115110,10 +48115190,10 +48115910,10 +48115990,10 +48116000,10 +48119011,10 +48119012,10 +48119015,10 +48119017,10 +48119091,10 +48119094,10 +48119099,10 +48120000,10 +48131000,10 +48132000,10 +48139010,10 +48139090,10 +48142000,10 +48149000,10 +48162090,10 +48169010,10 +48169090,10 +48171000,10 +48172000,10 +48173010,10 +48173090,10 +48181000,10 +48182000,10 +48183000,10 +48185000,10 +48189000,10 +48191010,10 +48191090,10 +48192010,10 +48192020,10 +48192090,10 +48193000,10 +48194000,10 +48195010,10 +48195090,10 +48196000,10 +48201010,10 +48201020,10 +48201090,10 +48202000,10 +48203000,10 +48204000,10 +48205000,10 +48209010,10 +48209090,10 +48211010,10 +48211020,10 +48211090,10 +48219010,10 +48219090,10 +48221000,10 +48229010,10 +48229090,10 +48232000,10 +48234000,10 +48236100,10 +48236900,10 +48237010,10 +48237020,10 +48237090,10 +48239011,10 +48239012,10 +48239013,10 +48239014,10 +48239015,10 +48239016,10 +48239017,10 +48239018,10 +48239019,10 +48239030,10 +48239090,20 +49011010,10 +49011020,10 +49019100,10 +49019900,10 +49021010,0 +49021020,0 +49029010,0 +49029020,0 +49030010,10 +49030020,10 +49040000,0 +49052000,0 +49059090,0 +49060000,0 +49070010,10 +49070020,0 +49070030,10 +49070090,10 +49081000,10 +49089000,10 +49090010,10 +49090090,10 +49100010,10 +49100090,10 +49111010,10 +49111020,10 +49111030,10 +49111090,10 +49119100,10 +49119910,10 +49119920,10 +49119990,10 +50020010,15 +50020020,15 +50020030,15 +50030010,15 +50030090,15 +50040010,15 +50040090,15 +50050011,15 +50060090,15 +50072090,20 +50079090,20 +51011100,2.5 +51011900,2.5 +51012100,2.5 +51012900,2.5 +51021190,5 +51021990,5 +51031010,5 +51051000,10 +51052910,2.5 +51052990,10 +51053100,10 +51053900,10 +51061090,10 +51062090,10 +51071010,10 +51071040,10 +51071090,10 +51072030,10 +51072090,10 +51091090,10 +51099000,10 +51111110,10 +51111130,10 +51111190,10 +51111990,10 +51112090,10 +51113010,10 +51113030,10 +51113040,10 +51113090,10 +51119030,10 +51119090,10 +51121110,10 +51121130,10 +51121190,10 +51121920,10 +51121930,10 +51121990,10 +51122090,10 +51123010,10 +51123020,10 +51123030,10 +51123090,10 +51129010,10 +51129030,10 +51129090,10 +51130030,10 +52010020,5 
+52021000,10 +52029900,10 +52030000,30 +52041110,10 +52041120,10 +52041140,10 +52041190,10 +52041900,10 +52042030,10 +52042090,10 +52051190,10 +52051210,10 +52051290,10 +52051310,0 +52051390,10 +52051410,0 +52051490,0 +52051590,10 +52052310,10 +52052390,10 +52052490,10 +52052790,10 +52052890,10 +52053290,10 +52053390,10 +52053490,10 +52054190,10 +52054290,10 +52054830,10 +52054890,10 +52061200,10 +52062200,10 +52062400,0 +52064200,10 +52064500,10 +52071000,10 +52079000,10 +52081120,10 +52081190,10 +52081230,10 +52081290,10 +52081390,10 +52082140,10 +52082190,10 +52082210,10 +52082230,10 +52082290,10 +52082310,10 +52082330,10 +52082390,10 +52082990,10 +52083110,10 +52083129,0 +52083130,10 +52083180,10 +52083190,10 +52083230,10 +52083280,10 +52083290,10 +52083310,10 +52083390,10 +52083990,10 +52084129,10 +52084130,10 +52084190,10 +52084230,10 +52084290,10 +52084320,10 +52084390,10 +52084990,10 +52085280,10 +52085290,10 +52085990,10 +52091190,10 +52091220,10 +52091290,10 +52091900,10 +52092190,10 +52092290,10 +52092990,10 +52093130,10 +52093170,10 +52093190,10 +52093290,10 +52093910,10 +52093990,10 +52094130,10 +52094190,10 +52094200,10 +52094320,10 +52094390,10 +52094990,10 +52095130,10 +52095190,10 +52095290,10 +52095990,10 +52101900,25 +52102110,10 +52102190,10 +52102990,10 +52103110,10 +52103190,10 +52103220,10 +52103290,10 +52103990,10 +52104130,10 +52104190,10 +52104910,10 +52104990,10 +52105110,10 +52105190,10 +52105990,10 +52111110,10 +52111190,10 +52112099,10 +52113110,10 +52113190,10 +52113240,10 +52113290,10 +52113990,10 +52114130,10 +52114190,10 +52114200,10 +52114910,10 +52114990,10 +52115290,10 +52115990,10 +52121100,10 +52121200,10 +52121300,10 +52121400,10 +52121500,10 +52122100,10 +52122200,10 +52122300,10 +52122400,10 +52122500,10 +53012100,0 +53012900,0 +53029000,30 +53031010,5 +53039090,10 +53050040,10 +53050090,10 +53061090,10 +53062010,10 +53062090,10 +53071010,10 +53072000,10 +53082000,10 +53089010,10 +53089090,10 +53091110,10 +53091120,10 +53091910,10 +53091920,10 +53091990,10 +53092110,10 +53092120,10 +53092910,10 +53092920,10 +53092990,10 +53101011,20 +53101012,20 +53101013,20 +53109099,10 +53110012,10 +53110013,10 +53110015,10 +53110019,10 +54011000,5 +54012000,5 +54021110,5 +54021910,5 +54021920,2.5 +54021990,5 +54022090,5 +54023100,5 +54023200,5 +54023300,5 +54023400,5 +54023910,5 +54023990,5 +54024400,5 +54024500,5 +54024600,5 +54024700,5 +54024800,5 +54024900,5 +54025100,5 +54025210,2.5 +54025290,5 +54025990,5 +54026100,5 +54026200,5 +54026300,5 +54026920,5 +54026990,5 +54031090,5 +54033100,5 +54033990,5 +54034150,5 +54034190,5 +54034911,0 +54034912,0 +54034913,0 +54034919,5 +54034990,5 +54041100,5 +54041200,5 +54041920,5 +54041990,5 +54049010,5 +54049090,5 +54060010,5 +54071011,20 +54071015,20 +54071019,20 +54071023,20 +54071029,20 +54071034,20 +54071035,20 +54071039,20 +54071045,20 +54071049,20 +54071095,20 +54071099,20 +54072010,20 +54072090,20 +54073090,20 +54074119,20 +54074129,0 +54074210,20 +54074230,20 +54074290,20 +54074300,20 +54074490,20 +54075119,20 +54075121,20 +54075129,20 +54075210,20 +54075290,20 +54075300,20 +54075420,20 +54075490,20 +54076110,20 +54076190,20 +54076900,20 +54077110,20 +54077200,20 +54077300,20 +54077400,20 +54078119,20 +54078129,20 +54078290,20 +54078300,20 +54078490,20 +54079110,20 +54079200,20 +54079300,20 +54079400,20 +54081000,20 +54082110,20 +54082120,20 +54082211,20 +54082217,20 +54082219,20 +54082290,20 +54082300,20 +54082412,10 +54082419,20 +54082490,20 +54083219,20 +54083290,20 +54083300,20 +54083419,20 +54083490,20 
+55011900,5 +55012000,5 +55013000,5 +55021010,5 +55021090,5 +55029010,5 +55029090,5 +55031110,2.5 +55031120,2.5 +55031190,5 +55031910,2.5 +55031920,2.5 +55031990,5 +55032000,5 +55033010,2.5 +55033090,5 +55034000,5 +55039090,5 +55041011,2.5 +55041019,5 +55041021,2.5 +55041029,5 +55041090,5 +55049090,5 +55051010,5 +55051090,5 +55052000,5 +55061000,5 +55062000,5 +55063000,5 +55069090,5 +55070090,5 +55081000,5 +55082000,5 +55091100,5 +55091200,5 +55092100,5 +55092200,5 +55093100,0 +55093200,5 +55094110,5 +55094190,5 +55094220,5 +55094290,5 +55095100,5 +55095200,0 +55095300,5 +55095900,5 +55096100,5 +55096900,5 +55099200,5 +55099910,5 +55099990,5 +55101110,5 +55101190,5 +55101210,0 +55109090,5 +55121110,20 +55121120,20 +55121910,20 +55121920,20 +55121990,20 +55122910,20 +55122990,20 +55129110,0 +55129120,20 +55129910,20 +55129990,20 +55131120,20 +55131210,20 +55132100,20 +55132300,20 +55132900,20 +55133100,20 +55133900,20 +55134900,20 +55141110,20 +55141220,20 +55141910,20 +55141920,20 +55142100,20 +55142200,20 +55142300,20 +55142900,20 +55143011,20 +55143019,20 +55151130,20 +55151140,20 +55151190,20 +55151230,20 +55151290,20 +55151310,20 +55151330,20 +55151390,20 +55151910,20 +55151930,20 +55151990,20 +55152190,20 +55152290,20 +55152990,20 +55159130,20 +55159930,20 +55159990,20 +55161200,20 +55161300,20 +55161490,20 +55162120,20 +55162200,20 +55162300,20 +55162400,20 +55163110,20 +55163200,20 +55164110,20 +55164200,20 +55164300,20 +55164400,20 +55169200,20 +55169300,20 +55169400,20 +56012110,10 +56012190,10 +56012200,10 +56012900,10 +56013000,20 +56021000,10 +56022100,10 +56022910,10 +56022990,10 +56029090,10 +56031110,20 +56031190,20 +56031200,20 +56031300,10 +56031400,20 +56039100,10 +56039200,20 +56039310,10 +56039390,10 +56039410,20 +56039420,0 +56039490,20 +56041000,10 +56049000,10 +56050010,0 +56050090,10 +56060020,10 +56060090,10 +56072100,10 +56072900,10 +56074100,10 +56074900,10 +56075010,10 +56075020,10 +56075030,10 +56075040,10 +56075090,10 +56079010,10 +56079020,10 +56079090,10 +56081110,10 +56081190,10 +56081900,10 +56089010,10 +56089020,10 +56089090,10 +56090030,10 +56090090,10 +57011010,20 +57011090,20 +57019020,20 +57019031,20 +57019090,20 +57021000,20 +57022010,20 +57022020,20 +57023110,20 +57023190,25 +57023210,20 +57023220,20 +57023290,20 +57023910,20 +57023920,20 +57024110,20 +57024120,20 +57024190,20 +57024210,20 +57024220,20 +57024230,20 +57024290,20 +57024910,20 +57025029,20 +57025033,20 +57025039,20 +57029110,20 +57029130,20 +57029190,20 +57029210,20 +57029220,20 +57029290,20 +57029910,20 +57029990,20 +57031010,20 +57031090,20 +57032100,20 +57032910,20 +57032920,20 +57032990,20 +57033100,20 +57033910,20 +57033920,20 +57033990,20 +57039090,20 +57041000,20 +57042090,20 +57049090,20 +57050023,20 +57050024,20 +57050029,20 +57050039,20 +57050041,20 +57050042,20 +57050049,20 +57050090,20 +58011000,10 +58012210,10 +58012290,10 +58012300,10 +58012790,10 +58013200,20 +58013300,20 +58013610,20 +58013690,20 +58013710,20 +58013720,20 +58013790,20 +58019090,10 +58022000,10 +58030099,10 +58041010,10 +58041090,10 +58042100,20 +58042910,10 +58042990,10 +58043000,10 +58050090,10 +58061000,10 +58062000,10 +58063110,10 +58063120,10 +58063190,10 +58063200,20 +58063920,10 +58063990,10 +58064000,10 +58071010,10 +58071020,10 +58071090,10 +58079010,10 +58079090,10 +58081010,10 +58081090,10 +58089010,10 +58089020,10 +58089030,10 +58089040,10 +58089090,10 +58090090,10 +58101000,10 +58109190,10 +58109210,10 +58109290,10 +58109910,10 +58109990,10 +58110010,10 +58110020,10 +58110090,10 +59011020,10 
+59011090,10 +59019010,10 +59019020,0 +59019090,10 +59021010,20 +59021090,20 +59022010,20 +59022090,20 +59029090,20 +59031010,20 +59031090,20 +59032010,20 +59032090,20 +59039010,20 +59039020,20 +59039090,20 +59041000,10 +59050010,10 +59050090,10 +59061000,10 +59069110,0 +59069190,10 +59069910,10 +59069920,10 +59069990,10 +59070012,10 +59070019,10 +59070099,10 +59080010,10 +59080090,10 +59090010,10 +59090020,10 +59090090,10 +59100010,20 +59100030,20 +59100060,20 +59100090,20 +59111000,10 +59112000,10 +59113110,10 +59113120,10 +59113130,10 +59113150,10 +59113190,10 +59113210,10 +59113220,10 +59113230,10 +59113250,10 +59113290,10 +59114000,10 +59119010,10 +59119020,10 +59119090,10 +60011010,10 +60011020,10 +60011090,10 +60012200,20 +60012900,10 +60019100,10 +60019200,20 +60019990,10 +60024000,10 +60029000,10 +60033000,20 +60034000,20 +60039000,10 +60041000,20 +60049000,20 +60052200,25 +60053500,20 +60053600,20 +60053710,20 +60053790,20 +60053800,20 +60053900,20 +60054200,20 +60054400,20 +60059000,10 +60061000,10 +60062100,10 +60062200,10 +60062300,10 +60062400,10 +60063100,20 +60063200,20 +60063300,20 +60063400,20 +60064100,20 +60064200,20 +60064300,20 +60064400,20 +60069000,10 +61012000,20 +61013010,20 +61013020,20 +61019010,20 +61019090,20 +61021000,20 +61022000,20 +61023010,20 +61023020,20 +61029010,20 +61029090,20 +61031020,20 +61031030,20 +61031090,20 +61032200,20 +61032300,20 +61032990,20 +61033100,20 +61033200,20 +61033300,20 +61033990,20 +61034100,20 +61034200,20 +61034300,20 +61034910,20 +61034920,20 +61034990,20 +61041300,20 +61041990,20 +61042200,20 +61042300,20 +61042920,20 +61042990,20 +61043100,20 +61043200,20 +61043300,20 +61043920,20 +61043990,20 +61044100,20 +61044200,20 +61044300,20 +61044400,20 +61044910,20 +61044990,20 +61045100,20 +61045200,20 +61045300,20 +61045920,20 +61045990,20 +61046100,20 +61046200,20 +61046300,20 +61046910,20 +61046920,20 +61046990,20 +61051010,20 +61051020,20 +61051090,20 +61052010,20 +61052020,20 +61059010,20 +61059090,20 +61061000,20 +61062010,20 +61062020,20 +61069010,20 +61069090,20 +61071100,20 +61071210,20 +61071220,20 +61071990,20 +61072100,20 +61072210,20 +61072990,20 +61079110,20 +61079190,20 +61079990,20 +61081110,20 +61081120,20 +61081920,20 +61081990,20 +61082100,20 +61082210,20 +61082220,20 +61082990,20 +61083100,20 +61083210,20 +61083220,20 +61083990,20 +61089100,20 +61089210,20 +61089220,20 +61089990,20 +61091000,20 +61099010,20 +61099020,20 +61099040,20 +61099090,20 +61101110,20 +61101120,20 +61101190,20 +61101200,20 +61101900,20 +61102000,20 +61103010,20 +61103020,20 +61109000,20 +61112000,20 +61113000,20 +61119020,20 +61119090,20 +61121100,20 +61121200,20 +61121990,20 +61122030,20 +61122090,20 +61123100,20 +61123920,20 +61123990,20 +61124100,20 +61124920,20 +61124990,20 +61130000,20 +61142000,20 +61143010,20 +61143020,20 +61149090,20 +61151000,20 +61152100,20 +61152200,20 +61152910,20 +61152930,20 +61152990,20 +61153000,20 +61159400,20 +61159500,20 +61159600,20 +61159910,20 +61159990,20 +61161000,20 +61169100,20 +61169200,20 +61169300,20 +61169910,20 +61169990,20 +61171010,20 +61171020,20 +61171030,20 +61171040,20 +61171090,20 +61178010,20 +61178030,20 +61178040,20 +61178090,20 +61179000,20 +62012010,20 +62012090,20 +62013010,20 +62013090,20 +62014010,20 +62014090,20 +62019010,20 +62019090,20 +62022010,20 +62022090,20 +62023010,20 +62023090,20 +62024010,20 +62024090,20 +62029010,20 +62029090,20 +62031100,20 +62031200,20 +62031910,20 +62031990,20 +62032200,20 +62032300,20 +62033110,20 +62033190,20 +62033200,20 +62033300,20 
+62033919,20 +62033990,20 +62034100,20 +62034210,20 +62034290,20 +62034300,20 +62034910,20 +62034990,20 +62041100,20 +62041390,20 +62041919,20 +62041999,20 +62042210,20 +62042290,20 +62042390,20 +62042919,20 +62042999,20 +62043190,20 +62043290,20 +62043310,20 +62043390,20 +62043919,20 +62043999,20 +62044120,20 +62044190,20 +62044220,20 +62044290,20 +62044310,20 +62044390,20 +62044410,20 +62044490,20 +62044919,20 +62044999,20 +62045100,20 +62045290,20 +62045310,20 +62045390,20 +62045911,20 +62045919,20 +62045999,20 +62046110,20 +62046190,20 +62046210,20 +62046290,20 +62046300,20 +62046911,20 +62046919,20 +62046990,20 +62052010,20 +62052020,20 +62052090,20 +62053010,20 +62053090,20 +62059011,20 +62059019,20 +62059090,20 +62061010,20 +62061090,20 +62062000,20 +62063010,20 +62063090,20 +62064000,20 +62069000,20 +62071100,20 +62071930,10 +62072110,20 +62072190,20 +62072200,20 +62072900,20 +62079190,20 +62079919,20 +62079990,20 +62081100,20 +62081990,20 +62082190,20 +62082200,20 +62082910,20 +62082990,20 +62089110,20 +62089190,20 +62089210,20 +62089290,20 +62089920,20 +62089990,20 +62092090,20 +62093000,20 +62099090,20 +62101020,20 +62101090,20 +62102010,20 +62102020,20 +62102090,20 +62103010,20 +62103090,20 +62104010,20 +62104090,20 +62105000,20 +62111100,20 +62111200,20 +62112000,20 +62113200,20 +62113300,20 +62113990,20 +62114211,20 +62114219,20 +62114299,20 +62114310,20 +62114390,20 +62114910,20 +62114929,20 +62114999,20 +62121000,20 +62122000,20 +62123000,20 +62129010,20 +62129090,20 +62132000,20 +62139010,20 +62139090,20 +62141010,20 +62141020,20 +62141090,20 +62142010,20 +62142029,20 +62142090,20 +62143010,20 +62143090,20 +62144090,20 +62149010,20 +62149049,20 +62149059,20 +62149099,20 +62151010,20 +62151090,20 +62152000,20 +62159010,20 +62159090,20 +62160010,20 +62160090,20 +62171010,20 +62171020,20 +62171040,20 +62171050,20 +62171060,20 +62171070,20 +62171090,20 +62179010,20 +62179020,20 +62179090,20 +63011000,10 +63012000,10 +63013000,10 +63014000,20 +63019090,20 +63021010,10 +63021090,10 +63022190,10 +63022200,10 +63023100,10 +63023200,10 +63023900,10 +63024030,10 +63024090,10 +63025110,10 +63025190,10 +63025300,10 +63025900,10 +63026010,10 +63026090,10 +63029110,10 +63029190,10 +63029300,10 +63029900,10 +63031200,10 +63031900,10 +63039100,10 +63039200,10 +63039990,10 +63041100,10 +63041910,10 +63041930,10 +63041940,10 +63041990,10 +63049110,10 +63049190,10 +63049219,10 +63049221,10 +63049229,10 +63049231,10 +63049239,10 +63049241,10 +63049249,10 +63049250,10 +63049260,10 +63049270,10 +63049281,10 +63049289,10 +63049291,10 +63049299,10 +63049300,10 +63049910,10 +63049999,10 +63051030,10 +63051040,10 +63051060,10 +63051090,10 +63052000,10 +63053200,10 +63053300,10 +63053900,10 +63059000,10 +63061200,10 +63061910,10 +63061990,10 +63062200,10 +63062910,10 +63062990,10 +63063000,10 +63069010,10 +63069090,10 +63071010,10 +63071020,10 +63071030,10 +63071090,10 +63072090,10 +63079011,10 +63079013,10 +63079019,10 +63079020,10 +63079091,10 +63079099,10 +63080000,10 +63090000,10 +63101010,20 +63101020,20 +63109010,20 +63109020,20 +63109030,20 +63109040,20 +63109090,25 +64011010,35 +64011090,35 +64019210,35 +64019290,35 +64019990,35 +64021290,35 +64021910,35 +64021990,35 +64022010,5 +64022090,35 +64029110,35 +64029190,35 +64029910,35 +64029990,35 +64031200,35 +64031990,35 +64032011,35 +64032013,35 +64032019,35 +64032022,35 +64032023,35 +64032029,35 +64032090,35 +64034000,35 +64035111,35 +64035112,35 +64035119,35 +64035190,35 +64035910,35 +64035920,35 +64035930,35 +64035990,35 +64039110,35 
+64039120,35 +64039190,35 +64039910,35 +64039920,35 +64039990,35 +64041110,35 +64041120,35 +64041190,35 +64041910,35 +64041920,35 +64041990,35 +64042000,35 +64051000,35 +64052000,35 +64059000,35 +64061010,20 +64061020,20 +64061090,20 +64062000,20 +64069030,20 +64069040,20 +64069050,20 +64069090,20 +65010010,10 +65010090,10 +65020020,10 +65020090,10 +65040000,10 +65050010,10 +65050090,10 +65061010,10 +65061090,10 +65069100,10 +65069900,10 +65070000,10 +66011000,20 +66019100,20 +66019900,20 +66020000,10 +66032000,10 +66039010,10 +66039090,10 +67010090,10 +67021010,20 +67021090,20 +67029010,20 +67029090,20 +67030010,10 +67041100,10 +67041910,10 +67041990,10 +67042010,10 +67042090,10 +67049000,10 +68010000,10 +68022110,40 +68022190,40 +68022310,40 +68022390,40 +68022900,40 +68029100,40 +68029200,10 +68029300,40 +68029900,10 +68030000,10 +68041000,10 +68042110,10 +68042190,10 +68042210,10 +68042220,10 +68042290,10 +68042310,10 +68042390,10 +68043010,10 +68043020,10 +68051010,10 +68051090,10 +68052010,10 +68052020,10 +68052030,10 +68052040,10 +68052090,10 +68053000,10 +68061000,10 +68062000,10 +68069000,10 +68071090,10 +68079010,10 +68079090,10 +68080000,10 +68091100,10 +68091900,10 +68099000,10 +68101110,10 +68101190,10 +68101990,10 +68109100,10 +68109990,10 +68114010,10 +68114090,10 +68118200,10 +68118990,10 +68129100,10 +68129921,10 +68129922,10 +68129990,10 +68132010,15 +68132090,15 +68138100,15 +68138900,15 +68141010,10 +68141020,10 +68141030,10 +68141090,10 +68149040,10 +68149090,10 +68151100,10 +68151200,10 +68151300,10 +68151900,10 +68159100,7.5 +68159910,10 +68159920,10 +68159990,10 +69010010,7.5 +69010020,7.5 +69010030,7.5 +69010090,7.5 +69021010,7.5 +69021040,7.5 +69021090,7.5 +69022010,7.5 +69022020,7.5 +69022050,7.5 +69022090,7.5 +69029010,7.5 +69029090,7.5 +69031000,7.5 +69032010,7.5 +69032090,7.5 +69039010,7.5 +69039030,7.5 +69039090,7.5 +69041000,10 +69049000,10 +69051000,15 +69059000,15 +69060000,10 +69072100,15 +69072200,15 +69072300,15 +69073010,15 +69074010,15 +69091100,10 +69091200,10 +69091910,10 +69091990,10 +69099000,10 +69101000,10 +69109000,10 +69111011,20 +69111019,20 +69111029,20 +69119010,10 +69119090,20 +69120010,20 +69120020,20 +69120030,10 +69120040,20 +69120090,20 +69131000,10 +69139000,10 +69141000,10 +69149000,10 +70010010,5 +70010090,10 +70021000,10 +70022010,10 +70022090,10 +70023100,10 +70023200,10 +70023900,10 +70031290,10 +70031910,10 +70031990,10 +70032090,10 +70033090,10 +70042099,10 +70049011,10 +70049019,10 +70049091,10 +70049099,10 +70051010,10 +70051090,10 +70052110,10 +70052190,10 +70052910,10 +70052990,10 +70053090,10 +70071100,15 +70071900,15 +70072110,15 +70072190,15 +70072900,15 +70080010,10 +70080020,10 +70080090,10 +70091010,15 +70091090,15 +70099100,15 +70099200,15 +70101000,10 +70102000,10 +70109000,10 +70111020,10 +70111090,10 +70119090,10 +70131000,20 +70132200,20 +70132800,20 +70133300,20 +70133700,20 +70134100,20 +70134200,20 +70134900,20 +70139100,20 +70139900,20 +70140010,10 +70140020,10 +70151010,5 +70151020,10 +70159010,10 +70159020,10 +70159090,10 +70161000,10 +70169000,10 +70171000,10 +70172000,10 +70179010,10 +70179020,10 +70179090,10 +70181010,10 +70181020,20 +70181090,10 +70182000,10 +70189010,10 +70189090,10 +70191100,10 +70191200,10 +70191300,10 +70191400,10 +70191500,10 +70191900,10 +70196100,10 +70196200,10 +70196300,10 +70196400,10 +70196500,10 +70196600,10 +70196900,10 +70197100,10 +70197300,10 +70198000,10 +70199000,10 +70200011,10 +70200019,10 +70200021,10 +70200029,10 +70200090,10 +71011010,5 +71011020,10 +71012100,5 
+71012200,10 +71022110,10 +71023100,0 +71023910,5 +71023990,5 +71031031,10 +71031032,0.5 +71031034,0.5 +71031039,10 +71031041,0.5 +71031042,0.5 +71031043,0.5 +71031049,0.5 +71031051,0.5 +71031059,0.5 +71031062,0.5 +71031069,0.5 +71031071,0.5 +71031072,0.5 +71031079,0.5 +71031090,10 +71039110,10 +71039120,10 +71039130,10 +71039911,5 +71039912,5 +71039913,5 +71039919,10 +71039921,10 +71039929,5 +71039931,5 +71039932,5 +71039939,10 +71039941,5 +71039942,5 +71039943,5 +71039944,5 +71039949,10 +71039951,10 +71039952,5 +71039959,5 +71039990,10 +71042100,0.5 +71042900,10 +71049100,10 +71049900,15 +71051000,10 +71059000,10 +71061000,12.5 +71069110,12.5 +71069290,12.5 +71070000,10 +71081200,12.5 +71081300,12.5 +71101110,12.5 +71101120,12.5 +71101900,12.5 +71102100,12.5 +71102900,12.5 +71103100,2.5 +71103900,2.5 +71104100,12.5 +71104900,12.5 +71110000,12.5 +71131110,20 +71131120,20 +71131130,20 +71131190,20 +71131910,20 +71131920,20 +71131930,20 +71131940,20 +71131950,20 +71131960,20 +71131990,20 +71132000,20 +71141110,20 +71141120,20 +71141910,20 +71142010,20 +71142020,20 +71159010,10 +71159020,10 +71159090,10 +71161000,10 +71162000,10 +71171100,20 +71171910,20 +71171920,20 +71171990,20 +71179010,20 +71179090,20 +71181000,12.5 +71189000,10 +72011000,5 +72015010,5 +72015090,5 +72021100,5 +72021900,5 +72022100,5 +72022900,5 +72023000,5 +72024100,5 +72024900,5 +72025000,5 +72026000,2.5 +72027000,5 +72028000,5 +72029100,5 +72029200,5 +72029300,5 +72029911,5 +72029921,5 +72029922,5 +72029931,5 +72029990,5 +72031000,5 +72039000,5 +72041000,0 +72042110,0 +72042190,2.5 +72042920,0 +72042990,2.5 +72044100,0 +72044900,2.5 +72045000,0 +72051011,5 +72051019,5 +72051021,5 +72051029,5 +72051090,5 +72052100,5 +72052910,5 +72052990,5 +72061010,7.5 +72069019,15 +72069099,15 +72071190,15 +72071220,0 +72071290,7.5 +72071920,7.5 +72071990,7.5 +72072020,7.5 +72072090,7.5 +72082510,15 +72082540,5 +72082590,15 +72082630,7.5 +72082640,0 +72082690,15 +72082710,0 +72082740,0 +72082790,7.5 +72083610,0 +72083690,7.5 +72083710,0 +72083740,0 +72083790,7.5 +72083810,7.5 +72083830,7.5 +72083840,0 +72083890,7.5 +72083930,0 +72083940,0 +72083990,7.5 +72084010,7.5 +72084090,7.5 +72085110,15 +72085120,7.5 +72085190,7.5 +72085210,7.5 +72085230,15 +72085290,2.5 +72085430,7.5 +72089000,15 +72091530,0 +72091590,15 +72091620,0 +72091630,0 +72091690,7.5 +72091720,0 +72091730,15 +72091790,15 +72091820,0 +72091830,0 +72091890,7.5 +72092590,15 +72092620,0 +72092690,7.5 +72092720,0 +72092820,0 +72099000,15 +72101190,15 +72101290,27.5 +72103091,15 +72103099,15 +72104100,0 +72104910,15 +72104990,7.5 +72106100,7.5 +72106900,15 +72107000,15 +72109010,7.5 +72109090,15 +72111430,15 +72111490,7.5 +72111910,7.5 +72111940,0 +72111990,15 +72112320,15 +72112330,7.5 +72112350,7.5 +72112390,7.5 +72112910,0 +72112950,15 +72112990,15 +72119090,15 +72121090,7.5 +72122099,15 +72123010,0 +72123091,7.5 +72123099,15 +72124000,15 +72125010,7.5 +72125090,15 +72126000,15 +72132090,7.5 +72139190,7.5 +72139920,0 +72139990,15 +72141090,15 +72142090,15 +72143000,7.5 +72149190,15 +72149990,15 +72151000,15 +72155090,15 +72159010,7.5 +72159020,15 +72159090,15 +72161000,7.5 +72162100,15 +72163100,15 +72163200,15 +72163300,15 +72164000,15 +72165000,7.5 +72166100,15 +72166900,15 +72169100,15 +72169990,15 +72171010,15 +72171020,15 +72171030,15 +72172010,7.5 +72172020,7.5 +72172030,7.5 +72173010,7.5 +72173020,15 +72173030,15 +72179012,7.5 +72179019,7.5 +72179092,7.5 +72179093,7.5 +72179099,15 +72181000,7.5 +72189100,0 +72189910,7.5 +72189990,15 +72191111,7.5 +72191190,7.5 
+72191200,7.5 +72191300,7.5 +72191400,15 +72192111,7.5 +72192112,7.5 +72192121,7.5 +72192122,7.5 +72192131,7.5 +72192141,7.5 +72192190,7.5 +72192211,7.5 +72192212,7.5 +72192219,15 +72192299,7.5 +72192390,7.5 +72193111,7.5 +72193121,15 +72193190,7.5 +72193210,7.5 +72193220,15 +72193290,15 +72193310,15 +72193320,15 +72193390,15 +72193410,7.5 +72193420,15 +72193490,15 +72193510,15 +72193520,15 +72193590,7.5 +72199011,7.5 +72199012,7.5 +72199013,15 +72199090,22.5 +72201129,7.5 +72201190,15 +72201210,7.5 +72201222,7.5 +72201229,7.5 +72201290,7.5 +72202010,7.5 +72202021,7.5 +72202022,7.5 +72202029,15 +72202090,15 +72209022,7.5 +72209029,15 +72209090,15 +72210011,7.5 +72210019,7.5 +72210090,7.5 +72221111,7.5 +72221119,15 +72221191,7.5 +72221199,15 +72221919,15 +72221999,15 +72222011,7.5 +72222019,15 +72222091,7.5 +72222092,7.5 +72222099,15 +72223012,7.5 +72223019,15 +72223091,7.5 +72223092,7.5 +72223099,15 +72224010,15 +72224020,15 +72230010,15 +72230091,15 +72230092,15 +72230099,15 +72241000,7.5 +72249010,15 +72249040,15 +72249091,7.5 +72249099,15 +72251100,7.5 +72251910,0 +72251920,7.5 +72251990,7.5 +72253090,7.5 +72254012,7.5 +72254013,7.5 +72254019,15 +72254020,15 +72254030,7.5 +72255010,7.5 +72255020,7.5 +72255030,7.5 +72259110,0 +72259190,0 +72259210,0 +72259290,7.5 +72259910,7.5 +72259990,7.5 +72261100,15 +72261910,15 +72261920,15 +72261990,15 +72262011,7.5 +72262012,0 +72262021,7.5 +72262022,7.5 +72269110,7.5 +72269120,7.5 +72269190,7.5 +72269210,15 +72269220,0 +72269230,7.5 +72269910,15 +72269930,5 +72269952,7.5 +72269990,15 +72271000,7.5 +72279010,7.5 +72279040,7.5 +72279050,7.5 +72279090,15 +72281010,7.5 +72281090,15 +72283011,7.5 +72283019,15 +72283021,7.5 +72283024,15 +72283029,15 +72284000,15 +72285010,7.5 +72285090,15 +72286011,7.5 +72286012,15 +72286094,15 +72286099,15 +72287012,7.5 +72287021,7.5 +72287022,15 +72288010,7.5 +72288090,15 +72292000,15 +72299011,7.5 +72299016,15 +72299021,7.5 +72299032,15 +72299033,7.5 +72299034,7.5 +72299040,7.5 +72299059,15 +72299060,15 +72299090,15 +73011000,10 +73012010,15 +73012090,15 +73021012,10 +73021090,10 +73023000,10 +73024000,10 +73029010,10 +73029090,15 +73030090,15 +73041110,15 +73041190,15 +73041910,15 +73041920,15 +73041990,15 +73042200,0 +73042390,10 +73042400,15 +73042910,10 +73042990,15 +73043111,10 +73043119,15 +73043121,10 +73043129,15 +73043139,10 +73043911,15 +73043919,15 +73043921,15 +73043929,10 +73043939,15 +73044100,15 +73044900,15 +73045110,10 +73045130,15 +73045910,15 +73045920,10 +73045930,15 +73049000,15 +73051119,10 +73051121,15 +73051129,15 +73051229,10 +73051919,15 +73052090,2.5 +73053190,10 +73053990,10 +73059099,15 +73061100,10 +73061919,15 +73061921,10 +73061929,15 +73062100,10 +73062919,10 +73063010,0 +73063090,15 +73064000,15 +73065000,15 +73066100,15 +73066900,15 +73069011,10 +73069019,15 +73069090,15 +73071110,15 +73071120,15 +73071190,15 +73071900,15 +73072100,15 +73072200,15 +73072300,15 +73072900,25 +73079110,15 +73079190,15 +73079210,15 +73079290,15 +73079310,10 +73079390,15 +73079910,15 +73079990,25 +73081000,10 +73082011,0 +73082019,15 +73082020,10 +73083000,15 +73084000,15 +73089010,15 +73089020,15 +73089040,10 +73089050,15 +73089060,10 +73089090,25 +73090010,10 +73090030,10 +73090040,10 +73090090,15 +73101010,10 +73101090,15 +73102110,15 +73102190,15 +73102910,15 +73102920,10 +73102990,25 +73110010,15 +73110020,15 +73110030,10 +73110090,15 +73121010,15 +73121020,15 +73121030,15 +73121090,15 +73129000,15 +73130010,15 +73130020,10 +73141200,15 +73141410,15 +73141490,15 +73141910,15 +73141990,15 
+73142010,15 +73142090,15 +73143100,10 +73143900,10 +73144110,15 +73144190,10 +73144210,15 +73144290,15 +73144910,15 +73144990,15 +73145000,15 +73151100,15 +73151210,15 +73151220,10 +73151290,15 +73151900,15 +73152000,10 +73158100,15 +73158200,15 +73158900,15 +73159000,15 +73160010,15 +73160090,15 +73170012,10 +73170013,10 +73170019,15 +73170029,15 +73170030,15 +73170091,15 +73170099,15 +73181110,15 +73181190,15 +73181200,15 +73181300,15 +73181400,15 +73181500,25 +73181600,25 +73181900,15 +73182100,15 +73182200,15 +73182300,15 +73182400,15 +73182910,15 +73182990,25 +73194010,15 +73194090,15 +73199000,15 +73201011,15 +73201012,10 +73201019,15 +73201020,15 +73202000,15 +73209010,15 +73209020,15 +73209090,25 +73211110,15 +73211120,15 +73211190,15 +73211290,15 +73211910,10 +73211990,10 +73218100,10 +73218990,15 +73219000,15 +73221100,10 +73221900,15 +73229010,15 +73229090,15 +73231000,20 +73239110,20 +73239190,20 +73239200,20 +73239310,20 +73239390,20 +73239420,20 +73239490,20 +73239910,20 +73239920,20 +73239990,20 +73241000,15 +73242100,15 +73242900,15 +73249000,15 +73251000,15 +73259100,10 +73259910,15 +73259920,15 +73259930,15 +73259991,10 +73259992,15 +73259993,15 +73259999,25 +73261100,15 +73261910,15 +73261990,25 +73262010,15 +73262090,15 +73269010,15 +73269020,15 +73269030,15 +73269040,15 +73269050,15 +73269060,15 +73269070,15 +73269080,15 +73269091,15 +73269099,25 +74020010,5 +74020090,5 +74031100,5 +74031300,5 +74031900,5 +74032100,5 +74032210,5 +74032290,5 +74032900,5 +74040012,2.5 +74040019,2.5 +74040022,2.5 +74040025,2.5 +74040029,2.5 +74050000,5 +74061000,5 +74062000,5 +74071010,5 +74071020,5 +74071030,5 +74071059,5 +74071090,5 +74072110,5 +74072120,5 +74072190,5 +74072910,5 +74072929,5 +74072990,5 +74081110,5 +74081190,5 +74081910,5 +74081920,5 +74081990,5 +74082110,5 +74082190,5 +74082290,5 +74082910,5 +74082990,5 +74091100,5 +74091900,5 +74092100,5 +74092900,5 +74093100,5 +74093900,5 +74094000,5 +74099000,5 +74101100,5 +74101200,5 +74102100,5 +74102200,5 +74111000,7.5 +74112100,7.5 +74112200,0 +74112900,7.5 +74121000,7.5 +74122011,7.5 +74122012,7.5 +74122019,7.5 +74122090,7.5 +74130000,10 +74151000,10 +74152100,10 +74152900,10 +74153310,10 +74153390,10 +74153910,10 +74153990,10 +74181010,20 +74181021,20 +74181022,0 +74181039,20 +74181090,20 +74182010,10 +74182020,10 +74192000,10 +74198010,10 +74198020,10 +74198030,10 +74198040,10 +74198050,10 +74198090,10 +75012000,0 +75021000,0 +75022030,0 +75022090,0 +75030010,0 +75040000,0 +75051110,0 +75051120,0 +75051210,0 +75051220,0 +75052100,0 +75052200,0 +75061000,0 +75062000,0 +75071200,0 +75072000,0 +75081000,0 +75089010,0 +75089030,0 +75089090,0 +76011010,7.5 +76011040,7.5 +76011090,7.5 +76012010,7.5 +76012020,7.5 +76012090,7.5 +76020010,2.5 +76031010,7.5 +76031090,7.5 +76032000,7.5 +76041010,7.5 +76041020,7.5 +76041031,7.5 +76041039,7.5 +76042100,7.5 +76042910,7.5 +76042920,7.5 +76042930,7.5 +76042990,7.5 +76051100,7.5 +76051999,7.5 +76052100,7.5 +76052990,7.5 +76061110,7.5 +76061190,7.5 +76061200,7.5 +76069110,7.5 +76069120,7.5 +76069190,7.5 +76069210,7.5 +76069290,7.5 +76071110,7.5 +76071190,7.5 +76071910,7.5 +76071991,7.5 +76071993,7.5 +76071994,7.5 +76071995,7.5 +76071999,7.5 +76072010,7.5 +76072090,7.5 +76081000,10 +76082000,10 +76090000,10 +76101000,10 +76109010,10 +76109020,10 +76109030,10 +76109090,10 +76110000,10 +76121030,10 +76121090,10 +76129010,10 +76129020,10 +76129030,10 +76129090,10 +76130011,10 +76130019,10 +76130029,10 +76130099,10 +76141000,10 +76149000,10 +76151011,20 +76151021,20 +76151029,20 +76151030,20 
+76151040,20 +76151090,20 +76152010,10 +76152020,10 +76152090,10 +76161000,10 +76169100,10 +76169910,10 +76169920,10 +76169930,10 +76169990,10 +78011000,5 +78019100,5 +78019910,5 +78019920,5 +78019990,5 +78020010,5 +78020090,5 +78041120,5 +78041990,5 +78042000,5 +78060010,10 +78060020,10 +78060090,10 +79011100,5 +79011200,5 +79012010,7.5 +79012090,7.5 +79020010,5 +79031000,5 +79039000,5 +79040012,0 +79040019,5 +79040029,5 +79040030,5 +79050020,5 +79050030,5 +79050040,5 +79070010,10 +79070090,10 +80011090,5 +80020090,5 +80030010,5 +80030020,5 +80030040,5 +80070010,10 +80070090,10 +81011000,5 +81019400,5 +81019600,5 +81019910,10 +81019990,10 +81021000,5 +81029400,5 +81029510,5 +81029590,5 +81029600,5 +81029900,10 +81032090,5 +81039900,10 +81041100,5 +81041900,5 +81043010,5 +81043020,5 +81049010,5 +81049090,10 +81052010,2.5 +81052020,5 +81052030,5 +81059000,10 +81061010,5 +81069090,10 +81082000,5 +81083000,5 +81089010,5 +81089090,10 +81099100,10 +81099900,10 +81101000,2.5 +81110010,5 +81110030,5 +81110090,10 +81121200,5 +81121900,10 +81122100,5 +81122900,10 +81123900,10 +81126910,5 +81126920,5 +81129900,10 +81130010,10 +81130020,10 +81130090,10 +82011000,10 +82013000,10 +82014000,10 +82015000,10 +82016000,10 +82019000,10 +82021010,10 +82021020,10 +82021090,10 +82022000,10 +82023100,10 +82023900,10 +82024000,10 +82029110,10 +82029120,10 +82029910,10 +82029990,10 +82031000,10 +82032000,10 +82033000,10 +82034010,10 +82034090,10 +82041110,10 +82041120,10 +82041210,10 +82041220,10 +82042000,10 +82051000,10 +82052000,10 +82053000,10 +82054000,10 +82055110,10 +82055190,10 +82055910,10 +82055920,10 +82055930,10 +82055940,10 +82055990,10 +82056000,10 +82057000,10 +82059010,10 +82059020,10 +82059030,10 +82059090,10 +82060010,10 +82060090,10 +82071300,10 +82071900,10 +82072000,10 +82073000,10 +82074010,10 +82074090,10 +82075000,10 +82076010,10 +82076090,10 +82077010,10 +82077090,10 +82078000,10 +82079010,10 +82079020,10 +82079030,10 +82079090,10 +82081000,10 +82082000,10 +82083000,10 +82084000,10 +82089010,10 +82089020,10 +82089030,10 +82089040,10 +82089090,10 +82090010,10 +82090090,10 +82100000,10 +82111000,10 +82119100,10 +82119200,10 +82119310,10 +82119390,10 +82119400,10 +82119500,10 +82121010,10 +82121090,10 +82122011,10 +82122019,10 +82122020,10 +82129000,10 +82130000,10 +82141010,10 +82141090,10 +82142010,10 +82142090,10 +82149010,10 +82149090,10 +82151000,10 +82152000,10 +82159100,10 +82159900,10 +83011000,20 +83012000,15 +83013000,20 +83014010,20 +83014090,20 +83015000,20 +83016000,20 +83017000,20 +83021010,15 +83021020,15 +83021090,15 +83022000,15 +83023010,15 +83023090,15 +83024110,15 +83024120,15 +83024190,15 +83024200,15 +83024900,15 +83025000,15 +83026000,15 +83030000,10 +83040000,20 +83051000,20 +83052000,20 +83059010,20 +83059020,20 +83059090,20 +83061000,20 +83062110,20 +83062120,20 +83062190,20 +83062910,20 +83062920,20 +83062990,20 +83063000,20 +83071000,10 +83079000,10 +83081010,10 +83081021,10 +83081029,10 +83082000,10 +83089011,10 +83089019,10 +83089020,10 +83089031,10 +83089039,10 +83089040,10 +83089091,10 +83089099,10 +83091000,10 +83099010,10 +83099020,10 +83099030,10 +83099090,10 +83100010,20 +83100090,20 +83111000,10 +83112000,10 +83113010,10 +83113090,10 +83119000,10 +84021100,10 +84021920,10 +84021990,10 +84022000,10 +84029020,5 +84029090,10 +84031000,7.5 +84039000,7.5 +84041000,10 +84042000,10 +84049000,10 +84051010,7.5 +84051090,7.5 +84059000,7.5 +84068200,10 +84069000,10 +84071000,15 +84072100,5 +84072900,15 +84073210,15 +84073310,15 +84073320,15 +84073410,15 +84073490,15 
+84079010,15 +84079090,15 +84081093,15 +84082020,15 +84089010,7.5 +84089090,15 +84091000,15 +84099111,15 +84099112,15 +84099113,15 +84099114,15 +84099120,15 +84099191,15 +84099192,15 +84099193,7.5 +84099194,15 +84099199,15 +84099911,15 +84099912,15 +84099913,15 +84099914,15 +84099920,15 +84099930,15 +84099941,15 +84099942,15 +84099949,15 +84099990,15 +84109000,10 +84111100,7.5 +84111200,7.5 +84112100,2.5 +84112200,0 +84118100,10 +84118210,0 +84118220,0 +84119100,7.5 +84119900,10 +84121000,7.5 +84122100,7.5 +84122910,7.5 +84122990,7.5 +84123100,7.5 +84123900,7.5 +84128019,10 +84128020,10 +84128090,10 +84129010,7.5 +84129020,7.5 +84129030,7.5 +84129090,7.5 +84131110,10 +84131191,7.5 +84131199,7.5 +84131910,10 +84131990,7.5 +84132000,10 +84133010,7.5 +84133020,7.5 +84133030,7.5 +84133090,7.5 +84134000,7.5 +84135010,7.5 +84135021,7.5 +84135029,7.5 +84135090,7.5 +84136010,7.5 +84136020,7.5 +84136090,7.5 +84137010,7.5 +84137091,7.5 +84137093,7.5 +84137095,7.5 +84137096,7.5 +84137099,7.5 +84138110,7.5 +84138120,7.5 +84138130,7.5 +84138190,7.5 +84138200,7.5 +84139110,7.5 +84139120,7.5 +84139130,7.5 +84139140,10 +84139190,7.5 +84139200,7.5 +84141000,7.5 +84142010,10 +84142020,10 +84142090,7.5 +84143000,15 +84144010,15 +84144020,7.5 +84144030,15 +84144090,15 +84145110,20 +84145120,20 +84145130,20 +84145150,20 +84145190,20 +84145910,10 +84145920,20 +84145930,10 +84145990,10 +84146000,7.5 +84147000,7.5 +84148011,15 +84148019,15 +84148020,15 +84148030,15 +84148090,15 +84149011,7.5 +84149012,10 +84149019,7.5 +84149020,7.5 +84149030,10 +84149040,7.5 +84149090,7.5 +84151010,20 +84151090,20 +84152010,20 +84152090,20 +84158110,20 +84158190,20 +84158210,20 +84158290,20 +84158310,20 +84158390,20 +84159000,20 +84161000,7.5 +84162000,7.5 +84169000,7.5 +84171000,7.5 +84172000,7.5 +84178090,7.5 +84179000,7.5 +84181010,15 +84181090,20 +84182100,20 +84182900,20 +84183010,15 +84183090,15 +84184010,15 +84184090,15 +84185000,15 +84186100,15 +84186910,15 +84186920,15 +84186930,15 +84186940,15 +84186950,15 +84186990,15 +84189100,7.5 +84189900,7.5 +84191190,7.5 +84191200,10 +84191910,10 +84191920,7.5 +84192010,7.5 +84192090,7.5 +84193300,7.5 +84193400,7.5 +84193500,7.5 +84193900,7.5 +84194090,7.5 +84195010,7.5 +84195020,7.5 +84195030,7.5 +84195090,7.5 +84196000,7.5 +84198110,10 +84198120,10 +84198190,10 +84198910,10 +84198920,7.5 +84198930,7.5 +84198940,7.5 +84198960,7.5 +84198970,7.5 +84198980,7.5 +84198990,7.5 +84199010,10 +84199090,7.5 +84201000,7.5 +84209100,7.5 +84209900,7.5 +84211100,7.5 +84211200,7.5 +84211910,7.5 +84211920,7.5 +84211940,7.5 +84211950,7.5 +84211991,7.5 +84211999,7.5 +84212110,7.5 +84212120,10 +84212190,7.5 +84212200,7.5 +84212300,10 +84212900,7.5 +84213100,10 +84213200,15 +84213910,7.5 +84213920,7.5 +84213990,7.5 +84219100,7.5 +84219900,10 +84221100,10 +84221900,7.5 +84222000,7.5 +84223000,5 +84224000,5 +84229010,7.5 +84229020,10 +84229090,5 +84231000,10 +84232000,7.5 +84233000,7.5 +84238110,7.5 +84238190,7.5 +84238210,7.5 +84238290,7.5 +84238900,7.5 +84239010,10 +84239020,7.5 +84241000,7.5 +84242000,7.5 +84243000,7.5 +84244100,7.5 +84244900,7.5 +84248200,7.5 +84248910,7.5 +84248920,7.5 +84248990,7.5 +84249000,7.5 +84251110,7.5 +84251120,7.5 +84251910,7.5 +84251920,7.5 +84253100,7.5 +84253900,7.5 +84254100,7.5 +84254200,7.5 +84254900,7.5 +84261100,7.5 +84261200,7.5 +84261900,7.5 +84262000,7.5 +84263000,7.5 +84264100,7.5 +84264900,7.5 +84269100,7.5 +84269910,7.5 +84269990,7.5 +84271000,7.5 +84272000,7.5 +84279000,7.5 +84281011,7.5 +84281019,7.5 +84281020,7.5 +84282011,7.5 +84282019,7.5 
+84282020,7.5 +84283100,7.5 +84283200,7.5 +84283300,7.5 +84283900,7.5 +84284000,7.5 +84286000,7.5 +84287000,7.5 +84289020,7.5 +84289090,7.5 +84291110,7.5 +84291920,7.5 +84292000,7.5 +84293000,7.5 +84294010,7.5 +84294020,7.5 +84295100,7.5 +84295200,7.5 +84295900,7.5 +84301010,7.5 +84302000,7.5 +84303120,7.5 +84303190,7.5 +84303900,7.5 +84304110,7.5 +84304120,7.5 +84304130,7.5 +84304190,7.5 +84304900,7.5 +84305010,7.5 +84305090,7.5 +84306100,7.5 +84306900,7.5 +84311010,7.5 +84311090,7.5 +84312010,7.5 +84312090,7.5 +84313100,7.5 +84313910,7.5 +84313990,7.5 +84314100,7.5 +84314200,7.5 +84314310,7.5 +84314390,7.5 +84314910,7.5 +84314920,7.5 +84314930,7.5 +84314940,7.5 +84314990,7.5 +84321010,7.5 +84321020,7.5 +84321090,7.5 +84322910,7.5 +84322990,7.5 +84323100,7.5 +84323900,7.5 +84324100,7.5 +84324200,7.5 +84328010,7.5 +84328020,7.5 +84328090,7.5 +84329010,7.5 +84329090,7.5 +84331110,7.5 +84331190,10 +84331910,7.5 +84331990,10 +84332000,7.5 +84333000,7.5 +84334000,7.5 +84335100,7.5 +84335200,7.5 +84335900,7.5 +84336010,7.5 +84336020,7.5 +84339000,7.5 +84341000,7.5 +84342000,7.5 +84349010,7.5 +84349020,7.5 +84351000,7.5 +84359000,7.5 +84361000,7.5 +84362100,7.5 +84362900,7.5 +84368090,7.5 +84369100,7.5 +84369900,7.5 +84371000,7.5 +84378010,7.5 +84378020,7.5 +84378090,7.5 +84379010,7.5 +84379020,7.5 +84379090,7.5 +84381010,5 +84381020,5 +84382000,5 +84383010,5 +84383090,5 +84384000,5 +84385000,5 +84386000,5 +84388010,5 +84388030,5 +84388040,5 +84388090,5 +84389010,5 +84389090,5 +84391000,7.5 +84392000,7.5 +84393010,7.5 +84393090,7.5 +84399100,7.5 +84399900,7.5 +84401010,7.5 +84401090,7.5 +84409000,7.5 +84411010,7.5 +84411090,7.5 +84412000,7.5 +84413000,7.5 +84414000,7.5 +84418000,7.5 +84419000,7.5 +84423010,7.5 +84423090,7.5 +84424000,7.5 +84425010,7.5 +84425020,7.5 +84425031,7.5 +84425039,7.5 +84425090,7.5 +84431100,7.5 +84431200,7.5 +84431300,7.5 +84431400,7.5 +84431500,7.5 +84431600,7.5 +84431700,7.5 +84431910,7.5 +84431920,7.5 +84431930,7.5 +84431941,5 +84431949,7.5 +84431990,7.5 +84433100,0 +84433210,0 +84433220,0 +84433240,0 +84433250,0 +84433260,0 +84433290,10 +84433910,7.5 +84433940,7.5 +84433960,7.5 +84433990,7.5 +84439100,7.5 +84439910,0 +84439920,0 +84439930,0 +84439940,0 +84439951,10 +84439952,10 +84439953,10 +84439959,0 +84439960,7.5 +84439990,7.5 +84440010,5 +84440090,7.5 +84451110,7.5 +84451190,5 +84451210,5 +84451290,5 +84451300,5 +84451930,5 +84451960,7.5 +84451990,7.5 +84452011,5 +84452013,5 +84452014,7.5 +84452019,7.5 +84452090,5 +84453050,5 +84453090,5 +84454010,7.5 +84454090,7.5 +84459000,7.5 +84461011,7.5 +84461019,5 +84461090,5 +84462990,5 +84463011,5 +84463012,7.5 +84463019,7.5 +84463090,7.5 +84471111,7.5 +84471119,5 +84471120,7.5 +84471190,7.5 +84471219,5 +84471220,5 +84471290,7.5 +84472020,5 +84472030,5 +84472090,7.5 +84479010,5 +84479020,7.5 +84479030,7.5 +84481110,5 +84481190,7.5 +84481900,7.5 +84482000,7.5 +84483100,7.5 +84483210,7.5 +84483220,5 +84483290,7.5 +84483310,7.5 +84483320,5 +84483390,7.5 +84483910,7.5 +84483920,5 +84483990,7.5 +84484210,5 +84484220,7.5 +84484290,7.5 +84484910,7.5 +84484920,5 +84484950,7.5 +84484990,7.5 +84485110,7.5 +84485190,7.5 +84485900,7.5 +84490010,7.5 +84490090,7.5 +84501100,20 +84501900,20 +84502000,7.5 +84509010,10 +84509090,7.5 +84511010,7.5 +84511090,7.5 +84512100,10 +84512900,7.5 +84513010,10 +84513090,7.5 +84514019,7.5 +84514021,7.5 +84514029,7.5 +84514099,7.5 +84515000,7.5 +84518019,7.5 +84518021,5 +84518022,7.5 +84518029,7.5 +84518090,7.5 +84519000,7.5 +84521012,10 +84521019,10 +84521021,5 +84521022,10 +84521029,10 
+84522110,7.5 +84522120,7.5 +84522190,7.5 +84522900,7.5 +84523010,10 +84523090,7.5 +84529011,7.5 +84529019,7.5 +84529091,10 +84529099,7.5 +84531000,7.5 +84532000,7.5 +84538000,7.5 +84539010,7.5 +84539090,7.5 +84541000,7.5 +84542010,7.5 +84542020,7.5 +84543010,7.5 +84543020,7.5 +84543090,7.5 +84549000,7.5 +84551000,7.5 +84552110,7.5 +84552120,7.5 +84552200,7.5 +84553000,7.5 +84559000,7.5 +84561100,7.5 +84561200,7.5 +84562000,7.5 +84563000,7.5 +84564000,7.5 +84565000,7.5 +84569010,0 +84569020,7.5 +84569090,7.5 +84571010,7.5 +84571020,7.5 +84572090,7.5 +84573010,7.5 +84573090,7.5 +84581100,7.5 +84581911,7.5 +84581919,7.5 +84581990,7.5 +84589100,7.5 +84589910,7.5 +84589932,7.5 +84589959,7.5 +84589990,7.5 +84591000,7.5 +84592100,7.5 +84592910,7.5 +84592920,7.5 +84592930,7.5 +84592940,7.5 +84592950,7.5 +84592990,7.5 +84593100,7.5 +84593910,7.5 +84593990,7.5 +84594110,7.5 +84594190,7.5 +84594920,7.5 +84594930,7.5 +84594990,7.5 +84595110,7.5 +84595120,7.5 +84595130,7.5 +84595190,7.5 +84595910,7.5 +84595920,7.5 +84595930,7.5 +84595940,7.5 +84595990,7.5 +84596110,7.5 +84596190,7.5 +84596910,7.5 +84596940,7.5 +84596990,7.5 +84597010,7.5 +84597020,7.5 +84601200,7.5 +84601900,7.5 +84602200,7.5 +84602300,7.5 +84602400,7.5 +84602910,7.5 +84602920,7.5 +84602930,7.5 +84602990,7.5 +84603100,7.5 +84603910,7.5 +84603990,7.5 +84604012,7.5 +84604013,7.5 +84604019,7.5 +84604020,7.5 +84609010,7.5 +84609090,7.5 +84612011,7.5 +84612019,7.5 +84612020,7.5 +84613010,7.5 +84613090,7.5 +84614011,7.5 +84614012,7.5 +84614019,7.5 +84614023,7.5 +84614024,7.5 +84614025,7.5 +84614026,7.5 +84614029,7.5 +84615011,7.5 +84615012,7.5 +84615013,7.5 +84615015,7.5 +84615019,7.5 +84615021,7.5 +84615029,7.5 +84619000,7.5 +84621100,7.5 +84621900,7.5 +84622200,7.5 +84622300,7.5 +84622400,7.5 +84622500,0 +84622600,7.5 +84622900,7.5 +84623200,7.5 +84623300,7.5 +84623900,7.5 +84624200,7.5 +84624900,7.5 +84625100,7.5 +84625900,7.5 +84626100,7.5 +84626200,7.5 +84626300,7.5 +84626900,7.5 +84629000,7.5 +84631010,7.5 +84631020,7.5 +84631030,7.5 +84631090,7.5 +84632000,7.5 +84633010,7.5 +84633020,7.5 +84633030,7.5 +84633040,7.5 +84639010,7.5 +84639020,7.5 +84639030,7.5 +84639090,7.5 +84641010,7.5 +84641090,7.5 +84642000,7.5 +84649000,7.5 +84651000,7.5 +84659100,7.5 +84659200,7.5 +84659300,7.5 +84659400,7.5 +84659500,7.5 +84659600,7.5 +84659910,7.5 +84659990,7.5 +84661010,7.5 +84661020,7.5 +84662000,7.5 +84663010,7.5 +84663020,7.5 +84663090,7.5 +84669100,7.5 +84669200,7.5 +84669310,7.5 +84669390,7.5 +84669400,7.5 +84671110,7.5 +84671120,7.5 +84671190,7.5 +84671900,7.5 +84672100,7.5 +84672200,7.5 +84672900,7.5 +84678100,7.5 +84678910,7.5 +84678920,7.5 +84678990,7.5 +84679100,7.5 +84679200,7.5 +84679900,7.5 +84681000,7.5 +84682010,7.5 +84682090,7.5 +84688000,7.5 +84689000,7.5 +84701000,0 +84702100,0 +84702900,0 +84703000,0 +84705010,0 +84705020,0 +84709010,0 +84709020,0 +84713010,0 +84713090,0 +84714110,0 +84714120,0 +84714190,0 +84714900,0 +84715000,0 +84716010,0 +84716024,0 +84716025,0 +84716029,0 +84716040,0 +84716050,0 +84716060,0 +84716090,0 +84717010,0 +84717020,0 +84717030,0 +84717040,0 +84717050,0 +84717060,0 +84717070,0 +84717090,0 +84718000,0 +84719000,0 +84723000,7.5 +84729010,10 +84729020,7.5 +84729030,7.5 +84729040,7.5 +84729094,7.5 +84729099,7.5 +84732100,0 +84732900,0 +84733010,0 +84733020,0 +84733030,0 +84733040,0 +84733091,0 +84733092,0 +84733099,0 +84734010,7.5 +84734090,7.5 +84735000,0 +84741010,7.5 +84741090,7.5 +84742010,7.5 +84742090,7.5 +84743110,7.5 +84743120,7.5 +84743200,7.5 +84743900,7.5 +84748010,7.5 +84748020,7.5 
+84748030,7.5 +84748090,7.5 +84749000,7.5 +84751000,7.5 +84752100,7.5 +84752900,7.5 +84759000,7.5 +84762110,7.5 +84762120,7.5 +84762900,7.5 +84768990,7.5 +84769010,7.5 +84769090,7.5 +84771000,7.5 +84772000,7.5 +84773000,7.5 +84774000,7.5 +84775100,7.5 +84775900,7.5 +84778010,7.5 +84778090,7.5 +84779000,7.5 +84781090,7.5 +84789000,7.5 +84791000,7.5 +84792010,7.5 +84792090,7.5 +84793000,5 +84794000,7.5 +84795000,7.5 +84796000,7.5 +84797900,7.5 +84798100,7.5 +84798200,7.5 +84798300,7.5 +84798910,7.5 +84798920,7.5 +84798930,0 +84798940,7.5 +84798950,7.5 +84798960,7.5 +84798970,7.5 +84798992,7.5 +84798999,7.5 +84799010,7.5 +84799020,7.5 +84799030,7.5 +84799040,7.5 +84799090,7.5 +84801000,7.5 +84802000,7.5 +84803000,7.5 +84804100,7.5 +84804900,7.5 +84805000,7.5 +84806000,7.5 +84807100,7.5 +84807900,7.5 +84811000,7.5 +84812000,7.5 +84813000,7.5 +84814000,7.5 +84818010,7.5 +84818020,7.5 +84818030,7.5 +84818041,7.5 +84818049,7.5 +84818050,7.5 +84818090,7.5 +84819010,7.5 +84819090,7.5 +84821011,7.5 +84821012,7.5 +84821013,7.5 +84821020,7.5 +84821030,7.5 +84821040,7.5 +84821051,7.5 +84821052,7.5 +84821053,7.5 +84821090,7.5 +84822011,7.5 +84822012,7.5 +84822013,7.5 +84822090,7.5 +84823000,7.5 +84824000,7.5 +84825000,7.5 +84828000,7.5 +84829111,7.5 +84829112,7.5 +84829113,7.5 +84829114,7.5 +84829119,7.5 +84829120,7.5 +84829130,7.5 +84829900,7.5 +84831010,7.5 +84831091,15 +84831092,15 +84831099,7.5 +84832000,7.5 +84833000,7.5 +84834000,7.5 +84835010,7.5 +84835090,7.5 +84836010,7.5 +84836020,7.5 +84836090,7.5 +84839000,7.5 +84841010,7.5 +84841090,7.5 +84842000,7.5 +84849000,7.5 +84851000,7.5 +84852000,7.5 +84858000,7.5 +84859000,7.5 +84861000,5 +84862000,7.5 +84863000,7.5 +84864000,7.5 +84869000,7.5 +84871000,7.5 +84879000,7.5 +85011011,15 +85011012,15 +85011013,15 +85011019,15 +85011020,15 +85012000,15 +85013111,15 +85013112,15 +85013113,15 +85013119,15 +85013120,15 +85013210,15 +85013220,15 +85013310,15 +85013320,15 +85013410,15 +85014010,15 +85014090,15 +85015110,15 +85015120,10 +85015190,15 +85015210,15 +85015220,15 +85015290,15 +85015310,10 +85015320,15 +85015330,15 +85015390,15 +85016100,10 +85016200,10 +85016300,0 +85016410,10 +85016420,10 +85016430,10 +85016470,0 +85017100,10 +85017200,10 +85018000,10 +85021100,10 +85021310,7.5 +85021340,7.5 +85022010,10 +85022090,7.5 +85023100,7.5 +85023990,7.5 +85024000,7.5 +85030010,7.5 +85030021,7.5 +85030029,7.5 +85030090,7.5 +85041010,7.5 +85041020,7.5 +85041090,7.5 +85042100,10 +85042200,10 +85042310,10 +85042320,10 +85042330,10 +85043100,10 +85043200,10 +85043300,10 +85043400,10 +85044010,20 +85044021,20 +85044029,20 +85044030,20 +85044040,20 +85044090,20 +85045010,7.5 +85045090,7.5 +85049010,10 +85049090,15 +85051110,7.5 +85051190,7.5 +85051900,7.5 +85052000,7.5 +85059000,7.5 +85061000,15 +85064000,15 +85065000,15 +85066000,10 +85068010,15 +85068090,15 +85069000,10 +85071000,15 +85072000,15 +85073000,15 +85075000,15 +85076000,20 +85078000,15 +85079010,10 +85079090,10 +85081100,10 +85081900,10 +85086000,10 +85087000,10 +85094010,20 +85094090,20 +85098000,20 +85099000,10 +85101000,20 +85102000,20 +85103000,20 +85109000,10 +85111000,15 +85112010,15 +85112090,15 +85113010,15 +85113020,15 +85114000,15 +85115000,15 +85118000,15 +85119000,15 +85121000,15 +85122010,15 +85122020,15 +85122090,15 +85123010,15 +85123090,15 +85124000,15 +85129000,15 +85131010,10 +85131020,7.5 +85131030,7.5 +85131040,7.5 +85131090,7.5 +85139000,7.5 +85141900,7.5 +85142000,7.5 +85143100,7.5 +85143900,7.5 +85144000,7.5 +85149000,7.5 +85151100,10 +85151900,10 +85152110,10 +85152120,10 
+85152190,10 +85152900,10 +85153100,10 +85153910,10 +85153920,10 +85153990,10 +85158010,10 +85158090,10 +85159000,7.5 +85161000,20 +85162100,20 +85162900,20 +85163100,20 +85163200,20 +85163300,20 +85164000,20 +85165000,20 +85166000,20 +85167100,20 +85167200,20 +85167910,20 +85167920,20 +85167990,20 +85168000,20 +85169000,10 +85171110,0 +85171190,0 +85171300,20 +85171400,20 +85171810,0 +85171890,0 +85176100,20 +85176210,0 +85176220,0 +85176230,0 +85176240,0 +85176250,0 +85176260,0 +85176270,0 +85176290,20 +85176910,0 +85176920,0 +85176950,0 +85176960,0 +85176970,0 +85176990,20 +85177100,20 +85177910,20 +85177990,15 +85181000,15 +85182110,20 +85182190,20 +85182210,20 +85182290,20 +85182910,20 +85182990,20 +85183011,20 +85183019,20 +85183020,20 +85183090,20 +85184000,15 +85185000,10 +85189000,15 +85192000,10 +85193000,10 +85198100,2.5 +85198910,10 +85198940,10 +85198990,10 +85211019,10 +85211029,10 +85211099,10 +85219020,10 +85219090,20 +85229000,10 +85232100,10 +85232910,10 +85232970,10 +85232980,0 +85232990,10 +85234110,10 +85234140,10 +85234160,10 +85234190,10 +85234910,10 +85234920,10 +85234930,10 +85234940,10 +85234950,10 +85234970,10 +85234990,10 +85235100,10 +85235210,0 +85235220,0 +85235290,0 +85235910,0 +85235990,10 +85238010,10 +85238020,0 +85238030,0 +85238090,10 +85241100,15 +85241200,15 +85241900,15 +85249100,15 +85249200,15 +85249900,15 +85255010,7.5 +85255030,7.5 +85255040,7.5 +85255090,7.5 +85256000,0 +85258100,20 +85258200,20 +85258300,2.5 +85258900,20 +85261000,7.5 +85269110,7.5 +85269120,7.5 +85269130,2.5 +85269150,7.5 +85269190,7.5 +85269200,7.5 +85271200,10 +85271300,10 +85271900,10 +85272100,10 +85272900,10 +85279100,10 +85279200,10 +85279900,10 +85284200,10 +85284900,10 +85285200,10 +85285900,10 +85286200,10 +85286900,10 +85287100,20 +85287212,20 +85287213,20 +85287214,20 +85287215,20 +85287216,20 +85287217,20 +85287218,20 +85287219,20 +85287310,10 +85287390,10 +85291019,10 +85291021,7.5 +85291022,7.5 +85291029,10 +85291091,2.5 +85291092,7.5 +85291099,15 +85299020,7.5 +85299090,15 +85301010,7.5 +85308000,7.5 +85309000,7.5 +85311010,10 +85311020,10 +85311090,10 +85312000,0 +85318000,10 +85319000,10 +85321000,0 +85322100,0 +85322200,0 +85322300,0 +85322400,0 +85322500,0 +85322910,0 +85322990,0 +85323000,0 +85329000,0 +85331000,0 +85332111,0 +85332119,0 +85332121,0 +85332129,0 +85332911,0 +85332919,0 +85332921,0 +85332929,0 +85333110,0 +85333120,0 +85333190,0 +85333910,0 +85333920,0 +85333990,0 +85334010,0 +85334020,0 +85334030,0 +85334090,0 +85339000,0 +85340000,0 +85351010,10 +85351020,10 +85351030,10 +85351040,10 +85351090,10 +85352119,10 +85352121,10 +85352129,10 +85352190,10 +85352929,10 +85352990,10 +85353010,10 +85353090,10 +85354010,7.5 +85354020,7.5 +85354030,7.5 +85359010,7.5 +85359020,7.5 +85359030,7.5 +85359040,7.5 +85359090,7.5 +85361010,10 +85361020,10 +85361030,10 +85361040,10 +85361050,10 +85361060,10 +85361090,10 +85362010,10 +85362020,10 +85362030,10 +85362040,10 +85362090,10 +85363000,10 +85364100,15 +85364900,15 +85365010,10 +85365020,10 +85365090,10 +85366110,10 +85366190,10 +85366910,10 +85366990,10 +85367000,7.5 +85369010,10 +85369020,10 +85369030,10 +85369090,10 +85371000,15 +85372000,15 +85381010,7.5 +85381090,7.5 +85389000,15 +85391000,15 +85392110,10 +85392120,15 +85392190,10 +85392200,10 +85392910,10 +85392920,10 +85392930,10 +85392940,15 +85392990,10 +85393110,10 +85393190,10 +85393210,10 +85393220,10 +85393230,10 +85393910,10 +85393990,10 +85394100,10 +85394900,10 +85395100,20 +85395200,20 +85399010,10 +85399090,10 +85401110,10 +85401190,10 
+85402000,10 +85404020,0 +85406000,10 +85407100,10 +85407900,10 +85408100,10 +85408900,10 +85409100,10 +85409900,10 +85411000,0 +85412100,0 +85412900,0 +85413010,0 +85413090,0 +85414100,0 +85414200,25 +85414300,40 +85414900,5 +85415100,0 +85415900,5 +85416000,0 +85419000,0 +85423100,0 +85423200,0 +85423300,0 +85423900,7.5 +85429000,0 +85431010,0 +85431090,7.5 +85432010,7.5 +85432020,7.5 +85432030,7.5 +85432090,7.5 +85433000,7.5 +85437011,0 +85437012,7.5 +85437013,7.5 +85437019,7.5 +85437022,7.5 +85437029,7.5 +85437039,7.5 +85437041,7.5 +85437042,7.5 +85437049,7.5 +85437062,7.5 +85437069,7.5 +85437072,7.5 +85437091,7.5 +85437092,7.5 +85437093,7.5 +85437094,7.5 +85437095,7.5 +85437099,7.5 +85439000,7.5 +85441110,10 +85441190,10 +85441910,15 +85441920,15 +85441930,15 +85441990,15 +85442010,10 +85442090,10 +85443000,15 +85444210,15 +85444220,15 +85444230,15 +85444290,15 +85444291,15 +85444292,15 +85444293,15 +85444299,15 +85444910,15 +85444920,15 +85444930,15 +85444991,15 +85444992,15 +85444993,15 +85444999,15 +85446010,10 +85446020,10 +85446030,10 +85446090,10 +85447010,0 +85447090,0 +85451100,7.5 +85451900,7.5 +85452000,7.5 +85459020,0 +85459090,7.5 +85461000,7.5 +85462019,7.5 +85462050,7.5 +85462090,7.5 +85469010,7.5 +85469090,7.5 +85471010,7.5 +85471040,7.5 +85471090,7.5 +85472000,7.5 +85479010,7.5 +85479020,7.5 +85479090,7.5 +85480000,10 +85491100,10 +85491400,5 +85491900,5 +85492900,10 +85493100,10 +86021000,10 +86029090,10 +86040000,10 +86069900,10 +86071200,10 +86071910,10 +86071920,10 +86071930,10 +86071990,10 +86072100,10 +86072900,10 +86073010,10 +86073090,10 +86079100,10 +86079910,10 +86079920,10 +86079930,10 +86079990,10 +86080010,10 +86080040,10 +86080090,10 +86090000,10 +87012100,10 +87013091,10 +87013099,10 +87019100,0 +87019200,0 +87019300,10 +87031010,125 +87031090,60 +87032110,0 +87032210,125 +87032291,125 +87032299,60 +87032391,100 +87032491,125 +87033199,0 +87033291,60 +87033299,125 +87033391,100 +87034030,100 +87036030,100 +87038030,100 +87038090,0 +87041010,40 +87042300,40 +87044300,40 +87049019,40 +87051000,10 +87053000,10 +87054000,10 +87059000,10 +87060029,15 +87060039,15 +87071000,15 +87079000,15 +87081010,15 +87081090,15 +87082100,15 +87082200,15 +87082900,15 +87083000,15 +87084000,15 +87085000,15 +87087000,15 +87088000,15 +87089100,15 +87089200,15 +87089300,15 +87089400,15 +87089500,15 +87089900,15 +87091100,10 +87091900,10 +87099000,10 +87100000,0 +87111020,100 +87111090,100 +87112029,50 +87112099,50 +87113020,100 +87114010,50 +87115000,50 +87116010,100 +87116020,100 +87116090,100 +87119090,100 +87120010,30 +87120090,10 +87131010,10 +87131090,10 +87139010,10 +87139090,10 +87141010,15 +87141090,15 +87142010,0 +87142020,10 +87142090,10 +87149100,20 +87149210,20 +87149220,20 +87149290,20 +87149310,20 +87149320,20 +87149390,20 +87149400,20 +87149510,20 +87149590,20 +87149600,20 +87149910,20 +87149920,20 +87149990,20 +87150010,10 +87150020,10 +87162000,10 +87163100,10 +87163900,10 +87164000,10 +87168010,10 +87168090,10 +87169010,10 +87169090,10 +88010020,0 +88021100,2.5 +88021200,0 +88023000,0 +88040010,10 +88040020,10 +88051010,10 +88052100,10 +88052900,10 +88062200,10 +88071000,2.5 +88072000,2.5 +88073000,2.5 +88079000,10 +89011030,10 +89011040,0 +89012000,0 +89019000,0 +89020090,10 +89031100,25 +89031900,25 +89032100,25 +89039900,25 +89040000,10 +89051000,0 +89059090,5 +89069000,10 +89071000,0 +89079000,10 +89080000,2.5 +90011000,15 +90012000,10 +90013000,10 +90014090,10 +90015000,10 +90019010,10 +90019090,10 +90021100,10 +90021900,10 +90022000,10 +90029000,10 
+90031100,10 +90031900,10 +90039000,10 +90041000,20 +90049010,10 +90049020,10 +90049090,10 +90051000,10 +90058010,10 +90058020,10 +90058090,10 +90059010,10 +90059020,10 +90059090,10 +90063000,10 +90064000,10 +90065390,10 +90065990,10 +90066100,10 +90066900,10 +90069100,10 +90069900,10 +90071090,10 +90072090,10 +90079100,10 +90079200,10 +90085010,10 +90085030,10 +90089000,10 +90101000,7.5 +90105000,7.5 +90106000,10 +90109000,7.5 +90111000,7.5 +90112000,7.5 +90118000,7.5 +90119000,7.5 +90121010,7.5 +90121090,7.5 +90129000,7.5 +90131010,10 +90131020,7.5 +90131090,7.5 +90132000,7.5 +90138000,7.5 +90139000,7.5 +90141000,7.5 +90142000,7.5 +90148010,7.5 +90148090,7.5 +90149000,7.5 +90151000,7.5 +90152000,7.5 +90153010,7.5 +90153090,7.5 +90154000,7.5 +90158010,7.5 +90158020,7.5 +90158030,7.5 +90158090,7.5 +90159000,7.5 +90160010,10 +90160020,10 +90160090,10 +90171000,0 +90172010,10 +90172020,10 +90172030,10 +90172090,10 +90173010,10 +90173021,10 +90173022,10 +90173029,10 +90178010,10 +90178090,10 +90179000,10 +90181100,7.5 +90181210,7.5 +90181290,7.5 +90181300,7.5 +90181400,7.5 +90181910,7.5 +90181920,7.5 +90181990,7.5 +90182000,7.5 +90183100,7.5 +90183210,7.5 +90183220,7.5 +90183290,7.5 +90183910,7.5 +90183920,7.5 +90183930,7.5 +90183990,7.5 +90184100,7.5 +90184900,7.5 +90185010,7.5 +90185020,5 +90185030,7.5 +90185090,7.5 +90189011,7.5 +90189012,7.5 +90189019,7.5 +90189021,5 +90189022,7.5 +90189023,7.5 +90189024,5 +90189025,7.5 +90189029,7.5 +90189031,7.5 +90189032,7.5 +90189041,7.5 +90189042,7.5 +90189043,5 +90189044,7.5 +90189092,7.5 +90189093,7.5 +90189094,7.5 +90189095,5 +90189096,5 +90189097,5 +90189098,5 +90189099,10 +90191010,7.5 +90191020,10 +90191090,7.5 +90192010,7.5 +90192090,7.5 +90200000,7.5 +90211000,7.5 +90212100,7.5 +90212900,7.5 +90213100,7.5 +90213900,7.5 +90214010,7.5 +90214090,7.5 +90215000,7.5 +90219010,7.5 +90219090,7.5 +90221200,10 +90221300,10 +90221410,7.5 +90221420,10 +90221490,10 +90221900,10 +90222100,10 +90222900,10 +90223000,10 +90229010,10 +90229020,10 +90229030,10 +90229040,10 +90229090,10 +90230010,10 +90230090,10 +90241000,7.5 +90248010,7.5 +90248091,7.5 +90248099,7.5 +90249000,7.5 +90251110,10 +90251190,7.5 +90251910,10 +90251920,7.5 +90251990,7.5 +90258010,7.5 +90258020,7.5 +90258030,0 +90258090,7.5 +90259000,7.5 +90261010,0 +90261020,0 +90261090,0 +90262000,0 +90268010,0 +90268090,0 +90269000,0 +90271000,10 +90272000,0 +90273010,0 +90273020,0 +90273090,0 +90275010,0 +90275020,0 +90275030,0 +90275090,0 +90278100,0 +90278910,0 +90278920,0 +90278930,0 +90278990,0 +90279010,7.5 +90279020,7.5 +90279090,7.5 +90281000,7.5 +90282000,7.5 +90283010,25 +90283090,15 +90289010,20 +90289090,7.5 +90291010,10 +90291090,7.5 +90292010,7.5 +90292020,7.5 +90292030,7.5 +90292090,7.5 +90299000,7.5 +90301000,7.5 +90302000,7.5 +90303100,7.5 +90303200,7.5 +90303310,7.5 +90303320,7.5 +90303330,7.5 +90303340,7.5 +90303350,7.5 +90303390,7.5 +90303900,7.5 +90304000,0 +90308200,0 +90308400,7.5 +90308910,7.5 +90308920,7.5 +90308990,7.5 +90309010,7.5 +90309090,7.5 +90311000,7.5 +90312000,7.5 +90314100,0 +90314900,7.5 +90318000,15 +90319000,7.5 +90321010,7.5 +90321090,7.5 +90322010,7.5 +90322090,7.5 +90328100,7.5 +90328910,15 +90328990,15 +90329000,7.5 +90330000,7.5 +91011100,20 +91011900,20 +91012100,20 +91012900,20 +91019120,20 +91019990,20 +91021100,20 +91021200,20 +91021900,20 +91022100,20 +91022900,20 +91029110,20 +91029120,20 +91029190,20 +91029910,20 +91029920,20 +91029990,20 +91031000,20 +91039000,20 +91040000,15 +91051100,20 +91051900,20 +91052100,20 +91052900,20 +91059100,20 
+91059910,20 +91059990,20 +91061000,10 +91069000,10 +91070000,10 +91081100,5 +91081900,5 +91082000,5 +91089000,5 +91091010,10 +91091090,10 +91099000,10 +91101100,5 +91101200,5 +91109000,10 +91112000,10 +91118000,10 +91119000,10 +91122000,10 +91129000,10 +91131000,10 +91132010,10 +91132090,10 +91139010,10 +91139090,10 +91143010,5 +91143020,10 +91144010,10 +91144020,10 +91149030,10 +91149040,10 +91149091,10 +91149092,10 +92011000,10 +92012000,10 +92019000,10 +92021000,10 +92029000,10 +92051000,10 +92059010,10 +92059020,10 +92059090,10 +92060000,10 +92071000,10 +92079000,10 +92081000,10 +92089000,10 +92093000,10 +92099100,10 +92099200,10 +92099400,10 +92099900,10 +93011090,10 +93019000,10 +93020000,10 +93032000,10 +93033000,0 +93039000,10 +93040000,10 +93052090,10 +93059100,10 +93059900,10 +93062100,10 +93062900,10 +93063000,10 +93069000,10 +93070000,10 +94011000,25 +94012000,25 +94013100,25 +94013900,25 +94014100,25 +94014900,25 +94016100,25 +94016900,25 +94017100,25 +94017900,25 +94018000,25 +94019100,25 +94019900,25 +94021010,10 +94021090,10 +94029010,10 +94029020,10 +94029090,10 +94031010,25 +94031090,25 +94032010,25 +94032090,25 +94033010,25 +94033090,25 +94034000,25 +94035010,25 +94035090,25 +94036000,25 +94037000,25 +94038200,25 +94038300,25 +94038900,25 +94039100,25 +94039900,25 +94041000,25 +94042110,25 +94042190,25 +94042910,25 +94042990,25 +94043010,25 +94043090,25 +94044010,25 +94044020,25 +94044040,25 +94049000,25 +94051100,25 +94051900,25 +94052100,25 +94052900,25 +94053100,25 +94053900,25 +94054100,25 +94054200,25 +94054900,25 +94055000,25 +94056100,25 +94056900,25 +94059100,25 +94059200,25 +94059900,25 +94062000,10 +94069020,10 +94069030,10 +94069090,10 +95030020,60 +95030091,60 +95030099,60 +95042000,20 +95043000,20 +95044000,20 +95045000,20 +95049010,20 +95049090,20 +95051000,20 +95059010,20 +95059090,20 +95061100,0 +95061200,0 +95061900,20 +95062900,20 +95063100,20 +95063200,20 +95063900,20 +95064000,20 +95065100,20 +95065910,20 +95065990,20 +95066100,20 +95066210,20 +95066220,20 +95066230,20 +95066290,20 +95066910,20 +95066920,20 +95066930,20 +95066940,20 +95066990,20 +95067000,20 +95069110,10 +95069190,10 +95069910,20 +95069920,20 +95069940,20 +95069960,20 +95069970,20 +95069980,20 +95069990,20 +95071000,20 +95072000,20 +95073000,20 +95079010,20 +95079090,20 +95082200,20 +95082400,20 +95082600,20 +95082900,20 +95083000,20 +95084000,20 +96019030,10 +96019040,10 +96019090,10 +96020010,10 +96020020,10 +96020030,10 +96020090,10 +96031000,0 +96032100,20 +96032900,20 +96033020,20 +96033090,20 +96034010,20 +96034020,20 +96035000,20 +96039000,20 +96040000,20 +96050010,10 +96050090,10 +96061010,10 +96061020,10 +96062100,10 +96062200,10 +96062910,10 +96062990,10 +96063010,10 +96063090,10 +96071110,10 +96071190,10 +96071910,10 +96071990,10 +96072000,10 +96081011,10 +96081012,10 +96081019,10 +96081091,10 +96081092,10 +96081099,10 +96082000,10 +96083019,10 +96083029,10 +96083092,10 +96083099,10 +96084000,10 +96085000,10 +96086010,10 +96086090,10 +96089110,10 +96089120,10 +96089130,10 +96089191,10 +96089199,10 +96089910,10 +96089990,10 +96091000,10 +96092000,10 +96099020,10 +96099030,10 +96099090,10 +96100000,10 +96110000,20 +96121010,10 +96121020,10 +96121030,10 +96121090,10 +96122000,10 +96131000,20 +96132000,20 +96138010,20 +96138090,20 +96139000,20 +96140000,10 +96151100,20 +96151900,20 +96159000,20 +96161010,20 +96161020,20 +96162000,20 +96170011,20 +96170012,20 +96170013,20 +96170019,20 +96170090,20 +96180000,10 +96190010,10 +96190020,10 +96190030,10 +96190040,10 +96190090,10 
+96200000,10 +97012100,10 +97012900,10 +97019100,10 +97019900,10 +97029000,10 +97031010,10 +97031090,10 +97039010,10 +97039020,10 +97039090,10 +97040010,0 +97040020,0 +97040090,0 +97069000,10 +98010011,5 +98010013,5 +98010014,0 +98010019,5 +98020000,10 +98030000,0 +98041000,35 +98049000,35 +98059000,10 +98060000,200 diff --git a/Checkpoint/assets/vocab.txt b/Checkpoint/assets/vocab.txt new file mode 100644 index 0000000000000000000000000000000000000000..fb140275c155a9c7c5a3b3e0e77a9e839594a938 --- /dev/null +++ b/Checkpoint/assets/vocab.txt @@ -0,0 +1,30522 @@ +[PAD] +[unused0] +[unused1] +[unused2] +[unused3] +[unused4] +[unused5] +[unused6] +[unused7] +[unused8] +[unused9] +[unused10] +[unused11] +[unused12] +[unused13] +[unused14] +[unused15] +[unused16] +[unused17] +[unused18] +[unused19] +[unused20] +[unused21] +[unused22] +[unused23] +[unused24] +[unused25] +[unused26] +[unused27] +[unused28] +[unused29] +[unused30] +[unused31] +[unused32] +[unused33] +[unused34] +[unused35] +[unused36] +[unused37] +[unused38] +[unused39] +[unused40] +[unused41] +[unused42] +[unused43] +[unused44] +[unused45] +[unused46] +[unused47] +[unused48] +[unused49] +[unused50] +[unused51] +[unused52] +[unused53] +[unused54] +[unused55] +[unused56] +[unused57] +[unused58] +[unused59] +[unused60] +[unused61] +[unused62] +[unused63] +[unused64] +[unused65] +[unused66] +[unused67] +[unused68] +[unused69] +[unused70] +[unused71] +[unused72] +[unused73] +[unused74] +[unused75] +[unused76] +[unused77] +[unused78] +[unused79] +[unused80] +[unused81] +[unused82] +[unused83] +[unused84] +[unused85] +[unused86] +[unused87] +[unused88] +[unused89] +[unused90] +[unused91] +[unused92] +[unused93] +[unused94] +[unused95] +[unused96] +[unused97] +[unused98] +[UNK] +[CLS] +[SEP] +[MASK] +[unused99] +[unused100] +[unused101] +[unused102] +[unused103] +[unused104] +[unused105] +[unused106] +[unused107] +[unused108] +[unused109] +[unused110] +[unused111] +[unused112] +[unused113] +[unused114] +[unused115] +[unused116] +[unused117] +[unused118] +[unused119] +[unused120] +[unused121] +[unused122] +[unused123] +[unused124] +[unused125] +[unused126] +[unused127] +[unused128] +[unused129] +[unused130] +[unused131] +[unused132] +[unused133] +[unused134] +[unused135] +[unused136] +[unused137] +[unused138] +[unused139] +[unused140] +[unused141] +[unused142] +[unused143] +[unused144] +[unused145] +[unused146] +[unused147] +[unused148] +[unused149] +[unused150] +[unused151] +[unused152] +[unused153] +[unused154] +[unused155] +[unused156] +[unused157] +[unused158] +[unused159] +[unused160] +[unused161] +[unused162] +[unused163] +[unused164] +[unused165] +[unused166] +[unused167] +[unused168] +[unused169] +[unused170] +[unused171] +[unused172] +[unused173] +[unused174] +[unused175] +[unused176] +[unused177] +[unused178] +[unused179] +[unused180] +[unused181] +[unused182] +[unused183] +[unused184] +[unused185] +[unused186] +[unused187] +[unused188] +[unused189] +[unused190] +[unused191] +[unused192] +[unused193] +[unused194] +[unused195] +[unused196] +[unused197] +[unused198] +[unused199] +[unused200] +[unused201] +[unused202] +[unused203] +[unused204] +[unused205] +[unused206] +[unused207] +[unused208] +[unused209] +[unused210] +[unused211] +[unused212] +[unused213] +[unused214] +[unused215] +[unused216] +[unused217] +[unused218] +[unused219] +[unused220] +[unused221] +[unused222] +[unused223] +[unused224] +[unused225] +[unused226] +[unused227] +[unused228] +[unused229] +[unused230] +[unused231] +[unused232] +[unused233] +[unused234] 
+[unused235] +[unused236] +[unused237] +[unused238] +[unused239] +[unused240] +[unused241] +[unused242] +[unused243] +[unused244] +[unused245] +[unused246] +[unused247] +[unused248] +[unused249] +[unused250] +[unused251] +[unused252] +[unused253] +[unused254] +[unused255] +[unused256] +[unused257] +[unused258] +[unused259] +[unused260] +[unused261] +[unused262] +[unused263] +[unused264] +[unused265] +[unused266] +[unused267] +[unused268] +[unused269] +[unused270] +[unused271] +[unused272] +[unused273] +[unused274] +[unused275] +[unused276] +[unused277] +[unused278] +[unused279] +[unused280] +[unused281] +[unused282] +[unused283] +[unused284] +[unused285] +[unused286] +[unused287] +[unused288] +[unused289] +[unused290] +[unused291] +[unused292] +[unused293] +[unused294] +[unused295] +[unused296] +[unused297] +[unused298] +[unused299] +[unused300] +[unused301] +[unused302] +[unused303] +[unused304] +[unused305] +[unused306] +[unused307] +[unused308] +[unused309] +[unused310] +[unused311] +[unused312] +[unused313] +[unused314] +[unused315] +[unused316] +[unused317] +[unused318] +[unused319] +[unused320] +[unused321] +[unused322] +[unused323] +[unused324] +[unused325] +[unused326] +[unused327] +[unused328] +[unused329] +[unused330] +[unused331] +[unused332] +[unused333] +[unused334] +[unused335] +[unused336] +[unused337] +[unused338] +[unused339] +[unused340] +[unused341] +[unused342] +[unused343] +[unused344] +[unused345] +[unused346] +[unused347] +[unused348] +[unused349] +[unused350] +[unused351] +[unused352] +[unused353] +[unused354] +[unused355] +[unused356] +[unused357] +[unused358] +[unused359] +[unused360] +[unused361] +[unused362] +[unused363] +[unused364] +[unused365] +[unused366] +[unused367] +[unused368] +[unused369] +[unused370] +[unused371] +[unused372] +[unused373] +[unused374] +[unused375] +[unused376] +[unused377] +[unused378] +[unused379] +[unused380] +[unused381] +[unused382] +[unused383] +[unused384] +[unused385] +[unused386] +[unused387] +[unused388] +[unused389] +[unused390] +[unused391] +[unused392] +[unused393] +[unused394] +[unused395] +[unused396] +[unused397] +[unused398] +[unused399] +[unused400] +[unused401] +[unused402] +[unused403] +[unused404] +[unused405] +[unused406] +[unused407] +[unused408] +[unused409] +[unused410] +[unused411] +[unused412] +[unused413] +[unused414] +[unused415] +[unused416] +[unused417] +[unused418] +[unused419] +[unused420] +[unused421] +[unused422] +[unused423] +[unused424] +[unused425] +[unused426] +[unused427] +[unused428] +[unused429] +[unused430] +[unused431] +[unused432] +[unused433] +[unused434] +[unused435] +[unused436] +[unused437] +[unused438] +[unused439] +[unused440] +[unused441] +[unused442] +[unused443] +[unused444] +[unused445] +[unused446] +[unused447] +[unused448] +[unused449] +[unused450] +[unused451] +[unused452] +[unused453] +[unused454] +[unused455] +[unused456] +[unused457] +[unused458] +[unused459] +[unused460] +[unused461] +[unused462] +[unused463] +[unused464] +[unused465] +[unused466] +[unused467] +[unused468] +[unused469] +[unused470] +[unused471] +[unused472] +[unused473] +[unused474] +[unused475] +[unused476] +[unused477] +[unused478] +[unused479] +[unused480] +[unused481] +[unused482] +[unused483] +[unused484] +[unused485] +[unused486] +[unused487] +[unused488] +[unused489] +[unused490] +[unused491] +[unused492] +[unused493] +[unused494] +[unused495] +[unused496] +[unused497] +[unused498] +[unused499] +[unused500] +[unused501] +[unused502] +[unused503] +[unused504] +[unused505] +[unused506] +[unused507] 
+[unused508] +[unused509] +[unused510] +[unused511] +[unused512] +[unused513] +[unused514] +[unused515] +[unused516] +[unused517] +[unused518] +[unused519] +[unused520] +[unused521] +[unused522] +[unused523] +[unused524] +[unused525] +[unused526] +[unused527] +[unused528] +[unused529] +[unused530] +[unused531] +[unused532] +[unused533] +[unused534] +[unused535] +[unused536] +[unused537] +[unused538] +[unused539] +[unused540] +[unused541] +[unused542] +[unused543] +[unused544] +[unused545] +[unused546] +[unused547] +[unused548] +[unused549] +[unused550] +[unused551] +[unused552] +[unused553] +[unused554] +[unused555] +[unused556] +[unused557] +[unused558] +[unused559] +[unused560] +[unused561] +[unused562] +[unused563] +[unused564] +[unused565] +[unused566] +[unused567] +[unused568] +[unused569] +[unused570] +[unused571] +[unused572] +[unused573] +[unused574] +[unused575] +[unused576] +[unused577] +[unused578] +[unused579] +[unused580] +[unused581] +[unused582] +[unused583] +[unused584] +[unused585] +[unused586] +[unused587] +[unused588] +[unused589] +[unused590] +[unused591] +[unused592] +[unused593] +[unused594] +[unused595] +[unused596] +[unused597] +[unused598] +[unused599] +[unused600] +[unused601] +[unused602] +[unused603] +[unused604] +[unused605] +[unused606] +[unused607] +[unused608] +[unused609] +[unused610] +[unused611] +[unused612] +[unused613] +[unused614] +[unused615] +[unused616] +[unused617] +[unused618] +[unused619] +[unused620] +[unused621] +[unused622] +[unused623] +[unused624] +[unused625] +[unused626] +[unused627] +[unused628] +[unused629] +[unused630] +[unused631] +[unused632] +[unused633] +[unused634] +[unused635] +[unused636] +[unused637] +[unused638] +[unused639] +[unused640] +[unused641] +[unused642] +[unused643] +[unused644] +[unused645] +[unused646] +[unused647] +[unused648] +[unused649] +[unused650] +[unused651] +[unused652] +[unused653] +[unused654] +[unused655] +[unused656] +[unused657] +[unused658] +[unused659] +[unused660] +[unused661] +[unused662] +[unused663] +[unused664] +[unused665] +[unused666] +[unused667] +[unused668] +[unused669] +[unused670] +[unused671] +[unused672] +[unused673] +[unused674] +[unused675] +[unused676] +[unused677] +[unused678] +[unused679] +[unused680] +[unused681] +[unused682] +[unused683] +[unused684] +[unused685] +[unused686] +[unused687] +[unused688] +[unused689] +[unused690] +[unused691] +[unused692] +[unused693] +[unused694] +[unused695] +[unused696] +[unused697] +[unused698] +[unused699] +[unused700] +[unused701] +[unused702] +[unused703] +[unused704] +[unused705] +[unused706] +[unused707] +[unused708] +[unused709] +[unused710] +[unused711] +[unused712] +[unused713] +[unused714] +[unused715] +[unused716] +[unused717] +[unused718] +[unused719] +[unused720] +[unused721] +[unused722] +[unused723] +[unused724] +[unused725] +[unused726] +[unused727] +[unused728] +[unused729] +[unused730] +[unused731] +[unused732] +[unused733] +[unused734] +[unused735] +[unused736] +[unused737] +[unused738] +[unused739] +[unused740] +[unused741] +[unused742] +[unused743] +[unused744] +[unused745] +[unused746] +[unused747] +[unused748] +[unused749] +[unused750] +[unused751] +[unused752] +[unused753] +[unused754] +[unused755] +[unused756] +[unused757] +[unused758] +[unused759] +[unused760] +[unused761] +[unused762] +[unused763] +[unused764] +[unused765] +[unused766] +[unused767] +[unused768] +[unused769] +[unused770] +[unused771] +[unused772] +[unused773] +[unused774] +[unused775] +[unused776] +[unused777] +[unused778] +[unused779] +[unused780] 
+[unused781] +[unused782] +[unused783] +[unused784] +[unused785] +[unused786] +[unused787] +[unused788] +[unused789] +[unused790] +[unused791] +[unused792] +[unused793] +[unused794] +[unused795] +[unused796] +[unused797] +[unused798] +[unused799] +[unused800] +[unused801] +[unused802] +[unused803] +[unused804] +[unused805] +[unused806] +[unused807] +[unused808] +[unused809] +[unused810] +[unused811] +[unused812] +[unused813] +[unused814] +[unused815] +[unused816] +[unused817] +[unused818] +[unused819] +[unused820] +[unused821] +[unused822] +[unused823] +[unused824] +[unused825] +[unused826] +[unused827] +[unused828] +[unused829] +[unused830] +[unused831] +[unused832] +[unused833] +[unused834] +[unused835] +[unused836] +[unused837] +[unused838] +[unused839] +[unused840] +[unused841] +[unused842] +[unused843] +[unused844] +[unused845] +[unused846] +[unused847] +[unused848] +[unused849] +[unused850] +[unused851] +[unused852] +[unused853] +[unused854] +[unused855] +[unused856] +[unused857] +[unused858] +[unused859] +[unused860] +[unused861] +[unused862] +[unused863] +[unused864] +[unused865] +[unused866] +[unused867] +[unused868] +[unused869] +[unused870] +[unused871] +[unused872] +[unused873] +[unused874] +[unused875] +[unused876] +[unused877] +[unused878] +[unused879] +[unused880] +[unused881] +[unused882] +[unused883] +[unused884] +[unused885] +[unused886] +[unused887] +[unused888] +[unused889] +[unused890] +[unused891] +[unused892] +[unused893] +[unused894] +[unused895] +[unused896] +[unused897] +[unused898] +[unused899] +[unused900] +[unused901] +[unused902] +[unused903] +[unused904] +[unused905] +[unused906] +[unused907] +[unused908] +[unused909] +[unused910] +[unused911] +[unused912] +[unused913] +[unused914] +[unused915] +[unused916] +[unused917] +[unused918] +[unused919] +[unused920] +[unused921] +[unused922] +[unused923] +[unused924] +[unused925] +[unused926] +[unused927] +[unused928] +[unused929] +[unused930] +[unused931] +[unused932] +[unused933] +[unused934] +[unused935] +[unused936] +[unused937] +[unused938] +[unused939] +[unused940] +[unused941] +[unused942] +[unused943] +[unused944] +[unused945] +[unused946] +[unused947] +[unused948] +[unused949] +[unused950] +[unused951] +[unused952] +[unused953] +[unused954] +[unused955] +[unused956] +[unused957] +[unused958] +[unused959] +[unused960] +[unused961] +[unused962] +[unused963] +[unused964] +[unused965] +[unused966] +[unused967] +[unused968] +[unused969] +[unused970] +[unused971] +[unused972] +[unused973] +[unused974] +[unused975] +[unused976] +[unused977] +[unused978] +[unused979] +[unused980] +[unused981] +[unused982] +[unused983] +[unused984] +[unused985] +[unused986] +[unused987] +[unused988] +[unused989] +[unused990] +[unused991] +[unused992] +[unused993] +! +" +# +$ +% +& +' +( +) +* ++ +, +- +. +/ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +: +; +< += +> +? 
+@ +[ +\ +] +^ +_ +` +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +{ +| +} +~ +¡ +¢ +£ +¤ +¥ +¦ +§ +¨ +© +ª +« +¬ +® +° +± +² +³ +´ +µ +¶ +· +¹ +º +» +¼ +½ +¾ +¿ +× +ß +æ +ð +÷ +ø +þ +đ +ħ +ı +ł +ŋ +œ +ƒ +ɐ +ɑ +ɒ +ɔ +ɕ +ə +ɛ +ɡ +ɣ +ɨ +ɪ +ɫ +ɬ +ɯ +ɲ +ɴ +ɹ +ɾ +ʀ +ʁ +ʂ +ʃ +ʉ +ʊ +ʋ +ʌ +ʎ +ʐ +ʑ +ʒ +ʔ +ʰ +ʲ +ʳ +ʷ +ʸ +ʻ +ʼ +ʾ +ʿ +ˈ +ː +ˡ +ˢ +ˣ +ˤ +α +β +γ +δ +ε +ζ +η +θ +ι +κ +λ +μ +ν +ξ +ο +π +ρ +ς +σ +τ +υ +φ +χ +ψ +ω +а +б +в +г +д +е +ж +з +и +к +л +м +н +о +п +р +с +т +у +ф +х +ц +ч +ш +щ +ъ +ы +ь +э +ю +я +ђ +є +і +ј +љ +њ +ћ +ӏ +ա +բ +գ +դ +ե +թ +ի +լ +կ +հ +մ +յ +ն +ո +պ +ս +վ +տ +ր +ւ +ք +־ +א +ב +ג +ד +ה +ו +ז +ח +ט +י +ך +כ +ל +ם +מ +ן +נ +ס +ע +ף +פ +ץ +צ +ק +ר +ש +ת +، +ء +ا +ب +ة +ت +ث +ج +ح +خ +د +ذ +ر +ز +س +ش +ص +ض +ط +ظ +ع +غ +ـ +ف +ق +ك +ل +م +ن +ه +و +ى +ي +ٹ +پ +چ +ک +گ +ں +ھ +ہ +ی +ے +अ +आ +उ +ए +क +ख +ग +च +ज +ट +ड +ण +त +थ +द +ध +न +प +ब +भ +म +य +र +ल +व +श +ष +स +ह +ा +ि +ी +ो +। +॥ +ং +অ +আ +ই +উ +এ +ও +ক +খ +গ +চ +ছ +জ +ট +ড +ণ +ত +থ +দ +ধ +ন +প +ব +ভ +ম +য +র +ল +শ +ষ +স +হ +া +ি +ী +ে +க +ச +ட +த +ந +ன +ப +ம +ய +ர +ல +ள +வ +ா +ி +ு +ே +ை +ನ +ರ +ಾ +ක +ය +ර +ල +ව +ා +ก +ง +ต +ท +น +พ +ม +ย +ร +ล +ว +ส +อ +า +เ +་ +། +ག +ང +ད +ན +པ +བ +མ +འ +ར +ལ +ས +မ +ა +ბ +გ +დ +ე +ვ +თ +ი +კ +ლ +მ +ნ +ო +რ +ს +ტ +უ +ᄀ +ᄂ +ᄃ +ᄅ +ᄆ +ᄇ +ᄉ +ᄊ +ᄋ +ᄌ +ᄎ +ᄏ +ᄐ +ᄑ +ᄒ +ᅡ +ᅢ +ᅥ +ᅦ +ᅧ +ᅩ +ᅪ +ᅭ +ᅮ +ᅯ +ᅲ +ᅳ +ᅴ +ᅵ +ᆨ +ᆫ +ᆯ +ᆷ +ᆸ +ᆼ +ᴬ +ᴮ +ᴰ +ᴵ +ᴺ +ᵀ +ᵃ +ᵇ +ᵈ +ᵉ +ᵍ +ᵏ +ᵐ +ᵒ +ᵖ +ᵗ +ᵘ +ᵢ +ᵣ +ᵤ +ᵥ +ᶜ +ᶠ +‐ +‑ +‒ +– +— +― +‖ +‘ +’ +‚ +“ +” +„ +† +‡ +• +… +‰ +′ +″ +› +‿ +⁄ +⁰ +ⁱ +⁴ +⁵ +⁶ +⁷ +⁸ +⁹ +⁺ +⁻ +ⁿ +₀ +₁ +₂ +₃ +₄ +₅ +₆ +₇ +₈ +₉ +₊ +₍ +₎ +ₐ +ₑ +ₒ +ₓ +ₕ +ₖ +ₗ +ₘ +ₙ +ₚ +ₛ +ₜ +₤ +₩ +€ +₱ +₹ +ℓ +№ +ℝ +™ +⅓ +⅔ +← +↑ +→ +↓ +↔ +↦ +⇄ +⇌ +⇒ +∂ +∅ +∆ +∇ +∈ +− +∗ +∘ +√ +∞ +∧ +∨ +∩ +∪ +≈ +≡ +≤ +≥ +⊂ +⊆ +⊕ +⊗ +⋅ +─ +│ +■ +▪ +● +★ +☆ +☉ +♠ +♣ +♥ +♦ +♭ +♯ +⟨ +⟩ +ⱼ +⺩ +⺼ +⽥ +、 +。 +〈 +〉 +《 +》 +「 +」 +『 +』 +〜 +あ +い +う +え +お +か +き +く +け +こ +さ +し +す +せ +そ +た +ち +っ +つ +て +と +な +に +ぬ +ね +の +は +ひ +ふ +へ +ほ +ま +み +む +め +も +や +ゆ +よ +ら +り +る +れ +ろ +を +ん +ァ +ア +ィ +イ +ウ +ェ +エ +オ +カ +キ +ク +ケ +コ +サ +シ +ス +セ +タ +チ +ッ +ツ +テ +ト +ナ +ニ +ノ +ハ +ヒ +フ +ヘ +ホ +マ +ミ +ム +メ +モ +ャ +ュ +ョ +ラ +リ +ル +レ +ロ +ワ +ン +・ +ー +一 +三 +上 +下 +不 +世 +中 +主 +久 +之 +也 +事 +二 +五 +井 +京 +人 +亻 +仁 +介 +代 +仮 +伊 +会 +佐 +侍 +保 +信 +健 +元 +光 +八 +公 +内 +出 +分 +前 +劉 +力 +加 +勝 +北 +区 +十 +千 +南 +博 +原 +口 +古 +史 +司 +合 +吉 +同 +名 +和 +囗 +四 +国 +國 +土 +地 +坂 +城 +堂 +場 +士 +夏 +外 +大 +天 +太 +夫 +奈 +女 +子 +学 +宀 +宇 +安 +宗 +定 +宣 +宮 +家 +宿 +寺 +將 +小 +尚 +山 +岡 +島 +崎 +川 +州 +巿 +帝 +平 +年 +幸 +广 +弘 +張 +彳 +後 +御 +德 +心 +忄 +志 +忠 +愛 +成 +我 +戦 +戸 +手 +扌 +政 +文 +新 +方 +日 +明 +星 +春 +昭 +智 +曲 +書 +月 +有 +朝 +木 +本 +李 +村 +東 +松 +林 +森 +楊 +樹 +橋 +歌 +止 +正 +武 +比 +氏 +民 +水 +氵 +氷 +永 +江 +沢 +河 +治 +法 +海 +清 +漢 +瀬 +火 +版 +犬 +王 +生 +田 +男 +疒 +発 +白 +的 +皇 +目 +相 +省 +真 +石 +示 +社 +神 +福 +禾 +秀 +秋 +空 +立 +章 +竹 +糹 +美 +義 +耳 +良 +艹 +花 +英 +華 +葉 +藤 +行 +街 +西 +見 +訁 +語 +谷 +貝 +貴 +車 +軍 +辶 +道 +郎 +郡 +部 +都 +里 +野 +金 +鈴 +镇 +長 +門 +間 +阝 +阿 +陳 +陽 +雄 +青 +面 +風 +食 +香 +馬 +高 +龍 +龸 +fi +fl +! +( +) +, +- +. +/ +: +? 
+~ +the +of +and +in +to +was +he +is +as +for +on +with +that +it +his +by +at +from +her +##s +she +you +had +an +were +but +be +this +are +not +my +they +one +which +or +have +him +me +first +all +also +their +has +up +who +out +been +when +after +there +into +new +two +its +##a +time +would +no +what +about +said +we +over +then +other +so +more +##e +can +if +like +back +them +only +some +could +##i +where +just +##ing +during +before +##n +do +##o +made +school +through +than +now +years +most +world +may +between +down +well +three +##d +year +while +will +##ed +##r +##y +later +##t +city +under +around +did +such +being +used +state +people +part +know +against +your +many +second +university +both +national +##er +these +don +known +off +way +until +re +how +even +get +head +... +didn +##ly +team +american +because +de +##l +born +united +film +since +still +long +work +south +us +became +any +high +again +day +family +see +right +man +eyes +house +season +war +states +including +took +life +north +same +each +called +name +much +place +however +go +four +group +another +found +won +area +here +going +10 +away +series +left +home +music +best +make +hand +number +company +several +never +last +john +000 +very +album +take +end +good +too +following +released +game +played +little +began +district +##m +old +want +those +side +held +own +early +county +ll +league +use +west +##u +face +think +##es +2010 +government +##h +march +came +small +general +town +june +##on +line +based +something +##k +september +thought +looked +along +international +2011 +air +july +club +went +january +october +our +august +april +york +12 +few +2012 +2008 +east +show +member +college +2009 +father +public +##us +come +men +five +set +station +church +##c +next +former +november +room +party +located +december +2013 +age +got +2007 +##g +system +let +love +2006 +though +every +2014 +look +song +water +century +without +body +black +night +within +great +women +single +ve +building +large +population +river +named +band +white +started +##an +once +15 +20 +should +18 +2015 +service +top +built +british +open +death +king +moved +local +times +children +february +book +why +11 +door +need +president +order +final +road +wasn +although +due +major +died +village +third +knew +2016 +asked +turned +st +wanted +say +##p +together +received +main +son +served +different +##en +behind +himself +felt +members +power +football +law +voice +play +##in +near +park +history +30 +having +2005 +16 +##man +saw +mother +##al +army +point +front +help +english +street +art +late +hands +games +award +##ia +young +14 +put +published +country +division +across +told +13 +often +ever +french +london +center +six +red +2017 +led +days +include +light +25 +find +tell +among +species +really +according +central +half +2004 +form +original +gave +office +making +enough +lost +full +opened +must +included +live +given +german +player +run +business +woman +community +cup +might +million +land +2000 +court +development +17 +short +round +ii +km +seen +class +story +always +become +sure +research +almost +director +council +la +##2 +career +things +using +island +##z +couldn +car +##is +24 +close +force +##1 +better +free +support +control +field +students +2003 +education +married +##b +nothing +worked +others +record +big +inside +level +anything +continued +give +james +##3 +military +established +non +returned +feel +does +title +written +thing +feet +william +far +co +association +hard +already +2002 +##ra +championship 
+human +western +100 +##na +department +hall +role +various +production +21 +19 +heart +2001 +living +fire +version +##ers +##f +television +royal +##4 +produced +working +act +case +society +region +present +radio +period +looking +least +total +keep +england +wife +program +per +brother +mind +special +22 +##le +am +works +soon +##6 +political +george +services +taken +created +##7 +further +able +reached +david +union +joined +upon +done +important +social +information +either +##ic +##x +appeared +position +ground +lead +rock +dark +election +23 +board +france +hair +course +arms +site +police +girl +instead +real +sound +##v +words +moment +##te +someone +##8 +summer +project +announced +san +less +wrote +past +followed +##5 +blue +founded +al +finally +india +taking +records +america +##ne +1999 +design +considered +northern +god +stop +battle +toward +european +outside +described +track +today +playing +language +28 +call +26 +heard +professional +low +australia +miles +california +win +yet +green +##ie +trying +blood +##ton +southern +science +maybe +everything +match +square +27 +mouth +video +race +recorded +leave +above +##9 +daughter +points +space +1998 +museum +change +middle +common +##0 +move +tv +post +##ta +lake +seven +tried +elected +closed +ten +paul +minister +##th +months +start +chief +return +canada +person +sea +release +similar +modern +brought +rest +hit +formed +mr +##la +1997 +floor +event +doing +thomas +1996 +robert +care +killed +training +star +week +needed +turn +finished +railway +rather +news +health +sent +example +ran +term +michael +coming +currently +yes +forces +despite +gold +areas +50 +stage +fact +29 +dead +says +popular +2018 +originally +germany +probably +developed +result +pulled +friend +stood +money +running +mi +signed +word +songs +child +eventually +met +tour +average +teams +minutes +festival +current +deep +kind +1995 +decided +usually +eastern +seemed +##ness +episode +bed +added +table +indian +private +charles +route +available +idea +throughout +centre +addition +appointed +style +1994 +books +eight +construction +press +mean +wall +friends +remained +schools +study +##ch +##um +institute +oh +chinese +sometimes +events +possible +1992 +australian +type +brown +forward +talk +process +food +debut +seat +performance +committee +features +character +arts +herself +else +lot +strong +russian +range +hours +peter +arm +##da +morning +dr +sold +##ry +quickly +directed +1993 +guitar +china +##w +31 +list +##ma +performed +media +uk +players +smile +##rs +myself +40 +placed +coach +province +towards +wouldn +leading +whole +boy +official +designed +grand +census +##el +europe +attack +japanese +henry +1991 +##re +##os +cross +getting +alone +action +lower +network +wide +washington +japan +1990 +hospital +believe +changed +sister +##ar +hold +gone +sir +hadn +ship +##ka +studies +academy +shot +rights +below +base +bad +involved +kept +largest +##ist +bank +future +especially +beginning +mark +movement +section +female +magazine +plan +professor +lord +longer +##ian +sat +walked +hill +actually +civil +energy +model +families +size +thus +aircraft +completed +includes +data +captain +##or +fight +vocals +featured +richard +bridge +fourth +1989 +officer +stone +hear +##ism +means +medical +groups +management +self +lips +competition +entire +lived +technology +leaving +federal +tournament +bit +passed +hot +independent +awards +kingdom +mary +spent +fine +doesn +reported +##ling +jack +fall +raised +itself +stay +true +studio +1988 
+sports +replaced +paris +systems +saint +leader +theatre +whose +market +capital +parents +spanish +canadian +earth +##ity +cut +degree +writing +bay +christian +awarded +natural +higher +bill +##as +coast +provided +previous +senior +ft +valley +organization +stopped +onto +countries +parts +conference +queen +security +interest +saying +allowed +master +earlier +phone +matter +smith +winning +try +happened +moving +campaign +los +##ley +breath +nearly +mid +1987 +certain +girls +date +italian +african +standing +fell +artist +##ted +shows +deal +mine +industry +1986 +##ng +everyone +republic +provide +collection +library +student +##ville +primary +owned +older +via +heavy +1st +makes +##able +attention +anyone +africa +##ri +stated +length +ended +fingers +command +staff +skin +foreign +opening +governor +okay +medal +kill +sun +cover +job +1985 +introduced +chest +hell +feeling +##ies +success +meet +reason +standard +meeting +novel +1984 +trade +source +buildings +##land +rose +guy +goal +##ur +chapter +native +husband +previously +unit +limited +entered +weeks +producer +operations +mountain +takes +covered +forced +related +roman +complete +successful +key +texas +cold +##ya +channel +1980 +traditional +films +dance +clear +approximately +500 +nine +van +prince +question +active +tracks +ireland +regional +silver +author +personal +sense +operation +##ine +economic +1983 +holding +twenty +isbn +additional +speed +hour +edition +regular +historic +places +whom +shook +movie +km² +secretary +prior +report +chicago +read +foundation +view +engine +scored +1982 +units +ask +airport +property +ready +immediately +lady +month +listed +contract +##de +manager +themselves +lines +##ki +navy +writer +meant +##ts +runs +##ro +practice +championships +singer +glass +commission +required +forest +starting +culture +generally +giving +access +attended +test +couple +stand +catholic +martin +caught +executive +##less +eye +##ey +thinking +chair +quite +shoulder +1979 +hope +decision +plays +defeated +municipality +whether +structure +offered +slowly +pain +ice +direction +##ion +paper +mission +1981 +mostly +200 +noted +individual +managed +nature +lives +plant +##ha +helped +except +studied +computer +figure +relationship +issue +significant +loss +die +smiled +gun +ago +highest +1972 +##am +male +bring +goals +mexico +problem +distance +commercial +completely +location +annual +famous +drive +1976 +neck +1978 +surface +caused +italy +understand +greek +highway +wrong +hotel +comes +appearance +joseph +double +issues +musical +companies +castle +income +review +assembly +bass +initially +parliament +artists +experience +1974 +particular +walk +foot +engineering +talking +window +dropped +##ter +miss +baby +boys +break +1975 +stars +edge +remember +policy +carried +train +stadium +bar +sex +angeles +evidence +##ge +becoming +assistant +soviet +1977 +upper +step +wing +1970 +youth +financial +reach +##ll +actor +numerous +##se +##st +nodded +arrived +##ation +minute +##nt +believed +sorry +complex +beautiful +victory +associated +temple +1968 +1973 +chance +perhaps +metal +##son +1945 +bishop +##et +lee +launched +particularly +tree +le +retired +subject +prize +contains +yeah +theory +empire +##ce +suddenly +waiting +trust +recording +##to +happy +terms +camp +champion +1971 +religious +pass +zealand +names +2nd +port +ancient +tom +corner +represented +watch +legal +anti +justice +cause +watched +brothers +45 +material +changes +simply +response +louis +fast +##ting +answer +60 +historical 
+1969 +stories +straight +create +feature +increased +rate +administration +virginia +el +activities +cultural +overall +winner +programs +basketball +legs +guard +beyond +cast +doctor +mm +flight +results +remains +cost +effect +winter +##ble +larger +islands +problems +chairman +grew +commander +isn +1967 +pay +failed +selected +hurt +fort +box +regiment +majority +journal +35 +edward +plans +##ke +##ni +shown +pretty +irish +characters +directly +scene +likely +operated +allow +spring +##j +junior +matches +looks +mike +houses +fellow +##tion +beach +marriage +##ham +##ive +rules +oil +65 +florida +expected +nearby +congress +sam +peace +recent +iii +wait +subsequently +cell +##do +variety +serving +agreed +please +poor +joe +pacific +attempt +wood +democratic +piece +prime +##ca +rural +mile +touch +appears +township +1964 +1966 +soldiers +##men +##ized +1965 +pennsylvania +closer +fighting +claimed +score +jones +physical +editor +##ous +filled +genus +specific +sitting +super +mom +##va +therefore +supported +status +fear +cases +store +meaning +wales +minor +spain +tower +focus +vice +frank +follow +parish +separate +golden +horse +fifth +remaining +branch +32 +presented +stared +##id +uses +secret +forms +##co +baseball +exactly +##ck +choice +note +discovered +travel +composed +truth +russia +ball +color +kiss +dad +wind +continue +ring +referred +numbers +digital +greater +##ns +metres +slightly +direct +increase +1960 +responsible +crew +rule +trees +troops +##no +broke +goes +individuals +hundred +weight +creek +sleep +memory +defense +provides +ordered +code +value +jewish +windows +1944 +safe +judge +whatever +corps +realized +growing +pre +##ga +cities +alexander +gaze +lies +spread +scott +letter +showed +situation +mayor +transport +watching +workers +extended +##li +expression +normal +##ment +chart +multiple +border +##ba +host +##ner +daily +mrs +walls +piano +##ko +heat +cannot +##ate +earned +products +drama +era +authority +seasons +join +grade +##io +sign +difficult +machine +1963 +territory +mainly +##wood +stations +squadron +1962 +stepped +iron +19th +##led +serve +appear +sky +speak +broken +charge +knowledge +kilometres +removed +ships +article +campus +simple +##ty +pushed +britain +##ve +leaves +recently +cd +soft +boston +latter +easy +acquired +poland +##sa +quality +officers +presence +planned +nations +mass +broadcast +jean +share +image +influence +wild +offer +emperor +electric +reading +headed +ability +promoted +yellow +ministry +1942 +throat +smaller +politician +##by +latin +spoke +cars +williams +males +lack +pop +80 +##ier +acting +seeing +consists +##ti +estate +1961 +pressure +johnson +newspaper +jr +chris +olympics +online +conditions +beat +elements +walking +vote +##field +needs +carolina +text +featuring +global +block +shirt +levels +francisco +purpose +females +et +dutch +duke +ahead +gas +twice +safety +serious +turning +highly +lieutenant +firm +maria +amount +mixed +daniel +proposed +perfect +agreement +affairs +3rd +seconds +contemporary +paid +1943 +prison +save +kitchen +label +administrative +intended +constructed +academic +nice +teacher +races +1956 +formerly +corporation +ben +nation +issued +shut +1958 +drums +housing +victoria +seems +opera +1959 +graduated +function +von +mentioned +picked +build +recognized +shortly +protection +picture +notable +exchange +elections +1980s +loved +percent +racing +fish +elizabeth +garden +volume +hockey +1941 +beside +settled +##ford +1940 +competed +replied +drew +1948 +actress +marine 
+scotland +steel +glanced +farm +steve +1957 +risk +tonight +positive +magic +singles +effects +gray +screen +dog +##ja +residents +bus +sides +none +secondary +literature +polish +destroyed +flying +founder +households +1939 +lay +reserve +usa +gallery +##ler +1946 +industrial +younger +approach +appearances +urban +ones +1950 +finish +avenue +powerful +fully +growth +page +honor +jersey +projects +advanced +revealed +basic +90 +infantry +pair +equipment +visit +33 +evening +search +grant +effort +solo +treatment +buried +republican +primarily +bottom +owner +1970s +israel +gives +jim +dream +bob +remain +spot +70 +notes +produce +champions +contact +ed +soul +accepted +ways +del +##ally +losing +split +price +capacity +basis +trial +questions +##ina +1955 +20th +guess +officially +memorial +naval +initial +##ization +whispered +median +engineer +##ful +sydney +##go +columbia +strength +300 +1952 +tears +senate +00 +card +asian +agent +1947 +software +44 +draw +warm +supposed +com +pro +##il +transferred +leaned +##at +candidate +escape +mountains +asia +potential +activity +entertainment +seem +traffic +jackson +murder +36 +slow +product +orchestra +haven +agency +bbc +taught +website +comedy +unable +storm +planning +albums +rugby +environment +scientific +grabbed +protect +##hi +boat +typically +1954 +1953 +damage +principal +divided +dedicated +mount +ohio +##berg +pick +fought +driver +##der +empty +shoulders +sort +thank +berlin +prominent +account +freedom +necessary +efforts +alex +headquarters +follows +alongside +des +simon +andrew +suggested +operating +learning +steps +1949 +sweet +technical +begin +easily +34 +teeth +speaking +settlement +scale +##sh +renamed +ray +max +enemy +semi +joint +compared +##rd +scottish +leadership +analysis +offers +georgia +pieces +captured +animal +deputy +guest +organized +##lin +tony +combined +method +challenge +1960s +huge +wants +battalion +sons +rise +crime +types +facilities +telling +path +1951 +platform +sit +1990s +##lo +tells +assigned +rich +pull +##ot +commonly +alive +##za +letters +concept +conducted +wearing +happen +bought +becomes +holy +gets +ocean +defeat +languages +purchased +coffee +occurred +titled +##q +declared +applied +sciences +concert +sounds +jazz +brain +##me +painting +fleet +tax +nick +##ius +michigan +count +animals +leaders +episodes +##line +content +##den +birth +##it +clubs +64 +palace +critical +refused +fair +leg +laughed +returning +surrounding +participated +formation +lifted +pointed +connected +rome +medicine +laid +taylor +santa +powers +adam +tall +shared +focused +knowing +yards +entrance +falls +##wa +calling +##ad +sources +chosen +beneath +resources +yard +##ite +nominated +silence +zone +defined +##que +gained +thirty +38 +bodies +moon +##ard +adopted +christmas +widely +register +apart +iran +premier +serves +du +unknown +parties +##les +generation +##ff +continues +quick +fields +brigade +quiet +teaching +clothes +impact +weapons +partner +flat +theater +supreme +1938 +37 +relations +##tor +plants +suffered +1936 +wilson +kids +begins +##age +1918 +seats +armed +internet +models +worth +laws +400 +communities +classes +background +knows +thanks +quarter +reaching +humans +carry +killing +format +kong +hong +setting +75 +architecture +disease +railroad +inc +possibly +wish +arthur +thoughts +harry +doors +density +##di +crowd +illinois +stomach +tone +unique +reports +anyway +##ir +liberal +der +vehicle +thick +dry +drug +faced +largely +facility +theme +holds +creation +strange +colonel +##mi 
+revolution +bell +politics +turns +silent +rail +relief +independence +combat +shape +write +determined +sales +learned +4th +finger +oxford +providing +1937 +heritage +fiction +situated +designated +allowing +distribution +hosted +##est +sight +interview +estimated +reduced +##ria +toronto +footballer +keeping +guys +damn +claim +motion +sport +sixth +stayed +##ze +en +rear +receive +handed +twelve +dress +audience +granted +brazil +##well +spirit +##ated +noticed +etc +olympic +representative +eric +tight +trouble +reviews +drink +vampire +missing +roles +ranked +newly +household +finals +wave +critics +##ee +phase +massachusetts +pilot +unlike +philadelphia +bright +guns +crown +organizations +roof +42 +respectively +clearly +tongue +marked +circle +fox +korea +bronze +brian +expanded +sexual +supply +yourself +inspired +labour +fc +##ah +reference +vision +draft +connection +brand +reasons +1935 +classic +driving +trip +jesus +cells +entry +1920 +neither +trail +claims +atlantic +orders +labor +nose +afraid +identified +intelligence +calls +cancer +attacked +passing +stephen +positions +imperial +grey +jason +39 +sunday +48 +swedish +avoid +extra +uncle +message +covers +allows +surprise +materials +fame +hunter +##ji +1930 +citizens +figures +davis +environmental +confirmed +shit +titles +di +performing +difference +acts +attacks +##ov +existing +votes +opportunity +nor +shop +entirely +trains +opposite +pakistan +##pa +develop +resulted +representatives +actions +reality +pressed +##ish +barely +wine +conversation +faculty +northwest +ends +documentary +nuclear +stock +grace +sets +eat +alternative +##ps +bag +resulting +creating +surprised +cemetery +1919 +drop +finding +sarah +cricket +streets +tradition +ride +1933 +exhibition +target +ear +explained +rain +composer +injury +apartment +municipal +educational +occupied +netherlands +clean +billion +constitution +learn +1914 +maximum +classical +francis +lose +opposition +jose +ontario +bear +core +hills +rolled +ending +drawn +permanent +fun +##tes +##lla +lewis +sites +chamber +ryan +##way +scoring +height +1934 +##house +lyrics +staring +55 +officials +1917 +snow +oldest +##tic +orange +##ger +qualified +interior +apparently +succeeded +thousand +dinner +lights +existence +fans +heavily +41 +greatest +conservative +send +bowl +plus +enter +catch +##un +economy +duty +1929 +speech +authorities +princess +performances +versions +shall +graduate +pictures +effective +remembered +poetry +desk +crossed +starring +starts +passenger +sharp +##ant +acres +ass +weather +falling +rank +fund +supporting +check +adult +publishing +heads +cm +southeast +lane +##burg +application +bc +##ura +les +condition +transfer +prevent +display +ex +regions +earl +federation +cool +relatively +answered +besides +1928 +obtained +portion +##town +mix +##ding +reaction +liked +dean +express +peak +1932 +##tte +counter +religion +chain +rare +miller +convention +aid +lie +vehicles +mobile +perform +squad +wonder +lying +crazy +sword +##ping +attempted +centuries +weren +philosophy +category +##ize +anna +interested +47 +sweden +wolf +frequently +abandoned +kg +literary +alliance +task +entitled +##ay +threw +promotion +factory +tiny +soccer +visited +matt +fm +achieved +52 +defence +internal +persian +43 +methods +##ging +arrested +otherwise +cambridge +programming +villages +elementary +districts +rooms +criminal +conflict +worry +trained +1931 +attempts +waited +signal +bird +truck +subsequent +programme +##ol +ad +49 +communist +details +faith +sector 
+patrick +carrying +laugh +##ss +controlled +korean +showing +origin +fuel +evil +1927 +##ent +brief +identity +darkness +address +pool +missed +publication +web +planet +ian +anne +wings +invited +##tt +briefly +standards +kissed +##be +ideas +climate +causing +walter +worse +albert +articles +winners +desire +aged +northeast +dangerous +gate +doubt +1922 +wooden +multi +##ky +poet +rising +funding +46 +communications +communication +violence +copies +prepared +ford +investigation +skills +1924 +pulling +electronic +##ak +##ial +##han +containing +ultimately +offices +singing +understanding +restaurant +tomorrow +fashion +christ +ward +da +pope +stands +5th +flow +studios +aired +commissioned +contained +exist +fresh +americans +##per +wrestling +approved +kid +employed +respect +suit +1925 +angel +asking +increasing +frame +angry +selling +1950s +thin +finds +##nd +temperature +statement +ali +explain +inhabitants +towns +extensive +narrow +51 +jane +flowers +images +promise +somewhere +object +fly +closely +##ls +1912 +bureau +cape +1926 +weekly +presidential +legislative +1921 +##ai +##au +launch +founding +##ny +978 +##ring +artillery +strike +un +institutions +roll +writers +landing +chose +kevin +anymore +pp +##ut +attorney +fit +dan +billboard +receiving +agricultural +breaking +sought +dave +admitted +lands +mexican +##bury +charlie +specifically +hole +iv +howard +credit +moscow +roads +accident +1923 +proved +wear +struck +hey +guards +stuff +slid +expansion +1915 +cat +anthony +##kin +melbourne +opposed +sub +southwest +architect +failure +plane +1916 +##ron +map +camera +tank +listen +regarding +wet +introduction +metropolitan +link +ep +fighter +inch +grown +gene +anger +fixed +buy +dvd +khan +domestic +worldwide +chapel +mill +functions +examples +##head +developing +1910 +turkey +hits +pocket +antonio +papers +grow +unless +circuit +18th +concerned +attached +journalist +selection +journey +converted +provincial +painted +hearing +aren +bands +negative +aside +wondered +knight +lap +survey +ma +##ow +noise +billy +##ium +shooting +guide +bedroom +priest +resistance +motor +homes +sounded +giant +##mer +150 +scenes +equal +comic +patients +hidden +solid +actual +bringing +afternoon +touched +funds +wedding +consisted +marie +canal +sr +kim +treaty +turkish +recognition +residence +cathedral +broad +knees +incident +shaped +fired +norwegian +handle +cheek +contest +represent +##pe +representing +beauty +##sen +birds +advantage +emergency +wrapped +drawing +notice +pink +broadcasting +##ong +somehow +bachelor +seventh +collected +registered +establishment +alan +assumed +chemical +personnel +roger +retirement +jeff +portuguese +wore +tied +device +threat +progress +advance +##ised +banks +hired +manchester +nfl +teachers +structures +forever +##bo +tennis +helping +saturday +sale +applications +junction +hip +incorporated +neighborhood +dressed +ceremony +##ds +influenced +hers +visual +stairs +decades +inner +kansas +hung +hoped +gain +scheduled +downtown +engaged +austria +clock +norway +certainly +pale +protected +1913 +victor +employees +plate +putting +surrounded +##ists +finishing +blues +tropical +##ries +minnesota +consider +philippines +accept +54 +retrieved +1900 +concern +anderson +properties +institution +gordon +successfully +vietnam +##dy +backing +outstanding +muslim +crossing +folk +producing +usual +demand +occurs +observed +lawyer +educated +##ana +kelly +string +pleasure +budget +items +quietly +colorado +philip +typical +##worth +derived +600 +survived 
+asks +mental +##ide +56 +jake +jews +distinguished +ltd +1911 +sri +extremely +53 +athletic +loud +thousands +worried +shadow +transportation +horses +weapon +arena +importance +users +tim +objects +contributed +dragon +douglas +aware +senator +johnny +jordan +sisters +engines +flag +investment +samuel +shock +capable +clark +row +wheel +refers +session +familiar +biggest +wins +hate +maintained +drove +hamilton +request +expressed +injured +underground +churches +walker +wars +tunnel +passes +stupid +agriculture +softly +cabinet +regarded +joining +indiana +##ea +##ms +push +dates +spend +behavior +woods +protein +gently +chase +morgan +mention +burning +wake +combination +occur +mirror +leads +jimmy +indeed +impossible +singapore +paintings +covering +##nes +soldier +locations +attendance +sell +historian +wisconsin +invasion +argued +painter +diego +changing +egypt +##don +experienced +inches +##ku +missouri +vol +grounds +spoken +switzerland +##gan +reform +rolling +ha +forget +massive +resigned +burned +allen +tennessee +locked +values +improved +##mo +wounded +universe +sick +dating +facing +pack +purchase +user +##pur +moments +##ul +merged +anniversary +1908 +coal +brick +understood +causes +dynasty +queensland +establish +stores +crisis +promote +hoping +views +cards +referee +extension +##si +raise +arizona +improve +colonial +formal +charged +##rt +palm +lucky +hide +rescue +faces +95 +feelings +candidates +juan +##ell +goods +6th +courses +weekend +59 +luke +cash +fallen +##om +delivered +affected +installed +carefully +tries +swiss +hollywood +costs +lincoln +responsibility +##he +shore +file +proper +normally +maryland +assistance +jump +constant +offering +friendly +waters +persons +realize +contain +trophy +800 +partnership +factor +58 +musicians +cry +bound +oregon +indicated +hero +houston +medium +##ure +consisting +somewhat +##ara +57 +cycle +##che +beer +moore +frederick +gotten +eleven +worst +weak +approached +arranged +chin +loan +universal +bond +fifteen +pattern +disappeared +##ney +translated +##zed +lip +arab +capture +interests +insurance +##chi +shifted +cave +prix +warning +sections +courts +coat +plot +smell +feed +golf +favorite +maintain +knife +vs +voted +degrees +finance +quebec +opinion +translation +manner +ruled +operate +productions +choose +musician +discovery +confused +tired +separated +stream +techniques +committed +attend +ranking +kings +throw +passengers +measure +horror +fan +mining +sand +danger +salt +calm +decade +dam +require +runner +##ik +rush +associate +greece +##ker +rivers +consecutive +matthew +##ski +sighed +sq +documents +steam +edited +closing +tie +accused +1905 +##ini +islamic +distributed +directors +organisation +bruce +7th +breathing +mad +lit +arrival +concrete +taste +08 +composition +shaking +faster +amateur +adjacent +stating +1906 +twin +flew +##ran +tokyo +publications +##tone +obviously +ridge +storage +1907 +carl +pages +concluded +desert +driven +universities +ages +terminal +sequence +borough +250 +constituency +creative +cousin +economics +dreams +margaret +notably +reduce +montreal +mode +17th +ears +saved +jan +vocal +##ica +1909 +andy +##jo +riding +roughly +threatened +##ise +meters +meanwhile +landed +compete +repeated +grass +czech +regularly +charges +tea +sudden +appeal +##ung +solution +describes +pierre +classification +glad +parking +##ning +belt +physics +99 +rachel +add +hungarian +participate +expedition +damaged +gift +childhood +85 +fifty +##red +mathematics +jumped +letting +defensive +mph +##ux 
+##gh +testing +##hip +hundreds +shoot +owners +matters +smoke +israeli +kentucky +dancing +mounted +grandfather +emma +designs +profit +argentina +##gs +truly +li +lawrence +cole +begun +detroit +willing +branches +smiling +decide +miami +enjoyed +recordings +##dale +poverty +ethnic +gay +##bi +gary +arabic +09 +accompanied +##one +##ons +fishing +determine +residential +acid +##ary +alice +returns +starred +mail +##ang +jonathan +strategy +##ue +net +forty +cook +businesses +equivalent +commonwealth +distinct +ill +##cy +seriously +##ors +##ped +shift +harris +replace +rio +imagine +formula +ensure +##ber +additionally +scheme +conservation +occasionally +purposes +feels +favor +##and +##ore +1930s +contrast +hanging +hunt +movies +1904 +instruments +victims +danish +christopher +busy +demon +sugar +earliest +colony +studying +balance +duties +##ks +belgium +slipped +carter +05 +visible +stages +iraq +fifa +##im +commune +forming +zero +07 +continuing +talked +counties +legend +bathroom +option +tail +clay +daughters +afterwards +severe +jaw +visitors +##ded +devices +aviation +russell +kate +##vi +entering +subjects +##ino +temporary +swimming +forth +smooth +ghost +audio +bush +operates +rocks +movements +signs +eddie +##tz +ann +voices +honorary +06 +memories +dallas +pure +measures +racial +promised +66 +harvard +ceo +16th +parliamentary +indicate +benefit +flesh +dublin +louisiana +1902 +1901 +patient +sleeping +1903 +membership +coastal +medieval +wanting +element +scholars +rice +62 +limit +survive +makeup +rating +definitely +collaboration +obvious +##tan +boss +ms +baron +birthday +linked +soil +diocese +##lan +ncaa +##mann +offensive +shell +shouldn +waist +##tus +plain +ross +organ +resolution +manufacturing +adding +relative +kennedy +98 +whilst +moth +marketing +gardens +crash +72 +heading +partners +credited +carlos +moves +cable +##zi +marshall +##out +depending +bottle +represents +rejected +responded +existed +04 +jobs +denmark +lock +##ating +treated +graham +routes +talent +commissioner +drugs +secure +tests +reign +restored +photography +##gi +contributions +oklahoma +designer +disc +grin +seattle +robin +paused +atlanta +unusual +##gate +praised +las +laughing +satellite +hungary +visiting +##sky +interesting +factors +deck +poems +norman +##water +stuck +speaker +rifle +domain +premiered +##her +dc +comics +actors +01 +reputation +eliminated +8th +ceiling +prisoners +script +##nce +leather +austin +mississippi +rapidly +admiral +parallel +charlotte +guilty +tools +gender +divisions +fruit +##bs +laboratory +nelson +fantasy +marry +rapid +aunt +tribe +requirements +aspects +suicide +amongst +adams +bone +ukraine +abc +kick +sees +edinburgh +clothing +column +rough +gods +hunting +broadway +gathered +concerns +##ek +spending +ty +12th +snapped +requires +solar +bones +cavalry +##tta +iowa +drinking +waste +index +franklin +charity +thompson +stewart +tip +flash +landscape +friday +enjoy +singh +poem +listening +##back +eighth +fred +differences +adapted +bomb +ukrainian +surgery +corporate +masters +anywhere +##more +waves +odd +sean +portugal +orleans +dick +debate +kent +eating +puerto +cleared +96 +expect +cinema +97 +guitarist +blocks +electrical +agree +involving +depth +dying +panel +struggle +##ged +peninsula +adults +novels +emerged +vienna +metro +debuted +shoes +tamil +songwriter +meets +prove +beating +instance +heaven +scared +sending +marks +artistic +passage +superior +03 +significantly +shopping +##tive +retained +##izing +malaysia +technique +cheeks 
+##ola +warren +maintenance +destroy +extreme +allied +120 +appearing +##yn +fill +advice +alabama +qualifying +policies +cleveland +hat +battery +smart +authors +10th +soundtrack +acted +dated +lb +glance +equipped +coalition +funny +outer +ambassador +roy +possibility +couples +campbell +dna +loose +ethan +supplies +1898 +gonna +88 +monster +##res +shake +agents +frequency +springs +dogs +practices +61 +gang +plastic +easier +suggests +gulf +blade +exposed +colors +industries +markets +pan +nervous +electoral +charts +legislation +ownership +##idae +mac +appointment +shield +copy +assault +socialist +abbey +monument +license +throne +employment +jay +93 +replacement +charter +cloud +powered +suffering +accounts +oak +connecticut +strongly +wright +colour +crystal +13th +context +welsh +networks +voiced +gabriel +jerry +##cing +forehead +mp +##ens +manage +schedule +totally +remix +##ii +forests +occupation +print +nicholas +brazilian +strategic +vampires +engineers +76 +roots +seek +correct +instrumental +und +alfred +backed +hop +##des +stanley +robinson +traveled +wayne +welcome +austrian +achieve +67 +exit +rates +1899 +strip +whereas +##cs +sing +deeply +adventure +bobby +rick +jamie +careful +components +cap +useful +personality +knee +##shi +pushing +hosts +02 +protest +ca +ottoman +symphony +##sis +63 +boundary +1890 +processes +considering +considerable +tons +##work +##ft +##nia +cooper +trading +dear +conduct +91 +illegal +apple +revolutionary +holiday +definition +harder +##van +jacob +circumstances +destruction +##lle +popularity +grip +classified +liverpool +donald +baltimore +flows +seeking +honour +approval +92 +mechanical +till +happening +statue +critic +increasingly +immediate +describe +commerce +stare +##ster +indonesia +meat +rounds +boats +baker +orthodox +depression +formally +worn +naked +claire +muttered +sentence +11th +emily +document +77 +criticism +wished +vessel +spiritual +bent +virgin +parker +minimum +murray +lunch +danny +printed +compilation +keyboards +false +blow +belonged +68 +raising +78 +cutting +##board +pittsburgh +##up +9th +shadows +81 +hated +indigenous +jon +15th +barry +scholar +ah +##zer +oliver +##gy +stick +susan +meetings +attracted +spell +romantic +##ver +ye +1895 +photo +demanded +customers +##ac +1896 +logan +revival +keys +modified +commanded +jeans +##ious +upset +raw +phil +detective +hiding +resident +vincent +##bly +experiences +diamond +defeating +coverage +lucas +external +parks +franchise +helen +bible +successor +percussion +celebrated +il +lift +profile +clan +romania +##ied +mills +##su +nobody +achievement +shrugged +fault +1897 +rhythm +initiative +breakfast +carbon +700 +69 +lasted +violent +74 +wound +ken +killer +gradually +filmed +°c +dollars +processing +94 +remove +criticized +guests +sang +chemistry +##vin +legislature +disney +##bridge +uniform +escaped +integrated +proposal +purple +denied +liquid +karl +influential +morris +nights +stones +intense +experimental +twisted +71 +84 +##ld +pace +nazi +mitchell +ny +blind +reporter +newspapers +14th +centers +burn +basin +forgotten +surviving +filed +collections +monastery +losses +manual +couch +description +appropriate +merely +tag +missions +sebastian +restoration +replacing +triple +73 +elder +julia +warriors +benjamin +julian +convinced +stronger +amazing +declined +versus +merchant +happens +output +finland +bare +barbara +absence +ignored +dawn +injuries +##port +producers +##ram +82 +luis +##ities +kw +admit +expensive +electricity +nba +exception +symbol 
+##ving +ladies +shower +sheriff +characteristics +##je +aimed +button +ratio +effectively +summit +angle +jury +bears +foster +vessels +pants +executed +evans +dozen +advertising +kicked +patrol +1889 +competitions +lifetime +principles +athletics +##logy +birmingham +sponsored +89 +rob +nomination +1893 +acoustic +##sm +creature +longest +##tra +credits +harbor +dust +josh +##so +territories +milk +infrastructure +completion +thailand +indians +leon +archbishop +##sy +assist +pitch +blake +arrangement +girlfriend +serbian +operational +hence +sad +scent +fur +dj +sessions +hp +refer +rarely +##ora +exists +1892 +##ten +scientists +dirty +penalty +burst +portrait +seed +79 +pole +limits +rival +1894 +stable +alpha +grave +constitutional +alcohol +arrest +flower +mystery +devil +architectural +relationships +greatly +habitat +##istic +larry +progressive +remote +cotton +##ics +##ok +preserved +reaches +##ming +cited +86 +vast +scholarship +decisions +cbs +joy +teach +1885 +editions +knocked +eve +searching +partly +participation +gap +animated +fate +excellent +##ett +na +87 +alternate +saints +youngest +##ily +climbed +##ita +##tors +suggest +##ct +discussion +staying +choir +lakes +jacket +revenue +nevertheless +peaked +instrument +wondering +annually +managing +neil +1891 +signing +terry +##ice +apply +clinical +brooklyn +aim +catherine +fuck +farmers +figured +ninth +pride +hugh +evolution +ordinary +involvement +comfortable +shouted +tech +encouraged +taiwan +representation +sharing +##lia +##em +panic +exact +cargo +competing +fat +cried +83 +1920s +occasions +pa +cabin +borders +utah +marcus +##isation +badly +muscles +##ance +victorian +transition +warner +bet +permission +##rin +slave +terrible +similarly +shares +seth +uefa +possession +medals +benefits +colleges +lowered +perfectly +mall +transit +##ye +##kar +publisher +##ened +harrison +deaths +elevation +##ae +asleep +machines +sigh +ash +hardly +argument +occasion +parent +leo +decline +1888 +contribution +##ua +concentration +1000 +opportunities +hispanic +guardian +extent +emotions +hips +mason +volumes +bloody +controversy +diameter +steady +mistake +phoenix +identify +violin +##sk +departure +richmond +spin +funeral +enemies +1864 +gear +literally +connor +random +sergeant +grab +confusion +1865 +transmission +informed +op +leaning +sacred +suspended +thinks +gates +portland +luck +agencies +yours +hull +expert +muscle +layer +practical +sculpture +jerusalem +latest +lloyd +statistics +deeper +recommended +warrior +arkansas +mess +supports +greg +eagle +1880 +recovered +rated +concerts +rushed +##ano +stops +eggs +files +premiere +keith +##vo +delhi +turner +pit +affair +belief +paint +##zing +mate +##ach +##ev +victim +##ology +withdrew +bonus +styles +fled +##ud +glasgow +technologies +funded +nbc +adaptation +##ata +portrayed +cooperation +supporters +judges +bernard +justin +hallway +ralph +##ick +graduating +controversial +distant +continental +spider +bite +##ho +recognize +intention +mixing +##ese +egyptian +bow +tourism +suppose +claiming +tiger +dominated +participants +vi +##ru +nurse +partially +tape +##rum +psychology +##rn +essential +touring +duo +voting +civilian +emotional +channels +##king +apparent +hebrew +1887 +tommy +carrier +intersection +beast +hudson +##gar +##zo +lab +nova +bench +discuss +costa +##ered +detailed +behalf +drivers +unfortunately +obtain +##lis +rocky +##dae +siege +friendship +honey +##rian +1861 +amy +hang +posted +governments +collins +respond +wildlife +preferred +operator 
+##po +laura +pregnant +videos +dennis +suspected +boots +instantly +weird +automatic +businessman +alleged +placing +throwing +ph +mood +1862 +perry +venue +jet +remainder +##lli +##ci +passion +biological +boyfriend +1863 +dirt +buffalo +ron +segment +fa +abuse +##era +genre +thrown +stroke +colored +stress +exercise +displayed +##gen +struggled +##tti +abroad +dramatic +wonderful +thereafter +madrid +component +widespread +##sed +tale +citizen +todd +monday +1886 +vancouver +overseas +forcing +crying +descent +##ris +discussed +substantial +ranks +regime +1870 +provinces +switch +drum +zane +ted +tribes +proof +lp +cream +researchers +volunteer +manor +silk +milan +donated +allies +venture +principle +delivery +enterprise +##ves +##ans +bars +traditionally +witch +reminded +copper +##uk +pete +inter +links +colin +grinned +elsewhere +competitive +frequent +##oy +scream +##hu +tension +texts +submarine +finnish +defending +defend +pat +detail +1884 +affiliated +stuart +themes +villa +periods +tool +belgian +ruling +crimes +answers +folded +licensed +resort +demolished +hans +lucy +1881 +lion +traded +photographs +writes +craig +##fa +trials +generated +beth +noble +debt +percentage +yorkshire +erected +ss +viewed +grades +confidence +ceased +islam +telephone +retail +##ible +chile +m² +roberts +sixteen +##ich +commented +hampshire +innocent +dual +pounds +checked +regulations +afghanistan +sung +rico +liberty +assets +bigger +options +angels +relegated +tribute +wells +attending +leaf +##yan +butler +romanian +forum +monthly +lisa +patterns +gmina +##tory +madison +hurricane +rev +##ians +bristol +##ula +elite +valuable +disaster +democracy +awareness +germans +freyja +##ins +loop +absolutely +paying +populations +maine +sole +prayer +spencer +releases +doorway +bull +##ani +lover +midnight +conclusion +##sson +thirteen +lily +mediterranean +##lt +nhl +proud +sample +##hill +drummer +guinea +##ova +murphy +climb +##ston +instant +attributed +horn +ain +railways +steven +##ao +autumn +ferry +opponent +root +traveling +secured +corridor +stretched +tales +sheet +trinity +cattle +helps +indicates +manhattan +murdered +fitted +1882 +gentle +grandmother +mines +shocked +vegas +produces +##light +caribbean +##ou +belong +continuous +desperate +drunk +historically +trio +waved +raf +dealing +nathan +bat +murmured +interrupted +residing +scientist +pioneer +harold +aaron +##net +delta +attempting +minority +mini +believes +chorus +tend +lots +eyed +indoor +load +shots +updated +jail +##llo +concerning +connecting +wealth +##ved +slaves +arrive +rangers +sufficient +rebuilt +##wick +cardinal +flood +muhammad +whenever +relation +runners +moral +repair +viewers +arriving +revenge +punk +assisted +bath +fairly +breathe +lists +innings +illustrated +whisper +nearest +voters +clinton +ties +ultimate +screamed +beijing +lions +andre +fictional +gathering +comfort +radar +suitable +dismissed +hms +ban +pine +wrist +atmosphere +voivodeship +bid +timber +##ned +##nan +giants +##ane +cameron +recovery +uss +identical +categories +switched +serbia +laughter +noah +ensemble +therapy +peoples +touching +##off +locally +pearl +platforms +everywhere +ballet +tables +lanka +herbert +outdoor +toured +derek +1883 +spaces +contested +swept +1878 +exclusive +slight +connections +##dra +winds +prisoner +collective +bangladesh +tube +publicly +wealthy +thai +##ys +isolated +select +##ric +insisted +pen +fortune +ticket +spotted +reportedly +animation +enforcement +tanks +110 +decides +wider +lowest +owen +##time +nod 
+hitting +##hn +gregory +furthermore +magazines +fighters +solutions +##ery +pointing +requested +peru +reed +chancellor +knights +mask +worker +eldest +flames +reduction +1860 +volunteers +##tis +reporting +##hl +wire +advisory +endemic +origins +settlers +pursue +knock +consumer +1876 +eu +compound +creatures +mansion +sentenced +ivan +deployed +guitars +frowned +involves +mechanism +kilometers +perspective +shops +maps +terminus +duncan +alien +fist +bridges +##pers +heroes +fed +derby +swallowed +##ros +patent +sara +illness +characterized +adventures +slide +hawaii +jurisdiction +##op +organised +##side +adelaide +walks +biology +se +##ties +rogers +swing +tightly +boundaries +##rie +prepare +implementation +stolen +##sha +certified +colombia +edwards +garage +##mm +recalled +##ball +rage +harm +nigeria +breast +##ren +furniture +pupils +settle +##lus +cuba +balls +client +alaska +21st +linear +thrust +celebration +latino +genetic +terror +##cia +##ening +lightning +fee +witness +lodge +establishing +skull +##ique +earning +hood +##ei +rebellion +wang +sporting +warned +missile +devoted +activist +porch +worship +fourteen +package +1871 +decorated +##shire +housed +##ock +chess +sailed +doctors +oscar +joan +treat +garcia +harbour +jeremy +##ire +traditions +dominant +jacques +##gon +##wan +relocated +1879 +amendment +sized +companion +simultaneously +volleyball +spun +acre +increases +stopping +loves +belongs +affect +drafted +tossed +scout +battles +1875 +filming +shoved +munich +tenure +vertical +romance +pc +##cher +argue +##ical +craft +ranging +www +opens +honest +tyler +yesterday +virtual +##let +muslims +reveal +snake +immigrants +radical +screaming +speakers +firing +saving +belonging +ease +lighting +prefecture +blame +farmer +hungry +grows +rubbed +beam +sur +subsidiary +##cha +armenian +sao +dropping +conventional +##fer +microsoft +reply +qualify +spots +1867 +sweat +festivals +##ken +immigration +physician +discover +exposure +sandy +explanation +isaac +implemented +##fish +hart +initiated +connect +stakes +presents +heights +householder +pleased +tourist +regardless +slip +closest +##ction +surely +sultan +brings +riley +preparation +aboard +slammed +baptist +experiment +ongoing +interstate +organic +playoffs +##ika +1877 +130 +##tar +hindu +error +tours +tier +plenty +arrangements +talks +trapped +excited +sank +ho +athens +1872 +denver +welfare +suburb +athletes +trick +diverse +belly +exclusively +yelled +1868 +##med +conversion +##ette +1874 +internationally +computers +conductor +abilities +sensitive +hello +dispute +measured +globe +rocket +prices +amsterdam +flights +tigers +inn +municipalities +emotion +references +3d +##mus +explains +airlines +manufactured +pm +archaeological +1873 +interpretation +devon +comment +##ites +settlements +kissing +absolute +improvement +suite +impressed +barcelona +sullivan +jefferson +towers +jesse +julie +##tin +##lu +grandson +hi +gauge +regard +rings +interviews +trace +raymond +thumb +departments +burns +serial +bulgarian +scores +demonstrated +##ix +1866 +kyle +alberta +underneath +romanized +##ward +relieved +acquisition +phrase +cliff +reveals +han +cuts +merger +custom +##dar +nee +gilbert +graduation +##nts +assessment +cafe +difficulty +demands +swung +democrat +jennifer +commons +1940s +grove +##yo +completing +focuses +sum +substitute +bearing +stretch +reception +##py +reflected +essentially +destination +pairs +##ched +survival +resource +##bach +promoting +doubles +messages +tear +##down +##fully +parade +florence 
+harvey +incumbent +partial +framework +900 +pedro +frozen +procedure +olivia +controls +##mic +shelter +personally +temperatures +##od +brisbane +tested +sits +marble +comprehensive +oxygen +leonard +##kov +inaugural +iranian +referring +quarters +attitude +##ivity +mainstream +lined +mars +dakota +norfolk +unsuccessful +##° +explosion +helicopter +congressional +##sing +inspector +bitch +seal +departed +divine +##ters +coaching +examination +punishment +manufacturer +sink +columns +unincorporated +signals +nevada +squeezed +dylan +dining +photos +martial +manuel +eighteen +elevator +brushed +plates +ministers +ivy +congregation +##len +slept +specialized +taxes +curve +restricted +negotiations +likes +statistical +arnold +inspiration +execution +bold +intermediate +significance +margin +ruler +wheels +gothic +intellectual +dependent +listened +eligible +buses +widow +syria +earn +cincinnati +collapsed +recipient +secrets +accessible +philippine +maritime +goddess +clerk +surrender +breaks +playoff +database +##ified +##lon +ideal +beetle +aspect +soap +regulation +strings +expand +anglo +shorter +crosses +retreat +tough +coins +wallace +directions +pressing +##oon +shipping +locomotives +comparison +topics +nephew +##mes +distinction +honors +travelled +sierra +ibn +##over +fortress +sa +recognised +carved +1869 +clients +##dan +intent +##mar +coaches +describing +bread +##ington +beaten +northwestern +##ona +merit +youtube +collapse +challenges +em +historians +objective +submitted +virus +attacking +drake +assume +##ere +diseases +marc +stem +leeds +##cus +##ab +farming +glasses +##lock +visits +nowhere +fellowship +relevant +carries +restaurants +experiments +101 +constantly +bases +targets +shah +tenth +opponents +verse +territorial +##ira +writings +corruption +##hs +instruction +inherited +reverse +emphasis +##vic +employee +arch +keeps +rabbi +watson +payment +uh +##ala +nancy +##tre +venice +fastest +sexy +banned +adrian +properly +ruth +touchdown +dollar +boards +metre +circles +edges +favour +comments +ok +travels +liberation +scattered +firmly +##ular +holland +permitted +diesel +kenya +den +originated +##ral +demons +resumed +dragged +rider +##rus +servant +blinked +extend +torn +##ias +##sey +input +meal +everybody +cylinder +kinds +camps +##fe +bullet +logic +##wn +croatian +evolved +healthy +fool +chocolate +wise +preserve +pradesh +##ess +respective +1850 +##ew +chicken +artificial +gross +corresponding +convicted +cage +caroline +dialogue +##dor +narrative +stranger +mario +br +christianity +failing +trent +commanding +buddhist +1848 +maurice +focusing +yale +bike +altitude +##ering +mouse +revised +##sley +veteran +##ig +pulls +theology +crashed +campaigns +legion +##ability +drag +excellence +customer +cancelled +intensity +excuse +##lar +liga +participating +contributing +printing +##burn +variable +##rk +curious +bin +legacy +renaissance +##my +symptoms +binding +vocalist +dancer +##nie +grammar +gospel +democrats +ya +enters +sc +diplomatic +hitler +##ser +clouds +mathematical +quit +defended +oriented +##heim +fundamental +hardware +impressive +equally +convince +confederate +guilt +chuck +sliding +##ware +magnetic +narrowed +petersburg +bulgaria +otto +phd +skill +##ama +reader +hopes +pitcher +reservoir +hearts +automatically +expecting +mysterious +bennett +extensively +imagined +seeds +monitor +fix +##ative +journalism +struggling +signature +ranch +encounter +photographer +observation +protests +##pin +influences +##hr +calendar +##all +cruz +croatia 
+locomotive +hughes +naturally +shakespeare +basement +hook +uncredited +faded +theories +approaches +dare +phillips +filling +fury +obama +##ain +efficient +arc +deliver +min +raid +breeding +inducted +leagues +efficiency +axis +montana +eagles +##ked +supplied +instructions +karen +picking +indicating +trap +anchor +practically +christians +tomb +vary +occasional +electronics +lords +readers +newcastle +faint +innovation +collect +situations +engagement +160 +claude +mixture +##feld +peer +tissue +logo +lean +##ration +°f +floors +##ven +architects +reducing +##our +##ments +rope +1859 +ottawa +##har +samples +banking +declaration +proteins +resignation +francois +saudi +advocate +exhibited +armor +twins +divorce +##ras +abraham +reviewed +jo +temporarily +matrix +physically +pulse +curled +##ena +difficulties +bengal +usage +##ban +annie +riders +certificate +##pi +holes +warsaw +distinctive +jessica +##mon +mutual +1857 +customs +circular +eugene +removal +loaded +mere +vulnerable +depicted +generations +dame +heir +enormous +lightly +climbing +pitched +lessons +pilots +nepal +ram +google +preparing +brad +louise +renowned +##₂ +liam +##ably +plaza +shaw +sophie +brilliant +bills +##bar +##nik +fucking +mainland +server +pleasant +seized +veterans +jerked +fail +beta +brush +radiation +stored +warmth +southeastern +nate +sin +raced +berkeley +joke +athlete +designation +trunk +##low +roland +qualification +archives +heels +artwork +receives +judicial +reserves +##bed +woke +installation +abu +floating +fake +lesser +excitement +interface +concentrated +addressed +characteristic +amanda +saxophone +monk +auto +##bus +releasing +egg +dies +interaction +defender +ce +outbreak +glory +loving +##bert +sequel +consciousness +http +awake +ski +enrolled +##ress +handling +rookie +brow +somebody +biography +warfare +amounts +contracts +presentation +fabric +dissolved +challenged +meter +psychological +lt +elevated +rally +accurate +##tha +hospitals +undergraduate +specialist +venezuela +exhibit +shed +nursing +protestant +fluid +structural +footage +jared +consistent +prey +##ska +succession +reflect +exile +lebanon +wiped +suspect +shanghai +resting +integration +preservation +marvel +variant +pirates +sheep +rounded +capita +sailing +colonies +manuscript +deemed +variations +clarke +functional +emerging +boxing +relaxed +curse +azerbaijan +heavyweight +nickname +editorial +rang +grid +tightened +earthquake +flashed +miguel +rushing +##ches +improvements +boxes +brooks +180 +consumption +molecular +felix +societies +repeatedly +variation +aids +civic +graphics +professionals +realm +autonomous +receiver +delayed +workshop +militia +chairs +trump +canyon +##point +harsh +extending +lovely +happiness +##jan +stake +eyebrows +embassy +wellington +hannah +##ella +sony +corners +bishops +swear +cloth +contents +xi +namely +commenced +1854 +stanford +nashville +courage +graphic +commitment +garrison +##bin +hamlet +clearing +rebels +attraction +literacy +cooking +ruins +temples +jenny +humanity +celebrate +hasn +freight +sixty +rebel +bastard +##art +newton +##ada +deer +##ges +##ching +smiles +delaware +singers +##ets +approaching +assists +flame +##ph +boulevard +barrel +planted +##ome +pursuit +##sia +consequences +posts +shallow +invitation +rode +depot +ernest +kane +rod +concepts +preston +topic +chambers +striking +blast +arrives +descendants +montgomery +ranges +worlds +##lay +##ari +span +chaos +praise +##ag +fewer +1855 +sanctuary +mud +fbi +##ions +programmes +maintaining +unity +harper 
+bore +handsome +closure +tournaments +thunder +nebraska +linda +facade +puts +satisfied +argentine +dale +cork +dome +panama +##yl +1858 +tasks +experts +##ates +feeding +equation +##las +##ida +##tu +engage +bryan +##ax +um +quartet +melody +disbanded +sheffield +blocked +gasped +delay +kisses +maggie +connects +##non +sts +poured +creator +publishers +##we +guided +ellis +extinct +hug +gaining +##ord +complicated +##bility +poll +clenched +investigate +##use +thereby +quantum +spine +cdp +humor +kills +administered +semifinals +##du +encountered +ignore +##bu +commentary +##maker +bother +roosevelt +140 +plains +halfway +flowing +cultures +crack +imprisoned +neighboring +airline +##ses +##view +##mate +##ec +gather +wolves +marathon +transformed +##ill +cruise +organisations +carol +punch +exhibitions +numbered +alarm +ratings +daddy +silently +##stein +queens +colours +impression +guidance +liu +tactical +##rat +marshal +della +arrow +##ings +rested +feared +tender +owns +bitter +advisor +escort +##ides +spare +farms +grants +##ene +dragons +encourage +colleagues +cameras +##und +sucked +pile +spirits +prague +statements +suspension +landmark +fence +torture +recreation +bags +permanently +survivors +pond +spy +predecessor +bombing +coup +##og +protecting +transformation +glow +##lands +##book +dug +priests +andrea +feat +barn +jumping +##chen +##ologist +##con +casualties +stern +auckland +pipe +serie +revealing +ba +##bel +trevor +mercy +spectrum +yang +consist +governing +collaborated +possessed +epic +comprises +blew +shane +##ack +lopez +honored +magical +sacrifice +judgment +perceived +hammer +mtv +baronet +tune +das +missionary +sheets +350 +neutral +oral +threatening +attractive +shade +aims +seminary +##master +estates +1856 +michel +wounds +refugees +manufacturers +##nic +mercury +syndrome +porter +##iya +##din +hamburg +identification +upstairs +purse +widened +pause +cared +breathed +affiliate +santiago +prevented +celtic +fisher +125 +recruited +byzantine +reconstruction +farther +##mp +diet +sake +au +spite +sensation +##ert +blank +separation +105 +##hon +vladimir +armies +anime +##lie +accommodate +orbit +cult +sofia +archive +##ify +##box +founders +sustained +disorder +honours +northeastern +mia +crops +violet +threats +blanket +fires +canton +followers +southwestern +prototype +voyage +assignment +altered +moderate +protocol +pistol +##eo +questioned +brass +lifting +1852 +math +authored +##ual +doug +dimensional +dynamic +##san +1851 +pronounced +grateful +quest +uncomfortable +boom +presidency +stevens +relating +politicians +chen +barrier +quinn +diana +mosque +tribal +cheese +palmer +portions +sometime +chester +treasure +wu +bend +download +millions +reforms +registration +##osa +consequently +monitoring +ate +preliminary +brandon +invented +ps +eaten +exterior +intervention +ports +documented +log +displays +lecture +sally +favourite +##itz +vermont +lo +invisible +isle +breed +##ator +journalists +relay +speaks +backward +explore +midfielder +actively +stefan +procedures +cannon +blond +kenneth +centered +servants +chains +libraries +malcolm +essex +henri +slavery +##hal +facts +fairy +coached +cassie +cats +washed +cop +##fi +announcement +item +2000s +vinyl +activated +marco +frontier +growled +curriculum +##das +loyal +accomplished +leslie +ritual +kenny +##00 +vii +napoleon +hollow +hybrid +jungle +stationed +friedrich +counted +##ulated +platinum +theatrical +seated +col +rubber +glen +1840 +diversity +healing +extends +id +provisions +administrator 
+columbus +##oe +tributary +te +assured +org +##uous +prestigious +examined +lectures +grammy +ronald +associations +bailey +allan +essays +flute +believing +consultant +proceedings +travelling +1853 +kit +kerala +yugoslavia +buddy +methodist +##ith +burial +centres +batman +##nda +discontinued +bo +dock +stockholm +lungs +severely +##nk +citing +manga +##ugh +steal +mumbai +iraqi +robot +celebrity +bride +broadcasts +abolished +pot +joel +overhead +franz +packed +reconnaissance +johann +acknowledged +introduce +handled +doctorate +developments +drinks +alley +palestine +##nis +##aki +proceeded +recover +bradley +grain +patch +afford +infection +nationalist +legendary +##ath +interchange +virtually +gen +gravity +exploration +amber +vital +wishes +powell +doctrine +elbow +screenplay +##bird +contribute +indonesian +pet +creates +##com +enzyme +kylie +discipline +drops +manila +hunger +##ien +layers +suffer +fever +bits +monica +keyboard +manages +##hood +searched +appeals +##bad +testament +grande +reid +##war +beliefs +congo +##ification +##dia +si +requiring +##via +casey +1849 +regret +streak +rape +depends +syrian +sprint +pound +tourists +upcoming +pub +##xi +tense +##els +practiced +echo +nationwide +guild +motorcycle +liz +##zar +chiefs +desired +elena +bye +precious +absorbed +relatives +booth +pianist +##mal +citizenship +exhausted +wilhelm +##ceae +##hed +noting +quarterback +urge +hectares +##gue +ace +holly +##tal +blonde +davies +parked +sustainable +stepping +twentieth +airfield +galaxy +nest +chip +##nell +tan +shaft +paulo +requirement +##zy +paradise +tobacco +trans +renewed +vietnamese +##cker +##ju +suggesting +catching +holmes +enjoying +md +trips +colt +holder +butterfly +nerve +reformed +cherry +bowling +trailer +carriage +goodbye +appreciate +toy +joshua +interactive +enabled +involve +##kan +collar +determination +bunch +facebook +recall +shorts +superintendent +episcopal +frustration +giovanni +nineteenth +laser +privately +array +circulation +##ovic +armstrong +deals +painful +permit +discrimination +##wi +aires +retiring +cottage +ni +##sta +horizon +ellen +jamaica +ripped +fernando +chapters +playstation +patron +lecturer +navigation +behaviour +genes +georgian +export +solomon +rivals +swift +seventeen +rodriguez +princeton +independently +sox +1847 +arguing +entity +casting +hank +criteria +oakland +geographic +milwaukee +reflection +expanding +conquest +dubbed +##tv +halt +brave +brunswick +doi +arched +curtis +divorced +predominantly +somerset +streams +ugly +zoo +horrible +curved +buenos +fierce +dictionary +vector +theological +unions +handful +stability +chan +punjab +segments +##lly +altar +ignoring +gesture +monsters +pastor +##stone +thighs +unexpected +operators +abruptly +coin +compiled +associates +improving +migration +pin +##ose +compact +collegiate +reserved +##urs +quarterfinals +roster +restore +assembled +hurry +oval +##cies +1846 +flags +martha +##del +victories +sharply +##rated +argues +deadly +neo +drawings +symbols +performer +##iel +griffin +restrictions +editing +andrews +java +journals +arabia +compositions +dee +pierce +removing +hindi +casino +runway +civilians +minds +nasa +hotels +##zation +refuge +rent +retain +potentially +conferences +suburban +conducting +##tto +##tions +##tle +descended +massacre +##cal +ammunition +terrain +fork +souls +counts +chelsea +durham +drives +cab +##bank +perth +realizing +palestinian +finn +simpson +##dal +betty +##ule +moreover +particles +cardinals +tent +evaluation +extraordinary +##oid 
+inscription +##works +wednesday +chloe +maintains +panels +ashley +trucks +##nation +cluster +sunlight +strikes +zhang +##wing +dialect +canon +##ap +tucked +##ws +collecting +##mas +##can +##sville +maker +quoted +evan +franco +aria +buying +cleaning +eva +closet +provision +apollo +clinic +rat +##ez +necessarily +ac +##gle +##ising +venues +flipped +cent +spreading +trustees +checking +authorized +##sco +disappointed +##ado +notion +duration +trumpet +hesitated +topped +brussels +rolls +theoretical +hint +define +aggressive +repeat +wash +peaceful +optical +width +allegedly +mcdonald +strict +copyright +##illa +investors +mar +jam +witnesses +sounding +miranda +michelle +privacy +hugo +harmony +##pp +valid +lynn +glared +nina +102 +headquartered +diving +boarding +gibson +##ncy +albanian +marsh +routine +dealt +enhanced +er +intelligent +substance +targeted +enlisted +discovers +spinning +observations +pissed +smoking +rebecca +capitol +visa +varied +costume +seemingly +indies +compensation +surgeon +thursday +arsenal +westminster +suburbs +rid +anglican +##ridge +knots +foods +alumni +lighter +fraser +whoever +portal +scandal +##ray +gavin +advised +instructor +flooding +terrorist +##ale +teenage +interim +senses +duck +teen +thesis +abby +eager +overcome +##ile +newport +glenn +rises +shame +##cc +prompted +priority +forgot +bomber +nicolas +protective +360 +cartoon +katherine +breeze +lonely +trusted +henderson +richardson +relax +banner +candy +palms +remarkable +##rio +legends +cricketer +essay +ordained +edmund +rifles +trigger +##uri +##away +sail +alert +1830 +audiences +penn +sussex +siblings +pursued +indianapolis +resist +rosa +consequence +succeed +avoided +1845 +##ulation +inland +##tie +##nna +counsel +profession +chronicle +hurried +##una +eyebrow +eventual +bleeding +innovative +cure +##dom +committees +accounting +con +scope +hardy +heather +tenor +gut +herald +codes +tore +scales +wagon +##oo +luxury +tin +prefer +fountain +triangle +bonds +darling +convoy +dried +traced +beings +troy +accidentally +slam +findings +smelled +joey +lawyers +outcome +steep +bosnia +configuration +shifting +toll +brook +performers +lobby +philosophical +construct +shrine +aggregate +boot +cox +phenomenon +savage +insane +solely +reynolds +lifestyle +##ima +nationally +holdings +consideration +enable +edgar +mo +mama +##tein +fights +relegation +chances +atomic +hub +conjunction +awkward +reactions +currency +finale +kumar +underwent +steering +elaborate +gifts +comprising +melissa +veins +reasonable +sunshine +chi +solve +trails +inhabited +elimination +ethics +huh +ana +molly +consent +apartments +layout +marines +##ces +hunters +bulk +##oma +hometown +##wall +##mont +cracked +reads +neighbouring +withdrawn +admission +wingspan +damned +anthology +lancashire +brands +batting +forgive +cuban +awful +##lyn +104 +dimensions +imagination +##ade +dante +##ship +tracking +desperately +goalkeeper +##yne +groaned +workshops +confident +burton +gerald +milton +circus +uncertain +slope +copenhagen +sophia +fog +philosopher +portraits +accent +cycling +varying +gripped +larvae +garrett +specified +scotia +mature +luther +kurt +rap +##kes +aerial +750 +ferdinand +heated +es +transported +##shan +safely +nonetheless +##orn +##gal +motors +demanding +##sburg +startled +##brook +ally +generate +caps +ghana +stained +demo +mentions +beds +ap +afterward +diary +##bling +utility +##iro +richards +1837 +conspiracy +conscious +shining +footsteps +observer +cyprus +urged +loyalty +developer +probability 
+olive +upgraded +gym +miracle +insects +graves +1844 +ourselves +hydrogen +amazon +katie +tickets +poets +##pm +planes +##pan +prevention +witnessed +dense +jin +randy +tang +warehouse +monroe +bang +archived +elderly +investigations +alec +granite +mineral +conflicts +controlling +aboriginal +carlo +##zu +mechanics +stan +stark +rhode +skirt +est +##berry +bombs +respected +##horn +imposed +limestone +deny +nominee +memphis +grabbing +disabled +##als +amusement +aa +frankfurt +corn +referendum +varies +slowed +disk +firms +unconscious +incredible +clue +sue +##zhou +twist +##cio +joins +idaho +chad +developers +computing +destroyer +103 +mortal +tucker +kingston +choices +yu +carson +1800 +os +whitney +geneva +pretend +dimension +staged +plateau +maya +##une +freestyle +##bc +rovers +hiv +##ids +tristan +classroom +prospect +##hus +honestly +diploma +lied +thermal +auxiliary +feast +unlikely +iata +##tel +morocco +pounding +treasury +lithuania +considerably +1841 +dish +1812 +geological +matching +stumbled +destroying +marched +brien +advances +cake +nicole +belle +settling +measuring +directing +##mie +tuesday +bassist +capabilities +stunned +fraud +torpedo +##list +##phone +anton +wisdom +surveillance +ruined +##ulate +lawsuit +healthcare +theorem +halls +trend +aka +horizontal +dozens +acquire +lasting +swim +hawk +gorgeous +fees +vicinity +decrease +adoption +tactics +##ography +pakistani +##ole +draws +##hall +willie +burke +heath +algorithm +integral +powder +elliott +brigadier +jackie +tate +varieties +darker +##cho +lately +cigarette +specimens +adds +##ree +##ensis +##inger +exploded +finalist +cia +murders +wilderness +arguments +nicknamed +acceptance +onwards +manufacture +robertson +jets +tampa +enterprises +blog +loudly +composers +nominations +1838 +ai +malta +inquiry +automobile +hosting +viii +rays +tilted +grief +museums +strategies +furious +euro +equality +cohen +poison +surrey +wireless +governed +ridiculous +moses +##esh +##room +vanished +##ito +barnes +attract +morrison +istanbul +##iness +absent +rotation +petition +janet +##logical +satisfaction +custody +deliberately +observatory +comedian +surfaces +pinyin +novelist +strictly +canterbury +oslo +monks +embrace +ibm +jealous +photograph +continent +dorothy +marina +doc +excess +holden +allegations +explaining +stack +avoiding +lance +storyline +majesty +poorly +spike +dos +bradford +raven +travis +classics +proven +voltage +pillow +fists +butt +1842 +interpreted +##car +1839 +gage +telegraph +lens +promising +expelled +casual +collector +zones +##min +silly +nintendo +##kh +##bra +downstairs +chef +suspicious +afl +flies +vacant +uganda +pregnancy +condemned +lutheran +estimates +cheap +decree +saxon +proximity +stripped +idiot +deposits +contrary +presenter +magnus +glacier +im +offense +edwin +##ori +upright +##long +bolt +##ois +toss +geographical +##izes +environments +delicate +marking +abstract +xavier +nails +windsor +plantation +occurring +equity +saskatchewan +fears +drifted +sequences +vegetation +revolt +##stic +1843 +sooner +fusion +opposing +nato +skating +1836 +secretly +ruin +lease +##oc +edit +##nne +flora +anxiety +ruby +##ological +##mia +tel +bout +taxi +emmy +frost +rainbow +compounds +foundations +rainfall +assassination +nightmare +dominican +##win +achievements +deserve +orlando +intact +armenia +##nte +calgary +valentine +106 +marion +proclaimed +theodore +bells +courtyard +thigh +gonzalez +console +troop +minimal +monte +everyday +##ence +##if +supporter +terrorism +buck +openly 
+presbyterian +activists +carpet +##iers +rubbing +uprising +##yi +cute +conceived +legally +##cht +millennium +cello +velocity +ji +rescued +cardiff +1835 +rex +concentrate +senators +beard +rendered +glowing +battalions +scouts +competitors +sculptor +catalogue +arctic +ion +raja +bicycle +wow +glancing +lawn +##woman +gentleman +lighthouse +publish +predicted +calculated +##val +variants +##gne +strain +##ui +winston +deceased +##nus +touchdowns +brady +caleb +sinking +echoed +crush +hon +blessed +protagonist +hayes +endangered +magnitude +editors +##tine +estimate +responsibilities +##mel +backup +laying +consumed +sealed +zurich +lovers +frustrated +##eau +ahmed +kicking +mit +treasurer +1832 +biblical +refuse +terrified +pump +agrees +genuine +imprisonment +refuses +plymouth +##hen +lou +##nen +tara +trembling +antarctic +ton +learns +##tas +crap +crucial +faction +atop +##borough +wrap +lancaster +odds +hopkins +erik +lyon +##eon +bros +##ode +snap +locality +tips +empress +crowned +cal +acclaimed +chuckled +##ory +clara +sends +mild +towel +##fl +##day +##а +wishing +assuming +interviewed +##bal +##die +interactions +eden +cups +helena +##lf +indie +beck +##fire +batteries +filipino +wizard +parted +##lam +traces +##born +rows +idol +albany +delegates +##ees +##sar +discussions +##ex +notre +instructed +belgrade +highways +suggestion +lauren +possess +orientation +alexandria +abdul +beats +salary +reunion +ludwig +alright +wagner +intimate +pockets +slovenia +hugged +brighton +merchants +cruel +stole +trek +slopes +repairs +enrollment +politically +underlying +promotional +counting +boeing +##bb +isabella +naming +##и +keen +bacteria +listing +separately +belfast +ussr +450 +lithuanian +anybody +ribs +sphere +martinez +cock +embarrassed +proposals +fragments +nationals +##fs +##wski +premises +fin +1500 +alpine +matched +freely +bounded +jace +sleeve +##af +gaming +pier +populated +evident +##like +frances +flooded +##dle +frightened +pour +trainer +framed +visitor +challenging +pig +wickets +##fold +infected +email +##pes +arose +##aw +reward +ecuador +oblast +vale +ch +shuttle +##usa +bach +rankings +forbidden +cornwall +accordance +salem +consumers +bruno +fantastic +toes +machinery +resolved +julius +remembering +propaganda +iceland +bombardment +tide +contacts +wives +##rah +concerto +macdonald +albania +implement +daisy +tapped +sudan +helmet +angela +mistress +##lic +crop +sunk +finest +##craft +hostile +##ute +##tsu +boxer +fr +paths +adjusted +habit +ballot +supervision +soprano +##zen +bullets +wicked +sunset +regiments +disappear +lamp +performs +app +##gia +##oa +rabbit +digging +incidents +entries +##cion +dishes +##oi +introducing +##ati +##fied +freshman +slot +jill +tackles +baroque +backs +##iest +lone +sponsor +destiny +altogether +convert +##aro +consensus +shapes +demonstration +basically +feminist +auction +artifacts +##bing +strongest +twitter +halifax +2019 +allmusic +mighty +smallest +precise +alexandra +viola +##los +##ille +manuscripts +##illo +dancers +ari +managers +monuments +blades +barracks +springfield +maiden +consolidated +electron +##end +berry +airing +wheat +nobel +inclusion +blair +payments +geography +bee +cc +eleanor +react +##hurst +afc +manitoba +##yu +su +lineup +fitness +recreational +investments +airborne +disappointment +##dis +edmonton +viewing +##row +renovation +##cast +infant +bankruptcy +roses +aftermath +pavilion +##yer +carpenter +withdrawal +ladder +##hy +discussing +popped +reliable +agreements +rochester +##abad +curves 
+bombers +220 +rao +reverend +decreased +choosing +107 +stiff +consulting +naples +crawford +tracy +ka +ribbon +cops +##lee +crushed +deciding +unified +teenager +accepting +flagship +explorer +poles +sanchez +inspection +revived +skilled +induced +exchanged +flee +locals +tragedy +swallow +loading +hanna +demonstrate +##ela +salvador +flown +contestants +civilization +##ines +wanna +rhodes +fletcher +hector +knocking +considers +##ough +nash +mechanisms +sensed +mentally +walt +unclear +##eus +renovated +madame +##cks +crews +governmental +##hin +undertaken +monkey +##ben +##ato +fatal +armored +copa +caves +governance +grasp +perception +certification +froze +damp +tugged +wyoming +##rg +##ero +newman +##lor +nerves +curiosity +graph +115 +##ami +withdraw +tunnels +dull +meredith +moss +exhibits +neighbors +communicate +accuracy +explored +raiders +republicans +secular +kat +superman +penny +criticised +##tch +freed +update +conviction +wade +ham +likewise +delegation +gotta +doll +promises +technological +myth +nationality +resolve +convent +##mark +sharon +dig +sip +coordinator +entrepreneur +fold +##dine +capability +councillor +synonym +blown +swan +cursed +1815 +jonas +haired +sofa +canvas +keeper +rivalry +##hart +rapper +speedway +swords +postal +maxwell +estonia +potter +recurring +##nn +##ave +errors +##oni +cognitive +1834 +##² +claws +nadu +roberto +bce +wrestler +ellie +##ations +infinite +ink +##tia +presumably +finite +staircase +108 +noel +patricia +nacional +##cation +chill +eternal +tu +preventing +prussia +fossil +limbs +##logist +ernst +frog +perez +rene +##ace +pizza +prussian +##ios +##vy +molecules +regulatory +answering +opinions +sworn +lengths +supposedly +hypothesis +upward +habitats +seating +ancestors +drank +yield +hd +synthesis +researcher +modest +##var +mothers +peered +voluntary +homeland +##the +acclaim +##igan +static +valve +luxembourg +alto +carroll +fe +receptor +norton +ambulance +##tian +johnston +catholics +depicting +jointly +elephant +gloria +mentor +badge +ahmad +distinguish +remarked +councils +precisely +allison +advancing +detection +crowded +##10 +cooperative +ankle +mercedes +dagger +surrendered +pollution +commit +subway +jeffrey +lesson +sculptures +provider +##fication +membrane +timothy +rectangular +fiscal +heating +teammate +basket +particle +anonymous +deployment +##ple +missiles +courthouse +proportion +shoe +sec +##ller +complaints +forbes +blacks +abandon +remind +sizes +overwhelming +autobiography +natalie +##awa +risks +contestant +countryside +babies +scorer +invaded +enclosed +proceed +hurling +disorders +##cu +reflecting +continuously +cruiser +graduates +freeway +investigated +ore +deserved +maid +blocking +phillip +jorge +shakes +dove +mann +variables +lacked +burden +accompanying +que +consistently +organizing +provisional +complained +endless +##rm +tubes +juice +georges +krishna +mick +labels +thriller +##uch +laps +arcade +sage +snail +##table +shannon +fi +laurence +seoul +vacation +presenting +hire +churchill +surprisingly +prohibited +savannah +technically +##oli +170 +##lessly +testimony +suited +speeds +toys +romans +mlb +flowering +measurement +talented +kay +settings +charleston +expectations +shattered +achieving +triumph +ceremonies +portsmouth +lanes +mandatory +loser +stretching +cologne +realizes +seventy +cornell +careers +webb +##ulating +americas +budapest +ava +suspicion +##ison +yo +conrad +##hai +sterling +jessie +rector +##az +1831 +transform +organize +loans +christine +volcanic +warrant +slender 
+summers +subfamily +newer +danced +dynamics +rhine +proceeds +heinrich +gastropod +commands +sings +facilitate +easter +ra +positioned +responses +expense +fruits +yanked +imported +25th +velvet +vic +primitive +tribune +baldwin +neighbourhood +donna +rip +hay +pr +##uro +1814 +espn +welcomed +##aria +qualifier +glare +highland +timing +##cted +shells +eased +geometry +louder +exciting +slovakia +##sion +##iz +##lot +savings +prairie +##ques +marching +rafael +tonnes +##lled +curtain +preceding +shy +heal +greene +worthy +##pot +detachment +bury +sherman +##eck +reinforced +seeks +bottles +contracted +duchess +outfit +walsh +##sc +mickey +##ase +geoffrey +archer +squeeze +dawson +eliminate +invention +##enberg +neal +##eth +stance +dealer +coral +maple +retire +polo +simplified +##ht +1833 +hid +watts +backwards +jules +##oke +genesis +mt +frames +rebounds +burma +woodland +moist +santos +whispers +drained +subspecies +##aa +streaming +ulster +burnt +correspondence +maternal +gerard +denis +stealing +##load +genius +duchy +##oria +inaugurated +momentum +suits +placement +sovereign +clause +thames +##hara +confederation +reservation +sketch +yankees +lets +rotten +charm +hal +verses +ultra +commercially +dot +salon +citation +adopt +winnipeg +mist +allocated +cairo +##boy +jenkins +interference +objectives +##wind +1820 +portfolio +armoured +sectors +##eh +initiatives +##world +integrity +exercises +robe +tap +ab +gazed +##tones +distracted +rulers +111 +favorable +jerome +tended +cart +factories +##eri +diplomat +valued +gravel +charitable +##try +calvin +exploring +chang +shepherd +terrace +pdf +pupil +##ural +reflects +ups +##rch +governors +shelf +depths +##nberg +trailed +crest +tackle +##nian +##ats +hatred +##kai +clare +makers +ethiopia +longtime +detected +embedded +lacking +slapped +rely +thomson +anticipation +iso +morton +successive +agnes +screenwriter +straightened +philippe +playwright +haunted +licence +iris +intentions +sutton +112 +logical +correctly +##weight +branded +licked +tipped +silva +ricky +narrator +requests +##ents +greeted +supernatural +cow +##wald +lung +refusing +employer +strait +gaelic +liner +##piece +zoe +sabha +##mba +driveway +harvest +prints +bates +reluctantly +threshold +algebra +ira +wherever +coupled +240 +assumption +picks +##air +designers +raids +gentlemen +##ean +roller +blowing +leipzig +locks +screw +dressing +strand +##lings +scar +dwarf +depicts +##nu +nods +##mine +differ +boris +##eur +yuan +flip +##gie +mob +invested +questioning +applying +##ture +shout +##sel +gameplay +blamed +illustrations +bothered +weakness +rehabilitation +##of +##zes +envelope +rumors +miners +leicester +subtle +kerry +##ico +ferguson +##fu +premiership +ne +##cat +bengali +prof +catches +remnants +dana +##rily +shouting +presidents +baltic +ought +ghosts +dances +sailors +shirley +fancy +dominic +##bie +madonna +##rick +bark +buttons +gymnasium +ashes +liver +toby +oath +providence +doyle +evangelical +nixon +cement +carnegie +embarked +hatch +surroundings +guarantee +needing +pirate +essence +##bee +filter +crane +hammond +projected +immune +percy +twelfth +##ult +regent +doctoral +damon +mikhail +##ichi +lu +critically +elect +realised +abortion +acute +screening +mythology +steadily +##fc +frown +nottingham +kirk +wa +minneapolis +##rra +module +algeria +mc +nautical +encounters +surprising +statues +availability +shirts +pie +alma +brows +munster +mack +soup +crater +tornado +sanskrit +cedar +explosive +bordered +dixon +planets +stamp +exam +happily 
+##bble +carriers +kidnapped +##vis +accommodation +emigrated +##met +knockout +correspondent +violation +profits +peaks +lang +specimen +agenda +ancestry +pottery +spelling +equations +obtaining +ki +linking +1825 +debris +asylum +##20 +buddhism +teddy +##ants +gazette +##nger +##sse +dental +eligibility +utc +fathers +averaged +zimbabwe +francesco +coloured +hissed +translator +lynch +mandate +humanities +mackenzie +uniforms +lin +##iana +##gio +asset +mhz +fitting +samantha +genera +wei +rim +beloved +shark +riot +entities +expressions +indo +carmen +slipping +owing +abbot +neighbor +sidney +##av +rats +recommendations +encouraging +squadrons +anticipated +commanders +conquered +##oto +donations +diagnosed +##mond +divide +##iva +guessed +decoration +vernon +auditorium +revelation +conversations +##kers +##power +herzegovina +dash +alike +protested +lateral +herman +accredited +mg +##gent +freeman +mel +fiji +crow +crimson +##rine +livestock +##pped +humanitarian +bored +oz +whip +##lene +##ali +legitimate +alter +grinning +spelled +anxious +oriental +wesley +##nin +##hole +carnival +controller +detect +##ssa +bowed +educator +kosovo +macedonia +##sin +occupy +mastering +stephanie +janeiro +para +unaware +nurses +noon +135 +cam +hopefully +ranger +combine +sociology +polar +rica +##eer +neill +##sman +holocaust +##ip +doubled +lust +1828 +109 +decent +cooling +unveiled +##card +1829 +nsw +homer +chapman +meyer +##gin +dive +mae +reagan +expertise +##gled +darwin +brooke +sided +prosecution +investigating +comprised +petroleum +genres +reluctant +differently +trilogy +johns +vegetables +corpse +highlighted +lounge +pension +unsuccessfully +elegant +aided +ivory +beatles +amelia +cain +dubai +sunny +immigrant +babe +click +##nder +underwater +pepper +combining +mumbled +atlas +horns +accessed +ballad +physicians +homeless +gestured +rpm +freak +louisville +corporations +patriots +prizes +rational +warn +modes +decorative +overnight +din +troubled +phantom +##ort +monarch +sheer +##dorf +generals +guidelines +organs +addresses +##zon +enhance +curling +parishes +cord +##kie +linux +caesar +deutsche +bavaria +##bia +coleman +cyclone +##eria +bacon +petty +##yama +##old +hampton +diagnosis +1824 +throws +complexity +rita +disputed +##₃ +pablo +##sch +marketed +trafficking +##ulus +examine +plague +formats +##oh +vault +faithful +##bourne +webster +##ox +highlights +##ient +##ann +phones +vacuum +sandwich +modeling +##gated +bolivia +clergy +qualities +isabel +##nas +##ars +wears +screams +reunited +annoyed +bra +##ancy +##rate +differential +transmitter +tattoo +container +poker +##och +excessive +resides +cowboys +##tum +augustus +trash +providers +statute +retreated +balcony +reversed +void +storey +preceded +masses +leap +laughs +neighborhoods +wards +schemes +falcon +santo +battlefield +pad +ronnie +thread +lesbian +venus +##dian +beg +sandstone +daylight +punched +gwen +analog +stroked +wwe +acceptable +measurements +dec +toxic +##kel +adequate +surgical +economist +parameters +varsity +##sberg +quantity +ella +##chy +##rton +countess +generating +precision +diamonds +expressway +ga +##ı +1821 +uruguay +talents +galleries +expenses +scanned +colleague +outlets +ryder +lucien +##ila +paramount +##bon +syracuse +dim +fangs +gown +sweep +##sie +toyota +missionaries +websites +##nsis +sentences +adviser +val +trademark +spells +##plane +patience +starter +slim +##borg +toe +incredibly +shoots +elliot +nobility +##wyn +cowboy +endorsed +gardner +tendency +persuaded +organisms +emissions 
+kazakhstan +amused +boring +chips +themed +##hand +llc +constantinople +chasing +systematic +guatemala +borrowed +erin +carey +##hard +highlands +struggles +1810 +##ifying +##ced +wong +exceptions +develops +enlarged +kindergarten +castro +##ern +##rina +leigh +zombie +juvenile +##most +consul +##nar +sailor +hyde +clarence +intensive +pinned +nasty +useless +jung +clayton +stuffed +exceptional +ix +apostolic +230 +transactions +##dge +exempt +swinging +cove +religions +##ash +shields +dairy +bypass +190 +pursuing +bug +joyce +bombay +chassis +southampton +chat +interact +redesignated +##pen +nascar +pray +salmon +rigid +regained +malaysian +grim +publicity +constituted +capturing +toilet +delegate +purely +tray +drift +loosely +striker +weakened +trinidad +mitch +itv +defines +transmitted +ming +scarlet +nodding +fitzgerald +fu +narrowly +sp +tooth +standings +virtue +##₁ +##wara +##cting +chateau +gloves +lid +##nel +hurting +conservatory +##pel +sinclair +reopened +sympathy +nigerian +strode +advocated +optional +chronic +discharge +##rc +suck +compatible +laurel +stella +shi +fails +wage +dodge +128 +informal +sorts +levi +buddha +villagers +##aka +chronicles +heavier +summoned +gateway +3000 +eleventh +jewelry +translations +accordingly +seas +##ency +fiber +pyramid +cubic +dragging +##ista +caring +##ops +android +contacted +lunar +##dt +kai +lisbon +patted +1826 +sacramento +theft +madagascar +subtropical +disputes +ta +holidays +piper +willow +mare +cane +itunes +newfoundland +benny +companions +dong +raj +observe +roar +charming +plaque +tibetan +fossils +enacted +manning +bubble +tina +tanzania +##eda +##hir +funk +swamp +deputies +cloak +ufc +scenario +par +scratch +metals +anthem +guru +engaging +specially +##boat +dialects +nineteen +cecil +duet +disability +messenger +unofficial +##lies +defunct +eds +moonlight +drainage +surname +puzzle +honda +switching +conservatives +mammals +knox +broadcaster +sidewalk +cope +##ried +benson +princes +peterson +##sal +bedford +sharks +eli +wreck +alberto +gasp +archaeology +lgbt +teaches +securities +madness +compromise +waving +coordination +davidson +visions +leased +possibilities +eighty +jun +fernandez +enthusiasm +assassin +sponsorship +reviewer +kingdoms +estonian +laboratories +##fy +##nal +applies +verb +celebrations +##zzo +rowing +lightweight +sadness +submit +mvp +balanced +dude +##vas +explicitly +metric +magnificent +mound +brett +mohammad +mistakes +irregular +##hing +##ass +sanders +betrayed +shipped +surge +##enburg +reporters +termed +georg +pity +verbal +bulls +abbreviated +enabling +appealed +##are +##atic +sicily +sting +heel +sweetheart +bart +spacecraft +brutal +monarchy +##tter +aberdeen +cameo +diane +##ub +survivor +clyde +##aries +complaint +##makers +clarinet +delicious +chilean +karnataka +coordinates +1818 +panties +##rst +pretending +ar +dramatically +kiev +bella +tends +distances +113 +catalog +launching +instances +telecommunications +portable +lindsay +vatican +##eim +angles +aliens +marker +stint +screens +bolton +##rne +judy +wool +benedict +plasma +europa +spark +imaging +filmmaker +swiftly +##een +contributor +##nor +opted +stamps +apologize +financing +butter +gideon +sophisticated +alignment +avery +chemicals +yearly +speculation +prominence +professionally +##ils +immortal +institutional +inception +wrists +identifying +tribunal +derives +gains +##wo +papal +preference +linguistic +vince +operative +brewery +##ont +unemployment +boyd +##ured +##outs +albeit +prophet +1813 +bi +##rr +##face +##rad 
+quarterly +asteroid +cleaned +radius +temper +##llen +telugu +jerk +viscount +menu +##ote +glimpse +##aya +yacht +hawaiian +baden +##rl +laptop +readily +##gu +monetary +offshore +scots +watches +##yang +##arian +upgrade +needle +xbox +lea +encyclopedia +flank +fingertips +##pus +delight +teachings +confirm +roth +beaches +midway +winters +##iah +teasing +daytime +beverly +gambling +bonnie +##backs +regulated +clement +hermann +tricks +knot +##shing +##uring +##vre +detached +ecological +owed +specialty +byron +inventor +bats +stays +screened +unesco +midland +trim +affection +##ander +##rry +jess +thoroughly +feedback +##uma +chennai +strained +heartbeat +wrapping +overtime +pleaded +##sworth +mon +leisure +oclc +##tate +##ele +feathers +angelo +thirds +nuts +surveys +clever +gill +commentator +##dos +darren +rides +gibraltar +##nc +##mu +dissolution +dedication +shin +meals +saddle +elvis +reds +chaired +taller +appreciation +functioning +niece +favored +advocacy +robbie +criminals +suffolk +yugoslav +passport +constable +congressman +hastings +vera +##rov +consecrated +sparks +ecclesiastical +confined +##ovich +muller +floyd +nora +1822 +paved +1827 +cumberland +ned +saga +spiral +##flow +appreciated +yi +collaborative +treating +similarities +feminine +finishes +##ib +jade +import +##nse +##hot +champagne +mice +securing +celebrities +helsinki +attributes +##gos +cousins +phases +ache +lucia +gandhi +submission +vicar +spear +shine +tasmania +biting +detention +constitute +tighter +seasonal +##gus +terrestrial +matthews +##oka +effectiveness +parody +philharmonic +##onic +1816 +strangers +encoded +consortium +guaranteed +regards +shifts +tortured +collision +supervisor +inform +broader +insight +theaters +armour +emeritus +blink +incorporates +mapping +##50 +##ein +handball +flexible +##nta +substantially +generous +thief +##own +carr +loses +1793 +prose +ucla +romeo +generic +metallic +realization +damages +mk +commissioners +zach +default +##ther +helicopters +lengthy +stems +spa +partnered +spectators +rogue +indication +penalties +teresa +1801 +sen +##tric +dalton +##wich +irving +photographic +##vey +dell +deaf +peters +excluded +unsure +##vable +patterson +crawled +##zio +resided +whipped +latvia +slower +ecole +pipes +employers +maharashtra +comparable +va +textile +pageant +##gel +alphabet +binary +irrigation +chartered +choked +antoine +offs +waking +supplement +##wen +quantities +demolition +regain +locate +urdu +folks +alt +114 +##mc +scary +andreas +whites +##ava +classrooms +mw +aesthetic +publishes +valleys +guides +cubs +johannes +bryant +conventions +affecting +##itt +drain +awesome +isolation +prosecutor +ambitious +apology +captive +downs +atmospheric +lorenzo +aisle +beef +foul +##onia +kidding +composite +disturbed +illusion +natives +##ffer +emi +rockets +riverside +wartime +painters +adolf +melted +##ail +uncertainty +simulation +hawks +progressed +meantime +builder +spray +breach +unhappy +regina +russians +##urg +determining +##tation +tram +1806 +##quin +aging +##12 +1823 +garion +rented +mister +diaz +terminated +clip +1817 +depend +nervously +disco +owe +defenders +shiva +notorious +disbelief +shiny +worcester +##gation +##yr +trailing +undertook +islander +belarus +limitations +watershed +fuller +overlooking +utilized +raphael +1819 +synthetic +breakdown +klein +##nate +moaned +memoir +lamb +practicing +##erly +cellular +arrows +exotic +##graphy +witches +117 +charted +rey +hut +hierarchy +subdivision +freshwater +giuseppe +aloud +reyes +qatar +marty 
+sideways +utterly +sexually +jude +prayers +mccarthy +softball +blend +damien +##gging +##metric +wholly +erupted +lebanese +negro +revenues +tasted +comparative +teamed +transaction +labeled +maori +sovereignty +parkway +trauma +gran +malay +121 +advancement +descendant +2020 +buzz +salvation +inventory +symbolic +##making +antarctica +mps +##gas +##bro +mohammed +myanmar +holt +submarines +tones +##lman +locker +patriarch +bangkok +emerson +remarks +predators +kin +afghan +confession +norwich +rental +emerge +advantages +##zel +rca +##hold +shortened +storms +aidan +##matic +autonomy +compliance +##quet +dudley +atp +##osis +1803 +motto +documentation +summary +professors +spectacular +christina +archdiocese +flashing +innocence +remake +##dell +psychic +reef +scare +employ +rs +sticks +meg +gus +leans +##ude +accompany +bergen +tomas +##iko +doom +wages +pools +##nch +##bes +breasts +scholarly +alison +outline +brittany +breakthrough +willis +realistic +##cut +##boro +competitor +##stan +pike +picnic +icon +designing +commercials +washing +villain +skiing +micro +costumes +auburn +halted +executives +##hat +logistics +cycles +vowel +applicable +barrett +exclaimed +eurovision +eternity +ramon +##umi +##lls +modifications +sweeping +disgust +##uck +torch +aviv +ensuring +rude +dusty +sonic +donovan +outskirts +cu +pathway +##band +##gun +##lines +disciplines +acids +cadet +paired +##40 +sketches +##sive +marriages +##⁺ +folding +peers +slovak +implies +admired +##beck +1880s +leopold +instinct +attained +weston +megan +horace +##ination +dorsal +ingredients +evolutionary +##its +complications +deity +lethal +brushing +levy +deserted +institutes +posthumously +delivering +telescope +coronation +motivated +rapids +luc +flicked +pays +volcano +tanner +weighed +##nica +crowds +frankie +gifted +addressing +granddaughter +winding +##rna +constantine +gomez +##front +landscapes +rudolf +anthropology +slate +werewolf +##lio +astronomy +circa +rouge +dreaming +sack +knelt +drowned +naomi +prolific +tracked +freezing +herb +##dium +agony +randall +twisting +wendy +deposit +touches +vein +wheeler +##bbled +##bor +batted +retaining +tire +presently +compare +specification +daemon +nigel +##grave +merry +recommendation +czechoslovakia +sandra +ng +roma +##sts +lambert +inheritance +sheikh +winchester +cries +examining +##yle +comeback +cuisine +nave +##iv +ko +retrieve +tomatoes +barker +polished +defining +irene +lantern +personalities +begging +tract +swore +1809 +175 +##gic +omaha +brotherhood +##rley +haiti +##ots +exeter +##ete +##zia +steele +dumb +pearson +210 +surveyed +elisabeth +trends +##ef +fritz +##rf +premium +bugs +fraction +calmly +viking +##birds +tug +inserted +unusually +##ield +confronted +distress +crashing +brent +turks +resign +##olo +cambodia +gabe +sauce +##kal +evelyn +116 +extant +clusters +quarry +teenagers +luna +##lers +##ister +affiliation +drill +##ashi +panthers +scenic +libya +anita +strengthen +inscriptions +##cated +lace +sued +judith +riots +##uted +mint +##eta +preparations +midst +dub +challenger +##vich +mock +cf +displaced +wicket +breaths +enables +schmidt +analyst +##lum +ag +highlight +automotive +axe +josef +newark +sufficiently +resembles +50th +##pal +flushed +mum +traits +##ante +commodore +incomplete +warming +titular +ceremonial +ethical +118 +celebrating +eighteenth +cao +lima +medalist +mobility +strips +snakes +##city +miniature +zagreb +barton +escapes +umbrella +automated +doubted +differs +cooled +georgetown +dresden +cooked +fade +wyatt +rna 
+jacobs +carlton +abundant +stereo +boost +madras +inning +##hia +spur +ip +malayalam +begged +osaka +groan +escaping +charging +dose +vista +##aj +bud +papa +communists +advocates +edged +tri +##cent +resemble +peaking +necklace +fried +montenegro +saxony +goose +glances +stuttgart +curator +recruit +grocery +sympathetic +##tting +##fort +127 +lotus +randolph +ancestor +##rand +succeeding +jupiter +1798 +macedonian +##heads +hiking +1808 +handing +fischer +##itive +garbage +node +##pies +prone +singular +papua +inclined +attractions +italia +pouring +motioned +grandma +garnered +jacksonville +corp +ego +ringing +aluminum +##hausen +ordering +##foot +drawer +traders +synagogue +##play +##kawa +resistant +wandering +fragile +fiona +teased +var +hardcore +soaked +jubilee +decisive +exposition +mercer +poster +valencia +hale +kuwait +1811 +##ises +##wr +##eed +tavern +gamma +122 +johan +##uer +airways +amino +gil +##ury +vocational +domains +torres +##sp +generator +folklore +outcomes +##keeper +canberra +shooter +fl +beams +confrontation +##lling +##gram +feb +aligned +forestry +pipeline +jax +motorway +conception +decay +##tos +coffin +##cott +stalin +1805 +escorted +minded +##nam +sitcom +purchasing +twilight +veronica +additions +passive +tensions +straw +123 +frequencies +1804 +refugee +cultivation +##iate +christie +clary +bulletin +crept +disposal +##rich +##zong +processor +crescent +##rol +bmw +emphasized +whale +nazis +aurora +##eng +dwelling +hauled +sponsors +toledo +mega +ideology +theatres +tessa +cerambycidae +saves +turtle +cone +suspects +kara +rusty +yelling +greeks +mozart +shades +cocked +participant +##tro +shire +spit +freeze +necessity +##cos +inmates +nielsen +councillors +loaned +uncommon +omar +peasants +botanical +offspring +daniels +formations +jokes +1794 +pioneers +sigma +licensing +##sus +wheelchair +polite +1807 +liquor +pratt +trustee +##uta +forewings +balloon +##zz +kilometre +camping +explicit +casually +shawn +foolish +teammates +nm +hassan +carrie +judged +satisfy +vanessa +knives +selective +cnn +flowed +##lice +eclipse +stressed +eliza +mathematician +cease +cultivated +##roy +commissions +browns +##ania +destroyers +sheridan +meadow +##rius +minerals +##cial +downstream +clash +gram +memoirs +ventures +baha +seymour +archie +midlands +edith +fare +flynn +invite +canceled +tiles +stabbed +boulder +incorporate +amended +camden +facial +mollusk +unreleased +descriptions +yoga +grabs +550 +raises +ramp +shiver +##rose +coined +pioneering +tunes +qing +warwick +tops +119 +melanie +giles +##rous +wandered +##inal +annexed +nov +30th +unnamed +##ished +organizational +airplane +normandy +stoke +whistle +blessing +violations +chased +holders +shotgun +##ctic +outlet +reactor +##vik +tires +tearing +shores +fortified +mascot +constituencies +nc +columnist +productive +tibet +##rta +lineage +hooked +oct +tapes +judging +cody +##gger +hansen +kashmir +triggered +##eva +solved +cliffs +##tree +resisted +anatomy +protesters +transparent +implied +##iga +injection +mattress +excluding +##mbo +defenses +helpless +devotion +##elli +growl +liberals +weber +phenomena +atoms +plug +##iff +mortality +apprentice +howe +convincing +aaa +swimmer +barber +leone +promptly +sodium +def +nowadays +arise +##oning +gloucester +corrected +dignity +norm +erie +##ders +elders +evacuated +sylvia +compression +##yar +hartford +pose +backpack +reasoning +accepts +24th +wipe +millimetres +marcel +##oda +dodgers +albion +1790 +overwhelmed +aerospace +oaks +1795 +showcase +acknowledge 
+recovering +nolan +ashe +hurts +geology +fashioned +disappearance +farewell +swollen +shrug +marquis +wimbledon +124 +rue +1792 +commemorate +reduces +experiencing +inevitable +calcutta +intel +##court +murderer +sticking +fisheries +imagery +bloom +280 +brake +##inus +gustav +hesitation +memorable +po +viral +beans +accidents +tunisia +antenna +spilled +consort +treatments +aye +perimeter +##gard +donation +hostage +migrated +banker +addiction +apex +lil +trout +##ously +conscience +##nova +rams +sands +genome +passionate +troubles +##lets +##set +amid +##ibility +##ret +higgins +exceed +vikings +##vie +payne +##zan +muscular +##ste +defendant +sucking +##wal +ibrahim +fuselage +claudia +vfl +europeans +snails +interval +##garh +preparatory +statewide +tasked +lacrosse +viktor +##lation +angola +##hra +flint +implications +employs +teens +patrons +stall +weekends +barriers +scrambled +nucleus +tehran +jenna +parsons +lifelong +robots +displacement +5000 +##bles +precipitation +##gt +knuckles +clutched +1802 +marrying +ecology +marx +accusations +declare +scars +kolkata +mat +meadows +bermuda +skeleton +finalists +vintage +crawl +coordinate +affects +subjected +orchestral +mistaken +##tc +mirrors +dipped +relied +260 +arches +candle +##nick +incorporating +wildly +fond +basilica +owl +fringe +rituals +whispering +stirred +feud +tertiary +slick +goat +honorable +whereby +skip +ricardo +stripes +parachute +adjoining +submerged +synthesizer +##gren +intend +positively +ninety +phi +beaver +partition +fellows +alexis +prohibition +carlisle +bizarre +fraternity +##bre +doubts +icy +cbc +aquatic +sneak +sonny +combines +airports +crude +supervised +spatial +merge +alfonso +##bic +corrupt +scan +undergo +##ams +disabilities +colombian +comparing +dolphins +perkins +##lish +reprinted +unanimous +bounced +hairs +underworld +midwest +semester +bucket +paperback +miniseries +coventry +demise +##leigh +demonstrations +sensor +rotating +yan +##hler +arrange +soils +##idge +hyderabad +labs +##dr +brakes +grandchildren +##nde +negotiated +rover +ferrari +continuation +directorate +augusta +stevenson +counterpart +gore +##rda +nursery +rican +ave +collectively +broadly +pastoral +repertoire +asserted +discovering +nordic +styled +fiba +cunningham +harley +middlesex +survives +tumor +tempo +zack +aiming +lok +urgent +##rade +##nto +devils +##ement +contractor +turin +##wl +##ool +bliss +repaired +simmons +moan +astronomical +cr +negotiate +lyric +1890s +lara +bred +clad +angus +pbs +##ience +engineered +posed +##lk +hernandez +possessions +elbows +psychiatric +strokes +confluence +electorate +lifts +campuses +lava +alps +##ep +##ution +##date +physicist +woody +##page +##ographic +##itis +juliet +reformation +sparhawk +320 +complement +suppressed +jewel +##½ +floated +##kas +continuity +sadly +##ische +inability +melting +scanning +paula +flour +judaism +safer +vague +##lm +solving +curb +##stown +financially +gable +bees +expired +miserable +cassidy +dominion +1789 +cupped +145 +robbery +facto +amos +warden +resume +tallest +marvin +ing +pounded +usd +declaring +gasoline +##aux +darkened +270 +650 +sophomore +##mere +erection +gossip +televised +risen +dial +##eu +pillars +##link +passages +profound +##tina +arabian +ashton +silicon +nail +##ead +##lated +##wer +##hardt +fleming +firearms +ducked +circuits +blows +waterloo +titans +##lina +atom +fireplace +cheshire +financed +activation +algorithms +##zzi +constituent +catcher +cherokee +partnerships +sexuality +platoon +tragic +vivian +guarded +whiskey 
+meditation +poetic +##late +##nga +##ake +porto +listeners +dominance +kendra +mona +chandler +factions +22nd +salisbury +attitudes +derivative +##ido +##haus +intake +paced +javier +illustrator +barrels +bias +cockpit +burnett +dreamed +ensuing +##anda +receptors +someday +hawkins +mattered +##lal +slavic +1799 +jesuit +cameroon +wasted +tai +wax +lowering +victorious +freaking +outright +hancock +librarian +sensing +bald +calcium +myers +tablet +announcing +barack +shipyard +pharmaceutical +##uan +greenwich +flush +medley +patches +wolfgang +pt +speeches +acquiring +exams +nikolai +##gg +hayden +kannada +##type +reilly +##pt +waitress +abdomen +devastated +capped +pseudonym +pharmacy +fulfill +paraguay +1796 +clicked +##trom +archipelago +syndicated +##hman +lumber +orgasm +rejection +clifford +lorraine +advent +mafia +rodney +brock +##ght +##used +##elia +cassette +chamberlain +despair +mongolia +sensors +developmental +upstream +##eg +##alis +spanning +165 +trombone +basque +seeded +interred +renewable +rhys +leapt +revision +molecule +##ages +chord +vicious +nord +shivered +23rd +arlington +debts +corpus +sunrise +bays +blackburn +centimetres +##uded +shuddered +gm +strangely +gripping +cartoons +isabelle +orbital +##ppa +seals +proving +##lton +refusal +strengthened +bust +assisting +baghdad +batsman +portrayal +mara +pushes +spears +og +##cock +reside +nathaniel +brennan +1776 +confirmation +caucus +##worthy +markings +yemen +nobles +ku +lazy +viewer +catalan +encompasses +sawyer +##fall +sparked +substances +patents +braves +arranger +evacuation +sergio +persuade +dover +tolerance +penguin +cum +jockey +insufficient +townships +occupying +declining +plural +processed +projection +puppet +flanders +introduces +liability +##yon +gymnastics +antwerp +taipei +hobart +candles +jeep +wes +observers +126 +chaplain +bundle +glorious +##hine +hazel +flung +sol +excavations +dumped +stares +sh +bangalore +triangular +icelandic +intervals +expressing +turbine +##vers +songwriting +crafts +##igo +jasmine +ditch +rite +##ways +entertaining +comply +sorrow +wrestlers +basel +emirates +marian +rivera +helpful +##some +caution +downward +networking +##atory +##tered +darted +genocide +emergence +replies +specializing +spokesman +convenient +unlocked +fading +augustine +concentrations +resemblance +elijah +investigator +andhra +##uda +promotes +bean +##rrell +fleeing +wan +simone +announcer +##ame +##bby +lydia +weaver +132 +residency +modification +##fest +stretches +##ast +alternatively +nat +lowe +lacks +##ented +pam +tile +concealed +inferior +abdullah +residences +tissues +vengeance +##ided +moisture +peculiar +groove +zip +bologna +jennings +ninja +oversaw +zombies +pumping +batch +livingston +emerald +installations +1797 +peel +nitrogen +rama +##fying +##star +schooling +strands +responding +werner +##ost +lime +casa +accurately +targeting +##rod +underway +##uru +hemisphere +lester +##yard +occupies +2d +griffith +angrily +reorganized +##owing +courtney +deposited +##dd +##30 +estadio +##ifies +dunn +exiled +##ying +checks +##combe +##о +##fly +successes +unexpectedly +blu +assessed +##flower +##ه +observing +sacked +spiders +kn +##tail +mu +nodes +prosperity +audrey +divisional +155 +broncos +tangled +adjust +feeds +erosion +paolo +surf +directory +snatched +humid +admiralty +screwed +gt +reddish +##nese +modules +trench +lamps +bind +leah +bucks +competes +##nz +##form +transcription +##uc +isles +violently +clutching +pga +cyclist +inflation +flats +ragged +unnecessary +##hian +stubborn 
+coordinated +harriet +baba +disqualified +330 +insect +wolfe +##fies +reinforcements +rocked +duel +winked +embraced +bricks +##raj +hiatus +defeats +pending +brightly +jealousy +##xton +##hm +##uki +lena +gdp +colorful +##dley +stein +kidney +##shu +underwear +wanderers +##haw +##icus +guardians +m³ +roared +habits +##wise +permits +gp +uranium +punished +disguise +bundesliga +elise +dundee +erotic +partisan +pi +collectors +float +individually +rendering +behavioral +bucharest +ser +hare +valerie +corporal +nutrition +proportional +##isa +immense +##kis +pavement +##zie +##eld +sutherland +crouched +1775 +##lp +suzuki +trades +endurance +operas +crosby +prayed +priory +rory +socially +##urn +gujarat +##pu +walton +cube +pasha +privilege +lennon +floods +thorne +waterfall +nipple +scouting +approve +##lov +minorities +voter +dwight +extensions +assure +ballroom +slap +dripping +privileges +rejoined +confessed +demonstrating +patriotic +yell +investor +##uth +pagan +slumped +squares +##cle +##kins +confront +bert +embarrassment +##aid +aston +urging +sweater +starr +yuri +brains +williamson +commuter +mortar +structured +selfish +exports +##jon +cds +##him +unfinished +##rre +mortgage +destinations +##nagar +canoe +solitary +buchanan +delays +magistrate +fk +##pling +motivation +##lier +##vier +recruiting +assess +##mouth +malik +antique +1791 +pius +rahman +reich +tub +zhou +smashed +airs +galway +xii +conditioning +honduras +discharged +dexter +##pf +lionel +129 +debates +lemon +tiffany +volunteered +dom +dioxide +procession +devi +sic +tremendous +advertisements +colts +transferring +verdict +hanover +decommissioned +utter +relate +pac +racism +##top +beacon +limp +similarity +terra +occurrence +ant +##how +becky +capt +updates +armament +richie +pal +##graph +halloween +mayo +##ssen +##bone +cara +serena +fcc +dolls +obligations +##dling +violated +lafayette +jakarta +exploitation +##ime +infamous +iconic +##lah +##park +kitty +moody +reginald +dread +spill +crystals +olivier +modeled +bluff +equilibrium +separating +notices +ordnance +extinction +onset +cosmic +attachment +sammy +expose +privy +anchored +##bil +abbott +admits +bending +baritone +emmanuel +policeman +vaughan +winged +climax +dresses +denny +polytechnic +mohamed +burmese +authentic +nikki +genetics +grandparents +homestead +gaza +postponed +metacritic +una +##sby +##bat +unstable +dissertation +##rial +##cian +curls +obscure +uncovered +bronx +praying +disappearing +##hoe +prehistoric +coke +turret +mutations +nonprofit +pits +monaco +##ي +##usion +prominently +dispatched +podium +##mir +uci +##uation +133 +fortifications +birthplace +kendall +##lby +##oll +preacher +rack +goodman +##rman +persistent +##ott +countless +jaime +recorder +lexington +persecution +jumps +renewal +wagons +##11 +crushing +##holder +decorations +##lake +abundance +wrath +laundry +£1 +garde +##rp +jeanne +beetles +peasant +##sl +splitting +caste +sergei +##rer +##ema +scripts +##ively +rub +satellites +##vor +inscribed +verlag +scrapped +gale +packages +chick +potato +slogan +kathleen +arabs +##culture +counterparts +reminiscent +choral +##tead +rand +retains +bushes +dane +accomplish +courtesy +closes +##oth +slaughter +hague +krakow +lawson +tailed +elias +ginger +##ttes +canopy +betrayal +rebuilding +turf +##hof +frowning +allegiance +brigades +kicks +rebuild +polls +alias +nationalism +td +rowan +audition +bowie +fortunately +recognizes +harp +dillon +horrified +##oro +renault +##tics +ropes +##α +presumed +rewarded +infrared +wiping 
+accelerated +illustration +##rid +presses +practitioners +badminton +##iard +detained +##tera +recognizing +relates +misery +##sies +##tly +reproduction +piercing +potatoes +thornton +esther +manners +hbo +##aan +ours +bullshit +ernie +perennial +sensitivity +illuminated +rupert +##jin +##iss +##ear +rfc +nassau +##dock +staggered +socialism +##haven +appointments +nonsense +prestige +sharma +haul +##tical +solidarity +gps +##ook +##rata +igor +pedestrian +##uit +baxter +tenants +wires +medication +unlimited +guiding +impacts +diabetes +##rama +sasha +pas +clive +extraction +131 +continually +constraints +##bilities +sonata +hunted +sixteenth +chu +planting +quote +mayer +pretended +abs +spat +##hua +ceramic +##cci +curtains +pigs +pitching +##dad +latvian +sore +dayton +##sted +##qi +patrols +slice +playground +##nted +shone +stool +apparatus +inadequate +mates +treason +##ija +desires +##liga +##croft +somalia +laurent +mir +leonardo +oracle +grape +obliged +chevrolet +thirteenth +stunning +enthusiastic +##ede +accounted +concludes +currents +basil +##kovic +drought +##rica +mai +##aire +shove +posting +##shed +pilgrimage +humorous +packing +fry +pencil +wines +smells +144 +marilyn +aching +newest +clung +bon +neighbours +sanctioned +##pie +mug +##stock +drowning +##mma +hydraulic +##vil +hiring +reminder +lilly +investigators +##ncies +sour +##eous +compulsory +packet +##rion +##graphic +##elle +cannes +##inate +depressed +##rit +heroic +importantly +theresa +##tled +conway +saturn +marginal +rae +##xia +corresponds +royce +pact +jasper +explosives +packaging +aluminium +##ttered +denotes +rhythmic +spans +assignments +hereditary +outlined +originating +sundays +lad +reissued +greeting +beatrice +##dic +pillar +marcos +plots +handbook +alcoholic +judiciary +avant +slides +extract +masculine +blur +##eum +##force +homage +trembled +owens +hymn +trey +omega +signaling +socks +accumulated +reacted +attic +theo +lining +angie +distraction +primera +talbot +##key +1200 +ti +creativity +billed +##hey +deacon +eduardo +identifies +proposition +dizzy +gunner +hogan +##yam +##pping +##hol +ja +##chan +jensen +reconstructed +##berger +clearance +darius +##nier +abe +harlem +plea +dei +circled +emotionally +notation +fascist +neville +exceeded +upwards +viable +ducks +##fo +workforce +racer +limiting +shri +##lson +possesses +1600 +kerr +moths +devastating +laden +disturbing +locking +##cture +gal +fearing +accreditation +flavor +aide +1870s +mountainous +##baum +melt +##ures +motel +texture +servers +soda +##mb +herd +##nium +erect +puzzled +hum +peggy +examinations +gould +testified +geoff +ren +devised +sacks +##law +denial +posters +grunted +cesar +tutor +ec +gerry +offerings +byrne +falcons +combinations +ct +incoming +pardon +rocking +26th +avengers +flared +mankind +seller +uttar +loch +nadia +stroking +exposing +##hd +fertile +ancestral +instituted +##has +noises +prophecy +taxation +eminent +vivid +pol +##bol +dart +indirect +multimedia +notebook +upside +displaying +adrenaline +referenced +geometric +##iving +progression +##ddy +blunt +announce +##far +implementing +##lav +aggression +liaison +cooler +cares +headache +plantations +gorge +dots +impulse +thickness +ashamed +averaging +kathy +obligation +precursor +137 +fowler +symmetry +thee +225 +hears +##rai +undergoing +ads +butcher +bowler +##lip +cigarettes +subscription +goodness +##ically +browne +##hos +##tech +kyoto +donor +##erty +damaging +friction +drifting +expeditions +hardened +prostitution +152 +fauna +blankets +claw 
+tossing +snarled +butterflies +recruits +investigative +coated +healed +138 +communal +hai +xiii +academics +boone +psychologist +restless +lahore +stephens +mba +brendan +foreigners +printer +##pc +ached +explode +27th +deed +scratched +dared +##pole +cardiac +1780 +okinawa +proto +commando +compelled +oddly +electrons +##base +replica +thanksgiving +##rist +sheila +deliberate +stafford +tidal +representations +hercules +ou +##path +##iated +kidnapping +lenses +##tling +deficit +samoa +mouths +consuming +computational +maze +granting +smirk +razor +fixture +ideals +inviting +aiden +nominal +##vs +issuing +julio +pitt +ramsey +docks +##oss +exhaust +##owed +bavarian +draped +anterior +mating +ethiopian +explores +noticing +##nton +discarded +convenience +hoffman +endowment +beasts +cartridge +mormon +paternal +probe +sleeves +interfere +lump +deadline +##rail +jenks +bulldogs +scrap +alternating +justified +reproductive +nam +seize +descending +secretariat +kirby +coupe +grouped +smash +panther +sedan +tapping +##18 +lola +cheer +germanic +unfortunate +##eter +unrelated +##fan +subordinate +##sdale +suzanne +advertisement +##ility +horsepower +##lda +cautiously +discourse +luigi +##mans +##fields +noun +prevalent +mao +schneider +everett +surround +governorate +kira +##avia +westward +##take +misty +rails +sustainability +134 +unused +##rating +packs +toast +unwilling +regulate +thy +suffrage +nile +awe +assam +definitions +travelers +affordable +##rb +conferred +sells +undefeated +beneficial +torso +basal +repeating +remixes +##pass +bahrain +cables +fang +##itated +excavated +numbering +statutory +##rey +deluxe +##lian +forested +ramirez +derbyshire +zeus +slamming +transfers +astronomer +banana +lottery +berg +histories +bamboo +##uchi +resurrection +posterior +bowls +vaguely +##thi +thou +preserving +tensed +offence +##inas +meyrick +callum +ridden +watt +langdon +tying +lowland +snorted +daring +truman +##hale +##girl +aura +overly +filing +weighing +goa +infections +philanthropist +saunders +eponymous +##owski +latitude +perspectives +reviewing +mets +commandant +radial +##kha +flashlight +reliability +koch +vowels +amazed +ada +elaine +supper +##rth +##encies +predator +debated +soviets +cola +##boards +##nah +compartment +crooked +arbitrary +fourteenth +##ctive +havana +majors +steelers +clips +profitable +ambush +exited +packers +##tile +nude +cracks +fungi +##е +limb +trousers +josie +shelby +tens +frederic +##ος +definite +smoothly +constellation +insult +baton +discs +lingering +##nco +conclusions +lent +staging +becker +grandpa +shaky +##tron +einstein +obstacles +sk +adverse +elle +economically +##moto +mccartney +thor +dismissal +motions +readings +nostrils +treatise +##pace +squeezing +evidently +prolonged +1783 +venezuelan +je +marguerite +beirut +takeover +shareholders +##vent +denise +digit +airplay +norse +##bbling +imaginary +pills +hubert +blaze +vacated +eliminating +##ello +vine +mansfield +##tty +retrospective +barrow +borne +clutch +bail +forensic +weaving +##nett +##witz +desktop +citadel +promotions +worrying +dorset +ieee +subdivided +##iating +manned +expeditionary +pickup +synod +chuckle +185 +barney +##rz +##ffin +functionality +karachi +litigation +meanings +uc +lick +turbo +anders +##ffed +execute +curl +oppose +ankles +typhoon +##د +##ache +##asia +linguistics +compassion +pressures +grazing +perfection +##iting +immunity +monopoly +muddy +backgrounds +136 +namibia +francesca +monitors +attracting +stunt +tuition +##ии +vegetable +##mates +##quent +mgm 
+jen +complexes +forts +##ond +cellar +bites +seventeenth +royals +flemish +failures +mast +charities +##cular +peruvian +capitals +macmillan +ipswich +outward +frigate +postgraduate +folds +employing +##ouse +concurrently +fiery +##tai +contingent +nightmares +monumental +nicaragua +##kowski +lizard +mal +fielding +gig +reject +##pad +harding +##ipe +coastline +##cin +##nos +beethoven +humphrey +innovations +##tam +##nge +norris +doris +solicitor +huang +obey +141 +##lc +niagara +##tton +shelves +aug +bourbon +curry +nightclub +specifications +hilton +##ndo +centennial +dispersed +worm +neglected +briggs +sm +font +kuala +uneasy +plc +##nstein +##bound +##aking +##burgh +awaiting +pronunciation +##bbed +##quest +eh +optimal +zhu +raped +greens +presided +brenda +worries +##life +venetian +marxist +turnout +##lius +refined +braced +sins +grasped +sunderland +nickel +speculated +lowell +cyrillic +communism +fundraising +resembling +colonists +mutant +freddie +usc +##mos +gratitude +##run +mural +##lous +chemist +wi +reminds +28th +steals +tess +pietro +##ingen +promoter +ri +microphone +honoured +rai +sant +##qui +feather +##nson +burlington +kurdish +terrorists +deborah +sickness +##wed +##eet +hazard +irritated +desperation +veil +clarity +##rik +jewels +xv +##gged +##ows +##cup +berkshire +unfair +mysteries +orchid +winced +exhaustion +renovations +stranded +obe +infinity +##nies +adapt +redevelopment +thanked +registry +olga +domingo +noir +tudor +ole +##atus +commenting +behaviors +##ais +crisp +pauline +probable +stirling +wigan +##bian +paralympics +panting +surpassed +##rew +luca +barred +pony +famed +##sters +cassandra +waiter +carolyn +exported +##orted +andres +destructive +deeds +jonah +castles +vacancy +suv +##glass +1788 +orchard +yep +famine +belarusian +sprang +##forth +skinny +##mis +administrators +rotterdam +zambia +zhao +boiler +discoveries +##ride +##physics +lucius +disappointing +outreach +spoon +##frame +qualifications +unanimously +enjoys +regency +##iidae +stade +realism +veterinary +rodgers +dump +alain +chestnut +castile +censorship +rumble +gibbs +##itor +communion +reggae +inactivated +logs +loads +##houses +homosexual +##iano +ale +informs +##cas +phrases +plaster +linebacker +ambrose +kaiser +fascinated +850 +limerick +recruitment +forge +mastered +##nding +leinster +rooted +threaten +##strom +borneo +##hes +suggestions +scholarships +propeller +documentaries +patronage +coats +constructing +invest +neurons +comet +entirety +shouts +identities +annoying +unchanged +wary +##antly +##ogy +neat +oversight +##kos +phillies +replay +constance +##kka +incarnation +humble +skies +minus +##acy +smithsonian +##chel +guerrilla +jar +cadets +##plate +surplus +audit +##aru +cracking +joanna +louisa +pacing +##lights +intentionally +##iri +diner +nwa +imprint +australians +tong +unprecedented +bunker +naive +specialists +ark +nichols +railing +leaked +pedal +##uka +shrub +longing +roofs +v8 +captains +neural +tuned +##ntal +##jet +emission +medina +frantic +codex +definitive +sid +abolition +intensified +stocks +enrique +sustain +genoa +oxide +##written +clues +cha +##gers +tributaries +fragment +venom +##rity +##ente +##sca +muffled +vain +sire +laos +##ingly +##hana +hastily +snapping +surfaced +sentiment +motive +##oft +contests +approximate +mesa +luckily +dinosaur +exchanges +propelled +accord +bourne +relieve +tow +masks +offended +##ues +cynthia +##mmer +rains +bartender +zinc +reviewers +lois +##sai +legged +arrogant +rafe +rosie +comprise +handicap +blockade 
+inlet +lagoon +copied +drilling +shelley +petals +##inian +mandarin +obsolete +##inated +onward +arguably +productivity +cindy +praising +seldom +busch +discusses +raleigh +shortage +ranged +stanton +encouragement +firstly +conceded +overs +temporal +##uke +cbe +##bos +woo +certainty +pumps +##pton +stalked +##uli +lizzie +periodic +thieves +weaker +##night +gases +shoving +chooses +wc +##chemical +prompting +weights +##kill +robust +flanked +sticky +hu +tuberculosis +##eb +##eal +christchurch +resembled +wallet +reese +inappropriate +pictured +distract +fixing +fiddle +giggled +burger +heirs +hairy +mechanic +torque +apache +obsessed +chiefly +cheng +logging +##tag +extracted +meaningful +numb +##vsky +gloucestershire +reminding +##bay +unite +##lit +breeds +diminished +clown +glove +1860s +##ن +##ug +archibald +focal +freelance +sliced +depiction +##yk +organism +switches +sights +stray +crawling +##ril +lever +leningrad +interpretations +loops +anytime +reel +alicia +delighted +##ech +inhaled +xiv +suitcase +bernie +vega +licenses +northampton +exclusion +induction +monasteries +racecourse +homosexuality +##right +##sfield +##rky +dimitri +michele +alternatives +ions +commentators +genuinely +objected +pork +hospitality +fencing +stephan +warships +peripheral +wit +drunken +wrinkled +quentin +spends +departing +chung +numerical +spokesperson +##zone +johannesburg +caliber +killers +##udge +assumes +neatly +demographic +abigail +bloc +##vel +mounting +##lain +bentley +slightest +xu +recipients +##jk +merlin +##writer +seniors +prisons +blinking +hindwings +flickered +kappa +##hel +80s +strengthening +appealing +brewing +gypsy +mali +lashes +hulk +unpleasant +harassment +bio +treaties +predict +instrumentation +pulp +troupe +boiling +mantle +##ffe +ins +##vn +dividing +handles +verbs +##onal +coconut +senegal +340 +thorough +gum +momentarily +##sto +cocaine +panicked +destined +##turing +teatro +denying +weary +captained +mans +##hawks +##code +wakefield +bollywood +thankfully +##16 +cyril +##wu +amendments +##bahn +consultation +stud +reflections +kindness +1787 +internally +##ovo +tex +mosaic +distribute +paddy +seeming +143 +##hic +piers +##15 +##mura +##verse +popularly +winger +kang +sentinel +mccoy +##anza +covenant +##bag +verge +fireworks +suppress +thrilled +dominate +##jar +swansea +##60 +142 +reconciliation +##ndi +stiffened +cue +dorian +##uf +damascus +amor +ida +foremost +##aga +porsche +unseen +dir +##had +##azi +stony +lexi +melodies +##nko +angular +integer +podcast +ants +inherent +jaws +justify +persona +##olved +josephine +##nr +##ressed +customary +flashes +gala +cyrus +glaring +backyard +ariel +physiology +greenland +html +stir +avon +atletico +finch +methodology +ked +##lent +mas +catholicism +townsend +branding +quincy +fits +containers +1777 +ashore +aragon +##19 +forearm +poisoning +##sd +adopting +conquer +grinding +amnesty +keller +finances +evaluate +forged +lankan +instincts +##uto +guam +bosnian +photographed +workplace +desirable +protector +##dog +allocation +intently +encourages +willy +##sten +bodyguard +electro +brighter +##ν +bihar +##chev +lasts +opener +amphibious +sal +verde +arte +##cope +captivity +vocabulary +yields +##tted +agreeing +desmond +pioneered +##chus +strap +campaigned +railroads +##ович +emblem +##dre +stormed +501 +##ulous +marijuana +northumberland +##gn +##nath +bowen +landmarks +beaumont +##qua +danube +##bler +attorneys +th +ge +flyers +critique +villains +cass +mutation +acc +##0s +colombo +mckay +motif +sampling +concluding 
+syndicate +##rell +neon +stables +ds +warnings +clint +mourning +wilkinson +##tated +merrill +leopard +evenings +exhaled +emil +sonia +ezra +discrete +stove +farrell +fifteenth +prescribed +superhero +##rier +worms +helm +wren +##duction +##hc +expo +##rator +hq +unfamiliar +antony +prevents +acceleration +fiercely +mari +painfully +calculations +cheaper +ign +clifton +irvine +davenport +mozambique +##np +pierced +##evich +wonders +##wig +##cate +##iling +crusade +ware +##uel +enzymes +reasonably +mls +##coe +mater +ambition +bunny +eliot +kernel +##fin +asphalt +headmaster +torah +aden +lush +pins +waived +##care +##yas +joao +substrate +enforce +##grad +##ules +alvarez +selections +epidemic +tempted +##bit +bremen +translates +ensured +waterfront +29th +forrest +manny +malone +kramer +reigning +cookies +simpler +absorption +205 +engraved +##ffy +evaluated +1778 +haze +146 +comforting +crossover +##abe +thorn +##rift +##imo +##pop +suppression +fatigue +cutter +##tr +201 +wurttemberg +##orf +enforced +hovering +proprietary +gb +samurai +syllable +ascent +lacey +tick +lars +tractor +merchandise +rep +bouncing +defendants +##yre +huntington +##ground +##oko +standardized +##hor +##hima +assassinated +nu +predecessors +rainy +liar +assurance +lyrical +##uga +secondly +flattened +ios +parameter +undercover +##mity +bordeaux +punish +ridges +markers +exodus +inactive +hesitate +debbie +nyc +pledge +savoy +nagar +offset +organist +##tium +hesse +marin +converting +##iver +diagram +propulsion +pu +validity +reverted +supportive +##dc +ministries +clans +responds +proclamation +##inae +##ø +##rea +ein +pleading +patriot +sf +birch +islanders +strauss +hates +##dh +brandenburg +concession +rd +##ob +1900s +killings +textbook +antiquity +cinematography +wharf +embarrassing +setup +creed +farmland +inequality +centred +signatures +fallon +370 +##ingham +##uts +ceylon +gazing +directive +laurie +##tern +globally +##uated +##dent +allah +excavation +threads +##cross +148 +frantically +icc +utilize +determines +respiratory +thoughtful +receptions +##dicate +merging +chandra +seine +147 +builders +builds +diagnostic +dev +visibility +goddamn +analyses +dhaka +cho +proves +chancel +concurrent +curiously +canadians +pumped +restoring +1850s +turtles +jaguar +sinister +spinal +traction +declan +vows +1784 +glowed +capitalism +swirling +install +universidad +##lder +##oat +soloist +##genic +##oor +coincidence +beginnings +nissan +dip +resorts +caucasus +combustion +infectious +##eno +pigeon +serpent +##itating +conclude +masked +salad +jew +##gr +surreal +toni +##wc +harmonica +151 +##gins +##etic +##coat +fishermen +intending +bravery +##wave +klaus +titan +wembley +taiwanese +ransom +40th +incorrect +hussein +eyelids +jp +cooke +dramas +utilities +##etta +##print +eisenhower +principally +granada +lana +##rak +openings +concord +##bl +bethany +connie +morality +sega +##mons +##nard +earnings +##kara +##cine +wii +communes +##rel +coma +composing +softened +severed +grapes +##17 +nguyen +analyzed +warlord +hubbard +heavenly +behave +slovenian +##hit +##ony +hailed +filmmakers +trance +caldwell +skye +unrest +coward +likelihood +##aging +bern +sci +taliban +honolulu +propose +##wang +1700 +browser +imagining +cobra +contributes +dukes +instinctively +conan +violinist +##ores +accessories +gradual +##amp +quotes +sioux +##dating +undertake +intercepted +sparkling +compressed +139 +fungus +tombs +haley +imposing +rests +degradation +lincolnshire +retailers +wetlands +tulsa +distributor +dungeon +nun 
+greenhouse +convey +atlantis +aft +exits +oman +dresser +lyons +##sti +joking +eddy +judgement +omitted +digits +##cts +##game +juniors +##rae +cents +stricken +une +##ngo +wizards +weir +breton +nan +technician +fibers +liking +royalty +##cca +154 +persia +terribly +magician +##rable +##unt +vance +cafeteria +booker +camille +warmer +##static +consume +cavern +gaps +compass +contemporaries +foyer +soothing +graveyard +maj +plunged +blush +##wear +cascade +demonstrates +ordinance +##nov +boyle +##lana +rockefeller +shaken +banjo +izzy +##ense +breathless +vines +##32 +##eman +alterations +chromosome +dwellings +feudal +mole +153 +catalonia +relics +tenant +mandated +##fm +fridge +hats +honesty +patented +raul +heap +cruisers +accusing +enlightenment +infants +wherein +chatham +contractors +zen +affinity +hc +osborne +piston +156 +traps +maturity +##rana +lagos +##zal +peering +##nay +attendant +dealers +protocols +subset +prospects +biographical +##cre +artery +##zers +insignia +nuns +endured +##eration +recommend +schwartz +serbs +berger +cromwell +crossroads +##ctor +enduring +clasped +grounded +##bine +marseille +twitched +abel +choke +https +catalyst +moldova +italians +##tist +disastrous +wee +##oured +##nti +wwf +nope +##piration +##asa +expresses +thumbs +167 +##nza +coca +1781 +cheating +##ption +skipped +sensory +heidelberg +spies +satan +dangers +semifinal +202 +bohemia +whitish +confusing +shipbuilding +relies +surgeons +landings +ravi +baku +moor +suffix +alejandro +##yana +litre +upheld +##unk +rajasthan +##rek +coaster +insists +posture +scenarios +etienne +favoured +appoint +transgender +elephants +poked +greenwood +defences +fulfilled +militant +somali +1758 +chalk +potent +##ucci +migrants +wink +assistants +nos +restriction +activism +niger +##ario +colon +shaun +##sat +daphne +##erated +swam +congregations +reprise +considerations +magnet +playable +xvi +##р +overthrow +tobias +knob +chavez +coding +##mers +propped +katrina +orient +newcomer +##suke +temperate +##pool +farmhouse +interrogation +##vd +committing +##vert +forthcoming +strawberry +joaquin +macau +ponds +shocking +siberia +##cellular +chant +contributors +##nant +##ologists +sped +absorb +hail +1782 +spared +##hore +barbados +karate +opus +originates +saul +##xie +evergreen +leaped +##rock +correlation +exaggerated +weekday +unification +bump +tracing +brig +afb +pathways +utilizing +##ners +mod +mb +disturbance +kneeling +##stad +##guchi +100th +pune +##thy +decreasing +168 +manipulation +miriam +academia +ecosystem +occupational +rbi +##lem +rift +##14 +rotary +stacked +incorporation +awakening +generators +guerrero +racist +##omy +cyber +derivatives +culminated +allie +annals +panzer +sainte +wikipedia +pops +zu +austro +##vate +algerian +politely +nicholson +mornings +educate +tastes +thrill +dartmouth +##gating +db +##jee +regan +differing +concentrating +choreography +divinity +##media +pledged +alexandre +routing +gregor +madeline +##idal +apocalypse +##hora +gunfire +culminating +elves +fined +liang +lam +programmed +tar +guessing +transparency +gabrielle +##gna +cancellation +flexibility +##lining +accession +shea +stronghold +nets +specializes +##rgan +abused +hasan +sgt +ling +exceeding +##₄ +admiration +supermarket +##ark +photographers +specialised +tilt +resonance +hmm +perfume +380 +sami +threatens +garland +botany +guarding +boiled +greet +puppy +russo +supplier +wilmington +vibrant +vijay +##bius +paralympic +grumbled +paige +faa +licking +margins +hurricanes +##gong +fest +grenade +ripping 
+##uz +counseling +weigh +##sian +needles +wiltshire +edison +costly +##not +fulton +tramway +redesigned +staffordshire +cache +gasping +watkins +sleepy +candidacy +##group +monkeys +timeline +throbbing +##bid +##sos +berth +uzbekistan +vanderbilt +bothering +overturned +ballots +gem +##iger +sunglasses +subscribers +hooker +compelling +ang +exceptionally +saloon +stab +##rdi +carla +terrifying +rom +##vision +coil +##oids +satisfying +vendors +31st +mackay +deities +overlooked +ambient +bahamas +felipe +olympia +whirled +botanist +advertised +tugging +##dden +disciples +morales +unionist +rites +foley +morse +motives +creepy +##₀ +soo +##sz +bargain +highness +frightening +turnpike +tory +reorganization +##cer +depict +biographer +##walk +unopposed +manifesto +##gles +institut +emile +accidental +kapoor +##dam +kilkenny +cortex +lively +##13 +romanesque +jain +shan +cannons +##ood +##ske +petrol +echoing +amalgamated +disappears +cautious +proposes +sanctions +trenton +##ر +flotilla +aus +contempt +tor +canary +cote +theirs +##hun +conceptual +deleted +fascinating +paso +blazing +elf +honourable +hutchinson +##eiro +##outh +##zin +surveyor +tee +amidst +wooded +reissue +intro +##ono +cobb +shelters +newsletter +hanson +brace +encoding +confiscated +dem +caravan +marino +scroll +melodic +cows +imam +##adi +##aneous +northward +searches +biodiversity +cora +310 +roaring +##bers +connell +theologian +halo +compose +pathetic +unmarried +dynamo +##oot +az +calculation +toulouse +deserves +humour +nr +forgiveness +tam +undergone +martyr +pamela +myths +whore +counselor +hicks +290 +heavens +battleship +electromagnetic +##bbs +stellar +establishments +presley +hopped +##chin +temptation +90s +wills +nas +##yuan +nhs +##nya +seminars +##yev +adaptations +gong +asher +lex +indicator +sikh +tobago +cites +goin +##yte +satirical +##gies +characterised +correspond +bubbles +lure +participates +##vid +eruption +skate +therapeutic +1785 +canals +wholesale +defaulted +sac +460 +petit +##zzled +virgil +leak +ravens +256 +portraying +##yx +ghetto +creators +dams +portray +vicente +##rington +fae +namesake +bounty +##arium +joachim +##ota +##iser +aforementioned +axle +snout +depended +dismantled +reuben +480 +##ibly +gallagher +##lau +##pd +earnest +##ieu +##iary +inflicted +objections +##llar +asa +gritted +##athy +jericho +##sea +##was +flick +underside +ceramics +undead +substituted +195 +eastward +undoubtedly +wheeled +chimney +##iche +guinness +cb +##ager +siding +##bell +traitor +baptiste +disguised +inauguration +149 +tipperary +choreographer +perched +warmed +stationary +eco +##ike +##ntes +bacterial +##aurus +flores +phosphate +##core +attacker +invaders +alvin +intersects +a1 +indirectly +immigrated +businessmen +cornelius +valves +narrated +pill +sober +ul +nationale +monastic +applicants +scenery +##jack +161 +motifs +constitutes +cpu +##osh +jurisdictions +sd +tuning +irritation +woven +##uddin +fertility +gao +##erie +antagonist +impatient +glacial +hides +boarded +denominations +interception +##jas +cookie +nicola +##tee +algebraic +marquess +bahn +parole +buyers +bait +turbines +paperwork +bestowed +natasha +renee +oceans +purchases +157 +vaccine +215 +##tock +fixtures +playhouse +integrate +jai +oswald +intellectuals +##cky +booked +nests +mortimer +##isi +obsession +sept +##gler +##sum +440 +scrutiny +simultaneous +squinted +##shin +collects +oven +shankar +penned +remarkably +##я +slips +luggage +spectral +1786 +collaborations +louie +consolidation +##ailed +##ivating +420 +hoover 
+blackpool +harness +ignition +vest +tails +belmont +mongol +skinner +##nae +visually +mage +derry +##tism +##unce +stevie +transitional +##rdy +redskins +drying +prep +prospective +##21 +annoyance +oversee +##loaded +fills +##books +##iki +announces +fda +scowled +respects +prasad +mystic +tucson +##vale +revue +springer +bankrupt +1772 +aristotle +salvatore +habsburg +##geny +dal +natal +nut +pod +chewing +darts +moroccan +walkover +rosario +lenin +punjabi +##ße +grossed +scattering +wired +invasive +hui +polynomial +corridors +wakes +gina +portrays +##cratic +arid +retreating +erich +irwin +sniper +##dha +linen +lindsey +maneuver +butch +shutting +socio +bounce +commemorative +postseason +jeremiah +pines +275 +mystical +beads +bp +abbas +furnace +bidding +consulted +assaulted +empirical +rubble +enclosure +sob +weakly +cancel +polly +yielded +##emann +curly +prediction +battered +70s +vhs +jacqueline +render +sails +barked +detailing +grayson +riga +sloane +raging +##yah +herbs +bravo +##athlon +alloy +giggle +imminent +suffers +assumptions +waltz +##itate +accomplishments +##ited +bathing +remixed +deception +prefix +##emia +deepest +##tier +##eis +balkan +frogs +##rong +slab +##pate +philosophers +peterborough +grains +imports +dickinson +rwanda +##atics +1774 +dirk +lan +tablets +##rove +clone +##rice +caretaker +hostilities +mclean +##gre +regimental +treasures +norms +impose +tsar +tango +diplomacy +variously +complain +192 +recognise +arrests +1779 +celestial +pulitzer +##dus +bing +libretto +##moor +adele +splash +##rite +expectation +lds +confronts +##izer +spontaneous +harmful +wedge +entrepreneurs +buyer +##ope +bilingual +translate +rugged +conner +circulated +uae +eaton +##gra +##zzle +lingered +lockheed +vishnu +reelection +alonso +##oom +joints +yankee +headline +cooperate +heinz +laureate +invading +##sford +echoes +scandinavian +##dham +hugging +vitamin +salute +micah +hind +trader +##sper +radioactive +##ndra +militants +poisoned +ratified +remark +campeonato +deprived +wander +prop +##dong +outlook +##tani +##rix +##eye +chiang +darcy +##oping +mandolin +spice +statesman +babylon +182 +walled +forgetting +afro +##cap +158 +giorgio +buffer +##polis +planetary +##gis +overlap +terminals +kinda +centenary +##bir +arising +manipulate +elm +ke +1770 +ak +##tad +chrysler +mapped +moose +pomeranian +quad +macarthur +assemblies +shoreline +recalls +stratford +##rted +noticeable +##evic +imp +##rita +##sque +accustomed +supplying +tents +disgusted +vogue +sipped +filters +khz +reno +selecting +luftwaffe +mcmahon +tyne +masterpiece +carriages +collided +dunes +exercised +flare +remembers +muzzle +##mobile +heck +##rson +burgess +lunged +middleton +boycott +bilateral +##sity +hazardous +lumpur +multiplayer +spotlight +jackets +goldman +liege +porcelain +rag +waterford +benz +attracts +hopeful +battling +ottomans +kensington +baked +hymns +cheyenne +lattice +levine +borrow +polymer +clashes +michaels +monitored +commitments +denounced +##25 +##von +cavity +##oney +hobby +akin +##holders +futures +intricate +cornish +patty +##oned +illegally +dolphin +##lag +barlow +yellowish +maddie +apologized +luton +plagued +##puram +nana +##rds +sway +fanny +łodz +##rino +psi +suspicions +hanged +##eding +initiate +charlton +##por +nak +competent +235 +analytical +annex +wardrobe +reservations +##rma +sect +162 +fairfax +hedge +piled +buckingham +uneven +bauer +simplicity +snyder +interpret +accountability +donors +moderately +byrd +continents +##cite +##max +disciple +hr +jamaican +ping 
+nominees +##uss +mongolian +diver +attackers +eagerly +ideological +pillows +miracles +apartheid +revolver +sulfur +clinics +moran +163 +##enko +ile +katy +rhetoric +##icated +chronology +recycling +##hrer +elongated +mughal +pascal +profiles +vibration +databases +domination +##fare +##rant +matthias +digest +rehearsal +polling +weiss +initiation +reeves +clinging +flourished +impress +ngo +##hoff +##ume +buckley +symposium +rhythms +weed +emphasize +transforming +##taking +##gence +##yman +accountant +analyze +flicker +foil +priesthood +voluntarily +decreases +##80 +##hya +slater +sv +charting +mcgill +##lde +moreno +##iu +besieged +zur +robes +##phic +admitting +api +deported +turmoil +peyton +earthquakes +##ares +nationalists +beau +clair +brethren +interrupt +welch +curated +galerie +requesting +164 +##ested +impending +steward +viper +##vina +complaining +beautifully +brandy +foam +nl +1660 +##cake +alessandro +punches +laced +explanations +##lim +attribute +clit +reggie +discomfort +##cards +smoothed +whales +##cene +adler +countered +duffy +disciplinary +widening +recipe +reliance +conducts +goats +gradient +preaching +##shaw +matilda +quasi +striped +meridian +cannabis +cordoba +certificates +##agh +##tering +graffiti +hangs +pilgrims +repeats +##ych +revive +urine +etat +##hawk +fueled +belts +fuzzy +susceptible +##hang +mauritius +salle +sincere +beers +hooks +##cki +arbitration +entrusted +advise +sniffed +seminar +junk +donnell +processors +principality +strapped +celia +mendoza +everton +fortunes +prejudice +starving +reassigned +steamer +##lund +tuck +evenly +foreman +##ffen +dans +375 +envisioned +slit +##xy +baseman +liberia +rosemary +##weed +electrified +periodically +potassium +stride +contexts +sperm +slade +mariners +influx +bianca +subcommittee +##rane +spilling +icao +estuary +##nock +delivers +iphone +##ulata +isa +mira +bohemian +dessert +##sbury +welcoming +proudly +slowing +##chs +musee +ascension +russ +##vian +waits +##psy +africans +exploit +##morphic +gov +eccentric +crab +peck +##ull +entrances +formidable +marketplace +groom +bolted +metabolism +patton +robbins +courier +payload +endure +##ifier +andes +refrigerator +##pr +ornate +##uca +ruthless +illegitimate +masonry +strasbourg +bikes +adobe +##³ +apples +quintet +willingly +niche +bakery +corpses +energetic +##cliffe +##sser +##ards +177 +centimeters +centro +fuscous +cretaceous +rancho +##yde +andrei +telecom +tottenham +oasis +ordination +vulnerability +presiding +corey +cp +penguins +sims +##pis +malawi +piss +##48 +correction +##cked +##ffle +##ryn +countdown +detectives +psychiatrist +psychedelic +dinosaurs +blouse +##get +choi +vowed +##oz +randomly +##pol +49ers +scrub +blanche +bruins +dusseldorf +##using +unwanted +##ums +212 +dominique +elevations +headlights +om +laguna +##oga +1750 +famously +ignorance +shrewsbury +##aine +ajax +breuning +che +confederacy +greco +overhaul +##screen +paz +skirts +disagreement +cruelty +jagged +phoebe +shifter +hovered +viruses +##wes +mandy +##lined +##gc +landlord +squirrel +dashed +##ι +ornamental +gag +wally +grange +literal +spurs +undisclosed +proceeding +yin +##text +billie +orphan +spanned +humidity +indy +weighted +presentations +explosions +lucian +##tary +vaughn +hindus +##anga +##hell +psycho +171 +daytona +protects +efficiently +rematch +sly +tandem +##oya +rebranded +impaired +hee +metropolis +peach +godfrey +diaspora +ethnicity +prosperous +gleaming +dar +grossing +playback +##rden +stripe +pistols +##tain +births +labelled +##cating +172 
+rudy +alba +##onne +aquarium +hostility +##gb +##tase +shudder +sumatra +hardest +lakers +consonant +creeping +demos +homicide +capsule +zeke +liberties +expulsion +pueblo +##comb +trait +transporting +##ddin +##neck +##yna +depart +gregg +mold +ledge +hangar +oldham +playboy +termination +analysts +gmbh +romero +##itic +insist +cradle +filthy +brightness +slash +shootout +deposed +bordering +##truct +isis +microwave +tumbled +sheltered +cathy +werewolves +messy +andersen +convex +clapped +clinched +satire +wasting +edo +vc +rufus +##jak +mont +##etti +poznan +##keeping +restructuring +transverse +##rland +azerbaijani +slovene +gestures +roommate +choking +shear +##quist +vanguard +oblivious +##hiro +disagreed +baptism +##lich +coliseum +##aceae +salvage +societe +cory +locke +relocation +relying +versailles +ahl +swelling +##elo +cheerful +##word +##edes +gin +sarajevo +obstacle +diverted +##nac +messed +thoroughbred +fluttered +utrecht +chewed +acquaintance +assassins +dispatch +mirza +##wart +nike +salzburg +swell +yen +##gee +idle +ligue +samson +##nds +##igh +playful +spawned +##cise +tease +##case +burgundy +##bot +stirring +skeptical +interceptions +marathi +##dies +bedrooms +aroused +pinch +##lik +preferences +tattoos +buster +digitally +projecting +rust +##ital +kitten +priorities +addison +pseudo +##guard +dusk +icons +sermon +##psis +##iba +bt +##lift +##xt +ju +truce +rink +##dah +##wy +defects +psychiatry +offences +calculate +glucose +##iful +##rized +##unda +francaise +##hari +richest +warwickshire +carly +1763 +purity +redemption +lending +##cious +muse +bruises +cerebral +aero +carving +##name +preface +terminology +invade +monty +##int +anarchist +blurred +##iled +rossi +treats +guts +shu +foothills +ballads +undertaking +premise +cecilia +affiliates +blasted +conditional +wilder +minors +drone +rudolph +buffy +swallowing +horton +attested +##hop +rutherford +howell +primetime +livery +penal +##bis +minimize +hydro +wrecked +wrought +palazzo +##gling +cans +vernacular +friedman +nobleman +shale +walnut +danielle +##ection +##tley +sears +##kumar +chords +lend +flipping +streamed +por +dracula +gallons +sacrifices +gamble +orphanage +##iman +mckenzie +##gible +boxers +daly +##balls +##ان +208 +##ific +##rative +##iq +exploited +slated +##uity +circling +hillary +pinched +goldberg +provost +campaigning +lim +piles +ironically +jong +mohan +successors +usaf +##tem +##ught +autobiographical +haute +preserves +##ending +acquitted +comparisons +203 +hydroelectric +gangs +cypriot +torpedoes +rushes +chrome +derive +bumps +instability +fiat +pets +##mbe +silas +dye +reckless +settler +##itation +info +heats +##writing +176 +canonical +maltese +fins +mushroom +stacy +aspen +avid +##kur +##loading +vickers +gaston +hillside +statutes +wilde +gail +kung +sabine +comfortably +motorcycles +##rgo +169 +pneumonia +fetch +##sonic +axel +faintly +parallels +##oop +mclaren +spouse +compton +interdisciplinary +miner +##eni +181 +clamped +##chal +##llah +separates +versa +##mler +scarborough +labrador +##lity +##osing +rutgers +hurdles +como +166 +burt +divers +##100 +wichita +cade +coincided +##erson +bruised +mla +##pper +vineyard +##ili +##brush +notch +mentioning +jase +hearted +kits +doe +##acle +pomerania +##ady +ronan +seizure +pavel +problematic +##zaki +domenico +##ulin +catering +penelope +dependence +parental +emilio +ministerial +atkinson +##bolic +clarkson +chargers +colby +grill +peeked +arises +summon +##aged +fools +##grapher +faculties +qaeda +##vial +garner +refurbished 
+##hwa +geelong +disasters +nudged +bs +shareholder +lori +algae +reinstated +rot +##ades +##nous +invites +stainless +183 +inclusive +##itude +diocesan +til +##icz +denomination +##xa +benton +floral +registers +##ider +##erman +##kell +absurd +brunei +guangzhou +hitter +retaliation +##uled +##eve +blanc +nh +consistency +contamination +##eres +##rner +dire +palermo +broadcasters +diaries +inspire +vols +brewer +tightening +ky +mixtape +hormone +##tok +stokes +##color +##dly +##ssi +pg +##ometer +##lington +sanitation +##tility +intercontinental +apps +##adt +¹⁄₂ +cylinders +economies +favourable +unison +croix +gertrude +odyssey +vanity +dangling +##logists +upgrades +dice +middleweight +practitioner +##ight +206 +henrik +parlor +orion +angered +lac +python +blurted +##rri +sensual +intends +swings +angled +##phs +husky +attain +peerage +precinct +textiles +cheltenham +shuffled +dai +confess +tasting +bhutan +##riation +tyrone +segregation +abrupt +ruiz +##rish +smirked +blackwell +confidential +browning +amounted +##put +vase +scarce +fabulous +raided +staple +guyana +unemployed +glider +shay +##tow +carmine +troll +intervene +squash +superstar +##uce +cylindrical +len +roadway +researched +handy +##rium +##jana +meta +lao +declares +##rring +##tadt +##elin +##kova +willem +shrubs +napoleonic +realms +skater +qi +volkswagen +##ł +tad +hara +archaeologist +awkwardly +eerie +##kind +wiley +##heimer +##24 +titus +organizers +cfl +crusaders +lama +usb +vent +enraged +thankful +occupants +maximilian +##gaard +possessing +textbooks +##oran +collaborator +quaker +##ulo +avalanche +mono +silky +straits +isaiah +mustang +surged +resolutions +potomac +descend +cl +kilograms +plato +strains +saturdays +##olin +bernstein +##ype +holstein +ponytail +##watch +belize +conversely +heroine +perpetual +##ylus +charcoal +piedmont +glee +negotiating +backdrop +prologue +##jah +##mmy +pasadena +climbs +ramos +sunni +##holm +##tner +##tri +anand +deficiency +hertfordshire +stout +##avi +aperture +orioles +##irs +doncaster +intrigued +bombed +coating +otis +##mat +cocktail +##jit +##eto +amir +arousal +sar +##proof +##act +##ories +dixie +pots +##bow +whereabouts +159 +##fted +drains +bullying +cottages +scripture +coherent +fore +poe +appetite +##uration +sampled +##ators +##dp +derrick +rotor +jays +peacock +installment +##rro +advisors +##coming +rodeo +scotch +##mot +##db +##fen +##vant +ensued +rodrigo +dictatorship +martyrs +twenties +##н +towed +incidence +marta +rainforest +sai +scaled +##cles +oceanic +qualifiers +symphonic +mcbride +dislike +generalized +aubrey +colonization +##iation +##lion +##ssing +disliked +lublin +salesman +##ulates +spherical +whatsoever +sweating +avalon +contention +punt +severity +alderman +atari +##dina +##grant +##rop +scarf +seville +vertices +annexation +fairfield +fascination +inspiring +launches +palatinate +regretted +##rca +feral +##iom +elk +nap +olsen +reddy +yong +##leader +##iae +garment +transports +feng +gracie +outrage +viceroy +insides +##esis +breakup +grady +organizer +softer +grimaced +222 +murals +galicia +arranging +vectors +##rsten +bas +##sb +##cens +sloan +##eka +bitten +ara +fender +nausea +bumped +kris +banquet +comrades +detector +persisted +##llan +adjustment +endowed +cinemas +##shot +sellers +##uman +peek +epa +kindly +neglect +simpsons +talon +mausoleum +runaway +hangul +lookout +##cic +rewards +coughed +acquainted +chloride +##ald +quicker +accordion +neolithic +##qa +artemis +coefficient +lenny +pandora +tx +##xed +ecstasy +litter +segunda 
+chairperson +gemma +hiss +rumor +vow +nasal +antioch +compensate +patiently +transformers +##eded +judo +morrow +penis +posthumous +philips +bandits +husbands +denote +flaming +##any +##phones +langley +yorker +1760 +walters +##uo +##kle +gubernatorial +fatty +samsung +leroy +outlaw +##nine +unpublished +poole +jakob +##ᵢ +##ₙ +crete +distorted +superiority +##dhi +intercept +crust +mig +claus +crashes +positioning +188 +stallion +301 +frontal +armistice +##estinal +elton +aj +encompassing +camel +commemorated +malaria +woodward +calf +cigar +penetrate +##oso +willard +##rno +##uche +illustrate +amusing +convergence +noteworthy +##lma +##rva +journeys +realise +manfred +##sable +410 +##vocation +hearings +fiance +##posed +educators +provoked +adjusting +##cturing +modular +stockton +paterson +vlad +rejects +electors +selena +maureen +##tres +uber +##rce +swirled +##num +proportions +nanny +pawn +naturalist +parma +apostles +awoke +ethel +wen +##bey +monsoon +overview +##inating +mccain +rendition +risky +adorned +##ih +equestrian +germain +nj +conspicuous +confirming +##yoshi +shivering +##imeter +milestone +rumours +flinched +bounds +smacked +token +##bei +lectured +automobiles +##shore +impacted +##iable +nouns +nero +##leaf +ismail +prostitute +trams +##lace +bridget +sud +stimulus +impressions +reins +revolves +##oud +##gned +giro +honeymoon +##swell +criterion +##sms +##uil +libyan +prefers +##osition +211 +preview +sucks +accusation +bursts +metaphor +diffusion +tolerate +faye +betting +cinematographer +liturgical +specials +bitterly +humboldt +##ckle +flux +rattled +##itzer +archaeologists +odor +authorised +marshes +discretion +##ов +alarmed +archaic +inverse +##leton +explorers +##pine +drummond +tsunami +woodlands +##minate +##tland +booklet +insanity +owning +insert +crafted +calculus +##tore +receivers +##bt +stung +##eca +##nched +prevailing +travellers +eyeing +lila +graphs +##borne +178 +julien +##won +morale +adaptive +therapist +erica +cw +libertarian +bowman +pitches +vita +##ional +crook +##ads +##entation +caledonia +mutiny +##sible +1840s +automation +##ß +flock +##pia +ironic +pathology +##imus +remarried +##22 +joker +withstand +energies +##att +shropshire +hostages +madeleine +tentatively +conflicting +mateo +recipes +euros +ol +mercenaries +nico +##ndon +albuquerque +augmented +mythical +bel +freud +##child +cough +##lica +365 +freddy +lillian +genetically +nuremberg +calder +209 +bonn +outdoors +paste +suns +urgency +vin +restraint +tyson +##cera +##selle +barrage +bethlehem +kahn +##par +mounts +nippon +barony +happier +ryu +makeshift +sheldon +blushed +castillo +barking +listener +taped +bethel +fluent +headlines +pornography +rum +disclosure +sighing +mace +doubling +gunther +manly +##plex +rt +interventions +physiological +forwards +emerges +##tooth +##gny +compliment +rib +recession +visibly +barge +faults +connector +exquisite +prefect +##rlin +patio +##cured +elevators +brandt +italics +pena +173 +wasp +satin +ea +botswana +graceful +respectable +##jima +##rter +##oic +franciscan +generates +##dl +alfredo +disgusting +##olate +##iously +sherwood +warns +cod +promo +cheryl +sino +##ة +##escu +twitch +##zhi +brownish +thom +ortiz +##dron +densely +##beat +carmel +reinforce +##bana +187 +anastasia +downhill +vertex +contaminated +remembrance +harmonic +homework +##sol +fiancee +gears +olds +angelica +loft +ramsay +quiz +colliery +sevens +##cape +autism +##hil +walkway +##boats +ruben +abnormal +ounce +khmer +##bbe +zachary +bedside +morphology +punching 
+##olar +sparrow +convinces +##35 +hewitt +queer +remastered +rods +mabel +solemn +notified +lyricist +symmetric +##xide +174 +encore +passports +wildcats +##uni +baja +##pac +mildly +##ease +bleed +commodity +mounds +glossy +orchestras +##omo +damian +prelude +ambitions +##vet +awhile +remotely +##aud +asserts +imply +##iques +distinctly +modelling +remedy +##dded +windshield +dani +xiao +##endra +audible +powerplant +1300 +invalid +elemental +acquisitions +##hala +immaculate +libby +plata +smuggling +ventilation +denoted +minh +##morphism +430 +differed +dion +kelley +lore +mocking +sabbath +spikes +hygiene +drown +runoff +stylized +tally +liberated +aux +interpreter +righteous +aba +siren +reaper +pearce +millie +##cier +##yra +gaius +##iso +captures +##ttering +dorm +claudio +##sic +benches +knighted +blackness +##ored +discount +fumble +oxidation +routed +##ς +novak +perpendicular +spoiled +fracture +splits +##urt +pads +topology +##cats +axes +fortunate +offenders +protestants +esteem +221 +broadband +convened +frankly +hound +prototypes +isil +facilitated +keel +##sher +sahara +awaited +bubba +orb +prosecutors +186 +hem +520 +##xing +relaxing +remnant +romney +sorted +slalom +stefano +ulrich +##active +exemption +folder +pauses +foliage +hitchcock +epithet +204 +criticisms +##aca +ballistic +brody +hinduism +chaotic +youths +equals +##pala +pts +thicker +analogous +capitalist +improvised +overseeing +sinatra +ascended +beverage +##tl +straightforward +##kon +curran +##west +bois +325 +induce +surveying +emperors +sax +unpopular +##kk +cartoonist +fused +##mble +unto +##yuki +localities +##cko +##ln +darlington +slain +academie +lobbying +sediment +puzzles +##grass +defiance +dickens +manifest +tongues +alumnus +arbor +coincide +184 +appalachian +mustafa +examiner +cabaret +traumatic +yves +bracelet +draining +heroin +magnum +baths +odessa +consonants +mitsubishi +##gua +kellan +vaudeville +##fr +joked +null +straps +probation +##ław +ceded +interfaces +##pas +##zawa +blinding +viet +224 +rothschild +museo +640 +huddersfield +##vr +tactic +##storm +brackets +dazed +incorrectly +##vu +reg +glazed +fearful +manifold +benefited +irony +##sun +stumbling +##rte +willingness +balkans +mei +wraps +##aba +injected +##lea +gu +syed +harmless +##hammer +bray +takeoff +poppy +timor +cardboard +astronaut +purdue +weeping +southbound +cursing +stalls +diagonal +##neer +lamar +bryce +comte +weekdays +harrington +##uba +negatively +##see +lays +grouping +##cken +##henko +affirmed +halle +modernist +##lai +hodges +smelling +aristocratic +baptized +dismiss +justification +oilers +##now +coupling +qin +snack +healer +##qing +gardener +layla +battled +formulated +stephenson +gravitational +##gill +##jun +1768 +granny +coordinating +suites +##cd +##ioned +monarchs +##cote +##hips +sep +blended +apr +barrister +deposition +fia +mina +policemen +paranoid +##pressed +churchyard +covert +crumpled +creep +abandoning +tr +transmit +conceal +barr +understands +readiness +spire +##cology +##enia +##erry +610 +startling +unlock +vida +bowled +slots +##nat +##islav +spaced +trusting +admire +rig +##ink +slack +##70 +mv +207 +casualty +##wei +classmates +##odes +##rar +##rked +amherst +furnished +evolve +foundry +menace +mead +##lein +flu +wesleyan +##kled +monterey +webber +##vos +wil +##mith +##на +bartholomew +justices +restrained +##cke +amenities +191 +mediated +sewage +trenches +ml +mainz +##thus +1800s +##cula +##inski +caine +bonding +213 +converts +spheres +superseded +marianne +crypt +sweaty +ensign 
+historia +##br +spruce +##post +##ask +forks +thoughtfully +yukon +pamphlet +ames +##uter +karma +##yya +bryn +negotiation +sighs +incapable +##mbre +##ntial +actresses +taft +##mill +luce +prevailed +##amine +1773 +motionless +envoy +testify +investing +sculpted +instructors +provence +kali +cullen +horseback +##while +goodwin +##jos +gaa +norte +##ldon +modify +wavelength +abd +214 +skinned +sprinter +forecast +scheduling +marries +squared +tentative +##chman +boer +##isch +bolts +swap +fisherman +assyrian +impatiently +guthrie +martins +murdoch +194 +tanya +nicely +dolly +lacy +med +##45 +syn +decks +fashionable +millionaire +##ust +surfing +##ml +##ision +heaved +tammy +consulate +attendees +routinely +197 +fuse +saxophonist +backseat +malaya +##lord +scowl +tau +##ishly +193 +sighted +steaming +##rks +303 +911 +##holes +##hong +ching +##wife +bless +conserved +jurassic +stacey +unix +zion +chunk +rigorous +blaine +198 +peabody +slayer +dismay +brewers +nz +##jer +det +##glia +glover +postwar +int +penetration +sylvester +imitation +vertically +airlift +heiress +knoxville +viva +##uin +390 +macon +##rim +##fighter +##gonal +janice +##orescence +##wari +marius +belongings +leicestershire +196 +blanco +inverted +preseason +sanity +sobbing +##due +##elt +##dled +collingwood +regeneration +flickering +shortest +##mount +##osi +feminism +##lat +sherlock +cabinets +fumbled +northbound +precedent +snaps +##mme +researching +##akes +guillaume +insights +manipulated +vapor +neighbour +sap +gangster +frey +f1 +stalking +scarcely +callie +barnett +tendencies +audi +doomed +assessing +slung +panchayat +ambiguous +bartlett +##etto +distributing +violating +wolverhampton +##hetic +swami +histoire +##urus +liable +pounder +groin +hussain +larsen +popping +surprises +##atter +vie +curt +##station +mute +relocate +musicals +authorization +richter +##sef +immortality +tna +bombings +##press +deteriorated +yiddish +##acious +robbed +colchester +cs +pmid +ao +verified +balancing +apostle +swayed +recognizable +oxfordshire +retention +nottinghamshire +contender +judd +invitational +shrimp +uhf +##icient +cleaner +longitudinal +tanker +##mur +acronym +broker +koppen +sundance +suppliers +##gil +4000 +clipped +fuels +petite +##anne +landslide +helene +diversion +populous +landowners +auspices +melville +quantitative +##xes +ferries +nicky +##llus +doo +haunting +roche +carver +downed +unavailable +##pathy +approximation +hiroshima +##hue +garfield +valle +comparatively +keyboardist +traveler +##eit +congestion +calculating +subsidiaries +##bate +serb +modernization +fairies +deepened +ville +averages +##lore +inflammatory +tonga +##itch +co₂ +squads +##hea +gigantic +serum +enjoyment +retailer +verona +35th +cis +##phobic +magna +technicians +##vati +arithmetic +##sport +levin +##dation +amtrak +chow +sienna +##eyer +backstage +entrepreneurship +##otic +learnt +tao +##udy +worcestershire +formulation +baggage +hesitant +bali +sabotage +##kari +barren +enhancing +murmur +pl +freshly +putnam +syntax +aces +medicines +resentment +bandwidth +##sier +grins +chili +guido +##sei +framing +implying +gareth +lissa +genevieve +pertaining +admissions +geo +thorpe +proliferation +sato +bela +analyzing +parting +##gor +awakened +##isman +huddled +secrecy +##kling +hush +gentry +540 +dungeons +##ego +coasts +##utz +sacrificed +##chule +landowner +mutually +prevalence +programmer +adolescent +disrupted +seaside +gee +trusts +vamp +georgie +##nesian +##iol +schedules +sindh +##market +etched +hm +sparse +bey +beaux 
+scratching +gliding +unidentified +216 +collaborating +gems +jesuits +oro +accumulation +shaping +mbe +anal +##xin +231 +enthusiasts +newscast +##egan +janata +dewey +parkinson +179 +ankara +biennial +towering +dd +inconsistent +950 +##chet +thriving +terminate +cabins +furiously +eats +advocating +donkey +marley +muster +phyllis +leiden +##user +grassland +glittering +iucn +loneliness +217 +memorandum +armenians +##ddle +popularized +rhodesia +60s +lame +##illon +sans +bikini +header +orbits +##xx +##finger +##ulator +sharif +spines +biotechnology +strolled +naughty +yates +##wire +fremantle +milo +##mour +abducted +removes +##atin +humming +wonderland +##chrome +##ester +hume +pivotal +##rates +armand +grams +believers +elector +rte +apron +bis +scraped +##yria +endorsement +initials +##llation +eps +dotted +hints +buzzing +emigration +nearer +##tom +indicators +##ulu +coarse +neutron +protectorate +##uze +directional +exploits +pains +loire +1830s +proponents +guggenheim +rabbits +ritchie +305 +hectare +inputs +hutton +##raz +verify +##ako +boilers +longitude +##lev +skeletal +yer +emilia +citrus +compromised +##gau +pokemon +prescription +paragraph +eduard +cadillac +attire +categorized +kenyan +weddings +charley +##bourg +entertain +monmouth +##lles +nutrients +davey +mesh +incentive +practised +ecosystems +kemp +subdued +overheard +##rya +bodily +maxim +##nius +apprenticeship +ursula +##fight +lodged +rug +silesian +unconstitutional +patel +inspected +coyote +unbeaten +##hak +34th +disruption +convict +parcel +##cl +##nham +collier +implicated +mallory +##iac +##lab +susannah +winkler +##rber +shia +phelps +sediments +graphical +robotic +##sner +adulthood +mart +smoked +##isto +kathryn +clarified +##aran +divides +convictions +oppression +pausing +burying +##mt +federico +mathias +eileen +##tana +kite +hunched +##acies +189 +##atz +disadvantage +liza +kinetic +greedy +paradox +yokohama +dowager +trunks +ventured +##gement +gupta +vilnius +olaf +##thest +crimean +hopper +##ej +progressively +arturo +mouthed +arrondissement +##fusion +rubin +simulcast +oceania +##orum +##stra +##rred +busiest +intensely +navigator +cary +##vine +##hini +##bies +fife +rowe +rowland +posing +insurgents +shafts +lawsuits +activate +conor +inward +culturally +garlic +265 +##eering +eclectic +##hui +##kee +##nl +furrowed +vargas +meteorological +rendezvous +##aus +culinary +commencement +##dition +quota +##notes +mommy +salaries +overlapping +mule +##iology +##mology +sums +wentworth +##isk +##zione +mainline +subgroup +##illy +hack +plaintiff +verdi +bulb +differentiation +engagements +multinational +supplemented +bertrand +caller +regis +##naire +##sler +##arts +##imated +blossom +propagation +kilometer +viaduct +vineyards +##uate +beckett +optimization +golfer +songwriters +seminal +semitic +thud +volatile +evolving +ridley +##wley +trivial +distributions +scandinavia +jiang +##ject +wrestled +insistence +##dio +emphasizes +napkin +##ods +adjunct +rhyme +##ricted +##eti +hopeless +surrounds +tremble +32nd +smoky +##ntly +oils +medicinal +padded +steer +wilkes +219 +255 +concessions +hue +uniquely +blinded +landon +yahoo +##lane +hendrix +commemorating +dex +specify +chicks +##ggio +intercity +1400 +morley +##torm +highlighting +##oting +pang +oblique +stalled +##liner +flirting +newborn +1769 +bishopric +shaved +232 +currie +##ush +dharma +spartan +##ooped +favorites +smug +novella +sirens +abusive +creations +espana +##lage +paradigm +semiconductor +sheen +##rdo +##yen +##zak +nrl +renew +##pose +##tur 
+adjutant +marches +norma +##enity +ineffective +weimar +grunt +##gat +lordship +plotting +expenditure +infringement +lbs +refrain +av +mimi +mistakenly +postmaster +1771 +##bara +ras +motorsports +tito +199 +subjective +##zza +bully +stew +##kaya +prescott +1a +##raphic +##zam +bids +styling +paranormal +reeve +sneaking +exploding +katz +akbar +migrant +syllables +indefinitely +##ogical +destroys +replaces +applause +##phine +pest +##fide +218 +articulated +bertie +##thing +##cars +##ptic +courtroom +crowley +aesthetics +cummings +tehsil +hormones +titanic +dangerously +##ibe +stadion +jaenelle +auguste +ciudad +##chu +mysore +partisans +##sio +lucan +philipp +##aly +debating +henley +interiors +##rano +##tious +homecoming +beyonce +usher +henrietta +prepares +weeds +##oman +ely +plucked +##pire +##dable +luxurious +##aq +artifact +password +pasture +juno +maddy +minsk +##dder +##ologies +##rone +assessments +martian +royalist +1765 +examines +##mani +##rge +nino +223 +parry +scooped +relativity +##eli +##uting +##cao +congregational +noisy +traverse +##agawa +strikeouts +nickelodeon +obituary +transylvania +binds +depictions +polk +trolley +##yed +##lard +breeders +##under +dryly +hokkaido +1762 +strengths +stacks +bonaparte +connectivity +neared +prostitutes +stamped +anaheim +gutierrez +sinai +##zzling +bram +fresno +madhya +##86 +proton +##lena +##llum +##phon +reelected +wanda +##anus +##lb +ample +distinguishing +##yler +grasping +sermons +tomato +bland +stimulation +avenues +##eux +spreads +scarlett +fern +pentagon +assert +baird +chesapeake +ir +calmed +distortion +fatalities +##olis +correctional +pricing +##astic +##gina +prom +dammit +ying +collaborate +##chia +welterweight +33rd +pointer +substitution +bonded +umpire +communicating +multitude +paddle +##obe +federally +intimacy +##insky +betray +ssr +##lett +##lean +##lves +##therapy +airbus +##tery +functioned +ud +bearer +biomedical +netflix +##hire +##nca +condom +brink +ik +##nical +macy +##bet +flap +gma +experimented +jelly +lavender +##icles +##ulia +munro +##mian +##tial +rye +##rle +60th +gigs +hottest +rotated +predictions +fuji +bu +##erence +##omi +barangay +##fulness +##sas +clocks +##rwood +##liness +cereal +roe +wight +decker +uttered +babu +onion +xml +forcibly +##df +petra +sarcasm +hartley +peeled +storytelling +##42 +##xley +##ysis +##ffa +fibre +kiel +auditor +fig +harald +greenville +##berries +geographically +nell +quartz +##athic +cemeteries +##lr +crossings +nah +holloway +reptiles +chun +sichuan +snowy +660 +corrections +##ivo +zheng +ambassadors +blacksmith +fielded +fluids +hardcover +turnover +medications +melvin +academies +##erton +ro +roach +absorbing +spaniards +colton +##founded +outsider +espionage +kelsey +245 +edible +##ulf +dora +establishes +##sham +##tries +contracting +##tania +cinematic +costello +nesting +##uron +connolly +duff +##nology +mma +##mata +fergus +sexes +gi +optics +spectator +woodstock +banning +##hee +##fle +differentiate +outfielder +refinery +226 +312 +gerhard +horde +lair +drastically +##udi +landfall +##cheng +motorsport +odi +##achi +predominant +quay +skins +##ental +edna +harshly +complementary +murdering +##aves +wreckage +##90 +ono +outstretched +lennox +munitions +galen +reconcile +470 +scalp +bicycles +gillespie +questionable +rosenberg +guillermo +hostel +jarvis +kabul +volvo +opium +yd +##twined +abuses +decca +outpost +##cino +sensible +neutrality +##64 +ponce +anchorage +atkins +turrets +inadvertently +disagree +libre +vodka +reassuring +weighs +##yal 
+glide +jumper +ceilings +repertory +outs +stain +##bial +envy +##ucible +smashing +heightened +policing +hyun +mixes +lai +prima +##ples +celeste +##bina +lucrative +intervened +kc +manually +##rned +stature +staffed +bun +bastards +nairobi +priced +##auer +thatcher +##kia +tripped +comune +##ogan +##pled +brasil +incentives +emanuel +hereford +musica +##kim +benedictine +biennale +##lani +eureka +gardiner +rb +knocks +sha +##ael +##elled +##onate +efficacy +ventura +masonic +sanford +maize +leverage +##feit +capacities +santana +##aur +novelty +vanilla +##cter +##tour +benin +##oir +##rain +neptune +drafting +tallinn +##cable +humiliation +##boarding +schleswig +fabian +bernardo +liturgy +spectacle +sweeney +pont +routledge +##tment +cosmos +ut +hilt +sleek +universally +##eville +##gawa +typed +##dry +favors +allegheny +glaciers +##rly +recalling +aziz +##log +parasite +requiem +auf +##berto +##llin +illumination +##breaker +##issa +festivities +bows +govern +vibe +vp +333 +sprawled +larson +pilgrim +bwf +leaping +##rts +##ssel +alexei +greyhound +hoarse +##dler +##oration +seneca +##cule +gaping +##ulously +##pura +cinnamon +##gens +##rricular +craven +fantasies +houghton +engined +reigned +dictator +supervising +##oris +bogota +commentaries +unnatural +fingernails +spirituality +tighten +##tm +canadiens +protesting +intentional +cheers +sparta +##ytic +##iere +##zine +widen +belgarath +controllers +dodd +iaaf +navarre +##ication +defect +squire +steiner +whisky +##mins +560 +inevitably +tome +##gold +chew +##uid +##lid +elastic +##aby +streaked +alliances +jailed +regal +##ined +##phy +czechoslovak +narration +absently +##uld +bluegrass +guangdong +quran +criticizing +hose +hari +##liest +##owa +skier +streaks +deploy +##lom +raft +bose +dialed +huff +##eira +haifa +simplest +bursting +endings +ib +sultanate +##titled +franks +whitman +ensures +sven +##ggs +collaborators +forster +organising +ui +banished +napier +injustice +teller +layered +thump +##otti +roc +battleships +evidenced +fugitive +sadie +robotics +##roud +equatorial +geologist +##iza +yielding +##bron +##sr +internationale +mecca +##diment +sbs +skyline +toad +uploaded +reflective +undrafted +lal +leafs +bayern +##dai +lakshmi +shortlisted +##stick +##wicz +camouflage +donate +af +christi +lau +##acio +disclosed +nemesis +1761 +assemble +straining +northamptonshire +tal +##asi +bernardino +premature +heidi +42nd +coefficients +galactic +reproduce +buzzed +sensations +zionist +monsieur +myrtle +##eme +archery +strangled +musically +viewpoint +antiquities +bei +trailers +seahawks +cured +pee +preferring +tasmanian +lange +sul +##mail +##working +colder +overland +lucivar +massey +gatherings +haitian +##smith +disapproval +flaws +##cco +##enbach +1766 +npr +##icular +boroughs +creole +forums +techno +1755 +dent +abdominal +streetcar +##eson +##stream +procurement +gemini +predictable +##tya +acheron +christoph +feeder +fronts +vendor +bernhard +jammu +tumors +slang +##uber +goaltender +twists +curving +manson +vuelta +mer +peanut +confessions +pouch +unpredictable +allowance +theodor +vascular +##factory +bala +authenticity +metabolic +coughing +nanjing +##cea +pembroke +##bard +splendid +36th +ff +hourly +##ahu +elmer +handel +##ivate +awarding +thrusting +dl +experimentation +##hesion +##46 +caressed +entertained +steak +##rangle +biologist +orphans +baroness +oyster +stepfather +##dridge +mirage +reefs +speeding +##31 +barons +1764 +227 +inhabit +preached +repealed +##tral +honoring +boogie +captives +administer +johanna 
+##imate +gel +suspiciously +1767 +sobs +##dington +backbone +hayward +garry +##folding +##nesia +maxi +##oof +##ppe +ellison +galileo +##stand +crimea +frenzy +amour +bumper +matrices +natalia +baking +garth +palestinians +##grove +smack +conveyed +ensembles +gardening +##manship +##rup +##stituting +1640 +harvesting +topography +jing +shifters +dormitory +##carriage +##lston +ist +skulls +##stadt +dolores +jewellery +sarawak +##wai +##zier +fences +christy +confinement +tumbling +credibility +fir +stench +##bria +##plication +##nged +##sam +virtues +##belt +marjorie +pba +##eem +##made +celebrates +schooner +agitated +barley +fulfilling +anthropologist +##pro +restrict +novi +regulating +##nent +padres +##rani +##hesive +loyola +tabitha +milky +olson +proprietor +crambidae +guarantees +intercollegiate +ljubljana +hilda +##sko +ignorant +hooded +##lts +sardinia +##lidae +##vation +frontman +privileged +witchcraft +##gp +jammed +laude +poking +##than +bracket +amazement +yunnan +##erus +maharaja +linnaeus +264 +commissioning +milano +peacefully +##logies +akira +rani +regulator +##36 +grasses +##rance +luzon +crows +compiler +gretchen +seaman +edouard +tab +buccaneers +ellington +hamlets +whig +socialists +##anto +directorial +easton +mythological +##kr +##vary +rhineland +semantic +taut +dune +inventions +succeeds +##iter +replication +branched +##pired +jul +prosecuted +kangaroo +penetrated +##avian +middlesbrough +doses +bleak +madam +predatory +relentless +##vili +reluctance +##vir +hailey +crore +silvery +1759 +monstrous +swimmers +transmissions +hawthorn +informing +##eral +toilets +caracas +crouch +kb +##sett +295 +cartel +hadley +##aling +alexia +yvonne +##biology +cinderella +eton +superb +blizzard +stabbing +industrialist +maximus +##gm +##orus +groves +maud +clade +oversized +comedic +##bella +rosen +nomadic +fulham +montane +beverages +galaxies +redundant +swarm +##rot +##folia +##llis +buckinghamshire +fen +bearings +bahadur +##rom +gilles +phased +dynamite +faber +benoit +vip +##ount +##wd +booking +fractured +tailored +anya +spices +westwood +cairns +auditions +inflammation +steamed +##rocity +##acion +##urne +skyla +thereof +watford +torment +archdeacon +transforms +lulu +demeanor +fucked +serge +##sor +mckenna +minas +entertainer +##icide +caress +originate +residue +##sty +1740 +##ilised +##org +beech +##wana +subsidies +##ghton +emptied +gladstone +ru +firefighters +voodoo +##rcle +het +nightingale +tamara +edmond +ingredient +weaknesses +silhouette +285 +compatibility +withdrawing +hampson +##mona +anguish +giggling +##mber +bookstore +##jiang +southernmost +tilting +##vance +bai +economical +rf +briefcase +dreadful +hinted +projections +shattering +totaling +##rogate +analogue +indicted +periodical +fullback +##dman +haynes +##tenberg +##ffs +##ishment +1745 +thirst +stumble +penang +vigorous +##ddling +##kor +##lium +octave +##ove +##enstein +##inen +##ones +siberian +##uti +cbn +repeal +swaying +##vington +khalid +tanaka +unicorn +otago +plastered +lobe +riddle +##rella +perch +##ishing +croydon +filtered +graeme +tripoli +##ossa +crocodile +##chers +sufi +mined +##tung +inferno +lsu +##phi +swelled +utilizes +£2 +cale +periodicals +styx +hike +informally +coop +lund +##tidae +ala +hen +qui +transformations +disposed +sheath +chickens +##cade +fitzroy +sas +silesia +unacceptable +odisha +1650 +sabrina +pe +spokane +ratios +athena +massage +shen +dilemma +##drum +##riz +##hul +corona +doubtful +niall +##pha +##bino +fines +cite +acknowledging +bangor +ballard 
+bathurst +##resh +huron +mustered +alzheimer +garments +kinase +tyre +warship +##cp +flashback +pulmonary +braun +cheat +kamal +cyclists +constructions +grenades +ndp +traveller +excuses +stomped +signalling +trimmed +futsal +mosques +relevance +##wine +wta +##23 +##vah +##lter +hoc +##riding +optimistic +##´s +deco +sim +interacting +rejecting +moniker +waterways +##ieri +##oku +mayors +gdansk +outnumbered +pearls +##ended +##hampton +fairs +totals +dominating +262 +notions +stairway +compiling +pursed +commodities +grease +yeast +##jong +carthage +griffiths +residual +amc +contraction +laird +sapphire +##marine +##ivated +amalgamation +dissolve +inclination +lyle +packaged +altitudes +suez +canons +graded +lurched +narrowing +boasts +guise +wed +enrico +##ovsky +rower +scarred +bree +cub +iberian +protagonists +bargaining +proposing +trainers +voyages +vans +fishes +##aea +##ivist +##verance +encryption +artworks +kazan +sabre +cleopatra +hepburn +rotting +supremacy +mecklenburg +##brate +burrows +hazards +outgoing +flair +organizes +##ctions +scorpion +##usions +boo +234 +chevalier +dunedin +slapping +##34 +ineligible +pensions +##38 +##omic +manufactures +emails +bismarck +238 +weakening +blackish +ding +mcgee +quo +##rling +northernmost +xx +manpower +greed +sampson +clicking +##ange +##horpe +##inations +##roving +torre +##eptive +##moral +symbolism +38th +asshole +meritorious +outfits +splashed +biographies +sprung +astros +##tale +302 +737 +filly +raoul +nw +tokugawa +linden +clubhouse +##apa +tracts +romano +##pio +putin +tags +##note +chained +dickson +gunshot +moe +gunn +rashid +##tails +zipper +##bas +##nea +contrasted +##ply +##udes +plum +pharaoh +##pile +aw +comedies +ingrid +sandwiches +subdivisions +1100 +mariana +nokia +kamen +hz +delaney +veto +herring +##words +possessive +outlines +##roup +siemens +stairwell +rc +gallantry +messiah +palais +yells +233 +zeppelin +##dm +bolivar +##cede +smackdown +mckinley +##mora +##yt +muted +geologic +finely +unitary +avatar +hamas +maynard +rees +bog +contrasting +##rut +liv +chico +disposition +pixel +##erate +becca +dmitry +yeshiva +narratives +##lva +##ulton +mercenary +sharpe +tempered +navigate +stealth +amassed +keynes +##lini +untouched +##rrie +havoc +lithium +##fighting +abyss +graf +southward +wolverine +balloons +implements +ngos +transitions +##icum +ambushed +concacaf +dormant +economists +##dim +costing +csi +rana +universite +boulders +verity +##llon +collin +mellon +misses +cypress +fluorescent +lifeless +spence +##ulla +crewe +shepard +pak +revelations +##م +jolly +gibbons +paw +##dro +##quel +freeing +##test +shack +fries +palatine +##51 +##hiko +accompaniment +cruising +recycled +##aver +erwin +sorting +synthesizers +dyke +realities +sg +strides +enslaved +wetland +##ghan +competence +gunpowder +grassy +maroon +reactors +objection +##oms +carlson +gearbox +macintosh +radios +shelton +##sho +clergyman +prakash +254 +mongols +trophies +oricon +228 +stimuli +twenty20 +cantonese +cortes +mirrored +##saurus +bhp +cristina +melancholy +##lating +enjoyable +nuevo +##wny +downfall +schumacher +##ind +banging +lausanne +rumbled +paramilitary +reflex +ax +amplitude +migratory +##gall +##ups +midi +barnard +lastly +sherry +##hp +##nall +keystone +##kra +carleton +slippery +##53 +coloring +foe +socket +otter +##rgos +mats +##tose +consultants +bafta +bison +topping +##km +490 +primal +abandonment +transplant +atoll +hideous +mort +pained +reproduced +tae +howling +##turn +unlawful +billionaire +hotter +poised +lansing 
+##chang +dinamo +retro +messing +nfc +domesday +##mina +blitz +timed +##athing +##kley +ascending +gesturing +##izations +signaled +tis +chinatown +mermaid +savanna +jameson +##aint +catalina +##pet +##hers +cochrane +cy +chatting +##kus +alerted +computation +mused +noelle +majestic +mohawk +campo +octagonal +##sant +##hend +241 +aspiring +##mart +comprehend +iona +paralyzed +shimmering +swindon +rhone +##eley +reputed +configurations +pitchfork +agitation +francais +gillian +lipstick +##ilo +outsiders +pontifical +resisting +bitterness +sewer +rockies +##edd +##ucher +misleading +1756 +exiting +galloway +##nging +risked +##heart +246 +commemoration +schultz +##rka +integrating +##rsa +poses +shrieked +##weiler +guineas +gladys +jerking +owls +goldsmith +nightly +penetrating +##unced +lia +##33 +ignited +betsy +##aring +##thorpe +follower +vigorously +##rave +coded +kiran +knit +zoology +tbilisi +##28 +##bered +repository +govt +deciduous +dino +growling +##bba +enhancement +unleashed +chanting +pussy +biochemistry +##eric +kettle +repression +toxicity +nrhp +##arth +##kko +##bush +ernesto +commended +outspoken +242 +mca +parchment +sms +kristen +##aton +bisexual +raked +glamour +navajo +a2 +conditioned +showcased +##hma +spacious +youthful +##esa +usl +appliances +junta +brest +layne +conglomerate +enchanted +chao +loosened +picasso +circulating +inspect +montevideo +##centric +##kti +piazza +spurred +##aith +bari +freedoms +poultry +stamford +lieu +##ect +indigo +sarcastic +bahia +stump +attach +dvds +frankenstein +lille +approx +scriptures +pollen +##script +nmi +overseen +##ivism +tides +proponent +newmarket +inherit +milling +##erland +centralized +##rou +distributors +credentials +drawers +abbreviation +##lco +##xon +downing +uncomfortably +ripe +##oes +erase +franchises +##ever +populace +##bery +##khar +decomposition +pleas +##tet +daryl +sabah +##stle +##wide +fearless +genie +lesions +annette +##ogist +oboe +appendix +nair +dripped +petitioned +maclean +mosquito +parrot +rpg +hampered +1648 +operatic +reservoirs +##tham +irrelevant +jolt +summarized +##fp +medallion +##taff +##− +clawed +harlow +narrower +goddard +marcia +bodied +fremont +suarez +altering +tempest +mussolini +porn +##isms +sweetly +oversees +walkers +solitude +grimly +shrines +hk +ich +supervisors +hostess +dietrich +legitimacy +brushes +expressive +##yp +dissipated +##rse +localized +systemic +##nikov +gettysburg +##js +##uaries +dialogues +muttering +251 +housekeeper +sicilian +discouraged +##frey +beamed +kaladin +halftime +kidnap +##amo +##llet +1754 +synonymous +depleted +instituto +insulin +reprised +##opsis +clashed +##ctric +interrupting +radcliffe +insisting +medici +1715 +ejected +playfully +turbulent +##47 +starvation +##rini +shipment +rebellious +petersen +verification +merits +##rified +cakes +##charged +1757 +milford +shortages +spying +fidelity +##aker +emitted +storylines +harvested +seismic +##iform +cheung +kilda +theoretically +barbie +lynx +##rgy +##tius +goblin +mata +poisonous +##nburg +reactive +residues +obedience +##евич +conjecture +##rac +401 +hating +sixties +kicker +moaning +motown +##bha +emancipation +neoclassical +##hering +consoles +ebert +professorship +##tures +sustaining +assaults +obeyed +affluent +incurred +tornadoes +##eber +##zow +emphasizing +highlanders +cheated +helmets +##ctus +internship +terence +bony +executions +legislators +berries +peninsular +tinged +##aco +1689 +amplifier +corvette +ribbons +lavish +pennant +##lander +worthless +##chfield +##forms +mariano 
+pyrenees +expenditures +##icides +chesterfield +mandir +tailor +39th +sergey +nestled +willed +aristocracy +devotees +goodnight +raaf +rumored +weaponry +remy +appropriations +harcourt +burr +riaa +##lence +limitation +unnoticed +guo +soaking +swamps +##tica +collapsing +tatiana +descriptive +brigham +psalm +##chment +maddox +##lization +patti +caliph +##aja +akron +injuring +serra +##ganj +basins +##sari +astonished +launcher +##church +hilary +wilkins +sewing +##sf +stinging +##fia +##ncia +underwood +startup +##ition +compilations +vibrations +embankment +jurist +##nity +bard +juventus +groundwater +kern +palaces +helium +boca +cramped +marissa +soto +##worm +jae +princely +##ggy +faso +bazaar +warmly +##voking +229 +pairing +##lite +##grate +##nets +wien +freaked +ulysses +rebirth +##alia +##rent +mummy +guzman +jimenez +stilled +##nitz +trajectory +tha +woken +archival +professions +##pts +##pta +hilly +shadowy +shrink +##bolt +norwood +glued +migrate +stereotypes +devoid +##pheus +625 +evacuate +horrors +infancy +gotham +knowles +optic +downloaded +sachs +kingsley +parramatta +darryl +mor +##onale +shady +commence +confesses +kan +##meter +##placed +marlborough +roundabout +regents +frigates +io +##imating +gothenburg +revoked +carvings +clockwise +convertible +intruder +##sche +banged +##ogo +vicky +bourgeois +##mony +dupont +footing +##gum +pd +##real +buckle +yun +penthouse +sane +720 +serviced +stakeholders +neumann +bb +##eers +comb +##gam +catchment +pinning +rallies +typing +##elles +forefront +freiburg +sweetie +giacomo +widowed +goodwill +worshipped +aspirations +midday +##vat +fishery +##trick +bournemouth +turk +243 +hearth +ethanol +guadalajara +murmurs +sl +##uge +afforded +scripted +##hta +wah +##jn +coroner +translucent +252 +memorials +puck +progresses +clumsy +##race +315 +candace +recounted +##27 +##slin +##uve +filtering +##mac +howl +strata +heron +leveled +##ays +dubious +##oja +##т +##wheel +citations +exhibiting +##laya +##mics +##pods +turkic +##lberg +injunction +##ennial +##mit +antibodies +##44 +organise +##rigues +cardiovascular +cushion +inverness +##zquez +dia +cocoa +sibling +##tman +##roid +expanse +feasible +tunisian +algiers +##relli +rus +bloomberg +dso +westphalia +bro +tacoma +281 +downloads +##ours +konrad +duran +##hdi +continuum +jett +compares +legislator +secession +##nable +##gues +##zuka +translating +reacher +##gley +##ła +aleppo +##agi +tc +orchards +trapping +linguist +versatile +drumming +postage +calhoun +superiors +##mx +barefoot +leary +##cis +ignacio +alfa +kaplan +##rogen +bratislava +mori +##vot +disturb +haas +313 +cartridges +gilmore +radiated +salford +tunic +hades +##ulsive +archeological +delilah +magistrates +auditioned +brewster +charters +empowerment +blogs +cappella +dynasties +iroquois +whipping +##krishna +raceway +truths +myra +weaken +judah +mcgregor +##horse +mic +refueling +37th +burnley +bosses +markus +premio +query +##gga +dunbar +##economic +darkest +lyndon +sealing +commendation +reappeared +##mun +addicted +ezio +slaughtered +satisfactory +shuffle +##eves +##thic +##uj +fortification +warrington +##otto +resurrected +fargo +mane +##utable +##lei +##space +foreword +ox +##aris +##vern +abrams +hua +##mento +sakura +##alo +uv +sentimental +##skaya +midfield +##eses +sturdy +scrolls +macleod +##kyu +entropy +##lance +mitochondrial +cicero +excelled +thinner +convoys +perceive +##oslav +##urable +systematically +grind +burkina +287 +##tagram +ops +##aman +guantanamo +##cloth +##tite +forcefully +wavy +##jou 
+pointless +##linger +##tze +layton +portico +superficial +clerical +outlaws +##hism +burials +muir +##inn +creditors +hauling +rattle +##leg +calais +monde +archers +reclaimed +dwell +wexford +hellenic +falsely +remorse +##tek +dough +furnishings +##uttered +gabon +neurological +novice +##igraphy +contemplated +pulpit +nightstand +saratoga +##istan +documenting +pulsing +taluk +##firmed +busted +marital +##rien +disagreements +wasps +##yes +hodge +mcdonnell +mimic +fran +pendant +dhabi +musa +##nington +congratulations +argent +darrell +concussion +losers +regrets +thessaloniki +reversal +donaldson +hardwood +thence +achilles +ritter +##eran +demonic +jurgen +prophets +goethe +eki +classmate +buff +##cking +yank +irrational +##inging +perished +seductive +qur +sourced +##crat +##typic +mustard +ravine +barre +horizontally +characterization +phylogenetic +boise +##dit +##runner +##tower +brutally +intercourse +seduce +##bbing +fay +ferris +ogden +amar +nik +unarmed +##inator +evaluating +kyrgyzstan +sweetness +##lford +##oki +mccormick +meiji +notoriety +stimulate +disrupt +figuring +instructional +mcgrath +##zoo +groundbreaking +##lto +flinch +khorasan +agrarian +bengals +mixer +radiating +##sov +ingram +pitchers +nad +tariff +##cript +tata +##codes +##emi +##ungen +appellate +lehigh +##bled +##giri +brawl +duct +texans +##ciation +##ropolis +skipper +speculative +vomit +doctrines +stresses +253 +davy +graders +whitehead +jozef +timely +cumulative +haryana +paints +appropriately +boon +cactus +##ales +##pid +dow +legions +##pit +perceptions +1730 +picturesque +##yse +periphery +rune +wr +##aha +celtics +sentencing +whoa +##erin +confirms +variance +425 +moines +mathews +spade +rave +m1 +fronted +fx +blending +alleging +reared +##gl +237 +##paper +grassroots +eroded +##free +##physical +directs +ordeal +##sław +accelerate +hacker +rooftop +##inia +lev +buys +cebu +devote +##lce +specialising +##ulsion +choreographed +repetition +warehouses +##ryl +paisley +tuscany +analogy +sorcerer +hash +huts +shards +descends +exclude +nix +chaplin +gaga +ito +vane +##drich +causeway +misconduct +limo +orchestrated +glands +jana +##kot +u2 +##mple +##sons +branching +contrasts +scoop +longed +##virus +chattanooga +##75 +syrup +cornerstone +##tized +##mind +##iaceae +careless +precedence +frescoes +##uet +chilled +consult +modelled +snatch +peat +##thermal +caucasian +humane +relaxation +spins +temperance +##lbert +occupations +lambda +hybrids +moons +mp3 +##oese +247 +rolf +societal +yerevan +ness +##ssler +befriended +mechanized +nominate +trough +boasted +cues +seater +##hom +bends +##tangle +conductors +emptiness +##lmer +eurasian +adriatic +tian +##cie +anxiously +lark +propellers +chichester +jock +ev +2a +##holding +credible +recounts +tori +loyalist +abduction +##hoot +##redo +nepali +##mite +ventral +tempting +##ango +##crats +steered +##wice +javelin +dipping +laborers +prentice +looming +titanium +##ː +badges +emir +tensor +##ntation +egyptians +rash +denies +hawthorne +lombard +showers +wehrmacht +dietary +trojan +##reus +welles +executing +horseshoe +lifeboat +##lak +elsa +infirmary +nearing +roberta +boyer +mutter +trillion +joanne +##fine +##oked +sinks +vortex +uruguayan +clasp +sirius +##block +accelerator +prohibit +sunken +byu +chronological +diplomats +ochreous +510 +symmetrical +1644 +maia +##tology +salts +reigns +atrocities +##ия +hess +bared +issn +##vyn +cater +saturated +##cycle +##isse +sable +voyager +dyer +yusuf +##inge +fountains +wolff +##39 +##nni +engraving +rollins 
+atheist +ominous +##ault +herr +chariot +martina +strung +##fell +##farlane +horrific +sahib +gazes +saetan +erased +ptolemy +##olic +flushing +lauderdale +analytic +##ices +530 +navarro +beak +gorilla +herrera +broom +guadalupe +raiding +sykes +311 +bsc +deliveries +1720 +invasions +carmichael +tajikistan +thematic +ecumenical +sentiments +onstage +##rians +##brand +##sume +catastrophic +flanks +molten +##arns +waller +aimee +terminating +##icing +alternately +##oche +nehru +printers +outraged +##eving +empires +template +banners +repetitive +za +##oise +vegetarian +##tell +guiana +opt +cavendish +lucknow +synthesized +##hani +##mada +finalized +##ctable +fictitious +mayoral +unreliable +##enham +embracing +peppers +rbis +##chio +##neo +inhibition +slashed +togo +orderly +embroidered +safari +salty +236 +barron +benito +totaled +##dak +pubs +simulated +caden +devin +tolkien +momma +welding +sesame +##ept +gottingen +hardness +630 +shaman +temeraire +620 +adequately +pediatric +##kit +ck +assertion +radicals +composure +cadence +seafood +beaufort +lazarus +mani +warily +cunning +kurdistan +249 +cantata +##kir +ares +##41 +##clusive +nape +townland +geared +insulted +flutter +boating +violate +draper +dumping +malmo +##hh +##romatic +firearm +alta +bono +obscured +##clave +exceeds +panorama +unbelievable +##train +preschool +##essed +disconnected +installing +rescuing +secretaries +accessibility +##castle +##drive +##ifice +##film +bouts +slug +waterway +mindanao +##buro +##ratic +halves +##ل +calming +liter +maternity +adorable +bragg +electrification +mcc +##dote +roxy +schizophrenia +##body +munoz +kaye +whaling +239 +mil +tingling +tolerant +##ago +unconventional +volcanoes +##finder +deportivo +##llie +robson +kaufman +neuroscience +wai +deportation +masovian +scraping +converse +##bh +hacking +bulge +##oun +administratively +yao +580 +amp +mammoth +booster +claremont +hooper +nomenclature +pursuits +mclaughlin +melinda +##sul +catfish +barclay +substrates +taxa +zee +originals +kimberly +packets +padma +##ality +borrowing +ostensibly +solvent +##bri +##genesis +##mist +lukas +shreveport +veracruz +##ь +##lou +##wives +cheney +tt +anatolia +hobbs +##zyn +cyclic +radiant +alistair +greenish +siena +dat +independents +##bation +conform +pieter +hyper +applicant +bradshaw +spores +telangana +vinci +inexpensive +nuclei +322 +jang +nme +soho +spd +##ign +cradled +receptionist +pow +##43 +##rika +fascism +##ifer +experimenting +##ading +##iec +##region +345 +jocelyn +maris +stair +nocturnal +toro +constabulary +elgin +##kker +msc +##giving +##schen +##rase +doherty +doping +sarcastically +batter +maneuvers +##cano +##apple +##gai +##git +intrinsic +##nst +##stor +1753 +showtime +cafes +gasps +lviv +ushered +##thed +fours +restart +astonishment +transmitting +flyer +shrugs +##sau +intriguing +cones +dictated +mushrooms +medial +##kovsky +##elman +escorting +gaped +##26 +godfather +##door +##sell +djs +recaptured +timetable +vila +1710 +3a +aerodrome +mortals +scientology +##orne +angelina +mag +convection +unpaid +insertion +intermittent +lego +##nated +endeavor +kota +pereira +##lz +304 +bwv +glamorgan +insults +agatha +fey +##cend +fleetwood +mahogany +protruding +steamship +zeta +##arty +mcguire +suspense +##sphere +advising +urges +##wala +hurriedly +meteor +gilded +inline +arroyo +stalker +##oge +excitedly +revered +##cure +earle +introductory +##break +##ilde +mutants +puff +pulses +reinforcement +##haling +curses +lizards +stalk +correlated +##fixed +fallout +macquarie +##unas 
+bearded +denton +heaving +802 +##ocation +winery +assign +dortmund +##lkirk +everest +invariant +charismatic +susie +##elling +bled +lesley +telegram +sumner +bk +##ogen +##к +wilcox +needy +colbert +duval +##iferous +##mbled +allotted +attends +imperative +##hita +replacements +hawker +##inda +insurgency +##zee +##eke +casts +##yla +680 +ives +transitioned +##pack +##powering +authoritative +baylor +flex +cringed +plaintiffs +woodrow +##skie +drastic +ape +aroma +unfolded +commotion +nt +preoccupied +theta +routines +lasers +privatization +wand +domino +ek +clenching +nsa +strategically +showered +bile +handkerchief +pere +storing +christophe +insulting +316 +nakamura +romani +asiatic +magdalena +palma +cruises +stripping +405 +konstantin +soaring +##berman +colloquially +forerunner +havilland +incarcerated +parasites +sincerity +##utus +disks +plank +saigon +##ining +corbin +homo +ornaments +powerhouse +##tlement +chong +fastened +feasibility +idf +morphological +usable +##nish +##zuki +aqueduct +jaguars +keepers +##flies +aleksandr +faust +assigns +ewing +bacterium +hurled +tricky +hungarians +integers +wallis +321 +yamaha +##isha +hushed +oblivion +aviator +evangelist +friars +##eller +monograph +ode +##nary +airplanes +labourers +charms +##nee +1661 +hagen +tnt +rudder +fiesta +transcript +dorothea +ska +inhibitor +maccabi +retorted +raining +encompassed +clauses +menacing +1642 +lineman +##gist +vamps +##ape +##dick +gloom +##rera +dealings +easing +seekers +##nut +##pment +helens +unmanned +##anu +##isson +basics +##amy +##ckman +adjustments +1688 +brutality +horne +##zell +sui +##55 +##mable +aggregator +##thal +rhino +##drick +##vira +counters +zoom +##01 +##rting +mn +montenegrin +packard +##unciation +##♭ +##kki +reclaim +scholastic +thugs +pulsed +##icia +syriac +quan +saddam +banda +kobe +blaming +buddies +dissent +##lusion +##usia +corbett +jaya +delle +erratic +lexie +##hesis +435 +amiga +hermes +##pressing +##leen +chapels +gospels +jamal +##uating +compute +revolving +warp +##sso +##thes +armory +##eras +##gol +antrim +loki +##kow +##asian +##good +##zano +braid +handwriting +subdistrict +funky +pantheon +##iculate +concurrency +estimation +improper +juliana +##his +newcomers +johnstone +staten +communicated +##oco +##alle +sausage +stormy +##stered +##tters +superfamily +##grade +acidic +collateral +tabloid +##oped +##rza +bladder +austen +##ellant +mcgraw +##hay +hannibal +mein +aquino +lucifer +wo +badger +boar +cher +christensen +greenberg +interruption +##kken +jem +244 +mocked +bottoms +cambridgeshire +##lide +sprawling +##bbly +eastwood +ghent +synth +##buck +advisers +##bah +nominally +hapoel +qu +daggers +estranged +fabricated +towels +vinnie +wcw +misunderstanding +anglia +nothin +unmistakable +##dust +##lova +chilly +marquette +truss +##edge +##erine +reece +##lty +##chemist +##connected +272 +308 +41st +bash +raion +waterfalls +##ump +##main +labyrinth +queue +theorist +##istle +bharatiya +flexed +soundtracks +rooney +leftist +patrolling +wharton +plainly +alleviate +eastman +schuster +topographic +engages +immensely +unbearable +fairchild +1620 +dona +lurking +parisian +oliveira +ia +indictment +hahn +bangladeshi +##aster +vivo +##uming +##ential +antonia +expects +indoors +kildare +harlan +##logue +##ogenic +##sities +forgiven +##wat +childish +tavi +##mide +##orra +plausible +grimm +successively +scooted +##bola +##dget +##rith +spartans +emery +flatly +azure +epilogue +##wark +flourish +##iny +##tracted +##overs +##oshi +bestseller +distressed +receipt 
+spitting +hermit +topological +##cot +drilled +subunit +francs +##layer +eel +##fk +##itas +octopus +footprint +petitions +ufo +##say +##foil +interfering +leaking +palo +##metry +thistle +valiant +##pic +narayan +mcpherson +##fast +gonzales +##ym +##enne +dustin +novgorod +solos +##zman +doin +##raph +##patient +##meyer +soluble +ashland +cuffs +carole +pendleton +whistling +vassal +##river +deviation +revisited +constituents +rallied +rotate +loomed +##eil +##nting +amateurs +augsburg +auschwitz +crowns +skeletons +##cona +bonnet +257 +dummy +globalization +simeon +sleeper +mandal +differentiated +##crow +##mare +milne +bundled +exasperated +talmud +owes +segregated +##feng +##uary +dentist +piracy +props +##rang +devlin +##torium +malicious +paws +##laid +dependency +##ergy +##fers +##enna +258 +pistons +rourke +jed +grammatical +tres +maha +wig +512 +ghostly +jayne +##achal +##creen +##ilis +##lins +##rence +designate +##with +arrogance +cambodian +clones +showdown +throttle +twain +##ception +lobes +metz +nagoya +335 +braking +##furt +385 +roaming +##minster +amin +crippled +##37 +##llary +indifferent +hoffmann +idols +intimidating +1751 +261 +influenza +memo +onions +1748 +bandage +consciously +##landa +##rage +clandestine +observes +swiped +tangle +##ener +##jected +##trum +##bill +##lta +hugs +congresses +josiah +spirited +##dek +humanist +managerial +filmmaking +inmate +rhymes +debuting +grimsby +ur +##laze +duplicate +vigor +##tf +republished +bolshevik +refurbishment +antibiotics +martini +methane +newscasts +royale +horizons +levant +iain +visas +##ischen +paler +##around +manifestation +snuck +alf +chop +futile +pedestal +rehab +##kat +bmg +kerman +res +fairbanks +jarrett +abstraction +saharan +##zek +1746 +procedural +clearer +kincaid +sash +luciano +##ffey +crunch +helmut +##vara +revolutionaries +##tute +creamy +leach +##mmon +1747 +permitting +nes +plight +wendell +##lese +contra +ts +clancy +ipa +mach +staples +autopsy +disturbances +nueva +karin +pontiac +##uding +proxy +venerable +haunt +leto +bergman +expands +##helm +wal +##pipe +canning +celine +cords +obesity +##enary +intrusion +planner +##phate +reasoned +sequencing +307 +harrow +##chon +##dora +marred +mcintyre +repay +tarzan +darting +248 +harrisburg +margarita +repulsed +##hur +##lding +belinda +hamburger +novo +compliant +runways +bingham +registrar +skyscraper +ic +cuthbert +improvisation +livelihood +##corp +##elial +admiring +##dened +sporadic +believer +casablanca +popcorn +##29 +asha +shovel +##bek +##dice +coiled +tangible +##dez +casper +elsie +resin +tenderness +rectory +##ivision +avail +sonar +##mori +boutique +##dier +guerre +bathed +upbringing +vaulted +sandals +blessings +##naut +##utnant +1680 +306 +foxes +pia +corrosion +hesitantly +confederates +crystalline +footprints +shapiro +tirana +valentin +drones +45th +microscope +shipments +texted +inquisition +wry +guernsey +unauthorized +resigning +760 +ripple +schubert +stu +reassure +felony +##ardo +brittle +koreans +##havan +##ives +dun +implicit +tyres +##aldi +##lth +magnolia +##ehan +##puri +##poulos +aggressively +fei +gr +familiarity +##poo +indicative +##trust +fundamentally +jimmie +overrun +395 +anchors +moans +##opus +britannia +armagh +##ggle +purposely +seizing +##vao +bewildered +mundane +avoidance +cosmopolitan +geometridae +quartermaster +caf +415 +chatter +engulfed +gleam +purge +##icate +juliette +jurisprudence +guerra +revisions +##bn +casimir +brew +##jm +1749 +clapton +cloudy +conde +hermitage +278 +simulations +torches 
+vincenzo +matteo +##rill +hidalgo +booming +westbound +accomplishment +tentacles +unaffected +##sius +annabelle +flopped +sloping +##litz +dreamer +interceptor +vu +##loh +consecration +copying +messaging +breaker +climates +hospitalized +1752 +torino +afternoons +winfield +witnessing +##teacher +breakers +choirs +sawmill +coldly +##ege +sipping +haste +uninhabited +conical +bibliography +pamphlets +severn +edict +##oca +deux +illnesses +grips +##pl +rehearsals +sis +thinkers +tame +##keepers +1690 +acacia +reformer +##osed +##rys +shuffling +##iring +##shima +eastbound +ionic +rhea +flees +littered +##oum +rocker +vomiting +groaning +champ +overwhelmingly +civilizations +paces +sloop +adoptive +##tish +skaters +##vres +aiding +mango +##joy +nikola +shriek +##ignon +pharmaceuticals +##mg +tuna +calvert +gustavo +stocked +yearbook +##urai +##mana +computed +subsp +riff +hanoi +kelvin +hamid +moors +pastures +summons +jihad +nectar +##ctors +bayou +untitled +pleasing +vastly +republics +intellect +##η +##ulio +##tou +crumbling +stylistic +sb +##ی +consolation +frequented +h₂o +walden +widows +##iens +404 +##ignment +chunks +improves +288 +grit +recited +##dev +snarl +sociological +##arte +##gul +inquired +##held +bruise +clube +consultancy +homogeneous +hornets +multiplication +pasta +prick +savior +##grin +##kou +##phile +yoon +##gara +grimes +vanishing +cheering +reacting +bn +distillery +##quisite +##vity +coe +dockyard +massif +##jord +escorts +voss +##valent +byte +chopped +hawke +illusions +workings +floats +##koto +##vac +kv +annapolis +madden +##onus +alvaro +noctuidae +##cum +##scopic +avenge +steamboat +forte +illustrates +erika +##trip +570 +dew +nationalities +bran +manifested +thirsty +diversified +muscled +reborn +##standing +arson +##lessness +##dran +##logram +##boys +##kushima +##vious +willoughby +##phobia +286 +alsace +dashboard +yuki +##chai +granville +myspace +publicized +tricked +##gang +adjective +##ater +relic +reorganisation +enthusiastically +indications +saxe +##lassified +consolidate +iec +padua +helplessly +ramps +renaming +regulars +pedestrians +accents +convicts +inaccurate +lowers +mana +##pati +barrie +bjp +outta +someplace +berwick +flanking +invoked +marrow +sparsely +excerpts +clothed +rei +##ginal +wept +##straße +##vish +alexa +excel +##ptive +membranes +aquitaine +creeks +cutler +sheppard +implementations +ns +##dur +fragrance +budge +concordia +magnesium +marcelo +##antes +gladly +vibrating +##rral +##ggles +montrose +##omba +lew +seamus +1630 +cocky +##ament +##uen +bjorn +##rrick +fielder +fluttering +##lase +methyl +kimberley +mcdowell +reductions +barbed +##jic +##tonic +aeronautical +condensed +distracting +##promising +huffed +##cala +##sle +claudius +invincible +missy +pious +balthazar +ci +##lang +butte +combo +orson +##dication +myriad +1707 +silenced +##fed +##rh +coco +netball +yourselves +##oza +clarify +heller +peg +durban +etudes +offender +roast +blackmail +curvature +##woods +vile +309 +illicit +suriname +##linson +overture +1685 +bubbling +gymnast +tucking +##mming +##ouin +maldives +##bala +gurney +##dda +##eased +##oides +backside +pinto +jars +racehorse +tending +##rdial +baronetcy +wiener +duly +##rke +barbarian +cupping +flawed +##thesis +bertha +pleistocene +puddle +swearing +##nob +##tically +fleeting +prostate +amulet +educating +##mined +##iti +##tler +75th +jens +respondents +analytics +cavaliers +papacy +raju +##iente +##ulum +##tip +funnel +271 +disneyland +##lley +sociologist +##iam +2500 +faulkner +louvre +menon +##dson 
+276 +##ower +afterlife +mannheim +peptide +referees +comedians +meaningless +##anger +##laise +fabrics +hurley +renal +sleeps +##bour +##icle +breakout +kristin +roadside +animator +clover +disdain +unsafe +redesign +##urity +firth +barnsley +portage +reset +narrows +268 +commandos +expansive +speechless +tubular +##lux +essendon +eyelashes +smashwords +##yad +##bang +##claim +craved +sprinted +chet +somme +astor +wrocław +orton +266 +bane +##erving +##uing +mischief +##amps +##sund +scaling +terre +##xious +impairment +offenses +undermine +moi +soy +contiguous +arcadia +inuit +seam +##tops +macbeth +rebelled +##icative +##iot +590 +elaborated +frs +uniformed +##dberg +259 +powerless +priscilla +stimulated +980 +qc +arboretum +frustrating +trieste +bullock +##nified +enriched +glistening +intern +##adia +locus +nouvelle +ollie +ike +lash +starboard +ee +tapestry +headlined +hove +rigged +##vite +pollock +##yme +thrive +clustered +cas +roi +gleamed +olympiad +##lino +pressured +regimes +##hosis +##lick +ripley +##ophone +kickoff +gallon +rockwell +##arable +crusader +glue +revolutions +scrambling +1714 +grover +##jure +englishman +aztec +263 +contemplating +coven +ipad +preach +triumphant +tufts +##esian +rotational +##phus +328 +falkland +##brates +strewn +clarissa +rejoin +environmentally +glint +banded +drenched +moat +albanians +johor +rr +maestro +malley +nouveau +shaded +taxonomy +v6 +adhere +bunk +airfields +##ritan +1741 +encompass +remington +tran +##erative +amelie +mazda +friar +morals +passions +##zai +breadth +vis +##hae +argus +burnham +caressing +insider +rudd +##imov +##mini +##rso +italianate +murderous +textual +wainwright +armada +bam +weave +timer +##taken +##nh +fra +##crest +ardent +salazar +taps +tunis +##ntino +allegro +gland +philanthropic +##chester +implication +##optera +esq +judas +noticeably +wynn +##dara +inched +indexed +crises +villiers +bandit +royalties +patterned +cupboard +interspersed +accessory +isla +kendrick +entourage +stitches +##esthesia +headwaters +##ior +interlude +distraught +draught +1727 +##basket +biased +sy +transient +triad +subgenus +adapting +kidd +shortstop +##umatic +dimly +spiked +mcleod +reprint +nellie +pretoria +windmill +##cek +singled +##mps +273 +reunite +##orous +747 +bankers +outlying +##omp +##ports +##tream +apologies +cosmetics +patsy +##deh +##ocks +##yson +bender +nantes +serene +##nad +lucha +mmm +323 +##cius +##gli +cmll +coinage +nestor +juarez +##rook +smeared +sprayed +twitching +sterile +irina +embodied +juveniles +enveloped +miscellaneous +cancers +dq +gulped +luisa +crested +swat +donegal +ref +##anov +##acker +hearst +mercantile +##lika +doorbell +ua +vicki +##alla +##som +bilbao +psychologists +stryker +sw +horsemen +turkmenistan +wits +##national +anson +mathew +screenings +##umb +rihanna +##agne +##nessy +aisles +##iani +##osphere +hines +kenton +saskatoon +tasha +truncated +##champ +##itan +mildred +advises +fredrik +interpreting +inhibitors +##athi +spectroscopy +##hab +##kong +karim +panda +##oia +##nail +##vc +conqueror +kgb +leukemia +##dity +arrivals +cheered +pisa +phosphorus +shielded +##riated +mammal +unitarian +urgently +chopin +sanitary +##mission +spicy +drugged +hinges +##tort +tipping +trier +impoverished +westchester +##caster +267 +epoch +nonstop +##gman +##khov +aromatic +centrally +cerro +##tively +##vio +billions +modulation +sedimentary +283 +facilitating +outrageous +goldstein +##eak +##kt +ld +maitland +penultimate +pollard +##dance +fleets +spaceship +vertebrae +##nig +alcoholism +als 
+recital +##bham +##ference +##omics +m2 +##bm +trois +##tropical +##в +commemorates +##meric +marge +##raction +1643 +670 +cosmetic +ravaged +##ige +catastrophe +eng +##shida +albrecht +arterial +bellamy +decor +harmon +##rde +bulbs +synchronized +vito +easiest +shetland +shielding +wnba +##glers +##ssar +##riam +brianna +cumbria +##aceous +##rard +cores +thayer +##nsk +brood +hilltop +luminous +carts +keynote +larkin +logos +##cta +##ا +##mund +##quay +lilith +tinted +277 +wrestle +mobilization +##uses +sequential +siam +bloomfield +takahashi +274 +##ieving +presenters +ringo +blazed +witty +##oven +##ignant +devastation +haydn +harmed +newt +therese +##peed +gershwin +molina +rabbis +sudanese +001 +innate +restarted +##sack +##fus +slices +wb +##shah +enroll +hypothetical +hysterical +1743 +fabio +indefinite +warped +##hg +exchanging +525 +unsuitable +##sboro +gallo +1603 +bret +cobalt +homemade +##hunter +mx +operatives +##dhar +terraces +durable +latch +pens +whorls +##ctuated +##eaux +billing +ligament +succumbed +##gly +regulators +spawn +##brick +##stead +filmfare +rochelle +##nzo +1725 +circumstance +saber +supplements +##nsky +##tson +crowe +wellesley +carrot +##9th +##movable +primate +drury +sincerely +topical +##mad +##rao +callahan +kyiv +smarter +tits +undo +##yeh +announcements +anthologies +barrio +nebula +##islaus +##shaft +##tyn +bodyguards +2021 +assassinate +barns +emmett +scully +##mah +##yd +##eland +##tino +##itarian +demoted +gorman +lashed +prized +adventist +writ +##gui +alla +invertebrates +##ausen +1641 +amman +1742 +align +healy +redistribution +##gf +##rize +insulation +##drop +adherents +hezbollah +vitro +ferns +yanking +269 +php +registering +uppsala +cheerleading +confines +mischievous +tully +##ross +49th +docked +roam +stipulated +pumpkin +##bry +prompt +##ezer +blindly +shuddering +craftsmen +frail +scented +katharine +scramble +shaggy +sponge +helix +zaragoza +279 +##52 +43rd +backlash +fontaine +seizures +posse +cowan +nonfiction +telenovela +wwii +hammered +undone +##gpur +encircled +irs +##ivation +artefacts +oneself +searing +smallpox +##belle +##osaurus +shandong +breached +upland +blushing +rankin +infinitely +psyche +tolerated +docking +evicted +##col +unmarked +##lving +gnome +lettering +litres +musique +##oint +benevolent +##jal +blackened +##anna +mccall +racers +tingle +##ocene +##orestation +introductions +radically +292 +##hiff +##باد +1610 +1739 +munchen +plead +##nka +condo +scissors +##sight +##tens +apprehension +##cey +##yin +hallmark +watering +formulas +sequels +##llas +aggravated +bae +commencing +##building +enfield +prohibits +marne +vedic +civilized +euclidean +jagger +beforehand +blasts +dumont +##arney +##nem +740 +conversions +hierarchical +rios +simulator +##dya +##lellan +hedges +oleg +thrusts +shadowed +darby +maximize +1744 +gregorian +##nded +##routed +sham +unspecified +##hog +emory +factual +##smo +##tp +fooled +##rger +ortega +wellness +marlon +##oton +##urance +casket +keating +ley +enclave +##ayan +char +influencing +jia +##chenko +412 +ammonia +erebidae +incompatible +violins +cornered +##arat +grooves +astronauts +columbian +rampant +fabrication +kyushu +mahmud +vanish +##dern +mesopotamia +##lete +ict +##rgen +caspian +kenji +pitted +##vered +999 +grimace +roanoke +tchaikovsky +twinned +##analysis +##awan +xinjiang +arias +clemson +kazakh +sizable +1662 +##khand +##vard +plunge +tatum +vittorio +##nden +cholera +##dana +##oper +bracing +indifference +projectile +superliga +##chee +realises +upgrading +299 +porte 
+retribution +##vies +nk +stil +##resses +ama +bureaucracy +blackberry +bosch +testosterone +collapses +greer +##pathic +ioc +fifties +malls +##erved +bao +baskets +adolescents +siegfried +##osity +##tosis +mantra +detecting +existent +fledgling +##cchi +dissatisfied +gan +telecommunication +mingled +sobbed +6000 +controversies +outdated +taxis +##raus +fright +slams +##lham +##fect +##tten +detectors +fetal +tanned +##uw +fray +goth +olympian +skipping +mandates +scratches +sheng +unspoken +hyundai +tracey +hotspur +restrictive +##buch +americana +mundo +##bari +burroughs +diva +vulcan +##6th +distinctions +thumping +##ngen +mikey +sheds +fide +rescues +springsteen +vested +valuation +##ece +##ely +pinnacle +rake +sylvie +##edo +almond +quivering +##irus +alteration +faltered +##wad +51st +hydra +ticked +##kato +recommends +##dicated +antigua +arjun +stagecoach +wilfred +trickle +pronouns +##pon +aryan +nighttime +##anian +gall +pea +stitch +##hei +leung +milos +##dini +eritrea +nexus +starved +snowfall +kant +parasitic +cot +discus +hana +strikers +appleton +kitchens +##erina +##partisan +##itha +##vius +disclose +metis +##channel +1701 +tesla +##vera +fitch +1735 +blooded +##tila +decimal +##tang +##bai +cyclones +eun +bottled +peas +pensacola +basha +bolivian +crabs +boil +lanterns +partridge +roofed +1645 +necks +##phila +opined +patting +##kla +##lland +chuckles +volta +whereupon +##nche +devout +euroleague +suicidal +##dee +inherently +involuntary +knitting +nasser +##hide +puppets +colourful +courageous +southend +stills +miraculous +hodgson +richer +rochdale +ethernet +greta +uniting +prism +umm +##haya +##itical +##utation +deterioration +pointe +prowess +##ropriation +lids +scranton +billings +subcontinent +##koff +##scope +brute +kellogg +psalms +degraded +##vez +stanisław +##ructured +ferreira +pun +astonishing +gunnar +##yat +arya +prc +gottfried +##tight +excursion +##ographer +dina +##quil +##nare +huffington +illustrious +wilbur +gundam +verandah +##zard +naacp +##odle +constructive +fjord +kade +##naud +generosity +thrilling +baseline +cayman +frankish +plastics +accommodations +zoological +##fting +cedric +qb +motorized +##dome +##otted +squealed +tackled +canucks +budgets +situ +asthma +dail +gabled +grasslands +whimpered +writhing +judgments +##65 +minnie +pv +##carbon +bananas +grille +domes +monique +odin +maguire +markham +tierney +##estra +##chua +libel +poke +speedy +atrium +laval +notwithstanding +##edly +fai +kala +##sur +robb +##sma +listings +luz +supplementary +tianjin +##acing +enzo +jd +ric +scanner +croats +transcribed +##49 +arden +cv +##hair +##raphy +##lver +##uy +357 +seventies +staggering +alam +horticultural +hs +regression +timbers +blasting +##ounded +montagu +manipulating +##cit +catalytic +1550 +troopers +##meo +condemnation +fitzpatrick +##oire +##roved +inexperienced +1670 +castes +##lative +outing +314 +dubois +flicking +quarrel +ste +learners +1625 +iq +whistled +##class +282 +classify +tariffs +temperament +355 +folly +liszt +##yles +immersed +jordanian +ceasefire +apparel +extras +maru +fished +##bio +harta +stockport +assortment +craftsman +paralysis +transmitters +##cola +blindness +##wk +fatally +proficiency +solemnly +##orno +repairing +amore +groceries +ultraviolet +##chase +schoolhouse +##tua +resurgence +nailed +##otype +##× +ruse +saliva +diagrams +##tructing +albans +rann +thirties +1b +antennas +hilarious +cougars +paddington +stats +##eger +breakaway +ipod +reza +authorship +prohibiting +scoffed +##etz +##ttle +conscription 
+defected +trondheim +##fires +ivanov +keenan +##adan +##ciful +##fb +##slow +locating +##ials +##tford +cadiz +basalt +blankly +interned +rags +rattling +##tick +carpathian +reassured +sync +bum +guildford +iss +staunch +##onga +astronomers +sera +sofie +emergencies +susquehanna +##heard +duc +mastery +vh1 +williamsburg +bayer +buckled +craving +##khan +##rdes +bloomington +##write +alton +barbecue +##bians +justine +##hri +##ndt +delightful +smartphone +newtown +photon +retrieval +peugeot +hissing +##monium +##orough +flavors +lighted +relaunched +tainted +##games +##lysis +anarchy +microscopic +hopping +adept +evade +evie +##beau +inhibit +sinn +adjustable +hurst +intuition +wilton +cisco +44th +lawful +lowlands +stockings +thierry +##dalen +##hila +##nai +fates +prank +tb +maison +lobbied +provocative +1724 +4a +utopia +##qual +carbonate +gujarati +purcell +##rford +curtiss +##mei +overgrown +arenas +mediation +swallows +##rnik +respectful +turnbull +##hedron +##hope +alyssa +ozone +##ʻi +ami +gestapo +johansson +snooker +canteen +cuff +declines +empathy +stigma +##ags +##iner +##raine +taxpayers +gui +volga +##wright +##copic +lifespan +overcame +tattooed +enactment +giggles +##ador +##camp +barrington +bribe +obligatory +orbiting +peng +##enas +elusive +sucker +##vating +cong +hardship +empowered +anticipating +estrada +cryptic +greasy +detainees +planck +sudbury +plaid +dod +marriott +kayla +##ears +##vb +##zd +mortally +##hein +cognition +radha +319 +liechtenstein +meade +richly +argyle +harpsichord +liberalism +trumpets +lauded +tyrant +salsa +tiled +lear +promoters +reused +slicing +trident +##chuk +##gami +##lka +cantor +checkpoint +##points +gaul +leger +mammalian +##tov +##aar +##schaft +doha +frenchman +nirvana +##vino +delgado +headlining +##eron +##iography +jug +tko +1649 +naga +intersections +##jia +benfica +nawab +##suka +ashford +gulp +##deck +##vill +##rug +brentford +frazier +pleasures +dunne +potsdam +shenzhen +dentistry +##tec +flanagan +##dorff +##hear +chorale +dinah +prem +quezon +##rogated +relinquished +sutra +terri +##pani +flaps +##rissa +poly +##rnet +homme +aback +##eki +linger +womb +##kson +##lewood +doorstep +orthodoxy +threaded +westfield +##rval +dioceses +fridays +subsided +##gata +loyalists +##biotic +##ettes +letterman +lunatic +prelate +tenderly +invariably +souza +thug +winslow +##otide +furlongs +gogh +jeopardy +##runa +pegasus +##umble +humiliated +standalone +tagged +##roller +freshmen +klan +##bright +attaining +initiating +transatlantic +logged +viz +##uance +1723 +combatants +intervening +stephane +chieftain +despised +grazed +317 +cdc +galveston +godzilla +macro +simulate +##planes +parades +##esses +960 +##ductive +##unes +equator +overdose +##cans +##hosh +##lifting +joshi +epstein +sonora +treacherous +aquatics +manchu +responsive +##sation +supervisory +##christ +##llins +##ibar +##balance +##uso +kimball +karlsruhe +mab +##emy +ignores +phonetic +reuters +spaghetti +820 +almighty +danzig +rumbling +tombstone +designations +lured +outset +##felt +supermarkets +##wt +grupo +kei +kraft +susanna +##blood +comprehension +genealogy +##aghan +##verted +redding +##ythe +1722 +bowing +##pore +##roi +lest +sharpened +fulbright +valkyrie +sikhs +##unds +swans +bouquet +merritt +##tage +##venting +commuted +redhead +clerks +leasing +cesare +dea +hazy +##vances +fledged +greenfield +servicemen +##gical +armando +blackout +dt +sagged +downloadable +intra +potion +pods +##4th +##mism +xp +attendants +gambia +stale +##ntine +plump +asteroids 
+rediscovered +buds +flea +hive +##neas +1737 +classifications +debuts +##eles +olympus +scala +##eurs +##gno +##mute +hummed +sigismund +visuals +wiggled +await +pilasters +clench +sulfate +##ances +bellevue +enigma +trainee +snort +##sw +clouded +denim +##rank +##rder +churning +hartman +lodges +riches +sima +##missible +accountable +socrates +regulates +mueller +##cr +1702 +avoids +solids +himalayas +nutrient +pup +##jevic +squat +fades +nec +##lates +##pina +##rona +##ου +privateer +tequila +##gative +##mpton +apt +hornet +immortals +##dou +asturias +cleansing +dario +##rries +##anta +etymology +servicing +zhejiang +##venor +##nx +horned +erasmus +rayon +relocating +£10 +##bags +escalated +promenade +stubble +2010s +artisans +axial +liquids +mora +sho +yoo +##tsky +bundles +oldies +##nally +notification +bastion +##ths +sparkle +##lved +1728 +leash +pathogen +highs +##hmi +immature +880 +gonzaga +ignatius +mansions +monterrey +sweets +bryson +##loe +polled +regatta +brightest +pei +rosy +squid +hatfield +payroll +addict +meath +cornerback +heaviest +lodging +##mage +capcom +rippled +##sily +barnet +mayhem +ymca +snuggled +rousseau +##cute +blanchard +284 +fragmented +leighton +chromosomes +risking +##md +##strel +##utter +corinne +coyotes +cynical +hiroshi +yeomanry +##ractive +ebook +grading +mandela +plume +agustin +magdalene +##rkin +bea +femme +trafford +##coll +##lun +##tance +52nd +fourier +upton +##mental +camilla +gust +iihf +islamabad +longevity +##kala +feldman +netting +##rization +endeavour +foraging +mfa +orr +##open +greyish +contradiction +graz +##ruff +handicapped +marlene +tweed +oaxaca +spp +campos +miocene +pri +configured +cooks +pluto +cozy +pornographic +##entes +70th +fairness +glided +jonny +lynne +rounding +sired +##emon +##nist +remade +uncover +##mack +complied +lei +newsweek +##jured +##parts +##enting +##pg +293 +finer +guerrillas +athenian +deng +disused +stepmother +accuse +gingerly +seduction +521 +confronting +##walker +##going +gora +nostalgia +sabres +virginity +wrenched +##minated +syndication +wielding +eyre +##56 +##gnon +##igny +behaved +taxpayer +sweeps +##growth +childless +gallant +##ywood +amplified +geraldine +scrape +##ffi +babylonian +fresco +##rdan +##kney +##position +1718 +restricting +tack +fukuoka +osborn +selector +partnering +##dlow +318 +gnu +kia +tak +whitley +gables +##54 +##mania +mri +softness +immersion +##bots +##evsky +1713 +chilling +insignificant +pcs +##uis +elites +lina +purported +supplemental +teaming +##americana +##dding +##inton +proficient +rouen +##nage +##rret +niccolo +selects +##bread +fluffy +1621 +gruff +knotted +mukherjee +polgara +thrash +nicholls +secluded +smoothing +thru +corsica +loaf +whitaker +inquiries +##rrier +##kam +indochina +289 +marlins +myles +peking +##tea +extracts +pastry +superhuman +connacht +vogel +##ditional +##het +##udged +##lash +gloss +quarries +refit +teaser +##alic +##gaon +20s +materialized +sling +camped +pickering +tung +tracker +pursuant +##cide +cranes +soc +##cini +##typical +##viere +anhalt +overboard +workout +chores +fares +orphaned +stains +##logie +fenton +surpassing +joyah +triggers +##itte +grandmaster +##lass +##lists +clapping +fraudulent +ledger +nagasaki +##cor +##nosis +##tsa +eucalyptus +tun +##icio +##rney +##tara +dax +heroism +ina +wrexham +onboard +unsigned +##dates +moshe +galley +winnie +droplets +exiles +praises +watered +noodles +##aia +fein +adi +leland +multicultural +stink +bingo +comets +erskine +modernized +canned +constraint +domestically 
+chemotherapy +featherweight +stifled +##mum +darkly +irresistible +refreshing +hasty +isolate +##oys +kitchener +planners +##wehr +cages +yarn +implant +toulon +elects +childbirth +yue +##lind +##lone +cn +rightful +sportsman +junctions +remodeled +specifies +##rgh +291 +##oons +complimented +##urgent +lister +ot +##logic +bequeathed +cheekbones +fontana +gabby +##dial +amadeus +corrugated +maverick +resented +triangles +##hered +##usly +nazareth +tyrol +1675 +assent +poorer +sectional +aegean +##cous +296 +nylon +ghanaian +##egorical +##weig +cushions +forbid +fusiliers +obstruction +somerville +##scia +dime +earrings +elliptical +leyte +oder +polymers +timmy +atm +midtown +piloted +settles +continual +externally +mayfield +##uh +enrichment +henson +keane +persians +1733 +benji +braden +pep +324 +##efe +contenders +pepsi +valet +##isches +298 +##asse +##earing +goofy +stroll +##amen +authoritarian +occurrences +adversary +ahmedabad +tangent +toppled +dorchester +1672 +modernism +marxism +islamist +charlemagne +exponential +racks +unicode +brunette +mbc +pic +skirmish +##bund +##lad +##powered +##yst +hoisted +messina +shatter +##ctum +jedi +vantage +##music +##neil +clemens +mahmoud +corrupted +authentication +lowry +nils +##washed +omnibus +wounding +jillian +##itors +##opped +serialized +narcotics +handheld +##arm +##plicity +intersecting +stimulating +##onis +crate +fellowships +hemingway +casinos +climatic +fordham +copeland +drip +beatty +leaflets +robber +brothel +madeira +##hedral +sphinx +ultrasound +##vana +valor +forbade +leonid +villas +##aldo +duane +marquez +##cytes +disadvantaged +forearms +kawasaki +reacts +consular +lax +uncles +uphold +##hopper +concepcion +dorsey +lass +##izan +arching +passageway +1708 +researches +tia +internationals +##graphs +##opers +distinguishes +javanese +divert +##uven +plotted +##listic +##rwin +##erik +##tify +affirmative +signifies +validation +##bson +kari +felicity +georgina +zulu +##eros +##rained +##rath +overcoming +##dot +argyll +##rbin +1734 +chiba +ratification +windy +earls +parapet +##marks +hunan +pristine +astrid +punta +##gart +brodie +##kota +##oder +malaga +minerva +rouse +##phonic +bellowed +pagoda +portals +reclamation +##gur +##odies +##⁄₄ +parentheses +quoting +allergic +palette +showcases +benefactor +heartland +nonlinear +##tness +bladed +cheerfully +scans +##ety +##hone +1666 +girlfriends +pedersen +hiram +sous +##liche +##nator +1683 +##nery +##orio +##umen +bobo +primaries +smiley +##cb +unearthed +uniformly +fis +metadata +1635 +ind +##oted +recoil +##titles +##tura +##ια +406 +hilbert +jamestown +mcmillan +tulane +seychelles +##frid +antics +coli +fated +stucco +##grants +1654 +bulky +accolades +arrays +caledonian +carnage +optimism +puebla +##tative +##cave +enforcing +rotherham +seo +dunlop +aeronautics +chimed +incline +zoning +archduke +hellenistic +##oses +##sions +candi +thong +##ople +magnate +rustic +##rsk +projective +slant +##offs +danes +hollis +vocalists +##ammed +congenital +contend +gesellschaft +##ocating +##pressive +douglass +quieter +##cm +##kshi +howled +salim +spontaneously +townsville +buena +southport +##bold +kato +1638 +faerie +stiffly +##vus +##rled +297 +flawless +realising +taboo +##7th +bytes +straightening +356 +jena +##hid +##rmin +cartwright +berber +bertram +soloists +411 +noses +417 +coping +fission +hardin +inca +##cen +1717 +mobilized +vhf +##raf +biscuits +curate +##85 +##anial +331 +gaunt +neighbourhoods +1540 +##abas +blanca +bypassed +sockets +behold +coincidentally +##bane 
+nara +shave +splinter +terrific +##arion +##erian +commonplace +juris +redwood +waistband +boxed +caitlin +fingerprints +jennie +naturalized +##ired +balfour +craters +jody +bungalow +hugely +quilt +glitter +pigeons +undertaker +bulging +constrained +goo +##sil +##akh +assimilation +reworked +##person +persuasion +##pants +felicia +##cliff +##ulent +1732 +explodes +##dun +##inium +##zic +lyman +vulture +hog +overlook +begs +northwards +ow +spoil +##urer +fatima +favorably +accumulate +sargent +sorority +corresponded +dispersal +kochi +toned +##imi +##lita +internacional +newfound +##agger +##lynn +##rigue +booths +peanuts +##eborg +medicare +muriel +nur +##uram +crates +millennia +pajamas +worsened +##breakers +jimi +vanuatu +yawned +##udeau +carousel +##hony +hurdle +##ccus +##mounted +##pod +rv +##eche +airship +ambiguity +compulsion +recapture +##claiming +arthritis +##osomal +1667 +asserting +ngc +sniffing +dade +discontent +glendale +ported +##amina +defamation +rammed +##scent +fling +livingstone +##fleet +875 +##ppy +apocalyptic +comrade +lcd +##lowe +cessna +eine +persecuted +subsistence +demi +hoop +reliefs +710 +coptic +progressing +stemmed +perpetrators +1665 +priestess +##nio +dobson +ebony +rooster +itf +tortricidae +##bbon +##jian +cleanup +##jean +##øy +1721 +eighties +taxonomic +holiness +##hearted +##spar +antilles +showcasing +stabilized +##nb +gia +mascara +michelangelo +dawned +##uria +##vinsky +extinguished +fitz +grotesque +£100 +##fera +##loid +##mous +barges +neue +throbbed +cipher +johnnie +##a1 +##mpt +outburst +##swick +spearheaded +administrations +c1 +heartbreak +pixels +pleasantly +##enay +lombardy +plush +##nsed +bobbie +##hly +reapers +tremor +xiang +minogue +substantive +hitch +barak +##wyl +kwan +##encia +910 +obscene +elegance +indus +surfer +bribery +conserve +##hyllum +##masters +horatio +##fat +apes +rebound +psychotic +##pour +iteration +##mium +##vani +botanic +horribly +antiques +dispose +paxton +##hli +##wg +timeless +1704 +disregard +engraver +hounds +##bau +##version +looted +uno +facilitates +groans +masjid +rutland +antibody +disqualification +decatur +footballers +quake +slacks +48th +rein +scribe +stabilize +commits +exemplary +tho +##hort +##chison +pantry +traversed +##hiti +disrepair +identifiable +vibrated +baccalaureate +##nnis +csa +interviewing +##iensis +##raße +greaves +wealthiest +343 +classed +jogged +£5 +##58 +##atal +illuminating +knicks +respecting +##uno +scrubbed +##iji +##dles +kruger +moods +growls +raider +silvia +chefs +kam +vr +cree +percival +##terol +gunter +counterattack +defiant +henan +ze +##rasia +##riety +equivalence +submissions +##fra +##thor +bautista +mechanically +##heater +cornice +herbal +templar +##mering +outputs +ruining +ligand +renumbered +extravagant +mika +blockbuster +eta +insurrection +##ilia +darkening +ferocious +pianos +strife +kinship +##aer +melee +##anor +##iste +##may +##oue +decidedly +weep +##jad +##missive +##ppel +354 +puget +unease +##gnant +1629 +hammering +kassel +ob +wessex +##lga +bromwich +egan +paranoia +utilization +##atable +##idad +contradictory +provoke +##ols +##ouring +##tangled +knesset +##very +##lette +plumbing +##sden +##¹ +greensboro +occult +sniff +338 +zev +beaming +gamer +haggard +mahal +##olt +##pins +mendes +utmost +briefing +gunnery +##gut +##pher +##zh +##rok +1679 +khalifa +sonya +##boot +principals +urbana +wiring +##liffe +##minating +##rrado +dahl +nyu +skepticism +np +townspeople +ithaca +lobster +somethin +##fur +##arina +##−1 +freighter +zimmerman +biceps 
+contractual +##herton +amend +hurrying +subconscious +##anal +336 +meng +clermont +spawning +##eia +##lub +dignitaries +impetus +snacks +spotting +twigs +##bilis +##cz +##ouk +libertadores +nic +skylar +##aina +##firm +gustave +asean +##anum +dieter +legislatures +flirt +bromley +trolls +umar +##bbies +##tyle +blah +parc +bridgeport +crank +negligence +##nction +46th +constantin +molded +bandages +seriousness +00pm +siegel +carpets +compartments +upbeat +statehood +##dner +##edging +marko +730 +platt +##hane +paving +##iy +1738 +abbess +impatience +limousine +nbl +##talk +441 +lucille +mojo +nightfall +robbers +##nais +karel +brisk +calves +replicate +ascribed +telescopes +##olf +intimidated +##reen +ballast +specialization +##sit +aerodynamic +caliphate +rainer +visionary +##arded +epsilon +##aday +##onte +aggregation +auditory +boosted +reunification +kathmandu +loco +robyn +402 +acknowledges +appointing +humanoid +newell +redeveloped +restraints +##tained +barbarians +chopper +1609 +italiana +##lez +##lho +investigates +wrestlemania +##anies +##bib +690 +##falls +creaked +dragoons +gravely +minions +stupidity +volley +##harat +##week +musik +##eries +##uously +fungal +massimo +semantics +malvern +##ahl +##pee +discourage +embryo +imperialism +1910s +profoundly +##ddled +jiangsu +sparkled +stat +##holz +sweatshirt +tobin +##iction +sneered +##cheon +##oit +brit +causal +smyth +##neuve +diffuse +perrin +silvio +##ipes +##recht +detonated +iqbal +selma +##nism +##zumi +roasted +##riders +tay +##ados +##mament +##mut +##rud +840 +completes +nipples +cfa +flavour +hirsch +##laus +calderon +sneakers +moravian +##ksha +1622 +rq +294 +##imeters +bodo +##isance +##pre +##ronia +anatomical +excerpt +##lke +dh +kunst +##tablished +##scoe +biomass +panted +unharmed +gael +housemates +montpellier +##59 +coa +rodents +tonic +hickory +singleton +##taro +451 +1719 +aldo +breaststroke +dempsey +och +rocco +##cuit +merton +dissemination +midsummer +serials +##idi +haji +polynomials +##rdon +gs +enoch +prematurely +shutter +taunton +£3 +##grating +##inates +archangel +harassed +##asco +326 +archway +dazzling +##ecin +1736 +sumo +wat +##kovich +1086 +honneur +##ently +##nostic +##ttal +##idon +1605 +403 +1716 +blogger +rents +##gnan +hires +##ikh +##dant +howie +##rons +handler +retracted +shocks +1632 +arun +duluth +kepler +trumpeter +##lary +peeking +seasoned +trooper +##mara +laszlo +##iciencies +##rti +heterosexual +##inatory +##ssion +indira +jogging +##inga +##lism +beit +dissatisfaction +malice +##ately +nedra +peeling +##rgeon +47th +stadiums +475 +vertigo +##ains +iced +restroom +##plify +##tub +illustrating +pear +##chner +##sibility +inorganic +rappers +receipts +watery +##kura +lucinda +##oulos +reintroduced +##8th +##tched +gracefully +saxons +nutritional +wastewater +rained +favourites +bedrock +fisted +hallways +likeness +upscale +##lateral +1580 +blinds +prequel +##pps +##tama +deter +humiliating +restraining +tn +vents +1659 +laundering +recess +rosary +tractors +coulter +federer +##ifiers +##plin +persistence +##quitable +geschichte +pendulum +quakers +##beam +bassett +pictorial +buffet +koln +##sitor +drills +reciprocal +shooters +##57 +##cton +##tees +converge +pip +dmitri +donnelly +yamamoto +aqua +azores +demographics +hypnotic +spitfire +suspend +wryly +roderick +##rran +sebastien +##asurable +mavericks +##fles +##200 +himalayan +prodigy +##iance +transvaal +demonstrators +handcuffs +dodged +mcnamara +sublime +1726 +crazed +##efined +##till +ivo +pondered +reconciled +shrill +sava 
+##duk +bal +cad +heresy +jaipur +goran +##nished +341 +lux +shelly +whitehall +##hre +israelis +peacekeeping +##wled +1703 +demetrius +ousted +##arians +##zos +beale +anwar +backstroke +raged +shrinking +cremated +##yck +benign +towing +wadi +darmstadt +landfill +parana +soothe +colleen +sidewalks +mayfair +tumble +hepatitis +ferrer +superstructure +##gingly +##urse +##wee +anthropological +translators +##mies +closeness +hooves +##pw +mondays +##roll +##vita +landscaping +##urized +purification +sock +thorns +thwarted +jalan +tiberius +##taka +saline +##rito +confidently +khyber +sculptors +##ij +brahms +hammersmith +inspectors +battista +fivb +fragmentation +hackney +##uls +arresting +exercising +antoinette +bedfordshire +##zily +dyed +##hema +1656 +racetrack +variability +##tique +1655 +austrians +deteriorating +madman +theorists +aix +lehman +weathered +1731 +decreed +eruptions +1729 +flaw +quinlan +sorbonne +flutes +nunez +1711 +adored +downwards +fable +rasped +1712 +moritz +mouthful +renegade +shivers +stunts +dysfunction +restrain +translit +327 +pancakes +##avio +##cision +##tray +351 +vial +##lden +bain +##maid +##oxide +chihuahua +malacca +vimes +##rba +##rnier +1664 +donnie +plaques +##ually +337 +bangs +floppy +huntsville +loretta +nikolay +##otte +eater +handgun +ubiquitous +##hett +eras +zodiac +1634 +##omorphic +1820s +##zog +cochran +##bula +##lithic +warring +##rada +dalai +excused +blazers +mcconnell +reeling +bot +este +##abi +geese +hoax +taxon +##bla +guitarists +##icon +condemning +hunts +inversion +moffat +taekwondo +##lvis +1624 +stammered +##rest +##rzy +sousa +fundraiser +marylebone +navigable +uptown +cabbage +daniela +salman +shitty +whimper +##kian +##utive +programmers +protections +rm +##rmi +##rued +forceful +##enes +fuss +##tao +##wash +brat +oppressive +reykjavik +spartak +ticking +##inkles +##kiewicz +adolph +horst +maui +protege +straighten +cpc +landau +concourse +clements +resultant +##ando +imaginative +joo +reactivated +##rem +##ffled +##uising +consultative +##guide +flop +kaitlyn +mergers +parenting +somber +##vron +supervise +vidhan +##imum +courtship +exemplified +harmonies +medallist +refining +##rrow +##ка +amara +##hum +780 +goalscorer +sited +overshadowed +rohan +displeasure +secretive +multiplied +osman +##orth +engravings +padre +##kali +##veda +miniatures +mis +##yala +clap +pali +rook +##cana +1692 +57th +antennae +astro +oskar +1628 +bulldog +crotch +hackett +yucatan +##sure +amplifiers +brno +ferrara +migrating +##gree +thanking +turing +##eza +mccann +ting +andersson +onslaught +gaines +ganga +incense +standardization +##mation +sentai +scuba +stuffing +turquoise +waivers +alloys +##vitt +regaining +vaults +##clops +##gizing +digger +furry +memorabilia +probing +##iad +payton +rec +deutschland +filippo +opaque +seamen +zenith +afrikaans +##filtration +disciplined +inspirational +##merie +banco +confuse +grafton +tod +##dgets +championed +simi +anomaly +biplane +##ceptive +electrode +##para +1697 +cleavage +crossbow +swirl +informant +##lars +##osta +afi +bonfire +spec +##oux +lakeside +slump +##culus +##lais +##qvist +##rrigan +1016 +facades +borg +inwardly +cervical +xl +pointedly +050 +stabilization +##odon +chests +1699 +hacked +ctv +orthogonal +suzy +##lastic +gaulle +jacobite +rearview +##cam +##erted +ashby +##drik +##igate +##mise +##zbek +affectionately +canine +disperse +latham +##istles +##ivar +spielberg +##orin +##idium +ezekiel +cid +##sg +durga +middletown +##cina +customized +frontiers +harden +##etano +##zzy +1604 
+bolsheviks +##66 +coloration +yoko +##bedo +briefs +slabs +debra +liquidation +plumage +##oin +blossoms +dementia +subsidy +1611 +proctor +relational +jerseys +parochial +ter +##ici +esa +peshawar +cavalier +loren +cpi +idiots +shamrock +1646 +dutton +malabar +mustache +##endez +##ocytes +referencing +terminates +marche +yarmouth +##sop +acton +mated +seton +subtly +baptised +beige +extremes +jolted +kristina +telecast +##actic +safeguard +waldo +##baldi +##bular +endeavors +sloppy +subterranean +##ensburg +##itung +delicately +pigment +tq +##scu +1626 +##ound +collisions +coveted +herds +##personal +##meister +##nberger +chopra +##ricting +abnormalities +defective +galician +lucie +##dilly +alligator +likened +##genase +burundi +clears +complexion +derelict +deafening +diablo +fingered +champaign +dogg +enlist +isotope +labeling +mrna +##erre +brilliance +marvelous +##ayo +1652 +crawley +ether +footed +dwellers +deserts +hamish +rubs +warlock +skimmed +##lizer +870 +buick +embark +heraldic +irregularities +##ajan +kiara +##kulam +##ieg +antigen +kowalski +##lge +oakley +visitation +##mbit +vt +##suit +1570 +murderers +##miento +##rites +chimneys +##sling +condemn +custer +exchequer +havre +##ghi +fluctuations +##rations +dfb +hendricks +vaccines +##tarian +nietzsche +biking +juicy +##duced +brooding +scrolling +selangor +##ragan +352 +annum +boomed +seminole +sugarcane +##dna +departmental +dismissing +innsbruck +arteries +ashok +batavia +daze +kun +overtook +##rga +##tlan +beheaded +gaddafi +holm +electronically +faulty +galilee +fractures +kobayashi +##lized +gunmen +magma +aramaic +mala +eastenders +inference +messengers +bf +##qu +407 +bathrooms +##vere +1658 +flashbacks +ideally +misunderstood +##jali +##weather +mendez +##grounds +505 +uncanny +##iii +1709 +friendships +##nbc +sacrament +accommodated +reiterated +logistical +pebbles +thumped +##escence +administering +decrees +drafts +##flight +##cased +##tula +futuristic +picket +intimidation +winthrop +##fahan +interfered +339 +afar +francoise +morally +uta +cochin +croft +dwarfs +##bruck +##dents +##nami +biker +##hner +##meral +nano +##isen +##ometric +##pres +##ан +brightened +meek +parcels +securely +gunners +##jhl +##zko +agile +hysteria +##lten +##rcus +bukit +champs +chevy +cuckoo +leith +sadler +theologians +welded +##section +1663 +jj +plurality +xander +##rooms +##formed +shredded +temps +intimately +pau +tormented +##lok +##stellar +1618 +charred +ems +essen +##mmel +alarms +spraying +ascot +blooms +twinkle +##abia +##apes +internment +obsidian +##chaft +snoop +##dav +##ooping +malibu +##tension +quiver +##itia +hays +mcintosh +travers +walsall +##ffie +1623 +beverley +schwarz +plunging +structurally +m3 +rosenthal +vikram +##tsk +770 +ghz +##onda +##tiv +chalmers +groningen +pew +reckon +unicef +##rvis +55th +##gni +1651 +sulawesi +avila +cai +metaphysical +screwing +turbulence +##mberg +augusto +samba +56th +baffled +momentary +toxin +##urian +##wani +aachen +condoms +dali +steppe +##3d +##app +##oed +##year +adolescence +dauphin +electrically +inaccessible +microscopy +nikita +##ega +atv +##cel +##enter +##oles +##oteric +##ы +accountants +punishments +wrongly +bribes +adventurous +clinch +flinders +southland +##hem +##kata +gough +##ciency +lads +soared +##ה +undergoes +deformation +outlawed +rubbish +##arus +##mussen +##nidae +##rzburg +arcs +##ingdon +##tituted +1695 +wheelbase +wheeling +bombardier +campground +zebra +##lices +##oj +##bain +lullaby +##ecure +donetsk +wylie +grenada +##arding +##ης +squinting 
+eireann +opposes +##andra +maximal +runes +##broken +##cuting +##iface +##ror +##rosis +additive +britney +adultery +triggering +##drome +detrimental +aarhus +containment +jc +swapped +vichy +##ioms +madly +##oric +##rag +brant +##ckey +##trix +1560 +1612 +broughton +rustling +##stems +##uder +asbestos +mentoring +##nivorous +finley +leaps +##isan +apical +pry +slits +substitutes +##dict +intuitive +fantasia +insistent +unreasonable +##igen +##vna +domed +hannover +margot +ponder +##zziness +impromptu +jian +lc +rampage +stemming +##eft +andrey +gerais +whichever +amnesia +appropriated +anzac +clicks +modifying +ultimatum +cambrian +maids +verve +yellowstone +##mbs +conservatoire +##scribe +adherence +dinners +spectra +imperfect +mysteriously +sidekick +tatar +tuba +##aks +##ifolia +distrust +##athan +##zle +c2 +ronin +zac +##pse +celaena +instrumentalist +scents +skopje +##mbling +comical +compensated +vidal +condor +intersect +jingle +wavelengths +##urrent +mcqueen +##izzly +carp +weasel +422 +kanye +militias +postdoctoral +eugen +gunslinger +##ɛ +faux +hospice +##for +appalled +derivation +dwarves +##elis +dilapidated +##folk +astoria +philology +##lwyn +##otho +##saka +inducing +philanthropy +##bf +##itative +geek +markedly +sql +##yce +bessie +indices +rn +##flict +495 +frowns +resolving +weightlifting +tugs +cleric +contentious +1653 +mania +rms +##miya +##reate +##ruck +##tucket +bien +eels +marek +##ayton +##cence +discreet +unofficially +##ife +leaks +##bber +1705 +332 +dung +compressor +hillsborough +pandit +shillings +distal +##skin +381 +##tat +##you +nosed +##nir +mangrove +undeveloped +##idia +textures +##inho +##500 +##rise +ae +irritating +nay +amazingly +bancroft +apologetic +compassionate +kata +symphonies +##lovic +airspace +##lch +930 +gifford +precautions +fulfillment +sevilla +vulgar +martinique +##urities +looting +piccolo +tidy +##dermott +quadrant +armchair +incomes +mathematicians +stampede +nilsson +##inking +##scan +foo +quarterfinal +##ostal +shang +shouldered +squirrels +##owe +344 +vinegar +##bner +##rchy +##systems +delaying +##trics +ars +dwyer +rhapsody +sponsoring +##gration +bipolar +cinder +starters +##olio +##urst +421 +signage +##nty +aground +figurative +mons +acquaintances +duets +erroneously +soyuz +elliptic +recreated +##cultural +##quette +##ssed +##tma +##zcz +moderator +scares +##itaire +##stones +##udence +juniper +sighting +##just +##nsen +britten +calabria +ry +bop +cramer +forsyth +stillness +##л +airmen +gathers +unfit +##umber +##upt +taunting +##rip +seeker +streamlined +##bution +holster +schumann +tread +vox +##gano +##onzo +strive +dil +reforming +covent +newbury +predicting +##orro +decorate +tre +##puted +andover +ie +asahi +dept +dunkirk +gills +##tori +buren +huskies +##stis +##stov +abstracts +bets +loosen +##opa +1682 +yearning +##glio +##sir +berman +effortlessly +enamel +napoli +persist +##peration +##uez +attache +elisa +b1 +invitations +##kic +accelerating +reindeer +boardwalk +clutches +nelly +polka +starbucks +##kei +adamant +huey +lough +unbroken +adventurer +embroidery +inspecting +stanza +##ducted +naia +taluka +##pone +##roids +chases +deprivation +florian +##jing +##ppet +earthly +##lib +##ssee +colossal +foreigner +vet +freaks +patrice +rosewood +triassic +upstate +##pkins +dominates +ata +chants +ks +vo +##400 +##bley +##raya +##rmed +555 +agra +infiltrate +##ailing +##ilation +##tzer +##uppe +##werk +binoculars +enthusiast +fujian +squeak +##avs +abolitionist +almeida +boredom +hampstead +marsden +rations +##ands 
+inflated +334 +bonuses +rosalie +patna +##rco +329 +detachments +penitentiary +54th +flourishing +woolf +##dion +##etched +papyrus +##lster +##nsor +##toy +bobbed +dismounted +endelle +inhuman +motorola +tbs +wince +wreath +##ticus +hideout +inspections +sanjay +disgrace +infused +pudding +stalks +##urbed +arsenic +leases +##hyl +##rrard +collarbone +##waite +##wil +dowry +##bant +##edance +genealogical +nitrate +salamanca +scandals +thyroid +necessitated +##! +##" +### +##$ +##% +##& +##' +##( +##) +##* +##+ +##, +##- +##. +##/ +##: +##; +##< +##= +##> +##? +##@ +##[ +##\ +##] +##^ +##_ +##` +##{ +##| +##} +##~ +##¡ +##¢ +##£ +##¤ +##¥ +##¦ +##§ +##¨ +##© +##ª +##« +##¬ +##® +##± +##´ +##µ +##¶ +##· +##º +##» +##¼ +##¾ +##¿ +##æ +##ð +##÷ +##þ +##đ +##ħ +##ŋ +##œ +##ƒ +##ɐ +##ɑ +##ɒ +##ɔ +##ɕ +##ə +##ɡ +##ɣ +##ɨ +##ɪ +##ɫ +##ɬ +##ɯ +##ɲ +##ɴ +##ɹ +##ɾ +##ʀ +##ʁ +##ʂ +##ʃ +##ʉ +##ʊ +##ʋ +##ʌ +##ʎ +##ʐ +##ʑ +##ʒ +##ʔ +##ʰ +##ʲ +##ʳ +##ʷ +##ʸ +##ʻ +##ʼ +##ʾ +##ʿ +##ˈ +##ˡ +##ˢ +##ˣ +##ˤ +##β +##γ +##δ +##ε +##ζ +##θ +##κ +##λ +##μ +##ξ +##ο +##π +##ρ +##σ +##τ +##υ +##φ +##χ +##ψ +##ω +##б +##г +##д +##ж +##з +##м +##п +##с +##у +##ф +##х +##ц +##ч +##ш +##щ +##ъ +##э +##ю +##ђ +##є +##і +##ј +##љ +##њ +##ћ +##ӏ +##ա +##բ +##գ +##դ +##ե +##թ +##ի +##լ +##կ +##հ +##մ +##յ +##ն +##ո +##պ +##ս +##վ +##տ +##ր +##ւ +##ք +##־ +##א +##ב +##ג +##ד +##ו +##ז +##ח +##ט +##י +##ך +##כ +##ל +##ם +##מ +##ן +##נ +##ס +##ע +##ף +##פ +##ץ +##צ +##ק +##ר +##ש +##ת +##، +##ء +##ب +##ت +##ث +##ج +##ح +##خ +##ذ +##ز +##س +##ش +##ص +##ض +##ط +##ظ +##ع +##غ +##ـ +##ف +##ق +##ك +##و +##ى +##ٹ +##پ +##چ +##ک +##گ +##ں +##ھ +##ہ +##ے +##अ +##आ +##उ +##ए +##क +##ख +##ग +##च +##ज +##ट +##ड +##ण +##त +##थ +##द +##ध +##न +##प +##ब +##भ +##म +##य +##र +##ल +##व +##श +##ष +##स +##ह +##ा +##ि +##ी +##ो +##। +##॥ +##ং +##অ +##আ +##ই +##উ +##এ +##ও +##ক +##খ +##গ +##চ +##ছ +##জ +##ট +##ড +##ণ +##ত +##থ +##দ +##ধ +##ন +##প +##ব +##ভ +##ম +##য +##র +##ল +##শ +##ষ +##স +##হ +##া +##ি +##ী +##ে +##க +##ச +##ட +##த +##ந +##ன +##ப +##ம +##ய +##ர +##ல +##ள +##வ +##ா +##ி +##ு +##ே +##ை +##ನ +##ರ +##ಾ +##ක +##ය +##ර +##ල +##ව +##ා +##ก +##ง +##ต +##ท +##น +##พ +##ม +##ย +##ร +##ล +##ว +##ส +##อ +##า +##เ +##་ +##། +##ག +##ང +##ད +##ན +##པ +##བ +##མ +##འ +##ར +##ལ +##ས +##မ +##ა +##ბ +##გ +##დ +##ე +##ვ +##თ +##ი +##კ +##ლ +##მ +##ნ +##ო +##რ +##ს +##ტ +##უ +##ᄀ +##ᄂ +##ᄃ +##ᄅ +##ᄆ +##ᄇ +##ᄉ +##ᄊ +##ᄋ +##ᄌ +##ᄎ +##ᄏ +##ᄐ +##ᄑ +##ᄒ +##ᅡ +##ᅢ +##ᅥ +##ᅦ +##ᅧ +##ᅩ +##ᅪ +##ᅭ +##ᅮ +##ᅯ +##ᅲ +##ᅳ +##ᅴ +##ᅵ +##ᆨ +##ᆫ +##ᆯ +##ᆷ +##ᆸ +##ᆼ +##ᴬ +##ᴮ +##ᴰ +##ᴵ +##ᴺ +##ᵀ +##ᵃ +##ᵇ +##ᵈ +##ᵉ +##ᵍ +##ᵏ +##ᵐ +##ᵒ +##ᵖ +##ᵗ +##ᵘ +##ᵣ +##ᵤ +##ᵥ +##ᶜ +##ᶠ +##‐ +##‑ +##‒ +##– +##— +##― +##‖ +##‘ +##’ +##‚ +##“ +##” +##„ +##† +##‡ +##• +##… +##‰ +##′ +##″ +##› +##‿ +##⁄ +##⁰ +##ⁱ +##⁴ +##⁵ +##⁶ +##⁷ +##⁸ +##⁹ +##⁻ +##ⁿ +##₅ +##₆ +##₇ +##₈ +##₉ +##₊ +##₍ +##₎ +##ₐ +##ₑ +##ₒ +##ₓ +##ₕ +##ₖ +##ₗ +##ₘ +##ₚ +##ₛ +##ₜ +##₤ +##₩ +##€ +##₱ +##₹ +##ℓ +##№ +##ℝ +##™ +##⅓ +##⅔ +##← +##↑ +##→ +##↓ +##↔ +##↦ +##⇄ +##⇌ +##⇒ +##∂ +##∅ +##∆ +##∇ +##∈ +##∗ +##∘ +##√ +##∞ +##∧ +##∨ +##∩ +##∪ +##≈ +##≡ +##≤ +##≥ +##⊂ +##⊆ +##⊕ +##⊗ +##⋅ +##─ +##│ +##■ +##▪ +##● +##★ +##☆ +##☉ +##♠ +##♣ +##♥ +##♦ +##♯ +##⟨ +##⟩ +##ⱼ +##⺩ +##⺼ +##⽥ +##、 +##。 +##〈 +##〉 +##《 +##》 +##「 +##」 +##『 +##』 +##〜 +##あ +##い +##う +##え +##お +##か +##き +##く +##け +##こ +##さ +##し +##す +##せ +##そ +##た +##ち +##っ +##つ +##て +##と +##な +##に +##ぬ +##ね +##の +##は +##ひ +##ふ +##へ +##ほ +##ま +##み +##む +##め +##も +##や +##ゆ +##よ +##ら +##り +##る +##れ +##ろ +##を +##ん +##ァ +##ア +##ィ +##イ +##ウ +##ェ +##エ +##オ +##カ +##キ +##ク +##ケ 
+##コ +##サ +##シ +##ス +##セ +##タ +##チ +##ッ +##ツ +##テ +##ト +##ナ +##ニ +##ノ +##ハ +##ヒ +##フ +##ヘ +##ホ +##マ +##ミ +##ム +##メ +##モ +##ャ +##ュ +##ョ +##ラ +##リ +##ル +##レ +##ロ +##ワ +##ン +##・ +##ー +##一 +##三 +##上 +##下 +##不 +##世 +##中 +##主 +##久 +##之 +##也 +##事 +##二 +##五 +##井 +##京 +##人 +##亻 +##仁 +##介 +##代 +##仮 +##伊 +##会 +##佐 +##侍 +##保 +##信 +##健 +##元 +##光 +##八 +##公 +##内 +##出 +##分 +##前 +##劉 +##力 +##加 +##勝 +##北 +##区 +##十 +##千 +##南 +##博 +##原 +##口 +##古 +##史 +##司 +##合 +##吉 +##同 +##名 +##和 +##囗 +##四 +##国 +##國 +##土 +##地 +##坂 +##城 +##堂 +##場 +##士 +##夏 +##外 +##大 +##天 +##太 +##夫 +##奈 +##女 +##子 +##学 +##宀 +##宇 +##安 +##宗 +##定 +##宣 +##宮 +##家 +##宿 +##寺 +##將 +##小 +##尚 +##山 +##岡 +##島 +##崎 +##川 +##州 +##巿 +##帝 +##平 +##年 +##幸 +##广 +##弘 +##張 +##彳 +##後 +##御 +##德 +##心 +##忄 +##志 +##忠 +##愛 +##成 +##我 +##戦 +##戸 +##手 +##扌 +##政 +##文 +##新 +##方 +##日 +##明 +##星 +##春 +##昭 +##智 +##曲 +##書 +##月 +##有 +##朝 +##木 +##本 +##李 +##村 +##東 +##松 +##林 +##森 +##楊 +##樹 +##橋 +##歌 +##止 +##正 +##武 +##比 +##氏 +##民 +##水 +##氵 +##氷 +##永 +##江 +##沢 +##河 +##治 +##法 +##海 +##清 +##漢 +##瀬 +##火 +##版 +##犬 +##王 +##生 +##田 +##男 +##疒 +##発 +##白 +##的 +##皇 +##目 +##相 +##省 +##真 +##石 +##示 +##社 +##神 +##福 +##禾 +##秀 +##秋 +##空 +##立 +##章 +##竹 +##糹 +##美 +##義 +##耳 +##良 +##艹 +##花 +##英 +##華 +##葉 +##藤 +##行 +##街 +##西 +##見 +##訁 +##語 +##谷 +##貝 +##貴 +##車 +##軍 +##辶 +##道 +##郎 +##郡 +##部 +##都 +##里 +##野 +##金 +##鈴 +##镇 +##長 +##門 +##間 +##阝 +##阿 +##陳 +##陽 +##雄 +##青 +##面 +##風 +##食 +##香 +##馬 +##高 +##龍 +##龸 +##fi +##fl +##! +##( +##) +##, +##- +##. +##/ +##: +##? +##~ diff --git a/Checkpoint/keras_metadata.pb b/Checkpoint/keras_metadata.pb new file mode 100644 index 0000000000000000000000000000000000000000..ee042498f6dc895f288a8ed729bfcb433460b3dc --- /dev/null +++ b/Checkpoint/keras_metadata.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38bb0f1231a198848366566e176c8948dceab7b085b658d00550a83f784731f5 +size 11535 diff --git a/Checkpoint/saved_model.pb b/Checkpoint/saved_model.pb new file mode 100644 index 0000000000000000000000000000000000000000..bb7388cbced23391a7f33b312d2d4f3f2eab8f78 --- /dev/null +++ b/Checkpoint/saved_model.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0054848283b4fb79fefcebe71830bedb75e023ad04c5655adbc6a2ddd1e2c60 +size 11477628 diff --git a/Checkpoint/variables/variables.data-00000-of-00001 b/Checkpoint/variables/variables.data-00000-of-00001 new file mode 100644 index 0000000000000000000000000000000000000000..c5c08b8135a9d7d878bac175a274e27ae754330f --- /dev/null +++ b/Checkpoint/variables/variables.data-00000-of-00001 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4ec6776ca3161577663eaa115fb9f965304670a1af8db7a37e9499a23082e67 +size 1389095096 diff --git a/Checkpoint/variables/variables.index b/Checkpoint/variables/variables.index new file mode 100644 index 0000000000000000000000000000000000000000..6ddf498b4fb3d44a92db6055388d864901e40012 Binary files /dev/null and b/Checkpoint/variables/variables.index differ diff --git a/ETC/fun_advaitbert.py b/ETC/fun_advaitbert.py new file mode 100644 index 0000000000000000000000000000000000000000..ea1922585838ab226750db8c62dd7e240633cac2 --- /dev/null +++ b/ETC/fun_advaitbert.py @@ -0,0 +1,339 @@ +import pandas as pd +import numpy as np +import tensorflow as tf +import tensorflow_hub as hub +import sys +import random +sys.path.append('models') +from official.nlp.data import classifier_data_lib +from official.nlp.bert import tokenization +from official.nlp import optimization +tf.get_logger().setLevel('ERROR') +from huggingface_hub import InferenceClient +import math 
+import gradio as gr + +num_warmup_steps=1 +num_train_steps=1 +init_lr = 3e-5 +optimizer = optimization.create_optimizer(init_lr=init_lr,num_train_steps=num_train_steps,num_warmup_steps=num_warmup_steps,optimizer_type='adamw') + +### Load Model +checkpoint_filepath=r'./Checkpoint' +model = tf.keras.models.load_model(checkpoint_filepath, custom_objects={'KerasLayer':hub.KerasLayer , 'AdamWeightDecay': optimizer}) + +df_report = pd.read_csv('./CTH_Description.csv') +df_report['CTH Code'] = df_report['CTH Code'].astype(str).str.zfill(8) + +df_report_DUTY = pd.read_csv('./CTH_WISE_DUTY_RATE.csv') +df_report_DUTY['CTH'] = df_report_DUTY['CTH'].astype(str).str.zfill(8) + +df = pd.read_csv("./CTH_CODE_MAP.csv") +df['CTH'] = df['CTH'].astype(str).str.zfill(8) +df = df[['CTH', 'code']] + +client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1") + + + +class_names=df[['CTH','code']].drop_duplicates(subset='CTH').sort_values(by='code',ignore_index=True)['CTH'].values.tolist() +label_list=list(range(0,len(class_names))) +max_seq_length = 200 # maximum length of (token) input sequences . it can be any number +train_batch_size = 32 # batch size ( 16 choosen to avoid Out-Of-Memory errors) + +# Get BERT layer and tokenizer: +# More details here: https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/4 +bert_layer = hub.KerasLayer("https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/4" , trainable = True) +vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy() +do_lower_case = bert_layer.resolved_object.do_lower_case.numpy() +tokenizer = tokenization.FullTokenizer(vocab_file , do_lower_case) + +# This provides a function to convert each row to input features and label ( as required by BERT) + +max_seq_length = 200 # maximum length of (token) input sequences . 
it can be any number +def to_feature(text, label, label_list=label_list, max_seq_length=max_seq_length, tokenizer=tokenizer): + example = classifier_data_lib.InputExample(guid = None, + text_a = text.numpy(), + text_b = None, + label = label.numpy()) + feature = classifier_data_lib.convert_single_example(0 , example , label_list , max_seq_length , tokenizer) + + return (feature.input_ids , feature.input_mask , feature.segment_ids , feature.label_id) + + +def to_feature_map(text, label): + input_ids , input_mask , segment_ids , label_id = tf.py_function(to_feature , inp = [text , label], + Tout = [tf.int32 , tf.int32 , tf.int32 , tf.int32]) + + input_ids.set_shape([max_seq_length]) + input_mask.set_shape([max_seq_length]) + segment_ids.set_shape([max_seq_length]) + label_id.set_shape([]) + + x = { + "input_word_ids": input_ids, + "input_mask": input_mask, + "input_type_ids": segment_ids + } + + return(x,label_id) + +def print3largest(arr, arr_size): + third = first = second = -sys.maxsize + for i in range(0, arr_size): + + if (arr[i] > first): + third = second + second = first + first = arr[i] + elif (arr[i] > second): + third = second + second = arr[i] + elif (arr[i] > third): + third = arr[i] + pred_value_max_three=[first, second, third] + return pred_value_max_three + +def count_special_character(string): + special_char= 0 + for i in range(len(string)): + ch = string[i] + if (string[i].isalpha()): + continue + else: + special_char += 1 + + if len(string)==special_char: + return False + else: + return True + +def format_prompt(message, history): + prompt = "" + for user_prompt, bot_response in history: + prompt += f"[INST] {user_prompt} [/INST]" + prompt += f" {bot_response} " + prompt += f"[INST] {message} [/INST]" + return prompt + + +additional_inputs=[ + gr.Textbox( + label="System Prompt", + max_lines=1, + interactive=True, + ), + gr.Slider( + label="Temperature", + value=0.9, + minimum=0.0, + maximum=1.0, + step=0.05, + interactive=True, + info="Higher values produce more diverse outputs", + ), + gr.Slider( + label="Max new tokens", + value=1024, + minimum=0, + maximum=4096, + step=64, + interactive=True, + info="The maximum numbers of new tokens", + ), + gr.Slider( + label="Top-p (nucleus sampling)", + value=0.90, + minimum=0.0, + maximum=1, + step=0.05, + interactive=True, + info="Higher values sample more low-probability tokens", + ), + gr.Slider( + label="Repetition penalty", + value=1.2, + minimum=1.0, + maximum=2.0, + step=0.05, + interactive=True, + info="Penalize repeated tokens", + ) +] + +def predict_CTH(txt): + print('Desc: ',txt) + if (txt!='') and len(txt)>=3 and (count_special_character(txt)): + valid_data = tf.data.Dataset.from_tensor_slices(([txt] , [1])) # 1 refers to 'entertainment' and 2 refers to 'sport' + valid_data = (valid_data.map(to_feature_map).batch(1)) + preds = model.predict(valid_data) + predicted_values = tf.nn.softmax(preds) + arr = predicted_values.numpy().tolist()[0] + n = len(arr) + pred_value_max_three=print3largest(arr, n) + + sum_all = pred_value_max_three[0] + pred_value_max_three[1] + pred_value_max_three[2] + + val_1 = pred_value_max_three[0]/sum_all + val_2 = pred_value_max_three[1]/sum_all + val_3 = pred_value_max_three[2]/sum_all + + if pred_value_max_three[0]<=0.000131: + Var_CTH=[] + Var_desc=[] + Var_duty=[] + pred_duty='' + pred_desc='' + pred_CTH='' + + return{'Not a adequate description':float(1.0)} + else: + Var_CTH=[] + Var_desc=[] + Var_duty=[] + pred_duty='' + pred_desc='' + pred_CTH='' + + for i in pred_value_max_three: + 
predicted_code=np.where(predicted_values.numpy()==i)[1][0] + pred_CTH=df[df['code'] == predicted_code]['CTH'].iloc[0] + + try: + pred_duty=df_report_DUTY[df_report_DUTY['CTH']==str(pred_CTH)]['DUTY_RATE'].iloc[0] + pred_desc=df_report[df_report['CTH Code']==str(pred_CTH)]['Concat Description'].iloc[0] + except: + pass + + Var_CTH.append(pred_CTH) + Var_desc.append(pred_desc) + Var_duty.append(pred_duty) + + P1 ='CTH: '+str(Var_CTH[0])+' Duty Rate(%): '+ str(Var_duty[0]) + P2 ='CTH: '+str(Var_CTH[1])+' Duty Rate(%): '+ str(Var_duty[1]) + P3 ='CTH: '+str(Var_CTH[2])+' Duty Rate(%): '+ str(Var_duty[2]) + + Q1='Desc: '+str(Var_desc[0]) + Q2='Desc: '+str(Var_desc[1]) + Q3='Desc: '+str(Var_desc[2]) + + return {str(P1):float(val_1),str(Q1):float(val_1), + str(P2):float(val_2),str(Q2):float(val_2), + str(P3):float(val_3),str(Q3):float(val_3),} + else: + return{'Enter Correct Description':float(1.0)} + +def llm_model_function(txt,history,chatbot=[], temperature=0.9, max_new_tokens=1024, top_p=0.95, repetition_penalty=1.0,): + system_prompt=[] + if (txt!='') and len(txt)>=3 and (count_special_character(txt)): + valid_data = tf.data.Dataset.from_tensor_slices(([txt] , [1])) # 1 refers to 'entertainment' and 2 refers to 'sport' + valid_data = (valid_data.map(to_feature_map).batch(1)) + preds = model.predict(valid_data) + predicted_values = tf.nn.softmax(preds) + arr = predicted_values.numpy().tolist()[0] + n = len(arr) + pred_value_max_three=print3largest(arr, n) + + sum_all = pred_value_max_three[0] + pred_value_max_three[1] + pred_value_max_three[2] + + val_1 = pred_value_max_three[0]/sum_all + val_2 = pred_value_max_three[1]/sum_all + val_3 = pred_value_max_three[2]/sum_all + + if pred_value_max_three[0]<=0.000131: + Var_CTH=[] + Var_desc=[] + Var_duty=[] + pred_duty='' + pred_desc='' + pred_CTH='' + + return{'Not a adequate description':float(1.0)} + else: + Var_CTH=[] + Var_desc=[] + Var_duty=[] + pred_duty='' + pred_desc='' + pred_CTH='' + + for i in pred_value_max_three: + predicted_code=np.where(predicted_values.numpy()==i)[1][0] + pred_CTH=df[df['code'] == predicted_code]['CTH'].iloc[0] + + try: + pred_duty=df_report_DUTY[df_report_DUTY['CTH']==str(pred_CTH)]['DUTY_RATE'].iloc[0] + pred_desc=df_report[df_report['CTH Code']==str(pred_CTH)]['Concat Description'].iloc[0] + except: + pass + + Var_CTH.append(pred_CTH) + Var_desc.append(pred_desc) + Var_duty.append(pred_duty) + + P1 ='CTH: '+str(Var_CTH[0])+' Duty Rate(%): '+ str(Var_duty[0]) + P2 ='CTH: '+str(Var_CTH[1])+' Duty Rate(%): '+ str(Var_duty[1]) + P3 ='CTH: '+str(Var_CTH[2])+' Duty Rate(%): '+ str(Var_duty[2]) + + Q1='Desc: '+str(Var_desc[0]) + Q2='Desc: '+str(Var_desc[1]) + Q3='Desc: '+str(Var_desc[2]) + + output_str_msg='1. '+str(P1)+' '+str(Q1)+' '+'2. '+str(P2)+' '+str(Q2)+' '+'3. '+str(P3)+' '+str(Q3) + + prompt=f'First Explain What is the product- {txt}. Which is the most appropriate 8 Digit classification code out of the three given below classes. Explain the reason step by step. if none of the three classification is applicable more precisely due to lack of any additional information, tell you need additional information and what is the that additional information. {output_str_msg} ?' 
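+            # Note: a sketch of what happens to this prompt next, based on the helpers defined
+            # earlier in this file. output_str_msg, assembled just above from P1..P3 / Q1..Q3,
+            # already lists the three highest-scoring CTH candidates with their duty rates and
+            # descriptions, so the Mixtral call only has to reason over those candidates.
+            # Below, format_prompt() wraps the text in one "[INST] ... [/INST]" pair per history
+            # turn plus one for the current message, and client.text_generation(..., stream=True)
+            # yields tokens that are accumulated into `output` and finally appended to `chatbot`
+            # as a (user_message, bot_reply) tuple.
+            #
+            # Illustrative shape of the wrapped request for an empty history (placeholder values,
+            # not real data; the leading ", " comes from the format_prompt(f", {prompt}", history)
+            # call further down):
+            #
+            #   [INST] , First Explain What is the product- <user description>. Which is the most
+            #   appropriate 8 Digit classification code out of the three given below classes. ...
+            #   1. CTH: <code> Duty Rate(%): <rate> Desc: <text> 2. ... 3. ... ? [/INST]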
+ + temperature = float(temperature) + if temperature < 1e-2: + temperature = 1e-2 + top_p = float(top_p) + + generate_kwargs = dict( + temperature=temperature, + max_new_tokens=max_new_tokens, + top_p=top_p, + repetition_penalty=repetition_penalty, + do_sample=True, + seed=42, + ) + + formatted_prompt = format_prompt(f", {prompt}", history) + stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False) + output = "" + for response in stream: + output += response.token.text + + chatbot.append((txt, output)) + return "", chatbot + else: + warning_msg = f"Unexpected response" + raise gr.Error(warning_msg) + +def product_explaination(txt,history,chatbot=[], temperature=0.9, max_new_tokens=1024, top_p=0.95, repetition_penalty=1.0,): + print('Input Descrption is:',txt) + prompt=f'What is the product- {txt}?' + print('prompt',prompt) + temperature = float(temperature) + if temperature < 1e-2: + temperature = 1e-2 + top_p = float(top_p) + + generate_kwargs = dict( + temperature=temperature, + max_new_tokens=max_new_tokens, + top_p=top_p, + repetition_penalty=repetition_penalty, + do_sample=True, + seed=42, + ) + + formatted_prompt = format_prompt(f", {prompt}", history) + + stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False) + output = "" + + for response in stream: + output += response.token.text + + chatbot.append((txt, output)) + return "", chatbot \ No newline at end of file diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..0854222270a0144674a51b89746e0ad7a8ea99fb --- /dev/null +++ b/app.py @@ -0,0 +1,91 @@ +import gradio as gr + +from fun_advaitbert import predict_CTH +from fun_advaitbert import llm_model_function +from fun_advaitbert import product_explaination + +title="

AdvaitBERT: HS Code AI Explainability Through Mixtral 46.7B

" +description = """ +AdvaitBERT is modified version of BERT (Bidirectional Encoder Representation for Transformers), \ +finetuned on the Text corpus of Indian Customs Declarations. It is trained for performing \ +downstream tasks like automating the tariff classification and validation process of Customs \ +declarations in realtime. This model may help Customs administration to efficiently use AI assisted \ +NLP in realtime Customs process like Assessment, Post Clearance Audit, thereby highlighting classification \ +inconsistencies and help in revenue augmentation. +""" + +article="

Powered by NCTC

" + + +css = """ +.gradio-container { + width: 100vw !important; + min-height: 100vh !important; + padding:0 !important; + margin:0 !important; + max-width: none !important; +} +""" + +footnote = """Note: All rights, including licensing and acceptable use policies, related to the AI models, can be found on their respective model pages on Hugging Face. Powered by NCTC +""" + +#Powered by NCTC + +# input_txt=gr.Textbox(label='Enter Your Product Descrption',lines=3,) +# textbox = gr.Textbox(container=False,placeholder='Enter text and click the Submit button or press Enter') + +textbox = gr.Textbox(label='Enter Your Product Descrption',lines=3,) +textbox_2=textbox + +print('textbox',textbox) +print('textbox_2',textbox_2) + +chat_prod = gr.Chatbot(label="Product Explanation", layout='panel') #height=300 +#chat_Advait = gr.Chatbot(label="Advaitbert Prediction", layout='panel') +chat_alpha = gr.Chatbot(label="AI Explanability", layout='panel') +chat_Advait=gr.Interface(predict_CTH,inputs=textbox,outputs="label",) + + +submit = gr.Button('Submit', variant='primary',) +submit_second = gr.Button('Submit', variant='secondary',) +#submit2 = gr.Button('Submit', variant='primary',) +retry = gr.Button('🔄Retry', variant='secondary') +undo = gr.Button('↩️Undo', variant='secondary') + +with gr.Blocks(css=css) as demo: + gr.HTML(f'

{title}

') + gr.Markdown(description) + + with gr.Row(): + with gr.Column(scale=0,min_width=600): + chat_Advait.render() + + with gr.Column(scale=1,min_width=600): + chat_alpha.render() + with gr.Row(equal_height=True): + with gr.Column(scale=1): + submit.render() + with gr.Column(scale=1): + undo.render() + with gr.Column(scale=1): + clear = gr.ClearButton(value='🗑️Clear',components=[chat_alpha,chat_prod,textbox]) + chat_prod.render() + #submit_second.render() + + gr.Markdown(footnote) + textbox.submit(llm_model_function, [textbox, chat_alpha], [textbox, chat_alpha]) + textbox_2.submit(product_explaination, [textbox_2, chat_prod], [textbox_2, chat_prod]) + + submit.click(llm_model_function,[textbox, chat_alpha], [textbox, chat_alpha]) + submit.click(product_explaination,[textbox_2, chat_prod], [textbox_2, chat_prod]) + + undo.click(lambda x:x[:-1], [chat_alpha], [chat_alpha]) + undo.click(lambda x:x[:-1], [chat_prod], [chat_prod]) + + gr.Examples([ + ['200 SI/SI/SI LPO ALUMINIUM LIDS (QTY: 8820000 PCS/PRICE: 21.'], + ], + textbox) + +demo.launch(debug=True) \ No newline at end of file diff --git a/fun_advaitbert.py b/fun_advaitbert.py new file mode 100644 index 0000000000000000000000000000000000000000..7e66e76f09ef3f55755f92fbdc55e450278d9f13 --- /dev/null +++ b/fun_advaitbert.py @@ -0,0 +1,344 @@ +import pandas as pd +import numpy as np +import tensorflow as tf +import tensorflow_hub as hub +import sys +import random +sys.path.append('models') +from official.nlp.data import classifier_data_lib +from official.nlp.bert import tokenization +from official.nlp import optimization +tf.get_logger().setLevel('ERROR') +from huggingface_hub import InferenceClient +import math +import gradio as gr + +num_warmup_steps=1 +num_train_steps=1 +init_lr = 3e-5 +optimizer = optimization.create_optimizer(init_lr=init_lr,num_train_steps=num_train_steps,num_warmup_steps=num_warmup_steps,optimizer_type='adamw') + +### Load Model +checkpoint_filepath=r'./Checkpoint' +model = tf.keras.models.load_model(checkpoint_filepath, custom_objects={'KerasLayer':hub.KerasLayer , 'AdamWeightDecay': optimizer}) + +df_report = pd.read_csv('./CTH_Description.csv') +df_report['CTH Code'] = df_report['CTH Code'].astype(str).str.zfill(8) + +df_report_DUTY = pd.read_csv('./CTH_WISE_DUTY_RATE.csv') +df_report_DUTY['CTH'] = df_report_DUTY['CTH'].astype(str).str.zfill(8) + +df = pd.read_csv("./CTH_CODE_MAP.csv") +df['CTH'] = df['CTH'].astype(str).str.zfill(8) +df = df[['CTH', 'code']] + +client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1") + + +class_names=df[['CTH','code']].drop_duplicates(subset='CTH').sort_values(by='code',ignore_index=True)['CTH'].values.tolist() +label_list=list(range(0,len(class_names))) +max_seq_length = 200 # maximum length of (token) input sequences . it can be any number +train_batch_size = 32 # batch size ( 16 choosen to avoid Out-Of-Memory errors) + +# Get BERT layer and tokenizer: +# More details here: https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/4 +bert_layer = hub.KerasLayer("https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/4" , trainable = True) +vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy() +do_lower_case = bert_layer.resolved_object.do_lower_case.numpy() +tokenizer = tokenization.FullTokenizer(vocab_file , do_lower_case) + +# This provides a function to convert each row to input features and label ( as required by BERT) + +max_seq_length = 200 # maximum length of (token) input sequences . 
it can be any number +def to_feature(text, label, label_list=label_list, max_seq_length=max_seq_length, tokenizer=tokenizer): + example = classifier_data_lib.InputExample(guid = None, + text_a = text.numpy(), + text_b = None, + label = label.numpy()) + feature = classifier_data_lib.convert_single_example(0 , example , label_list , max_seq_length , tokenizer) + + return (feature.input_ids , feature.input_mask , feature.segment_ids , feature.label_id) + + +def to_feature_map(text, label): + input_ids , input_mask , segment_ids , label_id = tf.py_function(to_feature , inp = [text , label], + Tout = [tf.int32 , tf.int32 , tf.int32 , tf.int32]) + + input_ids.set_shape([max_seq_length]) + input_mask.set_shape([max_seq_length]) + segment_ids.set_shape([max_seq_length]) + label_id.set_shape([]) + + x = { + "input_word_ids": input_ids, + "input_mask": input_mask, + "input_type_ids": segment_ids + } + + return(x,label_id) + + +def find_max_10_with_position(arr, arr_size): + max_values_with_position = [(-sys.maxsize, -1)] * 10 + + for i in range(arr_size): + for j in range(5): + value, position = max_values_with_position[j] + if arr[i] > value: + max_values_with_position[j+1:] = max_values_with_position[j:9] + max_values_with_position[j] = (arr[i], i) + break + + return max_values_with_position + +def count_special_character(string): + special_char= 0 + for i in range(len(string)): + ch = string[i] + if (string[i].isalpha()): + continue + else: + special_char += 1 + + if len(string)==special_char: + return False + else: + return True + +def format_prompt(message, history): + prompt = "" + for user_prompt, bot_response in history: + prompt += f"[INST] {user_prompt} [/INST]" + prompt += f" {bot_response} " + prompt += f"[INST] {message} [/INST]" + return prompt + + +additional_inputs=[ + gr.Textbox( + label="System Prompt", + max_lines=1, + interactive=True, + ), + gr.Slider( + label="Temperature", + value=0.5, + minimum=0.0, + maximum=1.0, + step=0.05, + interactive=True, + info="Higher values produce more diverse outputs", + ), + gr.Slider( + label="Max new tokens", + value=1024, + minimum=0, + maximum=4096, + step=64, + interactive=True, + info="The maximum numbers of new tokens", + ), + gr.Slider( + label="Top-p (nucleus sampling)", + value=0.90, + minimum=0.0, + maximum=1, + step=0.05, + interactive=True, + info="Higher values sample more low-probability tokens", + ), + gr.Slider( + label="Repetition penalty", + value=1.2, + minimum=1.0, + maximum=2.0, + step=0.05, + interactive=True, + info="Penalize repeated tokens", + ) +] + +def predict_CTH(txt): + print('Desc: ',txt) + global output_str_msg + if (txt!='') and len(txt)>=3 and (count_special_character(txt)): + valid_data = tf.data.Dataset.from_tensor_slices(([txt] , [1])) # 1 refers to 'entertainment' and 2 refers to 'sport' + valid_data = (valid_data.map(to_feature_map).batch(1)) + preds = model.predict(valid_data) + predicted_values = tf.nn.softmax(preds) + arr = predicted_values.numpy().tolist()[0] + n = len(arr) + + pred_value_max=find_max_10_with_position(arr, n) + + sum_all = 0 + for i in range(10): + sum_all += pred_value_max[i][0] + + + val_1 = pred_value_max[0][0]/sum_all + val_2 = pred_value_max[1][0]/sum_all + val_3 = pred_value_max[2][0]/sum_all + val_4 = pred_value_max[3][0]/sum_all + val_5 = pred_value_max[4][0]/sum_all + val_6 = pred_value_max[5][0]/sum_all + val_7 = pred_value_max[6][0]/sum_all + val_8 = pred_value_max[7][0]/sum_all + val_9 = pred_value_max[8][0]/sum_all + val_10 = pred_value_max[9][0]/sum_all + + if 
pred_value_max[0][0]<=0.000131: + Var_CTH=[] + Var_desc=[] + Var_duty=[] + pred_duty='' + pred_desc='' + pred_CTH='' + + output_str_msg='Not a adequate description' + + return{'Not a adequate description':float(1.0)} + else: + Var_CTH=[] + Var_desc=[] + Var_duty=[] + pred_duty='' + pred_desc='' + pred_CTH='' + + for i in range(len(pred_value_max)): + #predicted_code=np.where(predicted_values.numpy()==i)[1][0] + predicted_code=pred_value_max[i][1] + pred_CTH=df[df['code'] == predicted_code]['CTH'].iloc[0] + + try: + pred_duty=df_report_DUTY[df_report_DUTY['CTH']==str(pred_CTH)]['DUTY_RATE'].iloc[0] + pred_desc=df_report[df_report['CTH Code']==str(pred_CTH)]['Concat Description'].iloc[0] + except: + pred_desc='' + pred_duty='' + pass + + Var_CTH.append(pred_CTH) + Var_desc.append(pred_desc) + Var_duty.append(pred_duty) + + P1 ='CTH: '+str(Var_CTH[0])+' Duty Rate(%): '+ str(Var_duty[0]) + P2 ='CTH: '+str(Var_CTH[1])+' Duty Rate(%): '+ str(Var_duty[1]) + P3 ='CTH: '+str(Var_CTH[2])+' Duty Rate(%): '+ str(Var_duty[2]) + P4 ='CTH: '+str(Var_CTH[3])+' Duty Rate(%): '+ str(Var_duty[3]) + P5 ='CTH: '+str(Var_CTH[4])+' Duty Rate(%): '+ str(Var_duty[4]) + P6 ='CTH: '+str(Var_CTH[5])+' Duty Rate(%): '+ str(Var_duty[5]) + P7 ='CTH: '+str(Var_CTH[6])+' Duty Rate(%): '+ str(Var_duty[6]) + P8 ='CTH: '+str(Var_CTH[7])+' Duty Rate(%): '+ str(Var_duty[7]) + P9 ='CTH: '+str(Var_CTH[8])+' Duty Rate(%): '+ str(Var_duty[8]) + P10 ='CTH: '+str(Var_CTH[9])+' Duty Rate(%): '+ str(Var_duty[9]) + + Q1='Desc: '+str(Var_desc[0]) + Q2='Desc: '+str(Var_desc[1]) + Q3='Desc: '+str(Var_desc[2]) + Q4='Desc: '+str(Var_desc[3]) + Q5='Desc: '+str(Var_desc[4]) + Q6='Desc: '+str(Var_desc[5]) + Q7='Desc: '+str(Var_desc[6]) + Q8='Desc: '+str(Var_desc[7]) + Q9='Desc: '+str(Var_desc[8]) + Q10='Desc: '+str(Var_desc[9]) + + output_str_msg = ( + f'1. {P1} {Q1} ' + f'2. {P2} {Q2} ' + f'3. {P3} {Q3} ' + f'4. {P4} {Q4} ' + f'5. {P5} {Q5} ' + f'6. {P6} {Q6} ' + f'7. {P7} {Q7} ' + f'8. {P8} {Q8} ' + f'9. {P9} {Q9} ' + f'10. {P10} {Q10}') + + print('output_str_msg',output_str_msg) + + return {str(P1):float(val_1),str(Q1):float(val_1), + str(P2):float(val_2),str(Q2):float(val_2), + str(P3):float(val_3),str(Q3):float(val_3), + str(P4):float(val_4),str(Q4):float(val_4), + str(P5):float(val_5),str(Q5):float(val_5), + str(P6):float(val_6),str(Q6):float(val_6), + str(P7):float(val_7),str(Q7):float(val_7), + str(P8):float(val_8),str(Q8):float(val_8), + str(P9):float(val_9),str(Q9):float(val_9), + str(P10):float(val_10),str(Q10):float(val_10),} + else: + output_str_msg='Not a adequate description' + return{'Enter Correct Description':float(1.0)} + +def llm_model_function(txt,history,chatbot=[], temperature=0.9, max_new_tokens=1024, top_p=0.95, repetition_penalty=1.0,): + system_prompt=[] + chatbot=[] + + global output_str_msg + + print('output_str_msg',output_str_msg) + + if output_str_msg!='Not a adequate description': + + prompt=f'First Explain What is the product- {txt}. Which is the most appropriate 8 Digit classification code out of the three given below classes. Explain the reason step by step. if none of the three classification is applicable more precisely due to lack of any additional information, tell you need additional information and what is the that additional information. {output_str_msg} ?' 
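+        # Note: a sketch of the data flow feeding this prompt, based on the functions defined
+        # earlier in this file. output_str_msg is a module-level global that predict_CTH()
+        # fills with up to ten numbered candidates ("1. CTH: <code> Duty Rate(%): <rate>
+        # Desc: <text> ... 10. ..."), so it only holds a meaningful value once predict_CTH()
+        # has been run on the same description. The wording above still says "three given
+        # below classes" (the same phrasing as the three-candidate ETC/fun_advaitbert.py
+        # variant) even though ten candidates are now interpolated via {output_str_msg}.
+        # Also note that find_max_10_with_position() only insertion-sorts into its first five
+        # slots (range(5)), so entries 6 to 10 are values displaced from that top-five prefix
+        # and can remain at the (-sys.maxsize, -1) sentinel when fewer than ten values ever
+        # enter it. As in the ETC copy of this module, format_prompt() then wraps everything
+        # in "[INST] ... [/INST]" turns and client.text_generation(..., stream=True) streams
+        # the reply token by token into `output`.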
+ + temperature = float(temperature) + if temperature < 1e-2: + temperature = 1e-2 + top_p = float(top_p) + + generate_kwargs = dict( + temperature=temperature, + max_new_tokens=max_new_tokens, + top_p=top_p, + repetition_penalty=repetition_penalty, + do_sample=True, + seed=42, + ) + + formatted_prompt = format_prompt(f", {prompt}", history) + stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False) + output = "" + for response in stream: + output += response.token.text + + chatbot.append((txt, output)) + return "", chatbot + else: + # warning_msg = f"Unexpected response" + # raise gr.Error(warning_msg) + chatbot.append(('Not a adequate description', 'Not a adequate description')) + return "", chatbot + +def product_explaination(txt,history,chatbot=[], temperature=0.9, max_new_tokens=1024, top_p=0.95, repetition_penalty=1.0,): + print('Input Descrption is:',txt) + chatbot=[] + prompt=f'What is the product- {txt}?' + #print('prompt',prompt) + temperature = float(temperature) + if temperature < 1e-2: + temperature = 1e-2 + top_p = float(top_p) + + generate_kwargs = dict( + temperature=temperature, + max_new_tokens=max_new_tokens, + top_p=top_p, + repetition_penalty=repetition_penalty, + do_sample=True, + seed=42, + ) + + formatted_prompt = format_prompt(f", {prompt}", history) + + stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False) + output = "" + + for response in stream: + output += response.token.text + + chatbot.append((txt, output)) + return "", chatbot \ No newline at end of file diff --git a/models/.github/ISSUE_TEMPLATE/00-official-bug-report-issue.md b/models/.github/ISSUE_TEMPLATE/00-official-bug-report-issue.md new file mode 100644 index 0000000000000000000000000000000000000000..51e08c26db66114de0b604bf0cc5c461311a0b4f --- /dev/null +++ b/models/.github/ISSUE_TEMPLATE/00-official-bug-report-issue.md @@ -0,0 +1,59 @@ +--- +name: "[Official Model] Bug Report" +about: Use this template for reporting a bug for the “official” directory +labels: type:bug,models:official + +--- + +# Prerequisites + +Please answer the following questions for yourself before submitting an issue. + +- [ ] I am using the latest TensorFlow Model Garden release and TensorFlow 2. +- [ ] I am reporting the issue to the correct repository. (Model Garden official or research directory) +- [ ] I checked to make sure that this issue has not been filed already. + +## 1. The entire URL of the file you are using + +https://github.com/tensorflow/models/tree/master/official/... + +## 2. Describe the bug + +A clear and concise description of what the bug is. + +## 3. Steps to reproduce + +Steps to reproduce the behavior. + +## 4. Expected behavior + +A clear and concise description of what you expected to happen. + +## 5. Additional context + +Include any logs that would be helpful to diagnose the problem. + +## 6. 
System information + +- OS Platform and Distribution (e.g., Linux Ubuntu 16.04): +- Mobile device name if the issue happens on a mobile device: +- TensorFlow installed from (source or binary): +- TensorFlow version (use command below): +- Python version: +- Bazel version (if compiling from source): +- GCC/Compiler version (if compiling from source): +- CUDA/cuDNN version: +- GPU model and memory: + + diff --git a/models/.github/ISSUE_TEMPLATE/10-official-documentation-issue.md b/models/.github/ISSUE_TEMPLATE/10-official-documentation-issue.md new file mode 100644 index 0000000000000000000000000000000000000000..00d79a16916c327d2d8a729791db7d7d3d96b735 --- /dev/null +++ b/models/.github/ISSUE_TEMPLATE/10-official-documentation-issue.md @@ -0,0 +1,20 @@ +--- +name: "[Official Model] Documentation Issue" +about: Use this template for reporting a documentation issue for the “official” directory +labels: type:docs,models:official + +--- + +# Prerequisites + +Please answer the following question for yourself before submitting an issue. + +- [ ] I checked to make sure that this issue has not been filed already. + +## 1. The entire URL of the documentation with the issue + +https://github.com/tensorflow/models/tree/master/official/... + +## 2. Describe the issue + +A clear and concise description of what needs to be changed. diff --git a/models/.github/ISSUE_TEMPLATE/20-official-feature-request-issue.md b/models/.github/ISSUE_TEMPLATE/20-official-feature-request-issue.md new file mode 100644 index 0000000000000000000000000000000000000000..02d8cab52218202707646345a4ab2570519660dd --- /dev/null +++ b/models/.github/ISSUE_TEMPLATE/20-official-feature-request-issue.md @@ -0,0 +1,26 @@ +--- +name: "[Official Model] Feature request" +about: Use this template for raising a feature request for the “official” directory +labels: type:feature,models:official + +--- + +# Prerequisites + +Please answer the following question for yourself before submitting an issue. + +- [ ] I checked to make sure that this feature has not been requested already. + +## 1. The entire URL of the file you are using + +https://github.com/tensorflow/models/tree/master/official/... + +## 2. Describe the feature you request + +A clear and concise description of what you want to happen. + +## 3. Additional context + +Add any other context about the feature request here. + +## 4. Are you willing to contribute it? (Yes or No) diff --git a/models/.github/ISSUE_TEMPLATE/30-research-bug-report-issue.md b/models/.github/ISSUE_TEMPLATE/30-research-bug-report-issue.md new file mode 100644 index 0000000000000000000000000000000000000000..4448ed9e40d6a089b84881635c2ee0f53524ae61 --- /dev/null +++ b/models/.github/ISSUE_TEMPLATE/30-research-bug-report-issue.md @@ -0,0 +1,58 @@ +--- +name: "[Research Model] Bug Report" +about: Use this template for reporting a bug for the “research” directory +labels: type:bug,models:research + +--- +# Prerequisites + +Please answer the following questions for yourself before submitting an issue. + +- [ ] I am using the latest TensorFlow Model Garden release and TensorFlow 2. +- [ ] I am reporting the issue to the correct repository. (Model Garden official or research directory) +- [ ] I checked to make sure that this issue has not already been filed. + +## 1. The entire URL of the file you are using + +https://github.com/tensorflow/models/tree/master/research/... + +## 2. Describe the bug + +A clear and concise description of what the bug is. + +## 3. Steps to reproduce + +Steps to reproduce the behavior. + +## 4. 
Expected behavior + +A clear and concise description of what you expected to happen. + +## 5. Additional context + +Include any logs that would be helpful to diagnose the problem. + +## 6. System information + +- OS Platform and Distribution (e.g., Linux Ubuntu 16.04): +- Mobile device name if the issue happens on a mobile device: +- TensorFlow installed from (source or binary): +- TensorFlow version (use command below): +- Python version: +- Bazel version (if compiling from source): +- GCC/Compiler version (if compiling from source): +- CUDA/cuDNN version: +- GPU model and memory: + + diff --git a/models/.github/ISSUE_TEMPLATE/40-research-documentation-issue.md b/models/.github/ISSUE_TEMPLATE/40-research-documentation-issue.md new file mode 100644 index 0000000000000000000000000000000000000000..26adfd83e1fbe27d045ecd8dfccef91bbd27fcf1 --- /dev/null +++ b/models/.github/ISSUE_TEMPLATE/40-research-documentation-issue.md @@ -0,0 +1,20 @@ +--- +name: "[Research Model] Documentation Issue" +about: Use this template for reporting a documentation issue for the “research” directory +labels: type:docs,models:research + +--- + +# Prerequisites + +Please answer the following question for yourself before submitting an issue. + +- [ ] I checked to make sure that this issue has not been filed already. + +## 1. The entire URL of the documentation with the issue + +https://github.com/tensorflow/models/tree/master/research/... + +## 2. Describe the issue + +A clear and concise description of what needs to be changed. diff --git a/models/.github/ISSUE_TEMPLATE/50-research-feature-request-issue.md b/models/.github/ISSUE_TEMPLATE/50-research-feature-request-issue.md new file mode 100644 index 0000000000000000000000000000000000000000..412942a31be9cc4c2935dcd38ecb059a8a4ec18c --- /dev/null +++ b/models/.github/ISSUE_TEMPLATE/50-research-feature-request-issue.md @@ -0,0 +1,26 @@ +--- +name: "[Research Model] Feature Request" +about: Use this template for raising a feature request for the “research” directory +labels: type:feature,models:research + +--- + +# Prerequisites + +Please answer the following question for yourself before submitting an issue. + +- [ ] I checked to make sure that this feature has not been requested already. + +## 1. The entire URL of the file you are using + +https://github.com/tensorflow/models/tree/master/research/... + +## 2. Describe the feature you request + +A clear and concise description of what you want to happen. + +## 3. Additional context + +Add any other context about the feature request here. + +## 4. Are you willing to contribute it? (Yes or No) diff --git a/models/.github/ISSUE_TEMPLATE/60-questions-help-issue.md b/models/.github/ISSUE_TEMPLATE/60-questions-help-issue.md new file mode 100644 index 0000000000000000000000000000000000000000..bc85e0bb019fd2d5960b822c18358f906d5264b7 --- /dev/null +++ b/models/.github/ISSUE_TEMPLATE/60-questions-help-issue.md @@ -0,0 +1,14 @@ +--- +name: Questions and Help +about: Use this template for Questions and Help. 
+labels: type:support + +--- + diff --git a/models/.github/ISSUE_TEMPLATE/config.yml b/models/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..3ba13e0cec6cbbfd462e9ebf529dd2093148cd69 --- /dev/null +++ b/models/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1 @@ +blank_issues_enabled: false diff --git a/models/.github/PULL_REQUEST_TEMPLATE.md b/models/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000000000000000000000000000000000000..379b31c57c118a174d4e787e03099288957f9fe2 --- /dev/null +++ b/models/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,41 @@ +# Description + +> :memo: Please include a summary of the change. +> +> * Please also include relevant motivation and context. +> * List any dependencies that are required for this change. + +## Type of change + +For a new feature or function, please create an issue first to discuss it +with us before submitting a pull request. + +Note: Please delete options that are not relevant. + +- [ ] Bug fix (non-breaking change which fixes an issue) +- [ ] Documentation update +- [ ] TensorFlow 2 migration +- [ ] New feature (non-breaking change which adds functionality) +- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) +- [ ] A new research paper code implementation +- [ ] Other (Specify) + +## Tests + +> :memo: Please describe the tests that you ran to verify your changes. +> +> * Provide instructions so we can reproduce. +> * Please also list any relevant details for your test configuration. + +**Test Configuration**: + +## Checklist + +- [ ] I have signed the [Contributor License Agreement](https://github.com/tensorflow/models/wiki/Contributor-License-Agreements). +- [ ] I have read [guidelines for pull request](https://github.com/tensorflow/models/wiki/Submitting-a-pull-request). +- [ ] My code follows the [coding guidelines](https://github.com/tensorflow/models/wiki/Coding-guidelines). +- [ ] I have performed a self [code review](https://github.com/tensorflow/models/wiki/Code-review) of my own code. +- [ ] I have commented my code, particularly in hard-to-understand areas. +- [ ] I have made corresponding changes to the documentation. +- [ ] My changes generate no new warnings. +- [ ] I have added tests that prove my fix is effective or that my feature works. diff --git a/models/.github/README_TEMPLATE.md b/models/.github/README_TEMPLATE.md new file mode 100644 index 0000000000000000000000000000000000000000..45179d0aeba52caa8d84c102790b7e3fafc2c7fe --- /dev/null +++ b/models/.github/README_TEMPLATE.md @@ -0,0 +1,122 @@ +> :memo: A README.md template for releasing a paper code implementation to a GitHub repository. +> +> * Template version: 1.0.2020.170 +> * Please modify sections depending on needs. + +# Model name, Paper title, or Project Name + +> :memo: Add a badge for the ArXiv identifier of your paper (arXiv:YYMM.NNNNN) + +[![Paper](http://img.shields.io/badge/Paper-arXiv.YYMM.NNNNN-B3181B?logo=arXiv)](https://arxiv.org/abs/...) + +This repository is the official or unofficial implementation of the following paper. + +* Paper title: [Paper Title](https://arxiv.org/abs/YYMM.NNNNN) + +## Description + +> :memo: Provide description of the model. +> +> * Provide brief information of the algorithms used. +> * Provide links for demos, blog posts, etc. + +## History + +> :memo: Provide a changelog. + +## Authors or Maintainers + +> :memo: Provide maintainer information. 
+ +* Full name ([@GitHub username](https://github.com/username)) +* Full name ([@GitHub username](https://github.com/username)) + +## Table of Contents + +> :memo: Provide a table of contents to help readers navigate a lengthy README document. + +## Requirements + +[![TensorFlow 2.1](https://img.shields.io/badge/TensorFlow-2.1-FF6F00?logo=tensorflow)](https://github.com/tensorflow/tensorflow/releases/tag/v2.1.0) +[![Python 3.6](https://img.shields.io/badge/Python-3.6-3776AB)](https://www.python.org/downloads/release/python-360/) + +> :memo: Provide details of the software required. +> +> * Add a `requirements.txt` file to the root directory for installing the necessary dependencies. +> * Describe how to install requirements using pip. +> * Alternatively, create INSTALL.md. + +To install requirements: + +```setup +pip install -r requirements.txt +``` + +## Results + +> :memo: Provide a table with results. (e.g., accuracy, latency) +> +> * Provide links to the pre-trained models (checkpoint, SavedModel files). +> * Publish TensorFlow SavedModel files on TensorFlow Hub (tfhub.dev) if possible. +> * Add links to [TensorBoard.dev](https://tensorboard.dev/) for visualizing metrics. +> +> An example table for image classification results +> +> ### Image Classification +> +> | Model name | Download | Top 1 Accuracy | Top 5 Accuracy | +> |------------|----------|----------------|----------------| +> | Model name | [Checkpoint](https://drive.google.com/...), [SavedModel](https://tfhub.dev/...) | xx% | xx% | + +## Dataset + +> :memo: Provide information of the dataset used. + +## Training + +> :memo: Provide training information. +> +> * Provide details for preprocessing, hyperparameters, random seeds, and environment. +> * Provide a command line example for training. + +Please run this command line for training. + +```shell +python3 ... +``` + +## Evaluation + +> :memo: Provide an evaluation script with details of how to reproduce results. +> +> * Describe data preprocessing / postprocessing steps. +> * Provide a command line example for evaluation. + +Please run this command line for evaluation. + +```shell +python3 ... +``` + +## References + +> :memo: Provide links to references. + +## License + +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) + +> :memo: Place your license text in a file named LICENSE in the root of the repository. +> +> * Include information about your license. +> * Reference: [Adding a license to a repository](https://help.github.com/en/github/building-a-strong-community/adding-a-license-to-a-repository) + +This project is licensed under the terms of the **Apache License 2.0**. + +## Citation + +> :memo: Make your repository citable. +> +> * Reference: [Making Your Code Citable](https://guides.github.com/activities/citable-code/) + +If you want to cite this repository in your research paper, please use the following information. 
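As an editorial illustration of the Citation prompt that closes the template above: many released repositories end the README with a BibTeX entry. The sketch below only reuses the template's own placeholders (Full Name, the project name, username, arXiv:YYMM.NNNNN); none of the fields refer to a real paper or repository.

```bibtex
% Hypothetical entry: every field is a placeholder to be replaced by the releasing authors.
@misc{username_project_2020,
  author       = {Full Name and Full Name},
  title        = {Model name, Paper title, or Project Name},
  year         = {2020},
  howpublished = {\url{https://github.com/username/repository}},
  note         = {Implementation of the paper arXiv:YYMM.NNNNN}
}
```

A CITATION file or a DOI badge, as described in the "Making Your Code Citable" guide linked above, works equally well.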
diff --git a/models/.gitignore b/models/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..cbc8846d64152b8a933f4bd2727877a94f98f92a --- /dev/null +++ b/models/.gitignore @@ -0,0 +1,98 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*,cover +.hypothesis/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# IPython Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# mypy +.mypy_cache + +# celery beat schedule file +celerybeat-schedule + +# dotenv +.env + +# virtualenv +venv/ +ENV/ + +# Spyder project settings +.spyderproject + +# Rope project settings +.ropeproject + +# PyCharm +.idea/ + +# For mac +.DS_Store diff --git a/models/AUTHORS b/models/AUTHORS new file mode 100644 index 0000000000000000000000000000000000000000..0fa85c98ffeb38c6d6d0ef2bddb790b75b90f3dc --- /dev/null +++ b/models/AUTHORS @@ -0,0 +1,10 @@ +# This is the official list of authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as: +# Name or Organization +# The email address is not required for organizations. + +Google Inc. 
+David Dao diff --git a/models/CODEOWNERS b/models/CODEOWNERS new file mode 100644 index 0000000000000000000000000000000000000000..36b7ebd4e779dc110c53d49ed73cf43f519ca211 --- /dev/null +++ b/models/CODEOWNERS @@ -0,0 +1,61 @@ +* @tensorflow/tf-garden-team @tensorflow/tf-model-garden-team +/official/ @rachellj218 @saberkun @jaeyounkim +/official/nlp/ @saberkun @chenGitHuber @lehougoogle @rachellj218 +/official/vision/ @pengchongjin @xianzhidu @yeqingli @arashwan @saberkun @rachellj218 +/research/adv_imagenet_models/ @alexeykurakin +/research/adversarial_crypto/ @dave-andersen +/research/adversarial_logit_pairing/ @alexeykurakin +/research/adversarial_text/ @rsepassi @a-dai +/research/attention_ocr/ @xavigibert +/research/audioset/ @plakal @dpwe +/research/autoaugment/* @barretzoph +/research/autoencoders/ @snurkabill +/research/brain_coder/ @danabo +/research/cognitive_mapping_and_planning/ @s-gupta +/research/compression/ @nmjohn +/research/cvt_text/ @clarkkev @lmthang +/research/deep_contextual_bandits/ @rikel +/research/deep_speech/ @yhliang2018 +/research/deeplab/ @aquariusjay @yknzhu @gpapan +/research/delf/ @andrefaraujo +/research/domain_adaptation/ @bousmalis @dmrd +/research/efficient-hrl/ @ofirnachum +/research/feelvos/ @pvoigtlaender @yuningchai @aquariusjay +/research/fivo/ @dieterichlawson +/research/global_objectives/ @mackeya-google +/research/im2txt/ @cshallue +/research/inception/ @shlens @vincentvanhoucke +/research/keypointnet/ @mnorouzi +/research/learned_optimizer/ @olganw @nirum +/research/learning_to_remember_rare_events/ @lukaszkaiser @ofirnachum +/research/learning_unsupervised_learning/ @lukemetz @nirum +/research/lexnet_nc/ @vered1986 @waterson +/research/lfads/ @jazcollins @sussillo +/research/lm_1b/ @oriolvinyals @panyx0718 +/research/lm_commonsense/ @thtrieu +/research/lstm_object_detection/ @yinxiaoli @yongzhe2160 +/research/marco/ @vincentvanhoucke +/research/maskgan/ @liamb315 @a-dai +/research/namignizer/ @knathanieltucker +/research/neural_gpu/ @lukaszkaiser +/research/neural_programmer/ @arvind2505 +/research/next_frame_prediction/ @panyx0718 +/research/object_detection/ @jch1 @tombstone @pkulzc +/research/pcl_rl/ @ofirnachum +/research/ptn/ @xcyan @arkanath @hellojas @honglaklee +/research/qa_kg/ @yuyuz +/research/real_nvp/ @laurent-dinh +/research/rebar/ @gjtucker +/research/sentiment_analysis/ @sculd +/research/seq2species/ @apbusia @depristo +/research/skip_thoughts/ @cshallue +/research/slim/ @sguada @marksandler2 +/research/steve/ @buckman-google +/research/street/ @theraysmith +/research/struct2depth/ @aneliaangelova +/research/swivel/ @waterson +/research/tcn/ @coreylynch @sermanet +/research/textsum/ @panyx0718 @peterjliu +/research/transformer/ @daviddao +/research/vid2depth/ @rezama +/research/video_prediction/ @cbfinn diff --git a/models/CONTRIBUTING.md b/models/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..f909461ae7b9c75264e0915ecb37228314933e4a --- /dev/null +++ b/models/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# How to contribute + +![Contributors](https://img.shields.io/github/contributors/tensorflow/models) + +We encourage you to contribute to the TensorFlow Model Garden. + +Please read our [guidelines](../../wiki/How-to-contribute) for details. + +**NOTE**: Only [code owners](./CODEOWNERS) are allowed to merge a pull request. +Please contact the code owners of each model to merge your pull request. 
diff --git a/models/ISSUES.md b/models/ISSUES.md new file mode 100644 index 0000000000000000000000000000000000000000..b23d6daa1654188d640beb67e6614bd0743f919f --- /dev/null +++ b/models/ISSUES.md @@ -0,0 +1,24 @@ +# If you open a GitHub issue, here is our policy. + +* It must be a **bug**, a **feature request**, or a significant problem +with **documentation**. + * Please send a pull request instead for small documentation fixes. +* The required form must be filled out. +* The issue should be related to the repository it is created in. + +General help and support should be sought on [Stack Overflow](https://stackoverflow.com/questions/tagged/tensorflow-model-garden) or other non-GitHub channels. + +[![](https://img.shields.io/stackexchange/stackoverflow/t/tensorflow-model-garden)](https://stackoverflow.com/questions/tagged/tensorflow-model-garden) + +TensorFlow developers respond to issues. +We want to focus on work that benefits the whole community such as fixing bugs +and adding new features. +It helps us to address bugs and feature requests in a timely manner. + +--- + +Please understand that research models in the [research directory](https://github.com/tensorflow/models/tree/master/research) +included in this repository are experimental and research-style code. +They are not officially supported by the TensorFlow team. + + diff --git a/models/LICENSE b/models/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..43fcf7bf1f1f9f824a1debf05d6ced45bf5810aa --- /dev/null +++ b/models/LICENSE @@ -0,0 +1,203 @@ +Copyright 2016 The TensorFlow Authors. All rights reserved. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016, The Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/models/README.md b/models/README.md new file mode 100644 index 0000000000000000000000000000000000000000..5b52e4a5cf41f949c2cf85744ea297ec3c324004 --- /dev/null +++ b/models/README.md @@ -0,0 +1,39 @@ +![Logo](https://storage.googleapis.com/model_garden_artifacts/TF_Model_Garden.png) + +# Welcome to the Model Garden for TensorFlow + +The TensorFlow Model Garden is a repository with a number of different implementations of state-of-the-art (SOTA) models and modeling solutions for TensorFlow users. We aim to demonstrate the best practices for modeling so that TensorFlow users +can take full advantage of TensorFlow for their research and product development. + +| Directory | Description | +|-----------|-------------| +| [official](official) | • A collection of example implementations for SOTA models using the latest TensorFlow 2's high-level APIs
• Officially maintained, supported, and kept up to date with the latest TensorFlow 2 APIs by TensorFlow
• Reasonably optimized for fast performance while still being easy to read | +| [research](research) | • A collection of research model implementations in TensorFlow 1 or 2 by researchers
• Maintained and supported by researchers | +| [community](community) | • A curated list of the GitHub repositories with machine learning models and implementations powered by TensorFlow 2 | + +## [Announcements](https://github.com/tensorflow/models/wiki/Announcements) + +| Date | News | +|------|------| +| June 17, 2020 | [Context R-CNN: Long Term Temporal Context for Per-Camera Object Detection](https://github.com/tensorflow/models/tree/master/research/object_detection#june-17th-2020) released +| May 21, 2020 | [Unifying Deep Local and Global Features for Image Search (DELG)](https://github.com/tensorflow/models/tree/master/research/delf#delg) code released +| May 19, 2020 | [MobileDets: Searching for Object Detection Architectures for Mobile Accelerators](https://github.com/tensorflow/models/tree/master/research/object_detection#may-19th-2020) released +| May 7, 2020 | [MnasFPN with MobileNet-V2 backbone](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md#mobile-models) released for object detection +| May 1, 2020 | [DELF: DEep Local Features](https://github.com/tensorflow/models/tree/master/research/delf) updated to support TensorFlow 2.1 +| March 31, 2020 | [Introducing the Model Garden for TensorFlow 2](https://blog.tensorflow.org/2020/03/introducing-model-garden-for-tensorflow-2.html) ([Tweet](https://twitter.com/TensorFlow/status/1245029834633297921)) | + +## [Milestones](https://github.com/tensorflow/models/milestones) + +| Date | Milestone | +|------|-----------| +| July 7, 2020 | [![GitHub milestone](https://img.shields.io/github/milestones/progress/tensorflow/models/1)](https://github.com/tensorflow/models/milestone/1) | + +## Contributions + +[![help wanted:paper implementation](https://img.shields.io/github/issues/tensorflow/models/help%20wanted%3Apaper%20implementation)](https://github.com/tensorflow/models/labels/help%20wanted%3Apaper%20implementation) + +If you want to contribute, please review the [contribution guidelines](https://github.com/tensorflow/models/wiki/How-to-contribute). + +## License + +[Apache License 2.0](LICENSE) diff --git a/models/official/LICENSE b/models/official/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..d3da228420e973edaf4123d5eeb42210f4450b0c --- /dev/null +++ b/models/official/LICENSE @@ -0,0 +1,203 @@ +Copyright 2015 The TensorFlow Authors. All rights reserved. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015, The TensorFlow Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/models/official/README-TPU.md b/models/official/README-TPU.md new file mode 100644 index 0000000000000000000000000000000000000000..8a54f95314abc2bae40d11acdf5439939acf7583 --- /dev/null +++ b/models/official/README-TPU.md @@ -0,0 +1,25 @@ +# Officially Supported TensorFlow 2.1+ Models on Cloud TPU + +## Natural Language Processing + +* [bert](nlp/bert): A powerful pre-trained language representation model: + BERT, which stands for Bidirectional Encoder Representations from + Transformers. + [BERT FineTuning with Cloud TPU](https://cloud.google.com/tpu/docs/tutorials/bert-2.x) provides step-by-step instructions on Cloud TPU training. You can view the [Bert MNLI Tensorboard.dev metrics](https://tensorboard.dev/experiment/LijZ1IrERxKALQfr76gndA) for the MNLI fine-tuning task. +* [transformer](nlp/transformer): A transformer model to translate the WMT + English to German dataset. + See [Training transformer on Cloud TPU](https://cloud.google.com/tpu/docs/tutorials/transformer-2.x) for step-by-step instructions on Cloud TPU training. + +## Computer Vision + +* [efficientnet](vision/image_classification): A family of convolutional + neural networks that scale by balancing network depth, width, and + resolution and can be used to classify ImageNet's dataset of 1000 classes. + See [Tensorboard.dev training metrics](https://tensorboard.dev/experiment/KnaWjrq5TXGfv0NW5m7rpg/#scalars). +* [mnist](vision/image_classification): A basic model to classify digits + from the MNIST dataset. See the [Running MNIST on Cloud TPU](https://cloud.google.com/tpu/docs/tutorials/mnist-2.x) tutorial and [Tensorboard.dev metrics](https://tensorboard.dev/experiment/mIah5lppTASvrHqWrdr6NA). +* [mask-rcnn](vision/detection): An object detection and instance segmentation model. See [Tensorboard.dev training metrics](https://tensorboard.dev/experiment/LH7k0fMsRwqUAcE09o9kPA). +* [resnet](vision/image_classification): A deep residual network that can + be used to classify ImageNet's dataset of 1000 classes. + See the [Training ResNet on Cloud TPU](https://cloud.google.com/tpu/docs/tutorials/resnet-2.x) tutorial and [Tensorboard.dev metrics](https://tensorboard.dev/experiment/CxlDK8YMRrSpYEGtBRpOhg). +* [retinanet](vision/detection): A fast and powerful object detector. See [Tensorboard.dev training metrics](https://tensorboard.dev/experiment/b8NRnWU3TqG6Rw0UxueU6Q). diff --git a/models/official/README.md b/models/official/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2b3f2dd768d0b7cf8238136d003aa5cb89070cc3 --- /dev/null +++ b/models/official/README.md @@ -0,0 +1,142 @@ +![Logo](https://storage.googleapis.com/model_garden_artifacts/TF_Model_Garden.png) + +# TensorFlow Official Models + +The TensorFlow official models are a collection of models +that use TensorFlow’s high-level APIs. +They are intended to be well-maintained, tested, and kept up to date +with the latest TensorFlow API. + +They should also be reasonably optimized for fast performance while still +being easy to read. +These models are used as end-to-end tests, ensuring that the models run +with the same or improved speed and performance with each new TensorFlow build. + +## More models to come! + +The team is actively developing new models. +In the near future, we will add: + +* State-of-the-art language understanding models: + More members of the Transformer family +* State-of-the-art image classification models: + EfficientNet, MnasNet, and variants +* A set of excellent object detection models.
+ +## Table of Contents + +- [Models and Implementations](#models-and-implementations) + * [Computer Vision](#computer-vision) + + [Image Classification](#image-classification) + + [Object Detection and Segmentation](#object-detection-and-segmentation) + * [Natural Language Processing](#natural-language-processing) + * [Recommendation](#recommendation) +- [How to get started with the official models](#how-to-get-started-with-the-official-models) + +## Models and Implementations + +### Computer Vision + +#### Image Classification + +| Model | Reference (Paper) | +|-------|-------------------| +| [MNIST](vision/image_classification) | A basic model to classify digits from the [MNIST dataset](http://yann.lecun.com/exdb/mnist/) | +| [ResNet](vision/image_classification) | [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) | +| [EfficientNet](vision/image_classification) | [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) | + +#### Object Detection and Segmentation + +| Model | Reference (Paper) | +|-------|-------------------| +| [RetinaNet](vision/detection) | [Focal Loss for Dense Object Detection](https://arxiv.org/abs/1708.02002) | +| [Mask R-CNN](vision/detection) | [Mask R-CNN](https://arxiv.org/abs/1703.06870) | +| [ShapeMask](vision/detection) | [ShapeMask: Learning to Segment Novel Objects by Refining Shape Priors](https://arxiv.org/abs/1904.03239) | + +### Natural Language Processing + +| Model | Reference (Paper) | +|-------|-------------------| +| [ALBERT (A Lite BERT)](nlp/albert) | [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942) | +| [BERT (Bidirectional Encoder Representations from Transformers)](nlp/bert) | [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) | +| [NHNet (News Headline generation model)](nlp/nhnet) | [Generating Representative Headlines for News Stories](https://arxiv.org/abs/2001.09386) | +| [Transformer](nlp/transformer) | [Attention Is All You Need](https://arxiv.org/abs/1706.03762) | +| [XLNet](nlp/xlnet) | [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) | + +### Recommendation + +| Model | Reference (Paper) | +|-------|-------------------| +| [NCF](recommendation) | [Neural Collaborative Filtering](https://arxiv.org/abs/1708.05031) | + +## How to get started with the official models + +* The models in the master branch are developed using TensorFlow 2, +and they target the TensorFlow [nightly binaries](https://github.com/tensorflow/tensorflow#installation) +built from the +[master branch of TensorFlow](https://github.com/tensorflow/tensorflow/tree/master). +* The stable versions targeting releases of TensorFlow are available +as tagged branches or [downloadable releases](https://github.com/tensorflow/models/releases). +* Model repository version numbers match the target TensorFlow release, +such that +[release v2.2.0](https://github.com/tensorflow/models/releases/tag/v2.2.0) +is compatible with +[TensorFlow v2.2.0](https://github.com/tensorflow/tensorflow/releases/tag/v2.2.0). + +Please follow the steps below before running models in this repository.
+ +### Requirements + +* The latest TensorFlow Model Garden release and TensorFlow 2 + * If you are on a version of TensorFlow earlier than 2.2, please +upgrade your TensorFlow to [the latest TensorFlow 2](https://www.tensorflow.org/install/). + +```shell +pip3 install tf-nightly +``` + +### Installation + +#### Method 1: Install the TensorFlow Model Garden pip package + +**tf-models-nightly** is the nightly Model Garden package +created daily automatically. pip will install all models +and dependencies automatically. + +```shell +pip install tf-models-nightly +``` + +Please check out our [example](colab/fine_tuning_bert.ipynb) +to learn how to use a PIP package. + +#### Method 2: Clone the source + +1. Clone the GitHub repository: + +```shell +git clone https://github.com/tensorflow/models.git +``` + +2. Add the top-level ***/models*** folder to the Python path. + +```shell +export PYTHONPATH=$PYTHONPATH:/path/to/models +``` + +If you are using a Colab notebook, please set the Python path with os.environ. + +```python +import os +os.environ['PYTHONPATH'] += ":/path/to/models" +``` + +3. Install other dependencies + +```shell +pip3 install --user -r official/requirements.txt +``` + +## Contributions + +If you want to contribute, please review the [contribution guidelines](https://github.com/tensorflow/models/wiki/How-to-contribute). diff --git a/models/official/__init__.py b/models/official/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/official/__pycache__/__init__.cpython-310.pyc b/models/official/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46eb1d8fb6c544f21ff52681984538792572cd90 Binary files /dev/null and b/models/official/__pycache__/__init__.cpython-310.pyc differ diff --git a/models/official/__pycache__/__init__.cpython-38.pyc b/models/official/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..556257a7074799181f4d95d751286d1b27bd4e77 Binary files /dev/null and b/models/official/__pycache__/__init__.cpython-38.pyc differ diff --git a/models/official/__pycache__/__init__.cpython-39.pyc b/models/official/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..745a3d49d03eae81040f19e1b522ab1bf63ced87 Binary files /dev/null and b/models/official/__pycache__/__init__.cpython-39.pyc differ diff --git a/models/official/benchmark/__init__.py b/models/official/benchmark/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/official/benchmark/benchmark_wrappers.py b/models/official/benchmark/benchmark_wrappers.py new file mode 100644 index 0000000000000000000000000000000000000000..3d38b690c7865e0ab560e59422a2454e44be052d --- /dev/null +++ b/models/official/benchmark/benchmark_wrappers.py @@ -0,0 +1,97 @@ +# Lint as: python3 +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utils to annotate and trace benchmarks.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import flags +from absl import logging +from absl.testing import flagsaver + +FLAGS = flags.FLAGS + +flags.DEFINE_multi_string( + 'benchmark_method_flags', None, + 'Optional list of runtime flags of the form key=value. Specify ' + 'multiple times to specify different flags. These will override the FLAGS ' + 'object directly after hardcoded settings in individual benchmark methods ' + 'before they call _run_and_report benchmark. Example if we set ' + '--benchmark_method_flags=train_steps=10 and a benchmark method hardcodes ' + 'FLAGS.train_steps=10000 and later calls _run_and_report_benchmark, ' + 'it\'ll only run for 10 steps. This is useful for ' + 'debugging/profiling workflows.') + + +def enable_runtime_flags(decorated_func): + """Sets attributes from --benchmark_method_flags for method execution. + + @enable_runtime_flags decorator temporarily adds flags passed in via + --benchmark_method_flags and runs the decorated function in that context. + + A user can set --benchmark_method_flags=train_steps=5 to run the benchmark + method in the snippet below with FLAGS.train_steps=5 for debugging (without + modifying the benchmark code). + + class ModelBenchmark(): + + @benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark(self): + # run benchmark ... + # report benchmark results ... + + def benchmark_method(self): + FLAGS.train_steps = 1000 + ... + self._run_and_report_benchmark() + + Args: + decorated_func: The method that runs the benchmark after previous setup + execution that set some flags. + + Returns: + new_func: The same method which executes in a temporary context where flag + overrides from --benchmark_method_flags are active. + """ + + def runner(*args, **kwargs): + """Creates a temporary context to activate --benchmark_method_flags.""" + if FLAGS.benchmark_method_flags: + saved_flag_values = flagsaver.save_flag_values() + for key_value in FLAGS.benchmark_method_flags: + key, value = key_value.split('=', 1) + try: + numeric_float = float(value) + numeric_int = int(numeric_float) + if abs(numeric_int) == abs(numeric_float): + flag_value = numeric_int + else: + flag_value = numeric_float + except ValueError: + flag_value = value + logging.info('Setting --%s=%s', key, flag_value) + setattr(FLAGS, key, flag_value) + else: + saved_flag_values = None + try: + result = decorated_func(*args, **kwargs) + return result + finally: + if saved_flag_values: + flagsaver.restore_flag_values(saved_flag_values) + + return runner diff --git a/models/official/benchmark/bert_benchmark.py b/models/official/benchmark/bert_benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..35daac672ebe87434e99db8c7c3bbcc67a8061e4 --- /dev/null +++ b/models/official/benchmark/bert_benchmark.py @@ -0,0 +1,365 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Executes BERT benchmarks and accuracy tests.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import json +import math +import os +import time + +# pylint: disable=g-bad-import-order +from absl import flags +from absl.testing import flagsaver +import tensorflow as tf +# pylint: enable=g-bad-import-order + +from official.benchmark import bert_benchmark_utils as benchmark_utils +from official.benchmark import owner_utils +from official.nlp.bert import configs +from official.nlp.bert import run_classifier +from official.utils.misc import distribution_utils +from official.benchmark import benchmark_wrappers + +# pylint: disable=line-too-long +PRETRAINED_CHECKPOINT_PATH = 'gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16/bert_model.ckpt' +CLASSIFIER_TRAIN_DATA_PATH = 'gs://tf-perfzero-data/bert/classification/mrpc_train.tf_record' +CLASSIFIER_EVAL_DATA_PATH = 'gs://tf-perfzero-data/bert/classification/mrpc_eval.tf_record' +CLASSIFIER_INPUT_META_DATA_PATH = 'gs://tf-perfzero-data/bert/classification/mrpc_meta_data' +MODEL_CONFIG_FILE_PATH = 'gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16/bert_config.json' +# pylint: enable=line-too-long + +TMP_DIR = os.getenv('TMPDIR') +FLAGS = flags.FLAGS + + +class BertClassifyBenchmarkBase(benchmark_utils.BertBenchmarkBase): + """Base class to hold methods common to test classes in the module.""" + + def __init__(self, output_dir=None, tpu=None): + super(BertClassifyBenchmarkBase, self).__init__(output_dir, tpu=tpu) + self.num_epochs = None + self.num_steps_per_epoch = None + FLAGS.steps_per_loop = 1 + + @flagsaver.flagsaver + def _run_bert_classifier(self, callbacks=None, use_ds=True): + """Starts BERT classification task.""" + with tf.io.gfile.GFile(FLAGS.input_meta_data_path, 'rb') as reader: + input_meta_data = json.loads(reader.read().decode('utf-8')) + + bert_config = configs.BertConfig.from_json_file(FLAGS.bert_config_file) + epochs = self.num_epochs if self.num_epochs else FLAGS.num_train_epochs + if self.num_steps_per_epoch: + steps_per_epoch = self.num_steps_per_epoch + else: + train_data_size = input_meta_data['train_data_size'] + steps_per_epoch = int(train_data_size / FLAGS.train_batch_size) + warmup_steps = int(epochs * steps_per_epoch * 0.1) + eval_steps = int( + math.ceil(input_meta_data['eval_data_size'] / FLAGS.eval_batch_size)) + if self.tpu: + strategy = distribution_utils.get_distribution_strategy( + distribution_strategy='tpu', tpu_address=self.tpu) + else: + strategy = distribution_utils.get_distribution_strategy( + distribution_strategy='mirrored' if use_ds else 'off', + num_gpus=self.num_gpus) + + max_seq_length = input_meta_data['max_seq_length'] + train_input_fn = run_classifier.get_dataset_fn( + FLAGS.train_data_path, + max_seq_length, + FLAGS.train_batch_size, + is_training=True) + eval_input_fn = run_classifier.get_dataset_fn( + FLAGS.eval_data_path, + max_seq_length, + FLAGS.eval_batch_size, + 
is_training=False) + _, summary = run_classifier.run_bert_classifier( + strategy, + bert_config, + input_meta_data, + FLAGS.model_dir, + epochs, + steps_per_epoch, + FLAGS.steps_per_loop, + eval_steps, + warmup_steps, + FLAGS.learning_rate, + FLAGS.init_checkpoint, + train_input_fn, + eval_input_fn, + training_callbacks=False, + custom_callbacks=callbacks) + return summary + + +class BertClassifyBenchmarkReal(BertClassifyBenchmarkBase): + """Short benchmark performance tests for BERT model. + + Tests BERT classification performance in different GPU, TPU configurations. + The naming convention of below test cases follow + `benchmark_(number of gpus)_gpu_(dataset type)` for GPUs and + `benchmark_(topology)_tpu_(dataset type)` for TPUs. + """ + + def __init__(self, output_dir=TMP_DIR, tpu=None, **kwargs): + super(BertClassifyBenchmarkReal, self).__init__( + output_dir=output_dir, tpu=tpu) + + self.train_data_path = CLASSIFIER_TRAIN_DATA_PATH + self.eval_data_path = CLASSIFIER_EVAL_DATA_PATH + self.bert_config_file = MODEL_CONFIG_FILE_PATH + self.input_meta_data_path = CLASSIFIER_INPUT_META_DATA_PATH + + # Since we only care about performance metrics, we limit + # the number of training steps and epochs to prevent unnecessarily + # long tests. + self.num_steps_per_epoch = 100 + self.num_epochs = 1 + + @benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark(self, + training_summary_path, + min_accuracy=0, + max_accuracy=1, + use_ds=True): + """Starts BERT performance benchmark test.""" + start_time_sec = time.time() + summary = self._run_bert_classifier( + callbacks=[self.timer_callback], use_ds=use_ds) + wall_time_sec = time.time() - start_time_sec + + # Since we do not load from any pretrained checkpoints, we ignore all + # accuracy metrics. 
+ summary.pop('eval_metrics', None) + summary['start_time_sec'] = start_time_sec + + super(BertClassifyBenchmarkReal, self)._report_benchmark( + stats=summary, + wall_time_sec=wall_time_sec, + min_accuracy=min_accuracy, + max_accuracy=max_accuracy) + + def benchmark_1_gpu_mrpc(self): + """Test BERT model performance with 1 GPU.""" + + self._setup() + self.num_gpus = 1 + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_mrpc') + FLAGS.train_data_path = self.train_data_path + FLAGS.eval_data_path = self.eval_data_path + FLAGS.input_meta_data_path = self.input_meta_data_path + FLAGS.bert_config_file = self.bert_config_file + FLAGS.train_batch_size = 4 + FLAGS.eval_batch_size = 4 + + summary_path = os.path.join(FLAGS.model_dir, + 'summaries/training_summary.txt') + self._run_and_report_benchmark(summary_path) + + def benchmark_1_gpu_mrpc_xla(self): + """Test BERT model performance with 1 GPU.""" + + self._setup() + self.num_gpus = 1 + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_mrpc_xla') + FLAGS.train_data_path = self.train_data_path + FLAGS.eval_data_path = self.eval_data_path + FLAGS.input_meta_data_path = self.input_meta_data_path + FLAGS.bert_config_file = self.bert_config_file + FLAGS.train_batch_size = 4 + FLAGS.eval_batch_size = 4 + FLAGS.enable_xla = True + + summary_path = os.path.join(FLAGS.model_dir, + 'summaries/training_summary.txt') + self._run_and_report_benchmark(summary_path) + + def benchmark_1_gpu_mrpc_no_dist_strat(self): + """Test BERT model performance with 1 GPU, no distribution strategy.""" + + self._setup() + self.num_gpus = 1 + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_mrpc_no_dist_strat') + FLAGS.train_data_path = self.train_data_path + FLAGS.eval_data_path = self.eval_data_path + FLAGS.input_meta_data_path = self.input_meta_data_path + FLAGS.bert_config_file = self.bert_config_file + FLAGS.train_batch_size = 4 + FLAGS.eval_batch_size = 4 + + summary_path = os.path.join(FLAGS.model_dir, + 'summaries/training_summary.txt') + self._run_and_report_benchmark(summary_path, use_ds=False) + + @owner_utils.Owner('tf-model-garden') + def benchmark_8_gpu_mrpc(self): + """Test BERT model performance with 8 GPUs.""" + + self._setup() + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_mrpc') + FLAGS.train_data_path = self.train_data_path + FLAGS.eval_data_path = self.eval_data_path + FLAGS.input_meta_data_path = self.input_meta_data_path + FLAGS.bert_config_file = self.bert_config_file + + summary_path = os.path.join(FLAGS.model_dir, + 'summaries/training_summary.txt') + self._run_and_report_benchmark(summary_path) + + def benchmark_1_gpu_amp_mrpc_no_dist_strat(self): + """Performance for 1 GPU no DS with automatic mixed precision.""" + self._setup() + self.num_gpus = 1 + FLAGS.model_dir = self._get_model_dir( + 'benchmark_1_gpu_amp_mrpc_no_dist_strat') + FLAGS.train_data_path = self.train_data_path + FLAGS.eval_data_path = self.eval_data_path + FLAGS.input_meta_data_path = self.input_meta_data_path + FLAGS.bert_config_file = self.bert_config_file + FLAGS.train_batch_size = 4 + FLAGS.eval_batch_size = 4 + FLAGS.dtype = 'fp16' + FLAGS.fp16_implementation = 'graph_rewrite' + + summary_path = os.path.join(FLAGS.model_dir, + 'summaries/training_summary.txt') + self._run_and_report_benchmark(summary_path, use_ds=False) + + def benchmark_8_gpu_amp_mrpc(self): + """Test BERT model performance with 8 GPUs with automatic mixed precision.""" + + self._setup() + self.num_gpus = 8 + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_amp_mrpc') + 
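+    # AMP is requested below via dtype='fp16' together with the
+    # 'graph_rewrite' (grappler-based) mixed precision implementation.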
FLAGS.train_data_path = self.train_data_path
+    FLAGS.eval_data_path = self.eval_data_path
+    FLAGS.input_meta_data_path = self.input_meta_data_path
+    FLAGS.bert_config_file = self.bert_config_file
+    FLAGS.train_batch_size = 32
+    FLAGS.eval_batch_size = 32
+    FLAGS.dtype = 'fp16'
+    FLAGS.fp16_implementation = 'graph_rewrite'
+
+    summary_path = os.path.join(FLAGS.model_dir,
+                                'summaries/training_summary.txt')
+    self._run_and_report_benchmark(summary_path, use_ds=False)
+
+  @owner_utils.Owner('tf-model-garden')
+  def benchmark_2x2_tpu_mrpc(self):
+    """Test BERT model performance with 2x2 TPU."""
+
+    self._setup()
+    FLAGS.steps_per_loop = 50
+    FLAGS.model_dir = self._get_model_dir('benchmark_2x2_tpu_mrpc')
+    FLAGS.train_data_path = self.train_data_path
+    FLAGS.eval_data_path = self.eval_data_path
+    FLAGS.input_meta_data_path = self.input_meta_data_path
+    FLAGS.bert_config_file = self.bert_config_file
+    FLAGS.train_batch_size = 32
+    FLAGS.eval_batch_size = 32
+
+    summary_path = os.path.join(FLAGS.model_dir,
+                                'summaries/training_summary.txt')
+    self._run_and_report_benchmark(summary_path, use_ds=False)
+
+
+class BertClassifyAccuracy(BertClassifyBenchmarkBase):
+  """Short accuracy test for BERT model.
+
+  Tests BERT classification task model accuracy. The naming convention of
+  the test cases below follows the
+  `benchmark_(number of gpus)_gpu_(dataset type)` format.
+  """
+
+  def __init__(self, output_dir=TMP_DIR, tpu=None, **kwargs):
+    self.train_data_path = CLASSIFIER_TRAIN_DATA_PATH
+    self.eval_data_path = CLASSIFIER_EVAL_DATA_PATH
+    self.bert_config_file = MODEL_CONFIG_FILE_PATH
+    self.input_meta_data_path = CLASSIFIER_INPUT_META_DATA_PATH
+    self.pretrained_checkpoint_path = PRETRAINED_CHECKPOINT_PATH
+
+    super(BertClassifyAccuracy, self).__init__(output_dir=output_dir, tpu=tpu)
+
+  @benchmark_wrappers.enable_runtime_flags
+  def _run_and_report_benchmark(self,
+                                training_summary_path,
+                                min_accuracy=0.84,
+                                max_accuracy=0.88):
+    """Starts BERT accuracy benchmark test."""
+
+    start_time_sec = time.time()
+    summary = self._run_bert_classifier(callbacks=[self.timer_callback])
+    wall_time_sec = time.time() - start_time_sec
+
+    super(BertClassifyAccuracy, self)._report_benchmark(
+        stats=summary,
+        wall_time_sec=wall_time_sec,
+        min_accuracy=min_accuracy,
+        max_accuracy=max_accuracy)
+
+  def _setup(self):
+    super(BertClassifyAccuracy, self)._setup()
+    FLAGS.train_data_path = self.train_data_path
+    FLAGS.eval_data_path = self.eval_data_path
+    FLAGS.input_meta_data_path = self.input_meta_data_path
+    FLAGS.bert_config_file = self.bert_config_file
+    FLAGS.init_checkpoint = self.pretrained_checkpoint_path
+
+  @owner_utils.Owner('tf-model-garden')
+  def benchmark_8_gpu_mrpc(self):
+    """Run BERT model accuracy test with 8 GPUs.
+
+    Due to the comparatively small cardinality of the MRPC dataset, the
+    training accuracy metric has high variance across runs, so we allow a
+    wide accuracy range (84% to 88%).
+ """ + self._setup() + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_mrpc') + + summary_path = os.path.join(FLAGS.model_dir, + 'summaries/training_summary.txt') + self._run_and_report_benchmark(summary_path) + + def benchmark_8_gpu_mrpc_xla(self): + """Run BERT model accuracy test with 8 GPUs with XLA.""" + self._setup() + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_mrpc_xla') + FLAGS.enable_xla = True + summary_path = os.path.join(FLAGS.model_dir, + 'summaries/training_summary.txt') + self._run_and_report_benchmark(summary_path) + + @owner_utils.Owner('tf-model-garden') + def benchmark_2x2_tpu_mrpc(self): + """Run BERT model accuracy test on 2x2 TPU.""" + self._setup() + FLAGS.steps_per_loop = 50 + FLAGS.model_dir = self._get_model_dir('benchmark_2x2_tpu_mrpc') + + summary_path = os.path.join(FLAGS.model_dir, + 'summaries/training_summary.txt') + self._run_and_report_benchmark(summary_path) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/benchmark/bert_benchmark_utils.py b/models/official/benchmark/bert_benchmark_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..705a243315616080fe15c70925ed74a905818cdc --- /dev/null +++ b/models/official/benchmark/bert_benchmark_utils.py @@ -0,0 +1,127 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utility functions or classes shared between BERT benchmarks.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import time + +# pylint: disable=g-bad-import-order +import numpy as np +from absl import flags +import tensorflow as tf +# pylint: enable=g-bad-import-order + +from official.utils.flags import core as flags_core +from official.benchmark.perfzero_benchmark import PerfZeroBenchmark + +FLAGS = flags.FLAGS + + +class BenchmarkTimerCallback(tf.keras.callbacks.Callback): + """Callback that records time it takes to run each batch.""" + + def __init__(self, num_batches_to_skip=10): + super(BenchmarkTimerCallback, self).__init__() + self.batch_start_times = {} + self.batch_stop_times = {} + + def on_batch_begin(self, batch, logs=None): + self.batch_start_times[batch] = time.time() + + def on_batch_end(self, batch, logs=None): + # If there are multiple steps_per_loop, the end batch index will not be the + # same as the starting index. Use the last starting index instead. 
+ if batch not in self.batch_start_times: + batch = max(self.batch_start_times.keys()) + + self.batch_stop_times[batch] = time.time() + + def get_examples_per_sec(self, batch_size, num_batches_to_skip=1): + batch_durations = [] + for batch in self.batch_start_times: + if batch in self.batch_stop_times and batch >= num_batches_to_skip: + batch_durations.append(self.batch_stop_times[batch] - + self.batch_start_times[batch]) + return batch_size / np.mean(batch_durations) + + def get_startup_time(self, program_start_time): + return self.batch_start_times[0] - program_start_time + + +class BertBenchmarkBase(PerfZeroBenchmark): + """Base class to hold methods common to test classes.""" + local_flags = None + + def __init__(self, output_dir=None, tpu=None, **kwargs): + super(BertBenchmarkBase, self).__init__( + output_dir=output_dir, tpu=tpu, **kwargs) + self.num_gpus = 8 + self.timer_callback = None + + def _setup(self): + """Sets up and resets flags before each test.""" + super(BertBenchmarkBase, self)._setup() + self.timer_callback = BenchmarkTimerCallback() + + def _report_benchmark(self, stats, wall_time_sec, min_accuracy, max_accuracy): + """Report benchmark results by writing to local protobuf file. + + Args: + stats: dict returned from BERT models with known entries. + wall_time_sec: the during of the benchmark execution in seconds + min_accuracy: Minimum classification accuracy constraint to verify + correctness of the model. + max_accuracy: Maximum classification accuracy constraint to verify + correctness of the model. + """ + metrics = [{ + 'name': 'training_loss', + 'value': stats['train_loss'], + }] + if self.timer_callback: + metrics.append({ + 'name': + 'exp_per_second', + 'value': + self.timer_callback.get_examples_per_sec(FLAGS.train_batch_size * + FLAGS.steps_per_loop) + }) + else: + metrics.append({ + 'name': 'exp_per_second', + 'value': 0.0, + }) + if self.timer_callback and 'start_time_sec' in stats: + metrics.append({ + 'name': 'startup_time', + 'value': self.timer_callback.get_startup_time(stats['start_time_sec']) + }) + + if 'eval_metrics' in stats: + metrics.append({ + 'name': 'eval_accuracy', + 'value': stats['eval_metrics'], + 'min_value': min_accuracy, + 'max_value': max_accuracy, + }) + flags_str = flags_core.get_nondefault_flags_as_str() + self.report_benchmark( + iters=stats['total_training_steps'], + wall_time=wall_time_sec, + metrics=metrics, + extras={'flags': flags_str}) diff --git a/models/official/benchmark/bert_pretrain_benchmark.py b/models/official/benchmark/bert_pretrain_benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..d63c894847d8e9e9308523d3efcb06c162d323c6 --- /dev/null +++ b/models/official/benchmark/bert_pretrain_benchmark.py @@ -0,0 +1,179 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Executes benchmark testing for bert pretraining.""" +# pylint: disable=line-too-long +from __future__ import print_function + +import json +import os +import time +from typing import Optional + +from absl import flags +from absl import logging +import tensorflow as tf # pylint: disable=g-bad-import-order + +from official.benchmark import benchmark_wrappers +from official.benchmark import bert_benchmark_utils +from official.benchmark import owner_utils +from official.nlp.bert import run_pretraining +from official.utils.flags import core as flags_core +from official.utils.misc import distribution_utils + +# Pretrain masked lanauge modeling accuracy range: +MIN_MLM_ACCURACY = 0.635 +MAX_MLM_ACCURACY = 0.645 + +# Pretrain next sentence prediction accuracy range: +MIN_NSP_ACCURACY = 0.94 +MAX_NSP_ACCURACY = 0.96 + +BERT_PRETRAIN_FILES_SEQ128 = 'gs://mlcompass-data/bert/pretraining_data/seq_128/wikipedia.tfrecord*,gs://mlcompass-data/bert/pretraining_data/seq_128/books.tfrecord*' +BERT_BASE_CONFIG_FILE = 'gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-12_H-768_A-12/bert_config.json' + +FLAGS = flags.FLAGS + + +class BertPretrainAccuracyBenchmark(bert_benchmark_utils.BertBenchmarkBase): + """Benchmark accuracy tests for BERT Pretraining.""" + + def __init__(self, + output_dir: Optional[str] = None, + tpu: Optional[str] = None, + **kwargs): + """Inits BertPretrainAccuracyBenchmark class. + + Args: + output_dir: Directory where to output e.g. log files + tpu: TPU name to use in a TPU benchmark. + **kwargs: Additional keyword arguments. + """ + super(BertPretrainAccuracyBenchmark, self).__init__( + output_dir=output_dir, tpu=tpu, **kwargs) + + @benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark(self, summary_path: str, report_accuracy: bool): + """Runs and reports the benchmark given the provided configuration.""" + distribution = distribution_utils.get_distribution_strategy( + distribution_strategy='tpu', tpu_address=self.tpu) + logging.info('Flags: %s', flags_core.get_nondefault_flags_as_str()) + start_time_sec = time.time() + run_pretraining.run_bert_pretrain( + strategy=distribution, custom_callbacks=self.timer_callback) + wall_time_sec = time.time() - start_time_sec + + with tf.io.gfile.GFile(summary_path, 'rb') as reader: + summary = json.loads(reader.read().decode('utf-8')) + self._report_benchmark(summary, start_time_sec, wall_time_sec, + report_accuracy) + + def _report_benchmark(self, summary, start_time_sec, wall_time_sec, + report_accuracy): + metrics = [{ + 'name': 'train_loss', + 'value': summary['train_loss'], + }, { + 'name': + 'exp_per_second', + 'value': + self.timer_callback.get_examples_per_sec(FLAGS.train_batch_size * + FLAGS.steps_per_loop) + }, { + 'name': 'startup_time', + 'value': self.timer_callback.get_startup_time(start_time_sec) + }] + if report_accuracy: + metrics.extend([{ + 'name': 'masked_lm_accuracy', + 'value': summary['masked_lm_accuracy'], + 'min_value': MIN_MLM_ACCURACY, + 'max_value': MAX_MLM_ACCURACY, + }, { + 'name': 'next_sentence_accuracy', + 'value': summary['next_sentence_accuracy'], + 'min_value': MIN_NSP_ACCURACY, + 'max_value': MAX_NSP_ACCURACY, + }]) + self.report_benchmark( + iters=summary['total_training_steps'], + wall_time=wall_time_sec, + metrics=metrics, + extras={'flags': flags_core.get_nondefault_flags_as_str()}) + + def _specify_common_flags(self): + FLAGS.bert_config_file = BERT_BASE_CONFIG_FILE + FLAGS.train_batch_size = 512 + 
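+    # Learning-rate schedule and input pipeline settings below are shared by
+    # all of the pretraining benchmarks in this class.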
FLAGS.learning_rate = 1e-4 + FLAGS.warmup_steps = 10000 + FLAGS.steps_per_loop = 10000 + FLAGS.distribution_strategy = 'tpu' + FLAGS.input_files = BERT_PRETRAIN_FILES_SEQ128 + FLAGS.max_seq_length = 128 + FLAGS.max_predictions_per_seq = 20 + FLAGS.dtype = 'bf16' + + @owner_utils.Owner('tf-model-garden') + def benchmark_accuracy_8x8_tpu_bf16_seq128_500k_steps(self): + """Test bert pretraining with 8x8 TPU for 500k steps.""" + # This is used for accuracy test. + self._setup() + self._specify_common_flags() + FLAGS.num_steps_per_epoch = 500000 + FLAGS.num_train_epochs = 1 + FLAGS.model_dir = self._get_model_dir( + 'benchmark_accuracy_8x8_tpu_bf16_seq128_500k_steps') + summary_path = os.path.join(FLAGS.model_dir, + 'summaries/training_summary.txt') + # Set train_summary_interval to -1 to disable training summary, because + # writing summary to gcs may fail and summaries are not needed for this + # accuracy benchmark test. + FLAGS.train_summary_interval = -1 + self._run_and_report_benchmark(summary_path=summary_path, + report_accuracy=True) + + @owner_utils.Owner('tf-model-garden') + def benchmark_perf_4x4_tpu_bf16_seq128_10k_steps(self): + """Test bert pretraining with 4x4 TPU for 10000 steps.""" + self._setup() + self._specify_common_flags() + FLAGS.num_steps_per_epoch = 5000 + FLAGS.num_train_epochs = 2 + FLAGS.model_dir = self._get_model_dir( + 'benchmark_perf_4x4_tpu_bf16_seq128_10k_steps') + summary_path = os.path.join(FLAGS.model_dir, + 'summaries/training_summary.txt') + # Disable accuracy check. + self._run_and_report_benchmark( + summary_path=summary_path, report_accuracy=False) + + @owner_utils.Owner('tf-model-garden') + def benchmark_perf_8x8_tpu_bf16_seq128_10k_steps(self): + """Test bert pretraining with 8x8 TPU for 10000 steps.""" + self._setup() + self._specify_common_flags() + FLAGS.num_steps_per_epoch = 5000 + FLAGS.num_train_epochs = 2 + FLAGS.model_dir = self._get_model_dir( + 'benchmark_perf_8x8_tpu_bf16_seq128_10k_steps') + summary_path = os.path.join(FLAGS.model_dir, + 'summaries/training_summary.txt') + # Disable accuracy check. + self._run_and_report_benchmark(summary_path=summary_path, + report_accuracy=False) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/benchmark/bert_squad_benchmark.py b/models/official/benchmark/bert_squad_benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..dab90a485b9a2c22d11da82ec1d9c320ea0db114 --- /dev/null +++ b/models/official/benchmark/bert_squad_benchmark.py @@ -0,0 +1,608 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Executes BERT SQuAD benchmarks and accuracy tests.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import json +import os +import time + +# pylint: disable=g-bad-import-order +from absl import flags +from absl import logging +from absl.testing import flagsaver +import tensorflow as tf +# pylint: enable=g-bad-import-order + +from official.benchmark import bert_benchmark_utils as benchmark_utils +from official.benchmark import owner_utils +from official.nlp.bert import run_squad +from official.utils.misc import distribution_utils +from official.utils.misc import keras_utils +from official.benchmark import benchmark_wrappers + + +# pylint: disable=line-too-long +PRETRAINED_CHECKPOINT_PATH = 'gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16/bert_model.ckpt' +SQUAD_TRAIN_DATA_PATH = 'gs://tf-perfzero-data/bert/squad/squad_train.tf_record' +SQUAD_PREDICT_FILE = 'gs://tf-perfzero-data/bert/squad/dev-v1.1.json' +SQUAD_VOCAB_FILE = 'gs://tf-perfzero-data/bert/squad/vocab.txt' +SQUAD_MEDIUM_INPUT_META_DATA_PATH = 'gs://tf-perfzero-data/bert/squad/squad_medium_meta_data' +SQUAD_LONG_INPUT_META_DATA_PATH = 'gs://tf-perfzero-data/bert/squad/squad_long_meta_data' +SQUAD_FULL_INPUT_META_DATA_PATH = 'gs://tf-perfzero-data/bert/squad/squad_full_meta_data' +MODEL_CONFIG_FILE_PATH = 'gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16/bert_config.json' +# pylint: enable=line-too-long + +TMP_DIR = os.getenv('TMPDIR') +FLAGS = flags.FLAGS + + +class BertSquadBenchmarkBase(benchmark_utils.BertBenchmarkBase): + """Base class to hold methods common to test classes in the module.""" + + def __init__(self, output_dir=None, tpu=None): + super(BertSquadBenchmarkBase, self).__init__(output_dir=output_dir, tpu=tpu) + + def _read_training_summary_from_file(self): + """Reads the training summary from a file.""" + summary_path = os.path.join(FLAGS.model_dir, + 'summaries/training_summary.txt') + with tf.io.gfile.GFile(summary_path, 'rb') as reader: + return json.loads(reader.read().decode('utf-8')) + + def _read_input_meta_data_from_file(self): + """Reads the input metadata from a file.""" + with tf.io.gfile.GFile(FLAGS.input_meta_data_path, 'rb') as reader: + return json.loads(reader.read().decode('utf-8')) + + def _get_distribution_strategy(self, ds_type='mirrored'): + """Gets the distribution strategy. + + Args: + ds_type: String, the distribution strategy type to be used. Can be + 'mirrored', 'multi_worker_mirrored', 'tpu' and 'off'. + + Returns: + A `tf.distribute.DistibutionStrategy` object. + """ + if self.tpu or ds_type == 'tpu': + return distribution_utils.get_distribution_strategy( + distribution_strategy='tpu', tpu_address=self.tpu) + elif ds_type == 'multi_worker_mirrored': + # Configures cluster spec for multi-worker distribution strategy. 
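+      # The cluster spec is built from the --worker_hosts and --task_index
+      # flags; the return value is intentionally discarded here.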
+ _ = distribution_utils.configure_cluster(FLAGS.worker_hosts, + FLAGS.task_index) + return distribution_utils.get_distribution_strategy( + distribution_strategy=ds_type, + num_gpus=self.num_gpus, + all_reduce_alg=FLAGS.all_reduce_alg) + + def _init_gpu_and_data_threads(self): + """Set env variables before any TF calls.""" + if FLAGS.tf_gpu_thread_mode: + keras_utils.set_gpu_thread_mode_and_count( + per_gpu_thread_count=FLAGS.per_gpu_thread_count, + gpu_thread_mode=FLAGS.tf_gpu_thread_mode, + num_gpus=self.num_gpus, + datasets_num_private_threads=FLAGS.datasets_num_private_threads) + + @flagsaver.flagsaver + def _train_squad(self, run_eagerly=False, ds_type='mirrored'): + """Runs BERT SQuAD training. Uses mirrored strategy by default.""" + self._init_gpu_and_data_threads() + input_meta_data = self._read_input_meta_data_from_file() + strategy = self._get_distribution_strategy(ds_type) + + run_squad.train_squad( + strategy=strategy, + input_meta_data=input_meta_data, + run_eagerly=run_eagerly, + custom_callbacks=[self.timer_callback]) + + @flagsaver.flagsaver + def _evaluate_squad(self, ds_type='mirrored'): + """Runs BERT SQuAD evaluation. Uses mirrored strategy by default.""" + self._init_gpu_and_data_threads() + input_meta_data = self._read_input_meta_data_from_file() + strategy = self._get_distribution_strategy(ds_type) + + if input_meta_data.get('version_2_with_negative', False): + logging.error('In memory evaluation result for SQuAD v2 is not accurate') + eval_metrics = run_squad.eval_squad(strategy=strategy, + input_meta_data=input_meta_data) + # Use F1 score as reported evaluation metric. + self.eval_metrics = eval_metrics['final_f1'] + + +class BertSquadBenchmarkReal(BertSquadBenchmarkBase): + """Short benchmark performance tests for BERT SQuAD model. + + Tests BERT SQuAD performance in different GPU configurations. + The naming convention of below test cases follow + `benchmark_(number of gpus)_gpu` format for GPUs and + `benchmark_(topology)_tpu` format for TPUs. 
+ """ + + def __init__(self, output_dir=TMP_DIR, tpu=None, **kwargs): + super(BertSquadBenchmarkReal, self).__init__(output_dir=output_dir, tpu=tpu) + + def _setup(self): + """Sets up the benchmark and SQuAD flags.""" + super(BertSquadBenchmarkReal, self)._setup() + FLAGS.train_data_path = SQUAD_TRAIN_DATA_PATH + FLAGS.predict_file = SQUAD_PREDICT_FILE + FLAGS.vocab_file = SQUAD_VOCAB_FILE + FLAGS.bert_config_file = MODEL_CONFIG_FILE_PATH + FLAGS.num_train_epochs = 1 + FLAGS.steps_per_loop = 100 + + @benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark(self, + run_eagerly=False, + ds_type='mirrored'): + """Runs the benchmark and reports various metrics.""" + if FLAGS.train_batch_size <= 4 or run_eagerly: + FLAGS.input_meta_data_path = SQUAD_MEDIUM_INPUT_META_DATA_PATH + else: + FLAGS.input_meta_data_path = SQUAD_LONG_INPUT_META_DATA_PATH + start_time_sec = time.time() + self._train_squad(run_eagerly=run_eagerly, ds_type=ds_type) + wall_time_sec = time.time() - start_time_sec + + summary = self._read_training_summary_from_file() + summary['start_time_sec'] = start_time_sec + + super(BertSquadBenchmarkReal, self)._report_benchmark( + stats=summary, + wall_time_sec=wall_time_sec, + min_accuracy=0, + max_accuracy=1) + + def benchmark_1_gpu(self): + """Tests BERT SQuAD model performance with 1 GPU.""" + + self._setup() + self.num_gpus = 1 + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_squad') + FLAGS.train_batch_size = 4 + + self._run_and_report_benchmark() + + def benchmark_1_gpu_eager(self): + """Tests BERT SQuAD model performance with 1 GPU.""" + + self._setup() + self.num_gpus = 1 + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_squad_eager') + FLAGS.train_batch_size = 2 + + self._run_and_report_benchmark(run_eagerly=True) + + def benchmark_1_gpu_xla(self): + """Tests BERT SQuAD model performance with 1 GPU with XLA.""" + + self._setup() + self.num_gpus = 1 + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_xla_squad') + # XLA runs out of memory when running with batch size 4. 
+    FLAGS.train_batch_size = 3
+    FLAGS.enable_xla = True
+
+    self._run_and_report_benchmark()
+
+  def benchmark_1_gpu_no_dist_strat(self):
+    """Tests BERT SQuAD model performance with 1 GPU without DS."""
+
+    self._setup()
+    self.num_gpus = 1
+    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_dist_strat_squad')
+    FLAGS.train_batch_size = 4
+
+    self._run_and_report_benchmark(ds_type='off')
+
+  def benchmark_1_gpu_eager_no_dist_strat(self):
+    """Tests BERT SQuAD model performance with 1 GPU, eager execution and no DS."""
+
+    self._setup()
+    self.num_gpus = 1
+    FLAGS.model_dir = self._get_model_dir(
+        'benchmark_1_gpu_eager_no_dist_strat_squad')
+    FLAGS.train_batch_size = 4
+
+    self._run_and_report_benchmark(ds_type='off', run_eagerly=True)
+
+  @owner_utils.Owner('tf-model-garden')
+  def benchmark_8_gpu(self):
+    """Tests BERT SQuAD model performance with 8 GPUs."""
+
+    self._setup()
+    self.num_gpus = 8
+    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_squad')
+    FLAGS.train_batch_size = 24
+    FLAGS.tf_gpu_thread_mode = 'gpu_private'
+
+    self._run_and_report_benchmark()
+
+  def benchmark_1_gpu_fp16_eager(self):
+    """Tests BERT SQuAD model performance with 1 GPU, FP16 and eager execution."""
+
+    self._setup()
+    self.num_gpus = 1
+    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_squad_fp16_eager')
+    FLAGS.train_batch_size = 4
+    FLAGS.dtype = 'fp16'
+    FLAGS.loss_scale = 'dynamic'
+
+    self._run_and_report_benchmark(run_eagerly=True)
+
+  def benchmark_1_gpu_fp16(self):
+    """Tests BERT SQuAD model performance with 1 GPU and FP16."""
+
+    self._setup()
+    self.num_gpus = 1
+    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_squad_fp16')
+    FLAGS.train_batch_size = 4
+    FLAGS.dtype = 'fp16'
+    FLAGS.loss_scale = 'dynamic'
+
+    self._run_and_report_benchmark()
+
+  def benchmark_1_gpu_xla_fp16(self):
+    """Tests BERT SQuAD model performance with 1 GPU with XLA and FP16."""
+
+    self._setup()
+    self.num_gpus = 1
+    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_xla_squad_fp16')
+    FLAGS.train_batch_size = 4
+    FLAGS.enable_xla = True
+    FLAGS.dtype = 'fp16'
+    FLAGS.loss_scale = 'dynamic'
+
+    self._run_and_report_benchmark()
+
+  def benchmark_8_gpu_fp16(self):
+    """Tests BERT SQuAD model performance with 8 GPUs and FP16."""
+
+    self._setup()
+    self.num_gpus = 8
+    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_squad_fp16')
+    FLAGS.train_batch_size = 32
+    FLAGS.dtype = 'fp16'
+    FLAGS.loss_scale = 'dynamic'
+    FLAGS.tf_gpu_thread_mode = 'gpu_private'
+
+    self._run_and_report_benchmark()
+
+  def benchmark_8_gpu_xla_fp16(self):
+    """Tests BERT SQuAD model performance with 8 GPUs with XLA and FP16."""
+
+    self._setup()
+    self.num_gpus = 8
+    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_squad_fp16')
+    FLAGS.train_batch_size = 32
+    FLAGS.enable_xla = True
+    FLAGS.dtype = 'fp16'
+    FLAGS.loss_scale = 'dynamic'
+
+    self._run_and_report_benchmark()
+
+  def benchmark_1_gpu_amp(self):
+    """Tests BERT SQuAD model performance with 1 GPU with automatic mixed precision."""
+
+    self._setup()
+    self.num_gpus = 1
+    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_amp_squad')
+    FLAGS.train_batch_size = 4
+    FLAGS.dtype = 'fp16'
+    FLAGS.fp16_implementation = 'graph_rewrite'
+
+    self._run_and_report_benchmark()
+
+  def benchmark_8_gpu_amp(self):
+    """Tests BERT SQuAD model performance with 8 GPUs with automatic mixed precision."""
+
+    self._setup()
+    self.num_gpus = 8
+    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_amp_squad')
+    FLAGS.train_batch_size = 32
+    FLAGS.dtype = 'fp16'
+    FLAGS.fp16_implementation =
'graph_rewrite' + FLAGS.tf_gpu_thread_mode = 'gpu_private' + + self._run_and_report_benchmark() + + @owner_utils.Owner('tf-model-garden') + def benchmark_2x2_tpu(self): + """Tests BERT SQuAD model performance with 2x2 TPU.""" + + self._setup() + FLAGS.model_dir = self._get_model_dir('benchmark_2x2_tpu') + FLAGS.train_batch_size = 48 + FLAGS.predict_batch_size = 48 + FLAGS.mode = 'train' + FLAGS.learning_rate = 8e-5 + FLAGS.num_train_epochs = 1 + FLAGS.steps_per_loop = 100 + FLAGS.do_lower_case = True + FLAGS.init_checkpoint = PRETRAINED_CHECKPOINT_PATH + self._run_and_report_benchmark() + + +class BertSquadAccuracy(BertSquadBenchmarkBase): + """Short accuracy test for BERT SQuAD model. + + Tests BERT SQuAD accuracy. The naming convention of below test cases follow + `benchmark_(number of gpus)_gpu` format for GPUs and + `benchmark_(topology)_tpu` format for TPUs. + """ + + def __init__(self, output_dir=None, tpu=None, **kwargs): + super(BertSquadAccuracy, self).__init__(output_dir=output_dir, tpu=tpu) + + def _setup(self): + """Sets up the benchmark and SQuAD flags.""" + super(BertSquadAccuracy, self)._setup() + FLAGS.train_data_path = SQUAD_TRAIN_DATA_PATH + FLAGS.predict_file = SQUAD_PREDICT_FILE + FLAGS.vocab_file = SQUAD_VOCAB_FILE + FLAGS.input_meta_data_path = SQUAD_FULL_INPUT_META_DATA_PATH + FLAGS.bert_config_file = MODEL_CONFIG_FILE_PATH + FLAGS.init_checkpoint = PRETRAINED_CHECKPOINT_PATH + FLAGS.num_train_epochs = 2 + FLAGS.steps_per_loop = 100 + + @benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark(self, + run_eagerly=False, + ds_type='mirrored'): + """Runs the benchmark and reports various metrics.""" + start_time_sec = time.time() + self._train_squad(run_eagerly=run_eagerly, ds_type=ds_type) + self._evaluate_squad(ds_type=ds_type) + wall_time_sec = time.time() - start_time_sec + + summary = self._read_training_summary_from_file() + summary['eval_metrics'] = self.eval_metrics + summary['start_time_sec'] = start_time_sec + + super(BertSquadAccuracy, self)._report_benchmark( + stats=summary, + wall_time_sec=wall_time_sec, + min_accuracy=0.900, + max_accuracy=0.920) + + def benchmark_1_gpu_eager(self): + """Tests BERT SQuAD model accuracy with 1 GPU with eager execution.""" + + self._setup() + self.num_gpus = 1 + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_squad_eager') + FLAGS.train_batch_size = 4 + + self._run_and_report_benchmark(ds_type='off', run_eagerly=True) + + @owner_utils.Owner('tf-model-garden') + def benchmark_8_gpu(self): + """Tests BERT SQuAD model accuracy with 8 GPUs.""" + + self._setup() + self.num_gpus = 8 + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_squad') + FLAGS.train_batch_size = 24 + FLAGS.tf_gpu_thread_mode = 'gpu_private' + + self._run_and_report_benchmark() + + def benchmark_8_gpu_fp16(self): + """Tests BERT SQuAD model accuracy with 8 GPUs and FP16.""" + + self._setup() + self.num_gpus = 8 + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_squad_fp16') + FLAGS.train_batch_size = 32 + FLAGS.dtype = 'fp16' + FLAGS.loss_scale = 'dynamic' + FLAGS.tf_gpu_thread_mode = 'gpu_private' + + self._run_and_report_benchmark() + + def benchmark_8_gpu_xla(self): + """Tests BERT SQuAD model accuracy with 8 GPUs.""" + + self._setup() + self.num_gpus = 8 + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_squad_xla') + FLAGS.train_batch_size = 32 + FLAGS.enable_xla = True + FLAGS.tf_gpu_thread_mode = 'gpu_private' + + self._run_and_report_benchmark() + + @owner_utils.Owner('tf-model-garden') + def 
benchmark_2x2_tpu(self): + """Tests BERT SQuAD model accuracy with 2x2 TPU.""" + + self._setup() + FLAGS.model_dir = self._get_model_dir('benchmark_2x2_tpu') + FLAGS.train_batch_size = 48 + + self._run_and_report_benchmark() + + +class BertSquadMultiWorkerAccuracy(BertSquadBenchmarkBase): + """BERT SQuAD distributed accuracy tests with multiple workers.""" + + def __init__(self, output_dir=None, tpu=None, **kwargs): + super(BertSquadMultiWorkerAccuracy, self).__init__( + output_dir=output_dir, tpu=tpu) + + def _setup(self): + """Sets up the benchmark and SQuAD flags.""" + super(BertSquadMultiWorkerAccuracy, self)._setup() + FLAGS.train_data_path = SQUAD_TRAIN_DATA_PATH + FLAGS.predict_file = SQUAD_PREDICT_FILE + FLAGS.vocab_file = SQUAD_VOCAB_FILE + FLAGS.input_meta_data_path = SQUAD_FULL_INPUT_META_DATA_PATH + FLAGS.bert_config_file = MODEL_CONFIG_FILE_PATH + FLAGS.init_checkpoint = PRETRAINED_CHECKPOINT_PATH + FLAGS.num_train_epochs = 2 + FLAGS.steps_per_loop = 100 + + @benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark(self, + use_ds=True, + run_eagerly=False): + """Runs the benchmark and reports various metrics.""" + start_time_sec = time.time() + self._train_squad(run_eagerly=run_eagerly, + ds_type='multi_worker_mirrored') + self._evaluate_squad(ds_type='multi_worker_mirrored') + wall_time_sec = time.time() - start_time_sec + + summary = self._read_training_summary_from_file() + summary['eval_metrics'] = self.eval_metrics + + super(BertSquadMultiWorkerAccuracy, self)._report_benchmark( + stats=summary, + wall_time_sec=wall_time_sec, + min_accuracy=0.900, + max_accuracy=0.920) + + def _benchmark_common(self, num_workers, all_reduce_alg): + """Common to all benchmarks in this class.""" + self._setup() + + num_gpus = 8 + FLAGS.num_gpus = num_gpus + FLAGS.dtype = 'fp16' + FLAGS.enable_xla = False + FLAGS.distribution_strategy = 'multi_worker_mirrored' + FLAGS.tf_gpu_thread_mode = 'gpu_private' + FLAGS.datasets_num_private_threads = 32 + FLAGS.model_dir = self._get_model_dir( + 'benchmark_8_gpu_{}_worker_fp16_{}_tweaked'.format( + num_workers, all_reduce_alg)) + FLAGS.train_batch_size = 4 * num_gpus * num_workers + FLAGS.all_reduce_alg = all_reduce_alg + + self._run_and_report_benchmark() + + def benchmark_eager_8_gpu_2_workers_fp16_ring_tweaked(self): + """8 GPUs per worker, 2 workers, fp16, ring all-reduce.""" + self._benchmark_common(num_workers=2, all_reduce_alg='ring') + + def benchmark_eager_8_gpu_2_workers_fp16_nccl_tweaked(self): + """8 GPUs per worker, 2 workers, fp16, nccl all-reduce.""" + self._benchmark_common(num_workers=2, all_reduce_alg='nccl') + + def benchmark_8_gpu_8_workers_fp16_ring_tweaked(self): + """8 GPUs per worker, 8 workers, fp16, ring all-reduce.""" + self._benchmark_common(num_workers=8, all_reduce_alg='ring') + + def benchmark_8_gpu_8_workers_fp16_nccl_tweaked(self): + """8 GPUs per worker, 8 workers, fp16, nccl all-reduce.""" + self._benchmark_common(num_workers=8, all_reduce_alg='nccl') + + +class BertSquadMultiWorkerBenchmark(BertSquadBenchmarkBase): + """BERT SQuAD distributed benchmark tests with multiple workers.""" + + def __init__(self, output_dir=TMP_DIR, tpu=None, **kwargs): + super(BertSquadMultiWorkerBenchmark, self).__init__( + output_dir=output_dir, tpu=tpu) + + def _setup(self): + """Sets up the benchmark and SQuAD flags.""" + super(BertSquadMultiWorkerBenchmark, self)._setup() + FLAGS.train_data_path = SQUAD_TRAIN_DATA_PATH + FLAGS.predict_file = SQUAD_PREDICT_FILE + FLAGS.vocab_file = SQUAD_VOCAB_FILE + 
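+    # Note: _run_and_report_benchmark below overrides this metadata path
+    # depending on the global train batch size.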
FLAGS.input_meta_data_path = SQUAD_FULL_INPUT_META_DATA_PATH + FLAGS.bert_config_file = MODEL_CONFIG_FILE_PATH + FLAGS.num_train_epochs = 1 + FLAGS.steps_per_loop = 100 + + @benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark(self, + use_ds=True, + run_eagerly=False): + """Runs the benchmark and reports various metrics.""" + if FLAGS.train_batch_size <= 4 * 8: + FLAGS.input_meta_data_path = SQUAD_LONG_INPUT_META_DATA_PATH + else: + FLAGS.input_meta_data_path = SQUAD_FULL_INPUT_META_DATA_PATH + start_time_sec = time.time() + self._train_squad(run_eagerly=run_eagerly, + ds_type='multi_worker_mirrored') + wall_time_sec = time.time() - start_time_sec + + summary = self._read_training_summary_from_file() + summary['start_time_sec'] = start_time_sec + + super(BertSquadMultiWorkerBenchmark, self)._report_benchmark( + stats=summary, + wall_time_sec=wall_time_sec, + min_accuracy=0, + max_accuracy=1) + + def _benchmark_common(self, num_workers, all_reduce_alg): + """Common to all benchmarks in this class.""" + self._setup() + + num_gpus = 8 + FLAGS.num_gpus = num_gpus + FLAGS.dtype = 'fp16' + FLAGS.enable_xla = False + FLAGS.distribution_strategy = 'multi_worker_mirrored' + FLAGS.tf_gpu_thread_mode = 'gpu_private' + FLAGS.datasets_num_private_threads = 32 + FLAGS.model_dir = self._get_model_dir( + 'benchmark_8_gpu_{}_worker_fp16_{}_tweaked'.format( + num_workers, all_reduce_alg)) + FLAGS.train_batch_size = 4 * num_gpus * num_workers + FLAGS.all_reduce_alg = all_reduce_alg + + self._run_and_report_benchmark() + + def benchmark_8_gpu_1_worker_fp16_ring_tweaked(self): + """8 GPUs per worker, 1 worker, fp16, ring all-reduce.""" + self._benchmark_common(num_workers=1, all_reduce_alg='ring') + + def benchmark_8_gpu_1_worker_fp16_nccl_tweaked(self): + """8 GPUs per worker, 1 worker, fp16, nccl all-reduce.""" + self._benchmark_common(num_workers=1, all_reduce_alg='nccl') + + def benchmark_8_gpu_2_workers_fp16_ring_tweaked(self): + """8 GPUs per worker, 2 workers, fp16, ring all-reduce.""" + self._benchmark_common(num_workers=2, all_reduce_alg='ring') + + def benchmark_8_gpu_2_workers_fp16_nccl_tweaked(self): + """8 GPUs per worker, 2 workers, fp16, nccl all-reduce.""" + self._benchmark_common(num_workers=2, all_reduce_alg='nccl') + + def benchmark_8_gpu_8_workers_fp16_ring_tweaked(self): + """8 GPUs per worker, 8 workers, fp16, ring all-reduce.""" + self._benchmark_common(num_workers=8, all_reduce_alg='ring') + + def benchmark_8_gpu_8_workers_fp16_nccl_tweaked(self): + """8 GPUs per worker, 8 workers, fp16, nccl all-reduce.""" + self._benchmark_common(num_workers=8, all_reduce_alg='nccl') + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/benchmark/datastore/schema/benchmark_metric.json b/models/official/benchmark/datastore/schema/benchmark_metric.json new file mode 100644 index 0000000000000000000000000000000000000000..cc571d480605241e7c71d2e4cabdaf6ad3da9295 --- /dev/null +++ b/models/official/benchmark/datastore/schema/benchmark_metric.json @@ -0,0 +1,56 @@ +[ + { + "description": "The ID of the benchmark run, where this metric should tie to.", + "mode": "REQUIRED", + "name": "run_id", + "type": "STRING" + }, + { + "description": "The name of the metric, which should be descriptive. E.g. training_loss, accuracy.", + "mode": "REQUIRED", + "name": "name", + "type": "STRING" + }, + { + "description": "The unit of the metric. E.g. 
MB per sec.", + "mode": "NULLABLE", + "name": "unit", + "type": "STRING" + }, + { + "description": "The value of the metric.", + "mode": "NULLABLE", + "name": "value", + "type": "FLOAT" + }, + { + "description": "The timestamp when the metric is recorded.", + "mode": "REQUIRED", + "name": "timestamp", + "type": "TIMESTAMP" + }, + { + "description": "The global step when this metric is recorded.", + "mode": "NULLABLE", + "name": "global_step", + "type": "INTEGER" + }, + { + "description": "Free format metadata for the extra information about the metric.", + "mode": "REPEATED", + "name": "extras", + "type": "RECORD", + "fields": [ + { + "mode": "NULLABLE", + "name": "name", + "type": "STRING" + }, + { + "mode": "NULLABLE", + "name": "value", + "type": "STRING" + } + ] + } +] diff --git a/models/official/benchmark/datastore/schema/benchmark_run.json b/models/official/benchmark/datastore/schema/benchmark_run.json new file mode 100644 index 0000000000000000000000000000000000000000..58e5ddcadeff98b05c328c2798071f9cd73ef9d2 --- /dev/null +++ b/models/official/benchmark/datastore/schema/benchmark_run.json @@ -0,0 +1,368 @@ +[ + { + "description": "The UUID of the run for the benchmark.", + "mode": "REQUIRED", + "name": "model_id", + "type": "STRING" + }, + { + "description": "The name of the model, E.g ResNet50, LeNet-5 etc.", + "mode": "REQUIRED", + "name": "model_name", + "type": "STRING" + }, + { + "description": "The date when the test of the model is started", + "mode": "REQUIRED", + "name": "run_date", + "type": "TIMESTAMP" + }, + { + "description": "The unique name for a test by the combination of key parameters, eg batch size, num of GPU, etc. It is hardware independent.", + "mode": "NULLABLE", + "name": "test_id", + "type": "STRING" + }, + { + "description": "The tensorflow version information.", + "fields": [ + { + "description": "Version of the tensorflow. E.g. 
1.7.0-rc0", + "mode": "REQUIRED", + "name": "version", + "type": "STRING" + }, + { + "description": "Git Hash of the tensorflow", + "mode": "NULLABLE", + "name": "git_hash", + "type": "STRING" + }, + { + "description": "The channel of the tensorflow binary, eg, nightly, RC, final, custom.", + "mode": "NULLABLE", + "name": "channel", + "type": "STRING" + }, + { + "description": "Identify anything special about the build, eg CUDA 10, NCCL, MKL, etc.", + "mode": "NULLABLE", + "name": "build_type", + "type": "STRING" + } + ], + "mode": "REQUIRED", + "name": "tensorflow_version", + "type": "RECORD" + }, + { + "description": "The arbitrary attribute of the model.", + "fields": [ + { + "description": "The name of the attribute.", + "mode": "REQUIRED", + "name": "name", + "type": "STRING" + }, + { + "description": "The value of the attribute.", + "mode": "NULLABLE", + "name": "value", + "type": "STRING" + } + ], + "mode": "REPEATED", + "name": "attribute", + "type": "RECORD" + }, + { + "description": "Environment variables when the benchmark run is executed.", + "fields": [ + { + "description": "The name of the variable.", + "mode": "REQUIRED", + "name": "name", + "type": "STRING" + }, + { + "description": "The value of the variable.", + "mode": "NULLABLE", + "name": "value", + "type": "STRING" + } + ], + "mode": "REPEATED", + "name": "environment_variable", + "type": "RECORD" + }, + { + "description": "TF Environment variables when the benchmark run is executed.", + "fields": [ + { + "description": "The name of the variable.", + "mode": "REQUIRED", + "name": "name", + "type": "STRING" + }, + { + "description": "The value of the variable.", + "mode": "NULLABLE", + "name": "value", + "type": "STRING" + } + ], + "mode": "REPEATED", + "name": "tensorflow_environment_variables", + "type": "RECORD" + }, + { + "description": "The list of parameters run with the model. It could contain hyperparameters or others.", + "fields": [ + { + "description": "The name of the parameter.", + "mode": "REQUIRED", + "name": "name", + "type": "STRING" + }, + { + "description": "The string value of the parameter.", + "mode": "NULLABLE", + "name": "string_value", + "type": "STRING" + }, + { + "description": "The bool value of the parameter.", + "mode": "NULLABLE", + "name": "bool_value", + "type": "STRING" + }, + { + "description": "The int/long value of the parameter.", + "mode": "NULLABLE", + "name": "long_value", + "type": "INTEGER" + }, + { + "description": "The double/float value of parameter.", + "mode": "NULLABLE", + "name": "float_value", + "type": "FLOAT" + } + ], + "mode": "REPEATED", + "name": "run_parameters", + "type": "RECORD" + }, + { + "description": "The dataset that run with the benchmark.", + "mode": "NULLABLE", + "name": "dataset", + "type": "RECORD", + "fields": [ + { + "description": "The name of the dataset that the model is trained/validated with. 
E.g ImageNet, mnist.", + "mode": "REQUIRED", + "name": "name", + "type": "STRING" + }, + { + "description": "The arbitrary attribute of the dataset.", + "fields": [ + { + "description": "The name of the attribute.", + "mode": "REQUIRED", + "name": "name", + "type": "STRING" + }, + { + "description": "The value of the attribute.", + "mode": "NULLABLE", + "name": "value", + "type": "STRING" + } + ], + "mode": "REPEATED", + "name": "attribute", + "type": "RECORD" + } + ] + }, + { + "description": "Used to differentiate from AWS, GCE or DGX-1 at a high level", + "mode": "NULLABLE", + "name": "test_environment", + "type": "STRING" + }, + { + "description": "The machine configuration of the benchmark run.", + "mode": "NULLABLE", + "name": "machine_config", + "type": "RECORD", + "fields": [ + { + "description": "The platform information of the benchmark run.", + "mode": "NULLABLE", + "name": "platform_info", + "type": "RECORD", + "fields": [ + { + "description": "Eg: 64bit.", + "mode": "NULLABLE", + "name": "bits", + "type": "STRING" + }, + { + "description": "Eg: ELF.", + "mode": "NULLABLE", + "name": "linkage", + "type": "STRING" + }, + { + "description": "Eg: i386.", + "mode": "NULLABLE", + "name": "machine", + "type": "STRING" + }, + { + "description": "Eg: 3.13.0-76-generic.", + "mode": "NULLABLE", + "name": "release", + "type": "STRING" + }, + { + "description": "Eg: Linux.", + "mode": "NULLABLE", + "name": "system", + "type": "STRING" + }, + { + "description": "Eg: #120-Ubuntu SMP Mon Jan 18 15:59:10 UTC 2016.", + "mode": "NULLABLE", + "name": "version", + "type": "STRING" + } + ] + }, + { + "description": "The CPU information of the benchmark run.", + "mode": "NULLABLE", + "name": "cpu_info", + "type": "RECORD", + "fields": [ + { + "mode": "NULLABLE", + "name": "num_cores", + "type": "INTEGER" + }, + { + "mode": "NULLABLE", + "name": "num_cores_allowed", + "type": "INTEGER" + }, + { + "description" : "How fast are those CPUs.", + "mode": "NULLABLE", + "name": "mhz_per_cpu", + "type": "FLOAT" + }, + { + "description" : "Additional CPU info, Eg: Intel Ivybridge with HyperThreading (24 cores).", + "mode": "NULLABLE", + "name": "cpu_info", + "type": "STRING" + }, + { + "description" : "What kind of cpu scaling is enabled on the host. Eg performance, ondemand, conservative, mixed.", + "mode": "NULLABLE", + "name": "cpu_governor", + "type": "STRING" + }, + { + "description": "Cache size of the CPUs.", + "mode": "NULLABLE", + "name": "cache_size", + "type": "RECORD", + "fields": [ + { + "mode": "NULLABLE", + "name": "level", + "type": "STRING" + }, + { + "mode": "NULLABLE", + "name": "size", + "type": "INTEGER" + } + ] + } + ] + }, + { + "mode": "NULLABLE", + "name": "gpu_info", + "type": "RECORD", + "fields": [ + { + "mode": "NULLABLE", + "name": "count", + "type": "INTEGER" + }, + { + "mode": "NULLABLE", + "name": "model", + "type": "STRING" + }, + { + "mode": "NULLABLE", + "name": "cuda_version", + "type": "STRING" + } + ] + }, + { + "description": "The cloud instance inforation if the benchmark run is executed on cloud", + "mode": "NULLABLE", + "name": "cloud_info", + "type": "RECORD", + "fields": [ + { + "description": "The instance type, E.g. 
n1-standard-4.", + "mode": "NULLABLE", + "name": "instance_type", + "type": "STRING" + }, + { + "description": "The arbitrary attribute of the cloud info.", + "fields": [ + { + "description": "The name of the attribute.", + "mode": "REQUIRED", + "name": "name", + "type": "STRING" + }, + { + "description": "The value of the attribute.", + "mode": "NULLABLE", + "name": "value", + "type": "STRING" + } + ], + "mode": "REPEATED", + "name": "attribute", + "type": "RECORD" + } + ] + }, + { + "mode": "NULLABLE", + "name": "memory_total", + "type": "INTEGER" + }, + { + "mode": "NULLABLE", + "name": "memory_available", + "type": "STRING" + } + ] + } +] diff --git a/models/official/benchmark/datastore/schema/benchmark_run_status.json b/models/official/benchmark/datastore/schema/benchmark_run_status.json new file mode 100644 index 0000000000000000000000000000000000000000..f7ac59eb8042c181e8996d9e1a0e7ee79f6f0343 --- /dev/null +++ b/models/official/benchmark/datastore/schema/benchmark_run_status.json @@ -0,0 +1,14 @@ +[ + { + "description": "The UUID of the run for the benchmark.", + "mode": "REQUIRED", + "name": "run_id", + "type": "STRING" + }, + { + "description": "The status of the run for the benchmark. Eg, running, failed, success", + "mode": "REQUIRED", + "name": "status", + "type": "STRING" + } +] \ No newline at end of file diff --git a/models/official/benchmark/keras_benchmark.py b/models/official/benchmark/keras_benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..770674ac658f213d614f0a3704a0bbb200bb94aa --- /dev/null +++ b/models/official/benchmark/keras_benchmark.py @@ -0,0 +1,98 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Executes Keras benchmarks and accuracy tests.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf +from official.benchmark.perfzero_benchmark import PerfZeroBenchmark +from official.utils.flags import core as flags_core + + +class KerasBenchmark(PerfZeroBenchmark): + """Base benchmark class with methods to simplify testing.""" + + def __init__(self, + output_dir=None, + default_flags=None, + flag_methods=None, + tpu=None): + super(KerasBenchmark, self).__init__( + output_dir=output_dir, + default_flags=default_flags, + flag_methods=flag_methods, + tpu=tpu) + + def _report_benchmark(self, + stats, + wall_time_sec, + top_1_max=None, + top_1_min=None, + log_steps=None, + total_batch_size=None, + warmup=1, + start_time_sec=None): + """Report benchmark results by writing to local protobuf file. + + Args: + stats: dict returned from keras models with known entries. + wall_time_sec: the during of the benchmark execution in seconds + top_1_max: highest passing level for top_1 accuracy. + top_1_min: lowest passing level for top_1 accuracy. 
+ log_steps: How often the log was created for stats['step_timestamp_log']. + total_batch_size: Global batch-size. + warmup: number of entries in stats['step_timestamp_log'] to ignore. + start_time_sec: the start time of the program in seconds since epoch + """ + + metrics = [] + if 'accuracy_top_1' in stats: + metrics.append({'name': 'accuracy_top_1', + 'value': stats['accuracy_top_1'], + 'min_value': top_1_min, + 'max_value': top_1_max}) + metrics.append({'name': 'top_1_train_accuracy', + 'value': stats['training_accuracy_top_1']}) + + if (warmup and 'step_timestamp_log' in stats and + len(stats['step_timestamp_log']) > warmup): + # first entry in the time_log is start of step 1. The rest of the + # entries are the end of each step recorded + time_log = stats['step_timestamp_log'] + elapsed = time_log[-1].timestamp - time_log[warmup].timestamp + num_examples = ( + total_batch_size * log_steps * (len(time_log) - warmup - 1)) + examples_per_sec = num_examples / elapsed + metrics.append({'name': 'exp_per_second', + 'value': examples_per_sec}) + + if 'avg_exp_per_second' in stats: + metrics.append({'name': 'avg_exp_per_second', + 'value': stats['avg_exp_per_second']}) + + if start_time_sec and 'step_timestamp_log' in stats: + time_log = stats['step_timestamp_log'] + # time_log[0] is recorded at the beginning of the first step. + startup_time = time_log[0].timestamp - start_time_sec + metrics.append({'name': 'startup_time', 'value': startup_time}) + + flags_str = flags_core.get_nondefault_flags_as_str() + self.report_benchmark( + iters=-1, + wall_time=wall_time_sec, + metrics=metrics, + extras={'flags': flags_str}) diff --git a/models/official/benchmark/keras_cifar_benchmark.py b/models/official/benchmark/keras_cifar_benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..694200f66678a1bc9bc44194377a52489a1b97f3 --- /dev/null +++ b/models/official/benchmark/keras_cifar_benchmark.py @@ -0,0 +1,402 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Executes Keras benchmarks and accuracy tests.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import time +from absl import flags +import tensorflow as tf # pylint: disable=g-bad-import-order + +from official.benchmark import keras_benchmark +from official.benchmark import benchmark_wrappers +from official.benchmark.models import resnet_cifar_main + +MIN_TOP_1_ACCURACY = 0.929 +MAX_TOP_1_ACCURACY = 0.938 + +FLAGS = flags.FLAGS +CIFAR_DATA_DIR_NAME = 'cifar-10-batches-bin' + + +class Resnet56KerasAccuracy(keras_benchmark.KerasBenchmark): + """Accuracy tests for ResNet56 Keras CIFAR-10.""" + + def __init__(self, output_dir=None, root_data_dir=None, **kwargs): + """A benchmark class. + + Args: + output_dir: directory where to output e.g. 
log files + root_data_dir: directory under which to look for dataset + **kwargs: arbitrary named arguments. This is needed to make the + constructor forward compatible in case PerfZero provides more + named arguments before updating the constructor. + """ + + self.data_dir = os.path.join(root_data_dir, CIFAR_DATA_DIR_NAME) + flag_methods = [resnet_cifar_main.define_cifar_flags] + + super(Resnet56KerasAccuracy, self).__init__( + output_dir=output_dir, flag_methods=flag_methods) + + def _setup(self): + super(Resnet56KerasAccuracy, self)._setup() + FLAGS.use_tensor_lr = False + + def benchmark_graph_1_gpu(self): + """Test keras based model with Keras fit and distribution strategies.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.data_dir = self.data_dir + FLAGS.batch_size = 128 + FLAGS.train_epochs = 182 + FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu') + FLAGS.dtype = 'fp32' + self._run_and_report_benchmark() + + def benchmark_1_gpu(self): + """Test keras based model with eager and distribution strategies.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.data_dir = self.data_dir + FLAGS.batch_size = 128 + FLAGS.train_epochs = 182 + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu') + FLAGS.dtype = 'fp32' + FLAGS.enable_eager = True + self._run_and_report_benchmark() + + def benchmark_cpu(self): + """Test keras based model on CPU.""" + self._setup() + FLAGS.num_gpus = 0 + FLAGS.data_dir = self.data_dir + FLAGS.batch_size = 128 + FLAGS.train_epochs = 182 + FLAGS.model_dir = self._get_model_dir('benchmark_cpu') + FLAGS.dtype = 'fp32' + FLAGS.enable_eager = True + FLAGS.data_format = 'channels_last' + self._run_and_report_benchmark() + + def benchmark_cpu_no_dist_strat(self): + """Test keras based model on CPU without distribution strategies.""" + self._setup() + FLAGS.num_gpus = 0 + FLAGS.data_dir = self.data_dir + FLAGS.batch_size = 128 + FLAGS.train_epochs = 182 + FLAGS.model_dir = self._get_model_dir('benchmark_cpu_no_dist_strat') + FLAGS.dtype = 'fp32' + FLAGS.enable_eager = True + FLAGS.distribution_strategy = 'off' + FLAGS.data_format = 'channels_last' + self._run_and_report_benchmark() + + def benchmark_cpu_no_dist_strat_run_eagerly(self): + """Test keras based model on CPU w/forced eager and no dist_strat.""" + self._setup() + FLAGS.num_gpus = 0 + FLAGS.data_dir = self.data_dir + FLAGS.batch_size = 128 + FLAGS.train_epochs = 182 + FLAGS.model_dir = self._get_model_dir( + 'benchmark_cpu_no_dist_strat_run_eagerly') + FLAGS.dtype = 'fp32' + FLAGS.enable_eager = True + FLAGS.run_eagerly = True + FLAGS.distribution_strategy = 'off' + FLAGS.data_format = 'channels_last' + self._run_and_report_benchmark() + + def benchmark_1_gpu_no_dist_strat(self): + """Test keras based model with eager and no dist strat.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.data_dir = self.data_dir + FLAGS.batch_size = 128 + FLAGS.train_epochs = 182 + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_dist_strat') + FLAGS.dtype = 'fp32' + FLAGS.enable_eager = True + FLAGS.distribution_strategy = 'off' + self._run_and_report_benchmark() + + def benchmark_1_gpu_no_dist_strat_run_eagerly(self): + """Test keras based model w/forced eager and no dist_strat.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.data_dir = self.data_dir + FLAGS.batch_size = 128 + FLAGS.train_epochs = 182 + FLAGS.model_dir = self._get_model_dir( + 'benchmark_1_gpu_no_dist_strat_run_eagerly') + FLAGS.dtype = 'fp32' + FLAGS.enable_eager = True + FLAGS.run_eagerly = True + FLAGS.distribution_strategy = 'off' + 
self._run_and_report_benchmark() + + def benchmark_graph_1_gpu_no_dist_strat(self): + """Test keras based model with Keras fit but not distribution strategies.""" + self._setup() + FLAGS.distribution_strategy = 'off' + FLAGS.num_gpus = 1 + FLAGS.data_dir = self.data_dir + FLAGS.batch_size = 128 + FLAGS.train_epochs = 182 + FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu_no_dist_strat') + FLAGS.dtype = 'fp32' + self._run_and_report_benchmark() + + def benchmark_2_gpu(self): + """Test keras based model with eager and distribution strategies.""" + self._setup() + FLAGS.num_gpus = 2 + FLAGS.data_dir = self.data_dir + FLAGS.batch_size = 128 + FLAGS.train_epochs = 182 + FLAGS.model_dir = self._get_model_dir('benchmark_2_gpu') + FLAGS.dtype = 'fp32' + FLAGS.enable_eager = True + self._run_and_report_benchmark() + + def benchmark_graph_2_gpu(self): + """Test keras based model with Keras fit and distribution strategies.""" + self._setup() + FLAGS.num_gpus = 2 + FLAGS.data_dir = self.data_dir + FLAGS.batch_size = 128 + FLAGS.train_epochs = 182 + FLAGS.model_dir = self._get_model_dir('benchmark_graph_2_gpu') + FLAGS.dtype = 'fp32' + self._run_and_report_benchmark() + + @benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark(self): + start_time_sec = time.time() + stats = resnet_cifar_main.run(FLAGS) + wall_time_sec = time.time() - start_time_sec + + super(Resnet56KerasAccuracy, self)._report_benchmark( + stats, + wall_time_sec, + top_1_min=MIN_TOP_1_ACCURACY, + top_1_max=MAX_TOP_1_ACCURACY, + total_batch_size=FLAGS.batch_size, + log_steps=100) + + +class Resnet56KerasBenchmarkBase(keras_benchmark.KerasBenchmark): + """Short performance tests for ResNet56 via Keras and CIFAR-10.""" + + def __init__(self, output_dir=None, default_flags=None): + flag_methods = [resnet_cifar_main.define_cifar_flags] + + super(Resnet56KerasBenchmarkBase, self).__init__( + output_dir=output_dir, + flag_methods=flag_methods, + default_flags=default_flags) + + @benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark(self): + start_time_sec = time.time() + stats = resnet_cifar_main.run(FLAGS) + wall_time_sec = time.time() - start_time_sec + + super(Resnet56KerasBenchmarkBase, self)._report_benchmark( + stats, + wall_time_sec, + total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps) + + def benchmark_1_gpu(self): + """Test 1 gpu.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu') + FLAGS.batch_size = 128 + self._run_and_report_benchmark() + + def benchmark_1_gpu_xla(self): + """Test 1 gpu with xla enabled.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.run_eagerly = False + FLAGS.enable_xla = True + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_xla') + FLAGS.batch_size = 128 + self._run_and_report_benchmark() + + def benchmark_graph_1_gpu(self): + """Test 1 gpu graph.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.enable_eager = False + FLAGS.run_eagerly = False + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu') + FLAGS.batch_size = 128 + self._run_and_report_benchmark() + + def benchmark_1_gpu_no_dist_strat(self): + """Test 1 gpu without distribution strategies.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.distribution_strategy = 'off' + FLAGS.model_dir = 
self._get_model_dir('benchmark_1_gpu_no_dist_strat') + FLAGS.batch_size = 128 + self._run_and_report_benchmark() + + def benchmark_graph_1_gpu_no_dist_strat(self): + """Test 1 gpu graph mode without distribution strategies.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.enable_eager = False + FLAGS.distribution_strategy = 'off' + FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu_no_dist_strat') + FLAGS.batch_size = 128 + self._run_and_report_benchmark() + + def benchmark_1_gpu_no_dist_strat_run_eagerly(self): + """Test 1 gpu without distribution strategy and forced eager.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.batch_size = 128 + FLAGS.model_dir = self._get_model_dir( + 'benchmark_1_gpu_no_dist_strat_run_eagerly') + FLAGS.dtype = 'fp32' + FLAGS.enable_eager = True + FLAGS.run_eagerly = True + FLAGS.distribution_strategy = 'off' + self._run_and_report_benchmark() + + def benchmark_2_gpu(self): + """Test 2 gpu.""" + self._setup() + FLAGS.num_gpus = 2 + FLAGS.enable_eager = True + FLAGS.run_eagerly = False + FLAGS.distribution_strategy = 'mirrored' + FLAGS.model_dir = self._get_model_dir('benchmark_2_gpu') + FLAGS.batch_size = 128 * 2 # 2 GPUs + self._run_and_report_benchmark() + + def benchmark_graph_2_gpu(self): + """Test 2 gpu graph mode.""" + self._setup() + FLAGS.num_gpus = 2 + FLAGS.enable_eager = False + FLAGS.run_eagerly = False + FLAGS.distribution_strategy = 'mirrored' + FLAGS.model_dir = self._get_model_dir('benchmark_graph_2_gpu') + FLAGS.batch_size = 128 * 2 # 2 GPUs + self._run_and_report_benchmark() + + def benchmark_cpu(self): + """Test cpu.""" + self._setup() + FLAGS.num_gpus = 0 + FLAGS.enable_eager = True + FLAGS.model_dir = self._get_model_dir('benchmark_cpu') + FLAGS.batch_size = 128 + FLAGS.data_format = 'channels_last' + self._run_and_report_benchmark() + + def benchmark_graph_cpu(self): + """Test cpu graph mode.""" + self._setup() + FLAGS.num_gpus = 0 + FLAGS.enable_eager = False + FLAGS.model_dir = self._get_model_dir('benchmark_graph_cpu') + FLAGS.batch_size = 128 + FLAGS.data_format = 'channels_last' + self._run_and_report_benchmark() + + def benchmark_cpu_no_dist_strat_run_eagerly(self): + """Test cpu without distribution strategy and forced eager.""" + self._setup() + FLAGS.num_gpus = 0 + FLAGS.distribution_strategy = 'off' + FLAGS.enable_eager = True + FLAGS.run_eagerly = True + FLAGS.model_dir = self._get_model_dir( + 'benchmark_cpu_no_dist_strat_run_eagerly') + FLAGS.batch_size = 128 + FLAGS.data_format = 'channels_last' + self._run_and_report_benchmark() + + def benchmark_cpu_no_dist_strat(self): + """Test cpu without distribution strategies.""" + self._setup() + FLAGS.num_gpus = 0 + FLAGS.enable_eager = True + FLAGS.distribution_strategy = 'off' + FLAGS.model_dir = self._get_model_dir('benchmark_cpu_no_dist_strat') + FLAGS.batch_size = 128 + FLAGS.data_format = 'channels_last' + self._run_and_report_benchmark() + + def benchmark_graph_cpu_no_dist_strat(self): + """Test cpu graph mode without distribution strategies.""" + self._setup() + FLAGS.num_gpus = 0 + FLAGS.enable_eager = False + FLAGS.distribution_strategy = 'off' + FLAGS.model_dir = self._get_model_dir('benchmark_graph_cpu_no_dist_strat') + FLAGS.batch_size = 128 + FLAGS.data_format = 'channels_last' + self._run_and_report_benchmark() + + +class Resnet56KerasBenchmarkSynth(Resnet56KerasBenchmarkBase): + """Synthetic benchmarks for ResNet56 and Keras.""" + + def __init__(self, output_dir=None, root_data_dir=None, **kwargs): + default_flags = {} + default_flags['skip_eval'] = True + 
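# Evaluation is skipped and synthetic data is used below, so these short runs measure training throughput only. +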
default_flags['use_synthetic_data'] = True + default_flags['train_steps'] = 110 + default_flags['log_steps'] = 10 + default_flags['use_tensor_lr'] = False + + super(Resnet56KerasBenchmarkSynth, self).__init__( + output_dir=output_dir, default_flags=default_flags) + + +class Resnet56KerasBenchmarkReal(Resnet56KerasBenchmarkBase): + """Real data benchmarks for ResNet56 and Keras.""" + + def __init__(self, output_dir=None, root_data_dir=None, **kwargs): + default_flags = {} + default_flags['skip_eval'] = True + default_flags['data_dir'] = os.path.join(root_data_dir, CIFAR_DATA_DIR_NAME) + default_flags['train_steps'] = 110 + default_flags['log_steps'] = 10 + default_flags['use_tensor_lr'] = False + + super(Resnet56KerasBenchmarkReal, self).__init__( + output_dir=output_dir, default_flags=default_flags) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/benchmark/keras_imagenet_benchmark.py b/models/official/benchmark/keras_imagenet_benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..63a48dfb1222b65311652e3bee4241854a55043e --- /dev/null +++ b/models/official/benchmark/keras_imagenet_benchmark.py @@ -0,0 +1,1724 @@ +# Lint as: python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Executes Keras benchmarks and accuracy tests.""" +# pylint: disable=line-too-long +from __future__ import print_function + +import json +import os +import time + +from typing import Any, MutableMapping, Optional + +from absl import flags +import tensorflow as tf # pylint: disable=g-bad-import-order + +from official.benchmark import benchmark_wrappers +from official.benchmark import keras_benchmark +from official.benchmark.models import resnet_imagenet_main +from official.vision.image_classification import classifier_trainer + +MIN_TOP_1_ACCURACY = 0.76 +MAX_TOP_1_ACCURACY = 0.77 + +MOBILENET_V1_MIN_TOP_1_ACCURACY = 0.65 +MOBILENET_V1_MAX_TOP_1_ACCURACY = 0.68 + +# Range of top-1 accuracies for model optimization techniques. +# Each item indicates (MIN_TOP_1_ACCURACY, MAX_TOP_1_ACCURACY). 
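+# For example, MODEL_OPTIMIZATION_TOP_1_ACCURACY['RESNET50_FINETUNE_PRUNING'] = (0.76, 0.77) means a pruned ResNet50 accuracy run is expected to finish with a top-1 accuracy between 0.76 and 0.77.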
+MODEL_OPTIMIZATION_TOP_1_ACCURACY = { + 'RESNET50_FINETUNE_PRUNING': (0.76, 0.77), + 'MOBILENET_V1_FINETUNE_PRUNING': (0.67, 0.68), +} + +FLAGS = flags.FLAGS + + +def _get_classifier_parameters( + num_gpus: int = 0, + builder: str = 'records', + skip_eval: bool = False, + distribution_strategy: str = 'mirrored', + per_replica_batch_size: int = 128, + epochs: int = 90, + steps: int = 0, + epochs_between_evals: int = 1, + dtype: str = 'float32', + enable_xla: bool = False, + run_eagerly: bool = False, + gpu_thread_mode: Optional[str] = None, + dataset_num_private_threads: Optional[int] = None, + loss_scale: Optional[str] = None, + report_metrics: bool = True, + batchnorm_spatial_persistent: bool = False) -> MutableMapping[str, Any]: + """Gets classifier trainer's ResNet parameters.""" + return { + 'runtime': { + 'num_gpus': num_gpus, + 'distribution_strategy': distribution_strategy, + 'run_eagerly': run_eagerly, + 'enable_xla': enable_xla, + 'dataset_num_private_threads': dataset_num_private_threads, + 'gpu_thread_mode': gpu_thread_mode, + 'loss_scale': loss_scale, + 'batchnorm_spatial_persistent': batchnorm_spatial_persistent, + }, + 'train_dataset': { + 'builder': builder, + 'use_per_replica_batch_size': True, + 'batch_size': per_replica_batch_size, + 'image_size': 224, + 'dtype': dtype, + }, + 'validation_dataset': { + 'builder': builder, + 'batch_size': per_replica_batch_size, + 'use_per_replica_batch_size': True, + 'image_size': 224, + 'dtype': dtype, + }, + 'train': { + 'epochs': epochs, + 'steps': steps, + 'callbacks': { + 'enable_tensorboard': False, + 'enable_checkpoint_and_export': False, + 'enable_time_history': True, + }, + 'metrics': ['accuracy'] if report_metrics else [], + }, + 'model': { + 'loss': { + 'label_smoothing': 0.1, + }, + }, + 'evaluation': { + 'epochs_between_evals': epochs_between_evals, + 'skip_eval': skip_eval, + }, + } + + +class Resnet50KerasAccuracy(keras_benchmark.KerasBenchmark): + """Benchmark accuracy tests for ResNet50 in Keras.""" + + def __init__(self, + output_dir: Optional[str] = None, + root_data_dir: Optional[str] = None, + **kwargs): + """A benchmark class. + + Args: + output_dir: directory where to output e.g. log files + root_data_dir: directory under which to look for dataset + **kwargs: arbitrary named arguments. This is needed to make the + constructor forward compatible in case PerfZero provides more + named arguments before updating the constructor. 
+ """ + + flag_methods = [classifier_trainer.define_classifier_flags] + + self.data_dir = os.path.join(root_data_dir, 'imagenet') + super(Resnet50KerasAccuracy, self).__init__( + output_dir=output_dir, flag_methods=flag_methods) + + @benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark( + self, + experiment_name: str, + top_1_min: float = MIN_TOP_1_ACCURACY, + top_1_max: float = MAX_TOP_1_ACCURACY, + num_gpus: int = 0, + distribution_strategy: str = 'mirrored', + per_replica_batch_size: int = 128, + epochs: int = 90, + steps: int = 0, + epochs_between_evals: int = 1, + dtype: str = 'float32', + enable_xla: bool = False, + run_eagerly: bool = False, + gpu_thread_mode: Optional[str] = None, + dataset_num_private_threads: Optional[int] = None, + loss_scale: Optional[str] = None): + """Runs and reports the benchmark given the provided configuration.""" + FLAGS.model_type = 'resnet' + FLAGS.dataset = 'imagenet' + FLAGS.mode = 'train_and_eval' + FLAGS.data_dir = self.data_dir + FLAGS.model_dir = self._get_model_dir(experiment_name) + parameters = _get_classifier_parameters( + num_gpus=num_gpus, + distribution_strategy=distribution_strategy, + per_replica_batch_size=per_replica_batch_size, + epochs=epochs, + steps=steps, + epochs_between_evals=epochs_between_evals, + dtype=dtype, + enable_xla=enable_xla, + run_eagerly=run_eagerly, + gpu_thread_mode=gpu_thread_mode, + dataset_num_private_threads=dataset_num_private_threads, + report_metrics=True, + loss_scale=loss_scale, + batchnorm_spatial_persistent=True) + FLAGS.params_override = json.dumps(parameters) + total_batch_size = num_gpus * per_replica_batch_size + + start_time_sec = time.time() + stats = classifier_trainer.run(flags.FLAGS) + wall_time_sec = time.time() - start_time_sec + + super(Resnet50KerasAccuracy, self)._report_benchmark( + stats, + wall_time_sec, + top_1_min=top_1_min, + top_1_max=top_1_max, + total_batch_size=total_batch_size, + log_steps=100) + + def benchmark_8_gpu(self): + """Tests Keras model with eager, dist_strat and 8 GPUs.""" + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_8_gpu', + num_gpus=8, + per_replica_batch_size=128, + epochs=90, + epochs_between_evals=10, + dtype='float32') + + def benchmark_8_gpu_fp16(self): + """Tests Keras model with eager, dist_strat, 8 GPUs, and fp16.""" + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_8_gpu_fp16', + num_gpus=8, + per_replica_batch_size=256, + epochs=90, + epochs_between_evals=10, + dtype='float16') + + def benchmark_xla_8_gpu_fp16(self): + """Tests Keras model with XLA, eager, dist_strat, 8 GPUs and fp16.""" + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_xla_8_gpu_fp16', + num_gpus=8, + per_replica_batch_size=256, + epochs=90, + epochs_between_evals=10, + dtype='float16', + enable_xla=True) + + def benchmark_xla_8_gpu_fp16_dynamic(self): + """Tests Keras model with XLA, eager, dist_strat, 8 GPUs, dynamic fp16.""" + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_xla_8_gpu_fp16_dynamic', + top_1_min=0.736, + num_gpus=8, + per_replica_batch_size=256, + epochs=90, + epochs_between_evals=10, + dtype='float16', + loss_scale='dynamic') + + def _get_model_dir(self, folder_name): + return os.path.join(self.output_dir, folder_name) + + +class MobilenetV1KerasAccuracy(keras_benchmark.KerasBenchmark): + """Benchmark accuracy tests for MobilenetV1 in Keras.""" + + def __init__(self, output_dir=None, root_data_dir=None, **kwargs): + """A benchmark 
class. + + Args: + output_dir: directory where to output e.g. log files + root_data_dir: directory under which to look for dataset + **kwargs: arbitrary named arguments. This is needed to make the + constructor forward compatible in case PerfZero provides more + named arguments before updating the constructor. + """ + + flag_methods = [resnet_imagenet_main.define_imagenet_keras_flags] + + self.data_dir = os.path.join(root_data_dir, 'imagenet') + super(MobilenetV1KerasAccuracy, self).__init__( + output_dir=output_dir, + flag_methods=flag_methods, + default_flags={ + 'model': 'mobilenet', + 'optimizer': 'mobilenet_default', + 'initial_learning_rate_per_sample': 0.00039, + }) + + def benchmark_8_gpu(self): + """Test Keras model with eager, dist_strat and 8 GPUs.""" + self._setup() + FLAGS.num_gpus = 8 + FLAGS.data_dir = self.data_dir + FLAGS.batch_size = 128 * 8 + FLAGS.train_epochs = 90 + FLAGS.epochs_between_evals = 10 + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu') + FLAGS.dtype = 'fp32' + FLAGS.enable_eager = True + self._run_and_report_benchmark() + + @benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark(self, + top_1_min=MOBILENET_V1_MIN_TOP_1_ACCURACY, + top_1_max=MOBILENET_V1_MAX_TOP_1_ACCURACY): + start_time_sec = time.time() + stats = resnet_imagenet_main.run(flags.FLAGS) + wall_time_sec = time.time() - start_time_sec + + super(MobilenetV1KerasAccuracy, self)._report_benchmark( + stats, + wall_time_sec, + top_1_min=top_1_min, + top_1_max=top_1_max, + total_batch_size=FLAGS.batch_size, + log_steps=100) + + def _get_model_dir(self, folder_name): + return os.path.join(self.output_dir, folder_name) + + +class Resnet50KerasClassifierBenchmarkBase(keras_benchmark.KerasBenchmark): + """Resnet50 (classifier_trainer) benchmarks.""" + + def __init__(self, output_dir=None, default_flags=None, + tpu=None, dataset_builder='records', train_epochs=1, + train_steps=110, data_dir=None): + flag_methods = [classifier_trainer.define_classifier_flags] + + self.dataset_builder = dataset_builder + self.train_epochs = train_epochs + self.train_steps = train_steps + self.data_dir = data_dir + + super(Resnet50KerasClassifierBenchmarkBase, self).__init__( + output_dir=output_dir, + flag_methods=flag_methods, + default_flags=default_flags, + tpu=tpu) + + @benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark( + self, + experiment_name: str, + skip_steps: Optional[int] = None, + top_1_min: float = MIN_TOP_1_ACCURACY, + top_1_max: float = MAX_TOP_1_ACCURACY, + num_gpus: int = 0, + num_tpus: int = 0, + distribution_strategy: str = 'mirrored', + per_replica_batch_size: int = 128, + epochs_between_evals: int = 1, + dtype: str = 'float32', + enable_xla: bool = False, + run_eagerly: bool = False, + gpu_thread_mode: Optional[str] = None, + dataset_num_private_threads: Optional[int] = None, + loss_scale: Optional[str] = None): + """Runs and reports the benchmark given the provided configuration.""" + FLAGS.model_type = 'resnet' + FLAGS.dataset = 'imagenet' + FLAGS.mode = 'train_and_eval' + FLAGS.data_dir = self.data_dir + FLAGS.model_dir = self._get_model_dir(experiment_name) + parameters = _get_classifier_parameters( + builder=self.dataset_builder, + skip_eval=True, + num_gpus=num_gpus, + distribution_strategy=distribution_strategy, + per_replica_batch_size=per_replica_batch_size, + epochs=self.train_epochs, + steps=self.train_steps, + epochs_between_evals=epochs_between_evals, + dtype=dtype, + enable_xla=enable_xla, + gpu_thread_mode=gpu_thread_mode, + 
dataset_num_private_threads=dataset_num_private_threads, + loss_scale=loss_scale, + report_metrics=False, + batchnorm_spatial_persistent=True) + FLAGS.params_override = json.dumps(parameters) + if distribution_strategy == 'tpu': + total_batch_size = num_tpus * per_replica_batch_size + else: + total_batch_size = num_gpus * per_replica_batch_size + + start_time_sec = time.time() + stats = classifier_trainer.run(flags.FLAGS) + wall_time_sec = time.time() - start_time_sec + # Number of logged step time entries that are excluded in performance + # report. We keep results from last 100 batches, or skip the steps based on + # input skip_steps. + warmup = (skip_steps or (self.train_steps - 100)) // FLAGS.log_steps + + super(Resnet50KerasClassifierBenchmarkBase, self)._report_benchmark( + stats, + wall_time_sec, + total_batch_size=total_batch_size, + log_steps=FLAGS.log_steps, + warmup=warmup, + start_time_sec=start_time_sec) + + def benchmark_1_gpu_no_dist_strat(self): + """Tests Keras model with 1 GPU, no distribution strategy.""" + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_1_gpu_no_dist_strat', + num_gpus=1, + distribution_strategy='off', + per_replica_batch_size=128) + + def benchmark_1_gpu_no_dist_strat_run_eagerly(self): + """Tests Keras model with 1 GPU, no distribution strategy, run eagerly.""" + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_1_gpu_no_dist_strat_run_eagerly', + num_gpus=1, + run_eagerly=True, + distribution_strategy='off', + per_replica_batch_size=64) + + def benchmark_1_gpu_no_dist_strat_run_eagerly_fp16(self): + """Tests with 1 GPU, no distribution strategy, fp16, run eagerly.""" + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_1_gpu_no_dist_strat_run_eagerly_fp16', + num_gpus=1, + run_eagerly=True, + distribution_strategy='off', + dtype='float16', + per_replica_batch_size=128) + + def benchmark_1_gpu(self): + """Tests Keras model with 1 GPU.""" + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_1_gpu', + num_gpus=1, + distribution_strategy='one_device', + per_replica_batch_size=128) + + def benchmark_xla_1_gpu(self): + """Tests Keras model with XLA and 1 GPU.""" + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_xla_1_gpu', + num_gpus=1, + enable_xla=True, + distribution_strategy='one_device', + per_replica_batch_size=128) + + def benchmark_1_gpu_fp16(self): + """Tests Keras model with 1 GPU and fp16.""" + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_1_gpu_fp16', + num_gpus=1, + distribution_strategy='one_device', + dtype='float16', + per_replica_batch_size=256) + + def benchmark_1_gpu_fp16_dynamic(self): + """Tests Keras model with 1 GPU, fp16, and dynamic loss scaling.""" + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_1_gpu_fp16_dynamic', + num_gpus=1, + distribution_strategy='one_device', + dtype='float16', + per_replica_batch_size=256, + loss_scale='dynamic') + + def benchmark_xla_1_gpu_fp16(self): + """Tests Keras model with XLA, 1 GPU and fp16.""" + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_xla_1_gpu_fp16', + num_gpus=1, + enable_xla=True, + distribution_strategy='one_device', + dtype='float16', + per_replica_batch_size=256) + + def benchmark_xla_1_gpu_fp16_tweaked(self): + """Tests Keras model with XLA, 1 GPU, fp16, and manual config tuning.""" + self._setup() + self._run_and_report_benchmark( + 
experiment_name='benchmark_xla_1_gpu_fp16_tweaked', + num_gpus=1, + enable_xla=True, + distribution_strategy='one_device', + dtype='float16', + per_replica_batch_size=256, + gpu_thread_mode='gpu_private') + + def benchmark_xla_1_gpu_fp16_dynamic(self): + """Tests Keras model with XLA, 1 GPU, fp16, and dynamic loss scaling.""" + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_xla_1_gpu_fp16_dynamic', + num_gpus=1, + enable_xla=True, + distribution_strategy='one_device', + dtype='float16', + per_replica_batch_size=256, + loss_scale='dynamic') + + def benchmark_8_gpu(self): + """Tests Keras model with 8 GPUs.""" + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_8_gpu', + num_gpus=8, + distribution_strategy='mirrored', + per_replica_batch_size=128) + + def benchmark_8_gpu_tweaked(self): + """Tests Keras model with manual config tuning and 8 GPUs.""" + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_8_gpu_tweaked', + num_gpus=8, + distribution_strategy='mirrored', + per_replica_batch_size=128, + dataset_num_private_threads=14) + + def benchmark_xla_8_gpu(self): + """Tests Keras model with XLA and 8 GPUs.""" + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_xla_8_gpu', + num_gpus=8, + enable_xla=True, + distribution_strategy='mirrored', + per_replica_batch_size=128) + + def benchmark_xla_8_gpu_tweaked(self): + """Tests Keras model with manual config tuning, 8 GPUs, and XLA.""" + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_xla_8_gpu_tweaked', + num_gpus=8, + enable_xla=True, + distribution_strategy='mirrored', + per_replica_batch_size=128, + gpu_thread_mode='gpu_private', + dataset_num_private_threads=24) + + def benchmark_8_gpu_fp16(self): + """Tests Keras model with 8 GPUs and fp16.""" + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_8_gpu_fp16', + num_gpus=8, + dtype='float16', + distribution_strategy='mirrored', + per_replica_batch_size=256) + + def benchmark_8_gpu_fp16_tweaked(self): + """Tests Keras model with 8 GPUs, fp16, and manual config tuning.""" + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_8_gpu_fp16_tweaked', + num_gpus=8, + dtype='float16', + distribution_strategy='mirrored', + per_replica_batch_size=256, + gpu_thread_mode='gpu_private', + dataset_num_private_threads=40) + + def benchmark_8_gpu_fp16_dynamic_tweaked(self): + """Tests Keras model with 8 GPUs, fp16, dynamic loss scaling, and tuned.""" + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_8_gpu_fp16_dynamic_tweaked', + num_gpus=8, + dtype='float16', + distribution_strategy='mirrored', + per_replica_batch_size=256, + loss_scale='dynamic', + gpu_thread_mode='gpu_private', + dataset_num_private_threads=40) + + def benchmark_xla_8_gpu_fp16(self): + """Tests Keras model with XLA, 8 GPUs and fp16.""" + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_xla_8_gpu_fp16', + dtype='float16', + num_gpus=8, + enable_xla=True, + distribution_strategy='mirrored', + per_replica_batch_size=256) + + def benchmark_xla_8_gpu_fp16_tweaked(self): + """Test Keras model with manual config tuning, XLA, 8 GPUs and fp16.""" + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_xla_8_gpu_fp16_tweaked', + dtype='float16', + num_gpus=8, + enable_xla=True, + distribution_strategy='mirrored', + per_replica_batch_size=256, + gpu_thread_mode='gpu_private', + 
dataset_num_private_threads=48) + + def benchmark_xla_8_gpu_fp16_tweaked_delay_measure(self): + """Tests with manual config tuning, XLA, 8 GPUs and fp16. + + Delay performance measurement for stable performance on 96 vCPU platforms. + """ + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_xla_8_gpu_fp16_tweaked_delay_measure', + dtype='float16', + num_gpus=8, + enable_xla=True, + distribution_strategy='mirrored', + per_replica_batch_size=256, + gpu_thread_mode='gpu_private', + dataset_num_private_threads=48, + steps=310) + + def benchmark_xla_8_gpu_fp16_dynamic_tweaked(self): + """Tests Keras model with config tuning, XLA, 8 GPUs and dynamic fp16.""" + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_xla_8_gpu_fp16_dynamic_tweaked', + dtype='float16', + num_gpus=8, + enable_xla=True, + distribution_strategy='mirrored', + per_replica_batch_size=256, + gpu_thread_mode='gpu_private', + loss_scale='dynamic', + dataset_num_private_threads=48) + + def benchmark_2x2_tpu_bf16(self): + """Test Keras model with 2x2 TPU, bf16.""" + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_2x2_tpu_bf16', + dtype='bfloat16', + num_tpus=8, + distribution_strategy='tpu', + per_replica_batch_size=128) + + def benchmark_4x4_tpu_bf16(self): + """Test Keras model with 4x4 TPU, bf16.""" + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_4x4_tpu_bf16', + dtype='bfloat16', + num_tpus=32, + distribution_strategy='tpu', + per_replica_batch_size=128) + + def benchmark_8x8_tpu_bf16(self): + """Test Keras model with 8x8 TPU, bf16.""" + self._setup() + self._run_and_report_benchmark( + experiment_name='benchmark_8x8_tpu_bf16', + dtype='bfloat16', + num_tpus=128, + distribution_strategy='tpu', + per_replica_batch_size=64) + + def fill_report_object(self, stats): + super(Resnet50KerasClassifierBenchmarkBase, self).fill_report_object( + stats, + total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps) + + +class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark): + """Resnet50 benchmarks.""" + + def __init__(self, output_dir=None, default_flags=None, tpu=None): + flag_methods = [resnet_imagenet_main.define_imagenet_keras_flags] + + super(Resnet50KerasBenchmarkBase, self).__init__( + output_dir=output_dir, + flag_methods=flag_methods, + default_flags=default_flags, + tpu=tpu) + + @benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark(self, skip_steps=None): + start_time_sec = time.time() + stats = resnet_imagenet_main.run(FLAGS) + wall_time_sec = time.time() - start_time_sec + # Number of logged step time entries that are excluded in performance + # report. We keep results from last 100 batches, or skip the steps based on + # input skip_steps. 
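+ # For example, with train_steps=110 and log_steps=10 (the defaults used by the synthetic and real data benchmark subclasses), warmup = (110 - 100) // 10 = 1, so only the first logged interval is dropped.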
+ warmup = (skip_steps or (FLAGS.train_steps - 100)) // FLAGS.log_steps + + super(Resnet50KerasBenchmarkBase, self)._report_benchmark( + stats, + wall_time_sec, + total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps, + warmup=warmup, + start_time_sec=start_time_sec) + + def benchmark_1_gpu_no_dist_strat(self): + """Test Keras model with 1 GPU, no distribution strategy.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.distribution_strategy = 'off' + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_dist_strat') + FLAGS.batch_size = 128 + self._run_and_report_benchmark() + + def benchmark_1_gpu_no_dist_strat_run_eagerly(self): + """Test Keras model with 1 GPU, no distribution strategy, run eagerly.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.run_eagerly = True + FLAGS.distribution_strategy = 'off' + FLAGS.model_dir = self._get_model_dir( + 'benchmark_1_gpu_no_dist_strat_run_eagerly') + FLAGS.batch_size = 64 + self._run_and_report_benchmark() + + def benchmark_1_gpu_no_dist_strat_run_eagerly_tweaked(self): + """Test Keras model with 1 GPU, no distribution strategy, run eagerly.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.run_eagerly = True + FLAGS.explicit_gpu_placement = True + FLAGS.distribution_strategy = 'off' + FLAGS.model_dir = self._get_model_dir( + 'benchmark_1_gpu_no_dist_strat_run_eagerly_tweaked') + FLAGS.batch_size = 64 + self._run_and_report_benchmark() + + def benchmark_1_gpu_no_dist_strat_run_eagerly_fp16(self): + """Test with 1 GPU, no distribution strategy, fp16, run eagerly.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.run_eagerly = True + FLAGS.distribution_strategy = 'off' + FLAGS.model_dir = self._get_model_dir( + 'benchmark_1_gpu_no_dist_strat_run_eagerly_fp16') + FLAGS.dtype = 'fp16' + FLAGS.batch_size = 128 + self._run_and_report_benchmark() + + def benchmark_1_gpu_no_dist_strat_run_eagerly_fp16_tweaked(self): + """Test with 1 GPU, no distribution strategy, fp16, run eagerly.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.run_eagerly = True + FLAGS.explicit_gpu_placement = True + FLAGS.distribution_strategy = 'off' + FLAGS.model_dir = self._get_model_dir( + 'benchmark_1_gpu_no_dist_strat_run_eagerly_fp16_tweaked') + FLAGS.dtype = 'fp16' + FLAGS.batch_size = 128 + self._run_and_report_benchmark() + + def benchmark_1_gpu(self): + """Test Keras model with 1 GPU.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu') + FLAGS.batch_size = 128 + self._run_and_report_benchmark() + + def benchmark_1_gpu_amp(self): + """Test Keras model with 1 GPU with automatic mixed precision.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.dtype = 'fp16' + FLAGS.fp16_implementation = 'graph_rewrite' + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_amp') + FLAGS.batch_size = 256 + self._run_and_report_benchmark() + + def benchmark_xla_1_gpu(self): + """Test Keras model with XLA and 1 GPU.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.enable_xla = True + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu') + FLAGS.batch_size = 128 + self._run_and_report_benchmark() + + def benchmark_xla_1_gpu_amp(self): + """Test Keras model with XLA 
and 1 GPU with automatic mixed precision.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.dtype = 'fp16' + FLAGS.fp16_implementation = 'graph_rewrite' + FLAGS.enable_xla = True + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_amp') + FLAGS.batch_size = 256 + self._run_and_report_benchmark() + + def benchmark_1_gpu_fp16(self): + """Test Keras model with 1 GPU and fp16.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16') + FLAGS.dtype = 'fp16' + FLAGS.batch_size = 256 + self._run_and_report_benchmark() + + def benchmark_1_gpu_fp16_dynamic(self): + """Test Keras model with 1 GPU, fp16, and dynamic loss scaling.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16_dynamic') + FLAGS.dtype = 'fp16' + FLAGS.batch_size = 256 + FLAGS.loss_scale = 'dynamic' + self._run_and_report_benchmark() + + def benchmark_xla_1_gpu_fp16(self): + """Test Keras model with XLA, 1 GPU and fp16.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.enable_xla = True + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16') + FLAGS.dtype = 'fp16' + FLAGS.batch_size = 256 + self._run_and_report_benchmark() + + def benchmark_xla_1_gpu_fp16_tweaked(self): + """Test Keras model with XLA, 1 GPU, fp16, and manual config tuning.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.enable_xla = True + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_tweaked') + FLAGS.dtype = 'fp16' + FLAGS.batch_size = 256 + FLAGS.tf_gpu_thread_mode = 'gpu_private' + self._run_and_report_benchmark() + + def benchmark_xla_1_gpu_fp16_dynamic(self): + """Test Keras model with XLA, 1 GPU, fp16, and dynamic loss scaling.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.enable_xla = True + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_dynamic') + FLAGS.dtype = 'fp16' + FLAGS.batch_size = 256 + FLAGS.loss_scale = 'dynamic' + self._run_and_report_benchmark() + + def benchmark_8_gpu(self): + """Test Keras model with 8 GPUs.""" + self._setup() + + FLAGS.num_gpus = 8 + FLAGS.enable_eager = True + FLAGS.distribution_strategy = 'mirrored' + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu') + FLAGS.batch_size = 128 * 8 # 8 GPUs + self._run_and_report_benchmark() + + def benchmark_8_gpu_amp(self): + """Test Keras model with 8 GPUs with automatic mixed precision.""" + self._setup() + + FLAGS.num_gpus = 8 + FLAGS.enable_eager = True + FLAGS.dtype = 'fp16' + FLAGS.fp16_implementation = 'graph_rewrite' + FLAGS.distribution_strategy = 'mirrored' + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_amp') + FLAGS.batch_size = 256 * 8 # 8 GPUs + self._run_and_report_benchmark() + + def benchmark_8_gpu_tweaked(self): + """Test Keras model with manual config tuning and 8 GPUs.""" + self._setup() + + FLAGS.num_gpus = 8 + FLAGS.enable_eager = True + FLAGS.distribution_strategy = 'mirrored' + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_tweaked') + FLAGS.batch_size = 128 * 8 # 8 GPUs + FLAGS.datasets_num_private_threads = 14 + self._run_and_report_benchmark() + + def 
benchmark_xla_8_gpu(self): + """Test Keras model with XLA and 8 GPUs.""" + self._setup() + + FLAGS.num_gpus = 8 + FLAGS.enable_eager = True + FLAGS.enable_xla = True + FLAGS.distribution_strategy = 'mirrored' + FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu') + FLAGS.batch_size = 128 * 8 # 8 GPUs + self._run_and_report_benchmark() + + def benchmark_xla_8_gpu_amp(self): + """Test Keras model with XLA and 8 GPUs with automatic mixed precision.""" + self._setup() + + FLAGS.num_gpus = 8 + FLAGS.enable_eager = True + FLAGS.dtype = 'fp16' + FLAGS.fp16_implementation = 'graph_rewrite' + FLAGS.enable_xla = True + FLAGS.distribution_strategy = 'mirrored' + FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_amp') + FLAGS.batch_size = 256 * 8 # 8 GPUs + self._run_and_report_benchmark() + + def benchmark_xla_8_gpu_tweaked(self): + """Test Keras model with manual config tuning, 8 GPUs, and XLA.""" + self._setup() + + FLAGS.num_gpus = 8 + FLAGS.enable_eager = True + FLAGS.enable_xla = True + FLAGS.distribution_strategy = 'mirrored' + FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_tweaked') + FLAGS.batch_size = 128 * 8 + FLAGS.tf_gpu_thread_mode = 'gpu_private' + FLAGS.datasets_num_private_threads = 24 + self._run_and_report_benchmark() + + def benchmark_8_gpu_fp16(self): + """Test Keras model with 8 GPUs and fp16.""" + self._setup() + + FLAGS.num_gpus = 8 + FLAGS.dtype = 'fp16' + FLAGS.enable_eager = True + FLAGS.distribution_strategy = 'mirrored' + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16') + FLAGS.batch_size = 256 * 8 # 8 GPUs + self._run_and_report_benchmark() + + def benchmark_8_gpu_fp16_tweaked(self): + """Test Keras model with 8 GPUs, fp16, and manual config tuning.""" + self._setup() + + FLAGS.num_gpus = 8 + FLAGS.dtype = 'fp16' + FLAGS.enable_eager = True + FLAGS.distribution_strategy = 'mirrored' + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16_tweaked') + FLAGS.batch_size = 256 * 8 # 8 GPUs + FLAGS.tf_gpu_thread_mode = 'gpu_private' + FLAGS.dataset_num_private_threads = 40 + self._run_and_report_benchmark() + + def benchmark_8_gpu_fp16_dynamic_tweaked(self): + """Test Keras model with 8 GPUs, fp16, dynamic loss scaling, and tuned.""" + self._setup() + + FLAGS.num_gpus = 8 + FLAGS.dtype = 'fp16' + FLAGS.enable_eager = True + FLAGS.distribution_strategy = 'mirrored' + FLAGS.model_dir = self._get_model_dir( + 'benchmark_8_gpu_fp16_dynamic_tweaked') + FLAGS.batch_size = 256 * 8 # 8 GPUs + FLAGS.loss_scale = 'dynamic' + FLAGS.tf_gpu_thread_mode = 'gpu_private' + FLAGS.dataset_num_private_threads = 40 + self._run_and_report_benchmark() + + def benchmark_xla_8_gpu_fp16(self): + """Test Keras model with XLA, 8 GPUs and fp16.""" + self._setup() + + FLAGS.num_gpus = 8 + FLAGS.dtype = 'fp16' + FLAGS.enable_eager = True + FLAGS.enable_xla = True + FLAGS.distribution_strategy = 'mirrored' + FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16') + FLAGS.batch_size = 256 * 8 # 8 GPUs + self._run_and_report_benchmark() + + def benchmark_xla_8_gpu_fp16_tweaked(self): + """Test Keras model with manual config tuning, XLA, 8 GPUs and fp16.""" + self._setup() + + FLAGS.num_gpus = 8 + FLAGS.dtype = 'fp16' + FLAGS.enable_eager = True + FLAGS.enable_xla = True + FLAGS.distribution_strategy = 'mirrored' + FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_tweaked') + FLAGS.batch_size = 256 * 8 # 8 GPUs + FLAGS.tf_gpu_thread_mode = 'gpu_private' + FLAGS.datasets_num_private_threads = 48 + self._run_and_report_benchmark() + + def 
benchmark_xla_8_gpu_fp16_tweaked_delay_measure(self): + """Test with manual config tuning, XLA, 8 GPUs and fp16. + + Delay performance measurement for stable performance on 96 vCPU platforms. + """ + self._setup() + + FLAGS.num_gpus = 8 + FLAGS.dtype = 'fp16' + FLAGS.enable_eager = True + FLAGS.enable_xla = True + FLAGS.distribution_strategy = 'mirrored' + FLAGS.model_dir = self._get_model_dir( + 'benchmark_xla_8_gpu_fp16_tweaked_delay_measure') + FLAGS.batch_size = 256 * 8 + FLAGS.tf_gpu_thread_mode = 'gpu_private' + FLAGS.datasets_num_private_threads = 48 + FLAGS.train_steps = 310 + self._run_and_report_benchmark() + + def benchmark_xla_8_gpu_fp16_dynamic_tweaked(self): + """Test Keras model with config tuning, XLA, 8 GPUs and dynamic fp16.""" + self._setup() + + FLAGS.num_gpus = 8 + FLAGS.dtype = 'fp16' + FLAGS.enable_eager = True + FLAGS.enable_xla = True + FLAGS.distribution_strategy = 'mirrored' + FLAGS.model_dir = self._get_model_dir( + 'benchmark_xla_8_gpu_fp16_dynamic_tweaked') + FLAGS.batch_size = 256 * 8 # 8 GPUs + FLAGS.loss_scale = 'dynamic' + FLAGS.tf_gpu_thread_mode = 'gpu_private' + FLAGS.datasets_num_private_threads = 48 + self._run_and_report_benchmark() + + def benchmark_2x2_tpu_bf16(self): + """Test Keras model with 2x2 TPU, bf16.""" + self._setup() + + FLAGS.dtype = 'bf16' + FLAGS.distribution_strategy = 'tpu' + FLAGS.model_dir = self._get_model_dir('benchmark_2x2_tpu_bf16') + FLAGS.batch_size = 1024 + self._run_and_report_benchmark() + + def benchmark_4x4_tpu_bf16(self): + """Test Keras model with 4x4 TPU, bf16.""" + self._setup() + + FLAGS.dtype = 'bf16' + FLAGS.distribution_strategy = 'tpu' + FLAGS.model_dir = self._get_model_dir('benchmark_4x4_tpu_bf16') + FLAGS.batch_size = 4096 + self._run_and_report_benchmark() + + def benchmark_8x8_tpu_bf16(self): + """Test Keras model with 8x8 TPU, bf16.""" + self._setup() + + FLAGS.dtype = 'bf16' + FLAGS.distribution_strategy = 'tpu' + FLAGS.model_dir = self._get_model_dir('benchmark_8x8_tpu_bf16') + FLAGS.batch_size = 8192 + self._run_and_report_benchmark() + + def fill_report_object(self, stats): + super(Resnet50KerasBenchmarkBase, self).fill_report_object( + stats, + total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps) + + +class Resnet50KerasBenchmarkSynth(Resnet50KerasClassifierBenchmarkBase): + """Resnet50 synthetic benchmark tests.""" + + def __init__(self, output_dir=None, root_data_dir=None, tpu=None, **kwargs): + def_flags = {} + def_flags['log_steps'] = 10 + + super(Resnet50KerasBenchmarkSynth, self).__init__( + output_dir=output_dir, default_flags=def_flags, tpu=tpu, + dataset_builder='synthetic', train_epochs=1, train_steps=110) + + +class Resnet50KerasBenchmarkReal(Resnet50KerasClassifierBenchmarkBase): + """Resnet50 real data benchmark tests.""" + + def __init__(self, output_dir=None, root_data_dir=None, tpu=None, **kwargs): + data_dir = os.path.join(root_data_dir, 'imagenet') + def_flags = {} + def_flags['log_steps'] = 10 + + super(Resnet50KerasBenchmarkReal, self).__init__( + output_dir=output_dir, default_flags=def_flags, tpu=tpu, + dataset_builder='records', train_epochs=1, train_steps=110, + data_dir=data_dir) + + +class Resnet50KerasBenchmarkRemoteData(Resnet50KerasBenchmarkBase): + """Resnet50 real data (stored in remote storage) benchmark tests.""" + + def __init__(self, output_dir=None, root_data_dir=None, **kwargs): + def_flags = {} + def_flags['skip_eval'] = True + def_flags['report_accuracy_metrics'] = False + def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet') + # Defining 
multiple epochs overrides the train_steps setting in benchmarks. + def_flags['train_epochs'] = 2 + # Cache dataset so performance is stable after the first epoch. + def_flags['training_dataset_cache'] = True + def_flags['log_steps'] = 100 + # Note that for single GPU and pure eager tests which are less likely to be + # input bound and more stable, these tests will run for shorter time by + # overriding FLAGS.train_epochs, train_seteps, log_steps in benchmark + # methods, and skip_steps in _run_and_report_benchmark(). + + super(Resnet50KerasBenchmarkRemoteData, self).__init__( + output_dir=output_dir, default_flags=def_flags) + + def _override_flags_to_run_test_shorter(self): + FLAGS.train_epochs = 1 + FLAGS.train_steps = 300 + FLAGS.log_steps = 10 + + def benchmark_1_gpu_no_dist_strat(self): + """Test Keras model with 1 GPU, no distribution strategy.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.distribution_strategy = 'off' + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_dist_strat') + FLAGS.batch_size = 128 + self._override_flags_to_run_test_shorter() + self._run_and_report_benchmark() + + def benchmark_1_gpu_no_dist_strat_run_eagerly(self): + """Test Keras model with 1 GPU, no distribution strategy, run eagerly.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.run_eagerly = True + FLAGS.distribution_strategy = 'off' + FLAGS.model_dir = self._get_model_dir( + 'benchmark_1_gpu_no_dist_strat_run_eagerly') + FLAGS.batch_size = 64 + self._override_flags_to_run_test_shorter() + self._run_and_report_benchmark() + + def benchmark_1_gpu_no_dist_strat_run_eagerly_tweaked(self): + """Test Keras model with 1 GPU, no distribution strategy, run eagerly.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.run_eagerly = True + FLAGS.explicit_gpu_placement = True + FLAGS.distribution_strategy = 'off' + FLAGS.model_dir = self._get_model_dir( + 'benchmark_1_gpu_no_dist_strat_run_eagerly_tweaked') + FLAGS.batch_size = 64 + self._override_flags_to_run_test_shorter() + self._run_and_report_benchmark() + + def benchmark_1_gpu_no_dist_strat_run_eagerly_fp16(self): + """Test with 1 GPU, no distribution strategy, fp16, run eagerly.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.run_eagerly = True + FLAGS.distribution_strategy = 'off' + FLAGS.model_dir = self._get_model_dir( + 'benchmark_1_gpu_no_dist_strat_run_eagerly_fp16') + FLAGS.dtype = 'fp16' + FLAGS.batch_size = 128 + self._override_flags_to_run_test_shorter() + self._run_and_report_benchmark() + + def benchmark_1_gpu_no_dist_strat_run_eagerly_fp16_tweaked(self): + """Test with 1 GPU, no distribution strategy, fp16, run eagerly.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.run_eagerly = True + FLAGS.explicit_gpu_placement = True + FLAGS.distribution_strategy = 'off' + FLAGS.model_dir = self._get_model_dir( + 'benchmark_1_gpu_no_dist_strat_run_eagerly_fp16_tweaked') + FLAGS.dtype = 'fp16' + FLAGS.batch_size = 128 + self._override_flags_to_run_test_shorter() + self._run_and_report_benchmark() + + def benchmark_1_gpu(self): + """Test Keras model with 1 GPU.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu') + FLAGS.batch_size = 128 + self._override_flags_to_run_test_shorter() + self._run_and_report_benchmark() + + def benchmark_1_gpu_amp(self): + """Test Keras model with 1 GPU 
with automatic mixed precision.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.dtype = 'fp16' + FLAGS.fp16_implementation = 'graph_rewrite' + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_amp') + FLAGS.batch_size = 256 + self._override_flags_to_run_test_shorter() + self._run_and_report_benchmark() + + def benchmark_xla_1_gpu(self): + """Test Keras model with XLA and 1 GPU.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.enable_xla = True + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu') + FLAGS.batch_size = 128 + self._override_flags_to_run_test_shorter() + self._run_and_report_benchmark() + + def benchmark_xla_1_gpu_amp(self): + """Test Keras model with XLA and 1 GPU with automatic mixed precision.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.dtype = 'fp16' + FLAGS.fp16_implementation = 'graph_rewrite' + FLAGS.enable_xla = True + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_amp') + FLAGS.batch_size = 256 + self._override_flags_to_run_test_shorter() + self._run_and_report_benchmark() + + def benchmark_1_gpu_fp16(self): + """Test Keras model with 1 GPU and fp16.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16') + FLAGS.dtype = 'fp16' + FLAGS.batch_size = 256 + self._override_flags_to_run_test_shorter() + self._run_and_report_benchmark() + + def benchmark_1_gpu_fp16_dynamic(self): + """Test Keras model with 1 GPU, fp16, and dynamic loss scaling.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16_dynamic') + FLAGS.dtype = 'fp16' + FLAGS.batch_size = 256 + FLAGS.loss_scale = 'dynamic' + self._override_flags_to_run_test_shorter() + self._run_and_report_benchmark() + + def benchmark_xla_1_gpu_fp16(self): + """Test Keras model with XLA, 1 GPU and fp16.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.enable_xla = True + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16') + FLAGS.dtype = 'fp16' + FLAGS.batch_size = 256 + self._override_flags_to_run_test_shorter() + self._run_and_report_benchmark() + + def benchmark_xla_1_gpu_fp16_tweaked(self): + """Test Keras model with XLA, 1 GPU, fp16, and manual config tuning.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.enable_xla = True + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_tweaked') + FLAGS.dtype = 'fp16' + FLAGS.batch_size = 256 + FLAGS.tf_gpu_thread_mode = 'gpu_private' + self._override_flags_to_run_test_shorter() + self._run_and_report_benchmark() + + def benchmark_xla_1_gpu_fp16_dynamic(self): + """Test Keras model with XLA, 1 GPU, fp16, and dynamic loss scaling.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.enable_eager = True + FLAGS.enable_xla = True + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_dynamic') + FLAGS.dtype = 'fp16' + FLAGS.batch_size = 256 + FLAGS.loss_scale = 'dynamic' + self._override_flags_to_run_test_shorter() + self._run_and_report_benchmark() + + 
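# For the multi-GPU runs below, the override passes skip_steps=600; with log_steps=100 that excludes the first 6 logged intervals (roughly the first epoch) from the throughput measurement. +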
@benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark(self): + if FLAGS.num_gpus == 1 or FLAGS.run_eagerly: + # For single GPU and pure eager tests which are less likely to be input + # bound and more stable, run for shorter time and use the default + # skip_steps. + skip_steps = None + else: + # skip the first epoch for performance measurement. + skip_steps = 600 + super(Resnet50KerasBenchmarkRemoteData, + self)._run_and_report_benchmark(skip_steps=skip_steps) + + +class TrivialKerasBenchmarkReal(keras_benchmark.KerasBenchmark): + """Trivial model with real data benchmark tests.""" + + def __init__(self, output_dir=None, root_data_dir=None, **kwargs): + flag_methods = [resnet_imagenet_main.define_imagenet_keras_flags] + + def_flags = {} + def_flags['use_trivial_model'] = True + def_flags['skip_eval'] = True + def_flags['report_accuracy_metrics'] = False + def_flags['dtype'] = 'fp16' + def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet') + def_flags['train_steps'] = 600 + def_flags['log_steps'] = 100 + def_flags['distribution_strategy'] = 'mirrored' + + super(TrivialKerasBenchmarkReal, self).__init__( + output_dir=output_dir, + flag_methods=flag_methods, + default_flags=def_flags) + + @benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark(self): + start_time_sec = time.time() + stats = resnet_imagenet_main.run(FLAGS) + wall_time_sec = time.time() - start_time_sec + + super(TrivialKerasBenchmarkReal, self)._report_benchmark( + stats, + wall_time_sec, + total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps) + + def benchmark_8_gpu_warmup(self): + """Dummy test that runs over an epoch to warmup the machine.""" + self._setup() + + FLAGS.num_gpus = 8 + FLAGS.enable_eager = True + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_warmup') + FLAGS.batch_size = 256 * 8 + FLAGS.train_steps = 700 + self._run_and_report_benchmark() + + def fill_report_object(self, stats): + super(TrivialKerasBenchmarkReal, self).fill_report_object( + stats, + total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps) + + +class Resnet50MultiWorkerKerasAccuracy(keras_benchmark.KerasBenchmark): + """Resnet50 distributed accuracy tests with multiple workers.""" + + def __init__(self, output_dir=None, root_data_dir=None, **kwargs): + flag_methods = [classifier_trainer.define_imagenet_keras_flags] + self.data_dir = os.path.join(root_data_dir, 'imagenet') + super(Resnet50MultiWorkerKerasAccuracy, self).__init__( + output_dir=output_dir, flag_methods=flag_methods) + + def _benchmark_common(self, eager, num_workers, all_reduce_alg): + """Common to all benchmarks in this class.""" + self._setup() + + num_gpus = 8 + FLAGS.num_gpus = num_gpus + FLAGS.data_dir = self.data_dir + FLAGS.train_epochs = 90 + FLAGS.epochs_between_evals = 10 + FLAGS.dtype = 'fp16' + FLAGS.enable_eager = eager + FLAGS.enable_xla = False + FLAGS.distribution_strategy = 'multi_worker_mirrored' + FLAGS.tf_gpu_thread_mode = 'gpu_private' + FLAGS.datasets_num_private_threads = 32 + FLAGS.model_dir = self._get_model_dir( + 'benchmark_{}_8_gpu_{}_worker_fp16_{}_tweaked'.format( + 'eager' if eager else 'graph', num_workers, all_reduce_alg)) + FLAGS.batch_size = 256 * num_gpus * num_workers + FLAGS.all_reduce_alg = all_reduce_alg + + self._run_and_report_benchmark() + + @benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark(self, + top_1_min=MIN_TOP_1_ACCURACY, + top_1_max=MAX_TOP_1_ACCURACY): + start_time_sec = time.time() + stats = classifier_trainer.run(flags.FLAGS) + 
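# stats is expected to carry the 'accuracy_top_1', 'training_accuracy_top_1' and 'step_timestamp_log' entries that _report_benchmark reads. +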
wall_time_sec = time.time() - start_time_sec + + super(Resnet50MultiWorkerKerasAccuracy, self)._report_benchmark( + stats, + wall_time_sec, + top_1_min=top_1_min, + top_1_max=top_1_max, + total_batch_size=FLAGS.batch_size, + log_steps=100) + + def _get_model_dir(self, folder_name): + return os.path.join(self.output_dir, folder_name) + + def benchmark_eager_8_gpu_2_workers_fp16_ring_tweaked(self): + """Eager, 8 GPUs per worker, 2 workers, fp16, ring all-reduce.""" + self._benchmark_common(eager=True, num_workers=2, all_reduce_alg='ring') + + def benchmark_eager_8_gpu_2_workers_fp16_nccl_tweaked(self): + """Eager, 8 GPUs per worker, 2 workers, fp16, nccl all-reduce.""" + self._benchmark_common(eager=True, num_workers=2, all_reduce_alg='nccl') + + def benchmark_eager_8_gpu_8_workers_fp16_ring_tweaked(self): + """Eager, 8 GPUs per worker, 8 workers, fp16, ring all-reduce.""" + self._benchmark_common(eager=True, num_workers=8, all_reduce_alg='ring') + + def benchmark_eager_8_gpu_8_workers_fp16_nccl_tweaked(self): + """Eager, 8 GPUs per worker, 8 workers, fp16, nccl all-reduce.""" + self._benchmark_common(eager=True, num_workers=8, all_reduce_alg='nccl') + + +class Resnet50MultiWorkerKerasBenchmark(Resnet50KerasBenchmarkBase): + """Resnet50 distributed benchmark tests with multiple workers.""" + + def __init__(self, output_dir=None, default_flags=None): + super(Resnet50MultiWorkerKerasBenchmark, self).__init__( + output_dir=output_dir, default_flags=default_flags) + + def _benchmark_common(self, eager, num_workers, all_reduce_alg): + """Common to all benchmarks in this class.""" + self._setup() + + num_gpus = 8 + FLAGS.num_gpus = num_gpus + FLAGS.dtype = 'fp16' + FLAGS.enable_eager = eager + FLAGS.enable_xla = False + FLAGS.distribution_strategy = 'multi_worker_mirrored' + FLAGS.tf_gpu_thread_mode = 'gpu_private' + FLAGS.datasets_num_private_threads = 32 + FLAGS.model_dir = self._get_model_dir( + 'benchmark_{}_8_gpu_{}_worker_fp16_{}_tweaked'.format( + 'eager' if eager else 'graph', num_workers, all_reduce_alg)) + FLAGS.batch_size = 256 * num_gpus * num_workers + FLAGS.all_reduce_alg = all_reduce_alg + + self._run_and_report_benchmark() + + def benchmark_eager_8_gpu_1_worker_fp16_ring_tweaked(self): + """Eager, 8 GPUs per worker, 1 worker, fp16, ring all-reduce.""" + self._benchmark_common(eager=True, num_workers=1, all_reduce_alg='ring') + + def benchmark_eager_8_gpu_1_worker_fp16_nccl_tweaked(self): + """Eager, 8 GPUs per worker, 1 worker, fp16, nccl all-reduce.""" + self._benchmark_common(eager=True, num_workers=1, all_reduce_alg='nccl') + + def benchmark_eager_8_gpu_2_workers_fp16_ring_tweaked(self): + """Eager, 8 GPUs per worker, 2 workers, fp16, ring all-reduce.""" + self._benchmark_common(eager=True, num_workers=2, all_reduce_alg='ring') + + def benchmark_eager_8_gpu_2_workers_fp16_nccl_tweaked(self): + """Eager, 8 GPUs per worker, 2 workers, fp16, nccl all-reduce.""" + self._benchmark_common(eager=True, num_workers=2, all_reduce_alg='nccl') + + def benchmark_eager_8_gpu_8_workers_fp16_ring_tweaked(self): + """Eager, 8 GPUs per worker, 8 workers, fp16, ring all-reduce.""" + self._benchmark_common(eager=True, num_workers=8, all_reduce_alg='ring') + + def benchmark_eager_8_gpu_8_workers_fp16_nccl_tweaked(self): + """Eager, 8 GPUs per worker, 8 workers, fp16, nccl all-reduce.""" + self._benchmark_common(eager=True, num_workers=8, all_reduce_alg='nccl') + + +class Resnet50MultiWorkerKerasBenchmarkSynth(Resnet50MultiWorkerKerasBenchmark): + """Resnet50 multi-worker synthetic data benchmark 
tests.""" + + def __init__(self, output_dir=None, root_data_dir=None, **kwargs): + def_flags = {} + def_flags['skip_eval'] = True + def_flags['report_accuracy_metrics'] = False + def_flags['use_synthetic_data'] = True + def_flags['train_steps'] = 110 + def_flags['log_steps'] = 10 + + super(Resnet50MultiWorkerKerasBenchmarkSynth, self).__init__( + output_dir=output_dir, default_flags=def_flags) + + +class Resnet50MultiWorkerKerasBenchmarkReal(Resnet50MultiWorkerKerasBenchmark): + """Resnet50 multi-worker real data benchmark tests.""" + + def __init__(self, output_dir=None, root_data_dir=None, **kwargs): + def_flags = {} + def_flags['skip_eval'] = True + def_flags['report_accuracy_metrics'] = False + def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet') + def_flags['train_steps'] = 110 + def_flags['log_steps'] = 10 + + super(Resnet50MultiWorkerKerasBenchmarkReal, self).__init__( + output_dir=output_dir, default_flags=def_flags) + + +# TODO(kimjaehong): This should also cover other model optimization +# techniques. At that point, this class will be renamed to something +# like 'KerasModelOptimizationAccuracyBase'. +class KerasPruningAccuracyBase(keras_benchmark.KerasBenchmark): + """Benchmark accuracy tests for the pruning method.""" + + def __init__(self, + output_dir=None, + root_data_dir=None, + default_flags=None, + **kwargs): + """An accuracy benchmark class for the pruning method. + + Args: + output_dir: directory where to output e.g. log files + root_data_dir: directory under which to look for dataset + default_flags: default flags + **kwargs: arbitrary named arguments. This is needed to make the + constructor forward compatible in case PerfZero provides more + named arguments before updating the constructor. + """ + if default_flags is None: + default_flags = {} + default_flags['pruning_method'] = 'polynomial_decay' + default_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet') + + flag_methods = [resnet_imagenet_main.define_imagenet_keras_flags] + + super(KerasPruningAccuracyBase, self).__init__( + output_dir=output_dir, + flag_methods=flag_methods, + default_flags=default_flags, + **kwargs) + + def benchmark_8_gpu(self): + """Test Keras model with eager, dist_strat and 8 GPUs.""" + self._setup() + FLAGS.num_gpus = 8 + FLAGS.batch_size = 32 * 8 + FLAGS.train_epochs = 90 + FLAGS.epochs_between_evals = 10 + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu') + FLAGS.dtype = 'fp32' + FLAGS.enable_eager = True + self._run_and_report_benchmark() + + @benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark(self, + top_1_min=MODEL_OPTIMIZATION_TOP_1_ACCURACY[ + 'RESNET50_FINETUNE_PRUNING'][0], + top_1_max=MODEL_OPTIMIZATION_TOP_1_ACCURACY[ + 'RESNET50_FINETUNE_PRUNING'][1]): + start_time_sec = time.time() + stats = resnet_imagenet_main.run(flags.FLAGS) + wall_time_sec = time.time() - start_time_sec + + super(KerasPruningAccuracyBase, self)._report_benchmark( + stats, + wall_time_sec, + top_1_min=top_1_min, + top_1_max=top_1_max, + total_batch_size=FLAGS.batch_size, + log_steps=100) + + +class MobilenetV1KerasPruningAccuracy(KerasPruningAccuracyBase): + """Benchmark accuracy tests for MobilenetV1 with pruning method.""" + + def __init__(self, root_data_dir=None, **kwargs): + default_flags = { + 'model': 'mobilenet', + 'optimizer': 'mobilenet_default', + 'initial_learning_rate_per_sample': 0.00007, + 'pretrained_filepath': tf.train.latest_checkpoint( + os.path.join(root_data_dir, 'mobilenet_v1')), + 'pruning_begin_step': 0, + 'pruning_end_step':
100000, + 'pruning_initial_sparsity': 0.0, + 'pruning_final_sparsity': 0.5, + 'pruning_frequency': 100, + } + super(MobilenetV1KerasPruningAccuracy, self).__init__( + root_data_dir=root_data_dir, + default_flags=default_flags, + **kwargs) + + def _run_and_report_benchmark(self): + super(MobilenetV1KerasPruningAccuracy, self)._run_and_report_benchmark( + top_1_min=\ + MODEL_OPTIMIZATION_TOP_1_ACCURACY['MOBILENET_V1_FINETUNE_PRUNING'][0], + top_1_max=\ + MODEL_OPTIMIZATION_TOP_1_ACCURACY['MOBILENET_V1_FINETUNE_PRUNING'][1]) + + +class Resnet50KerasPruningAccuracy(KerasPruningAccuracyBase): + """Benchmark accuracy tests for resnet50 with pruning method.""" + + def __init__(self, root_data_dir=None, **kwargs): + default_flags = { + 'model': 'resnet50_v1.5', + 'optimizer': 'mobilenet_default', + 'initial_learning_rate_per_sample': 0.0000039, + 'pretrained_filepath': tf.train.latest_checkpoint( + os.path.join(root_data_dir, 'resnet50')), + 'pruning_begin_step': 0, + 'pruning_end_step': 50000, + 'pruning_initial_sparsity': 0.0, + 'pruning_final_sparsity': 0.5, + 'pruning_frequency': 100, + } + super(Resnet50KerasPruningAccuracy, self).__init__( + root_data_dir=root_data_dir, + default_flags=default_flags, + **kwargs) + + def _run_and_report_benchmark(self): + super(Resnet50KerasPruningAccuracy, self)._run_and_report_benchmark( + top_1_min=\ + MODEL_OPTIMIZATION_TOP_1_ACCURACY['RESNET50_FINETUNE_PRUNING'][0], + top_1_max=\ + MODEL_OPTIMIZATION_TOP_1_ACCURACY['RESNET50_FINETUNE_PRUNING'][1]) + + +class KerasPruningBenchmarkRealBase(Resnet50KerasBenchmarkBase): + """Pruning method benchmarks.""" + + def __init__(self, root_data_dir=None, default_flags=None, **kwargs): + if default_flags is None: + default_flags = {} + default_flags.update({ + 'skip_eval': True, + 'report_accuracy_metrics': False, + 'data_dir': os.path.join(root_data_dir, 'imagenet'), + 'train_steps': 110, + 'log_steps': 10, + 'pruning_method': 'polynomial_decay', + 'pruning_begin_step': 0, + 'pruning_end_step': 50000, + 'pruning_initial_sparsity': 0, + 'pruning_final_sparsity': 0.5, + 'pruning_frequency': 100, + }) + super(KerasPruningBenchmarkRealBase, self).__init__( + default_flags=default_flags, **kwargs) + + +class MobilenetV1KerasPruningBenchmarkReal(KerasPruningBenchmarkRealBase): + """Pruning method benchmarks for MobilenetV1.""" + + def __init__(self, **kwargs): + default_flags = { + 'model': 'mobilenet', + 'optimizer': 'mobilenet_default', + } + super(MobilenetV1KerasPruningBenchmarkReal, self).__init__( + default_flags=default_flags, **kwargs) + + +class Resnet50KerasPruningBenchmarkReal(KerasPruningBenchmarkRealBase): + """Pruning method benchmarks for resnet50.""" + + def __init__(self, **kwargs): + default_flags = { + 'model': 'resnet50_v1.5', + 'optimizer': 'mobilenet_default', + } + super(Resnet50KerasPruningBenchmarkReal, self).__init__( + default_flags=default_flags, **kwargs) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/benchmark/models/__init__.py b/models/official/benchmark/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/official/benchmark/models/cifar_preprocessing.py b/models/official/benchmark/models/cifar_preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..18d7fe630e194953c8c5f3f7552c7104c6155c9a --- /dev/null +++ b/models/official/benchmark/models/cifar_preprocessing.py @@ -0,0 +1,159 @@ +# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Provides utilities to Cifar-10 dataset.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +from absl import logging +import tensorflow as tf + +from official.vision.image_classification.resnet import imagenet_preprocessing + +HEIGHT = 32 +WIDTH = 32 +NUM_CHANNELS = 3 +_DEFAULT_IMAGE_BYTES = HEIGHT * WIDTH * NUM_CHANNELS +# The record is the image plus a one-byte label +_RECORD_BYTES = _DEFAULT_IMAGE_BYTES + 1 + +# TODO(tobyboyd): Change to best practice 45K(train)/5K(val)/10K(test) splits. +NUM_IMAGES = { + 'train': 50000, + 'validation': 10000, +} +_NUM_DATA_FILES = 5 +NUM_CLASSES = 10 + + +def parse_record(raw_record, is_training, dtype): + """Parses a record containing a training example of an image. + + The input record is parsed into a label and image, and the image is passed + through preprocessing steps (cropping, flipping, and so on). + + This method converts the label to one hot to fit the loss function. + + Args: + raw_record: scalar Tensor tf.string containing a serialized + Example protocol buffer. + is_training: A boolean denoting whether the input is for training. + dtype: Data type to use for input images. + + Returns: + Tuple with processed image tensor and one-hot-encoded label tensor. + """ + # Convert bytes to a vector of uint8 that is record_bytes long. + record_vector = tf.io.decode_raw(raw_record, tf.uint8) + + # The first byte represents the label, which we convert from uint8 to int32 + # and then to one-hot. + label = tf.cast(record_vector[0], tf.int32) + + # The remaining bytes after the label represent the image, which we reshape + # from [depth * height * width] to [depth, height, width]. + depth_major = tf.reshape(record_vector[1:_RECORD_BYTES], + [NUM_CHANNELS, HEIGHT, WIDTH]) + + # Convert from [depth, height, width] to [height, width, depth], and cast as + # float32. + image = tf.cast(tf.transpose(a=depth_major, perm=[1, 2, 0]), tf.float32) + + image = preprocess_image(image, is_training) + image = tf.cast(image, dtype) + + return image, label + + +def preprocess_image(image, is_training): + """Preprocess a single image of layout [height, width, depth].""" + if is_training: + # Resize the image to add four extra pixels on each side. + image = tf.image.resize_with_crop_or_pad( + image, HEIGHT + 8, WIDTH + 8) + + # Randomly crop a [HEIGHT, WIDTH] section of the image. + image = tf.image.random_crop(image, [HEIGHT, WIDTH, NUM_CHANNELS]) + + # Randomly flip the image horizontally. + image = tf.image.random_flip_left_right(image) + + # Subtract off the mean and divide by the variance of the pixels. 
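+ # (Unlike the random crop/flip above, this standardization also runs at eval time.)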
+ image = tf.image.per_image_standardization(image) + return image + + +def get_filenames(is_training, data_dir): + """Returns a list of filenames.""" + assert tf.io.gfile.exists(data_dir), ( + 'Run cifar10_download_and_extract.py first to download and extract the ' + 'CIFAR-10 data.') + + if is_training: + return [ + os.path.join(data_dir, 'data_batch_%d.bin' % i) + for i in range(1, _NUM_DATA_FILES + 1) + ] + else: + return [os.path.join(data_dir, 'test_batch.bin')] + + +def input_fn(is_training, + data_dir, + batch_size, + dtype=tf.float32, + datasets_num_private_threads=None, + parse_record_fn=parse_record, + input_context=None, + drop_remainder=False): + """Input function which provides batches for train or eval. + + Args: + is_training: A boolean denoting whether the input is for training. + data_dir: The directory containing the input data. + batch_size: The number of samples per batch. + dtype: Data type to use for images/features + datasets_num_private_threads: Number of private threads for tf.data. + parse_record_fn: Function to use for parsing the records. + input_context: A `tf.distribute.InputContext` object passed in by + `tf.distribute.Strategy`. + drop_remainder: A boolean indicates whether to drop the remainder of the + batches. If True, the batch dimension will be static. + + Returns: + A dataset that can be used for iteration. + """ + filenames = get_filenames(is_training, data_dir) + dataset = tf.data.FixedLengthRecordDataset(filenames, _RECORD_BYTES) + + if input_context: + logging.info( + 'Sharding the dataset: input_pipeline_id=%d num_input_pipelines=%d', + input_context.input_pipeline_id, input_context.num_input_pipelines) + dataset = dataset.shard(input_context.num_input_pipelines, + input_context.input_pipeline_id) + + return imagenet_preprocessing.process_record_dataset( + dataset=dataset, + is_training=is_training, + batch_size=batch_size, + shuffle_buffer=NUM_IMAGES['train'], + parse_record_fn=parse_record_fn, + dtype=dtype, + datasets_num_private_threads=datasets_num_private_threads, + drop_remainder=drop_remainder + ) diff --git a/models/official/benchmark/models/resnet_cifar_main.py b/models/official/benchmark/models/resnet_cifar_main.py new file mode 100644 index 0000000000000000000000000000000000000000..4a02fec8b96e25228e6e0467d646c26995f944fc --- /dev/null +++ b/models/official/benchmark/models/resnet_cifar_main.py @@ -0,0 +1,284 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Runs a ResNet model on the Cifar-10 dataset.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import app +from absl import flags +from absl import logging +import numpy as np +import tensorflow as tf +from official.benchmark.models import cifar_preprocessing +from official.benchmark.models import resnet_cifar_model +from official.benchmark.models import synthetic_util +from official.utils.flags import core as flags_core +from official.utils.misc import distribution_utils +from official.utils.misc import keras_utils +from official.vision.image_classification.resnet import common + + +LR_SCHEDULE = [ # (multiplier, epoch to start) tuples + (0.1, 91), (0.01, 136), (0.001, 182) +] + + +def learning_rate_schedule(current_epoch, + current_batch, + batches_per_epoch, + batch_size): + """Handles linear scaling rule and LR decay. + + Scale learning rate at epoch boundaries provided in LR_SCHEDULE by the + provided scaling factor. + + Args: + current_epoch: integer, current epoch indexed from 0. + current_batch: integer, current batch in the current epoch, indexed from 0. + batches_per_epoch: integer, number of steps in an epoch. + batch_size: integer, total batch sized. + + Returns: + Adjusted learning rate. + """ + del current_batch, batches_per_epoch # not used + initial_learning_rate = common.BASE_LEARNING_RATE * batch_size / 128 + learning_rate = initial_learning_rate + for mult, start_epoch in LR_SCHEDULE: + if current_epoch >= start_epoch: + learning_rate = initial_learning_rate * mult + else: + break + return learning_rate + + +class LearningRateBatchScheduler(tf.keras.callbacks.Callback): + """Callback to update learning rate on every batch (not epoch boundaries). + + N.B. Only support Keras optimizers, not TF optimizers. + + Attributes: + schedule: a function that takes an epoch index and a batch index as input + (both integer, indexed from 0) and returns a new learning rate as + output (float). + """ + + def __init__(self, schedule, batch_size, steps_per_epoch): + super(LearningRateBatchScheduler, self).__init__() + self.schedule = schedule + self.steps_per_epoch = steps_per_epoch + self.batch_size = batch_size + self.epochs = -1 + self.prev_lr = -1 + + def on_epoch_begin(self, epoch, logs=None): + if not hasattr(self.model.optimizer, 'learning_rate'): + raise ValueError('Optimizer must have a "learning_rate" attribute.') + self.epochs += 1 + + def on_batch_begin(self, batch, logs=None): + """Executes before step begins.""" + lr = self.schedule(self.epochs, + batch, + self.steps_per_epoch, + self.batch_size) + if not isinstance(lr, (float, np.float32, np.float64)): + raise ValueError('The output of the "schedule" function should be float.') + if lr != self.prev_lr: + self.model.optimizer.learning_rate = lr # lr should be a float here + self.prev_lr = lr + logging.debug( + 'Epoch %05d Batch %05d: LearningRateBatchScheduler ' + 'change learning rate to %s.', self.epochs, batch, lr) + + +def run(flags_obj): + """Run ResNet Cifar-10 training and eval loop using native Keras APIs. + + Args: + flags_obj: An object containing parsed flag values. + + Raises: + ValueError: If fp16 is passed as it is not currently supported. + + Returns: + Dictionary of training and eval stats. 
+ """ + keras_utils.set_session_config( + enable_xla=flags_obj.enable_xla) + + # Execute flag override logic for better model performance + if flags_obj.tf_gpu_thread_mode: + keras_utils.set_gpu_thread_mode_and_count( + per_gpu_thread_count=flags_obj.per_gpu_thread_count, + gpu_thread_mode=flags_obj.tf_gpu_thread_mode, + num_gpus=flags_obj.num_gpus, + datasets_num_private_threads=flags_obj.datasets_num_private_threads) + common.set_cudnn_batchnorm_mode() + + dtype = flags_core.get_tf_dtype(flags_obj) + if dtype == 'fp16': + raise ValueError('dtype fp16 is not supported in Keras. Use the default ' + 'value(fp32).') + + data_format = flags_obj.data_format + if data_format is None: + data_format = ('channels_first' if tf.config.list_physical_devices('GPU') + else 'channels_last') + tf.keras.backend.set_image_data_format(data_format) + + strategy = distribution_utils.get_distribution_strategy( + distribution_strategy=flags_obj.distribution_strategy, + num_gpus=flags_obj.num_gpus, + all_reduce_alg=flags_obj.all_reduce_alg, + num_packs=flags_obj.num_packs) + + if strategy: + # flags_obj.enable_get_next_as_optional controls whether enabling + # get_next_as_optional behavior in DistributedIterator. If true, last + # partial batch can be supported. + strategy.extended.experimental_enable_get_next_as_optional = ( + flags_obj.enable_get_next_as_optional + ) + + strategy_scope = distribution_utils.get_strategy_scope(strategy) + + if flags_obj.use_synthetic_data: + synthetic_util.set_up_synthetic_data() + input_fn = common.get_synth_input_fn( + height=cifar_preprocessing.HEIGHT, + width=cifar_preprocessing.WIDTH, + num_channels=cifar_preprocessing.NUM_CHANNELS, + num_classes=cifar_preprocessing.NUM_CLASSES, + dtype=flags_core.get_tf_dtype(flags_obj), + drop_remainder=True) + else: + synthetic_util.undo_set_up_synthetic_data() + input_fn = cifar_preprocessing.input_fn + + train_input_dataset = input_fn( + is_training=True, + data_dir=flags_obj.data_dir, + batch_size=flags_obj.batch_size, + parse_record_fn=cifar_preprocessing.parse_record, + datasets_num_private_threads=flags_obj.datasets_num_private_threads, + dtype=dtype, + # Setting drop_remainder to avoid the partial batch logic in normalization + # layer, which triggers tf.where and leads to extra memory copy of input + # sizes between host and GPU. 
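+ # When enable_get_next_as_optional is set, partial batches are supported, so the remainder is kept.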
+ drop_remainder=(not flags_obj.enable_get_next_as_optional)) + + eval_input_dataset = None + if not flags_obj.skip_eval: + eval_input_dataset = input_fn( + is_training=False, + data_dir=flags_obj.data_dir, + batch_size=flags_obj.batch_size, + parse_record_fn=cifar_preprocessing.parse_record) + + steps_per_epoch = ( + cifar_preprocessing.NUM_IMAGES['train'] // flags_obj.batch_size) + lr_schedule = 0.1 + if flags_obj.use_tensor_lr: + initial_learning_rate = common.BASE_LEARNING_RATE * flags_obj.batch_size / 128 + lr_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay( + boundaries=list(p[1] * steps_per_epoch for p in LR_SCHEDULE), + values=[initial_learning_rate] + + list(p[0] * initial_learning_rate for p in LR_SCHEDULE)) + + with strategy_scope: + optimizer = common.get_optimizer(lr_schedule) + model = resnet_cifar_model.resnet56(classes=cifar_preprocessing.NUM_CLASSES) + model.compile( + loss='sparse_categorical_crossentropy', + optimizer=optimizer, + metrics=(['sparse_categorical_accuracy'] + if flags_obj.report_accuracy_metrics else None), + run_eagerly=flags_obj.run_eagerly) + + train_epochs = flags_obj.train_epochs + + callbacks = common.get_callbacks() + + if not flags_obj.use_tensor_lr: + lr_callback = LearningRateBatchScheduler( + schedule=learning_rate_schedule, + batch_size=flags_obj.batch_size, + steps_per_epoch=steps_per_epoch) + callbacks.append(lr_callback) + + # If training for multiple epochs, ignore the train_steps flag. + if train_epochs <= 1 and flags_obj.train_steps: + steps_per_epoch = min(flags_obj.train_steps, steps_per_epoch) + train_epochs = 1 + + num_eval_steps = (cifar_preprocessing.NUM_IMAGES['validation'] // + flags_obj.batch_size) + + validation_data = eval_input_dataset + if flags_obj.skip_eval: + if flags_obj.set_learning_phase_to_train: + # TODO(haoyuzhang): Understand slowdown of setting learning phase when + # not using distribution strategy. + tf.keras.backend.set_learning_phase(1) + num_eval_steps = None + validation_data = None + + if not strategy and flags_obj.explicit_gpu_placement: + # TODO(b/135607227): Add device scope automatically in Keras training loop + # when not using distribution strategy.
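+ # Without a strategy, explicitly pin ops to GPU:0 for the duration of fit/evaluate.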
+ no_dist_strat_device = tf.device('/device:GPU:0') + no_dist_strat_device.__enter__() + + history = model.fit(train_input_dataset, + epochs=train_epochs, + steps_per_epoch=steps_per_epoch, + callbacks=callbacks, + validation_steps=num_eval_steps, + validation_data=validation_data, + validation_freq=flags_obj.epochs_between_evals, + verbose=2) + eval_output = None + if not flags_obj.skip_eval: + eval_output = model.evaluate(eval_input_dataset, + steps=num_eval_steps, + verbose=2) + + if not strategy and flags_obj.explicit_gpu_placement: + no_dist_strat_device.__exit__() + + stats = common.build_stats(history, eval_output, callbacks) + return stats + + +def define_cifar_flags(): + common.define_keras_flags(dynamic_loss_scale=False) + + flags_core.set_defaults(data_dir='/tmp/cifar10_data/cifar-10-batches-bin', + model_dir='/tmp/cifar10_model', + epochs_between_evals=10, + batch_size=128) + + +def main(_): + return run(flags.FLAGS) + + +if __name__ == '__main__': + logging.set_verbosity(logging.INFO) + define_cifar_flags() + app.run(main) diff --git a/models/official/benchmark/models/resnet_cifar_model.py b/models/official/benchmark/models/resnet_cifar_model.py new file mode 100644 index 0000000000000000000000000000000000000000..1b507381f1b6907fdfb078d8316f3621a9e2b8f7 --- /dev/null +++ b/models/official/benchmark/models/resnet_cifar_model.py @@ -0,0 +1,262 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""ResNet56 model for Keras adapted from tf.keras.applications.ResNet50. + +# Reference: +- [Deep Residual Learning for Image Recognition]( + https://arxiv.org/abs/1512.03385) +Adapted from code contributed by BigMoyan. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import tensorflow as tf +from tensorflow.python.keras import backend +from tensorflow.python.keras import initializers +from tensorflow.python.keras import layers +from tensorflow.python.keras import regularizers + + +BATCH_NORM_DECAY = 0.997 +BATCH_NORM_EPSILON = 1e-5 +L2_WEIGHT_DECAY = 2e-4 + + +def identity_building_block(input_tensor, + kernel_size, + filters, + stage, + block, + training=None): + """The identity block is the block that has no conv layer at shortcut. + + Arguments: + input_tensor: input tensor + kernel_size: default 3, the kernel size of + middle conv layer at main path + filters: list of integers, the filters of 3 conv layer at main path + stage: integer, current stage label, used for generating layer names + block: current block label, used for generating layer names + training: Only used if training keras model with Estimator. In other + scenarios it is handled automatically. + + Returns: + Output tensor for the block. 
+ """ + filters1, filters2 = filters + if backend.image_data_format() == 'channels_last': + bn_axis = 3 + else: + bn_axis = 1 + conv_name_base = 'res' + str(stage) + block + '_branch' + bn_name_base = 'bn' + str(stage) + block + '_branch' + + x = layers.Conv2D(filters1, kernel_size, + padding='same', use_bias=False, + kernel_initializer='he_normal', + kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY), + name=conv_name_base + '2a')(input_tensor) + x = layers.BatchNormalization( + axis=bn_axis, momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON, + name=bn_name_base + '2a')(x, training=training) + x = layers.Activation('relu')(x) + + x = layers.Conv2D(filters2, kernel_size, + padding='same', use_bias=False, + kernel_initializer='he_normal', + kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY), + name=conv_name_base + '2b')(x) + x = layers.BatchNormalization( + axis=bn_axis, momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON, + name=bn_name_base + '2b')(x, training=training) + + x = layers.add([x, input_tensor]) + x = layers.Activation('relu')(x) + return x + + +def conv_building_block(input_tensor, + kernel_size, + filters, + stage, + block, + strides=(2, 2), + training=None): + """A block that has a conv layer at shortcut. + + Arguments: + input_tensor: input tensor + kernel_size: default 3, the kernel size of + middle conv layer at main path + filters: list of integers, the filters of 3 conv layer at main path + stage: integer, current stage label, used for generating layer names + block: current block label, used for generating layer names + strides: Strides for the first conv layer in the block. + training: Only used if training keras model with Estimator. In other + scenarios it is handled automatically. + + Returns: + Output tensor for the block. 
+ + Note that from stage 3, + the first conv layer at main path is with strides=(2, 2) + And the shortcut should have strides=(2, 2) as well + """ + filters1, filters2 = filters + if tf.keras.backend.image_data_format() == 'channels_last': + bn_axis = 3 + else: + bn_axis = 1 + conv_name_base = 'res' + str(stage) + block + '_branch' + bn_name_base = 'bn' + str(stage) + block + '_branch' + + x = layers.Conv2D(filters1, kernel_size, strides=strides, + padding='same', use_bias=False, + kernel_initializer='he_normal', + kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY), + name=conv_name_base + '2a')(input_tensor) + x = layers.BatchNormalization( + axis=bn_axis, momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON, + name=bn_name_base + '2a')(x, training=training) + x = layers.Activation('relu')(x) + + x = layers.Conv2D(filters2, kernel_size, padding='same', use_bias=False, + kernel_initializer='he_normal', + kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY), + name=conv_name_base + '2b')(x) + x = layers.BatchNormalization( + axis=bn_axis, momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON, + name=bn_name_base + '2b')(x, training=training) + + shortcut = layers.Conv2D(filters2, (1, 1), strides=strides, use_bias=False, + kernel_initializer='he_normal', + kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY), + name=conv_name_base + '1')(input_tensor) + shortcut = layers.BatchNormalization( + axis=bn_axis, momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON, + name=bn_name_base + '1')(shortcut, training=training) + + x = layers.add([x, shortcut]) + x = layers.Activation('relu')(x) + return x + + +def resnet_block(input_tensor, + size, + kernel_size, + filters, + stage, + conv_strides=(2, 2), + training=None): + """A block which applies conv followed by multiple identity blocks. + + Arguments: + input_tensor: input tensor + size: integer, number of constituent conv/identity building blocks. + A conv block is applied once, followed by (size - 1) identity blocks. + kernel_size: default 3, the kernel size of + middle conv layer at main path + filters: list of integers, the filters of 3 conv layer at main path + stage: integer, current stage label, used for generating layer names + conv_strides: Strides for the first conv layer in the block. + training: Only used if training keras model with Estimator. In other + scenarios it is handled automatically. + + Returns: + Output tensor after applying conv and identity blocks. + """ + + x = conv_building_block(input_tensor, kernel_size, filters, stage=stage, + strides=conv_strides, block='block_0', + training=training) + for i in range(size - 1): + x = identity_building_block(x, kernel_size, filters, stage=stage, + block='block_%d' % (i + 1), training=training) + return x + + +def resnet(num_blocks, classes=10, training=None): + """Instantiates the ResNet architecture. + + Arguments: + num_blocks: integer, the number of conv/identity blocks in each block. + The ResNet contains 3 blocks with each block containing one conv block + followed by (layers_per_block - 1) number of idenity blocks. Each + conv/idenity block has 2 convolutional layers. With the input + convolutional layer and the pooling layer towards the end, this brings + the total size of the network to (6*num_blocks + 2) + classes: optional number of classes to classify images into + training: Only used if training keras model with Estimator. In other + scenarios it is handled automatically. + + Returns: + A Keras model instance. 
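+ + Example (illustrative): the `resnet56` partial defined at the bottom of this module calls this function with num_blocks=9, yielding 6*9 + 2 = 56 layers for the default 10 CIFAR-10 classes.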
+ """ + + input_shape = (32, 32, 3) + img_input = layers.Input(shape=input_shape) + + if backend.image_data_format() == 'channels_first': + x = layers.Lambda(lambda x: backend.permute_dimensions(x, (0, 3, 1, 2)), + name='transpose')(img_input) + bn_axis = 1 + else: # channels_last + x = img_input + bn_axis = 3 + + x = layers.ZeroPadding2D(padding=(1, 1), name='conv1_pad')(x) + x = layers.Conv2D(16, (3, 3), + strides=(1, 1), + padding='valid', use_bias=False, + kernel_initializer='he_normal', + kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY), + name='conv1')(x) + x = layers.BatchNormalization(axis=bn_axis, + momentum=BATCH_NORM_DECAY, + epsilon=BATCH_NORM_EPSILON, + name='bn_conv1',)(x, training=training) + x = layers.Activation('relu')(x) + + x = resnet_block(x, size=num_blocks, kernel_size=3, filters=[16, 16], + stage=2, conv_strides=(1, 1), training=training) + + x = resnet_block(x, size=num_blocks, kernel_size=3, filters=[32, 32], + stage=3, conv_strides=(2, 2), training=training) + + x = resnet_block(x, size=num_blocks, kernel_size=3, filters=[64, 64], + stage=4, conv_strides=(2, 2), training=training) + + rm_axes = [1, 2] if backend.image_data_format() == 'channels_last' else [2, 3] + x = layers.Lambda(lambda x: backend.mean(x, rm_axes), name='reduce_mean')(x) + x = layers.Dense(classes, + activation='softmax', + kernel_initializer=initializers.RandomNormal(stddev=0.01), + kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY), + bias_regularizer=regularizers.l2(L2_WEIGHT_DECAY), + name='fc10')(x) + + inputs = img_input + # Create model. + model = tf.keras.models.Model(inputs, x, name='resnet56') + + return model + + +resnet20 = functools.partial(resnet, num_blocks=3) +resnet32 = functools.partial(resnet, num_blocks=5) +resnet56 = functools.partial(resnet, num_blocks=9) +resnet110 = functools.partial(resnet, num_blocks=18) diff --git a/models/official/benchmark/models/resnet_cifar_test.py b/models/official/benchmark/models/resnet_cifar_test.py new file mode 100644 index 0000000000000000000000000000000000000000..c160f44eca1b6faf9def08860ebbdc6403d352e3 --- /dev/null +++ b/models/official/benchmark/models/resnet_cifar_test.py @@ -0,0 +1,180 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+# ============================================================================== +"""Test the keras ResNet model with Cifar data.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tempfile + +import tensorflow as tf + +from tensorflow.python.eager import context +from tensorflow.python.platform import googletest +from official.benchmark.models import cifar_preprocessing +from official.benchmark.models import resnet_cifar_main +from official.utils.testing import integration + + +class KerasCifarTest(googletest.TestCase): + """Unit tests for Keras ResNet with Cifar.""" + + _extra_flags = [ + "-batch_size", "4", + "-train_steps", "1", + "-use_synthetic_data", "true" + ] + _tempdir = None + + def get_temp_dir(self): + if not self._tempdir: + self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir()) + return self._tempdir + + @classmethod + def setUpClass(cls): # pylint: disable=invalid-name + super(KerasCifarTest, cls).setUpClass() + resnet_cifar_main.define_cifar_flags() + + def setUp(self): + super(KerasCifarTest, self).setUp() + cifar_preprocessing.NUM_IMAGES["validation"] = 4 + + def tearDown(self): + super(KerasCifarTest, self).tearDown() + tf.io.gfile.rmtree(self.get_temp_dir()) + + def test_end_to_end_no_dist_strat(self): + """Test Keras model with 1 GPU, no distribution strategy.""" + + extra_flags = [ + "-distribution_strategy", "off", + "-model_dir", "keras_cifar_no_dist_strat", + "-data_format", "channels_last", + ] + extra_flags = extra_flags + self._extra_flags + + integration.run_synthetic( + main=resnet_cifar_main.run, + tmp_root=self.get_temp_dir(), + extra_flags=extra_flags + ) + + def test_end_to_end_graph_no_dist_strat(self): + """Test Keras model in legacy graph mode with 1 GPU, no dist strat.""" + extra_flags = [ + "-enable_eager", "false", + "-distribution_strategy", "off", + "-model_dir", "keras_cifar_graph_no_dist_strat", + "-data_format", "channels_last", + ] + extra_flags = extra_flags + self._extra_flags + + integration.run_synthetic( + main=resnet_cifar_main.run, + tmp_root=self.get_temp_dir(), + extra_flags=extra_flags + ) + + def test_end_to_end_1_gpu(self): + """Test Keras model with 1 GPU.""" + + if context.num_gpus() < 1: + self.skipTest( + "{} GPUs are not available for this test. {} GPUs are available". + format(1, context.num_gpus())) + + extra_flags = [ + "-num_gpus", "1", + "-distribution_strategy", "mirrored", + "-model_dir", "keras_cifar_1_gpu", + "-data_format", "channels_last", + ] + extra_flags = extra_flags + self._extra_flags + + integration.run_synthetic( + main=resnet_cifar_main.run, + tmp_root=self.get_temp_dir(), + extra_flags=extra_flags + ) + + def test_end_to_end_graph_1_gpu(self): + """Test Keras model in legacy graph mode with 1 GPU.""" + if context.num_gpus() < 1: + self.skipTest( + "{} GPUs are not available for this test. {} GPUs are available". + format(1, context.num_gpus())) + + extra_flags = [ + "-num_gpus", "1", + "-noenable_eager", + "-distribution_strategy", "mirrored", + "-model_dir", "keras_cifar_graph_1_gpu", + "-data_format", "channels_last", + ] + extra_flags = extra_flags + self._extra_flags + + integration.run_synthetic( + main=resnet_cifar_main.run, + tmp_root=self.get_temp_dir(), + extra_flags=extra_flags + ) + + def test_end_to_end_2_gpu(self): + """Test Keras model with 2 GPUs.""" + + if context.num_gpus() < 2: + self.skipTest( + "{} GPUs are not available for this test. {} GPUs are available". 
+ format(2, context.num_gpus())) + + extra_flags = [ + "-num_gpus", "2", + "-distribution_strategy", "mirrored", + "-model_dir", "keras_cifar_2_gpu", + ] + extra_flags = extra_flags + self._extra_flags + + integration.run_synthetic( + main=resnet_cifar_main.run, + tmp_root=self.get_temp_dir(), + extra_flags=extra_flags + ) + + def test_end_to_end_graph_2_gpu(self): + """Test Keras model in legacy graph mode with 2 GPUs.""" + if context.num_gpus() < 2: + self.skipTest( + "{} GPUs are not available for this test. {} GPUs are available". + format(2, context.num_gpus())) + + extra_flags = [ + "-num_gpus", "2", + "-enable_eager", "false", + "-distribution_strategy", "mirrored", + "-model_dir", "keras_cifar_graph_2_gpu", + ] + extra_flags = extra_flags + self._extra_flags + + integration.run_synthetic( + main=resnet_cifar_main.run, + tmp_root=self.get_temp_dir(), + extra_flags=extra_flags + ) + + +if __name__ == "__main__": + googletest.main() diff --git a/models/official/benchmark/models/resnet_imagenet_main.py b/models/official/benchmark/models/resnet_imagenet_main.py new file mode 100644 index 0000000000000000000000000000000000000000..5a3cd503126e8796aed8a59164e9dcd6bef9c1dc --- /dev/null +++ b/models/official/benchmark/models/resnet_imagenet_main.py @@ -0,0 +1,301 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Runs a ResNet model on the ImageNet dataset.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from absl import app +from absl import flags +from absl import logging +import tensorflow as tf + +import tensorflow_model_optimization as tfmot +from official.modeling import performance +from official.utils.flags import core as flags_core +from official.utils.misc import distribution_utils +from official.utils.misc import keras_utils +from official.utils.misc import model_helpers +from official.vision.image_classification import test_utils +from official.vision.image_classification.resnet import common +from official.vision.image_classification.resnet import imagenet_preprocessing +from official.vision.image_classification.resnet import resnet_model + + +def run(flags_obj): + """Run ResNet ImageNet training and eval loop using native Keras APIs. + + Args: + flags_obj: An object containing parsed flag values. + + Raises: + ValueError: If fp16 is passed as it is not currently supported. + NotImplementedError: If some features are not currently supported. + + Returns: + Dictionary of training and eval stats. 
+ """ + keras_utils.set_session_config( + enable_xla=flags_obj.enable_xla) + + # Execute flag override logic for better model performance + if flags_obj.tf_gpu_thread_mode: + keras_utils.set_gpu_thread_mode_and_count( + per_gpu_thread_count=flags_obj.per_gpu_thread_count, + gpu_thread_mode=flags_obj.tf_gpu_thread_mode, + num_gpus=flags_obj.num_gpus, + datasets_num_private_threads=flags_obj.datasets_num_private_threads) + common.set_cudnn_batchnorm_mode() + + dtype = flags_core.get_tf_dtype(flags_obj) + performance.set_mixed_precision_policy( + flags_core.get_tf_dtype(flags_obj), + flags_core.get_loss_scale(flags_obj, default_for_fp16=128)) + + data_format = flags_obj.data_format + if data_format is None: + data_format = ('channels_first' if tf.config.list_physical_devices('GPU') + else 'channels_last') + tf.keras.backend.set_image_data_format(data_format) + + # Configures cluster spec for distribution strategy. + _ = distribution_utils.configure_cluster(flags_obj.worker_hosts, + flags_obj.task_index) + + strategy = distribution_utils.get_distribution_strategy( + distribution_strategy=flags_obj.distribution_strategy, + num_gpus=flags_obj.num_gpus, + all_reduce_alg=flags_obj.all_reduce_alg, + num_packs=flags_obj.num_packs, + tpu_address=flags_obj.tpu) + + if strategy: + # flags_obj.enable_get_next_as_optional controls whether enabling + # get_next_as_optional behavior in DistributedIterator. If true, last + # partial batch can be supported. + strategy.extended.experimental_enable_get_next_as_optional = ( + flags_obj.enable_get_next_as_optional + ) + + strategy_scope = distribution_utils.get_strategy_scope(strategy) + + # pylint: disable=protected-access + if flags_obj.use_synthetic_data: + input_fn = common.get_synth_input_fn( + height=imagenet_preprocessing.DEFAULT_IMAGE_SIZE, + width=imagenet_preprocessing.DEFAULT_IMAGE_SIZE, + num_channels=imagenet_preprocessing.NUM_CHANNELS, + num_classes=imagenet_preprocessing.NUM_CLASSES, + dtype=dtype, + drop_remainder=True) + else: + input_fn = imagenet_preprocessing.input_fn + + # When `enable_xla` is True, we always drop the remainder of the batches + # in the dataset, as XLA-GPU doesn't support dynamic shapes. + drop_remainder = flags_obj.enable_xla + + # Current resnet_model.resnet50 input format is always channel-last. + # We use keras_application mobilenet model which input format is depends on + # the keras beckend image data format. + # This use_keras_image_data_format flags indicates whether image preprocessor + # output format should be same as the keras backend image data format or just + # channel-last format. 
+ use_keras_image_data_format = (flags_obj.model == 'mobilenet') + train_input_dataset = input_fn( + is_training=True, + data_dir=flags_obj.data_dir, + batch_size=flags_obj.batch_size, + parse_record_fn=imagenet_preprocessing.get_parse_record_fn( + use_keras_image_data_format=use_keras_image_data_format), + datasets_num_private_threads=flags_obj.datasets_num_private_threads, + dtype=dtype, + drop_remainder=drop_remainder, + tf_data_experimental_slack=flags_obj.tf_data_experimental_slack, + training_dataset_cache=flags_obj.training_dataset_cache, + ) + + eval_input_dataset = None + if not flags_obj.skip_eval: + eval_input_dataset = input_fn( + is_training=False, + data_dir=flags_obj.data_dir, + batch_size=flags_obj.batch_size, + parse_record_fn=imagenet_preprocessing.get_parse_record_fn( + use_keras_image_data_format=use_keras_image_data_format), + dtype=dtype, + drop_remainder=drop_remainder) + + lr_schedule = common.PiecewiseConstantDecayWithWarmup( + batch_size=flags_obj.batch_size, + epoch_size=imagenet_preprocessing.NUM_IMAGES['train'], + warmup_epochs=common.LR_SCHEDULE[0][1], + boundaries=list(p[1] for p in common.LR_SCHEDULE[1:]), + multipliers=list(p[0] for p in common.LR_SCHEDULE), + compute_lr_on_cpu=True) + steps_per_epoch = ( + imagenet_preprocessing.NUM_IMAGES['train'] // flags_obj.batch_size) + + with strategy_scope: + if flags_obj.optimizer == 'resnet50_default': + optimizer = common.get_optimizer(lr_schedule) + elif flags_obj.optimizer == 'mobilenet_default': + initial_learning_rate = \ + flags_obj.initial_learning_rate_per_sample * flags_obj.batch_size + optimizer = tf.keras.optimizers.SGD( + learning_rate=tf.keras.optimizers.schedules.ExponentialDecay( + initial_learning_rate, + decay_steps=steps_per_epoch * flags_obj.num_epochs_per_decay, + decay_rate=flags_obj.lr_decay_factor, + staircase=True), + momentum=0.9) + if flags_obj.fp16_implementation == 'graph_rewrite': + # Note: when flags_obj.fp16_implementation == "graph_rewrite", dtype as + # determined by flags_core.get_tf_dtype(flags_obj) would be 'float32' + # which will ensure tf.compat.v2.keras.mixed_precision and + # tf.train.experimental.enable_mixed_precision_graph_rewrite do not double + # up. + optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite( + optimizer) + + # TODO(hongkuny): Remove trivial model usage and move it to benchmark. + if flags_obj.use_trivial_model: + model = test_utils.trivial_model(imagenet_preprocessing.NUM_CLASSES) + elif flags_obj.model == 'resnet50_v1.5': + model = resnet_model.resnet50( + num_classes=imagenet_preprocessing.NUM_CLASSES) + elif flags_obj.model == 'mobilenet': + # TODO(kimjaehong): Remove layers attribute when minimum TF version + # support 2.0 layers by default. 
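+ # weights=None builds an untrained MobileNet; weights are loaded separately from --pretrained_filepath below when one is provided.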
+ model = tf.keras.applications.mobilenet.MobileNet( + weights=None, + classes=imagenet_preprocessing.NUM_CLASSES, + layers=tf.keras.layers) + if flags_obj.pretrained_filepath: + model.load_weights(flags_obj.pretrained_filepath) + + if flags_obj.pruning_method == 'polynomial_decay': + if dtype != tf.float32: + raise NotImplementedError( + 'Pruning is currently only supported on dtype=tf.float32.') + pruning_params = { + 'pruning_schedule': + tfmot.sparsity.keras.PolynomialDecay( + initial_sparsity=flags_obj.pruning_initial_sparsity, + final_sparsity=flags_obj.pruning_final_sparsity, + begin_step=flags_obj.pruning_begin_step, + end_step=flags_obj.pruning_end_step, + frequency=flags_obj.pruning_frequency), + } + model = tfmot.sparsity.keras.prune_low_magnitude(model, **pruning_params) + elif flags_obj.pruning_method: + raise NotImplementedError( + 'Only polynomial_decay is currently supported.') + + model.compile( + loss='sparse_categorical_crossentropy', + optimizer=optimizer, + metrics=(['sparse_categorical_accuracy'] + if flags_obj.report_accuracy_metrics else None), + run_eagerly=flags_obj.run_eagerly) + + train_epochs = flags_obj.train_epochs + + callbacks = common.get_callbacks( + pruning_method=flags_obj.pruning_method, + enable_checkpoint_and_export=flags_obj.enable_checkpoint_and_export, + model_dir=flags_obj.model_dir) + + # if mutliple epochs, ignore the train_steps flag. + if train_epochs <= 1 and flags_obj.train_steps: + steps_per_epoch = min(flags_obj.train_steps, steps_per_epoch) + train_epochs = 1 + + num_eval_steps = ( + imagenet_preprocessing.NUM_IMAGES['validation'] // flags_obj.batch_size) + + validation_data = eval_input_dataset + if flags_obj.skip_eval: + # Only build the training graph. This reduces memory usage introduced by + # control flow ops in layers that have different implementations for + # training and inference (e.g., batch norm). + if flags_obj.set_learning_phase_to_train: + # TODO(haoyuzhang): Understand slowdown of setting learning phase when + # not using distribution strategy. + tf.keras.backend.set_learning_phase(1) + num_eval_steps = None + validation_data = None + + if not strategy and flags_obj.explicit_gpu_placement: + # TODO(b/135607227): Add device scope automatically in Keras training loop + # when not using distribition strategy. + no_dist_strat_device = tf.device('/device:GPU:0') + no_dist_strat_device.__enter__() + + history = model.fit(train_input_dataset, + epochs=train_epochs, + steps_per_epoch=steps_per_epoch, + callbacks=callbacks, + validation_steps=num_eval_steps, + validation_data=validation_data, + validation_freq=flags_obj.epochs_between_evals, + verbose=2) + + eval_output = None + if not flags_obj.skip_eval: + eval_output = model.evaluate(eval_input_dataset, + steps=num_eval_steps, + verbose=2) + + if flags_obj.pruning_method: + model = tfmot.sparsity.keras.strip_pruning(model) + if flags_obj.enable_checkpoint_and_export: + if dtype == tf.bfloat16: + logging.warning('Keras model.save does not support bfloat16 dtype.') + else: + # Keras model.save assumes a float32 input designature. 
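+ # The model is exported as a SavedModel under <model_dir>/saved_model, without optimizer state (include_optimizer=False).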
+ export_path = os.path.join(flags_obj.model_dir, 'saved_model') + model.save(export_path, include_optimizer=False) + + if not strategy and flags_obj.explicit_gpu_placement: + no_dist_strat_device.__exit__() + + stats = common.build_stats(history, eval_output, callbacks) + return stats + + +def define_imagenet_keras_flags(): + common.define_keras_flags( + model=True, + optimizer=True, + pretrained_filepath=True) + common.define_pruning_flags() + flags_core.set_defaults() + flags.adopt_module_key_flags(common) + + +def main(_): + model_helpers.apply_clean(flags.FLAGS) + stats = run(flags.FLAGS) + logging.info('Run stats:\n%s', stats) + + +if __name__ == '__main__': + logging.set_verbosity(logging.INFO) + define_imagenet_keras_flags() + app.run(main) diff --git a/models/official/benchmark/models/resnet_imagenet_test.py b/models/official/benchmark/models/resnet_imagenet_test.py new file mode 100644 index 0000000000000000000000000000000000000000..45c35d539ce2d7fcd0df30ed1d520e47e51312fa --- /dev/null +++ b/models/official/benchmark/models/resnet_imagenet_test.py @@ -0,0 +1,249 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Test the keras ResNet model with ImageNet data.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +import tensorflow as tf + +from tensorflow.python.eager import context +from official.benchmark.models import resnet_imagenet_main +from official.utils.testing import integration +from official.vision.image_classification.resnet import imagenet_preprocessing + + +@parameterized.parameters( + "resnet", + # "resnet_polynomial_decay", b/151854314 + "mobilenet", + # "mobilenet_polynomial_decay" b/151854314 +) +class KerasImagenetTest(tf.test.TestCase): + """Unit tests for Keras Models with ImageNet.""" + _default_flags_dict = [ + "-batch_size", "4", + "-train_steps", "1", + "-use_synthetic_data", "true", + "-data_format", "channels_last", + ] + _extra_flags_dict = { + "resnet": [ + "-model", "resnet50_v1.5", + "-optimizer", "resnet50_default", + ], + "resnet_polynomial_decay": [ + "-model", "resnet50_v1.5", + "-optimizer", "resnet50_default", + "-pruning_method", "polynomial_decay", + ], + "mobilenet": [ + "-model", "mobilenet", + "-optimizer", "mobilenet_default", + ], + "mobilenet_polynomial_decay": [ + "-model", "mobilenet", + "-optimizer", "mobilenet_default", + "-pruning_method", "polynomial_decay", + ], + } + _tempdir = None + + @classmethod + def setUpClass(cls): # pylint: disable=invalid-name + super(KerasImagenetTest, cls).setUpClass() + resnet_imagenet_main.define_imagenet_keras_flags() + + def setUp(self): + super(KerasImagenetTest, self).setUp() + imagenet_preprocessing.NUM_IMAGES["validation"] = 4 + self.policy = \ + tf.keras.mixed_precision.experimental.global_policy() + + def tearDown(self): + 
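+ # Remove the temp model dir and restore the saved global mixed-precision policy.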
super(KerasImagenetTest, self).tearDown() + tf.io.gfile.rmtree(self.get_temp_dir()) + tf.keras.mixed_precision.experimental.set_policy(self.policy) + + def get_extra_flags_dict(self, flags_key): + return self._extra_flags_dict[flags_key] + self._default_flags_dict + + def test_end_to_end_no_dist_strat(self, flags_key): + """Test Keras model with 1 GPU, no distribution strategy.""" + + extra_flags = [ + "-distribution_strategy", "off", + ] + extra_flags = extra_flags + self.get_extra_flags_dict(flags_key) + + integration.run_synthetic( + main=resnet_imagenet_main.run, + tmp_root=self.get_temp_dir(), + extra_flags=extra_flags + ) + + def test_end_to_end_graph_no_dist_strat(self, flags_key): + """Test Keras model in legacy graph mode with 1 GPU, no dist strat.""" + extra_flags = [ + "-enable_eager", "false", + "-distribution_strategy", "off", + ] + extra_flags = extra_flags + self.get_extra_flags_dict(flags_key) + + integration.run_synthetic( + main=resnet_imagenet_main.run, + tmp_root=self.get_temp_dir(), + extra_flags=extra_flags + ) + + def test_end_to_end_1_gpu(self, flags_key): + """Test Keras model with 1 GPU.""" + + if context.num_gpus() < 1: + self.skipTest( + "{} GPUs are not available for this test. {} GPUs are available". + format(1, context.num_gpus())) + + extra_flags = [ + "-num_gpus", "1", + "-distribution_strategy", "mirrored", + "-enable_checkpoint_and_export", "1", + ] + extra_flags = extra_flags + self.get_extra_flags_dict(flags_key) + + integration.run_synthetic( + main=resnet_imagenet_main.run, + tmp_root=self.get_temp_dir(), + extra_flags=extra_flags + ) + + def test_end_to_end_1_gpu_fp16(self, flags_key): + """Test Keras model with 1 GPU and fp16.""" + + if context.num_gpus() < 1: + self.skipTest( + "{} GPUs are not available for this test. {} GPUs are available" + .format(1, context.num_gpus())) + + extra_flags = [ + "-num_gpus", "1", + "-dtype", "fp16", + "-distribution_strategy", "mirrored", + ] + extra_flags = extra_flags + self.get_extra_flags_dict(flags_key) + + if "polynomial_decay" in extra_flags: + self.skipTest("Pruning with fp16 is not currently supported.") + + integration.run_synthetic( + main=resnet_imagenet_main.run, + tmp_root=self.get_temp_dir(), + extra_flags=extra_flags + ) + + def test_end_to_end_2_gpu(self, flags_key): + """Test Keras model with 2 GPUs.""" + + if context.num_gpus() < 2: + self.skipTest( + "{} GPUs are not available for this test. {} GPUs are available". + format(2, context.num_gpus())) + + extra_flags = [ + "-num_gpus", "2", + "-distribution_strategy", "mirrored", + ] + extra_flags = extra_flags + self.get_extra_flags_dict(flags_key) + + integration.run_synthetic( + main=resnet_imagenet_main.run, + tmp_root=self.get_temp_dir(), + extra_flags=extra_flags + ) + + def test_end_to_end_xla_2_gpu(self, flags_key): + """Test Keras model with XLA and 2 GPUs.""" + + if context.num_gpus() < 2: + self.skipTest( + "{} GPUs are not available for this test. {} GPUs are available". + format(2, context.num_gpus())) + + extra_flags = [ + "-num_gpus", "2", + "-enable_xla", "true", + "-distribution_strategy", "mirrored", + ] + extra_flags = extra_flags + self.get_extra_flags_dict(flags_key) + + integration.run_synthetic( + main=resnet_imagenet_main.run, + tmp_root=self.get_temp_dir(), + extra_flags=extra_flags + ) + + def test_end_to_end_2_gpu_fp16(self, flags_key): + """Test Keras model with 2 GPUs and fp16.""" + + if context.num_gpus() < 2: + self.skipTest( + "{} GPUs are not available for this test. {} GPUs are available". 
+ format(2, context.num_gpus())) + + extra_flags = [ + "-num_gpus", "2", + "-dtype", "fp16", + "-distribution_strategy", "mirrored", + ] + extra_flags = extra_flags + self.get_extra_flags_dict(flags_key) + + if "polynomial_decay" in extra_flags: + self.skipTest("Pruning with fp16 is not currently supported.") + + integration.run_synthetic( + main=resnet_imagenet_main.run, + tmp_root=self.get_temp_dir(), + extra_flags=extra_flags + ) + + def test_end_to_end_xla_2_gpu_fp16(self, flags_key): + """Test Keras model with XLA, 2 GPUs and fp16.""" + if context.num_gpus() < 2: + self.skipTest( + "{} GPUs are not available for this test. {} GPUs are available". + format(2, context.num_gpus())) + + extra_flags = [ + "-num_gpus", "2", + "-dtype", "fp16", + "-enable_xla", "true", + "-distribution_strategy", "mirrored", + ] + extra_flags = extra_flags + self.get_extra_flags_dict(flags_key) + + if "polynomial_decay" in extra_flags: + self.skipTest("Pruning with fp16 is not currently supported.") + + integration.run_synthetic( + main=resnet_imagenet_main.run, + tmp_root=self.get_temp_dir(), + extra_flags=extra_flags + ) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/benchmark/models/resnet_imagenet_test_tpu.py b/models/official/benchmark/models/resnet_imagenet_test_tpu.py new file mode 100644 index 0000000000000000000000000000000000000000..7fd72c404139b723407cc9a68c8afddd158ed691 --- /dev/null +++ b/models/official/benchmark/models/resnet_imagenet_test_tpu.py @@ -0,0 +1,105 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Test the Keras ResNet model with ImageNet data on TPU.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +import tensorflow as tf +from official.benchmark.models import resnet_imagenet_main +from official.utils.testing import integration +from official.vision.image_classification.resnet import imagenet_preprocessing + + +class KerasImagenetTest(tf.test.TestCase, parameterized.TestCase): + """Unit tests for Keras Models with ImageNet.""" + + _extra_flags_dict = { + "resnet": [ + "-batch_size", "4", + "-train_steps", "1", + "-use_synthetic_data", "true", + "-model", "resnet50_v1.5", + "-optimizer", "resnet50_default", + ], + "resnet_polynomial_decay": [ + "-batch_size", "4", + "-train_steps", "1", + "-use_synthetic_data", "true", + "-model", "resnet50_v1.5", + "-optimizer", "resnet50_default", + "-pruning_method", "polynomial_decay", + ], + } + _tempdir = None + + @classmethod + def setUpClass(cls): # pylint: disable=invalid-name + super(KerasImagenetTest, cls).setUpClass() + resnet_imagenet_main.define_imagenet_keras_flags() + + def setUp(self): + super(KerasImagenetTest, self).setUp() + imagenet_preprocessing.NUM_IMAGES["validation"] = 4 + self.policy = \ + tf.keras.mixed_precision.experimental.global_policy() + + def tearDown(self): + super(KerasImagenetTest, self).tearDown() + tf.io.gfile.rmtree(self.get_temp_dir()) + tf.keras.mixed_precision.experimental.set_policy(self.policy) + + @parameterized.parameters([ + "resnet", + # "resnet_polynomial_decay" b/151854314 + ]) + def test_end_to_end_tpu(self, flags_key): + """Test Keras model with TPU distribution strategy.""" + + extra_flags = [ + "-distribution_strategy", "tpu", + "-data_format", "channels_last", + "-enable_checkpoint_and_export", "1", + ] + extra_flags = extra_flags + self._extra_flags_dict[flags_key] + + integration.run_synthetic( + main=resnet_imagenet_main.run, + tmp_root=self.get_temp_dir(), + extra_flags=extra_flags + ) + + @parameterized.parameters(["resnet"]) + def test_end_to_end_tpu_bf16(self, flags_key): + """Test Keras model with TPU and bfloat16 activation.""" + + extra_flags = [ + "-distribution_strategy", "tpu", + "-data_format", "channels_last", + "-dtype", "bf16", + ] + extra_flags = extra_flags + self._extra_flags_dict[flags_key] + + integration.run_synthetic( + main=resnet_imagenet_main.run, + tmp_root=self.get_temp_dir(), + extra_flags=extra_flags + ) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/benchmark/models/shakespeare/README.md b/models/official/benchmark/models/shakespeare/README.md new file mode 100644 index 0000000000000000000000000000000000000000..5395cc9642845ffb8bf36fdbc4f93bb450ba557f --- /dev/null +++ b/models/official/benchmark/models/shakespeare/README.md @@ -0,0 +1,31 @@ +# Shakespeare character LSTM model + +This is an implementation of a simple character LSTM used to generate text. + +## Instructions + +First download the source data: + +``` +wget https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt +``` + +Note that files other than shakespeare.txt can also be used to train the model to generate other text. + +Then train the model: + +```python +python3 shakespeare_main.py --training_data shakespeare.txt \ + --model_dir /tmp/shakespeare +``` + +This will place model checkpoints in `/tmp/shakespeare`, so that we can use them to make predictions.
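Before generating text, it can be worth confirming that a checkpoint was actually written; a quick check (illustrative only) is:

```python
import tensorflow as tf

# Expect a path such as /tmp/shakespeare/ckpt_43 once training has finished
# (the trainer writes weight-only checkpoints named ckpt_<epoch>).
print(tf.train.latest_checkpoint("/tmp/shakespeare"))
```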
+ +Then generate predictions: + +```python +python3 shakespeare_main.py --training_data shakespeare.txt \ + --model_dir /tmp/shakespeare --notrain --predict_context=ROMEO: +``` + +Change `--predict_context` and `--predict_length` to suit your needs. diff --git a/models/official/benchmark/models/shakespeare/__init__.py b/models/official/benchmark/models/shakespeare/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/models/official/benchmark/models/shakespeare/__init__.py @@ -0,0 +1 @@ + diff --git a/models/official/benchmark/models/shakespeare/shakespeare_main.py b/models/official/benchmark/models/shakespeare/shakespeare_main.py new file mode 100644 index 0000000000000000000000000000000000000000..6928dd1d61491acf84b969a52c7f0693617ac7f0 --- /dev/null +++ b/models/official/benchmark/models/shakespeare/shakespeare_main.py @@ -0,0 +1,313 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Runs a character LSTM model trained on Shakespeare.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import os + +# pylint: disable=wrong-import-order +from absl import app +from absl import flags +import numpy as np +import tensorflow as tf +# pylint: enable=wrong-import-order + +from official.utils.flags import core as flags_core +from official.utils.misc import distribution_utils +from official.utils.misc import keras_utils + +EMBEDDING_DIM = 256 +RNN_UNITS = 1024 +SEQ_LENGTH = 100 +# Calculated by running batch_size=1 +BATCHES_PER_EPOCH = 11043 + + +def define_flags(): + """Define the flags for the Shakespeare character LSTM.""" + flags_core.define_base(data_dir=False, + clean=False, + train_epochs=True, + epochs_between_evals=False, + stop_threshold=False, + num_gpu=True, + export_dir=False, + run_eagerly=True, + distribution_strategy=True) + + flags_core.define_performance(num_parallel_calls=False, + inter_op=False, + intra_op=False, + synthetic_data=False, + max_train_steps=False, + dtype=True, + loss_scale=True, + enable_xla=True) + + flags_core.set_defaults(train_epochs=43, + batch_size=64) + + flags.DEFINE_boolean(name='enable_eager', default=True, help='Enable eager?') + flags.DEFINE_boolean( + name='train', default=True, + help='If true trains the model.') + flags.DEFINE_string( + name='predict_context', default=None, + help='If set, makes a prediction with the given context.') + flags.DEFINE_integer( + name='predict_length', default=1000, + help='Length of the predicted text including the context.') + flags.DEFINE_integer(name='train_steps', default=None, + help='Overrides train_steps per epoch if not None.') + flags.DEFINE_integer( + name='log_steps', default=100, + help='For every log_steps, we log the timing information such as ' + 'examples per second.') + 
flags.DEFINE_string( + name='training_data', default=None, + help='Path to file containing the training data.') + flags.DEFINE_boolean(name='cudnn', default=True, help='Use CuDNN LSTM.') + + +def get_dataset(path_to_file, batch_size=None, seq_length=SEQ_LENGTH): + """Creates a dataset from a given text file. + + Args: + path_to_file: The path to the training data. + batch_size: Batch size to use. + seq_length: The length of the LSTM sequence. + + Returns: + A tuple, consisting of the Dataset and the class to character mapping + and character to class mapping. + """ + with tf.io.gfile.GFile(path_to_file, 'rb') as train_data: + text = train_data.read().decode(encoding='utf-8') + + # Create vocab + vocab = sorted(set(text)) + char2idx = {u: i for i, u in enumerate(vocab)} + idx2char = np.array(vocab) + + # Split text into sequence length + 1 chucks to create examples + text_as_int = np.array([char2idx[c] for c in text]) + char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int) + sequences = char_dataset.batch(seq_length+1, drop_remainder=True) + + def split_input_target(chunk): + input_text = chunk[:-1] + target_text = chunk[1:] + return input_text, tf.one_hot(target_text, len(vocab)) + dataset = sequences.map(split_input_target) + dataset = dataset.shuffle(10000).repeat() + dataset = dataset.batch(batch_size, drop_remainder=True) + + return dataset, idx2char, char2idx + + +def build_model(vocab_size, + embedding_dim=EMBEDDING_DIM, + rnn_units=RNN_UNITS, + batch_size=None, + stateful=False, + use_cudnn=True): + """Builds the Shakespeare model. + + Args: + vocab_size: The number of character classes in the input. + embedding_dim: The dimension of the embedding space for each class. + rnn_units: The number of RNN units in the layer. + batch_size: When predicting, the batch size of the predictions. + stateful: If true, the LSTM is stateful. + + Returns: + A Keras Model. + """ + LSTM = functools.partial(tf.keras.layers.LSTM, implementation=2) + + # By indirecting the activation through a lambda layer, the logic to dispatch + # to CuDNN in V2 doesn't trigger and we force the LSTM to run in non-CuDNN + # mode. + lstm_activation = ('tanh' if use_cudnn else + lambda x: tf.math.tanh(x)) + + batch_shape = [batch_size if stateful else None, None] + return tf.keras.Sequential([ + tf.keras.layers.Embedding(vocab_size, embedding_dim, + batch_input_shape=batch_shape), + LSTM(rnn_units, + activation=lstm_activation, + return_sequences=True, + stateful=stateful, + recurrent_initializer='glorot_uniform'), + tf.keras.layers.Dense(vocab_size), + tf.keras.layers.Softmax(dtype=tf.float32)]) + + +def train_model(flags_obj, dataset, vocab_size, strategy, checkpoint_dir=None): + """Trains a Shakespeare model. + + Args: + flags_obj: An object containing parsed flag values.s + dataset: the training data set. + vocab_size: the number of unique character classes. + strategy: distribution strategy to use. + checkpoint_dir: if not None, the directory in which to make checkpoints. + + Returns: + The training history and callbacks. + """ + if flags_obj.train_steps: + train_steps = flags_obj.train_steps + else: + train_steps = BATCHES_PER_EPOCH // flags_obj.batch_size + strategy_scope = distribution_utils.get_strategy_scope(strategy) + + with strategy_scope: + model = build_model(vocab_size=vocab_size, batch_size=flags_obj.batch_size, + use_cudnn=flags_obj.cudnn) + + # When keras_use_ctl is False, Model.fit() automatically applies + # loss scaling so we don't need to create a LossScaleOptimizer. 
+ model.compile( + optimizer=tf.keras.optimizers.Adam(), + loss=tf.keras.losses.CategoricalCrossentropy(), + metrics=[tf.keras.metrics.Recall(top_k=1, name='RecallAt1'), + tf.keras.metrics.Recall(top_k=5, name='RecallAt5')], + run_eagerly=flags_obj.run_eagerly) + + callbacks = [] + if checkpoint_dir: + checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt_{epoch}') + checkpoint_callback = tf.keras.callbacks.ModelCheckpoint( + filepath=checkpoint_prefix, + save_weights_only=True) + callbacks.append(checkpoint_callback) + time_callback = keras_utils.TimeHistory(flags_obj.batch_size, + flags_obj.log_steps) + callbacks.append(time_callback) + history = model.fit(dataset, + epochs=flags_obj.train_epochs, + steps_per_epoch=train_steps, + callbacks=callbacks, + verbose=2) + return history, callbacks + + +def make_prediction(checkpoint_dir, length, context, idx2char, char2idx): + """Make predictions from a Shakespeare model. + + Args: + checkpoint_dir: the directory from which to load checkpoints + length: the total length of the generated text (including the context). + context: the initial text with which the LSTM is primed. + idx2char: the character class to character mapping. + char2idx: the character to character class mapping. + + Returns: + A generated string of text of the given length. + """ + prediction_model = build_model( + vocab_size=len(idx2char), batch_size=1, stateful=True) + prediction_model.load_weights(tf.train.latest_checkpoint(checkpoint_dir)) + prediction_model.build(tf.TensorShape([1, None])) + + input_eval = [char2idx[s] for s in context] + input_eval = tf.expand_dims(input_eval, 0) + + text_generated = [] + + prediction_model.reset_states() + for _ in range(length - len(context)): + predictions = prediction_model(input_eval) + predictions = tf.squeeze(predictions, 0) + + # We applied a softmax to the output of the model so that + # tf.keras.metrics.Recall would work. We need logits for + # tf.random.categorical, so we convert the probabilities back to log odds + predictions = tf.math.log(predictions / (1 - predictions)) + + random_output = tf.random.categorical(predictions, num_samples=1) + selected_id = random_output[-1, 0].numpy() + input_eval = tf.expand_dims([selected_id], 0) + text_generated.append(idx2char[selected_id]) + + return context + ''.join(text_generated) + + +def run(flags_obj): + """Run Shakespeare training and predict. + + Args: + flags_obj: An object containing parsed flag values. + + Returns: + Dictionary with status from the run. + """ + if not flags_obj.training_data: + raise ValueError( + 'Must set the path to a training data file. 
e.g download the following ' + 'https://storage.googleapis.com/download.tensorflow.org/data/' + 'shakespeare.txt') + + if flags_obj.dtype == 'fp16': + policy = tf.keras.mixed_precision.experimental.Policy( + 'mixed_float16', + loss_scale=flags_core.get_loss_scale(flags_obj, + default_for_fp16='dynamic')) + tf.keras.mixed_precision.experimental.set_policy(policy) + + keras_utils.set_session_config( + enable_xla=flags_obj.enable_xla) + + strategy = distribution_utils.get_distribution_strategy( + distribution_strategy=flags_obj.distribution_strategy, + num_gpus=flags_obj.num_gpus) + + dataset, idx2char, char2idx = get_dataset(flags_obj.training_data, + batch_size=flags_obj.batch_size) + stats = {} + if flags_obj.train: + history, callbacks = train_model(flags_obj, dataset, + len(idx2char), strategy, + checkpoint_dir=flags_obj.model_dir) + + stats['history'] = history.history + stats['callbacks'] = callbacks + + if flags_obj.predict_context: + if not flags_obj.model_dir: + raise ValueError('Must set model_dir to get predictions.') + print(make_prediction(flags_obj.model_dir, + flags_obj.predict_length, + flags_obj.predict_context, + idx2char, + char2idx)) + + return stats + + +def main(_): + flags_obj = flags.FLAGS + run(flags_obj) + + +if __name__ == '__main__': + define_flags() + app.run(main) diff --git a/models/official/benchmark/models/synthetic_util.py b/models/official/benchmark/models/synthetic_util.py new file mode 100644 index 0000000000000000000000000000000000000000..c14d0223dc417e6b0bd220f65dc3db0291bb773c --- /dev/null +++ b/models/official/benchmark/models/synthetic_util.py @@ -0,0 +1,129 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Helper functions to generate data directly on devices.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import random +import string + +from absl import logging +import tensorflow as tf + + +# The `SyntheticDataset` is a temporary solution for generating synthetic data +# directly on devices. It is only useful for Keras with Distribution +# Strategies. We will have better support in `tf.data` or Distribution Strategy +# later. +class SyntheticDataset(object): + """A dataset that generates synthetic data on each device.""" + + def __init__(self, dataset, split_by=1): + # dataset.take(1) doesn't have GPU kernel. 
+    with tf.device('device:CPU:0'): + tensor = tf.data.experimental.get_single_element(dataset.take(1)) + flat_tensor = tf.nest.flatten(tensor) + variable_data = [] + initializers = [] + for t in flat_tensor: + rebatched_t = tf.split(t, num_or_size_splits=split_by, axis=0)[0] + assert rebatched_t.shape.is_fully_defined(), rebatched_t.shape + v = tf.compat.v1.get_local_variable(self._random_name(), + initializer=rebatched_t) + variable_data.append(v) + initializers.append(v.initializer) + input_data = tf.nest.pack_sequence_as(tensor, variable_data) + self._iterator = SyntheticIterator(input_data, initializers) + + def _random_name(self, size=10, chars=string.ascii_uppercase + string.digits): + return ''.join(random.choice(chars) for _ in range(size)) + + def __iter__(self): + return self._iterator + + def make_one_shot_iterator(self): + return self._iterator + + def make_initializable_iterator(self): + return self._iterator + + +class SyntheticIterator(object): + """An iterator that repeatedly returns the same device-local synthetic batch.""" + + def __init__(self, input_data, initializers): + self._input_data = input_data + self._initializers = initializers + + def get_next(self): + return self._input_data + + def next(self): + return self.__next__() + + def __next__(self): + try: + return self.get_next() + except tf.errors.OutOfRangeError: + raise StopIteration + + def initialize(self): + if tf.executing_eagerly(): + return tf.no_op() + else: + return self._initializers + + +def _monkey_patch_dataset_method(strategy): + """Monkey-patch `strategy`'s `make_dataset_iterator` method.""" + def make_dataset(self, dataset): + logging.info('Using pure synthetic data.') + with self.scope(): + if self.extended._global_batch_size: # pylint: disable=protected-access + return SyntheticDataset(dataset, self.num_replicas_in_sync) + else: + return SyntheticDataset(dataset) + + def make_iterator(self, dataset): + dist_dataset = make_dataset(self, dataset) + return iter(dist_dataset) + + strategy.orig_make_dataset_iterator = strategy.make_dataset_iterator + strategy.make_dataset_iterator = make_iterator + strategy.orig_distribute_dataset = strategy.experimental_distribute_dataset + strategy.experimental_distribute_dataset = make_dataset + + +def _undo_monkey_patch_dataset_method(strategy): + if hasattr(strategy, 'orig_make_dataset_iterator'): + strategy.make_dataset_iterator = strategy.orig_make_dataset_iterator + if hasattr(strategy, 'orig_distribute_dataset'): + strategy.experimental_distribute_dataset = strategy.orig_distribute_dataset + + +def set_up_synthetic_data(): + _monkey_patch_dataset_method(tf.distribute.OneDeviceStrategy) + _monkey_patch_dataset_method(tf.distribute.MirroredStrategy) + _monkey_patch_dataset_method( + tf.distribute.experimental.MultiWorkerMirroredStrategy) + + +def undo_set_up_synthetic_data(): + _undo_monkey_patch_dataset_method(tf.distribute.OneDeviceStrategy) + _undo_monkey_patch_dataset_method(tf.distribute.MirroredStrategy) + _undo_monkey_patch_dataset_method( + tf.distribute.experimental.MultiWorkerMirroredStrategy) diff --git a/models/official/benchmark/ncf_keras_benchmark.py b/models/official/benchmark/ncf_keras_benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..170c99a33f46f14f977182c4e8a6d7ffbf96682d --- /dev/null +++ b/models/official/benchmark/ncf_keras_benchmark.py @@ -0,0 +1,488 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
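The `synthetic_util` module above is driven entirely through `set_up_synthetic_data()` / `undo_set_up_synthetic_data()`. A minimal usage sketch, assuming a Keras-style input pipeline (the dataset, shapes, and variable names below are placeholders, not part of this change):

```python
# Illustrative only: after set_up_synthetic_data(), a dataset distributed by a
# patched strategy is replaced with a SyntheticDataset that replays one batch
# cached in local variables, removing input-pipeline cost from the benchmark.
import tensorflow as tf
from official.benchmark.models import synthetic_util

synthetic_util.set_up_synthetic_data()
try:
  strategy = tf.distribute.MirroredStrategy()
  # drop_remainder=True keeps the batch shape fully defined, which the
  # SyntheticDataset constructor asserts on.
  dataset = tf.data.Dataset.from_tensor_slices(
      (tf.zeros([64, 32]), tf.zeros([64, 1]))).batch(16, drop_remainder=True)
  # Because of the monkey patch, this returns a SyntheticDataset rather than a
  # regular distributed dataset, so every step reads the same cached batch.
  synthetic_inputs = strategy.experimental_distribute_dataset(dataset)
finally:
  synthetic_util.undo_set_up_synthetic_data()
```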
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Executes Keras benchmarks and accuracy tests.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import time + +from absl import flags +from absl import logging +from absl.testing import flagsaver +import tensorflow as tf +from official.benchmark import benchmark_wrappers +from official.benchmark import owner_utils +from official.benchmark.perfzero_benchmark import PerfZeroBenchmark +from official.recommendation import ncf_common +from official.recommendation import ncf_keras_main +from official.utils.flags import core + +FLAGS = flags.FLAGS +NCF_DATA_DIR_NAME = 'movielens_data' +NCF_TF_REGRESSION_DATA_DIR_NAME = 'gs://tf-regression/ncf/data' + + +class NCFKerasBenchmarkBase(PerfZeroBenchmark): + """Base class for NCF model benchmark.""" + + def __init__(self, output_dir=None, default_flags=None, **kwargs): + super(NCFKerasBenchmarkBase, self).__init__(output_dir, default_flags, + **kwargs) + + # Run all benchmarks with ml_perf flag. + self.default_flags['ml_perf'] = True + + def _setup(self): + """Sets up and resets flags before each test.""" + logging.set_verbosity(logging.INFO) + if NCFKerasBenchmarkBase.local_flags is None: + ncf_common.define_ncf_flags() + # Loads flags to get defaults to then override. List cannot be empty. 
+ flags.FLAGS(['foo']) + core.set_defaults(**self.default_flags) + saved_flag_values = flagsaver.save_flag_values() + NCFKerasBenchmarkBase.local_flags = saved_flag_values + else: + flagsaver.restore_flag_values(NCFKerasBenchmarkBase.local_flags) + + @benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark(self, hr_at_10_min=0, hr_at_10_max=0): + start_time_sec = time.time() + stats = ncf_keras_main.run_ncf(FLAGS) + wall_time_sec = time.time() - start_time_sec + + metrics = [] + metrics.append({'name': 'exp_per_second', + 'value': stats['avg_exp_per_second']}) + + if hr_at_10_min > 0: + metrics.append({'name': 'hr_at_10', + 'value': stats['eval_hit_rate'], + 'min_value': hr_at_10_min, + 'max_value': hr_at_10_max}) + + metrics.append({'name': 'train_loss', + 'value': stats['loss']}) + + self.report_benchmark(iters=-1, wall_time=wall_time_sec, metrics=metrics) + + +class NCFKerasAccuracy(NCFKerasBenchmarkBase): + """Benchmark NCF model using real data.""" + + def __init__(self, + output_dir=None, + root_data_dir=None, + default_flags=None, + **kwargs): + root_data_dir = root_data_dir if root_data_dir else '' + default_flags = {} + default_flags['dataset'] = 'ml-20m' + default_flags['num_gpus'] = 1 + default_flags['train_epochs'] = 10 + default_flags['clean'] = True + default_flags['batch_size'] = 99000 + default_flags['learning_rate'] = 0.00382059 + default_flags['beta1'] = 0.783529 + default_flags['beta2'] = 0.909003 + default_flags['epsilon'] = 1.45439e-07 + default_flags['layers'] = [256, 256, 128, 64] + default_flags['num_factors'] = 64 + default_flags['hr_threshold'] = 0.635 + default_flags['ml_perf'] = True + default_flags['use_synthetic_data'] = False + default_flags['data_dir'] = os.path.join(root_data_dir, NCF_DATA_DIR_NAME) + + super(NCFKerasAccuracy, self).__init__( + output_dir=output_dir, + default_flags=default_flags, + **kwargs) + + def _run_and_report_benchmark_mlperf_like(self): + """Run test and report results. + + Note: MLPerf like tests are not tuned to hit a specific hr@10 value, but + we want it recorded. + """ + self._run_and_report_benchmark(hr_at_10_min=0.61) + + def _run_and_report_benchmark(self, hr_at_10_min=0.630, hr_at_10_max=0.645): + """Run test and report results. + + Note: Target is 0.635, but some runs are below that level. Until we have + multi-run tests, we have to accept a lower target. + + Args: + hr_at_10_min: Minimum acceptable hr@10 value. + hr_at_10_max: Maximum acceptable hr@10 value. 
+ """ + super(NCFKerasAccuracy, self)._run_and_report_benchmark( + hr_at_10_min=hr_at_10_min, + hr_at_10_max=hr_at_10_max) + + def _set_8_gpu_defaults(self): + FLAGS.num_gpus = 8 + FLAGS.learning_rate = 0.0045 + FLAGS.beta1 = 0.25 + FLAGS.beta2 = 0.5 + FLAGS.epsilon = 1e-8 + FLAGS.train_epochs = 14 + FLAGS.batch_size = 99000 + FLAGS.eval_batch_size = 160000 + FLAGS.train_dataset_path = os.path.join(NCF_TF_REGRESSION_DATA_DIR_NAME, + 'training_cycle_*/*') + FLAGS.eval_dataset_path = os.path.join(NCF_TF_REGRESSION_DATA_DIR_NAME, + 'eval_data/*') + FLAGS.input_meta_data_path = os.path.join(NCF_TF_REGRESSION_DATA_DIR_NAME, + 'metadata') + FLAGS.data_dir = NCF_TF_REGRESSION_DATA_DIR_NAME + + def benchmark_1_gpu_early_stop(self): + self._setup() + FLAGS.early_stopping = True + self._run_and_report_benchmark() + + def benchmark_1_gpu_no_dist_strat_early_stop(self): + self._setup() + FLAGS.distribution_strategy = 'off' + FLAGS.early_stopping = True + self._run_and_report_benchmark() + + def benchmark_1_gpu_no_dist_strat_run_eagerly_early_stop(self): + self._setup() + FLAGS.distribution_strategy = 'off' + FLAGS.early_stopping = True + FLAGS.run_eagerly = True + self._run_and_report_benchmark() + + def benchmark_xla_1_gpu_early_stop(self): + self._setup() + FLAGS.early_stopping = True + FLAGS.enable_xla = True + self._run_and_report_benchmark() + + def benchmark_1_gpu_ctl_early_stop(self): + self._setup() + FLAGS.keras_use_ctl = True + FLAGS.early_stopping = True + self._run_and_report_benchmark() + + def benchmark_1_gpu_ctl_run_eagerly_early_stop(self): + self._setup() + FLAGS.keras_use_ctl = True + FLAGS.early_stopping = True + FLAGS.run_eagerly = True + self._run_and_report_benchmark() + + def benchmark_xla_1_gpu_ctl_early_stop(self): + self._setup() + FLAGS.keras_use_ctl = True + FLAGS.early_stopping = True + FLAGS.enable_xla = True + self._run_and_report_benchmark() + + def benchmark_2_gpus_early_stop(self): + self._setup() + FLAGS.early_stopping = True + FLAGS.num_gpus = 2 + FLAGS.eval_batch_size = 160000 + self._run_and_report_benchmark() + + def benchmark_2_gpus_ctl_early_stop(self): + """NCF with custom training loop. Works only in TF 2.0.""" + self._setup() + FLAGS.keras_use_ctl = True + FLAGS.early_stopping = True + FLAGS.num_gpus = 2 + FLAGS.eval_batch_size = 160000 + self._run_and_report_benchmark() + +############################################# +# Tests below with mlperf in the test name are of two types: +# 1) 1 GPU tests are based on MLPerf 0.5 and the TensorFlow pulled submission. +# 2) 8 GPU tests are based on MLPerf 0.5 and use NVIDIA's hyper parameters. +# +# The purpose of both is to get a number to compare to existing results. To do +# this the number of epochs is held constant rather than a race to a given +# accuracy. The accuracy validation is done by the "early_stop" tests. 
+############################################# + + def benchmark_1_gpu_mlperf_like(self): + """1 GPU using keras fit/compile.""" + self._setup() + FLAGS.train_epochs = 7 + self._run_and_report_benchmark_mlperf_like() + + def benchmark_1_gpu_no_dist_strat_mlperf_like(self): + """1 GPU using compile/fit without dist_strat.""" + self._setup() + FLAGS.train_epochs = 7 + FLAGS.distribution_strategy = 'off' + self._run_and_report_benchmark_mlperf_like() + + def benchmark_1_gpu_no_dist_strat_run_eagerly_mlperf_like(self): + self._setup() + FLAGS.train_epochs = 7 + FLAGS.distribution_strategy = 'off' + FLAGS.run_eagerly = True + self._run_and_report_benchmark_mlperf_like() + + def benchmark_xla_1_gpu_mlperf_like(self): + """1 GPU using compile/fit with XLA.""" + self._setup() + FLAGS.train_epochs = 7 + FLAGS.enable_xla = True + self._run_and_report_benchmark_mlperf_like() + + def benchmark_1_gpu_ctl_mlperf_like(self): + """1 GPU using CTL.""" + self._setup() + FLAGS.keras_use_ctl = True + FLAGS.train_epochs = 7 + self._run_and_report_benchmark_mlperf_like() + + def benchmark_1_gpu_ctl_fp16_mlperf_like(self): + """1 GPU using CTL and FP16.""" + self._setup() + FLAGS.keras_use_ctl = True + FLAGS.train_epochs = 7 + FLAGS.dtype = 'fp16' + FLAGS.loss_scale = 8192 + self._run_and_report_benchmark_mlperf_like() + + def benchmark_1_gpu_fp16_mlperf_like(self): + """1 GPU using FP16.""" + self._setup() + FLAGS.train_epochs = 7 + FLAGS.dtype = 'fp16' + FLAGS.loss_scale = 8192 + self._run_and_report_benchmark_mlperf_like() + + def benchmark_1_gpu_ctl_fp16_graph_rewrite_mlperf_like(self): + """1 GPU using CTL and FP16 graph rewrite.""" + self._setup() + FLAGS.keras_use_ctl = True + FLAGS.train_epochs = 7 + FLAGS.dtype = 'fp16' + FLAGS.fp16_implementation = 'graph_rewrite' + FLAGS.loss_scale = 8192 + self._run_and_report_benchmark_mlperf_like() + + def benchmark_1_gpu_fp16_graph_rewrite_mlperf_like(self): + """1 GPU using FP16 graph rewrite.""" + self._setup() + FLAGS.train_epochs = 7 + FLAGS.dtype = 'fp16' + FLAGS.fp16_implementation = 'graph_rewrite' + FLAGS.loss_scale = 8192 + self._run_and_report_benchmark_mlperf_like() + + def benchmark_1_gpu_ctl_run_eagerly_mlperf_like(self): + """1 GPU using CTL with eager and distribution strategy.""" + self._setup() + FLAGS.keras_use_ctl = True + FLAGS.run_eagerly = True + FLAGS.train_epochs = 7 + self._run_and_report_benchmark() + + def benchmark_xla_1_gpu_ctl_mlperf_like(self): + """1 GPU using CTL with XLA.""" + self._setup() + FLAGS.keras_use_ctl = True + FLAGS.enable_xla = True + FLAGS.train_epochs = 7 + self._run_and_report_benchmark_mlperf_like() + + def benchmark_xla_1_gpu_fp16_mlperf_like(self): + """1 GPU using with XLA and FP16.""" + self._setup() + FLAGS.enable_xla = True + FLAGS.train_epochs = 7 + FLAGS.dtype = 'fp16' + FLAGS.loss_scale = 8192 + self._run_and_report_benchmark_mlperf_like() + + def benchmark_xla_1_gpu_ctl_fp16_mlperf_like(self): + """1 GPU using CTL with XLA and FP16.""" + self._setup() + FLAGS.keras_use_ctl = True + FLAGS.enable_xla = True + FLAGS.train_epochs = 7 + FLAGS.dtype = 'fp16' + FLAGS.loss_scale = 8192 + self._run_and_report_benchmark_mlperf_like() + + def benchmark_8_gpu_mlperf_like(self): + """8 GPU using keras fit/compile.""" + self._setup() + FLAGS.num_gpus = 8 + FLAGS.train_epochs = 17 + FLAGS.batch_size = 1048576 + FLAGS.eval_batch_size = 160000 + FLAGS.learning_rate = 0.0045 + FLAGS.beta1 = 0.25 + FLAGS.beta2 = 0.5 + FLAGS.epsilon = 1e-8 + self._run_and_report_benchmark_mlperf_like() + + def 
benchmark_8_gpu_ctl_mlperf_like(self): + """8 GPU using CTL.""" + self._setup() + FLAGS.keras_use_ctl = True + FLAGS.num_gpus = 8 + FLAGS.train_epochs = 17 + FLAGS.batch_size = 1048576 + FLAGS.eval_batch_size = 160000 + FLAGS.learning_rate = 0.0045 + FLAGS.beta1 = 0.25 + FLAGS.beta2 = 0.5 + FLAGS.epsilon = 1e-8 + self._run_and_report_benchmark_mlperf_like() + + def benchmark_8_gpu_tf_data_ctl_mlperf_like(self): + """8 GPU using CTL.""" + self._setup() + self._set_8_gpu_defaults() + FLAGS.keras_use_ctl = True + self._run_and_report_benchmark_mlperf_like() + + def benchmark_8_gpu_tf_data_fp16_mlperf_like(self): + """8 GPU FP16.""" + self._setup() + self._set_8_gpu_defaults() + FLAGS.dtype = 'fp16' + FLAGS.loss_scale = 8192 + self._run_and_report_benchmark_mlperf_like() + + def benchmark_8_gpu_tf_data_ctl_fp16_mlperf_like(self): + """8 GPU FP16 using CTL.""" + self._setup() + self._set_8_gpu_defaults() + FLAGS.keras_use_ctl = True + FLAGS.dtype = 'fp16' + FLAGS.loss_scale = 8192 + self._run_and_report_benchmark_mlperf_like() + + def benchmark_8_gpu_tf_data_ctl_fp16_graph_rewrite_mlperf_like(self): + """8 GPU FP16 graph rewrite using CTL.""" + self._setup() + self._set_8_gpu_defaults() + FLAGS.keras_use_ctl = True + FLAGS.dtype = 'fp16' + FLAGS.fp16_implementation = 'graph_rewrite' + FLAGS.loss_scale = 8192 + self._run_and_report_benchmark_mlperf_like() + + +class NCFKerasBenchmarkReal(NCFKerasBenchmarkBase): + """NCF Keras throughput benchmarks.""" + + def __init__(self, + output_dir=None, + root_data_dir=None, + default_flags=None, + **kwargs): + + root_data_dir = root_data_dir if root_data_dir else '' + default_flags = {} + default_flags['dataset'] = 'ml-20m' + default_flags['num_gpus'] = 1 + default_flags['train_epochs'] = 14 + default_flags['clean'] = True + default_flags['batch_size'] = 99000 + default_flags['eval_batch_size'] = 160000 + default_flags['learning_rate'] = 0.00382059 + default_flags['beta1'] = 0.783529 + default_flags['beta2'] = 0.909003 + default_flags['epsilon'] = 1.45439e-07 + default_flags['layers'] = [256, 256, 128, 64] + default_flags['num_factors'] = 64 + default_flags['hr_threshold'] = 0.635 + default_flags['ml_perf'] = True + default_flags['use_synthetic_data'] = False + default_flags['train_dataset_path'] = os.path.join( + NCF_TF_REGRESSION_DATA_DIR_NAME, 'training_cycle_*/*') + default_flags['eval_dataset_path'] = os.path.join( + NCF_TF_REGRESSION_DATA_DIR_NAME, 'eval_data/*') + default_flags['input_meta_data_path'] = os.path.join( + NCF_TF_REGRESSION_DATA_DIR_NAME, 'metadata') + default_flags['data_dir'] = NCF_TF_REGRESSION_DATA_DIR_NAME + + super(NCFKerasBenchmarkReal, self).__init__( + output_dir=output_dir, default_flags=default_flags, **kwargs) + + def benchmark_2x2_tpu(self): + """2x2 TPU using CTL with distribution strategy.""" + self._setup() + FLAGS.distribution_strategy = 'tpu' + FLAGS.keras_use_ctl = True + FLAGS.num_gpus = 0 + FLAGS.train_epochs = 1 + self._run_and_report_benchmark() + + @owner_utils.Owner('tf-graph-compiler') + def benchmark_2x2_tpu_mlir(self): + """2x2 TPU using CTL with distribution strategy using the MLIR bridge.""" + self._setup() + FLAGS.distribution_strategy = 'tpu' + FLAGS.keras_use_ctl = True + FLAGS.num_gpus = 0 + FLAGS.train_epochs = 1 + tf.config.experimental.enable_mlir_bridge() + self._run_and_report_benchmark() + + +class NCFKerasSynth(NCFKerasBenchmarkBase): + """Benchmark NCF model using synthetic data.""" + + def __init__(self, + output_dir=None, + default_flags=None, + **kwargs): + + default_flags = {} + 
default_flags['dataset'] = 'ml-20m' + default_flags['num_gpus'] = 1 + default_flags['train_epochs'] = 8 + default_flags['batch_size'] = 99000 + default_flags['eval_batch_size'] = 160000 + default_flags['learning_rate'] = 0.00382059 + default_flags['beta1'] = 0.783529 + default_flags['beta2'] = 0.909003 + default_flags['epsilon'] = 1.45439e-07 + default_flags['layers'] = [256, 256, 128, 64] + default_flags['num_factors'] = 64 + default_flags['hr_threshold'] = 0.635 + default_flags['use_synthetic_data'] = True + + super(NCFKerasSynth, self).__init__( + output_dir=output_dir, + default_flags=default_flags, + **kwargs) + + def benchmark_1_gpu(self): + self._setup() + self._run_and_report_benchmark() + + def benchmark_2_gpus(self): + self._setup() + FLAGS.num_gpus = 2 + self._run_and_report_benchmark() + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/benchmark/nhnet_benchmark.py b/models/official/benchmark/nhnet_benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..7eac36b204a4f064216fb4c81effff06d8c7e6f0 --- /dev/null +++ b/models/official/benchmark/nhnet_benchmark.py @@ -0,0 +1,148 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Executes benchmark testing for bert pretraining.""" +# pylint: disable=line-too-long +from __future__ import print_function + +import time +from typing import Optional + +from absl import flags +import tensorflow as tf + +from official.benchmark import benchmark_wrappers +from official.benchmark import owner_utils +from official.benchmark import perfzero_benchmark +from official.nlp.nhnet import trainer +from official.utils.flags import core as flags_core + +MIN_LOSS = 0.40 +MAX_LOSS = 0.55 +NHNET_DATA = 'gs://tf-perfzero-data/nhnet/v1/processed/train.tfrecord*' +PRETRAINED_CHECKPOINT_PATH = 'gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-12_H-768_A-12/bert_model.ckpt' + +FLAGS = flags.FLAGS + + +class NHNetBenchmark(perfzero_benchmark.PerfZeroBenchmark): + """Base benchmark class for NHNet.""" + + def __init__(self, output_dir=None, default_flags=None, tpu=None, **kwargs): + self.default_flags = default_flags or {} + flag_methods = trainer.define_flags() + super(NHNetBenchmark, self).__init__( + output_dir=output_dir, + default_flags=default_flags, + flag_methods=flag_methods, + tpu=tpu, + **kwargs) + + def _report_benchmark(self, + stats, + wall_time_sec, + max_value=None, + min_value=None): + """Report benchmark results by writing to local protobuf file. + + Args: + stats: dict returned from keras models with known entries. + wall_time_sec: the during of the benchmark execution in seconds + max_value: highest passing level. + min_value: lowest passing level. 
+ """ + + metrics = [] + metrics.append({ + 'name': 'training_loss', + 'value': stats['training_loss'], + 'min_value': min_value, + 'max_value': max_value + }) + # These metrics are placeholders to avoid PerfZero failure. + metrics.append({ + 'name': 'exp_per_second', + 'value': 0.0, + }) + metrics.append({ + 'name': 'startup_time', + 'value': 9999., + }) + flags_str = flags_core.get_nondefault_flags_as_str() + self.report_benchmark( + iters=-1, + wall_time=wall_time_sec, + metrics=metrics, + extras={'flags': flags_str}) + + +class NHNetAccuracyBenchmark(NHNetBenchmark): + """Benchmark accuracy tests for NHNet.""" + + def __init__(self, + output_dir: Optional[str] = None, + tpu: Optional[str] = None, + **kwargs): + default_flags = dict( + mode='train', + train_file_pattern=NHNET_DATA, + train_batch_size=1024, + model_type='nhnet', + len_title=15, + len_passage=200, + num_encoder_layers=12, + num_decoder_layers=12, + num_nhnet_articles=5, + steps_per_loop=1000, + params_override='init_from_bert2bert=false') + super(NHNetAccuracyBenchmark, self).__init__( + output_dir=output_dir, default_flags=default_flags, tpu=tpu, **kwargs) + + @benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark(self, max_value=MAX_LOSS, min_value=MIN_LOSS): + """Runs and reports the benchmark given the provided configuration.""" + start_time_sec = time.time() + stats = trainer.run() + wall_time_sec = time.time() - start_time_sec + self._report_benchmark( + stats, wall_time_sec, max_value=max_value, min_value=min_value) + + @owner_utils.Owner('tf-model-garden') + def benchmark_accuracy_4x4_tpu_f32_50k_steps(self): + """Test bert pretraining with 4x4 TPU for 50k steps.""" + # This is used for accuracy test. + self._setup() + FLAGS.train_steps = 50000 + FLAGS.checkpoint_interval = FLAGS.train_steps + FLAGS.distribution_strategy = 'tpu' + FLAGS.init_checkpoint = PRETRAINED_CHECKPOINT_PATH + FLAGS.model_dir = self._get_model_dir( + 'benchmark_accuracy_4x4_tpu_bf32_50k_steps') + self._run_and_report_benchmark() + + @owner_utils.Owner('tf-model-garden') + def benchmark_accuracy_4x4_tpu_f32_1k_steps(self): + """Test bert pretraining with 4x4 TPU for 1k steps.""" + self._setup() + FLAGS.train_steps = 1000 + FLAGS.checkpoint_interval = FLAGS.train_steps + FLAGS.distribution_strategy = 'tpu' + FLAGS.model_dir = self._get_model_dir( + 'benchmark_accuracy_4x4_tpu_bf32_1k_steps') + self._run_and_report_benchmark() + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/benchmark/owner_utils.py b/models/official/benchmark/owner_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e7d189d7b9a2ba05a0bd3af8cb970d52cc85f5a0 --- /dev/null +++ b/models/official/benchmark/owner_utils.py @@ -0,0 +1,67 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utils to set Owner annotations on benchmarks. 
+ +@owner_utils.Owner('owner_team/user') can be set either at the benchmark class +level / benchmark method level or both. + +Runner frameworks can use owner_utils.GetOwner(benchmark_method) to get the +actual owner. Python inheritance for the owner attribute is respected. (E.g +method level owner takes precedence over class level). + +See owner_utils_test for associated tests and more examples. + +The decorator can be applied both at the method level and at the class level. + +Simple example: +=============== + +class MLBenchmark: + + @Owner('example_id') + def benchmark_method_1_gpu(self): + return True +""" + + +def Owner(owner_name): + """Sets the owner attribute on a decorated method or class.""" + + def _Wrapper(func_or_class): + """Sets the benchmark owner attribute.""" + func_or_class.__benchmark__owner__ = owner_name + return func_or_class + + return _Wrapper + + +def GetOwner(benchmark_method_or_class): + """Gets the inherited owner attribute for this benchmark. + + Checks for existence of __benchmark__owner__. If it's not present, looks for + it in the parent class's attribute list. + + Args: + benchmark_method_or_class: A benchmark method or class. + + Returns: + string - the associated owner if present / None. + """ + if hasattr(benchmark_method_or_class, '__benchmark__owner__'): + return benchmark_method_or_class.__benchmark__owner__ + elif hasattr(benchmark_method_or_class, '__self__'): + if hasattr(benchmark_method_or_class.__self__, '__benchmark__owner__'): + return benchmark_method_or_class.__self__.__benchmark__owner__ + return None diff --git a/models/official/benchmark/owner_utils_test.py b/models/official/benchmark/owner_utils_test.py new file mode 100644 index 0000000000000000000000000000000000000000..588bb80378fbf7ba5a6aec470f24fc1c4ad995b2 --- /dev/null +++ b/models/official/benchmark/owner_utils_test.py @@ -0,0 +1,104 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
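The docstring above notes that runner frameworks call `owner_utils.GetOwner(benchmark_method)` to resolve ownership. A small sketch of what that lookup might look like on the runner side (the `collect_owners` helper is hypothetical, not part of this change):

```python
# Hypothetical runner-side helper: walk benchmark_* methods on an instance and
# record the owner resolved for each one.
from official.benchmark import owner_utils


def collect_owners(benchmark_instance):
  """Maps each benchmark_* method name to its resolved owner (or None)."""
  owners = {}
  for name in dir(benchmark_instance):
    if name.startswith('benchmark_'):
      method = getattr(benchmark_instance, name)
      owners[name] = owner_utils.GetOwner(method)
  return owners
```

Method-level owners win over class-level ones because `GetOwner` checks the bound method (and therefore the decorated function) before falling back to the `__self__` instance, whose lookup reaches the class attribute.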
+# ============================================================================== +"""Tests for official.benchmark.owner_utils.""" + +from absl.testing import absltest + +from official.benchmark import owner_utils + + +@owner_utils.Owner('static_owner') +def static_function(foo=5): + return foo + + +def static_function_without_owner(foo=5): + return foo + + +class BenchmarkClassWithoutOwner: + + def method_without_owner(self): + return 100 + + @owner_utils.Owner('method_owner') + def method_with_owner(self): + return 200 + + +@owner_utils.Owner('class_owner') +class SomeBenchmarkClass: + + def method_inherited_owner(self): + return 123 + + @owner_utils.Owner('method_owner') + def method_override_owner(self): + return 345 + + +@owner_utils.Owner('new_class_owner') +class InheritedClass(SomeBenchmarkClass): + + def method_inherited_owner(self): + return 456 + + @owner_utils.Owner('new_method_owner') + def method_override_owner(self): + return 567 + + +class OwnerUtilsTest(absltest.TestCase): + """Tests to assert for owner decorator functionality.""" + + def test_owner_tag_missing(self): + self.assertEqual(None, owner_utils.GetOwner(static_function_without_owner)) + + benchmark_class = BenchmarkClassWithoutOwner() + self.assertEqual(None, + owner_utils.GetOwner(benchmark_class.method_without_owner)) + self.assertEqual(100, benchmark_class.method_without_owner()) + + self.assertEqual('method_owner', + owner_utils.GetOwner(benchmark_class.method_with_owner)) + self.assertEqual(200, benchmark_class.method_with_owner()) + + def test_owner_attributes_static(self): + self.assertEqual('static_owner', owner_utils.GetOwner(static_function)) + self.assertEqual(5, static_function(5)) + + def test_owner_attributes_per_class(self): + level1 = SomeBenchmarkClass() + self.assertEqual('class_owner', + owner_utils.GetOwner(level1.method_inherited_owner)) + self.assertEqual(123, level1.method_inherited_owner()) + + self.assertEqual('method_owner', + owner_utils.GetOwner(level1.method_override_owner)) + self.assertEqual(345, level1.method_override_owner()) + + def test_owner_attributes_inherited_class(self): + level2 = InheritedClass() + self.assertEqual('new_class_owner', + owner_utils.GetOwner(level2.method_inherited_owner)) + self.assertEqual(456, level2.method_inherited_owner()) + + self.assertEqual('new_method_owner', + owner_utils.GetOwner(level2.method_override_owner)) + self.assertEqual(567, level2.method_override_owner()) + + +if __name__ == '__main__': + absltest.main() diff --git a/models/official/benchmark/perfzero_benchmark.py b/models/official/benchmark/perfzero_benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..bedc1320217d1b9469333a8cdfdf70c56de34f77 --- /dev/null +++ b/models/official/benchmark/perfzero_benchmark.py @@ -0,0 +1,100 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Utils for creating PerfZero benchmarks.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from absl import flags +from absl import logging +from absl.testing import flagsaver +import tensorflow as tf + +FLAGS = flags.FLAGS + + +class PerfZeroBenchmark(tf.test.Benchmark): + """Common methods used in PerfZero Benchmarks. + + Handles the resetting of flags between tests, loading of default_flags, + overriding of defaults. PerfZero (OSS) runs each test in a separate + process reducing some need to reset the flags. + """ + local_flags = None + + def __init__(self, + output_dir=None, + default_flags=None, + root_data_dir=None, + flag_methods=None, + tpu=None): + """Initialize class. + + Args: + output_dir: Base directory to store all output for the test. + default_flags: Set of flags to pass to model. + root_data_dir: Optional param used by child classes to look for the + dataset. + flag_methods: Set of flag methods to run during setup. + tpu: (optional) TPU name to use in a TPU benchmark. + """ + if os.getenv('BENCHMARK_OUTPUT_DIR'): + self.output_dir = os.getenv('BENCHMARK_OUTPUT_DIR') + elif output_dir: + self.output_dir = output_dir + else: + self.output_dir = '/tmp' + self.default_flags = default_flags or {} + self.flag_methods = flag_methods or {} + + if os.getenv('BENCHMARK_TPU'): + resolved_tpu = os.getenv('BENCHMARK_TPU') + elif tpu: + resolved_tpu = tpu + else: + resolved_tpu = None + + if resolved_tpu: + # TPU models are expected to accept a --tpu=name flag. PerfZero creates + # the TPU at runtime and passes the TPU's name to this flag. + self.default_flags['tpu'] = resolved_tpu + + logging.info('root_data_dir: %s', root_data_dir) + + @property + def tpu(self): + return self.default_flags.get('tpu', None) + + def _get_model_dir(self, folder_name): + """Returns directory to store info, e.g. saved model and event log.""" + return os.path.join(self.output_dir, folder_name) + + def _setup(self): + """Sets up and resets flags before each test.""" + logging.set_verbosity(logging.INFO) + if PerfZeroBenchmark.local_flags is None: + for flag_method in self.flag_methods: + flag_method() + # Loads flags to get defaults to then override. List cannot be empty. + flags.FLAGS(['foo']) + # Overrides flag values with defaults for the class of tests. + for k, v in self.default_flags.items(): + setattr(FLAGS, k, v) + saved_flag_values = flagsaver.save_flag_values() + PerfZeroBenchmark.local_flags = saved_flag_values + else: + flagsaver.restore_flag_values(PerfZeroBenchmark.local_flags) diff --git a/models/official/benchmark/resnet_ctl_imagenet_benchmark.py b/models/official/benchmark/resnet_ctl_imagenet_benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..0e70e8da969ec9b02a2de00d1973bdd2aa5f2b51 --- /dev/null +++ b/models/official/benchmark/resnet_ctl_imagenet_benchmark.py @@ -0,0 +1,452 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
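`PerfZeroBenchmark` expects subclasses to register flags via `flag_methods`, seed values via `default_flags`, and call `_setup()` at the top of each `benchmark_*` method. A minimal subclass sketch (the class name and flag values here are placeholders, not part of this change; `common.define_keras_flags` is the flag method the ResNet benchmarks below also use):

```python
from absl import flags
from official.benchmark.perfzero_benchmark import PerfZeroBenchmark
from official.vision.image_classification.resnet import common

FLAGS = flags.FLAGS


class ToyKerasBenchmark(PerfZeroBenchmark):
  """Hypothetical benchmark built on PerfZeroBenchmark."""

  def __init__(self, output_dir=None):
    super(ToyKerasBenchmark, self).__init__(
        output_dir=output_dir,
        flag_methods=[common.define_keras_flags],  # registers flags once
        default_flags={'batch_size': 64})          # applied by _setup()

  def benchmark_1_gpu(self):
    self._setup()  # resets flag state before this method's overrides
    FLAGS.num_gpus = 1
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
    # ... run the model here and report results via self.report_benchmark(...).
```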
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Executes CTL benchmarks and accuracy tests.""" +# pylint: disable=line-too-long,g-bad-import-order +from __future__ import print_function + +import os +import time + +from absl import flags +import tensorflow as tf + +from official.benchmark import owner_utils +from official.vision.image_classification.resnet import common +from official.vision.image_classification.resnet import resnet_ctl_imagenet_main +from official.benchmark.perfzero_benchmark import PerfZeroBenchmark +from official.benchmark import benchmark_wrappers +from official.utils.flags import core as flags_core + +MIN_TOP_1_ACCURACY = 0.76 +MAX_TOP_1_ACCURACY = 0.77 + +FLAGS = flags.FLAGS + + +class CtlBenchmark(PerfZeroBenchmark): + """Base benchmark class with methods to simplify testing.""" + + def __init__(self, output_dir=None, default_flags=None, flag_methods=None): + self.default_flags = default_flags or {} + self.flag_methods = flag_methods or {} + super(CtlBenchmark, self).__init__( + output_dir=output_dir, + default_flags=self.default_flags, + flag_methods=self.flag_methods) + + def _report_benchmark(self, + stats, + wall_time_sec, + top_1_max=None, + top_1_min=None, + total_batch_size=None, + log_steps=None, + warmup=1, + start_time_sec=None): + """Report benchmark results by writing to local protobuf file. + + Args: + stats: dict returned from keras models with known entries. + wall_time_sec: the during of the benchmark execution in seconds + top_1_max: highest passing level for top_1 accuracy. + top_1_min: lowest passing level for top_1 accuracy. + total_batch_size: Global batch-size. + log_steps: How often the log was created for stats['step_timestamp_log']. + warmup: number of entries in stats['step_timestamp_log'] to ignore. + start_time_sec: the start time of the program in seconds since epoch. + """ + + metrics = [] + if 'eval_acc' in stats: + metrics.append({ + 'name': 'accuracy_top_1', + 'value': stats['eval_acc'], + 'min_value': top_1_min, + 'max_value': top_1_max + }) + metrics.append({'name': 'eval_loss', 'value': stats['eval_loss']}) + + metrics.append({ + 'name': 'top_1_train_accuracy', + 'value': stats['train_acc'] + }) + metrics.append({'name': 'train_loss', 'value': stats['train_loss']}) + + if (warmup and 'step_timestamp_log' in stats and + len(stats['step_timestamp_log']) > warmup + 1): + # first entry in the time_log is start of step 0. The rest of the + # entries are the end of each step recorded + time_log = stats['step_timestamp_log'] + steps_elapsed = time_log[-1].batch_index - time_log[warmup].batch_index + time_elapsed = time_log[-1].timestamp - time_log[warmup].timestamp + examples_per_sec = total_batch_size * (steps_elapsed / time_elapsed) + metrics.append({'name': 'exp_per_second', 'value': examples_per_sec}) + + if 'avg_exp_per_second' in stats: + metrics.append({ + 'name': 'avg_exp_per_second', + 'value': stats['avg_exp_per_second'] + }) + + if start_time_sec and 'step_timestamp_log' in stats: + time_log = stats['step_timestamp_log'] + # time_log[0] is recorded at the beginning of the first step. 
+ startup_time = time_log[0].timestamp - start_time_sec + metrics.append({'name': 'startup_time', 'value': startup_time}) + + flags_str = flags_core.get_nondefault_flags_as_str() + self.report_benchmark( + iters=-1, + wall_time=wall_time_sec, + metrics=metrics, + extras={'flags': flags_str}) + + +class Resnet50CtlAccuracy(CtlBenchmark): + """Benchmark accuracy tests for ResNet50 in CTL.""" + + def __init__(self, output_dir=None, root_data_dir=None, **kwargs): + """A benchmark class. + + Args: + output_dir: directory where to output e.g. log files + root_data_dir: directory under which to look for dataset + **kwargs: arbitrary named arguments. This is needed to make the + constructor forward compatible in case PerfZero provides more named + arguments before updating the constructor. + """ + + flag_methods = [common.define_keras_flags] + + self.data_dir = os.path.join(root_data_dir, 'imagenet') + super(Resnet50CtlAccuracy, self).__init__( + output_dir=output_dir, flag_methods=flag_methods) + + def benchmark_8_gpu(self): + """Test Keras model with eager, dist_strat and 8 GPUs.""" + self._setup() + FLAGS.num_gpus = 8 + FLAGS.data_dir = self.data_dir + FLAGS.batch_size = 128 * 8 + FLAGS.train_epochs = 90 + FLAGS.epochs_between_evals = 10 + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu') + FLAGS.dtype = 'fp32' + self._run_and_report_benchmark() + + def benchmark_8_gpu_fp16(self): + """Test Keras model with eager, 8 GPUs with tf.keras mixed precision.""" + self._setup() + FLAGS.num_gpus = 8 + FLAGS.data_dir = self.data_dir + FLAGS.batch_size = 256 * 8 + FLAGS.train_epochs = 90 + FLAGS.epochs_between_evals = 10 + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16') + FLAGS.dtype = 'fp16' + self._run_and_report_benchmark() + + def benchmark_8_gpu_amp(self): + """Test Keras model with 8 GPUs and mixed precision via graph rewrite.""" + self._setup() + FLAGS.num_gpus = 8 + FLAGS.data_dir = self.data_dir + FLAGS.batch_size = 256 * 8 + FLAGS.train_epochs = 90 + FLAGS.epochs_between_evals = 10 + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_amp') + FLAGS.dtype = 'fp16' + FLAGS.fp16_implementation = 'graph_rewrite' + self._run_and_report_benchmark() + + @benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark(self): + start_time_sec = time.time() + stats = resnet_ctl_imagenet_main.run(flags.FLAGS) + wall_time_sec = time.time() - start_time_sec + + super(Resnet50CtlAccuracy, self)._report_benchmark( + stats, + wall_time_sec, + top_1_min=MIN_TOP_1_ACCURACY, + top_1_max=MAX_TOP_1_ACCURACY, + total_batch_size=FLAGS.batch_size, + log_steps=100, + start_time_sec=start_time_sec) + + +class Resnet50CtlBenchmarkBase(CtlBenchmark): + """Resnet50 benchmarks.""" + + def __init__(self, output_dir=None, default_flags=None): + flag_methods = [common.define_keras_flags] + + super(Resnet50CtlBenchmarkBase, self).__init__( + output_dir=output_dir, + flag_methods=flag_methods, + default_flags=default_flags) + + @benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark(self): + start_time_sec = time.time() + stats = resnet_ctl_imagenet_main.run(FLAGS) + wall_time_sec = time.time() - start_time_sec + + # Warmup means the number of logged step time entries that are excluded in + # performance report. Default to exclude 1 FLAGS.log_steps time. 
+ super(Resnet50CtlBenchmarkBase, self)._report_benchmark( + stats, + wall_time_sec, + total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps, + warmup=1, + start_time_sec=start_time_sec) + + def benchmark_1_gpu_no_dist_strat(self): + """Test Keras model with 1 GPU, no distribution strategy.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.distribution_strategy = 'off' + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_dist_strat') + FLAGS.batch_size = 128 + self._run_and_report_benchmark() + + def benchmark_1_gpu(self): + """Test Keras model with 1 GPU.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu') + FLAGS.batch_size = 128 + self._run_and_report_benchmark() + + def benchmark_1_gpu_fp16(self): + """Test Keras model with 1 GPU with tf.keras mixed precision.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16') + FLAGS.batch_size = 256 + FLAGS.dtype = 'fp16' + self._run_and_report_benchmark() + + def benchmark_1_gpu_amp(self): + """Test Keras model with 1 GPU with automatic mixed precision.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_amp') + FLAGS.batch_size = 256 + FLAGS.dtype = 'fp16' + FLAGS.fp16_implementation = 'graph_rewrite' + self._run_and_report_benchmark() + + def benchmark_xla_1_gpu_amp(self): + """Test Keras model with XLA and 1 GPU with automatic mixed precision.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_amp') + FLAGS.batch_size = 256 + FLAGS.dtype = 'fp16' + FLAGS.fp16_implementation = 'graph_rewrite' + FLAGS.enable_xla = True + self._run_and_report_benchmark() + + def benchmark_1_gpu_eager(self): + """Test Keras model with 1 GPU in pure eager mode.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_eager') + FLAGS.batch_size = 120 + FLAGS.use_tf_function = False + FLAGS.use_tf_while_loop = False + FLAGS.single_l2_loss_op = True + self._run_and_report_benchmark() + + def benchmark_1_gpu_fp16_eager(self): + """Test Keras model with 1 GPU with fp16 and pure eager mode.""" + self._setup() + + FLAGS.num_gpus = 1 + FLAGS.distribution_strategy = 'one_device' + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16_eager') + FLAGS.batch_size = 240 + FLAGS.dtype = 'fp16' + FLAGS.use_tf_function = False + FLAGS.use_tf_while_loop = False + FLAGS.single_l2_loss_op = True + self._run_and_report_benchmark() + + def benchmark_8_gpu(self): + """Test Keras model with 8 GPUs.""" + self._setup() + + FLAGS.num_gpus = 8 + FLAGS.distribution_strategy = 'mirrored' + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu') + FLAGS.batch_size = 128 * 8 # 8 GPUs + self._run_and_report_benchmark() + + def benchmark_8_gpu_fp16(self): + """Test Keras model with 8 GPUs with tf.keras mixed precision.""" + self._setup() + + FLAGS.num_gpus = 8 + FLAGS.distribution_strategy = 'mirrored' + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16') + FLAGS.batch_size = 256 * 8 # 8 GPUs + FLAGS.dtype = 'fp16' + self._run_and_report_benchmark() + + def benchmark_8_gpu_eager(self): + """Test Keras model with 8 GPUs, eager, fp32.""" + self._setup() + + FLAGS.num_gpus = 8 + FLAGS.use_tf_function = False + 
FLAGS.use_tf_while_loop = False + FLAGS.distribution_strategy = 'mirrored' + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_eager') + FLAGS.batch_size = 128 + self._run_and_report_benchmark() + + def benchmark_8_gpu_eager_fp16(self): + """Test Keras model with 8 GPUs, eager, fp16.""" + self._setup() + + FLAGS.num_gpus = 8 + FLAGS.dtype = 'fp16' + FLAGS.use_tf_function = False + FLAGS.use_tf_while_loop = False + FLAGS.distribution_strategy = 'mirrored' + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_eager_fp16') + FLAGS.batch_size = 128 + self._run_and_report_benchmark() + + def benchmark_8_gpu_amp(self): + """Test Keras model with 8 GPUs with automatic mixed precision.""" + self._setup() + + FLAGS.num_gpus = 8 + FLAGS.distribution_strategy = 'mirrored' + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_amp') + FLAGS.batch_size = 256 * 8 # 8 GPUs + FLAGS.dtype = 'fp16' + FLAGS.fp16_implementation = 'graph_rewrite' + self._run_and_report_benchmark() + + def benchmark_xla_8_gpu_amp(self): + """Test Keras model with XLA and 8 GPUs with automatic mixed precision.""" + self._setup() + + FLAGS.num_gpus = 8 + FLAGS.distribution_strategy = 'mirrored' + FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_amp') + FLAGS.batch_size = 256 * 8 # 8 GPUs + FLAGS.dtype = 'fp16' + FLAGS.fp16_implementation = 'graph_rewrite' + FLAGS.enable_xla = True + self._run_and_report_benchmark() + + def _set_df_common(self): + FLAGS.steps_per_loop = 500 + FLAGS.train_epochs = 2 + FLAGS.train_steps = None + FLAGS.skip_eval = True + FLAGS.enable_eager = True + FLAGS.enable_tensorboard = False + FLAGS.distribution_strategy = 'tpu' + FLAGS.report_accuracy_metrics = False + FLAGS.log_steps = 50 + FLAGS.single_l2_loss_op = True + FLAGS.use_tf_function = True + FLAGS.enable_checkpoint_and_export = False + + def benchmark_2x2_tpu_bf16(self): + self._setup() + self._set_df_common() + FLAGS.batch_size = 1024 + FLAGS.dtype = 'bf16' + self._run_and_report_benchmark() + + def benchmark_4x4_tpu_bf16(self): + self._setup() + self._set_df_common() + FLAGS.batch_size = 4096 + FLAGS.dtype = 'bf16' + self._run_and_report_benchmark() + + @owner_utils.Owner('tf-graph-compiler') + def benchmark_4x4_tpu_bf16_mlir(self): + """Run resnet model on 4x4 with the MLIR Bridge enabled.""" + self._setup() + self._set_df_common() + FLAGS.batch_size = 4096 + FLAGS.dtype = 'bf16' + tf.config.experimental.enable_mlir_bridge() + self._run_and_report_benchmark() + + def benchmark_8x16_tpu_bf16(self): + self._setup() + self._set_df_common() + FLAGS.batch_size = 8192 + FLAGS.dtype = 'bf16' + self._run_and_report_benchmark() + + def fill_report_object(self, stats): + super(Resnet50CtlBenchmarkBase, self).fill_report_object( + stats, total_batch_size=FLAGS.batch_size, log_steps=FLAGS.log_steps) + + +class Resnet50CtlBenchmarkSynth(Resnet50CtlBenchmarkBase): + """Resnet50 synthetic benchmark tests.""" + + def __init__(self, output_dir=None, root_data_dir=None, **kwargs): + def_flags = {} + def_flags['skip_eval'] = True + def_flags['use_synthetic_data'] = True + def_flags['train_steps'] = 110 + def_flags['steps_per_loop'] = 20 + def_flags['log_steps'] = 10 + + super(Resnet50CtlBenchmarkSynth, self).__init__( + output_dir=output_dir, default_flags=def_flags) + + +class Resnet50CtlBenchmarkReal(Resnet50CtlBenchmarkBase): + """Resnet50 real data benchmark tests.""" + + def __init__(self, output_dir=None, root_data_dir=None, **kwargs): + def_flags = {} + def_flags['skip_eval'] = True + def_flags['data_dir'] = os.path.join(root_data_dir, 
'imagenet') + def_flags['train_steps'] = 110 + def_flags['steps_per_loop'] = 20 + def_flags['log_steps'] = 10 + + super(Resnet50CtlBenchmarkReal, self).__init__( + output_dir=output_dir, default_flags=def_flags) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/benchmark/retinanet_benchmark.py b/models/official/benchmark/retinanet_benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..62bc80eef1fd00d5087af5522561ff7cf7863f5e --- /dev/null +++ b/models/official/benchmark/retinanet_benchmark.py @@ -0,0 +1,276 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Executes RetinaNet benchmarks and accuracy tests.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# pylint: disable=g-bad-import-order +import json +import time + +from absl import flags +from absl.testing import flagsaver +import tensorflow as tf +# pylint: enable=g-bad-import-order + +from official.benchmark import benchmark_wrappers +from official.benchmark import perfzero_benchmark +from official.utils.flags import core as flags_core +from official.utils.misc import keras_utils +from official.vision.detection import main as detection +from official.vision.detection.configs import base_config + +FLAGS = flags.FLAGS + +# pylint: disable=line-too-long +COCO_TRAIN_DATA = 'gs://tf-perfzero-data/coco/train*' +COCO_EVAL_DATA = 'gs://tf-perfzero-data/coco/val*' +COCO_EVAL_JSON = 'gs://tf-perfzero-data/coco/instances_val2017.json' +RESNET_CHECKPOINT_PATH = 'gs://cloud-tpu-checkpoints/retinanet/resnet50-checkpoint-2018-02-07' +# pylint: enable=line-too-long + + +class DetectionBenchmarkBase(perfzero_benchmark.PerfZeroBenchmark): + """Base class to hold methods common to test classes.""" + + def __init__(self, **kwargs): + super(DetectionBenchmarkBase, self).__init__(**kwargs) + self.timer_callback = None + + def _report_benchmark(self, stats, start_time_sec, wall_time_sec, min_ap, + max_ap, warmup): + """Report benchmark results by writing to local protobuf file. + + Args: + stats: dict returned from Detection models with known entries. + start_time_sec: the start of the benchmark execution in seconds + wall_time_sec: the duration of the benchmark execution in seconds + min_ap: Minimum detection AP constraint to verify correctness of the + model. + max_ap: Maximum detection AP accuracy constraint to verify correctness of + the model. + warmup: Number of time log entries to ignore when computing examples/sec. 
+ """ + metrics = [{ + 'name': 'total_loss', + 'value': stats['total_loss'], + }] + if self.timer_callback: + metrics.append({ + 'name': 'exp_per_second', + 'value': self.timer_callback.get_examples_per_sec(warmup) + }) + metrics.append({ + 'name': 'startup_time', + 'value': self.timer_callback.get_startup_time(start_time_sec) + }) + else: + metrics.append({ + 'name': 'exp_per_second', + 'value': 0.0, + }) + + if 'eval_metrics' in stats: + metrics.append({ + 'name': 'AP', + 'value': stats['AP'], + 'min_value': min_ap, + 'max_value': max_ap, + }) + flags_str = flags_core.get_nondefault_flags_as_str() + self.report_benchmark( + iters=stats['total_steps'], + wall_time=wall_time_sec, + metrics=metrics, + extras={'flags': flags_str}) + + +class RetinanetBenchmarkBase(DetectionBenchmarkBase): + """Base class to hold methods common to test classes in the module.""" + + def __init__(self, **kwargs): + self.train_data_path = COCO_TRAIN_DATA + self.eval_data_path = COCO_EVAL_DATA + self.eval_json_path = COCO_EVAL_JSON + self.resnet_checkpoint_path = RESNET_CHECKPOINT_PATH + super(RetinanetBenchmarkBase, self).__init__(**kwargs) + + def _run_detection_main(self): + """Starts detection job.""" + if self.timer_callback: + FLAGS.log_steps = 0 # prevent detection.run from adding the same callback + return detection.run(callbacks=[self.timer_callback]) + else: + return detection.run() + + +class RetinanetAccuracy(RetinanetBenchmarkBase): + """Accuracy test for RetinaNet model. + + Tests RetinaNet detection task model accuracy. The naming + convention of below test cases follow + `benchmark_(number of gpus)_gpu_(dataset type)` format. + """ + + @benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark(self, + params, + min_ap=0.325, + max_ap=0.35, + do_eval=True, + warmup=1): + """Starts RetinaNet accuracy benchmark test.""" + FLAGS.params_override = json.dumps(params) + # Need timer callback to measure performance + self.timer_callback = keras_utils.TimeHistory( + batch_size=params['train']['batch_size'], + log_steps=FLAGS.log_steps, + ) + + start_time_sec = time.time() + FLAGS.mode = 'train' + summary, _ = self._run_detection_main() + wall_time_sec = time.time() - start_time_sec + + if do_eval: + FLAGS.mode = 'eval' + eval_metrics = self._run_detection_main() + summary.update(eval_metrics) + + summary['total_steps'] = params['train']['total_steps'] + self._report_benchmark(summary, start_time_sec, wall_time_sec, min_ap, + max_ap, warmup) + + def _setup(self): + super(RetinanetAccuracy, self)._setup() + FLAGS.model = 'retinanet' + + def _params(self): + return { + 'architecture': { + 'use_bfloat16': True, + }, + 'train': { + 'batch_size': 64, + 'iterations_per_loop': 100, + 'total_steps': 22500, + 'train_file_pattern': self.train_data_path, + 'checkpoint': { + 'path': self.resnet_checkpoint_path, + 'prefix': 'resnet50/' + }, + # Speed up ResNet training when loading from the checkpoint. 
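+        # (Variables whose names match this prefix, i.e. the ResNet backbone
+        # restored from the checkpoint above, are treated as frozen and are
+        # not updated during training, which is what saves time here.)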
+ 'frozen_variable_prefix': base_config.RESNET_FROZEN_VAR_PREFIX, + }, + 'eval': { + 'batch_size': 8, + 'eval_samples': 5000, + 'val_json_file': self.eval_json_path, + 'eval_file_pattern': self.eval_data_path, + }, + } + + @flagsaver.flagsaver + def benchmark_8_gpu_coco(self): + """Run RetinaNet model accuracy test with 8 GPUs.""" + self._setup() + params = self._params() + FLAGS.num_gpus = 8 + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_coco') + FLAGS.strategy_type = 'mirrored' + self._run_and_report_benchmark(params) + + +class RetinanetBenchmarkReal(RetinanetAccuracy): + """Short benchmark performance tests for RetinaNet model. + + Tests RetinaNet performance in different GPU configurations. + The naming convention of below test cases follow + `benchmark_(number of gpus)_gpu` format. + """ + + def _setup(self): + super(RetinanetBenchmarkReal, self)._setup() + # Use negative value to avoid saving checkpoints. + FLAGS.save_checkpoint_freq = -1 + + @flagsaver.flagsaver + def benchmark_8_gpu_coco(self): + """Run RetinaNet model accuracy test with 8 GPUs.""" + self._setup() + params = self._params() + params['architecture']['use_bfloat16'] = False + params['train']['total_steps'] = 1875 # One epoch. + # The iterations_per_loop must be one, otherwise the number of examples per + # second would be wrong. Currently only support calling callback per batch + # when each loop only runs on one batch, i.e. host loop for one step. The + # performance of this situation might be lower than the case of + # iterations_per_loop > 1. + # Related bug: b/135933080 + params['train']['iterations_per_loop'] = 1 + params['eval']['eval_samples'] = 8 + FLAGS.num_gpus = 8 + FLAGS.model_dir = self._get_model_dir('real_benchmark_8_gpu_coco') + FLAGS.strategy_type = 'mirrored' + self._run_and_report_benchmark(params) + + @flagsaver.flagsaver + def benchmark_1_gpu_coco(self): + """Run RetinaNet model accuracy test with 1 GPU.""" + self._setup() + params = self._params() + params['architecture']['use_bfloat16'] = False + params['train']['batch_size'] = 8 + params['train']['total_steps'] = 200 + params['train']['iterations_per_loop'] = 1 + params['eval']['eval_samples'] = 8 + FLAGS.num_gpus = 1 + FLAGS.model_dir = self._get_model_dir('real_benchmark_1_gpu_coco') + FLAGS.strategy_type = 'one_device' + self._run_and_report_benchmark(params) + + @flagsaver.flagsaver + def benchmark_xla_1_gpu_coco(self): + """Run RetinaNet model accuracy test with 1 GPU and XLA enabled.""" + self._setup() + params = self._params() + params['architecture']['use_bfloat16'] = False + params['train']['batch_size'] = 8 + params['train']['total_steps'] = 200 + params['train']['iterations_per_loop'] = 1 + params['eval']['eval_samples'] = 8 + FLAGS.num_gpus = 1 + FLAGS.model_dir = self._get_model_dir('real_benchmark_xla_1_gpu_coco') + FLAGS.strategy_type = 'one_device' + FLAGS.enable_xla = True + self._run_and_report_benchmark(params) + + @flagsaver.flagsaver + def benchmark_2x2_tpu_coco(self): + """Run RetinaNet model accuracy test with 4 TPUs.""" + self._setup() + params = self._params() + params['train']['batch_size'] = 64 + params['train']['total_steps'] = 1875 # One epoch. 
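+    # Sanity check on "one epoch": 64 examples per step * 1875 steps =
+    # 120,000 examples, roughly one pass over the ~118k images in COCO
+    # train2017.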
+ params['train']['iterations_per_loop'] = 500 + FLAGS.model_dir = self._get_model_dir('real_benchmark_2x2_tpu_coco') + FLAGS.strategy_type = 'tpu' + self._run_and_report_benchmark(params, do_eval=False, warmup=0) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/benchmark/shakespeare_benchmark.py b/models/official/benchmark/shakespeare_benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..430ab75da5300e3c374bbe56c2c02befb4dc2dff --- /dev/null +++ b/models/official/benchmark/shakespeare_benchmark.py @@ -0,0 +1,355 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Executes Shakespeare (LSTM) benchmark and accuracy tests.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import time + +from absl import flags +import tensorflow as tf # pylint: disable=g-bad-import-order + +from official.benchmark.models.shakespeare import shakespeare_main +from official.utils.flags import core as flags_core +from official.utils.misc import keras_utils +from official.benchmark import benchmark_wrappers +from official.benchmark.perfzero_benchmark import PerfZeroBenchmark + +SHAKESPEARE_TRAIN_DATA = 'shakespeare/shakespeare.txt' +TMP_DIR = os.getenv('TMPDIR') +FLAGS = flags.FLAGS + + +class ShakespeareBenchmarkBase(PerfZeroBenchmark): + """Base class for Shakespeare (LSTM) benchmark and accuracy tests.""" + + def __init__(self, output_dir=None, default_flags=None, root_data_dir=None): + super(ShakespeareBenchmarkBase, self).__init__( + output_dir=output_dir, + default_flags=default_flags, + flag_methods=[shakespeare_main.define_flags]) + + @benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark(self, + top_1_train_min=0.91, + top_1_train_max=0.94, + warmup=1, + log_steps=100): + """Report benchmark results by writing to local protobuf file. + + Average epoch time is calculated by skipping the first epoch. This average + ignores time spent between epoch and is recorded by begin and end epoch. To + skip accuracy check set `top_1_train_min=None`. + + Args: + top_1_train_min: lowest passing value. + top_1_train_max: highest passing value. + warmup: number of entries in `timestamp_log` to ignore. + log_steps: How often the log was created for `timestamp_log`. 
+ """ + total_batch_size = FLAGS.batch_size + metrics = [] + start_time_sec = time.time() + stats = shakespeare_main.run(FLAGS) + wall_time_sec = time.time() - start_time_sec + + if top_1_train_min: + metrics.append({'name': 'accuracy_top_1_train', + 'value': stats['history']['RecallAt1'][-1], + 'min_value': top_1_train_min, + 'max_value': top_1_train_max}) + + # Look for the time history callback which was used during keras.fit + for callback in stats['callbacks']: + if isinstance(callback, keras_utils.TimeHistory): + epoch_timings = callback.epoch_runtime_log + if len(epoch_timings) > 1: + average_time = sum(epoch_timings[1:]) / len(epoch_timings[1:]) + metrics.append({'name': 'avg_epoch_time', + 'value': average_time}) + + # First entry in timestamp_log is the start of step 1. The rest of the + # entries are the end of each step recorded. + time_log = callback.timestamp_log + elapsed = time_log[-1].timestamp - time_log[warmup].timestamp + num_examples = ( + total_batch_size * log_steps * (len(time_log) - warmup - 1)) + if elapsed > 0: + examples_per_sec = num_examples / elapsed + metrics.append({'name': 'exp_per_second', + 'value': examples_per_sec}) + + flags_str = flags_core.get_nondefault_flags_as_str() + self.report_benchmark(iters=-1, wall_time=wall_time_sec, + metrics=metrics, + extras={'flags': flags_str}) + + +class ShakespeareAccuracy(ShakespeareBenchmarkBase): + """Shakespeare accuracy tests. + + This is not an ideal test. The best we can use for the accuracy check is to + validate top_1 of the training set. At batch size 64 the top_1 training + stabilizes to ~0.92 around 40-45 epochs. + """ + + def __init__(self, output_dir=None, root_data_dir=None, **kwargs): + """Shakespeare accuracy tests. + + Args: + output_dir: directory where to output e.g. log files + root_data_dir: directory under which to look for dataset + **kwargs: arbitrary named arguments. This is needed to make the + constructor forward compatible in case PerfZero provides more + named arguments before updating the constructor. 
+ """ + self.train_data = os.path.join(root_data_dir, SHAKESPEARE_TRAIN_DATA) + super(ShakespeareAccuracy, self).__init__( + output_dir=output_dir, root_data_dir=root_data_dir) + + def benchmark_cpu(self): + """Benchmark cpu.""" + self._setup() + FLAGS.num_gpus = 0 + FLAGS.training_data = self.train_data + FLAGS.batch_size = 64 + FLAGS.train_epochs = 43 + FLAGS.model_dir = '' + self._run_and_report_benchmark() + + def benchmark_cpu_no_ds_run_eagerly(self): + """Benchmark cpu without distribution strategies and run eagerly.""" + self._setup() + FLAGS.num_gpus = 0 + FLAGS.training_data = self.train_data + FLAGS.batch_size = 64 + FLAGS.train_epochs = 43 + FLAGS.model_dir = '' + FLAGS.run_eagerly = True + FLAGS.distribution_strategy = 'off' + self._run_and_report_benchmark() + + def benchmark_1_gpu(self): + """Benchmark 1 gpu.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.training_data = self.train_data + FLAGS.batch_size = 64 + FLAGS.train_epochs = 43 + FLAGS.model_dir = '' + self._run_and_report_benchmark() + + def benchmark_1_gpu_no_ds(self): + """Benchmark 1 gpu without distribution strategies.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.training_data = self.train_data + FLAGS.batch_size = 64 + FLAGS.train_epochs = 43 + FLAGS.model_dir = '' + FLAGS.distribution_strategy = 'off' + self._run_and_report_benchmark() + + def benchmark_1_gpu_no_ds_run_eagerly(self): + """Benchmark 1 gpu without distribution strategies and run eagerly.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.training_data = self.train_data + FLAGS.batch_size = 64 + FLAGS.train_epochs = 43 + FLAGS.model_dir = '' + FLAGS.run_eagerly = True + FLAGS.distribution_strategy = 'off' + self._run_and_report_benchmark() + + def benchmark_xla_1_gpu(self): + """Benchmark 1 gpu w/xla.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.training_data = self.train_data + FLAGS.batch_size = 64 + FLAGS.train_epochs = 43 + FLAGS.model_dir = '' + FLAGS.enable_xla = True + self._run_and_report_benchmark() + + def benchmark_8_gpu(self): + """Benchmark 8 gpu. + + This is test is for accuracy not scaling. The batch-size is not scaled to + the number of gpus. + """ + self._setup() + FLAGS.num_gpus = 8 + FLAGS.training_data = self.train_data + FLAGS.batch_size = 64 + FLAGS.train_epochs = 43 + FLAGS.model_dir = '' + self._run_and_report_benchmark() + + +class ShakespeareKerasBenchmarkReal(ShakespeareBenchmarkBase): + """Benchmark accuracy tests.""" + + def __init__(self, output_dir=None, root_data_dir=TMP_DIR, **kwargs): + """Benchmark tests w/Keras. + + Args: + output_dir: directory where to output e.g. log files + root_data_dir: directory under which to look for dataset + **kwargs: arbitrary named arguments. This is needed to make the + constructor forward compatible in case PerfZero provides more + named arguments before updating the constructor. 
+ """ + self.train_data = os.path.join(root_data_dir, SHAKESPEARE_TRAIN_DATA) + + def_flags = {} + def_flags['training_data'] = self.train_data + def_flags['model_dir'] = '' + def_flags['train_epochs'] = 4 + def_flags['log_steps'] = 50 + + super(ShakespeareKerasBenchmarkReal, self).__init__( + output_dir=output_dir, + root_data_dir=root_data_dir, + default_flags=def_flags) + + def benchmark_cpu(self): + """Benchmark cpu.""" + self._setup() + FLAGS.num_gpus = 0 + FLAGS.batch_size = 64 + self._run_and_report_benchmark() + + def benchmark_cpu_no_ds_run_eagerly(self): + """Benchmark cpu without distribution strategy and run eagerly.""" + self._setup() + FLAGS.num_gpus = 0 + FLAGS.batch_size = 64 + FLAGS.distribution_strategy = 'off' + FLAGS.run_eagerly = True + self._run_and_report_benchmark() + + def benchmark_cpu_no_ds(self): + """Benchmark cpu without distribution strategy.""" + self._setup() + FLAGS.num_gpus = 0 + FLAGS.batch_size = 64 + FLAGS.distribution_strategy = 'off' + self._run_and_report_benchmark() + + def benchmark_cpu_no_ds_force_v2(self): + """Benchmark cpu no ds, and force v2.""" + self._setup() + FLAGS.num_gpus = 0 + FLAGS.batch_size = 64 + FLAGS.distribution_strategy = 'off' + self._run_and_report_benchmark() + + def benchmark_1_gpu(self): + """Benchmark 1 gpu.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.batch_size = 64 + self._run_and_report_benchmark() + + def benchmark_1_gpu_no_cudnn(self): + """Benchmark 1 gpu with CuDNN disabled.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.batch_size = 64 + FLAGS.cudnn = False + self._run_and_report_benchmark() + + def benchmark_1_gpu_no_ds(self): + """Benchmark 1 gpu without distribution strategies.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.batch_size = 64 + FLAGS.distribution_strategy = 'off' + self._run_and_report_benchmark() + + def benchmark_1_gpu_no_ds_run_eagerly(self): + """Benchmark 1 gpu.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.batch_size = 64 + FLAGS.run_eagerly = True + FLAGS.distribution_strategy = 'off' + self._run_and_report_benchmark() + + def benchmark_xla_1_gpu(self): + """Benchmark 1 gpu.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.batch_size = 64 + FLAGS.enable_xla = True + self._run_and_report_benchmark() + + def benchmark_xla_1_gpu_no_cudnn(self): + """Benchmark 1 gpu w/xla and CuDNN disabled.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.batch_size = 64 + FLAGS.cudnn = False + FLAGS.enable_xla = True + self._run_and_report_benchmark() + + def benchmark_8_gpu(self): + """Benchmark 8 gpu.""" + self._setup() + FLAGS.num_gpus = 8 + FLAGS.batch_size = 64 * 8 + FLAGS.log_steps = 10 + self._run_and_report_benchmark() + + def benchmark_8_gpu_no_cudnn(self): + """Benchmark 8 gpu with CuDNN disabled.""" + self._setup() + FLAGS.num_gpus = 8 + FLAGS.batch_size = 64 * 8 + FLAGS.log_steps = 10 + FLAGS.cudnn = False + self._run_and_report_benchmark() + + def benchmark_xla_8_gpu(self): + """Benchmark 8 gpu w/xla.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.batch_size = 64 * 8 + FLAGS.log_steps = 10 + FLAGS.enable_xla = True + self._run_and_report_benchmark() + + def benchmark_xla_8_gpu_no_cudnn(self): + """Benchmark 8 gpu w/xla and CuDNN disabled.""" + self._setup() + FLAGS.num_gpus = 8 + FLAGS.batch_size = 64 * 8 + FLAGS.log_steps = 10 + FLAGS.cudnn = False + FLAGS.enable_xla = True + self._run_and_report_benchmark() + + def _run_and_report_benchmark(self): + """Run and report benchmark.""" + super(ShakespeareKerasBenchmarkReal, self)._run_and_report_benchmark( + top_1_train_min=None, 
log_steps=FLAGS.log_steps) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/benchmark/tfhub_memory_usage_benchmark.py b/models/official/benchmark/tfhub_memory_usage_benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..7f50ecf6b3e0c95c78c0ac574131321a1e41fceb --- /dev/null +++ b/models/official/benchmark/tfhub_memory_usage_benchmark.py @@ -0,0 +1,69 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Runs a memory usage benchmark for a Tensorflow Hub model. + +Loads a SavedModel and records memory usage. +""" +import functools +import time + +from absl import flags +import tensorflow as tf +import tensorflow_hub as hub + +from official.benchmark.perfzero_benchmark import PerfZeroBenchmark + +FLAGS = flags.FLAGS + + +class TfHubMemoryUsageBenchmark(PerfZeroBenchmark): + """A benchmark measuring memory usage for a given TF Hub SavedModel.""" + + def __init__(self, + hub_model_handle_list=None, + output_dir=None, + default_flags=None, + root_data_dir=None, + **kwargs): + super(TfHubMemoryUsageBenchmark, self).__init__( + output_dir=output_dir, default_flags=default_flags, **kwargs) + if hub_model_handle_list: + for hub_model_handle in hub_model_handle_list.split(';'): + # Converts a model handle of the form + # https://tfhub.dev/google/nnlm-en-dim128/1 to valid python method name + # like google_nnlm_en_dim128_1. + hub_model_method_name = hub_model_handle.replace( + 'https://tfhub.dev', + '').replace('/', '_').replace('-', '_').strip('_') + setattr( + self, 'benchmark_' + hub_model_method_name, + functools.partial(self.benchmark_memory_usage, hub_model_handle)) + + def benchmark_memory_usage( + self, hub_model_handle='https://tfhub.dev/google/nnlm-en-dim128/1'): + start_time_sec = time.time() + self.load_model(hub_model_handle) + wall_time_sec = time.time() - start_time_sec + + metrics = [] + self.report_benchmark(iters=-1, wall_time=wall_time_sec, metrics=metrics) + + def load_model(self, hub_model_handle): + """Loads a TF Hub module.""" + hub.load(hub_model_handle) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/benchmark/transformer_benchmark.py b/models/official/benchmark/transformer_benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..e61201aa174af4882c6dbab28e10fe64d8cc1377 --- /dev/null +++ b/models/official/benchmark/transformer_benchmark.py @@ -0,0 +1,757 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Executes Transformer w/Keras benchmark and accuracy tests.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import time + +from absl import flags +import tensorflow as tf +from official.benchmark import benchmark_wrappers +from official.benchmark import owner_utils +from official.benchmark.perfzero_benchmark import PerfZeroBenchmark +from official.nlp.transformer import misc +from official.nlp.transformer import transformer_main as transformer_main +from official.utils.flags import core as flags_core + +TRANSFORMER_EN2DE_DATA_DIR_NAME = 'wmt32k-en2de-official' +EN2DE_2014_BLEU_DATA_DIR_NAME = 'newstest2014' +FLAGS = flags.FLAGS +TMP_DIR = os.getenv('TMPDIR') + + +class TransformerBenchmark(PerfZeroBenchmark): + """Methods common to executing transformer w/keras tests. + + Code under test for the Transformer Keras models report the same data and + require the same FLAG setup. + """ + + def __init__(self, output_dir=None, default_flags=None, root_data_dir=None, + flag_methods=None, tpu=None): + root_data_dir = root_data_dir if root_data_dir else '' + + self.train_data_dir = os.path.join(root_data_dir, + TRANSFORMER_EN2DE_DATA_DIR_NAME) + + self.vocab_file = os.path.join(root_data_dir, + TRANSFORMER_EN2DE_DATA_DIR_NAME, + 'vocab.ende.32768') + + self.bleu_source = os.path.join(root_data_dir, + EN2DE_2014_BLEU_DATA_DIR_NAME, + 'newstest2014.en') + + self.bleu_ref = os.path.join(root_data_dir, + EN2DE_2014_BLEU_DATA_DIR_NAME, + 'newstest2014.de') + + if default_flags is None: + default_flags = {} + default_flags['data_dir'] = self.train_data_dir + default_flags['vocab_file'] = self.vocab_file + + super(TransformerBenchmark, self).__init__( + output_dir=output_dir, + default_flags=default_flags, + flag_methods=flag_methods, + tpu=tpu) + + @benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark(self, + bleu_max=None, + bleu_min=None, + log_steps=None, + total_batch_size=None, + warmup=1): + """Report benchmark results by writing to local protobuf file. + + Args: + bleu_max: highest passing level for bleu score. + bleu_min: lowest passing level for bleu score. + log_steps: How often the log was created for stats['step_timestamp_log']. + total_batch_size: Global batch-size. + warmup: number of entries in stats['step_timestamp_log'] to ignore. 
+ """ + start_time_sec = time.time() + task = transformer_main.TransformerTask(FLAGS) + stats = task.train() + wall_time_sec = time.time() - start_time_sec + + metrics = [] + if 'bleu_uncased' in stats: + if 'bleu_uncased_history' in stats: + bleu_uncased_best = max(stats['bleu_uncased_history'], + key=lambda x: x[1]) + metrics.append({'name': 'bleu_uncased', + 'value': bleu_uncased_best[1], + 'min_value': bleu_min, + 'max_value': bleu_max}) + metrics.append({'name': 'bleu_best_score_iteration', + 'value': bleu_uncased_best[0]}) + metrics.append({'name': 'bleu_uncased_last', + 'value': stats['bleu_uncased']}) + else: + metrics.append({'name': 'bleu_uncased', + 'value': stats['bleu_uncased'], + 'min_value': bleu_min, + 'max_value': bleu_max}) + + if (warmup and 'step_timestamp_log' in stats and + len(stats['step_timestamp_log']) > warmup + 1): + # first entry in the time_log is start of step 1. The rest of the + # entries are the end of each step recorded + time_log = stats['step_timestamp_log'] + elapsed = time_log[-1].timestamp - time_log[warmup].timestamp + num_examples = ( + total_batch_size * log_steps * (len(time_log) - warmup - 1)) + examples_per_sec = num_examples / elapsed + metrics.append({'name': 'exp_per_second', + 'value': examples_per_sec}) + + if 'avg_exp_per_second' in stats: + metrics.append({'name': 'avg_exp_per_second', + 'value': stats['avg_exp_per_second']}) + + if 'step_timestamp_log' in stats: + time_log = stats['step_timestamp_log'] + metrics.append({'name': 'startup_time', + 'value': time_log[0].timestamp - start_time_sec}) + + flags_str = flags_core.get_nondefault_flags_as_str() + self.report_benchmark(iters=-1, wall_time=wall_time_sec, metrics=metrics, + extras={'flags': flags_str}) + + +class TransformerBaseKerasAccuracy(TransformerBenchmark): + """Benchmark accuracy tests for Transformer Base model w/ Keras.""" + + def __init__(self, output_dir=None, root_data_dir=None, **kwargs): + """Benchmark accuracy tests for Transformer Base model w/ Keras. + + Args: + output_dir: directory where to output e.g. log files + root_data_dir: directory under which to look for dataset + **kwargs: arbitrary named arguments. This is needed to make the + constructor forward compatible in case PerfZero provides more + named arguments before updating the constructor. + """ + flag_methods = [misc.define_transformer_flags] + + super(TransformerBaseKerasAccuracy, self).__init__( + output_dir=output_dir, root_data_dir=root_data_dir, + flag_methods=flag_methods) + + def benchmark_1_gpu(self): + """Benchmark 1 gpu. + + The paper uses 8 GPUs and a much larger effective batch size, this is will + not converge to the 27.3 BLEU (uncased) SOTA. + """ + self._setup() + FLAGS.num_gpus = 1 + FLAGS.data_dir = self.train_data_dir + FLAGS.vocab_file = self.vocab_file + # Sets values directly to avoid validation check. + FLAGS['bleu_source'].value = self.bleu_source + FLAGS['bleu_ref'].value = self.bleu_ref + FLAGS.param_set = 'base' + FLAGS.batch_size = 2048 + FLAGS.train_steps = 1000 + FLAGS.steps_between_evals = 500 + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu') + # These bleu scores are based on test runs after at this limited + # number of steps and batch size after verifying SOTA at 8xV100s. + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps, + bleu_min=25.3, + bleu_max=26) + + def benchmark_1_gpu_static_batch(self): + """Benchmark 1 gpu with static_batch. 
+ + The paper uses 8 GPUs and a much larger effective batch size, this is will + not converge to the 27.3 BLEU (uncased) SOTA. + """ + self._setup() + FLAGS.num_gpus = 1 + FLAGS.data_dir = self.train_data_dir + FLAGS.vocab_file = self.vocab_file + # Sets values directly to avoid validation check. + FLAGS['bleu_source'].value = self.bleu_source + FLAGS['bleu_ref'].value = self.bleu_ref + FLAGS.param_set = 'base' + FLAGS.batch_size = 4096 + FLAGS.train_steps = 100000 + FLAGS.steps_between_evals = 5000 + FLAGS.static_batch = True + FLAGS.max_length = 64 + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_static_batch') + # These bleu scores are based on test runs after at this limited + # number of steps and batch size after verifying SOTA at 8xV100s. + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps, + bleu_min=25.3, + bleu_max=26) + + def benchmark_8_gpu(self): + """Benchmark 8 gpu. + + Should converge to 27.3 BLEU (uncased). This has not been confirmed yet. + """ + self._setup() + FLAGS.num_gpus = 8 + FLAGS.data_dir = self.train_data_dir + FLAGS.vocab_file = self.vocab_file + # Sets values directly to avoid validation check. + FLAGS['bleu_source'].value = self.bleu_source + FLAGS['bleu_ref'].value = self.bleu_ref + FLAGS.param_set = 'base' + FLAGS.batch_size = 4096*8 + FLAGS.train_steps = 100000 + FLAGS.steps_between_evals = 20000 + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu') + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps, + bleu_min=27, + bleu_max=28) + + def benchmark_8_gpu_static_batch(self): + """Benchmark 8 gpu. + + Should converge to 27.3 BLEU (uncased). This has not been confirmed yet. + """ + self._setup() + FLAGS.num_gpus = 8 + FLAGS.data_dir = self.train_data_dir + FLAGS.vocab_file = self.vocab_file + # Sets values directly to avoid validation check. + FLAGS['bleu_source'].value = self.bleu_source + FLAGS['bleu_ref'].value = self.bleu_ref + FLAGS.param_set = 'base' + FLAGS.batch_size = 4096*8 + FLAGS.train_steps = 100000 + FLAGS.static_batch = True + FLAGS.max_length = 64 + FLAGS.steps_between_evals = 5000 + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_static_batch') + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps, + bleu_min=27, + bleu_max=28) + + +class TransformerBigKerasAccuracy(TransformerBenchmark): + """Benchmark accuracy tests for Transformer Big model w/ Keras.""" + + def __init__(self, output_dir=None, root_data_dir=None, **kwargs): + """Benchmark accuracy tests for Transformer Big model w/ Keras. + + Args: + output_dir: directory where to output e.g. log files + root_data_dir: directory under which to look for dataset + **kwargs: arbitrary named arguments. This is needed to make the + constructor forward compatible in case PerfZero provides more + named arguments before updating the constructor. + """ + flag_methods = [misc.define_transformer_flags] + + super(TransformerBigKerasAccuracy, self).__init__( + output_dir=output_dir, root_data_dir=root_data_dir, + flag_methods=flag_methods) + + def benchmark_8_gpu(self): + """Benchmark 8 gpu. + + Over 6 runs with eval every 20K steps the average highest value was 28.195 + (bleu uncased). 28.424 was the highest and 27.96 the lowest. The values are + the highest value seen during a run and occurred at a median of iteration 9. + Iterations are not epochs, an iteration is a number of steps between evals. 
+ """ + self._setup() + FLAGS.num_gpus = 8 + FLAGS.data_dir = self.train_data_dir + FLAGS.vocab_file = self.vocab_file + # Sets values directly to avoid validation check. + FLAGS['bleu_source'].value = self.bleu_source + FLAGS['bleu_ref'].value = self.bleu_ref + FLAGS.param_set = 'big' + FLAGS.batch_size = 3072*8 + FLAGS.train_steps = 20000 * 12 + FLAGS.steps_between_evals = 20000 + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu') + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps, + bleu_min=27.9, + bleu_max=29.2) + + def benchmark_8_gpu_static_batch(self): + """Benchmark 8 gpu. + + Should converge to 28.4 BLEU (uncased). This has not be verified yet." + """ + self._setup() + FLAGS.num_gpus = 8 + FLAGS.data_dir = self.train_data_dir + FLAGS.vocab_file = self.vocab_file + # Sets values directly to avoid validation check. + FLAGS['bleu_source'].value = self.bleu_source + FLAGS['bleu_ref'].value = self.bleu_ref + FLAGS.param_set = 'big' + FLAGS.batch_size = 3072*8 + FLAGS.static_batch = True + FLAGS.max_length = 64 + FLAGS.train_steps = 20000 * 12 + FLAGS.steps_between_evals = 20000 + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_static_batch') + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps, + bleu_min=28, + bleu_max=29.2) + + def benchmark_8_gpu_fp16(self): + """Benchmark 8 gpu with dynamic batch and fp16. + + Over 6 runs with eval every 20K steps the average highest value was 28.247 + (bleu uncased). 28.424 was the highest and 28.09 the lowest. The values are + the highest value seen during a run and occurred at a median of iteration + 11. While this could be interpreted as worse than FP32, if looking at the + first iteration at which 28 is passed FP16 performs equal and possibly + better. Although not part of the initial test runs, the highest value + recorded with the arguments below was 28.9 at iteration 12. Iterations are + not epochs, an iteration is a number of steps between evals. + """ + self._setup() + FLAGS.num_gpus = 8 + FLAGS.dtype = 'fp16' + FLAGS.data_dir = self.train_data_dir + FLAGS.vocab_file = self.vocab_file + # Sets values directly to avoid validation check. + FLAGS['bleu_source'].value = self.bleu_source + FLAGS['bleu_ref'].value = self.bleu_ref + FLAGS.param_set = 'big' + FLAGS.batch_size = 3072*8 + FLAGS.train_steps = 20000 * 12 + FLAGS.steps_between_evals = 20000 + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16') + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps, + bleu_min=28, + bleu_max=29.2) + + def benchmark_8_gpu_fp16_amp(self): + """Benchmark 8 gpu with dynamic batch and fp16 with automatic mixed precision. + + Should converge to 28.4 BLEU (uncased). This has not be verified yet." + """ + self._setup() + FLAGS.num_gpus = 8 + FLAGS.dtype = 'fp16' + FLAGS.fp16_implementation = 'graph_rewrite' + FLAGS.data_dir = self.train_data_dir + FLAGS.vocab_file = self.vocab_file + # Sets values directly to avoid validation check. 
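+    # (Assigning through FLAGS['name'].value writes to the underlying Flag
+    # object and skips the validators that a plain `FLAGS.bleu_source = ...`
+    # assignment would trigger.)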
+ FLAGS['bleu_source'].value = self.bleu_source + FLAGS['bleu_ref'].value = self.bleu_ref + FLAGS.param_set = 'big' + FLAGS.batch_size = 3072*8 + FLAGS.train_steps = 20000 * 12 + FLAGS.steps_between_evals = 20000 + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16_amp') + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps, + bleu_min=28, + bleu_max=29) + + def benchmark_8_gpu_static_batch_fp16(self): + """Benchmark 8 gpu with static batch and fp16. + + Should converge to 28.4 BLEU (uncased). This has not be verified yet." + """ + self._setup() + FLAGS.num_gpus = 8 + FLAGS.dtype = 'fp16' + FLAGS.data_dir = self.train_data_dir + FLAGS.vocab_file = self.vocab_file + # Sets values directly to avoid validation check. + FLAGS['bleu_source'].value = self.bleu_source + FLAGS['bleu_ref'].value = self.bleu_ref + FLAGS.param_set = 'big' + FLAGS.batch_size = 3072*8 + FLAGS.static_batch = True + FLAGS.max_length = 64 + FLAGS.train_steps = 400000 + FLAGS.steps_between_evals = 20000 + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_static_batch_fp16') + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps, + bleu_min=28, + bleu_max=29.2) + + def benchmark_xla_8_gpu_static_batch_fp16(self): + """Benchmark 8 gpu with static batch, XLA, and FP16. + + Should converge to 28.4 BLEU (uncased). This has not be verified yet." + """ + self._setup() + FLAGS.num_gpus = 8 + FLAGS.dtype = 'fp16' + FLAGS.enable_xla = True + FLAGS.data_dir = self.train_data_dir + FLAGS.vocab_file = self.vocab_file + # Sets values directly to avoid validation check. + FLAGS['bleu_source'].value = self.bleu_source + FLAGS['bleu_ref'].value = self.bleu_ref + FLAGS.param_set = 'big' + FLAGS.batch_size = 3072*8 + FLAGS.static_batch = True + FLAGS.max_length = 64 + FLAGS.train_steps = 400000 + FLAGS.steps_between_evals = 20000 + FLAGS.model_dir = self._get_model_dir( + 'benchmark_xla_8_gpu_static_batch_fp16') + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps, + bleu_min=28, + bleu_max=29.2) + + +class TransformerKerasBenchmark(TransformerBenchmark): + """Benchmarks for Transformer (Base and Big) using Keras.""" + + def __init__(self, output_dir=None, default_flags=None, + root_data_dir=None, batch_per_gpu=4096, tpu=None): + """Initialize. + + Args: + output_dir: Based directory for saving artifacts, e.g. checkpoints. + default_flags: default flags to use for all tests. + root_data_dir: root directory for data, e.g. training. + batch_per_gpu: batch size to use per gpu. + tpu: Target TPU to use. 
+ """ + flag_methods = [misc.define_transformer_flags] + self.batch_per_gpu = batch_per_gpu + + super(TransformerKerasBenchmark, self).__init__( + output_dir=output_dir, + default_flags=default_flags, + root_data_dir=root_data_dir, + flag_methods=flag_methods, + tpu=tpu) + + def benchmark_1_gpu_no_dist_strat(self): + """Benchmark 1 gpu without distribution strategy.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.distribution_strategy = 'off' + FLAGS.batch_size = self.batch_per_gpu + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_dist_strat') + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps) + + def benchmark_1_gpu_no_dist_strat_static_batch(self): + """Benchmark 1 gpu without distribution strategy with static batch.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.distribution_strategy = 'off' + FLAGS.batch_size = self.batch_per_gpu + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_ds_sb') + FLAGS.static_batch = True + FLAGS.max_length = 64 + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps) + + def benchmark_1_gpu(self): + """Benchmark 1 gpu.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.batch_size = self.batch_per_gpu + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu') + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps) + + def benchmark_1_gpu_fp16(self): + """Benchmark 1 gpu FP16.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.batch_size = self.batch_per_gpu + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16') + FLAGS.dtype = 'fp16' + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps) + + def benchmark_xla_1_gpu(self): + """Benchmark 1 gpu w/xla.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.batch_size = self.batch_per_gpu + FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu') + FLAGS.enable_xla = True + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps) + + def benchmark_xla_1_gpu_fp16(self): + """Benchmark 1 gpu w/xla and FP16.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.batch_size = self.batch_per_gpu + FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16') + FLAGS.enable_xla = True + FLAGS.dtype = 'fp16' + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps) + + def benchmark_1_gpu_static_batch(self): + """Benchmark 1 gpu with static batch.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.batch_size = self.batch_per_gpu + FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_static_batch') + FLAGS.static_batch = True + FLAGS.max_length = 64 + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps) + + def benchmark_xla_1_gpu_static_batch(self): + """Benchmark 1 gpu with static batch w/xla.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.batch_size = self.batch_per_gpu + FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_static_batch') + FLAGS.static_batch = True + FLAGS.max_length = 64 + FLAGS.enable_xla = True + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps) + + def benchmark_1_gpu_static_batch_fp16(self): + """Benchmark 1 gpu with static batch FP16.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.batch_size = self.batch_per_gpu + FLAGS.model_dir = self._get_model_dir( + 'benchmark_1_gpu_static_batch_fp16') + FLAGS.static_batch = True + FLAGS.max_length = 64 + 
FLAGS.dtype = 'fp16' + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps) + + def benchmark_xla_1_gpu_static_batch_fp16(self): + """Benchmark 1 gpu with static batch w/xla and FP16.""" + self._setup() + FLAGS.num_gpus = 1 + FLAGS.batch_size = self.batch_per_gpu + FLAGS.model_dir = self._get_model_dir( + 'benchmark_xla_1_gpu_static_batch_fp16') + FLAGS.static_batch = True + FLAGS.max_length = 64 + FLAGS.enable_xla = True + FLAGS.dtype = 'fp16' + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps) + + def benchmark_8_gpu(self): + """Benchmark 8 gpu.""" + self._setup() + FLAGS.num_gpus = 8 + FLAGS.batch_size = self.batch_per_gpu * 8 + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu') + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps) + + def benchmark_8_gpu_fp16(self): + """Benchmark 8 gpu FP16.""" + self._setup() + FLAGS.num_gpus = 8 + FLAGS.dtype = 'fp16' + FLAGS.batch_size = self.batch_per_gpu * 8 + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16') + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps) + + def benchmark_xla_8_gpu(self): + """Benchmark 8 gpu w/xla.""" + self._setup() + FLAGS.num_gpus = 8 + FLAGS.enable_xla = True + FLAGS.batch_size = self.batch_per_gpu * 8 + FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu') + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps) + + def benchmark_xla_8_gpu_fp16(self): + """Benchmark 8 gpu w/xla and FP16.""" + self._setup() + FLAGS.num_gpus = 8 + FLAGS.enable_xla = True + FLAGS.dtype = 'fp16' + FLAGS.batch_size = self.batch_per_gpu * 8 + FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16') + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps) + + def benchmark_8_gpu_static_batch(self): + """Benchmark 8 gpu with static batch.""" + self._setup() + FLAGS.num_gpus = 8 + FLAGS.batch_size = self.batch_per_gpu * 8 + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_static_batch') + FLAGS.static_batch = True + FLAGS.max_length = 64 + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps) + + def benchmark_8_gpu_static_batch_fp16(self): + """Benchmark 8 gpu with static batch FP16.""" + self._setup() + FLAGS.num_gpus = 8 + FLAGS.dtype = 'fp16' + FLAGS.batch_size = self.batch_per_gpu * 8 + FLAGS.model_dir = self._get_model_dir( + 'benchmark_8_gpu_static_batch_fp16') + FLAGS.static_batch = True + FLAGS.max_length = 64 + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps) + + def benchmark_xla_8_gpu_static_batch(self): + """Benchmark 8 gpu with static batch w/xla.""" + self._setup() + FLAGS.num_gpus = 8 + FLAGS.enable_xla = True + FLAGS.batch_size = self.batch_per_gpu * 8 + FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_static_batch') + FLAGS.static_batch = True + FLAGS.max_length = 64 + self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps) + + def benchmark_xla_8_gpu_static_batch_fp16(self): + """Benchmark 8 gpu with static batch w/xla and FP16.""" + self._setup() + FLAGS.num_gpus = 8 + FLAGS.enable_xla = True + FLAGS.dtype = 'fp16' + FLAGS.batch_size = self.batch_per_gpu * 8 + FLAGS.model_dir = self._get_model_dir( + 'benchmark_xla_8_gpu_static_batch_fp16') + FLAGS.static_batch = True + FLAGS.max_length = 64 + 
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps) + + +class TransformerBaseKerasBenchmarkReal(TransformerKerasBenchmark): + """Transformer based version real data benchmark tests.""" + + def __init__(self, output_dir=TMP_DIR, root_data_dir=TMP_DIR, **kwargs): + def_flags = {} + def_flags['param_set'] = 'base' + def_flags['train_steps'] = 50 + def_flags['log_steps'] = 10 + + super(TransformerBaseKerasBenchmarkReal, self).__init__( + output_dir=output_dir, default_flags=def_flags, + root_data_dir=root_data_dir, batch_per_gpu=4096) + + +class TransformerBigKerasBenchmarkReal(TransformerKerasBenchmark): + """Transformer based version real data benchmark tests.""" + + def __init__(self, output_dir=TMP_DIR, root_data_dir=TMP_DIR, + tpu=None, **kwargs): + def_flags = {} + def_flags['param_set'] = 'big' + def_flags['train_steps'] = 50 + def_flags['log_steps'] = 10 + + super(TransformerBigKerasBenchmarkReal, self).__init__( + output_dir=output_dir, default_flags=def_flags, + root_data_dir=root_data_dir, batch_per_gpu=3072, + tpu=tpu) + + def benchmark_2x2_tpu(self): + """Port of former snaggletooth transformer_big model on 2x2.""" + self._setup() + FLAGS.model_dir = self._get_model_dir('benchmark_2x2_tpu') + FLAGS.train_steps = 300 + FLAGS.log_steps = 150 + FLAGS.steps_between_evals = 150 + FLAGS.distribution_strategy = 'tpu' + FLAGS.static_batch = True + FLAGS.use_ctl = True + FLAGS.batch_size = 6144 + FLAGS.max_length = 64 + FLAGS.decode_batch_size = 32 + FLAGS.decode_max_length = 97 + FLAGS.padded_decode = True + FLAGS.enable_checkpointing = False + + self._run_and_report_benchmark( + total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps) + + def benchmark_4x4_tpu(self): + """Port of former GCP transformer_big model on 4x4.""" + self._setup() + FLAGS.model_dir = self._get_model_dir('benchmark_4x4_tpu') + FLAGS.train_steps = 300 + FLAGS.log_steps = 150 + FLAGS.steps_between_evals = 150 + FLAGS.distribution_strategy = 'tpu' + FLAGS.static_batch = True + FLAGS.use_ctl = True + FLAGS.batch_size = 24576 + FLAGS.max_length = 64 + FLAGS.decode_batch_size = 32 + FLAGS.decode_max_length = 97 + FLAGS.padded_decode = True + FLAGS.enable_checkpointing = False + + self._run_and_report_benchmark( + total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps) + + @owner_utils.Owner('tf-graph-compiler') + def benchmark_4x4_tpu_mlir(self): + """Run transformer_big model on 4x4 with the MLIR Bridge enabled.""" + self._setup() + FLAGS.model_dir = self._get_model_dir('benchmark_4x4_tpu') + FLAGS.train_steps = 300 + FLAGS.log_steps = 150 + FLAGS.steps_between_evals = 150 + FLAGS.distribution_strategy = 'tpu' + FLAGS.static_batch = True + FLAGS.use_ctl = True + FLAGS.batch_size = 24576 + FLAGS.max_length = 64 + FLAGS.decode_batch_size = 32 + FLAGS.decode_max_length = 97 + FLAGS.padded_decode = True + FLAGS.enable_checkpointing = False + tf.config.experimental.enable_mlir_bridge() + + self._run_and_report_benchmark( + total_batch_size=FLAGS.batch_size, + log_steps=FLAGS.log_steps) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/benchmark/unet3d_benchmark.py b/models/official/benchmark/unet3d_benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..2614b29259dcf4c85d609abca94706c95570b7ec --- /dev/null +++ b/models/official/benchmark/unet3d_benchmark.py @@ -0,0 +1,148 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Executes benchmark testing for 3D Unet model.""" +# pylint: disable=line-too-long +from __future__ import print_function + +import functools +import os +import time +from typing import Optional +from absl import flags +import tensorflow as tf # pylint: disable=g-bad-import-order + +from official.benchmark import benchmark_wrappers +from official.benchmark import keras_benchmark +from official.benchmark import owner_utils +from official.vision.segmentation import unet_main as unet_training_lib +from official.vision.segmentation import unet_model as unet_model_lib + +UNET3D_MIN_ACCURACY = 0.90 +UNET3D_MAX_ACCURACY = 0.98 +UNET_TRAINING_FILES = 'gs://mlcompass-data/unet3d/train_data/*' +UNET_EVAL_FILES = 'gs://mlcompass-data/unet3d/eval_data/*' +UNET_MODEL_CONFIG_FILE = 'gs://mlcompass-data/unet3d/config/unet_config.yaml' + +FLAGS = flags.FLAGS + + +class Unet3DAccuracyBenchmark(keras_benchmark.KerasBenchmark): + """Benchmark accuracy tests for UNet3D model in Keras.""" + + def __init__(self, + output_dir: Optional[str] = None, + root_data_dir: Optional[str] = None, + **kwargs): + """A benchmark class. + + Args: + output_dir: directory where to output e.g. log files + root_data_dir: directory under which to look for dataset + **kwargs: arbitrary named arguments. This is needed to make the + constructor forward compatible in case PerfZero provides more named + arguments before updating the constructor. + """ + + flag_methods = [unet_training_lib.define_unet3d_flags] + + self.training_file_pattern = UNET_TRAINING_FILES + self.eval_file_pattern = UNET_EVAL_FILES + + # TODO(hongjunchoi): Create and use shared config file instead.
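+ # Until then, model and training settings are read from the unet_config.yaml on GCS defined in UNET_MODEL_CONFIG_FILE above.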
+ self.config_file = UNET_MODEL_CONFIG_FILE + super(Unet3DAccuracyBenchmark, self).__init__( + output_dir=output_dir, flag_methods=flag_methods) + + def _set_benchmark_parameters(self, experiment_name): + """Overrides training parameters for benchmark tests.""" + FLAGS.model_dir = self._get_model_dir(experiment_name) + FLAGS.mode = 'train' + FLAGS.training_file_pattern = self.training_file_pattern + FLAGS.eval_file_pattern = self.eval_file_pattern + FLAGS.config_file = self.config_file + FLAGS.lr_init_value = 0.00005 + FLAGS.lr_decay_rate = 0.5 + FLAGS.epochs = 3 + + @benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark(self, + experiment_name: str, + min_accuracy: float = UNET3D_MIN_ACCURACY, + max_accuracy: float = UNET3D_MAX_ACCURACY, + distribution_strategy: str = 'tpu', + epochs: int = 10, + steps: int = 0, + epochs_between_evals: int = 1, + dtype: str = 'float32', + enable_xla: bool = False, + run_eagerly: bool = False): + """Runs and reports the benchmark given the provided configuration.""" + params = unet_training_lib.extract_params(FLAGS) + strategy = unet_training_lib.create_distribution_strategy(params) + if params.use_bfloat16: + policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16') + tf.keras.mixed_precision.experimental.set_policy(policy) + + stats = {} + start_time_sec = time.time() + with strategy.scope(): + unet_model = unet_model_lib.build_unet_model(params) + history = unet_training_lib.train( + params, strategy, unet_model, + functools.partial(unet_training_lib.get_train_dataset, params), + functools.partial(unet_training_lib.get_eval_dataset, params)) + + stats['accuracy_top_1'] = history.history['val_metric_accuracy'][-1] + stats['training_accuracy_top_1'] = history.history['metric_accuracy'][-1] + wall_time_sec = time.time() - start_time_sec + + super(Unet3DAccuracyBenchmark, self)._report_benchmark( + stats, + wall_time_sec, + top_1_min=min_accuracy, + top_1_max=max_accuracy, + total_batch_size=params.train_batch_size) + + def _get_model_dir(self, folder_name): + return os.path.join(self.output_dir, folder_name) + + @owner_utils.Owner('tf-model-garden') + def benchmark_4x4_tpu_bf16(self): + """Test Keras model with 4x4 TPU, bf16.""" + experiment_name = 'benchmark_4x4_tpu_fp16' + self._setup() + self._set_benchmark_parameters(experiment_name) + self._run_and_report_benchmark( + experiment_name=experiment_name, + dtype='bfloat16', + distribution_strategy='tpu') + + @owner_utils.Owner('tf-graph-compiler') + def benchmark_4x4_tpu_bf16_mlir(self): + """Test Keras model with 4x4 TPU, bf16 and MLIR enabled.""" + experiment_name = 'benchmark_4x4_tpu_fp16_mlir' + tf.config.experimental.enable_mlir_bridge() + self._setup() + self._set_benchmark_parameters(experiment_name) + self._run_and_report_benchmark( + experiment_name=experiment_name, + dtype='bfloat16', + distribution_strategy='tpu') + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/benchmark/xlnet_benchmark.py b/models/official/benchmark/xlnet_benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..4df69cf081a4a06000ed46ea66ac742cb1c39e02 --- /dev/null +++ b/models/official/benchmark/xlnet_benchmark.py @@ -0,0 +1,246 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Executes XLNet benchmarks and accuracy tests.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import json +import os +import time + +# pylint: disable=g-bad-import-order +from absl import flags +from absl.testing import flagsaver +import tensorflow as tf +# pylint: enable=g-bad-import-order + +from official.benchmark import bert_benchmark_utils as benchmark_utils +from official.benchmark import owner_utils +from official.nlp.xlnet import run_classifier +from official.nlp.xlnet import run_squad +from official.benchmark import benchmark_wrappers + + +# pylint: disable=line-too-long +PRETRAINED_CHECKPOINT_PATH = 'gs://cloud-tpu-checkpoints/xlnet/large/xlnet_model-1' +CLASSIFIER_TRAIN_DATA_PATH = 'gs://tf-perfzero-data/xlnet/imdb/spiece.model.len-512.train.tf_record' +CLASSIFIER_EVAL_DATA_PATH = 'gs://tf-perfzero-data/xlnet/imdb/spiece.model.len-512.dev.eval.tf_record' +SQUAD_DATA_PATH = 'gs://tf-perfzero-data/xlnet/squadv2_cased/' +# pylint: enable=line-too-long + +FLAGS = flags.FLAGS + + +class XLNetBenchmarkBase(benchmark_utils.BertBenchmarkBase): + """Base class to hold methods common to test classes in the module.""" + + def __init__(self, output_dir=None, tpu=None): + super(XLNetBenchmarkBase, self).__init__(output_dir=output_dir, tpu=tpu) + self.num_epochs = None + self.num_steps_per_epoch = None + + @flagsaver.flagsaver + def _run_xlnet_classifier(self): + """Starts XLNet classification task.""" + run_classifier.main(unused_argv=None) + + @flagsaver.flagsaver + def _run_xlnet_squad(self): + """Starts XLNet SQuAD task.""" + run_squad.main(unused_argv=None) + + +class XLNetClassifyAccuracy(XLNetBenchmarkBase): + """Short accuracy test for XLNet classifier model. + + Tests XLNet classification task model accuracy. The naming + convention of the test cases below follows the + `benchmark_(number of gpus)_gpu_(dataset type)` format.
+ """ + + def __init__(self, output_dir=None, tpu=None, **kwargs): + self.train_data_path = CLASSIFIER_TRAIN_DATA_PATH + self.eval_data_path = CLASSIFIER_EVAL_DATA_PATH + self.pretrained_checkpoint_path = PRETRAINED_CHECKPOINT_PATH + + super(XLNetClassifyAccuracy, self).__init__(output_dir=output_dir, tpu=tpu) + + @benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark(self, + training_summary_path, + min_accuracy=0.95, + max_accuracy=0.97): + """Starts XLNet accuracy benchmark test.""" + + start_time_sec = time.time() + self._run_xlnet_classifier() + wall_time_sec = time.time() - start_time_sec + + with tf.io.gfile.GFile(training_summary_path, 'rb') as reader: + summary = json.loads(reader.read().decode('utf-8')) + + super(XLNetClassifyAccuracy, self)._report_benchmark( + stats=summary, + wall_time_sec=wall_time_sec, + min_accuracy=min_accuracy, + max_accuracy=max_accuracy) + + def _setup(self): + super(XLNetClassifyAccuracy, self)._setup() + FLAGS.test_data_size = 25024 + FLAGS.train_batch_size = 16 + FLAGS.seq_len = 512 + FLAGS.mem_len = 0 + FLAGS.n_layer = 24 + FLAGS.d_model = 1024 + FLAGS.d_embed = 1024 + FLAGS.n_head = 16 + FLAGS.d_head = 64 + FLAGS.d_inner = 4096 + FLAGS.untie_r = True + FLAGS.n_class = 2 + FLAGS.ff_activation = 'gelu' + FLAGS.strategy_type = 'mirror' + FLAGS.learning_rate = 2e-5 + FLAGS.train_steps = 4000 + FLAGS.warmup_steps = 500 + FLAGS.iterations = 200 + FLAGS.bi_data = False + FLAGS.init_checkpoint = self.pretrained_checkpoint_path + FLAGS.train_tfrecord_path = self.train_data_path + FLAGS.test_tfrecord_path = self.eval_data_path + + @owner_utils.Owner('tf-model-garden') + def benchmark_8_gpu_imdb(self): + """Run XLNet model accuracy test with 8 GPUs.""" + self._setup() + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_imdb') + # Sets timer_callback to None as we do not use it now. + self.timer_callback = None + + summary_path = os.path.join(FLAGS.model_dir, + 'summaries/training_summary.txt') + self._run_and_report_benchmark(summary_path) + + @owner_utils.Owner('tf-model-garden') + def benchmark_2x2_tpu_imdb(self): + """Run XLNet model accuracy test on 2x2 tpu.""" + self._setup() + FLAGS.strategy_type = 'tpu' + FLAGS.model_dir = self._get_model_dir('benchmark_2x2_tpu_imdb') + # Sets timer_callback to None as we do not use it now. + self.timer_callback = None + + summary_path = os.path.join(FLAGS.model_dir, + 'summaries/training_summary.txt') + self._run_and_report_benchmark(summary_path) + + +class XLNetSquadAccuracy(XLNetBenchmarkBase): + """Short accuracy test for XLNet squad model. + + Tests XLNet squad task model accuracy. The naming + convention of below test cases follow + `benchmark_(number of gpus)_gpu_(dataset type)` format. 
+ """ + + def __init__(self, output_dir=None, tpu=None, **kwargs): + self.train_data_path = SQUAD_DATA_PATH + self.predict_file = os.path.join(SQUAD_DATA_PATH, "dev-v2.0.json") + self.test_data_path = os.path.join(SQUAD_DATA_PATH, "12048.eval.tf_record") + self.spiece_model_file = os.path.join(SQUAD_DATA_PATH, "spiece.cased.model") + self.pretrained_checkpoint_path = PRETRAINED_CHECKPOINT_PATH + + super(XLNetSquadAccuracy, self).__init__(output_dir=output_dir, tpu=tpu) + + @benchmark_wrappers.enable_runtime_flags + def _run_and_report_benchmark(self, + training_summary_path, + min_accuracy=87.0, + max_accuracy=89.0): + """Starts XLNet accuracy benchmark test.""" + + start_time_sec = time.time() + self._run_xlnet_squad() + wall_time_sec = time.time() - start_time_sec + + with tf.io.gfile.GFile(training_summary_path, 'rb') as reader: + summary = json.loads(reader.read().decode('utf-8')) + + super(XLNetSquadAccuracy, self)._report_benchmark( + stats=summary, + wall_time_sec=wall_time_sec, + min_accuracy=min_accuracy, + max_accuracy=max_accuracy) + + def _setup(self): + super(XLNetSquadAccuracy, self)._setup() + FLAGS.train_batch_size = 16 + FLAGS.seq_len = 512 + FLAGS.mem_len = 0 + FLAGS.n_layer = 24 + FLAGS.d_model = 1024 + FLAGS.d_embed = 1024 + FLAGS.n_head = 16 + FLAGS.d_head = 64 + FLAGS.d_inner = 4096 + FLAGS.untie_r = True + FLAGS.ff_activation = 'gelu' + FLAGS.strategy_type = 'mirror' + FLAGS.learning_rate = 3e-5 + FLAGS.train_steps = 8000 + FLAGS.warmup_steps = 1000 + FLAGS.iterations = 1000 + FLAGS.bi_data = False + FLAGS.init_checkpoint = self.pretrained_checkpoint_path + FLAGS.train_tfrecord_path = self.train_data_path + FLAGS.test_tfrecord_path = self.test_data_path + FLAGS.spiece_model_file = self.spiece_model_file + FLAGS.predict_file = self.predict_file + FLAGS.adam_epsilon = 1e-6 + FLAGS.lr_layer_decay_rate = 0.75 + + @owner_utils.Owner('tf-model-garden') + def benchmark_8_gpu_squadv2(self): + """Run XLNet model squad v2 accuracy test with 8 GPUs.""" + self._setup() + FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_squadv2') + FLAGS.predict_dir = FLAGS.model_dir + # Sets timer_callback to None as we do not use it now. + self.timer_callback = None + + summary_path = os.path.join(FLAGS.model_dir, + 'summaries/training_summary.txt') + self._run_and_report_benchmark(summary_path) + + @owner_utils.Owner('tf-model-garden') + def benchmark_2x2_tpu_squadv2(self): + """Run XLNet model squad v2 accuracy test on 2x2 tpu.""" + self._setup() + FLAGS.strategy_type = 'tpu' + FLAGS.model_dir = self._get_model_dir('benchmark_2x2_tpu_squadv2') + FLAGS.predict_dir = FLAGS.model_dir + # Sets timer_callback to None as we do not use it now. + self.timer_callback = None + + summary_path = os.path.join(FLAGS.model_dir, + 'summaries/training_summary.txt') + self._run_and_report_benchmark(summary_path) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/colab/fine_tuning_bert.ipynb b/models/official/colab/fine_tuning_bert.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..443674b6b9f1292d25f26cc06e3359506763bfce --- /dev/null +++ b/models/official/colab/fine_tuning_bert.ipynb @@ -0,0 +1,1830 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "vXLA5InzXydn" + }, + "source": [ + "##### Copyright 2019 The TensorFlow Authors." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "form", + "colab": {}, + "colab_type": "code", + "id": "RuRlpLL-X0R_" + }, + "outputs": [], + "source": [ + "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "1mLJmVotXs64" + }, + "source": [ + "# Fine-tuning a BERT model" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "hYEwGTeCXnnX" + }, + "source": [ + "\u003ctable class=\"tfo-notebook-buttons\" align=\"left\"\u003e\n", + " \u003ctd\u003e\n", + " \u003ca target=\"_blank\" href=\"https://www.tensorflow.org/official_models/tutorials/fine_tune_bert.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" /\u003eView on TensorFlow.org\u003c/a\u003e\n", + " \u003c/td\u003e\n", + " \u003ctd\u003e\n", + " \u003ca target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/models/blob/master/official/colab/fine_tuning_bert.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /\u003eRun in Google Colab\u003c/a\u003e\n", + " \u003c/td\u003e\n", + " \u003ctd\u003e\n", + " \u003ca target=\"_blank\" href=\"https://github.com/tensorflow/models/blob/master/official/colab/fine_tuning_bert.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /\u003eView source on GitHub\u003c/a\u003e\n", + " \u003c/td\u003e\n", + " \u003ctd\u003e\n", + " \u003ca href=\"https://storage.googleapis.com/tensorflow_docs/models/official/colab/fine_tuning_bert.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/download_logo_32px.png\" /\u003eDownload notebook\u003c/a\u003e\n", + " \u003c/td\u003e\n", + "\u003c/table\u003e" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "YN2ACivEPxgD" + }, + "source": [ + "In this example, we will work through fine-tuning a BERT model using the tensorflow-models PIP package.\n", + "\n", + "The pretrained BERT model this tutorial is based on is also available on [TensorFlow Hub](https://tensorflow.org/hub), to see how to use it refer to the [Hub Appendix](#hub_bert)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "s2d9S2CSSO1z" + }, + "source": [ + "## Setup" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "fsACVQpVSifi" + }, + "source": [ + "### Install the TensorFlow Model Garden pip package\n", + "\n", + "* `tf-models-nightly` is the nightly Model Garden package created daily automatically.\n", + "* pip will install all models and dependencies automatically." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "NvNr2svBM-p3" + }, + "outputs": [], + "source": [ + "!pip install -q tf-nightly\n", + "!pip install -q tf-models-nightly" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "U-7qPCjWUAyy" + }, + "source": [ + "### Imports" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "lXsXev5MNr20" + }, + "outputs": [], + "source": [ + "import os\n", + "\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "\n", + "import tensorflow as tf\n", + "\n", + "import tensorflow_hub as hub\n", + "import tensorflow_datasets as tfds\n", + "tfds.disable_progress_bar()\n", + "\n", + "from official.modeling import tf_utils\n", + "from official import nlp\n", + "from official.nlp import bert\n", + "\n", + "# Load the required submodules\n", + "import official.nlp.optimization\n", + "import official.nlp.bert.bert_models\n", + "import official.nlp.bert.configs\n", + "import official.nlp.bert.run_classifier\n", + "import official.nlp.bert.tokenization\n", + "import official.nlp.data.classifier_data_lib\n", + "import official.nlp.modeling.losses\n", + "import official.nlp.modeling.models\n", + "import official.nlp.modeling.networks" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "mbanlzTvJBsz" + }, + "source": [ + "### Resources" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "PpW0x8TpR8DT" + }, + "source": [ + "This directory contains the configuration, vocabulary, and a pre-trained checkpoint used in this tutorial:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "vzRHOLciR8eq" + }, + "outputs": [], + "source": [ + "gs_folder_bert = \"gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-12_H-768_A-12\"\n", + "tf.io.gfile.listdir(gs_folder_bert)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "9uFskufsR2LT" + }, + "source": [ + "You can get a pre-trained BERT encoder from TensorFlow Hub here:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "e0dAkUttJAzj" + }, + "outputs": [], + "source": [ + "hub_url_bert = \"https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "Qv6abtRvH4xO" + }, + "source": [ + "## The data\n", + "For this example we used the [GLUE MRPC dataset from TFDS](https://www.tensorflow.org/datasets/catalog/glue#gluemrpc).\n", + "\n", + "This dataset is not set up so that it can be directly fed into the BERT model, so this section also handles the necessary preprocessing." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "28DvUhC1YUiB" + }, + "source": [ + "### Get the dataset from TensorFlow Datasets\n", + "\n", + "The Microsoft Research Paraphrase Corpus (Dolan \u0026 Brockett, 2005) is a corpus of sentence pairs automatically extracted from online news sources, with human annotations for whether the sentences in the pair are semantically equivalent.\n", + "\n", + "* Number of labels: 2.\n", + "* Size of training dataset: 3668.\n", + "* Size of evaluation dataset: 408.\n", + "* Maximum sequence length of training and evaluation dataset: 128.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "Ijikx5OsH9AT" + }, + "outputs": [], + "source": [ + "glue, info = tfds.load('glue/mrpc', with_info=True,\n", + " # It's small, load the whole dataset\n", + " batch_size=-1)" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "xf9zz4vLYXjr" + }, + "outputs": [], + "source": [ + "list(glue.keys())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "ZgBg2r2nYT-K" + }, + "source": [ + "The `info` object describes the dataset and it's features:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "IQrHxv7W7jH5" + }, + "outputs": [], + "source": [ + "info.features" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "vhsVWYNxazz5" + }, + "source": [ + "The two classes are:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "n0gfc_VTayfQ" + }, + "outputs": [], + "source": [ + "info.features['label'].names" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "38zJcap6xkbC" + }, + "source": [ + "Here is one example from the training set:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "xON_i6SkwApW" + }, + "outputs": [], + "source": [ + "glue_train = glue['train']\n", + "\n", + "for key, value in glue_train.items():\n", + " print(f\"{key:9s}: {value[0].numpy()}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "9fbTyfJpNr7x" + }, + "source": [ + "### The BERT tokenizer" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "wqeN54S61ZKQ" + }, + "source": [ + "To fine tune a pre-trained model you need to be sure that you're using exactly the same tokenization, vocabulary, and index mapping as you used during training.\n", + "\n", + "The BERT tokenizer used in this tutorial is written in pure Python (It's not built out of TensorFlow ops). 
So you can't just plug it into your model as a `keras.layer` like you can with `preprocessing.TextVectorization`.\n", + "\n", + "The following code rebuilds the tokenizer that was used by the base model:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "idxyhmrCQcw5" + }, + "outputs": [], + "source": [ + "# Set up tokenizer to generate Tensorflow dataset\n", + "tokenizer = bert.tokenization.FullTokenizer(\n", + " vocab_file=os.path.join(gs_folder_bert, \"vocab.txt\"),\n", + " do_lower_case=True)\n", + "\n", + "print(\"Vocab size:\", len(tokenizer.vocab))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "zYHDSquU2lDU" + }, + "source": [ + "Tokenize a sentence:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "L_OfOYPg853R" + }, + "outputs": [], + "source": [ + "tokens = tokenizer.tokenize(\"Hello TensorFlow!\")\n", + "print(tokens)\n", + "ids = tokenizer.convert_tokens_to_ids(tokens)\n", + "print(ids)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "kkAXLtuyWWDI" + }, + "source": [ + "### Preprocess the data\n", + "\n", + "The section manually preprocessed the dataset into the format expected by the model.\n", + "\n", + "This dataset is small, so preprocessing can be done quickly and easily in memory. For larger datasets the `tf_models` library includes some tools for preprocessing and re-serializing a dataset. See [Appendix: Re-encoding a large dataset](#re_encoding_tools) for details." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "62UTWLQd9-LB" + }, + "source": [ + "#### Encode the sentences\n", + "\n", + "The model expects its two inputs sentences to be concatenated together. 
This input is expected to start with a `[CLS]` \"This is a classification problem\" token, and each sentence should end with a `[SEP]` \"Separator\" token:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "bdL-dRNRBRJT" + }, + "outputs": [], + "source": [ + "tokenizer.convert_tokens_to_ids(['[CLS]', '[SEP]'])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "UrPktnqpwqie" + }, + "source": [ + "Start by encoding all the sentences while appending a `[SEP]` token, and packing them into ragged-tensors:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "BR7BmtU498Bh" + }, + "outputs": [], + "source": [ + "def encode_sentence(s):\n", + " tokens = list(tokenizer.tokenize(s.numpy()))\n", + " tokens.append('[SEP]')\n", + " return tokenizer.convert_tokens_to_ids(tokens)\n", + "\n", + "sentence1 = tf.ragged.constant([\n", + " encode_sentence(s) for s in glue_train[\"sentence1\"]])\n", + "sentence2 = tf.ragged.constant([\n", + " encode_sentence(s) for s in glue_train[\"sentence2\"]])" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "has42aUdfky-" + }, + "outputs": [], + "source": [ + "print(\"Sentence1 shape:\", sentence1.shape.as_list())\n", + "print(\"Sentence2 shape:\", sentence2.shape.as_list())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "MU9lTWy_xXbb" + }, + "source": [ + "Now prepend a `[CLS]` token, and concatenate the ragged tensors to form a single `input_word_ids` tensor for each example. `RaggedTensor.to_tensor()` zero pads to the longest sequence." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "USD8uihw-g4J" + }, + "outputs": [], + "source": [ + "cls = [tokenizer.convert_tokens_to_ids(['[CLS]'])]*sentence1.shape[0]\n", + "input_word_ids = tf.concat([cls, sentence1, sentence2], axis=-1)\n", + "_ = plt.pcolormesh(input_word_ids.to_tensor())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "xmNv4l4k-dBZ" + }, + "source": [ + "#### Mask and input type" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "DIWjNIKq-ldh" + }, + "source": [ + "The model expects two additional inputs:\n", + "\n", + "* The input mask\n", + "* The input type" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "ulNZ4U96-8JZ" + }, + "source": [ + "The mask allows the model to cleanly differentiate between the content and the padding. The mask has the same shape as the `input_word_ids`, and contains a `1` anywhere the `input_word_ids` is not padding." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "EezOO9qj91kP" + }, + "outputs": [], + "source": [ + "input_mask = tf.ones_like(input_word_ids).to_tensor()\n", + "\n", + "plt.pcolormesh(input_mask)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "rxLenwAvCkBf" + }, + "source": [ + "The \"input type\" also has the same shape, but inside the non-padded region, contains a `0` or a `1` indicating which sentence the token is a part of. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "2CetH_5C9P2m" + }, + "outputs": [], + "source": [ + "type_cls = tf.zeros_like(cls)\n", + "type_s1 = tf.zeros_like(sentence1)\n", + "type_s2 = tf.ones_like(sentence2)\n", + "input_type_ids = tf.concat([type_cls, type_s1, type_s2], axis=-1).to_tensor()\n", + "\n", + "plt.pcolormesh(input_type_ids)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "P5UBnCn8Ii6s" + }, + "source": [ + "#### Put it all together\n", + "\n", + "Collect the above text parsing code into a single function, and apply it to each split of the `glue/mrpc` dataset." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "sDGiWYPLEd5a" + }, + "outputs": [], + "source": [ + "def encode_sentence(s, tokenizer):\n", + " tokens = list(tokenizer.tokenize(s))\n", + " tokens.append('[SEP]')\n", + " return tokenizer.convert_tokens_to_ids(tokens)\n", + "\n", + "def bert_encode(glue_dict, tokenizer):\n", + " num_examples = len(glue_dict[\"sentence1\"])\n", + " \n", + " sentence1 = tf.ragged.constant([\n", + " encode_sentence(s, tokenizer)\n", + " for s in np.array(glue_dict[\"sentence1\"])])\n", + " sentence2 = tf.ragged.constant([\n", + " encode_sentence(s, tokenizer)\n", + " for s in np.array(glue_dict[\"sentence2\"])])\n", + "\n", + " cls = [tokenizer.convert_tokens_to_ids(['[CLS]'])]*sentence1.shape[0]\n", + " input_word_ids = tf.concat([cls, sentence1, sentence2], axis=-1)\n", + "\n", + " input_mask = tf.ones_like(input_word_ids).to_tensor()\n", + "\n", + " type_cls = tf.zeros_like(cls)\n", + " type_s1 = tf.zeros_like(sentence1)\n", + " type_s2 = tf.ones_like(sentence2)\n", + " input_type_ids = tf.concat(\n", + " [type_cls, type_s1, type_s2], axis=-1).to_tensor()\n", + "\n", + " inputs = {\n", + " 'input_word_ids': input_word_ids.to_tensor(),\n", + " 'input_mask': input_mask,\n", + " 'input_type_ids': input_type_ids}\n", + "\n", + " return inputs" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "yuLKxf6zHxw-" + }, + "outputs": [], + "source": [ + "glue_train = bert_encode(glue['train'], tokenizer)\n", + "glue_train_labels = glue['train']['label']\n", + "\n", + "glue_validation = bert_encode(glue['validation'], tokenizer)\n", + "glue_validation_labels = glue['validation']['label']\n", + "\n", + "glue_test = bert_encode(glue['test'], tokenizer)\n", + "glue_test_labels = glue['test']['label']" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "7FC5aLVxKVKK" + }, + "source": [ + "Each subset of the data has been converted to a dictionary of features, and a set of labels. 
Each feature in the input dictionary has the same shape, and the number of labels should match:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "jyjTdGpFhO_1" + }, + "outputs": [], + "source": [ + "for key, value in glue_train.items():\n", + " print(f'{key:15s} shape: {value.shape}')\n", + "\n", + "print(f'glue_train_labels shape: {glue_train_labels.shape}')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "FSwymsbkbLDA" + }, + "source": [ + "## The model" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "Efrj3Cn1kLAp" + }, + "source": [ + "### Build the model\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "xxpOY5r2Ayq6" + }, + "source": [ + "The first step is to download the configuration for the pre-trained model.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "ujapVfZ_AKW7" + }, + "outputs": [], + "source": [ + "import json\n", + "\n", + "bert_config_file = os.path.join(gs_folder_bert, \"bert_config.json\")\n", + "config_dict = json.loads(tf.io.gfile.GFile(bert_config_file).read())\n", + "\n", + "bert_config = bert.configs.BertConfig.from_dict(config_dict)\n", + "\n", + "config_dict" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "96ldxDSwkVkj" + }, + "source": [ + "The `config` defines the core BERT Model, which is a Keras model to predict the outputs of `num_classes` from the inputs with maximum sequence length `max_seq_length`.\n", + "\n", + "This function returns both the encoder and the classifier." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "cH682__U0FBv" + }, + "outputs": [], + "source": [ + "bert_classifier, bert_encoder = bert.bert_models.classifier_model(\n", + " bert_config, num_labels=2)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "XqKp3-5GIZlw" + }, + "source": [ + "The classifier has three inputs and one output:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "bAQblMIjwkvx" + }, + "outputs": [], + "source": [ + "tf.keras.utils.plot_model(bert_classifier, show_shapes=True, dpi=48)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "sFmVG4SKZAw8" + }, + "source": [ + "Run it on a test batch of data 10 examples from the training set. 
The output is the logits for the two classes:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "VTjgPbp4ZDKo" + }, + "outputs": [], + "source": [ + "glue_batch = {key: val[:10] for key, val in glue_train.items()}\n", + "\n", + "bert_classifier(\n", + " glue_batch, training=True\n", + ").numpy()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "Q0NTdwZsQK8n" + }, + "source": [ + "The `TransformerEncoder` in the center of the classifier above **is** the `bert_encoder`.\n", + "\n", + "Inspecting the encoder, we see its stack of `Transformer` layers connected to those same three inputs:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "8L__-erBwLIQ" + }, + "outputs": [], + "source": [ + "tf.keras.utils.plot_model(bert_encoder, show_shapes=True, dpi=48)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "mKAvkQc3heSy" + }, + "source": [ + "### Restore the encoder weights\n", + "\n", + "When built the encoder is randomly initialized. Restore the encoder's weights from the checkpoint:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "97Ll2Gichd_Y" + }, + "outputs": [], + "source": [ + "checkpoint = tf.train.Checkpoint(model=bert_encoder)\n", + "checkpoint.restore(\n", + " os.path.join(gs_folder_bert, 'bert_model.ckpt')).assert_consumed()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "2oHOql35k3Dd" + }, + "source": [ + "Note: The pretrained `TransformerEncoder` is also available on [TensorFlow Hub](https://tensorflow.org/hub). See the [Hub appendix](#hub_bert) for details. " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "115caFLMk-_l" + }, + "source": [ + "### Set up the optimizer\n", + "\n", + "BERT adopts the Adam optimizer with weight decay (aka \"[AdamW](https://arxiv.org/abs/1711.05101)\").\n", + "It also employs a learning rate schedule that firstly warms up from 0 and then decays to 0." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "w8qXKRZuCwW4" + }, + "outputs": [], + "source": [ + "# Set up epochs and steps\n", + "epochs = 3\n", + "batch_size = 32\n", + "eval_batch_size = 32\n", + "\n", + "train_data_size = len(glue_train_labels)\n", + "steps_per_epoch = int(train_data_size / batch_size)\n", + "num_train_steps = steps_per_epoch * epochs\n", + "warmup_steps = int(epochs * train_data_size * 0.1 / batch_size)\n", + "\n", + "# creates an optimizer with learning rate schedule\n", + "optimizer = nlp.optimization.create_optimizer(\n", + " 2e-5, num_train_steps=num_train_steps, num_warmup_steps=warmup_steps)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "pXRGxiRNEHS2" + }, + "source": [ + "This returns an `AdamWeightDecay` optimizer with the learning rate schedule set:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "eQNA16bhDpky" + }, + "outputs": [], + "source": [ + "type(optimizer)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "xqu_K71fJQB8" + }, + "source": [ + "To see an example of how to customize the optimizer and it's schedule, see the [Optimizer schedule appendix](#optiizer_schedule)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "78FEUOOEkoP0" + }, + "source": [ + "### Train the model" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "OTNcA0O0nSq9" + }, + "source": [ + "The metric is accuracy and we use sparse categorical cross-entropy as loss." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "nzi8hjeTQTRs" + }, + "outputs": [], + "source": [ + "metrics = [tf.keras.metrics.SparseCategoricalAccuracy('accuracy', dtype=tf.float32)]\n", + "loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n", + "\n", + "bert_classifier.compile(\n", + " optimizer=optimizer,\n", + " loss=loss,\n", + " metrics=metrics)\n", + "\n", + "bert_classifier.fit(\n", + " glue_train, glue_train_labels,\n", + " validation_data=(glue_validation, glue_validation_labels),\n", + " batch_size=32,\n", + " epochs=epochs)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "IFtKFWbNKb0u" + }, + "source": [ + "Now run the fine-tuned model on a custom example to see that it works.\n", + "\n", + "Start by encoding some sentence pairs:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "9ZoUgDUNJPz3" + }, + "outputs": [], + "source": [ + "my_examples = bert_encode(\n", + " glue_dict = {\n", + " 'sentence1':[\n", + " 'The rain in Spain falls mainly on the plain.',\n", + " 'Look I fine tuned BERT.'],\n", + " 'sentence2':[\n", + " 'It mostly rains on the flat lands of Spain.',\n", + " 'Is it working? 
This does not match.']\n", + " },\n", + " tokenizer=tokenizer)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "7ynJibkBRTJF" + }, + "source": [ + "The model should report class `1` \"match\" for the first example and class `0` \"no-match\" for the second:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "umo0ttrgRYIM" + }, + "outputs": [], + "source": [ + "result = bert_classifier(my_examples, training=False)\n", + "\n", + "result = tf.argmax(result).numpy()\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "utGl0M3aZCE4" + }, + "outputs": [], + "source": [ + "np.array(info.features['label'].names)[result]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "fVo_AnT0l26j" + }, + "source": [ + "### Save the model\n", + "\n", + "Often the goal of training a model is to _use_ it for something, so export the model and then restore it to be sure that it works." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "Nl5x6nElZqkP" + }, + "outputs": [], + "source": [ + "export_dir='./saved_model'\n", + "tf.saved_model.save(bert_classifier, export_dir=export_dir)" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "y_ACvKPsVUXC" + }, + "outputs": [], + "source": [ + "reloaded = tf.saved_model.load(export_dir)\n", + "reloaded_result = reloaded([my_examples['input_word_ids'],\n", + " my_examples['input_mask'],\n", + " my_examples['input_type_ids']], training=False)\n", + "\n", + "original_result = bert_classifier(my_examples, training=False)\n", + "\n", + "# The results are (nearly) identical:\n", + "print(original_result.numpy())\n", + "print()\n", + "print(reloaded_result.numpy())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "eQceYqRFT_Eg" + }, + "source": [ + "## Appendix" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "SaC1RlFawUpc" + }, + "source": [ + "\u003ca id=re_encoding_tools\u003e\u003c/a\u003e\n", + "### Re-encoding a large dataset" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "CwUdjFBkzUgh" + }, + "source": [ + "This tutorial you re-encoded the dataset in memory, for clarity.\n", + "\n", + "This was only possible because `glue/mrpc` is a very small dataset. To deal with larger datasets `tf_models` library includes some tools for processing and re-encoding a dataset for efficient training." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "2UTQrkyOT5wD" + }, + "source": [ + "The first step is to describe which features of the dataset should be transformed:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "XQeDFOzYR9Z9" + }, + "outputs": [], + "source": [ + "processor = nlp.data.classifier_data_lib.TfdsProcessor(\n", + " tfds_params=\"dataset=glue/mrpc,text_key=sentence1,text_b_key=sentence2\",\n", + " process_text_fn=bert.tokenization.convert_to_unicode)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "XrFQbfErUWxa" + }, + "source": [ + "Then apply the transformation to generate new TFRecord files." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "ymw7GOHpSHKU" + }, + "outputs": [], + "source": [ + "# Set up output of training and evaluation Tensorflow dataset\n", + "train_data_output_path=\"./mrpc_train.tf_record\"\n", + "eval_data_output_path=\"./mrpc_eval.tf_record\"\n", + "\n", + "max_seq_length = 128\n", + "batch_size = 32\n", + "eval_batch_size = 32\n", + "\n", + "# Generate and save training data into a tf record file\n", + "input_meta_data = (\n", + " nlp.data.classifier_data_lib.generate_tf_record_from_data_file(\n", + " processor=processor,\n", + " data_dir=None, # It is `None` because data is from tfds, not local dir.\n", + " tokenizer=tokenizer,\n", + " train_data_output_path=train_data_output_path,\n", + " eval_data_output_path=eval_data_output_path,\n", + " max_seq_length=max_seq_length))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "uX_Sp-wTUoRm" + }, + "source": [ + "Finally create `tf.data` input pipelines from those TFRecord files:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "rkHxIK57SQ_r" + }, + "outputs": [], + "source": [ + "training_dataset = bert.run_classifier.get_dataset_fn(\n", + " train_data_output_path,\n", + " max_seq_length,\n", + " batch_size,\n", + " is_training=True)()\n", + "\n", + "evaluation_dataset = bert.run_classifier.get_dataset_fn(\n", + " eval_data_output_path,\n", + " max_seq_length,\n", + " eval_batch_size,\n", + " is_training=False)()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "stbaVouogvzS" + }, + "source": [ + "The resulting `tf.data.Datasets` return `(features, labels)` pairs, as expected by `keras.Model.fit`:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "gwhrlQl4gxVF" + }, + "outputs": [], + "source": [ + "training_dataset.element_spec" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "dbJ76vSJj77j" + }, + "source": [ + "#### Create tf.data.Dataset for training and evaluation\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "9J95LFRohiYw" + }, + "source": [ + "If you need to modify the data loading here is some code to get you started:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "gCvaLLAxPuMc" + }, + "outputs": [], + "source": [ + "def create_classifier_dataset(file_path, seq_length, batch_size, is_training):\n", + " \"\"\"Creates input dataset from (tf)records files for train/eval.\"\"\"\n", + " dataset = tf.data.TFRecordDataset(file_path)\n", + " if is_training:\n", + " dataset = dataset.shuffle(100)\n", + " dataset = dataset.repeat()\n", + "\n", + " def decode_record(record):\n", + " name_to_features = {\n", + " 'input_ids': tf.io.FixedLenFeature([seq_length], tf.int64),\n", + " 'input_mask': tf.io.FixedLenFeature([seq_length], tf.int64),\n", + " 'segment_ids': tf.io.FixedLenFeature([seq_length], tf.int64),\n", + " 'label_ids': tf.io.FixedLenFeature([], tf.int64),\n", + " }\n", + " return tf.io.parse_single_example(record, name_to_features)\n", + "\n", + " def _select_data_from_record(record):\n", + " x = {\n", + " 'input_word_ids': record['input_ids'],\n", + " 'input_mask': record['input_mask'],\n", + " 'input_type_ids': record['segment_ids']\n", + " 
}\n", + " y = record['label_ids']\n", + " return (x, y)\n", + "\n", + " dataset = dataset.map(decode_record,\n", + " num_parallel_calls=tf.data.experimental.AUTOTUNE)\n", + " dataset = dataset.map(\n", + " _select_data_from_record,\n", + " num_parallel_calls=tf.data.experimental.AUTOTUNE)\n", + " dataset = dataset.batch(batch_size, drop_remainder=is_training)\n", + " dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n", + " return dataset" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "rutkBadrhzdR" + }, + "outputs": [], + "source": [ + "# Set up batch sizes\n", + "batch_size = 32\n", + "eval_batch_size = 32\n", + "\n", + "# Return Tensorflow dataset\n", + "training_dataset = create_classifier_dataset(\n", + " train_data_output_path,\n", + " input_meta_data['max_seq_length'],\n", + " batch_size,\n", + " is_training=True)\n", + "\n", + "evaluation_dataset = create_classifier_dataset(\n", + " eval_data_output_path,\n", + " input_meta_data['max_seq_length'],\n", + " eval_batch_size,\n", + " is_training=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "59TVgt4Z7fuU" + }, + "outputs": [], + "source": [ + "training_dataset.element_spec" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "QbklKt-w_CiI" + }, + "source": [ + "\u003ca id=\"hub_bert\"\u003e\u003c/a\u003e\n", + "\n", + "### TFModels BERT on TFHub\n", + "\n", + "You can get [the BERT model](https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2) off the shelf from [TFHub](https://tensorflow.org/hub). It would not be hard to add a classification head on top of this `hub.KerasLayer`" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "lo6479At4sP1" + }, + "outputs": [], + "source": [ + "# Note: 350MB download.\n", + "import tensorflow_hub as hub\n", + "hub_encoder = hub.KerasLayer(hub_url_bert, trainable=True)\n", + "\n", + "print(f\"The Hub encoder has {len(hub_encoder.trainable_variables)} trainable variables\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "iTzF574wivQv" + }, + "source": [ + "Test run it on a batch of data:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "XEcYrCR45Uwo" + }, + "outputs": [], + "source": [ + "result = hub_encoder(\n", + " inputs=[glue_train['input_word_ids'][:10],\n", + " glue_train['input_mask'][:10],\n", + " glue_train['input_type_ids'][:10],],\n", + " training=False,\n", + ")\n", + "\n", + "print(\"Pooled output shape:\", result[0].shape)\n", + "print(\"Sequence output shape:\", result[1].shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "cjojn8SmLSRI" + }, + "source": [ + "At this point it would be simple to add a classification head yourself.\n", + "\n", + "The `bert_models.classifier_model` function can also build a classifier onto the encoder from TensorFlow Hub:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "9nTDaApyLR70" + }, + "outputs": [], + "source": [ + "hub_classifier, hub_encoder = bert.bert_models.classifier_model(\n", + " # Caution: Most of `bert_config` is ignored if you pass a hub url.\n", + " bert_config=bert_config, hub_module_url=hub_url_bert, num_labels=2)" + ] + 
}, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "xMJX3wV0_v7I" + }, + "source": [ + "The one downside to loading this model from TFHub is that the structure of internal keras layers is not restored. So it's more difficult to inspect or modify the model. The `TransformerEncoder` model is now a single layer:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "pD71dnvhM2QS" + }, + "outputs": [], + "source": [ + "tf.keras.utils.plot_model(hub_classifier, show_shapes=True, dpi=64)" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "nLZD-isBzNKi" + }, + "outputs": [], + "source": [ + "try:\n", + " tf.keras.utils.plot_model(hub_encoder, show_shapes=True, dpi=64)\n", + " assert False\n", + "except Exception as e:\n", + " print(f\"{type(e).__name__}: {e}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "ZxSqH0dNAgXV" + }, + "source": [ + "\u003ca id=\"model_builder_functions\"\u003e\u003c/a\u003e\n", + "\n", + "### Low level model building\n", + "\n", + "If you need a more control over the construction of the model it's worth noting that the `classifier_model` function used earlier is really just a thin wrapper over the `nlp.modeling.networks.TransformerEncoder` and `nlp.modeling.models.BertClassifier` classes. Just remember that if you start modifying the architecture it may not be correct or possible to reload the pre-trained checkpoint so you'll need to retrain from scratch." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "0cgABEwDj06P" + }, + "source": [ + "Build the encoder:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "5r_yqhBFSVEM" + }, + "outputs": [], + "source": [ + "transformer_config = config_dict.copy()\n", + "\n", + "# You need to rename a few fields to make this work:\n", + "transformer_config['attention_dropout_rate'] = transformer_config.pop('attention_probs_dropout_prob')\n", + "transformer_config['activation'] = tf_utils.get_activation(transformer_config.pop('hidden_act'))\n", + "transformer_config['dropout_rate'] = transformer_config.pop('hidden_dropout_prob')\n", + "transformer_config['initializer'] = tf.keras.initializers.TruncatedNormal(\n", + " stddev=transformer_config.pop('initializer_range'))\n", + "transformer_config['max_sequence_length'] = transformer_config.pop('max_position_embeddings')\n", + "transformer_config['num_layers'] = transformer_config.pop('num_hidden_layers')\n", + "\n", + "transformer_config" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "rIO8MI7LLijh" + }, + "outputs": [], + "source": [ + "manual_encoder = nlp.modeling.networks.TransformerEncoder(**transformer_config)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "4a4tFSg9krRi" + }, + "source": [ + "Restore the weights:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "X6N9NEqfXJCx" + }, + "outputs": [], + "source": [ + "checkpoint = tf.train.Checkpoint(model=manual_encoder)\n", + "checkpoint.restore(\n", + " os.path.join(gs_folder_bert, 'bert_model.ckpt')).assert_consumed()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": 
"1BPiPO4ykuwM" + }, + "source": [ + "Test run it:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "hlVdgJKmj389" + }, + "outputs": [], + "source": [ + "result = manual_encoder(my_examples, training=True)\n", + "\n", + "print(\"Sequence output shape:\", result[0].shape)\n", + "print(\"Pooled output shape:\", result[1].shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "nJMXvVgJkyBv" + }, + "source": [ + "Wrap it in a classifier:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "tQX57GJ6wkAb" + }, + "outputs": [], + "source": [ + "manual_classifier = nlp.modeling.models.BertClassifier(\n", + " bert_encoder,\n", + " num_classes=2,\n", + " dropout_rate=transformer_config['dropout_rate'],\n", + " initializer=tf.keras.initializers.TruncatedNormal(\n", + " stddev=bert_config.initializer_range))" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "kB-nBWhQk0dS" + }, + "outputs": [], + "source": [ + "manual_classifier(my_examples, training=True).numpy()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "E6AJlOSyIO1L" + }, + "source": [ + "\u003ca id=\"optiizer_schedule\"\u003e\u003c/a\u003e\n", + "\n", + "### Optimizers and schedules\n", + "\n", + "The optimizer used to train the model was created using the `nlp.optimization.create_optimizer` function:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "28Dv3BPRlFTD" + }, + "outputs": [], + "source": [ + "optimizer = nlp.optimization.create_optimizer(\n", + " 2e-5, num_train_steps=num_train_steps, num_warmup_steps=warmup_steps)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "LRjcHr0UlT8c" + }, + "source": [ + "That high level wrapper sets up the learning rate schedules and the optimizer.\n", + "\n", + "The base learning rate schedule used here is a linear decay to zero over the training run:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "MHY8K6kDngQn" + }, + "outputs": [], + "source": [ + "epochs = 3\n", + "batch_size = 32\n", + "eval_batch_size = 32\n", + "\n", + "train_data_size = len(glue_train_labels)\n", + "steps_per_epoch = int(train_data_size / batch_size)\n", + "num_train_steps = steps_per_epoch * epochs" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "wKIcSprulu3P" + }, + "outputs": [], + "source": [ + "decay_schedule = tf.keras.optimizers.schedules.PolynomialDecay(\n", + " initial_learning_rate=2e-5,\n", + " decay_steps=num_train_steps,\n", + " end_learning_rate=0)\n", + "\n", + "plt.plot([decay_schedule(n) for n in range(num_train_steps)])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "IMTC_gfAl_PZ" + }, + "source": [ + "This, in turn is wrapped in a `WarmUp` schedule that linearly increases the learning rate to the target value over the first 10% of training:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "YRt3VTmBmCBY" + }, + "outputs": [], + "source": [ + "warmup_steps = num_train_steps * 0.1\n", + "\n", + "warmup_schedule = 
nlp.optimization.WarmUp(\n", + " initial_learning_rate=2e-5,\n", + " decay_schedule_fn=decay_schedule,\n", + " warmup_steps=warmup_steps)\n", + "\n", + "# The warmup overshoots, because it warms up to the `initial_learning_rate`\n", + "# following the original implementation. You can set\n", + "# `initial_learning_rate=decay_schedule(warmup_steps)` if you don't like the\n", + "# overshoot.\n", + "plt.plot([warmup_schedule(n) for n in range(num_train_steps)])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "l8D9Lv3Bn740" + }, + "source": [ + "Then create the `nlp.optimization.AdamWeightDecay` using that schedule, configured for the BERT model:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "2Hf2rpRXk89N" + }, + "outputs": [], + "source": [ + "optimizer = nlp.optimization.AdamWeightDecay(\n", + " learning_rate=warmup_schedule,\n", + " weight_decay_rate=0.01,\n", + " epsilon=1e-6,\n", + " exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'])" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "name": "fine_tuning_bert.ipynb", + "private_outputs": true, + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/models/official/core/__init__.py b/models/official/core/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/official/core/base_task.py b/models/official/core/base_task.py new file mode 100644 index 0000000000000000000000000000000000000000..31811cbe6606fac61b664973717f4c75b6b4b37b --- /dev/null +++ b/models/official/core/base_task.py @@ -0,0 +1,303 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Defines the base task abstraction.""" +import abc +import functools +from typing import Any, Callable, Optional + +import six +import tensorflow as tf + +from official.modeling.hyperparams import config_definitions as cfg +from official.utils import registry + + +@six.add_metaclass(abc.ABCMeta) +class Task(tf.Module): + """A single-replica view of training procedure. + + Tasks provide artifacts for training/evalution procedures, including + loading/iterating over Datasets, initializing the model, calculating the loss + and customized metrics with reduction. + """ + + # Special keys in train/validate step returned logs. + loss = "loss" + + def __init__(self, params: cfg.TaskConfig): + self._task_config = params + + @property + def task_config(self) -> cfg.TaskConfig: + return self._task_config + + def initialize(self, model: tf.keras.Model): + """A callback function used as CheckpointManager's init_fn. 
+ + This function will be called when no checkpoint found for the model. + If there is a checkpoint, the checkpoint will be loaded and this function + will not be called. You can use this callback function to load a pretrained + checkpoint, saved under a directory other than the model_dir. + + Args: + model: The keras.Model built or used by this task. + """ + pass + + @abc.abstractmethod + def build_model(self) -> tf.keras.Model: + """Creates the model architecture. + + Returns: + A model instance. + """ + + def compile_model(self, + model: tf.keras.Model, + optimizer: tf.keras.optimizers.Optimizer, + loss=None, + train_step: Optional[Callable[..., Any]] = None, + validation_step: Optional[Callable[..., Any]] = None, + **kwargs) -> tf.keras.Model: + """Compiles the model with objects created by the task. + + The method should not be used in any customized training implementation. + + Args: + model: a keras.Model. + optimizer: the keras optimizer. + loss: a callable/list of losses. + train_step: optional train step function defined by the task. + validation_step: optional validation_step step function defined by the + task. + **kwargs: other kwargs consumed by keras.Model compile(). + + Returns: + a compiled keras.Model. + """ + if bool(loss is None) == bool(train_step is None): + raise ValueError("`loss` and `train_step` should be exclusive to " + "each other.") + model.compile(optimizer=optimizer, loss=loss, **kwargs) + + if train_step: + model.train_step = functools.partial( + train_step, model=model, optimizer=model.optimizer) + if validation_step: + model.test_step = functools.partial(validation_step, model=model) + return model + + @abc.abstractmethod + def build_inputs(self, + params: cfg.DataConfig, + input_context: Optional[tf.distribute.InputContext] = None): + """Returns a dataset or a nested structure of dataset functions. + + Dataset functions define per-host datasets with the per-replica batch size. + + Args: + params: hyperparams to create input pipelines. + input_context: optional distribution input pipeline context. + + Returns: + A nested structure of per-replica input functions. + """ + + def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor: + """Standard interface to compute losses. + + Args: + labels: optional label tensors. + model_outputs: a nested structure of output tensors. + aux_losses: auxiliarly loss tensors, i.e. `losses` in keras.Model. + + Returns: + The total loss tensor. + """ + del model_outputs, labels + + if aux_losses is None: + losses = [tf.constant(0.0, dtype=tf.float32)] + else: + losses = aux_losses + total_loss = tf.add_n(losses) + return total_loss + + def build_metrics(self, training: bool = True): + """Gets streaming metrics for training/validation.""" + del training + return [] + + def process_metrics(self, metrics, labels, model_outputs): + """Process and update metrics. Called when using custom training loop API. + + Args: + metrics: a nested structure of metrics objects. + The return of function self.build_metrics. + labels: a tensor or a nested structure of tensors. + model_outputs: a tensor or a nested structure of tensors. + For example, output of the keras model built by self.build_model. + """ + for metric in metrics: + metric.update_state(labels, model_outputs) + + def process_compiled_metrics(self, compiled_metrics, labels, model_outputs): + """Process and update compiled_metrics. call when using compile/fit API. + + Args: + compiled_metrics: the compiled metrics (model.compiled_metrics). 
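# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this file): a minimal Task subclass wired
# up against the interfaces documented above. The config class, toy dataset
# and loss below are hypothetical stand-ins, not Model Garden code.
# ---------------------------------------------------------------------------
import dataclasses  # not among this module's imports; used only by the sketch


@dataclasses.dataclass
class ToyTaskConfig(cfg.TaskConfig):
  num_classes: int = 2


class ToyClassificationTask(Task):
  """Toy classification task used only to illustrate the base API."""

  def build_model(self) -> tf.keras.Model:
    return tf.keras.Sequential([
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(self.task_config.num_classes),
    ])

  def build_inputs(self, params, input_context=None):
    # A synthetic in-memory dataset; a real task would build an InputReader.
    features = tf.random.uniform([256, 16])
    labels = tf.random.uniform(
        [256], maxval=self.task_config.num_classes, dtype=tf.int32)
    dataset = tf.data.Dataset.from_tensor_slices((features, labels))
    return dataset.repeat().batch(params.global_batch_size or 32)

  def build_losses(self, labels, model_outputs, aux_losses=None):
    loss = tf.reduce_mean(
        tf.keras.losses.sparse_categorical_crossentropy(
            labels, model_outputs, from_logits=True))
    if aux_losses:
      loss += tf.add_n(aux_losses)
    return loss

  def build_metrics(self, training=True):
    del training
    return [tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy')]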
+ labels: a tensor or a nested structure of tensors. + model_outputs: a tensor or a nested structure of tensors. + For example, output of the keras model built by self.build_model. + """ + compiled_metrics.update_state(labels, model_outputs) + + def train_step(self, + inputs, + model: tf.keras.Model, + optimizer: tf.keras.optimizers.Optimizer, + metrics=None): + """Does forward and backward. + + Args: + inputs: a dictionary of input tensors. + model: the model, forward pass definition. + optimizer: the optimizer for this training step. + metrics: a nested structure of metrics objects. + + Returns: + A dictionary of logs. + """ + if isinstance(inputs, tuple) and len(inputs) == 2: + features, labels = inputs + else: + features, labels = inputs, inputs + with tf.GradientTape() as tape: + outputs = model(features, training=True) + # Computes per-replica loss. + loss = self.build_losses( + labels=labels, model_outputs=outputs, aux_losses=model.losses) + # Scales loss as the default gradients allreduce performs sum inside the + # optimizer. + scaled_loss = loss / tf.distribute.get_strategy().num_replicas_in_sync + + # For mixed precision, when a LossScaleOptimizer is used, the loss is + # scaled to avoid numeric underflow. + if isinstance(optimizer, + tf.keras.mixed_precision.experimental.LossScaleOptimizer): + scaled_loss = optimizer.get_scaled_loss(scaled_loss) + + tvars = model.trainable_variables + grads = tape.gradient(scaled_loss, tvars) + + if isinstance(optimizer, + tf.keras.mixed_precision.experimental.LossScaleOptimizer): + grads = optimizer.get_unscaled_gradients(grads) + optimizer.apply_gradients(list(zip(grads, tvars))) + logs = {self.loss: loss} + if metrics: + self.process_metrics(metrics, labels, outputs) + logs.update({m.name: m.result() for m in metrics}) + elif model.compiled_metrics: + self.process_compiled_metrics(model.compiled_metrics, labels, outputs) + logs.update({m.name: m.result() for m in model.metrics}) + return logs + + def validation_step(self, inputs, model: tf.keras.Model, metrics=None): + """Validatation step. + + Args: + inputs: a dictionary of input tensors. + model: the keras.Model. + metrics: a nested structure of metrics objects. + + Returns: + A dictionary of logs. + """ + if isinstance(inputs, tuple) and len(inputs) == 2: + features, labels = inputs + else: + features, labels = inputs, inputs + outputs = self.inference_step(features, model) + loss = self.build_losses( + labels=labels, model_outputs=outputs, aux_losses=model.losses) + logs = {self.loss: loss} + if metrics: + self.process_metrics(metrics, labels, outputs) + logs.update({m.name: m.result() for m in metrics}) + elif model.compiled_metrics: + self.process_compiled_metrics(model.compiled_metrics, labels, outputs) + logs.update({m.name: m.result() for m in model.metrics}) + return logs + + def inference_step(self, inputs, model: tf.keras.Model): + """Performs the forward step.""" + return model(inputs, training=False) + + def aggregate_logs(self, state, step_logs): + """Optional aggregation over logs returned from a validation step.""" + pass + + def reduce_aggregated_logs(self, aggregated_logs): + """Optional reduce of aggregated logs over validation steps.""" + return {} + + +_REGISTERED_TASK_CLS = {} + + +# TODO(b/158268740): Move these outside the base class file. +# TODO(b/158741360): Add type annotations once pytype checks across modules. +def register_task_cls(task_config_cls): + """Decorates a factory of Tasks for lookup by a subclass of TaskConfig. 
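# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this file): driving `train_step` and
# `validation_step` from a hand-written loop. `ToyClassificationTask` and
# `ToyTaskConfig` refer to the hypothetical subclass sketched earlier.
# ---------------------------------------------------------------------------
task = ToyClassificationTask(ToyTaskConfig())
model = task.build_model()
metrics = task.build_metrics(training=True)
optimizer = tf.keras.optimizers.Adam(1e-3)

train_ds = task.build_inputs(task.task_config.train_data)
for step, batch in enumerate(train_ds.take(100)):
  logs = task.train_step(batch, model, optimizer, metrics=metrics)
  if step % 20 == 0:
    print(step, {name: float(value) for name, value in logs.items()})

eval_logs = task.validation_step(next(iter(train_ds)), model, metrics=metrics)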
+ + This decorator supports registration of tasks as follows: + + ``` + @dataclasses.dataclass + class MyTaskConfig(TaskConfig): + # Add fields here. + pass + + @register_task_cls(MyTaskConfig) + class MyTask(Task): + # Inherits def __init__(self, task_config). + pass + + my_task_config = MyTaskConfig() + my_task = get_task(my_task_config) # Returns MyTask(my_task_config). + ``` + + Besisdes a class itself, other callables that create a Task from a TaskConfig + can be decorated by the result of this function, as long as there is at most + one registration for each config class. + + Args: + task_config_cls: a subclass of TaskConfig (*not* an instance of TaskConfig). + Each task_config_cls can only be used for a single registration. + + Returns: + A callable for use as class decorator that registers the decorated class + for creation from an instance of task_config_cls. + """ + return registry.register(_REGISTERED_TASK_CLS, task_config_cls) + + +# The user-visible get_task() is defined after classes have been registered. +# TODO(b/158741360): Add type annotations once pytype checks across modules. +def get_task_cls(task_config_cls): + task_cls = registry.lookup(_REGISTERED_TASK_CLS, task_config_cls) + return task_cls diff --git a/models/official/core/input_reader.py b/models/official/core/input_reader.py new file mode 100644 index 0000000000000000000000000000000000000000..52f6e84e4bd02d4178586556ca191912de18fc18 --- /dev/null +++ b/models/official/core/input_reader.py @@ -0,0 +1,223 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""A common dataset reader.""" + +from typing import Any, Callable, List, Optional + +import tensorflow as tf +import tensorflow_datasets as tfds + +from official.modeling.hyperparams import config_definitions as cfg + + +class InputReader: + """Input reader that returns a tf.data.Dataset instance.""" + + def __init__(self, + params: cfg.DataConfig, + shards: Optional[List[str]] = None, + dataset_fn=tf.data.TFRecordDataset, + decoder_fn: Optional[Callable[..., Any]] = None, + parser_fn: Optional[Callable[..., Any]] = None, + dataset_transform_fn: Optional[Callable[[tf.data.Dataset], + tf.data.Dataset]] = None, + postprocess_fn: Optional[Callable[..., Any]] = None): + """Initializes an InputReader instance. + + Args: + params: A config_definitions.DataConfig object. + shards: A list of files to be read. If given, read from these files. + Otherwise, read from params.input_path. + dataset_fn: A `tf.data.Dataset` that consumes the input files. For + example, it can be `tf.data.TFRecordDataset`. + decoder_fn: An optional `callable` that takes the serialized data string + and decodes them into the raw tensor dictionary. + parser_fn: An optional `callable` that takes the decoded raw tensors dict + and parse them into a dictionary of tensors that can be consumed by the + model. 
It will be executed after decoder_fn. + dataset_transform_fn: An optional `callable` that takes a + `tf.data.Dataset` object and returns a `tf.data.Dataset`. It will be + executed after parser_fn. + postprocess_fn: A optional `callable` that processes batched tensors. It + will be executed after batching. + """ + if params.input_path and params.tfds_name: + raise ValueError('At most one of `input_path` and `tfds_name` can be ' + 'specified, but got %s and %s.' % ( + params.input_path, params.tfds_name)) + self._shards = shards + self._tfds_builder = None + if self._shards: + self._num_files = len(self._shards) + elif not params.tfds_name: + self._input_patterns = params.input_path.strip().split(',') + self._num_files = 0 + for input_pattern in self._input_patterns: + input_pattern = input_pattern.strip() + if not input_pattern: + continue + matched_files = tf.io.gfile.glob(input_pattern) + if not matched_files: + raise ValueError('%s does not match any files.' % input_pattern) + else: + self._num_files += len(matched_files) + if self._num_files == 0: + raise ValueError('%s does not match any files.' % params.input_path) + else: + if not params.tfds_split: + raise ValueError( + '`tfds_name` is %s, but `tfds_split` is not specified.' % + params.tfds_name) + self._tfds_builder = tfds.builder( + params.tfds_name, data_dir=params.tfds_data_dir) + + self._global_batch_size = params.global_batch_size + self._is_training = params.is_training + self._drop_remainder = params.drop_remainder + self._shuffle_buffer_size = params.shuffle_buffer_size + self._cache = params.cache + self._cycle_length = params.cycle_length + self._block_length = params.block_length + self._sharding = params.sharding + self._examples_consume = params.examples_consume + self._tfds_split = params.tfds_split + self._tfds_download = params.tfds_download + self._tfds_as_supervised = params.tfds_as_supervised + self._tfds_skip_decoding_feature = params.tfds_skip_decoding_feature + + self._dataset_fn = dataset_fn + self._decoder_fn = decoder_fn + self._parser_fn = parser_fn + self._dataset_transform_fn = dataset_transform_fn + self._postprocess_fn = postprocess_fn + + def _read_sharded_files( + self, + input_context: Optional[tf.distribute.InputContext] = None): + """Reads a dataset from sharded files.""" + # Read from `self._shards` if it is provided. + if self._shards: + dataset = tf.data.Dataset.from_tensor_slices(self._shards) + else: + dataset = tf.data.Dataset.list_files( + self._input_patterns, shuffle=self._is_training) + if self._sharding and input_context and ( + input_context.num_input_pipelines > 1): + dataset = dataset.shard(input_context.num_input_pipelines, + input_context.input_pipeline_id) + if self._is_training: + dataset = dataset.repeat() + + dataset = dataset.interleave( + map_func=self._dataset_fn, + cycle_length=self._cycle_length, + block_length=self._block_length, + num_parallel_calls=tf.data.experimental.AUTOTUNE) + return dataset + + def _read_single_file( + self, + input_context: Optional[tf.distribute.InputContext] = None): + """Reads a dataset from a single file.""" + # Read from `self._shards` if it is provided. + dataset = self._dataset_fn(self._shards or self._input_patterns) + + # When `input_file` is a path to a single file, disable auto sharding + # so that same input file is sent to all workers. 
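# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this file): wiring an InputReader to a
# TFRecord file pattern. The file path and feature spec are hypothetical
# placeholders; only the shape of the API is the point here.
# ---------------------------------------------------------------------------
def _decode_fn(serialized_example):
  """Decodes a serialized tf.train.Example into raw tensors."""
  return tf.io.parse_single_example(
      serialized_example,
      features={
          'feature': tf.io.FixedLenFeature([16], tf.float32),
          'label': tf.io.FixedLenFeature([], tf.int64),
      })


def _parse_fn(decoded_tensors):
  """Maps decoded tensors to the (features, labels) pair the model consumes."""
  return decoded_tensors['feature'], decoded_tensors['label']


train_config = cfg.DataConfig(
    input_path='/tmp/toy-train-*.tfrecord',  # hypothetical pattern
    global_batch_size=64,
    is_training=True)
train_dataset = InputReader(
    params=train_config,
    decoder_fn=_decode_fn,
    parser_fn=_parse_fn).read()

# The same reader can pull from TFDS instead of files, e.g.:
# cfg.DataConfig(tfds_name='glue/mrpc', tfds_split='train',
#                tfds_as_supervised=True, global_batch_size=64,
#                is_training=True)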
+ options = tf.data.Options() + options.experimental_distribute.auto_shard_policy = ( + tf.data.experimental.AutoShardPolicy.OFF) + dataset = dataset.with_options(options) + if self._sharding and input_context and ( + input_context.num_input_pipelines > 1): + dataset = dataset.shard(input_context.num_input_pipelines, + input_context.input_pipeline_id) + if self._is_training: + dataset = dataset.repeat() + return dataset + + def _read_tfds( + self, + input_context: Optional[tf.distribute.InputContext] = None + ) -> tf.data.Dataset: + """Reads a dataset from tfds.""" + if self._tfds_download: + self._tfds_builder.download_and_prepare() + + read_config = tfds.ReadConfig( + interleave_cycle_length=self._cycle_length, + interleave_block_length=self._block_length, + input_context=input_context) + decoders = {} + if self._tfds_skip_decoding_feature: + for skip_feature in self._tfds_skip_decoding_feature.split(','): + decoders[skip_feature.strip()] = tfds.decode.SkipDecoding() + dataset = self._tfds_builder.as_dataset( + split=self._tfds_split, + shuffle_files=self._is_training, + as_supervised=self._tfds_as_supervised, + decoders=decoders, + read_config=read_config) + return dataset + + @property + def tfds_info(self) -> tfds.core.DatasetInfo: + """Returns TFDS dataset info, if available.""" + if self._tfds_builder: + return self._tfds_builder.info + else: + raise ValueError('tfds_info is not available, because the dataset ' + 'is not loaded from tfds.') + + def read( + self, + input_context: Optional[tf.distribute.InputContext] = None + ) -> tf.data.Dataset: + """Generates a tf.data.Dataset object.""" + if self._tfds_builder: + dataset = self._read_tfds(input_context) + elif self._num_files > 1: + dataset = self._read_sharded_files(input_context) + else: + assert self._num_files == 1 + dataset = self._read_single_file(input_context) + + if self._cache: + dataset = dataset.cache() + + if self._is_training: + dataset = dataset.shuffle(self._shuffle_buffer_size) + + if self._examples_consume > 0: + dataset = dataset.take(self._examples_consume) + + def maybe_map_fn(dataset, fn): + return dataset if fn is None else dataset.map( + fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) + + dataset = maybe_map_fn(dataset, self._decoder_fn) + dataset = maybe_map_fn(dataset, self._parser_fn) + + if self._dataset_transform_fn is not None: + dataset = self._dataset_transform_fn(dataset) + + per_replica_batch_size = input_context.get_per_replica_batch_size( + self._global_batch_size) if input_context else self._global_batch_size + + dataset = dataset.batch( + per_replica_batch_size, drop_remainder=self._drop_remainder) + dataset = maybe_map_fn(dataset, self._postprocess_fn) + return dataset.prefetch(tf.data.experimental.AUTOTUNE) diff --git a/models/official/modeling/__init__.py b/models/official/modeling/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/official/modeling/activations/__init__.py b/models/official/modeling/activations/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2b558fef3cb276c61e58d93c219db6a899c107ef --- /dev/null +++ b/models/official/modeling/activations/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Activations package definition.""" +from official.modeling.activations.gelu import gelu +from official.modeling.activations.swish import hard_swish +from official.modeling.activations.swish import identity +from official.modeling.activations.swish import simple_swish diff --git a/models/official/modeling/activations/gelu.py b/models/official/modeling/activations/gelu.py new file mode 100644 index 0000000000000000000000000000000000000000..c045bffa95b29e069831b548701b76d1b8e76c0d --- /dev/null +++ b/models/official/modeling/activations/gelu.py @@ -0,0 +1,40 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Gaussian error linear unit.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math + +import tensorflow as tf + + +@tf.keras.utils.register_keras_serializable(package='Text') +def gelu(x): + """Gaussian Error Linear Unit. + + This is a smoother version of the RELU. + Original paper: https://arxiv.org/abs/1606.08415 + Args: + x: float Tensor to perform activation. + + Returns: + `x` with the GELU activation applied. + """ + cdf = 0.5 * (1.0 + tf.tanh( + (math.sqrt(2 / math.pi) * (x + 0.044715 * tf.pow(x, 3))))) + return x * cdf diff --git a/models/official/modeling/activations/gelu_test.py b/models/official/modeling/activations/gelu_test.py new file mode 100644 index 0000000000000000000000000000000000000000..dc3b95ca8be16c058c592247684e45d419b50cc5 --- /dev/null +++ b/models/official/modeling/activations/gelu_test.py @@ -0,0 +1,38 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
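# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this diff): `gelu` is an ordinary Keras
# activation callable. Assumes `tf` and the `activations` package import used
# in the test below.
# ---------------------------------------------------------------------------
dense = tf.keras.layers.Dense(64, activation=activations.gelu)
print(activations.gelu(tf.constant([[0.25, 0.0, -0.25]])).numpy())
# -> approximately [[ 0.1497  0.     -0.1003]]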
+# ============================================================================== +"""Tests for the Gaussian error linear unit.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import +from official.modeling import activations + + +@keras_parameterized.run_all_keras_modes +class GeluTest(keras_parameterized.TestCase): + + def test_gelu(self): + expected_data = [[0.14967535, 0., -0.10032465], + [-0.15880796, -0.04540223, 2.9963627]] + gelu_data = activations.gelu([[.25, 0, -.25], [-1, -2, 3]]) + self.assertAllClose(expected_data, gelu_data) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/modeling/activations/swish.py b/models/official/modeling/activations/swish.py new file mode 100644 index 0000000000000000000000000000000000000000..1d799613095efe1a16dade9673adddee05f2679d --- /dev/null +++ b/models/official/modeling/activations/swish.py @@ -0,0 +1,75 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Customized Swish activation.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + + +@tf.keras.utils.register_keras_serializable(package='Text') +def simple_swish(features): + """Computes the Swish activation function. + + The tf.nn.swish operation uses a custom gradient to reduce memory usage. + Since saving custom gradients in SavedModel is currently not supported, and + one would not be able to use an exported TF-Hub module for fine-tuning, we + provide this wrapper that can allow to select whether to use the native + TensorFlow swish operation, or whether to use a customized operation that + has uses default TensorFlow gradient computation. + + Args: + features: A `Tensor` representing preactivation values. + + Returns: + The activation value. + """ + features = tf.convert_to_tensor(features) + return features * tf.nn.sigmoid(features) + + +@tf.keras.utils.register_keras_serializable(package='Text') +def hard_swish(features): + """Computes a hard version of the swish function. + + This operation can be used to reduce computational cost and improve + quantization for edge devices. + + Args: + features: A `Tensor` representing preactivation values. + + Returns: + The activation value. + """ + features = tf.convert_to_tensor(features) + return features * tf.nn.relu6(features + tf.constant(3.)) * (1. / 6.) + + +@tf.keras.utils.register_keras_serializable(package='Text') +def identity(features): + """Computes the identity function. + + Useful for helping in quantization. + + Args: + features: A `Tensor` representing preactivation values. + + Returns: + The activation value. 
+ """ + features = tf.convert_to_tensor(features) + return tf.identity(features) diff --git a/models/official/modeling/activations/swish_test.py b/models/official/modeling/activations/swish_test.py new file mode 100644 index 0000000000000000000000000000000000000000..22042e9a290a420805fc75bbfca6ded6e917d9eb --- /dev/null +++ b/models/official/modeling/activations/swish_test.py @@ -0,0 +1,49 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for the customized Swish activation.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf + +from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import +from official.modeling import activations + + +@keras_parameterized.run_all_keras_modes +class CustomizedSwishTest(keras_parameterized.TestCase): + + def _hard_swish_np(self, x): + x = np.float32(x) + return x * np.clip(x + 3, 0, 6) / 6 + + def test_simple_swish(self): + features = [[.25, 0, -.25], [-1, -2, 3]] + customized_swish_data = activations.simple_swish(features) + swish_data = tf.nn.swish(features) + self.assertAllClose(customized_swish_data, swish_data) + + def test_hard_swish(self): + features = [[.25, 0, -.25], [-1, -2, 3]] + customized_swish_data = activations.hard_swish(features) + swish_data = self._hard_swish_np(features) + self.assertAllClose(customized_swish_data, swish_data) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/modeling/hyperparams/__init__.py b/models/official/modeling/hyperparams/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..87c00e7f2a1934800cac21405aa924f2ddc1f241 --- /dev/null +++ b/models/official/modeling/hyperparams/__init__.py @@ -0,0 +1,21 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Hyperparams package definition.""" +# pylint: disable=g-multiple-import +from official.modeling.hyperparams.base_config import * +from official.modeling.hyperparams.oneof import * +from official.modeling.hyperparams.params_dict import * + diff --git a/models/official/modeling/hyperparams/base_config.py b/models/official/modeling/hyperparams/base_config.py new file mode 100644 index 0000000000000000000000000000000000000000..7ce5ce2d55016dce0c985a0e6f9fe3893a25f644 --- /dev/null +++ b/models/official/modeling/hyperparams/base_config.py @@ -0,0 +1,248 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Base configurations to standardize experiments.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import copy +import functools +from typing import Any, List, Mapping, Optional, Type + +import dataclasses +import tensorflow as tf +import yaml + +from official.modeling.hyperparams import params_dict + + +@dataclasses.dataclass +class Config(params_dict.ParamsDict): + """The base configuration class that supports YAML/JSON based overrides. + + * It recursively enforces a whitelist of basic types and container types, so + it avoids surprises with copy and reuse caused by unanticipated types. + * It converts dict to Config even within sequences, + e.g. for config = Config({'key': [([{'a': 42}],)]), + type(config.key[0][0][0]) is Config rather than dict. + """ + + # It's safe to add bytes and other immutable types here. + IMMUTABLE_TYPES = (str, int, float, bool, type(None)) + # It's safe to add set, frozenset and other collections here. + SEQUENCE_TYPES = (list, tuple) + + default_params: dataclasses.InitVar[Optional[Mapping[str, Any]]] = None + restrictions: dataclasses.InitVar[Optional[List[str]]] = None + + @classmethod + def _isvalidsequence(cls, v): + """Check if the input values are valid sequences. + + Args: + v: Input sequence. + + Returns: + True if the sequence is valid. Valid sequence includes the sequence + type in cls.SEQUENCE_TYPES and element type is in cls.IMMUTABLE_TYPES or + is dict or ParamsDict. 
+ """ + if not isinstance(v, cls.SEQUENCE_TYPES): + return False + return (all(isinstance(e, cls.IMMUTABLE_TYPES) for e in v) or + all(isinstance(e, dict) for e in v) or + all(isinstance(e, params_dict.ParamsDict) for e in v)) + + @classmethod + def _import_config(cls, v, subconfig_type): + """Returns v with dicts converted to Configs, recursively.""" + if not issubclass(subconfig_type, params_dict.ParamsDict): + raise TypeError( + 'Subconfig_type should be subclass of ParamsDict, found {!r}'.format( + subconfig_type)) + if isinstance(v, cls.IMMUTABLE_TYPES): + return v + elif isinstance(v, cls.SEQUENCE_TYPES): + # Only support one layer of sequence. + if not cls._isvalidsequence(v): + raise TypeError( + 'Invalid sequence: only supports single level {!r} of {!r} or ' + 'dict or ParamsDict found: {!r}'.format(cls.SEQUENCE_TYPES, + cls.IMMUTABLE_TYPES, v)) + import_fn = functools.partial( + cls._import_config, subconfig_type=subconfig_type) + return type(v)(map(import_fn, v)) + elif isinstance(v, params_dict.ParamsDict): + # Deepcopy here is a temporary solution for preserving type in nested + # Config object. + return copy.deepcopy(v) + elif isinstance(v, dict): + return subconfig_type(v) + else: + raise TypeError('Unknown type: {!r}'.format(type(v))) + + @classmethod + def _export_config(cls, v): + """Returns v with Configs converted to dicts, recursively.""" + if isinstance(v, cls.IMMUTABLE_TYPES): + return v + elif isinstance(v, cls.SEQUENCE_TYPES): + return type(v)(map(cls._export_config, v)) + elif isinstance(v, params_dict.ParamsDict): + return v.as_dict() + elif isinstance(v, dict): + raise TypeError('dict value not supported in converting.') + else: + raise TypeError('Unknown type: {!r}'.format(type(v))) + + @classmethod + def _get_subconfig_type(cls, k) -> Type[params_dict.ParamsDict]: + """Get element type by the field name. + + Args: + k: the key/name of the field. + + Returns: + Config as default. If a type annotation is found for `k`, + 1) returns the type of the annotation if it is subtype of ParamsDict; + 2) returns the element type if the annotation of `k` is List[SubType] + or Tuple[SubType]. + """ + subconfig_type = Config + if k in cls.__annotations__: + # Directly Config subtype. + type_annotation = cls.__annotations__[k] + if (isinstance(type_annotation, type) and + issubclass(type_annotation, Config)): + subconfig_type = cls.__annotations__[k] + else: + # Check if the field is a sequence of subtypes. + field_type = getattr(type_annotation, '__origin__', type(None)) + if (isinstance(field_type, type) and + issubclass(field_type, cls.SEQUENCE_TYPES)): + element_type = getattr(type_annotation, '__args__', [type(None)])[0] + subconfig_type = ( + element_type if issubclass(element_type, params_dict.ParamsDict) + else subconfig_type) + return subconfig_type + + def __post_init__(self, default_params, restrictions, *args, **kwargs): + super().__init__(default_params=default_params, + restrictions=restrictions, + *args, + **kwargs) + + def _set(self, k, v): + """Overrides same method in ParamsDict. + + Also called by ParamsDict methods. + + Args: + k: key to set. + v: value. + + Raises: + RuntimeError + """ + subconfig_type = self._get_subconfig_type(k) + if isinstance(v, dict): + if k not in self.__dict__ or not self.__dict__[k]: + # If the key not exist or the value is None, a new Config-family object + # sould be created for the key. 
+ self.__dict__[k] = subconfig_type(v) + else: + self.__dict__[k].override(v) + else: + self.__dict__[k] = self._import_config(v, subconfig_type) + + def __setattr__(self, k, v): + if k not in self.RESERVED_ATTR: + if getattr(self, '_locked', False): + raise ValueError('The Config has been locked. ' 'No change is allowed.') + self._set(k, v) + + def _override(self, override_dict, is_strict=True): + """Overrides same method in ParamsDict. + + Also called by ParamsDict methods. + + Args: + override_dict: dictionary to write to . + is_strict: If True, not allows to add new keys. + + Raises: + KeyError: overriding reserved keys or keys not exist (is_strict=True). + """ + for k, v in sorted(override_dict.items()): + if k in self.RESERVED_ATTR: + raise KeyError('The key {!r} is internally reserved. ' + 'Can not be overridden.'.format(k)) + if k not in self.__dict__: + if is_strict: + raise KeyError('The key {!r} does not exist in {!r}. ' + 'To extend the existing keys, use ' + '`override` with `is_strict` = False.'.format( + k, type(self))) + else: + self._set(k, v) + else: + if isinstance(v, dict) and self.__dict__[k]: + self.__dict__[k]._override(v, is_strict) # pylint: disable=protected-access + elif isinstance(v, params_dict.ParamsDict) and self.__dict__[k]: + self.__dict__[k]._override(v.as_dict(), is_strict) # pylint: disable=protected-access + else: + self._set(k, v) + + def as_dict(self): + """Returns a dict representation of params_dict.ParamsDict. + + For the nested params_dict.ParamsDict, a nested dict will be returned. + """ + return { + k: self._export_config(v) + for k, v in self.__dict__.items() + if k not in self.RESERVED_ATTR + } + + def replace(self, **kwargs): + """Like `override`, but returns a copy with the current config unchanged.""" + params = self.__class__(self) + params.override(kwargs, is_strict=True) + return params + + @classmethod + def from_yaml(cls, file_path: str): + # Note: This only works if the Config has all default values. + with tf.io.gfile.GFile(file_path, 'r') as f: + loaded = yaml.load(f) + config = cls() + config.override(loaded) + return config + + @classmethod + def from_json(cls, file_path: str): + """Wrapper for `from_yaml`.""" + return cls.from_yaml(file_path) + + @classmethod + def from_args(cls, *args, **kwargs): + """Builds a config from the given list of arguments.""" + attributes = list(cls.__annotations__.keys()) + default_params = {a: p for a, p in zip(attributes, args)} + default_params.update(kwargs) + return cls(default_params) diff --git a/models/official/modeling/hyperparams/base_config_test.py b/models/official/modeling/hyperparams/base_config_test.py new file mode 100644 index 0000000000000000000000000000000000000000..501f95899f526c8eab7cbfaaafb65433389ce0d8 --- /dev/null +++ b/models/official/modeling/hyperparams/base_config_test.py @@ -0,0 +1,299 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
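# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this file): a small Config subclass and the
# override / replace / as_dict round trip. The field names are hypothetical;
# `dataclasses` and `Config` are the imports and definitions above.
# ---------------------------------------------------------------------------
@dataclasses.dataclass
class OptimizerCfg(Config):
  name: str = 'adam'
  learning_rate: float = 1e-3


@dataclasses.dataclass
class TrainCfg(Config):
  batch_size: int = 32
  optimizer: OptimizerCfg = OptimizerCfg()


train_cfg = TrainCfg()
train_cfg.override({'batch_size': 64, 'optimizer': {'learning_rate': 3e-4}})
assert train_cfg.optimizer.learning_rate == 3e-4

bigger = train_cfg.replace(batch_size=128)  # a copy; train_cfg is unchanged
print(bigger.as_dict())
# -> roughly {'batch_size': 128,
#             'optimizer': {'name': 'adam', 'learning_rate': 0.0003}}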
+# ============================================================================== + +import pprint +from typing import List, Tuple + +from absl.testing import parameterized +import dataclasses +import tensorflow as tf +from official.modeling.hyperparams import base_config + + +@dataclasses.dataclass +class DumpConfig1(base_config.Config): + a: int = 1 + b: str = 'text' + + +@dataclasses.dataclass +class DumpConfig2(base_config.Config): + c: int = 2 + d: str = 'text' + e: DumpConfig1 = DumpConfig1() + + +@dataclasses.dataclass +class DumpConfig3(DumpConfig2): + f: int = 2 + g: str = 'text' + h: List[DumpConfig1] = dataclasses.field( + default_factory=lambda: [DumpConfig1(), DumpConfig1()]) + g: Tuple[DumpConfig1, ...] = (DumpConfig1(),) + + +class BaseConfigTest(parameterized.TestCase, tf.test.TestCase): + + def assertHasSameTypes(self, c, d, msg=''): + """Checks if a Config has the same structure as a given dict. + + Args: + c: the Config object to be check. + d: the reference dict object. + msg: The error message to show when type mismatched. + """ + # Make sure d is not a Config. Assume d is either + # dictionary or primitive type and c is the Config or primitive types. + self.assertNotIsInstance(d, base_config.Config) + if isinstance(d, base_config.Config.IMMUTABLE_TYPES): + self.assertEqual(pprint.pformat(c), pprint.pformat(d), msg=msg) + elif isinstance(d, base_config.Config.SEQUENCE_TYPES): + self.assertEqual(type(c), type(d), msg=msg) + for i, v in enumerate(d): + self.assertHasSameTypes(c[i], v, msg='{}[{!r}]'.format(msg, i)) + elif isinstance(d, dict): + self.assertIsInstance(c, base_config.Config, msg=msg) + for k, v in sorted(d.items()): + self.assertHasSameTypes(getattr(c, k), v, msg='{}[{!r}]'.format(msg, k)) + else: + raise TypeError('Unknown type: %r' % type(d)) + + def assertImportExport(self, v): + config = base_config.Config({'key': v}) + back = config.as_dict()['key'] + self.assertEqual(pprint.pformat(back), pprint.pformat(v)) + self.assertHasSameTypes(config.key, v, msg='=%s v' % pprint.pformat(v)) + + def test_invalid_keys(self): + params = base_config.Config() + with self.assertRaises(AttributeError): + _ = params.a + + def test_nested_config_types(self): + config = DumpConfig3() + self.assertIsInstance(config.e, DumpConfig1) + self.assertIsInstance(config.h[0], DumpConfig1) + self.assertIsInstance(config.h[1], DumpConfig1) + self.assertIsInstance(config.g[0], DumpConfig1) + + config.override({'e': {'a': 2, 'b': 'new text'}}) + self.assertIsInstance(config.e, DumpConfig1) + self.assertEqual(config.e.a, 2) + self.assertEqual(config.e.b, 'new text') + + config.override({'h': [{'a': 3, 'b': 'new text 2'}]}) + self.assertIsInstance(config.h[0], DumpConfig1) + self.assertLen(config.h, 1) + self.assertEqual(config.h[0].a, 3) + self.assertEqual(config.h[0].b, 'new text 2') + + config.override({'g': [{'a': 4, 'b': 'new text 3'}]}) + self.assertIsInstance(config.g[0], DumpConfig1) + self.assertLen(config.g, 1) + self.assertEqual(config.g[0].a, 4) + self.assertEqual(config.g[0].b, 'new text 3') + + @parameterized.parameters( + ('_locked', "The key '_locked' is internally reserved."), + ('_restrictions', "The key '_restrictions' is internally reserved."), + ('aa', "The key 'aa' does not exist."), + ) + def test_key_error(self, key, msg): + params = base_config.Config() + with self.assertRaisesRegex(KeyError, msg): + params.override({key: True}) + + @parameterized.parameters( + ('str data',), + (123,), + (1.23,), + (None,), + (['str', 1, 2.3, None],), + (('str', 1, 2.3, None),), 
+ ) + def test_import_export_immutable_types(self, v): + self.assertImportExport(v) + out = base_config.Config({'key': v}) + self.assertEqual(pprint.pformat(v), pprint.pformat(out.key)) + + def test_override_is_strict_true(self): + params = base_config.Config({ + 'a': 'aa', + 'b': 2, + 'c': { + 'c1': 'cc', + 'c2': 20 + } + }) + params.override({'a': 2, 'c': {'c1': 'ccc'}}, is_strict=True) + self.assertEqual(params.a, 2) + self.assertEqual(params.c.c1, 'ccc') + with self.assertRaises(KeyError): + params.override({'d': 'ddd'}, is_strict=True) + with self.assertRaises(KeyError): + params.override({'c': {'c3': 30}}, is_strict=True) + + config = base_config.Config({'key': [{'a': 42}]}) + config.override({'key': [{'b': 43}]}) + self.assertEqual(config.key[0].b, 43) + with self.assertRaisesRegex(AttributeError, 'The key `a` does not exist'): + _ = config.key[0].a + + @parameterized.parameters( + (lambda x: x, 'Unknown type'), + (object(), 'Unknown type'), + (set(), 'Unknown type'), + (frozenset(), 'Unknown type'), + ) + def test_import_unsupport_types(self, v, msg): + with self.assertRaisesRegex(TypeError, msg): + _ = base_config.Config({'key': v}) + + @parameterized.parameters( + ({ + 'a': [{ + 'b': 2, + }, { + 'c': 3, + }] + },), + ({ + 'c': [{ + 'f': 1.1, + }, { + 'h': [1, 2], + }] + },), + (({ + 'a': 'aa', + 'b': 2, + 'c': { + 'c1': 10, + 'c2': 20, + } + },),), + ) + def test_import_export_nested_structure(self, d): + self.assertImportExport(d) + + @parameterized.parameters( + ([{ + 'a': 42, + 'b': 'hello', + 'c': 1.2 + }],), + (({ + 'a': 42, + 'b': 'hello', + 'c': 1.2 + },),), + ) + def test_import_export_nested_sequences(self, v): + self.assertImportExport(v) + + @parameterized.parameters( + ([([{}],)],), + ([['str', 1, 2.3, None]],), + ((('str', 1, 2.3, None),),), + ([ + ('str', 1, 2.3, None), + ],), + ([ + ('str', 1, 2.3, None), + ],), + ([[{ + 'a': 42, + 'b': 'hello', + 'c': 1.2 + }]],), + ([[[{ + 'a': 42, + 'b': 'hello', + 'c': 1.2 + }]]],), + ((({ + 'a': 42, + 'b': 'hello', + 'c': 1.2 + },),),), + (((({ + 'a': 42, + 'b': 'hello', + 'c': 1.2 + },),),),), + ([({ + 'a': 42, + 'b': 'hello', + 'c': 1.2 + },)],), + (([{ + 'a': 42, + 'b': 'hello', + 'c': 1.2 + }],),), + ) + def test_import_export_unsupport_sequence(self, v): + with self.assertRaisesRegex(TypeError, + 'Invalid sequence: only supports single level'): + _ = base_config.Config({'key': v}) + + def test_construct_subtype(self): + pass + + def test_import_config(self): + params = base_config.Config({'a': [{'b': 2}, {'c': {'d': 3}}]}) + self.assertLen(params.a, 2) + self.assertEqual(params.a[0].b, 2) + self.assertEqual(type(params.a[0]), base_config.Config) + self.assertEqual(pprint.pformat(params.a[0].b), '2') + self.assertEqual(type(params.a[1]), base_config.Config) + self.assertEqual(type(params.a[1].c), base_config.Config) + self.assertEqual(pprint.pformat(params.a[1].c.d), '3') + + def test_override(self): + params = base_config.Config({'a': [{'b': 2}, {'c': {'d': 3}}]}) + params.override({'a': [{'b': 4}, {'c': {'d': 5}}]}, is_strict=False) + self.assertEqual(type(params.a), list) + self.assertEqual(type(params.a[0]), base_config.Config) + self.assertEqual(pprint.pformat(params.a[0].b), '4') + self.assertEqual(type(params.a[1]), base_config.Config) + self.assertEqual(type(params.a[1].c), base_config.Config) + self.assertEqual(pprint.pformat(params.a[1].c.d), '5') + + @parameterized.parameters( + ([{}],), + (({},),), + ) + def test_config_vs_params_dict(self, v): + d = {'key': v} + 
self.assertEqual(type(base_config.Config(d).key[0]), base_config.Config) + self.assertEqual(type(base_config.params_dict.ParamsDict(d).key[0]), dict) + + def test_ppformat(self): + self.assertEqual( + pprint.pformat([ + 's', 1, 1.0, True, None, {}, [], (), { + (2,): (3, [4], { + 6: 7, + }), + 8: 9, + } + ]), + "['s', 1, 1.0, True, None, {}, [], (), {8: 9, (2,): (3, [4], {6: 7})}]") + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/modeling/hyperparams/config_definitions.py b/models/official/modeling/hyperparams/config_definitions.py new file mode 100644 index 0000000000000000000000000000000000000000..78180cd8a01a09a9d646c04eb05742bafce5bf42 --- /dev/null +++ b/models/official/modeling/hyperparams/config_definitions.py @@ -0,0 +1,220 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Common configuration settings.""" +from typing import Optional, Union + +import dataclasses + +from official.modeling.hyperparams import base_config +from official.modeling.optimization.configs import optimization_config +from official.utils import registry + +OptimizationConfig = optimization_config.OptimizationConfig + + +@dataclasses.dataclass +class DataConfig(base_config.Config): + """The base configuration for building datasets. + + Attributes: + input_path: The path to the input. It can be either (1) a file pattern, or + (2) multiple file patterns separated by comma. It should not be specified + when the following `tfds_name` is specified. + tfds_name: The name of the tensorflow dataset (TFDS). It should not be + specified when the above `input_path` is specified. + tfds_split: A str indicating which split of the data to load from TFDS. It + is required when above `tfds_name` is specified. + global_batch_size: The global batch size across all replicas. + is_training: Whether this data is used for training or not. + drop_remainder: Whether the last batch should be dropped in the case it has + fewer than `global_batch_size` elements. + shuffle_buffer_size: The buffer size used for shuffling training data. + cache: Whether to cache dataset examples. Can be used to avoid re-reading + from disk on the second epoch. Requires significant memory overhead. + cycle_length: The number of files that will be processed concurrently when + interleaving files. + block_length: The number of consecutive elements to produce from each input + element before cycling to another input element when interleaving files. + sharding: Whether sharding is used in the input pipeline. + examples_consume: An `integer` specifying the number of examples it will + produce. If positive, it only takes this number of examples and raises + tf.error.OutOfRangeError after that. Default is -1, meaning it will + exhaust all the examples in the dataset. + tfds_data_dir: A str specifying the directory to read/write TFDS data. 
+ tfds_download: A bool to indicate whether to download data using TFDS. + tfds_as_supervised: A bool. When loading dataset from TFDS, if True, + the returned tf.data.Dataset will have a 2-tuple structure (input, label) + according to builder.info.supervised_keys; if False, the default, + the returned tf.data.Dataset will have a dictionary with all the features. + tfds_skip_decoding_feature: A str to indicate which features are skipped + for decoding when loading dataset from TFDS. Use comma to separate + multiple features. The main use case is to skip the image/video decoding + for better performance. + """ + input_path: str = "" + tfds_name: str = "" + tfds_split: str = "" + global_batch_size: int = 0 + is_training: bool = None + drop_remainder: bool = True + shuffle_buffer_size: int = 100 + cache: bool = False + cycle_length: int = 8 + block_length: int = 1 + sharding: bool = True + examples_consume: int = -1 + tfds_data_dir: str = "" + tfds_download: bool = False + tfds_as_supervised: bool = False + tfds_skip_decoding_feature: str = "" + + +@dataclasses.dataclass +class RuntimeConfig(base_config.Config): + """High-level configurations for Runtime. + + These include parameters that are not directly related to the experiment, + e.g. directories, accelerator type, etc. + + Attributes: + distribution_strategy: e.g. 'mirrored', 'tpu', etc. + enable_xla: Whether or not to enable XLA. + per_gpu_thread_count: thread count per GPU. + gpu_thread_mode: Whether and how the GPU device uses its own threadpool. + dataset_num_private_threads: Number of threads for a private threadpool + created for all datasets computation. + tpu: The address of the TPU to use, if any. + num_gpus: The number of GPUs to use, if any. + worker_hosts: comma-separated list of worker ip:port pairs for running + multi-worker models with DistributionStrategy. + task_index: If multi-worker training, the task index of this worker. + all_reduce_alg: Defines the algorithm for performing all-reduce. + num_packs: Sets `num_packs` in the cross device ops used in + MirroredStrategy. For details, see tf.distribute.NcclAllReduce. + mixed_precision_dtype: dtype of mixed precision policy. It can be 'float32', + 'float16', or 'bfloat16'. + loss_scale: The type of loss scale, or 'float' value. This is used when + setting the mixed precision policy. + run_eagerly: Whether or not to run the experiment eagerly. + batchnorm_spatial_persistent: Whether or not to enable the spatial + persistent mode for CuDNN batch norm kernel for improved GPU performance. + """ + distribution_strategy: str = "mirrored" + enable_xla: bool = False + gpu_thread_mode: Optional[str] = None + dataset_num_private_threads: Optional[int] = None + per_gpu_thread_count: int = 0 + tpu: Optional[str] = None + num_gpus: int = 0 + worker_hosts: Optional[str] = None + task_index: int = -1 + all_reduce_alg: Optional[str] = None + num_packs: int = 1 + loss_scale: Optional[Union[str, float]] = None + mixed_precision_dtype: Optional[str] = None + run_eagerly: bool = False + batchnorm_spatial_persistent: bool = False + + +@dataclasses.dataclass +class TensorboardConfig(base_config.Config): + """Configuration for Tensorboard. + + Attributes: + track_lr: Whether or not to track the learning rate in Tensorboard. Defaults + to True. + write_model_weights: Whether or not to write the model weights as images in + Tensorboard. Defaults to False. 
+ """ + track_lr: bool = True + write_model_weights: bool = False + + +@dataclasses.dataclass +class CallbacksConfig(base_config.Config): + """Configuration for Callbacks. + + Attributes: + enable_checkpoint_and_export: Whether or not to enable checkpoints as a + Callback. Defaults to True. + enable_tensorboard: Whether or not to enable Tensorboard as a Callback. + Defaults to True. + enable_time_history: Whether or not to enable TimeHistory Callbacks. + Defaults to True. + """ + enable_checkpoint_and_export: bool = True + enable_tensorboard: bool = True + enable_time_history: bool = True + + +@dataclasses.dataclass +class TrainerConfig(base_config.Config): + """Configuration for trainer. + + Attributes: + optimizer_config: optimizer config, it includes optimizer, learning rate, + and warmup schedule configs. + train_tf_while_loop: whether or not to use tf while loop. + train_tf_function: whether or not to use tf_function for training loop. + eval_tf_function: whether or not to use tf_function for eval. + steps_per_loop: number of steps per loop. + summary_interval: number of steps between each summary. + checkpoint_intervals: number of steps between checkpoints. + max_to_keep: max checkpoints to keep. + continuous_eval_timeout: maximum number of seconds to wait between + checkpoints, if set to None, continuous eval will wait indefinetely. + """ + optimizer_config: OptimizationConfig = OptimizationConfig() + train_tf_while_loop: bool = True + train_tf_function: bool = True + eval_tf_function: bool = True + steps_per_loop: int = 1000 + summary_interval: int = 1000 + checkpoint_interval: int = 1000 + max_to_keep: int = 5 + continuous_eval_timeout: Optional[int] = None + + +@dataclasses.dataclass +class TaskConfig(base_config.Config): + network: base_config.Config = None + train_data: DataConfig = DataConfig() + validation_data: DataConfig = DataConfig() + + +@dataclasses.dataclass +class ExperimentConfig(base_config.Config): + """Top-level configuration.""" + task: TaskConfig = TaskConfig() + trainer: TrainerConfig = TrainerConfig() + runtime: RuntimeConfig = RuntimeConfig() + train_steps: int = 0 + validation_steps: Optional[int] = None + validation_interval: int = 100 + + +_REGISTERED_CONFIGS = {} + + +def register_config_factory(name): + """Register ExperimentConfig factory method.""" + return registry.register(_REGISTERED_CONFIGS, name) + + +def get_exp_config_creater(exp_name: str): + """Looks up ExperimentConfig factory methods.""" + exp_creater = registry.lookup(_REGISTERED_CONFIGS, exp_name) + return exp_creater diff --git a/models/official/modeling/hyperparams/oneof.py b/models/official/modeling/hyperparams/oneof.py new file mode 100644 index 0000000000000000000000000000000000000000..cd49218c137d17e7917e16f8f2eb0b73a8a6a392 --- /dev/null +++ b/models/official/modeling/hyperparams/oneof.py @@ -0,0 +1,62 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Config class that supports oneof functionality.""" + +from typing import Optional + +import dataclasses +from official.modeling.hyperparams import base_config + + +@dataclasses.dataclass +class OneOfConfig(base_config.Config): + """Configuration for configs with one of feature. + + Attributes: + type: 'str', name of the field to select. + """ + type: Optional[str] = None + + def as_dict(self): + """Returns a dict representation of OneOfConfig. + + For the nested base_config.Config, a nested dict will be returned. + """ + if self.type is None: + return {'type': None} + elif self.__dict__['type'] not in self.__dict__: + raise ValueError( + 'type: {!r} is not a valid key!'.format(self.__dict__['type'])) + else: + chosen_type = self.type + chosen_value = self.__dict__[chosen_type] + return { + 'type': self.type, + chosen_type: self._export_config(chosen_value) + } + + def get(self): + """Returns selected config based on the value of type. + + If type is not set (None), None is returned. + """ + chosen_type = self.type + if chosen_type is None: + return None + if chosen_type not in self.__dict__: + raise ValueError( + 'type: {!r} is not a valid key!'.format(self.type)) + return self.__dict__[chosen_type] diff --git a/models/official/modeling/hyperparams/oneof_test.py b/models/official/modeling/hyperparams/oneof_test.py new file mode 100644 index 0000000000000000000000000000000000000000..abd6564c17efbad9987eea6a0e91261afc34f3f3 --- /dev/null +++ b/models/official/modeling/hyperparams/oneof_test.py @@ -0,0 +1,67 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+
+import dataclasses
+import tensorflow as tf
+from official.modeling.hyperparams import base_config
+from official.modeling.hyperparams import oneof
+
+
+@dataclasses.dataclass
+class ResNet(base_config.Config):
+  model_depth: int = 50
+
+
+@dataclasses.dataclass
+class Backbone(oneof.OneOfConfig):
+  type: str = 'resnet'
+  resnet: ResNet = ResNet()
+  not_resnet: int = 2
+
+
+@dataclasses.dataclass
+class OutputLayer(oneof.OneOfConfig):
+  type: str = 'single'
+  single: int = 1
+  multi_head: int = 2
+
+
+@dataclasses.dataclass
+class Network(base_config.Config):
+  backbone: Backbone = Backbone()
+  output_layer: OutputLayer = OutputLayer()
+
+
+class OneOfTest(tf.test.TestCase):
+
+  def test_to_dict(self):
+    network_params = {'backbone': {'type': 'resnet',
+                                   'resnet': {'model_depth': 50}
+                                  },
+                      'output_layer': {'type': 'single',
+                                       'single': 1000}
+                     }
+    network_config = Network(network_params)
+    self.assertEqual(network_config.as_dict(), network_params)
+
+  def test_get_oneof(self):
+    backbone = Backbone()
+    self.assertIsInstance(backbone.get(), ResNet)
+    self.assertEqual(backbone.get().as_dict(), {'model_depth': 50})
+
+
+if __name__ == '__main__':
+  tf.test.main()
diff --git a/models/official/modeling/hyperparams/params_dict.py b/models/official/modeling/hyperparams/params_dict.py
new file mode 100644
index 0000000000000000000000000000000000000000..88510e770a2021f0e7f65bfbf2ae6a2d3480de17
--- /dev/null
+++ b/models/official/modeling/hyperparams/params_dict.py
@@ -0,0 +1,439 @@
+# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""A parameter dictionary class which supports the nested structure."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import collections
+import copy
+import re
+
+import six
+import tensorflow as tf
+import yaml
+
+# regex pattern that matches on key-value pairs in a comma-separated
+# key-value pair string. It splits each k-v pair on the = sign, and
+# matches on values that are within single quotes, double quotes, single
+# values (e.g. floats, ints, etc.), and lists within brackets.
+_PARAM_RE = re.compile(r"""
+  (?P<name>[a-zA-Z][\w\.]*)    # variable name: "var" or "x"
+  \s*=\s*
+  ((?P<val>\'(.*?)\'           # single quote
+  |
+  \"(.*?)\"                    # double quote
+  |
+  [^,\[]*                      # single value
+  |
+  \[[^\]]*\]))                 # list of values
+  ($|,\s*)""", re.VERBOSE)
+
+_CONST_VALUE_RE = re.compile(r'(\d.*|-\d.*|None)')
+
+
+class ParamsDict(object):
+  """A hyperparameter container class."""
+
+  RESERVED_ATTR = ['_locked', '_restrictions']
+
+  def __init__(self, default_params=None, restrictions=None):
+    """Instantiate a ParamsDict.
+
+    Instantiate a ParamsDict given a set of default parameters and a list of
+    restrictions.
Upon initialization, it validates itself by checking all the
+    defined restrictions, and raises an error if it finds any inconsistency.
+
+    Args:
+      default_params: a Python dict or another ParamsDict object including the
+        default parameters to initialize.
+      restrictions: a list of strings, which define a list of restrictions to
+        ensure the consistency of different parameters internally. Each
+        restriction string is defined as a binary relation with a set of
+        operators, including {'==', '!=', '<', '<=', '>', '>='}.
+    """
+    self._locked = False
+    self._restrictions = []
+    if restrictions:
+      self._restrictions = restrictions
+    if default_params is None:
+      default_params = {}
+    self.override(default_params, is_strict=False)
+    self.validate()
+
+  def _set(self, k, v):
+    if isinstance(v, dict):
+      self.__dict__[k] = ParamsDict(v)
+    else:
+      self.__dict__[k] = copy.deepcopy(v)
+
+  def __setattr__(self, k, v):
+    """Sets the value of the existing key.
+
+    Note that this does not allow directly defining a new key. Use the
+    `override` method with `is_strict=False` instead.
+
+    Args:
+      k: the key string.
+      v: the value to be used to set the key `k`.
+
+    Raises:
+      KeyError: if k is not defined in the ParamsDict.
+    """
+    if k not in ParamsDict.RESERVED_ATTR:
+      if k not in self.__dict__.keys():
+        raise KeyError('The key `{}` does not exist. '
+                       'To extend the existing keys, use '
+                       '`override` with `is_strict` = False.'.format(k))
+      if self._locked:
+        raise ValueError('The ParamsDict has been locked. '
+                         'No change is allowed.')
+    self._set(k, v)
+
+  def __getattr__(self, k):
+    """Gets the value of the existing key.
+
+    Args:
+      k: the key string.
+
+    Returns:
+      the value of the key.
+
+    Raises:
+      AttributeError: if k is not defined in the ParamsDict.
+    """
+    if k not in self.__dict__.keys():
+      raise AttributeError('The key `{}` does not exist. '.format(k))
+    return self.__dict__[k]
+
+  def __contains__(self, key):
+    """Implements the membership test operator."""
+    return key in self.__dict__
+
+  def get(self, key, value=None):
+    """Accesses through built-in dictionary get method."""
+    return self.__dict__.get(key, value)
+
+  def __delattr__(self, k):
+    """Deletes the key and removes its values.
+
+    Args:
+      k: the key string.
+
+    Raises:
+      AttributeError: if k is reserved or not defined in the ParamsDict.
+      ValueError: if the ParamsDict instance has been locked.
+    """
+    if k in ParamsDict.RESERVED_ATTR:
+      raise AttributeError('The key `{}` is reserved. No change is allowed. '
+                           .format(k))
+    if k not in self.__dict__.keys():
+      raise AttributeError('The key `{}` does not exist. '.format(k))
+    if self._locked:
+      raise ValueError('The ParamsDict has been locked. No change is allowed.')
+    del self.__dict__[k]
+
+  def override(self, override_params, is_strict=True):
+    """Override the ParamsDict with a set of given params.
+
+    Args:
+      override_params: a dict or a ParamsDict specifying the parameters to
+        be overridden.
+      is_strict: a boolean specifying whether override is strict or not. If
+        True, keys in `override_params` must be present in the ParamsDict.
+        If False, keys in `override_params` can be different from what is
+        currently defined in the ParamsDict. In this case, the ParamsDict will
+        be extended to include the new keys.
+    """
+    if self._locked:
+      raise ValueError('The ParamsDict has been locked. No change is allowed.')
+    if isinstance(override_params, ParamsDict):
+      override_params = override_params.as_dict()
+    self._override(override_params, is_strict)  # pylint: disable=protected-access
+
+  def _override(self, override_dict, is_strict=True):
+    """The implementation of `override`."""
+    for k, v in six.iteritems(override_dict):
+      if k in ParamsDict.RESERVED_ATTR:
+        raise KeyError('The key `{}` is internally reserved. '
+                       'Cannot be overridden.'.format(k))
+      if k not in self.__dict__.keys():
+        if is_strict:
+          raise KeyError('The key `{}` does not exist. '
+                         'To extend the existing keys, use '
+                         '`override` with `is_strict` = False.'.format(k))
+        else:
+          self._set(k, v)
+      else:
+        if isinstance(v, dict):
+          self.__dict__[k]._override(v, is_strict)  # pylint: disable=protected-access
+        elif isinstance(v, ParamsDict):
+          self.__dict__[k]._override(v.as_dict(), is_strict)  # pylint: disable=protected-access
+        else:
+          self.__dict__[k] = copy.deepcopy(v)
+
+  def lock(self):
+    """Makes the ParamsDict immutable."""
+    self._locked = True
+
+  def as_dict(self):
+    """Returns a dict representation of ParamsDict.
+
+    For the nested ParamsDict, a nested dict will be returned.
+    """
+    params_dict = {}
+    for k, v in six.iteritems(self.__dict__):
+      if k not in ParamsDict.RESERVED_ATTR:
+        if isinstance(v, ParamsDict):
+          params_dict[k] = v.as_dict()
+        else:
+          params_dict[k] = copy.deepcopy(v)
+    return params_dict
+
+  def validate(self):
+    """Validates the parameters' consistency based on the restrictions.
+
+    This method validates the internal consistency using the pre-defined list
+    of restrictions. A restriction is defined as a string which specifies a
+    binary operation. The supported binary operations are {'==', '!=', '<',
+    '<=', '>', '>='}. Note that the meaning of these operators is consistent
+    with the underlying Python implementation. Users should make sure the
+    defined restrictions on their types make sense.
+
+    For example, for a ParamsDict like the following
+    ```
+    a:
+      a1: 1
+      a2: 2
+    b:
+      bb:
+        bb1: 10
+        bb2: 20
+      ccc:
+        a1: 1
+        a3: 3
+    ```
+    one can define two restrictions like this
+    ['a.a1 == b.ccc.a1', 'a.a2 <= b.bb.bb2']
+
+    What they enforce are:
+    - a.a1 = 1 == b.ccc.a1 = 1
+    - a.a2 = 2 <= b.bb.bb2 = 20
+
+    Raises:
+      KeyError: if any of the following happens
+        (1) any of the parameters in any of the restrictions is not defined in
+            ParamsDict,
+        (2) any inconsistency violating the restriction is found.
+      ValueError: if the restriction defined in the string is not supported.
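A short sketch of how the restriction mechanism described above behaves in practice (all values are illustrative):

```python
# Illustrative only: a restriction tying two nested keys together.
params = ParamsDict(
    {'a': {'a1': 1}, 'b': {'a1': 1}},
    restrictions=['a.a1 == b.a1'])
params.validate()                 # passes: 1 == 1

params.override({'a': {'a1': 5}})
try:
  params.validate()               # 5 != 1 now, so a KeyError is raised
except KeyError as e:
  print(e)
```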
+ """ + def _get_kv(dotted_string, params_dict): + """Get keys and values indicated by dotted_string.""" + if _CONST_VALUE_RE.match(dotted_string) is not None: + const_str = dotted_string + if const_str == 'None': + constant = None + else: + constant = float(const_str) + return None, constant + else: + tokenized_params = dotted_string.split('.') + v = params_dict + for t in tokenized_params: + v = v[t] + return tokenized_params[-1], v + + def _get_kvs(tokens, params_dict): + if len(tokens) != 2: + raise ValueError('Only support binary relation in restriction.') + stripped_tokens = [t.strip() for t in tokens] + left_k, left_v = _get_kv(stripped_tokens[0], params_dict) + right_k, right_v = _get_kv(stripped_tokens[1], params_dict) + return left_k, left_v, right_k, right_v + + params_dict = self.as_dict() + for restriction in self._restrictions: + if '==' in restriction: + tokens = restriction.split('==') + _, left_v, _, right_v = _get_kvs(tokens, params_dict) + if left_v != right_v: + raise KeyError('Found inconsistncy between key `{}` and key `{}`.' + .format(tokens[0], tokens[1])) + elif '!=' in restriction: + tokens = restriction.split('!=') + _, left_v, _, right_v = _get_kvs(tokens, params_dict) + if left_v == right_v: + raise KeyError('Found inconsistncy between key `{}` and key `{}`.' + .format(tokens[0], tokens[1])) + elif '<' in restriction: + tokens = restriction.split('<') + _, left_v, _, right_v = _get_kvs(tokens, params_dict) + if left_v >= right_v: + raise KeyError('Found inconsistncy between key `{}` and key `{}`.' + .format(tokens[0], tokens[1])) + elif '<=' in restriction: + tokens = restriction.split('<=') + _, left_v, _, right_v = _get_kvs(tokens, params_dict) + if left_v > right_v: + raise KeyError('Found inconsistncy between key `{}` and key `{}`.' + .format(tokens[0], tokens[1])) + elif '>' in restriction: + tokens = restriction.split('>') + _, left_v, _, right_v = _get_kvs(tokens, params_dict) + if left_v <= right_v: + raise KeyError('Found inconsistncy between key `{}` and key `{}`.' + .format(tokens[0], tokens[1])) + elif '>=' in restriction: + tokens = restriction.split('>=') + _, left_v, _, right_v = _get_kvs(tokens, params_dict) + if left_v < right_v: + raise KeyError('Found inconsistncy between key `{}` and key `{}`.' + .format(tokens[0], tokens[1])) + else: + raise ValueError('Unsupported relation in restriction.') + + +def read_yaml_to_params_dict(file_path): + """Reads a YAML file to a ParamsDict.""" + with tf.io.gfile.GFile(file_path, 'r') as f: + params_dict = yaml.load(f) + return ParamsDict(params_dict) + + +def save_params_dict_to_yaml(params, file_path): + """Saves the input ParamsDict to a YAML file.""" + with tf.io.gfile.GFile(file_path, 'w') as f: + def _my_list_rep(dumper, data): + # u'tag:yaml.org,2002:seq' is the YAML internal tag for sequence. + return dumper.represent_sequence( + u'tag:yaml.org,2002:seq', data, flow_style=True) + yaml.add_representer(list, _my_list_rep) + yaml.dump(params.as_dict(), f, default_flow_style=False) + + +def nested_csv_str_to_json_str(csv_str): + """Converts a nested (using '.') comma-separated k=v string to a JSON string. + + Converts a comma-separated string of key/value pairs that supports + nesting of keys to a JSON string. Nesting is implemented using + '.' between levels for a given key. + + Spacing between commas and = is supported (e.g. there is no difference between + "a=1,b=2", "a = 1, b = 2", or "a=1, b=2") but there should be no spaces before + keys or after values (e.g. 
" a=1,b=2" and "a=1,b=2 " are not supported). + + Note that this will only support values supported by CSV, meaning + values such as nested lists (e.g. "a=[[1,2,3],[4,5,6]]") are not + supported. Strings are supported as well, e.g. "a='hello'". + + An example conversion would be: + + "a=1, b=2, c.a=2, c.b=3, d.a.a=5" + + to + + "{ a: 1, b : 2, c: {a : 2, b : 3}, d: {a: {a : 5}}}" + + Args: + csv_str: the comma separated string. + + Returns: + the converted JSON string. + + Raises: + ValueError: If csv_str is not in a comma separated string or + if the string is formatted incorrectly. + """ + if not csv_str: + return '' + + formatted_entries = [] + nested_map = collections.defaultdict(list) + pos = 0 + while pos < len(csv_str): + m = _PARAM_RE.match(csv_str, pos) + if not m: + raise ValueError('Malformed hyperparameter value while parsing ' + 'CSV string: %s' % csv_str[pos:]) + pos = m.end() + # Parse the values. + m_dict = m.groupdict() + name = m_dict['name'] + v = m_dict['val'] + + # If a GCS path (e.g. gs://...) is provided, wrap this in quotes + # as yaml.load would otherwise throw an exception + if re.match(r'(?=[^\"\'])(?=[gs://])', v): + v = '\'{}\''.format(v) + + name_nested = name.split('.') + if len(name_nested) > 1: + grouping = name_nested[0] + value = '.'.join(name_nested[1:]) + '=' + v + nested_map[grouping].append(value) + else: + formatted_entries.append('%s : %s' % (name, v)) + + for grouping, value in nested_map.items(): + value = ','.join(value) + value = nested_csv_str_to_json_str(value) + formatted_entries.append('%s : %s' % (grouping, value)) + return '{' + ', '.join(formatted_entries) + '}' + + +def override_params_dict(params, dict_or_string_or_yaml_file, is_strict): + """Override a given ParamsDict using a dict, JSON/YAML/CSV string or YAML file. + + The logic of the function is outlined below: + 1. Test that the input is a dict. If not, proceed to 2. + 2. Tests that the input is a string. If not, raise unknown ValueError + 2.1. Test if the string is in a CSV format. If so, parse. + If not, proceed to 2.2. + 2.2. Try loading the string as a YAML/JSON. If successful, parse to + dict and use it to override. If not, proceed to 2.3. + 2.3. Try using the string as a file path and load the YAML file. + + Args: + params: a ParamsDict object to be overridden. + dict_or_string_or_yaml_file: a Python dict, JSON/YAML/CSV string or + path to a YAML file specifying the parameters to be overridden. + is_strict: a boolean specifying whether override is strict or not. + + Returns: + params: the overridden ParamsDict object. + + Raises: + ValueError: if failed to override the parameters. 
+ """ + if not dict_or_string_or_yaml_file: + return params + if isinstance(dict_or_string_or_yaml_file, dict): + params.override(dict_or_string_or_yaml_file, is_strict) + elif isinstance(dict_or_string_or_yaml_file, six.string_types): + try: + dict_or_string_or_yaml_file = ( + nested_csv_str_to_json_str(dict_or_string_or_yaml_file)) + except ValueError: + pass + params_dict = yaml.load(dict_or_string_or_yaml_file) + if isinstance(params_dict, dict): + params.override(params_dict, is_strict) + else: + with tf.io.gfile.GFile(dict_or_string_or_yaml_file) as f: + params.override(yaml.load(f), is_strict) + else: + raise ValueError('Unknown input type to parse.') + return params diff --git a/models/official/modeling/hyperparams/params_dict_test.py b/models/official/modeling/hyperparams/params_dict_test.py new file mode 100644 index 0000000000000000000000000000000000000000..169ffa47ceff5717c2ae375f7e1114c5b05f3ea1 --- /dev/null +++ b/models/official/modeling/hyperparams/params_dict_test.py @@ -0,0 +1,346 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for params_dict.py.""" + +import os + +import tensorflow as tf +import yaml + +from official.modeling.hyperparams import params_dict + + +class ParamsDictTest(tf.test.TestCase): + + def test_init_from_an_empty_dict(self): + params = params_dict.ParamsDict() + with self.assertRaises(AttributeError): + _ = params.a + + with self.assertRaises(KeyError): + params.a = 'aa' + + def test_init_from_a_dict(self): + params = params_dict.ParamsDict({'a': 'aa', 'b': 2}) + self.assertEqual(params.a, 'aa') + self.assertEqual(params.b, 2) + + def test_init_from_a_param_dict(self): + params_init = params_dict.ParamsDict({'a': 'aa', 'b': 2}) + params = params_dict.ParamsDict(params_init) + self.assertEqual(params.a, 'aa') + self.assertEqual(params.b, 2) + + def test_lock(self): + params = params_dict.ParamsDict({'a': 1, 'b': 2, 'c': 3}) + params.lock() + with self.assertRaises(ValueError): + params.a = 10 + with self.assertRaises(ValueError): + params.override({'b': 20}) + with self.assertRaises(ValueError): + del params.c + + def test_setattr(self): + params = params_dict.ParamsDict() + params.override( + {'a': 'aa', 'b': 2, 'c': None}, is_strict=False) + params.c = 'ccc' + self.assertEqual(params.a, 'aa') + self.assertEqual(params.b, 2) + self.assertEqual(params.c, 'ccc') + + def test_getattr(self): + params = params_dict.ParamsDict() + params.override( + {'a': 'aa', 'b': 2, 'c': None}, is_strict=False) + self.assertEqual(params.a, 'aa') + self.assertEqual(params.b, 2) + self.assertEqual(params.c, None) + + def test_delattr(self): + params = params_dict.ParamsDict() + params.override( + {'a': 'aa', 'b': 2, 'c': None, 'd': {'d1': 1, 'd2': 10}}, + is_strict=False) + del params.c + self.assertEqual(params.a, 'aa') + self.assertEqual(params.b, 2) + with self.assertRaises(AttributeError): + _ = params.c + del 
params.d + with self.assertRaises(AttributeError): + _ = params.d.d1 + + def test_contains(self): + params = params_dict.ParamsDict() + params.override( + {'a': 'aa'}, is_strict=False) + self.assertIn('a', params) + self.assertNotIn('b', params) + + def test_get(self): + params = params_dict.ParamsDict() + params.override( + {'a': 'aa'}, is_strict=False) + self.assertEqual(params.get('a'), 'aa') + self.assertEqual(params.get('b', 2), 2) + self.assertEqual(params.get('b'), None) + + def test_override_is_strict_true(self): + params = params_dict.ParamsDict( + {'a': 'aa', 'b': 2, 'c': {'c1': 'cc', 'c2': 20}}) + params.override({'a': 2, 'c': {'c1': 'ccc'}}, is_strict=True) + self.assertEqual(params.a, 2) + self.assertEqual(params.c.c1, 'ccc') + with self.assertRaises(KeyError): + params.override({'d': 'ddd'}, is_strict=True) + with self.assertRaises(KeyError): + params.override({'c': {'c3': 30}}, is_strict=True) + + def test_override_is_strict_false(self): + params = params_dict.ParamsDict( + {'a': 'aa', 'b': 2, 'c': {'c1': 10, 'c2': 20}}) + params.override({'a': 2, 'c': {'c3': 3000}}, is_strict=False) + self.assertEqual(params.a, 2) + self.assertEqual(params.c.c3, 3000) + params.override({'d': 'ddd'}, is_strict=False) + self.assertEqual(params.d, 'ddd') + params.override({'c': {'c4': 4444}}, is_strict=False) + self.assertEqual(params.c.c4, 4444) + + def test_as_dict(self): + params = params_dict.ParamsDict( + {'a': 'aa', 'b': 2, 'c': {'c1': 10, 'c2': 20}}) + params_d = params.as_dict() + self.assertEqual(params_d['a'], 'aa') + self.assertEqual(params_d['b'], 2) + self.assertEqual(params_d['c']['c1'], 10) + self.assertEqual(params_d['c']['c2'], 20) + + def test_validate(self): + # Raise error due to the unknown parameter. + with self.assertRaises(KeyError): + params = params_dict.ParamsDict( + {'a': 1, 'b': {'a': 11}}, ['a == c']) + + # OK to check equality of two nested dicts. + params = params_dict.ParamsDict( + {'a': 1, 'b': {'a': 10}, 'c': {'a': 10}}, ['b == c']) + + # Raise error due to inconsistency + with self.assertRaises(KeyError): + params = params_dict.ParamsDict( + {'a': 1, 'c': {'a': 10}}, ['a == c.a']) + + # Valid rule. + params = params_dict.ParamsDict( + {'a': 1, 'c': {'a': 1}}, ['a == c.a']) + + # Overridding violates the existing rule, raise error upon validate. + params.override({'a': 11}) + with self.assertRaises(KeyError): + params.validate() + + # Valid restrictions with constant. 
+ params = params_dict.ParamsDict( + {'a': None, 'c': {'a': 1}}, ['a == None', 'c.a == 1']) + params.validate() + with self.assertRaises(KeyError): + params = params_dict.ParamsDict( + {'a': 4, 'c': {'a': 1}}, ['a == None', 'c.a == 1']) + + +class ParamsDictIOTest(tf.test.TestCase): + + def write_temp_file(self, filename, text): + temp_file = os.path.join(self.get_temp_dir(), filename) + with tf.io.gfile.GFile(temp_file, 'w') as writer: + writer.write(text) + return temp_file + + def test_save_params_dict_to_yaml(self): + params = params_dict.ParamsDict( + {'a': 'aa', 'b': 2, 'c': {'c1': 10, 'c2': 20}}) + output_yaml_file = os.path.join(self.get_temp_dir(), 'params.yaml') + params_dict.save_params_dict_to_yaml(params, output_yaml_file) + + with tf.io.gfile.GFile(output_yaml_file, 'r') as f: + params_d = yaml.load(f) + self.assertEqual(params.a, params_d['a']) + self.assertEqual(params.b, params_d['b']) + self.assertEqual(params.c.c1, params_d['c']['c1']) + self.assertEqual(params.c.c2, params_d['c']['c2']) + + def test_read_yaml_to_params_dict(self): + input_yaml_file = self.write_temp_file( + 'params.yaml', r""" + a: 'aa' + b: 2 + c: + c1: 10 + c2: 20 + """) + params = params_dict.read_yaml_to_params_dict(input_yaml_file) + + self.assertEqual(params.a, 'aa') + self.assertEqual(params.b, 2) + self.assertEqual(params.c.c1, 10) + self.assertEqual(params.c.c2, 20) + + def test_override_params_dict_using_dict(self): + params = params_dict.ParamsDict({ + 'a': 1, 'b': 2.5, 'c': [3, 4], 'd': 'hello', 'e': False}) + override_dict = {'b': 5.2, 'c': [30, 40]} + params = params_dict.override_params_dict( + params, override_dict, is_strict=True) + self.assertEqual(1, params.a) + self.assertEqual(5.2, params.b) + self.assertEqual([30, 40], params.c) + self.assertEqual('hello', params.d) + self.assertEqual(False, params.e) + + def test_override_params_dict_using_yaml_string(self): + params = params_dict.ParamsDict({ + 'a': 1, 'b': 2.5, 'c': [3, 4], 'd': 'hello', 'e': False}) + override_yaml_string = "'b': 5.2\n'c': [30, 40]" + params = params_dict.override_params_dict( + params, override_yaml_string, is_strict=True) + self.assertEqual(1, params.a) + self.assertEqual(5.2, params.b) + self.assertEqual([30, 40], params.c) + self.assertEqual('hello', params.d) + self.assertEqual(False, params.e) + + def test_override_params_dict_using_json_string(self): + params = params_dict.ParamsDict({ + 'a': 1, 'b': {'b1': 2, 'b2': [2, 3],}, + 'd': {'d1': {'d2': 'hello'}}, 'e': False}) + override_json_string = "{ b: { b2: [3, 4] }, d: { d1: { d2: 'hi' } } }" + params = params_dict.override_params_dict( + params, override_json_string, is_strict=True) + self.assertEqual(1, params.a) + self.assertEqual(2, params.b.b1) + self.assertEqual([3, 4], params.b.b2) + self.assertEqual('hi', params.d.d1.d2) + self.assertEqual(False, params.e) + + def test_override_params_dict_using_csv_string(self): + params = params_dict.ParamsDict({ + 'a': 1, 'b': {'b1': 2, 'b2': [2, 3],}, + 'd': {'d1': {'d2': 'hello'}}, 'e': False}) + override_csv_string = "b.b2=[3,4], d.d1.d2='hi, world', e=gs://test" + params = params_dict.override_params_dict( + params, override_csv_string, is_strict=True) + self.assertEqual(1, params.a) + self.assertEqual(2, params.b.b1) + self.assertEqual([3, 4], params.b.b2) + self.assertEqual('hi, world', params.d.d1.d2) + self.assertEqual('gs://test', params.e) + + def test_override_params_dict_using_yaml_file(self): + params = params_dict.ParamsDict({ + 'a': 1, 'b': 2.5, 'c': [3, 4], 'd': 'hello', 'e': False}) + 
override_yaml_file = self.write_temp_file( + 'params.yaml', r""" + b: 5.2 + c: [30, 40] + """) + params = params_dict.override_params_dict( + params, override_yaml_file, is_strict=True) + self.assertEqual(1, params.a) + self.assertEqual(5.2, params.b) + self.assertEqual([30, 40], params.c) + self.assertEqual('hello', params.d) + self.assertEqual(False, params.e) + + +class IOTest(tf.test.TestCase): + + def test_basic_csv_str_to_json_str(self): + csv_str = 'a=1,b=2,c=3' + json_str = '{a : 1, b : 2, c : 3}' + converted_csv_str = params_dict.nested_csv_str_to_json_str(csv_str) + self.assertEqual(converted_csv_str, json_str) + + def test_basic_csv_str_load(self): + csv_str = 'a=1,b=2,c=3' + expected_output = {'a': 1, 'b': 2, 'c': 3} + converted_csv_str = params_dict.nested_csv_str_to_json_str(csv_str) + converted_dict = yaml.load(converted_csv_str) + self.assertDictEqual(converted_dict, expected_output) + + def test_basic_nested_csv_str_to_json_str(self): + csv_str = 'a=1,b.b1=2' + json_str = '{a : 1, b : {b1 : 2}}' + converted_csv_str = params_dict.nested_csv_str_to_json_str(csv_str) + self.assertEqual(converted_csv_str, json_str) + + def test_basic_nested_csv_str_load(self): + csv_str = 'a=1,b.b1=2,c.c1=3' + expected_output = {'a': 1, 'b': {'b1': 2}, 'c': {'c1': 3}} + converted_csv_str = params_dict.nested_csv_str_to_json_str(csv_str) + converted_dict = yaml.load(converted_csv_str) + self.assertDictEqual(converted_dict, expected_output) + + def test_complex_nested_csv_str_to_json_str(self): + csv_str = 'a.aa.aaa.aaaaa.a=1' + json_str = '{a : {aa : {aaa : {aaaaa : {a : 1}}}}}' + converted_csv_str = params_dict.nested_csv_str_to_json_str(csv_str) + self.assertEqual(converted_csv_str, json_str) + + def test_complex_nested_csv_str_load(self): + csv_str = 'a.aa.aaa.aaaaa.a=1,a.a=2' + expected_output = {'a': {'aa': {'aaa': {'aaaaa': {'a': 1}}}, 'a': 2}} + converted_csv_str = params_dict.nested_csv_str_to_json_str(csv_str) + converted_dict = yaml.load(converted_csv_str) + self.assertDictEqual(converted_dict, expected_output) + + def test_csv_str_load_supported_datatypes(self): + csv_str = 'a=1,b=2.,c=[1,2,3],d=\'hello, there\',e=\"Hi.\"' + converted_csv_str = params_dict.nested_csv_str_to_json_str(csv_str) + converted_dict = yaml.load(converted_csv_str) + self.assertEqual(converted_dict['a'], 1) + self.assertEqual(converted_dict['b'], 2.) 
+ self.assertEqual(converted_dict['c'], [1, 2, 3]) + self.assertEqual(converted_dict['d'], 'hello, there') + self.assertEqual(converted_dict['e'], 'Hi.') + + def test_csv_str_load_unsupported_datatypes(self): + csv_str = 'a=[[1,2,3],[4,5,6]]' + self.assertRaises(ValueError, + params_dict.nested_csv_str_to_json_str, + csv_str) + + def test_csv_str_to_json_str_spacing(self): + csv_str1 = 'a=1,b=2,c=3' + csv_str2 = 'a = 1, b = 2, c = 3' + json_str = '{a : 1, b : 2, c : 3}' + converted_csv_str1 = params_dict.nested_csv_str_to_json_str(csv_str1) + converted_csv_str2 = params_dict.nested_csv_str_to_json_str(csv_str2) + self.assertEqual(converted_csv_str1, converted_csv_str2) + self.assertEqual(converted_csv_str1, json_str) + self.assertEqual(converted_csv_str2, json_str) + + def test_gcs_added_quotes(self): + csv_str = 'a=gs://abc, b=gs://def' + expected_output = '{a : \'gs://abc\', b : \'gs://def\'}' + converted_csv_str = params_dict.nested_csv_str_to_json_str(csv_str) + self.assertEqual(converted_csv_str, expected_output) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/modeling/optimization/__init__.py b/models/official/modeling/optimization/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b5c6292b64d922144b7ced18c8c9460617e05492 --- /dev/null +++ b/models/official/modeling/optimization/__init__.py @@ -0,0 +1,7 @@ +"""Optimization package definition.""" + +# pylint: disable=wildcard-import +from official.modeling.optimization.configs.learning_rate_config import * +from official.modeling.optimization.configs.optimization_config import * +from official.modeling.optimization.configs.optimizer_config import * +from official.modeling.optimization.optimizer_factory import OptimizerFactory diff --git a/models/official/modeling/optimization/configs/__init__.py b/models/official/modeling/optimization/configs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/official/modeling/optimization/configs/learning_rate_config.py b/models/official/modeling/optimization/configs/learning_rate_config.py new file mode 100644 index 0000000000000000000000000000000000000000..b55c713f1905cf9aaa52f87a6663d3385628d5a5 --- /dev/null +++ b/models/official/modeling/optimization/configs/learning_rate_config.py @@ -0,0 +1,162 @@ +# Lint as: python3 +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Dataclasses for learning rate schedule config.""" +from typing import List, Optional + +import dataclasses +from official.modeling.hyperparams import base_config + + +@dataclasses.dataclass +class StepwiseLrConfig(base_config.Config): + """Configuration for stepwise learning rate decay. + + This class is a container for the piecewise constant learning rate scheduling + configs. 
It will configure an instance of PiecewiseConstantDecay keras + learning rate schedule. + + An example (from keras docs): use a learning rate that's 1.0 for the first + 100001 steps, 0.5 for the next 10000 steps, and 0.1 for any additional steps. + ```python + boundaries: [100000, 110000] + values: [1.0, 0.5, 0.1] + + Attributes: + name: The name of the learning rate schedule. Defaults to PiecewiseConstant. + boundaries: A list of ints of strictly increasing entries. + Defaults to None. + values: A list of floats that specifies the values for the intervals defined + by `boundaries`. It should have one more element than `boundaries`. + The learning rate is computed as follows: + [0, boundaries[0]] -> values[0] + [boundaries[0], boundaries[1]] -> values[1] + [boundaries[n-1], boundaries[n]] -> values[n] + [boundaries[n], end] -> values[n+1] + Defaults to None. + """ + name: str = 'PiecewiseConstantDecay' + boundaries: Optional[List[int]] = None + values: Optional[List[float]] = None + + +@dataclasses.dataclass +class ExponentialLrConfig(base_config.Config): + """Configuration for exponential learning rate decay. + + This class is a containers for the exponential learning rate decay configs. + + Attributes: + name: The name of the learning rate schedule. Defaults to ExponentialDecay. + initial_learning_rate: A float. The initial learning rate. Defaults to + None. + decay_steps: A positive integer that is used for decay computation. + Defaults to None. + decay_rate: A float. Defaults to None. + staircase: A boolean, if true, learning rate is decreased at discreate + intervals. Defaults to False. + """ + name: str = 'ExponentialDecay' + initial_learning_rate: Optional[float] = None + decay_steps: Optional[int] = None + decay_rate: Optional[float] = None + staircase: Optional[bool] = None + + +@dataclasses.dataclass +class PolynomialLrConfig(base_config.Config): + """Configuration for polynomial learning rate decay. + + This class is a containers for the polynomial learning rate decay configs. + + Attributes: + name: The name of the learning rate schedule. Defaults to PolynomialDecay. + initial_learning_rate: A float. The initial learning rate. Defaults to + None. + decay_steps: A positive integer that is used for decay computation. + Defaults to None. + end_learning_rate: A float. The minimal end learning rate. + power: A float. The power of the polynomial. Defaults to linear, 1.0. + cycle: A boolean, whether or not it should cycle beyond decay_steps. + Defaults to False. + """ + name: str = 'PolynomialDecay' + initial_learning_rate: Optional[float] = None + decay_steps: Optional[int] = None + end_learning_rate: float = 0.0001 + power: float = 1.0 + cycle: bool = False + + +@dataclasses.dataclass +class CosineLrConfig(base_config.Config): + """Configuration for Cosine learning rate decay. + + This class is a containers for the cosine learning rate decay configs, + tf.keras.experimental.CosineDecay. + + Attributes: + name: The name of the learning rate schedule. Defaults to CosineDecay. + initial_learning_rate: A float. The initial learning rate. Defaults to + None. + decay_steps: A positive integer that is used for decay computation. + Defaults to None. + alpha: A float. Minimum learning rate value as a fraction of + initial_learning_rate. 
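For reference, the schedule this config feeds (tf.keras.experimental.CosineDecay, wired up by the optimizer factory later in this change) computes approximately the following; the snippet is a sketch of the formula, not code added by this change:

```python
import math

def cosine_decay(initial_learning_rate, decay_steps, alpha, step):
  # Sketch of the cosine decay curve that CosineLrConfig parameterizes.
  step = min(step, decay_steps)
  cosine = 0.5 * (1 + math.cos(math.pi * step / decay_steps))
  return initial_learning_rate * ((1 - alpha) * cosine + alpha)

# e.g. initial_learning_rate=0.1, decay_steps=1000, alpha=0.0:
# step 0 -> 0.1, step 500 -> 0.05, step 1000 -> 0.0
```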
+ """ + name: str = 'CosineDecay' + initial_learning_rate: Optional[float] = None + decay_steps: Optional[int] = None + alpha: float = 0.0 + + +@dataclasses.dataclass +class LinearWarmupConfig(base_config.Config): + """Configuration for linear warmup schedule config. + + This class is a container for the linear warmup schedule configs. + Warmup_learning_rate is the initial learning rate, the final learning rate of + the warmup period is the learning_rate of the optimizer in use. The learning + rate at each step linearly increased according to the following formula: + warmup_learning_rate = warmup_learning_rate + + step / warmup_steps * (final_learning_rate - warmup_learning_rate). + Using warmup overrides the learning rate schedule by the number of warmup + steps. + + Attributes: + name: The name of warmup schedule. Defaults to linear. + warmup_learning_rate: Initial learning rate for the warmup. Defaults to 0. + warmup_steps: Warmup steps. Defaults to None. + """ + name: str = 'linear' + warmup_learning_rate: float = 0 + warmup_steps: Optional[int] = None + + +@dataclasses.dataclass +class PolynomialWarmupConfig(base_config.Config): + """Configuration for linear warmup schedule config. + + This class is a container for the polynomial warmup schedule configs. + + Attributes: + name: The name of warmup schedule. Defaults to Polynomial. + power: Polynomial power. Defaults to 1. + warmup_steps: Warmup steps. Defaults to None. + """ + name: str = 'polynomial' + power: float = 1 + warmup_steps: Optional[int] = None + diff --git a/models/official/modeling/optimization/configs/optimization_config.py b/models/official/modeling/optimization/configs/optimization_config.py new file mode 100644 index 0000000000000000000000000000000000000000..1cf3616c75bec20c2560747561530f332cd2466c --- /dev/null +++ b/models/official/modeling/optimization/configs/optimization_config.py @@ -0,0 +1,95 @@ +# Lint as: python3 +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Dataclasses for optimization configs. + +This file define the dataclass for optimization configs (OptimizationConfig). +It also has two helper functions get_optimizer_config, and get_lr_config from +an OptimizationConfig class. +""" +from typing import Optional + +import dataclasses + +from official.modeling.hyperparams import base_config +from official.modeling.hyperparams import oneof +from official.modeling.optimization.configs import learning_rate_config as lr_cfg +from official.modeling.optimization.configs import optimizer_config as opt_cfg + + +@dataclasses.dataclass +class OptimizerConfig(oneof.OneOfConfig): + """Configuration for optimizer. + + Attributes: + type: 'str', type of optimizer to be used, on the of fields below. + sgd: sgd optimizer config. + adam: adam optimizer config. + adamw: adam with weight decay. + lamb: lamb optimizer. + rmsprop: rmsprop optimizer. 
+ """ + type: Optional[str] = None + sgd: opt_cfg.SGDConfig = opt_cfg.SGDConfig() + adam: opt_cfg.AdamConfig = opt_cfg.AdamConfig() + adamw: opt_cfg.AdamWeightDecayConfig = opt_cfg.AdamWeightDecayConfig() + lamb: opt_cfg.LAMBConfig = opt_cfg.LAMBConfig() + rmsprop: opt_cfg.RMSPropConfig = opt_cfg.RMSPropConfig() + + +@dataclasses.dataclass +class LrConfig(oneof.OneOfConfig): + """Configuration for lr schedule. + + Attributes: + type: 'str', type of lr schedule to be used, on the of fields below. + stepwise: stepwise learning rate config. + exponential: exponential learning rate config. + polynomial: polynomial learning rate config. + cosine: cosine learning rate config. + """ + type: Optional[str] = None + stepwise: lr_cfg.StepwiseLrConfig = lr_cfg.StepwiseLrConfig() + exponential: lr_cfg.ExponentialLrConfig = lr_cfg.ExponentialLrConfig() + polynomial: lr_cfg.PolynomialLrConfig = lr_cfg.PolynomialLrConfig() + cosine: lr_cfg.CosineLrConfig = lr_cfg.CosineLrConfig() + + +@dataclasses.dataclass +class WarmupConfig(oneof.OneOfConfig): + """Configuration for lr schedule. + + Attributes: + type: 'str', type of warmup schedule to be used, on the of fields below. + linear: linear warmup config. + polynomial: polynomial warmup config. + """ + type: Optional[str] = None + linear: lr_cfg.LinearWarmupConfig = lr_cfg.LinearWarmupConfig() + polynomial: lr_cfg.PolynomialWarmupConfig = lr_cfg.PolynomialWarmupConfig() + + +@dataclasses.dataclass +class OptimizationConfig(base_config.Config): + """Configuration for optimizer and learning rate schedule. + + Attributes: + optimizer: optimizer oneof config. + learning_rate: learning rate oneof config. + warmup: warmup oneof config. + """ + optimizer: OptimizerConfig = OptimizerConfig() + learning_rate: LrConfig = LrConfig() + warmup: WarmupConfig = WarmupConfig() diff --git a/models/official/modeling/optimization/configs/optimization_config_test.py b/models/official/modeling/optimization/configs/optimization_config_test.py new file mode 100644 index 0000000000000000000000000000000000000000..6dcd55e0e2071a23cae1494ae29c5efa282d052a --- /dev/null +++ b/models/official/modeling/optimization/configs/optimization_config_test.py @@ -0,0 +1,61 @@ +# Lint as: python3 +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for optimization_config.py.""" + +import tensorflow as tf + +from official.modeling.optimization.configs import learning_rate_config as lr_cfg +from official.modeling.optimization.configs import optimization_config +from official.modeling.optimization.configs import optimizer_config as opt_cfg + + +class OptimizerConfigTest(tf.test.TestCase): + + def test_no_optimizer(self): + optimizer = optimization_config.OptimizationConfig({}).optimizer.get() + self.assertEqual(optimizer, None) + + def test_no_lr_schedule(self): + lr = optimization_config.OptimizationConfig({}).learning_rate.get() + self.assertEqual(lr, None) + + def test_no_warmup_schedule(self): + warmup = optimization_config.OptimizationConfig({}).warmup.get() + self.assertEqual(warmup, None) + + def test_config(self): + opt_config = optimization_config.OptimizationConfig({ + 'optimizer': { + 'type': 'sgd', + 'sgd': {} # default config + }, + 'learning_rate': { + 'type': 'polynomial', + 'polynomial': {} + }, + 'warmup': { + 'type': 'linear' + } + }) + self.assertEqual(opt_config.optimizer.get(), + opt_cfg.SGDConfig()) + self.assertEqual(opt_config.learning_rate.get(), + lr_cfg.PolynomialLrConfig()) + self.assertEqual(opt_config.warmup.get(), + lr_cfg.LinearWarmupConfig()) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/modeling/optimization/configs/optimizer_config.py b/models/official/modeling/optimization/configs/optimizer_config.py new file mode 100644 index 0000000000000000000000000000000000000000..6e295777481957c9e965fbe7408dbb55ba063fc9 --- /dev/null +++ b/models/official/modeling/optimization/configs/optimizer_config.py @@ -0,0 +1,148 @@ +# Lint as: python3 +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Dataclasses for optimizer configs.""" +from typing import List, Optional + +import dataclasses +from official.modeling.hyperparams import base_config + + +@dataclasses.dataclass +class SGDConfig(base_config.Config): + """Configuration for SGD optimizer. + + The attributes for this class matches the arguments of tf.keras.optimizer.SGD. + + Attributes: + name: name of the optimizer. + learning_rate: learning_rate for SGD optimizer. + decay: decay rate for SGD optimizer. + nesterov: nesterov for SGD optimizer. + momentum: momentum for SGD optimizer. + """ + name: str = "SGD" + learning_rate: float = 0.01 + decay: float = 0.0 + nesterov: bool = False + momentum: float = 0.0 + + +@dataclasses.dataclass +class RMSPropConfig(base_config.Config): + """Configuration for RMSProp optimizer. + + The attributes for this class matches the arguments of + tf.keras.optimizers.RMSprop. + + Attributes: + name: name of the optimizer. + learning_rate: learning_rate for RMSprop optimizer. + rho: discounting factor for RMSprop optimizer. + momentum: momentum for RMSprop optimizer. 
+ epsilon: epsilon value for RMSprop optimizer, help with numerical stability. + centered: Whether to normalize gradients or not. + """ + name: str = "RMSprop" + learning_rate: float = 0.001 + rho: float = 0.9 + momentum: float = 0.0 + epsilon: float = 1e-7 + centered: bool = False + + +@dataclasses.dataclass +class AdamConfig(base_config.Config): + """Configuration for Adam optimizer. + + The attributes for this class matches the arguments of + tf.keras.optimizer.Adam. + + Attributes: + name: name of the optimizer. + learning_rate: learning_rate for Adam optimizer. + beta_1: decay rate for 1st order moments. + beta_2: decay rate for 2st order moments. + epsilon: epsilon value used for numerical stability in Adam optimizer. + amsgrad: boolean. Whether to apply AMSGrad variant of this algorithm from + the paper "On the Convergence of Adam and beyond". + """ + name: str = "Adam" + learning_rate: float = 0.001 + beta_1: float = 0.9 + beta_2: float = 0.999 + epsilon: float = 1e-07 + amsgrad: bool = False + + +@dataclasses.dataclass +class AdamWeightDecayConfig(base_config.Config): + """Configuration for Adam optimizer with weight decay. + + Attributes: + name: name of the optimizer. + learning_rate: learning_rate for the optimizer. + beta_1: decay rate for 1st order moments. + beta_2: decay rate for 2st order moments. + epsilon: epsilon value used for numerical stability in the optimizer. + amsgrad: boolean. Whether to apply AMSGrad variant of this algorithm from + the paper "On the Convergence of Adam and beyond". + weight_decay_rate: float. Weight decay rate. Default to 0. + include_in_weight_decay: list[str], or None. List of weight names to include + in weight decay. + include_in_weight_decay: list[str], or None. List of weight names to not + include in weight decay. + """ + name: str = "AdamWeightDecay" + learning_rate: float = 0.001 + beta_1: float = 0.9 + beta_2: float = 0.999 + epsilon: float = 1e-07 + amsgrad: bool = False + weight_decay_rate: float = 0.0 + include_in_weight_decay: Optional[List[str]] = None + exclude_from_weight_decay: Optional[List[str]] = None + + +@dataclasses.dataclass +class LAMBConfig(base_config.Config): + """Configuration for LAMB optimizer. + + The attributes for this class matches the arguments of + tensorflow_addons.optimizers.LAMB. + + Attributes: + name: name of the optimizer. + learning_rate: learning_rate for Adam optimizer. + beta_1: decay rate for 1st order moments. + beta_2: decay rate for 2st order moments. + epsilon: epsilon value used for numerical stability in LAMB optimizer. + weight_decay_rate: float. Weight decay rate. Default to 0. + exclude_from_weight_decay: List of regex patterns of variables excluded from + weight decay. Variables whose name contain a + substring matching the pattern will be excluded. + exclude_from_layer_adaptation: List of regex patterns of variables excluded + from layer adaptation. Variables whose name + contain a substring matching the pattern will + be excluded. 
+ """ + name: str = "LAMB" + learning_rate: float = 0.001 + beta_1: float = 0.9 + beta_2: float = 0.999 + epsilon: float = 1e-6 + weight_decay_rate: float = 0.0 + exclude_from_weight_decay: Optional[List[str]] = None + exclude_from_layer_adaptation: Optional[List[str]] = None diff --git a/models/official/modeling/optimization/lr_schedule.py b/models/official/modeling/optimization/lr_schedule.py new file mode 100644 index 0000000000000000000000000000000000000000..d5dd6fb6fb1478297e579a4be5b87ab5ae25f40e --- /dev/null +++ b/models/official/modeling/optimization/lr_schedule.py @@ -0,0 +1,155 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Learning rate schedule classes.""" + +from typing import Mapping, Any, Union, Optional + +import tensorflow as tf + + +class LinearWarmup(tf.keras.optimizers.schedules.LearningRateSchedule): + """Linear warmup schedule.""" + + def __init__(self, after_warmup_lr_sched: Union[ + tf.keras.optimizers.schedules.LearningRateSchedule, float], + warmup_steps: int, warmup_learning_rate: float, + name: Optional[str] = None): + """Add linear warmup schedule to a learning rate schedule. + + warmup_lr is the initial learning rate, the final learning rate of the + init_warmup period is the initial learning rate of lr_schedule in use. + The learning rate at each step linearly increased according to the following + formula: + learning_rate = warmup_lr + step / warmup_steps + * (final_warmup_lr - warmup_lr). + Using warmup overrides the learning rate schedule by the number of warmup + steps. + + Args: + after_warmup_lr_sched: tf.keras.optimizers.schedules + .LearningRateSchedule or a constant. + warmup_steps: int. number of the warmup steps. + warmup_learning_rate: floating point number. Initial learning rate for the + warmup. + name: Optional, name of warmup schedule. 
+ """ + super(LinearWarmup, self).__init__() + self._name = name + self._after_warmup_lr_sched = after_warmup_lr_sched + self._warmup_steps = warmup_steps + self._init_warmup_lr = warmup_learning_rate + if isinstance(after_warmup_lr_sched, + tf.keras.optimizers.schedules.LearningRateSchedule): + self._final_warmup_lr = after_warmup_lr_sched(warmup_steps) + else: + self._final_warmup_lr = tf.cast( + after_warmup_lr_sched, dtype=tf.float32) + + def __call__(self, step: int): + + global_step = tf.cast(step, dtype=tf.float32) + + linear_warmup_lr = ( + self._init_warmup_lr + global_step / self._warmup_steps * + (self._final_warmup_lr - self._init_warmup_lr)) + + if isinstance(self._after_warmup_lr_sched, + tf.keras.optimizers.schedules.LearningRateSchedule): + after_warmup_lr = self._after_warmup_lr_sched(step) + else: + after_warmup_lr = tf.cast(self._after_warmup_lr_sched, dtype=tf.float32) + + lr = tf.cond(global_step < self._warmup_steps, + lambda: linear_warmup_lr, + lambda: after_warmup_lr) + return lr + + def get_config(self) -> Mapping[str, Any]: + if isinstance(self._after_warmup_lr_sched, + tf.keras.optimizers.schedules.LearningRateSchedule): + config = { + "after_warmup_lr_sched": self._after_warmup_lr_sched.get_config()} # pytype: disable=attribute-error + else: + config = {"after_warmup_lr_sched": self._after_warmup_lr_sched} # pytype: disable=attribute-error + + config.update({ + "warmup_steps": self._warmup_steps, + "warmup_learning_rate": self._init_warmup_lr, + "name": self._name + }) + return config + + +class PolynomialWarmUp(tf.keras.optimizers.schedules.LearningRateSchedule): + """Applies polynomial warmup schedule on a given learning rate decay schedule. + """ + + def __init__(self, + after_warmup_lr_sched: Union[ + tf.keras.optimizers.schedules.LearningRateSchedule, float], + warmup_steps: int, + power: float = 1.0, + name: str = "PolynomialWarmup"): + super(PolynomialWarmUp, self).__init__() + if isinstance(after_warmup_lr_sched, + tf.keras.optimizers.schedules.LearningRateSchedule): + self._initial_learning_rate = after_warmup_lr_sched(warmup_steps) + else: + self._initial_learning_rate = tf.cast( + after_warmup_lr_sched, dtype=tf.float32) + + self._warmup_steps = warmup_steps + self._power = power + self._after_warmup_lr_sched = after_warmup_lr_sched + self._name = name + + def __call__(self, step): + with tf.name_scope(self._name or "PolynomialWarmUp") as name: + # Implements polynomial warmup. i.e., if global_step < warmup_steps, the + # learning rate will be `global_step/num_warmup_steps * init_lr`. 
+      global_step_float = tf.cast(step, tf.float32)
+      warmup_steps_float = tf.cast(self._warmup_steps, tf.float32)
+      warmup_percent_done = global_step_float / warmup_steps_float
+      warmup_learning_rate = (
+          self._initial_learning_rate *
+          tf.math.pow(warmup_percent_done, self._power))
+
+      if isinstance(self._after_warmup_lr_sched,
+                    tf.keras.optimizers.schedules.LearningRateSchedule):
+        after_warmup_lr = self._after_warmup_lr_sched(step)
+      else:
+        after_warmup_lr = tf.cast(self._after_warmup_lr_sched, dtype=tf.float32)
+
+      return tf.cond(
+          global_step_float < warmup_steps_float,
+          lambda: warmup_learning_rate,
+          lambda: after_warmup_lr,
+          name=name)
+
+  def get_config(self) -> Mapping[str, Any]:
+    if isinstance(self._after_warmup_lr_sched,
+                  tf.keras.optimizers.schedules.LearningRateSchedule):
+      config = {
+          "after_warmup_lr_sched": self._after_warmup_lr_sched.get_config()}  # pytype: disable=attribute-error
+    else:
+      config = {"after_warmup_lr_sched": self._after_warmup_lr_sched}  # pytype: disable=attribute-error
+
+    config.update({
+        "warmup_steps": self._warmup_steps,
+        "power": self._power,
+        "name": self._name
+    })
+    return config
diff --git a/models/official/modeling/optimization/optimizer_factory.py b/models/official/modeling/optimization/optimizer_factory.py
new file mode 100644
index 0000000000000000000000000000000000000000..ccb03d50ee8a5b74cda84cbe261cfdbecce60d23
--- /dev/null
+++ b/models/official/modeling/optimization/optimizer_factory.py
@@ -0,0 +1,145 @@
+# Lint as: python3
+# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Optimizer factory class."""
+from typing import Union
+
+import tensorflow as tf
+
+import tensorflow_addons.optimizers as tfa_optimizers
+
+from official.modeling.optimization import lr_schedule
+from official.modeling.optimization.configs import optimization_config as opt_cfg
+from official.nlp import optimization as nlp_optimization
+
+OPTIMIZERS_CLS = {
+    'sgd': tf.keras.optimizers.SGD,
+    'adam': tf.keras.optimizers.Adam,
+    'adamw': nlp_optimization.AdamWeightDecay,
+    'lamb': tfa_optimizers.LAMB,
+    'rmsprop': tf.keras.optimizers.RMSprop
+}
+
+LR_CLS = {
+    'stepwise': tf.keras.optimizers.schedules.PiecewiseConstantDecay,
+    'polynomial': tf.keras.optimizers.schedules.PolynomialDecay,
+    'exponential': tf.keras.optimizers.schedules.ExponentialDecay,
+    'cosine': tf.keras.experimental.CosineDecay
+}
+
+WARMUP_CLS = {
+    'linear': lr_schedule.LinearWarmup,
+    'polynomial': lr_schedule.PolynomialWarmUp
+}
+
+
+class OptimizerFactory(object):
+  """Optimizer factory class.
+
+  This class builds learning rate and optimizer based on an optimization
+  config. To use this class, you need to do the following:
+  (1) Define an optimization config; this includes the optimizer and the
+      learning rate schedule.
+  (2) Initialize the class using the optimization config.
+  (3) Build learning rate.
+  (4) Build optimizer.
+ + This is a typical example for using this class: + params = { + 'optimizer': { + 'type': 'sgd', + 'sgd': {'learning_rate': 0.1, 'momentum': 0.9} + }, + 'learning_rate': { + 'type': 'stepwise', + 'stepwise': {'boundaries': [10000, 20000], + 'values': [0.1, 0.01, 0.001]} + }, + 'warmup': { + 'type': 'linear', + 'linear': {'warmup_steps': 500, 'warmup_learning_rate': 0.01} + } + } + opt_config = OptimizationConfig(params) + opt_factory = OptimizerFactory(opt_config) + lr = opt_factory.build_learning_rate() + optimizer = opt_factory.build_optimizer(lr) + """ + + def __init__(self, config: opt_cfg.OptimizationConfig): + """Initializing OptimizerFactory. + + Args: + config: OptimizationConfig instance contain optimization config. + """ + self._config = config + self._optimizer_config = config.optimizer.get() + self._optimizer_type = config.optimizer.type + + if self._optimizer_config is None: + raise ValueError('Optimizer type must be specified') + + self._lr_config = config.learning_rate.get() + self._lr_type = config.learning_rate.type + + self._warmup_config = config.warmup.get() + self._warmup_type = config.warmup.type + + def build_learning_rate(self): + """Build learning rate. + + Builds learning rate from config. Learning rate schedule is built according + to the learning rate config. If there is no learning rate config, optimizer + learning rate is returned. + + Returns: + tf.keras.optimizers.schedules.LearningRateSchedule instance. If no + learning rate schedule defined, optimizer_config.learning_rate is + returned. + """ + + # TODO(arashwan): Explore if we want to only allow explicit const lr sched. + if not self._lr_config: + lr = self._optimizer_config.learning_rate + else: + lr = LR_CLS[self._lr_type](**self._lr_config.as_dict()) + + if self._warmup_config: + lr = WARMUP_CLS[self._warmup_type](lr, **self._warmup_config.as_dict()) + + return lr + + def build_optimizer( + self, lr: Union[tf.keras.optimizers.schedules.LearningRateSchedule, + float]): + """Build optimizer. + + Builds optimizer from config. It takes learning rate as input, and builds + the optimizer according to the optimizer config. Typically, the learning + rate built using self.build_lr() is passed as an argument to this method. + + Args: + lr: A floating point value, or + a tf.keras.optimizers.schedules.LearningRateSchedule instance. + Returns: + tf.keras.optimizers.Optimizer instance. + """ + + optimizer_dict = self._optimizer_config.as_dict() + optimizer_dict['learning_rate'] = lr + + optimizer = OPTIMIZERS_CLS[self._optimizer_type](**optimizer_dict) + return optimizer + diff --git a/models/official/modeling/optimization/optimizer_factory_test.py b/models/official/modeling/optimization/optimizer_factory_test.py new file mode 100644 index 0000000000000000000000000000000000000000..6da76fec93303813df4e44a0a5dddff4073db3fa --- /dev/null +++ b/models/official/modeling/optimization/optimizer_factory_test.py @@ -0,0 +1,249 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for optimizer_factory.py.""" + +from absl.testing import parameterized + +import tensorflow as tf + +from official.modeling.optimization import optimizer_factory +from official.modeling.optimization.configs import optimization_config + + +class OptimizerFactoryTest(tf.test.TestCase, parameterized.TestCase): + + @parameterized.parameters( + ('sgd'), + ('rmsprop'), + ('adam'), + ('adamw'), + ('lamb')) + def test_optimizers(self, optimizer_type): + params = { + 'optimizer': { + 'type': optimizer_type + } + } + optimizer_cls = optimizer_factory.OPTIMIZERS_CLS[optimizer_type] + expected_optimizer_config = optimizer_cls().get_config() + + opt_config = optimization_config.OptimizationConfig(params) + opt_factory = optimizer_factory.OptimizerFactory(opt_config) + lr = opt_factory.build_learning_rate() + optimizer = opt_factory.build_optimizer(lr) + + self.assertIsInstance(optimizer, optimizer_cls) + self.assertEqual(expected_optimizer_config, optimizer.get_config()) + + def test_stepwise_lr_schedule(self): + params = { + 'optimizer': { + 'type': 'sgd', + 'sgd': {'learning_rate': 0.1, 'momentum': 0.9} + }, + 'learning_rate': { + 'type': 'stepwise', + 'stepwise': {'boundaries': [10000, 20000], + 'values': [0.1, 0.01, 0.001]} + } + } + expected_lr_step_values = [ + [0, 0.1], + [5000, 0.1], + [10000, 0.1], + [10001, 0.01], + [20000, 0.01], + [20001, 0.001] + ] + opt_config = optimization_config.OptimizationConfig(params) + opt_factory = optimizer_factory.OptimizerFactory(opt_config) + lr = opt_factory.build_learning_rate() + + for step, value in expected_lr_step_values: + self.assertAlmostEqual(lr(step).numpy(), value) + + def test_stepwise_lr_with_warmup_schedule(self): + params = { + 'optimizer': { + 'type': 'sgd', + 'sgd': {'learning_rate': 0.1, 'momentum': 0.9} + }, + 'learning_rate': { + 'type': 'stepwise', + 'stepwise': {'boundaries': [10000, 20000], + 'values': [0.1, 0.01, 0.001]} + }, + 'warmup': { + 'type': 'linear', + 'linear': {'warmup_steps': 500, 'warmup_learning_rate': 0.01} + } + } + expected_lr_step_values = [ + [0, 0.01], + [250, 0.055], + [500, 0.1], + [5500, 0.1], + [10000, 0.1], + [10001, 0.01], + [20000, 0.01], + [20001, 0.001] + ] + opt_config = optimization_config.OptimizationConfig(params) + opt_factory = optimizer_factory.OptimizerFactory(opt_config) + lr = opt_factory.build_learning_rate() + + for step, value in expected_lr_step_values: + self.assertAlmostEqual(lr(step).numpy(), value) + + def test_exponential_lr_schedule(self): + params = { + 'optimizer': { + 'type': 'sgd', + 'sgd': {'learning_rate': 0.1, 'momentum': 0.9} + }, + 'learning_rate': { + 'type': 'exponential', + 'exponential': { + 'initial_learning_rate': 0.1, + 'decay_steps': 1000, + 'decay_rate': 0.96, + 'staircase': True + } + } + } + expected_lr_step_values = [ + [0, 0.1], + [999, 0.1], + [1000, 0.096], + [1999, 0.096], + [2000, 0.09216], + ] + opt_config = optimization_config.OptimizationConfig(params) + opt_factory = optimizer_factory.OptimizerFactory(opt_config) + lr = opt_factory.build_learning_rate() + + for step, value in expected_lr_step_values: + self.assertAlmostEqual(lr(step).numpy(), value) + + def test_polynomial_lr_schedule(self): + params = { + 'optimizer': { + 'type': 'sgd', + 'sgd': {'learning_rate': 0.1, 'momentum': 0.9} + }, + 'learning_rate': { + 'type': 'polynomial', + 'polynomial': { + 
'initial_learning_rate': 0.1, + 'decay_steps': 1000, + 'end_learning_rate': 0.001 + } + } + } + + expected_lr_step_values = [[0, 0.1], [500, 0.0505], [1000, 0.001]] + opt_config = optimization_config.OptimizationConfig(params) + opt_factory = optimizer_factory.OptimizerFactory(opt_config) + lr = opt_factory.build_learning_rate() + + for step, value in expected_lr_step_values: + self.assertAlmostEqual(lr(step).numpy(), value) + + def test_cosine_lr_schedule(self): + params = { + 'optimizer': { + 'type': 'sgd', + 'sgd': {'learning_rate': 0.1, 'momentum': 0.9} + }, + 'learning_rate': { + 'type': 'cosine', + 'cosine': { + 'initial_learning_rate': 0.1, + 'decay_steps': 1000 + } + } + } + expected_lr_step_values = [[0, 0.1], + [250, 0.08535534], + [500, 0.04999999], + [750, 0.01464466], + [1000, 0]] + opt_config = optimization_config.OptimizationConfig(params) + opt_factory = optimizer_factory.OptimizerFactory(opt_config) + lr = opt_factory.build_learning_rate() + + for step, value in expected_lr_step_values: + self.assertAlmostEqual(lr(step).numpy(), value) + + def test_constant_lr_with_warmup_schedule(self): + params = { + 'optimizer': { + 'type': 'sgd', + 'sgd': {'learning_rate': 0.1, 'momentum': 0.9} + }, + 'warmup': { + 'type': 'linear', + 'linear': { + 'warmup_steps': 500, + 'warmup_learning_rate': 0.01 + } + } + } + + expected_lr_step_values = [[0, 0.01], [250, 0.055], [500, 0.1], [5000, 0.1], + [10000, 0.1], [20000, 0.1]] + opt_config = optimization_config.OptimizationConfig(params) + opt_factory = optimizer_factory.OptimizerFactory(opt_config) + lr = opt_factory.build_learning_rate() + + for step, value in expected_lr_step_values: + self.assertAlmostEqual(lr(step).numpy(), value) + + def test_stepwise_lr_with_polynomial_warmup_schedule(self): + params = { + 'optimizer': { + 'type': 'sgd', + 'sgd': {'learning_rate': 0.1, 'momentum': 0.9} + }, + 'learning_rate': { + 'type': 'stepwise', + 'stepwise': {'boundaries': [10000, 20000], + 'values': [0.1, 0.01, 0.001]} + }, + 'warmup': { + 'type': 'polynomial', + 'polynomial': {'warmup_steps': 500, 'power': 2.} + } + } + expected_lr_step_values = [ + [0, 0.0], + [250, 0.025], + [500, 0.1], + [5500, 0.1], + [10000, 0.1], + [10001, 0.01], + [20000, 0.01], + [20001, 0.001] + ] + opt_config = optimization_config.OptimizationConfig(params) + opt_factory = optimizer_factory.OptimizerFactory(opt_config) + lr = opt_factory.build_learning_rate() + + for step, value in expected_lr_step_values: + self.assertAlmostEqual(lr(step).numpy(), value) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/modeling/performance.py b/models/official/modeling/performance.py new file mode 100644 index 0000000000000000000000000000000000000000..4b264f53256db66326ee4e51c5a29676e273eca9 --- /dev/null +++ b/models/official/modeling/performance.py @@ -0,0 +1,56 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Functions and classes related to training performance.""" + +import tensorflow as tf + + +def configure_optimizer(optimizer, + use_float16=False, + use_graph_rewrite=False, + loss_scale="dynamic"): + """Configures optimizer object with performance options.""" + if use_float16: + # Wraps optimizer with a LossScaleOptimizer. This is done automatically + # in compile() with the "mixed_float16" policy, but since we do not call + # compile(), we must wrap the optimizer manually. + optimizer = ( + tf.keras.mixed_precision.experimental.LossScaleOptimizer( + optimizer, loss_scale=loss_scale)) + if use_graph_rewrite: + # Note: the model dtype must be 'float32', which will ensure + # tf.ckeras.mixed_precision and + # tf.train.experimental.enable_mixed_precision_graph_rewrite do not double + # up. + optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite( + optimizer) + return optimizer + + +def set_mixed_precision_policy(dtype, loss_scale=None): + """Sets mix precision policy.""" + if dtype == tf.float16: + policy = tf.keras.mixed_precision.experimental.Policy( + 'mixed_float16', loss_scale=loss_scale) + tf.keras.mixed_precision.experimental.set_policy(policy) + elif dtype == tf.bfloat16: + policy = tf.keras.mixed_precision.experimental.Policy( + 'mixed_bfloat16') + tf.keras.mixed_precision.experimental.set_policy(policy) + elif dtype == tf.float32: + tf.keras.mixed_precision.experimental.set_policy('float32') + else: + raise ValueError("Unexpected dtype: %s" % dtype) diff --git a/models/official/modeling/tf_utils.py b/models/official/modeling/tf_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..279208239349e143ed59d9c6d5dbc418d25fe0fa --- /dev/null +++ b/models/official/modeling/tf_utils.py @@ -0,0 +1,190 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Common TF utilities.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import six +import tensorflow as tf + +from tensorflow.python.util import deprecation +from official.modeling import activations + + +@deprecation.deprecated( + None, + "tf.keras.layers.Layer supports multiple positional args and kwargs as " + "input tensors. pack/unpack inputs to override __call__ is no longer " + "needed." +) +def pack_inputs(inputs): + """Pack a list of `inputs` tensors to a tuple. + + Args: + inputs: a list of tensors. + + Returns: + a tuple of tensors. if any input is None, replace it with a special constant + tensor. 
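+
+  Example (illustrative sketch; `tensor_a` and `tensor_b` are placeholder
+  tensors):
+    packed = pack_inputs([tensor_a, None, tensor_b])
+    # -> (tensor_a, tf.constant(0, shape=[], dtype=tf.int32), tensor_b)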
+ """ + inputs = tf.nest.flatten(inputs) + outputs = [] + for x in inputs: + if x is None: + outputs.append(tf.constant(0, shape=[], dtype=tf.int32)) + else: + outputs.append(x) + return tuple(outputs) + + +@deprecation.deprecated( + None, + "tf.keras.layers.Layer supports multiple positional args and kwargs as " + "input tensors. pack/unpack inputs to override __call__ is no longer " + "needed." +) +def unpack_inputs(inputs): + """unpack a tuple of `inputs` tensors to a tuple. + + Args: + inputs: a list of tensors. + + Returns: + a tuple of tensors. if any input is a special constant tensor, replace it + with None. + """ + inputs = tf.nest.flatten(inputs) + outputs = [] + for x in inputs: + if is_special_none_tensor(x): + outputs.append(None) + else: + outputs.append(x) + x = tuple(outputs) + + # To trick the very pointless 'unbalanced-tuple-unpacking' pylint check + # from triggering. + if len(x) == 1: + return x[0] + return tuple(outputs) + + +def is_special_none_tensor(tensor): + """Checks if a tensor is a special None Tensor.""" + return tensor.shape.ndims == 0 and tensor.dtype == tf.int32 + + +# TODO(hongkuny): consider moving custom string-map lookup to keras api. +def get_activation(identifier): + """Maps a identifier to a Python function, e.g., "relu" => `tf.nn.relu`. + + It checks string first and if it is one of customized activation not in TF, + the corresponding activation will be returned. For non-customized activation + names and callable identifiers, always fallback to tf.keras.activations.get. + + Args: + identifier: String name of the activation function or callable. + + Returns: + A Python function corresponding to the activation function. + """ + if isinstance(identifier, six.string_types): + name_to_fn = { + "gelu": activations.gelu, + "simple_swish": activations.simple_swish, + "hard_swish": activations.hard_swish, + "identity": activations.identity, + } + identifier = str(identifier).lower() + if identifier in name_to_fn: + return tf.keras.activations.get(name_to_fn[identifier]) + return tf.keras.activations.get(identifier) + + +def get_shape_list(tensor, expected_rank=None, name=None): + """Returns a list of the shape of tensor, preferring static dimensions. + + Args: + tensor: A tf.Tensor object to find the shape of. + expected_rank: (optional) int. The expected rank of `tensor`. If this is + specified and the `tensor` has a different rank, and exception will be + thrown. + name: Optional name of the tensor for the error message. + + Returns: + A list of dimensions of the shape of tensor. All static dimensions will + be returned as python integers, and dynamic dimensions will be returned + as tf.Tensor scalars. + """ + if expected_rank is not None: + assert_rank(tensor, expected_rank, name) + + shape = tensor.shape.as_list() + + non_static_indexes = [] + for (index, dim) in enumerate(shape): + if dim is None: + non_static_indexes.append(index) + + if not non_static_indexes: + return shape + + dyn_shape = tf.shape(tensor) + for index in non_static_indexes: + shape[index] = dyn_shape[index] + return shape + + +def assert_rank(tensor, expected_rank, name=None): + """Raises an exception if the tensor rank is not of the expected rank. + + Args: + tensor: A tf.Tensor to check the rank of. + expected_rank: Python integer or list of integers, expected rank. + name: Optional name of the tensor for the error message. + + Raises: + ValueError: If the expected shape doesn't match the actual shape. 
+ """ + expected_rank_dict = {} + if isinstance(expected_rank, six.integer_types): + expected_rank_dict[expected_rank] = True + else: + for x in expected_rank: + expected_rank_dict[x] = True + + actual_rank = tensor.shape.ndims + if actual_rank not in expected_rank_dict: + raise ValueError( + "For the tensor `%s`, the actual tensor rank `%d` (shape = %s) is not " + "equal to the expected tensor rank `%s`" % + (name, actual_rank, str(tensor.shape), str(expected_rank))) + + +def safe_mean(losses): + """Computes a safe mean of the losses. + + Args: + losses: `Tensor` whose elements contain individual loss measurements. + + Returns: + A scalar representing the mean of `losses`. If `num_present` is zero, + then zero is returned. + """ + total = tf.reduce_sum(losses) + num_elements = tf.cast(tf.size(losses), dtype=losses.dtype) + return tf.math.divide_no_nan(total, num_elements) diff --git a/models/official/modeling/training/__init__.py b/models/official/modeling/training/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/official/modeling/training/distributed_executor.py b/models/official/modeling/training/distributed_executor.py new file mode 100644 index 0000000000000000000000000000000000000000..11451260cdca52a9c9f4019010123c4d2b40e99e --- /dev/null +++ b/models/official/modeling/training/distributed_executor.py @@ -0,0 +1,815 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Custom training loop for running TensorFlow 2.0 models.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import os + +from absl import flags +from absl import logging + +import numpy as np +import tensorflow as tf + +# pylint: disable=unused-import,g-import-not-at-top,redefined-outer-name,reimported +from typing import Optional, Dict, List, Text, Callable, Union, Iterator, Any +from official.modeling.hyperparams import params_dict +from official.utils import hyperparams_flags +from official.utils.misc import distribution_utils +from official.utils.misc import keras_utils + +FLAGS = flags.FLAGS + +strategy_flags_dict = hyperparams_flags.strategy_flags_dict +hparam_flags_dict = hyperparams_flags.hparam_flags_dict + + +def _save_checkpoint(checkpoint, model_dir, checkpoint_prefix): + """Saves model to model_dir with provided checkpoint prefix.""" + + checkpoint_path = os.path.join(model_dir, checkpoint_prefix) + saved_path = checkpoint.save(checkpoint_path) + logging.info('Saving model as TF checkpoint: %s', saved_path) + + +def _steps_to_run(current_step, total_steps, steps_per_loop): + """Calculates steps to run on device.""" + if steps_per_loop <= 0: + raise ValueError('steps_per_loop should be positive integer.') + return min(total_steps - current_step, steps_per_loop) + + +def _no_metric(): + return None + + +def metrics_as_dict(metric): + """Puts input metric(s) into a list. + + Args: + metric: metric(s) to be put into the list. `metric` could be a object, a + list or a dict of tf.keras.metrics.Metric or has the `required_method`. + + Returns: + A dictionary of valid metrics. + """ + if isinstance(metric, tf.keras.metrics.Metric): + metrics = {metric.name: metric} + elif isinstance(metric, list): + metrics = {m.name: m for m in metric} + elif isinstance(metric, dict): + metrics = metric + elif not metric: + return {} + else: + metrics = {'metric': metric} + return metrics + + +def metric_results(metric): + """Collects results from the given metric(s).""" + metrics = metrics_as_dict(metric) + metric_result = { + name: m.result().numpy().astype(float) for name, m in metrics.items() + } + return metric_result + + +def reset_states(metric): + """Resets states of the given metric(s).""" + metrics = metrics_as_dict(metric) + for m in metrics.values(): + m.reset_states() + + +class SummaryWriter(object): + """Simple SummaryWriter for writing dictionary of metrics. + + Attributes: + writer: The tf.SummaryWriter. + """ + + def __init__(self, model_dir: Text, name: Text): + """Inits SummaryWriter with paths. + + Arguments: + model_dir: the model folder path. + name: the summary subfolder name. + """ + self.writer = tf.summary.create_file_writer(os.path.join(model_dir, name)) + + def __call__(self, metrics: Union[Dict[Text, float], float], step: int): + """Write metrics to summary with the given writer. + + Args: + metrics: a dictionary of metrics values. Prefer dictionary. + step: integer. The training step. + """ + if not isinstance(metrics, dict): + # Support scalar metric without name. 
+ logging.warning('Warning: summary writer prefer metrics as dictionary.') + metrics = {'metric': metrics} + + with self.writer.as_default(): + for k, v in metrics.items(): + tf.summary.scalar(k, v, step=step) + self.writer.flush() + + +class DistributedExecutor(object): + """Interface to train and eval models with tf.distribute.Strategy. + """ + + def __init__(self, + strategy, + params, + model_fn, + loss_fn, + is_multi_host=False): + """Constructor. + + Args: + strategy: an instance of tf.distribute.Strategy. + params: Model configuration needed to run distribution strategy. + model_fn: Keras model function. Signature: + (params: ParamsDict) -> tf.keras.models.Model. + loss_fn: loss function. Signature: + (y_true: Tensor, y_pred: Tensor) -> Tensor + is_multi_host: Set to True when using multi hosts for training, like multi + worker GPU or TPU pod (slice). Otherwise, False. + """ + + self._params = params + self._model_fn = model_fn + self._loss_fn = loss_fn + self._strategy = strategy + self._checkpoint_name = 'ctl_step_{step}.ckpt' + self._is_multi_host = is_multi_host + self.train_summary_writer = None + self.eval_summary_writer = None + self.global_train_step = None + + @property + def checkpoint_name(self): + """Returns default checkpoint name.""" + return self._checkpoint_name + + @checkpoint_name.setter + def checkpoint_name(self, name): + """Sets default summary writer for the current thread.""" + self._checkpoint_name = name + + def loss_fn(self): + return self._loss_fn() + + def model_fn(self, params): + return self._model_fn(params) + + def _save_config(self, model_dir): + """Save parameters to config files if model_dir is defined.""" + + logging.info('Save config to model_dir %s.', model_dir) + if model_dir: + if not tf.io.gfile.exists(model_dir): + tf.io.gfile.makedirs(model_dir) + self._params.lock() + params_dict.save_params_dict_to_yaml(self._params, + model_dir + '/params.yaml') + else: + logging.warning('model_dir is empty, so skip the save config.') + + def _get_input_iterator( + self, input_fn: Callable[..., tf.data.Dataset], + strategy: tf.distribute.Strategy) -> Optional[Iterator[Any]]: + """Returns distributed dataset iterator. + + Args: + input_fn: (params: dict) -> tf.data.Dataset. + strategy: an instance of tf.distribute.Strategy. + + Returns: + An iterator that yields input tensors. + """ + + if input_fn is None: + return None + # When training with multiple TPU workers, datasets needs to be cloned + # across workers. Since Dataset instance cannot be cloned in eager mode, + # we instead pass callable that returns a dataset. + if self._is_multi_host: + return iter( + strategy.experimental_distribute_datasets_from_function(input_fn)) + else: + input_data = input_fn() + return iter(strategy.experimental_distribute_dataset(input_data)) + + def _create_replicated_step(self, + strategy, + model, + loss_fn, + optimizer, + metric=None): + """Creates a single training step. + + Args: + strategy: an instance of tf.distribute.Strategy. + model: (Tensor, bool) -> Tensor. model function. + loss_fn: (y_true: Tensor, y_pred: Tensor) -> Tensor. + optimizer: tf.keras.optimizers.Optimizer. + metric: tf.keras.metrics.Metric subclass. + + Returns: + The training step callable. 
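+      Note that the per-replica loss computed inside the step is divided by
+      `strategy.num_replicas_in_sync`, so gradients summed across replicas
+      correspond to the mean loss over the global batch.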
+ """ + metrics = metrics_as_dict(metric) + + def _replicated_step(inputs): + """Replicated training step.""" + inputs, labels = inputs + + with tf.GradientTape() as tape: + outputs = model(inputs, training=True) + prediction_loss = loss_fn(labels, outputs) + loss = tf.reduce_mean(prediction_loss) + loss = loss / strategy.num_replicas_in_sync + for m in metrics.values(): + m.update_state(labels, outputs) + + grads = tape.gradient(loss, model.trainable_variables) + optimizer.apply_gradients(zip(grads, model.trainable_variables)) + return loss + + return _replicated_step + + def _create_train_step(self, + strategy, + model, + loss_fn, + optimizer, + metric=None): + """Creates a distributed training step. + + Args: + strategy: an instance of tf.distribute.Strategy. + model: (Tensor, bool) -> Tensor. model function. + loss_fn: (y_true: Tensor, y_pred: Tensor) -> Tensor. + optimizer: tf.keras.optimizers.Optimizer. + metric: tf.keras.metrics.Metric subclass. + + Returns: + The training step callable. + """ + replicated_step = self._create_replicated_step(strategy, model, loss_fn, + optimizer, metric) + + @tf.function + def train_step(iterator, num_steps): + """Performs a distributed training step. + + Args: + iterator: an iterator that yields input tensors. + num_steps: the number of steps in the loop. + + Returns: + The loss tensor. + """ + if not isinstance(num_steps, tf.Tensor): + raise ValueError('steps should be an Tensor. Python object may cause ' + 'retracing.') + + per_replica_losses = strategy.run( + replicated_step, args=(next(iterator),)) + for _ in tf.range(num_steps - 1): + per_replica_losses = strategy.run( + replicated_step, args=(next(iterator),)) + + # For reporting, we returns the mean of losses. + losses = tf.nest.map_structure( + lambda x: strategy.reduce(tf.distribute.ReduceOp.MEAN, x, axis=None), + per_replica_losses) + return losses + + return train_step + + def _create_test_step(self, strategy, model, metric): + """Creates a distributed test step.""" + metrics = metrics_as_dict(metric) + + @tf.function + def test_step(iterator): + """Calculates evaluation metrics on distributed devices.""" + if not metric: + logging.info('Skip test_step because metric is None (%s)', metric) + return None, None + + def _test_step_fn(inputs): + """Replicated accuracy calculation.""" + inputs, labels = inputs + model_outputs = model(inputs, training=False) + for m in metrics.values(): + m.update_state(labels, model_outputs) + return labels, model_outputs + + return strategy.run(_test_step_fn, args=(next(iterator),)) + + return test_step + + def train(self, + train_input_fn: Callable[[params_dict.ParamsDict], tf.data.Dataset], + eval_input_fn: Callable[[params_dict.ParamsDict], + tf.data.Dataset] = None, + model_dir: Text = None, + total_steps: int = 1, + iterations_per_loop: int = 1, + train_metric_fn: Callable[[], Any] = None, + eval_metric_fn: Callable[[], Any] = None, + summary_writer_fn: Callable[[Text, Text], + SummaryWriter] = SummaryWriter, + init_checkpoint: Callable[[tf.keras.Model], Any] = None, + custom_callbacks: List[tf.keras.callbacks.Callback] = None, + continuous_eval: bool = False, + save_config: bool = True): + """Runs distributed training. + + Args: + train_input_fn: (params: dict) -> tf.data.Dataset training data input + function. + eval_input_fn: (Optional) same type as train_input_fn. If not None, will + trigger evaluting metric on eval data. If None, will not run eval step. + model_dir: the folder path for model checkpoints. + total_steps: total training steps. 
+ iterations_per_loop: train steps per loop. After each loop, this job will + update metrics like loss and save checkpoint. + train_metric_fn: metric_fn for evaluation in train_step. + eval_metric_fn: metric_fn for evaluation in test_step. + summary_writer_fn: function to create summary writer. + init_checkpoint: function to load checkpoint. + custom_callbacks: A list of Keras Callbacks objects to run during + training. More specifically, `on_batch_begin()`, `on_batch_end()`, + methods are invoked during training. + continuous_eval: If `True`, will continously run evaluation on every + available checkpoints. If `False`, will do the evaluation once after the + final step. + save_config: bool. Whether to save params to model_dir. + Returns: + The training loss and eval metrics. + """ + assert train_input_fn is not None + if train_metric_fn and not callable(train_metric_fn): + raise ValueError('if `train_metric_fn` is specified, ' + 'train_metric_fn must be a callable.') + if eval_metric_fn and not callable(eval_metric_fn): + raise ValueError('if `eval_metric_fn` is specified, ' + 'eval_metric_fn must be a callable.') + train_metric_fn = train_metric_fn or _no_metric + eval_metric_fn = eval_metric_fn or _no_metric + + if custom_callbacks and iterations_per_loop != 1: + logging.warning( + 'It is sematically wrong to run callbacks when ' + 'iterations_per_loop is not one (%s)', iterations_per_loop) + + custom_callbacks = custom_callbacks or [] + + def _run_callbacks_on_batch_begin(batch): + """Runs custom callbacks at the start of every step.""" + if not custom_callbacks: + return + for callback in custom_callbacks: + if callback: + callback.on_batch_begin(batch) + + def _run_callbacks_on_batch_end(batch): + """Runs custom callbacks at the end of every step.""" + if not custom_callbacks: + return + for callback in custom_callbacks: + if callback: + callback.on_batch_end(batch) + + if save_config: + self._save_config(model_dir) + + if FLAGS.save_checkpoint_freq: + save_freq = FLAGS.save_checkpoint_freq + else: + save_freq = iterations_per_loop + + params = self._params + strategy = self._strategy + # To reduce unnecessary send/receive input pipeline operation, we place + # input pipeline ops in worker task. + train_iterator = self._get_input_iterator(train_input_fn, strategy) + train_loss = None + train_metric_result = None + eval_metric_result = None + tf.keras.backend.set_learning_phase(1) + with strategy.scope(): + # To correctly place the model weights on accelerators, + # model and optimizer should be created in scope. + model = self.model_fn(params.as_dict()) + if not hasattr(model, 'optimizer'): + raise ValueError('User should set optimizer attribute to model ' + 'inside `model_fn`.') + optimizer = model.optimizer + + # Training loop starts here. + checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer) + latest_checkpoint_file = tf.train.latest_checkpoint(model_dir) + initial_step = 0 + if latest_checkpoint_file: + logging.info( + 'Checkpoint file %s found and restoring from ' + 'checkpoint', latest_checkpoint_file) + checkpoint.restore(latest_checkpoint_file) + initial_step = optimizer.iterations.numpy() + logging.info('Loading from checkpoint file completed. 
Init step %d', + initial_step) + elif init_checkpoint: + logging.info('Restoring from init checkpoint function') + init_checkpoint(model) + logging.info('Loading from init checkpoint file completed') + + current_step = optimizer.iterations.numpy() + checkpoint_name = self.checkpoint_name + + eval_metric = eval_metric_fn() + train_metric = train_metric_fn() + train_summary_writer = summary_writer_fn(model_dir, 'eval_train') + self.train_summary_writer = train_summary_writer.writer + + test_summary_writer = summary_writer_fn(model_dir, 'eval_test') + self.eval_summary_writer = test_summary_writer.writer + + # Use training summary writer in TimeHistory if it's in use + for cb in custom_callbacks: + if isinstance(cb, keras_utils.TimeHistory): + cb.summary_writer = self.train_summary_writer + + # Continue training loop. + train_step = self._create_train_step( + strategy=strategy, + model=model, + loss_fn=self.loss_fn(), + optimizer=optimizer, + metric=train_metric) + test_step = None + if eval_input_fn and eval_metric: + self.global_train_step = model.optimizer.iterations + test_step = self._create_test_step(strategy, model, metric=eval_metric) + + # Step-0 operations + if current_step == 0 and not latest_checkpoint_file: + _save_checkpoint( + checkpoint, model_dir, checkpoint_name.format(step=current_step)) + if test_step: + eval_iterator = self._get_input_iterator(eval_input_fn, strategy) + eval_metric_result = self._run_evaluation( + test_step, current_step, eval_metric, eval_iterator) + logging.info( + 'Step: %s evalation metric = %s.', current_step, eval_metric_result) + test_summary_writer( + metrics=eval_metric_result, step=optimizer.iterations) + reset_states(eval_metric) + + logging.info('Training started') + last_save_checkpoint_step = current_step + while current_step < total_steps: + + num_steps = _steps_to_run(current_step, total_steps, iterations_per_loop) + _run_callbacks_on_batch_begin(current_step) + train_loss = train_step(train_iterator, + tf.convert_to_tensor(num_steps, dtype=tf.int32)) + current_step += num_steps + + train_loss = tf.nest.map_structure(lambda x: x.numpy().astype(float), + train_loss) + + _run_callbacks_on_batch_end(current_step - 1) + if not isinstance(train_loss, dict): + train_loss = {'total_loss': train_loss} + if np.isnan(train_loss['total_loss']): + raise ValueError('total loss is NaN.') + + if train_metric: + train_metric_result = metric_results(train_metric) + train_metric_result.update(train_loss) + else: + train_metric_result = train_loss + if callable(optimizer.lr): + train_metric_result.update( + {'learning_rate': optimizer.lr(current_step).numpy()}) + else: + train_metric_result.update({'learning_rate': optimizer.lr.numpy()}) + logging.info('Train Step: %d/%d / loss = %s / training metric = %s', + current_step, total_steps, train_loss, + train_metric_result) + + train_summary_writer( + metrics=train_metric_result, step=optimizer.iterations) + + # Saves model checkpoints and run validation steps at every + # iterations_per_loop steps. + # To avoid repeated model saving, we do not save after the last + # step of training. 
+ if save_freq > 0 and current_step < total_steps and ( + current_step - last_save_checkpoint_step) >= save_freq: + _save_checkpoint(checkpoint, model_dir, + checkpoint_name.format(step=current_step)) + last_save_checkpoint_step = current_step + + if continuous_eval and current_step < total_steps and test_step: + eval_iterator = self._get_input_iterator(eval_input_fn, strategy) + eval_metric_result = self._run_evaluation(test_step, current_step, + eval_metric, eval_iterator) + logging.info('Step: %s evalation metric = %s.', current_step, + eval_metric_result) + test_summary_writer( + metrics=eval_metric_result, step=optimizer.iterations) + + # Re-initialize evaluation metric, except the last step. + if eval_metric and current_step < total_steps: + reset_states(eval_metric) + if train_metric and current_step < total_steps: + reset_states(train_metric) + + # Reaches the end of training and saves the last checkpoint. + if last_save_checkpoint_step < total_steps: + _save_checkpoint(checkpoint, model_dir, + checkpoint_name.format(step=current_step)) + + if test_step: + logging.info('Running final evaluation after training is complete.') + eval_iterator = self._get_input_iterator(eval_input_fn, strategy) + eval_metric_result = self._run_evaluation(test_step, current_step, + eval_metric, eval_iterator) + logging.info('Final evaluation metric = %s.', eval_metric_result) + test_summary_writer( + metrics=eval_metric_result, step=optimizer.iterations) + + self.train_summary_writer.close() + self.eval_summary_writer.close() + + return train_metric_result, eval_metric_result + + def _run_evaluation(self, test_step, current_training_step, metric, + test_iterator): + """Runs validation steps and aggregate metrics.""" + if not test_iterator or not metric: + logging.warning( + 'Both test_iterator (%s) and metrics (%s) must not be None.', + test_iterator, metric) + return None + logging.info('Running evaluation after step: %s.', current_training_step) + eval_step = 0 + while True: + try: + with tf.experimental.async_scope(): + test_step(test_iterator) + eval_step += 1 + except (StopIteration, tf.errors.OutOfRangeError): + tf.experimental.async_clear_error() + break + + metric_result = metric_results(metric) + logging.info('Total eval steps: [%d]', eval_step) + logging.info('At training step: [%r] Validation metric = %r', + current_training_step, metric_result) + return metric_result + + def evaluate_from_model_dir( + self, + model_dir: Text, + eval_input_fn: Callable[[params_dict.ParamsDict], tf.data.Dataset], + eval_metric_fn: Callable[[], Any], + total_steps: int = -1, + eval_timeout: int = None, + min_eval_interval: int = 180, + summary_writer_fn: Callable[[Text, Text], SummaryWriter] = SummaryWriter): + """Runs distributed evaluation on model folder. + + Args: + model_dir: the folder for storing model checkpoints. + eval_input_fn: (Optional) same type as train_input_fn. If not None, will + trigger evaluting metric on eval data. If None, will not run eval step. + eval_metric_fn: metric_fn for evaluation in test_step. + total_steps: total training steps. If the current step reaches the + total_steps, the evaluation loop will stop. + eval_timeout: The maximum number of seconds to wait between checkpoints. + If left as None, then the process will wait indefinitely. Used by + tf.train.checkpoints_iterator. + min_eval_interval: The minimum number of seconds between yielding + checkpoints. Used by tf.train.checkpoints_iterator. + summary_writer_fn: function to create summary writer. 
+ + Returns: + Eval metrics dictionary of the last checkpoint. + """ + + if not model_dir: + raise ValueError('model_dir must be set.') + + def terminate_eval(): + tf.logging.info('Terminating eval after %d seconds of no checkpoints' % + eval_timeout) + return True + + summary_writer = summary_writer_fn(model_dir, 'eval') + self.eval_summary_writer = summary_writer.writer + + # Read checkpoints from the given model directory + # until `eval_timeout` seconds elapses. + for checkpoint_path in tf.train.checkpoints_iterator( + model_dir, + min_interval_secs=min_eval_interval, + timeout=eval_timeout, + timeout_fn=terminate_eval): + eval_metric_result, current_step = self.evaluate_checkpoint( + checkpoint_path=checkpoint_path, + eval_input_fn=eval_input_fn, + eval_metric_fn=eval_metric_fn, + summary_writer=summary_writer) + if total_steps > 0 and current_step >= total_steps: + logging.info('Evaluation finished after training step %d', current_step) + break + return eval_metric_result + + def evaluate_checkpoint(self, + checkpoint_path: Text, + eval_input_fn: Callable[[params_dict.ParamsDict], + tf.data.Dataset], + eval_metric_fn: Callable[[], Any], + summary_writer: SummaryWriter = None): + """Runs distributed evaluation on the one checkpoint. + + Args: + checkpoint_path: the checkpoint to evaluate. + eval_input_fn: (Optional) same type as train_input_fn. If not None, will + trigger evaluting metric on eval data. If None, will not run eval step. + eval_metric_fn: metric_fn for evaluation in test_step. + summary_writer: function to create summary writer. + + Returns: + Eval metrics dictionary of the last checkpoint. + """ + if not callable(eval_metric_fn): + raise ValueError('if `eval_metric_fn` is specified, ' + 'eval_metric_fn must be a callable.') + + old_phrase = tf.keras.backend.learning_phase() + tf.keras.backend.set_learning_phase(0) + params = self._params + strategy = self._strategy + # To reduce unnecessary send/receive input pipeline operation, we place + # input pipeline ops in worker task. + with strategy.scope(): + + # To correctly place the model weights on accelerators, + # model and optimizer should be created in scope. + model = self.model_fn(params.as_dict()) + checkpoint = tf.train.Checkpoint(model=model) + + eval_metric = eval_metric_fn() + assert eval_metric, 'eval_metric does not exist' + test_step = self._create_test_step(strategy, model, metric=eval_metric) + + logging.info('Starting to evaluate.') + if not checkpoint_path: + raise ValueError('checkpoint path is empty') + reader = tf.compat.v1.train.NewCheckpointReader(checkpoint_path) + current_step = reader.get_tensor( + 'optimizer/iter/.ATTRIBUTES/VARIABLE_VALUE') + logging.info( + 'Checkpoint file %s found and restoring from ' + 'checkpoint', checkpoint_path) + checkpoint.restore(checkpoint_path) + + self.global_train_step = model.optimizer.iterations + eval_iterator = self._get_input_iterator(eval_input_fn, strategy) + eval_metric_result = self._run_evaluation(test_step, current_step, + eval_metric, eval_iterator) + logging.info('Step: %s evalation metric = %s.', current_step, + eval_metric_result) + summary_writer(metrics=eval_metric_result, step=current_step) + reset_states(eval_metric) + + tf.keras.backend.set_learning_phase(old_phrase) + return eval_metric_result, current_step + + def predict(self): + return NotImplementedError('Unimplmented function.') + + +class ExecutorBuilder(object): + """Builder of DistributedExecutor. + + Example 1: Builds an executor with supported Strategy. 
+ builder = ExecutorBuilder( + strategy_type='tpu', + strategy_config={'tpu': '/bns/xxx'}) + dist_executor = builder.build_executor( + params=params, + model_fn=my_model_fn, + loss_fn=my_loss_fn, + metric_fn=my_metric_fn) + + Example 2: Builds an executor with customized Strategy. + builder = ExecutorBuilder() + builder.strategy = + dist_executor = builder.build_executor( + params=params, + model_fn=my_model_fn, + loss_fn=my_loss_fn, + metric_fn=my_metric_fn) + + Example 3: Builds a customized executor with customized Strategy. + class MyDistributedExecutor(DistributedExecutor): + # implementation ... + + builder = ExecutorBuilder() + builder.strategy = + dist_executor = builder.build_executor( + class_ctor=MyDistributedExecutor, + params=params, + model_fn=my_model_fn, + loss_fn=my_loss_fn, + metric_fn=my_metric_fn) + """ + + def __init__(self, strategy_type=None, strategy_config=None): + _ = distribution_utils.configure_cluster( + strategy_config.worker_hosts, strategy_config.task_index) + """Constructor. + + Args: + strategy_type: string. One of 'tpu', 'mirrored', 'multi_worker_mirrored'. + If None. User is responsible to set the strategy before calling + build_executor(...). + strategy_config: necessary config for constructing the proper Strategy. + Check strategy_flags_dict() for examples of the structure. + """ + self._strategy = distribution_utils.get_distribution_strategy( + distribution_strategy=strategy_type, + num_gpus=strategy_config.num_gpus, + all_reduce_alg=strategy_config.all_reduce_alg, + num_packs=strategy_config.num_packs, + tpu_address=strategy_config.tpu) + + @property + def strategy(self): + """Returns default checkpoint name.""" + return self._strategy + + @strategy.setter + def strategy(self, new_strategy): + """Sets default summary writer for the current thread.""" + self._strategy = new_strategy + + def build_executor(self, + class_ctor=DistributedExecutor, + params=None, + model_fn=None, + loss_fn=None, + **kwargs): + """Creates an executor according to strategy type. + + See doc string of the DistributedExecutor.__init__ for more information of + the + input arguments. + + Args: + class_ctor: A constructor of executor (default: DistributedExecutor). + params: ParamsDict, all the model parameters and runtime parameters. + model_fn: Keras model function. + loss_fn: loss function. + **kwargs: other arguments to the executor constructor. + + Returns: + An instance of DistributedExecutor or its subclass. + """ + if self._strategy is None: + raise ValueError('`strategy` should not be None. You need to specify ' + '`strategy_type` in the builder contructor or directly ' + 'set the `strategy` property of the builder.') + return class_ctor( + strategy=self._strategy, + params=params, + model_fn=model_fn, + loss_fn=loss_fn, + **kwargs) diff --git a/models/official/nlp/README.md b/models/official/nlp/README.md new file mode 100644 index 0000000000000000000000000000000000000000..156f5c42858be92f20ed6bc157ddd8593cbc4329 --- /dev/null +++ b/models/official/nlp/README.md @@ -0,0 +1,37 @@ +# TensorFlow NLP Modelling Toolkit + +This codebase provides a Natrual Language Processing modeling toolkit written in +[TF2](https://www.tensorflow.org/guide/effective_tf2). It allows researchers and +developers to reproduce state-of-the-art model results and train custom models +to experiment new research ideas. 
+
+## Features
+
+* Reusable and modularized modeling building blocks
+* Reproducible state-of-the-art results
+* Easy to customize and extend
+* End-to-end training
+* Distributed training on both GPUs and TPUs
+
+## Major components
+
+### Libraries
+
+We provide a modeling library to allow users to train custom models for new
+research ideas. Detailed instructions can be found in the READMEs in each folder.
+
+* [modeling/](modeling): modeling library that provides building blocks (e.g., Layers, Networks, and Models) that can be assembled into transformer-based architectures.
+* [data/](data): binaries and utils for input preprocessing, tokenization, etc.
+
+### State-of-the-Art models and examples
+
+We provide SoTA model implementations, pre-trained models, training and
+evaluation examples, and command lines. Detailed instructions can be found in
+the READMEs for specific papers.
+
+1. [BERT](bert): [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) by Devlin et al., 2018
+2. [ALBERT](albert): [A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942) by Lan et al., 2019
+3. [XLNet](xlnet): [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Yang et al., 2019
+4. [Transformer for translation](transformer): [Attention Is All You Need](https://arxiv.org/abs/1706.03762) by Vaswani et al., 2017
+5. [NHNet](nhnet): [Generating Representative Headlines for News Stories](https://arxiv.org/abs/2001.09386) by Gu et al., 2020
+
diff --git a/models/official/nlp/__init__.py b/models/official/nlp/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/models/official/nlp/__pycache__/__init__.cpython-310.pyc b/models/official/nlp/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..12138212531ff436903dd5fa8e73277b898b610c
Binary files /dev/null and b/models/official/nlp/__pycache__/__init__.cpython-310.pyc differ
diff --git a/models/official/nlp/__pycache__/__init__.cpython-38.pyc b/models/official/nlp/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..189ad70a8ac6b38cec6a6639e0790bdecf6b6ec5
Binary files /dev/null and b/models/official/nlp/__pycache__/__init__.cpython-38.pyc differ
diff --git a/models/official/nlp/__pycache__/__init__.cpython-39.pyc b/models/official/nlp/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8cea2816038525fbca46e8d0575126c9fa59b6a2
Binary files /dev/null and b/models/official/nlp/__pycache__/__init__.cpython-39.pyc differ
diff --git a/models/official/nlp/__pycache__/optimization.cpython-38.pyc b/models/official/nlp/__pycache__/optimization.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..737c69c61222d76eb2a4c8aa7b070f8f89a4df69
Binary files /dev/null and b/models/official/nlp/__pycache__/optimization.cpython-38.pyc differ
diff --git a/models/official/nlp/__pycache__/optimization.cpython-39.pyc b/models/official/nlp/__pycache__/optimization.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b8ca526bdcc669126e2467acd6ee6f3cfd417674
Binary files /dev/null and b/models/official/nlp/__pycache__/optimization.cpython-39.pyc differ
diff --git a/models/official/nlp/albert/README.md b/models/official/nlp/albert/README.md
new file mode 100644 index 0000000000000000000000000000000000000000..cfb726c90ef9a638d5fd0485e341c232a86bdac2 --- /dev/null +++ b/models/official/nlp/albert/README.md @@ -0,0 +1,332 @@ +# ALBERT (ALBERT: A Lite BERT for Self-supervised Learning of Language Representations) + +The academic paper which describes ALBERT in detail and provides full results on +a number of tasks can be found here: https://arxiv.org/abs/1909.11942. + +This repository contains TensorFlow 2.x implementation for ALBERT. + +## Contents + * [Contents](#contents) + * [Pre-trained Models](#pre-trained-models) + * [Restoring from Checkpoints](#restoring-from-checkpoints) + * [Set Up](#set-up) + * [Process Datasets](#process-datasets) + * [Fine-tuning with BERT](#fine-tuning-with-bert) + * [Cloud GPUs and TPUs](#cloud-gpus-and-tpus) + * [Sentence and Sentence-pair Classification Tasks](#sentence-and-sentence-pair-classification-tasks) + * [SQuAD 1.1](#squad-1.1) + + +## Pre-trained Models + +We released both checkpoints and tf.hub modules as the pretrained models for +fine-tuning. They are TF 2.x compatible and are converted from the ALBERT v2 +checkpoints released in TF 1.x official ALBERT repository +[google-research/albert](https://github.com/google-research/albert) +in order to keep consistent with ALBERT paper. + +Our current released checkpoints are exactly the same as TF 1.x official ALBERT +repository. + +### Access to Pretrained Checkpoints + +Pretrained checkpoints can be found in the following links: + +**Note: We implemented ALBERT using Keras functional-style networks in [nlp/modeling](../modeling). +ALBERT V2 models compatible with TF 2.x checkpoints are:** + +* **[`ALBERT V2 Base`](https://storage.googleapis.com/cloud-tpu-checkpoints/albert/checkpoints/albert_v2_base.tar.gz)**: + 12-layer, 768-hidden, 12-heads, 12M parameters +* **[`ALBERT V2 Large`](https://storage.googleapis.com/cloud-tpu-checkpoints/albert/checkpoints/albert_v2_large.tar.gz)**: + 24-layer, 1024-hidden, 16-heads, 18M parameters +* **[`ALBERT V2 XLarge`](https://storage.googleapis.com/cloud-tpu-checkpoints/albert/checkpoints/albert_v2_xlarge.tar.gz)**: + 24-layer, 2048-hidden, 32-heads, 60M parameters +* **[`ALBERT V2 XXLarge`](https://storage.googleapis.com/cloud-tpu-checkpoints/albert/checkpoints/albert_v2_xxlarge.tar.gz)**: + 12-layer, 4096-hidden, 64-heads, 235M parameters + +We recommend to host checkpoints on Google Cloud storage buckets when you use +Cloud GPU/TPU. + +### Restoring from Checkpoints + +`tf.train.Checkpoint` is used to manage model checkpoints in TF 2. To restore +weights from provided pre-trained checkpoints, you can use the following code: + +```python +init_checkpoint='the pretrained model checkpoint path.' +model=tf.keras.Model() # Bert pre-trained model as feature extractor. +checkpoint = tf.train.Checkpoint(model=model) +checkpoint.restore(init_checkpoint) +``` + +Checkpoints featuring native serialized Keras models +(i.e. model.load()/load_weights()) will be available soon. + +### Access to Pretrained hub modules. 
+ +Pretrained tf.hub modules in TF 2.x SavedModel format can be found in the +following links: + +* **[`ALBERT V2 Base`](https://tfhub.dev/tensorflow/albert_en_base/1)**: + 12-layer, 768-hidden, 12-heads, 12M parameters +* **[`ALBERT V2 Large`](https://tfhub.dev/tensorflow/albert_en_large/1)**: + 24-layer, 1024-hidden, 16-heads, 18M parameters +* **[`ALBERT V2 XLarge`](https://tfhub.dev/tensorflow/albert_en_xlarge/1)**: + 24-layer, 2048-hidden, 32-heads, 60M parameters +* **[`ALBERT V2 XXLarge`](https://tfhub.dev/tensorflow/albert_en_xxlarge/1)**: + 12-layer, 4096-hidden, 64-heads, 235M parameters + +## Set Up + +```shell +export PYTHONPATH="$PYTHONPATH:/path/to/models" +``` + +Install `tf-nightly` to get latest updates: + +```shell +pip install tf-nightly-gpu +``` + +With TPU, GPU support is not necessary. First, you need to create a `tf-nightly` +TPU with [ctpu tool](https://github.com/tensorflow/tpu/tree/master/tools/ctpu): + +```shell +ctpu up -name --tf-version=”nightly” +``` + +Second, you need to install TF 2 `tf-nightly` on your VM: + +```shell +pip install tf-nightly +``` + +Warning: More details TPU-specific set-up instructions and tutorial should come +along with official TF 2.x release for TPU. Note that this repo is not +officially supported by Google Cloud TPU team yet until TF 2.1 released. + +## Process Datasets + +### Pre-training + +Pre-train ALBERT using TF2.x will come soon. +For now, please use [ALBERT research repo](https://github.com/google-research/ALBERT) +to pretrain the model and convert the checkpoint to TF2.x compatible ones using +[tf2_albert_encoder_checkpoint_converter.py](tf2_albert_encoder_checkpoint_converter.py). + + + +### Fine-tuning + +To prepare the fine-tuning data for final model training, use the +[`../data/create_finetuning_data.py`](../data/create_finetuning_data.py) script. +Note that different from BERT models that use word piece tokenzer, +ALBERT models employ sentence piece tokenizer. So the FLAG tokenizer_impl has +to be set to 'sentence_piece'. +Resulting datasets in `tf_record` format and training meta data should be later +passed to training or evaluation scripts. The task-specific arguments are +described in following sections: + +* GLUE + +Users can download the +[GLUE data](https://gluebenchmark.com/tasks) by running +[this script](https://gist.github.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e) +and unpack it to some directory `$GLUE_DIR`. + +```shell +export GLUE_DIR=~/glue +export ALBERT_DIR=gs://cloud-tpu-checkpoints/albert/checkpoints/albert_v2_base + +export TASK_NAME=MNLI +export OUTPUT_DIR=gs://some_bucket/datasets +python ../data/create_finetuning_data.py \ + --input_data_dir=${GLUE_DIR}/${TASK_NAME}/ \ + --sp_model_file=${ALBERT_DIR}/30k-clean.model \ + --train_data_output_path=${OUTPUT_DIR}/${TASK_NAME}_train.tf_record \ + --eval_data_output_path=${OUTPUT_DIR}/${TASK_NAME}_eval.tf_record \ + --meta_data_file_path=${OUTPUT_DIR}/${TASK_NAME}_meta_data \ + --fine_tuning_task_type=classification --max_seq_length=128 \ + --classification_task_name=${TASK_NAME} \ + --tokenizer_impl=sentence_piece +``` + +* SQUAD + +The [SQuAD website](https://rajpurkar.github.io/SQuAD-explorer/) contains +detailed information about the SQuAD datasets and evaluation. 
+ +The necessary files can be found here: + +* [train-v1.1.json](https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json) +* [dev-v1.1.json](https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json) +* [evaluate-v1.1.py](https://github.com/allenai/bi-att-flow/blob/master/squad/evaluate-v1.1.py) +* [train-v2.0.json](https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json) +* [dev-v2.0.json](https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json) +* [evaluate-v2.0.py](https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/) + +```shell +export SQUAD_DIR=~/squad +export SQUAD_VERSION=v1.1 +export ALBERT_DIR=gs://cloud-tpu-checkpoints/albert/checkpoints/albert_v2_base +export OUTPUT_DIR=gs://some_bucket/datasets + +python ../data/create_finetuning_data.py \ + --squad_data_file=${SQUAD_DIR}/train-${SQUAD_VERSION}.json \ + --sp_model_file=${ALBERT_DIR}/30k-clean.model \ + --train_data_output_path=${OUTPUT_DIR}/squad_${SQUAD_VERSION}_train.tf_record \ + --meta_data_file_path=${OUTPUT_DIR}/squad_${SQUAD_VERSION}_meta_data \ + --fine_tuning_task_type=squad --max_seq_length=384 \ + --tokenizer_impl=sentence_piece +``` + +## Fine-tuning with ALBERT + +### Cloud GPUs and TPUs + +* Cloud Storage + +The unzipped pre-trained model files can also be found in the Google Cloud +Storage folder `gs://cloud-tpu-checkpoints/albert/checkpoints`. For example: + +```shell +export ALBERT_DIR=gs://cloud-tpu-checkpoints/albert/checkpoints/albert_v2_base +export MODEL_DIR=gs://some_bucket/my_output_dir +``` + +Currently, users are able to access to `tf-nightly` TPUs and the following TPU +script should run with `tf-nightly`. + +* GPU -> TPU + +Just add the following flags to `run_classifier.py` or `run_squad.py`: + +```shell + --distribution_strategy=tpu + --tpu=grpc://${TPU_IP_ADDRESS}:8470 +``` + +### Sentence and Sentence-pair Classification Tasks + +This example code fine-tunes `albert_v2_base` on the Microsoft Research +Paraphrase Corpus (MRPC) corpus, which only contains 3,600 examples and can +fine-tune in a few minutes on most GPUs. + +We use the `albert_v2_base` as an example throughout the +workflow. + + +```shell +export ALBERT_DIR=gs://cloud-tpu-checkpoints/albert/checkpoints/albert_v2_base +export MODEL_DIR=gs://some_bucket/my_output_dir +export GLUE_DIR=gs://some_bucket/datasets +export TASK=MRPC + +python run_classifier.py \ + --mode='train_and_eval' \ + --input_meta_data_path=${GLUE_DIR}/${TASK}_meta_data \ + --train_data_path=${GLUE_DIR}/${TASK}_train.tf_record \ + --eval_data_path=${GLUE_DIR}/${TASK}_eval.tf_record \ + --bert_config_file=${ALBERT_DIR}/albert_config.json \ + --init_checkpoint=${ALBERT_DIR}/bert_model.ckpt \ + --train_batch_size=4 \ + --eval_batch_size=4 \ + --steps_per_loop=1 \ + --learning_rate=2e-5 \ + --num_train_epochs=3 \ + --model_dir=${MODEL_DIR} \ + --distribution_strategy=mirrored +``` + +Alternatively, instead of specifying `init_checkpoint`, you can specify +`hub_module_url` to employ a pretraind BERT hub module, e.g., +` --hub_module_url=https://tfhub.dev/tensorflow/albert_en_base/1`. + +To use TPU, you only need to switch distribution strategy type to `tpu` with TPU +information and use remote storage for model checkpoints. + +```shell +export ALBERT_DIR=gs://cloud-tpu-checkpoints/albert/checkpoints/albert_v2_base +export TPU_IP_ADDRESS='???' 
+export MODEL_DIR=gs://some_bucket/my_output_dir
+export GLUE_DIR=gs://some_bucket/datasets
+
+python run_classifier.py \
+  --mode='train_and_eval' \
+  --input_meta_data_path=${GLUE_DIR}/${TASK}_meta_data \
+  --train_data_path=${GLUE_DIR}/${TASK}_train.tf_record \
+  --eval_data_path=${GLUE_DIR}/${TASK}_eval.tf_record \
+  --bert_config_file=$ALBERT_DIR/albert_config.json \
+  --init_checkpoint=$ALBERT_DIR/bert_model.ckpt \
+  --train_batch_size=32 \
+  --eval_batch_size=32 \
+  --learning_rate=2e-5 \
+  --num_train_epochs=3 \
+  --model_dir=${MODEL_DIR} \
+  --distribution_strategy=tpu \
+  --tpu=grpc://${TPU_IP_ADDRESS}:8470
+```
+
+### SQuAD 1.1
+
+The Stanford Question Answering Dataset (SQuAD) is a popular question answering
+benchmark dataset. See more at the [SQuAD website](https://rajpurkar.github.io/SQuAD-explorer/).
+
+We use `albert_v2_base` as an example throughout the workflow.
+
+```shell
+export ALBERT_DIR=gs://cloud-tpu-checkpoints/albert/checkpoints/albert_v2_base
+export SQUAD_DIR=gs://some_bucket/datasets
+export MODEL_DIR=gs://some_bucket/my_output_dir
+export SQUAD_VERSION=v1.1
+
+python run_squad.py \
+  --input_meta_data_path=${SQUAD_DIR}/squad_${SQUAD_VERSION}_meta_data \
+  --train_data_path=${SQUAD_DIR}/squad_${SQUAD_VERSION}_train.tf_record \
+  --predict_file=${SQUAD_DIR}/dev-v1.1.json \
+  --sp_model_file=${ALBERT_DIR}/30k-clean.model \
+  --bert_config_file=$ALBERT_DIR/albert_config.json \
+  --init_checkpoint=$ALBERT_DIR/bert_model.ckpt \
+  --train_batch_size=4 \
+  --predict_batch_size=4 \
+  --learning_rate=8e-5 \
+  --num_train_epochs=2 \
+  --model_dir=${MODEL_DIR} \
+  --distribution_strategy=mirrored
+```
+
+Similarly, you can replace the `init_checkpoint` FLAG with `hub_module_url` to
+specify a hub module path.
+
+To use a TPU, you need to switch the distribution strategy type to `tpu` and
+provide the TPU information.
+
+```shell
+export ALBERT_DIR=gs://cloud-tpu-checkpoints/albert/checkpoints/albert_v2_base
+export TPU_IP_ADDRESS='???'
+export MODEL_DIR=gs://some_bucket/my_output_dir
+export SQUAD_DIR=gs://some_bucket/datasets
+export SQUAD_VERSION=v1.1
+
+python run_squad.py \
+  --input_meta_data_path=${SQUAD_DIR}/squad_${SQUAD_VERSION}_meta_data \
+  --train_data_path=${SQUAD_DIR}/squad_${SQUAD_VERSION}_train.tf_record \
+  --predict_file=${SQUAD_DIR}/dev-v1.1.json \
+  --sp_model_file=${ALBERT_DIR}/30k-clean.model \
+  --bert_config_file=$ALBERT_DIR/albert_config.json \
+  --init_checkpoint=$ALBERT_DIR/bert_model.ckpt \
+  --train_batch_size=32 \
+  --learning_rate=8e-5 \
+  --num_train_epochs=2 \
+  --model_dir=${MODEL_DIR} \
+  --distribution_strategy=tpu \
+  --tpu=grpc://${TPU_IP_ADDRESS}:8470
+```
+
+The dev set predictions will be saved into a file called `predictions.json` in
+the `model_dir`:
+
+```shell
+python $SQUAD_DIR/evaluate-v1.1.py $SQUAD_DIR/dev-v1.1.json ./squad/predictions.json
+```
diff --git a/models/official/nlp/albert/__init__.py b/models/official/nlp/albert/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/models/official/nlp/albert/configs.py b/models/official/nlp/albert/configs.py
new file mode 100644
index 0000000000000000000000000000000000000000..10fbb79bd50cc224f4192819bfb428cde357ef3c
--- /dev/null
+++ b/models/official/nlp/albert/configs.py
@@ -0,0 +1,58 @@
+# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""The ALBERT configurations.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import six + +from official.nlp.bert import configs + + +class AlbertConfig(configs.BertConfig): + """Configuration for `ALBERT`.""" + + def __init__(self, + num_hidden_groups=1, + inner_group_num=1, + **kwargs): + """Constructs AlbertConfig. + + Args: + num_hidden_groups: Number of group for the hidden layers, parameters in + the same group are shared. Note that this value and also the following + 'inner_group_num' has to be 1 for now, because all released ALBERT + models set them to 1. We may support arbitary valid values in future. + inner_group_num: Number of inner repetition of attention and ffn. + **kwargs: The remaining arguments are the same as above 'BertConfig'. + """ + super(AlbertConfig, self).__init__(**kwargs) + + # TODO(chendouble): 'inner_group_num' and 'num_hidden_groups' are always 1 + # in the released ALBERT. Support other values in AlbertTransformerEncoder + # if needed. + if inner_group_num != 1 or num_hidden_groups != 1: + raise ValueError("We only support 'inner_group_num' and " + "'num_hidden_groups' as 1.") + + @classmethod + def from_dict(cls, json_object): + """Constructs a `AlbertConfig` from a Python dictionary of parameters.""" + config = AlbertConfig(vocab_size=None) + for (key, value) in six.iteritems(json_object): + config.__dict__[key] = value + return config diff --git a/models/official/nlp/albert/export_albert_tfhub.py b/models/official/nlp/albert/export_albert_tfhub.py new file mode 100644 index 0000000000000000000000000000000000000000..9a1af1a17735c5f0b995bb5e431fe143ffffa1d1 --- /dev/null +++ b/models/official/nlp/albert/export_albert_tfhub.py @@ -0,0 +1,88 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""A script to export the ALBERT core model as a TF-Hub SavedModel.""" +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +from absl import app +from absl import flags +import tensorflow as tf +from typing import Text + +from official.nlp.albert import configs +from official.nlp.bert import bert_models + +FLAGS = flags.FLAGS + +flags.DEFINE_string("albert_config_file", None, + "Albert configuration file to define core albert layers.") +flags.DEFINE_string("model_checkpoint_path", None, + "File path to TF model checkpoint.") +flags.DEFINE_string("export_path", None, "TF-Hub SavedModel destination path.") +flags.DEFINE_string( + "sp_model_file", None, + "The sentence piece model file that the ALBERT model was trained on.") + + +def create_albert_model( + albert_config: configs.AlbertConfig) -> tf.keras.Model: + """Creates an ALBERT keras core model from ALBERT configuration. + + Args: + albert_config: An `AlbertConfig` to create the core model. + + Returns: + A keras model. + """ + # Adds input layers just as placeholders. + input_word_ids = tf.keras.layers.Input( + shape=(None,), dtype=tf.int32, name="input_word_ids") + input_mask = tf.keras.layers.Input( + shape=(None,), dtype=tf.int32, name="input_mask") + input_type_ids = tf.keras.layers.Input( + shape=(None,), dtype=tf.int32, name="input_type_ids") + transformer_encoder = bert_models.get_transformer_encoder( + albert_config, sequence_length=None) + sequence_output, pooled_output = transformer_encoder( + [input_word_ids, input_mask, input_type_ids]) + # To keep consistent with legacy hub modules, the outputs are + # "pooled_output" and "sequence_output". + return tf.keras.Model( + inputs=[input_word_ids, input_mask, input_type_ids], + outputs=[pooled_output, sequence_output]), transformer_encoder + + +def export_albert_tfhub(albert_config: configs.AlbertConfig, + model_checkpoint_path: Text, hub_destination: Text, + sp_model_file: Text): + """Restores a tf.keras.Model and saves for TF-Hub.""" + core_model, encoder = create_albert_model(albert_config) + checkpoint = tf.train.Checkpoint(model=encoder) + checkpoint.restore(model_checkpoint_path).assert_consumed() + core_model.sp_model_file = tf.saved_model.Asset(sp_model_file) + core_model.save(hub_destination, include_optimizer=False, save_format="tf") + + +def main(_): + albert_config = configs.AlbertConfig.from_json_file( + FLAGS.albert_config_file) + export_albert_tfhub(albert_config, FLAGS.model_checkpoint_path, + FLAGS.export_path, FLAGS.sp_model_file) + + +if __name__ == "__main__": + app.run(main) diff --git a/models/official/nlp/albert/export_albert_tfhub_test.py b/models/official/nlp/albert/export_albert_tfhub_test.py new file mode 100644 index 0000000000000000000000000000000000000000..4973090365b7ce6527ef1e4458e3f334ea1a5d1b --- /dev/null +++ b/models/official/nlp/albert/export_albert_tfhub_test.py @@ -0,0 +1,89 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests official.nlp.albert.export_albert_tfhub.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +import numpy as np + +import tensorflow as tf +import tensorflow_hub as hub + +from official.nlp.albert import configs +from official.nlp.albert import export_albert_tfhub + + +class ExportAlbertTfhubTest(tf.test.TestCase): + + def test_export_albert_tfhub(self): + # Exports a savedmodel for TF-Hub + albert_config = configs.AlbertConfig( + vocab_size=100, + embedding_size=8, + hidden_size=16, + intermediate_size=32, + max_position_embeddings=128, + num_attention_heads=2, + num_hidden_layers=1) + bert_model, encoder = export_albert_tfhub.create_albert_model(albert_config) + model_checkpoint_dir = os.path.join(self.get_temp_dir(), "checkpoint") + checkpoint = tf.train.Checkpoint(model=encoder) + checkpoint.save(os.path.join(model_checkpoint_dir, "test")) + model_checkpoint_path = tf.train.latest_checkpoint(model_checkpoint_dir) + + sp_model_file = os.path.join(self.get_temp_dir(), "sp_tokenizer.model") + with tf.io.gfile.GFile(sp_model_file, "w") as f: + f.write("dummy content") + + hub_destination = os.path.join(self.get_temp_dir(), "hub") + export_albert_tfhub.export_albert_tfhub( + albert_config, + model_checkpoint_path, + hub_destination, + sp_model_file=sp_model_file) + + # Restores a hub KerasLayer. + hub_layer = hub.KerasLayer(hub_destination, trainable=True) + + if hasattr(hub_layer, "resolved_object"): + with tf.io.gfile.GFile( + hub_layer.resolved_object.sp_model_file.asset_path.numpy()) as f: + self.assertEqual("dummy content", f.read()) + # Checks the hub KerasLayer. + for source_weight, hub_weight in zip(bert_model.trainable_weights, + hub_layer.trainable_weights): + self.assertAllClose(source_weight.numpy(), hub_weight.numpy()) + + dummy_ids = np.zeros((2, 10), dtype=np.int32) + hub_outputs = hub_layer([dummy_ids, dummy_ids, dummy_ids]) + source_outputs = bert_model([dummy_ids, dummy_ids, dummy_ids]) + + # The outputs of hub module are "pooled_output" and "sequence_output", + # while the outputs of encoder is in reversed order, i.e., + # "sequence_output" and "pooled_output". + encoder_outputs = reversed(encoder([dummy_ids, dummy_ids, dummy_ids])) + self.assertEqual(hub_outputs[0].shape, (2, 16)) + self.assertEqual(hub_outputs[1].shape, (2, 10, 16)) + for source_output, hub_output, encoder_output in zip( + source_outputs, hub_outputs, encoder_outputs): + self.assertAllClose(source_output.numpy(), hub_output.numpy()) + self.assertAllClose(source_output.numpy(), encoder_output.numpy()) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/albert/run_classifier.py b/models/official/nlp/albert/run_classifier.py new file mode 100644 index 0000000000000000000000000000000000000000..fe72ff880f61c99e304bf089ef4ed0d75bfc349b --- /dev/null +++ b/models/official/nlp/albert/run_classifier.py @@ -0,0 +1,67 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""ALBERT classification finetuning runner in tf2.x.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import json + +from absl import app +from absl import flags +import tensorflow as tf + +from official.nlp.albert import configs as albert_configs +from official.nlp.bert import run_classifier as run_classifier_bert +from official.utils.misc import distribution_utils + +FLAGS = flags.FLAGS + + +def main(_): + with tf.io.gfile.GFile(FLAGS.input_meta_data_path, 'rb') as reader: + input_meta_data = json.loads(reader.read().decode('utf-8')) + + if not FLAGS.model_dir: + FLAGS.model_dir = '/tmp/bert20/' + + strategy = distribution_utils.get_distribution_strategy( + distribution_strategy=FLAGS.distribution_strategy, + num_gpus=FLAGS.num_gpus, + tpu_address=FLAGS.tpu) + max_seq_length = input_meta_data['max_seq_length'] + train_input_fn = run_classifier_bert.get_dataset_fn( + FLAGS.train_data_path, + max_seq_length, + FLAGS.train_batch_size, + is_training=True) + eval_input_fn = run_classifier_bert.get_dataset_fn( + FLAGS.eval_data_path, + max_seq_length, + FLAGS.eval_batch_size, + is_training=False) + + albert_config = albert_configs.AlbertConfig.from_json_file( + FLAGS.bert_config_file) + run_classifier_bert.run_bert(strategy, input_meta_data, albert_config, + train_input_fn, eval_input_fn) + + +if __name__ == '__main__': + flags.mark_flag_as_required('bert_config_file') + flags.mark_flag_as_required('input_meta_data_path') + flags.mark_flag_as_required('model_dir') + app.run(main) diff --git a/models/official/nlp/albert/run_squad.py b/models/official/nlp/albert/run_squad.py new file mode 100644 index 0000000000000000000000000000000000000000..28a171a3f4a377ab174418c3b466b22680ad5734 --- /dev/null +++ b/models/official/nlp/albert/run_squad.py @@ -0,0 +1,137 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Run ALBERT on SQuAD 1.1 and SQuAD 2.0 in TF 2.x.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import json +import os +import time + +from absl import app +from absl import flags +from absl import logging +import tensorflow as tf + +from official.nlp.albert import configs as albert_configs +from official.nlp.bert import run_squad_helper +from official.nlp.bert import tokenization +from official.nlp.data import squad_lib_sp +from official.utils.misc import distribution_utils + +flags.DEFINE_string( + 'sp_model_file', None, + 'The path to the sentence piece model. Used by sentence piece tokenizer ' + 'employed by ALBERT.') + +# More flags can be found in run_squad_helper. +run_squad_helper.define_common_squad_flags() + +FLAGS = flags.FLAGS + + +def train_squad(strategy, + input_meta_data, + custom_callbacks=None, + run_eagerly=False): + """Runs bert squad training.""" + bert_config = albert_configs.AlbertConfig.from_json_file( + FLAGS.bert_config_file) + run_squad_helper.train_squad(strategy, input_meta_data, bert_config, + custom_callbacks, run_eagerly) + + +def predict_squad(strategy, input_meta_data): + """Makes predictions for the squad dataset.""" + bert_config = albert_configs.AlbertConfig.from_json_file( + FLAGS.bert_config_file) + tokenizer = tokenization.FullSentencePieceTokenizer( + sp_model_file=FLAGS.sp_model_file) + + run_squad_helper.predict_squad(strategy, input_meta_data, tokenizer, + bert_config, squad_lib_sp) + + +def eval_squad(strategy, input_meta_data): + """Evaluate on the squad dataset.""" + bert_config = albert_configs.AlbertConfig.from_json_file( + FLAGS.bert_config_file) + tokenizer = tokenization.FullSentencePieceTokenizer( + sp_model_file=FLAGS.sp_model_file) + + eval_metrics = run_squad_helper.eval_squad( + strategy, input_meta_data, tokenizer, bert_config, squad_lib_sp) + return eval_metrics + + +def export_squad(model_export_path, input_meta_data): + """Exports a trained model as a `SavedModel` for inference. + + Args: + model_export_path: a string specifying the path to the SavedModel directory. + input_meta_data: dictionary containing meta data about input and model. + + Raises: + Export path is not specified, got an empty string or None. + """ + bert_config = albert_configs.AlbertConfig.from_json_file( + FLAGS.bert_config_file) + run_squad_helper.export_squad(model_export_path, input_meta_data, bert_config) + + +def main(_): + with tf.io.gfile.GFile(FLAGS.input_meta_data_path, 'rb') as reader: + input_meta_data = json.loads(reader.read().decode('utf-8')) + + if FLAGS.mode == 'export_only': + export_squad(FLAGS.model_export_path, input_meta_data) + return + + # Configures cluster spec for multi-worker distribution strategy. 
+ if FLAGS.num_gpus > 0: + _ = distribution_utils.configure_cluster(FLAGS.worker_hosts, + FLAGS.task_index) + strategy = distribution_utils.get_distribution_strategy( + distribution_strategy=FLAGS.distribution_strategy, + num_gpus=FLAGS.num_gpus, + all_reduce_alg=FLAGS.all_reduce_alg, + tpu_address=FLAGS.tpu) + + if 'train' in FLAGS.mode: + train_squad(strategy, input_meta_data, run_eagerly=FLAGS.run_eagerly) + if 'predict' in FLAGS.mode: + predict_squad(strategy, input_meta_data) + if 'eval' in FLAGS.mode: + eval_metrics = eval_squad(strategy, input_meta_data) + f1_score = eval_metrics['final_f1'] + logging.info('SQuAD eval F1-score: %f', f1_score) + summary_dir = os.path.join(FLAGS.model_dir, 'summaries', 'eval') + summary_writer = tf.summary.create_file_writer(summary_dir) + with summary_writer.as_default(): + # TODO(lehou): write to the correct step number. + tf.summary.scalar('F1-score', f1_score, step=0) + summary_writer.flush() + # Also write eval_metrics to json file. + squad_lib_sp.write_to_json_files( + eval_metrics, os.path.join(summary_dir, 'eval_metrics.json')) + time.sleep(60) + + +if __name__ == '__main__': + flags.mark_flag_as_required('bert_config_file') + flags.mark_flag_as_required('model_dir') + app.run(main) diff --git a/models/official/nlp/albert/tf2_albert_encoder_checkpoint_converter.py b/models/official/nlp/albert/tf2_albert_encoder_checkpoint_converter.py new file mode 100644 index 0000000000000000000000000000000000000000..402bc1445bed575362598d09212d14d03b629179 --- /dev/null +++ b/models/official/nlp/albert/tf2_albert_encoder_checkpoint_converter.py @@ -0,0 +1,132 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""A converter from a tf1 ALBERT encoder checkpoint to a tf2 encoder checkpoint. + +The conversion will yield an object-oriented checkpoint that can be used +to restore a AlbertTransformerEncoder object. 
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from absl import app +from absl import flags + +import tensorflow as tf +from official.modeling import activations +from official.nlp.albert import configs +from official.nlp.bert import tf1_checkpoint_converter_lib +from official.nlp.modeling import networks + +FLAGS = flags.FLAGS + +flags.DEFINE_string("albert_config_file", None, + "Albert configuration file to define core bert layers.") +flags.DEFINE_string( + "checkpoint_to_convert", None, + "Initial checkpoint from a pretrained BERT model core (that is, only the " + "BertModel, with no task heads.)") +flags.DEFINE_string("converted_checkpoint_path", None, + "Name for the created object-based V2 checkpoint.") + + +ALBERT_NAME_REPLACEMENTS = ( + ("bert/encoder/", ""), + ("bert/", ""), + ("embeddings/word_embeddings", "word_embeddings/embeddings"), + ("embeddings/position_embeddings", "position_embedding/embeddings"), + ("embeddings/token_type_embeddings", "type_embeddings/embeddings"), + ("embeddings/LayerNorm", "embeddings/layer_norm"), + ("embedding_hidden_mapping_in", "embedding_projection"), + ("group_0/inner_group_0/", ""), + ("attention_1/self", "self_attention"), + ("attention_1/output/dense", "self_attention/attention_output"), + ("LayerNorm/", "self_attention_layer_norm/"), + ("ffn_1/intermediate/dense", "intermediate"), + ("ffn_1/intermediate/output/dense", "output"), + ("LayerNorm_1/", "output_layer_norm/"), + ("pooler/dense", "pooler_transform"), + ("cls/predictions/output_bias", "cls/predictions/output_bias/bias"), + ("cls/seq_relationship/output_bias", "predictions/transform/logits/bias"), + ("cls/seq_relationship/output_weights", + "predictions/transform/logits/kernel"), +) + + +def _create_albert_model(cfg): + """Creates a BERT keras core model from BERT configuration. + + Args: + cfg: A `BertConfig` to create the core model. + + Returns: + A keras model. + """ + albert_encoder = networks.AlbertTransformerEncoder( + vocab_size=cfg.vocab_size, + hidden_size=cfg.hidden_size, + embedding_width=cfg.embedding_size, + num_layers=cfg.num_hidden_layers, + num_attention_heads=cfg.num_attention_heads, + intermediate_size=cfg.intermediate_size, + activation=activations.gelu, + dropout_rate=cfg.hidden_dropout_prob, + attention_dropout_rate=cfg.attention_probs_dropout_prob, + sequence_length=cfg.max_position_embeddings, + type_vocab_size=cfg.type_vocab_size, + initializer=tf.keras.initializers.TruncatedNormal( + stddev=cfg.initializer_range)) + return albert_encoder + + +def convert_checkpoint(bert_config, output_path, v1_checkpoint): + """Converts a V1 checkpoint into an OO V2 checkpoint.""" + output_dir, _ = os.path.split(output_path) + + # Create a temporary V1 name-converted checkpoint in the output directory. + temporary_checkpoint_dir = os.path.join(output_dir, "temp_v1") + temporary_checkpoint = os.path.join(temporary_checkpoint_dir, "ckpt") + tf1_checkpoint_converter_lib.convert( + checkpoint_from_path=v1_checkpoint, + checkpoint_to_path=temporary_checkpoint, + num_heads=bert_config.num_attention_heads, + name_replacements=ALBERT_NAME_REPLACEMENTS, + permutations=tf1_checkpoint_converter_lib.BERT_V2_PERMUTATIONS, + exclude_patterns=["adam", "Adam"]) + + # Create a V2 checkpoint from the temporary checkpoint. 
+ model = _create_albert_model(bert_config) + tf1_checkpoint_converter_lib.create_v2_checkpoint(model, temporary_checkpoint, + output_path) + + # Clean up the temporary checkpoint, if it exists. + try: + tf.io.gfile.rmtree(temporary_checkpoint_dir) + except tf.errors.OpError: + # If it doesn't exist, we don't need to clean it up; continue. + pass + + +def main(_): + output_path = FLAGS.converted_checkpoint_path + v1_checkpoint = FLAGS.checkpoint_to_convert + albert_config = configs.AlbertConfig.from_json_file(FLAGS.albert_config_file) + convert_checkpoint(albert_config, output_path, v1_checkpoint) + + +if __name__ == "__main__": + app.run(main) diff --git a/models/official/nlp/bert/README.md b/models/official/nlp/bert/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c26a87df520b9d9bb9cccefd515abc0bf4a399c7 --- /dev/null +++ b/models/official/nlp/bert/README.md @@ -0,0 +1,368 @@ +# BERT (Bidirectional Encoder Representations from Transformers) + +The academic paper which describes BERT in detail and provides full results on a +number of tasks can be found here: https://arxiv.org/abs/1810.04805. + +This repository contains TensorFlow 2.x implementation for BERT. + +## Contents + * [Contents](#contents) + * [Pre-trained Models](#pre-trained-models) + * [Restoring from Checkpoints](#restoring-from-checkpoints) + * [Set Up](#set-up) + * [Process Datasets](#process-datasets) + * [Fine-tuning with BERT](#fine-tuning-with-bert) + * [Cloud GPUs and TPUs](#cloud-gpus-and-tpus) + * [Sentence and Sentence-pair Classification Tasks](#sentence-and-sentence-pair-classification-tasks) + * [SQuAD 1.1](#squad-1.1) + + +## Pre-trained Models + +We released both checkpoints and tf.hub modules as the pretrained models for +fine-tuning. They are TF 2.x compatible and are converted from the checkpoints +released in TF 1.x official BERT repository +[google-research/bert](https://github.com/google-research/bert) +in order to keep consistent with BERT paper. + + +### Access to Pretrained Checkpoints + +Pretrained checkpoints can be found in the following links: + +**Note: We have switched BERT implementation +to use Keras functional-style networks in [nlp/modeling](../modeling). +The new checkpoints are:** + +* **[`BERT-Large, Uncased (Whole Word Masking)`](https://storage.googleapis.com/cloud-tpu-checkpoints/bert/keras_bert/wwm_uncased_L-24_H-1024_A-16.tar.gz)**: + 24-layer, 1024-hidden, 16-heads, 340M parameters +* **[`BERT-Large, Cased (Whole Word Masking)`](https://storage.googleapis.com/cloud-tpu-checkpoints/bert/keras_bert/wwm_cased_L-24_H-1024_A-16.tar.gz)**: + 24-layer, 1024-hidden, 16-heads, 340M parameters +* **[`BERT-Base, Uncased`](https://storage.googleapis.com/cloud-tpu-checkpoints/bert/keras_bert/uncased_L-12_H-768_A-12.tar.gz)**: + 12-layer, 768-hidden, 12-heads, 110M parameters +* **[`BERT-Large, Uncased`](https://storage.googleapis.com/cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16.tar.gz)**: + 24-layer, 1024-hidden, 16-heads, 340M parameters +* **[`BERT-Base, Cased`](https://storage.googleapis.com/cloud-tpu-checkpoints/bert/keras_bert/cased_L-12_H-768_A-12.tar.gz)**: + 12-layer, 768-hidden, 12-heads , 110M parameters +* **[`BERT-Large, Cased`](https://storage.googleapis.com/cloud-tpu-checkpoints/bert/keras_bert/cased_L-24_H-1024_A-16.tar.gz)**: + 24-layer, 1024-hidden, 16-heads, 340M parameters + +We recommend to host checkpoints on Google Cloud storage buckets when you use +Cloud GPU/TPU. 
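+
+For example, to fetch the `BERT-Base, Uncased` checkpoint listed above and
+stage it in your own bucket, a minimal sketch looks like the following (the
+name of the extracted directory is assumed to match the archive name):
+
+```shell
+curl -O https://storage.googleapis.com/cloud-tpu-checkpoints/bert/keras_bert/uncased_L-12_H-768_A-12.tar.gz
+tar -xzf uncased_L-12_H-768_A-12.tar.gz
+gsutil cp -r uncased_L-12_H-768_A-12 gs://some_bucket/
+```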
+
+### Restoring from Checkpoints
+
+`tf.train.Checkpoint` is used to manage model checkpoints in TF 2. To restore
+weights from the provided pre-trained checkpoints, you can use the following
+code:
+
+```python
+init_checkpoint='the pretrained model checkpoint path.'
+model=tf.keras.Model() # Bert pre-trained model as feature extractor.
+checkpoint = tf.train.Checkpoint(model=model)
+checkpoint.restore(init_checkpoint)
+```
+
+Checkpoints featuring native serialized Keras models
+(i.e. model.load()/load_weights()) will be available soon.
+
+### Access to Pretrained Hub Modules
+
+Pretrained tf.hub modules in TF 2.x SavedModel format can be found at the
+following links:
+
+* **[`BERT-Large, Uncased (Whole Word Masking)`](https://tfhub.dev/tensorflow/bert_en_wwm_uncased_L-24_H-1024_A-16/1)**:
+    24-layer, 1024-hidden, 16-heads, 340M parameters
+* **[`BERT-Large, Cased (Whole Word Masking)`](https://tfhub.dev/tensorflow/bert_en_wwm_cased_L-24_H-1024_A-16/1)**:
+    24-layer, 1024-hidden, 16-heads, 340M parameters
+* **[`BERT-Base, Uncased`](https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/1)**:
+    12-layer, 768-hidden, 12-heads, 110M parameters
+* **[`BERT-Large, Uncased`](https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/1)**:
+    24-layer, 1024-hidden, 16-heads, 340M parameters
+* **[`BERT-Base, Cased`](https://tfhub.dev/tensorflow/bert_en_cased_L-12_H-768_A-12/1)**:
+    12-layer, 768-hidden, 12-heads, 110M parameters
+* **[`BERT-Large, Cased`](https://tfhub.dev/tensorflow/bert_en_cased_L-24_H-1024_A-16/1)**:
+    24-layer, 1024-hidden, 16-heads, 340M parameters
+* **[`BERT-Base, Multilingual Cased`](https://tfhub.dev/tensorflow/bert_multi_cased_L-12_H-768_A-12/1)**:
+    104 languages, 12-layer, 768-hidden, 12-heads, 110M parameters
+* **[`BERT-Base, Chinese`](https://tfhub.dev/tensorflow/bert_zh_L-12_H-768_A-12/1)**:
+    Chinese Simplified and Traditional, 12-layer, 768-hidden, 12-heads,
+    110M parameters
+
+## Set Up
+
+```shell
+export PYTHONPATH="$PYTHONPATH:/path/to/models"
+```
+
+Install `tf-nightly` to get the latest updates:
+
+```shell
+pip install tf-nightly-gpu
+```
+
+With a TPU, GPU support is not necessary. First, you need to create a
+`tf-nightly` TPU with the [ctpu tool](https://github.com/tensorflow/tpu/tree/master/tools/ctpu):
+
+```shell
+ctpu up --name=<tpu-name> --tf-version="nightly"
+```
+
+Second, you need to install TF 2 `tf-nightly` on your VM:
+
+```shell
+pip install tf-nightly
+```
+
+## Process Datasets
+
+### Pre-training
+
+There is no change in how pre-training data is generated. Please use the script
+[`../data/create_pretraining_data.py`](../data/create_pretraining_data.py),
+which is essentially branched from the [BERT research repo](https://github.com/google-research/bert),
+to get processed pre-training data; it has been adapted to TF 2 symbols and
+Python 3 compatibility.
+
+
+### Fine-tuning
+
+To prepare the fine-tuning data for final model training, use the
+[`../data/create_finetuning_data.py`](../data/create_finetuning_data.py) script.
+The resulting datasets in `tf_record` format and the training meta data should
+later be passed to the training or evaluation scripts. The task-specific
+arguments are described in the following sections:
+
+* GLUE
+
+Users can download the
+[GLUE data](https://gluebenchmark.com/tasks) by running
+[this script](https://gist.github.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e)
+and unpack it to some directory `$GLUE_DIR`.
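+
+For example (a sketch; the flag names below come from that gist and may change,
+so check the script's `--help` before running it):
+
+```shell
+python download_glue_data.py --data_dir $GLUE_DIR --tasks all
+```
+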
+Also, users can download [Pretrained Checkpoint](#access-to-pretrained-checkpoints) and locate on some directory `$BERT_DIR` instead of using checkpoints on Google Cloud Storage. + +```shell +export GLUE_DIR=~/glue +export BERT_DIR=gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16 + +export TASK_NAME=MNLI +export OUTPUT_DIR=gs://some_bucket/datasets +python ../data/create_finetuning_data.py \ + --input_data_dir=${GLUE_DIR}/${TASK_NAME}/ \ + --vocab_file=${BERT_DIR}/vocab.txt \ + --train_data_output_path=${OUTPUT_DIR}/${TASK_NAME}_train.tf_record \ + --eval_data_output_path=${OUTPUT_DIR}/${TASK_NAME}_eval.tf_record \ + --meta_data_file_path=${OUTPUT_DIR}/${TASK_NAME}_meta_data \ + --fine_tuning_task_type=classification --max_seq_length=128 \ + --classification_task_name=${TASK_NAME} +``` + +* SQUAD + +The [SQuAD website](https://rajpurkar.github.io/SQuAD-explorer/) contains +detailed information about the SQuAD datasets and evaluation. + +The necessary files can be found here: + +* [train-v1.1.json](https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json) +* [dev-v1.1.json](https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json) +* [evaluate-v1.1.py](https://github.com/allenai/bi-att-flow/blob/master/squad/evaluate-v1.1.py) +* [train-v2.0.json](https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json) +* [dev-v2.0.json](https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json) +* [evaluate-v2.0.py](https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/) + +```shell +export SQUAD_DIR=~/squad +export SQUAD_VERSION=v1.1 +export BERT_DIR=gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16 +export OUTPUT_DIR=gs://some_bucket/datasets + +python ../data/create_finetuning_data.py \ + --squad_data_file=${SQUAD_DIR}/train-${SQUAD_VERSION}.json \ + --vocab_file=${BERT_DIR}/vocab.txt \ + --train_data_output_path=${OUTPUT_DIR}/squad_${SQUAD_VERSION}_train.tf_record \ + --meta_data_file_path=${OUTPUT_DIR}/squad_${SQUAD_VERSION}_meta_data \ + --fine_tuning_task_type=squad --max_seq_length=384 +``` + +## Fine-tuning with BERT + +### Cloud GPUs and TPUs + +* Cloud Storage + +The unzipped pre-trained model files can also be found in the Google Cloud +Storage folder `gs://cloud-tpu-checkpoints/bert/keras_bert`. For example: + +```shell +export BERT_DIR=gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16 +export MODEL_DIR=gs://some_bucket/my_output_dir +``` + +Currently, users are able to access to `tf-nightly` TPUs and the following TPU +script should run with `tf-nightly`. + +* GPU -> TPU + +Just add the following flags to `run_classifier.py` or `run_squad.py`: + +```shell + --distribution_strategy=tpu + --tpu=grpc://${TPU_IP_ADDRESS}:8470 +``` + +### Sentence and Sentence-pair Classification Tasks + +This example code fine-tunes `BERT-Large` on the Microsoft Research Paraphrase +Corpus (MRPC) corpus, which only contains 3,600 examples and can fine-tune in a +few minutes on most GPUs. + +We use the `BERT-Large` (uncased_L-24_H-1024_A-16) as an example throughout the +workflow. +For GPU memory of 16GB or smaller, you may try to use `BERT-Base` +(uncased_L-12_H-768_A-12). 
+
+```shell
+export BERT_DIR=gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16
+export MODEL_DIR=gs://some_bucket/my_output_dir
+export GLUE_DIR=gs://some_bucket/datasets
+export TASK=MRPC
+
+python run_classifier.py \
+  --mode='train_and_eval' \
+  --input_meta_data_path=${GLUE_DIR}/${TASK}_meta_data \
+  --train_data_path=${GLUE_DIR}/${TASK}_train.tf_record \
+  --eval_data_path=${GLUE_DIR}/${TASK}_eval.tf_record \
+  --bert_config_file=${BERT_DIR}/bert_config.json \
+  --init_checkpoint=${BERT_DIR}/bert_model.ckpt \
+  --train_batch_size=4 \
+  --eval_batch_size=4 \
+  --steps_per_loop=1 \
+  --learning_rate=2e-5 \
+  --num_train_epochs=3 \
+  --model_dir=${MODEL_DIR} \
+  --distribution_strategy=mirrored
+```
+
+Alternatively, instead of specifying `init_checkpoint`, you can specify
+`hub_module_url` to employ a pretrained BERT hub module, e.g.,
+` --hub_module_url=https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/1`.
+
+After training a model, to get predictions from the classifier, you can set
+`--mode=predict` and pass the test set tfrecords to `--eval_data_path`.
+Output will be created in a file called `test_results.tsv` in the output
+folder. Each line contains the output for one sample; the columns are the
+class probabilities.
+
+```shell
+python run_classifier.py \
+  --mode='predict' \
+  --input_meta_data_path=${GLUE_DIR}/${TASK}_meta_data \
+  --eval_data_path=${GLUE_DIR}/${TASK}_eval.tf_record \
+  --bert_config_file=${BERT_DIR}/bert_config.json \
+  --eval_batch_size=4 \
+  --model_dir=${MODEL_DIR} \
+  --distribution_strategy=mirrored
+```
+
+To use a TPU, you only need to switch the distribution strategy type to `tpu`,
+provide the TPU information, and use remote storage for model checkpoints.
+
+```shell
+export BERT_DIR=gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16
+export TPU_IP_ADDRESS='???'
+export MODEL_DIR=gs://some_bucket/my_output_dir
+export GLUE_DIR=gs://some_bucket/datasets
+export TASK=MRPC
+
+python run_classifier.py \
+  --mode='train_and_eval' \
+  --input_meta_data_path=${GLUE_DIR}/${TASK}_meta_data \
+  --train_data_path=${GLUE_DIR}/${TASK}_train.tf_record \
+  --eval_data_path=${GLUE_DIR}/${TASK}_eval.tf_record \
+  --bert_config_file=${BERT_DIR}/bert_config.json \
+  --init_checkpoint=${BERT_DIR}/bert_model.ckpt \
+  --train_batch_size=32 \
+  --eval_batch_size=32 \
+  --steps_per_loop=1000 \
+  --learning_rate=2e-5 \
+  --num_train_epochs=3 \
+  --model_dir=${MODEL_DIR} \
+  --distribution_strategy=tpu \
+  --tpu=grpc://${TPU_IP_ADDRESS}:8470
+```
+
+Note that we specify `steps_per_loop=1000` for TPU because running a loop of
+training steps inside a `tf.function` can significantly increase TPU
+utilization, and callbacks will not be called inside the loop.
+
+### SQuAD 1.1
+
+The Stanford Question Answering Dataset (SQuAD) is a popular question answering
+benchmark dataset. See more at the [SQuAD website](https://rajpurkar.github.io/SQuAD-explorer/).
+
+We use `BERT-Large` (uncased_L-24_H-1024_A-16) as an example throughout the
+workflow.
+For GPU memory of 16GB or smaller, you may try to use `BERT-Base`
+(uncased_L-12_H-768_A-12).
+
+```shell
+export BERT_DIR=gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16
+export SQUAD_DIR=gs://some_bucket/datasets
+export MODEL_DIR=gs://some_bucket/my_output_dir
+export SQUAD_VERSION=v1.1
+
+python run_squad.py \
+  --input_meta_data_path=${SQUAD_DIR}/squad_${SQUAD_VERSION}_meta_data \
+  --train_data_path=${SQUAD_DIR}/squad_${SQUAD_VERSION}_train.tf_record \
+  --predict_file=${SQUAD_DIR}/dev-v1.1.json \
+  --vocab_file=${BERT_DIR}/vocab.txt \
+  --bert_config_file=${BERT_DIR}/bert_config.json \
+  --init_checkpoint=${BERT_DIR}/bert_model.ckpt \
+  --train_batch_size=4 \
+  --predict_batch_size=4 \
+  --learning_rate=8e-5 \
+  --num_train_epochs=2 \
+  --model_dir=${MODEL_DIR} \
+  --distribution_strategy=mirrored
+```
+
+Similarly, you can replace the `init_checkpoint` FLAG with `hub_module_url` to
+specify a hub module path.
+
+`run_squad.py` writes the prediction for `--predict_file` by default. If you
+set `--mode=predict` and provide the SQuAD test data, the script will generate
+the prediction JSON file.
+
+To use a TPU, you need to switch the distribution strategy type to `tpu` and
+provide the TPU information.
+
+```shell
+export BERT_DIR=gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16
+export TPU_IP_ADDRESS='???'
+export MODEL_DIR=gs://some_bucket/my_output_dir
+export SQUAD_DIR=gs://some_bucket/datasets
+export SQUAD_VERSION=v1.1
+
+python run_squad.py \
+  --input_meta_data_path=${SQUAD_DIR}/squad_${SQUAD_VERSION}_meta_data \
+  --train_data_path=${SQUAD_DIR}/squad_${SQUAD_VERSION}_train.tf_record \
+  --predict_file=${SQUAD_DIR}/dev-v1.1.json \
+  --vocab_file=${BERT_DIR}/vocab.txt \
+  --bert_config_file=${BERT_DIR}/bert_config.json \
+  --init_checkpoint=${BERT_DIR}/bert_model.ckpt \
+  --train_batch_size=32 \
+  --learning_rate=8e-5 \
+  --num_train_epochs=2 \
+  --model_dir=${MODEL_DIR} \
+  --distribution_strategy=tpu \
+  --tpu=grpc://${TPU_IP_ADDRESS}:8470
+```
+
+The dev set predictions will be saved into a file called `predictions.json` in
+the `model_dir`:
+
+```shell
+python $SQUAD_DIR/evaluate-v1.1.py $SQUAD_DIR/dev-v1.1.json ./squad/predictions.json
+```
+
+
diff --git a/models/official/nlp/bert/__init__.py b/models/official/nlp/bert/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/models/official/nlp/bert/__init__.py
@@ -0,0 +1 @@
+
diff --git a/models/official/nlp/bert/__pycache__/__init__.cpython-38.pyc b/models/official/nlp/bert/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6db7d3b5135d245ab0dcc000b240d54ffdf3a938
Binary files /dev/null and b/models/official/nlp/bert/__pycache__/__init__.cpython-38.pyc differ
diff --git a/models/official/nlp/bert/__pycache__/__init__.cpython-39.pyc b/models/official/nlp/bert/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8be831873bc9909e371fdc816ebe9760a643e71e
Binary files /dev/null and b/models/official/nlp/bert/__pycache__/__init__.cpython-39.pyc differ
diff --git a/models/official/nlp/bert/__pycache__/tokenization.cpython-38.pyc b/models/official/nlp/bert/__pycache__/tokenization.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8c334a3013725666cc3cc808cb6ccbbc4003582a
Binary files /dev/null and b/models/official/nlp/bert/__pycache__/tokenization.cpython-38.pyc differ
diff --git a/models/official/nlp/bert/__pycache__/tokenization.cpython-39.pyc
b/models/official/nlp/bert/__pycache__/tokenization.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d4a853456ee0666c019aeb5fdd3ab2b21f929e5 Binary files /dev/null and b/models/official/nlp/bert/__pycache__/tokenization.cpython-39.pyc differ diff --git a/models/official/nlp/bert/bert_cloud_tpu.md b/models/official/nlp/bert/bert_cloud_tpu.md new file mode 100644 index 0000000000000000000000000000000000000000..e5e6758a8bdc216744b7770d7eb8b5ff47408493 --- /dev/null +++ b/models/official/nlp/bert/bert_cloud_tpu.md @@ -0,0 +1,110 @@ +# BERT FineTuning with Cloud TPU: Sentence and Sentence-Pair Classification Tasks (TF 2.1) +This tutorial shows you how to train the Bidirectional Encoder Representations from Transformers (BERT) model on Cloud TPU. + + +## Set up Cloud Storage and Compute Engine VM +1. [Open a cloud shell window](https://console.cloud.google.com/?cloudshell=true&_ga=2.11844148.-1612541229.1552429951) +2. Create a variable for the project's name: +``` +export PROJECT_NAME=your-project_name +``` +3. Configure `gcloud` command-line tool to use the project where you want to create Cloud TPU. +``` +gcloud config set project ${PROJECT_NAME} +``` +4. Create a Cloud Storage bucket using the following command: +``` +gsutil mb -p ${PROJECT_NAME} -c standard -l europe-west4 -b on gs://your-bucket-name +``` +This Cloud Storage bucket stores the data you use to train your model and the training results. +5. Launch a Compute Engine VM and Cloud TPU using the ctpu up command. +``` +ctpu up --tpu-size=v3-8 \ + --machine-type=n1-standard-8 \ + --zone=europe-west4-a \ + --tf-version=2.1 [optional flags: --project, --name] +``` +6. The configuration you specified appears. Enter y to approve or n to cancel. +7. When the ctpu up command has finished executing, verify that your shell prompt has changed from username@project to username@tpuname. This change shows that you are now logged into your Compute Engine VM. +``` +gcloud compute ssh vm-name --zone=europe-west4-a +(vm)$ export TPU_NAME=vm-name +``` +As you continue these instructions, run each command that begins with `(vm)$` in your VM session window. + +## Prepare the Dataset +1. From your Compute Engine virtual machine (VM), install requirements.txt. +``` +(vm)$ cd /usr/share/models +(vm)$ sudo pip3 install -r official/requirements.txt +``` +2. Optional: download download_glue_data.py + +This tutorial uses the General Language Understanding Evaluation (GLUE) benchmark to evaluate and analyze the performance of the model. The GLUE data is provided for this tutorial at gs://cloud-tpu-checkpoints/bert/classification. + +## Define parameter values +Next, define several parameter values that are required when you train and evaluate your model: + +``` +(vm)$ export PYTHONPATH="$PYTHONPATH:/usr/share/tpu/models" +(vm)$ export STORAGE_BUCKET=gs://your-bucket-name +(vm)$ export BERT_BASE_DIR=gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16 +(vm)$ export MODEL_DIR=${STORAGE_BUCKET}/bert-output +(vm)$ export GLUE_DIR=gs://cloud-tpu-checkpoints/bert/classification +(vm)$ export TASK=mnli +``` + +## Train the model +From your Compute Engine VM, run the following command. 
+ +``` +(vm)$ python3 official/nlp/bert/run_classifier.py \ + --mode='train_and_eval' \ + --input_meta_data_path=${GLUE_DIR}/${TASK}_meta_data \ + --train_data_path=${GLUE_DIR}/${TASK}_train.tf_record \ + --eval_data_path=${GLUE_DIR}/${TASK}_eval.tf_record \ + --bert_config_file=$BERT_BASE_DIR/bert_config.json \ + --init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt \ + --train_batch_size=32 \ + --eval_batch_size=32 \ + --learning_rate=2e-5 \ + --num_train_epochs=3 \ + --model_dir=${MODEL_DIR} \ + --distribution_strategy=tpu \ + --tpu=${TPU_NAME} +``` + +## Verify your results +The training takes approximately 1 hour on a v3-8 TPU. When script completes, you should see results similar to the following: +``` +Training Summary: +{'train_loss': 0.28142181038856506, +'last_train_metrics': 0.9467429518699646, +'eval_metrics': 0.8599063158035278, +'total_training_steps': 36813} +``` + +## Clean up +To avoid incurring charges to your GCP account for the resources used in this topic: +1. Disconnect from the Compute Engine VM: +``` +(vm)$ exit +``` +2. In your Cloud Shell, run ctpu delete with the --zone flag you used when you set up the Cloud TPU to delete your Compute Engine VM and your Cloud TPU: +``` +$ ctpu delete --zone=your-zone +``` +3. Run ctpu status specifying your zone to make sure you have no instances allocated to avoid unnecessary charges for TPU usage. The deletion might take several minutes. A response like the one below indicates there are no more allocated instances: +``` +$ ctpu status --zone=your-zone +``` +4. Run gsutil as shown, replacing your-bucket with the name of the Cloud Storage bucket you created for this tutorial: +``` +$ gsutil rm -r gs://your-bucket +``` + + + + + + diff --git a/models/official/nlp/bert/bert_models.py b/models/official/nlp/bert/bert_models.py new file mode 100644 index 0000000000000000000000000000000000000000..9d16150d0c353e6626b911b32c9961c4712c8aed --- /dev/null +++ b/models/official/nlp/bert/bert_models.py @@ -0,0 +1,371 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""BERT models that are compatible with TF 2.0.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import gin +import tensorflow as tf +import tensorflow_hub as hub + +from official.modeling import tf_utils +from official.nlp.albert import configs as albert_configs +from official.nlp.bert import configs +from official.nlp.modeling import models +from official.nlp.modeling import networks + + +class BertPretrainLossAndMetricLayer(tf.keras.layers.Layer): + """Returns layer that computes custom loss and metrics for pretraining.""" + + def __init__(self, vocab_size, **kwargs): + super(BertPretrainLossAndMetricLayer, self).__init__(**kwargs) + self._vocab_size = vocab_size + self.config = { + 'vocab_size': vocab_size, + } + + def _add_metrics(self, lm_output, lm_labels, lm_label_weights, + lm_example_loss, sentence_output, sentence_labels, + next_sentence_loss): + """Adds metrics.""" + masked_lm_accuracy = tf.keras.metrics.sparse_categorical_accuracy( + lm_labels, lm_output) + numerator = tf.reduce_sum(masked_lm_accuracy * lm_label_weights) + denominator = tf.reduce_sum(lm_label_weights) + 1e-5 + masked_lm_accuracy = numerator / denominator + self.add_metric( + masked_lm_accuracy, name='masked_lm_accuracy', aggregation='mean') + + self.add_metric(lm_example_loss, name='lm_example_loss', aggregation='mean') + + if sentence_labels is not None: + next_sentence_accuracy = tf.keras.metrics.sparse_categorical_accuracy( + sentence_labels, sentence_output) + self.add_metric( + next_sentence_accuracy, + name='next_sentence_accuracy', + aggregation='mean') + + if next_sentence_loss is not None: + self.add_metric( + next_sentence_loss, name='next_sentence_loss', aggregation='mean') + + def call(self, + lm_output_logits, + sentence_output_logits, + lm_label_ids, + lm_label_weights, + sentence_labels=None): + """Implements call() for the layer.""" + lm_label_weights = tf.cast(lm_label_weights, tf.float32) + lm_output_logits = tf.cast(lm_output_logits, tf.float32) + + lm_prediction_losses = tf.keras.losses.sparse_categorical_crossentropy( + lm_label_ids, lm_output_logits, from_logits=True) + lm_numerator_loss = tf.reduce_sum(lm_prediction_losses * lm_label_weights) + lm_denominator_loss = tf.reduce_sum(lm_label_weights) + mask_label_loss = tf.math.divide_no_nan(lm_numerator_loss, + lm_denominator_loss) + + if sentence_labels is not None: + sentence_output_logits = tf.cast(sentence_output_logits, tf.float32) + sentence_loss = tf.keras.losses.sparse_categorical_crossentropy( + sentence_labels, sentence_output_logits, from_logits=True) + sentence_loss = tf.reduce_mean(sentence_loss) + loss = mask_label_loss + sentence_loss + else: + sentence_loss = None + loss = mask_label_loss + + batch_shape = tf.slice(tf.shape(lm_label_ids), [0], [1]) + # TODO(hongkuny): Avoids the hack and switches add_loss. + final_loss = tf.fill(batch_shape, loss) + + self._add_metrics(lm_output_logits, lm_label_ids, lm_label_weights, + mask_label_loss, sentence_output_logits, sentence_labels, + sentence_loss) + return final_loss + + +@gin.configurable +def get_transformer_encoder(bert_config, + sequence_length, + transformer_encoder_cls=None, + output_range=None): + """Gets a 'TransformerEncoder' object. + + Args: + bert_config: A 'modeling.BertConfig' or 'modeling.AlbertConfig' object. + sequence_length: Maximum sequence length of the training data. 
+ transformer_encoder_cls: A EncoderScaffold class. If it is None, uses the + default BERT encoder implementation. + output_range: the sequence output range, [0, output_range). Default setting + is to return the entire sequence output. + + Returns: + A networks.TransformerEncoder object. + """ + if transformer_encoder_cls is not None: + # TODO(hongkuny): evaluate if it is better to put cfg definition in gin. + embedding_cfg = dict( + vocab_size=bert_config.vocab_size, + type_vocab_size=bert_config.type_vocab_size, + hidden_size=bert_config.hidden_size, + seq_length=sequence_length, + max_seq_length=bert_config.max_position_embeddings, + initializer=tf.keras.initializers.TruncatedNormal( + stddev=bert_config.initializer_range), + dropout_rate=bert_config.hidden_dropout_prob, + ) + hidden_cfg = dict( + num_attention_heads=bert_config.num_attention_heads, + intermediate_size=bert_config.intermediate_size, + intermediate_activation=tf_utils.get_activation(bert_config.hidden_act), + dropout_rate=bert_config.hidden_dropout_prob, + attention_dropout_rate=bert_config.attention_probs_dropout_prob, + kernel_initializer=tf.keras.initializers.TruncatedNormal( + stddev=bert_config.initializer_range), + ) + kwargs = dict( + embedding_cfg=embedding_cfg, + hidden_cfg=hidden_cfg, + num_hidden_instances=bert_config.num_hidden_layers, + pooled_output_dim=bert_config.hidden_size, + pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( + stddev=bert_config.initializer_range)) + + # Relies on gin configuration to define the Transformer encoder arguments. + return transformer_encoder_cls(**kwargs) + + kwargs = dict( + vocab_size=bert_config.vocab_size, + hidden_size=bert_config.hidden_size, + num_layers=bert_config.num_hidden_layers, + num_attention_heads=bert_config.num_attention_heads, + intermediate_size=bert_config.intermediate_size, + activation=tf_utils.get_activation(bert_config.hidden_act), + dropout_rate=bert_config.hidden_dropout_prob, + attention_dropout_rate=bert_config.attention_probs_dropout_prob, + sequence_length=sequence_length, + max_sequence_length=bert_config.max_position_embeddings, + type_vocab_size=bert_config.type_vocab_size, + embedding_width=bert_config.embedding_size, + initializer=tf.keras.initializers.TruncatedNormal( + stddev=bert_config.initializer_range)) + if isinstance(bert_config, albert_configs.AlbertConfig): + return networks.AlbertTransformerEncoder(**kwargs) + else: + assert isinstance(bert_config, configs.BertConfig) + kwargs['output_range'] = output_range + return networks.TransformerEncoder(**kwargs) + + +def pretrain_model(bert_config, + seq_length, + max_predictions_per_seq, + initializer=None, + use_next_sentence_label=True, + return_core_pretrainer_model=False): + """Returns model to be used for pre-training. + + Args: + bert_config: Configuration that defines the core BERT model. + seq_length: Maximum sequence length of the training data. + max_predictions_per_seq: Maximum number of tokens in sequence to mask out + and use for pretraining. + initializer: Initializer for weights in BertPretrainer. + use_next_sentence_label: Whether to use the next sentence label. + return_core_pretrainer_model: Whether to also return the `BertPretrainer` + object. + + Returns: + A Tuple of (1) Pretraining model, (2) core BERT submodel from which to + save weights after pretraining, and (3) optional core `BertPretrainer` + object if argument `return_core_pretrainer_model` is True. 
+ """ + input_word_ids = tf.keras.layers.Input( + shape=(seq_length,), name='input_word_ids', dtype=tf.int32) + input_mask = tf.keras.layers.Input( + shape=(seq_length,), name='input_mask', dtype=tf.int32) + input_type_ids = tf.keras.layers.Input( + shape=(seq_length,), name='input_type_ids', dtype=tf.int32) + masked_lm_positions = tf.keras.layers.Input( + shape=(max_predictions_per_seq,), + name='masked_lm_positions', + dtype=tf.int32) + masked_lm_ids = tf.keras.layers.Input( + shape=(max_predictions_per_seq,), name='masked_lm_ids', dtype=tf.int32) + masked_lm_weights = tf.keras.layers.Input( + shape=(max_predictions_per_seq,), + name='masked_lm_weights', + dtype=tf.int32) + + if use_next_sentence_label: + next_sentence_labels = tf.keras.layers.Input( + shape=(1,), name='next_sentence_labels', dtype=tf.int32) + else: + next_sentence_labels = None + + transformer_encoder = get_transformer_encoder(bert_config, seq_length) + if initializer is None: + initializer = tf.keras.initializers.TruncatedNormal( + stddev=bert_config.initializer_range) + pretrainer_model = models.BertPretrainer( + network=transformer_encoder, + embedding_table=transformer_encoder.get_embedding_table(), + num_classes=2, # The next sentence prediction label has two classes. + activation=tf_utils.get_activation(bert_config.hidden_act), + num_token_predictions=max_predictions_per_seq, + initializer=initializer, + output='logits') + + outputs = pretrainer_model( + [input_word_ids, input_mask, input_type_ids, masked_lm_positions]) + lm_output = outputs['masked_lm'] + sentence_output = outputs['classification'] + pretrain_loss_layer = BertPretrainLossAndMetricLayer( + vocab_size=bert_config.vocab_size) + output_loss = pretrain_loss_layer(lm_output, sentence_output, masked_lm_ids, + masked_lm_weights, next_sentence_labels) + inputs = { + 'input_word_ids': input_word_ids, + 'input_mask': input_mask, + 'input_type_ids': input_type_ids, + 'masked_lm_positions': masked_lm_positions, + 'masked_lm_ids': masked_lm_ids, + 'masked_lm_weights': masked_lm_weights, + } + if use_next_sentence_label: + inputs['next_sentence_labels'] = next_sentence_labels + + keras_model = tf.keras.Model(inputs=inputs, outputs=output_loss) + if return_core_pretrainer_model: + return keras_model, transformer_encoder, pretrainer_model + else: + return keras_model, transformer_encoder + + +def squad_model(bert_config, + max_seq_length, + initializer=None, + hub_module_url=None, + hub_module_trainable=True): + """Returns BERT Squad model along with core BERT model to import weights. + + Args: + bert_config: BertConfig, the config defines the core Bert model. + max_seq_length: integer, the maximum input sequence length. + initializer: Initializer for the final dense layer in the span labeler. + Defaulted to TruncatedNormal initializer. + hub_module_url: TF-Hub path/url to Bert module. + hub_module_trainable: True to finetune layers in the hub module. + + Returns: + A tuple of (1) keras model that outputs start logits and end logits and + (2) the core BERT transformer encoder. 
+ """ + if initializer is None: + initializer = tf.keras.initializers.TruncatedNormal( + stddev=bert_config.initializer_range) + if not hub_module_url: + bert_encoder = get_transformer_encoder(bert_config, max_seq_length) + return models.BertSpanLabeler( + network=bert_encoder, initializer=initializer), bert_encoder + + input_word_ids = tf.keras.layers.Input( + shape=(max_seq_length,), dtype=tf.int32, name='input_word_ids') + input_mask = tf.keras.layers.Input( + shape=(max_seq_length,), dtype=tf.int32, name='input_mask') + input_type_ids = tf.keras.layers.Input( + shape=(max_seq_length,), dtype=tf.int32, name='input_type_ids') + core_model = hub.KerasLayer(hub_module_url, trainable=hub_module_trainable) + pooled_output, sequence_output = core_model( + [input_word_ids, input_mask, input_type_ids]) + bert_encoder = tf.keras.Model( + inputs={ + 'input_word_ids': input_word_ids, + 'input_mask': input_mask, + 'input_type_ids': input_type_ids, + }, + outputs=[sequence_output, pooled_output], + name='core_model') + return models.BertSpanLabeler( + network=bert_encoder, initializer=initializer), bert_encoder + + +def classifier_model(bert_config, + num_labels, + max_seq_length=None, + final_layer_initializer=None, + hub_module_url=None, + hub_module_trainable=True): + """BERT classifier model in functional API style. + + Construct a Keras model for predicting `num_labels` outputs from an input with + maximum sequence length `max_seq_length`. + + Args: + bert_config: BertConfig or AlbertConfig, the config defines the core BERT or + ALBERT model. + num_labels: integer, the number of classes. + max_seq_length: integer, the maximum input sequence length. + final_layer_initializer: Initializer for final dense layer. Defaulted + TruncatedNormal initializer. + hub_module_url: TF-Hub path/url to Bert module. + hub_module_trainable: True to finetune layers in the hub module. 
+ + Returns: + Combined prediction model (words, mask, type) -> (one-hot labels) + BERT sub-model (words, mask, type) -> (bert_outputs) + """ + if final_layer_initializer is not None: + initializer = final_layer_initializer + else: + initializer = tf.keras.initializers.TruncatedNormal( + stddev=bert_config.initializer_range) + + if not hub_module_url: + bert_encoder = get_transformer_encoder( + bert_config, max_seq_length, output_range=1) + return models.BertClassifier( + bert_encoder, + num_classes=num_labels, + dropout_rate=bert_config.hidden_dropout_prob, + initializer=initializer), bert_encoder + + input_word_ids = tf.keras.layers.Input( + shape=(max_seq_length,), dtype=tf.int32, name='input_word_ids') + input_mask = tf.keras.layers.Input( + shape=(max_seq_length,), dtype=tf.int32, name='input_mask') + input_type_ids = tf.keras.layers.Input( + shape=(max_seq_length,), dtype=tf.int32, name='input_type_ids') + bert_model = hub.KerasLayer(hub_module_url, trainable=hub_module_trainable) + pooled_output, _ = bert_model([input_word_ids, input_mask, input_type_ids]) + output = tf.keras.layers.Dropout(rate=bert_config.hidden_dropout_prob)( + pooled_output) + + output = tf.keras.layers.Dense( + num_labels, kernel_initializer=initializer, name='output')( + output) + return tf.keras.Model( + inputs={ + 'input_word_ids': input_word_ids, + 'input_mask': input_mask, + 'input_type_ids': input_type_ids + }, + outputs=output), bert_model diff --git a/models/official/nlp/bert/bert_models_test.py b/models/official/nlp/bert/bert_models_test.py new file mode 100644 index 0000000000000000000000000000000000000000..93763b45bfc53c5d32de2df7f7f0f72894e9556f --- /dev/null +++ b/models/official/nlp/bert/bert_models_test.py @@ -0,0 +1,114 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from official.nlp.bert import bert_models +from official.nlp.bert import configs as bert_configs +from official.nlp.modeling import networks + + +class BertModelsTest(tf.test.TestCase): + + def setUp(self): + super(BertModelsTest, self).setUp() + self._bert_test_config = bert_configs.BertConfig( + attention_probs_dropout_prob=0.0, + hidden_act='gelu', + hidden_dropout_prob=0.0, + hidden_size=16, + initializer_range=0.02, + intermediate_size=32, + max_position_embeddings=128, + num_attention_heads=2, + num_hidden_layers=2, + type_vocab_size=2, + vocab_size=30522) + + def test_pretrain_model(self): + model, encoder = bert_models.pretrain_model( + self._bert_test_config, + seq_length=5, + max_predictions_per_seq=2, + initializer=None, + use_next_sentence_label=True) + self.assertIsInstance(model, tf.keras.Model) + self.assertIsInstance(encoder, networks.TransformerEncoder) + + # model has one scalar output: loss value. 
+ self.assertEqual(model.output.shape.as_list(), [None,]) + + # Expect two output from encoder: sequence and classification output. + self.assertIsInstance(encoder.output, list) + self.assertLen(encoder.output, 2) + # shape should be [batch size, seq_length, hidden_size] + self.assertEqual(encoder.output[0].shape.as_list(), [None, 5, 16]) + # shape should be [batch size, hidden_size] + self.assertEqual(encoder.output[1].shape.as_list(), [None, 16]) + + def test_squad_model(self): + model, core_model = bert_models.squad_model( + self._bert_test_config, + max_seq_length=5, + initializer=None, + hub_module_url=None, + hub_module_trainable=None) + self.assertIsInstance(model, tf.keras.Model) + self.assertIsInstance(core_model, tf.keras.Model) + + # Expect two output from model: start positions and end positions + self.assertIsInstance(model.output, list) + self.assertLen(model.output, 2) + # shape should be [batch size, seq_length] + self.assertEqual(model.output[0].shape.as_list(), [None, 5]) + # shape should be [batch size, seq_length] + self.assertEqual(model.output[1].shape.as_list(), [None, 5]) + + # Expect two output from core_model: sequence and classification output. + self.assertIsInstance(core_model.output, list) + self.assertLen(core_model.output, 2) + # shape should be [batch size, seq_length, hidden_size] + self.assertEqual(core_model.output[0].shape.as_list(), [None, 5, 16]) + # shape should be [batch size, hidden_size] + self.assertEqual(core_model.output[1].shape.as_list(), [None, 16]) + + def test_classifier_model(self): + model, core_model = bert_models.classifier_model( + self._bert_test_config, + num_labels=3, + max_seq_length=5, + final_layer_initializer=None, + hub_module_url=None, + hub_module_trainable=None) + self.assertIsInstance(model, tf.keras.Model) + self.assertIsInstance(core_model, tf.keras.Model) + + # model has one classification output with num_labels=3. + self.assertEqual(model.output.shape.as_list(), [None, 3]) + + # Expect two output from core_model: sequence and classification output. + self.assertIsInstance(core_model.output, list) + self.assertLen(core_model.output, 2) + # shape should be [batch size, 1, hidden_size] + self.assertEqual(core_model.output[0].shape.as_list(), [None, 1, 16]) + # shape should be [batch size, hidden_size] + self.assertEqual(core_model.output[1].shape.as_list(), [None, 16]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/nlp/bert/common_flags.py b/models/official/nlp/bert/common_flags.py new file mode 100644 index 0000000000000000000000000000000000000000..06a376d63de5447ddd67810f2cf6be3399f2a958 --- /dev/null +++ b/models/official/nlp/bert/common_flags.py @@ -0,0 +1,117 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Defining common flags used across all BERT models/applications.""" + +from absl import flags +import tensorflow as tf + +from official.utils import hyperparams_flags +from official.utils.flags import core as flags_core + + +def define_common_bert_flags(): + """Define common flags for BERT tasks.""" + flags_core.define_base( + data_dir=False, + model_dir=True, + clean=False, + train_epochs=False, + epochs_between_evals=False, + stop_threshold=False, + batch_size=False, + num_gpu=True, + export_dir=False, + distribution_strategy=True, + run_eagerly=True) + flags_core.define_distribution() + flags.DEFINE_string('bert_config_file', None, + 'Bert configuration file to define core bert layers.') + flags.DEFINE_string( + 'model_export_path', None, + 'Path to the directory, where trainined model will be ' + 'exported.') + flags.DEFINE_string('tpu', '', 'TPU address to connect to.') + flags.DEFINE_string( + 'init_checkpoint', None, + 'Initial checkpoint (usually from a pre-trained BERT model).') + flags.DEFINE_integer('num_train_epochs', 3, + 'Total number of training epochs to perform.') + flags.DEFINE_integer( + 'steps_per_loop', None, + 'Number of steps per graph-mode loop. Only training step ' + 'happens inside the loop. Callbacks will not be called ' + 'inside. If not set the value will be configured depending on the ' + 'devices available.') + flags.DEFINE_float('learning_rate', 5e-5, + 'The initial learning rate for Adam.') + flags.DEFINE_float('end_lr', 0.0, + 'The end learning rate for learning rate decay.') + flags.DEFINE_string('optimizer_type', 'adamw', + 'The type of optimizer to use for training (adamw|lamb)') + flags.DEFINE_boolean( + 'scale_loss', False, + 'Whether to divide the loss by number of replica inside the per-replica ' + 'loss function.') + flags.DEFINE_boolean( + 'use_keras_compile_fit', False, + 'If True, uses Keras compile/fit() API for training logic. Otherwise ' + 'use custom training loop.') + flags.DEFINE_string( + 'hub_module_url', None, 'TF-Hub path/url to Bert module. ' + 'If specified, init_checkpoint flag should not be used.') + flags.DEFINE_bool('hub_module_trainable', True, + 'True to make keras layers in the hub module trainable.') + flags.DEFINE_string('sub_model_export_name', None, + 'If set, `sub_model` checkpoints are exported into ' + 'FLAGS.model_dir/FLAGS.sub_model_export_name.') + + flags_core.define_log_steps() + + # Adds flags for mixed precision and multi-worker training. + flags_core.define_performance( + num_parallel_calls=False, + inter_op=False, + intra_op=False, + synthetic_data=False, + max_train_steps=False, + dtype=True, + dynamic_loss_scale=True, + loss_scale=True, + all_reduce_alg=True, + num_packs=False, + tf_gpu_thread_mode=True, + datasets_num_private_threads=True, + enable_xla=True, + fp16_implementation=True, + ) + + # Adds gin configuration flags. 
+ hyperparams_flags.define_gin_flags() + + +def dtype(): + return flags_core.get_tf_dtype(flags.FLAGS) + + +def use_float16(): + return flags_core.get_tf_dtype(flags.FLAGS) == tf.float16 + + +def use_graph_rewrite(): + return flags.FLAGS.fp16_implementation == 'graph_rewrite' + + +def get_loss_scale(): + return flags_core.get_loss_scale(flags.FLAGS, default_for_fp16='dynamic') diff --git a/models/official/nlp/bert/configs.py b/models/official/nlp/bert/configs.py new file mode 100644 index 0000000000000000000000000000000000000000..b3f9082655f490e010ff2a341c40d488eb1097c1 --- /dev/null +++ b/models/official/nlp/bert/configs.py @@ -0,0 +1,108 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""The main BERT model and related functions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import json +import six +import tensorflow as tf + + +class BertConfig(object): + """Configuration for `BertModel`.""" + + def __init__(self, + vocab_size, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=16, + initializer_range=0.02, + embedding_size=None, + backward_compatible=True): + """Constructs BertConfig. + + Args: + vocab_size: Vocabulary size of `inputs_ids` in `BertModel`. + hidden_size: Size of the encoder layers and the pooler layer. + num_hidden_layers: Number of hidden layers in the Transformer encoder. + num_attention_heads: Number of attention heads for each attention layer in + the Transformer encoder. + intermediate_size: The size of the "intermediate" (i.e., feed-forward) + layer in the Transformer encoder. + hidden_act: The non-linear activation function (function or string) in the + encoder and pooler. + hidden_dropout_prob: The dropout probability for all fully connected + layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob: The dropout ratio for the attention + probabilities. + max_position_embeddings: The maximum sequence length that this model might + ever be used with. Typically set this to something large just in case + (e.g., 512 or 1024 or 2048). + type_vocab_size: The vocabulary size of the `token_type_ids` passed into + `BertModel`. + initializer_range: The stdev of the truncated_normal_initializer for + initializing all weight matrices. + embedding_size: (Optional) width of the factorized word embeddings. + backward_compatible: Boolean, whether the variables shape are compatible + with checkpoints converted from TF 1.x BERT. 
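# Illustrative sketch (added for clarity, not part of the files in this diff):
# the JSON round trip supported by the helpers defined further down in this
# class; the field values below are assumed.
import json

from official.nlp.bert.configs import BertConfig

config = BertConfig(vocab_size=30522, hidden_size=256, num_hidden_layers=4)
restored = BertConfig.from_dict(json.loads(config.to_json_string()))
assert restored.hidden_size == 256  # every field survives the round trip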
+ """ + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.initializer_range = initializer_range + self.embedding_size = embedding_size + self.backward_compatible = backward_compatible + + @classmethod + def from_dict(cls, json_object): + """Constructs a `BertConfig` from a Python dictionary of parameters.""" + config = BertConfig(vocab_size=None) + for (key, value) in six.iteritems(json_object): + config.__dict__[key] = value + return config + + @classmethod + def from_json_file(cls, json_file): + """Constructs a `BertConfig` from a json file of parameters.""" + with tf.io.gfile.GFile(json_file, "r") as reader: + text = reader.read() + return cls.from_dict(json.loads(text)) + + def to_dict(self): + """Serializes this instance to a Python dictionary.""" + output = copy.deepcopy(self.__dict__) + return output + + def to_json_string(self): + """Serializes this instance to a JSON string.""" + return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" + diff --git a/models/official/nlp/bert/export_tfhub.py b/models/official/nlp/bert/export_tfhub.py new file mode 100644 index 0000000000000000000000000000000000000000..5923309d1fa36a16d4cccda11650d9c3d0fcc616 --- /dev/null +++ b/models/official/nlp/bert/export_tfhub.py @@ -0,0 +1,95 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""A script to export the BERT core model as a TF-Hub SavedModel.""" +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +from absl import app +from absl import flags +from absl import logging +import tensorflow as tf +from typing import Text +from official.nlp.bert import bert_models +from official.nlp.bert import configs + +FLAGS = flags.FLAGS + +flags.DEFINE_string("bert_config_file", None, + "Bert configuration file to define core bert layers.") +flags.DEFINE_string("model_checkpoint_path", None, + "File path to TF model checkpoint.") +flags.DEFINE_string("export_path", None, "TF-Hub SavedModel destination path.") +flags.DEFINE_string("vocab_file", None, + "The vocabulary file that the BERT model was trained on.") +flags.DEFINE_bool("do_lower_case", None, "Whether to lowercase. If None, " + "do_lower_case will be enabled if 'uncased' appears in the " + "name of --vocab_file") + + +def create_bert_model(bert_config: configs.BertConfig) -> tf.keras.Model: + """Creates a BERT keras core model from BERT configuration. 
+ + Args: + bert_config: A `BertConfig` to create the core model. + + Returns: + A keras model. + """ + # Adds input layers just as placeholders. + input_word_ids = tf.keras.layers.Input( + shape=(None,), dtype=tf.int32, name="input_word_ids") + input_mask = tf.keras.layers.Input( + shape=(None,), dtype=tf.int32, name="input_mask") + input_type_ids = tf.keras.layers.Input( + shape=(None,), dtype=tf.int32, name="input_type_ids") + transformer_encoder = bert_models.get_transformer_encoder( + bert_config, sequence_length=None) + sequence_output, pooled_output = transformer_encoder( + [input_word_ids, input_mask, input_type_ids]) + # To keep consistent with legacy hub modules, the outputs are + # "pooled_output" and "sequence_output". + return tf.keras.Model( + inputs=[input_word_ids, input_mask, input_type_ids], + outputs=[pooled_output, sequence_output]), transformer_encoder + + +def export_bert_tfhub(bert_config: configs.BertConfig, + model_checkpoint_path: Text, hub_destination: Text, + vocab_file: Text, do_lower_case: bool = None): + """Restores a tf.keras.Model and saves for TF-Hub.""" + # If do_lower_case is not explicit, default to checking whether "uncased" is + # in the vocab file name + if do_lower_case is None: + do_lower_case = "uncased" in vocab_file + logging.info("Using do_lower_case=%s based on name of vocab_file=%s", + do_lower_case, vocab_file) + core_model, encoder = create_bert_model(bert_config) + checkpoint = tf.train.Checkpoint(model=encoder) + checkpoint.restore(model_checkpoint_path).assert_consumed() + core_model.vocab_file = tf.saved_model.Asset(vocab_file) + core_model.do_lower_case = tf.Variable(do_lower_case, trainable=False) + core_model.save(hub_destination, include_optimizer=False, save_format="tf") + + +def main(_): + bert_config = configs.BertConfig.from_json_file(FLAGS.bert_config_file) + export_bert_tfhub(bert_config, FLAGS.model_checkpoint_path, FLAGS.export_path, + FLAGS.vocab_file, FLAGS.do_lower_case) + + +if __name__ == "__main__": + app.run(main) diff --git a/models/official/nlp/bert/export_tfhub_test.py b/models/official/nlp/bert/export_tfhub_test.py new file mode 100644 index 0000000000000000000000000000000000000000..6b6fd40f5e1be5d5e8d4699d54c048add7435523 --- /dev/null +++ b/models/official/nlp/bert/export_tfhub_test.py @@ -0,0 +1,109 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests official.nlp.bert.export_tfhub.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +import numpy as np + +import tensorflow as tf +import tensorflow_hub as hub +from official.nlp.bert import configs +from official.nlp.bert import export_tfhub + + +class ExportTfhubTest(tf.test.TestCase): + + def test_export_tfhub(self): + # Exports a savedmodel for TF-Hub + hidden_size = 16 + bert_config = configs.BertConfig( + vocab_size=100, + hidden_size=hidden_size, + intermediate_size=32, + max_position_embeddings=128, + num_attention_heads=2, + num_hidden_layers=1) + bert_model, encoder = export_tfhub.create_bert_model(bert_config) + model_checkpoint_dir = os.path.join(self.get_temp_dir(), "checkpoint") + checkpoint = tf.train.Checkpoint(model=encoder) + checkpoint.save(os.path.join(model_checkpoint_dir, "test")) + model_checkpoint_path = tf.train.latest_checkpoint(model_checkpoint_dir) + + vocab_file = os.path.join(self.get_temp_dir(), "uncased_vocab.txt") + with tf.io.gfile.GFile(vocab_file, "w") as f: + f.write("dummy content") + + hub_destination = os.path.join(self.get_temp_dir(), "hub") + export_tfhub.export_bert_tfhub(bert_config, model_checkpoint_path, + hub_destination, vocab_file) + + # Restores a hub KerasLayer. + hub_layer = hub.KerasLayer(hub_destination, trainable=True) + + if hasattr(hub_layer, "resolved_object"): + # Checks meta attributes. + self.assertTrue(hub_layer.resolved_object.do_lower_case.numpy()) + with tf.io.gfile.GFile( + hub_layer.resolved_object.vocab_file.asset_path.numpy()) as f: + self.assertEqual("dummy content", f.read()) + # Checks the hub KerasLayer. + for source_weight, hub_weight in zip(bert_model.trainable_weights, + hub_layer.trainable_weights): + self.assertAllClose(source_weight.numpy(), hub_weight.numpy()) + + seq_length = 10 + dummy_ids = np.zeros((2, seq_length), dtype=np.int32) + hub_outputs = hub_layer([dummy_ids, dummy_ids, dummy_ids]) + source_outputs = bert_model([dummy_ids, dummy_ids, dummy_ids]) + + # The outputs of hub module are "pooled_output" and "sequence_output", + # while the outputs of encoder is in reversed order, i.e., + # "sequence_output" and "pooled_output". + encoder_outputs = reversed(encoder([dummy_ids, dummy_ids, dummy_ids])) + self.assertEqual(hub_outputs[0].shape, (2, hidden_size)) + self.assertEqual(hub_outputs[1].shape, (2, seq_length, hidden_size)) + for source_output, hub_output, encoder_output in zip( + source_outputs, hub_outputs, encoder_outputs): + self.assertAllClose(source_output.numpy(), hub_output.numpy()) + self.assertAllClose(source_output.numpy(), encoder_output.numpy()) + + # Test that training=True makes a difference (activates dropout). + def _dropout_mean_stddev(training, num_runs=20): + input_ids = np.array([[14, 12, 42, 95, 99]], np.int32) + inputs = [input_ids, np.ones_like(input_ids), np.zeros_like(input_ids)] + outputs = np.concatenate( + [hub_layer(inputs, training=training)[0] for _ in range(num_runs)]) + return np.mean(np.std(outputs, axis=0)) + self.assertLess(_dropout_mean_stddev(training=False), 1e-6) + self.assertGreater(_dropout_mean_stddev(training=True), 1e-3) + + # Test propagation of seq_length in shape inference. 
+ input_word_ids = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32) + input_mask = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32) + input_type_ids = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32) + pooled_output, sequence_output = hub_layer( + [input_word_ids, input_mask, input_type_ids]) + self.assertEqual(pooled_output.shape.as_list(), [None, hidden_size]) + self.assertEqual(sequence_output.shape.as_list(), + [None, seq_length, hidden_size]) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/bert/input_pipeline.py b/models/official/nlp/bert/input_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..ed3fd173d4379a75ab1e2e5a9ba0bbdcbaa0be42 --- /dev/null +++ b/models/official/nlp/bert/input_pipeline.py @@ -0,0 +1,285 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""BERT model input pipelines.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + + +def decode_record(record, name_to_features): + """Decodes a record to a TensorFlow example.""" + example = tf.io.parse_single_example(record, name_to_features) + + # tf.Example only supports tf.int64, but the TPU only supports tf.int32. + # So cast all int64 to int32. + for name in list(example.keys()): + t = example[name] + if t.dtype == tf.int64: + t = tf.cast(t, tf.int32) + example[name] = t + + return example + + +def single_file_dataset(input_file, name_to_features): + """Creates a single-file dataset to be passed for BERT custom training.""" + # For training, we want a lot of parallel reading and shuffling. + # For eval, we want no shuffling and parallel reading doesn't matter. + d = tf.data.TFRecordDataset(input_file) + d = d.map( + lambda record: decode_record(record, name_to_features), + num_parallel_calls=tf.data.experimental.AUTOTUNE) + + # When `input_file` is a path to a single file or a list + # containing a single path, disable auto sharding so that + # same input file is sent to all workers. 
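# (Editorial note, not in the original file: with AutoShardPolicy.OFF, tf.data
# does not try to split the single file across workers; every input pipeline
# reads the complete file instead.)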
+ if isinstance(input_file, str) or len(input_file) == 1: + options = tf.data.Options() + options.experimental_distribute.auto_shard_policy = ( + tf.data.experimental.AutoShardPolicy.OFF) + d = d.with_options(options) + return d + + +def create_pretrain_dataset(input_patterns, + seq_length, + max_predictions_per_seq, + batch_size, + is_training=True, + input_pipeline_context=None, + use_next_sentence_label=True, + use_position_id=False, + output_fake_labels=True): + """Creates input dataset from (tf)records files for pretraining.""" + name_to_features = { + 'input_ids': + tf.io.FixedLenFeature([seq_length], tf.int64), + 'input_mask': + tf.io.FixedLenFeature([seq_length], tf.int64), + 'segment_ids': + tf.io.FixedLenFeature([seq_length], tf.int64), + 'masked_lm_positions': + tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64), + 'masked_lm_ids': + tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64), + 'masked_lm_weights': + tf.io.FixedLenFeature([max_predictions_per_seq], tf.float32), + } + if use_next_sentence_label: + name_to_features['next_sentence_labels'] = tf.io.FixedLenFeature([1], + tf.int64) + if use_position_id: + name_to_features['position_ids'] = tf.io.FixedLenFeature([seq_length], + tf.int64) + for input_pattern in input_patterns: + if not tf.io.gfile.glob(input_pattern): + raise ValueError('%s does not match any files.' % input_pattern) + + dataset = tf.data.Dataset.list_files(input_patterns, shuffle=is_training) + + if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1: + dataset = dataset.shard(input_pipeline_context.num_input_pipelines, + input_pipeline_context.input_pipeline_id) + if is_training: + dataset = dataset.repeat() + + # We set shuffle buffer to exactly match total number of + # training files to ensure that training data is well shuffled. + input_files = [] + for input_pattern in input_patterns: + input_files.extend(tf.io.gfile.glob(input_pattern)) + dataset = dataset.shuffle(len(input_files)) + + # In parallel, create tf record dataset for each train files. + # cycle_length = 8 means that up to 8 files will be read and deserialized in + # parallel. You may want to increase this number if you have a large number of + # CPU cores. + dataset = dataset.interleave( + tf.data.TFRecordDataset, + cycle_length=8, + num_parallel_calls=tf.data.experimental.AUTOTUNE) + + if is_training: + dataset = dataset.shuffle(100) + + decode_fn = lambda record: decode_record(record, name_to_features) + dataset = dataset.map( + decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) + + def _select_data_from_record(record): + """Filter out features to use for pretraining.""" + x = { + 'input_word_ids': record['input_ids'], + 'input_mask': record['input_mask'], + 'input_type_ids': record['segment_ids'], + 'masked_lm_positions': record['masked_lm_positions'], + 'masked_lm_ids': record['masked_lm_ids'], + 'masked_lm_weights': record['masked_lm_weights'], + } + if use_next_sentence_label: + x['next_sentence_labels'] = record['next_sentence_labels'] + if use_position_id: + x['position_ids'] = record['position_ids'] + + # TODO(hongkuny): Remove the fake labels after migrating bert pretraining. 
+ if output_fake_labels: + return (x, record['masked_lm_weights']) + else: + return x + + dataset = dataset.map( + _select_data_from_record, + num_parallel_calls=tf.data.experimental.AUTOTUNE) + dataset = dataset.batch(batch_size, drop_remainder=is_training) + dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) + return dataset + + +def create_classifier_dataset(file_path, + seq_length, + batch_size, + is_training=True, + input_pipeline_context=None, + label_type=tf.int64, + include_sample_weights=False): + """Creates input dataset from (tf)records files for train/eval.""" + name_to_features = { + 'input_ids': tf.io.FixedLenFeature([seq_length], tf.int64), + 'input_mask': tf.io.FixedLenFeature([seq_length], tf.int64), + 'segment_ids': tf.io.FixedLenFeature([seq_length], tf.int64), + 'label_ids': tf.io.FixedLenFeature([], label_type), + } + if include_sample_weights: + name_to_features['weight'] = tf.io.FixedLenFeature([], tf.float32) + dataset = single_file_dataset(file_path, name_to_features) + + # The dataset is always sharded by number of hosts. + # num_input_pipelines is the number of hosts rather than number of cores. + if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1: + dataset = dataset.shard(input_pipeline_context.num_input_pipelines, + input_pipeline_context.input_pipeline_id) + + def _select_data_from_record(record): + x = { + 'input_word_ids': record['input_ids'], + 'input_mask': record['input_mask'], + 'input_type_ids': record['segment_ids'] + } + y = record['label_ids'] + if include_sample_weights: + w = record['weight'] + return (x, y, w) + return (x, y) + + if is_training: + dataset = dataset.shuffle(100) + dataset = dataset.repeat() + + dataset = dataset.map( + _select_data_from_record, + num_parallel_calls=tf.data.experimental.AUTOTUNE) + dataset = dataset.batch(batch_size, drop_remainder=is_training) + dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) + return dataset + + +def create_squad_dataset(file_path, + seq_length, + batch_size, + is_training=True, + input_pipeline_context=None): + """Creates input dataset from (tf)records files for train/eval.""" + name_to_features = { + 'input_ids': tf.io.FixedLenFeature([seq_length], tf.int64), + 'input_mask': tf.io.FixedLenFeature([seq_length], tf.int64), + 'segment_ids': tf.io.FixedLenFeature([seq_length], tf.int64), + } + if is_training: + name_to_features['start_positions'] = tf.io.FixedLenFeature([], tf.int64) + name_to_features['end_positions'] = tf.io.FixedLenFeature([], tf.int64) + else: + name_to_features['unique_ids'] = tf.io.FixedLenFeature([], tf.int64) + + dataset = single_file_dataset(file_path, name_to_features) + + # The dataset is always sharded by number of hosts. + # num_input_pipelines is the number of hosts rather than number of cores. 
+ if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1: + dataset = dataset.shard(input_pipeline_context.num_input_pipelines, + input_pipeline_context.input_pipeline_id) + + def _select_data_from_record(record): + """Dispatches record to features and labels.""" + x, y = {}, {} + for name, tensor in record.items(): + if name in ('start_positions', 'end_positions'): + y[name] = tensor + elif name == 'input_ids': + x['input_word_ids'] = tensor + elif name == 'segment_ids': + x['input_type_ids'] = tensor + else: + x[name] = tensor + return (x, y) + + if is_training: + dataset = dataset.shuffle(100) + dataset = dataset.repeat() + + dataset = dataset.map( + _select_data_from_record, + num_parallel_calls=tf.data.experimental.AUTOTUNE) + dataset = dataset.batch(batch_size, drop_remainder=True) + dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) + return dataset + + +def create_retrieval_dataset(file_path, + seq_length, + batch_size, + input_pipeline_context=None): + """Creates input dataset from (tf)records files for scoring.""" + name_to_features = { + 'input_ids': tf.io.FixedLenFeature([seq_length], tf.int64), + 'input_mask': tf.io.FixedLenFeature([seq_length], tf.int64), + 'segment_ids': tf.io.FixedLenFeature([seq_length], tf.int64), + 'int_iden': tf.io.FixedLenFeature([1], tf.int64), + } + dataset = single_file_dataset(file_path, name_to_features) + + # The dataset is always sharded by number of hosts. + # num_input_pipelines is the number of hosts rather than number of cores. + if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1: + dataset = dataset.shard(input_pipeline_context.num_input_pipelines, + input_pipeline_context.input_pipeline_id) + + def _select_data_from_record(record): + x = { + 'input_word_ids': record['input_ids'], + 'input_mask': record['input_mask'], + 'input_type_ids': record['segment_ids'] + } + y = record['int_iden'] + return (x, y) + + dataset = dataset.map( + _select_data_from_record, + num_parallel_calls=tf.data.experimental.AUTOTUNE) + dataset = dataset.batch(batch_size, drop_remainder=False) + dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) + return dataset diff --git a/models/official/nlp/bert/model_saving_utils.py b/models/official/nlp/bert/model_saving_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..13d2c9ed02f9a98d9dcbb2a60c46fa5cd13bb666 --- /dev/null +++ b/models/official/nlp/bert/model_saving_utils.py @@ -0,0 +1,77 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+"""Utilities to save models."""
+
+from __future__ import absolute_import
+from __future__ import division
+# from __future__ import google_type_annotations
+from __future__ import print_function
+
+import os
+
+from absl import logging
+import tensorflow as tf
+import typing
+
+
+def export_bert_model(model_export_path: typing.Text,
+                      model: tf.keras.Model,
+                      checkpoint_dir: typing.Optional[typing.Text] = None,
+                      restore_model_using_load_weights: bool = False) -> None:
+  """Export BERT model for serving which does not include the optimizer.
+
+  Arguments:
+    model_export_path: Path to which exported model will be saved.
+    model: Keras model object to export.
+    checkpoint_dir: Path from which model weights will be loaded, if
+      specified.
+    restore_model_using_load_weights: Whether to use checkpoint.restore() API
+      for custom checkpoint or to use model.load_weights() API. There are two
+      different ways to save checkpoints. One is using tf.train.Checkpoint and
+      another is using Keras model.save_weights(). Custom training loop
+      implementation uses tf.train.Checkpoint API and Keras ModelCheckpoint
+      callback internally uses model.save_weights() API. Since these two APIs
+      cannot be used together, the model loading logic must take into account
+      how the model checkpoint was saved.
+
+  Raises:
+    ValueError when either model_export_path or model is not specified.
+  """
+  if not model_export_path:
+    raise ValueError('model_export_path must be specified.')
+  if not isinstance(model, tf.keras.Model):
+    raise ValueError('model must be a tf.keras.Model object.')
+
+  if checkpoint_dir:
+    # Keras compile/fit() was used to save checkpoint using
+    # model.save_weights().
+    if restore_model_using_load_weights:
+      model_weight_path = os.path.join(checkpoint_dir, 'checkpoint')
+      assert tf.io.gfile.exists(model_weight_path)
+      model.load_weights(model_weight_path)
+
+    # tf.train.Checkpoint API was used via custom training loop logic.
+    else:
+      checkpoint = tf.train.Checkpoint(model=model)
+
+      # Restores the model from latest checkpoint.
+      latest_checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)
+      assert latest_checkpoint_file
+      logging.info('Checkpoint file %s found and restoring from '
+                   'checkpoint', latest_checkpoint_file)
+      checkpoint.restore(
+          latest_checkpoint_file).assert_existing_objects_matched()
+
+  model.save(model_export_path, include_optimizer=False, save_format='tf')
diff --git a/models/official/nlp/bert/model_training_utils.py b/models/official/nlp/bert/model_training_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0fe67615726906a6b1d3ef38a5ca9acfe8502de
--- /dev/null
+++ b/models/official/nlp/bert/model_training_utils.py
@@ -0,0 +1,572 @@
+# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================== +"""A light weight utilities to train NLP models.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import json +import os +import tempfile + +from absl import logging +import tensorflow as tf +from tensorflow.python.util import deprecation +from official.staging.training import grad_utils +from official.utils.misc import distribution_utils + +_SUMMARY_TXT = 'training_summary.txt' +_MIN_SUMMARY_STEPS = 10 + + +def _should_export_checkpoint(strategy): + return (not strategy) or strategy.extended.should_checkpoint + + +def _should_export_summary(strategy): + return (not strategy) or strategy.extended.should_save_summary + + +def _save_checkpoint(strategy, checkpoint, model_dir, checkpoint_prefix): + """Saves model to with provided checkpoint prefix.""" + + if _should_export_checkpoint(strategy): + checkpoint_path = os.path.join(model_dir, checkpoint_prefix) + saved_path = checkpoint.save(checkpoint_path) + logging.info('Saving model as TF checkpoint: %s', saved_path) + else: + # In multi worker training we need every worker to save checkpoint, because + # variables can trigger synchronization on read and synchronization needs + # all workers to participate. To avoid workers overriding each other we save + # to a temporary directory on non-chief workers. + tmp_dir = tempfile.mkdtemp() + checkpoint.save(os.path.join(tmp_dir, 'ckpt')) + tf.io.gfile.rmtree(tmp_dir) + return + + +def _get_input_iterator(input_fn, strategy): + """Returns distributed dataset iterator.""" + # When training with TPU pods, datasets needs to be cloned across + # workers. Since Dataset instance cannot be cloned in eager mode, we instead + # pass callable that returns a dataset. + if not callable(input_fn): + raise ValueError('`input_fn` should be a closure that returns a dataset.') + iterator = iter( + strategy.experimental_distribute_datasets_from_function(input_fn)) + return iterator + + +def _float_metric_value(metric): + """Gets the value of a float-value keras metric.""" + return metric.result().numpy().astype(float) + + +def steps_to_run(current_step, steps_per_epoch, steps_per_loop): + """Calculates steps to run on device.""" + if steps_per_loop <= 0: + raise ValueError('steps_per_loop should be positive integer.') + if steps_per_loop == 1: + return steps_per_loop + remainder_in_epoch = current_step % steps_per_epoch + if remainder_in_epoch != 0: + return min(steps_per_epoch - remainder_in_epoch, steps_per_loop) + else: + return steps_per_loop + + +def write_txt_summary(training_summary, summary_dir): + """Writes a summary text file to record stats.""" + if not tf.io.gfile.exists(summary_dir): + tf.io.gfile.mkdir(summary_dir) + summary_path = os.path.join(summary_dir, _SUMMARY_TXT) + with tf.io.gfile.GFile(summary_path, 'wb') as f: + logging.info('Training Summary: \n%s', str(training_summary)) + f.write(json.dumps(training_summary, indent=4)) + + +@deprecation.deprecated( + None, 'This function is deprecated. 
Please use Keras compile/fit instead.') +def run_customized_training_loop( + # pylint: disable=invalid-name + _sentinel=None, + # pylint: enable=invalid-name + strategy=None, + model_fn=None, + loss_fn=None, + scale_loss=True, + model_dir=None, + train_input_fn=None, + steps_per_epoch=None, + num_eval_per_epoch=1, + steps_per_loop=None, + epochs=1, + eval_input_fn=None, + eval_steps=None, + metric_fn=None, + init_checkpoint=None, + custom_callbacks=None, + run_eagerly=False, + sub_model_export_name=None, + explicit_allreduce=False, + pre_allreduce_callbacks=None, + post_allreduce_callbacks=None, + train_summary_interval=0): + """Run BERT pretrain model training using low-level API. + + Arguments: + _sentinel: Used to prevent positional parameters. Internal, do not use. + strategy: Distribution strategy on which to run low level training loop. + model_fn: Function that returns a tuple (model, sub_model). Caller of this + function should add optimizer to the `model` via calling + `model.compile()` API or manually setting `model.optimizer` attribute. + Second element of the returned tuple(sub_model) is an optional sub model + to be used for initial checkpoint -- if provided. + loss_fn: Function with signature func(labels, logits) and returns a loss + tensor. + scale_loss: Whether to divide the raw loss by number of replicas before + gradients calculation. + model_dir: Model directory used during training for restoring/saving model + weights. + train_input_fn: Function that returns a tf.data.Dataset used for training. + steps_per_epoch: Number of steps to run per epoch. At the end of each + epoch, model checkpoint will be saved and evaluation will be conducted + if evaluation dataset is provided. + num_eval_per_epoch: Number of evaluations per epoch. + steps_per_loop: Number of steps per graph-mode loop. In order to reduce + communication in eager context, training logs are printed every + steps_per_loop. + epochs: Number of epochs to train. + eval_input_fn: Function that returns evaluation dataset. If none, + evaluation is skipped. + eval_steps: Number of steps to run evaluation. Required if `eval_input_fn` + is not none. + metric_fn: A metrics function that returns a Keras Metric object to record + evaluation result using evaluation dataset or with training dataset + after every epoch. + init_checkpoint: Optional checkpoint to load to `sub_model` returned by + `model_fn`. + custom_callbacks: A list of Keras Callbacks objects to run during + training. More specifically, `on_train_begin(), on_train_end(), + on_batch_begin()`, `on_batch_end()`, `on_epoch_begin()`, + `on_epoch_end()` methods are invoked during training. + Note that some metrics may be missing from `logs`. + run_eagerly: Whether to run model training in pure eager execution. This + should be disable for TPUStrategy. + sub_model_export_name: If not None, will export `sub_model` returned by + `model_fn` into checkpoint files. The name of intermediate checkpoint + file is {sub_model_export_name}_step_{step}.ckpt and the last + checkpint's name is {sub_model_export_name}.ckpt; if None, `sub_model` + will not be exported as checkpoint. + explicit_allreduce: Whether to explicitly perform gradient allreduce, + instead of relying on implicit allreduce in optimizer.apply_gradients(). + default is False. For now, if training using FP16 mixed precision, + explicit allreduce will aggregate gradients in FP16 format. For TPU and + GPU training using FP32, explicit allreduce will aggregate gradients in + FP32 format. 
+    pre_allreduce_callbacks: A list of callback functions that take gradient
+      and model variable pairs as input, manipulate them, and return new
+      gradient and model variable pairs. The callback functions will be
+      invoked in the list order and before gradients are allreduced. With
+      mixed precision training, the pre_allreduce_callbacks will be applied on
+      scaled_gradients. Default is no callbacks. Only used when
+      explicit_allreduce=True.
+    post_allreduce_callbacks: A list of callback functions that take gradient
+      and model variable pairs as input, manipulate them, and return new
+      gradient and model variable pairs. The callback functions will be
+      invoked in the list order and right before gradients are applied to
+      variables for updates. Default is no callbacks. Only used when
+      explicit_allreduce=True.
+    train_summary_interval: Step interval for training summaries. If the value
+      is a negative number, then training summaries are not enabled.
+
+  Returns:
+    Trained model.
+
+  Raises:
+    ValueError: (1) When model returned by `model_fn` does not have optimizer
+      attribute or when required parameters are set to None. (2) eval args are
+      not specified correctly. (3) metric_fn must be a callable if specified.
+      (4) sub_model_export_name is specified, but `sub_model` returned by
+      `model_fn` is None.
+  """
+
+  if _sentinel is not None:
+    raise ValueError('only call `run_customized_training_loop()` '
+                     'with named arguments.')
+
+  required_arguments = [
+      strategy, model_fn, loss_fn, model_dir, steps_per_epoch, train_input_fn
+  ]
+
+  steps_between_evals = int(steps_per_epoch / num_eval_per_epoch)
+  if [arg for arg in required_arguments if arg is None]:
+    raise ValueError('`strategy`, `model_fn`, `loss_fn`, `model_dir`, '
+                     '`steps_per_epoch` and `train_input_fn` are required '
+                     'parameters.')
+  if not steps_per_loop:
+    if tf.config.list_logical_devices('TPU'):
+      # One can't fully utilize a TPU with steps_per_loop=1, so in this case
+      # default users to a more useful value.
+      steps_per_loop = min(1000, steps_between_evals)
+    else:
+      steps_per_loop = 1
+    logging.info('steps_per_loop not specified. Using steps_per_loop=%d',
+                 steps_per_loop)
+  if steps_per_loop > steps_between_evals:
+    logging.warning(
+        'steps_per_loop: %d is specified to be greater than '
+        ' steps_between_evals: %d, we will use steps_between_evals as'
+        ' steps_per_loop.', steps_per_loop, steps_between_evals)
+    steps_per_loop = steps_between_evals
+  assert tf.executing_eagerly()
+
+  if run_eagerly:
+    if isinstance(strategy, tf.distribute.experimental.TPUStrategy):
+      raise ValueError(
+          'TPUStrategy should not run eagerly as it heavily relies on graph'
+          ' optimization for the distributed system.')
+
+  if eval_input_fn and eval_steps is None:
+    raise ValueError(
+        '`eval_steps` is required when `eval_input_fn` is not none.')
+  if metric_fn and not callable(metric_fn):
+    raise ValueError(
+        'if `metric_fn` is specified, metric_fn must be a callable.')
+
+  total_training_steps = steps_per_epoch * epochs
+  train_iterator = _get_input_iterator(train_input_fn, strategy)
+  eval_loss_metric = tf.keras.metrics.Mean('training_loss', dtype=tf.float32)
+
+  with distribution_utils.get_strategy_scope(strategy):
+    # To correctly place the model weights on accelerators,
+    # model and optimizer should be created in scope.
+ model, sub_model = model_fn() + if not hasattr(model, 'optimizer'): + raise ValueError('User should set optimizer attribute to model ' + 'inside `model_fn`.') + if sub_model_export_name and sub_model is None: + raise ValueError('sub_model_export_name is specified as %s, but ' + 'sub_model is None.' % sub_model_export_name) + + callback_list = tf.keras.callbacks.CallbackList( + callbacks=custom_callbacks, model=model) + + optimizer = model.optimizer + + if init_checkpoint: + logging.info( + 'Checkpoint file %s found and restoring from ' + 'initial checkpoint for core model.', init_checkpoint) + checkpoint = tf.train.Checkpoint(model=sub_model) + checkpoint.restore(init_checkpoint).assert_existing_objects_matched() + logging.info('Loading from checkpoint file completed') + + train_loss_metric = tf.keras.metrics.Mean('training_loss', dtype=tf.float32) + eval_metrics = [metric_fn()] if metric_fn else [] + # If evaluation is required, make a copy of metric as it will be used by + # both train and evaluation. + train_metrics = [ + metric.__class__.from_config(metric.get_config()) + for metric in eval_metrics + ] + + # Create summary writers + if _should_export_summary(strategy): + summary_dir = os.path.join(model_dir, 'summaries') + else: + # In multi worker training we need every worker to write summary, because + # variables can trigger synchronization on read and synchronization needs + # all workers to participate. + summary_dir = tempfile.mkdtemp() + eval_summary_writer = tf.summary.create_file_writer( + os.path.join(summary_dir, 'eval')) + last_summary_step = 0 + if steps_per_loop >= _MIN_SUMMARY_STEPS and train_summary_interval >= 0: + # Only writes summary when the stats are collected sufficiently over + # enough steps. + train_summary_writer = tf.summary.create_file_writer( + os.path.join(summary_dir, 'train')) + else: + train_summary_writer = tf.summary.create_noop_writer() + + # Collects training variables. + training_vars = model.trainable_variables + + def _replicated_step(inputs): + """Replicated training step.""" + + inputs, labels = inputs + with tf.GradientTape() as tape: + model_outputs = model(inputs, training=True) + loss = loss_fn(labels, model_outputs) + # Raw loss is used for reporting in metrics/logs. + raw_loss = loss + if scale_loss: + # Scales down the loss for gradients to be invariant from replicas. + loss = loss / strategy.num_replicas_in_sync + + if explicit_allreduce: + grad_utils.minimize_using_explicit_allreduce(tape, optimizer, loss, + training_vars, + pre_allreduce_callbacks, + post_allreduce_callbacks) + else: + if isinstance(optimizer, + tf.keras.mixed_precision.experimental.LossScaleOptimizer): + with tape: + scaled_loss = optimizer.get_scaled_loss(loss) + scaled_grads = tape.gradient(scaled_loss, training_vars) + grads = optimizer.get_unscaled_gradients(scaled_grads) + else: + grads = tape.gradient(loss, training_vars) + optimizer.apply_gradients(zip(grads, training_vars)) + # For reporting, the metric takes the mean of losses. + train_loss_metric.update_state(raw_loss) + for metric in train_metrics: + metric.update_state(labels, model_outputs) + + @tf.function + def train_steps(iterator, steps): + """Performs distributed training steps in a loop. + + Args: + iterator: the distributed iterator of training datasets. + steps: an tf.int32 integer tensor to specify number of steps to run + inside host training loop. + + Raises: + ValueError: Any of the arguments or tensor shapes are invalid. 
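# Illustrative aside (added for clarity, not part of the files in this diff):
# the Tensor check below exists because a tf.function retraces for every new
# Python integer argument, while Tensor arguments reuse a single trace.
import tensorflow as tf

@tf.function
def _count(steps):
  total = tf.constant(0)
  for _ in tf.range(steps):
    total += 1
  return total

_count(tf.constant(5, dtype=tf.int32))  # traced once, reused for other values
_count(7)  # a plain Python int triggers a new trace for each distinct value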
+ """ + if not isinstance(steps, tf.Tensor): + raise ValueError('steps should be an Tensor. Python object may cause ' + 'retracing.') + + for _ in tf.range(steps): + strategy.run(_replicated_step, args=(next(iterator),)) + + def train_single_step(iterator): + """Performs a distributed training step. + + Args: + iterator: the distributed iterator of training datasets. + + Raises: + ValueError: Any of the arguments or tensor shapes are invalid. + """ + strategy.run(_replicated_step, args=(next(iterator),)) + + def test_step(iterator): + """Calculates evaluation metrics on distributed devices.""" + + def _test_step_fn(inputs): + """Replicated accuracy calculation.""" + + inputs, labels = inputs + model_outputs = model(inputs, training=False) + for metric in eval_metrics: + metric.update_state(labels, model_outputs) + return model_outputs, labels + + outputs, labels = strategy.run(_test_step_fn, args=(next(iterator),)) + outputs = tf.nest.map_structure(strategy.experimental_local_results, + outputs) + labels = tf.nest.map_structure(strategy.experimental_local_results, + labels) + return outputs, labels + + if not run_eagerly: + train_single_step = tf.function(train_single_step) + test_step = tf.function(test_step) + + def _run_evaluation(current_training_step, test_iterator): + """Runs validation steps and aggregate metrics. + + Args: + current_training_step: tf.int32 tensor containing the current step. + test_iterator: distributed iterator of test datasets. + + Returns: + A dict of metic names and values. + """ + # The last batch of the evaluation is often smaller than previous ones. + # Moreover, in some distributed pieces it might even be empty. Therefore, + # different from the way training_loss is calculated, it is needed to + # gather all the logits and labels here to calculate the evaluation loss + # outside. + loss_list, loss_weights = list(), list() + for _ in range(eval_steps): + outputs, labels = test_step(test_iterator) + for cur_logits, cur_labels in zip(outputs, labels): + # This is to handle cases when cur_labels is not a single tensor, + # but a dict of tensors. + cur_weight = tf.shape(tf.nest.flatten(cur_labels)[0])[0] + if cur_weight != 0: + loss_list.append(loss_fn(cur_labels, cur_logits).numpy()) + loss_weights.append(cur_weight) + # The sample_weights are the actual number of examples in each batch, + # a summation of numbers of examples in each replica if using + # distributed training. + eval_loss_metric.update_state(loss_list, sample_weight=loss_weights) + + logs = {} + with eval_summary_writer.as_default(): + for metric in [eval_loss_metric] + eval_metrics + model.metrics: + metric_value = _float_metric_value(metric) + logs[metric.name] = metric_value + logging.info('Step: [%d] Validation %s = %f', current_training_step, + metric.name, metric_value) + tf.summary.scalar( + metric.name, metric_value, step=current_training_step) + eval_summary_writer.flush() + + return logs + + # Training loop starts here. 
+ checkpoint = tf.train.Checkpoint( + model=model, optimizer=optimizer, global_step=optimizer.iterations) + sub_model_checkpoint = tf.train.Checkpoint( + model=sub_model, + global_step=optimizer.iterations) if sub_model_export_name else None + + latest_checkpoint_file = tf.train.latest_checkpoint(model_dir) + if latest_checkpoint_file: + logging.info('Checkpoint file %s found and restoring from ' + 'checkpoint', latest_checkpoint_file) + checkpoint.restore(latest_checkpoint_file) + logging.info('Loading from checkpoint file completed') + + current_step = optimizer.iterations.numpy() + checkpoint_name = 'ctl_step_{step}.ckpt' + + logs = {} + callback_list.on_train_begin() + while current_step < total_training_steps and not model.stop_training: + if current_step % steps_per_epoch == 0: + callback_list.on_epoch_begin( + int(current_step / steps_per_epoch) + 1) + + # Training loss/metric are taking average over steps inside micro + # training loop. We reset the their values before each round. + train_loss_metric.reset_states() + for metric in train_metrics + model.metrics: + metric.reset_states() + + callback_list.on_batch_begin(current_step) + # Runs several steps in the host while loop. + steps = steps_to_run(current_step, steps_between_evals, steps_per_loop) + + if tf.config.list_physical_devices('GPU'): + # TODO(zongweiz): merge with train_steps once tf.while_loop + # GPU performance bugs are fixed. + for _ in range(steps): + train_single_step(train_iterator) + else: + # Converts steps to a Tensor to avoid tf.function retracing. + train_steps(train_iterator, tf.convert_to_tensor(steps, dtype=tf.int32)) + train_loss = _float_metric_value(train_loss_metric) + current_step += steps + + # Updates training logging. + training_status = 'Train Step: %d/%d / loss = %s' % ( + current_step, total_training_steps, train_loss) + + if current_step >= last_summary_step + train_summary_interval: + summary_writer = train_summary_writer + last_summary_step = current_step + else: + summary_writer = tf.summary.create_noop_writer() + + with summary_writer.as_default(): + if callable(optimizer.learning_rate): + tf.summary.scalar( + 'learning_rate', + optimizer.learning_rate(current_step), + step=current_step) + tf.summary.scalar(train_loss_metric.name, train_loss, step=current_step) + for metric in train_metrics + model.metrics: + metric_value = _float_metric_value(metric) + training_status += ' %s = %f' % (metric.name, metric_value) + tf.summary.scalar(metric.name, metric_value, step=current_step) + summary_writer.flush() + logging.info(training_status) + + # If no need for evaluation, we only call on_batch_end with train_loss, + # this is to ensure we get granular global_step/sec on Tensorboard. + if current_step % steps_between_evals: + callback_list.on_batch_end(current_step - 1, {'loss': train_loss}) + else: + # Save a submodel with the step in the file name after each epoch. + if sub_model_export_name: + _save_checkpoint( + strategy, sub_model_checkpoint, model_dir, + '%s_step_%d.ckpt' % (sub_model_export_name, current_step)) + + # Save model checkpoints and run validation steps after each epoch + # (with the exception of the final epoch which is handled after the + # training loop). 
+ if current_step < total_training_steps: + _save_checkpoint(strategy, checkpoint, model_dir, + checkpoint_name.format(step=current_step)) + if eval_input_fn: + logging.info('Running evaluation after step: %s.', current_step) + logs = _run_evaluation(current_step, + _get_input_iterator(eval_input_fn, strategy)) + # Re-initialize evaluation metric. + eval_loss_metric.reset_states() + for metric in eval_metrics + model.metrics: + metric.reset_states() + # We add train_loss here rather than call on_batch_end twice to make + # sure that no duplicated values are generated. + logs['loss'] = train_loss + callback_list.on_batch_end(current_step - 1, logs) + + # Calls on_epoch_end after each real epoch ends to prevent mis-calculation + # of training steps. + if current_step % steps_per_epoch == 0: + callback_list.on_epoch_end(int(current_step / steps_per_epoch), logs) + + if sub_model_export_name: + _save_checkpoint(strategy, sub_model_checkpoint, model_dir, + '%s.ckpt' % sub_model_export_name) + + _save_checkpoint(strategy, checkpoint, model_dir, + checkpoint_name.format(step=current_step)) + if eval_input_fn: + logging.info('Running final evaluation after training is complete.') + logs = _run_evaluation(current_step, + _get_input_iterator(eval_input_fn, strategy)) + callback_list.on_epoch_end(int(current_step / steps_per_epoch), logs) + training_summary = { + 'total_training_steps': total_training_steps, + 'train_loss': _float_metric_value(train_loss_metric), + } + for metric in model.metrics: + training_summary[metric.name] = _float_metric_value(metric) + if eval_metrics: + # TODO(hongkuny): Cleans up summary reporting in text. + training_summary['last_train_metrics'] = _float_metric_value( + train_metrics[0]) + training_summary['eval_metrics'] = _float_metric_value(eval_metrics[0]) + + write_txt_summary(training_summary, summary_dir) + + if not _should_export_summary(strategy): + tf.io.gfile.rmtree(summary_dir) + + callback_list.on_train_end() + + return model diff --git a/models/official/nlp/bert/model_training_utils_test.py b/models/official/nlp/bert/model_training_utils_test.py new file mode 100644 index 0000000000000000000000000000000000000000..4c85a6c9b520a1b4e39e6abdfde503b35034d29e --- /dev/null +++ b/models/official/nlp/bert/model_training_utils_test.py @@ -0,0 +1,308 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for official.modeling.training.model_training_utils.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from absl import logging +from absl.testing import parameterized +from absl.testing.absltest import mock +import numpy as np +import tensorflow as tf + +from tensorflow.python.distribute import combinations +from tensorflow.python.distribute import strategy_combinations +from official.nlp.bert import model_training_utils + + +def eager_strategy_combinations(): + return combinations.combine( + distribution=[ + strategy_combinations.default_strategy, + strategy_combinations.tpu_strategy, + strategy_combinations.one_device_strategy_gpu, + strategy_combinations.mirrored_strategy_with_gpu_and_cpu, + strategy_combinations.mirrored_strategy_with_two_gpus, + ], + mode='eager', + ) + + +def eager_gpu_strategy_combinations(): + return combinations.combine( + distribution=[ + strategy_combinations.default_strategy, + strategy_combinations.one_device_strategy_gpu, + strategy_combinations.mirrored_strategy_with_gpu_and_cpu, + strategy_combinations.mirrored_strategy_with_two_gpus, + ], + mode='eager', + ) + + +def create_fake_data_input_fn(batch_size, features_shape, num_classes): + """Creates a dummy input function with the given feature and label shapes. + + Args: + batch_size: integer. + features_shape: list[int]. Feature shape for an individual example. + num_classes: integer. Number of labels. + + Returns: + An input function that is usable in the executor. + """ + + def _dataset_fn(input_context=None): + """An input function for generating fake data.""" + local_batch_size = input_context.get_per_replica_batch_size(batch_size) + features = np.random.rand(64, *features_shape) + labels = np.random.randint(2, size=[64, num_classes]) + # Convert the inputs to a Dataset. + dataset = tf.data.Dataset.from_tensor_slices((features, labels)) + dataset = dataset.shard(input_context.num_input_pipelines, + input_context.input_pipeline_id) + + def _assign_dtype(features, labels): + features = tf.cast(features, tf.float32) + labels = tf.cast(labels, tf.float32) + return features, labels + + # Shuffle, repeat, and batch the examples. 
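The input_context received by this dataset function is a tf.distribute.InputContext; a small sketch of the two calls it supplies, with hypothetical sizes (the shuffle/repeat/batch code continues right below):

import tensorflow as tf

ctx = tf.distribute.InputContext(
    num_input_pipelines=2, input_pipeline_id=0, num_replicas_in_sync=2)
per_replica_batch = ctx.get_per_replica_batch_size(8)  # 8 // 2 == 4
# dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id) then hands this
# pipeline every second element, so the two pipelines read disjoint halves.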
+ dataset = dataset.map(_assign_dtype) + dataset = dataset.shuffle(64).repeat() + dataset = dataset.batch(local_batch_size, drop_remainder=True) + dataset = dataset.prefetch(buffer_size=64) + return dataset + + return _dataset_fn + + +def create_model_fn(input_shape, num_classes, use_float16=False): + + def _model_fn(): + """A one-layer softmax model suitable for testing.""" + input_layer = tf.keras.layers.Input(shape=input_shape) + x = tf.keras.layers.Dense(num_classes, activation='relu')(input_layer) + output_layer = tf.keras.layers.Dense(num_classes, activation='softmax')(x) + sub_model = tf.keras.models.Model(input_layer, x, name='sub_model') + model = tf.keras.models.Model(input_layer, output_layer, name='model') + model.add_metric( + tf.reduce_mean(input_layer), name='mean_input', aggregation='mean') + model.optimizer = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.9) + if use_float16: + model.optimizer = ( + tf.keras.mixed_precision.experimental.LossScaleOptimizer( + model.optimizer, loss_scale='dynamic')) + return model, sub_model + + return _model_fn + + +def metric_fn(): + """Gets a tf.keras metric object.""" + return tf.keras.metrics.CategoricalAccuracy(name='accuracy', dtype=tf.float32) + + +def summaries_with_matching_keyword(keyword, summary_dir): + """Yields summary protos matching given keyword from event file.""" + event_paths = tf.io.gfile.glob(os.path.join(summary_dir, 'events*')) + for event in tf.compat.v1.train.summary_iterator(event_paths[-1]): + if event.summary is not None: + for value in event.summary.value: + if keyword in value.tag: + logging.error(event) + yield event.summary + + +def check_eventfile_for_keyword(keyword, summary_dir): + """Checks event files for the keyword.""" + return any(summaries_with_matching_keyword(keyword, summary_dir)) + + +class RecordingCallback(tf.keras.callbacks.Callback): + + def __init__(self): + self.batch_begin = [] # (batch, logs) + self.batch_end = [] # (batch, logs) + self.epoch_begin = [] # (epoch, logs) + self.epoch_end = [] # (epoch, logs) + + def on_batch_begin(self, batch, logs=None): + self.batch_begin.append((batch, logs)) + + def on_batch_end(self, batch, logs=None): + self.batch_end.append((batch, logs)) + + def on_epoch_begin(self, epoch, logs=None): + self.epoch_begin.append((epoch, logs)) + + def on_epoch_end(self, epoch, logs=None): + self.epoch_end.append((epoch, logs)) + + +class ModelTrainingUtilsTest(tf.test.TestCase, parameterized.TestCase): + + def setUp(self): + super(ModelTrainingUtilsTest, self).setUp() + self._model_fn = create_model_fn(input_shape=[128], num_classes=3) + + def run_training(self, strategy, model_dir, steps_per_loop, run_eagerly): + input_fn = create_fake_data_input_fn( + batch_size=8, features_shape=[128], num_classes=3) + model_training_utils.run_customized_training_loop( + strategy=strategy, + model_fn=self._model_fn, + loss_fn=tf.keras.losses.categorical_crossentropy, + model_dir=model_dir, + steps_per_epoch=20, + steps_per_loop=steps_per_loop, + epochs=2, + train_input_fn=input_fn, + eval_input_fn=input_fn, + eval_steps=10, + init_checkpoint=None, + sub_model_export_name='my_submodel_name', + metric_fn=metric_fn, + custom_callbacks=None, + run_eagerly=run_eagerly) + + @combinations.generate(eager_strategy_combinations()) + def test_train_eager_single_step(self, distribution): + model_dir = self.get_temp_dir() + if isinstance(distribution, tf.distribute.experimental.TPUStrategy): + with self.assertRaises(ValueError): + self.run_training( + distribution, model_dir, 
steps_per_loop=1, run_eagerly=True) + else: + self.run_training( + distribution, model_dir, steps_per_loop=1, run_eagerly=True) + + @combinations.generate(eager_gpu_strategy_combinations()) + def test_train_eager_mixed_precision(self, distribution): + model_dir = self.get_temp_dir() + policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16') + tf.keras.mixed_precision.experimental.set_policy(policy) + self._model_fn = create_model_fn( + input_shape=[128], num_classes=3, use_float16=True) + self.run_training( + distribution, model_dir, steps_per_loop=1, run_eagerly=True) + + @combinations.generate(eager_strategy_combinations()) + def test_train_check_artifacts(self, distribution): + model_dir = self.get_temp_dir() + self.run_training( + distribution, model_dir, steps_per_loop=10, run_eagerly=False) + + # Two checkpoints should be saved after two epochs. + files = map(os.path.basename, + tf.io.gfile.glob(os.path.join(model_dir, 'ctl_step_*index'))) + self.assertCountEqual(['ctl_step_20.ckpt-1.index', + 'ctl_step_40.ckpt-2.index'], files) + + # Three submodel checkpoints should be saved after two epochs (one after + # each epoch plus one final). + files = map(os.path.basename, + tf.io.gfile.glob(os.path.join(model_dir, + 'my_submodel_name*index'))) + self.assertCountEqual(['my_submodel_name.ckpt-3.index', + 'my_submodel_name_step_20.ckpt-1.index', + 'my_submodel_name_step_40.ckpt-2.index'], files) + + self.assertNotEmpty( + tf.io.gfile.glob( + os.path.join(model_dir, 'summaries/training_summary*'))) + + # Loss and accuracy values should be written into summaries. + self.assertTrue( + check_eventfile_for_keyword('loss', + os.path.join(model_dir, 'summaries/train'))) + self.assertTrue( + check_eventfile_for_keyword('accuracy', + os.path.join(model_dir, 'summaries/train'))) + self.assertTrue( + check_eventfile_for_keyword('mean_input', + os.path.join(model_dir, 'summaries/train'))) + self.assertTrue( + check_eventfile_for_keyword('accuracy', + os.path.join(model_dir, 'summaries/eval'))) + self.assertTrue( + check_eventfile_for_keyword('mean_input', + os.path.join(model_dir, 'summaries/eval'))) + + @combinations.generate(eager_strategy_combinations()) + def test_train_check_callbacks(self, distribution): + model_dir = self.get_temp_dir() + callback = RecordingCallback() + callbacks = [callback] + input_fn = create_fake_data_input_fn( + batch_size=8, features_shape=[128], num_classes=3) + model_training_utils.run_customized_training_loop( + strategy=distribution, + model_fn=self._model_fn, + loss_fn=tf.keras.losses.categorical_crossentropy, + model_dir=model_dir, + steps_per_epoch=20, + num_eval_per_epoch=4, + steps_per_loop=10, + epochs=2, + train_input_fn=input_fn, + eval_input_fn=input_fn, + eval_steps=10, + init_checkpoint=None, + metric_fn=metric_fn, + custom_callbacks=callbacks, + run_eagerly=False) + self.assertEqual(callback.epoch_begin, [(1, {}), (2, {})]) + epoch_ends, epoch_end_infos = zip(*callback.epoch_end) + self.assertEqual(list(epoch_ends), [1, 2, 2]) + for info in epoch_end_infos: + self.assertIn('accuracy', info) + + self.assertEqual(callback.batch_begin, [(0, {}), (5, {}), (10, {}), + (15, {}), (20, {}), (25, {}), + (30, {}), (35, {})]) + batch_ends, batch_end_infos = zip(*callback.batch_end) + self.assertEqual(list(batch_ends), [4, 9, 14, 19, 24, 29, 34, 39]) + for info in batch_end_infos: + self.assertIn('loss', info) + + @combinations.generate( + combinations.combine( + distribution=[ + strategy_combinations.one_device_strategy_gpu, + ], + mode='eager', + )) + 
def test_train_check_artifacts_non_chief(self, distribution): + # We shouldn't export artifacts on non-chief workers. Since there's no easy + # way to test with real MultiWorkerMirroredStrategy, we patch the strategy + # to make it as if it's MultiWorkerMirroredStrategy on non-chief workers. + extended = distribution.extended + with mock.patch.object(extended.__class__, 'should_checkpoint', + new_callable=mock.PropertyMock, return_value=False), \ + mock.patch.object(extended.__class__, 'should_save_summary', + new_callable=mock.PropertyMock, return_value=False): + model_dir = self.get_temp_dir() + self.run_training( + distribution, model_dir, steps_per_loop=10, run_eagerly=False) + self.assertEmpty(tf.io.gfile.listdir(model_dir)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/nlp/bert/run_classifier.py b/models/official/nlp/bert/run_classifier.py new file mode 100644 index 0000000000000000000000000000000000000000..e2eb525ae4335091c78eb4ead72494f8021a7f89 --- /dev/null +++ b/models/official/nlp/bert/run_classifier.py @@ -0,0 +1,497 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""BERT classification or regression finetuning runner in TF 2.x.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import json +import math +import os + +from absl import app +from absl import flags +from absl import logging +import gin +import tensorflow as tf +from official.modeling import performance +from official.nlp import optimization +from official.nlp.bert import bert_models +from official.nlp.bert import common_flags +from official.nlp.bert import configs as bert_configs +from official.nlp.bert import input_pipeline +from official.nlp.bert import model_saving_utils +from official.utils.misc import distribution_utils +from official.utils.misc import keras_utils + +flags.DEFINE_enum( + 'mode', 'train_and_eval', ['train_and_eval', 'export_only', 'predict'], + 'One of {"train_and_eval", "export_only", "predict"}. `train_and_eval`: ' + 'trains the model and evaluates in the meantime. ' + '`export_only`: will take the latest checkpoint inside ' + 'model_dir and export a `SavedModel`. `predict`: takes a checkpoint and ' + 'restores the model to output predictions on the test set.') +flags.DEFINE_string('train_data_path', None, + 'Path to training data for BERT classifier.') +flags.DEFINE_string('eval_data_path', None, + 'Path to evaluation data for BERT classifier.') +flags.DEFINE_string( + 'input_meta_data_path', None, + 'Path to file that contains meta data about input ' + 'to be used for training and evaluation.') +flags.DEFINE_string('predict_checkpoint_path', None, + 'Path to the checkpoint for predictions.') +flags.DEFINE_integer( + 'num_eval_per_epoch', 1, + 'Number of evaluations per epoch. 
The purpose of this flag is to provide ' + 'more granular evaluation scores and checkpoints. For example, if original ' + 'data has N samples and num_eval_per_epoch is n, then each epoch will be ' + 'evaluated every N/n samples.') +flags.DEFINE_integer('train_batch_size', 32, 'Batch size for training.') +flags.DEFINE_integer('eval_batch_size', 32, 'Batch size for evaluation.') + +common_flags.define_common_bert_flags() + +FLAGS = flags.FLAGS + +LABEL_TYPES_MAP = {'int': tf.int64, 'float': tf.float32} + + +def get_loss_fn(num_classes): + """Gets the classification loss function.""" + + def classification_loss_fn(labels, logits): + """Classification loss.""" + labels = tf.squeeze(labels) + log_probs = tf.nn.log_softmax(logits, axis=-1) + one_hot_labels = tf.one_hot( + tf.cast(labels, dtype=tf.int32), depth=num_classes, dtype=tf.float32) + per_example_loss = -tf.reduce_sum( + tf.cast(one_hot_labels, dtype=tf.float32) * log_probs, axis=-1) + return tf.reduce_mean(per_example_loss) + + return classification_loss_fn + + +def get_dataset_fn(input_file_pattern, + max_seq_length, + global_batch_size, + is_training, + label_type=tf.int64, + include_sample_weights=False): + """Gets a closure to create a dataset.""" + + def _dataset_fn(ctx=None): + """Returns tf.data.Dataset for distributed BERT pretraining.""" + batch_size = ctx.get_per_replica_batch_size( + global_batch_size) if ctx else global_batch_size + dataset = input_pipeline.create_classifier_dataset( + tf.io.gfile.glob(input_file_pattern), + max_seq_length, + batch_size, + is_training=is_training, + input_pipeline_context=ctx, + label_type=label_type, + include_sample_weights=include_sample_weights) + return dataset + + return _dataset_fn + + +def run_bert_classifier(strategy, + bert_config, + input_meta_data, + model_dir, + epochs, + steps_per_epoch, + steps_per_loop, + eval_steps, + warmup_steps, + initial_lr, + init_checkpoint, + train_input_fn, + eval_input_fn, + training_callbacks=True, + custom_callbacks=None, + custom_metrics=None): + """Run BERT classifier training using low-level API.""" + max_seq_length = input_meta_data['max_seq_length'] + num_classes = input_meta_data.get('num_labels', 1) + is_regression = num_classes == 1 + + def _get_classifier_model(): + """Gets a classifier model.""" + classifier_model, core_model = ( + bert_models.classifier_model( + bert_config, + num_classes, + max_seq_length, + hub_module_url=FLAGS.hub_module_url, + hub_module_trainable=FLAGS.hub_module_trainable)) + optimizer = optimization.create_optimizer(initial_lr, + steps_per_epoch * epochs, + warmup_steps, FLAGS.end_lr, + FLAGS.optimizer_type) + classifier_model.optimizer = performance.configure_optimizer( + optimizer, + use_float16=common_flags.use_float16(), + use_graph_rewrite=common_flags.use_graph_rewrite()) + return classifier_model, core_model + + # tf.keras.losses objects accept optional sample_weight arguments (eg. coming + # from the dataset) to compute weighted loss, as used for the regression + # tasks. The classification tasks, using the custom get_loss_fn don't accept + # sample weights though. + loss_fn = (tf.keras.losses.MeanSquaredError() if is_regression + else get_loss_fn(num_classes)) + + # Defines evaluation metrics function, which will create metrics in the + # correct device and strategy scope. 
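Before the metric selection below, a quick sanity check with made-up values that the hand-rolled classification loss above agrees with Keras' sparse categorical crossentropy on logits (assumes get_loss_fn from this module is in scope):

import tensorflow as tf

logits = tf.constant([[2.0, 0.5, -1.0], [0.1, 0.2, 3.0]])
labels = tf.constant([0, 2])
custom_loss = get_loss_fn(num_classes=3)(labels, logits)
keras_loss = tf.reduce_mean(
    tf.keras.losses.sparse_categorical_crossentropy(
        labels, logits, from_logits=True))
tf.debugging.assert_near(custom_loss, keras_loss)  # passes; both are ~0.176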
+ if custom_metrics: + metric_fn = custom_metrics + elif is_regression: + metric_fn = functools.partial( + tf.keras.metrics.MeanSquaredError, + 'mean_squared_error', + dtype=tf.float32) + else: + metric_fn = functools.partial( + tf.keras.metrics.SparseCategoricalAccuracy, + 'accuracy', + dtype=tf.float32) + + # Start training using Keras compile/fit API. + logging.info('Training using TF 2.x Keras compile/fit API with ' + 'distribution strategy.') + return run_keras_compile_fit( + model_dir, + strategy, + _get_classifier_model, + train_input_fn, + eval_input_fn, + loss_fn, + metric_fn, + init_checkpoint, + epochs, + steps_per_epoch, + steps_per_loop, + eval_steps, + training_callbacks=training_callbacks, + custom_callbacks=custom_callbacks) + + +def run_keras_compile_fit(model_dir, + strategy, + model_fn, + train_input_fn, + eval_input_fn, + loss_fn, + metric_fn, + init_checkpoint, + epochs, + steps_per_epoch, + steps_per_loop, + eval_steps, + training_callbacks=True, + custom_callbacks=None): + """Runs BERT classifier model using Keras compile/fit API.""" + + with strategy.scope(): + training_dataset = train_input_fn() + evaluation_dataset = eval_input_fn() if eval_input_fn else None + bert_model, sub_model = model_fn() + optimizer = bert_model.optimizer + + if init_checkpoint: + checkpoint = tf.train.Checkpoint(model=sub_model) + checkpoint.restore(init_checkpoint).assert_existing_objects_matched() + + if not isinstance(metric_fn, (list, tuple)): + metric_fn = [metric_fn] + bert_model.compile( + optimizer=optimizer, + loss=loss_fn, + metrics=[fn() for fn in metric_fn], + experimental_steps_per_execution=steps_per_loop) + + summary_dir = os.path.join(model_dir, 'summaries') + summary_callback = tf.keras.callbacks.TensorBoard(summary_dir) + checkpoint = tf.train.Checkpoint(model=bert_model, optimizer=optimizer) + checkpoint_manager = tf.train.CheckpointManager( + checkpoint, + directory=model_dir, + max_to_keep=None, + step_counter=optimizer.iterations, + checkpoint_interval=0) + checkpoint_callback = keras_utils.SimpleCheckpoint(checkpoint_manager) + + if training_callbacks: + if custom_callbacks is not None: + custom_callbacks += [summary_callback, checkpoint_callback] + else: + custom_callbacks = [summary_callback, checkpoint_callback] + + history = bert_model.fit( + x=training_dataset, + validation_data=evaluation_dataset, + steps_per_epoch=steps_per_epoch, + epochs=epochs, + validation_steps=eval_steps, + callbacks=custom_callbacks) + stats = {'total_training_steps': steps_per_epoch * epochs} + if 'loss' in history.history: + stats['train_loss'] = history.history['loss'][-1] + if 'val_accuracy' in history.history: + stats['eval_metrics'] = history.history['val_accuracy'][-1] + return bert_model, stats + + +def get_predictions_and_labels(strategy, + trained_model, + eval_input_fn, + return_probs=False): + """Obtains predictions of trained model on evaluation data. + + Note that list of labels is returned along with the predictions because the + order changes on distributing dataset over TPU pods. + + Args: + strategy: Distribution strategy. + trained_model: Trained model with preloaded weights. + eval_input_fn: Input function for evaluation data. + return_probs: Whether to return probabilities of classes. + + Returns: + predictions: List of predictions. + labels: List of gold labels corresponding to predictions. 
+ """ + + @tf.function + def test_step(iterator): + """Computes predictions on distributed devices.""" + + def _test_step_fn(inputs): + """Replicated predictions.""" + inputs, labels = inputs + logits = trained_model(inputs, training=False) + probabilities = tf.nn.softmax(logits) + return probabilities, labels + + outputs, labels = strategy.run(_test_step_fn, args=(next(iterator),)) + # outputs: current batch logits as a tuple of shard logits + outputs = tf.nest.map_structure(strategy.experimental_local_results, + outputs) + labels = tf.nest.map_structure(strategy.experimental_local_results, labels) + return outputs, labels + + def _run_evaluation(test_iterator): + """Runs evaluation steps.""" + preds, golds = list(), list() + try: + with tf.experimental.async_scope(): + while True: + probabilities, labels = test_step(test_iterator) + for cur_probs, cur_labels in zip(probabilities, labels): + if return_probs: + preds.extend(cur_probs.numpy().tolist()) + else: + preds.extend(tf.math.argmax(cur_probs, axis=1).numpy()) + golds.extend(cur_labels.numpy().tolist()) + except (StopIteration, tf.errors.OutOfRangeError): + tf.experimental.async_clear_error() + return preds, golds + + test_iter = iter( + strategy.experimental_distribute_datasets_from_function(eval_input_fn)) + predictions, labels = _run_evaluation(test_iter) + + return predictions, labels + + +def export_classifier(model_export_path, input_meta_data, bert_config, + model_dir): + """Exports a trained model as a `SavedModel` for inference. + + Args: + model_export_path: a string specifying the path to the SavedModel directory. + input_meta_data: dictionary containing meta data about input and model. + bert_config: Bert configuration file to define core bert layers. + model_dir: The directory where the model weights and training/evaluation + summaries are stored. + + Raises: + Export path is not specified, got an empty string or None. + """ + if not model_export_path: + raise ValueError('Export path is not specified: %s' % model_export_path) + if not model_dir: + raise ValueError('Export path is not specified: %s' % model_dir) + + # Export uses float32 for now, even if training uses mixed precision. + tf.keras.mixed_precision.experimental.set_policy('float32') + classifier_model = bert_models.classifier_model( + bert_config, input_meta_data.get('num_labels', 1))[0] + + model_saving_utils.export_bert_model( + model_export_path, model=classifier_model, checkpoint_dir=model_dir) + + +def run_bert(strategy, + input_meta_data, + model_config, + train_input_fn=None, + eval_input_fn=None, + init_checkpoint=None, + custom_callbacks=None, + custom_metrics=None): + """Run BERT training.""" + # Enables XLA in Session Config. Should not be set for TPU. 
+ keras_utils.set_session_config(FLAGS.enable_xla) + performance.set_mixed_precision_policy(common_flags.dtype()) + + epochs = FLAGS.num_train_epochs * FLAGS.num_eval_per_epoch + train_data_size = ( + input_meta_data['train_data_size'] // FLAGS.num_eval_per_epoch) + steps_per_epoch = int(train_data_size / FLAGS.train_batch_size) + warmup_steps = int(epochs * train_data_size * 0.1 / FLAGS.train_batch_size) + eval_steps = int( + math.ceil(input_meta_data['eval_data_size'] / FLAGS.eval_batch_size)) + + if not strategy: + raise ValueError('Distribution strategy has not been specified.') + + if not custom_callbacks: + custom_callbacks = [] + + if FLAGS.log_steps: + custom_callbacks.append( + keras_utils.TimeHistory( + batch_size=FLAGS.train_batch_size, + log_steps=FLAGS.log_steps, + logdir=FLAGS.model_dir)) + + trained_model, _ = run_bert_classifier( + strategy, + model_config, + input_meta_data, + FLAGS.model_dir, + epochs, + steps_per_epoch, + FLAGS.steps_per_loop, + eval_steps, + warmup_steps, + FLAGS.learning_rate, + init_checkpoint or FLAGS.init_checkpoint, + train_input_fn, + eval_input_fn, + custom_callbacks=custom_callbacks, + custom_metrics=custom_metrics) + + if FLAGS.model_export_path: + model_saving_utils.export_bert_model( + FLAGS.model_export_path, model=trained_model) + return trained_model + + +def custom_main(custom_callbacks=None, custom_metrics=None): + """Run classification or regression. + + Args: + custom_callbacks: list of tf.keras.Callbacks passed to training loop. + custom_metrics: list of metrics passed to the training loop. + """ + gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_param) + + with tf.io.gfile.GFile(FLAGS.input_meta_data_path, 'rb') as reader: + input_meta_data = json.loads(reader.read().decode('utf-8')) + label_type = LABEL_TYPES_MAP[input_meta_data.get('label_type', 'int')] + include_sample_weights = input_meta_data.get('has_sample_weights', False) + + if not FLAGS.model_dir: + FLAGS.model_dir = '/tmp/bert20/' + + bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file) + + if FLAGS.mode == 'export_only': + export_classifier(FLAGS.model_export_path, input_meta_data, bert_config, + FLAGS.model_dir) + return + + strategy = distribution_utils.get_distribution_strategy( + distribution_strategy=FLAGS.distribution_strategy, + num_gpus=FLAGS.num_gpus, + tpu_address=FLAGS.tpu) + eval_input_fn = get_dataset_fn( + FLAGS.eval_data_path, + input_meta_data['max_seq_length'], + FLAGS.eval_batch_size, + is_training=False, + label_type=label_type, + include_sample_weights=include_sample_weights) + + if FLAGS.mode == 'predict': + with strategy.scope(): + classifier_model = bert_models.classifier_model( + bert_config, input_meta_data['num_labels'])[0] + checkpoint = tf.train.Checkpoint(model=classifier_model) + latest_checkpoint_file = ( + FLAGS.predict_checkpoint_path or + tf.train.latest_checkpoint(FLAGS.model_dir)) + assert latest_checkpoint_file + logging.info('Checkpoint file %s found and restoring from ' + 'checkpoint', latest_checkpoint_file) + checkpoint.restore( + latest_checkpoint_file).assert_existing_objects_matched() + preds, _ = get_predictions_and_labels( + strategy, classifier_model, eval_input_fn, return_probs=True) + output_predict_file = os.path.join(FLAGS.model_dir, 'test_results.tsv') + with tf.io.gfile.GFile(output_predict_file, 'w') as writer: + logging.info('***** Predict results *****') + for probabilities in preds: + output_line = '\t'.join( + str(class_probability) + for class_probability in probabilities) + 
'\n' + writer.write(output_line) + return + + if FLAGS.mode != 'train_and_eval': + raise ValueError('Unsupported mode is specified: %s' % FLAGS.mode) + train_input_fn = get_dataset_fn( + FLAGS.train_data_path, + input_meta_data['max_seq_length'], + FLAGS.train_batch_size, + is_training=True, + label_type=label_type, + include_sample_weights=include_sample_weights) + run_bert( + strategy, + input_meta_data, + bert_config, + train_input_fn, + eval_input_fn, + custom_callbacks=custom_callbacks, + custom_metrics=custom_metrics) + + +def main(_): + custom_main(custom_callbacks=None, custom_metrics=None) + + +if __name__ == '__main__': + flags.mark_flag_as_required('bert_config_file') + flags.mark_flag_as_required('input_meta_data_path') + flags.mark_flag_as_required('model_dir') + app.run(main) diff --git a/models/official/nlp/bert/run_pretraining.py b/models/official/nlp/bert/run_pretraining.py new file mode 100644 index 0000000000000000000000000000000000000000..44a18fea0ce9d79bea61294e91f0ac00c2ea45e6 --- /dev/null +++ b/models/official/nlp/bert/run_pretraining.py @@ -0,0 +1,197 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Run masked LM/next sentence pre-training for BERT in TF 2.x.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import app +from absl import flags +from absl import logging +import gin +import tensorflow as tf +from official.modeling import performance +from official.nlp import optimization +from official.nlp.bert import bert_models +from official.nlp.bert import common_flags +from official.nlp.bert import configs +from official.nlp.bert import input_pipeline +from official.nlp.bert import model_training_utils +from official.utils.misc import distribution_utils + + +flags.DEFINE_string('input_files', None, + 'File path to retrieve training data for pre-training.') +# Model training specific flags. +flags.DEFINE_integer( + 'max_seq_length', 128, + 'The maximum total input sequence length after WordPiece tokenization. ' + 'Sequences longer than this will be truncated, and sequences shorter ' + 'than this will be padded.') +flags.DEFINE_integer('max_predictions_per_seq', 20, + 'Maximum predictions per sequence_output.') +flags.DEFINE_integer('train_batch_size', 32, 'Total batch size for training.') +flags.DEFINE_integer('num_steps_per_epoch', 1000, + 'Total number of training steps to run per epoch.') +flags.DEFINE_float('warmup_steps', 10000, + 'Warmup steps for Adam weight decay optimizer.') +flags.DEFINE_bool('use_next_sentence_label', True, + 'Whether to use next sentence label to compute final loss.') +flags.DEFINE_bool('train_summary_interval', 0, 'Step interval for training ' + 'summaries. 
If the value is a negative number, ' + 'then training summaries are not enabled.') + +common_flags.define_common_bert_flags() + +FLAGS = flags.FLAGS + + +def get_pretrain_dataset_fn(input_file_pattern, seq_length, + max_predictions_per_seq, global_batch_size, + use_next_sentence_label=True): + """Returns input dataset from input file string.""" + def _dataset_fn(ctx=None): + """Returns tf.data.Dataset for distributed BERT pretraining.""" + input_patterns = input_file_pattern.split(',') + batch_size = ctx.get_per_replica_batch_size(global_batch_size) + train_dataset = input_pipeline.create_pretrain_dataset( + input_patterns, + seq_length, + max_predictions_per_seq, + batch_size, + is_training=True, + input_pipeline_context=ctx, + use_next_sentence_label=use_next_sentence_label) + return train_dataset + + return _dataset_fn + + +def get_loss_fn(): + """Returns loss function for BERT pretraining.""" + + def _bert_pretrain_loss_fn(unused_labels, losses, **unused_args): + return tf.reduce_mean(losses) + + return _bert_pretrain_loss_fn + + +def run_customized_training(strategy, + bert_config, + init_checkpoint, + max_seq_length, + max_predictions_per_seq, + model_dir, + steps_per_epoch, + steps_per_loop, + epochs, + initial_lr, + warmup_steps, + end_lr, + optimizer_type, + input_files, + train_batch_size, + use_next_sentence_label=True, + train_summary_interval=0, + custom_callbacks=None): + """Run BERT pretrain model training using low-level API.""" + + train_input_fn = get_pretrain_dataset_fn(input_files, max_seq_length, + max_predictions_per_seq, + train_batch_size, + use_next_sentence_label) + + def _get_pretrain_model(): + """Gets a pretraining model.""" + pretrain_model, core_model = bert_models.pretrain_model( + bert_config, max_seq_length, max_predictions_per_seq, + use_next_sentence_label=use_next_sentence_label) + optimizer = optimization.create_optimizer( + initial_lr, steps_per_epoch * epochs, warmup_steps, + end_lr, optimizer_type) + pretrain_model.optimizer = performance.configure_optimizer( + optimizer, + use_float16=common_flags.use_float16(), + use_graph_rewrite=common_flags.use_graph_rewrite()) + return pretrain_model, core_model + + trained_model = model_training_utils.run_customized_training_loop( + strategy=strategy, + model_fn=_get_pretrain_model, + loss_fn=get_loss_fn(), + scale_loss=FLAGS.scale_loss, + model_dir=model_dir, + init_checkpoint=init_checkpoint, + train_input_fn=train_input_fn, + steps_per_epoch=steps_per_epoch, + steps_per_loop=steps_per_loop, + epochs=epochs, + sub_model_export_name='pretrained/bert_model', + train_summary_interval=train_summary_interval, + custom_callbacks=custom_callbacks) + + return trained_model + + +def run_bert_pretrain(strategy, custom_callbacks=None): + """Runs BERT pre-training.""" + + bert_config = configs.BertConfig.from_json_file(FLAGS.bert_config_file) + if not strategy: + raise ValueError('Distribution strategy is not specified.') + + # Runs customized training loop. + logging.info('Training using customized training loop TF 2.0 with distributed' + 'strategy.') + + performance.set_mixed_precision_policy(common_flags.dtype()) + + return run_customized_training( + strategy, + bert_config, + FLAGS.init_checkpoint, # Used to initialize only the BERT submodel. 
+ FLAGS.max_seq_length, + FLAGS.max_predictions_per_seq, + FLAGS.model_dir, + FLAGS.num_steps_per_epoch, + FLAGS.steps_per_loop, + FLAGS.num_train_epochs, + FLAGS.learning_rate, + FLAGS.warmup_steps, + FLAGS.end_lr, + FLAGS.optimizer_type, + FLAGS.input_files, + FLAGS.train_batch_size, + FLAGS.use_next_sentence_label, + FLAGS.train_summary_interval, + custom_callbacks=custom_callbacks) + + +def main(_): + gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_param) + if not FLAGS.model_dir: + FLAGS.model_dir = '/tmp/bert20/' + strategy = distribution_utils.get_distribution_strategy( + distribution_strategy=FLAGS.distribution_strategy, + num_gpus=FLAGS.num_gpus, + tpu_address=FLAGS.tpu) + if strategy: + print('***** Number of cores used : ', strategy.num_replicas_in_sync) + + run_bert_pretrain(strategy) + + +if __name__ == '__main__': + app.run(main) diff --git a/models/official/nlp/bert/run_squad.py b/models/official/nlp/bert/run_squad.py new file mode 100644 index 0000000000000000000000000000000000000000..b12925cfaad2337c28483325c5f942df651add62 --- /dev/null +++ b/models/official/nlp/bert/run_squad.py @@ -0,0 +1,153 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Run BERT on SQuAD 1.1 and SQuAD 2.0 in TF 2.x.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import json +import os +import time + +from absl import app +from absl import flags +from absl import logging +import gin +import tensorflow as tf + +from official.nlp.bert import configs as bert_configs +from official.nlp.bert import run_squad_helper +from official.nlp.bert import tokenization +from official.nlp.data import squad_lib as squad_lib_wp +from official.utils.misc import distribution_utils +from official.utils.misc import keras_utils + + +flags.DEFINE_string('vocab_file', None, + 'The vocabulary file that the BERT model was trained on.') + +# More flags can be found in run_squad_helper. 
+run_squad_helper.define_common_squad_flags() + +FLAGS = flags.FLAGS + + +def train_squad(strategy, + input_meta_data, + custom_callbacks=None, + run_eagerly=False, + init_checkpoint=None, + sub_model_export_name=None): + """Run bert squad training.""" + bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file) + init_checkpoint = init_checkpoint or FLAGS.init_checkpoint + run_squad_helper.train_squad(strategy, input_meta_data, bert_config, + custom_callbacks, run_eagerly, init_checkpoint, + sub_model_export_name=sub_model_export_name) + + +def predict_squad(strategy, input_meta_data): + """Makes predictions for the squad dataset.""" + bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file) + tokenizer = tokenization.FullTokenizer( + vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) + run_squad_helper.predict_squad( + strategy, input_meta_data, tokenizer, bert_config, squad_lib_wp) + + +def eval_squad(strategy, input_meta_data): + """Evaluate on the squad dataset.""" + bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file) + tokenizer = tokenization.FullTokenizer( + vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) + eval_metrics = run_squad_helper.eval_squad( + strategy, input_meta_data, tokenizer, bert_config, squad_lib_wp) + return eval_metrics + + +def export_squad(model_export_path, input_meta_data): + """Exports a trained model as a `SavedModel` for inference. + + Args: + model_export_path: a string specifying the path to the SavedModel directory. + input_meta_data: dictionary containing meta data about input and model. + + Raises: + Export path is not specified, got an empty string or None. + """ + bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file) + run_squad_helper.export_squad(model_export_path, input_meta_data, bert_config) + + +def main(_): + gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_param) + + with tf.io.gfile.GFile(FLAGS.input_meta_data_path, 'rb') as reader: + input_meta_data = json.loads(reader.read().decode('utf-8')) + + if FLAGS.mode == 'export_only': + export_squad(FLAGS.model_export_path, input_meta_data) + return + + # Configures cluster spec for multi-worker distribution strategy. + if FLAGS.num_gpus > 0: + _ = distribution_utils.configure_cluster(FLAGS.worker_hosts, + FLAGS.task_index) + strategy = distribution_utils.get_distribution_strategy( + distribution_strategy=FLAGS.distribution_strategy, + num_gpus=FLAGS.num_gpus, + all_reduce_alg=FLAGS.all_reduce_alg, + tpu_address=FLAGS.tpu) + + if 'train' in FLAGS.mode: + if FLAGS.log_steps: + custom_callbacks = [keras_utils.TimeHistory( + batch_size=FLAGS.train_batch_size, + log_steps=FLAGS.log_steps, + logdir=FLAGS.model_dir, + )] + else: + custom_callbacks = None + + train_squad( + strategy, + input_meta_data, + custom_callbacks=custom_callbacks, + run_eagerly=FLAGS.run_eagerly, + sub_model_export_name=FLAGS.sub_model_export_name, + ) + if 'predict' in FLAGS.mode: + predict_squad(strategy, input_meta_data) + if 'eval' in FLAGS.mode: + eval_metrics = eval_squad(strategy, input_meta_data) + f1_score = eval_metrics['final_f1'] + logging.info('SQuAD eval F1-score: %f', f1_score) + summary_dir = os.path.join(FLAGS.model_dir, 'summaries', 'eval') + summary_writer = tf.summary.create_file_writer(summary_dir) + with summary_writer.as_default(): + # TODO(lehou): write to the correct step number. 
+ tf.summary.scalar('F1-score', f1_score, step=0) + summary_writer.flush() + # Also write eval_metrics to json file. + squad_lib_wp.write_to_json_files( + eval_metrics, os.path.join(summary_dir, 'eval_metrics.json')) + time.sleep(60) + + +if __name__ == '__main__': + flags.mark_flag_as_required('bert_config_file') + flags.mark_flag_as_required('model_dir') + app.run(main) diff --git a/models/official/nlp/bert/run_squad_helper.py b/models/official/nlp/bert/run_squad_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..b03e356d91bdf6a9edf9486f505526852c6c7ef6 --- /dev/null +++ b/models/official/nlp/bert/run_squad_helper.py @@ -0,0 +1,481 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Library for running BERT family models on SQuAD 1.1/2.0 in TF 2.x.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import json +import os +from absl import flags +from absl import logging +import tensorflow as tf +from official.modeling import performance +from official.nlp import optimization +from official.nlp.bert import bert_models +from official.nlp.bert import common_flags +from official.nlp.bert import input_pipeline +from official.nlp.bert import model_saving_utils +from official.nlp.bert import model_training_utils +from official.nlp.bert import squad_evaluate_v1_1 +from official.nlp.bert import squad_evaluate_v2_0 +from official.nlp.data import squad_lib_sp +from official.utils.misc import keras_utils + + +def define_common_squad_flags(): + """Defines common flags used by SQuAD tasks.""" + flags.DEFINE_enum( + 'mode', 'train_and_eval', + ['train_and_eval', 'train_and_predict', + 'train', 'eval', 'predict', 'export_only'], + 'One of {"train_and_eval", "train_and_predict", ' + '"train", "eval", "predict", "export_only"}. ' + '`train_and_eval`: train & predict to json files & compute eval metrics. ' + '`train_and_predict`: train & predict to json files. ' + '`train`: only trains the model. ' + '`eval`: predict answers from squad json file & compute eval metrics. ' + '`predict`: predict answers from the squad json file. ' + '`export_only`: will take the latest checkpoint inside ' + 'model_dir and export a `SavedModel`.') + flags.DEFINE_string('train_data_path', '', + 'Training data path with train tfrecords.') + flags.DEFINE_string( + 'input_meta_data_path', None, + 'Path to file that contains meta data about input ' + 'to be used for training and evaluation.') + # Model training specific flags. + flags.DEFINE_integer('train_batch_size', 32, 'Total batch size for training.') + # Predict processing related. + flags.DEFINE_string('predict_file', None, + 'SQuAD prediction json file path. ' + '`predict` mode supports multiple files: one can use ' + 'wildcard to specify multiple files and it can also be ' + 'multiple file patterns separated by comma. 
Note that ' + '`eval` mode only supports a single predict file.') + flags.DEFINE_bool( + 'do_lower_case', True, + 'Whether to lower case the input text. Should be True for uncased ' + 'models and False for cased models.') + flags.DEFINE_float( + 'null_score_diff_threshold', 0.0, + 'If null_score - best_non_null is greater than the threshold, ' + 'predict null. This is only used for SQuAD v2.') + flags.DEFINE_bool( + 'verbose_logging', False, + 'If true, all of the warnings related to data processing will be ' + 'printed. A number of warnings are expected for a normal SQuAD ' + 'evaluation.') + flags.DEFINE_integer('predict_batch_size', 8, + 'Total batch size for prediction.') + flags.DEFINE_integer( + 'n_best_size', 20, + 'The total number of n-best predictions to generate in the ' + 'nbest_predictions.json output file.') + flags.DEFINE_integer( + 'max_answer_length', 30, + 'The maximum length of an answer that can be generated. This is needed ' + 'because the start and end predictions are not conditioned on one ' + 'another.') + + common_flags.define_common_bert_flags() + + +FLAGS = flags.FLAGS + + +def squad_loss_fn(start_positions, + end_positions, + start_logits, + end_logits): + """Returns sparse categorical crossentropy for start/end logits.""" + start_loss = tf.keras.losses.sparse_categorical_crossentropy( + start_positions, start_logits, from_logits=True) + end_loss = tf.keras.losses.sparse_categorical_crossentropy( + end_positions, end_logits, from_logits=True) + + total_loss = (tf.reduce_mean(start_loss) + tf.reduce_mean(end_loss)) / 2 + return total_loss + + +def get_loss_fn(): + """Gets a loss function for squad task.""" + + def _loss_fn(labels, model_outputs): + start_positions = labels['start_positions'] + end_positions = labels['end_positions'] + start_logits, end_logits = model_outputs + return squad_loss_fn( + start_positions, + end_positions, + start_logits, + end_logits) + + return _loss_fn + + +RawResult = collections.namedtuple('RawResult', + ['unique_id', 'start_logits', 'end_logits']) + + +def get_raw_results(predictions): + """Converts multi-replica predictions to RawResult.""" + for unique_ids, start_logits, end_logits in zip(predictions['unique_ids'], + predictions['start_logits'], + predictions['end_logits']): + for values in zip(unique_ids.numpy(), start_logits.numpy(), + end_logits.numpy()): + yield RawResult( + unique_id=values[0], + start_logits=values[1].tolist(), + end_logits=values[2].tolist()) + + +def get_dataset_fn(input_file_pattern, max_seq_length, global_batch_size, + is_training): + """Gets a closure to create a dataset..""" + + def _dataset_fn(ctx=None): + """Returns tf.data.Dataset for distributed BERT pretraining.""" + batch_size = ctx.get_per_replica_batch_size( + global_batch_size) if ctx else global_batch_size + dataset = input_pipeline.create_squad_dataset( + input_file_pattern, + max_seq_length, + batch_size, + is_training=is_training, + input_pipeline_context=ctx) + return dataset + + return _dataset_fn + + +def get_squad_model_to_predict(strategy, bert_config, checkpoint_path, + input_meta_data): + """Gets a squad model to make predictions.""" + with strategy.scope(): + # Prediction always uses float32, even if training uses mixed precision. 
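A tiny numeric illustration of squad_loss_fn above with made-up logits for a batch of one (assumes squad_loss_fn defined earlier in this file; the float32 prediction setup that the comment above refers to continues right below). The loss is the mean of the start- and end-position crossentropies, so confident, correct logits drive it towards zero:

import tensorflow as tf

start_logits = tf.constant([[0.0, 10.0, 0.0]])  # answer starts at token 1
end_logits = tf.constant([[0.0, 0.0, 10.0]])    # answer ends at token 2
loss = squad_loss_fn(
    start_positions=tf.constant([1]),
    end_positions=tf.constant([2]),
    start_logits=start_logits,
    end_logits=end_logits)
# Each crossentropy term is about 9e-5 here, so the averaged loss is too.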
+ tf.keras.mixed_precision.experimental.set_policy('float32') + squad_model, _ = bert_models.squad_model( + bert_config, + input_meta_data['max_seq_length'], + hub_module_url=FLAGS.hub_module_url) + + if checkpoint_path is None: + checkpoint_path = tf.train.latest_checkpoint(FLAGS.model_dir) + logging.info('Restoring checkpoints from %s', checkpoint_path) + checkpoint = tf.train.Checkpoint(model=squad_model) + checkpoint.restore(checkpoint_path).expect_partial() + return squad_model + + +def predict_squad_customized(strategy, + input_meta_data, + predict_tfrecord_path, + num_steps, + squad_model): + """Make predictions using a Bert-based squad model.""" + predict_dataset_fn = get_dataset_fn( + predict_tfrecord_path, + input_meta_data['max_seq_length'], + FLAGS.predict_batch_size, + is_training=False) + predict_iterator = iter( + strategy.experimental_distribute_datasets_from_function( + predict_dataset_fn)) + + @tf.function + def predict_step(iterator): + """Predicts on distributed devices.""" + + def _replicated_step(inputs): + """Replicated prediction calculation.""" + x, _ = inputs + unique_ids = x.pop('unique_ids') + start_logits, end_logits = squad_model(x, training=False) + return dict( + unique_ids=unique_ids, + start_logits=start_logits, + end_logits=end_logits) + + outputs = strategy.run(_replicated_step, args=(next(iterator),)) + return tf.nest.map_structure(strategy.experimental_local_results, outputs) + + all_results = [] + for _ in range(num_steps): + predictions = predict_step(predict_iterator) + for result in get_raw_results(predictions): + all_results.append(result) + if len(all_results) % 100 == 0: + logging.info('Made predictions for %d records.', len(all_results)) + return all_results + + +def train_squad(strategy, + input_meta_data, + bert_config, + custom_callbacks=None, + run_eagerly=False, + init_checkpoint=None, + sub_model_export_name=None): + """Run bert squad training.""" + if strategy: + logging.info('Training using customized training loop with distribution' + ' strategy.') + # Enables XLA in Session Config. Should not be set for TPU. + keras_utils.set_session_config(FLAGS.enable_xla) + performance.set_mixed_precision_policy(common_flags.dtype()) + + epochs = FLAGS.num_train_epochs + num_train_examples = input_meta_data['train_data_size'] + max_seq_length = input_meta_data['max_seq_length'] + steps_per_epoch = int(num_train_examples / FLAGS.train_batch_size) + warmup_steps = int(epochs * num_train_examples * 0.1 / FLAGS.train_batch_size) + train_input_fn = get_dataset_fn( + FLAGS.train_data_path, + max_seq_length, + FLAGS.train_batch_size, + is_training=True) + + def _get_squad_model(): + """Get Squad model and optimizer.""" + squad_model, core_model = bert_models.squad_model( + bert_config, + max_seq_length, + hub_module_url=FLAGS.hub_module_url, + hub_module_trainable=FLAGS.hub_module_trainable) + optimizer = optimization.create_optimizer(FLAGS.learning_rate, + steps_per_epoch * epochs, + warmup_steps, + FLAGS.end_lr, + FLAGS.optimizer_type) + + squad_model.optimizer = performance.configure_optimizer( + optimizer, + use_float16=common_flags.use_float16(), + use_graph_rewrite=common_flags.use_graph_rewrite()) + return squad_model, core_model + + # If explicit_allreduce = True, apply_gradients() no longer implicitly + # allreduce gradients, users manually allreduce gradient and pass the + # allreduced grads_and_vars to apply_gradients(). clip_by_global_norm will be + # applied to allreduced gradients. 
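To make the clipping callback defined just below concrete, a small sketch with made-up gradients; tf.clip_by_global_norm rescales every gradient by clip_norm / global_norm whenever the global norm exceeds clip_norm:

import tensorflow as tf

grads = [tf.constant([3.0, 4.0]), tf.constant([12.0])]
clipped, global_norm = tf.clip_by_global_norm(grads, clip_norm=1.0)
# global_norm == sqrt(3**2 + 4**2 + 12**2) == 13.0, so each gradient is scaled
# by 1/13: clipped == [[3/13, 4/13], [12/13]].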
+ def clip_by_global_norm_callback(grads_and_vars): + grads, variables = zip(*grads_and_vars) + (clipped_grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0) + return zip(clipped_grads, variables) + + model_training_utils.run_customized_training_loop( + strategy=strategy, + model_fn=_get_squad_model, + loss_fn=get_loss_fn(), + model_dir=FLAGS.model_dir, + steps_per_epoch=steps_per_epoch, + steps_per_loop=FLAGS.steps_per_loop, + epochs=epochs, + train_input_fn=train_input_fn, + init_checkpoint=init_checkpoint or FLAGS.init_checkpoint, + sub_model_export_name=sub_model_export_name, + run_eagerly=run_eagerly, + custom_callbacks=custom_callbacks, + explicit_allreduce=False, + post_allreduce_callbacks=[clip_by_global_norm_callback]) + + +def prediction_output_squad(strategy, input_meta_data, tokenizer, squad_lib, + predict_file, squad_model): + """Makes predictions for a squad dataset.""" + doc_stride = input_meta_data['doc_stride'] + max_query_length = input_meta_data['max_query_length'] + # Whether data should be in Ver 2.0 format. + version_2_with_negative = input_meta_data.get('version_2_with_negative', + False) + eval_examples = squad_lib.read_squad_examples( + input_file=predict_file, + is_training=False, + version_2_with_negative=version_2_with_negative) + + eval_writer = squad_lib.FeatureWriter( + filename=os.path.join(FLAGS.model_dir, 'eval.tf_record'), + is_training=False) + eval_features = [] + + def _append_feature(feature, is_padding): + if not is_padding: + eval_features.append(feature) + eval_writer.process_feature(feature) + + # TPU requires a fixed batch size for all batches, therefore the number + # of examples must be a multiple of the batch size, or else examples + # will get dropped. So we pad with fake examples which are ignored + # later on. + kwargs = dict( + examples=eval_examples, + tokenizer=tokenizer, + max_seq_length=input_meta_data['max_seq_length'], + doc_stride=doc_stride, + max_query_length=max_query_length, + is_training=False, + output_fn=_append_feature, + batch_size=FLAGS.predict_batch_size) + + # squad_lib_sp requires one more argument 'do_lower_case'. 
+ if squad_lib == squad_lib_sp: + kwargs['do_lower_case'] = FLAGS.do_lower_case + dataset_size = squad_lib.convert_examples_to_features(**kwargs) + eval_writer.close() + + logging.info('***** Running predictions *****') + logging.info(' Num orig examples = %d', len(eval_examples)) + logging.info(' Num split examples = %d', len(eval_features)) + logging.info(' Batch size = %d', FLAGS.predict_batch_size) + + num_steps = int(dataset_size / FLAGS.predict_batch_size) + all_results = predict_squad_customized( + strategy, input_meta_data, eval_writer.filename, num_steps, squad_model) + + all_predictions, all_nbest_json, scores_diff_json = ( + squad_lib.postprocess_output( + eval_examples, + eval_features, + all_results, + FLAGS.n_best_size, + FLAGS.max_answer_length, + FLAGS.do_lower_case, + version_2_with_negative=version_2_with_negative, + null_score_diff_threshold=FLAGS.null_score_diff_threshold, + verbose=FLAGS.verbose_logging)) + + return all_predictions, all_nbest_json, scores_diff_json + + +def dump_to_files(all_predictions, all_nbest_json, scores_diff_json, + squad_lib, version_2_with_negative, file_prefix=''): + """Save output to json files.""" + output_prediction_file = os.path.join(FLAGS.model_dir, + '%spredictions.json' % file_prefix) + output_nbest_file = os.path.join(FLAGS.model_dir, + '%snbest_predictions.json' % file_prefix) + output_null_log_odds_file = os.path.join(FLAGS.model_dir, file_prefix, + '%snull_odds.json' % file_prefix) + logging.info('Writing predictions to: %s', (output_prediction_file)) + logging.info('Writing nbest to: %s', (output_nbest_file)) + + squad_lib.write_to_json_files(all_predictions, output_prediction_file) + squad_lib.write_to_json_files(all_nbest_json, output_nbest_file) + if version_2_with_negative: + squad_lib.write_to_json_files(scores_diff_json, output_null_log_odds_file) + + +def _get_matched_files(input_path): + """Returns all files that matches the input_path.""" + input_patterns = input_path.strip().split(',') + all_matched_files = [] + for input_pattern in input_patterns: + input_pattern = input_pattern.strip() + if not input_pattern: + continue + matched_files = tf.io.gfile.glob(input_pattern) + if not matched_files: + raise ValueError('%s does not match any files.' 
% input_pattern) + else: + all_matched_files.extend(matched_files) + return sorted(all_matched_files) + + +def predict_squad(strategy, + input_meta_data, + tokenizer, + bert_config, + squad_lib, + init_checkpoint=None): + """Get prediction results and evaluate them to hard drive.""" + if init_checkpoint is None: + init_checkpoint = tf.train.latest_checkpoint(FLAGS.model_dir) + + all_predict_files = _get_matched_files(FLAGS.predict_file) + squad_model = get_squad_model_to_predict(strategy, bert_config, + init_checkpoint, input_meta_data) + for idx, predict_file in enumerate(all_predict_files): + all_predictions, all_nbest_json, scores_diff_json = prediction_output_squad( + strategy, input_meta_data, tokenizer, squad_lib, predict_file, + squad_model) + if len(all_predict_files) == 1: + file_prefix = '' + else: + # if predict_file is /path/xquad.ar.json, the `file_prefix` may be + # "xquad.ar-0-" + file_prefix = '%s-' % os.path.splitext( + os.path.basename(all_predict_files[idx]))[0] + dump_to_files(all_predictions, all_nbest_json, scores_diff_json, squad_lib, + input_meta_data.get('version_2_with_negative', False), + file_prefix) + + +def eval_squad(strategy, + input_meta_data, + tokenizer, + bert_config, + squad_lib, + init_checkpoint=None): + """Get prediction results and evaluate them against ground truth.""" + if init_checkpoint is None: + init_checkpoint = tf.train.latest_checkpoint(FLAGS.model_dir) + + all_predict_files = _get_matched_files(FLAGS.predict_file) + if len(all_predict_files) != 1: + raise ValueError('`eval_squad` only supports one predict file, ' + 'but got %s' % all_predict_files) + + squad_model = get_squad_model_to_predict(strategy, bert_config, + init_checkpoint, input_meta_data) + all_predictions, all_nbest_json, scores_diff_json = prediction_output_squad( + strategy, input_meta_data, tokenizer, squad_lib, all_predict_files[0], + squad_model) + dump_to_files(all_predictions, all_nbest_json, scores_diff_json, squad_lib, + input_meta_data.get('version_2_with_negative', False)) + + with tf.io.gfile.GFile(FLAGS.predict_file, 'r') as reader: + dataset_json = json.load(reader) + pred_dataset = dataset_json['data'] + if input_meta_data.get('version_2_with_negative', False): + eval_metrics = squad_evaluate_v2_0.evaluate(pred_dataset, + all_predictions, + scores_diff_json) + else: + eval_metrics = squad_evaluate_v1_1.evaluate(pred_dataset, all_predictions) + return eval_metrics + + +def export_squad(model_export_path, input_meta_data, bert_config): + """Exports a trained model as a `SavedModel` for inference. + + Args: + model_export_path: a string specifying the path to the SavedModel directory. + input_meta_data: dictionary containing meta data about input and model. + bert_config: Bert configuration file to define core bert layers. + + Raises: + Export path is not specified, got an empty string or None. + """ + if not model_export_path: + raise ValueError('Export path is not specified: %s' % model_export_path) + # Export uses float32 for now, even if training uses mixed precision. 
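The per-file prefix logic in predict_squad above resolves like this for a hypothetical path (export_squad's float32 export setup continues right below):

import os

predict_file = '/path/xquad.ar.json'
file_prefix = '%s-' % os.path.splitext(os.path.basename(predict_file))[0]
# file_prefix == 'xquad.ar-', so the outputs become e.g. 'xquad.ar-predictions.json'
# and 'xquad.ar-nbest_predictions.json' instead of overwriting one another.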
+ tf.keras.mixed_precision.experimental.set_policy('float32') + squad_model, _ = bert_models.squad_model(bert_config, + input_meta_data['max_seq_length']) + model_saving_utils.export_bert_model( + model_export_path, model=squad_model, checkpoint_dir=FLAGS.model_dir) diff --git a/models/official/nlp/bert/serving.py b/models/official/nlp/bert/serving.py new file mode 100644 index 0000000000000000000000000000000000000000..895f61dc37adf40d93ea347817abbb18966e157e --- /dev/null +++ b/models/official/nlp/bert/serving.py @@ -0,0 +1,134 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Examples of SavedModel export for tf-serving.""" + +from absl import app +from absl import flags +import tensorflow as tf + +from official.nlp.bert import bert_models +from official.nlp.bert import configs + +flags.DEFINE_integer("sequence_length", None, + "Sequence length to parse the tf.Example. If " + "sequence_length > 0, add a signature for serialized " + "tf.Example and define the parsing specification by the " + "sequence_length.") +flags.DEFINE_string("bert_config_file", None, + "Bert configuration file to define core bert layers.") +flags.DEFINE_string("model_checkpoint_path", None, + "File path to TF model checkpoint.") +flags.DEFINE_string("export_path", None, + "Destination folder to export the serving SavedModel.") + +FLAGS = flags.FLAGS + + +class BertServing(tf.keras.Model): + """Bert transformer encoder model for serving.""" + + def __init__(self, bert_config, name_to_features=None, name="serving_model"): + super(BertServing, self).__init__(name=name) + self.encoder = bert_models.get_transformer_encoder( + bert_config, sequence_length=None) + self.name_to_features = name_to_features + + def call(self, inputs): + input_word_ids = inputs["input_ids"] + input_mask = inputs["input_mask"] + input_type_ids = inputs["segment_ids"] + + encoder_outputs, _ = self.encoder( + [input_word_ids, input_mask, input_type_ids]) + return encoder_outputs + + def serve_body(self, input_ids, input_mask=None, segment_ids=None): + if segment_ids is None: + # Requires CLS token is the first token of inputs. + segment_ids = tf.zeros_like(input_ids) + if input_mask is None: + # The mask has 1 for real tokens and 0 for padding tokens. + input_mask = tf.where( + tf.equal(input_ids, 0), tf.zeros_like(input_ids), + tf.ones_like(input_ids)) + + inputs = dict( + input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids) + return self.call(inputs) + + @tf.function + def serve(self, input_ids, input_mask=None, segment_ids=None): + outputs = self.serve_body(input_ids, input_mask, segment_ids) + # Returns a dictionary to control SignatureDef output signature. 
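+    # Only the last entry of `outputs` is returned; assuming the encoder is configured to emit per-layer sequence outputs, this is the final transformer layer's [batch_size, seq_length, hidden_size] tensor.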
+ return {"outputs": outputs[-1]} + + @tf.function + def serve_examples(self, inputs): + features = tf.io.parse_example(inputs, self.name_to_features) + for key in list(features.keys()): + t = features[key] + if t.dtype == tf.int64: + t = tf.cast(t, tf.int32) + features[key] = t + return self.serve( + features["input_ids"], + input_mask=features["input_mask"] if "input_mask" in features else None, + segment_ids=features["segment_ids"] + if "segment_ids" in features else None) + + @classmethod + def export(cls, model, export_dir): + if not isinstance(model, cls): + raise ValueError("Invalid model instance: %s, it should be a %s" % + (model, cls)) + + signatures = { + "serving_default": + model.serve.get_concrete_function( + input_ids=tf.TensorSpec( + shape=[None, None], dtype=tf.int32, name="inputs")), + } + if model.name_to_features: + signatures[ + "serving_examples"] = model.serve_examples.get_concrete_function( + tf.TensorSpec(shape=[None], dtype=tf.string, name="examples")) + tf.saved_model.save(model, export_dir=export_dir, signatures=signatures) + + +def main(_): + sequence_length = FLAGS.sequence_length + if sequence_length is not None and sequence_length > 0: + name_to_features = { + "input_ids": tf.io.FixedLenFeature([sequence_length], tf.int64), + "input_mask": tf.io.FixedLenFeature([sequence_length], tf.int64), + "segment_ids": tf.io.FixedLenFeature([sequence_length], tf.int64), + } + else: + name_to_features = None + bert_config = configs.BertConfig.from_json_file(FLAGS.bert_config_file) + serving_model = BertServing( + bert_config=bert_config, name_to_features=name_to_features) + checkpoint = tf.train.Checkpoint(model=serving_model.encoder) + checkpoint.restore(FLAGS.model_checkpoint_path + ).assert_existing_objects_matched().run_restore_ops() + BertServing.export(serving_model, FLAGS.export_path) + + +if __name__ == "__main__": + flags.mark_flag_as_required("bert_config_file") + flags.mark_flag_as_required("model_checkpoint_path") + flags.mark_flag_as_required("export_path") + app.run(main) diff --git a/models/official/nlp/bert/squad_evaluate_v1_1.py b/models/official/nlp/bert/squad_evaluate_v1_1.py new file mode 100644 index 0000000000000000000000000000000000000000..c7f4f4de66813cb4fbdc59cc716911fac064f0c9 --- /dev/null +++ b/models/official/nlp/bert/squad_evaluate_v1_1.py @@ -0,0 +1,108 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Evaluation of SQuAD predictions (version 1.1). + +The functions are copied from +https://worksheets.codalab.org/rest/bundles/0xbcd57bee090b421c982906709c8c27e1/contents/blob/. 
+ +The SQuAD dataset is described in this paper: +SQuAD: 100,000+ Questions for Machine Comprehension of Text +Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, Percy Liang +https://nlp.stanford.edu/pubs/rajpurkar2016squad.pdf +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import re +import string + +# pylint: disable=g-bad-import-order +from absl import logging +# pylint: enable=g-bad-import-order + + +def _normalize_answer(s): + """Lowers text and remove punctuation, articles and extra whitespace.""" + + def remove_articles(text): + return re.sub(r"\b(a|an|the)\b", " ", text) + + def white_space_fix(text): + return " ".join(text.split()) + + def remove_punc(text): + exclude = set(string.punctuation) + return "".join(ch for ch in text if ch not in exclude) + + def lower(text): + return text.lower() + + return white_space_fix(remove_articles(remove_punc(lower(s)))) + + +def _f1_score(prediction, ground_truth): + """Computes F1 score by comparing prediction to ground truth.""" + prediction_tokens = _normalize_answer(prediction).split() + ground_truth_tokens = _normalize_answer(ground_truth).split() + prediction_counter = collections.Counter(prediction_tokens) + ground_truth_counter = collections.Counter(ground_truth_tokens) + common = prediction_counter & ground_truth_counter + num_same = sum(common.values()) + if num_same == 0: + return 0 + precision = 1.0 * num_same / len(prediction_tokens) + recall = 1.0 * num_same / len(ground_truth_tokens) + f1 = (2 * precision * recall) / (precision + recall) + return f1 + + +def _exact_match_score(prediction, ground_truth): + """Checks if predicted answer exactly matches ground truth answer.""" + return _normalize_answer(prediction) == _normalize_answer(ground_truth) + + +def _metric_max_over_ground_truths(metric_fn, prediction, ground_truths): + """Computes the max over all metric scores.""" + scores_for_ground_truths = [] + for ground_truth in ground_truths: + score = metric_fn(prediction, ground_truth) + scores_for_ground_truths.append(score) + return max(scores_for_ground_truths) + + +def evaluate(dataset, predictions): + """Evaluates predictions for a dataset.""" + f1 = exact_match = total = 0 + for article in dataset: + for paragraph in article["paragraphs"]: + for qa in paragraph["qas"]: + total += 1 + if qa["id"] not in predictions: + message = "Unanswered question " + qa["id"] + " will receive score 0." + logging.error(message) + continue + ground_truths = [entry["text"] for entry in qa["answers"]] + prediction = predictions[qa["id"]] + exact_match += _metric_max_over_ground_truths(_exact_match_score, + prediction, ground_truths) + f1 += _metric_max_over_ground_truths(_f1_score, prediction, + ground_truths) + + exact_match = exact_match / total + f1 = f1 / total + + return {"exact_match": exact_match, "final_f1": f1} diff --git a/models/official/nlp/bert/squad_evaluate_v2_0.py b/models/official/nlp/bert/squad_evaluate_v2_0.py new file mode 100644 index 0000000000000000000000000000000000000000..54fb84e993c3459ffdd2b3d90f870e4d178ab54f --- /dev/null +++ b/models/official/nlp/bert/squad_evaluate_v2_0.py @@ -0,0 +1,252 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Evaluation script for SQuAD version 2.0. + +The functions are copied and modified from +https://raw.githubusercontent.com/white127/SQUAD-2.0-bidaf/master/evaluate-v2.0.py + +In addition to basic functionality, we also compute additional statistics and +plot precision-recall curves if an additional na_prob.json file is provided. +This file is expected to map question ID's to the model's predicted probability +that a question is unanswerable. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import re +import string + +from absl import logging + + +def _make_qid_to_has_ans(dataset): + qid_to_has_ans = {} + for article in dataset: + for p in article['paragraphs']: + for qa in p['qas']: + qid_to_has_ans[qa['id']] = bool(qa['answers']) + return qid_to_has_ans + + +def _normalize_answer(s): + """Lower text and remove punctuation, articles and extra whitespace.""" + def remove_articles(text): + regex = re.compile(r'\b(a|an|the)\b', re.UNICODE) + return re.sub(regex, ' ', text) + def white_space_fix(text): + return ' '.join(text.split()) + def remove_punc(text): + exclude = set(string.punctuation) + return ''.join(ch for ch in text if ch not in exclude) + def lower(text): + return text.lower() + return white_space_fix(remove_articles(remove_punc(lower(s)))) + + +def _get_tokens(s): + if not s: return [] + return _normalize_answer(s).split() + + +def _compute_exact(a_gold, a_pred): + return int(_normalize_answer(a_gold) == _normalize_answer(a_pred)) + + +def _compute_f1(a_gold, a_pred): + """Compute F1-score.""" + gold_toks = _get_tokens(a_gold) + pred_toks = _get_tokens(a_pred) + common = collections.Counter(gold_toks) & collections.Counter(pred_toks) + num_same = sum(common.values()) + if not gold_toks or not pred_toks: + # If either is no-answer, then F1 is 1 if they agree, 0 otherwise + return int(gold_toks == pred_toks) + if num_same == 0: + return 0 + precision = 1.0 * num_same / len(pred_toks) + recall = 1.0 * num_same / len(gold_toks) + f1 = (2 * precision * recall) / (precision + recall) + return f1 + + +def _get_raw_scores(dataset, predictions): + """Compute raw scores.""" + exact_scores = {} + f1_scores = {} + for article in dataset: + for p in article['paragraphs']: + for qa in p['qas']: + qid = qa['id'] + gold_answers = [a['text'] for a in qa['answers'] + if _normalize_answer(a['text'])] + if not gold_answers: + # For unanswerable questions, only correct answer is empty string + gold_answers = [''] + if qid not in predictions: + logging.error('Missing prediction for %s', qid) + continue + a_pred = predictions[qid] + # Take max over all gold answers + exact_scores[qid] = max(_compute_exact(a, a_pred) for a in gold_answers) + f1_scores[qid] = max(_compute_f1(a, a_pred) for a in gold_answers) + return exact_scores, f1_scores + + +def _apply_no_ans_threshold( + scores, na_probs, qid_to_has_ans, na_prob_thresh=1.0): + new_scores = {} + for qid, s in scores.items(): + pred_na = na_probs[qid] > na_prob_thresh 
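+    # When the no-answer probability exceeds the threshold, the prediction is treated as "no answer" and scores 1.0 only if the question is truly unanswerable; otherwise the raw exact-match/F1 score is kept.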
+ if pred_na: + new_scores[qid] = float(not qid_to_has_ans[qid]) + else: + new_scores[qid] = s + return new_scores + + +def _make_eval_dict(exact_scores, f1_scores, qid_list=None): + """Make evaluation result dictionary.""" + if not qid_list: + total = len(exact_scores) + return collections.OrderedDict([ + ('exact', 100.0 * sum(exact_scores.values()) / total), + ('f1', 100.0 * sum(f1_scores.values()) / total), + ('total', total), + ]) + else: + total = len(qid_list) + return collections.OrderedDict([ + ('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total), + ('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total), + ('total', total), + ]) + + +def _merge_eval(main_eval, new_eval, prefix): + for k in new_eval: + main_eval['%s_%s' % (prefix, k)] = new_eval[k] + + +def _make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans): + """Make evaluation dictionary containing average recision recall.""" + qid_list = sorted(na_probs, key=lambda k: na_probs[k]) + true_pos = 0.0 + cur_p = 1.0 + cur_r = 0.0 + precisions = [1.0] + recalls = [0.0] + avg_prec = 0.0 + for i, qid in enumerate(qid_list): + if qid_to_has_ans[qid]: + true_pos += scores[qid] + cur_p = true_pos / float(i+1) + cur_r = true_pos / float(num_true_pos) + if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i+1]]: + # i.e., if we can put a threshold after this point + avg_prec += cur_p * (cur_r - recalls[-1]) + precisions.append(cur_p) + recalls.append(cur_r) + return {'ap': 100.0 * avg_prec} + + +def _run_precision_recall_analysis( + main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans): + """Run precision recall analysis and return result dictionary.""" + num_true_pos = sum(1 for v in qid_to_has_ans.values() if v) + if num_true_pos == 0: + return + pr_exact = _make_precision_recall_eval( + exact_raw, na_probs, num_true_pos, qid_to_has_ans) + pr_f1 = _make_precision_recall_eval( + f1_raw, na_probs, num_true_pos, qid_to_has_ans) + oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()} + pr_oracle = _make_precision_recall_eval( + oracle_scores, na_probs, num_true_pos, qid_to_has_ans) + _merge_eval(main_eval, pr_exact, 'pr_exact') + _merge_eval(main_eval, pr_f1, 'pr_f1') + _merge_eval(main_eval, pr_oracle, 'pr_oracle') + + +def _find_best_thresh(predictions, scores, na_probs, qid_to_has_ans): + """Find the best threshold for no answer probability.""" + num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) + cur_score = num_no_ans + best_score = cur_score + best_thresh = 0.0 + qid_list = sorted(na_probs, key=lambda k: na_probs[k]) + for qid in qid_list: + if qid not in scores: continue + if qid_to_has_ans[qid]: + diff = scores[qid] + else: + if predictions[qid]: + diff = -1 + else: + diff = 0 + cur_score += diff + if cur_score > best_score: + best_score = cur_score + best_thresh = na_probs[qid] + return 100.0 * best_score / len(scores), best_thresh + + +def _find_all_best_thresh( + main_eval, predictions, exact_raw, f1_raw, na_probs, qid_to_has_ans): + best_exact, exact_thresh = _find_best_thresh( + predictions, exact_raw, na_probs, qid_to_has_ans) + best_f1, f1_thresh = _find_best_thresh( + predictions, f1_raw, na_probs, qid_to_has_ans) + main_eval['final_exact'] = best_exact + main_eval['final_exact_thresh'] = exact_thresh + main_eval['final_f1'] = best_f1 + main_eval['final_f1_thresh'] = f1_thresh + + +def evaluate(dataset, predictions, na_probs=None): + """Evaluate prediction results.""" + new_orig_data = [] + for article in dataset: + for p in 
article['paragraphs']: + for qa in p['qas']: + if qa['id'] in predictions: + new_para = {'qas': [qa]} + new_article = {'paragraphs': [new_para]} + new_orig_data.append(new_article) + dataset = new_orig_data + + if na_probs is None: + na_probs = {k: 0.0 for k in predictions} + qid_to_has_ans = _make_qid_to_has_ans(dataset) # maps qid to True/False + has_ans_qids = [k for k, v in qid_to_has_ans.items() if v] + no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v] + exact_raw, f1_raw = _get_raw_scores(dataset, predictions) + exact_thresh = _apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans) + f1_thresh = _apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans) + out_eval = _make_eval_dict(exact_thresh, f1_thresh) + if has_ans_qids: + has_ans_eval = _make_eval_dict( + exact_thresh, f1_thresh, qid_list=has_ans_qids) + _merge_eval(out_eval, has_ans_eval, 'HasAns') + if no_ans_qids: + no_ans_eval = _make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids) + _merge_eval(out_eval, no_ans_eval, 'NoAns') + + _find_all_best_thresh( + out_eval, predictions, exact_raw, f1_raw, na_probs, qid_to_has_ans) + _run_precision_recall_analysis( + out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans) + return out_eval diff --git a/models/official/nlp/bert/tf1_checkpoint_converter_lib.py b/models/official/nlp/bert/tf1_checkpoint_converter_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..122e455210ae70cd9af04912b95a600a3d23d09a --- /dev/null +++ b/models/official/nlp/bert/tf1_checkpoint_converter_lib.py @@ -0,0 +1,195 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Convert checkpoints created by Estimator (tf1) to be Keras compatible.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow.compat.v1 as tf # TF 1.x + +# Mapping between old <=> new names. The source pattern in original variable +# name will be replaced by destination pattern. 
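+# For example, "bert/embeddings/word_embeddings" is rewritten to "bert_model/word_embeddings/embeddings" by the substitutions below.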
+BERT_NAME_REPLACEMENTS = ( + ("bert", "bert_model"), + ("embeddings/word_embeddings", "word_embeddings/embeddings"), + ("embeddings/token_type_embeddings", + "embedding_postprocessor/type_embeddings"), + ("embeddings/position_embeddings", + "embedding_postprocessor/position_embeddings"), + ("embeddings/LayerNorm", "embedding_postprocessor/layer_norm"), + ("attention/self", "self_attention"), + ("attention/output/dense", "self_attention_output"), + ("attention/output/LayerNorm", "self_attention_layer_norm"), + ("intermediate/dense", "intermediate"), + ("output/dense", "output"), + ("output/LayerNorm", "output_layer_norm"), + ("pooler/dense", "pooler_transform"), +) + +BERT_V2_NAME_REPLACEMENTS = ( + ("bert/", ""), + ("encoder", "transformer"), + ("embeddings/word_embeddings", "word_embeddings/embeddings"), + ("embeddings/token_type_embeddings", "type_embeddings/embeddings"), + ("embeddings/position_embeddings", "position_embedding/embeddings"), + ("embeddings/LayerNorm", "embeddings/layer_norm"), + ("attention/self", "self_attention"), + ("attention/output/dense", "self_attention/attention_output"), + ("attention/output/LayerNorm", "self_attention_layer_norm"), + ("intermediate/dense", "intermediate"), + ("output/dense", "output"), + ("output/LayerNorm", "output_layer_norm"), + ("pooler/dense", "pooler_transform"), + ("cls/predictions/output_bias", "cls/predictions/output_bias/bias"), + ("cls/seq_relationship/output_bias", "predictions/transform/logits/bias"), + ("cls/seq_relationship/output_weights", + "predictions/transform/logits/kernel"), +) + +BERT_PERMUTATIONS = () + +BERT_V2_PERMUTATIONS = (("cls/seq_relationship/output_weights", (1, 0)),) + + +def _bert_name_replacement(var_name, name_replacements): + """Gets the variable name replacement.""" + for src_pattern, tgt_pattern in name_replacements: + if src_pattern in var_name: + old_var_name = var_name + var_name = var_name.replace(src_pattern, tgt_pattern) + tf.logging.info("Converted: %s --> %s", old_var_name, var_name) + return var_name + + +def _has_exclude_patterns(name, exclude_patterns): + """Checks if a string contains substrings that match patterns to exclude.""" + for p in exclude_patterns: + if p in name: + return True + return False + + +def _get_permutation(name, permutations): + """Checks whether a variable requires transposition by pattern matching.""" + for src_pattern, permutation in permutations: + if src_pattern in name: + tf.logging.info("Permuted: %s --> %s", name, permutation) + return permutation + + return None + + +def _get_new_shape(name, shape, num_heads): + """Checks whether a variable requires reshape by pattern matching.""" + if "self_attention/attention_output/kernel" in name: + return tuple([num_heads, shape[0] // num_heads, shape[1]]) + if "self_attention/attention_output/bias" in name: + return shape + + patterns = [ + "self_attention/query", "self_attention/value", "self_attention/key" + ] + for pattern in patterns: + if pattern in name: + if "kernel" in name: + return tuple([shape[0], num_heads, shape[1] // num_heads]) + if "bias" in name: + return tuple([num_heads, shape[0] // num_heads]) + return None + + +def create_v2_checkpoint(model, src_checkpoint, output_path): + """Converts a name-based matched TF V1 checkpoint to TF V2 checkpoint.""" + # Uses streaming-restore in eager model to read V1 name-based checkpoints. 
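+  # `assert_existing_objects_matched` raises if any variable built in `model` has no matching value in the name-converted V1 checkpoint, surfacing incomplete name mappings early.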
+ model.load_weights(src_checkpoint).assert_existing_objects_matched() + checkpoint = tf.train.Checkpoint(model=model) + checkpoint.save(output_path) + + +def convert(checkpoint_from_path, + checkpoint_to_path, + num_heads, + name_replacements, + permutations, + exclude_patterns=None): + """Migrates the names of variables within a checkpoint. + + Args: + checkpoint_from_path: Path to source checkpoint to be read in. + checkpoint_to_path: Path to checkpoint to be written out. + num_heads: The number of heads of the model. + name_replacements: A list of tuples of the form (match_str, replace_str) + describing variable names to adjust. + permutations: A list of tuples of the form (match_str, permutation) + describing permutations to apply to given variables. Note that match_str + should match the original variable name, not the replaced one. + exclude_patterns: A list of string patterns to exclude variables from + checkpoint conversion. + + Returns: + A dictionary that maps the new variable names to the Variable objects. + A dictionary that maps the old variable names to the new variable names. + """ + with tf.Graph().as_default(): + tf.logging.info("Reading checkpoint_from_path %s", checkpoint_from_path) + reader = tf.train.NewCheckpointReader(checkpoint_from_path) + name_shape_map = reader.get_variable_to_shape_map() + new_variable_map = {} + conversion_map = {} + for var_name in name_shape_map: + if exclude_patterns and _has_exclude_patterns(var_name, exclude_patterns): + continue + # Get the original tensor data. + tensor = reader.get_tensor(var_name) + + # Look up the new variable name, if any. + new_var_name = _bert_name_replacement(var_name, name_replacements) + + # See if we need to reshape the underlying tensor. + new_shape = None + if num_heads > 0: + new_shape = _get_new_shape(new_var_name, tensor.shape, num_heads) + if new_shape: + tf.logging.info("Veriable %s has a shape change from %s to %s", + + var_name, tensor.shape, new_shape) + tensor = np.reshape(tensor, new_shape) + + # See if we need to permute the underlying tensor. + permutation = _get_permutation(var_name, permutations) + if permutation: + tensor = np.transpose(tensor, permutation) + + # Create a new variable with the possibly-reshaped or transposed tensor. + var = tf.Variable(tensor, name=var_name) + + # Save the variable into the new variable map. + new_variable_map[new_var_name] = var + + # Keep a list of converter variables for sanity checking. + if new_var_name != var_name: + conversion_map[var_name] = new_var_name + + saver = tf.train.Saver(new_variable_map) + + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + tf.logging.info("Writing checkpoint_to_path %s", checkpoint_to_path) + saver.save(sess, checkpoint_to_path, write_meta_graph=False) + + tf.logging.info("Summary:") + tf.logging.info(" Converted %d variable name(s).", len(new_variable_map)) + tf.logging.info(" Converted: %s", str(conversion_map)) diff --git a/models/official/nlp/bert/tf2_encoder_checkpoint_converter.py b/models/official/nlp/bert/tf2_encoder_checkpoint_converter.py new file mode 100644 index 0000000000000000000000000000000000000000..2faf6ea2cfb9f0d71d0a79dff101e0408fa41778 --- /dev/null +++ b/models/official/nlp/bert/tf2_encoder_checkpoint_converter.py @@ -0,0 +1,109 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""A converter from a V1 BERT encoder checkpoint to a V2 encoder checkpoint. + +The conversion will yield an object-oriented checkpoint that can be used +to restore a TransformerEncoder object. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from absl import app +from absl import flags + +import tensorflow as tf +from official.modeling import activations +from official.nlp.bert import configs +from official.nlp.bert import tf1_checkpoint_converter_lib +from official.nlp.modeling import networks + +FLAGS = flags.FLAGS + +flags.DEFINE_string("bert_config_file", None, + "Bert configuration file to define core bert layers.") +flags.DEFINE_string( + "checkpoint_to_convert", None, + "Initial checkpoint from a pretrained BERT model core (that is, only the " + "BertModel, with no task heads.)") +flags.DEFINE_string("converted_checkpoint_path", None, + "Name for the created object-based V2 checkpoint.") + + +def _create_bert_model(cfg): + """Creates a BERT keras core model from BERT configuration. + + Args: + cfg: A `BertConfig` to create the core model. + Returns: + A TransformerEncoder netowork. + """ + bert_encoder = networks.TransformerEncoder( + vocab_size=cfg.vocab_size, + hidden_size=cfg.hidden_size, + num_layers=cfg.num_hidden_layers, + num_attention_heads=cfg.num_attention_heads, + intermediate_size=cfg.intermediate_size, + activation=activations.gelu, + dropout_rate=cfg.hidden_dropout_prob, + attention_dropout_rate=cfg.attention_probs_dropout_prob, + sequence_length=cfg.max_position_embeddings, + type_vocab_size=cfg.type_vocab_size, + initializer=tf.keras.initializers.TruncatedNormal( + stddev=cfg.initializer_range), + embedding_width=cfg.embedding_size) + + return bert_encoder + + +def convert_checkpoint(bert_config, output_path, v1_checkpoint): + """Converts a V1 checkpoint into an OO V2 checkpoint.""" + output_dir, _ = os.path.split(output_path) + + # Create a temporary V1 name-converted checkpoint in the output directory. + temporary_checkpoint_dir = os.path.join(output_dir, "temp_v1") + temporary_checkpoint = os.path.join(temporary_checkpoint_dir, "ckpt") + tf1_checkpoint_converter_lib.convert( + checkpoint_from_path=v1_checkpoint, + checkpoint_to_path=temporary_checkpoint, + num_heads=bert_config.num_attention_heads, + name_replacements=tf1_checkpoint_converter_lib.BERT_V2_NAME_REPLACEMENTS, + permutations=tf1_checkpoint_converter_lib.BERT_V2_PERMUTATIONS, + exclude_patterns=["adam", "Adam"]) + + # Create a V2 checkpoint from the temporary checkpoint. + model = _create_bert_model(bert_config) + tf1_checkpoint_converter_lib.create_v2_checkpoint(model, temporary_checkpoint, + output_path) + + # Clean up the temporary checkpoint, if it exists. + try: + tf.io.gfile.rmtree(temporary_checkpoint_dir) + except tf.errors.OpError: + # If it doesn't exist, we don't need to clean it up; continue. 
+ pass + + +def main(_): + output_path = FLAGS.converted_checkpoint_path + v1_checkpoint = FLAGS.checkpoint_to_convert + bert_config = configs.BertConfig.from_json_file(FLAGS.bert_config_file) + convert_checkpoint(bert_config, output_path, v1_checkpoint) + + +if __name__ == "__main__": + app.run(main) diff --git a/models/official/nlp/bert/tokenization.py b/models/official/nlp/bert/tokenization.py new file mode 100644 index 0000000000000000000000000000000000000000..b0f7e27e320c727c4eee511fc63ebb63929250c7 --- /dev/null +++ b/models/official/nlp/bert/tokenization.py @@ -0,0 +1,545 @@ +# coding=utf-8 +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tokenization classes implementation. + +The file is forked from: +https://github.com/google-research/bert/blob/master/tokenization.py. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import re +import unicodedata + +import six +import tensorflow as tf + +import sentencepiece as spm + +SPIECE_UNDERLINE = "▁" + + +def validate_case_matches_checkpoint(do_lower_case, init_checkpoint): + """Checks whether the casing config is consistent with the checkpoint name.""" + + # The casing has to be passed in by the user and there is no explicit check + # as to whether it matches the checkpoint. The casing information probably + # should have been stored in the bert_config.json file, but it's not, so + # we have to heuristically detect it to validate. + + if not init_checkpoint: + return + + m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint) + if m is None: + return + + model_name = m.group(1) + + lower_models = [ + "uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12", + "multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12" + ] + + cased_models = [ + "cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16", + "multi_cased_L-12_H-768_A-12" + ] + + is_bad_config = False + if model_name in lower_models and not do_lower_case: + is_bad_config = True + actual_flag = "False" + case_name = "lowercased" + opposite_flag = "True" + + if model_name in cased_models and do_lower_case: + is_bad_config = True + actual_flag = "True" + case_name = "cased" + opposite_flag = "False" + + if is_bad_config: + raise ValueError( + "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. " + "However, `%s` seems to be a %s model, so you " + "should pass in `--do_lower_case=%s` so that the fine-tuning matches " + "how the model was pre-training. If this error is wrong, please " + "just comment out this check." 
% + (actual_flag, init_checkpoint, model_name, case_name, opposite_flag)) + + +def convert_to_unicode(text): + """Converts `text` to Unicode (if it's not already), assuming utf-8 input.""" + if six.PY3: + if isinstance(text, str): + return text + elif isinstance(text, bytes): + return text.decode("utf-8", "ignore") + else: + raise ValueError("Unsupported string type: %s" % (type(text))) + elif six.PY2: + if isinstance(text, str): + return text.decode("utf-8", "ignore") + elif isinstance(text, unicode): + return text + else: + raise ValueError("Unsupported string type: %s" % (type(text))) + else: + raise ValueError("Not running on Python2 or Python 3?") + + +def printable_text(text): + """Returns text encoded in a way suitable for print or `tf.logging`.""" + + # These functions want `str` for both Python2 and Python3, but in one case + # it's a Unicode string and in the other it's a byte string. + if six.PY3: + if isinstance(text, str): + return text + elif isinstance(text, bytes): + return text.decode("utf-8", "ignore") + else: + raise ValueError("Unsupported string type: %s" % (type(text))) + elif six.PY2: + if isinstance(text, str): + return text + elif isinstance(text, unicode): + return text.encode("utf-8") + else: + raise ValueError("Unsupported string type: %s" % (type(text))) + else: + raise ValueError("Not running on Python2 or Python 3?") + + +def load_vocab(vocab_file): + """Loads a vocabulary file into a dictionary.""" + vocab = collections.OrderedDict() + index = 0 + with tf.io.gfile.GFile(vocab_file, "r") as reader: + while True: + token = convert_to_unicode(reader.readline()) + if not token: + break + token = token.strip() + vocab[token] = index + index += 1 + return vocab + + +def convert_by_vocab(vocab, items): + """Converts a sequence of [tokens|ids] using the vocab.""" + output = [] + for item in items: + output.append(vocab[item]) + return output + + +def convert_tokens_to_ids(vocab, tokens): + return convert_by_vocab(vocab, tokens) + + +def convert_ids_to_tokens(inv_vocab, ids): + return convert_by_vocab(inv_vocab, ids) + + +def whitespace_tokenize(text): + """Runs basic whitespace cleaning and splitting on a piece of text.""" + text = text.strip() + if not text: + return [] + tokens = text.split() + return tokens + + +class FullTokenizer(object): + """Runs end-to-end tokenziation.""" + + def __init__(self, vocab_file, do_lower_case=True, split_on_punc=True): + self.vocab = load_vocab(vocab_file) + self.inv_vocab = {v: k for k, v in self.vocab.items()} + self.basic_tokenizer = BasicTokenizer( + do_lower_case=do_lower_case, split_on_punc=split_on_punc) + self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) + + def tokenize(self, text): + split_tokens = [] + for token in self.basic_tokenizer.tokenize(text): + for sub_token in self.wordpiece_tokenizer.tokenize(token): + split_tokens.append(sub_token) + + return split_tokens + + def convert_tokens_to_ids(self, tokens): + return convert_by_vocab(self.vocab, tokens) + + def convert_ids_to_tokens(self, ids): + return convert_by_vocab(self.inv_vocab, ids) + + +class BasicTokenizer(object): + """Runs basic tokenization (punctuation splitting, lower casing, etc.).""" + + def __init__(self, do_lower_case=True, split_on_punc=True): + """Constructs a BasicTokenizer. + + Args: + do_lower_case: Whether to lower case the input. + split_on_punc: Whether to apply split on punctuations. By default BERT + starts a new token for punctuations. This makes detokenization difficult + for tasks like seq2seq decoding. 
+ """ + self.do_lower_case = do_lower_case + self.split_on_punc = split_on_punc + + def tokenize(self, text): + """Tokenizes a piece of text.""" + text = convert_to_unicode(text) + text = self._clean_text(text) + + # This was added on November 1st, 2018 for the multilingual and Chinese + # models. This is also applied to the English models now, but it doesn't + # matter since the English models were not trained on any Chinese data + # and generally don't have any Chinese data in them (there are Chinese + # characters in the vocabulary because Wikipedia does have some Chinese + # words in the English Wikipedia.). + text = self._tokenize_chinese_chars(text) + + orig_tokens = whitespace_tokenize(text) + split_tokens = [] + for token in orig_tokens: + if self.do_lower_case: + token = token.lower() + token = self._run_strip_accents(token) + if self.split_on_punc: + split_tokens.extend(self._run_split_on_punc(token)) + else: + split_tokens.append(token) + + output_tokens = whitespace_tokenize(" ".join(split_tokens)) + return output_tokens + + def _run_strip_accents(self, text): + """Strips accents from a piece of text.""" + text = unicodedata.normalize("NFD", text) + output = [] + for char in text: + cat = unicodedata.category(char) + if cat == "Mn": + continue + output.append(char) + return "".join(output) + + def _run_split_on_punc(self, text): + """Splits punctuation on a piece of text.""" + chars = list(text) + i = 0 + start_new_word = True + output = [] + while i < len(chars): + char = chars[i] + if _is_punctuation(char): + output.append([char]) + start_new_word = True + else: + if start_new_word: + output.append([]) + start_new_word = False + output[-1].append(char) + i += 1 + + return ["".join(x) for x in output] + + def _tokenize_chinese_chars(self, text): + """Adds whitespace around any CJK character.""" + output = [] + for char in text: + cp = ord(char) + if self._is_chinese_char(cp): + output.append(" ") + output.append(char) + output.append(" ") + else: + output.append(char) + return "".join(output) + + def _is_chinese_char(self, cp): + """Checks whether CP is the codepoint of a CJK character.""" + # This defines a "chinese character" as anything in the CJK Unicode block: + # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) + # + # Note that the CJK Unicode block is NOT all Japanese and Korean characters, + # despite its name. The modern Korean Hangul alphabet is a different block, + # as is Japanese Hiragana and Katakana. Those alphabets are used to write + # space-separated words, so they are not treated specially and handled + # like the all of the other languages. 
+ if ((cp >= 0x4E00 and cp <= 0x9FFF) or # + (cp >= 0x3400 and cp <= 0x4DBF) or # + (cp >= 0x20000 and cp <= 0x2A6DF) or # + (cp >= 0x2A700 and cp <= 0x2B73F) or # + (cp >= 0x2B740 and cp <= 0x2B81F) or # + (cp >= 0x2B820 and cp <= 0x2CEAF) or + (cp >= 0xF900 and cp <= 0xFAFF) or # + (cp >= 0x2F800 and cp <= 0x2FA1F)): # + return True + + return False + + def _clean_text(self, text): + """Performs invalid character removal and whitespace cleanup on text.""" + output = [] + for char in text: + cp = ord(char) + if cp == 0 or cp == 0xfffd or _is_control(char): + continue + if _is_whitespace(char): + output.append(" ") + else: + output.append(char) + return "".join(output) + + +class WordpieceTokenizer(object): + """Runs WordPiece tokenziation.""" + + def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=400): + self.vocab = vocab + self.unk_token = unk_token + self.max_input_chars_per_word = max_input_chars_per_word + + def tokenize(self, text): + """Tokenizes a piece of text into its word pieces. + + This uses a greedy longest-match-first algorithm to perform tokenization + using the given vocabulary. + + For example: + input = "unaffable" + output = ["un", "##aff", "##able"] + + Args: + text: A single token or whitespace separated tokens. This should have + already been passed through `BasicTokenizer. + + Returns: + A list of wordpiece tokens. + """ + + text = convert_to_unicode(text) + + output_tokens = [] + for token in whitespace_tokenize(text): + chars = list(token) + if len(chars) > self.max_input_chars_per_word: + output_tokens.append(self.unk_token) + continue + + is_bad = False + start = 0 + sub_tokens = [] + while start < len(chars): + end = len(chars) + cur_substr = None + while start < end: + substr = "".join(chars[start:end]) + if start > 0: + substr = "##" + substr + if substr in self.vocab: + cur_substr = substr + break + end -= 1 + if cur_substr is None: + is_bad = True + break + sub_tokens.append(cur_substr) + start = end + + if is_bad: + output_tokens.append(self.unk_token) + else: + output_tokens.extend(sub_tokens) + return output_tokens + + +def _is_whitespace(char): + """Checks whether `chars` is a whitespace character.""" + # \t, \n, and \r are technically control characters but we treat them + # as whitespace since they are generally considered as such. + if char == " " or char == "\t" or char == "\n" or char == "\r": + return True + cat = unicodedata.category(char) + if cat == "Zs": + return True + return False + + +def _is_control(char): + """Checks whether `chars` is a control character.""" + # These are technically control characters but we count them as whitespace + # characters. + if char == "\t" or char == "\n" or char == "\r": + return False + cat = unicodedata.category(char) + if cat in ("Cc", "Cf"): + return True + return False + + +def _is_punctuation(char): + """Checks whether `chars` is a punctuation character.""" + cp = ord(char) + # We treat all non-letter/number ASCII as punctuation. + # Characters such as "^", "$", and "`" are not in the Unicode + # Punctuation class but we treat them as punctuation anyways, for + # consistency. + if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or + (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): + return True + cat = unicodedata.category(char) + if cat.startswith("P"): + return True + return False + + +def preprocess_text(inputs, remove_space=True, lower=False): + """Preprocesses data by removing extra space and normalize data. 
+ + This method is used together with sentence piece tokenizer and is forked from: + https://github.com/google-research/google-research/blob/master/albert/tokenization.py + + Args: + inputs: The input text. + remove_space: Whether to remove the extra space. + lower: Whether to lowercase the text. + + Returns: + The preprocessed text. + + """ + outputs = inputs + if remove_space: + outputs = " ".join(inputs.strip().split()) + + if six.PY2 and isinstance(outputs, str): + try: + outputs = six.ensure_text(outputs, "utf-8") + except UnicodeDecodeError: + outputs = six.ensure_text(outputs, "latin-1") + + outputs = unicodedata.normalize("NFKD", outputs) + outputs = "".join([c for c in outputs if not unicodedata.combining(c)]) + if lower: + outputs = outputs.lower() + + return outputs + + +def encode_pieces(sp_model, text, sample=False): + """Segements text into pieces. + + This method is used together with sentence piece tokenizer and is forked from: + https://github.com/google-research/google-research/blob/master/albert/tokenization.py + + + Args: + sp_model: A spm.SentencePieceProcessor object. + text: The input text to be segemented. + sample: Whether to randomly sample a segmentation output or return a + deterministic one. + + Returns: + A list of token pieces. + """ + if six.PY2 and isinstance(text, six.text_type): + text = six.ensure_binary(text, "utf-8") + + if not sample: + pieces = sp_model.EncodeAsPieces(text) + else: + pieces = sp_model.SampleEncodeAsPieces(text, 64, 0.1) + new_pieces = [] + for piece in pieces: + piece = printable_text(piece) + if len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit(): + cur_pieces = sp_model.EncodeAsPieces(piece[:-1].replace( + SPIECE_UNDERLINE, "")) + if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: + if len(cur_pieces[0]) == 1: + cur_pieces = cur_pieces[1:] + else: + cur_pieces[0] = cur_pieces[0][1:] + cur_pieces.append(piece[-1]) + new_pieces.extend(cur_pieces) + else: + new_pieces.append(piece) + + return new_pieces + + +def encode_ids(sp_model, text, sample=False): + """Segments text and return token ids. + + This method is used together with sentence piece tokenizer and is forked from: + https://github.com/google-research/google-research/blob/master/albert/tokenization.py + + Args: + sp_model: A spm.SentencePieceProcessor object. + text: The input text to be segemented. + sample: Whether to randomly sample a segmentation output or return a + deterministic one. + + Returns: + A list of token ids. + """ + pieces = encode_pieces(sp_model, text, sample=sample) + ids = [sp_model.PieceToId(piece) for piece in pieces] + return ids + + +class FullSentencePieceTokenizer(object): + """Runs end-to-end sentence piece tokenization. + + The interface of this class is intended to keep the same as above + `FullTokenizer` class for easier usage. + """ + + def __init__(self, sp_model_file): + """Inits FullSentencePieceTokenizer. + + Args: + sp_model_file: The path to the sentence piece model file. 
+ """ + self.sp_model = spm.SentencePieceProcessor() + self.sp_model.Load(sp_model_file) + self.vocab = { + self.sp_model.IdToPiece(i): i + for i in six.moves.range(self.sp_model.GetPieceSize()) + } + + def tokenize(self, text): + """Tokenizes text into pieces.""" + return encode_pieces(self.sp_model, text) + + def convert_tokens_to_ids(self, tokens): + """Converts a list of tokens to a list of ids.""" + return [self.sp_model.PieceToId(printable_text(token)) for token in tokens] + + def convert_ids_to_tokens(self, ids): + """Converts a list of ids ot a list of tokens.""" + return [self.sp_model.IdToPiece(id_) for id_ in ids] diff --git a/models/official/nlp/bert/tokenization_test.py b/models/official/nlp/bert/tokenization_test.py new file mode 100644 index 0000000000000000000000000000000000000000..4a0503c3ed6999e3bd81aec4de8f7d64ec733bd9 --- /dev/null +++ b/models/official/nlp/bert/tokenization_test.py @@ -0,0 +1,160 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import tempfile + +import six +import tensorflow as tf + +from official.nlp.bert import tokenization + + +class TokenizationTest(tf.test.TestCase): + """Tokenization test. + + The implementation is forked from + https://github.com/google-research/bert/blob/master/tokenization_test.py." + """ + + def test_full_tokenizer(self): + vocab_tokens = [ + "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", + "##ing", "," + ] + with tempfile.NamedTemporaryFile(delete=False) as vocab_writer: + if six.PY2: + vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) + else: + vocab_writer.write("".join([x + "\n" for x in vocab_tokens + ]).encode("utf-8")) + + vocab_file = vocab_writer.name + + tokenizer = tokenization.FullTokenizer(vocab_file) + os.unlink(vocab_file) + + tokens = tokenizer.tokenize(u"UNwant\u00E9d,running") + self.assertAllEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"]) + + self.assertAllEqual( + tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9]) + + def test_chinese(self): + tokenizer = tokenization.BasicTokenizer() + + self.assertAllEqual( + tokenizer.tokenize(u"ah\u535A\u63A8zz"), + [u"ah", u"\u535A", u"\u63A8", u"zz"]) + + def test_basic_tokenizer_lower(self): + tokenizer = tokenization.BasicTokenizer(do_lower_case=True) + + self.assertAllEqual( + tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "), + ["hello", "!", "how", "are", "you", "?"]) + self.assertAllEqual(tokenizer.tokenize(u"H\u00E9llo"), ["hello"]) + + def test_basic_tokenizer_no_lower(self): + tokenizer = tokenization.BasicTokenizer(do_lower_case=False) + + self.assertAllEqual( + tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? 
"), + ["HeLLo", "!", "how", "Are", "yoU", "?"]) + + def test_basic_tokenizer_no_split_on_punc(self): + tokenizer = tokenization.BasicTokenizer( + do_lower_case=True, split_on_punc=False) + + self.assertAllEqual( + tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "), + ["hello!how", "are", "you?"]) + + def test_wordpiece_tokenizer(self): + vocab_tokens = [ + "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", + "##ing", "##!", "!" + ] + + vocab = {} + for (i, token) in enumerate(vocab_tokens): + vocab[token] = i + tokenizer = tokenization.WordpieceTokenizer(vocab=vocab) + + self.assertAllEqual(tokenizer.tokenize(""), []) + + self.assertAllEqual( + tokenizer.tokenize("unwanted running"), + ["un", "##want", "##ed", "runn", "##ing"]) + + self.assertAllEqual( + tokenizer.tokenize("unwanted running !"), + ["un", "##want", "##ed", "runn", "##ing", "!"]) + + self.assertAllEqual( + tokenizer.tokenize("unwanted running!"), + ["un", "##want", "##ed", "runn", "##ing", "##!"]) + + self.assertAllEqual( + tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"]) + + def test_convert_tokens_to_ids(self): + vocab_tokens = [ + "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", + "##ing" + ] + + vocab = {} + for (i, token) in enumerate(vocab_tokens): + vocab[token] = i + + self.assertAllEqual( + tokenization.convert_tokens_to_ids( + vocab, ["un", "##want", "##ed", "runn", "##ing"]), [7, 4, 5, 8, 9]) + + def test_is_whitespace(self): + self.assertTrue(tokenization._is_whitespace(u" ")) + self.assertTrue(tokenization._is_whitespace(u"\t")) + self.assertTrue(tokenization._is_whitespace(u"\r")) + self.assertTrue(tokenization._is_whitespace(u"\n")) + self.assertTrue(tokenization._is_whitespace(u"\u00A0")) + + self.assertFalse(tokenization._is_whitespace(u"A")) + self.assertFalse(tokenization._is_whitespace(u"-")) + + def test_is_control(self): + self.assertTrue(tokenization._is_control(u"\u0005")) + + self.assertFalse(tokenization._is_control(u"A")) + self.assertFalse(tokenization._is_control(u" ")) + self.assertFalse(tokenization._is_control(u"\t")) + self.assertFalse(tokenization._is_control(u"\r")) + self.assertFalse(tokenization._is_control(u"\U0001F4A9")) + + def test_is_punctuation(self): + self.assertTrue(tokenization._is_punctuation(u"-")) + self.assertTrue(tokenization._is_punctuation(u"$")) + self.assertTrue(tokenization._is_punctuation(u"`")) + self.assertTrue(tokenization._is_punctuation(u".")) + + self.assertFalse(tokenization._is_punctuation(u"A")) + self.assertFalse(tokenization._is_punctuation(u" ")) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/configs/__init__.py b/models/official/nlp/configs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/models/official/nlp/configs/__init__.py @@ -0,0 +1 @@ + diff --git a/models/official/nlp/configs/bert.py b/models/official/nlp/configs/bert.py new file mode 100644 index 0000000000000000000000000000000000000000..48b83107f20a2b3251624a24b580412b93ed1979 --- /dev/null +++ b/models/official/nlp/configs/bert.py @@ -0,0 +1,151 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Multi-head BERT encoder network with classification heads. + +Includes configurations and instantiation methods. +""" +from typing import List, Optional, Text + +import dataclasses +import tensorflow as tf + +from official.modeling import tf_utils +from official.modeling.hyperparams import base_config +from official.modeling.hyperparams import config_definitions as cfg +from official.nlp.configs import encoders +from official.nlp.modeling import layers +from official.nlp.modeling.models import bert_pretrainer + + +@dataclasses.dataclass +class ClsHeadConfig(base_config.Config): + inner_dim: int = 0 + num_classes: int = 2 + activation: Optional[Text] = "tanh" + dropout_rate: float = 0.0 + cls_token_idx: int = 0 + name: Optional[Text] = None + + +@dataclasses.dataclass +class BertPretrainerConfig(base_config.Config): + """BERT encoder configuration.""" + num_masked_tokens: int = 76 + encoder: encoders.TransformerEncoderConfig = ( + encoders.TransformerEncoderConfig()) + cls_heads: List[ClsHeadConfig] = dataclasses.field(default_factory=list) + + +def instantiate_classification_heads_from_cfgs( + cls_head_configs: List[ClsHeadConfig]) -> List[layers.ClassificationHead]: + return [ + layers.ClassificationHead(**cfg.as_dict()) for cfg in cls_head_configs + ] if cls_head_configs else [] + + +def instantiate_bertpretrainer_from_cfg( + config: BertPretrainerConfig, + encoder_network: Optional[tf.keras.Model] = None + ) -> bert_pretrainer.BertPretrainerV2: + """Instantiates a BertPretrainer from the config.""" + encoder_cfg = config.encoder + if encoder_network is None: + encoder_network = encoders.instantiate_encoder_from_cfg(encoder_cfg) + return bert_pretrainer.BertPretrainerV2( + config.num_masked_tokens, + mlm_activation=tf_utils.get_activation(encoder_cfg.hidden_activation), + mlm_initializer=tf.keras.initializers.TruncatedNormal( + stddev=encoder_cfg.initializer_range), + encoder_network=encoder_network, + classification_heads=instantiate_classification_heads_from_cfgs( + config.cls_heads)) + + +@dataclasses.dataclass +class BertPretrainDataConfig(cfg.DataConfig): + """Data config for BERT pretraining task (tasks/masked_lm).""" + input_path: str = "" + global_batch_size: int = 512 + is_training: bool = True + seq_length: int = 512 + max_predictions_per_seq: int = 76 + use_next_sentence_label: bool = True + use_position_id: bool = False + + +@dataclasses.dataclass +class BertPretrainEvalDataConfig(BertPretrainDataConfig): + """Data config for the eval set in BERT pretraining task (tasks/masked_lm).""" + input_path: str = "" + global_batch_size: int = 512 + is_training: bool = False + + +@dataclasses.dataclass +class SentencePredictionDataConfig(cfg.DataConfig): + """Data config for sentence prediction task (tasks/sentence_prediction).""" + input_path: str = "" + global_batch_size: int = 32 + is_training: bool = True + seq_length: int = 128 + + +@dataclasses.dataclass +class SentencePredictionDevDataConfig(cfg.DataConfig): + """Dev Data config for sentence prediction (tasks/sentence_prediction).""" + 
input_path: str = "" + global_batch_size: int = 32 + is_training: bool = False + seq_length: int = 128 + drop_remainder: bool = False + + +@dataclasses.dataclass +class QADataConfig(cfg.DataConfig): + """Data config for question answering task (tasks/question_answering).""" + input_path: str = "" + global_batch_size: int = 48 + is_training: bool = True + seq_length: int = 384 + + +@dataclasses.dataclass +class QADevDataConfig(cfg.DataConfig): + """Dev Data config for queston answering (tasks/question_answering).""" + input_path: str = "" + global_batch_size: int = 48 + is_training: bool = False + seq_length: int = 384 + drop_remainder: bool = False + + +@dataclasses.dataclass +class TaggingDataConfig(cfg.DataConfig): + """Data config for tagging (tasks/tagging).""" + input_path: str = "" + global_batch_size: int = 48 + is_training: bool = True + seq_length: int = 384 + + +@dataclasses.dataclass +class TaggingDevDataConfig(cfg.DataConfig): + """Dev Data config for tagging (tasks/tagging).""" + input_path: str = "" + global_batch_size: int = 48 + is_training: bool = False + seq_length: int = 384 + drop_remainder: bool = False diff --git a/models/official/nlp/configs/bert_test.py b/models/official/nlp/configs/bert_test.py new file mode 100644 index 0000000000000000000000000000000000000000..c734b190ea71697350cc0fb84cf50582afdb96b3 --- /dev/null +++ b/models/official/nlp/configs/bert_test.py @@ -0,0 +1,65 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for BERT configurations and models instantiation.""" + +import tensorflow as tf + +from official.nlp.configs import bert +from official.nlp.configs import encoders + + +class BertModelsTest(tf.test.TestCase): + + def test_network_invocation(self): + config = bert.BertPretrainerConfig( + encoder=encoders.TransformerEncoderConfig(vocab_size=10, num_layers=1)) + _ = bert.instantiate_bertpretrainer_from_cfg(config) + + # Invokes with classification heads. 
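+    # Attaches a single two-class "next_sentence" classification head on top of the encoder.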
+ config = bert.BertPretrainerConfig( + encoder=encoders.TransformerEncoderConfig(vocab_size=10, num_layers=1), + cls_heads=[ + bert.ClsHeadConfig( + inner_dim=10, num_classes=2, name="next_sentence") + ]) + _ = bert.instantiate_bertpretrainer_from_cfg(config) + + with self.assertRaises(ValueError): + config = bert.BertPretrainerConfig( + encoder=encoders.TransformerEncoderConfig( + vocab_size=10, num_layers=1), + cls_heads=[ + bert.ClsHeadConfig( + inner_dim=10, num_classes=2, name="next_sentence"), + bert.ClsHeadConfig( + inner_dim=10, num_classes=2, name="next_sentence") + ]) + _ = bert.instantiate_bertpretrainer_from_cfg(config) + + def test_checkpoint_items(self): + config = bert.BertPretrainerConfig( + encoder=encoders.TransformerEncoderConfig(vocab_size=10, num_layers=1), + cls_heads=[ + bert.ClsHeadConfig( + inner_dim=10, num_classes=2, name="next_sentence") + ]) + encoder = bert.instantiate_bertpretrainer_from_cfg(config) + self.assertSameElements(encoder.checkpoint_items.keys(), + ["encoder", "next_sentence.pooler_dense"]) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/configs/encoders.py b/models/official/nlp/configs/encoders.py new file mode 100644 index 0000000000000000000000000000000000000000..0af5b733d9a7b60af21a8be9021fafdfa085e34a --- /dev/null +++ b/models/official/nlp/configs/encoders.py @@ -0,0 +1,62 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Transformer Encoders. + +Includes configurations and instantiation methods. 
+""" + +import dataclasses +import tensorflow as tf + +from official.modeling import tf_utils +from official.modeling.hyperparams import base_config +from official.nlp.modeling import networks + + +@dataclasses.dataclass +class TransformerEncoderConfig(base_config.Config): + """BERT encoder configuration.""" + vocab_size: int = 30522 + hidden_size: int = 768 + num_layers: int = 12 + num_attention_heads: int = 12 + hidden_activation: str = "gelu" + intermediate_size: int = 3072 + dropout_rate: float = 0.1 + attention_dropout_rate: float = 0.1 + max_position_embeddings: int = 512 + type_vocab_size: int = 2 + initializer_range: float = 0.02 + + +def instantiate_encoder_from_cfg( + config: TransformerEncoderConfig) -> networks.TransformerEncoder: + """Instantiate a Transformer encoder network from TransformerEncoderConfig.""" + encoder_network = networks.TransformerEncoder( + vocab_size=config.vocab_size, + hidden_size=config.hidden_size, + num_layers=config.num_layers, + num_attention_heads=config.num_attention_heads, + intermediate_size=config.intermediate_size, + activation=tf_utils.get_activation(config.hidden_activation), + dropout_rate=config.dropout_rate, + attention_dropout_rate=config.attention_dropout_rate, + sequence_length=None, + max_sequence_length=config.max_position_embeddings, + type_vocab_size=config.type_vocab_size, + initializer=tf.keras.initializers.TruncatedNormal( + stddev=config.initializer_range)) + return encoder_network diff --git a/models/official/nlp/data/__init__.py b/models/official/nlp/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/official/nlp/data/__pycache__/__init__.cpython-310.pyc b/models/official/nlp/data/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3866844f42b63caffc58cebe79447266ef6bad5d Binary files /dev/null and b/models/official/nlp/data/__pycache__/__init__.cpython-310.pyc differ diff --git a/models/official/nlp/data/__pycache__/__init__.cpython-38.pyc b/models/official/nlp/data/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b43d026004ab9d6e35ecbbe5888b00e9cd8a4677 Binary files /dev/null and b/models/official/nlp/data/__pycache__/__init__.cpython-38.pyc differ diff --git a/models/official/nlp/data/__pycache__/__init__.cpython-39.pyc b/models/official/nlp/data/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2ae6127a7ed41164727b124e616b4a4f69227a4 Binary files /dev/null and b/models/official/nlp/data/__pycache__/__init__.cpython-39.pyc differ diff --git a/models/official/nlp/data/__pycache__/classifier_data_lib.cpython-310.pyc b/models/official/nlp/data/__pycache__/classifier_data_lib.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..303a264d77eb106f58c195f0d8d4511afee9d9d7 Binary files /dev/null and b/models/official/nlp/data/__pycache__/classifier_data_lib.cpython-310.pyc differ diff --git a/models/official/nlp/data/__pycache__/classifier_data_lib.cpython-38.pyc b/models/official/nlp/data/__pycache__/classifier_data_lib.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a935018c088877b49469e9a4df486eacb64837f0 Binary files /dev/null and b/models/official/nlp/data/__pycache__/classifier_data_lib.cpython-38.pyc differ diff --git a/models/official/nlp/data/__pycache__/classifier_data_lib.cpython-39.pyc 
b/models/official/nlp/data/__pycache__/classifier_data_lib.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19fe41408ff4122c5998a2d2f15b31cf63bba55a Binary files /dev/null and b/models/official/nlp/data/__pycache__/classifier_data_lib.cpython-39.pyc differ diff --git a/models/official/nlp/data/classifier_data_lib.py b/models/official/nlp/data/classifier_data_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..67c47d7874c4d3a40b23e5280e14ed8716a23176 --- /dev/null +++ b/models/official/nlp/data/classifier_data_lib.py @@ -0,0 +1,1088 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""BERT library to process data for classification task.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import csv +import importlib +import os + +from absl import logging +import tensorflow as tf +import tensorflow_datasets as tfds + +from official.nlp.bert import tokenization + + +class InputExample(object): + """A single training/test example for simple sequence classification.""" + + def __init__(self, + guid, + text_a, + text_b=None, + label=None, + weight=None, + int_iden=None): + """Constructs a InputExample. + + Args: + guid: Unique id for the example. + text_a: string. The untokenized text of the first sequence. For single + sequence tasks, only this sequence must be specified. + text_b: (Optional) string. The untokenized text of the second sequence. + Only must be specified for sequence pair tasks. + label: (Optional) string. The label of the example. This should be + specified for train and dev examples, but not for test examples. + weight: (Optional) float. The weight of the example to be used during + training. + int_iden: (Optional) int. The int identification number of example in the + corpus. 
+ """ + self.guid = guid + self.text_a = text_a + self.text_b = text_b + self.label = label + self.weight = weight + self.int_iden = int_iden + + +class InputFeatures(object): + """A single set of features of data.""" + + def __init__(self, + input_ids, + input_mask, + segment_ids, + label_id, + is_real_example=True, + weight=None, + int_iden=None): + self.input_ids = input_ids + self.input_mask = input_mask + self.segment_ids = segment_ids + self.label_id = label_id + self.is_real_example = is_real_example + self.weight = weight + self.int_iden = int_iden + + +class DataProcessor(object): + """Base class for data converters for sequence classification data sets.""" + + def __init__(self, process_text_fn=tokenization.convert_to_unicode): + self.process_text_fn = process_text_fn + + def get_train_examples(self, data_dir): + """Gets a collection of `InputExample`s for the train set.""" + raise NotImplementedError() + + def get_dev_examples(self, data_dir): + """Gets a collection of `InputExample`s for the dev set.""" + raise NotImplementedError() + + def get_test_examples(self, data_dir): + """Gets a collection of `InputExample`s for prediction.""" + raise NotImplementedError() + + def get_labels(self): + """Gets the list of labels for this data set.""" + raise NotImplementedError() + + @staticmethod + def get_processor_name(): + """Gets the string identifier of the processor.""" + raise NotImplementedError() + + @classmethod + def _read_tsv(cls, input_file, quotechar=None): + """Reads a tab separated value file.""" + with tf.io.gfile.GFile(input_file, "r") as f: + reader = csv.reader(f, delimiter="\t", quotechar=quotechar) + lines = [] + for line in reader: + lines.append(line) + return lines + + +class XnliProcessor(DataProcessor): + """Processor for the XNLI data set.""" + supported_languages = [ + "ar", "bg", "de", "el", "en", "es", "fr", "hi", "ru", "sw", "th", "tr", + "ur", "vi", "zh" + ] + + def __init__(self, + language="en", + process_text_fn=tokenization.convert_to_unicode): + super(XnliProcessor, self).__init__(process_text_fn) + if language == "all": + self.languages = XnliProcessor.supported_languages + elif language not in XnliProcessor.supported_languages: + raise ValueError("language %s is not supported for XNLI task." % language) + else: + self.languages = [language] + + def get_train_examples(self, data_dir): + """See base class.""" + lines = [] + for language in self.languages: + # Skips the header. 
+ lines.extend( + self._read_tsv( + os.path.join(data_dir, "multinli", + "multinli.train.%s.tsv" % language))[1:]) + + examples = [] + for (i, line) in enumerate(lines): + guid = "train-%d" % i + text_a = self.process_text_fn(line[0]) + text_b = self.process_text_fn(line[1]) + label = self.process_text_fn(line[2]) + if label == self.process_text_fn("contradictory"): + label = self.process_text_fn("contradiction") + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + def get_dev_examples(self, data_dir): + """See base class.""" + lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv")) + examples = [] + for (i, line) in enumerate(lines): + if i == 0: + continue + guid = "dev-%d" % i + text_a = self.process_text_fn(line[6]) + text_b = self.process_text_fn(line[7]) + label = self.process_text_fn(line[1]) + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + def get_test_examples(self, data_dir): + """See base class.""" + lines = self._read_tsv(os.path.join(data_dir, "xnli.test.tsv")) + examples_by_lang = {k: [] for k in XnliProcessor.supported_languages} + for (i, line) in enumerate(lines): + if i == 0: + continue + guid = "test-%d" % i + language = self.process_text_fn(line[0]) + text_a = self.process_text_fn(line[6]) + text_b = self.process_text_fn(line[7]) + label = self.process_text_fn(line[1]) + examples_by_lang[language].append( + InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples_by_lang + + def get_labels(self): + """See base class.""" + return ["contradiction", "entailment", "neutral"] + + @staticmethod + def get_processor_name(): + """See base class.""" + return "XNLI" + + +class XtremeXnliProcessor(DataProcessor): + """Processor for the XTREME XNLI data set.""" + supported_languages = [ + "ar", "bg", "de", "el", "en", "es", "fr", "hi", "ru", "sw", "th", "tr", + "ur", "vi", "zh" + ] + + def get_train_examples(self, data_dir): + """See base class.""" + lines = self._read_tsv(os.path.join(data_dir, "train-en.tsv")) + + examples = [] + for (i, line) in enumerate(lines): + guid = "train-%d" % i + text_a = self.process_text_fn(line[0]) + text_b = self.process_text_fn(line[1]) + label = self.process_text_fn(line[2]) + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + def get_dev_examples(self, data_dir): + """See base class.""" + lines = self._read_tsv(os.path.join(data_dir, "dev-en.tsv")) + examples = [] + for (i, line) in enumerate(lines): + guid = "dev-%d" % i + text_a = self.process_text_fn(line[0]) + text_b = self.process_text_fn(line[1]) + label = self.process_text_fn(line[2]) + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + def get_test_examples(self, data_dir): + """See base class.""" + examples_by_lang = {k: [] for k in self.supported_languages} + for lang in self.supported_languages: + lines = self._read_tsv(os.path.join(data_dir, f"test-{lang}.tsv")) + for (i, line) in enumerate(lines): + guid = f"test-{i}" + text_a = self.process_text_fn(line[0]) + text_b = self.process_text_fn(line[1]) + label = "contradiction" + examples_by_lang[lang].append( + InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples_by_lang + + def get_labels(self): + """See base class.""" + return ["contradiction", "entailment", "neutral"] + + @staticmethod + def get_processor_name(): + """See base 
class.""" + return "XTREME-XNLI" + + +class PawsxProcessor(DataProcessor): + """Processor for the PAWS-X data set.""" + supported_languages = ["de", "en", "es", "fr", "ja", "ko", "zh"] + + def __init__(self, + language="en", + process_text_fn=tokenization.convert_to_unicode): + super(PawsxProcessor, self).__init__(process_text_fn) + if language == "all": + self.languages = PawsxProcessor.supported_languages + elif language not in PawsxProcessor.supported_languages: + raise ValueError("language %s is not supported for PAWS-X task." % + language) + else: + self.languages = [language] + + def get_train_examples(self, data_dir): + """See base class.""" + lines = [] + for language in self.languages: + if language == "en": + train_tsv = "train.tsv" + else: + train_tsv = "translated_train.tsv" + # Skips the header. + lines.extend( + self._read_tsv(os.path.join(data_dir, language, train_tsv))[1:]) + + examples = [] + for (i, line) in enumerate(lines): + guid = "train-%d" % i + text_a = self.process_text_fn(line[1]) + text_b = self.process_text_fn(line[2]) + label = self.process_text_fn(line[3]) + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + def get_dev_examples(self, data_dir): + """See base class.""" + lines = [] + for lang in PawsxProcessor.supported_languages: + lines.extend(self._read_tsv(os.path.join(data_dir, f"dev-{lang}.tsv"))) + + examples = [] + for (i, line) in enumerate(lines): + guid = "dev-%d" % i + text_a = self.process_text_fn(line[0]) + text_b = self.process_text_fn(line[1]) + label = self.process_text_fn(line[2]) + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + def get_test_examples(self, data_dir): + """See base class.""" + examples_by_lang = {k: [] for k in self.supported_languages} + for lang in self.supported_languages: + lines = self._read_tsv(os.path.join(data_dir, f"test-{lang}.tsv")) + for (i, line) in enumerate(lines): + guid = "test-%d" % i + text_a = self.process_text_fn(line[0]) + text_b = self.process_text_fn(line[1]) + label = self.process_text_fn(line[2]) + examples_by_lang[lang].append( + InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples_by_lang + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + @staticmethod + def get_processor_name(): + """See base class.""" + return "XTREME-PAWS-X" + + +class XtremePawsxProcessor(DataProcessor): + """Processor for the XTREME PAWS-X data set.""" + supported_languages = ["de", "en", "es", "fr", "ja", "ko", "zh"] + + def get_train_examples(self, data_dir): + """See base class.""" + lines = self._read_tsv(os.path.join(data_dir, "train-en.tsv")) + examples = [] + for (i, line) in enumerate(lines): + guid = "train-%d" % i + text_a = self.process_text_fn(line[0]) + text_b = self.process_text_fn(line[1]) + label = self.process_text_fn(line[2]) + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + def get_dev_examples(self, data_dir): + """See base class.""" + lines = self._read_tsv(os.path.join(data_dir, "dev-en.tsv")) + + examples = [] + for (i, line) in enumerate(lines): + guid = "dev-%d" % i + text_a = self.process_text_fn(line[0]) + text_b = self.process_text_fn(line[1]) + label = self.process_text_fn(line[2]) + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + def get_test_examples(self, data_dir): + """See base class.""" + 
examples_by_lang = {k: [] for k in self.supported_languages} + for lang in self.supported_languages: + lines = self._read_tsv(os.path.join(data_dir, f"test-{lang}.tsv")) + for (i, line) in enumerate(lines): + guid = "test-%d" % i + text_a = self.process_text_fn(line[0]) + text_b = self.process_text_fn(line[1]) + label = "0" + examples_by_lang[lang].append( + InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples_by_lang + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + @staticmethod + def get_processor_name(): + """See base class.""" + return "XTREME-PAWS-X" + + +class MnliProcessor(DataProcessor): + """Processor for the MultiNLI data set (GLUE version).""" + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), + "dev_matched") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["contradiction", "entailment", "neutral"] + + @staticmethod + def get_processor_name(): + """See base class.""" + return "MNLI" + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + examples = [] + for (i, line) in enumerate(lines): + if i == 0: + continue + guid = "%s-%s" % (set_type, self.process_text_fn(line[0])) + text_a = self.process_text_fn(line[8]) + text_b = self.process_text_fn(line[9]) + if set_type == "test": + label = "contradiction" + else: + label = self.process_text_fn(line[-1]) + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class MrpcProcessor(DataProcessor): + """Processor for the MRPC data set (GLUE version).""" + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + @staticmethod + def get_processor_name(): + """See base class.""" + return "MRPC" + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + examples = [] + for (i, line) in enumerate(lines): + if i == 0: + continue + guid = "%s-%s" % (set_type, i) + text_a = self.process_text_fn(line[3]) + text_b = self.process_text_fn(line[4]) + if set_type == "test": + label = "0" + else: + label = self.process_text_fn(line[0]) + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class QqpProcessor(DataProcessor): + """Processor for the QQP data set (GLUE version).""" + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples( + 
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + @staticmethod + def get_processor_name(): + """See base class.""" + return "QQP" + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + examples = [] + for (i, line) in enumerate(lines): + if i == 0: + continue + guid = "%s-%s" % (set_type, line[0]) + try: + text_a = line[3] + text_b = line[4] + label = line[5] + except IndexError: + continue + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class ColaProcessor(DataProcessor): + """Processor for the CoLA data set (GLUE version).""" + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + @staticmethod + def get_processor_name(): + """See base class.""" + return "COLA" + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + examples = [] + for (i, line) in enumerate(lines): + # Only the test set has a header + if set_type == "test" and i == 0: + continue + guid = "%s-%s" % (set_type, i) + if set_type == "test": + text_a = self.process_text_fn(line[1]) + label = "0" + else: + text_a = self.process_text_fn(line[3]) + label = self.process_text_fn(line[1]) + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) + return examples + + +class RteProcessor(DataProcessor): + """Processor for the RTE data set (GLUE version).""" + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + # All datasets are converted to 2-class split, where for 3-class datasets we + # collapse neutral and contradiction into not_entailment. 
+ return ["entailment", "not_entailment"] + + @staticmethod + def get_processor_name(): + """See base class.""" + return "RTE" + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = "%s-%s" % (set_type, i) + if set_type == "test": + text_a = tokenization.convert_to_unicode(line[1]) + text_b = tokenization.convert_to_unicode(line[2]) + label = "entailment" + else: + text_a = tokenization.convert_to_unicode(line[1]) + text_b = tokenization.convert_to_unicode(line[2]) + label = tokenization.convert_to_unicode(line[3]) + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class SstProcessor(DataProcessor): + """Processor for the SST-2 data set (GLUE version).""" + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + @staticmethod + def get_processor_name(): + """See base class.""" + return "SST-2" + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + examples = [] + for (i, line) in enumerate(lines): + if i == 0: + continue + guid = "%s-%s" % (set_type, i) + if set_type == "test": + text_a = tokenization.convert_to_unicode(line[1]) + label = "0" + else: + text_a = tokenization.convert_to_unicode(line[0]) + label = tokenization.convert_to_unicode(line[1]) + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) + return examples + + +class QnliProcessor(DataProcessor): + """Processor for the QNLI data set (GLUE version).""" + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev_matched") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["entailment", "not_entailment"] + + @staticmethod + def get_processor_name(): + """See base class.""" + return "QNLI" + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + examples = [] + for (i, line) in enumerate(lines): + if i == 0: + continue + guid = "%s-%s" % (set_type, 1) + if set_type == "test": + text_a = tokenization.convert_to_unicode(line[1]) + text_b = tokenization.convert_to_unicode(line[2]) + label = "entailment" + else: + text_a = tokenization.convert_to_unicode(line[1]) + text_b = tokenization.convert_to_unicode(line[2]) + label = tokenization.convert_to_unicode(line[-1]) + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class TfdsProcessor(DataProcessor): + """Processor for generic text classification and regression TFDS data set. 
+ + The TFDS parameters are expected to be provided in the tfds_params string, in + a comma-separated list of parameter assignments. + Examples: + tfds_params="dataset=scicite,text_key=string" + tfds_params="dataset=imdb_reviews,test_split=,dev_split=test" + tfds_params="dataset=glue/cola,text_key=sentence" + tfds_params="dataset=glue/sst2,text_key=sentence" + tfds_params="dataset=glue/qnli,text_key=question,text_b_key=sentence" + tfds_params="dataset=glue/mrpc,text_key=sentence1,text_b_key=sentence2" + tfds_params="dataset=glue/stsb,text_key=sentence1,text_b_key=sentence2," + "is_regression=true,label_type=float" + Possible parameters (please refer to the documentation of Tensorflow Datasets + (TFDS) for the meaning of individual parameters): + dataset: Required dataset name (potentially with subset and version number). + data_dir: Optional TFDS source root directory. + module_import: Optional Dataset module to import. + train_split: Name of the train split (defaults to `train`). + dev_split: Name of the dev split (defaults to `validation`). + test_split: Name of the test split (defaults to `test`). + text_key: Key of the text_a feature (defaults to `text`). + text_b_key: Key of the second text feature if available. + label_key: Key of the label feature (defaults to `label`). + test_text_key: Key of the text feature to use in test set. + test_text_b_key: Key of the second text feature to use in test set. + test_label: String to be used as the label for all test examples. + label_type: Type of the label key (defaults to `int`). + weight_key: Key of the float sample weight (is not used if not provided). + is_regression: Whether the task is a regression problem (defaults to False). + """ + + def __init__(self, + tfds_params, + process_text_fn=tokenization.convert_to_unicode): + super(TfdsProcessor, self).__init__(process_text_fn) + self._process_tfds_params_str(tfds_params) + if self.module_import: + importlib.import_module(self.module_import) + + self.dataset, info = tfds.load( + self.dataset_name, data_dir=self.data_dir, with_info=True) + if self.is_regression: + self._labels = None + else: + self._labels = list(range(info.features[self.label_key].num_classes)) + + def _process_tfds_params_str(self, params_str): + """Extracts TFDS parameters from a comma-separated assignements string.""" + dtype_map = {"int": int, "float": float} + cast_str_to_bool = lambda s: s.lower() not in ["false", "0"] + + tuples = [x.split("=") for x in params_str.split(",")] + d = {k.strip(): v.strip() for k, v in tuples} + self.dataset_name = d["dataset"] # Required. 
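+    # All remaining parameters are optional; `d.get` falls back to the defaults documented in the class docstring (e.g. train_split="train", label_key="label").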
+ self.data_dir = d.get("data_dir", None) + self.module_import = d.get("module_import", None) + self.train_split = d.get("train_split", "train") + self.dev_split = d.get("dev_split", "validation") + self.test_split = d.get("test_split", "test") + self.text_key = d.get("text_key", "text") + self.text_b_key = d.get("text_b_key", None) + self.label_key = d.get("label_key", "label") + self.test_text_key = d.get("test_text_key", self.text_key) + self.test_text_b_key = d.get("test_text_b_key", self.text_b_key) + self.test_label = d.get("test_label", "test_example") + self.label_type = dtype_map[d.get("label_type", "int")] + self.is_regression = cast_str_to_bool(d.get("is_regression", "False")) + self.weight_key = d.get("weight_key", None) + + def get_train_examples(self, data_dir): + assert data_dir is None + return self._create_examples(self.train_split, "train") + + def get_dev_examples(self, data_dir): + assert data_dir is None + return self._create_examples(self.dev_split, "dev") + + def get_test_examples(self, data_dir): + assert data_dir is None + return self._create_examples(self.test_split, "test") + + def get_labels(self): + return self._labels + + def get_processor_name(self): + return "TFDS_" + self.dataset_name + + def _create_examples(self, split_name, set_type): + """Creates examples for the training and dev sets.""" + if split_name not in self.dataset: + raise ValueError("Split {} not available.".format(split_name)) + dataset = self.dataset[split_name].as_numpy_iterator() + examples = [] + text_b, weight = None, None + for i, example in enumerate(dataset): + guid = "%s-%s" % (set_type, i) + if set_type == "test": + text_a = self.process_text_fn(example[self.test_text_key]) + if self.test_text_b_key: + text_b = self.process_text_fn(example[self.test_text_b_key]) + label = self.test_label + else: + text_a = self.process_text_fn(example[self.text_key]) + if self.text_b_key: + text_b = self.process_text_fn(example[self.text_b_key]) + label = self.label_type(example[self.label_key]) + if self.weight_key: + weight = float(example[self.weight_key]) + examples.append( + InputExample( + guid=guid, + text_a=text_a, + text_b=text_b, + label=label, + weight=weight)) + return examples + + +def convert_single_example(ex_index, example, label_list, max_seq_length, + tokenizer): + """Converts a single `InputExample` into a single `InputFeatures`.""" + label_map = {} + if label_list: + for (i, label) in enumerate(label_list): + label_map[label] = i + + tokens_a = tokenizer.tokenize(example.text_a) + tokens_b = None + if example.text_b: + tokens_b = tokenizer.tokenize(example.text_b) + + if tokens_b: + # Modifies `tokens_a` and `tokens_b` in place so that the total + # length is less than the specified length. + # Account for [CLS], [SEP], [SEP] with "- 3" + _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) + else: + # Account for [CLS] and [SEP] with "- 2" + if len(tokens_a) > max_seq_length - 2: + tokens_a = tokens_a[0:(max_seq_length - 2)] + + # The convention in BERT is: + # (a) For sequence pairs: + # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] + # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 + # (b) For single sequences: + # tokens: [CLS] the dog is hairy . [SEP] + # type_ids: 0 0 0 0 0 0 0 + # + # Where "type_ids" are used to indicate whether this is the first + # sequence or the second sequence. The embedding vectors for `type=0` and + # `type=1` were learned during pre-training and are added to the wordpiece + # embedding vector (and position vector). 
This is not *strictly* necessary + # since the [SEP] token unambiguously separates the sequences, but it makes + # it easier for the model to learn the concept of sequences. + # + # For classification tasks, the first vector (corresponding to [CLS]) is + # used as the "sentence vector". Note that this only makes sense because + # the entire model is fine-tuned. + tokens = [] + segment_ids = [] + tokens.append("[CLS]") + segment_ids.append(0) + for token in tokens_a: + tokens.append(token) + segment_ids.append(0) + tokens.append("[SEP]") + segment_ids.append(0) + + if tokens_b: + for token in tokens_b: + tokens.append(token) + segment_ids.append(1) + tokens.append("[SEP]") + segment_ids.append(1) + + input_ids = tokenizer.convert_tokens_to_ids(tokens) + + # The mask has 1 for real tokens and 0 for padding tokens. Only real + # tokens are attended to. + input_mask = [1] * len(input_ids) + + # Zero-pad up to the sequence length. + while len(input_ids) < max_seq_length: + input_ids.append(0) + input_mask.append(0) + segment_ids.append(0) + + assert len(input_ids) == max_seq_length + assert len(input_mask) == max_seq_length + assert len(segment_ids) == max_seq_length + + label_id = label_map[example.label] if label_map else example.label + if ex_index < 5: + logging.info("*** Example ***") + logging.info("guid: %s", (example.guid)) + logging.info("tokens: %s", + " ".join([tokenization.printable_text(x) for x in tokens])) + logging.info("input_ids: %s", " ".join([str(x) for x in input_ids])) + logging.info("input_mask: %s", " ".join([str(x) for x in input_mask])) + logging.info("segment_ids: %s", " ".join([str(x) for x in segment_ids])) + logging.info("label: %s (id = %s)", example.label, str(label_id)) + logging.info("weight: %s", example.weight) + logging.info("int_iden: %s", str(example.int_iden)) + + feature = InputFeatures( + input_ids=input_ids, + input_mask=input_mask, + segment_ids=segment_ids, + label_id=label_id, + is_real_example=True, + weight=example.weight, + int_iden=example.int_iden) + + return feature + + +def file_based_convert_examples_to_features(examples, + label_list, + max_seq_length, + tokenizer, + output_file, + label_type=None): + """Convert a set of `InputExample`s to a TFRecord file.""" + + tf.io.gfile.makedirs(os.path.dirname(output_file)) + writer = tf.io.TFRecordWriter(output_file) + + for (ex_index, example) in enumerate(examples): + if ex_index % 10000 == 0: + logging.info("Writing example %d of %d", ex_index, len(examples)) + + feature = convert_single_example(ex_index, example, label_list, + max_seq_length, tokenizer) + + def create_int_feature(values): + f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) + return f + + def create_float_feature(values): + f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values))) + return f + + features = collections.OrderedDict() + features["input_ids"] = create_int_feature(feature.input_ids) + features["input_mask"] = create_int_feature(feature.input_mask) + features["segment_ids"] = create_int_feature(feature.segment_ids) + if label_type is not None and label_type == float: + features["label_ids"] = create_float_feature([feature.label_id]) + elif feature.label_id is not None: + features["label_ids"] = create_int_feature([feature.label_id]) + features["is_real_example"] = create_int_feature( + [int(feature.is_real_example)]) + if feature.weight is not None: + features["weight"] = create_float_feature([feature.weight]) + if feature.int_iden is not None: + features["int_iden"] = 
create_int_feature([feature.int_iden]) + + tf_example = tf.train.Example(features=tf.train.Features(feature=features)) + writer.write(tf_example.SerializeToString()) + writer.close() + + +def _truncate_seq_pair(tokens_a, tokens_b, max_length): + """Truncates a sequence pair in place to the maximum length.""" + + # This is a simple heuristic which will always truncate the longer sequence + # one token at a time. This makes more sense than truncating an equal percent + # of tokens from each, since if one sequence is very short then each token + # that's truncated likely contains more information than a longer sequence. + while True: + total_length = len(tokens_a) + len(tokens_b) + if total_length <= max_length: + break + if len(tokens_a) > len(tokens_b): + tokens_a.pop() + else: + tokens_b.pop() + + +def generate_tf_record_from_data_file(processor, + data_dir, + tokenizer, + train_data_output_path=None, + eval_data_output_path=None, + test_data_output_path=None, + max_seq_length=128): + """Generates and saves training data into a tf record file. + + Arguments: + processor: Input processor object to be used for generating data. Subclass + of `DataProcessor`. + data_dir: Directory that contains train/eval data to process. Data files + should be in from "dev.tsv", "test.tsv", or "train.tsv". + tokenizer: The tokenizer to be applied on the data. + train_data_output_path: Output to which processed tf record for training + will be saved. + eval_data_output_path: Output to which processed tf record for evaluation + will be saved. + test_data_output_path: Output to which processed tf record for testing + will be saved. Must be a pattern template with {} if processor has + language specific test data. + max_seq_length: Maximum sequence length of the to be generated + training/eval data. + + Returns: + A dictionary containing input meta data. 
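+      It always contains "processor_type", "train_data_size", "max_seq_length" and task/label information; eval and test set sizes are added when those outputs are generated.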
+ """ + assert train_data_output_path or eval_data_output_path + + label_list = processor.get_labels() + label_type = getattr(processor, "label_type", None) + is_regression = getattr(processor, "is_regression", False) + has_sample_weights = getattr(processor, "weight_key", False) + assert train_data_output_path + + train_input_data_examples = processor.get_train_examples(data_dir) + file_based_convert_examples_to_features(train_input_data_examples, label_list, + max_seq_length, tokenizer, + train_data_output_path, label_type) + num_training_data = len(train_input_data_examples) + + if eval_data_output_path: + eval_input_data_examples = processor.get_dev_examples(data_dir) + file_based_convert_examples_to_features(eval_input_data_examples, + label_list, max_seq_length, + tokenizer, eval_data_output_path, + label_type) + + if test_data_output_path: + test_input_data_examples = processor.get_test_examples(data_dir) + if isinstance(test_input_data_examples, dict): + for language, examples in test_input_data_examples.items(): + file_based_convert_examples_to_features( + examples, label_list, max_seq_length, tokenizer, + test_data_output_path.format(language), label_type) + else: + file_based_convert_examples_to_features(test_input_data_examples, + label_list, max_seq_length, + tokenizer, test_data_output_path, + label_type) + + meta_data = { + "processor_type": processor.get_processor_name(), + "train_data_size": num_training_data, + "max_seq_length": max_seq_length, + } + if is_regression: + meta_data["task_type"] = "bert_regression" + meta_data["label_type"] = {int: "int", float: "float"}[label_type] + else: + meta_data["task_type"] = "bert_classification" + meta_data["num_labels"] = len(processor.get_labels()) + if has_sample_weights: + meta_data["has_sample_weights"] = True + + if eval_data_output_path: + meta_data["eval_data_size"] = len(eval_input_data_examples) + + if test_data_output_path: + test_input_data_examples = processor.get_test_examples(data_dir) + if isinstance(test_input_data_examples, dict): + for language, examples in test_input_data_examples.items(): + meta_data["test_{}_data_size".format(language)] = len(examples) + else: + meta_data["test_data_size"] = len(test_input_data_examples) + + return meta_data diff --git a/models/official/nlp/data/create_finetuning_data.py b/models/official/nlp/data/create_finetuning_data.py new file mode 100644 index 0000000000000000000000000000000000000000..8fae97e127680d8828d23442ecd7592abb39b584 --- /dev/null +++ b/models/official/nlp/data/create_finetuning_data.py @@ -0,0 +1,316 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""BERT finetuning task dataset generator.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import json +import os + +from absl import app +from absl import flags +import tensorflow as tf +from official.nlp.bert import tokenization +from official.nlp.data import classifier_data_lib +from official.nlp.data import sentence_retrieval_lib +# word-piece tokenizer based squad_lib +from official.nlp.data import squad_lib as squad_lib_wp +# sentence-piece tokenizer based squad_lib +from official.nlp.data import squad_lib_sp + +FLAGS = flags.FLAGS + +flags.DEFINE_enum( + "fine_tuning_task_type", "classification", + ["classification", "regression", "squad", "retrieval"], + "The name of the BERT fine-tuning task for which data " + "will be generated.") + +# BERT classification specific flags. +flags.DEFINE_string( + "input_data_dir", None, + "The input data dir. Should contain the .tsv files (or other data files) " + "for the task.") + +flags.DEFINE_enum("classification_task_name", "MNLI", + ["COLA", "MNLI", "MRPC", "QNLI", "QQP", "SST-2", "XNLI", + "PAWS-X", "XTREME-XNLI", "XTREME-PAWS-X"], + "The name of the task to train the BERT classifier. The " + "difference between XTREME-XNLI and XNLI is: 1. the format " + "of input tsv files; 2. the dev set for XTREME is English " + "only and for XNLI is all languages combined. Same for " + "PAWS-X.") + +flags.DEFINE_enum("retrieval_task_name", "bucc", ["bucc", "tatoeba"], + "The name of the sentence retrieval task for scoring.") + +# XNLI task specific flag. +flags.DEFINE_string( + "xnli_language", "en", + "Language of training data for the XNLI task. If the value is 'all', the data " + "of all languages will be used for training.") + +# PAWS-X task specific flag. +flags.DEFINE_string( + "pawsx_language", "en", + "Language of training data for the PAWS-X task. If the value is 'all', the data " + "of all languages will be used for training.") + +# BERT SQuAD task specific flags. +flags.DEFINE_string( + "squad_data_file", None, + "The input data file for generating training data for the BERT SQuAD task.") + +flags.DEFINE_integer( + "doc_stride", 128, + "When splitting up a long document into chunks, how much stride to " + "take between chunks.") + +flags.DEFINE_integer( + "max_query_length", 64, + "The maximum number of tokens for the question. Questions longer than " + "this will be truncated to this length.") + +flags.DEFINE_bool( + "version_2_with_negative", False, + "If true, the SQuAD examples contain some that do not have an answer.") + +# Shared flags across BERT fine-tuning tasks. +flags.DEFINE_string("vocab_file", None, + "The vocabulary file that the BERT model was trained on.") + +flags.DEFINE_string( + "train_data_output_path", None, + "The path in which generated training input data will be written as tf" + " records.") + +flags.DEFINE_string( + "eval_data_output_path", None, + "The path in which generated evaluation input data will be written as tf" + " records.") + +flags.DEFINE_string( + "test_data_output_path", None, + "The path in which generated test input data will be written as tf" + " records. If None, do not generate test data. 
Must be a pattern template" + " as test_{}.tfrecords if the processor has language-specific test data.") + +flags.DEFINE_string("meta_data_file_path", None, + "The path in which input meta data will be written.") + +flags.DEFINE_bool( + "do_lower_case", True, + "Whether to lower case the input text. Should be True for uncased " + "models and False for cased models.") + +flags.DEFINE_integer( + "max_seq_length", 128, + "The maximum total input sequence length after WordPiece tokenization. " + "Sequences longer than this will be truncated, and sequences shorter " + "than this will be padded.") + +flags.DEFINE_string("sp_model_file", "", + "The path to the model used by the sentence-piece tokenizer.") + +flags.DEFINE_enum( + "tokenizer_impl", "word_piece", ["word_piece", "sentence_piece"], + "Specifies the tokenizer implementation, i.e., whether to use word_piece " + "or sentence_piece tokenizer. Canonical BERT uses word_piece tokenizer, " + "while ALBERT uses sentence_piece tokenizer.") + +flags.DEFINE_string("tfds_params", "", + "Comma-separated list of TFDS parameter assignments for " + "generic classification data import (for more details " + "see the TfdsProcessor class documentation).") + + +def generate_classifier_dataset(): + """Generates classifier dataset and returns input meta data.""" + assert (FLAGS.input_data_dir and FLAGS.classification_task_name + or FLAGS.tfds_params) + + if FLAGS.tokenizer_impl == "word_piece": + tokenizer = tokenization.FullTokenizer( + vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) + processor_text_fn = tokenization.convert_to_unicode + else: + assert FLAGS.tokenizer_impl == "sentence_piece" + tokenizer = tokenization.FullSentencePieceTokenizer(FLAGS.sp_model_file) + processor_text_fn = functools.partial( + tokenization.preprocess_text, lower=FLAGS.do_lower_case) + + if FLAGS.tfds_params: + processor = classifier_data_lib.TfdsProcessor( + tfds_params=FLAGS.tfds_params, + process_text_fn=processor_text_fn) + return classifier_data_lib.generate_tf_record_from_data_file( + processor, + None, + tokenizer, + train_data_output_path=FLAGS.train_data_output_path, + eval_data_output_path=FLAGS.eval_data_output_path, + test_data_output_path=FLAGS.test_data_output_path, + max_seq_length=FLAGS.max_seq_length) + else: + processors = { + "cola": + classifier_data_lib.ColaProcessor, + "mnli": + classifier_data_lib.MnliProcessor, + "mrpc": + classifier_data_lib.MrpcProcessor, + "qnli": + classifier_data_lib.QnliProcessor, + "qqp": classifier_data_lib.QqpProcessor, + "rte": classifier_data_lib.RteProcessor, + "sst-2": + classifier_data_lib.SstProcessor, + "xnli": + functools.partial(classifier_data_lib.XnliProcessor, + language=FLAGS.xnli_language), + "paws-x": + functools.partial(classifier_data_lib.PawsxProcessor, + language=FLAGS.pawsx_language), + "xtreme-xnli": + functools.partial(classifier_data_lib.XtremeXnliProcessor), + "xtreme-paws-x": + functools.partial(classifier_data_lib.XtremePawsxProcessor) + } + task_name = FLAGS.classification_task_name.lower() + if task_name not in processors: + raise ValueError("Task not found: %s" % (task_name)) + + processor = processors[task_name](process_text_fn=processor_text_fn) + return classifier_data_lib.generate_tf_record_from_data_file( + processor, + FLAGS.input_data_dir, + tokenizer, + train_data_output_path=FLAGS.train_data_output_path, + eval_data_output_path=FLAGS.eval_data_output_path, + test_data_output_path=FLAGS.test_data_output_path, + max_seq_length=FLAGS.max_seq_length) + + +def 
generate_regression_dataset(): + """Generates regression dataset and returns input meta data.""" + if FLAGS.tokenizer_impl == "word_piece": + tokenizer = tokenization.FullTokenizer( + vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) + processor_text_fn = tokenization.convert_to_unicode + else: + assert FLAGS.tokenizer_impl == "sentence_piece" + tokenizer = tokenization.FullSentencePieceTokenizer(FLAGS.sp_model_file) + processor_text_fn = functools.partial( + tokenization.preprocess_text, lower=FLAGS.do_lower_case) + + if FLAGS.tfds_params: + processor = classifier_data_lib.TfdsProcessor( + tfds_params=FLAGS.tfds_params, + process_text_fn=processor_text_fn) + return classifier_data_lib.generate_tf_record_from_data_file( + processor, + None, + tokenizer, + train_data_output_path=FLAGS.train_data_output_path, + eval_data_output_path=FLAGS.eval_data_output_path, + test_data_output_path=FLAGS.test_data_output_path, + max_seq_length=FLAGS.max_seq_length) + else: + raise ValueError("No data processor found for the given regression task.") + + +def generate_squad_dataset(): + """Generates squad training dataset and returns input meta data.""" + assert FLAGS.squad_data_file + if FLAGS.tokenizer_impl == "word_piece": + return squad_lib_wp.generate_tf_record_from_json_file( + FLAGS.squad_data_file, FLAGS.vocab_file, FLAGS.train_data_output_path, + FLAGS.max_seq_length, FLAGS.do_lower_case, FLAGS.max_query_length, + FLAGS.doc_stride, FLAGS.version_2_with_negative) + else: + assert FLAGS.tokenizer_impl == "sentence_piece" + return squad_lib_sp.generate_tf_record_from_json_file( + FLAGS.squad_data_file, FLAGS.sp_model_file, + FLAGS.train_data_output_path, FLAGS.max_seq_length, FLAGS.do_lower_case, + FLAGS.max_query_length, FLAGS.doc_stride, FLAGS.version_2_with_negative) + + +def generate_retrieval_dataset(): + """Generate retrieval test and dev dataset and returns input meta data.""" + assert (FLAGS.input_data_dir and FLAGS.retrieval_task_name) + if FLAGS.tokenizer_impl == "word_piece": + tokenizer = tokenization.FullTokenizer( + vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) + processor_text_fn = tokenization.convert_to_unicode + else: + assert FLAGS.tokenizer_impl == "sentence_piece" + tokenizer = tokenization.FullSentencePieceTokenizer(FLAGS.sp_model_file) + processor_text_fn = functools.partial( + tokenization.preprocess_text, lower=FLAGS.do_lower_case) + + processors = { + "bucc": sentence_retrieval_lib.BuccProcessor, + "tatoeba": sentence_retrieval_lib.TatoebaProcessor, + } + + task_name = FLAGS.retrieval_task_name.lower() + if task_name not in processors: + raise ValueError("Task not found: %s" % task_name) + + processor = processors[task_name](process_text_fn=processor_text_fn) + + return sentence_retrieval_lib.generate_sentence_retrevial_tf_record( + processor, + FLAGS.input_data_dir, + tokenizer, + FLAGS.eval_data_output_path, + FLAGS.test_data_output_path, + FLAGS.max_seq_length) + + +def main(_): + if FLAGS.tokenizer_impl == "word_piece": + if not FLAGS.vocab_file: + raise ValueError( + "FLAG vocab_file for word-piece tokenizer is not specified.") + else: + assert FLAGS.tokenizer_impl == "sentence_piece" + if not FLAGS.sp_model_file: + raise ValueError( + "FLAG sp_model_file for sentence-piece tokenizer is not specified.") + + if FLAGS.fine_tuning_task_type != "retrieval": + flags.mark_flag_as_required("train_data_output_path") + + if FLAGS.fine_tuning_task_type == "classification": + input_meta_data = generate_classifier_dataset() + elif 
FLAGS.fine_tuning_task_type == "regression": + input_meta_data = generate_regression_dataset() + elif FLAGS.fine_tuning_task_type == "retrieval": + input_meta_data = generate_retrieval_dataset() + else: + input_meta_data = generate_squad_dataset() + + tf.io.gfile.makedirs(os.path.dirname(FLAGS.meta_data_file_path)) + with tf.io.gfile.GFile(FLAGS.meta_data_file_path, "w") as writer: + writer.write(json.dumps(input_meta_data, indent=4) + "\n") + + +if __name__ == "__main__": + flags.mark_flag_as_required("meta_data_file_path") + app.run(main) diff --git a/models/official/nlp/data/create_pretraining_data.py b/models/official/nlp/data/create_pretraining_data.py new file mode 100644 index 0000000000000000000000000000000000000000..79dac57ac8775687673604af6fb2fb50c9f74244 --- /dev/null +++ b/models/official/nlp/data/create_pretraining_data.py @@ -0,0 +1,486 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Create masked LM/next sentence masked_lm TF examples for BERT.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import random + +from absl import app +from absl import flags +from absl import logging +import tensorflow as tf + +from official.nlp.bert import tokenization + +FLAGS = flags.FLAGS + +flags.DEFINE_string("input_file", None, + "Input raw text file (or comma-separated list of files).") + +flags.DEFINE_string( + "output_file", None, + "Output TF example file (or comma-separated list of files).") + +flags.DEFINE_string("vocab_file", None, + "The vocabulary file that the BERT model was trained on.") + +flags.DEFINE_bool( + "do_lower_case", True, + "Whether to lower case the input text. 
Should be True for uncased " + "models and False for cased models.") + +flags.DEFINE_bool( + "do_whole_word_mask", False, + "Whether to use whole word masking rather than per-WordPiece masking.") + +flags.DEFINE_bool( + "gzip_compress", False, + "Whether to use `GZIP` compress option to get compressed TFRecord files.") + +flags.DEFINE_integer("max_seq_length", 128, "Maximum sequence length.") + +flags.DEFINE_integer("max_predictions_per_seq", 20, + "Maximum number of masked LM predictions per sequence.") + +flags.DEFINE_integer("random_seed", 12345, "Random seed for data generation.") + +flags.DEFINE_integer( + "dupe_factor", 10, + "Number of times to duplicate the input data (with different masks).") + +flags.DEFINE_float("masked_lm_prob", 0.15, "Masked LM probability.") + +flags.DEFINE_float( + "short_seq_prob", 0.1, + "Probability of creating sequences which are shorter than the " + "maximum length.") + + +class TrainingInstance(object): + """A single training instance (sentence pair).""" + + def __init__(self, tokens, segment_ids, masked_lm_positions, masked_lm_labels, + is_random_next): + self.tokens = tokens + self.segment_ids = segment_ids + self.is_random_next = is_random_next + self.masked_lm_positions = masked_lm_positions + self.masked_lm_labels = masked_lm_labels + + def __str__(self): + s = "" + s += "tokens: %s\n" % (" ".join( + [tokenization.printable_text(x) for x in self.tokens])) + s += "segment_ids: %s\n" % (" ".join([str(x) for x in self.segment_ids])) + s += "is_random_next: %s\n" % self.is_random_next + s += "masked_lm_positions: %s\n" % (" ".join( + [str(x) for x in self.masked_lm_positions])) + s += "masked_lm_labels: %s\n" % (" ".join( + [tokenization.printable_text(x) for x in self.masked_lm_labels])) + s += "\n" + return s + + def __repr__(self): + return self.__str__() + + +def write_instance_to_example_files(instances, tokenizer, max_seq_length, + max_predictions_per_seq, output_files, + gzip_compress): + """Create TF example files from `TrainingInstance`s.""" + writers = [] + for output_file in output_files: + writers.append( + tf.io.TFRecordWriter( + output_file, options="GZIP" if gzip_compress else "")) + + writer_index = 0 + + total_written = 0 + for (inst_index, instance) in enumerate(instances): + input_ids = tokenizer.convert_tokens_to_ids(instance.tokens) + input_mask = [1] * len(input_ids) + segment_ids = list(instance.segment_ids) + assert len(input_ids) <= max_seq_length + + while len(input_ids) < max_seq_length: + input_ids.append(0) + input_mask.append(0) + segment_ids.append(0) + + assert len(input_ids) == max_seq_length + assert len(input_mask) == max_seq_length + assert len(segment_ids) == max_seq_length + + masked_lm_positions = list(instance.masked_lm_positions) + masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels) + masked_lm_weights = [1.0] * len(masked_lm_ids) + + while len(masked_lm_positions) < max_predictions_per_seq: + masked_lm_positions.append(0) + masked_lm_ids.append(0) + masked_lm_weights.append(0.0) + + next_sentence_label = 1 if instance.is_random_next else 0 + + features = collections.OrderedDict() + features["input_ids"] = create_int_feature(input_ids) + features["input_mask"] = create_int_feature(input_mask) + features["segment_ids"] = create_int_feature(segment_ids) + features["masked_lm_positions"] = create_int_feature(masked_lm_positions) + features["masked_lm_ids"] = create_int_feature(masked_lm_ids) + features["masked_lm_weights"] = create_float_feature(masked_lm_weights) + 
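# A weight of 1.0 marks a real masked position and 0.0 marks the padded slots added above, so the loss can ignore padding. + 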
features["next_sentence_labels"] = create_int_feature([next_sentence_label]) + + tf_example = tf.train.Example(features=tf.train.Features(feature=features)) + + writers[writer_index].write(tf_example.SerializeToString()) + writer_index = (writer_index + 1) % len(writers) + + total_written += 1 + + if inst_index < 20: + logging.info("*** Example ***") + logging.info("tokens: %s", " ".join( + [tokenization.printable_text(x) for x in instance.tokens])) + + for feature_name in features.keys(): + feature = features[feature_name] + values = [] + if feature.int64_list.value: + values = feature.int64_list.value + elif feature.float_list.value: + values = feature.float_list.value + logging.info("%s: %s", feature_name, " ".join([str(x) for x in values])) + + for writer in writers: + writer.close() + + logging.info("Wrote %d total instances", total_written) + + +def create_int_feature(values): + feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) + return feature + + +def create_float_feature(values): + feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values))) + return feature + + +def create_training_instances(input_files, + tokenizer, + max_seq_length, + dupe_factor, + short_seq_prob, + masked_lm_prob, + max_predictions_per_seq, + rng, + do_whole_word_mask=False): + """Create `TrainingInstance`s from raw text.""" + all_documents = [[]] + + # Input file format: + # (1) One sentence per line. These should ideally be actual sentences, not + # entire paragraphs or arbitrary spans of text. (Because we use the + # sentence boundaries for the "next sentence prediction" task). + # (2) Blank lines between documents. Document boundaries are needed so + # that the "next sentence prediction" task doesn't span between documents. + for input_file in input_files: + with tf.io.gfile.GFile(input_file, "rb") as reader: + while True: + line = tokenization.convert_to_unicode(reader.readline()) + if not line: + break + line = line.strip() + + # Empty lines are used as document delimiters + if not line: + all_documents.append([]) + tokens = tokenizer.tokenize(line) + if tokens: + all_documents[-1].append(tokens) + + # Remove empty documents + all_documents = [x for x in all_documents if x] + rng.shuffle(all_documents) + + vocab_words = list(tokenizer.vocab.keys()) + instances = [] + for _ in range(dupe_factor): + for document_index in range(len(all_documents)): + instances.extend( + create_instances_from_document( + all_documents, document_index, max_seq_length, short_seq_prob, + masked_lm_prob, max_predictions_per_seq, vocab_words, rng, + do_whole_word_mask)) + + rng.shuffle(instances) + return instances + + +def create_instances_from_document( + all_documents, document_index, max_seq_length, short_seq_prob, + masked_lm_prob, max_predictions_per_seq, vocab_words, rng, + do_whole_word_mask=False): + """Creates `TrainingInstance`s for a single document.""" + document = all_documents[document_index] + + # Account for [CLS], [SEP], [SEP] + max_num_tokens = max_seq_length - 3 + + # We *usually* want to fill up the entire sequence since we are padding + # to `max_seq_length` anyways, so short sequences are generally wasted + # computation. However, we *sometimes* + # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter + # sequences to minimize the mismatch between pre-training and fine-tuning. + # The `target_seq_length` is just a rough target however, whereas + # `max_seq_length` is a hard limit. 
+ target_seq_length = max_num_tokens + if rng.random() < short_seq_prob: + target_seq_length = rng.randint(2, max_num_tokens) + + # We DON'T just concatenate all of the tokens from a document into a long + # sequence and choose an arbitrary split point because this would make the + # next sentence prediction task too easy. Instead, we split the input into + # segments "A" and "B" based on the actual "sentences" provided by the user + # input. + instances = [] + current_chunk = [] + current_length = 0 + i = 0 + while i < len(document): + segment = document[i] + current_chunk.append(segment) + current_length += len(segment) + if i == len(document) - 1 or current_length >= target_seq_length: + if current_chunk: + # `a_end` is how many segments from `current_chunk` go into the `A` + # (first) sentence. + a_end = 1 + if len(current_chunk) >= 2: + a_end = rng.randint(1, len(current_chunk) - 1) + + tokens_a = [] + for j in range(a_end): + tokens_a.extend(current_chunk[j]) + + tokens_b = [] + # Random next + is_random_next = False + if len(current_chunk) == 1 or rng.random() < 0.5: + is_random_next = True + target_b_length = target_seq_length - len(tokens_a) + + # This should rarely go for more than one iteration for large + # corpora. However, just to be careful, we try to make sure that + # the random document is not the same as the document + # we're processing. + for _ in range(10): + random_document_index = rng.randint(0, len(all_documents) - 1) + if random_document_index != document_index: + break + + random_document = all_documents[random_document_index] + random_start = rng.randint(0, len(random_document) - 1) + for j in range(random_start, len(random_document)): + tokens_b.extend(random_document[j]) + if len(tokens_b) >= target_b_length: + break + # We didn't actually use these segments so we "put them back" so + # they don't go to waste. + num_unused_segments = len(current_chunk) - a_end + i -= num_unused_segments + # Actual next + else: + is_random_next = False + for j in range(a_end, len(current_chunk)): + tokens_b.extend(current_chunk[j]) + truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng) + + assert len(tokens_a) >= 1 + assert len(tokens_b) >= 1 + + tokens = [] + segment_ids = [] + tokens.append("[CLS]") + segment_ids.append(0) + for token in tokens_a: + tokens.append(token) + segment_ids.append(0) + + tokens.append("[SEP]") + segment_ids.append(0) + + for token in tokens_b: + tokens.append(token) + segment_ids.append(1) + tokens.append("[SEP]") + segment_ids.append(1) + + (tokens, masked_lm_positions, + masked_lm_labels) = create_masked_lm_predictions( + tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng, + do_whole_word_mask) + instance = TrainingInstance( + tokens=tokens, + segment_ids=segment_ids, + is_random_next=is_random_next, + masked_lm_positions=masked_lm_positions, + masked_lm_labels=masked_lm_labels) + instances.append(instance) + current_chunk = [] + current_length = 0 + i += 1 + + return instances + + +MaskedLmInstance = collections.namedtuple("MaskedLmInstance", + ["index", "label"]) + + +def create_masked_lm_predictions(tokens, masked_lm_prob, + max_predictions_per_seq, vocab_words, rng, + do_whole_word_mask): + """Creates the predictions for the masked LM objective.""" + + cand_indexes = [] + for (i, token) in enumerate(tokens): + if token == "[CLS]" or token == "[SEP]": + continue + # Whole Word Masking means that if we mask all of the wordpieces + # corresponding to an original word. 
When a word has been split into + # WordPieces, the first token does not have any marker and any subsequence + # tokens are prefixed with ##. So whenever we see the ## token, we + # append it to the previous set of word indexes. + # + # Note that Whole Word Masking does *not* change the training code + # at all -- we still predict each WordPiece independently, softmaxed + # over the entire vocabulary. + if (do_whole_word_mask and len(cand_indexes) >= 1 and + token.startswith("##")): + cand_indexes[-1].append(i) + else: + cand_indexes.append([i]) + + rng.shuffle(cand_indexes) + + output_tokens = list(tokens) + + num_to_predict = min(max_predictions_per_seq, + max(1, int(round(len(tokens) * masked_lm_prob)))) + + masked_lms = [] + covered_indexes = set() + for index_set in cand_indexes: + if len(masked_lms) >= num_to_predict: + break + # If adding a whole-word mask would exceed the maximum number of + # predictions, then just skip this candidate. + if len(masked_lms) + len(index_set) > num_to_predict: + continue + is_any_index_covered = False + for index in index_set: + if index in covered_indexes: + is_any_index_covered = True + break + if is_any_index_covered: + continue + for index in index_set: + covered_indexes.add(index) + + masked_token = None + # 80% of the time, replace with [MASK] + if rng.random() < 0.8: + masked_token = "[MASK]" + else: + # 10% of the time, keep original + if rng.random() < 0.5: + masked_token = tokens[index] + # 10% of the time, replace with random word + else: + masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)] + + output_tokens[index] = masked_token + + masked_lms.append(MaskedLmInstance(index=index, label=tokens[index])) + assert len(masked_lms) <= num_to_predict + masked_lms = sorted(masked_lms, key=lambda x: x.index) + + masked_lm_positions = [] + masked_lm_labels = [] + for p in masked_lms: + masked_lm_positions.append(p.index) + masked_lm_labels.append(p.label) + + return (output_tokens, masked_lm_positions, masked_lm_labels) + + +def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng): + """Truncates a pair of sequences to a maximum sequence length.""" + while True: + total_length = len(tokens_a) + len(tokens_b) + if total_length <= max_num_tokens: + break + + trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b + assert len(trunc_tokens) >= 1 + + # We want to sometimes truncate from the front and sometimes from the + # back to add more randomness and avoid biases. 
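For intuition, the per-sequence masking budget computed in create_masked_lm_predictions above works out as follows; a small sketch of the same arithmetic with illustrative numbers (the truncation helper resumes below).

# Budget for one 128-token sequence with the default flag values.
max_predictions_per_seq = 20
masked_lm_prob = 0.15
num_tokens = 128

num_to_predict = min(max_predictions_per_seq,
                     max(1, int(round(num_tokens * masked_lm_prob))))
print(num_to_predict)   # 19: round(128 * 0.15) = 19, under the cap of 20

# Of the selected positions, in expectation 80% become [MASK],
# 10% keep the original token, and 10% get a random vocab token.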
+ if rng.random() < 0.5: + del trunc_tokens[0] + else: + trunc_tokens.pop() + + +def main(_): + tokenizer = tokenization.FullTokenizer( + vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) + + input_files = [] + for input_pattern in FLAGS.input_file.split(","): + input_files.extend(tf.io.gfile.glob(input_pattern)) + + logging.info("*** Reading from input files ***") + for input_file in input_files: + logging.info(" %s", input_file) + + rng = random.Random(FLAGS.random_seed) + instances = create_training_instances( + input_files, tokenizer, FLAGS.max_seq_length, FLAGS.dupe_factor, + FLAGS.short_seq_prob, FLAGS.masked_lm_prob, FLAGS.max_predictions_per_seq, + rng, FLAGS.do_whole_word_mask) + + output_files = FLAGS.output_file.split(",") + logging.info("*** Writing to output files ***") + for output_file in output_files: + logging.info(" %s", output_file) + + write_instance_to_example_files(instances, tokenizer, FLAGS.max_seq_length, + FLAGS.max_predictions_per_seq, output_files, + FLAGS.gzip_compress) + + +if __name__ == "__main__": + flags.mark_flag_as_required("input_file") + flags.mark_flag_as_required("output_file") + flags.mark_flag_as_required("vocab_file") + app.run(main) diff --git a/models/official/nlp/data/pretrain_dataloader.py b/models/official/nlp/data/pretrain_dataloader.py new file mode 100644 index 0000000000000000000000000000000000000000..18325090caa6d83e68b4077aac4a27ee69bea938 --- /dev/null +++ b/models/official/nlp/data/pretrain_dataloader.py @@ -0,0 +1,97 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Loads dataset for the BERT pretraining task.""" +from typing import Mapping, Optional + +import tensorflow as tf + +from official.core import input_reader + + +class BertPretrainDataLoader: + """A class to load dataset for bert pretraining task.""" + + def __init__(self, params): + """Inits `BertPretrainDataLoader` class. + + Args: + params: A `BertPretrainDataConfig` object. 
+ """ + self._params = params + self._seq_length = params.seq_length + self._max_predictions_per_seq = params.max_predictions_per_seq + self._use_next_sentence_label = params.use_next_sentence_label + self._use_position_id = params.use_position_id + + def _decode(self, record: tf.Tensor): + """Decodes a serialized tf.Example.""" + name_to_features = { + 'input_ids': + tf.io.FixedLenFeature([self._seq_length], tf.int64), + 'input_mask': + tf.io.FixedLenFeature([self._seq_length], tf.int64), + 'segment_ids': + tf.io.FixedLenFeature([self._seq_length], tf.int64), + 'masked_lm_positions': + tf.io.FixedLenFeature([self._max_predictions_per_seq], tf.int64), + 'masked_lm_ids': + tf.io.FixedLenFeature([self._max_predictions_per_seq], tf.int64), + 'masked_lm_weights': + tf.io.FixedLenFeature([self._max_predictions_per_seq], tf.float32), + } + if self._use_next_sentence_label: + name_to_features['next_sentence_labels'] = tf.io.FixedLenFeature([1], + tf.int64) + if self._use_position_id: + name_to_features['position_ids'] = tf.io.FixedLenFeature( + [self._seq_length], tf.int64) + + example = tf.io.parse_single_example(record, name_to_features) + + # tf.Example only supports tf.int64, but the TPU only supports tf.int32. + # So cast all int64 to int32. + for name in list(example.keys()): + t = example[name] + if t.dtype == tf.int64: + t = tf.cast(t, tf.int32) + example[name] = t + + return example + + def _parse(self, record: Mapping[str, tf.Tensor]): + """Parses raw tensors into a dict of tensors to be consumed by the model.""" + x = { + 'input_word_ids': record['input_ids'], + 'input_mask': record['input_mask'], + 'input_type_ids': record['segment_ids'], + 'masked_lm_positions': record['masked_lm_positions'], + 'masked_lm_ids': record['masked_lm_ids'], + 'masked_lm_weights': record['masked_lm_weights'], + } + if self._use_next_sentence_label: + x['next_sentence_labels'] = record['next_sentence_labels'] + if self._use_position_id: + x['position_ids'] = record['position_ids'] + + return x + + def load(self, input_context: Optional[tf.distribute.InputContext] = None): + """Returns a tf.dataset.Dataset.""" + reader = input_reader.InputReader( + params=self._params, + decoder_fn=self._decode, + parser_fn=self._parse) + return reader.read(input_context) diff --git a/models/official/nlp/data/sentence_prediction_dataloader.py b/models/official/nlp/data/sentence_prediction_dataloader.py new file mode 100644 index 0000000000000000000000000000000000000000..60dd788403725aeeca2028b237c3330bbf22716c --- /dev/null +++ b/models/official/nlp/data/sentence_prediction_dataloader.py @@ -0,0 +1,64 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Loads dataset for the sentence prediction (classification) task.""" +from typing import Mapping, Optional +import tensorflow as tf + +from official.core import input_reader + + +class SentencePredictionDataLoader: + """A class to load dataset for sentence prediction (classification) task.""" + + def __init__(self, params): + self._params = params + self._seq_length = params.seq_length + + def _decode(self, record: tf.Tensor): + """Decodes a serialized tf.Example.""" + name_to_features = { + 'input_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64), + 'input_mask': tf.io.FixedLenFeature([self._seq_length], tf.int64), + 'segment_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64), + 'label_ids': tf.io.FixedLenFeature([], tf.int64), + } + example = tf.io.parse_single_example(record, name_to_features) + + # tf.Example only supports tf.int64, but the TPU only supports tf.int32. + # So cast all int64 to int32. + for name in example: + t = example[name] + if t.dtype == tf.int64: + t = tf.cast(t, tf.int32) + example[name] = t + + return example + + def _parse(self, record: Mapping[str, tf.Tensor]): + """Parses raw tensors into a dict of tensors to be consumed by the model.""" + x = { + 'input_word_ids': record['input_ids'], + 'input_mask': record['input_mask'], + 'input_type_ids': record['segment_ids'] + } + y = record['label_ids'] + return (x, y) + + def load(self, input_context: Optional[tf.distribute.InputContext] = None): + """Returns a tf.dataset.Dataset.""" + reader = input_reader.InputReader( + params=self._params, decoder_fn=self._decode, parser_fn=self._parse) + return reader.read(input_context) diff --git a/models/official/nlp/data/sentence_retrieval_lib.py b/models/official/nlp/data/sentence_retrieval_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..d8e83ae579f8221b93e790ea62b91c3d6d2b9e90 --- /dev/null +++ b/models/official/nlp/data/sentence_retrieval_lib.py @@ -0,0 +1,168 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""BERT library to process data for cross lingual sentence retrieval task.""" + +import os + +from absl import logging +from official.nlp.bert import tokenization +from official.nlp.data import classifier_data_lib + + +class BuccProcessor(classifier_data_lib.DataProcessor): + """Procssor for Xtreme BUCC data set.""" + supported_languages = ["de", "fr", "ru", "zh"] + + def __init__(self, + process_text_fn=tokenization.convert_to_unicode): + super(BuccProcessor, self).__init__(process_text_fn) + self.languages = BuccProcessor.supported_languages + + def get_dev_examples(self, data_dir, file_pattern): + return self._create_examples( + self._read_tsv(os.path.join(data_dir, file_pattern.format("dev"))), + "sample") + + def get_test_examples(self, data_dir, file_pattern): + return self._create_examples( + self._read_tsv(os.path.join(data_dir, file_pattern.format("test"))), + "test") + + @staticmethod + def get_processor_name(): + """See base class.""" + return "BUCC" + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + examples = [] + for (i, line) in enumerate(lines): + guid = "%s-%s" % (set_type, i) + int_iden = int(line[0].split("-")[1]) + text_a = self.process_text_fn(line[1]) + examples.append( + classifier_data_lib.InputExample( + guid=guid, text_a=text_a, int_iden=int_iden)) + return examples + + +class TatoebaProcessor(classifier_data_lib.DataProcessor): + """Procssor for Xtreme Tatoeba data set.""" + supported_languages = [ + "af", "ar", "bg", "bn", "de", "el", "es", "et", "eu", "fa", "fi", "fr", + "he", "hi", "hu", "id", "it", "ja", "jv", "ka", "kk", "ko", "ml", "mr", + "nl", "pt", "ru", "sw", "ta", "te", "th", "tl", "tr", "ur", "vi", "zh" + ] + + def __init__(self, + process_text_fn=tokenization.convert_to_unicode): + super(TatoebaProcessor, self).__init__(process_text_fn) + self.languages = TatoebaProcessor.supported_languages + + def get_test_examples(self, data_dir, file_path): + return self._create_examples( + self._read_tsv(os.path.join(data_dir, file_path)), "test") + + @staticmethod + def get_processor_name(): + """See base class.""" + return "TATOEBA" + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + examples = [] + for (i, line) in enumerate(lines): + guid = "%s-%s" % (set_type, i) + text_a = self.process_text_fn(line[0]) + examples.append( + classifier_data_lib.InputExample( + guid=guid, text_a=text_a, int_iden=i)) + return examples + + +def generate_sentence_retrevial_tf_record(processor, + data_dir, + tokenizer, + eval_data_output_path=None, + test_data_output_path=None, + max_seq_length=128): + """Generates the tf records for retrieval tasks. + + Args: + processor: Input processor object to be used for generating data. Subclass + of `DataProcessor`. + data_dir: Directory that contains train/eval data to process. Data files + should be in from. + tokenizer: The tokenizer to be applied on the data. + eval_data_output_path: Output to which processed tf record for evaluation + will be saved. + test_data_output_path: Output to which processed tf record for testing + will be saved. Must be a pattern template with {} if processor has + language specific test data. + max_seq_length: Maximum sequence length of the to be generated + training/eval data. + + Returns: + A dictionary containing input meta data. 
+ """ + assert eval_data_output_path or test_data_output_path + + if processor.get_processor_name() == "BUCC": + path_pattern = "{}-en.{{}}.{}" + + if processor.get_processor_name() == "TATOEBA": + path_pattern = "{}-en.{}" + + meta_data = { + "processor_type": processor.get_processor_name(), + "max_seq_length": max_seq_length, + "number_eval_data": {}, + "number_test_data": {}, + } + logging.info("Start to process %s task data", processor.get_processor_name()) + + for lang_a in processor.languages: + for lang_b in [lang_a, "en"]: + if eval_data_output_path: + eval_input_data_examples = processor.get_dev_examples( + data_dir, os.path.join(path_pattern.format(lang_a, lang_b))) + + num_eval_data = len(eval_input_data_examples) + logging.info("Processing %d dev examples of %s-en.%s", num_eval_data, + lang_a, lang_b) + output_file = os.path.join( + eval_data_output_path, + "{}-en-{}.{}.tfrecords".format(lang_a, lang_b, "dev")) + classifier_data_lib.file_based_convert_examples_to_features( + eval_input_data_examples, None, max_seq_length, tokenizer, + output_file, None) + meta_data["number_eval_data"][f"{lang_a}-en.{lang_b}"] = num_eval_data + + if test_data_output_path: + test_input_data_examples = processor.get_test_examples( + data_dir, os.path.join(path_pattern.format(lang_a, lang_b))) + + num_test_data = len(test_input_data_examples) + logging.info("Processing %d test examples of %s-en.%s", num_test_data, + lang_a, lang_b) + output_file = os.path.join( + test_data_output_path, + "{}-en-{}.{}.tfrecords".format(lang_a, lang_b, "test")) + classifier_data_lib.file_based_convert_examples_to_features( + test_input_data_examples, None, max_seq_length, tokenizer, + output_file, None) + meta_data["number_test_data"][f"{lang_a}-en.{lang_b}"] = num_test_data + + return meta_data diff --git a/models/official/nlp/data/squad_lib.py b/models/official/nlp/data/squad_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..fbf4c604123c541e7830ffa7176b182a843eef58 --- /dev/null +++ b/models/official/nlp/data/squad_lib.py @@ -0,0 +1,898 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Library to process data for SQuAD 1.1 and SQuAD 2.0.""" + +# pylint: disable=g-bad-import-order +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import copy +import json +import math +import os +import six + +from absl import logging +import tensorflow as tf + +from official.nlp.bert import tokenization + + +class SquadExample(object): + """A single training/test example for simple sequence classification. + + For examples without an answer, the start and end position are -1. + + Attributes: + qas_id: ID of the question-answer pair. + question_text: Original text for the question. + doc_tokens: The list of tokens in the context obtained by splitting + on whitespace only. 
+ orig_answer_text: Original text for the answer. + start_position: Starting index of the answer in `doc_tokens`. + end_position: Ending index of the answer in `doc_tokens`. + is_impossible: Whether the question is impossible to answer given the + context. Only used in SQuAD 2.0. + """ + + def __init__(self, + qas_id, + question_text, + doc_tokens, + orig_answer_text=None, + start_position=None, + end_position=None, + is_impossible=False): + self.qas_id = qas_id + self.question_text = question_text + self.doc_tokens = doc_tokens + self.orig_answer_text = orig_answer_text + self.start_position = start_position + self.end_position = end_position + self.is_impossible = is_impossible + + def __str__(self): + return self.__repr__() + + def __repr__(self): + s = "" + s += "qas_id: %s" % (tokenization.printable_text(self.qas_id)) + s += ", question_text: %s" % ( + tokenization.printable_text(self.question_text)) + s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) + if self.start_position: + s += ", start_position: %d" % (self.start_position) + if self.start_position: + s += ", end_position: %d" % (self.end_position) + if self.start_position: + s += ", is_impossible: %r" % (self.is_impossible) + return s + + +class InputFeatures(object): + """A single set of features of data.""" + + def __init__(self, + unique_id, + example_index, + doc_span_index, + tokens, + token_to_orig_map, + token_is_max_context, + input_ids, + input_mask, + segment_ids, + start_position=None, + end_position=None, + is_impossible=None): + self.unique_id = unique_id + self.example_index = example_index + self.doc_span_index = doc_span_index + self.tokens = tokens + self.token_to_orig_map = token_to_orig_map + self.token_is_max_context = token_is_max_context + self.input_ids = input_ids + self.input_mask = input_mask + self.segment_ids = segment_ids + self.start_position = start_position + self.end_position = end_position + self.is_impossible = is_impossible + + +class FeatureWriter(object): + """Writes InputFeature to TF example file.""" + + def __init__(self, filename, is_training): + self.filename = filename + self.is_training = is_training + self.num_features = 0 + tf.io.gfile.makedirs(os.path.dirname(filename)) + self._writer = tf.io.TFRecordWriter(filename) + + def process_feature(self, feature): + """Write a InputFeature to the TFRecordWriter as a tf.train.Example.""" + self.num_features += 1 + + def create_int_feature(values): + feature = tf.train.Feature( + int64_list=tf.train.Int64List(value=list(values))) + return feature + + features = collections.OrderedDict() + features["unique_ids"] = create_int_feature([feature.unique_id]) + features["input_ids"] = create_int_feature(feature.input_ids) + features["input_mask"] = create_int_feature(feature.input_mask) + features["segment_ids"] = create_int_feature(feature.segment_ids) + + if self.is_training: + features["start_positions"] = create_int_feature([feature.start_position]) + features["end_positions"] = create_int_feature([feature.end_position]) + impossible = 0 + if feature.is_impossible: + impossible = 1 + features["is_impossible"] = create_int_feature([impossible]) + + tf_example = tf.train.Example(features=tf.train.Features(feature=features)) + self._writer.write(tf_example.SerializeToString()) + + def close(self): + self._writer.close() + + +def read_squad_examples(input_file, is_training, version_2_with_negative): + """Read a SQuAD json file into a list of SquadExample.""" + with tf.io.gfile.GFile(input_file, "r") as reader: + input_data = 
json.load(reader)["data"] + + def is_whitespace(c): + if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: + return True + return False + + examples = [] + for entry in input_data: + for paragraph in entry["paragraphs"]: + paragraph_text = paragraph["context"] + doc_tokens = [] + char_to_word_offset = [] + prev_is_whitespace = True + for c in paragraph_text: + if is_whitespace(c): + prev_is_whitespace = True + else: + if prev_is_whitespace: + doc_tokens.append(c) + else: + doc_tokens[-1] += c + prev_is_whitespace = False + char_to_word_offset.append(len(doc_tokens) - 1) + + for qa in paragraph["qas"]: + qas_id = qa["id"] + question_text = qa["question"] + start_position = None + end_position = None + orig_answer_text = None + is_impossible = False + if is_training: + + if version_2_with_negative: + is_impossible = qa["is_impossible"] + if (len(qa["answers"]) != 1) and (not is_impossible): + raise ValueError( + "For training, each question should have exactly 1 answer.") + if not is_impossible: + answer = qa["answers"][0] + orig_answer_text = answer["text"] + answer_offset = answer["answer_start"] + answer_length = len(orig_answer_text) + start_position = char_to_word_offset[answer_offset] + end_position = char_to_word_offset[answer_offset + answer_length - + 1] + # Only add answers where the text can be exactly recovered from the + # document. If this CAN'T happen it's likely due to weird Unicode + # stuff so we will just skip the example. + # + # Note that this means for training mode, every example is NOT + # guaranteed to be preserved. + actual_text = " ".join( + doc_tokens[start_position:(end_position + 1)]) + cleaned_answer_text = " ".join( + tokenization.whitespace_tokenize(orig_answer_text)) + if actual_text.find(cleaned_answer_text) == -1: + logging.warning("Could not find answer: '%s' vs. 
'%s'", + actual_text, cleaned_answer_text) + continue + else: + start_position = -1 + end_position = -1 + orig_answer_text = "" + + example = SquadExample( + qas_id=qas_id, + question_text=question_text, + doc_tokens=doc_tokens, + orig_answer_text=orig_answer_text, + start_position=start_position, + end_position=end_position, + is_impossible=is_impossible) + examples.append(example) + + return examples + + +def convert_examples_to_features(examples, + tokenizer, + max_seq_length, + doc_stride, + max_query_length, + is_training, + output_fn, + batch_size=None): + """Loads a data file into a list of `InputBatch`s.""" + + base_id = 1000000000 + unique_id = base_id + feature = None + for (example_index, example) in enumerate(examples): + query_tokens = tokenizer.tokenize(example.question_text) + + if len(query_tokens) > max_query_length: + query_tokens = query_tokens[0:max_query_length] + + tok_to_orig_index = [] + orig_to_tok_index = [] + all_doc_tokens = [] + for (i, token) in enumerate(example.doc_tokens): + orig_to_tok_index.append(len(all_doc_tokens)) + sub_tokens = tokenizer.tokenize(token) + for sub_token in sub_tokens: + tok_to_orig_index.append(i) + all_doc_tokens.append(sub_token) + + tok_start_position = None + tok_end_position = None + if is_training and example.is_impossible: + tok_start_position = -1 + tok_end_position = -1 + if is_training and not example.is_impossible: + tok_start_position = orig_to_tok_index[example.start_position] + if example.end_position < len(example.doc_tokens) - 1: + tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 + else: + tok_end_position = len(all_doc_tokens) - 1 + (tok_start_position, tok_end_position) = _improve_answer_span( + all_doc_tokens, tok_start_position, tok_end_position, tokenizer, + example.orig_answer_text) + + # The -3 accounts for [CLS], [SEP] and [SEP] + max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 + + # We can have documents that are longer than the maximum sequence length. + # To deal with this we do a sliding window approach, where we take chunks + # of the up to our max length with a stride of `doc_stride`. + _DocSpan = collections.namedtuple( # pylint: disable=invalid-name + "DocSpan", ["start", "length"]) + doc_spans = [] + start_offset = 0 + while start_offset < len(all_doc_tokens): + length = len(all_doc_tokens) - start_offset + if length > max_tokens_for_doc: + length = max_tokens_for_doc + doc_spans.append(_DocSpan(start=start_offset, length=length)) + if start_offset + length == len(all_doc_tokens): + break + start_offset += min(length, doc_stride) + + for (doc_span_index, doc_span) in enumerate(doc_spans): + tokens = [] + token_to_orig_map = {} + token_is_max_context = {} + segment_ids = [] + tokens.append("[CLS]") + segment_ids.append(0) + for token in query_tokens: + tokens.append(token) + segment_ids.append(0) + tokens.append("[SEP]") + segment_ids.append(0) + + for i in range(doc_span.length): + split_token_index = doc_span.start + i + token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] + + is_max_context = _check_is_max_context(doc_spans, doc_span_index, + split_token_index) + token_is_max_context[len(tokens)] = is_max_context + tokens.append(all_doc_tokens[split_token_index]) + segment_ids.append(1) + tokens.append("[SEP]") + segment_ids.append(1) + + input_ids = tokenizer.convert_tokens_to_ids(tokens) + + # The mask has 1 for real tokens and 0 for padding tokens. Only real + # tokens are attended to. 
+ input_mask = [1] * len(input_ids) + + # Zero-pad up to the sequence length. + while len(input_ids) < max_seq_length: + input_ids.append(0) + input_mask.append(0) + segment_ids.append(0) + + assert len(input_ids) == max_seq_length + assert len(input_mask) == max_seq_length + assert len(segment_ids) == max_seq_length + + start_position = None + end_position = None + if is_training and not example.is_impossible: + # For training, if our document chunk does not contain an annotation + # we throw it out, since there is nothing to predict. + doc_start = doc_span.start + doc_end = doc_span.start + doc_span.length - 1 + out_of_span = False + if not (tok_start_position >= doc_start and + tok_end_position <= doc_end): + out_of_span = True + if out_of_span: + start_position = 0 + end_position = 0 + else: + doc_offset = len(query_tokens) + 2 + start_position = tok_start_position - doc_start + doc_offset + end_position = tok_end_position - doc_start + doc_offset + + if is_training and example.is_impossible: + start_position = 0 + end_position = 0 + + if example_index < 20: + logging.info("*** Example ***") + logging.info("unique_id: %s", (unique_id)) + logging.info("example_index: %s", (example_index)) + logging.info("doc_span_index: %s", (doc_span_index)) + logging.info("tokens: %s", + " ".join([tokenization.printable_text(x) for x in tokens])) + logging.info( + "token_to_orig_map: %s", " ".join([ + "%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map) + ])) + logging.info( + "token_is_max_context: %s", " ".join([ + "%d:%s" % (x, y) + for (x, y) in six.iteritems(token_is_max_context) + ])) + logging.info("input_ids: %s", " ".join([str(x) for x in input_ids])) + logging.info("input_mask: %s", " ".join([str(x) for x in input_mask])) + logging.info("segment_ids: %s", " ".join([str(x) for x in segment_ids])) + if is_training and example.is_impossible: + logging.info("impossible example") + if is_training and not example.is_impossible: + answer_text = " ".join(tokens[start_position:(end_position + 1)]) + logging.info("start_position: %d", (start_position)) + logging.info("end_position: %d", (end_position)) + logging.info("answer: %s", tokenization.printable_text(answer_text)) + + feature = InputFeatures( + unique_id=unique_id, + example_index=example_index, + doc_span_index=doc_span_index, + tokens=tokens, + token_to_orig_map=token_to_orig_map, + token_is_max_context=token_is_max_context, + input_ids=input_ids, + input_mask=input_mask, + segment_ids=segment_ids, + start_position=start_position, + end_position=end_position, + is_impossible=example.is_impossible) + + # Run callback + if is_training: + output_fn(feature) + else: + output_fn(feature, is_padding=False) + + unique_id += 1 + + if not is_training and feature: + assert batch_size + num_padding = 0 + num_examples = unique_id - base_id + if unique_id % batch_size != 0: + num_padding = batch_size - (num_examples % batch_size) + logging.info("Adding padding examples to make sure no partial batch.") + logging.info("Adds %d padding examples for inference.", num_padding) + dummy_feature = copy.deepcopy(feature) + for _ in range(num_padding): + dummy_feature.unique_id = unique_id + + # Run callback + output_fn(feature, is_padding=True) + unique_id += 1 + return unique_id - base_id + + +def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, + orig_answer_text): + """Returns tokenized answer spans that better match the annotated answer.""" + + # The SQuAD annotations are character based. 
We first project them to + # whitespace-tokenized words. But then after WordPiece tokenization, we can + # often find a "better match". For example: + # + # Question: What year was John Smith born? + # Context: The leader was John Smith (1895-1943). + # Answer: 1895 + # + # The original whitespace-tokenized answer will be "(1895-1943).". However + # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match + # the exact answer, 1895. + # + # However, this is not always possible. Consider the following: + # + # Question: What country is the top exporter of electornics? + # Context: The Japanese electronics industry is the lagest in the world. + # Answer: Japan + # + # In this case, the annotator chose "Japan" as a character sub-span of + # the word "Japanese". Since our WordPiece tokenizer does not split + # "Japanese", we just use "Japanese" as the annotation. This is fairly rare + # in SQuAD, but does happen. + tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) + + for new_start in range(input_start, input_end + 1): + for new_end in range(input_end, new_start - 1, -1): + text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) + if text_span == tok_answer_text: + return (new_start, new_end) + + return (input_start, input_end) + + +def _check_is_max_context(doc_spans, cur_span_index, position): + """Check if this is the 'max context' doc span for the token.""" + + # Because of the sliding window approach taken to scoring documents, a single + # token can appear in multiple documents. E.g. + # Doc: the man went to the store and bought a gallon of milk + # Span A: the man went to the + # Span B: to the store and bought + # Span C: and bought a gallon of + # ... + # + # Now the word 'bought' will have two scores from spans B and C. We only + # want to consider the score with "maximum context", which we define as + # the *minimum* of its left and right context (the *sum* of left and + # right context will always be the same, of course). + # + # In the example the maximum context for 'bought' would be span C since + # it has 1 left context and 3 right context, while span B has 4 left context + # and 0 right context. 
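Plugging the 'bought' example above into the scoring rule makes the tie-break concrete; a small sketch with the same toy spans.

def context_score(span_start, span_length, position):
  # min(left context, right context) + 0.01 * span length, as used below.
  end = span_start + span_length - 1
  return min(position - span_start, end - position) + 0.01 * span_length

# Doc: "the man went to the store and bought a gallon of milk"
# 'bought' is token index 7. Span B covers tokens 3..7, span C covers 6..10.
print(context_score(3, 5, 7))   # 0.05: no right context in span B
print(context_score(6, 5, 7))   # 1.05: span C wins, as described above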
+ best_score = None + best_span_index = None + for (span_index, doc_span) in enumerate(doc_spans): + end = doc_span.start + doc_span.length - 1 + if position < doc_span.start: + continue + if position > end: + continue + num_left_context = position - doc_span.start + num_right_context = end - position + score = min(num_left_context, num_right_context) + 0.01 * doc_span.length + if best_score is None or score > best_score: + best_score = score + best_span_index = span_index + + return cur_span_index == best_span_index + + +def write_predictions(all_examples, + all_features, + all_results, + n_best_size, + max_answer_length, + do_lower_case, + output_prediction_file, + output_nbest_file, + output_null_log_odds_file, + version_2_with_negative=False, + null_score_diff_threshold=0.0, + verbose=False): + """Write final predictions to the json file and log-odds of null if needed.""" + logging.info("Writing predictions to: %s", (output_prediction_file)) + logging.info("Writing nbest to: %s", (output_nbest_file)) + + all_predictions, all_nbest_json, scores_diff_json = ( + postprocess_output(all_examples=all_examples, + all_features=all_features, + all_results=all_results, + n_best_size=n_best_size, + max_answer_length=max_answer_length, + do_lower_case=do_lower_case, + version_2_with_negative=version_2_with_negative, + null_score_diff_threshold=null_score_diff_threshold, + verbose=verbose)) + + write_to_json_files(all_predictions, output_prediction_file) + write_to_json_files(all_nbest_json, output_nbest_file) + if version_2_with_negative: + write_to_json_files(scores_diff_json, output_null_log_odds_file) + + +def postprocess_output(all_examples, + all_features, + all_results, + n_best_size, + max_answer_length, + do_lower_case, + version_2_with_negative=False, + null_score_diff_threshold=0.0, + verbose=False): + """Postprocess model output, to form predicton results.""" + + example_index_to_features = collections.defaultdict(list) + for feature in all_features: + example_index_to_features[feature.example_index].append(feature) + unique_id_to_result = {} + for result in all_results: + unique_id_to_result[result.unique_id] = result + + _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name + "PrelimPrediction", + ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) + + all_predictions = collections.OrderedDict() + all_nbest_json = collections.OrderedDict() + scores_diff_json = collections.OrderedDict() + + for (example_index, example) in enumerate(all_examples): + features = example_index_to_features[example_index] + + prelim_predictions = [] + # keep track of the minimum score of null start+end of position 0 + score_null = 1000000 # large and positive + min_null_feature_index = 0 # the paragraph slice with min mull score + null_start_logit = 0 # the start logit at the slice with min null score + null_end_logit = 0 # the end logit at the slice with min null score + for (feature_index, feature) in enumerate(features): + result = unique_id_to_result[feature.unique_id] + start_indexes = _get_best_indexes(result.start_logits, n_best_size) + end_indexes = _get_best_indexes(result.end_logits, n_best_size) + # if we could have irrelevant answers, get the min score of irrelevant + if version_2_with_negative: + feature_null_score = result.start_logits[0] + result.end_logits[0] + if feature_null_score < score_null: + score_null = feature_null_score + min_null_feature_index = feature_index + null_start_logit = result.start_logits[0] + null_end_logit = 
result.end_logits[0] + for start_index in start_indexes: + for end_index in end_indexes: + # We could hypothetically create invalid predictions, e.g., predict + # that the start of the span is in the question. We throw out all + # invalid predictions. + if start_index >= len(feature.tokens): + continue + if end_index >= len(feature.tokens): + continue + if start_index not in feature.token_to_orig_map: + continue + if end_index not in feature.token_to_orig_map: + continue + if not feature.token_is_max_context.get(start_index, False): + continue + if end_index < start_index: + continue + length = end_index - start_index + 1 + if length > max_answer_length: + continue + prelim_predictions.append( + _PrelimPrediction( + feature_index=feature_index, + start_index=start_index, + end_index=end_index, + start_logit=result.start_logits[start_index], + end_logit=result.end_logits[end_index])) + + if version_2_with_negative: + prelim_predictions.append( + _PrelimPrediction( + feature_index=min_null_feature_index, + start_index=0, + end_index=0, + start_logit=null_start_logit, + end_logit=null_end_logit)) + prelim_predictions = sorted( + prelim_predictions, + key=lambda x: (x.start_logit + x.end_logit), + reverse=True) + + _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name + "NbestPrediction", ["text", "start_logit", "end_logit"]) + + seen_predictions = {} + nbest = [] + for pred in prelim_predictions: + if len(nbest) >= n_best_size: + break + feature = features[pred.feature_index] + if pred.start_index > 0: # this is a non-null prediction + tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] + orig_doc_start = feature.token_to_orig_map[pred.start_index] + orig_doc_end = feature.token_to_orig_map[pred.end_index] + orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] + tok_text = " ".join(tok_tokens) + + # De-tokenize WordPieces that have been split off. + tok_text = tok_text.replace(" ##", "") + tok_text = tok_text.replace("##", "") + + # Clean whitespace + tok_text = tok_text.strip() + tok_text = " ".join(tok_text.split()) + orig_text = " ".join(orig_tokens) + + final_text = get_final_text( + tok_text, orig_text, do_lower_case, verbose=verbose) + if final_text in seen_predictions: + continue + + seen_predictions[final_text] = True + else: + final_text = "" + seen_predictions[final_text] = True + + nbest.append( + _NbestPrediction( + text=final_text, + start_logit=pred.start_logit, + end_logit=pred.end_logit)) + + # if we didn't inlude the empty option in the n-best, inlcude it + if version_2_with_negative: + if "" not in seen_predictions: + nbest.append( + _NbestPrediction( + text="", start_logit=null_start_logit, + end_logit=null_end_logit)) + # In very rare edge cases we could have no valid predictions. So we + # just create a nonce prediction in this case to avoid failure. 
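The WordPiece de-tokenization a few lines above is plain string surgery on the " ##" continuation markers; a quick sketch on an illustrative predicted span.

tok_tokens = ["john", "smi", "##th", "'", "s"]   # a predicted answer span

tok_text = " ".join(tok_tokens)          # "john smi ##th ' s"
tok_text = tok_text.replace(" ##", "")   # "john smith ' s"
tok_text = tok_text.replace("##", "")    # handles a leading "##" piece
tok_text = " ".join(tok_text.strip().split())
print(tok_text)                          # "john smith ' s"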
+ if not nbest: + nbest.append( + _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) + + assert len(nbest) >= 1 + + total_scores = [] + best_non_null_entry = None + for entry in nbest: + total_scores.append(entry.start_logit + entry.end_logit) + if not best_non_null_entry: + if entry.text: + best_non_null_entry = entry + + probs = _compute_softmax(total_scores) + + nbest_json = [] + for (i, entry) in enumerate(nbest): + output = collections.OrderedDict() + output["text"] = entry.text + output["probability"] = probs[i] + output["start_logit"] = entry.start_logit + output["end_logit"] = entry.end_logit + nbest_json.append(output) + + assert len(nbest_json) >= 1 + + if not version_2_with_negative: + all_predictions[example.qas_id] = nbest_json[0]["text"] + else: + # pytype: disable=attribute-error + # predict "" iff the null score - the score of best non-null > threshold + if best_non_null_entry is not None: + score_diff = score_null - best_non_null_entry.start_logit - ( + best_non_null_entry.end_logit) + scores_diff_json[example.qas_id] = score_diff + if score_diff > null_score_diff_threshold: + all_predictions[example.qas_id] = "" + else: + all_predictions[example.qas_id] = best_non_null_entry.text + else: + logging.warning("best_non_null_entry is None") + scores_diff_json[example.qas_id] = score_null + all_predictions[example.qas_id] = "" + # pytype: enable=attribute-error + + all_nbest_json[example.qas_id] = nbest_json + + return all_predictions, all_nbest_json, scores_diff_json + + +def write_to_json_files(json_records, json_file): + with tf.io.gfile.GFile(json_file, "w") as writer: + writer.write(json.dumps(json_records, indent=4) + "\n") + + +def get_final_text(pred_text, orig_text, do_lower_case, verbose=False): + """Project the tokenized prediction back to the original text.""" + + # When we created the data, we kept track of the alignment between original + # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So + # now `orig_text` contains the span of our original text corresponding to the + # span that we predicted. + # + # However, `orig_text` may contain extra characters that we don't want in + # our prediction. + # + # For example, let's say: + # pred_text = steve smith + # orig_text = Steve Smith's + # + # We don't want to return `orig_text` because it contains the extra "'s". + # + # We don't want to return `pred_text` because it's already been normalized + # (the SQuAD eval script also does punctuation stripping/lower casing but + # our tokenizer does additional normalization like stripping accent + # characters). + # + # What we really want to return is "Steve Smith". + # + # Therefore, we have to apply a semi-complicated alignment heruistic between + # `pred_text` and `orig_text` to get a character-to-charcter alignment. This + # can fail in certain cases in which case we just return `orig_text`. + + def _strip_spaces(text): + ns_chars = [] + ns_to_s_map = collections.OrderedDict() + for (i, c) in enumerate(text): + if c == " ": + continue + ns_to_s_map[len(ns_chars)] = i + ns_chars.append(c) + ns_text = "".join(ns_chars) + return (ns_text, ns_to_s_map) + + # We first tokenize `orig_text`, strip whitespace from the result + # and `pred_text`, and check if they are the same length. If they are + # NOT the same length, the heuristic has failed. If they are the same + # length, we assume the characters are one-to-one aligned. 
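For SQuAD 2.0, the empty answer is chosen in postprocess_output above by a simple margin test against null_score_diff_threshold; a tiny sketch with made-up logits (the alignment code of get_final_text continues below).

score_null = -1.0                              # start+end logits at position 0
best_start_logit, best_end_logit = 2.5, 1.5    # best non-null span
null_score_diff_threshold = 0.0

score_diff = score_null - best_start_logit - best_end_logit   # -5.0
prediction = "" if score_diff > null_score_diff_threshold else "the span text"
print(score_diff, repr(prediction))   # -5.0 'the span text': the span wins here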
+ tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case) + + tok_text = " ".join(tokenizer.tokenize(orig_text)) + + start_position = tok_text.find(pred_text) + if start_position == -1: + if verbose: + logging.info("Unable to find text: '%s' in '%s'", pred_text, orig_text) + return orig_text + end_position = start_position + len(pred_text) - 1 + + (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) + (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) + + if len(orig_ns_text) != len(tok_ns_text): + if verbose: + logging.info("Length not equal after stripping spaces: '%s' vs '%s'", + orig_ns_text, tok_ns_text) + return orig_text + + # We then project the characters in `pred_text` back to `orig_text` using + # the character-to-character alignment. + tok_s_to_ns_map = {} + for (i, tok_index) in six.iteritems(tok_ns_to_s_map): + tok_s_to_ns_map[tok_index] = i + + orig_start_position = None + if start_position in tok_s_to_ns_map: + ns_start_position = tok_s_to_ns_map[start_position] + if ns_start_position in orig_ns_to_s_map: + orig_start_position = orig_ns_to_s_map[ns_start_position] + + if orig_start_position is None: + if verbose: + logging.info("Couldn't map start position") + return orig_text + + orig_end_position = None + if end_position in tok_s_to_ns_map: + ns_end_position = tok_s_to_ns_map[end_position] + if ns_end_position in orig_ns_to_s_map: + orig_end_position = orig_ns_to_s_map[ns_end_position] + + if orig_end_position is None: + if verbose: + logging.info("Couldn't map end position") + return orig_text + + output_text = orig_text[orig_start_position:(orig_end_position + 1)] + return output_text + + +def _get_best_indexes(logits, n_best_size): + """Get the n-best logits from a list.""" + index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) + + best_indexes = [] + for i in range(len(index_and_score)): # pylint: disable=consider-using-enumerate + if i >= n_best_size: + break + best_indexes.append(index_and_score[i][0]) + return best_indexes + + +def _compute_softmax(scores): + """Compute softmax probability over raw logits.""" + if not scores: + return [] + + max_score = None + for score in scores: + if max_score is None or score > max_score: + max_score = score + + exp_scores = [] + total_sum = 0.0 + for score in scores: + x = math.exp(score - max_score) + exp_scores.append(x) + total_sum += x + + probs = [] + for score in exp_scores: + probs.append(score / total_sum) + return probs + + +def generate_tf_record_from_json_file(input_file_path, + vocab_file_path, + output_path, + max_seq_length=384, + do_lower_case=True, + max_query_length=64, + doc_stride=128, + version_2_with_negative=False): + """Generates and saves training data into a tf record file.""" + train_examples = read_squad_examples( + input_file=input_file_path, + is_training=True, + version_2_with_negative=version_2_with_negative) + tokenizer = tokenization.FullTokenizer( + vocab_file=vocab_file_path, do_lower_case=do_lower_case) + train_writer = FeatureWriter(filename=output_path, is_training=True) + number_of_examples = convert_examples_to_features( + examples=train_examples, + tokenizer=tokenizer, + max_seq_length=max_seq_length, + doc_stride=doc_stride, + max_query_length=max_query_length, + is_training=True, + output_fn=train_writer.process_feature) + train_writer.close() + + meta_data = { + "task_type": "bert_squad", + "train_data_size": number_of_examples, + "max_seq_length": max_seq_length, + "max_query_length": max_query_length, + "doc_stride": doc_stride, + 
"version_2_with_negative": version_2_with_negative, + } + + return meta_data diff --git a/models/official/nlp/data/squad_lib_sp.py b/models/official/nlp/data/squad_lib_sp.py new file mode 100644 index 0000000000000000000000000000000000000000..c65f713fd09bc4858f77f8ce823b17467606271c --- /dev/null +++ b/models/official/nlp/data/squad_lib_sp.py @@ -0,0 +1,892 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Run ALBERT on SQuAD 1.1 and SQuAD 2.0 using sentence piece tokenization. + +The file is forked from: + +https://github.com/google-research/ALBERT/blob/master/run_squad_sp.py +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import copy +import json +import math +import os +from absl import logging +import numpy as np +import tensorflow as tf + +from official.nlp.bert import tokenization + + +class SquadExample(object): + """A single training/test example for simple sequence classification. + + For examples without an answer, the start and end position are -1. + """ + + def __init__(self, + qas_id, + question_text, + paragraph_text, + orig_answer_text=None, + start_position=None, + end_position=None, + is_impossible=False): + self.qas_id = qas_id + self.question_text = question_text + self.paragraph_text = paragraph_text + self.orig_answer_text = orig_answer_text + self.start_position = start_position + self.end_position = end_position + self.is_impossible = is_impossible + + def __str__(self): + return self.__repr__() + + def __repr__(self): + s = "" + s += "qas_id: %s" % (tokenization.printable_text(self.qas_id)) + s += ", question_text: %s" % ( + tokenization.printable_text(self.question_text)) + s += ", paragraph_text: [%s]" % (" ".join(self.paragraph_text)) + if self.start_position: + s += ", start_position: %d" % (self.start_position) + if self.start_position: + s += ", end_position: %d" % (self.end_position) + if self.start_position: + s += ", is_impossible: %r" % (self.is_impossible) + return s + + +class InputFeatures(object): + """A single set of features of data.""" + + def __init__(self, + unique_id, + example_index, + doc_span_index, + tok_start_to_orig_index, + tok_end_to_orig_index, + token_is_max_context, + tokens, + input_ids, + input_mask, + segment_ids, + paragraph_len, + start_position=None, + end_position=None, + is_impossible=None): + self.unique_id = unique_id + self.example_index = example_index + self.doc_span_index = doc_span_index + self.tok_start_to_orig_index = tok_start_to_orig_index + self.tok_end_to_orig_index = tok_end_to_orig_index + self.token_is_max_context = token_is_max_context + self.tokens = tokens + self.input_ids = input_ids + self.input_mask = input_mask + self.segment_ids = segment_ids + self.paragraph_len = paragraph_len + self.start_position = start_position + self.end_position = end_position + 
self.is_impossible = is_impossible + + +def read_squad_examples(input_file, is_training, version_2_with_negative): + """Read a SQuAD json file into a list of SquadExample.""" + del version_2_with_negative + with tf.io.gfile.GFile(input_file, "r") as reader: + input_data = json.load(reader)["data"] + + examples = [] + for entry in input_data: + for paragraph in entry["paragraphs"]: + paragraph_text = paragraph["context"] + + for qa in paragraph["qas"]: + qas_id = qa["id"] + question_text = qa["question"] + start_position = None + orig_answer_text = None + is_impossible = False + + if is_training: + is_impossible = qa.get("is_impossible", False) + if (len(qa["answers"]) != 1) and (not is_impossible): + raise ValueError( + "For training, each question should have exactly 1 answer.") + if not is_impossible: + answer = qa["answers"][0] + orig_answer_text = answer["text"] + start_position = answer["answer_start"] + else: + start_position = -1 + orig_answer_text = "" + + example = SquadExample( + qas_id=qas_id, + question_text=question_text, + paragraph_text=paragraph_text, + orig_answer_text=orig_answer_text, + start_position=start_position, + is_impossible=is_impossible) + examples.append(example) + + return examples + + +def _convert_index(index, pos, m=None, is_start=True): + """Converts index.""" + if index[pos] is not None: + return index[pos] + n = len(index) + rear = pos + while rear < n - 1 and index[rear] is None: + rear += 1 + front = pos + while front > 0 and index[front] is None: + front -= 1 + assert index[front] is not None or index[rear] is not None + if index[front] is None: + if index[rear] >= 1: + if is_start: + return 0 + else: + return index[rear] - 1 + return index[rear] + if index[rear] is None: + if m is not None and index[front] < m - 1: + if is_start: + return index[front] + 1 + else: + return m - 1 + return index[front] + if is_start: + if index[rear] > index[front] + 1: + return index[front] + 1 + else: + return index[rear] + else: + if index[rear] > index[front] + 1: + return index[rear] - 1 + else: + return index[front] + + +def convert_examples_to_features(examples, + tokenizer, + max_seq_length, + doc_stride, + max_query_length, + is_training, + output_fn, + do_lower_case, + batch_size=None): + """Loads a data file into a list of `InputBatch`s.""" + cnt_pos, cnt_neg = 0, 0 + base_id = 1000000000 + unique_id = base_id + max_n, max_m = 1024, 1024 + f = np.zeros((max_n, max_m), dtype=np.float32) + + for (example_index, example) in enumerate(examples): + + if example_index % 100 == 0: + logging.info("Converting %d/%d pos %d neg %d", example_index, + len(examples), cnt_pos, cnt_neg) + + query_tokens = tokenization.encode_ids( + tokenizer.sp_model, + tokenization.preprocess_text( + example.question_text, lower=do_lower_case)) + + if len(query_tokens) > max_query_length: + query_tokens = query_tokens[0:max_query_length] + + paragraph_text = example.paragraph_text + para_tokens = tokenization.encode_pieces( + tokenizer.sp_model, + tokenization.preprocess_text( + example.paragraph_text, lower=do_lower_case)) + + chartok_to_tok_index = [] + tok_start_to_chartok_index = [] + tok_end_to_chartok_index = [] + char_cnt = 0 + for i, token in enumerate(para_tokens): + new_token = token.replace(tokenization.SPIECE_UNDERLINE, " ") + chartok_to_tok_index.extend([i] * len(new_token)) + tok_start_to_chartok_index.append(char_cnt) + char_cnt += len(new_token) + tok_end_to_chartok_index.append(char_cnt - 1) + + tok_cat_text = "".join(para_tokens).replace(tokenization.SPIECE_UNDERLINE, + " 
") + n, m = len(paragraph_text), len(tok_cat_text) + + if n > max_n or m > max_m: + max_n = max(n, max_n) + max_m = max(m, max_m) + f = np.zeros((max_n, max_m), dtype=np.float32) + + g = {} + # pylint: disable=cell-var-from-loop + def _lcs_match(max_dist, n=n, m=m): + """Longest-common-substring algorithm.""" + f.fill(0) + g.clear() + + ### longest common sub sequence + # f[i, j] = max(f[i - 1, j], f[i, j - 1], f[i - 1, j - 1] + match(i, j)) + for i in range(n): + + # unlike standard LCS, this is specifically optimized for the setting + # because the mismatch between sentence pieces and original text will + # be small + for j in range(i - max_dist, i + max_dist): + if j >= m or j < 0: + continue + + if i > 0: + g[(i, j)] = 0 + f[i, j] = f[i - 1, j] + + if j > 0 and f[i, j - 1] > f[i, j]: + g[(i, j)] = 1 + f[i, j] = f[i, j - 1] + + f_prev = f[i - 1, j - 1] if i > 0 and j > 0 else 0 + if (tokenization.preprocess_text( + paragraph_text[i], lower=do_lower_case, + remove_space=False) == tok_cat_text[j] and f_prev + 1 > f[i, j]): + g[(i, j)] = 2 + f[i, j] = f_prev + 1 + # pylint: enable=cell-var-from-loop + + max_dist = abs(n - m) + 5 + for _ in range(2): + _lcs_match(max_dist) + if f[n - 1, m - 1] > 0.8 * n: + break + max_dist *= 2 + + orig_to_chartok_index = [None] * n + chartok_to_orig_index = [None] * m + i, j = n - 1, m - 1 + while i >= 0 and j >= 0: + if (i, j) not in g: + break + if g[(i, j)] == 2: + orig_to_chartok_index[i] = j + chartok_to_orig_index[j] = i + i, j = i - 1, j - 1 + elif g[(i, j)] == 1: + j = j - 1 + else: + i = i - 1 + + if (all(v is None for v in orig_to_chartok_index) or + f[n - 1, m - 1] < 0.8 * n): + logging.info("MISMATCH DETECTED!") + continue + + tok_start_to_orig_index = [] + tok_end_to_orig_index = [] + for i in range(len(para_tokens)): + start_chartok_pos = tok_start_to_chartok_index[i] + end_chartok_pos = tok_end_to_chartok_index[i] + start_orig_pos = _convert_index( + chartok_to_orig_index, start_chartok_pos, n, is_start=True) + end_orig_pos = _convert_index( + chartok_to_orig_index, end_chartok_pos, n, is_start=False) + + tok_start_to_orig_index.append(start_orig_pos) + tok_end_to_orig_index.append(end_orig_pos) + + if not is_training: + tok_start_position = tok_end_position = None + + if is_training and example.is_impossible: + tok_start_position = 0 + tok_end_position = 0 + + if is_training and not example.is_impossible: + start_position = example.start_position + end_position = start_position + len(example.orig_answer_text) - 1 + + start_chartok_pos = _convert_index( + orig_to_chartok_index, start_position, is_start=True) + tok_start_position = chartok_to_tok_index[start_chartok_pos] + + end_chartok_pos = _convert_index( + orig_to_chartok_index, end_position, is_start=False) + tok_end_position = chartok_to_tok_index[end_chartok_pos] + assert tok_start_position <= tok_end_position + + def _piece_to_id(x): + return tokenizer.sp_model.PieceToId(x) + + all_doc_tokens = list(map(_piece_to_id, para_tokens)) + + # The -3 accounts for [CLS], [SEP] and [SEP] + max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 + + # We can have documents that are longer than the maximum sequence length. + # To deal with this we do a sliding window approach, where we take chunks + # of the up to our max length with a stride of `doc_stride`. 
+ _DocSpan = collections.namedtuple( # pylint: disable=invalid-name + "DocSpan", ["start", "length"]) + doc_spans = [] + start_offset = 0 + while start_offset < len(all_doc_tokens): + length = len(all_doc_tokens) - start_offset + if length > max_tokens_for_doc: + length = max_tokens_for_doc + doc_spans.append(_DocSpan(start=start_offset, length=length)) + if start_offset + length == len(all_doc_tokens): + break + start_offset += min(length, doc_stride) + + for (doc_span_index, doc_span) in enumerate(doc_spans): + tokens = [] + token_is_max_context = {} + segment_ids = [] + + cur_tok_start_to_orig_index = [] + cur_tok_end_to_orig_index = [] + + tokens.append(tokenizer.sp_model.PieceToId("[CLS]")) + segment_ids.append(0) + for token in query_tokens: + tokens.append(token) + segment_ids.append(0) + tokens.append(tokenizer.sp_model.PieceToId("[SEP]")) + segment_ids.append(0) + + for i in range(doc_span.length): + split_token_index = doc_span.start + i + + cur_tok_start_to_orig_index.append( + tok_start_to_orig_index[split_token_index]) + cur_tok_end_to_orig_index.append( + tok_end_to_orig_index[split_token_index]) + + is_max_context = _check_is_max_context(doc_spans, doc_span_index, + split_token_index) + token_is_max_context[len(tokens)] = is_max_context + tokens.append(all_doc_tokens[split_token_index]) + segment_ids.append(1) + tokens.append(tokenizer.sp_model.PieceToId("[SEP]")) + segment_ids.append(1) + + paragraph_len = len(tokens) + input_ids = tokens + + # The mask has 1 for real tokens and 0 for padding tokens. Only real + # tokens are attended to. + input_mask = [1] * len(input_ids) + + # Zero-pad up to the sequence length. + while len(input_ids) < max_seq_length: + input_ids.append(0) + input_mask.append(0) + segment_ids.append(0) + + assert len(input_ids) == max_seq_length + assert len(input_mask) == max_seq_length + assert len(segment_ids) == max_seq_length + + span_is_impossible = example.is_impossible + start_position = None + end_position = None + if is_training and not span_is_impossible: + # For training, if our document chunk does not contain an annotation + # we throw it out, since there is nothing to predict. 
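+        # (In this implementation the chunk is actually kept: when the answer
+        # falls outside the span, start/end positions are set to 0 and the
+        # span is marked impossible below, rather than being skipped.)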
+ doc_start = doc_span.start + doc_end = doc_span.start + doc_span.length - 1 + out_of_span = False + if not (tok_start_position >= doc_start and + tok_end_position <= doc_end): + out_of_span = True + if out_of_span: + # continue + start_position = 0 + end_position = 0 + span_is_impossible = True + else: + doc_offset = len(query_tokens) + 2 + start_position = tok_start_position - doc_start + doc_offset + end_position = tok_end_position - doc_start + doc_offset + + if is_training and span_is_impossible: + start_position = 0 + end_position = 0 + + if example_index < 20: + logging.info("*** Example ***") + logging.info("unique_id: %s", (unique_id)) + logging.info("example_index: %s", (example_index)) + logging.info("doc_span_index: %s", (doc_span_index)) + logging.info("tok_start_to_orig_index: %s", + " ".join([str(x) for x in cur_tok_start_to_orig_index])) + logging.info("tok_end_to_orig_index: %s", + " ".join([str(x) for x in cur_tok_end_to_orig_index])) + logging.info( + "token_is_max_context: %s", " ".join( + ["%d:%s" % (x, y) for (x, y) in token_is_max_context.items()])) + logging.info( + "input_pieces: %s", + " ".join([tokenizer.sp_model.IdToPiece(x) for x in tokens])) + logging.info("input_ids: %s", " ".join([str(x) for x in input_ids])) + logging.info("input_mask: %s", " ".join([str(x) for x in input_mask])) + logging.info("segment_ids: %s", " ".join([str(x) for x in segment_ids])) + + if is_training and span_is_impossible: + logging.info("impossible example span") + + if is_training and not span_is_impossible: + pieces = [ + tokenizer.sp_model.IdToPiece(token) + for token in tokens[start_position:(end_position + 1)] + ] + answer_text = tokenizer.sp_model.DecodePieces(pieces) + logging.info("start_position: %d", (start_position)) + logging.info("end_position: %d", (end_position)) + logging.info("answer: %s", (tokenization.printable_text(answer_text))) + + # With multi processing, the example_index is actually the index + # within the current process therefore we use example_index=None + # to avoid being used in the future. + # The current code does not use example_index of training data. 
+ if is_training: + feat_example_index = None + else: + feat_example_index = example_index + + feature = InputFeatures( + unique_id=unique_id, + example_index=feat_example_index, + doc_span_index=doc_span_index, + tok_start_to_orig_index=cur_tok_start_to_orig_index, + tok_end_to_orig_index=cur_tok_end_to_orig_index, + token_is_max_context=token_is_max_context, + tokens=[tokenizer.sp_model.IdToPiece(x) for x in tokens], + input_ids=input_ids, + input_mask=input_mask, + segment_ids=segment_ids, + paragraph_len=paragraph_len, + start_position=start_position, + end_position=end_position, + is_impossible=span_is_impossible) + + # Run callback + if is_training: + output_fn(feature) + else: + output_fn(feature, is_padding=False) + + unique_id += 1 + if span_is_impossible: + cnt_neg += 1 + else: + cnt_pos += 1 + + if not is_training and feature: + assert batch_size + num_padding = 0 + num_examples = unique_id - base_id + if unique_id % batch_size != 0: + num_padding = batch_size - (num_examples % batch_size) + dummy_feature = copy.deepcopy(feature) + for _ in range(num_padding): + dummy_feature.unique_id = unique_id + + # Run callback + output_fn(feature, is_padding=True) + unique_id += 1 + + logging.info("Total number of instances: %d = pos %d neg %d", + cnt_pos + cnt_neg, cnt_pos, cnt_neg) + return unique_id - base_id + + +def _check_is_max_context(doc_spans, cur_span_index, position): + """Check if this is the 'max context' doc span for the token.""" + + # Because of the sliding window approach taken to scoring documents, a single + # token can appear in multiple documents. E.g. + # Doc: the man went to the store and bought a gallon of milk + # Span A: the man went to the + # Span B: to the store and bought + # Span C: and bought a gallon of + # ... + # + # Now the word 'bought' will have two scores from spans B and C. We only + # want to consider the score with "maximum context", which we define as + # the *minimum* of its left and right context (the *sum* of left and + # right context will always be the same, of course). + # + # In the example the maximum context for 'bought' would be span C since + # it has 1 left context and 3 right context, while span B has 4 left context + # and 0 right context. 
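+  # Concretely, with the scoring rule below (min(left, right) + 0.01 * length):
+  #   span B: min(4, 0) + 0.01 * 5 = 0.05
+  #   span C: min(1, 3) + 0.01 * 5 = 1.05
+  # so span C is selected as the max-context span for 'bought'.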
+ best_score = None + best_span_index = None + for (span_index, doc_span) in enumerate(doc_spans): + end = doc_span.start + doc_span.length - 1 + if position < doc_span.start: + continue + if position > end: + continue + num_left_context = position - doc_span.start + num_right_context = end - position + score = min(num_left_context, num_right_context) + 0.01 * doc_span.length + if best_score is None or score > best_score: + best_score = score + best_span_index = span_index + + return cur_span_index == best_span_index + + +def write_predictions(all_examples, + all_features, + all_results, + n_best_size, + max_answer_length, + do_lower_case, + output_prediction_file, + output_nbest_file, + output_null_log_odds_file, + version_2_with_negative=False, + null_score_diff_threshold=0.0, + verbose=False): + """Write final predictions to the json file and log-odds of null if needed.""" + logging.info("Writing predictions to: %s", (output_prediction_file)) + logging.info("Writing nbest to: %s", (output_nbest_file)) + + all_predictions, all_nbest_json, scores_diff_json = ( + postprocess_output(all_examples=all_examples, + all_features=all_features, + all_results=all_results, + n_best_size=n_best_size, + max_answer_length=max_answer_length, + do_lower_case=do_lower_case, + version_2_with_negative=version_2_with_negative, + null_score_diff_threshold=null_score_diff_threshold, + verbose=verbose)) + + write_to_json_files(all_predictions, output_prediction_file) + write_to_json_files(all_nbest_json, output_nbest_file) + if version_2_with_negative: + write_to_json_files(scores_diff_json, output_null_log_odds_file) + + +def postprocess_output(all_examples, + all_features, + all_results, + n_best_size, + max_answer_length, + do_lower_case, + version_2_with_negative=False, + null_score_diff_threshold=0.0, + verbose=False): + """Postprocess model output, to form predicton results.""" + + del do_lower_case, verbose + + example_index_to_features = collections.defaultdict(list) + for feature in all_features: + example_index_to_features[feature.example_index].append(feature) + + unique_id_to_result = {} + for result in all_results: + unique_id_to_result[result.unique_id] = result + + _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name + "PrelimPrediction", + ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) + + all_predictions = collections.OrderedDict() + all_nbest_json = collections.OrderedDict() + scores_diff_json = collections.OrderedDict() + + for (example_index, example) in enumerate(all_examples): + features = example_index_to_features[example_index] + + prelim_predictions = [] + # keep track of the minimum score of null start+end of position 0 + score_null = 1000000 # large and positive + min_null_feature_index = 0 # the paragraph slice with min mull score + null_start_logit = 0 # the start logit at the slice with min null score + null_end_logit = 0 # the end logit at the slice with min null score + for (feature_index, feature) in enumerate(features): + result = unique_id_to_result[feature.unique_id] + start_indexes = _get_best_indexes(result.start_logits, n_best_size) + end_indexes = _get_best_indexes(result.end_logits, n_best_size) + # if we could have irrelevant answers, get the min score of irrelevant + if version_2_with_negative: + feature_null_score = result.start_logits[0] + result.end_logits[0] + if feature_null_score < score_null: + score_null = feature_null_score + min_null_feature_index = feature_index + null_start_logit = result.start_logits[0] + 
null_end_logit = result.end_logits[0] + for start_index in start_indexes: + for end_index in end_indexes: + doc_offset = feature.tokens.index("[SEP]") + 1 + # We could hypothetically create invalid predictions, e.g., predict + # that the start of the span is in the question. We throw out all + # invalid predictions. + if start_index - doc_offset >= len(feature.tok_start_to_orig_index): + continue + if end_index - doc_offset >= len(feature.tok_end_to_orig_index): + continue + # if start_index not in feature.tok_start_to_orig_index: + # continue + # if end_index not in feature.tok_end_to_orig_index: + # continue + if not feature.token_is_max_context.get(start_index, False): + continue + if end_index < start_index: + continue + length = end_index - start_index + 1 + if length > max_answer_length: + continue + prelim_predictions.append( + _PrelimPrediction( + feature_index=feature_index, + start_index=start_index - doc_offset, + end_index=end_index - doc_offset, + start_logit=result.start_logits[start_index], + end_logit=result.end_logits[end_index])) + + if version_2_with_negative: + prelim_predictions.append( + _PrelimPrediction( + feature_index=min_null_feature_index, + start_index=-1, + end_index=-1, + start_logit=null_start_logit, + end_logit=null_end_logit)) + prelim_predictions = sorted( + prelim_predictions, + key=lambda x: (x.start_logit + x.end_logit), + reverse=True) + + _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name + "NbestPrediction", ["text", "start_logit", "end_logit"]) + + seen_predictions = {} + nbest = [] + for pred in prelim_predictions: + if len(nbest) >= n_best_size: + break + feature = features[pred.feature_index] + if pred.start_index >= 0: # this is a non-null prediction + tok_start_to_orig_index = feature.tok_start_to_orig_index + tok_end_to_orig_index = feature.tok_end_to_orig_index + start_orig_pos = tok_start_to_orig_index[pred.start_index] + end_orig_pos = tok_end_to_orig_index[pred.end_index] + + paragraph_text = example.paragraph_text + final_text = paragraph_text[start_orig_pos:end_orig_pos + 1].strip() + if final_text in seen_predictions: + continue + + seen_predictions[final_text] = True + else: + final_text = "" + seen_predictions[final_text] = True + + nbest.append( + _NbestPrediction( + text=final_text, + start_logit=pred.start_logit, + end_logit=pred.end_logit)) + + # if we didn't inlude the empty option in the n-best, inlcude it + if version_2_with_negative: + if "" not in seen_predictions: + nbest.append( + _NbestPrediction( + text="", start_logit=null_start_logit, + end_logit=null_end_logit)) + # In very rare edge cases we could have no valid predictions. So we + # just create a nonce prediction in this case to avoid failure. 
+ if not nbest: + nbest.append( + _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) + + assert len(nbest) >= 1 + + total_scores = [] + best_non_null_entry = None + for entry in nbest: + total_scores.append(entry.start_logit + entry.end_logit) + if not best_non_null_entry: + if entry.text: + best_non_null_entry = entry + + probs = _compute_softmax(total_scores) + + nbest_json = [] + for (i, entry) in enumerate(nbest): + output = collections.OrderedDict() + output["text"] = entry.text + output["probability"] = probs[i] + output["start_logit"] = entry.start_logit + output["end_logit"] = entry.end_logit + nbest_json.append(output) + + assert len(nbest_json) >= 1 + + if not version_2_with_negative: + all_predictions[example.qas_id] = nbest_json[0]["text"] + else: + assert best_non_null_entry is not None + # predict "" iff the null score - the score of best non-null > threshold + score_diff = score_null - best_non_null_entry.start_logit - ( + best_non_null_entry.end_logit) + scores_diff_json[example.qas_id] = score_diff + if score_diff > null_score_diff_threshold: + all_predictions[example.qas_id] = "" + else: + all_predictions[example.qas_id] = best_non_null_entry.text + + all_nbest_json[example.qas_id] = nbest_json + + return all_predictions, all_nbest_json, scores_diff_json + + +def write_to_json_files(json_records, json_file): + with tf.io.gfile.GFile(json_file, "w") as writer: + writer.write(json.dumps(json_records, indent=4) + "\n") + + +def _get_best_indexes(logits, n_best_size): + """Get the n-best logits from a list.""" + index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) + + best_indexes = [] + for i in range(len(index_and_score)): + if i >= n_best_size: + break + best_indexes.append(index_and_score[i][0]) + return best_indexes + + +def _compute_softmax(scores): + """Compute softmax probability over raw logits.""" + if not scores: + return [] + + max_score = None + for score in scores: + if max_score is None or score > max_score: + max_score = score + + exp_scores = [] + total_sum = 0.0 + for score in scores: + x = math.exp(score - max_score) + exp_scores.append(x) + total_sum += x + + probs = [] + for score in exp_scores: + probs.append(score / total_sum) + return probs + + +class FeatureWriter(object): + """Writes InputFeature to TF example file.""" + + def __init__(self, filename, is_training): + self.filename = filename + self.is_training = is_training + self.num_features = 0 + tf.io.gfile.makedirs(os.path.dirname(filename)) + self._writer = tf.io.TFRecordWriter(filename) + + def process_feature(self, feature): + """Write a InputFeature to the TFRecordWriter as a tf.train.Example.""" + self.num_features += 1 + + def create_int_feature(values): + feature = tf.train.Feature( + int64_list=tf.train.Int64List(value=list(values))) + return feature + + features = collections.OrderedDict() + features["unique_ids"] = create_int_feature([feature.unique_id]) + features["input_ids"] = create_int_feature(feature.input_ids) + features["input_mask"] = create_int_feature(feature.input_mask) + features["segment_ids"] = create_int_feature(feature.segment_ids) + + if self.is_training: + features["start_positions"] = create_int_feature([feature.start_position]) + features["end_positions"] = create_int_feature([feature.end_position]) + impossible = 0 + if feature.is_impossible: + impossible = 1 + features["is_impossible"] = create_int_feature([impossible]) + + tf_example = tf.train.Example(features=tf.train.Features(feature=features)) + 
self._writer.write(tf_example.SerializeToString()) + + def close(self): + self._writer.close() + + +def generate_tf_record_from_json_file(input_file_path, + sp_model_file, + output_path, + max_seq_length=384, + do_lower_case=True, + max_query_length=64, + doc_stride=128, + version_2_with_negative=False): + """Generates and saves training data into a tf record file.""" + train_examples = read_squad_examples( + input_file=input_file_path, + is_training=True, + version_2_with_negative=version_2_with_negative) + tokenizer = tokenization.FullSentencePieceTokenizer( + sp_model_file=sp_model_file) + train_writer = FeatureWriter(filename=output_path, is_training=True) + number_of_examples = convert_examples_to_features( + examples=train_examples, + tokenizer=tokenizer, + max_seq_length=max_seq_length, + doc_stride=doc_stride, + max_query_length=max_query_length, + is_training=True, + output_fn=train_writer.process_feature, + do_lower_case=do_lower_case) + train_writer.close() + + meta_data = { + "task_type": "bert_squad", + "train_data_size": number_of_examples, + "max_seq_length": max_seq_length, + "max_query_length": max_query_length, + "doc_stride": doc_stride, + "version_2_with_negative": version_2_with_negative, + } + + return meta_data diff --git a/models/official/nlp/data/tagging_data_loader.py b/models/official/nlp/data/tagging_data_loader.py new file mode 100644 index 0000000000000000000000000000000000000000..127a5e004023008dbf765aec7bb8bfb7e5f89de1 --- /dev/null +++ b/models/official/nlp/data/tagging_data_loader.py @@ -0,0 +1,64 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Loads dataset for the tagging (e.g., NER/POS) task.""" +from typing import Mapping, Optional +import tensorflow as tf + +from official.core import input_reader + + +class TaggingDataLoader: + """A class to load dataset for tagging (e.g., NER and POS) task.""" + + def __init__(self, params): + self._params = params + self._seq_length = params.seq_length + + def _decode(self, record: tf.Tensor): + """Decodes a serialized tf.Example.""" + name_to_features = { + 'input_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64), + 'input_mask': tf.io.FixedLenFeature([self._seq_length], tf.int64), + 'segment_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64), + 'label_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64), + } + example = tf.io.parse_single_example(record, name_to_features) + + # tf.Example only supports tf.int64, but the TPU only supports tf.int32. + # So cast all int64 to int32. 
+    for name in example:
+      t = example[name]
+      if t.dtype == tf.int64:
+        t = tf.cast(t, tf.int32)
+      example[name] = t
+
+    return example
+
+  def _parse(self, record: Mapping[str, tf.Tensor]):
+    """Parses raw tensors into a dict of tensors to be consumed by the model."""
+    x = {
+        'input_word_ids': record['input_ids'],
+        'input_mask': record['input_mask'],
+        'input_type_ids': record['segment_ids']
+    }
+    y = record['label_ids']
+    return (x, y)
+
+  def load(self, input_context: Optional[tf.distribute.InputContext] = None):
+    """Returns a tf.dataset.Dataset."""
+    reader = input_reader.InputReader(
+        params=self._params, decoder_fn=self._decode, parser_fn=self._parse)
+    return reader.read(input_context)
diff --git a/models/official/nlp/modeling/README.md b/models/official/nlp/modeling/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..0e74b4637c44ed82392203ad4fb148420c05f18b
--- /dev/null
+++ b/models/official/nlp/modeling/README.md
@@ -0,0 +1,43 @@
+# NLP Modeling Library
+
+This library provides a set of Keras primitives (Layers, Networks, and Models)
+that can be assembled into transformer-based models. They are flexible,
+validated, interoperable, and both TF1 and TF2 compatible.
+
+* [`layers`](layers) are the fundamental building blocks for NLP models.
+They can be used to assemble new layers, networks, or models.
+
+* [`networks`](networks) are combinations of layers (and possibly other
+networks). They are sub-units of models that would not be trained alone. They
+encapsulate common network structures like a classification head
+or a transformer encoder into an easily handled object with a
+standardized configuration.
+
+* [`models`](models) are combinations of layers and networks that would be
+trained. Pre-built canned models are provided as both convenience functions
+and canonical examples.
+
+* [`losses`](losses) contains common loss computations used in NLP tasks.
+
+Besides the pre-defined primitives, this library also provides scaffold classes
+to allow easy experimentation with novel architectures; for example, you don’t
+need to fork a whole Transformer object to try a different kind of attention
+primitive (see the sketch at the end of this file).
+
+* [`TransformerScaffold`](layers/transformer_scaffold.py) implements the
+Transformer from ["Attention Is All You Need"](https://arxiv.org/abs/1706.03762),
+with a customizable attention layer option. Users can pass a class to
+`attention_cls` and associated config to `attention_cfg`, in which case the
+scaffold will instantiate the class with the config, or pass a class instance
+to `attention_cls`.
+
+* [`EncoderScaffold`](networks/encoder_scaffold.py) implements the transformer
+encoder from ["BERT: Pre-training of Deep Bidirectional Transformers for
+Language Understanding"](https://arxiv.org/abs/1810.04805), with a customizable
+embedding subnetwork (which will replace the standard embedding logic) and/or a
+custom hidden layer (which will replace the Transformer instantiation in the
+encoder).
+
+BERT and ALBERT models in this repo are implemented using this library. Code
+examples can be found in the corresponding model folder.
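+
+The following is a rough, minimal sketch of the `TransformerScaffold` hook
+described above, swapping in a (here trivially subclassed) attention layer.
+Only the `attention_cls`/`attention_cfg` hooks are taken from this README; the
+remaining constructor arguments are assumed to mirror the standard Transformer
+layer and may need adjusting for your setup.
+
+```python
+from official.nlp.modeling import layers
+
+
+class MyAttention(layers.MultiHeadAttention):
+  """A stand-in custom attention class; it reuses the stock layer unchanged."""
+
+
+# Argument names other than attention_cls/attention_cfg are assumptions here.
+block = layers.TransformerScaffold(
+    num_attention_heads=8,
+    intermediate_size=2048,
+    intermediate_activation="relu",
+    attention_cls=MyAttention,
+    attention_cfg={"num_heads": 8, "key_size": 64})
+```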
+
+
+
+
+
+
+
diff --git a/models/official/nlp/modeling/__init__.py b/models/official/nlp/modeling/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/models/official/nlp/modeling/__init__.py
@@ -0,0 +1 @@
+
diff --git a/models/official/nlp/modeling/layers/README.md b/models/official/nlp/modeling/layers/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..42f299a3f2308f63f5339bd3f639bef0607f5e97
--- /dev/null
+++ b/models/official/nlp/modeling/layers/README.md
@@ -0,0 +1,64 @@
+# Layers
+
+Layers are the fundamental building blocks for NLP models. They can be used to
+assemble new layers, networks, or models.
+
+* [DenseEinsum](dense_einsum.py) implements a feedforward network using
+  tf.einsum. This layer contains the einsum op, the associated weight, and the
+  logic required to generate the einsum expression for the given
+  initialization parameters.
+
+* [MultiHeadAttention](attention.py) implements an optionally masked attention
+  between query, key, value tensors as described in
+  ["Attention Is All You Need"](https://arxiv.org/abs/1706.03762). If
+  `from_tensor` and `to_tensor` are the same, then this is self-attention. A
+  short usage sketch appears at the end of this file.
+
+* [CachedAttention](attention.py) implements an attention layer with a cache
+  used for auto-regressive decoding.
+
+* [MultiChannelAttention](multi_channel_attention.py) implements a variant of
+  multi-head attention which can be used to merge multiple streams for
+  cross-attentions.
+
+* [TalkingHeadsAttention](talking_heads_attention.py) implements the talking
+  heads attention, as described in
+  ["Talking-Heads Attention"](https://arxiv.org/abs/2003.02436).
+
+* [Transformer](transformer.py) implements an optionally masked transformer as
+  described in
+  ["Attention Is All You Need"](https://arxiv.org/abs/1706.03762).
+
+* [TransformerDecoderLayer](transformer.py) implements a single decoder block
+  made up of self multi-head attention, cross multi-head attention, and a
+  feedforward network.
+
+* [ReZeroTransformer](rezero_transformer.py) implements a Transformer with
+  ReZero, described in
+  ["ReZero is All You Need: Fast Convergence at Large Depth"](https://arxiv.org/abs/2003.04887).
+
+* [OnDeviceEmbedding](on_device_embedding.py) implements efficient embedding
+  lookups designed for TPU-based models.
+
+* [PositionEmbedding](position_embedding.py) creates a positional embedding
+  as described in ["BERT: Pre-training of Deep Bidirectional Transformers for
+  Language Understanding"](https://arxiv.org/abs/1810.04805).
+
+* [SelfAttentionMask](self_attention_mask.py) creates a 3D attention mask from
+  a 2D tensor mask.
+
+* [MaskedSoftmax](masked_softmax.py) implements a softmax with an optional
+  masking input. If no mask is provided to this layer, it performs a standard
+  softmax; however, if a mask tensor is applied (which should be 1 in
+  positions where the data should be allowed through, and 0 where the data
+  should be masked), the output will have masked positions set to
+  approximately zero.
+
+* [`MaskedLM`](masked_lm.py) implements a masked language model. It assumes
+  the embedding table variable is passed to it.
+
+* [ClassificationHead](cls_head.py) implements a pooling head over a sequence
+  of embeddings, commonly used by classification tasks.
+
+* [GatedFeedforward](gated_feedforward.py) implements the gated linear layer
+  feedforward as described in
+  ["GLU Variants Improve Transformer"](https://arxiv.org/abs/2002.05202).
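+
+As a minimal usage sketch (mirroring the unit tests added elsewhere in this
+change), the attention layers above share a common Keras call convention: a
+list of `[query, value]` (optionally `[query, value, key]`) tensors plus an
+optional attention mask. The shapes below are illustrative only.
+
+```python
+import tensorflow as tf
+
+from official.nlp.modeling import layers
+
+# Cross-attention between a length-4 target and a length-2 source sequence.
+attention_layer = layers.MultiHeadAttention(num_heads=2, key_size=2)
+query = tf.keras.Input(shape=(4, 8))
+value = tf.keras.Input(shape=(2, 8))
+mask = tf.keras.Input(shape=(4, 2))  # 1 = attend, 0 = masked out
+output = attention_layer([query, value], mask)  # -> shape (None, 4, 8)
+```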
diff --git a/models/official/nlp/modeling/layers/__init__.py b/models/official/nlp/modeling/layers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2cd8e7b9e59ceab76e268f83907833eec32c73ce --- /dev/null +++ b/models/official/nlp/modeling/layers/__init__.py @@ -0,0 +1,30 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Layers package definition.""" +# pylint: disable=wildcard-import +from official.nlp.modeling.layers.attention import * +from official.nlp.modeling.layers.cls_head import * +from official.nlp.modeling.layers.dense_einsum import DenseEinsum +from official.nlp.modeling.layers.gated_feedforward import GatedFeedforward +from official.nlp.modeling.layers.masked_lm import MaskedLM +from official.nlp.modeling.layers.masked_softmax import MaskedSoftmax +from official.nlp.modeling.layers.multi_channel_attention import * +from official.nlp.modeling.layers.on_device_embedding import OnDeviceEmbedding +from official.nlp.modeling.layers.position_embedding import PositionEmbedding +from official.nlp.modeling.layers.rezero_transformer import ReZeroTransformer +from official.nlp.modeling.layers.self_attention_mask import SelfAttentionMask +from official.nlp.modeling.layers.talking_heads_attention import TalkingHeadsAttention +from official.nlp.modeling.layers.transformer import * +from official.nlp.modeling.layers.transformer_scaffold import TransformerScaffold diff --git a/models/official/nlp/modeling/layers/attention.py b/models/official/nlp/modeling/layers/attention.py new file mode 100644 index 0000000000000000000000000000000000000000..99692b281794385a97af341d03dea0ee6c46b95b --- /dev/null +++ b/models/official/nlp/modeling/layers/attention.py @@ -0,0 +1,530 @@ +# Lint as: python3 +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+"""Keras-based attention layer."""
+# pylint: disable=g-classes-have-attributes
+from __future__ import absolute_import
+from __future__ import division
+# from __future__ import google_type_annotations
+from __future__ import print_function
+
+import collections
+import math
+import string
+
+import numpy as np
+import tensorflow as tf
+
+from official.nlp.modeling.layers import masked_softmax
+
+EinsumDense = tf.keras.layers.experimental.EinsumDense
+_CHR_IDX = string.ascii_lowercase
+
+
+def _build_attention_equation(qkv_rank, attn_axes):
+  """Builds einsum equations for the attention computation.
+
+  Query, key, value inputs after projection are expected to have the shape as:
+  (bs, <non-attention dims>, <attention dims>, num_heads, channels).
+  bs and <non-attention dims> are treated as <batch dims>.
+  The attention operations can be generalized:
+  (1) Query-key dot product:
+  (<batch dims>, <query attention dims>, num_heads, channels), (<batch dims>,
+  <key attention dims>, num_heads, channels) -> (<batch dims>,
+  num_heads, <query attention dims>, <key attention dims>)
+  (2) Combination:
+  (<batch dims>, num_heads, <query attention dims>, <key attention dims>),
+  (<batch dims>, <value attention dims>, num_heads, channels) -> (<batch dims>,
+  <query attention dims>, num_heads, channels)
+
+  Args:
+    qkv_rank: the rank of query, key, value tensors.
+    attn_axes: a list/tuple of axes, [1, rank), that will do attention.
+
+  Returns:
+    Einsum equations.
+  """
+  target_notation = _CHR_IDX[:qkv_rank]
+  # `batch_dims` includes the head dim.
+  batch_dims = tuple(np.delete(range(qkv_rank), attn_axes + (qkv_rank - 1,)))
+  letter_offset = qkv_rank
+  source_notation = ""
+  for i in range(qkv_rank):
+    if i in batch_dims or i == qkv_rank - 1:
+      source_notation += target_notation[i]
+    else:
+      source_notation += _CHR_IDX[letter_offset]
+      letter_offset += 1
+
+  product_notation = "".join([target_notation[i] for i in batch_dims] +
+                             [target_notation[i] for i in attn_axes] +
+                             [source_notation[i] for i in attn_axes])
+  dot_product_equation = "%s,%s->%s" % (source_notation, target_notation,
+                                        product_notation)
+  attn_scores_rank = len(product_notation)
+  combine_equation = "%s,%s->%s" % (product_notation, source_notation,
+                                    target_notation)
+  return dot_product_equation, combine_equation, attn_scores_rank
+
+
+def _build_proj_equation(free_dims, bound_dims, output_dims):
+  """Builds an einsum equation for projections inside multi-head attention."""
+  input_str = ""
+  kernel_str = ""
+  output_str = ""
+  bias_axes = ""
+  letter_offset = 0
+  for i in range(free_dims):
+    char = _CHR_IDX[i + letter_offset]
+    input_str += char
+    output_str += char
+
+  letter_offset += free_dims
+  for i in range(bound_dims):
+    char = _CHR_IDX[i + letter_offset]
+    input_str += char
+    kernel_str += char
+
+  letter_offset += bound_dims
+  for i in range(output_dims):
+    char = _CHR_IDX[i + letter_offset]
+    kernel_str += char
+    output_str += char
+    bias_axes += char
+  equation = "%s,%s->%s" % (input_str, kernel_str, output_str)
+
+  return equation, bias_axes, len(output_str)
+
+
+def _get_output_shape(output_rank, known_last_dims):
+  return [None] * (output_rank - len(known_last_dims)) + list(known_last_dims)
+
+
+@tf.keras.utils.register_keras_serializable(package="Text")
+class MultiHeadAttention(tf.keras.layers.Layer):
+  """MultiHeadAttention layer.
+
+  This is an implementation of multi-headed attention based on "Attention
+  is all you Need". If `query`, `key,` `value` are the same, then
+  this is self-attention. Each timestep in `query` attends to the
+  corresponding sequence in `key`, and returns a fixed-width vector.
+
+  This layer first projects `query`, `key` and `value`. These are
+  (effectively) a list of tensors of length `num_attention_heads`, where the
+  corresponding shapes are [batch_size, <query dimensions>, key_size],
+  [batch_size, <key/value dimensions>, key_size],
+  [batch_size, <key/value dimensions>, value_size].
+
+  Then, the query and key tensors are dot-producted and scaled. These are
+  softmaxed to obtain attention probabilities. The value tensors are then
+  interpolated by these probabilities, then concatenated back to a single
+  tensor.
+
+  Finally, the result tensor with the last dimension as value_size can take a
+  linear projection and be returned.
+
+  Examples:
+
+  Performs 1D cross-attention over two sequence inputs with an attention mask.
+  Returns the additional attention weights over heads.
+
+  >>> layer = MultiHeadAttention(num_heads=2, key_size=2,
+  ...                            return_attention_scores=True)
+  >>> target = tf.keras.Input(shape=[8, 16])
+  >>> source = tf.keras.Input(shape=[4, 16])
+  >>> mask_tensor = tf.keras.Input(shape=[8, 4])
+  >>> output_tensor, weights = layer([target, source])
+  >>> print(output_tensor.shape), print(weights.shape)
+  (None, 8, 16) (None, 2, 8, 4)
+
+  Performs 2D self-attention over a 5D input tensor on axes 2 and 3.
+
+  >>> layer = MultiHeadAttention(num_heads=2, key_size=2, attention_axes=(2, 3))
+  >>> input_tensor = tf.keras.Input(shape=[5, 3, 4, 16])
+  >>> output_tensor = layer([input_tensor, input_tensor])
+  >>> print(output_tensor.shape)
+  (None, 5, 3, 4, 16)
+
+  Arguments:
+    num_heads: Number of attention heads.
+    key_size: Size of each attention head for query and key.
+    value_size: Size of each attention head for value.
+    dropout: Dropout probability.
+    use_bias: Boolean, whether the dense layers use bias vectors/matrices.
+    output_shape: The expected shape of an output tensor, besides the batch and
+      sequence dims. If not specified, projects back to the key feature dim.
+    attention_axes: axes over which the attention is applied. `None` means
+      attention over all axes except batch, heads, and features.
+    return_attention_scores: bool, if `True`, returns the multi-head attention
+      scores as an additional output argument.
+    kernel_initializer: Initializer for dense layer kernels.
+    bias_initializer: Initializer for dense layer biases.
+    kernel_regularizer: Regularizer for dense layer kernels.
+    bias_regularizer: Regularizer for dense layer biases.
+    activity_regularizer: Regularizer for dense layer activity.
+    kernel_constraint: Constraint for dense layer kernels.
+    bias_constraint: Constraint for dense layer biases.
+ """ + + def __init__(self, + num_heads, + key_size, + value_size=None, + dropout=0.0, + use_bias=True, + output_shape=None, + attention_axes=None, + return_attention_scores=False, + kernel_initializer="glorot_uniform", + bias_initializer="zeros", + kernel_regularizer=None, + bias_regularizer=None, + activity_regularizer=None, + kernel_constraint=None, + bias_constraint=None, + **kwargs): + super(MultiHeadAttention, self).__init__(**kwargs) + self._num_heads = num_heads + self._key_size = key_size + self._value_size = value_size if value_size else key_size + self._dropout = dropout + self._use_bias = use_bias + self._output_shape = output_shape + self._return_attention_scores = return_attention_scores + self._kernel_initializer = tf.keras.initializers.get(kernel_initializer) + self._bias_initializer = tf.keras.initializers.get(bias_initializer) + self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer) + self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer) + self._kernel_constraint = tf.keras.constraints.get(kernel_constraint) + self._bias_constraint = tf.keras.constraints.get(bias_constraint) + if attention_axes is not None and not isinstance(attention_axes, + collections.abc.Sized): + self._attention_axes = (attention_axes,) + else: + self._attention_axes = attention_axes + + def get_config(self): + config = { + "num_heads": + self._num_heads, + "key_size": + self._key_size, + "value_size": + self._value_size, + "dropout": + self._dropout, + "use_bias": + self._use_bias, + "output_shape": + self._output_shape, + "attention_axes": + self._attention_axes, + "return_attention_scores": + self._return_attention_scores, + "kernel_initializer": + tf.keras.initializers.serialize(self._kernel_initializer), + "bias_initializer": + tf.keras.initializers.serialize(self._bias_initializer), + "kernel_regularizer": + tf.keras.regularizers.serialize(self._kernel_regularizer), + "bias_regularizer": + tf.keras.regularizers.serialize(self._bias_regularizer), + "activity_regularizer": + tf.keras.regularizers.serialize(self._activity_regularizer), + "kernel_constraint": + tf.keras.constraints.serialize(self._kernel_constraint), + "bias_constraint": + tf.keras.constraints.serialize(self._bias_constraint) + } + base_config = super(MultiHeadAttention, self).get_config() + return dict(list(base_config.items()) + list(config.items())) + + def build(self, input_shape): + inputs_len = len(input_shape) + if inputs_len > 3 or inputs_len < 2: + raise ValueError( + "Expects inputs list of length 2 or 3, namely [query, value] or " + "[query, value, key]. 
" + "Given length: %d" % inputs_len) + tensor_shapes = tf.nest.map_structure(tf.TensorShape, input_shape) + query_shape = tensor_shapes[0] + value_shape = tensor_shapes[1] + key_shape = tensor_shapes[2] if inputs_len == 3 else value_shape + + common_kwargs = dict( + kernel_initializer=self._kernel_initializer, + bias_initializer=self._bias_initializer, + kernel_regularizer=self._kernel_regularizer, + bias_regularizer=self._bias_regularizer, + activity_regularizer=self._activity_regularizer, + kernel_constraint=self._kernel_constraint, + bias_constraint=self._bias_constraint) + + free_dims = query_shape.rank - 1 + einsum_equation, bias_axes, output_rank = _build_proj_equation( + free_dims, bound_dims=1, output_dims=2) + self._query_dense = EinsumDense( + einsum_equation, + output_shape=_get_output_shape(output_rank - 1, + [self._num_heads, self._key_size]), + bias_axes=bias_axes if self._use_bias else None, + name="query", + **common_kwargs) + einsum_equation, bias_axes, output_rank = _build_proj_equation( + key_shape.rank - 1, bound_dims=1, output_dims=2) + self._key_dense = EinsumDense( + einsum_equation, + output_shape=_get_output_shape(output_rank - 1, + [self._num_heads, self._key_size]), + bias_axes=bias_axes if self._use_bias else None, + name="key", + **common_kwargs) + einsum_equation, bias_axes, output_rank = _build_proj_equation( + value_shape.rank - 1, bound_dims=1, output_dims=2) + self._value_dense = EinsumDense( + einsum_equation, + output_shape=_get_output_shape(output_rank - 1, + [self._num_heads, self._value_size]), + bias_axes=bias_axes if self._use_bias else None, + name="value", + **common_kwargs) + + # Builds the attention computations for multi-head dot product attention. + # These computations could be wrapped into the keras attention layer once it + # support mult-head einsum computations. + self._build_attention(output_rank) + if self._output_shape: + if not isinstance(self._output_shape, collections.abc.Sized): + output_shape = [self._output_shape] + else: + output_shape = self._output_shape + else: + output_shape = [query_shape[-1]] + einsum_equation, bias_axes, output_rank = _build_proj_equation( + free_dims, bound_dims=2, output_dims=len(output_shape)) + self._output_dense = EinsumDense( + einsum_equation, + output_shape=_get_output_shape(output_rank - 1, output_shape), + bias_axes=bias_axes if self._use_bias else None, + name="attention_output", + **common_kwargs) + super(MultiHeadAttention, self).build(input_shape) + + def _build_attention(self, qkv_rank): + """Builds multi-head dot-product attention computations. + + This function builds attributes necessary for `_compute_attention` to + costomize attention computation to replace the default dot-product + attention. + + Args: + qkv_rank: the rank of query, key, value tensors. + """ + if self._attention_axes is None: + self._attention_axes = tuple(range(1, qkv_rank - 2)) + else: + self._attention_axes = tuple(self._attention_axes) + self._dot_product_equation, self._combine_equation, attn_scores_rank = ( + _build_attention_equation(qkv_rank, attn_axes=self._attention_axes)) + norm_axes = tuple( + range(attn_scores_rank - len(self._attention_axes), attn_scores_rank)) + self._masked_softmax = masked_softmax.MaskedSoftmax( + mask_expansion_axes=[1], normalization_axes=norm_axes) + self._dropout_layer = tf.keras.layers.Dropout(rate=self._dropout) + + def _compute_attention(self, + query_tensor, + key_tensor, + value_tensor, + attention_mask=None): + """Applies Dot-product attention with query, key, value tensors. 
+ + This function defines the computation inside `call` with projected + multi-head Q, K, V inputs. Users can override this function for customized + attention implementation. + + Args: + query_tensor: Projected query `Tensor` of shape `[B, T, N, key_size]`. + key_tensor: Projected key `Tensor` of shape `[B, T, N, key_size]`. + value_tensor: Projected value `Tensor` of shape `[B, T, N, value_size]`. + attention_mask: a boolean mask of shape `[B, T, S]`, that prevents + attention to certain positions. + + Returns: + attention_output: Multi-headed outputs of attention computation. + attention_scores: Multi-headed attention weights. + """ + # Take the dot product between "query" and "key" to get the raw + # attention scores. + attention_scores = tf.einsum(self._dot_product_equation, key_tensor, + query_tensor) + attention_scores = tf.multiply(attention_scores, + 1.0 / math.sqrt(float(self._key_size))) + + # Normalize the attention scores to probabilities. + # `attention_scores` = [B, N, T, S] + attention_scores = self._masked_softmax(attention_scores, attention_mask) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_scores_dropout = self._dropout_layer(attention_scores) + + # `context_layer` = [B, T, N, H] + attention_output = tf.einsum(self._combine_equation, + attention_scores_dropout, value_tensor) + return attention_output, attention_scores + + def call(self, inputs, attention_mask=None): + """Implements the forward pass. + + Size glossary: + * Number of heads (H): the number of attention heads. + * Value size (V): the size of each value embedding per head. + * Key size (K): the size of each key embedding per head. Equally, the size + of each query embedding per head. Typically K <= V. + * Batch dimensions (B). + * Query (target) attention axes shape (T). + * Value (source) attention axes shape (S), the rank must match the target. + + Args: + inputs: List of the following tensors: + * query: Query `Tensor` of shape `[B, T, dim]`. + * value: Value `Tensor` of shape `[B, S, dim]`. + * key: Optional key `Tensor` of shape `[B, S, dim]`. If not given, will + use `value` for both `key` and `value`, which is the most common case. + attention_mask: a boolean mask of shape `[B, T, S]`, that prevents + attention to certain positions. + + Returns: + attention_output: The result of the computation, of shape [B, T, E], + where `T` is for target sequence shapes and `E` is the query input last + dimension if `output_shape` is `None`. Otherwise, the multi-head outputs + are project to the shape specified by `output_shape`. + attention_scores: [Optional] multi-head attention coeffients over + attention + axes. + """ + inputs_len = len(inputs) + if inputs_len > 3 or inputs_len < 2: + raise ValueError( + "Expects inputs list of length 2 or 3, namely [query, value] or " + "[query, value, key]. 
" + "Given length: %d" % inputs_len) + query = inputs[0] + value = inputs[1] + key = inputs[2] if inputs_len == 3 else value + + # N = `num_attention_heads` + # H = `size_per_head` + # `query_tensor` = [B, T, N ,H] + query_tensor = self._query_dense(query) + + # `key_tensor` = [B, S, N, H] + key_tensor = self._key_dense(key) + + # `value_tensor` = [B, S, N, H] + value_tensor = self._value_dense(value) + + attention_output, attention_scores = self._compute_attention( + query_tensor, key_tensor, value_tensor, attention_mask) + attention_output = self._output_dense(attention_output) + + if self._return_attention_scores: + return attention_output, attention_scores + return attention_output + + +@tf.keras.utils.register_keras_serializable(package="Text") +class CachedAttention(MultiHeadAttention): + """Attention layer with cache used for auto-agressive decoding. + + Arguments are the same as `MultiHeadAttention` layer. + """ + + def _update_cache(self, key_tensor, value_tensor, cache, decode_loop_step): + """Updates cache states and gets full-length key/value tensors.""" + # Combines cached keys and values with new keys and values. + if decode_loop_step is not None: + # TPU special case. + key_seq_dim = cache["key"].shape.as_list()[1] + indices = tf.reshape( + tf.one_hot(decode_loop_step, key_seq_dim, dtype=key_tensor.dtype), + [1, key_seq_dim, 1, 1]) + key_tensor = cache["key"] + key_tensor * indices + value_seq_dim = cache["value"].shape.as_list()[1] + indices = tf.reshape( + tf.one_hot(decode_loop_step, value_seq_dim, dtype=value_tensor.dtype), + [1, value_seq_dim, 1, 1]) + value_tensor = cache["value"] + value_tensor * indices + else: + key_tensor = tf.concat( + [tf.cast(cache["key"], key_tensor.dtype), key_tensor], axis=1) + value_tensor = tf.concat( + [tf.cast(cache["value"], value_tensor.dtype), value_tensor], axis=1) + + # Update cache + cache["key"] = key_tensor + cache["value"] = value_tensor + + return key_tensor, value_tensor + + def call(self, + inputs, + attention_mask=None, + cache=None, + decode_loop_step=None): + from_tensor = inputs[0] + to_tensor = inputs[1] + + # Scalar dimensions referenced here: + # B = batch size (number of sequences) + # F = `from_tensor` sequence length + # T = `to_tensor` sequence length + # N = `num_attention_heads` + # H = `size_per_head` + # `query_tensor` = [B, F, N ,H] + query_tensor = self._query_dense(from_tensor) + + # `key_tensor` = [B, T, N, H] + key_tensor = self._key_dense(to_tensor) + + # `value_tensor` = [B, T, N, H] + value_tensor = self._value_dense(to_tensor) + + if cache: + key_tensor, value_tensor = self._update_cache(key_tensor, value_tensor, + cache, decode_loop_step) + + # Take the dot product between "query" and "key" to get the raw + # attention scores. + attention_scores = tf.einsum(self._dot_product_equation, key_tensor, + query_tensor) + attention_scores = tf.multiply(attention_scores, + 1.0 / math.sqrt(float(self._key_size))) + + # Normalize the attention scores to probabilities. + # `attention_scores` = [B, N, F, T] + attention_scores = self._masked_softmax(attention_scores, attention_mask) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
+ attention_scores = self._dropout_layer(attention_scores) + # `context_layer` = [B, F, N, H] + attention_output = tf.einsum(self._combine_equation, attention_scores, + value_tensor) + attention_output = self._output_dense(attention_output) + if self._return_attention_scores: + return attention_output, attention_scores, cache + return attention_output, cache diff --git a/models/official/nlp/modeling/layers/attention_test.py b/models/official/nlp/modeling/layers/attention_test.py new file mode 100644 index 0000000000000000000000000000000000000000..ceb96f5084d795cdbafa7cdb352fb4692034f803 --- /dev/null +++ b/models/official/nlp/modeling/layers/attention_test.py @@ -0,0 +1,255 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for the attention layer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +import numpy as np +import tensorflow as tf + +from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import +from official.nlp.modeling.layers import attention + + +# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It +# guarantees forward compatibility of this code for the V2 switchover. +@keras_parameterized.run_all_keras_modes +class MultiHeadAttentionTest(keras_parameterized.TestCase): + + @parameterized.named_parameters( + ("key_value_same_proj", None, None, [40, 80]), + ("key_value_different_proj", 32, 60, [40, 60]), + ) + def test_non_masked_attention(self, value_size, output_shape, output_dims): + """Test that the attention layer can be created without a mask tensor.""" + test_layer = attention.MultiHeadAttention( + num_heads=12, + key_size=64, + value_size=value_size, + output_shape=output_shape) + # Create a 3-dimensional input (the first dimension is implicit). + query = tf.keras.Input(shape=(40, 80)) + value = tf.keras.Input(shape=(20, 80)) + output = test_layer([query, value]) + self.assertEqual(output.shape.as_list(), [None] + output_dims) + + def test_non_masked_self_attention(self): + """Test with one input (self-attenntion) and no mask tensor.""" + test_layer = attention.MultiHeadAttention(num_heads=12, key_size=64) + # Create a 3-dimensional input (the first dimension is implicit). + query = tf.keras.Input(shape=(40, 80)) + output = test_layer([query, query]) + self.assertEqual(output.shape.as_list(), [None, 40, 80]) + + def test_attention_scores(self): + """Test attention outputs with coefficients.""" + test_layer = attention.MultiHeadAttention( + num_heads=12, key_size=64, return_attention_scores=True) + # Create a 3-dimensional input (the first dimension is implicit). 
+ query = tf.keras.Input(shape=(40, 80)) + output, coef = test_layer([query, query]) + self.assertEqual(output.shape.as_list(), [None, 40, 80]) + self.assertEqual(coef.shape.as_list(), [None, 12, 40, 40]) + + @parameterized.named_parameters(("with_bias", True), ("no_bias", False)) + def test_masked_attention(self, use_bias): + """Test with a mask tensor.""" + test_layer = attention.MultiHeadAttention( + num_heads=2, key_size=2, use_bias=use_bias) + # Create a 3-dimensional input (the first dimension is implicit). + batch_size = 3 + query = tf.keras.Input(shape=(4, 8)) + value = tf.keras.Input(shape=(2, 8)) + mask_tensor = tf.keras.Input(shape=(4, 2)) + output = test_layer([query, value], mask_tensor) + + # Create a model containing the test layer. + model = tf.keras.Model([query, value, mask_tensor], output) + + # Generate data for the input (non-mask) tensors. + from_data = 10 * np.random.random_sample((batch_size, 4, 8)) + to_data = 10 * np.random.random_sample((batch_size, 2, 8)) + + # Invoke the data with a random set of mask data. This should mask at least + # one element. + mask_data = np.random.randint(2, size=(batch_size, 4, 2)) + masked_output_data = model.predict([from_data, to_data, mask_data]) + + # Invoke the same data, but with a null mask (where no elements are masked). + null_mask_data = np.ones((batch_size, 4, 2)) + unmasked_output_data = model.predict([from_data, to_data, null_mask_data]) + + # Because one data is masked and one is not, the outputs should not be the + # same. + self.assertNotAllClose(masked_output_data, unmasked_output_data) + + # Tests the layer with three inputs: Q, K, V. + key = tf.keras.Input(shape=(2, 8)) + output = test_layer([query, value, key], mask_tensor) + model = tf.keras.Model([query, value, key, mask_tensor], output) + + masked_output_data = model.predict([from_data, to_data, to_data, mask_data]) + unmasked_output_data = model.predict( + [from_data, to_data, to_data, null_mask_data]) + # Because one data is masked and one is not, the outputs should not be the + # same. + self.assertNotAllClose(masked_output_data, unmasked_output_data) + + if use_bias: + self.assertLen(test_layer._query_dense.trainable_variables, 2) + self.assertLen(test_layer._output_dense.trainable_variables, 2) + else: + self.assertLen(test_layer._query_dense.trainable_variables, 1) + self.assertLen(test_layer._output_dense.trainable_variables, 1) + + def test_initializer(self): + """Test with a specified initializer.""" + test_layer = attention.MultiHeadAttention( + num_heads=12, + key_size=64, + kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)) + # Create a 3-dimensional input (the first dimension is implicit). + query = tf.keras.Input(shape=(40, 80)) + output = test_layer([query, query]) + self.assertEqual(output.shape.as_list(), [None, 40, 80]) + + @parameterized.named_parameters( + ("4d_inputs_one_free_batch", [3, 4], [3, 2], [4, 2], (2,)), + ("4D_inputs_2D_attention", [3, 4], [3, 2], [3, 4, 3, 2], (1, 2)), + ("5D_inputs_2D_attention", [5, 3, 4], [5, 3, 2], [3, 4, 3, 2], (2, 3))) + def test_high_dim_attention(self, q_dims, v_dims, mask_dims, attention_axes): + """Test with a mask tensor.""" + test_layer = attention.MultiHeadAttention( + num_heads=2, key_size=2, attention_axes=attention_axes) + batch_size, hidden_size = 3, 8 + # Generate data for the input (non-mask) tensors. 
+ query_shape = [batch_size] + q_dims + [hidden_size] + value_shape = [batch_size] + v_dims + [hidden_size] + mask_shape = [batch_size] + mask_dims + query = 10 * np.random.random_sample(query_shape) + value = 10 * np.random.random_sample(value_shape) + + # Invoke the data with a random set of mask data. This should mask at least + # one element. + mask_data = np.random.randint(2, size=mask_shape).astype("bool") + output = test_layer([query, value], mask_data) + + # Invoke the same data, but with a null mask (where no elements are masked). + null_mask_data = np.ones(mask_shape) + unmasked_output = test_layer([query, value], null_mask_data) + # Because one data is masked and one is not, the outputs should not be the + # same. + self.assertNotAllClose(output, unmasked_output) + + +class SubclassAttention(attention.MultiHeadAttention): + + def _build_attention(self, qkv_rank): + pass + + def _compute_attention(self, + query_tensor, + key_tensor, + value_tensor, + attention_mask=None): + return value_tensor, None + + +@keras_parameterized.run_all_keras_modes +class AttentionSubclassTest(keras_parameterized.TestCase): + + def test_initializer(self): + """Test with a specified initializer.""" + test_layer = SubclassAttention( + num_heads=12, + key_size=64) + # Create a 3-dimensional input (the first dimension is implicit). + query = tf.keras.Input(shape=(40, 80)) + output = test_layer([query, query]) + self.assertEqual(output.shape.as_list(), [None, 40, 80]) + + +def _create_cache(batch_size, init_decode_length, num_heads, head_size): + return { + "key": + tf.zeros([batch_size, init_decode_length, num_heads, head_size], + dtype=tf.float32), + "value": + tf.zeros([batch_size, init_decode_length, num_heads, head_size], + dtype=tf.float32) + } + + +@keras_parameterized.run_all_keras_modes +class CachedAttentionTest(keras_parameterized.TestCase): + + def test_masked_attention(self): + """Test with a mask tensor.""" + num_heads, head_size = 2, 2 + # Create a 3-dimensional input (the first dimension is implicit). + from_seq_length = 4 + batch_size = 3 + # GPU/CPU case. + init_decode_length = 0 + # Directly tests the keras layer. + cache = _create_cache(batch_size, init_decode_length, num_heads, head_size) + layer = attention.CachedAttention(num_heads=num_heads, key_size=head_size) + + # Generate data for the input (non-mask) tensors. + from_data = tf.zeros((batch_size, from_seq_length, 8), dtype=np.float32) + # Invoke the data with a random set of mask data. This should mask at least + # one element. + mask_data = np.random.randint( + 2, size=(batch_size, from_seq_length, from_seq_length)) + masked_output_data, cache = layer([from_data, from_data], mask_data, cache) + self.assertEqual(masked_output_data.shape, (3, 4, 8)) + self.assertEqual(cache["value"].shape, (3, 4, 2, 2)) + + # Tests inputs without cache. + masked_output_data, cache = layer([from_data, from_data, mask_data]) + self.assertEqual(masked_output_data.shape, (3, 4, 8)) + self.assertIsNone(cache) + + def test_padded_decode(self): + """Test with a mask tensor.""" + num_heads, head_size = 2, 2 + from_seq_length = 4 + # TPU decoding should pre-allocate the entire sequence. + batch_size = 3 + init_decode_length = from_seq_length + + # Directly tests the keras layer. + cache = _create_cache(batch_size, init_decode_length, num_heads, head_size) + layer = attention.CachedAttention(num_heads=num_heads, key_size=head_size) + + # Generate data for the input (non-mask) tensors. 
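+    # For padded (TPU-style) decoding the key/value cache is pre-allocated to
+    # the full sequence length, and `decode_loop_step` tells the layer which
+    # position of the cache to update in place.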
+ from_data = tf.zeros((batch_size, from_seq_length, 8), dtype=np.float32) + decode_loop_step = 2 + mask_data = np.random.randint( + 2, size=(batch_size, from_seq_length, from_seq_length), dtype=np.int32) + # Testing the invocation directly as Keras cannot consume inputs correctly. + masked_output_data, cache = layer([from_data, from_data], + mask_data, + cache, + decode_loop_step=decode_loop_step) + self.assertEqual(masked_output_data.shape, (3, 4, 8)) + self.assertEqual(cache["value"].shape, (3, 4, 2, 2)) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/modeling/layers/cls_head.py b/models/official/nlp/modeling/layers/cls_head.py new file mode 100644 index 0000000000000000000000000000000000000000..0240511429e58453fa9c483be120705347a0c754 --- /dev/null +++ b/models/official/nlp/modeling/layers/cls_head.py @@ -0,0 +1,90 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""A Classification head layer which is common used with sequence encoders.""" +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import tensorflow as tf + +from official.modeling import tf_utils + + +class ClassificationHead(tf.keras.layers.Layer): + """Pooling head for sentence-level classification tasks.""" + + def __init__(self, + inner_dim, + num_classes, + cls_token_idx=0, + activation="tanh", + dropout_rate=0.0, + initializer="glorot_uniform", + **kwargs): + """Initializes the `ClassificationHead`. + + Args: + inner_dim: The dimensionality of inner projection layer. + num_classes: Number of output classes. + cls_token_idx: The index inside the sequence to pool. + activation: Dense layer activation. + dropout_rate: Dropout probability. + initializer: Initializer for dense layer kernels. + **kwargs: Keyword arguments. + """ + super(ClassificationHead, self).__init__(**kwargs) + self.dropout_rate = dropout_rate + self.inner_dim = inner_dim + self.num_classes = num_classes + self.activation = tf_utils.get_activation(activation) + self.initializer = tf.keras.initializers.get(initializer) + self.cls_token_idx = cls_token_idx + + self.dense = tf.keras.layers.Dense( + units=inner_dim, + activation=self.activation, + kernel_initializer=self.initializer, + name="pooler_dense") + self.dropout = tf.keras.layers.Dropout(rate=self.dropout_rate) + self.out_proj = tf.keras.layers.Dense( + units=num_classes, kernel_initializer=self.initializer, name="logits") + + def call(self, features): + x = features[:, self.cls_token_idx, :] # take token. 
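+    # `features` is the encoder's sequence output of shape
+    # [batch_size, seq_length, hidden_size]; the line above pools the single
+    # token at `cls_token_idx` before the dense projection, dropout and final
+    # `logits` layer below.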
+ x = self.dense(x) + x = self.dropout(x) + x = self.out_proj(x) + return x + + def get_config(self): + config = { + "dropout_rate": self.dropout_rate, + "num_classes": self.num_classes, + "inner_dim": self.inner_dim, + "activation": tf.keras.activations.serialize(self.activation), + "initializer": tf.keras.initializers.serialize(self.initializer), + } + config.update(super(ClassificationHead, self).get_config()) + return config + + @classmethod + def from_config(cls, config, custom_objects=None): + return cls(**config) + + @property + def checkpoint_items(self): + return {self.dense.name: self.dense} diff --git a/models/official/nlp/modeling/layers/cls_head_test.py b/models/official/nlp/modeling/layers/cls_head_test.py new file mode 100644 index 0000000000000000000000000000000000000000..ea671f94f5806800f1f5ce07df9fffeff7a3ab68 --- /dev/null +++ b/models/official/nlp/modeling/layers/cls_head_test.py @@ -0,0 +1,42 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for cls_head.""" + +import tensorflow as tf + +from official.nlp.modeling.layers import cls_head + + +class ClassificationHead(tf.test.TestCase): + + def test_layer_invocation(self): + test_layer = cls_head.ClassificationHead(inner_dim=5, num_classes=2) + features = tf.zeros(shape=(2, 10, 10), dtype=tf.float32) + output = test_layer(features) + self.assertAllClose(output, [[0., 0.], [0., 0.]]) + self.assertSameElements(test_layer.checkpoint_items.keys(), + ["pooler_dense"]) + + def test_layer_serialization(self): + layer = cls_head.ClassificationHead(10, 2) + new_layer = cls_head.ClassificationHead.from_config(layer.get_config()) + + # If the serialization was successful, the new config should match the old. + self.assertAllEqual(layer.get_config(), new_layer.get_config()) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/modeling/layers/dense_einsum.py b/models/official/nlp/modeling/layers/dense_einsum.py new file mode 100644 index 0000000000000000000000000000000000000000..ba2383e6d9e47f1e1d39898c16bf99748e4d38e3 --- /dev/null +++ b/models/official/nlp/modeling/layers/dense_einsum.py @@ -0,0 +1,180 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Keras-based einsum layer.""" +# pylint: disable=g-classes-have-attributes +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import tensorflow as tf + +_CHR_IDX = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m"] + + +@tf.keras.utils.register_keras_serializable(package="Text") +class DenseEinsum(tf.keras.layers.Layer): + """A densely connected layer that uses tf.einsum as the backing computation. + + This layer can perform einsum calculations of arbitrary dimensionality. + + Arguments: + output_shape: Positive integer or tuple, dimensionality of the output space. + num_summed_dimensions: The number of dimensions to sum over. Standard 2D + matmul should use 1, 3D matmul should use 2, and so forth. + activation: Activation function to use. If you don't specify anything, no + activation is applied + (ie. "linear" activation: `a(x) = x`). + use_bias: Boolean, whether the layer uses a bias vector. + kernel_initializer: Initializer for the `kernel` weights matrix. + bias_initializer: Initializer for the bias vector. + kernel_regularizer: Regularizer function applied to the `kernel` weights + matrix. + bias_regularizer: Regularizer function applied to the bias vector. + activity_regularizer: Regularizer function applied to the output of the + layer (its "activation").. + kernel_constraint: Constraint function applied to the `kernel` weights + matrix. + bias_constraint: Constraint function applied to the bias vector. + Input shape: + N-D tensor with shape: `(batch_size, ..., input_dim)`. The most common + situation would be a 2D input with shape `(batch_size, input_dim)`. + Output shape: + N-D tensor with shape: `(batch_size, ..., units)`. For instance, for a 2D + input with shape `(batch_size, input_dim)`, the output would have shape + `(batch_size, units)`. 
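+  Example:
+    With the default `num_summed_dimensions=1`, a layer created as
+    `DenseEinsum(output_shape=(num_heads, head_size))` applied to a
+    `[batch, seq_length, hidden]` tensor builds a kernel of shape
+    `(hidden, num_heads, head_size)` and returns a
+    `[batch, seq_length, num_heads, head_size]` tensor.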
+ """ + + def __init__(self, + output_shape, + num_summed_dimensions=1, + activation=None, + use_bias=True, + kernel_initializer="glorot_uniform", + bias_initializer="zeros", + kernel_regularizer=None, + bias_regularizer=None, + activity_regularizer=None, + kernel_constraint=None, + bias_constraint=None, + **kwargs): + super(DenseEinsum, self).__init__(**kwargs) + self._output_shape = output_shape if isinstance( + output_shape, (list, tuple)) else (output_shape,) + self._activation = tf.keras.activations.get(activation) + self._use_bias = use_bias + self._kernel_initializer = tf.keras.initializers.get(kernel_initializer) + self._bias_initializer = tf.keras.initializers.get(bias_initializer) + self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer) + self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer) + self._kernel_constraint = tf.keras.constraints.get(kernel_constraint) + self._bias_constraint = tf.keras.constraints.get(bias_constraint) + self._num_summed_dimensions = num_summed_dimensions + self._einsum_string = None + + def _build_einsum_string(self, free_input_dims, bound_dims, output_dims): + input_str = "" + kernel_str = "" + output_str = "" + letter_offset = 0 + for i in range(free_input_dims): + char = _CHR_IDX[i + letter_offset] + input_str += char + output_str += char + + letter_offset += free_input_dims + for i in range(bound_dims): + char = _CHR_IDX[i + letter_offset] + input_str += char + kernel_str += char + + letter_offset += bound_dims + for i in range(output_dims): + char = _CHR_IDX[i + letter_offset] + kernel_str += char + output_str += char + + return input_str + "," + kernel_str + "->" + output_str + + def build(self, input_shape): + input_shape = tf.TensorShape(input_shape) + input_rank = input_shape.rank + free_input_dims = input_rank - self._num_summed_dimensions + output_dims = len(self._output_shape) + + self._einsum_string = self._build_einsum_string(free_input_dims, + self._num_summed_dimensions, + output_dims) + + # This is only saved for testing purposes. 
+ self._kernel_shape = ( + input_shape[free_input_dims:].concatenate(self._output_shape)) + + self._kernel = self.add_weight( + "kernel", + shape=self._kernel_shape, + initializer=self._kernel_initializer, + regularizer=self._kernel_regularizer, + constraint=self._kernel_constraint, + dtype=self.dtype, + trainable=True) + if self._use_bias: + self._bias = self.add_weight( + "bias", + shape=self._output_shape, + initializer=self._bias_initializer, + regularizer=self._bias_regularizer, + constraint=self._bias_constraint, + dtype=self.dtype, + trainable=True) + else: + self._bias = None + super(DenseEinsum, self).build(input_shape) + + def get_config(self): + config = { + "output_shape": + self._output_shape, + "num_summed_dimensions": + self._num_summed_dimensions, + "activation": + tf.keras.activations.serialize(self._activation), + "use_bias": + self._use_bias, + "kernel_initializer": + tf.keras.initializers.serialize(self._kernel_initializer), + "bias_initializer": + tf.keras.initializers.serialize(self._bias_initializer), + "kernel_regularizer": + tf.keras.regularizers.serialize(self._kernel_regularizer), + "bias_regularizer": + tf.keras.regularizers.serialize(self._bias_regularizer), + "activity_regularizer": + tf.keras.regularizers.serialize(self._activity_regularizer), + "kernel_constraint": + tf.keras.constraints.serialize(self._kernel_constraint), + "bias_constraint": + tf.keras.constraints.serialize(self._bias_constraint) + } + base_config = super(DenseEinsum, self).get_config() + return dict(list(base_config.items()) + list(config.items())) + + def call(self, inputs): + ret = tf.einsum(self._einsum_string, inputs, self._kernel) + if self._use_bias: + ret += self._bias + if self._activation is not None: + ret = self._activation(ret) + return ret diff --git a/models/official/nlp/modeling/layers/dense_einsum_test.py b/models/official/nlp/modeling/layers/dense_einsum_test.py new file mode 100644 index 0000000000000000000000000000000000000000..57a60fe52fa835c09df228274d42ed7eb8f39595 --- /dev/null +++ b/models/official/nlp/modeling/layers/dense_einsum_test.py @@ -0,0 +1,123 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for Keras-based einsum layer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf + +from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import +from official.nlp.modeling.layers import dense_einsum + + +# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It +# guarantees forward compatibility of this code for the V2 switchover. 
+@keras_parameterized.run_all_keras_modes +class DenseEinsumLayer(keras_parameterized.TestCase): + + def test_3D_einsum_with_two_bound_dimensions(self): + test_layer = dense_einsum.DenseEinsum( + output_shape=(64,), num_summed_dimensions=2) + # Create a 4-dimensional input (the first dimension is implicit). + input_tensor = tf.keras.Input(shape=(None, 40, 80)) + _ = test_layer(input_tensor) + self.assertEqual(test_layer._einsum_string, "abcd,cde->abe") + self.assertEqual(test_layer._kernel_shape, (40, 80, 64)) + + def test_3D_einsum_with_one_bound_dimensions(self): + test_layer = dense_einsum.DenseEinsum( + output_shape=(64, 32), num_summed_dimensions=1) + # Create a 3-dimensional input (the first dimension is implicit). + input_tensor = tf.keras.Input(shape=(None, 80)) + _ = test_layer(input_tensor) + self.assertEqual(test_layer._einsum_string, "abc,cde->abde") + self.assertEqual(test_layer._kernel_shape, (80, 64, 32)) + + def test_2D_einsum_with_one_bound_dimensions(self): + test_layer = dense_einsum.DenseEinsum( + output_shape=(64,), num_summed_dimensions=1) + # Create a 3-dimensional input (the first dimension is implicit). + input_tensor = tf.keras.Input(shape=(None, 80)) + _ = test_layer(input_tensor) + self.assertEqual(test_layer._einsum_string, "abc,cd->abd") + self.assertEqual(test_layer._kernel_shape, (80, 64)) + + def test_bias_term_can_be_disabled(self): + # A layer created using the bias should have two weights. + test_layer = dense_einsum.DenseEinsum( + output_shape=64, num_summed_dimensions=1, use_bias=True) + input_tensor = tf.keras.Input(shape=(None, 80)) + _ = test_layer(input_tensor) + self.assertEqual(2, len(test_layer.get_weights())) + + # A layer created without the bias should have only one weight. + test_layer = dense_einsum.DenseEinsum( + output_shape=64, num_summed_dimensions=1, use_bias=False) + input_tensor = tf.keras.Input(shape=(None, 80)) + _ = test_layer(input_tensor) + self.assertEqual(1, len(test_layer.get_weights())) + + def test_activation(self): + # Create a model that does not use an activation. + no_activation_layer = dense_einsum.DenseEinsum( + output_shape=64, num_summed_dimensions=1, activation=None) + input_tensor = tf.keras.Input(shape=(None, 80)) + output_tensor = no_activation_layer(input_tensor) + no_activation_model = tf.keras.Model(input_tensor, output_tensor) + + # Create a model that uses a softmax activation. + activation_layer = dense_einsum.DenseEinsum( + output_shape=64, num_summed_dimensions=1, activation="softmax") + input_tensor = tf.keras.Input(shape=(None, 80)) + output_tensor = activation_layer(input_tensor) + activation_model = tf.keras.Model(input_tensor, output_tensor) + + # Make sure the models' weights are identical. + activation_model.set_weights(no_activation_model.get_weights()) + + # Predict using each model on the same input data. The output should be + # different, since one is using a softmax - even though the models' weights + # are the same. + input_values = 10 * np.random.random_sample((10, 4, 80)) + non_activated_data = no_activation_model.predict(input_values) + activated_data = activation_model.predict(input_values) + self.assertNotAllClose(activated_data, non_activated_data) + + def test_non_iterable_output_shape(self): + test_layer = dense_einsum.DenseEinsum( + output_shape=64, num_summed_dimensions=1) + # Create a 3-dimensional input (the first dimension is implicit). 
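+    # A scalar `output_shape` such as `64` is normalized to the one-element
+    # tuple `(64,)`, so the einsum string and kernel shape asserted below match
+    # the iterable case tested above.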
+ input_tensor = tf.keras.Input(shape=(None, 80)) + _ = test_layer(input_tensor) + self.assertEqual(test_layer._einsum_string, "abc,cd->abd") + self.assertEqual(test_layer._kernel_shape, (80, 64)) + + def test_with_explicit_initializer(self): + test_layer = dense_einsum.DenseEinsum( + output_shape=(64,), + num_summed_dimensions=2, + kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)) + # Create a 4-dimensional input (the first dimension is implicit). + input_tensor = tf.keras.Input(shape=(None, 40, 80)) + _ = test_layer(input_tensor) + self.assertEqual(test_layer._einsum_string, "abcd,cde->abe") + self.assertEqual(test_layer._kernel_shape, (40, 80, 64)) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/modeling/layers/gated_feedforward.py b/models/official/nlp/modeling/layers/gated_feedforward.py new file mode 100644 index 0000000000000000000000000000000000000000..11c912885a7b8eb68e6d764653275fb2b5d2de92 --- /dev/null +++ b/models/official/nlp/modeling/layers/gated_feedforward.py @@ -0,0 +1,210 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Keras-based gated feedforward layer.""" +# pylint: disable=g-classes-have-attributes +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import gin +import tensorflow as tf + + +@tf.keras.utils.register_keras_serializable(package="Text") +@gin.configurable +class GatedFeedforward(tf.keras.layers.Layer): + """Gated linear feedforward layer. + + This layer follows the paper "GLU Variants Improve Transformer" + (https://arxiv.org/abs/2002.05202). In additional, it allows to stack + multiple feedforward blocks and specify the position of dropout layer. + + Arguments: + intermediate_size: Size of the intermediate layer. + intermediate_activation: Activation for the intermediate layer. + dropout: Dropout probability for the output dropout. + use_gate: Whether to use gated linear units. If True, assuming `GELU` as + the activation and omitting bias, will apply + `GEGLU(x, W, V, W_2) = (GEGLU(xW) * xV)W2`; if False, will follow + "Attention Is All You Need" (https://arxiv.org/abs/1706.03762) paper + and apply `FFN(x, W, W_2) = GELU(xW_1)W_2.` + num_blocks: The number of feedforward blocks to stack. Each block contains + a (gated) linear layer and a fully connected layer followed by dropout, + layer norm and residual. + dropout_position: Where to apply the dropout, the value can be either + `before_residual` or `after_residual`. If `before_residual`, will apply + `layer_output = layer_norm(dropout(layer_output) + layer_input)`; + if `after residual`, will apply + `layer_output = dropout(layer_norm(layer_output + layer_input))`. + kernel_initializer: Initializer for dense layer kernels. + bias_initializer: Initializer for dense layer biases. 
+ kernel_regularizer: Regularizer for dense layer kernels. + bias_regularizer: Regularizer for dense layer biases. + activity_regularizer: Regularizer for dense layer activity. + kernel_constraint: Constraint for dense layer kernels. + bias_constraint: Constraint for dense layer kernels. + """ + + def __init__(self, + intermediate_size, + intermediate_activation, + dropout, + use_gate=True, + num_blocks=1, + dropout_position="before_residual", + kernel_initializer="glorot_uniform", + bias_initializer="zeros", + kernel_regularizer=None, + bias_regularizer=None, + activity_regularizer=None, + kernel_constraint=None, + bias_constraint=None, + **kwargs): + super(GatedFeedforward, self).__init__(**kwargs) + self._intermediate_size = intermediate_size + self._intermediate_activation = intermediate_activation + self._dropout = dropout + self._use_gate = use_gate + self._num_blocks = num_blocks + self._dropout_position = dropout_position + if self._dropout_position not in ("before_residual", "after_residual"): + raise ValueError( + "The dropout_position should be either `before_residual` or" + "`after_residual`, got: %s" % self._dropout_position) + + self._kernel_initializer = tf.keras.initializers.get(kernel_initializer) + self._bias_initializer = tf.keras.initializers.get(bias_initializer) + self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer) + self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer) + self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer) + self._kernel_constraint = tf.keras.constraints.get(kernel_constraint) + self._bias_constraint = tf.keras.constraints.get(bias_constraint) + + def build(self, input_shape): + hidden_size = input_shape.as_list()[-1] + + common_kwargs = dict( + kernel_initializer=self._kernel_initializer, + bias_initializer=self._bias_initializer, + kernel_regularizer=self._kernel_regularizer, + bias_regularizer=self._bias_regularizer, + activity_regularizer=self._activity_regularizer, + kernel_constraint=self._kernel_constraint, + bias_constraint=self._bias_constraint) + self._intermediate_dense = [] + self._intermediate_activation_layers = [] + self._gate_dense = [] + self._output_dense = [] + self._output_dropout = [] + self._output_layer_norm = [] + activation_policy = tf.keras.mixed_precision.experimental.global_policy() + if activation_policy.name == "mixed_bfloat16": + # bfloat16 causes BERT with the LAMB optimizer to not converge + # as well, so we use float32. + # TODO(b/154538392): Investigate this. + activation_policy = tf.float32 + for i in range(self._num_blocks): + self._intermediate_dense.append( + tf.keras.layers.experimental.EinsumDense( + "abc,cd->abd", + output_shape=(None, self._intermediate_size), + bias_axes="d", + name="intermediate_%d" % i, + **common_kwargs)) + self._intermediate_activation_layers.append(tf.keras.layers.Activation( + self._intermediate_activation, dtype=activation_policy)) + if self._use_gate: + self._gate_dense.append( + tf.keras.layers.experimental.EinsumDense( + "abc,cd->abd", + output_shape=(None, self._intermediate_size), + bias_axes="d", + name="gate_%d" % i, + **common_kwargs)) + self._output_dense.append( + tf.keras.layers.experimental.EinsumDense( + "abc,cd->abd", + output_shape=(None, hidden_size), + bias_axes="d", + name="output_%d" % i, + **common_kwargs)) + self._output_dropout.append( + tf.keras.layers.Dropout(rate=self._dropout)) + # Use float32 in layernorm for numeric stability. 
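+      # Each block therefore owns its own intermediate (and optional gate)
+      # projection, output projection, dropout and float32 layer norm; `call`
+      # applies the blocks sequentially with a residual connection around each.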
+ self._output_layer_norm.append( + tf.keras.layers.LayerNormalization( + name="output_layer_norm_%d" % i, + axis=-1, + epsilon=1e-12, + dtype=tf.float32)) + + def get_config(self): + config = { + "intermediate_size": + self._intermediate_size, + "intermediate_activation": + self._intermediate_activation, + "dropout": + self._dropout, + "use_gate": + self._use_gate, + "num_blocks": + self._num_blocks, + "dropout_position": + self._dropout_position, + "kernel_initializer": + tf.keras.initializers.serialize(self._kernel_initializer), + "bias_initializer": + tf.keras.initializers.serialize(self._bias_initializer), + "kernel_regularizer": + tf.keras.regularizers.serialize(self._kernel_regularizer), + "bias_regularizer": + tf.keras.regularizers.serialize(self._bias_regularizer), + "activity_regularizer": + tf.keras.regularizers.serialize(self._activity_regularizer), + "kernel_constraint": + tf.keras.constraints.serialize(self._kernel_constraint), + "bias_constraint": + tf.keras.constraints.serialize(self._bias_constraint) + } + base_config = super(GatedFeedforward, self).get_config() + return dict(list(base_config.items()) + list(config.items())) + + def call(self, inputs): + layer_output = inputs + for i in range(self._num_blocks): + layer_input = layer_output + intermediate_output = self._intermediate_dense[i](layer_input) + intermediate_output = self._intermediate_activation_layers[i]( + intermediate_output) + if self._use_gate: + gated_linear = self._gate_dense[i](layer_input) + intermediate_output = intermediate_output * gated_linear + + layer_output = self._output_dense[i](intermediate_output) + if self._dropout_position == "before_residual": + layer_output = self._output_dropout[i](layer_output) + + # During mixed precision training, `layer_input` may be from layer norm. + # If so, it is always fp32. Cast layer_output to fp32 for the subsequent + # add. + if layer_input.dtype == tf.float32: + layer_output = tf.cast(layer_output, tf.float32) + layer_output = self._output_layer_norm[i](layer_output + layer_input) + if self._dropout_position == "after_residual": + layer_output = self._output_dropout[i](layer_output) + + return layer_output diff --git a/models/official/nlp/modeling/layers/gated_feedforward_test.py b/models/official/nlp/modeling/layers/gated_feedforward_test.py new file mode 100644 index 0000000000000000000000000000000000000000..8daeb5d32fde9be2765fe3819b13ee9a13546f55 --- /dev/null +++ b/models/official/nlp/modeling/layers/gated_feedforward_test.py @@ -0,0 +1,127 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for Keras-based gated feedforward layer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +import numpy as np +import tensorflow as tf + +from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import +from official.nlp.modeling.layers import gated_feedforward + + +# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It +# guarantees forward compatibility of this code for the V2 switchover. +@keras_parameterized.run_all_keras_modes +class GatedFeedforwardTest(keras_parameterized.TestCase): + + def tearDown(self): + super(GatedFeedforwardTest, self).tearDown() + tf.keras.mixed_precision.experimental.set_policy("float32") + + @parameterized.parameters( + (True, 1, "after_residual", "float32"), + (True, 1, "after_residual", "mixed_float16"), + (False, 4, "before_residual", "float32"), + (False, 4, "before_residual", "mixed_float16"), + (True, 4, "after_residual", "float32"), + (True, 4, "after_residual", "mixed_float16"), + (False, 1, "before_residual", "float32"), + (False, 1, "before_residual", "mixed_float16"), + ) + def test_layer_creation(self, use_gate, num_blocks, dropout_position, dtype): + tf.keras.mixed_precision.experimental.set_policy(dtype) + kwargs = dict( + intermediate_size=128, + intermediate_activation="relu", + dropout=0.1, + use_gate=use_gate, + num_blocks=num_blocks, + dropout_position=dropout_position, + kernel_initializer="glorot_uniform", + bias_initializer="zeros") + test_layer = gated_feedforward.GatedFeedforward(**kwargs) + + sequence_length = 64 + width = 128 + # Create a 3-dimensional input (the first dimension is implicit). + data_tensor = tf.keras.Input(shape=(sequence_length, width)) + output_tensor = test_layer(data_tensor) + # The default output of a transformer layer should be the same as the input. + self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list()) + + @parameterized.parameters( + (True, 1, "after_residual", "float32"), + (True, 1, "after_residual", "mixed_float16"), + (False, 4, "before_residual", "float32"), + (False, 4, "before_residual", "mixed_float16"), + (True, 4, "after_residual", "float32"), + (True, 4, "after_residual", "mixed_float16"), + (False, 1, "before_residual", "float32"), + (False, 1, "before_residual", "mixed_float16"), + ) + def test_layer_invocation(self, use_gate, num_blocks, dropout_position, + dtype): + tf.keras.mixed_precision.experimental.set_policy(dtype) + kwargs = dict( + intermediate_size=16, + intermediate_activation="relu", + dropout=0.1, + use_gate=use_gate, + num_blocks=num_blocks, + dropout_position=dropout_position, + kernel_initializer="glorot_uniform", + bias_initializer="zeros") + test_layer = gated_feedforward.GatedFeedforward(**kwargs) + + sequence_length = 16 + width = 32 + # Create a 3-dimensional input (the first dimension is implicit). + data_tensor = tf.keras.Input(shape=(sequence_length, width)) + output_tensor = test_layer(data_tensor) + + # Create a model from the test layer. + model = tf.keras.Model(data_tensor, output_tensor) + + # Invoke the model on test data. 
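+    # The feedforward stack preserves the input shape, so the predicted output
+    # checked below should be [batch_size, sequence_length, width].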
+ batch_size = 6 + input_data = 10 * np.random.random_sample( + (batch_size, sequence_length, width)) + output_data = model.predict(input_data) + self.assertEqual(output_data.shape, (batch_size, sequence_length, width)) + + def test_serialize_deserialize(self): + kwargs = dict( + intermediate_size=16, + intermediate_activation="relu", + dropout=0.1, + use_gate=False, + num_blocks=4, + dropout_position="after_residual", + kernel_initializer="glorot_uniform", + bias_initializer="zeros") + test_layer = gated_feedforward.GatedFeedforward(**kwargs) + new_layer = gated_feedforward.GatedFeedforward.from_config( + test_layer.get_config()) + + # If the serialization was successful, the new config should match the old. + self.assertAllEqual(test_layer.get_config(), new_layer.get_config()) + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/modeling/layers/masked_lm.py b/models/official/nlp/modeling/layers/masked_lm.py new file mode 100644 index 0000000000000000000000000000000000000000..3b81556f4c7d82e79c9d9cda4894a26fde6a93f7 --- /dev/null +++ b/models/official/nlp/modeling/layers/masked_lm.py @@ -0,0 +1,124 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Masked language model network.""" +# pylint: disable=g-classes-have-attributes +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import tensorflow as tf + +from official.modeling import tf_utils + + +@tf.keras.utils.register_keras_serializable(package='Text') +class MaskedLM(tf.keras.layers.Layer): + """Masked language model network head for BERT modeling. + + This network implements a masked language model based on the provided network. + It assumes that the network being passed has a "get_embedding_table()" method. + + Arguments: + embedding_table: The embedding table of the targets. + activation: The activation, if any, for the dense layer. + initializer: The intializer for the dense layer. Defaults to a Glorot + uniform initializer. + output: The output style for this network. Can be either 'logits' or + 'predictions'. + """ + + def __init__(self, + embedding_table, + activation=None, + initializer='glorot_uniform', + output='logits', + name='cls/predictions', + **kwargs): + super(MaskedLM, self).__init__(name=name, **kwargs) + self.embedding_table = embedding_table + self.activation = activation + self.initializer = tf.keras.initializers.get(initializer) + + if output not in ('predictions', 'logits'): + raise ValueError( + ('Unknown `output` value "%s". 
`output` can be either "logits" or ' + '"predictions"') % output) + self._output_type = output + + def build(self, input_shape): + self._vocab_size, hidden_size = self.embedding_table.shape + self.dense = tf.keras.layers.Dense( + hidden_size, + activation=self.activation, + kernel_initializer=self.initializer, + name='transform/dense') + self.layer_norm = tf.keras.layers.LayerNormalization( + axis=-1, epsilon=1e-12, name='transform/LayerNorm') + self.bias = self.add_weight( + 'output_bias/bias', + shape=(self._vocab_size,), + initializer='zeros', + trainable=True) + + super(MaskedLM, self).build(input_shape) + + def call(self, sequence_data, masked_positions): + masked_lm_input = self._gather_indexes(sequence_data, masked_positions) + lm_data = self.dense(masked_lm_input) + lm_data = self.layer_norm(lm_data) + lm_data = tf.matmul(lm_data, self.embedding_table, transpose_b=True) + logits = tf.nn.bias_add(lm_data, self.bias) + + masked_positions_shape = tf_utils.get_shape_list( + masked_positions, name='masked_positions_tensor') + logits = tf.reshape(logits, + [-1, masked_positions_shape[1], self._vocab_size]) + if self._output_type == 'logits': + return logits + return tf.nn.log_softmax(logits) + + def get_config(self): + raise NotImplementedError('MaskedLM cannot be directly serialized because ' + 'it has variable sharing logic.') + + def _gather_indexes(self, sequence_tensor, positions): + """Gathers the vectors at the specific positions. + + Args: + sequence_tensor: Sequence output of `BertModel` layer of shape + (`batch_size`, `seq_length`, num_hidden) where num_hidden is number of + hidden units of `BertModel` layer. + positions: Positions ids of tokens in sequence to mask for pretraining + of with dimension (batch_size, num_predictions) where + `num_predictions` is maximum number of tokens to mask out and predict + per each sequence. + + Returns: + Masked out sequence tensor of shape (batch_size * num_predictions, + num_hidden). + """ + sequence_shape = tf_utils.get_shape_list( + sequence_tensor, name='sequence_output_tensor') + batch_size, seq_length, width = sequence_shape + + flat_offsets = tf.reshape( + tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1]) + flat_positions = tf.reshape(positions + flat_offsets, [-1]) + flat_sequence_tensor = tf.reshape(sequence_tensor, + [batch_size * seq_length, width]) + output_tensor = tf.gather(flat_sequence_tensor, flat_positions) + + return output_tensor diff --git a/models/official/nlp/modeling/layers/masked_lm_test.py b/models/official/nlp/modeling/layers/masked_lm_test.py new file mode 100644 index 0000000000000000000000000000000000000000..12e28ec95ff49c95c2729efeae04382bad5c611f --- /dev/null +++ b/models/official/nlp/modeling/layers/masked_lm_test.py @@ -0,0 +1,162 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for masked language model network.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf + +from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import + +from official.nlp.modeling.layers import masked_lm +from official.nlp.modeling.networks import transformer_encoder + + +# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It +# guarantees forward compatibility of this code for the V2 switchover. +@keras_parameterized.run_all_keras_modes +class MaskedLMTest(keras_parameterized.TestCase): + + def create_layer(self, + vocab_size, + sequence_length, + hidden_size, + output='predictions', + xformer_stack=None): + # First, create a transformer stack that we can use to get the LM's + # vocabulary weight. + if xformer_stack is None: + xformer_stack = transformer_encoder.TransformerEncoder( + vocab_size=vocab_size, + num_layers=1, + sequence_length=sequence_length, + hidden_size=hidden_size, + num_attention_heads=4, + ) + + # Create a maskedLM from the transformer stack. + test_layer = masked_lm.MaskedLM( + embedding_table=xformer_stack.get_embedding_table(), + output=output) + return test_layer + + def test_layer_creation(self): + vocab_size = 100 + sequence_length = 32 + hidden_size = 64 + num_predictions = 21 + test_layer = self.create_layer( + vocab_size=vocab_size, + sequence_length=sequence_length, + hidden_size=hidden_size) + + # Make sure that the output tensor of the masked LM is the right shape. + lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size)) + masked_positions = tf.keras.Input(shape=(num_predictions,), dtype=tf.int32) + output = test_layer(lm_input_tensor, masked_positions=masked_positions) + + expected_output_shape = [None, num_predictions, vocab_size] + self.assertEqual(expected_output_shape, output.shape.as_list()) + + def test_layer_invocation_with_external_logits(self): + vocab_size = 100 + sequence_length = 32 + hidden_size = 64 + num_predictions = 21 + xformer_stack = transformer_encoder.TransformerEncoder( + vocab_size=vocab_size, + num_layers=1, + sequence_length=sequence_length, + hidden_size=hidden_size, + num_attention_heads=4, + ) + test_layer = self.create_layer( + vocab_size=vocab_size, + sequence_length=sequence_length, + hidden_size=hidden_size, + xformer_stack=xformer_stack, + output='predictions') + logit_layer = self.create_layer( + vocab_size=vocab_size, + sequence_length=sequence_length, + hidden_size=hidden_size, + xformer_stack=xformer_stack, + output='logits') + + # Create a model from the masked LM layer. + lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size)) + masked_positions = tf.keras.Input(shape=(num_predictions,), dtype=tf.int32) + output = test_layer(lm_input_tensor, masked_positions) + logit_output = logit_layer(lm_input_tensor, masked_positions) + logit_output = tf.keras.layers.Activation(tf.nn.log_softmax)(logit_output) + logit_layer.set_weights(test_layer.get_weights()) + model = tf.keras.Model([lm_input_tensor, masked_positions], output) + logits_model = tf.keras.Model(([lm_input_tensor, masked_positions]), + logit_output) + + # Invoke the masked LM on some fake data to make sure there are no runtime + # errors in the code. 
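+    # Because the 'logits' head shares the weights copied above and is followed
+    # by an explicit log_softmax activation, its outputs should exactly match
+    # the 'predictions' head, which is verified below.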
+ batch_size = 3 + lm_input_data = 10 * np.random.random_sample( + (batch_size, sequence_length, hidden_size)) + masked_position_data = np.random.randint( + sequence_length, size=(batch_size, num_predictions)) + # ref_outputs = model.predict([lm_input_data, masked_position_data]) + # outputs = logits_model.predict([lm_input_data, masked_position_data]) + ref_outputs = model([lm_input_data, masked_position_data]) + outputs = logits_model([lm_input_data, masked_position_data]) + + # Ensure that the tensor shapes are correct. + expected_output_shape = (batch_size, num_predictions, vocab_size) + self.assertEqual(expected_output_shape, ref_outputs.shape) + self.assertEqual(expected_output_shape, outputs.shape) + self.assertAllClose(ref_outputs, outputs) + + def test_layer_invocation(self): + vocab_size = 100 + sequence_length = 32 + hidden_size = 64 + num_predictions = 21 + test_layer = self.create_layer( + vocab_size=vocab_size, + sequence_length=sequence_length, + hidden_size=hidden_size) + + # Create a model from the masked LM layer. + lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size)) + masked_positions = tf.keras.Input(shape=(num_predictions,), dtype=tf.int32) + output = test_layer(lm_input_tensor, masked_positions) + model = tf.keras.Model([lm_input_tensor, masked_positions], output) + + # Invoke the masked LM on some fake data to make sure there are no runtime + # errors in the code. + batch_size = 3 + lm_input_data = 10 * np.random.random_sample( + (batch_size, sequence_length, hidden_size)) + masked_position_data = np.random.randint( + 2, size=(batch_size, num_predictions)) + _ = model.predict([lm_input_data, masked_position_data]) + + def test_unknown_output_type_fails(self): + with self.assertRaisesRegex(ValueError, 'Unknown `output` value "bad".*'): + _ = self.create_layer( + vocab_size=8, sequence_length=8, hidden_size=8, output='bad') + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/nlp/modeling/layers/masked_softmax.py b/models/official/nlp/modeling/layers/masked_softmax.py new file mode 100644 index 0000000000000000000000000000000000000000..42a9e97a329e6c2892bb584f38375888a7fbdd2f --- /dev/null +++ b/models/official/nlp/modeling/layers/masked_softmax.py @@ -0,0 +1,72 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Keras-based softmax layer with optional masking.""" +# pylint: disable=g-classes-have-attributes +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import tensorflow as tf + + +@tf.keras.utils.register_keras_serializable(package='Text') +class MaskedSoftmax(tf.keras.layers.Layer): + """Performs a softmax with optional masking on a tensor. + + Arguments: + mask_expansion_axes: Any axes that should be padded on the mask tensor. 
+ normalization_axes: On which axes the softmax should perform. + """ + + def __init__(self, + mask_expansion_axes=None, + normalization_axes=None, + **kwargs): + self._mask_expansion_axes = mask_expansion_axes + if normalization_axes is None: + self._normalization_axes = (-1,) + else: + self._normalization_axes = normalization_axes + super(MaskedSoftmax, self).__init__(**kwargs) + + def call(self, scores, mask=None): + + if mask is not None: + for _ in range(len(scores.shape) - len(mask.shape)): + mask = tf.expand_dims(mask, axis=self._mask_expansion_axes) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + adder = (1.0 - tf.cast(mask, scores.dtype)) * -10000.0 + + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + scores += adder + + if len(self._normalization_axes) == 1: + return tf.nn.softmax(scores, axis=self._normalization_axes[0]) + else: + return tf.math.exp(scores - tf.math.reduce_logsumexp( + scores, axis=self._normalization_axes, keepdims=True)) + + def get_config(self): + config = { + 'mask_expansion_axes': self._mask_expansion_axes, + 'normalization_axes': self._normalization_axes + } + base_config = super(MaskedSoftmax, self).get_config() + return dict(list(base_config.items()) + list(config.items())) diff --git a/models/official/nlp/modeling/layers/masked_softmax_test.py b/models/official/nlp/modeling/layers/masked_softmax_test.py new file mode 100644 index 0000000000000000000000000000000000000000..befe0f786a7b4d84a5dc975d1780acdd2c964a2c --- /dev/null +++ b/models/official/nlp/modeling/layers/masked_softmax_test.py @@ -0,0 +1,119 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for Keras-based masked softmax layer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf + +from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import +from official.nlp.modeling.layers import masked_softmax + + +# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It +# guarantees forward compatibility of this code for the V2 switchover. 
+@keras_parameterized.run_all_keras_modes +class MaskedSoftmaxLayerTest(keras_parameterized.TestCase): + + def test_non_masked_softmax(self): + test_layer = masked_softmax.MaskedSoftmax() + input_tensor = tf.keras.Input(shape=(4, 8)) + output = test_layer(input_tensor) + model = tf.keras.Model(input_tensor, output) + + input_data = 10 * np.random.random_sample((3, 4, 8)) + output_data = model.predict(input_data) + expected_data = tf.nn.softmax(input_data) + self.assertAllClose(expected_data, output_data) + + def test_masked_softmax(self): + test_layer = masked_softmax.MaskedSoftmax() + input_tensor = tf.keras.Input(shape=(4, 8)) + mask_tensor = tf.keras.Input(shape=(4, 8)) + output = test_layer(input_tensor, mask_tensor) + model = tf.keras.Model([input_tensor, mask_tensor], output) + + input_data = 10 * np.random.random_sample((3, 4, 8)) + mask_data = np.random.randint(2, size=(3, 4, 8)) + + output_data = model.predict([input_data, mask_data]) + expected_zeros = np.greater(mask_data, 0) + is_zeros = np.greater(output_data, 0) + self.assertAllEqual(expected_zeros, is_zeros) + + def test_masked_softmax_with_none_mask(self): + test_layer = masked_softmax.MaskedSoftmax() + input_tensor = tf.keras.Input(shape=(4, 8)) + output = test_layer(input_tensor, None) + model = tf.keras.Model(input_tensor, output) + + input_data = 10 * np.random.random_sample((3, 4, 8)) + output_data = model.predict(input_data) + expected_data = tf.nn.softmax(input_data) + self.assertAllClose(expected_data, output_data) + + def test_softmax_with_axes_expansion(self): + test_layer = masked_softmax.MaskedSoftmax(mask_expansion_axes=[1]) + input_tensor = tf.keras.Input(shape=(4, 8)) + mask_tensor = tf.keras.Input(shape=(8)) + output = test_layer(input_tensor, mask_tensor) + model = tf.keras.Model([input_tensor, mask_tensor], output) + + input_data = 10 * np.random.random_sample((3, 4, 8)) + mask_data = np.random.randint(2, size=(3, 8)) + + output_data = model.predict([input_data, mask_data]) + expanded_mask = np.expand_dims(mask_data, axis=1) * np.ones_like(input_data) + expected_zeros = np.greater(expanded_mask, 0) + is_zeros = np.greater(output_data, 0) + self.assertAllEqual(expected_zeros, is_zeros) + + def test_masked_softmax_high_dims(self): + test_layer = masked_softmax.MaskedSoftmax( + mask_expansion_axes=[1], normalization_axes=[6, 7]) + input_shape = [2, 3, 4, 5, 6, 7, 8] + mask_shape = [5, 6, 7, 8] + input_tensor = tf.keras.Input(shape=input_shape) + mask_tensor = tf.keras.Input(shape=mask_shape) + output = test_layer(input_tensor, mask_tensor) + model = tf.keras.Model([input_tensor, mask_tensor], output) + + input_data = 10 * np.random.random_sample([3] + input_shape) + mask_data = np.random.randint(2, size=[3] + mask_shape) + + output_data = model.predict([input_data, mask_data]) + expanded_mask = np.expand_dims(mask_data, axis=1) + expanded_mask = np.expand_dims(expanded_mask, axis=1) + expanded_mask = np.expand_dims( + expanded_mask, axis=1) * np.ones_like(input_data) + expected_zeros = np.greater(expanded_mask, 0) + is_zeros = np.greater(output_data, 0) + self.assertAllEqual(expected_zeros, is_zeros) + + def test_serialize_deserialize(self): + test_layer = masked_softmax.MaskedSoftmax( + mask_expansion_axes=[1], normalization_axes=[6, 7]) + new_layer = masked_softmax.MaskedSoftmax.from_config( + test_layer.get_config()) + + # If the serialization was successful, the new config should match the old. 
+ self.assertAllEqual(test_layer.get_config(), new_layer.get_config()) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/nlp/modeling/layers/multi_channel_attention.py b/models/official/nlp/modeling/layers/multi_channel_attention.py new file mode 100644 index 0000000000000000000000000000000000000000..499d977c753518f0892267ac98abc6bf7618c2cd --- /dev/null +++ b/models/official/nlp/modeling/layers/multi_channel_attention.py @@ -0,0 +1,165 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Multi-channel Attention.""" +# pylint: disable=g-classes-have-attributes + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import math + +import tensorflow as tf +from official.modeling import tf_utils +from official.nlp.modeling.layers import attention +from official.nlp.modeling.layers import dense_einsum +from official.nlp.modeling.layers import masked_softmax + + +class VotingAttention(tf.keras.layers.Layer): + """Voting Attention layer. + + Arguments: + num_heads: the number of attention heads. + head_size: per-head hidden size. + kernel_initializer: Initializer for dense layer kernels. + bias_initializer: Initializer for dense layer biases. + kernel_regularizer: Regularizer for dense layer kernels. + bias_regularizer: Regularizer for dense layer biases. + activity_regularizer: Regularizer for dense layer activity. + kernel_constraint: Constraint for dense layer kernels. + bias_constraint: Constraint for dense layer kernels. 
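+
+  Given encoder outputs of shape [batch, num_docs, seq_length, hidden] and a
+  [batch, num_docs] document mask, the layer scores the per-document leading
+  (index 0, typically [CLS]) token embeddings against each other and returns a
+  [batch, num_docs] softmax distribution over documents. A minimal sketch,
+  mirroring the shapes in multi_channel_attention_test.py:
+
+    layer = VotingAttention(num_heads=2, head_size=8)
+    doc_scores = layer(tf.zeros([2, 3, 10, 16]), tf.ones([2, 3]))  # -> [2, 3]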
+ """ + + def __init__(self, + num_heads, + head_size, + kernel_initializer="glorot_uniform", + bias_initializer="zeros", + kernel_regularizer=None, + bias_regularizer=None, + activity_regularizer=None, + kernel_constraint=None, + bias_constraint=None, + **kwargs): + super(VotingAttention, self).__init__(**kwargs) + self._num_heads = num_heads + self._head_size = head_size + self._kernel_initializer = tf.keras.initializers.get(kernel_initializer) + self._bias_initializer = tf.keras.initializers.get(bias_initializer) + self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer) + self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer) + self._kernel_constraint = tf.keras.constraints.get(kernel_constraint) + self._bias_constraint = tf.keras.constraints.get(bias_constraint) + + def build(self, unused_input_shapes): + self._query_dense = dense_einsum.DenseEinsum( + output_shape=(self._num_heads, self._head_size), + kernel_initializer=self._kernel_initializer, + bias_initializer=self._bias_initializer, + kernel_regularizer=self._kernel_regularizer, + bias_regularizer=self._bias_regularizer, + activity_regularizer=self._activity_regularizer, + kernel_constraint=self._kernel_constraint, + bias_constraint=self._bias_constraint, + dtype=self.dtype, + name="encdocatt_query") + self._key_dense = dense_einsum.DenseEinsum( + output_shape=(self._num_heads, self._head_size), + kernel_initializer=self._kernel_initializer, + bias_initializer=self._bias_initializer, + kernel_regularizer=self._kernel_regularizer, + bias_regularizer=self._bias_regularizer, + activity_regularizer=self._activity_regularizer, + kernel_constraint=self._kernel_constraint, + bias_constraint=self._bias_constraint, + dtype=self.dtype, + name="encdocatt_key") + super(VotingAttention, self).build(unused_input_shapes) + + def call(self, encoder_outputs, doc_attention_mask): + num_docs = tf_utils.get_shape_list(encoder_outputs, expected_rank=[4])[1] + cls_embeddings = encoder_outputs[:, :, 0, :] + key = self._key_dense(cls_embeddings) + query = self._query_dense(cls_embeddings) + doc_attention_mask = tf.cast(doc_attention_mask, tf.float32) + + key = tf.einsum("BANH,BA->BANH", key, doc_attention_mask) + query = tf.einsum("BANH,BA->BANH", query, doc_attention_mask) + attention_matrix = tf.einsum("BXNH,BYNH->BNXY", query, key) + mask = tf.ones([num_docs, num_docs]) + mask = tf.linalg.set_diag(mask, tf.zeros(num_docs)) + attention_matrix = tf.einsum("BNXY,XY->BNXY", attention_matrix, mask) + doc_attention_probs = tf.einsum("BNAY->BNA", attention_matrix) + doc_attention_probs = tf.einsum("BNA->BA", doc_attention_probs) + infadder = (1.0 - doc_attention_mask) * -100000.0 + return tf.nn.softmax(doc_attention_probs + infadder) + + +class MultiChannelAttention(attention.MultiHeadAttention): + """Multi-channel Attention layer. + + Introduced in: https://arxiv.org/abs/2001.09386. Expects multiple + cross-attention target sequences. 
+ """ + + def build(self, input_shape): + super(MultiChannelAttention, self).build(input_shape) + self._masked_softmax = masked_softmax.MaskedSoftmax(mask_expansion_axes=[2]) + + def call(self, inputs, attention_mask=None): + from_tensor = inputs[0] + to_tensor = inputs[1] + doc_attention_probs = inputs[2] + + # Scalar dimensions referenced here: + # B = batch size (number of stories) + # A = num_docs (number of docs) + # F = `from_tensor` sequence length + # T = `to_tensor` sequence length + # N = `num_attention_heads` + # H = `size_per_head` + # `query_tensor` = [B, F, N ,H] + query_tensor = self._query_dense(from_tensor) + + # `key_tensor` = [B, A, T, N, H] + key_tensor = self._key_dense(to_tensor) + + # `value_tensor` = [B, A, T, N, H] + value_tensor = self._value_dense(to_tensor) + + # Take the dot product between "query" and "key" to get the raw + # attention scores. + attention_scores = tf.einsum("BATNH,BFNH->BANFT", key_tensor, query_tensor) + attention_scores = tf.multiply(attention_scores, + 1.0 / math.sqrt(float(self._key_size))) + + # Normalize the attention scores to probabilities. + # `attention_probs` = [B, A, N, F, T] + attention_probs = self._masked_softmax(attention_scores, attention_mask) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self._dropout_layer(attention_probs) + + # `context_layer` = [B, F, N, H] + context_layer = tf.einsum("BANFT,BATNH->BAFNH", attention_probs, + value_tensor) + attention_output = tf.einsum("BNFA,BAFNH->BFNH", doc_attention_probs, + context_layer) + attention_output = self._output_dense(attention_output) + return attention_output diff --git a/models/official/nlp/modeling/layers/multi_channel_attention_test.py b/models/official/nlp/modeling/layers/multi_channel_attention_test.py new file mode 100644 index 0000000000000000000000000000000000000000..ab6e0e7fec48635d09e6e30c3ad247044ae9785f --- /dev/null +++ b/models/official/nlp/modeling/layers/multi_channel_attention_test.py @@ -0,0 +1,56 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for nlp.nhnet.multi_channel_attention.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf + +from official.nlp.modeling.layers import multi_channel_attention + + +class MultiChannelAttentionTest(tf.test.TestCase): + + def test_doc_attention(self): + num_heads = 2 + doc_attention = multi_channel_attention.VotingAttention( + num_heads, head_size=8) + num_docs = 3 + inputs = np.zeros((2, num_docs, 10, 16), dtype=np.float32) + doc_mask = np.zeros((2, num_docs), dtype=np.float32) + outputs = doc_attention(inputs, doc_mask) + self.assertEqual(outputs.shape, (2, num_docs)) + + def test_multi_channel_attention(self): + num_heads = 2 + num_docs = 5 + attention_layer = multi_channel_attention.MultiChannelAttention( + num_heads, key_size=2) + + from_data = 10 * np.random.random_sample((3, 4, 8)) + to_data = 10 * np.random.random_sample((3, num_docs, 2, 8)) + mask_data = np.random.randint(2, size=(3, num_docs, 4, 2)) + doc_probs = np.random.randint( + 2, size=(3, num_heads, 4, num_docs)).astype(float) + outputs = attention_layer([from_data, to_data, doc_probs], mask_data) + self.assertEqual(outputs.shape, (3, 4, 8)) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/modeling/layers/on_device_embedding.py b/models/official/nlp/modeling/layers/on_device_embedding.py new file mode 100644 index 0000000000000000000000000000000000000000..739cdb7e4dde157ef52d7a98769a4c40819634a7 --- /dev/null +++ b/models/official/nlp/modeling/layers/on_device_embedding.py @@ -0,0 +1,88 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Keras-based one-hot embedding layer.""" +# pylint: disable=g-classes-have-attributes +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import tensorflow as tf + + +@tf.keras.utils.register_keras_serializable(package="Text") +class OnDeviceEmbedding(tf.keras.layers.Layer): + """Performs an embedding lookup suitable for accelerator devices. + + This layer uses either tf.gather or tf.one_hot to translate integer indices to + float embeddings. + + Arguments: + vocab_size: Number of elements in the vocabulary. + embedding_width: Output size of the embedding layer. + initializer: The initializer to use for the embedding weights. Defaults to + "glorot_uniform". + use_one_hot: Whether to use tf.one_hot over tf.gather for the embedding + lookup. Defaults to False (that is, using tf.gather). Setting this option + to True may improve performance, especially on small vocabulary sizes, but + will generally require more memory. 
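+
+  Example (minimal illustrative sketch; the sizes are arbitrary):
+
+    layer = OnDeviceEmbedding(vocab_size=100, embedding_width=16)
+    ids = tf.constant([[1, 2, 3], [4, 5, 6]])  # [batch, seq_length] int32 ids
+    embeddings = layer(ids)                    # [batch, seq_length, 16] floats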
+ """ + + def __init__(self, + vocab_size, + embedding_width, + initializer="glorot_uniform", + use_one_hot=False, + **kwargs): + + super(OnDeviceEmbedding, self).__init__(**kwargs) + self._vocab_size = vocab_size + self._embedding_width = embedding_width + self._initializer = initializer + self._use_one_hot = use_one_hot + + def get_config(self): + config = { + "vocab_size": self._vocab_size, + "embedding_width": self._embedding_width, + "initializer": self._initializer, + "use_one_hot": self._use_one_hot, + } + base_config = super(OnDeviceEmbedding, self).get_config() + return dict(list(base_config.items()) + list(config.items())) + + def build(self, input_shape): + self.embeddings = self.add_weight( + "embeddings", + shape=[self._vocab_size, self._embedding_width], + initializer=self._initializer, + dtype=tf.float32) + + super(OnDeviceEmbedding, self).build(input_shape) + + def call(self, inputs): + flat_inputs = tf.reshape(inputs, [-1]) + if self._use_one_hot: + one_hot_data = tf.one_hot( + flat_inputs, depth=self._vocab_size, dtype=self.embeddings.dtype) + embeddings = tf.matmul(one_hot_data, self.embeddings) + else: + embeddings = tf.gather(self.embeddings, flat_inputs) + embeddings = tf.reshape( + embeddings, + # Work around b/142213824: prefer concat to shape over a Python list. + tf.concat([tf.shape(inputs), [self._embedding_width]], axis=0)) + embeddings.set_shape(inputs.shape.as_list() + [self._embedding_width]) + return embeddings diff --git a/models/official/nlp/modeling/layers/on_device_embedding_test.py b/models/official/nlp/modeling/layers/on_device_embedding_test.py new file mode 100644 index 0000000000000000000000000000000000000000..e2b9b98f181470ea233d8297550a2dd92786baae --- /dev/null +++ b/models/official/nlp/modeling/layers/on_device_embedding_test.py @@ -0,0 +1,198 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for Keras-based one-hot embedding layer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf + +from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import +from official.nlp.modeling.layers import on_device_embedding + + +# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It +# guarantees forward compatibility of this code for the V2 switchover. +@keras_parameterized.run_all_keras_modes +class OnDeviceEmbeddingTest(keras_parameterized.TestCase): + + def test_layer_creation(self): + vocab_size = 31 + embedding_width = 27 + test_layer = on_device_embedding.OnDeviceEmbedding( + vocab_size=vocab_size, embedding_width=embedding_width) + # Create a 2-dimensional input (the first dimension is implicit). 
+ sequence_length = 23 + input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) + output_tensor = test_layer(input_tensor) + + # The output should be the same as the input, save that it has an extra + # embedding_width dimension on the end. + expected_output_shape = [None, sequence_length, embedding_width] + self.assertEqual(expected_output_shape, output_tensor.shape.as_list()) + self.assertEqual(output_tensor.dtype, tf.float32) + + def test_layer_creation_with_mixed_precision(self): + vocab_size = 31 + embedding_width = 27 + policy = tf.keras.mixed_precision.experimental.Policy("mixed_float16") + test_layer = on_device_embedding.OnDeviceEmbedding( + vocab_size=vocab_size, embedding_width=embedding_width, dtype=policy) + # Create a 2-dimensional input (the first dimension is implicit). + sequence_length = 23 + input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) + output_tensor = test_layer(input_tensor) + + # The output should be the same as the input, save that it has an extra + # embedding_width dimension on the end. + expected_output_shape = [None, sequence_length, embedding_width] + self.assertEqual(expected_output_shape, output_tensor.shape.as_list()) + self.assertEqual(output_tensor.dtype, tf.float16) + + def test_layer_invocation(self): + vocab_size = 31 + embedding_width = 27 + test_layer = on_device_embedding.OnDeviceEmbedding( + vocab_size=vocab_size, embedding_width=embedding_width) + # Create a 2-dimensional input (the first dimension is implicit). + sequence_length = 23 + input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) + output_tensor = test_layer(input_tensor) + + # Create a model from the test layer. + model = tf.keras.Model(input_tensor, output_tensor) + + # Invoke the model on test data. We can't validate the output data itself + # (the NN is too complex) but this will rule out structural runtime errors. + batch_size = 3 + input_data = np.random.randint( + vocab_size, size=(batch_size, sequence_length)) + output = model.predict(input_data) + self.assertEqual(tf.float32, output.dtype) + + def test_layer_invocation_with_mixed_precision(self): + vocab_size = 31 + embedding_width = 27 + policy = tf.keras.mixed_precision.experimental.Policy("mixed_float16") + test_layer = on_device_embedding.OnDeviceEmbedding( + vocab_size=vocab_size, embedding_width=embedding_width, + dtype=policy) + # Create a 2-dimensional input (the first dimension is implicit). + sequence_length = 23 + input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) + output_tensor = test_layer(input_tensor) + + # Create a model from the test layer. + model = tf.keras.Model(input_tensor, output_tensor) + + # Invoke the model on test data. We can't validate the output data itself + # (the NN is too complex) but this will rule out structural runtime errors. + batch_size = 3 + input_data = np.random.randint( + vocab_size, size=(batch_size, sequence_length)) + output = model.predict(input_data) + self.assertEqual(tf.float16, output.dtype) + + def test_one_hot_layer_creation(self): + vocab_size = 31 + embedding_width = 27 + test_layer = on_device_embedding.OnDeviceEmbedding( + vocab_size=vocab_size, + embedding_width=embedding_width, + use_one_hot=True) + # Create a 2-dimensional input (the first dimension is implicit). 
+ sequence_length = 23 + input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) + output_tensor = test_layer(input_tensor) + + # The output should be the same as the input, save that it has an extra + # embedding_width dimension on the end. + expected_output_shape = [None, sequence_length, embedding_width] + self.assertEqual(expected_output_shape, output_tensor.shape.as_list()) + self.assertEqual(output_tensor.dtype, tf.float32) + + def test_one_hot_layer_creation_with_mixed_precision(self): + vocab_size = 31 + embedding_width = 27 + policy = tf.keras.mixed_precision.experimental.Policy("mixed_float16") + test_layer = on_device_embedding.OnDeviceEmbedding( + vocab_size=vocab_size, + embedding_width=embedding_width, + dtype=policy, + use_one_hot=True) + # Create a 2-dimensional input (the first dimension is implicit). + sequence_length = 23 + input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) + output_tensor = test_layer(input_tensor) + + # The output should be the same as the input, save that it has an extra + # embedding_width dimension on the end. + expected_output_shape = [None, sequence_length, embedding_width] + self.assertEqual(expected_output_shape, output_tensor.shape.as_list()) + self.assertEqual(output_tensor.dtype, tf.float16) + + def test_one_hot_layer_invocation(self): + vocab_size = 31 + embedding_width = 27 + test_layer = on_device_embedding.OnDeviceEmbedding( + vocab_size=vocab_size, + embedding_width=embedding_width, + use_one_hot=True) + # Create a 2-dimensional input (the first dimension is implicit). + sequence_length = 23 + input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) + output_tensor = test_layer(input_tensor) + + # Create a model from the test layer. + model = tf.keras.Model(input_tensor, output_tensor) + + # Invoke the model on test data. We can't validate the output data itself + # (the NN is too complex) but this will rule out structural runtime errors. + batch_size = 3 + input_data = np.random.randint( + vocab_size, size=(batch_size, sequence_length)) + output = model.predict(input_data) + self.assertEqual(tf.float32, output.dtype) + + def test_one_hot_layer_invocation_with_mixed_precision(self): + vocab_size = 31 + embedding_width = 27 + policy = tf.keras.mixed_precision.experimental.Policy("mixed_float16") + test_layer = on_device_embedding.OnDeviceEmbedding( + vocab_size=vocab_size, + embedding_width=embedding_width, + dtype=policy, + use_one_hot=True) + # Create a 2-dimensional input (the first dimension is implicit). + sequence_length = 23 + input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) + output_tensor = test_layer(input_tensor) + + # Create a model from the test layer. + model = tf.keras.Model(input_tensor, output_tensor) + + # Invoke the model on test data. We can't validate the output data itself + # (the NN is too complex) but this will rule out structural runtime errors. 
+ batch_size = 3 + input_data = np.random.randint( + vocab_size, size=(batch_size, sequence_length)) + output = model.predict(input_data) + self.assertEqual(tf.float16, output.dtype) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/modeling/layers/position_embedding.py b/models/official/nlp/modeling/layers/position_embedding.py new file mode 100644 index 0000000000000000000000000000000000000000..169e54de112d9a3ce65e9fa68f066a107d35c7a4 --- /dev/null +++ b/models/official/nlp/modeling/layers/position_embedding.py @@ -0,0 +1,205 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Keras-based positional embedding layer.""" +# pylint: disable=g-classes-have-attributes +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import math + +import tensorflow as tf + +from official.modeling import tf_utils + + +@tf.keras.utils.register_keras_serializable(package="Text") +class PositionEmbedding(tf.keras.layers.Layer): + """Creates a positional embedding. + + This layer creates a positional embedding as described in "BERT: Pre-training + of Deep Bidirectional Transformers for Language Understanding" + (https://arxiv.org/abs/1810.04805). + + This layer can be set up to either create a statically shaped slice or a + dynamically shaped slice. If `use_dynamic_slicing` is True, the input tensor + can have a dynamic 1st dimension, while if `use_dynamic_slicing` is False the + input size must be fixed. + + Arguments: + use_dynamic_slicing: Whether to use the dynamic slicing path. + max_sequence_length: The maximum size of the dynamic sequence. Only + applicable if `use_dynamic_slicing` is True. + initializer: The initializer to use for the embedding weights. Defaults to + "glorot_uniform". + """ + + def __init__(self, + initializer="glorot_uniform", + use_dynamic_slicing=False, + max_sequence_length=None, + **kwargs): + # We need to have a default dtype of float32, since the inputs (which Keras + # usually uses to infer the dtype) will always be int32. + if "dtype" not in kwargs: + kwargs["dtype"] = "float32" + + super(PositionEmbedding, self).__init__(**kwargs) + if use_dynamic_slicing and max_sequence_length is None: + raise ValueError( + "If `use_dynamic_slicing` is True, `max_sequence_length` must be set." 
+ ) + self._max_sequence_length = max_sequence_length + self._initializer = tf.keras.initializers.get(initializer) + self._use_dynamic_slicing = use_dynamic_slicing + + def get_config(self): + config = { + "max_sequence_length": self._max_sequence_length, + "initializer": tf.keras.initializers.serialize(self._initializer), + "use_dynamic_slicing": self._use_dynamic_slicing, + } + base_config = super(PositionEmbedding, self).get_config() + return dict(list(base_config.items()) + list(config.items())) + + def build(self, input_shape): + """Implements build() for the layer.""" + dimension_list = input_shape.as_list() + + if len(dimension_list) != 3: + raise ValueError("PositionEmbedding expects a 3-dimensional input tensor " + "of shape [batch, sequence, width]") + seq_length = dimension_list[1] + width = dimension_list[2] + + # If we are not using dynamic slicing, we must assume that the sequence + # length is fixed and max_sequence_length should not be specified. + if not self._use_dynamic_slicing: + if seq_length is None: + raise ValueError( + "PositionEmbedding must have `use_dynamic_slicing` set " + "to True (and max_sequence_length set) when the " + "sequence (1st) dimension of the input is None.") + if self._max_sequence_length is not None: + raise ValueError( + "When `use_dynamic_slicing` is False, max_sequence_length should " + "not be specified and we ought to use seq_length to get the " + "variable shape.") + + if self._max_sequence_length is not None: + weight_sequence_length = self._max_sequence_length + else: + weight_sequence_length = seq_length + + self._position_embeddings = self.add_weight( + "embeddings", + shape=[weight_sequence_length, width], + initializer=self._initializer) + + super(PositionEmbedding, self).build(input_shape) + + def call(self, inputs): + """Implements call() for the layer.""" + input_shape = tf_utils.get_shape_list(inputs, expected_rank=3) + if self._use_dynamic_slicing: + position_embeddings = self._position_embeddings[:input_shape[1], :] + else: + position_embeddings = self._position_embeddings + + return tf.broadcast_to(position_embeddings, input_shape) + + +@tf.keras.utils.register_keras_serializable(package="Text") +class RelativePositionEmbedding(tf.keras.layers.Layer): + """Creates a positional embedding. + + This layer calculates the position encoding as a mix of sine and cosine + functions with geometrically increasing wavelengths. Defined and formulized in + "Attention is All You Need", section 3.5. + (https://arxiv.org/abs/1706.03762). + + Arguments: + hidden_size: Size of the hidden layer. + min_timescale: Minimum scale that will be applied at each position + max_timescale: Maximum scale that will be applied at each position. + """ + + def __init__(self, + hidden_size, + min_timescale=1.0, + max_timescale=1.0e4, + **kwargs): + # We need to have a default dtype of float32, since the inputs (which Keras + # usually uses to infer the dtype) will always be int32. + # We compute the positional encoding in float32 even if the model uses + # float16, as many of the ops used, like log and exp, are numerically + # unstable in float16. 
+    if "dtype" not in kwargs:
+      kwargs["dtype"] = "float32"
+
+    super(RelativePositionEmbedding, self).__init__(**kwargs)
+    self._hidden_size = hidden_size
+    self._min_timescale = min_timescale
+    self._max_timescale = max_timescale
+
+  def get_config(self):
+    # `length` is not a constructor argument and `self._length` is never set,
+    # so it is not serialized here.
+    config = {
+        "hidden_size": self._hidden_size,
+        "min_timescale": self._min_timescale,
+        "max_timescale": self._max_timescale,
+    }
+    base_config = super(RelativePositionEmbedding, self).get_config()
+    return dict(list(base_config.items()) + list(config.items()))
+
+  def call(self, inputs, length=None):
+    """Implements call() for the layer.
+
+    Args:
+      inputs: A tensor whose second dimension will be used as `length`. If
+        `None`, the other `length` argument must be specified.
+      length: An optional integer specifying the number of positions. If both
+        `inputs` and `length` are specified, `length` must be equal to the
+        second dimension of `inputs`.
+
+    Returns:
+      A tensor of shape [length, hidden_size].
+    """
+    if inputs is None and length is None:
+      raise ValueError(
+          "If inputs is None, `length` must be set in "
+          "RelativePositionEmbedding().")
+    if inputs is not None:
+      input_shape = tf_utils.get_shape_list(inputs)
+      if length is not None and length != input_shape[1]:
+        raise ValueError(
+            "If inputs is not None, `length` must be equal to input_shape[1]."
+        )
+      length = input_shape[1]
+    position = tf.cast(tf.range(length), tf.float32)
+    num_timescales = self._hidden_size // 2
+    min_timescale, max_timescale = self._min_timescale, self._max_timescale
+    log_timescale_increment = (
+        math.log(float(max_timescale) / float(min_timescale)) /
+        (tf.cast(num_timescales, tf.float32) - 1))
+    inv_timescales = min_timescale * tf.exp(
+        tf.cast(tf.range(num_timescales), tf.float32) *
+        -log_timescale_increment)
+    scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales,
+                                                               0)
+    position_embeddings = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)],
+                                    axis=1)
+    return position_embeddings
diff --git a/models/official/nlp/modeling/layers/position_embedding_test.py b/models/official/nlp/modeling/layers/position_embedding_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..89a29af7a4e8dc7c0369131f635d1c6abe74fdbc
--- /dev/null
+++ b/models/official/nlp/modeling/layers/position_embedding_test.py
@@ -0,0 +1,131 @@
+# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for Keras-based positional embedding layer."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.keras import keras_parameterized  # pylint: disable=g-direct-tensorflow-import
+from official.nlp.modeling.layers import position_embedding
+
+
+# This decorator runs the test in V1, V2-Eager, and V2-Functional mode.
It +# guarantees forward compatibility of this code for the V2 switchover. +@keras_parameterized.run_all_keras_modes +class PositionEmbeddingLayerTest(keras_parameterized.TestCase): + + def test_static_layer_output_shape(self): + test_layer = position_embedding.PositionEmbedding() + # Create a 3-dimensional input (the first dimension is implicit). + sequence_length = 21 + width = 30 + input_tensor = tf.keras.Input(shape=(sequence_length, width)) + output_tensor = test_layer(input_tensor) + + # When using static positional embedding shapes, the output is expected + # to be the same as the input shape in all dimensions save batch. + expected_output_shape = [None, sequence_length, width] + self.assertEqual(expected_output_shape, output_tensor.shape.as_list()) + # The default output dtype for this layer should be tf.float32. + self.assertEqual(tf.float32, output_tensor.dtype) + + def test_float16_dtype(self): + test_layer = position_embedding.PositionEmbedding(dtype="float16") + # Create a 3-dimensional input (the first dimension is implicit). + sequence_length = 21 + width = 30 + input_tensor = tf.keras.Input(shape=(sequence_length, width)) + output_tensor = test_layer(input_tensor) + + # When using static positional embedding shapes, the output is expected + # to be the same as the input shape in all dimensions save batch. + expected_output_shape = [None, sequence_length, width] + self.assertEqual(expected_output_shape, output_tensor.shape.as_list()) + # The default output dtype for this layer should be tf.float32. + self.assertEqual(tf.float16, output_tensor.dtype) + + def test_dynamic_layer_output_shape(self): + max_sequence_length = 40 + test_layer = position_embedding.PositionEmbedding( + use_dynamic_slicing=True, max_sequence_length=max_sequence_length) + # Create a 3-dimensional input (the first dimension is implicit). + width = 30 + input_tensor = tf.keras.Input(shape=(None, width)) + output_tensor = test_layer(input_tensor) + + # When using dynamic positional embedding shapes, the output is expected + # to be the same as the input shape in all dimensions - but may be None if + # the input shape is None there. + expected_output_shape = [None, None, width] + self.assertEqual(expected_output_shape, output_tensor.shape.as_list()) + + def test_dynamic_layer_slicing(self): + max_sequence_length = 40 + test_layer = position_embedding.PositionEmbedding( + use_dynamic_slicing=True, max_sequence_length=max_sequence_length) + # Create a 3-dimensional input (the first dimension is implicit). + width = 30 + input_tensor = tf.keras.Input(shape=(None, width)) + output_tensor = test_layer(input_tensor) + + model = tf.keras.Model(input_tensor, output_tensor) + + # Create input data that is shorter than max_sequence_length, which should + # trigger a down-slice. + input_length = 17 + # Note: This test explicitly uses a batch size of 1. This is to get around + # Keras' restriction on Model invocations: inputs are expected to have the + # same batch cardinality as outputs. In practice, this layer should be used + # inside a model, where it can be projected when added to another tensor. + input_data = np.ones((1, input_length, width)) + output_data = model.predict(input_data) + + self.assertAllEqual([1, input_length, width], output_data.shape) + + def test_relative_tensor_input(self): + hidden_size = 8 + test_layer = position_embedding.RelativePositionEmbedding( + hidden_size=hidden_size) + + # create a 3-dimensional input for test_layer to infer length as 1. 
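+    # With length 1, the only position is 0, so scaled_time is zero for every
+    # timescale: the sine half of the embedding is all zeros and the cosine
+    # half is all ones.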
+ input_tensor = tf.constant([[[0] * hidden_size]]) + output_tensor = test_layer(input_tensor) + + # expected output is the theoretical result of the input based on + # sine cosine relative position embedding formula. + expected_output_tensor = tf.constant([[0, 0, 0, 0, 1, 1, 1, 1]]) + self.assertAllEqual(output_tensor, expected_output_tensor) + + def test_relative_length_input(self): + hidden_size = 8 + + # When we do not have tensor as input, we explicitly specify length + # value when initializing test_layer. + test_layer = position_embedding.RelativePositionEmbedding( + hidden_size=hidden_size) + input_tensor = None + output_tensor = test_layer(input_tensor, length=1) + + # expected output is the theoretical result of the input based on + # sine cosine relative position embedding formula. + expected_output_tensor = tf.constant([[0, 0, 0, 0, 1, 1, 1, 1]]) + self.assertAllEqual(output_tensor, expected_output_tensor) + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/modeling/layers/rezero_transformer.py b/models/official/nlp/modeling/layers/rezero_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..42bc1af0031db97a12a730b8e1abe98f3c9318e0 --- /dev/null +++ b/models/official/nlp/modeling/layers/rezero_transformer.py @@ -0,0 +1,247 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Keras-based rezero-transformer block layer (Transformer with ReZero).""" +# pylint: disable=g-classes-have-attributes +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import gin +import tensorflow as tf + +from official.nlp.modeling.layers import attention +from official.nlp.modeling.layers import dense_einsum + + +@tf.keras.utils.register_keras_serializable(package="Text") +@gin.configurable +class ReZeroTransformer(tf.keras.layers.Layer): + """Transformer layer with ReZero. + + This layer implements the Transformer from "Attention Is All You Need". + (https://arxiv.org/abs/1706.03762). + The residual connection implements the ReZero method. + (https://arxiv.org/abs/2003.04887) + + Arguments: + num_attention_heads: Number of attention heads. + intermediate_size: Size of the intermediate layer. + intermediate_activation: Activation for the intermediate layer. + dropout_rate: Dropout probability for the post-attention and output dropout. + attention_dropout_rate: Dropout probability for within the attention layer. + output_range: the sequence output range, [0, output_range) by slicing the + target sequence. `None` means the target sequence is not sliced. + kernel_initializer: Initializer for dense layer kernels. + bias_initializer: Initializer for dense layer biases. + kernel_regularizer: Regularizer for dense layer kernels. + bias_regularizer: Regularizer for dense layer biases. 
+ activity_regularizer: Regularizer for dense layer activity. + kernel_constraint: Constraint for dense layer kernels. + bias_constraint: Constraint for dense layer kernels. + use_layer_norm: If add layer_norm on top of the ReZero. + """ + + def __init__(self, + num_attention_heads, + intermediate_size, + intermediate_activation, + dropout_rate=0.0, + attention_dropout_rate=0.0, + output_range=None, + kernel_initializer="glorot_uniform", + bias_initializer="zeros", + kernel_regularizer=None, + bias_regularizer=None, + activity_regularizer=None, + kernel_constraint=None, + bias_constraint=None, + use_layer_norm=False, + **kwargs): + super(ReZeroTransformer, self).__init__(**kwargs) + + self._num_heads = num_attention_heads + self._intermediate_size = intermediate_size + self._intermediate_activation = intermediate_activation + self._attention_dropout_rate = attention_dropout_rate + self._dropout_rate = dropout_rate + self._output_range = output_range + self._kernel_initializer = tf.keras.initializers.get(kernel_initializer) + self._bias_initializer = tf.keras.initializers.get(bias_initializer) + self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer) + self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer) + self._kernel_constraint = tf.keras.constraints.get(kernel_constraint) + self._bias_constraint = tf.keras.constraints.get(bias_constraint) + self._use_layer_norm = use_layer_norm + + def build(self, input_shape): + input_tensor = input_shape[0] if len(input_shape) == 2 else input_shape + input_tensor_shape = tf.TensorShape(input_tensor) + if len(input_tensor_shape) != 3: + raise ValueError("TransformerLayer expects a three-dimensional input of " + "shape [batch, sequence, width].") + batch_size, sequence_length, hidden_size = input_tensor_shape + + if len(input_shape) == 2: + mask_tensor_shape = tf.TensorShape(input_shape[1]) + expected_mask_tensor_shape = tf.TensorShape( + [batch_size, sequence_length, sequence_length]) + if not expected_mask_tensor_shape.is_compatible_with(mask_tensor_shape): + raise ValueError("When passing a mask tensor to TransformerLayer, the " + "mask tensor must be of shape [batch, " + "sequence_length, sequence_length] (here %s). Got a " + "mask tensor of shape %s." % + (expected_mask_tensor_shape, mask_tensor_shape)) + if hidden_size % self._num_heads != 0: + raise ValueError( + "The input size (%d) is not a multiple of the number of attention " + "heads (%d)" % (hidden_size, self._num_heads)) + self._attention_head_size = int(hidden_size // self._num_heads) + + self._attention_layer = attention.MultiHeadAttention( + num_heads=self._num_heads, + key_size=self._attention_head_size, + dropout=self._attention_dropout_rate, + kernel_initializer=self._kernel_initializer, + bias_initializer=self._bias_initializer, + kernel_regularizer=self._kernel_regularizer, + bias_regularizer=self._bias_regularizer, + activity_regularizer=self._activity_regularizer, + kernel_constraint=self._kernel_constraint, + bias_constraint=self._bias_constraint, + name="self_attention") + self._attention_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate) + if self._use_layer_norm: + # Use float32 in layernorm for numeric stability. + # It is probably safe in mixed_float16, but we haven't validated this yet. 
+ self._attention_layer_norm = ( + tf.keras.layers.LayerNormalization( + name="self_attention_layer_norm", + axis=-1, + epsilon=1e-12, + dtype=tf.float32)) + self._intermediate_dense = dense_einsum.DenseEinsum( + output_shape=self._intermediate_size, + activation=None, + kernel_initializer=self._kernel_initializer, + bias_initializer=self._bias_initializer, + kernel_regularizer=self._kernel_regularizer, + bias_regularizer=self._bias_regularizer, + activity_regularizer=self._activity_regularizer, + kernel_constraint=self._kernel_constraint, + bias_constraint=self._bias_constraint, + name="intermediate") + policy = tf.keras.mixed_precision.experimental.global_policy() + if policy.name == "mixed_bfloat16": + # bfloat16 causes BERT with the LAMB optimizer to not converge + # as well, so we use float32. + # TODO(b/154538392): Investigate this. + policy = tf.float32 + self._intermediate_activation_layer = tf.keras.layers.Activation( + self._intermediate_activation, dtype=policy) + self._output_dense = dense_einsum.DenseEinsum( + output_shape=hidden_size, + kernel_initializer=self._kernel_initializer, + bias_initializer=self._bias_initializer, + kernel_regularizer=self._kernel_regularizer, + bias_regularizer=self._bias_regularizer, + activity_regularizer=self._activity_regularizer, + kernel_constraint=self._kernel_constraint, + bias_constraint=self._bias_constraint, + name="output") + self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate) + if self._use_layer_norm: + # Use float32 in layernorm for numeric stability. + self._output_layer_norm = tf.keras.layers.LayerNormalization( + name="output_layer_norm", axis=-1, epsilon=1e-12, dtype=tf.float32) + + self._rezero_a = self.add_weight( + name="rezero_alpha", + initializer=tf.keras.initializers.Zeros(), + trainable=True, dtype=tf.float32) + + super(ReZeroTransformer, self).build(input_shape) + + def get_config(self): + config = { + "num_attention_heads": + self._num_heads, + "intermediate_size": + self._intermediate_size, + "intermediate_activation": + self._intermediate_activation, + "dropout_rate": + self._dropout_rate, + "attention_dropout_rate": + self._attention_dropout_rate, + "output_range": + self._output_range, + "use_layer_norm": + self._use_layer_norm, + "kernel_initializer": + tf.keras.initializers.serialize(self._kernel_initializer), + "bias_initializer": + tf.keras.initializers.serialize(self._bias_initializer), + "kernel_regularizer": + tf.keras.regularizers.serialize(self._kernel_regularizer), + "bias_regularizer": + tf.keras.regularizers.serialize(self._bias_regularizer), + "activity_regularizer": + tf.keras.regularizers.serialize(self._activity_regularizer), + "kernel_constraint": + tf.keras.constraints.serialize(self._kernel_constraint), + "bias_constraint": + tf.keras.constraints.serialize(self._bias_constraint), + } + base_config = super(ReZeroTransformer, self).get_config() + return dict(list(base_config.items()) + list(config.items())) + + def reset_rezero(self): + self._rezero_a.assign(0.) 
+ + def call(self, inputs): + if isinstance(inputs, (list, tuple)) and len(inputs) == 2: + input_tensor, attention_mask = inputs + else: + input_tensor, attention_mask = (inputs, None) + + if self._output_range: + target_tensor = input_tensor[:, 0:self._output_range, :] + attention_mask = attention_mask[:, 0:self._output_range, :] + else: + target_tensor = input_tensor + attention_inputs = [target_tensor, input_tensor] + + attention_output = self._attention_layer(attention_inputs, attention_mask) + attention_output = self._attention_dropout(attention_output) + attention_output = target_tensor + self._rezero_a * attention_output + if self._use_layer_norm: + attention_output = self._attention_layer_norm(attention_output) + else: + attention_output = tf.cast(attention_output, tf.float32) + + intermediate_output = self._intermediate_dense(attention_output) + intermediate_output = self._intermediate_activation_layer( + intermediate_output) + layer_output = self._output_dense(intermediate_output) + layer_output = self._output_dropout(layer_output) + # During mixed precision training, attention_output is from layer norm and + # is always fp32 for now. Cast layer_output to fp32 for the subsequent add. + layer_output = attention_output + tf.cast(self._rezero_a * layer_output, + tf.float32) + if self._use_layer_norm: + layer_output = self._output_layer_norm(layer_output) + + return layer_output diff --git a/models/official/nlp/modeling/layers/rezero_transformer_test.py b/models/official/nlp/modeling/layers/rezero_transformer_test.py new file mode 100644 index 0000000000000000000000000000000000000000..6ef0aa218c70c919f62492b00ef5f53348dd5938 --- /dev/null +++ b/models/official/nlp/modeling/layers/rezero_transformer_test.py @@ -0,0 +1,133 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for Keras-based rezero-transformer block layer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf + +from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import +from official.nlp.modeling.layers import rezero_transformer + + +# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It +# guarantees forward compatibility of this code for the V2 switchover. 
+@keras_parameterized.run_all_keras_modes +class TransformerWithReZeroLayerTest(keras_parameterized.TestCase): + + def tearDown(self): + super(TransformerWithReZeroLayerTest, self).tearDown() + tf.keras.mixed_precision.experimental.set_policy('float32') + + def test_layer_invocation_with_float16_dtype(self): + tf.keras.mixed_precision.experimental.set_policy('mixed_float16') + test_layer = rezero_transformer.ReZeroTransformer( + num_attention_heads=10, + intermediate_size=2048, + intermediate_activation='relu') + sequence_length = 21 + width = 80 + # Create a 3-dimensional input (the first dimension is implicit). + data_tensor = tf.keras.Input(shape=(sequence_length, width)) + # Create a 2-dimensional input (the first dimension is implicit). + mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) + output_tensor = test_layer([data_tensor, mask_tensor]) + + # Create a model from the test layer. + model = tf.keras.Model([data_tensor, mask_tensor], output_tensor) + + # Invoke the model on test data. We can't validate the output data itself + # (the NN is too complex) but this will rule out structural runtime errors. + batch_size = 6 + input_data = (10 * np.random.random_sample( + (batch_size, sequence_length, width))) + # The attention mask should be of shape (batch, from_seq_len, to_seq_len), + # which here is (batch, sequence_length, sequence_length) + mask_data = np.random.randint( + 2, size=(batch_size, sequence_length, sequence_length)) + _ = model.predict([input_data, mask_data]) + + def test_rezero_without_layer_norm(self): + test_layer = rezero_transformer.ReZeroTransformer( + num_attention_heads=10, + intermediate_size=2048, + intermediate_activation='relu', + use_layer_norm=False) + + input_length, width = 16, 30 + input_tensor = tf.keras.Input(shape=(input_length, width)) + output_tensor = test_layer(input_tensor) + model = tf.keras.Model(input_tensor, output_tensor) + + input_data = np.random.rand(2, input_length, width) + test_layer._rezero_a.assign(1.0) + test_layer.reset_rezero() + output_data = model.predict(input_data) + + self.assertAllClose(input_data, output_data) + + def test_rezero_with_layer_norm(self): + test_layer = rezero_transformer.ReZeroTransformer( + num_attention_heads=10, + intermediate_size=2048, + intermediate_activation='relu', + use_layer_norm=True) + + input_length, width = 16, 30 + input_tensor = tf.keras.Input(shape=(input_length, width)) + output_tensor = test_layer(input_tensor) + model = tf.keras.Model(input_tensor, output_tensor) + + input_data = np.random.rand(2, input_length, width) + 2.0 + output_data = model.predict(input_data) + input_data_normed = ( + input_data - np.mean(input_data, axis=-1, keepdims=True)) / ( + np.std(input_data, axis=-1, keepdims=True)) + + self.assertAllClose(input_data_normed, output_data) + + def test_layer_output_range(self): + test_layer = rezero_transformer.ReZeroTransformer( + num_attention_heads=10, + intermediate_size=2048, + intermediate_activation='relu') + sequence_length = 21 + width = 80 + + batch_size = 6 + input_data = 10 * np.random.random_sample( + (batch_size, sequence_length, width)) + mask_data = np.random.randint( + 2, size=(batch_size, sequence_length, sequence_length)) + output_tensor = test_layer([input_data, mask_data]) + + # The layer only attends to the first token and outputs the first token + # embeeding. 
+ new_layer = rezero_transformer.ReZeroTransformer( + num_attention_heads=10, + intermediate_size=2048, + intermediate_activation='relu', + output_range=1) + _ = new_layer([input_data, mask_data]) + new_layer.set_weights(test_layer.get_weights()) + new_output_tensor = new_layer([input_data, mask_data]) + self.assertAllClose(new_output_tensor, output_tensor[:, 0:1, :]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/nlp/modeling/layers/self_attention_mask.py b/models/official/nlp/modeling/layers/self_attention_mask.py new file mode 100644 index 0000000000000000000000000000000000000000..933b4960dc0a86d4a0a767f8017853c2f2290d16 --- /dev/null +++ b/models/official/nlp/modeling/layers/self_attention_mask.py @@ -0,0 +1,63 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Keras layer that creates a self-attention mask.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import tensorflow as tf +from official.modeling import tf_utils + + +@tf.keras.utils.register_keras_serializable(package='Text') +class SelfAttentionMask(tf.keras.layers.Layer): + """Create 3D attention mask from a 2D tensor mask. + + inputs[0]: from_tensor: 2D or 3D Tensor of shape + [batch_size, from_seq_length, ...]. + inputs[1]: to_mask: int32 Tensor of shape [batch_size, to_seq_length]. + + Returns: + float Tensor of shape [batch_size, from_seq_length, to_seq_length]. + """ + + def call(self, inputs): + from_tensor = inputs[0] + to_mask = inputs[1] + from_shape = tf_utils.get_shape_list(from_tensor, expected_rank=[2, 3]) + batch_size = from_shape[0] + from_seq_length = from_shape[1] + + to_shape = tf_utils.get_shape_list(to_mask, expected_rank=2) + to_seq_length = to_shape[1] + + to_mask = tf.cast( + tf.reshape(to_mask, [batch_size, 1, to_seq_length]), + dtype=from_tensor.dtype) + + # We don't assume that `from_tensor` is a mask (although it could be). We + # don't actually care if we attend *from* padding tokens (only *to* padding) + # tokens so we create a tensor of all ones. + # + # `broadcast_ones` = [batch_size, from_seq_length, 1] + broadcast_ones = tf.ones( + shape=[batch_size, from_seq_length, 1], dtype=from_tensor.dtype) + + # Here we broadcast along two dimensions to create the mask. + mask = broadcast_ones * to_mask + + return mask diff --git a/models/official/nlp/modeling/layers/talking_heads_attention.py b/models/official/nlp/modeling/layers/talking_heads_attention.py new file mode 100644 index 0000000000000000000000000000000000000000..c65ba1e66165617aaf5652c2f77015e9a3eb7ccb --- /dev/null +++ b/models/official/nlp/modeling/layers/talking_heads_attention.py @@ -0,0 +1,153 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Talking Head Attention layer."""
+# pylint: disable=g-classes-have-attributes
+import math
+import string
+
+import gin
+import tensorflow as tf
+
+from official.nlp.modeling.layers import attention
+
+_CHR_IDX = string.ascii_lowercase
+
+
+@tf.keras.utils.register_keras_serializable(package="Text")
+@gin.configurable
+class TalkingHeadsAttention(attention.MultiHeadAttention):
+  """Implements Talking-Heads Attention.
+
+  This is an implementation of Talking-Heads Attention based on the paper
+  Talking-Heads Attention (https://arxiv.org/abs/2003.02436): it enhances
+  multi-head attention by including linear projections across the
+  attention-heads dimension, immediately before and after the softmax
+  operation.
+
+  See the base class `MultiHeadAttention` for more details.
+
+  Arguments:
+    num_heads: Number of attention heads.
+    key_size: Size of each attention head for query and key.
+    value_size: Size of each attention head for value.
+    dropout: Dropout probability.
+    use_bias: Boolean, whether the dense layers use bias vectors/matrices.
+    output_shape: The expected shape of an output tensor, besides the batch and
+      sequence dims. If not specified, projects back to the key feature dim.
+    attention_axes: axes over which the attention is applied. `None` means
+      attention over all axes, but batch, heads, and features.
+    return_attention_scores: bool, if `True`, returns the multi-head attention
+      scores as an additional output argument.
+    kernel_initializer: Initializer for dense layer kernels.
+    bias_initializer: Initializer for dense layer biases.
+    kernel_regularizer: Regularizer for dense layer kernels.
+    bias_regularizer: Regularizer for dense layer biases.
+    activity_regularizer: Regularizer for dense layer activity.
+    kernel_constraint: Constraint for dense layer kernels.
+    bias_constraint: Constraint for dense layer biases.
+  """
+
+  def _build_attention(self, qkv_rank):
+    """Builds multi-head dot-product attention computations.
+
+    This function overrides base class to create additional linear projection
+    that will be applied on attention scores before and after softmax.
+
+    Args:
+      qkv_rank: the rank of query, key, value tensors after projection.
+    """
+    super(TalkingHeadsAttention, self)._build_attention(qkv_rank)
+
+    # Build an equation:
+    # (<batch_dims>, num_heads_a, ...),(num_heads_a, num_heads_b) ->
+    # (<batch_dims>, num_heads_b, ...)
+    # qkv_ranks has `batch_dims`, `attention_dims`, `num_heads` and `channels`.
+    num_batch_dims = qkv_rank - len(self._attention_axes) - 2
+
+    # The shape of attn_scores is:
+    # (<batch_dims>, num_heads, <query_attention_dims>, <key_attention_dims>)
+    attn_scores_rank = num_batch_dims + 1 + len(self._attention_axes) * 2
+    scores_notation = _CHR_IDX[:attn_scores_rank]
+    projection_notation = scores_notation[num_batch_dims] + (
+        _CHR_IDX[attn_scores_rank])
+    projected_scores_notation = scores_notation[:num_batch_dims] + (
+        _CHR_IDX[attn_scores_rank] + scores_notation[num_batch_dims + 1:])
+    self._talking_heads_equation = "%s,%s->%s" % (
+        scores_notation, projection_notation, projected_scores_notation)
+
+    self._pre_softmax_weight = self.add_weight(
+        "pre_softmax_weight",
+        shape=(self._num_heads, self._num_heads),
+        initializer=self._kernel_initializer,
+        regularizer=self._kernel_regularizer,
+        constraint=self._kernel_constraint,
+        dtype=self.dtype,
+        trainable=True)
+    self._post_softmax_weight = self.add_weight(
+        "post_softmax_weight",
+        shape=(self._num_heads, self._num_heads),
+        initializer=self._kernel_initializer,
+        regularizer=self._kernel_regularizer,
+        constraint=self._kernel_constraint,
+        dtype=self.dtype,
+        trainable=True)
+
+  def _compute_attention(self,
+                         query_tensor,
+                         key_tensor,
+                         value_tensor,
+                         attention_mask=None):
+    """Applies Dot-product attention with query, key, value tensors.
+
+    This function overrides base class to apply additional linear projection
+    on attention scores before and after softmax.
+
+    Args:
+      query_tensor: Projected query `Tensor` of shape `[B, T, N, key_size]`.
+      key_tensor: Projected key `Tensor` of shape `[B, T, N, key_size]`.
+      value_tensor: Projected value `Tensor` of shape `[B, T, N, value_size]`.
+      attention_mask: a boolean mask of shape `[B, T, S]`, that prevents
+        attention to certain positions.
+
+    Returns:
+      attention_output: Multi-headed outputs of attention computation.
+      attention_scores: Multi-headed attention weights.
+    """
+    # Take the dot product between "query" and "key" to get the raw
+    # attention scores.
+    attention_scores = tf.einsum(self._dot_product_equation, key_tensor,
+                                 query_tensor)
+    attention_scores = tf.multiply(attention_scores,
+                                   1.0 / math.sqrt(float(self._key_size)))
+
+    # Apply linear projection before softmax
+    attention_scores = tf.einsum(self._talking_heads_equation, attention_scores,
+                                 self._pre_softmax_weight)
+
+    # Normalize the attention scores to probabilities.
+    # `attention_scores` = [B, N, T, S]
+    attention_scores = self._masked_softmax(attention_scores, attention_mask)
+
+    # Apply linear projection after softmax
+    attention_scores = tf.einsum(self._talking_heads_equation, attention_scores,
+                                 self._post_softmax_weight)
+
+    # This is actually dropping out entire tokens to attend to, which might
+    # seem a bit unusual, but is taken from the original Transformer paper.
+    attention_scores_dropout = self._dropout_layer(attention_scores)
+
+    # `context_layer` = [B, T, N, H]
+    attention_output = tf.einsum(self._combine_equation,
+                                 attention_scores_dropout, value_tensor)
+    return attention_output, attention_scores
diff --git a/models/official/nlp/modeling/layers/talking_heads_attention_test.py b/models/official/nlp/modeling/layers/talking_heads_attention_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed24eda26c6f532b5e5011f5bfc8109eeca68a03
--- /dev/null
+++ b/models/official/nlp/modeling/layers/talking_heads_attention_test.py
@@ -0,0 +1,163 @@
+# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for the attention layer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +import numpy as np +import tensorflow as tf + +from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import +from official.nlp.modeling.layers import talking_heads_attention + + +# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It +# guarantees forward compatibility of this code for the V2 switchover. +# This test is revised base on attention.MultiHeadAttentionTest. +@keras_parameterized.run_all_keras_modes +class TalkingHeadsAttentionTest(keras_parameterized.TestCase): + + @parameterized.named_parameters( + ("key_value_same_proj", None, None, [40, 80]), + ("key_value_different_proj", 32, 60, [40, 60]), + ) + def test_non_masked_attention(self, value_size, output_shape, output_dims): + """Test that the attention layer can be created without a mask tensor.""" + test_layer = talking_heads_attention.TalkingHeadsAttention( + num_heads=12, + key_size=64, + value_size=value_size, + output_shape=output_shape) + # Create a 3-dimensional input (the first dimension is implicit). + query = tf.keras.Input(shape=(40, 80)) + value = tf.keras.Input(shape=(20, 80)) + output = test_layer([query, value]) + self.assertEqual(output.shape.as_list(), [None] + output_dims) + + def test_non_masked_self_attention(self): + """Test with one input (self-attenntion) and no mask tensor.""" + test_layer = talking_heads_attention.TalkingHeadsAttention( + num_heads=12, key_size=64) + # Create a 3-dimensional input (the first dimension is implicit). + query = tf.keras.Input(shape=(40, 80)) + output = test_layer([query, query]) + self.assertEqual(output.shape.as_list(), [None, 40, 80]) + + def test_attention_scores(self): + """Test attention outputs with coefficients.""" + test_layer = talking_heads_attention.TalkingHeadsAttention( + num_heads=12, key_size=64, return_attention_scores=True) + # Create a 3-dimensional input (the first dimension is implicit). + query = tf.keras.Input(shape=(40, 80)) + output, coef = test_layer([query, query]) + self.assertEqual(output.shape.as_list(), [None, 40, 80]) + self.assertEqual(coef.shape.as_list(), [None, 12, 40, 40]) + + @parameterized.named_parameters(("with_bias", True), ("no_bias", False)) + def test_masked_attention(self, use_bias): + """Test with a mask tensor.""" + test_layer = talking_heads_attention.TalkingHeadsAttention( + num_heads=12, key_size=2, use_bias=use_bias) + # Create a 3-dimensional input (the first dimension is implicit). + batch_size = 3 + query = tf.keras.Input(shape=(4, 8)) + value = tf.keras.Input(shape=(2, 8)) + mask_tensor = tf.keras.Input(shape=(4, 2)) + output = test_layer([query, value], mask_tensor) + + # Create a model containing the test layer. 
+ model = tf.keras.Model([query, value, mask_tensor], output) + + # Generate data for the input (non-mask) tensors. + from_data = 10 * np.random.random_sample((batch_size, 4, 8)) + to_data = 10 * np.random.random_sample((batch_size, 2, 8)) + + # Invoke the data with a random set of mask data. This should mask at least + # one element. + mask_data = np.random.randint(2, size=(batch_size, 4, 2)) + masked_output_data = model.predict([from_data, to_data, mask_data]) + + # Invoke the same data, but with a null mask (where no elements are masked). + null_mask_data = np.ones((batch_size, 4, 2)) + unmasked_output_data = model.predict([from_data, to_data, null_mask_data]) + + # Because one data is masked and one is not, the outputs should not be the + # same. + self.assertNotAllClose(masked_output_data, unmasked_output_data) + + # Tests the layer with three inputs: Q, K, V. + key = tf.keras.Input(shape=(2, 8)) + output = test_layer([query, value, key], mask_tensor) + model = tf.keras.Model([query, value, key, mask_tensor], output) + + masked_output_data = model.predict([from_data, to_data, to_data, mask_data]) + unmasked_output_data = model.predict( + [from_data, to_data, to_data, null_mask_data]) + # Because one data is masked and one is not, the outputs should not be the + # same. + self.assertNotAllClose(masked_output_data, unmasked_output_data) + + if use_bias: + self.assertLen(test_layer._query_dense.trainable_variables, 2) + self.assertLen(test_layer._output_dense.trainable_variables, 2) + else: + self.assertLen(test_layer._query_dense.trainable_variables, 1) + self.assertLen(test_layer._output_dense.trainable_variables, 1) + + def test_initializer(self): + """Test with a specified initializer.""" + test_layer = talking_heads_attention.TalkingHeadsAttention( + num_heads=12, + key_size=64, + kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)) + # Create a 3-dimensional input (the first dimension is implicit). + query = tf.keras.Input(shape=(40, 80)) + output = test_layer([query, query]) + self.assertEqual(output.shape.as_list(), [None, 40, 80]) + + @parameterized.named_parameters( + ("4d_inputs_one_free_batch", [3, 4], [3, 2], [4, 2], (2,)), + ("4D_inputs_2D_attention", [3, 4], [3, 2], [3, 4, 3, 2], (1, 2)), + ("5D_inputs_2D_attention", [5, 3, 4], [5, 3, 2], [3, 4, 3, 2], (2, 3))) + def test_high_dim_attention(self, q_dims, v_dims, mask_dims, attention_axes): + """Test with a mask tensor.""" + test_layer = talking_heads_attention.TalkingHeadsAttention( + num_heads=12, key_size=2, attention_axes=attention_axes) + batch_size, hidden_size = 3, 8 + # Generate data for the input (non-mask) tensors. + query_shape = [batch_size] + q_dims + [hidden_size] + value_shape = [batch_size] + v_dims + [hidden_size] + mask_shape = [batch_size] + mask_dims + query = 10 * np.random.random_sample(query_shape) + value = 10 * np.random.random_sample(value_shape) + + # Invoke the data with a random set of mask data. This should mask at least + # one element. + mask_data = np.random.randint(2, size=mask_shape).astype("bool") + output = test_layer([query, value], mask_data) + + # Invoke the same data, but with a null mask (where no elements are masked). + null_mask_data = np.ones(mask_shape) + unmasked_output = test_layer([query, value], null_mask_data) + # Because one data is masked and one is not, the outputs should not be the + # same. 
+ self.assertNotAllClose(output, unmasked_output) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/modeling/layers/transformer.py b/models/official/nlp/modeling/layers/transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..92f509cf26b802dcd769b97e4c11987e713d8d16 --- /dev/null +++ b/models/official/nlp/modeling/layers/transformer.py @@ -0,0 +1,437 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Keras-based transformer block layer.""" +# pylint: disable=g-classes-have-attributes +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import gin +import tensorflow as tf + +from official.nlp.modeling.layers import attention +from official.nlp.modeling.layers import dense_einsum +from official.nlp.modeling.layers import multi_channel_attention +from official.nlp.modeling.layers.util import tf_function_if_eager + + +@tf.keras.utils.register_keras_serializable(package="Text") +class Transformer(tf.keras.layers.Layer): + """Transformer layer. + + This layer implements the Transformer from "Attention Is All You Need". + (https://arxiv.org/abs/1706.03762). + + Arguments: + num_attention_heads: Number of attention heads. + intermediate_size: Size of the intermediate layer. + intermediate_activation: Activation for the intermediate layer. + dropout_rate: Dropout probability for the post-attention and output dropout. + attention_dropout_rate: Dropout probability for within the attention layer. + output_range: the sequence output range, [0, output_range) by slicing the + target sequence. `None` means the target sequence is not sliced. + kernel_initializer: Initializer for dense layer kernels. + bias_initializer: Initializer for dense layer biases. + kernel_regularizer: Regularizer for dense layer kernels. + bias_regularizer: Regularizer for dense layer biases. + activity_regularizer: Regularizer for dense layer activity. + kernel_constraint: Constraint for dense layer kernels. + bias_constraint: Constraint for dense layer kernels. 
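A minimal usage sketch of the layer defined here (illustrative only, not part of this patch; the import path and call convention follow this file, but all sizes are made up and the width must be divisible by the number of heads):

import tensorflow as tf
from official.nlp.modeling.layers import transformer

# One Transformer block over a [batch, sequence, width] input with an attention mask.
block = transformer.Transformer(
    num_attention_heads=8,
    intermediate_size=1024,
    intermediate_activation="relu")
inputs = tf.keras.Input(shape=(16, 256))   # [batch, sequence, width]
mask = tf.keras.Input(shape=(16, 16))      # [batch, from_seq_len, to_seq_len]
outputs = block([inputs, mask])            # same shape as `inputs`
model = tf.keras.Model([inputs, mask], outputs)
print(model.output_shape)                  # (None, 16, 256)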
+ """ + + def __init__(self, + num_attention_heads, + intermediate_size, + intermediate_activation, + dropout_rate=0.0, + attention_dropout_rate=0.0, + output_range=None, + kernel_initializer="glorot_uniform", + bias_initializer="zeros", + kernel_regularizer=None, + bias_regularizer=None, + activity_regularizer=None, + kernel_constraint=None, + bias_constraint=None, + **kwargs): + super(Transformer, self).__init__(**kwargs) + + self._num_heads = num_attention_heads + self._intermediate_size = intermediate_size + self._intermediate_activation = intermediate_activation + self._attention_dropout_rate = attention_dropout_rate + self._dropout_rate = dropout_rate + self._output_range = output_range + self._kernel_initializer = tf.keras.initializers.get(kernel_initializer) + self._bias_initializer = tf.keras.initializers.get(bias_initializer) + self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer) + self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer) + self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer) + self._kernel_constraint = tf.keras.constraints.get(kernel_constraint) + self._bias_constraint = tf.keras.constraints.get(bias_constraint) + + def build(self, input_shape): + input_tensor = input_shape[0] if len(input_shape) == 2 else input_shape + input_tensor_shape = tf.TensorShape(input_tensor) + if len(input_tensor_shape) != 3: + raise ValueError("TransformerLayer expects a three-dimensional input of " + "shape [batch, sequence, width].") + batch_size, sequence_length, hidden_size = input_tensor_shape + + if len(input_shape) == 2: + mask_tensor_shape = tf.TensorShape(input_shape[1]) + expected_mask_tensor_shape = tf.TensorShape( + [batch_size, sequence_length, sequence_length]) + if not expected_mask_tensor_shape.is_compatible_with(mask_tensor_shape): + raise ValueError("When passing a mask tensor to TransformerLayer, the " + "mask tensor must be of shape [batch, " + "sequence_length, sequence_length] (here %s). Got a " + "mask tensor of shape %s." % + (expected_mask_tensor_shape, mask_tensor_shape)) + if hidden_size % self._num_heads != 0: + raise ValueError( + "The input size (%d) is not a multiple of the number of attention " + "heads (%d)" % (hidden_size, self._num_heads)) + self._attention_head_size = int(hidden_size // self._num_heads) + + self._attention_layer = attention.MultiHeadAttention( + num_heads=self._num_heads, + key_size=self._attention_head_size, + dropout=self._attention_dropout_rate, + kernel_initializer=self._kernel_initializer, + bias_initializer=self._bias_initializer, + kernel_regularizer=self._kernel_regularizer, + bias_regularizer=self._bias_regularizer, + activity_regularizer=self._activity_regularizer, + kernel_constraint=self._kernel_constraint, + bias_constraint=self._bias_constraint, + name="self_attention") + # pylint: disable=protected-access + self._attention_layer.build([input_tensor_shape] * 3) + self._attention_output_dense = self._attention_layer._output_dense + # pylint: enable=protected-access + self._attention_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate) + # Use float32 in layernorm for numeric stability. + # It is probably safe in mixed_float16, but we haven't validated this yet. 
+ self._attention_layer_norm = ( + tf.keras.layers.LayerNormalization( + name="self_attention_layer_norm", + axis=-1, + epsilon=1e-12, + dtype=tf.float32)) + self._intermediate_dense = dense_einsum.DenseEinsum( + output_shape=self._intermediate_size, + activation=None, + kernel_initializer=self._kernel_initializer, + bias_initializer=self._bias_initializer, + kernel_regularizer=self._kernel_regularizer, + bias_regularizer=self._bias_regularizer, + activity_regularizer=self._activity_regularizer, + kernel_constraint=self._kernel_constraint, + bias_constraint=self._bias_constraint, + name="intermediate") + policy = tf.keras.mixed_precision.experimental.global_policy() + if policy.name == "mixed_bfloat16": + # bfloat16 causes BERT with the LAMB optimizer to not converge + # as well, so we use float32. + # TODO(b/154538392): Investigate this. + policy = tf.float32 + self._intermediate_activation_layer = tf.keras.layers.Activation( + self._intermediate_activation, dtype=policy) + self._output_dense = dense_einsum.DenseEinsum( + output_shape=hidden_size, + kernel_initializer=self._kernel_initializer, + bias_initializer=self._bias_initializer, + kernel_regularizer=self._kernel_regularizer, + bias_regularizer=self._bias_regularizer, + activity_regularizer=self._activity_regularizer, + kernel_constraint=self._kernel_constraint, + bias_constraint=self._bias_constraint, + name="output") + self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate) + # Use float32 in layernorm for numeric stability. + self._output_layer_norm = tf.keras.layers.LayerNormalization( + name="output_layer_norm", axis=-1, epsilon=1e-12, dtype=tf.float32) + + super(Transformer, self).build(input_shape) + + def get_config(self): + config = { + "num_attention_heads": + self._num_heads, + "intermediate_size": + self._intermediate_size, + "intermediate_activation": + self._intermediate_activation, + "dropout_rate": + self._dropout_rate, + "attention_dropout_rate": + self._attention_dropout_rate, + "output_range": + self._output_range, + "kernel_initializer": + tf.keras.initializers.serialize(self._kernel_initializer), + "bias_initializer": + tf.keras.initializers.serialize(self._bias_initializer), + "kernel_regularizer": + tf.keras.regularizers.serialize(self._kernel_regularizer), + "bias_regularizer": + tf.keras.regularizers.serialize(self._bias_regularizer), + "activity_regularizer": + tf.keras.regularizers.serialize(self._activity_regularizer), + "kernel_constraint": + tf.keras.constraints.serialize(self._kernel_constraint), + "bias_constraint": + tf.keras.constraints.serialize(self._bias_constraint) + } + base_config = super(Transformer, self).get_config() + return dict(list(base_config.items()) + list(config.items())) + + def call(self, inputs): + if isinstance(inputs, (list, tuple)) and len(inputs) == 2: + input_tensor, attention_mask = inputs + else: + input_tensor, attention_mask = (inputs, None) + + if self._output_range: + target_tensor = input_tensor[:, 0:self._output_range, :] + attention_mask = attention_mask[:, 0:self._output_range, :] + else: + target_tensor = input_tensor + attention_inputs = [target_tensor, input_tensor] + + attention_output = self._attention_layer(attention_inputs, attention_mask) + attention_output = self._attention_dropout(attention_output) + attention_output = self._attention_layer_norm(target_tensor + + attention_output) + intermediate_output = self._intermediate_dense(attention_output) + intermediate_output = self._intermediate_activation_layer( + intermediate_output) + 
layer_output = self._output_dense(intermediate_output) + layer_output = self._output_dropout(layer_output) + # During mixed precision training, attention_output is from layer norm and + # is always fp32 for now. Cast layer_output to fp32 for the subsequent + # add. + layer_output = tf.cast(layer_output, tf.float32) + layer_output = self._output_layer_norm(layer_output + attention_output) + + return layer_output + + +@tf.keras.utils.register_keras_serializable(package="Text") +@gin.configurable +class CompiledTransformer(Transformer): + + @tf_function_if_eager(experimental_compile=True) + def call(self, inputs): + return super(CompiledTransformer, self).call(inputs) + + +@tf.keras.utils.register_keras_serializable(package="Text") +class TransformerDecoderLayer(tf.keras.layers.Layer): + """Single transformer layer for decoder. + + It has three sub-layers: + (1) a multi-head self-attention mechanism. + (2) an encoder-decoder attention. + (3) a positionwise fully connected feed-forward network. + + Arguments: + num_attention_heads: Number of attention heads. + intermediate_size: Size of the intermediate layer. + intermediate_activation: Activation for the intermediate layer. + dropout_rate: Dropout probability for the post-attention and output dropout. + attention_dropout_rate: Dropout probability for within the attention layer. + multi_channel_cross_attention: Whether to use `MultiChannelAttention` for + cross-attention between target sequences and source sequences. + kernel_initializer: Initializer for dense layer kernels. + bias_initializer: Initializer for dense layer biases. + kernel_regularizer: Regularizer for dense layer kernels. + bias_regularizer: Regularizer for dense layer biases. + activity_regularizer: Regularizer for dense layer activity. + kernel_constraint: Constraint for dense layer kernels. + bias_constraint: Constraint for dense layer biases.
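A rough usage sketch of the decoder layer's four-input call contract (illustrative shapes only, not part of this patch): the layer self-attends over the target sequence, cross-attends over the encoder memory, and returns the transformed targets together with the decoding cache it was given:

import tensorflow as tf
from official.nlp.modeling.layers import transformer

# Hypothetical sizes: batch=2, target_len=4, source_len=6, hidden=16, heads=2.
decoder_block = transformer.TransformerDecoderLayer(
    num_attention_heads=2,
    intermediate_size=32,
    intermediate_activation="relu")
targets = tf.random.normal([2, 4, 16])   # decoder inputs: [batch, target_len, hidden]
memory = tf.random.normal([2, 6, 16])    # encoder outputs: [batch, source_len, hidden]
cross_mask = tf.ones([2, 4, 6])          # target-to-source attention mask
self_mask = tf.ones([2, 4, 4])           # target self-attention (e.g. causal) mask
outputs, cache = decoder_block(
    [targets, memory, cross_mask, self_mask], cache=None)
print(outputs.shape)                     # (2, 4, 16)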
+ """ + + def __init__(self, + num_attention_heads, + intermediate_size, + intermediate_activation, + dropout_rate=0.0, + attention_dropout_rate=0.0, + multi_channel_cross_attention=False, + kernel_initializer="glorot_uniform", + bias_initializer="zeros", + kernel_regularizer=None, + bias_regularizer=None, + activity_regularizer=None, + kernel_constraint=None, + bias_constraint=None, + **kwargs): + super(TransformerDecoderLayer, self).__init__(**kwargs) + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.intermediate_activation = tf.keras.activations.get( + intermediate_activation) + self.dropout_rate = dropout_rate + self.attention_dropout_rate = attention_dropout_rate + self.multi_channel_cross_attention = multi_channel_cross_attention + self._kernel_initializer = tf.keras.initializers.get(kernel_initializer) + self._bias_initializer = tf.keras.initializers.get(bias_initializer) + self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer) + self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer) + self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer) + self._kernel_constraint = tf.keras.constraints.get(kernel_constraint) + self._bias_constraint = tf.keras.constraints.get(bias_constraint) + if self.multi_channel_cross_attention: + self._cross_attention_cls = multi_channel_attention.MultiChannelAttention + else: + self._cross_attention_cls = attention.MultiHeadAttention + + def build(self, input_shape): + target_tensor_shape = tf.TensorShape(input_shape[0]) + if len(target_tensor_shape) != 3: + raise ValueError("TransformerLayer expects a three-dimensional input of " + "shape [batch, sequence, width].") + hidden_size = target_tensor_shape[2] + if hidden_size % self.num_attention_heads != 0: + raise ValueError( + "The hidden size (%d) is not a multiple of the number of attention " + "heads (%d)" % (hidden_size, self.num_attention_heads)) + self.attention_head_size = int(hidden_size / self.num_attention_heads) + # Self attention. + self.self_attention = attention.CachedAttention( + num_heads=self.num_attention_heads, + key_size=self.attention_head_size, + dropout=self.attention_dropout_rate, + kernel_initializer=self._kernel_initializer, + bias_initializer=self._bias_initializer, + kernel_regularizer=self._kernel_regularizer, + bias_regularizer=self._bias_regularizer, + activity_regularizer=self._activity_regularizer, + kernel_constraint=self._kernel_constraint, + bias_constraint=self._bias_constraint, + name="self_attention") + self.self_attention_output_dense = dense_einsum.DenseEinsum( + output_shape=hidden_size, + num_summed_dimensions=2, + kernel_initializer=self._kernel_initializer, + bias_initializer=self._bias_initializer, + kernel_regularizer=self._kernel_regularizer, + bias_regularizer=self._bias_regularizer, + activity_regularizer=self._activity_regularizer, + kernel_constraint=self._kernel_constraint, + bias_constraint=self._bias_constraint, + name="self_attention_output") + self.self_attention_dropout = tf.keras.layers.Dropout( + rate=self.dropout_rate) + self.self_attention_layer_norm = ( + tf.keras.layers.LayerNormalization( + name="self_attention_layer_norm", axis=-1, epsilon=1e-12)) + # Encoder-decoder attention. 
+ self.encdec_attention = self._cross_attention_cls( + num_heads=self.num_attention_heads, + key_size=self.attention_head_size, + dropout=self.attention_dropout_rate, + output_shape=hidden_size, + kernel_initializer=self._kernel_initializer, + bias_initializer=self._bias_initializer, + kernel_regularizer=self._kernel_regularizer, + bias_regularizer=self._bias_regularizer, + activity_regularizer=self._activity_regularizer, + kernel_constraint=self._kernel_constraint, + bias_constraint=self._bias_constraint, + name="attention/encdec") + + self.encdec_attention_dropout = tf.keras.layers.Dropout( + rate=self.dropout_rate) + self.encdec_attention_layer_norm = ( + tf.keras.layers.LayerNormalization( + name="attention/encdec_output_layer_norm", axis=-1, epsilon=1e-12)) + + # Feed-forward projection. + self.intermediate_dense = dense_einsum.DenseEinsum( + output_shape=self.intermediate_size, + activation=None, + kernel_initializer=self._kernel_initializer, + bias_initializer=self._bias_initializer, + kernel_regularizer=self._kernel_regularizer, + bias_regularizer=self._bias_regularizer, + activity_regularizer=self._activity_regularizer, + kernel_constraint=self._kernel_constraint, + bias_constraint=self._bias_constraint, + name="intermediate") + self.intermediate_activation_layer = tf.keras.layers.Activation( + self.intermediate_activation) + self.output_dense = dense_einsum.DenseEinsum( + output_shape=hidden_size, + kernel_initializer=self._kernel_initializer, + bias_initializer=self._bias_initializer, + kernel_regularizer=self._kernel_regularizer, + bias_regularizer=self._bias_regularizer, + activity_regularizer=self._activity_regularizer, + kernel_constraint=self._kernel_constraint, + bias_constraint=self._bias_constraint, + name="output") + self.output_dropout = tf.keras.layers.Dropout(rate=self.dropout_rate) + self.output_layer_norm = tf.keras.layers.LayerNormalization( + name="output_layer_norm", axis=-1, epsilon=1e-12) + super(TransformerDecoderLayer, self).build(input_shape) + + def common_layers_with_encoder(self): + """Gets layer objects that can make a Transformer encoder block.""" + return [ + self.self_attention, self.self_attention_layer_norm, + self.intermediate_dense, self.output_dense, self.output_layer_norm + ] + + def call(self, inputs, cache=None, decode_loop_step=None): + if self.multi_channel_cross_attention: + if len(inputs) != 5: + raise ValueError( + "TransformerDecoderLayer must have 5 inputs, when it uses " + "multi_channel_cross_attention. But it got: %d" % len(inputs)) + elif len(inputs) != 4: + raise ValueError( + "TransformerDecoderLayer must have 4 inputs, but it got: %d" % + len(inputs)) + input_tensor, memory, attention_mask, self_attention_mask = inputs[:4] + self_attention_inputs = [input_tensor, input_tensor] + self_attention_output, cache = self.self_attention( + self_attention_inputs, + attention_mask=self_attention_mask, + cache=cache, + decode_loop_step=decode_loop_step) + self_attention_output = self.self_attention_dropout(self_attention_output) + self_attention_output = self.self_attention_layer_norm( + input_tensor + self_attention_output) + + cross_attn_inputs = [self_attention_output, memory] + if self.multi_channel_cross_attention: + # Accesses the 5-th input tensor for the doc-attention probabilities. 
+ cross_attn_inputs.append(inputs[-1]) + attention_output = self.encdec_attention(cross_attn_inputs, attention_mask) + attention_output = self.encdec_attention_dropout(attention_output) + attention_output = self.encdec_attention_layer_norm(self_attention_output + + attention_output) + + intermediate_output = self.intermediate_dense(attention_output) + intermediate_output = self.intermediate_activation_layer( + intermediate_output) + layer_output = self.output_dense(intermediate_output) + layer_output = self.output_dropout(layer_output) + layer_output = self.output_layer_norm(layer_output + attention_output) + return layer_output, cache diff --git a/models/official/nlp/modeling/layers/transformer_scaffold.py b/models/official/nlp/modeling/layers/transformer_scaffold.py new file mode 100644 index 0000000000000000000000000000000000000000..d988febfa68a3e45d3919892ba677c85350f71d6 --- /dev/null +++ b/models/official/nlp/modeling/layers/transformer_scaffold.py @@ -0,0 +1,285 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Keras-based transformer scaffold layer.""" +# pylint: disable=g-classes-have-attributes +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import gin +import tensorflow as tf + +from official.nlp.modeling.layers import attention + + +@tf.keras.utils.register_keras_serializable(package="Text") +@gin.configurable +class TransformerScaffold(tf.keras.layers.Layer): + """Transformer scaffold layer. + + This layer implements the Transformer from "Attention Is All You Need". + (https://arxiv.org/abs/1706.03762), with a customizable attention layer and + feedforward layer option. Users can pass a class to + `attention_cls`/`feedforward_cls` and associated config to + `attention_cfg`/`feedforward_cfg`, in which case the scaffold will + instantiate the class with the config, or pass a class instance to + `attention_cls`/`feedforward_cls`. + + Arguments: + num_attention_heads: Number of attention heads. + intermediate_size: Size of the intermediate layer. + intermediate_activation: Activation for the intermediate layer. + attention_cls: A class to instantiate attention layer, or a layer instance. + attention_cfg: The config with which to instantiate `attention_cls`. Ignored + if attention_cls is a layer instance or None. If `attention_cls` is a + class, but `attention_cfg` is None, following kwargs will be used to + instantiate the attention instance: + { + "num_heads": num_attention_heads, + "key_size": int(hidden_size // num_attention_heads), + "dropout": attention_dropout_rate, + "name": "self_attention" + }, where `hidden_size` is the input tensor's last dimension. + feedforward_cls: A class to instantiate feedforward layer, or a layer + instance. 
If None, will use the standard feedforward layer as described + in "Attention Is All You Need" paper. If not None, the instantiated + feedforward layer is expected to take the output of attention as input + and its output is this transformer layer's output. + feedforward_cfg: The config with which to instantiate `feedforward_cls`. + Ignored if feedforward_cls is a layer instance or is None. + If `feedforward_cls` is a class, but `feedforward_cfg` is None, following + kwargs will be used to instantiate the feedforward instance: + { + "intermediate_size": intermediate_size, + "intermediate_activation": intermediate_activation, + "dropout": dropout_rate, + "name": "feedforward" + }. + dropout_rate: Dropout probability for the post-attention and output dropout. + attention_dropout_rate: Dropout probability for within the attention layer. + kernel_initializer: Initializer for dense layer kernels. + bias_initializer: Initializer for dense layer biases. + kernel_regularizer: Regularizer for dense layer kernels. + bias_regularizer: Regularizer for dense layer biases. + activity_regularizer: Regularizer for dense layer activity. + kernel_constraint: Constraint for dense layer kernels. + bias_constraint: Constraint for dense layer kernels. + """ + + def __init__(self, + num_attention_heads, + intermediate_size, + intermediate_activation, + attention_cls=attention.MultiHeadAttention, + attention_cfg=None, + feedforward_cls=None, + feedforward_cfg=None, + dropout_rate=0.0, + attention_dropout_rate=0.0, + kernel_initializer="glorot_uniform", + bias_initializer="zeros", + kernel_regularizer=None, + bias_regularizer=None, + activity_regularizer=None, + kernel_constraint=None, + bias_constraint=None, + **kwargs): + super(TransformerScaffold, self).__init__(**kwargs) + + self._attention_cfg = attention_cfg + self._attention_cls = attention_cls + self._feedforward_cls = feedforward_cls + self._feedforward_cfg = feedforward_cfg + self._num_heads = num_attention_heads + self._intermediate_size = intermediate_size + self._intermediate_activation = intermediate_activation + self._attention_dropout_rate = attention_dropout_rate + self._dropout_rate = dropout_rate + self._kernel_initializer = tf.keras.initializers.get(kernel_initializer) + self._bias_initializer = tf.keras.initializers.get(bias_initializer) + self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer) + self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer) + self._kernel_constraint = tf.keras.constraints.get(kernel_constraint) + self._bias_constraint = tf.keras.constraints.get(bias_constraint) + + def build(self, input_shape): + input_tensor = input_shape[0] if len(input_shape) == 2 else input_shape + input_tensor_shape = tf.TensorShape(input_tensor) + if len(input_tensor_shape) != 3: + raise ValueError( + "TransformerScaffold expects a three-dimensional input of " + "shape [batch, sequence, width].") + batch_size, sequence_length, hidden_size = input_tensor_shape + + if len(input_shape) == 2: + mask_tensor_shape = tf.TensorShape(input_shape[1]) + expected_mask_tensor_shape = tf.TensorShape( + [batch_size, sequence_length, sequence_length]) + if not expected_mask_tensor_shape.is_compatible_with(mask_tensor_shape): + raise ValueError("When passing a mask tensor to TransformerLayer, the " + "mask tensor must be of shape [batch, " + "sequence_length, sequence_length] (here %s). Got a " + "mask tensor of shape %s." 
% + (expected_mask_tensor_shape, mask_tensor_shape)) + if hidden_size % self._num_heads != 0: + raise ValueError( + "The input size (%d) is not a multiple of the number of attention " + "heads (%d)" % (hidden_size, self._num_heads)) + self._attention_head_size = int(hidden_size // self._num_heads) + + common_kwargs = dict( + kernel_initializer=self._kernel_initializer, + bias_initializer=self._bias_initializer, + kernel_regularizer=self._kernel_regularizer, + bias_regularizer=self._bias_regularizer, + activity_regularizer=self._activity_regularizer, + kernel_constraint=self._kernel_constraint, + bias_constraint=self._bias_constraint) + + def get_layer_instance(instance_or_cls, config, default_config): + if isinstance(instance_or_cls, tf.keras.layers.Layer): + return instance_or_cls + else: + if config is None: + return instance_or_cls(**default_config) + else: + return instance_or_cls(**config) + + default_attention_cfg = { + "num_heads": self._num_heads, + "key_size": self._attention_head_size, + "dropout": self._attention_dropout_rate, + "name": "self_attention" + } + default_attention_cfg.update(common_kwargs) + self._attention_layer = get_layer_instance( + self._attention_cls, + config=self._attention_cfg, + default_config=default_attention_cfg) + + if self._feedforward_cls is not None: + default_feedforward_cfg = { + "intermediate_size": self._intermediate_size, + "intermediate_activation": self._intermediate_activation, + "dropout": self._dropout_rate, + "name": "feedforward", + } + default_feedforward_cfg.update(common_kwargs) + self._feedforward_block = get_layer_instance( + self._feedforward_cls, + config=self._feedforward_cfg, + default_config=default_feedforward_cfg) + else: + self._feedforward_block = None + + self._attention_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate) + # Use float32 in layernorm for numeric stability. + # It is probably safe in mixed_float16, but we haven't validated this yet. + self._attention_layer_norm = ( + tf.keras.layers.LayerNormalization( + name="self_attention_layer_norm", axis=-1, epsilon=1e-12, + dtype=tf.float32)) + + if self._feedforward_block is None: + self._intermediate_dense = tf.keras.layers.experimental.EinsumDense( + "abc,cd->abd", + output_shape=(None, self._intermediate_size), + bias_axes="d", + name="intermediate", + **common_kwargs) + policy = tf.keras.mixed_precision.experimental.global_policy() + if policy.name == "mixed_bfloat16": + # bfloat16 causes BERT with the LAMB optimizer to not converge + # as well, so we use float32. + # TODO(b/154538392): Investigate this. + policy = tf.float32 + self._intermediate_activation_layer = tf.keras.layers.Activation( + self._intermediate_activation, dtype=policy) + self._output_dense = tf.keras.layers.experimental.EinsumDense( + "abc,cd->abd", + output_shape=(None, hidden_size), + bias_axes="d", + name="output", + **common_kwargs) + + self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate) + # Use float32 in layernorm for numeric stability. 
+ self._output_layer_norm = tf.keras.layers.LayerNormalization( + name="output_layer_norm", axis=-1, epsilon=1e-12, dtype=tf.float32) + + super(TransformerScaffold, self).build(input_shape) + + def get_config(self): + config = { + "attention_cls": + self._attention_layer, + "feedforward_cls": + self._feedforward_block, + "num_attention_heads": + self._num_heads, + "intermediate_size": + self._intermediate_size, + "intermediate_activation": + self._intermediate_activation, + "dropout_rate": + self._dropout_rate, + "attention_dropout_rate": + self._attention_dropout_rate, + "kernel_initializer": + tf.keras.initializers.serialize(self._kernel_initializer), + "bias_initializer": + tf.keras.initializers.serialize(self._bias_initializer), + "kernel_regularizer": + tf.keras.regularizers.serialize(self._kernel_regularizer), + "bias_regularizer": + tf.keras.regularizers.serialize(self._bias_regularizer), + "activity_regularizer": + tf.keras.regularizers.serialize(self._activity_regularizer), + "kernel_constraint": + tf.keras.constraints.serialize(self._kernel_constraint), + "bias_constraint": + tf.keras.constraints.serialize(self._bias_constraint) + } + base_config = super(TransformerScaffold, self).get_config() + return dict(list(base_config.items()) + list(config.items())) + + def call(self, inputs): + if isinstance(inputs, (list, tuple)) and len(inputs) == 2: + input_tensor, attention_mask = inputs + else: + input_tensor, attention_mask = (inputs, None) + + attention_inputs = [input_tensor, input_tensor] + + attention_output = self._attention_layer(attention_inputs, attention_mask) + attention_output = self._attention_dropout(attention_output) + attention_output = self._attention_layer_norm(input_tensor + + attention_output) + if self._feedforward_block is None: + intermediate_output = self._intermediate_dense(attention_output) + intermediate_output = self._intermediate_activation_layer( + intermediate_output) + layer_output = self._output_dense(intermediate_output) + layer_output = self._output_dropout(layer_output) + # During mixed precision training, attention_output is from layer norm + # and is always fp32 for now. Cast layer_output to fp32 for the subsequent + # add. + layer_output = tf.cast(layer_output, tf.float32) + layer_output = self._output_layer_norm(layer_output + attention_output) + else: + layer_output = self._feedforward_block(attention_output) + + return layer_output diff --git a/models/official/nlp/modeling/layers/transformer_scaffold_test.py b/models/official/nlp/modeling/layers/transformer_scaffold_test.py new file mode 100644 index 0000000000000000000000000000000000000000..ad919889569501c1c29a3c0f88f3e1d1621aec3a --- /dev/null +++ b/models/official/nlp/modeling/layers/transformer_scaffold_test.py @@ -0,0 +1,544 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for Keras-based transformer block layer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import json + +import numpy as np +import tensorflow as tf + +from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import +from official.nlp.modeling.layers import attention +from official.nlp.modeling.layers import transformer_scaffold + + +# Test class that wraps a standard attention layer. If this layer is called +# at any point, the list passed to the config object will be filled with a +# boolean 'True'. We register this class as a Keras serializable so we can +# test serialization below. +@tf.keras.utils.register_keras_serializable(package='TestOnlyAttention') +class ValidatedAttentionLayer(attention.MultiHeadAttention): + + def __init__(self, call_list, **kwargs): + super(ValidatedAttentionLayer, self).__init__(**kwargs) + self.list = call_list + + def call(self, inputs, attention_mask=None): + self.list.append(True) + return super(ValidatedAttentionLayer, self).call( + inputs, attention_mask=attention_mask) + + def get_config(self): + config = super(ValidatedAttentionLayer, self).get_config() + config['call_list'] = [] + return config + + +# Test class implements a simple feedforward layer. If this layer is called +# at any point, the list passed to the config object will be filled with a +# boolean 'True'. We register this class as a Keras serializable so we can +# test serialization below. +@tf.keras.utils.register_keras_serializable(package='TestOnlyFeedforward') +class ValidatedFeedforwardLayer(tf.keras.layers.Layer): + + def __init__(self, call_list, activation, **kwargs): + super(ValidatedFeedforwardLayer, self).__init__(**kwargs) + self.list = call_list + self.activation = activation + + def build(self, input_shape): + hidden_size = input_shape.as_list()[-1] + self._feedforward_dense = tf.keras.layers.experimental.EinsumDense( + '...x,xy->...y', + output_shape=hidden_size, + bias_axes='y', + activation=self.activation, + name='feedforward') + + def call(self, inputs): + self.list.append(True) + return self._feedforward_dense(inputs) + + def get_config(self): + config = super(ValidatedFeedforwardLayer, self).get_config() + config['call_list'] = [] + config['activation'] = self.activation + return config + + +# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It +# guarantees forward compatibility of this code for the V2 switchover. +@keras_parameterized.run_all_keras_modes +class TransformerLayerTest(keras_parameterized.TestCase): + + def tearDown(self): + super(TransformerLayerTest, self).tearDown() + tf.keras.mixed_precision.experimental.set_policy('float32') + + def test_layer_creation(self): + sequence_length = 21 + width = 80 + + call_list = [] + attention_layer_cfg = { + 'num_heads': 10, + 'key_size': 8, + 'call_list': call_list, + } + test_layer = transformer_scaffold.TransformerScaffold( + attention_cls=ValidatedAttentionLayer, + attention_cfg=attention_layer_cfg, + num_attention_heads=10, + intermediate_size=2048, + intermediate_activation='relu') + + # Create a 3-dimensional input (the first dimension is implicit). + data_tensor = tf.keras.Input(shape=(sequence_length, width)) + output_tensor = test_layer(data_tensor) + # The default output of a transformer layer should be the same as the input. 
+ self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list()) + + # If call_list[0] exists and is True, the passed layer class was + # instantiated from the given config properly. + self.assertNotEmpty(call_list) + self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") + + def test_layer_creation_with_feedforward_cls(self): + sequence_length = 21 + width = 80 + + call_list = [] + attention_layer_cfg = { + 'num_heads': 10, + 'key_size': 8, + 'call_list': call_list, + } + feedforward_call_list = [] + feedforward_layer_cfg = { + 'activation': 'relu', + 'call_list': feedforward_call_list, + } + test_layer = transformer_scaffold.TransformerScaffold( + attention_cls=ValidatedAttentionLayer, + attention_cfg=attention_layer_cfg, + feedforward_cls=ValidatedFeedforwardLayer, + feedforward_cfg=feedforward_layer_cfg, + num_attention_heads=10, + intermediate_size=None, + intermediate_activation=None) + + # Create a 3-dimensional input (the first dimension is implicit). + data_tensor = tf.keras.Input(shape=(sequence_length, width)) + output_tensor = test_layer(data_tensor) + # The default output of a transformer layer should be the same as the input. + self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list()) + + # If call_list[0] exists and is True, the passed layer class was + # instantiated from the given config properly. + self.assertNotEmpty(call_list) + self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") + self.assertNotEmpty(feedforward_call_list) + self.assertTrue(feedforward_call_list[0], + "The passed layer class wasn't instantiated.") + + def test_layer_creation_with_mask(self): + sequence_length = 21 + width = 80 + + call_list = [] + attention_layer_cfg = { + 'num_heads': 10, + 'key_size': 8, + 'call_list': call_list, + } + test_layer = transformer_scaffold.TransformerScaffold( + attention_cls=ValidatedAttentionLayer, + attention_cfg=attention_layer_cfg, + num_attention_heads=10, + intermediate_size=2048, + intermediate_activation='relu') + + # Create a 3-dimensional input (the first dimension is implicit). + data_tensor = tf.keras.Input(shape=(sequence_length, width)) + # Create a 2-dimensional input (the first dimension is implicit). + mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) + output_tensor = test_layer([data_tensor, mask_tensor]) + # The default output of a transformer layer should be the same as the input. + self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list()) + # If call_list[0] exists and is True, the passed layer class was + # instantiated from the given config properly. + self.assertNotEmpty(call_list) + self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") + + def test_layer_creation_with_incorrect_mask_fails(self): + sequence_length = 21 + width = 80 + + call_list = [] + attention_layer_cfg = { + 'num_heads': 10, + 'key_size': 8, + 'call_list': call_list, + } + test_layer = transformer_scaffold.TransformerScaffold( + attention_cls=ValidatedAttentionLayer, + attention_cfg=attention_layer_cfg, + num_attention_heads=10, + intermediate_size=2048, + intermediate_activation='relu') + + # Create a 3-dimensional input (the first dimension is implicit). + data_tensor = tf.keras.Input(shape=(sequence_length, width)) + # Create a 2-dimensional input (the first dimension is implicit). 
+ mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length - 3)) + with self.assertRaisesRegex(ValueError, 'When passing a mask tensor.*'): + _ = test_layer([data_tensor, mask_tensor]) + + def test_layer_invocation(self): + sequence_length = 21 + width = 80 + + call_list = [] + attention_layer_cfg = { + 'num_heads': 10, + 'key_size': 8, + 'call_list': call_list, + } + test_layer = transformer_scaffold.TransformerScaffold( + attention_cls=ValidatedAttentionLayer, + attention_cfg=attention_layer_cfg, + num_attention_heads=10, + intermediate_size=2048, + intermediate_activation='relu') + + # Create a 3-dimensional input (the first dimension is implicit). + data_tensor = tf.keras.Input(shape=(sequence_length, width)) + output_tensor = test_layer(data_tensor) + + # Create a model from the test layer. + model = tf.keras.Model(data_tensor, output_tensor) + + # Invoke the model on test data. We can't validate the output data itself + # (the NN is too complex) but this will rule out structural runtime errors. + batch_size = 6 + input_data = 10 * np.random.random_sample( + (batch_size, sequence_length, width)) + _ = model.predict(input_data) + # If call_list[0] exists and is True, the passed layer class was + # instantiated from the given config properly. + self.assertNotEmpty(call_list) + self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") + + def test_layer_invocation_with_feedforward_cls(self): + sequence_length = 21 + width = 80 + + call_list = [] + attention_layer_cfg = { + 'num_heads': 10, + 'key_size': 8, + 'call_list': call_list, + } + feedforward_call_list = [] + feedforward_layer_cfg = { + 'activation': 'relu', + 'call_list': feedforward_call_list, + } + feedforward_layer = ValidatedFeedforwardLayer(**feedforward_layer_cfg) + test_layer = transformer_scaffold.TransformerScaffold( + attention_cls=ValidatedAttentionLayer, + attention_cfg=attention_layer_cfg, + feedforward_cls=feedforward_layer, + num_attention_heads=10, + intermediate_size=None, + intermediate_activation=None) + + # Create a 3-dimensional input (the first dimension is implicit). + data_tensor = tf.keras.Input(shape=(sequence_length, width)) + # Create a 2-dimensional input (the first dimension is implicit). + mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) + output_tensor = test_layer([data_tensor, mask_tensor]) + + # Create a model from the test layer. + model = tf.keras.Model([data_tensor, mask_tensor], output_tensor) + + # Invoke the model on test data. We can't validate the output data itself + # (the NN is too complex) but this will rule out structural runtime errors. + batch_size = 6 + input_data = 10 * np.random.random_sample( + (batch_size, sequence_length, width)) + # The attention mask should be of shape (batch, from_seq_len, to_seq_len), + # which here is (batch, sequence_length, sequence_length) + mask_data = np.random.randint( + 2, size=(batch_size, sequence_length, sequence_length)) + _ = model.predict([input_data, mask_data]) + # If call_list[0] exists and is True, the passed layer class was + # instantiated from the given config properly. 
+ self.assertNotEmpty(call_list) + self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") + self.assertNotEmpty(feedforward_call_list) + self.assertTrue(feedforward_call_list[0], + "The passed layer class wasn't instantiated.") + + def test_layer_invocation_with_mask(self): + sequence_length = 21 + width = 80 + + call_list = [] + attention_layer_cfg = { + 'num_heads': 10, + 'key_size': 8, + 'call_list': call_list, + } + test_layer = transformer_scaffold.TransformerScaffold( + attention_cls=ValidatedAttentionLayer, + attention_cfg=attention_layer_cfg, + num_attention_heads=10, + intermediate_size=2048, + intermediate_activation='relu') + + # Create a 3-dimensional input (the first dimension is implicit). + data_tensor = tf.keras.Input(shape=(sequence_length, width)) + # Create a 2-dimensional input (the first dimension is implicit). + mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) + output_tensor = test_layer([data_tensor, mask_tensor]) + + # Create a model from the test layer. + model = tf.keras.Model([data_tensor, mask_tensor], output_tensor) + + # Invoke the model on test data. We can't validate the output data itself + # (the NN is too complex) but this will rule out structural runtime errors. + batch_size = 6 + input_data = 10 * np.random.random_sample( + (batch_size, sequence_length, width)) + # The attention mask should be of shape (batch, from_seq_len, to_seq_len), + # which here is (batch, sequence_length, sequence_length) + mask_data = np.random.randint( + 2, size=(batch_size, sequence_length, sequence_length)) + _ = model.predict([input_data, mask_data]) + # If call_list[0] exists and is True, the passed layer class was + # instantiated from the given config properly. + self.assertNotEmpty(call_list) + self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") + + def test_layer_invocation_with_float16_dtype(self): + tf.keras.mixed_precision.experimental.set_policy('mixed_float16') + sequence_length = 21 + width = 80 + + call_list = [] + attention_layer_cfg = { + 'num_heads': 10, + 'key_size': 8, + 'call_list': call_list, + } + test_layer = transformer_scaffold.TransformerScaffold( + attention_cls=ValidatedAttentionLayer, + attention_cfg=attention_layer_cfg, + num_attention_heads=10, + intermediate_size=2048, + intermediate_activation='relu') + + # Create a 3-dimensional input (the first dimension is implicit). + data_tensor = tf.keras.Input(shape=(sequence_length, width)) + # Create a 2-dimensional input (the first dimension is implicit). + mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) + output_tensor = test_layer([data_tensor, mask_tensor]) + + # Create a model from the test layer. + model = tf.keras.Model([data_tensor, mask_tensor], output_tensor) + + # Invoke the model on test data. We can't validate the output data itself + # (the NN is too complex) but this will rule out structural runtime errors. + batch_size = 6 + input_data = (10 * np.random.random_sample( + (batch_size, sequence_length, width))) + # The attention mask should be of shape (batch, from_seq_len, to_seq_len), + # which here is (batch, sequence_length, sequence_length) + mask_data = np.random.randint( + 2, size=(batch_size, sequence_length, sequence_length)) + _ = model.predict([input_data, mask_data]) + # If call_list[0] exists and is True, the passed layer class was + # instantiated from the given config properly. 
+ self.assertNotEmpty(call_list) + self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") + + def test_transform_with_initializer(self): + sequence_length = 21 + width = 80 + + call_list = [] + attention_layer_cfg = { + 'num_heads': 10, + 'key_size': 8, + 'call_list': call_list, + } + test_layer = transformer_scaffold.TransformerScaffold( + attention_cls=ValidatedAttentionLayer, + attention_cfg=attention_layer_cfg, + num_attention_heads=10, + intermediate_size=2048, + intermediate_activation='relu', + kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)) + + # Create a 3-dimensional input (the first dimension is implicit). + data_tensor = tf.keras.Input(shape=(sequence_length, width)) + output = test_layer(data_tensor) + # The default output of a transformer layer should be the same as the input. + self.assertEqual(data_tensor.shape.as_list(), output.shape.as_list()) + # If call_list[0] exists and is True, the passed layer class was + # instantiated from the given config properly. + self.assertNotEmpty(call_list) + self.assertTrue(call_list[0]) + + def test_layer_restoration_from_config(self): + sequence_length = 21 + width = 80 + + call_list = [] + attention_layer_cfg = { + 'num_heads': 10, + 'key_size': 8, + 'call_list': call_list, + 'name': 'test_layer', + } + test_layer = transformer_scaffold.TransformerScaffold( + attention_cls=ValidatedAttentionLayer, + attention_cfg=attention_layer_cfg, + num_attention_heads=10, + intermediate_size=2048, + intermediate_activation='relu') + + # Create a 3-dimensional input (the first dimension is implicit). + data_tensor = tf.keras.Input(shape=(sequence_length, width)) + # Create a 2-dimensional input (the first dimension is implicit). + mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) + output_tensor = test_layer([data_tensor, mask_tensor]) + + # Create a model from the test layer. + model = tf.keras.Model([data_tensor, mask_tensor], output_tensor) + + # Invoke the model on test data. We can't validate the output data itself + # (the NN is too complex) but this will rule out structural runtime errors. + batch_size = 6 + input_data = 10 * np.random.random_sample( + (batch_size, sequence_length, width)) + # The attention mask should be of shape (batch, from_seq_len, to_seq_len), + # which here is (batch, sequence_length, sequence_length) + mask_data = np.random.randint( + 2, size=(batch_size, sequence_length, sequence_length)) + pre_serialization_output = model.predict([input_data, mask_data]) + + # Serialize the model config. Pass the serialized data through json to + # ensure that we can serialize this layer to disk. + serialized_data = json.dumps(model.get_config()) + post_string_serialized_data = json.loads(serialized_data) + + # Create a new model from the old config, and copy the weights. These models + # should have identical outputs. + new_model = tf.keras.Model.from_config(post_string_serialized_data) + new_model.set_weights(model.get_weights()) + output = new_model.predict([input_data, mask_data]) + + self.assertAllClose(pre_serialization_output, output) + + # If the layer was configured correctly, it should have a list attribute + # (since it should have the custom class and config passed to it). 
+ new_model.summary() + new_call_list = new_model.get_layer( + name='transformer_scaffold')._attention_layer.list + self.assertNotEmpty(new_call_list) + self.assertTrue(new_call_list[0], + "The passed layer class wasn't instantiated.") + + def test_layer_with_feedforward_cls_restoration_from_config(self): + sequence_length = 21 + width = 80 + + call_list = [] + attention_layer_cfg = { + 'num_heads': 10, + 'key_size': 8, + 'call_list': call_list, + 'name': 'test_layer', + } + feedforward_call_list = [] + feedforward_layer_cfg = { + 'activation': 'relu', + 'call_list': feedforward_call_list, + } + test_layer = transformer_scaffold.TransformerScaffold( + attention_cls=ValidatedAttentionLayer, + attention_cfg=attention_layer_cfg, + feedforward_cls=ValidatedFeedforwardLayer, + feedforward_cfg=feedforward_layer_cfg, + num_attention_heads=10, + intermediate_size=None, + intermediate_activation=None) + + # Create a 3-dimensional input (the first dimension is implicit). + data_tensor = tf.keras.Input(shape=(sequence_length, width)) + # Create a 2-dimensional input (the first dimension is implicit). + mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) + output_tensor = test_layer([data_tensor, mask_tensor]) + + # Create a model from the test layer. + model = tf.keras.Model([data_tensor, mask_tensor], output_tensor) + + # Invoke the model on test data. We can't validate the output data itself + # (the NN is too complex) but this will rule out structural runtime errors. + batch_size = 6 + input_data = 10 * np.random.random_sample( + (batch_size, sequence_length, width)) + # The attention mask should be of shape (batch, from_seq_len, to_seq_len), + # which here is (batch, sequence_length, sequence_length) + mask_data = np.random.randint( + 2, size=(batch_size, sequence_length, sequence_length)) + pre_serialization_output = model.predict([input_data, mask_data]) + + # Serialize the model config. Pass the serialized data through json to + # ensure that we can serialize this layer to disk. + serialized_data = json.dumps(model.get_config()) + post_string_serialized_data = json.loads(serialized_data) + + # Create a new model from the old config, and copy the weights. These models + # should have identical outputs. + new_model = tf.keras.Model.from_config(post_string_serialized_data) + new_model.set_weights(model.get_weights()) + output = new_model.predict([input_data, mask_data]) + + self.assertAllClose(pre_serialization_output, output) + + # If the layer was configured correctly, it should have a list attribute + # (since it should have the custom class and config passed to it). + new_model.summary() + new_call_list = new_model.get_layer( + name='transformer_scaffold')._attention_layer.list + self.assertNotEmpty(new_call_list) + self.assertTrue(new_call_list[0], + "The passed layer class wasn't instantiated.") + new_feedforward_call_list = new_model.get_layer( + name='transformer_scaffold')._feedforward_block.list + self.assertNotEmpty(new_feedforward_call_list) + self.assertTrue(new_feedforward_call_list[0], + "The passed layer class wasn't instantiated.") + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/nlp/modeling/layers/transformer_test.py b/models/official/nlp/modeling/layers/transformer_test.py new file mode 100644 index 0000000000000000000000000000000000000000..841feb9948cb69abe1b1b73364b6f09fa2bde836 --- /dev/null +++ b/models/official/nlp/modeling/layers/transformer_test.py @@ -0,0 +1,253 @@ +# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for Keras-based transformer block layer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +import numpy as np +import tensorflow as tf + +from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import +from official.nlp.modeling.layers import transformer + + +# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It +# guarantees forward compatibility of this code for the V2 switchover. +@keras_parameterized.run_all_keras_modes +@parameterized.named_parameters(('base', transformer.Transformer), + ('xla', transformer.CompiledTransformer)) +class TransformerLayerTest(keras_parameterized.TestCase): + + def tearDown(self): + super(TransformerLayerTest, self).tearDown() + tf.keras.mixed_precision.experimental.set_policy('float32') + + def test_layer_creation(self, transformer_cls): + test_layer = transformer_cls( + num_attention_heads=10, + intermediate_size=2048, + intermediate_activation='relu') + sequence_length = 21 + width = 80 + # Create a 3-dimensional input (the first dimension is implicit). + data_tensor = tf.keras.Input(shape=(sequence_length, width)) + output_tensor = test_layer(data_tensor) + # The default output of a transformer layer should be the same as the input. + self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list()) + + def test_layer_creation_with_mask(self, transformer_cls): + test_layer = transformer_cls( + num_attention_heads=10, + intermediate_size=2048, + intermediate_activation='relu') + sequence_length = 21 + width = 80 + # Create a 3-dimensional input (the first dimension is implicit). + data_tensor = tf.keras.Input(shape=(sequence_length, width)) + # Create a 2-dimensional input (the first dimension is implicit). + mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) + output_tensor = test_layer([data_tensor, mask_tensor]) + # The default output of a transformer layer should be the same as the input. + self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list()) + + def test_layer_creation_with_incorrect_mask_fails(self, transformer_cls): + test_layer = transformer_cls( + num_attention_heads=10, + intermediate_size=2048, + intermediate_activation='relu') + sequence_length = 21 + width = 80 + # Create a 3-dimensional input (the first dimension is implicit). + data_tensor = tf.keras.Input(shape=(sequence_length, width)) + # Create a 2-dimensional input (the first dimension is implicit). 
+ mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length - 3)) + with self.assertRaisesRegex(ValueError, 'When passing a mask tensor.*'): + _ = test_layer([data_tensor, mask_tensor]) + + def test_layer_invocation(self, transformer_cls): + test_layer = transformer_cls( + num_attention_heads=10, + intermediate_size=2048, + intermediate_activation='relu') + sequence_length = 21 + width = 80 + # Create a 3-dimensional input (the first dimension is implicit). + data_tensor = tf.keras.Input(shape=(sequence_length, width)) + output_tensor = test_layer(data_tensor) + + # Create a model from the test layer. + model = tf.keras.Model(data_tensor, output_tensor) + + # Invoke the model on test data. We can't validate the output data itself + # (the NN is too complex) but this will rule out structural runtime errors. + batch_size = 6 + input_data = 10 * np.random.random_sample( + (batch_size, sequence_length, width)) + _ = model.predict(input_data) + + def test_layer_invocation_with_mask(self, transformer_cls): + test_layer = transformer_cls( + num_attention_heads=10, + intermediate_size=2048, + intermediate_activation='relu') + sequence_length = 21 + width = 80 + # Create a 3-dimensional input (the first dimension is implicit). + data_tensor = tf.keras.Input(shape=(sequence_length, width)) + # Create a 2-dimensional input (the first dimension is implicit). + mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) + output_tensor = test_layer([data_tensor, mask_tensor]) + + # Create a model from the test layer. + model = tf.keras.Model([data_tensor, mask_tensor], output_tensor) + + # Invoke the model on test data. We can't validate the output data itself + # (the NN is too complex) but this will rule out structural runtime errors. + batch_size = 6 + input_data = 10 * np.random.random_sample( + (batch_size, sequence_length, width)) + # The attention mask should be of shape (batch, from_seq_len, to_seq_len), + # which here is (batch, sequence_length, sequence_length) + mask_data = np.random.randint( + 2, size=(batch_size, sequence_length, sequence_length)) + _ = model.predict([input_data, mask_data]) + + def test_layer_output_range(self, transformer_cls): + test_layer = transformer_cls( + num_attention_heads=10, + intermediate_size=2048, + intermediate_activation='relu') + sequence_length = 21 + width = 80 + + batch_size = 6 + input_data = 10 * np.random.random_sample( + (batch_size, sequence_length, width)) + mask_data = np.random.randint( + 2, size=(batch_size, sequence_length, sequence_length)) + output_tensor = test_layer([input_data, mask_data]) + + # The layer only attends to the first token and outputs the first token + # embeeding. + new_layer = transformer_cls( + num_attention_heads=10, + intermediate_size=2048, + intermediate_activation='relu', + output_range=1) + _ = new_layer([input_data, mask_data]) + new_layer.set_weights(test_layer.get_weights()) + new_output_tensor = new_layer([input_data, mask_data]) + self.assertAllClose(new_output_tensor, output_tensor[:, 0:1, :]) + + def test_layer_invocation_with_float16_dtype(self, transformer_cls): + tf.keras.mixed_precision.experimental.set_policy('mixed_float16') + test_layer = transformer_cls( + num_attention_heads=10, + intermediate_size=2048, + intermediate_activation='relu') + sequence_length = 21 + width = 80 + # Create a 3-dimensional input (the first dimension is implicit). + data_tensor = tf.keras.Input(shape=(sequence_length, width)) + # Create a 2-dimensional input (the first dimension is implicit). 
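The invocation tests above feed a random 0/1 tensor of shape (batch, sequence_length, sequence_length) as the attention mask. In practice that 3-D mask is usually derived from a per-token padding mask; the helper below is a small illustrative sketch (not part of the library) of one way to build it, where entry (b, i, j) is 1 exactly when position i is allowed to attend to token j.

```python
import numpy as np

def make_self_attention_mask(padding_mask):
  """Expands a (batch, seq_len) padding mask into (batch, seq_len, seq_len).

  Illustrative helper only: every query position may attend to every key
  position that is not padding.
  """
  padding_mask = np.asarray(padding_mask, dtype=np.float32)
  return np.einsum('bi,bj->bij', np.ones_like(padding_mask), padding_mask)

padding = np.array([[1, 1, 1, 0]])        # one sequence, last token is padding
attention_mask = make_self_attention_mask(padding)
print(attention_mask.shape)               # (1, 4, 4)
```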
+ mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) + output_tensor = test_layer([data_tensor, mask_tensor]) + + # Create a model from the test layer. + model = tf.keras.Model([data_tensor, mask_tensor], output_tensor) + + # Invoke the model on test data. We can't validate the output data itself + # (the NN is too complex) but this will rule out structural runtime errors. + batch_size = 6 + input_data = (10 * np.random.random_sample( + (batch_size, sequence_length, width))) + # The attention mask should be of shape (batch, from_seq_len, to_seq_len), + # which here is (batch, sequence_length, sequence_length) + mask_data = np.random.randint( + 2, size=(batch_size, sequence_length, sequence_length)) + _ = model.predict([input_data, mask_data]) + + def test_transform_with_initializer(self, transformer_cls): + test_layer = transformer_cls( + num_attention_heads=10, + intermediate_size=2048, + intermediate_activation='relu', + kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)) + sequence_length = 21 + width = 80 + # Create a 3-dimensional input (the first dimension is implicit). + data_tensor = tf.keras.Input(shape=(sequence_length, width)) + output = test_layer(data_tensor) + # The default output of a transformer layer should be the same as the input. + self.assertEqual(data_tensor.shape.as_list(), output.shape.as_list()) + + def test_dynamic_layer_sequence(self, transformer_cls): + test_layer = transformer_cls( + num_attention_heads=10, + intermediate_size=2048, + intermediate_activation='relu', + kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)) + # Create a 3-dimensional input (the first dimension is implicit). + width = 30 + input_tensor = tf.keras.Input(shape=(None, width)) + output_tensor = test_layer(input_tensor) + model = tf.keras.Model(input_tensor, output_tensor) + + input_length = 17 + input_data = np.ones((1, input_length, width)) + output_data = model.predict(input_data) + + self.assertAllEqual([1, input_length, width], output_data.shape) + + +def _create_cache(batch_size, init_decode_length, num_heads, head_size): + return { + 'key': + tf.zeros([batch_size, init_decode_length, num_heads, head_size], + dtype=tf.float32), + 'value': + tf.zeros([batch_size, init_decode_length, num_heads, head_size], + dtype=tf.float32) + } + + +@keras_parameterized.run_all_keras_modes +class TransformerDecoderLayerTest(keras_parameterized.TestCase): + + def test_decoder_block_with_cache(self): + num_attention_heads = 2 + hidden_size = 16 + decoder_block = transformer.TransformerDecoderLayer( + num_attention_heads=num_attention_heads, + intermediate_size=32, + intermediate_activation='relu', + dropout_rate=0.1, + attention_dropout_rate=0.1) + # Forward path. 
+ dummy_tensor = tf.zeros([2, 4, 16], dtype=tf.float32) + dummy_mask = tf.zeros([2, 4, 4], dtype=tf.float32) + inputs = [dummy_tensor, dummy_tensor, dummy_mask, dummy_mask] + cache = _create_cache(2, 0, num_attention_heads, + hidden_size // num_attention_heads) + output, cache = decoder_block(inputs, cache) + self.assertEqual(output.shape, (2, 4, hidden_size)) + self.assertEqual(cache['value'].shape, (2, 4, 2, 8)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/nlp/modeling/layers/util.py b/models/official/nlp/modeling/layers/util.py new file mode 100644 index 0000000000000000000000000000000000000000..354f216ea4ea743fb48be256126df100abe5cfa9 --- /dev/null +++ b/models/official/nlp/modeling/layers/util.py @@ -0,0 +1,51 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Keras-based transformer block layer.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import functools + +import tensorflow as tf + + +class TfFunctionIfEagerDecorator(object): + """Helper decorator function to optionally apply the @tf.function annotation.""" + + def __init__(self, **kwargs): + self.func_kwargs = kwargs + + def __call__(self, func): + + @functools.wraps(func) + def wrapped_func(*args): + # TODO(b/150147476, b/150024785): Fix tf.function in TF1 crash. + if not hasattr(tf.compat.v1, "executing_eagerly_outside_functions" + ) or tf.compat.v1.executing_eagerly_outside_functions(): + return tf.function(func=func, **self.func_kwargs)(*args) + return func(*args) + + # Cache the created function in self._call_impl. + if not hasattr(self, "_call_impl"): + self._call_impl = wrapped_func + return self._call_impl + + +def tf_function_if_eager(**kwargs): + """Applies the @tf.function decorator only if running in eager mode.""" + return TfFunctionIfEagerDecorator(**kwargs) diff --git a/models/official/nlp/modeling/losses/README.md b/models/official/nlp/modeling/losses/README.md new file mode 100644 index 0000000000000000000000000000000000000000..522150cfa1518797b488146fae506bfcaf063b8e --- /dev/null +++ b/models/official/nlp/modeling/losses/README.md @@ -0,0 +1,9 @@ +# Losses + +Losses contains common loss computation used in NLP tasks. + +* `weighted_sparse_categorical_crossentropy_loss` computes per-batch sparse +categorical crossentropy loss. + +* `weighted_sparse_categorical_crossentropy_per_example_loss` computes +per-example sparse categorical crossentropy loss. diff --git a/models/official/nlp/modeling/losses/__init__.py b/models/official/nlp/modeling/losses/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..919bad30809b1a4967ecb7edcb206e92637477db --- /dev/null +++ b/models/official/nlp/modeling/losses/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
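A quick usage sketch for the `tf_function_if_eager` helper defined in `util.py` above. The decorator kwargs are forwarded to `tf.function`; `experimental_compile=True` is shown only as a plausible example argument, and the decorated function itself is hypothetical.

```python
import tensorflow as tf
from official.nlp.modeling.layers import util

@util.tf_function_if_eager(experimental_compile=True)
def scaled_add(a, b):
  # Compiled with tf.function (and forwarded kwargs) when running eagerly;
  # left as a plain Python function under TF1 graph construction.
  return 2.0 * a + b

print(scaled_add(tf.constant(1.0), tf.constant(3.0)))  # tf.Tensor(5.0, ...)
```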
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Activations package definition. Subject to change.""" +from official.nlp.modeling.losses.weighted_sparse_categorical_crossentropy import loss as weighted_sparse_categorical_crossentropy_loss +from official.nlp.modeling.losses.weighted_sparse_categorical_crossentropy import per_example_loss as weighted_sparse_categorical_crossentropy_per_example_loss diff --git a/models/official/nlp/modeling/losses/weighted_sparse_categorical_crossentropy.py b/models/official/nlp/modeling/losses/weighted_sparse_categorical_crossentropy.py new file mode 100644 index 0000000000000000000000000000000000000000..b88d8e3665b70be63aaa4aa2f90bb78e4bd9af3f --- /dev/null +++ b/models/official/nlp/modeling/losses/weighted_sparse_categorical_crossentropy.py @@ -0,0 +1,106 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Sparse categorical cross-entropy losses.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import tensorflow as tf + + +def _adjust_labels(labels, predictions): + """Adjust the 'labels' tensor by squeezing it if needed.""" + labels = tf.cast(labels, tf.int32) + if len(predictions.shape) == len(labels.shape): + labels = tf.squeeze(labels, [-1]) + return labels, predictions + + +def _validate_rank(labels, predictions, weights): + if weights is not None and len(weights.shape) != len(labels.shape): + raise RuntimeError( + ("Weight and label tensors were not of the same rank. weights.shape " + "was %s, and labels.shape was %s.") % + (predictions.shape, labels.shape)) + if (len(predictions.shape) - 1) != len(labels.shape): + raise RuntimeError( + ("Weighted sparse categorical crossentropy expects `labels` to have a " + "rank of one less than `predictions`. labels.shape was %s, and " + "predictions.shape was %s.") % (labels.shape, predictions.shape)) + + +def per_example_loss(labels, predictions, weights=None): + """Calculate a per-example sparse categorical crossentropy loss. + + This loss function assumes that the predictions are post-softmax. + Args: + labels: The labels to evaluate against. Should be a set of integer indices + ranging from 0 to (vocab_size-1). + predictions: The network predictions. 
Should have softmax already applied. + weights: An optional weight array of the same shape as the 'labels' array. + If None, all examples will be used. + + Returns: + A tensor of shape predictions.shape[:-1] containing the per-example + loss. + """ + # When using these functions with the Keras core API, we will need to squeeze + # the labels tensor - Keras adds a spurious inner dimension. + labels, predictions = _adjust_labels(labels, predictions) + _validate_rank(labels, predictions, weights) + + labels_one_hot = tf.one_hot(labels, predictions.shape[-1]) + labels_one_hot = tf.cast(labels_one_hot, predictions.dtype) + per_example_loss_data = -tf.reduce_sum( + predictions * labels_one_hot, axis=[-1]) + if weights is not None: + weights = tf.cast(weights, per_example_loss_data.dtype) + per_example_loss_data = weights * per_example_loss_data + return per_example_loss_data + + +def loss(labels, predictions, weights=None): + """Calculate a per-batch sparse categorical crossentropy loss. + + This loss function assumes that the predictions are post-softmax. + Args: + labels: The labels to evaluate against. Should be a set of integer indices + ranging from 0 to (vocab_size-1). + predictions: The network predictions. Should have softmax already applied. + weights: An optional weight array of the same shape as the 'labels' array. + If None, all examples will be used. + + Returns: + A loss scalar. + + Raises: + RuntimeError if the passed tensors do not have the same rank. + """ + # When using these functions with the Keras core API, we will need to squeeze + # the labels tensor - Keras adds a spurious inner dimension. + labels, predictions = _adjust_labels(labels, predictions) + _validate_rank(labels, predictions, weights) + + per_example_loss_data = per_example_loss(labels, predictions, weights) + + if weights is None: + return tf.reduce_mean(per_example_loss_data) + else: + numerator = tf.reduce_sum(per_example_loss_data) + weights = tf.cast(weights, predictions.dtype) + denominator = tf.reduce_sum(weights) + 1e-5 + return numerator / denominator diff --git a/models/official/nlp/modeling/losses/weighted_sparse_categorical_crossentropy_test.py b/models/official/nlp/modeling/losses/weighted_sparse_categorical_crossentropy_test.py new file mode 100644 index 0000000000000000000000000000000000000000..2fec2a318b06f0af44b73d200b22d8a22ba88ddf --- /dev/null +++ b/models/official/nlp/modeling/losses/weighted_sparse_categorical_crossentropy_test.py @@ -0,0 +1,380 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
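A minimal usage sketch for the two loss helpers defined above. The sketch feeds log-probabilities (`tf.nn.log_softmax`), which is what the legacy compatibility tests further below use as `predictions`; the shapes follow the rank rules enforced by `_validate_rank`.

```python
import numpy as np
import tensorflow as tf
from official.nlp.modeling.losses import weighted_sparse_categorical_crossentropy

# Log-probabilities over a 5-entry vocabulary for 2 examples x 3 positions.
predictions = tf.nn.log_softmax(tf.random.normal([2, 3, 5]), axis=-1)
labels = np.array([[1, 4, 0], [2, 2, 3]])      # rank(predictions) - 1
weights = np.array([[1, 1, 0], [1, 0, 0]])     # same shape as labels

per_example = weighted_sparse_categorical_crossentropy.per_example_loss(
    labels=labels, predictions=predictions, weights=weights)   # shape (2, 3)
batch_loss = weighted_sparse_categorical_crossentropy.loss(
    labels=labels, predictions=predictions, weights=weights)   # scalar
```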
+# ============================================================================== +"""Tests for masked LM loss.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + +import tensorflow as tf + +from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import +from official.nlp.modeling import layers +from official.nlp.modeling import networks +from official.nlp.modeling.losses import weighted_sparse_categorical_crossentropy + + +@keras_parameterized.run_all_keras_modes +class ClassificationLossTest(keras_parameterized.TestCase): + + def create_lm_model(self, + vocab_size, + sequence_length, + hidden_size, + num_predictions, + output="predictions"): + # First, create a transformer stack that we can use to get the LM's + # vocabulary weight. + xformer_stack = networks.TransformerEncoder( + vocab_size=vocab_size, + num_layers=1, + sequence_length=sequence_length, + hidden_size=hidden_size, + num_attention_heads=4, + ) + word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + _ = xformer_stack([word_ids, mask, type_ids]) + + # Create a maskedLM from the transformer stack. + test_layer = layers.MaskedLM( + embedding_table=xformer_stack.get_embedding_table(), + output=output) + + # Create a model from the masked LM layer. + lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size)) + masked_lm_positions = tf.keras.Input( + shape=(num_predictions,), dtype=tf.int32) + output = test_layer(lm_input_tensor, masked_positions=masked_lm_positions) + return tf.keras.Model([lm_input_tensor, masked_lm_positions], output) + + def create_classification_model(self, input_width, num_classes): + test_object = networks.Classification( + input_width=input_width, num_classes=num_classes) + # Create a 2-dimensional input (the first dimension is implicit). + pooled_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32) + output = test_object(pooled_data) + return tf.keras.Model(pooled_data, output) + + def test_per_example_loss_3d_input(self): + """Test per-example loss with a 3-dimensional input, from a masked LM.""" + vocab_size = 100 + sequence_length = 32 + hidden_size = 64 + num_predictions = 21 + model = self.create_lm_model( + vocab_size=vocab_size, + sequence_length=sequence_length, + hidden_size=hidden_size, + num_predictions=num_predictions) + + # Get the output of the masked LM. + batch_size = 3 + lm_input_data = 10 * np.random.random_sample( + (batch_size, sequence_length, hidden_size)) + masked_position_data = np.random.randint( + 2, size=(batch_size, num_predictions)) + output_data = model.predict([lm_input_data, masked_position_data]) + + # Calculate per-example loss. + labels = np.random.randint(vocab_size, size=(batch_size, num_predictions)) + per_example_loss_data = weighted_sparse_categorical_crossentropy.per_example_loss( + predictions=output_data, labels=labels) + + # Per-example loss data should have one value per prediction, and those + # values shouldn't be zero in this case (as we're using random data). 
+ expected_shape = [batch_size, num_predictions] + self.assertEqual(expected_shape, per_example_loss_data.shape.as_list()) + self.assertNotAllClose( + tf.zeros_like(per_example_loss_data), per_example_loss_data) + + def test_per_example_loss_2d_input(self): + """Test per-example loss with a 2-d input, from a classifier.""" + input_width = 512 + num_classes = 10 + model = self.create_classification_model(input_width, num_classes) + + # Invoke the network as part of a Model. + batch_size = 3 + input_data = 10 * np.random.random_sample((batch_size, input_width)) + output_data = model.predict(input_data) + + # Calculate per example loss. + labels = np.random.randint(num_classes, size=(batch_size)) + per_example_loss_data = weighted_sparse_categorical_crossentropy.per_example_loss( + predictions=output_data, labels=labels) + + # Per-example loss data should have one value per batch item, and those + # values shouldn't be zero in this case (as we're using random data). + self.assertEqual([batch_size], per_example_loss_data.shape.as_list()) + self.assertNotAllClose( + tf.zeros_like(per_example_loss_data), per_example_loss_data) + + def test_per_example_loss_weights_3d_input(self): + """Test weighted per-example loss with a 3-d input, from a masked LM.""" + vocab_size = 100 + sequence_length = 32 + hidden_size = 64 + num_predictions = 21 + model = self.create_lm_model( + vocab_size=vocab_size, + sequence_length=sequence_length, + hidden_size=hidden_size, + num_predictions=num_predictions) + + # Get the output of the masked LM. + batch_size = 3 + lm_input_data = 10 * np.random.random_sample( + (batch_size, sequence_length, hidden_size)) + masked_position_data = np.random.randint( + 2, size=(batch_size, num_predictions)) + output_data = model.predict([lm_input_data, masked_position_data]) + + # Calculate per-example loss with weights. + labels = np.random.randint(vocab_size, size=(batch_size, num_predictions)) + weights = np.random.randint(2, size=(batch_size, num_predictions)) + + per_example_loss_data = weighted_sparse_categorical_crossentropy.per_example_loss( + predictions=output_data, labels=labels, weights=weights) + + # Weighted per-example loss data should be equivalent to multiplying the + # loss tensor by the weights tensor. + expected_weighted_loss = per_example_loss_data * weights + self.assertAllClose(expected_weighted_loss, per_example_loss_data) + + def test_per_example_loss_weights_2d_input(self): + """Test weighted per-example loss with a 2-d input, from a classifier.""" + input_width = 512 + num_classes = 10 + model = self.create_classification_model(input_width, num_classes) + + # Invoke the network as part of a Model. + batch_size = 3 + input_data = 10 * np.random.random_sample((batch_size, input_width)) + output_data = model.predict(input_data) + + # Calculate per-example loss with weights. + labels = np.random.randint(num_classes, size=(batch_size)) + weights = np.random.randint(2, size=(batch_size)) + + per_example_loss_data = weighted_sparse_categorical_crossentropy.per_example_loss( + predictions=output_data, labels=labels, weights=weights) + + # Weighted per-example loss data should be equivalent to multiplying the + # loss tensor by the weights tensor. 
+ expected_weighted_loss = per_example_loss_data * weights + self.assertAllClose(expected_weighted_loss, per_example_loss_data) + + def test_loss_3d_input(self): + """Test overall loss with a 3-dimensional input, from a masked LM.""" + vocab_size = 100 + sequence_length = 32 + hidden_size = 64 + num_predictions = 21 + model = self.create_lm_model( + vocab_size=vocab_size, + sequence_length=sequence_length, + hidden_size=hidden_size, + num_predictions=num_predictions) + + # Get the output of the masked LM. + batch_size = 3 + lm_input_data = 10 * np.random.random_sample( + (batch_size, sequence_length, hidden_size)) + masked_position_data = np.random.randint( + 2, size=(batch_size, num_predictions)) + output_data = model.predict([lm_input_data, masked_position_data]) + + # Calculate loss. + labels = np.random.randint(vocab_size, size=(batch_size, num_predictions)) + weights = np.random.randint(2, size=(batch_size, num_predictions)) + per_example_loss_data = weighted_sparse_categorical_crossentropy.loss( + predictions=output_data, labels=labels, weights=weights) + + # Total loss data should have one value, and that value shouldn't be zero + # in this case (as we're using random data). + expected_shape = [] # Scalar + self.assertEqual(expected_shape, per_example_loss_data.shape.as_list()) + self.assertNotAllClose( + tf.zeros_like(per_example_loss_data), per_example_loss_data) + + def test_loss_2d_input(self): + """Test overall loss with a 2-d input, from a classifier.""" + input_width = 512 + num_classes = 10 + model = self.create_classification_model(input_width, num_classes) + + # Invoke the network as part of a Model. + batch_size = 3 + input_data = 10 * np.random.random_sample((batch_size, input_width)) + output_data = model.predict(input_data) + + # Calculate per example loss. + labels = np.random.randint(num_classes, size=(batch_size)) + loss_data = weighted_sparse_categorical_crossentropy.loss( + predictions=output_data, labels=labels) + + # Loss data should have one value only, and that value shouldn't be zero in + # this case (as we're using random data). + self.assertNotAllClose(0, loss_data) + + def test_loss_weights_3d_input(self): + """Test masked loss with a 3-dimensional input, from a masked LM.""" + vocab_size = 100 + sequence_length = 32 + hidden_size = 64 + num_predictions = 21 + model = self.create_lm_model( + vocab_size=vocab_size, + sequence_length=sequence_length, + hidden_size=hidden_size, + num_predictions=num_predictions) + + # Get the output of the masked LM. + batch_size = 3 + lm_input_data = 10 * np.random.random_sample( + (batch_size, sequence_length, hidden_size)) + masked_position_data = np.random.randint( + 2, size=(batch_size, num_predictions)) + output_data = model.predict([lm_input_data, masked_position_data]) + + # Calculate a fully masked weight tensor. This should give a loss of zero. + labels = np.random.randint(vocab_size, size=(batch_size, num_predictions)) + null_weights = np.zeros((batch_size, num_predictions)) + weighted_loss_data = weighted_sparse_categorical_crossentropy.loss( + predictions=output_data, labels=labels, weights=null_weights) + + # Because the tensor is fully masked, the loss should be 0. + self.assertAllClose(0, weighted_loss_data) + + def test_loss_weights_2d_input(self): + """Test masked loss with a 2-d input, from a classifier.""" + input_width = 512 + num_classes = 10 + model = self.create_classification_model(input_width, num_classes) + + # Invoke the network as part of a Model. 
+ batch_size = 3 + input_data = 10 * np.random.random_sample((batch_size, input_width)) + output_data = model.predict(input_data) + + # Calculate a fully masked weight tensor. This should give a loss of zero. + labels = np.random.randint(num_classes, size=(batch_size)) + null_weights = np.zeros((batch_size)) + weighted_loss_data = weighted_sparse_categorical_crossentropy.loss( + predictions=output_data, labels=labels, weights=null_weights) + + # Because the tensor is fully masked, the loss should be 0. + self.assertAllClose(0, weighted_loss_data) + + def test_mismatched_predictions_and_labels_ranks_squeezes(self): + """Test that the loss asserts when rank(predictions)-1 != rank(labels).""" + batch_size = 3 + output_data = np.random.random_sample((batch_size, 10)) + labels = np.random.randint(10, size=(batch_size, 1)) + + # All that this test tests is that the squeeze is successful. + _ = weighted_sparse_categorical_crossentropy.per_example_loss( + predictions=output_data, labels=labels) + + def test_mismatched_weights_and_labels_ranks_fail(self): + """Test that the loss asserts when rank(predictions) != rank(labels).""" + batch_size = 3 + output_data = np.random.random_sample((batch_size, 10, 15)) + labels = np.random.randint(10, size=(batch_size, 10)) + weights = np.random.randint(2, size=(batch_size)) + + with self.assertRaisesRegex(RuntimeError, ".*of the same rank.*"): + _ = weighted_sparse_categorical_crossentropy.per_example_loss( + predictions=output_data, labels=labels, weights=weights) + with self.assertRaisesRegex(RuntimeError, ".*of the same rank.*"): + _ = weighted_sparse_categorical_crossentropy.loss( + predictions=output_data, labels=labels, weights=weights) + + def test_tf_tensor_inputs(self): + """Test that tf.Tensors can be used as inputs to the loss function.""" + batch_size = 3 + output_data = tf.convert_to_tensor( + np.random.random_sample((batch_size, 10, 15))) + labels = tf.convert_to_tensor(np.random.randint(10, size=(batch_size, 10))) + weights = tf.convert_to_tensor(np.random.randint(2, size=(batch_size, 10))) + + # We're not trying to validate numerical correctness, just ensure that + # we can in fact pass tensors to these functions without causing runtime + # errors from the shape checking code. + _ = weighted_sparse_categorical_crossentropy.per_example_loss( + predictions=output_data, labels=labels, weights=weights) + _ = weighted_sparse_categorical_crossentropy.loss( + predictions=output_data, labels=labels, weights=weights) + + def test_legacy_lm_loss_compatibility(self): + """Test to validate computational correctness during refactors.""" + # This is the empirical output of a masked LM with the following parameters: + # batch_size = 3 + # vocab_size = 5 + # sequence_length = 4 + # num_predictions = 2 + output_data = np.array( + [[[-2.5286622, -1.0963473, -1.4925185, -2.4451098, -1.2923571], + [-2.7117882, -1.1205841, -4.02187, -0.9966936, -1.5119683]], + [[-2.5379114, -0.82479054, -2.287932, -1.3747153, -2.053741], + [-2.5379114, -0.82479054, -2.287932, -1.3747153, -2.053741]], + [[-2.7760355, -1.8219438, -3.0924666, -1.0779881, -0.9407509], + [-2.7760355, -1.8219438, -3.0924666, -1.0779881, -0.9407509]]]) + labels = np.array([[4, 0], [2, 2], [2, 1]]) + + # Validate that per_example loss calculations are the same. 
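As a sanity check on the compatibility values asserted just below: with log-probability predictions, the per-example loss reduces to the negative log-probability at the label index, and the weighted batch loss divides by the weight sum plus 1e-5. Reproducing the first entry by hand:

```python
# First position of the first example above: label 4, log-probs from output_data.
row = [-2.5286622, -1.0963473, -1.4925185, -2.4451098, -1.2923571]
per_example = -row[4]                  # 1.2923571, the first expected value

# With weights [[1, 0], [0, 0], [0, 0]], only this term contributes, so the
# batch loss is 1.2923571 / (1 + 1e-5) ~= 1.2923441, matching expected_loss_data.
print(per_example / (1 + 1e-5))
```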
+ per_example_loss_data = weighted_sparse_categorical_crossentropy.per_example_loss( + predictions=output_data, labels=labels) + expected_per_example_loss_data = [[1.2923571, 2.7117882], + [2.287932, 2.287932], + [3.0924666, 1.8219438]] + self.assertAllClose(expected_per_example_loss_data, per_example_loss_data) + + # Validate that overall loss calculations are the same. + weights = np.array([[1, 0], [0, 0], [0, 0]]) + loss_data = weighted_sparse_categorical_crossentropy.loss( + predictions=output_data, labels=labels, weights=weights) + expected_loss_data = 1.2923441 + self.assertAllClose(expected_loss_data, loss_data) + + def test_legacy_classification_loss_compatibility(self): + """Test to validate computational correctness during refactors.""" + # This is the empirical output of a classifier with the following params: + # batch_size = 2 + # num_classes = 3 + output_data = np.array([[-1.6094601e-03, -1.0966038e+01, -6.4434357e+00], + [-1.6975292e-03, -6.4009643e+00, -1.0226612e+01]]) + labels = np.array([2, 1]) + + # Validate that per_example loss calculations are the same. + per_example_loss_data = weighted_sparse_categorical_crossentropy.per_example_loss( + predictions=output_data, labels=labels) + expected_per_example_loss_data = [6.4434357, 6.4009643] + self.assertAllClose(expected_per_example_loss_data, per_example_loss_data) + + # Validate that overall loss calculations are the same. + weights = None + loss_data = weighted_sparse_categorical_crossentropy.loss( + predictions=output_data, labels=labels, weights=weights) + expected_loss_data = 6.4222 + self.assertAllClose(expected_loss_data, loss_data) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/modeling/models/README.md b/models/official/nlp/modeling/models/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c2e572b6fe07631c17f37b29723fc7a0ac94a81e --- /dev/null +++ b/models/official/nlp/modeling/models/README.md @@ -0,0 +1,22 @@ +# Models + +Models are combinations of layers and networks that would be trained. + +Several pre-built canned models are provided to train encoder networks. These +models are intended as both convenience functions and canonical examples. + +* [`BertClassifier`](bert_classifier.py) implements a simple classification +model containing a single classification head using the Classification network. +It can be used as a regression model as well. + +* [`BertTokenClassifier`](bert_token_classifier.py) implements a simple token +classification model containing a single classification head using the +TokenClassification network. + +* [`BertSpanLabeler`](bert_span_labeler.py) implementats a simple single-span +start-end predictor (that is, a model that predicts two values: a start token +index and an end token index), suitable for SQuAD-style tasks. + +* [`BertPretrainer`](bert_pretrainer.py) implements a masked LM and a +classification head using the Masked LM and Classification networks, +respectively. diff --git a/models/official/nlp/modeling/models/__init__.py b/models/official/nlp/modeling/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7912e3cf8a70c19a35ef51a123b5ef3d1335617f --- /dev/null +++ b/models/official/nlp/modeling/models/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Models package definition.""" +from official.nlp.modeling.models.bert_classifier import BertClassifier +from official.nlp.modeling.models.bert_pretrainer import BertPretrainer +from official.nlp.modeling.models.bert_span_labeler import BertSpanLabeler +from official.nlp.modeling.models.bert_token_classifier import BertTokenClassifier diff --git a/models/official/nlp/modeling/models/bert_classifier.py b/models/official/nlp/modeling/models/bert_classifier.py new file mode 100644 index 0000000000000000000000000000000000000000..8db6faeba0dbebfe4f7b63cb4c3c4c33607c56cc --- /dev/null +++ b/models/official/nlp/modeling/models/bert_classifier.py @@ -0,0 +1,91 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Trainer network for BERT-style models.""" +# pylint: disable=g-classes-have-attributes +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import tensorflow as tf + +from official.nlp.modeling import networks + + +@tf.keras.utils.register_keras_serializable(package='Text') +class BertClassifier(tf.keras.Model): + """Classifier model based on a BERT-style transformer-based encoder. + + This is an implementation of the network structure surrounding a transformer + encoder as described in "BERT: Pre-training of Deep Bidirectional Transformers + for Language Understanding" (https://arxiv.org/abs/1810.04805). + + The BertClassifier allows a user to pass in a transformer stack, and + instantiates a classification network based on the passed `num_classes` + argument. If `num_classes` is set to 1, a regression network is instantiated. + + Arguments: + network: A transformer network. This network should output a sequence output + and a classification output. Furthermore, it should expose its embedding + table via a "get_embedding_table" method. + num_classes: Number of classes to predict from the classification network. + initializer: The initializer (if any) to use in the classification networks. + Defaults to a Glorot uniform initializer. + output: The output style for this network. Can be either 'logits' or + 'predictions'. 
+ """ + + def __init__(self, + network, + num_classes, + initializer='glorot_uniform', + output='logits', + dropout_rate=0.1, + **kwargs): + self._self_setattr_tracking = False + self._config = { + 'network': network, + 'num_classes': num_classes, + 'initializer': initializer, + 'output': output, + } + + # We want to use the inputs of the passed network as the inputs to this + # Model. To do this, we need to keep a handle to the network inputs for use + # when we construct the Model object at the end of init. + inputs = network.inputs + + # Because we have a copy of inputs to create this Model object, we can + # invoke the Network object with its own input tensors to start the Model. + _, cls_output = network(inputs) + cls_output = tf.keras.layers.Dropout(rate=dropout_rate)(cls_output) + + self.classifier = networks.Classification( + input_width=cls_output.shape[-1], + num_classes=num_classes, + initializer=initializer, + output=output, + name='classification') + predictions = self.classifier(cls_output) + + super(BertClassifier, self).__init__( + inputs=inputs, outputs=predictions, **kwargs) + + def get_config(self): + return self._config + + @classmethod + def from_config(cls, config, custom_objects=None): + return cls(**config) diff --git a/models/official/nlp/modeling/models/bert_classifier_test.py b/models/official/nlp/modeling/models/bert_classifier_test.py new file mode 100644 index 0000000000000000000000000000000000000000..4dade8508592d5e3344b79e50dce74ccc27526c7 --- /dev/null +++ b/models/official/nlp/modeling/models/bert_classifier_test.py @@ -0,0 +1,107 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for BERT trainer network.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +import tensorflow as tf + +from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import +from official.nlp.modeling import networks +from official.nlp.modeling.models import bert_classifier + + +# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It +# guarantees forward compatibility of this code for the V2 switchover. +@keras_parameterized.run_all_keras_modes +class BertClassifierTest(keras_parameterized.TestCase): + + @parameterized.parameters(1, 3) + def test_bert_trainer(self, num_classes): + """Validate that the Keras object can be created.""" + # Build a transformer network to use within the BERT trainer. + vocab_size = 100 + sequence_length = 512 + test_network = networks.TransformerEncoder( + vocab_size=vocab_size, num_layers=2, sequence_length=sequence_length) + + # Create a BERT trainer with the created network. 
+ bert_trainer_model = bert_classifier.BertClassifier( + test_network, + num_classes=num_classes) + + # Create a set of 2-dimensional inputs (the first dimension is implicit). + word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + + # Invoke the trainer model on the inputs. This causes the layer to be built. + cls_outs = bert_trainer_model([word_ids, mask, type_ids]) + + # Validate that the outputs are of the expected shape. + expected_classification_shape = [None, num_classes] + self.assertAllEqual(expected_classification_shape, cls_outs.shape.as_list()) + + @parameterized.parameters(1, 2) + def test_bert_trainer_tensor_call(self, num_classes): + """Validate that the Keras object can be invoked.""" + # Build a transformer network to use within the BERT trainer. (Here, we use + # a short sequence_length for convenience.) + test_network = networks.TransformerEncoder( + vocab_size=100, num_layers=2, sequence_length=2) + + # Create a BERT trainer with the created network. + bert_trainer_model = bert_classifier.BertClassifier( + test_network, num_classes=num_classes) + + # Create a set of 2-dimensional data tensors to feed into the model. + word_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32) + mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32) + type_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32) + + # Invoke the trainer model on the tensors. In Eager mode, this does the + # actual calculation. (We can't validate the outputs, since the network is + # too complex: this simply ensures we're not hitting runtime errors.) + _ = bert_trainer_model([word_ids, mask, type_ids]) + + def test_serialize_deserialize(self): + """Validate that the BERT trainer can be serialized and deserialized.""" + # Build a transformer network to use within the BERT trainer. (Here, we use + # a short sequence_length for convenience.) + test_network = networks.TransformerEncoder( + vocab_size=100, num_layers=2, sequence_length=5) + + # Create a BERT trainer with the created network. (Note that all the args + # are different, so we can catch any serialization mismatches.) + bert_trainer_model = bert_classifier.BertClassifier( + test_network, num_classes=4, initializer='zeros', output='predictions') + + # Create another BERT trainer via serialization and deserialization. + config = bert_trainer_model.get_config() + new_bert_trainer_model = bert_classifier.BertClassifier.from_config(config) + + # Validate that the config can be forced to JSON. + _ = new_bert_trainer_model.to_json() + + # If the serialization was successful, the new config should match the old. + self.assertAllEqual(bert_trainer_model.get_config(), + new_bert_trainer_model.get_config()) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/nlp/modeling/models/bert_pretrainer.py b/models/official/nlp/modeling/models/bert_pretrainer.py new file mode 100644 index 0000000000000000000000000000000000000000..bce33747f03af723927fba138ddec55160262449 --- /dev/null +++ b/models/official/nlp/modeling/models/bert_pretrainer.py @@ -0,0 +1,231 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
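Beyond the construction checks in the tests above, a typical fine-tuning setup would compile the classifier with a standard Keras loss. The snippet below is a hedged sketch with toy sizes; the optimizer and loss choices are illustrative (the loss assumes the default `output='logits'`), not a prescribed recipe.

```python
import tensorflow as tf
from official.nlp.modeling import networks
from official.nlp.modeling.models import bert_classifier

encoder = networks.TransformerEncoder(
    vocab_size=100, num_layers=2, sequence_length=16)
classifier = bert_classifier.BertClassifier(encoder, num_classes=3)

classifier.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=2e-5),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'])
# classifier.fit(...) would then take [word_ids, mask, type_ids] batches plus
# integer class labels.
```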
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Trainer network for BERT-style models.""" +# pylint: disable=g-classes-have-attributes +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import copy +from typing import List, Optional + +import gin +import tensorflow as tf + +from official.nlp.modeling import layers +from official.nlp.modeling import networks + + +@tf.keras.utils.register_keras_serializable(package='Text') +class BertPretrainer(tf.keras.Model): + """BERT network training model. + + This is an implementation of the network structure surrounding a transformer + encoder as described in "BERT: Pre-training of Deep Bidirectional Transformers + for Language Understanding" (https://arxiv.org/abs/1810.04805). + + The BertPretrainer allows a user to pass in a transformer stack, and + instantiates the masked language model and classification networks that are + used to create the training objectives. + + Arguments: + network: A transformer network. This network should output a sequence output + and a classification output. + num_classes: Number of classes to predict from the classification network. + num_token_predictions: Number of tokens to predict from the masked LM. + embedding_table: Embedding table of a network. If None, the + "network.get_embedding_table()" is used. + activation: The activation (if any) to use in the masked LM network. If + None, no activation will be used. + initializer: The initializer (if any) to use in the masked LM and + classification networks. Defaults to a Glorot uniform initializer. + output: The output style for this network. Can be either 'logits' or + 'predictions'. + """ + + def __init__(self, + network, + num_classes, + num_token_predictions, + embedding_table=None, + activation=None, + initializer='glorot_uniform', + output='logits', + **kwargs): + self._self_setattr_tracking = False + self._config = { + 'network': network, + 'num_classes': num_classes, + 'num_token_predictions': num_token_predictions, + 'activation': activation, + 'initializer': initializer, + 'output': output, + } + self.encoder = network + # We want to use the inputs of the passed network as the inputs to this + # Model. To do this, we need to keep a copy of the network inputs for use + # when we construct the Model object at the end of init. (We keep a copy + # because we'll be adding another tensor to the copy later.) + network_inputs = self.encoder.inputs + inputs = copy.copy(network_inputs) + + # Because we have a copy of inputs to create this Model object, we can + # invoke the Network object with its own input tensors to start the Model. + # Note that, because of how deferred construction happens, we can't use + # the copy of the list here - by the time the network is invoked, the list + # object contains the additional input added below. + sequence_output, cls_output = self.encoder(network_inputs) + + # The encoder network may get outputs from all layers. 
+ if isinstance(sequence_output, list): + sequence_output = sequence_output[-1] + if isinstance(cls_output, list): + cls_output = cls_output[-1] + sequence_output_length = sequence_output.shape.as_list()[1] + if sequence_output_length < num_token_predictions: + raise ValueError( + "The passed network's output length is %s, which is less than the " + 'requested num_token_predictions %s.' % + (sequence_output_length, num_token_predictions)) + + masked_lm_positions = tf.keras.layers.Input( + shape=(num_token_predictions,), + name='masked_lm_positions', + dtype=tf.int32) + inputs.append(masked_lm_positions) + + if embedding_table is None: + embedding_table = self.encoder.get_embedding_table() + self.masked_lm = layers.MaskedLM( + embedding_table=embedding_table, + activation=activation, + initializer=initializer, + output=output, + name='cls/predictions') + lm_outputs = self.masked_lm( + sequence_output, masked_positions=masked_lm_positions) + + self.classification = networks.Classification( + input_width=cls_output.shape[-1], + num_classes=num_classes, + initializer=initializer, + output=output, + name='classification') + sentence_outputs = self.classification(cls_output) + + super(BertPretrainer, self).__init__( + inputs=inputs, + outputs=dict(masked_lm=lm_outputs, classification=sentence_outputs), + **kwargs) + + def get_config(self): + return self._config + + @classmethod + def from_config(cls, config, custom_objects=None): + return cls(**config) + + +# TODO(hongkuny): Migrate to BertPretrainerV2 for all usages. +@tf.keras.utils.register_keras_serializable(package='Text') +@gin.configurable +class BertPretrainerV2(tf.keras.Model): + """BERT pretraining model V2. + + (Experimental). + Adds the masked language model head and optional classification heads upon the + transformer encoder. When num_masked_tokens == 0, there won't be MaskedLM + head. + + Arguments: + num_masked_tokens: Number of tokens to predict from the masked LM. + encoder_network: A transformer network. This network should output a + sequence output and a classification output. + mlm_activation: The activation (if any) to use in the masked LM network. If + None, no activation will be used. + mlm_initializer: The initializer (if any) to use in the masked LM. Default + to a Glorot uniform initializer. + classification_heads: A list of optional head layers to transform on encoder + sequence outputs. + name: The name of the model. + Inputs: Inputs defined by the encoder network, plus `masked_lm_positions` as a + dictionary. + Outputs: A dictionary of `lm_output` and classification head outputs keyed by + head names. 
+ """ + + def __init__( + self, + num_masked_tokens: int, + encoder_network: tf.keras.Model, + mlm_activation=None, + mlm_initializer='glorot_uniform', + classification_heads: Optional[List[tf.keras.layers.Layer]] = None, + name: str = 'bert', + **kwargs): + self._self_setattr_tracking = False + self._config = { + 'encoder_network': encoder_network, + 'num_masked_tokens': num_masked_tokens, + 'mlm_initializer': mlm_initializer, + 'classification_heads': classification_heads, + 'name': name, + } + + self.encoder_network = encoder_network + inputs = copy.copy(self.encoder_network.inputs) + sequence_output, _ = self.encoder_network(inputs) + + self.classification_heads = classification_heads or [] + if len(set([cls.name for cls in self.classification_heads])) != len( + self.classification_heads): + raise ValueError('Classification heads should have unique names.') + + outputs = dict() + if num_masked_tokens > 0: + self.masked_lm = layers.MaskedLM( + embedding_table=self.encoder_network.get_embedding_table(), + activation=mlm_activation, + initializer=mlm_initializer, + name='cls/predictions') + masked_lm_positions = tf.keras.layers.Input( + shape=(num_masked_tokens,), + name='masked_lm_positions', + dtype=tf.int32) + inputs.append(masked_lm_positions) + outputs['lm_output'] = self.masked_lm( + sequence_output, masked_positions=masked_lm_positions) + for cls_head in self.classification_heads: + outputs[cls_head.name] = cls_head(sequence_output) + + super(BertPretrainerV2, self).__init__( + inputs=inputs, outputs=outputs, name=name, **kwargs) + + @property + def checkpoint_items(self): + """Returns a dictionary of items to be additionally checkpointed.""" + items = dict(encoder=self.encoder_network) + for head in self.classification_heads: + for key, item in head.checkpoint_items.items(): + items['.'.join([head.name, key])] = item + return items + + def get_config(self): + return self._config + + @classmethod + def from_config(cls, config, custom_objects=None): + return cls(**config) diff --git a/models/official/nlp/modeling/models/bert_pretrainer_test.py b/models/official/nlp/modeling/models/bert_pretrainer_test.py new file mode 100644 index 0000000000000000000000000000000000000000..eb9ace5ccf132ec0423276b28fa1e1e473a97290 --- /dev/null +++ b/models/official/nlp/modeling/models/bert_pretrainer_test.py @@ -0,0 +1,164 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for BERT trainer network.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import +from official.nlp.modeling import networks +from official.nlp.modeling.models import bert_pretrainer + + +# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. 
It +# guarantees forward compatibility of this code for the V2 switchover. +@keras_parameterized.run_all_keras_modes +class BertPretrainerTest(keras_parameterized.TestCase): + + def test_bert_pretrainer(self): + """Validate that the Keras object can be created.""" + # Build a transformer network to use within the BERT trainer. + vocab_size = 100 + sequence_length = 512 + test_network = networks.TransformerEncoder( + vocab_size=vocab_size, num_layers=2, sequence_length=sequence_length) + + # Create a BERT trainer with the created network. + num_classes = 3 + num_token_predictions = 2 + bert_trainer_model = bert_pretrainer.BertPretrainer( + test_network, + num_classes=num_classes, + num_token_predictions=num_token_predictions) + + # Create a set of 2-dimensional inputs (the first dimension is implicit). + word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + masked_lm_positions = tf.keras.Input( + shape=(num_token_predictions,), dtype=tf.int32) + + # Invoke the trainer model on the inputs. This causes the layer to be built. + outputs = bert_trainer_model( + [word_ids, mask, type_ids, masked_lm_positions]) + + # Validate that the outputs are of the expected shape. + expected_lm_shape = [None, num_token_predictions, vocab_size] + expected_classification_shape = [None, num_classes] + self.assertAllEqual(expected_lm_shape, outputs['masked_lm'].shape.as_list()) + self.assertAllEqual(expected_classification_shape, + outputs['classification'].shape.as_list()) + + def test_bert_trainer_tensor_call(self): + """Validate that the Keras object can be invoked.""" + # Build a transformer network to use within the BERT trainer. (Here, we use + # a short sequence_length for convenience.) + test_network = networks.TransformerEncoder( + vocab_size=100, num_layers=2, sequence_length=2) + + # Create a BERT trainer with the created network. + bert_trainer_model = bert_pretrainer.BertPretrainer( + test_network, num_classes=2, num_token_predictions=2) + + # Create a set of 2-dimensional data tensors to feed into the model. + word_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32) + mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32) + type_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32) + lm_mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32) + + # Invoke the trainer model on the tensors. In Eager mode, this does the + # actual calculation. (We can't validate the outputs, since the network is + # too complex: this simply ensures we're not hitting runtime errors.) + _ = bert_trainer_model([word_ids, mask, type_ids, lm_mask]) + + def test_serialize_deserialize(self): + """Validate that the BERT trainer can be serialized and deserialized.""" + # Build a transformer network to use within the BERT trainer. (Here, we use + # a short sequence_length for convenience.) + test_network = networks.TransformerEncoder( + vocab_size=100, num_layers=2, sequence_length=5) + + # Create a BERT trainer with the created network. (Note that all the args + # are different, so we can catch any serialization mismatches.) + bert_trainer_model = bert_pretrainer.BertPretrainer( + test_network, num_classes=4, num_token_predictions=3) + + # Create another BERT trainer via serialization and deserialization. + config = bert_trainer_model.get_config() + new_bert_trainer_model = bert_pretrainer.BertPretrainer.from_config(config) + + # Validate that the config can be forced to JSON. 
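The pretrainer tests above feed random 0/1 values as `masked_lm_positions` purely to exercise shapes. Below is a sketch of a more realistic call, where the positions are interpreted as integer indices into the sequence marking the tokens the masked LM should predict (toy sizes; that interpretation of the positions tensor is an assumption made for illustration).

```python
import numpy as np
from official.nlp.modeling import networks
from official.nlp.modeling.models import bert_pretrainer

batch_size, sequence_length, num_predictions = 4, 8, 2
encoder = networks.TransformerEncoder(
    vocab_size=100, num_layers=2, sequence_length=sequence_length)
pretrainer = bert_pretrainer.BertPretrainer(
    encoder, num_classes=2, num_token_predictions=num_predictions)

word_ids = np.random.randint(
    100, size=(batch_size, sequence_length)).astype(np.int32)
mask = np.ones((batch_size, sequence_length), dtype=np.int32)
type_ids = np.zeros((batch_size, sequence_length), dtype=np.int32)
# Indices of the masked-out tokens within each sequence.
positions = np.random.randint(
    sequence_length, size=(batch_size, num_predictions)).astype(np.int32)

outputs = pretrainer([word_ids, mask, type_ids, positions])
# outputs['masked_lm']:      (batch_size, num_predictions, vocab_size)
# outputs['classification']: (batch_size, num_classes)
```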
+ _ = new_bert_trainer_model.to_json() + + # If the serialization was successful, the new config should match the old. + self.assertAllEqual(bert_trainer_model.get_config(), + new_bert_trainer_model.get_config()) + + def test_bert_pretrainerv2(self): + """Validate that the Keras object can be created.""" + # Build a transformer network to use within the BERT trainer. + vocab_size = 100 + sequence_length = 512 + test_network = networks.TransformerEncoder( + vocab_size=vocab_size, num_layers=2, sequence_length=sequence_length) + + # Create a BERT trainer with the created network. + num_token_predictions = 2 + bert_trainer_model = bert_pretrainer.BertPretrainerV2( + encoder_network=test_network, num_masked_tokens=num_token_predictions) + + # Create a set of 2-dimensional inputs (the first dimension is implicit). + word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + lm_mask = tf.keras.Input(shape=(num_token_predictions,), dtype=tf.int32) + + # Invoke the trainer model on the inputs. This causes the layer to be built. + outputs = bert_trainer_model([word_ids, mask, type_ids, lm_mask]) + + # Validate that the outputs are of the expected shape. + expected_lm_shape = [None, num_token_predictions, vocab_size] + self.assertAllEqual(expected_lm_shape, outputs['lm_output'].shape.as_list()) + + def test_v2_serialize_deserialize(self): + """Validate that the BERT trainer can be serialized and deserialized.""" + # Build a transformer network to use within the BERT trainer. (Here, we use + # a short sequence_length for convenience.) + test_network = networks.TransformerEncoder( + vocab_size=100, num_layers=2, sequence_length=5) + + # Create a BERT trainer with the created network. (Note that all the args + # are different, so we can catch any serialization mismatches.) + bert_trainer_model = bert_pretrainer.BertPretrainerV2( + encoder_network=test_network, num_masked_tokens=2) + + # Create another BERT trainer via serialization and deserialization. + config = bert_trainer_model.get_config() + new_bert_trainer_model = bert_pretrainer.BertPretrainerV2.from_config( + config) + + # Validate that the config can be forced to JSON. + _ = new_bert_trainer_model.to_json() + + # If the serialization was successful, the new config should match the old. + self.assertAllEqual(bert_trainer_model.get_config(), + new_bert_trainer_model.get_config()) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/nlp/modeling/models/bert_span_labeler.py b/models/official/nlp/modeling/models/bert_span_labeler.py new file mode 100644 index 0000000000000000000000000000000000000000..2dd9ab13f518373b6bf82800256d75df9d553750 --- /dev/null +++ b/models/official/nlp/modeling/models/bert_span_labeler.py @@ -0,0 +1,103 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
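The `checkpoint_items` property on `BertPretrainerV2` is not exercised by the tests above; the sketch below shows one hedged way it could be used, saving just the encoder weights with `tf.train.Checkpoint` so they can later be restored into a fine-tuning model (toy sizes, illustrative checkpoint path).

```python
import tensorflow as tf
from official.nlp.modeling import networks
from official.nlp.modeling.models import bert_pretrainer

encoder = networks.TransformerEncoder(
    vocab_size=100, num_layers=2, sequence_length=16)
pretrainer = bert_pretrainer.BertPretrainerV2(
    num_masked_tokens=2, encoder_network=encoder)

# checkpoint_items maps names ('encoder', plus any classification-head items)
# to trackable objects, so it can be splatted directly into a Checkpoint.
checkpoint = tf.train.Checkpoint(**pretrainer.checkpoint_items)
save_path = checkpoint.save('/tmp/bert_pretraining/ckpt')
print(save_path)
```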
+# ============================================================================== +"""Trainer network for BERT-style models.""" +# pylint: disable=g-classes-have-attributes +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import tensorflow as tf + +from official.nlp.modeling import networks + + +@tf.keras.utils.register_keras_serializable(package='Text') +class BertSpanLabeler(tf.keras.Model): + """Span labeler model based on a BERT-style transformer-based encoder. + + This is an implementation of the network structure surrounding a transformer + encoder as described in "BERT: Pre-training of Deep Bidirectional Transformers + for Language Understanding" (https://arxiv.org/abs/1810.04805). + + The BertSpanLabeler allows a user to pass in a transformer stack, and + instantiates a span labeling network based on a single dense layer. + + Arguments: + network: A transformer network. This network should output a sequence output + and a classification output. Furthermore, it should expose its embedding + table via a "get_embedding_table" method. + initializer: The initializer (if any) to use in the span labeling network. + Defaults to a Glorot uniform initializer. + output: The output style for this network. Can be either 'logits' or + 'predictions'. + """ + + def __init__(self, + network, + initializer='glorot_uniform', + output='logits', + **kwargs): + self._self_setattr_tracking = False + self._network = network + self._config = { + 'network': network, + 'initializer': initializer, + 'output': output, + } + + # We want to use the inputs of the passed network as the inputs to this + # Model. To do this, we need to keep a handle to the network inputs for use + # when we construct the Model object at the end of init. + inputs = network.inputs + + # Because we have a copy of inputs to create this Model object, we can + # invoke the Network object with its own input tensors to start the Model. + sequence_output, _ = network(inputs) + + # This is an instance variable for ease of access to the underlying task + # network. + self.span_labeling = networks.SpanLabeling( + input_width=sequence_output.shape[-1], + initializer=initializer, + output=output, + name='span_labeling') + start_logits, end_logits = self.span_labeling(sequence_output) + + # Use identity layers wrapped in lambdas to explicitly name the output + # tensors. This allows us to use string-keyed dicts in Keras fit/predict/ + # evaluate calls. + start_logits = tf.keras.layers.Lambda( + tf.identity, name='start_positions')( + start_logits) + end_logits = tf.keras.layers.Lambda( + tf.identity, name='end_positions')( + end_logits) + + logits = [start_logits, end_logits] + + super(BertSpanLabeler, self).__init__( + inputs=inputs, outputs=logits, **kwargs) + + @property + def checkpoint_items(self): + return dict(encoder=self._network) + + def get_config(self): + return self._config + + @classmethod + def from_config(cls, config, custom_objects=None): + return cls(**config) diff --git a/models/official/nlp/modeling/models/bert_span_labeler_test.py b/models/official/nlp/modeling/models/bert_span_labeler_test.py new file mode 100644 index 0000000000000000000000000000000000000000..d05e91b52c9ba69a65df7dee4783ffc4113b8a3c --- /dev/null +++ b/models/official/nlp/modeling/models/bert_span_labeler_test.py @@ -0,0 +1,124 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
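Editor's note: a minimal usage sketch for the `BertSpanLabeler` defined above (not part of the patch). It mirrors the named-compilation behavior exercised in the test below; the encoder sizes and the optimizer/loss choices are placeholder assumptions, not an official recipe.

```python
import tensorflow as tf
from official.nlp.modeling import networks
from official.nlp.modeling.models import bert_span_labeler

# Build a small encoder and wrap it in the span labeler.
encoder = networks.TransformerEncoder(
    vocab_size=100, num_layers=2, sequence_length=128)
model = bert_span_labeler.BertSpanLabeler(encoder)

# The Lambda-wrapped identity outputs are named 'start_positions' and
# 'end_positions', so losses can be keyed by output name.
model.compile(
    optimizer='adam',
    loss={
        'start_positions': tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True),
        'end_positions': tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True),
    })
```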
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for BERT trainer network.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import +from official.nlp.modeling import networks +from official.nlp.modeling.models import bert_span_labeler + + +# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It +# guarantees forward compatibility of this code for the V2 switchover. +@keras_parameterized.run_all_keras_modes +class BertSpanLabelerTest(keras_parameterized.TestCase): + + def test_bert_trainer(self): + """Validate that the Keras object can be created.""" + # Build a transformer network to use within the BERT trainer. + vocab_size = 100 + sequence_length = 512 + test_network = networks.TransformerEncoder( + vocab_size=vocab_size, num_layers=2, sequence_length=sequence_length) + + # Create a BERT trainer with the created network. + bert_trainer_model = bert_span_labeler.BertSpanLabeler(test_network) + + # Create a set of 2-dimensional inputs (the first dimension is implicit). + word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + + # Invoke the trainer model on the inputs. This causes the layer to be built. + cls_outs = bert_trainer_model([word_ids, mask, type_ids]) + + # Validate that there are 2 outputs are of the expected shape. + self.assertEqual(2, len(cls_outs)) + expected_shape = [None, sequence_length] + for out in cls_outs: + self.assertAllEqual(expected_shape, out.shape.as_list()) + + def test_bert_trainer_named_compilation(self): + """Validate compilation using explicit output names.""" + # Build a transformer network to use within the BERT trainer. + vocab_size = 100 + sequence_length = 512 + test_network = networks.TransformerEncoder( + vocab_size=vocab_size, num_layers=2, sequence_length=sequence_length) + + # Create a BERT trainer with the created network. + bert_trainer_model = bert_span_labeler.BertSpanLabeler(test_network) + + # Attempt to compile the model using a string-keyed dict of output names to + # loss functions. This will validate that the outputs are named as we + # expect. + bert_trainer_model.compile( + optimizer='sgd', + loss={ + 'start_positions': 'mse', + 'end_positions': 'mse' + }) + + def test_bert_trainer_tensor_call(self): + """Validate that the Keras object can be invoked.""" + # Build a transformer network to use within the BERT trainer. (Here, we use + # a short sequence_length for convenience.) + test_network = networks.TransformerEncoder( + vocab_size=100, num_layers=2, sequence_length=2) + + # Create a BERT trainer with the created network. 
+ bert_trainer_model = bert_span_labeler.BertSpanLabeler(test_network) + + # Create a set of 2-dimensional data tensors to feed into the model. + word_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32) + mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32) + type_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32) + + # Invoke the trainer model on the tensors. In Eager mode, this does the + # actual calculation. (We can't validate the outputs, since the network is + # too complex: this simply ensures we're not hitting runtime errors.) + _ = bert_trainer_model([word_ids, mask, type_ids]) + + def test_serialize_deserialize(self): + """Validate that the BERT trainer can be serialized and deserialized.""" + # Build a transformer network to use within the BERT trainer. (Here, we use + # a short sequence_length for convenience.) + test_network = networks.TransformerEncoder( + vocab_size=100, num_layers=2, sequence_length=5) + + # Create a BERT trainer with the created network. (Note that all the args + # are different, so we can catch any serialization mismatches.) + bert_trainer_model = bert_span_labeler.BertSpanLabeler(test_network) + + # Create another BERT trainer via serialization and deserialization. + config = bert_trainer_model.get_config() + new_bert_trainer_model = bert_span_labeler.BertSpanLabeler.from_config( + config) + + # Validate that the config can be forced to JSON. + _ = new_bert_trainer_model.to_json() + + # If the serialization was successful, the new config should match the old. + self.assertAllEqual(bert_trainer_model.get_config(), + new_bert_trainer_model.get_config()) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/nlp/modeling/models/bert_token_classifier.py b/models/official/nlp/modeling/models/bert_token_classifier.py new file mode 100644 index 0000000000000000000000000000000000000000..4967d71776d685c8631d19d3c07a9fc1e8a25bf6 --- /dev/null +++ b/models/official/nlp/modeling/models/bert_token_classifier.py @@ -0,0 +1,97 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Trainer network for BERT-style models.""" +# pylint: disable=g-classes-have-attributes +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import tensorflow as tf + +from official.nlp.modeling import networks + + +@tf.keras.utils.register_keras_serializable(package='Text') +class BertTokenClassifier(tf.keras.Model): + """Token classifier model based on a BERT-style transformer-based encoder. + + This is an implementation of the network structure surrounding a transformer + encoder as described in "BERT: Pre-training of Deep Bidirectional Transformers + for Language Understanding" (https://arxiv.org/abs/1810.04805). 
+ + The BertTokenClassifier allows a user to pass in a transformer stack, and + instantiates a token classification network based on the passed `num_classes` + argument. + + Arguments: + network: A transformer network. This network should output a sequence output + and a classification output. Furthermore, it should expose its embedding + table via a "get_embedding_table" method. + num_classes: Number of classes to predict from the classification network. + initializer: The initializer (if any) to use in the classification networks. + Defaults to a Glorot uniform initializer. + output: The output style for this network. Can be either 'logits' or + 'predictions'. + """ + + def __init__(self, + network, + num_classes, + initializer='glorot_uniform', + output='logits', + dropout_rate=0.1, + **kwargs): + self._self_setattr_tracking = False + self._network = network + self._config = { + 'network': network, + 'num_classes': num_classes, + 'initializer': initializer, + 'output': output, + } + + # We want to use the inputs of the passed network as the inputs to this + # Model. To do this, we need to keep a handle to the network inputs for use + # when we construct the Model object at the end of init. + inputs = network.inputs + + # Because we have a copy of inputs to create this Model object, we can + # invoke the Network object with its own input tensors to start the Model. + sequence_output, _ = network(inputs) + sequence_output = tf.keras.layers.Dropout( + rate=dropout_rate)(sequence_output) + + self.classifier = networks.TokenClassification( + input_width=sequence_output.shape[-1], + num_classes=num_classes, + initializer=initializer, + output=output, + name='classification') + predictions = self.classifier(sequence_output) + + super(BertTokenClassifier, self).__init__( + inputs=inputs, outputs=predictions, **kwargs) + + @property + def checkpoint_items(self): + return dict(encoder=self._network) + + def get_config(self): + return self._config + + @classmethod + def from_config(cls, config, custom_objects=None): + return cls(**config) diff --git a/models/official/nlp/modeling/models/bert_token_classifier_test.py b/models/official/nlp/modeling/models/bert_token_classifier_test.py new file mode 100644 index 0000000000000000000000000000000000000000..970b531cf5673e4040ceb417ffb67a8ef6aea70a --- /dev/null +++ b/models/official/nlp/modeling/models/bert_token_classifier_test.py @@ -0,0 +1,107 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
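Editor's note: a short usage sketch for the `BertTokenClassifier` defined above (not part of the patch), assuming integer tag labels per token as in NER-style tagging; the class count, optimizer, and loss are placeholder assumptions.

```python
import tensorflow as tf
from official.nlp.modeling import networks
from official.nlp.modeling.models import bert_token_classifier

encoder = networks.TransformerEncoder(
    vocab_size=100, num_layers=2, sequence_length=128)
tagger = bert_token_classifier.BertTokenClassifier(encoder, num_classes=9)

# The output is [batch, sequence_length, num_classes] (see the test below),
# so a sparse cross-entropy over per-token labels applies directly.
tagger.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
```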
+# ============================================================================== +"""Tests for BERT trainer network.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import +from official.nlp.modeling import networks +from official.nlp.modeling.models import bert_token_classifier + + +# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It +# guarantees forward compatibility of this code for the V2 switchover. +@keras_parameterized.run_all_keras_modes +class BertTokenClassifierTest(keras_parameterized.TestCase): + + def test_bert_trainer(self): + """Validate that the Keras object can be created.""" + # Build a transformer network to use within the BERT trainer. + vocab_size = 100 + sequence_length = 512 + test_network = networks.TransformerEncoder( + vocab_size=vocab_size, num_layers=2, sequence_length=sequence_length) + + # Create a BERT trainer with the created network. + num_classes = 3 + bert_trainer_model = bert_token_classifier.BertTokenClassifier( + test_network, + num_classes=num_classes) + + # Create a set of 2-dimensional inputs (the first dimension is implicit). + word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + + # Invoke the trainer model on the inputs. This causes the layer to be built. + sequence_outs = bert_trainer_model([word_ids, mask, type_ids]) + + # Validate that the outputs are of the expected shape. + expected_classification_shape = [None, sequence_length, num_classes] + self.assertAllEqual(expected_classification_shape, + sequence_outs.shape.as_list()) + + def test_bert_trainer_tensor_call(self): + """Validate that the Keras object can be invoked.""" + # Build a transformer network to use within the BERT trainer. (Here, we use + # a short sequence_length for convenience.) + test_network = networks.TransformerEncoder( + vocab_size=100, num_layers=2, sequence_length=2) + + # Create a BERT trainer with the created network. + bert_trainer_model = bert_token_classifier.BertTokenClassifier( + test_network, num_classes=2) + + # Create a set of 2-dimensional data tensors to feed into the model. + word_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32) + mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32) + type_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32) + + # Invoke the trainer model on the tensors. In Eager mode, this does the + # actual calculation. (We can't validate the outputs, since the network is + # too complex: this simply ensures we're not hitting runtime errors.) + _ = bert_trainer_model([word_ids, mask, type_ids]) + + def test_serialize_deserialize(self): + """Validate that the BERT trainer can be serialized and deserialized.""" + # Build a transformer network to use within the BERT trainer. (Here, we use + # a short sequence_length for convenience.) + test_network = networks.TransformerEncoder( + vocab_size=100, num_layers=2, sequence_length=5) + + # Create a BERT trainer with the created network. (Note that all the args + # are different, so we can catch any serialization mismatches.) 
+ bert_trainer_model = bert_token_classifier.BertTokenClassifier( + test_network, num_classes=4, initializer='zeros', output='predictions') + + # Create another BERT trainer via serialization and deserialization. + config = bert_trainer_model.get_config() + new_bert_trainer_model = ( + bert_token_classifier.BertTokenClassifier.from_config(config)) + + # Validate that the config can be forced to JSON. + _ = new_bert_trainer_model.to_json() + + # If the serialization was successful, the new config should match the old. + self.assertAllEqual(bert_trainer_model.get_config(), + new_bert_trainer_model.get_config()) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/nlp/modeling/models/electra_pretrainer.py b/models/official/nlp/modeling/models/electra_pretrainer.py new file mode 100644 index 0000000000000000000000000000000000000000..21fe3a0d9719739fa1adce3d628a3df6b261c177 --- /dev/null +++ b/models/official/nlp/modeling/models/electra_pretrainer.py @@ -0,0 +1,307 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Trainer network for ELECTRA models.""" +# pylint: disable=g-classes-have-attributes +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import copy +import tensorflow as tf + +from official.modeling import tf_utils +from official.nlp.modeling import layers + + +@tf.keras.utils.register_keras_serializable(package='Text') +class ElectraPretrainer(tf.keras.Model): + """ELECTRA network training model. + + This is an implementation of the network structure described in "ELECTRA: + Pre-training Text Encoders as Discriminators Rather Than Generators" ( + https://arxiv.org/abs/2003.10555). + + The ElectraPretrainer allows a user to pass in two transformer models, one for + generator, the other for discriminator, and instantiates the masked language + model (at generator side) and classification networks (at discriminator side) + that are used to create the training objectives. + + Arguments: + generator_network: A transformer network for generator, this network should + output a sequence output and an optional classification output. + discriminator_network: A transformer network for discriminator, this network + should output a sequence output + vocab_size: Size of generator output vocabulary + num_classes: Number of classes to predict from the classification network + for the generator network (not used now) + sequence_length: Input sequence length + last_hidden_dim: Last hidden dim of generator transformer output + num_token_predictions: Number of tokens to predict from the masked LM. + mlm_activation: The activation (if any) to use in the masked LM and + classification networks. If None, no activation will be used. 
+ mlm_initializer: The initializer (if any) to use in the masked LM and + classification networks. Defaults to a Glorot uniform initializer. + output_type: The output style for this network. Can be either 'logits' or + 'predictions'. + disallow_correct: Whether to disallow the generator to generate the exact + same token in the original sentence + """ + + def __init__(self, + generator_network, + discriminator_network, + vocab_size, + num_classes, + sequence_length, + last_hidden_dim, + num_token_predictions, + mlm_activation=None, + mlm_initializer='glorot_uniform', + output_type='logits', + disallow_correct=False, + **kwargs): + super(ElectraPretrainer, self).__init__() + self._config = { + 'generator_network': generator_network, + 'discriminator_network': discriminator_network, + 'vocab_size': vocab_size, + 'num_classes': num_classes, + 'sequence_length': sequence_length, + 'last_hidden_dim': last_hidden_dim, + 'num_token_predictions': num_token_predictions, + 'mlm_activation': mlm_activation, + 'mlm_initializer': mlm_initializer, + 'output_type': output_type, + 'disallow_correct': disallow_correct, + } + for k, v in kwargs.items(): + self._config[k] = v + + self.generator_network = generator_network + self.discriminator_network = discriminator_network + self.vocab_size = vocab_size + self.num_classes = num_classes + self.sequence_length = sequence_length + self.last_hidden_dim = last_hidden_dim + self.num_token_predictions = num_token_predictions + self.mlm_activation = mlm_activation + self.mlm_initializer = mlm_initializer + self.output_type = output_type + self.disallow_correct = disallow_correct + self.masked_lm = layers.MaskedLM( + embedding_table=generator_network.get_embedding_table(), + activation=mlm_activation, + initializer=mlm_initializer, + output=output_type, + name='generator_masked_lm') + self.classification = layers.ClassificationHead( + inner_dim=last_hidden_dim, + num_classes=num_classes, + initializer=mlm_initializer, + name='generator_classification_head') + self.discriminator_head = tf.keras.layers.Dense( + units=1, kernel_initializer=mlm_initializer) + + def call(self, inputs): + input_word_ids = inputs['input_word_ids'] + input_mask = inputs['input_mask'] + input_type_ids = inputs['input_type_ids'] + masked_lm_positions = inputs['masked_lm_positions'] + + ### Generator ### + sequence_output, cls_output = self.generator_network( + [input_word_ids, input_mask, input_type_ids]) + + # The generator encoder network may get outputs from all layers. + if isinstance(sequence_output, list): + sequence_output = sequence_output[-1] + if isinstance(cls_output, list): + cls_output = cls_output[-1] + + lm_outputs = self.masked_lm(sequence_output, masked_lm_positions) + sentence_outputs = self.classification(sequence_output) + + ### Sampling from generator ### + fake_data = self._get_fake_data(inputs, lm_outputs, duplicate=True) + + ### Discriminator ### + disc_input = fake_data['inputs'] + disc_label = fake_data['is_fake_tokens'] + disc_sequence_output, _ = self.discriminator_network([ + disc_input['input_word_ids'], disc_input['input_mask'], + disc_input['input_type_ids'] + ]) + + # The discriminator encoder network may get outputs from all layers. 
+ if isinstance(disc_sequence_output, list): + disc_sequence_output = disc_sequence_output[-1] + + disc_logits = self.discriminator_head(disc_sequence_output) + disc_logits = tf.squeeze(disc_logits, axis=-1) + + return lm_outputs, sentence_outputs, disc_logits, disc_label + + def _get_fake_data(self, inputs, mlm_logits, duplicate=True): + """Generate corrupted data for discriminator. + + Args: + inputs: A dict of all inputs, same as the input of call() function + mlm_logits: The generator's output logits + duplicate: Whether to copy the original inputs dict during modifications + + Returns: + A dict of generated fake data + """ + inputs = unmask(inputs, duplicate) + + if self.disallow_correct: + disallow = tf.one_hot( + inputs['masked_lm_ids'], depth=self.vocab_size, dtype=tf.float32) + else: + disallow = None + + sampled_tokens = tf.stop_gradient( + sample_from_softmax(mlm_logits, disallow=disallow)) + sampled_tokids = tf.argmax(sampled_tokens, -1, output_type=tf.int32) + updated_input_ids, masked = scatter_update(inputs['input_word_ids'], + sampled_tokids, + inputs['masked_lm_positions']) + labels = masked * (1 - tf.cast( + tf.equal(updated_input_ids, inputs['input_word_ids']), tf.int32)) + + updated_inputs = get_updated_inputs( + inputs, duplicate, input_word_ids=updated_input_ids) + + return { + 'inputs': updated_inputs, + 'is_fake_tokens': labels, + 'sampled_tokens': sampled_tokens + } + + def get_config(self): + return self._config + + @classmethod + def from_config(cls, config, custom_objects=None): + return cls(**config) + + +def scatter_update(sequence, updates, positions): + """Scatter-update a sequence. + + Args: + sequence: A [batch_size, seq_len] or [batch_size, seq_len, depth] tensor + updates: A tensor of size batch_size*seq_len(*depth) + positions: A [batch_size, n_positions] tensor + + Returns: + updated_sequence: A [batch_size, seq_len] or [batch_size, seq_len, depth] + tensor of "sequence" with elements at "positions" replaced by the values + at "updates". Updates to index 0 are ignored. If there are duplicated + positions the update is only applied once. + updates_mask: A [batch_size, seq_len] mask tensor of which inputs were + updated. 
+ """ + shape = tf_utils.get_shape_list(sequence, expected_rank=[2, 3]) + depth_dimension = (len(shape) == 3) + if depth_dimension: + batch_size, seq_len, depth = shape + else: + batch_size, seq_len = shape + depth = 1 + sequence = tf.expand_dims(sequence, -1) + n_positions = tf_utils.get_shape_list(positions)[1] + + shift = tf.expand_dims(seq_len * tf.range(batch_size), -1) + flat_positions = tf.reshape(positions + shift, [-1, 1]) + flat_updates = tf.reshape(updates, [-1, depth]) + updates = tf.scatter_nd(flat_positions, flat_updates, + [batch_size * seq_len, depth]) + updates = tf.reshape(updates, [batch_size, seq_len, depth]) + + flat_updates_mask = tf.ones([batch_size * n_positions], tf.int32) + updates_mask = tf.scatter_nd(flat_positions, flat_updates_mask, + [batch_size * seq_len]) + updates_mask = tf.reshape(updates_mask, [batch_size, seq_len]) + not_first_token = tf.concat([ + tf.zeros((batch_size, 1), tf.int32), + tf.ones((batch_size, seq_len - 1), tf.int32) + ], -1) + updates_mask *= not_first_token + updates_mask_3d = tf.expand_dims(updates_mask, -1) + + # account for duplicate positions + if sequence.dtype == tf.float32: + updates_mask_3d = tf.cast(updates_mask_3d, tf.float32) + updates /= tf.maximum(1.0, updates_mask_3d) + else: + assert sequence.dtype == tf.int32 + updates = tf.math.floordiv(updates, tf.maximum(1, updates_mask_3d)) + updates_mask = tf.minimum(updates_mask, 1) + updates_mask_3d = tf.minimum(updates_mask_3d, 1) + + updated_sequence = (((1 - updates_mask_3d) * sequence) + + (updates_mask_3d * updates)) + if not depth_dimension: + updated_sequence = tf.squeeze(updated_sequence, -1) + + return updated_sequence, updates_mask + + +def sample_from_softmax(logits, disallow=None): + """Implement softmax sampling using gumbel softmax trick. + + Args: + logits: A [batch_size, num_token_predictions, vocab_size] tensor indicating + the generator output logits for each masked position. + disallow: If `None`, we directly sample tokens from the logits. Otherwise, + this is a tensor of size [batch_size, num_token_predictions, vocab_size] + indicating the true word id in each masked position. + + Returns: + sampled_tokens: A [batch_size, num_token_predictions, vocab_size] one hot + tensor indicating the sampled word id in each masked position. + """ + if disallow is not None: + logits -= 1000.0 * disallow + uniform_noise = tf.random.uniform( + tf_utils.get_shape_list(logits), minval=0, maxval=1) + gumbel_noise = -tf.math.log(-tf.math.log(uniform_noise + 1e-9) + 1e-9) + + # Here we essentially follow the original paper and use temperature 1.0 for + # generator output logits. 
+ sampled_tokens = tf.one_hot( + tf.argmax(tf.nn.softmax(logits + gumbel_noise), -1, output_type=tf.int32), + logits.shape[-1]) + return sampled_tokens + + +def unmask(inputs, duplicate): + unmasked_input_word_ids, _ = scatter_update(inputs['input_word_ids'], + inputs['masked_lm_ids'], + inputs['masked_lm_positions']) + return get_updated_inputs( + inputs, duplicate, input_word_ids=unmasked_input_word_ids) + + +def get_updated_inputs(inputs, duplicate, **kwargs): + if duplicate: + new_inputs = copy.copy(inputs) + else: + new_inputs = inputs + for k, v in kwargs.items(): + new_inputs[k] = v + return new_inputs diff --git a/models/official/nlp/modeling/models/electra_pretrainer_test.py b/models/official/nlp/modeling/models/electra_pretrainer_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b5644ab1a0e5812bac4a3202e8d53cffb260550f --- /dev/null +++ b/models/official/nlp/modeling/models/electra_pretrainer_test.py @@ -0,0 +1,156 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for ELECTRA pre trainer network.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import +from official.nlp.modeling import networks +from official.nlp.modeling.models import electra_pretrainer + + +# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It +# guarantees forward compatibility of this code for the V2 switchover. +@keras_parameterized.run_all_keras_modes +class ElectraPretrainerTest(keras_parameterized.TestCase): + + def test_electra_pretrainer(self): + """Validate that the Keras object can be created.""" + # Build a transformer network to use within the ELECTRA trainer. + vocab_size = 100 + sequence_length = 512 + test_generator_network = networks.TransformerEncoder( + vocab_size=vocab_size, num_layers=2, sequence_length=sequence_length) + test_discriminator_network = networks.TransformerEncoder( + vocab_size=vocab_size, num_layers=2, sequence_length=sequence_length) + + # Create a ELECTRA trainer with the created network. + num_classes = 3 + num_token_predictions = 2 + eletrca_trainer_model = electra_pretrainer.ElectraPretrainer( + generator_network=test_generator_network, + discriminator_network=test_discriminator_network, + vocab_size=vocab_size, + num_classes=num_classes, + sequence_length=sequence_length, + last_hidden_dim=768, + num_token_predictions=num_token_predictions, + disallow_correct=True) + + # Create a set of 2-dimensional inputs (the first dimension is implicit). 
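Editor's note on `sample_from_softmax` above: it relies on the Gumbel-max trick, where adding Gumbel(0, 1) noise to the logits and taking the argmax yields a sample from softmax(logits). A tiny standalone sketch with arbitrary illustrative values (not part of the patch):

```python
import tensorflow as tf

logits = tf.constant([[2.0, 0.5, -1.0]])        # one masked position, vocab of 3
uniform = tf.random.uniform(tf.shape(logits), minval=1e-9, maxval=1.0)
gumbel = -tf.math.log(-tf.math.log(uniform))    # Gumbel(0, 1) noise
sample = tf.argmax(logits + gumbel, axis=-1)    # a draw from softmax(logits)
one_hot_sample = tf.one_hot(sample, depth=3)    # matches the one-hot form above
```

The argmax of softmax(logits + noise) equals the argmax of (logits + noise), which is why the function above can apply the softmax before the argmax without changing the sampled token.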
+ word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + lm_positions = tf.keras.Input( + shape=(num_token_predictions,), dtype=tf.int32) + lm_ids = tf.keras.Input(shape=(num_token_predictions,), dtype=tf.int32) + inputs = { + 'input_word_ids': word_ids, + 'input_mask': mask, + 'input_type_ids': type_ids, + 'masked_lm_positions': lm_positions, + 'masked_lm_ids': lm_ids + } + + # Invoke the trainer model on the inputs. This causes the layer to be built. + lm_outs, cls_outs, disc_logits, disc_label = eletrca_trainer_model(inputs) + + # Validate that the outputs are of the expected shape. + expected_lm_shape = [None, num_token_predictions, vocab_size] + expected_classification_shape = [None, num_classes] + expected_disc_logits_shape = [None, sequence_length] + expected_disc_label_shape = [None, sequence_length] + self.assertAllEqual(expected_lm_shape, lm_outs.shape.as_list()) + self.assertAllEqual(expected_classification_shape, cls_outs.shape.as_list()) + self.assertAllEqual(expected_disc_logits_shape, disc_logits.shape.as_list()) + self.assertAllEqual(expected_disc_label_shape, disc_label.shape.as_list()) + + def test_electra_trainer_tensor_call(self): + """Validate that the Keras object can be invoked.""" + # Build a transformer network to use within the ELECTRA trainer. (Here, we + # use a short sequence_length for convenience.) + test_generator_network = networks.TransformerEncoder( + vocab_size=100, num_layers=4, sequence_length=3) + test_discriminator_network = networks.TransformerEncoder( + vocab_size=100, num_layers=4, sequence_length=3) + + # Create a ELECTRA trainer with the created network. + eletrca_trainer_model = electra_pretrainer.ElectraPretrainer( + generator_network=test_generator_network, + discriminator_network=test_discriminator_network, + vocab_size=100, + num_classes=2, + sequence_length=3, + last_hidden_dim=768, + num_token_predictions=2) + + # Create a set of 2-dimensional data tensors to feed into the model. + word_ids = tf.constant([[1, 1, 1], [2, 2, 2]], dtype=tf.int32) + mask = tf.constant([[1, 1, 1], [1, 0, 0]], dtype=tf.int32) + type_ids = tf.constant([[1, 1, 1], [2, 2, 2]], dtype=tf.int32) + lm_positions = tf.constant([[0, 1], [0, 2]], dtype=tf.int32) + lm_ids = tf.constant([[10, 20], [20, 30]], dtype=tf.int32) + inputs = { + 'input_word_ids': word_ids, + 'input_mask': mask, + 'input_type_ids': type_ids, + 'masked_lm_positions': lm_positions, + 'masked_lm_ids': lm_ids + } + + # Invoke the trainer model on the tensors. In Eager mode, this does the + # actual calculation. (We can't validate the outputs, since the network is + # too complex: this simply ensures we're not hitting runtime errors.) + _, _, _, _ = eletrca_trainer_model(inputs) + + def test_serialize_deserialize(self): + """Validate that the ELECTRA trainer can be serialized and deserialized.""" + # Build a transformer network to use within the BERT trainer. (Here, we use + # a short sequence_length for convenience.) + test_generator_network = networks.TransformerEncoder( + vocab_size=100, num_layers=4, sequence_length=3) + test_discriminator_network = networks.TransformerEncoder( + vocab_size=100, num_layers=4, sequence_length=3) + + # Create a ELECTRA trainer with the created network. (Note that all the args + # are different, so we can catch any serialization mismatches.) 
+ electra_trainer_model = electra_pretrainer.ElectraPretrainer( + generator_network=test_generator_network, + discriminator_network=test_discriminator_network, + vocab_size=100, + num_classes=2, + sequence_length=3, + last_hidden_dim=768, + num_token_predictions=2) + + # Create another ELECTRA trainer via serialization and deserialization. + config = electra_trainer_model.get_config() + new_electra_trainer_model = electra_pretrainer.ElectraPretrainer.from_config( + config) + + # Validate that the config can be forced to JSON. + _ = new_electra_trainer_model.to_json() + + # If the serialization was successful, the new config should match the old. + self.assertAllEqual(electra_trainer_model.get_config(), + new_electra_trainer_model.get_config()) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/nlp/modeling/networks/README.md b/models/official/nlp/modeling/networks/README.md new file mode 100644 index 0000000000000000000000000000000000000000..42347373edc1e2999019c7259dda78bc58138ef2 --- /dev/null +++ b/models/official/nlp/modeling/networks/README.md @@ -0,0 +1,27 @@ +# Networks + +Networks are combinations of layers (and possibly other networks). They are sub-units of models that would not be trained alone. Each +encapsulates a common network structure, such as a classification head +or a transformer encoder, in an easily handled object with a +standardized configuration. + +* [`TransformerEncoder`](transformer_encoder.py) implements a bi-directional +Transformer-based encoder as described in ["BERT: Pre-training of Deep +Bidirectional Transformers for Language Understanding"](https://arxiv.org/abs/1810.04805). It includes the embedding lookups, +transformer layers and pooling layer. + +* [`AlbertTransformerEncoder`](albert_transformer_encoder.py) implements a +Transformer-based encoder as described in the paper ["ALBERT: A Lite BERT for +Self-supervised Learning of Language Representations"](https://arxiv.org/abs/1909.11942). Compared with [BERT](https://arxiv.org/abs/1810.04805), ALBERT factorizes embedding parameters +into two smaller matrices and shares parameters across layers. + +* [`Classification`](classification.py) contains a single hidden layer, and is +intended for use as a classification or regression (if the number of classes is set +to 1) head. + +* [`TokenClassification`](token_classification.py) contains a single hidden +layer, and is intended for use as a token classification head. + +* [`SpanLabeling`](span_labeling.py) implements a single-span labeler (that is, a prediction head that can predict one start and end index per batch item) based on a single dense hidden layer. It can be used for the SQuAD task. + diff --git a/models/official/nlp/modeling/networks/__init__.py b/models/official/nlp/modeling/networks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b8443e9f9303326a82212ef3da4e3057218522bb --- /dev/null +++ b/models/official/nlp/modeling/networks/__init__.py @@ -0,0 +1,21 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Networks package definition.""" +from official.nlp.modeling.networks.albert_transformer_encoder import AlbertTransformerEncoder +from official.nlp.modeling.networks.classification import Classification +from official.nlp.modeling.networks.encoder_scaffold import EncoderScaffold +from official.nlp.modeling.networks.span_labeling import SpanLabeling +from official.nlp.modeling.networks.token_classification import TokenClassification +from official.nlp.modeling.networks.transformer_encoder import TransformerEncoder diff --git a/models/official/nlp/modeling/networks/albert_transformer_encoder.py b/models/official/nlp/modeling/networks/albert_transformer_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..398fb00c18c7341765beec50e9b0e6ecaee46e5c --- /dev/null +++ b/models/official/nlp/modeling/networks/albert_transformer_encoder.py @@ -0,0 +1,192 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""ALBERT (https://arxiv.org/abs/1909.11942) text encoder network.""" +# pylint: disable=g-classes-have-attributes +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import tensorflow as tf + +from official.modeling import activations +from official.nlp.modeling import layers + + +@tf.keras.utils.register_keras_serializable(package='Text') +class AlbertTransformerEncoder(tf.keras.Model): + """ALBERT (https://arxiv.org/abs/1909.11942) text encoder network. + + This network implements the encoder described in the paper "ALBERT: A Lite + BERT for Self-supervised Learning of Language Representations" + (https://arxiv.org/abs/1909.11942). + + Compared with BERT (https://arxiv.org/abs/1810.04805), ALBERT factorizes + embedding parameters into two smaller matrices and shares parameters + across layers. + + The default values for this object are taken from the ALBERT-Base + implementation described in the paper. + + Arguments: + vocab_size: The size of the token vocabulary. + embedding_width: The width of the word embeddings. If the embedding width is + not equal to hidden size, embedding parameters will be factorized into two + matrices in the shape of ['vocab_size', 'embedding_width'] and + ['embedding_width', 'hidden_size'] ('embedding_width' is usually much + smaller than 'hidden_size'). + hidden_size: The size of the transformer hidden layers. + num_layers: The number of transformer layers. + num_attention_heads: The number of attention heads for each transformer. The + hidden size must be divisible by the number of attention heads. + sequence_length: The sequence length that this encoder expects.
If None, the + sequence length is dynamic; if an integer, the encoder will require + sequences padded to this length. + max_sequence_length: The maximum sequence length that this encoder can + consume. If None, max_sequence_length uses the value from sequence length. + This determines the variable shape for positional embeddings. + type_vocab_size: The number of types that the 'type_ids' input can take. + intermediate_size: The intermediate size for the transformer layers. + activation: The activation to use for the transformer layers. + dropout_rate: The dropout rate to use for the transformer layers. + attention_dropout_rate: The dropout rate to use for the attention layers + within the transformer layers. + initializer: The initialzer to use for all weights in this encoder. + """ + + def __init__(self, + vocab_size, + embedding_width=128, + hidden_size=768, + num_layers=12, + num_attention_heads=12, + sequence_length=512, + max_sequence_length=None, + type_vocab_size=16, + intermediate_size=3072, + activation=activations.gelu, + dropout_rate=0.1, + attention_dropout_rate=0.1, + initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02), + **kwargs): + activation = tf.keras.activations.get(activation) + initializer = tf.keras.initializers.get(initializer) + + if not max_sequence_length: + max_sequence_length = sequence_length + self._self_setattr_tracking = False + self._config_dict = { + 'vocab_size': vocab_size, + 'embedding_width': embedding_width, + 'hidden_size': hidden_size, + 'num_layers': num_layers, + 'num_attention_heads': num_attention_heads, + 'sequence_length': sequence_length, + 'max_sequence_length': max_sequence_length, + 'type_vocab_size': type_vocab_size, + 'intermediate_size': intermediate_size, + 'activation': tf.keras.activations.serialize(activation), + 'dropout_rate': dropout_rate, + 'attention_dropout_rate': attention_dropout_rate, + 'initializer': tf.keras.initializers.serialize(initializer), + } + + word_ids = tf.keras.layers.Input( + shape=(sequence_length,), dtype=tf.int32, name='input_word_ids') + mask = tf.keras.layers.Input( + shape=(sequence_length,), dtype=tf.int32, name='input_mask') + type_ids = tf.keras.layers.Input( + shape=(sequence_length,), dtype=tf.int32, name='input_type_ids') + + if embedding_width is None: + embedding_width = hidden_size + self._embedding_layer = layers.OnDeviceEmbedding( + vocab_size=vocab_size, + embedding_width=embedding_width, + initializer=initializer, + name='word_embeddings') + word_embeddings = self._embedding_layer(word_ids) + + # Always uses dynamic slicing for simplicity. + self._position_embedding_layer = layers.PositionEmbedding( + initializer=initializer, + use_dynamic_slicing=True, + max_sequence_length=max_sequence_length, + name='position_embedding') + position_embeddings = self._position_embedding_layer(word_embeddings) + + type_embeddings = ( + layers.OnDeviceEmbedding( + vocab_size=type_vocab_size, + embedding_width=embedding_width, + initializer=initializer, + use_one_hot=True, + name='type_embeddings')(type_ids)) + + embeddings = tf.keras.layers.Add()( + [word_embeddings, position_embeddings, type_embeddings]) + embeddings = ( + tf.keras.layers.LayerNormalization( + name='embeddings/layer_norm', + axis=-1, + epsilon=1e-12, + dtype=tf.float32)(embeddings)) + embeddings = (tf.keras.layers.Dropout(rate=dropout_rate)(embeddings)) + # We project the 'embedding' output to 'hidden_size' if it is not already + # 'hidden_size'. 
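Editor's note: the projection applied in the code that follows (mapping `embedding_width` up to `hidden_size`) is the second half of ALBERT's embedding factorization. A back-of-the-envelope count, with ALBERT-base-like numbers assumed purely for illustration, shows why the factorization pays off:

```python
# Illustrative parameter count (not part of the diff); numbers are assumed.
vocab_size, embedding_width, hidden_size = 30000, 128, 768

direct = vocab_size * hidden_size                                # 23,040,000
factorized = vocab_size * embedding_width + embedding_width * hidden_size
# 3,840,000 + 98,304 = 3,938,304 -- roughly a 6x reduction in embedding params.
print(direct, factorized)
```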
+ if embedding_width != hidden_size: + embeddings = tf.keras.layers.experimental.EinsumDense( + '...x,xy->...y', + output_shape=hidden_size, + bias_axes='y', + kernel_initializer=initializer, + name='embedding_projection')( + embeddings) + + data = embeddings + attention_mask = layers.SelfAttentionMask()([data, mask]) + shared_layer = layers.Transformer( + num_attention_heads=num_attention_heads, + intermediate_size=intermediate_size, + intermediate_activation=activation, + dropout_rate=dropout_rate, + attention_dropout_rate=attention_dropout_rate, + kernel_initializer=initializer, + name='transformer') + for _ in range(num_layers): + data = shared_layer([data, attention_mask]) + + first_token_tensor = ( + tf.keras.layers.Lambda(lambda x: tf.squeeze(x[:, 0:1, :], axis=1))(data) + ) + cls_output = tf.keras.layers.Dense( + units=hidden_size, + activation='tanh', + kernel_initializer=initializer, + name='pooler_transform')( + first_token_tensor) + + super(AlbertTransformerEncoder, self).__init__( + inputs=[word_ids, mask, type_ids], outputs=[data, cls_output], **kwargs) + + def get_embedding_table(self): + return self._embedding_layer.embeddings + + def get_config(self): + return self._config_dict + + @classmethod + def from_config(cls, config): + return cls(**config) diff --git a/models/official/nlp/modeling/networks/albert_transformer_encoder_test.py b/models/official/nlp/modeling/networks/albert_transformer_encoder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..44368e494ae04dd9b92c63987e6881aabd8ff4c2 --- /dev/null +++ b/models/official/nlp/modeling/networks/albert_transformer_encoder_test.py @@ -0,0 +1,174 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for ALBERT transformer-based text encoder network.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +import numpy as np +import tensorflow as tf + +from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import +from official.nlp.modeling.networks import albert_transformer_encoder + + +# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It +# guarantees forward compatibility of this code for the V2 switchover. 
+@keras_parameterized.run_all_keras_modes +class AlbertTransformerEncoderTest(keras_parameterized.TestCase): + + def tearDown(self): + super(AlbertTransformerEncoderTest, self).tearDown() + tf.keras.mixed_precision.experimental.set_policy("float32") + + @parameterized.named_parameters( + dict(testcase_name="default", expected_dtype=tf.float32), + dict( + testcase_name="with_float16_dtype", + expected_dtype=tf.float16), + ) + def test_network_creation(self, expected_dtype): + hidden_size = 32 + sequence_length = 21 + + kwargs = dict( + vocab_size=100, + hidden_size=hidden_size, + sequence_length=sequence_length, + num_attention_heads=2, + num_layers=3) + if expected_dtype == tf.float16: + tf.keras.mixed_precision.experimental.set_policy("mixed_float16") + + # Create a small TransformerEncoder for testing. + test_network = albert_transformer_encoder.AlbertTransformerEncoder(**kwargs) + + # Create the inputs (note that the first dimension is implicit). + word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + data, pooled = test_network([word_ids, mask, type_ids]) + + expected_data_shape = [None, sequence_length, hidden_size] + expected_pooled_shape = [None, hidden_size] + self.assertAllEqual(expected_data_shape, data.shape.as_list()) + self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) + + # If float_dtype is set to float16, the data output is float32 (from a layer + # norm) and pool output should be float16. + self.assertEqual(tf.float32, data.dtype) + self.assertEqual(expected_dtype, pooled.dtype) + + # ALBERT has additonal 'embedding_hidden_mapping_in' weights and + # it shares transformer weights. + self.assertNotEmpty( + [x for x in test_network.weights if "embedding_projection/" in x.name]) + self.assertNotEmpty( + [x for x in test_network.weights if "transformer/" in x.name]) + self.assertEmpty( + [x for x in test_network.weights if "transformer/layer" in x.name]) + + def test_network_invocation(self): + hidden_size = 32 + sequence_length = 21 + vocab_size = 57 + num_types = 7 + # Create a small TransformerEncoder for testing. + test_network = albert_transformer_encoder.AlbertTransformerEncoder( + vocab_size=vocab_size, + embedding_width=8, + hidden_size=hidden_size, + sequence_length=sequence_length, + num_attention_heads=2, + num_layers=3, + type_vocab_size=num_types) + self.assertTrue( + test_network._position_embedding_layer._use_dynamic_slicing) + # Create the inputs (note that the first dimension is implicit). + word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + data, pooled = test_network([word_ids, mask, type_ids]) + + # Create a model based off of this network: + model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) + + # Invoke the model. We can't validate the output data here (the model is too + # complex) but this will catch structural runtime errors. 
+ batch_size = 3 + word_id_data = np.random.randint( + vocab_size, size=(batch_size, sequence_length)) + mask_data = np.random.randint(2, size=(batch_size, sequence_length)) + type_id_data = np.random.randint( + num_types, size=(batch_size, sequence_length)) + _ = model.predict([word_id_data, mask_data, type_id_data]) + + # Creates a TransformerEncoder with max_sequence_length != sequence_length + max_sequence_length = 128 + test_network = albert_transformer_encoder.AlbertTransformerEncoder( + vocab_size=vocab_size, + embedding_width=8, + hidden_size=hidden_size, + sequence_length=sequence_length, + max_sequence_length=max_sequence_length, + num_attention_heads=2, + num_layers=3, + type_vocab_size=num_types) + self.assertTrue(test_network._position_embedding_layer._use_dynamic_slicing) + model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) + _ = model.predict([word_id_data, mask_data, type_id_data]) + + def test_serialize_deserialize(self): + tf.keras.mixed_precision.experimental.set_policy("mixed_float16") + # Create a network object that sets all of its config options. + kwargs = dict( + vocab_size=100, + embedding_width=8, + hidden_size=32, + num_layers=3, + num_attention_heads=2, + sequence_length=21, + max_sequence_length=21, + type_vocab_size=12, + intermediate_size=1223, + activation="relu", + dropout_rate=0.05, + attention_dropout_rate=0.22, + initializer="glorot_uniform") + network = albert_transformer_encoder.AlbertTransformerEncoder(**kwargs) + + expected_config = dict(kwargs) + expected_config["activation"] = tf.keras.activations.serialize( + tf.keras.activations.get(expected_config["activation"])) + expected_config["initializer"] = tf.keras.initializers.serialize( + tf.keras.initializers.get(expected_config["initializer"])) + self.assertEqual(network.get_config(), expected_config) + + # Create another network object from the first object's config. + new_network = ( + albert_transformer_encoder.AlbertTransformerEncoder.from_config( + network.get_config())) + + # Validate that the config can be forced to JSON. + _ = new_network.to_json() + + # If the serialization was successful, the new config should match the old. + self.assertAllEqual(network.get_config(), new_network.get_config()) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/modeling/networks/classification.py b/models/official/nlp/modeling/networks/classification.py new file mode 100644 index 0000000000000000000000000000000000000000..fc326136cd18593bc5e06dd2f68a1e0da17a1409 --- /dev/null +++ b/models/official/nlp/modeling/networks/classification.py @@ -0,0 +1,91 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Classification and regression network.""" +# pylint: disable=g-classes-have-attributes +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import tensorflow as tf + + +@tf.keras.utils.register_keras_serializable(package='Text') +class Classification(tf.keras.Model): + """Classification network head for BERT modeling. + + This network implements a simple classifier head based on a dense layer. If + num_classes is one, it can be considered as a regression problem. + + Arguments: + input_width: The innermost dimension of the input tensor to this network. + num_classes: The number of classes that this network should classify to. If + equal to 1, a regression problem is assumed. + activation: The activation, if any, for the dense layer in this network. + initializer: The intializer for the dense layer in this network. Defaults to + a Glorot uniform initializer. + output: The output style for this network. Can be either 'logits' or + 'predictions'. + """ + + def __init__(self, + input_width, + num_classes, + initializer='glorot_uniform', + output='logits', + **kwargs): + self._self_setattr_tracking = False + self._config_dict = { + 'input_width': input_width, + 'num_classes': num_classes, + 'initializer': initializer, + 'output': output, + } + + cls_output = tf.keras.layers.Input( + shape=(input_width,), name='cls_output', dtype=tf.float32) + + self.logits = tf.keras.layers.Dense( + num_classes, + activation=None, + kernel_initializer=initializer, + name='predictions/transform/logits')( + cls_output) + + policy = tf.keras.mixed_precision.experimental.global_policy() + if policy.name == 'mixed_bfloat16': + # b/158514794: bf16 is not stable with post-softmax cross-entropy. + policy = tf.float32 + predictions = tf.keras.layers.Activation(tf.nn.log_softmax, + dtype=policy)(self.logits) + + if output == 'logits': + output_tensors = self.logits + elif output == 'predictions': + output_tensors = predictions + else: + raise ValueError( + ('Unknown `output` value "%s". `output` can be either "logits" or ' + '"predictions"') % output) + + super(Classification, self).__init__( + inputs=[cls_output], outputs=output_tensors, **kwargs) + + def get_config(self): + return self._config_dict + + @classmethod + def from_config(cls, config, custom_objects=None): + return cls(**config) diff --git a/models/official/nlp/modeling/networks/classification_test.py b/models/official/nlp/modeling/networks/classification_test.py new file mode 100644 index 0000000000000000000000000000000000000000..457c135be4bce0c11faef36f099515ba4b0e8c53 --- /dev/null +++ b/models/official/nlp/modeling/networks/classification_test.py @@ -0,0 +1,181 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
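Editor's note: a short sketch (not part of the patch) of pairing the `Classification` head's two output styles with a matching loss, following the behavior the tests below verify: `'logits'` returns raw scores, while `'predictions'` applies a log-softmax. Sizes and the loss choice are placeholder assumptions.

```python
import tensorflow as tf
from official.nlp.modeling.networks import classification

# Raw scores: use a loss that expects logits.
head = classification.Classification(
    input_width=768, num_classes=3, output='logits')
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

# With output='predictions' the head emits log-probabilities, so the loss
# should consume log-probs (e.g. a hand-rolled negative log-likelihood)
# rather than a from_logits loss.
```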
+# ============================================================================== +"""Tests for classification network.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +import numpy as np +import tensorflow as tf + +from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import +from official.nlp.modeling.networks import classification + + +# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It +# guarantees forward compatibility of this code for the V2 switchover. +@keras_parameterized.run_all_keras_modes +class ClassificationTest(keras_parameterized.TestCase): + + @parameterized.parameters(1, 10) + def test_network_creation(self, num_classes): + """Validate that the Keras object can be created.""" + input_width = 512 + test_object = classification.Classification( + input_width=input_width, num_classes=num_classes) + # Create a 2-dimensional input (the first dimension is implicit). + cls_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32) + output = test_object(cls_data) + + # Validate that the outputs are of the expected shape. + expected_output_shape = [None, num_classes] + self.assertEqual(expected_output_shape, output.shape.as_list()) + + @parameterized.parameters(1, 10) + def test_network_invocation(self, num_classes): + """Validate that the Keras object can be invoked.""" + input_width = 512 + test_object = classification.Classification( + input_width=input_width, num_classes=num_classes, output='predictions') + # Create a 2-dimensional input (the first dimension is implicit). + cls_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32) + output = test_object(cls_data) + + # Invoke the network as part of a Model. + model = tf.keras.Model(cls_data, output) + input_data = 10 * np.random.random_sample((3, input_width)) + _ = model.predict(input_data) + + def test_network_invocation_with_internal_logits(self): + """Validate that the logit outputs are correct.""" + input_width = 512 + num_classes = 10 + test_object = classification.Classification( + input_width=input_width, num_classes=num_classes, output='predictions') + + # Create a 2-dimensional input (the first dimension is implicit). + cls_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32) + output = test_object(cls_data) + model = tf.keras.Model(cls_data, output) + logits_model = tf.keras.Model(test_object.inputs, test_object.logits) + + batch_size = 3 + input_data = 10 * np.random.random_sample((batch_size, input_width)) + outputs = model.predict(input_data) + logits = logits_model.predict(input_data) + + # Ensure that the tensor shapes are correct. + expected_output_shape = (batch_size, num_classes) + self.assertEqual(expected_output_shape, outputs.shape) + self.assertEqual(expected_output_shape, logits.shape) + + # Ensure that the logits, when softmaxed, create the outputs. 
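+    # With output='predictions', the network applies tf.nn.log_softmax to its
+    # logits, so feeding the raw logits through a standalone log_softmax layer
+    # should reproduce the model's outputs exactly.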
+ input_tensor = tf.keras.Input(expected_output_shape[1:]) + output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor) + softmax_model = tf.keras.Model(input_tensor, output_tensor) + + calculated_softmax = softmax_model.predict(logits) + self.assertAllClose(outputs, calculated_softmax) + + @parameterized.parameters(1, 10) + def test_network_invocation_with_internal_and_external_logits(self, + num_classes): + """Validate that the logit outputs are correct.""" + input_width = 512 + test_object = classification.Classification( + input_width=input_width, num_classes=num_classes, output='logits') + + # Create a 2-dimensional input (the first dimension is implicit). + cls_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32) + output = test_object(cls_data) + model = tf.keras.Model(cls_data, output) + logits_model = tf.keras.Model(test_object.inputs, test_object.logits) + + batch_size = 3 + input_data = 10 * np.random.random_sample((batch_size, input_width)) + outputs = model.predict(input_data) + logits = logits_model.predict(input_data) + + # Ensure that the tensor shapes are correct. + expected_output_shape = (batch_size, num_classes) + self.assertEqual(expected_output_shape, outputs.shape) + self.assertEqual(expected_output_shape, logits.shape) + + self.assertAllClose(outputs, logits) + + def test_network_invocation_with_logit_output(self): + """Validate that the logit outputs are correct.""" + input_width = 512 + num_classes = 10 + test_object = classification.Classification( + input_width=input_width, num_classes=num_classes, output='predictions') + logit_object = classification.Classification( + input_width=input_width, num_classes=num_classes, output='logits') + logit_object.set_weights(test_object.get_weights()) + + # Create a 2-dimensional input (the first dimension is implicit). + cls_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32) + output = test_object(cls_data) + logit_output = logit_object(cls_data) + + model = tf.keras.Model(cls_data, output) + logits_model = tf.keras.Model(cls_data, logit_output) + + batch_size = 3 + input_data = 10 * np.random.random_sample((batch_size, input_width)) + outputs = model.predict(input_data) + logits = logits_model.predict(input_data) + + # Ensure that the tensor shapes are correct. + expected_output_shape = (batch_size, num_classes) + self.assertEqual(expected_output_shape, outputs.shape) + self.assertEqual(expected_output_shape, logits.shape) + + # Ensure that the logits, when softmaxed, create the outputs. + input_tensor = tf.keras.Input(expected_output_shape[1:]) + output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor) + softmax_model = tf.keras.Model(input_tensor, output_tensor) + + calculated_softmax = softmax_model.predict(logits) + self.assertAllClose(outputs, calculated_softmax) + + def test_serialize_deserialize(self): + # Create a network object that sets all of its config options. + network = classification.Classification( + input_width=128, + num_classes=10, + initializer='zeros', + output='predictions') + + # Create another network object from the first object's config. + new_network = classification.Classification.from_config( + network.get_config()) + + # Validate that the config can be forced to JSON. + _ = new_network.to_json() + + # If the serialization was successful, the new config should match the old. 
+ self.assertAllEqual(network.get_config(), new_network.get_config()) + + def test_unknown_output_type_fails(self): + with self.assertRaisesRegex(ValueError, 'Unknown `output` value "bad".*'): + _ = classification.Classification( + input_width=128, num_classes=10, output='bad') + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/nlp/modeling/networks/encoder_scaffold.py b/models/official/nlp/modeling/networks/encoder_scaffold.py new file mode 100644 index 0000000000000000000000000000000000000000..ec9b2d102db9c3a49de509e9d9011bcf6a758e7f --- /dev/null +++ b/models/official/nlp/modeling/networks/encoder_scaffold.py @@ -0,0 +1,273 @@ +# Lint as: python3 +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Transformer-based text encoder network.""" +# pylint: disable=g-classes-have-attributes +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import inspect + +import gin +import tensorflow as tf + +from official.nlp.modeling import layers + + +@tf.keras.utils.register_keras_serializable(package='Text') +@gin.configurable +class EncoderScaffold(tf.keras.Model): + """Bi-directional Transformer-based encoder network scaffold. + + This network allows users to flexibly implement an encoder similar to the one + described in "BERT: Pre-training of Deep Bidirectional Transformers for + Language Understanding" (https://arxiv.org/abs/1810.04805). + + In this network, users can choose to provide a custom embedding subnetwork + (which will replace the standard embedding logic) and/or a custom hidden layer + class (which will replace the Transformer instantiation in the encoder). For + each of these custom injection points, users can pass either a class or a + class instance. If a class is passed, that class will be instantiated using + the 'embedding_cfg' or 'hidden_cfg' argument, respectively; if an instance + is passed, that instance will be invoked. (In the case of hidden_cls, the + instance will be invoked 'num_hidden_instances' times. + + If the hidden_cls is not overridden, a default transformer layer will be + instantiated. + + Arguments: + pooled_output_dim: The dimension of pooled output. + pooler_layer_initializer: The initializer for the classification + layer. + embedding_cls: The class or instance to use to embed the input data. This + class or instance defines the inputs to this encoder and outputs + (1) embeddings tensor with shape [batch_size, seq_length, hidden_size] and + (2) attention masking with tensor [batch_size, seq_length, seq_length]. + If embedding_cls is not set, a default embedding network + (from the original BERT paper) will be created. + embedding_cfg: A dict of kwargs to pass to the embedding_cls, if it needs to + be instantiated. 
If embedding_cls is not set, a config dict must be + passed to 'embedding_cfg' with the following values: + "vocab_size": The size of the token vocabulary. + "type_vocab_size": The size of the type vocabulary. + "hidden_size": The hidden size for this encoder. + "max_seq_length": The maximum sequence length for this encoder. + "seq_length": The sequence length for this encoder. + "initializer": The initializer for the embedding portion of this encoder. + "dropout_rate": The dropout rate to apply before the encoding layers. + embedding_data: A reference to the embedding weights that will be used to + train the masked language model, if necessary. This is optional, and only + needed if (1) you are overriding embedding_cls and (2) are doing standard + pretraining. + num_hidden_instances: The number of times to instantiate and/or invoke the + hidden_cls. + hidden_cls: The class or instance to encode the input data. If hidden_cls is + not set, a KerasBERT transformer layer will be used as the encoder class. + hidden_cfg: A dict of kwargs to pass to the hidden_cls, if it needs to be + instantiated. If hidden_cls is not set, a config dict must be passed to + 'hidden_cfg' with the following values: + "num_attention_heads": The number of attention heads. The hidden size + must be divisible by num_attention_heads. + "intermediate_size": The intermediate size of the transformer. + "intermediate_activation": The activation to apply in the transfomer. + "dropout_rate": The overall dropout rate for the transformer layers. + "attention_dropout_rate": The dropout rate for the attention layers. + "kernel_initializer": The initializer for the transformer layers. + return_all_layer_outputs: Whether to output sequence embedding outputs of + all encoder transformer layers. 
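+
+  Example (an illustrative sketch; the sizes below are arbitrary,
+  `activations` refers to `official.modeling.activations`, and `word_ids`,
+  `input_mask` and `type_ids` are assumed to be int32 Tensors of shape
+  [batch_size, seq_length]):
+
+    embedding_cfg = {
+        "vocab_size": 100,
+        "type_vocab_size": 16,
+        "hidden_size": 32,
+        "seq_length": 21,
+        "max_seq_length": 21,
+        "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02),
+        "dropout_rate": 0.1,
+    }
+    hidden_cfg = {
+        "num_attention_heads": 2,
+        "intermediate_size": 3072,
+        "intermediate_activation": activations.gelu,
+        "dropout_rate": 0.1,
+        "attention_dropout_rate": 0.1,
+        "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02),
+    }
+    encoder = EncoderScaffold(
+        num_hidden_instances=3,
+        pooled_output_dim=32,
+        hidden_cfg=hidden_cfg,
+        embedding_cfg=embedding_cfg)
+    sequence_output, pooled_output = encoder([word_ids, input_mask, type_ids])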
+ """ + + def __init__( + self, + pooled_output_dim, + pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( + stddev=0.02), + embedding_cls=None, + embedding_cfg=None, + embedding_data=None, + num_hidden_instances=1, + hidden_cls=layers.Transformer, + hidden_cfg=None, + return_all_layer_outputs=False, + **kwargs): + self._self_setattr_tracking = False + self._hidden_cls = hidden_cls + self._hidden_cfg = hidden_cfg + self._num_hidden_instances = num_hidden_instances + self._pooled_output_dim = pooled_output_dim + self._pooler_layer_initializer = pooler_layer_initializer + self._embedding_cls = embedding_cls + self._embedding_cfg = embedding_cfg + self._embedding_data = embedding_data + self._return_all_layer_outputs = return_all_layer_outputs + self._kwargs = kwargs + + if embedding_cls: + if inspect.isclass(embedding_cls): + self._embedding_network = embedding_cls( + **embedding_cfg) if embedding_cfg else embedding_cls() + else: + self._embedding_network = embedding_cls + inputs = self._embedding_network.inputs + embeddings, attention_mask = self._embedding_network(inputs) + else: + self._embedding_network = None + word_ids = tf.keras.layers.Input( + shape=(embedding_cfg['seq_length'],), + dtype=tf.int32, + name='input_word_ids') + mask = tf.keras.layers.Input( + shape=(embedding_cfg['seq_length'],), + dtype=tf.int32, + name='input_mask') + type_ids = tf.keras.layers.Input( + shape=(embedding_cfg['seq_length'],), + dtype=tf.int32, + name='input_type_ids') + inputs = [word_ids, mask, type_ids] + + self._embedding_layer = layers.OnDeviceEmbedding( + vocab_size=embedding_cfg['vocab_size'], + embedding_width=embedding_cfg['hidden_size'], + initializer=embedding_cfg['initializer'], + name='word_embeddings') + + word_embeddings = self._embedding_layer(word_ids) + + # Always uses dynamic slicing for simplicity. 
+ self._position_embedding_layer = layers.PositionEmbedding( + initializer=embedding_cfg['initializer'], + use_dynamic_slicing=True, + max_sequence_length=embedding_cfg['max_seq_length'], + name='position_embedding') + position_embeddings = self._position_embedding_layer(word_embeddings) + + type_embeddings = ( + layers.OnDeviceEmbedding( + vocab_size=embedding_cfg['type_vocab_size'], + embedding_width=embedding_cfg['hidden_size'], + initializer=embedding_cfg['initializer'], + use_one_hot=True, + name='type_embeddings')(type_ids)) + + embeddings = tf.keras.layers.Add()( + [word_embeddings, position_embeddings, type_embeddings]) + embeddings = ( + tf.keras.layers.LayerNormalization( + name='embeddings/layer_norm', + axis=-1, + epsilon=1e-12, + dtype=tf.float32)(embeddings)) + embeddings = ( + tf.keras.layers.Dropout( + rate=embedding_cfg['dropout_rate'])(embeddings)) + + attention_mask = layers.SelfAttentionMask()([embeddings, mask]) + + data = embeddings + + layer_output_data = [] + self._hidden_layers = [] + for _ in range(num_hidden_instances): + if inspect.isclass(hidden_cls): + layer = hidden_cls(**hidden_cfg) if hidden_cfg else hidden_cls() + else: + layer = hidden_cls + data = layer([data, attention_mask]) + layer_output_data.append(data) + self._hidden_layers.append(layer) + + first_token_tensor = ( + tf.keras.layers.Lambda(lambda x: tf.squeeze(x[:, 0:1, :], axis=1))( + layer_output_data[-1])) + self._pooler_layer = tf.keras.layers.Dense( + units=pooled_output_dim, + activation='tanh', + kernel_initializer=pooler_layer_initializer, + name='cls_transform') + cls_output = self._pooler_layer(first_token_tensor) + + if return_all_layer_outputs: + outputs = [layer_output_data, cls_output] + else: + outputs = [layer_output_data[-1], cls_output] + + super(EncoderScaffold, self).__init__( + inputs=inputs, outputs=outputs, **kwargs) + + def get_config(self): + config_dict = { + 'num_hidden_instances': + self._num_hidden_instances, + 'pooled_output_dim': + self._pooled_output_dim, + 'pooler_layer_initializer': + self._pooler_layer_initializer, + 'embedding_cls': + self._embedding_network, + 'embedding_cfg': + self._embedding_cfg, + 'hidden_cfg': + self._hidden_cfg, + 'return_all_layer_outputs': + self._return_all_layer_outputs, + } + if inspect.isclass(self._hidden_cls): + config_dict['hidden_cls_string'] = tf.keras.utils.get_registered_name( + self._hidden_cls) + else: + config_dict['hidden_cls'] = self._hidden_cls + + config_dict.update(self._kwargs) + return config_dict + + @classmethod + def from_config(cls, config, custom_objects=None): + if 'hidden_cls_string' in config: + config['hidden_cls'] = tf.keras.utils.get_registered_object( + config['hidden_cls_string'], custom_objects=custom_objects) + del config['hidden_cls_string'] + return cls(**config) + + def get_embedding_table(self): + if self._embedding_network is None: + # In this case, we don't have a custom embedding network and can return + # the standard embedding data. + return self._embedding_layer.embeddings + + if self._embedding_data is None: + raise RuntimeError(('The EncoderScaffold %s does not have a reference ' + 'to the embedding data. This is required when you ' + 'pass a custom embedding network to the scaffold. ' + 'It is also possible that you are trying to get ' + 'embedding data from an embedding scaffold with a ' + 'custom embedding network where the scaffold has ' + 'been serialized and deserialized. 
Unfortunately, ' + 'accessing custom embedding references after ' + 'serialization is not yet supported.') % self.name) + else: + return self._embedding_data + + @property + def hidden_layers(self): + """List of hidden layers in the encoder.""" + return self._hidden_layers + + @property + def pooler_layer(self): + """The pooler dense layer after the transformer layers.""" + return self._pooler_layer diff --git a/models/official/nlp/modeling/networks/encoder_scaffold_test.py b/models/official/nlp/modeling/networks/encoder_scaffold_test.py new file mode 100644 index 0000000000000000000000000000000000000000..664bccd08e11720918e0060458dc934350d2d594 --- /dev/null +++ b/models/official/nlp/modeling/networks/encoder_scaffold_test.py @@ -0,0 +1,646 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for transformer-based text encoder network.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +import numpy as np +import tensorflow as tf + +from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import +from official.modeling import activations +from official.nlp.modeling import layers +from official.nlp.modeling.networks import encoder_scaffold + + +# Test class that wraps a standard transformer layer. If this layer is called +# at any point, the list passed to the config object will be filled with a +# boolean 'True'. We register this class as a Keras serializable so we can +# test serialization below. +@tf.keras.utils.register_keras_serializable(package="TestOnly") +class ValidatedTransformerLayer(layers.Transformer): + + def __init__(self, call_list, **kwargs): + super(ValidatedTransformerLayer, self).__init__(**kwargs) + self.list = call_list + + def call(self, inputs): + self.list.append(True) + return super(ValidatedTransformerLayer, self).call(inputs) + + def get_config(self): + config = super(ValidatedTransformerLayer, self).get_config() + config["call_list"] = [] + return config + + +# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It +# guarantees forward compatibility of this code for the V2 switchover. 
+@keras_parameterized.run_all_keras_modes +class EncoderScaffoldLayerClassTest(keras_parameterized.TestCase): + + def tearDown(self): + super(EncoderScaffoldLayerClassTest, self).tearDown() + tf.keras.mixed_precision.experimental.set_policy("float32") + + @parameterized.named_parameters( + dict(testcase_name="only_final_output", return_all_layer_outputs=False), + dict(testcase_name="all_layer_outputs", return_all_layer_outputs=True)) + def test_network_creation(self, return_all_layer_outputs): + hidden_size = 32 + sequence_length = 21 + num_hidden_instances = 3 + embedding_cfg = { + "vocab_size": 100, + "type_vocab_size": 16, + "hidden_size": hidden_size, + "seq_length": sequence_length, + "max_seq_length": sequence_length, + "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), + "dropout_rate": 0.1, + } + + call_list = [] + hidden_cfg = { + "num_attention_heads": + 2, + "intermediate_size": + 3072, + "intermediate_activation": + activations.gelu, + "dropout_rate": + 0.1, + "attention_dropout_rate": + 0.1, + "kernel_initializer": + tf.keras.initializers.TruncatedNormal(stddev=0.02), + "call_list": + call_list + } + # Create a small EncoderScaffold for testing. + test_network = encoder_scaffold.EncoderScaffold( + num_hidden_instances=num_hidden_instances, + pooled_output_dim=hidden_size, + pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( + stddev=0.02), + hidden_cls=ValidatedTransformerLayer, + hidden_cfg=hidden_cfg, + embedding_cfg=embedding_cfg, + return_all_layer_outputs=return_all_layer_outputs) + # Create the inputs (note that the first dimension is implicit). + word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + output_data, pooled = test_network([word_ids, mask, type_ids]) + + if return_all_layer_outputs: + self.assertIsInstance(output_data, list) + self.assertLen(output_data, num_hidden_instances) + data = output_data[-1] + else: + data = output_data + self.assertIsInstance(test_network.hidden_layers, list) + self.assertLen(test_network.hidden_layers, num_hidden_instances) + self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense) + + expected_data_shape = [None, sequence_length, hidden_size] + expected_pooled_shape = [None, hidden_size] + self.assertAllEqual(expected_data_shape, data.shape.as_list()) + self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) + + # The default output dtype is float32. + self.assertAllEqual(tf.float32, data.dtype) + self.assertAllEqual(tf.float32, pooled.dtype) + + # If call_list[0] exists and is True, the passed layer class was + # instantiated from the given config properly. 
+ self.assertNotEmpty(call_list) + self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") + + def test_network_creation_with_float16_dtype(self): + tf.keras.mixed_precision.experimental.set_policy("mixed_float16") + hidden_size = 32 + sequence_length = 21 + embedding_cfg = { + "vocab_size": 100, + "type_vocab_size": 16, + "hidden_size": hidden_size, + "seq_length": sequence_length, + "max_seq_length": sequence_length, + "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), + "dropout_rate": 0.1, + } + hidden_cfg = { + "num_attention_heads": + 2, + "intermediate_size": + 3072, + "intermediate_activation": + activations.gelu, + "dropout_rate": + 0.1, + "attention_dropout_rate": + 0.1, + "kernel_initializer": + tf.keras.initializers.TruncatedNormal(stddev=0.02), + } + # Create a small EncoderScaffold for testing. + test_network = encoder_scaffold.EncoderScaffold( + num_hidden_instances=3, + pooled_output_dim=hidden_size, + pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( + stddev=0.02), + hidden_cfg=hidden_cfg, + embedding_cfg=embedding_cfg) + # Create the inputs (note that the first dimension is implicit). + word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + data, pooled = test_network([word_ids, mask, type_ids]) + + expected_data_shape = [None, sequence_length, hidden_size] + expected_pooled_shape = [None, hidden_size] + self.assertAllEqual(expected_data_shape, data.shape.as_list()) + self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) + + # If float_dtype is set to float16, the data output is float32 (from a layer + # norm) and pool output should be float16. + self.assertAllEqual(tf.float32, data.dtype) + self.assertAllEqual(tf.float16, pooled.dtype) + + def test_network_invocation(self): + hidden_size = 32 + sequence_length = 21 + vocab_size = 57 + num_types = 7 + embedding_cfg = { + "vocab_size": vocab_size, + "type_vocab_size": num_types, + "hidden_size": hidden_size, + "seq_length": sequence_length, + "max_seq_length": sequence_length, + "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), + "dropout_rate": 0.1, + } + hidden_cfg = { + "num_attention_heads": + 2, + "intermediate_size": + 3072, + "intermediate_activation": + activations.gelu, + "dropout_rate": + 0.1, + "attention_dropout_rate": + 0.1, + "kernel_initializer": + tf.keras.initializers.TruncatedNormal(stddev=0.02), + } + # Create a small EncoderScaffold for testing. + test_network = encoder_scaffold.EncoderScaffold( + num_hidden_instances=3, + pooled_output_dim=hidden_size, + pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( + stddev=0.02), + hidden_cfg=hidden_cfg, + embedding_cfg=embedding_cfg) + + # Create the inputs (note that the first dimension is implicit). + word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + data, pooled = test_network([word_ids, mask, type_ids]) + + # Create a model based off of this network: + model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) + + # Invoke the model. We can't validate the output data here (the model is too + # complex) but this will catch structural runtime errors. 
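+    # Random integer ids within the vocab/type ranges are enough to exercise
+    # the full forward pass.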
+ batch_size = 3 + word_id_data = np.random.randint( + vocab_size, size=(batch_size, sequence_length)) + mask_data = np.random.randint(2, size=(batch_size, sequence_length)) + type_id_data = np.random.randint( + num_types, size=(batch_size, sequence_length)) + _ = model.predict([word_id_data, mask_data, type_id_data]) + + # Creates a EncoderScaffold with max_sequence_length != sequence_length + num_types = 7 + embedding_cfg = { + "vocab_size": vocab_size, + "type_vocab_size": num_types, + "hidden_size": hidden_size, + "seq_length": sequence_length, + "max_seq_length": sequence_length * 2, + "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), + "dropout_rate": 0.1, + } + hidden_cfg = { + "num_attention_heads": + 2, + "intermediate_size": + 3072, + "intermediate_activation": + activations.gelu, + "dropout_rate": + 0.1, + "attention_dropout_rate": + 0.1, + "kernel_initializer": + tf.keras.initializers.TruncatedNormal(stddev=0.02), + } + # Create a small EncoderScaffold for testing. + test_network = encoder_scaffold.EncoderScaffold( + num_hidden_instances=3, + pooled_output_dim=hidden_size, + pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( + stddev=0.02), + hidden_cfg=hidden_cfg, + embedding_cfg=embedding_cfg) + + model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) + _ = model.predict([word_id_data, mask_data, type_id_data]) + + def test_serialize_deserialize(self): + # Create a network object that sets all of its config options. + hidden_size = 32 + sequence_length = 21 + embedding_cfg = { + "vocab_size": 100, + "type_vocab_size": 16, + "hidden_size": hidden_size, + "seq_length": sequence_length, + "max_seq_length": sequence_length, + "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), + "dropout_rate": 0.1, + } + hidden_cfg = { + "num_attention_heads": + 2, + "intermediate_size": + 3072, + "intermediate_activation": + activations.gelu, + "dropout_rate": + 0.1, + "attention_dropout_rate": + 0.1, + "kernel_initializer": + tf.keras.initializers.TruncatedNormal(stddev=0.02), + } + # Create a small EncoderScaffold for testing. + network = encoder_scaffold.EncoderScaffold( + num_hidden_instances=3, + pooled_output_dim=hidden_size, + pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( + stddev=0.02), + hidden_cfg=hidden_cfg, + embedding_cfg=embedding_cfg) + + # Create another network object from the first object's config. + new_network = encoder_scaffold.EncoderScaffold.from_config( + network.get_config()) + + # Validate that the config can be forced to JSON. + _ = new_network.to_json() + + # If the serialization was successful, the new config should match the old. + self.assertAllEqual(network.get_config(), new_network.get_config()) + + +@keras_parameterized.run_all_keras_modes +class EncoderScaffoldEmbeddingNetworkTest(keras_parameterized.TestCase): + + def test_network_invocation(self): + hidden_size = 32 + sequence_length = 21 + vocab_size = 57 + + # Build an embedding network to swap in for the default network. This one + # will have 2 inputs (mask and word_ids) instead of 3, and won't use + # positional embeddings. 
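+    # The scaffold calls this Model on its inputs and expects it to return
+    # (embeddings, attention_mask), replacing the default embedding stack.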
+ + word_ids = tf.keras.layers.Input( + shape=(sequence_length,), dtype=tf.int32, name="input_word_ids") + mask = tf.keras.layers.Input( + shape=(sequence_length,), dtype=tf.int32, name="input_mask") + embedding_layer = layers.OnDeviceEmbedding( + vocab_size=vocab_size, + embedding_width=hidden_size, + initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02), + name="word_embeddings") + word_embeddings = embedding_layer(word_ids) + attention_mask = layers.SelfAttentionMask()([word_embeddings, mask]) + network = tf.keras.Model([word_ids, mask], + [word_embeddings, attention_mask]) + + hidden_cfg = { + "num_attention_heads": + 2, + "intermediate_size": + 3072, + "intermediate_activation": + activations.gelu, + "dropout_rate": + 0.1, + "attention_dropout_rate": + 0.1, + "kernel_initializer": + tf.keras.initializers.TruncatedNormal(stddev=0.02), + } + + # Create a small EncoderScaffold for testing. + test_network = encoder_scaffold.EncoderScaffold( + num_hidden_instances=3, + pooled_output_dim=hidden_size, + pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( + stddev=0.02), + hidden_cfg=hidden_cfg, + embedding_cls=network, + embedding_data=embedding_layer.embeddings) + + # Create the inputs (note that the first dimension is implicit). + word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + data, pooled = test_network([word_ids, mask]) + + # Create a model based off of this network: + model = tf.keras.Model([word_ids, mask], [data, pooled]) + + # Invoke the model. We can't validate the output data here (the model is too + # complex) but this will catch structural runtime errors. + batch_size = 3 + word_id_data = np.random.randint( + vocab_size, size=(batch_size, sequence_length)) + mask_data = np.random.randint(2, size=(batch_size, sequence_length)) + _ = model.predict([word_id_data, mask_data]) + + # Test that we can get the embedding data that we passed to the object. This + # is necessary to support standard language model training. + self.assertIs(embedding_layer.embeddings, + test_network.get_embedding_table()) + + def test_serialize_deserialize(self): + hidden_size = 32 + sequence_length = 21 + vocab_size = 57 + + # Build an embedding network to swap in for the default network. This one + # will have 2 inputs (mask and word_ids) instead of 3, and won't use + # positional embeddings. + + word_ids = tf.keras.layers.Input( + shape=(sequence_length,), dtype=tf.int32, name="input_word_ids") + mask = tf.keras.layers.Input( + shape=(sequence_length,), dtype=tf.int32, name="input_mask") + embedding_layer = layers.OnDeviceEmbedding( + vocab_size=vocab_size, + embedding_width=hidden_size, + initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02), + name="word_embeddings") + word_embeddings = embedding_layer(word_ids) + attention_mask = layers.SelfAttentionMask()([word_embeddings, mask]) + network = tf.keras.Model([word_ids, mask], + [word_embeddings, attention_mask]) + + hidden_cfg = { + "num_attention_heads": + 2, + "intermediate_size": + 3072, + "intermediate_activation": + activations.gelu, + "dropout_rate": + 0.1, + "attention_dropout_rate": + 0.1, + "kernel_initializer": + tf.keras.initializers.TruncatedNormal(stddev=0.02), + } + + # Create a small EncoderScaffold for testing. 
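+    # Passing embedding_data alongside a custom embedding_cls lets
+    # get_embedding_table() return the word-embedding weights, which standard
+    # masked-language-model pretraining needs.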
+ test_network = encoder_scaffold.EncoderScaffold( + num_hidden_instances=3, + pooled_output_dim=hidden_size, + pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( + stddev=0.02), + hidden_cfg=hidden_cfg, + embedding_cls=network, + embedding_data=embedding_layer.embeddings) + + # Create another network object from the first object's config. + new_network = encoder_scaffold.EncoderScaffold.from_config( + test_network.get_config()) + + # Validate that the config can be forced to JSON. + _ = new_network.to_json() + + # If the serialization was successful, the new config should match the old. + self.assertAllEqual(test_network.get_config(), new_network.get_config()) + + # Create a model based off of the old and new networks: + word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + + data, pooled = new_network([word_ids, mask]) + new_model = tf.keras.Model([word_ids, mask], [data, pooled]) + + data, pooled = test_network([word_ids, mask]) + model = tf.keras.Model([word_ids, mask], [data, pooled]) + + # Copy the weights between models. + new_model.set_weights(model.get_weights()) + + # Invoke the models. + batch_size = 3 + word_id_data = np.random.randint( + vocab_size, size=(batch_size, sequence_length)) + mask_data = np.random.randint(2, size=(batch_size, sequence_length)) + data, cls = model.predict([word_id_data, mask_data]) + new_data, new_cls = new_model.predict([word_id_data, mask_data]) + + # The output should be equal. + self.assertAllEqual(data, new_data) + self.assertAllEqual(cls, new_cls) + + # We should not be able to get a reference to the embedding data. + with self.assertRaisesRegex(RuntimeError, ".*does not have a reference.*"): + new_network.get_embedding_table() + + +@keras_parameterized.run_all_keras_modes +class EncoderScaffoldHiddenInstanceTest(keras_parameterized.TestCase): + + def test_network_invocation(self): + hidden_size = 32 + sequence_length = 21 + vocab_size = 57 + num_types = 7 + + embedding_cfg = { + "vocab_size": vocab_size, + "type_vocab_size": num_types, + "hidden_size": hidden_size, + "seq_length": sequence_length, + "max_seq_length": sequence_length, + "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), + "dropout_rate": 0.1, + } + + call_list = [] + hidden_cfg = { + "num_attention_heads": + 2, + "intermediate_size": + 3072, + "intermediate_activation": + activations.gelu, + "dropout_rate": + 0.1, + "attention_dropout_rate": + 0.1, + "kernel_initializer": + tf.keras.initializers.TruncatedNormal(stddev=0.02), + "call_list": + call_list + } + # Create a small EncoderScaffold for testing. This time, we pass an already- + # instantiated layer object. + + xformer = ValidatedTransformerLayer(**hidden_cfg) + + test_network = encoder_scaffold.EncoderScaffold( + num_hidden_instances=3, + pooled_output_dim=hidden_size, + pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( + stddev=0.02), + hidden_cls=xformer, + embedding_cfg=embedding_cfg) + + # Create the inputs (note that the first dimension is implicit). + word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + data, pooled = test_network([word_ids, mask, type_ids]) + + # Create a model based off of this network: + model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) + + # Invoke the model. 
We can't validate the output data here (the model is too + # complex) but this will catch structural runtime errors. + batch_size = 3 + word_id_data = np.random.randint( + vocab_size, size=(batch_size, sequence_length)) + mask_data = np.random.randint(2, size=(batch_size, sequence_length)) + type_id_data = np.random.randint( + num_types, size=(batch_size, sequence_length)) + _ = model.predict([word_id_data, mask_data, type_id_data]) + + # If call_list[0] exists and is True, the passed layer class was + # called as part of the graph creation. + self.assertNotEmpty(call_list) + self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") + + def test_serialize_deserialize(self): + hidden_size = 32 + sequence_length = 21 + vocab_size = 57 + num_types = 7 + + embedding_cfg = { + "vocab_size": vocab_size, + "type_vocab_size": num_types, + "hidden_size": hidden_size, + "seq_length": sequence_length, + "max_seq_length": sequence_length, + "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), + "dropout_rate": 0.1, + } + + call_list = [] + hidden_cfg = { + "num_attention_heads": + 2, + "intermediate_size": + 3072, + "intermediate_activation": + activations.gelu, + "dropout_rate": + 0.1, + "attention_dropout_rate": + 0.1, + "kernel_initializer": + tf.keras.initializers.TruncatedNormal(stddev=0.02), + "call_list": + call_list + } + # Create a small EncoderScaffold for testing. This time, we pass an already- + # instantiated layer object. + + xformer = ValidatedTransformerLayer(**hidden_cfg) + + test_network = encoder_scaffold.EncoderScaffold( + num_hidden_instances=3, + pooled_output_dim=hidden_size, + pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( + stddev=0.02), + hidden_cls=xformer, + embedding_cfg=embedding_cfg) + + # Create another network object from the first object's config. + new_network = encoder_scaffold.EncoderScaffold.from_config( + test_network.get_config()) + + # Validate that the config can be forced to JSON. + _ = new_network.to_json() + + # If the serialization was successful, the new config should match the old. + self.assertAllEqual(test_network.get_config(), new_network.get_config()) + + # Create a model based off of the old and new networks: + word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + + data, pooled = new_network([word_ids, mask, type_ids]) + new_model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) + + data, pooled = test_network([word_ids, mask, type_ids]) + model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) + + # Copy the weights between models. + new_model.set_weights(model.get_weights()) + + # Invoke the models. + batch_size = 3 + word_id_data = np.random.randint( + vocab_size, size=(batch_size, sequence_length)) + mask_data = np.random.randint(2, size=(batch_size, sequence_length)) + type_id_data = np.random.randint( + num_types, size=(batch_size, sequence_length)) + data, cls = model.predict([word_id_data, mask_data, type_id_data]) + new_data, new_cls = new_model.predict( + [word_id_data, mask_data, type_id_data]) + + # The output should be equal. 
+ self.assertAllEqual(data, new_data) + self.assertAllEqual(cls, new_cls) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/modeling/networks/span_labeling.py b/models/official/nlp/modeling/networks/span_labeling.py new file mode 100644 index 0000000000000000000000000000000000000000..2d704c33b6d62ae059d01b81bca146ca1c5adca4 --- /dev/null +++ b/models/official/nlp/modeling/networks/span_labeling.py @@ -0,0 +1,92 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Span labeling network.""" +# pylint: disable=g-classes-have-attributes +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import tensorflow as tf + + +@tf.keras.utils.register_keras_serializable(package='Text') +class SpanLabeling(tf.keras.Model): + """Span labeling network head for BERT modeling. + + This network implements a simple single-span labeler based on a dense layer. + + Arguments: + input_width: The innermost dimension of the input tensor to this network. + activation: The activation, if any, for the dense layer in this network. + initializer: The intializer for the dense layer in this network. Defaults to + a Glorot uniform initializer. + output: The output style for this network. Can be either 'logits' or + 'predictions'. + """ + + def __init__(self, + input_width, + activation=None, + initializer='glorot_uniform', + output='logits', + **kwargs): + self._self_setattr_tracking = False + self._config = { + 'input_width': input_width, + 'activation': activation, + 'initializer': initializer, + 'output': output, + } + + sequence_data = tf.keras.layers.Input( + shape=(None, input_width), name='sequence_data', dtype=tf.float32) + + intermediate_logits = tf.keras.layers.Dense( + 2, # This layer predicts start location and end location. + activation=activation, + kernel_initializer=initializer, + name='predictions/transform/logits')( + sequence_data) + self.start_logits, self.end_logits = ( + tf.keras.layers.Lambda(self._split_output_tensor)(intermediate_logits)) + + start_predictions = tf.keras.layers.Activation(tf.nn.log_softmax)( + self.start_logits) + end_predictions = tf.keras.layers.Activation(tf.nn.log_softmax)( + self.end_logits) + + if output == 'logits': + output_tensors = [self.start_logits, self.end_logits] + elif output == 'predictions': + output_tensors = [start_predictions, end_predictions] + else: + raise ValueError( + ('Unknown `output` value "%s". 
`output` can be either "logits" or ' + '"predictions"') % output) + + super(SpanLabeling, self).__init__( + inputs=[sequence_data], outputs=output_tensors, **kwargs) + + def _split_output_tensor(self, tensor): + transposed_tensor = tf.transpose(tensor, [2, 0, 1]) + return tf.unstack(transposed_tensor) + + def get_config(self): + return self._config + + @classmethod + def from_config(cls, config, custom_objects=None): + return cls(**config) diff --git a/models/official/nlp/modeling/networks/span_labeling_test.py b/models/official/nlp/modeling/networks/span_labeling_test.py new file mode 100644 index 0000000000000000000000000000000000000000..8533a77b7830c1abe921fa93cd4e0cd7e8229475 --- /dev/null +++ b/models/official/nlp/modeling/networks/span_labeling_test.py @@ -0,0 +1,174 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for span_labeling network.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf + +from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import +from official.nlp.modeling.networks import span_labeling + + +# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It +# guarantees forward compatibility of this code for the V2 switchover. +@keras_parameterized.run_all_keras_modes +class SpanLabelingTest(keras_parameterized.TestCase): + + def test_network_creation(self): + """Validate that the Keras object can be created.""" + sequence_length = 15 + input_width = 512 + test_network = span_labeling.SpanLabeling( + input_width=input_width, output='predictions') + # Create a 3-dimensional input (the first dimension is implicit). + sequence_data = tf.keras.Input( + shape=(sequence_length, input_width), dtype=tf.float32) + start_outputs, end_outputs = test_network(sequence_data) + + # Validate that the outputs are of the expected shape. + expected_output_shape = [None, sequence_length] + self.assertEqual(expected_output_shape, start_outputs.shape.as_list()) + self.assertEqual(expected_output_shape, end_outputs.shape.as_list()) + + def test_network_invocation(self): + """Validate that the Keras object can be invoked.""" + sequence_length = 15 + input_width = 512 + test_network = span_labeling.SpanLabeling(input_width=input_width) + + # Create a 3-dimensional input (the first dimension is implicit). + sequence_data = tf.keras.Input( + shape=(sequence_length, input_width), dtype=tf.float32) + outputs = test_network(sequence_data) + model = tf.keras.Model(sequence_data, outputs) + + # Invoke the network as part of a Model. + batch_size = 3 + input_data = 10 * np.random.random_sample( + (batch_size, sequence_length, input_width)) + start_outputs, end_outputs = model.predict(input_data) + + # Validate that the outputs are of the expected shape. 
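+    # SpanLabeling emits two tensors (start and end logits or predictions),
+    # each of shape [batch_size, sequence_length].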
+ expected_output_shape = (batch_size, sequence_length) + self.assertEqual(expected_output_shape, start_outputs.shape) + self.assertEqual(expected_output_shape, end_outputs.shape) + + def test_network_invocation_with_internal_logit_output(self): + """Validate that the logit outputs are correct.""" + sequence_length = 15 + input_width = 512 + test_network = span_labeling.SpanLabeling( + input_width=input_width, output='predictions') + # Create a 3-dimensional input (the first dimension is implicit). + sequence_data = tf.keras.Input( + shape=(sequence_length, input_width), dtype=tf.float32) + output = test_network(sequence_data) + model = tf.keras.Model(sequence_data, output) + logit_model = tf.keras.Model( + test_network.inputs, + [test_network.start_logits, test_network.end_logits]) + + batch_size = 3 + input_data = 10 * np.random.random_sample( + (batch_size, sequence_length, input_width)) + start_outputs, end_outputs = model.predict(input_data) + start_logits, end_logits = logit_model.predict(input_data) + + # Ensure that the tensor shapes are correct. + expected_output_shape = (batch_size, sequence_length) + self.assertEqual(expected_output_shape, start_outputs.shape) + self.assertEqual(expected_output_shape, end_outputs.shape) + self.assertEqual(expected_output_shape, start_logits.shape) + self.assertEqual(expected_output_shape, end_logits.shape) + + # Ensure that the logits, when softmaxed, create the outputs. + input_tensor = tf.keras.Input(expected_output_shape[1:]) + output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor) + softmax_model = tf.keras.Model(input_tensor, output_tensor) + + start_softmax = softmax_model.predict(start_logits) + self.assertAllClose(start_outputs, start_softmax) + end_softmax = softmax_model.predict(end_logits) + self.assertAllClose(end_outputs, end_softmax) + + def test_network_invocation_with_external_logit_output(self): + """Validate that the logit outputs are correct.""" + sequence_length = 15 + input_width = 512 + test_network = span_labeling.SpanLabeling( + input_width=input_width, output='predictions') + logit_network = span_labeling.SpanLabeling( + input_width=input_width, output='logits') + logit_network.set_weights(test_network.get_weights()) + + # Create a 3-dimensional input (the first dimension is implicit). + sequence_data = tf.keras.Input( + shape=(sequence_length, input_width), dtype=tf.float32) + output = test_network(sequence_data) + logit_output = logit_network(sequence_data) + model = tf.keras.Model(sequence_data, output) + logit_model = tf.keras.Model(sequence_data, logit_output) + + batch_size = 3 + input_data = 10 * np.random.random_sample( + (batch_size, sequence_length, input_width)) + start_outputs, end_outputs = model.predict(input_data) + start_logits, end_logits = logit_model.predict(input_data) + + # Ensure that the tensor shapes are correct. + expected_output_shape = (batch_size, sequence_length) + self.assertEqual(expected_output_shape, start_outputs.shape) + self.assertEqual(expected_output_shape, end_outputs.shape) + self.assertEqual(expected_output_shape, start_logits.shape) + self.assertEqual(expected_output_shape, end_logits.shape) + + # Ensure that the logits, when softmaxed, create the outputs. 
+ input_tensor = tf.keras.Input(expected_output_shape[1:]) + output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor) + softmax_model = tf.keras.Model(input_tensor, output_tensor) + + start_softmax = softmax_model.predict(start_logits) + self.assertAllClose(start_outputs, start_softmax) + end_softmax = softmax_model.predict(end_logits) + self.assertAllClose(end_outputs, end_softmax) + + def test_serialize_deserialize(self): + # Create a network object that sets all of its config options. + network = span_labeling.SpanLabeling( + input_width=128, + activation='relu', + initializer='zeros', + output='predictions') + + # Create another network object from the first object's config. + new_network = span_labeling.SpanLabeling.from_config(network.get_config()) + + # Validate that the config can be forced to JSON. + _ = new_network.to_json() + + # If the serialization was successful, the new config should match the old. + self.assertAllEqual(network.get_config(), new_network.get_config()) + + def test_unknown_output_type_fails(self): + with self.assertRaisesRegex(ValueError, 'Unknown `output` value "bad".*'): + _ = span_labeling.SpanLabeling(input_width=10, output='bad') + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/nlp/modeling/networks/token_classification.py b/models/official/nlp/modeling/networks/token_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..ff6163481e6f267a5aefac352ff38447a275a13a --- /dev/null +++ b/models/official/nlp/modeling/networks/token_classification.py @@ -0,0 +1,83 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Classification network.""" +# pylint: disable=g-classes-have-attributes +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import tensorflow as tf + + +@tf.keras.utils.register_keras_serializable(package='Text') +class TokenClassification(tf.keras.Model): + """TokenClassification network head for BERT modeling. + + This network implements a simple token classifier head based on a dense layer. + + Arguments: + input_width: The innermost dimension of the input tensor to this network. + num_classes: The number of classes that this network should classify to. + activation: The activation, if any, for the dense layer in this network. + initializer: The intializer for the dense layer in this network. Defaults to + a Glorot uniform initializer. + output: The output style for this network. Can be either 'logits' or + 'predictions'. 
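+
+  Example (an illustrative sketch; the sizes are arbitrary, and
+  `sequence_output` is assumed to be a float32 Tensor of shape
+  [batch_size, seq_length, input_width], e.g. an encoder's sequence output):
+
+    network = TokenClassification(input_width=768, num_classes=9)
+    per_token_logits = network(sequence_output)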
+ """ + + def __init__(self, + input_width, + num_classes, + initializer='glorot_uniform', + output='logits', + **kwargs): + self._self_setattr_tracking = False + self._config_dict = { + 'input_width': input_width, + 'num_classes': num_classes, + 'initializer': initializer, + 'output': output, + } + + sequence_data = tf.keras.layers.Input( + shape=(None, input_width), name='sequence_data', dtype=tf.float32) + + self.logits = tf.keras.layers.Dense( + num_classes, + activation=None, + kernel_initializer=initializer, + name='predictions/transform/logits')( + sequence_data) + predictions = tf.keras.layers.Activation(tf.nn.log_softmax)(self.logits) + + if output == 'logits': + output_tensors = self.logits + elif output == 'predictions': + output_tensors = predictions + else: + raise ValueError( + ('Unknown `output` value "%s". `output` can be either "logits" or ' + '"predictions"') % output) + + super(TokenClassification, self).__init__( + inputs=[sequence_data], outputs=output_tensors, **kwargs) + + def get_config(self): + return self._config_dict + + @classmethod + def from_config(cls, config, custom_objects=None): + return cls(**config) diff --git a/models/official/nlp/modeling/networks/token_classification_test.py b/models/official/nlp/modeling/networks/token_classification_test.py new file mode 100644 index 0000000000000000000000000000000000000000..eb695c7845b125a5f34d82ff38218ca2dccdfe54 --- /dev/null +++ b/models/official/nlp/modeling/networks/token_classification_test.py @@ -0,0 +1,192 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for token classification network.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf + +from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import +from official.nlp.modeling.networks import token_classification + + +# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It +# guarantees forward compatibility of this code for the V2 switchover. +@keras_parameterized.run_all_keras_modes +class TokenClassificationTest(keras_parameterized.TestCase): + + def test_network_creation(self): + """Validate that the Keras object can be created.""" + sequence_length = 5 + input_width = 512 + num_classes = 10 + test_object = token_classification.TokenClassification( + input_width=input_width, num_classes=num_classes) + # Create a 3-dimensional input (the first dimension is implicit). + sequence_data = tf.keras.Input(shape=(sequence_length, input_width), + dtype=tf.float32) + output = test_object(sequence_data) + + # Validate that the outputs are of the expected shape. 
+ expected_output_shape = [None, sequence_length, num_classes] + self.assertEqual(expected_output_shape, output.shape.as_list()) + + def test_network_invocation(self): + """Validate that the Keras object can be invoked.""" + sequence_length = 5 + input_width = 512 + num_classes = 10 + test_object = token_classification.TokenClassification( + input_width=input_width, num_classes=num_classes, output='predictions') + # Create a 3-dimensional input (the first dimension is implicit). + sequence_data = tf.keras.Input(shape=(sequence_length, input_width), + dtype=tf.float32) + output = test_object(sequence_data) + + # Invoke the network as part of a Model. + model = tf.keras.Model(sequence_data, output) + input_data = 10 * np.random.random_sample((3, sequence_length, input_width)) + _ = model.predict(input_data) + + def test_network_invocation_with_internal_logits(self): + """Validate that the logit outputs are correct.""" + sequence_length = 5 + input_width = 512 + num_classes = 10 + test_object = token_classification.TokenClassification( + input_width=input_width, num_classes=num_classes, output='predictions') + + # Create a 3-dimensional input (the first dimension is implicit). + sequence_data = tf.keras.Input(shape=(sequence_length, input_width), + dtype=tf.float32) + output = test_object(sequence_data) + model = tf.keras.Model(sequence_data, output) + logits_model = tf.keras.Model(test_object.inputs, test_object.logits) + + batch_size = 3 + input_data = 10 * np.random.random_sample( + (batch_size, sequence_length, input_width)) + outputs = model.predict(input_data) + logits = logits_model.predict(input_data) + + # Ensure that the tensor shapes are correct. + expected_output_shape = (batch_size, sequence_length, num_classes) + self.assertEqual(expected_output_shape, outputs.shape) + self.assertEqual(expected_output_shape, logits.shape) + + # Ensure that the logits, when softmaxed, create the outputs. + input_tensor = tf.keras.Input(expected_output_shape[1:]) + output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor) + softmax_model = tf.keras.Model(input_tensor, output_tensor) + + calculated_softmax = softmax_model.predict(logits) + self.assertAllClose(outputs, calculated_softmax) + + def test_network_invocation_with_internal_and_external_logits(self): + """Validate that the logit outputs are correct.""" + sequence_length = 5 + input_width = 512 + num_classes = 10 + test_object = token_classification.TokenClassification( + input_width=input_width, num_classes=num_classes, output='logits') + + # Create a 3-dimensional input (the first dimension is implicit). + sequence_data = tf.keras.Input(shape=(sequence_length, input_width), + dtype=tf.float32) + output = test_object(sequence_data) + model = tf.keras.Model(sequence_data, output) + logits_model = tf.keras.Model(test_object.inputs, test_object.logits) + + batch_size = 3 + input_data = 10 * np.random.random_sample( + (batch_size, sequence_length, input_width)) + outputs = model.predict(input_data) + logits = logits_model.predict(input_data) + + # Ensure that the tensor shapes are correct. 
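+    # TokenClassification scores every position, so the outputs have shape
+    # [batch_size, sequence_length, num_classes].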
+ expected_output_shape = (batch_size, sequence_length, num_classes) + self.assertEqual(expected_output_shape, outputs.shape) + self.assertEqual(expected_output_shape, logits.shape) + + self.assertAllClose(outputs, logits) + + def test_network_invocation_with_logit_output(self): + """Validate that the logit outputs are correct.""" + sequence_length = 5 + input_width = 512 + num_classes = 10 + test_object = token_classification.TokenClassification( + input_width=input_width, num_classes=num_classes, output='predictions') + logit_object = token_classification.TokenClassification( + input_width=input_width, num_classes=num_classes, output='logits') + logit_object.set_weights(test_object.get_weights()) + + # Create a 3-dimensional input (the first dimension is implicit). + sequence_data = tf.keras.Input(shape=(sequence_length, input_width), + dtype=tf.float32) + output = test_object(sequence_data) + logit_output = logit_object(sequence_data) + + model = tf.keras.Model(sequence_data, output) + logits_model = tf.keras.Model(sequence_data, logit_output) + + batch_size = 3 + input_data = 10 * np.random.random_sample( + (batch_size, sequence_length, input_width)) + outputs = model.predict(input_data) + logits = logits_model.predict(input_data) + + # Ensure that the tensor shapes are correct. + expected_output_shape = (batch_size, sequence_length, num_classes) + self.assertEqual(expected_output_shape, outputs.shape) + self.assertEqual(expected_output_shape, logits.shape) + + # Ensure that the logits, when softmaxed, create the outputs. + input_tensor = tf.keras.Input(expected_output_shape[1:]) + output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor) + softmax_model = tf.keras.Model(input_tensor, output_tensor) + + calculated_softmax = softmax_model.predict(logits) + self.assertAllClose(outputs, calculated_softmax) + + def test_serialize_deserialize(self): + # Create a network object that sets all of its config options. + network = token_classification.TokenClassification( + input_width=128, + num_classes=10, + initializer='zeros', + output='predictions') + + # Create another network object from the first object's config. + new_network = token_classification.TokenClassification.from_config( + network.get_config()) + + # Validate that the config can be forced to JSON. + _ = new_network.to_json() + + # If the serialization was successful, the new config should match the old. + self.assertAllEqual(network.get_config(), new_network.get_config()) + + def test_unknown_output_type_fails(self): + with self.assertRaisesRegex(ValueError, 'Unknown `output` value "bad".*'): + _ = token_classification.TokenClassification( + input_width=128, num_classes=10, output='bad') + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/nlp/modeling/networks/transformer_encoder.py b/models/official/nlp/modeling/networks/transformer_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..7c6054ddcc242d5184c6e0e4dcd5102e6955b915 --- /dev/null +++ b/models/official/nlp/modeling/networks/transformer_encoder.py @@ -0,0 +1,238 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Transformer-based text encoder network.""" +# pylint: disable=g-classes-have-attributes +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import tensorflow as tf + +from official.modeling import activations +from official.nlp.modeling import layers + + +@tf.keras.utils.register_keras_serializable(package='Text') +class TransformerEncoder(tf.keras.Model): + """Bi-directional Transformer-based encoder network. + + This network implements a bi-directional Transformer-based encoder as + described in "BERT: Pre-training of Deep Bidirectional Transformers for + Language Understanding" (https://arxiv.org/abs/1810.04805). It includes the + embedding lookups and transformer layers, but not the masked language model + or classification task networks. + + The default values for this object are taken from the BERT-Base implementation + in "BERT: Pre-training of Deep Bidirectional Transformers for Language + Understanding". + + Arguments: + vocab_size: The size of the token vocabulary. + hidden_size: The size of the transformer hidden layers. + num_layers: The number of transformer layers. + num_attention_heads: The number of attention heads for each transformer. The + hidden size must be divisible by the number of attention heads. + sequence_length: The sequence length that this encoder expects. If None, the + sequence length is dynamic; if an integer, the encoder will require + sequences padded to this length. + max_sequence_length: The maximum sequence length that this encoder can + consume. If None, max_sequence_length uses the value from sequence_length. + This determines the variable shape for positional embeddings. + type_vocab_size: The number of types that the 'type_ids' input can take. + intermediate_size: The intermediate size for the transformer layers. + activation: The activation to use for the transformer layers. + dropout_rate: The dropout rate to use for the transformer layers. + attention_dropout_rate: The dropout rate to use for the attention layers + within the transformer layers. + initializer: The initializer to use for all weights in this encoder. + return_all_encoder_outputs: Whether to output sequence embedding outputs of + all encoder transformer layers. + output_range: The sequence output range, [0, output_range), by slicing the + target sequence of the last transformer layer. `None` means the entire + target sequence will attend to the source sequence, which yields the full + output. + embedding_width: The width of the word embeddings. If the embedding width + is not equal to hidden size, embedding parameters will be factorized into + two matrices in the shape of ['vocab_size', 'embedding_width'] and + ['embedding_width', 'hidden_size'] ('embedding_width' is usually much + smaller than 'hidden_size'). + embedding_layer: The word embedding layer. `None` means we will create a new + embedding layer. Otherwise, we will reuse the given embedding layer.
This + parameter is originally added for ELECTRA model which needs to tie the + generator embeddings with the discriminator embeddings. + """ + + def __init__(self, + vocab_size, + hidden_size=768, + num_layers=12, + num_attention_heads=12, + sequence_length=512, + max_sequence_length=None, + type_vocab_size=16, + intermediate_size=3072, + activation=activations.gelu, + dropout_rate=0.1, + attention_dropout_rate=0.1, + initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02), + return_all_encoder_outputs=False, + output_range=None, + embedding_width=None, + embedding_layer=None, + **kwargs): + activation = tf.keras.activations.get(activation) + initializer = tf.keras.initializers.get(initializer) + + if not max_sequence_length: + max_sequence_length = sequence_length + self._self_setattr_tracking = False + self._config_dict = { + 'vocab_size': vocab_size, + 'hidden_size': hidden_size, + 'num_layers': num_layers, + 'num_attention_heads': num_attention_heads, + 'sequence_length': sequence_length, + 'max_sequence_length': max_sequence_length, + 'type_vocab_size': type_vocab_size, + 'intermediate_size': intermediate_size, + 'activation': tf.keras.activations.serialize(activation), + 'dropout_rate': dropout_rate, + 'attention_dropout_rate': attention_dropout_rate, + 'initializer': tf.keras.initializers.serialize(initializer), + 'return_all_encoder_outputs': return_all_encoder_outputs, + 'output_range': output_range, + 'embedding_width': embedding_width, + } + + word_ids = tf.keras.layers.Input( + shape=(sequence_length,), dtype=tf.int32, name='input_word_ids') + mask = tf.keras.layers.Input( + shape=(sequence_length,), dtype=tf.int32, name='input_mask') + type_ids = tf.keras.layers.Input( + shape=(sequence_length,), dtype=tf.int32, name='input_type_ids') + + if embedding_width is None: + embedding_width = hidden_size + if embedding_layer is None: + self._embedding_layer = layers.OnDeviceEmbedding( + vocab_size=vocab_size, + embedding_width=embedding_width, + initializer=initializer, + name='word_embeddings') + else: + self._embedding_layer = embedding_layer + word_embeddings = self._embedding_layer(word_ids) + + # Always uses dynamic slicing for simplicity. + self._position_embedding_layer = layers.PositionEmbedding( + initializer=initializer, + use_dynamic_slicing=True, + max_sequence_length=max_sequence_length, + name='position_embedding') + position_embeddings = self._position_embedding_layer(word_embeddings) + self._type_embedding_layer = layers.OnDeviceEmbedding( + vocab_size=type_vocab_size, + embedding_width=embedding_width, + initializer=initializer, + use_one_hot=True, + name='type_embeddings') + type_embeddings = self._type_embedding_layer(type_ids) + + embeddings = tf.keras.layers.Add()( + [word_embeddings, position_embeddings, type_embeddings]) + + embeddings = ( + tf.keras.layers.LayerNormalization( + name='embeddings/layer_norm', + axis=-1, + epsilon=1e-12, + dtype=tf.float32)(embeddings)) + embeddings = ( + tf.keras.layers.Dropout(rate=dropout_rate)(embeddings)) + + # We project the 'embedding' output to 'hidden_size' if it is not already + # 'hidden_size'. 
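+ # When embedding_width differs from hidden_size, the word embedding is + # effectively factorized into a [vocab_size, embedding_width] lookup and an + # [embedding_width, hidden_size] projection; the EinsumDense below supplies + # that second factor, mapping [..., embedding_width] to [..., hidden_size].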
+ if embedding_width != hidden_size: + self._embedding_projection = tf.keras.layers.experimental.EinsumDense( + '...x,xy->...y', + output_shape=hidden_size, + bias_axes='y', + kernel_initializer=initializer, + name='embedding_projection') + embeddings = self._embedding_projection(embeddings) + + self._transformer_layers = [] + data = embeddings + attention_mask = layers.SelfAttentionMask()([data, mask]) + encoder_outputs = [] + for i in range(num_layers): + if i == num_layers - 1 and output_range is not None: + transformer_output_range = output_range + else: + transformer_output_range = None + layer = layers.Transformer( + num_attention_heads=num_attention_heads, + intermediate_size=intermediate_size, + intermediate_activation=activation, + dropout_rate=dropout_rate, + attention_dropout_rate=attention_dropout_rate, + output_range=transformer_output_range, + kernel_initializer=initializer, + name='transformer/layer_%d' % i) + self._transformer_layers.append(layer) + data = layer([data, attention_mask]) + encoder_outputs.append(data) + + first_token_tensor = ( + tf.keras.layers.Lambda(lambda x: tf.squeeze(x[:, 0:1, :], axis=1))( + encoder_outputs[-1])) + self._pooler_layer = tf.keras.layers.Dense( + units=hidden_size, + activation='tanh', + kernel_initializer=initializer, + name='pooler_transform') + cls_output = self._pooler_layer(first_token_tensor) + + if return_all_encoder_outputs: + outputs = [encoder_outputs, cls_output] + else: + outputs = [encoder_outputs[-1], cls_output] + + super(TransformerEncoder, self).__init__( + inputs=[word_ids, mask, type_ids], outputs=outputs, **kwargs) + + def get_embedding_table(self): + return self._embedding_layer.embeddings + + def get_embedding_layer(self): + return self._embedding_layer + + def get_config(self): + return self._config_dict + + @property + def transformer_layers(self): + """List of Transformer layers in the encoder.""" + return self._transformer_layers + + @property + def pooler_layer(self): + """The pooler dense layer after the transformer layers.""" + return self._pooler_layer + + @classmethod + def from_config(cls, config, custom_objects=None): + return cls(**config) diff --git a/models/official/nlp/modeling/networks/transformer_encoder_test.py b/models/official/nlp/modeling/networks/transformer_encoder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..e9fbc3aaa25e39908618626538902643edaabe72 --- /dev/null +++ b/models/official/nlp/modeling/networks/transformer_encoder_test.py @@ -0,0 +1,231 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for transformer-based text encoder network.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +import numpy as np +import tensorflow as tf + +from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import +from official.nlp.modeling.networks import transformer_encoder + + +# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It +# guarantees forward compatibility of this code for the V2 switchover. +@keras_parameterized.run_all_keras_modes +class TransformerEncoderTest(keras_parameterized.TestCase): + + def tearDown(self): + super(TransformerEncoderTest, self).tearDown() + tf.keras.mixed_precision.experimental.set_policy("float32") + + def test_network_creation(self): + hidden_size = 32 + sequence_length = 21 + # Create a small TransformerEncoder for testing. + test_network = transformer_encoder.TransformerEncoder( + vocab_size=100, + hidden_size=hidden_size, + sequence_length=sequence_length, + num_attention_heads=2, + num_layers=3) + # Create the inputs (note that the first dimension is implicit). + word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + data, pooled = test_network([word_ids, mask, type_ids]) + + self.assertIsInstance(test_network.transformer_layers, list) + self.assertLen(test_network.transformer_layers, 3) + self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense) + + expected_data_shape = [None, sequence_length, hidden_size] + expected_pooled_shape = [None, hidden_size] + self.assertAllEqual(expected_data_shape, data.shape.as_list()) + self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) + + # The default output dtype is float32. + self.assertAllEqual(tf.float32, data.dtype) + self.assertAllEqual(tf.float32, pooled.dtype) + + def test_all_encoder_outputs_network_creation(self): + hidden_size = 32 + sequence_length = 21 + # Create a small TransformerEncoder for testing. + test_network = transformer_encoder.TransformerEncoder( + vocab_size=100, + hidden_size=hidden_size, + sequence_length=sequence_length, + num_attention_heads=2, + num_layers=3, + return_all_encoder_outputs=True) + # Create the inputs (note that the first dimension is implicit). + word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + all_encoder_outputs, pooled = test_network([word_ids, mask, type_ids]) + + expected_data_shape = [None, sequence_length, hidden_size] + expected_pooled_shape = [None, hidden_size] + self.assertLen(all_encoder_outputs, 3) + for data in all_encoder_outputs: + self.assertAllEqual(expected_data_shape, data.shape.as_list()) + self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) + + # The default output dtype is float32. + self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype) + self.assertAllEqual(tf.float32, pooled.dtype) + + def test_network_creation_with_float16_dtype(self): + hidden_size = 32 + sequence_length = 21 + tf.keras.mixed_precision.experimental.set_policy("mixed_float16") + # Create a small TransformerEncoder for testing. 
+ test_network = transformer_encoder.TransformerEncoder( + vocab_size=100, + hidden_size=hidden_size, + sequence_length=sequence_length, + num_attention_heads=2, + num_layers=3) + # Create the inputs (note that the first dimension is implicit). + word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + data, pooled = test_network([word_ids, mask, type_ids]) + + expected_data_shape = [None, sequence_length, hidden_size] + expected_pooled_shape = [None, hidden_size] + self.assertAllEqual(expected_data_shape, data.shape.as_list()) + self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) + + # If float_dtype is set to float16, the data output is float32 (from a layer + # norm) and pool output should be float16. + self.assertAllEqual(tf.float32, data.dtype) + self.assertAllEqual(tf.float16, pooled.dtype) + + @parameterized.named_parameters( + ("all_sequence", None, 21), + ("output_range", 1, 1), + ) + def test_network_invocation(self, output_range, out_seq_len): + hidden_size = 32 + sequence_length = 21 + vocab_size = 57 + num_types = 7 + # Create a small TransformerEncoder for testing. + test_network = transformer_encoder.TransformerEncoder( + vocab_size=vocab_size, + hidden_size=hidden_size, + sequence_length=sequence_length, + num_attention_heads=2, + num_layers=3, + type_vocab_size=num_types, + output_range=output_range) + self.assertTrue( + test_network._position_embedding_layer._use_dynamic_slicing) + # Create the inputs (note that the first dimension is implicit). + word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) + data, pooled = test_network([word_ids, mask, type_ids]) + + # Create a model based off of this network: + model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) + + # Invoke the model. We can't validate the output data here (the model is too + # complex) but this will catch structural runtime errors. 
+ batch_size = 3 + word_id_data = np.random.randint( + vocab_size, size=(batch_size, sequence_length)) + mask_data = np.random.randint(2, size=(batch_size, sequence_length)) + type_id_data = np.random.randint( + num_types, size=(batch_size, sequence_length)) + _ = model.predict([word_id_data, mask_data, type_id_data]) + + # Creates a TransformerEncoder with max_sequence_length != sequence_length + max_sequence_length = 128 + test_network = transformer_encoder.TransformerEncoder( + vocab_size=vocab_size, + hidden_size=hidden_size, + sequence_length=sequence_length, + max_sequence_length=max_sequence_length, + num_attention_heads=2, + num_layers=3, + type_vocab_size=num_types) + self.assertTrue(test_network._position_embedding_layer._use_dynamic_slicing) + model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) + outputs = model.predict([word_id_data, mask_data, type_id_data]) + self.assertEqual(outputs[0].shape[1], out_seq_len) + + # Creates a TransformerEncoder with embedding_width != hidden_size + test_network = transformer_encoder.TransformerEncoder( + vocab_size=vocab_size, + hidden_size=hidden_size, + sequence_length=sequence_length, + max_sequence_length=max_sequence_length, + num_attention_heads=2, + num_layers=3, + type_vocab_size=num_types, + embedding_width=16) + model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) + outputs = model.predict([word_id_data, mask_data, type_id_data]) + self.assertEqual(outputs[0].shape[-1], hidden_size) + self.assertTrue(hasattr(test_network, "_embedding_projection")) + + def test_serialize_deserialize(self): + tf.keras.mixed_precision.experimental.set_policy("mixed_float16") + # Create a network object that sets all of its config options. + kwargs = dict( + vocab_size=100, + hidden_size=32, + num_layers=3, + num_attention_heads=2, + sequence_length=21, + max_sequence_length=21, + type_vocab_size=12, + intermediate_size=1223, + activation="relu", + dropout_rate=0.05, + attention_dropout_rate=0.22, + initializer="glorot_uniform", + return_all_encoder_outputs=False, + output_range=-1, + embedding_width=16) + network = transformer_encoder.TransformerEncoder(**kwargs) + + expected_config = dict(kwargs) + expected_config["activation"] = tf.keras.activations.serialize( + tf.keras.activations.get(expected_config["activation"])) + expected_config["initializer"] = tf.keras.initializers.serialize( + tf.keras.initializers.get(expected_config["initializer"])) + self.assertEqual(network.get_config(), expected_config) + + # Create another network object from the first object's config. + new_network = transformer_encoder.TransformerEncoder.from_config( + network.get_config()) + + # Validate that the config can be forced to JSON. + _ = new_network.to_json() + + # If the serialization was successful, the new config should match the old. + self.assertAllEqual(network.get_config(), new_network.get_config()) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/nhnet/README.md b/models/official/nlp/nhnet/README.md new file mode 100644 index 0000000000000000000000000000000000000000..14c55636ab52b4582cb6b12e88a282c7adbb059e --- /dev/null +++ b/models/official/nlp/nhnet/README.md @@ -0,0 +1,168 @@ +# Multi-doc News Headline Generation Model: NHNet + +This repository contains TensorFlow 2.x implementation for NHNet [[1]](#1) as +well as instructions for producing the data we described in the paper. + +## Introduction + +NHNet is a multi-doc news headline generation model. 
It extends a standard +Transformer-based encoder-decoder model to the multi-doc setting and relies on an +article-level attention layer to capture information common to most (if not all) +input news articles in a news cluster or story, and to provide robustness against +potential outliers in the input due to clustering quality. + +Our academic paper [[1]](#1), which describes NHNet in detail, can be found here: +https://arxiv.org/abs/2001.09386. + +## Dataset + +**Raw Data:** One can [download](https://github.com/google-research-datasets/NewSHead) +our multi-doc headline dataset, which +contains 369,940 news stories and 932,571 unique URLs. We split these stories +into train (359,940 stories), validation (5,000 stories) and test (5,000 +stories) sets by timestamp. + +For more information, please check out: +https://github.com/google-research-datasets/NewSHead + +### Crawling + +Unfortunately, we are not able to release the pre-processed dataset exactly as +used in the paper. Users need to crawl the URLs themselves, and the recommended +pre-processing is to use an open-source library to download and parse the news +content, including the title and leading paragraphs. To ease this process, we +provide a config for [news-please](https://github.com/fhamborg/news-please) that +will crawl and extract news articles on a local machine. + +First, install the `news-please` CLI (requires Python 3.x): +```shell +$ pip3 install news-please +``` + +Next, run the crawler with our provided [config and URL list](https://github.com/google-research-datasets/NewSHead/releases): + +```shell +# Sets the path of the downloaded data folder. +$ DATA_FOLDER=/path/to/downloaded_dataset + +# Uses the CLI interface to crawl. We assume the news_please subfolder contains +# the decompressed config.cfg and sitelist.hjson. +$ news-please -c $DATA_FOLDER/news_please +``` +By default, it will store crawled +articles under `/tmp/nhnet/`. To terminate the process, press `CTRL+C`. + +Crawling may take several days (48 hours in our test), depending on the +network environment and the number of threads set in the config. As the crawling +tool won't stop automatically, it is not straightforward to check the progress. +We suggest terminating the job when no new articles have been crawled for a short +period (e.g., 10 minutes), which you can check by running +```shell +$ find /tmp/nhnet -type f | wc -l +``` +Please note that some URLs are expected to become unavailable on the +web as time goes by. + +### Data Processing + +Given the crawled articles under `/tmp/nhnet/`, we transform these +textual articles into a set of `TFRecord` files containing serialized +tensorflow.Example protocol buffers, with feature keys following the BERT +[[2]](#2) convention but extended for multiple text segments. We will later +use these processed TFRecords for training and evaluation. + +To do this, please first download a [BERT pretrained checkpoint](https://github.com/tensorflow/models/tree/master/official/nlp/bert#access-to-pretrained-checkpoints) +(`BERT-Base,Uncased` preferred for efficiency) and decompress the `tar.gz` file. +We need its vocabulary file here and will later use the checkpoint for NHNet +initialization. + +Next, run the following data preprocessing script, which may take a few hours +to read the files and tokenize the article content. + + +```shell +# Recall that we use DATA_FOLDER=/path/to/downloaded_dataset.
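+# Note: the length/article limits below (-len_title=15, -len_passage=200, +# -max_num_articles=5) match the --len_title/--len_passage/--max_num_articles +# flags used by trainer.py later in this README; keeping them consistent is +# presumably required so that training sees the shapes produced here.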
+$ python3 raw_data_preprocess.py \ + -crawled_articles=/tmp/nhnet \ + -vocab=/path/to/bert_checkpoint/vocab.txt \ + -do_lower_case=True \ + -len_title=15 \ + -len_passage=200 \ + -max_num_articles=5 \ + -data_folder=$DATA_FOLDER +``` + +This Python script will export the processed train/valid/eval files under +`$DATA_FOLDER/processed/`. + +## Training + +Please first install TensorFlow 2 and the TensorFlow Model Garden following the +[requirements section](https://github.com/tensorflow/models/tree/master/official#requirements). + +### CPU/GPU +```shell +$ python3 trainer.py \ + --mode=train_and_eval \ + --vocab=/path/to/bert_checkpoint/vocab.txt \ + --init_checkpoint=/path/to/bert_checkpoint/bert_model.ckpt \ + --params_override='init_from_bert2bert=false' \ + --train_file_pattern=$DATA_FOLDER/processed/train.tfrecord* \ + --model_dir=/path/to/output/model \ + --len_title=15 \ + --len_passage=200 \ + --max_num_articles=5 \ + --model_type=nhnet \ + --train_batch_size=16 \ + --train_steps=10000 \ + --steps_per_loop=1 \ + --checkpoint_interval=100 +``` + +### TPU +```shell +$ python3 trainer.py \ + --mode=train_and_eval \ + --vocab=/path/to/bert_checkpoint/vocab.txt \ + --init_checkpoint=/path/to/bert_checkpoint/bert_model.ckpt \ + --params_override='init_from_bert2bert=false' \ + --train_file_pattern=$DATA_FOLDER/processed/train.tfrecord* \ + --model_dir=/path/to/output/model \ + --len_title=15 \ + --len_passage=200 \ + --max_num_articles=5 \ + --model_type=nhnet \ + --train_batch_size=1024 \ + --train_steps=10000 \ + --steps_per_loop=1000 \ + --checkpoint_interval=1000 \ + --distribution_strategy=tpu \ + --tpu=grpc://${TPU_IP_ADDRESS}:8470 +``` +In the paper, we train for more than 10k steps with a batch size of 1024 on a +TPU v3-64. + +Note that `trainer.py` also supports a `train`-only mode and a continuous `eval` mode. +For large-scale TPU training, we recommend running one process in the +`train` mode and another process in the continuous `eval` mode, which can +run on GPUs. +This is the setting we commonly use for large-scale experiments, because `eval` +then does not block the expensive training job. + +### Metrics +**Note: the metrics reported by `evaluation.py` are approximated at the +word-piece level rather than computed on the real string tokens. Some metrics, +such as BLEU scores, can be off.** + +We will soon release a colab to evaluate results at the string level. + +## References + +[1] Xiaotao Gu, Yuning Mao, Jiawei Han, Jialu Liu, You Wu, Cong +Yu, Daniel Finnie, Hongkun Yu, Jiaqi Zhai and Nicholas Zukoski "Generating +Representative Headlines for News Stories": https://arxiv.org/abs/2001.09386. +World Wide Web Conf. (WWW’2020). + +[2] Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina +Toutanova "BERT: Pre-training of Deep Bidirectional Transformers for Language +Understanding": https://arxiv.org/abs/1810.04805. diff --git a/models/official/nlp/nhnet/__init__.py b/models/official/nlp/nhnet/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/official/nlp/nhnet/configs.py b/models/official/nlp/nhnet/configs.py new file mode 100644 index 0000000000000000000000000000000000000000..41cfa6117cb49e00224becb87b129401562a9807 --- /dev/null +++ b/models/official/nlp/nhnet/configs.py @@ -0,0 +1,107 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Common NHNet/Bert2Bert configuration.""" + +from typing import List, Text + +import dataclasses + +from official.modeling.hyperparams import base_config + + +@dataclasses.dataclass +class BERT2BERTConfig(base_config.Config): + """High-level configurations for BERT2BERT model. + + These include parameters that are not directly related to the experiment, + e.g. encoder, decoder, prediction, training, etc. + """ + vocab_size: int = 30522 + hidden_size: int = 768 + num_hidden_layers: int = 12 + num_attention_heads: int = 12 + intermediate_size: int = 3072 + hidden_act: str = "gelu" + hidden_dropout_prob: float = 0.1 + attention_probs_dropout_prob: float = 0.1 + max_position_embeddings: int = 512 + type_vocab_size: int = 2 + initializer_range: float = 0.02 + decoder_intermediate_size: int = 3072 + num_decoder_attn_heads: int = 12 + num_decoder_layers: int = 12 + + label_smoothing: float = 0.1 + learning_rate: float = 0.05 + learning_rate_warmup_steps: int = 20000 + optimizer: str = "Adam" + adam_beta1: float = 0.9 + adam_beta2: float = 0.997 + adam_epsilon: float = 1e-09 + + # predict params + beam_size: int = 5 + alpha: float = 0.6 + initializer_gain: float = 1.0 + use_cache: bool = True + + # input params + input_sharding: bool = False + input_data_not_padded: bool = False + pad_token_id: int = 0 + end_token_id: int = 102 + start_token_id: int = 101 + + +@dataclasses.dataclass +class NHNetConfig(BERT2BERTConfig): + """High-level configurations for NHNet model. + + These include parameters that are not directly related to the experiment, + e.g. encoder, decoder, prediction, training, etc. + """ + multi_channel_cross_attention: bool = True + passage_list: List[Text] = dataclasses.field( + default_factory=lambda: [chr(ord("b") + i) for i in range(5)]) + + # Initialization method. + # If init_from_bert2bert is false, we assume the checkpoint is from BERT + # pretraining and only encoder and self-attention variables are initialized. + init_from_bert2bert: bool = True + + +UNITTEST_CONFIG = { + "attention_probs_dropout_prob": 0.0, + "hidden_act": "gelu", + "hidden_dropout_prob": 0.0, + "hidden_size": 16, + "initializer_range": 0.02, + "intermediate_size": 32, + "max_position_embeddings": 128, + "num_attention_heads": 2, + "num_hidden_layers": 1, + "type_vocab_size": 2, + "vocab_size": 30522, + "initializer_gain": 1.0, + "decoder_intermediate_size": 32, + "num_decoder_attn_heads": 2, + "num_decoder_layers": 1, + "use_cache": True, + "input_data_not_padded": False, + "pad_token_id": 0, + "end_token_id": 102, + "start_token_id": 101, +} diff --git a/models/official/nlp/nhnet/configs_test.py b/models/official/nlp/nhnet/configs_test.py new file mode 100644 index 0000000000000000000000000000000000000000..2b855ec6a955cd7f2a50fb173f7f5efb68b84263 --- /dev/null +++ b/models/official/nlp/nhnet/configs_test.py @@ -0,0 +1,121 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for configs.""" + +import tensorflow as tf +from official.nlp.nhnet import configs + +BERT2BERT_CONFIG = { + "vocab_size": 30522, + "hidden_size": 768, + "num_hidden_layers": 12, + "num_attention_heads": 12, + "intermediate_size": 3072, + "hidden_act": "gelu", + "hidden_dropout_prob": 0.1, + "attention_probs_dropout_prob": 0.1, + "max_position_embeddings": 512, + "type_vocab_size": 2, + "initializer_range": 0.02, + + # model params + "decoder_intermediate_size": 3072, + "num_decoder_attn_heads": 12, + "num_decoder_layers": 12, + + # training params + "label_smoothing": 0.1, + "learning_rate": 0.05, + "learning_rate_warmup_steps": 20000, + "optimizer": "Adam", + "adam_beta1": 0.9, + "adam_beta2": 0.997, + "adam_epsilon": 1e-09, + + # predict params + "beam_size": 5, + "alpha": 0.6, + "initializer_gain": 1.0, + "use_cache": True, + + # input params + "input_sharding": False, + "input_data_not_padded": False, + "pad_token_id": 0, + "end_token_id": 102, + "start_token_id": 101, +} + +NHNET_CONFIG = { + "vocab_size": 30522, + "hidden_size": 768, + "num_hidden_layers": 12, + "num_attention_heads": 12, + "intermediate_size": 3072, + "hidden_act": "gelu", + "hidden_dropout_prob": 0.1, + "attention_probs_dropout_prob": 0.1, + "max_position_embeddings": 512, + "type_vocab_size": 2, + "initializer_range": 0.02, + + # model params + "decoder_intermediate_size": 3072, + "num_decoder_attn_heads": 12, + "num_decoder_layers": 12, + "multi_channel_cross_attention": True, + + # training params + "label_smoothing": 0.1, + "learning_rate": 0.05, + "learning_rate_warmup_steps": 20000, + "optimizer": "Adam", + "adam_beta1": 0.9, + "adam_beta2": 0.997, + "adam_epsilon": 1e-09, + + # predict params + "beam_size": 5, + "alpha": 0.6, + "initializer_gain": 1.0, + "use_cache": True, + + # input params + "passage_list": ["b", "c", "d", "e", "f"], + "input_sharding": False, + "input_data_not_padded": False, + "pad_token_id": 0, + "end_token_id": 102, + "start_token_id": 101, + + "init_from_bert2bert": True, +} + + +class ConfigsTest(tf.test.TestCase): + + def test_configs(self): + cfg = configs.BERT2BERTConfig() + cfg.validate() + self.assertEqual(cfg.as_dict(), BERT2BERT_CONFIG) + + def test_nhnet_config(self): + cfg = configs.NHNetConfig() + cfg.validate() + self.assertEqual(cfg.as_dict(), NHNET_CONFIG) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/nhnet/decoder.py b/models/official/nlp/nhnet/decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..b38fa2a6b6a251af48848e5d0a8d684be8f4c098 --- /dev/null +++ b/models/official/nlp/nhnet/decoder.py @@ -0,0 +1,375 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Transformer decoder that mimics a BERT encoder, to load BERT checkpoints.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import tensorflow as tf +from official.modeling import tf_utils +from official.nlp.modeling import layers +from official.nlp.modeling.layers import transformer +from official.nlp.transformer import model_utils as transformer_utils + + +class TransformerDecoder(tf.keras.layers.Layer): + """Transformer decoder stack.""" + + def __init__(self, + num_hidden_layers=12, + hidden_size=768, + num_attention_heads=12, + intermediate_size=3072, + intermediate_activation="gelu", + hidden_dropout_prob=0.0, + attention_probs_dropout_prob=0.0, + initializer_range=0.02, + attend_to_last_layer=True, + multi_channel_cross_attention=False, + **kwargs): + super(TransformerDecoder, self).__init__(**kwargs) + self.num_hidden_layers = num_hidden_layers + self.hidden_size = hidden_size + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.intermediate_activation = tf_utils.get_activation( + intermediate_activation) + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.initializer_range = initializer_range + self.attend_to_last_layer = attend_to_last_layer + self.multi_channel_cross_attention = multi_channel_cross_attention + + def build(self, unused_input_shapes): + """Implements build() for the layer.""" + self.layers = [] + for i in range(self.num_hidden_layers): + self.layers.append( + transformer.TransformerDecoderLayer( + num_attention_heads=self.num_attention_heads, + intermediate_size=self.intermediate_size, + intermediate_activation=self.intermediate_activation, + dropout_rate=self.hidden_dropout_prob, + attention_dropout_rate=self.attention_probs_dropout_prob, + kernel_initializer=tf.keras.initializers.TruncatedNormal( + stddev=self.initializer_range), + multi_channel_cross_attention=self.multi_channel_cross_attention, + name=("layer_%d" % i))) + super(TransformerDecoder, self).build(unused_input_shapes) + + def call(self, inputs, cache=None, decode_loop_step=None): + """Return the output of the decoder layer stacks. + + Args: + inputs: A dictionary of inputs. `decoder_inputs` is a tf.int32 tensor for + input ids. `encoder_outputs` is a list of tensors with shape + [batch_size, input_length, hidden_size]. `self_attention_mask` is the + bias for decoder self-attention layer. [1, 1, target_length, + target_length]. `attention_mask` is the bias for encoder-decoder + attention layer, [batch_size, 1, 1, input_length]. + cache: A dictionary of cache tensors, including key & value attentions. + decode_loop_step: an integer to indicate the step inside a decoding loop. + + Returns: + Output of decoder layer stack. 
+ float32 tensor with shape [batch_size, target_length, hidden_size] + """ + decoder_inputs = inputs["decoder_inputs"] + encoder_outputs = inputs["encoder_outputs"] + self_attention_mask = inputs["self_attention_mask"] + attention_mask = inputs["attention_mask"] + decoder_shape = tf_utils.get_shape_list(decoder_inputs, expected_rank=3) + batch_size = decoder_shape[0] + decoder_length = decoder_shape[1] + + def _to_bert_self_attention_mask(matrix): + """[1, 1, target_len, target_len] -> [bs, target_len, target_len].""" + matrix = tf.squeeze(matrix, axis=[1]) + matrix = tf.tile(matrix, [batch_size, 1, 1]) + return matrix + + def _to_bert_encdec_attention_mask(matrix): + """[bs, 1, 1, input_len] -> [bs, target_len, input_len].""" + if self.multi_channel_cross_attention: + matrix = tf.expand_dims(matrix, axis=2) + matrix = tf.tile(matrix, [1, 1, decoder_length, 1]) + else: + matrix = tf.squeeze(matrix, axis=[1]) + matrix = tf.tile(matrix, [1, decoder_length, 1]) + return matrix + + attention_mask = _to_bert_encdec_attention_mask(attention_mask) + self_attention_mask = _to_bert_self_attention_mask(self_attention_mask) + + output_tensor = decoder_inputs + for layer_idx in range(self.num_hidden_layers): + if self.attend_to_last_layer: + memory = encoder_outputs[-1] + else: + memory = encoder_outputs[layer_idx] + if self.multi_channel_cross_attention: + transformer_inputs = [ + output_tensor, memory, attention_mask, self_attention_mask, + inputs["doc_attention_probs"] + ] + else: + transformer_inputs = [ + output_tensor, memory, attention_mask, self_attention_mask + ] + # Gets the cache for decoding. + if cache is None: + output_tensor, _ = self.layers[layer_idx](transformer_inputs) + else: + cache_layer_idx = str(layer_idx) + output_tensor, cache[cache_layer_idx] = self.layers[layer_idx]( + transformer_inputs, + cache=cache[cache_layer_idx], + decode_loop_step=decode_loop_step) + return output_tensor, cache + + +def get_attention_bias(input_tensor, + bias_type, + padding_value=0, + max_length=None): + """A helper function to get various attention bias tensors.""" + if bias_type not in ("single_cross", "multi_cross", "decoder_self"): + raise ValueError("Invalid attention bias type: %s" % bias_type) + if bias_type == "single_cross": + length = tf_utils.get_shape_list(input_tensor, expected_rank=2)[1] + bias = transformer_utils.get_padding_bias( + input_tensor, padding_value=padding_value) + elif bias_type == "multi_cross": + length = tf_utils.get_shape_list(input_tensor, expected_rank=3)[2] + padding = transformer_utils.get_padding( + input_tensor, padding_value=padding_value) + bias = padding * -1e9 + else: + if max_length is not None: + length = max_length + else: + length = tf_utils.get_shape_list(input_tensor, expected_rank=2)[1] + bias = transformer_utils.get_decoder_self_attention_bias(length) + + return tf.where(bias < 0, tf.zeros_like(bias), tf.ones_like(bias)) + + +class AttentionBias(tf.keras.layers.Layer): + + def __init__(self, bias_type, **kwargs): + super(AttentionBias, self).__init__(**kwargs) + self.bias_type = bias_type + + def call(self, inputs): + return get_attention_bias(inputs, self.bias_type) + + +class EmbeddingPostprocessor(tf.keras.layers.Layer): + """Performs various post-processing on a word embedding tensor.""" + + def __init__(self, + use_type_embeddings=False, + token_type_vocab_size=None, + use_position_embeddings=True, + max_position_embeddings=512, + dropout_prob=0.0, + initializer_range=0.02, + initializer=None, + **kwargs): + super(EmbeddingPostprocessor, 
self).__init__(**kwargs) + self.use_type_embeddings = use_type_embeddings + self.token_type_vocab_size = token_type_vocab_size + self.use_position_embeddings = use_position_embeddings + self.max_position_embeddings = max_position_embeddings + self.dropout_prob = dropout_prob + self.initializer_range = initializer_range + + if not initializer: + self.initializer = tf.keras.initializers.TruncatedNormal( + stddev=initializer_range) + else: + self.initializer = initializer + + if self.use_type_embeddings and not self.token_type_vocab_size: + raise ValueError("If `use_type_embeddings` is True, then " + "`token_type_vocab_size` must be specified.") + + def build(self, input_shapes): + """Implements build() for the layer.""" + (word_embeddings_shape, _) = input_shapes + width = word_embeddings_shape.as_list()[-1] + self.type_embeddings = None + if self.use_type_embeddings: + self.type_embeddings = self.add_weight( + "type_embeddings", + shape=[self.token_type_vocab_size, width], + initializer=tf.keras.initializers.TruncatedNormal( + stddev=self.initializer_range), + dtype=self.dtype) + + self.position_embeddings = None + if self.use_position_embeddings: + self.position_embeddings = self.add_weight( + "position_embeddings", + shape=[self.max_position_embeddings, width], + initializer=tf.keras.initializers.TruncatedNormal( + stddev=self.initializer_range), + dtype=self.dtype) + + self.output_layer_norm = tf.keras.layers.LayerNormalization( + name="layer_norm", axis=-1, epsilon=1e-12, dtype=tf.float32) + self.output_dropout = tf.keras.layers.Dropout( + rate=self.dropout_prob, dtype=tf.float32) + super(EmbeddingPostprocessor, self).build(input_shapes) + + def __call__(self, word_embeddings, token_type_ids=None, **kwargs): + inputs = tf_utils.pack_inputs([word_embeddings, token_type_ids]) + return super(EmbeddingPostprocessor, self).__call__(inputs, **kwargs) + + def call(self, inputs): + """Implements call() for the layer.""" + unpacked_inputs = tf_utils.unpack_inputs(inputs) + word_embeddings = unpacked_inputs[0] + token_type_ids = unpacked_inputs[1] + input_shape = tf_utils.get_shape_list(word_embeddings, expected_rank=3) + batch_size = input_shape[0] + seq_length = input_shape[1] + width = input_shape[2] + + output = word_embeddings + if self.use_type_embeddings: + flat_token_type_ids = tf.reshape(token_type_ids, [-1]) + token_type_embeddings = tf.gather(self.type_embeddings, + flat_token_type_ids) + token_type_embeddings = tf.reshape(token_type_embeddings, + [batch_size, seq_length, width]) + output += token_type_embeddings + + if self.use_position_embeddings: + position_embeddings = tf.expand_dims( + tf.slice(self.position_embeddings, [0, 0], [seq_length, width]), + axis=0) + + output += position_embeddings + + output = self.output_layer_norm(output) + output = self.output_dropout(output) + + return output + + +class Decoder(tf.keras.layers.Layer): + """The decoder network which can reuse encoder embeddings for target.""" + + def __init__(self, config, embedding_lookup=None, **kwargs): + super(Decoder, self).__init__(**kwargs) + self.config = config + # Shares vocabulary embedding. 
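+ # If an embedding_lookup layer is passed in (typically the encoder's word + # embedding layer), the decoder reuses it so that the encoder and decoder + # share one vocabulary embedding; otherwise build() below creates a fresh + # OnDeviceEmbedding named "target_embeddings".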
+ self.embedding_lookup = None + if embedding_lookup: + self.embedding_lookup = embedding_lookup + + def build(self, unused_input_shapes): + """Implements build() for the layer.""" + if self.embedding_lookup is None: + self.embedding_lookup = layers.OnDeviceEmbedding( + vocab_size=self.config.vocab_size, + embedding_width=self.config.hidden_size, + initializer=tf.keras.initializers.TruncatedNormal( + stddev=self.config.initializer_range), + name="target_embeddings") + self.embedding_postprocessor = EmbeddingPostprocessor( + use_type_embeddings=False, + use_position_embeddings=True, + max_position_embeddings=self.config.max_position_embeddings, + dropout_prob=self.config.hidden_dropout_prob, + initializer=tf.keras.initializers.VarianceScaling( + scale=self.config.initializer_gain, + mode="fan_avg", + distribution="uniform"), + name="embedding_postprocessor") + # Decoder can use a different intermediate size. + self.multi_channel_cross_attention = self.config.get( + "multi_channel_cross_attention", False) + self.decoder = TransformerDecoder( + num_hidden_layers=self.config.num_decoder_layers, + hidden_size=self.config.hidden_size, + num_attention_heads=self.config.num_decoder_attn_heads, + intermediate_size=self.config.decoder_intermediate_size, + intermediate_activation=self.config.hidden_act, + hidden_dropout_prob=self.config.hidden_dropout_prob, + attention_probs_dropout_prob=self.config.attention_probs_dropout_prob, + initializer_range=self.config.initializer_range, + multi_channel_cross_attention=self.multi_channel_cross_attention, + name="decoder") + super(Decoder, self).build(unused_input_shapes) + + def _decoding_step_time_signal(self, target_embeds, decode_loop_step): + """Applies time signal (positional embeddings) for decoded embeddings.""" + # TODO(hongkuny): migrate to keras bert and design a module to handle this. + output = target_embeds + if self.embedding_postprocessor.use_position_embeddings: + position_embeddings = tf.gather( + self.embedding_postprocessor.position_embeddings, [decode_loop_step]) + # Broadcasts to all sequences inside a batch. + output += position_embeddings + + output = self.embedding_postprocessor.output_layer_norm(output) + output = self.embedding_postprocessor.output_dropout(output) + return output + + def call(self, + inputs, + cache=None, + decode_loop_step=None, + padded_decode=False): + """Implements call() for the layer. + + Args: + inputs: a dictionary of input tensors. + cache: A dictionary of cache tensors, including key & value attentions. + Due to a limitation of Keras, we update the cache as a side effect: the + tensors stored in it are mutated in place. + decode_loop_step: an integer to indicate the step inside a decoding loop. + padded_decode: a boolean indicating whether the pass is for padded decoding. + + Returns: + Decoder output tensors.
+ """ + attention_bias = inputs["attention_bias"] + target_ids = inputs["target_ids"] + all_encoder_outputs = inputs["all_encoder_outputs"] + self_attention_bias = inputs["self_attention_bias"] + if not isinstance(all_encoder_outputs, list): + all_encoder_outputs = [all_encoder_outputs] + + target_embeds = self.embedding_lookup(target_ids) + if decode_loop_step is None: + target_embeds = self.embedding_postprocessor(target_embeds) + else: + target_embeds = self._decoding_step_time_signal(target_embeds, + decode_loop_step) + decoder_inputs = dict( + decoder_inputs=target_embeds, + encoder_outputs=all_encoder_outputs, + self_attention_mask=self_attention_bias, + attention_mask=attention_bias) + if self.multi_channel_cross_attention: + decoder_inputs["doc_attention_probs"] = inputs["doc_attention_probs"] + decode_outputs, cache = self.decoder( + decoder_inputs, cache, decode_loop_step if padded_decode else None) + return decode_outputs diff --git a/models/official/nlp/nhnet/decoder_test.py b/models/official/nlp/nhnet/decoder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..f5effbdb090e9c08939bfc203091e960741700c6 --- /dev/null +++ b/models/official/nlp/nhnet/decoder_test.py @@ -0,0 +1,151 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for nlp.nhnet.decoder.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf +from official.nlp.modeling import layers +from official.nlp.nhnet import configs +from official.nlp.nhnet import decoder +from official.nlp.nhnet import utils + + +class DecoderTest(tf.test.TestCase): + + def setUp(self): + super(DecoderTest, self).setUp() + self._config = utils.get_test_params() + + def test_transformer_decoder(self): + decoder_block = decoder.TransformerDecoder( + num_hidden_layers=self._config.num_hidden_layers, + hidden_size=self._config.hidden_size, + num_attention_heads=self._config.num_attention_heads, + intermediate_size=self._config.intermediate_size, + intermediate_activation=self._config.hidden_act, + hidden_dropout_prob=self._config.hidden_dropout_prob, + attention_probs_dropout_prob=self._config.attention_probs_dropout_prob, + initializer_range=self._config.initializer_range) + decoder_block.build(None) + self.assertEqual(len(decoder_block.layers), self._config.num_hidden_layers) + + def test_bert_decoder(self): + seq_length = 10 + encoder_input_ids = tf.keras.layers.Input( + shape=(seq_length,), name="encoder_input_ids", dtype=tf.int32) + target_ids = tf.keras.layers.Input( + shape=(seq_length,), name="target_ids", dtype=tf.int32) + encoder_outputs = tf.keras.layers.Input( + shape=(seq_length, self._config.hidden_size), + name="all_encoder_outputs", + dtype=tf.float32) + embedding_lookup = layers.OnDeviceEmbedding( + vocab_size=self._config.vocab_size, + embedding_width=self._config.hidden_size, + initializer=tf.keras.initializers.TruncatedNormal( + stddev=self._config.initializer_range), + name="word_embeddings") + cross_attention_bias = decoder.AttentionBias(bias_type="single_cross")( + encoder_input_ids) + self_attention_bias = decoder.AttentionBias(bias_type="decoder_self")( + target_ids) + inputs = dict( + attention_bias=cross_attention_bias, + self_attention_bias=self_attention_bias, + target_ids=target_ids, + all_encoder_outputs=encoder_outputs) + decoder_layer = decoder.Decoder(self._config, embedding_lookup) + outputs = decoder_layer(inputs) + model_inputs = dict( + encoder_input_ids=encoder_input_ids, + target_ids=target_ids, + all_encoder_outputs=encoder_outputs) + model = tf.keras.Model(inputs=model_inputs, outputs=outputs, name="test") + self.assertLen(decoder_layer.trainable_weights, 30) + # Forward path. 
+ fake_inputs = { + "encoder_input_ids": np.zeros((2, 10), dtype=np.int32), + "target_ids": np.zeros((2, 10), dtype=np.int32), + "all_encoder_outputs": np.zeros((2, 10, 16), dtype=np.float32), + } + output_tensor = model(fake_inputs) + self.assertEqual(output_tensor.shape, (2, 10, 16)) + + def test_multi_doc_decoder(self): + self._config = utils.get_test_params(cls=configs.NHNetConfig) + seq_length = 10 + num_docs = 5 + encoder_input_ids = tf.keras.layers.Input( + shape=(num_docs, seq_length), name="encoder_input_ids", dtype=tf.int32) + target_ids = tf.keras.layers.Input( + shape=(seq_length,), name="target_ids", dtype=tf.int32) + encoder_outputs = tf.keras.layers.Input( + shape=(num_docs, seq_length, self._config.hidden_size), + name="all_encoder_outputs", + dtype=tf.float32) + embedding_lookup = layers.OnDeviceEmbedding( + vocab_size=self._config.vocab_size, + embedding_width=self._config.hidden_size, + initializer=tf.keras.initializers.TruncatedNormal( + stddev=self._config.initializer_range), + name="word_embeddings") + doc_attention_probs = tf.keras.layers.Input( + shape=(self._config.num_decoder_attn_heads, seq_length, num_docs), + name="doc_attention_probs", + dtype=tf.float32) + cross_attention_bias = decoder.AttentionBias(bias_type="multi_cross")( + encoder_input_ids) + self_attention_bias = decoder.AttentionBias(bias_type="decoder_self")( + target_ids) + + inputs = dict( + attention_bias=cross_attention_bias, + self_attention_bias=self_attention_bias, + target_ids=target_ids, + all_encoder_outputs=encoder_outputs, + doc_attention_probs=doc_attention_probs) + + decoder_layer = decoder.Decoder(self._config, embedding_lookup) + outputs = decoder_layer(inputs) + model_inputs = dict( + encoder_input_ids=encoder_input_ids, + target_ids=target_ids, + all_encoder_outputs=encoder_outputs, + doc_attention_probs=doc_attention_probs) + model = tf.keras.Model(inputs=model_inputs, outputs=outputs, name="test") + self.assertLen(decoder_layer.trainable_weights, 30) + # Forward path. + fake_inputs = { + "encoder_input_ids": + np.zeros((2, num_docs, seq_length), dtype=np.int32), + "target_ids": + np.zeros((2, seq_length), dtype=np.int32), + "all_encoder_outputs": + np.zeros((2, num_docs, seq_length, 16), dtype=np.float32), + "doc_attention_probs": + np.zeros( + (2, self._config.num_decoder_attn_heads, seq_length, num_docs), + dtype=np.float32) + } + output_tensor = model(fake_inputs) + self.assertEqual(output_tensor.shape, (2, seq_length, 16)) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/nhnet/evaluation.py b/models/official/nlp/nhnet/evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..b9c94dcfb71aa763c2acab5ffd022db94c20d776 --- /dev/null +++ b/models/official/nlp/nhnet/evaluation.py @@ -0,0 +1,185 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Evaluation for Bert2Bert.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import os +from absl import logging +import numpy as np +import tensorflow as tf + +from official.nlp.nhnet import input_pipeline +from official.nlp.nhnet import models +from official.nlp.transformer import metrics as metrics_v2 +from official.nlp.transformer.utils import metrics + + +def rouge_l_fscore(logits, labels): + """ROUGE scores computation between labels and predictions. + + This is an approximate ROUGE scoring method since we do not glue word pieces + or decode the ids and tokenize the output. + + Args: + logits: tensor, model predictions + labels: tensor, gold output. + + Returns: + rouge_l_fscore: approx rouge-l f1 score. + """ + predictions = np.argmax(logits, axis=-1) + rouge_l_f_score = metrics.rouge_l_sentence_level(predictions, labels) + return rouge_l_f_score + + +def rouge_2_fscore(logits, labels): + """ROUGE-2 F1 score computation between labels and predictions. + + This is an approximate ROUGE scoring method since we do not glue word pieces + or decode the ids and tokenize the output. + + Args: + logits: tensor, model predictions + labels: tensor, gold output. + + Returns: + rouge2_fscore: approx rouge-2 f1 score. + """ + predictions = np.argmax(logits, axis=-1) + rouge_2_f_score = metrics.rouge_n(predictions, labels) + return rouge_2_f_score + + +def bleu_score(logits, labels): + """Approximate BLEU score computation between labels and predictions. + + An approximate BLEU scoring method since we do not glue word pieces or + decode the ids and tokenize the output. By default, we use ngram order of 4 + and use brevity penalty. Also, this does not have beam search. + + Args: + logits: Tensor of size [batch_size, length_logits, vocab_size] + labels: Tensor of size [batch-size, length_labels] + + Returns: + bleu: int, approx bleu score + """ + predictions = np.argmax(logits, axis=-1) + bleu = metrics.compute_bleu(labels, predictions) + return bleu + + +def continuous_eval(strategy, + params, + model_type, + eval_file_pattern=None, + batch_size=4, + eval_steps=None, + model_dir=None, + timeout=3000): + """Continuously evaluate checkpoints on testing data.""" + test_dataset = input_pipeline.get_input_dataset( + eval_file_pattern, + batch_size=batch_size, + params=params, + is_training=False, + strategy=strategy) + + with strategy.scope(): + model = models.create_model(model_type, params) + metric_layer = metrics_v2.MetricLayer(params.vocab_size) + eval_summary_writer = tf.summary.create_file_writer( + os.path.join(model_dir, "summaries/eval")) + global_step = tf.Variable( + 0, + trainable=False, + dtype=tf.int64, + aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, + shape=[]) + model.global_step = global_step + + @tf.function + def test_step(inputs): + """Calculates evaluation metrics on distributed devices.""" + + def _test_step_fn(inputs): + """Replicated accuracy calculation.""" + targets = models.remove_sos_from_seq(inputs["target_ids"], + params.pad_token_id) + + # Using ground truth sequences as targets to calculate logits for accuracy + # and perplexity metrics. + logits, _, _ = model(inputs, training=False, mode="train") + metric_layer([logits, targets]) + + # Get logits from top beam search results for bleu and rouge metrics. 
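+      # Calling the model in "eval" mode runs beam search and returns logits
+      # aligned with the top decoded sequence, so the BLEU/ROUGE metrics below
+      # score the model's own predictions rather than the teacher-forced
+      # "train" logits computed above.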
+ logits = model(inputs, training=False, mode="eval") + + return targets, logits + + outputs = strategy.run(_test_step_fn, args=(inputs,)) + + return tf.nest.map_structure(strategy.experimental_local_results, outputs) + + metrics_and_funcs = [ + (tf.keras.metrics.Mean("bleu", dtype=tf.float32), bleu_score), + (tf.keras.metrics.Mean("rouge_2_fscore", + dtype=tf.float32), rouge_2_fscore), + (tf.keras.metrics.Mean("rouge_l_fscore", + dtype=tf.float32), rouge_l_fscore), + ] + eval_results = {} + for latest_checkpoint in tf.train.checkpoints_iterator( + model_dir, timeout=timeout): + checkpoint = tf.train.Checkpoint(model=model) + checkpoint.restore(latest_checkpoint).expect_partial() + logging.info("Loaded checkpoint %s", latest_checkpoint) + + for i, inputs in enumerate(test_dataset): + if eval_steps and i >= eval_steps: + break + outputs = test_step(inputs) + for metric, func in metrics_and_funcs: + for targets, logits in zip(outputs[0], outputs[1]): + metric.update_state(func(logits.numpy(), targets.numpy())) + + with eval_summary_writer.as_default(): + step = model.global_step.numpy() + for metric, _ in metrics_and_funcs: + eval_results[metric.name] = metric.result().numpy().astype(float) + tf.summary.scalar( + metric.name, + eval_results[metric.name], + step=step) + for metric in metric_layer.metrics: + eval_results[metric.name] = metric.result().numpy().astype(float) + tf.summary.scalar( + metric.name, + eval_results[metric.name], + step=step) + logging.info("Step %d Metrics= %s", step, str(eval_results)) + eval_summary_writer.flush() + + # Resets metrics. + for metric, _ in metrics_and_funcs: + metric.reset_states() + for metric in metric_layer.metrics: + metric.reset_states() + return eval_results diff --git a/models/official/nlp/nhnet/input_pipeline.py b/models/official/nlp/nhnet/input_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..cadf3f085c868e56039679fdb2124b23f33fc19b --- /dev/null +++ b/models/official/nlp/nhnet/input_pipeline.py @@ -0,0 +1,254 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Input pipelines.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v2 as tf + + +def decode_record(record, name_to_features): + """Decodes a record to a TensorFlow example.""" + example = tf.io.parse_single_example(record, name_to_features) + + # tf.Example only supports tf.int64, but the TPU only supports tf.int32. + # So cast all int64 to int32. 
+ for name in list(example.keys()): + t = example[name] + if t.dtype == tf.int64: + t = tf.cast(t, tf.int32) + example[name] = t + + return example + + +def process_singledoc_dataset(dataset, batch_size, params): + """Parses and batches single-doc dataset.""" + name_to_features = { + "input_ids_a": tf.io.FixedLenFeature([params.len_title], tf.int64), + "input_ids_b": tf.io.FixedLenFeature([params.len_passage], tf.int64), + "input_mask_b": tf.io.FixedLenFeature([params.len_passage], tf.int64), + "segment_ids_b": tf.io.FixedLenFeature([params.len_passage], tf.int64), + } + decode_fn = lambda record: decode_record(record, name_to_features) + dataset = dataset.map( + decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) + + def _select_data_from_record(record): + """Filter out features to use for pretraining.""" + return { + "input_ids": record["input_ids_b"], + "input_mask": record["input_mask_b"], + "segment_ids": record["segment_ids_b"], + "target_ids": record["input_ids_a"], + } + + dataset = dataset.map( + _select_data_from_record, + num_parallel_calls=tf.data.experimental.AUTOTUNE) + dataset = dataset.batch(batch_size, drop_remainder=True) + return dataset + + +def decode_sparse_record(record, name_to_features): + """Decodes a sparse record to a TensorFlow example.""" + example = tf.io.parse_single_example(record, name_to_features) + + # tf.Example only supports tf.int64, but the TPU only supports tf.int32. + # So cast all int64 to int32. + for name in list(example.keys()): + t = example[name] + if t.dtype == tf.int64: + t = tf.cast(t, tf.int32) + example[name] = tf.sparse.to_dense(t) + + return example + + +def _filter_max_length(example, max_title_length=256): + """Indicates whether the example's length is lower than the maximum length.""" + return tf.size(example["targets"]) <= max_title_length + + +def process_singledoc_transformer_dataset(dataset, batch_size, params): + """Parses, batches and pads single-doc dataset.""" + name_to_features = { + "inputs": tf.io.VarLenFeature(tf.int64), + "targets": tf.io.VarLenFeature(tf.int64), + } + decode_fn = lambda record: decode_sparse_record(record, name_to_features) + dataset = dataset.map( + decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) + + def _select_data_from_record(record): + """Filter out features to use for pretraining.""" + input_ids = record["inputs"][:params.len_passage] + target_ids = record["targets"] + input_mask = tf.ones_like(input_ids) + segment_ids = tf.zeros_like(input_ids) + return { + "input_ids": input_ids, + "input_mask": input_mask, + "segment_ids": segment_ids, + "target_ids": target_ids, + } + + dataset = dataset.filter(lambda x: _filter_max_length(x, params.len_title)) + + dataset = dataset.map( + _select_data_from_record, + num_parallel_calls=tf.data.experimental.AUTOTUNE) + + dataset = dataset.padded_batch( + batch_size, { + "input_ids": [params.len_passage], + "input_mask": [params.len_passage], + "segment_ids": [params.len_passage], + "target_ids": [params.len_title], + }, + padding_values={ + "input_ids": params.pad_token_id, + "input_mask": 0, + "segment_ids": 0, + "target_ids": params.pad_token_id, + }, + drop_remainder=True) + + return dataset + + +def multidoc_parse_spec(params, training=True): + """Gets the mutli-doc tf.Example parsing spec.""" + len_p = params.len_passage + name_to_features = {} + feature_list = ["input_ids", "input_mask", "segment_ids"] + for idx in params.passage_list: + for feature in feature_list: + name_to_features["%s_%s" % (feature, idx)] = 
tf.io.FixedLenFeature( + [len_p], tf.int64) + if training: + # Cluster title. + name_to_features["input_ids_a"] = tf.io.FixedLenFeature([params.len_title], + tf.int64) + return name_to_features, feature_list + + +def process_multidoc_dataset(dataset, batch_size, params): + """Parses, organizes and batches multi-doc dataset.""" + name_to_features, feature_list = multidoc_parse_spec(params) + decode_fn = lambda record: decode_record(record, name_to_features) + dataset = dataset.map( + decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) + + def _select_data_from_record(record): + """Filter out features to use for pretraining.""" + features = {"target_ids": record["input_ids_a"]} + for feature in feature_list: + tensors = [record["%s_%s" % (feature, i)] for i in params.passage_list] + features[feature] = tf.stack(tensors) + return features + + dataset = dataset.map( + _select_data_from_record, + num_parallel_calls=tf.data.experimental.AUTOTUNE) + dataset = dataset.batch(batch_size, drop_remainder=True) + return dataset + + +def create_dataset(file_paths, + batch_size, + params, + is_training=True, + input_pipeline_context=None): + """Creates input dataset from (tf)records files for pretraining.""" + dataset = tf.data.Dataset.list_files(file_paths, shuffle=is_training) + + if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1: + if not is_training or params.input_sharding: + dataset = dataset.shard(input_pipeline_context.num_input_pipelines, + input_pipeline_context.input_pipeline_id) + + if is_training: + dataset = dataset.repeat() + # We set shuffle buffer to exactly match total number of + # training files to ensure that training data is well shuffled. + dataset = dataset.shuffle(len(file_paths)) + + # In parallel, create tf record dataset for each train files. + # cycle_length = 8 means that up to 8 files will be read and deserialized in + # parallel. You may want to increase this number if you have a large number of + # CPU cores. + dataset = dataset.interleave( + tf.data.TFRecordDataset, + cycle_length=8, + num_parallel_calls=tf.data.experimental.AUTOTUNE) + + if is_training: + dataset = dataset.shuffle(100) + + if params.get("multi_channel_cross_attention", value=False): + dataset = process_multidoc_dataset(dataset, batch_size, params) + else: + if not params.input_data_not_padded: + dataset = process_singledoc_dataset(dataset, batch_size, params) + else: + dataset = process_singledoc_transformer_dataset(dataset, batch_size, + params) + dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) + return dataset + + +def get_input_dataset(input_file_pattern, + batch_size, + params, + is_training, + strategy=None): + """Returns input dataset from input file string.""" + + # When using TPU pods, we need to clone dataset across + # workers and need to pass in function that returns the dataset rather + # than passing dataset instance itself. + use_dataset_fn = isinstance(strategy, tf.distribute.experimental.TPUStrategy) + if use_dataset_fn: + if batch_size % strategy.num_replicas_in_sync != 0: + raise ValueError( + "Batch size must be divisible by number of replicas : {}".format( + strategy.num_replicas_in_sync)) + + # As auto rebatching is not supported in + # `experimental_distribute_datasets_from_function()` API, which is + # required when cloning dataset to multiple workers in eager mode, + # we use per-replica batch size. 
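+    # For example, a global batch size of 32 on a TPU strategy with 8 replicas
+    # yields a per-replica batch size of 4 passed on to create_dataset.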
+ batch_size = int(batch_size / strategy.num_replicas_in_sync) + + def _dataset_fn(ctx=None): + """Returns tf.data.Dataset for distributed BERT pretraining.""" + input_files = [] + for input_pattern in input_file_pattern.split(","): + input_files.extend(tf.io.gfile.glob(input_pattern)) + + return create_dataset( + input_files, + batch_size, + params, + is_training=is_training, + input_pipeline_context=ctx) + + if use_dataset_fn: + return strategy.experimental_distribute_datasets_from_function(_dataset_fn) + else: + return strategy.experimental_distribute_dataset(_dataset_fn()) diff --git a/models/official/nlp/nhnet/models.py b/models/official/nlp/nhnet/models.py new file mode 100644 index 0000000000000000000000000000000000000000..d6f70e7f36d8a30ed869c1ca135ef3262fd2150e --- /dev/null +++ b/models/official/nlp/nhnet/models.py @@ -0,0 +1,590 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""tf.keras Models for NHNet.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +from absl import logging +import gin +import tensorflow as tf +from typing import Optional, Text + +from official.modeling import tf_utils +from official.modeling.hyperparams import params_dict +from official.nlp.modeling import networks +from official.nlp.modeling.layers import multi_channel_attention +from official.nlp.nhnet import configs +from official.nlp.nhnet import decoder +from official.nlp.nhnet import utils +from official.nlp.transformer import beam_search + + +def embedding_linear(embedding_matrix, x): + """Uses embeddings as linear transformation weights.""" + with tf.name_scope("presoftmax_linear"): + batch_size = tf.shape(x)[0] + length = tf.shape(x)[1] + hidden_size = tf.shape(x)[2] + vocab_size = tf.shape(embedding_matrix)[0] + + x = tf.reshape(x, [-1, hidden_size]) + logits = tf.matmul(x, embedding_matrix, transpose_b=True) + + return tf.reshape(logits, [batch_size, length, vocab_size]) + + +def _add_sos_to_seq(seq, start_token_id): + """Add a start sequence token while keeping seq length.""" + batch_size = tf.shape(seq)[0] + seq_len = tf.shape(seq)[1] + sos_ids = tf.ones([batch_size], tf.int32) * start_token_id + targets = tf.concat([tf.expand_dims(sos_ids, axis=1), seq], axis=1) + targets = targets[:, :-1] + tf.assert_equal(tf.shape(targets), (batch_size, seq_len)) + return targets + + +def remove_sos_from_seq(seq, pad_token_id): + """Remove the start sequence token while keeping seq length.""" + batch_size, seq_len = tf_utils.get_shape_list(seq, expected_rank=2) + # remove + targets = seq[:, 1:] + # pad + pad_ids = tf.ones([batch_size], tf.int32) * pad_token_id + targets = tf.concat([targets, tf.expand_dims(pad_ids, axis=1)], axis=1) + tf.assert_equal(tf.shape(targets), (batch_size, seq_len)) + return targets + + +class Bert2Bert(tf.keras.Model): + 
"""Bert2Bert encoder decoder model for training.""" + + def __init__(self, params, bert_layer, decoder_layer, name=None): + super(Bert2Bert, self).__init__(name=name) + self.params = params + if not bert_layer.built: + raise ValueError("bert_layer should be built.") + if not decoder_layer.built: + raise ValueError("decoder_layer should be built.") + self.bert_layer = bert_layer + self.decoder_layer = decoder_layer + + def get_config(self): + return {"params": self.params.as_dict()} + + def get_decode_logits(self, + decoder_inputs, + ids, + decoder_self_attention_bias, + step, + cache=None): + if cache: + if self.params.get("padded_decode", False): + bias_shape = decoder_self_attention_bias.shape.as_list() + self_attention_bias = tf.slice( + decoder_self_attention_bias, [0, 0, step, 0], + [bias_shape[0], bias_shape[1], 1, bias_shape[3]]) + else: + self_attention_bias = decoder_self_attention_bias[:, :, step:step + + 1, :step + 1] + # Sets decoder input to the last generated IDs. + decoder_input = ids[:, -1:] + else: + self_attention_bias = decoder_self_attention_bias[:, :, :step + 1, :step + + 1] + decoder_input = ids + decoder_inputs["target_ids"] = decoder_input + decoder_inputs["self_attention_bias"] = self_attention_bias + if cache: + decoder_outputs = self.decoder_layer( + decoder_inputs, + cache, + decode_loop_step=step, + padded_decode=self.params.get("padded_decode", False)) + else: + decoder_outputs = self.decoder_layer(decoder_inputs) + logits = embedding_linear(self.decoder_layer.embedding_lookup.embeddings, + decoder_outputs[:, -1:, :]) + logits = tf.squeeze(logits, axis=[1]) + return logits + + def _get_symbols_to_logits_fn(self, max_decode_length): + """Returns a decoding function that calculates logits of the next tokens.""" + # Max decode length should be smaller than the positional embedding max + # sequence length. + decoder_self_attention_bias = decoder.get_attention_bias( + input_tensor=None, + bias_type="decoder_self", + max_length=max_decode_length) + + def _symbols_to_logits_fn(ids, i, cache): + """Generate logits for next candidate IDs. + + Args: + ids: Current decoded sequences. int tensor with shape [batch_size * + beam_size, i + 1] + i: Loop index + cache: dictionary of values storing the encoder output, encoder-decoder + attention bias, and previous decoder attention values. + + Returns: + Tuple of + (logits with shape [batch_size * beam_size, vocab_size], + updated cache values) + """ + decoder_inputs = dict( + all_encoder_outputs=cache["all_encoder_outputs"], + attention_bias=cache["attention_bias"]) + logits = self.get_decode_logits( + decoder_inputs, + ids, + decoder_self_attention_bias, + step=i, + cache=cache if self.params.use_cache else None) + return logits, cache + + return _symbols_to_logits_fn + + def train_decode(self, decode_outputs): + logits = embedding_linear(self.decoder_layer.embedding_lookup.embeddings, + decode_outputs) + decode_output_ids = tf.cast(tf.argmax(logits, axis=-1), tf.int32) + output_log_probs = tf.nn.log_softmax(logits, axis=-1) + return logits, decode_output_ids, output_log_probs + + def predict_decode(self, start_token_ids, cache): + symbols_to_logits_fn = self._get_symbols_to_logits_fn(self.params.len_title) + # Use beam search to find the top beam_size sequences and scores. 
+ decoded_ids, scores = beam_search.sequence_beam_search( + symbols_to_logits_fn=symbols_to_logits_fn, + initial_ids=start_token_ids, + initial_cache=cache, + vocab_size=self.params.vocab_size, + beam_size=self.params.beam_size, + alpha=self.params.alpha, + max_decode_length=self.params.len_title, + padded_decode=self.params.get("padded_decode", False), + eos_id=self.params.end_token_id) + return decoded_ids, scores + + def _get_logits_for_decode_ids(self, decoder_inputs, top_decoded_ids): + """Returns the log probabilities for ids.""" + target_ids = _add_sos_to_seq(top_decoded_ids, self.params.start_token_id) + decoder_inputs["self_attention_bias"] = decoder.get_attention_bias( + target_ids, bias_type="decoder_self") + decoder_inputs["target_ids"] = target_ids + decoder_outputs = self.decoder_layer(decoder_inputs) + logits = embedding_linear(self.decoder_layer.embedding_lookup.embeddings, + decoder_outputs) + return logits + + def _init_cache(self, batch_size): + num_heads = self.params.num_decoder_attn_heads + dim_per_head = self.params.hidden_size // num_heads + init_decode_length = ( + self.params.len_title if self.params.get("padded_decode", False) else 0) + cache = {} + for layer in range(self.params.num_decoder_layers): + cache[str(layer)] = { + "key": + tf.zeros( + [batch_size, init_decode_length, num_heads, dim_per_head], + dtype=tf.float32), + "value": + tf.zeros( + [batch_size, init_decode_length, num_heads, dim_per_head], + dtype=tf.float32) + } + return cache + + def call(self, inputs, mode="train"): + """Implements call(). + + Args: + inputs: a dictionary of tensors. + mode: string, an enum for mode, train/eval. + + Returns: + logits, decode_output_ids, output_log_probs for training. top_decoded_ids + for eval. + """ + input_ids = inputs["input_ids"] + input_mask = inputs["input_mask"] + segment_ids = inputs["segment_ids"] + all_encoder_outputs, _ = self.bert_layer( + [input_ids, input_mask, segment_ids]) + + if mode not in ("train", "eval", "predict"): + raise ValueError("Invalid call mode: %s" % mode) + encoder_decoder_attention_bias = decoder.get_attention_bias( + input_ids, + bias_type="single_cross", + padding_value=self.params.pad_token_id) + if mode == "train": + self_attention_bias = decoder.get_attention_bias( + inputs["target_ids"], bias_type="decoder_self") + decoder_inputs = dict( + attention_bias=encoder_decoder_attention_bias, + all_encoder_outputs=all_encoder_outputs, + target_ids=inputs["target_ids"], + self_attention_bias=self_attention_bias) + decoder_outputs = self.decoder_layer(decoder_inputs) + return self.train_decode(decoder_outputs) + + batch_size = tf.shape(input_ids)[0] + start_token_ids = tf.ones([batch_size], + tf.int32) * self.params.start_token_id + # Add encoder output and attention bias to the cache. 
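+    # With use_cache enabled, _init_cache pre-allocates per-layer "key"/"value"
+    # tensors that beam search threads through every decoding step; otherwise
+    # only the encoder outputs and attention bias are placed in the cache dict.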
+ if self.params.use_cache: + cache = self._init_cache(batch_size) + else: + cache = {} + cache["all_encoder_outputs"] = all_encoder_outputs + cache["attention_bias"] = encoder_decoder_attention_bias + decoded_ids, scores = self.predict_decode(start_token_ids, cache) + if mode == "predict": + return decoded_ids[:, :self.params.beam_size, + 1:], scores[:, :self.params.beam_size] + + decoder_inputs = dict( + attention_bias=encoder_decoder_attention_bias, + all_encoder_outputs=all_encoder_outputs) + top_decoded_ids = decoded_ids[:, 0, 1:] + return self._get_logits_for_decode_ids(decoder_inputs, top_decoded_ids) + + +class NHNet(Bert2Bert): + """NHNet model which performs multi-doc decoding.""" + + def __init__(self, params, bert_layer, decoder_layer, name=None): + super(NHNet, self).__init__(params, bert_layer, decoder_layer, name=name) + self.doc_attention = multi_channel_attention.VotingAttention( + num_heads=params.num_decoder_attn_heads, + head_size=params.hidden_size // params.num_decoder_attn_heads) + + def _expand_doc_attention_probs(self, doc_attention_probs, target_length): + """Expands doc attention probs to fit the decoding sequence length.""" + doc_attention_probs = tf.expand_dims( + doc_attention_probs, axis=[1]) # [B, 1, A] + doc_attention_probs = tf.expand_dims( + doc_attention_probs, axis=[2]) # [B, 1, 1, A] + return tf.tile(doc_attention_probs, + [1, self.params.num_decoder_attn_heads, target_length, 1]) + + def _get_symbols_to_logits_fn(self, max_decode_length): + """Returns a decoding function that calculates logits of the next tokens.""" + # Max decode length should be smaller than the positional embedding max + # sequence length. + decoder_self_attention_bias = decoder.get_attention_bias( + input_tensor=None, + bias_type="decoder_self", + max_length=max_decode_length) + + def _symbols_to_logits_fn(ids, i, cache): + """Generate logits for next candidate IDs.""" + if self.params.use_cache: + target_length = 1 + else: + target_length = i + 1 + decoder_inputs = dict( + doc_attention_probs=self._expand_doc_attention_probs( + cache["doc_attention_probs"], target_length), + all_encoder_outputs=cache["all_encoder_outputs"], + attention_bias=cache["attention_bias"]) + logits = self.get_decode_logits( + decoder_inputs, + ids, + decoder_self_attention_bias, + step=i, + cache=cache if self.params.use_cache else None) + return logits, cache + + return _symbols_to_logits_fn + + def call(self, inputs, mode="training"): + input_shape = tf_utils.get_shape_list(inputs["input_ids"], expected_rank=3) + batch_size, num_docs, len_passage = (input_shape[0], input_shape[1], + input_shape[2]) + input_ids = tf.reshape(inputs["input_ids"], [-1, len_passage]) + input_mask = tf.reshape(inputs["input_mask"], [-1, len_passage]) + segment_ids = tf.reshape(inputs["segment_ids"], [-1, len_passage]) + all_encoder_outputs, _ = self.bert_layer( + [input_ids, input_mask, segment_ids]) + encoder_outputs = tf.reshape( + all_encoder_outputs[-1], + [batch_size, num_docs, len_passage, self.params.hidden_size]) + doc_attention_mask = tf.reshape( + tf.cast( + tf.math.count_nonzero(input_mask, axis=1, dtype=tf.int32) > 2, + tf.int32), [batch_size, num_docs]) + + doc_attention_probs = self.doc_attention(encoder_outputs, + doc_attention_mask) + encoder_decoder_attention_bias = decoder.get_attention_bias( + inputs["input_ids"], + bias_type="multi_cross", + padding_value=self.params.pad_token_id) + + if mode == "train": + target_length = tf_utils.get_shape_list( + inputs["target_ids"], expected_rank=2)[1] + 
doc_attention_probs = self._expand_doc_attention_probs( + doc_attention_probs, target_length) + self_attention_bias = decoder.get_attention_bias( + inputs["target_ids"], bias_type="decoder_self") + decoder_inputs = dict( + attention_bias=encoder_decoder_attention_bias, + self_attention_bias=self_attention_bias, + target_ids=inputs["target_ids"], + all_encoder_outputs=encoder_outputs, + doc_attention_probs=doc_attention_probs) + decoder_outputs = self.decoder_layer(decoder_inputs) + return self.train_decode(decoder_outputs) + + # Adds encoder output and attention bias to the cache. + if self.params.use_cache: + cache = self._init_cache(batch_size) + else: + cache = {} + cache["all_encoder_outputs"] = [encoder_outputs] + cache["attention_bias"] = encoder_decoder_attention_bias + cache["doc_attention_probs"] = doc_attention_probs + + start_token_ids = tf.ones([batch_size], + tf.int32) * self.params.start_token_id + decoded_ids, scores = self.predict_decode(start_token_ids, cache) + if mode == "predict": + return decoded_ids[:, :self.params.beam_size, + 1:], scores[:, :self.params.beam_size] + + top_decoded_ids = decoded_ids[:, 0, 1:] + target_length = tf_utils.get_shape_list(top_decoded_ids)[-1] + decoder_inputs = dict( + attention_bias=encoder_decoder_attention_bias, + all_encoder_outputs=[encoder_outputs], + doc_attention_probs=self._expand_doc_attention_probs( + doc_attention_probs, target_length)) + return self._get_logits_for_decode_ids(decoder_inputs, top_decoded_ids) + + +def get_bert2bert_layers(params: configs.BERT2BERTConfig): + """Creates a Bert2Bert stem model and returns Bert encoder/decoder. + + We use funtional-style to create stem model because we need to make all layers + built to restore variables in a customized way. The layers are called with + placeholder inputs to make them fully built. + + Args: + params: ParamsDict. 
+ + Returns: + two keras Layers, bert_model_layer and decoder_layer + """ + input_ids = tf.keras.layers.Input( + shape=(None,), name="input_ids", dtype=tf.int32) + input_mask = tf.keras.layers.Input( + shape=(None,), name="input_mask", dtype=tf.int32) + segment_ids = tf.keras.layers.Input( + shape=(None,), name="segment_ids", dtype=tf.int32) + target_ids = tf.keras.layers.Input( + shape=(None,), name="target_ids", dtype=tf.int32) + bert_config = utils.get_bert_config_from_params(params) + bert_model_layer = networks.TransformerEncoder( + vocab_size=bert_config.vocab_size, + hidden_size=bert_config.hidden_size, + num_layers=bert_config.num_hidden_layers, + num_attention_heads=bert_config.num_attention_heads, + intermediate_size=bert_config.intermediate_size, + activation=tf_utils.get_activation(bert_config.hidden_act), + dropout_rate=bert_config.hidden_dropout_prob, + attention_dropout_rate=bert_config.attention_probs_dropout_prob, + sequence_length=None, + max_sequence_length=bert_config.max_position_embeddings, + type_vocab_size=bert_config.type_vocab_size, + initializer=tf.keras.initializers.TruncatedNormal( + stddev=bert_config.initializer_range), + return_all_encoder_outputs=True, + name="bert_encoder") + all_encoder_outputs, _ = bert_model_layer( + [input_ids, input_mask, segment_ids]) + # pylint: disable=protected-access + decoder_layer = decoder.Decoder(params, bert_model_layer._embedding_layer) + # pylint: enable=protected-access + cross_attention_bias = decoder.AttentionBias(bias_type="single_cross")( + input_ids) + self_attention_bias = decoder.AttentionBias(bias_type="decoder_self")( + target_ids) + decoder_inputs = dict( + attention_bias=cross_attention_bias, + self_attention_bias=self_attention_bias, + target_ids=target_ids, + all_encoder_outputs=all_encoder_outputs) + _ = decoder_layer(decoder_inputs) + + return bert_model_layer, decoder_layer + + +def get_nhnet_layers(params: configs.NHNetConfig): + """Creates a Mult-doc encoder/decoder. + + Args: + params: ParamsDict. 
+ + Returns: + two keras Layers, bert_model_layer and decoder_layer + """ + input_ids = tf.keras.layers.Input( + shape=(None,), name="input_ids", dtype=tf.int32) + input_mask = tf.keras.layers.Input( + shape=(None,), name="input_mask", dtype=tf.int32) + segment_ids = tf.keras.layers.Input( + shape=(None,), name="segment_ids", dtype=tf.int32) + bert_config = utils.get_bert_config_from_params(params) + bert_model_layer = networks.TransformerEncoder( + vocab_size=bert_config.vocab_size, + hidden_size=bert_config.hidden_size, + num_layers=bert_config.num_hidden_layers, + num_attention_heads=bert_config.num_attention_heads, + intermediate_size=bert_config.intermediate_size, + activation=tf_utils.get_activation(bert_config.hidden_act), + dropout_rate=bert_config.hidden_dropout_prob, + attention_dropout_rate=bert_config.attention_probs_dropout_prob, + sequence_length=None, + max_sequence_length=bert_config.max_position_embeddings, + type_vocab_size=bert_config.type_vocab_size, + initializer=tf.keras.initializers.TruncatedNormal( + stddev=bert_config.initializer_range), + return_all_encoder_outputs=True, + name="bert_encoder") + bert_model_layer([input_ids, input_mask, segment_ids]) + + input_ids = tf.keras.layers.Input( + shape=(None, None), name="input_ids", dtype=tf.int32) + all_encoder_outputs = tf.keras.layers.Input((None, None, params.hidden_size), + dtype=tf.float32) + target_ids = tf.keras.layers.Input( + shape=(None,), name="target_ids", dtype=tf.int32) + doc_attention_probs = tf.keras.layers.Input( + (params.num_decoder_attn_heads, None, None), dtype=tf.float32) + # pylint: disable=protected-access + decoder_layer = decoder.Decoder(params, bert_model_layer._embedding_layer) + # pylint: enable=protected-access + cross_attention_bias = decoder.AttentionBias(bias_type="multi_cross")( + input_ids) + self_attention_bias = decoder.AttentionBias(bias_type="decoder_self")( + target_ids) + decoder_inputs = dict( + attention_bias=cross_attention_bias, + self_attention_bias=self_attention_bias, + target_ids=target_ids, + all_encoder_outputs=all_encoder_outputs, + doc_attention_probs=doc_attention_probs) + _ = decoder_layer(decoder_inputs) + + return bert_model_layer, decoder_layer + + +def create_transformer_model(params, + init_checkpoint: Optional[Text] = None + ) -> tf.keras.Model: + """A helper to create Transformer model.""" + bert_layer, decoder_layer = get_bert2bert_layers(params=params) + model = Bert2Bert( + params=params, + bert_layer=bert_layer, + decoder_layer=decoder_layer, + name="transformer") + + if init_checkpoint: + logging.info( + "Checkpoint file %s found and restoring from " + "initial checkpoint.", init_checkpoint) + ckpt = tf.train.Checkpoint(model=model) + ckpt.restore(init_checkpoint).expect_partial() + + return model + + +def create_bert2bert_model( + params: configs.BERT2BERTConfig, + cls=Bert2Bert, + init_checkpoint: Optional[Text] = None) -> tf.keras.Model: + """A helper to create Bert2Bert model.""" + bert_layer, decoder_layer = get_bert2bert_layers(params=params) + if init_checkpoint: + utils.initialize_bert2bert_from_pretrained_bert(bert_layer, decoder_layer, + init_checkpoint) + return cls( + params=params, + bert_layer=bert_layer, + decoder_layer=decoder_layer, + name="bert2bert") + + +def create_nhnet_model( + params: configs.NHNetConfig, + cls=NHNet, + init_checkpoint: Optional[Text] = None) -> tf.keras.Model: + """A helper to create NHNet model.""" + bert_layer, decoder_layer = get_nhnet_layers(params=params) + model = cls( + params=params, + 
bert_layer=bert_layer, + decoder_layer=decoder_layer, + name="nhnet") + if init_checkpoint: + logging.info( + "Checkpoint file %s found and restoring from " + "initial checkpoint.", init_checkpoint) + if params.init_from_bert2bert: + ckpt = tf.train.Checkpoint(model=model) + ckpt.restore(init_checkpoint).assert_existing_objects_matched() + else: + utils.initialize_bert2bert_from_pretrained_bert(bert_layer, decoder_layer, + init_checkpoint) + return model + + +@gin.configurable +def get_model_params(model: Optional[Text] = "bert2bert", + config_class=None) -> params_dict.ParamsDict: + """Helper function to convert config file to ParamsDict.""" + if model == "bert2bert": + return configs.BERT2BERTConfig() + elif model == "nhnet": + return configs.NHNetConfig() + elif config_class: + return config_class() + else: + raise KeyError("The model type is not defined: %s" % model) + + +@gin.configurable +def create_model(model_type: Text, + params, + init_checkpoint: Optional[Text] = None): + """A factory function to create different types of models.""" + if model_type == "bert2bert": + return create_bert2bert_model(params, init_checkpoint=init_checkpoint) + elif model_type == "nhnet": + return create_nhnet_model(params, init_checkpoint=init_checkpoint) + elif "transformer" in model_type: + return create_transformer_model( + params, init_checkpoint=init_checkpoint) + else: + raise KeyError("The model type is not defined: %s" % model_type) diff --git a/models/official/nlp/nhnet/models_test.py b/models/official/nlp/nhnet/models_test.py new file mode 100644 index 0000000000000000000000000000000000000000..39676a347d65e2dc19e99a7dec4d22dfb4c60df4 --- /dev/null +++ b/models/official/nlp/nhnet/models_test.py @@ -0,0 +1,324 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for nlp.nhnet.models.""" + +import os + +from absl import logging +from absl.testing import parameterized +import numpy as np +import tensorflow as tf + +# pylint: disable=g-direct-tensorflow-import +from tensorflow.python.distribute import combinations +from tensorflow.python.distribute import strategy_combinations +# pylint: enable=g-direct-tensorflow-import +from official.nlp.nhnet import configs +from official.nlp.nhnet import models +from official.nlp.nhnet import utils + + +def all_strategy_combinations(): + return combinations.combine( + distribution=[ + strategy_combinations.default_strategy, + strategy_combinations.tpu_strategy, + strategy_combinations.one_device_strategy_gpu, + strategy_combinations.mirrored_strategy_with_gpu_and_cpu, + strategy_combinations.mirrored_strategy_with_two_gpus, + ], + mode="eager", + ) + + +def distribution_forward_path(strategy, + model, + inputs, + batch_size, + mode="train"): + dataset = tf.data.Dataset.from_tensor_slices((inputs)) + dataset = dataset.batch(batch_size) + dataset = strategy.experimental_distribute_dataset(dataset) + + @tf.function + def test_step(inputs): + """Calculates evaluation metrics on distributed devices.""" + + def _test_step_fn(inputs): + """Replicated accuracy calculation.""" + return model(inputs, mode=mode, training=False) + + outputs = strategy.run(_test_step_fn, args=(inputs,)) + return tf.nest.map_structure(strategy.experimental_local_results, outputs) + + return [test_step(inputs) for inputs in dataset] + + +def process_decoded_ids(predictions, end_token_id): + """Transforms decoded tensors to lists ending with END_TOKEN_ID.""" + if isinstance(predictions, tf.Tensor): + predictions = predictions.numpy() + flatten_ids = predictions.reshape((-1, predictions.shape[-1])) + results = [] + for ids in flatten_ids: + ids = list(ids) + if end_token_id in ids: + ids = ids[:ids.index(end_token_id)] + results.append(ids) + return results + + +class Bert2BertTest(tf.test.TestCase, parameterized.TestCase): + + def setUp(self): + super(Bert2BertTest, self).setUp() + self._config = utils.get_test_params() + + def test_model_creation(self): + model = models.create_bert2bert_model(params=self._config) + fake_ids = np.zeros((2, 10), dtype=np.int32) + fake_inputs = { + "input_ids": fake_ids, + "input_mask": fake_ids, + "segment_ids": fake_ids, + "target_ids": fake_ids, + } + model(fake_inputs) + + @combinations.generate(all_strategy_combinations()) + def test_bert2bert_train_forward(self, distribution): + seq_length = 10 + # Defines the model inside distribution strategy scope. + with distribution.scope(): + # Forward path. 
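+      # distribution_forward_path slices the fake data into `batches` steps of
+      # `batch_size` examples and runs one replicated forward pass per step,
+      # so the assertion below expects exactly `batches` results.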
+ batch_size = 2 + batches = 4 + fake_ids = np.zeros((batch_size * batches, seq_length), dtype=np.int32) + fake_inputs = { + "input_ids": fake_ids, + "input_mask": fake_ids, + "segment_ids": fake_ids, + "target_ids": fake_ids, + } + model = models.create_bert2bert_model(params=self._config) + results = distribution_forward_path(distribution, model, fake_inputs, + batch_size) + logging.info("Forward path results: %s", str(results)) + self.assertLen(results, batches) + + def test_bert2bert_decoding(self): + seq_length = 10 + self._config.override( + { + "beam_size": 3, + "len_title": seq_length, + "alpha": 0.6, + }, + is_strict=False) + + batch_size = 2 + fake_ids = np.zeros((batch_size, seq_length), dtype=np.int32) + fake_inputs = { + "input_ids": fake_ids, + "input_mask": fake_ids, + "segment_ids": fake_ids, + } + self._config.override({ + "padded_decode": False, + "use_cache": False, + }, + is_strict=False) + model = models.create_bert2bert_model(params=self._config) + ckpt = tf.train.Checkpoint(model=model) + + # Initializes variables from checkpoint to keep outputs deterministic. + init_checkpoint = ckpt.save(os.path.join(self.get_temp_dir(), "ckpt")) + ckpt.restore(init_checkpoint).assert_existing_objects_matched() + top_ids, scores = model(fake_inputs, mode="predict") + + self._config.override({ + "padded_decode": False, + "use_cache": True, + }, + is_strict=False) + model = models.create_bert2bert_model(params=self._config) + ckpt = tf.train.Checkpoint(model=model) + ckpt.restore(init_checkpoint).assert_existing_objects_matched() + cached_top_ids, cached_scores = model(fake_inputs, mode="predict") + self.assertEqual( + process_decoded_ids(top_ids, self._config.end_token_id), + process_decoded_ids(cached_top_ids, self._config.end_token_id)) + self.assertAllClose(scores, cached_scores) + + self._config.override({ + "padded_decode": True, + "use_cache": True, + }, + is_strict=False) + model = models.create_bert2bert_model(params=self._config) + ckpt = tf.train.Checkpoint(model=model) + ckpt.restore(init_checkpoint).assert_existing_objects_matched() + padded_top_ids, padded_scores = model(fake_inputs, mode="predict") + self.assertEqual( + process_decoded_ids(top_ids, self._config.end_token_id), + process_decoded_ids(padded_top_ids, self._config.end_token_id)) + self.assertAllClose(scores, padded_scores) + + @combinations.generate(all_strategy_combinations()) + def test_bert2bert_eval(self, distribution): + seq_length = 10 + padded_decode = isinstance(distribution, + tf.distribute.experimental.TPUStrategy) + self._config.override( + { + "beam_size": 3, + "len_title": seq_length, + "alpha": 0.6, + "padded_decode": padded_decode, + }, + is_strict=False) + # Defines the model inside distribution strategy scope. + with distribution.scope(): + # Forward path. 
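+      # Both "predict" and "eval" modes decode with beam search (beam_size=3
+      # here); "eval" additionally re-scores the top sequence into logits, so
+      # the two calls below exercise both decoding paths on the same batches.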
+ batch_size = 2 + batches = 4 + fake_ids = np.zeros((batch_size * batches, seq_length), dtype=np.int32) + fake_inputs = { + "input_ids": fake_ids, + "input_mask": fake_ids, + "segment_ids": fake_ids, + } + model = models.create_bert2bert_model(params=self._config) + results = distribution_forward_path( + distribution, model, fake_inputs, batch_size, mode="predict") + self.assertLen(results, batches) + results = distribution_forward_path( + distribution, model, fake_inputs, batch_size, mode="eval") + self.assertLen(results, batches) + + +class NHNetTest(tf.test.TestCase, parameterized.TestCase): + + def setUp(self): + super(NHNetTest, self).setUp() + self._nhnet_config = configs.NHNetConfig() + self._nhnet_config.override(utils.get_test_params().as_dict()) + self._bert2bert_config = configs.BERT2BERTConfig() + self._bert2bert_config.override(utils.get_test_params().as_dict()) + + def _count_params(self, layer, trainable_only=True): + """Returns the count of all model parameters, or just trainable ones.""" + if not trainable_only: + return layer.count_params() + else: + return int( + np.sum([ + tf.keras.backend.count_params(p) for p in layer.trainable_weights + ])) + + def test_create_nhnet_layers(self): + single_doc_bert, single_doc_decoder = models.get_bert2bert_layers( + self._bert2bert_config) + multi_doc_bert, multi_doc_decoder = models.get_nhnet_layers( + self._nhnet_config) + # Expects multi-doc encoder/decoder have the same number of parameters as + # single-doc encoder/decoder. + self.assertEqual( + self._count_params(multi_doc_bert), self._count_params(single_doc_bert)) + self.assertEqual( + self._count_params(multi_doc_decoder), + self._count_params(single_doc_decoder)) + + def test_checkpoint_restore(self): + bert2bert_model = models.create_bert2bert_model(self._bert2bert_config) + ckpt = tf.train.Checkpoint(model=bert2bert_model) + init_checkpoint = ckpt.save(os.path.join(self.get_temp_dir(), "ckpt")) + nhnet_model = models.create_nhnet_model( + params=self._nhnet_config, init_checkpoint=init_checkpoint) + source_weights = ( + bert2bert_model.bert_layer.trainable_weights + + bert2bert_model.decoder_layer.trainable_weights) + dest_weights = ( + nhnet_model.bert_layer.trainable_weights + + nhnet_model.decoder_layer.trainable_weights) + for source_weight, dest_weight in zip(source_weights, dest_weights): + self.assertAllClose(source_weight.numpy(), dest_weight.numpy()) + + @combinations.generate(all_strategy_combinations()) + def test_nhnet_train_forward(self, distribution): + seq_length = 10 + # Defines the model inside distribution strategy scope. + with distribution.scope(): + # Forward path. 
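+      # NHNet consumes a stack of documents per example, so the fake ids below
+      # are shaped [batch_size * batches, num_docs, seq_length] while
+      # target_ids remain a single 2-D headline tensor.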
+ batch_size = 2 + num_docs = 2 + batches = 4 + fake_ids = np.zeros((batch_size * batches, num_docs, seq_length), + dtype=np.int32) + fake_inputs = { + "input_ids": + fake_ids, + "input_mask": + fake_ids, + "segment_ids": + fake_ids, + "target_ids": + np.zeros((batch_size * batches, seq_length * 2), dtype=np.int32), + } + model = models.create_nhnet_model(params=self._nhnet_config) + results = distribution_forward_path(distribution, model, fake_inputs, + batch_size) + logging.info("Forward path results: %s", str(results)) + self.assertLen(results, batches) + + @combinations.generate(all_strategy_combinations()) + def test_nhnet_eval(self, distribution): + seq_length = 10 + padded_decode = isinstance(distribution, + tf.distribute.experimental.TPUStrategy) + self._nhnet_config.override( + { + "beam_size": 4, + "len_title": seq_length, + "alpha": 0.6, + "multi_channel_cross_attention": True, + "padded_decode": padded_decode, + }, + is_strict=False) + # Defines the model inside distribution strategy scope. + with distribution.scope(): + # Forward path. + batch_size = 2 + num_docs = 2 + batches = 4 + fake_ids = np.zeros((batch_size * batches, num_docs, seq_length), + dtype=np.int32) + fake_inputs = { + "input_ids": fake_ids, + "input_mask": fake_ids, + "segment_ids": fake_ids, + "target_ids": np.zeros((batch_size * batches, 5), dtype=np.int32), + } + model = models.create_nhnet_model(params=self._nhnet_config) + results = distribution_forward_path( + distribution, model, fake_inputs, batch_size, mode="predict") + self.assertLen(results, batches) + results = distribution_forward_path( + distribution, model, fake_inputs, batch_size, mode="eval") + self.assertLen(results, batches) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/nhnet/optimizer.py b/models/official/nlp/nhnet/optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..15c7e248019399f1abc94f64f5acd509db104f38 --- /dev/null +++ b/models/official/nlp/nhnet/optimizer.py @@ -0,0 +1,82 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Optimizer and learning rate scheduler.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import tensorflow as tf + +from official.modeling.hyperparams import params_dict + + +class LearningRateSchedule(tf.keras.optimizers.schedules.LearningRateSchedule): + """Learning rate schedule.""" + + def __init__(self, initial_learning_rate, hidden_size, warmup_steps): + """Initialize configuration of the learning rate schedule. + + Args: + initial_learning_rate: A float, the initial learning rate. + hidden_size: An integer, the model dimension in the hidden layers. + warmup_steps: An integer, the number of steps required for linear warmup. 
+ """ + super(LearningRateSchedule, self).__init__() + self.initial_learning_rate = initial_learning_rate + self.hidden_size = hidden_size + self.warmup_steps = tf.cast(warmup_steps, tf.float32) + + def __call__(self, global_step): + """Calculate learning rate with linear warmup and rsqrt decay. + + Args: + global_step: An integer, the current global step used for learning rate + calculation. + + Returns: + A float, the learning rate needs to be used for current global step. + """ + with tf.name_scope('learning_rate_schedule'): + global_step = tf.cast(global_step, tf.float32) + learning_rate = self.initial_learning_rate + learning_rate *= (self.hidden_size**-0.5) + # Apply linear warmup + learning_rate *= tf.minimum(1.0, global_step / self.warmup_steps) + # Apply rsqrt decay + learning_rate /= tf.sqrt(tf.maximum(global_step, self.warmup_steps)) + return learning_rate + + def get_config(self): + """Get the configuration of the learning rate schedule.""" + return { + 'initial_learning_rate': self.initial_learning_rate, + 'hidden_size': self.hidden_size, + 'warmup_steps': self.warmup_steps, + } + + +def create_optimizer(params: params_dict.ParamsDict): + """Creates optimizer.""" + lr_schedule = LearningRateSchedule( + params.learning_rate, + params.hidden_size, + params.learning_rate_warmup_steps) + return tf.keras.optimizers.Adam( + learning_rate=lr_schedule, + beta_1=params.adam_beta1, + beta_2=params.adam_beta2, + epsilon=params.adam_epsilon) diff --git a/models/official/nlp/nhnet/raw_data_process.py b/models/official/nlp/nhnet/raw_data_process.py new file mode 100644 index 0000000000000000000000000000000000000000..9597043237355f2c4b4399c490e105672e406b62 --- /dev/null +++ b/models/official/nlp/nhnet/raw_data_process.py @@ -0,0 +1,91 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Processes crawled content from news URLs by generating tfrecords.""" + +import os +from absl import app +from absl import flags +from official.nlp.nhnet import raw_data_processor + +FLAGS = flags.FLAGS + +flags.DEFINE_string("crawled_articles", "/tmp/nhnet/", + "Folder path to the crawled articles using news-please.") +flags.DEFINE_string("vocab", None, "Filepath of the BERT vocabulary.") +flags.DEFINE_bool("do_lower_case", True, + "Whether the vocabulary is uncased or not.") +flags.DEFINE_integer("len_title", 15, + "Maximum number of tokens in story headline.") +flags.DEFINE_integer("len_passage", 200, + "Maximum number of tokens in article passage.") +flags.DEFINE_integer("max_num_articles", 5, + "Maximum number of articles in a story.") +flags.DEFINE_bool("include_article_title_in_passage", False, + "Whether to include article title in article passage.") +flags.DEFINE_string("data_folder", None, + "Folder path to the downloaded data folder (output).") +flags.DEFINE_integer("num_tfrecords_shards", 20, + "Number of shards for train/valid/test.") + + +def transform_as_tfrecords(data_processor, filename): + """Transforms story from json to tfrecord (sharded). + + Args: + data_processor: Instance of RawDataProcessor. + filename: 'train', 'valid', or 'test'. + """ + print("Transforming json to tfrecord for %s..." % filename) + story_filepath = os.path.join(FLAGS.data_folder, filename + ".json") + output_folder = os.path.join(FLAGS.data_folder, "processed") + os.makedirs(output_folder, exist_ok=True) + output_filepaths = [] + for i in range(FLAGS.num_tfrecords_shards): + output_filepaths.append( + os.path.join( + output_folder, "%s.tfrecord-%.5d-of-%.5d" % + (filename, i, FLAGS.num_tfrecords_shards))) + (total_num_examples, + generated_num_examples) = data_processor.generate_examples( + story_filepath, output_filepaths) + print("For %s, %d examples have been generated from %d stories in json." % + (filename, generated_num_examples, total_num_examples)) + + +def main(_): + if not FLAGS.data_folder: + raise ValueError("data_folder must be set as the downloaded folder path.") + if not FLAGS.vocab: + raise ValueError("vocab must be set as the filepath of BERT vocabulary.") + data_processor = raw_data_processor.RawDataProcessor( + vocab=FLAGS.vocab, + do_lower_case=FLAGS.do_lower_case, + len_title=FLAGS.len_title, + len_passage=FLAGS.len_passage, + max_num_articles=FLAGS.max_num_articles, + include_article_title_in_passage=FLAGS.include_article_title_in_passage, + include_text_snippet_in_example=True) + print("Loading crawled articles...") + num_articles = data_processor.read_crawled_articles(FLAGS.crawled_articles) + print("Total number of articles loaded: %d" % num_articles) + print() + transform_as_tfrecords(data_processor, "train") + transform_as_tfrecords(data_processor, "valid") + transform_as_tfrecords(data_processor, "test") + + +if __name__ == "__main__": + app.run(main) diff --git a/models/official/nlp/nhnet/raw_data_processor.py b/models/official/nlp/nhnet/raw_data_processor.py new file mode 100644 index 0000000000000000000000000000000000000000..0a30532f4f401e6f2b29430d353767c6cdea0966 --- /dev/null +++ b/models/official/nlp/nhnet/raw_data_processor.py @@ -0,0 +1,228 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Library for processing crawled content and generating tfrecords.""" + +import collections +import json +import multiprocessing +import os +import urllib.parse +import tensorflow as tf + +from official.nlp.bert import tokenization +from official.nlp.data import classifier_data_lib + + +class RawDataProcessor(object): + """Data converter for story examples.""" + + def __init__(self, + vocab: str, + do_lower_case: bool, + len_title: int = 15, + len_passage: int = 200, + max_num_articles: int = 5, + include_article_title_in_passage: bool = False, + include_text_snippet_in_example: bool = False): + """Constructs a RawDataProcessor. + + Args: + vocab: Filepath of the BERT vocabulary. + do_lower_case: Whether the vocabulary is uncased or not. + len_title: Maximum number of tokens in story headline. + len_passage: Maximum number of tokens in article passage. + max_num_articles: Maximum number of articles in a story. + include_article_title_in_passage: Whether to include article title in + article passage. + include_text_snippet_in_example: Whether to include text snippet + (headline and article content) in generated tensorflow Examples, for + debug usage. If include_article_title_in_passage=True, title and body + will be separated by [SEP]. + """ + self.articles = dict() + self.tokenizer = tokenization.FullTokenizer( + vocab, do_lower_case=do_lower_case, split_on_punc=False) + self.len_title = len_title + self.len_passage = len_passage + self.max_num_articles = max_num_articles + self.include_article_title_in_passage = include_article_title_in_passage + self.include_text_snippet_in_example = include_text_snippet_in_example + # ex_index=5 deactivates printing inside convert_single_example. + self.ex_index = 5 + # Parameters used in InputExample, not used in NHNet. 
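+    # label and guid below are placeholders required by
+    # classifier_data_lib.InputExample / convert_single_example; NHNet itself
+    # never reads them.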
+ self.label = 0 + self.guid = 0 + self.num_generated_examples = 0 + + def read_crawled_articles(self, folder_path): + """Reads crawled articles under folder_path.""" + for path, _, files in os.walk(folder_path): + for name in files: + if not name.endswith(".json"): + continue + url, article = self._get_article_content_from_json( + os.path.join(path, name)) + if not article.text_a: + continue + self.articles[RawDataProcessor.normalize_url(url)] = article + if len(self.articles) % 5000 == 0: + print("Number of articles loaded: %d\r" % len(self.articles), end="") + print() + return len(self.articles) + + def generate_examples(self, input_file, output_files): + """Loads story from input json file and exports examples in output_files.""" + writers = [] + story_partition = [] + for output_file in output_files: + writers.append(tf.io.TFRecordWriter(output_file)) + story_partition.append(list()) + with tf.io.gfile.GFile(input_file, "r") as story_json_file: + stories = json.load(story_json_file) + writer_index = 0 + for story in stories: + articles = [] + for url in story["urls"]: + normalized_url = RawDataProcessor.normalize_url(url) + if normalized_url in self.articles: + articles.append(self.articles[normalized_url]) + if not articles: + continue + story_partition[writer_index].append((story["label"], articles)) + writer_index = (writer_index + 1) % len(writers) + lock = multiprocessing.Lock() + pool = multiprocessing.pool.ThreadPool(len(writers)) + data = [(story_partition[i], writers[i], lock) for i in range(len(writers))] + pool.map(self._write_story_partition, data) + return len(stories), self.num_generated_examples + + @classmethod + def normalize_url(cls, url): + """Normalize url for better matching.""" + url = urllib.parse.unquote( + urllib.parse.urlsplit(url)._replace(query=None).geturl()) + output, part = [], None + for part in url.split("//"): + if part == "http:" or part == "https:": + continue + else: + output.append(part) + return "//".join(output) + + def _get_article_content_from_json(self, file_path): + """Returns (url, InputExample) keeping content extracted from file_path.""" + with tf.io.gfile.GFile(file_path, "r") as article_json_file: + article = json.load(article_json_file) + if self.include_article_title_in_passage: + return article["url"], classifier_data_lib.InputExample( + guid=self.guid, + text_a=article["title"], + text_b=article["maintext"], + label=self.label) + else: + return article["url"], classifier_data_lib.InputExample( + guid=self.guid, text_a=article["maintext"], label=self.label) + + def _write_story_partition(self, data): + """Writes stories in a partition into file.""" + for (story_headline, articles) in data[0]: + story_example = tf.train.Example( + features=tf.train.Features( + feature=self._get_single_story_features(story_headline, + articles))) + data[1].write(story_example.SerializeToString()) + data[2].acquire() + try: + self.num_generated_examples += 1 + if self.num_generated_examples % 1000 == 0: + print( + "Number of stories written: %d\r" % self.num_generated_examples, + end="") + finally: + data[2].release() + + def _get_single_story_features(self, story_headline, articles): + """Converts a list of articles to a tensorflow Example.""" + def get_text_snippet(article): + if article.text_b: + return " [SEP] ".join([article.text_a, article.text_b]) + else: + return article.text_a + + story_features = collections.OrderedDict() + story_headline_feature = classifier_data_lib.convert_single_example( + ex_index=self.ex_index, + 
example=classifier_data_lib.InputExample( + guid=self.guid, text_a=story_headline, label=self.label), + label_list=[self.label], + max_seq_length=self.len_title, + tokenizer=self.tokenizer) + if self.include_text_snippet_in_example: + story_headline_feature.label_id = story_headline + self._add_feature_with_suffix( + feature=story_headline_feature, + suffix="a", + story_features=story_features) + for (article_index, article) in enumerate(articles): + if article_index == self.max_num_articles: + break + article_feature = classifier_data_lib.convert_single_example( + ex_index=self.ex_index, + example=article, + label_list=[self.label], + max_seq_length=self.len_passage, + tokenizer=self.tokenizer) + if self.include_text_snippet_in_example: + article_feature.label_id = get_text_snippet(article) + suffix = chr(ord("b") + article_index) + self._add_feature_with_suffix( + feature=article_feature, suffix=suffix, story_features=story_features) + + # Adds empty features as placeholder. + for article_index in range(len(articles), self.max_num_articles): + suffix = chr(ord("b") + article_index) + empty_article = classifier_data_lib.InputExample( + guid=self.guid, text_a="", label=self.label) + empty_feature = classifier_data_lib.convert_single_example( + ex_index=self.ex_index, + example=empty_article, + label_list=[self.label], + max_seq_length=self.len_passage, + tokenizer=self.tokenizer) + if self.include_text_snippet_in_example: + empty_feature.label_id = "" + self._add_feature_with_suffix( + feature=empty_feature, suffix=suffix, story_features=story_features) + return story_features + + def _add_feature_with_suffix(self, feature, suffix, story_features): + """Appends suffix to feature names and fills in the corresponding values.""" + + def _create_int_feature(values): + return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) + + def _create_string_feature(value): + return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) + + story_features["input_ids_%c" % suffix] = _create_int_feature( + feature.input_ids) + story_features["input_mask_%c" % suffix] = _create_int_feature( + feature.input_mask) + story_features["segment_ids_%c" % suffix] = _create_int_feature( + feature.segment_ids) + if self.include_text_snippet_in_example: + story_features["text_snippet_%c" % suffix] = _create_string_feature( + bytes(feature.label_id.encode())) diff --git a/models/official/nlp/nhnet/testdata/crawled_articles/domain_0.com/url_000.html b/models/official/nlp/nhnet/testdata/crawled_articles/domain_0.com/url_000.html new file mode 100644 index 0000000000000000000000000000000000000000..0a8549c1d274dc2ba29862860391e65bca391242 --- /dev/null +++ b/models/official/nlp/nhnet/testdata/crawled_articles/domain_0.com/url_000.html @@ -0,0 +1,3 @@ + + +Page Title 0 diff --git a/models/official/nlp/nhnet/testdata/crawled_articles/domain_0.com/url_000.json b/models/official/nlp/nhnet/testdata/crawled_articles/domain_0.com/url_000.json new file mode 100644 index 0000000000000000000000000000000000000000..b7308592b77a0d3b6b3534a3ecbf00b717b62d26 --- /dev/null +++ b/models/official/nlp/nhnet/testdata/crawled_articles/domain_0.com/url_000.json @@ -0,0 +1,5 @@ +{ + "title": "title for 0", + "maintext": "text snippet for 0", + "url": "http://url_000.html" +} diff --git a/models/official/nlp/nhnet/testdata/crawled_articles/domain_1.com/url_001.html b/models/official/nlp/nhnet/testdata/crawled_articles/domain_1.com/url_001.html new file mode 100644 index 
0000000000000000000000000000000000000000..7c8bb8d285c3e9da41ea8ca546d6d1503e3a7e51 --- /dev/null +++ b/models/official/nlp/nhnet/testdata/crawled_articles/domain_1.com/url_001.html @@ -0,0 +1,3 @@ + + +Page Title 1 diff --git a/models/official/nlp/nhnet/testdata/crawled_articles/domain_1.com/url_001.json b/models/official/nlp/nhnet/testdata/crawled_articles/domain_1.com/url_001.json new file mode 100644 index 0000000000000000000000000000000000000000..dbc2322c7debc7ae695f763a75843bc1ea0a2f22 --- /dev/null +++ b/models/official/nlp/nhnet/testdata/crawled_articles/domain_1.com/url_001.json @@ -0,0 +1,5 @@ +{ + "title": "title for 1", + "maintext": "text snippet for 1", + "url": "url_001.html" +} diff --git a/models/official/nlp/nhnet/testdata/stories.json b/models/official/nlp/nhnet/testdata/stories.json new file mode 100644 index 0000000000000000000000000000000000000000..0618f3d5c8afdbd7e164d02dd663507da467e8b2 --- /dev/null +++ b/models/official/nlp/nhnet/testdata/stories.json @@ -0,0 +1,29 @@ +[ + { + "urls": [ + "http://url_000.html", + "http://url_001.html" + ], + "label": "headline 0" + }, + { + "urls": [ + "http://url_000.html", + "http://url_001.html" + ], + "label": "headline 1" + }, + { + "urls": [ + "http://url_002.html", + "http://url_001.html" + ], + "label": "headline 2" + }, + { + "urls": [ + "http://url_003.html" + ], + "label": "headline 3" + } +] diff --git a/models/official/nlp/nhnet/testdata/vocab.txt b/models/official/nlp/nhnet/testdata/vocab.txt new file mode 100644 index 0000000000000000000000000000000000000000..dd708d71c2fec475901fe9b2a23c7e6c2b539d95 --- /dev/null +++ b/models/official/nlp/nhnet/testdata/vocab.txt @@ -0,0 +1,23 @@ +[UNK] +[CLS] +[SEP] +[MASK] +0 +1 +this +is +a +title +snippet +for +url +main +text +http +www +html +: +// +. +_ +headline diff --git a/models/official/nlp/nhnet/trainer.py b/models/official/nlp/nhnet/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..3fa26a53a0d002247eb656691e8f49fa42bbe80f --- /dev/null +++ b/models/official/nlp/nhnet/trainer.py @@ -0,0 +1,232 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
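Taken together with the test data above, the intended flow for the RawDataProcessor defined earlier is: load the crawled per-article JSON files, then join them against stories.json and serialize tfrecord shards. A minimal sketch of that flow follows; it is illustrative only, the module's import path is not visible in this hunk (so `raw_data_process` is a placeholder name), and the output paths are arbitrary.

# Illustrative only: `raw_data_process` is a placeholder for whatever module
# name this library is checked in under; adjust the import to match.
from official.nlp.nhnet import raw_data_process

processor = raw_data_process.RawDataProcessor(
    vocab="testdata/vocab.txt",   # BERT vocabulary, as in the testdata above
    do_lower_case=True,
    len_title=15,
    len_passage=200,
    max_num_articles=5)
# Walks the folder and keeps one InputExample per *.json article, keyed by
# normalized URL.
num_articles = processor.read_crawled_articles("testdata/crawled_articles")
# Joins stories.json against the loaded articles and round-robins the
# resulting examples across the output tfrecord shards.
num_stories, num_examples = processor.generate_examples(
    "testdata/stories.json",
    ["/tmp/nhnet/train-00000-of-00002.tfrecord",
     "/tmp/nhnet/train-00001-of-00002.tfrecord"])
print(num_articles, num_stories, num_examples)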
+# ============================================================================== +"""Run NHNet model training and eval.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from absl import app +from absl import flags +from absl import logging +from six.moves import zip +import tensorflow as tf +from official.modeling.hyperparams import params_dict +from official.nlp.nhnet import evaluation +from official.nlp.nhnet import input_pipeline +from official.nlp.nhnet import models +from official.nlp.nhnet import optimizer +from official.nlp.transformer import metrics as transformer_metrics +from official.utils.misc import distribution_utils +from official.utils.misc import keras_utils + +FLAGS = flags.FLAGS + + +def define_flags(): + """Defines command line flags used by NHNet trainer.""" + ## Required parameters + flags.DEFINE_enum("mode", "train", ["train", "eval", "train_and_eval"], + "Execution mode.") + flags.DEFINE_string("train_file_pattern", "", "Train file pattern.") + flags.DEFINE_string("eval_file_pattern", "", "Eval file pattern.") + flags.DEFINE_string( + "model_dir", None, + "The output directory where the model checkpoints will be written.") + + # Model training specific flags. + flags.DEFINE_enum( + "distribution_strategy", "mirrored", ["tpu", "mirrored"], + "Distribution Strategy type to use for training. `tpu` uses TPUStrategy " + "for running on TPUs, `mirrored` uses GPUs with single host.") + flags.DEFINE_string("tpu", "", "TPU address to connect to.") + flags.DEFINE_string( + "init_checkpoint", None, + "Initial checkpoint (usually from a pre-trained BERT model).") + flags.DEFINE_integer("train_steps", 100000, "Max train steps") + flags.DEFINE_integer("eval_steps", 32, "Number of eval steps per run.") + flags.DEFINE_integer("eval_timeout", 3000, "Timeout waiting for checkpoints.") + flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") + flags.DEFINE_integer("eval_batch_size", 4, "Total batch size for evaluation.") + flags.DEFINE_integer( + "steps_per_loop", 1000, + "Number of steps per graph-mode loop. 
Only training step " + "happens inside the loop.") + flags.DEFINE_integer("checkpoint_interval", 2000, "Checkpointing interval.") + flags.DEFINE_integer("len_title", 15, "Title length.") + flags.DEFINE_integer("len_passage", 200, "Passage length.") + flags.DEFINE_integer("num_encoder_layers", 12, + "Number of hidden layers of encoder.") + flags.DEFINE_integer("num_decoder_layers", 12, + "Number of hidden layers of decoder.") + flags.DEFINE_string("model_type", "nhnet", + "Model type to choose a model configuration.") + flags.DEFINE_integer( + "num_nhnet_articles", 5, + "Maximum number of articles in NHNet, only used when model_type=nhnet") + flags.DEFINE_string( + "params_override", + default=None, + help=("a YAML/JSON string or a YAML file which specifies additional " + "overrides over the default parameters")) + + +# pylint: disable=protected-access + + +class Trainer(tf.keras.Model): + """A training only model.""" + + def __init__(self, model, params): + super(Trainer, self).__init__() + self.model = model + self.params = params + self._num_replicas_in_sync = tf.distribute.get_strategy( + ).num_replicas_in_sync + + def call(self, inputs, mode="train"): + return self.model(inputs, mode) + + def train_step(self, inputs): + """The logic for one training step.""" + with tf.GradientTape() as tape: + logits, _, _ = self(inputs, mode="train", training=True) + targets = models.remove_sos_from_seq(inputs["target_ids"], + self.params.pad_token_id) + loss = transformer_metrics.transformer_loss(logits, targets, + self.params.label_smoothing, + self.params.vocab_size) + # Scales the loss, which results in using the average loss across all + # of the replicas for backprop. + scaled_loss = loss / self._num_replicas_in_sync + + tvars = self.trainable_variables + grads = tape.gradient(scaled_loss, tvars) + self.optimizer.apply_gradients(list(zip(grads, tvars))) + return { + "training_loss": loss, + "learning_rate": self.optimizer._decayed_lr(var_dtype=tf.float32) + } + + +def train(params, strategy, dataset=None): + """Runs training.""" + + if not dataset: + dataset = input_pipeline.get_input_dataset( + FLAGS.train_file_pattern, + FLAGS.train_batch_size, + params, + is_training=True, + strategy=strategy) + + with strategy.scope(): + model = models.create_model( + FLAGS.model_type, params, init_checkpoint=FLAGS.init_checkpoint) + opt = optimizer.create_optimizer(params) + trainer = Trainer(model, params) + model.global_step = opt.iterations + + trainer.compile( + optimizer=opt, + experimental_steps_per_execution=FLAGS.steps_per_loop) + summary_dir = os.path.join(FLAGS.model_dir, "summaries") + summary_callback = tf.keras.callbacks.TensorBoard( + summary_dir, update_freq=max(100, FLAGS.steps_per_loop)) + checkpoint = tf.train.Checkpoint(model=model, optimizer=opt) + checkpoint_manager = tf.train.CheckpointManager( + checkpoint, + directory=FLAGS.model_dir, + max_to_keep=10, + step_counter=model.global_step, + checkpoint_interval=FLAGS.checkpoint_interval) + if checkpoint_manager.restore_or_initialize(): + logging.info("Training restored from the checkpoints in: %s", + FLAGS.model_dir) + checkpoint_callback = keras_utils.SimpleCheckpoint(checkpoint_manager) + + # Trains the model. 
+ steps_per_epoch = min(FLAGS.train_steps, FLAGS.checkpoint_interval) + epochs = FLAGS.train_steps // steps_per_epoch + history = trainer.fit( + x=dataset, + steps_per_epoch=steps_per_epoch, + epochs=epochs, + callbacks=[summary_callback, checkpoint_callback], + verbose=2) + train_hist = history.history + # Gets final loss from training. + stats = dict(training_loss=float(train_hist["training_loss"][-1])) + return stats + + +def run(): + """Runs NHNet using Keras APIs.""" + strategy = distribution_utils.get_distribution_strategy( + distribution_strategy=FLAGS.distribution_strategy, tpu_address=FLAGS.tpu) + if strategy: + logging.info("***** Number of cores used : %d", + strategy.num_replicas_in_sync) + + params = models.get_model_params(FLAGS.model_type) + params = params_dict.override_params_dict( + params, FLAGS.params_override, is_strict=True) + params.override( + { + "len_title": + FLAGS.len_title, + "len_passage": + FLAGS.len_passage, + "num_hidden_layers": + FLAGS.num_encoder_layers, + "num_decoder_layers": + FLAGS.num_decoder_layers, + "passage_list": + [chr(ord("b") + i) for i in range(FLAGS.num_nhnet_articles)], + }, + is_strict=False) + stats = {} + if "train" in FLAGS.mode: + stats = train(params, strategy) + if "eval" in FLAGS.mode: + timeout = 0 if FLAGS.mode == "train_and_eval" else FLAGS.eval_timeout + # Uses padded decoding for TPU. Always uses cache. + padded_decode = isinstance(strategy, tf.distribute.experimental.TPUStrategy) + params.override({ + "padded_decode": padded_decode, + }, is_strict=False) + stats = evaluation.continuous_eval( + strategy, + params, + model_type=FLAGS.model_type, + eval_file_pattern=FLAGS.eval_file_pattern, + batch_size=FLAGS.eval_batch_size, + eval_steps=FLAGS.eval_steps, + model_dir=FLAGS.model_dir, + timeout=timeout) + return stats + + +def main(_): + stats = run() + if stats: + logging.info("Stats:\n%s", stats) + +if __name__ == "__main__": + define_flags() + app.run(main) diff --git a/models/official/nlp/nhnet/trainer_test.py b/models/official/nlp/nhnet/trainer_test.py new file mode 100644 index 0000000000000000000000000000000000000000..39673dd2c7afe0f7310e556395824b9ba4582262 --- /dev/null +++ b/models/official/nlp/nhnet/trainer_test.py @@ -0,0 +1,104 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
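One detail of train() worth spelling out: the step-based flags are converted into Keras epochs so that the per-epoch checkpoint callback fires every `checkpoint_interval` steps. A quick check of that arithmetic with the default flag values (illustrative numbers only):

train_steps, checkpoint_interval = 100000, 2000            # the flag defaults above
steps_per_epoch = min(train_steps, checkpoint_interval)    # 2000
epochs = train_steps // steps_per_epoch                    # 50
assert steps_per_epoch * epochs == train_steps
# => fit() runs 50 "epochs" of 2000 steps each, so SimpleCheckpoint (invoked once
# per epoch) saves every checkpoint_interval training steps. If train_steps is
# not a multiple of checkpoint_interval, the remainder steps are dropped.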
+# ============================================================================== +"""Tests for official.nlp.nhnet.trainer.""" + +import os + +from absl import flags +from absl.testing import parameterized +import tensorflow as tf + +# pylint: disable=g-direct-tensorflow-import +from tensorflow.python.distribute import combinations +from tensorflow.python.distribute import strategy_combinations +# pylint: enable=g-direct-tensorflow-import +from official.nlp.nhnet import trainer +from official.nlp.nhnet import utils + +FLAGS = flags.FLAGS +trainer.define_flags() + + +def all_strategy_combinations(): + return combinations.combine( + distribution=[ + strategy_combinations.one_device_strategy, + strategy_combinations.one_device_strategy_gpu, + strategy_combinations.mirrored_strategy_with_gpu_and_cpu, + strategy_combinations.tpu_strategy, + ], + mode="eager", + ) + + +def get_trivial_data(config) -> tf.data.Dataset: + """Gets trivial all-zeros data shaped like NHNet inputs.""" + batch_size, num_docs = 2, len(config.passage_list) + len_passage = config.len_passage + len_title = config.len_title + + def generate_data(_) -> tf.data.Dataset: + fake_ids = tf.zeros((num_docs, len_passage), dtype=tf.int32) + title = tf.zeros((len_title), dtype=tf.int32) + return dict( + input_ids=fake_ids, + input_mask=fake_ids, + segment_ids=fake_ids, + target_ids=title) + + dataset = tf.data.Dataset.range(1) + dataset = dataset.repeat() + dataset = dataset.map( + generate_data, num_parallel_calls=tf.data.experimental.AUTOTUNE) + dataset = dataset.prefetch(buffer_size=1).batch(batch_size) + return dataset + + +class TrainerTest(tf.test.TestCase, parameterized.TestCase): + + def setUp(self): + super(TrainerTest, self).setUp() + self._config = utils.get_test_params() + self._config.override( + { + "vocab_size": 49911, + "max_position_embeddings": 200, + "len_title": 15, + "len_passage": 20, + "beam_size": 5, + "alpha": 0.6, + "learning_rate": 0.0, + "learning_rate_warmup_steps": 0, + "multi_channel_cross_attention": True, + "passage_list": ["a", "b"], + }, + is_strict=False) + + @combinations.generate(all_strategy_combinations()) + def test_train(self, distribution): + FLAGS.train_steps = 10 + FLAGS.checkpoint_interval = 5 + FLAGS.model_dir = self.get_temp_dir() + FLAGS.model_type = "nhnet" + stats = trainer.train(self._config, distribution, + get_trivial_data(self._config)) + self.assertIn("training_loss", stats) + self.assertLen( + tf.io.gfile.glob(os.path.join(FLAGS.model_dir, "ckpt*.index")), 2) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/nhnet/utils.py b/models/official/nlp/nhnet/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f588798b7feee95a33b3b003f77570fe48340fe7 --- /dev/null +++ b/models/official/nlp/nhnet/utils.py @@ -0,0 +1,90 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+# ============================================================================== +"""Utility helpers for Bert2Bert.""" +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +from absl import logging +import tensorflow as tf +from typing import Optional, Text +from official.modeling.hyperparams import params_dict +from official.nlp.bert import configs +from official.nlp.nhnet import configs as nhnet_configs + + +def get_bert_config_from_params( + params: params_dict.ParamsDict) -> configs.BertConfig: + """Builds a BertConfig from a ParamsDict.""" + return configs.BertConfig.from_dict(params.as_dict()) + + +def get_test_params(cls=nhnet_configs.BERT2BERTConfig): + return cls.from_args(**nhnet_configs.UNITTEST_CONFIG) + + +# pylint: disable=protected-access +def encoder_common_layers(transformer_block): + return [ + transformer_block._attention_layer, + transformer_block._attention_layer_norm, + transformer_block._intermediate_dense, transformer_block._output_dense, + transformer_block._output_layer_norm + ] +# pylint: enable=protected-access + + +def initialize_bert2bert_from_pretrained_bert( + bert_encoder: tf.keras.layers.Layer, + bert_decoder: tf.keras.layers.Layer, + init_checkpoint: Optional[Text] = None) -> None: + """Helper function to initialize Bert2Bert from a pretrained BERT checkpoint.""" + ckpt = tf.train.Checkpoint(model=bert_encoder) + logging.info( + "Checkpoint file %s found and restoring from " + "initial checkpoint for core model.", init_checkpoint) + status = ckpt.restore(init_checkpoint) + + # Expects the BERT model to be a subset of the checkpoint, since the + # pooling layer is not used. + status.assert_existing_objects_matched() + logging.info("Loading from checkpoint file completed.") + + # Collects the encoder transformer layers. + encoder_layers = [] + for transformer_block in bert_encoder.transformer_layers: + encoder_layers.extend(encoder_common_layers(transformer_block)) + + # Collects the decoder layers that share weights with the encoder. + decoder_layers_to_initialize = [] + for decoder_block in bert_decoder.decoder.layers: + decoder_layers_to_initialize.extend( + decoder_block.common_layers_with_encoder()) + + if len(decoder_layers_to_initialize) != len(encoder_layers): + raise ValueError( + "Destination decoder layers with %d objects do not match source " + "encoder layers with %d objects." % + (len(decoder_layers_to_initialize), len(encoder_layers))) + + for dest_layer, source_layer in zip(decoder_layers_to_initialize, + encoder_layers): + try: + dest_layer.set_weights(source_layer.get_weights()) + except ValueError as e: + logging.error( + "dest_layer: %s failed to set weights from " + "source_layer: %s as %s", dest_layer.name, source_layer.name, str(e)) diff --git a/models/official/nlp/optimization.py b/models/official/nlp/optimization.py new file mode 100644 index 0000000000000000000000000000000000000000..51289a535b239d5831dd76bae57d6306f604e746 --- /dev/null +++ b/models/official/nlp/optimization.py @@ -0,0 +1,228 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Functions and classes related to optimization (weight updates).""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import re + +from absl import logging +import gin +import tensorflow as tf +import tensorflow_addons.optimizers as tfa_optimizers + + +class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule): + """Applies a warmup schedule on a given learning rate decay schedule.""" + + def __init__(self, + initial_learning_rate, + decay_schedule_fn, + warmup_steps, + power=1.0, + name=None): + super(WarmUp, self).__init__() + self.initial_learning_rate = initial_learning_rate + self.warmup_steps = warmup_steps + self.power = power + self.decay_schedule_fn = decay_schedule_fn + self.name = name + + def __call__(self, step): + with tf.name_scope(self.name or 'WarmUp') as name: + # Implements polynomial warmup. i.e., if global_step < warmup_steps, the + # learning rate will be `global_step/num_warmup_steps * init_lr`. + global_step_float = tf.cast(step, tf.float32) + warmup_steps_float = tf.cast(self.warmup_steps, tf.float32) + warmup_percent_done = global_step_float / warmup_steps_float + warmup_learning_rate = ( + self.initial_learning_rate * + tf.math.pow(warmup_percent_done, self.power)) + return tf.cond( + global_step_float < warmup_steps_float, + lambda: warmup_learning_rate, + lambda: self.decay_schedule_fn(step), + name=name) + + def get_config(self): + return { + 'initial_learning_rate': self.initial_learning_rate, + 'decay_schedule_fn': self.decay_schedule_fn, + 'warmup_steps': self.warmup_steps, + 'power': self.power, + 'name': self.name + } + + +@gin.configurable +def create_optimizer(init_lr, + num_train_steps, + num_warmup_steps, + end_lr=0.0, + optimizer_type='adamw'): + """Creates an optimizer with learning rate schedule.""" + # Implements linear decay of the learning rate. + lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay( + initial_learning_rate=init_lr, + decay_steps=num_train_steps, + end_learning_rate=end_lr) + if num_warmup_steps: + lr_schedule = WarmUp( + initial_learning_rate=init_lr, + decay_schedule_fn=lr_schedule, + warmup_steps=num_warmup_steps) + + if optimizer_type == 'adamw': + logging.info('using Adamw optimizer') + optimizer = AdamWeightDecay( + learning_rate=lr_schedule, + weight_decay_rate=0.01, + beta_1=0.9, + beta_2=0.999, + epsilon=1e-6, + exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias']) + elif optimizer_type == 'lamb': + logging.info('using Lamb optimizer') + optimizer = tfa_optimizers.LAMB( + learning_rate=lr_schedule, + weight_decay_rate=0.01, + beta_1=0.9, + beta_2=0.999, + epsilon=1e-6, + exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias']) + else: + raise ValueError('Unsupported optimizer type: ', optimizer_type) + + return optimizer + + +class AdamWeightDecay(tf.keras.optimizers.Adam): + """Adam enables L2 weight decay and clip_by_global_norm on gradients. 
+ + Just adding the square of the weights to the loss function is *not* the + correct way of using L2 regularization/weight decay with Adam, since that will + interact with the m and v parameters in strange ways. + + Instead we want to decay the weights in a manner that doesn't interact with + the m/v parameters. This is equivalent to adding the square of the weights to + the loss with plain (non-momentum) SGD. + """ + + def __init__(self, + learning_rate=0.001, + beta_1=0.9, + beta_2=0.999, + epsilon=1e-7, + amsgrad=False, + weight_decay_rate=0.0, + include_in_weight_decay=None, + exclude_from_weight_decay=None, + name='AdamWeightDecay', + **kwargs): + super(AdamWeightDecay, self).__init__(learning_rate, beta_1, beta_2, + epsilon, amsgrad, name, **kwargs) + self.weight_decay_rate = weight_decay_rate + self._include_in_weight_decay = include_in_weight_decay + self._exclude_from_weight_decay = exclude_from_weight_decay + + @classmethod + def from_config(cls, config): + """Creates an optimizer from its config with WarmUp custom object.""" + custom_objects = {'WarmUp': WarmUp} + return super(AdamWeightDecay, cls).from_config( + config, custom_objects=custom_objects) + + def _prepare_local(self, var_device, var_dtype, apply_state): + super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, + apply_state) + apply_state[(var_device, var_dtype)]['weight_decay_rate'] = tf.constant( + self.weight_decay_rate, name='adam_weight_decay_rate') + + def _decay_weights_op(self, var, learning_rate, apply_state): + do_decay = self._do_use_weight_decay(var.name) + if do_decay: + return var.assign_sub( + learning_rate * var * + apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'], + use_locking=self._use_locking) + return tf.no_op() + + def apply_gradients(self, + grads_and_vars, + name=None, + experimental_aggregate_gradients=True): + grads, tvars = list(zip(*grads_and_vars)) + if experimental_aggregate_gradients: + # When experimental_aggregate_gradients = False, apply_gradients() no + # longer implicitly allreduces gradients; users allreduce the gradients + # themselves and pass in the already-reduced grads_and_vars. For now, + # clip_by_global_norm is applied here, before the explicit allreduce, to + # keep the math the same as the TF 1 and pre-TF 2.2 implementations.
+ (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0) + return super(AdamWeightDecay, self).apply_gradients( + zip(grads, tvars), + name=name, + experimental_aggregate_gradients=experimental_aggregate_gradients) + + def _get_lr(self, var_device, var_dtype, apply_state): + """Retrieves the learning rate with the given state.""" + if apply_state is None: + return self._decayed_lr_t[var_dtype], {} + + apply_state = apply_state or {} + coefficients = apply_state.get((var_device, var_dtype)) + if coefficients is None: + coefficients = self._fallback_apply_state(var_device, var_dtype) + apply_state[(var_device, var_dtype)] = coefficients + + return coefficients['lr_t'], dict(apply_state=apply_state) + + def _resource_apply_dense(self, grad, var, apply_state=None): + lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state) + decay = self._decay_weights_op(var, lr_t, apply_state) + with tf.control_dependencies([decay]): + return super(AdamWeightDecay, + self)._resource_apply_dense(grad, var, **kwargs) + + def _resource_apply_sparse(self, grad, var, indices, apply_state=None): + lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state) + decay = self._decay_weights_op(var, lr_t, apply_state) + with tf.control_dependencies([decay]): + return super(AdamWeightDecay, + self)._resource_apply_sparse(grad, var, indices, **kwargs) + + def get_config(self): + config = super(AdamWeightDecay, self).get_config() + config.update({ + 'weight_decay_rate': self.weight_decay_rate, + }) + return config + + def _do_use_weight_decay(self, param_name): + """Whether to use L2 weight decay for `param_name`.""" + if self.weight_decay_rate == 0: + return False + + if self._include_in_weight_decay: + for r in self._include_in_weight_decay: + if re.search(r, param_name) is not None: + return True + + if self._exclude_from_weight_decay: + for r in self._exclude_from_weight_decay: + if re.search(r, param_name) is not None: + return False + return True diff --git a/models/official/nlp/tasks/__init__.py b/models/official/nlp/tasks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/official/nlp/tasks/masked_lm.py b/models/official/nlp/tasks/masked_lm.py new file mode 100644 index 0000000000000000000000000000000000000000..4d392ad1117f54f5539c76988495f4e5999eb4ba --- /dev/null +++ b/models/official/nlp/tasks/masked_lm.py @@ -0,0 +1,171 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
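A short usage sketch for the optimization module above (not part of the diff). It assumes `official.nlp.optimization` and its dependencies (gin, tensorflow_addons) are importable; the numbers are arbitrary and chosen only to show the warmup-then-linear-decay shape and the weight-decay exclusion list.

import tensorflow as tf
from official.nlp import optimization

# AdamW with 100 warmup steps followed by linear decay to 0 over 1000 steps.
opt = optimization.create_optimizer(
    init_lr=5e-5, num_train_steps=1000, num_warmup_steps=100)

# The same schedule, built directly, to show the values it produces.
schedule = optimization.WarmUp(
    initial_learning_rate=5e-5,
    decay_schedule_fn=tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=5e-5, decay_steps=1000, end_learning_rate=0.0),
    warmup_steps=100)
print(float(schedule(50)))    # 2.5e-05: halfway through the linear warmup
print(float(schedule(550)))   # 2.25e-05: on the linear decay towards 0

# LayerNorm and bias variables are excluded from decay by the default regexes.
# (_do_use_weight_decay is protected; poked here only for illustration.)
assert opt._do_use_weight_decay("encoder/layer_0/dense/kernel:0")
assert not opt._do_use_weight_decay("encoder/layer_0/dense/bias:0")
assert not opt._do_use_weight_decay("encoder/layer_0/LayerNorm/gamma:0")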
+# ============================================================================== +"""Masked language task.""" +import dataclasses +import tensorflow as tf + +from official.core import base_task +from official.modeling.hyperparams import config_definitions as cfg +from official.nlp.configs import bert +from official.nlp.data import pretrain_dataloader +from official.nlp.modeling import losses as loss_lib + + +@dataclasses.dataclass +class MaskedLMConfig(cfg.TaskConfig): + """The model config.""" + network: bert.BertPretrainerConfig = bert.BertPretrainerConfig(cls_heads=[ + bert.ClsHeadConfig( + inner_dim=768, num_classes=2, dropout_rate=0.1, name='next_sentence') + ]) + train_data: cfg.DataConfig = cfg.DataConfig() + validation_data: cfg.DataConfig = cfg.DataConfig() + + +@base_task.register_task_cls(MaskedLMConfig) +class MaskedLMTask(base_task.Task): + """Task object for the masked language modeling (pretraining) task.""" + + def build_model(self): + return bert.instantiate_bertpretrainer_from_cfg(self.task_config.network) + + def build_losses(self, + labels, + model_outputs, + metrics, + aux_losses=None) -> tf.Tensor: + metrics = dict([(metric.name, metric) for metric in metrics]) + lm_output = tf.nn.log_softmax( + tf.cast(model_outputs['lm_output'], tf.float32), axis=-1) + mlm_loss = loss_lib.weighted_sparse_categorical_crossentropy_loss( + labels=labels['masked_lm_ids'], + predictions=lm_output, + weights=labels['masked_lm_weights']) + metrics['lm_example_loss'].update_state(mlm_loss) + if 'next_sentence_labels' in labels: + sentence_labels = labels['next_sentence_labels'] + sentence_outputs = tf.cast( + model_outputs['next_sentence'], dtype=tf.float32) + sentence_loss = loss_lib.weighted_sparse_categorical_crossentropy_loss( + labels=sentence_labels, + predictions=tf.nn.log_softmax(sentence_outputs, axis=-1)) + metrics['next_sentence_loss'].update_state(sentence_loss) + total_loss = mlm_loss + sentence_loss + else: + total_loss = mlm_loss + + if aux_losses: + total_loss += tf.add_n(aux_losses) + return total_loss + + def build_inputs(self, params, input_context=None): + """Returns tf.data.Dataset for pretraining.""" + if params.input_path == 'dummy': + def dummy_data(_): + dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32) + dummy_lm = tf.zeros((1, params.max_predictions_per_seq), dtype=tf.int32) + return dict( + input_word_ids=dummy_ids, + input_mask=dummy_ids, + input_type_ids=dummy_ids, + masked_lm_positions=dummy_lm, + masked_lm_ids=dummy_lm, + masked_lm_weights=tf.cast(dummy_lm, dtype=tf.float32), + next_sentence_labels=tf.zeros((1, 1), dtype=tf.int32)) + + dataset = tf.data.Dataset.range(1) + dataset = dataset.repeat() + dataset = dataset.map( + dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE) + return dataset + + return pretrain_dataloader.BertPretrainDataLoader(params).load( + input_context) + + def build_metrics(self, training=None): + del training + metrics = [ + tf.keras.metrics.SparseCategoricalAccuracy(name='masked_lm_accuracy'), + tf.keras.metrics.Mean(name='lm_example_loss') + ] + # TODO(hongkuny): rethink how to manage metrics creation with heads.
+ if self.task_config.train_data.use_next_sentence_label: + metrics.append( + tf.keras.metrics.SparseCategoricalAccuracy( + name='next_sentence_accuracy')) + metrics.append(tf.keras.metrics.Mean(name='next_sentence_loss')) + return metrics + + def process_metrics(self, metrics, labels, model_outputs): + metrics = dict([(metric.name, metric) for metric in metrics]) + if 'masked_lm_accuracy' in metrics: + metrics['masked_lm_accuracy'].update_state(labels['masked_lm_ids'], + model_outputs['lm_output'], + labels['masked_lm_weights']) + if 'next_sentence_accuracy' in metrics: + metrics['next_sentence_accuracy'].update_state( + labels['next_sentence_labels'], model_outputs['next_sentence']) + + def train_step(self, inputs, model: tf.keras.Model, + optimizer: tf.keras.optimizers.Optimizer, metrics): + """Does forward and backward. + + Args: + inputs: a dictionary of input tensors. + model: the model, forward pass definition. + optimizer: the optimizer for this training step. + metrics: a nested structure of metrics objects. + + Returns: + A dictionary of logs. + """ + with tf.GradientTape() as tape: + outputs = model(inputs, training=True) + # Computes per-replica loss. + loss = self.build_losses( + labels=inputs, + model_outputs=outputs, + metrics=metrics, + aux_losses=model.losses) + # Scales loss as the default gradients allreduce performs sum inside the + # optimizer. + # TODO(b/154564893): enable loss scaling. + # scaled_loss = loss / tf.distribute.get_strategy().num_replicas_in_sync + tvars = model.trainable_variables + grads = tape.gradient(loss, tvars) + optimizer.apply_gradients(list(zip(grads, tvars))) + self.process_metrics(metrics, inputs, outputs) + return {self.loss: loss} + + def validation_step(self, inputs, model: tf.keras.Model, metrics): + """Validatation step. + + Args: + inputs: a dictionary of input tensors. + model: the keras.Model. + metrics: a nested structure of metrics objects. + + Returns: + A dictionary of logs. + """ + outputs = self.inference_step(inputs, model) + loss = self.build_losses( + labels=inputs, + model_outputs=outputs, + metrics=metrics, + aux_losses=model.losses) + self.process_metrics(metrics, inputs, outputs) + return {self.loss: loss} diff --git a/models/official/nlp/tasks/masked_lm_test.py b/models/official/nlp/tasks/masked_lm_test.py new file mode 100644 index 0000000000000000000000000000000000000000..0124165ed097d80d31d83ad82c5fac256dfddc5d --- /dev/null +++ b/models/official/nlp/tasks/masked_lm_test.py @@ -0,0 +1,53 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
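The masked-LM loss in build_losses above relies on `masked_lm_weights` to zero out padded prediction slots. The snippet below is a self-contained sketch of that weighting scheme; it mirrors what the weighted sparse categorical cross-entropy is used for here conceptually, not the exact implementation in `official.nlp.modeling.losses`.

import tensorflow as tf

# Toy batch: 1 sequence, 3 masked-LM prediction slots, vocab of 4 tokens;
# only the first two slots are real predictions, the third is padding.
logits = tf.random.normal([1, 3, 4])
log_probs = tf.nn.log_softmax(logits, axis=-1)
masked_lm_ids = tf.constant([[2, 1, 0]])
masked_lm_weights = tf.constant([[1.0, 1.0, 0.0]])   # 0.0 masks the pad slot

per_slot_nll = -tf.gather(log_probs, masked_lm_ids, batch_dims=2)   # [1, 3]
numerator = tf.reduce_sum(per_slot_nll * masked_lm_weights)
denominator = tf.reduce_sum(masked_lm_weights) + 1e-5
mlm_loss = numerator / denominator   # mean NLL over the real slots only
print(float(mlm_loss))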
+# ============================================================================== +"""Tests for official.nlp.tasks.masked_lm.""" + +import tensorflow as tf + +from official.nlp.configs import bert +from official.nlp.configs import encoders +from official.nlp.tasks import masked_lm + + +class MLMTaskTest(tf.test.TestCase): + + def test_task(self): + config = masked_lm.MaskedLMConfig( + network=bert.BertPretrainerConfig( + encoders.TransformerEncoderConfig(vocab_size=30522, num_layers=1), + num_masked_tokens=20, + cls_heads=[ + bert.ClsHeadConfig( + inner_dim=10, num_classes=2, name="next_sentence") + ]), + train_data=bert.BertPretrainDataConfig( + input_path="dummy", + max_predictions_per_seq=20, + seq_length=128, + global_batch_size=1)) + task = masked_lm.MaskedLMTask(config) + model = task.build_model() + metrics = task.build_metrics() + dataset = task.build_inputs(config.train_data) + + iterator = iter(dataset) + optimizer = tf.keras.optimizers.SGD(lr=0.1) + task.train_step(next(iterator), model, optimizer, metrics=metrics) + task.validation_step(next(iterator), model, metrics=metrics) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/tasks/question_answering.py b/models/official/nlp/tasks/question_answering.py new file mode 100644 index 0000000000000000000000000000000000000000..7b3cb7f4e6d78c0f5b9d758ac76768100ad703f9 --- /dev/null +++ b/models/official/nlp/tasks/question_answering.py @@ -0,0 +1,156 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Question answering task.""" +import logging +import dataclasses +import tensorflow as tf +import tensorflow_hub as hub + +from official.core import base_task +from official.modeling.hyperparams import config_definitions as cfg +from official.nlp.bert import input_pipeline +from official.nlp.configs import encoders +from official.nlp.modeling import models +from official.nlp.tasks import utils + + +@dataclasses.dataclass +class QuestionAnsweringConfig(cfg.TaskConfig): + """The model config.""" + # At most one of `init_checkpoint` and `hub_module_url` can be specified. + init_checkpoint: str = '' + hub_module_url: str = '' + network: encoders.TransformerEncoderConfig = ( + encoders.TransformerEncoderConfig()) + train_data: cfg.DataConfig = cfg.DataConfig() + validation_data: cfg.DataConfig = cfg.DataConfig() + + +@base_task.register_task_cls(QuestionAnsweringConfig) +class QuestionAnsweringTask(base_task.Task): + """Task object for question answering. + + TODO(lehou): Add post-processing. 
+ """ + + def __init__(self, params=cfg.TaskConfig): + super(QuestionAnsweringTask, self).__init__(params) + if params.hub_module_url and params.init_checkpoint: + raise ValueError('At most one of `hub_module_url` and ' + '`init_checkpoint` can be specified.') + if params.hub_module_url: + self._hub_module = hub.load(params.hub_module_url) + else: + self._hub_module = None + + def build_model(self): + if self._hub_module: + encoder_network = utils.get_encoder_from_hub(self._hub_module) + else: + encoder_network = encoders.instantiate_encoder_from_cfg( + self.task_config.network) + + return models.BertSpanLabeler( + network=encoder_network, + initializer=tf.keras.initializers.TruncatedNormal( + stddev=self.task_config.network.initializer_range)) + + def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor: + start_positions = labels['start_positions'] + end_positions = labels['end_positions'] + start_logits, end_logits = model_outputs + + start_loss = tf.keras.losses.sparse_categorical_crossentropy( + start_positions, + tf.cast(start_logits, dtype=tf.float32), + from_logits=True) + end_loss = tf.keras.losses.sparse_categorical_crossentropy( + end_positions, + tf.cast(end_logits, dtype=tf.float32), + from_logits=True) + + loss = (tf.reduce_mean(start_loss) + tf.reduce_mean(end_loss)) / 2 + return loss + + def build_inputs(self, params, input_context=None): + """Returns tf.data.Dataset for sentence_prediction task.""" + if params.input_path == 'dummy': + def dummy_data(_): + dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32) + x = dict( + input_word_ids=dummy_ids, + input_mask=dummy_ids, + input_type_ids=dummy_ids) + y = dict( + start_positions=tf.constant(0, dtype=tf.int32), + end_positions=tf.constant(1, dtype=tf.int32)) + return (x, y) + + dataset = tf.data.Dataset.range(1) + dataset = dataset.repeat() + dataset = dataset.map( + dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE) + return dataset + + batch_size = input_context.get_per_replica_batch_size( + params.global_batch_size) if input_context else params.global_batch_size + # TODO(chendouble): add and use nlp.data.question_answering_dataloader. + dataset = input_pipeline.create_squad_dataset( + params.input_path, + params.seq_length, + batch_size, + is_training=params.is_training, + input_pipeline_context=input_context) + return dataset + + def build_metrics(self, training=None): + del training + # TODO(lehou): a list of metrics doesn't work the same as in compile/fit. + metrics = [ + tf.keras.metrics.SparseCategoricalAccuracy( + name='start_position_accuracy'), + tf.keras.metrics.SparseCategoricalAccuracy( + name='end_position_accuracy'), + ] + return metrics + + def process_metrics(self, metrics, labels, model_outputs): + metrics = dict([(metric.name, metric) for metric in metrics]) + start_logits, end_logits = model_outputs + metrics['start_position_accuracy'].update_state( + labels['start_positions'], start_logits) + metrics['end_position_accuracy'].update_state( + labels['end_positions'], end_logits) + + def process_compiled_metrics(self, compiled_metrics, labels, model_outputs): + start_logits, end_logits = model_outputs + compiled_metrics.update_state( + y_true=labels, # labels has keys 'start_positions' and 'end_positions'. 
+ y_pred={'start_positions': start_logits, 'end_positions': end_logits}) + + def initialize(self, model): + """Load a pretrained checkpoint (if exists) and then train from iter 0.""" + ckpt_dir_or_file = self.task_config.init_checkpoint + if tf.io.gfile.isdir(ckpt_dir_or_file): + ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file) + if not ckpt_dir_or_file: + return + + ckpt = tf.train.Checkpoint(**model.checkpoint_items) + status = ckpt.restore(ckpt_dir_or_file) + status.expect_partial().assert_existing_objects_matched() + logging.info('finished loading pretrained checkpoint from %s', + ckpt_dir_or_file) diff --git a/models/official/nlp/tasks/question_answering_test.py b/models/official/nlp/tasks/question_answering_test.py new file mode 100644 index 0000000000000000000000000000000000000000..8e0f3f10bcc28e324b73528465cdcebda5633b56 --- /dev/null +++ b/models/official/nlp/tasks/question_answering_test.py @@ -0,0 +1,130 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for official.nlp.tasks.question_answering.""" +import functools +import os +import tensorflow as tf + +from official.nlp.bert import configs +from official.nlp.bert import export_tfhub +from official.nlp.configs import bert +from official.nlp.configs import encoders +from official.nlp.tasks import question_answering + + +class QuestionAnsweringTaskTest(tf.test.TestCase): + + def setUp(self): + super(QuestionAnsweringTaskTest, self).setUp() + self._encoder_config = encoders.TransformerEncoderConfig( + vocab_size=30522, num_layers=1) + self._train_data_config = bert.QADataConfig( + input_path="dummy", seq_length=128, global_batch_size=1) + + def _run_task(self, config): + task = question_answering.QuestionAnsweringTask(config) + model = task.build_model() + metrics = task.build_metrics() + + strategy = tf.distribute.get_strategy() + dataset = strategy.experimental_distribute_datasets_from_function( + functools.partial(task.build_inputs, config.train_data)) + + iterator = iter(dataset) + optimizer = tf.keras.optimizers.SGD(lr=0.1) + task.train_step(next(iterator), model, optimizer, metrics=metrics) + task.validation_step(next(iterator), model, metrics=metrics) + + def test_task(self): + # Saves a checkpoint. 
+ pretrain_cfg = bert.BertPretrainerConfig( + encoder=self._encoder_config, + num_masked_tokens=20, + cls_heads=[ + bert.ClsHeadConfig( + inner_dim=10, num_classes=3, name="next_sentence") + ]) + pretrain_model = bert.instantiate_bertpretrainer_from_cfg(pretrain_cfg) + ckpt = tf.train.Checkpoint( + model=pretrain_model, **pretrain_model.checkpoint_items) + saved_path = ckpt.save(self.get_temp_dir()) + + config = question_answering.QuestionAnsweringConfig( + init_checkpoint=saved_path, + network=self._encoder_config, + train_data=self._train_data_config) + task = question_answering.QuestionAnsweringTask(config) + model = task.build_model() + metrics = task.build_metrics() + dataset = task.build_inputs(config.train_data) + + iterator = iter(dataset) + optimizer = tf.keras.optimizers.SGD(lr=0.1) + task.train_step(next(iterator), model, optimizer, metrics=metrics) + task.validation_step(next(iterator), model, metrics=metrics) + task.initialize(model) + + def test_task_with_fit(self): + config = question_answering.QuestionAnsweringConfig( + network=self._encoder_config, + train_data=self._train_data_config) + task = question_answering.QuestionAnsweringTask(config) + model = task.build_model() + model = task.compile_model( + model, + optimizer=tf.keras.optimizers.SGD(lr=0.1), + train_step=task.train_step, + metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name="accuracy")]) + dataset = task.build_inputs(config.train_data) + logs = model.fit(dataset, epochs=1, steps_per_epoch=2) + self.assertIn("loss", logs.history) + self.assertIn("start_positions_accuracy", logs.history) + self.assertIn("end_positions_accuracy", logs.history) + + def _export_bert_tfhub(self): + bert_config = configs.BertConfig( + vocab_size=30522, + hidden_size=16, + intermediate_size=32, + max_position_embeddings=128, + num_attention_heads=2, + num_hidden_layers=1) + _, encoder = export_tfhub.create_bert_model(bert_config) + model_checkpoint_dir = os.path.join(self.get_temp_dir(), "checkpoint") + checkpoint = tf.train.Checkpoint(model=encoder) + checkpoint.save(os.path.join(model_checkpoint_dir, "test")) + model_checkpoint_path = tf.train.latest_checkpoint(model_checkpoint_dir) + + vocab_file = os.path.join(self.get_temp_dir(), "uncased_vocab.txt") + with tf.io.gfile.GFile(vocab_file, "w") as f: + f.write("dummy content") + + hub_destination = os.path.join(self.get_temp_dir(), "hub") + export_tfhub.export_bert_tfhub(bert_config, model_checkpoint_path, + hub_destination, vocab_file) + return hub_destination + + def test_task_with_hub(self): + hub_module_url = self._export_bert_tfhub() + config = question_answering.QuestionAnsweringConfig( + hub_module_url=hub_module_url, + network=self._encoder_config, + train_data=self._train_data_config) + self._run_task(config) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/tasks/sentence_prediction.py b/models/official/nlp/tasks/sentence_prediction.py new file mode 100644 index 0000000000000000000000000000000000000000..b2eb0bf47de273408459e35cf45ff01ac69a9d2c --- /dev/null +++ b/models/official/nlp/tasks/sentence_prediction.py @@ -0,0 +1,190 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Sentence prediction (classification) task.""" +from absl import logging +import dataclasses +import numpy as np +from scipy import stats +from sklearn import metrics as sklearn_metrics +import tensorflow as tf +import tensorflow_hub as hub + +from official.core import base_task +from official.modeling.hyperparams import config_definitions as cfg +from official.nlp.configs import bert +from official.nlp.data import sentence_prediction_dataloader +from official.nlp.modeling import losses as loss_lib +from official.nlp.tasks import utils + + +@dataclasses.dataclass +class SentencePredictionConfig(cfg.TaskConfig): + """The model config.""" + # At most one of `init_checkpoint` and `hub_module_url` can + # be specified. + init_checkpoint: str = '' + hub_module_url: str = '' + metric_type: str = 'accuracy' + network: bert.BertPretrainerConfig = bert.BertPretrainerConfig( + num_masked_tokens=0, # No masked language modeling head. + cls_heads=[ + bert.ClsHeadConfig( + inner_dim=768, + num_classes=3, + dropout_rate=0.1, + name='sentence_prediction') + ]) + train_data: cfg.DataConfig = cfg.DataConfig() + validation_data: cfg.DataConfig = cfg.DataConfig() + + +@base_task.register_task_cls(SentencePredictionConfig) +class SentencePredictionTask(base_task.Task): + """Task object for sentence_prediction.""" + + def __init__(self, params=cfg.TaskConfig): + super(SentencePredictionTask, self).__init__(params) + if params.hub_module_url and params.init_checkpoint: + raise ValueError('At most one of `hub_module_url` and ' + '`pretrain_checkpoint_dir` can be specified.') + if params.hub_module_url: + self._hub_module = hub.load(params.hub_module_url) + else: + self._hub_module = None + self.metric_type = params.metric_type + + def build_model(self): + if self._hub_module: + encoder_from_hub = utils.get_encoder_from_hub(self._hub_module) + return bert.instantiate_bertpretrainer_from_cfg( + self.task_config.network, encoder_network=encoder_from_hub) + else: + return bert.instantiate_bertpretrainer_from_cfg(self.task_config.network) + + def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor: + loss = loss_lib.weighted_sparse_categorical_crossentropy_loss( + labels=labels, + predictions=tf.nn.log_softmax( + tf.cast(model_outputs['sentence_prediction'], tf.float32), axis=-1)) + + if aux_losses: + loss += tf.add_n(aux_losses) + return loss + + def build_inputs(self, params, input_context=None): + """Returns tf.data.Dataset for sentence_prediction task.""" + if params.input_path == 'dummy': + + def dummy_data(_): + dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32) + x = dict( + input_word_ids=dummy_ids, + input_mask=dummy_ids, + input_type_ids=dummy_ids) + y = tf.ones((1, 1), dtype=tf.int32) + return (x, y) + + dataset = tf.data.Dataset.range(1) + dataset = dataset.repeat() + dataset = dataset.map( + dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE) + return dataset + + return sentence_prediction_dataloader.SentencePredictionDataLoader( + params).load(input_context) + + def 
build_metrics(self, training=None): + del training + metrics = [tf.keras.metrics.SparseCategoricalAccuracy(name='cls_accuracy')] + return metrics + + def process_metrics(self, metrics, labels, model_outputs): + for metric in metrics: + metric.update_state(labels, model_outputs['sentence_prediction']) + + def process_compiled_metrics(self, compiled_metrics, labels, model_outputs): + compiled_metrics.update_state(labels, model_outputs['sentence_prediction']) + + def validation_step(self, inputs, model: tf.keras.Model, metrics=None): + if self.metric_type == 'accuracy': + return super(SentencePredictionTask, + self).validation_step(inputs, model, metrics) + features, labels = inputs + outputs = self.inference_step(features, model) + loss = self.build_losses( + labels=labels, model_outputs=outputs, aux_losses=model.losses) + if self.metric_type == 'matthews_corrcoef': + return { + self.loss: + loss, + 'sentence_prediction': + tf.expand_dims( + tf.math.argmax(outputs['sentence_prediction'], axis=1), + axis=0), + 'labels': + labels, + } + if self.metric_type == 'pearson_spearman_corr': + return { + self.loss: loss, + 'sentence_prediction': outputs['sentence_prediction'], + 'labels': labels, + } + + def aggregate_logs(self, state=None, step_outputs=None): + if state is None: + state = {'sentence_prediction': [], 'labels': []} + state['sentence_prediction'].append( + np.concatenate([v.numpy() for v in step_outputs['sentence_prediction']], + axis=0)) + state['labels'].append( + np.concatenate([v.numpy() for v in step_outputs['labels']], axis=0)) + return state + + def reduce_aggregated_logs(self, aggregated_logs): + if self.metric_type == 'matthews_corrcoef': + preds = np.concatenate(aggregated_logs['sentence_prediction'], axis=0) + labels = np.concatenate(aggregated_logs['labels'], axis=0) + return { + self.metric_type: sklearn_metrics.matthews_corrcoef(preds, labels) + } + if self.metric_type == 'pearson_spearman_corr': + preds = np.concatenate(aggregated_logs['sentence_prediction'], axis=0) + labels = np.concatenate(aggregated_logs['labels'], axis=0) + pearson_corr = stats.pearsonr(preds, labels)[0] + spearman_corr = stats.spearmanr(preds, labels)[0] + corr_metric = (pearson_corr + spearman_corr) / 2 + return {self.metric_type: corr_metric} + + def initialize(self, model): + """Load a pretrained checkpoint (if exists) and then train from iter 0.""" + ckpt_dir_or_file = self.task_config.init_checkpoint + if tf.io.gfile.isdir(ckpt_dir_or_file): + ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file) + if not ckpt_dir_or_file: + return + + pretrain2finetune_mapping = { + 'encoder': + model.checkpoint_items['encoder'], + 'next_sentence.pooler_dense': + model.checkpoint_items['sentence_prediction.pooler_dense'], + } + ckpt = tf.train.Checkpoint(**pretrain2finetune_mapping) + status = ckpt.restore(ckpt_dir_or_file) + status.expect_partial().assert_existing_objects_matched() + logging.info('finished loading pretrained checkpoint from %s', + ckpt_dir_or_file) diff --git a/models/official/nlp/tasks/sentence_prediction_test.py b/models/official/nlp/tasks/sentence_prediction_test.py new file mode 100644 index 0000000000000000000000000000000000000000..09419f54c4642f08ca37e2588103c45d0847b7bc --- /dev/null +++ b/models/official/nlp/tasks/sentence_prediction_test.py @@ -0,0 +1,163 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
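For the `matthews_corrcoef` and `pearson_spearman_corr` metric types above, validation predictions and labels are accumulated across steps by aggregate_logs() and the score is computed once in reduce_aggregated_logs(). A toy version of that final reduction, using the same scipy/sklearn calls (the arrays below are made up):

import numpy as np
from scipy import stats
from sklearn import metrics as sklearn_metrics

# Regression-style outputs (e.g. STS-B) -> pearson_spearman_corr.
preds = np.array([0.1, 0.8, 0.4, 0.9])
labels = np.array([0.0, 1.0, 0.5, 1.0])
pearson_corr = stats.pearsonr(preds, labels)[0]
spearman_corr = stats.spearmanr(preds, labels)[0]
print((pearson_corr + spearman_corr) / 2)

# Class-id outputs (e.g. CoLA) -> matthews_corrcoef on argmax predictions.
print(sklearn_metrics.matthews_corrcoef([0, 1, 1, 0], [0, 1, 0, 0]))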
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for official.nlp.tasks.sentence_prediction.""" +import functools +import os + +from absl.testing import parameterized +import tensorflow as tf + +from official.nlp.bert import configs +from official.nlp.bert import export_tfhub +from official.nlp.configs import bert +from official.nlp.configs import encoders +from official.nlp.tasks import sentence_prediction + + +class SentencePredictionTaskTest(tf.test.TestCase, parameterized.TestCase): + + def setUp(self): + super(SentencePredictionTaskTest, self).setUp() + self._train_data_config = bert.SentencePredictionDataConfig( + input_path="dummy", seq_length=128, global_batch_size=1) + + def get_network_config(self, num_classes): + return bert.BertPretrainerConfig( + encoder=encoders.TransformerEncoderConfig( + vocab_size=30522, num_layers=1), + num_masked_tokens=0, + cls_heads=[ + bert.ClsHeadConfig( + inner_dim=10, + num_classes=num_classes, + name="sentence_prediction") + ]) + + def _run_task(self, config): + task = sentence_prediction.SentencePredictionTask(config) + model = task.build_model() + metrics = task.build_metrics() + + strategy = tf.distribute.get_strategy() + dataset = strategy.experimental_distribute_datasets_from_function( + functools.partial(task.build_inputs, config.train_data)) + + iterator = iter(dataset) + optimizer = tf.keras.optimizers.SGD(lr=0.1) + task.train_step(next(iterator), model, optimizer, metrics=metrics) + task.validation_step(next(iterator), model, metrics=metrics) + + def test_task(self): + config = sentence_prediction.SentencePredictionConfig( + init_checkpoint=self.get_temp_dir(), + network=self.get_network_config(2), + train_data=self._train_data_config) + task = sentence_prediction.SentencePredictionTask(config) + model = task.build_model() + metrics = task.build_metrics() + dataset = task.build_inputs(config.train_data) + + iterator = iter(dataset) + optimizer = tf.keras.optimizers.SGD(lr=0.1) + task.train_step(next(iterator), model, optimizer, metrics=metrics) + task.validation_step(next(iterator), model, metrics=metrics) + + # Saves a checkpoint. 
+ pretrain_cfg = bert.BertPretrainerConfig( + encoder=encoders.TransformerEncoderConfig( + vocab_size=30522, num_layers=1), + num_masked_tokens=20, + cls_heads=[ + bert.ClsHeadConfig( + inner_dim=10, num_classes=3, name="next_sentence") + ]) + pretrain_model = bert.instantiate_bertpretrainer_from_cfg(pretrain_cfg) + ckpt = tf.train.Checkpoint( + model=pretrain_model, **pretrain_model.checkpoint_items) + ckpt.save(config.init_checkpoint) + task.initialize(model) + + @parameterized.parameters(("matthews_corrcoef", 2), + ("pearson_spearman_corr", 1)) + def test_np_metrics(self, metric_type, num_classes): + config = sentence_prediction.SentencePredictionConfig( + metric_type=metric_type, + init_checkpoint=self.get_temp_dir(), + network=self.get_network_config(num_classes), + train_data=self._train_data_config) + task = sentence_prediction.SentencePredictionTask(config) + model = task.build_model() + dataset = task.build_inputs(config.train_data) + + iterator = iter(dataset) + strategy = tf.distribute.get_strategy() + distributed_outputs = strategy.run( + functools.partial(task.validation_step, model=model), + args=(next(iterator),)) + outputs = tf.nest.map_structure(strategy.experimental_local_results, + distributed_outputs) + aggregated = task.aggregate_logs(step_outputs=outputs) + aggregated = task.aggregate_logs(state=aggregated, step_outputs=outputs) + self.assertIn(metric_type, task.reduce_aggregated_logs(aggregated)) + + def test_task_with_fit(self): + config = sentence_prediction.SentencePredictionConfig( + network=self.get_network_config(2), train_data=self._train_data_config) + task = sentence_prediction.SentencePredictionTask(config) + model = task.build_model() + model = task.compile_model( + model, + optimizer=tf.keras.optimizers.SGD(lr=0.1), + train_step=task.train_step, + metrics=task.build_metrics()) + dataset = task.build_inputs(config.train_data) + logs = model.fit(dataset, epochs=1, steps_per_epoch=2) + self.assertIn("loss", logs.history) + + def _export_bert_tfhub(self): + bert_config = configs.BertConfig( + vocab_size=30522, + hidden_size=16, + intermediate_size=32, + max_position_embeddings=128, + num_attention_heads=2, + num_hidden_layers=1) + _, encoder = export_tfhub.create_bert_model(bert_config) + model_checkpoint_dir = os.path.join(self.get_temp_dir(), "checkpoint") + checkpoint = tf.train.Checkpoint(model=encoder) + checkpoint.save(os.path.join(model_checkpoint_dir, "test")) + model_checkpoint_path = tf.train.latest_checkpoint(model_checkpoint_dir) + + vocab_file = os.path.join(self.get_temp_dir(), "uncased_vocab.txt") + with tf.io.gfile.GFile(vocab_file, "w") as f: + f.write("dummy content") + + hub_destination = os.path.join(self.get_temp_dir(), "hub") + export_tfhub.export_bert_tfhub(bert_config, model_checkpoint_path, + hub_destination, vocab_file) + return hub_destination + + def test_task_with_hub(self): + hub_module_url = self._export_bert_tfhub() + config = sentence_prediction.SentencePredictionConfig( + hub_module_url=hub_module_url, + network=self.get_network_config(2), + train_data=self._train_data_config) + self._run_task(config) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/tasks/tagging.py b/models/official/nlp/tasks/tagging.py new file mode 100644 index 0000000000000000000000000000000000000000..a1f20b1360a952b0a5c6fabc2a3ee252c2ef5137 --- /dev/null +++ b/models/official/nlp/tasks/tagging.py @@ -0,0 +1,147 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tagging (e.g., NER/POS) task.""" +import logging +import dataclasses +import tensorflow as tf +import tensorflow_hub as hub + +from official.core import base_task +from official.modeling.hyperparams import config_definitions as cfg +from official.nlp.configs import encoders +from official.nlp.data import tagging_data_loader +from official.nlp.modeling import models +from official.nlp.tasks import utils + + +@dataclasses.dataclass +class TaggingConfig(cfg.TaskConfig): + """The model config.""" + # At most one of `init_checkpoint` and `hub_module_url` can be specified. + init_checkpoint: str = '' + hub_module_url: str = '' + network: encoders.TransformerEncoderConfig = ( + encoders.TransformerEncoderConfig()) + num_classes: int = 0 + # The ignored label id will not contribute to loss. + # A word may be tokenized into multiple word_pieces tokens, and we usually + # assign the real label id for the first token of the word, and + # `ignore_label_id` for the remaining tokens. + ignore_label_id: int = 0 + train_data: cfg.DataConfig = cfg.DataConfig() + validation_data: cfg.DataConfig = cfg.DataConfig() + + +@base_task.register_task_cls(TaggingConfig) +class TaggingTask(base_task.Task): + """Task object for tagging (e.g., NER or POS).""" + + def __init__(self, params=cfg.TaskConfig): + super(TaggingTask, self).__init__(params) + if params.hub_module_url and params.init_checkpoint: + raise ValueError('At most one of `hub_module_url` and ' + '`init_checkpoint` can be specified.') + if params.num_classes == 0: + raise ValueError('TaggingConfig.num_classes cannot be 0.') + + if params.hub_module_url: + self._hub_module = hub.load(params.hub_module_url) + else: + self._hub_module = None + + def build_model(self): + if self._hub_module: + encoder_network = utils.get_encoder_from_hub(self._hub_module) + else: + encoder_network = encoders.instantiate_encoder_from_cfg( + self.task_config.network) + + return models.BertTokenClassifier( + network=encoder_network, + num_classes=self.task_config.num_classes, + initializer=tf.keras.initializers.TruncatedNormal( + stddev=self.task_config.network.initializer_range), + dropout_rate=self.task_config.network.dropout_rate, + output='logits') + + def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor: + model_outputs = tf.cast(model_outputs, tf.float32) + loss = tf.keras.losses.sparse_categorical_crossentropy( + labels, model_outputs, from_logits=True) + # `ignore_label_id` will not contribute to loss. 
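+    # Tokens whose label equals `ignore_label_id` get weight 0.0, so the loss
+    # below is averaged only over tokens that carry a real label.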
+ label_weights = tf.cast( + tf.not_equal(labels, self.task_config.ignore_label_id), + dtype=tf.float32) + numerator_loss = tf.reduce_sum(loss * label_weights) + denominator_loss = tf.reduce_sum(label_weights) + loss = tf.math.divide_no_nan(numerator_loss, denominator_loss) + return loss + + def build_inputs(self, params, input_context=None): + """Returns tf.data.Dataset for sentence_prediction task.""" + if params.input_path == 'dummy': + + def dummy_data(_): + dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32) + x = dict( + input_word_ids=dummy_ids, + input_mask=dummy_ids, + input_type_ids=dummy_ids) + y = tf.ones((1, params.seq_length), dtype=tf.int32) + return (x, y) + + dataset = tf.data.Dataset.range(1) + dataset = dataset.repeat() + dataset = dataset.map( + dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE) + return dataset + + dataset = tagging_data_loader.TaggingDataLoader(params).load(input_context) + return dataset + + def build_metrics(self, training=None): + del training + # TODO(chendouble): evaluate using seqeval's f1/precision/recall. + return [tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy')] + + def process_metrics(self, metrics, labels, model_outputs): + # `ignore_label_id` will not contribute to metrics. + sample_weight = tf.cast( + tf.not_equal(labels, self.task_config.ignore_label_id), + dtype=tf.float32) + for metric in metrics: + metric.update_state(labels, model_outputs, sample_weight) + + def process_compiled_metrics(self, compiled_metrics, labels, model_outputs): + # `ignore_label_id` will not contribute to metrics. + sample_weight = tf.cast( + tf.not_equal(labels, self.task_config.ignore_label_id), + dtype=tf.float32) + compiled_metrics.update_state(labels, model_outputs, sample_weight) + + def initialize(self, model): + """Load a pretrained checkpoint (if exists) and then train from iter 0.""" + ckpt_dir_or_file = self.task_config.init_checkpoint + if tf.io.gfile.isdir(ckpt_dir_or_file): + ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file) + if not ckpt_dir_or_file: + return + + ckpt = tf.train.Checkpoint(**model.checkpoint_items) + status = ckpt.restore(ckpt_dir_or_file) + status.expect_partial().assert_existing_objects_matched() + logging.info('finished loading pretrained checkpoint from %s', + ckpt_dir_or_file) diff --git a/models/official/nlp/tasks/tagging_test.py b/models/official/nlp/tasks/tagging_test.py new file mode 100644 index 0000000000000000000000000000000000000000..6707a50a824246062be9387c9c83f49c1a3309f5 --- /dev/null +++ b/models/official/nlp/tasks/tagging_test.py @@ -0,0 +1,125 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for official.nlp.tasks.tagging.""" +import functools +import os +import tensorflow as tf + +from official.nlp.bert import configs +from official.nlp.bert import export_tfhub +from official.nlp.configs import bert +from official.nlp.configs import encoders +from official.nlp.tasks import tagging + + +class TaggingTest(tf.test.TestCase): + + def setUp(self): + super(TaggingTest, self).setUp() + self._encoder_config = encoders.TransformerEncoderConfig( + vocab_size=30522, num_layers=1) + self._train_data_config = bert.TaggingDataConfig( + input_path="dummy", seq_length=128, global_batch_size=1) + + def _run_task(self, config): + task = tagging.TaggingTask(config) + model = task.build_model() + metrics = task.build_metrics() + + strategy = tf.distribute.get_strategy() + dataset = strategy.experimental_distribute_datasets_from_function( + functools.partial(task.build_inputs, config.train_data)) + + iterator = iter(dataset) + optimizer = tf.keras.optimizers.SGD(lr=0.1) + task.train_step(next(iterator), model, optimizer, metrics=metrics) + task.validation_step(next(iterator), model, metrics=metrics) + + def test_task(self): + # Saves a checkpoint. + encoder = encoders.instantiate_encoder_from_cfg(self._encoder_config) + ckpt = tf.train.Checkpoint(encoder=encoder) + saved_path = ckpt.save(self.get_temp_dir()) + + config = tagging.TaggingConfig( + init_checkpoint=saved_path, + network=self._encoder_config, + train_data=self._train_data_config, + num_classes=3) + task = tagging.TaggingTask(config) + model = task.build_model() + metrics = task.build_metrics() + dataset = task.build_inputs(config.train_data) + + iterator = iter(dataset) + optimizer = tf.keras.optimizers.SGD(lr=0.1) + task.train_step(next(iterator), model, optimizer, metrics=metrics) + task.validation_step(next(iterator), model, metrics=metrics) + task.initialize(model) + + def test_task_with_fit(self): + config = tagging.TaggingConfig( + network=self._encoder_config, + train_data=self._train_data_config, + num_classes=3) + + task = tagging.TaggingTask(config) + model = task.build_model() + model = task.compile_model( + model, + optimizer=tf.keras.optimizers.SGD(lr=0.1), + train_step=task.train_step, + metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name="accuracy")]) + dataset = task.build_inputs(config.train_data) + logs = model.fit(dataset, epochs=1, steps_per_epoch=2) + self.assertIn("loss", logs.history) + self.assertIn("accuracy", logs.history) + + def _export_bert_tfhub(self): + bert_config = configs.BertConfig( + vocab_size=30522, + hidden_size=16, + intermediate_size=32, + max_position_embeddings=128, + num_attention_heads=2, + num_hidden_layers=1) + _, encoder = export_tfhub.create_bert_model(bert_config) + model_checkpoint_dir = os.path.join(self.get_temp_dir(), "checkpoint") + checkpoint = tf.train.Checkpoint(model=encoder) + checkpoint.save(os.path.join(model_checkpoint_dir, "test")) + model_checkpoint_path = tf.train.latest_checkpoint(model_checkpoint_dir) + + vocab_file = os.path.join(self.get_temp_dir(), "uncased_vocab.txt") + with tf.io.gfile.GFile(vocab_file, "w") as f: + f.write("dummy content") + + hub_destination = os.path.join(self.get_temp_dir(), "hub") + export_tfhub.export_bert_tfhub(bert_config, model_checkpoint_path, + hub_destination, vocab_file) + return hub_destination + + def test_task_with_hub(self): + hub_module_url = self._export_bert_tfhub() + config = tagging.TaggingConfig( + hub_module_url=hub_module_url, 
+ network=self._encoder_config, + num_classes=4, + train_data=self._train_data_config) + self._run_task(config) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/tasks/utils.py b/models/official/nlp/tasks/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..467dafe31f813779b7af5ea0209aadccb6d1bdf8 --- /dev/null +++ b/models/official/nlp/tasks/utils.py @@ -0,0 +1,34 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Common utils for tasks.""" +import tensorflow as tf +import tensorflow_hub as hub + + +def get_encoder_from_hub(hub_module: str) -> tf.keras.Model: + """Gets an encoder from hub.""" + input_word_ids = tf.keras.layers.Input( + shape=(None,), dtype=tf.int32, name='input_word_ids') + input_mask = tf.keras.layers.Input( + shape=(None,), dtype=tf.int32, name='input_mask') + input_type_ids = tf.keras.layers.Input( + shape=(None,), dtype=tf.int32, name='input_type_ids') + hub_layer = hub.KerasLayer(hub_module, trainable=True) + pooled_output, sequence_output = hub_layer( + [input_word_ids, input_mask, input_type_ids]) + return tf.keras.Model( + inputs=[input_word_ids, input_mask, input_type_ids], + outputs=[sequence_output, pooled_output]) diff --git a/models/official/nlp/transformer/README.md b/models/official/nlp/transformer/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1215ed574b316030f69713de8dc3000ea64e3df6 --- /dev/null +++ b/models/official/nlp/transformer/README.md @@ -0,0 +1,218 @@ +# Transformer Translation Model +This is an implementation of the Transformer translation model as described in +the [Attention is All You Need](https://arxiv.org/abs/1706.03762) paper. The +implementation leverages tf.keras and makes sure it is compatible with TF 2.x. + +**Note: this transformer folder is subject to be integrated into official/nlp +folder. Due to its dependencies, we will finish the refactoring after the model +garden 2.1 release.** + +## Contents + * [Contents](#contents) + * [Walkthrough](#walkthrough) + * [Detailed instructions](#detailed-instructions) + * [Environment preparation](#environment-preparation) + * [Download and preprocess datasets](#download-and-preprocess-datasets) + * [Model training and evaluation](#model-training-and-evaluation) + * [Implementation overview](#implementation-overview) + * [Model Definition](#model-definition) + * [Model Trainer](#model-trainer) + * [Test dataset](#test-dataset) + +## Walkthrough + +Below are the commands for running the Transformer model. See the +[Detailed instructions](#detailed-instructions) for more details on running the +model. 
+ +``` +# Ensure that PYTHONPATH is correctly defined as described in +# https://github.com/tensorflow/models/tree/master/official#requirements +export PYTHONPATH="$PYTHONPATH:/path/to/models" + +cd /path/to/models/official/nlp/transformer + +# Export variables +PARAM_SET=big +DATA_DIR=$HOME/transformer/data +MODEL_DIR=$HOME/transformer/model_$PARAM_SET +VOCAB_FILE=$DATA_DIR/vocab.ende.32768 + +# Download training/evaluation/test datasets +python3 data_download.py --data_dir=$DATA_DIR + +# Train the model for 100000 steps and evaluate every 5000 steps on a single GPU. +# Each train step takes 4096 tokens as a batch budget, with 64 as the maximal +# sequence length. +python3 transformer_main.py --data_dir=$DATA_DIR --model_dir=$MODEL_DIR \ + --vocab_file=$VOCAB_FILE --param_set=$PARAM_SET \ + --train_steps=100000 --steps_between_evals=5000 \ + --batch_size=4096 --max_length=64 \ + --bleu_source=$DATA_DIR/newstest2014.en \ + --bleu_ref=$DATA_DIR/newstest2014.de \ + --num_gpus=1 \ + --enable_time_history=false + +# Run during training in a separate process to get continuous updates, +# or after training is complete. +tensorboard --logdir=$MODEL_DIR +``` + +## Detailed instructions + + +0. ### Environment preparation + + #### Add models repo to PYTHONPATH + Follow the instructions described in the [Requirements](https://github.com/tensorflow/models/tree/master/official#requirements) section to add the models folder to the python path. + + #### Export variables (optional) + + Export the following variables, or modify the values in each of the snippets below: + + ```shell + PARAM_SET=big + DATA_DIR=$HOME/transformer/data + MODEL_DIR=$HOME/transformer/model_$PARAM_SET + VOCAB_FILE=$DATA_DIR/vocab.ende.32768 + ``` + +1. ### Download and preprocess datasets + + [data_download.py](data_download.py) downloads and preprocesses the training and evaluation WMT datasets. After the data is downloaded and extracted, the training data is used to generate a vocabulary of subtokens. The evaluation and training strings are tokenized, and the resulting data is sharded, shuffled, and saved as TFRecords. + + 1.75GB of compressed data will be downloaded. In total, the raw files (compressed, extracted, and combined files) take up 8.4GB of disk space. The resulting TFRecord and vocabulary files are 722MB. The script takes around 40 minutes to run, with the bulk of the time spent downloading and ~15 minutes spent on preprocessing. + + Command to run: + ``` + python3 data_download.py --data_dir=$DATA_DIR + ``` + + Arguments: + * `--data_dir`: Path where the preprocessed TFRecord data and vocab file will be saved. + * Use the `--help` or `-h` flag to get a full list of possible arguments. + +2. ### Model training and evaluation + + [transformer_main.py](transformer_main.py) creates a Transformer keras model, + and trains it using keras model.fit(). + + Users need to adjust `batch_size` and `num_gpus` to get good performance + when running on multiple GPUs. + + **Note that:** + when using multiple GPUs or TPUs, `batch_size` is the global batch size for all + devices. For example, if the batch size is `4096*4` and there are 4 devices, + each device will take 4096 tokens as a batch budget. + + Command to run: + ``` + python3 transformer_main.py --data_dir=$DATA_DIR --model_dir=$MODEL_DIR \ + --vocab_file=$VOCAB_FILE --param_set=$PARAM_SET + ``` + + Arguments: + * `--data_dir`: This should be set to the same directory given to `data_download.py`'s `data_dir` argument.
+ * `--model_dir`: Directory to save Transformer model training checkpoints. + * `--vocab_file`: Path to subtoken vocabulary file. If data_download was used, you may find the file in `data_dir`. + * `--param_set`: Parameter set to use when creating and training the model. Options are `base` and `big` (default). + * `--enable_time_history`: Whether to add the TimeHistory callback. If so, `--log_steps` must be specified. + * `--batch_size`: The number of tokens to consider in a batch. Combined with + `--max_length`, this determines how many sequences are used per batch. + * Use the `--help` or `-h` flag to get a full list of possible arguments. + + #### Using multiple GPUs + You can train these models on multiple GPUs using the `tf.distribute.Strategy` API. + You can read more about it in this + [guide](https://www.tensorflow.org/guide/distribute_strategy). + + In this example, we have made it easier to use with just a command line flag + `--num_gpus`. By default this flag is 1 if TensorFlow is compiled with CUDA, + and 0 otherwise. + + - --num_gpus=0: Uses tf.distribute.OneDeviceStrategy with CPU as the device. + - --num_gpus=1: Uses tf.distribute.OneDeviceStrategy with GPU as the device. + - --num_gpus=2+: Uses tf.distribute.MirroredStrategy to run synchronous + distributed training across the GPUs. + + #### Using Cloud TPUs + + You can train the Transformer model on Cloud TPUs using + `tf.distribute.TPUStrategy`. If you are not familiar with Cloud TPUs, it is + strongly recommended that you go through the + [quickstart](https://cloud.google.com/tpu/docs/quickstart) to learn how to + create a TPU and GCE VM. + + To run the Transformer model on a TPU, you must set + `--distribution_strategy=tpu`, `--tpu=$TPU_NAME`, and `--use_ctl=True` where + `$TPU_NAME` is the name of your TPU in the Cloud Console. + + An example command to run Transformer on a v2-8 or v3-8 TPU would be: + + ```bash + python transformer_main.py \ + --tpu=$TPU_NAME \ + --model_dir=$MODEL_DIR \ + --data_dir=$DATA_DIR \ + --vocab_file=$DATA_DIR/vocab.ende.32768 \ + --bleu_source=$DATA_DIR/newstest2014.en \ + --bleu_ref=$DATA_DIR/newstest2014.de \ + --batch_size=6144 \ + --train_steps=2000 \ + --static_batch=true \ + --use_ctl=true \ + --param_set=big \ + --max_length=64 \ + --decode_batch_size=32 \ + --decode_max_length=97 \ + --padded_decode=true \ + --distribution_strategy=tpu + ``` + Note: `$MODEL_DIR` and `$DATA_DIR` must be GCS paths. + + #### Customizing training schedule + + By default, the model will train for 10 epochs, and evaluate after every epoch. The training schedule may be defined through the flags: + + * Training with steps: + * `--train_steps`: sets the total number of training steps to run. + * `--steps_between_evals`: Number of training steps to run between evaluations. + + #### Compute BLEU score during model evaluation + + Use these flags to compute the BLEU score when the model evaluates: + + * `--bleu_source`: Path to file containing text to translate. + * `--bleu_ref`: Path to file containing the reference translation. + + When running `transformer_main.py`, use the flags: `--bleu_source=$DATA_DIR/newstest2014.en --bleu_ref=$DATA_DIR/newstest2014.de` + + #### Tensorboard + Training and evaluation metrics (loss, accuracy, approximate BLEU score, etc.) are logged, and can be displayed in the browser using Tensorboard. + ``` + tensorboard --logdir=$MODEL_DIR + ``` + The values are displayed at [localhost:6006](localhost:6006).
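+
+   For reference, a multi-GPU run differs from the single-GPU walkthrough
+   command mainly in `--num_gpus` and (usually) a larger global `--batch_size`,
+   as described in the "Using multiple GPUs" section above. The values below
+   are illustrative rather than tuned recommendations:
+
+   ```
+   python3 transformer_main.py --data_dir=$DATA_DIR --model_dir=$MODEL_DIR \
+       --vocab_file=$VOCAB_FILE --param_set=$PARAM_SET \
+       --num_gpus=4 --batch_size=16384 --max_length=64
+   ```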
+ +## Implementation overview + +A brief look at each component in the code: + +### Model Definition +* [transformer.py](transformer.py): Defines a tf.keras.Model: `Transformer`. +* [embedding_layer.py](embedding_layer.py): Contains the layer that calculates the embeddings. The embedding weights are also used to calculate the pre-softmax probabilities from the decoder output. +* [attention_layer.py](attention_layer.py): Defines the multi-headed attention and self-attention layers that are used in the encoder/decoder stacks. +* [ffn_layer.py](ffn_layer.py): Defines the feedforward network that is used in the encoder/decoder stacks. The network is composed of 2 fully connected layers. + +Other files: +* [beam_search.py](beam_search.py) contains the beam search implementation, which is used during model inference to find high-scoring translations. + +### Model Trainer +[transformer_main.py](transformer_main.py) creates a `TransformerTask` to train and evaluate the model using tf.keras. + +### Test dataset +The [newstest2014 files](https://storage.googleapis.com/tf-perf-public/official_transformer/test_data/newstest2014.tgz) +are extracted from the [NMT Seq2Seq tutorial](https://google.github.io/seq2seq/nmt/#download-data). +The raw text files are converted from the SGM format of the +[WMT 2016](http://www.statmt.org/wmt16/translation-task.html) test sets. The +newstest2014 files are put into the `$DATA_DIR` when executing `data_download.py`. diff --git a/models/official/nlp/transformer/__init__.py b/models/official/nlp/transformer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/official/nlp/transformer/attention_layer.py b/models/official/nlp/transformer/attention_layer.py new file mode 100644 index 0000000000000000000000000000000000000000..114bd5fadc3064f0ff3c895245d8676e47a0bad4 --- /dev/null +++ b/models/official/nlp/transformer/attention_layer.py @@ -0,0 +1,170 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Implementation of multiheaded attention and self-attention layers.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math + +import tensorflow as tf +from official.nlp.modeling import layers + + +class Attention(tf.keras.layers.Layer): + """Multi-headed attention layer.""" + + def __init__(self, hidden_size, num_heads, attention_dropout): + """Initialize Attention. + + Args: + hidden_size: int, output dim of hidden layer. + num_heads: int, number of heads to repeat the same attention structure. + attention_dropout: float, dropout rate inside attention for training. + """ + if hidden_size % num_heads: + raise ValueError( + "Hidden size ({}) must be divisible by the number of heads ({})."
+ .format(hidden_size, num_heads)) + + super(Attention, self).__init__() + self.hidden_size = hidden_size + self.num_heads = num_heads + self.attention_dropout = attention_dropout + + def build(self, input_shape): + """Builds the layer.""" + # Layers for linearly projecting the queries, keys, and values. + size_per_head = self.hidden_size // self.num_heads + + def _glorot_initializer(fan_in, fan_out): + limit = math.sqrt(6.0 / (fan_in + fan_out)) + return tf.keras.initializers.RandomUniform(minval=-limit, maxval=limit) + + attention_initializer = _glorot_initializer(input_shape.as_list()[-1], + self.hidden_size) + self.query_dense_layer = layers.DenseEinsum( + output_shape=(self.num_heads, size_per_head), + kernel_initializer=attention_initializer, + use_bias=False, + name="query") + self.key_dense_layer = layers.DenseEinsum( + output_shape=(self.num_heads, size_per_head), + kernel_initializer=attention_initializer, + use_bias=False, + name="key") + self.value_dense_layer = layers.DenseEinsum( + output_shape=(self.num_heads, size_per_head), + kernel_initializer=attention_initializer, + use_bias=False, + name="value") + + output_initializer = _glorot_initializer(self.hidden_size, self.hidden_size) + self.output_dense_layer = layers.DenseEinsum( + output_shape=self.hidden_size, + num_summed_dimensions=2, + kernel_initializer=output_initializer, + use_bias=False, + name="output_transform") + super(Attention, self).build(input_shape) + + def get_config(self): + return { + "hidden_size": self.hidden_size, + "num_heads": self.num_heads, + "attention_dropout": self.attention_dropout, + } + + def call(self, query_input, source_input, bias, training, cache=None, + decode_loop_step=None): + """Apply attention mechanism to query_input and source_input. + + Args: + query_input: A tensor with shape [batch_size, length_query, hidden_size]. + source_input: A tensor with shape [batch_size, length_source, + hidden_size]. + bias: A tensor with shape [batch_size, 1, length_query, length_source], + the attention bias that will be added to the result of the dot product. + training: A bool, whether in training mode or not. + cache: (Used during prediction) A dictionary with tensors containing + results of previous attentions. The dictionary must have the items: + {"k": tensor with shape [batch_size, i, heads, dim_per_head], + "v": tensor with shape [batch_size, i, heads, dim_per_head]} + where i is the current decoded length for non-padded decode, or max + sequence length for padded decode. + decode_loop_step: An integer, step number of the decoding loop. Used only + for autoregressive inference on TPU. + + Returns: + Attention layer output with shape [batch_size, length_query, hidden_size] + """ + # Linearly project the query, key and value using different learned + # projections. Splitting heads is automatically done during the linear + # projections --> [batch_size, length, num_heads, dim_per_head]. + query = self.query_dense_layer(query_input) + key = self.key_dense_layer(source_input) + value = self.value_dense_layer(source_input) + + if cache is not None: + # Combine cached keys and values with new keys and values. 
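+      # With `padded_decode` (e.g. running on TPU), the cache tensors already
+      # have max-length slots, so the new key/value is written into the slot at
+      # `decode_loop_step` using a one-hot mask; otherwise it is appended along
+      # the time dimension.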
+ if decode_loop_step is not None: + cache_k_shape = cache["k"].shape.as_list() + indices = tf.reshape( + tf.one_hot(decode_loop_step, cache_k_shape[1], dtype=key.dtype), + [1, cache_k_shape[1], 1, 1]) + key = cache["k"] + key * indices + cache_v_shape = cache["v"].shape.as_list() + indices = tf.reshape( + tf.one_hot(decode_loop_step, cache_v_shape[1], dtype=value.dtype), + [1, cache_v_shape[1], 1, 1]) + value = cache["v"] + value * indices + else: + key = tf.concat([tf.cast(cache["k"], key.dtype), key], axis=1) + value = tf.concat([tf.cast(cache["v"], value.dtype), value], axis=1) + + # Update cache + cache["k"] = key + cache["v"] = value + + # Scale query to prevent the dot product between query and key from growing + # too large. + depth = (self.hidden_size // self.num_heads) + query *= depth ** -0.5 + + # Calculate dot product attention + logits = tf.einsum("BTNH,BFNH->BNFT", key, query) + logits += bias + # Note that softmax internally performs math operations using float32 + # for numeric stability. When training with float16, we keep the input + # and output in float16 for better performance. + weights = tf.nn.softmax(logits, name="attention_weights") + if training: + weights = tf.nn.dropout(weights, rate=self.attention_dropout) + attention_output = tf.einsum("BNFT,BTNH->BFNH", weights, value) + + # Run the outputs through another linear projection layer. Recombining heads + # is automatically done --> [batch_size, length, hidden_size] + attention_output = self.output_dense_layer(attention_output) + return attention_output + + +class SelfAttention(Attention): + """Multiheaded self-attention layer.""" + + def call(self, query_input, bias, training, cache=None, + decode_loop_step=None): + return super(SelfAttention, self).call( + query_input, query_input, bias, training, cache, decode_loop_step) diff --git a/models/official/nlp/transformer/beam_search.py b/models/official/nlp/transformer/beam_search.py new file mode 100644 index 0000000000000000000000000000000000000000..a4c1127535e6ae805f6619819737c379cadca6f2 --- /dev/null +++ b/models/official/nlp/transformer/beam_search.py @@ -0,0 +1,132 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Beam search in TF v2.""" + +import tensorflow as tf + +from official.nlp.transformer import beam_search_v1 as v1 + +_StateKeys = v1._StateKeys # pylint: disable=protected-access + + +class SequenceBeamSearchV2(v1.SequenceBeamSearch): + """Implementation of beam search loop in v2.""" + + def search(self, initial_ids, initial_cache): + """Beam search for sequences with highest scores.""" + state, state_shapes = self._create_initial_state(initial_ids, initial_cache) + + finished_state = tf.nest.map_structure( + tf.stop_gradient, + tf.while_loop(self._continue_search, + self._search_step, + loop_vars=[state], + shape_invariants=[state_shapes], + parallel_iterations=1)) + finished_state = finished_state[0] + + alive_seq = finished_state[_StateKeys.ALIVE_SEQ] + alive_log_probs = finished_state[_StateKeys.ALIVE_LOG_PROBS] + finished_seq = finished_state[_StateKeys.FINISHED_SEQ] + finished_scores = finished_state[_StateKeys.FINISHED_SCORES] + finished_flags = finished_state[_StateKeys.FINISHED_FLAGS] + + # 2.0 changes tf.where behavior. Should make parameters broadcastable. + finished_cond = tf.reduce_any(finished_flags, 1, name="finished_cond") + seq_cond = _expand_to_same_rank(finished_cond, finished_seq) + score_cond = _expand_to_same_rank(finished_cond, finished_scores) + + # Account for corner case where there are no finished sequences for a + # particular batch item. In that case, return alive sequences for that batch + # item. + finished_seq = tf.where(seq_cond, finished_seq, alive_seq) + finished_scores = tf.where( + score_cond, finished_scores, alive_log_probs) + return finished_seq, finished_scores + + +def sequence_beam_search(symbols_to_logits_fn, + initial_ids, + initial_cache, + vocab_size, + beam_size, + alpha, + max_decode_length, + eos_id, + padded_decode=False, + dtype="float32"): + """Search for sequence of subtoken ids with the largest probability. + + Args: + symbols_to_logits_fn: A function that takes in ids, index, and cache as + arguments. The passed in arguments will have shape: + ids -> A tensor with shape [batch_size * beam_size, index]. + index -> A scalar. + cache -> A nested dictionary of tensors [batch_size * beam_size, ...]. + The function must return a tuple of logits and new cache: + logits -> A tensor with shape [batch * beam_size, vocab_size]. + new cache -> A nested dictionary with the same shape/structure as the + inputted cache. + initial_ids: An int32 tensor with shape [batch_size]. Starting ids for + each batch item. + initial_cache: A dictionary, containing starting decoder variables + information. + vocab_size: An integer, the size of tokens. + beam_size: An integer, the number of beams. + alpha: A float, defining the strength of length normalization. + max_decode_length: An integer, the maximum length to decoded a sequence. + eos_id: An integer, ID of eos token, used to determine when a sequence has + finished. + padded_decode: A bool, indicating if max_sequence_length padding is used + for beam search. + dtype: A tensorflow data type used for score computation. The default is + tf.float32. 
+ + Returns: + Top decoded sequences [batch_size, beam_size, max_decode_length] + sequence scores [batch_size, beam_size] + """ + batch_size = ( + initial_ids.shape.as_list()[0] if padded_decode else + tf.shape(initial_ids)[0]) + sbs = SequenceBeamSearchV2(symbols_to_logits_fn, vocab_size, batch_size, + beam_size, alpha, max_decode_length, eos_id, + padded_decode, dtype) + return sbs.search(initial_ids, initial_cache) + + +def _expand_to_same_rank(tensor, target): + """Expands a given tensor to target's rank to be broadcastable. + + Args: + tensor: input tensor to tile. Shape: [b, d1, ..., da] + target: target tensor. Shape: [b, d1, ..., da, ..., dn] + + Returns: + Tiled tensor of shape [b, d1, ..., da, 1, ..., 1] with same rank of target. + + Raises: + ValueError, if the shape rank of rank tensor/target is None. + """ + if tensor.shape.rank is None: + raise ValueError("Expect rank for tensor shape, but got None.") + if target.shape.rank is None: + raise ValueError("Expect rank for target shape, but got None.") + + with tf.name_scope("expand_rank"): + diff_rank = target.shape.rank - tensor.shape.rank + for _ in range(diff_rank): + tensor = tf.expand_dims(tensor, -1) + return tensor diff --git a/models/official/nlp/transformer/beam_search_v1.py b/models/official/nlp/transformer/beam_search_v1.py new file mode 100644 index 0000000000000000000000000000000000000000..8b143b1b30ef462f6187850b12a5ca9dfe3ab39b --- /dev/null +++ b/models/official/nlp/transformer/beam_search_v1.py @@ -0,0 +1,675 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Beam search to find the translated sequence with the highest probability. + +Source implementation from Tensor2Tensor: +https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/beam_search.py +""" + +import numpy as np +import tensorflow.compat.v1 as tf +from tensorflow.python.util import nest + + +def inf(dtype): + """Returns a value close to infinity, but is still finite in `dtype`. + + This is useful to get a very large value that is still zero when multiplied by + zero. The floating-point "Inf" value is NaN when multiplied by zero. + + Args: + dtype: A dtype. The returned value will be finite when casted to this dtype. + + Returns: + A very large value. + """ + if dtype == "float32" or dtype == "bfloat16": + return 1e7 + elif dtype == "float16": + # Disable no-member lint error, as the linter thinks np.float16 does not + # exist for some reason. + return np.finfo(np.float16).max # pylint: disable=no-member + else: + raise AssertionError('Invalid dtype: %s' % dtype) + + +class _StateKeys(object): + """Keys to dictionary storing the state of the beam search loop.""" + + # Variable storing the loop index. + CUR_INDEX = "CUR_INDEX" + + # Top sequences that are alive for each batch item. Alive sequences are ones + # that have not generated an EOS token. 
Sequences that reach EOS are marked as + # finished and moved to the FINISHED_SEQ tensor. + # Has shape [batch_size, beam_size, CUR_INDEX + 1] + ALIVE_SEQ = "ALIVE_SEQ" + # Log probabilities of each alive sequence. Shape [batch_size, beam_size] + ALIVE_LOG_PROBS = "ALIVE_LOG_PROBS" + # Dictionary of cached values for each alive sequence. The cache stores + # the encoder output, attention bias, and the decoder attention output from + # the previous iteration. + ALIVE_CACHE = "ALIVE_CACHE" + + # Top finished sequences for each batch item. + # Has shape [batch_size, beam_size, CUR_INDEX + 1]. Sequences that are + # shorter than CUR_INDEX + 1 are padded with 0s. + FINISHED_SEQ = "FINISHED_SEQ" + # Scores for each finished sequence. Score = log probability / length norm + # Shape [batch_size, beam_size] + FINISHED_SCORES = "FINISHED_SCORES" + # Flags indicating which sequences in the finished sequences are finished. + # At the beginning, all of the sequences in FINISHED_SEQ are filler values. + # True -> finished sequence, False -> filler. Shape [batch_size, beam_size] + FINISHED_FLAGS = "FINISHED_FLAGS" + + +class SequenceBeamSearch(object): + """Implementation of beam search loop.""" + + def __init__(self, + symbols_to_logits_fn, + vocab_size, + batch_size, + beam_size, + alpha, + max_decode_length, + eos_id, + padded_decode, + dtype=tf.float32): + """Initialize sequence beam search. + + Args: + symbols_to_logits_fn: A function to provide logits, which is the + interface to the Transformer model. The passed in arguments are: + ids -> A tensor with shape [batch_size * beam_size, index]. + index -> A scalar. + cache -> A nested dictionary of tensors [batch_size * beam_size, ...]. + The function must return a tuple of logits and the updated cache: + logits -> A tensor with shape [batch * beam_size, vocab_size]. + updated cache -> A nested dictionary with the same structure as the + input cache. + vocab_size: An integer, the size of the vocabulary, used for topk + computation. + batch_size: An integer, the decode batch size. + beam_size: An integer, number of beams for beam search. + alpha: A float, defining the strength of length normalization. + max_decode_length: An integer, the maximum number of steps to decode + a sequence. + eos_id: An integer. ID of end of sentence token. + padded_decode: A bool, indicating if max_sequence_length padding is used + for beam search. + dtype: A tensorflow data type used for score computation. The default is + tf.float32. 
+ """ + self.symbols_to_logits_fn = symbols_to_logits_fn + self.vocab_size = vocab_size + self.batch_size = batch_size + self.beam_size = beam_size + self.alpha = alpha + self.max_decode_length = max_decode_length + self.eos_id = eos_id + self.padded_decode = padded_decode + self.dtype = tf.as_dtype(dtype) + + def search(self, initial_ids, initial_cache): + """Beam search for sequences with highest scores.""" + state, state_shapes = self._create_initial_state(initial_ids, initial_cache) + + finished_state = tf.while_loop( + self._continue_search, self._search_step, loop_vars=[state], + shape_invariants=[state_shapes], parallel_iterations=1, back_prop=False) + finished_state = finished_state[0] + + alive_seq = finished_state[_StateKeys.ALIVE_SEQ] + alive_log_probs = finished_state[_StateKeys.ALIVE_LOG_PROBS] + finished_seq = finished_state[_StateKeys.FINISHED_SEQ] + finished_scores = finished_state[_StateKeys.FINISHED_SCORES] + finished_flags = finished_state[_StateKeys.FINISHED_FLAGS] + + # Account for corner case where there are no finished sequences for a + # particular batch item. In that case, return alive sequences for that batch + # item. + finished_seq = tf.where( + tf.reduce_any(finished_flags, 1), finished_seq, alive_seq) + finished_scores = tf.where( + tf.reduce_any(finished_flags, 1), finished_scores, alive_log_probs) + return finished_seq, finished_scores + + def _create_initial_state(self, initial_ids, initial_cache): + """Return initial state dictionary and its shape invariants. + + Args: + initial_ids: initial ids to pass into the symbols_to_logits_fn. + int tensor with shape [batch_size, 1] + initial_cache: dictionary storing values to be passed into the + symbols_to_logits_fn. + + Returns: + state and shape invariant dictionaries with keys from _StateKeys + """ + for key, value in initial_cache.items(): + for inner_value in nest.flatten(value): + if inner_value.dtype != self.dtype: + raise TypeError( + "initial_cache element for key '%s' has dtype %s that does not " + "match SequenceBeamSearch's dtype of %s. Value: %s" % + (key, value.dtype.name, self.dtype.name, inner_value)) + + # Current loop index (starts at 0) + cur_index = tf.constant(0) + + # Create alive sequence with shape [batch_size, beam_size, 1] + alive_seq = _expand_to_beam_size(initial_ids, self.beam_size) + alive_seq = tf.expand_dims(alive_seq, axis=2) + if self.padded_decode: + alive_seq = tf.tile(alive_seq, [1, 1, self.max_decode_length + 1]) + + # Create tensor for storing initial log probabilities. + # Assume initial_ids are prob 1.0 + initial_log_probs = tf.constant( + [[0.] + [-float("inf")] * (self.beam_size - 1)], dtype=self.dtype) + alive_log_probs = tf.tile(initial_log_probs, [self.batch_size, 1]) + + # Expand all values stored in the dictionary to the beam size, so that each + # beam has a separate cache. + alive_cache = nest.map_structure( + lambda t: _expand_to_beam_size(t, self.beam_size), initial_cache) + + # Initialize tensor storing finished sequences with filler values. + finished_seq = tf.zeros(tf.shape(alive_seq), tf.int32) + + # Set scores of the initial finished seqs to negative infinity. + finished_scores = tf.ones([self.batch_size, self.beam_size], + dtype=self.dtype) * -inf(self.dtype) + + # Initialize finished flags with all False values. 
+ finished_flags = tf.zeros([self.batch_size, self.beam_size], tf.bool) + + # Create state dictionary + state = { + _StateKeys.CUR_INDEX: cur_index, + _StateKeys.ALIVE_SEQ: alive_seq, + _StateKeys.ALIVE_LOG_PROBS: alive_log_probs, + _StateKeys.ALIVE_CACHE: alive_cache, + _StateKeys.FINISHED_SEQ: finished_seq, + _StateKeys.FINISHED_SCORES: finished_scores, + _StateKeys.FINISHED_FLAGS: finished_flags + } + + # Create state invariants for each value in the state dictionary. Each + # dimension must be a constant or None. A None dimension means either: + # 1) the dimension's value is a tensor that remains the same but may + # depend on the input sequence to the model (e.g. batch size). + # 2) the dimension may have different values on different iterations. + if self.padded_decode: + state_shape_invariants = { + _StateKeys.CUR_INDEX: + tf.TensorShape([]), + _StateKeys.ALIVE_SEQ: + tf.TensorShape( + [self.batch_size, self.beam_size, + self.max_decode_length + 1]), + _StateKeys.ALIVE_LOG_PROBS: + tf.TensorShape([self.batch_size, self.beam_size]), + _StateKeys.ALIVE_CACHE: + nest.map_structure(_get_shape, alive_cache), + _StateKeys.FINISHED_SEQ: + tf.TensorShape( + [self.batch_size, self.beam_size, + self.max_decode_length + 1]), + _StateKeys.FINISHED_SCORES: + tf.TensorShape([self.batch_size, self.beam_size]), + _StateKeys.FINISHED_FLAGS: + tf.TensorShape([self.batch_size, self.beam_size]) + } + else: + state_shape_invariants = { + _StateKeys.CUR_INDEX: + tf.TensorShape([]), + _StateKeys.ALIVE_SEQ: + tf.TensorShape([None, self.beam_size, None]), + _StateKeys.ALIVE_LOG_PROBS: + tf.TensorShape([None, self.beam_size]), + _StateKeys.ALIVE_CACHE: + nest.map_structure(_get_shape_keep_last_dim, alive_cache), + _StateKeys.FINISHED_SEQ: + tf.TensorShape([None, self.beam_size, None]), + _StateKeys.FINISHED_SCORES: + tf.TensorShape([None, self.beam_size]), + _StateKeys.FINISHED_FLAGS: + tf.TensorShape([None, self.beam_size]) + } + + return state, state_shape_invariants + + def _continue_search(self, state): + """Return whether to continue the search loop. + + The loops should terminate when + 1) when decode length has been reached, or + 2) when the worst score in the finished sequences is better than the best + score in the alive sequences (i.e. the finished sequences are provably + unchanging) + + Args: + state: A dictionary with the current loop state. + + Returns: + Bool tensor with value True if loop should continue, False if loop should + terminate. + """ + i = state[_StateKeys.CUR_INDEX] + alive_log_probs = state[_StateKeys.ALIVE_LOG_PROBS] + finished_scores = state[_StateKeys.FINISHED_SCORES] + finished_flags = state[_StateKeys.FINISHED_FLAGS] + + not_at_max_decode_length = tf.less(i, self.max_decode_length) + + # Calculate largest length penalty (the larger penalty, the better score). + max_length_norm = _length_normalization(self.alpha, self.max_decode_length, + dtype=self.dtype) + # Get the best possible scores from alive sequences. + best_alive_scores = alive_log_probs[:, 0] / max_length_norm + + # Compute worst score in finished sequences for each batch element + finished_scores *= tf.cast(finished_flags, + self.dtype) # set filler scores to zero + lowest_finished_scores = tf.reduce_min(finished_scores, axis=1) + + # If there are no finished sequences in a batch element, then set the lowest + # finished score to -INF for that element. 
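+    # This prevents the early-termination check below from firing for batch
+    # elements that do not yet have any finished sequence.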
+ finished_batches = tf.reduce_any(finished_flags, 1) + lowest_finished_scores += ((1.0 - + tf.cast(finished_batches, self.dtype)) * + -inf(self.dtype)) + + worst_finished_score_better_than_best_alive_score = tf.reduce_all( + tf.greater(lowest_finished_scores, best_alive_scores) + ) + + return tf.logical_and( + not_at_max_decode_length, + tf.logical_not(worst_finished_score_better_than_best_alive_score) + ) + + def _search_step(self, state): + """Beam search loop body. + + Grow alive sequences by a single ID. Sequences that have reached the EOS + token are marked as finished. The alive and finished sequences with the + highest log probabilities and scores are returned. + + A sequence's finished score is calculating by dividing the log probability + by the length normalization factor. Without length normalization, the + search is more likely to return shorter sequences. + + Args: + state: A dictionary with the current loop state. + + Returns: + new state dictionary. + """ + # Grow alive sequences by one token. + new_seq, new_log_probs, topk_ids, new_cache = self._grow_alive_seq(state) + new_finished_flags = tf.equal(topk_ids, self.eos_id) + # Collect top beam_size alive sequences + alive_state = self._get_new_alive_state(new_seq, new_log_probs, + new_finished_flags, new_cache) + + # Combine newly finished sequences with existing finished sequences, and + # collect the top k scoring sequences. + finished_state = self._get_new_finished_state(state, new_seq, new_log_probs, + new_finished_flags) + + # Increment loop index and create new state dictionary + new_state = {_StateKeys.CUR_INDEX: state[_StateKeys.CUR_INDEX] + 1} + new_state.update(alive_state) + new_state.update(finished_state) + return [new_state] + + def _grow_alive_seq(self, state): + """Grow alive sequences by one token, and collect top 2*beam_size sequences. + + 2*beam_size sequences are collected because some sequences may have reached + the EOS token. 2*beam_size ensures that at least beam_size sequences are + still alive. + + Args: + state: A dictionary with the current loop state. + Returns: + Tuple of + (Top 2*beam_size sequences [batch_size, 2 * beam_size, cur_index + 1], + Scores of returned sequences [batch_size, 2 * beam_size], + New alive cache, for each of the 2 * beam_size sequences) + """ + i = state[_StateKeys.CUR_INDEX] + alive_seq = state[_StateKeys.ALIVE_SEQ] + alive_log_probs = state[_StateKeys.ALIVE_LOG_PROBS] + alive_cache = state[_StateKeys.ALIVE_CACHE] + + beams_to_keep = 2 * self.beam_size + + # Get logits for the next candidate IDs for the alive sequences. Get the new + # cache values at the same time. + if self.padded_decode: + flat_ids = tf.reshape( + tf.slice(alive_seq, [0, 0, i], [self.batch_size, self.beam_size, 1]), + [self.batch_size * self.beam_size, -1]) + else: + flat_ids = _flatten_beam_dim(alive_seq) # [batch_size * beam_size] + flat_cache = nest.map_structure(_flatten_beam_dim, alive_cache) + + flat_logits, flat_cache = self.symbols_to_logits_fn(flat_ids, i, flat_cache) + + # Unflatten logits to shape [batch_size, beam_size, vocab_size] + logits = _unflatten_beam_dim(flat_logits, self.batch_size, self.beam_size) + new_cache = nest.map_structure( + lambda t: _unflatten_beam_dim(t, self.batch_size, self.beam_size), + flat_cache) + + # Convert logits to normalized log probs + candidate_log_probs = _log_prob_from_logits(logits) + + # Calculate new log probabilities if each of the alive sequences were + # extended # by the the candidate IDs. 
+ # Shape [batch_size, beam_size, vocab_size] + log_probs = candidate_log_probs + tf.expand_dims(alive_log_probs, axis=2) + + # Each batch item has beam_size * vocab_size candidate sequences. For each + # batch item, get the k candidates with the highest log probabilities. + flat_log_probs = tf.reshape(log_probs, + [-1, self.beam_size * self.vocab_size]) + topk_log_probs, topk_indices = tf.nn.top_k(flat_log_probs, k=beams_to_keep) + + # Extract the alive sequences that generate the highest log probabilities + # after being extended. + topk_beam_indices = topk_indices // self.vocab_size + topk_seq, new_cache = _gather_beams( + [alive_seq, new_cache], topk_beam_indices, self.batch_size, + beams_to_keep) + + # Append the most probable IDs to the topk sequences + topk_ids = topk_indices % self.vocab_size + if self.padded_decode: + topk_seq = tf.transpose(topk_seq, perm=[2, 0, 1]) + # TODO(b/145533236, hongkuny): Reverts once TF fix the validation. + topk_seq = tf.tensor_scatter_nd_update(topk_seq, [[i + 1]], + tf.expand_dims(topk_ids, axis=0)) + topk_seq = tf.transpose(topk_seq, perm=[1, 2, 0]) + else: + topk_seq = tf.concat([topk_seq, tf.expand_dims(topk_ids, axis=2)], axis=2) + return topk_seq, topk_log_probs, topk_ids, new_cache + + def _get_new_alive_state(self, new_seq, new_log_probs, new_finished_flags, + new_cache): + """Gather the top k sequences that are still alive. + + Args: + new_seq: New sequences generated by growing the current alive sequences + int32 tensor with shape [batch_size, 2 * beam_size, cur_index + 1] + new_log_probs: Log probabilities of new sequences float32 tensor with + shape [batch_size, beam_size] + new_finished_flags: A boolean Tensor indicates which sequences are live + inside the beam. + new_cache: Dict of cached values for each sequence. + + Returns: + Dictionary with alive keys from _StateKeys: + {Top beam_size sequences that are still alive (don't end with eos_id) + Log probabilities of top alive sequences + Dict cache storing decoder states for top alive sequences} + """ + # To prevent finished sequences from being considered, set log probs to -inf + new_log_probs += tf.cast(new_finished_flags, self.dtype) * -inf(self.dtype) + + top_alive_seq, top_alive_log_probs, top_alive_cache = _gather_topk_beams( + [new_seq, new_log_probs, new_cache], new_log_probs, self.batch_size, + self.beam_size) + + return { + _StateKeys.ALIVE_SEQ: top_alive_seq, + _StateKeys.ALIVE_LOG_PROBS: top_alive_log_probs, + _StateKeys.ALIVE_CACHE: top_alive_cache + } + + def _get_new_finished_state(self, state, new_seq, new_log_probs, + new_finished_flags): + """Combine new and old finished sequences, and gather the top k sequences. + + Args: + state: A dictionary with the current loop state. + new_seq: New sequences generated by growing the current alive sequences + int32 tensor with shape [batch_size, beam_size, i + 1] + new_log_probs: Log probabilities of new sequences float32 tensor with + shape [batch_size, beam_size] + new_finished_flags: A boolean Tensor indicates which sequences are live + inside the beam. + + Returns: + Dictionary with finished keys from _StateKeys: + {Top beam_size finished sequences based on score, + Scores of finished sequences, + Finished flags of finished sequences} + """ + i = state[_StateKeys.CUR_INDEX] + finished_seq = state[_StateKeys.FINISHED_SEQ] + finished_scores = state[_StateKeys.FINISHED_SCORES] + finished_flags = state[_StateKeys.FINISHED_FLAGS] + + # First append a column of 0-ids to finished_seq to increment the length. 
+ # New shape of finished_seq: [batch_size, beam_size, i + 1] + if not self.padded_decode: + finished_seq = tf.concat([ + finished_seq, + tf.zeros([self.batch_size, self.beam_size, 1], tf.int32) + ], + axis=2) + + # Calculate new seq scores from log probabilities. + length_norm = _length_normalization(self.alpha, i + 1, dtype=self.dtype) + new_scores = new_log_probs / length_norm + + # Set the scores of the still-alive seq in new_seq to large negative values. + new_scores += ((1. - tf.cast(new_finished_flags, self.dtype)) * + -inf(self.dtype)) + + # Combine sequences, scores, and flags. + finished_seq = tf.concat([finished_seq, new_seq], axis=1) + finished_scores = tf.concat([finished_scores, new_scores], axis=1) + finished_flags = tf.concat([finished_flags, new_finished_flags], axis=1) + + # Return the finished sequences with the best scores. + top_finished_seq, top_finished_scores, top_finished_flags = ( + _gather_topk_beams([finished_seq, finished_scores, finished_flags], + finished_scores, self.batch_size, self.beam_size)) + + return { + _StateKeys.FINISHED_SEQ: top_finished_seq, + _StateKeys.FINISHED_SCORES: top_finished_scores, + _StateKeys.FINISHED_FLAGS: top_finished_flags + } + + +def sequence_beam_search( + symbols_to_logits_fn, initial_ids, initial_cache, vocab_size, beam_size, + alpha, max_decode_length, eos_id, padded_decode=False): + """Search for sequence of subtoken ids with the largest probability. + + Args: + symbols_to_logits_fn: A function that takes in ids, index, and cache as + arguments. The passed in arguments will have shape: + ids -> A tensor with shape [batch_size * beam_size, index]. + index -> A scalar. + cache -> A nested dictionary of tensors [batch_size * beam_size, ...]. + The function must return a tuple of logits and new cache: + logits -> A tensor with shape [batch * beam_size, vocab_size]. + new cache -> A nested dictionary with the same shape/structure as the + inputted cache. + initial_ids: An int32 tensor with shape [batch_size]. Starting ids for + each batch item. + initial_cache: A dictionary, containing starting decoder variables + information. + vocab_size: An integer, the size of the vocabulary, used for topk + computation. + beam_size: An integer, the number of beams. + alpha: A float, defining the strength of length normalization. + max_decode_length: An integer, the maximum length to decoded a sequence. + eos_id: An integer, ID of eos token, used to determine when a sequence has + finished. + padded_decode: A bool, indicating if max_sequence_length padding is used + for beam search. + + Returns: + Top decoded sequences [batch_size, beam_size, max_decode_length] + sequence scores [batch_size, beam_size] + """ + batch_size = ( + initial_ids.shape.as_list()[0] if padded_decode else + tf.shape(initial_ids)[0]) + sbs = SequenceBeamSearch(symbols_to_logits_fn, vocab_size, batch_size, + beam_size, alpha, max_decode_length, eos_id, + padded_decode) + return sbs.search(initial_ids, initial_cache) + + +def _log_prob_from_logits(logits): + return logits - tf.reduce_logsumexp(logits, axis=2, keepdims=True) + + +def _length_normalization(alpha, length, dtype=tf.float32): + """Return length normalization factor.""" + return tf.pow(((5. + tf.cast(length, dtype)) / 6.), alpha) + + +def _expand_to_beam_size(tensor, beam_size): + """Tiles a given tensor by beam_size. + + Args: + tensor: tensor to tile [batch_size, ...] + beam_size: How much to tile the tensor by. + + Returns: + Tiled tensor [batch_size, beam_size, ...] 
+ """ + tensor = tf.expand_dims(tensor, axis=1) + tile_dims = [1] * tensor.shape.ndims + tile_dims[1] = beam_size + + return tf.tile(tensor, tile_dims) + + +def _shape_list(tensor): + """Return a list of the tensor's shape, and ensure no None values in list.""" + # Get statically known shape (may contain None's for unknown dimensions) + shape = tensor.get_shape().as_list() + + # Ensure that the shape values are not None + dynamic_shape = tf.shape(tensor) + for i in range(len(shape)): # pylint: disable=consider-using-enumerate + if shape[i] is None: + shape[i] = dynamic_shape[i] + return shape + + +def _get_shape_keep_last_dim(tensor): + shape_list = _shape_list(tensor) + + # Only the last dimension is kept; all other dimensions are set to None. + for i in range(len(shape_list) - 1): + shape_list[i] = None + + if isinstance(shape_list[-1], tf.Tensor): + shape_list[-1] = None + return tf.TensorShape(shape_list) + + +def _get_shape(tensor): + """Return the shape of the input tensor.""" + return tf.TensorShape(_shape_list(tensor)) + + +def _flatten_beam_dim(tensor): + """Reshapes the first two dimensions into a single dimension. + + Args: + tensor: Tensor to reshape of shape [A, B, ...] + + Returns: + Reshaped tensor of shape [A*B, ...] + """ + shape = _shape_list(tensor) + shape[0] *= shape[1] + shape.pop(1) # Remove beam dim + return tf.reshape(tensor, shape) + + +def _unflatten_beam_dim(tensor, batch_size, beam_size): + """Reshapes first dimension back to [batch_size, beam_size]. + + Args: + tensor: Tensor to reshape of shape [batch_size*beam_size, ...] + batch_size: Tensor, original batch size. + beam_size: int, original beam size. + + Returns: + Reshaped tensor of shape [batch_size, beam_size, ...] + """ + shape = _shape_list(tensor) + new_shape = [batch_size, beam_size] + shape[1:] + return tf.reshape(tensor, new_shape) + + +def _gather_beams(nested, beam_indices, batch_size, new_beam_size): + """Gather beams from nested structure of tensors. + + Each tensor in nested represents a batch of beams, where beam refers to a + single search state (beam search involves searching through multiple states + in parallel). + + This function is used to gather the top beams, specified by + beam_indices, from the nested tensors. + + Args: + nested: Nested structure (tensor, list, tuple or dict) containing tensors + with shape [batch_size, beam_size, ...]. + beam_indices: int32 tensor with shape [batch_size, new_beam_size]. Each + value in beam_indices must be in [0, beam_size), and values are not + necessarily unique. + batch_size: int size of batch + new_beam_size: int number of beams to be pulled from the nested tensors. + + Returns: + Nested structure containing tensors with shape + [batch_size, new_beam_size, ...] + """ + # Computes the i'th coordinate that contains the batch index for gather_nd. + # Batch pos is a tensor like [[0,0,0,0],[1,1,1,1],...]. + batch_pos = tf.range(batch_size * new_beam_size) // new_beam_size + batch_pos = tf.reshape(batch_pos, [batch_size, new_beam_size]) + + # Create coordinates to be passed to tf.gather_nd. Stacking creates a tensor + # with shape [batch_size, new_beam_size, 2], where the last dimension contains + # the (i, j) gathering coordinates.
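# (Illustrative aside, not part of the original file: with batch_size = 2 and
# new_beam_size = 2, batch_pos is [[0, 0], [1, 1]]; stacking it with
# beam_indices = [[1, 2], [0, 2]] gives coordinates
# [[[0, 1], [0, 2]], [[1, 0], [1, 2]]], so tf.gather_nd picks beams 1 and 2
# from batch item 0 and beams 0 and 2 from batch item 1 -- the same case
# exercised by test_gather_beams in beam_search_v1_test.py below.)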
+ coordinates = tf.stack([batch_pos, beam_indices], axis=2) + + return nest.map_structure( + lambda state: tf.gather_nd(state, coordinates), nested) + + +def _gather_topk_beams(nested, score_or_log_prob, batch_size, beam_size): + """Gather top beams from nested structure.""" + _, topk_indexes = tf.nn.top_k(score_or_log_prob, k=beam_size) + return _gather_beams(nested, topk_indexes, batch_size, beam_size) diff --git a/models/official/nlp/transformer/beam_search_v1_test.py b/models/official/nlp/transformer/beam_search_v1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..53cf921fb90e93950a05e999807fc497390674a1 --- /dev/null +++ b/models/official/nlp/transformer/beam_search_v1_test.py @@ -0,0 +1,101 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Test beam search helper methods.""" + +import tensorflow.compat.v1 as tf + +from official.nlp.transformer import beam_search_v1 as beam_search + + +class BeamSearchHelperTests(tf.test.TestCase): + + def setUp(self): + super(BeamSearchHelperTests, self).setUp() + tf.compat.v1.disable_eager_execution() + + def test_expand_to_beam_size(self): + x = tf.ones([7, 4, 2, 5]) + x = beam_search._expand_to_beam_size(x, 3) + with self.session() as sess: + shape = sess.run(tf.shape(x)) + self.assertAllEqual([7, 3, 4, 2, 5], shape) + + def test_shape_list(self): + y = tf.compat.v1.placeholder(dtype=tf.int32, shape=[]) + x = tf.ones([7, y, 2, 5]) + shape = beam_search._shape_list(x) + self.assertIsInstance(shape[0], int) + self.assertIsInstance(shape[1], tf.Tensor) + self.assertIsInstance(shape[2], int) + self.assertIsInstance(shape[3], int) + + def test_get_shape_keep_last_dim(self): + y = tf.constant(4.0) + x = tf.ones([7, tf.cast(tf.sqrt(y), tf.int32), 2, 5]) + shape = beam_search._get_shape_keep_last_dim(x) + self.assertAllEqual([None, None, None, 5], + shape.as_list()) + + def test_flatten_beam_dim(self): + x = tf.ones([7, 4, 2, 5]) + x = beam_search._flatten_beam_dim(x) + with self.session() as sess: + shape = sess.run(tf.shape(x)) + self.assertAllEqual([28, 2, 5], shape) + + def test_unflatten_beam_dim(self): + x = tf.ones([28, 2, 5]) + x = beam_search._unflatten_beam_dim(x, 7, 4) + with self.session() as sess: + shape = sess.run(tf.shape(x)) + self.assertAllEqual([7, 4, 2, 5], shape) + + def test_gather_beams(self): + x = tf.reshape(tf.range(24), [2, 3, 4]) + # x looks like: [[[ 0 1 2 3] + # [ 4 5 6 7] + # [ 8 9 10 11]] + # + # [[12 13 14 15] + # [16 17 18 19] + # [20 21 22 23]]] + + y = beam_search._gather_beams(x, [[1, 2], [0, 2]], 2, 2) + with self.session() as sess: + y = sess.run(y) + + self.assertAllEqual([[[4, 5, 6, 7], + [8, 9, 10, 11]], + [[12, 13, 14, 15], + [20, 21, 22, 23]]], + y) + + def test_gather_topk_beams(self): + x = tf.reshape(tf.range(24), [2, 3, 4]) + x_scores = [[0, 1, 1], [1, 0, 1]] + + y = beam_search._gather_topk_beams(x, x_scores, 2, 2) + with self.session() 
as sess: + y = sess.run(y) + + self.assertAllEqual([[[4, 5, 6, 7], + [8, 9, 10, 11]], + [[12, 13, 14, 15], + [20, 21, 22, 23]]], + y) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/transformer/compute_bleu.py b/models/official/nlp/transformer/compute_bleu.py new file mode 100644 index 0000000000000000000000000000000000000000..92d54c30ecbc844d271b49f49ed19abc09098abf --- /dev/null +++ b/models/official/nlp/transformer/compute_bleu.py @@ -0,0 +1,148 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Script to compute official BLEU score. + +Source: +https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import re +import sys +import unicodedata + +from absl import app as absl_app +from absl import flags +import six +from six.moves import range +import tensorflow as tf + +from official.nlp.transformer.utils import metrics +from official.nlp.transformer.utils import tokenizer +from official.utils.flags import core as flags_core + + +class UnicodeRegex(object): + """Ad-hoc hack to recognize all punctuation and symbols.""" + + def __init__(self): + punctuation = self.property_chars("P") + self.nondigit_punct_re = re.compile(r"([^\d])([" + punctuation + r"])") + self.punct_nondigit_re = re.compile(r"([" + punctuation + r"])([^\d])") + self.symbol_re = re.compile("([" + self.property_chars("S") + "])") + + def property_chars(self, prefix): + return "".join( + six.unichr(x) + for x in range(sys.maxunicode) + if unicodedata.category(six.unichr(x)).startswith(prefix)) + + +uregex = UnicodeRegex() + + +def bleu_tokenize(string): + r"""Tokenize a string following the official BLEU implementation. + + See https://github.com/moses-smt/mosesdecoder/' + 'blob/master/scripts/generic/mteval-v14.pl#L954-L983 + In our case, the input string is expected to be just one line + and no HTML entities de-escaping is needed. + So we just tokenize on punctuation and symbols, + except when a punctuation is preceded and followed by a digit + (e.g. a comma/dot as a thousand/decimal separator). + + Note that a numer (e.g. a year) followed by a dot at the end of sentence + is NOT tokenized, + i.e. the dot stays with the number because `s/(\p{P})(\P{N})/ $1 $2/g` + does not match this case (unless we add a space after each sentence). + However, this error is already in the original mteval-v14.pl + and we want to be consistent with it. 
+ + Args: + string: the input string + + Returns: + a list of tokens + """ + string = uregex.nondigit_punct_re.sub(r"\1 \2 ", string) + string = uregex.punct_nondigit_re.sub(r" \1 \2", string) + string = uregex.symbol_re.sub(r" \1 ", string) + return string.split() + + +def bleu_wrapper(ref_filename, hyp_filename, case_sensitive=False): + """Compute BLEU for two files (reference and hypothesis translation).""" + ref_lines = tokenizer.native_to_unicode( + tf.io.gfile.GFile(ref_filename).read()).strip().splitlines() + hyp_lines = tokenizer.native_to_unicode( + tf.io.gfile.GFile(hyp_filename).read()).strip().splitlines() + + if len(ref_lines) != len(hyp_lines): + raise ValueError( + "Reference and translation files have different number of " + "lines (%d VS %d). If training only a few steps (100-200), the " + "translation may be empty." % (len(ref_lines), len(hyp_lines))) + if not case_sensitive: + ref_lines = [x.lower() for x in ref_lines] + hyp_lines = [x.lower() for x in hyp_lines] + ref_tokens = [bleu_tokenize(x) for x in ref_lines] + hyp_tokens = [bleu_tokenize(x) for x in hyp_lines] + return metrics.compute_bleu(ref_tokens, hyp_tokens) * 100 + + +def main(unused_argv): + if FLAGS.bleu_variant in ("both", "uncased"): + score = bleu_wrapper(FLAGS.reference, FLAGS.translation, False) + tf.logging.info("Case-insensitive results: %f" % score) + + if FLAGS.bleu_variant in ("both", "cased"): + score = bleu_wrapper(FLAGS.reference, FLAGS.translation, True) + tf.logging.info("Case-sensitive results: %f" % score) + + +def define_compute_bleu_flags(): + """Add flags for computing BLEU score.""" + flags.DEFINE_string( + name="translation", + default=None, + help=flags_core.help_wrap("File containing translated text.")) + flags.mark_flag_as_required("translation") + + flags.DEFINE_string( + name="reference", + default=None, + help=flags_core.help_wrap("File containing reference translation.")) + flags.mark_flag_as_required("reference") + + flags.DEFINE_enum( + name="bleu_variant", + short_name="bv", + default="both", + enum_values=["both", "uncased", "cased"], + case_sensitive=False, + help=flags_core.help_wrap( + "Specify one or more BLEU variants to calculate. Variants: \"cased\"" + ", \"uncased\", or \"both\".")) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + define_compute_bleu_flags() + FLAGS = flags.FLAGS + absl_app.run(main) diff --git a/models/official/nlp/transformer/compute_bleu_test.py b/models/official/nlp/transformer/compute_bleu_test.py new file mode 100644 index 0000000000000000000000000000000000000000..6c578e3698a7e6cc2d7170433d4565cd3d8091ed --- /dev/null +++ b/models/official/nlp/transformer/compute_bleu_test.py @@ -0,0 +1,64 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
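A quick sanity check of the tokenization rules described above (a sketch only; it assumes the module is importable as official.nlp.transformer.compute_bleu): punctuation flanked by digits stays attached, other punctuation is split into separate tokens, and a trailing dot after a number is left alone.

from official.nlp.transformer import compute_bleu

# Comma/dot between digits act as thousand/decimal separators and stay attached.
assert compute_bleu.bleu_tokenize("It cost 1,000.5 euros") == [
    "It", "cost", "1,000.5", "euros"]
# Ordinary punctuation becomes its own token.
assert compute_bleu.bleu_tokenize("Hello, world!") == [
    "Hello", ",", "world", "!"]
# A number followed by a sentence-final dot keeps the dot (mteval-v14.pl quirk).
assert compute_bleu.bleu_tokenize("It happened in 2014.") == [
    "It", "happened", "in", "2014."]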
+# ============================================================================== +"""Test functions in compute_blue.py.""" + +import tempfile + +import tensorflow as tf + +from official.nlp.transformer import compute_bleu + + +class ComputeBleuTest(tf.test.TestCase): + + def _create_temp_file(self, text): + temp_file = tempfile.NamedTemporaryFile(delete=False) + with tf.io.gfile.GFile(temp_file.name, "w") as w: + w.write(text) + return temp_file.name + + def test_bleu_same(self): + ref = self._create_temp_file("test 1 two 3\nmore tests!") + hyp = self._create_temp_file("test 1 two 3\nmore tests!") + + uncased_score = compute_bleu.bleu_wrapper(ref, hyp, False) + cased_score = compute_bleu.bleu_wrapper(ref, hyp, True) + self.assertEqual(100, uncased_score) + self.assertEqual(100, cased_score) + + def test_bleu_same_different_case(self): + ref = self._create_temp_file("Test 1 two 3\nmore tests!") + hyp = self._create_temp_file("test 1 two 3\nMore tests!") + uncased_score = compute_bleu.bleu_wrapper(ref, hyp, False) + cased_score = compute_bleu.bleu_wrapper(ref, hyp, True) + self.assertEqual(100, uncased_score) + self.assertLess(cased_score, 100) + + def test_bleu_different(self): + ref = self._create_temp_file("Testing\nmore tests!") + hyp = self._create_temp_file("Dog\nCat") + uncased_score = compute_bleu.bleu_wrapper(ref, hyp, False) + cased_score = compute_bleu.bleu_wrapper(ref, hyp, True) + self.assertLess(uncased_score, 100) + self.assertLess(cased_score, 100) + + def test_bleu_tokenize(self): + s = "Test0, 1 two, 3" + tokenized = compute_bleu.bleu_tokenize(s) + self.assertEqual(["Test0", ",", "1", "two", ",", "3"], tokenized) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/transformer/data_download.py b/models/official/nlp/transformer/data_download.py new file mode 100644 index 0000000000000000000000000000000000000000..e5f66685611e1ad379d05dcf321a679527914b19 --- /dev/null +++ b/models/official/nlp/transformer/data_download.py @@ -0,0 +1,439 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Download and preprocess WMT17 ende training and evaluation datasets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import random +import tarfile + +# pylint: disable=g-bad-import-order +from absl import app as absl_app +from absl import flags +from absl import logging +import six +from six.moves import range +from six.moves import urllib +from six.moves import zip +import tensorflow.compat.v1 as tf + +from official.nlp.transformer.utils import tokenizer +from official.utils.flags import core as flags_core +# pylint: enable=g-bad-import-order + +# Data sources for training/evaluating the transformer translation model. 
+# If any of the training sources are changed, then either: +# 1) use the flag `--search` to find the best min count or +# 2) update the _TRAIN_DATA_MIN_COUNT constant. +# min_count is the minimum number of times a token must appear in the data +# before it is added to the vocabulary. "Best min count" refers to the value +# that generates a vocabulary set that is closest in size to _TARGET_VOCAB_SIZE. +_TRAIN_DATA_SOURCES = [ + { + "url": "http://data.statmt.org/wmt17/translation-task/" + "training-parallel-nc-v12.tgz", + "input": "news-commentary-v12.de-en.en", + "target": "news-commentary-v12.de-en.de", + }, + { + "url": "http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz", + "input": "commoncrawl.de-en.en", + "target": "commoncrawl.de-en.de", + }, + { + "url": "http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz", + "input": "europarl-v7.de-en.en", + "target": "europarl-v7.de-en.de", + }, +] +# Use pre-defined minimum count to generate subtoken vocabulary. +_TRAIN_DATA_MIN_COUNT = 6 + +_EVAL_DATA_SOURCES = [ + { + "url": "http://data.statmt.org/wmt17/translation-task/dev.tgz", + "input": "newstest2013.en", + "target": "newstest2013.de", + } +] + +_TEST_DATA_SOURCES = [ + { + "url": ("https://storage.googleapis.com/tf-perf-public/" + "official_transformer/test_data/newstest2014.tgz"), + "input": "newstest2014.en", + "target": "newstest2014.de", + } +] + +# Vocabulary constants +_TARGET_VOCAB_SIZE = 32768 # Number of subtokens in the vocabulary list. +_TARGET_THRESHOLD = 327 # Accept vocabulary if size is within this threshold +VOCAB_FILE = "vocab.ende.%d" % _TARGET_VOCAB_SIZE + +# Strings to inclue in the generated files. +_PREFIX = "wmt32k" +_TRAIN_TAG = "train" +_EVAL_TAG = "dev" # Following WMT and Tensor2Tensor conventions, in which the +# evaluation datasets are tagged as "dev" for development. + +# Number of files to split train and evaluation data +_TRAIN_SHARDS = 100 +_EVAL_SHARDS = 1 + + +def find_file(path, filename, max_depth=5): + """Returns full filepath if the file is in path or a subdirectory.""" + for root, dirs, files in os.walk(path): + if filename in files: + return os.path.join(root, filename) + + # Don't search past max_depth + depth = root[len(path) + 1:].count(os.sep) + if depth > max_depth: + del dirs[:] # Clear dirs + return None + + +############################################################################### +# Download and extraction functions +############################################################################### +def get_raw_files(raw_dir, data_source): + """Return raw files from source. Downloads/extracts if needed. + + Args: + raw_dir: string directory to store raw files + data_source: dictionary with + {"url": url of compressed dataset containing input and target files + "input": file with data in input language + "target": file with data in target language} + + Returns: + dictionary with + {"inputs": list of files containing data in input language + "targets": list of files containing corresponding data in target language + } + """ + raw_files = { + "inputs": [], + "targets": [], + } # keys + for d in data_source: + input_file, target_file = download_and_extract( + raw_dir, d["url"], d["input"], d["target"]) + raw_files["inputs"].append(input_file) + raw_files["targets"].append(target_file) + return raw_files + + +def download_report_hook(count, block_size, total_size): + """Report hook for download progress. 
+ + Args: + count: current block number + block_size: block size + total_size: total size + """ + percent = int(count * block_size * 100 / total_size) + print(six.ensure_str("\r%d%%" % percent) + " completed", end="\r") + + +def download_from_url(path, url): + """Download content from a url. + + Args: + path: string directory where file will be downloaded + url: string url + + Returns: + Full path to downloaded file + """ + filename = six.ensure_str(url).split("/")[-1] + found_file = find_file(path, filename, max_depth=0) + if found_file is None: + filename = os.path.join(path, filename) + logging.info("Downloading from %s to %s." % (url, filename)) + inprogress_filepath = six.ensure_str(filename) + ".incomplete" + inprogress_filepath, _ = urllib.request.urlretrieve( + url, inprogress_filepath, reporthook=download_report_hook) + # Print newline to clear the carriage return from the download progress. + print() + tf.gfile.Rename(inprogress_filepath, filename) + return filename + else: + logging.info("Already downloaded: %s (at %s)." % (url, found_file)) + return found_file + + +def download_and_extract(path, url, input_filename, target_filename): + """Extract files from downloaded compressed archive file. + + Args: + path: string directory where the files will be downloaded + url: url containing the compressed input and target files + input_filename: name of file containing data in source language + target_filename: name of file containing data in target language + + Returns: + Full paths to extracted input and target files. + + Raises: + OSError: if the the download/extraction fails. + """ + # Check if extracted files already exist in path + input_file = find_file(path, input_filename) + target_file = find_file(path, target_filename) + if input_file and target_file: + logging.info("Already downloaded and extracted %s." % url) + return input_file, target_file + + # Download archive file if it doesn't already exist. + compressed_file = download_from_url(path, url) + + # Extract compressed files + logging.info("Extracting %s." % compressed_file) + with tarfile.open(compressed_file, "r:gz") as corpus_tar: + corpus_tar.extractall(path) + + # Return file paths of the requested files. + input_file = find_file(path, input_filename) + target_file = find_file(path, target_filename) + + if input_file and target_file: + return input_file, target_file + + raise OSError("Download/extraction failed for url %s to path %s" % + (url, path)) + + +def txt_line_iterator(path): + """Iterate through lines of file.""" + with tf.io.gfile.GFile(path) as f: + for line in f: + yield line.strip() + + +def compile_files(raw_dir, raw_files, tag): + """Compile raw files into a single file for each language. + + Args: + raw_dir: Directory containing downloaded raw files. + raw_files: Dict containing filenames of input and target data. + {"inputs": list of files containing data in input language + "targets": list of files containing corresponding data in target language + } + tag: String to append to the compiled filename. + + Returns: + Full path of compiled input and target files. + """ + logging.info("Compiling files with tag %s." 
% tag) + filename = "%s-%s" % (_PREFIX, tag) + input_compiled_file = os.path.join(raw_dir, + six.ensure_str(filename) + ".lang1") + target_compiled_file = os.path.join(raw_dir, + six.ensure_str(filename) + ".lang2") + + with tf.io.gfile.GFile(input_compiled_file, mode="w") as input_writer: + with tf.io.gfile.GFile(target_compiled_file, mode="w") as target_writer: + for i in range(len(raw_files["inputs"])): + input_file = raw_files["inputs"][i] + target_file = raw_files["targets"][i] + + logging.info("Reading files %s and %s." % (input_file, target_file)) + write_file(input_writer, input_file) + write_file(target_writer, target_file) + return input_compiled_file, target_compiled_file + + +def write_file(writer, filename): + """Write all of lines from file using the writer.""" + for line in txt_line_iterator(filename): + writer.write(line) + writer.write("\n") + + +############################################################################### +# Data preprocessing +############################################################################### +def encode_and_save_files( + subtokenizer, data_dir, raw_files, tag, total_shards): + """Save data from files as encoded Examples in TFrecord format. + + Args: + subtokenizer: Subtokenizer object that will be used to encode the strings. + data_dir: The directory in which to write the examples + raw_files: A tuple of (input, target) data files. Each line in the input and + the corresponding line in target file will be saved in a tf.Example. + tag: String that will be added onto the file names. + total_shards: Number of files to divide the data into. + + Returns: + List of all files produced. + """ + # Create a file for each shard. + filepaths = [shard_filename(data_dir, tag, n + 1, total_shards) + for n in range(total_shards)] + + if all_exist(filepaths): + logging.info("Files with tag %s already exist." % tag) + return filepaths + + logging.info("Saving files with tag %s." % tag) + input_file = raw_files[0] + target_file = raw_files[1] + + # Write examples to each shard in round robin order. + tmp_filepaths = [six.ensure_str(fname) + ".incomplete" for fname in filepaths] + writers = [tf.python_io.TFRecordWriter(fname) for fname in tmp_filepaths] + counter, shard = 0, 0 + for counter, (input_line, target_line) in enumerate(zip( + txt_line_iterator(input_file), txt_line_iterator(target_file))): + if counter > 0 and counter % 100000 == 0: + logging.info("\tSaving case %d." 
% counter) + example = dict_to_example( + {"inputs": subtokenizer.encode(input_line, add_eos=True), + "targets": subtokenizer.encode(target_line, add_eos=True)}) + writers[shard].write(example.SerializeToString()) + shard = (shard + 1) % total_shards + for writer in writers: + writer.close() + + for tmp_name, final_name in zip(tmp_filepaths, filepaths): + tf.gfile.Rename(tmp_name, final_name) + + logging.info("Saved %d Examples", counter + 1) + return filepaths + + +def shard_filename(path, tag, shard_num, total_shards): + """Create filename for data shard.""" + return os.path.join( + path, "%s-%s-%.5d-of-%.5d" % (_PREFIX, tag, shard_num, total_shards)) + + +def shuffle_records(fname): + """Shuffle records in a single file.""" + logging.info("Shuffling records in file %s" % fname) + + # Rename file prior to shuffling + tmp_fname = six.ensure_str(fname) + ".unshuffled" + tf.gfile.Rename(fname, tmp_fname) + + reader = tf.io.tf_record_iterator(tmp_fname) + records = [] + for record in reader: + records.append(record) + if len(records) % 100000 == 0: + logging.info("\tRead: %d", len(records)) + + random.shuffle(records) + + # Write shuffled records to original file name + with tf.python_io.TFRecordWriter(fname) as w: + for count, record in enumerate(records): + w.write(record) + if count > 0 and count % 100000 == 0: + logging.info("\tWriting record: %d" % count) + + tf.gfile.Remove(tmp_fname) + + +def dict_to_example(dictionary): + """Converts a dictionary of string->int to a tf.Example.""" + features = {} + for k, v in six.iteritems(dictionary): + features[k] = tf.train.Feature(int64_list=tf.train.Int64List(value=v)) + return tf.train.Example(features=tf.train.Features(feature=features)) + + +def all_exist(filepaths): + """Returns true if all files in the list exist.""" + for fname in filepaths: + if not tf.gfile.Exists(fname): + return False + return True + + +def make_dir(path): + if not tf.gfile.Exists(path): + logging.info("Creating directory %s" % path) + tf.gfile.MakeDirs(path) + + +def main(unused_argv): + """Obtain training and evaluation data for the Transformer model.""" + make_dir(FLAGS.raw_dir) + make_dir(FLAGS.data_dir) + + # Download test_data + logging.info("Step 1/5: Downloading test data") + get_raw_files(FLAGS.data_dir, _TEST_DATA_SOURCES) + + # Get paths of download/extracted training and evaluation files. + logging.info("Step 2/5: Downloading data from source") + train_files = get_raw_files(FLAGS.raw_dir, _TRAIN_DATA_SOURCES) + eval_files = get_raw_files(FLAGS.raw_dir, _EVAL_DATA_SOURCES) + + # Create subtokenizer based on the training files. + logging.info("Step 3/5: Creating subtokenizer and building vocabulary") + train_files_flat = train_files["inputs"] + train_files["targets"] + vocab_file = os.path.join(FLAGS.data_dir, VOCAB_FILE) + subtokenizer = tokenizer.Subtokenizer.init_from_files( + vocab_file, train_files_flat, _TARGET_VOCAB_SIZE, _TARGET_THRESHOLD, + min_count=None if FLAGS.search else _TRAIN_DATA_MIN_COUNT) + + logging.info("Step 4/5: Compiling training and evaluation data") + compiled_train_files = compile_files(FLAGS.raw_dir, train_files, _TRAIN_TAG) + compiled_eval_files = compile_files(FLAGS.raw_dir, eval_files, _EVAL_TAG) + + # Tokenize and save data as Examples in the TFRecord format. 
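# (Illustrative aside, not part of the original file: dict_to_example above
# turns {"inputs": [12, 5, 1], "targets": [31, 7, 1]} into a tf.train.Example
# whose int64 feature lists hold those ids; serialized Examples are written to
# the shards in round-robin order, so with total_shards = 100 shard k receives
# cases k, k + 100, k + 200, and so on.)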
+ logging.info("Step 5/5: Preprocessing and saving data") + train_tfrecord_files = encode_and_save_files( + subtokenizer, FLAGS.data_dir, compiled_train_files, _TRAIN_TAG, + _TRAIN_SHARDS) + encode_and_save_files( + subtokenizer, FLAGS.data_dir, compiled_eval_files, _EVAL_TAG, + _EVAL_SHARDS) + + for fname in train_tfrecord_files: + shuffle_records(fname) + + +def define_data_download_flags(): + """Add flags specifying data download arguments.""" + flags.DEFINE_string( + name="data_dir", short_name="dd", default="/tmp/translate_ende", + help=flags_core.help_wrap( + "Directory for where the translate_ende_wmt32k dataset is saved.")) + flags.DEFINE_string( + name="raw_dir", short_name="rd", default="/tmp/translate_ende_raw", + help=flags_core.help_wrap( + "Path where the raw data will be downloaded and extracted.")) + flags.DEFINE_bool( + name="search", default=False, + help=flags_core.help_wrap( + "If set, use binary search to find the vocabulary set with size" + "closest to the target size (%d)." % _TARGET_VOCAB_SIZE)) + + +if __name__ == "__main__": + logging.set_verbosity(logging.INFO) + define_data_download_flags() + FLAGS = flags.FLAGS + absl_app.run(main) diff --git a/models/official/nlp/transformer/data_pipeline.py b/models/official/nlp/transformer/data_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..cedd2c309d3194a07841610f8f1039a1a1e7ac51 --- /dev/null +++ b/models/official/nlp/transformer/data_pipeline.py @@ -0,0 +1,316 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Input pipeline for the transformer model to read, filter, and batch examples. + +Two things to note in the pipeline: + +1. Batching scheme + + The examples encoded in the TFRecord files contain data in the format: + {"inputs": [variable length array of integers], + "targets": [variable length array of integers]} + Where integers in the arrays refer to tokens in the English and German vocab + file (named `vocab.ende.32768`). + + Prior to batching, elements in the dataset are grouped by length (max between + "inputs" and "targets" length). Each group is then batched such that: + group_batch_size * length <= batch_size. + + Another way to view batch_size is the maximum number of tokens in each batch. + + Once batched, each element in the dataset will have the shape: + {"inputs": [group_batch_size, padded_input_length], + "targets": [group_batch_size, padded_target_length]} + Lengths are padded to the longest "inputs" or "targets" sequence in the batch + (padded_input_length and padded_target_length can be different). + + This batching scheme decreases the fraction of padding tokens per training + batch, thus improving the training speed significantly. + +2. Shuffling + + While training, the dataset is shuffled in two places in the code. The first + is the list of training files. 
Second, while reading records using + `parallel_interleave`, the `sloppy` argument is used to generate randomness + in the order of the examples. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from absl import logging +import tensorflow as tf + +from official.utils.misc import model_helpers + +# Buffer size for reading records from a TFRecord file. Each training file is +# 7.2 MB, so 8 MB allows an entire file to be kept in memory. +_READ_RECORD_BUFFER = 8 * 1000 * 1000 + +# Example grouping constants. Defines length boundaries for each group. +# These values are the defaults used in Tensor2Tensor. +_MIN_BOUNDARY = 8 +_BOUNDARY_SCALE = 1.1 + + +def _load_records(filename): + """Read file and return a dataset of tf.Examples.""" + return tf.data.TFRecordDataset(filename, buffer_size=_READ_RECORD_BUFFER) + + +def _parse_example(serialized_example): + """Return inputs and targets Tensors from a serialized tf.Example.""" + data_fields = { + "inputs": tf.io.VarLenFeature(tf.int64), + "targets": tf.io.VarLenFeature(tf.int64) + } + parsed = tf.io.parse_single_example(serialized_example, data_fields) + inputs = tf.sparse.to_dense(parsed["inputs"]) + targets = tf.sparse.to_dense(parsed["targets"]) + return inputs, targets + + +def _filter_max_length(example, max_length=256): + """Indicates whether the example's length is lower than the maximum length.""" + return tf.logical_and(tf.size(example[0]) <= max_length, + tf.size(example[1]) <= max_length) + + +def _get_example_length(example): + """Returns the maximum length between the example inputs and targets.""" + length = tf.maximum(tf.shape(example[0])[0], tf.shape(example[1])[0]) + return length + + +def _create_min_max_boundaries( + max_length, min_boundary=_MIN_BOUNDARY, boundary_scale=_BOUNDARY_SCALE): + """Create min and max boundary lists up to max_length. + + For example, when max_length=24, min_boundary=4 and boundary_scale=2, the + returned values will be: + buckets_min = [0, 4, 8, 16, 24] + buckets_max = [4, 8, 16, 24, 25] + + Args: + max_length: The maximum length of example in dataset. + min_boundary: Minimum length in boundary. + boundary_scale: Amount to scale consecutive boundaries in the list. + + Returns: + min and max boundary lists + + """ + # Create bucket boundaries list by scaling the previous boundary or adding 1 + # (to ensure increasing boundary sizes). + bucket_boundaries = [] + x = min_boundary + while x < max_length: + bucket_boundaries.append(x) + x = max(x + 1, int(x * boundary_scale)) + + # Create min and max boundary lists from the initial list. + buckets_min = [0] + bucket_boundaries + buckets_max = bucket_boundaries + [max_length + 1] + return buckets_min, buckets_max + + +def _batch_examples(dataset, batch_size, max_length): + """Group examples by similar lengths, and return batched dataset. + + Each batch of similar-length examples are padded to the same length, and may + have different number of elements in each batch, such that: + group_batch_size * padded_length <= batch_size. + + This decreases the number of padding tokens per batch, which improves the + training speed. + + Args: + dataset: Dataset of unbatched examples. + batch_size: Max number of tokens per batch of examples. + max_length: Max number of tokens in an example input or target sequence. + + Returns: + Dataset of batched examples with similar lengths. + """ + # Get min and max boundary lists for each example. 
These are used to calculate + # the `bucket_id`, which is the index at which: + # buckets_min[bucket_id] <= len(example) < buckets_max[bucket_id] + # Note that using both min and max lists improves the performance. + buckets_min, buckets_max = _create_min_max_boundaries(max_length) + + # Create list of batch sizes for each bucket_id, so that + # bucket_batch_size[bucket_id] * buckets_max[bucket_id] <= batch_size + bucket_batch_sizes = [int(batch_size) // x for x in buckets_max] + # bucket_id will be a tensor, so convert this list to a tensor as well. + bucket_batch_sizes = tf.constant(bucket_batch_sizes, dtype=tf.int64) + + def example_to_bucket_id(example_input, example_target): + """Return int64 bucket id for this example, calculated based on length.""" + seq_length = _get_example_length((example_input, example_target)) + + # TODO(xunkai): investigate if removing code branching improves performance. + conditions_c = tf.logical_and( + tf.less_equal(buckets_min, seq_length), + tf.less(seq_length, buckets_max)) + bucket_id = tf.reduce_min(tf.where(conditions_c)) + return bucket_id + + def window_size_fn(bucket_id): + """Return number of examples to be grouped when given a bucket id.""" + return bucket_batch_sizes[bucket_id] + + def batching_fn(bucket_id, grouped_dataset): + """Batch and add padding to a dataset of elements with similar lengths.""" + bucket_batch_size = window_size_fn(bucket_id) + + # Batch the dataset and add padding so that all input sequences in the + # examples have the same length, and all target sequences have the same + # lengths as well. Resulting lengths of inputs and targets can differ. + return grouped_dataset.padded_batch(bucket_batch_size, ([None], [None])) + + return dataset.apply(tf.data.experimental.group_by_window( + key_func=example_to_bucket_id, + reduce_func=batching_fn, + window_size=None, + window_size_func=window_size_fn)) + + +def _read_and_batch_from_files( + file_pattern, batch_size, max_length, max_io_parallelism, shuffle, repeat, + static_batch=False, num_replicas=1, ctx=None): + """Create dataset where each item is a dict of "inputs" and "targets". + + Args: + file_pattern: String used to match the input TFRecord files. + batch_size: Maximum number of tokens per global batch of examples. + max_length: Maximum number of tokens per example + max_io_parallelism: Max number of cpu cores for parallel input processing. + shuffle: If true, randomizes order of elements. + repeat: Number of times to repeat the dataset. If None, the dataset is + repeated forever. + static_batch: Whether the batches in the dataset should have static shapes. + If True, the input is batched so that every batch has the + shape [batch_size // max_length, max_length]. If False, the input is + grouped by length, and batched so that batches may have different + shapes [N, M], where: + N * M <= batch_size + M <= max_length + In general, this setting should be False. Dynamic shapes allow the inputs + to be grouped so that the number of padding tokens is minimized, and helps + model training. In cases where the input shape must be static + (e.g. running on TPU), this setting should be set to True. + num_replicas: Number of GPUs or other workers. We will generate global + batches, and each global batch is equally divisible by number of replicas. + Currently it is only effective when static_batch==True. TODO: make it + effective when static_batch=False. + ctx: Input context. + + Returns: + tf.data.Dataset object containing examples loaded from the files. 
+ """ + dataset = tf.data.Dataset.list_files(file_pattern, shuffle=shuffle) + + if ctx and ctx.num_input_pipelines > 1: + logging.info("Shard %d of the dataset.", ctx.input_pipeline_id) + dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id) + + # Read files and interleave results. When training, the order of the examples + # will be non-deterministic. + options = tf.data.Options() + options.experimental_deterministic = False + dataset = dataset.interleave( + _load_records, + cycle_length=max_io_parallelism, + num_parallel_calls=tf.data.experimental.AUTOTUNE).with_options(options) + + # Parse each tf.Example into a dictionary. + # TODO: Look into prefetch_input_elements for performance optimization. + dataset = dataset.map(_parse_example, + num_parallel_calls=tf.data.experimental.AUTOTUNE) + + # Remove examples where the input or target length exceeds the maximum length. + dataset = dataset.filter(lambda x, y: _filter_max_length((x, y), max_length)) + + if static_batch: + dataset = dataset.padded_batch( + # First calculate batch size (token number) per worker, then divide it + # into sentences, and finally expand to a global batch. This keeps + # the global batch evenly divisible for the distribution strategy. + int(batch_size // num_replicas // max_length * num_replicas), + ([max_length], [max_length]), drop_remainder=True) + else: + # Group and batch such that each batch has examples of similar length. + # TODO(xunkai): _batch_examples might need to do something special for + # num_replicas. + dataset = _batch_examples(dataset, batch_size, max_length) + + dataset = dataset.repeat(repeat) + + # Prefetch the next element to improve speed of input pipeline. + dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) + return dataset + + +def _generate_synthetic_data(params): + """Create synthetic data based on the parameter batch size.""" + batch_size = int(params["batch_size"] // params["max_length"]) + length = params["max_length"] + dataset = model_helpers.generate_synthetic_data( + input_shape=tf.TensorShape([length]), + input_value=1, + input_dtype=tf.int64, + label_shape=tf.TensorShape([length]), + label_value=1, + label_dtype=tf.int64, + ) + if params["static_batch"]: + dataset = dataset.batch(batch_size, drop_remainder=True) + else: + dataset = dataset.padded_batch(batch_size, ([None], [None])) + return dataset + + +def train_input_fn(params, ctx=None): + """Load and return dataset of batched examples for use during training.""" + file_pattern = os.path.join(params["data_dir"] or "", "*train*") + if params["use_synthetic_data"]: + return _generate_synthetic_data(params) + return _read_and_batch_from_files( + file_pattern, params["batch_size"], params["max_length"], + params["max_io_parallelism"], shuffle=True, + repeat=params["repeat_dataset"], static_batch=params["static_batch"], + num_replicas=params["num_gpus"], ctx=ctx) + + +def eval_input_fn(params, ctx=None): + """Load and return dataset of batched examples for use during evaluation.""" + file_pattern = os.path.join(params["data_dir"] or "", "*dev*") + if params["use_synthetic_data"]: + return _generate_synthetic_data(params) + return _read_and_batch_from_files( + file_pattern, params["batch_size"], params["max_length"], + params["max_io_parallelism"], shuffle=False, repeat=1, + static_batch=params["static_batch"], num_replicas=params["num_gpus"], + ctx=ctx) + + +def map_data_for_transformer_fn(x, y): + """Maps data for training, and handles weird behaviors for different TF versions.""" + # Will transform input
x and targets y into tuple(x, y) as new model inputs. + # For TF v2, the 2nd parameter is omitted to make Keras training work. + return ((x, y),) diff --git a/models/official/nlp/transformer/embedding_layer.py b/models/official/nlp/transformer/embedding_layer.py new file mode 100644 index 0000000000000000000000000000000000000000..6694e2b42af47673ee3ce0b9572ec5867d69cb7d --- /dev/null +++ b/models/official/nlp/transformer/embedding_layer.py @@ -0,0 +1,103 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Implementation of embedding layer with shared weights.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + + +class EmbeddingSharedWeights(tf.keras.layers.Layer): + """Calculates input embeddings and pre-softmax linear with shared weights.""" + + def __init__(self, vocab_size, hidden_size): + """Specify characteristic parameters of embedding layer. + + Args: + vocab_size: Number of tokens in the embedding. (Typically ~32,000) + hidden_size: Dimensionality of the embedding. (Typically 512 or 1024) + """ + super(EmbeddingSharedWeights, self).__init__() + self.vocab_size = vocab_size + self.hidden_size = hidden_size + + def build(self, input_shape): + """Build embedding layer.""" + with tf.name_scope("embedding_and_softmax"): + # Create and initialize weights. The random normal initializer was chosen + # arbitrarily, and works well. + self.shared_weights = self.add_weight( + "weights", + shape=[self.vocab_size, self.hidden_size], + initializer=tf.random_normal_initializer( + mean=0., stddev=self.hidden_size**-0.5)) + super(EmbeddingSharedWeights, self).build(input_shape) + + def get_config(self): + return { + "vocab_size": self.vocab_size, + "hidden_size": self.hidden_size, + } + + def call(self, inputs, mode="embedding"): + """Get token embeddings of inputs. + + Args: + inputs: An int64 tensor with shape [batch_size, length] + mode: string, a valid value is one of "embedding" and "linear". + Returns: + outputs: (1) If mode == "embedding", output embedding tensor, float32 with + shape [batch_size, length, embedding_size]; (2) mode == "linear", output + linear tensor, float32 with shape [batch_size, length, vocab_size]. + Raises: + ValueError: if mode is not valid. 
+ """ + if mode == "embedding": + return self._embedding(inputs) + elif mode == "linear": + return self._linear(inputs) + else: + raise ValueError("mode {} is not valid.".format(mode)) + + def _embedding(self, inputs): + """Applies embedding based on inputs tensor.""" + with tf.name_scope("embedding"): + # Create binary mask of size [batch_size, length] + embeddings = tf.gather(self.shared_weights, inputs) + mask = tf.cast(tf.not_equal(inputs, 0), embeddings.dtype) + embeddings *= tf.expand_dims(mask, -1) + # Scale embedding by the sqrt of the hidden size + embeddings *= self.hidden_size ** 0.5 + + return embeddings + + def _linear(self, inputs): + """Computes logits by running inputs through a linear layer. + + Args: + inputs: A float32 tensor with shape [batch_size, length, hidden_size] + Returns: + float32 tensor with shape [batch_size, length, vocab_size]. + """ + with tf.name_scope("presoftmax_linear"): + batch_size = tf.shape(inputs)[0] + length = tf.shape(inputs)[1] + + x = tf.reshape(inputs, [-1, self.hidden_size]) + logits = tf.matmul(x, self.shared_weights, transpose_b=True) + + return tf.reshape(logits, [batch_size, length, self.vocab_size]) diff --git a/models/official/nlp/transformer/ffn_layer.py b/models/official/nlp/transformer/ffn_layer.py new file mode 100644 index 0000000000000000000000000000000000000000..a7785f27dd0c3fed01c514d052749dcafd163605 --- /dev/null +++ b/models/official/nlp/transformer/ffn_layer.py @@ -0,0 +1,77 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Implementation of fully connected network.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + + +class FeedForwardNetwork(tf.keras.layers.Layer): + """Fully connected feedforward network.""" + + def __init__(self, hidden_size, filter_size, relu_dropout): + """Initialize FeedForwardNetwork. + + Args: + hidden_size: int, output dim of hidden layer. + filter_size: int, filter size for the inner (first) dense layer. + relu_dropout: float, dropout rate for training. + """ + super(FeedForwardNetwork, self).__init__() + self.hidden_size = hidden_size + self.filter_size = filter_size + self.relu_dropout = relu_dropout + + def build(self, input_shape): + self.filter_dense_layer = tf.keras.layers.Dense( + self.filter_size, + use_bias=True, + activation=tf.nn.relu, + name="filter_layer") + self.output_dense_layer = tf.keras.layers.Dense( + self.hidden_size, use_bias=True, name="output_layer") + super(FeedForwardNetwork, self).build(input_shape) + + def get_config(self): + return { + "hidden_size": self.hidden_size, + "filter_size": self.filter_size, + "relu_dropout": self.relu_dropout, + } + + def call(self, x, training): + """Return outputs of the feedforward network. 
+ + Args: + x: tensor with shape [batch_size, length, hidden_size] + training: boolean, whether in training mode or not. + + Returns: + Output of the feedforward network. + tensor with shape [batch_size, length, hidden_size] + """ + # Retrieve dynamically known shapes + batch_size = tf.shape(x)[0] + length = tf.shape(x)[1] + + output = self.filter_dense_layer(x) + if training: + output = tf.nn.dropout(output, rate=self.relu_dropout) + output = self.output_dense_layer(output) + + return output diff --git a/models/official/nlp/transformer/metrics.py b/models/official/nlp/transformer/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..4bd6bba6e6862d643c6cb9bb9fb857b70b3cc00f --- /dev/null +++ b/models/official/nlp/transformer/metrics.py @@ -0,0 +1,183 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Functions for calculating loss, accuracy, and other model metrics. + +Metrics: + - Padded loss, accuracy, and negative log perplexity. Source: + https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/metrics.py + - BLEU approximation. Source: + https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py + - ROUGE score. Source: + https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/rouge.py +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools + +import tensorflow as tf + + +def _pad_tensors_to_same_length(x, y): + """Pad x and y so that the results have the same length (second dimension).""" + with tf.name_scope("pad_to_same_length"): + x_length = tf.shape(x)[1] + y_length = tf.shape(y)[1] + + max_length = tf.maximum(x_length, y_length) + + x = tf.pad(x, [[0, 0], [0, max_length - x_length], [0, 0]]) + y = tf.pad(y, [[0, 0], [0, max_length - y_length]]) + return x, y + + +def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size): + """Calculate cross entropy loss while ignoring padding. 
+ + Args: + logits: Tensor of size [batch_size, length_logits, vocab_size] + labels: Tensor of size [batch_size, length_labels] + smoothing: Label smoothing constant, used to determine the on and off values + vocab_size: int size of the vocabulary + + Returns: + Returns the cross entropy loss and weight tensors: float32 tensors with + shape [batch_size, max(length_logits, length_labels)] + """ + with tf.name_scope("loss"): + logits, labels = _pad_tensors_to_same_length(logits, labels) + + # Calculate smoothing cross entropy + with tf.name_scope("smoothing_cross_entropy"): + confidence = 1.0 - smoothing + low_confidence = (1.0 - confidence) / tf.cast(vocab_size - 1, tf.float32) + soft_targets = tf.one_hot( + tf.cast(labels, tf.int32), + depth=vocab_size, + on_value=confidence, + off_value=low_confidence) + xentropy = tf.nn.softmax_cross_entropy_with_logits( + logits=logits, labels=soft_targets) + + # Calculate the best (lowest) possible value of cross entropy, and + # subtract from the cross entropy loss. + normalizing_constant = -( + confidence * tf.math.log(confidence) + + tf.cast(vocab_size - 1, tf.float32) * low_confidence * + tf.math.log(low_confidence + 1e-20)) + xentropy -= normalizing_constant + + weights = tf.cast(tf.not_equal(labels, 0), tf.float32) + return xentropy * weights, weights + + +def padded_accuracy(logits, labels): + """Percentage of times that predictions matches labels on non-0s.""" + with tf.name_scope("padded_accuracy"): + logits, labels = _pad_tensors_to_same_length(logits, labels) + weights = tf.cast(tf.not_equal(labels, 0), tf.float32) + outputs = tf.cast(tf.argmax(logits, axis=-1), tf.int32) + padded_labels = tf.cast(labels, tf.int32) + return tf.cast(tf.equal(outputs, padded_labels), tf.float32), weights + + +def padded_accuracy_topk(logits, labels, k): + """Percentage of times that top-k predictions matches labels on non-0s.""" + with tf.name_scope("padded_accuracy_topk"): + logits, labels = _pad_tensors_to_same_length(logits, labels) + weights = tf.cast(tf.not_equal(labels, 0), tf.float32) + effective_k = tf.minimum(k, tf.shape(logits)[-1]) + _, outputs = tf.nn.top_k(logits, k=effective_k) + outputs = tf.cast(outputs, tf.int32) + padded_labels = tf.cast(labels, tf.int32) + padded_labels = tf.expand_dims(padded_labels, axis=-1) + padded_labels += tf.zeros_like(outputs) # Pad to same shape. + same = tf.cast(tf.equal(outputs, padded_labels), tf.float32) + same_topk = tf.reduce_sum(same, axis=-1) + return same_topk, weights + + +def padded_accuracy_top5(logits, labels): + return padded_accuracy_topk(logits, labels, 5) + + +def padded_sequence_accuracy(logits, labels): + """Percentage of times that predictions matches labels everywhere (non-0).""" + with tf.name_scope("padded_sequence_accuracy"): + logits, labels = _pad_tensors_to_same_length(logits, labels) + weights = tf.cast(tf.not_equal(labels, 0), tf.float32) + outputs = tf.cast(tf.argmax(logits, axis=-1), tf.int32) + padded_labels = tf.cast(labels, tf.int32) + not_correct = tf.cast(tf.not_equal(outputs, padded_labels), + tf.float32) * weights + axis = list(range(1, len(outputs.get_shape()))) + correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis)) + return correct_seq, tf.constant(1.0) + + +def padded_neg_log_perplexity(logits, labels, vocab_size): + """Average log-perplexity excluding padding 0s. 
No smoothing.""" + num, den = padded_cross_entropy_loss(logits, labels, 0, vocab_size) + return -num, den + + +class MetricLayer(tf.keras.layers.Layer): + """A custom layer that computes metrics for the Transformer model.""" + + def __init__(self, vocab_size): + super(MetricLayer, self).__init__() + self.vocab_size = vocab_size + self.metric_mean_fns = [] + + def build(self, input_shape): + """Builds metric layer.""" + neg_log_perplexity = functools.partial( + padded_neg_log_perplexity, vocab_size=self.vocab_size) + self.metric_mean_fns = [ + (tf.keras.metrics.Mean("accuracy"), padded_accuracy), + (tf.keras.metrics.Mean("accuracy_top5"), padded_accuracy_top5), + (tf.keras.metrics.Mean("accuracy_per_sequence"), + padded_sequence_accuracy), + (tf.keras.metrics.Mean("neg_log_perplexity"), neg_log_perplexity), + ] + super(MetricLayer, self).build(input_shape) + + def get_config(self): + return {"vocab_size": self.vocab_size} + + def call(self, inputs): + logits, targets = inputs[0], inputs[1] + for mean, fn in self.metric_mean_fns: + m = mean(*fn(logits, targets)) + self.add_metric(m) + return logits + + +def transformer_loss(logits, labels, smoothing, vocab_size): + """Calculates total loss containing cross entropy with padding ignored. + + Args: + logits: Tensor of size [batch_size, length_logits, vocab_size] + labels: Tensor of size [batch_size, length_labels] + smoothing: Label smoothing constant, used to determine the on and off values + vocab_size: int size of the vocabulary + + Returns: + A scalar float tensor for loss. + """ + xentropy, weights = padded_cross_entropy_loss(logits, labels, smoothing, + vocab_size) + return tf.reduce_sum(xentropy) / tf.reduce_sum(weights) diff --git a/models/official/nlp/transformer/misc.py b/models/official/nlp/transformer/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..e2b351ae652b7f644c8d598aef67b188ced01d68 --- /dev/null +++ b/models/official/nlp/transformer/misc.py @@ -0,0 +1,260 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
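As a concrete illustration of the label-smoothing arithmetic in padded_cross_entropy_loss above (a small sketch with made-up numbers, not taken from the original files): with smoothing = 0.1 and vocab_size = 5, the soft target puts confidence = 0.9 on the true token and low_confidence = 0.1 / 4 = 0.025 on each other token, and the subtracted normalizing constant is the entropy of that soft distribution, i.e. the lowest cross entropy any prediction could achieve.

import math

smoothing, vocab_size = 0.1, 5
confidence = 1.0 - smoothing                             # 0.9
low_confidence = (1.0 - confidence) / (vocab_size - 1)   # 0.025
normalizing_constant = -(
    confidence * math.log(confidence) +
    (vocab_size - 1) * low_confidence * math.log(low_confidence + 1e-20))
print(round(normalizing_constant, 4))  # 0.4637, the minimum achievable xentropy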
+# ============================================================================== +"""Misc for Transformer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# pylint: disable=g-bad-import-order +from absl import flags +import tensorflow as tf + +from official.nlp.transformer import model_params +from official.utils.flags import core as flags_core +from official.utils.misc import keras_utils + +FLAGS = flags.FLAGS + +PARAMS_MAP = { + 'tiny': model_params.TINY_PARAMS, + 'base': model_params.BASE_PARAMS, + 'big': model_params.BIG_PARAMS, +} + + +def get_model_params(param_set, num_gpus): + """Gets predefined model params.""" + if num_gpus > 1: + if param_set == 'big': + return model_params.BIG_MULTI_GPU_PARAMS.copy() + elif param_set == 'base': + return model_params.BASE_MULTI_GPU_PARAMS.copy() + else: + raise ValueError('Not valid params: param_set={} num_gpus={}'.format( + param_set, num_gpus)) + + return PARAMS_MAP[param_set].copy() + + +def define_transformer_flags(): + """Add flags and flag validators for running transformer_main.""" + # Add common flags (data_dir, model_dir, etc.). + flags_core.define_base(num_gpu=True, distribution_strategy=True) + flags_core.define_performance( + num_parallel_calls=True, + inter_op=False, + intra_op=False, + synthetic_data=True, + max_train_steps=False, + dtype=True, + loss_scale=True, + all_reduce_alg=True, + num_packs=True, + tf_gpu_thread_mode=True, + datasets_num_private_threads=True, + enable_xla=True, + fp16_implementation=True + ) + + flags_core.define_benchmark() + flags_core.define_device(tpu=True) + + flags.DEFINE_integer( + name='train_steps', short_name='ts', default=300000, + help=flags_core.help_wrap('The number of steps used to train.')) + flags.DEFINE_integer( + name='steps_between_evals', short_name='sbe', default=5000, + help=flags_core.help_wrap( + 'The Number of training steps to run between evaluations. This is ' + 'used if --train_steps is defined.')) + flags.DEFINE_boolean( + name='enable_time_history', default=True, + help='Whether to enable TimeHistory callback.') + flags.DEFINE_boolean( + name='enable_tensorboard', default=False, + help='Whether to enable Tensorboard callback.') + flags.DEFINE_boolean( + name='enable_metrics_in_training', default=False, + help='Whether to enable metrics during training.') + flags.DEFINE_boolean( + name='enable_mlir_bridge', + default=False, + help='Whether to enable the TF to XLA bridge.') + # Set flags from the flags_core module as 'key flags' so they're listed when + # the '-h' flag is used. Without this line, the flags defined above are + # only shown in the full `--helpful` help text. + flags.adopt_module_key_flags(flags_core) + + # Add transformer-specific flags + flags.DEFINE_enum( + name='param_set', short_name='mp', default='big', + enum_values=PARAMS_MAP.keys(), + help=flags_core.help_wrap( + 'Parameter set to use when creating and training the model. The ' + 'parameters define the input shape (batch size and max length), ' + 'model configuration (size of embedding, # of hidden layers, etc.), ' + 'and various other settings. The big parameter set increases the ' + 'default batch size, embedding/hidden size, and filter size. For a ' + 'complete list of parameters, please see model/model_params.py.')) + + flags.DEFINE_bool( + name='static_batch', short_name='sb', default=False, + help=flags_core.help_wrap( + 'Whether the batches in the dataset should have static shapes. In ' + 'general, this setting should be False. 
Dynamic shapes allow the ' + 'inputs to be grouped so that the number of padding tokens is ' + 'minimized, and helps model training. In cases where the input shape ' + 'must be static (e.g. running on TPU), this setting will be ignored ' + 'and static batching will always be used.')) + flags.DEFINE_integer( + name='max_length', short_name='ml', default=256, + help=flags_core.help_wrap( + 'Max sentence length for Transformer. Default is 256. Note: Usually ' + 'it is more effective to use a smaller max length if static_batch is ' + 'enabled, e.g. 64.')) + + # Flags for training with steps (may be used for debugging) + flags.DEFINE_integer( + name='validation_steps', short_name='vs', default=64, + help=flags_core.help_wrap('The number of steps used in validation.')) + + # BLEU score computation + flags.DEFINE_string( + name='bleu_source', short_name='bls', default=None, + help=flags_core.help_wrap( + 'Path to source file containing text translate when calculating the ' + 'official BLEU score. Both --bleu_source and --bleu_ref must be set. ' + )) + flags.DEFINE_string( + name='bleu_ref', short_name='blr', default=None, + help=flags_core.help_wrap( + 'Path to source file containing text translate when calculating the ' + 'official BLEU score. Both --bleu_source and --bleu_ref must be set. ' + )) + flags.DEFINE_string( + name='vocab_file', short_name='vf', default=None, + help=flags_core.help_wrap( + 'Path to subtoken vocabulary file. If data_download.py was used to ' + 'download and encode the training data, look in the data_dir to find ' + 'the vocab file.')) + flags.DEFINE_string( + name='mode', default='train', + help=flags_core.help_wrap('mode: train, eval, or predict')) + flags.DEFINE_bool( + name='use_ctl', + default=False, + help=flags_core.help_wrap( + 'Whether the model runs with custom training loop.')) + flags.DEFINE_integer( + name='decode_batch_size', + default=32, + help=flags_core.help_wrap( + 'Global batch size used for Transformer autoregressive decoding on ' + 'TPU.')) + flags.DEFINE_integer( + name='decode_max_length', + default=97, + help=flags_core.help_wrap( + 'Max sequence length of the decode/eval data. This is used by ' + 'Transformer autoregressive decoding on TPU to have minimum ' + 'paddings.')) + flags.DEFINE_bool( + name='padded_decode', + default=False, + help=flags_core.help_wrap( + 'Whether the autoregressive decoding runs with input data padded to ' + 'the decode_max_length. For TPU/XLA-GPU runs, this flag has to be ' + 'set due the static shape requirement. Although CPU/GPU could also ' + 'use padded_decode, it has not been tested. In addition, this method ' + 'will introduce unnecessary overheads which grow quadratically with ' + 'the max sequence length.')) + flags.DEFINE_bool( + name='enable_checkpointing', + default=True, + help=flags_core.help_wrap( + 'Whether to do checkpointing during training. 
When running under ' + 'benchmark harness, we will avoid checkpointing.')) + + flags_core.set_defaults(data_dir='/tmp/translate_ende', + model_dir='/tmp/transformer_model', + batch_size=None) + + # pylint: disable=unused-variable + @flags.multi_flags_validator( + ['bleu_source', 'bleu_ref'], + message='Both or neither --bleu_source and --bleu_ref must be defined.') + def _check_bleu_files(flags_dict): + return (flags_dict['bleu_source'] is None) == ( + flags_dict['bleu_ref'] is None) + + @flags.multi_flags_validator( + ['bleu_source', 'bleu_ref', 'vocab_file'], + message='--vocab_file must be defined if --bleu_source and --bleu_ref ' + 'are defined.') + def _check_bleu_vocab_file(flags_dict): + if flags_dict['bleu_source'] and flags_dict['bleu_ref']: + return flags_dict['vocab_file'] is not None + return True + # pylint: enable=unused-variable + + +def get_callbacks(): + """Returns common callbacks.""" + callbacks = [] + if FLAGS.enable_time_history: + time_callback = keras_utils.TimeHistory( + FLAGS.batch_size, + FLAGS.log_steps, + logdir=FLAGS.model_dir if FLAGS.enable_tensorboard else None) + callbacks.append(time_callback) + + if FLAGS.enable_tensorboard: + tensorboard_callback = tf.keras.callbacks.TensorBoard( + log_dir=FLAGS.model_dir) + callbacks.append(tensorboard_callback) + + return callbacks + + +def update_stats(history, stats, callbacks): + """Normalizes and updates dictionary of stats. + + Args: + history: Results of the training step. + stats: Dict with pre-existing training stats. + callbacks: a list of callbacks which might include a time history callback + used during keras.fit. + """ + + if history and history.history: + train_hist = history.history + # Gets final loss from training. + stats['loss'] = float(train_hist['loss'][-1]) + + if not callbacks: + return + + # Look for the time history callback which was used during keras.fit + for callback in callbacks: + if isinstance(callback, keras_utils.TimeHistory): + timestamp_log = callback.timestamp_log + stats['step_timestamp_log'] = timestamp_log + stats['train_finish_time'] = callback.train_finish_time + if len(timestamp_log) > 1: + stats['avg_exp_per_second'] = ( + callback.batch_size * callback.log_steps * + (len(callback.timestamp_log)-1) / + (timestamp_log[-1].timestamp - timestamp_log[0].timestamp)) diff --git a/models/official/nlp/transformer/model_params.py b/models/official/nlp/transformer/model_params.py new file mode 100644 index 0000000000000000000000000000000000000000..e978abeafca5a627c698f291432f24119ae3fa68 --- /dev/null +++ b/models/official/nlp/transformer/model_params.py @@ -0,0 +1,96 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Defines Transformer model parameters.""" + +from collections import defaultdict + + +BASE_PARAMS = defaultdict( + lambda: None, # Set default value to None. 
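+    # The model-shape defaults below (6 layers, 8 heads, hidden_size=512,
+    # filter_size=2048) match the "base" configuration of the original
+    # Transformer paper.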
+ + # Input params + default_batch_size=2048, # Maximum number of tokens per batch of examples. + default_batch_size_tpu=32768, + max_length=256, # Maximum number of tokens per example. + + # Model params + initializer_gain=1.0, # Used in trainable variable initialization. + vocab_size=33708, # Number of tokens defined in the vocabulary file. + hidden_size=512, # Model dimension in the hidden layers. + num_hidden_layers=6, # Number of layers in the encoder and decoder stacks. + num_heads=8, # Number of heads to use in multi-headed attention. + filter_size=2048, # Inner layer dimension in the feedforward network. + + # Dropout values (only used when training) + layer_postprocess_dropout=0.1, + attention_dropout=0.1, + relu_dropout=0.1, + + # Training params + label_smoothing=0.1, + learning_rate=2.0, + learning_rate_decay_rate=1.0, + learning_rate_warmup_steps=16000, + + # Optimizer params + optimizer_adam_beta1=0.9, + optimizer_adam_beta2=0.997, + optimizer_adam_epsilon=1e-09, + + # Default prediction params + extra_decode_length=50, + beam_size=4, + alpha=0.6, # used to calculate length normalization in beam search + + # TPU specific parameters + use_tpu=False, + static_batch=False, + allow_ffn_pad=True, +) + +BIG_PARAMS = BASE_PARAMS.copy() +BIG_PARAMS.update( + default_batch_size=4096, + + # default batch size is smaller than for BASE_PARAMS due to memory limits. + default_batch_size_tpu=16384, + + hidden_size=1024, + filter_size=4096, + num_heads=16, +) + +# Parameters for running the model in multi gpu. These should not change the +# params that modify the model shape (such as the hidden_size or num_heads). +BASE_MULTI_GPU_PARAMS = BASE_PARAMS.copy() +BASE_MULTI_GPU_PARAMS.update( + learning_rate_warmup_steps=8000 +) + +BIG_MULTI_GPU_PARAMS = BIG_PARAMS.copy() +BIG_MULTI_GPU_PARAMS.update( + layer_postprocess_dropout=0.3, + learning_rate_warmup_steps=8000 +) + +# Parameters for testing the model +TINY_PARAMS = BASE_PARAMS.copy() +TINY_PARAMS.update( + default_batch_size=1024, + default_batch_size_tpu=1024, + hidden_size=32, + num_heads=4, + filter_size=256, +) diff --git a/models/official/nlp/transformer/model_utils.py b/models/official/nlp/transformer/model_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3f860f049cd0bcf0467913c91ee6312356f3ad23 --- /dev/null +++ b/models/official/nlp/transformer/model_utils.py @@ -0,0 +1,123 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Transformer model helper methods.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math + +import numpy as np +import tensorflow as tf + +# Very low numbers to represent -infinity. We do not actually use -Inf, since we +# want to be able to multiply these values by zero to get zero. 
(-Inf * 0 = NaN) +_NEG_INF_FP32 = -1e9 +_NEG_INF_FP16 = np.finfo(np.float16).min + + +def get_position_encoding( + length, hidden_size, min_timescale=1.0, max_timescale=1.0e4): + """Return positional encoding. + + Calculates the position encoding as a mix of sine and cosine functions with + geometrically increasing wavelengths. + Defined and formulized in Attention is All You Need, section 3.5. + + Args: + length: Sequence length. + hidden_size: Size of the + min_timescale: Minimum scale that will be applied at each position + max_timescale: Maximum scale that will be applied at each position + + Returns: + Tensor with shape [length, hidden_size] + """ + # We compute the positional encoding in float32 even if the model uses + # float16, as many of the ops used, like log and exp, are numerically unstable + # in float16. + position = tf.cast(tf.range(length), tf.float32) + num_timescales = hidden_size // 2 + log_timescale_increment = ( + math.log(float(max_timescale) / float(min_timescale)) / + (tf.cast(num_timescales, tf.float32) - 1)) + inv_timescales = min_timescale * tf.exp( + tf.cast(tf.range(num_timescales), tf.float32) * -log_timescale_increment) + scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0) + signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) + return signal + + +def get_decoder_self_attention_bias(length, dtype=tf.float32): + """Calculate bias for decoder that maintains model's autoregressive property. + + Creates a tensor that masks out locations that correspond to illegal + connections, so prediction at position i cannot draw information from future + positions. + + Args: + length: int length of sequences in batch. + dtype: The dtype of the return value. + + Returns: + float tensor of shape [1, 1, length, length] + """ + neg_inf = _NEG_INF_FP16 if dtype == tf.float16 else _NEG_INF_FP32 + with tf.name_scope("decoder_self_attention_bias"): + valid_locs = tf.linalg.band_part(tf.ones([length, length], dtype=dtype), + -1, 0) + valid_locs = tf.reshape(valid_locs, [1, 1, length, length]) + decoder_bias = neg_inf * (1.0 - valid_locs) + return decoder_bias + + +def get_padding(x, padding_value=0, dtype=tf.float32): + """Return float tensor representing the padding values in x. + + Args: + x: int tensor with any shape + padding_value: int which represents padded values in input + dtype: The dtype of the return value. + + Returns: + float tensor with same shape as x containing values 0 or 1. + 0 -> non-padding, 1 -> padding + """ + with tf.name_scope("padding"): + return tf.cast(tf.equal(x, padding_value), dtype) + + +def get_padding_bias(x, padding_value=0, dtype=tf.float32): + """Calculate bias tensor from padding values in tensor. + + Bias tensor that is added to the pre-softmax multi-headed attention logits, + which has shape [batch_size, num_heads, length, length]. The tensor is zero at + non-padding locations, and -1e9 (negative infinity) at padding locations. + + Args: + x: int tensor with shape [batch_size, length] + padding_value: int which represents padded values in input + dtype: The dtype of the return value + + Returns: + Attention bias tensor of shape [batch_size, 1, 1, length]. 
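+    The two singleton dimensions broadcast the same bias across every
+    attention head and every query position when it is added to the
+    attention logits.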
+ """ + with tf.name_scope("attention_bias"): + padding = get_padding(x, padding_value, dtype) + attention_bias = padding * _NEG_INF_FP32 + attention_bias = tf.expand_dims( + tf.expand_dims(attention_bias, axis=1), axis=1) + return attention_bias diff --git a/models/official/nlp/transformer/model_utils_test.py b/models/official/nlp/transformer/model_utils_test.py new file mode 100644 index 0000000000000000000000000000000000000000..a8c4a15c9aba8dbff043088a392fe415f22206ca --- /dev/null +++ b/models/official/nlp/transformer/model_utils_test.py @@ -0,0 +1,62 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Test Transformer model helper methods.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from official.nlp.transformer import model_utils + +NEG_INF = -1e9 + + +class ModelUtilsTest(tf.test.TestCase): + + def test_get_padding(self): + x = tf.constant([[1, 0, 0, 0, 2], [3, 4, 0, 0, 0], [0, 5, 6, 0, 7]]) + padding = model_utils.get_padding(x, padding_value=0) + + self.assertAllEqual([[0, 1, 1, 1, 0], [0, 0, 1, 1, 1], [1, 0, 0, 1, 0]], + padding) + + def test_get_padding_bias(self): + x = tf.constant([[1, 0, 0, 0, 2], [3, 4, 0, 0, 0], [0, 5, 6, 0, 7]]) + bias = model_utils.get_padding_bias(x) + bias_shape = tf.shape(bias) + flattened_bias = tf.reshape(bias, [3, 5]) + + self.assertAllEqual([[0, NEG_INF, NEG_INF, NEG_INF, 0], + [0, 0, NEG_INF, NEG_INF, NEG_INF], + [NEG_INF, 0, 0, NEG_INF, 0]], + flattened_bias) + self.assertAllEqual([3, 1, 1, 5], bias_shape) + + def test_get_decoder_self_attention_bias(self): + length = 5 + bias = model_utils.get_decoder_self_attention_bias(length) + + self.assertAllEqual([[[[0, NEG_INF, NEG_INF, NEG_INF, NEG_INF], + [0, 0, NEG_INF, NEG_INF, NEG_INF], + [0, 0, 0, NEG_INF, NEG_INF], + [0, 0, 0, 0, NEG_INF], + [0, 0, 0, 0, 0]]]], + bias) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/transformer/optimizer.py b/models/official/nlp/transformer/optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..176b5eb8c6ffcea8a9bccbad5fbdef1d2106e106 --- /dev/null +++ b/models/official/nlp/transformer/optimizer.py @@ -0,0 +1,137 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Optimizer from addons and learning rate scheduler.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf +K = tf.keras.backend + + +class LearningRateSchedule(tf.keras.optimizers.schedules.LearningRateSchedule): + """Learning rate schedule.""" + + def __init__(self, initial_learning_rate, hidden_size, warmup_steps): + """Initialize configuration of the learning rate schedule. + + Args: + initial_learning_rate: A float, the initial learning rate. + hidden_size: An integer, the model dimension in the hidden layers. + warmup_steps: An integer, the number of steps required for linear warmup. + """ + super(LearningRateSchedule, self).__init__() + self.initial_learning_rate = initial_learning_rate + self.hidden_size = hidden_size + self.warmup_steps = tf.cast(warmup_steps, tf.float32) + + def __call__(self, global_step): + """Calculate learning rate with linear warmup and rsqrt decay. + + Args: + global_step: An integer, the current global step used for learning rate + calculation. + + Returns: + A float, the learning rate needs to be used for current global step. + """ + with tf.name_scope('learning_rate_schedule'): + global_step = tf.cast(global_step, tf.float32) + learning_rate = self.initial_learning_rate + learning_rate *= (self.hidden_size**-0.5) + # Apply linear warmup + learning_rate *= tf.minimum(1.0, global_step / self.warmup_steps) + # Apply rsqrt decay + learning_rate /= tf.sqrt(tf.maximum(global_step, self.warmup_steps)) + return learning_rate + + def get_config(self): + """Get the configuration of the learning rate schedule.""" + return { + 'initial_learning_rate': self.initial_learning_rate, + 'hidden_size': self.hidden_size, + 'warmup_steps': self.warmup_steps, + } + + +class LearningRateFn(object): + """Creates learning rate function.""" + + def __init__(self, learning_rate, hidden_size, warmup_steps): + self.learning_rate = learning_rate + self.hidden_size = hidden_size + self.warmup_steps = float(warmup_steps) + + def __call__(self, global_step): + """Calculate learning rate with linear warmup and rsqrt decay.""" + step = float(global_step) + learning_rate = self.learning_rate + learning_rate *= (self.hidden_size ** -0.5) + # Apply linear warmup + learning_rate *= np.minimum(1.0, step / self.warmup_steps) + # Apply rsqrt decay + learning_rate /= np.sqrt(np.maximum(step, self.warmup_steps)) + return learning_rate + + +class LearningRateScheduler(tf.keras.callbacks.Callback): + """Keras callback to schedule learning rate. + + TODO(tianlin): Refactor this scheduler and LearningRateBatchScheduler in + official/resnet/keras/keras_common.py. + """ + + def __init__(self, schedule, init_steps=None, verbose=False): + super(LearningRateScheduler, self).__init__() + self.schedule = schedule + self.verbose = verbose + if init_steps is None: + init_steps = 0.0 + self.steps = float(init_steps) # Total steps during training. 
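+
+  # Note: `schedule` is typically the LearningRateFn defined above. As an
+  # illustration (assuming the BASE_PARAMS defaults of learning_rate=2.0,
+  # hidden_size=512 and warmup_steps=16000), the peak learning rate at the
+  # end of warmup is 2.0 * 512**-0.5 / sqrt(16000) ~= 7.0e-4; the rate grows
+  # linearly before that step and decays as 1/sqrt(step) afterwards.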
+ + def on_epoch_begin(self, epoch, logs=None): + if not hasattr(self.model.optimizer, 'lr'): + raise ValueError('Optimizer must have a "lr" attribute.') + if not hasattr(self.model.optimizer, 'iterations'): + raise ValueError('Optimizer must have a "iterations" attribute.') + + def on_train_batch_begin(self, batch, logs=None): + """Adjusts learning rate for each train batch.""" + if self.verbose > 0: + iterations = K.get_value(self.model.optimizer.iterations) + print('Original iteration %d' % iterations) + + self.steps += 1.0 + try: # new API + lr = float(K.get_value(self.model.optimizer.lr)) + lr = self.schedule(self.steps, lr) + except TypeError: # Support for old API for backward compatibility + lr = self.schedule(self.steps) + if not isinstance(lr, (float, np.float32, np.float64)): + raise ValueError('The output of the "schedule" function ' + 'should be float.') + K.set_value(self.model.optimizer.lr, lr) + K.set_value(self.model.optimizer.iterations, self.steps) + + if self.verbose > 0: + print('Batch %05d Step %05d: LearningRateScheduler setting learning ' + 'rate to %s.' % (batch + 1, self.steps, lr)) + + def on_epoch_end(self, epoch, logs=None): + logs = logs or {} + logs['lr'] = K.get_value(self.model.optimizer.lr) + logs['steps'] = self.steps diff --git a/models/official/nlp/transformer/transformer.py b/models/official/nlp/transformer/transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..773e79449cdc493a96a5078ce85e801f8f9da250 --- /dev/null +++ b/models/official/nlp/transformer/transformer.py @@ -0,0 +1,565 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Defines the Transformer model in TF 2.0. + +Model paper: https://arxiv.org/pdf/1706.03762.pdf +Transformer model code source: https://github.com/tensorflow/tensor2tensor +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf +from official.nlp.modeling.layers import position_embedding +from official.nlp.transformer import attention_layer +from official.nlp.transformer import beam_search +from official.nlp.transformer import embedding_layer +from official.nlp.transformer import ffn_layer +from official.nlp.transformer import metrics +from official.nlp.transformer import model_utils +from official.nlp.transformer.utils.tokenizer import EOS_ID + + +# Disable the not-callable lint error, since it claims many objects are not +# callable when they actually are. 
+# pylint: disable=not-callable + + +def create_model(params, is_train): + """Creates transformer model.""" + with tf.name_scope("model"): + if is_train: + inputs = tf.keras.layers.Input((None,), dtype="int64", name="inputs") + targets = tf.keras.layers.Input((None,), dtype="int64", name="targets") + internal_model = Transformer(params, name="transformer_v2") + logits = internal_model([inputs, targets], training=is_train) + vocab_size = params["vocab_size"] + label_smoothing = params["label_smoothing"] + if params["enable_metrics_in_training"]: + logits = metrics.MetricLayer(vocab_size)([logits, targets]) + logits = tf.keras.layers.Lambda(lambda x: x, name="logits", + dtype=tf.float32)(logits) + model = tf.keras.Model([inputs, targets], logits) + # TODO(reedwm): Can we do this loss in float16 instead of float32? + loss = metrics.transformer_loss( + logits, targets, label_smoothing, vocab_size) + model.add_loss(loss) + return model + + else: + inputs = tf.keras.layers.Input((None,), dtype="int64", name="inputs") + internal_model = Transformer(params, name="transformer_v2") + ret = internal_model([inputs], training=is_train) + outputs, scores = ret["outputs"], ret["scores"] + return tf.keras.Model(inputs, [outputs, scores]) + + +class Transformer(tf.keras.Model): + """Transformer model with Keras. + + Implemented as described in: https://arxiv.org/pdf/1706.03762.pdf + + The Transformer model consists of an encoder and decoder. The input is an int + sequence (or a batch of sequences). The encoder produces a continuous + representation, and the decoder uses the encoder output to generate + probabilities for the output sequence. + """ + + def __init__(self, params, name=None): + """Initialize layers to build Transformer model. + + Args: + params: hyperparameter object defining layer sizes, dropout values, etc. + name: name of the model. + """ + super(Transformer, self).__init__(name=name) + self.params = params + self.embedding_softmax_layer = embedding_layer.EmbeddingSharedWeights( + params["vocab_size"], params["hidden_size"]) + self.encoder_stack = EncoderStack(params) + self.decoder_stack = DecoderStack(params) + self.position_embedding = position_embedding.RelativePositionEmbedding( + hidden_size=self.params["hidden_size"]) + + def get_config(self): + return { + "params": self.params, + } + + def call(self, inputs, training): + """Calculate target logits or inferred target sequences. + + Args: + inputs: input tensor list of size 1 or 2. + First item, inputs: int tensor with shape [batch_size, input_length]. + Second item (optional), targets: None or int tensor with shape + [batch_size, target_length]. + training: boolean, whether in training mode or not. + + Returns: + If targets is defined, then return logits for each word in the target + sequence. float tensor with shape [batch_size, target_length, vocab_size] + If target is none, then generate output sequence one token at a time. + returns a dictionary { + outputs: [batch_size, decoded length] + scores: [batch_size, float]} + Even when float16 is used, the output tensor(s) are always float32. + + Raises: + NotImplementedError: If try to use padded decode method on CPU/GPUs. + """ + if len(inputs) == 2: + inputs, targets = inputs[0], inputs[1] + else: + # Decoding path. 
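+      # Only the source sentence is provided at inference time; setting
+      # targets to None routes the call below to self.predict(), which
+      # decodes autoregressively with beam search.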
+ inputs, targets = inputs[0], None + if self.params["padded_decode"]: + if not self.params["num_replicas"]: + raise NotImplementedError( + "Padded decoding on CPU/GPUs is not supported.") + decode_batch_size = int(self.params["decode_batch_size"] / + self.params["num_replicas"]) + inputs.set_shape([ + decode_batch_size, self.params["decode_max_length"] + ]) + + # Variance scaling is used here because it seems to work in many problems. + # Other reasonable initializers may also work just as well. + with tf.name_scope("Transformer"): + # Calculate attention bias for encoder self-attention and decoder + # multi-headed attention layers. + attention_bias = model_utils.get_padding_bias(inputs) + + # Run the inputs through the encoder layer to map the symbol + # representations to continuous representations. + encoder_outputs = self.encode(inputs, attention_bias, training) + # Generate output sequence if targets is None, or return logits if target + # sequence is known. + if targets is None: + return self.predict(encoder_outputs, attention_bias, training) + else: + logits = self.decode(targets, encoder_outputs, attention_bias, training) + return logits + + def encode(self, inputs, attention_bias, training): + """Generate continuous representation for inputs. + + Args: + inputs: int tensor with shape [batch_size, input_length]. + attention_bias: float tensor with shape [batch_size, 1, 1, input_length]. + training: boolean, whether in training mode or not. + + Returns: + float tensor with shape [batch_size, input_length, hidden_size] + """ + with tf.name_scope("encode"): + # Prepare inputs to the layer stack by adding positional encodings and + # applying dropout. + embedded_inputs = self.embedding_softmax_layer(inputs) + embedded_inputs = tf.cast(embedded_inputs, self.params["dtype"]) + inputs_padding = model_utils.get_padding(inputs) + attention_bias = tf.cast(attention_bias, self.params["dtype"]) + + with tf.name_scope("add_pos_encoding"): + pos_encoding = self.position_embedding(inputs=embedded_inputs) + pos_encoding = tf.cast(pos_encoding, self.params["dtype"]) + encoder_inputs = embedded_inputs + pos_encoding + + if training: + encoder_inputs = tf.nn.dropout( + encoder_inputs, rate=self.params["layer_postprocess_dropout"]) + + return self.encoder_stack( + encoder_inputs, attention_bias, inputs_padding, training=training) + + def decode(self, targets, encoder_outputs, attention_bias, training): + """Generate logits for each value in the target sequence. + + Args: + targets: target values for the output sequence. int tensor with shape + [batch_size, target_length] + encoder_outputs: continuous representation of input sequence. float tensor + with shape [batch_size, input_length, hidden_size] + attention_bias: float tensor with shape [batch_size, 1, 1, input_length] + training: boolean, whether in training mode or not. + + Returns: + float32 tensor with shape [batch_size, target_length, vocab_size] + """ + with tf.name_scope("decode"): + # Prepare inputs to decoder layers by shifting targets, adding positional + # encoding and applying dropout. 
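+      # The shift below prepends an all-zero embedding (acting as the start
+      # token) and drops the last position, so the logits at position i are
+      # conditioned only on target tokens 0..i-1 (teacher forcing).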
+ decoder_inputs = self.embedding_softmax_layer(targets) + decoder_inputs = tf.cast(decoder_inputs, self.params["dtype"]) + attention_bias = tf.cast(attention_bias, self.params["dtype"]) + with tf.name_scope("shift_targets"): + # Shift targets to the right, and remove the last element + decoder_inputs = tf.pad(decoder_inputs, + [[0, 0], [1, 0], [0, 0]])[:, :-1, :] + with tf.name_scope("add_pos_encoding"): + length = tf.shape(decoder_inputs)[1] + pos_encoding = self.position_embedding(decoder_inputs) + pos_encoding = tf.cast(pos_encoding, self.params["dtype"]) + decoder_inputs += pos_encoding + if training: + decoder_inputs = tf.nn.dropout( + decoder_inputs, rate=self.params["layer_postprocess_dropout"]) + + # Run values + decoder_self_attention_bias = model_utils.get_decoder_self_attention_bias( + length, dtype=self.params["dtype"]) + outputs = self.decoder_stack( + decoder_inputs, + encoder_outputs, + decoder_self_attention_bias, + attention_bias, + training=training) + logits = self.embedding_softmax_layer(outputs, mode="linear") + logits = tf.cast(logits, tf.float32) + return logits + + def _get_symbols_to_logits_fn(self, max_decode_length, training): + """Returns a decoding function that calculates logits of the next tokens.""" + timing_signal = self.position_embedding( + inputs=None, length=max_decode_length + 1) + timing_signal = tf.cast(timing_signal, self.params["dtype"]) + decoder_self_attention_bias = model_utils.get_decoder_self_attention_bias( + max_decode_length, dtype=self.params["dtype"]) + + # TODO(b/139770046): Refactor code with better naming of i. + def symbols_to_logits_fn(ids, i, cache): + """Generate logits for next potential IDs. + + Args: + ids: Current decoded sequences. int tensor with shape [batch_size * + beam_size, i + 1]. + i: Loop index. + cache: dictionary of values storing the encoder output, encoder-decoder + attention bias, and previous decoder attention values. + + Returns: + Tuple of + (logits with shape [batch_size * beam_size, vocab_size], + updated cache values) + """ + # Set decoder input to the last generated IDs + decoder_input = ids[:, -1:] + + # Preprocess decoder input by getting embeddings and adding timing signal. 
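+      # Only the most recently generated token is embedded here; attention
+      # keys/values for earlier positions are reused from `cache`, so each
+      # decoding step avoids re-encoding the whole prefix.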
+ decoder_input = self.embedding_softmax_layer(decoder_input) + + if self.params["padded_decode"]: + timing_signal_shape = timing_signal.shape.as_list() + decoder_input += tf.slice(timing_signal, [i, 0], + [1, timing_signal_shape[1]]) + + bias_shape = decoder_self_attention_bias.shape.as_list() + self_attention_bias = tf.slice( + decoder_self_attention_bias, [0, 0, i, 0], + [bias_shape[0], bias_shape[1], 1, bias_shape[3]]) + else: + decoder_input += timing_signal[i:i + 1] + + self_attention_bias = decoder_self_attention_bias[:, :, i:i + 1, :i + 1] + + decoder_outputs = self.decoder_stack( + decoder_input, + cache.get("encoder_outputs"), + self_attention_bias, + cache.get("encoder_decoder_attention_bias"), + training=training, + cache=cache, + decode_loop_step=i if self.params["padded_decode"] else None) + logits = self.embedding_softmax_layer(decoder_outputs, mode="linear") + logits = tf.squeeze(logits, axis=[1]) + return logits, cache + + return symbols_to_logits_fn + + def predict(self, encoder_outputs, encoder_decoder_attention_bias, training): + """Return predicted sequence.""" + encoder_outputs = tf.cast(encoder_outputs, self.params["dtype"]) + if self.params["padded_decode"]: + batch_size = encoder_outputs.shape.as_list()[0] + input_length = encoder_outputs.shape.as_list()[1] + else: + batch_size = tf.shape(encoder_outputs)[0] + input_length = tf.shape(encoder_outputs)[1] + max_decode_length = input_length + self.params["extra_decode_length"] + encoder_decoder_attention_bias = tf.cast(encoder_decoder_attention_bias, + self.params["dtype"]) + + symbols_to_logits_fn = self._get_symbols_to_logits_fn( + max_decode_length, training) + + # Create initial set of IDs that will be passed into symbols_to_logits_fn. + initial_ids = tf.zeros([batch_size], dtype=tf.int32) + + # Create cache storing decoder attention values for each layer. + # pylint: disable=g-complex-comprehension + init_decode_length = ( + max_decode_length if self.params["padded_decode"] else 0) + num_heads = self.params["num_heads"] + dim_per_head = self.params["hidden_size"] // num_heads + cache = { + "layer_%d" % layer: { + "k": + tf.zeros([ + batch_size, init_decode_length, num_heads, dim_per_head + ], + dtype=self.params["dtype"]), + "v": + tf.zeros([ + batch_size, init_decode_length, num_heads, dim_per_head + ], + dtype=self.params["dtype"]) + } for layer in range(self.params["num_hidden_layers"]) + } + # pylint: enable=g-complex-comprehension + + # Add encoder output and attention bias to the cache. + cache["encoder_outputs"] = encoder_outputs + cache["encoder_decoder_attention_bias"] = encoder_decoder_attention_bias + + # Use beam search to find the top beam_size sequences and scores. 
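+    # sequence_beam_search returns per-beam id sequences and scores; the
+    # slices below keep the top-scoring beam for each batch element and drop
+    # the initial id at position 0.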
+ decoded_ids, scores = beam_search.sequence_beam_search( + symbols_to_logits_fn=symbols_to_logits_fn, + initial_ids=initial_ids, + initial_cache=cache, + vocab_size=self.params["vocab_size"], + beam_size=self.params["beam_size"], + alpha=self.params["alpha"], + max_decode_length=max_decode_length, + eos_id=EOS_ID, + padded_decode=self.params["padded_decode"], + dtype=self.params["dtype"]) + + # Get the top sequence for each batch element + top_decoded_ids = decoded_ids[:, 0, 1:] + top_scores = scores[:, 0] + + return {"outputs": top_decoded_ids, "scores": top_scores} + + +class PrePostProcessingWrapper(tf.keras.layers.Layer): + """Wrapper class that applies layer pre-processing and post-processing.""" + + def __init__(self, layer, params): + super(PrePostProcessingWrapper, self).__init__() + self.layer = layer + self.params = params + self.postprocess_dropout = params["layer_postprocess_dropout"] + + def build(self, input_shape): + # Create normalization layer + self.layer_norm = tf.keras.layers.LayerNormalization( + epsilon=1e-6, dtype="float32") + super(PrePostProcessingWrapper, self).build(input_shape) + + def get_config(self): + return { + "params": self.params, + } + + def call(self, x, *args, **kwargs): + """Calls wrapped layer with same parameters.""" + # Preprocessing: apply layer normalization + training = kwargs["training"] + + y = self.layer_norm(x) + + # Get layer output + y = self.layer(y, *args, **kwargs) + + # Postprocessing: apply dropout and residual connection + if training: + y = tf.nn.dropout(y, rate=self.postprocess_dropout) + return x + y + + +class EncoderStack(tf.keras.layers.Layer): + """Transformer encoder stack. + + The encoder stack is made up of N identical layers. Each layer is composed + of the sublayers: + 1. Self-attention layer + 2. Feedforward network (which is 2 fully-connected layers) + """ + + def __init__(self, params): + super(EncoderStack, self).__init__() + self.params = params + self.layers = [] + + def build(self, input_shape): + """Builds the encoder stack.""" + params = self.params + for _ in range(params["num_hidden_layers"]): + # Create sublayers for each layer. + self_attention_layer = attention_layer.SelfAttention( + params["hidden_size"], params["num_heads"], + params["attention_dropout"]) + feed_forward_network = ffn_layer.FeedForwardNetwork( + params["hidden_size"], params["filter_size"], params["relu_dropout"]) + + self.layers.append([ + PrePostProcessingWrapper(self_attention_layer, params), + PrePostProcessingWrapper(feed_forward_network, params) + ]) + + # Create final layer normalization layer. + self.output_normalization = tf.keras.layers.LayerNormalization( + epsilon=1e-6, dtype="float32") + super(EncoderStack, self).build(input_shape) + + def get_config(self): + return { + "params": self.params, + } + + def call(self, encoder_inputs, attention_bias, inputs_padding, training): + """Return the output of the encoder layer stacks. + + Args: + encoder_inputs: tensor with shape [batch_size, input_length, hidden_size] + attention_bias: bias for the encoder self-attention layer. [batch_size, 1, + 1, input_length] + inputs_padding: tensor with shape [batch_size, input_length], inputs with + zero paddings. + training: boolean, whether in training mode or not. + + Returns: + Output of encoder layer stack. + float32 tensor with shape [batch_size, input_length, hidden_size] + """ + for n, layer in enumerate(self.layers): + # Run inputs through the sublayers. 
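+      # Each sublayer is wrapped in PrePostProcessingWrapper, i.e. the
+      # computation is x + dropout(sublayer(layer_norm(x))) (pre-norm
+      # residual); a final layer norm is applied after the whole stack.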
+ self_attention_layer = layer[0] + feed_forward_network = layer[1] + + with tf.name_scope("layer_%d" % n): + with tf.name_scope("self_attention"): + encoder_inputs = self_attention_layer( + encoder_inputs, attention_bias, training=training) + with tf.name_scope("ffn"): + encoder_inputs = feed_forward_network( + encoder_inputs, training=training) + + return self.output_normalization(encoder_inputs) + + +class DecoderStack(tf.keras.layers.Layer): + """Transformer decoder stack. + + Like the encoder stack, the decoder stack is made up of N identical layers. + Each layer is composed of the sublayers: + 1. Self-attention layer + 2. Multi-headed attention layer combining encoder outputs with results from + the previous self-attention layer. + 3. Feedforward network (2 fully-connected layers) + """ + + def __init__(self, params): + super(DecoderStack, self).__init__() + self.params = params + self.layers = [] + + def build(self, input_shape): + """Builds the decoder stack.""" + params = self.params + for _ in range(params["num_hidden_layers"]): + self_attention_layer = attention_layer.SelfAttention( + params["hidden_size"], params["num_heads"], + params["attention_dropout"]) + enc_dec_attention_layer = attention_layer.Attention( + params["hidden_size"], params["num_heads"], + params["attention_dropout"]) + feed_forward_network = ffn_layer.FeedForwardNetwork( + params["hidden_size"], params["filter_size"], params["relu_dropout"]) + + self.layers.append([ + PrePostProcessingWrapper(self_attention_layer, params), + PrePostProcessingWrapper(enc_dec_attention_layer, params), + PrePostProcessingWrapper(feed_forward_network, params) + ]) + self.output_normalization = tf.keras.layers.LayerNormalization( + epsilon=1e-6, dtype="float32") + super(DecoderStack, self).build(input_shape) + + def get_config(self): + return { + "params": self.params, + } + + def call(self, + decoder_inputs, + encoder_outputs, + decoder_self_attention_bias, + attention_bias, + training, + cache=None, + decode_loop_step=None): + """Return the output of the decoder layer stacks. + + Args: + decoder_inputs: A tensor with shape + [batch_size, target_length, hidden_size]. + encoder_outputs: A tensor with shape + [batch_size, input_length, hidden_size] + decoder_self_attention_bias: A tensor with shape + [1, 1, target_len, target_length], the bias for decoder self-attention + layer. + attention_bias: A tensor with shape [batch_size, 1, 1, input_length], + the bias for encoder-decoder attention layer. + training: A bool, whether in training mode or not. + cache: (Used for fast decoding) A nested dictionary storing previous + decoder self-attention values. The items are: + {layer_n: {"k": A tensor with shape [batch_size, i, key_channels], + "v": A tensor with shape [batch_size, i, value_channels]}, + ...} + decode_loop_step: An integer, the step number of the decoding loop. Used + only for autoregressive inference on TPU. + + Returns: + Output of decoder layer stack. + float32 tensor with shape [batch_size, target_length, hidden_size] + """ + for n, layer in enumerate(self.layers): + self_attention_layer = layer[0] + enc_dec_attention_layer = layer[1] + feed_forward_network = layer[2] + + # Run inputs through the sublayers. 
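+      # Per layer: masked self-attention, then attention over the encoder
+      # outputs, then the feed-forward network, each with the same pre-norm
+      # residual wrapper. `layer_cache` holds keys/values of previously
+      # decoded positions during incremental decoding.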
+ layer_name = "layer_%d" % n + layer_cache = cache[layer_name] if cache is not None else None + with tf.name_scope(layer_name): + with tf.name_scope("self_attention"): + decoder_inputs = self_attention_layer( + decoder_inputs, + decoder_self_attention_bias, + training=training, + cache=layer_cache, + decode_loop_step=decode_loop_step) + with tf.name_scope("encdec_attention"): + decoder_inputs = enc_dec_attention_layer( + decoder_inputs, + encoder_outputs, + attention_bias, + training=training) + with tf.name_scope("ffn"): + decoder_inputs = feed_forward_network( + decoder_inputs, training=training) + + return self.output_normalization(decoder_inputs) diff --git a/models/official/nlp/transformer/transformer_layers_test.py b/models/official/nlp/transformer/transformer_layers_test.py new file mode 100644 index 0000000000000000000000000000000000000000..82d37259da2854fb83e086749fe7a8df2c22e955 --- /dev/null +++ b/models/official/nlp/transformer/transformer_layers_test.py @@ -0,0 +1,97 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for layers in Transformer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from official.nlp.transformer import attention_layer +from official.nlp.transformer import embedding_layer +from official.nlp.transformer import ffn_layer +from official.nlp.transformer import metrics + + +class TransformerLayersTest(tf.test.TestCase): + + def test_attention_layer(self): + hidden_size = 64 + num_heads = 4 + dropout = 0.5 + dim_per_head = hidden_size // num_heads + layer = attention_layer.SelfAttention(hidden_size, num_heads, dropout) + self.assertDictEqual(layer.get_config(), { + "hidden_size": hidden_size, + "num_heads": num_heads, + "attention_dropout": dropout, + }) + length = 2 + x = tf.ones([1, length, hidden_size]) + bias = tf.ones([1]) + cache = { + "k": tf.zeros([1, 0, num_heads, dim_per_head]), + "v": tf.zeros([1, 0, num_heads, dim_per_head]), + } + y = layer(x, bias, training=True, cache=cache) + self.assertEqual(y.shape, (1, length, 64,)) + self.assertEqual(cache["k"].shape, (1, length, num_heads, dim_per_head,)) + self.assertEqual(cache["v"].shape, (1, length, num_heads, dim_per_head,)) + + def test_embedding_shared_weights(self): + vocab_size = 50 + hidden_size = 64 + length = 2 + layer = embedding_layer.EmbeddingSharedWeights(vocab_size, hidden_size) + self.assertDictEqual(layer.get_config(), { + "vocab_size": 50, + "hidden_size": 64, + }) + + idx = tf.ones([1, length], dtype="int32") + y = layer(idx) + self.assertEqual(y.shape, (1, length, hidden_size,)) + x = tf.ones([1, length, hidden_size]) + output = layer(x, "linear") + self.assertEqual(output.shape, (1, length, vocab_size,)) + + def test_feed_forward_network(self): + hidden_size = 64 + filter_size = 32 + relu_dropout = 0.5 + layer = 
ffn_layer.FeedForwardNetwork(hidden_size, filter_size, relu_dropout) + self.assertDictEqual(layer.get_config(), { + "hidden_size": hidden_size, + "filter_size": filter_size, + "relu_dropout": relu_dropout, + }) + length = 2 + x = tf.ones([1, length, hidden_size]) + y = layer(x, training=True) + self.assertEqual(y.shape, (1, length, hidden_size,)) + + def test_metric_layer(self): + vocab_size = 50 + logits = tf.keras.layers.Input((None, vocab_size), + dtype="float32", + name="logits") + targets = tf.keras.layers.Input((None,), dtype="int64", name="targets") + output_logits = metrics.MetricLayer(vocab_size)([logits, targets]) + self.assertEqual(output_logits.shape.as_list(), [None, None, vocab_size,]) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/transformer/transformer_main.py b/models/official/nlp/transformer/transformer_main.py new file mode 100644 index 0000000000000000000000000000000000000000..14177d8562b6ec4b190fe5d773998368ffc0b881 --- /dev/null +++ b/models/official/nlp/transformer/transformer_main.py @@ -0,0 +1,496 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Train and evaluate the Transformer model. + +See README for description of setting the training schedule and evaluating the +BLEU score. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import tempfile + +from absl import app +from absl import flags +from absl import logging +import tensorflow as tf + +from official.modeling import performance +from official.nlp.transformer import compute_bleu +from official.nlp.transformer import data_pipeline +from official.nlp.transformer import metrics +from official.nlp.transformer import misc +from official.nlp.transformer import optimizer +from official.nlp.transformer import transformer +from official.nlp.transformer import translate +from official.nlp.transformer.utils import tokenizer +from official.utils.flags import core as flags_core +from official.utils.misc import distribution_utils +from official.utils.misc import keras_utils + +INF = int(1e9) +BLEU_DIR = "bleu" +_SINGLE_SAMPLE = 1 + + +def translate_and_compute_bleu(model, + params, + subtokenizer, + bleu_source, + bleu_ref, + distribution_strategy=None): + """Translate file and report the cased and uncased bleu scores. + + Args: + model: A Keras model, used to generate the translations. + params: A dictionary, containing the translation related parameters. + subtokenizer: A subtokenizer object, used for encoding and decoding source + and translated lines. + bleu_source: A file containing source sentences for translation. + bleu_ref: A file containing the reference for the translated sentences. + distribution_strategy: A platform distribution strategy, used for TPU based + translation. + + Returns: + uncased_score: A float, the case insensitive BLEU score. 
+ cased_score: A float, the case sensitive BLEU score. + """ + # Create temporary file to store translation. + tmp = tempfile.NamedTemporaryFile(delete=False) + tmp_filename = tmp.name + + translate.translate_file( + model, + params, + subtokenizer, + bleu_source, + output_file=tmp_filename, + print_all_translations=False, + distribution_strategy=distribution_strategy) + + # Compute uncased and cased bleu scores. + uncased_score = compute_bleu.bleu_wrapper(bleu_ref, tmp_filename, False) + cased_score = compute_bleu.bleu_wrapper(bleu_ref, tmp_filename, True) + os.remove(tmp_filename) + return uncased_score, cased_score + + +def evaluate_and_log_bleu(model, + params, + bleu_source, + bleu_ref, + vocab_file, + distribution_strategy=None): + """Calculate and record the BLEU score. + + Args: + model: A Keras model, used to generate the translations. + params: A dictionary, containing the translation related parameters. + bleu_source: A file containing source sentences for translation. + bleu_ref: A file containing the reference for the translated sentences. + vocab_file: A file containing the vocabulary for translation. + distribution_strategy: A platform distribution strategy, used for TPU based + translation. + + Returns: + uncased_score: A float, the case insensitive BLEU score. + cased_score: A float, the case sensitive BLEU score. + """ + subtokenizer = tokenizer.Subtokenizer(vocab_file) + + uncased_score, cased_score = translate_and_compute_bleu( + model, params, subtokenizer, bleu_source, bleu_ref, distribution_strategy) + + logging.info("Bleu score (uncased): %s", uncased_score) + logging.info("Bleu score (cased): %s", cased_score) + return uncased_score, cased_score + + +class TransformerTask(object): + """Main entry of Transformer model.""" + + def __init__(self, flags_obj): + """Init function of TransformerMain. + + Args: + flags_obj: Object containing parsed flag values, i.e., FLAGS. + + Raises: + ValueError: if not using static batch for input data on TPU. 
+ """ + self.flags_obj = flags_obj + self.predict_model = None + + # Add flag-defined parameters to params object + num_gpus = flags_core.get_num_gpus(flags_obj) + self.params = params = misc.get_model_params(flags_obj.param_set, num_gpus) + + params["num_gpus"] = num_gpus + params["use_ctl"] = flags_obj.use_ctl + params["data_dir"] = flags_obj.data_dir + params["model_dir"] = flags_obj.model_dir + params["static_batch"] = flags_obj.static_batch + params["max_length"] = flags_obj.max_length + params["decode_batch_size"] = flags_obj.decode_batch_size + params["decode_max_length"] = flags_obj.decode_max_length + params["padded_decode"] = flags_obj.padded_decode + params["max_io_parallelism"] = ( + flags_obj.num_parallel_calls or tf.data.experimental.AUTOTUNE) + + params["use_synthetic_data"] = flags_obj.use_synthetic_data + params["batch_size"] = flags_obj.batch_size or params["default_batch_size"] + params["repeat_dataset"] = None + params["dtype"] = flags_core.get_tf_dtype(flags_obj) + params["enable_tensorboard"] = flags_obj.enable_tensorboard + params["enable_metrics_in_training"] = flags_obj.enable_metrics_in_training + params["steps_between_evals"] = flags_obj.steps_between_evals + params["enable_checkpointing"] = flags_obj.enable_checkpointing + + self.distribution_strategy = distribution_utils.get_distribution_strategy( + distribution_strategy=flags_obj.distribution_strategy, + num_gpus=num_gpus, + all_reduce_alg=flags_obj.all_reduce_alg, + num_packs=flags_obj.num_packs, + tpu_address=flags_obj.tpu or "") + if self.use_tpu: + params["num_replicas"] = self.distribution_strategy.num_replicas_in_sync + else: + logging.info("Running transformer with num_gpus = %d", num_gpus) + + if self.distribution_strategy: + logging.info("For training, using distribution strategy: %s", + self.distribution_strategy) + else: + logging.info("Not using any distribution strategy.") + + performance.set_mixed_precision_policy( + params["dtype"], + flags_core.get_loss_scale(flags_obj, default_for_fp16="dynamic")) + + @property + def use_tpu(self): + if self.distribution_strategy: + return isinstance(self.distribution_strategy, + tf.distribute.experimental.TPUStrategy) + return False + + def train(self): + """Trains the model.""" + params = self.params + flags_obj = self.flags_obj + # Sets config options. + keras_utils.set_session_config(enable_xla=flags_obj.enable_xla) + + _ensure_dir(flags_obj.model_dir) + with distribution_utils.get_strategy_scope(self.distribution_strategy): + model = transformer.create_model(params, is_train=True) + opt = self._create_optimizer() + + current_step = 0 + checkpoint = tf.train.Checkpoint(model=model, optimizer=opt) + latest_checkpoint = tf.train.latest_checkpoint(flags_obj.model_dir) + if latest_checkpoint: + checkpoint.restore(latest_checkpoint) + logging.info("Loaded checkpoint %s", latest_checkpoint) + current_step = opt.iterations.numpy() + + if params["use_ctl"]: + train_loss_metric = tf.keras.metrics.Mean( + "training_loss", dtype=tf.float32) + if params["enable_tensorboard"]: + summary_writer = tf.compat.v2.summary.create_file_writer( + flags_obj.model_dir) + else: + summary_writer = tf.compat.v2.summary.create_noop_writer() + train_metrics = [train_loss_metric] + if params["enable_metrics_in_training"]: + train_metrics = train_metrics + model.metrics + else: + model.compile(opt) + + model.summary() + + if self.use_tpu: + # Different from experimental_distribute_dataset, + # experimental_distribute_datasets_from_function requires + # per-replica/local batch size. 
+ params["batch_size"] /= self.distribution_strategy.num_replicas_in_sync + train_ds = ( + self.distribution_strategy + .experimental_distribute_datasets_from_function( + lambda ctx: data_pipeline.train_input_fn(params, ctx))) + else: + train_ds = data_pipeline.train_input_fn(params) + map_data_fn = data_pipeline.map_data_for_transformer_fn + train_ds = train_ds.map( + map_data_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) + if params["use_ctl"]: + train_ds_iterator = iter(train_ds) + + callbacks = self._create_callbacks(flags_obj.model_dir, 0, params) + + # Only TimeHistory callback is supported for CTL + if params["use_ctl"]: + callbacks = [cb for cb in callbacks + if isinstance(cb, keras_utils.TimeHistory)] + + # TODO(b/139418525): Refactor the custom training loop logic. + @tf.function + def train_steps(iterator, steps): + """Training steps function for TPU runs. + + Args: + iterator: The input iterator of the training dataset. + steps: An integer, the number of training steps. + + Returns: + A float, the loss value. + """ + + def _step_fn(inputs): + """Per-replica step function.""" + inputs, targets = inputs + with tf.GradientTape() as tape: + logits = model([inputs, targets], training=True) + loss = metrics.transformer_loss(logits, targets, + params["label_smoothing"], + params["vocab_size"]) + # Scales the loss, which results in using the average loss across all + # of the replicas for backprop. + scaled_loss = loss / self.distribution_strategy.num_replicas_in_sync + + # De-dupes variables due to keras tracking issues. + tvars = list({id(v): v for v in model.trainable_variables}.values()) + grads = tape.gradient(scaled_loss, tvars) + opt.apply_gradients(zip(grads, tvars)) + # For reporting, the metric takes the mean of losses. + train_loss_metric.update_state(loss) + + for _ in tf.range(steps): + train_loss_metric.reset_states() + self.distribution_strategy.run( + _step_fn, args=(next(iterator),)) + + cased_score, uncased_score = None, None + cased_score_history, uncased_score_history = [], [] + while current_step < flags_obj.train_steps: + remaining_steps = flags_obj.train_steps - current_step + train_steps_per_eval = ( + remaining_steps if remaining_steps < flags_obj.steps_between_evals + else flags_obj.steps_between_evals) + current_iteration = current_step // flags_obj.steps_between_evals + + logging.info( + "Start train iteration at global step:{}".format(current_step)) + history = None + if params["use_ctl"]: + if not self.use_tpu: + raise NotImplementedError( + "Custom training loop on GPUs is not implemented.") + + # Runs training steps. + with summary_writer.as_default(): + for cb in callbacks: + cb.on_epoch_begin(current_iteration) + cb.on_batch_begin(0) + + train_steps( + train_ds_iterator, + tf.convert_to_tensor(train_steps_per_eval, dtype=tf.int32)) + current_step += train_steps_per_eval + train_loss = train_loss_metric.result().numpy().astype(float) + logging.info("Train Step: %d/%d / loss = %s", current_step, + flags_obj.train_steps, train_loss) + + for cb in callbacks: + cb.on_batch_end(train_steps_per_eval - 1) + cb.on_epoch_end(current_iteration) + + if params["enable_tensorboard"]: + for metric_obj in train_metrics: + tf.compat.v2.summary.scalar(metric_obj.name, metric_obj.result(), + current_step) + summary_writer.flush() + + for cb in callbacks: + cb.on_train_end() + + if flags_obj.enable_checkpointing: + # avoid check-pointing when running for benchmarking. 
+ checkpoint_name = checkpoint.save( + os.path.join(flags_obj.model_dir, + "ctl_step_{}.ckpt".format(current_step))) + logging.info("Saved checkpoint to %s", checkpoint_name) + else: + if self.use_tpu: + raise NotImplementedError( + "Keras model.fit on TPUs is not implemented.") + history = model.fit( + train_ds, + initial_epoch=current_iteration, + epochs=current_iteration + 1, + steps_per_epoch=train_steps_per_eval, + callbacks=callbacks, + # If TimeHistory is enabled, progress bar would be messy. Increase + # the verbose level to get rid of it. + verbose=(2 if flags_obj.enable_time_history else 1)) + current_step += train_steps_per_eval + logging.info("Train history: {}".format(history.history)) + + logging.info("End train iteration at global step:{}".format(current_step)) + + if (flags_obj.bleu_source and flags_obj.bleu_ref): + uncased_score, cased_score = self.eval() + cased_score_history.append([current_iteration + 1, cased_score]) + uncased_score_history.append([current_iteration + 1, uncased_score]) + + stats = ({ + "loss": train_loss + } if history is None else {}) + misc.update_stats(history, stats, callbacks) + if uncased_score and cased_score: + stats["bleu_uncased"] = uncased_score + stats["bleu_cased"] = cased_score + stats["bleu_uncased_history"] = uncased_score_history + stats["bleu_cased_history"] = cased_score_history + return stats + + def eval(self): + """Evaluates the model.""" + distribution_strategy = self.distribution_strategy if self.use_tpu else None + + # We only want to create the model under DS scope for TPU case. + # When 'distribution_strategy' is None, a no-op DummyContextManager will + # be used. + with distribution_utils.get_strategy_scope(distribution_strategy): + if not self.predict_model: + self.predict_model = transformer.create_model(self.params, False) + self._load_weights_if_possible( + self.predict_model, + tf.train.latest_checkpoint(self.flags_obj.model_dir)) + self.predict_model.summary() + return evaluate_and_log_bleu( + self.predict_model, self.params, self.flags_obj.bleu_source, + self.flags_obj.bleu_ref, self.flags_obj.vocab_file, + distribution_strategy) + + def predict(self): + """Predicts result from the model.""" + params = self.params + flags_obj = self.flags_obj + + with tf.name_scope("model"): + model = transformer.create_model(params, is_train=False) + self._load_weights_if_possible( + model, tf.train.latest_checkpoint(self.flags_obj.model_dir)) + model.summary() + subtokenizer = tokenizer.Subtokenizer(flags_obj.vocab_file) + + ds = data_pipeline.eval_input_fn(params) + ds = ds.map(lambda x, y: x).take(_SINGLE_SAMPLE) + ret = model.predict(ds) + val_outputs, _ = ret + length = len(val_outputs) + for i in range(length): + translate.translate_from_input(val_outputs[i], subtokenizer) + + def _create_callbacks(self, cur_log_dir, init_steps, params): + """Creates a list of callbacks.""" + sfunc = optimizer.LearningRateFn(params["learning_rate"], + params["hidden_size"], + params["learning_rate_warmup_steps"]) + scheduler_callback = optimizer.LearningRateScheduler(sfunc, init_steps) + callbacks = misc.get_callbacks() + callbacks.append(scheduler_callback) + if params["enable_checkpointing"]: + ckpt_full_path = os.path.join(cur_log_dir, "cp-{epoch:04d}.ckpt") + callbacks.append( + tf.keras.callbacks.ModelCheckpoint( + ckpt_full_path, save_weights_only=True)) + return callbacks + + def _load_weights_if_possible(self, model, init_weight_path=None): + """Loads model weights when it is provided.""" + if init_weight_path: + logging.info("Load 
weights: {}".format(init_weight_path)) + # TODO(b/139414977): Having the same variable restoring method for both + # TPU and GPU. + if self.use_tpu: + checkpoint = tf.train.Checkpoint( + model=model, optimizer=self._create_optimizer()) + checkpoint.restore(init_weight_path) + else: + model.load_weights(init_weight_path) + else: + logging.info("Weights not loaded from path:{}".format(init_weight_path)) + + def _create_optimizer(self): + """Creates optimizer.""" + params = self.params + lr_schedule = optimizer.LearningRateSchedule( + params["learning_rate"], params["hidden_size"], + params["learning_rate_warmup_steps"]) + opt = tf.keras.optimizers.Adam( + lr_schedule if self.use_tpu else params["learning_rate"], + params["optimizer_adam_beta1"], + params["optimizer_adam_beta2"], + epsilon=params["optimizer_adam_epsilon"]) + + opt = performance.configure_optimizer( + opt, + use_float16=params["dtype"] == tf.float16, + use_graph_rewrite=self.flags_obj.fp16_implementation == "graph_rewrite", + loss_scale=flags_core.get_loss_scale( + self.flags_obj, default_for_fp16="dynamic")) + + return opt + + +def _ensure_dir(log_dir): + """Makes log dir if not existed.""" + if not tf.io.gfile.exists(log_dir): + tf.io.gfile.makedirs(log_dir) + + +def main(_): + flags_obj = flags.FLAGS + if flags_obj.enable_mlir_bridge: + tf.config.experimental.enable_mlir_bridge() + task = TransformerTask(flags_obj) + + # Execute flag override logic for better model performance + if flags_obj.tf_gpu_thread_mode: + keras_utils.set_gpu_thread_mode_and_count( + per_gpu_thread_count=flags_obj.per_gpu_thread_count, + gpu_thread_mode=flags_obj.tf_gpu_thread_mode, + num_gpus=flags_obj.num_gpus, + datasets_num_private_threads=flags_obj.datasets_num_private_threads) + + if flags_obj.mode == "train": + task.train() + elif flags_obj.mode == "predict": + task.predict() + elif flags_obj.mode == "eval": + task.eval() + else: + raise ValueError("Invalid mode {}".format(flags_obj.mode)) + + +if __name__ == "__main__": + logging.set_verbosity(logging.INFO) + misc.define_transformer_flags() + app.run(main) diff --git a/models/official/nlp/transformer/transformer_main_test.py b/models/official/nlp/transformer/transformer_main_test.py new file mode 100644 index 0000000000000000000000000000000000000000..a65cc4bcbf3a1c4281a36730a1ab60c496f3c7aa --- /dev/null +++ b/models/official/nlp/transformer/transformer_main_test.py @@ -0,0 +1,191 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Test Transformer model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import re +import sys +import unittest + +from absl import flags +from absl.testing import flagsaver +import tensorflow as tf +from tensorflow.python.eager import context # pylint: disable=ungrouped-imports +from official.nlp.transformer import misc +from official.nlp.transformer import transformer_main +from official.utils.misc import keras_utils + +FLAGS = flags.FLAGS +FIXED_TIMESTAMP = 'my_time_stamp' +WEIGHT_PATTERN = re.compile(r'weights-epoch-.+\.hdf5') + + +def _generate_file(filepath, lines): + with open(filepath, 'w') as f: + for l in lines: + f.write('{}\n'.format(l)) + + +class TransformerTaskTest(tf.test.TestCase): + local_flags = None + + def setUp(self): + temp_dir = self.get_temp_dir() + if TransformerTaskTest.local_flags is None: + misc.define_transformer_flags() + # Loads flags, array cannot be blank. + flags.FLAGS(['foo']) + TransformerTaskTest.local_flags = flagsaver.save_flag_values() + else: + flagsaver.restore_flag_values(TransformerTaskTest.local_flags) + FLAGS.model_dir = os.path.join(temp_dir, FIXED_TIMESTAMP) + FLAGS.param_set = 'tiny' + FLAGS.use_synthetic_data = True + FLAGS.steps_between_evals = 1 + FLAGS.train_steps = 2 + FLAGS.validation_steps = 1 + FLAGS.batch_size = 8 + FLAGS.max_length = 1 + FLAGS.num_gpus = 1 + FLAGS.distribution_strategy = 'off' + FLAGS.dtype = 'fp32' + self.model_dir = FLAGS.model_dir + self.temp_dir = temp_dir + self.vocab_file = os.path.join(temp_dir, 'vocab') + self.vocab_size = misc.get_model_params(FLAGS.param_set, 0)['vocab_size'] + self.bleu_source = os.path.join(temp_dir, 'bleu_source') + self.bleu_ref = os.path.join(temp_dir, 'bleu_ref') + self.orig_policy = ( + tf.compat.v2.keras.mixed_precision.experimental.global_policy()) + + def tearDown(self): + tf.compat.v2.keras.mixed_precision.experimental.set_policy(self.orig_policy) + + def _assert_exists(self, filepath): + self.assertTrue(os.path.exists(filepath)) + + def test_train_no_dist_strat(self): + if context.num_gpus() >= 2: + self.skipTest('No need to test 2+ GPUs without a distribution strategy.') + t = transformer_main.TransformerTask(FLAGS) + t.train() + + def test_train_static_batch(self): + if context.num_gpus() >= 2: + self.skipTest('No need to test 2+ GPUs without a distribution strategy.') + FLAGS.distribution_strategy = 'one_device' + if tf.test.is_built_with_cuda(): + FLAGS.num_gpus = 1 + else: + FLAGS.num_gpus = 0 + FLAGS.static_batch = True + t = transformer_main.TransformerTask(FLAGS) + t.train() + + @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU') + def test_train_1_gpu_with_dist_strat(self): + FLAGS.distribution_strategy = 'one_device' + t = transformer_main.TransformerTask(FLAGS) + t.train() + + @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU') + def test_train_fp16(self): + FLAGS.distribution_strategy = 'one_device' + FLAGS.dtype = 'fp16' + t = transformer_main.TransformerTask(FLAGS) + t.train() + + @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU') + def test_train_2_gpu(self): + if context.num_gpus() < 2: + self.skipTest( + '{} GPUs are not available for this test. 
{} GPUs are available' + .format(2, context.num_gpus())) + FLAGS.distribution_strategy = 'mirrored' + FLAGS.num_gpus = 2 + FLAGS.param_set = 'base' + t = transformer_main.TransformerTask(FLAGS) + t.train() + + @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU') + def test_train_2_gpu_fp16(self): + if context.num_gpus() < 2: + self.skipTest( + '{} GPUs are not available for this test. {} GPUs are available' + .format(2, context.num_gpus())) + FLAGS.distribution_strategy = 'mirrored' + FLAGS.num_gpus = 2 + FLAGS.param_set = 'base' + FLAGS.dtype = 'fp16' + t = transformer_main.TransformerTask(FLAGS) + t.train() + + def _prepare_files_and_flags(self, *extra_flags): + # Make log dir. + if not os.path.exists(self.temp_dir): + os.makedirs(self.temp_dir) + + # Fake vocab, bleu_source and bleu_ref. + tokens = [ + "''", "''", "'_'", "'a'", "'b'", "'c'", "'d'", "'a_'", "'b_'", + "'c_'", "'d_'" + ] + tokens += ["'{}'".format(i) for i in range(self.vocab_size - len(tokens))] + _generate_file(self.vocab_file, tokens) + _generate_file(self.bleu_source, ['a b', 'c d']) + _generate_file(self.bleu_ref, ['a b', 'd c']) + + # Update flags. + update_flags = [ + 'ignored_program_name', + '--vocab_file={}'.format(self.vocab_file), + '--bleu_source={}'.format(self.bleu_source), + '--bleu_ref={}'.format(self.bleu_ref), + ] + if extra_flags: + update_flags.extend(extra_flags) + FLAGS(update_flags) + + def test_predict(self): + if context.num_gpus() >= 2: + self.skipTest('No need to test 2+ GPUs without a distribution strategy.') + self._prepare_files_and_flags() + t = transformer_main.TransformerTask(FLAGS) + t.predict() + + @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU') + def test_predict_fp16(self): + if context.num_gpus() >= 2: + self.skipTest('No need to test 2+ GPUs without a distribution strategy.') + self._prepare_files_and_flags('--dtype=fp16') + t = transformer_main.TransformerTask(FLAGS) + t.predict() + + def test_eval(self): + if context.num_gpus() >= 2: + self.skipTest('No need to test 2+ GPUs without a distribution strategy.') + if 'test_xla' in sys.argv[0]: + self.skipTest('TODO(xla): Make this test faster under XLA.') + self._prepare_files_and_flags() + t = transformer_main.TransformerTask(FLAGS) + t.eval() + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/nlp/transformer/transformer_test.py b/models/official/nlp/transformer/transformer_test.py new file mode 100644 index 0000000000000000000000000000000000000000..227b43dc6ff194ab74effc37214ae9253823310d --- /dev/null +++ b/models/official/nlp/transformer/transformer_test.py @@ -0,0 +1,68 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Test Transformer model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from official.nlp.transformer import model_params +from official.nlp.transformer import transformer + + +class TransformerV2Test(tf.test.TestCase): + + def setUp(self): + self.params = params = model_params.TINY_PARAMS + params["batch_size"] = params["default_batch_size"] = 16 + params["use_synthetic_data"] = True + params["hidden_size"] = 12 + params["num_hidden_layers"] = 2 + params["filter_size"] = 14 + params["num_heads"] = 2 + params["vocab_size"] = 41 + params["extra_decode_length"] = 2 + params["beam_size"] = 3 + params["dtype"] = tf.float32 + + def test_create_model_train(self): + model = transformer.create_model(self.params, True) + inputs, outputs = model.inputs, model.outputs + self.assertEqual(len(inputs), 2) + self.assertEqual(len(outputs), 1) + self.assertEqual(inputs[0].shape.as_list(), [None, None]) + self.assertEqual(inputs[0].dtype, tf.int64) + self.assertEqual(inputs[1].shape.as_list(), [None, None]) + self.assertEqual(inputs[1].dtype, tf.int64) + self.assertEqual(outputs[0].shape.as_list(), [None, None, 41]) + self.assertEqual(outputs[0].dtype, tf.float32) + + def test_create_model_not_train(self): + model = transformer.create_model(self.params, False) + inputs, outputs = model.inputs, model.outputs + self.assertEqual(len(inputs), 1) + self.assertEqual(len(outputs), 2) + self.assertEqual(inputs[0].shape.as_list(), [None, None]) + self.assertEqual(inputs[0].dtype, tf.int64) + self.assertEqual(outputs[0].shape.as_list(), [None, None]) + self.assertEqual(outputs[0].dtype, tf.int32) + self.assertEqual(outputs[1].shape.as_list(), [None]) + self.assertEqual(outputs[1].dtype, tf.float32) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/transformer/translate.py b/models/official/nlp/transformer/translate.py new file mode 100644 index 0000000000000000000000000000000000000000..1f92504142e08918a972dff10c422a58fcfbbd04 --- /dev/null +++ b/models/official/nlp/transformer/translate.py @@ -0,0 +1,199 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Translate text or files using trained transformer model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import logging +import numpy as np +import tensorflow as tf + +from official.nlp.transformer.utils import tokenizer + +_EXTRA_DECODE_LENGTH = 100 +_BEAM_SIZE = 4 +_ALPHA = 0.6 + + +def _get_sorted_inputs(filename): + """Read and sort lines from the file sorted by decreasing length. + + Args: + filename: String name of file to read inputs from. 
+ Returns: + Sorted list of inputs, and dictionary mapping original index->sorted index + of each element. + """ + with tf.io.gfile.GFile(filename) as f: + records = f.read().split("\n") + inputs = [record.strip() for record in records] + if not inputs[-1]: + inputs.pop() + + input_lens = [(i, len(line.split())) for i, line in enumerate(inputs)] + sorted_input_lens = sorted(input_lens, key=lambda x: x[1], reverse=True) + + sorted_inputs = [None] * len(sorted_input_lens) + sorted_keys = [0] * len(sorted_input_lens) + for i, (index, _) in enumerate(sorted_input_lens): + sorted_inputs[i] = inputs[index] + sorted_keys[index] = i + return sorted_inputs, sorted_keys + + +def _encode_and_add_eos(line, subtokenizer): + """Encode line with subtokenizer, and add EOS id to the end.""" + return subtokenizer.encode(line) + [tokenizer.EOS_ID] + + +def _trim_and_decode(ids, subtokenizer): + """Trim EOS and PAD tokens from ids, and decode to return a string.""" + try: + index = list(ids).index(tokenizer.EOS_ID) + return subtokenizer.decode(ids[:index]) + except ValueError: # No EOS found in sequence + return subtokenizer.decode(ids) + + +def translate_file(model, + params, + subtokenizer, + input_file, + output_file=None, + print_all_translations=True, + distribution_strategy=None): + """Translate lines in file, and save to output file if specified. + + Args: + model: A Keras model, used to generate the translations. + params: A dictionary, containing the translation related parameters. + subtokenizer: A subtokenizer object, used for encoding and decoding source + and translated lines. + input_file: A file containing lines to translate. + output_file: A file that stores the generated translations. + print_all_translations: A bool. If true, all translations are printed to + stdout. + distribution_strategy: A distribution strategy, used to perform inference + directly with tf.function instead of Keras model.predict(). + + Raises: + ValueError: if output file is invalid. + """ + batch_size = params["decode_batch_size"] + + # Read and sort inputs by length. Keep dictionary (original index-->new index + # in sorted list) to write translations in the original order. 
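+ # Worked example with a hypothetical two-line input file containing
+ #   "hi"                  (1 token)
+ #   "a longer sentence"   (3 tokens)
+ # _get_sorted_inputs returns sorted_inputs = ["a longer sentence", "hi"]
+ # (longest first) and sorted_keys = [1, 0]; original line i ends up at
+ # position sorted_keys[i] of the sorted list.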
+ sorted_inputs, sorted_keys = _get_sorted_inputs(input_file) + total_samples = len(sorted_inputs) + num_decode_batches = (total_samples - 1) // batch_size + 1 + + def input_generator(): + """Yield encoded strings from sorted_inputs.""" + for i in range(num_decode_batches): + lines = [ + sorted_inputs[j + i * batch_size] + for j in range(batch_size) + if j + i * batch_size < total_samples + ] + lines = [_encode_and_add_eos(l, subtokenizer) for l in lines] + if distribution_strategy: + for j in range(batch_size - len(lines)): + lines.append([tokenizer.EOS_ID]) + batch = tf.keras.preprocessing.sequence.pad_sequences( + lines, + maxlen=params["decode_max_length"], + dtype="int32", + padding="post") + logging.info("Decoding batch %d out of %d.", i, num_decode_batches) + yield batch + + @tf.function + def predict_step(inputs): + """Decoding step function for TPU runs.""" + + def _step_fn(inputs): + """Per replica step function.""" + tag = inputs[0] + val_inputs = inputs[1] + val_outputs, _ = model([val_inputs], training=False) + return tag, val_outputs + + return distribution_strategy.run(_step_fn, args=(inputs,)) + + translations = [] + if distribution_strategy: + num_replicas = distribution_strategy.num_replicas_in_sync + local_batch_size = params["decode_batch_size"] // num_replicas + for i, text in enumerate(input_generator()): + if distribution_strategy: + text = np.reshape(text, [num_replicas, local_batch_size, -1]) + # Add tag to the input of each replica with the reordering logic after + # outputs, to ensure the output order matches the input order. + text = tf.constant(text) + + @tf.function + def text_as_per_replica(): + replica_context = tf.distribute.get_replica_context() + replica_id = replica_context.replica_id_in_sync_group + return replica_id, text[replica_id] + + text = distribution_strategy.run(text_as_per_replica) + outputs = distribution_strategy.experimental_local_results( + predict_step(text)) + tags, unordered_val_outputs = outputs[0] + tags = [tag.numpy() for tag in tags._values] + unordered_val_outputs = [ + val_output.numpy() for val_output in unordered_val_outputs._values] + # pylint: enable=protected-access + val_outputs = [None] * len(tags) + for k in range(len(tags)): + val_outputs[tags[k]] = unordered_val_outputs[k] + val_outputs = np.reshape(val_outputs, [params["decode_batch_size"], -1]) + else: + val_outputs, _ = model.predict(text) + + length = len(val_outputs) + for j in range(length): + if j + i * batch_size < total_samples: + translation = _trim_and_decode(val_outputs[j], subtokenizer) + translations.append(translation) + if print_all_translations: + logging.info("Translating:\n\tInput: %s\n\tOutput: %s", + sorted_inputs[j + i * batch_size], translation) + + # Write translations in the order they appeared in the original file. 
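+ # translations[i] corresponds to sorted_inputs[i], and
+ # sorted_keys[original_index] is that line's position in the sorted list,
+ # so the loop "for i in sorted_keys" below emits the translation of
+ # original line 0 first, then line 1, and so on.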
+ if output_file is not None: + if tf.io.gfile.isdir(output_file): + raise ValueError("File output is a directory, will not save outputs to " + "file.") + logging.info("Writing to file %s", output_file) + with tf.compat.v1.gfile.Open(output_file, "w") as f: + for i in sorted_keys: + f.write("%s\n" % translations[i]) + + +def translate_from_text(model, subtokenizer, txt): + encoded_txt = _encode_and_add_eos(txt, subtokenizer) + result = model.predict(encoded_txt) + outputs = result["outputs"] + logging.info("Original: \"%s\"", txt) + translate_from_input(outputs, subtokenizer) + + +def translate_from_input(outputs, subtokenizer): + translation = _trim_and_decode(outputs, subtokenizer) + logging.info("Translation: \"%s\"", translation) diff --git a/models/official/nlp/transformer/utils/__init__.py b/models/official/nlp/transformer/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/official/nlp/transformer/utils/metrics.py b/models/official/nlp/transformer/utils/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..7900cf807768f81af7a8afeee1f467074b04189f --- /dev/null +++ b/models/official/nlp/transformer/utils/metrics.py @@ -0,0 +1,490 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Functions for calculating loss, accuracy, and other model metrics. + +Metrics: + - Padded loss, accuracy, and negative log perplexity. Source: + https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/metrics.py + - BLEU approximation. Source: + https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py + - ROUGE score. Source: + https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/rouge.py +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import math + +import numpy as np +import six +from six.moves import xrange # pylint: disable=redefined-builtin +import tensorflow.compat.v1 as tf + + +def _pad_tensors_to_same_length(x, y): + """Pad x and y so that the results have the same length (second dimension).""" + with tf.name_scope("pad_to_same_length"): + x_length = tf.shape(x)[1] + y_length = tf.shape(y)[1] + + max_length = tf.maximum(x_length, y_length) + + x = tf.pad(x, [[0, 0], [0, max_length - x_length], [0, 0]]) + y = tf.pad(y, [[0, 0], [0, max_length - y_length]]) + return x, y + + +def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size): + """Calculate cross entropy loss while ignoring padding. 
+ + Args: + logits: Tensor of size [batch_size, length_logits, vocab_size] + labels: Tensor of size [batch_size, length_labels] + smoothing: Label smoothing constant, used to determine the on and off values + vocab_size: int size of the vocabulary + Returns: + Returns the cross entropy loss and weight tensors: float32 tensors with + shape [batch_size, max(length_logits, length_labels)] + """ + with tf.name_scope("loss", values=[logits, labels]): + logits, labels = _pad_tensors_to_same_length(logits, labels) + + # Calculate smoothing cross entropy + with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]): + confidence = 1.0 - smoothing + low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1) + soft_targets = tf.one_hot( + tf.cast(labels, tf.int32), + depth=vocab_size, + on_value=confidence, + off_value=low_confidence) + xentropy = tf.nn.softmax_cross_entropy_with_logits_v2( + logits=logits, labels=soft_targets) + + # Calculate the best (lowest) possible value of cross entropy, and + # subtract from the cross entropy loss. + normalizing_constant = -( + confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) * + low_confidence * tf.log(low_confidence + 1e-20)) + xentropy -= normalizing_constant + + weights = tf.to_float(tf.not_equal(labels, 0)) + return xentropy * weights, weights + + +def _convert_to_eval_metric(metric_fn): + """Wrap a metric fn that returns scores and weights as an eval metric fn. + + The input metric_fn returns values for the current batch. The wrapper + aggregates the return values collected over all of the batches evaluated. + + Args: + metric_fn: function that returns scores and weights for the current batch's + logits and predicted labels. + + Returns: + function that aggregates the scores and weights from metric_fn. + """ + def problem_metric_fn(*args): + """Returns an aggregation of the metric_fn's returned values.""" + (scores, weights) = metric_fn(*args) + + # The tf.metrics.mean function assures correct aggregation. + return tf.metrics.mean(scores, weights) + return problem_metric_fn + + +def get_eval_metrics(logits, labels, params): + """Return dictionary of model evaluation metrics.""" + metrics = { + "accuracy": _convert_to_eval_metric(padded_accuracy)(logits, labels), + "accuracy_top5": _convert_to_eval_metric(padded_accuracy_top5)( + logits, labels), + "accuracy_per_sequence": _convert_to_eval_metric( + padded_sequence_accuracy)(logits, labels), + "neg_log_perplexity": _convert_to_eval_metric(padded_neg_log_perplexity)( + logits, labels, params["vocab_size"]), + } + + if not params["use_tpu"]: + # TPU does not support tf.py_func + metrics.update({ + "approx_bleu_score": _convert_to_eval_metric( + bleu_score)(logits, labels), + "rouge_2_fscore": _convert_to_eval_metric( + rouge_2_fscore)(logits, labels), + "rouge_L_fscore": _convert_to_eval_metric( + rouge_l_fscore)(logits, labels), + }) + + # Prefix each of the metric names with "metrics/". This allows the metric + # graphs to display under the "metrics" category in TensorBoard. 
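+ # For example, "accuracy" becomes "metrics/accuracy" and
+ # "approx_bleu_score" becomes "metrics/approx_bleu_score".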
+ metrics = {"metrics/%s" % k: v for k, v in six.iteritems(metrics)} + return metrics + + +def padded_accuracy(logits, labels): + """Percentage of times that predictions matches labels on non-0s.""" + with tf.variable_scope("padded_accuracy", values=[logits, labels]): + logits, labels = _pad_tensors_to_same_length(logits, labels) + weights = tf.to_float(tf.not_equal(labels, 0)) + outputs = tf.to_int32(tf.argmax(logits, axis=-1)) + padded_labels = tf.to_int32(labels) + return tf.to_float(tf.equal(outputs, padded_labels)), weights + + +def padded_accuracy_topk(logits, labels, k): + """Percentage of times that top-k predictions matches labels on non-0s.""" + with tf.variable_scope("padded_accuracy_topk", values=[logits, labels]): + logits, labels = _pad_tensors_to_same_length(logits, labels) + weights = tf.to_float(tf.not_equal(labels, 0)) + effective_k = tf.minimum(k, tf.shape(logits)[-1]) + _, outputs = tf.nn.top_k(logits, k=effective_k) + outputs = tf.to_int32(outputs) + padded_labels = tf.to_int32(labels) + padded_labels = tf.expand_dims(padded_labels, axis=-1) + padded_labels += tf.zeros_like(outputs) # Pad to same shape. + same = tf.to_float(tf.equal(outputs, padded_labels)) + same_topk = tf.reduce_sum(same, axis=-1) + return same_topk, weights + + +def padded_accuracy_top5(logits, labels): + return padded_accuracy_topk(logits, labels, 5) + + +def padded_sequence_accuracy(logits, labels): + """Percentage of times that predictions matches labels everywhere (non-0).""" + with tf.variable_scope("padded_sequence_accuracy", values=[logits, labels]): + logits, labels = _pad_tensors_to_same_length(logits, labels) + weights = tf.to_float(tf.not_equal(labels, 0)) + outputs = tf.to_int32(tf.argmax(logits, axis=-1)) + padded_labels = tf.to_int32(labels) + not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights + axis = list(range(1, len(outputs.get_shape()))) + correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis)) + return correct_seq, tf.constant(1.0) + + +def padded_neg_log_perplexity(logits, labels, vocab_size): + """Average log-perplexity excluding padding 0s. No smoothing.""" + num, den = padded_cross_entropy_loss(logits, labels, 0, vocab_size) + return -num, den + + +def bleu_score(logits, labels): + """Approximate BLEU score computation between labels and predictions. + + An approximate BLEU scoring method since we do not glue word pieces or + decode the ids and tokenize the output. By default, we use ngram order of 4 + and use brevity penalty. Also, this does not have beam search. + + Args: + logits: Tensor of size [batch_size, length_logits, vocab_size] + labels: Tensor of size [batch-size, length_labels] + + Returns: + bleu: int, approx bleu score + """ + predictions = tf.to_int32(tf.argmax(logits, axis=-1)) + # TODO: Look into removing use of py_func + bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32) + return bleu, tf.constant(1.0) + + +def _get_ngrams_with_counter(segment, max_order): + """Extracts all n-grams up to a given maximum order from an input segment. + + Args: + segment: text segment from which n-grams will be extracted. + max_order: maximum length in tokens of the n-grams returned by this + methods. + + Returns: + The Counter containing all n-grams upto max_order in segment + with a count of how many times each n-gram occurred. 
+ """ + ngram_counts = collections.Counter() + for order in xrange(1, max_order + 1): + for i in xrange(0, len(segment) - order + 1): + ngram = tuple(segment[i:i + order]) + ngram_counts[ngram] += 1 + return ngram_counts + + +def compute_bleu(reference_corpus, translation_corpus, max_order=4, + use_bp=True): + """Computes BLEU score of translated segments against one or more references. + + Args: + reference_corpus: list of references for each translation. Each + reference should be tokenized into a list of tokens. + translation_corpus: list of translations to score. Each translation + should be tokenized into a list of tokens. + max_order: Maximum n-gram order to use when computing BLEU score. + use_bp: boolean, whether to apply brevity penalty. + + Returns: + BLEU score. + """ + reference_length = 0 + translation_length = 0 + bp = 1.0 + geo_mean = 0 + + matches_by_order = [0] * max_order + possible_matches_by_order = [0] * max_order + precisions = [] + + for (references, translations) in zip(reference_corpus, translation_corpus): + reference_length += len(references) + translation_length += len(translations) + ref_ngram_counts = _get_ngrams_with_counter(references, max_order) + translation_ngram_counts = _get_ngrams_with_counter(translations, max_order) + + overlap = dict((ngram, + min(count, translation_ngram_counts[ngram])) + for ngram, count in ref_ngram_counts.items()) + + for ngram in overlap: + matches_by_order[len(ngram) - 1] += overlap[ngram] + for ngram in translation_ngram_counts: + possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[ + ngram] + + precisions = [0] * max_order + smooth = 1.0 + + for i in xrange(0, max_order): + if possible_matches_by_order[i] > 0: + precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i] + if matches_by_order[i] > 0: + precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[ + i] + else: + smooth *= 2 + precisions[i] = 1.0 / (smooth * possible_matches_by_order[i]) + else: + precisions[i] = 0.0 + + if max(precisions) > 0: + p_log_sum = sum(math.log(p) for p in precisions if p) + geo_mean = math.exp(p_log_sum / max_order) + + if use_bp: + ratio = translation_length / reference_length + bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0 + bleu = geo_mean * bp + return np.float32(bleu) + + +def rouge_2_fscore(logits, labels): + """ROUGE-2 F1 score computation between labels and predictions. + + This is an approximate ROUGE scoring method since we do not glue word pieces + or decode the ids and tokenize the output. + + Args: + logits: tensor, model predictions + labels: tensor, gold output. + + Returns: + rouge2_fscore: approx rouge-2 f1 score. + """ + predictions = tf.to_int32(tf.argmax(logits, axis=-1)) + # TODO: Look into removing use of py_func + rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), tf.float32) + return rouge_2_f_score, tf.constant(1.0) + + +def _get_ngrams(n, text): + """Calculates n-grams. + + Args: + n: which n-grams to calculate + text: An array of tokens + + Returns: + A set of n-grams + """ + ngram_set = set() + text_length = len(text) + max_index_ngram_start = text_length - n + for i in range(max_index_ngram_start + 1): + ngram_set.add(tuple(text[i:i + n])) + return ngram_set + + +def rouge_n(eval_sentences, ref_sentences, n=2): + """Computes ROUGE-N f1 score of two text collections of sentences. 
+ + Source: https://www.microsoft.com/en-us/research/publication/ + rouge-a-package-for-automatic-evaluation-of-summaries/ + + Args: + eval_sentences: Predicted sentences. + ref_sentences: Sentences from the reference set + n: Size of ngram. Defaults to 2. + + Returns: + f1 score for ROUGE-N + """ + f1_scores = [] + for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences): + eval_ngrams = _get_ngrams(n, eval_sentence) + ref_ngrams = _get_ngrams(n, ref_sentence) + ref_count = len(ref_ngrams) + eval_count = len(eval_ngrams) + + # Count the overlapping ngrams between evaluated and reference + overlapping_ngrams = eval_ngrams.intersection(ref_ngrams) + overlapping_count = len(overlapping_ngrams) + + # Handle edge case. This isn't mathematically correct, but it's good enough + if eval_count == 0: + precision = 0.0 + else: + precision = float(overlapping_count) / eval_count + if ref_count == 0: + recall = 0.0 + else: + recall = float(overlapping_count) / ref_count + f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8))) + + # return overlapping_count / reference_count + return np.mean(f1_scores, dtype=np.float32) + + +def rouge_l_fscore(predictions, labels): + """ROUGE scores computation between labels and predictions. + + This is an approximate ROUGE scoring method since we do not glue word pieces + or decode the ids and tokenize the output. + + Args: + predictions: tensor, model predictions + labels: tensor, gold output. + + Returns: + rouge_l_fscore: approx rouge-l f1 score. + """ + outputs = tf.to_int32(tf.argmax(predictions, axis=-1)) + rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels), + tf.float32) + return rouge_l_f_score, tf.constant(1.0) + + +def rouge_l_sentence_level(eval_sentences, ref_sentences): + """Computes ROUGE-L (sentence level) of two collections of sentences. + + Source: https://www.microsoft.com/en-us/research/publication/ + rouge-a-package-for-automatic-evaluation-of-summaries/ + + Calculated according to: + R_lcs = LCS(X,Y)/m + P_lcs = LCS(X,Y)/n + F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs) + + where: + X = reference summary + Y = Candidate summary + m = length of reference summary + n = length of candidate summary + + Args: + eval_sentences: The sentences that have been picked by the summarizer + ref_sentences: The sentences from the reference set + + Returns: + A float: F_lcs + """ + + f1_scores = [] + for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences): + m = float(len(ref_sentence)) + n = float(len(eval_sentence)) + lcs = _len_lcs(eval_sentence, ref_sentence) + f1_scores.append(_f_lcs(lcs, m, n)) + return np.mean(f1_scores, dtype=np.float32) + + +def _len_lcs(x, y): + """Returns the length of the Longest Common Subsequence between two seqs. + + Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence + + Args: + x: sequence of words + y: sequence of words + + Returns + integer: Length of LCS between x and y + """ + table = _lcs(x, y) + n, m = len(x), len(y) + return table[n, m] + + +def _lcs(x, y): + """Computes the length of the LCS between two seqs. + + The implementation below uses a DP programming algorithm and runs + in O(nm) time where n = len(x) and m = len(y). 
+ Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence + + Args: + x: collection of words + y: collection of words + + Returns: + Table of dictionary of coord and len lcs + """ + n, m = len(x), len(y) + table = dict() + for i in range(n + 1): + for j in range(m + 1): + if i == 0 or j == 0: + table[i, j] = 0 + elif x[i - 1] == y[j - 1]: + table[i, j] = table[i - 1, j - 1] + 1 + else: + table[i, j] = max(table[i - 1, j], table[i, j - 1]) + return table + + +def _f_lcs(llcs, m, n): + """Computes the LCS-based F-measure score. + + Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/ + rouge-working-note-v1.3.1.pdf + + Args: + llcs: Length of LCS + m: number of words in reference summary + n: number of words in candidate summary + + Returns: + Float. LCS-based F-measure score + """ + r_lcs = llcs / m + p_lcs = llcs / n + beta = p_lcs / (r_lcs + 1e-12) + num = (1 + (beta ** 2)) * r_lcs * p_lcs + denom = r_lcs + ((beta ** 2) * p_lcs) + f_lcs = num / (denom + 1e-12) + return f_lcs diff --git a/models/official/nlp/transformer/utils/tokenizer.py b/models/official/nlp/transformer/utils/tokenizer.py new file mode 100644 index 0000000000000000000000000000000000000000..3749dfe9de6263a4cc185928b7f8967c56250216 --- /dev/null +++ b/models/official/nlp/transformer/utils/tokenizer.py @@ -0,0 +1,660 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Defines Subtokenizer class to encode and decode strings.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import re +import sys +import unicodedata +from absl import logging + +import numpy as np +import six +from six.moves import xrange # pylint: disable=redefined-builtin +import tensorflow as tf + + +# pylint: disable=g-complex-comprehension +PAD = "" +PAD_ID = 0 +EOS = "" +EOS_ID = 1 +RESERVED_TOKENS = [PAD, EOS] + +# Set of characters that will be used in the function _escape_token() (see func +# docstring for more details). +# This set is added to the alphabet list to ensure that all escaped tokens can +# be encoded. +_ESCAPE_CHARS = set(u"\\_u;0123456789") +# Regex for the function _unescape_token(), the inverse of _escape_token(). +# This is used to find "\u", "\\", and "\###;" substrings in the token. +_UNESCAPE_REGEX = re.compile(r"\\u|\\\\|\\([0-9]+);") + +_UNDEFINED_UNICODE = u"\u3013" + + +def alphanumeric_char_set(): + return set( + six.unichr(i) + for i in xrange(sys.maxunicode) + if (unicodedata.category(six.unichr(i)).startswith("L") or + unicodedata.category(six.unichr(i)).startswith("N"))) + + +# Set contains all letter and number characters. +_ALPHANUMERIC_CHAR_SET = alphanumeric_char_set() + +# min_count is the minimum number of times a subtoken must appear in the data +# before before it is added to the vocabulary. 
The value is found using binary +# search to obtain the target vocabulary size. +_MIN_MIN_COUNT = 1 # min value to use when binary searching for min_count +_MAX_MIN_COUNT = 1000 # max value to use when binary searching for min_count + + +class Subtokenizer(object): + """Encodes and decodes strings to/from integer IDs.""" + + def __init__(self, vocab_file, reserved_tokens=None, master_char_set=None): + """Initializes class, creating a vocab file if data_files is provided.""" + logging.info("Initializing Subtokenizer from file %s.", vocab_file) + + if master_char_set is None: + master_char_set = _ALPHANUMERIC_CHAR_SET + + if reserved_tokens is None: + reserved_tokens = RESERVED_TOKENS + + self.subtoken_list = _load_vocab_file(vocab_file, reserved_tokens) + self.alphabet = _generate_alphabet_dict(self.subtoken_list) + self.subtoken_to_id_dict = _list_to_index_dict(self.subtoken_list) + + self.max_subtoken_length = 0 + for subtoken in self.subtoken_list: + self.max_subtoken_length = max(self.max_subtoken_length, len(subtoken)) + + # Create cache to speed up subtokenization + self._cache_size = 2**20 + self._cache = [(None, None)] * self._cache_size + self._master_char_set = master_char_set + + @staticmethod + def init_from_files(vocab_file, + files, + target_vocab_size, + threshold, + min_count=None, + file_byte_limit=1e6, + reserved_tokens=None, + correct_strip=True, + master_char_set=None): + """Create subtoken vocabulary based on files, and save vocab to file. + + Args: + vocab_file: String name of vocab file to store subtoken vocabulary. + files: List of file paths that will be used to generate vocabulary. + target_vocab_size: target vocabulary size to generate. + threshold: int threshold of vocabulary size to accept. + min_count: int minimum count to use for generating the vocabulary. The min + count is the minimum number of times a subtoken should appear in the + files before it is added to the vocabulary. If set to none, this value + is found using binary search. + file_byte_limit: (Default 1e6) Maximum number of bytes of sample text that + will be drawn from the files. + reserved_tokens: List of string tokens that are guaranteed to be at the + beginning of the subtoken vocabulary list. + correct_strip: Whether to convert text to unicode before strip. + master_char_set: the char set. 
+ + Returns: + Subtokenizer object + """ + if master_char_set is None: + master_char_set = _ALPHANUMERIC_CHAR_SET + if reserved_tokens is None: + reserved_tokens = RESERVED_TOKENS + + if tf.io.gfile.exists(vocab_file): + logging.info("Vocab file already exists (%s)", vocab_file) + else: + logging.info("Begin steps to create subtoken vocabulary...") + token_counts = _count_tokens(files, file_byte_limit, correct_strip, + master_char_set) + alphabet = _generate_alphabet_dict(token_counts) + subtoken_list = _generate_subtokens_with_target_vocab_size( + token_counts, alphabet, target_vocab_size, threshold, min_count, + reserved_tokens) + logging.info("Generated vocabulary with %d subtokens.", + len(subtoken_list)) + _save_vocab_file(vocab_file, subtoken_list) + return Subtokenizer(vocab_file, master_char_set=master_char_set) + + def encode(self, raw_string, add_eos=False): + """Encodes a string into a list of int subtoken ids.""" + ret = [] + tokens = _split_string_to_tokens( + native_to_unicode(raw_string), self._master_char_set) + for token in tokens: + ret.extend(self._token_to_subtoken_ids(token)) + if add_eos: + assert EOS in self.subtoken_list, \ + "Can't append 'EOS' because it is not in list of known subtokens." + ret.append(EOS_ID) + return ret + + def _token_to_subtoken_ids(self, token): + """Encode a single token into a list of subtoken ids.""" + cache_location = hash(token) % self._cache_size + cache_key, cache_value = self._cache[cache_location] + if cache_key == token: + return cache_value + + ret = _split_token_to_subtokens( + _escape_token(token, self.alphabet), self.subtoken_to_id_dict, + self.max_subtoken_length) + ret = [self.subtoken_to_id_dict[subtoken_id] for subtoken_id in ret] + + self._cache[cache_location] = (token, ret) + return ret + + def decode(self, subtokens): + """Converts list of int subtokens ids into a string.""" + if isinstance(subtokens, np.ndarray): + # Note that list(subtokens) converts subtokens to a python list, but the + # items remain as np.int32. This converts both the array and its items. + subtokens = subtokens.tolist() + + if not subtokens: + return "" + + assert isinstance(subtokens, list) and isinstance(subtokens[0], int), ( + "Subtokens argument passed into decode() must be a list of integers.") + + return _unicode_to_native( + _join_tokens_to_string( + self._subtoken_ids_to_tokens(subtokens), self._master_char_set)) + + def _subtoken_ids_to_tokens(self, subtokens): + """Convert list of int subtoken ids to a list of string tokens.""" + escaped_tokens = "".join([ + self.subtoken_list[s] for s in subtokens if s < len(self.subtoken_list) + ]) + escaped_tokens = escaped_tokens.split("_") + + # All tokens in the vocabulary list have been escaped (see _escape_token()) + # so each token must be unescaped when decoding. 
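+ # Illustration with a hypothetical token whose characters are all in the
+ # alphabet: _escape_token() turns "A_b" into "A\ub_" (the underscore is
+ # rewritten as "\u" and a trailing "_" marks the token boundary), so after
+ # the split("_") above, _unescape_token() maps "A\ub" back to "A_b".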
+ ret = [] + for token in escaped_tokens: + if token: + ret.append(_unescape_token(token)) + return ret + + +def _save_vocab_file(vocab_file, subtoken_list): + """Save subtokens to file.""" + with tf.io.gfile.GFile(vocab_file, mode="w") as f: + for subtoken in subtoken_list: + f.write("'%s'\n" % _unicode_to_native(subtoken)) + + +def _load_vocab_file(vocab_file, reserved_tokens=None): + """Load vocabulary while ensuring reserved tokens are at the top.""" + if reserved_tokens is None: + reserved_tokens = RESERVED_TOKENS + + subtoken_list = [] + with tf.io.gfile.GFile(vocab_file, mode="r") as f: + for line in f: + subtoken = native_to_unicode(line.strip()) + subtoken = subtoken[1:-1] # Remove surrounding single-quotes + if subtoken in reserved_tokens: + continue + subtoken_list.append(native_to_unicode(subtoken)) + return reserved_tokens + subtoken_list + + +def native_to_unicode(s): + """Convert string to unicode (required in Python 2).""" + try: # Python 2 + return s if isinstance(s, unicode) else s.decode("utf-8") + except NameError: # Python 3 + return s + + +def _unicode_to_native(s): + """Convert string from unicode to native format (required in Python 2).""" + try: # Python 2 + return s.encode("utf-8") if isinstance(s, unicode) else s + except NameError: # Python 3 + return s + + +def _split_string_to_tokens(text, master_char_set): + """Splits text to a list of string tokens.""" + if not text: + return [] + ret = [] + token_start = 0 + # Classify each character in the input string + is_master = [c in master_char_set for c in text] + for pos in xrange(1, len(text)): + if is_master[pos] != is_master[pos - 1]: + token = text[token_start:pos] + if token != u" " or token_start == 0: + ret.append(token) + token_start = pos + final_token = text[token_start:] + ret.append(final_token) + return ret + + +def _join_tokens_to_string(tokens, master_char_set): + """Join a list of string tokens into a single string.""" + token_is_master = [t[0] in master_char_set for t in tokens] + ret = [] + for i, token in enumerate(tokens): + if i > 0 and token_is_master[i - 1] and token_is_master[i]: + ret.append(u" ") + ret.append(token) + return "".join(ret) + + +def _escape_token(token, alphabet): + r"""Replace characters that aren't in the alphabet and append "_" to token. + + Apply three transformations to the token: + 1. Replace underline character "_" with "\u", and backslash "\" with "\\". + 2. Replace characters outside of the alphabet with "\###;", where ### is the + character's Unicode code point. + 3. Appends "_" to mark the end of a token. + + Args: + token: unicode string to be escaped + alphabet: list of all known characters + + Returns: + escaped string + """ + token = token.replace(u"\\", u"\\\\").replace(u"_", u"\\u") + ret = [c if c in alphabet and c != u"\n" else r"\%d;" % ord(c) for c in token] + return u"".join(ret) + "_" + + +def _unescape_token(token): + r"""Replaces escaped characters in the token with their unescaped versions. + + Applies inverse transformations as _escape_token(): + 1. Replace "\u" with "_", and "\\" with "\". + 2. Replace "\###;" with the unicode character the ### refers to. + + Args: + token: escaped string + + Returns: + unescaped string + """ + + def match(m): + r"""Returns replacement string for matched object. + + Matched objects contain one of the strings that matches the regex pattern: + r"\\u|\\\\|\\([0-9]+);" + The strings can be '\u', '\\', or '\###;' (### is any digit number). + + m.group(0) refers to the entire matched string ('\u', '\\', or '\###;'). 
+ m.group(1) refers to the first parenthesized subgroup ('###'). + + m.group(0) exists for all match objects, while m.group(1) exists only for + the string '\###;'. + + This function looks to see if m.group(1) exists. If it doesn't, then the + matched string must be '\u' or '\\' . In this case, the corresponding + replacement ('_' and '\') are returned. Note that in python, a single + backslash is written as '\\', and double backslash as '\\\\'. + + If m.goup(1) exists, then use the integer in m.group(1) to return a + unicode character. + + Args: + m: match object + + Returns: + String to replace matched object with. + """ + # Check if the matched strings are '\u' or '\\'. + if m.group(1) is None: + return u"_" if m.group(0) == u"\\u" else u"\\" + + # If m.group(1) exists, try and return unicode character. + try: + return six.unichr(int(m.group(1))) + except (ValueError, OverflowError) as _: + return _UNDEFINED_UNICODE + + # Use match function to replace escaped substrings in the token. + return _UNESCAPE_REGEX.sub(match, token) + + +def _count_tokens(files, + file_byte_limit=1e6, + correct_strip=True, + master_char_set=None): + """Return token counts of words in the files. + + Samples file_byte_limit bytes from each file, and counts the words that appear + in the samples. The samples are semi-evenly distributed across the file. + + Args: + files: List of filepaths + file_byte_limit: Max number of bytes that will be read from each file. + correct_strip: Whether to convert text to unicode before strip. This affects + vocabulary generation for PY2. Sets correct_strip to False in PY2 to + reproduce previous common public result. Sets correct_strip to True will + let PY2 and PY3 get a consistent vocabulary. + master_char_set: the char set. + + Returns: + Dictionary mapping tokens to the number of times they appear in the sampled + lines from the files. + """ + if master_char_set is None: + master_char_set = _ALPHANUMERIC_CHAR_SET + + token_counts = collections.defaultdict(int) + + for filepath in files: + with tf.io.gfile.GFile(filepath, mode="r") as reader: + file_byte_budget = file_byte_limit + counter = 0 + lines_to_skip = int(reader.size() / (file_byte_budget * 2)) + for line in reader: + if counter < lines_to_skip: + counter += 1 + else: + if file_byte_budget < 0: + break + if correct_strip: + line = native_to_unicode(line) + line = line.strip() + file_byte_budget -= len(line) + counter = 0 + + # Add words to token counts + for token in _split_string_to_tokens( + native_to_unicode(line), master_char_set): + token_counts[token] += 1 + return token_counts + + +def _list_to_index_dict(lst): + """Create dictionary mapping list items to their indices in the list.""" + return {item: n for n, item in enumerate(lst)} + + +def _split_token_to_subtokens(token, subtoken_dict, max_subtoken_length): + """Splits a token into subtokens defined in the subtoken dict.""" + ret = [] + start = 0 + token_len = len(token) + while start < token_len: + # Find the longest subtoken, so iterate backwards. + for end in xrange(min(token_len, start + max_subtoken_length), start, -1): + subtoken = token[start:end] + if subtoken in subtoken_dict: + ret.append(subtoken) + start = end + break + else: # Did not break + # If there is no possible encoding of the escaped token then one of the + # characters in the token is not in the alphabet. This should be + # impossible and would be indicative of a bug. + raise ValueError("Was unable to split token \"%s\" into subtokens." 
% + token) + return ret + + +def _generate_subtokens_with_target_vocab_size(token_counts, + alphabet, + target_size, + threshold, + min_count=None, + reserved_tokens=None): + """Generate subtoken vocabulary close to the target size.""" + if reserved_tokens is None: + reserved_tokens = RESERVED_TOKENS + + if min_count is not None: + logging.info("Using min_count=%d to generate vocab with target size %d", + min_count, target_size) + return _generate_subtokens( + token_counts, alphabet, min_count, reserved_tokens=reserved_tokens) + + def bisect(min_val, max_val): + """Recursive function to binary search for subtoken vocabulary.""" + cur_count = (min_val + max_val) // 2 + logging.info("Binary search: trying min_count=%d (%d %d)", cur_count, + min_val, max_val) + subtoken_list = _generate_subtokens( + token_counts, alphabet, cur_count, reserved_tokens=reserved_tokens) + + val = len(subtoken_list) + logging.info("Binary search: min_count=%d resulted in %d tokens", cur_count, + val) + + within_threshold = abs(val - target_size) < threshold + if within_threshold or min_val >= max_val or cur_count < 2: + return subtoken_list + if val > target_size: + other_subtoken_list = bisect(cur_count + 1, max_val) + else: + other_subtoken_list = bisect(min_val, cur_count - 1) + + # Return vocabulary dictionary with the closest number of tokens. + other_val = len(other_subtoken_list) + if abs(other_val - target_size) < abs(val - target_size): + return other_subtoken_list + return subtoken_list + + logging.info("Finding best min_count to get target size of %d", target_size) + return bisect(_MIN_MIN_COUNT, _MAX_MIN_COUNT) + + +def _generate_alphabet_dict(iterable, reserved_tokens=None): + """Create set of characters that appear in any element in the iterable.""" + if reserved_tokens is None: + reserved_tokens = RESERVED_TOKENS + alphabet = {c for token in iterable for c in token} + alphabet |= {c for token in reserved_tokens for c in token} + alphabet |= _ESCAPE_CHARS # Add escape characters to alphabet set. + return alphabet + + +def _count_and_gen_subtokens(token_counts, alphabet, subtoken_dict, + max_subtoken_length): + """Count number of times subtokens appear, and generate new subtokens. + + Args: + token_counts: dict mapping tokens to the number of times they appear in the + original files. + alphabet: list of allowed characters. Used to escape the tokens, which + guarantees that all tokens can be split into subtokens. + subtoken_dict: dict mapping subtokens to ids. + max_subtoken_length: maximum length of subtoken in subtoken_dict. + + Returns: + A defaultdict mapping subtokens to the number of times they appear in the + tokens. The dict may contain new subtokens. + """ + subtoken_counts = collections.defaultdict(int) + for token, count in six.iteritems(token_counts): + token = _escape_token(token, alphabet) + subtokens = _split_token_to_subtokens(token, subtoken_dict, + max_subtoken_length) + + # Generate new subtokens by taking substrings from token. + start = 0 + for subtoken in subtokens: + for end in xrange(start + 1, len(token) + 1): + new_subtoken = token[start:end] + subtoken_counts[new_subtoken] += count + start += len(subtoken) + + return subtoken_counts + + +def _filter_and_bucket_subtokens(subtoken_counts, min_count): + """Return a bucketed list of subtokens that are filtered by count. + + Args: + subtoken_counts: defaultdict mapping subtokens to their counts + min_count: int count used to filter subtokens + + Returns: + List of subtoken sets, where subtokens in set i have the same length=i. 
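+
+  For example, with min_count=3 and subtoken counts
+  {"a": 2, "b": 4, "c": 1, "ab": 6, "ac": 3, "abbc": 5}, the returned buckets
+  are [set(), {"b"}, {"ab", "ac"}, set(), {"abbc"}]: "a" and "c" are dropped
+  because they appear fewer than min_count times.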
+  """
+  # Create list of buckets, where subtokens in bucket i have length i.
+  subtoken_buckets = []
+  for subtoken, count in six.iteritems(subtoken_counts):
+    if count < min_count:  # Filter out subtokens that don't appear enough
+      continue
+    while len(subtoken_buckets) <= len(subtoken):
+      subtoken_buckets.append(set())
+    subtoken_buckets[len(subtoken)].add(subtoken)
+  return subtoken_buckets
+
+
+def _gen_new_subtoken_list(subtoken_counts,
+                           min_count,
+                           alphabet,
+                           reserved_tokens=None):
+  """Generate candidate subtokens ordered by count, and new max subtoken length.
+
+  Add subtokens to the candidate list in order of length (longest subtokens
+  first). When a subtoken is added, the counts of each of its prefixes are
+  decreased. Prefixes that don't appear much outside the subtoken are not added
+  to the candidate list.
+
+  For example:
+    subtoken being added to candidate list: 'translate'
+    subtoken_counts: {'translate':10, 't':40, 'tr':16, 'tra':12, ...}
+    min_count: 5
+
+  When 'translate' is added, subtoken_counts is updated to:
+    {'translate':0, 't':30, 'tr':6, 'tra': 2, ...}
+
+  The subtoken 'tra' will not be added to the candidate list, because it appears
+  twice (less than min_count) outside of 'translate'.
+
+  Args:
+    subtoken_counts: defaultdict mapping str subtokens to int counts
+    min_count: int minimum count requirement for subtokens
+    alphabet: set of characters. Each character is added to the subtoken list
+      to guarantee that all tokens can be encoded.
+    reserved_tokens: list of tokens that will be added to the beginning of the
+      returned subtoken list.
+
+  Returns:
+    List of candidate subtokens in decreasing count order, and maximum subtoken
+    length
+  """
+  if reserved_tokens is None:
+    reserved_tokens = RESERVED_TOKENS
+
+  # Create a list of (count, subtoken) for each candidate subtoken.
+  subtoken_candidates = []
+
+  # Use bucketed list to iterate through subtokens in order of length.
+  # subtoken_buckets[i] = set(subtokens), where each subtoken has length i.
+  subtoken_buckets = _filter_and_bucket_subtokens(subtoken_counts, min_count)
+  max_subtoken_length = len(subtoken_buckets) - 1
+
+  # Go through the list in reverse order to consider longer subtokens first.
+  for subtoken_len in xrange(max_subtoken_length, 0, -1):
+    for subtoken in subtoken_buckets[subtoken_len]:
+      count = subtoken_counts[subtoken]
+
+      # Possible if this subtoken is a prefix of another token.
+      if count < min_count:
+        continue
+
+      # Ignore alphabet/reserved tokens, which will be added manually later.
+      if subtoken not in alphabet and subtoken not in reserved_tokens:
+        subtoken_candidates.append((count, subtoken))
+
+      # Decrement count of the subtoken's prefixes (if a longer subtoken is
+      # added, its prefixes lose priority to be added).
+      for end in xrange(1, subtoken_len):
+        subtoken_counts[subtoken[:end]] -= count
+
+  # Add alphabet subtokens (guarantees that all strings are encodable).
+  subtoken_candidates.extend((subtoken_counts.get(a, 0), a) for a in alphabet)
+
+  # Order subtoken candidates by decreasing count.
+  subtoken_list = [t for _, t in sorted(subtoken_candidates, reverse=True)]
+
+  # Add reserved tokens to beginning of the list.
+  subtoken_list = reserved_tokens + subtoken_list
+  return subtoken_list, max_subtoken_length
+
+
+def _generate_subtokens(token_counts,
+                        alphabet,
+                        min_count,
+                        num_iterations=4,
+                        reserved_tokens=None):
+  """Create a list of subtokens in decreasing order of frequency.
+ + Args: + token_counts: dict mapping str tokens -> int count + alphabet: set of characters + min_count: int minimum number of times a subtoken must appear before it is + added to the vocabulary. + num_iterations: int number of iterations to generate new tokens. + reserved_tokens: list of tokens that will be added to the beginning to the + returned subtoken list. + + Returns: + Sorted list of subtokens (most frequent first) + """ + if reserved_tokens is None: + reserved_tokens = RESERVED_TOKENS + + # Use alphabet set to create initial list of subtokens + subtoken_list = reserved_tokens + list(alphabet) + max_subtoken_length = 1 + + # On each iteration, segment all words using the subtokens defined in + # subtoken_dict, count how often the resulting subtokens appear, and update + # the dictionary with subtokens w/ high enough counts. + for i in xrange(num_iterations): + logging.info("\tGenerating subtokens: iteration %d", i) + # Generate new subtoken->id dictionary using the new subtoken list. + subtoken_dict = _list_to_index_dict(subtoken_list) + + # Create dict mapping subtoken->count, with additional subtokens created + # from substrings taken from the tokens. + subtoken_counts = _count_and_gen_subtokens(token_counts, alphabet, + subtoken_dict, + max_subtoken_length) + + # Generate new list of subtokens sorted by subtoken count. + subtoken_list, max_subtoken_length = _gen_new_subtoken_list( + subtoken_counts, min_count, alphabet, reserved_tokens) + + logging.info("\tVocab size: %d", len(subtoken_list)) + return subtoken_list diff --git a/models/official/nlp/transformer/utils/tokenizer_test.py b/models/official/nlp/transformer/utils/tokenizer_test.py new file mode 100644 index 0000000000000000000000000000000000000000..307398fd3aeaf55a5bec495006a1fb65ebadd639 --- /dev/null +++ b/models/official/nlp/transformer/utils/tokenizer_test.py @@ -0,0 +1,204 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Test Subtokenizer and string helper methods.""" + +import collections +import tempfile + +import tensorflow as tf + +from official.nlp.transformer.utils import tokenizer + + +class SubtokenizerTest(tf.test.TestCase): + + def _init_subtokenizer(self, vocab_list): + temp_file = tempfile.NamedTemporaryFile(delete=False) + with tf.io.gfile.GFile(temp_file.name, "w") as w: + for subtoken in vocab_list: + w.write("'%s'" % subtoken) + w.write("\n") + return tokenizer.Subtokenizer(temp_file.name, reserved_tokens=[]) + + def test_encode(self): + vocab_list = ["123_", "test", "ing_"] + subtokenizer = self._init_subtokenizer(vocab_list) + s = "testing 123" + encoded_list = subtokenizer.encode(s) + self.assertEqual([1, 2, 0], encoded_list) + + def test_decode(self): + vocab_list = ["123_", "test", "ing_"] + subtokenizer = self._init_subtokenizer(vocab_list) + encoded_list = [1, 2, 0] # testing 123 + decoded_str = subtokenizer.decode(encoded_list) + self.assertEqual("testing 123", decoded_str) + + def test_subtoken_ids_to_tokens(self): + vocab_list = ["123_", "test", "ing_"] + subtokenizer = self._init_subtokenizer(vocab_list) + encoded_list = [1, 2, 0] # testing 123 + token_list = subtokenizer._subtoken_ids_to_tokens(encoded_list) + self.assertEqual([u"testing", u"123"], token_list) + + +class StringHelperTest(tf.test.TestCase): + + def test_split_string_to_tokens(self): + text = "test? testing 123." + + tokens = tokenizer._split_string_to_tokens(text, + tokenizer._ALPHANUMERIC_CHAR_SET) + self.assertEqual(["test", "? ", "testing", "123", "."], tokens) + + def test_join_tokens_to_string(self): + tokens = ["test", "? ", "testing", "123", "."] + + s = tokenizer._join_tokens_to_string(tokens, + tokenizer._ALPHANUMERIC_CHAR_SET) + self.assertEqual("test? 
testing 123.", s) + + def test_escape_token(self): + token = u"abc_\\4" + alphabet = set("abc_\\u;") + + escaped_token = tokenizer._escape_token(token, alphabet) + self.assertEqual("abc\\u\\\\\\52;_", escaped_token) + + def test_unescape_token(self): + escaped_token = u"Underline: \\u, Backslash: \\\\, Unicode: \\52;" + + unescaped_token = tokenizer._unescape_token(escaped_token) + self.assertEqual("Underline: _, Backslash: \\, Unicode: 4", unescaped_token) + + def test_list_to_index_dict(self): + lst = ["test", "strings"] + + d = tokenizer._list_to_index_dict(lst) + self.assertDictEqual({"test": 0, "strings": 1}, d) + + def test_split_token_to_subtokens(self): + token = "abc" + subtoken_dict = {"a": 0, "b": 1, "c": 2, "ab": 3} + max_subtoken_length = 2 + + subtokens = tokenizer._split_token_to_subtokens(token, subtoken_dict, + max_subtoken_length) + self.assertEqual(["ab", "c"], subtokens) + + def test_generate_alphabet_dict(self): + s = ["testing", "123"] + reserved_tokens = ["???"] + + alphabet = tokenizer._generate_alphabet_dict(s, reserved_tokens) + self.assertIn("?", alphabet) + self.assertIn("t", alphabet) + self.assertIn("e", alphabet) + self.assertIn("s", alphabet) + self.assertIn("i", alphabet) + self.assertIn("n", alphabet) + self.assertIn("g", alphabet) + self.assertIn("1", alphabet) + self.assertIn("2", alphabet) + self.assertIn("3", alphabet) + + def test_count_and_gen_subtokens(self): + token_counts = {"abc": 5} + alphabet = set("abc_") + subtoken_dict = {"a": 0, "b": 1, "c": 2, "_": 3} + max_subtoken_length = 2 + + subtoken_counts = tokenizer._count_and_gen_subtokens( + token_counts, alphabet, subtoken_dict, max_subtoken_length) + + self.assertIsInstance(subtoken_counts, collections.defaultdict) + self.assertDictEqual( + { + "a": 5, + "b": 5, + "c": 5, + "_": 5, + "ab": 5, + "bc": 5, + "c_": 5, + "abc": 5, + "bc_": 5, + "abc_": 5 + }, subtoken_counts) + + def test_filter_and_bucket_subtokens(self): + subtoken_counts = collections.defaultdict(int, { + "a": 2, + "b": 4, + "c": 1, + "ab": 6, + "ac": 3, + "abbc": 5 + }) + min_count = 3 + + subtoken_buckets = tokenizer._filter_and_bucket_subtokens( + subtoken_counts, min_count) + + self.assertEqual(len(subtoken_buckets[0]), 0) + self.assertEqual(set("b"), subtoken_buckets[1]) + self.assertEqual(set(["ab", "ac"]), subtoken_buckets[2]) + self.assertEqual(len(subtoken_buckets[3]), 0) + self.assertEqual(set(["abbc"]), subtoken_buckets[4]) + + def test_gen_new_subtoken_list(self): + subtoken_counts = collections.defaultdict(int, { + "translate": 10, + "t": 40, + "tr": 16, + "tra": 12 + }) + min_count = 5 + alphabet = set("translate") + reserved_tokens = ["reserved", "tokens"] + + subtoken_list, max_token_length = tokenizer._gen_new_subtoken_list( + subtoken_counts, min_count, alphabet, reserved_tokens) + + # Check that "tra" isn"t in the list (its count should be decremented to 2, + # so it should not be added to the canddiate list). 
+ self.assertNotIn("tra", subtoken_list) + + self.assertIn("tr", subtoken_list) + self.assertIn("t", subtoken_list) + + self.assertEqual(len("translate"), max_token_length) + + def test_generate_subtokens(self): + token_counts = {"ab": 1, "bc": 3, "abc": 5} + alphabet = set("abc_") + min_count = 100 + num_iterations = 1 + reserved_tokens = ["reserved", "tokens"] + + vocab_list = tokenizer._generate_subtokens(token_counts, alphabet, + min_count, num_iterations, + reserved_tokens) + + # Check that reserved tokens are at the front of the list + self.assertEqual(vocab_list[:2], reserved_tokens) + + # Check that each character in alphabet is in the vocab list + for c in alphabet: + self.assertIn(c, vocab_list) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/nlp/xlnet/README.md b/models/official/nlp/xlnet/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9675f01a57fd26a83ed5103e116257b3664396cb --- /dev/null +++ b/models/official/nlp/xlnet/README.md @@ -0,0 +1,16 @@ +# XLNet: Generalized Autoregressive Pretraining for Language Understanding + +The academic paper which describes XLNet in detail and provides full results on +a number of tasks can be found here: https://arxiv.org/abs/1906.08237. + +**Instructions and user guide will be added soon.** + +XLNet is a generalized autoregressive BERT-like pretraining language model that +enables learning bidirectional contexts by maximizing the expected likelihood +over all permutations of the factorization order. It can learn dependency beyond +a fixed length without disrupting temporal coherence by using segment-level +recurrence mechanism and relative positional encoding scheme introduced in +[Transformer-XL](https://arxiv.org/pdf/1901.02860.pdf). XLNet outperforms BERT +on 20 NLP benchmark tasks and achieves state-of-the-art results on 18 tasks +including question answering, natural language inference, sentiment analysis, +and document ranking. diff --git a/models/official/nlp/xlnet/__init__.py b/models/official/nlp/xlnet/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/models/official/nlp/xlnet/__init__.py @@ -0,0 +1 @@ + diff --git a/models/official/nlp/xlnet/classifier_utils.py b/models/official/nlp/xlnet/classifier_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..64363e322633f7ae43d6ffc65c99ee1beff36827 --- /dev/null +++ b/models/official/nlp/xlnet/classifier_utils.py @@ -0,0 +1,162 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utilities for pre-processing classification data.""" +from absl import logging + +from official.nlp.xlnet import data_utils + +SEG_ID_A = 0 +SEG_ID_B = 1 + + +class PaddingInputExample(object): + """Fake example so the num input examples is a multiple of the batch size. 
+ + When running eval/predict on the TPU, we need to pad the number of examples + to be a multiple of the batch size, because the TPU requires a fixed batch + size. The alternative is to drop the last batch, which is bad because it means + the entire output data won't be generated. + We use this class instead of `None` because treating `None` as padding + battches could cause silent errors. + """ + + +class InputFeatures(object): + """A single set of features of data.""" + + def __init__(self, + input_ids, + input_mask, + segment_ids, + label_id, + is_real_example=True): + self.input_ids = input_ids + self.input_mask = input_mask + self.segment_ids = segment_ids + self.label_id = label_id + self.is_real_example = is_real_example + + +def _truncate_seq_pair(tokens_a, tokens_b, max_length): + """Truncates a sequence pair in place to the maximum length.""" + + # This is a simple heuristic which will always truncate the longer sequence + # one token at a time. This makes more sense than truncating an equal percent + # of tokens from each, since if one sequence is very short then each token + # that's truncated likely contains more information than a longer sequence. + while True: + total_length = len(tokens_a) + len(tokens_b) + if total_length <= max_length: + break + if len(tokens_a) > len(tokens_b): + tokens_a.pop() + else: + tokens_b.pop() + + +def convert_single_example(example_index, example, label_list, max_seq_length, + tokenize_fn, use_bert_format): + """Converts a single `InputExample` into a single `InputFeatures`.""" + + if isinstance(example, PaddingInputExample): + return InputFeatures( + input_ids=[0] * max_seq_length, + input_mask=[1] * max_seq_length, + segment_ids=[0] * max_seq_length, + label_id=0, + is_real_example=False) + + if label_list is not None: + label_map = {} + for (i, label) in enumerate(label_list): + label_map[label] = i + + tokens_a = tokenize_fn(example.text_a) + tokens_b = None + if example.text_b: + tokens_b = tokenize_fn(example.text_b) + + if tokens_b: + # Modifies `tokens_a` and `tokens_b` in place so that the total + # length is less than the specified length. + # Account for two [SEP] & one [CLS] with "- 3" + _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) + else: + # Account for one [SEP] & one [CLS] with "- 2" + if len(tokens_a) > max_seq_length - 2: + tokens_a = tokens_a[:max_seq_length - 2] + + tokens = [] + segment_ids = [] + for token in tokens_a: + tokens.append(token) + segment_ids.append(SEG_ID_A) + tokens.append(data_utils.SEP_ID) + segment_ids.append(SEG_ID_A) + + if tokens_b: + for token in tokens_b: + tokens.append(token) + segment_ids.append(SEG_ID_B) + tokens.append(data_utils.SEP_ID) + segment_ids.append(SEG_ID_B) + + if use_bert_format: + tokens.insert(0, data_utils.CLS_ID) + segment_ids.insert(0, data_utils.SEG_ID_CLS) + else: + tokens.append(data_utils.CLS_ID) + segment_ids.append(data_utils.SEG_ID_CLS) + + input_ids = tokens + + # The mask has 0 for real tokens and 1 for padding tokens. Only real + # tokens are attended to. + input_mask = [0] * len(input_ids) + + # Zero-pad up to the sequence length. 
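+  # Illustration of the layout built above when use_bert_format=False (the
+  # XLNet convention, which places [CLS] at the end rather than the front):
+  #   tokens:      tokens_a [SEP] tokens_b [SEP] [CLS]
+  #   segment_ids: SEG_ID_A ...   SEG_ID_B ...   SEG_ID_CLS
+  #   input_mask:  0 for every real token (0/1 is inverted relative to BERT).
+  # The padding added below goes on the LEFT in the XLNet format (id 0, mask 1,
+  # data_utils.SEG_ID_PAD), and on the RIGHT when use_bert_format=True.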
+ if len(input_ids) < max_seq_length: + delta_len = max_seq_length - len(input_ids) + if use_bert_format: + input_ids = input_ids + [0] * delta_len + input_mask = input_mask + [1] * delta_len + segment_ids = segment_ids + [data_utils.SEG_ID_PAD] * delta_len + else: + input_ids = [0] * delta_len + input_ids + input_mask = [1] * delta_len + input_mask + segment_ids = [data_utils.SEG_ID_PAD] * delta_len + segment_ids + + assert len(input_ids) == max_seq_length + assert len(input_mask) == max_seq_length + assert len(segment_ids) == max_seq_length + + if label_list is not None: + label_id = label_map[example.label] + else: + label_id = example.label + if example_index < 5: + logging.info("*** Example ***") + logging.info("guid: %s", (example.guid)) + logging.info("input_ids: %s", " ".join([str(x) for x in input_ids])) + logging.info("input_mask: %s", " ".join([str(x) for x in input_mask])) + logging.info("segment_ids: %s", " ".join([str(x) for x in segment_ids])) + logging.info("label: %d (id = %d)", example.label, label_id) + + feature = InputFeatures( + input_ids=input_ids, + input_mask=input_mask, + segment_ids=segment_ids, + label_id=label_id) + return feature diff --git a/models/official/nlp/xlnet/common_flags.py b/models/official/nlp/xlnet/common_flags.py new file mode 100644 index 0000000000000000000000000000000000000000..93d9499f19475b96095c409fb20a5efb35f3f9b5 --- /dev/null +++ b/models/official/nlp/xlnet/common_flags.py @@ -0,0 +1,146 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Common flags used in XLNet model.""" +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +from absl import flags + +flags.DEFINE_string("master", default=None, help="master") +flags.DEFINE_string( + "tpu", + default=None, + help="The Cloud TPU to use for training. This should be " + "either the name used when creating the Cloud TPU, or a " + "url like grpc://ip.address.of.tpu:8470.") +flags.DEFINE_bool( + "use_tpu", default=True, help="Use TPUs rather than plain CPUs.") +flags.DEFINE_string("tpu_topology", "2x2", help="TPU topology.") +flags.DEFINE_integer( + "num_core_per_host", default=8, help="number of cores per host") + +flags.DEFINE_string("model_dir", default=None, help="Estimator model_dir.") +flags.DEFINE_string( + "init_checkpoint", + default=None, + help="Checkpoint path for initializing the model.") +flags.DEFINE_bool( + "init_from_transformerxl", + default=False, + help="Init from a transformerxl model checkpoint. 
Otherwise, init from the " + "entire model checkpoint.") + +# Optimization config +flags.DEFINE_float("learning_rate", default=1e-4, help="Maximum learning rate.") +flags.DEFINE_float("clip", default=1.0, help="Gradient clipping value.") +flags.DEFINE_float("weight_decay_rate", default=0.0, help="Weight decay rate.") + +# lr decay +flags.DEFINE_integer( + "warmup_steps", default=0, help="Number of steps for linear lr warmup.") +flags.DEFINE_float("adam_epsilon", default=1e-8, help="Adam epsilon.") +flags.DEFINE_float( + "lr_layer_decay_rate", + default=1.0, + help="Top layer: lr[L] = FLAGS.learning_rate." + "Lower layers: lr[l-1] = lr[l] * lr_layer_decay_rate.") +flags.DEFINE_float( + "min_lr_ratio", default=0.0, help="Minimum ratio learning rate.") + +# Training config +flags.DEFINE_integer( + "train_batch_size", + default=16, + help="Size of the train batch across all hosts.") +flags.DEFINE_integer( + "train_steps", default=100000, help="Total number of training steps.") +flags.DEFINE_integer( + "iterations", default=1000, help="Number of iterations per repeat loop.") + +# Data config +flags.DEFINE_integer( + "seq_len", default=0, help="Sequence length for pretraining.") +flags.DEFINE_integer( + "reuse_len", + default=0, + help="How many tokens to be reused in the next batch. " + "Could be half of `seq_len`.") +flags.DEFINE_bool("uncased", False, help="Use uncased inputs or not.") +flags.DEFINE_bool( + "bi_data", + default=False, + help="Use bidirectional data streams, " + "i.e., forward & backward.") +flags.DEFINE_integer("n_token", 32000, help="Vocab size") + +# Model config +flags.DEFINE_integer("mem_len", default=0, help="Number of steps to cache") +flags.DEFINE_bool("same_length", default=False, help="Same length attention") +flags.DEFINE_integer("clamp_len", default=-1, help="Clamp length") + +flags.DEFINE_integer("n_layer", default=6, help="Number of layers.") +flags.DEFINE_integer("d_model", default=32, help="Dimension of the model.") +flags.DEFINE_integer("d_embed", default=32, help="Dimension of the embeddings.") +flags.DEFINE_integer("n_head", default=4, help="Number of attention heads.") +flags.DEFINE_integer( + "d_head", default=8, help="Dimension of each attention head.") +flags.DEFINE_integer( + "d_inner", + default=32, + help="Dimension of inner hidden size in positionwise " + "feed-forward.") +flags.DEFINE_float("dropout", default=0.1, help="Dropout rate.") +flags.DEFINE_float("dropout_att", default=0.1, help="Attention dropout rate.") +flags.DEFINE_bool("untie_r", default=False, help="Untie r_w_bias and r_r_bias") +flags.DEFINE_string( + "ff_activation", + default="relu", + help="Activation type used in position-wise feed-forward.") +flags.DEFINE_string( + "strategy_type", + default="tpu", + help="Activation type used in position-wise feed-forward.") +flags.DEFINE_bool("use_bfloat16", False, help="Whether to use bfloat16.") + +# Parameter initialization +flags.DEFINE_enum( + "init_method", + default="normal", + enum_values=["normal", "uniform"], + help="Initialization method.") +flags.DEFINE_float( + "init_std", default=0.02, help="Initialization std when init is normal.") +flags.DEFINE_float( + "init_range", default=0.1, help="Initialization std when init is uniform.") + +flags.DEFINE_integer( + "test_data_size", default=12048, help="Number of test data samples.") +flags.DEFINE_string( + "train_tfrecord_path", + default=None, + help="Path to preprocessed training set tfrecord.") +flags.DEFINE_string( + "test_tfrecord_path", + default=None, + help="Path to preprocessed 
+    test set tfrecord.")
+flags.DEFINE_integer(
+    "test_batch_size",
+    default=16,
+    help="Size of the test batch across all hosts.")
+flags.DEFINE_integer(
+    "save_steps", default=1000, help="Number of steps for saving checkpoint.")
+FLAGS = flags.FLAGS
diff --git a/models/official/nlp/xlnet/data_utils.py b/models/official/nlp/xlnet/data_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1dfe5e7ffb06ff8d38c11271b5758db48c4c4cb
--- /dev/null
+++ b/models/official/nlp/xlnet/data_utils.py
@@ -0,0 +1,816 @@
+# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Utilities used for data preparation."""
+
+from __future__ import absolute_import
+from __future__ import division
+# from __future__ import google_type_annotations
+from __future__ import print_function
+
+import collections
+import json
+import os
+from absl import logging
+
+import numpy as np
+import tensorflow as tf
+
+
+special_symbols = {
+    "<unk>": 0,
+    "<s>": 1,
+    "</s>": 2,
+    "<cls>": 3,
+    "<sep>": 4,
+    "<pad>": 5,
+    "<mask>": 6,
+    "<eod>": 7,
+    "<eop>": 8,
+}
+
+VOCAB_SIZE = 32000
+UNK_ID = special_symbols["<unk>"]
+CLS_ID = special_symbols["<cls>"]
+SEP_ID = special_symbols["<sep>"]
+MASK_ID = special_symbols["<mask>"]
+EOD_ID = special_symbols["<eod>"]
+SEG_ID_P = 0
+SEG_ID_Q = 1
+SEG_ID_CLS = 2
+SEG_ID_PAD = 3
+
+
+OnlineMaskingConfig = collections.namedtuple("OnlineMaskingConfig", [
+    "sample_strategy", "max_num_tokens", "min_num_tokens", "max_num_words",
+    "min_num_words"])
+
+
+def file_based_input_fn_builder(input_file, name_to_features, batch_size,
+                                is_training):
+  """Creates an `input_fn` closure."""
+
+  logging.info("Input tfrecord file %s", input_file)
+
+  def _decode_record(record, name_to_features):
+    """Decodes a record to a TensorFlow example."""
+    example = tf.io.parse_single_example(record, name_to_features)
+
+    # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
+    # So cast all int64 to int32.
+    for name in list(example.keys()):
+      t = example[name]
+      if t.dtype == tf.int64:
+        t = tf.cast(t, tf.int32)
+      example[name] = t
+
+    return example
+
+  def input_fn():
+    """Returns dataset for training/evaluation."""
+    num_threads = 8
+    if isinstance(input_file, str):
+      d = tf.data.TFRecordDataset(input_file)
+      # For training, we want a lot of parallel reading and shuffling.
+      # For eval, we want no shuffling and parallel reading doesn't matter.
+ if is_training: + d = d.shuffle(2048) + d = d.repeat() + else: + cycle_length = min(num_threads, len(input_file)) + d = tf.data.Dataset.from_tensor_slices(input_file) + # file level shuffle + d = d.shuffle(len(input_file)).repeat() + + d = d.interleave( + tf.data.TFRecordDataset, + sloppy=is_training, + cycle_length=cycle_length) + + if is_training: + # sample level shuffle + d = d.shuffle(buffer_size=2048) + d = d.map( + lambda record: _decode_record(record, name_to_features), + num_parallel_calls=tf.data.experimental.AUTOTUNE) + d = d.batch(batch_size, drop_remainder=is_training) + + # When `input_file` is a path to a single file or a list + # containing a single path, disable auto sharding so that + # same input file is sent to all workers. + if isinstance(input_file, str) or len(input_file) == 1: + options = tf.data.Options() + options.experimental_distribute.auto_shard_policy = ( + tf.data.experimental.AutoShardPolicy.OFF) + d = d.with_options(options) + + d = d.prefetch(tf.data.experimental.AUTOTUNE) + return d + + return input_fn + + +def create_classification_dataset(file_path, seq_length, batch_size, + is_training): + """Creates input dataset from (tf)records files for pretraining.""" + name_to_features = { + "input_ids": tf.io.FixedLenFeature([seq_length], tf.int64), + "input_mask": tf.io.FixedLenFeature([seq_length], tf.float32), + "segment_ids": tf.io.FixedLenFeature([seq_length], tf.int64), + "label_ids": tf.io.FixedLenFeature([], tf.int64), + "is_real_example": tf.io.FixedLenFeature([], tf.int64), + } + + input_fn = file_based_input_fn_builder(file_path, name_to_features, + batch_size, is_training) + dataset = input_fn() + return dataset + + +def create_squad_dataset(file_path, seq_length, batch_size, is_training): + """Creates input dataset from (tf)records files for pretraining.""" + name_to_features = { + "unique_ids": tf.io.FixedLenFeature([], tf.int64), + "input_ids": tf.io.FixedLenFeature([seq_length], tf.int64), + "input_mask": tf.io.FixedLenFeature([seq_length], tf.float32), + "segment_ids": tf.io.FixedLenFeature([seq_length], tf.int64), + "cls_index": tf.io.FixedLenFeature([], tf.int64), + "p_mask": tf.io.FixedLenFeature([seq_length], tf.float32) + } + + if is_training: + name_to_features["start_positions"] = tf.io.FixedLenFeature([], tf.int64) + name_to_features["end_positions"] = tf.io.FixedLenFeature([], tf.int64) + name_to_features["is_impossible"] = tf.io.FixedLenFeature([], tf.float32) + + input_fn = file_based_input_fn_builder(file_path, name_to_features, + batch_size, is_training) + dataset = input_fn() + return dataset + + +def get_input_iterator(input_fn, strategy): + """Returns distributed dataset iterator.""" + + # When training with TPU pods, datasets needs to be cloned across + # workers. Since Dataset instance cannot be cloned in eager mode, we instead + # pass callable that returns a dataset. + input_data = input_fn() + if callable(input_data): + iterator = iter( + strategy.experimental_distribute_datasets_from_function(input_data)) + else: + iterator = iter(strategy.experimental_distribute_dataset(input_data)) + return iterator + + +def get_classification_input_data(batch_size, seq_len, strategy, is_training, + file_path): + """Returns input dataset from input file string.""" + + # When using TPU pods, we need to clone dataset across + # workers and need to pass in function that returns the dataset rather + # than passing dataset instance itself. 
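+  # For example, a global batch size of 32 with 8 replicas in sync becomes a
+  # per-replica batch size of 4 below; a global batch size that is not a
+  # multiple of the replica count raises the ValueError that follows.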
+ use_dataset_fn = isinstance(strategy, tf.distribute.experimental.TPUStrategy) + if use_dataset_fn: + if batch_size % strategy.num_replicas_in_sync != 0: + raise ValueError( + "Batch size must be divisible by number of replicas : {}".format( + strategy.num_replicas_in_sync)) + + # As auto rebatching is not supported in + # `experimental_distribute_datasets_from_function()` API, which is + # required when cloning dataset to multiple workers in eager mode, + # we use per-replica batch size. + batch_size = int(batch_size / strategy.num_replicas_in_sync) + + def _dataset_fn(ctx=None): + del ctx + + train_dataset = create_classification_dataset( + file_path=file_path, + seq_length=seq_len, + batch_size=batch_size, + is_training=is_training) + return train_dataset + + return _dataset_fn if use_dataset_fn else _dataset_fn() + + +def get_squad_input_data(batch_size, seq_len, q_len, strategy, is_training, + file_path): + """Returns input dataset from input file string.""" + + # When using TPU pods, we need to clone dataset across + # workers and need to pass in function that returns the dataset rather + # than passing dataset instance itself. + use_dataset_fn = isinstance(strategy, tf.distribute.experimental.TPUStrategy) + if use_dataset_fn: + if batch_size % strategy.num_replicas_in_sync != 0: + raise ValueError( + "Batch size must be divisible by number of replicas : {}".format( + strategy.num_replicas_in_sync)) + + # As auto rebatching is not supported in + # `experimental_distribute_datasets_from_function()` API, which is + # required when cloning dataset to multiple workers in eager mode, + # we use per-replica batch size. + batch_size = int(batch_size / strategy.num_replicas_in_sync) + + if is_training: + input_glob = os.path.join( + file_path, + "spiece.model.*.slen-{}.qlen-{}.train.tf_record".format(seq_len, q_len)) + + global_input_paths = tf.io.gfile.glob(input_glob) + else: + global_input_paths = file_path + + def _dataset_fn(ctx=None): + del ctx + + train_dataset = create_squad_dataset( + file_path=global_input_paths, + seq_length=seq_len, + batch_size=batch_size, + is_training=is_training) + return train_dataset + + return _dataset_fn if use_dataset_fn else _dataset_fn() + + +def _idx_pair_to_mask(beg_indices, end_indices, inputs, tgt_len, num_predict): + """Turn beg and end indices into actual mask.""" + non_func_mask = tf.logical_and( + tf.not_equal(inputs, SEP_ID), + tf.not_equal(inputs, CLS_ID)) + all_indices = tf.where( + non_func_mask, + tf.range(tgt_len, dtype=tf.int64), + tf.constant(-1, shape=[tgt_len], dtype=tf.int64)) + candidate_matrix = tf.cast( + tf.logical_and( + all_indices[None, :] >= beg_indices[:, None], + all_indices[None, :] < end_indices[:, None]), + tf.float32) + cumsum_matrix = tf.reshape( + tf.cumsum(tf.reshape(candidate_matrix, [-1])), + [-1, tgt_len]) + masked_matrix = tf.cast(cumsum_matrix <= num_predict, tf.float32) + target_mask = tf.reduce_sum(candidate_matrix * masked_matrix, axis=0) + is_masked = tf.cast(target_mask, tf.bool) + + return is_masked, target_mask + + +def _word_span_mask(inputs, tgt_len, num_predict, min_num_words, + max_num_words, boundary): + """Sample whole word spans as prediction targets.""" + # Note: 1.2 is the token-to-word ratio + mask_alpha = tgt_len / num_predict / 1.2 + round_to_int = lambda x: tf.cast(tf.round(x), tf.int64) + + # Sample span lengths from a zipf distribution + span_len_seq = np.arange(min_num_words, max_num_words + 1) + probs = np.array([1.0 / (i + 1) for i in span_len_seq]) + probs /= np.sum(probs) + logits = 
tf.constant(np.log(probs), dtype=tf.float32) + + # Sample `num_predict` words here: note that this is over sampling + span_lens = tf.random.categorical( + logits=logits[None], + num_samples=num_predict, + dtype=tf.int64, + )[0] + min_num_words + + # Sample the ratio [0.0, 1.0) of left context lengths + span_lens_float = tf.cast(span_lens, tf.float32) + left_ratio = tf.random.uniform(shape=[num_predict], minval=0.0, maxval=1.0) + left_ctx_len = left_ratio * span_lens_float * (mask_alpha - 1) + + left_ctx_len = round_to_int(left_ctx_len) + right_offset = round_to_int(span_lens_float * mask_alpha) - left_ctx_len + + beg_indices = (tf.cumsum(left_ctx_len) + + tf.cumsum(right_offset, exclusive=True)) + end_indices = beg_indices + span_lens + + # Remove out of range indices + max_boundary_index = tf.cast(tf.shape(boundary)[0] - 1, tf.int64) + valid_idx_mask = end_indices < max_boundary_index + beg_indices = tf.boolean_mask(beg_indices, valid_idx_mask) + end_indices = tf.boolean_mask(end_indices, valid_idx_mask) + + beg_indices = tf.gather(boundary, beg_indices) + end_indices = tf.gather(boundary, end_indices) + + # Shuffle valid indices + num_valid = tf.cast(tf.shape(beg_indices)[0], tf.int64) + order = tf.random.shuffle(tf.range(num_valid, dtype=tf.int64)) + beg_indices = tf.gather(beg_indices, order) + end_indices = tf.gather(end_indices, order) + + return _idx_pair_to_mask(beg_indices, end_indices, inputs, tgt_len, + num_predict) + + +def _token_span_mask(inputs, tgt_len, num_predict, min_num_tokens, + max_num_tokens): + """Sample token spans as prediction targets.""" + mask_alpha = tgt_len / num_predict + round_to_int = lambda x: tf.cast(tf.round(x), tf.int64) + + # Sample span lengths from a zipf distribution + span_len_seq = np.arange(min_num_tokens, max_num_tokens + 1) + probs = np.array([1.0 / (i + 1) for i in span_len_seq]) + + probs /= np.sum(probs) + logits = tf.constant(np.log(probs), dtype=tf.float32) + span_lens = tf.random.categorical( + logits=logits[None], + num_samples=num_predict, + dtype=tf.int64, + )[0] + min_num_tokens + + # Sample the ratio [0.0, 1.0) of left context lengths + span_lens_float = tf.cast(span_lens, tf.float32) + left_ratio = tf.random.uniform(shape=[num_predict], minval=0.0, maxval=1.0) + left_ctx_len = left_ratio * span_lens_float * (mask_alpha - 1) + left_ctx_len = round_to_int(left_ctx_len) + + # Compute the offset from left start to the right end + right_offset = round_to_int(span_lens_float * mask_alpha) - left_ctx_len + + # Get the actual begin and end indices + beg_indices = (tf.cumsum(left_ctx_len) + + tf.cumsum(right_offset, exclusive=True)) + end_indices = beg_indices + span_lens + + # Remove out of range indices + valid_idx_mask = end_indices < tgt_len + beg_indices = tf.boolean_mask(beg_indices, valid_idx_mask) + end_indices = tf.boolean_mask(end_indices, valid_idx_mask) + + # Shuffle valid indices + num_valid = tf.cast(tf.shape(beg_indices)[0], tf.int64) + order = tf.random.shuffle(tf.range(num_valid, dtype=tf.int64)) + beg_indices = tf.gather(beg_indices, order) + end_indices = tf.gather(end_indices, order) + + return _idx_pair_to_mask(beg_indices, end_indices, inputs, tgt_len, + num_predict) + + +def _whole_word_mask(inputs, tgt_len, num_predict, boundary): + """Sample whole words as prediction targets.""" + pair_indices = tf.concat([boundary[:-1, None], boundary[1:, None]], axis=1) + cand_pair_indices = tf.random.shuffle(pair_indices)[:num_predict] + beg_indices = cand_pair_indices[:, 0] + end_indices = cand_pair_indices[:, 1] + + return 
_idx_pair_to_mask(beg_indices, end_indices, inputs, tgt_len, + num_predict) + + +def _single_token_mask(inputs, tgt_len, num_predict): + """Sample individual tokens as prediction targets.""" + all_indices = tf.range(tgt_len, dtype=tf.int64) + non_func_mask = tf.logical_and( + tf.not_equal(inputs, SEP_ID), + tf.not_equal(inputs, CLS_ID)) + non_func_indices = tf.boolean_mask(all_indices, non_func_mask) + + masked_pos = tf.random.shuffle(non_func_indices) + masked_pos = tf.sort(masked_pos[:num_predict]) + target_mask = tf.sparse_to_dense( + sparse_indices=masked_pos, + output_shape=[tgt_len], + sparse_values=1.0, + default_value=0.0) + + is_masked = tf.cast(target_mask, tf.bool) + + return is_masked, target_mask + + +def _online_sample_masks(inputs, tgt_len, num_predict, online_masking_config, + boundary=None): + """Sample target positions to predict.""" + logging.info("Online sample with strategy: `%s`.", + online_masking_config.sample_strategy) + if online_masking_config.sample_strategy == "single_token": + return _single_token_mask(inputs, tgt_len, num_predict) + elif online_masking_config.sample_strategy == "whole_word": + assert boundary is not None, "whole word sampling requires `boundary`" + return _whole_word_mask(inputs, tgt_len, num_predict, boundary) + elif online_masking_config.sample_strategy == "token_span": + return _token_span_mask(inputs, tgt_len, num_predict, + online_masking_config.min_num_tokens, + online_masking_config.max_num_tokens) + elif online_masking_config.sample_strategy == "word_span": + assert boundary is not None, "word span sampling requires `boundary`" + return _word_span_mask(inputs, tgt_len, num_predict, + online_masking_config.min_num_words, + online_masking_config.max_num_words, + boundary) + else: + raise NotImplementedError + + +def create_pretrain_dataset(file_names, + bsz_per_core, + seq_len, + reuse_len, + perm_size, + leak_ratio, + online_masking_config, + num_predict=None, + input_pipeline_context=None): + """Creates pretrain dataset.""" + + def parser(record): + """Function used to parse tfrecord.""" + + record_spec = { + "input": tf.io.FixedLenFeature([seq_len], tf.int64), + "seg_id": tf.io.FixedLenFeature([seq_len], tf.int64), + "label": tf.io.FixedLenFeature([1], tf.int64), + } + + if online_masking_config.sample_strategy in ["whole_word", "word_span"]: + logging.info("Add `boundary` spec for %s", + online_masking_config.sample_strategy) + record_spec["boundary"] = tf.io.VarLenFeature(tf.int64) + + # retrieve serialized example + example = tf.io.parse_single_example( + serialized=record, features=record_spec) + + inputs = example.pop("input") + if online_masking_config.sample_strategy in ["whole_word", "word_span"]: + boundary = tf.sparse.to_dense(example.pop("boundary")) + else: + boundary = None + is_masked, _ = _online_sample_masks( + inputs, seq_len, num_predict, online_masking_config, boundary=boundary) + + if reuse_len > 0: + ##### Use memory + # permutate the reuse and non-reuse parts separately + non_reuse_len = seq_len - reuse_len + assert reuse_len % perm_size == 0 and non_reuse_len % perm_size == 0 + + # Creates permutation mask and target mask for the first reuse_len tokens. + # The tokens in this part are reused from the last sequence. + perm_mask_0, target_mask_0, input_k_0, input_q_0 = _local_perm( + inputs[:reuse_len], is_masked[:reuse_len], perm_size, reuse_len, + leak_ratio) + + # Creates permutation mask and target mask for the rest of tokens in + # current example, which are concatentation of two new segments. 
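+      # The two halves computed here are stitched together just below into one
+      # [seq_len, seq_len] mask with the block structure (1 = cannot attend):
+      #   [ perm_mask_0 | ones        ]  reused (memory) tokens never attend to
+      #   [ zeros       | perm_mask_1 ]  the new segment, while new tokens can
+      # always attend to the reused part.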
+ perm_mask_1, target_mask_1, input_k_1, input_q_1 = _local_perm( + inputs[reuse_len:], is_masked[reuse_len:], perm_size, non_reuse_len, + leak_ratio) + + perm_mask_0 = tf.concat( + [perm_mask_0, tf.ones([reuse_len, non_reuse_len])], axis=1) + perm_mask_1 = tf.concat( + [tf.zeros([non_reuse_len, reuse_len]), perm_mask_1], axis=1) + perm_mask = tf.concat([perm_mask_0, perm_mask_1], axis=0) + target_mask = tf.concat([target_mask_0, target_mask_1], axis=0) + input_k = tf.concat([input_k_0, input_k_1], axis=0) + input_q = tf.concat([input_q_0, input_q_1], axis=0) + else: + ##### Do not use memory + assert seq_len % perm_size == 0 + # permutate the entire sequence together + perm_mask, target_mask, input_k, input_q = _local_perm( + inputs, is_masked, perm_size, seq_len, leak_ratio) + + # reshape back to fixed shape + example["perm_mask"] = tf.reshape(perm_mask, [seq_len, seq_len]) + example["input_k"] = tf.reshape(input_k, [seq_len]) + example["input_q"] = tf.reshape(input_q, [seq_len]) + + # Directly use raw inputs as the target + target = inputs + + if num_predict is not None: + indices = tf.range(seq_len, dtype=tf.int64) + bool_target_mask = tf.cast(target_mask, tf.bool) + indices = tf.boolean_mask(indices, bool_target_mask) + + ##### extra padding due to CLS/SEP introduced after prepro + actual_num_predict = tf.shape(indices)[0] + pad_len = num_predict - actual_num_predict + + ##### target_mapping + target_mapping = tf.one_hot(indices, seq_len, dtype=tf.float32) + paddings = tf.zeros([pad_len, seq_len], dtype=target_mapping.dtype) + target_mapping = tf.concat([target_mapping, paddings], axis=0) + example["target_mapping"] = tf.reshape(target_mapping, + [num_predict, seq_len]) + + ##### target + target = tf.boolean_mask(target, bool_target_mask) + paddings = tf.zeros([pad_len], dtype=target.dtype) + target = tf.concat([target, paddings], axis=0) + example["target"] = tf.reshape(target, [num_predict]) + + ##### target mask + target_mask = tf.concat( + [tf.ones([actual_num_predict], dtype=tf.float32), + tf.zeros([pad_len], dtype=tf.float32)], + axis=0) + example["target_mask"] = tf.reshape(target_mask, [num_predict]) + else: + example["target"] = tf.reshape(target, [seq_len]) + example["target_mask"] = tf.reshape(target_mask, [seq_len]) + + for key in list(example.keys()): + val = example[key] + if tf.keras.backend.is_sparse(val): + val = tf.sparse.to_dense(val) + if val.dtype == tf.int64: + val = tf.cast(val, tf.int32) + + example[key] = val + + for k, v in example.items(): + logging.info("%s: %s", k, v) + + return example + + dataset = parse_files_to_dataset( + parser=parser, + file_paths=file_names, + bsz_per_core=bsz_per_core, + sequential=reuse_len > 0, + input_pipeline_context=input_pipeline_context) + + return dataset + + +def format_filename(prefix, suffix, bsz_per_host, seq_len, reuse_len=None, + uncased=False): + """Generates input file name pattern.""" + if reuse_len is not None and reuse_len > 0: + reuse_str = "reuse-{}.".format(reuse_len) + bsz_str = "hostbsz-{}.".format(bsz_per_host) + else: + reuse_str = "" + bsz_str = "" + + if not uncased: + case_str = "" + else: + case_str = "uncased." 
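+  # Hypothetical example: format_filename("meta.train.pass-*", "json*",
+  # bsz_per_host=8, seq_len=512, reuse_len=256, uncased=False) returns
+  #   "meta.train.pass-*.seq-512.reuse-256.hostbsz-8.json*"
+  # which matches the glob that get_pretrain_input_data() builds below.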
+ + file_name = "{}.seq-{}.{}{}{}{}".format( + prefix, seq_len, reuse_str, bsz_str, case_str, suffix) + + return file_name + + +def get_pretrain_input_data(batch_size, + seq_len, + strategy, + file_path, + reuse_len, + perm_size, + leak_ratio, + num_predict, + uncased, + online_masking_config, + num_hosts=1): + """Returns input dataset from input file string.""" + + # When using TPU pods, we need to clone dataset across + # workers and need to pass in function that returns the dataset rather + # than passing dataset instance itself. + use_dataset_fn = isinstance(strategy, tf.distribute.experimental.TPUStrategy) + split = "train" + bsz_per_host = int(batch_size / num_hosts) + record_glob_base = format_filename( + prefix="meta.{}.pass-*".format(split), + suffix="json*", + bsz_per_host=bsz_per_host, + seq_len=seq_len, + reuse_len=reuse_len, + uncased=uncased) + + def _get_num_batch(info): + if "num_batch" in info: + return info["num_batch"] + elif "num_example" in info: + return info["num_example"] / bsz_per_host + else: + raise ValueError("Do not have sample info.") + + if use_dataset_fn: + if batch_size % strategy.num_replicas_in_sync != 0: + raise ValueError( + "Batch size must be divisible by number of replicas : {}".format( + strategy.num_replicas_in_sync)) + + # As auto rebatching is not supported in + # `experimental_distribute_datasets_from_function()` API, which is + # required when cloning dataset to multiple workers in eager mode, + # we use per-replica batch size. + batch_size = int(batch_size / strategy.num_replicas_in_sync) + + record_info = {"num_batch": 0, "filenames": []} + + tfrecord_dirs = file_path.split(",") + logging.info("Use the following tfrecord dirs: %s", tfrecord_dirs) + + for idx, record_dir in enumerate(tfrecord_dirs): + record_glob = os.path.join(record_dir, record_glob_base) + logging.info("[%d] Record glob: %s", idx, record_glob) + + record_paths = sorted(tf.io.gfile.glob(record_glob)) + logging.info("[%d] Num of record info path: %d", idx, len(record_paths)) + + cur_record_info = {"num_batch": 0, "filenames": []} + + for record_info_path in record_paths: + with tf.io.gfile.GFile(record_info_path, "r") as fp: + info = json.load(fp) + cur_record_info["num_batch"] += int(_get_num_batch(info)) + cur_record_info["filenames"] += info["filenames"] + + # overwrite directory for `cur_record_info` + new_filenames = [] + for filename in cur_record_info["filenames"]: + basename = os.path.basename(filename) + new_filename = os.path.join(record_dir, basename) + new_filenames.append(new_filename) + cur_record_info["filenames"] = new_filenames + + logging.info("[Dir %d] Number of chosen batches: %s", idx, + cur_record_info["num_batch"]) + logging.info("[Dir %d] Number of chosen files: %s", idx, + len(cur_record_info["filenames"])) + logging.info(cur_record_info["filenames"]) + + # add `cur_record_info` to global `record_info` + record_info["num_batch"] += cur_record_info["num_batch"] + record_info["filenames"] += cur_record_info["filenames"] + + logging.info("Total number of batches: %d", record_info["num_batch"]) + logging.info("Total number of files: %d", len(record_info["filenames"])) + logging.info(record_info["filenames"]) + + def _dataset_fn(ctx=None): + """Function that can create a pretrain dataset.""" + + train_dataset = create_pretrain_dataset( + file_names=record_info["filenames"], + bsz_per_core=batch_size, + seq_len=seq_len, + reuse_len=reuse_len, + perm_size=perm_size, + leak_ratio=leak_ratio, + online_masking_config=online_masking_config, + 
num_predict=num_predict, + input_pipeline_context=ctx) + return train_dataset + + return _dataset_fn if use_dataset_fn else _dataset_fn() + + +def parse_files_to_dataset(parser, + file_paths, + bsz_per_core, + sequential, + input_pipeline_context=None): + """Creates the dataset given file paths.""" + + dataset = tf.data.Dataset.from_tensor_slices(file_paths) + + # Note: we cannot perform sample-level shuffle here because this will violate + # the consecutive requirement of data stream. + + if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1: + dataset = dataset.shard(input_pipeline_context.num_input_pipelines, + input_pipeline_context.input_pipeline_id) + # file-level shuffle + if len(file_paths) > 1: + dataset = dataset.shuffle(len(file_paths)) + + if sequential: + # Note: cannot perform sample-level shuffle here because this will violate + # the consecutive requirement of data stream. + dataset = tf.data.TFRecordDataset(dataset) + else: + # `cycle_length` is the number of parallel files that get read. + cycle_length = min(8, len(file_paths)) + logging.info("Interleave %d files", cycle_length) + + # `sloppy` mode means that the interleaving is not exact. This adds + # even more randomness to the training pipeline. + dataset = dataset.apply( + tf.data.experimental.parallel_interleave( + tf.data.TFRecordDataset, + sloppy=True, + cycle_length=cycle_length)) + buffer_size = 2048 + logging.info("Perform sample-level shuffle with size %d", buffer_size) + dataset = dataset.shuffle(buffer_size=buffer_size) + + dataset = dataset.cache().repeat().map(parser) + dataset = dataset.batch(bsz_per_core, drop_remainder=True) + dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) + + return dataset + + +def _local_perm(inputs, is_masked, perm_size, seq_len, leak_ratio): + """Samples a permutation of the factorization order. + + Creates perm_mask and target_mask accordingly. + + Args: + inputs: int64 Tensor in shape [seq_len], input ids. + is_masked: bool Tensor in shape [seq_len]. True means being selected for + partial prediction. + perm_size: the length of longest permutation. Could be set to be reuse_len. + Should not be larger than reuse_len or there will be data leaks. + seq_len: int, sequence length. + leak_ratio: float, percent of masked tokens that are leaked. + + Returns: + perm_mask: float32 Tensor in shape [seq_len, seq_len] consisted of 0 and 1. + If perm_mask[i][j] == 1, it means the ith token (in original order) cannot + attend to the jth token + (in original order). This case will happen only when the ith token's + permutated position <= the jth token's permutated position, + and the jth token is masked or is func token. If perm_mask[i][j] == 0, it + means the ith token (in original order) can attend to the jth token + (in original order). Note that non-masked tokens can be attended by all + other tokens, which is different from the description in original paper. + target_mask: float32 Tensor in shape [seq_len] consisted of 0 and 1. If + target_mask[i] == 1, + the ith token needs to be predicted and mask will be used as input. This + token will count for loss. + If target_mask[i] == 0, token (or [SEP], [CLS]) will be used as input. This + token will not count for loss. + inputs_k: int64 Tensor in shape [seq_len], input ids. + inputs_q: float32 Tensor in shape [seq_len], the same as target_mask. 
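+
+  Example (shapes only, since the sampled permutation is random):
+    inputs = tf.constant([10, 11, 12, 13], dtype=tf.int64)
+    is_masked = tf.constant([False, True, False, True])
+    perm_mask, target_mask, inputs_k, inputs_q = _local_perm(
+        inputs, is_masked, perm_size=2, seq_len=4, leak_ratio=0.0)
+    # perm_mask.shape == (4, 4); target_mask == [0., 1., 0., 1.]
+    # inputs_k == inputs; inputs_q flags the same positions as target_mask.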
+ + """ + + # Generate permutation indices + index = tf.range(seq_len, dtype=tf.int64) + index = tf.transpose(tf.reshape(index, [-1, perm_size])) + index = tf.random.shuffle(index) + index = tf.reshape(tf.transpose(index), [-1]) + + # non-functional tokens + non_func_tokens = tf.logical_not(tf.logical_or( + tf.equal(inputs, SEP_ID), + tf.equal(inputs, CLS_ID))) + masked_tokens = tf.logical_and(is_masked, non_func_tokens) + non_masked_or_func_tokens = tf.logical_not(masked_tokens) + + smallest_index = -2 * tf.ones([seq_len], dtype=tf.int64) + + # Similar to BERT, randomly leak some masked tokens + if leak_ratio > 0: + leak_tokens = tf.logical_and( + masked_tokens, + tf.random.uniform([seq_len], maxval=1.0) < leak_ratio) + can_attend_self = tf.logical_or(non_masked_or_func_tokens, leak_tokens) + else: + can_attend_self = non_masked_or_func_tokens + to_index = tf.where(can_attend_self, smallest_index, index) + from_index = tf.where(can_attend_self, to_index + 1, to_index) + + # For masked tokens, can attend if i > j + # For context tokens, always can attend each other + can_attend = from_index[:, None] > to_index[None, :] + + # In modeling, 1 indicates cannot attend. Hence, reverse the value here. + perm_mask = 1.0 - tf.cast(can_attend, tf.float32) + + # Only masked tokens are included in the loss + target_mask = tf.cast(masked_tokens, tf.float32) + + # construct inputs_k + inputs_k = inputs + + # construct inputs_q + inputs_q = masked_tokens + + return perm_mask, target_mask, inputs_k, inputs_q diff --git a/models/official/nlp/xlnet/optimization.py b/models/official/nlp/xlnet/optimization.py new file mode 100644 index 0000000000000000000000000000000000000000..0d9031647faef79c7e4f722dfeca7e3c1fd7712f --- /dev/null +++ b/models/official/nlp/xlnet/optimization.py @@ -0,0 +1,102 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Functions and classes related to optimization (weight updates).""" +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +from absl import logging +import tensorflow as tf +from official.nlp import optimization + + +class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule): + """Applys a warmup schedule on a given learning rate decay schedule.""" + + def __init__(self, + initial_learning_rate, + decay_schedule_fn, + warmup_steps, + power=1.0, + name=None): + super(WarmUp, self).__init__() + self.initial_learning_rate = initial_learning_rate + self.warmup_steps = warmup_steps + self.power = power + self.decay_schedule_fn = decay_schedule_fn + self.name = name + + def __call__(self, step): + with tf.name_scope(self.name or "WarmUp") as name: + # Implements polynomial warmup. i.e., if global_step < warmup_steps, the + # learning rate will be `global_step/num_warmup_steps * init_lr`. 
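+      # For example, with initial_learning_rate=1e-4, warmup_steps=1000 and
+      # power=1.0, step 100 uses 1e-4 * (100 / 1000) = 1e-5; from step 1000
+      # onwards the wrapped decay_schedule_fn takes over, evaluated at
+      # (step - warmup_steps).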
+ global_step_float = tf.cast(step, tf.float32) + warmup_steps_float = tf.cast(self.warmup_steps, tf.float32) + warmup_percent_done = global_step_float / warmup_steps_float + warmup_learning_rate = ( + self.initial_learning_rate * + tf.math.pow(warmup_percent_done, self.power)) + return tf.cond( + global_step_float < warmup_steps_float, + lambda: warmup_learning_rate, + lambda: self.decay_schedule_fn(step - self.warmup_steps), + name=name) + + def get_config(self): + return { + "initial_learning_rate": self.initial_learning_rate, + "decay_schedule_fn": self.decay_schedule_fn, + "warmup_steps": self.warmup_steps, + "power": self.power, + "name": self.name + } + + +def create_optimizer(init_lr, + num_train_steps, + num_warmup_steps, + min_lr_ratio=0.0, + adam_epsilon=1e-8, + weight_decay_rate=0.0): + """Creates an optimizer with learning rate schedule.""" + # Implements linear decay of the learning rate. + learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay( + initial_learning_rate=init_lr, + decay_steps=num_train_steps - num_warmup_steps, + end_learning_rate=init_lr * min_lr_ratio) + if num_warmup_steps: + learning_rate_fn = WarmUp( + initial_learning_rate=init_lr, + decay_schedule_fn=learning_rate_fn, + warmup_steps=num_warmup_steps) + if weight_decay_rate > 0.0: + logging.info( + "Using AdamWeightDecay with adam_epsilon=%.9f weight_decay_rate=%.3f", + adam_epsilon, weight_decay_rate) + optimizer = optimization.AdamWeightDecay( + learning_rate=learning_rate_fn, + weight_decay_rate=weight_decay_rate, + beta_1=0.9, + beta_2=0.999, + epsilon=adam_epsilon, + exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], + include_in_weight_decay=["r_s_bias", "r_r_bias", "r_w_bias"]) + else: + logging.info("Using Adam with adam_epsilon=%.9f", (adam_epsilon)) + optimizer = tf.keras.optimizers.Adam( + learning_rate=learning_rate_fn, epsilon=adam_epsilon) + + return optimizer, learning_rate_fn diff --git a/models/official/nlp/xlnet/preprocess_classification_data.py b/models/official/nlp/xlnet/preprocess_classification_data.py new file mode 100644 index 0000000000000000000000000000000000000000..9b34ffef7c7ed66a87b8386e1675e14c11b0791d --- /dev/null +++ b/models/official/nlp/xlnet/preprocess_classification_data.py @@ -0,0 +1,457 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Script to pre-process classification data into tfrecords.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import csv +import os + +from absl import app +from absl import flags +from absl import logging +import numpy as np +import tensorflow as tf + +import sentencepiece as spm +from official.nlp.xlnet import classifier_utils +from official.nlp.xlnet import preprocess_utils + + +flags.DEFINE_bool( + "overwrite_data", + default=False, + help="If False, will use cached data if available.") +flags.DEFINE_string("output_dir", default="", help="Output dir for TF records.") +flags.DEFINE_string( + "spiece_model_file", default="", help="Sentence Piece model path.") +flags.DEFINE_string("data_dir", default="", help="Directory for input data.") + +# task specific +flags.DEFINE_string("eval_split", default="dev", help="could be dev or test") +flags.DEFINE_string("task_name", default=None, help="Task name") +flags.DEFINE_integer( + "eval_batch_size", default=64, help="batch size for evaluation") +flags.DEFINE_integer("max_seq_length", default=128, help="Max sequence length") +flags.DEFINE_integer( + "num_passes", + default=1, + help="Num passes for processing training data. " + "This is use to batch data without loss for TPUs.") +flags.DEFINE_bool("uncased", default=False, help="Use uncased.") +flags.DEFINE_bool( + "is_regression", default=False, help="Whether it's a regression task.") +flags.DEFINE_bool( + "use_bert_format", + default=False, + help="Whether to use BERT format to arrange input data.") + +FLAGS = flags.FLAGS + + +class InputExample(object): + """A single training/test example for simple sequence classification.""" + + def __init__(self, guid, text_a, text_b=None, label=None): + """Constructs a InputExample. + + Args: + guid: Unique id for the example. + text_a: string. The untokenized text of the first sequence. For single + sequence tasks, only this sequence must be specified. + text_b: (Optional) string. The untokenized text of the second sequence. + Only must be specified for sequence pair tasks. + label: (Optional) string. The label of the example. This should be + specified for train and dev examples, but not for test examples. 
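+
+    Illustrative example (hypothetical values for a GLUE-style
+    single-sentence task): InputExample(guid="train-1",
+    text_a="a great movie", text_b=None, label="1").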
+ """ + self.guid = guid + self.text_a = text_a + self.text_b = text_b + self.label = label + + +class DataProcessor(object): + """Base class for data converters for sequence classification data sets.""" + + def get_train_examples(self, data_dir): + """Gets a collection of `InputExample`s for the train set.""" + raise NotImplementedError() + + def get_dev_examples(self, data_dir): + """Gets a collection of `InputExample`s for the dev set.""" + raise NotImplementedError() + + def get_test_examples(self, data_dir): + """Gets a collection of `InputExample`s for prediction.""" + raise NotImplementedError() + + def get_labels(self): + """Gets the list of labels for this data set.""" + raise NotImplementedError() + + @classmethod + def _read_tsv(cls, input_file, quotechar=None): + """Reads a tab separated value file.""" + with tf.io.gfile.GFile(input_file, "r") as f: + reader = csv.reader(f, delimiter="\t", quotechar=quotechar) + lines = [] + for line in reader: + # pylint: disable=g-explicit-length-test + if len(line) == 0: + continue + lines.append(line) + return lines + + +class GLUEProcessor(DataProcessor): + """GLUEProcessor.""" + + def __init__(self): + self.train_file = "train.tsv" + self.dev_file = "dev.tsv" + self.test_file = "test.tsv" + self.label_column = None + self.text_a_column = None + self.text_b_column = None + self.contains_header = True + self.test_text_a_column = None + self.test_text_b_column = None + self.test_contains_header = True + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, self.train_file)), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, self.dev_file)), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + if self.test_text_a_column is None: + self.test_text_a_column = self.text_a_column + if self.test_text_b_column is None: + self.test_text_b_column = self.text_b_column + + return self._create_examples( + self._read_tsv(os.path.join(data_dir, self.test_file)), "test") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + examples = [] + for (i, line) in enumerate(lines): + if i == 0 and self.contains_header and set_type != "test": + continue + if i == 0 and self.test_contains_header and set_type == "test": + continue + guid = "%s-%s" % (set_type, i) + + a_column = ( + self.text_a_column if set_type != "test" else self.test_text_a_column) + b_column = ( + self.text_b_column if set_type != "test" else self.test_text_b_column) + + # there are some incomplete lines in QNLI + if len(line) <= a_column: + logging.warning("Incomplete line, ignored.") + continue + text_a = line[a_column] + + if b_column is not None: + if len(line) <= b_column: + logging.warning("Incomplete line, ignored.") + continue + text_b = line[b_column] + else: + text_b = None + + if set_type == "test": + label = self.get_labels()[0] + else: + if len(line) <= self.label_column: + logging.warning("Incomplete line, ignored.") + continue + label = line[self.label_column] + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class Yelp5Processor(DataProcessor): + """Yelp5Processor.""" + + def get_train_examples(self, data_dir): + return self._create_examples(os.path.join(data_dir, "train.csv")) + + def 
get_dev_examples(self, data_dir): + return self._create_examples(os.path.join(data_dir, "test.csv")) + + def get_labels(self): + """See base class.""" + return ["1", "2", "3", "4", "5"] + + def _create_examples(self, input_file): + """Creates examples for the training and dev sets.""" + examples = [] + with tf.io.gfile.GFile(input_file) as f: + reader = csv.reader(f) + for i, line in enumerate(reader): + + label = line[0] + text_a = line[1].replace('""', '"').replace('\\"', '"') + examples.append( + InputExample(guid=str(i), text_a=text_a, text_b=None, label=label)) + return examples + + +class ImdbProcessor(DataProcessor): + """ImdbProcessor.""" + + def get_labels(self): + return ["neg", "pos"] + + def get_train_examples(self, data_dir): + return self._create_examples(os.path.join(data_dir, "train")) + + def get_dev_examples(self, data_dir): + return self._create_examples(os.path.join(data_dir, "test")) + + def _create_examples(self, data_dir): + """Creates examples.""" + examples = [] + for label in ["neg", "pos"]: + cur_dir = os.path.join(data_dir, label) + for filename in tf.io.gfile.listdir(cur_dir): + if not filename.endswith("txt"): + continue + + if len(examples) % 1000 == 0: + logging.info("Loading dev example %d", len(examples)) + + path = os.path.join(cur_dir, filename) + with tf.io.gfile.GFile(path) as f: + text = f.read().strip().replace("
", " ") + examples.append( + InputExample( + guid="unused_id", text_a=text, text_b=None, label=label)) + return examples + + +class MnliMatchedProcessor(GLUEProcessor): + """MnliMatchedProcessor.""" + + def __init__(self): + super(MnliMatchedProcessor, self).__init__() + self.dev_file = "dev_matched.tsv" + self.test_file = "test_matched.tsv" + self.label_column = -1 + self.text_a_column = 8 + self.text_b_column = 9 + + def get_labels(self): + return ["contradiction", "entailment", "neutral"] + + +class MnliMismatchedProcessor(MnliMatchedProcessor): + + def __init__(self): + super(MnliMismatchedProcessor, self).__init__() + self.dev_file = "dev_mismatched.tsv" + self.test_file = "test_mismatched.tsv" + + +class StsbProcessor(GLUEProcessor): + """StsbProcessor.""" + + def __init__(self): + super(StsbProcessor, self).__init__() + self.label_column = 9 + self.text_a_column = 7 + self.text_b_column = 8 + + def get_labels(self): + return [0.0] + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + examples = [] + for (i, line) in enumerate(lines): + if i == 0 and self.contains_header and set_type != "test": + continue + if i == 0 and self.test_contains_header and set_type == "test": + continue + guid = "%s-%s" % (set_type, i) + + a_column = ( + self.text_a_column if set_type != "test" else self.test_text_a_column) + b_column = ( + self.text_b_column if set_type != "test" else self.test_text_b_column) + + # there are some incomplete lines in QNLI + if len(line) <= a_column: + logging.warning("Incomplete line, ignored.") + continue + text_a = line[a_column] + + if b_column is not None: + if len(line) <= b_column: + logging.warning("Incomplete line, ignored.") + continue + text_b = line[b_column] + else: + text_b = None + + if set_type == "test": + label = self.get_labels()[0] + else: + if len(line) <= self.label_column: + logging.warning("Incomplete line, ignored.") + continue + label = float(line[self.label_column]) + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + + return examples + + +def file_based_convert_examples_to_features(examples, + label_list, + max_seq_length, + tokenize_fn, + output_file, + num_passes=1): + """Convert a set of `InputExample`s to a TFRecord file.""" + + # do not create duplicated records + if tf.io.gfile.exists(output_file) and not FLAGS.overwrite_data: + logging.info("Do not overwrite tfrecord %s exists.", output_file) + return + + logging.info("Create new tfrecord %s.", output_file) + + writer = tf.io.TFRecordWriter(output_file) + + examples *= num_passes + + for (ex_index, example) in enumerate(examples): + if ex_index % 10000 == 0: + logging.info("Writing example %d of %d", ex_index, len(examples)) + + feature = classifier_utils.convert_single_example(ex_index, example, + label_list, + max_seq_length, + tokenize_fn, + FLAGS.use_bert_format) + + def create_int_feature(values): + f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) + return f + + def create_float_feature(values): + f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values))) + return f + + features = collections.OrderedDict() + features["input_ids"] = create_int_feature(feature.input_ids) + features["input_mask"] = create_float_feature(feature.input_mask) + features["segment_ids"] = create_int_feature(feature.segment_ids) + if label_list is not None: + features["label_ids"] = create_int_feature([feature.label_id]) + else: + features["label_ids"] = 
create_float_feature([float(feature.label_id)]) + features["is_real_example"] = create_int_feature( + [int(feature.is_real_example)]) + + tf_example = tf.train.Example(features=tf.train.Features(feature=features)) + writer.write(tf_example.SerializeToString()) + writer.close() + + +def main(_): + logging.set_verbosity(logging.INFO) + processors = { + "mnli_matched": MnliMatchedProcessor, + "mnli_mismatched": MnliMismatchedProcessor, + "sts-b": StsbProcessor, + "imdb": ImdbProcessor, + "yelp5": Yelp5Processor + } + + task_name = FLAGS.task_name.lower() + + if task_name not in processors: + raise ValueError("Task not found: %s" % (task_name)) + + processor = processors[task_name]() + label_list = processor.get_labels() if not FLAGS.is_regression else None + + sp = spm.SentencePieceProcessor() + sp.Load(FLAGS.spiece_model_file) + + def tokenize_fn(text): + text = preprocess_utils.preprocess_text(text, lower=FLAGS.uncased) + return preprocess_utils.encode_ids(sp, text) + + spm_basename = os.path.basename(FLAGS.spiece_model_file) + + train_file_base = "{}.len-{}.train.tf_record".format(spm_basename, + FLAGS.max_seq_length) + train_file = os.path.join(FLAGS.output_dir, train_file_base) + logging.info("Use tfrecord file %s", train_file) + + train_examples = processor.get_train_examples(FLAGS.data_dir) + np.random.shuffle(train_examples) + logging.info("Num of train samples: %d", len(train_examples)) + + file_based_convert_examples_to_features(train_examples, label_list, + FLAGS.max_seq_length, tokenize_fn, + train_file, FLAGS.num_passes) + if FLAGS.eval_split == "dev": + eval_examples = processor.get_dev_examples(FLAGS.data_dir) + else: + eval_examples = processor.get_test_examples(FLAGS.data_dir) + + logging.info("Num of eval samples: %d", len(eval_examples)) + + # TPU requires a fixed batch size for all batches, therefore the number + # of examples must be a multiple of the batch size, or else examples + # will get dropped. So we pad with fake examples which are ignored + # later on. These do NOT count towards the metric (all tf.metrics + # support a per-instance weight, and these get a weight of 0.0). + # + # Modified in XL: We also adopt the same mechanism for GPUs. + while len(eval_examples) % FLAGS.eval_batch_size != 0: + eval_examples.append(classifier_utils.PaddingInputExample()) + + eval_file_base = "{}.len-{}.{}.eval.tf_record".format(spm_basename, + FLAGS.max_seq_length, + FLAGS.eval_split) + eval_file = os.path.join(FLAGS.output_dir, eval_file_base) + + file_based_convert_examples_to_features(eval_examples, label_list, + FLAGS.max_seq_length, tokenize_fn, + eval_file) + + +if __name__ == "__main__": + app.run(main) diff --git a/models/official/nlp/xlnet/preprocess_pretrain_data.py b/models/official/nlp/xlnet/preprocess_pretrain_data.py new file mode 100644 index 0000000000000000000000000000000000000000..9bf5367611ca656e88c969e4711334911e9cedd0 --- /dev/null +++ b/models/official/nlp/xlnet/preprocess_pretrain_data.py @@ -0,0 +1,998 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Script to pre-process pre-training data into tfrecords."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import json
+import os
+import random
+
+from absl import app
+from absl import flags
+import absl.logging as _logging  # pylint: disable=unused-import
+
+import numpy as np
+
+
+import tensorflow.google as tf
+from official.nlp.xlnet import preprocess_utils
+import sentencepiece as spm
+
+
+special_symbols = {
+    "<unk>": 0,
+    "<s>": 1,
+    "</s>": 2,
+    "<cls>": 3,
+    "<sep>": 4,
+    "<pad>": 5,
+    "<mask>": 6,
+    "<eod>": 7,
+    "<eop>": 8,
+}
+
+VOCAB_SIZE = 32000
+UNK_ID = special_symbols["<unk>"]
+CLS_ID = special_symbols["<cls>"]
+SEP_ID = special_symbols["<sep>"]
+MASK_ID = special_symbols["<mask>"]
+EOD_ID = special_symbols["<eod>"]
+
+
+def _int64_feature(values):
+  return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
+
+
+def _float_feature(values):
+  return tf.train.Feature(float_list=tf.train.FloatList(value=values))
+
+
+def format_filename(prefix, bsz_per_host, seq_len, bi_data, suffix,
+                    mask_alpha=5, mask_beta=1, reuse_len=None, uncased=False,
+                    fixed_num_predict=None):
+  """Formats the output tfrecord/record-info file name."""
+  if reuse_len is None:
+    reuse_len_str = ""
+  else:
+    reuse_len_str = "reuse-{}.".format(reuse_len)
+  if not uncased:
+    uncased_str = ""
+  else:
+    uncased_str = "uncased."
+  if bi_data:
+    bi_data_str = "bi"
+  else:
+    bi_data_str = "uni"
+  if fixed_num_predict is not None:
+    fnp_str = "fnp-{}.".format(fixed_num_predict)
+  else:
+    fnp_str = ""
+
+  file_name = "{}.bsz-{}.seqlen-{}.{}{}{}.alpha-{}.beta-{}.{}{}".format(
+      prefix, bsz_per_host, seq_len, reuse_len_str, uncased_str, bi_data_str,
+      mask_alpha, mask_beta, fnp_str, suffix)
+
+  return file_name
+
+
+def _create_data(idx, input_paths):
+  # Load sentence-piece model
+  sp = spm.SentencePieceProcessor()
+  sp.Load(FLAGS.sp_path)
+
+  input_shards = []
+  total_line_cnt = 0
+  for input_path in input_paths:
+    input_data, sent_ids = [], []
+    sent_id, line_cnt = True, 0
+    tf.logging.info("Processing %s", input_path)
+    for line in tf.gfile.Open(input_path):
+      if line_cnt % 100000 == 0:
+        tf.logging.info("Loading line %d", line_cnt)
+      line_cnt += 1
+
+      if not line.strip():
+        if FLAGS.use_eod:
+          sent_id = not sent_id
+          cur_sent = [EOD_ID]
+        else:
+          continue
+      else:
+        if FLAGS.from_raw_text:
+          cur_sent = preprocess_utils.preprocess_text(
+              line.strip(), lower=FLAGS.uncased)
+          cur_sent = preprocess_utils.encode_ids(sp, cur_sent)
+        else:
+          cur_sent = list(map(int, line.strip().split()))
+
+      input_data.extend(cur_sent)
+      sent_ids.extend([sent_id] * len(cur_sent))
+      sent_id = not sent_id
+
+    tf.logging.info("Finish with line %d", line_cnt)
+    if line_cnt == 0:
+      continue
+
+    input_data = np.array(input_data, dtype=np.int64)
+    sent_ids = np.array(sent_ids, dtype=np.bool)
+
+    total_line_cnt += line_cnt
+    input_shards.append((input_data, sent_ids))
+
+  tf.logging.info("[Task %d] Total number of lines: %d", idx, total_line_cnt)
+
+  tfrecord_dir = os.path.join(FLAGS.save_dir, "tfrecords")
+
+  filenames, num_batch = [], 0
+
+  # Randomly shuffle input shards (with a fixed but distinct random seed)
+  np.random.seed(100 * FLAGS.task + FLAGS.pass_id)
+
+  perm_indices = np.random.permutation(len(input_shards))
+  tf.logging.info("Using perm indices %s for pass %d",
+                  perm_indices.tolist(), FLAGS.pass_id)
+
+  input_data_list, sent_ids_list = [], []
+  prev_sent_id =
None + for perm_idx in perm_indices: + input_data, sent_ids = input_shards[perm_idx] + # make sure the `send_ids[0] == not prev_sent_id` + if prev_sent_id is not None and sent_ids[0] == prev_sent_id: + sent_ids = np.logical_not(sent_ids) + + # append to temporary list + input_data_list.append(input_data) + sent_ids_list.append(sent_ids) + + # update `prev_sent_id` + prev_sent_id = sent_ids[-1] + + input_data = np.concatenate(input_data_list) + sent_ids = np.concatenate(sent_ids_list) + + file_name, cur_num_batch = create_tfrecords( + save_dir=tfrecord_dir, + basename="{}-{}-{}".format(FLAGS.split, idx, FLAGS.pass_id), + data=[input_data, sent_ids], + bsz_per_host=FLAGS.bsz_per_host, + seq_len=FLAGS.seq_len, + bi_data=FLAGS.bi_data, + sp=sp, + ) + + filenames.append(file_name) + num_batch += cur_num_batch + + record_info = { + "filenames": filenames, + "num_batch": num_batch + } + + return record_info + + +def create_data(_): + # Validate FLAGS + assert FLAGS.bsz_per_host % FLAGS.num_core_per_host == 0 + if not FLAGS.use_tpu: + FLAGS.num_core_per_host = 1 # forced to be one + + # Make workdirs + if not tf.gfile.Exists(FLAGS.save_dir): + tf.gfile.MakeDirs(FLAGS.save_dir) + + tfrecord_dir = os.path.join(FLAGS.save_dir, "tfrecords") + if not tf.gfile.Exists(tfrecord_dir): + tf.gfile.MakeDirs(tfrecord_dir) + + # Create and dump corpus_info from task 0 + if FLAGS.task == 0 and FLAGS.pass_id == 0: + corpus_info = { + "vocab_size": VOCAB_SIZE, + "bsz_per_host": FLAGS.bsz_per_host, + "num_core_per_host": FLAGS.num_core_per_host, + "seq_len": FLAGS.seq_len, + "reuse_len": FLAGS.reuse_len, + "uncased": FLAGS.uncased, + "bi_data": FLAGS.bi_data, + "mask_alpha": FLAGS.mask_alpha, + "mask_beta": FLAGS.mask_beta, + "num_predict": FLAGS.num_predict, + "use_eod": FLAGS.use_eod, + "sp_path": FLAGS.sp_path, + "input_glob": FLAGS.input_glob, + } + corpus_info_path = os.path.join(FLAGS.save_dir, "corpus_info.json") + with tf.gfile.Open(corpus_info_path, "w") as fp: + json.dump(corpus_info, fp) + + # Interleavely split the work into FLAGS.num_task splits + file_paths = sorted(tf.gfile.Glob(FLAGS.input_glob)) + tf.logging.info("Use glob: %s", FLAGS.input_glob) + tf.logging.info("Find %d files: %s", len(file_paths), file_paths) + + task_file_paths = file_paths[FLAGS.task::FLAGS.num_task] + if not task_file_paths: + tf.logging.info("Exit: task %d has no file to process.", FLAGS.task) + return + + tf.logging.info("Task %d process %d files: %s", + FLAGS.task, len(task_file_paths), task_file_paths) + record_info = _create_data(FLAGS.task, task_file_paths) + + record_prefix = "record_info-{}-{}-{}".format( + FLAGS.split, FLAGS.task, FLAGS.pass_id) + record_name = format_filename( + prefix=record_prefix, + bsz_per_host=FLAGS.bsz_per_host, + seq_len=FLAGS.seq_len, + mask_alpha=FLAGS.mask_alpha, + mask_beta=FLAGS.mask_beta, + reuse_len=FLAGS.reuse_len, + bi_data=FLAGS.bi_data, + suffix="json", + uncased=FLAGS.uncased, + fixed_num_predict=FLAGS.num_predict) + record_info_path = os.path.join(tfrecord_dir, record_name) + + with tf.gfile.Open(record_info_path, "w") as fp: + json.dump(record_info, fp) + + +def batchify(data, bsz_per_host, sent_ids=None): + num_step = len(data) // bsz_per_host + data = data[:bsz_per_host * num_step] + data = data.reshape(bsz_per_host, num_step) + if sent_ids is not None: + sent_ids = sent_ids[:bsz_per_host * num_step] + sent_ids = sent_ids.reshape(bsz_per_host, num_step) + + if sent_ids is not None: + return data, sent_ids + return data + + +def _split_a_and_b(data, sent_ids, begin_idx, 
tot_len, extend_target=False): + """Split two segments from `data` starting from the index `begin_idx`.""" + + data_len = data.shape[0] + if begin_idx + tot_len >= data_len: + tf.logging.info("[_split_a_and_b] returns None: " + "begin_idx %d + tot_len %d >= data_len %d", + begin_idx, tot_len, data_len) + return None + + end_idx = begin_idx + 1 + cut_points = [] + while end_idx < data_len: + if sent_ids[end_idx] != sent_ids[end_idx - 1]: + if end_idx - begin_idx >= tot_len: break + cut_points.append(end_idx) + end_idx += 1 + + a_begin = begin_idx + if len(cut_points) == 0 or random.random() < 0.5: + label = 0 + if len(cut_points) == 0: + a_end = end_idx + else: + a_end = random.choice(cut_points) + + b_len = max(1, tot_len - (a_end - a_begin)) + # (zihangd): `data_len - 1` to account for extend_target + b_begin = random.randint(0, data_len - 1 - b_len) + b_end = b_begin + b_len + while b_begin > 0 and sent_ids[b_begin - 1] == sent_ids[b_begin]: + b_begin -= 1 + # (zihangd): `data_len - 1` to account for extend_target + while b_end < data_len - 1 and sent_ids[b_end - 1] == sent_ids[b_end]: + b_end += 1 + + new_begin = a_end + else: + label = 1 + a_end = random.choice(cut_points) + b_begin = a_end + b_end = end_idx + + new_begin = b_end + + while a_end - a_begin + b_end - b_begin > tot_len: + if a_end - a_begin > b_end - b_begin: + # delete the right side only for the LM objective + a_end -= 1 + else: + b_end -= 1 + + ret = [data[a_begin: a_end], data[b_begin: b_end], label, new_begin] + + if extend_target: + if a_end >= data_len or b_end >= data_len: + tf.logging.info("[_split_a_and_b] returns None: " + "a_end %d or b_end %d >= data_len %d", + a_end, b_end, data_len) + return None + a_target = data[a_begin + 1: a_end + 1] + b_target = data[b_begin: b_end + 1] + ret.extend([a_target, b_target]) + + return ret + + +def _is_start_piece(piece): + special_pieces = set(list('!"#$%&\"()*+,-./:;?@[\\]^_`{|}~')) + if (piece.startswith("▁") or piece.startswith("<") + or piece in special_pieces): + return True + else: + return False + + +def _sample_mask(sp, seg, reverse=False, max_gram=5, goal_num_predict=None): + """Sample `goal_num_predict` tokens for partial prediction. + About `mask_beta` tokens are chosen in a context of `mask_alpha` tokens.""" + + seg_len = len(seg) + mask = np.array([False] * seg_len, dtype=np.bool) + + num_predict = 0 + + ngrams = np.arange(1, max_gram + 1, dtype=np.int64) + pvals = 1. 
/ np.arange(1, max_gram + 1) + pvals /= pvals.sum(keepdims=True) + + if reverse: + seg = np.flip(seg, 0) + + cur_len = 0 + while cur_len < seg_len: + if goal_num_predict is not None and num_predict >= goal_num_predict: break + + n = np.random.choice(ngrams, p=pvals) + if goal_num_predict is not None: + n = min(n, goal_num_predict - num_predict) + ctx_size = (n * FLAGS.mask_alpha) // FLAGS.mask_beta + l_ctx = np.random.choice(ctx_size) + r_ctx = ctx_size - l_ctx + + # Find the start position of a complete token + beg = cur_len + l_ctx + while beg < seg_len and not _is_start_piece(sp.IdToPiece(seg[beg].item())): + beg += 1 + if beg >= seg_len: + break + + # Find the end position of the n-gram (start pos of the n+1-th gram) + end = beg + 1 + cnt_ngram = 1 + while end < seg_len: + cnt_ngram += 1 + if cnt_ngram > n: + break + end += 1 + if end >= seg_len: + break + + # Update + mask[beg:end] = True + num_predict += end - beg + + cur_len = end + r_ctx + + while goal_num_predict is not None and num_predict < goal_num_predict: + i = np.random.randint(seg_len) + if not mask[i]: + mask[i] = True + num_predict += 1 + + if reverse: + mask = np.flip(mask, 0) + + return mask + + +def _sample_mask_ngram(sp, seg, reverse=False, max_gram=5, + goal_num_predict=None): + """Sample `goal_num_predict` tokens for partial prediction. + About `mask_beta` tokens are chosen in a context of `mask_alpha` tokens.""" + + seg_len = len(seg) + mask = np.array([False] * seg_len, dtype=np.bool) + + num_predict = 0 + + ngrams = np.arange(1, max_gram + 1, dtype=np.int64) + pvals = 1. / np.arange(1, max_gram + 1) + pvals /= pvals.sum(keepdims=True) + + if reverse: + seg = np.flip(seg, 0) + + cur_len = 0 + while cur_len < seg_len: + if goal_num_predict is not None and num_predict >= goal_num_predict: break + + n = np.random.choice(ngrams, p=pvals) + if goal_num_predict is not None: + n = min(n, goal_num_predict - num_predict) + ctx_size = (n * FLAGS.mask_alpha) // FLAGS.mask_beta + l_ctx = np.random.choice(ctx_size) + r_ctx = ctx_size - l_ctx + + # Find the start position of a complete token + beg = cur_len + l_ctx + while beg < seg_len and not _is_start_piece(sp.IdToPiece(seg[beg].item())): + beg += 1 + if beg >= seg_len: + break + + # Find the end position of the n-gram (start pos of the n+1-th gram) + end = beg + cnt_ngram = 0 + while end < seg_len: + if _is_start_piece(sp.IdToPiece(seg[end].item())): + cnt_ngram += 1 + if cnt_ngram > n: + break + + # select current piece + mask[end] = True + + # update the end pointer and increment num_predict + end += 1 + num_predict += 1 + + if goal_num_predict is not None and num_predict >= goal_num_predict: + break + + cur_len = end + r_ctx + + while goal_num_predict is not None and num_predict < goal_num_predict: + i = np.random.randint(seg_len) + if not mask[i]: + mask[i] = True + num_predict += 1 + + if reverse: + mask = np.flip(mask, 0) + + return mask + + +def create_tfrecords(save_dir, basename, data, bsz_per_host, seq_len, + bi_data, sp): + data, sent_ids = data[0], data[1] + + num_core = FLAGS.num_core_per_host + bsz_per_core = bsz_per_host // num_core + + if bi_data: + assert bsz_per_host % (2 * FLAGS.num_core_per_host) == 0 + fwd_data, fwd_sent_ids = batchify(data, bsz_per_host // 2, sent_ids) + + fwd_data = fwd_data.reshape(num_core, 1, bsz_per_core // 2, -1) + fwd_sent_ids = fwd_sent_ids.reshape(num_core, 1, bsz_per_core // 2, -1) + + bwd_data = fwd_data[:, :, :, ::-1] + bwd_sent_ids = fwd_sent_ids[:, :, :, ::-1] + + data = np.concatenate( + [fwd_data, bwd_data], 
1).reshape(bsz_per_host, -1) + sent_ids = np.concatenate( + [fwd_sent_ids, bwd_sent_ids], 1).reshape(bsz_per_host, -1) + else: + data, sent_ids = batchify(data, bsz_per_host, sent_ids) + + tf.logging.info("Raw data shape %s.", data.shape) + + file_name = format_filename( + prefix=basename, + bsz_per_host=bsz_per_host, + seq_len=seq_len, + bi_data=bi_data, + suffix="tfrecords", + mask_alpha=FLAGS.mask_alpha, + mask_beta=FLAGS.mask_beta, + reuse_len=FLAGS.reuse_len, + uncased=FLAGS.uncased, + fixed_num_predict=FLAGS.num_predict + ) + save_path = os.path.join(save_dir, file_name) + record_writer = tf.python_io.TFRecordWriter(save_path) + tf.logging.info("Start writing %s.", save_path) + + num_batch = 0 + reuse_len = FLAGS.reuse_len + + # [sep] x 2 + [cls] + assert reuse_len < seq_len - 3 + + data_len = data.shape[1] + sep_array = np.array([SEP_ID], dtype=np.int64) + cls_array = np.array([CLS_ID], dtype=np.int64) + + i = 0 + while i + seq_len <= data_len: + if num_batch % 500 == 0: + tf.logging.info("Processing batch %d", num_batch) + + all_ok = True + features = [] + for idx in range(bsz_per_host): + inp = data[idx, i: i + reuse_len] + tgt = data[idx, i + 1: i + reuse_len + 1] + + results = _split_a_and_b( + data[idx], + sent_ids[idx], + begin_idx=i + reuse_len, + tot_len=seq_len - reuse_len - 3, + extend_target=True) + if results is None: + tf.logging.info("Break out with seq idx %d", i) + all_ok = False + break + + # unpack the results + (a_data, b_data, label, _, a_target, b_target) = tuple(results) + + # sample ngram spans to predict + reverse = bi_data and (idx // (bsz_per_core // 2)) % 2 == 1 + if FLAGS.num_predict is None: + num_predict_0 = num_predict_1 = None + else: + num_predict_1 = FLAGS.num_predict // 2 + num_predict_0 = FLAGS.num_predict - num_predict_1 + mask_0 = _sample_mask(sp, inp, reverse=reverse, + goal_num_predict=num_predict_0) + mask_1 = _sample_mask(sp, np.concatenate([a_data, sep_array, b_data, + sep_array, cls_array]), + reverse=reverse, goal_num_predict=num_predict_1) + + # concatenate data + cat_data = np.concatenate([inp, a_data, sep_array, b_data, + sep_array, cls_array]) + seg_id = ([0] * (reuse_len + a_data.shape[0]) + [0] + + [1] * b_data.shape[0] + [1] + [2]) + assert cat_data.shape[0] == seq_len + assert mask_0.shape[0] == seq_len // 2 + assert mask_1.shape[0] == seq_len // 2 + + # the last two CLS's are not used, just for padding purposes + tgt = np.concatenate([tgt, a_target, b_target, cls_array, cls_array]) + assert tgt.shape[0] == seq_len + + is_masked = np.concatenate([mask_0, mask_1], 0) + if FLAGS.num_predict is not None: + assert np.sum(is_masked) == FLAGS.num_predict + + feature = { + "input": _int64_feature(cat_data), + "is_masked": _int64_feature(is_masked), + "target": _int64_feature(tgt), + "seg_id": _int64_feature(seg_id), + "label": _int64_feature([label]), + } + features.append(feature) + + if all_ok: + assert len(features) == bsz_per_host + for feature in features: + example = tf.train.Example(features=tf.train.Features(feature=feature)) + record_writer.write(example.SerializeToString()) + num_batch += 1 + else: + break + + i += reuse_len + + record_writer.close() + tf.logging.info("Done writing %s. 
Num of batches: %d", save_path, num_batch) + + return save_path, num_batch + + +################ +# get_input_fn # +################ +def _convert_example(example, use_bfloat16): + """Cast int64 into int32 and float32 to bfloat16 if use_bfloat16.""" + for key in list(example.keys()): + val = example[key] + if tf.keras.backend.is_sparse(val): + val = tf.sparse.to_dense(val) + if val.dtype == tf.int64: + val = tf.cast(val, tf.int32) + if use_bfloat16 and val.dtype == tf.float32: + val = tf.cast(val, tf.bfloat16) + + example[key] = val + + +def parse_files_to_dataset(parser, file_names, split, num_batch, num_hosts, + host_id, num_core_per_host, bsz_per_core): + # list of file pathes + num_files = len(file_names) + num_files_per_host = num_files // num_hosts + my_start_file_id = host_id * num_files_per_host + my_end_file_id = (host_id + 1) * num_files_per_host + if host_id == num_hosts - 1: + my_end_file_id = num_files + file_paths = file_names[my_start_file_id: my_end_file_id] + tf.logging.info("Host %d handles %d files", host_id, len(file_paths)) + + assert split == "train" + dataset = tf.data.Dataset.from_tensor_slices(file_paths) + + # file-level shuffle + if len(file_paths) > 1: + dataset = dataset.shuffle(len(file_paths)) + + # Note: we cannot perform sample-level shuffle here because this will violate + # the consecutive requirement of data stream. + dataset = tf.data.TFRecordDataset(dataset) + + # Note: since we are doing online preprocessing, the parsed result of + # the same input at each time will be different. Thus, cache processed data + # is not helpful. It will use a lot of memory and lead to contrainer OOM. + # So, change to cache non-parsed raw data instead. + dataset = dataset.cache().map(parser).repeat() + dataset = dataset.batch(bsz_per_core, drop_remainder=True) + dataset = dataset.prefetch(num_core_per_host * bsz_per_core) + + return dataset + + +def _local_perm(inputs, targets, is_masked, perm_size, seq_len): + """ + Sample a permutation of the factorization order, and create an + attention mask accordingly. + + Args: + inputs: int64 Tensor in shape [seq_len], input ids. + targets: int64 Tensor in shape [seq_len], target ids. + is_masked: bool Tensor in shape [seq_len]. True means being selected + for partial prediction. + perm_size: the length of longest permutation. Could be set to be reuse_len. + Should not be larger than reuse_len or there will be data leaks. + seq_len: int, sequence length. 
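+
+  Returns:
+    perm_mask: float32 Tensor in shape [seq_len, seq_len]; 1 means position i
+      cannot attend to position j under the sampled factorization order.
+    new_targets: int64 Tensor in shape [seq_len]; the next token for the LM
+      part and the current token (self) for the PLM part.
+    target_mask: float32 Tensor in shape [seq_len]; 1 for tokens that are
+      predicted and counted in the loss.
+    inputs_k: int64 Tensor in shape [seq_len], same as `inputs`.
+    inputs_q: float32 Tensor in shape [seq_len], the same as `target_mask`.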
+ """ + + # Generate permutation indices + index = tf.range(seq_len, dtype=tf.int64) + index = tf.transpose(tf.reshape(index, [-1, perm_size])) + index = tf.random_shuffle(index) + index = tf.reshape(tf.transpose(index), [-1]) + + # `perm_mask` and `target_mask` + # non-functional tokens + non_func_tokens = tf.logical_not(tf.logical_or( + tf.equal(inputs, SEP_ID), + tf.equal(inputs, CLS_ID))) + + non_mask_tokens = tf.logical_and(tf.logical_not(is_masked), non_func_tokens) + masked_or_func_tokens = tf.logical_not(non_mask_tokens) + + # Set the permutation indices of non-masked (& non-funcional) tokens to the + # smallest index (-1): + # (1) they can be seen by all other positions + # (2) they cannot see masked positions, so there won"t be information leak + smallest_index = -tf.ones([seq_len], dtype=tf.int64) + rev_index = tf.where(non_mask_tokens, smallest_index, index) + + # Create `target_mask`: non-funcional and maksed tokens + # 1: use mask as input and have loss + # 0: use token (or [SEP], [CLS]) as input and do not have loss + target_tokens = tf.logical_and(masked_or_func_tokens, non_func_tokens) + target_mask = tf.cast(target_tokens, tf.float32) + + # Create `perm_mask` + # `target_tokens` cannot see themselves + self_rev_index = tf.where(target_tokens, rev_index, rev_index + 1) + + # 1: cannot attend if i <= j and j is not non-masked (masked_or_func_tokens) + # 0: can attend if i > j or j is non-masked + perm_mask = tf.logical_and( + self_rev_index[:, None] <= rev_index[None, :], + masked_or_func_tokens) + perm_mask = tf.cast(perm_mask, tf.float32) + + # new target: [next token] for LM and [curr token] (self) for PLM + new_targets = tf.concat([inputs[0: 1], targets[: -1]], + axis=0) + + # construct inputs_k + inputs_k = inputs + + # construct inputs_q + inputs_q = target_mask + + return perm_mask, new_targets, target_mask, inputs_k, inputs_q + + +def get_dataset(params, num_hosts, num_core_per_host, split, file_names, + num_batch, seq_len, reuse_len, perm_size, mask_alpha, + mask_beta, use_bfloat16=False, num_predict=None): + + bsz_per_core = params["batch_size"] + if num_hosts > 1: + host_id = params["context"].current_host + else: + host_id = 0 + + #### Function used to parse tfrecord + def parser(record): + """function used to parse tfrecord.""" + + record_spec = { + "input": tf.FixedLenFeature([seq_len], tf.int64), + "target": tf.FixedLenFeature([seq_len], tf.int64), + "seg_id": tf.FixedLenFeature([seq_len], tf.int64), + "label": tf.FixedLenFeature([1], tf.int64), + "is_masked": tf.FixedLenFeature([seq_len], tf.int64), + } + + # retrieve serialized example + example = tf.parse_single_example( + serialized=record, + features=record_spec) + + inputs = example.pop("input") + target = example.pop("target") + is_masked = tf.cast(example.pop("is_masked"), tf.bool) + + non_reuse_len = seq_len - reuse_len + assert perm_size <= reuse_len and perm_size <= non_reuse_len + + perm_mask_0, target_0, target_mask_0, input_k_0, input_q_0 = _local_perm( + inputs[:reuse_len], + target[:reuse_len], + is_masked[:reuse_len], + perm_size, + reuse_len) + + perm_mask_1, target_1, target_mask_1, input_k_1, input_q_1 = _local_perm( + inputs[reuse_len:], + target[reuse_len:], + is_masked[reuse_len:], + perm_size, + non_reuse_len) + + perm_mask_0 = tf.concat([perm_mask_0, tf.ones([reuse_len, non_reuse_len])], + axis=1) + perm_mask_1 = tf.concat([tf.zeros([non_reuse_len, reuse_len]), perm_mask_1], + axis=1) + perm_mask = tf.concat([perm_mask_0, perm_mask_1], axis=0) + target = tf.concat([target_0, 
target_1], axis=0) + target_mask = tf.concat([target_mask_0, target_mask_1], axis=0) + input_k = tf.concat([input_k_0, input_k_1], axis=0) + input_q = tf.concat([input_q_0, input_q_1], axis=0) + + if num_predict is not None: + indices = tf.range(seq_len, dtype=tf.int64) + bool_target_mask = tf.cast(target_mask, tf.bool) + indices = tf.boolean_mask(indices, bool_target_mask) + + ##### extra padding due to CLS/SEP introduced after prepro + actual_num_predict = tf.shape(indices)[0] + pad_len = num_predict - actual_num_predict + + ##### target_mapping + target_mapping = tf.one_hot(indices, seq_len, dtype=tf.float32) + paddings = tf.zeros([pad_len, seq_len], dtype=target_mapping.dtype) + target_mapping = tf.concat([target_mapping, paddings], axis=0) + example["target_mapping"] = tf.reshape(target_mapping, + [num_predict, seq_len]) + + ##### target + target = tf.boolean_mask(target, bool_target_mask) + paddings = tf.zeros([pad_len], dtype=target.dtype) + target = tf.concat([target, paddings], axis=0) + example["target"] = tf.reshape(target, [num_predict]) + + ##### target mask + target_mask = tf.concat( + [tf.ones([actual_num_predict], dtype=tf.float32), + tf.zeros([pad_len], dtype=tf.float32)], + axis=0) + example["target_mask"] = tf.reshape(target_mask, [num_predict]) + else: + example["target"] = tf.reshape(target, [seq_len]) + example["target_mask"] = tf.reshape(target_mask, [seq_len]) + + # reshape back to fixed shape + example["perm_mask"] = tf.reshape(perm_mask, [seq_len, seq_len]) + example["input_k"] = tf.reshape(input_k, [seq_len]) + example["input_q"] = tf.reshape(input_q, [seq_len]) + + _convert_example(example, use_bfloat16) + + for k, v in example.items(): + tf.logging.info("%s: %s", k, v) + + return example + + # Get dataset + dataset = parse_files_to_dataset( + parser=parser, + file_names=file_names, + split=split, + num_batch=num_batch, + num_hosts=num_hosts, + host_id=host_id, + num_core_per_host=num_core_per_host, + bsz_per_core=bsz_per_core) + + return dataset + + +def get_input_fn( + tfrecord_dir, + split, + bsz_per_host, + seq_len, + reuse_len, + bi_data, + num_hosts=1, + num_core_per_host=1, + perm_size=None, + mask_alpha=None, + mask_beta=None, + uncased=False, + num_passes=None, + use_bfloat16=False, + num_predict=None): + + # Merge all record infos into a single one + record_glob_base = format_filename( + prefix="record_info-{}-*".format(split), + bsz_per_host=bsz_per_host, + seq_len=seq_len, + bi_data=bi_data, + suffix="json", + mask_alpha=mask_alpha, + mask_beta=mask_beta, + reuse_len=reuse_len, + uncased=uncased, + fixed_num_predict=num_predict) + + record_info = {"num_batch": 0, "filenames": []} + + tfrecord_dirs = tfrecord_dir.split(",") + tf.logging.info("Use the following tfrecord dirs: %s", tfrecord_dirs) + + for idx, record_dir in enumerate(tfrecord_dirs): + record_glob = os.path.join(record_dir, record_glob_base) + tf.logging.info("[%d] Record glob: %s", idx, record_glob) + + record_paths = sorted(tf.gfile.Glob(record_glob)) + tf.logging.info("[%d] Num of record info path: %d", + idx, len(record_paths)) + + cur_record_info = {"num_batch": 0, "filenames": []} + + for record_info_path in record_paths: + if num_passes is not None: + record_info_name = os.path.basename(record_info_path) + fields = record_info_name.split(".")[0].split("-") + pass_id = int(fields[-1]) + if len(fields) == 5 and pass_id >= num_passes: + tf.logging.info("Skip pass %d: %s", pass_id, record_info_name) + continue + + with tf.gfile.Open(record_info_path, "r") as fp: + info = json.load(fp) 
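+      # Each record info JSON lists the tfrecord shards it produced and their
+      # total batch count. When `num_passes` is set, only the first
+      # `num_passes` shard files are kept and `num_batch` is scaled by the
+      # same fraction (e.g. keeping 2 of 8 shards scales it by 0.25).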
+ if num_passes is not None: + eff_num_passes = min(num_passes, len(info["filenames"])) + ratio = eff_num_passes / len(info["filenames"]) + cur_record_info["num_batch"] += int(info["num_batch"] * ratio) + cur_record_info["filenames"] += info["filenames"][:eff_num_passes] + else: + cur_record_info["num_batch"] += info["num_batch"] + cur_record_info["filenames"] += info["filenames"] + + # overwrite directory for `cur_record_info` + new_filenames = [] + for filename in cur_record_info["filenames"]: + basename = os.path.basename(filename) + new_filename = os.path.join(record_dir, basename) + new_filenames.append(new_filename) + cur_record_info["filenames"] = new_filenames + + tf.logging.info("[Dir %d] Number of chosen batches: %s", + idx, cur_record_info["num_batch"]) + tf.logging.info("[Dir %d] Number of chosen files: %s", + idx, len(cur_record_info["filenames"])) + tf.logging.info(cur_record_info["filenames"]) + + # add `cur_record_info` to global `record_info` + record_info["num_batch"] += cur_record_info["num_batch"] + record_info["filenames"] += cur_record_info["filenames"] + + tf.logging.info("Total number of batches: %d", + record_info["num_batch"]) + tf.logging.info("Total number of files: %d", + len(record_info["filenames"])) + tf.logging.info(record_info["filenames"]) + + def input_fn(params): + """docs.""" + assert params["batch_size"] * num_core_per_host == bsz_per_host + + dataset = get_dataset( + params=params, + num_hosts=num_hosts, + num_core_per_host=num_core_per_host, + split=split, + file_names=record_info["filenames"], + num_batch=record_info["num_batch"], + seq_len=seq_len, + reuse_len=reuse_len, + perm_size=perm_size, + mask_alpha=mask_alpha, + mask_beta=mask_beta, + use_bfloat16=use_bfloat16, + num_predict=num_predict) + + return dataset + + return input_fn, record_info + + +if __name__ == "__main__": + FLAGS = flags.FLAGS + flags.DEFINE_bool("use_tpu", True, help="whether to use TPUs") + flags.DEFINE_integer("bsz_per_host", 32, help="batch size per host.") + flags.DEFINE_integer("num_core_per_host", 8, help="num TPU cores per host.") + + flags.DEFINE_integer("seq_len", 512, + help="Sequence length.") + flags.DEFINE_integer("reuse_len", 256, + help="Number of token that can be reused as memory. " + "Could be half of `seq_len`.") + flags.DEFINE_bool("uncased", False, help="Use uncased inputs or not.") + flags.DEFINE_bool("bi_data", True, + help="whether to create bidirectional data") + flags.DEFINE_integer("mask_alpha", default=6, + help="How many tokens to form a group.") + flags.DEFINE_integer("mask_beta", default=1, + help="How many tokens to mask within each group.") + flags.DEFINE_bool("use_eod", True, + help="whether to append EOD at the end of a doc.") + flags.DEFINE_bool("from_raw_text", True, + help="Whether the input is raw text or encoded ids.") + flags.DEFINE_integer("num_predict", default=85, + help="Num of tokens to predict.") + + flags.DEFINE_string("input_glob", "data/example/*.txt", + help="Input file glob.") + flags.DEFINE_string("sp_path", "", help="Path to the sentence piece model.") + flags.DEFINE_string("save_dir", "proc_data/example", + help="Directory for saving the processed data.") + flags.DEFINE_enum("split", "train", ["train", "dev", "test"], + help="Save the data as which split.") + + flags.DEFINE_integer("pass_id", 0, help="ID of the current pass." + "Different passes sample different negative segment.") + flags.DEFINE_integer("num_task", 1, help="Number of total tasks.") + flags.DEFINE_integer("task", 0, help="The Task ID. 
This value is used when " + "using multiple workers to identify each worker.") + + tf.logging.set_verbosity(tf.logging.INFO) + app.run(create_data) diff --git a/models/official/nlp/xlnet/preprocess_squad_data.py b/models/official/nlp/xlnet/preprocess_squad_data.py new file mode 100644 index 0000000000000000000000000000000000000000..59c8944697348f12b185399463978c170b4ee46b --- /dev/null +++ b/models/official/nlp/xlnet/preprocess_squad_data.py @@ -0,0 +1,110 @@ +# coding=utf-8 +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Script to pre-process SQUAD data into tfrecords.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import random + +from absl import app +from absl import flags +from absl import logging +import tensorflow as tf + +import sentencepiece as spm +from official.nlp.xlnet import squad_utils + +flags.DEFINE_integer( + "num_proc", default=1, help="Number of preprocessing processes.") +flags.DEFINE_integer("proc_id", default=0, help="Process id for preprocessing.") + +# I/O paths +flags.DEFINE_string("output_dir", default="", help="Output dir for TF records.") +flags.DEFINE_string( + "spiece_model_file", default="", help="Sentence Piece model path.") +flags.DEFINE_string("train_file", default="", help="Path of train file.") +flags.DEFINE_string("predict_file", default="", help="Path of prediction file.") + +# Data preprocessing config +flags.DEFINE_integer("max_seq_length", default=512, help="Max sequence length") +flags.DEFINE_integer("max_query_length", default=64, help="Max query length") +flags.DEFINE_integer("doc_stride", default=128, help="Doc stride") +flags.DEFINE_bool("uncased", default=False, help="Use uncased data.") +flags.DEFINE_bool( + "create_train_data", default=True, help="Whether to create training data.") +flags.DEFINE_bool( + "create_eval_data", default=False, help="Whether to create eval data.") + +FLAGS = flags.FLAGS + + +def preprocess(): + """Preprocesses SQUAD data.""" + sp_model = spm.SentencePieceProcessor() + sp_model.Load(FLAGS.spiece_model_file) + spm_basename = os.path.basename(FLAGS.spiece_model_file) + if FLAGS.create_train_data: + train_rec_file = os.path.join( + FLAGS.output_dir, + "{}.{}.slen-{}.qlen-{}.train.tf_record".format(spm_basename, + FLAGS.proc_id, + FLAGS.max_seq_length, + FLAGS.max_query_length)) + + logging.info("Read examples from %s", FLAGS.train_file) + train_examples = squad_utils.read_squad_examples( + FLAGS.train_file, is_training=True) + train_examples = train_examples[FLAGS.proc_id::FLAGS.num_proc] + + # Pre-shuffle the input to avoid having to make a very large shuffle + # buffer in the `input_fn`. 
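+    # Each preprocessing process only holds its own slice of the data
+    # (train_examples[proc_id::num_proc]), so this shuffle is per-process.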
+ random.shuffle(train_examples) + write_to_logging = "Write to " + train_rec_file + logging.info(write_to_logging) + train_writer = squad_utils.FeatureWriter( + filename=train_rec_file, is_training=True) + squad_utils.convert_examples_to_features( + examples=train_examples, + sp_model=sp_model, + max_seq_length=FLAGS.max_seq_length, + doc_stride=FLAGS.doc_stride, + max_query_length=FLAGS.max_query_length, + is_training=True, + output_fn=train_writer.process_feature, + uncased=FLAGS.uncased) + train_writer.close() + if FLAGS.create_eval_data: + eval_examples = squad_utils.read_squad_examples( + FLAGS.predict_file, is_training=False) + squad_utils.create_eval_data(spm_basename, sp_model, eval_examples, + FLAGS.max_seq_length, FLAGS.max_query_length, + FLAGS.doc_stride, FLAGS.uncased, + FLAGS.output_dir) + + +def main(_): + logging.set_verbosity(logging.INFO) + + if not tf.io.gfile.exists(FLAGS.output_dir): + tf.io.gfile.mkdir(FLAGS.output_dir) + + preprocess() + + +if __name__ == "__main__": + app.run(main) diff --git a/models/official/nlp/xlnet/preprocess_utils.py b/models/official/nlp/xlnet/preprocess_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d0e8ae8398111ae73185a4594f1ab9d7dac7dd38 --- /dev/null +++ b/models/official/nlp/xlnet/preprocess_utils.py @@ -0,0 +1,125 @@ +# coding=utf-8 +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utilities for pre-processing.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import unicodedata + +import six + + +SPIECE_UNDERLINE = '▁' + + +def printable_text(text): + """Returns text encoded in a way suitable for print or `tf.logging`.""" + + # These functions want `str` for both Python2 and Python3, but in one case + # it's a Unicode string and in the other it's a byte string. 
+ if six.PY3: + if isinstance(text, str): + return text + elif isinstance(text, bytes): + return text.decode('utf-8', 'ignore') + else: + raise ValueError('Unsupported string type: %s' % (type(text))) + elif six.PY2: + if isinstance(text, str): + return text + elif isinstance(text, unicode): + return text.encode('utf-8') + else: + raise ValueError('Unsupported string type: %s' % (type(text))) + else: + raise ValueError('Not running on Python2 or Python 3?') + + +def print_(*args): + new_args = [] + for arg in args: + if isinstance(arg, list): + s = [printable_text(i) for i in arg] + s = ' '.join(s) + new_args.append(s) + else: + new_args.append(printable_text(arg)) + print(*new_args) + + +def preprocess_text(inputs, lower=False, remove_space=True, keep_accents=False): + """Preprocesses texts.""" + if remove_space: + outputs = ' '.join(inputs.strip().split()) + else: + outputs = inputs + + outputs = outputs.replace('``', '"').replace("''", '"') + + if six.PY2 and isinstance(outputs, str): + outputs = outputs.decode('utf-8') + + if not keep_accents: + outputs = unicodedata.normalize('NFKD', outputs) + outputs = ''.join([c for c in outputs if not unicodedata.combining(c)]) + if lower: + outputs = outputs.lower() + + return outputs + + +def encode_pieces(sp_model, text, return_unicode=True, sample=False): + """Encodes pieces.""" + # return_unicode is used only for py2 + + if six.PY2 and isinstance(text, unicode): + text = text.encode('utf-8') + + if not sample: + pieces = sp_model.EncodeAsPieces(text) + else: + pieces = sp_model.SampleEncodeAsPieces(text, 64, 0.1) + new_pieces = [] + for piece in pieces: + if len(piece) > 1 and piece[-1] == ',' and piece[-2].isdigit(): + cur_pieces = sp_model.EncodeAsPieces( + piece[:-1].replace(SPIECE_UNDERLINE, '')) + if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: + if len(cur_pieces[0]) == 1: + cur_pieces = cur_pieces[1:] + else: + cur_pieces[0] = cur_pieces[0][1:] + cur_pieces.append(piece[-1]) + new_pieces.extend(cur_pieces) + else: + new_pieces.append(piece) + + # note(zhiliny): convert back to unicode for py2 + if six.PY2 and return_unicode: + ret_pieces = [] + for piece in new_pieces: + if isinstance(piece, str): + piece = piece.decode('utf-8') + ret_pieces.append(piece) + new_pieces = ret_pieces + + return new_pieces + + +def encode_ids(sp_model, text, sample=False): + pieces = encode_pieces(sp_model, text, return_unicode=False, sample=sample) + ids = [sp_model.PieceToId(piece) for piece in pieces] + return ids diff --git a/models/official/nlp/xlnet/run_classifier.py b/models/official/nlp/xlnet/run_classifier.py new file mode 100644 index 0000000000000000000000000000000000000000..79a27f244d87617ea3cb34913154e7725cc94b1f --- /dev/null +++ b/models/official/nlp/xlnet/run_classifier.py @@ -0,0 +1,196 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""XLNet classification finetuning runner in tf2.0.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import functools +from absl import app +from absl import flags +from absl import logging + +import numpy as np +import tensorflow as tf +# pylint: disable=unused-import +from official.nlp.xlnet import common_flags +from official.nlp.xlnet import data_utils +from official.nlp.xlnet import optimization +from official.nlp.xlnet import training_utils +from official.nlp.xlnet import xlnet_config +from official.nlp.xlnet import xlnet_modeling as modeling +from official.utils.misc import tpu_lib + +flags.DEFINE_integer("n_class", default=2, help="Number of classes.") +flags.DEFINE_string( + "summary_type", + default="last", + help="Method used to summarize a sequence into a vector.") + +FLAGS = flags.FLAGS + + +def get_classificationxlnet_model(model_config, + run_config, + n_class, + summary_type="last"): + model = modeling.ClassificationXLNetModel( + model_config, run_config, n_class, summary_type, name="model") + return model + + +def run_evaluation(strategy, + test_input_fn, + eval_steps, + model, + step, + eval_summary_writer=None): + """Run evaluation for classification task. + + Args: + strategy: distribution strategy. + test_input_fn: input function for evaluation data. + eval_steps: total number of evaluation steps. + model: keras model object. + step: current train step. + eval_summary_writer: summary writer used to record evaluation metrics. As + there are fake data samples in validation set, we use mask to get rid of + them when calculating the accuracy. For the reason that there will be + dynamic-shape tensor, we first collect logits, labels and masks from TPU + and calculate the accuracy via numpy locally. + + Returns: + A float metric, accuracy. 
+ """ + + def _test_step_fn(inputs): + """Replicated validation step.""" + + inputs["mems"] = None + _, logits = model(inputs, training=False) + return logits, inputs["label_ids"], inputs["is_real_example"] + + @tf.function + def _run_evaluation(test_iterator): + """Runs validation steps.""" + logits, labels, masks = strategy.run( + _test_step_fn, args=(next(test_iterator),)) + return logits, labels, masks + + test_iterator = data_utils.get_input_iterator(test_input_fn, strategy) + correct = 0 + total = 0 + for _ in range(eval_steps): + logits, labels, masks = _run_evaluation(test_iterator) + logits = strategy.experimental_local_results(logits) + labels = strategy.experimental_local_results(labels) + masks = strategy.experimental_local_results(masks) + merged_logits = [] + merged_labels = [] + merged_masks = [] + + for i in range(strategy.num_replicas_in_sync): + merged_logits.append(logits[i].numpy()) + merged_labels.append(labels[i].numpy()) + merged_masks.append(masks[i].numpy()) + merged_logits = np.vstack(np.array(merged_logits)) + merged_labels = np.hstack(np.array(merged_labels)) + merged_masks = np.hstack(np.array(merged_masks)) + real_index = np.where(np.equal(merged_masks, 1)) + correct += np.sum( + np.equal( + np.argmax(merged_logits[real_index], axis=-1), + merged_labels[real_index])) + total += np.shape(real_index)[-1] + accuracy = float(correct) / float(total) + logging.info("Train step: %d / acc = %d/%d = %f", step, correct, total, + accuracy) + if eval_summary_writer: + with eval_summary_writer.as_default(): + tf.summary.scalar("eval_acc", float(correct) / float(total), step=step) + eval_summary_writer.flush() + return accuracy + + +def get_metric_fn(): + train_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy( + "acc", dtype=tf.float32) + return train_acc_metric + + +def main(unused_argv): + del unused_argv + if FLAGS.strategy_type == "mirror": + strategy = tf.distribute.MirroredStrategy() + elif FLAGS.strategy_type == "tpu": + cluster_resolver = tpu_lib.tpu_initialize(FLAGS.tpu) + strategy = tf.distribute.experimental.TPUStrategy(cluster_resolver) + else: + raise ValueError("The distribution strategy type is not supported: %s" % + FLAGS.strategy_type) + if strategy: + logging.info("***** Number of cores used : %d", + strategy.num_replicas_in_sync) + train_input_fn = functools.partial(data_utils.get_classification_input_data, + FLAGS.train_batch_size, FLAGS.seq_len, + strategy, True, FLAGS.train_tfrecord_path) + test_input_fn = functools.partial(data_utils.get_classification_input_data, + FLAGS.test_batch_size, FLAGS.seq_len, + strategy, False, FLAGS.test_tfrecord_path) + + total_training_steps = FLAGS.train_steps + steps_per_loop = FLAGS.iterations + eval_steps = int(FLAGS.test_data_size / FLAGS.test_batch_size) + eval_fn = functools.partial(run_evaluation, strategy, test_input_fn, + eval_steps) + optimizer, learning_rate_fn = optimization.create_optimizer( + FLAGS.learning_rate, + total_training_steps, + FLAGS.warmup_steps, + adam_epsilon=FLAGS.adam_epsilon) + model_config = xlnet_config.XLNetConfig(FLAGS) + run_config = xlnet_config.create_run_config(True, False, FLAGS) + model_fn = functools.partial(get_classificationxlnet_model, model_config, + run_config, FLAGS.n_class, FLAGS.summary_type) + input_meta_data = {} + input_meta_data["d_model"] = FLAGS.d_model + input_meta_data["mem_len"] = FLAGS.mem_len + input_meta_data["batch_size_per_core"] = int(FLAGS.train_batch_size / + strategy.num_replicas_in_sync) + input_meta_data["n_layer"] = FLAGS.n_layer + 
input_meta_data["lr_layer_decay_rate"] = FLAGS.lr_layer_decay_rate + input_meta_data["n_class"] = FLAGS.n_class + + training_utils.train( + strategy=strategy, + model_fn=model_fn, + input_meta_data=input_meta_data, + eval_fn=eval_fn, + metric_fn=get_metric_fn, + train_input_fn=train_input_fn, + init_checkpoint=FLAGS.init_checkpoint, + init_from_transformerxl=FLAGS.init_from_transformerxl, + total_training_steps=total_training_steps, + steps_per_loop=steps_per_loop, + optimizer=optimizer, + learning_rate_fn=learning_rate_fn, + model_dir=FLAGS.model_dir, + save_steps=FLAGS.save_steps) + + +if __name__ == "__main__": + app.run(main) diff --git a/models/official/nlp/xlnet/run_pretrain.py b/models/official/nlp/xlnet/run_pretrain.py new file mode 100644 index 0000000000000000000000000000000000000000..e136f4d12ab01d0b48c0d0765b8e3e8bbf8eedd7 --- /dev/null +++ b/models/official/nlp/xlnet/run_pretrain.py @@ -0,0 +1,156 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""XLNet classification finetuning runner in tf2.0.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import functools +import os + +from absl import app +from absl import flags +from absl import logging +import tensorflow as tf +# pylint: disable=unused-import +from official.nlp.xlnet import common_flags +from official.nlp.xlnet import data_utils +from official.nlp.xlnet import optimization +from official.nlp.xlnet import training_utils +from official.nlp.xlnet import xlnet_config +from official.nlp.xlnet import xlnet_modeling as modeling +from official.utils.misc import tpu_lib + +flags.DEFINE_integer( + "num_predict", + default=None, + help="Number of tokens to predict in partial prediction.") + +# FLAGS for pretrain input preprocessing +flags.DEFINE_integer("perm_size", 0, help="Window size of permutation.") +flags.DEFINE_float("leak_ratio", default=0.1, + help="Percent of masked tokens that are leaked.") + +flags.DEFINE_enum("sample_strategy", default="token_span", + enum_values=["single_token", "whole_word", "token_span", + "word_span"], + help="Stragey used to sample prediction targets.") +flags.DEFINE_integer("max_num_tokens", default=5, + help="Maximum number of tokens to sample in a span." + "Effective when token_span strategy is used.") +flags.DEFINE_integer("min_num_tokens", default=1, + help="Minimum number of tokens to sample in a span." + "Effective when token_span strategy is used.") + +flags.DEFINE_integer("max_num_words", default=5, + help="Maximum number of whole words to sample in a span." + "Effective when word_span strategy is used.") +flags.DEFINE_integer("min_num_words", default=1, + help="Minimum number of whole words to sample in a span." 
+ "Effective when word_span strategy is used.") +FLAGS = flags.FLAGS + + +def get_pretrainxlnet_model(model_config, run_config): + return modeling.PretrainingXLNetModel( + use_proj=True, + xlnet_config=model_config, + run_config=run_config, + name="model") + + +def main(unused_argv): + del unused_argv + num_hosts = 1 + if FLAGS.strategy_type == "mirror": + strategy = tf.distribute.MirroredStrategy() + elif FLAGS.strategy_type == "tpu": + cluster_resolver = tpu_lib.tpu_initialize(FLAGS.tpu) + strategy = tf.distribute.experimental.TPUStrategy(cluster_resolver) + topology = FLAGS.tpu_topology.split("x") + total_num_core = 2 * int(topology[0]) * int(topology[1]) + num_hosts = total_num_core // FLAGS.num_core_per_host + else: + raise ValueError("The distribution strategy type is not supported: %s" % + FLAGS.strategy_type) + if strategy: + logging.info("***** Number of cores used : %d", + strategy.num_replicas_in_sync) + logging.info("***** Number of hosts used : %d", num_hosts) + online_masking_config = data_utils.OnlineMaskingConfig( + sample_strategy=FLAGS.sample_strategy, + max_num_tokens=FLAGS.max_num_tokens, + min_num_tokens=FLAGS.min_num_tokens, + max_num_words=FLAGS.max_num_words, + min_num_words=FLAGS.min_num_words) + + train_input_fn = functools.partial( + data_utils.get_pretrain_input_data, FLAGS.train_batch_size, FLAGS.seq_len, + strategy, FLAGS.train_tfrecord_path, FLAGS.reuse_len, FLAGS.perm_size, + FLAGS.leak_ratio, FLAGS.num_predict, FLAGS.uncased, online_masking_config, + num_hosts) + + total_training_steps = FLAGS.train_steps + + steps_per_loop = FLAGS.iterations + + optimizer, learning_rate_fn = optimization.create_optimizer( + init_lr=FLAGS.learning_rate, + num_train_steps=total_training_steps, + num_warmup_steps=FLAGS.warmup_steps, + min_lr_ratio=FLAGS.min_lr_ratio, + adam_epsilon=FLAGS.adam_epsilon, + weight_decay_rate=FLAGS.weight_decay_rate) + + model_config = xlnet_config.XLNetConfig(FLAGS) + run_config = xlnet_config.create_run_config(True, False, FLAGS) + input_meta_data = {} + input_meta_data["d_model"] = FLAGS.d_model + input_meta_data["mem_len"] = FLAGS.mem_len + input_meta_data["batch_size_per_core"] = int(FLAGS.train_batch_size / + strategy.num_replicas_in_sync) + input_meta_data["n_layer"] = FLAGS.n_layer + input_meta_data["lr_layer_decay_rate"] = FLAGS.lr_layer_decay_rate + model_fn = functools.partial(get_pretrainxlnet_model, model_config, + run_config) + + model = training_utils.train( + strategy=strategy, + model_fn=model_fn, + input_meta_data=input_meta_data, + eval_fn=None, + metric_fn=None, + train_input_fn=train_input_fn, + init_checkpoint=FLAGS.init_checkpoint, + init_from_transformerxl=FLAGS.init_from_transformerxl, + total_training_steps=total_training_steps, + steps_per_loop=steps_per_loop, + optimizer=optimizer, + learning_rate_fn=learning_rate_fn, + model_dir=FLAGS.model_dir, + save_steps=FLAGS.save_steps) + + # Export transformer-xl model checkpoint to be used in finetuning. 
+ checkpoint = tf.train.Checkpoint(transformer_xl=model.transformerxl_model) + saved_path = checkpoint.save( + os.path.join(FLAGS.model_dir, "pretrained/transformer_xl.ckpt")) + logging.info("Exporting the transformer-xl model as a new TF checkpoint: %s", + saved_path) + + +if __name__ == "__main__": + app.run(main) diff --git a/models/official/nlp/xlnet/run_squad.py b/models/official/nlp/xlnet/run_squad.py new file mode 100644 index 0000000000000000000000000000000000000000..013893f1a289bb446dd67f33d9178903f706b2c8 --- /dev/null +++ b/models/official/nlp/xlnet/run_squad.py @@ -0,0 +1,304 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""XLNet SQUAD finetuning runner in tf2.0.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import functools +import json +import os +import pickle + +from absl import app +from absl import flags +from absl import logging + +import tensorflow as tf +# pylint: disable=unused-import +import sentencepiece as spm +from official.nlp.xlnet import common_flags +from official.nlp.xlnet import data_utils +from official.nlp.xlnet import optimization +from official.nlp.xlnet import squad_utils +from official.nlp.xlnet import training_utils +from official.nlp.xlnet import xlnet_config +from official.nlp.xlnet import xlnet_modeling as modeling +from official.utils.misc import tpu_lib + +flags.DEFINE_string( + "test_feature_path", default=None, help="Path to feature of test set.") +flags.DEFINE_integer("query_len", default=64, help="Max query length.") +flags.DEFINE_integer("start_n_top", default=5, help="Beam size for span start.") +flags.DEFINE_integer("end_n_top", default=5, help="Beam size for span end.") +flags.DEFINE_string( + "predict_dir", default=None, help="Path to write predictions.") +flags.DEFINE_string( + "predict_file", default=None, help="Path to json file of test set.") +flags.DEFINE_integer( + "n_best_size", default=5, help="n best size for predictions.") +flags.DEFINE_integer("max_answer_length", default=64, help="Max answer length.") +# Data preprocessing config +flags.DEFINE_string( + "spiece_model_file", default=None, help="Sentence Piece model path.") +flags.DEFINE_integer("max_seq_length", default=512, help="Max sequence length.") +flags.DEFINE_integer("max_query_length", default=64, help="Max query length.") +flags.DEFINE_integer("doc_stride", default=128, help="Doc stride.") + +FLAGS = flags.FLAGS + + +class InputFeatures(object): + """A single set of features of data.""" + + def __init__(self, + unique_id, + example_index, + doc_span_index, + tok_start_to_orig_index, + tok_end_to_orig_index, + token_is_max_context, + input_ids, + input_mask, + p_mask, + segment_ids, + paragraph_len, + cls_index, + start_position=None, + end_position=None, + is_impossible=None): + self.unique_id = 
unique_id + self.example_index = example_index + self.doc_span_index = doc_span_index + self.tok_start_to_orig_index = tok_start_to_orig_index + self.tok_end_to_orig_index = tok_end_to_orig_index + self.token_is_max_context = token_is_max_context + self.input_ids = input_ids + self.input_mask = input_mask + self.p_mask = p_mask + self.segment_ids = segment_ids + self.paragraph_len = paragraph_len + self.cls_index = cls_index + self.start_position = start_position + self.end_position = end_position + self.is_impossible = is_impossible + + +# pylint: disable=unused-argument +def run_evaluation(strategy, test_input_fn, eval_examples, eval_features, + original_data, eval_steps, input_meta_data, model, + current_step, eval_summary_writer): + """Run evaluation for SQUAD task. + + Args: + strategy: distribution strategy. + test_input_fn: input function for evaluation data. + eval_examples: tf.Examples of the evaluation set. + eval_features: Feature objects of the evaluation set. + original_data: The original json data for the evaluation set. + eval_steps: total number of evaluation steps. + input_meta_data: input meta data. + model: keras model object. + current_step: current training step. + eval_summary_writer: summary writer used to record evaluation metrics. + + Returns: + A float metric, F1 score. + """ + + def _test_step_fn(inputs): + """Replicated validation step.""" + + inputs["mems"] = None + res = model(inputs, training=False) + return res, inputs["unique_ids"] + + @tf.function + def _run_evaluation(test_iterator): + """Runs validation steps.""" + res, unique_ids = strategy.run( + _test_step_fn, args=(next(test_iterator),)) + return res, unique_ids + + test_iterator = data_utils.get_input_iterator(test_input_fn, strategy) + cur_results = [] + for _ in range(eval_steps): + results, unique_ids = _run_evaluation(test_iterator) + unique_ids = strategy.experimental_local_results(unique_ids) + + for result_key in results: + results[result_key] = ( + strategy.experimental_local_results(results[result_key])) + for core_i in range(strategy.num_replicas_in_sync): + bsz = int(input_meta_data["test_batch_size"] / + strategy.num_replicas_in_sync) + for j in range(bsz): + result = {} + for result_key in results: + result[result_key] = results[result_key][core_i].numpy()[j] + result["unique_ids"] = unique_ids[core_i].numpy()[j] + # We appended a fake example into dev set to make data size can be + # divided by test_batch_size. Ignores this fake example during + # evaluation. 
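+ # (1000012047 is presumably the fixed unique_id the data pipeline assigns to
+ # that padded fake example; such entries are skipped rather than scored.)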
+ if result["unique_ids"] == 1000012047: + continue + unique_id = int(result["unique_ids"]) + + start_top_log_probs = ([ + float(x) for x in result["start_top_log_probs"].flat + ]) + start_top_index = [int(x) for x in result["start_top_index"].flat] + end_top_log_probs = ([ + float(x) for x in result["end_top_log_probs"].flat + ]) + end_top_index = [int(x) for x in result["end_top_index"].flat] + + cls_logits = float(result["cls_logits"].flat[0]) + cur_results.append( + squad_utils.RawResult( + unique_id=unique_id, + start_top_log_probs=start_top_log_probs, + start_top_index=start_top_index, + end_top_log_probs=end_top_log_probs, + end_top_index=end_top_index, + cls_logits=cls_logits)) + if len(cur_results) % 1000 == 0: + logging.info("Processing example: %d", len(cur_results)) + + output_prediction_file = os.path.join(input_meta_data["predict_dir"], + "predictions.json") + output_nbest_file = os.path.join(input_meta_data["predict_dir"], + "nbest_predictions.json") + output_null_log_odds_file = os.path.join(input_meta_data["predict_dir"], + "null_odds.json") + + results = squad_utils.write_predictions( + eval_examples, eval_features, cur_results, input_meta_data["n_best_size"], + input_meta_data["max_answer_length"], output_prediction_file, + output_nbest_file, output_null_log_odds_file, original_data, + input_meta_data["start_n_top"], input_meta_data["end_n_top"]) + + # Log current results. + log_str = "Result | " + for key, val in results.items(): + log_str += "{} {} | ".format(key, val) + logging.info(log_str) + with eval_summary_writer.as_default(): + tf.summary.scalar("best_f1", results["best_f1"], step=current_step) + tf.summary.scalar("best_exact", results["best_exact"], step=current_step) + eval_summary_writer.flush() + return results["best_f1"] + + +def get_qaxlnet_model(model_config, run_config, start_n_top, end_n_top): + model = modeling.QAXLNetModel( + model_config, + run_config, + start_n_top=start_n_top, + end_n_top=end_n_top, + name="model") + return model + + +def main(unused_argv): + del unused_argv + if FLAGS.strategy_type == "mirror": + strategy = tf.distribute.MirroredStrategy() + elif FLAGS.strategy_type == "tpu": + cluster_resolver = tpu_lib.tpu_initialize(FLAGS.tpu) + strategy = tf.distribute.experimental.TPUStrategy(cluster_resolver) + else: + raise ValueError("The distribution strategy type is not supported: %s" % + FLAGS.strategy_type) + if strategy: + logging.info("***** Number of cores used : %d", + strategy.num_replicas_in_sync) + train_input_fn = functools.partial(data_utils.get_squad_input_data, + FLAGS.train_batch_size, FLAGS.seq_len, + FLAGS.query_len, strategy, True, + FLAGS.train_tfrecord_path) + + test_input_fn = functools.partial(data_utils.get_squad_input_data, + FLAGS.test_batch_size, FLAGS.seq_len, + FLAGS.query_len, strategy, False, + FLAGS.test_tfrecord_path) + + total_training_steps = FLAGS.train_steps + steps_per_loop = FLAGS.iterations + eval_steps = int(FLAGS.test_data_size / FLAGS.test_batch_size) + + optimizer, learning_rate_fn = optimization.create_optimizer( + FLAGS.learning_rate, + total_training_steps, + FLAGS.warmup_steps, + adam_epsilon=FLAGS.adam_epsilon) + model_config = xlnet_config.XLNetConfig(FLAGS) + run_config = xlnet_config.create_run_config(True, False, FLAGS) + input_meta_data = {} + input_meta_data["start_n_top"] = FLAGS.start_n_top + input_meta_data["end_n_top"] = FLAGS.end_n_top + input_meta_data["lr_layer_decay_rate"] = FLAGS.lr_layer_decay_rate + input_meta_data["predict_dir"] = FLAGS.predict_dir + 
input_meta_data["n_best_size"] = FLAGS.n_best_size + input_meta_data["max_answer_length"] = FLAGS.max_answer_length + input_meta_data["test_batch_size"] = FLAGS.test_batch_size + input_meta_data["batch_size_per_core"] = int(FLAGS.train_batch_size / + strategy.num_replicas_in_sync) + input_meta_data["mem_len"] = FLAGS.mem_len + model_fn = functools.partial(get_qaxlnet_model, model_config, run_config, + FLAGS.start_n_top, FLAGS.end_n_top) + eval_examples = squad_utils.read_squad_examples( + FLAGS.predict_file, is_training=False) + if FLAGS.test_feature_path: + logging.info("start reading pickle file...") + with tf.io.gfile.GFile(FLAGS.test_feature_path, "rb") as f: + eval_features = pickle.load(f) + logging.info("finishing reading pickle file...") + else: + sp_model = spm.SentencePieceProcessor() + sp_model.LoadFromSerializedProto( + tf.io.gfile.GFile(FLAGS.spiece_model_file, "rb").read()) + spm_basename = os.path.basename(FLAGS.spiece_model_file) + eval_features = squad_utils.create_eval_data( + spm_basename, sp_model, eval_examples, FLAGS.max_seq_length, + FLAGS.max_query_length, FLAGS.doc_stride, FLAGS.uncased) + + with tf.io.gfile.GFile(FLAGS.predict_file) as f: + original_data = json.load(f)["data"] + eval_fn = functools.partial(run_evaluation, strategy, test_input_fn, + eval_examples, eval_features, original_data, + eval_steps, input_meta_data) + + training_utils.train( + strategy=strategy, + model_fn=model_fn, + input_meta_data=input_meta_data, + eval_fn=eval_fn, + metric_fn=None, + train_input_fn=train_input_fn, + init_checkpoint=FLAGS.init_checkpoint, + init_from_transformerxl=FLAGS.init_from_transformerxl, + total_training_steps=total_training_steps, + steps_per_loop=steps_per_loop, + optimizer=optimizer, + learning_rate_fn=learning_rate_fn, + model_dir=FLAGS.model_dir, + save_steps=FLAGS.save_steps) + + +if __name__ == "__main__": + app.run(main) diff --git a/models/official/nlp/xlnet/squad_utils.py b/models/official/nlp/xlnet/squad_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..efab6da6f80658213317e13dee86b09b2cb94c63 --- /dev/null +++ b/models/official/nlp/xlnet/squad_utils.py @@ -0,0 +1,973 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +# coding=utf-8 +"""Utilities used in SQUAD task.""" +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import collections +import gc +import json +import math +import os +import pickle +import re +import string + +from absl import logging +import numpy as np +import six +import tensorflow as tf + +from official.nlp.xlnet import data_utils +from official.nlp.xlnet import preprocess_utils + +SPIECE_UNDERLINE = u"▁" + + +class InputFeatures(object): + """A single set of features of data.""" + + def __init__(self, + unique_id, + example_index, + doc_span_index, + tok_start_to_orig_index, + tok_end_to_orig_index, + token_is_max_context, + input_ids, + input_mask, + p_mask, + segment_ids, + paragraph_len, + cls_index, + start_position=None, + end_position=None, + is_impossible=None): + self.unique_id = unique_id + self.example_index = example_index + self.doc_span_index = doc_span_index + self.tok_start_to_orig_index = tok_start_to_orig_index + self.tok_end_to_orig_index = tok_end_to_orig_index + self.token_is_max_context = token_is_max_context + self.input_ids = input_ids + self.input_mask = input_mask + self.p_mask = p_mask + self.segment_ids = segment_ids + self.paragraph_len = paragraph_len + self.cls_index = cls_index + self.start_position = start_position + self.end_position = end_position + self.is_impossible = is_impossible + + +def make_qid_to_has_ans(dataset): + qid_to_has_ans = {} + for article in dataset: + for p in article["paragraphs"]: + for qa in p["qas"]: + qid_to_has_ans[qa["id"]] = bool(qa["answers"]) + return qid_to_has_ans + + +def get_raw_scores(dataset, preds): + """Gets exact scores and f1 scores.""" + exact_scores = {} + f1_scores = {} + for article in dataset: + for p in article["paragraphs"]: + for qa in p["qas"]: + qid = qa["id"] + gold_answers = [ + a["text"] for a in qa["answers"] if normalize_answer(a["text"]) + ] + if not gold_answers: + # For unanswerable questions, only correct answer is empty string + gold_answers = [""] + if qid not in preds: + print("Missing prediction for %s" % qid) + continue + a_pred = preds[qid] + # Take max over all gold answers + exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers) + f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers) + return exact_scores, f1_scores + + +def normalize_answer(s): + """Lower text and remove punctuation, articles and extra whitespace.""" + + def remove_articles(text): + regex = re.compile(r"\b(a|an|the)\b", re.UNICODE) + return re.sub(regex, " ", text) + + def white_space_fix(text): + return " ".join(text.split()) + + def remove_punc(text): + exclude = set(string.punctuation) + return "".join(ch for ch in text if ch not in exclude) + + def lower(text): + return text.lower() + + return white_space_fix(remove_articles(remove_punc(lower(s)))) + + +def compute_exact(a_gold, a_pred): + return int(normalize_answer(a_gold) == normalize_answer(a_pred)) + + +def get_tokens(s): + if not s: + return [] + return normalize_answer(s).split() + + +def compute_f1(a_gold, a_pred): + """Computes f1 score.""" + gold_toks = get_tokens(a_gold) + pred_toks = get_tokens(a_pred) + common = collections.Counter(gold_toks) & collections.Counter(pred_toks) + num_same = sum(common.values()) + # pylint: disable=g-explicit-length-test + if len(gold_toks) == 0 or len(pred_toks) == 0: + # If either is no-answer, 
then F1 is 1 if they agree, 0 otherwise + return int(gold_toks == pred_toks) + if num_same == 0: + return 0 + precision = 1.0 * num_same / len(pred_toks) + recall = 1.0 * num_same / len(gold_toks) + f1 = (2 * precision * recall) / (precision + recall) + return f1 + + +def find_best_thresh(preds, scores, na_probs, qid_to_has_ans): + """Finds best threshold.""" + num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) + cur_score = num_no_ans + best_score = cur_score + best_thresh = 0.0 + qid_list = sorted(na_probs, key=lambda k: na_probs[k]) + for qid in qid_list: + if qid not in scores: + continue + if qid_to_has_ans[qid]: + diff = scores[qid] + else: + if preds[qid]: + diff = -1 + else: + diff = 0 + cur_score += diff + if cur_score > best_score: + best_score = cur_score + best_thresh = na_probs[qid] + + has_ans_score, has_ans_cnt = 0, 0 + for qid in qid_list: + if not qid_to_has_ans[qid]: + continue + has_ans_cnt += 1 + + if qid not in scores: + continue + has_ans_score += scores[qid] + + return 100.0 * best_score / len( + scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt + + +def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, + qid_to_has_ans): + """Finds all best threshold.""" + best_exact, exact_thresh, has_ans_exact = find_best_thresh( + preds, exact_raw, na_probs, qid_to_has_ans) + best_f1, f1_thresh, has_ans_f1 = find_best_thresh(preds, f1_raw, na_probs, + qid_to_has_ans) + main_eval["best_exact"] = best_exact + main_eval["best_exact_thresh"] = exact_thresh + main_eval["best_f1"] = best_f1 + main_eval["best_f1_thresh"] = f1_thresh + main_eval["has_ans_exact"] = has_ans_exact + main_eval["has_ans_f1"] = has_ans_f1 + + +_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name + "PrelimPrediction", [ + "feature_index", "start_index", "end_index", "start_log_prob", + "end_log_prob" + ]) + +_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name + "NbestPrediction", ["text", "start_log_prob", "end_log_prob"]) +RawResult = collections.namedtuple("RawResult", [ + "unique_id", "start_top_log_probs", "start_top_index", "end_top_log_probs", + "end_top_index", "cls_logits" +]) + + +def _compute_softmax(scores): + """Computes softmax probability over raw logits.""" + if not scores: + return [] + + max_score = None + for score in scores: + if max_score is None or score > max_score: + max_score = score + + exp_scores = [] + total_sum = 0.0 + for score in scores: + x = math.exp(score - max_score) + exp_scores.append(x) + total_sum += x + + probs = [] + for score in exp_scores: + probs.append(score / total_sum) + return probs + + +class SquadExample(object): + """A single training/test example for simple sequence classification. + + For examples without an answer, the start and end position are -1. 
+ """ + + def __init__(self, + qas_id, + question_text, + paragraph_text, + orig_answer_text=None, + start_position=None, + is_impossible=False): + self.qas_id = qas_id + self.question_text = question_text + self.paragraph_text = paragraph_text + self.orig_answer_text = orig_answer_text + self.start_position = start_position + self.is_impossible = is_impossible + + def __str__(self): + return self.__repr__() + + def __repr__(self): + s = "" + s += "qas_id: %s" % (preprocess_utils.printable_text(self.qas_id)) + s += ", question_text: %s" % ( + preprocess_utils.printable_text(self.question_text)) + s += ", paragraph_text: [%s]" % (" ".join(self.paragraph_text)) + if self.start_position: + s += ", start_position: %d" % (self.start_position) + if self.start_position: + s += ", is_impossible: %r" % (self.is_impossible) + return s + + +def write_predictions(all_examples, all_features, all_results, n_best_size, + max_answer_length, output_prediction_file, + output_nbest_file, output_null_log_odds_file, orig_data, + start_n_top, end_n_top): + """Writes final predictions to the json file and log-odds of null if needed.""" + logging.info("Writing predictions to: %s", (output_prediction_file)) + + example_index_to_features = collections.defaultdict(list) + for feature in all_features: + example_index_to_features[feature.example_index].append(feature) + + unique_id_to_result = {} + for result in all_results: + unique_id_to_result[result.unique_id] = result + + all_predictions = collections.OrderedDict() + all_nbest_json = collections.OrderedDict() + scores_diff_json = collections.OrderedDict() + + for (example_index, example) in enumerate(all_examples): + features = example_index_to_features[example_index] + + prelim_predictions = [] + # keep track of the minimum score of null start+end of position 0 + score_null = 1000000 # large and positive + + for (feature_index, feature) in enumerate(features): + result = unique_id_to_result[feature.unique_id] + + cur_null_score = result.cls_logits + + # if we could have irrelevant answers, get the min score of irrelevant + score_null = min(score_null, cur_null_score) + + for i in range(start_n_top): + for j in range(end_n_top): + start_log_prob = result.start_top_log_probs[i] + start_index = result.start_top_index[i] + + j_index = i * end_n_top + j + + end_log_prob = result.end_top_log_probs[j_index] + end_index = result.end_top_index[j_index] + + # We could hypothetically create invalid predictions, e.g., predict + # that the start of the span is in the question. We throw out all + # invalid predictions. 
+ if start_index >= feature.paragraph_len - 1: + continue + if end_index >= feature.paragraph_len - 1: + continue + + if not feature.token_is_max_context.get(start_index, False): + continue + if end_index < start_index: + continue + length = end_index - start_index + 1 + if length > max_answer_length: + continue + + prelim_predictions.append( + _PrelimPrediction( + feature_index=feature_index, + start_index=start_index, + end_index=end_index, + start_log_prob=start_log_prob, + end_log_prob=end_log_prob)) + + prelim_predictions = sorted( + prelim_predictions, + key=lambda x: (x.start_log_prob + x.end_log_prob), + reverse=True) + + seen_predictions = {} + nbest = [] + for pred in prelim_predictions: + if len(nbest) >= n_best_size: + break + feature = features[pred.feature_index] + + tok_start_to_orig_index = feature.tok_start_to_orig_index + tok_end_to_orig_index = feature.tok_end_to_orig_index + start_orig_pos = tok_start_to_orig_index[pred.start_index] + end_orig_pos = tok_end_to_orig_index[pred.end_index] + + paragraph_text = example.paragraph_text + final_text = paragraph_text[start_orig_pos:end_orig_pos + 1].strip() + + if final_text in seen_predictions: + continue + + seen_predictions[final_text] = True + + nbest.append( + _NbestPrediction( + text=final_text, + start_log_prob=pred.start_log_prob, + end_log_prob=pred.end_log_prob)) + + # In very rare edge cases we could have no valid predictions. So we + # just create a nonce prediction in this case to avoid failure. + if not nbest: + nbest.append( + _NbestPrediction(text="", start_log_prob=-1e6, end_log_prob=-1e6)) + + total_scores = [] + best_non_null_entry = None + for entry in nbest: + total_scores.append(entry.start_log_prob + entry.end_log_prob) + if not best_non_null_entry: + best_non_null_entry = entry + + probs = _compute_softmax(total_scores) + + nbest_json = [] + for (i, entry) in enumerate(nbest): + output = collections.OrderedDict() + output["text"] = entry.text + output["probability"] = probs[i] + output["start_log_prob"] = entry.start_log_prob + output["end_log_prob"] = entry.end_log_prob + nbest_json.append(output) + + assert len(nbest_json) >= 1 + assert best_non_null_entry is not None + + score_diff = score_null + scores_diff_json[example.qas_id] = score_diff + + all_predictions[example.qas_id] = best_non_null_entry.text + + all_nbest_json[example.qas_id] = nbest_json + + with tf.io.gfile.GFile(output_prediction_file, "w") as writer: + writer.write(json.dumps(all_predictions, indent=4) + "\n") + + with tf.io.gfile.GFile(output_nbest_file, "w") as writer: + writer.write(json.dumps(all_nbest_json, indent=4) + "\n") + + with tf.io.gfile.GFile(output_null_log_odds_file, "w") as writer: + writer.write(json.dumps(scores_diff_json, indent=4) + "\n") + + qid_to_has_ans = make_qid_to_has_ans(orig_data) + exact_raw, f1_raw = get_raw_scores(orig_data, all_predictions) + out_eval = {} + + find_all_best_thresh(out_eval, all_predictions, exact_raw, f1_raw, + scores_diff_json, qid_to_has_ans) + + return out_eval + + +def read_squad_examples(input_file, is_training): + """Reads a SQuAD json file into a list of SquadExample.""" + with tf.io.gfile.GFile(input_file, "r") as reader: + input_data = json.load(reader)["data"] + + examples = [] + for entry in input_data: + for paragraph in entry["paragraphs"]: + paragraph_text = paragraph["context"] + + for qa in paragraph["qas"]: + qas_id = qa["id"] + question_text = qa["question"] + start_position = None + orig_answer_text = None + is_impossible = False + + if is_training: + is_impossible = 
qa["is_impossible"] + if (len(qa["answers"]) != 1) and (not is_impossible): + raise ValueError( + "For training, each question should have exactly 1 answer.") + if not is_impossible: + answer = qa["answers"][0] + orig_answer_text = answer["text"] + start_position = answer["answer_start"] + else: + start_position = -1 + orig_answer_text = "" + + example = SquadExample( + qas_id=qas_id, + question_text=question_text, + paragraph_text=paragraph_text, + orig_answer_text=orig_answer_text, + start_position=start_position, + is_impossible=is_impossible) + examples.append(example) + + return examples + + +# pylint: disable=invalid-name +def _convert_index(index, pos, M=None, is_start=True): + """Converts index.""" + if index[pos] is not None: + return index[pos] + N = len(index) + rear = pos + while rear < N - 1 and index[rear] is None: + rear += 1 + front = pos + while front > 0 and index[front] is None: + front -= 1 + assert index[front] is not None or index[rear] is not None + if index[front] is None: + if index[rear] >= 1: + if is_start: + return 0 + else: + return index[rear] - 1 + return index[rear] + if index[rear] is None: + if M is not None and index[front] < M - 1: + if is_start: + return index[front] + 1 + else: + return M - 1 + return index[front] + if is_start: + if index[rear] > index[front] + 1: + return index[front] + 1 + else: + return index[rear] + else: + if index[rear] > index[front] + 1: + return index[rear] - 1 + else: + return index[front] + + +def convert_examples_to_features(examples, sp_model, max_seq_length, doc_stride, + max_query_length, is_training, output_fn, + uncased): + """Loads a data file into a list of `InputBatch`s.""" + + cnt_pos, cnt_neg = 0, 0 + unique_id = 1000000000 + max_N, max_M = 1024, 1024 + f = np.zeros((max_N, max_M), dtype=np.float32) + + for (example_index, example) in enumerate(examples): + # pylint: disable=logging-format-interpolation + if example_index % 100 == 0: + logging.info("Converting {}/{} pos {} neg {}".format( + example_index, len(examples), cnt_pos, cnt_neg)) + + query_tokens = preprocess_utils.encode_ids( + sp_model, + preprocess_utils.preprocess_text(example.question_text, lower=uncased)) + + if len(query_tokens) > max_query_length: + query_tokens = query_tokens[0:max_query_length] + + paragraph_text = example.paragraph_text + para_tokens = preprocess_utils.encode_pieces( + sp_model, + preprocess_utils.preprocess_text(example.paragraph_text, lower=uncased)) + + chartok_to_tok_index = [] + tok_start_to_chartok_index = [] + tok_end_to_chartok_index = [] + char_cnt = 0 + for i, token in enumerate(para_tokens): + chartok_to_tok_index.extend([i] * len(token)) + tok_start_to_chartok_index.append(char_cnt) + char_cnt += len(token) + tok_end_to_chartok_index.append(char_cnt - 1) + + tok_cat_text = "".join(para_tokens).replace(SPIECE_UNDERLINE, " ") + N, M = len(paragraph_text), len(tok_cat_text) + + if N > max_N or M > max_M: + max_N = max(N, max_N) + max_M = max(M, max_M) + f = np.zeros((max_N, max_M), dtype=np.float32) + gc.collect() + + g = {} + + # pylint: disable=cell-var-from-loop + def _lcs_match(max_dist): + """LCS match.""" + f.fill(0) + g.clear() + + ### longest common sub sequence + # f[i, j] = max(f[i - 1, j], f[i, j - 1], f[i - 1, j - 1] + match(i, j)) + for i in range(N): + + # note(zhiliny): + # unlike standard LCS, this is specifically optimized for the setting + # because the mismatch between sentence pieces and original text will + # be small + for j in range(i - max_dist, i + max_dist): + if j >= M or j < 0: + continue + 
+ if i > 0: + g[(i, j)] = 0 + f[i, j] = f[i - 1, j] + + if j > 0 and f[i, j - 1] > f[i, j]: + g[(i, j)] = 1 + f[i, j] = f[i, j - 1] + + f_prev = f[i - 1, j - 1] if i > 0 and j > 0 else 0 + if (preprocess_utils.preprocess_text( + paragraph_text[i], lower=uncased, + remove_space=False) == tok_cat_text[j] and f_prev + 1 > f[i, j]): + g[(i, j)] = 2 + f[i, j] = f_prev + 1 + + max_dist = abs(N - M) + 5 + for _ in range(2): + _lcs_match(max_dist) + if f[N - 1, M - 1] > 0.8 * N: + break + max_dist *= 2 + + orig_to_chartok_index = [None] * N + chartok_to_orig_index = [None] * M + i, j = N - 1, M - 1 + while i >= 0 and j >= 0: + if (i, j) not in g: + break + if g[(i, j)] == 2: + orig_to_chartok_index[i] = j + chartok_to_orig_index[j] = i + i, j = i - 1, j - 1 + elif g[(i, j)] == 1: + j = j - 1 + else: + i = i - 1 + + if all( + v is None for v in orig_to_chartok_index) or f[N - 1, M - 1] < 0.8 * N: + print("MISMATCH DETECTED!") + continue + + tok_start_to_orig_index = [] + tok_end_to_orig_index = [] + for i in range(len(para_tokens)): + start_chartok_pos = tok_start_to_chartok_index[i] + end_chartok_pos = tok_end_to_chartok_index[i] + start_orig_pos = _convert_index( + chartok_to_orig_index, start_chartok_pos, N, is_start=True) + end_orig_pos = _convert_index( + chartok_to_orig_index, end_chartok_pos, N, is_start=False) + + tok_start_to_orig_index.append(start_orig_pos) + tok_end_to_orig_index.append(end_orig_pos) + + if not is_training: + tok_start_position = tok_end_position = None + + if is_training and example.is_impossible: + tok_start_position = -1 + tok_end_position = -1 + + if is_training and not example.is_impossible: + start_position = example.start_position + end_position = start_position + len(example.orig_answer_text) - 1 + + start_chartok_pos = _convert_index( + orig_to_chartok_index, start_position, is_start=True) + tok_start_position = chartok_to_tok_index[start_chartok_pos] + + end_chartok_pos = _convert_index( + orig_to_chartok_index, end_position, is_start=False) + tok_end_position = chartok_to_tok_index[end_chartok_pos] + assert tok_start_position <= tok_end_position + + def _piece_to_id(x): + if six.PY2 and isinstance(x, unicode): + x = x.encode("utf-8") + return sp_model.PieceToId(x) + + all_doc_tokens = list(map(_piece_to_id, para_tokens)) + + # The -3 accounts for [CLS], [SEP] and [SEP] + max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 + + # We can have documents that are longer than the maximum sequence length. + # To deal with this we do a sliding window approach, where we take chunks + # of the up to our max length with a stride of `doc_stride`. 
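+ # For illustration (hypothetical sizes): with 7 document tokens,
+ # max_tokens_for_doc = 4 and doc_stride = 2, the loop below yields the spans
+ # (start=0, length=4), (start=2, length=4) and (start=4, length=3).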
+ _DocSpan = collections.namedtuple( # pylint: disable=invalid-name + "DocSpan", ["start", "length"]) + doc_spans = [] + start_offset = 0 + while start_offset < len(all_doc_tokens): + length = len(all_doc_tokens) - start_offset + if length > max_tokens_for_doc: + length = max_tokens_for_doc + doc_spans.append(_DocSpan(start=start_offset, length=length)) + if start_offset + length == len(all_doc_tokens): + break + start_offset += min(length, doc_stride) + + for (doc_span_index, doc_span) in enumerate(doc_spans): + tokens = [] + token_is_max_context = {} + segment_ids = [] + p_mask = [] + + cur_tok_start_to_orig_index = [] + cur_tok_end_to_orig_index = [] + + for i in range(doc_span.length): + split_token_index = doc_span.start + i + + cur_tok_start_to_orig_index.append( + tok_start_to_orig_index[split_token_index]) + cur_tok_end_to_orig_index.append( + tok_end_to_orig_index[split_token_index]) + + is_max_context = _check_is_max_context(doc_spans, doc_span_index, + split_token_index) + token_is_max_context[len(tokens)] = is_max_context + tokens.append(all_doc_tokens[split_token_index]) + segment_ids.append(data_utils.SEG_ID_P) + p_mask.append(0) + + paragraph_len = len(tokens) + + tokens.append(data_utils.SEP_ID) + segment_ids.append(data_utils.SEG_ID_P) + p_mask.append(1) + + # note(zhiliny): we put P before Q + # because during pretraining, B is always shorter than A + for token in query_tokens: + tokens.append(token) + segment_ids.append(data_utils.SEG_ID_Q) + p_mask.append(1) + tokens.append(data_utils.SEP_ID) + segment_ids.append(data_utils.SEG_ID_Q) + p_mask.append(1) + + cls_index = len(segment_ids) + tokens.append(data_utils.CLS_ID) + segment_ids.append(data_utils.SEG_ID_CLS) + p_mask.append(0) + + input_ids = tokens + + # The mask has 0 for real tokens and 1 for padding tokens. Only real + # tokens are attended to. + input_mask = [0] * len(input_ids) + + # Zero-pad up to the sequence length. + while len(input_ids) < max_seq_length: + input_ids.append(0) + input_mask.append(1) + segment_ids.append(data_utils.SEG_ID_PAD) + p_mask.append(1) + + assert len(input_ids) == max_seq_length + assert len(input_mask) == max_seq_length + assert len(segment_ids) == max_seq_length + assert len(p_mask) == max_seq_length + + span_is_impossible = example.is_impossible + start_position = None + end_position = None + if is_training and not span_is_impossible: + # For training, if our document chunk does not contain an annotation + # we throw it out, since there is nothing to predict. + doc_start = doc_span.start + doc_end = doc_span.start + doc_span.length - 1 + out_of_span = False + if not (tok_start_position >= doc_start and + tok_end_position <= doc_end): + out_of_span = True + if out_of_span: + # continue + start_position = 0 + end_position = 0 + span_is_impossible = True + else: + # note: we put P before Q, so doc_offset should be zero. 
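+ # (With a BERT-style Q-before-P layout the commented-out offset below would be
+ # needed; since the paragraph comes first here, its tokens start at position 0.)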
+ # doc_offset = len(query_tokens) + 2 + doc_offset = 0 + start_position = tok_start_position - doc_start + doc_offset + end_position = tok_end_position - doc_start + doc_offset + + if is_training and span_is_impossible: + start_position = cls_index + end_position = cls_index + + if example_index < 20: + logging.info("*** Example ***") + logging.info("unique_id: %s", unique_id) + logging.info("example_index: %s", example_index) + logging.info("doc_span_index: %s", doc_span_index) + logging.info("tok_start_to_orig_index: %s", + " ".join([str(x) for x in cur_tok_start_to_orig_index])) + logging.info("tok_end_to_orig_index: %s", + " ".join([str(x) for x in cur_tok_end_to_orig_index])) + logging.info( + "token_is_max_context: %s", " ".join([ + "%d:%s" % (x, y) + for (x, y) in six.iteritems(token_is_max_context) + ])) + logging.info("input_ids: %s", " ".join([str(x) for x in input_ids])) + logging.info("input_mask: %s", " ".join([str(x) for x in input_mask])) + logging.info("segment_ids: %s", " ".join([str(x) for x in segment_ids])) + + if is_training and span_is_impossible: + logging.info("impossible example span") + + if is_training and not span_is_impossible: + pieces = [ + sp_model.IdToPiece(token) + for token in tokens[start_position:(end_position + 1)] + ] + answer_text = sp_model.DecodePieces(pieces) + logging.info("start_position: %d", start_position) + logging.info("end_position: %d", end_position) + logging.info("answer: %s", + preprocess_utils.printable_text(answer_text)) + + # With multi processing, the example_index is actually the index + # within the current process therefore we use example_index=None to + # avoid being used in the future. # The current code does not use + # example_index of training data. + if is_training: + feat_example_index = None + else: + feat_example_index = example_index + + feature = InputFeatures( + unique_id=unique_id, + example_index=feat_example_index, + doc_span_index=doc_span_index, + tok_start_to_orig_index=cur_tok_start_to_orig_index, + tok_end_to_orig_index=cur_tok_end_to_orig_index, + token_is_max_context=token_is_max_context, + input_ids=input_ids, + input_mask=input_mask, + p_mask=p_mask, + segment_ids=segment_ids, + paragraph_len=paragraph_len, + cls_index=cls_index, + start_position=start_position, + end_position=end_position, + is_impossible=span_is_impossible) + + # Run callback + output_fn(feature) + + unique_id += 1 + if span_is_impossible: + cnt_neg += 1 + else: + cnt_pos += 1 + + logging.info("Total number of instances: %d = pos %d + neg %d", + cnt_pos + cnt_neg, cnt_pos, cnt_neg) + + +def _check_is_max_context(doc_spans, cur_span_index, position): + """Check if this is the "max context" doc span for the token.""" + + # Because of the sliding window approach taken to scoring documents, a single + # token can appear in multiple documents. E.g. + # Doc: the man went to the store and bought a gallon of milk + # Span A: the man went to the + # Span B: to the store and bought + # Span C: and bought a gallon of + # ... + # + # Now the word "bought" will have two scores from spans B and C. We only + # want to consider the score with "maximum context", which we define as + # the *minimum* of its left and right context (the *sum* of left and + # right context will always be the same, of course). + # + # In the example the maximum context for "bought" would be span C since + # it has 1 left context and 3 right context, while span B has 4 left context + # and 0 right context. 
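+ # Continuing the example: in span B "bought" has min(left, right) = min(4, 0) = 0,
+ # giving score 0 + 0.01 * 5 = 0.05, while in span C it has min(1, 3) = 1,
+ # giving 1.05, so span C is selected as the max-context span.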
+ best_score = None + best_span_index = None + for (span_index, doc_span) in enumerate(doc_spans): + end = doc_span.start + doc_span.length - 1 + if position < doc_span.start: + continue + if position > end: + continue + num_left_context = position - doc_span.start + num_right_context = end - position + score = min(num_left_context, num_right_context) + 0.01 * doc_span.length + if best_score is None or score > best_score: + best_score = score + best_span_index = span_index + + return cur_span_index == best_span_index + + +class FeatureWriter(object): + """Writes InputFeature to TF example file.""" + + def __init__(self, filename, is_training): + self.filename = filename + self.is_training = is_training + self.num_features = 0 + self._writer = tf.io.TFRecordWriter(filename) + + def process_feature(self, feature): + """Write a InputFeature to the TFRecordWriter as a tf.train.Example.""" + self.num_features += 1 + + def create_int_feature(values): + feature = tf.train.Feature( + int64_list=tf.train.Int64List(value=list(values))) + return feature + + def create_float_feature(values): + f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values))) + return f + + features = collections.OrderedDict() + features["unique_ids"] = create_int_feature([feature.unique_id]) + features["input_ids"] = create_int_feature(feature.input_ids) + features["input_mask"] = create_float_feature(feature.input_mask) + features["p_mask"] = create_float_feature(feature.p_mask) + features["segment_ids"] = create_int_feature(feature.segment_ids) + + features["cls_index"] = create_int_feature([feature.cls_index]) + + if self.is_training: + features["start_positions"] = create_int_feature([feature.start_position]) + features["end_positions"] = create_int_feature([feature.end_position]) + impossible = 0 + if feature.is_impossible: + impossible = 1 + features["is_impossible"] = create_float_feature([impossible]) + + tf_example = tf.train.Example(features=tf.train.Features(feature=features)) + self._writer.write(tf_example.SerializeToString()) + + def close(self): + self._writer.close() + + +def create_eval_data(spm_basename, + sp_model, + eval_examples, + max_seq_length, + max_query_length, + doc_stride, + uncased, + output_dir=None): + """Creates evaluation tfrecords.""" + eval_features = [] + eval_writer = None + if output_dir: + eval_rec_file = os.path.join( + output_dir, + "{}.slen-{}.qlen-{}.eval.tf_record".format(spm_basename, max_seq_length, + max_query_length)) + eval_feature_file = os.path.join( + output_dir, + "{}.slen-{}.qlen-{}.eval.features.pkl".format(spm_basename, + max_seq_length, + max_query_length)) + + eval_writer = FeatureWriter(filename=eval_rec_file, is_training=False) + + def append_feature(feature): + eval_features.append(feature) + if eval_writer: + eval_writer.process_feature(feature) + + convert_examples_to_features( + examples=eval_examples, + sp_model=sp_model, + max_seq_length=max_seq_length, + doc_stride=doc_stride, + max_query_length=max_query_length, + is_training=False, + output_fn=append_feature, + uncased=uncased) + + if eval_writer: + eval_writer.close() + with tf.io.gfile.GFile(eval_feature_file, "wb") as fout: + pickle.dump(eval_features, fout) + + return eval_features diff --git a/models/official/nlp/xlnet/training_utils.py b/models/official/nlp/xlnet/training_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..293e4633d8f4ae0f00fc5fbabb3a3996827ced81 --- /dev/null +++ b/models/official/nlp/xlnet/training_utils.py @@ -0,0 +1,310 @@ +# Copyright 2019 
The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""XLNet training utils.""" +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import os +import re + +from absl import logging + +# pytype: disable=attribute-error +# pylint: disable=g-bare-generic,unused-import +import tensorflow as tf +from typing import Any, Callable, Dict, Text, Optional + +from official.nlp.bert import model_training_utils +from official.nlp.xlnet import data_utils +from official.nlp.xlnet import xlnet_modeling as modeling + +_MIN_SUMMARY_STEPS = 10 + + +def _save_checkpoint(checkpoint, model_dir, checkpoint_prefix): + """Saves model to with provided checkpoint prefix.""" + + checkpoint_path = os.path.join(model_dir, checkpoint_prefix) + saved_path = checkpoint.save(checkpoint_path) + logging.info("Saving model as TF checkpoint: %s", saved_path) + return + + +def _float_metric_value(metric): + """Gets the value of a float-value keras metric.""" + return metric.result().numpy().astype(float) + + +def train( + strategy: tf.distribute.Strategy, + model_fn: Callable, + input_meta_data: Dict, + train_input_fn: Callable, + total_training_steps: int, + steps_per_loop: int, + optimizer: tf.keras.optimizers.Optimizer, + learning_rate_fn: tf.keras.optimizers.schedules.LearningRateSchedule, + eval_fn: Optional[Callable[[tf.keras.Model, int, tf.summary.SummaryWriter], + Any]] = None, + metric_fn: Optional[Callable[[], tf.keras.metrics.Metric]] = None, + init_checkpoint: Optional[Text] = None, + init_from_transformerxl: Optional[bool] = False, + model_dir: Optional[Text] = None, + save_steps: Optional[int] = None, + run_eagerly: Optional[bool] = False): + """Runs customized training. + + Args: + strategy: Distribution strategy on which to run low level training loop. + model_fn: The function returns a keras.Model. + input_meta_data: A dictionary of params: `mem_len`, `lr_layer_decay_rate`, + `n_layer`, `batch_size_per_core` and `d_model`. + train_input_fn: Function returns a tf.data.Dataset used for training. + total_training_steps: Number of steps to train in total. + steps_per_loop: Number of steps per graph-mode loop. In order to reduce + communication in eager context, training logs are printed every + steps_per_loop. + optimizer: The optimizer for model. + learning_rate_fn: the learning rate schedule. + eval_fn: A callback of evaluation function, that takes a keras.Model, + current step and evaluation summary writer. + metric_fn: A metrics function returns a Keras Metric object to record + evaluation result using evaluation dataset or with training dataset + after every epoch. + init_checkpoint: Optional checkpoint to load to `sub_model` returned by + `model_fn`. + init_from_transformerxl: Whether to load to `transformerxl_model` of + `model_fn`. 
+ model_dir: The directory of model (checkpoints, summaries). + save_steps: The frequency to save checkpoints. Every save_steps, we save a + model checkpoint. Model checkpoint will be saved and evaluation will be + conducted if evaluation dataset is provided. + run_eagerly: Whether to run training eagerly. + + Returns: + Last training step logits if training happens, otherwise returns None. + Raises: + TypeError: if model directory is not specified. + """ + required_arguments = [ + train_input_fn, total_training_steps, steps_per_loop, optimizer, + learning_rate_fn, save_steps + ] + if [arg for arg in required_arguments if arg is None]: + raise ValueError("`train_input_fn`, `total_training_steps`, " + "`steps_per_loop`, `optimizer`, `save_steps` and " + "`learning_rate_fn` are required parameters.") + if not model_dir: + raise TypeError("Model directory must be specified.") + train_iterator = data_utils.get_input_iterator(train_input_fn, strategy) + if not tf.io.gfile.exists(model_dir): + tf.io.gfile.mkdir(model_dir) + # Create summary writers + summary_dir = os.path.join(model_dir, "summaries") + if not tf.io.gfile.exists(summary_dir): + tf.io.gfile.mkdir(summary_dir) + train_summary_writer = None + eval_summary_writer = None + if eval_fn: + eval_summary_writer = tf.summary.create_file_writer( + os.path.join(summary_dir, "eval")) + if steps_per_loop >= _MIN_SUMMARY_STEPS: + # Only writes summary when the stats are collected sufficiently over + # enough steps. + train_summary_writer = tf.summary.create_file_writer( + os.path.join(summary_dir, "train")) + + with strategy.scope(): + model = model_fn() + + if init_checkpoint: + logging.info("restore from %s", init_checkpoint) + if init_from_transformerxl: + checkpoint = tf.train.Checkpoint( + transformer_xl=model.transformerxl_model) + else: + checkpoint = tf.train.Checkpoint(model=model) + checkpoint.restore(init_checkpoint) + + model.optimizer = optimizer + + if not hasattr(model, "optimizer"): + raise ValueError("User should set optimizer attribute to model.") + + train_loss_metric = tf.keras.metrics.Mean("training_loss", dtype=tf.float32) + train_metric = None + if metric_fn: + train_metric = metric_fn() + + def _replicated_step(inputs, mem=None): + """Replicated training step.""" + + inputs["mems"] = mem + with tf.GradientTape() as tape: + mem, logits = model(inputs, training=True) + loss = model.losses + train_loss_metric.update_state(loss) + if train_metric: + train_metric.update_state(inputs["label_ids"], logits) + scaled_loss = loss[0] * 1.0 / float(strategy.num_replicas_in_sync) + + # Collects training variables. + tvars = model.trainable_variables + grads = tape.gradient(scaled_loss, tvars) + clipped, _ = tf.clip_by_global_norm(grads, clip_norm=1.0) + + if input_meta_data["lr_layer_decay_rate"] != 1.0: + n_layer = 0 + for i in range(len(clipped)): + m = re.search(r"model/transformer/layer_(\d+?)/", tvars[i].name) + if not m: + continue + n_layer = max(n_layer, int(m.group(1)) + 1) + + for i in range(len(clipped)): + for l in range(n_layer): + if "model/transformer/layer_{}/".format(l) in tvars[i].name: + abs_rate = input_meta_data["lr_layer_decay_rate"]**( + n_layer - 1 - l) + clipped[i] *= abs_rate + logging.info("Apply mult {:.4f} to layer-{} grad of {}".format( + abs_rate, l, tvars[i].name)) + break + + optimizer.apply_gradients(zip(clipped, tvars)) + if input_meta_data["mem_len"] > 0: + return mem + + def train_steps(iterator, steps): + """Performs distributed training steps in a loop. 
+ + Args: + iterator: the distributed iterator of training datasets. + steps: an tf.int32 integer tensor to specify number of steps to run + inside host training loop. + + Raises: + ValueError: Any of the arguments or tensor shapes are invalid. + + Returns: + logits: logits computed. + """ + if not isinstance(steps, tf.Tensor): + raise ValueError("steps should be an Tensor. Python object may cause " + "retracing.") + + def cache_fn(): + """Initializes memory tensor used in XLNet pretraining.""" + mems = [] + if input_meta_data["mem_len"] > 0: + for _ in range(input_meta_data["n_layer"]): + zeros = tf.zeros([ + input_meta_data["mem_len"], + input_meta_data["batch_size_per_core"], + input_meta_data["d_model"] + ], + dtype=tf.float32) + mems.append(zeros) + return mems + + if input_meta_data["mem_len"] > 0: + mem = strategy.run(cache_fn) + for _ in tf.range(steps): + mem = strategy.run( + _replicated_step, args=( + next(iterator), + mem, + )) + else: + for _ in tf.range(steps): + strategy.run(_replicated_step, args=(next(iterator),)) + + if not run_eagerly: + train_steps = tf.function(train_steps) + + logging.info("Start training...") + checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer) + latest_checkpoint_file = tf.train.latest_checkpoint(model_dir) + if latest_checkpoint_file: + logging.info("Checkpoint file %s found and restoring from checkpoint", + latest_checkpoint_file) + checkpoint.restore(latest_checkpoint_file) + logging.info("Loading from checkpoint file completed") + + current_step = optimizer.iterations.numpy() + checkpoint_name = "xlnet_step_{step}.ckpt" + + while current_step < total_training_steps: + train_loss_metric.reset_states() + if train_metric: + train_metric.reset_states() + + steps = model_training_utils.steps_to_run(current_step, save_steps, + steps_per_loop) + train_steps(train_iterator, tf.convert_to_tensor(steps, dtype=tf.int32)) + current_step += steps + train_loss = _float_metric_value(train_loss_metric) + log_stream = "Train step: %d/%d / lr = %.9f / loss = %.7f" % ( + current_step, total_training_steps, learning_rate_fn(current_step), + train_loss) + if train_metric: + log_stream += " / %s = %f" % (train_metric.name, + _float_metric_value(train_metric)) + logging.info(log_stream) + if train_summary_writer: + with train_summary_writer.as_default(): + tf.summary.scalar( + "learning_rate", + learning_rate_fn(current_step), + step=current_step) + tf.summary.scalar( + train_loss_metric.name, train_loss, step=current_step) + if train_metric: + tf.summary.scalar( + train_metric.name, + _float_metric_value(train_metric), + step=current_step) + train_summary_writer.flush() + if model_dir and current_step % save_steps == 0: + _save_checkpoint(checkpoint, model_dir, + checkpoint_name.format(step=current_step)) + + if eval_fn and current_step % save_steps == 0: + + logging.info("Running evaluation after step: %s.", current_step) + + eval_fn(model, current_step, eval_summary_writer) + if model_dir: + _save_checkpoint(checkpoint, model_dir, + checkpoint_name.format(step=current_step)) + if eval_fn: + logging.info("Running final evaluation after training is complete.") + eval_metric = eval_fn(model, current_step, eval_summary_writer) + + training_summary = { + "total_training_steps": total_training_steps, + "train_loss": _float_metric_value(train_loss_metric), + } + if train_metric: + training_summary["last_train_metrics"] = _float_metric_value(train_metric) + if eval_fn: + # eval_metric is supposed to be a float. 
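+    # `eval_metric` is the value returned by `eval_fn` above; it is written
+    # directly into the text training summary, so a scalar (e.g. an accuracy
+    # value) is expected here rather than a dict or a tensor.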
+ training_summary["eval_metrics"] = eval_metric + + model_training_utils.write_txt_summary(training_summary, summary_dir) + + return model diff --git a/models/official/nlp/xlnet/xlnet_config.py b/models/official/nlp/xlnet/xlnet_config.py new file mode 100644 index 0000000000000000000000000000000000000000..7852eadf469476b4772533dce563366cd3478317 --- /dev/null +++ b/models/official/nlp/xlnet/xlnet_config.py @@ -0,0 +1,181 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utility functions used in XLNet model.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import json +import os + +import tensorflow as tf + + +def create_run_config(is_training, is_finetune, flags): + """Helper function for creating RunConfig.""" + kwargs = dict( + is_training=is_training, + use_tpu=flags.use_tpu, + dropout=flags.dropout, + dropout_att=flags.dropout_att, + init_method=flags.init_method, + init_range=flags.init_range, + init_std=flags.init_std, + clamp_len=flags.clamp_len) + + if not is_finetune: + kwargs.update(dict( + mem_len=flags.mem_len, + reuse_len=flags.reuse_len, + bi_data=flags.bi_data, + clamp_len=flags.clamp_len, + same_length=flags.same_length)) + + return RunConfig(**kwargs) + + +# TODO(hongkuny): refactor XLNetConfig and RunConfig. +class XLNetConfig(object): + """Configs for XLNet model. + + XLNetConfig contains hyperparameters that are specific to a model checkpoint; + i.e., these hyperparameters should be the same between + pretraining and finetuning. + + The following hyperparameters are defined: + n_layer: int, the number of layers. + d_model: int, the hidden size. + n_head: int, the number of attention heads. + d_head: int, the dimension size of each attention head. + d_inner: int, the hidden size in feed-forward layers. + ff_activation: str, "relu" or "gelu". + untie_r: bool, whether to untie the biases in attention. + n_token: int, the vocab size. + """ + + def __init__(self, FLAGS=None, json_path=None, args_dict=None): + """Constructing an XLNetConfig. + + One of FLAGS or json_path should be provided. + + Args: + FLAGS: An FLAGS instance. + json_path: A path to a json config file. + args_dict: A dict for args. 
+ """ + + assert FLAGS is not None or json_path is not None or args_dict is not None + + self.keys = ['n_layer', 'd_model', 'n_head', 'd_head', 'd_inner', + 'ff_activation', 'untie_r', 'n_token'] + + if FLAGS is not None: + self.init_from_flags(FLAGS) + + if json_path is not None: + self.init_from_json(json_path) + + if args_dict is not None: + self.init_from_dict(args_dict) + + def init_from_dict(self, args_dict): + """Constructs a `BertConfig` from a Python dictionary of parameters.""" + for key in self.keys: + setattr(self, key, args_dict[key]) + + def init_from_flags(self, flags): + for key in self.keys: + setattr(self, key, getattr(flags, key)) + + def init_from_json(self, json_path): + with tf.io.gfile.GFile(json_path) as f: + json_data = json.load(f) + self.init_from_dict(json_data) + + def to_json(self, json_path): + """Save XLNetConfig to a json file.""" + json_data = {} + for key in self.keys: + json_data[key] = getattr(self, key) + + json_dir = os.path.dirname(json_path) + if not tf.io.gfile.exists(json_dir): + tf.io.gfile.makedirs(json_dir) + with tf.io.gfile.GFile(json_path, 'w') as f: + json.dump(json_data, f, indent=4, sort_keys=True) + + +class RunConfig(object): + """Class of RunConfig. + + RunConfig contains hyperparameters that could be different + between pretraining and finetuning. + These hyperparameters can also be changed from run to run. + We store them separately from XLNetConfig for flexibility. + """ + + def __init__(self, + is_training, + use_tpu, + dropout, + dropout_att, + init_method='normal', + init_range=0.1, + init_std=0.02, + mem_len=None, + reuse_len=None, + bi_data=False, + clamp_len=-1, + same_length=False, + use_cls_mask=True): + """Initializes RunConfig. + + Args: + is_training: bool, whether in training mode. + use_tpu: bool, whether TPUs are used. + dropout: float, dropout rate. + dropout_att: float, dropout rate on attention probabilities. + init_method: str, the initialization scheme, either "normal" or "uniform". + init_range: float, initialize the parameters with a uniform distribution + in [-init_range, init_range]. Only effective when init="uniform". + init_std: float, initialize the parameters with a normal distribution + with mean 0 and stddev init_std. Only effective when init="normal". + mem_len: int, the number of tokens to cache. + reuse_len: int, the number of tokens in the currect batch to be cached + and reused in the future. + bi_data: bool, whether to use bidirectional input pipeline. + Usually set to True during pretraining and False during finetuning. + clamp_len: int, clamp all relative distances larger than clamp_len. + -1 means no clamping. + same_length: bool, whether to use the same attention length + for each token. + use_cls_mask: bool, whether to introduce cls mask. + """ + + self.init_method = init_method + self.init_range = init_range + self.init_std = init_std + self.is_training = is_training + self.dropout = dropout + self.dropout_att = dropout_att + self.use_tpu = use_tpu + self.mem_len = mem_len + self.reuse_len = reuse_len + self.bi_data = bi_data + self.clamp_len = clamp_len + self.same_length = same_length + self.use_cls_mask = use_cls_mask diff --git a/models/official/nlp/xlnet/xlnet_modeling.py b/models/official/nlp/xlnet/xlnet_modeling.py new file mode 100644 index 0000000000000000000000000000000000000000..3e16af8e9930ba4dabb8e92743769cf1ebb48585 --- /dev/null +++ b/models/official/nlp/xlnet/xlnet_modeling.py @@ -0,0 +1,1290 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Keras layers of XLNet model in TF 2.0.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import copy +import numpy as np + +import tensorflow as tf +from official.nlp.xlnet import data_utils + + +def gelu(x): + """Gaussian Error Linear Unit. + + This is a smoother version of the RELU. + Original paper: https://arxiv.org/abs/1606.08415 + Args: + x: float Tensor to perform activation. + + Returns: + `x` with the GELU activation applied. + """ + cdf = 0.5 * (1.0 + tf.tanh( + (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))) + return x * cdf + + +def rel_shift(x, klen=-1): + """Performs relative shift to form the relative attention score.""" + x_size = tf.shape(x) + + x = tf.reshape(x, [x_size[1], x_size[0], x_size[2], x_size[3]]) + x = tf.slice(x, [1, 0, 0, 0], [-1, -1, -1, -1]) + x = tf.reshape(x, [x_size[0], x_size[1] - 1, x_size[2], x_size[3]]) + x = tf.slice(x, [0, 0, 0, 0], [-1, klen, -1, -1]) + + return x + + +def _get_initializer(flags): + """Get variable intializer.""" + if flags.init_method == 'uniform': + initializer = tf.keras.initializers.RandomUniform( + minval=-flags.init_range, maxval=flags.init_range) + elif flags.init_method == 'normal': + initializer = tf.keras.initializers.RandomNormal(stddev=flags.init_std) + else: + raise ValueError('Initializer {} not supported'.format(flags.init_method)) + return initializer + + +def _create_mask(qlen, mlen, dtype=tf.float32, same_length=False): + """Creates attention mask when single-side context allowed only.""" + attn_mask = tf.ones([qlen, qlen], dtype=dtype) + mask_u = tf.linalg.band_part(attn_mask, 0, -1) + mask_dia = tf.linalg.band_part(attn_mask, 0, 0) + attn_mask_pad = tf.zeros([qlen, mlen], dtype=dtype) + ret = tf.concat([attn_mask_pad, mask_u - mask_dia], 1) + if same_length: + mask_l = tf.linalg.band_part(attn_mask, -1, 0) + ret = tf.concat([ret[:, :qlen] + mask_l - mask_dia, ret[:, qlen:]], 1) + + return ret + + +def _cache_mem(curr_out, prev_mem, mem_len, reuse_len=None): + """cache hidden states into memory.""" + + if mem_len is None or mem_len == 0: + return None + else: + if reuse_len is not None and reuse_len > 0: + curr_out = curr_out[:reuse_len] + + if prev_mem is None: + new_mem = curr_out[-mem_len:] + else: + new_mem = tf.concat([prev_mem, curr_out], 0)[-mem_len:] + + return tf.keras.backend.stop_gradient(new_mem) + + +def is_special_none_tensor(tensor): + """Checks if a tensor is a special None Tensor.""" + return tensor.shape.ndims == 0 and tensor.dtype == tf.int32 + + +class PositionalEmbedding(tf.keras.layers.Layer): + """Generates relative positional embeddings used in Transformer-XL and XLNet.""" + + def __init__(self, dim, **kwargs): + super(PositionalEmbedding, self).__init__(**kwargs) + self.dim = dim + + def build(self, unused_input_shapes): + 
"""Constructs inversed frequency vector for positional embedding layer.""" + self.inv_freq = 1.0 / (10000.0**(tf.range(0, self.dim, 2.0) / self.dim)) + super(PositionalEmbedding, self).build(unused_input_shapes) + + def call(self, pos_seq, batch_size): + """Implements call() for the layer.""" + sinusoid_inp = tf.einsum('i,d->id', pos_seq, self.inv_freq) + pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], -1) + pos_emb = pos_emb[:, None, :] + + if batch_size is not None: + pos_emb = tf.tile(pos_emb, [1, batch_size, 1]) + + return pos_emb + + +class RelativeAttention(tf.keras.layers.Layer): + """Core calculations for relative attention.""" + + def __init__(self, dropout_att, scale): + super(RelativeAttention, self).__init__() + self.scale = scale + self.dropout_att = dropout_att + + def build(self, unused_input_shapes): + """Implements build() for the layer.""" + + self.attention_probs_dropout = tf.keras.layers.Dropout( + rate=self.dropout_att) + + super(RelativeAttention, self).build(unused_input_shapes) + + def call(self, q_head, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat, + r_w_bias, r_r_bias, r_s_bias, attn_mask): + """Implements call() for the layer.""" + + # content based attention score + ac = tf.einsum('ibnd,jbnd->ijbn', q_head + r_w_bias, k_head_h) + + # position based attention score + bd = tf.einsum('ibnd,jbnd->ijbn', q_head + r_r_bias, k_head_r) + bd = rel_shift(bd, klen=tf.shape(ac)[1]) + + # segment-based attention score + if seg_mat is None: + ef = 0 + else: + ef = tf.einsum('ibnd,snd->isbn', q_head + r_s_bias, seg_embed) + tgt_shape = tf.shape(bd) + ef = tf.where( + tf.broadcast_to(tf.expand_dims(seg_mat, 3), tgt_shape), + tf.broadcast_to(ef[:, 1:, :, :], tgt_shape), + tf.broadcast_to(ef[:, :1, :, :], tgt_shape)) + + # merges attention scores and performs masking + attn_score = (ac + bd + ef) * self.scale + if attn_mask is not None: + attn_score = attn_score - 1e30 * attn_mask + + # attention probability + attn_prob = tf.nn.softmax(attn_score, 1) + attn_prob = self.attention_probs_dropout(attn_prob) + + # attention output + attn_vec = tf.einsum('ijbn,jbnd->ibnd', attn_prob, v_head_h) + + return attn_vec + + +class PositionwiseFF(tf.keras.layers.Layer): + """Positionwise feed-forward layer.""" + + def __init__(self, d_model, d_inner, dropout, kernel_initializer, + activation_type, **kwargs): + super(PositionwiseFF, self).__init__(**kwargs) + self.d_model = d_model + self.d_inner = d_inner + self.dropout = dropout + self.activation_type = activation_type + self.kernel_initializer = kernel_initializer + + def build(self, unused_input_shapes): + """Implements build() for the layer.""" + if self.activation_type == 'relu': + activation = tf.nn.relu + elif self.activation_type == 'gelu': + activation = gelu + else: + raise (ValueError('Unsupported activation type {}'.format( + self.activation_type))) + self.inner_projection_layer = ( + tf.keras.layers.Dense( + units=self.d_inner, + activation=activation, + kernel_initializer=self.kernel_initializer, + name='layer_1')) + self.output_projection_layer = ( + tf.keras.layers.Dense( + units=self.d_model, + kernel_initializer=self.kernel_initializer, + name='layer_2')) + self.output_dropout = tf.keras.layers.Dropout( + rate=self.dropout, name='drop_2') + self.output_layer_norm = ( + tf.keras.layers.LayerNormalization( + name='LayerNorm', axis=-1, epsilon=1e-12)) + super(PositionwiseFF, self).build(unused_input_shapes) + + def call(self, inp): + """Implements call() for the layer.""" + + output = 
self.inner_projection_layer(inp) + output = self.output_projection_layer(output) + output = self.output_dropout(output) + output = self.output_layer_norm(output + inp) + return output + + +class EmbeddingLookup(tf.keras.layers.Layer): + """Looks up words embeddings for id tensor.""" + + def __init__(self, n_token, d_embed, initializer, **kwargs): + super(EmbeddingLookup, self).__init__(**kwargs) + self.n_token = n_token + self.d_embed = d_embed + self.initializer = initializer + + def build(self, unused_input_shapes): + """Implements build() for the layer.""" + self.lookup_table = self.add_weight( + 'lookup_table', + shape=[self.n_token, self.d_embed], + initializer=self.initializer, + dtype=self.dtype) + + super(EmbeddingLookup, self).build(unused_input_shapes) + + def call(self, inputs): + return tf.nn.embedding_lookup(self.lookup_table, inputs) + + +class RelativeMultiheadAttention(tf.keras.layers.Layer): + """Multi-head attention with relative embedding.""" + + def __init__(self, d_model, n_head, d_head, dropout, dropout_att, + kernel_initializer, **kwargs): + super(RelativeMultiheadAttention, self).__init__(**kwargs) + self.d_model = d_model + self.n_head = n_head + self.d_head = d_head + self.dropout = dropout + self.dropout_att = dropout_att + self.initializer = kernel_initializer + + def build(self, unused_input_shapes): + """Implements build() for the layer.""" + self.scale = 1.0 / (self.d_head**0.5) + + self.output_layer_norm = tf.keras.layers.LayerNormalization( + name='LayerNorm', axis=-1, epsilon=1e-12) + + self.kh_projection_layer = self.add_weight( + 'k/kernel', + shape=[self.d_model, self.n_head, self.d_head], + initializer=self.initializer) + self.vh_projection_layer = self.add_weight( + 'v/kernel', + shape=[self.d_model, self.n_head, self.d_head], + initializer=self.initializer) + self.kr_projection_layer = self.add_weight( + 'r/kernel', + shape=[self.d_model, self.n_head, self.d_head], + initializer=self.initializer) + self.qh_projection_layer = self.add_weight( + 'q/kernel', + shape=[self.d_model, self.n_head, self.d_head], + initializer=self.initializer) + + self.relative_attention_layer = RelativeAttention( + dropout_att=self.dropout_att, scale=self.scale) + + self.proj_o = self.add_weight( + 'o/kernel', + shape=[self.d_model, self.n_head, self.d_head], + initializer=self.initializer) + + self.attention_dropout = tf.keras.layers.Dropout(rate=self.dropout) + + super(RelativeMultiheadAttention, self).build(unused_input_shapes) + + def call(self, h, g, r, r_w_bias, r_r_bias, seg_mat, r_s_bias, seg_embed, + attn_mask_h, attn_mask_g, mems, target_mapping): + """Implements call() for the layer.""" + + if mems is not None and mems.shape.ndims > 1: + cat = tf.concat([mems, h], 0) + else: + cat = h + + # content heads + q_head_h = tf.einsum('ibh,hnd->ibnd', h, self.qh_projection_layer) + k_head_h = tf.einsum('ibh,hnd->ibnd', cat, self.kh_projection_layer) + v_head_h = tf.einsum('ibh,hnd->ibnd', cat, self.vh_projection_layer) + + # positional heads + k_head_r = tf.einsum('ibh,hnd->ibnd', r, self.kr_projection_layer) + + # core attention ops + attn_vec_h = self.relative_attention_layer(q_head_h, k_head_h, v_head_h, + k_head_r, seg_embed, seg_mat, + r_w_bias, r_r_bias, r_s_bias, + attn_mask_h) + + # post processing + output_h = tf.einsum('ibnd,hnd->ibh', attn_vec_h, self.proj_o) + output_h = self.attention_dropout(output_h) + output_h = self.output_layer_norm(output_h + h) + + output_g = None + if g is not None: # enable two-stream attention + # g-stream + q_head_g = 
tf.einsum('ibh,hnd->ibnd', g, self.qh_projection_layer) + if target_mapping is not None: + q_head_g = tf.einsum('mbnd,mlb->lbnd', q_head_g, target_mapping) + attn_vec_g = self.relative_attention_layer(q_head_g, k_head_h, v_head_h, + k_head_r, seg_embed, seg_mat, + r_w_bias, r_r_bias, r_s_bias, + attn_mask_g) + attn_vec_g = tf.einsum('lbnd,mlb->mbnd', attn_vec_g, target_mapping) + + else: + attn_vec_g = self.relative_attention_layer(q_head_g, k_head_h, v_head_h, + k_head_r, seg_embed, seg_mat, + r_w_bias, r_r_bias, r_s_bias, + attn_mask_g) + + # post processing + output_g = tf.einsum('ibnd,hnd->ibh', attn_vec_g, self.proj_o) + output_g = self.attention_dropout(output_g) + output_g = self.output_layer_norm(output_g + g) + + return (output_h, output_g) + + +class TransformerXLModel(tf.keras.layers.Layer): + """Defines a Transformer-XL computation graph with additional support for XLNet.""" + + def __init__(self, + n_token, + n_layer, + d_model, + n_head, + d_head, + d_inner, + dropout, + dropout_att, + attn_type, + bi_data, + is_training, + initializer, + mem_len=None, + same_length=False, + clamp_len=-1, + untie_r=False, + use_tpu=True, + reuse_len=None, + ff_activation='relu', + use_cls_mask=False, + **kwargs): + """Initializes TransformerXLModel. + + Args: + n_token: int, the number of tokens in vocabulary. + n_layer: int, the number of layers. + d_model: int, the hidden size. + n_head: int, the number of attention heads. + d_head: int, the dimension size of each attention head. + d_inner: int, the hidden size in feed-forward layers. + dropout: float, dropout rate. + dropout_att: float, dropout rate on attention probabilities. + attn_type: str, "uni" or "bi". + bi_data: bool, whether to use bidirectional input pipeline. Usually set to + True during pretraining and False during finetuning. + is_training: bool, whether in training mode. + initializer: A tf initializer. + mem_len: int, the number of tokens to cache. + same_length: bool, whether to use the same attention length for each + token. + clamp_len: int, clamp all relative distances larger than clamp_len. -1 + means no clamping. + untie_r: bool, whether to untie the biases in attention. + use_tpu: bool, whether TPUs are used. + reuse_len: int, the number of tokens in the currect batch to be cached and + reused in the future. + ff_activation: str, "relu" or "gelu". + use_cls_mask: bool, whether to introduce cls mask. + **kwargs: Other parameters. 
+ """ + + super(TransformerXLModel, self).__init__(**kwargs) + + self.n_token = n_token + self.initializer = initializer + self.attn_type = attn_type + self.n_layer = n_layer + self.d_model = d_model + self.n_head = n_head + self.d_head = d_head + self.d_inner = d_inner + self.ff_activation = ff_activation + self.untie_r = untie_r + self.use_tpu = use_tpu + self.dropout = dropout + self.dropout_att = dropout_att + + self.mem_len = mem_len + self.reuse_len = reuse_len + self.bi_data = bi_data + self.clamp_len = clamp_len + self.same_length = same_length + self.use_cls_mask = use_cls_mask + + def build(self, unused_input_shapes): + """Implements build() for the layer.""" + self.tf_float = tf.float32 + + self.embedding_lookup = EmbeddingLookup( + n_token=self.n_token, + d_embed=self.d_model, + initializer=self.initializer, + dtype=self.tf_float, + name='word_embedding') + + self.h_dropout = tf.keras.layers.Dropout(rate=self.dropout) + self.g_dropout = tf.keras.layers.Dropout(rate=self.dropout) + + if self.untie_r: + self.r_w_bias = ( + self.add_weight( + 'r_w_bias', + shape=[self.n_layer, self.n_head, self.d_head], + dtype=self.tf_float, + initializer=self.initializer)) + self.r_r_bias = ( + self.add_weight( + 'r_r_bias', + shape=[self.n_layer, self.n_head, self.d_head], + dtype=self.tf_float, + initializer=self.initializer)) + self.r_s_bias = ( + self.add_weight( + 'r_s_bias', + shape=[self.n_layer, self.n_head, self.d_head], + dtype=self.tf_float, + initializer=self.initializer)) + else: + self.r_w_bias = ( + self.add_weight( + 'r_w_bias', + shape=[self.n_head, self.d_head], + dtype=self.tf_float, + initializer=self.initializer)) + self.r_r_bias = ( + self.add_weight( + 'r_r_bias', + shape=[self.n_head, self.d_head], + dtype=self.tf_float, + initializer=self.initializer)) + self.r_s_bias = ( + self.add_weight( + 'r_s_bias', [self.n_head, self.d_head], + dtype=self.tf_float, + initializer=self.initializer)) + + self.seg_embed = self.add_weight( + 'seg_embed', [self.n_layer, 2, self.n_head, self.d_head], + dtype=self.tf_float, + initializer=self.initializer) + + self.mask_emb = self.add_weight( + 'mask_emb/mask_emb', shape=[1, 1, self.d_model], dtype=self.tf_float) + + self.emb_dropout = tf.keras.layers.Dropout(rate=self.dropout) + self.fwd_position_embedding = PositionalEmbedding(self.d_model) + self.bwd_position_embedding = PositionalEmbedding(self.d_model) + + self.rel_multihead_layers = [] + self.h_positionwise_ffn_layers = [] + for i in range(self.n_layer): + self.rel_multihead_layers.append( + RelativeMultiheadAttention( + d_model=self.d_model, + dropout=self.dropout, + n_head=self.n_head, + d_head=self.d_head, + dropout_att=self.dropout_att, + kernel_initializer=self.initializer, + name='layer_%d/rel_attn' % (i))) + self.h_positionwise_ffn_layers.append( + PositionwiseFF( + d_model=self.d_model, + d_inner=self.d_inner, + dropout=self.dropout, + kernel_initializer=self.initializer, + activation_type=self.ff_activation, + name='layer_%d/ff' % (i))) + + self.output_dropout = tf.keras.layers.Dropout(rate=self.dropout) + + super(TransformerXLModel, self).build(unused_input_shapes) + + def __call__(self, + inp_k, + seg_id=None, + input_mask=None, + mems=None, + perm_mask=None, + target_mapping=None, + inp_q=None, + **kwargs): + # Uses dict to feed inputs into call() in order to keep mems as a python + # list. 
+ inputs = { + 'inp_k': inp_k, + 'seg_id': seg_id, + 'input_mask': input_mask, + 'mems': mems, + 'perm_mask': perm_mask, + 'target_mapping': target_mapping, + 'inp_q': inp_q + } + return super(TransformerXLModel, self).__call__(inputs, **kwargs) + + def call(self, inputs): + """Implements call() for the layer.""" + inp_k = inputs['inp_k'] + seg_id = inputs['seg_id'] + input_mask = inputs['input_mask'] + mems = inputs['mems'] + perm_mask = inputs['perm_mask'] + target_mapping = inputs['target_mapping'] + inp_q = inputs['inp_q'] + + new_mems = [] + + bsz = tf.shape(inp_k)[1] + + qlen = inp_k.shape.as_list()[0] + + mlen = mems[0].shape.as_list()[0] if mems is not None else 0 + klen = mlen + qlen + + ##### Attention mask + # causal attention mask + if self.attn_type == 'uni': + attn_mask = _create_mask(qlen, mlen, self.tf_float, self.same_length) + # pylint: enable=protected-access + attn_mask = attn_mask[:, :, None, None] + elif self.attn_type == 'bi': + attn_mask = None + else: + raise ValueError('Unsupported attention type: {}'.format(self.attn_type)) + + # data mask: input mask & perm mask + if input_mask is not None and perm_mask is not None: + data_mask = input_mask[None] + perm_mask + + elif input_mask is not None and perm_mask is None: + data_mask = input_mask[None] + elif input_mask is None and perm_mask is not None: + data_mask = perm_mask + else: + data_mask = None + + if data_mask is not None: + # all mems can be attended to + mems_mask = tf.zeros([tf.shape(data_mask)[0], mlen, bsz], + dtype=self.tf_float) + data_mask = tf.concat([mems_mask, data_mask], 1) + if attn_mask is None: + attn_mask = data_mask[:, :, :, None] + else: + attn_mask += data_mask[:, :, :, None] + + if attn_mask is not None: + attn_mask = tf.cast(attn_mask > 0, dtype=self.tf_float) + + if attn_mask is not None: + non_tgt_mask = -tf.eye(qlen, dtype=self.tf_float) + non_tgt_mask = tf.concat( + [tf.zeros([qlen, mlen], dtype=self.tf_float), non_tgt_mask], axis=-1) + non_tgt_mask = tf.cast( + (attn_mask + non_tgt_mask[:, :, None, None]) > 0, dtype=self.tf_float) + else: + non_tgt_mask = None + + word_emb_k = self.embedding_lookup(inp_k) + + if inp_q is not None: + if target_mapping is not None: + word_emb_q = tf.tile(self.mask_emb, + [tf.shape(target_mapping)[0], bsz, 1]) + else: + inp_q_ext = inp_q[:, :, None] + word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k + + output_h = self.h_dropout(word_emb_k) + output_g = None + if inp_q is not None: + output_g = self.g_dropout(word_emb_q) + + ##### Segment embedding + if seg_id is not None: + + # Convert `seg_id` to one-hot `seg_mat` + + mem_pad = tf.zeros([mlen, bsz], dtype=tf.int32) + + cat_id = tf.concat([mem_pad, seg_id], 0) + + if self.use_cls_mask: + # `1` indicates not in the same segment [qlen x klen x bsz] + # seg_id: [qlen x bsz] & cat_id: [klen x bsz] + cls_mat = tf.logical_or( + tf.equal(seg_id, tf.constant([data_utils.SEG_ID_CLS]))[:, None], + tf.equal(cat_id, tf.constant([data_utils.SEG_ID_CLS]))[None, :]) + seg_mat = tf.equal(seg_id[:, None], cat_id[None, :]) + seg_mat = tf.logical_or(cls_mat, seg_mat) + else: + seg_mat = tf.logical_not(tf.equal(seg_id[:, None], cat_id[None, :])) + else: + seg_mat = None + + dtype = self.tf_float + freq_seq = tf.range(0, self.d_model, 2.0) + if dtype is not None and dtype != tf.float32: + freq_seq = tf.cast(freq_seq, dtype=self.dtype) + + if self.attn_type == 'bi': + beg, end = klen, -qlen + elif self.attn_type == 'uni': + beg, end = klen, -1 + else: + raise ValueError('Unknown `attn_type` 
{}.'.format(self.attn_type))
+
+    if self.bi_data:
+      fwd_pos_seq = tf.range(beg, end, -1.0)
+      bwd_pos_seq = tf.range(-beg, -end, 1.0)
+
+      if dtype is not None and dtype != tf.float32:
+        fwd_pos_seq = tf.cast(fwd_pos_seq, dtype=dtype)
+        bwd_pos_seq = tf.cast(bwd_pos_seq, dtype=dtype)
+
+      if self.clamp_len > 0:
+        fwd_pos_seq = tf.clip_by_value(fwd_pos_seq, -self.clamp_len,
+                                       self.clamp_len)
+        bwd_pos_seq = tf.clip_by_value(bwd_pos_seq, -self.clamp_len,
+                                       self.clamp_len)
+
+      if bsz is not None:
+        fwd_pos_emb = self.fwd_position_embedding(fwd_pos_seq, bsz // 2)
+        bwd_pos_emb = self.bwd_position_embedding(bwd_pos_seq, bsz // 2)
+      else:
+        fwd_pos_emb = self.fwd_position_embedding(fwd_pos_seq, None)
+        bwd_pos_emb = self.bwd_position_embedding(bwd_pos_seq, None)
+
+      pos_emb = tf.concat([fwd_pos_emb, bwd_pos_emb], axis=1)
+    else:
+      fwd_pos_seq = tf.range(beg, end, -1.0)
+      if dtype is not None and dtype != tf.float32:
+        fwd_pos_seq = tf.cast(fwd_pos_seq, dtype=dtype)
+      if self.clamp_len > 0:
+        fwd_pos_seq = tf.clip_by_value(fwd_pos_seq, -self.clamp_len,
+                                       self.clamp_len)
+
+      pos_emb = self.fwd_position_embedding(fwd_pos_seq, bsz)
+
+    pos_emb = self.emb_dropout(pos_emb)
+
+    if mems is None:
+      mems = [None] * self.n_layer
+    for i in range(self.n_layer):
+      # cache new mems
+      new_mems.append(
+          _cache_mem(output_h, mems[i], self.mem_len, self.reuse_len))
+      # pylint: enable=protected-access
+
+      # segment bias
+      if seg_id is None:
+        r_s_bias_i = None
+        seg_embed_i = None
+      else:
+        r_s_bias_i = self.r_s_bias if not self.untie_r else self.r_s_bias[i]
+        seg_embed_i = self.seg_embed[i]
+
+      ffn_layer = self.h_positionwise_ffn_layers[i]
+      attention_layer = self.rel_multihead_layers[i]
+      output_h, output_g = attention_layer(
+          h=output_h,
+          g=output_g,
+          r=pos_emb,
+          r_w_bias=self.r_w_bias if not self.untie_r else self.r_w_bias[i],
+          r_r_bias=self.r_r_bias if not self.untie_r else self.r_r_bias[i],
+          seg_mat=seg_mat,
+          r_s_bias=r_s_bias_i,
+          seg_embed=seg_embed_i,
+          attn_mask_h=non_tgt_mask,
+          attn_mask_g=attn_mask,
+          mems=mems[i],
+          target_mapping=target_mapping)
+      output_h = ffn_layer(output_h)
+      if output_g is not None:
+        output_g = ffn_layer(output_g)
+
+    if inp_q is not None:
+      output = output_g
+    else:
+      output = output_h
+
+    return output, new_mems, None
+
+
+class PretrainingXLNetModel(tf.keras.Model):
+  """XLNet keras model combined with pretraining LM loss layer.
+ + See the original paper: https://arxiv.org/pdf/1906.08237.pdf + + """ + + def __init__(self, use_proj, xlnet_config, run_config, **kwargs): + super(PretrainingXLNetModel, self).__init__(**kwargs) + self.run_config = run_config + self.initializer = _get_initializer(run_config) + self.xlnet_config = copy.deepcopy(xlnet_config) + + self.transformerxl_model = TransformerXLModel( + n_token=self.xlnet_config.n_token, + initializer=self.initializer, + attn_type='bi', + n_layer=self.xlnet_config.n_layer, + d_model=self.xlnet_config.d_model, + n_head=self.xlnet_config.n_head, + d_head=self.xlnet_config.d_head, + d_inner=self.xlnet_config.d_inner, + ff_activation=self.xlnet_config.ff_activation, + untie_r=self.xlnet_config.untie_r, + is_training=self.run_config.is_training, + use_tpu=self.run_config.use_tpu, + dropout=self.run_config.dropout, + dropout_att=self.run_config.dropout_att, + mem_len=self.run_config.mem_len, + reuse_len=self.run_config.reuse_len, + bi_data=self.run_config.bi_data, + clamp_len=self.run_config.clamp_len, + same_length=self.run_config.same_length, + use_cls_mask=self.run_config.use_cls_mask, + name='transformer') + self.lmloss_layer = LMLossLayer( + n_token=self.xlnet_config.n_token, + d_model=self.xlnet_config.d_model, + initializer=self.initializer, + tie_weight=True, + bi_data=self.run_config.bi_data, + use_tpu=self.run_config.use_tpu, + use_proj=use_proj, + name='lm_loss') + + def call(self, features): + """Implements call() for the layer.""" + + input_ids = tf.transpose(features['input_k'], [1, 0]) + inp_q = tf.transpose(features['input_q'], [1, 0]) + + seg_ids = tf.transpose(features['seg_id'], [1, 0]) + + perm_mask = tf.transpose(features['perm_mask'], [1, 2, 0]) + + target_mapping = tf.transpose(features['target_mapping'], [1, 2, 0]) + + # target for LM loss + target = tf.transpose(features['target'], [1, 0]) + + # target mask for LM loss + tgt_mask = tf.transpose(features['target_mask'], [1, 0]) + + mems = features.get('mems', None) + + transformerxl_output, self.new_mems, self.lookup_table = self.transformerxl_model( + input_ids, + seg_id=seg_ids, + input_mask=None, + mems=mems, + perm_mask=perm_mask, + target_mapping=target_mapping, + inp_q=inp_q) + lm_loss, _ = self.lmloss_layer( + hidden=transformerxl_output, + target=target, + lookup_table=self.transformerxl_model.embedding_lookup.lookup_table, + target_mask=tgt_mask) + self.add_loss(lm_loss) + return self.new_mems, transformerxl_output + + +class ClassificationXLNetModel(tf.keras.Model): + """XLNet keras model combined with classification loss layer. 
+ + See the original paper: https://arxiv.org/pdf/1906.08237.pdf + + """ + + def __init__(self, xlnet_config, run_config, n_class, summary_type, **kwargs): + super(ClassificationXLNetModel, self).__init__(**kwargs) + self.run_config = run_config + self.initializer = _get_initializer(run_config) + self.xlnet_config = copy.deepcopy(xlnet_config) + + self.transformerxl_model = TransformerXLModel( + n_token=self.xlnet_config.n_token, + initializer=self.initializer, + attn_type='bi', + n_layer=self.xlnet_config.n_layer, + d_model=self.xlnet_config.d_model, + n_head=self.xlnet_config.n_head, + d_head=self.xlnet_config.d_head, + d_inner=self.xlnet_config.d_inner, + ff_activation=self.xlnet_config.ff_activation, + untie_r=self.xlnet_config.untie_r, + is_training=self.run_config.is_training, + use_tpu=self.run_config.use_tpu, + dropout=self.run_config.dropout, + dropout_att=self.run_config.dropout_att, + mem_len=self.run_config.mem_len, + reuse_len=self.run_config.reuse_len, + bi_data=self.run_config.bi_data, + clamp_len=self.run_config.clamp_len, + same_length=self.run_config.same_length, + name='transformer') + + self.summarization_layer = Summarization( + d_model=self.xlnet_config.d_model, + n_head=self.xlnet_config.n_head, + d_head=self.xlnet_config.d_head, + dropout=self.run_config.dropout, + dropout_att=self.run_config.dropout_att, + initializer=self.initializer, + use_proj=True, + summary_type=summary_type, + name='sequence_summary') + + self.cl_loss_layer = ClassificationLossLayer( + n_class=n_class, initializer=self.initializer, name='classification') + + def call(self, features): + """Implements call() for the layer.""" + bsz_per_core = tf.shape(features['input_ids'])[0] + + input_ids = tf.transpose(features['input_ids'], [1, 0]) + seg_ids = tf.transpose(features['segment_ids'], [1, 0]) + input_mask = tf.transpose(features['input_mask'], [1, 0]) + + label = tf.reshape(features['label_ids'], [bsz_per_core]) + + mems = features.get('mems', None) + + transformerxl_output, new_mems, self.lookup_table = ( + self.transformerxl_model(input_ids, seg_ids, input_mask, mems)) + + summary = self.summarization_layer(transformerxl_output) + per_example_loss, logits = self.cl_loss_layer(hidden=summary, labels=label) + self.add_loss(tf.keras.backend.mean(per_example_loss)) + return new_mems, logits + + +class LMLossLayer(tf.keras.layers.Layer): + """Layer computing cross entropy loss for language modeling.""" + + def __init__(self, + n_token, + d_model, + initializer, + tie_weight=False, + bi_data=True, + use_tpu=False, + use_proj=False, + **kwargs): + """Constructs LMLoss layer. + + Args: + n_token: Number of tokens in vocabulary. + d_model: The dimension of model hidden state. + initializer: Initializer used for parameters. + tie_weight: Whether to share weights between embedding lookup layer and + next-token prediction layer. + bi_data: Whether to use bidirectional input pipeline. Usually set to True + during pretraining and False during finetuning. + use_tpu: bool, whether to use TPU. + use_proj: bool, whether to add a projection layer before LM prediction. + **kwargs: Other parameters. 
+ """ + super(LMLossLayer, self).__init__(**kwargs) + self.n_token = n_token + self.d_model = d_model + self.initializer = initializer + + self.tie_weight = tie_weight + self.bi_data = bi_data + self.use_tpu = use_tpu + self.use_proj = use_proj + + def build(self, unused_input_shapes): + """Implements build() for the layer.""" + if self.use_proj: + self.proj_layer = tf.keras.layers.Dense( + units=self.d_model, + kernel_initializer=self.initializer, + activation=gelu, + name='lm_projection/dense') + self.proj_layer_norm = tf.keras.layers.LayerNormalization( + axis=-1, epsilon=1e-12, name='lm_projection/LayerNorm') + if not self.tie_weight: + self.softmax_w = self.add_weight( + 'weight', + shape=[self.n_token, self.d_model], + initializer=self.initializer) + + self.softmax_b = self.add_weight( + 'bias', shape=[self.n_token], initializer=tf.zeros_initializer()) + + super(LMLossLayer, self).build(unused_input_shapes) + + def call(self, hidden, target, lookup_table, target_mask): + """Implements call() for the layer.""" + if self.use_proj: + hidden = self.proj_layer_norm(self.proj_layer(hidden)) + if self.tie_weight: + logits = tf.einsum('ibd,nd->ibn', hidden, lookup_table) + self.softmax_b + else: + logits = tf.einsum('ibd,nd->ibn', hidden, self.softmax_w) + self.softmax_b + + if self.use_tpu: + one_hot_target = tf.one_hot(target, self.n_token, dtype=logits.dtype) + loss = -tf.reduce_sum(tf.nn.log_softmax(logits) * one_hot_target, -1) + else: + loss = tf.nn.sparse_softmax_cross_entropy_with_logits( + labels=target, logits=logits) + + total_loss = tf.reduce_sum(loss * target_mask) / tf.reduce_sum(target_mask) + + return total_loss, logits + + +class Summarization(tf.keras.layers.Layer): + """The layer to pool the output from XLNet model into a vector.""" + + def __init__(self, + d_model, + n_head, + d_head, + dropout, + dropout_att, + initializer, + use_proj=True, + summary_type='last', + **kwargs): + """Constructs Summarization layer. + + Args: + d_model: int, the dimension of model hidden state. + n_head: int, the number of attention heads. + d_head: int, the dimension size of each attention head. + dropout: float, dropout rate. + dropout_att: float, dropout rate on attention probabilities. + initializer: Initializer used for parameters. + use_proj: bool, whether to use projection layer for summarization. + summary_type: Method used to summarize a sequence into a compact vector. + **kwargs: Other parameters. 
+ """ + super(Summarization, self).__init__(**kwargs) + self.d_model = d_model + self.n_head = n_head + self.d_head = d_head + self.initializer = initializer + + self.dropout = dropout + self.dropout_att = dropout_att + self.use_proj = use_proj + self.summary_type = summary_type + + def build(self, unused_input_shapes): + """Implements build() for the layer.""" + if self.use_proj: + self.proj_layer = tf.keras.layers.Dense( + units=self.d_model, + kernel_initializer=self.initializer, + activation=tf.nn.tanh, + name='summary') + self.dropout_layer = tf.keras.layers.Dropout(rate=self.dropout) + + super(Summarization, self).build(unused_input_shapes) + + def call(self, inputs): + """Implements call() for the layer.""" + if self.summary_type == 'last': + summary = inputs[-1] + elif self.summary_type == 'first': + summary = inputs[0] + else: + raise ValueError('Invalid summary type provided: %s' % self.summary_type) + if self.use_proj: + summary = self.proj_layer(summary) + summary = self.dropout_layer(summary) + return summary + + +class ClassificationLossLayer(tf.keras.layers.Layer): + """Layer computing cross entropy loss for classification task.""" + + def __init__(self, n_class, initializer, **kwargs): + """Constructs Summarization layer. + + Args: + n_class: Number of tokens in vocabulary. + initializer: Initializer used for parameters. + **kwargs: Other parameters. + """ + super(ClassificationLossLayer, self).__init__(**kwargs) + + self.n_class = n_class + self.initializer = initializer + + def build(self, unused_input_shapes): + """Implements build() for the layer.""" + self.proj_layer = tf.keras.layers.Dense( + units=self.n_class, kernel_initializer=self.initializer, name='logit') + + super(ClassificationLossLayer, self).build(unused_input_shapes) + + def call(self, hidden, labels): + """Implements call() for the layer.""" + + logits = self.proj_layer(hidden) + one_hot_target = tf.one_hot(labels, self.n_class, dtype=hidden.dtype) # pytype: disable=attribute-error + loss = -tf.reduce_sum(tf.nn.log_softmax(logits) * one_hot_target, -1) + + return loss, logits + + +class QAXLNetModel(tf.keras.Model): + """XLNet keras model combined with question answering loss layer. 
+ + See the original paper: https://arxiv.org/pdf/1906.08237.pdf + + """ + + def __init__(self, xlnet_config, run_config, start_n_top, end_n_top, + **kwargs): + super(QAXLNetModel, self).__init__(**kwargs) + self.run_config = run_config + self.initializer = _get_initializer(run_config) + self.xlnet_config = copy.deepcopy(xlnet_config) + + self.transformerxl_model = TransformerXLModel( + n_token=self.xlnet_config.n_token, + initializer=self.initializer, + attn_type='bi', + n_layer=self.xlnet_config.n_layer, + d_model=self.xlnet_config.d_model, + n_head=self.xlnet_config.n_head, + d_head=self.xlnet_config.d_head, + d_inner=self.xlnet_config.d_inner, + ff_activation=self.xlnet_config.ff_activation, + untie_r=self.xlnet_config.untie_r, + is_training=self.run_config.is_training, + use_tpu=self.run_config.use_tpu, + dropout=self.run_config.dropout, + dropout_att=self.run_config.dropout_att, + mem_len=self.run_config.mem_len, + reuse_len=self.run_config.reuse_len, + bi_data=self.run_config.bi_data, + clamp_len=self.run_config.clamp_len, + same_length=self.run_config.same_length, + name='transformer') + + self.qa_loss_layer = QALossLayer( + d_model=self.xlnet_config.d_model, + start_n_top=start_n_top, + end_n_top=end_n_top, + initializer=self.initializer, + dropout=self.run_config.dropout) + + def call(self, features, training=False): + """Implements call() for the layer.""" + + input_ids = tf.transpose(features['input_ids'], [1, 0]) + seg_ids = tf.transpose(features['segment_ids'], [1, 0]) + input_mask = tf.transpose(features['input_mask'], [1, 0]) + + cls_index = tf.reshape(features['cls_index'], [-1]) + p_mask = features['p_mask'] + + transformerxl_output, new_mems, self.lookup_table = ( + self.transformerxl_model(input_ids, seg_ids, input_mask)) + + if training: + loss, logits = self.qa_loss_layer( + hidden=transformerxl_output, + p_mask=p_mask, + cls_index=cls_index, + start_positions=features['start_positions'], + end_positions=features['end_positions'], + is_impossible=features['is_impossible']) + self.add_loss(loss) + return new_mems, logits + else: + results = self.qa_loss_layer( + hidden=transformerxl_output, p_mask=p_mask, cls_index=cls_index) + return results + + +class QALossLayer(tf.keras.layers.Layer): + """Layer computing position and regression loss for question answering task.""" + + def __init__(self, d_model, start_n_top, end_n_top, initializer, dropout, + **kwargs): + """Constructs Summarization layer. + + Args: + d_model: Int, the hidden size. + start_n_top: Beam size for span start. + end_n_top: Beam size for span end. + initializer: Initializer used for parameters. + dropout: float, dropout rate. + **kwargs: Other parameters. 
+ """ + super(QALossLayer, self).__init__(**kwargs) + self.d_model = d_model + self.start_n_top = start_n_top + self.end_n_top = end_n_top + self.initializer = initializer + self.dropout = dropout + + def build(self, unused_input_shapes): + """Implements build() for the layer.""" + self.start_logits_proj_layer = tf.keras.layers.Dense( + units=1, kernel_initializer=self.initializer, name='start_logits/dense') + self.end_logits_proj_layer0 = tf.keras.layers.Dense( + units=self.d_model, + kernel_initializer=self.initializer, + activation=tf.nn.tanh, + name='end_logits/dense_0') + self.end_logits_proj_layer1 = tf.keras.layers.Dense( + units=1, kernel_initializer=self.initializer, name='end_logits/dense_1') + self.end_logits_layer_norm = tf.keras.layers.LayerNormalization( + axis=-1, epsilon=1e-12, name='end_logits/LayerNorm') + self.answer_class_proj_layer0 = tf.keras.layers.Dense( + units=self.d_model, + kernel_initializer=self.initializer, + activation=tf.nn.tanh, + name='answer_class/dense_0') + self.answer_class_proj_layer1 = tf.keras.layers.Dense( + units=1, + kernel_initializer=self.initializer, + use_bias=False, + name='answer_class/dense_1') + self.ans_feature_dropout = tf.keras.layers.Dropout(rate=self.dropout) + super(QALossLayer, self).build(unused_input_shapes) + + def __call__(self, hidden, p_mask, cls_index, **kwargs): + return super(QALossLayer, self).__call__( + (hidden, p_mask, cls_index, kwargs)) + + def call(self, inputs, training=False): + """Implements call() for the layer.""" + hidden, p_mask, cls_index, kwargs = inputs + return_dict = {} + seq_len = tf.shape(hidden)[0] + + start_logits = self.start_logits_proj_layer(hidden) + start_logits = tf.transpose(tf.squeeze(start_logits, -1), [1, 0]) + start_logits_masked = start_logits * (1 - p_mask) - 1e30 * p_mask + start_log_probs = tf.nn.log_softmax(start_logits_masked, -1) + if training: + start_positions = kwargs['start_positions'] + end_positions = kwargs['end_positions'] + is_impossible = kwargs['is_impossible'] + start_positions = tf.reshape(start_positions, [-1]) + start_index = tf.one_hot( + start_positions, depth=seq_len, axis=-1, dtype=tf.float32) + start_features = tf.einsum('lbh,bl->bh', hidden, start_index) + start_features = tf.tile(start_features[None], [seq_len, 1, 1]) + end_logits = self.end_logits_proj_layer0( + tf.concat([hidden, start_features], axis=-1)) + + end_logits = self.end_logits_layer_norm(end_logits) + + end_logits = self.end_logits_proj_layer1(end_logits) + end_logits = tf.transpose(tf.squeeze(end_logits, -1), [1, 0]) + end_logits_masked = end_logits * (1 - p_mask) - 1e30 * p_mask + end_log_probs = tf.nn.log_softmax(end_logits_masked, -1) + else: + # during inference, compute the end logits based on beam search + + start_top_log_probs, start_top_index = tf.nn.top_k( + start_log_probs, k=self.start_n_top) + start_index = tf.one_hot( + start_top_index, depth=seq_len, axis=-1, dtype=tf.float32) + start_features = tf.einsum('lbh,bkl->bkh', hidden, start_index) + end_input = tf.tile(hidden[:, :, None], [1, 1, self.start_n_top, 1]) + start_features = tf.tile(start_features[None], [seq_len, 1, 1, 1]) + end_input = tf.concat([end_input, start_features], axis=-1) + end_logits = self.end_logits_proj_layer0(end_input) + end_logits = tf.reshape(end_logits, [seq_len, -1, self.d_model]) + end_logits = self.end_logits_layer_norm(end_logits) + + end_logits = tf.reshape(end_logits, + [seq_len, -1, self.start_n_top, self.d_model]) + + end_logits = self.end_logits_proj_layer1(end_logits) + end_logits = 
tf.reshape(end_logits, [seq_len, -1, self.start_n_top]) + end_logits = tf.transpose(end_logits, [1, 2, 0]) + end_logits_masked = end_logits * ( + 1 - p_mask[:, None]) - 1e30 * p_mask[:, None] + end_log_probs = tf.nn.log_softmax(end_logits_masked, -1) + end_top_log_probs, end_top_index = tf.nn.top_k( + end_log_probs, k=self.end_n_top) + end_top_log_probs = tf.reshape(end_top_log_probs, + [-1, self.start_n_top * self.end_n_top]) + end_top_index = tf.reshape(end_top_index, + [-1, self.start_n_top * self.end_n_top]) + + if training: + return_dict['start_log_probs'] = start_log_probs + return_dict['end_log_probs'] = end_log_probs + else: + return_dict['start_top_log_probs'] = start_top_log_probs + return_dict['start_top_index'] = start_top_index + return_dict['end_top_log_probs'] = end_top_log_probs + return_dict['end_top_index'] = end_top_index + # an additional layer to predict answerability + + # get the representation of CLS + cls_index = tf.one_hot(cls_index, seq_len, axis=-1, dtype=tf.float32) + cls_feature = tf.einsum('lbh,bl->bh', hidden, cls_index) + + # get the representation of START + start_p = tf.nn.softmax(start_logits_masked, axis=-1, name='softmax_start') + start_feature = tf.einsum('lbh,bl->bh', hidden, start_p) + + ans_feature = tf.concat([start_feature, cls_feature], -1) + ans_feature = self.answer_class_proj_layer0(ans_feature) + ans_feature = self.ans_feature_dropout(ans_feature) + cls_logits = self.answer_class_proj_layer1(ans_feature) + cls_logits = tf.squeeze(cls_logits, -1) + return_dict['cls_logits'] = cls_logits + + if not training: + return return_dict + + def compute_loss(log_probs, positions): + one_hot_positions = tf.one_hot(positions, depth=seq_len, dtype=tf.float32) + + loss = -tf.reduce_sum(one_hot_positions * log_probs, axis=-1) + loss = tf.reduce_mean(loss) + return loss + + start_loss = compute_loss(start_log_probs, start_positions) + end_loss = compute_loss(end_log_probs, end_positions) + + total_loss = (start_loss + end_loss) * 0.5 + + is_impossible = tf.reshape(is_impossible, [-1]) + regression_loss = tf.nn.sigmoid_cross_entropy_with_logits( + labels=is_impossible, logits=cls_logits) + regression_loss = tf.reduce_mean(regression_loss) + + total_loss += regression_loss * 0.5 + return total_loss, cls_logits diff --git a/models/official/nlp/xlnet/xlnet_modeling_test.py b/models/official/nlp/xlnet/xlnet_modeling_test.py new file mode 100644 index 0000000000000000000000000000000000000000..dce887aebd77c75999091af9ec112f8d0d336eee --- /dev/null +++ b/models/official/nlp/xlnet/xlnet_modeling_test.py @@ -0,0 +1,52 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import logging +import numpy as np +import tensorflow as tf + +from official.nlp.xlnet import xlnet_modeling + + +class PositionalEmbeddingLayerTest(tf.test.TestCase): + + def test_positional_embedding(self): + """A low-dimensional example is tested. + + With len(pos_seq)=2 and d_model=4: + + pos_seq = [[1.], [0.]] + inv_freq = [1., 0.01] + pos_seq x inv_freq = [[1, 0.01], [0., 0.]] + pos_emb = [[sin(1.), sin(0.01), cos(1.), cos(0.01)], + [sin(0.), sin(0.), cos(0.), cos(0.)]] + = [[0.84147096, 0.00999983, 0.54030228, 0.99994999], + [0., 0., 1., 1.]] + """ + target = np.array([[[0.84147096, 0.00999983, 0.54030228, 0.99994999]], + [[0., 0., 1., 1.]]]) + d_model = 4 + pos_seq = tf.range(1, -1, -1.0) # [1., 0.] + pos_emb_layer = xlnet_modeling.PositionalEmbedding(d_model) + pos_emb = pos_emb_layer(pos_seq, batch_size=None).numpy().astype(float) + + logging.info(pos_emb) + self.assertAllClose(pos_emb, target) + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/pip_package/setup.py b/models/official/pip_package/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..8433f25321c6f28c1c0c51797154633aa1a3ec71 --- /dev/null +++ b/models/official/pip_package/setup.py @@ -0,0 +1,89 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Sets up TensorFlow Official Models.""" +import datetime +import os +import sys + +from setuptools import find_packages +from setuptools import setup + +version = '2.3.0' + +project_name = 'tf-models-official' + +long_description = """The TensorFlow official models are a collection of +models that use TensorFlow's high-level APIs. +They are intended to be well-maintained, tested, and kept up to date with the +latest TensorFlow API. 
They should also be reasonably optimized for fast +performance while still being easy to read.""" + +if '--project_name' in sys.argv: + project_name_idx = sys.argv.index('--project_name') + project_name = sys.argv[project_name_idx + 1] + sys.argv.remove('--project_name') + sys.argv.pop(project_name_idx) + + +def _get_requirements(): + """Parses requirements.txt file.""" + install_requires_tmp = [] + dependency_links_tmp = [] + with open( + os.path.join(os.path.dirname(__file__), '../requirements.txt'), 'r') as f: + for line in f: + package_name = line.strip() + if package_name.startswith('-e '): + dependency_links_tmp.append(package_name[3:].strip()) + else: + install_requires_tmp.append(package_name) + return install_requires_tmp, dependency_links_tmp + +install_requires, dependency_links = _get_requirements() + +if project_name == 'tf-models-nightly': + version += '.dev' + datetime.datetime.now().strftime('%Y%m%d') + install_requires.append('tf-nightly') +else: + install_requires.append('tensorflow>=2.3.0') + +print('install_requires: ', install_requires) +print('dependency_links: ', dependency_links) + +setup( + name=project_name, + version=version, + description='TensorFlow Official Models', + long_description=long_description, + author='Google Inc.', + author_email='no-reply@google.com', + url='https://github.com/tensorflow/models', + license='Apache 2.0', + packages=find_packages(exclude=[ + 'research*', + 'tutorials*', + 'samples*', + 'official.r1*', + 'official.pip_package*', + 'official.benchmark*', + 'official.colab*', + ]), + exclude_package_data={ + '': ['*_test.py',], + }, + install_requires=install_requires, + dependency_links=dependency_links, + python_requires='>=3.6', +) diff --git a/models/official/recommendation/README.md b/models/official/recommendation/README.md new file mode 100644 index 0000000000000000000000000000000000000000..441bc128681c3189b53f7909b22c70fccf564414 --- /dev/null +++ b/models/official/recommendation/README.md @@ -0,0 +1,72 @@ +# Recommendation Model +## Overview +This is an implementation of the Neural Collaborative Filtering (NCF) framework with Neural Matrix Factorization (NeuMF) model as described in the [Neural Collaborative Filtering](https://arxiv.org/abs/1708.05031) paper. Current implementation is based on the code from the authors' [NCF code](https://github.com/hexiangnan/neural_collaborative_filtering) and the Stanford implementation in the [MLPerf Repo](https://github.com/mlperf/reference/tree/master/recommendation/pytorch). + +NCF is a general framework for collaborative filtering of recommendations in which a neural network architecture is used to model user-item interactions. Unlike traditional models, NCF does not resort to Matrix Factorization (MF) with an inner product on latent features of users and items. It replaces the inner product with a multi-layer perceptron that can learn an arbitrary function from data. + +Two instantiations of NCF are Generalized Matrix Factorization (GMF) and Multi-Layer Perceptron (MLP). GMF applies a linear kernel to model the latent feature interactions, and and MLP uses a nonlinear kernel to learn the interaction function from data. NeuMF is a fused model of GMF and MLP to better model the complex user-item interactions, and unifies the strengths of linearity of MF and non-linearity of MLP for modeling the user-item latent structures. NeuMF allows GMF and MLP to learn separate embeddings, and combines the two models by concatenating their last hidden layer. 
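+
+For intuition, a minimal Keras sketch of this fusion is shown below. It is illustrative only, not the implementation used in this repo, and the embedding and layer sizes are made up:
+```
+import tensorflow as tf
+
+
+def toy_neumf(num_users, num_items, mf_dim=8, mlp_dims=(32, 16, 8)):
+  """Illustrative NeuMF: separate GMF and MLP embeddings, fused at the end."""
+  user = tf.keras.Input(shape=(1,), dtype=tf.int32, name="user_id")
+  item = tf.keras.Input(shape=(1,), dtype=tf.int32, name="item_id")
+
+  # GMF branch: element-wise product of user/item embeddings (linear kernel).
+  gmf_user = tf.keras.layers.Embedding(num_users, mf_dim)(user)
+  gmf_item = tf.keras.layers.Embedding(num_items, mf_dim)(item)
+  gmf = tf.keras.layers.Flatten()(
+      tf.keras.layers.multiply([gmf_user, gmf_item]))
+
+  # MLP branch: concatenated embeddings fed through dense layers
+  # (nonlinear kernel).
+  mlp_user = tf.keras.layers.Embedding(num_users, mlp_dims[0] // 2)(user)
+  mlp_item = tf.keras.layers.Embedding(num_items, mlp_dims[0] // 2)(item)
+  mlp = tf.keras.layers.Flatten()(
+      tf.keras.layers.concatenate([mlp_user, mlp_item]))
+  for units in mlp_dims:
+    mlp = tf.keras.layers.Dense(units, activation="relu")(mlp)
+
+  # NeuMF fusion: concatenate the last hidden layers of both branches.
+  logits = tf.keras.layers.Dense(1)(tf.keras.layers.concatenate([gmf, mlp]))
+  return tf.keras.Model(inputs=[user, item], outputs=logits)
+```
+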
+[neumf_model.py](neumf_model.py) defines the architecture details; an illustrative sketch of this fusion appears after this README.
+
+Some abbreviations used in the code base include:
+ - NCF: Neural Collaborative Filtering
+ - NeuMF: Neural Matrix Factorization
+ - GMF: Generalized Matrix Factorization
+ - MLP: Multi-Layer Perceptron
+ - HR: Hit Ratio
+ - NDCG: Normalized Discounted Cumulative Gain
+ - ml-1m: MovieLens 1 million dataset
+ - ml-20m: MovieLens 20 million dataset
+
+## Dataset
+The [MovieLens datasets](http://files.grouplens.org/datasets/movielens/) are used for model training and evaluation. Specifically, we use two datasets: **ml-1m** (short for MovieLens 1 million) and **ml-20m** (short for MovieLens 20 million).
+
+### ml-1m
+The ml-1m dataset contains 1,000,209 anonymous ratings of approximately 3,706 movies made by 6,040 users who joined MovieLens in 2000. All ratings are contained in the file "ratings.dat" without a header row, and are in the following format:
+```
+ UserID::MovieID::Rating::Timestamp
+```
+ - UserIDs range between 1 and 6040.
+ - MovieIDs range between 1 and 3952.
+ - Ratings are made on a 5-star scale (whole-star ratings only).
+
+### ml-20m
+The ml-20m dataset contains 20,000,263 ratings of 26,744 movies by 138,493 users. All ratings are contained in the file "ratings.csv". Each line of this file after the header row represents one rating of one movie by one user, and has the following format:
+```
+userId,movieId,rating,timestamp
+```
+ - The lines within this file are ordered first by userId, then, within user, by movieId.
+ - Ratings are made on a 5-star scale, with half-star increments (0.5 stars - 5.0 stars).
+
+In both datasets, the timestamp is represented in seconds since midnight Coordinated Universal Time (UTC) of January 1, 1970. Each user has at least 20 ratings.
+
+## Running Code
+
+### Download and preprocess dataset
+To download the dataset, please install the Pandas package first. Then issue the following command:
+```
+python movielens.py
+```
+Arguments:
+ * `--data_dir`: Directory in which to download and save the preprocessed data. By default, it is `/tmp/movielens-data/`.
+ * `--dataset`: The dataset name to be downloaded and preprocessed. By default, it is `ml-1m`.
+
+Use the `--help` or `-h` flag to get a full list of possible arguments.
+
+Note that the ml-20m dataset is large (the ratings file is ~500 MB), so data preprocessing may take a couple of minutes (~2 minutes).
+Both the ml-1m and ml-20m datasets will be coerced into a common format when downloaded.
+
+### Train and evaluate model
+
+[ncf_keras_main.py](ncf_keras_main.py) is the Keras trainer that supports
+features in TF 2.x. Users can train the model on both GPUs and TPUs.
+
+To train and evaluate the model, issue the following command:
+```
+python ncf_keras_main.py
+```
+Arguments:
+ * `--model_dir`: Directory in which to save model training checkpoints. By default, it is `/tmp/ncf/`.
+ * `--data_dir`: This should be set to the same directory given to `movielens.py`'s `--data_dir` argument when downloading the data.
+ * `--dataset`: The dataset name to be downloaded and preprocessed. By default, it is `ml-1m`.
+ * `--num_gpus`: The number of GPUs used for training/evaluation of the model. The CPU is used if this flag is 0. By default, it is 1.
+
+There are other arguments that control the model and the training process. Refer to the [Flags package](https://abseil.io/docs/python/guides/flags) documentation or use the `--helpfull` flag to get a full list of possible arguments with detailed descriptions.
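The NeuMF fusion described in the overview can be summarized in a short, hypothetical Keras sketch. This is not the code in [neumf_model.py](neumf_model.py): the embedding size `mf_dim` and the `mlp_units` widths are made-up illustration values, and only the user/item counts echo the ml-1m figures quoted above.

```python
# Illustrative NeuMF-style fusion (hypothetical layer sizes, not the official model).
import tensorflow as tf


def build_toy_neumf(num_users, num_items, mf_dim=8, mlp_units=(32, 16, 8)):
  """Builds a tiny NeuMF-like model: a GMF branch and an MLP branch, concatenated."""
  user_in = tf.keras.Input(shape=(1,), dtype=tf.int32, name="user_id")
  item_in = tf.keras.Input(shape=(1,), dtype=tf.int32, name="item_id")

  # GMF branch: element-wise product of user and item embeddings (linear kernel).
  mf_user = tf.keras.layers.Embedding(num_users, mf_dim)(user_in)
  mf_item = tf.keras.layers.Embedding(num_items, mf_dim)(item_in)
  gmf = tf.keras.layers.Multiply()([mf_user, mf_item])

  # MLP branch: separate embeddings, concatenated and fed through dense layers
  # (nonlinear kernel).
  mlp_user = tf.keras.layers.Embedding(num_users, mlp_units[0] // 2)(user_in)
  mlp_item = tf.keras.layers.Embedding(num_items, mlp_units[0] // 2)(item_in)
  mlp = tf.keras.layers.Concatenate()([mlp_user, mlp_item])
  for units in mlp_units:
    mlp = tf.keras.layers.Dense(units, activation="relu")(mlp)

  # NeuMF fusion: concatenate the last hidden layers of both branches and
  # predict a single interaction logit.
  fused = tf.keras.layers.Concatenate()([gmf, mlp])
  fused = tf.keras.layers.Flatten()(fused)
  logit = tf.keras.layers.Dense(1, name="interaction_logit")(fused)
  return tf.keras.Model(inputs=[user_in, item_in], outputs=logit)


# ml-1m scale: 6,040 users and ~3,706 movies (see the dataset section above).
model = build_toy_neumf(num_users=6040, num_items=3706)
model.summary()
```

A binary cross-entropy loss over sampled positive and negative user-item pairs, such as those produced by the data pipeline below, is the usual training objective for a model of this shape.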
diff --git a/models/official/recommendation/__init__.py b/models/official/recommendation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/official/recommendation/constants.py b/models/official/recommendation/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..8e313bfa66a2133862e79dbad89f03421fee39c5 --- /dev/null +++ b/models/official/recommendation/constants.py @@ -0,0 +1,79 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Central location for NCF specific values.""" + +import sys + +import numpy as np + +from official.recommendation import movielens + +# ============================================================================== +# == Main Thread Data Processing =============================================== +# ============================================================================== + +# Keys for data shards +TRAIN_USER_KEY = "train_{}".format(movielens.USER_COLUMN) +TRAIN_ITEM_KEY = "train_{}".format(movielens.ITEM_COLUMN) +TRAIN_LABEL_KEY = "train_labels" +MASK_START_INDEX = "mask_start_index" +VALID_POINT_MASK = "valid_point_mask" +EVAL_USER_KEY = "eval_{}".format(movielens.USER_COLUMN) +EVAL_ITEM_KEY = "eval_{}".format(movielens.ITEM_COLUMN) + +USER_MAP = "user_map" +ITEM_MAP = "item_map" + +USER_DTYPE = np.int32 +ITEM_DTYPE = np.int32 + +# In both datasets, each user has at least 20 ratings. +MIN_NUM_RATINGS = 20 + +# The number of negative examples attached with a positive example +# when performing evaluation. +NUM_EVAL_NEGATIVES = 999 + +# keys for evaluation metrics +TOP_K = 10 # Top-k list for evaluation +HR_KEY = "HR" +NDCG_KEY = "NDCG" +DUPLICATE_MASK = "duplicate_mask" + +# Metric names +HR_METRIC_NAME = "HR_METRIC" +NDCG_METRIC_NAME = "NDCG_METRIC" + +# Trying to load a cache created in py2 when running in py3 will cause an +# error due to differences in unicode handling. +RAW_CACHE_FILE = "raw_data_cache_py{}.pickle".format(sys.version_info[0]) +CACHE_INVALIDATION_SEC = 3600 * 24 + +# ============================================================================== +# == Data Generation =========================================================== +# ============================================================================== +CYCLES_TO_BUFFER = 3 # The number of train cycles worth of data to "run ahead" + # of the main training loop. + +# Number of batches to run per epoch when using synthetic data. At high batch +# sizes, we run for more batches than with real data, which is good since +# running more batches reduces noise when measuring the average batches/second. +SYNTHETIC_BATCHES_PER_EPOCH = 2000 + +# Only used when StreamingFilesDataset is used. 
+NUM_FILE_SHARDS = 16 +TRAIN_FOLDER_TEMPLATE = "training_cycle_{}" +EVAL_FOLDER = "eval_data" +SHARD_TEMPLATE = "shard_{}.tfrecords" diff --git a/models/official/recommendation/create_ncf_data.py b/models/official/recommendation/create_ncf_data.py new file mode 100644 index 0000000000000000000000000000000000000000..60267bcd5f77ec7cb2036cb2037efe9360d692ba --- /dev/null +++ b/models/official/recommendation/create_ncf_data.py @@ -0,0 +1,117 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Binary to generate training/evaluation dataset for NCF model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import json + +# pylint: disable=g-bad-import-order +from absl import app +from absl import flags +import tensorflow.compat.v2 as tf +# pylint: enable=g-bad-import-order + +from official.recommendation import movielens +from official.recommendation import data_preprocessing + +flags.DEFINE_string( + "data_dir", None, + "The input data dir at which training and evaluation tf record files " + "will be saved.") +flags.DEFINE_string("meta_data_file_path", None, + "The path in which input meta data will be written.") +flags.DEFINE_enum("dataset", "ml-20m", ["ml-1m", "ml-20m"], + "Dataset to be trained/evaluated.") +flags.DEFINE_enum( + "constructor_type", "bisection", ["bisection", "materialized"], + "Strategy to use for generating false negatives. 
materialized has a " + "precompute that scales badly, but a faster per-epoch construction " + "time and can be faster on very large systems.") +flags.DEFINE_integer("num_train_epochs", 14, + "Total number of training epochs to generate.") +flags.DEFINE_integer( + "num_negative_samples", 4, + "Number of negative instances to pair with positive instance.") +flags.DEFINE_integer( + "train_prebatch_size", 99000, + "Batch size to be used for prebatching the dataset " + "for training.") +flags.DEFINE_integer( + "eval_prebatch_size", 99000, + "Batch size to be used for prebatching the dataset " + "for training.") + +FLAGS = flags.FLAGS + + +def prepare_raw_data(flag_obj): + """Downloads and prepares raw data for data generation.""" + movielens.download(flag_obj.dataset, flag_obj.data_dir) + + data_processing_params = { + "train_epochs": flag_obj.num_train_epochs, + "batch_size": flag_obj.train_prebatch_size, + "eval_batch_size": flag_obj.eval_prebatch_size, + "batches_per_step": 1, + "stream_files": True, + "num_neg": flag_obj.num_negative_samples, + } + + num_users, num_items, producer = data_preprocessing.instantiate_pipeline( + dataset=flag_obj.dataset, + data_dir=flag_obj.data_dir, + params=data_processing_params, + constructor_type=flag_obj.constructor_type, + epoch_dir=flag_obj.data_dir, + generate_data_offline=True) + + # pylint: disable=protected-access + input_metadata = { + "num_users": num_users, + "num_items": num_items, + "constructor_type": flag_obj.constructor_type, + "num_train_elements": producer._elements_in_epoch, + "num_eval_elements": producer._eval_elements_in_epoch, + "num_train_epochs": flag_obj.num_train_epochs, + "train_prebatch_size": flag_obj.train_prebatch_size, + "eval_prebatch_size": flag_obj.eval_prebatch_size, + "num_train_steps": producer.train_batches_per_epoch, + "num_eval_steps": producer.eval_batches_per_epoch, + } + # pylint: enable=protected-access + + return producer, input_metadata + + +def generate_data(): + """Creates NCF train/eval dataset and writes input metadata as a file.""" + producer, input_metadata = prepare_raw_data(FLAGS) + producer.run() + + with tf.io.gfile.GFile(FLAGS.meta_data_file_path, "w") as writer: + writer.write(json.dumps(input_metadata, indent=4) + "\n") + + +def main(_): + generate_data() + + +if __name__ == "__main__": + flags.mark_flag_as_required("data_dir") + flags.mark_flag_as_required("meta_data_file_path") + app.run(main) diff --git a/models/official/recommendation/data_pipeline.py b/models/official/recommendation/data_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..1b4dd33afe25df2468cdfcbb2c146392d7bec76e --- /dev/null +++ b/models/official/recommendation/data_pipeline.py @@ -0,0 +1,959 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Asynchronous data producer for the NCF pipeline.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import atexit +import functools +import os +import sys +import tempfile +import threading +import time +import timeit +import traceback +import typing + +import numpy as np +import six +from six.moves import queue +import tensorflow as tf +from absl import logging + +from official.recommendation import constants as rconst +from official.recommendation import movielens +from official.recommendation import popen_helper +from official.recommendation import stat_utils +from tensorflow.python.tpu.datasets import StreamingFilesDataset + + +SUMMARY_TEMPLATE = """General: +{spacer}Num users: {num_users} +{spacer}Num items: {num_items} + +Training: +{spacer}Positive count: {train_pos_ct} +{spacer}Batch size: {train_batch_size} {multiplier} +{spacer}Batch count per epoch: {train_batch_ct} + +Eval: +{spacer}Positive count: {eval_pos_ct} +{spacer}Batch size: {eval_batch_size} {multiplier} +{spacer}Batch count per epoch: {eval_batch_ct}""" + + +class DatasetManager(object): + """Helper class for handling TensorFlow specific data tasks. + + This class takes the (relatively) framework agnostic work done by the data + constructor classes and handles the TensorFlow specific portions (TFRecord + management, tf.Dataset creation, etc.). + """ + + def __init__(self, + is_training, + stream_files, + batches_per_epoch, + shard_root=None, + deterministic=False, + num_train_epochs=None): + # type: (bool, bool, int, typing.Optional[str], bool, int) -> None + """Constructs a `DatasetManager` instance. + Args: + is_training: Boolean of whether the data provided is training or + evaluation data. This determines whether to reuse the data + (if is_training=False) and the exact structure to use when storing and + yielding data. + stream_files: Boolean indicating whether data should be serialized and + written to file shards. + batches_per_epoch: The number of batches in a single epoch. + shard_root: The base directory to be used when stream_files=True. + deterministic: Forgo non-deterministic speedups. (i.e. sloppy=True) + num_train_epochs: Number of epochs to generate. If None, then each + call to `get_dataset()` increments the number of epochs requested. + """ + self._is_training = is_training + self._deterministic = deterministic + self._stream_files = stream_files + self._writers = [] + self._write_locks = [threading.RLock() for _ in + range(rconst.NUM_FILE_SHARDS)] if stream_files else [] + self._batches_per_epoch = batches_per_epoch + self._epochs_completed = 0 + self._epochs_requested = num_train_epochs if num_train_epochs else 0 + self._shard_root = shard_root + + self._result_queue = queue.Queue() + self._result_reuse = [] + + @property + def current_data_root(self): + subdir = (rconst.TRAIN_FOLDER_TEMPLATE.format(self._epochs_completed) + if self._is_training else rconst.EVAL_FOLDER) + return os.path.join(self._shard_root, subdir) + + def buffer_reached(self): + # Only applicable for training. 
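+    # Returns True once the producer has completed rconst.CYCLES_TO_BUFFER more
+    # training epochs than the main loop has requested; epoch construction
+    # sleeps until this clears so data does not accumulate without bound.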
+ return (self._epochs_completed - self._epochs_requested >= + rconst.CYCLES_TO_BUFFER and self._is_training) + + @staticmethod + def serialize(data): + """Convert NumPy arrays into a TFRecords entry.""" + + def create_int_feature(values): + return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) + + feature_dict = { + k: create_int_feature(v.astype(np.int64)) for k, v in data.items() + } + + return tf.train.Example( + features=tf.train.Features(feature=feature_dict)).SerializeToString() + + @staticmethod + def deserialize(serialized_data, batch_size=None, is_training=True): + """Convert serialized TFRecords into tensors. + + Args: + serialized_data: A tensor containing serialized records. + batch_size: The data arrives pre-batched, so batch size is needed to + deserialize the data. + is_training: Boolean, whether data to deserialize to training data + or evaluation data. + """ + + def _get_feature_map(batch_size, is_training=True): + """Returns data format of the serialized tf record file.""" + + if is_training: + return { + movielens.USER_COLUMN: + tf.io.FixedLenFeature([batch_size, 1], dtype=tf.int64), + movielens.ITEM_COLUMN: + tf.io.FixedLenFeature([batch_size, 1], dtype=tf.int64), + rconst.VALID_POINT_MASK: + tf.io.FixedLenFeature([batch_size, 1], dtype=tf.int64), + "labels": + tf.io.FixedLenFeature([batch_size, 1], dtype=tf.int64) + } + else: + return { + movielens.USER_COLUMN: + tf.io.FixedLenFeature([batch_size, 1], dtype=tf.int64), + movielens.ITEM_COLUMN: + tf.io.FixedLenFeature([batch_size, 1], dtype=tf.int64), + rconst.DUPLICATE_MASK: + tf.io.FixedLenFeature([batch_size, 1], dtype=tf.int64) + } + + features = tf.io.parse_single_example( + serialized_data, _get_feature_map(batch_size, is_training=is_training)) + users = tf.cast(features[movielens.USER_COLUMN], rconst.USER_DTYPE) + items = tf.cast(features[movielens.ITEM_COLUMN], rconst.ITEM_DTYPE) + + if is_training: + valid_point_mask = tf.cast(features[rconst.VALID_POINT_MASK], tf.bool) + fake_dup_mask = tf.zeros_like(users) + return { + movielens.USER_COLUMN: users, + movielens.ITEM_COLUMN: items, + rconst.VALID_POINT_MASK: valid_point_mask, + rconst.TRAIN_LABEL_KEY: + tf.reshape(tf.cast(features["labels"], tf.bool), + (batch_size, 1)), + rconst.DUPLICATE_MASK: fake_dup_mask + } + else: + labels = tf.cast(tf.zeros_like(users), tf.bool) + fake_valid_pt_mask = tf.cast(tf.zeros_like(users), tf.bool) + return { + movielens.USER_COLUMN: + users, + movielens.ITEM_COLUMN: + items, + rconst.DUPLICATE_MASK: + tf.cast(features[rconst.DUPLICATE_MASK], tf.bool), + rconst.VALID_POINT_MASK: + fake_valid_pt_mask, + rconst.TRAIN_LABEL_KEY: + labels + } + + def put(self, index, data): + # type: (int, dict) -> None + """Store data for later consumption. + + Because there are several paths for storing and yielding data (queues, + lists, files) the data producer simply provides the data in a standard + format at which point the dataset manager handles storing it in the correct + form. + + Args: + index: Used to select shards when writing to files. + data: A dict of the data to be stored. This method mutates data, and + therefore expects to be the only consumer. 
+ """ + if self._is_training: + mask_start_index = data.pop(rconst.MASK_START_INDEX) + batch_size = data[movielens.ITEM_COLUMN].shape[0] + data[rconst.VALID_POINT_MASK] = np.expand_dims( + np.less(np.arange(batch_size), mask_start_index), -1) + + if self._stream_files: + example_bytes = self.serialize(data) + with self._write_locks[index % rconst.NUM_FILE_SHARDS]: + self._writers[index % rconst.NUM_FILE_SHARDS].write(example_bytes) + + else: + self._result_queue.put(( + data, data.pop("labels")) if self._is_training else data) + + def start_construction(self): + if self._stream_files: + tf.io.gfile.makedirs(self.current_data_root) + template = os.path.join(self.current_data_root, rconst.SHARD_TEMPLATE) + self._writers = [tf.io.TFRecordWriter(template.format(i)) + for i in range(rconst.NUM_FILE_SHARDS)] + + def end_construction(self): + if self._stream_files: + [writer.close() for writer in self._writers] + self._writers = [] + self._result_queue.put(self.current_data_root) + + self._epochs_completed += 1 + + def data_generator(self, epochs_between_evals): + """Yields examples during local training.""" + assert not self._stream_files + assert self._is_training or epochs_between_evals == 1 + + if self._is_training: + for _ in range(self._batches_per_epoch * epochs_between_evals): + yield self._result_queue.get(timeout=300) + + else: + if self._result_reuse: + assert len(self._result_reuse) == self._batches_per_epoch + + for i in self._result_reuse: + yield i + else: + # First epoch. + for _ in range(self._batches_per_epoch * epochs_between_evals): + result = self._result_queue.get(timeout=300) + self._result_reuse.append(result) + yield result + + def increment_request_epoch(self): + self._epochs_requested += 1 + + def get_dataset(self, batch_size, epochs_between_evals): + """Construct the dataset to be used for training and eval. + + For local training, data is provided through Dataset.from_generator. For + remote training (TPUs) the data is first serialized to files and then sent + to the TPU through a StreamingFilesDataset. + + Args: + batch_size: The per-replica batch size of the dataset. + epochs_between_evals: How many epochs worth of data to yield. + (Generator mode only.) + """ + self.increment_request_epoch() + if self._stream_files: + if epochs_between_evals > 1: + raise ValueError("epochs_between_evals > 1 not supported for file " + "based dataset.") + epoch_data_dir = self._result_queue.get(timeout=300) + if not self._is_training: + self._result_queue.put(epoch_data_dir) # Eval data is reused. 
+ + file_pattern = os.path.join( + epoch_data_dir, rconst.SHARD_TEMPLATE.format("*")) + dataset = StreamingFilesDataset( + files=file_pattern, worker_job=popen_helper.worker_job(), + num_parallel_reads=rconst.NUM_FILE_SHARDS, num_epochs=1, + sloppy=not self._deterministic) + map_fn = functools.partial( + self.deserialize, + batch_size=batch_size, + is_training=self._is_training) + dataset = dataset.map(map_fn, num_parallel_calls=16) + + else: + types = {movielens.USER_COLUMN: rconst.USER_DTYPE, + movielens.ITEM_COLUMN: rconst.ITEM_DTYPE} + shapes = { + movielens.USER_COLUMN: tf.TensorShape([batch_size, 1]), + movielens.ITEM_COLUMN: tf.TensorShape([batch_size, 1]) + } + + if self._is_training: + types[rconst.VALID_POINT_MASK] = np.bool + shapes[rconst.VALID_POINT_MASK] = tf.TensorShape([batch_size, 1]) + + types = (types, np.bool) + shapes = (shapes, tf.TensorShape([batch_size, 1])) + + else: + types[rconst.DUPLICATE_MASK] = np.bool + shapes[rconst.DUPLICATE_MASK] = tf.TensorShape([batch_size, 1]) + + data_generator = functools.partial( + self.data_generator, epochs_between_evals=epochs_between_evals) + dataset = tf.data.Dataset.from_generator( + generator=data_generator, output_types=types, + output_shapes=shapes) + + return dataset.prefetch(16) + + def make_input_fn(self, batch_size): + """Create an input_fn which checks for batch size consistency.""" + + def input_fn(params): + """Returns batches for training.""" + + # Estimator passes batch_size during training and eval_batch_size during + # eval. + param_batch_size = (params["batch_size"] if self._is_training else + params.get("eval_batch_size") or params["batch_size"]) + if batch_size != param_batch_size: + raise ValueError("producer batch size ({}) differs from params batch " + "size ({})".format(batch_size, param_batch_size)) + + epochs_between_evals = (params.get("epochs_between_evals", 1) + if self._is_training else 1) + return self.get_dataset(batch_size=batch_size, + epochs_between_evals=epochs_between_evals) + + return input_fn + + +class BaseDataConstructor(threading.Thread): + """Data constructor base class. + + This class manages the control flow for constructing data. 
It is not meant + to be used directly, but instead subclasses should implement the following + two methods: + + self.construct_lookup_variables + self.lookup_negative_items + + """ + + def __init__( + self, + maximum_number_epochs, # type: int + num_users, # type: int + num_items, # type: int + user_map, # type: dict + item_map, # type: dict + train_pos_users, # type: np.ndarray + train_pos_items, # type: np.ndarray + train_batch_size, # type: int + batches_per_train_step, # type: int + num_train_negatives, # type: int + eval_pos_users, # type: np.ndarray + eval_pos_items, # type: np.ndarray + eval_batch_size, # type: int + batches_per_eval_step, # type: int + stream_files, # type: bool + deterministic=False, # type: bool + epoch_dir=None, # type: str + num_train_epochs=None, # type: int + create_data_offline=False # type: bool + ): + # General constants + self._maximum_number_epochs = maximum_number_epochs + self._num_users = num_users + self._num_items = num_items + self.user_map = user_map + self.item_map = item_map + self._train_pos_users = train_pos_users + self._train_pos_items = train_pos_items + self.train_batch_size = train_batch_size + self._num_train_negatives = num_train_negatives + self._batches_per_train_step = batches_per_train_step + self._eval_pos_users = eval_pos_users + self._eval_pos_items = eval_pos_items + self.eval_batch_size = eval_batch_size + self.num_train_epochs = num_train_epochs + self.create_data_offline = create_data_offline + + # Training + if self._train_pos_users.shape != self._train_pos_items.shape: + raise ValueError( + "User positives ({}) is different from item positives ({})".format( + self._train_pos_users.shape, self._train_pos_items.shape)) + + (self._train_pos_count,) = self._train_pos_users.shape + self._elements_in_epoch = (1 + num_train_negatives) * self._train_pos_count + self.train_batches_per_epoch = self._count_batches( + self._elements_in_epoch, train_batch_size, batches_per_train_step) + + # Evaluation + if eval_batch_size % (1 + rconst.NUM_EVAL_NEGATIVES): + raise ValueError("Eval batch size {} is not divisible by {}".format( + eval_batch_size, 1 + rconst.NUM_EVAL_NEGATIVES)) + self._eval_users_per_batch = int( + eval_batch_size // (1 + rconst.NUM_EVAL_NEGATIVES)) + self._eval_elements_in_epoch = num_users * (1 + rconst.NUM_EVAL_NEGATIVES) + self.eval_batches_per_epoch = self._count_batches( + self._eval_elements_in_epoch, eval_batch_size, batches_per_eval_step) + + # Intermediate artifacts + self._current_epoch_order = np.empty(shape=(0,)) + self._shuffle_iterator = None + + self._shuffle_with_forkpool = not stream_files + if stream_files: + self._shard_root = epoch_dir or tempfile.mkdtemp(prefix="ncf_") + if not create_data_offline: + atexit.register(tf.io.gfile.rmtree, self._shard_root) + else: + self._shard_root = None + + self._train_dataset = DatasetManager(True, stream_files, + self.train_batches_per_epoch, + self._shard_root, deterministic, + num_train_epochs) + self._eval_dataset = DatasetManager(False, stream_files, + self.eval_batches_per_epoch, + self._shard_root, deterministic, + num_train_epochs) + + # Threading details + super(BaseDataConstructor, self).__init__() + self.daemon = True + self._stop_loop = False + self._fatal_exception = None + self.deterministic = deterministic + + def __str__(self): + multiplier = ("(x{} devices)".format(self._batches_per_train_step) + if self._batches_per_train_step > 1 else "") + summary = SUMMARY_TEMPLATE.format( + spacer=" ", num_users=self._num_users, num_items=self._num_items, + 
train_pos_ct=self._train_pos_count, + train_batch_size=self.train_batch_size, + train_batch_ct=self.train_batches_per_epoch, + eval_pos_ct=self._num_users, eval_batch_size=self.eval_batch_size, + eval_batch_ct=self.eval_batches_per_epoch, multiplier=multiplier) + return super(BaseDataConstructor, self).__str__() + "\n" + summary + + @staticmethod + def _count_batches(example_count, batch_size, batches_per_step): + """Determine the number of batches, rounding up to fill all devices.""" + x = (example_count + batch_size - 1) // batch_size + return (x + batches_per_step - 1) // batches_per_step * batches_per_step + + def stop_loop(self): + self._stop_loop = True + + def construct_lookup_variables(self): + """Perform any one time pre-compute work.""" + raise NotImplementedError + + def lookup_negative_items(self, **kwargs): + """Randomly sample negative items for given users.""" + raise NotImplementedError + + def _run(self): + atexit.register(self.stop_loop) + self._start_shuffle_iterator() + self.construct_lookup_variables() + self._construct_training_epoch() + self._construct_eval_epoch() + for _ in range(self._maximum_number_epochs - 1): + self._construct_training_epoch() + self.stop_loop() + + def run(self): + try: + self._run() + except Exception as e: + # The Thread base class swallows stack traces, so unfortunately it is + # necessary to catch and re-raise to get debug output + traceback.print_exc() + self._fatal_exception = e + sys.stderr.flush() + raise + + def _start_shuffle_iterator(self): + if self._shuffle_with_forkpool: + pool = popen_helper.get_forkpool(3, closing=False) + else: + pool = popen_helper.get_threadpool(1, closing=False) + atexit.register(pool.close) + args = [(self._elements_in_epoch, stat_utils.random_int32()) + for _ in range(self._maximum_number_epochs)] + imap = pool.imap if self.deterministic else pool.imap_unordered + self._shuffle_iterator = imap(stat_utils.permutation, args) + + def _get_training_batch(self, i): + """Construct a single batch of training data. + + Args: + i: The index of the batch. This is used when stream_files=True to assign + data to file shards. + """ + batch_indices = self._current_epoch_order[i * self.train_batch_size: + (i + 1) * self.train_batch_size] + (mask_start_index,) = batch_indices.shape + + batch_ind_mod = np.mod(batch_indices, self._train_pos_count) + users = self._train_pos_users[batch_ind_mod] + + negative_indices = np.greater_equal(batch_indices, self._train_pos_count) + negative_users = users[negative_indices] + + negative_items = self.lookup_negative_items(negative_users=negative_users) + + items = self._train_pos_items[batch_ind_mod] + items[negative_indices] = negative_items + + labels = np.logical_not(negative_indices) + + # Pad last partial batch + pad_length = self.train_batch_size - mask_start_index + if pad_length: + # We pad with arange rather than zeros because the network will still + # compute logits for padded examples, and padding with zeros would create + # a very "hot" embedding key which can have performance implications. 
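+      # The padded rows land at indices >= mask_start_index, so the
+      # VALID_POINT_MASK built in DatasetManager.put() marks them invalid and
+      # downstream consumers can ignore them.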
+ user_pad = np.arange(pad_length, dtype=users.dtype) % self._num_users + item_pad = np.arange(pad_length, dtype=items.dtype) % self._num_items + label_pad = np.zeros(shape=(pad_length,), dtype=labels.dtype) + users = np.concatenate([users, user_pad]) + items = np.concatenate([items, item_pad]) + labels = np.concatenate([labels, label_pad]) + + self._train_dataset.put( + i, { + movielens.USER_COLUMN: + np.reshape(users, (self.train_batch_size, 1)), + movielens.ITEM_COLUMN: + np.reshape(items, (self.train_batch_size, 1)), + rconst.MASK_START_INDEX: + np.array(mask_start_index, dtype=np.int32), + "labels": + np.reshape(labels, (self.train_batch_size, 1)), + }) + + def _wait_to_construct_train_epoch(self): + count = 0 + while self._train_dataset.buffer_reached() and not self._stop_loop: + time.sleep(0.01) + count += 1 + if count >= 100 and np.log10(count) == np.round(np.log10(count)): + logging.info( + "Waited {} times for training data to be consumed".format(count)) + + def _construct_training_epoch(self): + """Loop to construct a batch of training data.""" + if not self.create_data_offline: + self._wait_to_construct_train_epoch() + + start_time = timeit.default_timer() + if self._stop_loop: + return + + self._train_dataset.start_construction() + map_args = list(range(self.train_batches_per_epoch)) + self._current_epoch_order = next(self._shuffle_iterator) + + get_pool = (popen_helper.get_fauxpool if self.deterministic else + popen_helper.get_threadpool) + with get_pool(6) as pool: + pool.map(self._get_training_batch, map_args) + self._train_dataset.end_construction() + + logging.info("Epoch construction complete. Time: {:.1f} seconds".format( + timeit.default_timer() - start_time)) + + @staticmethod + def _assemble_eval_batch(users, positive_items, negative_items, + users_per_batch): + """Construct duplicate_mask and structure data accordingly. + + The positive items should be last so that they lose ties. However, they + should not be masked out if the true eval positive happens to be + selected as a negative. So instead, the positive is placed in the first + position, and then switched with the last element after the duplicate + mask has been computed. + + Args: + users: An array of users in a batch. (should be identical along axis 1) + positive_items: An array (batch_size x 1) of positive item indices. + negative_items: An array of negative item indices. + users_per_batch: How many users should be in the batch. This is passed + as an argument so that ncf_test.py can use this method. + + Returns: + User, item, and duplicate_mask arrays. + """ + items = np.concatenate([positive_items, negative_items], axis=1) + + # We pad the users and items here so that the duplicate mask calculation + # will include padding. The metric function relies on all padded elements + # except the positive being marked as duplicate to mask out padded points. + if users.shape[0] < users_per_batch: + pad_rows = users_per_batch - users.shape[0] + padding = np.zeros(shape=(pad_rows, users.shape[1]), dtype=np.int32) + users = np.concatenate([users, padding.astype(users.dtype)], axis=0) + items = np.concatenate([items, padding.astype(items.dtype)], axis=0) + + duplicate_mask = stat_utils.mask_duplicates(items, axis=1).astype(np.bool) + + items[:, (0, -1)] = items[:, (-1, 0)] + duplicate_mask[:, (0, -1)] = duplicate_mask[:, (-1, 0)] + + assert users.shape == items.shape == duplicate_mask.shape + return users, items, duplicate_mask + + def _get_eval_batch(self, i): + """Construct a single batch of evaluation data. 
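+
+    Each evaluation batch packs one positive item and rconst.NUM_EVAL_NEGATIVES
+    sampled negatives for each of self._eval_users_per_batch users; the
+    duplicate mask marks repeated items (including any sampled negative that
+    collides with the true positive) so that metrics can ignore them.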
+ + Args: + i: The index of the batch. + """ + low_index = i * self._eval_users_per_batch + high_index = (i + 1) * self._eval_users_per_batch + users = np.repeat(self._eval_pos_users[low_index:high_index, np.newaxis], + 1 + rconst.NUM_EVAL_NEGATIVES, axis=1) + positive_items = self._eval_pos_items[low_index:high_index, np.newaxis] + negative_items = (self.lookup_negative_items(negative_users=users[:, :-1]) + .reshape(-1, rconst.NUM_EVAL_NEGATIVES)) + + users, items, duplicate_mask = self._assemble_eval_batch( + users, positive_items, negative_items, self._eval_users_per_batch) + + self._eval_dataset.put( + i, { + movielens.USER_COLUMN: + np.reshape(users.flatten(), (self.eval_batch_size, 1)), + movielens.ITEM_COLUMN: + np.reshape(items.flatten(), (self.eval_batch_size, 1)), + rconst.DUPLICATE_MASK: + np.reshape(duplicate_mask.flatten(), (self.eval_batch_size, 1)), + }) + + def _construct_eval_epoch(self): + """Loop to construct data for evaluation.""" + if self._stop_loop: + return + + start_time = timeit.default_timer() + + self._eval_dataset.start_construction() + map_args = [i for i in range(self.eval_batches_per_epoch)] + + get_pool = (popen_helper.get_fauxpool if self.deterministic else + popen_helper.get_threadpool) + with get_pool(6) as pool: + pool.map(self._get_eval_batch, map_args) + self._eval_dataset.end_construction() + + logging.info("Eval construction complete. Time: {:.1f} seconds".format( + timeit.default_timer() - start_time)) + + def make_input_fn(self, is_training): + # It isn't feasible to provide a foolproof check, so this is designed to + # catch most failures rather than provide an exhaustive guard. + if self._fatal_exception is not None: + raise ValueError("Fatal exception in the data production loop: {}" + .format(self._fatal_exception)) + + return ( + self._train_dataset.make_input_fn(self.train_batch_size) if is_training + else self._eval_dataset.make_input_fn(self.eval_batch_size)) + + def increment_request_epoch(self): + self._train_dataset.increment_request_epoch() + + +class DummyConstructor(threading.Thread): + """Class for running with synthetic data.""" + + def __init__(self, *args, **kwargs): + super(DummyConstructor, self).__init__(*args, **kwargs) + self.train_batches_per_epoch = rconst.SYNTHETIC_BATCHES_PER_EPOCH + self.eval_batches_per_epoch = rconst.SYNTHETIC_BATCHES_PER_EPOCH + + def run(self): + pass + + def stop_loop(self): + pass + + def increment_request_epoch(self): + pass + + @staticmethod + def make_input_fn(is_training): + """Construct training input_fn that uses synthetic data.""" + + def input_fn(params): + """Returns dummy input batches for training.""" + + # Estimator passes batch_size during training and eval_batch_size during + # eval. 
+ batch_size = (params["batch_size"] if is_training else + params.get("eval_batch_size") or params["batch_size"]) + num_users = params["num_users"] + num_items = params["num_items"] + + users = tf.random.uniform([batch_size, 1], + dtype=tf.int32, + minval=0, + maxval=num_users) + items = tf.random.uniform([batch_size, 1], + dtype=tf.int32, + minval=0, + maxval=num_items) + + if is_training: + valid_point_mask = tf.cast( + tf.random.uniform([batch_size, 1], + dtype=tf.int32, + minval=0, + maxval=2), tf.bool) + labels = tf.cast( + tf.random.uniform([batch_size, 1], + dtype=tf.int32, + minval=0, + maxval=2), tf.bool) + data = { + movielens.USER_COLUMN: users, + movielens.ITEM_COLUMN: items, + rconst.VALID_POINT_MASK: valid_point_mask, + }, labels + else: + dupe_mask = tf.cast( + tf.random.uniform([batch_size, 1], + dtype=tf.int32, + minval=0, + maxval=2), tf.bool) + data = { + movielens.USER_COLUMN: users, + movielens.ITEM_COLUMN: items, + rconst.DUPLICATE_MASK: dupe_mask, + } + + dataset = tf.data.Dataset.from_tensors(data).repeat( + rconst.SYNTHETIC_BATCHES_PER_EPOCH * params["batches_per_step"]) + dataset = dataset.prefetch(32) + return dataset + + return input_fn + + +class MaterializedDataConstructor(BaseDataConstructor): + """Materialize a table of negative examples for fast negative generation. + + This class creates a table (num_users x num_items) containing all of the + negative examples for each user. This table is conceptually ragged; that is to + say the items dimension will have a number of unused elements at the end equal + to the number of positive elements for a given user. For instance: + + num_users = 3 + num_items = 5 + positives = [[1, 3], [0], [1, 2, 3, 4]] + + will generate a negative table: + [ + [0 2 4 int32max int32max], + [1 2 3 4 int32max], + [0 int32max int32max int32max int32max], + ] + + and a vector of per-user negative counts, which in this case would be: + [3, 4, 1] + + When sampling negatives, integers are (nearly) uniformly selected from the + range [0, per_user_neg_count[user]) which gives a column_index, at which + point the negative can be selected as: + negative_table[user, column_index] + + This technique will not scale; however MovieLens is small enough that even + a pre-compute which is quadratic in problem size will still fit in memory. A + more scalable lookup method is in the works. + """ + def __init__(self, *args, **kwargs): + super(MaterializedDataConstructor, self).__init__(*args, **kwargs) + self._negative_table = None + self._per_user_neg_count = None + + def construct_lookup_variables(self): + # Materialize negatives for fast lookup sampling. + start_time = timeit.default_timer() + inner_bounds = np.argwhere(self._train_pos_users[1:] - + self._train_pos_users[:-1])[:, 0] + 1 + (upper_bound,) = self._train_pos_users.shape + index_bounds = [0] + inner_bounds.tolist() + [upper_bound] + self._negative_table = np.zeros(shape=(self._num_users, self._num_items), + dtype=rconst.ITEM_DTYPE) + + # Set the table to the max value to make sure the embedding lookup will fail + # if we go out of bounds, rather than just overloading item zero. + self._negative_table += np.iinfo(rconst.ITEM_DTYPE).max + assert self._num_items < np.iinfo(rconst.ITEM_DTYPE).max + + # Reuse arange during generation. np.delete will make a copy. + full_set = np.arange(self._num_items, dtype=rconst.ITEM_DTYPE) + + self._per_user_neg_count = np.zeros( + shape=(self._num_users,), dtype=np.int32) + + # Threading does not improve this loop. 
For some reason, the np.delete + # call does not parallelize well. Multiprocessing incurs too much + # serialization overhead to be worthwhile. + for i in range(self._num_users): + positives = self._train_pos_items[index_bounds[i]:index_bounds[i+1]] + negatives = np.delete(full_set, positives) + self._per_user_neg_count[i] = self._num_items - positives.shape[0] + self._negative_table[i, :self._per_user_neg_count[i]] = negatives + + logging.info("Negative sample table built. Time: {:.1f} seconds".format( + timeit.default_timer() - start_time)) + + def lookup_negative_items(self, negative_users, **kwargs): + negative_item_choice = stat_utils.very_slightly_biased_randint( + self._per_user_neg_count[negative_users]) + return self._negative_table[negative_users, negative_item_choice] + + +class BisectionDataConstructor(BaseDataConstructor): + """Use bisection to index within positive examples. + + This class tallies the number of negative items which appear before each + positive item for a user. This means that in order to select the ith negative + item for a user, it only needs to determine which two positive items bound + it at which point the item id for the ith negative is a simply algebraic + expression. + """ + def __init__(self, *args, **kwargs): + super(BisectionDataConstructor, self).__init__(*args, **kwargs) + self.index_bounds = None + self._sorted_train_pos_items = None + self._total_negatives = None + + def _index_segment(self, user): + lower, upper = self.index_bounds[user:user+2] + items = self._sorted_train_pos_items[lower:upper] + + negatives_since_last_positive = np.concatenate( + [items[0][np.newaxis], items[1:] - items[:-1] - 1]) + + return np.cumsum(negatives_since_last_positive) + + def construct_lookup_variables(self): + start_time = timeit.default_timer() + inner_bounds = np.argwhere(self._train_pos_users[1:] - + self._train_pos_users[:-1])[:, 0] + 1 + (upper_bound,) = self._train_pos_users.shape + self.index_bounds = np.array([0] + inner_bounds.tolist() + [upper_bound]) + + # Later logic will assume that the users are in sequential ascending order. + assert np.array_equal(self._train_pos_users[self.index_bounds[:-1]], + np.arange(self._num_users)) + + self._sorted_train_pos_items = self._train_pos_items.copy() + + for i in range(self._num_users): + lower, upper = self.index_bounds[i:i+2] + self._sorted_train_pos_items[lower:upper].sort() + + self._total_negatives = np.concatenate([ + self._index_segment(i) for i in range(self._num_users)]) + + logging.info("Negative total vector built. Time: {:.1f} seconds".format( + timeit.default_timer() - start_time)) + + def lookup_negative_items(self, negative_users, **kwargs): + output = np.zeros(shape=negative_users.shape, dtype=rconst.ITEM_DTYPE) - 1 + + left_index = self.index_bounds[negative_users] + right_index = self.index_bounds[negative_users + 1] - 1 + + num_positives = right_index - left_index + 1 + num_negatives = self._num_items - num_positives + neg_item_choice = stat_utils.very_slightly_biased_randint(num_negatives) + + # Shortcuts: + # For points where the negative is greater than or equal to the tally before + # the last positive point there is no need to bisect. Instead the item id + # corresponding to the negative item choice is simply: + # last_postive_index + 1 + (neg_choice - last_negative_tally) + # Similarly, if the selection is less than the tally at the first positive + # then the item_id is simply the selection. 
+ # + # Because MovieLens organizes popular movies into low integers (which is + # preserved through the preprocessing), the first shortcut is very + # efficient, allowing ~60% of samples to bypass the bisection. For the same + # reason, the second shortcut is rarely triggered (<0.02%) and is therefore + # not worth implementing. + use_shortcut = neg_item_choice >= self._total_negatives[right_index] + output[use_shortcut] = ( + self._sorted_train_pos_items[right_index] + 1 + + (neg_item_choice - self._total_negatives[right_index]) + )[use_shortcut] + + if np.all(use_shortcut): + # The bisection code is ill-posed when there are no elements. + return output + + not_use_shortcut = np.logical_not(use_shortcut) + left_index = left_index[not_use_shortcut] + right_index = right_index[not_use_shortcut] + neg_item_choice = neg_item_choice[not_use_shortcut] + + num_loops = np.max( + np.ceil(np.log2(num_positives[not_use_shortcut])).astype(np.int32)) + + for i in range(num_loops): + mid_index = (left_index + right_index) // 2 + right_criteria = self._total_negatives[mid_index] > neg_item_choice + left_criteria = np.logical_not(right_criteria) + + right_index[right_criteria] = mid_index[right_criteria] + left_index[left_criteria] = mid_index[left_criteria] + + # Expected state after bisection pass: + # The right index is the smallest index whose tally is greater than the + # negative item choice index. + + assert np.all((right_index - left_index) <= 1) + + output[not_use_shortcut] = ( + self._sorted_train_pos_items[right_index] - + (self._total_negatives[right_index] - neg_item_choice) + ) + + assert np.all(output >= 0) + + return output + + +def get_constructor(name): + if name == "bisection": + return BisectionDataConstructor + if name == "materialized": + return MaterializedDataConstructor + raise ValueError("Unrecognized constructor: {}".format(name)) diff --git a/models/official/recommendation/data_preprocessing.py b/models/official/recommendation/data_preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..3d7a3f856a7d8de45ff00ff3a0e1a6e6eacadd3a --- /dev/null +++ b/models/official/recommendation/data_preprocessing.py @@ -0,0 +1,265 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Preprocess dataset and construct any necessary artifacts.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import os +import pickle +import time +import timeit + +# pylint: disable=wrong-import-order +from absl import logging +import numpy as np +import pandas as pd +import tensorflow as tf +import typing +from typing import Dict, Text, Tuple +# pylint: enable=wrong-import-order + +from official.recommendation import constants as rconst +from official.recommendation import data_pipeline +from official.recommendation import movielens + + +_EXPECTED_CACHE_KEYS = ( + rconst.TRAIN_USER_KEY, rconst.TRAIN_ITEM_KEY, rconst.EVAL_USER_KEY, + rconst.EVAL_ITEM_KEY, rconst.USER_MAP, rconst.ITEM_MAP) + + +def read_dataframe( + raw_rating_path: Text +) -> Tuple[Dict[int, int], Dict[int, int], pd.DataFrame]: + """Read in data CSV, and output DataFrame for downstream processing. + + This function reads in the raw CSV of positive items, and performs three + preprocessing transformations: + + 1) Filter out all users who have not rated at least a certain number + of items. (Typically 20 items) + + 2) Zero index the users and items such that the largest user_id is + `num_users - 1` and the largest item_id is `num_items - 1` + + 3) Sort the dataframe by user_id, with timestamp as a secondary sort key. + This allows the dataframe to be sliced by user in-place, and for the last + item to be selected simply by calling the `-1` index of a user's slice. + + Args: + raw_rating_path: The path to the CSV which contains the raw dataset. + + Returns: + A dict mapping raw user IDs to regularized user IDs, a dict mapping raw + item IDs to regularized item IDs, and a filtered, zero-index remapped, + sorted dataframe. + """ + with tf.io.gfile.GFile(raw_rating_path) as f: + df = pd.read_csv(f) + + # Get the info of users who have more than 20 ratings on items + grouped = df.groupby(movielens.USER_COLUMN) + df = grouped.filter( + lambda x: len(x) >= rconst.MIN_NUM_RATINGS) # type: pd.DataFrame + + original_users = df[movielens.USER_COLUMN].unique() + original_items = df[movielens.ITEM_COLUMN].unique() + + # Map the ids of user and item to 0 based index for following processing + logging.info("Generating user_map and item_map...") + user_map = {user: index for index, user in enumerate(original_users)} + item_map = {item: index for index, item in enumerate(original_items)} + + df[movielens.USER_COLUMN] = df[movielens.USER_COLUMN].apply( + lambda user: user_map[user]) + df[movielens.ITEM_COLUMN] = df[movielens.ITEM_COLUMN].apply( + lambda item: item_map[item]) + + num_users = len(original_users) + num_items = len(original_items) + + assert num_users <= np.iinfo(rconst.USER_DTYPE).max + assert num_items <= np.iinfo(rconst.ITEM_DTYPE).max + assert df[movielens.USER_COLUMN].max() == num_users - 1 + assert df[movielens.ITEM_COLUMN].max() == num_items - 1 + + # This sort is used to shard the dataframe by user, and later to select + # the last item for a user to be used in validation. + logging.info("Sorting by user, timestamp...") + + # This sort is equivalent to + # df.sort_values([movielens.USER_COLUMN, movielens.TIMESTAMP_COLUMN], + # inplace=True) + # except that the order of items with the same user and timestamp are + # sometimes different. 
For some reason, this sort results in a better + # hit-rate during evaluation, matching the performance of the MLPerf + # reference implementation. + df.sort_values(by=movielens.TIMESTAMP_COLUMN, inplace=True) + df.sort_values([movielens.USER_COLUMN, movielens.TIMESTAMP_COLUMN], + inplace=True, + kind="mergesort") + + # The dataframe does not reconstruct indices in the sort or filter steps. + return user_map, item_map, df.reset_index() + + +def _filter_index_sort(raw_rating_path: Text, + cache_path: Text) -> Tuple[pd.DataFrame, bool]: + """Read in data CSV, and output structured data. + + This function reads in the raw CSV of positive items, and performs three + preprocessing transformations: + + 1) Filter out all users who have not rated at least a certain number + of items. (Typically 20 items) + + 2) Zero index the users and items such that the largest user_id is + `num_users - 1` and the largest item_id is `num_items - 1` + + 3) Sort the dataframe by user_id, with timestamp as a secondary sort key. + This allows the dataframe to be sliced by user in-place, and for the last + item to be selected simply by calling the `-1` index of a user's slice. + + While all of these transformations are performed by Pandas (and are therefore + single-threaded), they only take ~2 minutes, and the overhead to apply a + MapReduce pattern to parallel process the dataset adds significant complexity + for no computational gain. For a larger dataset parallelizing this + preprocessing could yield speedups. (Also, this preprocessing step is only + performed once for an entire run. + + Args: + raw_rating_path: The path to the CSV which contains the raw dataset. + cache_path: The path to the file where results of this function are saved. + + Returns: + A filtered, zero-index remapped, sorted dataframe, a dict mapping raw user + IDs to regularized user IDs, and a dict mapping raw item IDs to regularized + item IDs. + """ + valid_cache = tf.io.gfile.exists(cache_path) + if valid_cache: + with tf.io.gfile.GFile(cache_path, "rb") as f: + cached_data = pickle.load(f) + + # (nnigania)disabled this check as the dataset is not expected to change + # cache_age = time.time() - cached_data.get("create_time", 0) + # if cache_age > rconst.CACHE_INVALIDATION_SEC: + # valid_cache = False + + for key in _EXPECTED_CACHE_KEYS: + if key not in cached_data: + valid_cache = False + + if not valid_cache: + logging.info("Removing stale raw data cache file.") + tf.io.gfile.remove(cache_path) + + if valid_cache: + data = cached_data + else: + user_map, item_map, df = read_dataframe(raw_rating_path) + + grouped = df.groupby(movielens.USER_COLUMN, group_keys=False) + eval_df, train_df = grouped.tail(1), grouped.apply(lambda x: x.iloc[:-1]) + + data = { + rconst.TRAIN_USER_KEY: train_df[movielens.USER_COLUMN] + .values.astype(rconst.USER_DTYPE), + rconst.TRAIN_ITEM_KEY: train_df[movielens.ITEM_COLUMN] + .values.astype(rconst.ITEM_DTYPE), + rconst.EVAL_USER_KEY: eval_df[movielens.USER_COLUMN] + .values.astype(rconst.USER_DTYPE), + rconst.EVAL_ITEM_KEY: eval_df[movielens.ITEM_COLUMN] + .values.astype(rconst.ITEM_DTYPE), + rconst.USER_MAP: user_map, + rconst.ITEM_MAP: item_map, + "create_time": time.time(), + } + + logging.info("Writing raw data cache.") + with tf.io.gfile.GFile(cache_path, "wb") as f: + pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL) + + # TODO(robieta): MLPerf cache clear. 
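+  # `data` is the dict of train/eval arrays and id maps assembled above;
+  # `valid_cache` tells the caller whether an existing cache file was reused.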
+ return data, valid_cache + + +def instantiate_pipeline(dataset, + data_dir, + params, + constructor_type=None, + deterministic=False, + epoch_dir=None, + generate_data_offline=False): + # type: (str, str, dict, typing.Optional[str], bool, typing.Optional[str], bool) -> (int, int, data_pipeline.BaseDataConstructor) + """Load and digest data CSV into a usable form. + + Args: + dataset: The name of the dataset to be used. + data_dir: The root directory of the dataset. + params: dict of parameters for the run. + constructor_type: The name of the constructor subclass that should be used + for the input pipeline. + deterministic: Tell the data constructor to produce deterministically. + epoch_dir: Directory in which to store the training epochs. + generate_data_offline: Boolean, whether current pipeline is done offline + or while training. + """ + logging.info("Beginning data preprocessing.") + + st = timeit.default_timer() + raw_rating_path = os.path.join(data_dir, dataset, movielens.RATINGS_FILE) + cache_path = os.path.join(data_dir, dataset, rconst.RAW_CACHE_FILE) + + raw_data, _ = _filter_index_sort(raw_rating_path, cache_path) + user_map, item_map = raw_data["user_map"], raw_data["item_map"] + num_users, num_items = movielens.DATASET_TO_NUM_USERS_AND_ITEMS[dataset] + + if num_users != len(user_map): + raise ValueError("Expected to find {} users, but found {}".format( + num_users, len(user_map))) + if num_items != len(item_map): + raise ValueError("Expected to find {} items, but found {}".format( + num_items, len(item_map))) + + producer = data_pipeline.get_constructor(constructor_type or "materialized")( + maximum_number_epochs=params["train_epochs"], + num_users=num_users, + num_items=num_items, + user_map=user_map, + item_map=item_map, + train_pos_users=raw_data[rconst.TRAIN_USER_KEY], + train_pos_items=raw_data[rconst.TRAIN_ITEM_KEY], + train_batch_size=params["batch_size"], + batches_per_train_step=params["batches_per_step"], + num_train_negatives=params["num_neg"], + eval_pos_users=raw_data[rconst.EVAL_USER_KEY], + eval_pos_items=raw_data[rconst.EVAL_ITEM_KEY], + eval_batch_size=params["eval_batch_size"], + batches_per_eval_step=params["batches_per_step"], + stream_files=params["stream_files"], + deterministic=deterministic, + epoch_dir=epoch_dir, + create_data_offline=generate_data_offline) + + run_time = timeit.default_timer() - st + logging.info("Data preprocessing complete. Time: {:.1f} sec." + .format(run_time)) + + print(producer) + return num_users, num_items, producer diff --git a/models/official/recommendation/data_test.py b/models/official/recommendation/data_test.py new file mode 100644 index 0000000000000000000000000000000000000000..9541ee3f8bb4c65fb1f69070fa3876ee51b6c191 --- /dev/null +++ b/models/official/recommendation/data_test.py @@ -0,0 +1,355 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Test NCF data pipeline.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from collections import defaultdict +import hashlib +import os + +import mock +import numpy as np +import scipy.stats +import tensorflow as tf + +from official.recommendation import constants as rconst +from official.recommendation import data_preprocessing +from official.recommendation import movielens +from official.recommendation import popen_helper + + +DATASET = "ml-test" +NUM_USERS = 1000 +NUM_ITEMS = 2000 +NUM_PTS = 50000 +BATCH_SIZE = 2048 +EVAL_BATCH_SIZE = 4000 +NUM_NEG = 4 + + +END_TO_END_TRAIN_MD5 = "b218738e915e825d03939c5e305a2698" +END_TO_END_EVAL_MD5 = "d753d0f3186831466d6e218163a9501e" +FRESH_RANDOMNESS_MD5 = "63d0dff73c0e5f1048fbdc8c65021e22" + + +def mock_download(*args, **kwargs): + return + + +# The forkpool used by data producers interacts badly with the threading +# used by TestCase. Without this patch tests will hang, and no amount +# of diligent closing and joining within the producer will prevent it. +@mock.patch.object(popen_helper, "get_forkpool", popen_helper.get_fauxpool) +class BaseTest(tf.test.TestCase): + + def setUp(self): + tf.compat.v1.disable_eager_execution() + self.temp_data_dir = self.get_temp_dir() + ratings_folder = os.path.join(self.temp_data_dir, DATASET) + tf.io.gfile.makedirs(ratings_folder) + np.random.seed(0) + raw_user_ids = np.arange(NUM_USERS * 3) + np.random.shuffle(raw_user_ids) + raw_user_ids = raw_user_ids[:NUM_USERS] + + raw_item_ids = np.arange(NUM_ITEMS * 3) + np.random.shuffle(raw_item_ids) + raw_item_ids = raw_item_ids[:NUM_ITEMS] + + users = np.random.choice(raw_user_ids, NUM_PTS) + items = np.random.choice(raw_item_ids, NUM_PTS) + scores = np.random.randint(low=0, high=5, size=NUM_PTS) + times = np.random.randint(low=1000000000, high=1200000000, size=NUM_PTS) + + self.rating_file = os.path.join(ratings_folder, movielens.RATINGS_FILE) + self.seen_pairs = set() + self.holdout = {} + with tf.io.gfile.GFile(self.rating_file, "w") as f: + f.write("user_id,item_id,rating,timestamp\n") + for usr, itm, scr, ts in zip(users, items, scores, times): + pair = (usr, itm) + if pair in self.seen_pairs: + continue + self.seen_pairs.add(pair) + if usr not in self.holdout or (ts, itm) > self.holdout[usr]: + self.holdout[usr] = (ts, itm) + + f.write("{},{},{},{}\n".format(usr, itm, scr, ts)) + + movielens.download = mock_download + movielens.NUM_RATINGS[DATASET] = NUM_PTS + movielens.DATASET_TO_NUM_USERS_AND_ITEMS[DATASET] = (NUM_USERS, NUM_ITEMS) + + def make_params(self, train_epochs=1): + return { + "train_epochs": train_epochs, + "batches_per_step": 1, + "use_seed": False, + "batch_size": BATCH_SIZE, + "eval_batch_size": EVAL_BATCH_SIZE, + "num_neg": NUM_NEG, + "match_mlperf": True, + "use_tpu": False, + "use_xla_for_gpu": False, + "stream_files": False, + } + + def test_preprocessing(self): + # For the most part the necessary checks are performed within + # _filter_index_sort() + + cache_path = os.path.join(self.temp_data_dir, "test_cache.pickle") + data, valid_cache = data_preprocessing._filter_index_sort( + self.rating_file, cache_path=cache_path) + + assert len(data[rconst.USER_MAP]) == NUM_USERS + assert len(data[rconst.ITEM_MAP]) == NUM_ITEMS + + def drain_dataset(self, dataset, g): + # type: (tf.data.Dataset, tf.Graph) -> list + with self.session(graph=g) as sess: + with g.as_default(): + batch = 
tf.compat.v1.data.make_one_shot_iterator(dataset).get_next() + output = [] + while True: + try: + output.append(sess.run(batch)) + except tf.errors.OutOfRangeError: + break + return output + + def _test_end_to_end(self, constructor_type): + params = self.make_params(train_epochs=1) + _, _, producer = data_preprocessing.instantiate_pipeline( + dataset=DATASET, data_dir=self.temp_data_dir, params=params, + constructor_type=constructor_type, deterministic=True) + + producer.start() + producer.join() + assert producer._fatal_exception is None + + user_inv_map = {v: k for k, v in producer.user_map.items()} + item_inv_map = {v: k for k, v in producer.item_map.items()} + + # ========================================================================== + # == Training Data ========================================================= + # ========================================================================== + g = tf.Graph() + with g.as_default(): + input_fn = producer.make_input_fn(is_training=True) + dataset = input_fn(params) + + first_epoch = self.drain_dataset(dataset=dataset, g=g) + + counts = defaultdict(int) + train_examples = { + True: set(), + False: set(), + } + + md5 = hashlib.md5() + for features, labels in first_epoch: + data_list = [ + features[movielens.USER_COLUMN].flatten(), + features[movielens.ITEM_COLUMN].flatten(), + features[rconst.VALID_POINT_MASK].flatten(), + labels.flatten() + ] + for i in data_list: + md5.update(i.tobytes()) + + for u, i, v, l in zip(*data_list): + if not v: + continue # ignore padding + + u_raw = user_inv_map[u] + i_raw = item_inv_map[i] + if ((u_raw, i_raw) in self.seen_pairs) != l: + # The evaluation item is not considered during false negative + # generation, so it will occasionally appear as a negative example + # during training. + assert not l + self.assertEqual(i_raw, self.holdout[u_raw][1]) + train_examples[l].add((u_raw, i_raw)) + counts[(u_raw, i_raw)] += 1 + + self.assertRegexpMatches(md5.hexdigest(), END_TO_END_TRAIN_MD5) + + num_positives_seen = len(train_examples[True]) + self.assertEqual(producer._train_pos_users.shape[0], num_positives_seen) + + # This check is more heuristic because negatives are sampled with + # replacement. It only checks that negative generation is reasonably random. + self.assertGreater( + len(train_examples[False]) / NUM_NEG / num_positives_seen, 0.9) + + # This checks that the samples produced are independent by checking the + # number of duplicate entries. If workers are not properly independent there + # will be lots of repeated pairs. + self.assertLess(np.mean(list(counts.values())), 1.1) + + # ========================================================================== + # == Eval Data ============================================================= + # ========================================================================== + with g.as_default(): + input_fn = producer.make_input_fn(is_training=False) + dataset = input_fn(params) + + eval_data = self.drain_dataset(dataset=dataset, g=g) + + current_user = None + md5 = hashlib.md5() + for features in eval_data: + data_list = [ + features[movielens.USER_COLUMN].flatten(), + features[movielens.ITEM_COLUMN].flatten(), + features[rconst.DUPLICATE_MASK].flatten() + ] + for i in data_list: + md5.update(i.tobytes()) + + for idx, (u, i, d) in enumerate(zip(*data_list)): + u_raw = user_inv_map[u] + i_raw = item_inv_map[i] + if current_user is None: + current_user = u + + # Ensure that users appear in blocks, as the evaluation logic expects + # this structure. 
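+        # With rconst.NUM_EVAL_NEGATIVES equal to 999, each user contributes a
+        # block of 1,000 consecutive eval rows, so the EVAL_BATCH_SIZE of 4000
+        # used in this test spans four complete user blocks per batch.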
+        self.assertEqual(u, current_user)
+
+        # The structure of evaluation data is 999 negative examples followed
+        # by the holdout positive.
+        if not (idx + 1) % (rconst.NUM_EVAL_NEGATIVES + 1):
+          # Check that the last element in each chunk is the holdout item.
+          self.assertEqual(i_raw, self.holdout[u_raw][1])
+          current_user = None
+
+        elif i_raw == self.holdout[u_raw][1]:
+          # Because the holdout item is not given to the negative generation
+          # process, it can appear as a negative. In that case, it should be
+          # masked out as a duplicate. (Since the true positive is placed at
+          # the end and would therefore lose the tie.)
+          assert d
+
+        else:
+          # Otherwise check that the other 999 points for a user are selected
+          # from the negatives.
+          assert (u_raw, i_raw) not in self.seen_pairs
+
+    self.assertRegexpMatches(md5.hexdigest(), END_TO_END_EVAL_MD5)
+
+  def _test_fresh_randomness(self, constructor_type):
+    train_epochs = 5
+    params = self.make_params(train_epochs=train_epochs)
+    _, _, producer = data_preprocessing.instantiate_pipeline(
+        dataset=DATASET, data_dir=self.temp_data_dir, params=params,
+        constructor_type=constructor_type, deterministic=True)
+
+    producer.start()
+
+    results = []
+    g = tf.Graph()
+    with g.as_default():
+      for _ in range(train_epochs):
+        input_fn = producer.make_input_fn(is_training=True)
+        dataset = input_fn(params)
+        results.extend(self.drain_dataset(dataset=dataset, g=g))
+
+    producer.join()
+    assert producer._fatal_exception is None
+
+    positive_counts, negative_counts = defaultdict(int), defaultdict(int)
+    md5 = hashlib.md5()
+    for features, labels in results:
+      data_list = [
+          features[movielens.USER_COLUMN].flatten(),
+          features[movielens.ITEM_COLUMN].flatten(),
+          features[rconst.VALID_POINT_MASK].flatten(),
+          labels.flatten()
+      ]
+      for i in data_list:
+        md5.update(i.tobytes())
+
+      for u, i, v, l in zip(*data_list):
+        if not v:
+          continue  # ignore padding
+
+        if l:
+          positive_counts[(u, i)] += 1
+        else:
+          negative_counts[(u, i)] += 1
+
+    self.assertRegexpMatches(md5.hexdigest(), FRESH_RANDOMNESS_MD5)
+
+    # The positive examples should appear exactly once each epoch
+    self.assertAllEqual(list(positive_counts.values()),
+                        [train_epochs for _ in positive_counts])
+
+    # The threshold for the negatives is heuristic: repeats are expected, but
+    # they should not appear too frequently.
+
+    pair_cardinality = NUM_USERS * NUM_ITEMS
+    neg_pair_cardinality = pair_cardinality - len(self.seen_pairs)
+
+    # Approximation of the expected number of times that a particular
+    # negative will appear in a given epoch. Implicit in this calculation is
+    # the treatment of all negative pairs as equally likely. In general that
+    # is not necessarily reasonable; however, the generation in self.setUp()
+    # will approximate this behavior sufficiently for heuristic testing.
+    e_sample = len(self.seen_pairs) * NUM_NEG / neg_pair_cardinality
+
+    # The frequency of occurrence of a given negative pair should follow an
+    # approximately binomial distribution in the limit that the cardinality of
+    # the negative pair set >> number of samples per epoch.
+    approx_pdf = scipy.stats.binom.pmf(k=np.arange(train_epochs+1),
+                                       n=train_epochs, p=e_sample)
+
+    # Tally the actual observed counts.
+    count_distribution = [0 for _ in range(train_epochs + 1)]
+    for i in negative_counts.values():
+      i = min([i, train_epochs])  # round down tail for simplicity.
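+      # count_distribution[i] tallies how many negative pairs were sampled
+      # exactly i times over all epochs (the tail is collapsed into the last
+      # bucket). With e_sample of roughly 0.1 for this test setup, most of the
+      # mass should fall in the 0- and 1-count buckets.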
+ count_distribution[i] += 1 + count_distribution[0] = neg_pair_cardinality - sum(count_distribution[1:]) + + # Check that the frequency of negative pairs is approximately binomial. + for i in range(train_epochs + 1): + if approx_pdf[i] < 0.05: + continue # Variance will be high at the tails. + + observed_fraction = count_distribution[i] / neg_pair_cardinality + deviation = (2 * abs(observed_fraction - approx_pdf[i]) / + (observed_fraction + approx_pdf[i])) + + self.assertLess(deviation, 0.2) + + def test_end_to_end_materialized(self): + self._test_end_to_end("materialized") + + def test_end_to_end_bisection(self): + self._test_end_to_end("bisection") + + def test_fresh_randomness_materialized(self): + self._test_fresh_randomness("materialized") + + def test_fresh_randomness_bisection(self): + self._test_fresh_randomness("bisection") + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/recommendation/movielens.py b/models/official/recommendation/movielens.py new file mode 100644 index 0000000000000000000000000000000000000000..576519a316bb3e05d786ac737da19cb44d2b61c4 --- /dev/null +++ b/models/official/recommendation/movielens.py @@ -0,0 +1,317 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Download and extract the MovieLens dataset from GroupLens website. + +Download the dataset, and perform basic preprocessing. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import sys +import tempfile +import zipfile + +# pylint: disable=g-bad-import-order +import numpy as np +import pandas as pd +import six +from six.moves import urllib # pylint: disable=redefined-builtin +from absl import app +from absl import flags +from absl import logging +import tensorflow as tf +# pylint: enable=g-bad-import-order + +from official.utils.flags import core as flags_core + + +ML_1M = "ml-1m" +ML_20M = "ml-20m" +DATASETS = [ML_1M, ML_20M] + +RATINGS_FILE = "ratings.csv" +MOVIES_FILE = "movies.csv" + +# URL to download dataset +_DATA_URL = "http://files.grouplens.org/datasets/movielens/" + +GENRE_COLUMN = "genres" +ITEM_COLUMN = "item_id" # movies +RATING_COLUMN = "rating" +TIMESTAMP_COLUMN = "timestamp" +TITLE_COLUMN = "titles" +USER_COLUMN = "user_id" + +GENRES = [ + 'Action', 'Adventure', 'Animation', "Children", 'Comedy', 'Crime', + 'Documentary', 'Drama', 'Fantasy', 'Film-Noir', 'Horror', "IMAX", 'Musical', + 'Mystery', 'Romance', 'Sci-Fi', 'Thriller', 'War', 'Western' +] +N_GENRE = len(GENRES) + +RATING_COLUMNS = [USER_COLUMN, ITEM_COLUMN, RATING_COLUMN, TIMESTAMP_COLUMN] +MOVIE_COLUMNS = [ITEM_COLUMN, TITLE_COLUMN, GENRE_COLUMN] + +# Note: Users are indexed [1, k], not [0, k-1] +NUM_USER_IDS = { + ML_1M: 6040, + ML_20M: 138493, +} + +# Note: Movies are indexed [1, k], not [0, k-1] +# Both the 1m and 20m datasets use the same movie set. 
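+# Only 3,706 of these IDs actually occur in the ml-1m ratings (see
+# DATASET_TO_NUM_USERS_AND_ITEMS below); NUM_ITEM_IDS describes the raw ID
+# space rather than the number of rated movies.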
+NUM_ITEM_IDS = 3952 + +MAX_RATING = 5 + +NUM_RATINGS = { + ML_1M: 1000209, + ML_20M: 20000263 +} + +DATASET_TO_NUM_USERS_AND_ITEMS = {ML_1M: (6040, 3706), ML_20M: (138493, 26744)} + + +def _download_and_clean(dataset, data_dir): + """Download MovieLens dataset in a standard format. + + This function downloads the specified MovieLens format and coerces it into a + standard format. The only difference between the ml-1m and ml-20m datasets + after this point (other than size, of course) is that the 1m dataset uses + whole number ratings while the 20m dataset allows half integer ratings. + """ + if dataset not in DATASETS: + raise ValueError("dataset {} is not in {{{}}}".format( + dataset, ",".join(DATASETS))) + + data_subdir = os.path.join(data_dir, dataset) + + expected_files = ["{}.zip".format(dataset), RATINGS_FILE, MOVIES_FILE] + + tf.io.gfile.makedirs(data_subdir) + if set(expected_files).intersection( + tf.io.gfile.listdir(data_subdir)) == set(expected_files): + logging.info("Dataset {} has already been downloaded".format(dataset)) + return + + url = "{}{}.zip".format(_DATA_URL, dataset) + + temp_dir = tempfile.mkdtemp() + try: + zip_path = os.path.join(temp_dir, "{}.zip".format(dataset)) + zip_path, _ = urllib.request.urlretrieve(url, zip_path) + statinfo = os.stat(zip_path) + # A new line to clear the carriage return from download progress + # logging.info is not applicable here + print() + logging.info( + "Successfully downloaded {} {} bytes".format( + zip_path, statinfo.st_size)) + + zipfile.ZipFile(zip_path, "r").extractall(temp_dir) + + if dataset == ML_1M: + _regularize_1m_dataset(temp_dir) + else: + _regularize_20m_dataset(temp_dir) + + for fname in tf.io.gfile.listdir(temp_dir): + if not tf.io.gfile.exists(os.path.join(data_subdir, fname)): + tf.io.gfile.copy(os.path.join(temp_dir, fname), + os.path.join(data_subdir, fname)) + else: + logging.info("Skipping copy of {}, as it already exists in the " + "destination folder.".format(fname)) + + finally: + tf.io.gfile.rmtree(temp_dir) + + +def _transform_csv(input_path, output_path, names, skip_first, separator=","): + """Transform csv to a regularized format. + + Args: + input_path: The path of the raw csv. + output_path: The path of the cleaned csv. + names: The csv column names. + skip_first: Boolean of whether to skip the first line of the raw csv. + separator: Character used to separate fields in the raw csv. + """ + if six.PY2: + names = [six.ensure_text(n, "utf-8") for n in names] + + with tf.io.gfile.GFile(output_path, "wb") as f_out, \ + tf.io.gfile.GFile(input_path, "rb") as f_in: + + # Write column names to the csv. + f_out.write(",".join(names).encode("utf-8")) + f_out.write(b"\n") + for i, line in enumerate(f_in): + if i == 0 and skip_first: + continue # ignore existing labels in the csv + + line = six.ensure_text(line, "utf-8", errors="ignore") + fields = line.split(separator) + if separator != ",": + fields = ['"{}"'.format(field) if "," in field else field + for field in fields] + f_out.write(",".join(fields).encode("utf-8")) + + +def _regularize_1m_dataset(temp_dir): + """ + ratings.dat + The file has no header row, and each line is in the following format: + UserID::MovieID::Rating::Timestamp + - UserIDs range from 1 and 6040 + - MovieIDs range from 1 and 3952 + - Ratings are made on a 5-star scale (whole-star ratings only) + - Timestamp is represented in seconds since midnight Coordinated Universal + Time (UTC) of January 1, 1970. 
+ - Each user has at least 20 ratings + + movies.dat + Each line has the following format: + MovieID::Title::Genres + - MovieIDs range from 1 and 3952 + """ + working_dir = os.path.join(temp_dir, ML_1M) + + _transform_csv( + input_path=os.path.join(working_dir, "ratings.dat"), + output_path=os.path.join(temp_dir, RATINGS_FILE), + names=RATING_COLUMNS, skip_first=False, separator="::") + + _transform_csv( + input_path=os.path.join(working_dir, "movies.dat"), + output_path=os.path.join(temp_dir, MOVIES_FILE), + names=MOVIE_COLUMNS, skip_first=False, separator="::") + + tf.io.gfile.rmtree(working_dir) + + +def _regularize_20m_dataset(temp_dir): + """ + ratings.csv + Each line of this file after the header row represents one rating of one + movie by one user, and has the following format: + userId,movieId,rating,timestamp + - The lines within this file are ordered first by userId, then, within user, + by movieId. + - Ratings are made on a 5-star scale, with half-star increments + (0.5 stars - 5.0 stars). + - Timestamps represent seconds since midnight Coordinated Universal Time + (UTC) of January 1, 1970. + - All the users had rated at least 20 movies. + + movies.csv + Each line has the following format: + MovieID,Title,Genres + - MovieIDs range from 1 and 3952 + """ + working_dir = os.path.join(temp_dir, ML_20M) + + _transform_csv( + input_path=os.path.join(working_dir, "ratings.csv"), + output_path=os.path.join(temp_dir, RATINGS_FILE), + names=RATING_COLUMNS, skip_first=True, separator=",") + + _transform_csv( + input_path=os.path.join(working_dir, "movies.csv"), + output_path=os.path.join(temp_dir, MOVIES_FILE), + names=MOVIE_COLUMNS, skip_first=True, separator=",") + + tf.io.gfile.rmtree(working_dir) + + +def download(dataset, data_dir): + if dataset: + _download_and_clean(dataset, data_dir) + else: + _ = [_download_and_clean(d, data_dir) for d in DATASETS] + + +def ratings_csv_to_dataframe(data_dir, dataset): + with tf.io.gfile.GFile(os.path.join(data_dir, dataset, RATINGS_FILE)) as f: + return pd.read_csv(f, encoding="utf-8") + + +def csv_to_joint_dataframe(data_dir, dataset): + ratings = ratings_csv_to_dataframe(data_dir, dataset) + + with tf.io.gfile.GFile(os.path.join(data_dir, dataset, MOVIES_FILE)) as f: + movies = pd.read_csv(f, encoding="utf-8") + + df = ratings.merge(movies, on=ITEM_COLUMN) + df[RATING_COLUMN] = df[RATING_COLUMN].astype(np.float32) + + return df + + +def integerize_genres(dataframe): + """Replace genre string with a binary vector. + + Args: + dataframe: a pandas dataframe of movie data. + + Returns: + The transformed dataframe. + """ + def _map_fn(entry): + entry.replace("Children's", "Children") # naming difference. 
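+    # e.g. "Action|Sci-Fi|Thriller" becomes a length-19 vector with ones at
+    # the Action, Sci-Fi and Thriller positions. Note that str.replace returns
+    # a new string, so the "Children's" -> "Children" rename above only takes
+    # effect if its result is assigned back to entry.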
+ movie_genres = entry.split("|") + output = np.zeros((len(GENRES),), dtype=np.int64) + for i, genre in enumerate(GENRES): + if genre in movie_genres: + output[i] = 1 + return output + + dataframe[GENRE_COLUMN] = dataframe[GENRE_COLUMN].apply(_map_fn) + + return dataframe + + +def define_flags(): + """Add flags specifying data usage arguments.""" + flags.DEFINE_enum( + name="dataset", + default=None, + enum_values=DATASETS, + case_sensitive=False, + help=flags_core.help_wrap("Dataset to be trained and evaluated.")) + + +def define_data_download_flags(): + """Add flags specifying data download and usage arguments.""" + flags.DEFINE_string( + name="data_dir", default="/tmp/movielens-data/", + help=flags_core.help_wrap( + "Directory to download and extract data.")) + + define_flags() + + +def main(_): + """Download and extract the data from GroupLens website.""" + download(flags.FLAGS.dataset, flags.FLAGS.data_dir) + + +if __name__ == "__main__": + define_data_download_flags() + FLAGS = flags.FLAGS + app.run(main) diff --git a/models/official/recommendation/ncf_common.py b/models/official/recommendation/ncf_common.py new file mode 100644 index 0000000000000000000000000000000000000000..8abc927bfa29c52d6c151023d281d7e4f6f52100 --- /dev/null +++ b/models/official/recommendation/ncf_common.py @@ -0,0 +1,327 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Common functionalities used by both Keras and Estimator implementations. 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import json +import os + +# pylint: disable=g-bad-import-order +import numpy as np +from absl import flags +from absl import logging +import tensorflow as tf +# pylint: enable=g-bad-import-order + +from official.recommendation import constants as rconst +from official.recommendation import data_pipeline +from official.recommendation import data_preprocessing +from official.recommendation import movielens +from official.utils.flags import core as flags_core +from official.utils.misc import distribution_utils +from official.utils.misc import keras_utils + +FLAGS = flags.FLAGS + + +def get_inputs(params): + """Returns some parameters used by the model.""" + if FLAGS.download_if_missing and not FLAGS.use_synthetic_data: + movielens.download(FLAGS.dataset, FLAGS.data_dir) + + if FLAGS.seed is not None: + np.random.seed(FLAGS.seed) + + if FLAGS.use_synthetic_data: + producer = data_pipeline.DummyConstructor() + num_users, num_items = movielens.DATASET_TO_NUM_USERS_AND_ITEMS[ + FLAGS.dataset] + num_train_steps = rconst.SYNTHETIC_BATCHES_PER_EPOCH + num_eval_steps = rconst.SYNTHETIC_BATCHES_PER_EPOCH + else: + num_users, num_items, producer = data_preprocessing.instantiate_pipeline( + dataset=FLAGS.dataset, data_dir=FLAGS.data_dir, params=params, + constructor_type=FLAGS.constructor_type, + deterministic=FLAGS.seed is not None) + num_train_steps = producer.train_batches_per_epoch + num_eval_steps = producer.eval_batches_per_epoch + + return num_users, num_items, num_train_steps, num_eval_steps, producer + + +def parse_flags(flags_obj): + """Convenience function to turn flags into params.""" + num_gpus = flags_core.get_num_gpus(flags_obj) + + batch_size = flags_obj.batch_size + eval_batch_size = flags_obj.eval_batch_size or flags_obj.batch_size + + return { + "train_epochs": flags_obj.train_epochs, + "batches_per_step": 1, + "use_seed": flags_obj.seed is not None, + "batch_size": batch_size, + "eval_batch_size": eval_batch_size, + "learning_rate": flags_obj.learning_rate, + "mf_dim": flags_obj.num_factors, + "model_layers": [int(layer) for layer in flags_obj.layers], + "mf_regularization": flags_obj.mf_regularization, + "mlp_reg_layers": [float(reg) for reg in flags_obj.mlp_regularization], + "num_neg": flags_obj.num_neg, + "distribution_strategy": flags_obj.distribution_strategy, + "num_gpus": num_gpus, + "use_tpu": flags_obj.tpu is not None, + "tpu": flags_obj.tpu, + "tpu_zone": flags_obj.tpu_zone, + "tpu_gcp_project": flags_obj.tpu_gcp_project, + "beta1": flags_obj.beta1, + "beta2": flags_obj.beta2, + "epsilon": flags_obj.epsilon, + "match_mlperf": flags_obj.ml_perf, + "epochs_between_evals": FLAGS.epochs_between_evals, + "keras_use_ctl": flags_obj.keras_use_ctl, + "hr_threshold": flags_obj.hr_threshold, + "stream_files": flags_obj.tpu is not None, + "train_dataset_path": flags_obj.train_dataset_path, + "eval_dataset_path": flags_obj.eval_dataset_path, + "input_meta_data_path": flags_obj.input_meta_data_path, + } + + +def get_v1_distribution_strategy(params): + """Returns the distribution strategy to use.""" + if params["use_tpu"]: + # Some of the networking libraries are quite chatty. 
+ for name in ["googleapiclient.discovery", "googleapiclient.discovery_cache", + "oauth2client.transport"]: + logging.getLogger(name).setLevel(logging.ERROR) + + tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver( + tpu=params["tpu"], + zone=params["tpu_zone"], + project=params["tpu_gcp_project"], + coordinator_name="coordinator" + ) + + logging.info("Issuing reset command to TPU to ensure a clean state.") + tf.Session.reset(tpu_cluster_resolver.get_master()) + + # Estimator looks at the master it connects to for MonitoredTrainingSession + # by reading the `TF_CONFIG` environment variable, and the coordinator + # is used by StreamingFilesDataset. + tf_config_env = { + "session_master": tpu_cluster_resolver.get_master(), + "eval_session_master": tpu_cluster_resolver.get_master(), + "coordinator": tpu_cluster_resolver.cluster_spec() + .as_dict()["coordinator"] + } + os.environ["TF_CONFIG"] = json.dumps(tf_config_env) + + distribution = tf.distribute.experimental.TPUStrategy( + tpu_cluster_resolver, steps_per_run=100) + + else: + distribution = distribution_utils.get_distribution_strategy( + num_gpus=params["num_gpus"]) + + return distribution + + +def define_ncf_flags(): + """Add flags for running ncf_main.""" + # Add common flags + flags_core.define_base(model_dir=True, clean=True, train_epochs=True, + epochs_between_evals=True, export_dir=False, + run_eagerly=True, stop_threshold=True, num_gpu=True, + distribution_strategy=True) + flags_core.define_performance( + synthetic_data=True, + dtype=True, + fp16_implementation=True, + loss_scale=True, + dynamic_loss_scale=True, + enable_xla=True, + ) + flags_core.define_device(tpu=True) + flags_core.define_benchmark() + + flags.adopt_module_key_flags(flags_core) + + movielens.define_flags() + + flags_core.set_defaults( + model_dir="/tmp/ncf/", + data_dir="/tmp/movielens-data/", + dataset=movielens.ML_1M, + train_epochs=2, + batch_size=99000, + tpu=None + ) + + # Add ncf-specific flags + flags.DEFINE_boolean( + name="download_if_missing", default=True, help=flags_core.help_wrap( + "Download data to data_dir if it is not already present.")) + + flags.DEFINE_integer( + name="eval_batch_size", default=None, help=flags_core.help_wrap( + "The batch size used for evaluation. This should generally be larger" + "than the training batch size as the lack of back propagation during" + "evaluation can allow for larger batch sizes to fit in memory. If not" + "specified, the training batch size (--batch_size) will be used.")) + + flags.DEFINE_integer( + name="num_factors", default=8, + help=flags_core.help_wrap("The Embedding size of MF model.")) + + # Set the default as a list of strings to be consistent with input arguments + flags.DEFINE_list( + name="layers", default=["64", "32", "16", "8"], + help=flags_core.help_wrap( + "The sizes of hidden layers for MLP. Example " + "to specify different sizes of MLP layers: --layers=32,16,8,4")) + + flags.DEFINE_float( + name="mf_regularization", default=0., + help=flags_core.help_wrap( + "The regularization factor for MF embeddings. The factor is used by " + "regularizer which allows to apply penalties on layer parameters or " + "layer activity during optimization.")) + + flags.DEFINE_list( + name="mlp_regularization", default=["0.", "0.", "0.", "0."], + help=flags_core.help_wrap( + "The regularization factor for each MLP layer. 
See mf_regularization " + "help for more info about regularization factor.")) + + flags.DEFINE_integer( + name="num_neg", default=4, + help=flags_core.help_wrap( + "The Number of negative instances to pair with a positive instance.")) + + flags.DEFINE_float( + name="learning_rate", default=0.001, + help=flags_core.help_wrap("The learning rate.")) + + flags.DEFINE_float( + name="beta1", default=0.9, + help=flags_core.help_wrap("beta1 hyperparameter for the Adam optimizer.")) + + flags.DEFINE_float( + name="beta2", default=0.999, + help=flags_core.help_wrap("beta2 hyperparameter for the Adam optimizer.")) + + flags.DEFINE_float( + name="epsilon", default=1e-8, + help=flags_core.help_wrap("epsilon hyperparameter for the Adam " + "optimizer.")) + + flags.DEFINE_float( + name="hr_threshold", default=1.0, + help=flags_core.help_wrap( + "If passed, training will stop when the evaluation metric HR is " + "greater than or equal to hr_threshold. For dataset ml-1m, the " + "desired hr_threshold is 0.68 which is the result from the paper; " + "For dataset ml-20m, the threshold can be set as 0.95 which is " + "achieved by MLPerf implementation.")) + + flags.DEFINE_enum( + name="constructor_type", default="bisection", + enum_values=["bisection", "materialized"], case_sensitive=False, + help=flags_core.help_wrap( + "Strategy to use for generating false negatives. materialized has a" + "precompute that scales badly, but a faster per-epoch construction" + "time and can be faster on very large systems.")) + + flags.DEFINE_string( + name="train_dataset_path", + default=None, + help=flags_core.help_wrap("Path to training data.")) + + flags.DEFINE_string( + name="eval_dataset_path", + default=None, + help=flags_core.help_wrap("Path to evaluation data.")) + + flags.DEFINE_string( + name="input_meta_data_path", + default=None, + help=flags_core.help_wrap("Path to input meta data file.")) + + flags.DEFINE_bool( + name="ml_perf", default=False, + help=flags_core.help_wrap( + "If set, changes the behavior of the model slightly to match the " + "MLPerf reference implementations here: \n" + "https://github.com/mlperf/reference/tree/master/recommendation/" + "pytorch\n" + "The two changes are:\n" + "1. When computing the HR and NDCG during evaluation, remove " + "duplicate user-item pairs before the computation. This results in " + "better HRs and NDCGs.\n" + "2. Use a different soring algorithm when sorting the input data, " + "which performs better due to the fact the sorting algorithms are " + "not stable.")) + + flags.DEFINE_bool( + name="output_ml_perf_compliance_logging", default=False, + help=flags_core.help_wrap( + "If set, output the MLPerf compliance logging. This is only useful " + "if one is running the model for MLPerf. See " + "https://github.com/mlperf/policies/blob/master/training_rules.adoc" + "#submission-compliance-logs for details. This uses sudo and so may " + "ask for your password, as root access is needed to clear the system " + "caches, which is required for MLPerf compliance." 
+ ) + ) + + flags.DEFINE_integer( + name="seed", default=None, help=flags_core.help_wrap( + "This value will be used to seed both NumPy and TensorFlow.")) + + @flags.validator("eval_batch_size", "eval_batch_size must be at least {}" + .format(rconst.NUM_EVAL_NEGATIVES + 1)) + def eval_size_check(eval_batch_size): + return (eval_batch_size is None or + int(eval_batch_size) > rconst.NUM_EVAL_NEGATIVES) + + flags.DEFINE_bool( + name="early_stopping", + default=False, + help=flags_core.help_wrap( + "If True, we stop the training when it reaches hr_threshold")) + + flags.DEFINE_bool( + name="keras_use_ctl", + default=False, + help=flags_core.help_wrap( + "If True, we use a custom training loop for keras.")) + + +def convert_to_softmax_logits(logits): + """Convert the logits returned by the base model to softmax logits. + + Args: + logits: used to create softmax. + + Returns: + Softmax with the first column of zeros is equivalent to sigmoid. + """ + softmax_logits = tf.concat([logits * 0, logits], axis=1) + return softmax_logits diff --git a/models/official/recommendation/ncf_input_pipeline.py b/models/official/recommendation/ncf_input_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..4dab86c43bfde14eb5adfc82e52b30b315060217 --- /dev/null +++ b/models/official/recommendation/ncf_input_pipeline.py @@ -0,0 +1,200 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""NCF model input pipeline.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools + +# pylint: disable=g-bad-import-order +import tensorflow.compat.v2 as tf +# pylint: enable=g-bad-import-order + +from official.recommendation import constants as rconst +from official.recommendation import movielens +from official.recommendation import data_pipeline + +NUM_SHARDS = 16 + + +def create_dataset_from_tf_record_files(input_file_pattern, + pre_batch_size, + batch_size, + is_training=True, + rebatch=False): + """Creates dataset from (tf)records files for training/evaluation.""" + + files = tf.data.Dataset.list_files(input_file_pattern, shuffle=is_training) + + def make_dataset(files_dataset, shard_index): + """Returns dataset for sharded tf record files.""" + if pre_batch_size != batch_size: + raise ValueError("Pre-batch ({}) size is not equal to batch " + "size ({})".format(pre_batch_size, batch_size)) + files_dataset = files_dataset.shard(NUM_SHARDS, shard_index) + dataset = files_dataset.interleave( + tf.data.TFRecordDataset, + num_parallel_calls=tf.data.experimental.AUTOTUNE) + decode_fn = functools.partial( + data_pipeline.DatasetManager.deserialize, + batch_size=pre_batch_size, + is_training=is_training) + dataset = dataset.map( + decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) + return dataset + + dataset = tf.data.Dataset.range(NUM_SHARDS) + map_fn = functools.partial(make_dataset, files) + dataset = dataset.interleave( + map_fn, + cycle_length=NUM_SHARDS, + num_parallel_calls=tf.data.experimental.AUTOTUNE) + + if rebatch: + # A workaround for TPU Pod evaluation dataset. + # TODO (b/162341937) remove once it's fixed. + dataset = dataset.unbatch() + dataset = dataset.batch(pre_batch_size) + + dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) + return dataset + + +def create_dataset_from_data_producer(producer, params): + """Return dataset online-generating data.""" + + def preprocess_train_input(features, labels): + """Pre-process the training data. + + This is needed because + - The label needs to be extended to be used in the loss fn + - We need the same inputs for training and eval so adding fake inputs + for DUPLICATE_MASK in training data. + + Args: + features: Dictionary of features for training. + labels: Training labels. + + Returns: + Processed training features. + """ + fake_dup_mask = tf.zeros_like(features[movielens.USER_COLUMN]) + features[rconst.DUPLICATE_MASK] = fake_dup_mask + features[rconst.TRAIN_LABEL_KEY] = labels + return features + + train_input_fn = producer.make_input_fn(is_training=True) + train_input_dataset = train_input_fn(params).map(preprocess_train_input) + + def preprocess_eval_input(features): + """Pre-process the eval data. + + This is needed because: + - The label needs to be extended to be used in the loss fn + - We need the same inputs for training and eval so adding fake inputs + for VALID_PT_MASK in eval data. + + Args: + features: Dictionary of features for evaluation. + + Returns: + Processed evaluation features. 
+ """ + labels = tf.cast(tf.zeros_like(features[movielens.USER_COLUMN]), tf.bool) + fake_valid_pt_mask = tf.cast( + tf.zeros_like(features[movielens.USER_COLUMN]), tf.bool) + features[rconst.VALID_POINT_MASK] = fake_valid_pt_mask + features[rconst.TRAIN_LABEL_KEY] = labels + return features + + eval_input_fn = producer.make_input_fn(is_training=False) + eval_input_dataset = eval_input_fn(params).map(preprocess_eval_input) + + return train_input_dataset, eval_input_dataset + + +def create_ncf_input_data(params, + producer=None, + input_meta_data=None, + strategy=None): + """Creates NCF training/evaluation dataset. + + Args: + params: Dictionary containing parameters for train/evaluation data. + producer: Instance of BaseDataConstructor that generates data online. Must + not be None when params['train_dataset_path'] or + params['eval_dataset_path'] is not specified. + input_meta_data: A dictionary of input metadata to be used when reading data + from tf record files. Must be specified when params["train_input_dataset"] + is specified. + strategy: Distribution strategy used for distributed training. If specified, + used to assert that evaluation batch size is correctly a multiple of + total number of devices used. + + Returns: + (training dataset, evaluation dataset, train steps per epoch, + eval steps per epoch) + + Raises: + ValueError: If data is being generated online for when using TPU's. + """ + # NCF evaluation metric calculation logic assumes that evaluation data + # sample size are in multiples of (1 + number of negative samples in + # evaluation) for each device. As so, evaluation batch size must be a + # multiple of (number of replicas * (1 + number of negative samples)). + num_devices = strategy.num_replicas_in_sync if strategy else 1 + if (params["eval_batch_size"] % (num_devices * + (1 + rconst.NUM_EVAL_NEGATIVES))): + raise ValueError("Evaluation batch size must be divisible by {} " + "times {}".format(num_devices, + (1 + rconst.NUM_EVAL_NEGATIVES))) + + if params["train_dataset_path"]: + assert params["eval_dataset_path"] + + train_dataset = create_dataset_from_tf_record_files( + params["train_dataset_path"], + input_meta_data["train_prebatch_size"], + params["batch_size"], + is_training=True, + rebatch=False) + + # Re-batch evaluation dataset for TPU Pods. + # TODO (b/162341937) remove once it's fixed. + eval_rebatch = (params["use_tpu"] and strategy.num_replicas_in_sync > 8) + eval_dataset = create_dataset_from_tf_record_files( + params["eval_dataset_path"], + input_meta_data["eval_prebatch_size"], + params["eval_batch_size"], + is_training=False, + rebatch=eval_rebatch) + + num_train_steps = int(input_meta_data["num_train_steps"]) + num_eval_steps = int(input_meta_data["num_eval_steps"]) + else: + if params["use_tpu"]: + raise ValueError("TPU training does not support data producer yet. " + "Use pre-processed data.") + + assert producer + # Start retrieving data from producer. 
+ train_dataset, eval_dataset = create_dataset_from_data_producer( + producer, params) + num_train_steps = producer.train_batches_per_epoch + num_eval_steps = producer.eval_batches_per_epoch + + return train_dataset, eval_dataset, num_train_steps, num_eval_steps diff --git a/models/official/recommendation/ncf_keras_main.py b/models/official/recommendation/ncf_keras_main.py new file mode 100644 index 0000000000000000000000000000000000000000..c850539d4bf24e159cbf04a2c029c1e2bf4d5c26 --- /dev/null +++ b/models/official/recommendation/ncf_keras_main.py @@ -0,0 +1,567 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""NCF framework to train and evaluate the NeuMF model. + +The NeuMF model assembles both MF and MLP models under the NCF framework. Check +`neumf_model.py` for more details about the models. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import json +import os + +# pylint: disable=g-bad-import-order +from absl import app +from absl import flags +from absl import logging +import tensorflow.compat.v2 as tf +# pylint: enable=g-bad-import-order + +from official.recommendation import constants as rconst +from official.recommendation import movielens +from official.recommendation import ncf_common +from official.recommendation import ncf_input_pipeline +from official.recommendation import neumf_model +from official.utils.flags import core as flags_core +from official.utils.misc import distribution_utils +from official.utils.misc import keras_utils +from official.utils.misc import model_helpers + + +FLAGS = flags.FLAGS + + +def metric_fn(logits, dup_mask, match_mlperf): + dup_mask = tf.cast(dup_mask, tf.float32) + logits = tf.slice(logits, [0, 1], [-1, -1]) + in_top_k, _, metric_weights, _ = neumf_model.compute_top_k_and_ndcg( + logits, + dup_mask, + match_mlperf) + metric_weights = tf.cast(metric_weights, tf.float32) + return in_top_k, metric_weights + + +class MetricLayer(tf.keras.layers.Layer): + """Custom layer of metrics for NCF model.""" + + def __init__(self, match_mlperf): + super(MetricLayer, self).__init__() + self.match_mlperf = match_mlperf + + def get_config(self): + return {"match_mlperf": self.match_mlperf} + + @classmethod + def from_config(cls, config, custom_objects=None): + return cls(**config) + + def call(self, inputs, training=False): + logits, dup_mask = inputs + + if training: + hr_sum = 0.0 + hr_count = 0.0 + else: + metric, metric_weights = metric_fn(logits, dup_mask, self.match_mlperf) + hr_sum = tf.reduce_sum(metric * metric_weights) + hr_count = tf.reduce_sum(metric_weights) + + self.add_metric(hr_sum, name="hr_sum", aggregation="mean") + self.add_metric(hr_count, name="hr_count", aggregation="mean") + return logits + + +class LossLayer(tf.keras.layers.Layer): + """Pass-through loss layer for NCF model.""" + + def __init__(self, 
+               loss_normalization_factor):
+    # The loss may overflow in float16, so we use float32 instead.
+    super(LossLayer, self).__init__(dtype="float32")
+    self.loss_normalization_factor = loss_normalization_factor
+    self.loss = tf.keras.losses.SparseCategoricalCrossentropy(
+        from_logits=True, reduction="sum")
+
+  def get_config(self):
+    return {"loss_normalization_factor": self.loss_normalization_factor}
+
+  @classmethod
+  def from_config(cls, config, custom_objects=None):
+    return cls(**config)
+
+  def call(self, inputs):
+    logits, labels, valid_pt_mask_input = inputs
+    loss = self.loss(
+        y_true=labels, y_pred=logits, sample_weight=valid_pt_mask_input)
+    loss = loss * (1.0 / self.loss_normalization_factor)
+    self.add_loss(loss)
+    return logits
+
+
+class IncrementEpochCallback(tf.keras.callbacks.Callback):
+  """A callback to increase the requested epoch for the data producer.
+
+  This is needed because only a limited amount of data can be buffered, so the
+  producer keeps a moving window over the data. This callback advances one of
+  the window's boundaries at the start of each epoch.
+  """
+
+  def __init__(self, producer):
+    self._producer = producer
+
+  def on_epoch_begin(self, epoch, logs=None):
+    self._producer.increment_request_epoch()
+
+
+class CustomEarlyStopping(tf.keras.callbacks.Callback):
+  """Stop training once a desired hit rate has been reached."""
+
+  def __init__(self, monitor, desired_value):
+    super(CustomEarlyStopping, self).__init__()
+
+    self.monitor = monitor
+    self.desired = desired_value
+    self.stopped_epoch = 0
+
+  def on_epoch_end(self, epoch, logs=None):
+    current = self.get_monitor_value(logs)
+    if current and current >= self.desired:
+      self.stopped_epoch = epoch
+      self.model.stop_training = True
+
+  def on_train_end(self, logs=None):
+    if self.stopped_epoch > 0:
+      print("Epoch %05d: early stopping" % (self.stopped_epoch + 1))
+
+  def get_monitor_value(self, logs):
+    logs = logs or {}
+    monitor_value = logs.get(self.monitor)
+    if monitor_value is None:
+      logging.warning("Early stopping conditioned on metric `%s` "
+                      "which is not available. Available metrics are: %s",
+                      self.monitor, ",".join(list(logs.keys())))
+    return monitor_value
+
+
+def _get_keras_model(params):
+  """Constructs and returns the model."""
+  batch_size = params["batch_size"]
+
+  user_input = tf.keras.layers.Input(
+      shape=(1,), name=movielens.USER_COLUMN, dtype=tf.int32)
+
+  item_input = tf.keras.layers.Input(
+      shape=(1,), name=movielens.ITEM_COLUMN, dtype=tf.int32)
+
+  valid_pt_mask_input = tf.keras.layers.Input(
+      shape=(1,), name=rconst.VALID_POINT_MASK, dtype=tf.bool)
+
+  dup_mask_input = tf.keras.layers.Input(
+      shape=(1,), name=rconst.DUPLICATE_MASK, dtype=tf.int32)
+
+  label_input = tf.keras.layers.Input(
+      shape=(1,), name=rconst.TRAIN_LABEL_KEY, dtype=tf.bool)
+
+  base_model = neumf_model.construct_model(user_input, item_input, params)
+
+  logits = base_model.output
+
+  zeros = tf.keras.layers.Lambda(
+      lambda x: x * 0)(logits)
+
+  softmax_logits = tf.keras.layers.concatenate(
+      [zeros, logits],
+      axis=-1)
+
+  # Custom training loop calculates loss and metric as a part of
+  # training/evaluation step function.
+  if not params["keras_use_ctl"]:
+    softmax_logits = MetricLayer(
+        params["match_mlperf"])([softmax_logits, dup_mask_input])
+    # TODO(b/134744680): Use model.add_loss() instead once the API is well
+    # supported.
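+    # Feeding the two-column logits [0, z] into a sparse categorical
+    # cross-entropy is equivalent to a sigmoid (binary) cross-entropy on z,
+    # since softmax([0, z])[1] = exp(z) / (1 + exp(z)) = sigmoid(z); for
+    # example softmax([0, 0.3])[1] ~= 0.574 = sigmoid(0.3).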
+ softmax_logits = LossLayer(batch_size)( + [softmax_logits, label_input, valid_pt_mask_input]) + + keras_model = tf.keras.Model( + inputs={ + movielens.USER_COLUMN: user_input, + movielens.ITEM_COLUMN: item_input, + rconst.VALID_POINT_MASK: valid_pt_mask_input, + rconst.DUPLICATE_MASK: dup_mask_input, + rconst.TRAIN_LABEL_KEY: label_input}, + outputs=softmax_logits) + + keras_model.summary() + return keras_model + + +def run_ncf(_): + """Run NCF training and eval with Keras.""" + + keras_utils.set_session_config(enable_xla=FLAGS.enable_xla) + + if FLAGS.seed is not None: + print("Setting tf seed") + tf.random.set_seed(FLAGS.seed) + + model_helpers.apply_clean(FLAGS) + + if FLAGS.dtype == "fp16" and FLAGS.fp16_implementation == "keras": + policy = tf.keras.mixed_precision.experimental.Policy( + "mixed_float16", + loss_scale=flags_core.get_loss_scale(FLAGS, default_for_fp16="dynamic")) + tf.keras.mixed_precision.experimental.set_policy(policy) + + strategy = distribution_utils.get_distribution_strategy( + distribution_strategy=FLAGS.distribution_strategy, + num_gpus=FLAGS.num_gpus, + tpu_address=FLAGS.tpu) + + params = ncf_common.parse_flags(FLAGS) + params["distribute_strategy"] = strategy + params["use_tpu"] = (FLAGS.distribution_strategy == "tpu") + + if params["use_tpu"] and not params["keras_use_ctl"]: + logging.error("Custom training loop must be used when using TPUStrategy.") + return + + batch_size = params["batch_size"] + time_callback = keras_utils.TimeHistory(batch_size, FLAGS.log_steps) + callbacks = [time_callback] + + producer, input_meta_data = None, None + generate_input_online = params["train_dataset_path"] is None + + if generate_input_online: + # Start data producing thread. + num_users, num_items, _, _, producer = ncf_common.get_inputs(params) + producer.start() + per_epoch_callback = IncrementEpochCallback(producer) + callbacks.append(per_epoch_callback) + else: + assert params["eval_dataset_path"] and params["input_meta_data_path"] + with tf.io.gfile.GFile(params["input_meta_data_path"], "rb") as reader: + input_meta_data = json.loads(reader.read().decode("utf-8")) + num_users = input_meta_data["num_users"] + num_items = input_meta_data["num_items"] + + params["num_users"], params["num_items"] = num_users, num_items + + if FLAGS.early_stopping: + early_stopping_callback = CustomEarlyStopping( + "val_HR_METRIC", desired_value=FLAGS.hr_threshold) + callbacks.append(early_stopping_callback) + + (train_input_dataset, eval_input_dataset, + num_train_steps, num_eval_steps) = \ + (ncf_input_pipeline.create_ncf_input_data( + params, producer, input_meta_data, strategy)) + steps_per_epoch = None if generate_input_online else num_train_steps + + with distribution_utils.get_strategy_scope(strategy): + keras_model = _get_keras_model(params) + optimizer = tf.keras.optimizers.Adam( + learning_rate=params["learning_rate"], + beta_1=params["beta1"], + beta_2=params["beta2"], + epsilon=params["epsilon"]) + if FLAGS.fp16_implementation == "graph_rewrite": + optimizer = \ + tf.compat.v1.train.experimental.enable_mixed_precision_graph_rewrite( + optimizer, + loss_scale=flags_core.get_loss_scale(FLAGS, + default_for_fp16="dynamic")) + elif FLAGS.dtype == "fp16" and params["keras_use_ctl"]: + # When keras_use_ctl is False, instead Model.fit() automatically applies + # loss scaling so we don't need to create a LossScaleOptimizer. 
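+      # In this branch keras_use_ctl is True, so the optimizer is wrapped in a
+      # LossScaleOptimizer explicitly; the custom loop then calls
+      # get_scaled_loss() and get_unscaled_gradients() around the gradient
+      # tape.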
+ optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer( + optimizer, + tf.keras.mixed_precision.experimental.global_policy().loss_scale) + + if params["keras_use_ctl"]: + train_loss, eval_results = run_ncf_custom_training( + params, + strategy, + keras_model, + optimizer, + callbacks, + train_input_dataset, + eval_input_dataset, + num_train_steps, + num_eval_steps, + generate_input_online=generate_input_online) + else: + keras_model.compile(optimizer=optimizer, run_eagerly=FLAGS.run_eagerly) + + if not FLAGS.ml_perf: + # Create Tensorboard summary and checkpoint callbacks. + summary_dir = os.path.join(FLAGS.model_dir, "summaries") + summary_callback = tf.keras.callbacks.TensorBoard(summary_dir) + checkpoint_path = os.path.join(FLAGS.model_dir, "checkpoint") + checkpoint_callback = tf.keras.callbacks.ModelCheckpoint( + checkpoint_path, save_weights_only=True) + + callbacks += [summary_callback, checkpoint_callback] + + history = keras_model.fit( + train_input_dataset, + epochs=FLAGS.train_epochs, + steps_per_epoch=steps_per_epoch, + callbacks=callbacks, + validation_data=eval_input_dataset, + validation_steps=num_eval_steps, + verbose=2) + + logging.info("Training done. Start evaluating") + + eval_loss_and_metrics = keras_model.evaluate( + eval_input_dataset, steps=num_eval_steps, verbose=2) + + logging.info("Keras evaluation is done.") + + # Keras evaluate() API returns scalar loss and metric values from + # evaluation as a list. Here, the returned list would contain + # [evaluation loss, hr sum, hr count]. + eval_hit_rate = eval_loss_and_metrics[1] / eval_loss_and_metrics[2] + + # Format evaluation result into [eval loss, eval hit accuracy]. + eval_results = [eval_loss_and_metrics[0], eval_hit_rate] + + if history and history.history: + train_history = history.history + train_loss = train_history["loss"][-1] + + stats = build_stats(train_loss, eval_results, time_callback) + return stats + + +def run_ncf_custom_training(params, + strategy, + keras_model, + optimizer, + callbacks, + train_input_dataset, + eval_input_dataset, + num_train_steps, + num_eval_steps, + generate_input_online=True): + """Runs custom training loop. + + Args: + params: Dictionary containing training parameters. + strategy: Distribution strategy to be used for distributed training. + keras_model: Model used for training. + optimizer: Optimizer used for training. + callbacks: Callbacks to be invoked between batches/epochs. + train_input_dataset: tf.data.Dataset used for training. + eval_input_dataset: tf.data.Dataset used for evaluation. + num_train_steps: Total number of steps to run for training. + num_eval_steps: Total number of steps to run for evaluation. + generate_input_online: Whether input data was generated by data producer. + When data is generated by data producer, then train dataset must be + re-initialized after every epoch. + + Returns: + A tuple of train loss and a list of training and evaluation results. + """ + loss_object = tf.keras.losses.SparseCategoricalCrossentropy( + reduction="sum", from_logits=True) + train_input_iterator = iter( + strategy.experimental_distribute_dataset(train_input_dataset)) + + def train_step(train_iterator): + """Called once per step to train the model.""" + + def step_fn(features): + """Computes loss and applied gradient per replica.""" + with tf.GradientTape() as tape: + softmax_logits = keras_model(features) + # The loss can overflow in float16, so we cast to float32. 
+ softmax_logits = tf.cast(softmax_logits, "float32") + labels = features[rconst.TRAIN_LABEL_KEY] + loss = loss_object( + labels, + softmax_logits, + sample_weight=features[rconst.VALID_POINT_MASK]) + loss *= (1.0 / params["batch_size"]) + if FLAGS.dtype == "fp16": + loss = optimizer.get_scaled_loss(loss) + + grads = tape.gradient(loss, keras_model.trainable_variables) + if FLAGS.dtype == "fp16": + grads = optimizer.get_unscaled_gradients(grads) + # Converting gradients to dense form helps in perf on GPU for NCF + grads = neumf_model.sparse_to_dense_grads( + list(zip(grads, keras_model.trainable_variables))) + optimizer.apply_gradients(grads) + return loss + + per_replica_losses = strategy.run( + step_fn, args=(next(train_iterator),)) + mean_loss = strategy.reduce( + tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None) + return mean_loss + + def eval_step(eval_iterator): + """Called once per eval step to compute eval metrics.""" + + def step_fn(features): + """Computes eval metrics per replica.""" + softmax_logits = keras_model(features) + in_top_k, metric_weights = metric_fn(softmax_logits, + features[rconst.DUPLICATE_MASK], + params["match_mlperf"]) + hr_sum = tf.reduce_sum(in_top_k * metric_weights) + hr_count = tf.reduce_sum(metric_weights) + return hr_sum, hr_count + + per_replica_hr_sum, per_replica_hr_count = ( + strategy.run( + step_fn, args=(next(eval_iterator),))) + hr_sum = strategy.reduce( + tf.distribute.ReduceOp.SUM, per_replica_hr_sum, axis=None) + hr_count = strategy.reduce( + tf.distribute.ReduceOp.SUM, per_replica_hr_count, axis=None) + return hr_sum, hr_count + + if not FLAGS.run_eagerly: + train_step = tf.function(train_step) + eval_step = tf.function(eval_step) + + for callback in callbacks: + callback.on_train_begin() + + # Not writing tensorboard summaries if running in MLPerf. + if FLAGS.ml_perf: + eval_summary_writer, train_summary_writer = None, None + else: + summary_dir = os.path.join(FLAGS.model_dir, "summaries") + eval_summary_writer = tf.summary.create_file_writer( + os.path.join(summary_dir, "eval")) + train_summary_writer = tf.summary.create_file_writer( + os.path.join(summary_dir, "train")) + + train_loss = 0 + for epoch in range(FLAGS.train_epochs): + for cb in callbacks: + cb.on_epoch_begin(epoch) + + # As NCF dataset is sampled with randomness, not repeating + # data elements in each epoch has significant impact on + # convergence. As so, offline-generated TF record files + # contains all epoch worth of data. Thus we do not need + # to initialize dataset when reading from tf record files. + if generate_input_online: + train_input_iterator = iter( + strategy.experimental_distribute_dataset(train_input_dataset)) + + train_loss = 0 + for step in range(num_train_steps): + current_step = step + epoch * num_train_steps + for c in callbacks: + c.on_batch_begin(current_step) + + train_loss += train_step(train_input_iterator) + + # Write train loss once in every 1000 steps. 
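+      # train_loss accumulates the per-step losses, so train_loss / (step + 1)
+      # below is the running mean loss for the current epoch.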
+ if train_summary_writer and step % 1000 == 0: + with train_summary_writer.as_default(): + tf.summary.scalar("training_loss", train_loss/(step + 1), + step=current_step) + + for c in callbacks: + c.on_batch_end(current_step) + + train_loss /= num_train_steps + logging.info("Done training epoch %s, epoch loss=%.3f", epoch + 1, + train_loss) + + eval_input_iterator = iter( + strategy.experimental_distribute_dataset(eval_input_dataset)) + + hr_sum = 0.0 + hr_count = 0.0 + for _ in range(num_eval_steps): + step_hr_sum, step_hr_count = eval_step(eval_input_iterator) + hr_sum += step_hr_sum + hr_count += step_hr_count + + logging.info("Done eval epoch %s, hit_rate=%.3f", epoch + 1, + hr_sum / hr_count) + if eval_summary_writer: + with eval_summary_writer.as_default(): + tf.summary.scalar("hit_rate", hr_sum / hr_count, step=current_step) + + if (FLAGS.early_stopping and + float(hr_sum / hr_count) > params["hr_threshold"]): + break + + for c in callbacks: + c.on_train_end() + + # Saving the model at the end of training. + if not FLAGS.ml_perf: + checkpoint = tf.train.Checkpoint(model=keras_model, optimizer=optimizer) + checkpoint_path = os.path.join(FLAGS.model_dir, "ctl_checkpoint") + checkpoint.save(checkpoint_path) + logging.info("Saving model as TF checkpoint: %s", checkpoint_path) + + return train_loss, [None, hr_sum / hr_count] + + +def build_stats(loss, eval_result, time_callback): + """Normalizes and returns dictionary of stats. + + Args: + loss: The final loss at training time. + eval_result: Output of the eval step. Assumes first value is eval_loss and + second value is accuracy_top_1. + time_callback: Time tracking callback likely used during keras.fit. + + Returns: + Dictionary of normalized results. + """ + stats = {} + if loss: + stats["loss"] = loss + + if eval_result: + stats["eval_loss"] = eval_result[0] + stats["eval_hit_rate"] = eval_result[1] + + if time_callback: + timestamp_log = time_callback.timestamp_log + stats["step_timestamp_log"] = timestamp_log + stats["train_finish_time"] = time_callback.train_finish_time + if len(timestamp_log) > 1: + stats["avg_exp_per_second"] = ( + time_callback.batch_size * time_callback.log_steps * + (len(time_callback.timestamp_log)-1) / + (timestamp_log[-1].timestamp - timestamp_log[0].timestamp)) + + return stats + + +def main(_): + logging.info("Result is %s", run_ncf(FLAGS)) + + +if __name__ == "__main__": + ncf_common.define_ncf_flags() + app.run(main) diff --git a/models/official/recommendation/ncf_test.py b/models/official/recommendation/ncf_test.py new file mode 100644 index 0000000000000000000000000000000000000000..5103283e0aa617b0042ca75f5d2e9572cecb1b68 --- /dev/null +++ b/models/official/recommendation/ncf_test.py @@ -0,0 +1,111 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests NCF.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest + +import tensorflow as tf +from tensorflow.python.eager import context # pylint: disable=ungrouped-imports +from official.recommendation import constants as rconst +from official.recommendation import ncf_common +from official.recommendation import ncf_keras_main +from official.utils.testing import integration + +NUM_TRAIN_NEG = 4 + + +class NcfTest(tf.test.TestCase): + + @classmethod + def setUpClass(cls): # pylint: disable=invalid-name + super(NcfTest, cls).setUpClass() + ncf_common.define_ncf_flags() + + def setUp(self): + self.top_k_old = rconst.TOP_K + self.num_eval_negatives_old = rconst.NUM_EVAL_NEGATIVES + rconst.NUM_EVAL_NEGATIVES = 2 + + def tearDown(self): + rconst.NUM_EVAL_NEGATIVES = self.num_eval_negatives_old + rconst.TOP_K = self.top_k_old + + _BASE_END_TO_END_FLAGS = ['-batch_size', '1044', '-train_epochs', '1'] + + @unittest.mock.patch.object(rconst, "SYNTHETIC_BATCHES_PER_EPOCH", 100) + def test_end_to_end_keras_no_dist_strat(self): + integration.run_synthetic( + ncf_keras_main.main, tmp_root=self.get_temp_dir(), + extra_flags=self._BASE_END_TO_END_FLAGS + + ['-distribution_strategy', 'off']) + + @unittest.mock.patch.object(rconst, "SYNTHETIC_BATCHES_PER_EPOCH", 100) + def test_end_to_end_keras_dist_strat(self): + integration.run_synthetic( + ncf_keras_main.main, tmp_root=self.get_temp_dir(), + extra_flags=self._BASE_END_TO_END_FLAGS + ['-num_gpus', '0']) + + @unittest.mock.patch.object(rconst, "SYNTHETIC_BATCHES_PER_EPOCH", 100) + def test_end_to_end_keras_dist_strat_ctl(self): + flags = (self._BASE_END_TO_END_FLAGS + + ['-num_gpus', '0'] + + ['-keras_use_ctl', 'True']) + integration.run_synthetic( + ncf_keras_main.main, tmp_root=self.get_temp_dir(), + extra_flags=flags) + + @unittest.mock.patch.object(rconst, "SYNTHETIC_BATCHES_PER_EPOCH", 100) + def test_end_to_end_keras_1_gpu_dist_strat_fp16(self): + if context.num_gpus() < 1: + self.skipTest( + "{} GPUs are not available for this test. {} GPUs are available". + format(1, context.num_gpus())) + + integration.run_synthetic( + ncf_keras_main.main, tmp_root=self.get_temp_dir(), + extra_flags=self._BASE_END_TO_END_FLAGS + ['-num_gpus', '1', + '--dtype', 'fp16']) + + @unittest.mock.patch.object(rconst, "SYNTHETIC_BATCHES_PER_EPOCH", 100) + def test_end_to_end_keras_1_gpu_dist_strat_ctl_fp16(self): + if context.num_gpus() < 1: + self.skipTest( + '{} GPUs are not available for this test. {} GPUs are available'. + format(1, context.num_gpus())) + + integration.run_synthetic( + ncf_keras_main.main, tmp_root=self.get_temp_dir(), + extra_flags=self._BASE_END_TO_END_FLAGS + ['-num_gpus', '1', + '--dtype', 'fp16', + '--keras_use_ctl']) + + @unittest.mock.patch.object(rconst, 'SYNTHETIC_BATCHES_PER_EPOCH', 100) + def test_end_to_end_keras_2_gpu_fp16(self): + if context.num_gpus() < 2: + self.skipTest( + "{} GPUs are not available for this test. {} GPUs are available". 
+          format(2, context.num_gpus()))
+
+    integration.run_synthetic(
+        ncf_keras_main.main, tmp_root=self.get_temp_dir(),
+        extra_flags=self._BASE_END_TO_END_FLAGS + ['-num_gpus', '2',
+                                                   '--dtype', 'fp16'])
+
+if __name__ == "__main__":
+  tf.test.main()
diff --git a/models/official/recommendation/neumf_model.py b/models/official/recommendation/neumf_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..48b09293af065a19db2dbfb1d44023439c2b9765
--- /dev/null
+++ b/models/official/recommendation/neumf_model.py
@@ -0,0 +1,431 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Defines the NeuMF model for the NCF framework.
+
+Some abbreviations used in the code base:
+NeuMF: Neural Matrix Factorization
+NCF: Neural Collaborative Filtering
+GMF: Generalized Matrix Factorization
+MLP: Multi-Layer Perceptron
+
+GMF applies a linear kernel to model the latent feature interactions, and MLP
+uses a nonlinear kernel to learn the interaction function from data. NeuMF
+fuses GMF and MLP to better model the complex user-item interactions, and
+unifies the strengths of the linearity of MF and the non-linearity of MLP for
+modeling the user-item latent structures.
+
+NeuMF allows GMF and MLP to learn separate embeddings, and combines the two
+models by concatenating their last hidden layers.
+"""
+from __future__ import absolute_import
+from __future__ import division
+# from __future__ import google_type_annotations
+from __future__ import print_function
+
+import sys
+
+from six.moves import xrange  # pylint: disable=redefined-builtin
+import tensorflow as tf
+from typing import Any, Dict, Text
+
+from official.recommendation import constants as rconst
+from official.recommendation import movielens
+from official.recommendation import ncf_common
+from official.recommendation import stat_utils
+
+
+def sparse_to_dense_grads(grads_and_vars):
+  """Convert sparse gradients to dense gradients.
+
+  All sparse gradients, which are represented as instances of tf.IndexedSlices,
+  are converted to dense Tensors. Dense gradients, which are represented as
+  Tensors, are unchanged.
+
+  The purpose of this conversion is that for the small embeddings used by this
+  model, applying dense gradients with the AdamOptimizer is faster than
+  applying sparse gradients.
+
+  Args:
+    grads_and_vars: A list of (gradient, variable) tuples. Each gradient can
+      be a Tensor or an IndexedSlices. Tensors are unchanged, and IndexedSlices
+      are converted to dense Tensors.
+  Returns:
+    The same list of (gradient, variable) as `grads_and_vars`, except each
+    IndexedSlices gradient is converted to a Tensor.
+  """
+
+  # Calling convert_to_tensor changes IndexedSlices into Tensors, and leaves
+  # Tensors unchanged.
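+  # For example, the embedding lookups in this model yield tf.IndexedSlices
+  # gradients; tf.convert_to_tensor materializes each one as a dense Tensor
+  # with the full shape of its embedding variable, while ordinary Tensor
+  # gradients pass through unchanged.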
+ return [(tf.convert_to_tensor(g), v) for g, v in grads_and_vars] + + +def neumf_model_fn(features, labels, mode, params): + """Model Function for NeuMF estimator.""" + if params.get("use_seed"): + tf.set_random_seed(stat_utils.random_int32()) + + users = features[movielens.USER_COLUMN] + items = features[movielens.ITEM_COLUMN] + + user_input = tf.keras.layers.Input(tensor=users) + item_input = tf.keras.layers.Input(tensor=items) + logits = construct_model(user_input, item_input, params).output + + # Softmax with the first column of zeros is equivalent to sigmoid. + softmax_logits = ncf_common.convert_to_softmax_logits(logits) + + if mode == tf.estimator.ModeKeys.EVAL: + duplicate_mask = tf.cast(features[rconst.DUPLICATE_MASK], tf.float32) + return _get_estimator_spec_with_metrics( + logits, + softmax_logits, + duplicate_mask, + params["num_neg"], + params["match_mlperf"], + use_tpu_spec=params["use_tpu"]) + + elif mode == tf.estimator.ModeKeys.TRAIN: + labels = tf.cast(labels, tf.int32) + valid_pt_mask = features[rconst.VALID_POINT_MASK] + + optimizer = tf.compat.v1.train.AdamOptimizer( + learning_rate=params["learning_rate"], + beta1=params["beta1"], + beta2=params["beta2"], + epsilon=params["epsilon"]) + if params["use_tpu"]: + optimizer = tf.compat.v1.tpu.CrossShardOptimizer(optimizer) + + loss = tf.compat.v1.losses.sparse_softmax_cross_entropy( + labels=labels, + logits=softmax_logits, + weights=tf.cast(valid_pt_mask, tf.float32) + ) + + tf.identity(loss, name="cross_entropy") + + global_step = tf.compat.v1.train.get_global_step() + tvars = tf.compat.v1.trainable_variables() + gradients = optimizer.compute_gradients( + loss, tvars, colocate_gradients_with_ops=True) + gradients = sparse_to_dense_grads(gradients) + minimize_op = optimizer.apply_gradients( + gradients, global_step=global_step, name="train") + update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS) + train_op = tf.group(minimize_op, update_ops) + + return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op) + + else: + raise NotImplementedError + + +def _strip_first_and_last_dimension(x, batch_size): + return tf.reshape(x[0, :], (batch_size,)) + + +def construct_model(user_input: tf.Tensor, item_input: tf.Tensor, + params: Dict[Text, Any]) -> tf.keras.Model: + """Initialize NeuMF model. + + Args: + user_input: keras input layer for users + item_input: keras input layer for items + params: Dict of hyperparameters. + + Raises: + ValueError: if the first model layer is not even. + Returns: + model: a keras Model for computing the logits + """ + num_users = params["num_users"] + num_items = params["num_items"] + + model_layers = params["model_layers"] + + mf_regularization = params["mf_regularization"] + mlp_reg_layers = params["mlp_reg_layers"] + + mf_dim = params["mf_dim"] + + if model_layers[0] % 2 != 0: + raise ValueError("The first layer size should be multiple of 2!") + + # Initializer for embedding layers + embedding_initializer = "glorot_uniform" + + def mf_slice_fn(x): + x = tf.squeeze(x, [1]) + return x[:, :mf_dim] + + def mlp_slice_fn(x): + x = tf.squeeze(x, [1]) + return x[:, mf_dim:] + + # It turns out to be significantly more effecient to store the MF and MLP + # embedding portions in the same table, and then slice as needed. 
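+  # As a purely illustrative example: with mf_dim = 8 and model_layers[0] = 32,
+  # each embedding row has 8 + 32 // 2 = 24 columns; mf_slice_fn keeps columns
+  # [0:8] for the GMF branch and mlp_slice_fn keeps columns [8:24] for the MLP
+  # branch.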
+ embedding_user = tf.keras.layers.Embedding( + num_users, + mf_dim + model_layers[0] // 2, + embeddings_initializer=embedding_initializer, + embeddings_regularizer=tf.keras.regularizers.l2(mf_regularization), + input_length=1, + name="embedding_user")( + user_input) + + embedding_item = tf.keras.layers.Embedding( + num_items, + mf_dim + model_layers[0] // 2, + embeddings_initializer=embedding_initializer, + embeddings_regularizer=tf.keras.regularizers.l2(mf_regularization), + input_length=1, + name="embedding_item")( + item_input) + + # GMF part + mf_user_latent = tf.keras.layers.Lambda( + mf_slice_fn, name="embedding_user_mf")(embedding_user) + mf_item_latent = tf.keras.layers.Lambda( + mf_slice_fn, name="embedding_item_mf")(embedding_item) + + # MLP part + mlp_user_latent = tf.keras.layers.Lambda( + mlp_slice_fn, name="embedding_user_mlp")(embedding_user) + mlp_item_latent = tf.keras.layers.Lambda( + mlp_slice_fn, name="embedding_item_mlp")(embedding_item) + + # Element-wise multiply + mf_vector = tf.keras.layers.multiply([mf_user_latent, mf_item_latent]) + + # Concatenation of two latent features + mlp_vector = tf.keras.layers.concatenate([mlp_user_latent, mlp_item_latent]) + + num_layer = len(model_layers) # Number of layers in the MLP + for layer in xrange(1, num_layer): + model_layer = tf.keras.layers.Dense( + model_layers[layer], + kernel_regularizer=tf.keras.regularizers.l2(mlp_reg_layers[layer]), + activation="relu") + mlp_vector = model_layer(mlp_vector) + + # Concatenate GMF and MLP parts + predict_vector = tf.keras.layers.concatenate([mf_vector, mlp_vector]) + + # Final prediction layer + logits = tf.keras.layers.Dense( + 1, activation=None, kernel_initializer="lecun_uniform", + name=movielens.RATING_COLUMN)(predict_vector) + + # Print model topology. + model = tf.keras.models.Model([user_input, item_input], logits) + model.summary() + sys.stdout.flush() + + return model + + +def _get_estimator_spec_with_metrics(logits: tf.Tensor, + softmax_logits: tf.Tensor, + duplicate_mask: tf.Tensor, + num_training_neg: int, + match_mlperf: bool = False, + use_tpu_spec: bool = False): + """Returns a EstimatorSpec that includes the metrics.""" + cross_entropy, \ + metric_fn, \ + in_top_k, \ + ndcg, \ + metric_weights = compute_eval_loss_and_metrics_helper( + logits, + softmax_logits, + duplicate_mask, + num_training_neg, + match_mlperf) + + if use_tpu_spec: + return tf.estimator.tpu.TPUEstimatorSpec( + mode=tf.estimator.ModeKeys.EVAL, + loss=cross_entropy, + eval_metrics=(metric_fn, [in_top_k, ndcg, metric_weights])) + + return tf.estimator.EstimatorSpec( + mode=tf.estimator.ModeKeys.EVAL, + loss=cross_entropy, + eval_metric_ops=metric_fn(in_top_k, ndcg, metric_weights) + ) + + +def compute_eval_loss_and_metrics_helper(logits: tf.Tensor, + softmax_logits: tf.Tensor, + duplicate_mask: tf.Tensor, + num_training_neg: int, + match_mlperf: bool = False): + """Model evaluation with HR and NDCG metrics. + + The evaluation protocol is to rank the test interacted item (truth items) + among the randomly chosen 999 items that are not interacted by the user. + The performance of the ranked list is judged by Hit Ratio (HR) and Normalized + Discounted Cumulative Gain (NDCG). + + For evaluation, the ranked list is truncated at 10 for both metrics. As such, + the HR intuitively measures whether the test item is present on the top-10 + list, and the NDCG accounts for the position of the hit by assigning higher + scores to hits at top ranks. 
Both metrics are calculated for each test user,
+  and the average scores are reported.
+
+  If `match_mlperf` is True, then the HR and NDCG computations are done in a
+  slightly unusual way to match the MLPerf reference implementation.
+  Specifically, if the evaluation negatives contain duplicate items, each
+  duplicated item is treated as if it only appeared once. Effectively, for
+  duplicate items in a row, the predicted score for all but one of the items
+  will be set to -infinity.
+
+  For example, suppose we have the following inputs:
+  logits_by_user: [[ 2, 3, 3],
+                   [ 5, 4, 4]]
+
+  items_by_user: [[10, 20, 20],
+                  [30, 40, 40]]
+
+  # Note: items_by_user is not explicitly present. Instead the relevant \
+    information is contained within `duplicate_mask`
+
+  top_k: 2
+
+  Then with match_mlperf=True, the HR would be 2/2 = 1.0. With
+  match_mlperf=False, the HR would be 1/2 = 0.5. This is because each user has
+  predicted scores for only 2 unique items: 10 and 20 for the first user, and
+  30 and 40 for the second. Therefore, with match_mlperf=True, it's guaranteed
+  the first item's score is in the top 2. With match_mlperf=False, this
+  function would compute that the first user's first item is not in the top 2,
+  because item 20 has a higher score, and item 20 occurs twice.
+
+  Args:
+    logits: A tensor containing the predicted logits for each user. The shape
+      of logits is (num_users_per_batch * (1 + NUM_EVAL_NEGATIVES),). Logits
+      for a user are grouped, and the last element of the group is the true
+      element.
+    softmax_logits: The same logits with a column of zeros prepended, so that
+      a softmax over each pair is equivalent to a sigmoid on the original
+      logit.
+    duplicate_mask: A vector with the same shape as logits, with a value of 1
+      if the item corresponding to the logit at that position has already
+      appeared for that user.
+    num_training_neg: The number of negatives per positive during training.
+    match_mlperf: Use the MLPerf reference convention for computing rank.
+
+  Returns:
+    cross_entropy: the loss
+    metric_fn: the metrics function
+    in_top_k: hit rate metric
+    ndcg: ndcg metric
+    metric_weights: metric weights
+  """
+  in_top_k, ndcg, metric_weights, logits_by_user = compute_top_k_and_ndcg(
+      logits, duplicate_mask, match_mlperf)
+
+  # Examples are provided by the eval Dataset in a structured format, so eval
+  # labels can be reconstructed on the fly.
+  eval_labels = tf.reshape(shape=(-1,), tensor=tf.one_hot(
+      tf.zeros(shape=(logits_by_user.shape[0],), dtype=tf.int32) +
+      rconst.NUM_EVAL_NEGATIVES, logits_by_user.shape[1], dtype=tf.int32))
+
+  eval_labels_float = tf.cast(eval_labels, tf.float32)
+
+  # During evaluation, the ratio of negatives to positives is much higher
+  # than during training. (Typically 999 to 1 vs. 4 to 1.) By adjusting the
+  # weights for the negative examples we compute a loss which is consistent
+  # with the training data.
(And provides apples-to-apples comparison) + negative_scale_factor = num_training_neg / rconst.NUM_EVAL_NEGATIVES + example_weights = ( + (eval_labels_float + (1 - eval_labels_float) * negative_scale_factor) * + (1 + rconst.NUM_EVAL_NEGATIVES) / (1 + num_training_neg)) + + # Tile metric weights back to logit dimensions + expanded_metric_weights = tf.reshape(tf.tile( + metric_weights[:, tf.newaxis], (1, rconst.NUM_EVAL_NEGATIVES + 1)), (-1,)) + + # ignore padded examples + example_weights *= tf.cast(expanded_metric_weights, tf.float32) + + cross_entropy = tf.compat.v1.losses.sparse_softmax_cross_entropy( + logits=softmax_logits, labels=eval_labels, weights=example_weights) + + def metric_fn(top_k_tensor, ndcg_tensor, weight_tensor): + return { + rconst.HR_KEY: tf.compat.v1.metrics.mean(top_k_tensor, + weights=weight_tensor, + name=rconst.HR_METRIC_NAME), + rconst.NDCG_KEY: tf.compat.v1.metrics.mean(ndcg_tensor, + weights=weight_tensor, + name=rconst.NDCG_METRIC_NAME) + } + + return cross_entropy, metric_fn, in_top_k, ndcg, metric_weights + + +def compute_top_k_and_ndcg(logits: tf.Tensor, + duplicate_mask: tf.Tensor, + match_mlperf: bool = False): + """Compute inputs of metric calculation. + + Args: + logits: A tensor containing the predicted logits for each user. The shape of + logits is (num_users_per_batch * (1 + NUM_EVAL_NEGATIVES),) Logits for a + user are grouped, and the first element of the group is the true element. + duplicate_mask: A vector with the same shape as logits, with a value of 1 if + the item corresponding to the logit at that position has already appeared + for that user. + match_mlperf: Use the MLPerf reference convention for computing rank. + + Returns: + is_top_k, ndcg and weights, all of which has size (num_users_in_batch,), and + logits_by_user which has size + (num_users_in_batch, (rconst.NUM_EVAL_NEGATIVES + 1)). + """ + logits_by_user = tf.reshape(logits, (-1, rconst.NUM_EVAL_NEGATIVES + 1)) + duplicate_mask_by_user = tf.cast( + tf.reshape(duplicate_mask, (-1, rconst.NUM_EVAL_NEGATIVES + 1)), + logits_by_user.dtype) + + if match_mlperf: + # Set duplicate logits to the min value for that dtype. The MLPerf + # reference dedupes during evaluation. + logits_by_user *= (1 - duplicate_mask_by_user) + logits_by_user += duplicate_mask_by_user * logits_by_user.dtype.min + + # Determine the location of the first element in each row after the elements + # are sorted. + sort_indices = tf.argsort( + logits_by_user, axis=1, direction="DESCENDING") + + # Use matrix multiplication to extract the position of the true item from the + # tensor of sorted indices. This approach is chosen because both GPUs and TPUs + # perform matrix multiplications very quickly. This is similar to np.argwhere. + # However this is a special case because the target will only appear in + # sort_indices once. + one_hot_position = tf.cast(tf.equal(sort_indices, rconst.NUM_EVAL_NEGATIVES), + tf.int32) + sparse_positions = tf.multiply( + one_hot_position, tf.range(logits_by_user.shape[1])[tf.newaxis, :]) + position_vector = tf.reduce_sum(sparse_positions, axis=1) + + in_top_k = tf.cast(tf.less(position_vector, rconst.TOP_K), tf.float32) + ndcg = tf.math.log(2.) / tf.math.log( + tf.cast(position_vector, tf.float32) + 2) + ndcg *= in_top_k + + # If a row is a padded row, all but the first element will be a duplicate. 
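+  # For example (illustrative): with NUM_EVAL_NEGATIVES = 3, a padded row's
+  # duplicate mask sums to 3, so the tf.not_equal test below evaluates to False
+  # and the row is dropped from the HR/NDCG averages; genuine rows typically
+  # sum to fewer than 3 and keep a weight of True.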
+ metric_weights = tf.not_equal(tf.reduce_sum(duplicate_mask_by_user, axis=1), + rconst.NUM_EVAL_NEGATIVES) + + return in_top_k, ndcg, metric_weights, logits_by_user diff --git a/models/official/recommendation/popen_helper.py b/models/official/recommendation/popen_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..dcdca4ced8e0b45294023c4675d16efd875694b7 --- /dev/null +++ b/models/official/recommendation/popen_helper.py @@ -0,0 +1,64 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Helper file for running the async data generation process in OSS.""" + +import contextlib +import multiprocessing +import multiprocessing.pool + + +def get_forkpool(num_workers, init_worker=None, closing=True): + pool = multiprocessing.Pool(processes=num_workers, initializer=init_worker) + return contextlib.closing(pool) if closing else pool + + +def get_threadpool(num_workers, init_worker=None, closing=True): + pool = multiprocessing.pool.ThreadPool(processes=num_workers, + initializer=init_worker) + return contextlib.closing(pool) if closing else pool + + +class FauxPool(object): + """Mimic a pool using for loops. + + This class is used in place of proper pools when true determinism is desired + for testing or debugging. + """ + def __init__(self, *args, **kwargs): + pass + + def map(self, func, iterable, chunksize=None): + return [func(i) for i in iterable] + + def imap(self, func, iterable, chunksize=1): + for i in iterable: + yield func(i) + + def close(self): + pass + + def terminate(self): + pass + + def join(self): + pass + +def get_fauxpool(num_workers, init_worker=None, closing=True): + pool = FauxPool(processes=num_workers, initializer=init_worker) + return contextlib.closing(pool) if closing else pool + + +def worker_job(): + return "worker" diff --git a/models/official/recommendation/run.sh b/models/official/recommendation/run.sh new file mode 100644 index 0000000000000000000000000000000000000000..b8e1143a38ba0cc26e97be6bad20a5ae6c13be65 --- /dev/null +++ b/models/official/recommendation/run.sh @@ -0,0 +1,101 @@ +#!/bin/bash +set -e + +if [ `id -u` != 0 ]; then + echo "Calling sudo to gain root for this shell. 
(Needed to clear caches.)" + sudo echo "Success" +fi + +SCRIPT_DIR=`dirname "$BASH_SOURCE"` +export PYTHONPATH="${SCRIPT_DIR}/../../" +MAIN_SCRIPT="ncf_estimator_main.py" + +DATASET="ml-20m" + +BUCKET=${BUCKET:-""} +ROOT_DIR="${BUCKET:-/tmp}/MLPerf_NCF" +echo "Root directory: ${ROOT_DIR}" + +if [[ -z ${BUCKET} ]]; then + LOCAL_ROOT=${ROOT_DIR} +else + LOCAL_ROOT="/tmp/MLPerf_NCF" + mkdir -p ${LOCAL_ROOT} + echo "Local root (for files which cannot use GCS): ${LOCAL_ROOT}" +fi + +DATE=$(date '+%Y-%m-%d_%H:%M:%S') +TEST_DIR="${ROOT_DIR}/${DATE}" +LOCAL_TEST_DIR="${LOCAL_ROOT}/${DATE}" +mkdir -p ${LOCAL_TEST_DIR} + +TPU=${TPU:-""} +if [[ -z ${TPU} ]]; then + DEVICE_FLAG="--num_gpus -1" # --use_xla_for_gpu" +else + DEVICE_FLAG="--tpu ${TPU} --num_gpus 0" +fi + +DATA_DIR="${ROOT_DIR}/movielens_data" +python "${SCRIPT_DIR}/movielens.py" --data_dir ${DATA_DIR} --dataset ${DATASET} + +if [ "$1" == "keras" ] +then + MAIN_SCRIPT="ncf_keras_main.py" + BATCH_SIZE=99000 + DEVICE_FLAG="--num_gpus 1" +else + BATCH_SIZE=98340 +fi + +{ + +for i in `seq 0 4`; +do + START_TIME=$(date +%s) + MODEL_DIR="${TEST_DIR}/model_dir_${i}" + + RUN_LOG="${LOCAL_TEST_DIR}/run_${i}.log" + export COMPLIANCE_FILE="${LOCAL_TEST_DIR}/run_${i}_compliance_raw.log" + export STITCHED_COMPLIANCE_FILE="${LOCAL_TEST_DIR}/run_${i}_compliance_submission.log" + echo "" + echo "Beginning run ${i}" + echo " Complete output logs are in ${RUN_LOG}" + echo " Compliance logs: (submission log is created after run.)" + echo " ${COMPLIANCE_FILE}" + echo " ${STITCHED_COMPLIANCE_FILE}" + + # To reduce variation set the seed flag: + # --seed ${i} + + python -u "${SCRIPT_DIR}/${MAIN_SCRIPT}" \ + --model_dir ${MODEL_DIR} \ + --data_dir ${DATA_DIR} \ + --dataset ${DATASET} --hooks "" \ + ${DEVICE_FLAG} \ + --clean \ + --train_epochs 14 \ + --batch_size ${BATCH_SIZE} \ + --eval_batch_size 160000 \ + --learning_rate 0.00382059 \ + --beta1 0.783529 \ + --beta2 0.909003 \ + --epsilon 1.45439e-07 \ + --layers 256,256,128,64 --num_factors 64 \ + --hr_threshold 0.635 \ + --ml_perf \ + |& tee ${RUN_LOG} \ + | grep --line-buffered -E --regexp="(Iteration [0-9]+: HR = [0-9\.]+, NDCG = [0-9\.]+, Loss = [0-9\.]+)|(pipeline_hash)|(MLPerf time:)" + + END_TIME=$(date +%s) + echo "Run ${i} complete: $(( $END_TIME - $START_TIME )) seconds." + + # Don't fill up the local hard drive. + if [[ -z ${BUCKET} ]]; then + echo "Removing model directory to save space." + rm -r ${MODEL_DIR} + fi + +done + +} |& tee "${LOCAL_TEST_DIR}/summary.log" diff --git a/models/official/recommendation/stat_utils.py b/models/official/recommendation/stat_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..658a2721e98a88d71dc2ac4562366283ffd2fc47 --- /dev/null +++ b/models/official/recommendation/stat_utils.py @@ -0,0 +1,92 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Statistics utility functions of NCF.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +import numpy as np + + +def random_int32(): + return np.random.randint(low=0, high=np.iinfo(np.int32).max, dtype=np.int32) + + +def permutation(args): + """Fork safe permutation function. + + This function can be called within a multiprocessing worker and give + appropriately random results. + + Args: + args: A size two tuple that will unpacked into the size of the permutation + and the random seed. This form is used because starmap is not universally + available. + + returns: + A NumPy array containing a random permutation. + """ + x, seed = args + + # If seed is None NumPy will seed randomly. + state = np.random.RandomState(seed=seed) # pylint: disable=no-member + output = np.arange(x, dtype=np.int32) + state.shuffle(output) + return output + + +def very_slightly_biased_randint(max_val_vector): + sample_dtype = np.uint64 + out_dtype = max_val_vector.dtype + samples = np.random.randint(low=0, high=np.iinfo(sample_dtype).max, + size=max_val_vector.shape, dtype=sample_dtype) + return np.mod(samples, max_val_vector.astype(sample_dtype)).astype(out_dtype) + + +def mask_duplicates(x, axis=1): # type: (np.ndarray, int) -> np.ndarray + """Identify duplicates from sampling with replacement. + + Args: + x: A 2D NumPy array of samples + axis: The axis along which to de-dupe. + + Returns: + A NumPy array with the same shape as x with one if an element appeared + previously along axis 1, else zero. + """ + if axis != 1: + raise NotImplementedError + + x_sort_ind = np.argsort(x, axis=1, kind="mergesort") + sorted_x = x[np.arange(x.shape[0])[:, np.newaxis], x_sort_ind] + + # compute the indices needed to map values back to their original position. + inv_x_sort_ind = np.argsort(x_sort_ind, axis=1, kind="mergesort") + + # Compute the difference of adjacent sorted elements. + diffs = sorted_x[:, :-1] - sorted_x[:, 1:] + + # We are only interested in whether an element is zero. Therefore left padding + # with ones to restore the original shape is sufficient. + diffs = np.concatenate( + [np.ones((diffs.shape[0], 1), dtype=diffs.dtype), diffs], axis=1) + + # Duplicate values will have a difference of zero. By definition the first + # element is never a duplicate. 
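+  # Worked example (illustrative): x = [[1, 3, 3, 2]] sorts to [[1, 2, 3, 3]],
+  # giving diffs (after the left padding above) of [[1, -1, -1, 0]]. Mapping
+  # back through inv_x_sort_ind yields [[1, -1, 0, -1]], so the mask returned
+  # below is [[0, 0, 1, 0]]: only the second occurrence of 3 is flagged.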
+ return np.where(diffs[np.arange(x.shape[0])[:, np.newaxis], + inv_x_sort_ind], 0, 1) diff --git a/models/official/requirements.txt b/models/official/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..5d492baaac5e1f2b25ea4db08f4d28075626f4c3 --- /dev/null +++ b/models/official/requirements.txt @@ -0,0 +1,23 @@ +six +google-api-python-client>=1.6.7 +google-cloud-bigquery>=0.31.0 +kaggle>=1.3.9 +numpy>=1.15.4 +pandas>=0.22.0 +psutil>=5.4.3 +py-cpuinfo>=3.3.0 +scipy>=0.19.1 +tensorflow-hub>=0.6.0 +tensorflow-model-optimization>=0.2.1 +tensorflow-datasets +tensorflow-addons +dataclasses +gin-config +tf_slim>=1.1.0 +sentencepiece +Cython +matplotlib +opencv-python-headless +pyyaml +Pillow +-e git+https://github.com/cocodataset/cocoapi#egg=pycocotools&subdirectory=PythonAPI diff --git a/models/official/staging/__init__.py b/models/official/staging/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/official/staging/training/__init__.py b/models/official/staging/training/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..931c2ef11db4a949e6c2e95bca44e36bac1241e9 --- /dev/null +++ b/models/official/staging/training/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== diff --git a/models/official/staging/training/controller.py b/models/official/staging/training/controller.py new file mode 100644 index 0000000000000000000000000000000000000000..a07be66329ad49ba07dff300d66f153552e1c78f --- /dev/null +++ b/models/official/staging/training/controller.py @@ -0,0 +1,337 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""A light weight utilities to train TF2 models.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import time + +from absl import logging + +import tensorflow.compat.v2 as tf +from typing import Callable, Dict, Optional, Text + +from official.staging.training import utils + + +class Controller(object): + """Class that facilitates training and evaluation of models.""" + + def __init__( + self, + strategy: Optional[tf.distribute.Strategy] = None, + train_fn: Optional[Callable[[tf.Tensor], + Optional[Dict[Text, tf.Tensor]]]] = None, + eval_fn: Optional[Callable[[tf.Tensor], + Optional[Dict[Text, tf.Tensor]]]] = None, + global_step: Optional[tf.Variable] = None, + # Train related + train_steps: Optional[int] = None, + steps_per_loop: Optional[int] = None, + summary_dir: Optional[Text] = None, + checkpoint_manager: Optional[tf.train.CheckpointManager] = None, + # summary related + summary_interval: Optional[int] = None, + # Evaluation related + eval_summary_dir: Optional[Text] = None, + eval_steps: Optional[int] = None, + eval_interval: Optional[int] = None): + """Constructs a `Controller` instance. + + Args: + strategy: An instance of `tf.distribute.Strategy`. + train_fn: A callable defined as `def train_fn(num_steps)`, which + `num_steps` indicates the number of steps to run for each loop. + eval_fn: A callable defined as `def eval_fn(num_steps)`, which `num_steps` + indicates the number of steps for one evaluation. + global_step: An integer `tf.Variable` indicating the global training step + number. Usually this can be obtained from `iterations` property of the + model's optimizer (e.g. `self.optimizer.iterations`), or users can + create their own global step variable as well. If the users create their + own global step variable, it is recommended to create the `tf.Variable` + inside strategy scope, and with + `aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA`. + train_steps: The total (maximum) number of training steps to perform. + steps_per_loop: The number of steps to run in each "inner loop" of + training (passed to the `num_steps` parameter of `train_fn`). + summary_dir: The directory to restore and write checkpoints and summaries. + If None, it will be set to `checkpoint_manager.directory`. + checkpoint_manager: An instance of `tf.train.CheckpointManager`. + summary_interval: Step interval for training summaries. Note that this + argument only applies to the summaries outside the training loop. If the + value is None, then training summaries are not enabled. + eval_summary_dir: The directory to write eval summaries. If None, it will + be set to `summary_dir`. + eval_steps: Number of steps to run evaluation. + eval_interval: Step interval for evaluation. If None, will skip evaluation + in the middle of training. Note that evaluation only happens outside the + training loop, which the loop iteration is specify by `steps_per_loop` + parameter. + + Raises: + ValueError: If both `train_fn` and `eval_fn` are None. + ValueError: If `train_fn` is not None and `train_steps` is None. + ValueError: If `steps_per_loop` is None when `train_fn` is provided. + ValueError: If `steps_per_loop` is not a positive integer. 
+ """ + if train_fn is None and eval_fn is None: + raise ValueError("`train_fn` and `eval_fn` should not both be None") + + # TODO(rxsang): Support training until exhaustion by passing + # `train_steps=-1`. Currently it cannot be supported with a host training + # loop because break statements are not supported with distributed dataset. + if train_fn is not None: + if train_steps is None: + raise ValueError("`train_steps` is required when `train_fn` is " + "provided.") + if steps_per_loop is None: + raise ValueError("`steps_per_loop` is required when `train_fn is " + "provided.") + if not isinstance(steps_per_loop, int) or steps_per_loop < 1: + raise ValueError("`steps_per_loop` should be a positive integer") + if summary_interval is not None and summary_interval <= 0: + raise ValueError("`summary_interval` should be larger than 0") + + self.strategy = strategy or tf.distribute.get_strategy() + + self.train_fn = train_fn + self.eval_fn = eval_fn + self.global_step = global_step + self.checkpoint_manager = checkpoint_manager + + if self.train_fn is not None: + self.train_steps = train_steps + self.steps_per_loop = steps_per_loop + if summary_dir: + self.summary_dir = summary_dir + elif checkpoint_manager: + self.summary_dir = checkpoint_manager.directory + else: + self.summary_dir = None + + self.summary_interval = summary_interval + if self.summary_dir and self.summary_interval: + summary_writer = tf.summary.create_file_writer(self.summary_dir) + else: + summary_writer = None + # TODO(rxsang): Consider pass SummaryManager directly into Controller for + # maximum customizability. + self.summary_manager = utils.SummaryManager( + summary_writer, + tf.summary.scalar, + global_step=self.global_step, + summary_interval=self.summary_interval) + + if self.eval_fn is not None: + eval_summary_dir = eval_summary_dir or self.summary_dir + eval_summary_writer = tf.summary.create_file_writer( + eval_summary_dir) if eval_summary_dir else None + self.eval_summary_manager = utils.SummaryManager( + eval_summary_writer, tf.summary.scalar, global_step=self.global_step) + + self.eval_steps = eval_steps + self.eval_interval = eval_interval + + # Creates and initializes the interval triggers. + self.eval_trigger = utils.IntervalTrigger(self.eval_interval, + self.global_step.numpy()) # pytype: disable=attribute-error + + if self.global_step: + tf.summary.experimental.set_step(self.global_step) + + # Restores the model if needed. + if self.checkpoint_manager is not None: + model_restored = self._restore_model() + if not model_restored and self.checkpoint_manager.checkpoint_interval: + # If the model is not restored from a checkpoint, save an initial + # checkpoint. + ckpt_path = self.checkpoint_manager.save( + checkpoint_number=self.global_step) + logging.info("Saved checkpoins in %s", ckpt_path) + + def _restore_model(self, checkpoint_path=None): + """Restore or initialize the model. + + Args: + checkpoint_path: An optional string indicates the checkpoint path to + restore. If None, will restore from `self.checkpoint_manager`. + + Returns: + True if the latest checkpoint is found or restored. Otherwise False. + """ + with self.strategy.scope(): + # Checkpoint restoring should be inside scope. 
b/139450638 + if checkpoint_path is not None: + self.checkpoint_manager.checkpoint.restore(checkpoint_path) + return True + return self.checkpoint_manager.restore_or_initialize() + + def _evaluate_once(self, current_step): + """Runs the evaluation once.""" + logging.info("Start evaluation at step: %s", current_step) + + with self.eval_summary_manager.summary_writer.as_default(): + eval_outputs = self.eval_fn(self.eval_steps) + + if eval_outputs: + eval_outputs = tf.nest.map_structure(lambda x: x.numpy(), eval_outputs) + + info = "step: {} evaluation metric: {}".format( + current_step, eval_outputs) + self._log_info(info) + + self.eval_summary_manager.write_summaries(eval_outputs) + self.eval_summary_manager.flush() + + def _maybe_save_checkpoints(self, current_step, force_trigger=False): + if self.checkpoint_manager and self.checkpoint_manager.checkpoint_interval: + ckpt_path = self.checkpoint_manager.save( + checkpoint_number=current_step, check_interval=not force_trigger) + if ckpt_path is not None: + logging.info("Saved checkpoins in %s", ckpt_path) + + def _maybe_evaluate(self, current_step, force_trigger=False): + if self.eval_trigger(current_step, force_trigger): + self._evaluate_once(current_step) + + def _log_info(self, message): + """Logs `message` to the `info` log, and also prints to stdout.""" + logging.info(message) + print(message) + + def train(self, evaluate=True): + """Runs the training, with optional evaluation. + + This handles evaluation, gathering summaries, and saving checkpoints. + + Args: + evaluate: A boolean indicates whether to perform evaluation during + training. + + Raises: + RuntimeError: If `global_step` is not updated correctly in `train_fn`. + """ + if self.train_fn is None: + raise ValueError("`self.train_fn` is required when calling `train` " + "method.") + if self.global_step is None: + raise ValueError("`self.global_step` is required when calling `train` " + "method.") + if evaluate and self.eval_fn is None: + raise ValueError("`self.eval_fn` is required when calling `train` method " + "with `evaluate=True`") + + step_timer = _StepTimer(self.global_step) + current_step = self.global_step.numpy() + logging.info("Train at step %s of %s", current_step, self.train_steps) + while current_step < self.train_steps: + # Calculates steps to run for the next train loop. + steps_per_loop = min(self.train_steps - current_step, self.steps_per_loop) + logging.info("Entering training loop with %s steps, at step %s of %s", + steps_per_loop, current_step, self.train_steps) + current_step += steps_per_loop + steps_per_loop = tf.convert_to_tensor(steps_per_loop, dtype=tf.int32) + + with self.summary_manager.summary_writer.as_default(): + train_outputs = self.train_fn(steps_per_loop) + + # Updates and verifies the current step after a training loop finishes. + if current_step != self.global_step.numpy(): + raise RuntimeError("`self.train_fn` is not updating `global_step` " + "correctly, expected: %s, actual: %s" % + (current_step, self.global_step.numpy())) + + # Print information like metrics and steps_per_second after a training + # loop. 
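+      # e.g. (illustrative): "step: 200 steps_per_second: 15.27 {'loss': 0.69}"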
+ if train_outputs: + train_outputs = tf.nest.map_structure( + lambda x: x.numpy(), train_outputs) + steps_per_second = step_timer.steps_per_second() + info = "step: {} steps_per_second: {:.2f} {}".format( + current_step, steps_per_second, train_outputs) + self._log_info(info) + + train_outputs = train_outputs or {} + train_outputs["steps_per_second"] = steps_per_second + self.summary_manager.write_summaries(train_outputs) + + self._maybe_save_checkpoints(current_step) + + if evaluate: + self._maybe_evaluate(current_step) + + self.summary_manager.write_summaries(train_outputs, always_write=True) + self.summary_manager.flush() + self._maybe_save_checkpoints(current_step, force_trigger=True) + if evaluate: + self._maybe_evaluate(current_step, force_trigger=True) + + def evaluate(self, continuous=False, timeout_fn=None): + """Runs the evaluation. + + Args: + continuous: If `True`, will continously monitor the checkpoint directory + to evaluate on the latest checkpoint. If `False`, will do the evaluation + once. + timeout_fn: Optional callable to call after a timeout. If the function + returns True, then it means that no new checkpoints will be generated + and the iterator will exit. + + Raises: + ValueError: If no checkpoint found in `self.checkpoint_manager.directory`. + """ + if self.eval_fn is None: + raise ValueError("`self.eval_fn` should not be None to call " + "`evaluate()` method.") + + if not continuous and timeout_fn is not None: + raise ValueError("`timeout_fn` can be only passed when `continuous` is " + "True") + + if continuous: + for checkpoint_path in tf.train.checkpoints_iterator( + self.checkpoint_manager.directory, timeout_fn=timeout_fn): + self._restore_model(checkpoint_path) + self._evaluate_once(self.global_step.numpy()) + return + + latest_checkpoint = self.checkpoint_manager.latest_checkpoint + if not latest_checkpoint: + raise ValueError("no checkpoint found in dir %s" % + self.checkpoint_manager.directory) + self._restore_model() + self._evaluate_once(self.global_step.numpy()) + + +class _StepTimer(object): + """Utility class for measuring steps/second.""" + + def __init__(self, step): + self.step = step + self.start() + + def start(self): + self.last_iteration = self.step.numpy() + self.last_time = time.time() + + def steps_per_second(self, restart=True): + value = ((self.step.numpy() - self.last_iteration) / + (time.time() - self.last_time)) + if restart: + self.start() + return value diff --git a/models/official/staging/training/controller_test.py b/models/official/staging/training/controller_test.py new file mode 100644 index 0000000000000000000000000000000000000000..eeaa191c04d40fcc108ed7b00dec86d30d5a2a0b --- /dev/null +++ b/models/official/staging/training/controller_test.py @@ -0,0 +1,308 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
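For orientation before the tests below, a minimal sketch of how `Controller` is typically wired up. The `runnable` object, `strategy`, `model_dir` and all numeric intervals here are hypothetical placeholders rather than values taken from this change:

  ckpt_manager = tf.train.CheckpointManager(
      tf.train.Checkpoint(model=runnable.model, optimizer=runnable.optimizer),
      directory=model_dir, max_to_keep=3,
      step_counter=runnable.global_step, checkpoint_interval=100)
  ctl = controller.Controller(
      strategy=strategy,
      train_fn=runnable.train,
      eval_fn=runnable.evaluate,
      global_step=runnable.global_step,
      train_steps=1000, steps_per_loop=100,
      summary_dir=os.path.join(model_dir, "summaries/train"),
      summary_interval=100,
      checkpoint_manager=ckpt_manager,
      eval_summary_dir=os.path.join(model_dir, "summaries/eval"),
      eval_steps=50, eval_interval=200)
  ctl.train(evaluate=True)

The test cases that follow exercise the same flow against a concrete StandardTrainable/StandardEvaluable implementation.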
+# ============================================================================== +"""Tests for official.staging.training.controller.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from absl.testing import parameterized +import numpy as np +import tensorflow as tf + +from tensorflow.python.distribute import combinations +from tensorflow.python.distribute import strategy_combinations +from official.staging.training import controller +from official.staging.training import standard_runnable + + +def all_strategy_combinations(): + """Gets combinations of distribution strategies.""" + return combinations.combine( + strategy=[ + strategy_combinations.one_device_strategy, + strategy_combinations.tpu_strategy, + strategy_combinations.one_device_strategy_gpu, + strategy_combinations.mirrored_strategy_with_gpu_and_cpu, + ], + mode="eager", + ) + + +def create_model(): + x = tf.keras.layers.Input(shape=(3,), name="input") + y = tf.keras.layers.Dense(4, name="dense")(x) + model = tf.keras.Model(x, y) + return model + + +def summaries_with_matching_keyword(keyword, summary_dir): + """Yields summary protos matching given keyword from event file.""" + event_paths = tf.io.gfile.glob(os.path.join(summary_dir, "events*")) + for event in tf.compat.v1.train.summary_iterator(event_paths[-1]): + if event.summary is not None: + for value in event.summary.value: + if keyword in value.tag: + tf.compat.v1.logging.error(event) + yield event.summary + + +def check_eventfile_for_keyword(keyword, summary_dir): + """Checks event files for the keyword.""" + return any(summaries_with_matching_keyword(keyword, summary_dir)) + + +def dataset_fn(ctx): + del ctx + inputs = np.zeros((10, 3), dtype=np.float32) + targets = np.zeros((10, 4), dtype=np.float32) + dataset = tf.data.Dataset.from_tensor_slices((inputs, targets)) + dataset = dataset.repeat(100) + dataset = dataset.batch(10, drop_remainder=True) + return dataset + + +class TestRunnable(standard_runnable.StandardTrainable, + standard_runnable.StandardEvaluable): + """Implements the training and evaluation APIs for the test model.""" + + def __init__(self): + standard_runnable.StandardTrainable.__init__(self) + standard_runnable.StandardEvaluable.__init__(self) + self.strategy = tf.distribute.get_strategy() + self.model = create_model() + self.optimizer = tf.keras.optimizers.RMSprop() + self.global_step = self.optimizer.iterations + self.train_loss = tf.keras.metrics.Mean("train_loss", dtype=tf.float32) + self.eval_loss = tf.keras.metrics.Mean("eval_loss", dtype=tf.float32) + + def build_train_dataset(self): + return self.strategy.experimental_distribute_datasets_from_function( + dataset_fn) + + def train_step(self, iterator): + + def _replicated_step(inputs): + """Replicated training step.""" + inputs, targets = inputs + with tf.GradientTape() as tape: + outputs = self.model(inputs) + loss = tf.math.reduce_sum(outputs - targets) + grads = tape.gradient(loss, self.model.variables) + self.optimizer.apply_gradients(zip(grads, self.model.variables)) + self.train_loss.update_state(loss) + + self.strategy.run(_replicated_step, args=(next(iterator),)) + + def train_loop_end(self): + return { + "loss": self.train_loss.result(), + } + + def build_eval_dataset(self): + return self.strategy.experimental_distribute_datasets_from_function( + dataset_fn) + + def eval_begin(self): + self.eval_loss.reset_states() + + def eval_step(self, iterator): + + def _replicated_step(inputs): + """Replicated 
evaluation step.""" + inputs, targets = inputs + outputs = self.model(inputs) + loss = tf.math.reduce_sum(outputs - targets) + self.eval_loss.update_state(loss) + + self.strategy.run(_replicated_step, args=(next(iterator),)) + + def eval_end(self): + return { + "eval_loss": self.eval_loss.result(), + } + + +class ControllerTest(tf.test.TestCase, parameterized.TestCase): + + def setUp(self): + super(ControllerTest, self).setUp() + self.model_dir = self.get_temp_dir() + + def test_no_checkpoint(self): + test_runnable = TestRunnable() + # No checkpoint manager and no strategy. + test_controller = controller.Controller( + train_fn=test_runnable.train, + eval_fn=test_runnable.evaluate, + global_step=test_runnable.global_step, + train_steps=10, + steps_per_loop=2, + summary_dir=os.path.join(self.model_dir, "summaries/train"), + summary_interval=2, + eval_summary_dir=os.path.join(self.model_dir, "summaries/eval"), + eval_steps=2, + eval_interval=5) + test_controller.train(evaluate=True) + self.assertEqual(test_runnable.global_step.numpy(), 10) + # Loss and accuracy values should be written into summaries. + self.assertNotEmpty( + tf.io.gfile.listdir(os.path.join(self.model_dir, "summaries/train"))) + self.assertTrue( + check_eventfile_for_keyword( + "loss", os.path.join(self.model_dir, "summaries/train"))) + self.assertNotEmpty( + tf.io.gfile.listdir(os.path.join(self.model_dir, "summaries/eval"))) + self.assertTrue( + check_eventfile_for_keyword( + "eval_loss", os.path.join(self.model_dir, "summaries/eval"))) + # No checkpoint, so global step starts from 0. + test_runnable.global_step.assign(0) + test_controller.train(evaluate=True) + self.assertEqual(test_runnable.global_step.numpy(), 10) + + def test_no_checkpoint_and_summaries(self): + test_runnable = TestRunnable() + # No checkpoint + summary directories. + test_controller = controller.Controller( + train_fn=test_runnable.train, + eval_fn=test_runnable.evaluate, + global_step=test_runnable.global_step, + train_steps=10, + steps_per_loop=2, + eval_steps=2, + eval_interval=5) + test_controller.train(evaluate=True) + self.assertEqual(test_runnable.global_step.numpy(), 10) + + @combinations.generate(all_strategy_combinations()) + def test_train_and_evaluate(self, strategy): + with strategy.scope(): + test_runnable = TestRunnable() + + checkpoint = tf.train.Checkpoint( + model=test_runnable.model, optimizer=test_runnable.optimizer) + checkpoint_manager = tf.train.CheckpointManager( + checkpoint, + self.model_dir, + max_to_keep=None, + step_counter=test_runnable.global_step, + checkpoint_interval=10) + test_controller = controller.Controller( + strategy=strategy, + train_fn=test_runnable.train, + eval_fn=test_runnable.evaluate, + global_step=test_runnable.global_step, + train_steps=10, + steps_per_loop=2, + summary_dir=os.path.join(self.model_dir, "summaries/train"), + summary_interval=2, + checkpoint_manager=checkpoint_manager, + eval_summary_dir=os.path.join(self.model_dir, "summaries/eval"), + eval_steps=2, + eval_interval=5) + test_controller.train(evaluate=True) + + # Checkpoints are saved. + self.assertNotEmpty(tf.io.gfile.glob(os.path.join(self.model_dir, "ckpt*"))) + + # Loss and accuracy values should be written into summaries. 
+ self.assertNotEmpty( + tf.io.gfile.listdir(os.path.join(self.model_dir, "summaries/train"))) + self.assertTrue( + check_eventfile_for_keyword( + "loss", os.path.join(self.model_dir, "summaries/train"))) + self.assertNotEmpty( + tf.io.gfile.listdir(os.path.join(self.model_dir, "summaries/eval"))) + self.assertTrue( + check_eventfile_for_keyword( + "eval_loss", os.path.join(self.model_dir, "summaries/eval"))) + + @combinations.generate(all_strategy_combinations()) + def test_train_only(self, strategy): + with strategy.scope(): + test_runnable = TestRunnable() + + checkpoint = tf.train.Checkpoint( + model=test_runnable.model, optimizer=test_runnable.optimizer) + checkpoint_manager = tf.train.CheckpointManager( + checkpoint, + self.model_dir, + max_to_keep=None, + step_counter=test_runnable.global_step, + checkpoint_interval=10) + test_controller = controller.Controller( + strategy=strategy, + train_fn=test_runnable.train, + global_step=test_runnable.global_step, + train_steps=10, + steps_per_loop=2, + summary_dir=os.path.join(self.model_dir, "summaries/train"), + summary_interval=2, + checkpoint_manager=checkpoint_manager, + eval_summary_dir=os.path.join(self.model_dir, "summaries/eval"), + ) + test_controller.train(evaluate=False) + + # Checkpoints are saved. + self.assertNotEmpty(tf.io.gfile.glob(os.path.join(self.model_dir, "ckpt*"))) + + # Only train summaries are written. + self.assertNotEmpty( + tf.io.gfile.listdir(os.path.join(self.model_dir, "summaries/train"))) + self.assertTrue( + check_eventfile_for_keyword( + "loss", os.path.join(self.model_dir, "summaries/train"))) + self.assertFalse( + tf.io.gfile.exists(os.path.join(self.model_dir, "summaries/eval"))) + + @combinations.generate(all_strategy_combinations()) + def test_evaluate_only(self, strategy): + with strategy.scope(): + test_runnable = TestRunnable() + + checkpoint = tf.train.Checkpoint(model=test_runnable.model) + checkpoint.save(os.path.join(self.model_dir, "ckpt")) + + checkpoint_manager = tf.train.CheckpointManager( + checkpoint, + self.model_dir, + max_to_keep=None, + step_counter=test_runnable.global_step) + test_controller = controller.Controller( + strategy=strategy, + eval_fn=test_runnable.evaluate, + global_step=test_runnable.global_step, + checkpoint_manager=checkpoint_manager, + summary_dir=os.path.join(self.model_dir, "summaries/train"), + eval_summary_dir=os.path.join(self.model_dir, "summaries/eval"), + eval_steps=2, + eval_interval=5) + test_controller.evaluate() + + # Only eval summaries are written + self.assertFalse( + tf.io.gfile.exists(os.path.join(self.model_dir, "summaries/train"))) + self.assertNotEmpty( + tf.io.gfile.listdir(os.path.join(self.model_dir, "summaries/eval"))) + self.assertTrue( + check_eventfile_for_keyword( + "eval_loss", os.path.join(self.model_dir, "summaries/eval"))) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/staging/training/grad_utils.py b/models/official/staging/training/grad_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..efda2e7616e5ca841dae0877f951982371a44bba --- /dev/null +++ b/models/official/staging/training/grad_utils.py @@ -0,0 +1,143 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Some gradient util functions to help users writing custom training loop.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +from absl import logging + +import tensorflow.compat.v2 as tf + + +def _filter_grads(grads_and_vars): + """Filter out iterable with grad equal to None.""" + grads_and_vars = tuple(grads_and_vars) + if not grads_and_vars: + return grads_and_vars + filtered = [] + vars_with_empty_grads = [] + for grad, var in grads_and_vars: + if grad is None: + vars_with_empty_grads.append(var) + else: + filtered.append((grad, var)) + filtered = tuple(filtered) + if not filtered: + raise ValueError("No gradients provided for any variable: %s." % + ([v.name for _, v in grads_and_vars],)) + if vars_with_empty_grads: + logging.warning( + ("Gradients do not exist for variables %s when minimizing the loss."), + ([v.name for v in vars_with_empty_grads])) + return filtered + + +def _filter_and_allreduce_gradients(grads_and_vars, + allreduce_precision="float32"): + """Filter None grads and then allreduce gradients in specified precision. + + This utils function is used when users intent to explicitly allreduce + gradients and customize gradients operations before and after allreduce. + The allreduced gradients are then passed to optimizer.apply_gradients( + experimental_aggregate_gradients=False). + + Arguments: + grads_and_vars: gradients and variables pairs. + allreduce_precision: Whether to allreduce gradients in float32 or float16. + + Returns: + pairs of allreduced non-None gradients and variables. + """ + filtered_grads_and_vars = _filter_grads(grads_and_vars) + (grads, variables) = zip(*filtered_grads_and_vars) + if allreduce_precision == "float16": + grads = [tf.cast(grad, "float16") for grad in grads] + allreduced_grads = tf.distribute.get_replica_context().all_reduce( + tf.distribute.ReduceOp.SUM, grads) + if allreduce_precision == "float16": + allreduced_grads = [tf.cast(grad, "float32") for grad in allreduced_grads] + return allreduced_grads, variables + + +def _run_callbacks(callbacks, grads_and_vars): + for callback in callbacks: + grads_and_vars = callback(grads_and_vars) + return grads_and_vars + + +def minimize_using_explicit_allreduce(tape, + optimizer, + loss, + trainable_variables, + pre_allreduce_callbacks=None, + post_allreduce_callbacks=None): + """Minimizes loss for one step by updating `trainable_variables`. + + Minimizes loss for one step by updating `trainable_variables`. + This explicitly performs gradient allreduce, instead of relying on implicit + allreduce in optimizer.apply_gradients(). If training using FP16 mixed + precision, explicit allreduce will aggregate gradients in FP16 format. + For TPU and GPU training using FP32, explicit allreduce will aggregate + gradients in FP32 format. + + Arguments: + tape: An instance of `tf.GradientTape`. + optimizer: An instance of `tf.keras.optimizers.Optimizer`. + loss: the loss tensor. 
+    trainable_variables: A list of model Variables.
+    pre_allreduce_callbacks: A list of callback functions that take
+      (gradient, variable) pairs as input, manipulate them, and return new
+      (gradient, variable) pairs. The callback functions will be invoked in
+      the list order and before gradients are allreduced. With mixed precision
+      training, the pre_allreduce_callbacks will be applied to the scaled
+      gradients. Default is no callbacks.
+    post_allreduce_callbacks: A list of callback functions that take
+      (gradient, variable) pairs as input, manipulate them, and return new
+      (gradient, variable) pairs. The callback functions will be invoked in
+      the list order and right before gradients are applied to variables for
+      updates. Default is no callbacks.
+  """
+  if isinstance(optimizer,
+                tf.keras.mixed_precision.experimental.LossScaleOptimizer):
+    # FP16 GPU code path
+    with tape:
+      scaled_loss = optimizer.get_scaled_loss(loss)
+    scaled_grads = tape.gradient(scaled_loss, trainable_variables)
+    grads_and_vars = zip(scaled_grads, trainable_variables)
+    if pre_allreduce_callbacks:
+      grads_and_vars = _run_callbacks(pre_allreduce_callbacks, grads_and_vars)
+    (allreduced_scaled_grads,
+     filtered_training_vars) = _filter_and_allreduce_gradients(
+         grads_and_vars, allreduce_precision="float16")
+    allreduced_unscaled_grads = optimizer.get_unscaled_gradients(
+        allreduced_scaled_grads)
+    grads_and_vars = zip(allreduced_unscaled_grads, filtered_training_vars)
+  else:
+    # TPU or FP32 GPU code path
+    grads = tape.gradient(loss, trainable_variables)
+    grads_and_vars = zip(grads, trainable_variables)
+    if pre_allreduce_callbacks:
+      grads_and_vars = _run_callbacks(pre_allreduce_callbacks, grads_and_vars)
+    (allreduced_grads,
+     filtered_training_vars) = _filter_and_allreduce_gradients(
+         grads_and_vars, allreduce_precision="float32")
+    grads_and_vars = zip(allreduced_grads, filtered_training_vars)
+  if post_allreduce_callbacks:
+    grads_and_vars = _run_callbacks(post_allreduce_callbacks, grads_and_vars)
+  optimizer.apply_gradients(
+      grads_and_vars, experimental_aggregate_gradients=False)
diff --git a/models/official/staging/training/runnable.py b/models/official/staging/training/runnable.py
new file mode 100644
index 0000000000000000000000000000000000000000..1af6eca06a337506a68d6329e0da16c9ca095e0a
--- /dev/null
+++ b/models/official/staging/training/runnable.py
@@ -0,0 +1,79 @@
+# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
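A rough usage sketch for `minimize_using_explicit_allreduce`, called from inside a replicated step function. The `model`, `optimizer`, `loss_fn`, iterator and gradient-clipping callback are hypothetical placeholders; the callback only needs to accept and return (gradient, variable) pairs:

  def train_step(inputs):
    features, labels = inputs
    with tf.GradientTape() as tape:
      loss = loss_fn(labels, model(features, training=True))
    grad_utils.minimize_using_explicit_allreduce(
        tape, optimizer, loss, model.trainable_variables,
        post_allreduce_callbacks=[clip_grads_callback])

  strategy.run(train_step, args=(next(train_iterator),))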
+# ============================================================================== +"""An abstraction that users can easily handle their custom training loops.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import abc +import six +import tensorflow.compat.v2 as tf +from typing import Dict, Optional, Text + + +@six.add_metaclass(abc.ABCMeta) +class AbstractTrainable(tf.Module): + """An abstract class defining the APIs required for training.""" + + @abc.abstractmethod + def train(self, + num_steps: Optional[tf.Tensor]) -> Optional[Dict[Text, tf.Tensor]]: + """Implements model training with multiple steps. + + In training, it is common to break the total training steps into several + training loops, so users can do checkpointing, write summaries and run some + python callbacks. This is necessary for getting good performance in TPU + training, as the overhead for launching a multi worker tf.function may be + large in Eager mode. It is usually encouraged to create a host training loop + (e.g. using a `tf.range` wrapping `strategy.run` inside a + `tf.function`) in the TPU case. For the cases that don't require host + training loop to acheive peak performance, users can just implement a simple + python loop to drive each step. + + Args: + num_steps: A guideline for how many training steps to run. Note that it is + up to the model what constitutes a "step" (this may involve more than + one update to model parameters, e.g. if training a GAN). + + Returns: + The function may return a dictionary of `Tensors`, which will be + written to logs and as TensorBoard summaries. + """ + pass + + +@six.add_metaclass(abc.ABCMeta) +class AbstractEvaluable(tf.Module): + """An abstract class defining the APIs required for evaluation.""" + + @abc.abstractmethod + def evaluate( + self, num_steps: Optional[tf.Tensor]) -> Optional[Dict[Text, tf.Tensor]]: + """Implements model evaluation. + + Args: + num_steps: A guideline for how many evaluation steps to run. Note that it + is up to the model what constitutes a "step". Generally, it may be + desirable to support both a limited number of eval steps and iterating + over a full dataset (however many steps are required) when `num_steps` + is `None`. + + Returns: + The function may return a dictionary of `Tensors`, which will be + written to logs and as TensorBoard summaries. + """ + pass diff --git a/models/official/staging/training/standard_runnable.py b/models/official/staging/training/standard_runnable.py new file mode 100644 index 0000000000000000000000000000000000000000..20dd66f28e44f7b799dff4af826dcb22bb13595a --- /dev/null +++ b/models/official/staging/training/standard_runnable.py @@ -0,0 +1,181 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
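A minimal sketch of the `AbstractTrainable` contract defined in `runnable.py` above. The counting "training" is purely a placeholder for real optimizer updates, and the class name is illustrative:

```python
import tensorflow.compat.v2 as tf
from official.staging.training import runnable


class CountingTrainable(runnable.AbstractTrainable):
  """Toy trainable: each 'step' just increments a counter."""

  def __init__(self):
    super(CountingTrainable, self).__init__()
    self._steps = tf.Variable(0, dtype=tf.int32)

  @tf.function
  def train(self, num_steps):
    for _ in tf.range(num_steps):
      self._steps.assign_add(1)  # stand-in for one real parameter update
    # The returned tensors would be logged / written as summaries by the caller.
    return {"trained_steps": self._steps.value()}


trainable = CountingTrainable()
logs = trainable.train(tf.constant(3))  # pass a Tensor to avoid retracing
```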
+# ============================================================================== +"""An abstraction that users can easily handle their custom training loops.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import abc +import six +import tensorflow.compat.v2 as tf +from typing import Dict, Optional, Text + +from official.staging.training import runnable +from official.staging.training import utils + + +@six.add_metaclass(abc.ABCMeta) +class StandardTrainable(runnable.AbstractTrainable): + """Implements the standard functionality of AbstractTrainable APIs.""" + + def __init__(self, use_tf_while_loop=True, use_tf_function=True): + if use_tf_while_loop and not use_tf_function: + raise ValueError("`use_tf_while_loop=True` and `use_tf_function=False` " + "is not supported") + self.use_tf_while_loop = use_tf_while_loop + self.use_tf_function = use_tf_function + self.train_dataset = None + self.train_iter = None + self.train_loop_fn = None + + @abc.abstractmethod + def build_train_dataset(self): + """Builds the training datasets. + + Returns: + A tf.nest-compatible structure of tf.data.Dataset or DistributedDataset. + """ + pass + + def train(self, + num_steps: Optional[tf.Tensor]) -> Optional[Dict[Text, tf.Tensor]]: + """See base class.""" + if self.train_dataset is None: + # Build train input dataset + self.train_dataset = self.build_train_dataset() + self.train_iter = tf.nest.map_structure(iter, self.train_dataset) + + if self.train_loop_fn is None: + train_fn = self.train_step + if self.use_tf_while_loop: + self.train_loop_fn = utils.create_tf_while_loop_fn(train_fn) + else: + if self.use_tf_function: + train_fn = tf.function(train_fn) + self.train_loop_fn = utils.create_loop_fn(train_fn) + + self.train_loop_begin() + self.train_loop_fn(self.train_iter, num_steps) + return self.train_loop_end() + + def train_loop_begin(self): + """Called once at the beginning of the training loop. + + This is a good place to reset metrics that accumulate values over multiple + steps of training. + """ + pass + + @abc.abstractmethod + def train_step(self, iterator): + """Implements one step of training. + + What a "step" consists of is up to the implementer. If using distribution + strategies, the call to this method should take place in the "cross-replica + context" for generality, to allow e.g. multiple iterator dequeues and calls + to `strategy.run`. + + Args: + iterator: A tf.nest-compatible structure of tf.data Iterator or + DistributedIterator. + """ + pass + + def train_loop_end(self) -> Optional[Dict[Text, tf.Tensor]]: + """Called at the end of the training loop. + + This is a good place to get metric results. The value returned from this + function will be returned as-is from the train() method. + + Returns: + The function may return a dictionary of `Tensors`, which will be + written to logs and as TensorBoard summaries. + """ + pass + + +@six.add_metaclass(abc.ABCMeta) +class StandardEvaluable(runnable.AbstractEvaluable): + """Implements the standard functionality of AbstractEvaluable APIs.""" + + def __init__(self, use_tf_function=True): + self.eval_use_tf_function = use_tf_function + self.eval_dataset = None + self.eval_loop_fn = None + + @abc.abstractmethod + def build_eval_dataset(self): + """Builds the evaluation datasets. + + Returns: + A tf.nest-compatible structure of tf.data.Dataset or DistributedDataset. 
+ """ + pass + + def evaluate( + self, num_steps: Optional[tf.Tensor]) -> Optional[Dict[Text, tf.Tensor]]: + """See base class.""" + if self.eval_dataset is None: + # Build train input dataset + self.eval_dataset = self.build_eval_dataset() + + if self.eval_loop_fn is None: + eval_fn = self.eval_step + if self.eval_use_tf_function: + eval_fn = tf.function(eval_fn) + self.eval_loop_fn = utils.create_loop_fn(eval_fn) + + eval_iter = tf.nest.map_structure(iter, self.eval_dataset) + + self.eval_begin() + self.eval_loop_fn(eval_iter, num_steps) + return self.eval_end() + + def eval_begin(self): + """Called once at the beginning of the evaluation. + + This is a good place to reset metrics that accumulate values over the entire + evaluation. + """ + pass + + @abc.abstractmethod + def eval_step(self, iterator): + """Implements one step of evaluation. + + What a "step" consists of is up to the implementer. If using distribution + strategies, the call to this method should take place in the "cross-replica + context" for generality, to allow e.g. multiple iterator dequeues and calls + to `strategy.run`. + + Args: + iterator: A tf.nest-compatible structure of tf.data Iterator or + DistributedIterator. + """ + pass + + def eval_end(self) -> Optional[Dict[Text, tf.Tensor]]: + """Called at the end of the evaluation. + + This is a good place to get metric results. The value returned from this + function will be returned as-is from the evaluate() method. + + Returns: + The function may return a dictionary of `Tensors`, which will be + written to logs and as TensorBoard summaries. + """ + pass diff --git a/models/official/staging/training/utils.py b/models/official/staging/training/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..33fa368b7b966e449c8309e523cd31db73efb978 --- /dev/null +++ b/models/official/staging/training/utils.py @@ -0,0 +1,342 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Some layered modules/functions to help users writing custom training loop.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import abc +import inspect +import six + +import tensorflow.compat.v2 as tf + + +def create_loop_fn(step_fn): + """Creates a multiple steps function driven by the python while loop. + + Args: + step_fn: A function which takes `iterator` as input. + + Returns: + A callable defined as the `loop_fn` defination below. + """ + + def loop_fn(iterator, num_steps, state=None, reduce_fn=None): + """A loop function with multiple steps. + + Args: + iterator: A nested structure of tf.data `Iterator` or + `DistributedIterator`. + num_steps: The number of steps in the loop. If `num_steps==-1`, will + iterate until exausting the iterator. + state: An optional initial state before running the loop. 
+ reduce_fn: a callable defined as `def reduce_fn(state, value)`, where + `value` is the outputs from `step_fn`. + + Returns: + The updated state. + """ + try: + step = 0 + # To make sure the OutOfRangeError exception can be handled well with + # async remote eager, we need to wrap the loop body in a `async_scope`. + with tf.experimental.async_scope(): + while (num_steps == -1 or step < num_steps): + outputs = step_fn(iterator) + if reduce_fn is not None: + state = reduce_fn(state, outputs) + step += 1 + return state + except (StopIteration, tf.errors.OutOfRangeError): + tf.experimental.async_clear_error() + return state + + return loop_fn + + +def create_tf_while_loop_fn(step_fn): + """Create a multiple steps function driven by tf.while_loop on the host. + + Args: + step_fn: A function which takes `iterator` as input. + + Returns: + A callable defined as the `loop_fn` defination below. + """ + + @tf.function + def loop_fn(iterator, num_steps): + """A loop function with multiple steps. + + Args: + iterator: A nested structure of tf.data `Iterator` or + `DistributedIterator`. + num_steps: The number of steps in the loop. Must be a tf.Tensor. + """ + if not isinstance(num_steps, tf.Tensor): + raise ValueError("`num_steps` should be an `tf.Tensor`. Python object " + "may cause retracing.") + + for _ in tf.range(num_steps): + step_fn(iterator) + + return loop_fn + + +def make_distributed_dataset(strategy, dataset_or_fn, *args, **kwargs): + """A helper function to create distributed dataset. + + Args: + strategy: An instance of `tf.distribute.Strategy`. + dataset_or_fn: A instance of `tf.data.Dataset` or a function which takes an + `tf.distribute.InputContext` as input and returns a `tf.data.Dataset`. If + it is a function, it could optionally have an argument named + `input_context` which is `tf.distribute.InputContext` argument type. + *args: The list of arguments to be passed to dataset_or_fn. + **kwargs: Any keyword arguments to be passed. + + Returns: + A distributed Dataset. + """ + if strategy is None: + strategy = tf.distribute.get_strategy() + + if isinstance(dataset_or_fn, tf.data.Dataset): + return strategy.experimental_distribute_dataset(dataset_or_fn) + + if not callable(dataset_or_fn): + raise ValueError("`dataset_or_fn` should be either callable or an instance " + "of `tf.data.Dataset`") + + def dataset_fn(ctx): + """Wrapped dataset function for creating distributed dataset..""" + + # If `dataset_or_fn` is a function and has `input_context` as argument + # names, pass `ctx` as the value of `input_context` when calling + # `dataset_or_fn`. Otherwise `ctx` will not be used when calling + # `dataset_or_fn`. + if six.PY3: + argspec = inspect.getfullargspec(dataset_or_fn) + else: + argspec = inspect.getargspec(dataset_or_fn) + args_names = argspec.args + + if "input_context" in args_names: + kwargs["input_context"] = ctx + ds = dataset_or_fn(*args, **kwargs) + return ds + + return strategy.experimental_distribute_datasets_from_function(dataset_fn) + + +class SummaryManager(object): + """A class manages writing summaries.""" + + def __init__(self, + summary_writer, + summary_fn, + global_step=None, + summary_interval=None): + """Construct a summary manager object. + + Args: + summary_writer: A `tf.summary.SummaryWriter` instance for writing + summaries. + summary_fn: A callable defined as `def summary_fn(name, tensor, + step=None)`, which describes the summary operation. 
+ global_step: A `tf.Variable` instance for checking the current global step + value, in case users want to save summaries every N steps. + summary_interval: An integer, indicates the minimum step interval between + two summaries. + """ + if summary_writer is not None: + self._summary_writer = summary_writer + self._enabled = True + else: + self._summary_writer = tf.summary.create_noop_writer() + self._enabled = False + self._summary_fn = summary_fn + + if global_step is None: + self._global_step = tf.summary.experimental.get_step() + else: + self._global_step = global_step + + if summary_interval is not None: + if self._global_step is None: + raise ValueError("`summary_interval` is not None, but no `global_step` " + "can be obtained ") + self._last_summary_step = self._global_step.numpy() + self._summary_interval = summary_interval + + @property + def summary_interval(self): + return self._summary_interval + + @property + def summary_writer(self): + """Returns the underlying summary writer.""" + return self._summary_writer + + def flush(self): + """Flush the underlying summary writer.""" + if self._enabled: + tf.summary.flush(self._summary_writer) + + def write_summaries(self, items, always_write=True): + """Write a bulk of summaries. + + Args: + items: a dictionary of `Tensors` for writing summaries. + always_write: An optional boolean. If `True`, the manager will always + write summaries unless the summaries have been written for the same + step. Otherwise the manager will only write the summaries if the + interval between summaries are larger than `summary_interval`. + + Returns: + A boolean indicates whether the summaries are written or not. + """ + # TODO(rxsang): Support writing summaries with nested structure, so users + # can split the summaries into different directories for nicer visualization + # in Tensorboard, like train and eval metrics. + if not self._enabled: + return False + + if self._summary_interval is not None: + current_step = self._global_step.numpy() + if current_step == self._last_summary_step: + return False + if not always_write and current_step < (self._last_summary_step + + self._summary_interval): + return False + self._last_summary_step = current_step + + with self._summary_writer.as_default(): + for name, tensor in items.items(): + self._summary_fn(name, tensor, step=self._global_step) + return True + + +@six.add_metaclass(abc.ABCMeta) +class Trigger(object): + """An abstract class representing a "trigger" for some event.""" + + @abc.abstractmethod + def __call__(self, value: float, force_trigger=False): + """Maybe trigger the event based on the given value. + + Args: + value: the value for triggering. + force_trigger: Whether the trigger is forced triggered. + + Returns: + `True` if the trigger is triggered on the given `value`, and + `False` otherwise. + """ + + @abc.abstractmethod + def reset(self): + """Reset states in the trigger.""" + + +class IntervalTrigger(Trigger): + """Triggers on every fixed interval.""" + + def __init__(self, interval, start=0): + """Constructs the IntervalTrigger. + + Args: + interval: The triggering interval. + start: An initial value for the trigger. + """ + self._interval = interval + self._last_trigger_value = start + + def __call__(self, value, force_trigger=False): + """Maybe trigger the event based on the given value. + + Args: + value: the value for triggering. + force_trigger: If True, the trigger will be forced triggered unless the + last trigger value is equal to `value`. 
+ + Returns: + `True` if the trigger is triggered on the given `value`, and + `False` otherwise. + """ + if force_trigger and value != self._last_trigger_value: + self._last_trigger_value = value + return True + + if self._interval and self._interval > 0: + if value >= self._last_trigger_value + self._interval: + self._last_trigger_value = value + return True + return False + + def reset(self): + """See base class.""" + self._last_trigger_value = 0 + + +class EpochHelper(object): + """A Helper class to handle epochs in Customized Training Loop.""" + + def __init__(self, epoch_steps, global_step): + """Constructs the EpochHelper. + + Args: + epoch_steps: An integer indicates how many steps in an epoch. + global_step: A `tf.Variable` instance indicates the current global step. + """ + self._epoch_steps = epoch_steps + self._global_step = global_step + self._current_epoch = None + self._epoch_start_step = None + self._in_epoch = False + + def epoch_begin(self): + """Returns whether a new epoch should begin.""" + if self._in_epoch: + return False + current_step = self._global_step.numpy() + self._epoch_start_step = current_step + self._current_epoch = current_step // self._epoch_steps + self._in_epoch = True + return True + + def epoch_end(self): + """Returns whether the current epoch should end.""" + if not self._in_epoch: + raise ValueError("`epoch_end` can only be called inside an epoch") + current_step = self._global_step.numpy() + epoch = current_step // self._epoch_steps + + if epoch > self._current_epoch: + self._in_epoch = False + return True + return False + + @property + def batch_index(self): + """Index of the next batch within the current epoch.""" + return self._global_step.numpy() - self._epoch_start_step + + @property + def current_epoch(self): + return self._current_epoch diff --git a/models/official/utils/__init__.py b/models/official/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/official/utils/flags/README.md b/models/official/utils/flags/README.md new file mode 100644 index 0000000000000000000000000000000000000000..18160f780a0928a2f28ab9a8e66433938179d581 --- /dev/null +++ b/models/official/utils/flags/README.md @@ -0,0 +1,97 @@ +# Adding Abseil (absl) flags quickstart +## Defining a flag +absl flag definitions are similar to argparse, although they are defined on a global namespace. + +For instance defining a string flag looks like: +```$xslt +from absl import flags +flags.DEFINE_string( + name="my_flag", + default="a_sensible_default", + help="Here is what this flag does." +) +``` + +All three arguments are required, but default may be `None`. A common optional argument is +short_name for defining abreviations. Certain `DEFINE_*` methods will have other required arguments. +For instance `DEFINE_enum` requires the `enum_values` argument to be specified. + +## Key Flags +absl has the concept of a key flag. Any flag defined in `__main__` is considered a key flag by +default. Key flags are displayed in `--help`, others only appear in `--helpfull`. In order to +handle key flags that are defined outside the module in question, absl provides the +`flags.adopt_module_key_flags()` method. This adds the key flags of a different module to one's own +key flags. 
For example:
+```$xslt
+File: flag_source.py
+---------------------------------------
+
+from absl import flags
+flags.DEFINE_string(name="my_flag", default="abc", help="a flag.")
+```
+
+```$xslt
+File: my_module.py
+---------------------------------------
+
+from absl import app as absl_app
+from absl import flags
+
+import flag_source
+
+flags.adopt_module_key_flags(flag_source)
+
+def main(_):
+  pass
+
+absl_app.run(main, [__file__, "-h"])
+```
+
+When `my_module.py` is run it will show the help text for `my_flag`. Because not all flags defined
+in a file are equally important, `official/utils/flags/core.py` (generally imported as flags_core)
+provides an abstraction for handling key flag declaration in an easy way through the
+`register_key_flags_in_core()` function, which allows a module to make a single
+`adopt_module_key_flags(flags_core)` call when using the util flag declaration functions.
+
+## Validators
+Often the constraints on a flag are complicated. absl provides the validator decorator to allow
+one to mark a function as a flag validation function. Suppose we want users to provide a flag
+which is a palindrome.
+
+```$xslt
+from absl import flags
+
+flags.DEFINE_string(name="pal_flag", short_name="pf", default="", help="Give me a palindrome")
+
+@flags.validator("pal_flag")
+def _check_pal(provided_pal_flag):
+  return provided_pal_flag == provided_pal_flag[::-1]
+
+```
+
+Validators take the form that returning True (truthy) passes, and all others
+(False, None, exception) fail.
+
+## Testing
+To test using absl, simply declare flags in the `setUpClass` method of the test case.
+
+```$xslt
+import unittest
+
+from absl import flags
+import tensorflow as tf
+
+from official.utils.flags import core as flags_core
+
+def define_flags():
+  flags.DEFINE_string(name="test_flag", default="abc", help="an example flag")
+
+
+class BaseTester(unittest.TestCase):
+
+  @classmethod
+  def setUpClass(cls):
+    super(BaseTester, cls).setUpClass()
+    define_flags()
+
+  def test_trivial(self):
+    flags_core.parse_flags([__file__, "--test_flag", "def"])
+    self.assertEqual(flags.FLAGS.test_flag, "def")
+
+```
diff --git a/models/official/utils/flags/__init__.py b/models/official/utils/flags/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/models/official/utils/flags/_base.py b/models/official/utils/flags/_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..a143e078200eac12b0836eb32fa7c7d0416a8e66
--- /dev/null
+++ b/models/official/utils/flags/_base.py
@@ -0,0 +1,157 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

+# ============================================================================== +"""Flags which will be nearly universal across models.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import flags +import tensorflow as tf +from official.utils.flags._conventions import help_wrap + + +def define_base(data_dir=True, model_dir=True, clean=False, train_epochs=False, + epochs_between_evals=False, stop_threshold=False, + batch_size=True, num_gpu=False, hooks=False, export_dir=False, + distribution_strategy=False, run_eagerly=False): + """Register base flags. + + Args: + data_dir: Create a flag for specifying the input data directory. + model_dir: Create a flag for specifying the model file directory. + clean: Create a flag for removing the model_dir. + train_epochs: Create a flag to specify the number of training epochs. + epochs_between_evals: Create a flag to specify the frequency of testing. + stop_threshold: Create a flag to specify a threshold accuracy or other + eval metric which should trigger the end of training. + batch_size: Create a flag to specify the batch size. + num_gpu: Create a flag to specify the number of GPUs used. + hooks: Create a flag to specify hooks for logging. + export_dir: Create a flag to specify where a SavedModel should be exported. + distribution_strategy: Create a flag to specify which Distribution Strategy + to use. + run_eagerly: Create a flag to specify to run eagerly op by op. + Returns: + A list of flags for core.py to marks as key flags. + """ + key_flags = [] + + if data_dir: + flags.DEFINE_string( + name="data_dir", short_name="dd", default="/tmp", + help=help_wrap("The location of the input data.")) + key_flags.append("data_dir") + + if model_dir: + flags.DEFINE_string( + name="model_dir", short_name="md", default="/tmp", + help=help_wrap("The location of the model checkpoint files.")) + key_flags.append("model_dir") + + if clean: + flags.DEFINE_boolean( + name="clean", default=False, + help=help_wrap("If set, model_dir will be removed if it exists.")) + key_flags.append("clean") + + if train_epochs: + flags.DEFINE_integer( + name="train_epochs", short_name="te", default=1, + help=help_wrap("The number of epochs used to train.")) + key_flags.append("train_epochs") + + if epochs_between_evals: + flags.DEFINE_integer( + name="epochs_between_evals", short_name="ebe", default=1, + help=help_wrap("The number of training epochs to run between " + "evaluations.")) + key_flags.append("epochs_between_evals") + + if stop_threshold: + flags.DEFINE_float( + name="stop_threshold", short_name="st", + default=None, + help=help_wrap("If passed, training will stop at the earlier of " + "train_epochs and when the evaluation metric is " + "greater than or equal to stop_threshold.")) + + if batch_size: + flags.DEFINE_integer( + name="batch_size", short_name="bs", default=32, + help=help_wrap("Batch size for training and evaluation. When using " + "multiple gpus, this is the global batch size for " + "all devices. For example, if the batch size is 32 " + "and there are 4 GPUs, each GPU will get 8 examples on " + "each step.")) + key_flags.append("batch_size") + + if num_gpu: + flags.DEFINE_integer( + name="num_gpus", short_name="ng", + default=1, + help=help_wrap( + "How many GPUs to use at each worker with the " + "DistributionStrategies API. 
The default is 1.")) + + if run_eagerly: + flags.DEFINE_boolean( + name="run_eagerly", default=False, + help="Run the model op by op without building a model function.") + + if hooks: + flags.DEFINE_list( + name="hooks", short_name="hk", default="LoggingTensorHook", + help=help_wrap( + u"A list of (case insensitive) strings to specify the names of " + u"training hooks. Example: `--hooks ProfilerHook," + u"ExamplesPerSecondHook`\n See hooks_helper " + u"for details.") + ) + key_flags.append("hooks") + + if export_dir: + flags.DEFINE_string( + name="export_dir", short_name="ed", default=None, + help=help_wrap("If set, a SavedModel serialization of the model will " + "be exported to this directory at the end of training. " + "See the README for more details and relevant links.") + ) + key_flags.append("export_dir") + + if distribution_strategy: + flags.DEFINE_string( + name="distribution_strategy", short_name="ds", default="mirrored", + help=help_wrap("The Distribution Strategy to use for training. " + "Accepted values are 'off', 'one_device', " + "'mirrored', 'parameter_server', 'collective', " + "case insensitive. 'off' means not to use " + "Distribution Strategy; 'default' means to choose " + "from `MirroredStrategy` or `OneDeviceStrategy` " + "according to the number of GPUs.") + ) + + + return key_flags + + +def get_num_gpus(flags_obj): + """Treat num_gpus=-1 as 'use all'.""" + if flags_obj.num_gpus != -1: + return flags_obj.num_gpus + + from tensorflow.python.client import device_lib # pylint: disable=g-import-not-at-top + local_device_protos = device_lib.list_local_devices() + return sum([1 for d in local_device_protos if d.device_type == "GPU"]) diff --git a/models/official/utils/flags/_benchmark.py b/models/official/utils/flags/_benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..5aa01421c5f5c7fede94b971d6674267f232b6da --- /dev/null +++ b/models/official/utils/flags/_benchmark.py @@ -0,0 +1,108 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Flags for benchmarking models.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import flags + +from official.utils.flags._conventions import help_wrap + + +def define_log_steps(): + flags.DEFINE_integer( + name="log_steps", default=100, + help="Frequency with which to log timing information with TimeHistory.") + + return [] + + +def define_benchmark(benchmark_log_dir=True, bigquery_uploader=True): + """Register benchmarking flags. + + Args: + benchmark_log_dir: Create a flag to specify location for benchmark logging. + bigquery_uploader: Create flags for uploading results to BigQuery. + + Returns: + A list of flags for core.py to marks as key flags. 
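A short sketch of how the base flags in `_base.py` above are typically registered and read. The program name and chosen arguments are illustrative; in practice models go through the public `core.py` wrapper added later in this change rather than importing `_base` directly:

```python
from absl import app
from absl import flags

from official.utils.flags import _base

FLAGS = flags.FLAGS


def define_my_flags():
  # Registers --data_dir, --model_dir, --batch_size, --num_gpus, --train_epochs, ...
  _base.define_base(num_gpu=True, train_epochs=True, export_dir=True)


def my_model_main(_):
  num_gpus = _base.get_num_gpus(FLAGS)  # --num_gpus=-1 means "use all available GPUs"
  print("batch_size=%d num_gpus=%d" % (FLAGS.batch_size, num_gpus))


if __name__ == "__main__":
  define_my_flags()
  app.run(my_model_main)
```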
+ """ + + key_flags = [] + + flags.DEFINE_enum( + name="benchmark_logger_type", default="BaseBenchmarkLogger", + enum_values=["BaseBenchmarkLogger", "BenchmarkFileLogger"], + help=help_wrap("The type of benchmark logger to use. Defaults to using " + "BaseBenchmarkLogger which logs to STDOUT. Different " + "loggers will require other flags to be able to work.")) + flags.DEFINE_string( + name="benchmark_test_id", short_name="bti", default=None, + help=help_wrap("The unique test ID of the benchmark run. It could be the " + "combination of key parameters. It is hardware " + "independent and could be used compare the performance " + "between different test runs. This flag is designed for " + "human consumption, and does not have any impact within " + "the system.")) + + define_log_steps() + + if benchmark_log_dir: + flags.DEFINE_string( + name="benchmark_log_dir", short_name="bld", default=None, + help=help_wrap("The location of the benchmark logging.") + ) + + if bigquery_uploader: + flags.DEFINE_string( + name="gcp_project", short_name="gp", default=None, + help=help_wrap( + "The GCP project name where the benchmark will be uploaded.")) + + flags.DEFINE_string( + name="bigquery_data_set", short_name="bds", default="test_benchmark", + help=help_wrap( + "The Bigquery dataset name where the benchmark will be uploaded.")) + + flags.DEFINE_string( + name="bigquery_run_table", short_name="brt", default="benchmark_run", + help=help_wrap("The Bigquery table name where the benchmark run " + "information will be uploaded.")) + + flags.DEFINE_string( + name="bigquery_run_status_table", short_name="brst", + default="benchmark_run_status", + help=help_wrap("The Bigquery table name where the benchmark run " + "status information will be uploaded.")) + + flags.DEFINE_string( + name="bigquery_metric_table", short_name="bmt", + default="benchmark_metric", + help=help_wrap("The Bigquery table name where the benchmark metric " + "information will be uploaded.")) + + @flags.multi_flags_validator( + ["benchmark_logger_type", "benchmark_log_dir"], + message="--benchmark_logger_type=BenchmarkFileLogger will require " + "--benchmark_log_dir being set") + def _check_benchmark_log_dir(flags_dict): + benchmark_logger_type = flags_dict["benchmark_logger_type"] + if benchmark_logger_type == "BenchmarkFileLogger": + return flags_dict["benchmark_log_dir"] + return True + + return key_flags diff --git a/models/official/utils/flags/_conventions.py b/models/official/utils/flags/_conventions.py new file mode 100644 index 0000000000000000000000000000000000000000..e04448ab81fc6db7fd8ba1650b427320ff00c05e --- /dev/null +++ b/models/official/utils/flags/_conventions.py @@ -0,0 +1,54 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Central location for shared argparse convention definitions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sys +import codecs +import functools + +from absl import app as absl_app +from absl import flags + + +# This codifies help string conventions and makes it easy to update them if +# necessary. Currently the only major effect is that help bodies start on the +# line after flags are listed. All flag definitions should wrap the text bodies +# with help wrap when calling DEFINE_*. +_help_wrap = functools.partial(flags.text_wrap, length=80, indent="", + firstline_indent="\n") + + +# Pretty formatting causes issues when utf-8 is not installed on a system. +def _stdout_utf8(): + try: + codecs.lookup("utf-8") + except LookupError: + return False + return getattr(sys.stdout, "encoding", "") == "UTF-8" + + +if _stdout_utf8(): + help_wrap = _help_wrap +else: + def help_wrap(text, *args, **kwargs): + return _help_wrap(text, *args, **kwargs).replace(u"\ufeff", u"") + + +# Replace None with h to also allow -h +absl_app.HelpshortFlag.SHORT_NAME = "h" diff --git a/models/official/utils/flags/_device.py b/models/official/utils/flags/_device.py new file mode 100644 index 0000000000000000000000000000000000000000..d8974fc48d1fc77d227745191579df16b2e46bcc --- /dev/null +++ b/models/official/utils/flags/_device.py @@ -0,0 +1,85 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Flags for managing compute devices. Currently only contains TPU flags.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import flags +from absl import logging + +from official.utils.flags._conventions import help_wrap + + +def require_cloud_storage(flag_names): + """Register a validator to check directory flags. + Args: + flag_names: An iterable of strings containing the names of flags to be + checked. + """ + msg = "TPU requires GCS path for {}".format(", ".join(flag_names)) + @flags.multi_flags_validator(["tpu"] + flag_names, message=msg) + def _path_check(flag_values): # pylint: disable=missing-docstring + if flag_values["tpu"] is None: + return True + + valid_flags = True + for key in flag_names: + if not flag_values[key].startswith("gs://"): + logging.error("%s must be a GCS path.", key) + valid_flags = False + + return valid_flags + + +def define_device(tpu=True): + """Register device specific flags. + Args: + tpu: Create flags to specify TPU operation. + Returns: + A list of flags for core.py to marks as key flags. + """ + + key_flags = [] + + if tpu: + flags.DEFINE_string( + name="tpu", default=None, + help=help_wrap( + "The Cloud TPU to use for training. 
This should be either the name " + "used when creating the Cloud TPU, or a " + "grpc://ip.address.of.tpu:8470 url. Passing `local` will use the" + "CPU of the local instance instead. (Good for debugging.)")) + key_flags.append("tpu") + + flags.DEFINE_string( + name="tpu_zone", default=None, + help=help_wrap( + "[Optional] GCE zone where the Cloud TPU is located in. If not " + "specified, we will attempt to automatically detect the GCE " + "project from metadata.")) + + flags.DEFINE_string( + name="tpu_gcp_project", default=None, + help=help_wrap( + "[Optional] Project name for the Cloud TPU-enabled project. If not " + "specified, we will attempt to automatically detect the GCE " + "project from metadata.")) + + flags.DEFINE_integer(name="num_tpu_shards", default=8, + help=help_wrap("Number of shards (TPU chips).")) + + return key_flags diff --git a/models/official/utils/flags/_distribution.py b/models/official/utils/flags/_distribution.py new file mode 100644 index 0000000000000000000000000000000000000000..ca331bf24affed5185273a19752d28a491ea3711 --- /dev/null +++ b/models/official/utils/flags/_distribution.py @@ -0,0 +1,54 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Flags related to distributed execution.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import flags +import tensorflow as tf + +from official.utils.flags._conventions import help_wrap + + +def define_distribution(worker_hosts=True, task_index=True): + """Register distributed execution flags. + + Args: + worker_hosts: Create a flag for specifying comma-separated list of workers. + task_index: Create a flag for specifying index of task. + + Returns: + A list of flags for core.py to marks as key flags. + """ + key_flags = [] + + if worker_hosts: + flags.DEFINE_string( + name='worker_hosts', default=None, + help=help_wrap( + 'Comma-separated list of worker ip:port pairs for running ' + 'multi-worker models with DistributionStrategy. The user would ' + 'start the program on each host with identical value for this ' + 'flag.')) + + if task_index: + flags.DEFINE_integer( + name='task_index', default=-1, + help=help_wrap('If multi-worker training, the task_index of this ' + 'worker.')) + + return key_flags diff --git a/models/official/utils/flags/_misc.py b/models/official/utils/flags/_misc.py new file mode 100644 index 0000000000000000000000000000000000000000..c6fa24b5ae7e29827967c5c6a1b78dc3613d40fe --- /dev/null +++ b/models/official/utils/flags/_misc.py @@ -0,0 +1,50 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Misc flags.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import flags + +from official.utils.flags._conventions import help_wrap + + +def define_image(data_format=True): + """Register image specific flags. + + Args: + data_format: Create a flag to specify image axis convention. + + Returns: + A list of flags for core.py to marks as key flags. + """ + + key_flags = [] + + if data_format: + flags.DEFINE_enum( + name="data_format", short_name="df", default=None, + enum_values=["channels_first", "channels_last"], + help=help_wrap( + "A flag to override the data format used in the model. " + "channels_first provides a performance boost on GPU but is not " + "always compatible with CPU. If left unspecified, the data format " + "will be chosen automatically based on whether TensorFlow was " + "built for CPU or GPU.")) + key_flags.append("data_format") + + return key_flags diff --git a/models/official/utils/flags/_performance.py b/models/official/utils/flags/_performance.py new file mode 100644 index 0000000000000000000000000000000000000000..cc5840f95e1ea26697951d1b78fe847526d5859b --- /dev/null +++ b/models/official/utils/flags/_performance.py @@ -0,0 +1,289 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Register flags for optimizing performance.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import multiprocessing + +from absl import flags # pylint: disable=g-bad-import-order +import tensorflow as tf # pylint: disable=g-bad-import-order + +from official.utils.flags._conventions import help_wrap + + +# Map string to TensorFlow dtype +DTYPE_MAP = { + "fp16": tf.float16, + "bf16": tf.bfloat16, + "fp32": tf.float32, +} + + +def get_tf_dtype(flags_obj): + if getattr(flags_obj, "fp16_implementation", None) == "graph_rewrite": + # If the graph_rewrite is used, we build the graph with fp32, and let the + # graph rewrite change ops to fp16. 
+ return tf.float32 + return DTYPE_MAP[flags_obj.dtype] + + +def get_loss_scale(flags_obj, default_for_fp16): + dtype = get_tf_dtype(flags_obj) + if flags_obj.loss_scale == "dynamic": + return flags_obj.loss_scale + elif flags_obj.loss_scale is not None: + return float(flags_obj.loss_scale) + elif dtype == tf.float32 or dtype == tf.bfloat16: + return 1 # No loss scaling is needed for fp32 + else: + assert dtype == tf.float16 + return default_for_fp16 + + +def define_performance(num_parallel_calls=False, inter_op=False, intra_op=False, + synthetic_data=False, max_train_steps=False, dtype=False, + all_reduce_alg=False, num_packs=False, + tf_gpu_thread_mode=False, + datasets_num_private_threads=False, + datasets_num_parallel_batches=False, + dynamic_loss_scale=False, fp16_implementation=False, + loss_scale=False, + tf_data_experimental_slack=False, enable_xla=False, + training_dataset_cache=False): + """Register flags for specifying performance tuning arguments. + + Args: + num_parallel_calls: Create a flag to specify parallelism of data loading. + inter_op: Create a flag to allow specification of inter op threads. + intra_op: Create a flag to allow specification of intra op threads. + synthetic_data: Create a flag to allow the use of synthetic data. + max_train_steps: Create a flags to allow specification of maximum number + of training steps + dtype: Create flags for specifying dtype. + all_reduce_alg: If set forces a specific algorithm for multi-gpu. + num_packs: If set provides number of packs for MirroredStrategy's cross + device ops. + tf_gpu_thread_mode: gpu_private triggers us of private thread pool. + datasets_num_private_threads: Number of private threads for datasets. + datasets_num_parallel_batches: Determines how many batches to process in + parallel when using map and batch from tf.data. + dynamic_loss_scale: Allow the "loss_scale" flag to take on the value + "dynamic". Only valid if `dtype` is True. + fp16_implementation: Create fp16_implementation flag. + loss_scale: Controls the loss scaling, normally for mixed-precision + training. Can only be turned on if dtype is also True. + tf_data_experimental_slack: Determines whether to enable tf.data's + `experimental_slack` option. + enable_xla: Determines if XLA (auto clustering) is turned on. + training_dataset_cache: Whether to cache the training dataset on workers. + Typically used to improve training performance when training data is in + remote storage and can fit into worker memory. + + Returns: + A list of flags for core.py to marks as key flags. + """ + + key_flags = [] + if num_parallel_calls: + flags.DEFINE_integer( + name="num_parallel_calls", short_name="npc", + default=multiprocessing.cpu_count(), + help=help_wrap("The number of records that are processed in parallel " + "during input processing. This can be optimized per " + "data set but for generally homogeneous data sets, " + "should be approximately the number of available CPU " + "cores. (default behavior)")) + + if inter_op: + flags.DEFINE_integer( + name="inter_op_parallelism_threads", short_name="inter", default=0, + help=help_wrap("Number of inter_op_parallelism_threads to use for CPU. " + "See TensorFlow config.proto for details.") + ) + + if intra_op: + flags.DEFINE_integer( + name="intra_op_parallelism_threads", short_name="intra", default=0, + help=help_wrap("Number of intra_op_parallelism_threads to use for CPU. 
" + "See TensorFlow config.proto for details.")) + + if synthetic_data: + flags.DEFINE_bool( + name="use_synthetic_data", short_name="synth", default=False, + help=help_wrap( + "If set, use fake data (zeroes) instead of a real dataset. " + "This mode is useful for performance debugging, as it removes " + "input processing steps, but will not learn anything.")) + + if max_train_steps: + flags.DEFINE_integer( + name="max_train_steps", short_name="mts", default=None, help=help_wrap( + "The model will stop training if the global_step reaches this " + "value. If not set, training will run until the specified number " + "of epochs have run as usual. It is generally recommended to set " + "--train_epochs=1 when using this flag." + )) + + if dtype: + flags.DEFINE_enum( + name="dtype", short_name="dt", default="fp32", + enum_values=DTYPE_MAP.keys(), + help=help_wrap("The TensorFlow datatype used for calculations. " + "Variables may be cast to a higher precision on a " + "case-by-case basis for numerical stability.")) + + loss_scale_help_text = ( + "The amount to scale the loss by when the model is run. {}. Before " + "gradients are computed, the loss is multiplied by the loss scale, " + "making all gradients loss_scale times larger. To adjust for this, " + "gradients are divided by the loss scale before being applied to " + "variables. This is mathematically equivalent to training without " + "a loss scale, but the loss scale helps avoid some intermediate " + "gradients from underflowing to zero. If not provided the default " + "for fp16 is 128 and 1 for all other dtypes.{}" + ) + if dynamic_loss_scale: + loss_scale_help_text = loss_scale_help_text.format( + "This can be an int/float or the string 'dynamic'", + " The string 'dynamic' can be used to dynamically determine the " + "optimal loss scale during training, but currently this " + "significantly slows down performance") + loss_scale_validation_msg = ("loss_scale should be a positive int/float " + "or the string 'dynamic'.") + else: + loss_scale_help_text = loss_scale_help_text.format( + "This must be an int/float", "") + loss_scale_validation_msg = "loss_scale should be a positive int/float." + if loss_scale: + flags.DEFINE_string( + name="loss_scale", short_name="ls", default=None, + help=help_wrap(loss_scale_help_text)) + + @flags.validator(flag_name="loss_scale", + message=loss_scale_validation_msg) + def _check_loss_scale(loss_scale): # pylint: disable=unused-variable + """Validator to check the loss scale flag is valid.""" + if loss_scale is None: + return True # null case is handled in get_loss_scale() + + if loss_scale == "dynamic" and dynamic_loss_scale: + return True + + try: + loss_scale = float(loss_scale) + except ValueError: + return False + + return loss_scale > 0 + + if fp16_implementation: + flags.DEFINE_enum( + name="fp16_implementation", default="keras", + enum_values=("keras', 'graph_rewrite"), + help=help_wrap( + "When --dtype=fp16, how fp16 should be implemented. This has no " + "impact on correctness. 'keras' uses the " + "tf.keras.mixed_precision API. 
'graph_rewrite' uses the " + "tf.train.experimental.enable_mixed_precision_graph_rewrite " + "API.")) + + @flags.multi_flags_validator(["fp16_implementation", "dtype", + "loss_scale"]) + def _check_fp16_implementation(flags_dict): + """Validator to check fp16_implementation flag is valid.""" + if (flags_dict["fp16_implementation"] == "graph_rewrite" and + flags_dict["dtype"] != "fp16"): + raise flags.ValidationError("--fp16_implementation should not be " + "specified unless --dtype=fp16") + return True + + if all_reduce_alg: + flags.DEFINE_string( + name="all_reduce_alg", short_name="ara", default=None, + help=help_wrap("Defines the algorithm to use for performing all-reduce." + "When specified with MirroredStrategy for single " + "worker, this controls " + "tf.contrib.distribute.AllReduceCrossTowerOps. When " + "specified with MultiWorkerMirroredStrategy, this " + "controls " + "tf.distribute.experimental.CollectiveCommunication; " + "valid options are `ring` and `nccl`.")) + + if num_packs: + flags.DEFINE_integer( + name="num_packs", default=1, + help=help_wrap("Sets `num_packs` in the cross device ops used in " + "MirroredStrategy. For details, see " + "tf.distribute.NcclAllReduce.")) + + if tf_gpu_thread_mode: + flags.DEFINE_string( + name="tf_gpu_thread_mode", short_name="gt_mode", default=None, + help=help_wrap( + "Whether and how the GPU device uses its own threadpool.") + ) + + flags.DEFINE_integer( + name="per_gpu_thread_count", short_name="pgtc", default=0, + help=help_wrap( + "The number of threads to use for GPU. Only valid when " + "tf_gpu_thread_mode is not global.") + ) + + if datasets_num_private_threads: + flags.DEFINE_integer( + name="datasets_num_private_threads", + default=None, + help=help_wrap( + "Number of threads for a private threadpool created for all" + "datasets computation..") + ) + + if datasets_num_parallel_batches: + flags.DEFINE_integer( + name="datasets_num_parallel_batches", + default=None, + help=help_wrap( + "Determines how many batches to process in parallel when using " + "map and batch from tf.data.") + ) + + if training_dataset_cache: + flags.DEFINE_boolean( + name="training_dataset_cache", + default=False, + help=help_wrap( + "Determines whether to cache the training dataset on workers. " + "Typically used to improve training performance when training " + "data is in remote storage and can fit into worker memory.") + ) + + if tf_data_experimental_slack: + flags.DEFINE_boolean( + name="tf_data_experimental_slack", + default=False, + help=help_wrap( + "Whether to enable tf.data's `experimental_slack` option.") + ) + + if enable_xla: + flags.DEFINE_boolean( + name="enable_xla", default=False, + help="Whether to enable XLA auto jit compilation") + + return key_flags diff --git a/models/official/utils/flags/core.py b/models/official/utils/flags/core.py new file mode 100644 index 0000000000000000000000000000000000000000..fa36944893a579fe5d4a65af9262651db0abc1ba --- /dev/null +++ b/models/official/utils/flags/core.py @@ -0,0 +1,133 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
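A hedged sketch of how `get_tf_dtype` and `get_loss_scale` from `_performance.py` above are typically consumed when configuring mixed precision. It assumes `define_performance(dtype=True, loss_scale=True, dynamic_loss_scale=True)` has already run, and uses the TF 2.x experimental Keras mixed-precision API that the training utilities earlier in this change also rely on:

```python
import tensorflow as tf
from absl import flags

from official.utils.flags import _performance

FLAGS = flags.FLAGS


def configure_precision_from_flags():
  # Assumes --dtype and --loss_scale flags are defined via define_performance().
  dtype = _performance.get_tf_dtype(FLAGS)
  optimizer = tf.keras.optimizers.SGD(0.1)
  if dtype == tf.float16:
    loss_scale = _performance.get_loss_scale(FLAGS, default_for_fp16=128)
    optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
        optimizer, loss_scale=loss_scale)
  elif dtype == tf.bfloat16:
    tf.keras.mixed_precision.experimental.set_policy("mixed_bfloat16")
  return optimizer
```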
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Public interface for flag definition. + +See _example.py for detailed instructions on defining flags. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sys +from six.moves import shlex_quote + +from absl import app as absl_app +from absl import flags + +from official.utils.flags import _base +from official.utils.flags import _benchmark +from official.utils.flags import _conventions +from official.utils.flags import _device +from official.utils.flags import _distribution +from official.utils.flags import _misc +from official.utils.flags import _performance + + +def set_defaults(**kwargs): + for key, value in kwargs.items(): + flags.FLAGS.set_default(name=key, value=value) + + +def parse_flags(argv=None): + """Reset flags and reparse. Currently only used in testing.""" + flags.FLAGS.unparse_flags() + absl_app.parse_flags_with_usage(argv or sys.argv) + + +def register_key_flags_in_core(f): + """Defines a function in core.py, and registers its key flags. + + absl uses the location of a flags.declare_key_flag() to determine the context + in which a flag is key. By making all declares in core, this allows model + main functions to call flags.adopt_module_key_flags() on core and correctly + chain key flags. + + Args: + f: The function to be wrapped + + Returns: + The "core-defined" version of the input function. + """ + + def core_fn(*args, **kwargs): + key_flags = f(*args, **kwargs) + [flags.declare_key_flag(fl) for fl in key_flags] # pylint: disable=expression-not-assigned + return core_fn + + +define_base = register_key_flags_in_core(_base.define_base) +# We have define_base_eager for compatibility, since it used to be a separate +# function from define_base. +define_base_eager = define_base +define_log_steps = register_key_flags_in_core(_benchmark.define_log_steps) +define_benchmark = register_key_flags_in_core(_benchmark.define_benchmark) +define_device = register_key_flags_in_core(_device.define_device) +define_image = register_key_flags_in_core(_misc.define_image) +define_performance = register_key_flags_in_core(_performance.define_performance) +define_distribution = register_key_flags_in_core( + _distribution.define_distribution) + + +help_wrap = _conventions.help_wrap + + +get_num_gpus = _base.get_num_gpus +get_tf_dtype = _performance.get_tf_dtype +get_loss_scale = _performance.get_loss_scale +DTYPE_MAP = _performance.DTYPE_MAP +require_cloud_storage = _device.require_cloud_storage + +def _get_nondefault_flags_as_dict(): + """Returns the nondefault flags as a dict from flag name to value.""" + nondefault_flags = {} + for flag_name in flags.FLAGS: + flag_value = getattr(flags.FLAGS, flag_name) + if (flag_name != flags.FLAGS[flag_name].short_name and + flag_value != flags.FLAGS[flag_name].default): + nondefault_flags[flag_name] = flag_value + return nondefault_flags + + +def get_nondefault_flags_as_str(): + """Returns flags as a string that can be passed as command line arguments. + + E.g., returns: "--batch_size=256 --use_synthetic_data" for the following code + block: + + ``` + flags.FLAGS.batch_size = 256 + flags.FLAGS.use_synthetic_data = True + print(get_nondefault_flags_as_str()) + ``` + + Only flags with nondefault values are returned, as passing default flags as + command line arguments has no effect. 
+ + Returns: + A string with the flags, that can be passed as command line arguments to a + program to use the flags. + """ + nondefault_flags = _get_nondefault_flags_as_dict() + flag_strings = [] + for name, value in sorted(nondefault_flags.items()): + if isinstance(value, bool): + flag_str = '--{}'.format(name) if value else '--no{}'.format(name) + elif isinstance(value, list): + flag_str = '--{}={}'.format(name, ','.join(value)) + else: + flag_str = '--{}={}'.format(name, value) + flag_strings.append(flag_str) + return ' '.join(shlex_quote(flag_str) for flag_str in flag_strings) diff --git a/models/official/utils/flags/flags_test.py b/models/official/utils/flags/flags_test.py new file mode 100644 index 0000000000000000000000000000000000000000..e11a1642242bf134f3a9f1df0908f29b00cecf74 --- /dev/null +++ b/models/official/utils/flags/flags_test.py @@ -0,0 +1,162 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +import unittest + +from absl import flags +import tensorflow as tf + +from official.utils.flags import core as flags_core # pylint: disable=g-bad-import-order + + +def define_flags(): + flags_core.define_base(clean=True, num_gpu=False, stop_threshold=True, + hooks=True, train_epochs=True, + epochs_between_evals=True) + flags_core.define_performance( + num_parallel_calls=True, inter_op=True, intra_op=True, + dynamic_loss_scale=True, loss_scale=True, synthetic_data=True, + dtype=True) + flags_core.define_image() + flags_core.define_benchmark() + + +class BaseTester(unittest.TestCase): + + @classmethod + def setUpClass(cls): + super(BaseTester, cls).setUpClass() + define_flags() + + def test_default_setting(self): + """Test to ensure fields exist and defaults can be set. + """ + + defaults = dict( + data_dir="dfgasf", + model_dir="dfsdkjgbs", + train_epochs=534, + epochs_between_evals=15, + batch_size=256, + hooks=["LoggingTensorHook"], + num_parallel_calls=18, + inter_op_parallelism_threads=5, + intra_op_parallelism_threads=10, + data_format="channels_first" + ) + + flags_core.set_defaults(**defaults) + flags_core.parse_flags() + + for key, value in defaults.items(): + assert flags.FLAGS.get_flag_value(name=key, default=None) == value + + def test_benchmark_setting(self): + defaults = dict( + hooks=["LoggingMetricHook"], + benchmark_log_dir="/tmp/12345", + gcp_project="project_abc", + ) + + flags_core.set_defaults(**defaults) + flags_core.parse_flags() + + for key, value in defaults.items(): + assert flags.FLAGS.get_flag_value(name=key, default=None) == value + + def test_booleans(self): + """Test to ensure boolean flags trigger as expected. 
+ """ + + flags_core.parse_flags([__file__, "--use_synthetic_data"]) + + assert flags.FLAGS.use_synthetic_data + + def test_parse_dtype_info(self): + flags_core.parse_flags([__file__, "--dtype", "fp16"]) + self.assertEqual(flags_core.get_tf_dtype(flags.FLAGS), tf.float16) + self.assertEqual(flags_core.get_loss_scale(flags.FLAGS, + default_for_fp16=2), 2) + + flags_core.parse_flags( + [__file__, "--dtype", "fp16", "--loss_scale", "5"]) + self.assertEqual(flags_core.get_loss_scale(flags.FLAGS, + default_for_fp16=2), 5) + + flags_core.parse_flags( + [__file__, "--dtype", "fp16", "--loss_scale", "dynamic"]) + self.assertEqual(flags_core.get_loss_scale(flags.FLAGS, + default_for_fp16=2), "dynamic") + + flags_core.parse_flags([__file__, "--dtype", "fp32"]) + self.assertEqual(flags_core.get_tf_dtype(flags.FLAGS), tf.float32) + self.assertEqual(flags_core.get_loss_scale(flags.FLAGS, + default_for_fp16=2), 1) + + flags_core.parse_flags([__file__, "--dtype", "fp32", "--loss_scale", "5"]) + self.assertEqual(flags_core.get_loss_scale(flags.FLAGS, + default_for_fp16=2), 5) + + + with self.assertRaises(SystemExit): + flags_core.parse_flags([__file__, "--dtype", "int8"]) + + with self.assertRaises(SystemExit): + flags_core.parse_flags([__file__, "--dtype", "fp16", + "--loss_scale", "abc"]) + + def test_get_nondefault_flags_as_str(self): + defaults = dict( + clean=True, + data_dir="abc", + hooks=["LoggingTensorHook"], + stop_threshold=1.5, + use_synthetic_data=False + ) + flags_core.set_defaults(**defaults) + flags_core.parse_flags() + + expected_flags = "" + self.assertEqual(flags_core.get_nondefault_flags_as_str(), expected_flags) + + flags.FLAGS.clean = False + expected_flags += "--noclean" + self.assertEqual(flags_core.get_nondefault_flags_as_str(), expected_flags) + + flags.FLAGS.data_dir = "xyz" + expected_flags += " --data_dir=xyz" + self.assertEqual(flags_core.get_nondefault_flags_as_str(), expected_flags) + + flags.FLAGS.hooks = ["aaa", "bbb", "ccc"] + expected_flags += " --hooks=aaa,bbb,ccc" + self.assertEqual(flags_core.get_nondefault_flags_as_str(), expected_flags) + + flags.FLAGS.stop_threshold = 3. + expected_flags += " --stop_threshold=3.0" + self.assertEqual(flags_core.get_nondefault_flags_as_str(), expected_flags) + + flags.FLAGS.use_synthetic_data = True + expected_flags += " --use_synthetic_data" + self.assertEqual(flags_core.get_nondefault_flags_as_str(), expected_flags) + + # Assert that explicit setting a flag to its default value does not cause it + # to appear in the string + flags.FLAGS.use_synthetic_data = False + expected_flags = expected_flags[:-len(" --use_synthetic_data")] + self.assertEqual(flags_core.get_nondefault_flags_as_str(), expected_flags) + + +if __name__ == "__main__": + unittest.main() diff --git a/models/official/utils/flags/guidelines.md b/models/official/utils/flags/guidelines.md new file mode 100644 index 0000000000000000000000000000000000000000..db963aabebccad8614a1b59ea7ff9b828bcee3b4 --- /dev/null +++ b/models/official/utils/flags/guidelines.md @@ -0,0 +1,65 @@ +# Using flags in official models + +1. **All common flags must be incorporated in the models.** + + Common flags (i.e. batch_size, model_dir, etc.) are provided by various flag definition functions, + and channeled through `official.utils.flags.core`. 
For instance, to define common supervised + learning parameters one could use the following code: + + ```python + from absl import app as absl_app + from absl import flags + + from official.utils.flags import core as flags_core + + + def define_flags(): + flags_core.define_base() + flags.adopt_module_key_flags(flags_core) + + + def main(_): + flags_obj = flags.FLAGS + print(flags_obj) + + + if __name__ == "__main__": + absl_app.run(main) + ``` +2. **Validate flag values.** + + See the [Validators](#validators) section for implementation details. + + Validators in the official model repo should not access the file system (for example, to verify + that files exist), due to the strict ordering requirements. + +3. **Flag values should not be mutated.** + + Instead of mutating flag values, use getter functions to return the desired values. An example + getter function is the `get_tf_dtype` function below: + + ```python + # Map string to TensorFlow dtype + DTYPE_MAP = { + "fp16": tf.float16, + "fp32": tf.float32, + } + + def get_tf_dtype(flags_obj): + if getattr(flags_obj, "fp16_implementation", None) == "graph_rewrite": + # If the graph_rewrite is used, we build the graph with fp32, and let the + # graph rewrite change ops to fp16. + return tf.float32 + return DTYPE_MAP[flags_obj.dtype] + + + def main(_): + flags_obj = flags.FLAGS + + # Do not mutate flags_obj + # if flags_obj.fp16_implementation == "graph_rewrite": + # flags_obj.dtype = "float32" # Don't do this + + print(get_tf_dtype(flags_obj)) + ... + ``` \ No newline at end of file diff --git a/models/official/utils/hyperparams_flags.py b/models/official/utils/hyperparams_flags.py new file mode 100644 index 0000000000000000000000000000000000000000..4b8150677e43b68a68b9234dd852f6df894ea849 --- /dev/null +++ b/models/official/utils/hyperparams_flags.py @@ -0,0 +1,128 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+# ============================================================================== +"""Common flags for importing hyperparameters.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +from absl import flags +from official.utils.flags import core as flags_core + +FLAGS = flags.FLAGS + + +def define_gin_flags(): + """Define common gin configurable flags.""" + flags.DEFINE_multi_string('gin_file', None, + 'List of paths to the config files.') + flags.DEFINE_multi_string( + 'gin_param', None, 'Newline separated list of Gin parameter bindings.') + + +def define_common_hparams_flags(): + """Define the common flags across models.""" + + flags.DEFINE_string( + 'model_dir', + default=None, + help=('The directory where the model and training/evaluation summaries' + 'are stored.')) + + flags.DEFINE_integer( + 'train_batch_size', default=None, help='Batch size for training.') + + flags.DEFINE_integer( + 'eval_batch_size', default=None, help='Batch size for evaluation.') + + flags.DEFINE_string( + 'precision', + default=None, + help=('Precision to use; one of: {bfloat16, float32}')) + + flags.DEFINE_string( + 'config_file', + default=None, + help=('A YAML file which specifies overrides. Note that this file can be ' + 'used as an override template to override the default parameters ' + 'specified in Python. If the same parameter is specified in both ' + '`--config_file` and `--params_override`, the one in ' + '`--params_override` will be used finally.')) + + flags.DEFINE_string( + 'params_override', + default=None, + help=('a YAML/JSON string or a YAML file which specifies additional ' + 'overrides over the default parameters and those specified in ' + '`--config_file`. Note that this is supposed to be used only to ' + 'override the model parameters, but not the parameters like TPU ' + 'specific flags. One canonical use case of `--config_file` and ' + '`--params_override` is users first define a template config file ' + 'using `--config_file`, then use `--params_override` to adjust the ' + 'minimal set of tuning parameters, for example setting up different' + ' `train_batch_size`. ' + 'The final override order of parameters: default_model_params --> ' + 'params from config_file --> params in params_override.' + 'See also the help message of `--config_file`.')) + flags.DEFINE_integer('save_checkpoint_freq', None, + 'Number of steps to save checkpoint.') + + +def initialize_common_flags(): + """Define the common flags across models.""" + define_common_hparams_flags() + + flags_core.define_device(tpu=True) + flags_core.define_base( + num_gpu=True, model_dir=False, data_dir=False, batch_size=False) + flags_core.define_distribution(worker_hosts=True, task_index=True) + flags_core.define_performance(all_reduce_alg=True, num_packs=True) + + # Reset the default value of num_gpus to zero. + FLAGS.num_gpus = 0 + + flags.DEFINE_string( + 'strategy_type', 'mirrored', 'Type of distribute strategy.' + 'One of mirrored, tpu and multiworker.') + + +def strategy_flags_dict(): + """Returns TPU and/or GPU related flags in a dictionary.""" + return { + 'distribution_strategy': FLAGS.strategy_type, + # TPUStrategy related flags. + 'tpu': FLAGS.tpu, + # MultiWorkerMirroredStrategy related flags. 
+ 'all_reduce_alg': FLAGS.all_reduce_alg, + 'worker_hosts': FLAGS.worker_hosts, + 'task_index': FLAGS.task_index, + # MirroredStrategy and OneDeviceStrategy + 'num_gpus': FLAGS.num_gpus, + 'num_packs': FLAGS.num_packs, + } + + +def hparam_flags_dict(): + """Returns model params related flags in a dictionary.""" + return { + 'data_dir': FLAGS.data_dir, + 'model_dir': FLAGS.model_dir, + 'train_batch_size': FLAGS.train_batch_size, + 'eval_batch_size': FLAGS.eval_batch_size, + 'precision': FLAGS.precision, + 'config_file': FLAGS.config_file, + 'params_override': FLAGS.params_override, + } diff --git a/models/official/utils/misc/__init__.py b/models/official/utils/misc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/official/utils/misc/callstack_sampler.py b/models/official/utils/misc/callstack_sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..984f133e9c68a73569717bff47154110c718e3ce --- /dev/null +++ b/models/official/utils/misc/callstack_sampler.py @@ -0,0 +1,62 @@ +"""A simple Python callstack sampler.""" + +import contextlib +import datetime +import signal +import traceback + + +class CallstackSampler(object): + """A simple signal-based Python callstack sampler. + """ + + def __init__(self, interval=None): + self.stacks = [] + self.interval = 0.001 if interval is None else interval + + def _sample(self, signum, frame): + """Samples the current stack.""" + del signum + stack = traceback.extract_stack(frame) + formatted_stack = [] + formatted_stack.append(datetime.datetime.utcnow()) + for filename, lineno, function_name, text in stack: + formatted_frame = '{}:{}({})({})'.format(filename, lineno, function_name, + text) + formatted_stack.append(formatted_frame) + self.stacks.append(formatted_stack) + signal.setitimer(signal.ITIMER_VIRTUAL, self.interval, 0) + + @contextlib.contextmanager + def profile(self): + signal.signal(signal.SIGVTALRM, self._sample) + signal.setitimer(signal.ITIMER_VIRTUAL, self.interval, 0) + try: + yield + finally: + signal.setitimer(signal.ITIMER_VIRTUAL, 0) + + def save(self, fname): + with open(fname, 'w') as f: + for s in self.stacks: + for l in s: + f.write('%s\n' % l) + f.write('\n') + + +@contextlib.contextmanager +def callstack_sampling(filename, interval=None): + """Periodically samples the Python callstack. + + Args: + filename: the filename + interval: the sampling interval, in seconds. Defaults to 0.001. + + Yields: + nothing + """ + sampler = CallstackSampler(interval=interval) + with sampler.profile(): + yield + sampler.save(filename) + diff --git a/models/official/utils/misc/distribution_utils.py b/models/official/utils/misc/distribution_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e4823a9b1e6f5cb8d1ff4d7d86340d8656934a6e --- /dev/null +++ b/models/official/utils/misc/distribution_utils.py @@ -0,0 +1,205 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
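A hedged usage sketch for the `callstack_sampling` context manager defined in callstack_sampler.py above; the output path and the workload are illustrative:

```python
from official.utils.misc import callstack_sampler


def busy_work():
  total = 0
  for i in range(1_000_000):
    total += i * i
  return total


# Samples the Python callstack roughly every millisecond while the block runs,
# then writes the collected stacks (one timestamped block per sample) to the file.
with callstack_sampler.callstack_sampling("/tmp/callstacks.txt", interval=0.001):
  busy_work()
```

Because the sampler relies on `SIGVTALRM`, it is assumed to run on the main thread of a Unix process.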
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Helper functions for running models in a distributed setting.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import json +import os +import random +import string + +from absl import logging +import tensorflow.compat.v2 as tf + +from official.utils.misc import tpu_lib + + +def _collective_communication(all_reduce_alg): + """Return a CollectiveCommunication based on all_reduce_alg. + + Args: + all_reduce_alg: a string specifying which collective communication to pick, + or None. + + Returns: + tf.distribute.experimental.CollectiveCommunication object + + Raises: + ValueError: if `all_reduce_alg` not in [None, "ring", "nccl"] + """ + collective_communication_options = { + None: tf.distribute.experimental.CollectiveCommunication.AUTO, + "ring": tf.distribute.experimental.CollectiveCommunication.RING, + "nccl": tf.distribute.experimental.CollectiveCommunication.NCCL + } + if all_reduce_alg not in collective_communication_options: + raise ValueError( + "When used with `multi_worker_mirrored`, valid values for " + "all_reduce_alg are [`ring`, `nccl`]. Supplied value: {}".format( + all_reduce_alg)) + return collective_communication_options[all_reduce_alg] + + +def _mirrored_cross_device_ops(all_reduce_alg, num_packs): + """Return a CrossDeviceOps based on all_reduce_alg and num_packs. + + Args: + all_reduce_alg: a string specifying which cross device op to pick, or None. + num_packs: an integer specifying number of packs for the cross device op. + + Returns: + tf.distribute.CrossDeviceOps object or None. + + Raises: + ValueError: if `all_reduce_alg` not in [None, "nccl", "hierarchical_copy"]. + """ + if all_reduce_alg is None: + return None + mirrored_all_reduce_options = { + "nccl": tf.distribute.NcclAllReduce, + "hierarchical_copy": tf.distribute.HierarchicalCopyAllReduce + } + if all_reduce_alg not in mirrored_all_reduce_options: + raise ValueError( + "When used with `mirrored`, valid values for all_reduce_alg are " + "[`nccl`, `hierarchical_copy`]. Supplied value: {}".format( + all_reduce_alg)) + cross_device_ops_class = mirrored_all_reduce_options[all_reduce_alg] + return cross_device_ops_class(num_packs=num_packs) + + +def get_distribution_strategy(distribution_strategy="mirrored", + num_gpus=0, + all_reduce_alg=None, + num_packs=1, + tpu_address=None): + """Return a DistributionStrategy for running the model. + + Args: + distribution_strategy: a string specifying which distribution strategy to + use. Accepted values are "off", "one_device", "mirrored", + "parameter_server", "multi_worker_mirrored", and "tpu" -- case insensitive. + "off" means not to use Distribution Strategy; "tpu" means to use + TPUStrategy using `tpu_address`. + num_gpus: Number of GPUs to run this model. + all_reduce_alg: Optional. Specifies which algorithm to use when performing + all-reduce. For `MirroredStrategy`, valid values are "nccl" and + "hierarchical_copy". For `MultiWorkerMirroredStrategy`, valid values are + "ring" and "nccl". If None, DistributionStrategy will choose based on + device topology. + num_packs: Optional. Sets the `num_packs` in `tf.distribute.NcclAllReduce` + or `tf.distribute.HierarchicalCopyAllReduce` for `MirroredStrategy`. + tpu_address: Optional. String that represents TPU to connect to. 
Must not + be None if `distribution_strategy` is set to `tpu`. + Returns: + tf.distribute.DistibutionStrategy object. + Raises: + ValueError: if `distribution_strategy` is "off" or "one_device" and + `num_gpus` is larger than 1; or `num_gpus` is negative or if + `distribution_strategy` is `tpu` but `tpu_address` is not specified. + """ + if num_gpus < 0: + raise ValueError("`num_gpus` can not be negative.") + + distribution_strategy = distribution_strategy.lower() + if distribution_strategy == "off": + if num_gpus > 1: + raise ValueError( + "When {} GPUs are specified, distribution_strategy " + "flag cannot be set to `off`.".format(num_gpus)) + return None + + if distribution_strategy == "tpu": + # When tpu_address is an empty string, we communicate with local TPUs. + cluster_resolver = tpu_lib.tpu_initialize(tpu_address) + return tf.distribute.experimental.TPUStrategy(cluster_resolver) + + if distribution_strategy == "multi_worker_mirrored": + return tf.distribute.experimental.MultiWorkerMirroredStrategy( + communication=_collective_communication(all_reduce_alg)) + + if distribution_strategy == "one_device": + if num_gpus == 0: + return tf.distribute.OneDeviceStrategy("device:CPU:0") + if num_gpus > 1: + raise ValueError("`OneDeviceStrategy` can not be used for more than " + "one device.") + return tf.distribute.OneDeviceStrategy("device:GPU:0") + + if distribution_strategy == "mirrored": + if num_gpus == 0: + devices = ["device:CPU:0"] + else: + devices = ["device:GPU:%d" % i for i in range(num_gpus)] + return tf.distribute.MirroredStrategy( + devices=devices, + cross_device_ops=_mirrored_cross_device_ops(all_reduce_alg, num_packs)) + + if distribution_strategy == "parameter_server": + return tf.distribute.experimental.ParameterServerStrategy() + + raise ValueError( + "Unrecognized Distribution Strategy: %r" % distribution_strategy) + + +def configure_cluster(worker_hosts=None, task_index=-1): + """Set multi-worker cluster spec in TF_CONFIG environment variable. + + Args: + worker_hosts: comma-separated list of worker ip:port pairs. + + Returns: + Number of workers in the cluster. + """ + tf_config = json.loads(os.environ.get("TF_CONFIG", "{}")) + if tf_config: + num_workers = (len(tf_config["cluster"].get("chief", [])) + + len(tf_config["cluster"].get("worker", []))) + elif worker_hosts: + workers = worker_hosts.split(",") + num_workers = len(workers) + if num_workers > 1 and task_index < 0: + raise ValueError("Must specify task_index when number of workers > 1") + task_index = 0 if num_workers == 1 else task_index + os.environ["TF_CONFIG"] = json.dumps({ + "cluster": { + "worker": workers + }, + "task": {"type": "worker", "index": task_index} + }) + else: + num_workers = 1 + return num_workers + + +def get_strategy_scope(strategy): + if strategy: + strategy_scope = strategy.scope() + else: + strategy_scope = DummyContextManager() + + return strategy_scope + + +class DummyContextManager(object): + + def __enter__(self): + pass + + def __exit__(self, *args): + pass diff --git a/models/official/utils/misc/distribution_utils_test.py b/models/official/utils/misc/distribution_utils_test.py new file mode 100644 index 0000000000000000000000000000000000000000..4fd7bff09daaf2f5c85af2a0e7b7efbd00dc42c1 --- /dev/null +++ b/models/official/utils/misc/distribution_utils_test.py @@ -0,0 +1,49 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +""" Tests for distribution util functions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v2 as tf + +from official.utils.misc import distribution_utils + + +class GetDistributionStrategyTest(tf.test.TestCase): + """Tests for get_distribution_strategy.""" + def test_one_device_strategy_cpu(self): + ds = distribution_utils.get_distribution_strategy(num_gpus=0) + self.assertEquals(ds.num_replicas_in_sync, 1) + self.assertEquals(len(ds.extended.worker_devices), 1) + self.assertIn('CPU', ds.extended.worker_devices[0]) + + def test_one_device_strategy_gpu(self): + ds = distribution_utils.get_distribution_strategy(num_gpus=1) + self.assertEquals(ds.num_replicas_in_sync, 1) + self.assertEquals(len(ds.extended.worker_devices), 1) + self.assertIn('GPU', ds.extended.worker_devices[0]) + + def test_mirrored_strategy(self): + ds = distribution_utils.get_distribution_strategy(num_gpus=5) + self.assertEquals(ds.num_replicas_in_sync, 5) + self.assertEquals(len(ds.extended.worker_devices), 5) + for device in ds.extended.worker_devices: + self.assertIn('GPU', device) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/utils/misc/keras_utils.py b/models/official/utils/misc/keras_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2cca51f1d24701802b0fd7cfc62a84306eedded2 --- /dev/null +++ b/models/official/utils/misc/keras_utils.py @@ -0,0 +1,199 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Helper functions for the Keras implementations of models.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import multiprocessing +import os +import time + +from absl import logging +import tensorflow as tf + + +class BatchTimestamp(object): + """A structure to store batch time stamp.""" + + def __init__(self, batch_index, timestamp): + self.batch_index = batch_index + self.timestamp = timestamp + + def __repr__(self): + return "'BatchTimestamp'".format( + self.batch_index, self.timestamp) + + +class TimeHistory(tf.keras.callbacks.Callback): + """Callback for Keras models.""" + + def __init__(self, batch_size, log_steps, initial_step=0, logdir=None): + """Callback for logging performance. + + Args: + batch_size: Total batch size. 
+ log_steps: Interval of steps between logging of batch level stats. + initial_step: Optional, initial step. + logdir: Optional directory to write TensorBoard summaries. + """ + # TODO(wcromar): remove this parameter and rely on `logs` parameter of + # on_train_batch_end() + self.batch_size = batch_size + super(TimeHistory, self).__init__() + self.log_steps = log_steps + self.last_log_step = initial_step + self.steps_before_epoch = initial_step + self.steps_in_epoch = 0 + self.start_time = None + + if logdir: + self.summary_writer = tf.summary.create_file_writer(logdir) + else: + self.summary_writer = None + + # Logs start of step 1 then end of each step based on log_steps interval. + self.timestamp_log = [] + + # Records the time each epoch takes to run from start to finish of epoch. + self.epoch_runtime_log = [] + + @property + def global_steps(self): + """The current 1-indexed global step.""" + return self.steps_before_epoch + self.steps_in_epoch + + @property + def average_steps_per_second(self): + """The average training steps per second across all epochs.""" + return self.global_steps / sum(self.epoch_runtime_log) + + @property + def average_examples_per_second(self): + """The average number of training examples per second across all epochs.""" + return self.average_steps_per_second * self.batch_size + + def get_examples_per_sec(self, warmup=1): + """Calculates examples/sec through timestamp_log and skip warmup period.""" + # First entry in timestamp_log is the start of the step 1. The rest of the + # entries are the end of each step recorded. + time_log = self.timestamp_log + seconds = time_log[-1].timestamp - time_log[warmup].timestamp + steps = time_log[-1].batch_index - time_log[warmup].batch_index + return self.batch_size * steps / seconds + + def get_startup_time(self, start_time_sec): + return self.timestamp_log[0].timestamp - start_time_sec + + def on_train_end(self, logs=None): + self.train_finish_time = time.time() + + if self.summary_writer: + self.summary_writer.flush() + + def on_epoch_begin(self, epoch, logs=None): + self.epoch_start = time.time() + + def on_batch_begin(self, batch, logs=None): + if not self.start_time: + self.start_time = time.time() + + # Record the timestamp of the first global step + if not self.timestamp_log: + self.timestamp_log.append(BatchTimestamp(self.global_steps, + self.start_time)) + + def on_batch_end(self, batch, logs=None): + """Records elapse time of the batch and calculates examples per second.""" + self.steps_in_epoch = batch + 1 + steps_since_last_log = self.global_steps - self.last_log_step + if steps_since_last_log >= self.log_steps: + now = time.time() + elapsed_time = now - self.start_time + steps_per_second = steps_since_last_log / elapsed_time + examples_per_second = steps_per_second * self.batch_size + + self.timestamp_log.append(BatchTimestamp(self.global_steps, now)) + logging.info( + 'TimeHistory: %.2f seconds, %.2f examples/second between steps %d ' + 'and %d', elapsed_time, examples_per_second, self.last_log_step, + self.global_steps) + + if self.summary_writer: + with self.summary_writer.as_default(): + tf.summary.scalar('steps_per_second', steps_per_second, + self.global_steps) + tf.summary.scalar('examples_per_second', examples_per_second, + self.global_steps) + + self.last_log_step = self.global_steps + self.start_time = None + + def on_epoch_end(self, epoch, logs=None): + epoch_run_time = time.time() - self.epoch_start + self.epoch_runtime_log.append(epoch_run_time) + + self.steps_before_epoch += self.steps_in_epoch 
+ self.steps_in_epoch = 0 + + +class SimpleCheckpoint(tf.keras.callbacks.Callback): + """Keras callback to save tf.train.Checkpoints.""" + + def __init__(self, checkpoint_manager): + super(SimpleCheckpoint, self).__init__() + self.checkpoint_manager = checkpoint_manager + + def on_epoch_end(self, epoch, logs=None): + step_counter = self.checkpoint_manager._step_counter.numpy() # pylint: disable=protected-access + self.checkpoint_manager.save(checkpoint_number=step_counter) + + +def set_session_config(enable_xla=False): + """Sets the session config.""" + if enable_xla: + tf.config.optimizer.set_jit(True) + +# TODO(hongkuny): remove set_config_v2 globally. +set_config_v2 = set_session_config + + +def set_gpu_thread_mode_and_count(gpu_thread_mode, + datasets_num_private_threads, + num_gpus, per_gpu_thread_count): + """Set GPU thread mode and count, and adjust dataset threads count.""" + cpu_count = multiprocessing.cpu_count() + logging.info('Logical CPU cores: %s', cpu_count) + + # Allocate private thread pool for each GPU to schedule and launch kernels + per_gpu_thread_count = per_gpu_thread_count or 2 + os.environ['TF_GPU_THREAD_MODE'] = gpu_thread_mode + os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count) + logging.info('TF_GPU_THREAD_COUNT: %s', + os.environ['TF_GPU_THREAD_COUNT']) + logging.info('TF_GPU_THREAD_MODE: %s', + os.environ['TF_GPU_THREAD_MODE']) + + # Limit data preprocessing threadpool to CPU cores minus number of total GPU + # private threads and memory copy threads. + total_gpu_thread_count = per_gpu_thread_count * num_gpus + num_runtime_threads = num_gpus + if not datasets_num_private_threads: + datasets_num_private_threads = min( + cpu_count - total_gpu_thread_count - num_runtime_threads, + num_gpus * 8) + logging.info('Set datasets_num_private_threads to %s', + datasets_num_private_threads) diff --git a/models/official/utils/misc/model_helpers.py b/models/official/utils/misc/model_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..9a44e50ac46162821dcbfacc55b5b1e5c30eba8f --- /dev/null +++ b/models/official/utils/misc/model_helpers.py @@ -0,0 +1,95 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Miscellaneous functions that can be called by models.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numbers + +from absl import logging +import tensorflow as tf + +from tensorflow.python.util import nest +# pylint:disable=logging-format-interpolation + + +def past_stop_threshold(stop_threshold, eval_metric): + """Return a boolean representing whether a model should be stopped. + + Args: + stop_threshold: float, the threshold above which a model should stop + training. + eval_metric: float, the current value of the relevant metric to check. + + Returns: + True if training should stop, False otherwise. 
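A hedged sketch of attaching the `TimeHistory` callback from keras_utils.py above to `model.fit`; the model, data, batch size, and log directory are illustrative:

```python
import tensorflow as tf

from official.utils.misc import keras_utils

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model.compile(optimizer="sgd", loss="mse")

x = tf.random.normal([256, 8])
y = tf.random.normal([256, 1])

# Log step and example throughput every 5 steps; also write TensorBoard scalars.
time_callback = keras_utils.TimeHistory(batch_size=32, log_steps=5, logdir="./logs")
model.fit(x, y, batch_size=32, epochs=2, callbacks=[time_callback])

print("average examples/sec:", time_callback.average_examples_per_second)
```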
+ + Raises: + ValueError: if either stop_threshold or eval_metric is not a number + """ + if stop_threshold is None: + return False + + if not isinstance(stop_threshold, numbers.Number): + raise ValueError("Threshold for checking stop conditions must be a number.") + if not isinstance(eval_metric, numbers.Number): + raise ValueError("Eval metric being checked against stop conditions " + "must be a number.") + + if eval_metric >= stop_threshold: + logging.info("Stop threshold of {} was passed with metric value {}.".format( + stop_threshold, eval_metric)) + return True + + return False + + +def generate_synthetic_data( + input_shape, input_value=0, input_dtype=None, label_shape=None, + label_value=0, label_dtype=None): + """Create a repeating dataset with constant values. + + Args: + input_shape: a tf.TensorShape object or nested tf.TensorShapes. The shape of + the input data. + input_value: Value of each input element. + input_dtype: Input dtype. If None, will be inferred by the input value. + label_shape: a tf.TensorShape object or nested tf.TensorShapes. The shape of + the label data. + label_value: Value of each input element. + label_dtype: Input dtype. If None, will be inferred by the target value. + + Returns: + Dataset of tensors or tuples of tensors (if label_shape is set). + """ + # TODO(kathywu): Replace with SyntheticDataset once it is in contrib. + element = input_element = nest.map_structure( + lambda s: tf.constant(input_value, input_dtype, s), input_shape) + + if label_shape: + label_element = nest.map_structure( + lambda s: tf.constant(label_value, label_dtype, s), label_shape) + element = (input_element, label_element) + + return tf.data.Dataset.from_tensors(element).repeat() + + +def apply_clean(flags_obj): + if flags_obj.clean and tf.io.gfile.exists(flags_obj.model_dir): + logging.info("--clean flag set. Removing existing model dir:" + " {}".format(flags_obj.model_dir)) + tf.io.gfile.rmtree(flags_obj.model_dir) diff --git a/models/official/utils/misc/model_helpers_test.py b/models/official/utils/misc/model_helpers_test.py new file mode 100644 index 0000000000000000000000000000000000000000..9f2487e4223e7b46854db918114d2507fc891155 --- /dev/null +++ b/models/official/utils/misc/model_helpers_test.py @@ -0,0 +1,125 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
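A hedged sketch exercising `generate_synthetic_data` and `past_stop_threshold` from model_helpers.py in eager mode; the shapes, values, and metric numbers are illustrative:

```python
import tensorflow as tf

from official.utils.misc import model_helpers

# A repeating dataset of constant 5-element float inputs and scalar int labels.
dataset = model_helpers.generate_synthetic_data(
    input_shape=tf.TensorShape([5]), input_value=1.0, input_dtype=tf.float32,
    label_shape=tf.TensorShape([]), label_value=3, label_dtype=tf.int32)

for features, label in dataset.take(2):
  print(features.numpy(), label.numpy())

# Returns True once the tracked metric meets or exceeds the threshold.
if model_helpers.past_stop_threshold(stop_threshold=0.95, eval_metric=0.97):
  print("stop training early")
```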
+# ============================================================================== +"""Tests for Model Helper functions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf # pylint: disable=g-bad-import-order + +from official.utils.misc import model_helpers + + +class PastStopThresholdTest(tf.test.TestCase): + """Tests for past_stop_threshold.""" + + def setUp(self): + super(PastStopThresholdTest, self).setUp() + tf.compat.v1.disable_eager_execution() + + def test_past_stop_threshold(self): + """Tests for normal operating conditions.""" + self.assertTrue(model_helpers.past_stop_threshold(0.54, 1)) + self.assertTrue(model_helpers.past_stop_threshold(54, 100)) + self.assertFalse(model_helpers.past_stop_threshold(0.54, 0.1)) + self.assertFalse(model_helpers.past_stop_threshold(-0.54, -1.5)) + self.assertTrue(model_helpers.past_stop_threshold(-0.54, 0)) + self.assertTrue(model_helpers.past_stop_threshold(0, 0)) + self.assertTrue(model_helpers.past_stop_threshold(0.54, 0.54)) + + def test_past_stop_threshold_none_false(self): + """Tests that check None returns false.""" + self.assertFalse(model_helpers.past_stop_threshold(None, -1.5)) + self.assertFalse(model_helpers.past_stop_threshold(None, None)) + self.assertFalse(model_helpers.past_stop_threshold(None, 1.5)) + # Zero should be okay, though. + self.assertTrue(model_helpers.past_stop_threshold(0, 1.5)) + + def test_past_stop_threshold_not_number(self): + """Tests for error conditions.""" + with self.assertRaises(ValueError): + model_helpers.past_stop_threshold("str", 1) + + with self.assertRaises(ValueError): + model_helpers.past_stop_threshold("str", tf.constant(5)) + + with self.assertRaises(ValueError): + model_helpers.past_stop_threshold("str", "another") + + with self.assertRaises(ValueError): + model_helpers.past_stop_threshold(0, None) + + with self.assertRaises(ValueError): + model_helpers.past_stop_threshold(0.7, "str") + + with self.assertRaises(ValueError): + model_helpers.past_stop_threshold(tf.constant(4), None) + + +class SyntheticDataTest(tf.test.TestCase): + """Tests for generate_synthetic_data.""" + + def test_generate_synethetic_data(self): + input_element, label_element = tf.compat.v1.data.make_one_shot_iterator( + model_helpers.generate_synthetic_data(input_shape=tf.TensorShape([5]), + input_value=123, + input_dtype=tf.float32, + label_shape=tf.TensorShape([]), + label_value=456, + label_dtype=tf.int32)).get_next() + + with self.session() as sess: + for n in range(5): + inp, lab = sess.run((input_element, label_element)) + self.assertAllClose(inp, [123., 123., 123., 123., 123.]) + self.assertEquals(lab, 456) + + def test_generate_only_input_data(self): + d = model_helpers.generate_synthetic_data( + input_shape=tf.TensorShape([4]), + input_value=43.5, + input_dtype=tf.float32) + + element = tf.compat.v1.data.make_one_shot_iterator(d).get_next() + self.assertFalse(isinstance(element, tuple)) + + with self.session() as sess: + inp = sess.run(element) + self.assertAllClose(inp, [43.5, 43.5, 43.5, 43.5]) + + def test_generate_nested_data(self): + d = model_helpers.generate_synthetic_data( + input_shape={'a': tf.TensorShape([2]), + 'b': {'c': tf.TensorShape([3]), 'd': tf.TensorShape([])}}, + input_value=1.1) + + element = tf.compat.v1.data.make_one_shot_iterator(d).get_next() + self.assertIn('a', element) + self.assertIn('b', element) + self.assertEquals(len(element['b']), 2) + self.assertIn('c', element['b']) + self.assertIn('d', 
element['b']) + self.assertNotIn('c', element) + + with self.session() as sess: + inp = sess.run(element) + self.assertAllClose(inp['a'], [1.1, 1.1]) + self.assertAllClose(inp['b']['c'], [1.1, 1.1, 1.1]) + self.assertAllClose(inp['b']['d'], 1.1) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/utils/misc/tpu_lib.py b/models/official/utils/misc/tpu_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..4d4cddb1c6b015091ed2da57df49277e3008c252 --- /dev/null +++ b/models/official/utils/misc/tpu_lib.py @@ -0,0 +1,34 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Initializes TPU system for TF 2.0.""" + +import tensorflow as tf + + +def tpu_initialize(tpu_address): + """Initializes TPU for TF 2.0 training. + + Args: + tpu_address: string, bns address of master TPU worker. + + Returns: + A TPUClusterResolver. + """ + cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver( + tpu=tpu_address) + if tpu_address not in ('', 'local'): + tf.config.experimental_connect_to_cluster(cluster_resolver) + tf.tpu.experimental.initialize_tpu_system(cluster_resolver) + return cluster_resolver diff --git a/models/official/utils/registry.py b/models/official/utils/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..4aff59813f11b1085860faac8c62ca8ce9e0a1f1 --- /dev/null +++ b/models/official/utils/registry.py @@ -0,0 +1,98 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Registry utility.""" + + +def register(registered_collection, reg_key): + """Register decorated function or class to collection. + + Register decorated function or class into registered_collection, in a + hierarchical order. For example, when reg_key="my_model/my_exp/my_config_0" + the decorated function or class is stored under + registered_collection["my_model"]["my_exp"]["my_config_0"]. + This decorator is supposed to be used together with the lookup() function in + this file. + + Args: + registered_collection: a dictionary. The decorated function or class will be + put into this collection. + reg_key: The key for retrieving the registered function or class. 
If reg_key + is a string, it can be hierarchical like my_model/my_exp/my_config_0 + Returns: + A decorator function + Raises: + KeyError: when function or class to register already exists. + """ + def decorator(fn_or_cls): + """Put fn_or_cls in the dictionary.""" + if isinstance(reg_key, str): + hierarchy = reg_key.split("/") + collection = registered_collection + for h_idx, entry_name in enumerate(hierarchy[:-1]): + if entry_name not in collection: + collection[entry_name] = {} + collection = collection[entry_name] + if not isinstance(collection, dict): + raise KeyError( + "Collection path {} at position {} already registered as " + "a function or class.".format(entry_name, h_idx)) + leaf_reg_key = hierarchy[-1] + else: + collection = registered_collection + leaf_reg_key = reg_key + + if leaf_reg_key in collection: + raise KeyError("Function or class {} registered multiple times.".format( + leaf_reg_key)) + + collection[leaf_reg_key] = fn_or_cls + return fn_or_cls + return decorator + + +def lookup(registered_collection, reg_key): + """Lookup and return decorated function or class in the collection. + + Lookup decorated function or class in registered_collection, in a + hierarchical order. For example, when + reg_key="my_model/my_exp/my_config_0", + this function will return + registered_collection["my_model"]["my_exp"]["my_config_0"]. + + Args: + registered_collection: a dictionary. The decorated function or class will be + retrieved from this collection. + reg_key: The key for retrieving the registered function or class. If reg_key + is a string, it can be hierarchical like my_model/my_exp/my_config_0 + Returns: + The registered function or class. + Raises: + LookupError: when reg_key cannot be found. + """ + if isinstance(reg_key, str): + hierarchy = reg_key.split("/") + collection = registered_collection + for h_idx, entry_name in enumerate(hierarchy): + if entry_name not in collection: + raise LookupError( + "collection path {} at position {} never registered.".format( + entry_name, h_idx)) + collection = collection[entry_name] + return collection + else: + if reg_key not in registered_collection: + raise LookupError("registration key {} never registered.".format(reg_key)) + return registered_collection[reg_key] diff --git a/models/official/utils/registry_test.py b/models/official/utils/registry_test.py new file mode 100644 index 0000000000000000000000000000000000000000..6cb230c75891aaebb8306bb84a235e2d2ecd70e5 --- /dev/null +++ b/models/official/utils/registry_test.py @@ -0,0 +1,85 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for registry.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf +from official.utils import registry + + +class RegistryTest(tf.test.TestCase): + + def test_register(self): + collection = {} + + @registry.register(collection, 'functions/func_0') + def func_test(): + pass + self.assertEqual( + registry.lookup(collection, 'functions/func_0'), func_test) + + @registry.register(collection, 'classes/cls_0') + class ClassRegistryKey: + pass + self.assertEqual( + registry.lookup(collection, 'classes/cls_0'), ClassRegistryKey) + + @registry.register(collection, ClassRegistryKey) + class ClassRegistryValue: + pass + self.assertEqual( + registry.lookup(collection, ClassRegistryKey), ClassRegistryValue) + + def test_register_hierarchy(self): + collection = {} + + @registry.register(collection, 'functions/func_0') + def func_test0(): + pass + @registry.register(collection, 'func_1') + def func_test1(): + pass + @registry.register(collection, func_test1) + def func_test2(): + pass + expected_collection = { + 'functions': { + 'func_0': func_test0, + }, + 'func_1': func_test1, + func_test1: func_test2, + } + self.assertEqual(collection, expected_collection) + + def test_register_error(self): + collection = {} + + @registry.register(collection, 'functions/func_0') + def func_test0(): # pylint: disable=unused-variable + pass + with self.assertRaises(KeyError): + @registry.register(collection, 'functions/func_0/sub_func') + def func_test1(): # pylint: disable=unused-variable + pass + with self.assertRaises(LookupError): + registry.lookup(collection, 'non-exist') + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/utils/testing/__init__.py b/models/official/utils/testing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/official/utils/testing/integration.py b/models/official/utils/testing/integration.py new file mode 100644 index 0000000000000000000000000000000000000000..b4809a4815cd76c637e2b319352a1d15ab89b87b --- /dev/null +++ b/models/official/utils/testing/integration.py @@ -0,0 +1,71 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Helper code to run complete models from within python. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import shutil +import sys +import tempfile + +from absl import flags +from absl.testing import flagsaver + +from official.utils.flags import core as flags_core + + +@flagsaver.flagsaver +def run_synthetic(main, tmp_root, extra_flags=None, synth=True, train_epochs=1, + epochs_between_evals=1): + """Performs a minimal run of a model. 
+ + This function is intended to test for syntax errors throughout a model. A + very limited run is performed using synthetic data. + + Args: + main: The primary function used to exercise a code path. Generally this + function is ".main(argv)". + tmp_root: Root path for the temp directory created by the test class. + extra_flags: Additional flags passed by the caller of this function. + synth: Use synthetic data. + train_epochs: Value of the --train_epochs flag. + epochs_between_evals: Value of the --epochs_between_evals flag. + """ + + extra_flags = [] if extra_flags is None else extra_flags + + model_dir = tempfile.mkdtemp(dir=tmp_root) + + args = [sys.argv[0], "--model_dir", model_dir] + extra_flags + + if synth: + args.append("--use_synthetic_data") + + if train_epochs is not None: + args.extend(["--train_epochs", str(train_epochs)]) + + if epochs_between_evals is not None: + args.extend(["--epochs_between_evals", str(epochs_between_evals)]) + + try: + flags_core.parse_flags(argv=args) + main(flags.FLAGS) + finally: + if os.path.exists(model_dir): + shutil.rmtree(model_dir) diff --git a/models/official/utils/testing/pylint.rcfile b/models/official/utils/testing/pylint.rcfile new file mode 100644 index 0000000000000000000000000000000000000000..b872802a81187b63e82ead282dd38fad1d1b5ded --- /dev/null +++ b/models/official/utils/testing/pylint.rcfile @@ -0,0 +1,168 @@ +[MESSAGES CONTROL] +disable=R,W,bad-option-value,trailing-newlines,no-name-in-module + +[REPORTS] +# Tells whether to display a full report or only the messages +reports=no + +# Activate the evaluation score. +score=no + +[BASIC] + +# Regular expression matching correct argument names +argument-rgx=^[a-z][a-z0-9_]*$ + +# Regular expression matching correct attribute names +attr-rgx=^_{0,2}[a-z][a-z0-9_]*$ + +# Regular expression matching correct class attribute names +class-attribute-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$ + +# Regular expression matching correct class names +class-rgx=^_?[A-Z][a-zA-Z0-9]*$ + +# Regular expression matching correct constant names +const-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$ + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=10 + +# Regular expression matching correct function names +function-rgx=^(?:(?P_?[A-Z][a-zA-Z0-9]*)|(?P_?[a-z][a-z0-9_]*))$ + +# Good variable names which should always be accepted, separated by a comma +good-names=main,_ + +# Regular expression matching correct inline iteration names +inlinevar-rgx=^[a-z][a-z0-9_]*$ + +# Regular expression matching correct method names +method-rgx=^(?:(?P__[a-z0-9_]+__|next)|(?P_{0,2}[A-Z][a-zA-Z0-9]*)|(?P_{0,2}[a-z][a-z0-9_]*)|(setUp|tearDown))$ + +# Regular expression matching correct module names +module-rgx=^(_?[a-z][a-z0-9_]*)|__init__|PRESUBMIT|PRESUBMIT_unittest$ + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=(__.*__|main|.*ArgParser) + +# Naming hint for variable names +variable-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct variable names +variable-rgx=^[a-z][a-z0-9_]*$ + +[TYPECHECK] + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis. It +# supports qualified module names, as well as Unix pattern matching. 
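A hedged sketch of a smoke test built on `run_synthetic` from integration.py; `my_model` is a hypothetical module assumed to define its flags at import time and expose a `main(flags_obj)` entry point:

```python
import tensorflow as tf

from official.utils.testing import integration
import my_model  # hypothetical: defines flags at import and provides main(flags_obj)


class MyModelSmokeTest(tf.test.TestCase):

  def test_end_to_end_synthetic(self):
    # Runs one epoch on synthetic data in a temp model_dir; the @flagsaver
    # decorator on run_synthetic restores flag values afterwards.
    integration.run_synthetic(
        main=my_model.main,
        tmp_root=self.get_temp_dir(),
        extra_flags=["--batch_size", "8"])


if __name__ == "__main__":
  tf.test.main()
```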
+ignored-modules=absl, absl.*, official, official.*, tensorflow, tensorflow.*, LazyLoader, google, google.cloud.* + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__,__new__,setUp + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make + +# This is deprecated, because it is not used anymore. +#ignore-iface-methods= + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls,class_ + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + + +[DESIGN] + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore +ignored-argument-names=_.* + +# Maximum number of arguments for function / method +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of branch for function / method body +max-branches=12 + +# Maximum number of locals for function / method body +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of return / yield for function / method body +max-returns=6 + +# Maximum number of statements in function / method body +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. Defaults to +# "Exception" +overgeneral-exceptions=StandardError,Exception,BaseException + + +[FORMAT] + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=80 + +# Maximum number of lines in a module +max-module-lines=99999 + +# List of optional constructs for which whitespace checking is disabled +no-space-check= + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=yes + +# Allow URLs and comment type annotations to exceed the max line length as neither can be easily +# split across lines. +ignore-long-lines=^\s*(?:(# )??$|# type:) + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid to define new builtins when possible. +additional-builtins= + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_,_cb + +# A regular expression matching the name of dummy variables (i.e. expectedly +# not used). +dummy-variables-rgx=^\*{0,2}(_$|unused_|dummy_) + +# Tells whether we should check for unused import in __init__ files. +init-import=no diff --git a/models/official/utils/testing/scripts/builds_common.sh b/models/official/utils/testing/scripts/builds_common.sh new file mode 100644 index 0000000000000000000000000000000000000000..3cf08bb510d2a8ba0b06b1d38ccd1294b159ce15 --- /dev/null +++ b/models/official/utils/testing/scripts/builds_common.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# +# Common Bash functions used by build scripts + +COLOR_NC='\033[0m' +COLOR_BOLD='\033[1m' +COLOR_LIGHT_GRAY='\033[0;37m' +COLOR_GREEN='\033[0;32m' +COLOR_RED='\033[0;31m' + +die() { + # Print a message and exit with code 1. + # + # Usage: die + # e.g., die "Something bad happened." + + echo $@ + exit 1 +} + +num_cpus() { + # Get the number of CPUs + N_CPUS=$(grep -c ^processor /proc/cpuinfo) + if [[ -z ${N_CPUS} ]]; then + die "ERROR: Unable to determine the number of CPUs" + fi + + echo ${N_CPUS} +} + +# List files changed (i.e., added, or revised) from +# the common ancestor of HEAD and the latest master branch. +# Usage: get_changed_files_from_master_branch +get_changed_files_from_master_branch() { + ANCESTOR=$(git merge-base HEAD master origin/master) + git diff ${ANCESTOR} --diff-filter=d --name-only "$@" +} + +# List python files changed that still exist, +# i.e., not removed. +# Usage: get_py_files_to_check [--incremental] +get_py_files_to_check() { + if [[ "$1" == "--incremental" ]]; then + get_changed_files_from_master_branch -- '*.py' + elif [[ -z "$1" ]]; then + find official/ -name '*.py' + else + die "Found unsupported args: $@ for get_py_files_to_check." + fi +} diff --git a/models/official/utils/testing/scripts/ci_sanity.sh b/models/official/utils/testing/scripts/ci_sanity.sh new file mode 100644 index 0000000000000000000000000000000000000000..97d6bc290eff327f340088b960f910af2afa626b --- /dev/null +++ b/models/official/utils/testing/scripts/ci_sanity.sh @@ -0,0 +1,132 @@ +#!/bin/bash +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# Sanity check script that runs tests and lint under local environment. +# Make sure that tensorflow and pylint is installed. +# usage: models >: ./official/utils/testing/scripts/ci_sanity.sh do_pylint --incremental +set +x + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +source "${SCRIPT_DIR}/builds_common.sh" +cd "$SCRIPT_DIR/../../../.." +MODEL_ROOT="$(pwd)" + +export PYTHONPATH="$PYTHONPATH:${MODEL_ROOT}" + +# Run pylint +do_pylint() { + # Usage: do_pylint [--incremental] + # + # Options: + # --incremental Performs check on only the python files changed in the + # last non-merge git commit. 
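+  #
+  # Example (run from the repository root, as in the usage note at the top of
+  # this script; omit the flag to lint every Python file under official/):
+  #   ./official/utils/testing/scripts/ci_sanity.sh do_pylint --incremental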
+ + # Use this list to whitelist pylint errors + ERROR_WHITELIST="" + + echo "ERROR_WHITELIST=\"${ERROR_WHITELIST}\"" + + PYLINT_BIN="python3 -m pylint" + + PYTHON_SRC_FILES=$(get_py_files_to_check $1) + if [[ -z ${PYTHON_SRC_FILES} ]]; then + echo "do_pylint found no Python files to check. Returning." + return 0 + fi + + PYLINTRC_FILE="official/utils/testing/pylint.rcfile" + + if [[ ! -f "${PYLINTRC_FILE}" ]]; then + die "ERROR: Cannot find pylint rc file at ${PYLINTRC_FILE}" + fi + + NUM_SRC_FILES=$(echo ${PYTHON_SRC_FILES} | wc -w) + NUM_CPUS=$(num_cpus) + + echo "Running pylint on ${NUM_SRC_FILES} files with ${NUM_CPUS} "\ + "parallel jobs..." + echo "" + + PYLINT_START_TIME=$(date +'%s') + OUTPUT_FILE="$(mktemp)_pylint_output.log" + ERRORS_FILE="$(mktemp)_pylint_errors.log" + NONWL_ERRORS_FILE="$(mktemp)_pylint_nonwl_errors.log" + + rm -rf ${OUTPUT_FILE} + rm -rf ${ERRORS_FILE} + rm -rf ${NONWL_ERRORS_FILE} + touch ${NONWL_ERRORS_FILE} + + ${PYLINT_BIN} --rcfile="${PYLINTRC_FILE}" --output-format=parseable \ + --jobs=${NUM_CPUS} ${PYTHON_SRC_FILES} > ${OUTPUT_FILE} 2>&1 + PYLINT_END_TIME=$(date +'%s') + + echo "" + echo "pylint took $((PYLINT_END_TIME - PYLINT_START_TIME)) s" + echo "" + + # Report only what we care about + # Ref https://pylint.readthedocs.io/en/latest/technical_reference/features.html + # E: all errors + # W0311 bad-indentation + # W0312 mixed-indentation + # C0330 bad-continuation + # C0301 line-too-long + # C0326 bad-whitespace + # W0611 unused-import + # W0622 redefined-builtin + grep -E '(\[E|\[W0311|\[W0312|\[C0330|\[C0301|\[C0326|\[W0611|\[W0622)' ${OUTPUT_FILE} > ${ERRORS_FILE} + + N_ERRORS=0 + while read -r LINE; do + IS_WHITELISTED=0 + for WL_REGEX in ${ERROR_WHITELIST}; do + if echo ${LINE} | grep -q "${WL_REGEX}"; then + echo "Found a whitelisted error:" + echo " ${LINE}" + IS_WHITELISTED=1 + fi + done + + if [[ ${IS_WHITELISTED} == "0" ]]; then + echo "${LINE}" >> ${NONWL_ERRORS_FILE} + echo "" >> ${NONWL_ERRORS_FILE} + ((N_ERRORS++)) + fi + done <${ERRORS_FILE} + + echo "Raw lint output file: ${OUTPUT_FILE}" + + echo "" + if [[ ${N_ERRORS} != 0 ]]; then + echo "FAIL: Found ${N_ERRORS} non-whitelited pylint errors:" + cat "${NONWL_ERRORS_FILE}" + return 1 + else + echo "PASS: No non-whitelisted pylint errors were found." + return 0 + fi +} + +test_result=0 + +TESTS="$@" + +for t in "${TESTS}"; do + ${t} || test_result=$? +done + +exit "${test_result}" diff --git a/models/official/utils/testing/scripts/presubmit.sh b/models/official/utils/testing/scripts/presubmit.sh new file mode 100644 index 0000000000000000000000000000000000000000..954d96df7f8c5f95546fb642ce6f9597f935cb3c --- /dev/null +++ b/models/official/utils/testing/scripts/presubmit.sh @@ -0,0 +1,73 @@ +#!/bin/bash +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# Presubmit script that runs tests and lint under local environment. 
+# Make sure that tensorflow and pylint is installed. +# usage: models >: ./official/utils/testing/scripts/presubmit.sh +# usage: models >: ./official/utils/testing/scripts/presubmit.sh lint py2_test py3_test +set +x + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +cd "$SCRIPT_DIR/../../../.." +MODEL_ROOT="$(pwd)" + +export PYTHONPATH="$PYTHONPATH:${MODEL_ROOT}" + +py_test() { + local PY_BINARY="$1" + local exit_code=0 + + echo "===========Running Python test============" + + for test_file in `find official/ -name '*test.py' -print` + do + echo "####=======Testing ${test_file}=======####" + ${PY_BINARY} "${test_file}" + _exit_code=$? + if [[ $_exit_code != 0 ]]; then + exit_code=$_exit_code + echo "FAIL: ${test_file}" + fi + done + + return "${exit_code}" +} + +py2_test() { + local PY_BINARY=$(which python2) + py_test "$PY_BINARY" + return $? +} + +py3_test() { + local PY_BINARY=$(which python3) + py_test "$PY_BINARY" + return $? +} + +test_result=0 + +if [ "$#" -eq 0 ]; then + TESTS="lint py2_test py3_test" +else + TESTS="$@" +fi + +for t in "${TESTS}"; do + ${t} || test_result=$? +done + +exit "${test_result}" diff --git a/models/official/vision/__init__.py b/models/official/vision/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/official/vision/detection/README.md b/models/official/vision/detection/README.md new file mode 100644 index 0000000000000000000000000000000000000000..53134ec553f8bb5bbd4d299a69f0e8fbb4176083 --- /dev/null +++ b/models/official/vision/detection/README.md @@ -0,0 +1,395 @@ +# Object Detection Models on TensorFlow 2 + +**Note**: This repository is still under construction. +More features and instructions will be added soon. + +## Prerequsite +To get started, download the code from TensorFlow models GitHub repository or +use the pre-installed Google Cloud VM. + +```bash +git clone https://github.com/tensorflow/models.git +``` + +Next, make sure to use TensorFlow 2.1+ on Google Cloud. Also here are +a few package you need to install to get started: + +```bash +sudo apt-get install -y python-tk && \ +pip3 install -r ~/models/official/requirements.txt +``` + +## Train RetinaNet on TPU + +### Train a vanilla ResNet-50 based RetinaNet. + +```bash +TPU_NAME="" +MODEL_DIR="" +RESNET_CHECKPOINT="" +TRAIN_FILE_PATTERN="" +EVAL_FILE_PATTERN="" +VAL_JSON_FILE="" +python3 ~/models/official/vision/detection/main.py \ + --strategy_type=tpu \ + --tpu="${TPU_NAME?}" \ + --model_dir="${MODEL_DIR?}" \ + --mode=train \ + --params_override="{ type: retinanet, train: { checkpoint: { path: ${RESNET_CHECKPOINT?}, prefix: resnet50/ }, train_file_pattern: ${TRAIN_FILE_PATTERN?} }, eval: { val_json_file: ${VAL_JSON_FILE?}, eval_file_pattern: ${EVAL_FILE_PATTERN?} } }" +``` + +The pre-trained ResNet-50 checkpoint can be downloaded [here](https://storage.cloud.google.com/cloud-tpu-checkpoints/model-garden-vision/detection/resnet50-2018-02-07.tar.gz). + +Note: The ResNet implementation under +[detection/](https://github.com/tensorflow/models/tree/master/official/vision/detection) +is currently different from the one under +[classification/](https://github.com/tensorflow/models/tree/master/official/vision/image_classification), +so the checkpoints are not compatible. +We will unify the implementation soon. + + + +### Train a custom RetinaNet using the config file. + +First, create a YAML config file, e.g. *my_retinanet.yaml*. 
This file specifies +the parameters to be overridden, which should at least include the following +fields. + +```YAML +# my_retinanet.yaml +type: 'retinanet' +train: + train_file_pattern: +eval: + eval_file_pattern: + val_json_file: +``` + +Once the YAML config file is created, you can launch the training using the +following command. + +```bash +TPU_NAME="" +MODEL_DIR="" +python3 ~/models/official/vision/detection/main.py \ + --strategy_type=tpu \ + --tpu="${TPU_NAME?}" \ + --model_dir="${MODEL_DIR?}" \ + --mode=train \ + --config_file="my_retinanet.yaml" +``` + +## Train RetinaNet on GPU + +Training on GPU is similar to that on TPU. The major change is the strategy +type (use "[mirrored](https://www.tensorflow.org/api_docs/python/tf/distribute/MirroredStrategy)" for multiple GPU and +"[one_device](https://www.tensorflow.org/api_docs/python/tf/distribute/OneDeviceStrategy)" for single GPU). + +Multi-GPUs example (assuming there are 8GPU connected to the host): + +```bash +MODEL_DIR="" +python3 ~/models/official/vision/detection/main.py \ + --strategy_type=mirrored \ + --num_gpus=8 \ + --model_dir="${MODEL_DIR?}" \ + --mode=train \ + --config_file="my_retinanet.yaml" +``` + +```bash +MODEL_DIR="" +python3 ~/models/official/vision/detection/main.py \ + --strategy_type=one_device \ + --num_gpus=1 \ + --model_dir="${MODEL_DIR?}" \ + --mode=train \ + --config_file="my_retinanet.yaml" +``` + +An example with inline configuration (YAML or JSON format): + +``` +python3 ~/models/official/vision/detection/main.py \ + --model_dir= \ + --strategy_type=one_device \ + --num_gpus=1 \ + --mode=train \ + --params_override="eval: + eval_file_pattern: + batch_size: 8 + val_json_file: +predict: + predict_batch_size: 8 +architecture: + use_bfloat16: False +train: + total_steps: 1 + batch_size: 8 + train_file_pattern: +use_tpu: False +" +``` + +--- + +## Train Mask R-CNN on TPU + +### Train a vanilla ResNet-50 based Mask R-CNN. + +```bash +TPU_NAME="" +MODEL_DIR="" +RESNET_CHECKPOINT="" +TRAIN_FILE_PATTERN="" +EVAL_FILE_PATTERN="" +VAL_JSON_FILE="" +python3 ~/models/official/vision/detection/main.py \ + --strategy_type=tpu \ + --tpu=${TPU_NAME} \ + --model_dir=${MODEL_DIR} \ + --mode=train \ + --model=mask_rcnn \ + --params_override="{train: { checkpoint: { path: ${RESNET_CHECKPOINT}, prefix: resnet50/ }, train_file_pattern: ${TRAIN_FILE_PATTERN} }, eval: { val_json_file: ${VAL_JSON_FILE}, eval_file_pattern: ${EVAL_FILE_PATTERN} } }" +``` + +The pre-trained ResNet-50 checkpoint can be downloaded [here](https://storage.cloud.google.com/cloud-tpu-checkpoints/model-garden-vision/detection/resnet50-2018-02-07.tar.gz). + +Note: The ResNet implementation under +[detection/](https://github.com/tensorflow/models/tree/master/official/vision/detection) +is currently different from the one under +[classification/](https://github.com/tensorflow/models/tree/master/official/vision/image_classification), +so the checkpoints are not compatible. +We will unify the implementation soon. + + +### Train a custom Mask R-CNN using the config file. + +First, create a YAML config file, e.g. *my_maskrcnn.yaml*. +This file specifies the parameters to be overridden, +which should at least include the following fields. + +```YAML +# my_maskrcnn.yaml +train: + train_file_pattern: +eval: + eval_file_pattern: + val_json_file: +``` + +Once the YAML config file is created, you can launch the training using the +following command. 
+ +```bash +TPU_NAME="" +MODEL_DIR="" +python3 ~/models/official/vision/detection/main.py \ + --strategy_type=tpu \ + --tpu=${TPU_NAME} \ + --model_dir=${MODEL_DIR} \ + --mode=train \ + --model=mask_rcnn \ + --config_file="my_maskrcnn.yaml" +``` + +## Train Mask R-CNN on GPU + +Training on GPU is similar to that on TPU. The major change is the strategy type +(use +"[mirrored](https://www.tensorflow.org/api_docs/python/tf/distribute/MirroredStrategy)" +for multiple GPU and +"[one_device](https://www.tensorflow.org/api_docs/python/tf/distribute/OneDeviceStrategy)" +for single GPU). + +Multi-GPUs example (assuming there are 8GPU connected to the host): + +```bash +MODEL_DIR="" +python3 ~/models/official/vision/detection/main.py \ + --strategy_type=mirrored \ + --num_gpus=8 \ + --model_dir=${MODEL_DIR} \ + --mode=train \ + --model=mask_rcnn \ + --config_file="my_maskrcnn.yaml" +``` + +```bash +MODEL_DIR="" +python3 ~/models/official/vision/detection/main.py \ + --strategy_type=one_device \ + --num_gpus=1 \ + --model_dir=${MODEL_DIR} \ + --mode=train \ + --model=mask_rcnn \ + --config_file="my_maskrcnn.yaml" +``` + +An example with inline configuration (YAML or JSON format): + +``` +python3 ~/models/official/vision/detection/main.py \ + --model_dir= \ + --strategy_type=one_device \ + --num_gpus=1 \ + --mode=train \ + --model=mask_rcnn \ + --params_override="eval: + eval_file_pattern: + batch_size: 8 + val_json_file: +predict: + predict_batch_size: 8 +architecture: + use_bfloat16: False +train: + total_steps: 1000 + batch_size: 8 + train_file_pattern: +use_tpu: False +" +``` + +## Train ShapeMask on TPU + +### Train a ResNet-50 based ShapeMask. + +```bash +TPU_NAME="" +MODEL_DIR="" +RESNET_CHECKPOINT="" +TRAIN_FILE_PATTERN="" +EVAL_FILE_PATTERN="" +VAL_JSON_FILE="" +SHAPE_PRIOR_PATH="" +python3 ~/models/official/vision/detection/main.py \ + --strategy_type=tpu \ + --tpu=${TPU_NAME} \ + --model_dir=${MODEL_DIR} \ + --mode=train \ + --model=shapemask \ + --params_override="{train: { checkpoint: { path: ${RESNET_CHECKPOINT}, prefix: resnet50/ }, train_file_pattern: ${TRAIN_FILE_PATTERN} }, eval: { val_json_file: ${VAL_JSON_FILE}, eval_file_pattern: ${EVAL_FILE_PATTERN} } shapemask_head: {use_category_for_mask: true, shape_prior_path: ${SHAPE_PRIOR_PATH}} }" +``` + +The pre-trained ResNet-50 checkpoint can be downloaded [here](https://storage.cloud.google.com/cloud-tpu-checkpoints/model-garden-vision/detection/resnet50-2018-02-07.tar.gz). + +The shape priors can be downloaded [here] +(https://storage.googleapis.com/cloud-tpu-checkpoints/shapemask/kmeans_class_priors_91x20x32x32.npy) + + +### Train a custom ShapeMask using the config file. + +First, create a YAML config file, e.g. *my_shapemask.yaml*. +This file specifies the parameters to be overridden: + +```YAML +# my_shapemask.yaml +train: + train_file_pattern: + total_steps: + batch_size: +eval: + eval_file_pattern: + val_json_file: + batch_size: +shapemask_head: + shape_prior_path: +``` + +Once the YAML config file is created, you can launch the training using the +following command. + +```bash +TPU_NAME="" +MODEL_DIR="" +python3 ~/models/official/vision/detection/main.py \ + --strategy_type=tpu \ + --tpu=${TPU_NAME} \ + --model_dir=${MODEL_DIR} \ + --mode=train \ + --model=shapemask \ + --config_file="my_shapemask.yaml" +``` + +## Train ShapeMask on GPU + +Training on GPU is similar to that on TPU. 
The major change is the strategy type +(use +"[mirrored](https://www.tensorflow.org/api_docs/python/tf/distribute/MirroredStrategy)" +for multiple GPU and +"[one_device](https://www.tensorflow.org/api_docs/python/tf/distribute/OneDeviceStrategy)" +for single GPU). + +Multi-GPUs example (assuming there are 8GPU connected to the host): + +```bash +MODEL_DIR="" +python3 ~/models/official/vision/detection/main.py \ + --strategy_type=mirrored \ + --num_gpus=8 \ + --model_dir=${MODEL_DIR} \ + --mode=train \ + --model=shapemask \ + --config_file="my_shapemask.yaml" +``` + +A single GPU example + +```bash +MODEL_DIR="" +python3 ~/models/official/vision/detection/main.py \ + --strategy_type=one_device \ + --num_gpus=1 \ + --model_dir=${MODEL_DIR} \ + --mode=train \ + --model=shapemask \ + --config_file="my_shapemask.yaml" +``` + + +An example with inline configuration (YAML or JSON format): + +``` +python3 ~/models/official/vision/detection/main.py \ + --model_dir= \ + --strategy_type=one_device \ + --num_gpus=1 \ + --mode=train \ + --model=shapemask \ + --params_override="eval: + eval_file_pattern: + batch_size: 8 + val_json_file: +train: + total_steps: 1000 + batch_size: 8 + train_file_pattern: +use_tpu: False +" +``` + + +### Run the evaluation (after training) + +``` +python3 /usr/share/models/official/vision/detection/main.py \ + --strategy_type=tpu \ + --tpu=${TPU_NAME} \ + --model_dir=${MODEL_DIR} \ + --mode=eval \ + --model=shapemask \ + --params_override="{eval: { val_json_file: ${VAL_JSON_FILE}, eval_file_pattern: ${EVAL_FILE_PATTERN}, eval_samples: 5000 } }" +``` + +`MODEL_DIR` needs to point to the trained path of ShapeMask model. +Change `strategy_type=mirrored` and `num_gpus=1` to run on a GPU. + +Note: The JSON groundtruth file is useful for [COCO dataset](http://cocodataset.org/#home) and can be +downloaded from the [COCO website](http://cocodataset.org/#download). For custom dataset, it is unncessary because the groundtruth can be included in the TFRecord files. + +## References + +1. [Focal Loss for Dense Object Detection](https://arxiv.org/abs/1708.02002). + Tsung-Yi Lin, Priya Goyal, Ross Girshick, Kaiming He, and Piotr Dollár. IEEE + International Conference on Computer Vision (ICCV), 2017. diff --git a/models/official/vision/detection/__init__.py b/models/official/vision/detection/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/official/vision/detection/configs/__init__.py b/models/official/vision/detection/configs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..931c2ef11db4a949e6c2e95bca44e36bac1241e9 --- /dev/null +++ b/models/official/vision/detection/configs/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== diff --git a/models/official/vision/detection/configs/base_config.py b/models/official/vision/detection/configs/base_config.py new file mode 100644 index 0000000000000000000000000000000000000000..0a4e2f5fbf001039a88bed6d834835348807719c --- /dev/null +++ b/models/official/vision/detection/configs/base_config.py @@ -0,0 +1,135 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Base config template.""" + + +BACKBONES = [ + 'resnet', +] + +MULTILEVEL_FEATURES = [ + 'fpn', +] + +# pylint: disable=line-too-long +# For ResNet, this freezes the variables of the first conv1 and conv2_x +# layers [1], which leads to higher training speed and slightly better testing +# accuracy. The intuition is that the low-level architecture (e.g., ResNet-50) +# is able to capture low-level features such as edges; therefore, it does not +# need to be fine-tuned for the detection task. +# Note that we need to trailing `/` to avoid the incorrect match. +# [1]: https://github.com/facebookresearch/Detectron/blob/master/detectron/core/config.py#L198 +RESNET_FROZEN_VAR_PREFIX = r'(resnet\d+)\/(conv2d(|_([1-9]|10))|batch_normalization(|_([1-9]|10)))\/' +REGULARIZATION_VAR_REGEX = r'.*(kernel|weight):0$' + +BASE_CFG = { + 'model_dir': '', + 'use_tpu': True, + 'strategy_type': 'tpu', + 'isolate_session_state': False, + 'train': { + 'iterations_per_loop': 100, + 'batch_size': 64, + 'total_steps': 22500, + 'num_cores_per_replica': None, + 'input_partition_dims': None, + 'optimizer': { + 'type': 'momentum', + 'momentum': 0.9, + 'nesterov': True, # `False` is better for TPU v3-128. + }, + 'learning_rate': { + 'type': 'step', + 'warmup_learning_rate': 0.0067, + 'warmup_steps': 500, + 'init_learning_rate': 0.08, + 'learning_rate_levels': [0.008, 0.0008], + 'learning_rate_steps': [15000, 20000], + }, + 'checkpoint': { + 'path': '', + 'prefix': '', + }, + # One can use 'RESNET_FROZEN_VAR_PREFIX' to speed up ResNet training + # when loading from the checkpoint. + 'frozen_variable_prefix': '', + 'train_file_pattern': '', + 'train_dataset_type': 'tfrecord', + # TODO(b/142174042): Support transpose_input option. + 'transpose_input': False, + 'regularization_variable_regex': REGULARIZATION_VAR_REGEX, + 'l2_weight_decay': 0.0001, + 'gradient_clip_norm': 0.0, + 'input_sharding': False, + }, + 'eval': { + 'input_sharding': True, + 'batch_size': 8, + 'eval_samples': 5000, + 'min_eval_interval': 180, + 'eval_timeout': None, + 'num_steps_per_eval': 1000, + 'type': 'box', + 'use_json_file': True, + 'val_json_file': '', + 'eval_file_pattern': '', + 'eval_dataset_type': 'tfrecord', + # When visualizing images, set evaluation batch size to 40 to avoid + # potential OOM. 
+ 'num_images_to_visualize': 0, + }, + 'predict': { + 'batch_size': 8, + }, + 'architecture': { + 'backbone': 'resnet', + 'min_level': 3, + 'max_level': 7, + 'multilevel_features': 'fpn', + 'use_bfloat16': True, + # Note that `num_classes` is the total number of classes including + # one background classes whose index is 0. + 'num_classes': 91, + }, + 'anchor': { + 'num_scales': 3, + 'aspect_ratios': [1.0, 2.0, 0.5], + 'anchor_size': 4.0, + }, + 'norm_activation': { + 'activation': 'relu', + 'batch_norm_momentum': 0.997, + 'batch_norm_epsilon': 1e-4, + 'batch_norm_trainable': True, + 'use_sync_bn': False, + }, + 'resnet': { + 'resnet_depth': 50, + }, + 'fpn': { + 'fpn_feat_dims': 256, + 'use_separable_conv': False, + 'use_batch_norm': True, + }, + 'postprocess': { + 'use_batched_nms': False, + 'max_total_size': 100, + 'nms_iou_threshold': 0.5, + 'score_threshold': 0.05, + 'pre_nms_num_boxes': 5000, + }, + 'enable_summary': False, +} +# pylint: enable=line-too-long diff --git a/models/official/vision/detection/configs/factory.py b/models/official/vision/detection/configs/factory.py new file mode 100644 index 0000000000000000000000000000000000000000..d60ea1e01133fdfffd76ad54daf4ee20ed1e46e0 --- /dev/null +++ b/models/official/vision/detection/configs/factory.py @@ -0,0 +1,37 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Factory to provide model configs.""" + +from official.modeling.hyperparams import params_dict +from official.vision.detection.configs import maskrcnn_config +from official.vision.detection.configs import retinanet_config +from official.vision.detection.configs import shapemask_config + + +def config_generator(model): + """Model function generator.""" + if model == 'retinanet': + default_config = retinanet_config.RETINANET_CFG + restrictions = retinanet_config.RETINANET_RESTRICTIONS + elif model == 'mask_rcnn': + default_config = maskrcnn_config.MASKRCNN_CFG + restrictions = maskrcnn_config.MASKRCNN_RESTRICTIONS + elif model == 'shapemask': + default_config = shapemask_config.SHAPEMASK_CFG + restrictions = shapemask_config.SHAPEMASK_RESTRICTIONS + else: + raise ValueError('Model %s is not supported.' % model) + + return params_dict.ParamsDict(default_config, restrictions) diff --git a/models/official/vision/detection/configs/maskrcnn_config.py b/models/official/vision/detection/configs/maskrcnn_config.py new file mode 100644 index 0000000000000000000000000000000000000000..70c9b31448d3d83754c439c87ce9f0d0a04f88c9 --- /dev/null +++ b/models/official/vision/detection/configs/maskrcnn_config.py @@ -0,0 +1,116 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Config template to train Mask R-CNN.""" + +from official.modeling.hyperparams import params_dict +from official.vision.detection.configs import base_config + + +# pylint: disable=line-too-long +MASKRCNN_CFG = params_dict.ParamsDict(base_config.BASE_CFG) +MASKRCNN_CFG.override({ + 'type': 'mask_rcnn', + 'eval': { + 'type': 'box_and_mask', + 'num_images_to_visualize': 0, + }, + 'architecture': { + 'parser': 'maskrcnn_parser', + 'min_level': 2, + 'max_level': 6, + 'include_mask': True, + 'mask_target_size': 28, + }, + 'maskrcnn_parser': { + 'output_size': [1024, 1024], + 'num_channels': 3, + 'rpn_match_threshold': 0.7, + 'rpn_unmatched_threshold': 0.3, + 'rpn_batch_size_per_im': 256, + 'rpn_fg_fraction': 0.5, + 'aug_rand_hflip': True, + 'aug_scale_min': 1.0, + 'aug_scale_max': 1.0, + 'skip_crowd_during_training': True, + 'max_num_instances': 100, + 'mask_crop_size': 112, + }, + 'anchor': { + 'num_scales': 1, + 'anchor_size': 8, + }, + 'rpn_head': { + 'anchors_per_location': 3, + 'num_convs': 2, + 'num_filters': 256, + 'use_separable_conv': False, + 'use_batch_norm': False, + }, + 'frcnn_head': { + 'num_convs': 0, + 'num_filters': 256, + 'use_separable_conv': False, + 'num_fcs': 2, + 'fc_dims': 1024, + 'use_batch_norm': False, + }, + 'mrcnn_head': { + 'num_convs': 4, + 'num_filters': 256, + 'use_separable_conv': False, + 'use_batch_norm': False, + }, + 'rpn_score_loss': { + 'rpn_batch_size_per_im': 256, + }, + 'rpn_box_loss': { + 'huber_loss_delta': 1.0 / 9.0, + }, + 'frcnn_box_loss': { + 'huber_loss_delta': 1.0, + }, + 'roi_proposal': { + 'rpn_pre_nms_top_k': 2000, + 'rpn_post_nms_top_k': 1000, + 'rpn_nms_threshold': 0.7, + 'rpn_score_threshold': 0.0, + 'rpn_min_size_threshold': 0.0, + 'test_rpn_pre_nms_top_k': 1000, + 'test_rpn_post_nms_top_k': 1000, + 'test_rpn_nms_threshold': 0.7, + 'test_rpn_score_threshold': 0.0, + 'test_rpn_min_size_threshold': 0.0, + 'use_batched_nms': False, + }, + 'roi_sampling': { + 'num_samples_per_image': 512, + 'fg_fraction': 0.25, + 'fg_iou_thresh': 0.5, + 'bg_iou_thresh_hi': 0.5, + 'bg_iou_thresh_lo': 0.0, + 'mix_gt_boxes': True, + }, + 'mask_sampling': { + 'num_mask_samples_per_image': 128, # Typically = `num_samples_per_image` * `fg_fraction`. + }, + 'postprocess': { + 'pre_nms_num_boxes': 1000, + }, +}, is_strict=False) + + +MASKRCNN_RESTRICTIONS = [ +] +# pylint: enable=line-too-long diff --git a/models/official/vision/detection/configs/retinanet_config.py b/models/official/vision/detection/configs/retinanet_config.py new file mode 100644 index 0000000000000000000000000000000000000000..579e30d083aacf138a2f9baffe1be7713ad21583 --- /dev/null +++ b/models/official/vision/detection/configs/retinanet_config.py @@ -0,0 +1,59 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Config template to train Retinanet.""" + +from official.modeling.hyperparams import params_dict +from official.vision.detection.configs import base_config + + +# pylint: disable=line-too-long +RETINANET_CFG = params_dict.ParamsDict(base_config.BASE_CFG) +RETINANET_CFG.override({ + 'type': 'retinanet', + 'architecture': { + 'parser': 'retinanet_parser', + }, + 'retinanet_parser': { + 'output_size': [640, 640], + 'num_channels': 3, + 'match_threshold': 0.5, + 'unmatched_threshold': 0.5, + 'aug_rand_hflip': True, + 'aug_scale_min': 1.0, + 'aug_scale_max': 1.0, + 'use_autoaugment': False, + 'autoaugment_policy_name': 'v0', + 'skip_crowd_during_training': True, + 'max_num_instances': 100, + }, + 'retinanet_head': { + 'anchors_per_location': 9, + 'num_convs': 4, + 'num_filters': 256, + 'use_separable_conv': False, + }, + 'retinanet_loss': { + 'focal_loss_alpha': 0.25, + 'focal_loss_gamma': 1.5, + 'huber_loss_delta': 0.1, + 'box_loss_weight': 50, + }, + 'enable_summary': True, +}, is_strict=False) + +RETINANET_RESTRICTIONS = [ +] + +# pylint: enable=line-too-long diff --git a/models/official/vision/detection/configs/shapemask_config.py b/models/official/vision/detection/configs/shapemask_config.py new file mode 100644 index 0000000000000000000000000000000000000000..0914c492e15f65e5ba66701f27ca0d88d13698ff --- /dev/null +++ b/models/official/vision/detection/configs/shapemask_config.py @@ -0,0 +1,98 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Config to train shapemask on COCO.""" + +from official.modeling.hyperparams import params_dict +from official.vision.detection.configs import base_config + +SHAPEMASK_RESNET_FROZEN_VAR_PREFIX = r'(resnet\d+/)conv2d(|_([1-9]|10))\/' + +SHAPEMASK_CFG = params_dict.ParamsDict(base_config.BASE_CFG) +SHAPEMASK_CFG.override({ + 'type': 'shapemask', + 'architecture': { + 'parser': 'shapemask_parser', + 'backbone': 'resnet', + 'multilevel_features': 'fpn', + 'outer_box_scale': 1.25, + }, + 'train': { + 'total_steps': 45000, + 'learning_rate': { + 'learning_rate_steps': [30000, 40000], + }, + 'frozen_variable_prefix': SHAPEMASK_RESNET_FROZEN_VAR_PREFIX, + 'regularization_variable_regex': None, + }, + 'eval': { + 'type': 'shapemask_box_and_mask', + 'mask_eval_class': 'all', # 'all', 'voc', or 'nonvoc'. 
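+        # (Here 'voc' is expected to limit mask evaluation to the COCO
+        # categories that overlap PASCAL VOC, while 'nonvoc' covers the
+        # remaining categories.)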
+ }, + 'shapemask_parser': { + 'output_size': [640, 640], + 'num_channels': 3, + 'match_threshold': 0.5, + 'unmatched_threshold': 0.5, + 'aug_rand_hflip': True, + 'aug_scale_min': 0.8, + 'aug_scale_max': 1.2, + 'skip_crowd_during_training': True, + 'max_num_instances': 100, + # Shapemask specific parameters + 'mask_train_class': 'all', # 'all', 'voc', or 'nonvoc'. + 'use_category': True, + 'outer_box_scale': 1.25, + 'num_sampled_masks': 8, + 'mask_crop_size': 32, + 'mask_min_level': 3, + 'mask_max_level': 5, + 'box_jitter_scale': 0.025, + 'upsample_factor': 4, + }, + 'retinanet_head': { + 'anchors_per_location': 9, + 'num_convs': 4, + 'num_filters': 256, + 'use_separable_conv': False, + 'use_batch_norm': True, + }, + 'shapemask_head': { + 'num_downsample_channels': 128, + 'mask_crop_size': 32, + 'use_category_for_mask': True, + 'num_convs': 4, + 'upsample_factor': 4, + 'shape_prior_path': '', + }, + 'retinanet_loss': { + 'focal_loss_alpha': 0.4, + 'focal_loss_gamma': 1.5, + 'huber_loss_delta': 0.15, + 'box_loss_weight': 50, + }, + 'shapemask_loss': { + 'shape_prior_loss_weight': 0.1, + 'coarse_mask_loss_weight': 1.0, + 'fine_mask_loss_weight': 1.0, + }, +}, is_strict=False) + +SHAPEMASK_RESTRICTIONS = [ + 'shapemask_head.mask_crop_size == shapemask_parser.mask_crop_size', + 'shapemask_head.upsample_factor == shapemask_parser.upsample_factor', + 'shapemask_parser.outer_box_scale == architecture.outer_box_scale', +] + +# pylint: enable=line-too-long diff --git a/models/official/vision/detection/dataloader/__init__.py b/models/official/vision/detection/dataloader/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..931c2ef11db4a949e6c2e95bca44e36bac1241e9 --- /dev/null +++ b/models/official/vision/detection/dataloader/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== diff --git a/models/official/vision/detection/dataloader/anchor.py b/models/official/vision/detection/dataloader/anchor.py new file mode 100644 index 0000000000000000000000000000000000000000..f46f7480062e75cec55d48ff683dcad8301e4994 --- /dev/null +++ b/models/official/vision/detection/dataloader/anchor.py @@ -0,0 +1,292 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Anchor box and labeler definition.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import tensorflow as tf +from official.vision.detection.utils.object_detection import argmax_matcher +from official.vision.detection.utils.object_detection import balanced_positive_negative_sampler +from official.vision.detection.utils.object_detection import box_list +from official.vision.detection.utils.object_detection import faster_rcnn_box_coder +from official.vision.detection.utils.object_detection import region_similarity_calculator +from official.vision.detection.utils.object_detection import target_assigner + + +class Anchor(object): + """Anchor class for anchor-based object detectors.""" + + def __init__(self, + min_level, + max_level, + num_scales, + aspect_ratios, + anchor_size, + image_size): + """Constructs multiscale anchors. + + Args: + min_level: integer number of minimum level of the output feature pyramid. + max_level: integer number of maximum level of the output feature pyramid. + num_scales: integer number representing intermediate scales added + on each level. For instances, num_scales=2 adds one additional + intermediate anchor scales [2^0, 2^0.5] on each level. + aspect_ratios: list of float numbers representing the aspect raito anchors + added on each level. The number indicates the ratio of width to height. + For instances, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors on each + scale level. + anchor_size: float number representing the scale of size of the base + anchor to the feature stride 2^level. + image_size: a list of integer numbers or Tensors representing + [height, width] of the input image size.The image_size should be divided + by the largest feature stride 2^max_level. + """ + self.min_level = min_level + self.max_level = max_level + self.num_scales = num_scales + self.aspect_ratios = aspect_ratios + self.anchor_size = anchor_size + self.image_size = image_size + self.boxes = self._generate_boxes() + + def _generate_boxes(self): + """Generates multiscale anchor boxes. + + Returns: + a Tensor of shape [N, 4], represneting anchor boxes of all levels + concatenated together. + """ + boxes_all = [] + for level in range(self.min_level, self.max_level + 1): + boxes_l = [] + for scale in range(self.num_scales): + for aspect_ratio in self.aspect_ratios: + stride = 2 ** level + intermidate_scale = 2 ** (scale / float(self.num_scales)) + base_anchor_size = self.anchor_size * stride * intermidate_scale + aspect_x = aspect_ratio ** 0.5 + aspect_y = aspect_ratio ** -0.5 + half_anchor_size_x = base_anchor_size * aspect_x / 2.0 + half_anchor_size_y = base_anchor_size * aspect_y / 2.0 + x = tf.range(stride / 2, self.image_size[1], stride) + y = tf.range(stride / 2, self.image_size[0], stride) + xv, yv = tf.meshgrid(x, y) + xv = tf.cast(tf.reshape(xv, [-1]), dtype=tf.float32) + yv = tf.cast(tf.reshape(yv, [-1]), dtype=tf.float32) + # Tensor shape Nx4. + boxes = tf.stack([yv - half_anchor_size_y, xv - half_anchor_size_x, + yv + half_anchor_size_y, xv + half_anchor_size_x], + axis=1) + boxes_l.append(boxes) + # Concat anchors on the same level to tensor shape NxAx4. 
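+      # Worked example: at level 3 the stride is 2^3 = 8; with anchor_size=4.0
+      # and scale index 0, aspect_ratio 1.0 yields a 32x32-pixel base anchor.
+      # With num_scales=3 and 3 aspect ratios, A = 9 anchors per grid point.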
+ boxes_l = tf.stack(boxes_l, axis=1) + boxes_l = tf.reshape(boxes_l, [-1, 4]) + boxes_all.append(boxes_l) + return tf.concat(boxes_all, axis=0) + + def unpack_labels(self, labels): + """Unpacks an array of labels into multiscales labels.""" + unpacked_labels = collections.OrderedDict() + count = 0 + for level in range(self.min_level, self.max_level + 1): + feat_size_y = tf.cast(self.image_size[0] / 2 ** level, tf.int32) + feat_size_x = tf.cast(self.image_size[1] / 2 ** level, tf.int32) + steps = feat_size_y * feat_size_x * self.anchors_per_location + unpacked_labels[level] = tf.reshape( + labels[count:count + steps], [feat_size_y, feat_size_x, -1]) + count += steps + return unpacked_labels + + @property + def anchors_per_location(self): + return self.num_scales * len(self.aspect_ratios) + + @property + def multilevel_boxes(self): + return self.unpack_labels(self.boxes) + + +class AnchorLabeler(object): + """Labeler for dense object detector.""" + + def __init__(self, + anchor, + match_threshold=0.5, + unmatched_threshold=0.5): + """Constructs anchor labeler to assign labels to anchors. + + Args: + anchor: an instance of class Anchors. + match_threshold: a float number between 0 and 1 representing the + lower-bound threshold to assign positive labels for anchors. An anchor + with a score over the threshold is labeled positive. + unmatched_threshold: a float number between 0 and 1 representing the + upper-bound threshold to assign negative labels for anchors. An anchor + with a score below the threshold is labeled negative. + """ + similarity_calc = region_similarity_calculator.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher( + match_threshold, + unmatched_threshold=unmatched_threshold, + negatives_lower_than_unmatched=True, + force_match_for_each_row=True) + box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder() + + self._target_assigner = target_assigner.TargetAssigner( + similarity_calc, matcher, box_coder) + self._anchor = anchor + self._match_threshold = match_threshold + self._unmatched_threshold = unmatched_threshold + + def label_anchors(self, gt_boxes, gt_labels): + """Labels anchors with ground truth inputs. + + Args: + gt_boxes: A float tensor with shape [N, 4] representing groundtruth boxes. + For each row, it stores [y0, x0, y1, x1] for four corners of a box. + gt_labels: A integer tensor with shape [N, 1] representing groundtruth + classes. + Returns: + cls_targets_dict: ordered dictionary with keys + [min_level, min_level+1, ..., max_level]. The values are tensor with + shape [height_l, width_l, num_anchors_per_location]. The height_l and + width_l represent the dimension of class logits at l-th level. + box_targets_dict: ordered dictionary with keys + [min_level, min_level+1, ..., max_level]. The values are tensor with + shape [height_l, width_l, num_anchors_per_location * 4]. The height_l + and width_l represent the dimension of bounding box regression output at + l-th level. + num_positives: scalar tensor storing number of positives in an image. + """ + gt_box_list = box_list.BoxList(gt_boxes) + anchor_box_list = box_list.BoxList(self._anchor.boxes) + + # The cls_weights, box_weights are not used. + cls_targets, _, box_targets, _, matches = self._target_assigner.assign( + anchor_box_list, gt_box_list, gt_labels) + + # Labels definition in matches.match_results: + # (1) match_results[i]>=0, meaning that column i is matched with row + # match_results[i]. + # (2) match_results[i]=-1, meaning that column i is not matched. 
+ # (3) match_results[i]=-2, meaning that column i is ignored. + match_results = tf.expand_dims(matches.match_results, axis=1) + cls_targets = tf.cast(cls_targets, tf.int32) + cls_targets = tf.where( + tf.equal(match_results, -1), -tf.ones_like(cls_targets), cls_targets) + cls_targets = tf.where( + tf.equal(match_results, -2), -2 * tf.ones_like(cls_targets), + cls_targets) + + # Unpacks labels into multi-level representations. + cls_targets_dict = self._anchor.unpack_labels(cls_targets) + box_targets_dict = self._anchor.unpack_labels(box_targets) + num_positives = tf.reduce_sum( + input_tensor=tf.cast(tf.greater(matches.match_results, -1), tf.float32)) + + return cls_targets_dict, box_targets_dict, num_positives + + +class RpnAnchorLabeler(AnchorLabeler): + """Labeler for Region Proposal Network.""" + + def __init__(self, anchor, match_threshold=0.7, + unmatched_threshold=0.3, rpn_batch_size_per_im=256, + rpn_fg_fraction=0.5): + AnchorLabeler.__init__(self, anchor, match_threshold=0.7, + unmatched_threshold=0.3) + self._rpn_batch_size_per_im = rpn_batch_size_per_im + self._rpn_fg_fraction = rpn_fg_fraction + + def _get_rpn_samples(self, match_results): + """Computes anchor labels. + + This function performs subsampling for foreground (fg) and background (bg) + anchors. + Args: + match_results: A integer tensor with shape [N] representing the + matching results of anchors. (1) match_results[i]>=0, + meaning that column i is matched with row match_results[i]. + (2) match_results[i]=-1, meaning that column i is not matched. + (3) match_results[i]=-2, meaning that column i is ignored. + Returns: + score_targets: a integer tensor with the a shape of [N]. + (1) score_targets[i]=1, the anchor is a positive sample. + (2) score_targets[i]=0, negative. (3) score_targets[i]=-1, the anchor is + don't care (ignore). + """ + sampler = ( + balanced_positive_negative_sampler.BalancedPositiveNegativeSampler( + positive_fraction=self._rpn_fg_fraction, is_static=False)) + # indicator includes both positive and negative labels. + # labels includes only positives labels. + # positives = indicator & labels. + # negatives = indicator & !labels. + # ignore = !indicator. + indicator = tf.greater(match_results, -2) + labels = tf.greater(match_results, -1) + + samples = sampler.subsample( + indicator, self._rpn_batch_size_per_im, labels) + positive_labels = tf.where( + tf.logical_and(samples, labels), + tf.constant(2, dtype=tf.int32, shape=match_results.shape), + tf.constant(0, dtype=tf.int32, shape=match_results.shape)) + negative_labels = tf.where( + tf.logical_and(samples, tf.logical_not(labels)), + tf.constant(1, dtype=tf.int32, shape=match_results.shape), + tf.constant(0, dtype=tf.int32, shape=match_results.shape)) + ignore_labels = tf.fill(match_results.shape, -1) + + return (ignore_labels + positive_labels + negative_labels, + positive_labels, negative_labels) + + def label_anchors(self, gt_boxes, gt_labels): + """Labels anchors with ground truth inputs. + + Args: + gt_boxes: A float tensor with shape [N, 4] representing groundtruth boxes. + For each row, it stores [y0, x0, y1, x1] for four corners of a box. + gt_labels: A integer tensor with shape [N, 1] representing groundtruth + classes. + Returns: + score_targets_dict: ordered dictionary with keys + [min_level, min_level+1, ..., max_level]. The values are tensor with + shape [height_l, width_l, num_anchors]. The height_l and width_l + represent the dimension of class logits at l-th level. 
+ box_targets_dict: ordered dictionary with keys + [min_level, min_level+1, ..., max_level]. The values are tensor with + shape [height_l, width_l, num_anchors * 4]. The height_l and + width_l represent the dimension of bounding box regression output at + l-th level. + """ + gt_box_list = box_list.BoxList(gt_boxes) + anchor_box_list = box_list.BoxList(self._anchor.boxes) + + # cls_targets, cls_weights, box_weights are not used. + _, _, box_targets, _, matches = self._target_assigner.assign( + anchor_box_list, gt_box_list, gt_labels) + + # score_targets contains the subsampled positive and negative anchors. + score_targets, _, _ = self._get_rpn_samples(matches.match_results) + + # Unpacks labels. + score_targets_dict = self._anchor.unpack_labels(score_targets) + box_targets_dict = self._anchor.unpack_labels(box_targets) + + return score_targets_dict, box_targets_dict diff --git a/models/official/vision/detection/dataloader/factory.py b/models/official/vision/detection/dataloader/factory.py new file mode 100644 index 0000000000000000000000000000000000000000..1e13aec222f529d97ee9c502d408648b9d091e5b --- /dev/null +++ b/models/official/vision/detection/dataloader/factory.py @@ -0,0 +1,103 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Model architecture factory.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from official.vision.detection.dataloader import maskrcnn_parser +from official.vision.detection.dataloader import retinanet_parser +from official.vision.detection.dataloader import shapemask_parser + + +def parser_generator(params, mode): + """Generator function for various dataset parser.""" + if params.architecture.parser == 'retinanet_parser': + anchor_params = params.anchor + parser_params = params.retinanet_parser + parser_fn = retinanet_parser.Parser( + output_size=parser_params.output_size, + min_level=params.architecture.min_level, + max_level=params.architecture.max_level, + num_scales=anchor_params.num_scales, + aspect_ratios=anchor_params.aspect_ratios, + anchor_size=anchor_params.anchor_size, + match_threshold=parser_params.match_threshold, + unmatched_threshold=parser_params.unmatched_threshold, + aug_rand_hflip=parser_params.aug_rand_hflip, + aug_scale_min=parser_params.aug_scale_min, + aug_scale_max=parser_params.aug_scale_max, + use_autoaugment=parser_params.use_autoaugment, + autoaugment_policy_name=parser_params.autoaugment_policy_name, + skip_crowd_during_training=parser_params.skip_crowd_during_training, + max_num_instances=parser_params.max_num_instances, + use_bfloat16=params.architecture.use_bfloat16, + mode=mode) + elif params.architecture.parser == 'maskrcnn_parser': + anchor_params = params.anchor + parser_params = params.maskrcnn_parser + parser_fn = maskrcnn_parser.Parser( + output_size=parser_params.output_size, + min_level=params.architecture.min_level, + max_level=params.architecture.max_level, + num_scales=anchor_params.num_scales, + aspect_ratios=anchor_params.aspect_ratios, + anchor_size=anchor_params.anchor_size, + rpn_match_threshold=parser_params.rpn_match_threshold, + rpn_unmatched_threshold=parser_params.rpn_unmatched_threshold, + rpn_batch_size_per_im=parser_params.rpn_batch_size_per_im, + rpn_fg_fraction=parser_params.rpn_fg_fraction, + aug_rand_hflip=parser_params.aug_rand_hflip, + aug_scale_min=parser_params.aug_scale_min, + aug_scale_max=parser_params.aug_scale_max, + skip_crowd_during_training=parser_params.skip_crowd_during_training, + max_num_instances=parser_params.max_num_instances, + include_mask=params.architecture.include_mask, + mask_crop_size=parser_params.mask_crop_size, + use_bfloat16=params.architecture.use_bfloat16, + mode=mode) + elif params.architecture.parser == 'shapemask_parser': + anchor_params = params.anchor + parser_params = params.shapemask_parser + parser_fn = shapemask_parser.Parser( + output_size=parser_params.output_size, + min_level=params.architecture.min_level, + max_level=params.architecture.max_level, + num_scales=anchor_params.num_scales, + aspect_ratios=anchor_params.aspect_ratios, + anchor_size=anchor_params.anchor_size, + use_category=parser_params.use_category, + outer_box_scale=parser_params.outer_box_scale, + box_jitter_scale=parser_params.box_jitter_scale, + num_sampled_masks=parser_params.num_sampled_masks, + mask_crop_size=parser_params.mask_crop_size, + mask_min_level=parser_params.mask_min_level, + mask_max_level=parser_params.mask_max_level, + upsample_factor=parser_params.upsample_factor, + match_threshold=parser_params.match_threshold, + unmatched_threshold=parser_params.unmatched_threshold, + aug_rand_hflip=parser_params.aug_rand_hflip, + 
aug_scale_min=parser_params.aug_scale_min, + aug_scale_max=parser_params.aug_scale_max, + skip_crowd_during_training=parser_params.skip_crowd_during_training, + max_num_instances=parser_params.max_num_instances, + use_bfloat16=params.architecture.use_bfloat16, + mask_train_class=parser_params.mask_train_class, + mode=mode) + else: + raise ValueError('Parser %s is not supported.' % params.architecture.parser) + + return parser_fn diff --git a/models/official/vision/detection/dataloader/input_reader.py b/models/official/vision/detection/dataloader/input_reader.py new file mode 100644 index 0000000000000000000000000000000000000000..6e65243f6863ccadb45704b3ed487aec3b8ab21a --- /dev/null +++ b/models/official/vision/detection/dataloader/input_reader.py @@ -0,0 +1,107 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Data loader and input processing.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import tensorflow as tf + +from typing import Text, Optional +from official.modeling.hyperparams import params_dict +from official.vision.detection.dataloader import factory +from official.vision.detection.dataloader import mode_keys as ModeKeys + + +class InputFn(object): + """Input function that creates dataset from files.""" + + def __init__(self, + file_pattern: Text, + params: params_dict.ParamsDict, + mode: Text, + batch_size: int, + num_examples: Optional[int] = -1): + """Initialize. + + Args: + file_pattern: the file pattern for the data example (TFRecords). + params: the parameter object for constructing example parser and model. + mode: ModeKeys.TRAIN or ModeKeys.Eval + batch_size: the data batch size. + num_examples: If positive, only takes this number of examples and raise + tf.errors.OutOfRangeError after that. If non-positive, it will be + ignored. + """ + assert file_pattern is not None + assert mode is not None + assert batch_size is not None + self._file_pattern = file_pattern + self._mode = mode + self._is_training = (mode == ModeKeys.TRAIN) + self._batch_size = batch_size + self._num_examples = num_examples + self._parser_fn = factory.parser_generator(params, mode) + self._dataset_fn = tf.data.TFRecordDataset + + self._input_sharding = (not self._is_training) + try: + if self._is_training: + self._input_sharding = params.train.input_sharding + else: + self._input_sharding = params.eval.input_sharding + except AttributeError: + pass + + def __call__(self, ctx=None, batch_size: int = None): + """Provides tf.data.Dataset object. + + Args: + ctx: context object. + batch_size: expected batch size input data. + + Returns: + tf.data.Dataset object. 
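+
+    Example (illustrative; the file pattern used to build `input_fn` is a
+    placeholder):
+      input_fn = InputFn('/path/to/train*.tfrecord', params,
+                         mode=ModeKeys.TRAIN, batch_size=64)
+      dataset = input_fn()               # uses the constructor batch size.
+      dataset = input_fn(batch_size=8)   # or override it per call.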
+ """ + if not batch_size: + batch_size = self._batch_size + assert batch_size is not None + dataset = tf.data.Dataset.list_files( + self._file_pattern, shuffle=self._is_training) + + if self._input_sharding and ctx and ctx.num_input_pipelines > 1: + dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id) + dataset = dataset.cache() + + if self._is_training: + dataset = dataset.repeat() + + dataset = dataset.interleave( + map_func=self._dataset_fn, cycle_length=32, + num_parallel_calls=tf.data.experimental.AUTOTUNE) + + if self._is_training: + dataset = dataset.shuffle(1000) + if self._num_examples > 0: + dataset = dataset.take(self._num_examples) + + # Parses the fetched records to input tensors for model function. + dataset = dataset.map( + self._parser_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) + dataset = dataset.batch(batch_size, drop_remainder=True) + dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) + return dataset diff --git a/models/official/vision/detection/dataloader/maskrcnn_parser.py b/models/official/vision/detection/dataloader/maskrcnn_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..933e1b75c04ee04e4fbb60eaeb1ac9a48412a970 --- /dev/null +++ b/models/official/vision/detection/dataloader/maskrcnn_parser.py @@ -0,0 +1,385 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Data parser and processing for Mask R-CNN.""" + +import tensorflow as tf + +from official.vision.detection.dataloader import anchor +from official.vision.detection.dataloader import mode_keys as ModeKeys +from official.vision.detection.dataloader import tf_example_decoder +from official.vision.detection.utils import box_utils +from official.vision.detection.utils import dataloader_utils +from official.vision.detection.utils import input_utils + + +class Parser(object): + """Parser to parse an image and its annotations into a dictionary of tensors.""" + + def __init__(self, + output_size, + min_level, + max_level, + num_scales, + aspect_ratios, + anchor_size, + rpn_match_threshold=0.7, + rpn_unmatched_threshold=0.3, + rpn_batch_size_per_im=256, + rpn_fg_fraction=0.5, + aug_rand_hflip=False, + aug_scale_min=1.0, + aug_scale_max=1.0, + skip_crowd_during_training=True, + max_num_instances=100, + include_mask=False, + mask_crop_size=112, + use_bfloat16=True, + mode=None): + """Initializes parameters for parsing annotations in the dataset. + + Args: + output_size: `Tensor` or `list` for [height, width] of output image. The + output_size should be divided by the largest feature stride 2^max_level. + min_level: `int` number of minimum level of the output feature pyramid. + max_level: `int` number of maximum level of the output feature pyramid. + num_scales: `int` number representing intermediate scales added + on each level. 
For instances, num_scales=2 adds one additional + intermediate anchor scales [2^0, 2^0.5] on each level. + aspect_ratios: `list` of float numbers representing the aspect raito + anchors added on each level. The number indicates the ratio of width to + height. For instances, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors + on each scale level. + anchor_size: `float` number representing the scale of size of the base + anchor to the feature stride 2^level. + rpn_match_threshold: + rpn_unmatched_threshold: + rpn_batch_size_per_im: + rpn_fg_fraction: + aug_rand_hflip: `bool`, if True, augment training with random + horizontal flip. + aug_scale_min: `float`, the minimum scale applied to `output_size` for + data augmentation during training. + aug_scale_max: `float`, the maximum scale applied to `output_size` for + data augmentation during training. + skip_crowd_during_training: `bool`, if True, skip annotations labeled with + `is_crowd` equals to 1. + max_num_instances: `int` number of maximum number of instances in an + image. The groundtruth data will be padded to `max_num_instances`. + include_mask: a bool to indicate whether parse mask groundtruth. + mask_crop_size: the size which groundtruth mask is cropped to. + use_bfloat16: `bool`, if True, cast output image to tf.bfloat16. + mode: a ModeKeys. Specifies if this is training, evaluation, prediction + or prediction with groundtruths in the outputs. + """ + self._mode = mode + self._max_num_instances = max_num_instances + self._skip_crowd_during_training = skip_crowd_during_training + self._is_training = (mode == ModeKeys.TRAIN) + + self._example_decoder = tf_example_decoder.TfExampleDecoder( + include_mask=include_mask) + + # Anchor. + self._output_size = output_size + self._min_level = min_level + self._max_level = max_level + self._num_scales = num_scales + self._aspect_ratios = aspect_ratios + self._anchor_size = anchor_size + + # Target assigning. + self._rpn_match_threshold = rpn_match_threshold + self._rpn_unmatched_threshold = rpn_unmatched_threshold + self._rpn_batch_size_per_im = rpn_batch_size_per_im + self._rpn_fg_fraction = rpn_fg_fraction + + # Data augmentation. + self._aug_rand_hflip = aug_rand_hflip + self._aug_scale_min = aug_scale_min + self._aug_scale_max = aug_scale_max + + # Mask. + self._include_mask = include_mask + self._mask_crop_size = mask_crop_size + + # Device. + self._use_bfloat16 = use_bfloat16 + + # Data is parsed depending on the model Modekey. + if mode == ModeKeys.TRAIN: + self._parse_fn = self._parse_train_data + elif mode == ModeKeys.EVAL: + self._parse_fn = self._parse_eval_data + elif mode == ModeKeys.PREDICT or mode == ModeKeys.PREDICT_WITH_GT: + self._parse_fn = self._parse_predict_data + else: + raise ValueError('mode is not defined.') + + def __call__(self, value): + """Parses data to an image and associated training labels. + + Args: + value: a string tensor holding a serialized tf.Example proto. + + Returns: + image, labels: if mode == ModeKeys.TRAIN. see _parse_train_data. + {'images': image, 'labels': labels}: if mode == ModeKeys.PREDICT + or ModeKeys.PREDICT_WITH_GT. + """ + with tf.name_scope('parser'): + data = self._example_decoder.decode(value) + return self._parse_fn(data) + + def _parse_train_data(self, data): + """Parses data for training. + + Args: + data: the decoded tensor dictionary from TfExampleDecoder. 
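Back-of-the-envelope arithmetic for the anchor arguments documented above; the values below are examples, not defaults from this patch. At pyramid level l the base anchor edge is anchor_size * 2**l pixels, and each feature location carries num_scales * len(aspect_ratios) anchors:

    min_level, max_level = 3, 7
    num_scales = 3
    aspect_ratios = [1.0, 2.0, 0.5]
    anchor_size = 4.0

    anchors_per_location = num_scales * len(aspect_ratios)   # 9
    for level in range(min_level, max_level + 1):
      stride = 2 ** level
      for octave in range(num_scales):
        edge = anchor_size * stride * 2 ** (octave / num_scales)
        # e.g. level 3, octave 0: 4.0 * 8 = 32 pixels on a side.
        print(level, stride, round(edge, 1))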
+ + Returns: + image: image tensor that is preproessed to have normalized value and + dimension [output_size[0], output_size[1], 3] + labels: a dictionary of tensors used for training. The following describes + {key: value} pairs in the dictionary. + image_info: a 2D `Tensor` that encodes the information of the image and + the applied preprocessing. It is in the format of + [[original_height, original_width], [scaled_height, scaled_width], + anchor_boxes: ordered dictionary with keys + [min_level, min_level+1, ..., max_level]. The values are tensor with + shape [height_l, width_l, 4] representing anchor boxes at each level. + rpn_score_targets: ordered dictionary with keys + [min_level, min_level+1, ..., max_level]. The values are tensor with + shape [height_l, width_l, anchors_per_location]. The height_l and + width_l represent the dimension of class logits at l-th level. + rpn_box_targets: ordered dictionary with keys + [min_level, min_level+1, ..., max_level]. The values are tensor with + shape [height_l, width_l, anchors_per_location * 4]. The height_l and + width_l represent the dimension of bounding box regression output at + l-th level. + gt_boxes: Groundtruth bounding box annotations. The box is represented + in [y1, x1, y2, x2] format. The coordinates are w.r.t the scaled + image that is fed to the network. The tennsor is padded with -1 to + the fixed dimension [self._max_num_instances, 4]. + gt_classes: Groundtruth classes annotations. The tennsor is padded + with -1 to the fixed dimension [self._max_num_instances]. + gt_masks: groundtrugh masks cropped by the bounding box and + resized to a fixed size determined by mask_crop_size. + """ + classes = data['groundtruth_classes'] + boxes = data['groundtruth_boxes'] + if self._include_mask: + masks = data['groundtruth_instance_masks'] + + is_crowds = data['groundtruth_is_crowd'] + # Skips annotations with `is_crowd` = True. + if self._skip_crowd_during_training and self._is_training: + num_groundtrtuhs = tf.shape(classes)[0] + with tf.control_dependencies([num_groundtrtuhs, is_crowds]): + indices = tf.cond( + tf.greater(tf.size(is_crowds), 0), + lambda: tf.where(tf.logical_not(is_crowds))[:, 0], + lambda: tf.cast(tf.range(num_groundtrtuhs), tf.int64)) + classes = tf.gather(classes, indices) + boxes = tf.gather(boxes, indices) + if self._include_mask: + masks = tf.gather(masks, indices) + + # Gets original image and its size. + image = data['image'] + image_shape = tf.shape(image)[0:2] + + # Normalizes image with mean and std pixel values. + image = input_utils.normalize_image(image) + + # Flips image randomly during training. + if self._aug_rand_hflip: + if self._include_mask: + image, boxes, masks = input_utils.random_horizontal_flip( + image, boxes, masks) + else: + image, boxes = input_utils.random_horizontal_flip( + image, boxes) + + # Converts boxes from normalized coordinates to pixel coordinates. + # Now the coordinates of boxes are w.r.t. the original image. + boxes = box_utils.denormalize_boxes(boxes, image_shape) + + # Resizes and crops image. + image, image_info = input_utils.resize_and_crop_image( + image, + self._output_size, + padded_size=input_utils.compute_padded_size( + self._output_size, 2 ** self._max_level), + aug_scale_min=self._aug_scale_min, + aug_scale_max=self._aug_scale_max) + image_height, image_width, _ = image.get_shape().as_list() + + # Resizes and crops boxes. + # Now the coordinates of boxes are w.r.t the scaled image. 
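A worked example of the image_info rows consumed just below; the full four-row layout is spelled out in the RetinaNet parser docstring later in this patch, and the numbers here are made up. Conceptually, resize_and_crop_boxes scales original-image boxes by [y_scale, x_scale] and shifts them by the crop offset:

    import numpy as np

    image_info = np.array([
        [480., 640.],    # [original_height, original_width]
        [600., 800.],    # [scaled_height, scaled_width]
        [1.25, 1.25],    # [y_scale, x_scale] = scaled / original
        [0., 0.],        # [y_offset, x_offset] of the crop in the scaled image
    ])
    image_scale = image_info[2, :]
    offset = image_info[3, :]

    box = np.array([100., 200., 300., 400.])     # [y1, x1, y2, x2], original pixels
    scaled_box = box * np.tile(image_scale, 2) - np.tile(offset, 2)
    print(scaled_box)                             # [125. 250. 375. 500.]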
+ image_scale = image_info[2, :] + offset = image_info[3, :] + boxes = input_utils.resize_and_crop_boxes( + boxes, image_scale, image_info[1, :], offset) + + # Filters out ground truth boxes that are all zeros. + indices = box_utils.get_non_empty_box_indices(boxes) + boxes = tf.gather(boxes, indices) + classes = tf.gather(classes, indices) + if self._include_mask: + masks = tf.gather(masks, indices) + # Transfer boxes to the original image space and do normalization. + cropped_boxes = boxes + tf.tile(tf.expand_dims(offset, axis=0), [1, 2]) + cropped_boxes /= tf.tile(tf.expand_dims(image_scale, axis=0), [1, 2]) + cropped_boxes = box_utils.normalize_boxes(cropped_boxes, image_shape) + num_masks = tf.shape(masks)[0] + masks = tf.image.crop_and_resize( + tf.expand_dims(masks, axis=-1), + cropped_boxes, + box_indices=tf.range(num_masks, dtype=tf.int32), + crop_size=[self._mask_crop_size, self._mask_crop_size], + method='bilinear') + masks = tf.squeeze(masks, axis=-1) + + # Assigns anchor targets. + # Note that after the target assignment, box targets are absolute pixel + # offsets w.r.t. the scaled image. + input_anchor = anchor.Anchor( + self._min_level, + self._max_level, + self._num_scales, + self._aspect_ratios, + self._anchor_size, + (image_height, image_width)) + anchor_labeler = anchor.RpnAnchorLabeler( + input_anchor, + self._rpn_match_threshold, + self._rpn_unmatched_threshold, + self._rpn_batch_size_per_im, + self._rpn_fg_fraction) + rpn_score_targets, rpn_box_targets = anchor_labeler.label_anchors( + boxes, tf.cast(tf.expand_dims(classes, axis=-1), dtype=tf.float32)) + + # If bfloat16 is used, casts input image to tf.bfloat16. + if self._use_bfloat16: + image = tf.cast(image, dtype=tf.bfloat16) + + inputs = { + 'image': image, + 'image_info': image_info, + } + # Packs labels for model_fn outputs. + labels = { + 'anchor_boxes': input_anchor.multilevel_boxes, + 'image_info': image_info, + 'rpn_score_targets': rpn_score_targets, + 'rpn_box_targets': rpn_box_targets, + } + inputs['gt_boxes'] = input_utils.pad_to_fixed_size(boxes, + self._max_num_instances, + -1) + inputs['gt_classes'] = input_utils.pad_to_fixed_size( + classes, self._max_num_instances, -1) + if self._include_mask: + inputs['gt_masks'] = input_utils.pad_to_fixed_size( + masks, self._max_num_instances, -1) + + return inputs, labels + + def _parse_eval_data(self, data): + """Parses data for evaluation.""" + raise NotImplementedError('Not implemented!') + + def _parse_predict_data(self, data): + """Parses data for prediction. + + Args: + data: the decoded tensor dictionary from TfExampleDecoder. + + Returns: + A dictionary of {'images': image, 'labels': labels} where + image: image tensor that is preproessed to have normalized value and + dimension [output_size[0], output_size[1], 3] + labels: a dictionary of tensors used for training. The following + describes {key: value} pairs in the dictionary. + source_ids: Source image id. Default value -1 if the source id is + empty in the groundtruth annotation. + image_info: a 2D `Tensor` that encodes the information of the image + and the applied preprocessing. It is in the format of + [[original_height, original_width], [scaled_height, scaled_width], + anchor_boxes: ordered dictionary with keys + [min_level, min_level+1, ..., max_level]. The values are tensor with + shape [height_l, width_l, 4] representing anchor boxes at each + level. + """ + # Gets original image and its size. 
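A tiny illustration of the -1 padding convention used for gt_boxes and gt_classes in _parse_train_data above (input_utils.pad_to_fixed_size is the real helper; the values here are illustrative). Downstream code can recover the valid rows by masking on the pad value:

    import tensorflow as tf

    max_num_instances = 5
    boxes = tf.constant([[10., 10., 50., 60.],
                         [20., 30., 80., 90.]])
    classes = tf.constant([3., 17.])

    num_gt = int(boxes.shape[0])                          # 2 in this toy example
    padded_boxes = tf.pad(boxes, [[0, max_num_instances - num_gt], [0, 0]],
                          constant_values=-1)
    padded_classes = tf.pad(classes, [[0, max_num_instances - num_gt]],
                            constant_values=-1)

    valid = padded_classes > -1                   # [True, True, False, False, False]
    real_boxes = tf.boolean_mask(padded_boxes, valid)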
+ image = data['image'] + image_shape = tf.shape(image)[0:2] + + # Normalizes image with mean and std pixel values. + image = input_utils.normalize_image(image) + + # Resizes and crops image. + image, image_info = input_utils.resize_and_crop_image( + image, + self._output_size, + padded_size=input_utils.compute_padded_size( + self._output_size, 2 ** self._max_level), + aug_scale_min=1.0, + aug_scale_max=1.0) + image_height, image_width, _ = image.get_shape().as_list() + + # If bfloat16 is used, casts input image to tf.bfloat16. + if self._use_bfloat16: + image = tf.cast(image, dtype=tf.bfloat16) + + # Compute Anchor boxes. + input_anchor = anchor.Anchor( + self._min_level, + self._max_level, + self._num_scales, + self._aspect_ratios, + self._anchor_size, + (image_height, image_width)) + + labels = { + 'image_info': image_info, + } + + if self._mode == ModeKeys.PREDICT_WITH_GT: + # Converts boxes from normalized coordinates to pixel coordinates. + boxes = box_utils.denormalize_boxes( + data['groundtruth_boxes'], image_shape) + groundtruths = { + 'source_id': data['source_id'], + 'height': data['height'], + 'width': data['width'], + 'num_detections': tf.shape(data['groundtruth_classes']), + 'boxes': boxes, + 'classes': data['groundtruth_classes'], + 'areas': data['groundtruth_area'], + 'is_crowds': tf.cast(data['groundtruth_is_crowd'], tf.int32), + } + groundtruths['source_id'] = dataloader_utils.process_source_id( + groundtruths['source_id']) + groundtruths = dataloader_utils.pad_groundtruths_to_fixed_size( + groundtruths, self._max_num_instances) + # TODO(yeqing): Remove the `groundtrtuh` layer key (no longer needed). + labels['groundtruths'] = groundtruths + inputs = { + 'image': image, + 'image_info': image_info, + } + + return inputs, labels diff --git a/models/official/vision/detection/dataloader/mode_keys.py b/models/official/vision/detection/dataloader/mode_keys.py new file mode 100644 index 0000000000000000000000000000000000000000..020382b2486ca25a41f0c3eb88b1f2038c538e7e --- /dev/null +++ b/models/official/vision/detection/dataloader/mode_keys.py @@ -0,0 +1,33 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Standard names for input dataloader modes. + +The following standard keys are defined: + +* `TRAIN`: training mode. +* `EVAL`: evaluation mode. +* `PREDICT`: prediction mode. +* `PREDICT_WITH_GT`: prediction mode with groundtruths in returned variables. 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +TRAIN = 'train' +EVAL = 'eval' +PREDICT = 'predict' +PREDICT_WITH_GT = 'predict_with_gt' diff --git a/models/official/vision/detection/dataloader/retinanet_parser.py b/models/official/vision/detection/dataloader/retinanet_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..d226a6da7e2fc2e650ad6ecdfb5a431d13df97a3 --- /dev/null +++ b/models/official/vision/detection/dataloader/retinanet_parser.py @@ -0,0 +1,422 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Data parser and processing. + +Parse image and ground truths in a dataset to training targets and package them +into (image, labels) tuple for RetinaNet. + +T.-Y. Lin, P. Goyal, R. Girshick, K. He, and P. Dollar +Focal Loss for Dense Object Detection. arXiv:1708.02002 +""" + +import tensorflow as tf + +from official.vision.detection.dataloader import anchor +from official.vision.detection.dataloader import mode_keys as ModeKeys +from official.vision.detection.dataloader import tf_example_decoder +from official.vision.detection.utils import box_utils +from official.vision.detection.utils import input_utils + + +def process_source_id(source_id): + """Processes source_id to the right format.""" + if source_id.dtype == tf.string: + source_id = tf.cast(tf.strings.to_number(source_id), tf.int32) + with tf.control_dependencies([source_id]): + source_id = tf.cond( + pred=tf.equal(tf.size(input=source_id), 0), + true_fn=lambda: tf.cast(tf.constant(-1), tf.int32), + false_fn=lambda: tf.identity(source_id)) + return source_id + + +def pad_groundtruths_to_fixed_size(gt, n): + """Pads the first dimension of groundtruths labels to the fixed size.""" + gt['boxes'] = input_utils.pad_to_fixed_size(gt['boxes'], n, -1) + gt['is_crowds'] = input_utils.pad_to_fixed_size(gt['is_crowds'], n, 0) + gt['areas'] = input_utils.pad_to_fixed_size(gt['areas'], n, -1) + gt['classes'] = input_utils.pad_to_fixed_size(gt['classes'], n, -1) + return gt + + +class Parser(object): + """Parser to parse an image and its annotations into a dictionary of tensors.""" + + def __init__(self, + output_size, + min_level, + max_level, + num_scales, + aspect_ratios, + anchor_size, + match_threshold=0.5, + unmatched_threshold=0.5, + aug_rand_hflip=False, + aug_scale_min=1.0, + aug_scale_max=1.0, + use_autoaugment=False, + autoaugment_policy_name='v0', + skip_crowd_during_training=True, + max_num_instances=100, + use_bfloat16=True, + mode=None): + """Initializes parameters for parsing annotations in the dataset. + + Args: + output_size: `Tensor` or `list` for [height, width] of output image. The + output_size should be divided by the largest feature stride 2^max_level. + min_level: `int` number of minimum level of the output feature pyramid. 
+ max_level: `int` number of maximum level of the output feature pyramid. + num_scales: `int` number representing intermediate scales added + on each level. For instances, num_scales=2 adds one additional + intermediate anchor scales [2^0, 2^0.5] on each level. + aspect_ratios: `list` of float numbers representing the aspect raito + anchors added on each level. The number indicates the ratio of width to + height. For instances, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors + on each scale level. + anchor_size: `float` number representing the scale of size of the base + anchor to the feature stride 2^level. + match_threshold: `float` number between 0 and 1 representing the + lower-bound threshold to assign positive labels for anchors. An anchor + with a score over the threshold is labeled positive. + unmatched_threshold: `float` number between 0 and 1 representing the + upper-bound threshold to assign negative labels for anchors. An anchor + with a score below the threshold is labeled negative. + aug_rand_hflip: `bool`, if True, augment training with random + horizontal flip. + aug_scale_min: `float`, the minimum scale applied to `output_size` for + data augmentation during training. + aug_scale_max: `float`, the maximum scale applied to `output_size` for + data augmentation during training. + use_autoaugment: `bool`, if True, use the AutoAugment augmentation policy + during training. + autoaugment_policy_name: `string` that specifies the name of the + AutoAugment policy that will be used during training. + skip_crowd_during_training: `bool`, if True, skip annotations labeled with + `is_crowd` equals to 1. + max_num_instances: `int` number of maximum number of instances in an + image. The groundtruth data will be padded to `max_num_instances`. + use_bfloat16: `bool`, if True, cast output image to tf.bfloat16. + mode: a ModeKeys. Specifies if this is training, evaluation, prediction + or prediction with groundtruths in the outputs. + """ + self._mode = mode + self._max_num_instances = max_num_instances + self._skip_crowd_during_training = skip_crowd_during_training + self._is_training = (mode == ModeKeys.TRAIN) + + self._example_decoder = tf_example_decoder.TfExampleDecoder( + include_mask=False) + + # Anchor. + self._output_size = output_size + self._min_level = min_level + self._max_level = max_level + self._num_scales = num_scales + self._aspect_ratios = aspect_ratios + self._anchor_size = anchor_size + self._match_threshold = match_threshold + self._unmatched_threshold = unmatched_threshold + + # Data augmentation. + self._aug_rand_hflip = aug_rand_hflip + self._aug_scale_min = aug_scale_min + self._aug_scale_max = aug_scale_max + + # Data Augmentation with AutoAugment. + self._use_autoaugment = use_autoaugment + self._autoaugment_policy_name = autoaugment_policy_name + + # Device. + self._use_bfloat16 = use_bfloat16 + + # Data is parsed depending on the model Modekey. + if mode == ModeKeys.TRAIN: + self._parse_fn = self._parse_train_data + elif mode == ModeKeys.EVAL: + self._parse_fn = self._parse_eval_data + elif mode == ModeKeys.PREDICT or mode == ModeKeys.PREDICT_WITH_GT: + self._parse_fn = self._parse_predict_data + else: + raise ValueError('mode is not defined.') + + def __call__(self, value): + """Parses data to an image and associated training labels. + + Args: + value: a string tensor holding a serialized tf.Example proto. 
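A toy version of the three-way anchor assignment implied by match_threshold and unmatched_threshold above. The real logic lives in anchor.AnchorLabeler; only the thresholding rule is shown, with made-up IoU values and an ignore band between the two thresholds:

    import numpy as np

    match_threshold = 0.5
    unmatched_threshold = 0.4
    iou_per_anchor = np.array([0.72, 0.45, 0.10, 0.55])

    labels = np.where(iou_per_anchor >= match_threshold, 1,              # positive
                      np.where(iou_per_anchor < unmatched_threshold, 0,  # negative
                               -1))                                      # ignored
    print(labels)    # [ 1 -1  0  1]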
+ + Returns: + image: image tensor that is preproessed to have normalized value and + dimension [output_size[0], output_size[1], 3] + labels: + cls_targets: ordered dictionary with keys + [min_level, min_level+1, ..., max_level]. The values are tensor with + shape [height_l, width_l, anchors_per_location]. The height_l and + width_l represent the dimension of class logits at l-th level. + box_targets: ordered dictionary with keys + [min_level, min_level+1, ..., max_level]. The values are tensor with + shape [height_l, width_l, anchors_per_location * 4]. The height_l and + width_l represent the dimension of bounding box regression output at + l-th level. + num_positives: number of positive anchors in the image. + anchor_boxes: ordered dictionary with keys + [min_level, min_level+1, ..., max_level]. The values are tensor with + shape [height_l, width_l, 4] representing anchor boxes at each level. + image_info: a 2D `Tensor` that encodes the information of the image and + the applied preprocessing. It is in the format of + [[original_height, original_width], [scaled_height, scaled_width], + [y_scale, x_scale], [y_offset, x_offset]]. + groundtruths: + source_id: source image id. Default value -1 if the source id is empty + in the groundtruth annotation. + boxes: groundtruth bounding box annotations. The box is represented in + [y1, x1, y2, x2] format. The tennsor is padded with -1 to the fixed + dimension [self._max_num_instances, 4]. + classes: groundtruth classes annotations. The tennsor is padded with + -1 to the fixed dimension [self._max_num_instances]. + areas: groundtruth areas annotations. The tennsor is padded with -1 + to the fixed dimension [self._max_num_instances]. + is_crowds: groundtruth annotations to indicate if an annotation + represents a group of instances by value {0, 1}. The tennsor is + padded with 0 to the fixed dimension [self._max_num_instances]. + """ + with tf.name_scope('parser'): + data = self._example_decoder.decode(value) + return self._parse_fn(data) + + def _parse_train_data(self, data): + """Parses data for training and evaluation.""" + classes = data['groundtruth_classes'] + boxes = data['groundtruth_boxes'] + is_crowds = data['groundtruth_is_crowd'] + # Skips annotations with `is_crowd` = True. + if self._skip_crowd_during_training and self._is_training: + num_groundtrtuhs = tf.shape(input=classes)[0] + with tf.control_dependencies([num_groundtrtuhs, is_crowds]): + indices = tf.cond( + pred=tf.greater(tf.size(input=is_crowds), 0), + true_fn=lambda: tf.where(tf.logical_not(is_crowds))[:, 0], + false_fn=lambda: tf.cast(tf.range(num_groundtrtuhs), tf.int64)) + classes = tf.gather(classes, indices) + boxes = tf.gather(boxes, indices) + + # Gets original image and its size. + image = data['image'] + + image_shape = tf.shape(input=image)[0:2] + + # Normalizes image with mean and std pixel values. + image = input_utils.normalize_image(image) + + # Flips image randomly during training. + if self._aug_rand_hflip: + image, boxes = input_utils.random_horizontal_flip(image, boxes) + + # Converts boxes from normalized coordinates to pixel coordinates. + boxes = box_utils.denormalize_boxes(boxes, image_shape) + + # Resizes and crops image. 
+ image, image_info = input_utils.resize_and_crop_image( + image, + self._output_size, + padded_size=input_utils.compute_padded_size( + self._output_size, 2 ** self._max_level), + aug_scale_min=self._aug_scale_min, + aug_scale_max=self._aug_scale_max) + image_height, image_width, _ = image.get_shape().as_list() + + # Resizes and crops boxes. + image_scale = image_info[2, :] + offset = image_info[3, :] + boxes = input_utils.resize_and_crop_boxes( + boxes, image_scale, image_info[1, :], offset) + # Filters out ground truth boxes that are all zeros. + indices = box_utils.get_non_empty_box_indices(boxes) + boxes = tf.gather(boxes, indices) + classes = tf.gather(classes, indices) + + # Assigns anchors. + input_anchor = anchor.Anchor( + self._min_level, self._max_level, self._num_scales, + self._aspect_ratios, self._anchor_size, (image_height, image_width)) + anchor_labeler = anchor.AnchorLabeler( + input_anchor, self._match_threshold, self._unmatched_threshold) + (cls_targets, box_targets, num_positives) = anchor_labeler.label_anchors( + boxes, + tf.cast(tf.expand_dims(classes, axis=1), tf.float32)) + + # If bfloat16 is used, casts input image to tf.bfloat16. + if self._use_bfloat16: + image = tf.cast(image, dtype=tf.bfloat16) + + # Packs labels for model_fn outputs. + labels = { + 'cls_targets': cls_targets, + 'box_targets': box_targets, + 'anchor_boxes': input_anchor.multilevel_boxes, + 'num_positives': num_positives, + 'image_info': image_info, + } + return image, labels + + def _parse_eval_data(self, data): + """Parses data for training and evaluation.""" + groundtruths = {} + classes = data['groundtruth_classes'] + boxes = data['groundtruth_boxes'] + + # Gets original image and its size. + image = data['image'] + image_shape = tf.shape(input=image)[0:2] + + # Normalizes image with mean and std pixel values. + image = input_utils.normalize_image(image) + + # Converts boxes from normalized coordinates to pixel coordinates. + boxes = box_utils.denormalize_boxes(boxes, image_shape) + + # Resizes and crops image. + image, image_info = input_utils.resize_and_crop_image( + image, + self._output_size, + padded_size=input_utils.compute_padded_size( + self._output_size, 2 ** self._max_level), + aug_scale_min=1.0, + aug_scale_max=1.0) + image_height, image_width, _ = image.get_shape().as_list() + + # Resizes and crops boxes. + image_scale = image_info[2, :] + offset = image_info[3, :] + boxes = input_utils.resize_and_crop_boxes( + boxes, image_scale, image_info[1, :], offset) + # Filters out ground truth boxes that are all zeros. + indices = box_utils.get_non_empty_box_indices(boxes) + boxes = tf.gather(boxes, indices) + classes = tf.gather(classes, indices) + + # Assigns anchors. + input_anchor = anchor.Anchor( + self._min_level, self._max_level, self._num_scales, + self._aspect_ratios, self._anchor_size, (image_height, image_width)) + anchor_labeler = anchor.AnchorLabeler( + input_anchor, self._match_threshold, self._unmatched_threshold) + (cls_targets, box_targets, num_positives) = anchor_labeler.label_anchors( + boxes, + tf.cast(tf.expand_dims(classes, axis=1), tf.float32)) + + # If bfloat16 is used, casts input image to tf.bfloat16. + if self._use_bfloat16: + image = tf.cast(image, dtype=tf.bfloat16) + + # Sets up groundtruth data for evaluation. 
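A rough sketch of why padded_size is derived from 2**max_level in the resize calls above: features at pyramid level l are downsampled by 2**l, so the padded input has to be a multiple of the largest stride. input_utils.compute_padded_size is not shown in this patch; the version below is only a plausible stand-in:

    import math

    def padded_size(output_size, stride):
      # Round each spatial dimension up to the next multiple of the stride.
      return [int(math.ceil(s / stride) * stride) for s in output_size]

    print(padded_size([640, 640], 2 ** 7))   # [640, 640]
    print(padded_size([600, 800], 2 ** 7))   # [640, 896]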
+ groundtruths = { + 'source_id': data['source_id'], + 'num_groundtrtuhs': tf.shape(data['groundtruth_classes']), + 'image_info': image_info, + 'boxes': box_utils.denormalize_boxes( + data['groundtruth_boxes'], image_shape), + 'classes': data['groundtruth_classes'], + 'areas': data['groundtruth_area'], + 'is_crowds': tf.cast(data['groundtruth_is_crowd'], tf.int32), + } + groundtruths['source_id'] = process_source_id(groundtruths['source_id']) + groundtruths = pad_groundtruths_to_fixed_size( + groundtruths, self._max_num_instances) + + # Packs labels for model_fn outputs. + labels = { + 'cls_targets': cls_targets, + 'box_targets': box_targets, + 'anchor_boxes': input_anchor.multilevel_boxes, + 'num_positives': num_positives, + 'image_info': image_info, + 'groundtruths': groundtruths, + } + return image, labels + + def _parse_predict_data(self, data): + """Parses data for prediction.""" + # Gets original image and its size. + image = data['image'] + image_shape = tf.shape(input=image)[0:2] + + # Normalizes image with mean and std pixel values. + image = input_utils.normalize_image(image) + + # Resizes and crops image. + image, image_info = input_utils.resize_and_crop_image( + image, + self._output_size, + padded_size=input_utils.compute_padded_size( + self._output_size, 2 ** self._max_level), + aug_scale_min=1.0, + aug_scale_max=1.0) + image_height, image_width, _ = image.get_shape().as_list() + + # If bfloat16 is used, casts input image to tf.bfloat16. + if self._use_bfloat16: + image = tf.cast(image, dtype=tf.bfloat16) + + # Compute Anchor boxes. + input_anchor = anchor.Anchor( + self._min_level, self._max_level, self._num_scales, + self._aspect_ratios, self._anchor_size, (image_height, image_width)) + + labels = { + 'anchor_boxes': input_anchor.multilevel_boxes, + 'image_info': image_info, + } + # If mode is PREDICT_WITH_GT, returns groundtruths and training targets + # in labels. + if self._mode == ModeKeys.PREDICT_WITH_GT: + # Converts boxes from normalized coordinates to pixel coordinates. + boxes = box_utils.denormalize_boxes( + data['groundtruth_boxes'], image_shape) + groundtruths = { + 'source_id': data['source_id'], + 'num_detections': tf.shape(data['groundtruth_classes']), + 'boxes': boxes, + 'classes': data['groundtruth_classes'], + 'areas': data['groundtruth_area'], + 'is_crowds': tf.cast(data['groundtruth_is_crowd'], tf.int32), + } + groundtruths['source_id'] = process_source_id(groundtruths['source_id']) + groundtruths = pad_groundtruths_to_fixed_size( + groundtruths, self._max_num_instances) + labels['groundtruths'] = groundtruths + + # Computes training objective for evaluation loss. + classes = data['groundtruth_classes'] + + image_scale = image_info[2, :] + offset = image_info[3, :] + boxes = input_utils.resize_and_crop_boxes( + boxes, image_scale, image_info[1, :], offset) + # Filters out ground truth boxes that are all zeros. + indices = box_utils.get_non_empty_box_indices(boxes) + boxes = tf.gather(boxes, indices) + + # Assigns anchors. 
+ anchor_labeler = anchor.AnchorLabeler( + input_anchor, self._match_threshold, self._unmatched_threshold) + (cls_targets, box_targets, num_positives) = anchor_labeler.label_anchors( + boxes, + tf.cast(tf.expand_dims(classes, axis=1), tf.float32)) + labels['cls_targets'] = cls_targets + labels['box_targets'] = box_targets + labels['num_positives'] = num_positives + return image, labels diff --git a/models/official/vision/detection/dataloader/shapemask_parser.py b/models/official/vision/detection/dataloader/shapemask_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..3bc368c0ef290291405157b772ed523f3725e0a3 --- /dev/null +++ b/models/official/vision/detection/dataloader/shapemask_parser.py @@ -0,0 +1,522 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Data parser and processing. + +Parse image and ground truths in a dataset to training targets and package them +into (image, labels) tuple for ShapeMask. + +Weicheng Kuo, Anelia Angelova, Jitendra Malik, Tsung-Yi Lin +ShapeMask: Learning to Segment Novel Objects by Refining Shape Priors. +arXiv:1904.03239. +""" + +import tensorflow as tf + +from official.vision.detection.dataloader import anchor +from official.vision.detection.dataloader import mode_keys as ModeKeys +from official.vision.detection.dataloader import tf_example_decoder +from official.vision.detection.utils import box_utils +from official.vision.detection.utils import class_utils +from official.vision.detection.utils import dataloader_utils +from official.vision.detection.utils import input_utils + + +def pad_to_size(input_tensor, size): + """Pads data with zeros to a given length at the first dimension if needed. + + Args: + input_tensor: `Tensor` with any dimension. + size: `int` number for the first dimension of output Tensor. + + Returns: + `Tensor` with the first dimension padded to `size` if the first diemsion + is less than `size`, otherwise no padding. + """ + input_shape = tf.shape(input_tensor) + padding_shape = [] + + # Computes the padding length on the first dimension. + padding_length = tf.maximum(0, size - tf.shape(input_tensor)[0]) + assert_length = tf.Assert( + tf.greater_equal(padding_length, 0), [padding_length]) + with tf.control_dependencies([assert_length]): + padding_shape.append(padding_length) + + # Copies shapes of the rest of input shape dimensions. + for i in range(1, len(input_shape)): + padding_shape.append(tf.shape(input=input_tensor)[i]) + + # Pads input tensor to the fixed first dimension. 
+ paddings = tf.cast(tf.zeros(padding_shape), input_tensor.dtype) + padded_tensor = tf.concat([input_tensor, paddings], axis=0) + return padded_tensor + + +class Parser(object): + """ShapeMask Parser to parse an image and its annotations into a dictionary of tensors.""" + + def __init__(self, + output_size, + min_level, + max_level, + num_scales, + aspect_ratios, + anchor_size, + use_category=True, + outer_box_scale=1.0, + box_jitter_scale=0.025, + num_sampled_masks=8, + mask_crop_size=32, + mask_min_level=3, + mask_max_level=5, + upsample_factor=4, + match_threshold=0.5, + unmatched_threshold=0.5, + aug_rand_hflip=False, + aug_scale_min=1.0, + aug_scale_max=1.0, + skip_crowd_during_training=True, + max_num_instances=100, + use_bfloat16=True, + mask_train_class='all', + mode=None): + """Initializes parameters for parsing annotations in the dataset. + + Args: + output_size: `Tensor` or `list` for [height, width] of output image. The + output_size should be divided by the largest feature stride 2^max_level. + min_level: `int` number of minimum level of the output feature pyramid. + max_level: `int` number of maximum level of the output feature pyramid. + num_scales: `int` number representing intermediate scales added + on each level. For instances, num_scales=2 adds one additional + intermediate anchor scales [2^0, 2^0.5] on each level. + aspect_ratios: `list` of float numbers representing the aspect raito + anchors added on each level. The number indicates the ratio of width to + height. For instances, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors + on each scale level. + anchor_size: `float` number representing the scale of size of the base + anchor to the feature stride 2^level. + use_category: if `False`, treat all object in all classes in one + foreground category. + outer_box_scale: `float` number in a range of [1.0, inf) representing + the scale from object box to outer box. The mask branch predicts + instance mask enclosed in outer box. + box_jitter_scale: `float` number representing the noise magnitude to + jitter the training groundtruth boxes for mask branch. + num_sampled_masks: `int` number of sampled masks for training. + mask_crop_size: `list` for [height, width] of output training masks. + mask_min_level: `int` number indicating the minimum feature level to + obtain instance features. + mask_max_level: `int` number indicating the maximum feature level to + obtain instance features. + upsample_factor: `int` factor of upsampling the fine mask predictions. + match_threshold: `float` number between 0 and 1 representing the + lower-bound threshold to assign positive labels for anchors. An anchor + with a score over the threshold is labeled positive. + unmatched_threshold: `float` number between 0 and 1 representing the + upper-bound threshold to assign negative labels for anchors. An anchor + with a score below the threshold is labeled negative. + aug_rand_hflip: `bool`, if True, augment training with random + horizontal flip. + aug_scale_min: `float`, the minimum scale applied to `output_size` for + data augmentation during training. + aug_scale_max: `float`, the maximum scale applied to `output_size` for + data augmentation during training. + skip_crowd_during_training: `bool`, if True, skip annotations labeled with + `is_crowd` equals to 1. + max_num_instances: `int` number of maximum number of instances in an + image. The groundtruth data will be padded to `max_num_instances`. + use_bfloat16: `bool`, if True, cast output image to tf.bfloat16. 
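A quick check of the pad_to_size helper defined at the top of this file: it zero-pads the first dimension up to size and leaves longer tensors untouched (it never truncates). Shapes below are illustrative:

    import tensorflow as tf

    masks = tf.ones([3, 28, 28])                    # 3 instance masks
    padded = pad_to_size(masks, 8)                  # shape [8, 28, 28]; last 5 slices are zeros
    unchanged = pad_to_size(tf.ones([10, 4]), 8)    # still shape [10, 4]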
+ mask_train_class: a string of experiment mode: `all`, `voc` or `nonvoc`. + mode: a ModeKeys. Specifies if this is training, evaluation, prediction + or prediction with groundtruths in the outputs. + """ + self._mode = mode + self._mask_train_class = mask_train_class + self._max_num_instances = max_num_instances + self._skip_crowd_during_training = skip_crowd_during_training + self._is_training = (mode == ModeKeys.TRAIN) + + self._example_decoder = tf_example_decoder.TfExampleDecoder( + include_mask=True) + + # Anchor. + self._output_size = output_size + self._min_level = min_level + self._max_level = max_level + self._num_scales = num_scales + self._aspect_ratios = aspect_ratios + self._anchor_size = anchor_size + self._match_threshold = match_threshold + self._unmatched_threshold = unmatched_threshold + + # Data augmentation. + self._aug_rand_hflip = aug_rand_hflip + self._aug_scale_min = aug_scale_min + self._aug_scale_max = aug_scale_max + + # Device. + self._use_bfloat16 = use_bfloat16 + + # ShapeMask specific. + # Control of which category to use. + self._use_category = use_category + self._num_sampled_masks = num_sampled_masks + self._mask_crop_size = mask_crop_size + self._mask_min_level = mask_min_level + self._mask_max_level = mask_max_level + self._outer_box_scale = outer_box_scale + self._box_jitter_scale = box_jitter_scale + self._up_sample_factor = upsample_factor + + # Data is parsed depending on the model Modekey. + if mode == ModeKeys.TRAIN: + self._parse_fn = self._parse_train_data + elif mode == ModeKeys.EVAL: + self._parse_fn = self._parse_eval_data + elif mode == ModeKeys.PREDICT or mode == ModeKeys.PREDICT_WITH_GT: + self._parse_fn = self._parse_predict_data + else: + raise ValueError('mode is not defined.') + + def __call__(self, value): + """Parses data to an image and associated training labels. + + Args: + value: a string tensor holding a serialized tf.Example proto. + + Returns: + inputs: + image: image tensor that is preproessed to have normalized value and + dimension [output_size[0], output_size[1], 3] + mask_boxes: sampled boxes that tightly enclose the training masks. The + box is represented in [y1, x1, y2, x2] format. The tensor is sampled + to the fixed dimension [self._num_sampled_masks, 4]. + mask_outer_boxes: loose box that enclose sampled tight box. The + box is represented in [y1, x1, y2, x2] format. The tensor is sampled + to the fixed dimension [self._num_sampled_masks, 4]. + mask_classes: the class ids of sampled training masks. The tensor has + shape [self._num_sampled_masks]. + labels: + cls_targets: ordered dictionary with keys + [min_level, min_level+1, ..., max_level]. The values are tensor with + shape [height_l, width_l, anchors_per_location]. The height_l and + width_l represent the dimension of class logits at l-th level. + box_targets: ordered dictionary with keys + [min_level, min_level+1, ..., max_level]. The values are tensor with + shape [height_l, width_l, anchors_per_location * 4]. The height_l and + width_l represent the dimension of bounding box regression output at + l-th level. + num_positives: number of positive anchors in the image. + anchor_boxes: ordered dictionary with keys + [min_level, min_level+1, ..., max_level]. The values are tensor with + shape [height_l, width_l, 4] representing anchor boxes at each level. + image_scale: 2D float `Tensor` representing scale factors that apply + to [height, width] of input image. + mask_targets: training binary mask targets. 
The tensor has shape + [self._num_sampled_masks, self._mask_crop_size, self._mask_crop_size]. + mask_is_valid: the binary tensor to indicate if the sampled masks are + valide. The sampled masks are invalid when no mask annotations are + included in the image. The tensor has shape [1]. + groundtruths: + source_id: source image id. Default value -1 if the source id is empty + in the groundtruth annotation. + boxes: groundtruth bounding box annotations. The box is represented in + [y1, x1, y2, x2] format. The tensor is padded with -1 to the fixed + dimension [self._max_num_instances, 4]. + classes: groundtruth classes annotations. The tensor is padded with + -1 to the fixed dimension [self._max_num_instances]. + areas: groundtruth areas annotations. The tensor is padded with -1 + to the fixed dimension [self._max_num_instances]. + is_crowds: groundtruth annotations to indicate if an annotation + represents a group of instances by value {0, 1}. The tensor is + padded with 0 to the fixed dimension [self._max_num_instances]. + """ + with tf.name_scope('parser'): + data = self._example_decoder.decode(value) + return self._parse_fn(data) + + def _parse_train_data(self, data): + """Parse data for ShapeMask training.""" + classes = data['groundtruth_classes'] + boxes = data['groundtruth_boxes'] + masks = data['groundtruth_instance_masks'] + is_crowds = data['groundtruth_is_crowd'] + # Skips annotations with `is_crowd` = True. + if self._skip_crowd_during_training and self._is_training: + num_groundtrtuhs = tf.shape(classes)[0] + with tf.control_dependencies([num_groundtrtuhs, is_crowds]): + indices = tf.cond( + tf.greater(tf.size(is_crowds), 0), + lambda: tf.where(tf.logical_not(is_crowds))[:, 0], + lambda: tf.cast(tf.range(num_groundtrtuhs), tf.int64)) + classes = tf.gather(classes, indices) + boxes = tf.gather(boxes, indices) + masks = tf.gather(masks, indices) + + # Gets original image and its size. + image = data['image'] + image_shape = tf.shape(image)[0:2] + + # If not using category, makes all categories with id = 0. + if not self._use_category: + classes = tf.cast(tf.greater(classes, 0), dtype=tf.float32) + + # Normalizes image with mean and std pixel values. + image = input_utils.normalize_image(image) + + # Flips image randomly during training. + if self._aug_rand_hflip: + image, boxes, masks = input_utils.random_horizontal_flip( + image, boxes, masks) + + # Converts boxes from normalized coordinates to pixel coordinates. + boxes = box_utils.denormalize_boxes(boxes, image_shape) + + # Resizes and crops image. + image, image_info = input_utils.resize_and_crop_image( + image, + self._output_size, + self._output_size, + aug_scale_min=self._aug_scale_min, + aug_scale_max=self._aug_scale_max) + image_scale = image_info[2, :] + offset = image_info[3, :] + + # Resizes and crops boxes and masks. + boxes = input_utils.resize_and_crop_boxes( + boxes, image_scale, image_info[1, :], offset) + + # Filters out ground truth boxes that are all zeros. + indices = box_utils.get_non_empty_box_indices(boxes) + boxes = tf.gather(boxes, indices) + classes = tf.gather(classes, indices) + masks = tf.gather(masks, indices) + + # Assigns anchors. 
+ input_anchor = anchor.Anchor( + self._min_level, self._max_level, self._num_scales, + self._aspect_ratios, self._anchor_size, self._output_size) + anchor_labeler = anchor.AnchorLabeler( + input_anchor, self._match_threshold, self._unmatched_threshold) + (cls_targets, + box_targets, + num_positives) = anchor_labeler.label_anchors( + boxes, + tf.cast(tf.expand_dims(classes, axis=1), tf.float32)) + + # Sample groundtruth masks/boxes/classes for mask branch. + num_masks = tf.shape(masks)[0] + mask_shape = tf.shape(masks)[1:3] + + # Pad sampled boxes/masks/classes to a constant batch size. + padded_boxes = pad_to_size(boxes, self._num_sampled_masks) + padded_classes = pad_to_size(classes, self._num_sampled_masks) + padded_masks = pad_to_size(masks, self._num_sampled_masks) + + # Randomly sample groundtruth masks for mask branch training. For the image + # without groundtruth masks, it will sample the dummy padded tensors. + rand_indices = tf.random.shuffle( + tf.range(tf.maximum(num_masks, self._num_sampled_masks))) + rand_indices = tf.math.mod(rand_indices, tf.maximum(num_masks, 1)) + rand_indices = rand_indices[0:self._num_sampled_masks] + rand_indices = tf.reshape(rand_indices, [self._num_sampled_masks]) + + sampled_boxes = tf.gather(padded_boxes, rand_indices) + sampled_classes = tf.gather(padded_classes, rand_indices) + sampled_masks = tf.gather(padded_masks, rand_indices) + # Jitter the sampled boxes to mimic the noisy detections. + sampled_boxes = box_utils.jitter_boxes( + sampled_boxes, noise_scale=self._box_jitter_scale) + sampled_boxes = box_utils.clip_boxes(sampled_boxes, self._output_size) + # Compute mask targets in feature crop. A feature crop fully contains a + # sampled box. + mask_outer_boxes = box_utils.compute_outer_boxes( + sampled_boxes, tf.shape(image)[0:2], scale=self._outer_box_scale) + mask_outer_boxes = box_utils.clip_boxes(mask_outer_boxes, self._output_size) + # Compensate the offset of mask_outer_boxes to map it back to original image + # scale. + mask_outer_boxes_ori = mask_outer_boxes + mask_outer_boxes_ori += tf.tile(tf.expand_dims(offset, axis=0), [1, 2]) + mask_outer_boxes_ori /= tf.tile(tf.expand_dims(image_scale, axis=0), [1, 2]) + norm_mask_outer_boxes_ori = box_utils.normalize_boxes( + mask_outer_boxes_ori, mask_shape) + + # Set sampled_masks shape to [batch_size, height, width, 1]. 
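The mask-sampling trick above in isolation: shuffling range(max(num_masks, num_sampled_masks)) and reducing modulo num_masks yields num_sampled_masks indices, sampling with repetition when the image has fewer masks than samples. Toy values:

    import tensorflow as tf

    num_sampled_masks = 8
    num_masks = 3                               # e.g. the image has 3 instances

    rand_indices = tf.random.shuffle(
        tf.range(tf.maximum(num_masks, num_sampled_masks)))
    rand_indices = tf.math.mod(rand_indices, tf.maximum(num_masks, 1))
    rand_indices = rand_indices[0:num_sampled_masks]
    # rand_indices holds 8 values drawn from {0, 1, 2}. For an image with no
    # masks at all (num_masks == 0) every index is 0, which points at the dummy
    # zero rows added by pad_to_size.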
+ sampled_masks = tf.cast(tf.expand_dims(sampled_masks, axis=-1), tf.float32) + mask_targets = tf.image.crop_and_resize( + sampled_masks, + norm_mask_outer_boxes_ori, + box_indices=tf.range(self._num_sampled_masks), + crop_size=[self._mask_crop_size, self._mask_crop_size], + method='bilinear', + extrapolation_value=0, + name='train_mask_targets') + mask_targets = tf.where(tf.greater_equal(mask_targets, 0.5), + tf.ones_like(mask_targets), + tf.zeros_like(mask_targets)) + mask_targets = tf.squeeze(mask_targets, axis=-1) + if self._up_sample_factor > 1: + fine_mask_targets = tf.image.crop_and_resize( + sampled_masks, + norm_mask_outer_boxes_ori, + box_indices=tf.range(self._num_sampled_masks), + crop_size=[ + self._mask_crop_size * self._up_sample_factor, + self._mask_crop_size * self._up_sample_factor + ], + method='bilinear', + extrapolation_value=0, + name='train_mask_targets') + fine_mask_targets = tf.where( + tf.greater_equal(fine_mask_targets, 0.5), + tf.ones_like(fine_mask_targets), tf.zeros_like(fine_mask_targets)) + fine_mask_targets = tf.squeeze(fine_mask_targets, axis=-1) + else: + fine_mask_targets = mask_targets + + # If bfloat16 is used, casts input image to tf.bfloat16. + if self._use_bfloat16: + image = tf.cast(image, dtype=tf.bfloat16) + + valid_image = tf.cast(tf.not_equal(num_masks, 0), tf.int32) + if self._mask_train_class == 'all': + mask_is_valid = valid_image * tf.ones_like(sampled_classes, tf.int32) + else: + # Get the intersection of sampled classes with training splits. + mask_valid_classes = tf.cast( + tf.expand_dims( + class_utils.coco_split_class_ids(self._mask_train_class), 1), + sampled_classes.dtype) + match = tf.reduce_any( + tf.equal(tf.expand_dims(sampled_classes, 0), mask_valid_classes), 0) + mask_is_valid = valid_image * tf.cast(match, tf.int32) + + # Packs labels for model_fn outputs. + labels = { + 'cls_targets': cls_targets, + 'box_targets': box_targets, + 'anchor_boxes': input_anchor.multilevel_boxes, + 'num_positives': num_positives, + 'image_info': image_info, + # For ShapeMask. + 'mask_targets': mask_targets, + 'fine_mask_targets': fine_mask_targets, + 'mask_is_valid': mask_is_valid, + } + + inputs = { + 'image': image, + 'image_info': image_info, + 'mask_boxes': sampled_boxes, + 'mask_outer_boxes': mask_outer_boxes, + 'mask_classes': sampled_classes, + } + return inputs, labels + + def _parse_predict_data(self, data): + """Parse data for ShapeMask training.""" + classes = data['groundtruth_classes'] + boxes = data['groundtruth_boxes'] + masks = data['groundtruth_instance_masks'] + + # Gets original image and its size. + image = data['image'] + image_shape = tf.shape(image)[0:2] + + # If not using category, makes all categories with id = 0. + if not self._use_category: + classes = tf.cast(tf.greater(classes, 0), dtype=tf.float32) + + # Normalizes image with mean and std pixel values. + image = input_utils.normalize_image(image) + + # Converts boxes from normalized coordinates to pixel coordinates. + boxes = box_utils.denormalize_boxes(boxes, image_shape) + + # Resizes and crops image. + image, image_info = input_utils.resize_and_crop_image( + image, + self._output_size, + self._output_size, + aug_scale_min=1.0, + aug_scale_max=1.0) + image_scale = image_info[2, :] + offset = image_info[3, :] + + # Resizes and crops boxes and masks. 
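The mask_is_valid computation above is a broadcast set-membership test: every sampled class id is compared against every class id in the training split. The split ids below are placeholders; the real ones come from class_utils.coco_split_class_ids:

    import tensorflow as tf

    sampled_classes = tf.constant([3., 17., 64., 3.])
    split_class_ids = tf.constant([[3.], [64.], [72.]])   # shape [num_ids, 1]

    match = tf.reduce_any(
        tf.equal(tf.expand_dims(sampled_classes, 0), split_class_ids), 0)
    # match == [True, False, True, True]; multiplying by valid_image then keeps
    # only masks whose class belongs to the split, for images that have masks.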
+ boxes = input_utils.resize_and_crop_boxes( + boxes, image_scale, image_info[1, :], offset) + masks = input_utils.resize_and_crop_masks( + tf.expand_dims(masks, axis=-1), image_scale, self._output_size, offset) + + # Filters out ground truth boxes that are all zeros. + indices = box_utils.get_non_empty_box_indices(boxes) + boxes = tf.gather(boxes, indices) + classes = tf.gather(classes, indices) + + # Assigns anchors. + input_anchor = anchor.Anchor( + self._min_level, self._max_level, self._num_scales, + self._aspect_ratios, self._anchor_size, self._output_size) + anchor_labeler = anchor.AnchorLabeler( + input_anchor, self._match_threshold, self._unmatched_threshold) + + # If bfloat16 is used, casts input image to tf.bfloat16. + if self._use_bfloat16: + image = tf.cast(image, dtype=tf.bfloat16) + + labels = { + 'anchor_boxes': input_anchor.multilevel_boxes, + 'image_info': image_info, + } + if self._mode == ModeKeys.PREDICT_WITH_GT: + # Converts boxes from normalized coordinates to pixel coordinates. + groundtruths = { + 'source_id': data['source_id'], + 'height': data['height'], + 'width': data['width'], + 'num_detections': tf.shape(data['groundtruth_classes']), + 'boxes': box_utils.denormalize_boxes( + data['groundtruth_boxes'], image_shape), + 'classes': data['groundtruth_classes'], + # 'masks': tf.squeeze(masks, axis=-1), + 'areas': data['groundtruth_area'], + 'is_crowds': tf.cast(data['groundtruth_is_crowd'], tf.int32), + } + groundtruths['source_id'] = dataloader_utils.process_source_id( + groundtruths['source_id']) + groundtruths = dataloader_utils.pad_groundtruths_to_fixed_size( + groundtruths, self._max_num_instances) + # Computes training labels. + (cls_targets, + box_targets, + num_positives) = anchor_labeler.label_anchors( + boxes, + tf.cast(tf.expand_dims(classes, axis=1), tf.float32)) + # Packs labels for model_fn outputs. + labels.update({ + 'cls_targets': cls_targets, + 'box_targets': box_targets, + 'num_positives': num_positives, + 'groundtruths': groundtruths, + }) + + inputs = { + 'image': image, + 'image_info': image_info, + } + + return inputs, labels diff --git a/models/official/vision/detection/dataloader/tf_example_decoder.py b/models/official/vision/detection/dataloader/tf_example_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..f719a9168a4d3106600fffcc47c14cc90f3cadc7 --- /dev/null +++ b/models/official/vision/detection/dataloader/tf_example_decoder.py @@ -0,0 +1,156 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tensorflow Example proto decoder for object detection. + +A decoder to decode string tensors containing serialized tensorflow.Example +protos for object detection. 
+""" +import tensorflow as tf + + +class TfExampleDecoder(object): + """Tensorflow Example proto decoder.""" + + def __init__(self, include_mask=False): + self._include_mask = include_mask + self._keys_to_features = { + 'image/encoded': + tf.io.FixedLenFeature((), tf.string), + 'image/source_id': + tf.io.FixedLenFeature((), tf.string), + 'image/height': + tf.io.FixedLenFeature((), tf.int64), + 'image/width': + tf.io.FixedLenFeature((), tf.int64), + 'image/object/bbox/xmin': + tf.io.VarLenFeature(tf.float32), + 'image/object/bbox/xmax': + tf.io.VarLenFeature(tf.float32), + 'image/object/bbox/ymin': + tf.io.VarLenFeature(tf.float32), + 'image/object/bbox/ymax': + tf.io.VarLenFeature(tf.float32), + 'image/object/class/label': + tf.io.VarLenFeature(tf.int64), + 'image/object/area': + tf.io.VarLenFeature(tf.float32), + 'image/object/is_crowd': + tf.io.VarLenFeature(tf.int64), + } + if include_mask: + self._keys_to_features.update({ + 'image/object/mask': + tf.io.VarLenFeature(tf.string), + }) + + def _decode_image(self, parsed_tensors): + """Decodes the image and set its static shape.""" + image = tf.io.decode_image(parsed_tensors['image/encoded'], channels=3) + image.set_shape([None, None, 3]) + return image + + def _decode_boxes(self, parsed_tensors): + """Concat box coordinates in the format of [ymin, xmin, ymax, xmax].""" + xmin = parsed_tensors['image/object/bbox/xmin'] + xmax = parsed_tensors['image/object/bbox/xmax'] + ymin = parsed_tensors['image/object/bbox/ymin'] + ymax = parsed_tensors['image/object/bbox/ymax'] + return tf.stack([ymin, xmin, ymax, xmax], axis=-1) + + def _decode_masks(self, parsed_tensors): + """Decode a set of PNG masks to the tf.float32 tensors.""" + def _decode_png_mask(png_bytes): + mask = tf.squeeze( + tf.io.decode_png(png_bytes, channels=1, dtype=tf.uint8), axis=-1) + mask = tf.cast(mask, dtype=tf.float32) + mask.set_shape([None, None]) + return mask + + height = parsed_tensors['image/height'] + width = parsed_tensors['image/width'] + masks = parsed_tensors['image/object/mask'] + return tf.cond( + pred=tf.greater(tf.size(input=masks), 0), + true_fn=lambda: tf.map_fn(_decode_png_mask, masks, dtype=tf.float32), + false_fn=lambda: tf.zeros([0, height, width], dtype=tf.float32)) + + def _decode_areas(self, parsed_tensors): + xmin = parsed_tensors['image/object/bbox/xmin'] + xmax = parsed_tensors['image/object/bbox/xmax'] + ymin = parsed_tensors['image/object/bbox/ymin'] + ymax = parsed_tensors['image/object/bbox/ymax'] + return tf.cond( + tf.greater(tf.shape(parsed_tensors['image/object/area'])[0], 0), + lambda: parsed_tensors['image/object/area'], + lambda: (xmax - xmin) * (ymax - ymin)) + + def decode(self, serialized_example): + """Decode the serialized example. + + Args: + serialized_example: a single serialized tf.Example string. + + Returns: + decoded_tensors: a dictionary of tensors with the following fields: + - image: a uint8 tensor of shape [None, None, 3]. + - source_id: a string scalar tensor. + - height: an integer scalar tensor. + - width: an integer scalar tensor. + - groundtruth_classes: a int64 tensor of shape [None]. + - groundtruth_is_crowd: a bool tensor of shape [None]. + - groundtruth_area: a float32 tensor of shape [None]. + - groundtruth_boxes: a float32 tensor of shape [None, 4]. + - groundtruth_instance_masks: a float32 tensor of shape + [None, None, None]. + - groundtruth_instance_masks_png: a string tensor of shape [None]. 
+ """ + parsed_tensors = tf.io.parse_single_example( + serialized=serialized_example, features=self._keys_to_features) + for k in parsed_tensors: + if isinstance(parsed_tensors[k], tf.SparseTensor): + if parsed_tensors[k].dtype == tf.string: + parsed_tensors[k] = tf.sparse.to_dense( + parsed_tensors[k], default_value='') + else: + parsed_tensors[k] = tf.sparse.to_dense( + parsed_tensors[k], default_value=0) + + image = self._decode_image(parsed_tensors) + boxes = self._decode_boxes(parsed_tensors) + areas = self._decode_areas(parsed_tensors) + is_crowds = tf.cond( + tf.greater(tf.shape(parsed_tensors['image/object/is_crowd'])[0], 0), + lambda: tf.cast(parsed_tensors['image/object/is_crowd'], dtype=tf.bool), + lambda: tf.zeros_like(parsed_tensors['image/object/class/label'], dtype=tf.bool)) # pylint: disable=line-too-long + if self._include_mask: + masks = self._decode_masks(parsed_tensors) + + decoded_tensors = { + 'image': image, + 'source_id': parsed_tensors['image/source_id'], + 'height': parsed_tensors['image/height'], + 'width': parsed_tensors['image/width'], + 'groundtruth_classes': parsed_tensors['image/object/class/label'], + 'groundtruth_is_crowd': is_crowds, + 'groundtruth_area': areas, + 'groundtruth_boxes': boxes, + } + if self._include_mask: + decoded_tensors.update({ + 'groundtruth_instance_masks': masks, + 'groundtruth_instance_masks_png': parsed_tensors['image/object/mask'], + }) + return decoded_tensors diff --git a/models/official/vision/detection/evaluation/__init__.py b/models/official/vision/detection/evaluation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..931c2ef11db4a949e6c2e95bca44e36bac1241e9 --- /dev/null +++ b/models/official/vision/detection/evaluation/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== diff --git a/models/official/vision/detection/evaluation/coco_evaluator.py b/models/official/vision/detection/evaluation/coco_evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..dc56a9332784dd66d5393bbf0d4c996fe5141c6d --- /dev/null +++ b/models/official/vision/detection/evaluation/coco_evaluator.py @@ -0,0 +1,343 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""The COCO-style evaluator. 
+ +The following snippet demonstrates the use of interfaces: + + evaluator = COCOEvaluator(...) + for _ in range(num_evals): + for _ in range(num_batches_per_eval): + predictions, groundtruths = predictor.predict(...) # pop a batch. + evaluator.update(predictions, groundtruths) # aggregate internal stats. + evaluator.evaluate() # finish one full eval. + +See also: https://github.com/cocodataset/cocoapi/ +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import atexit +import tempfile +import numpy as np +from absl import logging +from pycocotools import cocoeval +import six +import tensorflow as tf + +from official.vision.detection.evaluation import coco_utils +from official.vision.detection.utils import class_utils + + +class MetricWrapper(object): + # This is only a wrapper for the COCO metric and works only on numpy arrays, so it + # doesn't inherit from tf.keras.layers.Layer or tf.keras.metrics.Metric. + + def __init__(self, evaluator): + self._evaluator = evaluator + + def update_state(self, y_true, y_pred): + labels = tf.nest.map_structure(lambda x: x.numpy(), y_true) + outputs = tf.nest.map_structure(lambda x: x.numpy(), y_pred) + groundtruths = {} + predictions = {} + for key, val in outputs.items(): + if isinstance(val, tuple): + val = np.concatenate(val) + predictions[key] = val + for key, val in labels.items(): + if isinstance(val, tuple): + val = np.concatenate(val) + groundtruths[key] = val + self._evaluator.update(predictions, groundtruths) + + def result(self): + return self._evaluator.evaluate() + + def reset_states(self): + return self._evaluator.reset() + + +class COCOEvaluator(object): + """COCO evaluation metric class.""" + + def __init__(self, annotation_file, include_mask, need_rescale_bboxes=True): + """Constructs COCO evaluation class. + + The class provides the interface to metrics_fn in TPUEstimator. The + _update_op() takes detections from each image and pushes them to + self.detections. The _evaluate() loads a JSON file in COCO annotation format + as the groundtruths and runs COCO evaluation. + + Args: + annotation_file: a JSON file that stores annotations of the eval dataset. + If `annotation_file` is None, groundtruth annotations will be loaded + from the dataloader. + include_mask: a boolean to indicate whether or not to include the mask + eval. + need_rescale_bboxes: If true, bboxes in `predictions` will be rescaled back + to absolute values (`image_info` is needed in this case).
+ """ + if annotation_file: + if annotation_file.startswith('gs://'): + _, local_val_json = tempfile.mkstemp(suffix='.json') + tf.io.gfile.remove(local_val_json) + + tf.io.gfile.copy(annotation_file, local_val_json) + atexit.register(tf.io.gfile.remove, local_val_json) + else: + local_val_json = annotation_file + self._coco_gt = coco_utils.COCOWrapper( + eval_type=('mask' if include_mask else 'box'), + annotation_file=local_val_json) + self._annotation_file = annotation_file + self._include_mask = include_mask + self._metric_names = [ + 'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'ARmax1', 'ARmax10', + 'ARmax100', 'ARs', 'ARm', 'ARl' + ] + self._required_prediction_fields = [ + 'source_id', 'num_detections', 'detection_classes', 'detection_scores', + 'detection_boxes' + ] + self._need_rescale_bboxes = need_rescale_bboxes + if self._need_rescale_bboxes: + self._required_prediction_fields.append('image_info') + self._required_groundtruth_fields = [ + 'source_id', 'height', 'width', 'classes', 'boxes' + ] + if self._include_mask: + mask_metric_names = ['mask_' + x for x in self._metric_names] + self._metric_names.extend(mask_metric_names) + self._required_prediction_fields.extend(['detection_masks']) + self._required_groundtruth_fields.extend(['masks']) + + self.reset() + + def reset(self): + """Resets internal states for a fresh run.""" + self._predictions = {} + if not self._annotation_file: + self._groundtruths = {} + + def evaluate(self): + """Evaluates with detections from all images with COCO API. + + Returns: + coco_metric: float numpy array with shape [24] representing the + coco-style evaluation metrics (box and mask). + """ + if not self._annotation_file: + logging.info('Thre is no annotation_file in COCOEvaluator.') + gt_dataset = coco_utils.convert_groundtruths_to_coco_dataset( + self._groundtruths) + coco_gt = coco_utils.COCOWrapper( + eval_type=('mask' if self._include_mask else 'box'), + gt_dataset=gt_dataset) + else: + logging.info('Using annotation file: %s', self._annotation_file) + coco_gt = self._coco_gt + coco_predictions = coco_utils.convert_predictions_to_coco_annotations( + self._predictions) + coco_dt = coco_gt.loadRes(predictions=coco_predictions) + image_ids = [ann['image_id'] for ann in coco_predictions] + + coco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='bbox') + coco_eval.params.imgIds = image_ids + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + coco_metrics = coco_eval.stats + + if self._include_mask: + mcoco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='segm') + mcoco_eval.params.imgIds = image_ids + mcoco_eval.evaluate() + mcoco_eval.accumulate() + mcoco_eval.summarize() + mask_coco_metrics = mcoco_eval.stats + + if self._include_mask: + metrics = np.hstack((coco_metrics, mask_coco_metrics)) + else: + metrics = coco_metrics + + # Cleans up the internal variables in order for a fresh eval next time. 
+ self.reset() + + metrics_dict = {} + for i, name in enumerate(self._metric_names): + metrics_dict[name] = metrics[i].astype(np.float32) + return metrics_dict + + def _process_predictions(self, predictions): + image_scale = np.tile(predictions['image_info'][:, 2:3, :], (1, 1, 2)) + predictions['detection_boxes'] = ( + predictions['detection_boxes'].astype(np.float32)) + predictions['detection_boxes'] /= image_scale + if 'detection_outer_boxes' in predictions: + predictions['detection_outer_boxes'] = ( + predictions['detection_outer_boxes'].astype(np.float32)) + predictions['detection_outer_boxes'] /= image_scale + + def update(self, predictions, groundtruths=None): + """Update and aggregate detection results and groundtruth data. + + Args: + predictions: a dictionary of numpy arrays including the fields below. + See different parsers under `../dataloader` for more details. + Required fields: + - source_id: a numpy array of int or string of shape [batch_size]. + - image_info [if `need_rescale_bboxes` is True]: a numpy array of + float of shape [batch_size, 4, 2]. + - num_detections: a numpy array of + int of shape [batch_size]. + - detection_boxes: a numpy array of float of shape [batch_size, K, 4]. + - detection_classes: a numpy array of int of shape [batch_size, K]. + - detection_scores: a numpy array of float of shape [batch_size, K]. + Optional fields: + - detection_masks: a numpy array of float of shape + [batch_size, K, mask_height, mask_width]. + groundtruths: a dictionary of numpy arrays including the fields below. + See also different parsers under `../dataloader` for more details. + Required fields: + - source_id: a numpy array of int or string of shape [batch_size]. + - height: a numpy array of int of shape [batch_size]. + - width: a numpy array of int of shape [batch_size]. + - num_detections: a numpy array of int of shape [batch_size]. + - boxes: a numpy array of float of shape [batch_size, K, 4]. + - classes: a numpy array of int of shape [batch_size, K]. + Optional fields: + - is_crowds: a numpy array of int of shape [batch_size, K]. If the + field is absent, it is assumed that this instance is not crowd. + - areas: a numy array of float of shape [batch_size, K]. If the + field is absent, the area is calculated using either boxes or + masks depending on which one is available. + - masks: a numpy array of float of shape + [batch_size, K, mask_height, mask_width], + + Raises: + ValueError: if the required prediction or groundtruth fields are not + present in the incoming `predictions` or `groundtruths`. + """ + for k in self._required_prediction_fields: + if k not in predictions: + raise ValueError( + 'Missing the required key `{}` in predictions!'.format(k)) + if self._need_rescale_bboxes: + self._process_predictions(predictions) + for k, v in six.iteritems(predictions): + if k not in self._predictions: + self._predictions[k] = [v] + else: + self._predictions[k].append(v) + + if not self._annotation_file: + assert groundtruths + for k in self._required_groundtruth_fields: + if k not in groundtruths: + raise ValueError( + 'Missing the required key `{}` in groundtruths!'.format(k)) + for k, v in six.iteritems(groundtruths): + if k not in self._groundtruths: + self._groundtruths[k] = [v] + else: + self._groundtruths[k].append(v) + + +class ShapeMaskCOCOEvaluator(COCOEvaluator): + """COCO evaluation metric class for ShapeMask.""" + + def __init__(self, mask_eval_class, **kwargs): + """Constructs COCO evaluation class. 
+ + The class provides the interface to metrics_fn in TPUEstimator. The + _update_op() takes detections from each image and push them to + self.detections. The _evaluate() loads a JSON file in COCO annotation format + as the groundtruths and runs COCO evaluation. + + Args: + mask_eval_class: the set of classes for mask evaluation. + **kwargs: other keyword arguments passed to the parent class initializer. + """ + super(ShapeMaskCOCOEvaluator, self).__init__(**kwargs) + self._mask_eval_class = mask_eval_class + self._eval_categories = class_utils.coco_split_class_ids(mask_eval_class) + if mask_eval_class != 'all': + self._metric_names = [ + x.replace('mask', 'novel_mask') for x in self._metric_names + ] + + def evaluate(self): + """Evaluates with detections from all images with COCO API. + + Returns: + coco_metric: float numpy array with shape [24] representing the + coco-style evaluation metrics (box and mask). + """ + if not self._annotation_file: + gt_dataset = coco_utils.convert_groundtruths_to_coco_dataset( + self._groundtruths) + coco_gt = coco_utils.COCOWrapper( + eval_type=('mask' if self._include_mask else 'box'), + gt_dataset=gt_dataset) + else: + coco_gt = self._coco_gt + coco_predictions = coco_utils.convert_predictions_to_coco_annotations( + self._predictions) + coco_dt = coco_gt.loadRes(predictions=coco_predictions) + image_ids = [ann['image_id'] for ann in coco_predictions] + + coco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='bbox') + coco_eval.params.imgIds = image_ids + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + coco_metrics = coco_eval.stats + + if self._include_mask: + mcoco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='segm') + mcoco_eval.params.imgIds = image_ids + mcoco_eval.evaluate() + mcoco_eval.accumulate() + mcoco_eval.summarize() + if self._mask_eval_class == 'all': + metrics = np.hstack((coco_metrics, mcoco_eval.stats)) + else: + mask_coco_metrics = mcoco_eval.category_stats + val_catg_idx = np.isin(mcoco_eval.params.catIds, + self._eval_categories) + # Gather the valid evaluation of the eval categories. + if np.any(val_catg_idx): + mean_val_metrics = [] + for mid in range(len(self._metric_names) // 2): + mean_val_metrics.append( + np.nanmean(mask_coco_metrics[mid][val_catg_idx])) + + mean_val_metrics = np.array(mean_val_metrics) + else: + mean_val_metrics = np.zeros(len(self._metric_names) // 2) + metrics = np.hstack((coco_metrics, mean_val_metrics)) + else: + metrics = coco_metrics + + # Cleans up the internal variables in order for a fresh eval next time. + self.reset() + + metrics_dict = {} + for i, name in enumerate(self._metric_names): + metrics_dict[name] = metrics[i].astype(np.float32) + return metrics_dict diff --git a/models/official/vision/detection/evaluation/coco_utils.py b/models/official/vision/detection/evaluation/coco_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8155d1fbb89ac143eb7cc03457a6645a5b5ab505 --- /dev/null +++ b/models/official/vision/detection/evaluation/coco_utils.py @@ -0,0 +1,374 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Util functions related to pycocotools and COCO eval.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import json + +from absl import logging +import numpy as np +from PIL import Image +from pycocotools import coco +from pycocotools import mask as mask_api +import six +import tensorflow as tf + +from official.vision.detection.dataloader import tf_example_decoder +from official.vision.detection.utils import box_utils +from official.vision.detection.utils import mask_utils + + +class COCOWrapper(coco.COCO): + """COCO wrapper class. + + This class wraps the COCO API object and provides the following additional + functionalities: + 1. Support string type image id. + 2. Support loading the groundtruth dataset using the external annotation + dictionary. + 3. Support loading the prediction results using the external annotation + dictionary. + """ + + def __init__(self, eval_type='box', annotation_file=None, gt_dataset=None): + """Instantiates a COCO-style API object. + + Args: + eval_type: either 'box' or 'mask'. + annotation_file: a JSON file that stores annotations of the eval dataset. + This is required if `gt_dataset` is not provided. + gt_dataset: the groundtruth eval dataset in COCO API format. + """ + if ((annotation_file and gt_dataset) or + ((not annotation_file) and (not gt_dataset))): + raise ValueError('One and only one of `annotation_file` and `gt_dataset` ' + 'needs to be specified.') + + if eval_type not in ['box', 'mask']: + raise ValueError('The `eval_type` can only be either `box` or `mask`.') + + coco.COCO.__init__(self, annotation_file=annotation_file) + self._eval_type = eval_type + if gt_dataset: + self.dataset = gt_dataset + self.createIndex() + + def loadRes(self, predictions): + """Loads the result file and returns a result COCO API object. + + Args: + predictions: a list of dictionaries, each representing an annotation in COCO + format. The required fields are `image_id`, `category_id`, `score`, + `bbox`, `segmentation`. + + Returns: + res: result COCO api object. + + Raises: + ValueError: if the set of image ids from predictions is not a subset of + the set of image ids of the groundtruth dataset.
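+ + Example prediction entry (an illustrative sketch only; the values are + placeholders): + + {'image_id': 1, 'category_id': 1, 'score': 0.9, + 'bbox': [10.0, 20.0, 50.0, 40.0]} + + The `bbox` follows the COCO [x, y, width, height] convention; for 'mask' + eval, `segmentation` is expected in the pycocotools RLE format.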
+ """ + res = coco.COCO() + res.dataset['images'] = copy.deepcopy(self.dataset['images']) + res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) + + image_ids = [ann['image_id'] for ann in predictions] + if set(image_ids) != (set(image_ids) & set(self.getImgIds())): + raise ValueError('Results do not correspond to the current dataset!') + for ann in predictions: + x1, x2, y1, y2 = [ann['bbox'][0], ann['bbox'][0] + ann['bbox'][2], + ann['bbox'][1], ann['bbox'][1] + ann['bbox'][3]] + if self._eval_type == 'box': + ann['area'] = ann['bbox'][2] * ann['bbox'][3] + ann['segmentation'] = [ + [x1, y1, x1, y2, x2, y2, x2, y1]] + elif self._eval_type == 'mask': + ann['area'] = mask_api.area(ann['segmentation']) + + res.dataset['annotations'] = copy.deepcopy(predictions) + res.createIndex() + return res + + +def convert_predictions_to_coco_annotations(predictions): + """Converts a batch of predictions to annotations in COCO format. + + Args: + predictions: a dictionary of lists of numpy arrays including the following + fields. K below denotes the maximum number of instances per image. + Required fields: + - source_id: a list of numpy arrays of int or string of shape + [batch_size]. + - num_detections: a list of numpy arrays of int of shape [batch_size]. + - detection_boxes: a list of numpy arrays of float of shape + [batch_size, K, 4], where coordinates are in the original image + space (not the scaled image space). + - detection_classes: a list of numpy arrays of int of shape + [batch_size, K]. + - detection_scores: a list of numpy arrays of float of shape + [batch_size, K]. + Optional fields: + - detection_masks: a list of numpy arrays of float of shape + [batch_size, K, mask_height, mask_width]. + + Returns: + coco_predictions: prediction in COCO annotation format. + """ + coco_predictions = [] + num_batches = len(predictions['source_id']) + batch_size = predictions['source_id'][0].shape[0] + max_num_detections = predictions['detection_classes'][0].shape[1] + use_outer_box = 'detection_outer_boxes' in predictions + for i in range(num_batches): + predictions['detection_boxes'][i] = box_utils.yxyx_to_xywh( + predictions['detection_boxes'][i]) + if use_outer_box: + predictions['detection_outer_boxes'][i] = box_utils.yxyx_to_xywh( + predictions['detection_outer_boxes'][i]) + mask_boxes = predictions['detection_outer_boxes'] + else: + mask_boxes = predictions['detection_boxes'] + + for j in range(batch_size): + if 'detection_masks' in predictions: + image_masks = mask_utils.paste_instance_masks( + predictions['detection_masks'][i][j], + mask_boxes[i][j], + int(predictions['image_info'][i][j, 0, 0]), + int(predictions['image_info'][i][j, 0, 1])) + binary_masks = (image_masks > 0.0).astype(np.uint8) + encoded_masks = [ + mask_api.encode(np.asfortranarray(binary_mask)) + for binary_mask in list(binary_masks)] + for k in range(max_num_detections): + ann = {} + ann['image_id'] = predictions['source_id'][i][j] + ann['category_id'] = predictions['detection_classes'][i][j, k] + ann['bbox'] = predictions['detection_boxes'][i][j, k] + ann['score'] = predictions['detection_scores'][i][j, k] + if 'detection_masks' in predictions: + ann['segmentation'] = encoded_masks[k] + coco_predictions.append(ann) + + for i, ann in enumerate(coco_predictions): + ann['id'] = i + 1 + + return coco_predictions + + +def convert_groundtruths_to_coco_dataset(groundtruths, label_map=None): + """Converts groundtruths to the dataset in COCO format. 
+ + Args: + groundtruths: a dictionary of numpy arrays including the fields below. + Note that each element in a field's list holds the values for one + batch of examples. K below denotes the actual number of + instances for each image. + Required fields: + - source_id: a list of numpy arrays of int or string of shape + [batch_size]. + - height: a list of numpy arrays of int of shape [batch_size]. + - width: a list of numpy arrays of int of shape [batch_size]. + - num_detections: a list of numpy arrays of int of shape [batch_size]. + - boxes: a list of numpy arrays of float of shape [batch_size, K, 4], + where coordinates are in the original image space (not the + normalized coordinates). + - classes: a list of numpy arrays of int of shape [batch_size, K]. + Optional fields: + - is_crowds: a list of numpy arrays of int of shape [batch_size, K]. If + the field is absent, it is assumed that this instance is not crowd. + - areas: a list of numpy arrays of float of shape [batch_size, K]. If the + field is absent, the area is calculated using either boxes or + masks depending on which one is available. + - masks: a list of numpy arrays of string of shape [batch_size, K]. + label_map: (optional) a dictionary that maps category ids to category + names. If `None`, the category mapping is collected from the + `groundtruths`. + + Returns: + coco_groundtruths: the groundtruth dataset in COCO format. + """ + source_ids = np.concatenate(groundtruths['source_id'], axis=0) + heights = np.concatenate(groundtruths['height'], axis=0) + widths = np.concatenate(groundtruths['width'], axis=0) + gt_images = [{'id': int(i), 'height': int(h), 'width': int(w)} for i, h, w + in zip(source_ids, heights, widths)] + + gt_annotations = [] + num_batches = len(groundtruths['source_id']) + batch_size = groundtruths['source_id'][0].shape[0] + for i in range(num_batches): + for j in range(batch_size): + num_instances = groundtruths['num_detections'][i][j] + for k in range(num_instances): + ann = {} + ann['image_id'] = int(groundtruths['source_id'][i][j]) + if 'is_crowds' in groundtruths: + ann['iscrowd'] = int(groundtruths['is_crowds'][i][j, k]) + else: + ann['iscrowd'] = 0 + ann['category_id'] = int(groundtruths['classes'][i][j, k]) + boxes = groundtruths['boxes'][i] + ann['bbox'] = [ + float(boxes[j, k, 1]), + float(boxes[j, k, 0]), + float(boxes[j, k, 3] - boxes[j, k, 1]), + float(boxes[j, k, 2] - boxes[j, k, 0])] + if 'areas' in groundtruths: + ann['area'] = float(groundtruths['areas'][i][j, k]) + else: + ann['area'] = float( + (boxes[j, k, 3] - boxes[j, k, 1]) * + (boxes[j, k, 2] - boxes[j, k, 0])) + if 'masks' in groundtruths: + mask = Image.open(six.BytesIO(groundtruths['masks'][i][j, k])) + width, height = mask.size + np_mask = ( + np.array(mask.getdata()).reshape(height, width).astype(np.uint8)) + np_mask[np_mask > 0] = 255 + encoded_mask = mask_api.encode(np.asfortranarray(np_mask)) + ann['segmentation'] = encoded_mask + if 'areas' not in groundtruths: + ann['area'] = mask_api.area(encoded_mask) + gt_annotations.append(ann) + + for i, ann in enumerate(gt_annotations): + ann['id'] = i + 1 + + if label_map: + gt_categories = [{'id': i, 'name': label_map[i]} for i in label_map] + else: + category_ids = [gt['category_id'] for gt in gt_annotations] + gt_categories = [{'id': i} for i in set(category_ids)] + + gt_dataset = { + 'images': gt_images, + 'categories': gt_categories, + 'annotations': copy.deepcopy(gt_annotations), + } + return gt_dataset + + +class 
COCOGroundtruthGenerator(object): + """Generates the groundtruth annotations from a single example.""" + + def __init__(self, file_pattern, num_examples, include_mask): + self._file_pattern = file_pattern + self._num_examples = num_examples + self._include_mask = include_mask + self._dataset_fn = tf.data.TFRecordDataset + + def _parse_single_example(self, example): + """Parses a single serialized tf.Example proto. + + Args: + example: a serialized tf.Example proto string. + + Returns: + A dictionary of groundtruth with the following fields: + source_id: a scalar tensor of int64 representing the image source_id. + height: a scalar tensor of int64 representing the image height. + width: a scalar tensor of int64 representing the image width. + boxes: a float tensor of shape [K, 4], representing the groundtruth + boxes in absolute coordinates with respect to the original image size. + classes: a int64 tensor of shape [K], representing the class labels of + each instances. + is_crowds: a bool tensor of shape [K], indicating whether the instance + is crowd. + areas: a float tensor of shape [K], indicating the area of each + instance. + masks: a string tensor of shape [K], containing the bytes of the png + mask of each instance. + """ + decoder = tf_example_decoder.TfExampleDecoder( + include_mask=self._include_mask) + decoded_tensors = decoder.decode(example) + + image = decoded_tensors['image'] + image_size = tf.shape(image)[0:2] + boxes = box_utils.denormalize_boxes( + decoded_tensors['groundtruth_boxes'], image_size) + groundtruths = { + 'source_id': tf.string_to_number( + decoded_tensors['source_id'], out_type=tf.int64), + 'height': decoded_tensors['height'], + 'width': decoded_tensors['width'], + 'num_detections': tf.shape(decoded_tensors['groundtruth_classes'])[0], + 'boxes': boxes, + 'classes': decoded_tensors['groundtruth_classes'], + 'is_crowds': decoded_tensors['groundtruth_is_crowd'], + 'areas': decoded_tensors['groundtruth_area'], + } + if self._include_mask: + groundtruths.update({ + 'masks': decoded_tensors['groundtruth_instance_masks_png'], + }) + return groundtruths + + def _build_pipeline(self): + """Builds data pipeline to generate groundtruth annotations.""" + dataset = tf.data.Dataset.list_files(self._file_pattern, shuffle=False) + dataset = dataset.apply( + tf.data.experimental.parallel_interleave( + lambda filename: self._dataset_fn(filename).prefetch(1), + cycle_length=32, + sloppy=False)) + dataset = dataset.map(self._parse_single_example, num_parallel_calls=64) + dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) + dataset = dataset.batch(1, drop_remainder=False) + return dataset + + def __call__(self): + with tf.Graph().as_default(): + dataset = self._build_pipeline() + groundtruth = dataset.make_one_shot_iterator().get_next() + + with tf.Session() as sess: + for _ in range(self._num_examples): + groundtruth_result = sess.run(groundtruth) + yield groundtruth_result + + +def scan_and_generator_annotation_file(file_pattern, + num_samples, + include_mask, + annotation_file): + """Scans and generate the COCO-style annotation JSON file given a dataset.""" + groundtruth_generator = COCOGroundtruthGenerator( + file_pattern, num_samples, include_mask) + generate_annotation_file(groundtruth_generator, annotation_file) + + +def generate_annotation_file(groundtruth_generator, + annotation_file): + """Generates COCO-style annotation JSON file given a groundtruth generator.""" + groundtruths = {} + logging.info('Loading groundtruth annotations from dataset to memory...') + 
for groundtruth in groundtruth_generator(): + for k, v in six.iteritems(groundtruth): + if k not in groundtruths: + groundtruths[k] = [v] + else: + groundtruths[k].append(v) + gt_dataset = convert_groundtruths_to_coco_dataset(groundtruths) + + logging.info('Saving groundtruth annotations to the JSON file...') + with tf.io.gfile.GFile(annotation_file, 'w') as f: + f.write(json.dumps(gt_dataset)) + logging.info('Done saving the JSON file...') diff --git a/models/official/vision/detection/evaluation/factory.py b/models/official/vision/detection/evaluation/factory.py new file mode 100644 index 0000000000000000000000000000000000000000..4d44bf177071a97b663b41410a05d59d59f04456 --- /dev/null +++ b/models/official/vision/detection/evaluation/factory.py @@ -0,0 +1,40 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Evaluator factory.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from official.vision.detection.evaluation import coco_evaluator + + +def evaluator_generator(params): + """Generator function for various evaluators.""" + if params.type == 'box': + evaluator = coco_evaluator.COCOEvaluator( + annotation_file=params.val_json_file, include_mask=False) + elif params.type == 'box_and_mask': + evaluator = coco_evaluator.COCOEvaluator( + annotation_file=params.val_json_file, include_mask=True) + elif params.type == 'shapemask_box_and_mask': + evaluator = coco_evaluator.ShapeMaskCOCOEvaluator( + mask_eval_class=params.mask_eval_class, + annotation_file=params.val_json_file, include_mask=True) + + else: + raise ValueError('Evaluator %s is not supported.' % params.type) + + return coco_evaluator.MetricWrapper(evaluator) diff --git a/models/official/vision/detection/executor/__init__.py b/models/official/vision/detection/executor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..931c2ef11db4a949e6c2e95bca44e36bac1241e9 --- /dev/null +++ b/models/official/vision/detection/executor/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== diff --git a/models/official/vision/detection/executor/detection_executor.py b/models/official/vision/detection/executor/detection_executor.py new file mode 100644 index 0000000000000000000000000000000000000000..26ff028cf67d6df37e5a0af31bc2e54844231fcd --- /dev/null +++ b/models/official/vision/detection/executor/detection_executor.py @@ -0,0 +1,160 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""An executor class for running models on TensorFlow 2.0.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +from absl import logging + +import tensorflow as tf +from official.modeling.training import distributed_executor as executor +from official.vision.detection.utils.object_detection import visualization_utils + + +class DetectionDistributedExecutor(executor.DistributedExecutor): + """Detection-specific custom training loop executor. + + Subclasses the DistributedExecutor and adds support for numpy-based metrics.
+ """ + + def __init__(self, + predict_post_process_fn=None, + trainable_variables_filter=None, + **kwargs): + super(DetectionDistributedExecutor, self).__init__(**kwargs) + if predict_post_process_fn: + assert callable(predict_post_process_fn) + if trainable_variables_filter: + assert callable(trainable_variables_filter) + self._predict_post_process_fn = predict_post_process_fn + self._trainable_variables_filter = trainable_variables_filter + self.eval_steps = tf.Variable( + 0, + trainable=False, + dtype=tf.int32, + synchronization=tf.VariableSynchronization.ON_READ, + aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, + shape=[]) + + def _create_replicated_step(self, + strategy, + model, + loss_fn, + optimizer, + metric=None): + trainable_variables = model.trainable_variables + if self._trainable_variables_filter: + trainable_variables = self._trainable_variables_filter( + trainable_variables) + logging.info('Filter trainable variables from %d to %d', + len(model.trainable_variables), len(trainable_variables)) + _update_state = lambda labels, outputs: None + if isinstance(metric, tf.keras.metrics.Metric): + _update_state = lambda labels, outputs: metric.update_state( + labels, outputs) + else: + logging.error('Detection: train metric is not an instance of ' + 'tf.keras.metrics.Metric.') + + def _replicated_step(inputs): + """Replicated training step.""" + inputs, labels = inputs + + with tf.GradientTape() as tape: + outputs = model(inputs, training=True) + all_losses = loss_fn(labels, outputs) + losses = {} + for k, v in all_losses.items(): + losses[k] = tf.reduce_mean(v) + per_replica_loss = losses['total_loss'] / strategy.num_replicas_in_sync + _update_state(labels, outputs) + + grads = tape.gradient(per_replica_loss, trainable_variables) + optimizer.apply_gradients(zip(grads, trainable_variables)) + return losses + + return _replicated_step + + def _create_test_step(self, strategy, model, metric): + """Creates a distributed test step.""" + + @tf.function + def test_step(iterator, eval_steps): + """Calculates evaluation metrics on distributed devices.""" + + def _test_step_fn(inputs, eval_steps): + """Replicated accuracy calculation.""" + inputs, labels = inputs + model_outputs = model(inputs, training=False) + if self._predict_post_process_fn: + labels, prediction_outputs = self._predict_post_process_fn( + labels, model_outputs) + num_remaining_visualizations = ( + self._params.eval.num_images_to_visualize - eval_steps) + # If there are remaining number of visualizations that needs to be + # done, add next batch outputs for visualization. + # + # TODO(hongjunchoi): Once dynamic slicing is supported on TPU, only + # write correct slice of outputs to summary file. 
+ if num_remaining_visualizations > 0: + visualization_utils.visualize_images_with_bounding_boxes( + inputs, prediction_outputs['detection_boxes'], + self.global_train_step, self.eval_summary_writer) + + return labels, prediction_outputs + + labels, outputs = strategy.run( + _test_step_fn, args=( + next(iterator), + eval_steps, + )) + outputs = tf.nest.map_structure(strategy.experimental_local_results, + outputs) + labels = tf.nest.map_structure(strategy.experimental_local_results, + labels) + + eval_steps.assign_add(self._params.eval.batch_size) + return labels, outputs + + return test_step + + def _run_evaluation(self, test_step, current_training_step, metric, + test_iterator): + """Runs validation steps and aggregate metrics.""" + self.eval_steps.assign(0) + if not test_iterator or not metric: + logging.warning( + 'Both test_iterator (%s) and metrics (%s) must not be None.', + test_iterator, metric) + return None + logging.info('Running evaluation after step: %s.', current_training_step) + while True: + try: + labels, outputs = test_step(test_iterator, self.eval_steps) + if metric: + metric.update_state(labels, outputs) + except (StopIteration, tf.errors.OutOfRangeError): + break + + metric_result = metric.result() + if isinstance(metric, tf.keras.metrics.Metric): + metric_result = tf.nest.map_structure(lambda x: x.numpy().astype(float), + metric_result) + logging.info('Step: [%d] Validation metric = %s', current_training_step, + metric_result) + return metric_result diff --git a/models/official/vision/detection/main.py b/models/official/vision/detection/main.py new file mode 100644 index 0000000000000000000000000000000000000000..542be3a1dcc73f82719af2d60dc9abd210787931 --- /dev/null +++ b/models/official/vision/detection/main.py @@ -0,0 +1,271 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Main function to train various object detection models.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import functools +import pprint + +# pylint: disable=g-bad-import-order +import tensorflow as tf + +from absl import app +from absl import flags +from absl import logging +# pylint: enable=g-bad-import-order + +from official.modeling.hyperparams import params_dict +from official.modeling.training import distributed_executor as executor +from official.utils import hyperparams_flags +from official.utils.flags import core as flags_core +from official.utils.misc import distribution_utils +from official.utils.misc import keras_utils +from official.vision.detection.configs import factory as config_factory +from official.vision.detection.dataloader import input_reader +from official.vision.detection.dataloader import mode_keys as ModeKeys +from official.vision.detection.executor.detection_executor import DetectionDistributedExecutor +from official.vision.detection.modeling import factory as model_factory + +hyperparams_flags.initialize_common_flags() +flags_core.define_log_steps() + +flags.DEFINE_bool('enable_xla', default=False, help='Enable XLA for GPU') + +flags.DEFINE_string( + 'mode', default='train', help='Mode to run: `train` or `eval`.') + +flags.DEFINE_string( + 'model', default='retinanet', + help='Model to run: `retinanet`, `mask_rcnn` or `shapemask`.') + +flags.DEFINE_string('training_file_pattern', None, + 'Location of the train data.') + +flags.DEFINE_string('eval_file_pattern', None, 'Location of the eval data.') + +flags.DEFINE_string( + 'checkpoint_path', None, + 'The checkpoint path to eval.
Only used in eval_once mode.') + +FLAGS = flags.FLAGS + + +def run_executor(params, + mode, + checkpoint_path=None, + train_input_fn=None, + eval_input_fn=None, + callbacks=None, + prebuilt_strategy=None): + """Runs the object detection model on distribution strategy defined by the user.""" + + if params.architecture.use_bfloat16: + policy = tf.compat.v2.keras.mixed_precision.experimental.Policy( + 'mixed_bfloat16') + tf.compat.v2.keras.mixed_precision.experimental.set_policy(policy) + + model_builder = model_factory.model_generator(params) + + if prebuilt_strategy is not None: + strategy = prebuilt_strategy + else: + strategy_config = params.strategy_config + distribution_utils.configure_cluster(strategy_config.worker_hosts, + strategy_config.task_index) + strategy = distribution_utils.get_distribution_strategy( + distribution_strategy=params.strategy_type, + num_gpus=strategy_config.num_gpus, + all_reduce_alg=strategy_config.all_reduce_alg, + num_packs=strategy_config.num_packs, + tpu_address=strategy_config.tpu) + + num_workers = int(strategy.num_replicas_in_sync + 7) // 8 + is_multi_host = (int(num_workers) >= 2) + + if mode == 'train': + + def _model_fn(params): + return model_builder.build_model(params, mode=ModeKeys.TRAIN) + + logging.info( + 'Train num_replicas_in_sync %d num_workers %d is_multi_host %s', + strategy.num_replicas_in_sync, num_workers, is_multi_host) + + dist_executor = DetectionDistributedExecutor( + strategy=strategy, + params=params, + model_fn=_model_fn, + loss_fn=model_builder.build_loss_fn, + is_multi_host=is_multi_host, + predict_post_process_fn=model_builder.post_processing, + trainable_variables_filter=model_builder + .make_filter_trainable_variables_fn()) + + if is_multi_host: + train_input_fn = functools.partial( + train_input_fn, + batch_size=params.train.batch_size // strategy.num_replicas_in_sync) + + return dist_executor.train( + train_input_fn=train_input_fn, + model_dir=params.model_dir, + iterations_per_loop=params.train.iterations_per_loop, + total_steps=params.train.total_steps, + init_checkpoint=model_builder.make_restore_checkpoint_fn(), + custom_callbacks=callbacks, + save_config=True) + elif mode == 'eval' or mode == 'eval_once': + + def _model_fn(params): + return model_builder.build_model(params, mode=ModeKeys.PREDICT_WITH_GT) + + logging.info('Eval num_replicas_in_sync %d num_workers %d is_multi_host %s', + strategy.num_replicas_in_sync, num_workers, is_multi_host) + + if is_multi_host: + eval_input_fn = functools.partial( + eval_input_fn, + batch_size=params.eval.batch_size // strategy.num_replicas_in_sync) + + dist_executor = DetectionDistributedExecutor( + strategy=strategy, + params=params, + model_fn=_model_fn, + loss_fn=model_builder.build_loss_fn, + is_multi_host=is_multi_host, + predict_post_process_fn=model_builder.post_processing, + trainable_variables_filter=model_builder + .make_filter_trainable_variables_fn()) + + if mode == 'eval': + results = dist_executor.evaluate_from_model_dir( + model_dir=params.model_dir, + eval_input_fn=eval_input_fn, + eval_metric_fn=model_builder.eval_metrics, + eval_timeout=params.eval.eval_timeout, + min_eval_interval=params.eval.min_eval_interval, + total_steps=params.train.total_steps) + else: + # Run evaluation once for a single checkpoint. 
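+ # `checkpoint_path` may point at either a specific checkpoint file or a + # model directory; a directory is resolved to its latest checkpoint below.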
+ if not checkpoint_path: + raise ValueError('checkpoint_path cannot be empty.') + if tf.io.gfile.isdir(checkpoint_path): + checkpoint_path = tf.train.latest_checkpoint(checkpoint_path) + summary_writer = executor.SummaryWriter(params.model_dir, 'eval') + results, _ = dist_executor.evaluate_checkpoint( + checkpoint_path=checkpoint_path, + eval_input_fn=eval_input_fn, + eval_metric_fn=model_builder.eval_metrics, + summary_writer=summary_writer) + for k, v in results.items(): + logging.info('Final eval metric %s: %f', k, v) + return results + else: + raise ValueError('Mode not found: %s.' % mode) + + +def run(callbacks=None): + keras_utils.set_session_config(enable_xla=FLAGS.enable_xla) + + params = config_factory.config_generator(FLAGS.model) + + params = params_dict.override_params_dict( + params, FLAGS.config_file, is_strict=True) + + params = params_dict.override_params_dict( + params, FLAGS.params_override, is_strict=True) + params.override( + { + 'strategy_type': FLAGS.strategy_type, + 'model_dir': FLAGS.model_dir, + 'strategy_config': executor.strategy_flags_dict(), + }, + is_strict=False) + + # Make sure use_tpu and strategy_type are in sync. + params.use_tpu = (params.strategy_type == 'tpu') + + if not params.use_tpu: + params.override({ + 'architecture': { + 'use_bfloat16': False, + }, + 'norm_activation': { + 'use_sync_bn': False, + }, + }, is_strict=True) + + params.validate() + params.lock() + pp = pprint.PrettyPrinter() + params_str = pp.pformat(params.as_dict()) + logging.info('Model Parameters: %s', params_str) + + train_input_fn = None + eval_input_fn = None + training_file_pattern = FLAGS.training_file_pattern or params.train.train_file_pattern + eval_file_pattern = FLAGS.eval_file_pattern or params.eval.eval_file_pattern + if not training_file_pattern and not eval_file_pattern: + raise ValueError('Must provide at least one of training_file_pattern and ' + 'eval_file_pattern.') + + if training_file_pattern: + # Use global batch size for single host. + train_input_fn = input_reader.InputFn( + file_pattern=training_file_pattern, + params=params, + mode=input_reader.ModeKeys.TRAIN, + batch_size=params.train.batch_size) + + if eval_file_pattern: + eval_input_fn = input_reader.InputFn( + file_pattern=eval_file_pattern, + params=params, + mode=input_reader.ModeKeys.PREDICT_WITH_GT, + batch_size=params.eval.batch_size, + num_examples=params.eval.eval_samples) + + if callbacks is None: + callbacks = [] + + if FLAGS.log_steps: + callbacks.append( + keras_utils.TimeHistory( + batch_size=params.train.batch_size, + log_steps=FLAGS.log_steps, + )) + + return run_executor( + params, + FLAGS.mode, + checkpoint_path=FLAGS.checkpoint_path, + train_input_fn=train_input_fn, + eval_input_fn=eval_input_fn, + callbacks=callbacks) + + +def main(argv): + del argv # Unused. + + run() + + +if __name__ == '__main__': + tf.config.set_soft_device_placement(True) + app.run(main) diff --git a/models/official/vision/detection/modeling/__init__.py b/models/official/vision/detection/modeling/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..931c2ef11db4a949e6c2e95bca44e36bac1241e9 --- /dev/null +++ b/models/official/vision/detection/modeling/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== diff --git a/models/official/vision/detection/modeling/architecture/__init__.py b/models/official/vision/detection/modeling/architecture/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..931c2ef11db4a949e6c2e95bca44e36bac1241e9 --- /dev/null +++ b/models/official/vision/detection/modeling/architecture/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== diff --git a/models/official/vision/detection/modeling/architecture/factory.py b/models/official/vision/detection/modeling/architecture/factory.py new file mode 100644 index 0000000000000000000000000000000000000000..ed5647d6fb83fbd7c404a4573ff247acb8999b8c --- /dev/null +++ b/models/official/vision/detection/modeling/architecture/factory.py @@ -0,0 +1,163 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Model architecture factory.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from official.vision.detection.modeling.architecture import fpn +from official.vision.detection.modeling.architecture import heads +from official.vision.detection.modeling.architecture import identity +from official.vision.detection.modeling.architecture import nn_ops +from official.vision.detection.modeling.architecture import resnet + + +def norm_activation_generator(params): + return nn_ops.norm_activation_builder( + momentum=params.batch_norm_momentum, + epsilon=params.batch_norm_epsilon, + trainable=params.batch_norm_trainable, + activation=params.activation) + + +def backbone_generator(params): + """Generator function for various backbone models.""" + if params.architecture.backbone == 'resnet': + resnet_params = params.resnet + backbone_fn = resnet.Resnet( + resnet_depth=resnet_params.resnet_depth, + activation=params.norm_activation.activation, + norm_activation=norm_activation_generator( + params.norm_activation)) + else: + raise ValueError('Backbone model `{}` is not supported.' + .format(params.architecture.backbone)) + + return backbone_fn + + +def multilevel_features_generator(params): + """Generator function for various FPN models.""" + if params.architecture.multilevel_features == 'fpn': + fpn_params = params.fpn + fpn_fn = fpn.Fpn( + min_level=params.architecture.min_level, + max_level=params.architecture.max_level, + fpn_feat_dims=fpn_params.fpn_feat_dims, + use_separable_conv=fpn_params.use_separable_conv, + activation=params.norm_activation.activation, + use_batch_norm=fpn_params.use_batch_norm, + norm_activation=norm_activation_generator( + params.norm_activation)) + elif params.architecture.multilevel_features == 'identity': + fpn_fn = identity.Identity() + else: + raise ValueError('The multi-level feature model `{}` is not supported.' 
+ .format(params.architecture.multilevel_features)) + return fpn_fn + + +def retinanet_head_generator(params): + """Generator function for RetinaNet head architecture.""" + head_params = params.retinanet_head + return heads.RetinanetHead( + params.architecture.min_level, + params.architecture.max_level, + params.architecture.num_classes, + head_params.anchors_per_location, + head_params.num_convs, + head_params.num_filters, + head_params.use_separable_conv, + norm_activation=norm_activation_generator(params.norm_activation)) + + +def rpn_head_generator(params): + """Generator function for RPN head architecture.""" + head_params = params.rpn_head + return heads.RpnHead( + params.architecture.min_level, + params.architecture.max_level, + head_params.anchors_per_location, + head_params.num_convs, + head_params.num_filters, + head_params.use_separable_conv, + params.norm_activation.activation, + head_params.use_batch_norm, + norm_activation=norm_activation_generator(params.norm_activation)) + + +def fast_rcnn_head_generator(params): + """Generator function for Fast R-CNN head architecture.""" + head_params = params.frcnn_head + return heads.FastrcnnHead( + params.architecture.num_classes, + head_params.num_convs, + head_params.num_filters, + head_params.use_separable_conv, + head_params.num_fcs, + head_params.fc_dims, + params.norm_activation.activation, + head_params.use_batch_norm, + norm_activation=norm_activation_generator(params.norm_activation)) + + +def mask_rcnn_head_generator(params): + """Generator function for Mask R-CNN head architecture.""" + head_params = params.mrcnn_head + return heads.MaskrcnnHead( + params.architecture.num_classes, + params.architecture.mask_target_size, + head_params.num_convs, + head_params.num_filters, + head_params.use_separable_conv, + params.norm_activation.activation, + head_params.use_batch_norm, + norm_activation=norm_activation_generator(params.norm_activation)) + + +def shapeprior_head_generator(params): + """Generator function for shape prior head architecture.""" + head_params = params.shapemask_head + return heads.ShapemaskPriorHead( + params.architecture.num_classes, + head_params.num_downsample_channels, + head_params.mask_crop_size, + head_params.use_category_for_mask, + head_params.shape_prior_path) + + +def coarsemask_head_generator(params): + """Generator function for ShapeMask coarse mask head architecture.""" + head_params = params.shapemask_head + return heads.ShapemaskCoarsemaskHead( + params.architecture.num_classes, + head_params.num_downsample_channels, + head_params.mask_crop_size, + head_params.use_category_for_mask, + head_params.num_convs, + norm_activation=norm_activation_generator(params.norm_activation)) + + +def finemask_head_generator(params): + """Generator function for Shapemask fine mask head architecture.""" + head_params = params.shapemask_head + return heads.ShapemaskFinemaskHead( + params.architecture.num_classes, + head_params.num_downsample_channels, + head_params.mask_crop_size, + head_params.use_category_for_mask, + head_params.num_convs, + head_params.upsample_factor) diff --git a/models/official/vision/detection/modeling/architecture/fpn.py b/models/official/vision/detection/modeling/architecture/fpn.py new file mode 100644 index 0000000000000000000000000000000000000000..b968dc2e152eb66e2df7ca7673b506c123b59d0f --- /dev/null +++ b/models/official/vision/detection/modeling/architecture/fpn.py @@ -0,0 +1,151 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Feature Pyramid Networks. + +Feature Pyramid Networks were proposed in: +[1] Tsung-Yi Lin, Piotr Dollar, Ross Girshick, Kaiming He, Bharath Hariharan, + , and Serge Belongie + Feature Pyramid Networks for Object Detection. CVPR 2017. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools + +import tensorflow as tf + +from tensorflow.python.keras import backend +from official.vision.detection.modeling.architecture import nn_ops +from official.vision.detection.ops import spatial_transform_ops + + +class Fpn(object): + """Feature pyramid networks.""" + + def __init__(self, + min_level=3, + max_level=7, + fpn_feat_dims=256, + use_separable_conv=False, + activation='relu', + use_batch_norm=True, + norm_activation=nn_ops.norm_activation_builder( + activation='relu')): + """FPN initialization function. + + Args: + min_level: `int` minimum level in FPN output feature maps. + max_level: `int` maximum level in FPN output feature maps. + fpn_feat_dims: `int` number of filters in FPN layers. + use_separable_conv: `bool`, if True use separable convolution for + convolution in FPN layers. + use_batch_norm: 'bool', indicating whether batchnorm layers are added. + norm_activation: an operation that includes a normalization layer + followed by an optional activation layer. + """ + self._min_level = min_level + self._max_level = max_level + self._fpn_feat_dims = fpn_feat_dims + if use_separable_conv: + self._conv2d_op = functools.partial( + tf.keras.layers.SeparableConv2D, depth_multiplier=1) + else: + self._conv2d_op = tf.keras.layers.Conv2D + if activation == 'relu': + self._activation_op = tf.nn.relu + elif activation == 'swish': + self._activation_op = tf.nn.swish + else: + raise ValueError('Unsupported activation `{}`.'.format(activation)) + self._use_batch_norm = use_batch_norm + self._norm_activation = norm_activation + + self._norm_activations = {} + self._lateral_conv2d_op = {} + self._post_hoc_conv2d_op = {} + self._coarse_conv2d_op = {} + for level in range(self._min_level, self._max_level + 1): + if self._use_batch_norm: + self._norm_activations[level] = norm_activation( + use_activation=False, name='p%d-bn' % level) + self._lateral_conv2d_op[level] = self._conv2d_op( + filters=self._fpn_feat_dims, + kernel_size=(1, 1), + padding='same', + name='l%d' % level) + self._post_hoc_conv2d_op[level] = self._conv2d_op( + filters=self._fpn_feat_dims, + strides=(1, 1), + kernel_size=(3, 3), + padding='same', + name='post_hoc_d%d' % level) + self._coarse_conv2d_op[level] = self._conv2d_op( + filters=self._fpn_feat_dims, + strides=(2, 2), + kernel_size=(3, 3), + padding='same', + name='p%d' % level) + + def __call__(self, multilevel_features, is_training=None): + """Returns the FPN features for a given multilevel features. 
+ + Args: + multilevel_features: a `dict` containing `int` keys for continuous feature + levels, e.g., [2, 3, 4, 5]. The values are corresponding features with + shape [batch_size, height_l, width_l, num_filters]. + is_training: `bool` if True, the model is in training mode. + + Returns: + a `dict` containing `int` keys for continuous feature levels + [min_level, min_level + 1, ..., max_level]. The values are corresponding + FPN features with shape [batch_size, height_l, width_l, fpn_feat_dims]. + """ + input_levels = list(multilevel_features.keys()) + if min(input_levels) > self._min_level: + raise ValueError( + 'The minimum backbone level %d should be '%(min(input_levels)) + + 'less or equal to FPN minimum level %d.:'%(self._min_level)) + backbone_max_level = min(max(input_levels), self._max_level) + with backend.get_graph().as_default(), tf.name_scope('fpn'): + # Adds lateral connections. + feats_lateral = {} + for level in range(self._min_level, backbone_max_level + 1): + feats_lateral[level] = self._lateral_conv2d_op[level]( + multilevel_features[level]) + + # Adds top-down path. + feats = {backbone_max_level: feats_lateral[backbone_max_level]} + for level in range(backbone_max_level - 1, self._min_level - 1, -1): + feats[level] = spatial_transform_ops.nearest_upsampling( + feats[level + 1], 2) + feats_lateral[level] + + # Adds post-hoc 3x3 convolution kernel. + for level in range(self._min_level, backbone_max_level + 1): + feats[level] = self._post_hoc_conv2d_op[level](feats[level]) + + # Adds coarser FPN levels introduced for RetinaNet. + for level in range(backbone_max_level + 1, self._max_level + 1): + feats_in = feats[level - 1] + if level > backbone_max_level + 1: + feats_in = self._activation_op(feats_in) + feats[level] = self._coarse_conv2d_op[level](feats_in) + if self._use_batch_norm: + # Adds batch_norm layer. + for level in range(self._min_level, self._max_level + 1): + feats[level] = self._norm_activations[level]( + feats[level], is_training=is_training) + return feats diff --git a/models/official/vision/detection/modeling/architecture/heads.py b/models/official/vision/detection/modeling/architecture/heads.py new file mode 100644 index 0000000000000000000000000000000000000000..7f6954aecbbef8e8807345e643555ba222b0e1b9 --- /dev/null +++ b/models/official/vision/detection/modeling/architecture/heads.py @@ -0,0 +1,999 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Classes to build various prediction heads in all supported models.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools + +import numpy as np +import tensorflow as tf +from tensorflow.python.keras import backend +from official.vision.detection.modeling.architecture import nn_ops +from official.vision.detection.ops import spatial_transform_ops + + +class RpnHead(tf.keras.layers.Layer): + """Region Proposal Network head.""" + + def __init__(self, + min_level, + max_level, + anchors_per_location, + num_convs=2, + num_filters=256, + use_separable_conv=False, + activation='relu', + use_batch_norm=True, + norm_activation=nn_ops.norm_activation_builder( + activation='relu')): + """Initialize params to build Region Proposal Network head. + + Args: + min_level: `int` number of minimum feature level. + max_level: `int` number of maximum feature level. + anchors_per_location: `int` number of number of anchors per pixel + location. + num_convs: `int` number that represents the number of the intermediate + conv layers before the prediction. + num_filters: `int` number that represents the number of filters of the + intermediate conv layers. + use_separable_conv: `bool`, indicating whether the separable conv layers + is used. + activation: activation function. Support 'relu' and 'swish'. + use_batch_norm: 'bool', indicating whether batchnorm layers are added. + norm_activation: an operation that includes a normalization layer + followed by an optional activation layer. + """ + self._min_level = min_level + self._max_level = max_level + self._anchors_per_location = anchors_per_location + if activation == 'relu': + self._activation_op = tf.nn.relu + elif activation == 'swish': + self._activation_op = tf.nn.swish + else: + raise ValueError('Unsupported activation `{}`.'.format(activation)) + self._use_batch_norm = use_batch_norm + + if use_separable_conv: + self._conv2d_op = functools.partial( + tf.keras.layers.SeparableConv2D, + depth_multiplier=1, + bias_initializer=tf.zeros_initializer()) + else: + self._conv2d_op = functools.partial( + tf.keras.layers.Conv2D, + kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01), + bias_initializer=tf.zeros_initializer()) + + self._rpn_conv = self._conv2d_op( + num_filters, + kernel_size=(3, 3), + strides=(1, 1), + activation=(None if self._use_batch_norm else self._activation_op), + padding='same', + name='rpn') + self._rpn_class_conv = self._conv2d_op( + anchors_per_location, + kernel_size=(1, 1), + strides=(1, 1), + padding='valid', + name='rpn-class') + self._rpn_box_conv = self._conv2d_op( + 4 * anchors_per_location, + kernel_size=(1, 1), + strides=(1, 1), + padding='valid', + name='rpn-box') + + self._norm_activations = {} + if self._use_batch_norm: + for level in range(self._min_level, self._max_level + 1): + self._norm_activations[level] = norm_activation(name='rpn-l%d-bn' % + level) + + def _shared_rpn_heads(self, features, anchors_per_location, level, + is_training): + """Shared RPN heads.""" + features = self._rpn_conv(features) + if self._use_batch_norm: + # The batch normalization layers are not shared between levels. 
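+ # All conv layers in this head (self._rpn_conv, self._rpn_class_conv,
+ # self._rpn_box_conv) are reused for every FPN level; only the NormActivation
+ # applied below is level-specific, so each level normalizes its features with
+ # its own batch statistics.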
+ features = self._norm_activations[level]( + features, is_training=is_training) + # Proposal classification scores + scores = self._rpn_class_conv(features) + # Proposal bbox regression deltas + bboxes = self._rpn_box_conv(features) + + return scores, bboxes + + def __call__(self, features, is_training=None): + + scores_outputs = {} + box_outputs = {} + + with backend.get_graph().as_default(), tf.name_scope('rpn_head'): + for level in range(self._min_level, self._max_level + 1): + scores_output, box_output = self._shared_rpn_heads( + features[level], self._anchors_per_location, level, is_training) + scores_outputs[level] = scores_output + box_outputs[level] = box_output + return scores_outputs, box_outputs + + +class FastrcnnHead(tf.keras.layers.Layer): + """Fast R-CNN box head.""" + + def __init__(self, + num_classes, + num_convs=0, + num_filters=256, + use_separable_conv=False, + num_fcs=2, + fc_dims=1024, + activation='relu', + use_batch_norm=True, + norm_activation=nn_ops.norm_activation_builder( + activation='relu')): + """Initialize params to build Fast R-CNN box head. + + Args: + num_classes: a integer for the number of classes. + num_convs: `int` number that represents the number of the intermediate + conv layers before the FC layers. + num_filters: `int` number that represents the number of filters of the + intermediate conv layers. + use_separable_conv: `bool`, indicating whether the separable conv layers + is used. + num_fcs: `int` number that represents the number of FC layers before the + predictions. + fc_dims: `int` number that represents the number of dimension of the FC + layers. + activation: activation function. Support 'relu' and 'swish'. + use_batch_norm: 'bool', indicating whether batchnorm layers are added. + norm_activation: an operation that includes a normalization layer + followed by an optional activation layer. 
+ """ + self._num_classes = num_classes + + self._num_convs = num_convs + self._num_filters = num_filters + if use_separable_conv: + self._conv2d_op = functools.partial( + tf.keras.layers.SeparableConv2D, + depth_multiplier=1, + bias_initializer=tf.zeros_initializer()) + else: + self._conv2d_op = functools.partial( + tf.keras.layers.Conv2D, + kernel_initializer=tf.keras.initializers.VarianceScaling( + scale=2, mode='fan_out', distribution='untruncated_normal'), + bias_initializer=tf.zeros_initializer()) + + self._num_fcs = num_fcs + self._fc_dims = fc_dims + if activation == 'relu': + self._activation_op = tf.nn.relu + elif activation == 'swish': + self._activation_op = tf.nn.swish + else: + raise ValueError('Unsupported activation `{}`.'.format(activation)) + self._use_batch_norm = use_batch_norm + self._norm_activation = norm_activation + + self._conv_ops = [] + self._conv_bn_ops = [] + for i in range(self._num_convs): + self._conv_ops.append( + self._conv2d_op( + self._num_filters, + kernel_size=(3, 3), + strides=(1, 1), + padding='same', + dilation_rate=(1, 1), + activation=(None if self._use_batch_norm else self._activation_op), + name='conv_{}'.format(i))) + if self._use_batch_norm: + self._conv_bn_ops.append(self._norm_activation()) + + self._fc_ops = [] + self._fc_bn_ops = [] + for i in range(self._num_fcs): + self._fc_ops.append( + tf.keras.layers.Dense( + units=self._fc_dims, + activation=(None if self._use_batch_norm else self._activation_op), + name='fc{}'.format(i))) + if self._use_batch_norm: + self._fc_bn_ops.append(self._norm_activation(fused=False)) + + self._class_predict = tf.keras.layers.Dense( + self._num_classes, + kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01), + bias_initializer=tf.zeros_initializer(), + name='class-predict') + self._box_predict = tf.keras.layers.Dense( + self._num_classes * 4, + kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.001), + bias_initializer=tf.zeros_initializer(), + name='box-predict') + + def __call__(self, roi_features, is_training=None): + """Box and class branches for the Mask-RCNN model. + + Args: + roi_features: A ROI feature tensor of shape + [batch_size, num_rois, height_l, width_l, num_filters]. + is_training: `boolean`, if True if model is in training mode. + + Returns: + class_outputs: a tensor with a shape of + [batch_size, num_rois, num_classes], representing the class predictions. + box_outputs: a tensor with a shape of + [batch_size, num_rois, num_classes * 4], representing the box + predictions. + """ + + with backend.get_graph().as_default(), tf.name_scope('fast_rcnn_head'): + # reshape inputs beofre FC. 
+ _, num_rois, height, width, filters = roi_features.get_shape().as_list() + + net = tf.reshape(roi_features, [-1, height, width, filters]) + for i in range(self._num_convs): + net = self._conv_ops[i](net) + if self._use_batch_norm: + net = self._conv_bn_ops[i](net, is_training=is_training) + + filters = self._num_filters if self._num_convs > 0 else filters + net = tf.reshape(net, [-1, num_rois, height * width * filters]) + + for i in range(self._num_fcs): + net = self._fc_ops[i](net) + if self._use_batch_norm: + net = self._fc_bn_ops[i](net, is_training=is_training) + + class_outputs = self._class_predict(net) + box_outputs = self._box_predict(net) + return class_outputs, box_outputs + + +class MaskrcnnHead(tf.keras.layers.Layer): + """Mask R-CNN head.""" + + def __init__(self, + num_classes, + mask_target_size, + num_convs=4, + num_filters=256, + use_separable_conv=False, + activation='relu', + use_batch_norm=True, + norm_activation=nn_ops.norm_activation_builder( + activation='relu')): + """Initialize params to build Fast R-CNN head. + + Args: + num_classes: a integer for the number of classes. + mask_target_size: a integer that is the resolution of masks. + num_convs: `int` number that represents the number of the intermediate + conv layers before the prediction. + num_filters: `int` number that represents the number of filters of the + intermediate conv layers. + use_separable_conv: `bool`, indicating whether the separable conv layers + is used. + activation: activation function. Support 'relu' and 'swish'. + use_batch_norm: 'bool', indicating whether batchnorm layers are added. + norm_activation: an operation that includes a normalization layer + followed by an optional activation layer. + """ + self._num_classes = num_classes + self._mask_target_size = mask_target_size + + self._num_convs = num_convs + self._num_filters = num_filters + if use_separable_conv: + self._conv2d_op = functools.partial( + tf.keras.layers.SeparableConv2D, + depth_multiplier=1, + bias_initializer=tf.zeros_initializer()) + else: + self._conv2d_op = functools.partial( + tf.keras.layers.Conv2D, + kernel_initializer=tf.keras.initializers.VarianceScaling( + scale=2, mode='fan_out', distribution='untruncated_normal'), + bias_initializer=tf.zeros_initializer()) + if activation == 'relu': + self._activation_op = tf.nn.relu + elif activation == 'swish': + self._activation_op = tf.nn.swish + else: + raise ValueError('Unsupported activation `{}`.'.format(activation)) + self._use_batch_norm = use_batch_norm + self._norm_activation = norm_activation + self._conv2d_ops = [] + for i in range(self._num_convs): + self._conv2d_ops.append( + self._conv2d_op( + self._num_filters, + kernel_size=(3, 3), + strides=(1, 1), + padding='same', + dilation_rate=(1, 1), + activation=(None if self._use_batch_norm else self._activation_op), + name='mask-conv-l%d' % i)) + self._mask_conv_transpose = tf.keras.layers.Conv2DTranspose( + self._num_filters, + kernel_size=(2, 2), + strides=(2, 2), + padding='valid', + activation=(None if self._use_batch_norm else self._activation_op), + kernel_initializer=tf.keras.initializers.VarianceScaling( + scale=2, mode='fan_out', distribution='untruncated_normal'), + bias_initializer=tf.zeros_initializer(), + name='conv5-mask') + + def __call__(self, roi_features, class_indices, is_training=None): + """Mask branch for the Mask-RCNN model. + + Args: + roi_features: A ROI feature tensor of shape + [batch_size, num_rois, height_l, width_l, num_filters]. 
+ class_indices: a Tensor of shape [batch_size, num_rois], indicating + which class the ROI is. + is_training: `boolean`, if True if model is in training mode. + + Returns: + mask_outputs: a tensor with a shape of + [batch_size, num_masks, mask_height, mask_width, num_classes], + representing the mask predictions. + fg_gather_indices: a tensor with a shape of [batch_size, num_masks, 2], + representing the fg mask targets. + Raises: + ValueError: If boxes is not a rank-3 tensor or the last dimension of + boxes is not 4. + """ + + with backend.get_graph().as_default(): + with tf.name_scope('mask_head'): + _, num_rois, height, width, filters = roi_features.get_shape().as_list() + net = tf.reshape(roi_features, [-1, height, width, filters]) + + for i in range(self._num_convs): + net = self._conv2d_ops[i](net) + if self._use_batch_norm: + net = self._norm_activation()(net, is_training=is_training) + + net = self._mask_conv_transpose(net) + if self._use_batch_norm: + net = self._norm_activation()(net, is_training=is_training) + + mask_outputs = self._conv2d_op( + self._num_classes, + kernel_size=(1, 1), + strides=(1, 1), + padding='valid', + name='mask_fcn_logits')( + net) + mask_outputs = tf.reshape(mask_outputs, [ + -1, num_rois, self._mask_target_size, self._mask_target_size, + self._num_classes + ]) + + with tf.name_scope('masks_post_processing'): + # TODO(pengchong): Figure out the way not to use the static inferred + # batch size. + batch_size, num_masks = class_indices.get_shape().as_list() + mask_outputs = tf.transpose(a=mask_outputs, perm=[0, 1, 4, 2, 3]) + # Contructs indices for gather. + batch_indices = tf.tile( + tf.expand_dims(tf.range(batch_size), axis=1), [1, num_masks]) + mask_indices = tf.tile( + tf.expand_dims(tf.range(num_masks), axis=0), [batch_size, 1]) + gather_indices = tf.stack( + [batch_indices, mask_indices, class_indices], axis=2) + mask_outputs = tf.gather_nd(mask_outputs, gather_indices) + return mask_outputs + + +class RetinanetHead(object): + """RetinaNet head.""" + + def __init__(self, + min_level, + max_level, + num_classes, + anchors_per_location, + num_convs=4, + num_filters=256, + use_separable_conv=False, + norm_activation=nn_ops.norm_activation_builder( + activation='relu')): + """Initialize params to build RetinaNet head. + + Args: + min_level: `int` number of minimum feature level. + max_level: `int` number of maximum feature level. + num_classes: `int` number of classification categories. + anchors_per_location: `int` number of anchors per pixel location. + num_convs: `int` number of stacked convolution before the last prediction + layer. + num_filters: `int` number of filters used in the head architecture. + use_separable_conv: `bool` to indicate whether to use separable + convoluation. + norm_activation: an operation that includes a normalization layer + followed by an optional activation layer. 
+ """ + self._min_level = min_level + self._max_level = max_level + + self._num_classes = num_classes + self._anchors_per_location = anchors_per_location + + self._num_convs = num_convs + self._num_filters = num_filters + self._use_separable_conv = use_separable_conv + with tf.name_scope('class_net') as scope_name: + self._class_name_scope = tf.name_scope(scope_name) + with tf.name_scope('box_net') as scope_name: + self._box_name_scope = tf.name_scope(scope_name) + self._build_class_net_layers(norm_activation) + self._build_box_net_layers(norm_activation) + + def _class_net_batch_norm_name(self, i, level): + return 'class-%d-%d' % (i, level) + + def _box_net_batch_norm_name(self, i, level): + return 'box-%d-%d' % (i, level) + + def _build_class_net_layers(self, norm_activation): + """Build re-usable layers for class prediction network.""" + if self._use_separable_conv: + self._class_predict = tf.keras.layers.SeparableConv2D( + self._num_classes * self._anchors_per_location, + kernel_size=(3, 3), + bias_initializer=tf.constant_initializer(-np.log((1 - 0.01) / 0.01)), + padding='same', + name='class-predict') + else: + self._class_predict = tf.keras.layers.Conv2D( + self._num_classes * self._anchors_per_location, + kernel_size=(3, 3), + bias_initializer=tf.constant_initializer(-np.log((1 - 0.01) / 0.01)), + kernel_initializer=tf.keras.initializers.RandomNormal(stddev=1e-5), + padding='same', + name='class-predict') + self._class_conv = [] + self._class_norm_activation = {} + for i in range(self._num_convs): + if self._use_separable_conv: + self._class_conv.append( + tf.keras.layers.SeparableConv2D( + self._num_filters, + kernel_size=(3, 3), + bias_initializer=tf.zeros_initializer(), + activation=None, + padding='same', + name='class-' + str(i))) + else: + self._class_conv.append( + tf.keras.layers.Conv2D( + self._num_filters, + kernel_size=(3, 3), + bias_initializer=tf.zeros_initializer(), + kernel_initializer=tf.keras.initializers.RandomNormal( + stddev=0.01), + activation=None, + padding='same', + name='class-' + str(i))) + for level in range(self._min_level, self._max_level + 1): + name = self._class_net_batch_norm_name(i, level) + self._class_norm_activation[name] = norm_activation(name=name) + + def _build_box_net_layers(self, norm_activation): + """Build re-usable layers for box prediction network.""" + if self._use_separable_conv: + self._box_predict = tf.keras.layers.SeparableConv2D( + 4 * self._anchors_per_location, + kernel_size=(3, 3), + bias_initializer=tf.zeros_initializer(), + padding='same', + name='box-predict') + else: + self._box_predict = tf.keras.layers.Conv2D( + 4 * self._anchors_per_location, + kernel_size=(3, 3), + bias_initializer=tf.zeros_initializer(), + kernel_initializer=tf.keras.initializers.RandomNormal(stddev=1e-5), + padding='same', + name='box-predict') + self._box_conv = [] + self._box_norm_activation = {} + for i in range(self._num_convs): + if self._use_separable_conv: + self._box_conv.append( + tf.keras.layers.SeparableConv2D( + self._num_filters, + kernel_size=(3, 3), + activation=None, + bias_initializer=tf.zeros_initializer(), + padding='same', + name='box-' + str(i))) + else: + self._box_conv.append( + tf.keras.layers.Conv2D( + self._num_filters, + kernel_size=(3, 3), + activation=None, + bias_initializer=tf.zeros_initializer(), + kernel_initializer=tf.keras.initializers.RandomNormal( + stddev=0.01), + padding='same', + name='box-' + str(i))) + for level in range(self._min_level, self._max_level + 1): + name = self._box_net_batch_norm_name(i, level) + 
self._box_norm_activation[name] = norm_activation(name=name) + + def __call__(self, fpn_features, is_training=None): + """Returns outputs of RetinaNet head.""" + class_outputs = {} + box_outputs = {} + with backend.get_graph().as_default(), tf.name_scope('retinanet_head'): + for level in range(self._min_level, self._max_level + 1): + features = fpn_features[level] + + class_outputs[level] = self.class_net( + features, level, is_training=is_training) + box_outputs[level] = self.box_net( + features, level, is_training=is_training) + return class_outputs, box_outputs + + def class_net(self, features, level, is_training): + """Class prediction network for RetinaNet.""" + with self._class_name_scope: + for i in range(self._num_convs): + features = self._class_conv[i](features) + # The convolution layers in the class net are shared among all levels, + # but each level has its batch normlization to capture the statistical + # difference among different levels. + name = self._class_net_batch_norm_name(i, level) + features = self._class_norm_activation[name]( + features, is_training=is_training) + + classes = self._class_predict(features) + return classes + + def box_net(self, features, level, is_training=None): + """Box regression network for RetinaNet.""" + with self._box_name_scope: + for i in range(self._num_convs): + features = self._box_conv[i](features) + # The convolution layers in the box net are shared among all levels, but + # each level has its batch normlization to capture the statistical + # difference among different levels. + name = self._box_net_batch_norm_name(i, level) + features = self._box_norm_activation[name]( + features, is_training=is_training) + + boxes = self._box_predict(features) + return boxes + + +# TODO(yeqing): Refactor this class when it is ready for var_scope reuse. +class ShapemaskPriorHead(object): + """ShapeMask Prior head.""" + + def __init__(self, + num_classes, + num_downsample_channels, + mask_crop_size, + use_category_for_mask, + shape_prior_path): + """Initialize params to build RetinaNet head. + + Args: + num_classes: Number of output classes. + num_downsample_channels: number of channels in mask branch. + mask_crop_size: feature crop size. + use_category_for_mask: use class information in mask branch. + shape_prior_path: the path to load shape priors. + """ + self._mask_num_classes = num_classes if use_category_for_mask else 1 + self._num_downsample_channels = num_downsample_channels + self._mask_crop_size = mask_crop_size + self._shape_prior_path = shape_prior_path + self._use_category_for_mask = use_category_for_mask + + self._shape_prior_fc = tf.keras.layers.Dense( + self._num_downsample_channels, name='shape-prior-fc') + + def __call__(self, fpn_features, boxes, outer_boxes, classes, is_training): + """Generate the detection priors from the box detections and FPN features. + + This corresponds to the Fig. 4 of the ShapeMask paper at + https://arxiv.org/pdf/1904.03239.pdf + + Args: + fpn_features: a dictionary of FPN features. + boxes: a float tensor of shape [batch_size, num_instances, 4] + representing the tight gt boxes from dataloader/detection. + outer_boxes: a float tensor of shape [batch_size, num_instances, 4] + representing the loose gt boxes from dataloader/detection. + classes: a int Tensor of shape [batch_size, num_instances] + of instance classes. + is_training: training mode or not. + + Returns: + instance_features: a float Tensor of shape [batch_size * num_instances, + mask_crop_size, mask_crop_size, num_downsample_channels]. 
This is the + instance feature crop. + detection_priors: A float Tensor of shape [batch_size * num_instances, + mask_size, mask_size, 1]. + """ + with backend.get_graph().as_default(), tf.name_scope('prior_mask'): + batch_size, num_instances, _ = boxes.get_shape().as_list() + outer_boxes = tf.cast(outer_boxes, tf.float32) + boxes = tf.cast(boxes, tf.float32) + instance_features = spatial_transform_ops.multilevel_crop_and_resize( + fpn_features, outer_boxes, output_size=self._mask_crop_size) + instance_features = self._shape_prior_fc(instance_features) + + shape_priors = self._get_priors() + + # Get uniform priors for each outer box. + uniform_priors = tf.ones([batch_size, num_instances, self._mask_crop_size, + self._mask_crop_size]) + uniform_priors = spatial_transform_ops.crop_mask_in_target_box( + uniform_priors, boxes, outer_boxes, self._mask_crop_size) + + # Classify shape priors using uniform priors + instance features. + prior_distribution = self._classify_shape_priors( + tf.cast(instance_features, tf.float32), uniform_priors, classes) + + instance_priors = tf.gather(shape_priors, classes) + instance_priors *= tf.expand_dims(tf.expand_dims( + tf.cast(prior_distribution, tf.float32), axis=-1), axis=-1) + instance_priors = tf.reduce_sum(instance_priors, axis=2) + detection_priors = spatial_transform_ops.crop_mask_in_target_box( + instance_priors, boxes, outer_boxes, self._mask_crop_size) + + return instance_features, detection_priors + + def _get_priors(self): + """Load shape priors from file.""" + # loads class specific or agnostic shape priors + if self._shape_prior_path: + # Priors are loaded into shape [mask_num_classes, num_clusters, 32, 32]. + priors = np.load(tf.io.gfile.GFile(self._shape_prior_path, 'rb')) + priors = tf.convert_to_tensor(priors, dtype=tf.float32) + self._num_clusters = priors.get_shape().as_list()[1] + else: + # If prior path does not exist, do not use priors, i.e., pirors equal to + # uniform empty 32x32 patch. + self._num_clusters = 1 + priors = tf.zeros([self._mask_num_classes, self._num_clusters, + self._mask_crop_size, self._mask_crop_size]) + return priors + + def _classify_shape_priors(self, features, uniform_priors, classes): + """Classify the uniform prior by predicting the shape modes. + + Classify the object crop features into K modes of the clusters for each + category. + + Args: + features: A float Tensor of shape [batch_size, num_instances, + mask_size, mask_size, num_channels]. + uniform_priors: A float Tensor of shape [batch_size, num_instances, + mask_size, mask_size] representing the uniform detection priors. + classes: A int Tensor of shape [batch_size, num_instances] + of detection class ids. + + Returns: + prior_distribution: A float Tensor of shape + [batch_size, num_instances, num_clusters] representing the classifier + output probability over all possible shapes. + """ + + batch_size, num_instances, _, _, _ = features.get_shape().as_list() + features *= tf.expand_dims(uniform_priors, axis=-1) + # Reduce spatial dimension of features. The features have shape + # [batch_size, num_instances, num_channels]. 
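+ # This amounts to masked global average pooling: the uniform prior zeroes out
+ # the part of the outer-box crop that falls outside the tight detection box,
+ # and the spatial mean below leaves features of shape
+ # [batch_size, num_instances, num_channels]. The Dense layer then scores one
+ # logit per (class, cluster) pair, which is gathered for the detected class
+ # (when use_category_for_mask is True) and softmaxed over the num_clusters
+ # shape modes.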
+ features = tf.reduce_mean(features, axis=(2, 3)) + logits = tf.keras.layers.Dense( + self._mask_num_classes * self._num_clusters, + kernel_initializer=tf.random_normal_initializer(stddev=0.01))(features) + logits = tf.reshape(logits, + [batch_size, num_instances, + self._mask_num_classes, self._num_clusters]) + if self._use_category_for_mask: + logits = tf.gather(logits, tf.expand_dims(classes, axis=-1), batch_dims=2) + logits = tf.squeeze(logits, axis=2) + else: + logits = logits[:, :, 0, :] + + distribution = tf.nn.softmax(logits, name='shape_prior_weights') + return distribution + + +class ShapemaskCoarsemaskHead(object): + """ShapemaskCoarsemaskHead head.""" + + def __init__(self, + num_classes, + num_downsample_channels, + mask_crop_size, + use_category_for_mask, + num_convs, + norm_activation=nn_ops.norm_activation_builder()): + """Initialize params to build ShapeMask coarse and fine prediction head. + + Args: + num_classes: `int` number of mask classification categories. + num_downsample_channels: `int` number of filters at mask head. + mask_crop_size: feature crop size. + use_category_for_mask: use class information in mask branch. + num_convs: `int` number of stacked convolution before the last prediction + layer. + norm_activation: an operation that includes a normalization layer + followed by an optional activation layer. + """ + self._mask_num_classes = num_classes if use_category_for_mask else 1 + self._use_category_for_mask = use_category_for_mask + self._num_downsample_channels = num_downsample_channels + self._mask_crop_size = mask_crop_size + self._num_convs = num_convs + self._norm_activation = norm_activation + + self._coarse_mask_fc = tf.keras.layers.Dense( + self._num_downsample_channels, name='coarse-mask-fc') + + self._class_conv = [] + self._class_norm_activation = [] + + for i in range(self._num_convs): + self._class_conv.append(tf.keras.layers.Conv2D( + self._num_downsample_channels, + kernel_size=(3, 3), + bias_initializer=tf.zeros_initializer(), + kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01), + padding='same', + name='coarse-mask-class-%d' % i)) + + self._class_norm_activation.append( + norm_activation(name='coarse-mask-class-%d-bn' % i)) + + self._class_predict = tf.keras.layers.Conv2D( + self._mask_num_classes, + kernel_size=(1, 1), + # Focal loss bias initialization to have foreground 0.01 probability. + bias_initializer=tf.constant_initializer(-np.log((1 - 0.01) / 0.01)), + kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01), + padding='same', + name='coarse-mask-class-predict') + + def __call__(self, features, detection_priors, classes, is_training): + """Generate instance masks from FPN features and detection priors. + + This corresponds to the Fig. 5-6 of the ShapeMask paper at + https://arxiv.org/pdf/1904.03239.pdf + + Args: + features: a float Tensor of shape [batch_size, num_instances, + mask_crop_size, mask_crop_size, num_downsample_channels]. This is the + instance feature crop. + detection_priors: a float Tensor of shape [batch_size, num_instances, + mask_crop_size, mask_crop_size, 1]. This is the detection prior for + the instance. + classes: a int Tensor of shape [batch_size, num_instances] + of instance classes. + is_training: a bool indicating whether in training mode. + + Returns: + mask_outputs: instance mask prediction as a float Tensor of shape + [batch_size, num_instances, mask_size, mask_size]. 
+ """ + with backend.get_graph().as_default(), tf.name_scope('coarse_mask'): + # Transform detection priors to have the same dimension as features. + detection_priors = tf.expand_dims(detection_priors, axis=-1) + detection_priors = self._coarse_mask_fc(detection_priors) + + features += detection_priors + mask_logits = self.decoder_net(features, is_training) + # Gather the logits with right input class. + if self._use_category_for_mask: + mask_logits = tf.transpose(mask_logits, [0, 1, 4, 2, 3]) + mask_logits = tf.gather(mask_logits, tf.expand_dims(classes, -1), + batch_dims=2) + mask_logits = tf.squeeze(mask_logits, axis=2) + else: + mask_logits = mask_logits[..., 0] + + return mask_logits + + def decoder_net(self, features, is_training=False): + """Coarse mask decoder network architecture. + + Args: + features: A tensor of size [batch, height_in, width_in, channels_in]. + is_training: Whether batch_norm layers are in training mode. + + Returns: + images: A feature tensor of size [batch, output_size, output_size, + num_channels] + """ + (batch_size, num_instances, height, width, + num_channels) = features.get_shape().as_list() + features = tf.reshape(features, [batch_size * num_instances, height, width, + num_channels]) + for i in range(self._num_convs): + features = self._class_conv[i](features) + features = self._class_norm_activation[i](features, + is_training=is_training) + + mask_logits = self._class_predict(features) + mask_logits = tf.reshape(mask_logits, [batch_size, num_instances, height, + width, self._mask_num_classes]) + return mask_logits + + +class ShapemaskFinemaskHead(object): + """ShapemaskFinemaskHead head.""" + + def __init__(self, + num_classes, + num_downsample_channels, + mask_crop_size, + use_category_for_mask, + num_convs, + upsample_factor, + norm_activation=nn_ops.norm_activation_builder()): + """Initialize params to build ShapeMask coarse and fine prediction head. + + Args: + num_classes: `int` number of mask classification categories. + num_downsample_channels: `int` number of filters at mask head. + mask_crop_size: feature crop size. + use_category_for_mask: use class information in mask branch. + num_convs: `int` number of stacked convolution before the last prediction + layer. + upsample_factor: `int` number of fine mask upsampling factor. + norm_activation: an operation that includes a batch normalization layer + followed by a relu layer(optional). 
+ """ + self._use_category_for_mask = use_category_for_mask + self._mask_num_classes = num_classes if use_category_for_mask else 1 + self._num_downsample_channels = num_downsample_channels + self._mask_crop_size = mask_crop_size + self._num_convs = num_convs + self.up_sample_factor = upsample_factor + + self._fine_mask_fc = tf.keras.layers.Dense( + self._num_downsample_channels, name='fine-mask-fc') + + self._upsample_conv = tf.keras.layers.Conv2DTranspose( + self._num_downsample_channels, + (self.up_sample_factor, self.up_sample_factor), + (self.up_sample_factor, self.up_sample_factor), + name='fine-mask-conv2d-tran') + + self._fine_class_conv = [] + self._fine_class_bn = [] + for i in range(self._num_convs): + self._fine_class_conv.append( + tf.keras.layers.Conv2D( + self._num_downsample_channels, + kernel_size=(3, 3), + bias_initializer=tf.zeros_initializer(), + kernel_initializer=tf.keras.initializers.RandomNormal( + stddev=0.01), + activation=None, + padding='same', + name='fine-mask-class-%d' % i)) + self._fine_class_bn.append(norm_activation( + name='fine-mask-class-%d-bn' % i)) + + self._class_predict_conv = tf.keras.layers.Conv2D( + self._mask_num_classes, + kernel_size=(1, 1), + # Focal loss bias initialization to have foreground 0.01 probability. + bias_initializer=tf.constant_initializer(-np.log((1 - 0.01) / 0.01)), + kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01), + padding='same', + name='fine-mask-class-predict') + + def __call__(self, features, mask_logits, classes, is_training): + """Generate instance masks from FPN features and detection priors. + + This corresponds to the Fig. 5-6 of the ShapeMask paper at + https://arxiv.org/pdf/1904.03239.pdf + + Args: + features: a float Tensor of shape + [batch_size, num_instances, mask_crop_size, mask_crop_size, + num_downsample_channels]. This is the instance feature crop. + mask_logits: a float Tensor of shape + [batch_size, num_instances, mask_crop_size, mask_crop_size] indicating + predicted mask logits. + classes: a int Tensor of shape [batch_size, num_instances] + of instance classes. + is_training: a bool indicating whether in training mode. + + Returns: + mask_outputs: instance mask prediction as a float Tensor of shape + [batch_size, num_instances, mask_size, mask_size]. + """ + # Extract the foreground mean features + # with tf.variable_scope('fine_mask', reuse=tf.AUTO_REUSE): + with backend.get_graph().as_default(), tf.name_scope('fine_mask'): + mask_probs = tf.nn.sigmoid(mask_logits) + # Compute instance embedding for hard average. + binary_mask = tf.cast(tf.greater(mask_probs, 0.5), features.dtype) + instance_embedding = tf.reduce_sum( + features * tf.expand_dims(binary_mask, axis=-1), axis=(2, 3)) + instance_embedding /= tf.expand_dims( + tf.reduce_sum(binary_mask, axis=(2, 3)) + 1e-20, axis=-1) + # Take the difference between crop features and mean instance features. + features -= tf.expand_dims( + tf.expand_dims(instance_embedding, axis=2), axis=2) + + features += self._fine_mask_fc(tf.expand_dims(mask_probs, axis=-1)) + + # Decoder to generate upsampled segmentation mask. 
+ mask_logits = self.decoder_net(features, is_training) + if self._use_category_for_mask: + mask_logits = tf.transpose(mask_logits, [0, 1, 4, 2, 3]) + mask_logits = tf.gather(mask_logits, + tf.expand_dims(classes, -1), batch_dims=2) + mask_logits = tf.squeeze(mask_logits, axis=2) + else: + mask_logits = mask_logits[..., 0] + + return mask_logits + + def decoder_net(self, features, is_training=False): + """Fine mask decoder network architecture. + + Args: + features: A tensor of size [batch, height_in, width_in, channels_in]. + is_training: Whether batch_norm layers are in training mode. + + Returns: + images: A feature tensor of size [batch, output_size, output_size, + num_channels], where output size is self._gt_upsample_scale times + that of input. + """ + (batch_size, num_instances, height, width, + num_channels) = features.get_shape().as_list() + features = tf.reshape(features, [batch_size * num_instances, height, width, + num_channels]) + for i in range(self._num_convs): + features = self._fine_class_conv[i](features) + features = self._fine_class_bn[i](features, is_training=is_training) + + if self.up_sample_factor > 1: + features = self._upsample_conv(features) + + # Predict per-class instance masks. + mask_logits = self._class_predict_conv(features) + + mask_logits = tf.reshape(mask_logits, + [batch_size, num_instances, + height * self.up_sample_factor, + width * self.up_sample_factor, + self._mask_num_classes]) + return mask_logits diff --git a/models/official/vision/detection/modeling/architecture/identity.py b/models/official/vision/detection/modeling/architecture/identity.py new file mode 100644 index 0000000000000000000000000000000000000000..acc90c4d5efddcac50eb95b1229c3c5500917445 --- /dev/null +++ b/models/official/vision/detection/modeling/architecture/identity.py @@ -0,0 +1,28 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Identity Fn that forwards the input features.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +class Identity(object): + """Identity function that forwards the input features.""" + + def __call__(self, features, is_training=False): + """Only forwards the input features.""" + return features + diff --git a/models/official/vision/detection/modeling/architecture/nn_ops.py b/models/official/vision/detection/modeling/architecture/nn_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..8b3617d6c5b23dd31a9f891985dcf8361ff1e177 --- /dev/null +++ b/models/official/vision/detection/modeling/architecture/nn_ops.py @@ -0,0 +1,108 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Neural network operations commonly shared by the architectures.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import tensorflow as tf + + +class NormActivation(tf.keras.layers.Layer): + """Combined Normalization and Activation layers.""" + + def __init__(self, + momentum=0.997, + epsilon=1e-4, + trainable=True, + init_zero=False, + use_activation=True, + activation='relu', + fused=True, + name=None): + """A class to construct layers for a batch normalization followed by a ReLU. + + Args: + momentum: momentum for the moving average. + epsilon: small float added to variance to avoid dividing by zero. + trainable: `bool`, if True also add variables to the graph collection + GraphKeys.TRAINABLE_VARIABLES. If False, freeze batch normalization + layer. + init_zero: `bool` if True, initializes scale parameter of batch + normalization with 0. If False, initialize it with 1. + fused: `bool` fused option in batch normalziation. + use_actiation: `bool`, whether to add the optional activation layer after + the batch normalization layer. + activation: 'string', the type of the activation layer. Currently support + `relu` and `swish`. + name: `str` name for the operation. + """ + super(NormActivation, self).__init__(trainable=trainable) + if init_zero: + gamma_initializer = tf.keras.initializers.Zeros() + else: + gamma_initializer = tf.keras.initializers.Ones() + self._normalization_op = tf.keras.layers.BatchNormalization( + momentum=momentum, + epsilon=epsilon, + center=True, + scale=True, + trainable=trainable, + fused=fused, + gamma_initializer=gamma_initializer, + name=name) + self._use_activation = use_activation + if activation == 'relu': + self._activation_op = tf.nn.relu + elif activation == 'swish': + self._activation_op = tf.nn.swish + else: + raise ValueError('Unsupported activation `{}`.'.format(activation)) + + def __call__(self, inputs, is_training=None): + """Builds the normalization layer followed by an optional activation layer. + + Args: + inputs: `Tensor` of shape `[batch, channels, ...]`. + is_training: `boolean`, if True if model is in training mode. + + Returns: + A normalized `Tensor` with the same `data_format`. 
+ """ + # We will need to keep training=None by default, so that it can be inherit + # from keras.Model.training + if is_training and self.trainable: + is_training = True + inputs = self._normalization_op(inputs, training=is_training) + + if self._use_activation: + inputs = self._activation_op(inputs) + return inputs + + +def norm_activation_builder(momentum=0.997, + epsilon=1e-4, + trainable=True, + activation='relu', + **kwargs): + return functools.partial( + NormActivation, + momentum=momentum, + epsilon=epsilon, + trainable=trainable, + activation=activation, + **kwargs) diff --git a/models/official/vision/detection/modeling/architecture/resnet.py b/models/official/vision/detection/modeling/architecture/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..abbc7213ea971f0cb014d770e7e0c1707855fb08 --- /dev/null +++ b/models/official/vision/detection/modeling/architecture/resnet.py @@ -0,0 +1,309 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Contains definitions for the post-activation form of Residual Networks. + +Residual networks (ResNets) were proposed in: +[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun + Deep Residual Learning for Image Recognition. arXiv:1512.03385 +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import logging +import tensorflow as tf +from tensorflow.python.keras import backend +from official.vision.detection.modeling.architecture import nn_ops + +# TODO(b/140112644): Refactor the code with Keras style, i.e. build and call. +class Resnet(object): + """Class to build ResNet family model.""" + + def __init__(self, + resnet_depth, + activation='relu', + norm_activation=nn_ops.norm_activation_builder( + activation='relu'), + data_format='channels_last'): + """ResNet initialization function. + + Args: + resnet_depth: `int` depth of ResNet backbone model. + norm_activation: an operation that includes a normalization layer + followed by an optional activation layer. + data_format: `str` either "channels_first" for `[batch, channels, height, + width]` or "channels_last for `[batch, height, width, channels]`. 
+ """ + self._resnet_depth = resnet_depth + if activation == 'relu': + self._activation_op = tf.nn.relu + elif activation == 'swish': + self._activation_op = tf.nn.swish + else: + raise ValueError('Unsupported activation `{}`.'.format(activation)) + self._norm_activation = norm_activation + self._data_format = data_format + + model_params = { + 10: {'block': self.residual_block, 'layers': [1, 1, 1, 1]}, + 18: {'block': self.residual_block, 'layers': [2, 2, 2, 2]}, + 34: {'block': self.residual_block, 'layers': [3, 4, 6, 3]}, + 50: {'block': self.bottleneck_block, 'layers': [3, 4, 6, 3]}, + 101: {'block': self.bottleneck_block, 'layers': [3, 4, 23, 3]}, + 152: {'block': self.bottleneck_block, 'layers': [3, 8, 36, 3]}, + 200: {'block': self.bottleneck_block, 'layers': [3, 24, 36, 3]} + } + + if resnet_depth not in model_params: + valid_resnet_depths = ', '.join( + [str(depth) for depth in sorted(model_params.keys())]) + raise ValueError( + 'The resnet_depth should be in [%s]. Not a valid resnet_depth:'%( + valid_resnet_depths), self._resnet_depth) + params = model_params[resnet_depth] + self._resnet_fn = self.resnet_v1_generator( + params['block'], params['layers']) + + def __call__(self, inputs, is_training=None): + """Returns the ResNet model for a given size and number of output classes. + + Args: + inputs: a `Tesnor` with shape [batch_size, height, width, 3] representing + a batch of images. + is_training: `bool` if True, the model is in training mode. + + Returns: + a `dict` containing `int` keys for continuous feature levels [2, 3, 4, 5]. + The values are corresponding feature hierarchy in ResNet with shape + [batch_size, height_l, width_l, num_filters]. + """ + with backend.get_graph().as_default(): + with tf.name_scope('resnet%s' % self._resnet_depth): + return self._resnet_fn(inputs, is_training) + + def fixed_padding(self, inputs, kernel_size): + """Pads the input along the spatial dimensions independently of input size. + + Args: + inputs: `Tensor` of size `[batch, channels, height, width]` or + `[batch, height, width, channels]` depending on `data_format`. + kernel_size: `int` kernel size to be used for `conv2d` or max_pool2d` + operations. Should be a positive integer. + + Returns: + A padded `Tensor` of the same `data_format` with size either intact + (if `kernel_size == 1`) or padded (if `kernel_size > 1`). + """ + pad_total = kernel_size - 1 + pad_beg = pad_total // 2 + pad_end = pad_total - pad_beg + if self._data_format == 'channels_first': + padded_inputs = tf.pad( + tensor=inputs, + paddings=[[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]]) + else: + padded_inputs = tf.pad( + tensor=inputs, + paddings=[[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]) + + return padded_inputs + + def conv2d_fixed_padding(self, inputs, filters, kernel_size, strides): + """Strided 2-D convolution with explicit padding. + + The padding is consistent and is based only on `kernel_size`, not on the + dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone). + + Args: + inputs: `Tensor` of size `[batch, channels, height_in, width_in]`. + filters: `int` number of filters in the convolution. + kernel_size: `int` size of the kernel to be used in the convolution. + strides: `int` strides of the convolution. + + Returns: + A `Tensor` of shape `[batch, filters, height_out, width_out]`. 
+ """ + if strides > 1: + inputs = self.fixed_padding(inputs, kernel_size) + + return tf.keras.layers.Conv2D( + filters=filters, + kernel_size=kernel_size, + strides=strides, + padding=('SAME' if strides == 1 else 'VALID'), + use_bias=False, + kernel_initializer=tf.initializers.VarianceScaling(), + data_format=self._data_format)( + inputs=inputs) + + def residual_block(self, + inputs, + filters, + strides, + use_projection=False, + is_training=None): + """Standard building block for residual networks with BN after convolutions. + + Args: + inputs: `Tensor` of size `[batch, channels, height, width]`. + filters: `int` number of filters for the first two convolutions. Note that + the third and final convolution will use 4 times as many filters. + strides: `int` block stride. If greater than 1, this block will ultimately + downsample the input. + use_projection: `bool` for whether this block should use a projection + shortcut (versus the default identity shortcut). This is usually + `True` for the first block of a block group, which may change the + number of filters and the resolution. + is_training: `bool` if True, the model is in training mode. + Returns: + The output `Tensor` of the block. + """ + shortcut = inputs + if use_projection: + # Projection shortcut in first layer to match filters and strides + shortcut = self.conv2d_fixed_padding( + inputs=inputs, filters=filters, kernel_size=1, strides=strides) + shortcut = self._norm_activation(use_activation=False)( + shortcut, is_training=is_training) + + inputs = self.conv2d_fixed_padding( + inputs=inputs, filters=filters, kernel_size=3, strides=strides) + inputs = self._norm_activation()(inputs, is_training=is_training) + + inputs = self.conv2d_fixed_padding( + inputs=inputs, filters=filters, kernel_size=3, strides=1) + inputs = self._norm_activation(use_activation=False, init_zero=True)( + inputs, is_training=is_training) + + return self._activation_op(inputs + shortcut) + + def bottleneck_block(self, + inputs, + filters, + strides, + use_projection=False, + is_training=None): + """Bottleneck block variant for residual networks with BN after convolutions. + + Args: + inputs: `Tensor` of size `[batch, channels, height, width]`. + filters: `int` number of filters for the first two convolutions. Note that + the third and final convolution will use 4 times as many filters. + strides: `int` block stride. If greater than 1, this block will ultimately + downsample the input. + use_projection: `bool` for whether this block should use a projection + shortcut (versus the default identity shortcut). This is usually + `True` for the first block of a block group, which may change the + number of filters and the resolution. + is_training: `bool` if True, the model is in training mode. + + Returns: + The output `Tensor` of the block. + """ + shortcut = inputs + if use_projection: + # Projection shortcut only in first block within a group. Bottleneck + # blocks end with 4 times the number of filters. 
+ filters_out = 4 * filters + shortcut = self.conv2d_fixed_padding( + inputs=inputs, filters=filters_out, kernel_size=1, strides=strides) + shortcut = self._norm_activation(use_activation=False)( + shortcut, is_training=is_training) + + inputs = self.conv2d_fixed_padding( + inputs=inputs, filters=filters, kernel_size=1, strides=1) + inputs = self._norm_activation()(inputs, is_training=is_training) + + inputs = self.conv2d_fixed_padding( + inputs=inputs, filters=filters, kernel_size=3, strides=strides) + inputs = self._norm_activation()(inputs, is_training=is_training) + + inputs = self.conv2d_fixed_padding( + inputs=inputs, filters=4 * filters, kernel_size=1, strides=1) + inputs = self._norm_activation(use_activation=False, init_zero=True)( + inputs, is_training=is_training) + + return self._activation_op(inputs + shortcut) + + def block_group(self, inputs, filters, block_fn, blocks, strides, name, + is_training): + """Creates one group of blocks for the ResNet model. + + Args: + inputs: `Tensor` of size `[batch, channels, height, width]`. + filters: `int` number of filters for the first convolution of the layer. + block_fn: `function` for the block to use within the model + blocks: `int` number of blocks contained in the layer. + strides: `int` stride to use for the first convolution of the layer. If + greater than 1, this layer will downsample the input. + name: `str`name for the Tensor output of the block layer. + is_training: `bool` if True, the model is in training mode. + + Returns: + The output `Tensor` of the block layer. + """ + # Only the first block per block_group uses projection shortcut and strides. + inputs = block_fn(inputs, filters, strides, use_projection=True, + is_training=is_training) + + for _ in range(1, blocks): + inputs = block_fn(inputs, filters, 1, is_training=is_training) + + return tf.identity(inputs, name) + + def resnet_v1_generator(self, block_fn, layers): + """Generator for ResNet v1 models. + + Args: + block_fn: `function` for the block to use within the model. Either + `residual_block` or `bottleneck_block`. + layers: list of 4 `int`s denoting the number of blocks to include in each + of the 4 block groups. Each group consists of blocks that take inputs of + the same resolution. + + Returns: + Model `function` that takes in `inputs` and `is_training` and returns the + output `Tensor` of the ResNet model. 
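+
+ The returned model maps an image batch to a {level: feature} dict for levels
+ 2-5 (C2-C5); relative to the input image these features have strides of 4, 8,
+ 16 and 32 respectively.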
+ """ + + def model(inputs, is_training=None): + """Creation of the model graph.""" + inputs = self.conv2d_fixed_padding( + inputs=inputs, filters=64, kernel_size=7, strides=2) + inputs = tf.identity(inputs, 'initial_conv') + inputs = self._norm_activation()(inputs, is_training=is_training) + + inputs = tf.keras.layers.MaxPool2D( + pool_size=3, strides=2, padding='SAME', + data_format=self._data_format)( + inputs) + inputs = tf.identity(inputs, 'initial_max_pool') + + c2 = self.block_group( + inputs=inputs, filters=64, block_fn=block_fn, blocks=layers[0], + strides=1, name='block_group1', is_training=is_training) + c3 = self.block_group( + inputs=c2, filters=128, block_fn=block_fn, blocks=layers[1], + strides=2, name='block_group2', is_training=is_training) + c4 = self.block_group( + inputs=c3, filters=256, block_fn=block_fn, blocks=layers[2], + strides=2, name='block_group3', is_training=is_training) + c5 = self.block_group( + inputs=c4, filters=512, block_fn=block_fn, blocks=layers[3], + strides=2, name='block_group4', is_training=is_training) + return {2: c2, 3: c3, 4: c4, 5: c5} + + return model diff --git a/models/official/vision/detection/modeling/base_model.py b/models/official/vision/detection/modeling/base_model.py new file mode 100644 index 0000000000000000000000000000000000000000..8d18f12f5b7c52ca02334c4c685b70d353de83c5 --- /dev/null +++ b/models/official/vision/detection/modeling/base_model.py @@ -0,0 +1,138 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Base Model definition.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import abc +import functools +import re +import tensorflow as tf +from official.vision.detection.modeling import checkpoint_utils +from official.vision.detection.modeling import learning_rates +from official.vision.detection.modeling import optimizers + + +def _make_filter_trainable_variables_fn(frozen_variable_prefix): + """Creates a function for filtering trainable varialbes.""" + + def _filter_trainable_variables(variables): + """Filters trainable varialbes. + + Args: + variables: a list of tf.Variable to be filtered. + + Returns: + filtered_variables: a list of tf.Variable filtered out the frozen ones. + """ + # frozen_variable_prefix: a regex string specifing the prefix pattern of + # the frozen variables' names. 
+ filtered_variables = [ + v for v in variables + if not frozen_variable_prefix or + not re.match(frozen_variable_prefix, v.name) + ] + return filtered_variables + + return _filter_trainable_variables + + +class Model(object): + """Base class for model function.""" + + __metaclass__ = abc.ABCMeta + + def __init__(self, params): + self._use_bfloat16 = params.architecture.use_bfloat16 + + if params.architecture.use_bfloat16: + policy = tf.compat.v2.keras.mixed_precision.experimental.Policy( + 'mixed_bfloat16') + tf.compat.v2.keras.mixed_precision.experimental.set_policy(policy) + + # Optimization. + self._optimizer_fn = optimizers.OptimizerFactory(params.train.optimizer) + self._learning_rate = learning_rates.learning_rate_generator( + params.train.total_steps, params.train.learning_rate) + + self._frozen_variable_prefix = params.train.frozen_variable_prefix + self._regularization_var_regex = params.train.regularization_variable_regex + self._l2_weight_decay = params.train.l2_weight_decay + + # Checkpoint restoration. + self._checkpoint = params.train.checkpoint.as_dict() + + # Summary. + self._enable_summary = params.enable_summary + self._model_dir = params.model_dir + + @abc.abstractmethod + def build_outputs(self, inputs, mode): + """Build the graph of the forward path.""" + pass + + @abc.abstractmethod + def build_model(self, params, mode): + """Build the model object.""" + pass + + @abc.abstractmethod + def build_loss_fn(self): + """Build the model object.""" + pass + + def post_processing(self, labels, outputs): + """Post-processing function.""" + return labels, outputs + + def model_outputs(self, inputs, mode): + """Build the model outputs.""" + return self.build_outputs(inputs, mode) + + def build_optimizer(self): + """Returns train_op to optimize total loss.""" + # Sets up the optimizer. + return self._optimizer_fn(self._learning_rate) + + def make_filter_trainable_variables_fn(self): + """Creates a function for filtering trainable varialbes.""" + return _make_filter_trainable_variables_fn(self._frozen_variable_prefix) + + def weight_decay_loss(self, trainable_variables): + reg_variables = [ + v for v in trainable_variables + if self._regularization_var_regex is None + or re.match(self._regularization_var_regex, v.name) + ] + + return self._l2_weight_decay * tf.add_n( + [tf.nn.l2_loss(v) for v in reg_variables]) + + def make_restore_checkpoint_fn(self): + """Returns scaffold function to restore parameters from v1 checkpoint.""" + if 'skip_checkpoint_variables' in self._checkpoint: + skip_regex = self._checkpoint['skip_checkpoint_variables'] + else: + skip_regex = None + return checkpoint_utils.make_restore_checkpoint_fn( + self._checkpoint['path'], + prefix=self._checkpoint['prefix'], + skip_regex=skip_regex) + + def eval_metrics(self): + """Returns tuple of metric function and its inputs for evaluation.""" + raise NotImplementedError('Unimplemented eval_metrics') diff --git a/models/official/vision/detection/modeling/checkpoint_utils.py b/models/official/vision/detection/modeling/checkpoint_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1bb798396a714cbbc1a36309c99ceaa636a30354 --- /dev/null +++ b/models/official/vision/detection/modeling/checkpoint_utils.py @@ -0,0 +1,131 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Utility functions for loading checkpoints, especially for loading a
+TensorFlow 1.x checkpoint into a TensorFlow 2.x (Keras) model.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import re
+from absl import logging
+
+import tensorflow as tf
+
+
+def _build_assignment_map(keras_model,
+                          prefix='',
+                          skip_variables_regex=None,
+                          var_to_shape_map=None):
+  """Computes an assignment mapping for loading older checkpoints into a Keras
+  model. Variable names are remapped from the original TPUEstimator model to
+  the new Keras names.
+
+  Args:
+    keras_model: tf.keras.Model object to provide variables to assign.
+    prefix: prefix in the variable name to be removed so that names align with
+      those in the checkpoint.
+    skip_variables_regex: regular expression matching the names of variables
+      that do not need to be assigned.
+    var_to_shape_map: variable name to shape mapping from the checkpoint.
+
+  Returns:
+    The variable assignment map.
+  """
+  assignment_map = {}
+
+  checkpoint_names = None
+  if var_to_shape_map:
+    checkpoint_names = list(filter(
+        lambda x: not x.endswith('Momentum') and not x.endswith(
+            'global_step'), var_to_shape_map.keys()))
+
+  for var in keras_model.variables:
+    var_name = var.name
+
+    if skip_variables_regex and re.match(skip_variables_regex, var_name):
+      continue
+    # Trim the index of the variable.
+    if ':' in var_name:
+      var_name = var_name[:var_name.rindex(':')]
+    if var_name.startswith(prefix):
+      var_name = var_name[len(prefix):]
+
+    if not var_to_shape_map:
+      assignment_map[var_name] = var
+      continue
+
+    # Match name with variables in the checkpoint.
+    match_names = list(filter(lambda x: x.endswith(var_name), checkpoint_names))
+    try:
+      if match_names:
+        assert len(match_names) == 1, 'more than one match for {}: {}'.format(
+            var_name, match_names)
+        checkpoint_names.remove(match_names[0])
+        assignment_map[match_names[0]] = var
+      else:
+        logging.info('Variable not found in checkpoint: %s', var_name)
+    except Exception as e:
+      logging.info('Error removing the match_name: %s', match_names)
+      logging.info('Exception: %s', e)
+      raise
+  logging.info('Found %d variables in the checkpoint', len(assignment_map))
+  return assignment_map
+
+
+def _get_checkpoint_map(checkpoint_path):
+  reader = tf.train.load_checkpoint(checkpoint_path)
+  return reader.get_variable_to_shape_map()
+
+
+def make_restore_checkpoint_fn(checkpoint_path, prefix='', skip_regex=None):
+  """Returns scaffold function to restore parameters from a v1 checkpoint.
+
+  Args:
+    checkpoint_path: path of the checkpoint folder or file.
+      Example 1: '/path/to/model_dir/'
+      Example 2: '/path/to/model.ckpt-22500'
+    prefix: prefix in the variable name to be removed so that names align with
+      those in the checkpoint.
+    skip_regex: regular expression matching the names of variables that do not
+      need to be assigned.
+
+  Returns:
+    Callable[tf.keras.Model] -> None. Function that loads the v1 checkpoint
+    into the given Keras model.
+ """ + + def _restore_checkpoint_fn(keras_model): + """Loads pretrained model through scaffold function.""" + if not checkpoint_path: + logging.info('checkpoint_path is empty') + return + var_prefix = prefix + if prefix and not prefix.endswith('/'): + var_prefix += '/' + var_to_shape_map = _get_checkpoint_map(checkpoint_path) + assert var_to_shape_map, 'var_to_shape_map should not be empty' + vars_to_load = _build_assignment_map( + keras_model, + prefix=var_prefix, + skip_variables_regex=skip_regex, + var_to_shape_map=var_to_shape_map) + if not vars_to_load: + raise ValueError('Variables to load is empty.') + tf.compat.v1.train.init_from_checkpoint(checkpoint_path, + vars_to_load) + + return _restore_checkpoint_fn diff --git a/models/official/vision/detection/modeling/factory.py b/models/official/vision/detection/modeling/factory.py new file mode 100644 index 0000000000000000000000000000000000000000..b140416dfdba90420f99a8bcb3b07cc04a63cc3e --- /dev/null +++ b/models/official/vision/detection/modeling/factory.py @@ -0,0 +1,34 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Factory to build detection model.""" + + +from official.vision.detection.modeling import maskrcnn_model +from official.vision.detection.modeling import retinanet_model +from official.vision.detection.modeling import shapemask_model + + +def model_generator(params): + """Model function generator.""" + if params.type == 'retinanet': + model_fn = retinanet_model.RetinanetModel(params) + elif params.type == 'mask_rcnn': + model_fn = maskrcnn_model.MaskrcnnModel(params) + elif params.type == 'shapemask': + model_fn = shapemask_model.ShapeMaskModel(params) + else: + raise ValueError('Model %s is not supported.'% params.type) + + return model_fn diff --git a/models/official/vision/detection/modeling/learning_rates.py b/models/official/vision/detection/modeling/learning_rates.py new file mode 100644 index 0000000000000000000000000000000000000000..ecc24ffadb073c79f71725b1adcb61cbd83127cd --- /dev/null +++ b/models/official/vision/detection/modeling/learning_rates.py @@ -0,0 +1,98 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Learning rate schedule.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools + +import numpy as np +import tensorflow as tf +from official.modeling.hyperparams import params_dict + + +class StepLearningRateWithLinearWarmup(tf.keras.optimizers.schedules.LearningRateSchedule): + """Class to generate learning rate tensor.""" + + def __init__(self, total_steps, params): + """Creates the step learning rate tensor with linear warmup.""" + super(StepLearningRateWithLinearWarmup, self).__init__() + self._total_steps = total_steps + assert isinstance(params, (dict, params_dict.ParamsDict)) + if isinstance(params, dict): + params = params_dict.ParamsDict(params) + self._params = params + + def __call__(self, global_step): + warmup_lr = self._params.warmup_learning_rate + warmup_steps = self._params.warmup_steps + init_lr = self._params.init_learning_rate + lr_levels = self._params.learning_rate_levels + lr_steps = self._params.learning_rate_steps + linear_warmup = ( + warmup_lr + tf.cast(global_step, dtype=tf.float32) / warmup_steps * + (init_lr - warmup_lr)) + learning_rate = tf.where(global_step < warmup_steps, linear_warmup, init_lr) + + for next_learning_rate, start_step in zip(lr_levels, lr_steps): + learning_rate = tf.where(global_step >= start_step, next_learning_rate, + learning_rate) + return learning_rate + + def get_config(self): + return {'_params': self._params.as_dict()} + + +class CosineLearningRateWithLinearWarmup(tf.keras.optimizers.schedules.LearningRateSchedule): + """Class to generate learning rate tensor.""" + + def __init__(self, total_steps, params): + """Creates the consine learning rate tensor with linear warmup.""" + super(CosineLearningRateWithLinearWarmup, self).__init__() + self._total_steps = total_steps + assert isinstance(params, (dict, params_dict.ParamsDict)) + if isinstance(params, dict): + params = params_dict.ParamsDict(params) + self._params = params + + def __call__(self, global_step): + global_step = tf.cast(global_step, dtype=tf.float32) + warmup_lr = self._params.warmup_learning_rate + warmup_steps = self._params.warmup_steps + init_lr = self._params.init_learning_rate + total_steps = self._total_steps + linear_warmup = ( + warmup_lr + global_step / warmup_steps * (init_lr - warmup_lr)) + cosine_learning_rate = ( + init_lr * (tf.cos(np.pi * (global_step - warmup_steps) / + (total_steps - warmup_steps)) + 1.0) / 2.0) + learning_rate = tf.where(global_step < warmup_steps, linear_warmup, + cosine_learning_rate) + return learning_rate + + def get_config(self): + return {'_params': self._params.as_dict()} + + +def learning_rate_generator(total_steps, params): + """The learning rate function generator.""" + if params.type == 'step': + return StepLearningRateWithLinearWarmup(total_steps, params) + elif params.type == 'cosine': + return CosineLearningRateWithLinearWarmup(total_steps, params) + else: + raise ValueError('Unsupported learning rate type: {}.'.format(params.type)) diff --git a/models/official/vision/detection/modeling/losses.py b/models/official/vision/detection/modeling/losses.py new file mode 100644 index 0000000000000000000000000000000000000000..4b993061b3c51c9ae6456d84a79f7fea5d74c77e --- /dev/null +++ b/models/official/vision/detection/modeling/losses.py @@ -0,0 +1,542 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Losses used for detection models.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import logging +import tensorflow as tf + + +def focal_loss(logits, targets, alpha, gamma, normalizer): + """Compute the focal loss between `logits` and the golden `target` values. + + Focal loss = -(1-pt)^gamma * log(pt) + where pt is the probability of being classified to the true class. + + Args: + logits: A float32 tensor of size + [batch, height_in, width_in, num_predictions]. + targets: A float32 tensor of size + [batch, height_in, width_in, num_predictions]. + alpha: A float32 scalar multiplying alpha to the loss from positive examples + and (1-alpha) to the loss from negative examples. + gamma: A float32 scalar modulating loss from hard and easy examples. + normalizer: A float32 scalar normalizes the total loss from all examples. + + Returns: + loss: A float32 Tensor of size [batch, height_in, width_in, num_predictions] + representing normalized loss on the prediction map. + """ + with tf.name_scope('focal_loss'): + positive_label_mask = tf.math.equal(targets, 1.0) + cross_entropy = ( + tf.nn.sigmoid_cross_entropy_with_logits(labels=targets, logits=logits)) + # Below are comments/derivations for computing modulator. + # For brevity, let x = logits, z = targets, r = gamma, and p_t = sigmod(x) + # for positive samples and 1 - sigmoid(x) for negative examples. + # + # The modulator, defined as (1 - P_t)^r, is a critical part in focal loss + # computation. For r > 0, it puts more weights on hard examples, and less + # weights on easier ones. However if it is directly computed as (1 - P_t)^r, + # its back-propagation is not stable when r < 1. The implementation here + # resolves the issue. + # + # For positive samples (labels being 1), + # (1 - p_t)^r + # = (1 - sigmoid(x))^r + # = (1 - (1 / (1 + exp(-x))))^r + # = (exp(-x) / (1 + exp(-x)))^r + # = exp(log((exp(-x) / (1 + exp(-x)))^r)) + # = exp(r * log(exp(-x)) - r * log(1 + exp(-x))) + # = exp(- r * x - r * log(1 + exp(-x))) + # + # For negative samples (labels being 0), + # (1 - p_t)^r + # = (sigmoid(x))^r + # = (1 / (1 + exp(-x)))^r + # = exp(log((1 / (1 + exp(-x)))^r)) + # = exp(-r * log(1 + exp(-x))) + # + # Therefore one unified form for positive (z = 1) and negative (z = 0) + # samples is: + # (1 - p_t)^r = exp(-r * z * x - r * log(1 + exp(-x))). 
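+    # As a quick sanity check of the unified form: for x = 0, z = 1 and r = 2,
+    # the direct form gives (1 - sigmoid(0))^2 = 0.25, and the stable form
+    # gives exp(-2*1*0 - 2*log(1 + exp(-0))) = exp(-2*log(2)) = 0.25 as well.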
+ neg_logits = -1.0 * logits + modulator = tf.math.exp(gamma * targets * neg_logits - + gamma * tf.math.log1p(tf.math.exp(neg_logits))) + loss = modulator * cross_entropy + weighted_loss = tf.where(positive_label_mask, alpha * loss, + (1.0 - alpha) * loss) + weighted_loss /= normalizer + return weighted_loss + + +class RpnScoreLoss(object): + """Region Proposal Network score loss function.""" + + def __init__(self, params): + self._rpn_batch_size_per_im = params.rpn_batch_size_per_im + self._binary_crossentropy = tf.keras.losses.BinaryCrossentropy( + reduction=tf.keras.losses.Reduction.SUM, from_logits=True) + + def __call__(self, score_outputs, labels): + """Computes total RPN detection loss. + + Computes total RPN detection loss including box and score from all levels. + + Args: + score_outputs: an OrderDict with keys representing levels and values + representing scores in [batch_size, height, width, num_anchors]. + labels: the dictionary that returned from dataloader that includes + groundturth targets. + + Returns: + rpn_score_loss: a scalar tensor representing total score loss. + """ + with tf.name_scope('rpn_loss'): + levels = sorted(score_outputs.keys()) + + score_losses = [] + for level in levels: + score_losses.append( + self._rpn_score_loss( + score_outputs[level], + labels[level], + normalizer=tf.cast( + tf.shape(score_outputs[level])[0] * + self._rpn_batch_size_per_im, dtype=tf.float32))) + + # Sums per level losses to total loss. + return tf.math.add_n(score_losses) + + def _rpn_score_loss(self, score_outputs, score_targets, normalizer=1.0): + """Computes score loss.""" + # score_targets has three values: + # (1) score_targets[i]=1, the anchor is a positive sample. + # (2) score_targets[i]=0, negative. + # (3) score_targets[i]=-1, the anchor is don't care (ignore). + with tf.name_scope('rpn_score_loss'): + mask = tf.math.logical_or(tf.math.equal(score_targets, 1), + tf.math.equal(score_targets, 0)) + + score_targets = tf.math.maximum(score_targets, + tf.zeros_like(score_targets)) + + score_targets = tf.expand_dims(score_targets, axis=-1) + score_outputs = tf.expand_dims(score_outputs, axis=-1) + score_loss = self._binary_crossentropy( + score_targets, score_outputs, sample_weight=mask) + + score_loss /= normalizer + return score_loss + + +class RpnBoxLoss(object): + """Region Proposal Network box regression loss function.""" + + def __init__(self, params): + logging.info('RpnBoxLoss huber_loss_delta %s', params.huber_loss_delta) + # The delta is typically around the mean value of regression target. + # for instances, the regression targets of 512x512 input with 6 anchors on + # P2-P6 pyramid is about [0.1, 0.1, 0.2, 0.2]. + self._huber_loss = tf.keras.losses.Huber( + delta=params.huber_loss_delta, reduction=tf.keras.losses.Reduction.SUM) + + def __call__(self, box_outputs, labels): + """Computes total RPN detection loss. + + Computes total RPN detection loss including box and score from all levels. + + Args: + box_outputs: an OrderDict with keys representing levels and values + representing box regression targets in + [batch_size, height, width, num_anchors * 4]. + labels: the dictionary that returned from dataloader that includes + groundturth targets. + + Returns: + rpn_box_loss: a scalar tensor representing total box regression loss. + """ + with tf.name_scope('rpn_loss'): + levels = sorted(box_outputs.keys()) + + box_losses = [] + for level in levels: + box_losses.append(self._rpn_box_loss(box_outputs[level], labels[level])) + + # Sum per level losses to total loss. 
+ return tf.add_n(box_losses) + + def _rpn_box_loss(self, box_outputs, box_targets, normalizer=1.0): + """Computes box regression loss.""" + with tf.name_scope('rpn_box_loss'): + mask = tf.cast(tf.not_equal(box_targets, 0.0), dtype=tf.float32) + box_targets = tf.expand_dims(box_targets, axis=-1) + box_outputs = tf.expand_dims(box_outputs, axis=-1) + box_loss = self._huber_loss(box_targets, box_outputs, sample_weight=mask) + # The loss is normalized by the sum of non-zero weights and additional + # normalizer provided by the function caller. Using + 0.01 here to avoid + # division by zero. + box_loss /= normalizer * (tf.reduce_sum(mask) + 0.01) + return box_loss + + +class FastrcnnClassLoss(object): + """Fast R-CNN classification loss function.""" + + def __init__(self): + self._categorical_crossentropy = tf.keras.losses.CategoricalCrossentropy( + reduction=tf.keras.losses.Reduction.SUM, from_logits=True) + + def __call__(self, class_outputs, class_targets): + """Computes the class loss (Fast-RCNN branch) of Mask-RCNN. + + This function implements the classification loss of the Fast-RCNN. + + The classification loss is softmax on all RoIs. + Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/modeling/fast_rcnn_heads.py # pylint: disable=line-too-long + + Args: + class_outputs: a float tensor representing the class prediction for each box + with a shape of [batch_size, num_boxes, num_classes]. + class_targets: a float tensor representing the class label for each box + with a shape of [batch_size, num_boxes]. + + Returns: + a scalar tensor representing total class loss. + """ + with tf.name_scope('fast_rcnn_loss'): + batch_size, num_boxes, num_classes = class_outputs.get_shape().as_list() + class_targets = tf.cast(class_targets, dtype=tf.int32) + class_targets_one_hot = tf.one_hot(class_targets, num_classes) + return self._fast_rcnn_class_loss(class_outputs, class_targets_one_hot, + normalizer=batch_size * num_boxes / 2.0) + + def _fast_rcnn_class_loss(self, class_outputs, class_targets_one_hot, + normalizer): + """Computes classification loss.""" + with tf.name_scope('fast_rcnn_class_loss'): + class_loss = self._categorical_crossentropy(class_targets_one_hot, + class_outputs) + + class_loss /= normalizer + return class_loss + + +class FastrcnnBoxLoss(object): + """Fast R-CNN box regression loss function.""" + + def __init__(self, params): + logging.info('FastrcnnBoxLoss huber_loss_delta %s', params.huber_loss_delta) + # The delta is typically around the mean value of regression target. + # for instances, the regression targets of 512x512 input with 6 anchors on + # P2-P6 pyramid is about [0.1, 0.1, 0.2, 0.2]. + self._huber_loss = tf.keras.losses.Huber( + delta=params.huber_loss_delta, reduction=tf.keras.losses.Reduction.SUM) + + def __call__(self, box_outputs, class_targets, box_targets): + """Computes the box loss (Fast-RCNN branch) of Mask-RCNN. + + This function implements the box regression loss of the Fast-RCNN. As the + `box_outputs` produces `num_classes` boxes for each RoI, the reference model + expands `box_targets` to match the shape of `box_outputs` and selects only + the target that the RoI has a maximum overlap. (Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/roi_data/fast_rcnn.py) # pylint: disable=line-too-long + Instead, this function selects the `box_outputs` by the `class_targets` so + that it doesn't expand `box_targets`. + + The box loss is smooth L1-loss on only positive samples of RoIs. 
+ Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/modeling/fast_rcnn_heads.py # pylint: disable=line-too-long + + Args: + box_outputs: a float tensor representing the box prediction for each box + with a shape of [batch_size, num_boxes, num_classes * 4]. + class_targets: a float tensor representing the class label for each box + with a shape of [batch_size, num_boxes]. + box_targets: a float tensor representing the box label for each box + with a shape of [batch_size, num_boxes, 4]. + + Returns: + box_loss: a scalar tensor representing total box regression loss. + """ + with tf.name_scope('fast_rcnn_loss'): + class_targets = tf.cast(class_targets, dtype=tf.int32) + + # Selects the box from `box_outputs` based on `class_targets`, with which + # the box has the maximum overlap. + (batch_size, num_rois, + num_class_specific_boxes) = box_outputs.get_shape().as_list() + num_classes = num_class_specific_boxes // 4 + box_outputs = tf.reshape(box_outputs, + [batch_size, num_rois, num_classes, 4]) + + box_indices = tf.reshape( + class_targets + tf.tile( + tf.expand_dims( + tf.range(batch_size) * num_rois * num_classes, 1), + [1, num_rois]) + tf.tile( + tf.expand_dims(tf.range(num_rois) * num_classes, 0), + [batch_size, 1]), [-1]) + + box_outputs = tf.matmul( + tf.one_hot( + box_indices, + batch_size * num_rois * num_classes, + dtype=box_outputs.dtype), tf.reshape(box_outputs, [-1, 4])) + box_outputs = tf.reshape(box_outputs, [batch_size, -1, 4]) + + return self._fast_rcnn_box_loss(box_outputs, box_targets, class_targets) + + def _fast_rcnn_box_loss(self, box_outputs, box_targets, class_targets, + normalizer=1.0): + """Computes box regression loss.""" + with tf.name_scope('fast_rcnn_box_loss'): + mask = tf.tile(tf.expand_dims(tf.greater(class_targets, 0), axis=2), + [1, 1, 4]) + mask = tf.cast(mask, dtype=tf.float32) + box_targets = tf.expand_dims(box_targets, axis=-1) + box_outputs = tf.expand_dims(box_outputs, axis=-1) + box_loss = self._huber_loss(box_targets, box_outputs, sample_weight=mask) + # The loss is normalized by the number of ones in mask, + # additianal normalizer provided by the user and using 0.01 here to avoid + # division by 0. + box_loss /= normalizer * (tf.reduce_sum(mask) + 0.01) + return box_loss + + +class MaskrcnnLoss(object): + """Mask R-CNN instance segmentation mask loss function.""" + + def __init__(self): + self._binary_crossentropy = tf.keras.losses.BinaryCrossentropy( + reduction=tf.keras.losses.Reduction.SUM, from_logits=True) + + def __call__(self, mask_outputs, mask_targets, select_class_targets): + """Computes the mask loss of Mask-RCNN. + + This function implements the mask loss of Mask-RCNN. As the `mask_outputs` + produces `num_classes` masks for each RoI, the reference model expands + `mask_targets` to match the shape of `mask_outputs` and selects only the + target that the RoI has a maximum overlap. (Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/roi_data/mask_rcnn.py) # pylint: disable=line-too-long + Instead, this implementation selects the `mask_outputs` by the `class_targets` + so that it doesn't expand `mask_targets`. Note that the selection logic is + done in the post-processing of mask_rcnn_fn in mask_rcnn_architecture.py. + + Args: + mask_outputs: a float tensor representing the prediction for each mask, + with a shape of + [batch_size, num_masks, mask_height, mask_width]. 
+ mask_targets: a float tensor representing the binary mask of ground truth + labels for each mask with a shape of + [batch_size, num_masks, mask_height, mask_width]. + select_class_targets: a tensor with a shape of [batch_size, num_masks], + representing the foreground mask targets. + + Returns: + mask_loss: a float tensor representing total mask loss. + """ + with tf.name_scope('mask_rcnn_loss'): + (batch_size, num_masks, mask_height, + mask_width) = mask_outputs.get_shape().as_list() + + weights = tf.tile( + tf.reshape(tf.greater(select_class_targets, 0), + [batch_size, num_masks, 1, 1]), + [1, 1, mask_height, mask_width]) + weights = tf.cast(weights, dtype=tf.float32) + + mask_targets = tf.expand_dims(mask_targets, axis=-1) + mask_outputs = tf.expand_dims(mask_outputs, axis=-1) + mask_loss = self._binary_crossentropy(mask_targets, mask_outputs, + sample_weight=weights) + + # The loss is normalized by the number of 1's in weights and + # + 0.01 is used to avoid division by zero. + return mask_loss / (tf.reduce_sum(weights) + 0.01) + + +class RetinanetClassLoss(object): + """RetinaNet class loss.""" + + def __init__(self, params, num_classes): + self._num_classes = num_classes + self._focal_loss_alpha = params.focal_loss_alpha + self._focal_loss_gamma = params.focal_loss_gamma + + def __call__(self, cls_outputs, labels, num_positives): + """Computes total detection loss. + + Computes total detection loss including box and class loss from all levels. + + Args: + cls_outputs: an OrderDict with keys representing levels and values + representing logits in [batch_size, height, width, + num_anchors * num_classes]. + labels: the dictionary that returned from dataloader that includes + class groundturth targets. + num_positives: number of positive examples in the minibatch. + + Returns: + an integar tensor representing total class loss. + """ + # Sums all positives in a batch for normalization and avoids zero + # num_positives_sum, which would lead to inf loss during training + num_positives_sum = tf.reduce_sum(input_tensor=num_positives) + 1.0 + + cls_losses = [] + for level in cls_outputs.keys(): + cls_losses.append(self.class_loss( + cls_outputs[level], labels[level], num_positives_sum)) + # Sums per level losses to total loss. + return tf.add_n(cls_losses) + + def class_loss(self, cls_outputs, cls_targets, num_positives, + ignore_label=-2): + """Computes RetinaNet classification loss.""" + # Onehot encoding for classification labels. 
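+    # cls_targets has shape [batch, height, width, num_anchors]; one-hot
+    # encoding appends a num_classes axis, which is folded back into the last
+    # dimension below so the targets line up with the flattened per-anchor
+    # class logits.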
+ cls_targets_one_hot = tf.one_hot(cls_targets, self._num_classes) + bs, height, width, _, _ = cls_targets_one_hot.get_shape().as_list() + cls_targets_one_hot = tf.reshape(cls_targets_one_hot, + [bs, height, width, -1]) + loss = focal_loss(tf.cast(cls_outputs, dtype=tf.float32), + tf.cast(cls_targets_one_hot, dtype=tf.float32), + self._focal_loss_alpha, + self._focal_loss_gamma, + num_positives) + + ignore_loss = tf.where( + tf.equal(cls_targets, ignore_label), + tf.zeros_like(cls_targets, dtype=tf.float32), + tf.ones_like(cls_targets, dtype=tf.float32), + ) + ignore_loss = tf.expand_dims(ignore_loss, -1) + ignore_loss = tf.tile(ignore_loss, [1, 1, 1, 1, self._num_classes]) + ignore_loss = tf.reshape(ignore_loss, tf.shape(input=loss)) + return tf.reduce_sum(input_tensor=ignore_loss * loss) + + +class RetinanetBoxLoss(object): + """RetinaNet box loss.""" + + def __init__(self, params): + self._huber_loss = tf.keras.losses.Huber( + delta=params.huber_loss_delta, reduction=tf.keras.losses.Reduction.SUM) + + def __call__(self, box_outputs, labels, num_positives): + """Computes box detection loss. + + Computes total detection loss including box and class loss from all levels. + + Args: + box_outputs: an OrderDict with keys representing levels and values + representing box regression targets in [batch_size, height, width, + num_anchors * 4]. + labels: the dictionary that returned from dataloader that includes + box groundturth targets. + num_positives: number of positive examples in the minibatch. + + Returns: + an integar tensor representing total box regression loss. + """ + # Sums all positives in a batch for normalization and avoids zero + # num_positives_sum, which would lead to inf loss during training + num_positives_sum = tf.reduce_sum(input_tensor=num_positives) + 1.0 + + box_losses = [] + for level in box_outputs.keys(): + # Onehot encoding for classification labels. + box_targets_l = labels[level] + box_losses.append( + self.box_loss(box_outputs[level], box_targets_l, num_positives_sum)) + # Sums per level losses to total loss. + return tf.add_n(box_losses) + + def box_loss(self, box_outputs, box_targets, num_positives): + """Computes RetinaNet box regression loss.""" + # The delta is typically around the mean value of regression target. + # for instances, the regression targets of 512x512 input with 6 anchors on + # P3-P7 pyramid is about [0.1, 0.1, 0.2, 0.2]. + normalizer = num_positives * 4.0 + mask = tf.cast(tf.not_equal(box_targets, 0.0), dtype=tf.float32) + box_targets = tf.expand_dims(box_targets, axis=-1) + box_outputs = tf.expand_dims(box_outputs, axis=-1) + box_loss = self._huber_loss(box_targets, box_outputs, sample_weight=mask) + box_loss /= normalizer + return box_loss + + +class ShapemaskMseLoss(object): + """ShapeMask mask Mean Squared Error loss function wrapper.""" + + def __call__(self, probs, labels, valid_mask): + """Compute instance segmentation loss. + + Args: + probs: A Tensor of shape [batch_size * num_points, height, width, + num_classes]. The logits are not necessarily between 0 and 1. + labels: A float32/float16 Tensor of shape [batch_size, num_instances, + mask_size, mask_size], where mask_size = + mask_crop_size * gt_upsample_scale for fine mask, or mask_crop_size + for coarse masks and shape priors. + valid_mask: a binary mask indicating valid training masks. + + Returns: + loss: an float tensor representing total mask classification loss. 
+ """ + with tf.name_scope('shapemask_prior_loss'): + batch_size, num_instances = valid_mask.get_shape().as_list()[:2] + diff = (tf.cast(labels, dtype=tf.float32) - + tf.cast(probs, dtype=tf.float32)) + diff *= tf.cast( + tf.reshape(valid_mask, [batch_size, num_instances, 1, 1]), + tf.float32) + # Adding 0.001 in the denominator to avoid division by zero. + loss = tf.nn.l2_loss(diff) / (tf.reduce_sum(labels) + 0.001) + return loss + + +class ShapemaskLoss(object): + """ShapeMask mask loss function wrapper.""" + + def __init__(self): + self._binary_crossentropy = tf.keras.losses.BinaryCrossentropy( + reduction=tf.keras.losses.Reduction.SUM, from_logits=True) + + def __call__(self, logits, labels, valid_mask): + """ShapeMask mask cross entropy loss function wrapper. + + Args: + logits: A Tensor of shape [batch_size * num_instances, height, width, + num_classes]. The logits are not necessarily between 0 and 1. + labels: A float16/float32 Tensor of shape [batch_size, num_instances, + mask_size, mask_size], where mask_size = + mask_crop_size * gt_upsample_scale for fine mask, or mask_crop_size + for coarse masks and shape priors. + valid_mask: a binary mask of shape [batch_size, num_instances] + indicating valid training masks. + Returns: + loss: an float tensor representing total mask classification loss. + """ + with tf.name_scope('shapemask_loss'): + batch_size, num_instances = valid_mask.get_shape().as_list()[:2] + labels = tf.cast(labels, tf.float32) + logits = tf.cast(logits, tf.float32) + loss = self._binary_crossentropy(labels, logits) + loss *= tf.cast(tf.reshape( + valid_mask, [batch_size, num_instances, 1, 1]), loss.dtype) + # Adding 0.001 in the denominator to avoid division by zero. + loss = tf.reduce_sum(loss) / (tf.reduce_sum(labels) + 0.001) + return loss diff --git a/models/official/vision/detection/modeling/maskrcnn_model.py b/models/official/vision/detection/modeling/maskrcnn_model.py new file mode 100644 index 0000000000000000000000000000000000000000..e5cbe7d56ba7d82836ef58df201aa74779cb2f69 --- /dev/null +++ b/models/official/vision/detection/modeling/maskrcnn_model.py @@ -0,0 +1,344 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+"""Model definition for the Mask R-CNN Model."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import tensorflow as tf
+
+from tensorflow.python.keras import backend
+from official.vision.detection.dataloader import anchor
+from official.vision.detection.dataloader import mode_keys
+from official.vision.detection.evaluation import factory as eval_factory
+from official.vision.detection.modeling import base_model
+from official.vision.detection.modeling import losses
+from official.vision.detection.modeling.architecture import factory
+from official.vision.detection.ops import postprocess_ops
+from official.vision.detection.ops import roi_ops
+from official.vision.detection.ops import spatial_transform_ops
+from official.vision.detection.ops import target_ops
+from official.vision.detection.utils import box_utils
+
+
+class MaskrcnnModel(base_model.Model):
+  """Mask R-CNN model function."""
+
+  def __init__(self, params):
+    super(MaskrcnnModel, self).__init__(params)
+
+    # For eval metrics.
+    self._params = params
+    self._keras_model = None
+
+    self._include_mask = params.architecture.include_mask
+
+    # Architecture generators.
+    self._backbone_fn = factory.backbone_generator(params)
+    self._fpn_fn = factory.multilevel_features_generator(params)
+    self._rpn_head_fn = factory.rpn_head_generator(params)
+    self._generate_rois_fn = roi_ops.ROIGenerator(params.roi_proposal)
+    self._sample_rois_fn = target_ops.ROISampler(params.roi_sampling)
+    self._sample_masks_fn = target_ops.MaskSampler(
+        params.architecture.mask_target_size,
+        params.mask_sampling.num_mask_samples_per_image)
+
+    self._frcnn_head_fn = factory.fast_rcnn_head_generator(params)
+    if self._include_mask:
+      self._mrcnn_head_fn = factory.mask_rcnn_head_generator(params)
+
+    # Loss function.
+    self._rpn_score_loss_fn = losses.RpnScoreLoss(params.rpn_score_loss)
+    self._rpn_box_loss_fn = losses.RpnBoxLoss(params.rpn_box_loss)
+    self._frcnn_class_loss_fn = losses.FastrcnnClassLoss()
+    self._frcnn_box_loss_fn = losses.FastrcnnBoxLoss(params.frcnn_box_loss)
+    if self._include_mask:
+      self._mask_loss_fn = losses.MaskrcnnLoss()
+
+    self._generate_detections_fn = postprocess_ops.GenericDetectionGenerator(
+        params.postprocess)
+
+    self._transpose_input = params.train.transpose_input
+    assert not self._transpose_input, 'Transpose input is not supported.'
+ + def build_outputs(self, inputs, mode): + is_training = mode == mode_keys.TRAIN + model_outputs = {} + + image = inputs['image'] + _, image_height, image_width, _ = image.get_shape().as_list() + backbone_features = self._backbone_fn(image, is_training) + fpn_features = self._fpn_fn(backbone_features, is_training) + + rpn_score_outputs, rpn_box_outputs = self._rpn_head_fn( + fpn_features, is_training) + model_outputs.update({ + 'rpn_score_outputs': + tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), + rpn_score_outputs), + 'rpn_box_outputs': + tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), + rpn_box_outputs), + }) + input_anchor = anchor.Anchor(self._params.architecture.min_level, + self._params.architecture.max_level, + self._params.anchor.num_scales, + self._params.anchor.aspect_ratios, + self._params.anchor.anchor_size, + (image_height, image_width)) + rpn_rois, _ = self._generate_rois_fn(rpn_box_outputs, rpn_score_outputs, + input_anchor.multilevel_boxes, + inputs['image_info'][:, 1, :], + is_training) + if is_training: + rpn_rois = tf.stop_gradient(rpn_rois) + + # Sample proposals. + rpn_rois, matched_gt_boxes, matched_gt_classes, matched_gt_indices = ( + self._sample_rois_fn(rpn_rois, inputs['gt_boxes'], + inputs['gt_classes'])) + + # Create bounding box training targets. + box_targets = box_utils.encode_boxes( + matched_gt_boxes, rpn_rois, weights=[10.0, 10.0, 5.0, 5.0]) + # If the target is background, the box target is set to all 0s. + box_targets = tf.where( + tf.tile( + tf.expand_dims(tf.equal(matched_gt_classes, 0), axis=-1), + [1, 1, 4]), + tf.zeros_like(box_targets), + box_targets) + model_outputs.update({ + 'class_targets': matched_gt_classes, + 'box_targets': box_targets, + }) + + roi_features = spatial_transform_ops.multilevel_crop_and_resize( + fpn_features, rpn_rois, output_size=7) + + class_outputs, box_outputs = self._frcnn_head_fn(roi_features, is_training) + + model_outputs.update({ + 'class_outputs': + tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), + class_outputs), + 'box_outputs': + tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), + box_outputs), + }) + + # Add this output to train to make the checkpoint loadable in predict mode. + # If we skip it in train mode, the heads will be out-of-order and checkpoint + # loading will fail. 
+ boxes, scores, classes, valid_detections = self._generate_detections_fn( + box_outputs, class_outputs, rpn_rois, inputs['image_info'][:, 1:2, :]) + model_outputs.update({ + 'num_detections': valid_detections, + 'detection_boxes': boxes, + 'detection_classes': classes, + 'detection_scores': scores, + }) + + if not self._include_mask: + return model_outputs + + if is_training: + rpn_rois, classes, mask_targets = self._sample_masks_fn( + rpn_rois, matched_gt_boxes, matched_gt_classes, matched_gt_indices, + inputs['gt_masks']) + mask_targets = tf.stop_gradient(mask_targets) + + classes = tf.cast(classes, dtype=tf.int32) + + model_outputs.update({ + 'mask_targets': mask_targets, + 'sampled_class_targets': classes, + }) + else: + rpn_rois = boxes + classes = tf.cast(classes, dtype=tf.int32) + + mask_roi_features = spatial_transform_ops.multilevel_crop_and_resize( + fpn_features, rpn_rois, output_size=14) + + mask_outputs = self._mrcnn_head_fn(mask_roi_features, classes, is_training) + + if is_training: + model_outputs.update({ + 'mask_outputs': + tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), + mask_outputs), + }) + else: + model_outputs.update({ + 'detection_masks': tf.nn.sigmoid(mask_outputs) + }) + + return model_outputs + + def build_loss_fn(self): + if self._keras_model is None: + raise ValueError('build_loss_fn() must be called after build_model().') + + filter_fn = self.make_filter_trainable_variables_fn() + trainable_variables = filter_fn(self._keras_model.trainable_variables) + + def _total_loss_fn(labels, outputs): + rpn_score_loss = self._rpn_score_loss_fn(outputs['rpn_score_outputs'], + labels['rpn_score_targets']) + rpn_box_loss = self._rpn_box_loss_fn(outputs['rpn_box_outputs'], + labels['rpn_box_targets']) + + frcnn_class_loss = self._frcnn_class_loss_fn(outputs['class_outputs'], + outputs['class_targets']) + frcnn_box_loss = self._frcnn_box_loss_fn(outputs['box_outputs'], + outputs['class_targets'], + outputs['box_targets']) + + if self._include_mask: + mask_loss = self._mask_loss_fn(outputs['mask_outputs'], + outputs['mask_targets'], + outputs['sampled_class_targets']) + else: + mask_loss = 0.0 + + model_loss = ( + rpn_score_loss + rpn_box_loss + frcnn_class_loss + frcnn_box_loss + + mask_loss) + + l2_regularization_loss = self.weight_decay_loss(trainable_variables) + total_loss = model_loss + l2_regularization_loss + return { + 'total_loss': total_loss, + 'loss': total_loss, + 'fast_rcnn_class_loss': frcnn_class_loss, + 'fast_rcnn_box_loss': frcnn_box_loss, + 'mask_loss': mask_loss, + 'model_loss': model_loss, + 'l2_regularization_loss': l2_regularization_loss, + 'rpn_score_loss': rpn_score_loss, + 'rpn_box_loss': rpn_box_loss, + } + + return _total_loss_fn + + def build_input_layers(self, params, mode): + is_training = mode == mode_keys.TRAIN + input_shape = ( + params.maskrcnn_parser.output_size + + [params.maskrcnn_parser.num_channels]) + if is_training: + batch_size = params.train.batch_size + input_layer = { + 'image': + tf.keras.layers.Input( + shape=input_shape, + batch_size=batch_size, + name='image', + dtype=tf.bfloat16 if self._use_bfloat16 else tf.float32), + 'image_info': + tf.keras.layers.Input( + shape=[4, 2], + batch_size=batch_size, + name='image_info', + ), + 'gt_boxes': + tf.keras.layers.Input( + shape=[params.maskrcnn_parser.max_num_instances, 4], + batch_size=batch_size, + name='gt_boxes'), + 'gt_classes': + tf.keras.layers.Input( + shape=[params.maskrcnn_parser.max_num_instances], + batch_size=batch_size, + name='gt_classes', + dtype=tf.int64), + 
} + if self._include_mask: + input_layer['gt_masks'] = tf.keras.layers.Input( + shape=[ + params.maskrcnn_parser.max_num_instances, + params.maskrcnn_parser.mask_crop_size, + params.maskrcnn_parser.mask_crop_size + ], + batch_size=batch_size, + name='gt_masks') + else: + batch_size = params.eval.batch_size + input_layer = { + 'image': + tf.keras.layers.Input( + shape=input_shape, + batch_size=batch_size, + name='image', + dtype=tf.bfloat16 if self._use_bfloat16 else tf.float32), + 'image_info': + tf.keras.layers.Input( + shape=[4, 2], + batch_size=batch_size, + name='image_info', + ), + } + return input_layer + + def build_model(self, params, mode): + if self._keras_model is None: + input_layers = self.build_input_layers(self._params, mode) + with backend.get_graph().as_default(): + outputs = self.model_outputs(input_layers, mode) + + model = tf.keras.models.Model( + inputs=input_layers, outputs=outputs, name='maskrcnn') + assert model is not None, 'Fail to build tf.keras.Model.' + model.optimizer = self.build_optimizer() + self._keras_model = model + + return self._keras_model + + def post_processing(self, labels, outputs): + required_output_fields = ['class_outputs', 'box_outputs'] + for field in required_output_fields: + if field not in outputs: + raise ValueError('"%s" is missing in outputs, requried %s found %s' + %(field, required_output_fields, outputs.keys())) + predictions = { + 'image_info': labels['image_info'], + 'num_detections': outputs['num_detections'], + 'detection_boxes': outputs['detection_boxes'], + 'detection_classes': outputs['detection_classes'], + 'detection_scores': outputs['detection_scores'], + } + if self._include_mask: + predictions.update({ + 'detection_masks': outputs['detection_masks'], + }) + + if 'groundtruths' in labels: + predictions['source_id'] = labels['groundtruths']['source_id'] + predictions['gt_source_id'] = labels['groundtruths']['source_id'] + predictions['gt_height'] = labels['groundtruths']['height'] + predictions['gt_width'] = labels['groundtruths']['width'] + predictions['gt_image_info'] = labels['image_info'] + predictions['gt_num_detections'] = ( + labels['groundtruths']['num_detections']) + predictions['gt_boxes'] = labels['groundtruths']['boxes'] + predictions['gt_classes'] = labels['groundtruths']['classes'] + predictions['gt_areas'] = labels['groundtruths']['areas'] + predictions['gt_is_crowds'] = labels['groundtruths']['is_crowds'] + return labels, predictions + + def eval_metrics(self): + return eval_factory.evaluator_generator(self._params.eval) diff --git a/models/official/vision/detection/modeling/optimizers.py b/models/official/vision/detection/modeling/optimizers.py new file mode 100644 index 0000000000000000000000000000000000000000..fd51bb59f579b3de027cba26ef3bee0e67d0c74f --- /dev/null +++ b/models/official/vision/detection/modeling/optimizers.py @@ -0,0 +1,50 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Optimizers.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools + +import numpy as np +import tensorflow as tf + + +class OptimizerFactory(object): + """Class to generate optimizer function.""" + + def __init__(self, params): + """Creates optimized based on the specified flags.""" + if params.type == 'momentum': + self._optimizer = functools.partial( + tf.keras.optimizers.SGD, + momentum=params.momentum, + nesterov=params.nesterov) + elif params.type == 'adam': + self._optimizer = tf.keras.optimizers.Adam + elif params.type == 'adadelta': + self._optimizer = tf.keras.optimizers.Adadelta + elif params.type == 'adagrad': + self._optimizer = tf.keras.optimizers.Adagrad + elif params.type == 'rmsprop': + self._optimizer = functools.partial( + tf.keras.optimizers.RMSprop, momentum=params.momentum) + else: + raise ValueError('Unsupported optimizer type `{}`.'.format(params.type)) + + def __call__(self, learning_rate): + return self._optimizer(learning_rate=learning_rate) diff --git a/models/official/vision/detection/modeling/retinanet_model.py b/models/official/vision/detection/modeling/retinanet_model.py new file mode 100644 index 0000000000000000000000000000000000000000..ff299674f0044cd208a1657a962d133744b78b77 --- /dev/null +++ b/models/official/vision/detection/modeling/retinanet_model.py @@ -0,0 +1,170 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Model defination for the RetinaNet Model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from tensorflow.python.keras import backend +from official.vision.detection.dataloader import mode_keys +from official.vision.detection.evaluation import factory as eval_factory +from official.vision.detection.modeling import base_model +from official.vision.detection.modeling import losses +from official.vision.detection.modeling.architecture import factory +from official.vision.detection.ops import postprocess_ops + + +class RetinanetModel(base_model.Model): + """RetinaNet model function.""" + + def __init__(self, params): + super(RetinanetModel, self).__init__(params) + + # For eval metrics. + self._params = params + + # Architecture generators. + self._backbone_fn = factory.backbone_generator(params) + self._fpn_fn = factory.multilevel_features_generator(params) + self._head_fn = factory.retinanet_head_generator(params) + + # Loss function. + self._cls_loss_fn = losses.RetinanetClassLoss( + params.retinanet_loss, params.architecture.num_classes) + self._box_loss_fn = losses.RetinanetBoxLoss(params.retinanet_loss) + self._box_loss_weight = params.retinanet_loss.box_loss_weight + self._keras_model = None + + # Predict function. 
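+    # At inference time, the detection generator combines the per-level class
+    # logits and box regressions with the anchor boxes to produce the final
+    # boxes, scores and classes (see post_processing below).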
+ self._generate_detections_fn = postprocess_ops.MultilevelDetectionGenerator( + params.architecture.min_level, + params.architecture.max_level, + params.postprocess) + + self._transpose_input = params.train.transpose_input + assert not self._transpose_input, 'Transpose input is not supportted.' + # Input layer. + input_shape = ( + params.retinanet_parser.output_size + + [params.retinanet_parser.num_channels]) + self._input_layer = tf.keras.layers.Input( + shape=input_shape, name='', + dtype=tf.bfloat16 if self._use_bfloat16 else tf.float32) + + def build_outputs(self, inputs, mode): + # If the input image is transposed (from NHWC to HWCN), we need to revert it + # back to the original shape before it's used in the computation. + if self._transpose_input: + inputs = tf.transpose(inputs, [3, 0, 1, 2]) + + backbone_features = self._backbone_fn( + inputs, is_training=(mode == mode_keys.TRAIN)) + fpn_features = self._fpn_fn( + backbone_features, is_training=(mode == mode_keys.TRAIN)) + cls_outputs, box_outputs = self._head_fn( + fpn_features, is_training=(mode == mode_keys.TRAIN)) + + if self._use_bfloat16: + levels = cls_outputs.keys() + for level in levels: + cls_outputs[level] = tf.cast(cls_outputs[level], tf.float32) + box_outputs[level] = tf.cast(box_outputs[level], tf.float32) + + model_outputs = { + 'cls_outputs': cls_outputs, + 'box_outputs': box_outputs, + } + return model_outputs + + def build_loss_fn(self): + if self._keras_model is None: + raise ValueError('build_loss_fn() must be called after build_model().') + + filter_fn = self.make_filter_trainable_variables_fn() + trainable_variables = filter_fn(self._keras_model.trainable_variables) + + def _total_loss_fn(labels, outputs): + cls_loss = self._cls_loss_fn(outputs['cls_outputs'], + labels['cls_targets'], + labels['num_positives']) + box_loss = self._box_loss_fn(outputs['box_outputs'], + labels['box_targets'], + labels['num_positives']) + model_loss = cls_loss + self._box_loss_weight * box_loss + l2_regularization_loss = self.weight_decay_loss(trainable_variables) + total_loss = model_loss + l2_regularization_loss + return { + 'total_loss': total_loss, + 'cls_loss': cls_loss, + 'box_loss': box_loss, + 'model_loss': model_loss, + 'l2_regularization_loss': l2_regularization_loss, + } + + return _total_loss_fn + + def build_model(self, params, mode=None): + if self._keras_model is None: + with backend.get_graph().as_default(): + outputs = self.model_outputs(self._input_layer, mode) + + model = tf.keras.models.Model( + inputs=self._input_layer, outputs=outputs, name='retinanet') + assert model is not None, 'Fail to build tf.keras.Model.' + model.optimizer = self.build_optimizer() + self._keras_model = model + + return self._keras_model + + def post_processing(self, labels, outputs): + # TODO(yeqing): Moves the output related part into build_outputs. 
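+    # Decodes final detections from the raw head outputs and repackages the
+    # label and output dictionaries into the flat form consumed at evaluation
+    # time.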
+ required_output_fields = ['cls_outputs', 'box_outputs'] + for field in required_output_fields: + if field not in outputs: + raise ValueError('"%s" is missing in outputs, requried %s found %s', + field, required_output_fields, outputs.keys()) + required_label_fields = ['image_info', 'groundtruths'] + for field in required_label_fields: + if field not in labels: + raise ValueError('"%s" is missing in outputs, requried %s found %s', + field, required_label_fields, labels.keys()) + boxes, scores, classes, valid_detections = self._generate_detections_fn( + outputs['box_outputs'], outputs['cls_outputs'], + labels['anchor_boxes'], labels['image_info'][:, 1:2, :]) + # Discards the old output tensors to save memory. The `cls_outputs` and + # `box_outputs` are pretty big and could potentiall lead to memory issue. + outputs = { + 'source_id': labels['groundtruths']['source_id'], + 'image_info': labels['image_info'], + 'num_detections': valid_detections, + 'detection_boxes': boxes, + 'detection_classes': classes, + 'detection_scores': scores, + } + + if 'groundtruths' in labels: + labels['source_id'] = labels['groundtruths']['source_id'] + labels['boxes'] = labels['groundtruths']['boxes'] + labels['classes'] = labels['groundtruths']['classes'] + labels['areas'] = labels['groundtruths']['areas'] + labels['is_crowds'] = labels['groundtruths']['is_crowds'] + + return labels, outputs + + def eval_metrics(self): + return eval_factory.evaluator_generator(self._params.eval) diff --git a/models/official/vision/detection/modeling/shapemask_model.py b/models/official/vision/detection/modeling/shapemask_model.py new file mode 100644 index 0000000000000000000000000000000000000000..174187ed02ae7a7617f259974d64b1906a3d16e0 --- /dev/null +++ b/models/official/vision/detection/modeling/shapemask_model.py @@ -0,0 +1,314 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Model definition for the ShapeMask Model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from tensorflow.python.keras import backend +from official.vision.detection.dataloader import anchor +from official.vision.detection.dataloader import mode_keys +from official.vision.detection.evaluation import factory as eval_factory +from official.vision.detection.modeling import base_model +from official.vision.detection.modeling import losses +from official.vision.detection.modeling.architecture import factory +from official.vision.detection.ops import postprocess_ops +from official.vision.detection.utils import box_utils + + +class ShapeMaskModel(base_model.Model): + """ShapeMask model function.""" + + def __init__(self, params): + super(ShapeMaskModel, self).__init__(params) + + self._params = params + self._keras_model = None + + # Architecture generators. 
+ self._backbone_fn = factory.backbone_generator(params) + self._fpn_fn = factory.multilevel_features_generator(params) + self._retinanet_head_fn = factory.retinanet_head_generator(params) + self._shape_prior_head_fn = factory.shapeprior_head_generator(params) + self._coarse_mask_fn = factory.coarsemask_head_generator(params) + self._fine_mask_fn = factory.finemask_head_generator(params) + + # Loss functions. + self._cls_loss_fn = losses.RetinanetClassLoss( + params.retinanet_loss, params.architecture.num_classes) + self._box_loss_fn = losses.RetinanetBoxLoss(params.retinanet_loss) + self._box_loss_weight = params.retinanet_loss.box_loss_weight + + # Mask loss function. + self._shapemask_prior_loss_fn = losses.ShapemaskMseLoss() + self._shapemask_loss_fn = losses.ShapemaskLoss() + self._shape_prior_loss_weight = ( + params.shapemask_loss.shape_prior_loss_weight) + self._coarse_mask_loss_weight = ( + params.shapemask_loss.coarse_mask_loss_weight) + self._fine_mask_loss_weight = ( + params.shapemask_loss.fine_mask_loss_weight) + + # Predict function. + self._generate_detections_fn = postprocess_ops.MultilevelDetectionGenerator( + params.architecture.min_level, + params.architecture.max_level, + params.postprocess) + + def build_outputs(self, inputs, mode): + is_training = mode == mode_keys.TRAIN + images = inputs['image'] + + if 'anchor_boxes' in inputs: + anchor_boxes = inputs['anchor_boxes'] + else: + anchor_boxes = anchor.Anchor( + self._params.architecture.min_level, + self._params.architecture.max_level, + self._params.anchor.num_scales, + self._params.anchor.aspect_ratios, + self._params.anchor.anchor_size, + images.get_shape().as_list()[1:3]).multilevel_boxes + + batch_size = tf.shape(images)[0] + for level in anchor_boxes: + anchor_boxes[level] = tf.tile( + tf.expand_dims(anchor_boxes[level], 0), [batch_size, 1, 1, 1]) + + backbone_features = self._backbone_fn(images, is_training=is_training) + fpn_features = self._fpn_fn(backbone_features, is_training=is_training) + cls_outputs, box_outputs = self._retinanet_head_fn( + fpn_features, is_training=is_training) + + valid_boxes, valid_scores, valid_classes, valid_detections = ( + self._generate_detections_fn(box_outputs, cls_outputs, + anchor_boxes, + inputs['image_info'][:, 1:2, :])) + + image_size = images.get_shape().as_list()[1:3] + valid_outer_boxes = box_utils.compute_outer_boxes( + tf.reshape(valid_boxes, [-1, 4]), + image_size, + scale=self._params.shapemask_parser.outer_box_scale) + valid_outer_boxes = tf.reshape(valid_outer_boxes, tf.shape(valid_boxes)) + + # Wrapping if else code paths into a layer to make the checkpoint loadable + # in prediction mode. 
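+    # During training the mask heads consume the sampled groundtruth boxes and
+    # classes provided by the input pipeline; at inference they consume the
+    # detected boxes computed above. Doing the switch inside a Keras layer
+    # keeps both branches in the same model graph.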
+ class SampledBoxesLayer(tf.keras.layers.Layer): + """ShapeMask model function.""" + + def call(self, inputs, val_boxes, val_classes, val_outer_boxes, training): + if training: + boxes = inputs['mask_boxes'] + outer_boxes = inputs['mask_outer_boxes'] + classes = inputs['mask_classes'] + else: + boxes = val_boxes + classes = val_classes + outer_boxes = val_outer_boxes + return boxes, classes, outer_boxes + + boxes, classes, outer_boxes = SampledBoxesLayer()( + inputs, valid_boxes, valid_classes, + valid_outer_boxes, training=is_training) + + instance_features, prior_masks = self._shape_prior_head_fn(fpn_features, + boxes, + outer_boxes, + classes, + is_training) + coarse_mask_logits = self._coarse_mask_fn(instance_features, + prior_masks, + classes, + is_training) + fine_mask_logits = self._fine_mask_fn(instance_features, + coarse_mask_logits, + classes, + is_training) + + model_outputs = { + 'cls_outputs': cls_outputs, + 'box_outputs': box_outputs, + 'fine_mask_logits': fine_mask_logits, + 'coarse_mask_logits': coarse_mask_logits, + 'prior_masks': prior_masks, + } + + if not is_training: + model_outputs.update({ + 'num_detections': valid_detections, + 'detection_boxes': valid_boxes, + 'detection_outer_boxes': valid_outer_boxes, + 'detection_masks': fine_mask_logits, + 'detection_classes': valid_classes, + 'detection_scores': valid_scores, + }) + + return model_outputs + + def build_loss_fn(self): + if self._keras_model is None: + raise ValueError('build_loss_fn() must be called after build_model().') + + filter_fn = self.make_filter_trainable_variables_fn() + trainable_variables = filter_fn(self._keras_model.trainable_variables) + + def _total_loss_fn(labels, outputs): + cls_loss = self._cls_loss_fn(outputs['cls_outputs'], + labels['cls_targets'], + labels['num_positives']) + box_loss = self._box_loss_fn(outputs['box_outputs'], + labels['box_targets'], + labels['num_positives']) + + # Adds Shapemask model losses. 
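# The total loss assembled just below is a plain weighted sum of the detection
# and mask terms.  A toy numeric sketch of that combination; the weights here
# are made-up values, not the configured retinanet_loss / shapemask_loss
# parameters.
def weighted_total_loss(losses, weights):
  """losses/weights: dicts keyed by task name; missing weights default to 1."""
  return sum(weights.get(name, 1.0) * value for name, value in losses.items())

toy_losses = {'cls': 0.9, 'box': 0.4, 'prior': 0.2, 'coarse': 0.3, 'fine': 0.25}
toy_weights = {'box': 50.0, 'prior': 0.1}
total = weighted_total_loss(toy_losses, toy_weights)
# total = 0.9 + 50.0 * 0.4 + 0.1 * 0.2 + 0.3 + 0.25 = 21.47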
+ shape_prior_loss = self._shapemask_prior_loss_fn( + outputs['prior_masks'], + labels['mask_targets'], + labels['mask_is_valid']) + coarse_mask_loss = self._shapemask_loss_fn( + outputs['coarse_mask_logits'], + labels['mask_targets'], + labels['mask_is_valid']) + fine_mask_loss = self._shapemask_loss_fn( + outputs['fine_mask_logits'], + labels['fine_mask_targets'], + labels['mask_is_valid']) + + model_loss = ( + cls_loss + self._box_loss_weight * box_loss + + shape_prior_loss * self._shape_prior_loss_weight + + coarse_mask_loss * self._coarse_mask_loss_weight + + fine_mask_loss * self._fine_mask_loss_weight) + + l2_regularization_loss = self.weight_decay_loss(trainable_variables) + total_loss = model_loss + l2_regularization_loss + + shapemask_losses = { + 'total_loss': total_loss, + 'loss': total_loss, + 'retinanet_cls_loss': cls_loss, + 'l2_regularization_loss': l2_regularization_loss, + 'retinanet_box_loss': box_loss, + 'shapemask_prior_loss': shape_prior_loss, + 'shapemask_coarse_mask_loss': coarse_mask_loss, + 'shapemask_fine_mask_loss': fine_mask_loss, + 'model_loss': model_loss, + } + return shapemask_losses + + return _total_loss_fn + + def build_input_layers(self, params, mode): + is_training = mode == mode_keys.TRAIN + input_shape = ( + params.shapemask_parser.output_size + + [params.shapemask_parser.num_channels]) + if is_training: + batch_size = params.train.batch_size + input_layer = { + 'image': tf.keras.layers.Input( + shape=input_shape, + batch_size=batch_size, + name='image', + dtype=tf.bfloat16 if self._use_bfloat16 else tf.float32), + 'image_info': tf.keras.layers.Input( + shape=[4, 2], + batch_size=batch_size, + name='image_info'), + 'mask_classes': tf.keras.layers.Input( + shape=[params.shapemask_parser.num_sampled_masks], + batch_size=batch_size, + name='mask_classes', + dtype=tf.int64), + 'mask_outer_boxes': tf.keras.layers.Input( + shape=[params.shapemask_parser.num_sampled_masks, 4], + batch_size=batch_size, + name='mask_outer_boxes', + dtype=tf.float32), + 'mask_boxes': tf.keras.layers.Input( + shape=[params.shapemask_parser.num_sampled_masks, 4], + batch_size=batch_size, + name='mask_boxes', + dtype=tf.float32), + } + else: + batch_size = params.eval.batch_size + input_layer = { + 'image': tf.keras.layers.Input( + shape=input_shape, + batch_size=batch_size, + name='image', + dtype=tf.bfloat16 if self._use_bfloat16 else tf.float32), + 'image_info': tf.keras.layers.Input( + shape=[4, 2], + batch_size=batch_size, + name='image_info'), + } + return input_layer + + def build_model(self, params, mode): + if self._keras_model is None: + input_layers = self.build_input_layers(self._params, mode) + with backend.get_graph().as_default(): + outputs = self.model_outputs(input_layers, mode) + + model = tf.keras.models.Model( + inputs=input_layers, outputs=outputs, name='shapemask') + assert model is not None, 'Fail to build tf.keras.Model.' 
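# `build_model` feeds a dict of named `tf.keras.layers.Input`s into the
# functional graph and wraps the result in a `tf.keras.Model`, as above.  A
# minimal self-contained sketch of that pattern with toy shapes (not the
# ShapeMask parser's output sizes):
import tensorflow as tf

def build_toy_model(batch_size=2):
  inputs = {
      'image': tf.keras.layers.Input(
          shape=[32, 32, 3], batch_size=batch_size, name='image'),
      'image_info': tf.keras.layers.Input(
          shape=[4, 2], batch_size=batch_size, name='image_info'),
  }
  # A stand-in for backbone/FPN/heads: one conv plus global pooling.
  feats = tf.keras.layers.Conv2D(8, 3, padding='same')(inputs['image'])
  pooled = tf.keras.layers.GlobalAveragePooling2D()(feats)
  info = tf.keras.layers.Flatten()(inputs['image_info'])
  merged = tf.keras.layers.Concatenate()([pooled, info])
  return tf.keras.Model(inputs=inputs, outputs={'embedding': merged},
                        name='toy_model')

toy_model = build_toy_model()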
+ model.optimizer = self.build_optimizer() + self._keras_model = model + + return self._keras_model + + def post_processing(self, labels, outputs): + required_output_fields = ['num_detections', 'detection_boxes', + 'detection_classes', 'detection_masks', + 'detection_scores'] + + for field in required_output_fields: + if field not in outputs: + raise ValueError( + '"{}" is missing in outputs, requried {} found {}'.format( + field, required_output_fields, outputs.keys())) + + required_label_fields = ['image_info'] + for field in required_label_fields: + if field not in labels: + raise ValueError( + '"{}" is missing in labels, requried {} found {}'.format( + field, required_label_fields, labels.keys())) + + predictions = { + 'image_info': labels['image_info'], + 'num_detections': outputs['num_detections'], + 'detection_boxes': outputs['detection_boxes'], + 'detection_outer_boxes': outputs['detection_outer_boxes'], + 'detection_classes': outputs['detection_classes'], + 'detection_scores': outputs['detection_scores'], + 'detection_masks': outputs['detection_masks'], + } + + if 'groundtruths' in labels: + predictions['source_id'] = labels['groundtruths']['source_id'] + labels = labels['groundtruths'] + + return labels, predictions + + def eval_metrics(self): + return eval_factory.evaluator_generator(self._params.eval) diff --git a/models/official/vision/detection/ops/__init__.py b/models/official/vision/detection/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..931c2ef11db4a949e6c2e95bca44e36bac1241e9 --- /dev/null +++ b/models/official/vision/detection/ops/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== diff --git a/models/official/vision/detection/ops/nms.py b/models/official/vision/detection/ops/nms.py new file mode 100644 index 0000000000000000000000000000000000000000..bc516e5991a824b1d2f8e0261750cde2481fda2f --- /dev/null +++ b/models/official/vision/detection/ops/nms.py @@ -0,0 +1,205 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tensorflow implementation of non max suppression.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from official.vision.detection.utils import box_utils + + +NMS_TILE_SIZE = 512 + + +def _self_suppression(iou, _, iou_sum): + batch_size = tf.shape(iou)[0] + can_suppress_others = tf.cast( + tf.reshape(tf.reduce_max(iou, 1) <= 0.5, [batch_size, -1, 1]), iou.dtype) + iou_suppressed = tf.reshape( + tf.cast(tf.reduce_max(can_suppress_others * iou, 1) <= 0.5, iou.dtype), + [batch_size, -1, 1]) * iou + iou_sum_new = tf.reduce_sum(iou_suppressed, [1, 2]) + return [ + iou_suppressed, + tf.reduce_any(iou_sum - iou_sum_new > 0.5), iou_sum_new + ] + + +def _cross_suppression(boxes, box_slice, iou_threshold, inner_idx): + batch_size = tf.shape(boxes)[0] + new_slice = tf.slice(boxes, [0, inner_idx * NMS_TILE_SIZE, 0], + [batch_size, NMS_TILE_SIZE, 4]) + iou = box_utils.bbox_overlap(new_slice, box_slice) + ret_slice = tf.expand_dims( + tf.cast(tf.reduce_all(iou < iou_threshold, [1]), box_slice.dtype), + 2) * box_slice + return boxes, ret_slice, iou_threshold, inner_idx + 1 + + +def _suppression_loop_body(boxes, iou_threshold, output_size, idx): + """Process boxes in the range [idx*NMS_TILE_SIZE, (idx+1)*NMS_TILE_SIZE). + + Args: + boxes: a tensor with a shape of [batch_size, anchors, 4]. + iou_threshold: a float representing the threshold for deciding whether boxes + overlap too much with respect to IOU. + output_size: an int32 tensor of size [batch_size]. Representing the number + of selected boxes for each batch. + idx: an integer scalar representing induction variable. + + Returns: + boxes: updated boxes. + iou_threshold: pass down iou_threshold to the next iteration. + output_size: the updated output_size. + idx: the updated induction variable. + """ + num_tiles = tf.shape(boxes)[1] // NMS_TILE_SIZE + batch_size = tf.shape(boxes)[0] + + # Iterates over tiles that can possibly suppress the current tile. + box_slice = tf.slice(boxes, [0, idx * NMS_TILE_SIZE, 0], + [batch_size, NMS_TILE_SIZE, 4]) + _, box_slice, _, _ = tf.while_loop( + lambda _boxes, _box_slice, _threshold, inner_idx: inner_idx < idx, + _cross_suppression, [boxes, box_slice, iou_threshold, + tf.constant(0)]) + + # Iterates over the current tile to compute self-suppression. + iou = box_utils.bbox_overlap(box_slice, box_slice) + mask = tf.expand_dims( + tf.reshape(tf.range(NMS_TILE_SIZE), [1, -1]) > tf.reshape( + tf.range(NMS_TILE_SIZE), [-1, 1]), 0) + iou *= tf.cast(tf.logical_and(mask, iou >= iou_threshold), iou.dtype) + suppressed_iou, _, _ = tf.while_loop( + lambda _iou, loop_condition, _iou_sum: loop_condition, _self_suppression, + [iou, tf.constant(True), + tf.reduce_sum(iou, [1, 2])]) + suppressed_box = tf.reduce_sum(suppressed_iou, 1) > 0 + box_slice *= tf.expand_dims(1.0 - tf.cast(suppressed_box, box_slice.dtype), 2) + + # Uses box_slice to update the input boxes. + mask = tf.reshape( + tf.cast(tf.equal(tf.range(num_tiles), idx), boxes.dtype), [1, -1, 1, 1]) + boxes = tf.tile(tf.expand_dims( + box_slice, [1]), [1, num_tiles, 1, 1]) * mask + tf.reshape( + boxes, [batch_size, num_tiles, NMS_TILE_SIZE, 4]) * (1 - mask) + boxes = tf.reshape(boxes, [batch_size, -1, 4]) + + # Updates output_size. 
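# The tile-based loop above (and the `sorted_non_max_suppression_padded`
# wrapper that follows) is a TPU-friendly reformulation of ordinary greedy
# NMS.  As a reference for the semantics it computes, here is a small NumPy
# greedy NMS over score-sorted boxes; this is a sketch of the end result, not
# of the tiled algorithm itself.
import numpy as np

def greedy_nms_reference(boxes, scores, iou_threshold=0.5, max_output=100):
  """boxes: [N, 4] in [y1, x1, y2, x2], scores: [N]; returns kept indices."""
  order = np.argsort(-scores)
  keep = []
  for idx in order:
    y1, x1, y2, x2 = boxes[idx]
    suppressed = False
    for kept_idx in keep:
      ky1, kx1, ky2, kx2 = boxes[kept_idx]
      inter_h = max(0.0, min(y2, ky2) - max(y1, ky1))
      inter_w = max(0.0, min(x2, kx2) - max(x1, kx1))
      inter = inter_h * inter_w
      union = (y2 - y1) * (x2 - x1) + (ky2 - ky1) * (kx2 - kx1) - inter
      if union > 0 and inter / union >= iou_threshold:
        suppressed = True
        break
    if not suppressed:
      keep.append(idx)
    if len(keep) >= max_output:
      break
  return keep

boxes = np.array([[0, 0, 10, 10], [1, 1, 11, 11], [20, 20, 30, 30]], np.float32)
scores = np.array([0.9, 0.8, 0.7], np.float32)
print(greedy_nms_reference(boxes, scores))  # [0, 2]; box 1 overlaps box 0 too much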
+ output_size += tf.reduce_sum( + tf.cast(tf.reduce_any(box_slice > 0, [2]), tf.int32), [1]) + return boxes, iou_threshold, output_size, idx + 1 + + +def sorted_non_max_suppression_padded(scores, + boxes, + max_output_size, + iou_threshold): + """A wrapper that handles non-maximum suppression. + + Assumption: + * The boxes are sorted by scores unless the box is a dot (all coordinates + are zero). + * Boxes with higher scores can be used to suppress boxes with lower scores. + + The overal design of the algorithm is to handle boxes tile-by-tile: + + boxes = boxes.pad_to_multiply_of(tile_size) + num_tiles = len(boxes) // tile_size + output_boxes = [] + for i in range(num_tiles): + box_tile = boxes[i*tile_size : (i+1)*tile_size] + for j in range(i - 1): + suppressing_tile = boxes[j*tile_size : (j+1)*tile_size] + iou = bbox_overlap(box_tile, suppressing_tile) + # if the box is suppressed in iou, clear it to a dot + box_tile *= _update_boxes(iou) + # Iteratively handle the diagnal tile. + iou = _box_overlap(box_tile, box_tile) + iou_changed = True + while iou_changed: + # boxes that are not suppressed by anything else + suppressing_boxes = _get_suppressing_boxes(iou) + # boxes that are suppressed by suppressing_boxes + suppressed_boxes = _get_suppressed_boxes(iou, suppressing_boxes) + # clear iou to 0 for boxes that are suppressed, as they cannot be used + # to suppress other boxes any more + new_iou = _clear_iou(iou, suppressed_boxes) + iou_changed = (new_iou != iou) + iou = new_iou + # remaining boxes that can still suppress others, are selected boxes. + output_boxes.append(_get_suppressing_boxes(iou)) + if len(output_boxes) >= max_output_size: + break + + Args: + scores: a tensor with a shape of [batch_size, anchors]. + boxes: a tensor with a shape of [batch_size, anchors, 4]. + max_output_size: a scalar integer `Tensor` representing the maximum number + of boxes to be selected by non max suppression. + iou_threshold: a float representing the threshold for deciding whether boxes + overlap too much with respect to IOU. + + Returns: + nms_scores: a tensor with a shape of [batch_size, anchors]. It has same + dtype as input scores. + nms_proposals: a tensor with a shape of [batch_size, anchors, 4]. It has + same dtype as input boxes. 
+ """ + batch_size = tf.shape(boxes)[0] + num_boxes = tf.shape(boxes)[1] + pad = tf.cast( + tf.math.ceil(tf.cast(num_boxes, tf.float32) / NMS_TILE_SIZE), + tf.int32) * NMS_TILE_SIZE - num_boxes + boxes = tf.pad(tf.cast(boxes, tf.float32), [[0, 0], [0, pad], [0, 0]]) + scores = tf.pad( + tf.cast(scores, tf.float32), [[0, 0], [0, pad]], constant_values=-1) + num_boxes += pad + + def _loop_cond(unused_boxes, unused_threshold, output_size, idx): + return tf.logical_and( + tf.reduce_min(output_size) < max_output_size, + idx < num_boxes // NMS_TILE_SIZE) + + selected_boxes, _, output_size, _ = tf.while_loop( + _loop_cond, _suppression_loop_body, [ + boxes, iou_threshold, + tf.zeros([batch_size], tf.int32), + tf.constant(0) + ]) + idx = num_boxes - tf.cast( + tf.nn.top_k( + tf.cast(tf.reduce_any(selected_boxes > 0, [2]), tf.int32) * + tf.expand_dims(tf.range(num_boxes, 0, -1), 0), max_output_size)[0], + tf.int32) + idx = tf.minimum(idx, num_boxes - 1) + idx = tf.reshape( + idx + tf.reshape(tf.range(batch_size) * num_boxes, [-1, 1]), [-1]) + boxes = tf.reshape( + tf.gather(tf.reshape(boxes, [-1, 4]), idx), + [batch_size, max_output_size, 4]) + boxes = boxes * tf.cast( + tf.reshape(tf.range(max_output_size), [1, -1, 1]) < tf.reshape( + output_size, [-1, 1, 1]), boxes.dtype) + scores = tf.reshape( + tf.gather(tf.reshape(scores, [-1, 1]), idx), + [batch_size, max_output_size]) + scores = scores * tf.cast( + tf.reshape(tf.range(max_output_size), [1, -1]) < tf.reshape( + output_size, [-1, 1]), scores.dtype) + return scores, boxes diff --git a/models/official/vision/detection/ops/postprocess_ops.py b/models/official/vision/detection/ops/postprocess_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..2cb06c34ab114d171f30cb52e69d8dc73996e302 --- /dev/null +++ b/models/official/vision/detection/ops/postprocess_ops.py @@ -0,0 +1,413 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Post-processing model outputs to generate detection.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import tensorflow as tf + +from official.vision.detection.ops import nms +from official.vision.detection.utils import box_utils + + +def generate_detections_factory(params): + """Factory to select function to generate detection.""" + if params.use_batched_nms: + func = functools.partial( + _generate_detections_batched, + max_total_size=params.max_total_size, + nms_iou_threshold=params.nms_iou_threshold, + score_threshold=params.score_threshold) + else: + func = functools.partial( + _generate_detections, + max_total_size=params.max_total_size, + nms_iou_threshold=params.nms_iou_threshold, + score_threshold=params.score_threshold, + pre_nms_num_boxes=params.pre_nms_num_boxes) + return func + + +def _select_top_k_scores(scores_in, pre_nms_num_detections): + """Select top_k scores and indices for each class. + + Args: + scores_in: a Tensor with shape [batch_size, N, num_classes], which stacks + class logit outputs on all feature levels. The N is the number of total + anchors on all levels. The num_classes is the number of classes predicted + by the model. + pre_nms_num_detections: Number of candidates before NMS. + + Returns: + scores and indices: Tensors with shape [batch_size, pre_nms_num_detections, + num_classes]. + """ + batch_size, num_anchors, num_class = scores_in.get_shape().as_list() + scores_trans = tf.transpose(scores_in, perm=[0, 2, 1]) + scores_trans = tf.reshape(scores_trans, [-1, num_anchors]) + + top_k_scores, top_k_indices = tf.nn.top_k( + scores_trans, k=pre_nms_num_detections, sorted=True) + + top_k_scores = tf.reshape(top_k_scores, + [batch_size, num_class, pre_nms_num_detections]) + top_k_indices = tf.reshape(top_k_indices, + [batch_size, num_class, pre_nms_num_detections]) + + return tf.transpose(top_k_scores, + [0, 2, 1]), tf.transpose(top_k_indices, [0, 2, 1]) + + +def _generate_detections(boxes, + scores, + max_total_size=100, + nms_iou_threshold=0.3, + score_threshold=0.05, + pre_nms_num_boxes=5000): + """Generate the final detections given the model outputs. + + This uses classes unrolling with while loop based NMS, could be parralled + at batch dimension. + + Args: + boxes: a tensor with shape [batch_size, N, num_classes, 4] or [batch_size, + N, 1, 4], which box predictions on all feature levels. The N is the number + of total anchors on all levels. + scores: a tensor with shape [batch_size, N, num_classes], which stacks class + probability on all feature levels. The N is the number of total anchors on + all levels. The num_classes is the number of classes predicted by the + model. Note that the class_outputs here is the raw score. + max_total_size: a scalar representing maximum number of boxes retained over + all classes. + nms_iou_threshold: a float representing the threshold for deciding whether + boxes overlap too much with respect to IOU. + score_threshold: a float representing the threshold for deciding when to + remove boxes based on score. + pre_nms_num_boxes: an int number of top candidate detections per class + before NMS. + + Returns: + nms_boxes: `float` Tensor of shape [batch_size, max_total_size, 4] + representing top detected boxes in [y1, x1, y2, x2]. + nms_scores: `float` Tensor of shape [batch_size, max_total_size] + representing sorted confidence scores for detected boxes. 
The values are + between [0, 1]. + nms_classes: `int` Tensor of shape [batch_size, max_total_size] representing + classes for detected boxes. + valid_detections: `int` Tensor of shape [batch_size] only the top + `valid_detections` boxes are valid detections. + """ + with tf.name_scope('generate_detections'): + nmsed_boxes = [] + nmsed_classes = [] + nmsed_scores = [] + valid_detections = [] + batch_size, _, num_classes_for_box, _ = boxes.get_shape().as_list() + _, total_anchors, num_classes = scores.get_shape().as_list() + # Selects top pre_nms_num scores and indices before NMS. + scores, indices = _select_top_k_scores( + scores, min(total_anchors, pre_nms_num_boxes)) + for i in range(num_classes): + boxes_i = boxes[:, :, min(num_classes_for_box - 1, i), :] + scores_i = scores[:, :, i] + # Obtains pre_nms_num_boxes before running NMS. + boxes_i = tf.gather(boxes_i, indices[:, :, i], batch_dims=1, axis=1) + + # Filter out scores. + boxes_i, scores_i = box_utils.filter_boxes_by_scores( + boxes_i, scores_i, min_score_threshold=score_threshold) + + (nmsed_scores_i, nmsed_boxes_i) = nms.sorted_non_max_suppression_padded( + tf.cast(scores_i, tf.float32), + tf.cast(boxes_i, tf.float32), + max_total_size, + iou_threshold=nms_iou_threshold) + nmsed_classes_i = tf.fill([batch_size, max_total_size], i) + nmsed_boxes.append(nmsed_boxes_i) + nmsed_scores.append(nmsed_scores_i) + nmsed_classes.append(nmsed_classes_i) + nmsed_boxes = tf.concat(nmsed_boxes, axis=1) + nmsed_scores = tf.concat(nmsed_scores, axis=1) + nmsed_classes = tf.concat(nmsed_classes, axis=1) + nmsed_scores, indices = tf.nn.top_k( + nmsed_scores, k=max_total_size, sorted=True) + nmsed_boxes = tf.gather(nmsed_boxes, indices, batch_dims=1, axis=1) + nmsed_classes = tf.gather(nmsed_classes, indices, batch_dims=1) + valid_detections = tf.reduce_sum( + input_tensor=tf.cast(tf.greater(nmsed_scores, -1), tf.int32), axis=1) + return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections + + +def _generate_detections_per_image(boxes, + scores, + max_total_size=100, + nms_iou_threshold=0.3, + score_threshold=0.05, + pre_nms_num_boxes=5000): + """Generate the final detections per image given the model outputs. + + Args: + boxes: a tensor with shape [N, num_classes, 4] or [N, 1, 4], which box + predictions on all feature levels. The N is the number of total anchors on + all levels. + scores: a tensor with shape [N, num_classes], which stacks class probability + on all feature levels. The N is the number of total anchors on all levels. + The num_classes is the number of classes predicted by the model. Note that + the class_outputs here is the raw score. + max_total_size: a scalar representing maximum number of boxes retained over + all classes. + nms_iou_threshold: a float representing the threshold for deciding whether + boxes overlap too much with respect to IOU. + score_threshold: a float representing the threshold for deciding when to + remove boxes based on score. + pre_nms_num_boxes: an int number of top candidate detections per class + before NMS. + + Returns: + nms_boxes: `float` Tensor of shape [max_total_size, 4] representing top + detected boxes in [y1, x1, y2, x2]. + nms_scores: `float` Tensor of shape [max_total_size] representing sorted + confidence scores for detected boxes. The values are between [0, 1]. + nms_classes: `int` Tensor of shape [max_total_size] representing classes for + detected boxes. + valid_detections: `int` Tensor of shape [1] only the top `valid_detections` + boxes are valid detections. 
+ """ + nmsed_boxes = [] + nmsed_scores = [] + nmsed_classes = [] + num_classes_for_box = boxes.get_shape().as_list()[1] + num_classes = scores.get_shape().as_list()[1] + for i in range(num_classes): + boxes_i = boxes[:, min(num_classes_for_box - 1, i)] + scores_i = scores[:, i] + + # Obtains pre_nms_num_boxes before running NMS. + scores_i, indices = tf.nn.top_k( + scores_i, k=tf.minimum(tf.shape(input=scores_i)[-1], pre_nms_num_boxes)) + boxes_i = tf.gather(boxes_i, indices) + + (nmsed_indices_i, + nmsed_num_valid_i) = tf.image.non_max_suppression_padded( + tf.cast(boxes_i, tf.float32), + tf.cast(scores_i, tf.float32), + max_total_size, + iou_threshold=nms_iou_threshold, + score_threshold=score_threshold, + pad_to_max_output_size=True, + name='nms_detections_' + str(i)) + nmsed_boxes_i = tf.gather(boxes_i, nmsed_indices_i) + nmsed_scores_i = tf.gather(scores_i, nmsed_indices_i) + # Sets scores of invalid boxes to -1. + nmsed_scores_i = tf.where( + tf.less(tf.range(max_total_size), [nmsed_num_valid_i]), nmsed_scores_i, + -tf.ones_like(nmsed_scores_i)) + nmsed_classes_i = tf.fill([max_total_size], i) + nmsed_boxes.append(nmsed_boxes_i) + nmsed_scores.append(nmsed_scores_i) + nmsed_classes.append(nmsed_classes_i) + + # Concats results from all classes and sort them. + nmsed_boxes = tf.concat(nmsed_boxes, axis=0) + nmsed_scores = tf.concat(nmsed_scores, axis=0) + nmsed_classes = tf.concat(nmsed_classes, axis=0) + nmsed_scores, indices = tf.nn.top_k( + nmsed_scores, k=max_total_size, sorted=True) + nmsed_boxes = tf.gather(nmsed_boxes, indices) + nmsed_classes = tf.gather(nmsed_classes, indices) + valid_detections = tf.reduce_sum( + input_tensor=tf.cast(tf.greater(nmsed_scores, -1), tf.int32)) + return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections + + +def _generate_detections_batched(boxes, + scores, + max_total_size, + nms_iou_threshold, + score_threshold): + """Generates detected boxes with scores and classes for one-stage detector. + + The function takes output of multi-level ConvNets and anchor boxes and + generates detected boxes. Note that this used batched nms, which is not + supported on TPU currently. + + Args: + boxes: a tensor with shape [batch_size, N, num_classes, 4] or + [batch_size, N, 1, 4], which box predictions on all feature levels. The N + is the number of total anchors on all levels. + scores: a tensor with shape [batch_size, N, num_classes], which + stacks class probability on all feature levels. The N is the number of + total anchors on all levels. The num_classes is the number of classes + predicted by the model. Note that the class_outputs here is the raw score. + max_total_size: a scalar representing maximum number of boxes retained over + all classes. + nms_iou_threshold: a float representing the threshold for deciding whether + boxes overlap too much with respect to IOU. + score_threshold: a float representing the threshold for deciding when to + remove boxes based on score. + Returns: + nms_boxes: `float` Tensor of shape [batch_size, max_total_size, 4] + representing top detected boxes in [y1, x1, y2, x2]. + nms_scores: `float` Tensor of shape [batch_size, max_total_size] + representing sorted confidence scores for detected boxes. The values are + between [0, 1]. + nms_classes: `int` Tensor of shape [batch_size, max_total_size] representing + classes for detected boxes. + valid_detections: `int` Tensor of shape [batch_size] only the top + `valid_detections` boxes are valid detections. 
+ """ + with tf.name_scope('generate_detections'): + # TODO(tsungyi): Removes normalization/denomalization once the + # tf.image.combined_non_max_suppression is coordinate system agnostic. + # Normalizes maximum box cooridinates to 1. + normalizer = tf.reduce_max(boxes) + boxes /= normalizer + (nmsed_boxes, nmsed_scores, nmsed_classes, + valid_detections) = tf.image.combined_non_max_suppression( + boxes, + scores, + max_output_size_per_class=max_total_size, + max_total_size=max_total_size, + iou_threshold=nms_iou_threshold, + score_threshold=score_threshold, + pad_per_class=False,) + # De-normalizes box cooridinates. + nmsed_boxes *= normalizer + nmsed_classes = tf.cast(nmsed_classes, tf.int32) + return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections + + +class MultilevelDetectionGenerator(object): + """Generates detected boxes with scores and classes for one-stage detector.""" + + def __init__(self, min_level, max_level, params): + self._min_level = min_level + self._max_level = max_level + self._generate_detections = generate_detections_factory(params) + + def __call__(self, box_outputs, class_outputs, anchor_boxes, image_shape): + # Collects outputs from all levels into a list. + boxes = [] + scores = [] + for i in range(self._min_level, self._max_level + 1): + box_outputs_i_shape = tf.shape(box_outputs[i]) + batch_size = box_outputs_i_shape[0] + num_anchors_per_locations = box_outputs_i_shape[-1] // 4 + num_classes = tf.shape(class_outputs[i])[-1] // num_anchors_per_locations + + # Applies score transformation and remove the implicit background class. + scores_i = tf.sigmoid( + tf.reshape(class_outputs[i], [batch_size, -1, num_classes])) + scores_i = tf.slice(scores_i, [0, 0, 1], [-1, -1, -1]) + + # Box decoding. + # The anchor boxes are shared for all data in a batch. + # One stage detector only supports class agnostic box regression. + anchor_boxes_i = tf.reshape(anchor_boxes[i], [batch_size, -1, 4]) + box_outputs_i = tf.reshape(box_outputs[i], [batch_size, -1, 4]) + boxes_i = box_utils.decode_boxes(box_outputs_i, anchor_boxes_i) + + # Box clipping. + boxes_i = box_utils.clip_boxes(boxes_i, image_shape) + + boxes.append(boxes_i) + scores.append(scores_i) + boxes = tf.concat(boxes, axis=1) + scores = tf.concat(scores, axis=1) + + nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections = ( + self._generate_detections(tf.expand_dims(boxes, axis=2), scores)) + + # Adds 1 to offset the background class which has index 0. + nmsed_classes += 1 + return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections + + +class GenericDetectionGenerator(object): + """Generates the final detected boxes with scores and classes.""" + + def __init__(self, params): + self._generate_detections = generate_detections_factory(params) + + def __call__(self, box_outputs, class_outputs, anchor_boxes, image_shape): + """Generate final detections. + + Args: + box_outputs: a tensor of shape of [batch_size, K, num_classes * 4] + representing the class-specific box coordinates relative to anchors. + class_outputs: a tensor of shape of [batch_size, K, num_classes] + representing the class logits before applying score activiation. + anchor_boxes: a tensor of shape of [batch_size, K, 4] representing the + corresponding anchor boxes w.r.t `box_outputs`. + image_shape: a tensor of shape of [batch_size, 2] storing the image height + and width w.r.t. the scaled image, i.e. the same image space as + `box_outputs` and `anchor_boxes`. 
+ + Returns: + nms_boxes: `float` Tensor of shape [batch_size, max_total_size, 4] + representing top detected boxes in [y1, x1, y2, x2]. + nms_scores: `float` Tensor of shape [batch_size, max_total_size] + representing sorted confidence scores for detected boxes. The values are + between [0, 1]. + nms_classes: `int` Tensor of shape [batch_size, max_total_size] + representing classes for detected boxes. + valid_detections: `int` Tensor of shape [batch_size] only the top + `valid_detections` boxes are valid detections. + """ + class_outputs = tf.nn.softmax(class_outputs, axis=-1) + + # Removes the background class. + class_outputs_shape = tf.shape(class_outputs) + batch_size = class_outputs_shape[0] + num_locations = class_outputs_shape[1] + num_classes = class_outputs_shape[-1] + num_detections = num_locations * (num_classes - 1) + + class_outputs = tf.slice(class_outputs, [0, 0, 1], [-1, -1, -1]) + box_outputs = tf.reshape( + box_outputs, + tf.stack([batch_size, num_locations, num_classes, 4], axis=-1)) + box_outputs = tf.slice( + box_outputs, [0, 0, 1, 0], [-1, -1, -1, -1]) + anchor_boxes = tf.tile( + tf.expand_dims(anchor_boxes, axis=2), [1, 1, num_classes - 1, 1]) + box_outputs = tf.reshape( + box_outputs, + tf.stack([batch_size, num_detections, 4], axis=-1)) + anchor_boxes = tf.reshape( + anchor_boxes, + tf.stack([batch_size, num_detections, 4], axis=-1)) + + # Box decoding. + decoded_boxes = box_utils.decode_boxes( + box_outputs, anchor_boxes, weights=[10.0, 10.0, 5.0, 5.0]) + + # Box clipping + decoded_boxes = box_utils.clip_boxes(decoded_boxes, image_shape) + + decoded_boxes = tf.reshape( + decoded_boxes, + tf.stack([batch_size, num_locations, num_classes - 1, 4], axis=-1)) + + nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections = ( + self._generate_detections(decoded_boxes, class_outputs)) + + # Adds 1 to offset the background class which has index 0. + nmsed_classes += 1 + + return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections diff --git a/models/official/vision/detection/ops/roi_ops.py b/models/official/vision/detection/ops/roi_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..a21bc7b2882de39b12bc76dacd37047fabac1766 --- /dev/null +++ b/models/official/vision/detection/ops/roi_ops.py @@ -0,0 +1,237 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""ROI-related ops.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from official.vision.detection.ops import nms +from official.vision.detection.utils import box_utils + + +def multilevel_propose_rois(rpn_boxes, + rpn_scores, + anchor_boxes, + image_shape, + rpn_pre_nms_top_k=2000, + rpn_post_nms_top_k=1000, + rpn_nms_threshold=0.7, + rpn_score_threshold=0.0, + rpn_min_size_threshold=0.0, + decode_boxes=True, + clip_boxes=True, + use_batched_nms=False, + apply_sigmoid_to_score=True): + """Proposes RoIs given a group of candidates from different FPN levels. + + The following describes the steps: + 1. For each individual level: + a. Apply sigmoid transform if specified. + b. Decode boxes if specified. + c. Clip boxes if specified. + d. Filter small boxes and those fall outside image if specified. + e. Apply pre-NMS filtering including pre-NMS top k and score thresholding. + f. Apply NMS. + 2. Aggregate post-NMS boxes from each level. + 3. Apply an overall top k to generate the final selected RoIs. + + Args: + rpn_boxes: a dict with keys representing FPN levels and values representing + box tenors of shape [batch_size, feature_h, feature_w, num_anchors * 4]. + rpn_scores: a dict with keys representing FPN levels and values representing + logit tensors of shape [batch_size, feature_h, feature_w, num_anchors]. + anchor_boxes: a dict with keys representing FPN levels and values + representing anchor box tensors of shape + [batch_size, feature_h, feature_w, num_anchors * 4]. + image_shape: a tensor of shape [batch_size, 2] where the last dimension are + [height, width] of the scaled image. + rpn_pre_nms_top_k: an integer of top scoring RPN proposals *per level* to + keep before applying NMS. Default: 2000. + rpn_post_nms_top_k: an integer of top scoring RPN proposals *in total* to + keep after applying NMS. Default: 1000. + rpn_nms_threshold: a float between 0 and 1 representing the IoU threshold + used for NMS. If 0.0, no NMS is applied. Default: 0.7. + rpn_score_threshold: a float between 0 and 1 representing the minimal box + score to keep before applying NMS. This is often used as a pre-filtering + step for better performance. If 0, no filtering is applied. Default: 0. + rpn_min_size_threshold: a float representing the minimal box size in each + side (w.r.t. the scaled image) to keep before applying NMS. This is often + used as a pre-filtering step for better performance. If 0, no filtering is + applied. Default: 0. + decode_boxes: a boolean indicating whether `rpn_boxes` needs to be decoded + using `anchor_boxes`. If False, use `rpn_boxes` directly and ignore + `anchor_boxes`. Default: True. + clip_boxes: a boolean indicating whether boxes are first clipped to the + scaled image size before appliying NMS. If False, no clipping is applied + and `image_shape` is ignored. Default: True. + use_batched_nms: a boolean indicating whether NMS is applied in batch using + `tf.image.combined_non_max_suppression`. Currently only available in + CPU/GPU. Default: False. + apply_sigmoid_to_score: a boolean indicating whether apply sigmoid to + `rpn_scores` before applying NMS. Default: True. + + Returns: + selected_rois: a tensor of shape [batch_size, rpn_post_nms_top_k, 4], + representing the box coordinates of the selected proposals w.r.t. the + scaled image. 
+ selected_roi_scores: a tensor of shape [batch_size, rpn_post_nms_top_k, 1], + representing the scores of the selected proposals. + """ + with tf.name_scope('multilevel_propose_rois'): + rois = [] + roi_scores = [] + image_shape = tf.expand_dims(image_shape, axis=1) + for level in sorted(rpn_scores.keys()): + with tf.name_scope('level_%d' % level): + _, feature_h, feature_w, num_anchors_per_location = ( + rpn_scores[level].get_shape().as_list()) + + num_boxes = feature_h * feature_w * num_anchors_per_location + this_level_scores = tf.reshape(rpn_scores[level], [-1, num_boxes]) + this_level_boxes = tf.reshape(rpn_boxes[level], [-1, num_boxes, 4]) + this_level_anchors = tf.cast( + tf.reshape(anchor_boxes[level], [-1, num_boxes, 4]), + dtype=this_level_scores.dtype) + + if apply_sigmoid_to_score: + this_level_scores = tf.sigmoid(this_level_scores) + + if decode_boxes: + this_level_boxes = box_utils.decode_boxes( + this_level_boxes, this_level_anchors) + if clip_boxes: + this_level_boxes = box_utils.clip_boxes( + this_level_boxes, image_shape) + + if rpn_min_size_threshold > 0.0: + this_level_boxes, this_level_scores = box_utils.filter_boxes( + this_level_boxes, + this_level_scores, + image_shape, + rpn_min_size_threshold) + + this_level_pre_nms_top_k = min(num_boxes, rpn_pre_nms_top_k) + this_level_post_nms_top_k = min(num_boxes, rpn_post_nms_top_k) + if rpn_nms_threshold > 0.0: + if use_batched_nms: + this_level_rois, this_level_roi_scores, _, _ = ( + tf.image.combined_non_max_suppression( + tf.expand_dims(this_level_boxes, axis=2), + tf.expand_dims(this_level_scores, axis=-1), + max_output_size_per_class=this_level_pre_nms_top_k, + max_total_size=this_level_post_nms_top_k, + iou_threshold=rpn_nms_threshold, + score_threshold=rpn_score_threshold, + pad_per_class=False, + clip_boxes=False)) + else: + if rpn_score_threshold > 0.0: + this_level_boxes, this_level_scores = ( + box_utils.filter_boxes_by_scores( + this_level_boxes, this_level_scores, rpn_score_threshold)) + this_level_boxes, this_level_scores = box_utils.top_k_boxes( + this_level_boxes, this_level_scores, k=this_level_pre_nms_top_k) + this_level_roi_scores, this_level_rois = ( + nms.sorted_non_max_suppression_padded( + this_level_scores, + this_level_boxes, + max_output_size=this_level_post_nms_top_k, + iou_threshold=rpn_nms_threshold)) + else: + this_level_rois, this_level_roi_scores = box_utils.top_k_boxes( + this_level_rois, + this_level_scores, + k=this_level_post_nms_top_k) + + rois.append(this_level_rois) + roi_scores.append(this_level_roi_scores) + + all_rois = tf.concat(rois, axis=1) + all_roi_scores = tf.concat(roi_scores, axis=1) + + with tf.name_scope('top_k_rois'): + _, num_valid_rois = all_roi_scores.get_shape().as_list() + overall_top_k = min(num_valid_rois, rpn_post_nms_top_k) + + selected_rois, selected_roi_scores = box_utils.top_k_boxes( + all_rois, all_roi_scores, k=overall_top_k) + + return selected_rois, selected_roi_scores + + +class ROIGenerator(object): + """Proposes RoIs for the second stage processing.""" + + def __init__(self, params): + self._rpn_pre_nms_top_k = params.rpn_pre_nms_top_k + self._rpn_post_nms_top_k = params.rpn_post_nms_top_k + self._rpn_nms_threshold = params.rpn_nms_threshold + self._rpn_score_threshold = params.rpn_score_threshold + self._rpn_min_size_threshold = params.rpn_min_size_threshold + self._test_rpn_pre_nms_top_k = params.test_rpn_pre_nms_top_k + self._test_rpn_post_nms_top_k = params.test_rpn_post_nms_top_k + self._test_rpn_nms_threshold = params.test_rpn_nms_threshold + 
self._test_rpn_score_threshold = params.test_rpn_score_threshold + self._test_rpn_min_size_threshold = params.test_rpn_min_size_threshold + self._use_batched_nms = params.use_batched_nms + + def __call__(self, boxes, scores, anchor_boxes, image_shape, is_training): + """Generates RoI proposals. + + Args: + boxes: a dict with keys representing FPN levels and values representing + box tenors of shape [batch_size, feature_h, feature_w, num_anchors * 4]. + scores: a dict with keys representing FPN levels and values representing + logit tensors of shape [batch_size, feature_h, feature_w, num_anchors]. + anchor_boxes: a dict with keys representing FPN levels and values + representing anchor box tensors of shape + [batch_size, feature_h, feature_w, num_anchors * 4]. + image_shape: a tensor of shape [batch_size, 2] where the last dimension + are [height, width] of the scaled image. + is_training: a bool indicating whether it is in training or inference + mode. + + Returns: + proposed_rois: a tensor of shape [batch_size, rpn_post_nms_top_k, 4], + representing the box coordinates of the proposed RoIs w.r.t. the + scaled image. + proposed_roi_scores: a tensor of shape + [batch_size, rpn_post_nms_top_k, 1], representing the scores of the + proposed RoIs. + + """ + proposed_rois, proposed_roi_scores = multilevel_propose_rois( + boxes, + scores, + anchor_boxes, + image_shape, + rpn_pre_nms_top_k=(self._rpn_pre_nms_top_k if is_training + else self._test_rpn_pre_nms_top_k), + rpn_post_nms_top_k=(self._rpn_post_nms_top_k if is_training + else self._test_rpn_post_nms_top_k), + rpn_nms_threshold=(self._rpn_nms_threshold if is_training + else self._test_rpn_nms_threshold), + rpn_score_threshold=(self._rpn_score_threshold if is_training + else self._test_rpn_score_threshold), + rpn_min_size_threshold=(self._rpn_min_size_threshold if is_training + else self._test_rpn_min_size_threshold), + decode_boxes=True, + clip_boxes=True, + use_batched_nms=self._use_batched_nms, + apply_sigmoid_to_score=True) + return proposed_rois, proposed_roi_scores diff --git a/models/official/vision/detection/ops/spatial_transform_ops.py b/models/official/vision/detection/ops/spatial_transform_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..ae60d20f0e8c8454bd7972e851c33b6dca56ed90 --- /dev/null +++ b/models/official/vision/detection/ops/spatial_transform_ops.py @@ -0,0 +1,608 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Functions to performa spatial transformation for Tensor.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + + +_EPSILON = 1e-8 + + +def nearest_upsampling(data, scale): + """Nearest neighbor upsampling implementation. + + Args: + data: A tensor with a shape of [batch, height_in, width_in, channels]. 
+ scale: An integer multiple to scale resolution of input data. + Returns: + data_up: A tensor with a shape of + [batch, height_in*scale, width_in*scale, channels]. Same dtype as input + data. + """ + with tf.name_scope('nearest_upsampling'): + bs, _, _, c = data.get_shape().as_list() + shape = tf.shape(input=data) + h = shape[1] + w = shape[2] + bs = -1 if bs is None else bs + # Uses reshape to quickly upsample the input. The nearest pixel is selected + # implicitly via broadcasting. + data = tf.reshape(data, [bs, h, 1, w, 1, c]) * tf.ones( + [1, 1, scale, 1, scale, 1], dtype=data.dtype) + return tf.reshape(data, [bs, h * scale, w * scale, c]) + + +def feature_bilinear_interpolation(features, kernel_y, kernel_x): + """Feature bilinear interpolation. + + The RoIAlign feature f can be computed by bilinear interpolation + of four neighboring feature points f0, f1, f2, and f3. + + f(y, x) = [hy, ly] * [[f00, f01], * [hx, lx]^T + [f10, f11]] + f(y, x) = (hy*hx)f00 + (hy*lx)f01 + (ly*hx)f10 + (lx*ly)f11 + f(y, x) = w00*f00 + w01*f01 + w10*f10 + w11*f11 + kernel_y = [hy, ly] + kernel_x = [hx, lx] + + Args: + features: The features are in shape of [batch_size, num_boxes, output_size * + 2, output_size * 2, num_filters]. + kernel_y: Tensor of size [batch_size, boxes, output_size, 2, 1]. + kernel_x: Tensor of size [batch_size, boxes, output_size, 2, 1]. + + Returns: + A 5-D tensor representing feature crop of shape + [batch_size, num_boxes, output_size, output_size, num_filters]. + + """ + (batch_size, num_boxes, output_size, _, + num_filters) = features.get_shape().as_list() + output_size = output_size // 2 + kernel_y = tf.reshape(kernel_y, [batch_size, num_boxes, output_size * 2, 1]) + kernel_x = tf.reshape(kernel_x, [batch_size, num_boxes, 1, output_size * 2]) + # Use implicit broadcast to generate the interpolation kernel. The + # multiplier `4` is for avg pooling. + interpolation_kernel = kernel_y * kernel_x * 4 + + # Interpolate the gathered features with computed interpolation kernels. + features *= tf.cast( + tf.expand_dims(interpolation_kernel, axis=-1), dtype=features.dtype) + features = tf.reshape( + features, + [batch_size * num_boxes, output_size * 2, output_size * 2, num_filters]) + features = tf.nn.avg_pool(features, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID') + features = tf.reshape( + features, [batch_size, num_boxes, output_size, output_size, num_filters]) + return features + + +def compute_grid_positions(boxes, boundaries, output_size, sample_offset): + """Compute the grid position w.r.t. + + the corresponding feature map. + + Args: + boxes: a 3-D tensor of shape [batch_size, num_boxes, 4] encoding the + information of each box w.r.t. the corresponding feature map. + boxes[:, :, 0:2] are the grid position in (y, x) (float) of the top-left + corner of each box. boxes[:, :, 2:4] are the box sizes in (h, w) (float) + in terms of the number of pixels of the corresponding feature map size. + boundaries: a 3-D tensor of shape [batch_size, num_boxes, 2] representing + the boundary (in (y, x)) of the corresponding feature map for each box. + Any resampled grid points that go beyond the bounary will be clipped. + output_size: a scalar indicating the output crop size. + sample_offset: a float number in [0, 1] indicates the subpixel sample offset + from grid point. + + Returns: + kernel_y: Tensor of size [batch_size, boxes, output_size, 2, 1]. + kernel_x: Tensor of size [batch_size, boxes, output_size, 2, 1]. 
+ box_grid_y0y1: Tensor of size [batch_size, boxes, output_size, 2] + box_grid_x0x1: Tensor of size [batch_size, boxes, output_size, 2] + """ + batch_size, num_boxes, _ = boxes.get_shape().as_list() + box_grid_x = [] + box_grid_y = [] + for i in range(output_size): + box_grid_x.append(boxes[:, :, 1] + + (i + sample_offset) * boxes[:, :, 3] / output_size) + box_grid_y.append(boxes[:, :, 0] + + (i + sample_offset) * boxes[:, :, 2] / output_size) + box_grid_x = tf.stack(box_grid_x, axis=2) + box_grid_y = tf.stack(box_grid_y, axis=2) + + box_grid_y0 = tf.floor(box_grid_y) + box_grid_x0 = tf.floor(box_grid_x) + box_grid_x0 = tf.maximum(0., box_grid_x0) + box_grid_y0 = tf.maximum(0., box_grid_y0) + + box_grid_x0 = tf.minimum(box_grid_x0, tf.expand_dims(boundaries[:, :, 1], -1)) + box_grid_x1 = tf.minimum(box_grid_x0 + 1, + tf.expand_dims(boundaries[:, :, 1], -1)) + box_grid_y0 = tf.minimum(box_grid_y0, tf.expand_dims(boundaries[:, :, 0], -1)) + box_grid_y1 = tf.minimum(box_grid_y0 + 1, + tf.expand_dims(boundaries[:, :, 0], -1)) + + box_gridx0x1 = tf.stack([box_grid_x0, box_grid_x1], axis=-1) + box_gridy0y1 = tf.stack([box_grid_y0, box_grid_y1], axis=-1) + + # The RoIAlign feature f can be computed by bilinear interpolation of four + # neighboring feature points f0, f1, f2, and f3. + # f(y, x) = [hy, ly] * [[f00, f01], * [hx, lx]^T + # [f10, f11]] + # f(y, x) = (hy*hx)f00 + (hy*lx)f01 + (ly*hx)f10 + (lx*ly)f11 + # f(y, x) = w00*f00 + w01*f01 + w10*f10 + w11*f11 + ly = box_grid_y - box_grid_y0 + lx = box_grid_x - box_grid_x0 + hy = 1.0 - ly + hx = 1.0 - lx + kernel_y = tf.reshape( + tf.stack([hy, ly], axis=3), [batch_size, num_boxes, output_size, 2, 1]) + kernel_x = tf.reshape( + tf.stack([hx, lx], axis=3), [batch_size, num_boxes, output_size, 2, 1]) + return kernel_y, kernel_x, box_gridy0y1, box_gridx0x1 + + +def get_grid_one_hot(box_gridy0y1, box_gridx0x1, feature_height, feature_width): + """Get grid_one_hot from indices and feature_size.""" + (batch_size, num_boxes, output_size, _) = box_gridx0x1.get_shape().as_list() + y_indices = tf.cast( + tf.reshape(box_gridy0y1, [batch_size, num_boxes, output_size, 2]), + dtype=tf.int32) + x_indices = tf.cast( + tf.reshape(box_gridx0x1, [batch_size, num_boxes, output_size, 2]), + dtype=tf.int32) + + # shape is [batch_size, num_boxes, output_size, 2, height] + grid_y_one_hot = tf.one_hot(tf.cast(y_indices, tf.int32), feature_height) + # shape is [batch_size, num_boxes, output_size, 2, width] + grid_x_one_hot = tf.one_hot(tf.cast(x_indices, tf.int32), feature_width) + + return grid_y_one_hot, grid_x_one_hot + + +def selective_crop_and_resize(features, + boxes, + box_levels, + boundaries, + output_size=7, + sample_offset=0.5, + use_einsum_gather=False): + """Crop and resize boxes on a set of feature maps. + + Given multiple features maps indexed by different levels, and a set of boxes + where each box is mapped to a certain level, it selectively crops and resizes + boxes from the corresponding feature maps to generate the box features. + + We follow the ROIAlign technique (see https://arxiv.org/pdf/1703.06870.pdf, + figure 3 for reference). Specifically, for each feature map, we select an + (output_size, output_size) set of pixels corresponding to the box location, + and then use bilinear interpolation to select the feature value for each + pixel. + + For performance, we perform the gather and interpolation on all layers as a + single operation. 
In this op the multi-level features are first stacked and + gathered into [2*output_size, 2*output_size] feature points. Then bilinear + interpolation is performed on the gathered feature points to generate + [output_size, output_size] RoIAlign feature map. + + Here is the step-by-step algorithm: + 1. The multi-level features are gathered into a + [batch_size, num_boxes, output_size*2, output_size*2, num_filters] + Tensor. The Tensor contains four neighboring feature points for each + vertice in the output grid. + 2. Compute the interpolation kernel of shape + [batch_size, num_boxes, output_size*2, output_size*2]. The last 2 axis + can be seen as stacking 2x2 interpolation kernels for all vertices in the + output grid. + 3. Element-wise multiply the gathered features and interpolation kernel. + Then apply 2x2 average pooling to reduce spatial dimension to + output_size. + + Args: + features: a 5-D tensor of shape [batch_size, num_levels, max_height, + max_width, num_filters] where cropping and resizing are based. + boxes: a 3-D tensor of shape [batch_size, num_boxes, 4] encoding the + information of each box w.r.t. the corresponding feature map. + boxes[:, :, 0:2] are the grid position in (y, x) (float) of the top-left + corner of each box. boxes[:, :, 2:4] are the box sizes in (h, w) (float) + in terms of the number of pixels of the corresponding feature map size. + box_levels: a 3-D tensor of shape [batch_size, num_boxes, 1] representing + the 0-based corresponding feature level index of each box. + boundaries: a 3-D tensor of shape [batch_size, num_boxes, 2] representing + the boundary (in (y, x)) of the corresponding feature map for each box. + Any resampled grid points that go beyond the bounary will be clipped. + output_size: a scalar indicating the output crop size. + sample_offset: a float number in [0, 1] indicates the subpixel sample offset + from grid point. + use_einsum_gather: use einsum to replace gather or not. Replacing einsum + with gather can improve performance when feature size is not large, einsum + is friendly with model partition as well. Gather's performance is better + when feature size is very large and there are multiple box levels. + + Returns: + features_per_box: a 5-D tensor of shape + [batch_size, num_boxes, output_size, output_size, num_filters] + representing the cropped features. + """ + (batch_size, num_levels, max_feature_height, max_feature_width, + num_filters) = features.get_shape().as_list() + _, num_boxes, _ = boxes.get_shape().as_list() + + kernel_y, kernel_x, box_gridy0y1, box_gridx0x1 = compute_grid_positions( + boxes, boundaries, output_size, sample_offset) + x_indices = tf.cast( + tf.reshape(box_gridx0x1, [batch_size, num_boxes, output_size * 2]), + dtype=tf.int32) + y_indices = tf.cast( + tf.reshape(box_gridy0y1, [batch_size, num_boxes, output_size * 2]), + dtype=tf.int32) + + if use_einsum_gather: + # Blinear interpolation is done during the last two gathers: + # f(y, x) = [hy, ly] * [[f00, f01], * [hx, lx]^T + # [f10, f11]] + # [[f00, f01], + # [f10, f11]] = tf.einsum(tf.einsum(features, y_one_hot), x_one_hot) + # where [hy, ly] and [hx, lx] are the bilinear interpolation kernel. 
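# The einsum path above replaces a gather with a contraction against one-hot
# index matrices, which partitions better on TPU when feature maps are small.
# A tiny NumPy check of the underlying identity (selecting entries via one-hot
# weights equals fancy indexing), and of how fractional weights turn the same
# contraction into a bilinear blend:
import numpy as np

feature_row = np.arange(5.0)                      # a 1-D "feature map" axis
indices = np.array([3, 1])                        # positions to gather
one_hot = np.eye(5, dtype=np.float32)[indices]    # [2, 5] one-hot rows
gathered_via_matmul = one_hot @ feature_row       # == feature_row[indices]
assert np.allclose(gathered_via_matmul, feature_row[indices])

# With fractional weights the contraction performs 0.25 * f[1] + 0.75 * f[2].
weights = np.array([[0.0, 0.25, 0.75, 0.0, 0.0]], dtype=np.float32)
print(weights @ feature_row)  # [1.75]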
+ + # shape is [batch_size, boxes, output_size, 2, 1] + grid_y_one_hot, grid_x_one_hot = get_grid_one_hot(box_gridy0y1, + box_gridx0x1, + max_feature_height, + max_feature_width) + + # shape is [batch_size, num_boxes, output_size, height] + grid_y_weight = tf.reduce_sum( + tf.multiply(grid_y_one_hot, kernel_y), axis=-2) + # shape is [batch_size, num_boxes, output_size, width] + grid_x_weight = tf.reduce_sum( + tf.multiply(grid_x_one_hot, kernel_x), axis=-2) + + # Gather for y_axis. + # shape is [batch_size, num_boxes, output_size, width, features] + features_per_box = tf.einsum('bmhwf,bmoh->bmowf', features, + tf.cast(grid_y_weight, features.dtype)) + # Gather for x_axis. + # shape is [batch_size, num_boxes, output_size, output_size, features] + features_per_box = tf.einsum('bmhwf,bmow->bmhof', features_per_box, + tf.cast(grid_x_weight, features.dtype)) + else: + height_dim_offset = max_feature_width + level_dim_offset = max_feature_height * height_dim_offset + batch_dim_offset = num_levels * level_dim_offset + + batch_size_offset = tf.tile( + tf.reshape( + tf.range(batch_size) * batch_dim_offset, [batch_size, 1, 1, 1]), + [1, num_boxes, output_size * 2, output_size * 2]) + box_levels_offset = tf.tile( + tf.reshape(box_levels * level_dim_offset, + [batch_size, num_boxes, 1, 1]), + [1, 1, output_size * 2, output_size * 2]) + y_indices_offset = tf.tile( + tf.reshape(y_indices * height_dim_offset, + [batch_size, num_boxes, output_size * 2, 1]), + [1, 1, 1, output_size * 2]) + x_indices_offset = tf.tile( + tf.reshape(x_indices, [batch_size, num_boxes, 1, output_size * 2]), + [1, 1, output_size * 2, 1]) + + indices = tf.reshape( + batch_size_offset + box_levels_offset + y_indices_offset + + x_indices_offset, [-1]) + + features = tf.reshape(features, [-1, num_filters]) + # TODO(wangtao): replace tf.gather with tf.gather_nd and try to get similar + # performance. + features_per_box = tf.reshape( + tf.gather(features, indices), + [batch_size, num_boxes, output_size * 2, output_size * 2, num_filters]) + features_per_box = feature_bilinear_interpolation(features_per_box, + kernel_y, kernel_x) + + return features_per_box + + +def multilevel_crop_and_resize(features, boxes, output_size=7): + """Crop and resize on multilevel feature pyramid. + + Generate the (output_size, output_size) set of pixels for each input box + by first locating the box into the correct feature level, and then cropping + and resizing it using the correspoding feature map of that level. + + Args: + features: A dictionary with key as pyramid level and value as features. The + features are in shape of [batch_size, height_l, width_l, num_filters]. + boxes: A 3-D Tensor of shape [batch_size, num_boxes, 4]. Each row represents + a box with [y1, x1, y2, x2] in un-normalized coordinates. + output_size: A scalar to indicate the output crop size. + + Returns: + A 5-D tensor representing feature crop of shape + [batch_size, num_boxes, output_size, output_size, num_filters]. + """ + + with tf.name_scope('multilevel_crop_and_resize'): + levels = list(features.keys()) + min_level = min(levels) + max_level = max(levels) + batch_size, max_feature_height, max_feature_width, num_filters = ( + features[min_level].get_shape().as_list()) + _, num_boxes, _ = boxes.get_shape().as_list() + + # Stack feature pyramid into a features_all of shape + # [batch_size, levels, height, width, num_filters]. 
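# Further down in this function each box is routed to a pyramid level with the
# FPN heuristic  level = floor(log2(sqrt(box_area) / 224)) + 4,  clipped to
# [min_level, max_level].  A small NumPy sketch of that assignment; the
# min/max levels here are illustrative defaults, not the configured values.
import numpy as np

def assign_fpn_level(box_heights, box_widths, min_level=2, max_level=6):
  areas_sqrt = np.sqrt(np.asarray(box_heights, np.float32) *
                       np.asarray(box_widths, np.float32))
  levels = np.floor(np.log2(areas_sqrt / 224.0)) + 4.0
  return np.clip(levels, min_level, max_level).astype(np.int32)

# 224x224 boxes map to level 4; halving/doubling the side shifts one level.
print(assign_fpn_level([224, 112, 448, 32], [224, 112, 448, 32]))  # [4 3 5 2]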
+ features_all = [] + feature_heights = [] + feature_widths = [] + for level in range(min_level, max_level + 1): + shape = features[level].get_shape().as_list() + feature_heights.append(shape[1]) + feature_widths.append(shape[2]) + # Concat tensor of [batch_size, height_l * width_l, num_filters] for each + # levels. + features_all.append( + tf.reshape(features[level], [batch_size, -1, num_filters])) + features_r2 = tf.reshape(tf.concat(features_all, 1), [-1, num_filters]) + + # Calculate height_l * width_l for each level. + level_dim_sizes = [ + feature_widths[i] * feature_heights[i] + for i in range(len(feature_widths)) + ] + # level_dim_offsets is accumulated sum of level_dim_size. + level_dim_offsets = [0] + for i in range(len(feature_widths) - 1): + level_dim_offsets.append(level_dim_offsets[i] + level_dim_sizes[i]) + batch_dim_size = level_dim_offsets[-1] + level_dim_sizes[-1] + level_dim_offsets = tf.constant(level_dim_offsets, tf.int32) + height_dim_sizes = tf.constant(feature_widths, tf.int32) + + # Assigns boxes to the right level. + box_width = boxes[:, :, 3] - boxes[:, :, 1] + box_height = boxes[:, :, 2] - boxes[:, :, 0] + areas_sqrt = tf.sqrt(box_height * box_width) + levels = tf.cast( + tf.math.floordiv( + tf.math.log(tf.divide(areas_sqrt, 224.0)), tf.math.log(2.0)) + + 4.0, + dtype=tf.int32) + # Maps levels between [min_level, max_level]. + levels = tf.minimum(max_level, tf.maximum(levels, min_level)) + + # Projects box location and sizes to corresponding feature levels. + scale_to_level = tf.cast( + tf.pow(tf.constant(2.0), tf.cast(levels, tf.float32)), + dtype=boxes.dtype) + boxes /= tf.expand_dims(scale_to_level, axis=2) + box_width /= scale_to_level + box_height /= scale_to_level + boxes = tf.concat([boxes[:, :, 0:2], + tf.expand_dims(box_height, -1), + tf.expand_dims(box_width, -1)], axis=-1) + + # Maps levels to [0, max_level-min_level]. + levels -= min_level + level_strides = tf.pow([[2.0]], tf.cast(levels, tf.float32)) + boundary = tf.cast( + tf.concat([ + tf.expand_dims( + [[tf.cast(max_feature_height, tf.float32)]] / level_strides - 1, + axis=-1), + tf.expand_dims( + [[tf.cast(max_feature_width, tf.float32)]] / level_strides - 1, + axis=-1), + ], + axis=-1), boxes.dtype) + + # Compute grid positions. + kernel_y, kernel_x, box_gridy0y1, box_gridx0x1 = compute_grid_positions( + boxes, boundary, output_size, sample_offset=0.5) + + x_indices = tf.cast( + tf.reshape(box_gridx0x1, [batch_size, num_boxes, output_size * 2]), + dtype=tf.int32) + y_indices = tf.cast( + tf.reshape(box_gridy0y1, [batch_size, num_boxes, output_size * 2]), + dtype=tf.int32) + + batch_size_offset = tf.tile( + tf.reshape( + tf.range(batch_size) * batch_dim_size, [batch_size, 1, 1, 1]), + [1, num_boxes, output_size * 2, output_size * 2]) + # Get level offset for each box. Each box belongs to one level. + levels_offset = tf.tile( + tf.reshape( + tf.gather(level_dim_offsets, levels), + [batch_size, num_boxes, 1, 1]), + [1, 1, output_size * 2, output_size * 2]) + y_indices_offset = tf.tile( + tf.reshape( + y_indices * tf.expand_dims(tf.gather(height_dim_sizes, levels), -1), + [batch_size, num_boxes, output_size * 2, 1]), + [1, 1, 1, output_size * 2]) + x_indices_offset = tf.tile( + tf.reshape(x_indices, [batch_size, num_boxes, 1, output_size * 2]), + [1, 1, output_size * 2, 1]) + indices = tf.reshape( + batch_size_offset + levels_offset + y_indices_offset + x_indices_offset, + [-1]) + + # TODO(wangtao): replace tf.gather with tf.gather_nd and try to get similar + # performance. 
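+ # Each entry of `indices` decomposes as
+ #   batch_index * batch_dim_size + level_dim_offsets[level]
+ #     + y * width_of_that_level + x,
+ # i.e. the flat position of one (y, x) feature vector inside features_r2.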
+ features_per_box = tf.reshape( + tf.gather(features_r2, indices), + [batch_size, num_boxes, output_size * 2, output_size * 2, num_filters]) + + # Bilinear interpolation. + features_per_box = feature_bilinear_interpolation(features_per_box, + kernel_y, kernel_x) + return features_per_box + + +def single_level_feature_crop(features, level_boxes, detection_prior_levels, + min_mask_level, mask_crop_size): + """Crop the FPN features at the appropriate levels for each detection. + + + Args: + features: a float tensor of shape [batch_size, num_levels, + max_feature_size, max_feature_size, num_downsample_channels]. + level_boxes: a float Tensor of the level boxes to crop from. + [batch_size, num_instances, 4]. + detection_prior_levels: an int Tensor of instance assigned level of shape + [batch_size, num_instances]. + min_mask_level: minimum FPN level to crop mask feature from. + mask_crop_size: an int of mask crop size. + + Returns: + crop_features: a float Tensor of shape [batch_size * num_instances, + mask_crop_size, mask_crop_size, num_downsample_channels]. This is the + instance feature crop. + """ + (batch_size, num_levels, max_feature_size, + _, num_downsample_channels) = features.get_shape().as_list() + _, num_of_instances, _ = level_boxes.get_shape().as_list() + level_boxes = tf.cast(level_boxes, tf.int32) + assert num_of_instances == detection_prior_levels.get_shape().as_list()[1] + + x_start_indices = level_boxes[:, :, 1] + y_start_indices = level_boxes[:, :, 0] + # generate the full indices (not just the starting index) + x_idx_list = [] + y_idx_list = [] + for i in range(mask_crop_size): + x_idx_list.append(x_start_indices + i) + y_idx_list.append(y_start_indices + i) + + x_indices = tf.stack(x_idx_list, axis=2) + y_indices = tf.stack(y_idx_list, axis=2) + levels = detection_prior_levels - min_mask_level + height_dim_size = max_feature_size + level_dim_size = max_feature_size * height_dim_size + batch_dim_size = num_levels * level_dim_size + # TODO(weicheng) change this to gather_nd for better readability. + indices = tf.reshape( + tf.tile( + tf.reshape( + tf.range(batch_size) * batch_dim_size, + [batch_size, 1, 1, 1]), + [1, num_of_instances, + mask_crop_size, mask_crop_size]) + + tf.tile( + tf.reshape(levels * level_dim_size, + [batch_size, num_of_instances, 1, 1]), + [1, 1, mask_crop_size, mask_crop_size]) + + tf.tile( + tf.reshape(y_indices * height_dim_size, + [batch_size, num_of_instances, + mask_crop_size, 1]), + [1, 1, 1, mask_crop_size]) + + tf.tile( + tf.reshape(x_indices, + [batch_size, num_of_instances, + 1, mask_crop_size]), + [1, 1, mask_crop_size, 1]), [-1]) + + features_r2 = tf.reshape(features, + [-1, num_downsample_channels]) + crop_features = tf.reshape( + tf.gather(features_r2, indices), + [batch_size * num_of_instances, + mask_crop_size, mask_crop_size, + num_downsample_channels]) + + return crop_features + + +def crop_mask_in_target_box(masks, + boxes, + target_boxes, + output_size, + sample_offset=0, + use_einsum=True): + """Crop masks in target boxes. + + Args: + masks: A tensor with a shape of [batch_size, num_masks, height, width]. + boxes: a float tensor representing box cooridnates that tightly enclose + masks with a shape of [batch_size, num_masks, 4] in un-normalized + coordinates. A box is represented by [ymin, xmin, ymax, xmax]. + target_boxes: a float tensor representing target box cooridnates for + masks with a shape of [batch_size, num_masks, 4] in un-normalized + coordinates. A box is represented by [ymin, xmin, ymax, xmax]. 
+ output_size: A scalar to indicate the output crop size. It currently only + supports to output a square shape outputs. + sample_offset: a float number in [0, 1] indicates the subpixel sample offset + from grid point. + use_einsum: Use einsum to replace gather in selective_crop_and_resize. + + Returns: + A 4-D tensor representing feature crop of shape + [batch_size, num_boxes, output_size, output_size]. + """ + with tf.name_scope('crop_mask_in_target_box'): + batch_size, num_masks, height, width = masks.get_shape().as_list() + masks = tf.reshape(masks, [batch_size*num_masks, height, width, 1]) + # Pad zeros on the boundary of masks. + masks = tf.image.pad_to_bounding_box(masks, 2, 2, height + 4, width + 4) + masks = tf.reshape(masks, [batch_size, num_masks, height+4, width+4, 1]) + + # Projects target box locations and sizes to corresponding cropped + # mask coordinates. + gt_y_min, gt_x_min, gt_y_max, gt_x_max = tf.split( + value=boxes, num_or_size_splits=4, axis=2) + bb_y_min, bb_x_min, bb_y_max, bb_x_max = tf.split( + value=target_boxes, num_or_size_splits=4, axis=2) + y_transform = (bb_y_min - gt_y_min) * height / ( + gt_y_max - gt_y_min + _EPSILON) + 2 + x_transform = (bb_x_min - gt_x_min) * height / ( + gt_x_max - gt_x_min + _EPSILON) + 2 + h_transform = (bb_y_max - bb_y_min) * width / ( + gt_y_max - gt_y_min + _EPSILON) + w_transform = (bb_x_max - bb_x_min) * width / ( + gt_x_max - gt_x_min + _EPSILON) + + boundaries = tf.concat([ + tf.cast( + tf.ones_like(y_transform) * ((height + 4) - 1), dtype=tf.float32), + tf.cast( + tf.ones_like(x_transform) * ((width + 4) - 1), dtype=tf.float32) + ], + axis=-1) + + # Reshape tensors to have the right shape for selective_crop_and_resize. + trasnformed_boxes = tf.concat( + [y_transform, x_transform, h_transform, w_transform], -1) + levels = tf.tile(tf.reshape(tf.range(num_masks), [1, num_masks]), + [batch_size, 1]) + + cropped_masks = selective_crop_and_resize( + masks, + trasnformed_boxes, + levels, + boundaries, + output_size, + sample_offset=sample_offset, + use_einsum_gather=use_einsum) + cropped_masks = tf.squeeze(cropped_masks, axis=-1) + + return cropped_masks diff --git a/models/official/vision/detection/ops/target_ops.py b/models/official/vision/detection/ops/target_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..2a7d6856511f846365041527f2532c8f2b376244 --- /dev/null +++ b/models/official/vision/detection/ops/target_ops.py @@ -0,0 +1,399 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Target and sampling related ops.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from official.vision.detection.ops import spatial_transform_ops +from official.vision.detection.utils import box_utils +from official.vision.detection.utils.object_detection import balanced_positive_negative_sampler + + +def box_matching(boxes, gt_boxes, gt_classes): + """Match boxes to groundtruth boxes. + + Given the proposal boxes and the groundtruth boxes and classes, perform the + groundtruth matching by taking the argmax of the IoU between boxes and + groundtruth boxes. + + Args: + boxes: a tensor of shape of [batch_size, N, 4] representing the box + coordiantes to be matched to groundtruth boxes. + gt_boxes: a tensor of shape of [batch_size, MAX_INSTANCES, 4] representing + the groundtruth box coordinates. It is padded with -1s to indicate the + invalid boxes. + gt_classes: [batch_size, MAX_INSTANCES] representing the groundtruth box + classes. It is padded with -1s to indicate the invalid classes. + + Returns: + matched_gt_boxes: a tensor of shape of [batch_size, N, 4], representing + the matched groundtruth box coordinates for each input box. If the box + does not overlap with any groundtruth boxes, the matched boxes of it + will be set to all 0s. + matched_gt_classes: a tensor of shape of [batch_size, N], representing + the matched groundtruth classes for each input box. If the box does not + overlap with any groundtruth boxes, the matched box classes of it will + be set to 0, which corresponds to the background class. + matched_gt_indices: a tensor of shape of [batch_size, N], representing + the indices of the matched groundtruth boxes in the original gt_boxes + tensor. If the box does not overlap with any groundtruth boxes, the + index of the matched groundtruth will be set to -1. + matched_iou: a tensor of shape of [batch_size, N], representing the IoU + between the box and its matched groundtruth box. The matched IoU is the + maximum IoU of the box and all the groundtruth boxes. + iou: a tensor of shape of [batch_size, N, K], representing the IoU matrix + between boxes and the groundtruth boxes. The IoU between a box and the + invalid groundtruth boxes whose coordinates are [-1, -1, -1, -1] is -1. + """ + # Compute IoU between boxes and gt_boxes. 
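+ # Padded groundtruth rows (coordinates of -1) get an IoU of -1 from
+ # bbox_overlap, so a box whose best match is only padding ends up with
+ # matched_iou <= 0 and is treated as background below.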
+ # iou <- [batch_size, N, K] + iou = box_utils.bbox_overlap(boxes, gt_boxes) + + # max_iou <- [batch_size, N] + # 0.0 -> no match to gt, or -1.0 match to no gt + matched_iou = tf.reduce_max(iou, axis=-1) + + # background_box_mask <- bool, [batch_size, N] + background_box_mask = tf.less_equal(matched_iou, 0.0) + + argmax_iou_indices = tf.argmax(iou, axis=-1, output_type=tf.int32) + + argmax_iou_indices_shape = tf.shape(argmax_iou_indices) + batch_indices = ( + tf.expand_dims(tf.range(argmax_iou_indices_shape[0]), axis=-1) * + tf.ones([1, argmax_iou_indices_shape[-1]], dtype=tf.int32)) + gather_nd_indices = tf.stack([batch_indices, argmax_iou_indices], axis=-1) + + matched_gt_boxes = tf.gather_nd(gt_boxes, gather_nd_indices) + matched_gt_boxes = tf.where( + tf.tile(tf.expand_dims(background_box_mask, axis=-1), [1, 1, 4]), + tf.zeros_like(matched_gt_boxes, dtype=matched_gt_boxes.dtype), + matched_gt_boxes) + + matched_gt_classes = tf.gather_nd(gt_classes, gather_nd_indices) + matched_gt_classes = tf.where( + background_box_mask, + tf.zeros_like(matched_gt_classes), + matched_gt_classes) + + matched_gt_indices = tf.where( + background_box_mask, + -tf.ones_like(argmax_iou_indices), + argmax_iou_indices) + + return (matched_gt_boxes, matched_gt_classes, matched_gt_indices, + matched_iou, iou) + + +def assign_and_sample_proposals(proposed_boxes, + gt_boxes, + gt_classes, + num_samples_per_image=512, + mix_gt_boxes=True, + fg_fraction=0.25, + fg_iou_thresh=0.5, + bg_iou_thresh_hi=0.5, + bg_iou_thresh_lo=0.0): + """Assigns the proposals with groundtruth classes and performs subsmpling. + + Given `proposed_boxes`, `gt_boxes`, and `gt_classes`, the function uses the + following algorithm to generate the final `num_samples_per_image` RoIs. + 1. Calculates the IoU between each proposal box and each gt_boxes. + 2. Assigns each proposed box with a groundtruth class and box by choosing + the largest IoU overlap. + 3. Samples `num_samples_per_image` boxes from all proposed boxes, and + returns box_targets, class_targets, and RoIs. + + Args: + proposed_boxes: a tensor of shape of [batch_size, N, 4]. N is the number + of proposals before groundtruth assignment. The last dimension is the + box coordinates w.r.t. the scaled images in [ymin, xmin, ymax, xmax] + format. + gt_boxes: a tensor of shape of [batch_size, MAX_NUM_INSTANCES, 4]. + The coordinates of gt_boxes are in the pixel coordinates of the scaled + image. This tensor might have padding of values -1 indicating the invalid + box coordinates. + gt_classes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES]. This + tensor might have paddings with values of -1 indicating the invalid + classes. + num_samples_per_image: a integer represents RoI minibatch size per image. + mix_gt_boxes: a bool indicating whether to mix the groundtruth boxes before + sampling proposals. + fg_fraction: a float represents the target fraction of RoI minibatch that + is labeled foreground (i.e., class > 0). + fg_iou_thresh: a float represents the IoU overlap threshold for an RoI to be + considered foreground (if >= fg_iou_thresh). + bg_iou_thresh_hi: a float represents the IoU overlap threshold for an RoI to + be considered background (class = 0 if overlap in [LO, HI)). + bg_iou_thresh_lo: a float represents the IoU overlap threshold for an RoI to + be considered background (class = 0 if overlap in [LO, HI)). 
+ + Returns: + sampled_rois: a tensor of shape of [batch_size, K, 4], representing the + coordinates of the sampled RoIs, where K is the number of the sampled + RoIs, i.e. K = num_samples_per_image. + sampled_gt_boxes: a tensor of shape of [batch_size, K, 4], storing the + box coordinates of the matched groundtruth boxes of the samples RoIs. + sampled_gt_classes: a tensor of shape of [batch_size, K], storing the + classes of the matched groundtruth boxes of the sampled RoIs. + sampled_gt_indices: a tensor of shape of [batch_size, K], storing the + indices of the sampled groudntruth boxes in the original `gt_boxes` + tensor, i.e. gt_boxes[sampled_gt_indices[:, i]] = sampled_gt_boxes[:, i]. + """ + + with tf.name_scope('sample_proposals'): + if mix_gt_boxes: + boxes = tf.concat([proposed_boxes, gt_boxes], axis=1) + else: + boxes = proposed_boxes + + (matched_gt_boxes, matched_gt_classes, matched_gt_indices, + matched_iou, _) = box_matching(boxes, gt_boxes, gt_classes) + + positive_match = tf.greater(matched_iou, fg_iou_thresh) + negative_match = tf.logical_and( + tf.greater_equal(matched_iou, bg_iou_thresh_lo), + tf.less(matched_iou, bg_iou_thresh_hi)) + ignored_match = tf.less(matched_iou, 0.0) + + # re-assign negatively matched boxes to the background class. + matched_gt_classes = tf.where( + negative_match, tf.zeros_like(matched_gt_classes), matched_gt_classes) + matched_gt_indices = tf.where( + negative_match, tf.zeros_like(matched_gt_indices), matched_gt_indices) + + sample_candidates = tf.logical_and( + tf.logical_or(positive_match, negative_match), + tf.logical_not(ignored_match)) + + sampler = ( + balanced_positive_negative_sampler.BalancedPositiveNegativeSampler( + positive_fraction=fg_fraction, is_static=True)) + + batch_size, _ = sample_candidates.get_shape().as_list() + sampled_indicators = [] + for i in range(batch_size): + sampled_indicator = sampler.subsample( + sample_candidates[i], num_samples_per_image, positive_match[i]) + sampled_indicators.append(sampled_indicator) + sampled_indicators = tf.stack(sampled_indicators) + _, sampled_indices = tf.nn.top_k( + tf.cast(sampled_indicators, dtype=tf.int32), + k=num_samples_per_image, + sorted=True) + + sampled_indices_shape = tf.shape(sampled_indices) + batch_indices = ( + tf.expand_dims(tf.range(sampled_indices_shape[0]), axis=-1) * + tf.ones([1, sampled_indices_shape[-1]], dtype=tf.int32)) + gather_nd_indices = tf.stack([batch_indices, sampled_indices], axis=-1) + + sampled_rois = tf.gather_nd(boxes, gather_nd_indices) + sampled_gt_boxes = tf.gather_nd(matched_gt_boxes, gather_nd_indices) + sampled_gt_classes = tf.gather_nd( + matched_gt_classes, gather_nd_indices) + sampled_gt_indices = tf.gather_nd( + matched_gt_indices, gather_nd_indices) + + return (sampled_rois, sampled_gt_boxes, sampled_gt_classes, + sampled_gt_indices) + + +def sample_and_crop_foreground_masks(candidate_rois, + candidate_gt_boxes, + candidate_gt_classes, + candidate_gt_indices, + gt_masks, + num_mask_samples_per_image=128, + mask_target_size=28): + """Samples and creates cropped foreground masks for training. + + Args: + candidate_rois: a tensor of shape of [batch_size, N, 4], where N is the + number of candidate RoIs to be considered for mask sampling. It includes + both positive and negative RoIs. The `num_mask_samples_per_image` positive + RoIs will be sampled to create mask training targets. + candidate_gt_boxes: a tensor of shape of [batch_size, N, 4], storing the + corresponding groundtruth boxes to the `candidate_rois`. 
+ candidate_gt_classes: a tensor of shape of [batch_size, N], storing the + corresponding groundtruth classes to the `candidate_rois`. 0 in the tensor + corresponds to the background class, i.e. negative RoIs. + candidate_gt_indices: a tensor of shape [batch_size, N], storing the + corresponding groundtruth instance indices to the `candidate_gt_boxes`, + i.e. gt_boxes[candidate_gt_indices[:, i]] = candidate_gt_boxes[:, i] and + gt_boxes which is of shape [batch_size, MAX_INSTANCES, 4], M >= N, is the + superset of candidate_gt_boxes. + gt_masks: a tensor of [batch_size, MAX_INSTANCES, mask_height, mask_width] + containing all the groundtruth masks which sample masks are drawn from. + num_mask_samples_per_image: an integer which specifies the number of masks + to sample. + mask_target_size: an integer which specifies the final cropped mask size + after sampling. The output masks are resized w.r.t the sampled RoIs. + + Returns: + foreground_rois: a tensor of shape of [batch_size, K, 4] storing the RoI + that corresponds to the sampled foreground masks, where + K = num_mask_samples_per_image. + foreground_classes: a tensor of shape of [batch_size, K] storing the classes + corresponding to the sampled foreground masks. + cropoped_foreground_masks: a tensor of shape of + [batch_size, K, mask_target_size, mask_target_size] storing the cropped + foreground masks used for training. + """ + with tf.name_scope('sample_and_crop_foreground_masks'): + _, fg_instance_indices = tf.nn.top_k( + tf.cast(tf.greater(candidate_gt_classes, 0), dtype=tf.int32), + k=num_mask_samples_per_image) + + fg_instance_indices_shape = tf.shape(fg_instance_indices) + batch_indices = ( + tf.expand_dims(tf.range(fg_instance_indices_shape[0]), axis=-1) * + tf.ones([1, fg_instance_indices_shape[-1]], dtype=tf.int32)) + + gather_nd_instance_indices = tf.stack( + [batch_indices, fg_instance_indices], axis=-1) + foreground_rois = tf.gather_nd( + candidate_rois, gather_nd_instance_indices) + foreground_boxes = tf.gather_nd( + candidate_gt_boxes, gather_nd_instance_indices) + foreground_classes = tf.gather_nd( + candidate_gt_classes, gather_nd_instance_indices) + foreground_gt_indices = tf.gather_nd( + candidate_gt_indices, gather_nd_instance_indices) + + foreground_gt_indices_shape = tf.shape(foreground_gt_indices) + batch_indices = ( + tf.expand_dims(tf.range(foreground_gt_indices_shape[0]), axis=-1) * + tf.ones([1, foreground_gt_indices_shape[-1]], dtype=tf.int32)) + gather_nd_gt_indices = tf.stack( + [batch_indices, foreground_gt_indices], axis=-1) + foreground_masks = tf.gather_nd(gt_masks, gather_nd_gt_indices) + + cropped_foreground_masks = spatial_transform_ops.crop_mask_in_target_box( + foreground_masks, foreground_boxes, foreground_rois, mask_target_size, + sample_offset=0.5) + + return foreground_rois, foreground_classes, cropped_foreground_masks + + +class ROISampler(object): + """Samples RoIs and creates training targets.""" + + def __init__(self, params): + self._num_samples_per_image = params.num_samples_per_image + self._fg_fraction = params.fg_fraction + self._fg_iou_thresh = params.fg_iou_thresh + self._bg_iou_thresh_hi = params.bg_iou_thresh_hi + self._bg_iou_thresh_lo = params.bg_iou_thresh_lo + self._mix_gt_boxes = params.mix_gt_boxes + + def __call__(self, rois, gt_boxes, gt_classes): + """Sample and assign RoIs for training. + + Args: + rois: a tensor of shape of [batch_size, N, 4]. N is the number + of proposals before groundtruth assignment. The last dimension is the + box coordinates w.r.t. 
the scaled images in [ymin, xmin, ymax, xmax] + format. + gt_boxes: a tensor of shape of [batch_size, MAX_NUM_INSTANCES, 4]. + The coordinates of gt_boxes are in the pixel coordinates of the scaled + image. This tensor might have padding of values -1 indicating the + invalid box coordinates. + gt_classes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES]. This + tensor might have paddings with values of -1 indicating the invalid + classes. + + Returns: + sampled_rois: a tensor of shape of [batch_size, K, 4], representing the + coordinates of the sampled RoIs, where K is the number of the sampled + RoIs, i.e. K = num_samples_per_image. + sampled_gt_boxes: a tensor of shape of [batch_size, K, 4], storing the + box coordinates of the matched groundtruth boxes of the samples RoIs. + sampled_gt_classes: a tensor of shape of [batch_size, K], storing the + classes of the matched groundtruth boxes of the sampled RoIs. + """ + sampled_rois, sampled_gt_boxes, sampled_gt_classes, sampled_gt_indices = ( + assign_and_sample_proposals( + rois, + gt_boxes, + gt_classes, + num_samples_per_image=self._num_samples_per_image, + mix_gt_boxes=self._mix_gt_boxes, + fg_fraction=self._fg_fraction, + fg_iou_thresh=self._fg_iou_thresh, + bg_iou_thresh_hi=self._bg_iou_thresh_hi, + bg_iou_thresh_lo=self._bg_iou_thresh_lo)) + return (sampled_rois, sampled_gt_boxes, sampled_gt_classes, + sampled_gt_indices) + + +class MaskSampler(object): + """Samples and creates mask training targets.""" + + def __init__(self, mask_target_size, num_mask_samples_per_image): + self._mask_target_size = mask_target_size + self._num_mask_samples_per_image = num_mask_samples_per_image + + def __call__(self, + candidate_rois, + candidate_gt_boxes, + candidate_gt_classes, + candidate_gt_indices, + gt_masks): + """Sample and create mask targets for training. + + Args: + candidate_rois: a tensor of shape of [batch_size, N, 4], where N is the + number of candidate RoIs to be considered for mask sampling. It includes + both positive and negative RoIs. The `num_mask_samples_per_image` + positive RoIs will be sampled to create mask training targets. + candidate_gt_boxes: a tensor of shape of [batch_size, N, 4], storing the + corresponding groundtruth boxes to the `candidate_rois`. + candidate_gt_classes: a tensor of shape of [batch_size, N], storing the + corresponding groundtruth classes to the `candidate_rois`. 0 in the + tensor corresponds to the background class, i.e. negative RoIs. + candidate_gt_indices: a tensor of shape [batch_size, N], storing the + corresponding groundtruth instance indices to the `candidate_gt_boxes`, + i.e. gt_boxes[candidate_gt_indices[:, i]] = candidate_gt_boxes[:, i], + where gt_boxes which is of shape [batch_size, MAX_INSTANCES, 4], M >= N, + is the superset of candidate_gt_boxes. + gt_masks: a tensor of [batch_size, MAX_INSTANCES, mask_height, mask_width] + containing all the groundtruth masks which sample masks are drawn from. + after sampling. The output masks are resized w.r.t the sampled RoIs. + + Returns: + foreground_rois: a tensor of shape of [batch_size, K, 4] storing the RoI + that corresponds to the sampled foreground masks, where + K = num_mask_samples_per_image. + foreground_classes: a tensor of shape of [batch_size, K] storing the + classes corresponding to the sampled foreground masks. + cropoped_foreground_masks: a tensor of shape of + [batch_size, K, mask_target_size, mask_target_size] storing the + cropped foreground masks used for training. 
+ """ + foreground_rois, foreground_classes, cropped_foreground_masks = ( + sample_and_crop_foreground_masks( + candidate_rois, + candidate_gt_boxes, + candidate_gt_classes, + candidate_gt_indices, + gt_masks, + self._num_mask_samples_per_image, + self._mask_target_size)) + return foreground_rois, foreground_classes, cropped_foreground_masks diff --git a/models/official/vision/detection/utils/__init__.py b/models/official/vision/detection/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..931c2ef11db4a949e6c2e95bca44e36bac1241e9 --- /dev/null +++ b/models/official/vision/detection/utils/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== diff --git a/models/official/vision/detection/utils/box_utils.py b/models/official/vision/detection/utils/box_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4c2ebf5781f44363b090f3e272101d6014f2edd0 --- /dev/null +++ b/models/official/vision/detection/utils/box_utils.py @@ -0,0 +1,551 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utility functions for bounding box processing.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import numpy as np +import tensorflow as tf + +EPSILON = 1e-8 +BBOX_XFORM_CLIP = np.log(1000. / 16.) + + +def visualize_images_with_bounding_boxes(images, box_outputs, step, + summary_writer): + """Records subset of evaluation images with bounding boxes.""" + image_shape = tf.shape(images[0]) + image_height = tf.cast(image_shape[0], tf.float32) + image_width = tf.cast(image_shape[1], tf.float32) + normalized_boxes = normalize_boxes(box_outputs, [image_height, image_width]) + + bounding_box_color = tf.constant([[1.0, 1.0, 0.0, 1.0]]) + image_summary = tf.image.draw_bounding_boxes(images, normalized_boxes, + bounding_box_color) + with summary_writer.as_default(): + tf.summary.image('bounding_box_summary', image_summary, step=step) + summary_writer.flush() + + +def yxyx_to_xywh(boxes): + """Converts boxes from ymin, xmin, ymax, xmax to xmin, ymin, width, height. 
+ + Args: + boxes: a numpy array whose last dimension is 4 representing the coordinates + of boxes in ymin, xmin, ymax, xmax order. + + Returns: + boxes: a numpy array whose shape is the same as `boxes` in new format. + + Raises: + ValueError: If the last dimension of boxes is not 4. + """ + if boxes.shape[-1] != 4: + raise ValueError('boxes.shape[-1] is {:d}, but must be 4.'.format( + boxes.shape[-1])) + + boxes_ymin = boxes[..., 0] + boxes_xmin = boxes[..., 1] + boxes_width = boxes[..., 3] - boxes[..., 1] + boxes_height = boxes[..., 2] - boxes[..., 0] + new_boxes = np.stack([boxes_xmin, boxes_ymin, boxes_width, boxes_height], + axis=-1) + + return new_boxes + + +def jitter_boxes(boxes, noise_scale=0.025): + """Jitter the box coordinates by some noise distribution. + + Args: + boxes: a tensor whose last dimension is 4 representing the coordinates of + boxes in ymin, xmin, ymax, xmax order. + noise_scale: a python float which specifies the magnitude of noise. The rule + of thumb is to set this between (0, 0.1]. The default value is found to + mimic the noisy detections best empirically. + + Returns: + jittered_boxes: a tensor whose shape is the same as `boxes` representing + the jittered boxes. + + Raises: + ValueError: If the last dimension of boxes is not 4. + """ + if boxes.shape[-1] != 4: + raise ValueError('boxes.shape[-1] is {:d}, but must be 4.'.format( + boxes.shape[-1])) + + with tf.name_scope('jitter_boxes'): + bbox_jitters = tf.random.normal(boxes.get_shape(), stddev=noise_scale) + ymin = boxes[..., 0:1] + xmin = boxes[..., 1:2] + ymax = boxes[..., 2:3] + xmax = boxes[..., 3:4] + width = xmax - xmin + height = ymax - ymin + new_center_x = (xmin + xmax) / 2.0 + bbox_jitters[..., 0:1] * width + new_center_y = (ymin + ymax) / 2.0 + bbox_jitters[..., 1:2] * height + new_width = width * tf.math.exp(bbox_jitters[..., 2:3]) + new_height = height * tf.math.exp(bbox_jitters[..., 3:4]) + jittered_boxes = tf.concat([ + new_center_y - new_height * 0.5, new_center_x - new_width * 0.5, + new_center_y + new_height * 0.5, new_center_x + new_width * 0.5 + ], + axis=-1) + + return jittered_boxes + + +def normalize_boxes(boxes, image_shape): + """Converts boxes to the normalized coordinates. + + Args: + boxes: a tensor whose last dimension is 4 representing the coordinates + of boxes in ymin, xmin, ymax, xmax order. + image_shape: a list of two integers, a two-element vector or a tensor such + that all but the last dimensions are `broadcastable` to `boxes`. The last + dimension is 2, which represents [height, width]. + + Returns: + normalized_boxes: a tensor whose shape is the same as `boxes` representing + the normalized boxes. + + Raises: + ValueError: If the last dimension of boxes is not 4. + """ + if boxes.shape[-1] != 4: + raise ValueError('boxes.shape[-1] is {:d}, but must be 4.'.format( + boxes.shape[-1])) + + with tf.name_scope('normalize_boxes'): + if isinstance(image_shape, list) or isinstance(image_shape, tuple): + height, width = image_shape + else: + image_shape = tf.cast(image_shape, dtype=boxes.dtype) + height = image_shape[..., 0:1] + width = image_shape[..., 1:2] + + ymin = boxes[..., 0:1] / height + xmin = boxes[..., 1:2] / width + ymax = boxes[..., 2:3] / height + xmax = boxes[..., 3:4] / width + + normalized_boxes = tf.concat([ymin, xmin, ymax, xmax], axis=-1) + return normalized_boxes + + +def denormalize_boxes(boxes, image_shape): + """Converts boxes normalized by [height, width] to pixel coordinates. 
+ + Args: + boxes: a tensor whose last dimension is 4 representing the coordinates + of boxes in ymin, xmin, ymax, xmax order. + image_shape: a list of two integers, a two-element vector or a tensor such + that all but the last dimensions are `broadcastable` to `boxes`. The last + dimension is 2, which represents [height, width]. + + Returns: + denormalized_boxes: a tensor whose shape is the same as `boxes` representing + the denormalized boxes. + + Raises: + ValueError: If the last dimension of boxes is not 4. + """ + with tf.name_scope('denormalize_boxes'): + if isinstance(image_shape, list) or isinstance(image_shape, tuple): + height, width = image_shape + else: + image_shape = tf.cast(image_shape, dtype=boxes.dtype) + height, width = tf.split(image_shape, 2, axis=-1) + + ymin, xmin, ymax, xmax = tf.split(boxes, 4, axis=-1) + ymin = ymin * height + xmin = xmin * width + ymax = ymax * height + xmax = xmax * width + + denormalized_boxes = tf.concat([ymin, xmin, ymax, xmax], axis=-1) + return denormalized_boxes + + +def clip_boxes(boxes, image_shape): + """Clips boxes to image boundaries. + + Args: + boxes: a tensor whose last dimension is 4 representing the coordinates + of boxes in ymin, xmin, ymax, xmax order. + image_shape: a list of two integers, a two-element vector or a tensor such + that all but the last dimensions are `broadcastable` to `boxes`. The last + dimension is 2, which represents [height, width]. + + Returns: + clipped_boxes: a tensor whose shape is the same as `boxes` representing the + clipped boxes. + + Raises: + ValueError: If the last dimension of boxes is not 4. + """ + if boxes.shape[-1] != 4: + raise ValueError('boxes.shape[-1] is {:d}, but must be 4.'.format( + boxes.shape[-1])) + + with tf.name_scope('clip_boxes'): + if isinstance(image_shape, list) or isinstance(image_shape, tuple): + height, width = image_shape + max_length = [height - 1.0, width - 1.0, height - 1.0, width - 1.0] + else: + image_shape = tf.cast(image_shape, dtype=boxes.dtype) + height, width = tf.unstack(image_shape, axis=-1) + max_length = tf.stack( + [height - 1.0, width - 1.0, height - 1.0, width - 1.0], axis=-1) + + clipped_boxes = tf.math.maximum(tf.math.minimum(boxes, max_length), 0.0) + return clipped_boxes + + +def compute_outer_boxes(boxes, image_shape, scale=1.0): + """Compute outer box encloses an object with a margin. + + Args: + boxes: a tensor whose last dimension is 4 representing the coordinates of + boxes in ymin, xmin, ymax, xmax order. + image_shape: a list of two integers, a two-element vector or a tensor such + that all but the last dimensions are `broadcastable` to `boxes`. The last + dimension is 2, which represents [height, width]. + scale: a float number specifying the scale of output outer boxes to input + `boxes`. + + Returns: + outer_boxes: a tensor whose shape is the same as `boxes` representing the + outer boxes. + """ + if scale < 1.0: + raise ValueError( + 'scale is {}, but outer box scale must be greater than 1.0.'.format( + scale)) + centers_y = (boxes[..., 0] + boxes[..., 2]) / 2.0 + centers_x = (boxes[..., 1] + boxes[..., 3]) / 2.0 + box_height = (boxes[..., 2] - boxes[..., 0]) * scale + box_width = (boxes[..., 3] - boxes[..., 1]) * scale + outer_boxes = tf.stack([ + centers_y - box_height / 2.0, centers_x - box_width / 2.0, + centers_y + box_height / 2.0, centers_x + box_width / 2.0 + ], + axis=1) + outer_boxes = clip_boxes(outer_boxes, image_shape) + return outer_boxes + + +def encode_boxes(boxes, anchors, weights=None): + """Encode boxes to targets. 
+ + Args: + boxes: a tensor whose last dimension is 4 representing the coordinates + of boxes in ymin, xmin, ymax, xmax order. + anchors: a tensor whose shape is the same as, or `broadcastable` to `boxes`, + representing the coordinates of anchors in ymin, xmin, ymax, xmax order. + weights: None or a list of four float numbers used to scale coordinates. + + Returns: + encoded_boxes: a tensor whose shape is the same as `boxes` representing the + encoded box targets. + + Raises: + ValueError: If the last dimension of boxes is not 4. + """ + if boxes.shape[-1] != 4: + raise ValueError('boxes.shape[-1] is {:d}, but must be 4.'.format( + boxes.shape[-1])) + + with tf.name_scope('encode_boxes'): + boxes = tf.cast(boxes, dtype=anchors.dtype) + ymin = boxes[..., 0:1] + xmin = boxes[..., 1:2] + ymax = boxes[..., 2:3] + xmax = boxes[..., 3:4] + box_h = ymax - ymin + 1.0 + box_w = xmax - xmin + 1.0 + box_yc = ymin + 0.5 * box_h + box_xc = xmin + 0.5 * box_w + + anchor_ymin = anchors[..., 0:1] + anchor_xmin = anchors[..., 1:2] + anchor_ymax = anchors[..., 2:3] + anchor_xmax = anchors[..., 3:4] + anchor_h = anchor_ymax - anchor_ymin + 1.0 + anchor_w = anchor_xmax - anchor_xmin + 1.0 + anchor_yc = anchor_ymin + 0.5 * anchor_h + anchor_xc = anchor_xmin + 0.5 * anchor_w + + encoded_dy = (box_yc - anchor_yc) / anchor_h + encoded_dx = (box_xc - anchor_xc) / anchor_w + encoded_dh = tf.math.log(box_h / anchor_h) + encoded_dw = tf.math.log(box_w / anchor_w) + if weights: + encoded_dy *= weights[0] + encoded_dx *= weights[1] + encoded_dh *= weights[2] + encoded_dw *= weights[3] + + encoded_boxes = tf.concat( + [encoded_dy, encoded_dx, encoded_dh, encoded_dw], + axis=-1) + return encoded_boxes + + +def decode_boxes(encoded_boxes, anchors, weights=None): + """Decode boxes. + + Args: + encoded_boxes: a tensor whose last dimension is 4 representing the + coordinates of encoded boxes in ymin, xmin, ymax, xmax order. + anchors: a tensor whose shape is the same as, or `broadcastable` to `boxes`, + representing the coordinates of anchors in ymin, xmin, ymax, xmax order. + weights: None or a list of four float numbers used to scale coordinates. + + Returns: + encoded_boxes: a tensor whose shape is the same as `boxes` representing the + decoded box targets. 
+ """ + if encoded_boxes.shape[-1] != 4: + raise ValueError('encoded_boxes.shape[-1] is {:d}, but must be 4.'.format( + encoded_boxes.shape[-1])) + + with tf.name_scope('decode_boxes'): + encoded_boxes = tf.cast(encoded_boxes, dtype=anchors.dtype) + dy = encoded_boxes[..., 0:1] + dx = encoded_boxes[..., 1:2] + dh = encoded_boxes[..., 2:3] + dw = encoded_boxes[..., 3:4] + if weights: + dy /= weights[0] + dx /= weights[1] + dh /= weights[2] + dw /= weights[3] + dh = tf.math.minimum(dh, BBOX_XFORM_CLIP) + dw = tf.math.minimum(dw, BBOX_XFORM_CLIP) + + anchor_ymin = anchors[..., 0:1] + anchor_xmin = anchors[..., 1:2] + anchor_ymax = anchors[..., 2:3] + anchor_xmax = anchors[..., 3:4] + anchor_h = anchor_ymax - anchor_ymin + 1.0 + anchor_w = anchor_xmax - anchor_xmin + 1.0 + anchor_yc = anchor_ymin + 0.5 * anchor_h + anchor_xc = anchor_xmin + 0.5 * anchor_w + + decoded_boxes_yc = dy * anchor_h + anchor_yc + decoded_boxes_xc = dx * anchor_w + anchor_xc + decoded_boxes_h = tf.math.exp(dh) * anchor_h + decoded_boxes_w = tf.math.exp(dw) * anchor_w + + decoded_boxes_ymin = decoded_boxes_yc - 0.5 * decoded_boxes_h + decoded_boxes_xmin = decoded_boxes_xc - 0.5 * decoded_boxes_w + decoded_boxes_ymax = decoded_boxes_ymin + decoded_boxes_h - 1.0 + decoded_boxes_xmax = decoded_boxes_xmin + decoded_boxes_w - 1.0 + + decoded_boxes = tf.concat( + [decoded_boxes_ymin, decoded_boxes_xmin, + decoded_boxes_ymax, decoded_boxes_xmax], + axis=-1) + return decoded_boxes + + +def filter_boxes(boxes, scores, image_shape, min_size_threshold): + """Filter and remove boxes that are too small or fall outside the image. + + Args: + boxes: a tensor whose last dimension is 4 representing the coordinates of + boxes in ymin, xmin, ymax, xmax order. + scores: a tensor whose shape is the same as tf.shape(boxes)[:-1] + representing the original scores of the boxes. + image_shape: a tensor whose shape is the same as, or `broadcastable` to + `boxes` except the last dimension, which is 2, representing [height, + width] of the scaled image. + min_size_threshold: a float representing the minimal box size in each side + (w.r.t. the scaled image). Boxes whose sides are smaller than it will be + filtered out. + + Returns: + filtered_boxes: a tensor whose shape is the same as `boxes` but with + the position of the filtered boxes are filled with 0. + filtered_scores: a tensor whose shape is the same as 'scores' but with + the positinon of the filtered boxes filled with 0. 
+ """ + if boxes.shape[-1] != 4: + raise ValueError('boxes.shape[1] is {:d}, but must be 4.'.format( + boxes.shape[-1])) + + with tf.name_scope('filter_boxes'): + if isinstance(image_shape, list) or isinstance(image_shape, tuple): + height, width = image_shape + else: + image_shape = tf.cast(image_shape, dtype=boxes.dtype) + height = image_shape[..., 0] + width = image_shape[..., 1] + + ymin = boxes[..., 0] + xmin = boxes[..., 1] + ymax = boxes[..., 2] + xmax = boxes[..., 3] + + h = ymax - ymin + 1.0 + w = xmax - xmin + 1.0 + yc = ymin + 0.5 * h + xc = xmin + 0.5 * w + + min_size = tf.cast( + tf.math.maximum(min_size_threshold, 1.0), dtype=boxes.dtype) + + filtered_size_mask = tf.math.logical_and( + tf.math.greater(h, min_size), tf.math.greater(w, min_size)) + filtered_center_mask = tf.logical_and( + tf.math.logical_and(tf.math.greater(yc, 0.0), tf.math.less(yc, height)), + tf.math.logical_and(tf.math.greater(xc, 0.0), tf.math.less(xc, width))) + filtered_mask = tf.math.logical_and(filtered_size_mask, + filtered_center_mask) + + filtered_scores = tf.where(filtered_mask, scores, tf.zeros_like(scores)) + filtered_boxes = tf.cast( + tf.expand_dims(filtered_mask, axis=-1), dtype=boxes.dtype) * boxes + + return filtered_boxes, filtered_scores + + +def filter_boxes_by_scores(boxes, scores, min_score_threshold): + """Filter and remove boxes whose scores are smaller than the threshold. + + Args: + boxes: a tensor whose last dimension is 4 representing the coordinates of + boxes in ymin, xmin, ymax, xmax order. + scores: a tensor whose shape is the same as tf.shape(boxes)[:-1] + representing the original scores of the boxes. + min_score_threshold: a float representing the minimal box score threshold. + Boxes whose score are smaller than it will be filtered out. + + Returns: + filtered_boxes: a tensor whose shape is the same as `boxes` but with + the position of the filtered boxes are filled with -1. + filtered_scores: a tensor whose shape is the same as 'scores' but with + the + """ + if boxes.shape[-1] != 4: + raise ValueError('boxes.shape[1] is {:d}, but must be 4.'.format( + boxes.shape[-1])) + + with tf.name_scope('filter_boxes_by_scores'): + filtered_mask = tf.math.greater(scores, min_score_threshold) + filtered_scores = tf.where(filtered_mask, scores, -tf.ones_like(scores)) + filtered_boxes = tf.cast( + tf.expand_dims(filtered_mask, axis=-1), dtype=boxes.dtype) * boxes + + return filtered_boxes, filtered_scores + + +def top_k_boxes(boxes, scores, k): + """Sort and select top k boxes according to the scores. + + Args: + boxes: a tensor of shape [batch_size, N, 4] representing the coordiante of + the boxes. N is the number of boxes per image. + scores: a tensor of shsape [batch_size, N] representing the socre of the + boxes. + k: an integer or a tensor indicating the top k number. + + Returns: + selected_boxes: a tensor of shape [batch_size, k, 4] representing the + selected top k box coordinates. + selected_scores: a tensor of shape [batch_size, k] representing the selected + top k box scores. 
+ """ + with tf.name_scope('top_k_boxes'): + selected_scores, top_k_indices = tf.nn.top_k(scores, k=k, sorted=True) + + batch_size, _ = scores.get_shape().as_list() + if batch_size == 1: + selected_boxes = tf.squeeze( + tf.gather(boxes, top_k_indices, axis=1), axis=1) + else: + top_k_indices_shape = tf.shape(top_k_indices) + batch_indices = ( + tf.expand_dims(tf.range(top_k_indices_shape[0]), axis=-1) * + tf.ones([1, top_k_indices_shape[-1]], dtype=tf.int32)) + gather_nd_indices = tf.stack([batch_indices, top_k_indices], axis=-1) + selected_boxes = tf.gather_nd(boxes, gather_nd_indices) + + return selected_boxes, selected_scores + + +def bbox_overlap(boxes, gt_boxes): + """Calculates the overlap between proposal and ground truth boxes. + + Some `gt_boxes` may have been padded. The returned `iou` tensor for these + boxes will be -1. + + Args: + boxes: a tensor with a shape of [batch_size, N, 4]. N is the number of + proposals before groundtruth assignment (e.g., rpn_post_nms_topn). The + last dimension is the pixel coordinates in [ymin, xmin, ymax, xmax] form. + gt_boxes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES, 4]. This + tensor might have paddings with a negative value. + + Returns: + iou: a tensor with as a shape of [batch_size, N, MAX_NUM_INSTANCES]. + """ + with tf.name_scope('bbox_overlap'): + bb_y_min, bb_x_min, bb_y_max, bb_x_max = tf.split( + value=boxes, num_or_size_splits=4, axis=2) + gt_y_min, gt_x_min, gt_y_max, gt_x_max = tf.split( + value=gt_boxes, num_or_size_splits=4, axis=2) + + # Calculates the intersection area. + i_xmin = tf.math.maximum(bb_x_min, tf.transpose(gt_x_min, [0, 2, 1])) + i_xmax = tf.math.minimum(bb_x_max, tf.transpose(gt_x_max, [0, 2, 1])) + i_ymin = tf.math.maximum(bb_y_min, tf.transpose(gt_y_min, [0, 2, 1])) + i_ymax = tf.math.minimum(bb_y_max, tf.transpose(gt_y_max, [0, 2, 1])) + i_area = tf.math.maximum((i_xmax - i_xmin), 0) * tf.math.maximum( + (i_ymax - i_ymin), 0) + + # Calculates the union area. + bb_area = (bb_y_max - bb_y_min) * (bb_x_max - bb_x_min) + gt_area = (gt_y_max - gt_y_min) * (gt_x_max - gt_x_min) + # Adds a small epsilon to avoid divide-by-zero. + u_area = bb_area + tf.transpose(gt_area, [0, 2, 1]) - i_area + 1e-8 + + # Calculates IoU. + iou = i_area / u_area + + # Fills -1 for IoU entries between the padded ground truth boxes. + gt_invalid_mask = tf.less( + tf.reduce_max(gt_boxes, axis=-1, keepdims=True), 0.0) + padding_mask = tf.logical_or( + tf.zeros_like(bb_x_min, dtype=tf.bool), + tf.transpose(gt_invalid_mask, [0, 2, 1])) + iou = tf.where(padding_mask, -tf.ones_like(iou), iou) + + return iou + + +def get_non_empty_box_indices(boxes): + """Get indices for non-empty boxes.""" + # Selects indices if box height or width is 0. + height = boxes[:, 2] - boxes[:, 0] + width = boxes[:, 3] - boxes[:, 1] + indices = tf.where(tf.logical_and(tf.greater(height, 0), + tf.greater(width, 0))) + return indices[:, 0] diff --git a/models/official/vision/detection/utils/class_utils.py b/models/official/vision/detection/utils/class_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..cce9cf982bbbce7b90ee44e67ebe65997b7a91da --- /dev/null +++ b/models/official/vision/detection/utils/class_utils.py @@ -0,0 +1,44 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utility functions for handling dataset object categories.""" + + +def coco_split_class_ids(split_name): + """Return the COCO class split ids based on split name and training mode. + + Args: + split_name: The name of dataset split. + + Returns: + class_ids: a python list of integer. + """ + if split_name == 'all': + return [] + + elif split_name == 'voc': + return [ + 1, 2, 3, 4, 5, 6, 7, 9, 16, 17, 18, 19, 20, 21, 44, 62, 63, 64, 67, 72 + ] + + elif split_name == 'nonvoc': + return [ + 8, 10, 11, 13, 14, 15, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, + 37, 38, 39, 40, 41, 42, 43, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + 57, 58, 59, 60, 61, 65, 70, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, + 85, 86, 87, 88, 89, 90 + ] + + else: + raise ValueError('Invalid split name {}!!!'.format(split_name)) diff --git a/models/official/vision/detection/utils/dataloader_utils.py b/models/official/vision/detection/utils/dataloader_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..da82203511da50393a352bf75ee56f25c6626c05 --- /dev/null +++ b/models/official/vision/detection/utils/dataloader_utils.py @@ -0,0 +1,40 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Utility functions for dataloader.""" + +import tensorflow as tf + +from official.vision.detection.utils import input_utils + + +def process_source_id(source_id): + """Processes source_id to the right format.""" + if source_id.dtype == tf.string: + source_id = tf.cast(tf.strings.to_number(source_id), tf.int64) + with tf.control_dependencies([source_id]): + source_id = tf.cond( + pred=tf.equal(tf.size(input=source_id), 0), + true_fn=lambda: tf.cast(tf.constant(-1), tf.int64), + false_fn=lambda: tf.identity(source_id)) + return source_id + + +def pad_groundtruths_to_fixed_size(gt, n): + """Pads the first dimension of groundtruths labels to the fixed size.""" + gt['boxes'] = input_utils.pad_to_fixed_size(gt['boxes'], n, -1) + gt['is_crowds'] = input_utils.pad_to_fixed_size(gt['is_crowds'], n, 0) + gt['areas'] = input_utils.pad_to_fixed_size(gt['areas'], n, -1) + gt['classes'] = input_utils.pad_to_fixed_size(gt['classes'], n, -1) + return gt diff --git a/models/official/vision/detection/utils/input_utils.py b/models/official/vision/detection/utils/input_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6010dc8973f387318c4553d3014ccf495cf01fc6 --- /dev/null +++ b/models/official/vision/detection/utils/input_utils.py @@ -0,0 +1,366 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utility functions for input processing.""" + +import math +import tensorflow as tf + +from official.vision.detection.utils import box_utils +from official.vision.detection.utils.object_detection import preprocessor + + +def pad_to_fixed_size(input_tensor, size, constant_values=0): + """Pads data to a fixed length at the first dimension. + + Args: + input_tensor: `Tensor` with any dimension. + size: `int` number for the first dimension of output Tensor. + constant_values: `int` value assigned to the paddings. + + Returns: + `Tensor` with the first dimension padded to `size`. + """ + input_shape = input_tensor.get_shape().as_list() + padding_shape = [] + + # Computes the padding length on the first dimension. + padding_length = tf.maximum(0, size - tf.shape(input_tensor)[0]) + assert_length = tf.Assert( + tf.greater_equal(padding_length, 0), [padding_length]) + with tf.control_dependencies([assert_length]): + padding_shape.append(padding_length) + + # Copies shapes of the rest of input shape dimensions. + for i in range(1, len(input_shape)): + padding_shape.append(tf.shape(input=input_tensor)[i]) + + # Pads input tensor to the fixed first dimension. 
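+ # For example, a [3, 4] input with size=5 and constant_values=-1 comes back
+ # as a [5, 4] tensor whose last two rows are all -1.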
+ paddings = tf.cast(constant_values * tf.ones(padding_shape), + input_tensor.dtype) + padded_tensor = tf.concat([input_tensor, paddings], axis=0) + output_shape = input_shape + output_shape[0] = size + padded_tensor.set_shape(output_shape) + return padded_tensor + + +def normalize_image(image, + offset=(0.485, 0.456, 0.406), + scale=(0.229, 0.224, 0.225)): + """Normalizes the image to zero mean and unit variance.""" + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + offset = tf.constant(offset) + offset = tf.expand_dims(offset, axis=0) + offset = tf.expand_dims(offset, axis=0) + image -= offset + + scale = tf.constant(scale) + scale = tf.expand_dims(scale, axis=0) + scale = tf.expand_dims(scale, axis=0) + image /= scale + return image + + +def compute_padded_size(desired_size, stride): + """Compute the padded size given the desired size and the stride. + + The padded size will be the smallest rectangle, such that each dimension is + the smallest multiple of the stride which is larger than the desired + dimension. For example, if desired_size = (100, 200) and stride = 32, + the output padded_size = (128, 224). + + Args: + desired_size: a `Tensor` or `int` list/tuple of two elements representing + [height, width] of the target output image size. + stride: an integer, the stride of the backbone network. + + Returns: + padded_size: a `Tensor` or `int` list/tuple of two elements representing + [height, width] of the padded output image size. + """ + if isinstance(desired_size, list) or isinstance(desired_size, tuple): + padded_size = [int(math.ceil(d * 1.0 / stride) * stride) + for d in desired_size] + else: + padded_size = tf.cast( + tf.math.ceil( + tf.cast(desired_size, dtype=tf.float32) / stride) * stride, + tf.int32) + return padded_size + + +def resize_and_crop_image(image, + desired_size, + padded_size, + aug_scale_min=1.0, + aug_scale_max=1.0, + seed=1, + method=tf.image.ResizeMethod.BILINEAR): + """Resizes the input image to output size. + + Resize and pad images given the desired output size of the image and + stride size. + + Here are the preprocessing steps. + 1. For a given image, keep its aspect ratio and rescale the image to make it + the largest rectangle to be bounded by the rectangle specified by the + `desired_size`. + 2. Pad the rescaled image to the padded_size. + + Args: + image: a `Tensor` of shape [height, width, 3] representing an image. + desired_size: a `Tensor` or `int` list/tuple of two elements representing + [height, width] of the desired actual output image size. + padded_size: a `Tensor` or `int` list/tuple of two elements representing + [height, width] of the padded output image size. Padding will be applied + after scaling the image to the desired_size. + aug_scale_min: a `float` with range between [0, 1.0] representing minimum + random scale applied to desired_size for training scale jittering. + aug_scale_max: a `float` with range between [1.0, inf] representing maximum + random scale applied to desired_size for training scale jittering. + seed: seed for random scale jittering. + method: function to resize input image to scaled image. + + Returns: + output_image: `Tensor` of shape [height, width, 3] where [height, width] + equals to `output_size`. + image_info: a 2D `Tensor` that encodes the information of the image and the + applied preprocessing. 
It is in the format of + [[original_height, original_width], [desired_height, desired_width], + [y_scale, x_scale], [y_offset, x_offset]], where [desired_height, + desired_width] is the actual scaled image size, and [y_scale, x_scale] is + the scaling factor, which is the ratio of + scaled dimension / original dimension. + """ + with tf.name_scope('resize_and_crop_image'): + image_size = tf.cast(tf.shape(input=image)[0:2], tf.float32) + + random_jittering = (aug_scale_min != 1.0 or aug_scale_max != 1.0) + + if random_jittering: + random_scale = tf.random.uniform([], + aug_scale_min, + aug_scale_max, + seed=seed) + scaled_size = tf.round(random_scale * desired_size) + else: + scaled_size = desired_size + + scale = tf.minimum( + scaled_size[0] / image_size[0], scaled_size[1] / image_size[1]) + scaled_size = tf.round(image_size * scale) + + # Computes 2D image_scale. + image_scale = scaled_size / image_size + + # Selects non-zero random offset (x, y) if scaled image is larger than + # desired_size. + if random_jittering: + max_offset = scaled_size - desired_size + max_offset = tf.where(tf.less(max_offset, 0), + tf.zeros_like(max_offset), + max_offset) + offset = max_offset * tf.random.uniform([ + 2, + ], 0, 1, seed=seed) + offset = tf.cast(offset, tf.int32) + else: + offset = tf.zeros((2,), tf.int32) + + scaled_image = tf.image.resize( + image, tf.cast(scaled_size, tf.int32), method=method) + + if random_jittering: + scaled_image = scaled_image[offset[0]:offset[0] + desired_size[0], + offset[1]:offset[1] + desired_size[1], :] + + output_image = tf.image.pad_to_bounding_box(scaled_image, 0, 0, + padded_size[0], padded_size[1]) + + image_info = tf.stack([ + image_size, + tf.cast(desired_size, dtype=tf.float32), + image_scale, + tf.cast(offset, tf.float32)]) + return output_image, image_info + + +def resize_and_crop_image_v2(image, + short_side, + long_side, + padded_size, + aug_scale_min=1.0, + aug_scale_max=1.0, + seed=1, + method=tf.image.ResizeMethod.BILINEAR): + """Resizes the input image to output size (Faster R-CNN style). + + Resize and pad images given the specified short / long side length and the + stride size. + + Here are the preprocessing steps. + 1. For a given image, keep its aspect ratio and first try to rescale the short + side of the original image to `short_side`. + 2. If the scaled image after 1 has a long side that exceeds `long_side`, keep + the aspect ratio and rescale the long side of the image to `long_side`. + 3. Pad the rescaled image to the padded_size. + + Args: + image: a `Tensor` of shape [height, width, 3] representing an image. + short_side: a scalar `Tensor` or `int` representing the desired short side + to be rescaled to. + long_side: a scalar `Tensor` or `int` representing the desired long side to + be rescaled to. + padded_size: a `Tensor` or `int` list/tuple of two elements representing + [height, width] of the padded output image size. Padding will be applied + after scaling the image to the desired_size. + aug_scale_min: a `float` with range between [0, 1.0] representing minimum + random scale applied to desired_size for training scale jittering. + aug_scale_max: a `float` with range between [1.0, inf] representing maximum + random scale applied to desired_size for training scale jittering. + seed: seed for random scale jittering. + method: function to resize input image to scaled image. + + Returns: + output_image: `Tensor` of shape [height, width, 3] where [height, width] + equals to `output_size`.
+ image_info: a 2D `Tensor` that encodes the information of the image and the + applied preprocessing. It is in the format of + [[original_height, original_width], [desired_height, desired_width], + [y_scale, x_scale], [y_offset, x_offset]], where [desired_height, + desired_width] is the actual scaled image size, and [y_scale, x_scale] is + the scaling factor, which is the ratio of + scaled dimension / original dimension. + """ + with tf.name_scope('resize_and_crop_image_v2'): + image_size = tf.cast(tf.shape(image)[0:2], tf.float32) + + scale_using_short_side = ( + short_side / tf.math.minimum(image_size[0], image_size[1])) + scale_using_long_side = ( + long_side / tf.math.maximum(image_size[0], image_size[1])) + + scaled_size = tf.math.round(image_size * scale_using_short_side) + scaled_size = tf.where( + tf.math.greater( + tf.math.maximum(scaled_size[0], scaled_size[1]), long_side), + tf.math.round(image_size * scale_using_long_side), scaled_size) + desired_size = scaled_size + + random_jittering = (aug_scale_min != 1.0 or aug_scale_max != 1.0) + + if random_jittering: + random_scale = tf.random.uniform([], + aug_scale_min, + aug_scale_max, + seed=seed) + scaled_size = tf.math.round(random_scale * scaled_size) + + # Computes 2D image_scale. + image_scale = scaled_size / image_size + + # Selects non-zero random offset (x, y) if scaled image is larger than + # desired_size. + if random_jittering: + max_offset = scaled_size - desired_size + max_offset = tf.where( + tf.math.less(max_offset, 0), tf.zeros_like(max_offset), max_offset) + offset = max_offset * tf.random.uniform([ + 2, + ], 0, 1, seed=seed) + offset = tf.cast(offset, tf.int32) + else: + offset = tf.zeros((2,), tf.int32) + + scaled_image = tf.image.resize( + image, tf.cast(scaled_size, tf.int32), method=method) + + if random_jittering: + scaled_image = scaled_image[ + offset[0]:offset[0] + desired_size[0], + offset[1]:offset[1] + desired_size[1], :] + + output_image = tf.image.pad_to_bounding_box( + scaled_image, 0, 0, padded_size[0], padded_size[1]) + + image_info = tf.stack([ + image_size, + tf.cast(desired_size, dtype=tf.float32), + image_scale, + tf.cast(offset, tf.float32)]) + return output_image, image_info + + +def resize_and_crop_boxes(boxes, + image_scale, + output_size, + offset): + """Resizes boxes to output size with scale and offset. + + Args: + boxes: `Tensor` of shape [N, 4] representing ground truth boxes. + image_scale: 2D float `Tensor` representing scale factors that apply to + [height, width] of input image. + output_size: 2D `Tensor` or `int` representing [height, width] of target + output image size. + offset: 2D `Tensor` representing top-left corner [y0, x0] to crop scaled + boxes. + + Returns: + boxes: `Tensor` of shape [N, 4] representing the scaled boxes. + """ + # Adjusts box coordinates based on image_scale and offset. + boxes *= tf.tile(tf.expand_dims(image_scale, axis=0), [1, 2]) + boxes -= tf.tile(tf.expand_dims(offset, axis=0), [1, 2]) + # Clips the boxes. + boxes = box_utils.clip_boxes(boxes, output_size) + return boxes + + +def resize_and_crop_masks(masks, + image_scale, + output_size, + offset): + """Resizes masks to output size with scale and offset. + + Args: + masks: `Tensor` of shape [N, H, W, 1] representing ground truth masks. + image_scale: 2D float `Tensor` representing scale factors that apply to + [height, width] of input image. + output_size: 2D `Tensor` or `int` representing [height, width] of target + output image size.
+ offset: 2D `Tensor` representing top-left corner [y0, x0] to crop scaled + boxes. + + Returns: + masks: `Tensor` of shape [N, H, W, 1] representing the scaled masks. + """ + mask_size = tf.shape(input=masks)[1:3] + scaled_size = tf.cast(image_scale * tf.cast(mask_size, image_scale.dtype), + tf.int32) + scaled_masks = tf.image.resize( + masks, scaled_size, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR) + offset = tf.cast(offset, tf.int32) + scaled_masks = scaled_masks[:, offset[0]:offset[0] + output_size[0], + offset[1]:offset[1] + output_size[1], :] + + output_masks = tf.image.pad_to_bounding_box(scaled_masks, 0, 0, + output_size[0], output_size[1]) + return output_masks + + +def random_horizontal_flip(image, boxes=None, masks=None): + """Randomly flips input image and bounding boxes.""" + return preprocessor.random_horizontal_flip(image, boxes, masks) diff --git a/models/official/vision/detection/utils/mask_utils.py b/models/official/vision/detection/utils/mask_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..637d0484f4b48213c4b323be6e0c88f9fa19ebcc --- /dev/null +++ b/models/official/vision/detection/utils/mask_utils.py @@ -0,0 +1,192 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utility functions for segmentations.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import numpy as np +import cv2 + + +def paste_instance_masks(masks, + detected_boxes, + image_height, + image_width): + """Paste instance masks to generate the image segmentation results. + + Args: + masks: a numpy array of shape [N, mask_height, mask_width] representing the + instance masks w.r.t. the `detected_boxes`. + detected_boxes: a numpy array of shape [N, 4] representing the reference + bounding boxes. + image_height: an integer representing the height of the image. + image_width: an integer representing the width of the image. + + Returns: + segms: a numpy array of shape [N, image_height, image_width] representing + the instance masks *pasted* on the image canvas. 
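+ + Note: `detected_boxes` here is expected to be in [x_min, y_min, width, height] + form, matching the reference-box handling in `expand_boxes` below.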
+ """ + + def expand_boxes(boxes, scale): + """Expands an array of boxes by a given scale.""" + # Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/utils/boxes.py#L227 # pylint: disable=line-too-long + # The `boxes` in the reference implementation is in [x1, y1, x2, y2] form, + # whereas `boxes` here is in [x1, y1, w, h] form + w_half = boxes[:, 2] * .5 + h_half = boxes[:, 3] * .5 + x_c = boxes[:, 0] + w_half + y_c = boxes[:, 1] + h_half + + w_half *= scale + h_half *= scale + + boxes_exp = np.zeros(boxes.shape) + boxes_exp[:, 0] = x_c - w_half + boxes_exp[:, 2] = x_c + w_half + boxes_exp[:, 1] = y_c - h_half + boxes_exp[:, 3] = y_c + h_half + + return boxes_exp + + # Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/core/test.py#L812 # pylint: disable=line-too-long + # To work around an issue with cv2.resize (it seems to automatically pad + # with repeated border values), we manually zero-pad the masks by 1 pixel + # prior to resizing back to the original image resolution. This prevents + # "top hat" artifacts. We therefore need to expand the reference boxes by an + # appropriate factor. + _, mask_height, mask_width = masks.shape + scale = max((mask_width + 2.0) / mask_width, + (mask_height + 2.0) / mask_height) + + ref_boxes = expand_boxes(detected_boxes, scale) + ref_boxes = ref_boxes.astype(np.int32) + padded_mask = np.zeros((mask_height + 2, mask_width + 2), dtype=np.float32) + segms = [] + for mask_ind, mask in enumerate(masks): + im_mask = np.zeros((image_height, image_width), dtype=np.uint8) + # Process mask inside bounding boxes. + padded_mask[1:-1, 1:-1] = mask[:, :] + + ref_box = ref_boxes[mask_ind, :] + w = ref_box[2] - ref_box[0] + 1 + h = ref_box[3] - ref_box[1] + 1 + w = np.maximum(w, 1) + h = np.maximum(h, 1) + + mask = cv2.resize(padded_mask, (w, h)) + mask = np.array(mask > 0.5, dtype=np.uint8) + + x_0 = min(max(ref_box[0], 0), image_width) + x_1 = min(max(ref_box[2] + 1, 0), image_width) + y_0 = min(max(ref_box[1], 0), image_height) + y_1 = min(max(ref_box[3] + 1, 0), image_height) + + im_mask[y_0:y_1, x_0:x_1] = mask[ + (y_0 - ref_box[1]):(y_1 - ref_box[1]), + (x_0 - ref_box[0]):(x_1 - ref_box[0]) + ] + segms.append(im_mask) + + segms = np.array(segms) + assert masks.shape[0] == segms.shape[0] + return segms + + +def paste_instance_masks_v2(masks, + detected_boxes, + image_height, + image_width): + """Paste instance masks to generate the image segmentation (v2). + + Args: + masks: a numpy array of shape [N, mask_height, mask_width] representing the + instance masks w.r.t. the `detected_boxes`. + detected_boxes: a numpy array of shape [N, 4] representing the reference + bounding boxes. + image_height: an integer representing the height of the image. + image_width: an integer representing the width of the image. + + Returns: + segms: a numpy array of shape [N, image_height, image_width] representing + the instance masks *pasted* on the image canvas. + """ + _, mask_height, mask_width = masks.shape + + segms = [] + for i, mask in enumerate(masks): + box = detected_boxes[i, :] + xmin = box[0] + ymin = box[1] + xmax = xmin + box[2] + ymax = ymin + box[3] + + # Sample points of the cropped mask w.r.t. the image grid. + # Note that these coordinates may fall beyond the image. + # Pixel clipping will happen after warping. 
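+ # Take the smallest integer-aligned window that encloses the float box: + # columns [floor(xmin), ceil(xmax)) and rows [floor(ymin), ceil(ymax)).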
+ xmin_int = int(math.floor(xmin)) + xmax_int = int(math.ceil(xmax)) + ymin_int = int(math.floor(ymin)) + ymax_int = int(math.ceil(ymax)) + + alpha = box[2] / (1.0 * mask_width) + beta = box[3] / (1.0 * mask_height) + # pylint: disable=invalid-name + # Transformation from mask pixel indices to image coordinate. + M_mask_to_image = np.array( + [[alpha, 0, xmin], + [0, beta, ymin], + [0, 0, 1]], + dtype=np.float32) + # Transformation from image to cropped mask coordinate. + M_image_to_crop = np.array( + [[1, 0, -xmin_int], + [0, 1, -ymin_int], + [0, 0, 1]], + dtype=np.float32) + M = np.dot(M_image_to_crop, M_mask_to_image) + # Compensate the half pixel offset that OpenCV has in the + # warpPerspective implementation: the top-left pixel is sampled + # at (0,0), but we want it to be at (0.5, 0.5). + M = np.dot( + np.dot( + np.array([[1, 0, -0.5], + [0, 1, -0.5], + [0, 0, 1]], np.float32), + M), + np.array([[1, 0, 0.5], + [0, 1, 0.5], + [0, 0, 1]], np.float32)) + # pylint: enable=invalid-name + cropped_mask = cv2.warpPerspective( + mask.astype(np.float32), M, + (xmax_int - xmin_int, ymax_int - ymin_int)) + cropped_mask = np.array(cropped_mask > 0.5, dtype=np.uint8) + + img_mask = np.zeros((image_height, image_width)) + x0 = max(min(xmin_int, image_width), 0) + x1 = max(min(xmax_int, image_width), 0) + y0 = max(min(ymin_int, image_height), 0) + y1 = max(min(ymax_int, image_height), 0) + img_mask[y0:y1, x0:x1] = cropped_mask[ + (y0 - ymin_int):(y1 - ymin_int), + (x0 - xmin_int):(x1 - xmin_int)] + + segms.append(img_mask) + + segms = np.array(segms) + return segms + diff --git a/models/official/vision/detection/utils/object_detection/__init__.py b/models/official/vision/detection/utils/object_detection/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..85c94f4b6bd7567796755895505a320405a40777 --- /dev/null +++ b/models/official/vision/detection/utils/object_detection/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== diff --git a/models/official/vision/detection/utils/object_detection/argmax_matcher.py b/models/official/vision/detection/utils/object_detection/argmax_matcher.py new file mode 100644 index 0000000000000000000000000000000000000000..3f8b051bfb08a72846482c0da9c79d1b98418c38 --- /dev/null +++ b/models/official/vision/detection/utils/object_detection/argmax_matcher.py @@ -0,0 +1,201 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Argmax matcher implementation. + +This class takes a similarity matrix and matches columns to rows based on the +maximum value per column. One can specify matched_threshold +to prevent columns from matching to rows (generally resulting in a negative +training example) and unmatched_threshold to ignore the match (generally +resulting in neither a positive nor a negative training example). + +This matcher is used in Fast(er)-RCNN. + +Note: matchers are used in TargetAssigners. There is a create_target_assigner +factory function for popular implementations. +""" +import tensorflow as tf + +from official.vision.detection.utils.object_detection import matcher +from official.vision.detection.utils.object_detection import shape_utils + + +class ArgMaxMatcher(matcher.Matcher): + """Matcher based on highest value. + + This class computes matches from a similarity matrix. Each column is matched + to a single row. + + To support object detection target assignment this class enables setting both + matched_threshold (upper threshold) and unmatched_threshold (lower threshold) + defining three categories of similarity which define whether examples are + positive, negative, or ignored: + (1) similarity >= matched_threshold: Highest similarity. Matched/Positive! + (2) matched_threshold > similarity >= unmatched_threshold: Medium similarity. + Depending on negatives_lower_than_unmatched, this is either + Unmatched/Negative OR Ignore. + (3) unmatched_threshold > similarity: Lowest similarity. Depending on flag + negatives_lower_than_unmatched, either Unmatched/Negative OR Ignore. + For ignored matches this class sets the values in the Match object to -2. + """ + + def __init__(self, + matched_threshold, + unmatched_threshold=None, + negatives_lower_than_unmatched=True, + force_match_for_each_row=False): + """Construct ArgMaxMatcher. + + Args: + matched_threshold: Threshold for positive matches. Positive if + sim >= matched_threshold, where sim is the maximum value of the + similarity matrix for a given column. Set to None for no threshold. + unmatched_threshold: Threshold for negative matches. Negative if + sim < unmatched_threshold. Defaults to matched_threshold + when set to None. + negatives_lower_than_unmatched: Boolean which defaults to True. If True + then negative matches are the ones below the unmatched_threshold, + whereas ignored matches are in between the matched and unmatched + threshold. If False, then negative matches are in between the matched + and unmatched threshold, and everything lower than unmatched is ignored. + force_match_for_each_row: If True, ensures that each row is matched to + at least one column (which is not guaranteed otherwise if the + matched_threshold is high). Defaults to False. See + argmax_matcher_test.testMatcherForceMatch() for an example. + + Raises: + ValueError: if unmatched_threshold is set but matched_threshold is not set + or if unmatched_threshold > matched_threshold.
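+ ValueError: if negatives_lower_than_unmatched is False and matched_threshold + equals unmatched_threshold.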
+ """ + if (matched_threshold is None) and (unmatched_threshold is not None): + raise ValueError('Need to also define matched_threshold when' + 'unmatched_threshold is defined') + self._matched_threshold = matched_threshold + if unmatched_threshold is None: + self._unmatched_threshold = matched_threshold + else: + if unmatched_threshold > matched_threshold: + raise ValueError('unmatched_threshold needs to be smaller or equal' + 'to matched_threshold') + self._unmatched_threshold = unmatched_threshold + if not negatives_lower_than_unmatched: + if self._unmatched_threshold == self._matched_threshold: + raise ValueError('When negatives are in between matched and ' + 'unmatched thresholds, these cannot be of equal ' + 'value. matched: %s, unmatched: %s', + self._matched_threshold, self._unmatched_threshold) + self._force_match_for_each_row = force_match_for_each_row + self._negatives_lower_than_unmatched = negatives_lower_than_unmatched + + def _match(self, similarity_matrix): + """Tries to match each column of the similarity matrix to a row. + + Args: + similarity_matrix: tensor of shape [N, M] representing any similarity + metric. + + Returns: + Match object with corresponding matches for each of M columns. + """ + + def _match_when_rows_are_empty(): + """Performs matching when the rows of similarity matrix are empty. + + When the rows are empty, all detections are false positives. So we return + a tensor of -1's to indicate that the columns do not match to any rows. + + Returns: + matches: int32 tensor indicating the row each column matches to. + """ + similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape( + similarity_matrix) + return -1 * tf.ones([similarity_matrix_shape[1]], dtype=tf.int32) + + def _match_when_rows_are_non_empty(): + """Performs matching when the rows of similarity matrix are non empty. + + Returns: + matches: int32 tensor indicating the row each column matches to. 
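+ When thresholds are configured, unmatched columns are set to -1 and ignored + columns to -2.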
+ """ + # Matches for each column + matches = tf.argmax(input=similarity_matrix, axis=0, output_type=tf.int32) + + # Deal with matched and unmatched threshold + if self._matched_threshold is not None: + # Get logical indices of ignored and unmatched columns as tf.int64 + matched_vals = tf.reduce_max(input_tensor=similarity_matrix, axis=0) + below_unmatched_threshold = tf.greater(self._unmatched_threshold, + matched_vals) + between_thresholds = tf.logical_and( + tf.greater_equal(matched_vals, self._unmatched_threshold), + tf.greater(self._matched_threshold, matched_vals)) + + if self._negatives_lower_than_unmatched: + matches = self._set_values_using_indicator(matches, + below_unmatched_threshold, + -1) + matches = self._set_values_using_indicator(matches, + between_thresholds, + -2) + else: + matches = self._set_values_using_indicator(matches, + below_unmatched_threshold, + -2) + matches = self._set_values_using_indicator(matches, + between_thresholds, + -1) + + if self._force_match_for_each_row: + similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape( + similarity_matrix) + force_match_column_ids = tf.argmax( + input=similarity_matrix, axis=1, output_type=tf.int32) + force_match_column_indicators = tf.one_hot( + force_match_column_ids, depth=similarity_matrix_shape[1]) + force_match_row_ids = tf.argmax( + input=force_match_column_indicators, axis=0, output_type=tf.int32) + force_match_column_mask = tf.cast( + tf.reduce_max(input_tensor=force_match_column_indicators, axis=0), + tf.bool) + final_matches = tf.where(force_match_column_mask, force_match_row_ids, + matches) + return final_matches + else: + return matches + + if similarity_matrix.shape.is_fully_defined(): + if similarity_matrix.shape.dims[0].value == 0: + return _match_when_rows_are_empty() + else: + return _match_when_rows_are_non_empty() + else: + return tf.cond( + pred=tf.greater(tf.shape(input=similarity_matrix)[0], 0), + true_fn=_match_when_rows_are_non_empty, + false_fn=_match_when_rows_are_empty) + + def _set_values_using_indicator(self, x, indicator, val): + """Set the indicated fields of x to val. + + Args: + x: tensor. + indicator: boolean with same shape as x. + val: scalar with value to set. + + Returns: + modified tensor. + """ + indicator = tf.cast(indicator, x.dtype) + return tf.add(tf.multiply(x, 1 - indicator), val * indicator) diff --git a/models/official/vision/detection/utils/object_detection/balanced_positive_negative_sampler.py b/models/official/vision/detection/utils/object_detection/balanced_positive_negative_sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..f969182b05a29167649d5c022a667b3f768f0143 --- /dev/null +++ b/models/official/vision/detection/utils/object_detection/balanced_positive_negative_sampler.py @@ -0,0 +1,274 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Class to subsample minibatches by balancing positives and negatives. + +Subsamples minibatches based on a pre-specified positive fraction in range +[0,1]. The class presumes there are many more negatives than positive examples: +if the desired batch_size cannot be achieved with the pre-specified positive +fraction, it fills the rest with negative examples. If this is not sufficient +for obtaining the desired batch_size, it returns fewer examples. + +The main function to call is Subsample(self, indicator, labels). For convenience +one can also call SubsampleWeights(self, weights, labels) which is defined in +the minibatch_sampler base class. + +When is_static is True, it implements a method that guarantees static shapes. +It also ensures the length of output of the subsample is always batch_size, even +when number of examples set to True in indicator is less than batch_size. + +This is originally implemented in TensorFlow Object Detection API. +""" + +import tensorflow as tf + +from official.vision.detection.utils.object_detection import minibatch_sampler +from official.vision.detection.utils.object_detection import ops + + +class BalancedPositiveNegativeSampler(minibatch_sampler.MinibatchSampler): + """Subsamples minibatches to a desired balance of positives and negatives.""" + + def __init__(self, positive_fraction=0.5, is_static=False): + """Constructs a minibatch sampler. + + Args: + positive_fraction: desired fraction of positive examples (scalar in [0,1]) + in the batch. + is_static: If True, uses an implementation with static shape guarantees. + + Raises: + ValueError: if positive_fraction < 0, or positive_fraction > 1 + """ + if positive_fraction < 0 or positive_fraction > 1: + raise ValueError('positive_fraction should be in range [0,1]. ' + 'Received: %s.' % positive_fraction) + self._positive_fraction = positive_fraction + self._is_static = is_static + + def _get_num_pos_neg_samples(self, sorted_indices_tensor, sample_size): + """Counts the number of positives and negatives numbers to be sampled. + + Args: + sorted_indices_tensor: A sorted int32 tensor of shape [N] which contains + the signed indices of the examples where the sign is based on the label + value. The examples that cannot be sampled are set to 0. It samples + atmost sample_size*positive_fraction positive examples and remaining + from negative examples. + sample_size: Size of subsamples. + + Returns: + A tuple containing the number of positive and negative labels in the + subsample. + """ + input_length = tf.shape(input=sorted_indices_tensor)[0] + valid_positive_index = tf.greater(sorted_indices_tensor, + tf.zeros(input_length, tf.int32)) + num_sampled_pos = tf.reduce_sum( + input_tensor=tf.cast(valid_positive_index, tf.int32)) + max_num_positive_samples = tf.constant( + int(sample_size * self._positive_fraction), tf.int32) + num_positive_samples = tf.minimum(max_num_positive_samples, num_sampled_pos) + num_negative_samples = tf.constant(sample_size, + tf.int32) - num_positive_samples + + return num_positive_samples, num_negative_samples + + def _get_values_from_start_and_end(self, input_tensor, num_start_samples, + num_end_samples, total_num_samples): + """slices num_start_samples and last num_end_samples from input_tensor. + + Args: + input_tensor: An int32 tensor of shape [N] to be sliced. + num_start_samples: Number of examples to be sliced from the beginning + of the input tensor. 
+ num_end_samples: Number of examples to be sliced from the end of the + input tensor. + total_num_samples: Sum of num_start_samples and num_end_samples. This + should be a scalar. + + Returns: + A tensor containing the first num_start_samples and last num_end_samples + from input_tensor. + + """ + input_length = tf.shape(input=input_tensor)[0] + start_positions = tf.less(tf.range(input_length), num_start_samples) + end_positions = tf.greater_equal( + tf.range(input_length), input_length - num_end_samples) + selected_positions = tf.logical_or(start_positions, end_positions) + selected_positions = tf.cast(selected_positions, tf.float32) + indexed_positions = tf.multiply(tf.cumsum(selected_positions), + selected_positions) + one_hot_selector = tf.one_hot(tf.cast(indexed_positions, tf.int32) - 1, + total_num_samples, + dtype=tf.float32) + return tf.cast(tf.tensordot(tf.cast(input_tensor, tf.float32), + one_hot_selector, axes=[0, 0]), tf.int32) + + def _static_subsample(self, indicator, batch_size, labels): + """Returns subsampled minibatch. + + Args: + indicator: boolean tensor of shape [N] whose True entries can be sampled. + N should be a compile time constant. + batch_size: desired batch size. This scalar cannot be None. + labels: boolean tensor of shape [N] denoting positive(=True) and negative + (=False) examples. N should be a compile time constant. + + Returns: + sampled_idx_indicator: boolean tensor of shape [N], True for entries which + are sampled. It ensures the length of output of the subsample is always + batch_size, even when number of examples set to True in indicator is + less than batch_size. + + Raises: + ValueError: if labels and indicator are not 1D boolean tensors. + """ + # Check if indicator and labels have a static size. + if not indicator.shape.is_fully_defined(): + raise ValueError('indicator must be static in shape when is_static is ' + 'True') + if not labels.shape.is_fully_defined(): + raise ValueError('labels must be static in shape when is_static is ' + 'True') + if not isinstance(batch_size, int): + raise ValueError('batch_size has to be an integer when is_static is ' + 'True.') + + input_length = tf.shape(input=indicator)[0] + + # Set the number of examples set True in indicator to be at least + # batch_size. + num_true_sampled = tf.reduce_sum( + input_tensor=tf.cast(indicator, tf.float32)) + additional_false_sample = tf.less_equal( + tf.cumsum(tf.cast(tf.logical_not(indicator), tf.float32)), + batch_size - num_true_sampled) + indicator = tf.logical_or(indicator, additional_false_sample) + + # Shuffle indicator and label. Need to store the permutation to restore the + # order post sampling. + permutation = tf.random.shuffle(tf.range(input_length)) + indicator = ops.matmul_gather_on_zeroth_axis( + tf.cast(indicator, tf.float32), permutation) + labels = ops.matmul_gather_on_zeroth_axis( + tf.cast(labels, tf.float32), permutation) + + # index (starting from 1) when indicator is True, 0 when False + indicator_idx = tf.where( + tf.cast(indicator, tf.bool), tf.range(1, input_length + 1), + tf.zeros(input_length, tf.int32)) + + # Replace -1 for negative, +1 for positive labels + signed_label = tf.where( + tf.cast(labels, tf.bool), tf.ones(input_length, tf.int32), + tf.scalar_mul(-1, tf.ones(input_length, tf.int32))) + # negative of index for negative label, positive index for positive label, + # 0 when indicator is False.
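+ # After sorting in descending order, positive candidates come first, zeros + # (entries that cannot be sampled) sit in the middle, and negative candidates + # come last; _get_values_from_start_and_end relies on this ordering.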
+ signed_indicator_idx = tf.multiply(indicator_idx, signed_label) + sorted_signed_indicator_idx = tf.nn.top_k( + signed_indicator_idx, input_length, sorted=True).values + + [num_positive_samples, + num_negative_samples] = self._get_num_pos_neg_samples( + sorted_signed_indicator_idx, batch_size) + + sampled_idx = self._get_values_from_start_and_end( + sorted_signed_indicator_idx, num_positive_samples, + num_negative_samples, batch_size) + + # Shift the indices to start from 0 and remove any samples that are set as + # False. + sampled_idx = tf.abs(sampled_idx) - tf.ones(batch_size, tf.int32) + sampled_idx = tf.multiply( + tf.cast(tf.greater_equal(sampled_idx, tf.constant(0)), tf.int32), + sampled_idx) + + sampled_idx_indicator = tf.cast( + tf.reduce_sum( + input_tensor=tf.one_hot(sampled_idx, depth=input_length), axis=0), + tf.bool) + + # project back the order based on stored permutations + reprojections = tf.one_hot(permutation, depth=input_length, + dtype=tf.float32) + return tf.cast(tf.tensordot( + tf.cast(sampled_idx_indicator, tf.float32), + reprojections, axes=[0, 0]), tf.bool) + + def subsample(self, indicator, batch_size, labels, scope=None): + """Returns subsampled minibatch. + + Args: + indicator: boolean tensor of shape [N] whose True entries can be sampled. + batch_size: desired batch size. If None, keeps all positive samples and + randomly selects negative samples so that the positive sample fraction + matches self._positive_fraction. It cannot be None is is_static is True. + labels: boolean tensor of shape [N] denoting positive(=True) and negative + (=False) examples. + scope: name scope. + + Returns: + sampled_idx_indicator: boolean tensor of shape [N], True for entries which + are sampled. + + Raises: + ValueError: if labels and indicator are not 1D boolean tensors. + """ + if len(indicator.get_shape().as_list()) != 1: + raise ValueError('indicator must be 1 dimensional, got a tensor of ' + 'shape %s' % indicator.get_shape()) + if len(labels.get_shape().as_list()) != 1: + raise ValueError('labels must be 1 dimensional, got a tensor of ' + 'shape %s' % labels.get_shape()) + if labels.dtype != tf.bool: + raise ValueError('labels should be of type bool. Received: %s' % + labels.dtype) + if indicator.dtype != tf.bool: + raise ValueError('indicator should be of type bool. 
Received: %s' % + indicator.dtype) + scope = scope or 'BalancedPositiveNegativeSampler' + with tf.name_scope(scope): + if self._is_static: + return self._static_subsample(indicator, batch_size, labels) + + else: + # Only sample from indicated samples + negative_idx = tf.logical_not(labels) + positive_idx = tf.logical_and(labels, indicator) + negative_idx = tf.logical_and(negative_idx, indicator) + + # Sample positive and negative samples separately + if batch_size is None: + max_num_pos = tf.reduce_sum( + input_tensor=tf.cast(positive_idx, dtype=tf.int32)) + else: + max_num_pos = int(self._positive_fraction * batch_size) + sampled_pos_idx = self.subsample_indicator(positive_idx, max_num_pos) + num_sampled_pos = tf.reduce_sum( + input_tensor=tf.cast(sampled_pos_idx, tf.int32)) + if batch_size is None: + negative_positive_ratio = ( + 1 - self._positive_fraction) / self._positive_fraction + max_num_neg = tf.cast( + negative_positive_ratio * + tf.cast(num_sampled_pos, dtype=tf.float32), + dtype=tf.int32) + else: + max_num_neg = batch_size - num_sampled_pos + sampled_neg_idx = self.subsample_indicator(negative_idx, max_num_neg) + + return tf.logical_or(sampled_pos_idx, sampled_neg_idx) diff --git a/models/official/vision/detection/utils/object_detection/box_coder.py b/models/official/vision/detection/utils/object_detection/box_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..f20ac956dfbce1fa69d1b9e6f5b023b704e1ec8a --- /dev/null +++ b/models/official/vision/detection/utils/object_detection/box_coder.py @@ -0,0 +1,151 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Base box coder. + +Box coders convert between coordinate frames, namely image-centric +(with (0,0) on the top left of image) and anchor-centric (with (0,0) being +defined by a specific anchor). + +Users of a BoxCoder can call two methods: + encode: which encodes a box with respect to a given anchor + (or rather, a tensor of boxes wrt a corresponding tensor of anchors) and + decode: which inverts this encoding with a decode operation. +In both cases, the arguments are assumed to be in 1-1 correspondence already; +it is not the job of a BoxCoder to perform matching. +""" +from abc import ABCMeta +from abc import abstractmethod +from abc import abstractproperty + +import tensorflow as tf + + +# Box coder types. +FASTER_RCNN = 'faster_rcnn' +KEYPOINT = 'keypoint' +MEAN_STDDEV = 'mean_stddev' +SQUARE = 'square' + + +class BoxCoder(object): + """Abstract base class for box coder.""" + __metaclass__ = ABCMeta + + @abstractproperty + def code_size(self): + """Return the size of each code. + + This number is a constant and should agree with the output of the `encode` + op (e.g. if rel_codes is the output of self.encode(...), then it should have + shape [N, code_size()]). This abstractproperty should be overridden by + implementations. 
+ + Returns: + an integer constant + """ + pass + + def encode(self, boxes, anchors): + """Encode a box list relative to an anchor collection. + + Args: + boxes: BoxList holding N boxes to be encoded + anchors: BoxList of N anchors + + Returns: + a tensor representing N relative-encoded boxes + """ + with tf.name_scope('Encode'): + return self._encode(boxes, anchors) + + def decode(self, rel_codes, anchors): + """Decode boxes that are encoded relative to an anchor collection. + + Args: + rel_codes: a tensor representing N relative-encoded boxes + anchors: BoxList of anchors + + Returns: + boxlist: BoxList holding N boxes encoded in the ordinary way (i.e., + with corners y_min, x_min, y_max, x_max) + """ + with tf.name_scope('Decode'): + return self._decode(rel_codes, anchors) + + @abstractmethod + def _encode(self, boxes, anchors): + """Method to be overriden by implementations. + + Args: + boxes: BoxList holding N boxes to be encoded + anchors: BoxList of N anchors + + Returns: + a tensor representing N relative-encoded boxes + """ + pass + + @abstractmethod + def _decode(self, rel_codes, anchors): + """Method to be overriden by implementations. + + Args: + rel_codes: a tensor representing N relative-encoded boxes + anchors: BoxList of anchors + + Returns: + boxlist: BoxList holding N boxes encoded in the ordinary way (i.e., + with corners y_min, x_min, y_max, x_max) + """ + pass + + +def batch_decode(encoded_boxes, box_coder, anchors): + """Decode a batch of encoded boxes. + + This op takes a batch of encoded bounding boxes and transforms + them to a batch of bounding boxes specified by their corners in + the order of [y_min, x_min, y_max, x_max]. + + Args: + encoded_boxes: a float32 tensor of shape [batch_size, num_anchors, + code_size] representing the location of the objects. + box_coder: a BoxCoder object. + anchors: a BoxList of anchors used to encode `encoded_boxes`. + + Returns: + decoded_boxes: a float32 tensor of shape [batch_size, num_anchors, + coder_size] representing the corners of the objects in the order + of [y_min, x_min, y_max, x_max]. + + Raises: + ValueError: if batch sizes of the inputs are inconsistent, or if + the number of anchors inferred from encoded_boxes and anchors are + inconsistent. + """ + encoded_boxes.get_shape().assert_has_rank(3) + if encoded_boxes.get_shape()[1].value != anchors.num_boxes_static(): + raise ValueError('The number of anchors inferred from encoded_boxes' + ' and anchors are inconsistent: shape[1] of encoded_boxes' + ' %s should be equal to the number of anchors: %s.' % + (encoded_boxes.get_shape()[1].value, + anchors.num_boxes_static())) + + decoded_boxes = tf.stack([ + box_coder.decode(boxes, anchors).get() + for boxes in tf.unstack(encoded_boxes) + ]) + return decoded_boxes diff --git a/models/official/vision/detection/utils/object_detection/box_list.py b/models/official/vision/detection/utils/object_detection/box_list.py new file mode 100644 index 0000000000000000000000000000000000000000..113fab8c197194f1cd0099d5a177cd9f1fb6e64c --- /dev/null +++ b/models/official/vision/detection/utils/object_detection/box_list.py @@ -0,0 +1,211 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Bounding Box List definition. + +BoxList represents a list of bounding boxes as tensorflow +tensors, where each bounding box is represented as a row of 4 numbers, +[y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes +within a given list correspond to a single image. See also +box_list_ops.py for common box related operations (such as area, iou, etc). + +Optionally, users can add additional related fields (such as weights). +We assume the following things to be true about fields: +* they correspond to boxes in the box_list along the 0th dimension +* they have inferrable rank at graph construction time +* all dimensions except for possibly the 0th can be inferred + (i.e., not None) at graph construction time. + +Some other notes: + * Following tensorflow conventions, we use height, width ordering, + and correspondingly, y,x (or ymin, xmin, ymax, xmax) ordering + * Tensors are always provided as (flat) [N, 4] tensors. +""" + +import tensorflow as tf + + +class BoxList(object): + """Box collection.""" + + def __init__(self, boxes): + """Constructs box collection. + + Args: + boxes: a tensor of shape [N, 4] representing box corners + + Raises: + ValueError: if invalid dimensions for bbox data or if bbox data is not in + float32 format. + """ + if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4: + raise ValueError('Invalid dimensions for box data.') + if boxes.dtype != tf.float32: + raise ValueError('Invalid tensor type: should be tf.float32') + self.data = {'boxes': boxes} + + def num_boxes(self): + """Returns number of boxes held in collection. + + Returns: + a tensor representing the number of boxes held in the collection. + """ + return tf.shape(input=self.data['boxes'])[0] + + def num_boxes_static(self): + """Returns number of boxes held in collection. + + This number is inferred at graph construction time rather than run-time. + + Returns: + Number of boxes held in collection (integer) or None if this is not + inferrable at graph construction time. + """ + return self.data['boxes'].get_shape().dims[0].value + + def get_all_fields(self): + """Returns all fields.""" + return self.data.keys() + + def get_extra_fields(self): + """Returns all non-box fields (i.e., everything not named 'boxes').""" + return [k for k in self.data.keys() if k != 'boxes'] + + def add_field(self, field, field_data): + """Add field to box list. + + This method can be used to add related box data such as + weights/labels, etc. + + Args: + field: a string key to access the data via `get` + field_data: a tensor containing the data to store in the BoxList + """ + self.data[field] = field_data + + def has_field(self, field): + return field in self.data + + def get(self): + """Convenience function for accessing box coordinates. + + Returns: + a tensor with shape [N, 4] representing box coordinates. + """ + return self.get_field('boxes') + + def set(self, boxes): + """Convenience function for setting box coordinates. 
+ + Args: + boxes: a tensor of shape [N, 4] representing box corners + + Raises: + ValueError: if invalid dimensions for bbox data + """ + if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4: + raise ValueError('Invalid dimensions for box data.') + self.data['boxes'] = boxes + + def get_field(self, field): + """Accesses a box collection and associated fields. + + This function returns specified field with object; if no field is specified, + it returns the box coordinates. + + Args: + field: this optional string parameter can be used to specify + a related field to be accessed. + + Returns: + a tensor representing the box collection or an associated field. + + Raises: + ValueError: if invalid field + """ + if not self.has_field(field): + raise ValueError('field ' + str(field) + ' does not exist') + return self.data[field] + + def set_field(self, field, value): + """Sets the value of a field. + + Updates the field of a box_list with a given value. + + Args: + field: (string) name of the field to set value. + value: the value to assign to the field. + + Raises: + ValueError: if the box_list does not have specified field. + """ + if not self.has_field(field): + raise ValueError('field %s does not exist' % field) + self.data[field] = value + + def get_center_coordinates_and_sizes(self, scope=None): + """Computes the center coordinates, height and width of the boxes. + + Args: + scope: name scope of the function. + + Returns: + a list of 4 1-D tensors [ycenter, xcenter, height, width]. + """ + if not scope: + scope = 'get_center_coordinates_and_sizes' + with tf.name_scope(scope): + box_corners = self.get() + ymin, xmin, ymax, xmax = tf.unstack(tf.transpose(a=box_corners)) + width = xmax - xmin + height = ymax - ymin + ycenter = ymin + height / 2. + xcenter = xmin + width / 2. + return [ycenter, xcenter, height, width] + + def transpose_coordinates(self, scope=None): + """Transpose the coordinate representation in a boxlist. + + Args: + scope: name scope of the function. + """ + if not scope: + scope = 'transpose_coordinates' + with tf.name_scope(scope): + y_min, x_min, y_max, x_max = tf.split( + value=self.get(), num_or_size_splits=4, axis=1) + self.set(tf.concat([x_min, y_min, x_max, y_max], 1)) + + def as_tensor_dict(self, fields=None): + """Retrieves specified fields as a dictionary of tensors. + + Args: + fields: (optional) list of fields to return in the dictionary. + If None (default), all fields are returned. + + Returns: + tensor_dict: A dictionary of tensors specified by fields. + + Raises: + ValueError: if specified field is not contained in boxlist. + """ + tensor_dict = {} + if fields is None: + fields = self.get_all_fields() + for field in fields: + if not self.has_field(field): + raise ValueError('boxlist must contain all specified fields') + tensor_dict[field] = self.get_field(field) + return tensor_dict diff --git a/models/official/vision/detection/utils/object_detection/box_list_ops.py b/models/official/vision/detection/utils/object_detection/box_list_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..9f1b06e28d588eb05c9ea8596b44d08690481eae --- /dev/null +++ b/models/official/vision/detection/utils/object_detection/box_list_ops.py @@ -0,0 +1,1079 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Bounding Box List operations. + +Example box operations that are supported: + * areas: compute bounding box areas + * iou: pairwise intersection-over-union scores + * sq_dist: pairwise distances between bounding boxes + +Whenever box_list_ops functions output a BoxList, the fields of the incoming +BoxList are retained unless documented otherwise. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range +import tensorflow as tf + +from official.vision.detection.utils.object_detection import box_list +from official.vision.detection.utils.object_detection import ops + + +class SortOrder(object): + """Enum class for sort order. + + Attributes: + ascend: ascend order. + descend: descend order. + """ + ascend = 1 + descend = 2 + + +def area(boxlist, scope=None): + """Computes area of boxes. + + Args: + boxlist: BoxList holding N boxes + scope: name scope. + + Returns: + a tensor with shape [N] representing box areas. + """ + with tf.name_scope(scope, 'Area'): + y_min, x_min, y_max, x_max = tf.split( + value=boxlist.get(), num_or_size_splits=4, axis=1) + return tf.squeeze((y_max - y_min) * (x_max - x_min), [1]) + + +def height_width(boxlist, scope=None): + """Computes height and width of boxes in boxlist. + + Args: + boxlist: BoxList holding N boxes + scope: name scope. + + Returns: + Height: A tensor with shape [N] representing box heights. + Width: A tensor with shape [N] representing box widths. + """ + with tf.name_scope(scope, 'HeightWidth'): + y_min, x_min, y_max, x_max = tf.split( + value=boxlist.get(), num_or_size_splits=4, axis=1) + return tf.squeeze(y_max - y_min, [1]), tf.squeeze(x_max - x_min, [1]) + + +def scale(boxlist, y_scale, x_scale, scope=None): + """scale box coordinates in x and y dimensions. + + Args: + boxlist: BoxList holding N boxes + y_scale: (float) scalar tensor + x_scale: (float) scalar tensor + scope: name scope. + + Returns: + boxlist: BoxList holding N boxes + """ + with tf.name_scope(scope, 'Scale'): + y_scale = tf.cast(y_scale, tf.float32) + x_scale = tf.cast(x_scale, tf.float32) + y_min, x_min, y_max, x_max = tf.split( + value=boxlist.get(), num_or_size_splits=4, axis=1) + y_min = y_scale * y_min + y_max = y_scale * y_max + x_min = x_scale * x_min + x_max = x_scale * x_max + scaled_boxlist = box_list.BoxList( + tf.concat([y_min, x_min, y_max, x_max], 1)) + return _copy_extra_fields(scaled_boxlist, boxlist) + + +def clip_to_window(boxlist, window, filter_nonoverlapping=True, scope=None): + """Clip bounding boxes to a window. + + This op clips any input bounding boxes (represented by bounding box + corners) to a window, optionally filtering out boxes that do not + overlap at all with the window. + + Args: + boxlist: BoxList holding M_in boxes + window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] + window to which the op should clip boxes. + filter_nonoverlapping: whether to filter out boxes that do not overlap at + all with the window. + scope: name scope. 
+ + Returns: + a BoxList holding M_out boxes where M_out <= M_in + """ + with tf.name_scope(scope, 'ClipToWindow'): + y_min, x_min, y_max, x_max = tf.split( + value=boxlist.get(), num_or_size_splits=4, axis=1) + win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) + y_min_clipped = tf.maximum(tf.minimum(y_min, win_y_max), win_y_min) + y_max_clipped = tf.maximum(tf.minimum(y_max, win_y_max), win_y_min) + x_min_clipped = tf.maximum(tf.minimum(x_min, win_x_max), win_x_min) + x_max_clipped = tf.maximum(tf.minimum(x_max, win_x_max), win_x_min) + clipped = box_list.BoxList( + tf.concat([y_min_clipped, x_min_clipped, y_max_clipped, x_max_clipped], + 1)) + clipped = _copy_extra_fields(clipped, boxlist) + if filter_nonoverlapping: + areas = area(clipped) + nonzero_area_indices = tf.cast( + tf.reshape(tf.where(tf.greater(areas, 0.0)), [-1]), tf.int32) + clipped = gather(clipped, nonzero_area_indices) + return clipped + + +def prune_outside_window(boxlist, window, scope=None): + """Prunes bounding boxes that fall outside a given window. + + This function prunes bounding boxes that even partially fall outside the given + window. See also clip_to_window which only prunes bounding boxes that fall + completely outside the window, and clips any bounding boxes that partially + overflow. + + Args: + boxlist: a BoxList holding M_in boxes. + window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax] + of the window + scope: name scope. + + Returns: + pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in + valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes + in the input tensor. + """ + with tf.name_scope(scope, 'PruneOutsideWindow'): + y_min, x_min, y_max, x_max = tf.split( + value=boxlist.get(), num_or_size_splits=4, axis=1) + win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) + coordinate_violations = tf.concat([ + tf.less(y_min, win_y_min), tf.less(x_min, win_x_min), + tf.greater(y_max, win_y_max), tf.greater(x_max, win_x_max) + ], 1) + valid_indices = tf.reshape( + tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1]) + return gather(boxlist, valid_indices), valid_indices + + +def prune_completely_outside_window(boxlist, window, scope=None): + """Prunes bounding boxes that fall completely outside of the given window. + + The function clip_to_window prunes bounding boxes that fall + completely outside the window, but also clips any bounding boxes that + partially overflow. This function does not clip partially overflowing boxes. + + Args: + boxlist: a BoxList holding M_in boxes. + window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax] + of the window + scope: name scope. + + Returns: + pruned_boxlist: a new BoxList with all bounding boxes partially or fully in + the window. + valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes + in the input tensor. 
+ """ + with tf.name_scope(scope, 'PruneCompleteleyOutsideWindow'): + y_min, x_min, y_max, x_max = tf.split( + value=boxlist.get(), num_or_size_splits=4, axis=1) + win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) + coordinate_violations = tf.concat([ + tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max), + tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min) + ], 1) + valid_indices = tf.reshape( + tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1]) + return gather(boxlist, valid_indices), valid_indices + + +def intersection(boxlist1, boxlist2, scope=None): + """Compute pairwise intersection areas between boxes. + + Args: + boxlist1: BoxList holding N boxes + boxlist2: BoxList holding M boxes + scope: name scope. + + Returns: + a tensor with shape [N, M] representing pairwise intersections + """ + with tf.name_scope(scope, 'Intersection'): + y_min1, x_min1, y_max1, x_max1 = tf.split( + value=boxlist1.get(), num_or_size_splits=4, axis=1) + y_min2, x_min2, y_max2, x_max2 = tf.split( + value=boxlist2.get(), num_or_size_splits=4, axis=1) + all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2)) + all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2)) + intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin) + all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2)) + all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2)) + intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin) + return intersect_heights * intersect_widths + + +def matched_intersection(boxlist1, boxlist2, scope=None): + """Compute intersection areas between corresponding boxes in two boxlists. + + Args: + boxlist1: BoxList holding N boxes + boxlist2: BoxList holding N boxes + scope: name scope. + + Returns: + a tensor with shape [N] representing pairwise intersections + """ + with tf.name_scope(scope, 'MatchedIntersection'): + y_min1, x_min1, y_max1, x_max1 = tf.split( + value=boxlist1.get(), num_or_size_splits=4, axis=1) + y_min2, x_min2, y_max2, x_max2 = tf.split( + value=boxlist2.get(), num_or_size_splits=4, axis=1) + min_ymax = tf.minimum(y_max1, y_max2) + max_ymin = tf.maximum(y_min1, y_min2) + intersect_heights = tf.maximum(0.0, min_ymax - max_ymin) + min_xmax = tf.minimum(x_max1, x_max2) + max_xmin = tf.maximum(x_min1, x_min2) + intersect_widths = tf.maximum(0.0, min_xmax - max_xmin) + return tf.reshape(intersect_heights * intersect_widths, [-1]) + + +def iou(boxlist1, boxlist2, scope=None): + """Computes pairwise intersection-over-union between box collections. + + Args: + boxlist1: BoxList holding N boxes + boxlist2: BoxList holding M boxes + scope: name scope. + + Returns: + a tensor with shape [N, M] representing pairwise iou scores. + """ + with tf.name_scope(scope, 'IOU'): + intersections = intersection(boxlist1, boxlist2) + areas1 = area(boxlist1) + areas2 = area(boxlist2) + unions = ( + tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections) + return tf.where( + tf.equal(intersections, 0.0), + tf.zeros_like(intersections), tf.truediv(intersections, unions)) + + +def matched_iou(boxlist1, boxlist2, scope=None): + """Compute intersection-over-union between corresponding boxes in boxlists. + + Args: + boxlist1: BoxList holding N boxes + boxlist2: BoxList holding N boxes + scope: name scope. + + Returns: + a tensor with shape [N] representing pairwise iou scores. 
+ """ + with tf.name_scope(scope, 'MatchedIOU'): + intersections = matched_intersection(boxlist1, boxlist2) + areas1 = area(boxlist1) + areas2 = area(boxlist2) + unions = areas1 + areas2 - intersections + return tf.where( + tf.equal(intersections, 0.0), + tf.zeros_like(intersections), tf.truediv(intersections, unions)) + + +def ioa(boxlist1, boxlist2, scope=None): + """Computes pairwise intersection-over-area between box collections. + + intersection-over-area (IOA) between two boxes box1 and box2 is defined as + their intersection area over box2's area. Note that ioa is not symmetric, + that is, ioa(box1, box2) != ioa(box2, box1). + + Args: + boxlist1: BoxList holding N boxes + boxlist2: BoxList holding M boxes + scope: name scope. + + Returns: + a tensor with shape [N, M] representing pairwise ioa scores. + """ + with tf.name_scope(scope, 'IOA'): + intersections = intersection(boxlist1, boxlist2) + areas = tf.expand_dims(area(boxlist2), 0) + return tf.truediv(intersections, areas) + + +def prune_non_overlapping_boxes( + boxlist1, boxlist2, min_overlap=0.0, scope=None): + """Prunes the boxes in boxlist1 that overlap less than thresh with boxlist2. + + For each box in boxlist1, we want its IOA to be more than minoverlap with + at least one of the boxes in boxlist2. If it does not, we remove it. + + Args: + boxlist1: BoxList holding N boxes. + boxlist2: BoxList holding M boxes. + min_overlap: Minimum required overlap between boxes, to count them as + overlapping. + scope: name scope. + + Returns: + new_boxlist1: A pruned boxlist with size [N', 4]. + keep_inds: A tensor with shape [N'] indexing kept bounding boxes in the + first input BoxList `boxlist1`. + """ + with tf.name_scope(scope, 'PruneNonOverlappingBoxes'): + ioa_ = ioa(boxlist2, boxlist1) # [M, N] tensor + ioa_ = tf.reduce_max(ioa_, reduction_indices=[0]) # [N] tensor + keep_bool = tf.greater_equal(ioa_, tf.constant(min_overlap)) + keep_inds = tf.squeeze(tf.where(keep_bool), axis=[1]) + new_boxlist1 = gather(boxlist1, keep_inds) + return new_boxlist1, keep_inds + + +def prune_small_boxes(boxlist, min_side, scope=None): + """Prunes small boxes in the boxlist which have a side smaller than min_side. + + Args: + boxlist: BoxList holding N boxes. + min_side: Minimum width AND height of box to survive pruning. + scope: name scope. + + Returns: + A pruned boxlist. + """ + with tf.name_scope(scope, 'PruneSmallBoxes'): + height, width = height_width(boxlist) + is_valid = tf.logical_and(tf.greater_equal(width, min_side), + tf.greater_equal(height, min_side)) + return gather(boxlist, tf.reshape(tf.where(is_valid), [-1])) + + +def change_coordinate_frame(boxlist, window, scope=None): + """Change coordinate frame of the boxlist to be relative to window's frame. + + Given a window of the form [ymin, xmin, ymax, xmax], + changes bounding box coordinates from boxlist to be relative to this window + (e.g., the min corner maps to (0,0) and the max corner maps to (1,1)). + + An example use case is data augmentation: where we are given groundtruth + boxes (boxlist) and would like to randomly crop the image to some + window (window). In this case we need to change the coordinate frame of + each groundtruth box to be relative to this new window. + + Args: + boxlist: A BoxList object holding N boxes. + window: A rank 1 tensor [4]. + scope: name scope. + + Returns: + Returns a BoxList object with N boxes. 
+  """
+  with tf.name_scope(scope, 'ChangeCoordinateFrame'):
+    win_height = window[2] - window[0]
+    win_width = window[3] - window[1]
+    boxlist_new = scale(box_list.BoxList(
+        boxlist.get() - [window[0], window[1], window[0], window[1]]),
+                        1.0 / win_height, 1.0 / win_width)
+    boxlist_new = _copy_extra_fields(boxlist_new, boxlist)
+    return boxlist_new
+
+
+def sq_dist(boxlist1, boxlist2, scope=None):
+  """Computes the pairwise squared distances between box corners.
+
+  This op treats each box as if it were a point in a 4d Euclidean space and
+  computes pairwise squared distances.
+
+  Mathematically, we are given two matrices of box coordinates X and Y,
+  where X(i,:) is the i'th row of X, containing the 4 numbers defining the
+  corners of the i'th box in boxlist1. Similarly Y(j,:) corresponds to
+  boxlist2. We compute
+  Z(i,j) = ||X(i,:) - Y(j,:)||^2
+         = ||X(i,:)||^2 + ||Y(j,:)||^2 - 2 X(i,:)' * Y(j,:),
+
+  Args:
+    boxlist1: BoxList holding N boxes
+    boxlist2: BoxList holding M boxes
+    scope: name scope.
+
+  Returns:
+    a tensor with shape [N, M] representing pairwise distances
+  """
+  with tf.name_scope(scope, 'SqDist'):
+    sqnorm1 = tf.reduce_sum(tf.square(boxlist1.get()), 1, keep_dims=True)
+    sqnorm2 = tf.reduce_sum(tf.square(boxlist2.get()), 1, keep_dims=True)
+    innerprod = tf.matmul(boxlist1.get(), boxlist2.get(),
+                          transpose_a=False, transpose_b=True)
+    return sqnorm1 + tf.transpose(sqnorm2) - 2.0 * innerprod
+
+
+def boolean_mask(boxlist, indicator, fields=None, scope=None,
+                 use_static_shapes=False, indicator_sum=None):
+  """Select boxes from BoxList according to indicator and return new BoxList.
+
+  `boolean_mask` returns the subset of boxes that are marked as "True" by the
+  indicator tensor. By default, `boolean_mask` returns these boxes as well as
+  all additional fields stored in the boxlist (indexing into the first
+  dimension). However one can optionally only draw from a subset of fields.
+
+  Args:
+    boxlist: BoxList holding N boxes
+    indicator: a rank-1 boolean tensor
+    fields: (optional) list of fields to also gather from. If None (default),
+      all fields are gathered from. Pass an empty fields list to only gather
+      the box coordinates.
+    scope: name scope.
+    use_static_shapes: Whether to use an implementation with static shape
+      guarantees.
+    indicator_sum: An integer containing the sum of `indicator` vector. Only
+      required if `use_static_shapes` is True.
+
+  Returns:
+    subboxlist: a BoxList corresponding to the subset of the input BoxList
+        specified by indicator
+  Raises:
+    ValueError: if `indicator` is not a rank-1 boolean tensor.
+  """
+  with tf.name_scope(scope, 'BooleanMask'):
+    if indicator.shape.ndims != 1:
+      raise ValueError('indicator should have rank 1')
+    if indicator.dtype != tf.bool:
+      raise ValueError('indicator should be a boolean tensor')
+    if use_static_shapes:
+      if not (indicator_sum and isinstance(indicator_sum, int)):
+        raise ValueError('`indicator_sum` must be of type int')
+      selected_positions = tf.cast(indicator, dtype=tf.float32)
+      indexed_positions = tf.cast(
+          tf.multiply(
+              tf.cumsum(selected_positions), selected_positions),
+          dtype=tf.int32)
+      one_hot_selector = tf.one_hot(
+          indexed_positions - 1, indicator_sum, dtype=tf.float32)
+      sampled_indices = tf.cast(
+          tf.tensordot(
+              tf.cast(tf.range(tf.shape(indicator)[0]), dtype=tf.float32),
+              one_hot_selector,
+              axes=[0, 0]),
+          dtype=tf.int32)
+      return gather(boxlist, sampled_indices, use_static_shapes=True)
+    else:
+      subboxlist = box_list.BoxList(tf.boolean_mask(boxlist.get(), indicator))
+      if fields is None:
+        fields = boxlist.get_extra_fields()
+      for field in fields:
+        if not boxlist.has_field(field):
+          raise ValueError('boxlist must contain all specified fields')
+        subfieldlist = tf.boolean_mask(boxlist.get_field(field), indicator)
+        subboxlist.add_field(field, subfieldlist)
+      return subboxlist
+
+
+def gather(boxlist, indices, fields=None, scope=None, use_static_shapes=False):
+  """Gather boxes from BoxList according to indices and return new BoxList.
+
+  By default, `gather` returns boxes corresponding to the input index list, as
+  well as all additional fields stored in the boxlist (indexing into the
+  first dimension). However one can optionally only gather from a
+  subset of fields.
+
+  Args:
+    boxlist: BoxList holding N boxes
+    indices: a rank-1 tensor of type int32 / int64
+    fields: (optional) list of fields to also gather from. If None (default),
+      all fields are gathered from. Pass an empty fields list to only gather
+      the box coordinates.
+    scope: name scope.
+    use_static_shapes: Whether to use an implementation with static shape
+      guarantees.
+
+  Returns:
+    subboxlist: a BoxList corresponding to the subset of the input BoxList
+    specified by indices
+  Raises:
+    ValueError: if specified field is not contained in boxlist or if the
+      indices are not of type int32 or int64
+  """
+  with tf.name_scope(scope, 'Gather'):
+    if len(indices.shape.as_list()) != 1:
+      raise ValueError('indices should have rank 1')
+    if indices.dtype != tf.int32 and indices.dtype != tf.int64:
+      raise ValueError('indices should be an int32 / int64 tensor')
+    gather_op = tf.gather
+    if use_static_shapes:
+      gather_op = ops.matmul_gather_on_zeroth_axis
+    subboxlist = box_list.BoxList(gather_op(boxlist.get(), indices))
+    if fields is None:
+      fields = boxlist.get_extra_fields()
+    fields += ['boxes']
+    for field in fields:
+      if not boxlist.has_field(field):
+        raise ValueError('boxlist must contain all specified fields')
+      subfieldlist = gather_op(boxlist.get_field(field), indices)
+      subboxlist.add_field(field, subfieldlist)
+    return subboxlist
+
+
+def concatenate(boxlists, fields=None, scope=None):
+  """Concatenate list of BoxLists.
+
+  This op concatenates a list of input BoxLists into a larger BoxList. It also
+  handles concatenation of BoxList fields as long as the field tensor shapes
+  are equal except for the first dimension.
+
+  Args:
+    boxlists: list of BoxList objects
+    fields: optional list of fields to also concatenate. By default, all
+      fields from the first BoxList in the list are included in the
+      concatenation.
+    scope: name scope.
+ + Returns: + a BoxList with number of boxes equal to + sum([boxlist.num_boxes() for boxlist in BoxList]) + Raises: + ValueError: if boxlists is invalid (i.e., is not a list, is empty, or + contains non BoxList objects), or if requested fields are not contained in + all boxlists + """ + with tf.name_scope(scope, 'Concatenate'): + if not isinstance(boxlists, list): + raise ValueError('boxlists should be a list') + if not boxlists: + raise ValueError('boxlists should have nonzero length') + for boxlist in boxlists: + if not isinstance(boxlist, box_list.BoxList): + raise ValueError('all elements of boxlists should be BoxList objects') + concatenated = box_list.BoxList( + tf.concat([boxlist.get() for boxlist in boxlists], 0)) + if fields is None: + fields = boxlists[0].get_extra_fields() + for field in fields: + first_field_shape = boxlists[0].get_field(field).get_shape().as_list() + first_field_shape[0] = -1 + if None in first_field_shape: + raise ValueError('field %s must have fully defined shape except for the' + ' 0th dimension.' % field) + for boxlist in boxlists: + if not boxlist.has_field(field): + raise ValueError('boxlist must contain all requested fields') + field_shape = boxlist.get_field(field).get_shape().as_list() + field_shape[0] = -1 + if field_shape != first_field_shape: + raise ValueError('field %s must have same shape for all boxlists ' + 'except for the 0th dimension.' % field) + concatenated_field = tf.concat( + [boxlist.get_field(field) for boxlist in boxlists], 0) + concatenated.add_field(field, concatenated_field) + return concatenated + + +def sort_by_field(boxlist, field, order=SortOrder.descend, scope=None): + """Sort boxes and associated fields according to a scalar field. + + A common use case is reordering the boxes according to descending scores. + + Args: + boxlist: BoxList holding N boxes. + field: A BoxList field for sorting and reordering the BoxList. + order: (Optional) descend or ascend. Default is descend. + scope: name scope. + + Returns: + sorted_boxlist: A sorted BoxList with the field in the specified order. + + Raises: + ValueError: if specified field does not exist + ValueError: if the order is not either descend or ascend + """ + with tf.name_scope(scope, 'SortByField'): + if order != SortOrder.descend and order != SortOrder.ascend: + raise ValueError('Invalid sort order') + + field_to_sort = boxlist.get_field(field) + if len(field_to_sort.shape.as_list()) != 1: + raise ValueError('Field should have rank 1') + + num_boxes = boxlist.num_boxes() + num_entries = tf.size(field_to_sort) + length_assert = tf.Assert( + tf.equal(num_boxes, num_entries), + ['Incorrect field size: actual vs expected.', num_entries, num_boxes]) + + with tf.control_dependencies([length_assert]): + _, sorted_indices = tf.nn.top_k(field_to_sort, num_boxes, sorted=True) + + if order == SortOrder.ascend: + sorted_indices = tf.reverse_v2(sorted_indices, [0]) + + return gather(boxlist, sorted_indices) + + +def visualize_boxes_in_image(image, boxlist, normalized=False, scope=None): + """Overlay bounding box list on image. + + Currently this visualization plots a 1 pixel thick red bounding box on top + of the image. Note that tf.image.draw_bounding_boxes essentially is + 1 indexed. + + Args: + image: an image tensor with shape [height, width, 3] + boxlist: a BoxList + normalized: (boolean) specify whether corners are to be interpreted + as absolute coordinates in image space or normalized with respect to the + image size. + scope: name scope. 
+ + Returns: + image_and_boxes: an image tensor with shape [height, width, 3] + """ + with tf.name_scope(scope, 'VisualizeBoxesInImage'): + if not normalized: + height, width, _ = tf.unstack(tf.shape(image)) + boxlist = scale(boxlist, + 1.0 / tf.cast(height, tf.float32), + 1.0 / tf.cast(width, tf.float32)) + corners = tf.expand_dims(boxlist.get(), 0) + image = tf.expand_dims(image, 0) + return tf.squeeze(tf.image.draw_bounding_boxes(image, corners), [0]) + + +def filter_field_value_equals(boxlist, field, value, scope=None): + """Filter to keep only boxes with field entries equal to the given value. + + Args: + boxlist: BoxList holding N boxes. + field: field name for filtering. + value: scalar value. + scope: name scope. + + Returns: + a BoxList holding M boxes where M <= N + + Raises: + ValueError: if boxlist not a BoxList object or if it does not have + the specified field. + """ + with tf.name_scope(scope, 'FilterFieldValueEquals'): + if not isinstance(boxlist, box_list.BoxList): + raise ValueError('boxlist must be a BoxList') + if not boxlist.has_field(field): + raise ValueError('boxlist must contain the specified field') + filter_field = boxlist.get_field(field) + gather_index = tf.reshape(tf.where(tf.equal(filter_field, value)), [-1]) + return gather(boxlist, gather_index) + + +def filter_greater_than(boxlist, thresh, scope=None): + """Filter to keep only boxes with score exceeding a given threshold. + + This op keeps the collection of boxes whose corresponding scores are + greater than the input threshold. + + TODO(jonathanhuang): Change function name to filter_scores_greater_than + + Args: + boxlist: BoxList holding N boxes. Must contain a 'scores' field + representing detection scores. + thresh: scalar threshold + scope: name scope. + + Returns: + a BoxList holding M boxes where M <= N + + Raises: + ValueError: if boxlist not a BoxList object or if it does not + have a scores field + """ + with tf.name_scope(scope, 'FilterGreaterThan'): + if not isinstance(boxlist, box_list.BoxList): + raise ValueError('boxlist must be a BoxList') + if not boxlist.has_field('scores'): + raise ValueError('input boxlist must have \'scores\' field') + scores = boxlist.get_field('scores') + if len(scores.shape.as_list()) > 2: + raise ValueError('Scores should have rank 1 or 2') + if len(scores.shape.as_list()) == 2 and scores.shape.as_list()[1] != 1: + raise ValueError('Scores should have rank 1 or have shape ' + 'consistent with [None, 1]') + high_score_indices = tf.cast(tf.reshape( + tf.where(tf.greater(scores, thresh)), + [-1]), tf.int32) + return gather(boxlist, high_score_indices) + + +def non_max_suppression(boxlist, thresh, max_output_size, scope=None): + """Non maximum suppression. + + This op greedily selects a subset of detection bounding boxes, pruning + away boxes that have high IOU (intersection over union) overlap (> thresh) + with already selected boxes. Note that this only works for a single class --- + to apply NMS to multi-class predictions, use MultiClassNonMaxSuppression. + + Args: + boxlist: BoxList holding N boxes. Must contain a 'scores' field + representing detection scores. + thresh: scalar threshold + max_output_size: maximum number of retained boxes + scope: name scope. 
+ + Returns: + a BoxList holding M boxes where M <= max_output_size + Raises: + ValueError: if thresh is not in [0, 1] + """ + with tf.name_scope(scope, 'NonMaxSuppression'): + if not 0 <= thresh <= 1.0: + raise ValueError('thresh must be between 0 and 1') + if not isinstance(boxlist, box_list.BoxList): + raise ValueError('boxlist must be a BoxList') + if not boxlist.has_field('scores'): + raise ValueError('input boxlist must have \'scores\' field') + selected_indices = tf.image.non_max_suppression( + boxlist.get(), boxlist.get_field('scores'), + max_output_size, iou_threshold=thresh) + return gather(boxlist, selected_indices) + + +def _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from): + """Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to. + + Args: + boxlist_to_copy_to: BoxList to which extra fields are copied. + boxlist_to_copy_from: BoxList from which fields are copied. + + Returns: + boxlist_to_copy_to with extra fields. + """ + for field in boxlist_to_copy_from.get_extra_fields(): + boxlist_to_copy_to.add_field(field, boxlist_to_copy_from.get_field(field)) + return boxlist_to_copy_to + + +def to_normalized_coordinates(boxlist, height, width, + check_range=True, scope=None): + """Converts absolute box coordinates to normalized coordinates in [0, 1]. + + Usually one uses the dynamic shape of the image or conv-layer tensor: + boxlist = box_list_ops.to_normalized_coordinates(boxlist, + tf.shape(images)[1], + tf.shape(images)[2]), + + This function raises an assertion failed error at graph execution time when + the maximum coordinate is smaller than 1.01 (which means that coordinates are + already normalized). The value 1.01 is to deal with small rounding errors. + + Args: + boxlist: BoxList with coordinates in terms of pixel-locations. + height: Maximum value for height of absolute box coordinates. + width: Maximum value for width of absolute box coordinates. + check_range: If True, checks if the coordinates are normalized or not. + scope: name scope. + + Returns: + boxlist with normalized coordinates in [0, 1]. + """ + with tf.name_scope(scope, 'ToNormalizedCoordinates'): + height = tf.cast(height, tf.float32) + width = tf.cast(width, tf.float32) + + if check_range: + max_val = tf.reduce_max(boxlist.get()) + max_assert = tf.Assert(tf.greater(max_val, 1.01), + ['max value is lower than 1.01: ', max_val]) + with tf.control_dependencies([max_assert]): + width = tf.identity(width) + + return scale(boxlist, 1 / height, 1 / width) + + +def to_absolute_coordinates(boxlist, + height, + width, + check_range=True, + maximum_normalized_coordinate=1.1, + scope=None): + """Converts normalized box coordinates to absolute pixel coordinates. + + This function raises an assertion failed error when the maximum box coordinate + value is larger than maximum_normalized_coordinate (in which case coordinates + are already absolute). + + Args: + boxlist: BoxList with coordinates in range [0, 1]. + height: Maximum value for height of absolute box coordinates. + width: Maximum value for width of absolute box coordinates. + check_range: If True, checks if the coordinates are normalized or not. + maximum_normalized_coordinate: Maximum coordinate value to be considered + as normalized, default to 1.1. + scope: name scope. + + Returns: + boxlist with absolute coordinates in terms of the image size. + + """ + with tf.name_scope(scope, 'ToAbsoluteCoordinates'): + height = tf.cast(height, tf.float32) + width = tf.cast(width, tf.float32) + + # Ensure range of input boxes is correct. 
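+    # If the maximum box coordinate already exceeds
+    # maximum_normalized_coordinate, the boxes are assumed to already be in
+    # absolute (pixel) coordinates, and the assert below fails at run time.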
+ if check_range: + box_maximum = tf.reduce_max(boxlist.get()) + max_assert = tf.Assert( + tf.greater_equal(maximum_normalized_coordinate, box_maximum), + ['maximum box coordinate value is larger ' + 'than %f: ' % maximum_normalized_coordinate, box_maximum]) + with tf.control_dependencies([max_assert]): + width = tf.identity(width) + + return scale(boxlist, height, width) + + +def refine_boxes_multi_class(pool_boxes, + num_classes, + nms_iou_thresh, + nms_max_detections, + voting_iou_thresh=0.5): + """Refines a pool of boxes using non max suppression and box voting. + + Box refinement is done independently for each class. + + Args: + pool_boxes: (BoxList) A collection of boxes to be refined. pool_boxes must + have a rank 1 'scores' field and a rank 1 'classes' field. + num_classes: (int scalar) Number of classes. + nms_iou_thresh: (float scalar) iou threshold for non max suppression (NMS). + nms_max_detections: (int scalar) maximum output size for NMS. + voting_iou_thresh: (float scalar) iou threshold for box voting. + + Returns: + BoxList of refined boxes. + + Raises: + ValueError: if + a) nms_iou_thresh or voting_iou_thresh is not in [0, 1]. + b) pool_boxes is not a BoxList. + c) pool_boxes does not have a scores and classes field. + """ + if not 0.0 <= nms_iou_thresh <= 1.0: + raise ValueError('nms_iou_thresh must be between 0 and 1') + if not 0.0 <= voting_iou_thresh <= 1.0: + raise ValueError('voting_iou_thresh must be between 0 and 1') + if not isinstance(pool_boxes, box_list.BoxList): + raise ValueError('pool_boxes must be a BoxList') + if not pool_boxes.has_field('scores'): + raise ValueError('pool_boxes must have a \'scores\' field') + if not pool_boxes.has_field('classes'): + raise ValueError('pool_boxes must have a \'classes\' field') + + refined_boxes = [] + for i in range(num_classes): + boxes_class = filter_field_value_equals(pool_boxes, 'classes', i) + refined_boxes_class = refine_boxes(boxes_class, nms_iou_thresh, + nms_max_detections, voting_iou_thresh) + refined_boxes.append(refined_boxes_class) + return sort_by_field(concatenate(refined_boxes), 'scores') + + +def refine_boxes(pool_boxes, + nms_iou_thresh, + nms_max_detections, + voting_iou_thresh=0.5): + """Refines a pool of boxes using non max suppression and box voting. + + Args: + pool_boxes: (BoxList) A collection of boxes to be refined. pool_boxes must + have a rank 1 'scores' field. + nms_iou_thresh: (float scalar) iou threshold for non max suppression (NMS). + nms_max_detections: (int scalar) maximum output size for NMS. + voting_iou_thresh: (float scalar) iou threshold for box voting. + + Returns: + BoxList of refined boxes. + + Raises: + ValueError: if + a) nms_iou_thresh or voting_iou_thresh is not in [0, 1]. + b) pool_boxes is not a BoxList. + c) pool_boxes does not have a scores field. + """ + if not 0.0 <= nms_iou_thresh <= 1.0: + raise ValueError('nms_iou_thresh must be between 0 and 1') + if not 0.0 <= voting_iou_thresh <= 1.0: + raise ValueError('voting_iou_thresh must be between 0 and 1') + if not isinstance(pool_boxes, box_list.BoxList): + raise ValueError('pool_boxes must be a BoxList') + if not pool_boxes.has_field('scores'): + raise ValueError('pool_boxes must have a \'scores\' field') + + nms_boxes = non_max_suppression( + pool_boxes, nms_iou_thresh, nms_max_detections) + return box_voting(nms_boxes, pool_boxes, voting_iou_thresh) + + +def box_voting(selected_boxes, pool_boxes, iou_thresh=0.5): + """Performs box voting as described in S. Gidaris and N. Komodakis, ICCV 2015. 
+ + Performs box voting as described in 'Object detection via a multi-region & + semantic segmentation-aware CNN model', Gidaris and Komodakis, ICCV 2015. For + each box 'B' in selected_boxes, we find the set 'S' of boxes in pool_boxes + with iou overlap >= iou_thresh. The location of B is set to the weighted + average location of boxes in S (scores are used for weighting). And the score + of B is set to the average score of boxes in S. + + Args: + selected_boxes: BoxList containing a subset of boxes in pool_boxes. These + boxes are usually selected from pool_boxes using non max suppression. + pool_boxes: BoxList containing a set of (possibly redundant) boxes. + iou_thresh: (float scalar) iou threshold for matching boxes in + selected_boxes and pool_boxes. + + Returns: + BoxList containing averaged locations and scores for each box in + selected_boxes. + + Raises: + ValueError: if + a) selected_boxes or pool_boxes is not a BoxList. + b) if iou_thresh is not in [0, 1]. + c) pool_boxes does not have a scores field. + """ + if not 0.0 <= iou_thresh <= 1.0: + raise ValueError('iou_thresh must be between 0 and 1') + if not isinstance(selected_boxes, box_list.BoxList): + raise ValueError('selected_boxes must be a BoxList') + if not isinstance(pool_boxes, box_list.BoxList): + raise ValueError('pool_boxes must be a BoxList') + if not pool_boxes.has_field('scores'): + raise ValueError('pool_boxes must have a \'scores\' field') + + iou_ = iou(selected_boxes, pool_boxes) + match_indicator = tf.cast(tf.greater(iou_, iou_thresh), dtype=tf.float32) + num_matches = tf.reduce_sum(match_indicator, 1) + # TODO(kbanoop): Handle the case where some boxes in selected_boxes do not + # match to any boxes in pool_boxes. For such boxes without any matches, we + # should return the original boxes without voting. + match_assert = tf.Assert( + tf.reduce_all(tf.greater(num_matches, 0)), + ['Each box in selected_boxes must match with at least one box ' + 'in pool_boxes.']) + + scores = tf.expand_dims(pool_boxes.get_field('scores'), 1) + scores_assert = tf.Assert( + tf.reduce_all(tf.greater_equal(scores, 0)), + ['Scores must be non negative.']) + + with tf.control_dependencies([scores_assert, match_assert]): + sum_scores = tf.matmul(match_indicator, scores) + averaged_scores = tf.reshape(sum_scores, [-1]) / num_matches + + box_locations = tf.matmul(match_indicator, + pool_boxes.get() * scores) / sum_scores + averaged_boxes = box_list.BoxList(box_locations) + _copy_extra_fields(averaged_boxes, selected_boxes) + averaged_boxes.add_field('scores', averaged_scores) + return averaged_boxes + + +def get_minimal_coverage_box(boxlist, + default_box=None, + scope=None): + """Creates a single bounding box which covers all boxes in the boxlist. + + Args: + boxlist: A Boxlist. + default_box: A [1, 4] float32 tensor. If no boxes are present in `boxlist`, + this default box will be returned. If None, will use a default box of + [[0., 0., 1., 1.]]. + scope: Name scope. + + Returns: + A [1, 4] float32 tensor with a bounding box that tightly covers all the + boxes in the box list. If the boxlist does not contain any boxes, the + default box is returned. 
+ """ + with tf.name_scope(scope, 'CreateCoverageBox'): + num_boxes = boxlist.num_boxes() + + def coverage_box(bboxes): + y_min, x_min, y_max, x_max = tf.split( + value=bboxes, num_or_size_splits=4, axis=1) + y_min_coverage = tf.reduce_min(y_min, axis=0) + x_min_coverage = tf.reduce_min(x_min, axis=0) + y_max_coverage = tf.reduce_max(y_max, axis=0) + x_max_coverage = tf.reduce_max(x_max, axis=0) + return tf.stack( + [y_min_coverage, x_min_coverage, y_max_coverage, x_max_coverage], + axis=1) + + default_box = default_box or tf.constant([[0., 0., 1., 1.]]) + return tf.cond( + tf.greater_equal(num_boxes, 1), + true_fn=lambda: coverage_box(boxlist.get()), + false_fn=lambda: default_box) + + +def sample_boxes_by_jittering(boxlist, + num_boxes_to_sample, + stddev=0.1, + scope=None): + """Samples num_boxes_to_sample boxes by jittering around boxlist boxes. + + It is possible that this function might generate boxes with size 0. The larger + the stddev, this is more probable. For a small stddev of 0.1 this probability + is very small. + + Args: + boxlist: A boxlist containing N boxes in normalized coordinates. + num_boxes_to_sample: A positive integer containing the number of boxes to + sample. + stddev: Standard deviation. This is used to draw random offsets for the + box corners from a normal distribution. The offset is multiplied by the + box size so will be larger in terms of pixels for larger boxes. + scope: Name scope. + + Returns: + sampled_boxlist: A boxlist containing num_boxes_to_sample boxes in + normalized coordinates. + """ + with tf.name_scope(scope, 'SampleBoxesByJittering'): + num_boxes = boxlist.num_boxes() + box_indices = tf.random_uniform( + [num_boxes_to_sample], + minval=0, + maxval=num_boxes, + dtype=tf.int32) + sampled_boxes = tf.gather(boxlist.get(), box_indices) + sampled_boxes_height = sampled_boxes[:, 2] - sampled_boxes[:, 0] + sampled_boxes_width = sampled_boxes[:, 3] - sampled_boxes[:, 1] + rand_miny_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev) + rand_minx_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev) + rand_maxy_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev) + rand_maxx_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev) + miny = rand_miny_gaussian * sampled_boxes_height + sampled_boxes[:, 0] + minx = rand_minx_gaussian * sampled_boxes_width + sampled_boxes[:, 1] + maxy = rand_maxy_gaussian * sampled_boxes_height + sampled_boxes[:, 2] + maxx = rand_maxx_gaussian * sampled_boxes_width + sampled_boxes[:, 3] + maxy = tf.maximum(miny, maxy) + maxx = tf.maximum(minx, maxx) + sampled_boxes = tf.stack([miny, minx, maxy, maxx], axis=1) + sampled_boxes = tf.maximum(tf.minimum(sampled_boxes, 1.0), 0.0) + return box_list.BoxList(sampled_boxes) diff --git a/models/official/vision/detection/utils/object_detection/faster_rcnn_box_coder.py b/models/official/vision/detection/utils/object_detection/faster_rcnn_box_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..235df4ede474e89687a17413e81e60aa21772e23 --- /dev/null +++ b/models/official/vision/detection/utils/object_detection/faster_rcnn_box_coder.py @@ -0,0 +1,118 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Faster RCNN box coder. + +Faster RCNN box coder follows the coding schema described below: + ty = (y - ya) / ha + tx = (x - xa) / wa + th = log(h / ha) + tw = log(w / wa) + where x, y, w, h denote the box's center coordinates, width and height + respectively. Similarly, xa, ya, wa, ha denote the anchor's center + coordinates, width and height. tx, ty, tw and th denote the anchor-encoded + center, width and height respectively. + + See http://arxiv.org/abs/1506.01497 for details. +""" + +import tensorflow as tf + +from official.vision.detection.utils.object_detection import box_coder +from official.vision.detection.utils.object_detection import box_list + +EPSILON = 1e-8 + + +class FasterRcnnBoxCoder(box_coder.BoxCoder): + """Faster RCNN box coder.""" + + def __init__(self, scale_factors=None): + """Constructor for FasterRcnnBoxCoder. + + Args: + scale_factors: List of 4 positive scalars to scale ty, tx, th and tw. + If set to None, does not perform scaling. For Faster RCNN, + the open-source implementation recommends using [10.0, 10.0, 5.0, 5.0]. + """ + if scale_factors: + assert len(scale_factors) == 4 + for scalar in scale_factors: + assert scalar > 0 + self._scale_factors = scale_factors + + @property + def code_size(self): + return 4 + + def _encode(self, boxes, anchors): + """Encode a box collection with respect to anchor collection. + + Args: + boxes: BoxList holding N boxes to be encoded. + anchors: BoxList of anchors. + + Returns: + a tensor representing N anchor-encoded boxes of the format + [ty, tx, th, tw]. + """ + # Convert anchors to the center coordinate representation. + ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes() + ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes() + # Avoid NaN in division and log below. + ha += EPSILON + wa += EPSILON + h += EPSILON + w += EPSILON + + tx = (xcenter - xcenter_a) / wa + ty = (ycenter - ycenter_a) / ha + tw = tf.math.log(w / wa) + th = tf.math.log(h / ha) + # Scales location targets as used in paper for joint training. + if self._scale_factors: + ty *= self._scale_factors[0] + tx *= self._scale_factors[1] + th *= self._scale_factors[2] + tw *= self._scale_factors[3] + return tf.transpose(a=tf.stack([ty, tx, th, tw])) + + def _decode(self, rel_codes, anchors): + """Decode relative codes to boxes. + + Args: + rel_codes: a tensor representing N anchor-encoded boxes. + anchors: BoxList of anchors. + + Returns: + boxes: BoxList holding N bounding boxes. + """ + ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes() + + ty, tx, th, tw = tf.unstack(tf.transpose(a=rel_codes)) + if self._scale_factors: + ty /= self._scale_factors[0] + tx /= self._scale_factors[1] + th /= self._scale_factors[2] + tw /= self._scale_factors[3] + w = tf.exp(tw) * wa + h = tf.exp(th) * ha + ycenter = ty * ha + ycenter_a + xcenter = tx * wa + xcenter_a + ymin = ycenter - h / 2. + xmin = xcenter - w / 2. + ymax = ycenter + h / 2. + xmax = xcenter + w / 2. 
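+    # tf.stack yields a [4, N] tensor here, so transpose back to the [N, 4]
+    # [ymin, xmin, ymax, xmax] layout that BoxList expects.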
+ return box_list.BoxList(tf.transpose(a=tf.stack([ymin, xmin, ymax, xmax]))) diff --git a/models/official/vision/detection/utils/object_detection/matcher.py b/models/official/vision/detection/utils/object_detection/matcher.py new file mode 100644 index 0000000000000000000000000000000000000000..4a025d5e7118ee20f136c8a31b4c183de11f1e7f --- /dev/null +++ b/models/official/vision/detection/utils/object_detection/matcher.py @@ -0,0 +1,243 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Matcher interface and Match class. + +This module defines the Matcher interface and the Match object. The job of the +matcher is to match row and column indices based on the similarity matrix and +other optional parameters. Each column is matched to at most one row. There +are three possibilities for the matching: + +1) match: A column matches a row. +2) no_match: A column does not match any row. +3) ignore: A column that is neither 'match' nor no_match. + +The ignore case is regularly encountered in object detection: when an anchor has +a relatively small overlap with a ground-truth box, one neither wants to +consider this box a positive example (match) nor a negative example (no match). + +The Match class is used to store the match results and it provides simple apis +to query the results. +""" +from abc import ABCMeta +from abc import abstractmethod + +import tensorflow as tf + + +class Match(object): + """Class to store results from the matcher. + + This class is used to store the results from the matcher. It provides + convenient methods to query the matching results. + """ + + def __init__(self, match_results): + """Constructs a Match object. + + Args: + match_results: Integer tensor of shape [N] with (1) match_results[i]>=0, + meaning that column i is matched with row match_results[i]. + (2) match_results[i]=-1, meaning that column i is not matched. + (3) match_results[i]=-2, meaning that column i is ignored. + + Raises: + ValueError: if match_results does not have rank 1 or is not an + integer int32 scalar tensor + """ + if match_results.shape.ndims != 1: + raise ValueError('match_results should have rank 1') + if match_results.dtype != tf.int32: + raise ValueError('match_results should be an int32 or int64 scalar ' + 'tensor') + self._match_results = match_results + + @property + def match_results(self): + """The accessor for match results. + + Returns: + the tensor which encodes the match results. + """ + return self._match_results + + def matched_column_indices(self): + """Returns column indices that match to some row. + + The indices returned by this op are always sorted in increasing order. + + Returns: + column_indices: int32 tensor of shape [K] with column indices. + """ + return self._reshape_and_cast(tf.where(tf.greater(self._match_results, -1))) + + def matched_column_indicator(self): + """Returns column indices that are matched. 
+
+    Returns:
+      column_indices: int32 tensor of shape [K] with column indices.
+    """
+    return tf.greater_equal(self._match_results, 0)
+
+  def num_matched_columns(self):
+    """Returns number (int32 scalar tensor) of matched columns."""
+    return tf.size(input=self.matched_column_indices())
+
+  def unmatched_column_indices(self):
+    """Returns column indices that do not match any row.
+
+    The indices returned by this op are always sorted in increasing order.
+
+    Returns:
+      column_indices: int32 tensor of shape [K] with column indices.
+    """
+    return self._reshape_and_cast(tf.where(tf.equal(self._match_results, -1)))
+
+  def unmatched_column_indicator(self):
+    """Returns column indices that are unmatched.
+
+    Returns:
+      column_indices: int32 tensor of shape [K] with column indices.
+    """
+    return tf.equal(self._match_results, -1)
+
+  def num_unmatched_columns(self):
+    """Returns number (int32 scalar tensor) of unmatched columns."""
+    return tf.size(input=self.unmatched_column_indices())
+
+  def ignored_column_indices(self):
+    """Returns column indices that are ignored (neither Matched nor Unmatched).
+
+    The indices returned by this op are always sorted in increasing order.
+
+    Returns:
+      column_indices: int32 tensor of shape [K] with column indices.
+    """
+    return self._reshape_and_cast(tf.where(self.ignored_column_indicator()))
+
+  def ignored_column_indicator(self):
+    """Returns boolean column indicator where True means the column is ignored.
+
+    Returns:
+      column_indicator: boolean vector which is True for all ignored column
+        indices.
+    """
+    return tf.equal(self._match_results, -2)
+
+  def num_ignored_columns(self):
+    """Returns number (int32 scalar tensor) of ignored columns."""
+    return tf.size(input=self.ignored_column_indices())
+
+  def unmatched_or_ignored_column_indices(self):
+    """Returns column indices that are unmatched or ignored.
+
+    The indices returned by this op are always sorted in increasing order.
+
+    Returns:
+      column_indices: int32 tensor of shape [K] with column indices.
+    """
+    return self._reshape_and_cast(tf.where(tf.greater(0, self._match_results)))
+
+  def matched_row_indices(self):
+    """Returns row indices that match some column.
+
+    The indices returned by this op are ordered so as to be in correspondence
+    with the output of matched_column_indicator(). For example if
+    self.matched_column_indicator() is [0,2], and self.matched_row_indices() is
+    [7, 3], then we know that column 0 was matched to row 7 and column 2 was
+    matched to row 3.
+
+    Returns:
+      row_indices: int32 tensor of shape [K] with row indices.
+    """
+    return self._reshape_and_cast(
+        tf.gather(self._match_results, self.matched_column_indices()))
+
+  def _reshape_and_cast(self, t):
+    return tf.cast(tf.reshape(t, [-1]), tf.int32)
+
+  def gather_based_on_match(self, input_tensor, unmatched_value,
+                            ignored_value):
+    """Gathers elements from `input_tensor` based on match results.
+
+    For columns that are matched to a row, gathered_tensor[col] is set to
+    input_tensor[match_results[col]]. For columns that are unmatched,
+    gathered_tensor[col] is set to unmatched_value. Finally, for columns that
+    are ignored gathered_tensor[col] is set to ignored_value.
+
+    Note that the input_tensor.shape[1:] must match with unmatched_value.shape
+    and ignored_value.shape.
+
+    Args:
+      input_tensor: Tensor to gather values from.
+      unmatched_value: Constant tensor value for unmatched columns.
+      ignored_value: Constant tensor value for ignored columns.
+ + Returns: + gathered_tensor: A tensor containing values gathered from input_tensor. + The shape of the gathered tensor is [match_results.shape[0]] + + input_tensor.shape[1:]. + """ + input_tensor = tf.concat([tf.stack([ignored_value, unmatched_value]), + input_tensor], axis=0) + gather_indices = tf.maximum(self.match_results + 2, 0) + gathered_tensor = tf.gather(input_tensor, gather_indices) + return gathered_tensor + + +class Matcher(object): + """Abstract base class for matcher. + """ + __metaclass__ = ABCMeta + + def match(self, similarity_matrix, scope=None, **params): + """Computes matches among row and column indices and returns the result. + + Computes matches among the row and column indices based on the similarity + matrix and optional arguments. + + Args: + similarity_matrix: Float tensor of shape [N, M] with pairwise similarity + where higher value means more similar. + scope: Op scope name. Defaults to 'Match' if None. + **params: Additional keyword arguments for specific implementations of + the Matcher. + + Returns: + A Match object with the results of matching. + """ + if not scope: + scope = 'Match' + with tf.name_scope(scope) as scope: + return Match(self._match(similarity_matrix, **params)) + + @abstractmethod + def _match(self, similarity_matrix, **params): + """Method to be overridden by implementations. + + Args: + similarity_matrix: Float tensor of shape [N, M] with pairwise similarity + where higher value means more similar. + **params: Additional keyword arguments for specific implementations of + the Matcher. + + Returns: + match_results: Integer tensor of shape [M]: match_results[i]>=0 means + that column i is matched to row match_results[i], match_results[i]=-1 + means that the column is not matched. match_results[i]=-2 means that + the column is ignored (usually this happens when there is a very weak + match which one neither wants as positive nor negative example). + """ + pass diff --git a/models/official/vision/detection/utils/object_detection/minibatch_sampler.py b/models/official/vision/detection/utils/object_detection/minibatch_sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..b9f529ab5976ca56f014788c1263e5887fde0444 --- /dev/null +++ b/models/official/vision/detection/utils/object_detection/minibatch_sampler.py @@ -0,0 +1,93 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Base minibatch sampler module. + +The job of the minibatch_sampler is to subsample a minibatch based on some +criterion. + +The main function call is: + subsample(indicator, batch_size, **params). +Indicator is a 1d boolean tensor where True denotes which examples can be +sampled. It returns a boolean indicator where True denotes an example has been +sampled.. + +Subclasses should implement the Subsample function and can make use of the +@staticmethod SubsampleIndicator. 
+ +This is originally implemented in TensorFlow Object Detection API. +""" + +from abc import ABCMeta +from abc import abstractmethod + +import tensorflow as tf + +from official.vision.detection.utils.object_detection import ops + + +class MinibatchSampler(object): + """Abstract base class for subsampling minibatches.""" + __metaclass__ = ABCMeta + + def __init__(self): + """Constructs a minibatch sampler.""" + pass + + @abstractmethod + def subsample(self, indicator, batch_size, **params): + """Returns subsample of entries in indicator. + + Args: + indicator: boolean tensor of shape [N] whose True entries can be sampled. + batch_size: desired batch size. + **params: additional keyword arguments for specific implementations of + the MinibatchSampler. + + Returns: + sample_indicator: boolean tensor of shape [N] whose True entries have been + sampled. If sum(indicator) >= batch_size, sum(is_sampled) = batch_size + """ + pass + + @staticmethod + def subsample_indicator(indicator, num_samples): + """Subsample indicator vector. + + Given a boolean indicator vector with M elements set to `True`, the function + assigns all but `num_samples` of these previously `True` elements to + `False`. If `num_samples` is greater than M, the original indicator vector + is returned. + + Args: + indicator: a 1-dimensional boolean tensor indicating which elements + are allowed to be sampled and which are not. + num_samples: int32 scalar tensor + + Returns: + a boolean tensor with the same shape as input (indicator) tensor + """ + indices = tf.where(indicator) + indices = tf.random.shuffle(indices) + indices = tf.reshape(indices, [-1]) + + num_samples = tf.minimum(tf.size(input=indices), num_samples) + selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1])) + + selected_indicator = ops.indices_to_dense_vector( + selected_indices, + tf.shape(input=indicator)[0]) + + return tf.equal(selected_indicator, 1) diff --git a/models/official/vision/detection/utils/object_detection/ops.py b/models/official/vision/detection/utils/object_detection/ops.py new file mode 100644 index 0000000000000000000000000000000000000000..bbfc1ae9353604986ad3f1f06a4f8e2e72bb5ca0 --- /dev/null +++ b/models/official/vision/detection/utils/object_detection/ops.py @@ -0,0 +1,82 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A module for helper tensorflow ops. + +This is originally implemented in TensorFlow Object Detection API. +""" + +import tensorflow as tf + +from official.vision.detection.utils.object_detection import shape_utils + + +def indices_to_dense_vector(indices, + size, + indices_value=1., + default_value=0, + dtype=tf.float32): + """Creates dense vector with indices set to specific value and rest to zeros. 
+ + This function exists because it is unclear if it is safe to use + tf.sparse_to_dense(indices, [size], 1, validate_indices=False) + with indices which are not ordered. + This function accepts a dynamic size (e.g. tf.shape(tensor)[0]) + + Args: + indices: 1d Tensor with integer indices which are to be set to + indices_values. + size: scalar with size (integer) of output Tensor. + indices_value: values of elements specified by indices in the output vector + default_value: values of other elements in the output vector. + dtype: data type. + + Returns: + dense 1D Tensor of shape [size] with indices set to indices_values and the + rest set to default_value. + """ + size = tf.cast(size, dtype=tf.int32) + zeros = tf.ones([size], dtype=dtype) * default_value + values = tf.ones_like(indices, dtype=dtype) * indices_value + + return tf.dynamic_stitch( + [tf.range(size), tf.cast(indices, dtype=tf.int32)], [zeros, values]) + + +def matmul_gather_on_zeroth_axis(params, indices, scope=None): + """Matrix multiplication based implementation of tf.gather on zeroth axis. + + TODO(rathodv, jonathanhuang): enable sparse matmul option. + + Args: + params: A float32 Tensor. The tensor from which to gather values. + Must be at least rank 1. + indices: A Tensor. Must be one of the following types: int32, int64. + Must be in range [0, params.shape[0]) + scope: A name for the operation (optional). + + Returns: + A Tensor. Has the same type as params. Values from params gathered + from indices given by indices, with shape indices.shape + params.shape[1:]. + """ + scope = scope or 'MatMulGather' + with tf.name_scope(scope): + params_shape = shape_utils.combined_static_and_dynamic_shape(params) + indices_shape = shape_utils.combined_static_and_dynamic_shape(indices) + params2d = tf.reshape(params, [params_shape[0], -1]) + indicator_matrix = tf.one_hot(indices, params_shape[0]) + gathered_result_flattened = tf.matmul(indicator_matrix, params2d) + return tf.reshape(gathered_result_flattened, + tf.stack(indices_shape + params_shape[1:])) diff --git a/models/official/vision/detection/utils/object_detection/preprocessor.py b/models/official/vision/detection/utils/object_detection/preprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..55da5d2dfafda816be7dcb2d334a3a0711e0b699 --- /dev/null +++ b/models/official/vision/detection/utils/object_detection/preprocessor.py @@ -0,0 +1,525 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Preprocess images and bounding boxes for detection. + +We perform two sets of operations in preprocessing stage: +(a) operations that are applied to both training and testing data, +(b) operations that are applied only to training data for the purpose of + data augmentation. + +A preprocessing function receives a set of inputs, +e.g. an image and bounding boxes, +performs an operation on them, and returns them. 
+Some examples are: randomly cropping the image, randomly mirroring the image, + randomly changing the brightness, contrast, hue and + randomly jittering the bounding boxes. + +The image is a rank 4 tensor: [1, height, width, channels] with +dtype=tf.float32. The groundtruth_boxes is a rank 2 tensor: [N, 4] where +in each row there is a box with [ymin xmin ymax xmax]. +Boxes are in normalized coordinates meaning +their coordinate values range in [0, 1] + +Important Note: In tensor_dict, images is a rank 4 tensor, but preprocessing +functions receive a rank 3 tensor for processing the image. Thus, inside the +preprocess function we squeeze the image to become a rank 3 tensor and then +we pass it to the functions. At the end of the preprocess we expand the image +back to rank 4. +""" + +import tensorflow as tf + +import numpy as np + +from official.vision.detection.utils.object_detection import box_list + + +def _flip_boxes_left_right(boxes): + """Left-right flip the boxes. + + Args: + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + + Returns: + Flipped boxes. + """ + ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1) + flipped_xmin = tf.subtract(1.0, xmax) + flipped_xmax = tf.subtract(1.0, xmin) + flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], 1) + return flipped_boxes + + +def _flip_masks_left_right(masks): + """Left-right flip masks. + + Args: + masks: rank 3 float32 tensor with shape + [num_instances, height, width] representing instance masks. + + Returns: + flipped masks: rank 3 float32 tensor with shape + [num_instances, height, width] representing instance masks. + """ + return masks[:, :, ::-1] + + +def keypoint_flip_horizontal(keypoints, flip_point, flip_permutation, + scope=None): + """Flips the keypoints horizontally around the flip_point. + + This operation flips the x coordinate for each keypoint around the flip_point + and also permutes the keypoints in a manner specified by flip_permutation. + + Args: + keypoints: a tensor of shape [num_instances, num_keypoints, 2] + flip_point: (float) scalar tensor representing the x coordinate to flip the + keypoints around. + flip_permutation: rank 1 int32 tensor containing the keypoint flip + permutation. This specifies the mapping from original keypoint indices + to the flipped keypoint indices. This is used primarily for keypoints + that are not reflection invariant. E.g. Suppose there are 3 keypoints + representing ['head', 'right_eye', 'left_eye'], then a logical choice for + flip_permutation might be [0, 2, 1] since we want to swap the 'left_eye' + and 'right_eye' after a horizontal flip. + scope: name scope. + + Returns: + new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] + """ + if not scope: + scope = 'FlipHorizontal' + with tf.name_scope(scope): + keypoints = tf.transpose(a=keypoints, perm=[1, 0, 2]) + keypoints = tf.gather(keypoints, flip_permutation) + v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2) + u = flip_point * 2.0 - u + new_keypoints = tf.concat([v, u], 2) + new_keypoints = tf.transpose(a=new_keypoints, perm=[1, 0, 2]) + return new_keypoints + + +def keypoint_change_coordinate_frame(keypoints, window, scope=None): + """Changes coordinate frame of the keypoints to be relative to window's frame. 
+
+  Given a window of the form [y_min, x_min, y_max, x_max], changes keypoint
+  coordinates from keypoints of shape [num_instances, num_keypoints, 2]
+  to be relative to this window.
+
+  An example use case is data augmentation, where we are given groundtruth
+  keypoints and would like to randomly crop the image to some window. In this
+  case we need to change the coordinate frame of each groundtruth keypoint to be
+  relative to this new window.
+
+  Args:
+    keypoints: a tensor of shape [num_instances, num_keypoints, 2]
+    window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
+      window we should change the coordinate frame to.
+    scope: name scope.
+
+  Returns:
+    new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
+  """
+  if not scope:
+    scope = 'ChangeCoordinateFrame'
+  with tf.name_scope(scope):
+    win_height = window[2] - window[0]
+    win_width = window[3] - window[1]
+    # Shift keypoints so the window's min corner maps to the origin, then scale
+    # so the window maps to the unit square. This is done inline because
+    # box_list_ops is not imported in this module.
+    new_keypoints = (keypoints - [window[0], window[1]]) * tf.stack(
+        [1.0 / win_height, 1.0 / win_width])
+    return new_keypoints
+
+
+def keypoint_prune_outside_window(keypoints, window, scope=None):
+  """Prunes keypoints that fall outside a given window.
+
+  This function replaces keypoints that fall outside the given window with nan.
+  See also clip_to_window which clips any keypoints that fall outside the given
+  window.
+
+  Args:
+    keypoints: a tensor of shape [num_instances, num_keypoints, 2]
+    window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
+      window outside of which the op should prune the keypoints.
+    scope: name scope.
+
+  Returns:
+    new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
+  """
+  if not scope:
+    scope = 'PruneOutsideWindow'
+  with tf.name_scope(scope):
+    y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
+    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
+
+    valid_indices = tf.logical_and(
+        tf.logical_and(y >= win_y_min, y <= win_y_max),
+        tf.logical_and(x >= win_x_min, x <= win_x_max))
+
+    new_y = tf.where(valid_indices, y, np.nan * tf.ones_like(y))
+    new_x = tf.where(valid_indices, x, np.nan * tf.ones_like(x))
+    new_keypoints = tf.concat([new_y, new_x], 2)
+
+    return new_keypoints
+
+
+def random_horizontal_flip(image,
+                           boxes=None,
+                           masks=None,
+                           keypoints=None,
+                           keypoint_flip_permutation=None,
+                           seed=None):
+  """Randomly flips the image and detections horizontally.
+
+  The probability of flipping the image is 50%.
+
+  Args:
+    image: rank 3 float32 tensor with shape [height, width, channels].
+    boxes: (optional) rank 2 float32 tensor with shape [N, 4]
+           containing the bounding boxes.
+           Boxes are in normalized form meaning their coordinates vary
+           between [0, 1].
+           Each row is in the form of [ymin, xmin, ymax, xmax].
+    masks: (optional) rank 3 float32 tensor with shape
+           [num_instances, height, width] containing instance masks. The masks
+           are of the same height, width as the input `image`.
+    keypoints: (optional) rank 3 float32 tensor with shape
+               [num_instances, num_keypoints, 2]. The keypoints are in y-x
+               normalized coordinates.
+    keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip
+                               permutation.
+    seed: random seed
+
+  Returns:
+    image: image which is the same shape as input image.
+
+    If boxes, masks, keypoints, and keypoint_flip_permutation are not None,
+    the function also returns the following tensors.
+
+    boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
+           Boxes are in normalized form meaning their coordinates vary
+           between [0, 1].
+ masks: rank 3 float32 tensor with shape [num_instances, height, width] + containing instance masks. + keypoints: rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2] + + Raises: + ValueError: if keypoints are provided but keypoint_flip_permutation is not. + """ + + def _flip_image(image): + # flip image + image_flipped = tf.image.flip_left_right(image) + return image_flipped + + if keypoints is not None and keypoint_flip_permutation is None: + raise ValueError( + 'keypoints are provided but keypoints_flip_permutation is not provided') + + with tf.name_scope('RandomHorizontalFlip'): + result = [] + # random variable defining whether to do flip or not + do_a_flip_random = tf.greater(tf.random.uniform([], seed=seed), 0.5) + + # flip image + image = tf.cond( + pred=do_a_flip_random, + true_fn=lambda: _flip_image(image), + false_fn=lambda: image) + result.append(image) + + # flip boxes + if boxes is not None: + boxes = tf.cond( + pred=do_a_flip_random, + true_fn=lambda: _flip_boxes_left_right(boxes), + false_fn=lambda: boxes) + result.append(boxes) + + # flip masks + if masks is not None: + masks = tf.cond( + pred=do_a_flip_random, + true_fn=lambda: _flip_masks_left_right(masks), + false_fn=lambda: masks) + result.append(masks) + + # flip keypoints + if keypoints is not None and keypoint_flip_permutation is not None: + permutation = keypoint_flip_permutation + keypoints = tf.cond( + pred=do_a_flip_random, + true_fn=lambda: keypoint_flip_horizontal(keypoints, 0.5, permutation), + false_fn=lambda: keypoints) + result.append(keypoints) + + return tuple(result) + + +def _compute_new_static_size(image, min_dimension, max_dimension): + """Compute new static shape for resize_to_range method.""" + image_shape = image.get_shape().as_list() + orig_height = image_shape[0] + orig_width = image_shape[1] + num_channels = image_shape[2] + orig_min_dim = min(orig_height, orig_width) + # Calculates the larger of the possible sizes + large_scale_factor = min_dimension / float(orig_min_dim) + # Scaling orig_(height|width) by large_scale_factor will make the smaller + # dimension equal to min_dimension, save for floating point rounding errors. + # For reasonably-sized images, taking the nearest integer will reliably + # eliminate this error. + large_height = int(round(orig_height * large_scale_factor)) + large_width = int(round(orig_width * large_scale_factor)) + large_size = [large_height, large_width] + if max_dimension: + # Calculates the smaller of the possible sizes, use that if the larger + # is too big. + orig_max_dim = max(orig_height, orig_width) + small_scale_factor = max_dimension / float(orig_max_dim) + # Scaling orig_(height|width) by small_scale_factor will make the larger + # dimension equal to max_dimension, save for floating point rounding + # errors. For reasonably-sized images, taking the nearest integer will + # reliably eliminate this error. 
+ small_height = int(round(orig_height * small_scale_factor)) + small_width = int(round(orig_width * small_scale_factor)) + small_size = [small_height, small_width] + new_size = large_size + if max(large_size) > max_dimension: + new_size = small_size + else: + new_size = large_size + return tf.constant(new_size + [num_channels]) + + +def _compute_new_dynamic_size(image, min_dimension, max_dimension): + """Compute new dynamic shape for resize_to_range method.""" + image_shape = tf.shape(input=image) + orig_height = tf.cast(image_shape[0], dtype=tf.float32) + orig_width = tf.cast(image_shape[1], dtype=tf.float32) + num_channels = image_shape[2] + orig_min_dim = tf.minimum(orig_height, orig_width) + # Calculates the larger of the possible sizes + min_dimension = tf.constant(min_dimension, dtype=tf.float32) + large_scale_factor = min_dimension / orig_min_dim + # Scaling orig_(height|width) by large_scale_factor will make the smaller + # dimension equal to min_dimension, save for floating point rounding errors. + # For reasonably-sized images, taking the nearest integer will reliably + # eliminate this error. + large_height = tf.cast( + tf.round(orig_height * large_scale_factor), dtype=tf.int32) + large_width = tf.cast( + tf.round(orig_width * large_scale_factor), dtype=tf.int32) + large_size = tf.stack([large_height, large_width]) + if max_dimension: + # Calculates the smaller of the possible sizes, use that if the larger + # is too big. + orig_max_dim = tf.maximum(orig_height, orig_width) + max_dimension = tf.constant(max_dimension, dtype=tf.float32) + small_scale_factor = max_dimension / orig_max_dim + # Scaling orig_(height|width) by small_scale_factor will make the larger + # dimension equal to max_dimension, save for floating point rounding + # errors. For reasonably-sized images, taking the nearest integer will + # reliably eliminate this error. + small_height = tf.cast( + tf.round(orig_height * small_scale_factor), dtype=tf.int32) + small_width = tf.cast( + tf.round(orig_width * small_scale_factor), dtype=tf.int32) + small_size = tf.stack([small_height, small_width]) + new_size = tf.cond( + pred=tf.cast(tf.reduce_max(input_tensor=large_size), dtype=tf.float32) > + max_dimension, + true_fn=lambda: small_size, + false_fn=lambda: large_size) + else: + new_size = large_size + return tf.stack(tf.unstack(new_size) + [num_channels]) + + +def resize_to_range(image, + masks=None, + min_dimension=None, + max_dimension=None, + method=tf.image.ResizeMethod.BILINEAR, + align_corners=False, + pad_to_max_dimension=False): + """Resizes an image so its dimensions are within the provided value. + + The output size can be described by two cases: + 1. If the image can be rescaled so its minimum dimension is equal to the + provided value without the other dimension exceeding max_dimension, + then do so. + 2. Otherwise, resize so the largest dimension is equal to max_dimension. + + Args: + image: A 3D tensor of shape [height, width, channels] + masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. + min_dimension: (optional) (scalar) desired size of the smaller image + dimension. + max_dimension: (optional) (scalar) maximum allowed size + of the larger image dimension. + method: (optional) interpolation method used in resizing. Defaults to + BILINEAR. + align_corners: bool. If true, exactly align all 4 corners of the input + and output. Defaults to False. 
+ pad_to_max_dimension: Whether to resize the image and pad it with zeros + so the resulting image is of the spatial size + [max_dimension, max_dimension]. If masks are included they are padded + similarly. + + Returns: + Note that the position of the resized_image_shape changes based on whether + masks are present. + resized_image: A 3D tensor of shape [new_height, new_width, channels], + where the image has been resized (with bilinear interpolation) so that + min(new_height, new_width) == min_dimension or + max(new_height, new_width) == max_dimension. + resized_masks: If masks is not None, also outputs masks. A 3D tensor of + shape [num_instances, new_height, new_width]. + resized_image_shape: A 1D tensor of shape [3] containing shape of the + resized image. + + Raises: + ValueError: if the image is not a 3D tensor. + """ + if len(image.get_shape()) != 3: + raise ValueError('Image should be 3D tensor') + + with tf.name_scope('ResizeToRange'): + if image.get_shape().is_fully_defined(): + new_size = _compute_new_static_size(image, min_dimension, max_dimension) + else: + new_size = _compute_new_dynamic_size(image, min_dimension, max_dimension) + new_image = tf.image.resize(image, new_size[:-1], method=method) + + if pad_to_max_dimension: + new_image = tf.image.pad_to_bounding_box( + new_image, 0, 0, max_dimension, max_dimension) + + result = [new_image] + if masks is not None: + new_masks = tf.expand_dims(masks, 3) + new_masks = tf.image.resize( + new_masks, + new_size[:-1], + method=tf.image.ResizeMethod.NEAREST_NEIGHBOR) + new_masks = tf.squeeze(new_masks, 3) + if pad_to_max_dimension: + new_masks = tf.image.pad_to_bounding_box( + new_masks, 0, 0, max_dimension, max_dimension) + result.append(new_masks) + + result.append(new_size) + return result + + +def _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from): + """Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to. + + Args: + boxlist_to_copy_to: BoxList to which extra fields are copied. + boxlist_to_copy_from: BoxList from which fields are copied. + + Returns: + boxlist_to_copy_to with extra fields. + """ + for field in boxlist_to_copy_from.get_extra_fields(): + boxlist_to_copy_to.add_field(field, boxlist_to_copy_from.get_field(field)) + return boxlist_to_copy_to + + +def box_list_scale(boxlist, y_scale, x_scale, scope=None): + """scale box coordinates in x and y dimensions. + + Args: + boxlist: BoxList holding N boxes + y_scale: (float) scalar tensor + x_scale: (float) scalar tensor + scope: name scope. + + Returns: + boxlist: BoxList holding N boxes + """ + if not scope: + scope = 'Scale' + with tf.name_scope(scope): + y_scale = tf.cast(y_scale, tf.float32) + x_scale = tf.cast(x_scale, tf.float32) + y_min, x_min, y_max, x_max = tf.split( + value=boxlist.get(), num_or_size_splits=4, axis=1) + y_min = y_scale * y_min + y_max = y_scale * y_max + x_min = x_scale * x_min + x_max = x_scale * x_max + scaled_boxlist = box_list.BoxList( + tf.concat([y_min, x_min, y_max, x_max], 1)) + return _copy_extra_fields(scaled_boxlist, boxlist) + + +def keypoint_scale(keypoints, y_scale, x_scale, scope=None): + """Scales keypoint coordinates in x and y dimensions. + + Args: + keypoints: a tensor of shape [num_instances, num_keypoints, 2] + y_scale: (float) scalar tensor + x_scale: (float) scalar tensor + scope: name scope. 
+ + Returns: + new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] + """ + if not scope: + scope = 'Scale' + with tf.name_scope(scope): + y_scale = tf.cast(y_scale, tf.float32) + x_scale = tf.cast(x_scale, tf.float32) + new_keypoints = keypoints * [[[y_scale, x_scale]]] + return new_keypoints + + +def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None): + """Scales boxes from normalized to pixel coordinates. + + Args: + image: A 3D float32 tensor of shape [height, width, channels]. + boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding + boxes in normalized coordinates. Each row is of the form + [ymin, xmin, ymax, xmax]. + keypoints: (optional) rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized + coordinates. + + Returns: + image: unchanged input image. + scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the + bounding boxes in pixel coordinates. + scaled_keypoints: a 3D float32 tensor with shape + [num_instances, num_keypoints, 2] containing the keypoints in pixel + coordinates. + """ + boxlist = box_list.BoxList(boxes) + image_height = tf.shape(input=image)[0] + image_width = tf.shape(input=image)[1] + scaled_boxes = box_list_scale(boxlist, image_height, image_width).get() + result = [image, scaled_boxes] + if keypoints is not None: + scaled_keypoints = keypoint_scale(keypoints, image_height, image_width) + result.append(scaled_keypoints) + return tuple(result) diff --git a/models/official/vision/detection/utils/object_detection/region_similarity_calculator.py b/models/official/vision/detection/utils/object_detection/region_similarity_calculator.py new file mode 100644 index 0000000000000000000000000000000000000000..0af2ce495ad53c9df0f8d2eb79f7431b02ab430e --- /dev/null +++ b/models/official/vision/detection/utils/object_detection/region_similarity_calculator.py @@ -0,0 +1,143 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Region Similarity Calculators for BoxLists. + +Region Similarity Calculators compare a pairwise measure of similarity +between the boxes in two BoxLists. +""" +from abc import ABCMeta +from abc import abstractmethod + +import tensorflow as tf + + +def area(boxlist, scope=None): + """Computes area of boxes. + + Args: + boxlist: BoxList holding N boxes + scope: name scope. + + Returns: + a tensor with shape [N] representing box areas. + """ + if not scope: + scope = 'Area' + with tf.name_scope(scope): + y_min, x_min, y_max, x_max = tf.split( + value=boxlist.get(), num_or_size_splits=4, axis=1) + return tf.squeeze((y_max - y_min) * (x_max - x_min), [1]) + + +def intersection(boxlist1, boxlist2, scope=None): + """Compute pairwise intersection areas between boxes. + + Args: + boxlist1: BoxList holding N boxes + boxlist2: BoxList holding M boxes + scope: name scope. 
+ + Returns: + a tensor with shape [N, M] representing pairwise intersections + """ + if not scope: + scope = 'Intersection' + with tf.name_scope(scope): + y_min1, x_min1, y_max1, x_max1 = tf.split( + value=boxlist1.get(), num_or_size_splits=4, axis=1) + y_min2, x_min2, y_max2, x_max2 = tf.split( + value=boxlist2.get(), num_or_size_splits=4, axis=1) + all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(a=y_max2)) + all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(a=y_min2)) + intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin) + all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(a=x_max2)) + all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(a=x_min2)) + intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin) + return intersect_heights * intersect_widths + + +def iou(boxlist1, boxlist2, scope=None): + """Computes pairwise intersection-over-union between box collections. + + Args: + boxlist1: BoxList holding N boxes + boxlist2: BoxList holding M boxes + scope: name scope. + + Returns: + a tensor with shape [N, M] representing pairwise iou scores. + """ + if not scope: + scope = 'IOU' + with tf.name_scope(scope): + intersections = intersection(boxlist1, boxlist2) + areas1 = area(boxlist1) + areas2 = area(boxlist2) + unions = ( + tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections) + return tf.where( + tf.equal(intersections, 0.0), tf.zeros_like(intersections), + tf.truediv(intersections, unions)) + + +class RegionSimilarityCalculator(object): + """Abstract base class for region similarity calculator.""" + __metaclass__ = ABCMeta + + def compare(self, boxlist1, boxlist2, scope=None): + """Computes matrix of pairwise similarity between BoxLists. + + This op (to be overriden) computes a measure of pairwise similarity between + the boxes in the given BoxLists. Higher values indicate more similarity. + + Note that this method simply measures similarity and does not explicitly + perform a matching. + + Args: + boxlist1: BoxList holding N boxes. + boxlist2: BoxList holding M boxes. + scope: Op scope name. Defaults to 'Compare' if None. + + Returns: + a (float32) tensor of shape [N, M] with pairwise similarity score. + """ + if not scope: + scope = 'Compare' + with tf.name_scope(scope) as scope: + return self._compare(boxlist1, boxlist2) + + @abstractmethod + def _compare(self, boxlist1, boxlist2): + pass + + +class IouSimilarity(RegionSimilarityCalculator): + """Class to compute similarity based on Intersection over Union (IOU) metric. + + This class computes pairwise similarity between two BoxLists based on IOU. + """ + + def _compare(self, boxlist1, boxlist2): + """Compute pairwise IOU similarity between the two BoxLists. + + Args: + boxlist1: BoxList holding N boxes. + boxlist2: BoxList holding M boxes. + + Returns: + A tensor with shape [N, M] representing pairwise iou scores. + """ + return iou(boxlist1, boxlist2) diff --git a/models/official/vision/detection/utils/object_detection/shape_utils.py b/models/official/vision/detection/utils/object_detection/shape_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e30b62b7acc15b7f9f98b6c27b1a22efaf2998a8 --- /dev/null +++ b/models/official/vision/detection/utils/object_detection/shape_utils.py @@ -0,0 +1,112 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Utils used to manipulate tensor shapes.""" + +import tensorflow as tf + + +def assert_shape_equal(shape_a, shape_b): + """Asserts that shape_a and shape_b are equal. + + If the shapes are static, raises a ValueError when the shapes + mismatch. + + If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes + mismatch. + + Args: + shape_a: a list containing shape of the first tensor. + shape_b: a list containing shape of the second tensor. + + Returns: + Either a tf.no_op() when shapes are all static and a tf.assert_equal() op + when the shapes are dynamic. + + Raises: + ValueError: When shapes are both static and unequal. + """ + if (all(isinstance(dim, int) for dim in shape_a) and + all(isinstance(dim, int) for dim in shape_b)): + if shape_a != shape_b: + raise ValueError('Unequal shapes {}, {}'.format(shape_a, shape_b)) + else: return tf.no_op() + else: + return tf.assert_equal(shape_a, shape_b) + + +def combined_static_and_dynamic_shape(tensor): + """Returns a list containing static and dynamic values for the dimensions. + + Returns a list of static and dynamic values for shape dimensions. This is + useful to preserve static shapes when available in reshape operation. + + Args: + tensor: A tensor of any type. + + Returns: + A list of size tensor.shape.ndims containing integers or a scalar tensor. + """ + static_tensor_shape = tensor.shape.as_list() + dynamic_tensor_shape = tf.shape(input=tensor) + combined_shape = [] + for index, dim in enumerate(static_tensor_shape): + if dim is not None: + combined_shape.append(dim) + else: + combined_shape.append(dynamic_tensor_shape[index]) + return combined_shape + + +def pad_or_clip_nd(tensor, output_shape): + """Pad or Clip given tensor to the output shape. + + Args: + tensor: Input tensor to pad or clip. + output_shape: A list of integers / scalar tensors (or None for dynamic dim) + representing the size to pad or clip each dimension of the input tensor. + + Returns: + Input tensor padded and clipped to the output shape. + """ + tensor_shape = tf.shape(input=tensor) + clip_size = [ + tf.where(tensor_shape[i] - shape > 0, shape, -1) + if shape is not None else -1 for i, shape in enumerate(output_shape) + ] + clipped_tensor = tf.slice( + tensor, + begin=tf.zeros(len(clip_size), dtype=tf.int32), + size=clip_size) + + # Pad tensor if the shape of clipped tensor is smaller than the expected + # shape. 
+ clipped_tensor_shape = tf.shape(input=clipped_tensor) + trailing_paddings = [ + shape - clipped_tensor_shape[i] if shape is not None else 0 + for i, shape in enumerate(output_shape) + ] + paddings = tf.stack( + [ + tf.zeros(len(trailing_paddings), dtype=tf.int32), + trailing_paddings + ], + axis=1) + padded_tensor = tf.pad(tensor=clipped_tensor, paddings=paddings) + output_static_shape = [ + dim if not isinstance(dim, tf.Tensor) else None for dim in output_shape + ] + padded_tensor.set_shape(output_static_shape) + return padded_tensor diff --git a/models/official/vision/detection/utils/object_detection/target_assigner.py b/models/official/vision/detection/utils/object_detection/target_assigner.py new file mode 100644 index 0000000000000000000000000000000000000000..c04448efb052b45da65366b26e7d773b62015773 --- /dev/null +++ b/models/official/vision/detection/utils/object_detection/target_assigner.py @@ -0,0 +1,314 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Base target assigner module. + +The job of a TargetAssigner is, for a given set of anchors (bounding boxes) and +groundtruth detections (bounding boxes), to assign classification and regression +targets to each anchor as well as weights to each anchor (specifying, e.g., +which anchors should not contribute to training loss). + +It assigns classification/regression targets by performing the following steps: +1) Computing pairwise similarity between anchors and groundtruth boxes using a + provided RegionSimilarity Calculator +2) Computing a matching based on the similarity matrix using a provided Matcher +3) Assigning regression targets based on the matching and a provided BoxCoder +4) Assigning classification targets based on the matching and groundtruth labels + +Note that TargetAssigners only operate on detections from a single +image at a time, so any logic for applying a TargetAssigner to multiple +images must be handled externally. +""" +import tensorflow as tf + +from official.vision.detection.utils.object_detection import box_list +from official.vision.detection.utils.object_detection import shape_utils + + +KEYPOINTS_FIELD_NAME = 'keypoints' + + +class TargetAssigner(object): + """Target assigner to compute classification and regression targets.""" + + def __init__(self, similarity_calc, matcher, box_coder, + negative_class_weight=1.0, unmatched_cls_target=None): + """Construct Object Detection Target Assigner. + + Args: + similarity_calc: a RegionSimilarityCalculator + matcher: Matcher used to match groundtruth to anchors. + box_coder: BoxCoder used to encode matching groundtruth boxes with + respect to anchors. + negative_class_weight: classification weight to be associated to negative + anchors (default: 1.0). The weight must be in [0., 1.]. 
+ unmatched_cls_target: a float32 tensor with shape [d_1, d_2, ..., d_k] + which is consistent with the classification target for each + anchor (and can be empty for scalar targets). This shape must thus be + compatible with the groundtruth labels that are passed to the "assign" + function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]). + If set to None, unmatched_cls_target is set to be [0] for each anchor. + + Raises: + ValueError: if similarity_calc is not a RegionSimilarityCalculator or + if matcher is not a Matcher or if box_coder is not a BoxCoder + """ + self._similarity_calc = similarity_calc + self._matcher = matcher + self._box_coder = box_coder + self._negative_class_weight = negative_class_weight + if unmatched_cls_target is None: + self._unmatched_cls_target = tf.constant([0], tf.float32) + else: + self._unmatched_cls_target = unmatched_cls_target + + @property + def box_coder(self): + return self._box_coder + + def assign(self, anchors, groundtruth_boxes, groundtruth_labels=None, + groundtruth_weights=None, **params): + """Assign classification and regression targets to each anchor. + + For a given set of anchors and groundtruth detections, match anchors + to groundtruth_boxes and assign classification and regression targets to + each anchor as well as weights based on the resulting match (specifying, + e.g., which anchors should not contribute to training loss). + + Anchors that are not matched to anything are given a classification target + of self._unmatched_cls_target which can be specified via the constructor. + + Args: + anchors: a BoxList representing N anchors + groundtruth_boxes: a BoxList representing M groundtruth boxes + groundtruth_labels: a tensor of shape [M, d_1, ... d_k] + with labels for each of the ground_truth boxes. The subshape + [d_1, ... d_k] can be empty (corresponding to scalar inputs). When set + to None, groundtruth_labels assumes a binary problem where all + ground_truth boxes get a positive label (of 1). + groundtruth_weights: a float tensor of shape [M] indicating the weight to + assign to all anchors match to a particular groundtruth box. The weights + must be in [0., 1.]. If None, all weights are set to 1. + **params: Additional keyword arguments for specific implementations of + the Matcher. + + Returns: + cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], + where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels + which has shape [num_gt_boxes, d_1, d_2, ... d_k]. + cls_weights: a float32 tensor with shape [num_anchors] + reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension] + reg_weights: a float32 tensor with shape [num_anchors] + match: a matcher.Match object encoding the match between anchors and + groundtruth boxes, with rows corresponding to groundtruth boxes + and columns corresponding to anchors. 
+ + Raises: + ValueError: if anchors or groundtruth_boxes are not of type + box_list.BoxList + """ + if not isinstance(anchors, box_list.BoxList): + raise ValueError('anchors must be an BoxList') + if not isinstance(groundtruth_boxes, box_list.BoxList): + raise ValueError('groundtruth_boxes must be an BoxList') + + if groundtruth_labels is None: + groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(), + 0)) + groundtruth_labels = tf.expand_dims(groundtruth_labels, -1) + unmatched_shape_assert = shape_utils.assert_shape_equal( + shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[1:], + shape_utils.combined_static_and_dynamic_shape( + self._unmatched_cls_target)) + labels_and_box_shapes_assert = shape_utils.assert_shape_equal( + shape_utils.combined_static_and_dynamic_shape( + groundtruth_labels)[:1], + shape_utils.combined_static_and_dynamic_shape( + groundtruth_boxes.get())[:1]) + + if groundtruth_weights is None: + num_gt_boxes = groundtruth_boxes.num_boxes_static() + if not num_gt_boxes: + num_gt_boxes = groundtruth_boxes.num_boxes() + groundtruth_weights = tf.ones([num_gt_boxes], dtype=tf.float32) + with tf.control_dependencies( + [unmatched_shape_assert, labels_and_box_shapes_assert]): + match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes, + anchors) + match = self._matcher.match(match_quality_matrix, **params) + reg_targets = self._create_regression_targets(anchors, + groundtruth_boxes, + match) + cls_targets = self._create_classification_targets(groundtruth_labels, + match) + reg_weights = self._create_regression_weights(match, groundtruth_weights) + cls_weights = self._create_classification_weights(match, + groundtruth_weights) + + num_anchors = anchors.num_boxes_static() + if num_anchors is not None: + reg_targets = self._reset_target_shape(reg_targets, num_anchors) + cls_targets = self._reset_target_shape(cls_targets, num_anchors) + reg_weights = self._reset_target_shape(reg_weights, num_anchors) + cls_weights = self._reset_target_shape(cls_weights, num_anchors) + + return cls_targets, cls_weights, reg_targets, reg_weights, match + + def _reset_target_shape(self, target, num_anchors): + """Sets the static shape of the target. + + Args: + target: the target tensor. Its first dimension will be overwritten. + num_anchors: the number of anchors, which is used to override the target's + first dimension. + + Returns: + A tensor with the shape info filled in. + """ + target_shape = target.get_shape().as_list() + target_shape[0] = num_anchors + target.set_shape(target_shape) + return target + + def _create_regression_targets(self, anchors, groundtruth_boxes, match): + """Returns a regression target for each anchor. 
+ + Args: + anchors: a BoxList representing N anchors + groundtruth_boxes: a BoxList representing M groundtruth_boxes + match: a matcher.Match object + + Returns: + reg_targets: a float32 tensor with shape [N, box_code_dimension] + """ + matched_gt_boxes = match.gather_based_on_match( + groundtruth_boxes.get(), + unmatched_value=tf.zeros(4), + ignored_value=tf.zeros(4)) + matched_gt_boxlist = box_list.BoxList(matched_gt_boxes) + if groundtruth_boxes.has_field(KEYPOINTS_FIELD_NAME): + groundtruth_keypoints = groundtruth_boxes.get_field(KEYPOINTS_FIELD_NAME) + matched_keypoints = match.gather_based_on_match( + groundtruth_keypoints, + unmatched_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]), + ignored_value=tf.zeros(groundtruth_keypoints.get_shape()[1:])) + matched_gt_boxlist.add_field(KEYPOINTS_FIELD_NAME, matched_keypoints) + matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors) + match_results_shape = shape_utils.combined_static_and_dynamic_shape( + match.match_results) + + # Zero out the unmatched and ignored regression targets. + unmatched_ignored_reg_targets = tf.tile( + self._default_regression_target(), [match_results_shape[0], 1]) + matched_anchors_mask = match.matched_column_indicator() + # To broadcast matched_anchors_mask to the same shape as + # matched_reg_targets. + matched_anchors_mask = tf.tile( + tf.expand_dims(matched_anchors_mask, 1), + [1, tf.shape(matched_reg_targets)[1]]) + reg_targets = tf.where(matched_anchors_mask, matched_reg_targets, + unmatched_ignored_reg_targets) + return reg_targets + + def _default_regression_target(self): + """Returns the default target for anchors to regress to. + + Default regression targets are set to zero (though in + this implementation what these targets are set to should + not matter as the regression weight of any box set to + regress to the default target is zero). + + Returns: + default_target: a float32 tensor with shape [1, box_code_dimension] + """ + return tf.constant([self._box_coder.code_size*[0]], tf.float32) + + def _create_classification_targets(self, groundtruth_labels, match): + """Create classification targets for each anchor. + + Assign a classification target of for each anchor to the matching + groundtruth label that is provided by match. Anchors that are not matched + to anything are given the target self._unmatched_cls_target + + Args: + groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k] + with labels for each of the ground_truth boxes. The subshape + [d_1, ... d_k] can be empty (corresponding to scalar labels). + match: a matcher.Match object that provides a matching between anchors + and groundtruth boxes. + + Returns: + a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the + subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has + shape [num_gt_boxes, d_1, d_2, ... d_k]. + """ + return match.gather_based_on_match( + groundtruth_labels, + unmatched_value=self._unmatched_cls_target, + ignored_value=self._unmatched_cls_target) + + def _create_regression_weights(self, match, groundtruth_weights): + """Set regression weight for each anchor. + + Only positive anchors are set to contribute to the regression loss, so this + method returns a weight of 1 for every positive anchor and 0 for every + negative anchor. + + Args: + match: a matcher.Match object that provides a matching between anchors + and groundtruth boxes. 
+ groundtruth_weights: a float tensor of shape [M] indicating the weight to + assign to all anchors match to a particular groundtruth box. + + Returns: + a float32 tensor with shape [num_anchors] representing regression weights. + """ + return match.gather_based_on_match( + groundtruth_weights, ignored_value=0., unmatched_value=0.) + + def _create_classification_weights(self, + match, + groundtruth_weights): + """Create classification weights for each anchor. + + Positive (matched) anchors are associated with a weight of + positive_class_weight and negative (unmatched) anchors are associated with + a weight of negative_class_weight. When anchors are ignored, weights are set + to zero. By default, both positive/negative weights are set to 1.0, + but they can be adjusted to handle class imbalance (which is almost always + the case in object detection). + + Args: + match: a matcher.Match object that provides a matching between anchors + and groundtruth boxes. + groundtruth_weights: a float tensor of shape [M] indicating the weight to + assign to all anchors match to a particular groundtruth box. + + Returns: + a float32 tensor with shape [num_anchors] representing classification + weights. + """ + return match.gather_based_on_match( + groundtruth_weights, + ignored_value=0., + unmatched_value=self._negative_class_weight) + + def get_box_coder(self): + """Get BoxCoder of this TargetAssigner. + + Returns: + BoxCoder object. + """ + return self._box_coder diff --git a/models/official/vision/detection/utils/object_detection/visualization_utils.py b/models/official/vision/detection/utils/object_detection/visualization_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..db4af8089df673cd5c57c4a020b5d7e8f03846c9 --- /dev/null +++ b/models/official/vision/detection/utils/object_detection/visualization_utils.py @@ -0,0 +1,733 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A set of functions that are used for visualization. + +These functions often receive an image, perform some visualization on the image. +The functions do not return a value, instead they modify the image itself. + +""" +import collections +import functools +from absl import logging +# Set headless-friendly backend. 
+import matplotlib; matplotlib.use('Agg') # pylint: disable=multiple-statements +import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top +import numpy as np +import PIL.Image as Image +import PIL.ImageColor as ImageColor +import PIL.ImageDraw as ImageDraw +import PIL.ImageFont as ImageFont +import six +import tensorflow as tf + +from official.vision.detection.utils import box_utils +from official.vision.detection.utils.object_detection import shape_utils + + +_TITLE_LEFT_MARGIN = 10 +_TITLE_TOP_MARGIN = 10 +STANDARD_COLORS = [ + 'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque', + 'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite', + 'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan', + 'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange', + 'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet', + 'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite', + 'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod', + 'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki', + 'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue', + 'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey', + 'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue', + 'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime', + 'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid', + 'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen', + 'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin', + 'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed', + 'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed', + 'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple', + 'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown', + 'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue', + 'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow', + 'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White', + 'WhiteSmoke', 'Yellow', 'YellowGreen' +] + + +def save_image_array_as_png(image, output_path): + """Saves an image (represented as a numpy array) to PNG. + + Args: + image: a numpy array with shape [height, width, 3]. + output_path: path to which image should be written. + """ + image_pil = Image.fromarray(np.uint8(image)).convert('RGB') + with tf.io.gfile.GFile(output_path, 'w') as fid: + image_pil.save(fid, 'PNG') + + +def encode_image_array_as_png_str(image): + """Encodes a numpy array into a PNG string. + + Args: + image: a numpy array with shape [height, width, 3]. + + Returns: + PNG encoded image string. 
+ """ + image_pil = Image.fromarray(np.uint8(image)) + output = six.BytesIO() + image_pil.save(output, format='PNG') + png_string = output.getvalue() + output.close() + return png_string + + +def visualize_images_with_bounding_boxes(images, box_outputs, step, + summary_writer): + """Records subset of evaluation images with bounding boxes.""" + if not isinstance(images, list): + logging.warning('visualize_images_with_bounding_boxes expects list of ' + 'images but received type: %s and value: %s', + type(images), images) + return + + image_shape = tf.shape(images[0]) + image_height = tf.cast(image_shape[0], tf.float32) + image_width = tf.cast(image_shape[1], tf.float32) + normalized_boxes = box_utils.normalize_boxes(box_outputs, + [image_height, image_width]) + + bounding_box_color = tf.constant([[1.0, 1.0, 0.0, 1.0]]) + image_summary = tf.image.draw_bounding_boxes( + tf.cast(images, tf.float32), normalized_boxes, bounding_box_color) + with summary_writer.as_default(): + tf.summary.image('bounding_box_summary', image_summary, step=step) + summary_writer.flush() + + +def draw_bounding_box_on_image_array(image, + ymin, + xmin, + ymax, + xmax, + color='red', + thickness=4, + display_str_list=(), + use_normalized_coordinates=True): + """Adds a bounding box to an image (numpy array). + + Bounding box coordinates can be specified in either absolute (pixel) or + normalized coordinates by setting the use_normalized_coordinates argument. + + Args: + image: a numpy array with shape [height, width, 3]. + ymin: ymin of bounding box. + xmin: xmin of bounding box. + ymax: ymax of bounding box. + xmax: xmax of bounding box. + color: color to draw bounding box. Default is red. + thickness: line thickness. Default value is 4. + display_str_list: list of strings to display in box + (each to be shown on its own line). + use_normalized_coordinates: If True (default), treat coordinates + ymin, xmin, ymax, xmax as relative to the image. Otherwise treat + coordinates as absolute. + """ + image_pil = Image.fromarray(np.uint8(image)).convert('RGB') + draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, color, + thickness, display_str_list, + use_normalized_coordinates) + np.copyto(image, np.array(image_pil)) + + +def draw_bounding_box_on_image(image, + ymin, + xmin, + ymax, + xmax, + color='red', + thickness=4, + display_str_list=(), + use_normalized_coordinates=True): + """Adds a bounding box to an image. + + Bounding box coordinates can be specified in either absolute (pixel) or + normalized coordinates by setting the use_normalized_coordinates argument. + + Each string in display_str_list is displayed on a separate line above the + bounding box in black text on a rectangle filled with the input 'color'. + If the top of the bounding box extends to the edge of the image, the strings + are displayed below the bounding box. + + Args: + image: a PIL.Image object. + ymin: ymin of bounding box. + xmin: xmin of bounding box. + ymax: ymax of bounding box. + xmax: xmax of bounding box. + color: color to draw bounding box. Default is red. + thickness: line thickness. Default value is 4. + display_str_list: list of strings to display in box + (each to be shown on its own line). + use_normalized_coordinates: If True (default), treat coordinates + ymin, xmin, ymax, xmax as relative to the image. Otherwise treat + coordinates as absolute. 
+ """ + draw = ImageDraw.Draw(image) + im_width, im_height = image.size + if use_normalized_coordinates: + (left, right, top, bottom) = (xmin * im_width, xmax * im_width, + ymin * im_height, ymax * im_height) + else: + (left, right, top, bottom) = (xmin, xmax, ymin, ymax) + draw.line([(left, top), (left, bottom), (right, bottom), + (right, top), (left, top)], width=thickness, fill=color) + try: + font = ImageFont.truetype('arial.ttf', 24) + except IOError: + font = ImageFont.load_default() + + # If the total height of the display strings added to the top of the bounding + # box exceeds the top of the image, stack the strings below the bounding box + # instead of above. + display_str_heights = [font.getsize(ds)[1] for ds in display_str_list] + # Each display_str has a top and bottom margin of 0.05x. + total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights) + + if top > total_display_str_height: + text_bottom = top + else: + text_bottom = bottom + total_display_str_height + # Reverse list and print from bottom to top. + for display_str in display_str_list[::-1]: + text_width, text_height = font.getsize(display_str) + margin = np.ceil(0.05 * text_height) + draw.rectangle( + [(left, text_bottom - text_height - 2 * margin), (left + text_width, + text_bottom)], + fill=color) + draw.text( + (left + margin, text_bottom - text_height - margin), + display_str, + fill='black', + font=font) + text_bottom -= text_height - 2 * margin + + +def draw_bounding_boxes_on_image_array(image, + boxes, + color='red', + thickness=4, + display_str_list_list=()): + """Draws bounding boxes on image (numpy array). + + Args: + image: a numpy array object. + boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax). + The coordinates are in normalized format between [0, 1]. + color: color to draw bounding box. Default is red. + thickness: line thickness. Default value is 4. + display_str_list_list: list of list of strings. + a list of strings for each bounding box. + The reason to pass a list of strings for a + bounding box is that it might contain + multiple labels. + + Raises: + ValueError: if boxes is not a [N, 4] array + """ + image_pil = Image.fromarray(image) + draw_bounding_boxes_on_image(image_pil, boxes, color, thickness, + display_str_list_list) + np.copyto(image, np.array(image_pil)) + + +def draw_bounding_boxes_on_image(image, + boxes, + color='red', + thickness=4, + display_str_list_list=()): + """Draws bounding boxes on image. + + Args: + image: a PIL.Image object. + boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax). + The coordinates are in normalized format between [0, 1]. + color: color to draw bounding box. Default is red. + thickness: line thickness. Default value is 4. + display_str_list_list: list of list of strings. + a list of strings for each bounding box. + The reason to pass a list of strings for a + bounding box is that it might contain + multiple labels. 
+ + Raises: + ValueError: if boxes is not a [N, 4] array + """ + boxes_shape = boxes.shape + if not boxes_shape: + return + if len(boxes_shape) != 2 or boxes_shape[1] != 4: + raise ValueError('Input must be of size [N, 4]') + for i in range(boxes_shape[0]): + display_str_list = () + if display_str_list_list: + display_str_list = display_str_list_list[i] + draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2], + boxes[i, 3], color, thickness, display_str_list) + + +def _visualize_boxes(image, boxes, classes, scores, category_index, **kwargs): + return visualize_boxes_and_labels_on_image_array( + image, boxes, classes, scores, category_index=category_index, **kwargs) + + +def _visualize_boxes_and_masks(image, boxes, classes, scores, masks, + category_index, **kwargs): + return visualize_boxes_and_labels_on_image_array( + image, + boxes, + classes, + scores, + category_index=category_index, + instance_masks=masks, + **kwargs) + + +def _visualize_boxes_and_keypoints(image, boxes, classes, scores, keypoints, + category_index, **kwargs): + return visualize_boxes_and_labels_on_image_array( + image, + boxes, + classes, + scores, + category_index=category_index, + keypoints=keypoints, + **kwargs) + + +def _visualize_boxes_and_masks_and_keypoints( + image, boxes, classes, scores, masks, keypoints, category_index, **kwargs): + return visualize_boxes_and_labels_on_image_array( + image, + boxes, + classes, + scores, + category_index=category_index, + instance_masks=masks, + keypoints=keypoints, + **kwargs) + + +def _resize_original_image(image, image_shape): + image = tf.expand_dims(image, 0) + image = tf.image.resize( + image, image_shape, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR) + return tf.cast(tf.squeeze(image, 0), tf.uint8) + + +def draw_bounding_boxes_on_image_tensors(images, + boxes, + classes, + scores, + category_index, + original_image_spatial_shape=None, + true_image_shape=None, + instance_masks=None, + keypoints=None, + max_boxes_to_draw=20, + min_score_thresh=0.2, + use_normalized_coordinates=True): + """Draws bounding boxes, masks, and keypoints on batch of image tensors. + + Args: + images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional + channels will be ignored. If C = 1, then we convert the images to RGB + images. + boxes: [N, max_detections, 4] float32 tensor of detection boxes. + classes: [N, max_detections] int tensor of detection classes. Note that + classes are 1-indexed. + scores: [N, max_detections] float32 tensor of detection scores. + category_index: a dict that maps integer ids to category dicts. e.g. + {1: {1: 'dog'}, 2: {2: 'cat'}, ...} + original_image_spatial_shape: [N, 2] tensor containing the spatial size of + the original image. + true_image_shape: [N, 3] tensor containing the spatial size of unpadded + original_image. + instance_masks: A 4D uint8 tensor of shape [N, max_detection, H, W] with + instance masks. + keypoints: A 4D float32 tensor of shape [N, max_detection, num_keypoints, 2] + with keypoints. + max_boxes_to_draw: Maximum number of boxes to draw on an image. Default 20. + min_score_thresh: Minimum score threshold for visualization. Default 0.2. + use_normalized_coordinates: Whether to assume boxes and kepoints are in + normalized coordinates (as opposed to absolute coordiantes). + Default is True. + + Returns: + 4D image tensor of type uint8, with boxes drawn on top. + """ + # Additional channels are being ignored. 
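+  # Single-channel inputs are converted to RGB below so that the drawing
+  # utilities, which expect 3-channel images, can operate on them.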
+ if images.shape[3] > 3: + images = images[:, :, :, 0:3] + elif images.shape[3] == 1: + images = tf.image.grayscale_to_rgb(images) + visualization_keyword_args = { + 'use_normalized_coordinates': use_normalized_coordinates, + 'max_boxes_to_draw': max_boxes_to_draw, + 'min_score_thresh': min_score_thresh, + 'agnostic_mode': False, + 'line_thickness': 4 + } + if true_image_shape is None: + true_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 3]) + else: + true_shapes = true_image_shape + if original_image_spatial_shape is None: + original_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 2]) + else: + original_shapes = original_image_spatial_shape + + if instance_masks is not None and keypoints is None: + visualize_boxes_fn = functools.partial( + _visualize_boxes_and_masks, + category_index=category_index, + **visualization_keyword_args) + elems = [ + true_shapes, original_shapes, images, boxes, classes, scores, + instance_masks + ] + elif instance_masks is None and keypoints is not None: + visualize_boxes_fn = functools.partial( + _visualize_boxes_and_keypoints, + category_index=category_index, + **visualization_keyword_args) + elems = [ + true_shapes, original_shapes, images, boxes, classes, scores, keypoints + ] + elif instance_masks is not None and keypoints is not None: + visualize_boxes_fn = functools.partial( + _visualize_boxes_and_masks_and_keypoints, + category_index=category_index, + **visualization_keyword_args) + elems = [ + true_shapes, original_shapes, images, boxes, classes, scores, + instance_masks, keypoints + ] + else: + visualize_boxes_fn = functools.partial( + _visualize_boxes, + category_index=category_index, + **visualization_keyword_args) + elems = [ + true_shapes, original_shapes, images, boxes, classes, scores + ] + + def draw_boxes(image_and_detections): + """Draws boxes on image.""" + true_shape = image_and_detections[0] + original_shape = image_and_detections[1] + if true_image_shape is not None: + image = shape_utils.pad_or_clip_nd( + image_and_detections[2], [true_shape[0], true_shape[1], 3]) + if original_image_spatial_shape is not None: + image_and_detections[2] = _resize_original_image(image, original_shape) + + image_with_boxes = tf.compat.v1.py_func(visualize_boxes_fn, + image_and_detections[2:], tf.uint8) + return image_with_boxes + + images = tf.map_fn(draw_boxes, elems, dtype=tf.uint8, back_prop=False) + return images + + +def draw_keypoints_on_image_array(image, + keypoints, + color='red', + radius=2, + use_normalized_coordinates=True): + """Draws keypoints on an image (numpy array). + + Args: + image: a numpy array with shape [height, width, 3]. + keypoints: a numpy array with shape [num_keypoints, 2]. + color: color to draw the keypoints with. Default is red. + radius: keypoint radius. Default value is 2. + use_normalized_coordinates: if True (default), treat keypoint values as + relative to the image. Otherwise treat them as absolute. + """ + image_pil = Image.fromarray(np.uint8(image)).convert('RGB') + draw_keypoints_on_image(image_pil, keypoints, color, radius, + use_normalized_coordinates) + np.copyto(image, np.array(image_pil)) + + +def draw_keypoints_on_image(image, + keypoints, + color='red', + radius=2, + use_normalized_coordinates=True): + """Draws keypoints on an image. + + Args: + image: a PIL.Image object. + keypoints: a numpy array with shape [num_keypoints, 2]. + color: color to draw the keypoints with. Default is red. + radius: keypoint radius. Default value is 2. 
+ use_normalized_coordinates: if True (default), treat keypoint values as + relative to the image. Otherwise treat them as absolute. + """ + draw = ImageDraw.Draw(image) + im_width, im_height = image.size + keypoints_x = [k[1] for k in keypoints] + keypoints_y = [k[0] for k in keypoints] + if use_normalized_coordinates: + keypoints_x = tuple([im_width * x for x in keypoints_x]) + keypoints_y = tuple([im_height * y for y in keypoints_y]) + for keypoint_x, keypoint_y in zip(keypoints_x, keypoints_y): + draw.ellipse([(keypoint_x - radius, keypoint_y - radius), + (keypoint_x + radius, keypoint_y + radius)], + outline=color, fill=color) + + +def draw_mask_on_image_array(image, mask, color='red', alpha=0.4): + """Draws mask on an image. + + Args: + image: uint8 numpy array with shape (img_height, img_height, 3) + mask: a uint8 numpy array of shape (img_height, img_height) with + values between either 0 or 1. + color: color to draw the keypoints with. Default is red. + alpha: transparency value between 0 and 1. (default: 0.4) + + Raises: + ValueError: On incorrect data type for image or masks. + """ + if image.dtype != np.uint8: + raise ValueError('`image` not of type np.uint8') + if mask.dtype != np.uint8: + raise ValueError('`mask` not of type np.uint8') + if np.any(np.logical_and(mask != 1, mask != 0)): + raise ValueError('`mask` elements should be in [0, 1]') + if image.shape[:2] != mask.shape: + raise ValueError('The image has spatial dimensions %s but the mask has ' + 'dimensions %s' % (image.shape[:2], mask.shape)) + rgb = ImageColor.getrgb(color) + pil_image = Image.fromarray(image) + + solid_color = np.expand_dims( + np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3]) + pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA') + pil_mask = Image.fromarray(np.uint8(255.0*alpha*mask)).convert('L') + pil_image = Image.composite(pil_solid_color, pil_image, pil_mask) + np.copyto(image, np.array(pil_image.convert('RGB'))) + + +def visualize_boxes_and_labels_on_image_array( + image, + boxes, + classes, + scores, + category_index, + instance_masks=None, + instance_boundaries=None, + keypoints=None, + use_normalized_coordinates=False, + max_boxes_to_draw=20, + min_score_thresh=.5, + agnostic_mode=False, + line_thickness=4, + groundtruth_box_visualization_color='black', + skip_scores=False, + skip_labels=False): + """Overlay labeled boxes on an image with formatted scores and label names. + + This function groups boxes that correspond to the same location + and creates a display string for each detection and overlays these + on the image. Note that this function modifies the image in place, and returns + that same image. + + Args: + image: uint8 numpy array with shape (img_height, img_width, 3) + boxes: a numpy array of shape [N, 4] + classes: a numpy array of shape [N]. Note that class indices are 1-based, + and match the keys in the label map. + scores: a numpy array of shape [N] or None. If scores=None, then + this function assumes that the boxes to be plotted are groundtruth + boxes and plot all boxes as black with no classes or scores. + category_index: a dict containing category dictionaries (each holding + category index `id` and category name `name`) keyed by category indices. + instance_masks: a numpy array of shape [N, image_height, image_width] with + values ranging between 0 and 1, can be None. + instance_boundaries: a numpy array of shape [N, image_height, image_width] + with values ranging between 0 and 1, can be None. 
+ keypoints: a numpy array of shape [N, num_keypoints, 2], can + be None + use_normalized_coordinates: whether boxes is to be interpreted as + normalized coordinates or not. + max_boxes_to_draw: maximum number of boxes to visualize. If None, draw + all boxes. + min_score_thresh: minimum score threshold for a box to be visualized + agnostic_mode: boolean (default: False) controlling whether to evaluate in + class-agnostic mode or not. This mode will display scores but ignore + classes. + line_thickness: integer (default: 4) controlling line width of the boxes. + groundtruth_box_visualization_color: box color for visualizing groundtruth + boxes + skip_scores: whether to skip score when drawing a single detection + skip_labels: whether to skip label when drawing a single detection + + Returns: + uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes. + """ + # Create a display string (and color) for every box location, group any boxes + # that correspond to the same location. + box_to_display_str_map = collections.defaultdict(list) + box_to_color_map = collections.defaultdict(str) + box_to_instance_masks_map = {} + box_to_instance_boundaries_map = {} + box_to_keypoints_map = collections.defaultdict(list) + if not max_boxes_to_draw: + max_boxes_to_draw = boxes.shape[0] + for i in range(min(max_boxes_to_draw, boxes.shape[0])): + if scores is None or scores[i] > min_score_thresh: + box = tuple(boxes[i].tolist()) + if instance_masks is not None: + box_to_instance_masks_map[box] = instance_masks[i] + if instance_boundaries is not None: + box_to_instance_boundaries_map[box] = instance_boundaries[i] + if keypoints is not None: + box_to_keypoints_map[box].extend(keypoints[i]) + if scores is None: + box_to_color_map[box] = groundtruth_box_visualization_color + else: + display_str = '' + if not skip_labels: + if not agnostic_mode: + if classes[i] in category_index.keys(): + class_name = category_index[classes[i]]['name'] + else: + class_name = 'N/A' + display_str = str(class_name) + if not skip_scores: + if not display_str: + display_str = '{}%'.format(int(100*scores[i])) + else: + display_str = '{}: {}%'.format(display_str, int(100*scores[i])) + box_to_display_str_map[box].append(display_str) + if agnostic_mode: + box_to_color_map[box] = 'DarkOrange' + else: + box_to_color_map[box] = STANDARD_COLORS[ + classes[i] % len(STANDARD_COLORS)] + + # Draw all boxes onto image. + for box, color in box_to_color_map.items(): + ymin, xmin, ymax, xmax = box + if instance_masks is not None: + draw_mask_on_image_array( + image, + box_to_instance_masks_map[box], + color=color + ) + if instance_boundaries is not None: + draw_mask_on_image_array( + image, + box_to_instance_boundaries_map[box], + color='red', + alpha=1.0 + ) + draw_bounding_box_on_image_array( + image, + ymin, + xmin, + ymax, + xmax, + color=color, + thickness=line_thickness, + display_str_list=box_to_display_str_map[box], + use_normalized_coordinates=use_normalized_coordinates) + if keypoints is not None: + draw_keypoints_on_image_array( + image, + box_to_keypoints_map[box], + color=color, + radius=line_thickness / 2, + use_normalized_coordinates=use_normalized_coordinates) + + return image + + +def add_cdf_image_summary(values, name): + """Adds a tf.summary.image for a CDF plot of the values. + + Normalizes `values` such that they sum to 1, plots the cumulative distribution + function and creates a tf image summary. + + Args: + values: a 1-D float32 tensor containing the values. + name: name for the image summary. 
+ """ + def cdf_plot(values): + """Numpy function to plot CDF.""" + normalized_values = values / np.sum(values) + sorted_values = np.sort(normalized_values) + cumulative_values = np.cumsum(sorted_values) + fraction_of_examples = (np.arange(cumulative_values.size, dtype=np.float32) + / cumulative_values.size) + fig = plt.figure(frameon=False) + ax = fig.add_subplot('111') + ax.plot(fraction_of_examples, cumulative_values) + ax.set_ylabel('cumulative normalized values') + ax.set_xlabel('fraction of examples') + fig.canvas.draw() + width, height = fig.get_size_inches() * fig.get_dpi() + image = np.fromstring(fig.canvas.tostring_rgb(), dtype='uint8').reshape( + 1, int(height), int(width), 3) + return image + + cdf_plot = tf.compat.v1.py_func(cdf_plot, [values], tf.uint8) + tf.compat.v1.summary.image(name, cdf_plot) + + +def add_hist_image_summary(values, bins, name): + """Adds a tf.summary.image for a histogram plot of the values. + + Plots the histogram of values and creates a tf image summary. + + Args: + values: a 1-D float32 tensor containing the values. + bins: bin edges which will be directly passed to np.histogram. + name: name for the image summary. + """ + + def hist_plot(values, bins): + """Numpy function to plot hist.""" + fig = plt.figure(frameon=False) + ax = fig.add_subplot('111') + y, x = np.histogram(values, bins=bins) + ax.plot(x[:-1], y) + ax.set_ylabel('count') + ax.set_xlabel('value') + fig.canvas.draw() + width, height = fig.get_size_inches() * fig.get_dpi() + image = np.fromstring( + fig.canvas.tostring_rgb(), dtype='uint8').reshape( + 1, int(height), int(width), 3) + return image + + hist_plot = tf.compat.v1.py_func(hist_plot, [values, bins], tf.uint8) + tf.compat.v1.summary.image(name, hist_plot) diff --git a/models/official/vision/image_classification/README.md b/models/official/vision/image_classification/README.md new file mode 100644 index 0000000000000000000000000000000000000000..eb061d5b5f3284255bdb484cfbbb20bb3e157268 --- /dev/null +++ b/models/official/vision/image_classification/README.md @@ -0,0 +1,182 @@ +# Image Classification + +This folder contains TF 2.0 model examples for image classification: + +* [MNIST](#mnist) +* [Classifier Trainer](#classifier-trainer), a framework that uses the Keras +compile/fit methods for image classification models, including: + * ResNet + * EfficientNet[^1] + +[^1]: Currently a work in progress. We cannot match "AutoAugment (AA)" in [the original version](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet). +For more information about other types of models, please refer to this +[README file](../../README.md). + +## Before you begin +Please make sure that you have the latest version of TensorFlow +installed and +[add the models folder to your Python path](/official/#running-the-models). + +### ImageNet preparation + +#### Using TFDS +`classifier_trainer.py` supports ImageNet with +[TensorFlow Datasets (TFDS)](https://www.tensorflow.org/datasets/overview). + +Please see the following [example snippet](https://github.com/tensorflow/datasets/blob/master/tensorflow_datasets/scripts/download_and_prepare.py) +for more information on how to use TFDS to download and prepare datasets, and +specifically the [TFDS ImageNet readme](https://github.com/tensorflow/datasets/blob/master/docs/catalog/imagenet2012.md) +for manual download instructions. + +#### Legacy TFRecords +Download the ImageNet dataset and convert it to TFRecord format. 
+The following [script](https://github.com/tensorflow/tpu/blob/master/tools/datasets/imagenet_to_gcs.py)
+and [README](https://github.com/tensorflow/tpu/tree/master/tools/datasets#imagenet_to_gcspy)
+provide a few options.
+
+Note that the legacy ResNet runners, e.g.
+[resnet/resnet_ctl_imagenet_main.py](resnet/resnet_ctl_imagenet_main.py),
+require TFRecords, whereas `classifier_trainer.py` can use either format by
+setting the builder to 'records' or 'tfds' in the configuration.
+
+### Running on Cloud TPUs
+
+Note: These models will **not** work with TPUs on Colab.
+
+You can train image classification models on Cloud TPUs using
+[tf.distribute.experimental.TPUStrategy](https://www.tensorflow.org/api_docs/python/tf/distribute/experimental/TPUStrategy?version=nightly).
+If you are not familiar with Cloud TPUs, it is strongly recommended that you go
+through the
+[quickstart](https://cloud.google.com/tpu/docs/quickstart) to learn how to
+create a TPU and GCE VM.
+
+### Running on multiple GPU hosts
+
+You can also train these models on multiple hosts, each with GPUs, using
+[tf.distribute.experimental.MultiWorkerMirroredStrategy](https://www.tensorflow.org/api_docs/python/tf/distribute/experimental/MultiWorkerMirroredStrategy).
+
+The easiest way to run multi-host benchmarks is to set the
+[`TF_CONFIG`](https://www.tensorflow.org/guide/distributed_training#TF_CONFIG)
+appropriately on each host. For example, to run using
+`MultiWorkerMirroredStrategy` on 2 hosts, the `cluster` in `TF_CONFIG` should
+have 2 `host:port` entries, and host `i` should have the `task` in `TF_CONFIG`
+set to `{"type": "worker", "index": i}`. `MultiWorkerMirroredStrategy` will
+then automatically use all the GPUs available on each host.
+
+## MNIST
+
+To download the data and run the MNIST sample model locally for the first time,
+run the following command:
+
+```bash
+python3 mnist_main.py \
+  --model_dir=$MODEL_DIR \
+  --data_dir=$DATA_DIR \
+  --train_epochs=10 \
+  --distribution_strategy=one_device \
+  --num_gpus=$NUM_GPUS \
+  --download
+```
+
+To train the model on a Cloud TPU, run the following command:
+
+```bash
+python3 mnist_main.py \
+  --tpu=$TPU_NAME \
+  --model_dir=$MODEL_DIR \
+  --data_dir=$DATA_DIR \
+  --train_epochs=10 \
+  --distribution_strategy=tpu \
+  --download
+```
+
+Note: the `--download` flag is only required the first time you run the model.
+
+
+## Classifier Trainer
+The classifier trainer is a unified framework for running image classification
+models using Keras's compile/fit methods. Experiments are described by YAML
+configuration files; see [configs/examples](./configs/examples) for example
+configurations.
+
+The provided configuration files specify a per-replica batch size, which is
+scaled by the number of devices. For instance, if `batch size` = 64, then for
+1 GPU the global batch size is 64 * 1 = 64. For 8 GPUs, the global batch size
+is 64 * 8 = 512. Similarly, for a v3-8 TPU the global batch size is
+64 * 8 = 512, and for a v3-32 it is 64 * 32 = 2048.
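+
+As a sanity check, the same arithmetic can be reproduced directly from a
+`tf.distribute` strategy object. The snippet below is an illustrative sketch
+only and is not part of the trainer code:
+
+```python
+import tensorflow as tf
+
+# MirroredStrategy uses every GPU visible to this process.
+strategy = tf.distribute.MirroredStrategy()
+per_replica_batch_size = 64  # value taken from the YAML configuration
+global_batch_size = per_replica_batch_size * strategy.num_replicas_in_sync
+print(global_batch_size)  # 64 * 1 = 64 on one GPU, 64 * 8 = 512 on eight
+```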
+ +### ResNet50 + +#### On GPU: +```bash +python3 classifier_trainer.py \ + --mode=train_and_eval \ + --model_type=resnet \ + --dataset=imagenet \ + --model_dir=$MODEL_DIR \ + --data_dir=$DATA_DIR \ + --config_file=configs/examples/resnet/imagenet/gpu.yaml \ + --params_override='runtime.num_gpus=$NUM_GPUS' +``` + +To train on multiple hosts, each with GPUs attached using +[MultiWorkerMirroredStrategy](https://www.tensorflow.org/api_docs/python/tf/distribute/experimental/MultiWorkerMirroredStrategy) +please update `runtime` section in gpu.yaml +(or override using `--params_override`) with: + +```YAML +# gpu.yaml +runtime: + distribution_strategy: 'multi_worker_mirrored' + worker_hosts: '$HOST1:port,$HOST2:port' + num_gpus: $NUM_GPUS + task_index: 0 +``` +By having `task_index: 0` on the first host and `task_index: 1` on the second +and so on. `$HOST1` and `$HOST2` are the IP addresses of the hosts, and `port` +can be chosen any free port on the hosts. Only the first host will write +TensorBoard Summaries and save checkpoints. + +#### On TPU: +```bash +python3 classifier_trainer.py \ + --mode=train_and_eval \ + --model_type=resnet \ + --dataset=imagenet \ + --tpu=$TPU_NAME \ + --model_dir=$MODEL_DIR \ + --data_dir=$DATA_DIR \ + --config_file=configs/examples/resnet/imagenet/tpu.yaml +``` + +### EfficientNet +**Note: EfficientNet development is a work in progress.** +#### On GPU: +```bash +python3 classifier_trainer.py \ + --mode=train_and_eval \ + --model_type=efficientnet \ + --dataset=imagenet \ + --model_dir=$MODEL_DIR \ + --data_dir=$DATA_DIR \ + --config_file=configs/examples/efficientnet/imagenet/efficientnet-b0-gpu.yaml \ + --params_override='runtime.num_gpus=$NUM_GPUS' +``` + + +#### On TPU: +```bash +python3 classifier_trainer.py \ + --mode=train_and_eval \ + --model_type=efficientnet \ + --dataset=imagenet \ + --tpu=$TPU_NAME \ + --model_dir=$MODEL_DIR \ + --data_dir=$DATA_DIR \ + --config_file=configs/examples/efficientnet/imagenet/efficientnet-b0-tpu.yaml +``` + +Note that the number of GPU devices can be overridden in the command line using +`--params_overrides`. The TPU does not need this override as the device is fixed +by providing the TPU address or name with the `--tpu` flag. + diff --git a/models/official/vision/image_classification/__init__.py b/models/official/vision/image_classification/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/official/vision/image_classification/augment.py b/models/official/vision/image_classification/augment.py new file mode 100644 index 0000000000000000000000000000000000000000..b6ef23a229c80bcc1fec92d431996688dc34eaad --- /dev/null +++ b/models/official/vision/image_classification/augment.py @@ -0,0 +1,999 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""AutoAugment and RandAugment policies for enhanced image preprocessing. + +AutoAugment Reference: https://arxiv.org/abs/1805.09501 +RandAugment Reference: https://arxiv.org/abs/1909.13719 +""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import math +import tensorflow as tf +from typing import Any, Dict, List, Optional, Text, Tuple + +from tensorflow.python.keras.layers.preprocessing import image_preprocessing as image_ops + +# This signifies the max integer that the controller RNN could predict for the +# augmentation scheme. +_MAX_LEVEL = 10. + + +def to_4d(image: tf.Tensor) -> tf.Tensor: + """Converts an input Tensor to 4 dimensions. + + 4D image => [N, H, W, C] or [N, C, H, W] + 3D image => [1, H, W, C] or [1, C, H, W] + 2D image => [1, H, W, 1] + + Args: + image: The 2/3/4D input tensor. + + Returns: + A 4D image tensor. + + Raises: + `TypeError` if `image` is not a 2/3/4D tensor. + + """ + shape = tf.shape(image) + original_rank = tf.rank(image) + left_pad = tf.cast(tf.less_equal(original_rank, 3), dtype=tf.int32) + right_pad = tf.cast(tf.equal(original_rank, 2), dtype=tf.int32) + new_shape = tf.concat( + [ + tf.ones(shape=left_pad, dtype=tf.int32), + shape, + tf.ones(shape=right_pad, dtype=tf.int32), + ], + axis=0, + ) + return tf.reshape(image, new_shape) + + +def from_4d(image: tf.Tensor, ndims: tf.Tensor) -> tf.Tensor: + """Converts a 4D image back to `ndims` rank.""" + shape = tf.shape(image) + begin = tf.cast(tf.less_equal(ndims, 3), dtype=tf.int32) + end = 4 - tf.cast(tf.equal(ndims, 2), dtype=tf.int32) + new_shape = shape[begin:end] + return tf.reshape(image, new_shape) + + +def _convert_translation_to_transform(translations: tf.Tensor) -> tf.Tensor: + """Converts translations to a projective transform. + + The translation matrix looks like this: + [[1 0 -dx] + [0 1 -dy] + [0 0 1]] + + Args: + translations: The 2-element list representing [dx, dy], or a matrix of + 2-element lists representing [dx dy] to translate for each image. The + shape must be static. + + Returns: + The transformation matrix of shape (num_images, 8). + + Raises: + `TypeError` if + - the shape of `translations` is not known or + - the shape of `translations` is not rank 1 or 2. + + """ + translations = tf.convert_to_tensor(translations, dtype=tf.float32) + if translations.get_shape().ndims is None: + raise TypeError('translations rank must be statically known') + elif len(translations.get_shape()) == 1: + translations = translations[None] + elif len(translations.get_shape()) != 2: + raise TypeError('translations should have rank 1 or 2.') + num_translations = tf.shape(translations)[0] + + return tf.concat( + values=[ + tf.ones((num_translations, 1), tf.dtypes.float32), + tf.zeros((num_translations, 1), tf.dtypes.float32), + -translations[:, 0, None], + tf.zeros((num_translations, 1), tf.dtypes.float32), + tf.ones((num_translations, 1), tf.dtypes.float32), + -translations[:, 1, None], + tf.zeros((num_translations, 2), tf.dtypes.float32), + ], + axis=1, + ) + + +def _convert_angles_to_transform( + angles: tf.Tensor, + image_width: tf.Tensor, + image_height: tf.Tensor) -> tf.Tensor: + """Converts an angle or angles to a projective transform. + + Args: + angles: A scalar to rotate all images, or a vector to rotate a batch of + images. This must be a scalar. 
+ image_width: The width of the image(s) to be transformed. + image_height: The height of the image(s) to be transformed. + + Returns: + A tensor of shape (num_images, 8). + + Raises: + `TypeError` if `angles` is not rank 0 or 1. + + """ + angles = tf.convert_to_tensor(angles, dtype=tf.float32) + if len(angles.get_shape()) == 0: # pylint:disable=g-explicit-length-test + angles = angles[None] + elif len(angles.get_shape()) != 1: + raise TypeError('Angles should have a rank 0 or 1.') + x_offset = ((image_width - 1) - + (tf.math.cos(angles) * (image_width - 1) - tf.math.sin(angles) * + (image_height - 1))) / 2.0 + y_offset = ((image_height - 1) - + (tf.math.sin(angles) * (image_width - 1) + tf.math.cos(angles) * + (image_height - 1))) / 2.0 + num_angles = tf.shape(angles)[0] + return tf.concat( + values=[ + tf.math.cos(angles)[:, None], + -tf.math.sin(angles)[:, None], + x_offset[:, None], + tf.math.sin(angles)[:, None], + tf.math.cos(angles)[:, None], + y_offset[:, None], + tf.zeros((num_angles, 2), tf.dtypes.float32), + ], + axis=1, + ) + + +def transform(image: tf.Tensor, transforms) -> tf.Tensor: + """Prepares input data for `image_ops.transform`.""" + original_ndims = tf.rank(image) + transforms = tf.convert_to_tensor(transforms, dtype=tf.float32) + if transforms.shape.rank == 1: + transforms = transforms[None] + image = to_4d(image) + image = image_ops.transform( + images=image, + transforms=transforms, + interpolation='nearest') + return from_4d(image, original_ndims) + + +def translate(image: tf.Tensor, translations) -> tf.Tensor: + """Translates image(s) by provided vectors. + + Args: + image: An image Tensor of type uint8. + translations: A vector or matrix representing [dx dy]. + + Returns: + The translated version of the image. + + """ + transforms = _convert_translation_to_transform(translations) + return transform(image, transforms=transforms) + + +def rotate(image: tf.Tensor, degrees: float) -> tf.Tensor: + """Rotates the image by degrees either clockwise or counterclockwise. + + Args: + image: An image Tensor of type uint8. + degrees: Float, a scalar angle in degrees to rotate all images by. If + degrees is positive the image will be rotated clockwise otherwise it will + be rotated counterclockwise. + + Returns: + The rotated version of image. + + """ + # Convert from degrees to radians. + degrees_to_radians = math.pi / 180.0 + radians = tf.cast(degrees * degrees_to_radians, tf.float32) + + original_ndims = tf.rank(image) + image = to_4d(image) + + image_height = tf.cast(tf.shape(image)[1], tf.float32) + image_width = tf.cast(tf.shape(image)[2], tf.float32) + transforms = _convert_angles_to_transform(angles=radians, + image_width=image_width, + image_height=image_height) + # In practice, we should randomize the rotation degrees by flipping + # it negatively half the time, but that's done on 'degrees' outside + # of the function. + image = transform(image, transforms=transforms) + return from_4d(image, original_ndims) + + +def blend(image1: tf.Tensor, image2: tf.Tensor, factor: float) -> tf.Tensor: + """Blend image1 and image2 using 'factor'. + + Factor can be above 0.0. A value of 0.0 means only image1 is used. + A value of 1.0 means only image2 is used. A value between 0.0 and + 1.0 means we linearly interpolate the pixel values between the two + images. A value greater than 1.0 "extrapolates" the difference + between the two pixel values, and we clip the results to values + between 0 and 255. + + Args: + image1: An image Tensor of type uint8. 
+ image2: An image Tensor of type uint8. + factor: A floating point value above 0.0. + + Returns: + A blended image Tensor of type uint8. + """ + if factor == 0.0: + return tf.convert_to_tensor(image1) + if factor == 1.0: + return tf.convert_to_tensor(image2) + + image1 = tf.cast(image1, tf.float32) + image2 = tf.cast(image2, tf.float32) + + difference = image2 - image1 + scaled = factor * difference + + # Do addition in float. + temp = tf.cast(image1, tf.float32) + scaled + + # Interpolate + if factor > 0.0 and factor < 1.0: + # Interpolation means we always stay within 0 and 255. + return tf.cast(temp, tf.uint8) + + # Extrapolate: + # + # We need to clip and then cast. + return tf.cast(tf.clip_by_value(temp, 0.0, 255.0), tf.uint8) + + +def cutout(image: tf.Tensor, pad_size: int, replace: int = 0) -> tf.Tensor: + """Apply cutout (https://arxiv.org/abs/1708.04552) to image. + + This operation applies a (2*pad_size x 2*pad_size) mask of zeros to + a random location within `img`. The pixel values filled in will be of the + value `replace`. The located where the mask will be applied is randomly + chosen uniformly over the whole image. + + Args: + image: An image Tensor of type uint8. + pad_size: Specifies how big the zero mask that will be generated is that + is applied to the image. The mask will be of size + (2*pad_size x 2*pad_size). + replace: What pixel value to fill in the image in the area that has + the cutout mask applied to it. + + Returns: + An image Tensor that is of type uint8. + """ + image_height = tf.shape(image)[0] + image_width = tf.shape(image)[1] + + # Sample the center location in the image where the zero mask will be applied. + cutout_center_height = tf.random.uniform( + shape=[], minval=0, maxval=image_height, + dtype=tf.int32) + + cutout_center_width = tf.random.uniform( + shape=[], minval=0, maxval=image_width, + dtype=tf.int32) + + lower_pad = tf.maximum(0, cutout_center_height - pad_size) + upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size) + left_pad = tf.maximum(0, cutout_center_width - pad_size) + right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size) + + cutout_shape = [image_height - (lower_pad + upper_pad), + image_width - (left_pad + right_pad)] + padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]] + mask = tf.pad( + tf.zeros(cutout_shape, dtype=image.dtype), + padding_dims, constant_values=1) + mask = tf.expand_dims(mask, -1) + mask = tf.tile(mask, [1, 1, 3]) + image = tf.where( + tf.equal(mask, 0), + tf.ones_like(image, dtype=image.dtype) * replace, + image) + return image + + +def solarize(image: tf.Tensor, threshold: int = 128) -> tf.Tensor: + # For each pixel in the image, select the pixel + # if the value is less than the threshold. + # Otherwise, subtract 255 from the pixel. + return tf.where(image < threshold, image, 255 - image) + + +def solarize_add(image: tf.Tensor, + addition: int = 0, + threshold: int = 128) -> tf.Tensor: + # For each pixel in the image less than threshold + # we add 'addition' amount to it and then clip the + # pixel value to be between 0 and 255. The value + # of 'addition' is between -128 and 128. 
+ added_image = tf.cast(image, tf.int64) + addition + added_image = tf.cast(tf.clip_by_value(added_image, 0, 255), tf.uint8) + return tf.where(image < threshold, added_image, image) + + +def color(image: tf.Tensor, factor: float) -> tf.Tensor: + """Equivalent of PIL Color.""" + degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image)) + return blend(degenerate, image, factor) + + +def contrast(image: tf.Tensor, factor: float) -> tf.Tensor: + """Equivalent of PIL Contrast.""" + degenerate = tf.image.rgb_to_grayscale(image) + # Cast before calling tf.histogram. + degenerate = tf.cast(degenerate, tf.int32) + + # Compute the grayscale histogram, then compute the mean pixel value, + # and create a constant image size of that value. Use that as the + # blending degenerate target of the original image. + hist = tf.histogram_fixed_width(degenerate, [0, 255], nbins=256) + mean = tf.reduce_sum(tf.cast(hist, tf.float32)) / 256.0 + degenerate = tf.ones_like(degenerate, dtype=tf.float32) * mean + degenerate = tf.clip_by_value(degenerate, 0.0, 255.0) + degenerate = tf.image.grayscale_to_rgb(tf.cast(degenerate, tf.uint8)) + return blend(degenerate, image, factor) + + +def brightness(image: tf.Tensor, factor: float) -> tf.Tensor: + """Equivalent of PIL Brightness.""" + degenerate = tf.zeros_like(image) + return blend(degenerate, image, factor) + + +def posterize(image: tf.Tensor, bits: int) -> tf.Tensor: + """Equivalent of PIL Posterize.""" + shift = 8 - bits + return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift) + + +def wrapped_rotate(image: tf.Tensor, degrees: float, replace: int) -> tf.Tensor: + """Applies rotation with wrap/unwrap.""" + image = rotate(wrap(image), degrees=degrees) + return unwrap(image, replace) + + +def translate_x(image: tf.Tensor, pixels: int, replace: int) -> tf.Tensor: + """Equivalent of PIL Translate in X dimension.""" + image = translate(wrap(image), [-pixels, 0]) + return unwrap(image, replace) + + +def translate_y(image: tf.Tensor, pixels: int, replace: int) -> tf.Tensor: + """Equivalent of PIL Translate in Y dimension.""" + image = translate(wrap(image), [0, -pixels]) + return unwrap(image, replace) + + +def shear_x(image: tf.Tensor, level: float, replace: int) -> tf.Tensor: + """Equivalent of PIL Shearing in X dimension.""" + # Shear parallel to x axis is a projective transform + # with a matrix form of: + # [1 level + # 0 1]. + image = transform(image=wrap(image), + transforms=[1., level, 0., 0., 1., 0., 0., 0.]) + return unwrap(image, replace) + + +def shear_y(image: tf.Tensor, level: float, replace: int) -> tf.Tensor: + """Equivalent of PIL Shearing in Y dimension.""" + # Shear parallel to y axis is a projective transform + # with a matrix form of: + # [1 0 + # level 1]. + image = transform(image=wrap(image), + transforms=[1., 0., 0., level, 1., 0., 0., 0.]) + return unwrap(image, replace) + + +def autocontrast(image: tf.Tensor) -> tf.Tensor: + """Implements Autocontrast function from PIL using TF ops. + + Args: + image: A 3D uint8 tensor. + + Returns: + The image after it has had autocontrast applied to it and will be of type + uint8. + """ + + def scale_channel(image: tf.Tensor) -> tf.Tensor: + """Scale the 2D image using the autocontrast rule.""" + # A possibly cheaper version can be done using cumsum/unique_with_counts + # over the histogram values, rather than iterating over the entire image. + # to compute mins and maxes. 
+ lo = tf.cast(tf.reduce_min(image), tf.float32) + hi = tf.cast(tf.reduce_max(image), tf.float32) + + # Scale the image, making the lowest value 0 and the highest value 255. + def scale_values(im): + scale = 255.0 / (hi - lo) + offset = -lo * scale + im = tf.cast(im, tf.float32) * scale + offset + im = tf.clip_by_value(im, 0.0, 255.0) + return tf.cast(im, tf.uint8) + + result = tf.cond(hi > lo, lambda: scale_values(image), lambda: image) + return result + + # Assumes RGB for now. Scales each channel independently + # and then stacks the result. + s1 = scale_channel(image[:, :, 0]) + s2 = scale_channel(image[:, :, 1]) + s3 = scale_channel(image[:, :, 2]) + image = tf.stack([s1, s2, s3], 2) + return image + + +def sharpness(image: tf.Tensor, factor: float) -> tf.Tensor: + """Implements Sharpness function from PIL using TF ops.""" + orig_image = image + image = tf.cast(image, tf.float32) + # Make image 4D for conv operation. + image = tf.expand_dims(image, 0) + # SMOOTH PIL Kernel. + kernel = tf.constant( + [[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32, + shape=[3, 3, 1, 1]) / 13. + # Tile across channel dimension. + kernel = tf.tile(kernel, [1, 1, 3, 1]) + strides = [1, 1, 1, 1] + degenerate = tf.nn.depthwise_conv2d( + image, kernel, strides, padding='VALID', dilations=[1, 1]) + degenerate = tf.clip_by_value(degenerate, 0.0, 255.0) + degenerate = tf.squeeze(tf.cast(degenerate, tf.uint8), [0]) + + # For the borders of the resulting image, fill in the values of the + # original image. + mask = tf.ones_like(degenerate) + padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]]) + padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]]) + result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image) + + # Blend the final result. + return blend(result, orig_image, factor) + + +def equalize(image: tf.Tensor) -> tf.Tensor: + """Implements Equalize function from PIL using TF ops.""" + def scale_channel(im, c): + """Scale the data in the channel to implement equalize.""" + im = tf.cast(im[:, :, c], tf.int32) + # Compute the histogram of the image channel. + histo = tf.histogram_fixed_width(im, [0, 255], nbins=256) + + # For the purposes of computing the step, filter out the nonzeros. + nonzero = tf.where(tf.not_equal(histo, 0)) + nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1]) + step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255 + + def build_lut(histo, step): + # Compute the cumulative sum, shifting by step // 2 + # and then normalization by step. + lut = (tf.cumsum(histo) + (step // 2)) // step + # Shift lut, prepending with 0. + lut = tf.concat([[0], lut[:-1]], 0) + # Clip the counts to be in range. This is done + # in the C code for image.point. + return tf.clip_by_value(lut, 0, 255) + + # If step is zero, return the original image. Otherwise, build + # lut from the full histogram and step and then index from it. + result = tf.cond(tf.equal(step, 0), + lambda: im, + lambda: tf.gather(build_lut(histo, step), im)) + + return tf.cast(result, tf.uint8) + + # Assumes RGB for now. Scales each channel independently + # and then stacks the result. 
+ s1 = scale_channel(image, 0) + s2 = scale_channel(image, 1) + s3 = scale_channel(image, 2) + image = tf.stack([s1, s2, s3], 2) + return image + + +def invert(image: tf.Tensor) -> tf.Tensor: + """Inverts the image pixels.""" + image = tf.convert_to_tensor(image) + return 255 - image + + +def wrap(image: tf.Tensor) -> tf.Tensor: + """Returns 'image' with an extra channel set to all 1s.""" + shape = tf.shape(image) + extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype) + extended = tf.concat([image, extended_channel], axis=2) + return extended + + +def unwrap(image: tf.Tensor, replace: int) -> tf.Tensor: + """Unwraps an image produced by wrap. + + Where there is a 0 in the last channel for every spatial position, + the rest of the three channels in that spatial dimension are grayed + (set to 128). Operations like translate and shear on a wrapped + Tensor will leave 0s in empty locations. Some transformations look + at the intensity of values to do preprocessing, and we want these + empty pixels to assume the 'average' value, rather than pure black. + + + Args: + image: A 3D Image Tensor with 4 channels. + replace: A one or three value 1D tensor to fill empty pixels. + + Returns: + image: A 3D image Tensor with 3 channels. + """ + image_shape = tf.shape(image) + # Flatten the spatial dimensions. + flattened_image = tf.reshape(image, [-1, image_shape[2]]) + + # Find all pixels where the last channel is zero. + alpha_channel = tf.expand_dims(flattened_image[:, 3], axis=-1) + + replace = tf.concat([replace, tf.ones([1], image.dtype)], 0) + + # Where they are zero, fill them in with 'replace'. + flattened_image = tf.where( + tf.equal(alpha_channel, 0), + tf.ones_like(flattened_image, dtype=image.dtype) * replace, + flattened_image) + + image = tf.reshape(flattened_image, image_shape) + image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], 3]) + return image + + +def _randomly_negate_tensor(tensor): + """With 50% prob turn the tensor negative.""" + should_flip = tf.cast(tf.floor(tf.random.uniform([]) + 0.5), tf.bool) + final_tensor = tf.cond(should_flip, lambda: tensor, lambda: -tensor) + return final_tensor + + +def _rotate_level_to_arg(level: float): + level = (level/_MAX_LEVEL) * 30. + level = _randomly_negate_tensor(level) + return (level,) + + +def _shrink_level_to_arg(level: float): + """Converts level to ratio by which we shrink the image content.""" + if level == 0: + return (1.0,) # if level is zero, do not shrink the image + # Maximum shrinking ratio is 2.9. + level = 2. / (_MAX_LEVEL / level) + 0.9 + return (level,) + + +def _enhance_level_to_arg(level: float): + return ((level/_MAX_LEVEL) * 1.8 + 0.1,) + + +def _shear_level_to_arg(level: float): + level = (level/_MAX_LEVEL) * 0.3 + # Flip level to negative with 50% chance. + level = _randomly_negate_tensor(level) + return (level,) + + +def _translate_level_to_arg(level: float, translate_const: float): + level = (level/_MAX_LEVEL) * float(translate_const) + # Flip level to negative with 50% chance. + level = _randomly_negate_tensor(level) + return (level,) + + +def _mult_to_arg(level: float, multiplier: float = 1.): + return (int((level / _MAX_LEVEL) * multiplier),) + + +def _apply_func_with_prob(func: Any, + image: tf.Tensor, + args: Any, + prob: float): + """Apply `func` to image w/ `args` as input with probability `prob`.""" + assert isinstance(args, tuple) + + # Apply the function with probability `prob`. 
+ should_apply_op = tf.cast( + tf.floor(tf.random.uniform([], dtype=tf.float32) + prob), tf.bool) + augmented_image = tf.cond( + should_apply_op, + lambda: func(image, *args), + lambda: image) + return augmented_image + + +def select_and_apply_random_policy(policies: Any, image: tf.Tensor): + """Select a random policy from `policies` and apply it to `image`.""" + policy_to_select = tf.random.uniform([], maxval=len(policies), dtype=tf.int32) + # Note that using tf.case instead of tf.conds would result in significantly + # larger graphs and would even break export for some larger policies. + for (i, policy) in enumerate(policies): + image = tf.cond( + tf.equal(i, policy_to_select), + lambda selected_policy=policy: selected_policy(image), + lambda: image) + return image + + +NAME_TO_FUNC = { + 'AutoContrast': autocontrast, + 'Equalize': equalize, + 'Invert': invert, + 'Rotate': wrapped_rotate, + 'Posterize': posterize, + 'Solarize': solarize, + 'SolarizeAdd': solarize_add, + 'Color': color, + 'Contrast': contrast, + 'Brightness': brightness, + 'Sharpness': sharpness, + 'ShearX': shear_x, + 'ShearY': shear_y, + 'TranslateX': translate_x, + 'TranslateY': translate_y, + 'Cutout': cutout, +} + +# Functions that have a 'replace' parameter +REPLACE_FUNCS = frozenset({ + 'Rotate', + 'TranslateX', + 'ShearX', + 'ShearY', + 'TranslateY', + 'Cutout', +}) + + +def level_to_arg(cutout_const: float, translate_const: float): + """Creates a dict mapping image operation names to their arguments.""" + + no_arg = lambda level: () + posterize_arg = lambda level: _mult_to_arg(level, 4) + solarize_arg = lambda level: _mult_to_arg(level, 256) + solarize_add_arg = lambda level: _mult_to_arg(level, 110) + cutout_arg = lambda level: _mult_to_arg(level, cutout_const) + translate_arg = lambda level: _translate_level_to_arg(level, translate_const) + + args = { + 'AutoContrast': no_arg, + 'Equalize': no_arg, + 'Invert': no_arg, + 'Rotate': _rotate_level_to_arg, + 'Posterize': posterize_arg, + 'Solarize': solarize_arg, + 'SolarizeAdd': solarize_add_arg, + 'Color': _enhance_level_to_arg, + 'Contrast': _enhance_level_to_arg, + 'Brightness': _enhance_level_to_arg, + 'Sharpness': _enhance_level_to_arg, + 'ShearX': _shear_level_to_arg, + 'ShearY': _shear_level_to_arg, + 'Cutout': cutout_arg, + 'TranslateX': translate_arg, + 'TranslateY': translate_arg, + } + return args + + +def _parse_policy_info(name: Text, + prob: float, + level: float, + replace_value: List[int], + cutout_const: float, + translate_const: float) -> Tuple[Any, float, Any]: + """Return the function that corresponds to `name` and update `level` param.""" + func = NAME_TO_FUNC[name] + args = level_to_arg(cutout_const, translate_const)[name](level) + + if name in REPLACE_FUNCS: + # Add in replace arg if it is required for the function that is called. + args = tuple(list(args) + [replace_value]) + + return func, prob, args + + +class ImageAugment(object): + """Image augmentation class for applying image distortions.""" + + def distort(self, image: tf.Tensor) -> tf.Tensor: + """Given an image tensor, returns a distorted image with the same shape. + + Args: + image: `Tensor` of shape [height, width, 3] representing an image. + + Returns: + The augmented version of `image`. + """ + raise NotImplementedError() + + +class AutoAugment(ImageAugment): + """Applies the AutoAugment policy to images. + + AutoAugment is from the paper: https://arxiv.org/abs/1805.09501. 
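+
+  A minimal usage sketch, mirroring the accompanying unit tests (the input is
+  assumed to be a uint8 image tensor of shape [height, width, 3]):
+
+    augmenter = AutoAugment(augmentation_name='v0')
+    image = tf.zeros((224, 224, 3), dtype=tf.uint8)
+    aug_image = augmenter.distort(image)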
+ """ + + def __init__(self, + augmentation_name: Text = 'v0', + policies: Optional[Dict[Text, Any]] = None, + cutout_const: float = 100, + translate_const: float = 250): + """Applies the AutoAugment policy to images. + + Args: + augmentation_name: The name of the AutoAugment policy to use. The + available options are `v0` and `test`. `v0` is the policy used for all + of the results in the paper and was found to achieve the best results on + the COCO dataset. `v1`, `v2` and `v3` are additional good policies found + on the COCO dataset that have slight variation in what operations were + used during the search procedure along with how many operations are + applied in parallel to a single image (2 vs 3). + policies: list of lists of tuples in the form `(func, prob, level)`, + `func` is a string name of the augmentation function, `prob` is the + probability of applying the `func` operation, `level` is the input + argument for `func`. + cutout_const: multiplier for applying cutout. + translate_const: multiplier for applying translation. + """ + super(AutoAugment, self).__init__() + + if policies is None: + self.available_policies = { + 'v0': self.policy_v0(), + 'test': self.policy_test(), + 'simple': self.policy_simple(), + } + + if augmentation_name not in self.available_policies: + raise ValueError( + 'Invalid augmentation_name: {}'.format(augmentation_name)) + + self.augmentation_name = augmentation_name + self.policies = self.available_policies[augmentation_name] + self.cutout_const = float(cutout_const) + self.translate_const = float(translate_const) + + def distort(self, image: tf.Tensor) -> tf.Tensor: + """Applies the AutoAugment policy to `image`. + + AutoAugment is from the paper: https://arxiv.org/abs/1805.09501. + + Args: + image: `Tensor` of shape [height, width, 3] representing an image. + + Returns: + A version of image that now has data augmentation applied to it based on + the `policies` pass into the function. + """ + input_image_type = image.dtype + + if input_image_type != tf.uint8: + image = tf.clip_by_value(image, 0.0, 255.0) + image = tf.cast(image, dtype=tf.uint8) + + replace_value = [128] * 3 + + # func is the string name of the augmentation function, prob is the + # probability of applying the operation and level is the parameter + # associated with the tf op. + + # tf_policies are functions that take in an image and return an augmented + # image. + tf_policies = [] + for policy in self.policies: + tf_policy = [] + # Link string name to the correct python function and make sure the + # correct argument is passed into that function. + for policy_info in policy: + policy_info = list(policy_info) + [ + replace_value, self.cutout_const, self.translate_const + ] + tf_policy.append(_parse_policy_info(*policy_info)) + # Now build the tf policy that will apply the augmentation procedue + # on image. + def make_final_policy(tf_policy_): + + def final_policy(image_): + for func, prob, args in tf_policy_: + image_ = _apply_func_with_prob(func, image_, args, prob) + return image_ + + return final_policy + + tf_policies.append(make_final_policy(tf_policy)) + + image = select_and_apply_random_policy(tf_policies, image) + image = tf.cast(image, dtype=input_image_type) + return image + + @staticmethod + def policy_v0(): + """Autoaugment policy that was used in AutoAugment Paper. + + Each tuple is an augmentation operation of the form + (operation, probability, magnitude). Each element in policy is a + sub-policy that will be applied sequentially on the image. 
+ + Returns: + the policy. + """ + + # TODO(dankondratyuk): tensorflow_addons defines custom ops, which + # for some reason are not included when building/linking + # This results in the error, "Op type not registered + # 'Addons>ImageProjectiveTransformV2' in binary" when running on borg TPUs + policy = [ + [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)], + [('Color', 0.4, 9), ('Equalize', 0.6, 3)], + [('Color', 0.4, 1), ('Rotate', 0.6, 8)], + [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)], + [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)], + [('Color', 0.2, 0), ('Equalize', 0.8, 8)], + [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)], + [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)], + [('Color', 0.6, 1), ('Equalize', 1.0, 2)], + [('Invert', 0.4, 9), ('Rotate', 0.6, 0)], + [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)], + [('Color', 0.4, 7), ('Equalize', 0.6, 0)], + [('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)], + [('Solarize', 0.6, 8), ('Color', 0.6, 9)], + [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)], + [('Rotate', 1.0, 7), ('TranslateY', 0.8, 9)], + [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)], + [('ShearY', 0.8, 0), ('Color', 0.6, 4)], + [('Color', 1.0, 0), ('Rotate', 0.6, 2)], + [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)], + [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], + [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)], + [('Posterize', 0.8, 2), ('Solarize', 0.6, 10)], + [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], + [('Color', 0.8, 6), ('Rotate', 0.4, 5)], + ] + return policy + + @staticmethod + def policy_simple(): + """Same as `policy_v0`, except with custom ops removed.""" + + policy = [ + [('Color', 0.4, 9), ('Equalize', 0.6, 3)], + [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)], + [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)], + [('Color', 0.2, 0), ('Equalize', 0.8, 8)], + [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)], + [('Color', 0.6, 1), ('Equalize', 1.0, 2)], + [('Color', 0.4, 7), ('Equalize', 0.6, 0)], + [('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)], + [('Solarize', 0.6, 8), ('Color', 0.6, 9)], + [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)], + [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], + [('Posterize', 0.8, 2), ('Solarize', 0.6, 10)], + [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], + ] + return policy + + @staticmethod + def policy_test(): + """Autoaugment test policy for debugging.""" + policy = [ + [('TranslateX', 1.0, 4), ('Equalize', 1.0, 10)], + ] + return policy + + +class RandAugment(ImageAugment): + """Applies the RandAugment policy to images. + + RandAugment is from the paper https://arxiv.org/abs/1909.13719, + """ + + def __init__(self, + num_layers: int = 2, + magnitude: float = 10., + cutout_const: float = 40., + translate_const: float = 100.): + """Applies the RandAugment policy to images. + + Args: + num_layers: Integer, the number of augmentation transformations to apply + sequentially to an image. Represented as (N) in the paper. Usually best + values will be in the range [1, 3]. + magnitude: Integer, shared magnitude across all augmentation operations. + Represented as (M) in the paper. Usually best values are in the range + [5, 10]. + cutout_const: multiplier for applying cutout. + translate_const: multiplier for applying translation. 
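+
+    A minimal usage sketch, mirroring the accompanying unit tests (the input
+    is assumed to be a uint8 image tensor):
+
+      augmenter = RandAugment(num_layers=2, magnitude=10.)
+      aug_image = augmenter.distort(tf.zeros((224, 224, 3), dtype=tf.uint8))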
+ """ + super(RandAugment, self).__init__() + + self.num_layers = num_layers + self.magnitude = float(magnitude) + self.cutout_const = float(cutout_const) + self.translate_const = float(translate_const) + self.available_ops = [ + 'AutoContrast', 'Equalize', 'Invert', 'Rotate', 'Posterize', 'Solarize', + 'Color', 'Contrast', 'Brightness', 'Sharpness', 'ShearX', 'ShearY', + 'TranslateX', 'TranslateY', 'Cutout', 'SolarizeAdd' + ] + + def distort(self, image: tf.Tensor) -> tf.Tensor: + """Applies the RandAugment policy to `image`. + + Args: + image: `Tensor` of shape [height, width, 3] representing an image. + + Returns: + The augmented version of `image`. + """ + input_image_type = image.dtype + + if input_image_type != tf.uint8: + image = tf.clip_by_value(image, 0.0, 255.0) + image = tf.cast(image, dtype=tf.uint8) + + replace_value = [128] * 3 + min_prob, max_prob = 0.2, 0.8 + + for _ in range(self.num_layers): + op_to_select = tf.random.uniform( + [], maxval=len(self.available_ops) + 1, dtype=tf.int32) + + branch_fns = [] + for (i, op_name) in enumerate(self.available_ops): + prob = tf.random.uniform([], + minval=min_prob, + maxval=max_prob, + dtype=tf.float32) + func, _, args = _parse_policy_info(op_name, + prob, + self.magnitude, + replace_value, + self.cutout_const, + self.translate_const) + branch_fns.append(( + i, + # pylint:disable=g-long-lambda + lambda selected_func=func, selected_args=args: selected_func( + image, *selected_args))) + # pylint:enable=g-long-lambda + + image = tf.switch_case(branch_index=op_to_select, + branch_fns=branch_fns, + default=lambda: tf.identity(image)) + + image = tf.cast(image, dtype=input_image_type) + return image diff --git a/models/official/vision/image_classification/augment_test.py b/models/official/vision/image_classification/augment_test.py new file mode 100644 index 0000000000000000000000000000000000000000..76bdb2b7b9db4fc109f39674c68ae0c1169f3f12 --- /dev/null +++ b/models/official/vision/image_classification/augment_test.py @@ -0,0 +1,143 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for autoaugment.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +from absl.testing import parameterized + +import tensorflow as tf + +from official.vision.image_classification import augment + + +def get_dtype_test_cases(): + return [ + ('uint8', tf.uint8), + ('int32', tf.int32), + ('float16', tf.float16), + ('float32', tf.float32), + ] + + +@parameterized.named_parameters(get_dtype_test_cases()) +class TransformsTest(parameterized.TestCase, tf.test.TestCase): + """Basic tests for fundamental transformations.""" + + def test_to_from_4d(self, dtype): + for shape in [(10, 10), (10, 10, 10), (10, 10, 10, 10)]: + original_ndims = len(shape) + image = tf.zeros(shape, dtype=dtype) + image_4d = augment.to_4d(image) + self.assertEqual(4, tf.rank(image_4d)) + self.assertAllEqual(image, augment.from_4d(image_4d, original_ndims)) + + def test_transform(self, dtype): + image = tf.constant([[1, 2], [3, 4]], dtype=dtype) + self.assertAllEqual(augment.transform(image, transforms=[1]*8), + [[4, 4], [4, 4]]) + + def test_translate(self, dtype): + image = tf.constant( + [[1, 0, 1, 0], + [0, 1, 0, 1], + [1, 0, 1, 0], + [0, 1, 0, 1]], + dtype=dtype) + translations = [-1, -1] + translated = augment.translate(image=image, + translations=translations) + expected = [ + [1, 0, 1, 1], + [0, 1, 0, 0], + [1, 0, 1, 1], + [1, 0, 1, 1]] + self.assertAllEqual(translated, expected) + + def test_translate_shapes(self, dtype): + translation = [0, 0] + for shape in [(3, 3), (5, 5), (224, 224, 3)]: + image = tf.zeros(shape, dtype=dtype) + self.assertAllEqual(image, augment.translate(image, translation)) + + def test_translate_invalid_translation(self, dtype): + image = tf.zeros((1, 1), dtype=dtype) + invalid_translation = [[[1, 1]]] + with self.assertRaisesRegex(TypeError, 'rank 1 or 2'): + _ = augment.translate(image, invalid_translation) + + def test_rotate(self, dtype): + image = tf.reshape(tf.cast(tf.range(9), dtype), (3, 3)) + rotation = 90. + transformed = augment.rotate(image=image, degrees=rotation) + expected = [[2, 5, 8], + [1, 4, 7], + [0, 3, 6]] + self.assertAllEqual(transformed, expected) + + def test_rotate_shapes(self, dtype): + degrees = 0. 
+ for shape in [(3, 3), (5, 5), (224, 224, 3)]: + image = tf.zeros(shape, dtype=dtype) + self.assertAllEqual(image, augment.rotate(image, degrees)) + + +class AutoaugmentTest(tf.test.TestCase): + + def test_autoaugment(self): + """Smoke test to be sure there are no syntax errors.""" + image = tf.zeros((224, 224, 3), dtype=tf.uint8) + + augmenter = augment.AutoAugment() + aug_image = augmenter.distort(image) + + self.assertEqual((224, 224, 3), aug_image.shape) + + def test_randaug(self): + """Smoke test to be sure there are no syntax errors.""" + image = tf.zeros((224, 224, 3), dtype=tf.uint8) + + augmenter = augment.RandAugment() + aug_image = augmenter.distort(image) + + self.assertEqual((224, 224, 3), aug_image.shape) + + def test_all_policy_ops(self): + """Smoke test to be sure all augmentation functions can execute.""" + + prob = 1 + magnitude = 10 + replace_value = [128] * 3 + cutout_const = 100 + translate_const = 250 + + image = tf.ones((224, 224, 3), dtype=tf.uint8) + + for op_name in augment.NAME_TO_FUNC: + func, _, args = augment._parse_policy_info(op_name, + prob, + magnitude, + replace_value, + cutout_const, + translate_const) + image = func(image, *args) + + self.assertEqual((224, 224, 3), image.shape) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/vision/image_classification/callbacks.py b/models/official/vision/image_classification/callbacks.py new file mode 100644 index 0000000000000000000000000000000000000000..985d0c60cc0b866e10ad350986c004e4ea4ac161 --- /dev/null +++ b/models/official/vision/image_classification/callbacks.py @@ -0,0 +1,258 @@ +# Lint as: python3 +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Common modules for callbacks.""" +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import os +from typing import Any, List, MutableMapping, Text +from absl import logging +import tensorflow as tf + +from official.utils.misc import keras_utils +from official.vision.image_classification import optimizer_factory + + +def get_callbacks(model_checkpoint: bool = True, + include_tensorboard: bool = True, + time_history: bool = True, + track_lr: bool = True, + write_model_weights: bool = True, + apply_moving_average: bool = False, + initial_step: int = 0, + batch_size: int = 0, + log_steps: int = 0, + model_dir: str = None) -> List[tf.keras.callbacks.Callback]: + """Get all callbacks.""" + model_dir = model_dir or '' + callbacks = [] + if model_checkpoint: + ckpt_full_path = os.path.join(model_dir, 'model.ckpt-{epoch:04d}') + callbacks.append(tf.keras.callbacks.ModelCheckpoint( + ckpt_full_path, save_weights_only=True, verbose=1)) + if include_tensorboard: + callbacks.append( + CustomTensorBoard( + log_dir=model_dir, + track_lr=track_lr, + initial_step=initial_step, + write_images=write_model_weights)) + if time_history: + callbacks.append( + keras_utils.TimeHistory( + batch_size, + log_steps, + logdir=model_dir if include_tensorboard else None)) + if apply_moving_average: + # Save moving average model to a different file so that + # we can resume training from a checkpoint + ckpt_full_path = os.path.join( + model_dir, 'average', 'model.ckpt-{epoch:04d}') + callbacks.append(AverageModelCheckpoint( + update_weights=False, + filepath=ckpt_full_path, + save_weights_only=True, + verbose=1)) + callbacks.append(MovingAverageCallback()) + return callbacks + + +def get_scalar_from_tensor(t: tf.Tensor) -> int: + """Utility function to convert a Tensor to a scalar.""" + t = tf.keras.backend.get_value(t) + if callable(t): + return t() + else: + return t + + +class CustomTensorBoard(tf.keras.callbacks.TensorBoard): + """A customized TensorBoard callback that tracks additional datapoints. + + Metrics tracked: + - Global learning rate + + Attributes: + log_dir: the path of the directory where to save the log files to be parsed + by TensorBoard. + track_lr: `bool`, whether or not to track the global learning rate. + initial_step: the initial step, used for preemption recovery. + **kwargs: Additional arguments for backwards compatibility. Possible key is + `period`. 
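+
+  A minimal construction sketch (paths and values are illustrative, and
+  `model`/`dataset` are assumed to exist; in this codebase the callback is
+  normally created via `get_callbacks` above):
+
+    tb_callback = CustomTensorBoard(
+        log_dir='/tmp/model_dir', track_lr=True, initial_step=0)
+    model.fit(dataset, epochs=2, callbacks=[tb_callback])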
+ """ + + # TODO(b/146499062): track params, flops, log lr, l2 loss, + # classification loss + + def __init__(self, + log_dir: str, + track_lr: bool = False, + initial_step: int = 0, + **kwargs): + super(CustomTensorBoard, self).__init__(log_dir=log_dir, **kwargs) + self.step = initial_step + self._track_lr = track_lr + + def on_batch_begin(self, + epoch: int, + logs: MutableMapping[str, Any] = None) -> None: + self.step += 1 + if logs is None: + logs = {} + logs.update(self._calculate_metrics()) + super(CustomTensorBoard, self).on_batch_begin(epoch, logs) + + def on_epoch_begin(self, + epoch: int, + logs: MutableMapping[str, Any] = None) -> None: + if logs is None: + logs = {} + metrics = self._calculate_metrics() + logs.update(metrics) + for k, v in metrics.items(): + logging.info('Current %s: %f', k, v) + super(CustomTensorBoard, self).on_epoch_begin(epoch, logs) + + def on_epoch_end(self, + epoch: int, + logs: MutableMapping[str, Any] = None) -> None: + if logs is None: + logs = {} + metrics = self._calculate_metrics() + logs.update(metrics) + super(CustomTensorBoard, self).on_epoch_end(epoch, logs) + + def _calculate_metrics(self) -> MutableMapping[str, Any]: + logs = {} + # TODO(b/149030439): disable LR reporting. + # if self._track_lr: + # logs['learning_rate'] = self._calculate_lr() + return logs + + def _calculate_lr(self) -> int: + """Calculates the learning rate given the current step.""" + return get_scalar_from_tensor( + self._get_base_optimizer()._decayed_lr(var_dtype=tf.float32)) # pylint:disable=protected-access + + def _get_base_optimizer(self) -> tf.keras.optimizers.Optimizer: + """Get the base optimizer used by the current model.""" + + optimizer = self.model.optimizer + + # The optimizer might be wrapped by another class, so unwrap it + while hasattr(optimizer, '_optimizer'): + optimizer = optimizer._optimizer # pylint:disable=protected-access + + return optimizer + + +class MovingAverageCallback(tf.keras.callbacks.Callback): + """A Callback to be used with a `MovingAverage` optimizer. + + Applies moving average weights to the model during validation time to test + and predict on the averaged weights rather than the current model weights. + Once training is complete, the model weights will be overwritten with the + averaged weights (by default). + + Attributes: + overwrite_weights_on_train_end: Whether to overwrite the current model + weights with the averaged weights from the moving average optimizer. + **kwargs: Any additional callback arguments. + """ + + def __init__(self, + overwrite_weights_on_train_end: bool = False, + **kwargs): + super(MovingAverageCallback, self).__init__(**kwargs) + self.overwrite_weights_on_train_end = overwrite_weights_on_train_end + + def set_model(self, model: tf.keras.Model): + super(MovingAverageCallback, self).set_model(model) + assert isinstance(self.model.optimizer, + optimizer_factory.MovingAverage) + self.model.optimizer.shadow_copy(self.model) + + def on_test_begin(self, logs: MutableMapping[Text, Any] = None): + self.model.optimizer.swap_weights() + + def on_test_end(self, logs: MutableMapping[Text, Any] = None): + self.model.optimizer.swap_weights() + + def on_train_end(self, logs: MutableMapping[Text, Any] = None): + if self.overwrite_weights_on_train_end: + self.model.optimizer.assign_average_vars(self.model.variables) + + +class AverageModelCheckpoint(tf.keras.callbacks.ModelCheckpoint): + """Saves and, optionally, assigns the averaged weights. + + Taken from tfa.callbacks.AverageModelCheckpoint. 
+ + Attributes: + update_weights: If True, assign the moving average weights + to the model, and save them. If False, keep the old + non-averaged weights, but the saved model uses the + average weights. + See `tf.keras.callbacks.ModelCheckpoint` for the other args. + """ + + def __init__( + self, + update_weights: bool, + filepath: str, + monitor: str = 'val_loss', + verbose: int = 0, + save_best_only: bool = False, + save_weights_only: bool = False, + mode: str = 'auto', + save_freq: str = 'epoch', + **kwargs): + self.update_weights = update_weights + super().__init__( + filepath, + monitor, + verbose, + save_best_only, + save_weights_only, + mode, + save_freq, + **kwargs) + + def set_model(self, model): + if not isinstance(model.optimizer, optimizer_factory.MovingAverage): + raise TypeError( + 'AverageModelCheckpoint is only used when training' + 'with MovingAverage') + return super().set_model(model) + + def _save_model(self, epoch, logs): + assert isinstance(self.model.optimizer, optimizer_factory.MovingAverage) + + if self.update_weights: + self.model.optimizer.assign_average_vars(self.model.variables) + return super()._save_model(epoch, logs) + else: + # Note: `model.get_weights()` gives us the weights (non-ref) + # whereas `model.variables` returns references to the variables. + non_avg_weights = self.model.get_weights() + self.model.optimizer.assign_average_vars(self.model.variables) + # result is currently None, since `super._save_model` doesn't + # return anything, but this may change in the future. + result = super()._save_model(epoch, logs) + self.model.set_weights(non_avg_weights) + return result diff --git a/models/official/vision/image_classification/classifier_trainer.py b/models/official/vision/image_classification/classifier_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..1e5ea468c9a0895658c28e89b8537e0056148fa0 --- /dev/null +++ b/models/official/vision/image_classification/classifier_trainer.py @@ -0,0 +1,456 @@ +# Lint as: python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Runs an Image Classification model.""" + +import os +import pprint +from typing import Any, Tuple, Text, Optional, Mapping + +from absl import app +from absl import flags +from absl import logging +import tensorflow as tf + +from official.modeling import hyperparams +from official.modeling import performance +from official.utils import hyperparams_flags +from official.utils.misc import distribution_utils +from official.utils.misc import keras_utils +from official.vision.image_classification import callbacks as custom_callbacks +from official.vision.image_classification import dataset_factory +from official.vision.image_classification import optimizer_factory +from official.vision.image_classification.configs import base_configs +from official.vision.image_classification.configs import configs +from official.vision.image_classification.efficientnet import efficientnet_model +from official.vision.image_classification.resnet import common +from official.vision.image_classification.resnet import resnet_model + + +def get_models() -> Mapping[str, tf.keras.Model]: + """Returns the mapping from model type name to Keras model.""" + return { + 'efficientnet': efficientnet_model.EfficientNet.from_name, + 'resnet': resnet_model.resnet50, + } + + +def get_dtype_map() -> Mapping[str, tf.dtypes.DType]: + """Returns the mapping from dtype string representations to TF dtypes.""" + return { + 'float32': tf.float32, + 'bfloat16': tf.bfloat16, + 'float16': tf.float16, + 'fp32': tf.float32, + 'bf16': tf.bfloat16, + } + + +def _get_metrics(one_hot: bool) -> Mapping[Text, Any]: + """Get a dict of available metrics to track.""" + if one_hot: + return { + # (name, metric_fn) + 'acc': tf.keras.metrics.CategoricalAccuracy(name='accuracy'), + 'accuracy': tf.keras.metrics.CategoricalAccuracy(name='accuracy'), + 'top_1': tf.keras.metrics.CategoricalAccuracy(name='accuracy'), + 'top_5': tf.keras.metrics.TopKCategoricalAccuracy( + k=5, + name='top_5_accuracy'), + } + else: + return { + # (name, metric_fn) + 'acc': tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'), + 'accuracy': tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'), + 'top_1': tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'), + 'top_5': tf.keras.metrics.SparseTopKCategoricalAccuracy( + k=5, + name='top_5_accuracy'), + } + + +def get_image_size_from_model( + params: base_configs.ExperimentConfig) -> Optional[int]: + """If the given model has a preferred image size, return it.""" + if params.model_name == 'efficientnet': + efficientnet_name = params.model.model_params.model_name + if efficientnet_name in efficientnet_model.MODEL_CONFIGS: + return efficientnet_model.MODEL_CONFIGS[efficientnet_name].resolution + return None + + +def _get_dataset_builders(params: base_configs.ExperimentConfig, + strategy: tf.distribute.Strategy, + one_hot: bool + ) -> Tuple[Any, Any]: + """Create and return train and validation dataset builders.""" + if one_hot: + logging.warning('label_smoothing > 0, so datasets will be one hot encoded.') + else: + logging.warning('label_smoothing not applied, so datasets will not be one ' + 'hot encoded.') + + num_devices = strategy.num_replicas_in_sync if strategy else 1 + + image_size = get_image_size_from_model(params) + + dataset_configs = [ + params.train_dataset, params.validation_dataset + ] + builders = [] + + for config in dataset_configs: + if config is not None and config.has_data: + builder = 
dataset_factory.DatasetBuilder( + config, + image_size=image_size or config.image_size, + num_devices=num_devices, + one_hot=one_hot) + else: + builder = None + builders.append(builder) + + return builders + + +def get_loss_scale(params: base_configs.ExperimentConfig, + fp16_default: float = 128.) -> float: + """Returns the loss scale for initializations.""" + loss_scale = params.runtime.loss_scale + if loss_scale == 'dynamic': + return loss_scale + elif loss_scale is not None: + return float(loss_scale) + elif (params.train_dataset.dtype == 'float32' or + params.train_dataset.dtype == 'bfloat16'): + return 1. + else: + assert params.train_dataset.dtype == 'float16' + return fp16_default + + +def _get_params_from_flags(flags_obj: flags.FlagValues): + """Get ParamsDict from flags.""" + model = flags_obj.model_type.lower() + dataset = flags_obj.dataset.lower() + params = configs.get_config(model=model, dataset=dataset) + + flags_overrides = { + 'model_dir': flags_obj.model_dir, + 'mode': flags_obj.mode, + 'model': { + 'name': model, + }, + 'runtime': { + 'run_eagerly': flags_obj.run_eagerly, + 'tpu': flags_obj.tpu, + }, + 'train_dataset': { + 'data_dir': flags_obj.data_dir, + }, + 'validation_dataset': { + 'data_dir': flags_obj.data_dir, + }, + 'train': { + 'time_history': { + 'log_steps': flags_obj.log_steps, + }, + }, + } + + overriding_configs = (flags_obj.config_file, + flags_obj.params_override, + flags_overrides) + + pp = pprint.PrettyPrinter() + + logging.info('Base params: %s', pp.pformat(params.as_dict())) + + for param in overriding_configs: + logging.info('Overriding params: %s', param) + params = hyperparams.override_params_dict(params, param, is_strict=True) + + params.validate() + params.lock() + + logging.info('Final model parameters: %s', pp.pformat(params.as_dict())) + return params + + +def resume_from_checkpoint(model: tf.keras.Model, + model_dir: str, + train_steps: int) -> int: + """Resumes from the latest checkpoint, if possible. + + Loads the model weights and optimizer settings from a checkpoint. + This function should be used in case of preemption recovery. + + Args: + model: The model whose weights should be restored. + model_dir: The directory where model weights were saved. + train_steps: The number of steps to train. + + Returns: + The epoch of the latest checkpoint, or 0 if not restoring. 
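+
+    For example (illustrative numbers): if the restored optimizer reports
+    optimizer.iterations == 2500 and train_steps == 1000, training resumes
+    at initial_epoch == 2.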
+ + """ + logging.info('Load from checkpoint is enabled.') + latest_checkpoint = tf.train.latest_checkpoint(model_dir) + logging.info('latest_checkpoint: %s', latest_checkpoint) + if not latest_checkpoint: + logging.info('No checkpoint detected.') + return 0 + + logging.info('Checkpoint file %s found and restoring from ' + 'checkpoint', latest_checkpoint) + model.load_weights(latest_checkpoint) + initial_epoch = model.optimizer.iterations // train_steps + logging.info('Completed loading from checkpoint.') + logging.info('Resuming from epoch %d', initial_epoch) + return int(initial_epoch) + + +def initialize(params: base_configs.ExperimentConfig, + dataset_builder: dataset_factory.DatasetBuilder): + """Initializes backend related initializations.""" + keras_utils.set_session_config( + enable_xla=params.runtime.enable_xla) + performance.set_mixed_precision_policy(dataset_builder.dtype, + get_loss_scale(params)) + if tf.config.list_physical_devices('GPU'): + data_format = 'channels_first' + else: + data_format = 'channels_last' + tf.keras.backend.set_image_data_format(data_format) + if params.runtime.run_eagerly: + # Enable eager execution to allow step-by-step debugging + tf.config.experimental_run_functions_eagerly(True) + if tf.config.list_physical_devices('GPU'): + if params.runtime.gpu_thread_mode: + keras_utils.set_gpu_thread_mode_and_count( + per_gpu_thread_count=params.runtime.per_gpu_thread_count, + gpu_thread_mode=params.runtime.gpu_thread_mode, + num_gpus=params.runtime.num_gpus, + datasets_num_private_threads=params.runtime.dataset_num_private_threads) # pylint:disable=line-too-long + if params.runtime.batchnorm_spatial_persistent: + os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1' + + +def define_classifier_flags(): + """Defines common flags for image classification.""" + hyperparams_flags.initialize_common_flags() + flags.DEFINE_string( + 'data_dir', + default=None, + help='The location of the input data.') + flags.DEFINE_string( + 'mode', + default=None, + help='Mode to run: `train`, `eval`, `train_and_eval` or `export`.') + flags.DEFINE_bool( + 'run_eagerly', + default=None, + help='Use eager execution and disable autograph for debugging.') + flags.DEFINE_string( + 'model_type', + default=None, + help='The type of the model, e.g. EfficientNet, etc.') + flags.DEFINE_string( + 'dataset', + default=None, + help='The name of the dataset, e.g. 
ImageNet, etc.') + flags.DEFINE_integer( + 'log_steps', + default=100, + help='The interval of steps between logging of batch level stats.') + + +def serialize_config(params: base_configs.ExperimentConfig, + model_dir: str): + """Serializes and saves the experiment config.""" + params_save_path = os.path.join(model_dir, 'params.yaml') + logging.info('Saving experiment configuration to %s', params_save_path) + tf.io.gfile.makedirs(model_dir) + hyperparams.save_params_dict_to_yaml(params, params_save_path) + + +def train_and_eval( + params: base_configs.ExperimentConfig, + strategy_override: tf.distribute.Strategy) -> Mapping[str, Any]: + """Runs the train and eval path using compile/fit.""" + logging.info('Running train and eval.') + + distribution_utils.configure_cluster( + params.runtime.worker_hosts, + params.runtime.task_index) + + # Note: for TPUs, strategy and scope should be created before the dataset + strategy = strategy_override or distribution_utils.get_distribution_strategy( + distribution_strategy=params.runtime.distribution_strategy, + all_reduce_alg=params.runtime.all_reduce_alg, + num_gpus=params.runtime.num_gpus, + tpu_address=params.runtime.tpu) + + strategy_scope = distribution_utils.get_strategy_scope(strategy) + + logging.info('Detected %d devices.', + strategy.num_replicas_in_sync if strategy else 1) + + label_smoothing = params.model.loss.label_smoothing + one_hot = label_smoothing and label_smoothing > 0 + + builders = _get_dataset_builders(params, strategy, one_hot) + datasets = [builder.build(strategy) + if builder else None for builder in builders] + + # Unpack datasets and builders based on train/val/test splits + train_builder, validation_builder = builders # pylint: disable=unbalanced-tuple-unpacking + train_dataset, validation_dataset = datasets + + train_epochs = params.train.epochs + train_steps = params.train.steps or train_builder.num_steps + validation_steps = params.evaluation.steps or validation_builder.num_steps + + initialize(params, train_builder) + + logging.info('Global batch size: %d', train_builder.global_batch_size) + + with strategy_scope: + model_params = params.model.model_params.as_dict() + model = get_models()[params.model.name](**model_params) + learning_rate = optimizer_factory.build_learning_rate( + params=params.model.learning_rate, + batch_size=train_builder.global_batch_size, + train_epochs=train_epochs, + train_steps=train_steps) + optimizer = optimizer_factory.build_optimizer( + optimizer_name=params.model.optimizer.name, + base_learning_rate=learning_rate, + params=params.model.optimizer.as_dict()) + + metrics_map = _get_metrics(one_hot) + metrics = [metrics_map[metric] for metric in params.train.metrics] + steps_per_loop = train_steps if params.train.set_epoch_loop else 1 + + if one_hot: + loss_obj = tf.keras.losses.CategoricalCrossentropy( + label_smoothing=params.model.loss.label_smoothing) + else: + loss_obj = tf.keras.losses.SparseCategoricalCrossentropy() + model.compile(optimizer=optimizer, + loss=loss_obj, + metrics=metrics, + experimental_steps_per_execution=steps_per_loop) + + initial_epoch = 0 + if params.train.resume_checkpoint: + initial_epoch = resume_from_checkpoint(model=model, + model_dir=params.model_dir, + train_steps=train_steps) + + callbacks = custom_callbacks.get_callbacks( + model_checkpoint=params.train.callbacks.enable_checkpoint_and_export, + include_tensorboard=params.train.callbacks.enable_tensorboard, + time_history=params.train.callbacks.enable_time_history, + 
track_lr=params.train.tensorboard.track_lr, + write_model_weights=params.train.tensorboard.write_model_weights, + initial_step=initial_epoch * train_steps, + batch_size=train_builder.global_batch_size, + log_steps=params.train.time_history.log_steps, + model_dir=params.model_dir) + + serialize_config(params=params, model_dir=params.model_dir) + + if params.evaluation.skip_eval: + validation_kwargs = {} + else: + validation_kwargs = { + 'validation_data': validation_dataset, + 'validation_steps': validation_steps, + 'validation_freq': params.evaluation.epochs_between_evals, + } + + history = model.fit( + train_dataset, + epochs=train_epochs, + steps_per_epoch=train_steps, + initial_epoch=initial_epoch, + callbacks=callbacks, + verbose=2, + **validation_kwargs) + + validation_output = None + if not params.evaluation.skip_eval: + validation_output = model.evaluate( + validation_dataset, steps=validation_steps, verbose=2) + + # TODO(dankondratyuk): eval and save final test accuracy + stats = common.build_stats(history, + validation_output, + callbacks) + return stats + + +def export(params: base_configs.ExperimentConfig): + """Runs the model export functionality.""" + logging.info('Exporting model.') + model_params = params.model.model_params.as_dict() + model = get_models()[params.model.name](**model_params) + checkpoint = params.export.checkpoint + if checkpoint is None: + logging.info('No export checkpoint was provided. Using the latest ' + 'checkpoint from model_dir.') + checkpoint = tf.train.latest_checkpoint(params.model_dir) + + model.load_weights(checkpoint) + model.save(params.export.destination) + + +def run(flags_obj: flags.FlagValues, + strategy_override: tf.distribute.Strategy = None) -> Mapping[str, Any]: + """Runs Image Classification model using native Keras APIs. + + Args: + flags_obj: An object containing parsed flag values. + strategy_override: A `tf.distribute.Strategy` object to use for model. + + Returns: + Dictionary of training/eval stats + """ + params = _get_params_from_flags(flags_obj) + if params.mode == 'train_and_eval': + return train_and_eval(params, strategy_override) + elif params.mode == 'export_only': + export(params) + else: + raise ValueError('{} is not a valid mode.'.format(params.mode)) + + +def main(_): + stats = run(flags.FLAGS) + if stats: + logging.info('Run stats:\n%s', stats) + + +if __name__ == '__main__': + logging.set_verbosity(logging.INFO) + define_classifier_flags() + flags.mark_flag_as_required('data_dir') + flags.mark_flag_as_required('mode') + flags.mark_flag_as_required('model_type') + flags.mark_flag_as_required('dataset') + + app.run(main) diff --git a/models/official/vision/image_classification/classifier_trainer_test.py b/models/official/vision/image_classification/classifier_trainer_test.py new file mode 100644 index 0000000000000000000000000000000000000000..244425feef76bf89d4de939cb8a1914a6f0f47c6 --- /dev/null +++ b/models/official/vision/image_classification/classifier_trainer_test.py @@ -0,0 +1,387 @@ +# Lint as: python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Unit tests for the classifier trainer models.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import functools +import json + +import os +import sys + +from typing import Any, Callable, Iterable, Mapping, MutableMapping, Optional, Tuple + +from absl import flags +from absl.testing import parameterized +import tensorflow as tf + +from tensorflow.python.distribute import combinations +from tensorflow.python.distribute import strategy_combinations +from official.utils.flags import core as flags_core +from official.vision.image_classification import classifier_trainer +from official.vision.image_classification import dataset_factory +from official.vision.image_classification import test_utils +from official.vision.image_classification.configs import base_configs + +classifier_trainer.define_classifier_flags() + + +def distribution_strategy_combinations() -> Iterable[Tuple[Any, ...]]: + """Returns the combinations of end-to-end tests to run.""" + return combinations.combine( + distribution=[ + strategy_combinations.default_strategy, + strategy_combinations.tpu_strategy, + strategy_combinations.one_device_strategy_gpu, + strategy_combinations.mirrored_strategy_with_two_gpus, + ], + model=[ + 'efficientnet', + 'resnet', + ], + mode='eager', + dataset=[ + 'imagenet', + ], + ) + + +def get_params_override(params_override: Mapping[str, Any]) -> str: + """Converts params_override dict to string command.""" + return '--params_override=' + json.dumps(params_override) + + +def basic_params_override(dtype: str = 'float32') -> MutableMapping[str, Any]: + """Returns a basic parameter configuration for testing.""" + return { + 'train_dataset': { + 'builder': 'synthetic', + 'use_per_replica_batch_size': True, + 'batch_size': 1, + 'image_size': 224, + 'dtype': dtype, + }, + 'validation_dataset': { + 'builder': 'synthetic', + 'batch_size': 1, + 'use_per_replica_batch_size': True, + 'image_size': 224, + 'dtype': dtype, + }, + 'train': { + 'steps': 1, + 'epochs': 1, + 'callbacks': { + 'enable_checkpoint_and_export': True, + 'enable_tensorboard': False, + }, + }, + 'evaluation': { + 'steps': 1, + }, + } + + +def get_trivial_model(num_classes: int) -> tf.keras.Model: + """Creates and compiles trivial model for ImageNet dataset.""" + model = test_utils.trivial_model(num_classes=num_classes) + lr = 0.01 + optimizer = tf.keras.optimizers.SGD(learning_rate=lr) + loss_obj = tf.keras.losses.SparseCategoricalCrossentropy() + model.compile(optimizer=optimizer, + loss=loss_obj, + run_eagerly=True) + return model + + +def get_trivial_data() -> tf.data.Dataset: + """Gets trivial data in the ImageNet size.""" + def generate_data(_) -> tf.data.Dataset: + image = tf.zeros(shape=(224, 224, 3), dtype=tf.float32) + label = tf.zeros([1], dtype=tf.int32) + return image, label + + dataset = tf.data.Dataset.range(1) + dataset = dataset.repeat() + dataset = dataset.map(generate_data, + num_parallel_calls=tf.data.experimental.AUTOTUNE) + dataset = dataset.prefetch(buffer_size=1).batch(1) + return dataset + + +def run_end_to_end(main: Callable[[Any], None], + extra_flags: Optional[Iterable[str]] = None, + model_dir: Optional[str] = None): + """Runs the classifier trainer end-to-end.""" + extra_flags = [] if extra_flags is None else extra_flags + args = [sys.argv[0], 
'--model_dir', model_dir] + extra_flags + flags_core.parse_flags(argv=args) + main(flags.FLAGS) + + +class ClassifierTest(tf.test.TestCase, parameterized.TestCase): + """Unit tests for Keras models.""" + _tempdir = None + + @classmethod + def setUpClass(cls): # pylint: disable=invalid-name + super(ClassifierTest, cls).setUpClass() + + def tearDown(self): + super(ClassifierTest, self).tearDown() + tf.io.gfile.rmtree(self.get_temp_dir()) + + @combinations.generate(distribution_strategy_combinations()) + def test_end_to_end_train_and_eval(self, distribution, model, dataset): + """Test train_and_eval and export for Keras classifier models.""" + # Some parameters are not defined as flags (e.g. cannot run + # classifier_train.py --batch_size=...) by design, so use + # "--params_override=..." instead + model_dir = self.get_temp_dir() + base_flags = [ + '--data_dir=not_used', + '--model_type=' + model, + '--dataset=' + dataset, + ] + train_and_eval_flags = base_flags + [ + get_params_override(basic_params_override()), + '--mode=train_and_eval', + ] + + run = functools.partial(classifier_trainer.run, + strategy_override=distribution) + run_end_to_end(main=run, + extra_flags=train_and_eval_flags, + model_dir=model_dir) + + @combinations.generate( + combinations.combine( + distribution=[ + strategy_combinations.one_device_strategy_gpu, + ], + model=[ + 'efficientnet', + 'resnet', + ], + mode='eager', + dataset='imagenet', + dtype='float16', + )) + def test_gpu_train(self, distribution, model, dataset, dtype): + """Test train_and_eval and export for Keras classifier models.""" + # Some parameters are not defined as flags (e.g. cannot run + # classifier_train.py --batch_size=...) by design, so use + # "--params_override=..." instead + model_dir = self.get_temp_dir() + base_flags = [ + '--data_dir=not_used', + '--model_type=' + model, + '--dataset=' + dataset, + ] + train_and_eval_flags = base_flags + [ + get_params_override(basic_params_override(dtype)), + '--mode=train_and_eval', + ] + + export_params = basic_params_override() + export_path = os.path.join(model_dir, 'export') + export_params['export'] = {} + export_params['export']['destination'] = export_path + export_flags = base_flags + [ + '--mode=export_only', + get_params_override(export_params) + ] + + run = functools.partial(classifier_trainer.run, + strategy_override=distribution) + run_end_to_end(main=run, + extra_flags=train_and_eval_flags, + model_dir=model_dir) + run_end_to_end(main=run, + extra_flags=export_flags, + model_dir=model_dir) + self.assertTrue(os.path.exists(export_path)) + + @combinations.generate( + combinations.combine( + distribution=[ + strategy_combinations.tpu_strategy, + ], + model=[ + 'efficientnet', + 'resnet', + ], + mode='eager', + dataset='imagenet', + dtype='bfloat16', + )) + def test_tpu_train(self, distribution, model, dataset, dtype): + """Test train_and_eval and export for Keras classifier models.""" + # Some parameters are not defined as flags (e.g. cannot run + # classifier_train.py --batch_size=...) by design, so use + # "--params_override=..." 
instead + model_dir = self.get_temp_dir() + base_flags = [ + '--data_dir=not_used', + '--model_type=' + model, + '--dataset=' + dataset, + ] + train_and_eval_flags = base_flags + [ + get_params_override(basic_params_override(dtype)), + '--mode=train_and_eval', + ] + + run = functools.partial(classifier_trainer.run, + strategy_override=distribution) + run_end_to_end(main=run, + extra_flags=train_and_eval_flags, + model_dir=model_dir) + + @combinations.generate(distribution_strategy_combinations()) + def test_end_to_end_invalid_mode(self, distribution, model, dataset): + """Test the Keras EfficientNet model with `strategy`.""" + model_dir = self.get_temp_dir() + extra_flags = [ + '--data_dir=not_used', + '--mode=invalid_mode', + '--model_type=' + model, + '--dataset=' + dataset, + get_params_override(basic_params_override()), + ] + + run = functools.partial(classifier_trainer.run, + strategy_override=distribution) + with self.assertRaises(ValueError): + run_end_to_end(main=run, extra_flags=extra_flags, model_dir=model_dir) + + +class UtilTests(parameterized.TestCase, tf.test.TestCase): + """Tests for individual utility functions within classifier_trainer.py.""" + + @parameterized.named_parameters( + ('efficientnet-b0', 'efficientnet', 'efficientnet-b0', 224), + ('efficientnet-b1', 'efficientnet', 'efficientnet-b1', 240), + ('efficientnet-b2', 'efficientnet', 'efficientnet-b2', 260), + ('efficientnet-b3', 'efficientnet', 'efficientnet-b3', 300), + ('efficientnet-b4', 'efficientnet', 'efficientnet-b4', 380), + ('efficientnet-b5', 'efficientnet', 'efficientnet-b5', 456), + ('efficientnet-b6', 'efficientnet', 'efficientnet-b6', 528), + ('efficientnet-b7', 'efficientnet', 'efficientnet-b7', 600), + ('resnet', 'resnet', '', None), + ) + def test_get_model_size(self, model, model_name, expected): + config = base_configs.ExperimentConfig( + model_name=model, + model=base_configs.ModelConfig( + model_params={ + 'model_name': model_name, + }, + ) + ) + size = classifier_trainer.get_image_size_from_model(config) + self.assertEqual(size, expected) + + @parameterized.named_parameters( + ('dynamic', 'dynamic', None, 'dynamic'), + ('scalar', 128., None, 128.), + ('float32', None, 'float32', 1), + ('float16', None, 'float16', 128), + ) + def test_get_loss_scale(self, loss_scale, dtype, expected): + config = base_configs.ExperimentConfig( + runtime=base_configs.RuntimeConfig( + loss_scale=loss_scale), + train_dataset=dataset_factory.DatasetConfig(dtype=dtype)) + ls = classifier_trainer.get_loss_scale(config, fp16_default=128) + self.assertEqual(ls, expected) + + @parameterized.named_parameters( + ('float16', 'float16'), + ('bfloat16', 'bfloat16') + ) + def test_initialize(self, dtype): + config = base_configs.ExperimentConfig( + runtime=base_configs.RuntimeConfig( + run_eagerly=False, + enable_xla=False, + per_gpu_thread_count=1, + gpu_thread_mode='gpu_private', + num_gpus=1, + dataset_num_private_threads=1, + ), + train_dataset=dataset_factory.DatasetConfig(dtype=dtype), + model=base_configs.ModelConfig(), + ) + + class EmptyClass: + pass + fake_ds_builder = EmptyClass() + fake_ds_builder.dtype = dtype + fake_ds_builder.config = EmptyClass() + classifier_trainer.initialize(config, fake_ds_builder) + + def test_resume_from_checkpoint(self): + """Tests functionality for resuming from checkpoint.""" + # Set the keras policy + policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16') + tf.keras.mixed_precision.experimental.set_policy(policy) + + # Get the model, datasets, and compile it. 
+ model = get_trivial_model(10) + + # Create the checkpoint + model_dir = self.get_temp_dir() + train_epochs = 1 + train_steps = 10 + ds = get_trivial_data() + callbacks = [ + tf.keras.callbacks.ModelCheckpoint( + os.path.join(model_dir, 'model.ckpt-{epoch:04d}'), + save_weights_only=True) + ] + model.fit( + ds, + callbacks=callbacks, + epochs=train_epochs, + steps_per_epoch=train_steps) + + # Test load from checkpoint + clean_model = get_trivial_model(10) + weights_before_load = copy.deepcopy(clean_model.get_weights()) + initial_epoch = classifier_trainer.resume_from_checkpoint( + model=clean_model, + model_dir=model_dir, + train_steps=train_steps) + self.assertEqual(initial_epoch, 1) + self.assertNotAllClose(weights_before_load, clean_model.get_weights()) + + tf.io.gfile.rmtree(model_dir) + + def test_serialize_config(self): + """Tests functionality for serializing data.""" + config = base_configs.ExperimentConfig() + model_dir = self.get_temp_dir() + classifier_trainer.serialize_config(params=config, model_dir=model_dir) + saved_params_path = os.path.join(model_dir, 'params.yaml') + self.assertTrue(os.path.exists(saved_params_path)) + tf.io.gfile.rmtree(model_dir) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/vision/image_classification/configs/__init__.py b/models/official/vision/image_classification/configs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..931c2ef11db4a949e6c2e95bca44e36bac1241e9 --- /dev/null +++ b/models/official/vision/image_classification/configs/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== diff --git a/models/official/vision/image_classification/configs/base_configs.py b/models/official/vision/image_classification/configs/base_configs.py new file mode 100644 index 0000000000000000000000000000000000000000..11fcb5305660ec71153ebfc12631f455a3464115 --- /dev/null +++ b/models/official/vision/image_classification/configs/base_configs.py @@ -0,0 +1,231 @@ +# Lint as: python3 +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
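+#
+# Illustrative note (a sketch, not part of the original file): every config
+# class below extends hyperparams.Config, so instances can be constructed
+# directly or overridden from dicts/YAML. For example, using only classes
+# defined in this module and hypothetical values:
+#
+#   config = ExperimentConfig(
+#       model_dir='/tmp/run',
+#       mode='train_and_eval',
+#       train=TrainConfig(epochs=90),
+#       evaluation=EvalConfig(epochs_between_evals=1))
+#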
+# ============================================================================== +"""Definitions for high level configuration groups..""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +from typing import Any, List, Mapping, Optional + +import dataclasses + +from official.modeling import hyperparams +from official.modeling.hyperparams import config_definitions + +CallbacksConfig = config_definitions.CallbacksConfig +TensorboardConfig = config_definitions.TensorboardConfig +RuntimeConfig = config_definitions.RuntimeConfig + + +@dataclasses.dataclass +class ExportConfig(hyperparams.Config): + """Configuration for exports. + + Attributes: + checkpoint: the path to the checkpoint to export. + destination: the path to where the checkpoint should be exported. + """ + checkpoint: str = None + destination: str = None + + +@dataclasses.dataclass +class MetricsConfig(hyperparams.Config): + """Configuration for Metrics. + + Attributes: + accuracy: Whether or not to track accuracy as a Callback. Defaults to None. + top_5: Whether or not to track top_5_accuracy as a Callback. Defaults to + None. + """ + accuracy: bool = None + top_5: bool = None + + +@dataclasses.dataclass +class TimeHistoryConfig(hyperparams.Config): + """Configuration for the TimeHistory callback. + + Attributes: + log_steps: Interval of steps between logging of batch level stats. + """ + log_steps: int = None + + +@dataclasses.dataclass +class TrainConfig(hyperparams.Config): + """Configuration for training. + + Attributes: + resume_checkpoint: Whether or not to enable load checkpoint loading. + Defaults to None. + epochs: The number of training epochs to run. Defaults to None. + steps: The number of steps to run per epoch. If None, then this will be + inferred based on the number of images and batch size. Defaults to None. + callbacks: An instance of CallbacksConfig. + metrics: An instance of MetricsConfig. + tensorboard: An instance of TensorboardConfig. + set_epoch_loop: Whether or not to set `experimental_steps_per_execution` to + equal the number of training steps in `model.compile`. This reduces the + number of callbacks run per epoch which significantly improves end-to-end + TPU training time. + """ + resume_checkpoint: bool = None + epochs: int = None + steps: int = None + callbacks: CallbacksConfig = CallbacksConfig() + metrics: MetricsConfig = None + tensorboard: TensorboardConfig = TensorboardConfig() + time_history: TimeHistoryConfig = TimeHistoryConfig() + set_epoch_loop: bool = False + + +@dataclasses.dataclass +class EvalConfig(hyperparams.Config): + """Configuration for evaluation. + + Attributes: + epochs_between_evals: The number of train epochs to run between evaluations. + Defaults to None. + steps: The number of eval steps to run during evaluation. If None, this will + be inferred based on the number of images and batch size. Defaults to + None. + skip_eval: Whether or not to skip evaluation. + """ + epochs_between_evals: int = None + steps: int = None + skip_eval: bool = False + + +@dataclasses.dataclass +class LossConfig(hyperparams.Config): + """Configuration for Loss. + + Attributes: + name: The name of the loss. Defaults to None. + label_smoothing: Whether or not to apply label smoothing to the loss. This + only applies to 'categorical_cross_entropy'. + """ + name: str = None + label_smoothing: float = None + + +@dataclasses.dataclass +class OptimizerConfig(hyperparams.Config): + """Configuration for Optimizers. 
+ + Attributes: + name: The name of the optimizer. Defaults to None. + decay: Decay or rho, discounting factor for gradient. Defaults to None. + epsilon: Small value used to avoid 0 denominator. Defaults to None. + momentum: Plain momentum constant. Defaults to None. + nesterov: Whether or not to apply Nesterov momentum. Defaults to None. + moving_average_decay: The amount of decay to apply. If 0 or None, then + exponential moving average is not used. Defaults to None. + lookahead: Whether or not to apply the lookahead optimizer. Defaults to + None. + beta_1: The exponential decay rate for the 1st moment estimates. Used in the + Adam optimizers. Defaults to None. + beta_2: The exponential decay rate for the 2nd moment estimates. Used in the + Adam optimizers. Defaults to None. + epsilon: Small value used to avoid 0 denominator. Defaults to 1e-7. + """ + name: str = None + decay: float = None + epsilon: float = None + momentum: float = None + nesterov: bool = None + moving_average_decay: Optional[float] = None + lookahead: Optional[bool] = None + beta_1: float = None + beta_2: float = None + epsilon: float = None + + +@dataclasses.dataclass +class LearningRateConfig(hyperparams.Config): + """Configuration for learning rates. + + Attributes: + name: The name of the learning rate. Defaults to None. + initial_lr: The initial learning rate. Defaults to None. + decay_epochs: The number of decay epochs. Defaults to None. + decay_rate: The rate of decay. Defaults to None. + warmup_epochs: The number of warmup epochs. Defaults to None. + batch_lr_multiplier: The multiplier to apply to the base learning rate, if + necessary. Defaults to None. + examples_per_epoch: the number of examples in a single epoch. Defaults to + None. + boundaries: boundaries used in piecewise constant decay with warmup. + multipliers: multipliers used in piecewise constant decay with warmup. + scale_by_batch_size: Scale the learning rate by a fraction of the batch + size. Set to 0 for no scaling (default). + staircase: Apply exponential decay at discrete values instead of continuous. + """ + name: str = None + initial_lr: float = None + decay_epochs: float = None + decay_rate: float = None + warmup_epochs: int = None + examples_per_epoch: int = None + boundaries: List[int] = None + multipliers: List[float] = None + scale_by_batch_size: float = 0. + staircase: bool = None + + +@dataclasses.dataclass +class ModelConfig(hyperparams.Config): + """Configuration for Models. + + Attributes: + name: The name of the model. Defaults to None. + model_params: The parameters used to create the model. Defaults to None. + num_classes: The number of classes in the model. Defaults to None. + loss: A `LossConfig` instance. Defaults to None. + optimizer: An `OptimizerConfig` instance. Defaults to None. + """ + name: str = None + model_params: hyperparams.Config = None + num_classes: int = None + loss: LossConfig = None + optimizer: OptimizerConfig = None + + +@dataclasses.dataclass +class ExperimentConfig(hyperparams.Config): + """Base configuration for an image classification experiment. + + Attributes: + model_dir: The directory to use when running an experiment. + mode: e.g. 'train_and_eval', 'export' + runtime: A `RuntimeConfig` instance. + train: A `TrainConfig` instance. + evaluation: An `EvalConfig` instance. + model: A `ModelConfig` instance. + export: An `ExportConfig` instance. 
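+
+    Illustrative override (hypothetical values), e.g. as supplied through a
+    --params_override flag or hyperparams.override_params_dict:
+
+      {'mode': 'train_and_eval', 'model_dir': '/tmp/run',
+       'train': {'epochs': 90}}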
+ """ + model_dir: str = None + model_name: str = None + mode: str = None + runtime: RuntimeConfig = None + train_dataset: Any = None + validation_dataset: Any = None + train: TrainConfig = None + evaluation: EvalConfig = None + model: ModelConfig = None + export: ExportConfig = None diff --git a/models/official/vision/image_classification/configs/configs.py b/models/official/vision/image_classification/configs/configs.py new file mode 100644 index 0000000000000000000000000000000000000000..8a79a1cd9b563a554614b9d4f2f0b93acf016791 --- /dev/null +++ b/models/official/vision/image_classification/configs/configs.py @@ -0,0 +1,118 @@ +# Lint as: python3 +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Configuration utils for image classification experiments.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import dataclasses + +from official.vision.image_classification import dataset_factory +from official.vision.image_classification.configs import base_configs +from official.vision.image_classification.efficientnet import efficientnet_config +from official.vision.image_classification.resnet import resnet_config + + +@dataclasses.dataclass +class EfficientNetImageNetConfig(base_configs.ExperimentConfig): + """Base configuration to train efficientnet-b0 on ImageNet. + + Attributes: + export: An `ExportConfig` instance + runtime: A `RuntimeConfig` instance. + dataset: A `DatasetConfig` instance. + train: A `TrainConfig` instance. + evaluation: An `EvalConfig` instance. + model: A `ModelConfig` instance. 
+ + """ + export: base_configs.ExportConfig = base_configs.ExportConfig() + runtime: base_configs.RuntimeConfig = base_configs.RuntimeConfig() + train_dataset: dataset_factory.DatasetConfig = \ + dataset_factory.ImageNetConfig(split='train') + validation_dataset: dataset_factory.DatasetConfig = \ + dataset_factory.ImageNetConfig(split='validation') + train: base_configs.TrainConfig = base_configs.TrainConfig( + resume_checkpoint=True, + epochs=500, + steps=None, + callbacks=base_configs.CallbacksConfig(enable_checkpoint_and_export=True, + enable_tensorboard=True), + metrics=['accuracy', 'top_5'], + time_history=base_configs.TimeHistoryConfig(log_steps=100), + tensorboard=base_configs.TensorboardConfig(track_lr=True, + write_model_weights=False), + set_epoch_loop=False) + evaluation: base_configs.EvalConfig = base_configs.EvalConfig( + epochs_between_evals=1, + steps=None) + model: base_configs.ModelConfig = \ + efficientnet_config.EfficientNetModelConfig() + + +@dataclasses.dataclass +class ResNetImagenetConfig(base_configs.ExperimentConfig): + """Base configuration to train resnet-50 on ImageNet.""" + export: base_configs.ExportConfig = base_configs.ExportConfig() + runtime: base_configs.RuntimeConfig = base_configs.RuntimeConfig() + train_dataset: dataset_factory.DatasetConfig = \ + dataset_factory.ImageNetConfig(split='train', + one_hot=False, + mean_subtract=True, + standardize=True) + validation_dataset: dataset_factory.DatasetConfig = \ + dataset_factory.ImageNetConfig(split='validation', + one_hot=False, + mean_subtract=True, + standardize=True) + train: base_configs.TrainConfig = base_configs.TrainConfig( + resume_checkpoint=True, + epochs=90, + steps=None, + callbacks=base_configs.CallbacksConfig(enable_checkpoint_and_export=True, + enable_tensorboard=True), + metrics=['accuracy', 'top_5'], + time_history=base_configs.TimeHistoryConfig(log_steps=100), + tensorboard=base_configs.TensorboardConfig(track_lr=True, + write_model_weights=False), + set_epoch_loop=False) + evaluation: base_configs.EvalConfig = base_configs.EvalConfig( + epochs_between_evals=1, + steps=None) + model: base_configs.ModelConfig = resnet_config.ResNetModelConfig() + + +def get_config(model: str, dataset: str) -> base_configs.ExperimentConfig: + """Given model and dataset names, return the ExperimentConfig.""" + dataset_model_config_map = { + 'imagenet': { + 'efficientnet': EfficientNetImageNetConfig(), + 'resnet': ResNetImagenetConfig(), + } + } + try: + return dataset_model_config_map[dataset][model] + except KeyError: + if dataset not in dataset_model_config_map: + raise KeyError('Invalid dataset received. Received: {}. Supported ' + 'datasets include: {}'.format( + dataset, + ', '.join(dataset_model_config_map.keys()))) + raise KeyError('Invalid model received. Received: {}. Supported models for' + '{} include: {}'.format( + model, + dataset, + ', '.join(dataset_model_config_map[dataset].keys()))) diff --git a/models/official/vision/image_classification/configs/examples/efficientnet/imagenet/efficientnet-b0-gpu.yaml b/models/official/vision/image_classification/configs/examples/efficientnet/imagenet/efficientnet-b0-gpu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6f40ffb1e3020a231832a120d9938bf77e9cc74b --- /dev/null +++ b/models/official/vision/image_classification/configs/examples/efficientnet/imagenet/efficientnet-b0-gpu.yaml @@ -0,0 +1,52 @@ +# Training configuration for EfficientNet-b0 trained on ImageNet on GPUs. +# Takes ~32 minutes per epoch for 8 V100s. 
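+# Illustrative invocation with this file (hypothetical paths; --config_file and
+# --params_override come from the shared hyperparameter flags):
+#   python3 classifier_trainer.py --mode=train_and_eval \
+#     --model_type=efficientnet --dataset=imagenet \
+#     --model_dir=/tmp/efficientnet_b0 --data_dir=/data/imagenet \
+#     --config_file=configs/examples/efficientnet/imagenet/efficientnet-b0-gpu.yaml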
+# Reaches ~76.1% within 350 epochs. +# Note: This configuration uses a scaled per-replica batch size based on the number of devices. +runtime: + distribution_strategy: 'mirrored' + num_gpus: 1 +train_dataset: + name: 'imagenet2012' + data_dir: null + builder: 'records' + split: 'train' + num_classes: 1000 + num_examples: 1281167 + batch_size: 32 + use_per_replica_batch_size: True + dtype: 'float32' + augmenter: + name: 'autoaugment' +validation_dataset: + name: 'imagenet2012' + data_dir: null + builder: 'records' + split: 'validation' + num_classes: 1000 + num_examples: 50000 + batch_size: 32 + use_per_replica_batch_size: True + dtype: 'float32' +model: + model_params: + model_name: 'efficientnet-b0' + overrides: + num_classes: 1000 + batch_norm: 'default' + dtype: 'float32' + activation: 'swish' + optimizer: + name: 'rmsprop' + momentum: 0.9 + decay: 0.9 + moving_average_decay: 0.0 + lookahead: false + learning_rate: + name: 'exponential' + loss: + label_smoothing: 0.1 +train: + resume_checkpoint: True + epochs: 500 +evaluation: + epochs_between_evals: 1 diff --git a/models/official/vision/image_classification/configs/examples/efficientnet/imagenet/efficientnet-b0-tpu.yaml b/models/official/vision/image_classification/configs/examples/efficientnet/imagenet/efficientnet-b0-tpu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c5be7e9ba32fc7e8f3999df8e7446405dd2d4173 --- /dev/null +++ b/models/official/vision/image_classification/configs/examples/efficientnet/imagenet/efficientnet-b0-tpu.yaml @@ -0,0 +1,52 @@ +# Training configuration for EfficientNet-b0 trained on ImageNet on TPUs. +# Takes ~2 minutes, 50 seconds per epoch for v3-32. +# Reaches ~76.1% within 350 epochs. +# Note: This configuration uses a scaled per-replica batch size based on the number of devices. +runtime: + distribution_strategy: 'tpu' +train_dataset: + name: 'imagenet2012' + data_dir: null + builder: 'records' + split: 'train' + num_classes: 1000 + num_examples: 1281167 + batch_size: 128 + use_per_replica_batch_size: True + dtype: 'bfloat16' + augmenter: + name: 'autoaugment' +validation_dataset: + name: 'imagenet2012' + data_dir: null + builder: 'records' + split: 'validation' + num_classes: 1000 + num_examples: 50000 + batch_size: 128 + use_per_replica_batch_size: True + dtype: 'bfloat16' +model: + model_params: + model_name: 'efficientnet-b0' + overrides: + num_classes: 1000 + batch_norm: 'tpu' + dtype: 'bfloat16' + activation: 'swish' + optimizer: + name: 'rmsprop' + momentum: 0.9 + decay: 0.9 + moving_average_decay: 0.0 + lookahead: false + learning_rate: + name: 'exponential' + loss: + label_smoothing: 0.1 +train: + resume_checkpoint: True + epochs: 500 + set_epoch_loop: True +evaluation: + epochs_between_evals: 1 diff --git a/models/official/vision/image_classification/configs/examples/efficientnet/imagenet/efficientnet-b1-gpu.yaml b/models/official/vision/image_classification/configs/examples/efficientnet/imagenet/efficientnet-b1-gpu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2f3dce01a46c64c4d92e97091628daeadaceb21d --- /dev/null +++ b/models/official/vision/image_classification/configs/examples/efficientnet/imagenet/efficientnet-b1-gpu.yaml @@ -0,0 +1,47 @@ +# Note: This configuration uses a scaled per-replica batch size based on the number of devices. 
+runtime: + distribution_strategy: 'mirrored' + num_gpus: 1 +train_dataset: + name: 'imagenet2012' + data_dir: null + builder: 'records' + split: 'train' + num_classes: 1000 + num_examples: 1281167 + batch_size: 32 + use_per_replica_batch_size: True + dtype: 'float32' +validation_dataset: + name: 'imagenet2012' + data_dir: null + builder: 'records' + split: 'validation' + num_classes: 1000 + num_examples: 50000 + batch_size: 32 + use_per_replica_batch_size: True + dtype: 'float32' +model: + model_params: + model_name: 'efficientnet-b1' + overrides: + num_classes: 1000 + batch_norm: 'default' + dtype: 'float32' + activation: 'swish' + optimizer: + name: 'rmsprop' + momentum: 0.9 + decay: 0.9 + moving_average_decay: 0.0 + lookahead: false + learning_rate: + name: 'exponential' + loss: + label_smoothing: 0.1 +train: + resume_checkpoint: True + epochs: 500 +evaluation: + epochs_between_evals: 1 diff --git a/models/official/vision/image_classification/configs/examples/efficientnet/imagenet/efficientnet-b1-tpu.yaml b/models/official/vision/image_classification/configs/examples/efficientnet/imagenet/efficientnet-b1-tpu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0bb6a9fe6f0b417f92686178d4bc79a44c5a4aa7 --- /dev/null +++ b/models/official/vision/image_classification/configs/examples/efficientnet/imagenet/efficientnet-b1-tpu.yaml @@ -0,0 +1,51 @@ +# Training configuration for EfficientNet-b1 trained on ImageNet on TPUs. +# Takes ~3 minutes, 15 seconds per epoch for v3-32. +# Note: This configuration uses a scaled per-replica batch size based on the number of devices. +runtime: + distribution_strategy: 'tpu' +train_dataset: + name: 'imagenet2012' + data_dir: null + builder: 'records' + split: 'train' + num_classes: 1000 + num_examples: 1281167 + batch_size: 128 + use_per_replica_batch_size: True + dtype: 'bfloat16' + augmenter: + name: 'autoaugment' +validation_dataset: + name: 'imagenet2012' + data_dir: null + builder: 'records' + split: 'validation' + num_classes: 1000 + num_examples: 50000 + batch_size: 128 + use_per_replica_batch_size: True + dtype: 'bfloat16' +model: + model_params: + model_name: 'efficientnet-b1' + overrides: + num_classes: 1000 + batch_norm: 'tpu' + dtype: 'bfloat16' + activation: 'swish' + optimizer: + name: 'rmsprop' + momentum: 0.9 + decay: 0.9 + moving_average_decay: 0.0 + lookahead: false + learning_rate: + name: 'exponential' + loss: + label_smoothing: 0.1 +train: + resume_checkpoint: True + epochs: 500 + set_epoch_loop: True +evaluation: + epochs_between_evals: 1 diff --git a/models/official/vision/image_classification/configs/examples/resnet/imagenet/gpu.yaml b/models/official/vision/image_classification/configs/examples/resnet/imagenet/gpu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..56844b81db70fbd5e8291a4c1c2eb60e3c488088 --- /dev/null +++ b/models/official/vision/image_classification/configs/examples/resnet/imagenet/gpu.yaml @@ -0,0 +1,51 @@ +# Training configuration for ResNet trained on ImageNet on GPUs. +# Reaches > 76.1% within 90 epochs. +# Note: This configuration uses a scaled per-replica batch size based on the number of devices. 
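+# Illustrative arithmetic: the batch_size below is per replica, so running this
+# configuration with 8 GPUs would give an effective global batch of
+# 256 * 8 = 2048, while each device still processes 256 images per step.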
+runtime: + distribution_strategy: 'mirrored' + num_gpus: 1 + batchnorm_spatial_persistent: True +train_dataset: + name: 'imagenet2012' + data_dir: null + builder: 'tfds' + split: 'train' + image_size: 224 + num_classes: 1000 + num_examples: 1281167 + batch_size: 256 + use_per_replica_batch_size: True + dtype: 'float16' + mean_subtract: True + standardize: True +validation_dataset: + name: 'imagenet2012' + data_dir: null + builder: 'tfds' + split: 'validation' + image_size: 224 + num_classes: 1000 + num_examples: 50000 + batch_size: 256 + use_per_replica_batch_size: True + dtype: 'float16' + mean_subtract: True + standardize: True +model: + name: 'resnet' + model_params: + rescale_inputs: False + optimizer: + name: 'momentum' + momentum: 0.9 + decay: 0.9 + epsilon: 0.001 + learning_rate: + name: 'piecewise_constant_with_warmup' + loss: + label_smoothing: 0.1 +train: + resume_checkpoint: True + epochs: 90 +evaluation: + epochs_between_evals: 1 diff --git a/models/official/vision/image_classification/configs/examples/resnet/imagenet/tpu.yaml b/models/official/vision/image_classification/configs/examples/resnet/imagenet/tpu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ae975c16251ac0a23877bf8f6804cdea6b2baadf --- /dev/null +++ b/models/official/vision/image_classification/configs/examples/resnet/imagenet/tpu.yaml @@ -0,0 +1,57 @@ +# Training configuration for ResNet trained on ImageNet on TPUs. +# Takes ~4 minutes, 30 seconds seconds per epoch for a v3-32. +# Reaches > 76.1% within 90 epochs. +# Note: This configuration uses a scaled per-replica batch size based on the number of devices. +runtime: + distribution_strategy: 'tpu' +train_dataset: + name: 'imagenet2012' + data_dir: null + builder: 'tfds' + split: 'train' + one_hot: False + image_size: 224 + num_classes: 1000 + num_examples: 1281167 + batch_size: 128 + use_per_replica_batch_size: True + mean_subtract: False + standardize: False + dtype: 'bfloat16' +validation_dataset: + name: 'imagenet2012' + data_dir: null + builder: 'tfds' + split: 'validation' + one_hot: False + image_size: 224 + num_classes: 1000 + num_examples: 50000 + batch_size: 128 + use_per_replica_batch_size: True + mean_subtract: False + standardize: False + dtype: 'bfloat16' +model: + name: 'resnet' + model_params: + rescale_inputs: True + optimizer: + name: 'momentum' + momentum: 0.9 + decay: 0.9 + epsilon: 0.001 + moving_average_decay: 0. + lookahead: False + learning_rate: + name: 'piecewise_constant_with_warmup' + loss: + label_smoothing: 0.1 +train: + callbacks: + enable_checkpoint_and_export: True + resume_checkpoint: True + epochs: 90 + set_epoch_loop: True +evaluation: + epochs_between_evals: 1 diff --git a/models/official/vision/image_classification/dataset_factory.py b/models/official/vision/image_classification/dataset_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..e9dad1268a7bed86f622f80ca28f4d485a0fab31 --- /dev/null +++ b/models/official/vision/image_classification/dataset_factory.py @@ -0,0 +1,536 @@ +# Lint as: python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Dataset utilities for vision tasks using TFDS and tf.data.Dataset.""" +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import os +from typing import Any, List, Optional, Tuple, Mapping, Union +from absl import logging +from dataclasses import dataclass +import tensorflow as tf +import tensorflow_datasets as tfds + +from official.modeling.hyperparams import base_config +from official.vision.image_classification import augment +from official.vision.image_classification import preprocessing + + +AUGMENTERS = { + 'autoaugment': augment.AutoAugment, + 'randaugment': augment.RandAugment, +} + + +@dataclass +class AugmentConfig(base_config.Config): + """Configuration for image augmenters. + + Attributes: + name: The name of the image augmentation to use. Possible options are + None (default), 'autoaugment', or 'randaugment'. + params: Any paramaters used to initialize the augmenter. + """ + name: Optional[str] = None + params: Optional[Mapping[str, Any]] = None + + def build(self) -> augment.ImageAugment: + """Build the augmenter using this config.""" + params = self.params or {} + augmenter = AUGMENTERS.get(self.name, None) + return augmenter(**params) if augmenter is not None else None + + +@dataclass +class DatasetConfig(base_config.Config): + """The base configuration for building datasets. + + Attributes: + name: The name of the Dataset. Usually should correspond to a TFDS dataset. + data_dir: The path where the dataset files are stored, if available. + filenames: Optional list of strings representing the TFRecord names. + builder: The builder type used to load the dataset. Value should be one of + 'tfds' (load using TFDS), 'records' (load from TFRecords), or 'synthetic' + (generate dummy synthetic data without reading from files). + split: The split of the dataset. Usually 'train', 'validation', or 'test'. + image_size: The size of the image in the dataset. This assumes that + `width` == `height`. Set to 'infer' to infer the image size from TFDS + info. This requires `name` to be a registered dataset in TFDS. + num_classes: The number of classes given by the dataset. Set to 'infer' + to infer the image size from TFDS info. This requires `name` to be a + registered dataset in TFDS. + num_channels: The number of channels given by the dataset. Set to 'infer' + to infer the image size from TFDS info. This requires `name` to be a + registered dataset in TFDS. + num_examples: The number of examples given by the dataset. Set to 'infer' + to infer the image size from TFDS info. This requires `name` to be a + registered dataset in TFDS. + batch_size: The base batch size for the dataset. + use_per_replica_batch_size: Whether to scale the batch size based on + available resources. If set to `True`, the dataset builder will return + batch_size multiplied by `num_devices`, the number of device replicas + (e.g., the number of GPUs or TPU cores). 
This setting should be `True` if + the strategy argument is passed to `build()` and `num_devices > 1`. + num_devices: The number of replica devices to use. This should be set by + `strategy.num_replicas_in_sync` when using a distribution strategy. + dtype: The desired dtype of the dataset. This will be set during + preprocessing. + one_hot: Whether to apply one hot encoding. Set to `True` to be able to use + label smoothing. + augmenter: The augmenter config to use. No augmentation is used by default. + download: Whether to download data using TFDS. + shuffle_buffer_size: The buffer size used for shuffling training data. + file_shuffle_buffer_size: The buffer size used for shuffling raw training + files. + skip_decoding: Whether to skip image decoding when loading from TFDS. + cache: whether to cache to dataset examples. Can be used to avoid re-reading + from disk on the second epoch. Requires significant memory overhead. + tf_data_service: The URI of a tf.data service to offload preprocessing onto + during training. The URI should be in the format "protocol://address", + e.g. "grpc://tf-data-service:5050". + mean_subtract: whether or not to apply mean subtraction to the dataset. + standardize: whether or not to apply standardization to the dataset. + """ + name: Optional[str] = None + data_dir: Optional[str] = None + filenames: Optional[List[str]] = None + builder: str = 'tfds' + split: str = 'train' + image_size: Union[int, str] = 'infer' + num_classes: Union[int, str] = 'infer' + num_channels: Union[int, str] = 'infer' + num_examples: Union[int, str] = 'infer' + batch_size: int = 128 + use_per_replica_batch_size: bool = True + num_devices: int = 1 + dtype: str = 'float32' + one_hot: bool = True + augmenter: AugmentConfig = AugmentConfig() + download: bool = False + shuffle_buffer_size: int = 10000 + file_shuffle_buffer_size: int = 1024 + skip_decoding: bool = True + cache: bool = False + tf_data_service: Optional[str] = None + mean_subtract: bool = False + standardize: bool = False + + @property + def has_data(self): + """Whether this dataset is has any data associated with it.""" + return self.name or self.data_dir or self.filenames + + +@dataclass +class ImageNetConfig(DatasetConfig): + """The base ImageNet dataset config.""" + name: str = 'imagenet2012' + # Note: for large datasets like ImageNet, using records is faster than tfds + builder: str = 'records' + image_size: int = 224 + batch_size: int = 128 + + +@dataclass +class Cifar10Config(DatasetConfig): + """The base CIFAR-10 dataset config.""" + name: str = 'cifar10' + image_size: int = 224 + batch_size: int = 128 + download: bool = True + cache: bool = True + + +class DatasetBuilder: + """An object for building datasets. + + Allows building various pipelines fetching examples, preprocessing, etc. + Maintains additional state information calculated from the dataset, i.e., + training set split, batch size, and number of steps (batches). 
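+
+  Illustrative usage (a sketch; `strategy` would be a `tf.distribute.Strategy`
+  created elsewhere, and the override values are hypothetical):
+
+    builder = DatasetBuilder(ImageNetConfig(split='train'), num_devices=8)
+    dataset = builder.build(strategy)
+    steps_per_epoch = builder.num_steps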
+ """ + + def __init__(self, config: DatasetConfig, **overrides: Any): + """Initialize the builder from the config.""" + self.config = config.replace(**overrides) + self.builder_info = None + + if self.config.augmenter is not None: + logging.info('Using augmentation: %s', self.config.augmenter.name) + self.augmenter = self.config.augmenter.build() + else: + self.augmenter = None + + @property + def is_training(self) -> bool: + """Whether this is the training set.""" + return self.config.split == 'train' + + @property + def batch_size(self) -> int: + """The batch size, multiplied by the number of replicas (if configured).""" + if self.config.use_per_replica_batch_size: + return self.config.batch_size * self.config.num_devices + else: + return self.config.batch_size + + @property + def global_batch_size(self): + """The global batch size across all replicas.""" + return self.batch_size + + @property + def local_batch_size(self): + """The base unscaled batch size.""" + if self.config.use_per_replica_batch_size: + return self.config.batch_size + else: + return self.config.batch_size // self.config.num_devices + + @property + def num_steps(self) -> int: + """The number of steps (batches) to exhaust this dataset.""" + # Always divide by the global batch size to get the correct # of steps + return self.num_examples // self.global_batch_size + + @property + def dtype(self) -> tf.dtypes.DType: + """Converts the config's dtype string to a tf dtype. + + Returns: + A mapping from string representation of a dtype to the `tf.dtypes.DType`. + + Raises: + ValueError if the config's dtype is not supported. + + """ + dtype_map = { + 'float32': tf.float32, + 'bfloat16': tf.bfloat16, + 'float16': tf.float16, + 'fp32': tf.float32, + 'bf16': tf.bfloat16, + } + try: + return dtype_map[self.config.dtype] + except: + raise ValueError('Invalid DType provided. Supported types: {}'.format( + dtype_map.keys())) + + @property + def image_size(self) -> int: + """The size of each image (can be inferred from the dataset).""" + + if self.config.image_size == 'infer': + return self.info.features['image'].shape[0] + else: + return int(self.config.image_size) + + @property + def num_channels(self) -> int: + """The number of image channels (can be inferred from the dataset).""" + if self.config.num_channels == 'infer': + return self.info.features['image'].shape[-1] + else: + return int(self.config.num_channels) + + @property + def num_examples(self) -> int: + """The number of examples (can be inferred from the dataset).""" + if self.config.num_examples == 'infer': + return self.info.splits[self.config.split].num_examples + else: + return int(self.config.num_examples) + + @property + def num_classes(self) -> int: + """The number of classes (can be inferred from the dataset).""" + if self.config.num_classes == 'infer': + return self.info.features['label'].num_classes + else: + return int(self.config.num_classes) + + @property + def info(self) -> tfds.core.DatasetInfo: + """The TFDS dataset info, if available.""" + if self.builder_info is None: + self.builder_info = tfds.builder(self.config.name).info + return self.builder_info + + def build(self, strategy: tf.distribute.Strategy = None) -> tf.data.Dataset: + """Construct a dataset end-to-end and return it using an optional strategy. + + Args: + strategy: a strategy that, if passed, will distribute the dataset + according to that strategy. If passed and `num_devices > 1`, + `use_per_replica_batch_size` must be set to `True`. 
+ + Returns: + A TensorFlow dataset outputting batched images and labels. + """ + if strategy: + if strategy.num_replicas_in_sync != self.config.num_devices: + logging.warn('Passed a strategy with %d devices, but expected' + '%d devices.', + strategy.num_replicas_in_sync, + self.config.num_devices) + dataset = strategy.experimental_distribute_datasets_from_function( + self._build) + else: + dataset = self._build() + + return dataset + + def _build(self, input_context: tf.distribute.InputContext = None + ) -> tf.data.Dataset: + """Construct a dataset end-to-end and return it. + + Args: + input_context: An optional context provided by `tf.distribute` for + cross-replica training. + + Returns: + A TensorFlow dataset outputting batched images and labels. + """ + builders = { + 'tfds': self.load_tfds, + 'records': self.load_records, + 'synthetic': self.load_synthetic, + } + + builder = builders.get(self.config.builder, None) + + if builder is None: + raise ValueError('Unknown builder type {}'.format(self.config.builder)) + + self.input_context = input_context + dataset = builder() + dataset = self.pipeline(dataset) + + return dataset + + def load_tfds(self) -> tf.data.Dataset: + """Return a dataset loading files from TFDS.""" + + logging.info('Using TFDS to load data.') + + builder = tfds.builder(self.config.name, + data_dir=self.config.data_dir) + + if self.config.download: + builder.download_and_prepare() + + decoders = {} + + if self.config.skip_decoding: + decoders['image'] = tfds.decode.SkipDecoding() + + read_config = tfds.ReadConfig( + interleave_cycle_length=10, + interleave_block_length=1, + input_context=self.input_context) + + dataset = builder.as_dataset( + split=self.config.split, + as_supervised=True, + shuffle_files=True, + decoders=decoders, + read_config=read_config) + + return dataset + + def load_records(self) -> tf.data.Dataset: + """Return a dataset loading files with TFRecords.""" + logging.info('Using TFRecords to load data.') + if self.config.filenames is None: + if self.config.data_dir is None: + raise ValueError('Dataset must specify a path for the data files.') + + file_pattern = os.path.join(self.config.data_dir, + '{}*'.format(self.config.split)) + dataset = tf.data.Dataset.list_files(file_pattern, shuffle=False) + else: + dataset = tf.data.Dataset.from_tensor_slices(self.config.filenames) + + return dataset + + def load_synthetic(self) -> tf.data.Dataset: + """Return a dataset generating dummy synthetic data.""" + logging.info('Generating a synthetic dataset.') + + def generate_data(_): + image = tf.zeros([self.image_size, self.image_size, self.num_channels], + dtype=self.dtype) + label = tf.zeros([1], dtype=tf.int32) + return image, label + + dataset = tf.data.Dataset.range(1) + dataset = dataset.repeat() + dataset = dataset.map(generate_data, + num_parallel_calls=tf.data.experimental.AUTOTUNE) + return dataset + + def pipeline(self, dataset: tf.data.Dataset) -> tf.data.Dataset: + """Build a pipeline fetching, shuffling, and preprocessing the dataset. + + Args: + dataset: A `tf.data.Dataset` that loads raw files. + + Returns: + A TensorFlow dataset outputting batched images and labels. 
+ """ + if (self.config.builder != 'tfds' and self.input_context + and self.input_context.num_input_pipelines > 1): + dataset = dataset.shard(self.input_context.num_input_pipelines, + self.input_context.input_pipeline_id) + logging.info('Sharding the dataset: input_pipeline_id=%d ' + 'num_input_pipelines=%d', + self.input_context.num_input_pipelines, + self.input_context.input_pipeline_id) + + if self.is_training and self.config.builder == 'records': + # Shuffle the input files. + dataset.shuffle(buffer_size=self.config.file_shuffle_buffer_size) + + if self.is_training and not self.config.cache: + dataset = dataset.repeat() + + if self.config.builder == 'records': + # Read the data from disk in parallel + dataset = dataset.interleave( + tf.data.TFRecordDataset, + cycle_length=10, + block_length=1, + num_parallel_calls=tf.data.experimental.AUTOTUNE) + + if self.config.cache: + dataset = dataset.cache() + + if self.is_training: + dataset = dataset.shuffle(self.config.shuffle_buffer_size) + dataset = dataset.repeat() + + # Parse, pre-process, and batch the data in parallel + if self.config.builder == 'records': + preprocess = self.parse_record + else: + preprocess = self.preprocess + dataset = dataset.map(preprocess, + num_parallel_calls=tf.data.experimental.AUTOTUNE) + + if self.input_context and self.config.num_devices > 1: + if not self.config.use_per_replica_batch_size: + raise ValueError( + 'The builder does not support a global batch size with more than ' + 'one replica. Got {} replicas. Please set a ' + '`per_replica_batch_size` and enable ' + '`use_per_replica_batch_size=True`.'.format( + self.config.num_devices)) + + # The batch size of the dataset will be multiplied by the number of + # replicas automatically when strategy.distribute_datasets_from_function + # is called, so we use local batch size here. 
+ dataset = dataset.batch(self.local_batch_size, + drop_remainder=self.is_training) + else: + dataset = dataset.batch(self.global_batch_size, + drop_remainder=self.is_training) + + # Prefetch overlaps in-feed with training + dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) + + if self.config.tf_data_service: + if not hasattr(tf.data.experimental, 'service'): + raise ValueError('The tf_data_service flag requires Tensorflow version ' + '>= 2.3.0, but the version is {}'.format( + tf.__version__)) + dataset = dataset.apply( + tf.data.experimental.service.distribute( + processing_mode='parallel_epochs', + service=self.config.tf_data_service, + job_name='resnet_train')) + dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) + + return dataset + + def parse_record(self, record: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]: + """Parse an ImageNet record from a serialized string Tensor.""" + keys_to_features = { + 'image/encoded': + tf.io.FixedLenFeature((), tf.string, ''), + 'image/format': + tf.io.FixedLenFeature((), tf.string, 'jpeg'), + 'image/class/label': + tf.io.FixedLenFeature([], tf.int64, -1), + 'image/class/text': + tf.io.FixedLenFeature([], tf.string, ''), + 'image/object/bbox/xmin': + tf.io.VarLenFeature(dtype=tf.float32), + 'image/object/bbox/ymin': + tf.io.VarLenFeature(dtype=tf.float32), + 'image/object/bbox/xmax': + tf.io.VarLenFeature(dtype=tf.float32), + 'image/object/bbox/ymax': + tf.io.VarLenFeature(dtype=tf.float32), + 'image/object/class/label': + tf.io.VarLenFeature(dtype=tf.int64), + } + + parsed = tf.io.parse_single_example(record, keys_to_features) + + label = tf.reshape(parsed['image/class/label'], shape=[1]) + + # Subtract one so that labels are in [0, 1000) + label -= 1 + + image_bytes = tf.reshape(parsed['image/encoded'], shape=[]) + image, label = self.preprocess(image_bytes, label) + + return image, label + + def preprocess(self, image: tf.Tensor, label: tf.Tensor + ) -> Tuple[tf.Tensor, tf.Tensor]: + """Apply image preprocessing and augmentation to the image and label.""" + if self.is_training: + image = preprocessing.preprocess_for_train( + image, + image_size=self.image_size, + mean_subtract=self.config.mean_subtract, + standardize=self.config.standardize, + dtype=self.dtype, + augmenter=self.augmenter) + else: + image = preprocessing.preprocess_for_eval( + image, + image_size=self.image_size, + num_channels=self.num_channels, + mean_subtract=self.config.mean_subtract, + standardize=self.config.standardize, + dtype=self.dtype) + + label = tf.cast(label, tf.int32) + if self.config.one_hot: + label = tf.one_hot(label, self.num_classes) + label = tf.reshape(label, [self.num_classes]) + + return image, label + + @classmethod + def from_params(cls, *args, **kwargs): + """Construct a dataset builder from a default config and any overrides.""" + config = DatasetConfig.from_args(*args, **kwargs) + return cls(config) diff --git a/models/official/vision/image_classification/efficientnet/__init__.py b/models/official/vision/image_classification/efficientnet/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/official/vision/image_classification/efficientnet/common_modules.py b/models/official/vision/image_classification/efficientnet/common_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..9c9c2097d2398ec78cae5e1265478f804860f944 --- /dev/null +++ b/models/official/vision/image_classification/efficientnet/common_modules.py @@ -0,0 
+1,117 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Common modeling utilities.""" +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import numpy as np +import tensorflow as tf +import tensorflow.compat.v1 as tf1 +from typing import Text, Optional + +from tensorflow.python.tpu import tpu_function + + +@tf.keras.utils.register_keras_serializable(package='Vision') +class TpuBatchNormalization(tf.keras.layers.BatchNormalization): + """Cross replica batch normalization.""" + + def __init__(self, fused: Optional[bool] = False, **kwargs): + if fused in (True, None): + raise ValueError('TpuBatchNormalization does not support fused=True.') + super(TpuBatchNormalization, self).__init__(fused=fused, **kwargs) + + def _cross_replica_average(self, t: tf.Tensor, num_shards_per_group: int): + """Calculates the average value of input tensor across TPU replicas.""" + num_shards = tpu_function.get_tpu_context().number_of_shards + group_assignment = None + if num_shards_per_group > 1: + if num_shards % num_shards_per_group != 0: + raise ValueError( + 'num_shards: %d mod shards_per_group: %d, should be 0' % + (num_shards, num_shards_per_group)) + num_groups = num_shards // num_shards_per_group + group_assignment = [[ + x for x in range(num_shards) if x // num_shards_per_group == y + ] for y in range(num_groups)] + return tf1.tpu.cross_replica_sum(t, group_assignment) / tf.cast( + num_shards_per_group, t.dtype) + + def _moments(self, inputs: tf.Tensor, reduction_axes: int, keep_dims: int): + """Compute the mean and variance: it overrides the original _moments.""" + shard_mean, shard_variance = super(TpuBatchNormalization, self)._moments( + inputs, reduction_axes, keep_dims=keep_dims) + + num_shards = tpu_function.get_tpu_context().number_of_shards or 1 + if num_shards <= 8: # Skip cross_replica for 2x2 or smaller slices. + num_shards_per_group = 1 + else: + num_shards_per_group = max(8, num_shards // 8) + if num_shards_per_group > 1: + # Compute variance using: Var[X]= E[X^2] - E[X]^2. + shard_square_of_mean = tf.math.square(shard_mean) + shard_mean_of_square = shard_variance + shard_square_of_mean + group_mean = self._cross_replica_average(shard_mean, num_shards_per_group) + group_mean_of_square = self._cross_replica_average( + shard_mean_of_square, num_shards_per_group) + group_variance = group_mean_of_square - tf.math.square(group_mean) + return (group_mean, group_variance) + else: + return (shard_mean, shard_variance) + + +def get_batch_norm(batch_norm_type: Text) -> tf.keras.layers.BatchNormalization: + """A helper to create a batch normalization getter. + + Args: + batch_norm_type: The type of batch normalization layer implementation. `tpu` + will use `TpuBatchNormalization`. + + Returns: + An instance of `tf.keras.layers.BatchNormalization`. 
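+ + Example (illustrative usage; the returned layer class is instantiated by + the caller): + + ```python + batch_norm = get_batch_norm('tpu') + layer = batch_norm(axis=-1, momentum=0.99, epsilon=1e-3) + ```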
+ """ + if batch_norm_type == 'tpu': + return TpuBatchNormalization + + return tf.keras.layers.BatchNormalization + + +def count_params(model, trainable_only=True): + """Returns the count of all model parameters, or just trainable ones.""" + if not trainable_only: + return model.count_params() + else: + return int(np.sum([tf.keras.backend.count_params(p) + for p in model.trainable_weights])) + + +def load_weights(model: tf.keras.Model, + model_weights_path: Text, + weights_format: Text = 'saved_model'): + """Load model weights from the given file path. + + Args: + model: the model to load weights into + model_weights_path: the path of the model weights + weights_format: the model weights format. One of 'saved_model', 'h5', + or 'checkpoint'. + """ + if weights_format == 'saved_model': + loaded_model = tf.keras.models.load_model(model_weights_path) + model.set_weights(loaded_model.get_weights()) + else: + model.load_weights(model_weights_path) diff --git a/models/official/vision/image_classification/efficientnet/efficientnet_config.py b/models/official/vision/image_classification/efficientnet/efficientnet_config.py new file mode 100644 index 0000000000000000000000000000000000000000..a758cc63c944463ebf184eaeae26cebd5935031a --- /dev/null +++ b/models/official/vision/image_classification/efficientnet/efficientnet_config.py @@ -0,0 +1,78 @@ +# Lint as: python3 +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Configuration definitions for EfficientNet losses, learning rates, and optimizers.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from typing import Any, Mapping + +import dataclasses + +from official.modeling.hyperparams import base_config +from official.vision.image_classification.configs import base_configs + + +@dataclasses.dataclass +class EfficientNetModelConfig(base_configs.ModelConfig): + """Configuration for the EfficientNet model. + + This configuration will default to settings used for training efficientnet-b0 + on a v3-8 TPU on ImageNet. + + Attributes: + name: The name of the model. Defaults to 'EfficientNet'. + num_classes: The number of classes in the model. + model_params: A dictionary that represents the parameters of the + EfficientNet model. These will be passed in to the "from_name" function. + loss: The configuration for loss. Defaults to a categorical cross entropy + implementation. + optimizer: The configuration for optimizations. Defaults to an RMSProp + configuration. + learning_rate: The configuration for learning rate. Defaults to an + exponential configuration. 
+ """ + name: str = 'EfficientNet' + num_classes: int = 1000 + model_params: base_config.Config = dataclasses.field( + default_factory=lambda: { + 'model_name': 'efficientnet-b0', + 'model_weights_path': '', + 'weights_format': 'saved_model', + 'overrides': { + 'batch_norm': 'default', + 'rescale_input': True, + 'num_classes': 1000, + 'activation': 'swish', + 'dtype': 'float32', + } + }) + loss: base_configs.LossConfig = base_configs.LossConfig( + name='categorical_crossentropy', label_smoothing=0.1) + optimizer: base_configs.OptimizerConfig = base_configs.OptimizerConfig( + name='rmsprop', + decay=0.9, + epsilon=0.001, + momentum=0.9, + moving_average_decay=None) + learning_rate: base_configs.LearningRateConfig = base_configs.LearningRateConfig( # pylint: disable=line-too-long + name='exponential', + initial_lr=0.008, + decay_epochs=2.4, + decay_rate=0.97, + warmup_epochs=5, + scale_by_batch_size=1. / 128., + staircase=True) diff --git a/models/official/vision/image_classification/efficientnet/efficientnet_model.py b/models/official/vision/image_classification/efficientnet/efficientnet_model.py new file mode 100644 index 0000000000000000000000000000000000000000..ab81fc25d1200557c99f77424d34c74cf8774d84 --- /dev/null +++ b/models/official/vision/image_classification/efficientnet/efficientnet_model.py @@ -0,0 +1,505 @@ +# Lint as: python3 +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Contains definitions for EfficientNet model. + +[1] Mingxing Tan, Quoc V. Le + EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks. + ICML'19, https://arxiv.org/abs/1905.11946 +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import os +from typing import Any, Dict, Optional, Text, Tuple + +from absl import logging +from dataclasses import dataclass +import tensorflow as tf + +from official.modeling import tf_utils +from official.modeling.hyperparams import base_config +from official.vision.image_classification import preprocessing +from official.vision.image_classification.efficientnet import common_modules + + +@dataclass +class BlockConfig(base_config.Config): + """Config for a single MB Conv Block.""" + input_filters: int = 0 + output_filters: int = 0 + kernel_size: int = 3 + num_repeat: int = 1 + expand_ratio: int = 1 + strides: Tuple[int, int] = (1, 1) + se_ratio: Optional[float] = None + id_skip: bool = True + fused_conv: bool = False + conv_type: str = 'depthwise' + + +@dataclass +class ModelConfig(base_config.Config): + """Default Config for Efficientnet-B0.""" + width_coefficient: float = 1.0 + depth_coefficient: float = 1.0 + resolution: int = 224 + dropout_rate: float = 0.2 + blocks: Tuple[BlockConfig, ...] 
= ( + # (input_filters, output_filters, kernel_size, num_repeat, + # expand_ratio, strides, se_ratio) + # pylint: disable=bad-whitespace + BlockConfig.from_args(32, 16, 3, 1, 1, (1, 1), 0.25), + BlockConfig.from_args(16, 24, 3, 2, 6, (2, 2), 0.25), + BlockConfig.from_args(24, 40, 5, 2, 6, (2, 2), 0.25), + BlockConfig.from_args(40, 80, 3, 3, 6, (2, 2), 0.25), + BlockConfig.from_args(80, 112, 5, 3, 6, (1, 1), 0.25), + BlockConfig.from_args(112, 192, 5, 4, 6, (2, 2), 0.25), + BlockConfig.from_args(192, 320, 3, 1, 6, (1, 1), 0.25), + # pylint: enable=bad-whitespace + ) + stem_base_filters: int = 32 + top_base_filters: int = 1280 + activation: str = 'simple_swish' + batch_norm: str = 'default' + bn_momentum: float = 0.99 + bn_epsilon: float = 1e-3 + # While the original implementation used a weight decay of 1e-5, + # tf.nn.l2_loss divides it by 2, so we halve this to compensate in Keras + weight_decay: float = 5e-6 + drop_connect_rate: float = 0.2 + depth_divisor: int = 8 + min_depth: Optional[int] = None + use_se: bool = True + input_channels: int = 3 + num_classes: int = 1000 + model_name: str = 'efficientnet' + rescale_input: bool = True + data_format: str = 'channels_last' + dtype: str = 'float32' + + +MODEL_CONFIGS = { + # (width, depth, resolution, dropout) + 'efficientnet-b0': ModelConfig.from_args(1.0, 1.0, 224, 0.2), + 'efficientnet-b1': ModelConfig.from_args(1.0, 1.1, 240, 0.2), + 'efficientnet-b2': ModelConfig.from_args(1.1, 1.2, 260, 0.3), + 'efficientnet-b3': ModelConfig.from_args(1.2, 1.4, 300, 0.3), + 'efficientnet-b4': ModelConfig.from_args(1.4, 1.8, 380, 0.4), + 'efficientnet-b5': ModelConfig.from_args(1.6, 2.2, 456, 0.4), + 'efficientnet-b6': ModelConfig.from_args(1.8, 2.6, 528, 0.5), + 'efficientnet-b7': ModelConfig.from_args(2.0, 3.1, 600, 0.5), + 'efficientnet-b8': ModelConfig.from_args(2.2, 3.6, 672, 0.5), + 'efficientnet-l2': ModelConfig.from_args(4.3, 5.3, 800, 0.5), +} + +CONV_KERNEL_INITIALIZER = { + 'class_name': 'VarianceScaling', + 'config': { + 'scale': 2.0, + 'mode': 'fan_out', + # Note: this is a truncated normal distribution + 'distribution': 'normal' + } +} + +DENSE_KERNEL_INITIALIZER = { + 'class_name': 'VarianceScaling', + 'config': { + 'scale': 1 / 3.0, + 'mode': 'fan_out', + 'distribution': 'uniform' + } +} + + +def round_filters(filters: int, + config: ModelConfig) -> int: + """Round number of filters based on width coefficient.""" + width_coefficient = config.width_coefficient + min_depth = config.min_depth + divisor = config.depth_divisor + orig_filters = filters + + if not width_coefficient: + return filters + + filters *= width_coefficient + min_depth = min_depth or divisor + new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. 
+ if new_filters < 0.9 * filters: + new_filters += divisor + logging.info('round_filter input=%s output=%s', orig_filters, new_filters) + return int(new_filters) + + +def round_repeats(repeats: int, depth_coefficient: float) -> int: + """Round number of repeats based on depth coefficient.""" + return int(math.ceil(depth_coefficient * repeats)) + + +def conv2d_block(inputs: tf.Tensor, + conv_filters: Optional[int], + config: ModelConfig, + kernel_size: Any = (1, 1), + strides: Any = (1, 1), + use_batch_norm: bool = True, + use_bias: bool = False, + activation: Any = None, + depthwise: bool = False, + name: Text = None): + """A conv2d followed by batch norm and an activation.""" + batch_norm = common_modules.get_batch_norm(config.batch_norm) + bn_momentum = config.bn_momentum + bn_epsilon = config.bn_epsilon + data_format = tf.keras.backend.image_data_format() + weight_decay = config.weight_decay + + name = name or '' + + # Collect args based on what kind of conv2d block is desired + init_kwargs = { + 'kernel_size': kernel_size, + 'strides': strides, + 'use_bias': use_bias, + 'padding': 'same', + 'name': name + '_conv2d', + 'kernel_regularizer': tf.keras.regularizers.l2(weight_decay), + 'bias_regularizer': tf.keras.regularizers.l2(weight_decay), + } + + if depthwise: + conv2d = tf.keras.layers.DepthwiseConv2D + init_kwargs.update({'depthwise_initializer': CONV_KERNEL_INITIALIZER}) + else: + conv2d = tf.keras.layers.Conv2D + init_kwargs.update({'filters': conv_filters, + 'kernel_initializer': CONV_KERNEL_INITIALIZER}) + + x = conv2d(**init_kwargs)(inputs) + + if use_batch_norm: + bn_axis = 1 if data_format == 'channels_first' else -1 + x = batch_norm(axis=bn_axis, + momentum=bn_momentum, + epsilon=bn_epsilon, + name=name + '_bn')(x) + + if activation is not None: + x = tf.keras.layers.Activation(activation, + name=name + '_activation')(x) + return x + + +def mb_conv_block(inputs: tf.Tensor, + block: BlockConfig, + config: ModelConfig, + prefix: Text = None): + """Mobile Inverted Residual Bottleneck. + + Args: + inputs: the Keras input to the block + block: BlockConfig, arguments to create a Block + config: ModelConfig, a set of model parameters + prefix: prefix for naming all layers + + Returns: + the output of the block + """ + use_se = config.use_se + activation = tf_utils.get_activation(config.activation) + drop_connect_rate = config.drop_connect_rate + data_format = tf.keras.backend.image_data_format() + use_depthwise = block.conv_type != 'no_depthwise' + prefix = prefix or '' + + filters = block.input_filters * block.expand_ratio + + x = inputs + + if block.fused_conv: + # If we use fused mbconv, skip expansion and use regular conv. 
+ x = conv2d_block(x, + filters, + config, + kernel_size=block.kernel_size, + strides=block.strides, + activation=activation, + name=prefix + 'fused') + else: + if block.expand_ratio != 1: + # Expansion phase + kernel_size = (1, 1) if use_depthwise else (3, 3) + x = conv2d_block(x, + filters, + config, + kernel_size=kernel_size, + activation=activation, + name=prefix + 'expand') + + # Depthwise Convolution + if use_depthwise: + x = conv2d_block(x, + conv_filters=None, + config=config, + kernel_size=block.kernel_size, + strides=block.strides, + activation=activation, + depthwise=True, + name=prefix + 'depthwise') + + # Squeeze and Excitation phase + if use_se: + assert block.se_ratio is not None + assert 0 < block.se_ratio <= 1 + num_reduced_filters = max(1, int( + block.input_filters * block.se_ratio + )) + + if data_format == 'channels_first': + se_shape = (filters, 1, 1) + else: + se_shape = (1, 1, filters) + + se = tf.keras.layers.GlobalAveragePooling2D(name=prefix + 'se_squeeze')(x) + se = tf.keras.layers.Reshape(se_shape, name=prefix + 'se_reshape')(se) + + se = conv2d_block(se, + num_reduced_filters, + config, + use_bias=True, + use_batch_norm=False, + activation=activation, + name=prefix + 'se_reduce') + se = conv2d_block(se, + filters, + config, + use_bias=True, + use_batch_norm=False, + activation='sigmoid', + name=prefix + 'se_expand') + x = tf.keras.layers.multiply([x, se], name=prefix + 'se_excite') + + # Output phase + x = conv2d_block(x, + block.output_filters, + config, + activation=None, + name=prefix + 'project') + + # Add identity so that quantization-aware training can insert quantization + # ops correctly. + x = tf.keras.layers.Activation(tf_utils.get_activation('identity'), + name=prefix + 'id')(x) + + if (block.id_skip + and all(s == 1 for s in block.strides) + and block.input_filters == block.output_filters): + if drop_connect_rate and drop_connect_rate > 0: + # Apply dropconnect + # The only difference between dropout and dropconnect in TF is scaling by + # drop_connect_rate during training. See: + # https://github.com/keras-team/keras/pull/9898#issuecomment-380577612 + x = tf.keras.layers.Dropout(drop_connect_rate, + noise_shape=(None, 1, 1, 1), + name=prefix + 'drop')(x) + + x = tf.keras.layers.add([x, inputs], name=prefix + 'add') + + return x + + +def efficientnet(image_input: tf.keras.layers.Input, + config: ModelConfig): + """Creates an EfficientNet graph given the model parameters. + + This function is wrapped by the `EfficientNet` class to make a tf.keras.Model. + + Args: + image_input: the input batch of images + config: the model config + + Returns: + the output of efficientnet + """ + depth_coefficient = config.depth_coefficient + blocks = config.blocks + stem_base_filters = config.stem_base_filters + top_base_filters = config.top_base_filters + activation = tf_utils.get_activation(config.activation) + dropout_rate = config.dropout_rate + drop_connect_rate = config.drop_connect_rate + num_classes = config.num_classes + input_channels = config.input_channels + rescale_input = config.rescale_input + data_format = tf.keras.backend.image_data_format() + dtype = config.dtype + weight_decay = config.weight_decay + + x = image_input + if data_format == 'channels_first': + # Happens on GPU/TPU if available. 
+ x = tf.keras.layers.Permute((3, 1, 2))(x) + if rescale_input: + x = preprocessing.normalize_images(x, + num_channels=input_channels, + dtype=dtype, + data_format=data_format) + + # Build stem + x = conv2d_block(x, + round_filters(stem_base_filters, config), + config, + kernel_size=[3, 3], + strides=[2, 2], + activation=activation, + name='stem') + + # Build blocks + num_blocks_total = sum( + round_repeats(block.num_repeat, depth_coefficient) for block in blocks) + block_num = 0 + + for stack_idx, block in enumerate(blocks): + assert block.num_repeat > 0 + # Update block input and output filters based on depth multiplier + block = block.replace( + input_filters=round_filters(block.input_filters, config), + output_filters=round_filters(block.output_filters, config), + num_repeat=round_repeats(block.num_repeat, depth_coefficient)) + + # The first block needs to take care of stride and filter size increase + drop_rate = drop_connect_rate * float(block_num) / num_blocks_total + config = config.replace(drop_connect_rate=drop_rate) + block_prefix = 'stack_{}/block_0/'.format(stack_idx) + x = mb_conv_block(x, block, config, block_prefix) + block_num += 1 + if block.num_repeat > 1: + block = block.replace( + input_filters=block.output_filters, + strides=[1, 1] + ) + + for block_idx in range(block.num_repeat - 1): + drop_rate = drop_connect_rate * float(block_num) / num_blocks_total + config = config.replace(drop_connect_rate=drop_rate) + block_prefix = 'stack_{}/block_{}/'.format(stack_idx, block_idx + 1) + x = mb_conv_block(x, block, config, prefix=block_prefix) + block_num += 1 + + # Build top + x = conv2d_block(x, + round_filters(top_base_filters, config), + config, + activation=activation, + name='top') + + # Build classifier + x = tf.keras.layers.GlobalAveragePooling2D(name='top_pool')(x) + if dropout_rate and dropout_rate > 0: + x = tf.keras.layers.Dropout(dropout_rate, name='top_dropout')(x) + x = tf.keras.layers.Dense( + num_classes, + kernel_initializer=DENSE_KERNEL_INITIALIZER, + kernel_regularizer=tf.keras.regularizers.l2(weight_decay), + bias_regularizer=tf.keras.regularizers.l2(weight_decay), + name='logits')(x) + x = tf.keras.layers.Activation('softmax', name='probs')(x) + + return x + + +@tf.keras.utils.register_keras_serializable(package='Vision') +class EfficientNet(tf.keras.Model): + """Wrapper class for an EfficientNet Keras model. + + Contains helper methods to build, manage, and save metadata about the model. + """ + + def __init__(self, + config: ModelConfig = None, + overrides: Dict[Text, Any] = None): + """Create an EfficientNet model. 
+ + Args: + config: (optional) the main model parameters to create the model + overrides: (optional) a dict containing keys that can override + config + """ + overrides = overrides or {} + config = config or ModelConfig() + + self.config = config.replace(**overrides) + + input_channels = self.config.input_channels + model_name = self.config.model_name + input_shape = (None, None, input_channels) # Should handle any size image + image_input = tf.keras.layers.Input(shape=input_shape) + + output = efficientnet(image_input, self.config) + + # Cast to float32 in case we have a different model dtype + output = tf.cast(output, tf.float32) + + logging.info('Building model %s with params %s', + model_name, + self.config) + + super(EfficientNet, self).__init__( + inputs=image_input, outputs=output, name=model_name) + + @classmethod + def from_name(cls, + model_name: Text, + model_weights_path: Text = None, + weights_format: Text = 'saved_model', + overrides: Dict[Text, Any] = None): + """Construct an EfficientNet model from a predefined model name. + + E.g., `EfficientNet.from_name('efficientnet-b0')`. + + Args: + model_name: the predefined model name + model_weights_path: the path to the weights (h5 file or saved model dir) + weights_format: the model weights format. One of 'saved_model', 'h5', + or 'checkpoint'. + overrides: (optional) a dict containing keys that can override config + + Returns: + A constructed EfficientNet instance. + """ + model_configs = dict(MODEL_CONFIGS) + overrides = dict(overrides) if overrides else {} + + # One can define their own custom models if necessary + model_configs.update(overrides.pop('model_config', {})) + + if model_name not in model_configs: + raise ValueError('Unknown model name {}'.format(model_name)) + + config = model_configs[model_name] + + model = cls(config=config, overrides=overrides) + + if model_weights_path: + common_modules.load_weights(model, + model_weights_path, + weights_format=weights_format) + + return model diff --git a/models/official/vision/image_classification/efficientnet/tfhub_export.py b/models/official/vision/image_classification/efficientnet/tfhub_export.py new file mode 100644 index 0000000000000000000000000000000000000000..3be8608a5cfc25442f5f936b4052f90b89c6cfce --- /dev/null +++ b/models/official/vision/image_classification/efficientnet/tfhub_export.py @@ -0,0 +1,69 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""A script to export TF-Hub SavedModel.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import os + +from absl import app +from absl import flags + +import tensorflow as tf + +from official.vision.image_classification.efficientnet import efficientnet_model + +FLAGS = flags.FLAGS + +flags.DEFINE_string("model_name", None, + "EfficientNet model name.") +flags.DEFINE_string("model_path", None, + "File path to TF model checkpoint.") +flags.DEFINE_string("export_path", None, + "TF-Hub SavedModel destination path to export.") + + +def export_tfhub(model_path, hub_destination, model_name): + """Restores a tf.keras.Model and saves for TF-Hub.""" + model_configs = dict(efficientnet_model.MODEL_CONFIGS) + config = model_configs[model_name] + + image_input = tf.keras.layers.Input( + shape=(None, None, 3), name="image_input", dtype=tf.float32) + x = image_input * 255.0 + ouputs = efficientnet_model.efficientnet(x, config) + hub_model = tf.keras.Model(image_input, ouputs) + ckpt = tf.train.Checkpoint(model=hub_model) + ckpt.restore(model_path).assert_existing_objects_matched() + hub_model.save( + os.path.join(hub_destination, "classification"), include_optimizer=False) + + feature_vector_output = hub_model.get_layer(name="top_pool").get_output_at(0) + hub_model2 = tf.keras.Model(image_input, feature_vector_output) + hub_model2.save( + os.path.join(hub_destination, "feature-vector"), include_optimizer=False) + + +def main(argv): + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") + + export_tfhub(FLAGS.model_path, FLAGS.export_path, FLAGS.model_name) + +if __name__ == "__main__": + app.run(main) diff --git a/models/official/vision/image_classification/learning_rate.py b/models/official/vision/image_classification/learning_rate.py new file mode 100644 index 0000000000000000000000000000000000000000..1c78b04bc6297a08a8bc7823dccc00f464e05ad4 --- /dev/null +++ b/models/official/vision/image_classification/learning_rate.py @@ -0,0 +1,164 @@ +# Lint as: python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Learning rate utilities for vision tasks.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from typing import Any, List, Mapping + +import numpy as np +import tensorflow as tf + +BASE_LEARNING_RATE = 0.1 + + +class WarmupDecaySchedule(tf.keras.optimizers.schedules.LearningRateSchedule): + """A wrapper for LearningRateSchedule that includes warmup steps.""" + + def __init__( + self, + lr_schedule: tf.keras.optimizers.schedules.LearningRateSchedule, + warmup_steps: int): + """Add warmup decay to a learning rate schedule. 
+ + Args: + lr_schedule: base learning rate scheduler + warmup_steps: number of warmup steps + + """ + super(WarmupDecaySchedule, self).__init__() + self._lr_schedule = lr_schedule + self._warmup_steps = warmup_steps + + def __call__(self, step: int): + lr = self._lr_schedule(step) + if self._warmup_steps: + initial_learning_rate = tf.convert_to_tensor( + self._lr_schedule.initial_learning_rate, name="initial_learning_rate") + dtype = initial_learning_rate.dtype + global_step_recomp = tf.cast(step, dtype) + warmup_steps = tf.cast(self._warmup_steps, dtype) + warmup_lr = initial_learning_rate * global_step_recomp / warmup_steps + lr = tf.cond(global_step_recomp < warmup_steps, + lambda: warmup_lr, + lambda: lr) + return lr + + def get_config(self) -> Mapping[str, Any]: + config = self._lr_schedule.get_config() + config.update({ + "warmup_steps": self._warmup_steps, + }) + return config + + +# TODO(b/149030439) - refactor this with +# tf.keras.optimizers.schedules.PiecewiseConstantDecay + WarmupDecaySchedule. +class PiecewiseConstantDecayWithWarmup( + tf.keras.optimizers.schedules.LearningRateSchedule): + """Piecewise constant decay with warmup schedule.""" + + def __init__(self, + batch_size: int, + epoch_size: int, + warmup_epochs: int, + boundaries: List[int], + multipliers: List[float]): + """Piecewise constant decay with warmup. + + Args: + batch_size: The training batch size used in the experiment. + epoch_size: The size of an epoch, or the number of examples in an epoch. + warmup_epochs: The number of warmup epochs to apply. + boundaries: The list of floats with strictly increasing entries. + multipliers: The list of multipliers/learning rates to use for the + piecewise portion. The length must be 1 less than that of boundaries. + + """ + super(PiecewiseConstantDecayWithWarmup, self).__init__() + if len(boundaries) != len(multipliers) - 1: + raise ValueError("The length of boundaries must be 1 less than the " + "length of multipliers") + + base_lr_batch_size = 256 + steps_per_epoch = epoch_size // batch_size + + self._rescaled_lr = BASE_LEARNING_RATE * batch_size / base_lr_batch_size + self._step_boundaries = [float(steps_per_epoch) * x for x in boundaries] + self._lr_values = [self._rescaled_lr * m for m in multipliers] + self._warmup_steps = warmup_epochs * steps_per_epoch + + def __call__(self, step: int): + """Compute learning rate at given step.""" + def warmup_lr(): + return self._rescaled_lr * ( + step / tf.cast(self._warmup_steps, tf.float32)) + def piecewise_lr(): + return tf.compat.v1.train.piecewise_constant( + tf.cast(step, tf.float32), self._step_boundaries, self._lr_values) + return tf.cond(step < self._warmup_steps, warmup_lr, piecewise_lr) + + def get_config(self) -> Mapping[str, Any]: + return { + "rescaled_lr": self._rescaled_lr, + "step_boundaries": self._step_boundaries, + "lr_values": self._lr_values, + "warmup_steps": self._warmup_steps, + } + + +class CosineDecayWithWarmup(tf.keras.optimizers.schedules.LearningRateSchedule): + """Class to generate learning rate tensor.""" + + def __init__(self, batch_size: int, total_steps: int, warmup_steps: int): + """Creates the consine learning rate tensor with linear warmup. + + Args: + batch_size: The training batch size used in the experiment. + total_steps: Total training steps. + warmup_steps: Steps for the warm up period. 
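+ + For reference, the schedule constructed below is a linear warmup followed + by cosine decay: + + lr(step) = init_lr * step / warmup_steps, for step < warmup_steps + lr(step) = init_lr * (cos(pi * (step - warmup_steps) / + (total_steps - warmup_steps)) + 1) / 2, otherwise + + where init_lr = 0.1 * batch_size / 256.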
+ """ + super(CosineDecayWithWarmup, self).__init__() + base_lr_batch_size = 256 + self._total_steps = total_steps + self._init_learning_rate = BASE_LEARNING_RATE * batch_size / base_lr_batch_size + self._warmup_steps = warmup_steps + + def __call__(self, global_step: int): + global_step = tf.cast(global_step, dtype=tf.float32) + warmup_steps = self._warmup_steps + init_lr = self._init_learning_rate + total_steps = self._total_steps + + linear_warmup = global_step / warmup_steps * init_lr + + cosine_learning_rate = init_lr * (tf.cos(np.pi * + (global_step - warmup_steps) / + (total_steps - warmup_steps)) + + 1.0) / 2.0 + + learning_rate = tf.where(global_step < warmup_steps, linear_warmup, + cosine_learning_rate) + return learning_rate + + def get_config(self): + return { + "total_steps": self._total_steps, + "warmup_learning_rate": self._warmup_learning_rate, + "warmup_steps": self._warmup_steps, + "init_learning_rate": self._init_learning_rate, + } diff --git a/models/official/vision/image_classification/learning_rate_test.py b/models/official/vision/image_classification/learning_rate_test.py new file mode 100644 index 0000000000000000000000000000000000000000..272d2935fd7f1e6a7f1810e9247c4ef505021fde --- /dev/null +++ b/models/official/vision/image_classification/learning_rate_test.py @@ -0,0 +1,99 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for learning_rate.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from official.vision.image_classification import learning_rate + + +class LearningRateTests(tf.test.TestCase): + + def test_warmup_decay(self): + """Basic computational test for warmup decay.""" + initial_lr = 0.01 + decay_steps = 100 + decay_rate = 0.01 + warmup_steps = 10 + + base_lr = tf.keras.optimizers.schedules.ExponentialDecay( + initial_learning_rate=initial_lr, + decay_steps=decay_steps, + decay_rate=decay_rate) + lr = learning_rate.WarmupDecaySchedule( + lr_schedule=base_lr, + warmup_steps=warmup_steps) + + for step in range(warmup_steps - 1): + config = lr.get_config() + self.assertEqual(config['warmup_steps'], warmup_steps) + self.assertAllClose(self.evaluate(lr(step)), + step / warmup_steps * initial_lr) + + def test_piecewise_constant_decay_with_warmup(self): + """Basic computational test for piecewise constant decay with warmup.""" + boundaries = [1, 2, 3] + warmup_epochs = boundaries[0] + learning_rate_multipliers = [1.0, 0.1, 0.001] + expected_keys = [ + 'rescaled_lr', 'step_boundaries', 'lr_values', 'warmup_steps', + ] + + expected_lrs = [0.0, 0.1, 0.1] + + lr = learning_rate.PiecewiseConstantDecayWithWarmup( + batch_size=256, + epoch_size=256, + warmup_epochs=warmup_epochs, + boundaries=boundaries[1:], + multipliers=learning_rate_multipliers) + + step = 0 + + config = lr.get_config() + self.assertAllInSet(list(config.keys()), expected_keys) + + for boundary, expected_lr in zip(boundaries, expected_lrs): + for _ in range(step, boundary): + self.assertAllClose(self.evaluate(lr(step)), expected_lr) + step += 1 + + def test_piecewise_constant_decay_invalid_boundaries(self): + with self.assertRaisesRegex(ValueError, + 'The length of boundaries must be 1 less '): + learning_rate.PiecewiseConstantDecayWithWarmup( + batch_size=256, + epoch_size=256, + warmup_epochs=1, + boundaries=[1, 2], + multipliers=[1, 2]) + + def test_cosine_decay_with_warmup(self): + """Basic computational test for cosine decay with warmup.""" + expected_lrs = [0.0, 0.1, 0.05, 0.0] + + lr = learning_rate.CosineDecayWithWarmup( + batch_size=256, total_steps=3, warmup_steps=1) + + for step in [0, 1, 2, 3]: + self.assertAllClose(lr(step), expected_lrs[step]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/vision/image_classification/mnist_main.py b/models/official/vision/image_classification/mnist_main.py new file mode 100644 index 0000000000000000000000000000000000000000..1470c02d05b431e95de3c5807b68678a96d2b520 --- /dev/null +++ b/models/official/vision/image_classification/mnist_main.py @@ -0,0 +1,171 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Runs a simple model on the MNIST dataset.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from absl import app +from absl import flags +from absl import logging +import tensorflow as tf +import tensorflow_datasets as tfds + +from official.utils.flags import core as flags_core +from official.utils.misc import distribution_utils +from official.utils.misc import model_helpers +from official.vision.image_classification.resnet import common + +FLAGS = flags.FLAGS + + +def build_model(): + """Constructs the ML model used to predict handwritten digits.""" + + image = tf.keras.layers.Input(shape=(28, 28, 1)) + + y = tf.keras.layers.Conv2D(filters=32, + kernel_size=5, + padding='same', + activation='relu')(image) + y = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), + strides=(2, 2), + padding='same')(y) + y = tf.keras.layers.Conv2D(filters=32, + kernel_size=5, + padding='same', + activation='relu')(y) + y = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), + strides=(2, 2), + padding='same')(y) + y = tf.keras.layers.Flatten()(y) + y = tf.keras.layers.Dense(1024, activation='relu')(y) + y = tf.keras.layers.Dropout(0.4)(y) + + probs = tf.keras.layers.Dense(10, activation='softmax')(y) + + model = tf.keras.models.Model(image, probs, name='mnist') + + return model + + +@tfds.decode.make_decoder(output_dtype=tf.float32) +def decode_image(example, feature): + """Convert image to float32 and normalize from [0, 255] to [0.0, 1.0].""" + return tf.cast(feature.decode_example(example), dtype=tf.float32) / 255 + + +def run(flags_obj, datasets_override=None, strategy_override=None): + """Run MNIST model training and eval loop using native Keras APIs. + + Args: + flags_obj: An object containing parsed flag values. + datasets_override: A pair of `tf.data.Dataset` objects to train the model, + representing the train and test sets. + strategy_override: A `tf.distribute.Strategy` object to use for model. + + Returns: + Dictionary of training and eval stats. 
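+ + Example (illustrative; this mirrors the call made in `main` below): + + ```python + stats = run(flags.FLAGS) + ```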
+ """ + strategy = strategy_override or distribution_utils.get_distribution_strategy( + distribution_strategy=flags_obj.distribution_strategy, + num_gpus=flags_obj.num_gpus, + tpu_address=flags_obj.tpu) + + strategy_scope = distribution_utils.get_strategy_scope(strategy) + + mnist = tfds.builder('mnist', data_dir=flags_obj.data_dir) + if flags_obj.download: + mnist.download_and_prepare() + + mnist_train, mnist_test = datasets_override or mnist.as_dataset( + split=['train', 'test'], + decoders={'image': decode_image()}, # pylint: disable=no-value-for-parameter + as_supervised=True) + train_input_dataset = mnist_train.cache().repeat().shuffle( + buffer_size=50000).batch(flags_obj.batch_size) + eval_input_dataset = mnist_test.cache().repeat().batch(flags_obj.batch_size) + + with strategy_scope: + lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( + 0.05, decay_steps=100000, decay_rate=0.96) + optimizer = tf.keras.optimizers.SGD(learning_rate=lr_schedule) + + model = build_model() + model.compile( + optimizer=optimizer, + loss='sparse_categorical_crossentropy', + metrics=['sparse_categorical_accuracy']) + + num_train_examples = mnist.info.splits['train'].num_examples + train_steps = num_train_examples // flags_obj.batch_size + train_epochs = flags_obj.train_epochs + + ckpt_full_path = os.path.join(flags_obj.model_dir, 'model.ckpt-{epoch:04d}') + callbacks = [ + tf.keras.callbacks.ModelCheckpoint( + ckpt_full_path, save_weights_only=True), + tf.keras.callbacks.TensorBoard(log_dir=flags_obj.model_dir), + ] + + num_eval_examples = mnist.info.splits['test'].num_examples + num_eval_steps = num_eval_examples // flags_obj.batch_size + + history = model.fit( + train_input_dataset, + epochs=train_epochs, + steps_per_epoch=train_steps, + callbacks=callbacks, + validation_steps=num_eval_steps, + validation_data=eval_input_dataset, + validation_freq=flags_obj.epochs_between_evals) + + export_path = os.path.join(flags_obj.model_dir, 'saved_model') + model.save(export_path, include_optimizer=False) + + eval_output = model.evaluate( + eval_input_dataset, steps=num_eval_steps, verbose=2) + + stats = common.build_stats(history, eval_output, callbacks) + return stats + + +def define_mnist_flags(): + """Define command line flags for MNIST model.""" + flags_core.define_base( + clean=True, + num_gpu=True, + train_epochs=True, + epochs_between_evals=True, + distribution_strategy=True) + flags_core.define_device() + flags_core.define_distribution() + flags.DEFINE_bool('download', False, + 'Whether to download data to `--data_dir`.') + FLAGS.set_default('batch_size', 1024) + + +def main(_): + model_helpers.apply_clean(FLAGS) + stats = run(flags.FLAGS) + logging.info('Run stats:\n%s', stats) + + +if __name__ == '__main__': + logging.set_verbosity(logging.INFO) + define_mnist_flags() + app.run(main) diff --git a/models/official/vision/image_classification/mnist_test.py b/models/official/vision/image_classification/mnist_test.py new file mode 100644 index 0000000000000000000000000000000000000000..c05efcfe5d68fbbb3c181c19b59444db1abe5702 --- /dev/null +++ b/models/official/vision/image_classification/mnist_test.py @@ -0,0 +1,87 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Test the Keras MNIST model on GPU.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools + +from absl.testing import parameterized +import tensorflow as tf + +from tensorflow.python.distribute import combinations +from tensorflow.python.distribute import strategy_combinations +from official.utils.testing import integration +from official.vision.image_classification import mnist_main + + +def eager_strategy_combinations(): + return combinations.combine( + distribution=[ + strategy_combinations.default_strategy, + strategy_combinations.tpu_strategy, + strategy_combinations.one_device_strategy_gpu, + ], + mode="eager", + ) + + +class KerasMnistTest(tf.test.TestCase, parameterized.TestCase): + """Unit tests for sample Keras MNIST model.""" + _tempdir = None + + @classmethod + def setUpClass(cls): # pylint: disable=invalid-name + super(KerasMnistTest, cls).setUpClass() + mnist_main.define_mnist_flags() + + def tearDown(self): + super(KerasMnistTest, self).tearDown() + tf.io.gfile.rmtree(self.get_temp_dir()) + + @combinations.generate(eager_strategy_combinations()) + def test_end_to_end(self, distribution): + """Test Keras MNIST model with `strategy`.""" + + extra_flags = [ + "-train_epochs", "1", + # Let TFDS find the metadata folder automatically + "--data_dir=" + ] + + dummy_data = ( + tf.ones(shape=(10, 28, 28, 1), dtype=tf.int32), + tf.range(10), + ) + datasets = ( + tf.data.Dataset.from_tensor_slices(dummy_data), + tf.data.Dataset.from_tensor_slices(dummy_data), + ) + + run = functools.partial(mnist_main.run, + datasets_override=datasets, + strategy_override=distribution) + + integration.run_synthetic( + main=run, + synth=False, + tmp_root=self.get_temp_dir(), + extra_flags=extra_flags) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/official/vision/image_classification/optimizer_factory.py b/models/official/vision/image_classification/optimizer_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..d15aa79e0db61e36074c7227e1eca73df163ffa0 --- /dev/null +++ b/models/official/vision/image_classification/optimizer_factory.py @@ -0,0 +1,391 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Optimizer factory for vision tasks.""" +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +from absl import logging +import tensorflow as tf +import tensorflow_addons as tfa + +from typing import Any, Dict, Text, List +from official.vision.image_classification import learning_rate +from official.vision.image_classification.configs import base_configs + +# pylint: disable=protected-access + + +class MovingAverage(tf.keras.optimizers.Optimizer): + """Optimizer that computes a moving average of the variables. + + Empirically it has been found that using the moving average of the trained + parameters of a deep network is better than using its trained parameters + directly. This optimizer allows you to compute this moving average and swap + the variables at save time so that any code outside of the training loop + will use by default the average values instead of the original ones. + + Example of usage for training: + ```python + opt = tf.keras.optimizers.SGD(learning_rate) + opt = MovingAverage(opt) + + opt.shadow_copy(model) + ``` + + At test time, swap the shadow variables to evaluate on the averaged weights: + ```python + opt.swap_weights() + # Test eval the model here + opt.swap_weights() + ``` + """ + + def __init__(self, + optimizer: tf.keras.optimizers.Optimizer, + average_decay: float = 0.99, + start_step: int = 0, + dynamic_decay: bool = True, + name: Text = 'moving_average', + **kwargs): + """Construct a new MovingAverage optimizer. + + Args: + optimizer: `tf.keras.optimizers.Optimizer` that will be + used to compute and apply gradients. + average_decay: float. Decay to use to maintain the moving averages + of trained variables. + start_step: int. What step to start the moving average. + dynamic_decay: bool. Whether to change the decay based on the number + of optimizer updates. Decay will start at 0.1 and gradually increase + up to `average_decay` after each optimizer update. This behavior is + similar to `tf.train.ExponentialMovingAverage` in TF 1.x. + name: Optional name for the operations created when applying + gradients. Defaults to "moving_average". + **kwargs: keyword arguments. Allowed to be {`clipnorm`, + `clipvalue`, `lr`, `decay`}. 
+ """ + super(MovingAverage, self).__init__(name, **kwargs) + self._optimizer = optimizer + self._average_decay = average_decay + self._start_step = tf.constant(start_step, tf.float32) + self._dynamic_decay = dynamic_decay + + def shadow_copy(self, model: tf.keras.Model): + """Creates shadow variables for the given model weights.""" + for var in model.weights: + self.add_slot(var, 'average', initializer='zeros') + self._average_weights = [ + self.get_slot(var, 'average') for var in model.weights + ] + self._model_weights = model.weights + + @property + def has_shadow_copy(self): + """Whether this optimizer has created shadow variables.""" + return self._model_weights is not None + + def _create_slots(self, var_list): + self._optimizer._create_slots(var_list=var_list) # pylint: disable=protected-access + + def apply_gradients(self, grads_and_vars, name: Text = None): + result = self._optimizer.apply_gradients(grads_and_vars, name) + self.update_average(self._optimizer.iterations) + return result + + @tf.function + def update_average(self, step: tf.Tensor): + step = tf.cast(step, tf.float32) + if step < self._start_step: + decay = tf.constant(0., tf.float32) + elif self._dynamic_decay: + decay = step - self._start_step + decay = tf.minimum(self._average_decay, (1. + decay) / (10. + decay)) + else: + decay = self._average_decay + + def _apply_moving(v_moving, v_normal): + diff = v_moving - v_normal + v_moving.assign_sub(tf.cast(1. - decay, v_moving.dtype) * diff) + return v_moving + + def _update(strategy, v_moving_and_v_normal): + for v_moving, v_normal in v_moving_and_v_normal: + strategy.extended.update(v_moving, _apply_moving, args=(v_normal,)) + + ctx = tf.distribute.get_replica_context() + return ctx.merge_call(_update, args=(zip(self._average_weights, + self._model_weights),)) + + def swap_weights(self): + """Swap the average and moving weights. + + This is a convenience method to allow one to evaluate the averaged weights + at test time. Loads the weights stored in `self._average` into the model, + keeping a copy of the original model weights. Swapping twice will return + the original weights. + """ + if tf.distribute.in_cross_replica_context(): + strategy = tf.distribute.get_strategy() + strategy.run(self._swap_weights, args=()) + else: + raise ValueError('Swapping weights must occur under a ' + 'tf.distribute.Strategy') + + @tf.function + def _swap_weights(self): + def fn_0(a, b): + a.assign_add(b) + return a + def fn_1(b, a): + b.assign(a - b) + return b + def fn_2(a, b): + a.assign_sub(b) + return a + + def swap(strategy, a_and_b): + """Swap `a` and `b` and mirror to all devices.""" + for a, b in a_and_b: + strategy.extended.update(a, fn_0, args=(b,)) # a = a + b + strategy.extended.update(b, fn_1, args=(a,)) # b = a - b + strategy.extended.update(a, fn_2, args=(b,)) # a = a - b + + ctx = tf.distribute.get_replica_context() + return ctx.merge_call( + swap, args=(zip(self._average_weights, self._model_weights),)) + + def assign_average_vars(self, var_list: List[tf.Variable]): + """Assign variables in var_list with their respective averages. + + Args: + var_list: List of model variables to be assigned to their average. + Returns: + assign_op: The op corresponding to the assignment operation of + variables to their average. 
+ """ + assign_op = tf.group([ + var.assign(self.get_slot(var, 'average')) for var in var_list + if var.trainable + ]) + return assign_op + + def _create_hypers(self): + self._optimizer._create_hypers() # pylint: disable=protected-access + + def _prepare(self, var_list): + return self._optimizer._prepare(var_list=var_list) # pylint: disable=protected-access + + @property + def iterations(self): + return self._optimizer.iterations + + @iterations.setter + def iterations(self, variable): + self._optimizer.iterations = variable + + @property + def weights(self): + # return self._weights + self._optimizer.weights + return self._optimizer.weights + + @property + def lr(self): + return self._optimizer._get_hyper('learning_rate') + + @lr.setter + def lr(self, lr): + self._optimizer._set_hyper('learning_rate', lr) + + @property + def learning_rate(self): + return self._optimizer._get_hyper('learning_rate') + + @learning_rate.setter + def learning_rate(self, learning_rate): # pylint: disable=redefined-outer-name + self._optimizer._set_hyper('learning_rate', learning_rate) + + def _resource_apply_dense(self, grad, var): + return self._optimizer._resource_apply_dense(grad, var) + + def _resource_apply_sparse(self, grad, var, indices): + return self._optimizer._resource_apply_sparse(grad, var, indices) + + def _resource_apply_sparse_duplicate_indices(self, grad, var, indices): + return self._optimizer._resource_apply_sparse_duplicate_indices( + grad, var, indices) + + def get_config(self): + config = { + 'optimizer': tf.keras.optimizers.serialize(self._optimizer), + 'average_decay': self._average_decay, + 'start_step': self._start_step, + 'dynamic_decay': self._dynamic_decay, + } + base_config = super(MovingAverage, self).get_config() + return dict(list(base_config.items()) + list(config.items())) + + @classmethod + def from_config(cls, config, custom_objects=None): + optimizer = tf.keras.optimizers.deserialize( + config.pop('optimizer'), + custom_objects=custom_objects, + ) + return cls(optimizer, **config) + + +def build_optimizer( + optimizer_name: Text, + base_learning_rate: tf.keras.optimizers.schedules.LearningRateSchedule, + params: Dict[Text, Any]): + """Build the optimizer based on name. + + Args: + optimizer_name: String representation of the optimizer name. Examples: + sgd, momentum, rmsprop. + base_learning_rate: `tf.keras.optimizers.schedules.LearningRateSchedule` + base learning rate. + params: String -> Any dictionary representing the optimizer params. + This should contain optimizer specific parameters such as + `base_learning_rate`, `decay`, etc. + + Returns: + A tf.keras.Optimizer. + + Raises: + ValueError if the provided optimizer_name is not supported. 
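+
+  Example (a minimal sketch; the hyperparameter values are illustrative only):
+
+  ```python
+  optimizer = build_optimizer(
+      optimizer_name='momentum',
+      base_learning_rate=0.1,
+      params={'momentum': 0.9, 'nesterov': True})
+  ```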
+ + """ + optimizer_name = optimizer_name.lower() + logging.info('Building %s optimizer with params %s', optimizer_name, params) + + if optimizer_name == 'sgd': + logging.info('Using SGD optimizer') + nesterov = params.get('nesterov', False) + optimizer = tf.keras.optimizers.SGD(learning_rate=base_learning_rate, + nesterov=nesterov) + elif optimizer_name == 'momentum': + logging.info('Using momentum optimizer') + nesterov = params.get('nesterov', False) + optimizer = tf.keras.optimizers.SGD(learning_rate=base_learning_rate, + momentum=params['momentum'], + nesterov=nesterov) + elif optimizer_name == 'rmsprop': + logging.info('Using RMSProp') + rho = params.get('decay', None) or params.get('rho', 0.9) + momentum = params.get('momentum', 0.9) + epsilon = params.get('epsilon', 1e-07) + optimizer = tf.keras.optimizers.RMSprop(learning_rate=base_learning_rate, + rho=rho, + momentum=momentum, + epsilon=epsilon) + elif optimizer_name == 'adam': + logging.info('Using Adam') + beta_1 = params.get('beta_1', 0.9) + beta_2 = params.get('beta_2', 0.999) + epsilon = params.get('epsilon', 1e-07) + optimizer = tf.keras.optimizers.Adam(learning_rate=base_learning_rate, + beta_1=beta_1, + beta_2=beta_2, + epsilon=epsilon) + elif optimizer_name == 'adamw': + logging.info('Using AdamW') + weight_decay = params.get('weight_decay', 0.01) + beta_1 = params.get('beta_1', 0.9) + beta_2 = params.get('beta_2', 0.999) + epsilon = params.get('epsilon', 1e-07) + optimizer = tfa.optimizers.AdamW(weight_decay=weight_decay, + learning_rate=base_learning_rate, + beta_1=beta_1, + beta_2=beta_2, + epsilon=epsilon) + else: + raise ValueError('Unknown optimizer %s' % optimizer_name) + + if params.get('lookahead', None): + logging.info('Using lookahead optimizer.') + optimizer = tfa.optimizers.Lookahead(optimizer) + + # Moving average should be applied last, as it's applied at test time + moving_average_decay = params.get('moving_average_decay', 0.) + if moving_average_decay is not None and moving_average_decay > 0.: + logging.info('Including moving average decay.') + optimizer = MovingAverage( + optimizer, + average_decay=moving_average_decay) + return optimizer + + +def build_learning_rate(params: base_configs.LearningRateConfig, + batch_size: int = None, + train_epochs: int = None, + train_steps: int = None): + """Build the learning rate given the provided configuration.""" + decay_type = params.name + base_lr = params.initial_lr + decay_rate = params.decay_rate + if params.decay_epochs is not None: + decay_steps = params.decay_epochs * train_steps + else: + decay_steps = 0 + if params.warmup_epochs is not None: + warmup_steps = params.warmup_epochs * train_steps + else: + warmup_steps = 0 + + lr_multiplier = params.scale_by_batch_size + + if lr_multiplier and lr_multiplier > 0: + # Scale the learning rate based on the batch size and a multiplier + base_lr *= lr_multiplier * batch_size + logging.info('Scaling the learning rate based on the batch size ' + 'multiplier. New base_lr: %f', base_lr) + + if decay_type == 'exponential': + logging.info('Using exponential learning rate with: ' + 'initial_learning_rate: %f, decay_steps: %d, ' + 'decay_rate: %f', base_lr, decay_steps, decay_rate) + lr = tf.keras.optimizers.schedules.ExponentialDecay( + initial_learning_rate=base_lr, + decay_steps=decay_steps, + decay_rate=decay_rate, + staircase=params.staircase) + elif decay_type == 'piecewise_constant_with_warmup': + logging.info('Using Piecewise constant decay with warmup. 
' + 'Parameters: batch_size: %d, epoch_size: %d, ' + 'warmup_epochs: %d, boundaries: %s, multipliers: %s', + batch_size, params.examples_per_epoch, + params.warmup_epochs, params.boundaries, + params.multipliers) + lr = learning_rate.PiecewiseConstantDecayWithWarmup( + batch_size=batch_size, + epoch_size=params.examples_per_epoch, + warmup_epochs=params.warmup_epochs, + boundaries=params.boundaries, + multipliers=params.multipliers) + elif decay_type == 'cosine_with_warmup': + lr = learning_rate.CosineDecayWithWarmup( + batch_size=batch_size, + total_steps=train_epochs * train_steps, + warmup_steps=warmup_steps) + if warmup_steps > 0: + if decay_type not in [ + 'piecewise_constant_with_warmup', 'cosine_with_warmup' + ]: + logging.info('Applying %d warmup steps to the learning rate', + warmup_steps) + lr = learning_rate.WarmupDecaySchedule(lr, warmup_steps) + return lr diff --git a/models/official/vision/image_classification/optimizer_factory_test.py b/models/official/vision/image_classification/optimizer_factory_test.py new file mode 100644 index 0000000000000000000000000000000000000000..c952618c126b4ee18b4a7f0ee87a91cff873a109 --- /dev/null +++ b/models/official/vision/image_classification/optimizer_factory_test.py @@ -0,0 +1,117 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for optimizer_factory.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import tensorflow as tf + +from absl.testing import parameterized +from official.vision.image_classification import optimizer_factory +from official.vision.image_classification.configs import base_configs + + +class OptimizerFactoryTest(tf.test.TestCase, parameterized.TestCase): + + @parameterized.named_parameters( + ('sgd', 'sgd', 0., False), + ('momentum', 'momentum', 0., False), + ('rmsprop', 'rmsprop', 0., False), + ('adam', 'adam', 0., False), + ('adamw', 'adamw', 0., False), + ('momentum_lookahead', 'momentum', 0., True), + ('sgd_ema', 'sgd', 0.999, False), + ('momentum_ema', 'momentum', 0.999, False), + ('rmsprop_ema', 'rmsprop', 0.999, False)) + def test_optimizer(self, optimizer_name, moving_average_decay, lookahead): + """Smoke test to be sure no syntax errors.""" + params = { + 'learning_rate': 0.001, + 'rho': 0.09, + 'momentum': 0., + 'epsilon': 1e-07, + 'moving_average_decay': moving_average_decay, + 'lookahead': lookahead, + } + optimizer = optimizer_factory.build_optimizer( + optimizer_name=optimizer_name, + base_learning_rate=params['learning_rate'], + params=params) + self.assertTrue(issubclass(type(optimizer), tf.keras.optimizers.Optimizer)) + + def test_unknown_optimizer(self): + with self.assertRaises(ValueError): + optimizer_factory.build_optimizer( + optimizer_name='this_optimizer_does_not_exist', + base_learning_rate=None, + params=None) + + def test_learning_rate_without_decay_or_warmups(self): + params = base_configs.LearningRateConfig( + name='exponential', + initial_lr=0.01, + decay_rate=0.01, + decay_epochs=None, + warmup_epochs=None, + scale_by_batch_size=0.01, + examples_per_epoch=1, + boundaries=[0], + multipliers=[0, 1]) + batch_size = 1 + train_steps = 1 + + lr = optimizer_factory.build_learning_rate( + params=params, + batch_size=batch_size, + train_steps=train_steps) + self.assertTrue( + issubclass( + type(lr), tf.keras.optimizers.schedules.LearningRateSchedule)) + + @parameterized.named_parameters( + ('exponential', 'exponential'), + ('piecewise_constant_with_warmup', 'piecewise_constant_with_warmup'), + ('cosine_with_warmup', 'cosine_with_warmup')) + def test_learning_rate_with_decay_and_warmup(self, lr_decay_type): + """Basic smoke test for syntax.""" + params = base_configs.LearningRateConfig( + name=lr_decay_type, + initial_lr=0.01, + decay_rate=0.01, + decay_epochs=1, + warmup_epochs=1, + scale_by_batch_size=0.01, + examples_per_epoch=1, + boundaries=[0], + multipliers=[0, 1]) + batch_size = 1 + train_epochs = 1 + train_steps = 1 + + lr = optimizer_factory.build_learning_rate( + params=params, + batch_size=batch_size, + train_epochs=train_epochs, + train_steps=train_steps) + self.assertTrue( + issubclass( + type(lr), tf.keras.optimizers.schedules.LearningRateSchedule)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/official/vision/image_classification/preprocessing.py b/models/official/vision/image_classification/preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..3f2019189d4e5f9c269a67276531b4344ede7e32 --- /dev/null +++ b/models/official/vision/image_classification/preprocessing.py @@ -0,0 +1,391 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Preprocessing functions for images.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import tensorflow as tf +from typing import List, Optional, Text, Tuple + +from official.vision.image_classification import augment + + +# Calculated from the ImageNet training set +MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255) +STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255) + +IMAGE_SIZE = 224 +CROP_PADDING = 32 + + +def mean_image_subtraction( + image_bytes: tf.Tensor, + means: Tuple[float, ...], + num_channels: int = 3, + dtype: tf.dtypes.DType = tf.float32, +) -> tf.Tensor: + """Subtracts the given means from each image channel. + + For example: + means = [123.68, 116.779, 103.939] + image_bytes = mean_image_subtraction(image_bytes, means) + + Note that the rank of `image` must be known. + + Args: + image_bytes: a tensor of size [height, width, C]. + means: a C-vector of values to subtract from each channel. + num_channels: number of color channels in the image that will be distorted. + dtype: the dtype to convert the images to. Set to `None` to skip conversion. + + Returns: + the centered image. + + Raises: + ValueError: If the rank of `image` is unknown, if `image` has a rank other + than three or if the number of channels in `image` doesn't match the + number of values in `means`. + """ + if image_bytes.get_shape().ndims != 3: + raise ValueError('Input must be of size [height, width, C>0]') + + if len(means) != num_channels: + raise ValueError('len(means) must match the number of channels') + + # We have a 1-D tensor of means; convert to 3-D. + # Note(b/130245863): we explicitly call `broadcast` instead of simply + # expanding dimensions for better performance. + means = tf.broadcast_to(means, tf.shape(image_bytes)) + if dtype is not None: + means = tf.cast(means, dtype=dtype) + + return image_bytes - means + + +def standardize_image( + image_bytes: tf.Tensor, + stddev: Tuple[float, ...], + num_channels: int = 3, + dtype: tf.dtypes.DType = tf.float32, +) -> tf.Tensor: + """Divides the given stddev from each image channel. + + For example: + stddev = [123.68, 116.779, 103.939] + image_bytes = standardize_image(image_bytes, stddev) + + Note that the rank of `image` must be known. + + Args: + image_bytes: a tensor of size [height, width, C]. + stddev: a C-vector of values to divide from each channel. + num_channels: number of color channels in the image that will be distorted. + dtype: the dtype to convert the images to. Set to `None` to skip conversion. + + Returns: + the centered image. + + Raises: + ValueError: If the rank of `image` is unknown, if `image` has a rank other + than three or if the number of channels in `image` doesn't match the + number of values in `stddev`. 
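+
+  A sketch of the usual (image - mean) / stddev normalization, combining this
+  function with `mean_image_subtraction` (both defined in this module):
+    image_bytes = mean_image_subtraction(image_bytes, MEAN_RGB)
+    image_bytes = standardize_image(image_bytes, STDDEV_RGB)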
+ """ + if image_bytes.get_shape().ndims != 3: + raise ValueError('Input must be of size [height, width, C>0]') + + if len(stddev) != num_channels: + raise ValueError('len(stddev) must match the number of channels') + + # We have a 1-D tensor of stddev; convert to 3-D. + # Note(b/130245863): we explicitly call `broadcast` instead of simply + # expanding dimensions for better performance. + stddev = tf.broadcast_to(stddev, tf.shape(image_bytes)) + if dtype is not None: + stddev = tf.cast(stddev, dtype=dtype) + + return image_bytes / stddev + + +def normalize_images(features: tf.Tensor, + mean_rgb: Tuple[float, ...] = MEAN_RGB, + stddev_rgb: Tuple[float, ...] = STDDEV_RGB, + num_channels: int = 3, + dtype: tf.dtypes.DType = tf.float32, + data_format: Text = 'channels_last') -> tf.Tensor: + """Normalizes the input image channels with the given mean and stddev. + + Args: + features: `Tensor` representing decoded images in float format. + mean_rgb: the mean of the channels to subtract. + stddev_rgb: the stddev of the channels to divide. + num_channels: the number of channels in the input image tensor. + dtype: the dtype to convert the images to. Set to `None` to skip conversion. + data_format: the format of the input image tensor + ['channels_first', 'channels_last']. + + Returns: + A normalized image `Tensor`. + """ + # TODO(allencwang) - figure out how to use mean_image_subtraction and + # standardize_image on batches of images and replace the following. + if data_format == 'channels_first': + stats_shape = [num_channels, 1, 1] + else: + stats_shape = [1, 1, num_channels] + + if dtype is not None: + features = tf.image.convert_image_dtype(features, dtype=dtype) + + if mean_rgb is not None: + mean_rgb = tf.constant(mean_rgb, + shape=stats_shape, + dtype=features.dtype) + mean_rgb = tf.broadcast_to(mean_rgb, tf.shape(features)) + features = features - mean_rgb + + if stddev_rgb is not None: + stddev_rgb = tf.constant(stddev_rgb, + shape=stats_shape, + dtype=features.dtype) + stddev_rgb = tf.broadcast_to(stddev_rgb, tf.shape(features)) + features = features / stddev_rgb + + return features + + +def decode_and_center_crop(image_bytes: tf.Tensor, + image_size: int = IMAGE_SIZE, + crop_padding: int = CROP_PADDING) -> tf.Tensor: + """Crops to center of image with padding then scales image_size. + + Args: + image_bytes: `Tensor` representing an image binary of arbitrary size. + image_size: image height/width dimension. + crop_padding: the padding size to use when centering the crop. + + Returns: + A decoded and cropped image `Tensor`. 
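+
+  Example (a sketch; the file path is a placeholder):
+    image_bytes = tf.io.read_file('/path/to/image.jpg')
+    image = decode_and_center_crop(image_bytes, image_size=224)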
+ """ + decoded = image_bytes.dtype != tf.string + shape = (tf.shape(image_bytes) if decoded + else tf.image.extract_jpeg_shape(image_bytes)) + image_height = shape[0] + image_width = shape[1] + + padded_center_crop_size = tf.cast( + ((image_size / (image_size + crop_padding)) * + tf.cast(tf.minimum(image_height, image_width), tf.float32)), + tf.int32) + + offset_height = ((image_height - padded_center_crop_size) + 1) // 2 + offset_width = ((image_width - padded_center_crop_size) + 1) // 2 + crop_window = tf.stack([offset_height, offset_width, + padded_center_crop_size, padded_center_crop_size]) + if decoded: + image = tf.image.crop_to_bounding_box( + image_bytes, + offset_height=offset_height, + offset_width=offset_width, + target_height=padded_center_crop_size, + target_width=padded_center_crop_size) + else: + image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3) + + image = resize_image(image_bytes=image, + height=image_size, + width=image_size) + + return image + + +def decode_crop_and_flip(image_bytes: tf.Tensor) -> tf.Tensor: + """Crops an image to a random part of the image, then randomly flips. + + Args: + image_bytes: `Tensor` representing an image binary of arbitrary size. + + Returns: + A decoded and cropped image `Tensor`. + + """ + decoded = image_bytes.dtype != tf.string + bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) + shape = (tf.shape(image_bytes) if decoded + else tf.image.extract_jpeg_shape(image_bytes)) + sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( + shape, + bounding_boxes=bbox, + min_object_covered=0.1, + aspect_ratio_range=[0.75, 1.33], + area_range=[0.05, 1.0], + max_attempts=100, + use_image_if_no_bounding_boxes=True) + bbox_begin, bbox_size, _ = sample_distorted_bounding_box + + # Reassemble the bounding box in the format the crop op requires. + offset_height, offset_width, _ = tf.unstack(bbox_begin) + target_height, target_width, _ = tf.unstack(bbox_size) + crop_window = tf.stack([offset_height, offset_width, + target_height, target_width]) + if decoded: + cropped = tf.image.crop_to_bounding_box( + image_bytes, + offset_height=offset_height, + offset_width=offset_width, + target_height=target_height, + target_width=target_width) + else: + cropped = tf.image.decode_and_crop_jpeg(image_bytes, + crop_window, + channels=3) + + # Flip to add a little more random distortion in. + cropped = tf.image.random_flip_left_right(cropped) + return cropped + + +def resize_image(image_bytes: tf.Tensor, + height: int = IMAGE_SIZE, + width: int = IMAGE_SIZE) -> tf.Tensor: + """Resizes an image to a given height and width. + + Args: + image_bytes: `Tensor` representing an image binary of arbitrary size. + height: image height dimension. + width: image width dimension. + + Returns: + A tensor containing the resized image. + + """ + return tf.compat.v1.image.resize( + image_bytes, [height, width], method=tf.image.ResizeMethod.BILINEAR, + align_corners=False) + + +def preprocess_for_eval( + image_bytes: tf.Tensor, + image_size: int = IMAGE_SIZE, + num_channels: int = 3, + mean_subtract: bool = False, + standardize: bool = False, + dtype: tf.dtypes.DType = tf.float32 +) -> tf.Tensor: + """Preprocesses the given image for evaluation. + + Args: + image_bytes: `Tensor` representing an image binary of arbitrary size. + image_size: image height/width dimension. + num_channels: number of image input channels. + mean_subtract: whether or not to apply mean subtraction. 
+ standardize: whether or not to apply standardization. + dtype: the dtype to convert the images to. Set to `None` to skip conversion. + + Returns: + A preprocessed and normalized image `Tensor`. + """ + images = decode_and_center_crop(image_bytes, image_size) + images = tf.reshape(images, [image_size, image_size, num_channels]) + + if mean_subtract: + images = mean_image_subtraction(image_bytes=images, means=MEAN_RGB) + if standardize: + images = standardize_image(image_bytes=images, stddev=STDDEV_RGB) + if dtype is not None: + images = tf.image.convert_image_dtype(images, dtype=dtype) + + return images + + +def load_eval_image(filename: Text, image_size: int = IMAGE_SIZE) -> tf.Tensor: + """Reads an image from the filesystem and applies image preprocessing. + + Args: + filename: a filename path of an image. + image_size: image height/width dimension. + + Returns: + A preprocessed and normalized image `Tensor`. + """ + image_bytes = tf.io.read_file(filename) + image = preprocess_for_eval(image_bytes, image_size) + + return image + + +def build_eval_dataset(filenames: List[Text], + labels: List[int] = None, + image_size: int = IMAGE_SIZE, + batch_size: int = 1) -> tf.Tensor: + """Builds a tf.data.Dataset from a list of filenames and labels. + + Args: + filenames: a list of filename paths of images. + labels: a list of labels corresponding to each image. + image_size: image height/width dimension. + batch_size: the batch size used by the dataset + + Returns: + A preprocessed and normalized image `Tensor`. + """ + if labels is None: + labels = [0] * len(filenames) + + filenames = tf.constant(filenames) + labels = tf.constant(labels) + dataset = tf.data.Dataset.from_tensor_slices((filenames, labels)) + + dataset = dataset.map( + lambda filename, label: (load_eval_image(filename, image_size), label)) + dataset = dataset.batch(batch_size) + + return dataset + + +def preprocess_for_train(image_bytes: tf.Tensor, + image_size: int = IMAGE_SIZE, + augmenter: Optional[augment.ImageAugment] = None, + mean_subtract: bool = False, + standardize: bool = False, + dtype: tf.dtypes.DType = tf.float32) -> tf.Tensor: + """Preprocesses the given image for training. + + Args: + image_bytes: `Tensor` representing an image binary of + arbitrary size of dtype tf.uint8. + image_size: image height/width dimension. + augmenter: the image augmenter to apply. + mean_subtract: whether or not to apply mean subtraction. + standardize: whether or not to apply standardization. + dtype: the dtype to convert the images to. Set to `None` to skip conversion. + + Returns: + A preprocessed and normalized image `Tensor`. 
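+
+  Example (a sketch of a typical tf.data pipeline; assumes the dataset yields
+  raw JPEG bytes and integer labels):
+    dataset = dataset.map(
+        lambda image_bytes, label: (preprocess_for_train(image_bytes), label),
+        num_parallel_calls=tf.data.experimental.AUTOTUNE)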
+  """
+  images = decode_crop_and_flip(image_bytes=image_bytes)
+  images = resize_image(images, height=image_size, width=image_size)
+  if mean_subtract:
+    images = mean_image_subtraction(image_bytes=images, means=MEAN_RGB)
+  if standardize:
+    images = standardize_image(image_bytes=images, stddev=STDDEV_RGB)
+  if augmenter is not None:
+    images = augmenter.distort(images)
+  if dtype is not None:
+    images = tf.image.convert_image_dtype(images, dtype)
+
+  return images
diff --git a/models/official/vision/image_classification/resnet/README.md b/models/official/vision/image_classification/resnet/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..5064523fbdcd4222c2159bdc1c09b7156800bf54
--- /dev/null
+++ b/models/official/vision/image_classification/resnet/README.md
@@ -0,0 +1,125 @@
+This folder contains a
+[custom training loop (CTL)](#resnet-custom-training-loop) implementation for
+ResNet50.
+
+## Before you begin
+Please refer to the [README](../README.md) in the parent directory for
+information on setup and preparing the data.
+
+## ResNet (custom training loop)
+
+Similar to the [estimator implementation](../../../r1/resnet), the Keras
+implementation has code for the ImageNet dataset. The ImageNet
+version uses a ResNet50 model implemented in
+[`resnet_model.py`](./resnet_model.py).
+
+
+### Pretrained Models
+
+* [ResNet50 Checkpoints](https://storage.googleapis.com/cloud-tpu-checkpoints/resnet/resnet50.tar.gz)
+
+* ResNet50 TFHub: [feature vector](https://tfhub.dev/tensorflow/resnet_50/feature_vector/1)
+and [classification](https://tfhub.dev/tensorflow/resnet_50/classification/1)
+
+Again, if you did not download the data to the default directory, specify the
+location with the `--data_dir` flag:
+
+```bash
+python3 resnet_ctl_imagenet_main.py --data_dir=/path/to/imagenet
+```
+
+There are more flag options you can specify. Here are some examples:
+
+- `--use_synthetic_data`: when set to true, synthetic data is used instead of
+real data;
+- `--batch_size`: the batch size used for the model;
+- `--model_dir`: the directory to save the model checkpoint;
+- `--train_epochs`: the number of epochs to run for training the model;
+- `--train_steps`: the number of steps to run for training the model. Only
+values smaller than the number of batches in an epoch are currently supported;
+- `--skip_eval`: when set to true, evaluation and validation during training
+are skipped.
+
+For example, this is a typical command line for training on ImageNet data with
+a batch size of 128 per GPU:
+
+```bash
+python3 resnet_ctl_imagenet_main.py \
+    --model_dir=/tmp/model_dir/something \
+    --num_gpus=2 \
+    --batch_size=128 \
+    --train_epochs=90 \
+    --train_steps=10 \
+    --use_synthetic_data=false
+```
+
+See [`common.py`](common.py) for the full list of options.
+
+### Using multiple GPUs
+
+You can train these models on multiple GPUs using the `tf.distribute.Strategy`
+API. You can read more about distribution strategies in this
+[guide](https://www.tensorflow.org/guide/distribute_strategy).
+
+In this example, we have made it easier to use with just the command-line flag
+`--num_gpus`. By default this flag is 1 if TensorFlow is compiled with CUDA,
+and 0 otherwise.
+
+- --num_gpus=0: Uses tf.distribute.OneDeviceStrategy with CPU as the device.
+- --num_gpus=1: Uses tf.distribute.OneDeviceStrategy with GPU as the device.
+- --num_gpus=2+: Uses tf.distribute.MirroredStrategy to run synchronous
+distributed training across the GPUs.
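+
+For example, a sketch of a synchronous run across 8 GPUs (paths and sizes are
+illustrative only):
+
+```bash
+python3 resnet_ctl_imagenet_main.py \
+    --data_dir=/path/to/imagenet \
+    --model_dir=/tmp/model_dir \
+    --num_gpus=8 \
+    --batch_size=1024 \
+    --train_epochs=90
+```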
+
+If you wish to run without `tf.distribute.Strategy`, you can do so by setting
+`--distribution_strategy=off`.
+
+### Running on multiple GPU hosts
+
+You can also train these models on multiple hosts, each with GPUs, using
+`tf.distribute.Strategy`.
+
+The easiest way to run multi-host benchmarks is to set the
+[`TF_CONFIG`](https://www.tensorflow.org/guide/distributed_training#TF_CONFIG)
+environment variable appropriately on each host. For example, to run with
+`MultiWorkerMirroredStrategy` on 2 hosts, the `cluster` in `TF_CONFIG` should
+have 2 `host:port` entries, and host `i` should have the `task` in `TF_CONFIG`
+set to `{"type": "worker", "index": i}`. `MultiWorkerMirroredStrategy` will
+automatically use all the available GPUs on each host.
+
+### Running on Cloud TPUs
+
+Note: This model will **not** work with TPUs on Colab.
+
+You can train the ResNet CTL model on Cloud TPUs using
+`tf.distribute.TPUStrategy`. If you are not familiar with Cloud TPUs, it is
+strongly recommended that you go through the
+[quickstart](https://cloud.google.com/tpu/docs/quickstart) to learn how to
+create a TPU and GCE VM.
+
+To run the ResNet model on a TPU, you must set `--distribution_strategy=tpu`
+and `--tpu=$TPU_NAME`, where `$TPU_NAME` is the name of your TPU in the Cloud
+Console. From a GCE VM, you can run the following command to train ResNet for
+one epoch on a v2-8 or v3-8 TPU by setting `TRAIN_EPOCHS` to 1:
+
+```bash
+python3 resnet_ctl_imagenet_main.py \
+  --tpu=$TPU_NAME \
+  --model_dir=$MODEL_DIR \
+  --data_dir=$DATA_DIR \
+  --batch_size=1024 \
+  --steps_per_loop=500 \
+  --train_epochs=$TRAIN_EPOCHS \
+  --use_synthetic_data=false \
+  --dtype=fp32 \
+  --enable_eager=true \
+  --enable_tensorboard=true \
+  --distribution_strategy=tpu \
+  --log_steps=50 \
+  --single_l2_loss_op=true \
+  --use_tf_function=true
+```
+
+To train ResNet to convergence, run it for 90 epochs by setting
+`TRAIN_EPOCHS` to 90.
+
+Note: `$MODEL_DIR` and `$DATA_DIR` must be GCS paths.
diff --git a/models/official/vision/image_classification/resnet/__init__.py b/models/official/vision/image_classification/resnet/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/models/official/vision/image_classification/resnet/common.py b/models/official/vision/image_classification/resnet/common.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9a64aa4064978863332a8024f4e46d64b9baaef
--- /dev/null
+++ b/models/official/vision/image_classification/resnet/common.py
@@ -0,0 +1,387 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================== +"""Common util functions and classes used by both keras cifar and imagenet.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from absl import flags +import tensorflow as tf + +from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2 +import tensorflow_model_optimization as tfmot +from official.utils.flags import core as flags_core +from official.utils.misc import keras_utils + +FLAGS = flags.FLAGS +BASE_LEARNING_RATE = 0.1 # This matches Jing's version. +TRAIN_TOP_1 = 'training_accuracy_top_1' +LR_SCHEDULE = [ # (multiplier, epoch to start) tuples + (1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80) +] + + +class PiecewiseConstantDecayWithWarmup( + tf.keras.optimizers.schedules.LearningRateSchedule): + """Piecewise constant decay with warmup schedule.""" + + def __init__(self, batch_size, epoch_size, warmup_epochs, boundaries, + multipliers, compute_lr_on_cpu=True, name=None): + super(PiecewiseConstantDecayWithWarmup, self).__init__() + if len(boundaries) != len(multipliers) - 1: + raise ValueError('The length of boundaries must be 1 less than the ' + 'length of multipliers') + + base_lr_batch_size = 256 + steps_per_epoch = epoch_size // batch_size + + self.rescaled_lr = BASE_LEARNING_RATE * batch_size / base_lr_batch_size + self.step_boundaries = [float(steps_per_epoch) * x for x in boundaries] + self.lr_values = [self.rescaled_lr * m for m in multipliers] + self.warmup_steps = warmup_epochs * steps_per_epoch + self.compute_lr_on_cpu = compute_lr_on_cpu + self.name = name + + self.learning_rate_ops_cache = {} + + def __call__(self, step): + if tf.executing_eagerly(): + return self._get_learning_rate(step) + + # In an eager function or graph, the current implementation of optimizer + # repeatedly call and thus create ops for the learning rate schedule. To + # avoid this, we cache the ops if not executing eagerly. + graph = tf.compat.v1.get_default_graph() + if graph not in self.learning_rate_ops_cache: + if self.compute_lr_on_cpu: + with tf.device('/device:CPU:0'): + self.learning_rate_ops_cache[graph] = self._get_learning_rate(step) + else: + self.learning_rate_ops_cache[graph] = self._get_learning_rate(step) + return self.learning_rate_ops_cache[graph] + + def _get_learning_rate(self, step): + """Compute learning rate at given step.""" + with tf.name_scope('PiecewiseConstantDecayWithWarmup'): + def warmup_lr(step): + return self.rescaled_lr * ( + tf.cast(step, tf.float32) / tf.cast(self.warmup_steps, tf.float32)) + def piecewise_lr(step): + return tf.compat.v1.train.piecewise_constant( + step, self.step_boundaries, self.lr_values) + return tf.cond(step < self.warmup_steps, + lambda: warmup_lr(step), + lambda: piecewise_lr(step)) + + def get_config(self): + return { + 'rescaled_lr': self.rescaled_lr, + 'step_boundaries': self.step_boundaries, + 'lr_values': self.lr_values, + 'warmup_steps': self.warmup_steps, + 'compute_lr_on_cpu': self.compute_lr_on_cpu, + 'name': self.name + } + + +def get_optimizer(learning_rate=0.1): + """Returns optimizer to use.""" + # The learning_rate is overwritten at the beginning of each step by callback. 
+ return gradient_descent_v2.SGD(learning_rate=learning_rate, momentum=0.9) + + +def get_callbacks( + pruning_method=None, + enable_checkpoint_and_export=False, + model_dir=None): + """Returns common callbacks.""" + time_callback = keras_utils.TimeHistory( + FLAGS.batch_size, + FLAGS.log_steps, + logdir=FLAGS.model_dir if FLAGS.enable_tensorboard else None) + callbacks = [time_callback] + + if FLAGS.enable_tensorboard: + tensorboard_callback = tf.keras.callbacks.TensorBoard( + log_dir=FLAGS.model_dir, + profile_batch=FLAGS.profile_steps) + callbacks.append(tensorboard_callback) + + is_pruning_enabled = pruning_method is not None + if is_pruning_enabled: + callbacks.append(tfmot.sparsity.keras.UpdatePruningStep()) + if model_dir is not None: + callbacks.append(tfmot.sparsity.keras.PruningSummaries( + log_dir=model_dir, profile_batch=0)) + + if enable_checkpoint_and_export: + if model_dir is not None: + ckpt_full_path = os.path.join(model_dir, 'model.ckpt-{epoch:04d}') + callbacks.append( + tf.keras.callbacks.ModelCheckpoint(ckpt_full_path, + save_weights_only=True)) + return callbacks + + +def build_stats(history, eval_output, callbacks): + """Normalizes and returns dictionary of stats. + + Args: + history: Results of the training step. Supports both categorical_accuracy + and sparse_categorical_accuracy. + eval_output: Output of the eval step. Assumes first value is eval_loss and + second value is accuracy_top_1. + callbacks: a list of callbacks which might include a time history callback + used during keras.fit. + + Returns: + Dictionary of normalized results. + """ + stats = {} + if eval_output: + stats['accuracy_top_1'] = float(eval_output[1]) + stats['eval_loss'] = float(eval_output[0]) + if history and history.history: + train_hist = history.history + # Gets final loss from training. + stats['loss'] = float(train_hist['loss'][-1]) + # Gets top_1 training accuracy. 
+ if 'categorical_accuracy' in train_hist: + stats[TRAIN_TOP_1] = float(train_hist['categorical_accuracy'][-1]) + elif 'sparse_categorical_accuracy' in train_hist: + stats[TRAIN_TOP_1] = float(train_hist['sparse_categorical_accuracy'][-1]) + elif 'accuracy' in train_hist: + stats[TRAIN_TOP_1] = float(train_hist['accuracy'][-1]) + + if not callbacks: + return stats + + # Look for the time history callback which was used during keras.fit + for callback in callbacks: + if isinstance(callback, keras_utils.TimeHistory): + timestamp_log = callback.timestamp_log + stats['step_timestamp_log'] = timestamp_log + stats['train_finish_time'] = callback.train_finish_time + if callback.epoch_runtime_log: + stats['avg_exp_per_second'] = callback.average_examples_per_second + + return stats + + +def define_keras_flags( + dynamic_loss_scale=True, + model=False, + optimizer=False, + pretrained_filepath=False): + """Define flags for Keras models.""" + flags_core.define_base(clean=True, num_gpu=True, run_eagerly=True, + train_epochs=True, epochs_between_evals=True, + distribution_strategy=True) + flags_core.define_performance(num_parallel_calls=False, + synthetic_data=True, + dtype=True, + all_reduce_alg=True, + num_packs=True, + tf_gpu_thread_mode=True, + datasets_num_private_threads=True, + dynamic_loss_scale=dynamic_loss_scale, + loss_scale=True, + fp16_implementation=True, + tf_data_experimental_slack=True, + enable_xla=True, + training_dataset_cache=True) + flags_core.define_image() + flags_core.define_benchmark() + flags_core.define_distribution() + flags.adopt_module_key_flags(flags_core) + + flags.DEFINE_boolean(name='enable_eager', default=False, help='Enable eager?') + flags.DEFINE_boolean(name='skip_eval', default=False, help='Skip evaluation?') + # TODO(b/135607288): Remove this flag once we understand the root cause of + # slowdown when setting the learning phase in Keras backend. + flags.DEFINE_boolean( + name='set_learning_phase_to_train', default=True, + help='If skip eval, also set Keras learning phase to 1 (training).') + flags.DEFINE_boolean( + name='explicit_gpu_placement', default=False, + help='If not using distribution strategy, explicitly set device scope ' + 'for the Keras training loop.') + flags.DEFINE_boolean(name='use_trivial_model', default=False, + help='Whether to use a trivial Keras model.') + flags.DEFINE_boolean(name='report_accuracy_metrics', default=True, + help='Report metrics during training and evaluation.') + flags.DEFINE_boolean(name='use_tensor_lr', default=True, + help='Use learning rate tensor instead of a callback.') + flags.DEFINE_boolean( + name='enable_tensorboard', default=False, + help='Whether to enable Tensorboard callback.') + flags.DEFINE_string( + name='profile_steps', default=None, + help='Save profiling data to model dir at given range of global steps. The ' + 'value must be a comma separated pair of positive integers, specifying ' + 'the first and last step to profile. For example, "--profile_steps=2,4" ' + 'triggers the profiler to process 3 steps, starting from the 2nd step. ' + 'Note that profiler has a non-trivial performance overhead, and the ' + 'output file can be gigantic if profiling many steps.') + flags.DEFINE_integer( + name='train_steps', default=None, + help='The number of steps to run for training. If it is larger than ' + '# batches per epoch, then use # batches per epoch. This flag will be ' + 'ignored if train_epochs is set to be larger than 1. 
') + flags.DEFINE_boolean( + name='batchnorm_spatial_persistent', default=True, + help='Enable the spacial persistent mode for CuDNN batch norm kernel.') + flags.DEFINE_boolean( + name='enable_get_next_as_optional', default=False, + help='Enable get_next_as_optional behavior in DistributedIterator.') + flags.DEFINE_boolean( + name='enable_checkpoint_and_export', default=False, + help='Whether to enable a checkpoint callback and export the savedmodel.') + flags.DEFINE_string( + name='tpu', default='', help='TPU address to connect to.') + flags.DEFINE_integer( + name='steps_per_loop', + default=500, + help='Number of steps per training loop. Only training step happens ' + 'inside the loop. Callbacks will not be called inside. Will be capped at ' + 'steps per epoch.') + flags.DEFINE_boolean( + name='use_tf_while_loop', + default=True, + help='Whether to build a tf.while_loop inside the training loop on the ' + 'host. Setting it to True is critical to have peak performance on ' + 'TPU.') + + if model: + flags.DEFINE_string('model', 'resnet50_v1.5', + 'Name of model preset. (mobilenet, resnet50_v1.5)') + if optimizer: + flags.DEFINE_string('optimizer', 'resnet50_default', + 'Name of optimizer preset. ' + '(mobilenet_default, resnet50_default)') + # TODO(kimjaehong): Replace as general hyper-params not only for mobilenet. + flags.DEFINE_float('initial_learning_rate_per_sample', 0.00007, + 'Initial value of learning rate per sample for ' + 'mobilenet_default.') + flags.DEFINE_float('lr_decay_factor', 0.94, + 'Learning rate decay factor for mobilenet_default.') + flags.DEFINE_float('num_epochs_per_decay', 2.5, + 'Number of epochs per decay for mobilenet_default.') + if pretrained_filepath: + flags.DEFINE_string('pretrained_filepath', '', + 'Pretrained file path.') + + +def get_synth_data(height, width, num_channels, num_classes, dtype): + """Creates a set of synthetic random data. + + Args: + height: Integer height that will be used to create a fake image tensor. + width: Integer width that will be used to create a fake image tensor. + num_channels: Integer depth that will be used to create a fake image tensor. + num_classes: Number of classes that should be represented in the fake labels + tensor + dtype: Data type for features/images. + + Returns: + A tuple of tensors representing the inputs and labels. + + """ + # Synthetic input should be within [0, 255]. + inputs = tf.random.truncated_normal([height, width, num_channels], + dtype=dtype, + mean=127, + stddev=60, + name='synthetic_inputs') + labels = tf.random.uniform([1], + minval=0, + maxval=num_classes - 1, + dtype=tf.int32, + name='synthetic_labels') + return inputs, labels + + +def define_pruning_flags(): + """Define flags for pruning methods.""" + flags.DEFINE_string('pruning_method', None, + 'Pruning method.' + 'None (no pruning) or polynomial_decay.') + flags.DEFINE_float('pruning_initial_sparsity', 0.0, + 'Initial sparsity for pruning.') + flags.DEFINE_float('pruning_final_sparsity', 0.5, + 'Final sparsity for pruning.') + flags.DEFINE_integer('pruning_begin_step', 0, + 'Begin step for pruning.') + flags.DEFINE_integer('pruning_end_step', 100000, + 'End step for pruning.') + flags.DEFINE_integer('pruning_frequency', 100, + 'Frequency for pruning.') + + +def get_synth_input_fn(height, width, num_channels, num_classes, + dtype=tf.float32, drop_remainder=True): + """Returns an input function that returns a dataset with random data. 
+ + This input_fn returns a data set that iterates over a set of random data and + bypasses all preprocessing, e.g. jpeg decode and copy. The host to device + copy is still included. This used to find the upper throughput bound when + tuning the full input pipeline. + + Args: + height: Integer height that will be used to create a fake image tensor. + width: Integer width that will be used to create a fake image tensor. + num_channels: Integer depth that will be used to create a fake image tensor. + num_classes: Number of classes that should be represented in the fake labels + tensor + dtype: Data type for features/images. + drop_remainder: A boolean indicates whether to drop the remainder of the + batches. If True, the batch dimension will be static. + + Returns: + An input_fn that can be used in place of a real one to return a dataset + that can be used for iteration. + """ + # pylint: disable=unused-argument + def input_fn(is_training, data_dir, batch_size, *args, **kwargs): + """Returns dataset filled with random data.""" + inputs, labels = get_synth_data(height=height, + width=width, + num_channels=num_channels, + num_classes=num_classes, + dtype=dtype) + # Cast to float32 for Keras model. + labels = tf.cast(labels, dtype=tf.float32) + data = tf.data.Dataset.from_tensors((inputs, labels)).repeat() + + # `drop_remainder` will make dataset produce outputs with known shapes. + data = data.batch(batch_size, drop_remainder=drop_remainder) + data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) + return data + + return input_fn + + +def set_cudnn_batchnorm_mode(): + """Set CuDNN batchnorm mode for better performance. + + Note: Spatial Persistent mode may lead to accuracy losses for certain + models. + """ + if FLAGS.batchnorm_spatial_persistent: + os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1' + else: + os.environ.pop('TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT', None) diff --git a/models/official/vision/image_classification/resnet/imagenet_preprocessing.py b/models/official/vision/image_classification/resnet/imagenet_preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..f1490c22d8d769f32a6f6a1c6d29455519e8743a --- /dev/null +++ b/models/official/vision/image_classification/resnet/imagenet_preprocessing.py @@ -0,0 +1,561 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Provides utilities to preprocess images. + +Training images are sampled using the provided bounding boxes, and subsequently +cropped to the sampled bounding box. Images are additionally flipped randomly, +then resized to the target output size (without aspect-ratio preservation). + +Images used during evaluation are resized (with aspect-ratio preservation) and +centrally cropped. + +All images undergo mean color subtraction. 
+ +Note that these steps are colloquially referred to as "ResNet preprocessing," +and they differ from "VGG preprocessing," which does not use bounding boxes +and instead does an aspect-preserving resize followed by random crop during +training. (These both differ from "Inception preprocessing," which introduces +color distortion steps.) + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +from absl import logging +import tensorflow as tf + +DEFAULT_IMAGE_SIZE = 224 +NUM_CHANNELS = 3 +NUM_CLASSES = 1001 + +NUM_IMAGES = { + 'train': 1281167, + 'validation': 50000, +} + +_NUM_TRAIN_FILES = 1024 +_SHUFFLE_BUFFER = 10000 + +_R_MEAN = 123.68 +_G_MEAN = 116.78 +_B_MEAN = 103.94 +CHANNEL_MEANS = [_R_MEAN, _G_MEAN, _B_MEAN] + +# The lower bound for the smallest side of the image for aspect-preserving +# resizing. For example, if an image is 500 x 1000, it will be resized to +# _RESIZE_MIN x (_RESIZE_MIN * 2). +_RESIZE_MIN = 256 + + +def process_record_dataset(dataset, + is_training, + batch_size, + shuffle_buffer, + parse_record_fn, + dtype=tf.float32, + datasets_num_private_threads=None, + drop_remainder=False, + tf_data_experimental_slack=False): + """Given a Dataset with raw records, return an iterator over the records. + + Args: + dataset: A Dataset representing raw records + is_training: A boolean denoting whether the input is for training. + batch_size: The number of samples per batch. + shuffle_buffer: The buffer size to use when shuffling records. A larger + value results in better randomness, but smaller values reduce startup + time and use less memory. + parse_record_fn: A function that takes a raw record and returns the + corresponding (image, label) pair. + dtype: Data type to use for images/features. + datasets_num_private_threads: Number of threads for a private + threadpool created for all datasets computation. + drop_remainder: A boolean indicates whether to drop the remainder of the + batches. If True, the batch dimension will be static. + tf_data_experimental_slack: Whether to enable tf.data's + `experimental_slack` option. + + Returns: + Dataset of (image, label) pairs ready for iteration. + """ + # Defines a specific size thread pool for tf.data operations. + if datasets_num_private_threads: + options = tf.data.Options() + options.experimental_threading.private_threadpool_size = ( + datasets_num_private_threads) + dataset = dataset.with_options(options) + logging.info( + 'datasets_num_private_threads: %s', datasets_num_private_threads) + + if is_training: + # Shuffles records before repeating to respect epoch boundaries. + dataset = dataset.shuffle(buffer_size=shuffle_buffer) + # Repeats the dataset for the number of epochs to train. + dataset = dataset.repeat() + + # Parses the raw records into images and labels. + dataset = dataset.map( + lambda value: parse_record_fn(value, is_training, dtype), + num_parallel_calls=tf.data.experimental.AUTOTUNE) + dataset = dataset.batch(batch_size, drop_remainder=drop_remainder) + + # Operations between the final prefetch and the get_next call to the iterator + # will happen synchronously during run time. We prefetch here again to + # background all of the above processing work and keep it out of the + # critical training path. Setting buffer_size to tf.data.experimental.AUTOTUNE + # allows DistributionStrategies to adjust how many batches to fetch based + # on how many devices are present. 
+ dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) + + options = tf.data.Options() + options.experimental_slack = tf_data_experimental_slack + dataset = dataset.with_options(options) + + return dataset + + +def get_filenames(is_training, data_dir): + """Return filenames for dataset.""" + if is_training: + return [ + os.path.join(data_dir, 'train-%05d-of-01024' % i) + for i in range(_NUM_TRAIN_FILES)] + else: + return [ + os.path.join(data_dir, 'validation-%05d-of-00128' % i) + for i in range(128)] + + +def parse_example_proto(example_serialized): + """Parses an Example proto containing a training example of an image. + + The output of the build_image_data.py image preprocessing script is a dataset + containing serialized Example protocol buffers. Each Example proto contains + the following fields (values are included as examples): + + image/height: 462 + image/width: 581 + image/colorspace: 'RGB' + image/channels: 3 + image/class/label: 615 + image/class/synset: 'n03623198' + image/class/text: 'knee pad' + image/object/bbox/xmin: 0.1 + image/object/bbox/xmax: 0.9 + image/object/bbox/ymin: 0.2 + image/object/bbox/ymax: 0.6 + image/object/bbox/label: 615 + image/format: 'JPEG' + image/filename: 'ILSVRC2012_val_00041207.JPEG' + image/encoded: + + Args: + example_serialized: scalar Tensor tf.string containing a serialized + Example protocol buffer. + + Returns: + image_buffer: Tensor tf.string containing the contents of a JPEG file. + label: Tensor tf.int32 containing the label. + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged as + [ymin, xmin, ymax, xmax]. + """ + # Dense features in Example proto. + feature_map = { + 'image/encoded': tf.io.FixedLenFeature([], dtype=tf.string, + default_value=''), + 'image/class/label': tf.io.FixedLenFeature([], dtype=tf.int64, + default_value=-1), + 'image/class/text': tf.io.FixedLenFeature([], dtype=tf.string, + default_value=''), + } + sparse_float32 = tf.io.VarLenFeature(dtype=tf.float32) + # Sparse features in Example proto. + feature_map.update( + {k: sparse_float32 for k in [ + 'image/object/bbox/xmin', 'image/object/bbox/ymin', + 'image/object/bbox/xmax', 'image/object/bbox/ymax']}) + + features = tf.io.parse_single_example(serialized=example_serialized, + features=feature_map) + label = tf.cast(features['image/class/label'], dtype=tf.int32) + + xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0) + ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0) + xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0) + ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0) + + # Note that we impose an ordering of (y, x) just to make life difficult. + bbox = tf.concat([ymin, xmin, ymax, xmax], 0) + + # Force the variable number of bounding boxes into the shape + # [1, num_boxes, coords]. + bbox = tf.expand_dims(bbox, 0) + bbox = tf.transpose(a=bbox, perm=[0, 2, 1]) + + return features['image/encoded'], label, bbox + + +def parse_record(raw_record, is_training, dtype): + """Parses a record containing a training example of an image. + + The input record is parsed into a label and image, and the image is passed + through preprocessing steps (cropping, flipping, and so on). + + Args: + raw_record: scalar Tensor tf.string containing a serialized + Example protocol buffer. + is_training: A boolean denoting whether the input is for training. + dtype: data type to use for images/features. 
+ + Returns: + Tuple with processed image tensor in a channel-last format and + one-hot-encoded label tensor. + """ + image_buffer, label, bbox = parse_example_proto(raw_record) + + image = preprocess_image( + image_buffer=image_buffer, + bbox=bbox, + output_height=DEFAULT_IMAGE_SIZE, + output_width=DEFAULT_IMAGE_SIZE, + num_channels=NUM_CHANNELS, + is_training=is_training) + image = tf.cast(image, dtype) + + # Subtract one so that labels are in [0, 1000), and cast to float32 for + # Keras model. + label = tf.cast(tf.cast(tf.reshape(label, shape=[1]), dtype=tf.int32) - 1, + dtype=tf.float32) + return image, label + + +def get_parse_record_fn(use_keras_image_data_format=False): + """Get a function for parsing the records, accounting for image format. + + This is useful by handling different types of Keras models. For instance, + the current resnet_model.resnet50 input format is always channel-last, + whereas the keras_applications mobilenet input format depends on + tf.keras.backend.image_data_format(). We should set + use_keras_image_data_format=False for the former and True for the latter. + + Args: + use_keras_image_data_format: A boolean denoting whether data format is keras + backend image data format. If False, the image format is channel-last. If + True, the image format matches tf.keras.backend.image_data_format(). + + Returns: + Function to use for parsing the records. + """ + def parse_record_fn(raw_record, is_training, dtype): + image, label = parse_record(raw_record, is_training, dtype) + if use_keras_image_data_format: + if tf.keras.backend.image_data_format() == 'channels_first': + image = tf.transpose(image, perm=[2, 0, 1]) + return image, label + return parse_record_fn + + +def input_fn(is_training, + data_dir, + batch_size, + dtype=tf.float32, + datasets_num_private_threads=None, + parse_record_fn=parse_record, + input_context=None, + drop_remainder=False, + tf_data_experimental_slack=False, + training_dataset_cache=False, + filenames=None): + """Input function which provides batches for train or eval. + + Args: + is_training: A boolean denoting whether the input is for training. + data_dir: The directory containing the input data. + batch_size: The number of samples per batch. + dtype: Data type to use for images/features + datasets_num_private_threads: Number of private threads for tf.data. + parse_record_fn: Function to use for parsing the records. + input_context: A `tf.distribute.InputContext` object passed in by + `tf.distribute.Strategy`. + drop_remainder: A boolean indicates whether to drop the remainder of the + batches. If True, the batch dimension will be static. + tf_data_experimental_slack: Whether to enable tf.data's + `experimental_slack` option. + training_dataset_cache: Whether to cache the training dataset on workers. + Typically used to improve training performance when training data is in + remote storage and can fit into worker memory. + filenames: Optional field for providing the file names of the TFRecords. + + Returns: + A dataset that can be used for iteration. 
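+
+  Example (a sketch; `data_dir` is a placeholder and must contain the
+  `train-*-of-01024` / `validation-*-of-00128` TFRecord shards):
+    dataset = input_fn(
+        is_training=True,
+        data_dir='/path/to/imagenet',
+        batch_size=128,
+        dtype=tf.float32)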
+ """ + if filenames is None: + filenames = get_filenames(is_training, data_dir) + dataset = tf.data.Dataset.from_tensor_slices(filenames) + + if input_context: + logging.info( + 'Sharding the dataset: input_pipeline_id=%d num_input_pipelines=%d', + input_context.input_pipeline_id, input_context.num_input_pipelines) + dataset = dataset.shard(input_context.num_input_pipelines, + input_context.input_pipeline_id) + + if is_training: + # Shuffle the input files + dataset = dataset.shuffle(buffer_size=_NUM_TRAIN_FILES) + + # Convert to individual records. + # cycle_length = 10 means that up to 10 files will be read and deserialized in + # parallel. You may want to increase this number if you have a large number of + # CPU cores. + dataset = dataset.interleave( + tf.data.TFRecordDataset, + cycle_length=10, + num_parallel_calls=tf.data.experimental.AUTOTUNE) + + if is_training and training_dataset_cache: + # Improve training performance when training data is in remote storage and + # can fit into worker memory. + dataset = dataset.cache() + + return process_record_dataset( + dataset=dataset, + is_training=is_training, + batch_size=batch_size, + shuffle_buffer=_SHUFFLE_BUFFER, + parse_record_fn=parse_record_fn, + dtype=dtype, + datasets_num_private_threads=datasets_num_private_threads, + drop_remainder=drop_remainder, + tf_data_experimental_slack=tf_data_experimental_slack, + ) + + +def _decode_crop_and_flip(image_buffer, bbox, num_channels): + """Crops the given image to a random part of the image, and randomly flips. + + We use the fused decode_and_crop op, which performs better than the two ops + used separately in series, but note that this requires that the image be + passed in as an un-decoded string Tensor. + + Args: + image_buffer: scalar string Tensor representing the raw JPEG image buffer. + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged as + [ymin, xmin, ymax, xmax]. + num_channels: Integer depth of the image buffer for decoding. + + Returns: + 3-D tensor with cropped image. + + """ + # A large fraction of image datasets contain a human-annotated bounding box + # delineating the region of the image containing the object of interest. We + # choose to create a new bounding box for the object which is a randomly + # distorted version of the human-annotated bounding box that obeys an + # allowed range of aspect ratios, sizes and overlap with the human-annotated + # bounding box. If no box is supplied, then we assume the bounding box is + # the entire image. + sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( + tf.image.extract_jpeg_shape(image_buffer), + bounding_boxes=bbox, + min_object_covered=0.1, + aspect_ratio_range=[0.75, 1.33], + area_range=[0.05, 1.0], + max_attempts=100, + use_image_if_no_bounding_boxes=True) + bbox_begin, bbox_size, _ = sample_distorted_bounding_box + + # Reassemble the bounding box in the format the crop op requires. + offset_y, offset_x, _ = tf.unstack(bbox_begin) + target_height, target_width, _ = tf.unstack(bbox_size) + crop_window = tf.stack([offset_y, offset_x, target_height, target_width]) + + # Use the fused decode and crop op here, which is faster than each in series. + cropped = tf.image.decode_and_crop_jpeg( + image_buffer, crop_window, channels=num_channels) + + # Flip to add a little more random distortion in. 
+ cropped = tf.image.random_flip_left_right(cropped) + return cropped + + +def _central_crop(image, crop_height, crop_width): + """Performs central crops of the given image list. + + Args: + image: a 3-D image tensor + crop_height: the height of the image following the crop. + crop_width: the width of the image following the crop. + + Returns: + 3-D tensor with cropped image. + """ + shape = tf.shape(input=image) + height, width = shape[0], shape[1] + + amount_to_be_cropped_h = (height - crop_height) + crop_top = amount_to_be_cropped_h // 2 + amount_to_be_cropped_w = (width - crop_width) + crop_left = amount_to_be_cropped_w // 2 + return tf.slice( + image, [crop_top, crop_left, 0], [crop_height, crop_width, -1]) + + +def _mean_image_subtraction(image, means, num_channels): + """Subtracts the given means from each image channel. + + For example: + means = [123.68, 116.779, 103.939] + image = _mean_image_subtraction(image, means) + + Note that the rank of `image` must be known. + + Args: + image: a tensor of size [height, width, C]. + means: a C-vector of values to subtract from each channel. + num_channels: number of color channels in the image that will be distorted. + + Returns: + the centered image. + + Raises: + ValueError: If the rank of `image` is unknown, if `image` has a rank other + than three or if the number of channels in `image` doesn't match the + number of values in `means`. + """ + if image.get_shape().ndims != 3: + raise ValueError('Input must be of size [height, width, C>0]') + + if len(means) != num_channels: + raise ValueError('len(means) must match the number of channels') + + # We have a 1-D tensor of means; convert to 3-D. + # Note(b/130245863): we explicitly call `broadcast` instead of simply + # expanding dimensions for better performance. + means = tf.broadcast_to(means, tf.shape(image)) + + return image - means + + +def _smallest_size_at_least(height, width, resize_min): + """Computes new shape with the smallest side equal to `smallest_side`. + + Computes new shape with the smallest side equal to `smallest_side` while + preserving the original aspect ratio. + + Args: + height: an int32 scalar tensor indicating the current height. + width: an int32 scalar tensor indicating the current width. + resize_min: A python integer or scalar `Tensor` indicating the size of + the smallest side after resize. + + Returns: + new_height: an int32 scalar tensor indicating the new height. + new_width: an int32 scalar tensor indicating the new width. + """ + resize_min = tf.cast(resize_min, tf.float32) + + # Convert to floats to make subsequent calculations go smoothly. + height, width = tf.cast(height, tf.float32), tf.cast(width, tf.float32) + + smaller_dim = tf.minimum(height, width) + scale_ratio = resize_min / smaller_dim + + # Convert back to ints to make heights and widths that TF ops will accept. + new_height = tf.cast(height * scale_ratio, tf.int32) + new_width = tf.cast(width * scale_ratio, tf.int32) + + return new_height, new_width + + +def _aspect_preserving_resize(image, resize_min): + """Resize images preserving the original aspect ratio. + + Args: + image: A 3-D image `Tensor`. + resize_min: A python integer or scalar `Tensor` indicating the size of + the smallest side after resize. + + Returns: + resized_image: A 3-D tensor containing the resized image. 
+ """ + shape = tf.shape(input=image) + height, width = shape[0], shape[1] + + new_height, new_width = _smallest_size_at_least(height, width, resize_min) + + return _resize_image(image, new_height, new_width) + + +def _resize_image(image, height, width): + """Simple wrapper around tf.resize_images. + + This is primarily to make sure we use the same `ResizeMethod` and other + details each time. + + Args: + image: A 3-D image `Tensor`. + height: The target height for the resized image. + width: The target width for the resized image. + + Returns: + resized_image: A 3-D tensor containing the resized image. The first two + dimensions have the shape [height, width]. + """ + return tf.compat.v1.image.resize( + image, [height, width], method=tf.image.ResizeMethod.BILINEAR, + align_corners=False) + + +def preprocess_image(image_buffer, bbox, output_height, output_width, + num_channels, is_training=False): + """Preprocesses the given image. + + Preprocessing includes decoding, cropping, and resizing for both training + and eval images. Training preprocessing, however, introduces some random + distortion of the image to improve accuracy. + + Args: + image_buffer: scalar string Tensor representing the raw JPEG image buffer. + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged as + [ymin, xmin, ymax, xmax]. + output_height: The height of the image after preprocessing. + output_width: The width of the image after preprocessing. + num_channels: Integer depth of the image buffer for decoding. + is_training: `True` if we're preprocessing the image for training and + `False` otherwise. + + Returns: + A preprocessed image. + """ + if is_training: + # For training, we want to randomize some of the distortions. + image = _decode_crop_and_flip(image_buffer, bbox, num_channels) + image = _resize_image(image, output_height, output_width) + else: + # For validation, we want to decode, resize, then just crop the middle. + image = tf.image.decode_jpeg(image_buffer, channels=num_channels) + image = _aspect_preserving_resize(image, _RESIZE_MIN) + image = _central_crop(image, output_height, output_width) + + image.set_shape([output_height, output_width, num_channels]) + + return _mean_image_subtraction(image, CHANNEL_MEANS, num_channels) diff --git a/models/official/vision/image_classification/resnet/resnet_config.py b/models/official/vision/image_classification/resnet/resnet_config.py new file mode 100644 index 0000000000000000000000000000000000000000..a746257f02b85eddfc72192b9474638b92378644 --- /dev/null +++ b/models/official/vision/image_classification/resnet/resnet_config.py @@ -0,0 +1,63 @@ +# Lint as: python3 +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Configuration definitions for ResNet losses, learning rates, and optimizers.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from typing import Any, Mapping + +import dataclasses + +from official.modeling.hyperparams import base_config +from official.vision.image_classification.configs import base_configs + + +_RESNET_LR_SCHEDULE = [ # (multiplier, epoch to start) tuples + (1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80) +] +_RESNET_LR_BOUNDARIES = list(p[1] for p in _RESNET_LR_SCHEDULE[1:]) +_RESNET_LR_MULTIPLIERS = list(p[0] for p in _RESNET_LR_SCHEDULE) +_RESNET_LR_WARMUP_EPOCHS = _RESNET_LR_SCHEDULE[0][1] + + +@dataclasses.dataclass +class ResNetModelConfig(base_configs.ModelConfig): + """Configuration for the ResNet model.""" + name: str = 'ResNet' + num_classes: int = 1000 + model_params: base_config.Config = dataclasses.field( + default_factory=lambda: { + 'num_classes': 1000, + 'batch_size': None, + 'use_l2_regularizer': True, + 'rescale_inputs': False, + }) + loss: base_configs.LossConfig = base_configs.LossConfig( + name='sparse_categorical_crossentropy') + optimizer: base_configs.OptimizerConfig = base_configs.OptimizerConfig( + name='momentum', + decay=0.9, + epsilon=0.001, + momentum=0.9, + moving_average_decay=None) + learning_rate: base_configs.LearningRateConfig = ( + base_configs.LearningRateConfig( + name='piecewise_constant_with_warmup', + examples_per_epoch=1281167, + warmup_epochs=_RESNET_LR_WARMUP_EPOCHS, + boundaries=_RESNET_LR_BOUNDARIES, + multipliers=_RESNET_LR_MULTIPLIERS)) diff --git a/models/official/vision/image_classification/resnet/resnet_ctl_imagenet_main.py b/models/official/vision/image_classification/resnet/resnet_ctl_imagenet_main.py new file mode 100644 index 0000000000000000000000000000000000000000..c128dc0b99535d806634b42b99a2e56211c567ca --- /dev/null +++ b/models/official/vision/image_classification/resnet/resnet_ctl_imagenet_main.py @@ -0,0 +1,196 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Runs a ResNet model on the ImageNet dataset using custom training loops.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +from absl import app +from absl import flags +from absl import logging +import tensorflow as tf + +from official.modeling import performance +from official.staging.training import controller +from official.utils.flags import core as flags_core +from official.utils.misc import distribution_utils +from official.utils.misc import keras_utils +from official.utils.misc import model_helpers +from official.vision.image_classification.resnet import common +from official.vision.image_classification.resnet import imagenet_preprocessing +from official.vision.image_classification.resnet import resnet_runnable + +flags.DEFINE_boolean(name='use_tf_function', default=True, + help='Wrap the train and test step inside a ' + 'tf.function.') +flags.DEFINE_boolean(name='single_l2_loss_op', default=False, + help='Calculate L2_loss on concatenated weights, ' + 'instead of using Keras per-layer L2 loss.') + + +def build_stats(runnable, time_callback): + """Normalizes and returns dictionary of stats. + + Args: + runnable: The module containing all the training and evaluation metrics. + time_callback: Time tracking callback instance. + + Returns: + Dictionary of normalized results. + """ + stats = {} + + if not runnable.flags_obj.skip_eval: + stats['eval_loss'] = runnable.test_loss.result().numpy() + stats['eval_acc'] = runnable.test_accuracy.result().numpy() + + stats['train_loss'] = runnable.train_loss.result().numpy() + stats['train_acc'] = runnable.train_accuracy.result().numpy() + + if time_callback: + timestamp_log = time_callback.timestamp_log + stats['step_timestamp_log'] = timestamp_log + stats['train_finish_time'] = time_callback.train_finish_time + if time_callback.epoch_runtime_log: + stats['avg_exp_per_second'] = time_callback.average_examples_per_second + + return stats + + +def get_num_train_iterations(flags_obj): + """Returns the number of training steps, train and test epochs.""" + train_steps = ( + imagenet_preprocessing.NUM_IMAGES['train'] // flags_obj.batch_size) + train_epochs = flags_obj.train_epochs + + if flags_obj.train_steps: + train_steps = min(flags_obj.train_steps, train_steps) + train_epochs = 1 + + eval_steps = math.ceil(1.0 * imagenet_preprocessing.NUM_IMAGES['validation'] / + flags_obj.batch_size) + + return train_steps, train_epochs, eval_steps + + +def _steps_to_run(steps_in_current_epoch, steps_per_epoch, steps_per_loop): + """Calculates steps to run on device.""" + if steps_per_loop <= 0: + raise ValueError('steps_per_loop should be positive integer.') + if steps_per_loop == 1: + return steps_per_loop + return min(steps_per_loop, steps_per_epoch - steps_in_current_epoch) + + +def run(flags_obj): + """Run ResNet ImageNet training and eval loop using custom training loops. + + Args: + flags_obj: An object containing parsed flag values. + + Raises: + ValueError: If fp16 is passed as it is not currently supported. + + Returns: + Dictionary of training and eval stats. 
+ """ + keras_utils.set_session_config( + enable_xla=flags_obj.enable_xla) + performance.set_mixed_precision_policy(flags_core.get_tf_dtype(flags_obj)) + + if tf.config.list_physical_devices('GPU'): + if flags_obj.tf_gpu_thread_mode: + keras_utils.set_gpu_thread_mode_and_count( + per_gpu_thread_count=flags_obj.per_gpu_thread_count, + gpu_thread_mode=flags_obj.tf_gpu_thread_mode, + num_gpus=flags_obj.num_gpus, + datasets_num_private_threads=flags_obj.datasets_num_private_threads) + common.set_cudnn_batchnorm_mode() + + # TODO(anj-s): Set data_format without using Keras. + data_format = flags_obj.data_format + if data_format is None: + data_format = ('channels_first' if tf.config.list_physical_devices('GPU') + else 'channels_last') + tf.keras.backend.set_image_data_format(data_format) + + strategy = distribution_utils.get_distribution_strategy( + distribution_strategy=flags_obj.distribution_strategy, + num_gpus=flags_obj.num_gpus, + all_reduce_alg=flags_obj.all_reduce_alg, + num_packs=flags_obj.num_packs, + tpu_address=flags_obj.tpu) + + per_epoch_steps, train_epochs, eval_steps = get_num_train_iterations( + flags_obj) + steps_per_loop = min(flags_obj.steps_per_loop, per_epoch_steps) + + logging.info( + 'Training %d epochs, each epoch has %d steps, ' + 'total steps: %d; Eval %d steps', train_epochs, per_epoch_steps, + train_epochs * per_epoch_steps, eval_steps) + + time_callback = keras_utils.TimeHistory( + flags_obj.batch_size, + flags_obj.log_steps, + logdir=flags_obj.model_dir if flags_obj.enable_tensorboard else None) + with distribution_utils.get_strategy_scope(strategy): + runnable = resnet_runnable.ResnetRunnable(flags_obj, time_callback, + per_epoch_steps) + + eval_interval = flags_obj.epochs_between_evals * per_epoch_steps + checkpoint_interval = ( + per_epoch_steps if flags_obj.enable_checkpoint_and_export else None) + summary_interval = per_epoch_steps if flags_obj.enable_tensorboard else None + + checkpoint_manager = tf.train.CheckpointManager( + runnable.checkpoint, + directory=flags_obj.model_dir, + max_to_keep=10, + step_counter=runnable.global_step, + checkpoint_interval=checkpoint_interval) + + resnet_controller = controller.Controller( + strategy, + runnable.train, + runnable.evaluate if not flags_obj.skip_eval else None, + global_step=runnable.global_step, + steps_per_loop=steps_per_loop, + train_steps=per_epoch_steps * train_epochs, + checkpoint_manager=checkpoint_manager, + summary_interval=summary_interval, + eval_steps=eval_steps, + eval_interval=eval_interval) + + time_callback.on_train_begin() + resnet_controller.train(evaluate=not flags_obj.skip_eval) + time_callback.on_train_end() + + stats = build_stats(runnable, time_callback) + return stats + + +def main(_): + model_helpers.apply_clean(flags.FLAGS) + stats = run(flags.FLAGS) + logging.info('Run stats:\n%s', stats) + + +if __name__ == '__main__': + logging.set_verbosity(logging.INFO) + common.define_keras_flags() + app.run(main) diff --git a/models/official/vision/image_classification/resnet/resnet_model.py b/models/official/vision/image_classification/resnet/resnet_model.py new file mode 100644 index 0000000000000000000000000000000000000000..10f1233356ece188cce51ec254f0064739cd6f41 --- /dev/null +++ b/models/official/vision/image_classification/resnet/resnet_model.py @@ -0,0 +1,329 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""ResNet50 model for Keras. + +Adapted from tf.keras.applications.resnet50.ResNet50(). +This is ResNet model version 1.5. + +Related papers/blogs: +- https://arxiv.org/abs/1512.03385 +- https://arxiv.org/pdf/1603.05027v2.pdf +- http://torch.ch/blog/2016/02/04/resnets.html + +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from tensorflow.python.keras import backend +from tensorflow.python.keras import initializers +from tensorflow.python.keras import models +from tensorflow.python.keras import regularizers +from official.vision.image_classification.resnet import imagenet_preprocessing + +layers = tf.keras.layers + + +def _gen_l2_regularizer(use_l2_regularizer=True, l2_weight_decay=1e-4): + return regularizers.l2(l2_weight_decay) if use_l2_regularizer else None + + +def identity_block(input_tensor, + kernel_size, + filters, + stage, + block, + use_l2_regularizer=True, + batch_norm_decay=0.9, + batch_norm_epsilon=1e-5): + """The identity block is the block that has no conv layer at shortcut. + + Args: + input_tensor: input tensor + kernel_size: default 3, the kernel size of middle conv layer at main path + filters: list of integers, the filters of 3 conv layer at main path + stage: integer, current stage label, used for generating layer names + block: 'a','b'..., current block label, used for generating layer names + use_l2_regularizer: whether to use L2 regularizer on Conv layer. + batch_norm_decay: Moment of batch norm layers. + batch_norm_epsilon: Epsilon of batch borm layers. + + Returns: + Output tensor for the block. 
+ """ + filters1, filters2, filters3 = filters + if backend.image_data_format() == 'channels_last': + bn_axis = 3 + else: + bn_axis = 1 + conv_name_base = 'res' + str(stage) + block + '_branch' + bn_name_base = 'bn' + str(stage) + block + '_branch' + + x = layers.Conv2D( + filters1, (1, 1), + use_bias=False, + kernel_initializer='he_normal', + kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer), + name=conv_name_base + '2a')( + input_tensor) + x = layers.BatchNormalization( + axis=bn_axis, + momentum=batch_norm_decay, + epsilon=batch_norm_epsilon, + name=bn_name_base + '2a')( + x) + x = layers.Activation('relu')(x) + + x = layers.Conv2D( + filters2, + kernel_size, + padding='same', + use_bias=False, + kernel_initializer='he_normal', + kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer), + name=conv_name_base + '2b')( + x) + x = layers.BatchNormalization( + axis=bn_axis, + momentum=batch_norm_decay, + epsilon=batch_norm_epsilon, + name=bn_name_base + '2b')( + x) + x = layers.Activation('relu')(x) + + x = layers.Conv2D( + filters3, (1, 1), + use_bias=False, + kernel_initializer='he_normal', + kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer), + name=conv_name_base + '2c')( + x) + x = layers.BatchNormalization( + axis=bn_axis, + momentum=batch_norm_decay, + epsilon=batch_norm_epsilon, + name=bn_name_base + '2c')( + x) + + x = layers.add([x, input_tensor]) + x = layers.Activation('relu')(x) + return x + + +def conv_block(input_tensor, + kernel_size, + filters, + stage, + block, + strides=(2, 2), + use_l2_regularizer=True, + batch_norm_decay=0.9, + batch_norm_epsilon=1e-5): + """A block that has a conv layer at shortcut. + + Note that from stage 3, + the second conv layer at main path is with strides=(2, 2) + And the shortcut should have strides=(2, 2) as well + + Args: + input_tensor: input tensor + kernel_size: default 3, the kernel size of middle conv layer at main path + filters: list of integers, the filters of 3 conv layer at main path + stage: integer, current stage label, used for generating layer names + block: 'a','b'..., current block label, used for generating layer names + strides: Strides for the second conv layer in the block. + use_l2_regularizer: whether to use L2 regularizer on Conv layer. + batch_norm_decay: Moment of batch norm layers. + batch_norm_epsilon: Epsilon of batch borm layers. + + Returns: + Output tensor for the block. 
+ """ + filters1, filters2, filters3 = filters + if backend.image_data_format() == 'channels_last': + bn_axis = 3 + else: + bn_axis = 1 + conv_name_base = 'res' + str(stage) + block + '_branch' + bn_name_base = 'bn' + str(stage) + block + '_branch' + + x = layers.Conv2D( + filters1, (1, 1), + use_bias=False, + kernel_initializer='he_normal', + kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer), + name=conv_name_base + '2a')( + input_tensor) + x = layers.BatchNormalization( + axis=bn_axis, + momentum=batch_norm_decay, + epsilon=batch_norm_epsilon, + name=bn_name_base + '2a')( + x) + x = layers.Activation('relu')(x) + + x = layers.Conv2D( + filters2, + kernel_size, + strides=strides, + padding='same', + use_bias=False, + kernel_initializer='he_normal', + kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer), + name=conv_name_base + '2b')( + x) + x = layers.BatchNormalization( + axis=bn_axis, + momentum=batch_norm_decay, + epsilon=batch_norm_epsilon, + name=bn_name_base + '2b')( + x) + x = layers.Activation('relu')(x) + + x = layers.Conv2D( + filters3, (1, 1), + use_bias=False, + kernel_initializer='he_normal', + kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer), + name=conv_name_base + '2c')( + x) + x = layers.BatchNormalization( + axis=bn_axis, + momentum=batch_norm_decay, + epsilon=batch_norm_epsilon, + name=bn_name_base + '2c')( + x) + + shortcut = layers.Conv2D( + filters3, (1, 1), + strides=strides, + use_bias=False, + kernel_initializer='he_normal', + kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer), + name=conv_name_base + '1')( + input_tensor) + shortcut = layers.BatchNormalization( + axis=bn_axis, + momentum=batch_norm_decay, + epsilon=batch_norm_epsilon, + name=bn_name_base + '1')( + shortcut) + + x = layers.add([x, shortcut]) + x = layers.Activation('relu')(x) + return x + + +def resnet50(num_classes, + batch_size=None, + use_l2_regularizer=True, + rescale_inputs=False, + batch_norm_decay=0.9, + batch_norm_epsilon=1e-5): + """Instantiates the ResNet50 architecture. + + Args: + num_classes: `int` number of classes for image classification. + batch_size: Size of the batches for each step. + use_l2_regularizer: whether to use L2 regularizer on Conv/Dense layer. + rescale_inputs: whether to rescale inputs from 0 to 1. + batch_norm_decay: Moment of batch norm layers. + batch_norm_epsilon: Epsilon of batch borm layers. + + Returns: + A Keras model instance. + """ + input_shape = (224, 224, 3) + img_input = layers.Input(shape=input_shape, batch_size=batch_size) + if rescale_inputs: + # Hub image modules expect inputs in the range [0, 1]. This rescales these + # inputs to the range expected by the trained model. 
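+ # Concretely, a pixel value p in [0, 1] becomes p * 255.0 minus the
+ # per-channel ImageNet mean (imagenet_preprocessing.CHANNEL_MEANS), which
+ # puts hub-style inputs on the same scale that preprocess_image produces
+ # for images fed to this model.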
+ x = layers.Lambda( + lambda x: x * 255.0 - backend.constant( + imagenet_preprocessing.CHANNEL_MEANS, + shape=[1, 1, 3], + dtype=x.dtype), + name='rescale')( + img_input) + else: + x = img_input + + if backend.image_data_format() == 'channels_first': + x = layers.Permute((3, 1, 2))(x) + bn_axis = 1 + else: # channels_last + bn_axis = 3 + + block_config = dict( + use_l2_regularizer=use_l2_regularizer, + batch_norm_decay=batch_norm_decay, + batch_norm_epsilon=batch_norm_epsilon) + x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(x) + x = layers.Conv2D( + 64, (7, 7), + strides=(2, 2), + padding='valid', + use_bias=False, + kernel_initializer='he_normal', + kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer), + name='conv1')( + x) + x = layers.BatchNormalization( + axis=bn_axis, + momentum=batch_norm_decay, + epsilon=batch_norm_epsilon, + name='bn_conv1')( + x) + x = layers.Activation('relu')(x) + x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x) + + x = conv_block( + x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), **block_config) + x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', **block_config) + x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', **block_config) + + x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', **block_config) + x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', **block_config) + x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', **block_config) + x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', **block_config) + + x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', **block_config) + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b', **block_config) + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c', **block_config) + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d', **block_config) + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e', **block_config) + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f', **block_config) + + x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', **block_config) + x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', **block_config) + x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', **block_config) + + x = layers.GlobalAveragePooling2D()(x) + x = layers.Dense( + num_classes, + kernel_initializer=initializers.RandomNormal(stddev=0.01), + kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer), + bias_regularizer=_gen_l2_regularizer(use_l2_regularizer), + name='fc1000')( + x) + + # A softmax that is followed by the model loss must be done cannot be done + # in float16 due to numeric issues. So we pass dtype=float32. + x = layers.Activation('softmax', dtype='float32')(x) + + # Create model. + return models.Model(img_input, x, name='resnet50') diff --git a/models/official/vision/image_classification/resnet/resnet_runnable.py b/models/official/vision/image_classification/resnet/resnet_runnable.py new file mode 100644 index 0000000000000000000000000000000000000000..473b18daf7aaf02bfb1dc86110b3ae0fd2704359 --- /dev/null +++ b/models/official/vision/image_classification/resnet/resnet_runnable.py @@ -0,0 +1,221 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Runs a ResNet model on the ImageNet dataset using custom training loops.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from official.modeling import performance +from official.staging.training import grad_utils +from official.staging.training import standard_runnable +from official.staging.training import utils +from official.utils.flags import core as flags_core +from official.vision.image_classification.resnet import common +from official.vision.image_classification.resnet import imagenet_preprocessing +from official.vision.image_classification.resnet import resnet_model + + +class ResnetRunnable(standard_runnable.StandardTrainable, + standard_runnable.StandardEvaluable): + """Implements the training and evaluation APIs for Resnet model.""" + + def __init__(self, flags_obj, time_callback, epoch_steps): + standard_runnable.StandardTrainable.__init__(self, + flags_obj.use_tf_while_loop, + flags_obj.use_tf_function) + standard_runnable.StandardEvaluable.__init__(self, + flags_obj.use_tf_function) + + self.strategy = tf.distribute.get_strategy() + self.flags_obj = flags_obj + self.dtype = flags_core.get_tf_dtype(flags_obj) + self.time_callback = time_callback + + # Input pipeline related + batch_size = flags_obj.batch_size + if batch_size % self.strategy.num_replicas_in_sync != 0: + raise ValueError( + 'Batch size must be divisible by number of replicas : {}'.format( + self.strategy.num_replicas_in_sync)) + + # As auto rebatching is not supported in + # `experimental_distribute_datasets_from_function()` API, which is + # required when cloning dataset to multiple workers in eager mode, + # we use per-replica batch size. + self.batch_size = int(batch_size / self.strategy.num_replicas_in_sync) + + if self.flags_obj.use_synthetic_data: + self.input_fn = common.get_synth_input_fn( + height=imagenet_preprocessing.DEFAULT_IMAGE_SIZE, + width=imagenet_preprocessing.DEFAULT_IMAGE_SIZE, + num_channels=imagenet_preprocessing.NUM_CHANNELS, + num_classes=imagenet_preprocessing.NUM_CLASSES, + dtype=self.dtype, + drop_remainder=True) + else: + self.input_fn = imagenet_preprocessing.input_fn + + self.model = resnet_model.resnet50( + num_classes=imagenet_preprocessing.NUM_CLASSES, + use_l2_regularizer=not flags_obj.single_l2_loss_op) + + lr_schedule = common.PiecewiseConstantDecayWithWarmup( + batch_size=flags_obj.batch_size, + epoch_size=imagenet_preprocessing.NUM_IMAGES['train'], + warmup_epochs=common.LR_SCHEDULE[0][1], + boundaries=list(p[1] for p in common.LR_SCHEDULE[1:]), + multipliers=list(p[0] for p in common.LR_SCHEDULE), + compute_lr_on_cpu=True) + self.optimizer = common.get_optimizer(lr_schedule) + # Make sure iterations variable is created inside scope. 
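+ # optimizer.iterations doubles as the global training step: the EpochHelper
+ # created below and the tf.train.CheckpointManager in
+ # resnet_ctl_imagenet_main.py both read this same counter.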
+ self.global_step = self.optimizer.iterations + + use_graph_rewrite = flags_obj.fp16_implementation == 'graph_rewrite' + if use_graph_rewrite and not flags_obj.use_tf_function: + raise ValueError('--fp16_implementation=graph_rewrite requires ' + '--use_tf_function to be true') + self.optimizer = performance.configure_optimizer( + self.optimizer, + use_float16=self.dtype == tf.float16, + use_graph_rewrite=use_graph_rewrite, + loss_scale=flags_core.get_loss_scale(flags_obj, default_for_fp16=128)) + + self.train_loss = tf.keras.metrics.Mean('train_loss', dtype=tf.float32) + self.train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy( + 'train_accuracy', dtype=tf.float32) + self.test_loss = tf.keras.metrics.Mean('test_loss', dtype=tf.float32) + self.test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy( + 'test_accuracy', dtype=tf.float32) + + self.checkpoint = tf.train.Checkpoint( + model=self.model, optimizer=self.optimizer) + + # Handling epochs. + self.epoch_steps = epoch_steps + self.epoch_helper = utils.EpochHelper(epoch_steps, self.global_step) + + def build_train_dataset(self): + """See base class.""" + return utils.make_distributed_dataset( + self.strategy, + self.input_fn, + is_training=True, + data_dir=self.flags_obj.data_dir, + batch_size=self.batch_size, + parse_record_fn=imagenet_preprocessing.parse_record, + datasets_num_private_threads=self.flags_obj + .datasets_num_private_threads, + dtype=self.dtype, + drop_remainder=True) + + def build_eval_dataset(self): + """See base class.""" + return utils.make_distributed_dataset( + self.strategy, + self.input_fn, + is_training=False, + data_dir=self.flags_obj.data_dir, + batch_size=self.batch_size, + parse_record_fn=imagenet_preprocessing.parse_record, + dtype=self.dtype) + + def train_loop_begin(self): + """See base class.""" + # Reset all metrics + self.train_loss.reset_states() + self.train_accuracy.reset_states() + + self._epoch_begin() + self.time_callback.on_batch_begin(self.epoch_helper.batch_index) + + def train_step(self, iterator): + """See base class.""" + + def step_fn(inputs): + """Function to run on the device.""" + images, labels = inputs + with tf.GradientTape() as tape: + logits = self.model(images, training=True) + + prediction_loss = tf.keras.losses.sparse_categorical_crossentropy( + labels, logits) + loss = tf.reduce_sum(prediction_loss) * (1.0 / + self.flags_obj.batch_size) + num_replicas = self.strategy.num_replicas_in_sync + l2_weight_decay = 1e-4 + if self.flags_obj.single_l2_loss_op: + l2_loss = l2_weight_decay * 2 * tf.add_n([ + tf.nn.l2_loss(v) + for v in self.model.trainable_variables + if 'bn' not in v.name + ]) + + loss += (l2_loss / num_replicas) + else: + loss += (tf.reduce_sum(self.model.losses) / num_replicas) + + grad_utils.minimize_using_explicit_allreduce( + tape, self.optimizer, loss, self.model.trainable_variables) + self.train_loss.update_state(loss) + self.train_accuracy.update_state(labels, logits) + + self.strategy.run(step_fn, args=(next(iterator),)) + + def train_loop_end(self): + """See base class.""" + metrics = { + 'train_loss': self.train_loss.result(), + 'train_accuracy': self.train_accuracy.result(), + } + self.time_callback.on_batch_end(self.epoch_helper.batch_index - 1) + self._epoch_end() + return metrics + + def eval_begin(self): + """See base class.""" + self.test_loss.reset_states() + self.test_accuracy.reset_states() + + def eval_step(self, iterator): + """See base class.""" + + def step_fn(inputs): + """Function to run on the device.""" + images, labels = inputs + 
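+ # As in train_step, the summed per-example losses below are scaled by the
+ # global batch size (flags_obj.batch_size) rather than the per-replica batch
+ # size, so each replica contributes its share of the global mean loss.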
logits = self.model(images, training=False) + loss = tf.keras.losses.sparse_categorical_crossentropy(labels, logits) + loss = tf.reduce_sum(loss) * (1.0 / self.flags_obj.batch_size) + self.test_loss.update_state(loss) + self.test_accuracy.update_state(labels, logits) + + self.strategy.run(step_fn, args=(next(iterator),)) + + def eval_end(self): + """See base class.""" + return { + 'test_loss': self.test_loss.result(), + 'test_accuracy': self.test_accuracy.result() + } + + def _epoch_begin(self): + if self.epoch_helper.epoch_begin(): + self.time_callback.on_epoch_begin(self.epoch_helper.current_epoch) + + def _epoch_end(self): + if self.epoch_helper.epoch_end(): + self.time_callback.on_epoch_end(self.epoch_helper.current_epoch) diff --git a/models/official/vision/image_classification/resnet/tfhub_export.py b/models/official/vision/image_classification/resnet/tfhub_export.py new file mode 100644 index 0000000000000000000000000000000000000000..ff1f124a1d67c93b9deee453a23cf71133bb6434 --- /dev/null +++ b/models/official/vision/image_classification/resnet/tfhub_export.py @@ -0,0 +1,66 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""A script to export TF-Hub SavedModel.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import os + +from absl import app +from absl import flags + +import tensorflow as tf + +from official.vision.image_classification.resnet import imagenet_preprocessing +from official.vision.image_classification.resnet import resnet_model + +FLAGS = flags.FLAGS + +flags.DEFINE_string("model_path", None, + "File path to TF model checkpoint or H5 file.") +flags.DEFINE_string("export_path", None, + "TF-Hub SavedModel destination path to export.") + + +def export_tfhub(model_path, hub_destination): + """Restores a tf.keras.Model and saves for TF-Hub.""" + model = resnet_model.resnet50( + num_classes=imagenet_preprocessing.NUM_CLASSES, rescale_inputs=True) + model.load_weights(model_path) + model.save( + os.path.join(hub_destination, "classification"), include_optimizer=False) + + # Extracts a sub-model to use pooling feature vector as model output. + image_input = model.get_layer(index=0).get_output_at(0) + feature_vector_output = model.get_layer(name="reduce_mean").get_output_at(0) + hub_model = tf.keras.Model(image_input, feature_vector_output) + + # Exports a SavedModel. 
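+ # This 'feature-vector' directory complements the full 'classification'
+ # SavedModel written above. As a rough illustrative sketch, assuming the
+ # tensorflow_hub package is available, the export could later be consumed
+ # along these lines (path shown is a placeholder):
+ #   import tensorflow_hub as hub
+ #   features = hub.KerasLayer('<export_path>/feature-vector', trainable=False)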
+ hub_model.save( + os.path.join(hub_destination, "feature-vector"), include_optimizer=False) + + +def main(argv): + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") + + export_tfhub(FLAGS.model_path, FLAGS.export_path) + + +if __name__ == "__main__": + app.run(main) diff --git a/models/official/vision/image_classification/test_utils.py b/models/official/vision/image_classification/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a6dc91dc775ce25950a8918450548c19992eb2c4 --- /dev/null +++ b/models/official/vision/image_classification/test_utils.py @@ -0,0 +1,38 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Test utilities for image classification tasks.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensorflow.python.keras import backend +from tensorflow.python.keras import layers +from tensorflow.python.keras import models + + +def trivial_model(num_classes): + """Trivial model for ImageNet dataset.""" + + input_shape = (224, 224, 3) + img_input = layers.Input(shape=input_shape) + + x = layers.Lambda(lambda x: backend.reshape(x, [-1, 224 * 224 * 3]), + name='reshape')(img_input) + x = layers.Dense(1, name='fc1')(x) + x = layers.Dense(num_classes, name='fc1000')(x) + x = layers.Activation('softmax', dtype='float32')(x) + + return models.Model(img_input, x, name='trivial') diff --git a/models/research/README.md b/models/research/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f9e84fb86f44b687c6a9c221fa72cd461e84c01e --- /dev/null +++ b/models/research/README.md @@ -0,0 +1,124 @@ +![Logo](https://storage.googleapis.com/model_garden_artifacts/TF_Model_Garden.png) + +# TensorFlow Research Models + +This directory contains code implementations and pre-trained models of published research papers. + +The research models are maintained by their respective authors. + +## Table of Contents +- [Modeling Libraries and Models](#modeling-libraries-and-models) +- [Models and Implementations](#models-and-implementations) + * [Computer Vision](#computer-vision) + * [Natural Language Processing](#natural-language-processing) + * [Audio and Speech](#audio-and-speech) + * [Reinforcement Learning](#reinforcement-learning) + * [Others](#others) +- [Archived Models and Implementations](#warning-archived-models-and-implementations) (:no_entry_sign: No longer maintained) + +## Modeling Libraries and Models + +| Directory | Name | Description | Maintainer(s) | +|-----------|------|-------------|---------------| +| [object_detection](object_detection) | TensorFlow Object Detection API | A framework that makes it easy to construct, train and deploy object detection models

A collection of object detection models pre-trained on the COCO dataset, the Kitti dataset, the Open Images dataset, the AVA v2.1 dataset, and the iNaturalist Species Detection Dataset | jch1, tombstone, pkulzc | +| [slim](slim) | TensorFlow-Slim Image Classification Model Library | A lightweight high-level API of TensorFlow for defining, training and evaluating image classification models<br>
• Inception V1/V2/V3/V4
• Inception-ResNet-v2
• ResNet V1/V2
• VGG 16/19
• MobileNet V1/V2/V3
• NASNet-A_Mobile/Large
• PNASNet-5_Large/Mobile | sguada, marksandler2 | + +## Models and Implementations + +### Computer Vision + +| Directory | Paper(s) | Conference | Maintainer(s) | +|-----------|----------|------------|---------------| +| [attention_ocr](attention_ocr) | [Attention-based Extraction of Structured Information from Street View Imagery](https://arxiv.org/abs/1704.03549) | ICDAR 2017 | xavigibert | +| [autoaugment](autoaugment) | [1] [AutoAugment](https://arxiv.org/abs/1805.09501)
[2] [Wide Residual Networks](https://arxiv.org/abs/1605.07146)
[3] [Shake-Shake regularization](https://arxiv.org/abs/1705.07485)
[4] [ShakeDrop Regularization for Deep Residual Learning](https://arxiv.org/abs/1802.02375) | [1] CVPR 2019
[2] BMVC 2016
[3] ICLR 2017
[4] ICLR 2018 | barretzoph | +| [deeplab](deeplab) | [1] [DeepLabv1: Semantic Image Segmentation with Deep Convolutional Nets and Fully Connected CRFs](https://arxiv.org/abs/1412.7062)
[2] [DeepLabv2: Semantic Image Segmentation with Deep Convolutional Nets, Atrous Convolution, and Fully Connected CRFs](https://arxiv.org/abs/1606.00915)
[3] [DeepLabv3: Rethinking Atrous Convolution for Semantic Image Segmentation](https://arxiv.org/abs/1706.05587)
[4] [DeepLabv3+: Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation](https://arxiv.org/abs/1802.02611)
| [1] ICLR 2015
[2] TPAMI 2017
[4] ECCV 2018 | aquariusjay, yknzhu | +| [delf](delf) | [1] DELF (DEep Local Features): [Large-Scale Image Retrieval with Attentive Deep Local Features](https://arxiv.org/abs/1612.06321)
[2] [Detect-to-Retrieve: Efficient Regional Aggregation for Image Search](https://arxiv.org/abs/1812.01584)
[3] DELG (DEep Local and Global features): [Unifying Deep Local and Global Features for Image Search](https://arxiv.org/abs/2001.05027)
[4] GLDv2: [Google Landmarks Dataset v2 -- A Large-Scale Benchmark for Instance-Level Recognition and Retrieval](https://arxiv.org/abs/2004.01804) | [1] ICCV 2017
[2] CVPR 2019
[4] CVPR 2020 | andrefaraujo | +| [lstm_object_detection](lstm_object_detection) | [Mobile Video Object Detection with Temporally-Aware Feature Maps](https://arxiv.org/abs/1711.06368) | CVPR 2018 | yinxiaoli, yongzhe2160, lzyuan | +| [marco](marco) | MARCO: [Classification of crystallization outcomes using deep convolutional neural networks](https://arxiv.org/abs/1803.10342) | | vincentvanhoucke | +| [vid2depth](vid2depth) | [Unsupervised Learning of Depth and Ego-Motion from Monocular Video Using 3D Geometric Constraints](https://arxiv.org/abs/1802.05522) | CVPR 2018 | rezama | + +### Natural Language Processing + +| Directory | Paper(s) | Conference | Maintainer(s) | +|-----------|----------|------------|---------------| +| [adversarial_text](adversarial_text) | [1] [Adversarial Training Methods for Semi-Supervised Text](https://arxiv.org/abs/1605.07725) Classification
[2] [Semi-supervised Sequence Learning](https://arxiv.org/abs/1511.01432) | [1] ICLR 2017
[2] NIPS 2015 | rsepassi, a-dai | +| [cvt_text](cvt_text) | [Semi-Supervised Sequence Modeling with Cross-View Training](https://arxiv.org/abs/1809.08370) | EMNLP 2018 | clarkkev, lmthang | + +### Audio and Speech + +| Directory | Paper(s) | Conference | Maintainer(s) | +|-----------|----------|------------|---------------| +| [audioset](audioset) | [1] [Audio Set: An ontology and human-labeled dataset for audio events](https://research.google/pubs/pub45857/)
[2] [CNN Architectures for Large-Scale Audio Classification](https://research.google/pubs/pub45611/) | ICASSP 2017 | plakal, dpwe | + +### Reinforcement Learning + +| Directory | Paper(s) | Conference | Maintainer(s) | +|-----------|----------|------------|---------------| +| [efficient-hrl](efficient-hrl) | [1] [Data-Efficient Hierarchical Reinforcement Learning](https://arxiv.org/abs/1805.08296)
[2] [Near-Optimal Representation Learning for Hierarchical Reinforcement Learning](https://arxiv.org/abs/1810.01257) | [1] NIPS 2018
[2] ICLR 2019 | ofirnachum | +| [pcl_rl](pcl_rl) | [1] [Improving Policy Gradient by Exploring Under-appreciated Rewards](https://arxiv.org/abs/1611.09321)
[2] [Bridging the Gap Between Value and Policy Based Reinforcement Learning](https://arxiv.org/abs/1702.08892)
[3] [Trust-PCL: An Off-Policy Trust Region Method for Continuous Control](https://arxiv.org/abs/1707.01891) | [1] ICLR 2017
[2] NIPS 2017
[3] ICLR 2018 | ofirnachum | + +### Others + +| Directory | Paper(s) | Conference | Maintainer(s) | +|-----------|----------|------------|---------------| +| [lfads](lfads) | [LFADS - Latent Factor Analysis via Dynamical Systems](https://arxiv.org/abs/1608.06315) | | jazcollins, sussillo | +| [rebar](rebar) | [REBAR: Low-variance, unbiased gradient estimates for discrete latent variable models](https://arxiv.org/abs/1703.07370) | NIPS 2017 | gjtucker | + +--- + +## :warning: Archived Models and Implementations + +The following research models are no longer maintained. + +**Note**: We will remove archived models from the master branch in June, 2020. +After removal, you will still be able to access archived models in the archive branch. + +| Directory | Paper(s) | Conference | Maintainer(s) | +|-----------|----------|------------|---------------| +| [adv_imagenet_models](adv_imagenet_models) | [1] [Adversarial Machine Learning at Scale](https://arxiv.org/abs/1611.01236)
[2] [Ensemble Adversarial Training: Attacks and Defenses](https://arxiv.org/abs/1705.07204) | [1] ICLR 2017
[2] ICLR 2018 | alexeykurakin | +| [adversarial_crypto](adversarial_crypto) | [Learning to Protect Communications with Adversarial Neural Cryptography](https://arxiv.org/abs/1610.06918) | | dave-andersen | +| [adversarial_logit_pairing](adversarial_logit_pairing) | [Adversarial Logit Pairing](https://arxiv.org/abs/1803.06373) | | alexeykurakin | +| [autoencoder](autoencoder) | Various autoencoders | | snurkabill | +| [brain_coder](brain_coder) | [Neural Program Synthesis with Priority Queue Training](https://arxiv.org/abs/1801.03526) | | danabo, mnorouzi | +| [cognitive_mapping_and_planning](cognitive_mapping_and_planning) | [Cognitive Mapping and Planning for Visual Navigation](https://arxiv.org/abs/1702.03920) | CVPR 2017 | s-gupta | +| [compression](compression) | [Full Resolution Image Compression with Recurrent Neural Networks](https://arxiv.org/abs/1608.05148) | CVPR 2017 | nmjohn | +| [deep_contextual_bandits](deep_contextual_bandits) | [Deep Bayesian Bandits Showdown: An Empirical Comparison of Bayesian Deep Networks for Thompson Sampling](https://arxiv.org/abs/1802.09127) | ICLR 2018 | rikel | +| [deep_speech](deep_speech) | [Deep Speech 2](https://arxiv.org/abs/1512.02595) | ICLR 2016 | yhliang2018 | +| [domain_adaptation](domain_adaptation) | [1] [Domain Separation Networks](https://arxiv.org/abs/1608.06019)
[2] [Unsupervised Pixel-Level Domain Adaptation with Generative Adversarial Networks](https://arxiv.org/abs/1612.05424) | NIPS 2016 | bousmalis, dmrd | +| [feelvos](feelvos)| [FEELVOS](https://arxiv.org/abs/1902.09513) | CVPR 2019 | pvoigtlaender, yuningchai, aquariusjay | +| [fivo](fivo)| [Filtering variational objectives for training generative sequence models](https://arxiv.org/abs/1705.09279) | NIPS 2017 | dieterichlawson | +| [global_objectives](global_objectives) | [Scalable Learning of Non-Decomposable Objectives](https://arxiv.org/abs/1608.04802) | AISTATS 2017 | mackeya-google | +| [im2txt](im2txt) | [Show and Tell: Lessons learned from the 2015 MSCOCO Image Captioning Challenge](https://arxiv.org/abs/1609.06647) | TPAMI 2016 | cshallue | +| [inception](inception) | [Rethinking the Inception Architecture for Computer Vision](https://arxiv.org/abs/1512.00567) | CVPR 2016 | shlens, vincentvanhoucke | +| [keypointnet](keypointnet) | [KeypointNet](https://arxiv.org/abs/1807.03146) | | mnorouzi | +| [learned_optimizer](learned_optimizer) | [Learned Optimizers that Scale and Generalize](https://arxiv.org/abs/1703.04813) | ICML 2017 | olganw, nirum | +| [learning_to_remember_rare_events](learning_to_remember_rare_events) | [Learning to Remember Rare Events](https://arxiv.org/abs/1703.03129) | ICLR 2017| lukaszkaiser, ofirnachum | +| [learning_unsupervised_learning](learning_unsupervised_learning) | [Meta-Learning Update Rules for Unsupervised Representation Learning](https://arxiv.org/abs/1804.00222) | ICLR 2019 | lukemetz, nirum | +| [lexnet_nc](lexnet_nc) | [Olive Oil is Made of Olives, Baby Oil is Made for Babies: Interpreting Noun Compounds using Paraphrases in a Neural Model](https://arxiv.org/abs/1803.08073) | NAACL 2018 | vered1986, waterson | +| [lm_1b](lm_1b) | [Exploring the Limits of Language Modeling](https://arxiv.org/abs/1602.02410) | | oriolvinyals, panyx0718 | +| [lm_commonsense](lm_commonsense) | [A Simple Method for Commonsense Reasoning](https://arxiv.org/abs/1806.02847) | | thtrieu | +| [maskgan](maskgan)| [MaskGAN: Better Text Generation via Filling in the](https://arxiv.org/abs/1801.07736) | ICLR 2018 | liamb315, a-dai | +| [namignizer](namignizer)| Namignizer | | knathanieltucker | +| [neural_gpu](neural_gpu)| [Neural GPUs Learn Algorithms](https://arxiv.org/abs/1511.08228) | | lukaszkaiser | +| [neural_programmer](neural_programmer) | [Learning a Natural Language Interface with Neural Programmer](https://arxiv.org/abs/1611.08945) | ICLR 2017 | arvind2505 | +| [next_frame_prediction](next_frame_prediction) | [Visual Dynamics: Probabilistic Future Frame Synthesis via Cross Convolutional Networks](https://arxiv.org/abs/1607.02586) | NIPS 2016 | panyx0718 | +| [ptn](ptn) | [Perspective Transformer Nets: Learning Single-View 3D Object Reconstruction without 3D Supervision](https://arxiv.org/abs/1612.00814) | NIPS 2016 | xcyan, arkanath, hellojas, honglaklee | +| [qa_kg](qa_kg) | [Learning to Reason: End-to-End Module Networks for Visual Question Answering](https://arxiv.org/abs/1704.05526) | ICCV 2017 | yuyuz | +| [real_nvp](real_nvp) | [Density estimation using Real NVP](https://arxiv.org/abs/1605.08803) | ICLR 2017 | laurent-dinh | +| [sentiment_analysis](sentiment_analysis)| [Effective Use of Word Order for Text Categorization with Convolutional Neural Networks](https://arxiv.org/abs/1412.1058) | NAACL HLT 2015 | sculd | +| [seq2species](seq2species) | [Seq2Species: A deep learning approach to pattern recognition for short DNA 
sequences](https://doi.org/10.1101/353474) | | apbusia, depristo | +| [skip_thoughts](skip_thoughts) | [Skip-Thought Vectors](https://arxiv.org/abs/1506.06726) | | cshallue | +| [steve](steve) | [Sample-Efficient Reinforcement Learning with Stochastic Ensemble Value Expansion](https://arxiv.org/abs/1807.01675) | NeurIPS 2018 | buckman-google | +| [street](street) | [End-to-End Interpretation of the French Street Name Signs Dataset](https://arxiv.org/abs/1702.03970) | ECCV 2016 | theraysmith | +| [struct2depth](struct2depth)| [Depth Prediction Without the Sensors: Leveraging Structure for Unsupervised Learning from Monocular Videos](https://arxiv.org/abs/1811.06152) | AAAI 2019 | aneliaangelova | +| [swivel](swivel) | [Swivel: Improving Embeddings by Noticing What's Missing](https://arxiv.org/abs/1602.02215) | | waterson | +| [tcn](tcn) | [Time-Contrastive Networks: Self-Supervised Learning from Video](https://arxiv.org/abs/1704.06888) | ICRA 2018 | coreylynch, sermanet | +| [textsum](textsum)| [A Neural Attention Model for Abstractive Sentence Summarization](https://arxiv.org/abs/1509.00685) | EMNLP 2015 | panyx0718, peterjliu | +| [transformer](transformer) | [Spatial Transformer Network](https://arxiv.org/abs/1506.02025) | NIPS 2015 | daviddao| +| [video_prediction](video_prediction) | [Unsupervised Learning for Physical Interaction through Video Prediction](https://arxiv.org/abs/1605.07157) | NIPS 2016 | cbfinn | + +--- + +## Contributions + +If you want to contribute, please review the [contribution guidelines](https://github.com/tensorflow/models/wiki/How-to-contribute). diff --git a/models/research/a3c_blogpost/README.md b/models/research/a3c_blogpost/README.md new file mode 100644 index 0000000000000000000000000000000000000000..55e390e703db361fbc4b1d89bb3baff9abb30dac --- /dev/null +++ b/models/research/a3c_blogpost/README.md @@ -0,0 +1,6 @@ +# A3C Blog Post +In order to run this code, you will need the following prerequisites: + +* [OpenAI Gym](https://github.com/openai/gym) - `pip install gym` +* [pyglet](https://bitbucket.org/pyglet/pyglet/wiki/Home) - `pip install pyglet` +* [TensorFlow](https://www.tensorflow.org/install/) - `pip install tensorflow==2.2.0` diff --git a/models/research/a3c_blogpost/a3c_cartpole.py b/models/research/a3c_blogpost/a3c_cartpole.py new file mode 100644 index 0000000000000000000000000000000000000000..62fdcf84929d76b9ccae564db77320d15e774002 --- /dev/null +++ b/models/research/a3c_blogpost/a3c_cartpole.py @@ -0,0 +1,366 @@ +import os +os.environ["CUDA_VISIBLE_DEVICES"] = "" + +import threading +import gym +import multiprocessing +import numpy as np +from queue import Queue +import argparse +import matplotlib.pyplot as plt + + +import tensorflow as tf +from tensorflow.python import keras +from tensorflow.python.keras import layers + +parser = argparse.ArgumentParser(description='Run A3C algorithm on the game ' + 'Cartpole.') +parser.add_argument('--algorithm', default='a3c', type=str, + help='Choose between \'a3c\' and \'random\'.') +parser.add_argument('--train', dest='train', action='store_true', + help='Train our model.') +parser.add_argument('--lr', default=0.001, + help='Learning rate for the shared optimizer.') +parser.add_argument('--update-freq', default=20, type=int, + help='How often to update the global model.') +parser.add_argument('--max-eps', default=1000, type=int, + help='Global maximum number of episodes to run.') +parser.add_argument('--gamma', default=0.99, + help='Discount factor of rewards.') +parser.add_argument('--save-dir', 
default='/tmp/', type=str, + help='Directory in which you desire to save the model.') +args = parser.parse_args() + +class ActorCriticModel(keras.Model): + def __init__(self, state_size, action_size): + super(ActorCriticModel, self).__init__() + self.state_size = state_size + self.action_size = action_size + self.dense1 = layers.Dense(100, activation='relu') + self.policy_logits = layers.Dense(action_size) + self.dense2 = layers.Dense(100, activation='relu') + self.values = layers.Dense(1) + + def call(self, inputs): + # Forward pass + x = self.dense1(inputs) + logits = self.policy_logits(x) + v1 = self.dense2(inputs) + values = self.values(v1) + return logits, values + +def record(episode, + episode_reward, + worker_idx, + global_ep_reward, + result_queue, + total_loss, + num_steps): + """Helper function to store score and print statistics. + + Arguments: + episode: Current episode + episode_reward: Reward accumulated over the current episode + worker_idx: Which thread (worker) + global_ep_reward: The moving average of the global reward + result_queue: Queue storing the moving average of the scores + total_loss: The total loss accumualted over the current episode + num_steps: The number of steps the episode took to complete + """ + if global_ep_reward == 0: + global_ep_reward = episode_reward + else: + global_ep_reward = global_ep_reward * 0.99 + episode_reward * 0.01 + print( + f"Episode: {episode} | " + f"Moving Average Reward: {int(global_ep_reward)} | " + f"Episode Reward: {int(episode_reward)} | " + f"Loss: {int(total_loss / float(num_steps) * 1000) / 1000} | " + f"Steps: {num_steps} | " + f"Worker: {worker_idx}" + ) + result_queue.put(global_ep_reward) + return global_ep_reward + + +class RandomAgent: + """Random Agent that will play the specified game + + Arguments: + env_name: Name of the environment to be played + max_eps: Maximum number of episodes to run agent for. 
+ """ + def __init__(self, env_name, max_eps): + self.env = gym.make(env_name) + self.max_episodes = max_eps + self.global_moving_average_reward = 0 + self.res_queue = Queue() + + def run(self): + reward_avg = 0 + for episode in range(self.max_episodes): + done = False + self.env.reset() + reward_sum = 0.0 + steps = 0 + while not done: + # Sample randomly from the action space and step + _, reward, done, _ = self.env.step(self.env.action_space.sample()) + steps += 1 + reward_sum += reward + # Record statistics + self.global_moving_average_reward = record(episode, + reward_sum, + 0, + self.global_moving_average_reward, + self.res_queue, 0, steps) + + reward_avg += reward_sum + final_avg = reward_avg / float(self.max_episodes) + print("Average score across {} episodes: {}".format(self.max_episodes, final_avg)) + return final_avg + + +class MasterAgent(): + def __init__(self): + self.game_name = 'CartPole-v0' + save_dir = args.save_dir + self.save_dir = save_dir + if not os.path.exists(save_dir): + os.makedirs(save_dir) + + env = gym.make(self.game_name) + self.state_size = env.observation_space.shape[0] + self.action_size = env.action_space.n + self.opt = tf.compat.v1.train.AdamOptimizer(args.lr, use_locking=True) + print(self.state_size, self.action_size) + + self.global_model = ActorCriticModel(self.state_size, self.action_size) # global network + self.global_model(tf.convert_to_tensor(np.random.random((1, self.state_size)), dtype=tf.float32)) + + def train(self): + if args.algorithm == 'random': + random_agent = RandomAgent(self.game_name, args.max_eps) + random_agent.run() + return + + res_queue = Queue() + + workers = [Worker(self.state_size, + self.action_size, + self.global_model, + self.opt, res_queue, + i, game_name=self.game_name, + save_dir=self.save_dir) for i in range(multiprocessing.cpu_count())] + + for i, worker in enumerate(workers): + print("Starting worker {}".format(i)) + worker.start() + + moving_average_rewards = [] # record episode reward to plot + while True: + reward = res_queue.get() + if reward is not None: + moving_average_rewards.append(reward) + else: + break + [w.join() for w in workers] + + plt.plot(moving_average_rewards) + plt.ylabel('Moving average ep reward') + plt.xlabel('Step') + plt.savefig(os.path.join(self.save_dir, + '{} Moving Average.png'.format(self.game_name))) + plt.show() + + def play(self): + env = gym.make(self.game_name).unwrapped + state = env.reset() + model = self.global_model + model_path = os.path.join(self.save_dir, 'model_{}.h5'.format(self.game_name)) + print('Loading model from: {}'.format(model_path)) + model.load_weights(model_path) + done = False + step_counter = 0 + reward_sum = 0 + + try: + while not done: + env.render(mode='rgb_array') + policy, value = model(tf.convert_to_tensor(state[None, :], dtype=tf.float32)) + policy = tf.nn.softmax(policy) + action = np.argmax(policy) + state, reward, done, _ = env.step(action) + reward_sum += reward + print("{}. Reward: {}, action: {}".format(step_counter, reward_sum, action)) + step_counter += 1 + except KeyboardInterrupt: + print("Received Keyboard Interrupt. 
Shutting down.") + finally: + env.close() + + +class Memory: + def __init__(self): + self.states = [] + self.actions = [] + self.rewards = [] + + def store(self, state, action, reward): + self.states.append(state) + self.actions.append(action) + self.rewards.append(reward) + + def clear(self): + self.states = [] + self.actions = [] + self.rewards = [] + + +class Worker(threading.Thread): + # Set up global variables across different threads + global_episode = 0 + # Moving average reward + global_moving_average_reward = 0 + best_score = 0 + save_lock = threading.Lock() + + def __init__(self, + state_size, + action_size, + global_model, + opt, + result_queue, + idx, + game_name='CartPole-v0', + save_dir='/tmp'): + super(Worker, self).__init__() + self.state_size = state_size + self.action_size = action_size + self.result_queue = result_queue + self.global_model = global_model + self.opt = opt + self.local_model = ActorCriticModel(self.state_size, self.action_size) + self.worker_idx = idx + self.game_name = game_name + self.env = gym.make(self.game_name).unwrapped + self.save_dir = save_dir + self.ep_loss = 0.0 + + def run(self): + total_step = 1 + mem = Memory() + while Worker.global_episode < args.max_eps: + current_state = self.env.reset() + mem.clear() + ep_reward = 0. + ep_steps = 0 + self.ep_loss = 0 + + time_count = 0 + done = False + while not done: + logits, _ = self.local_model( + tf.convert_to_tensor(current_state[None, :], + dtype=tf.float32)) + probs = tf.nn.softmax(logits) + + action = np.random.choice(self.action_size, p=probs.numpy()[0]) + new_state, reward, done, _ = self.env.step(action) + if done: + reward = -1 + ep_reward += reward + mem.store(current_state, action, reward) + + if time_count == args.update_freq or done: + # Calculate gradient wrt to local model. We do so by tracking the + # variables involved in computing the loss by using tf.GradientTape + with tf.GradientTape() as tape: + total_loss = self.compute_loss(done, + new_state, + mem, + args.gamma) + self.ep_loss += total_loss + # Calculate local gradients + grads = tape.gradient(total_loss, self.local_model.trainable_weights) + # Push local gradients to global model + self.opt.apply_gradients(zip(grads, + self.global_model.trainable_weights)) + # Update local model with new weights + self.local_model.set_weights(self.global_model.get_weights()) + + mem.clear() + time_count = 0 + + if done: # done and print information + Worker.global_moving_average_reward = \ + record(Worker.global_episode, ep_reward, self.worker_idx, + Worker.global_moving_average_reward, self.result_queue, + self.ep_loss, ep_steps) + # We must use a lock to save our model and to print to prevent data races. + if ep_reward > Worker.best_score: + with Worker.save_lock: + print("Saving best model to {}, " + "episode score: {}".format(self.save_dir, ep_reward)) + self.global_model.save_weights( + os.path.join(self.save_dir, + 'model_{}.h5'.format(self.game_name)) + ) + Worker.best_score = ep_reward + Worker.global_episode += 1 + ep_steps += 1 + + time_count += 1 + current_state = new_state + total_step += 1 + self.result_queue.put(None) + + def compute_loss(self, + done, + new_state, + memory, + gamma=0.99): + if done: + reward_sum = 0. 
# terminal + else: + reward_sum = self.local_model( + tf.convert_to_tensor(new_state[None, :], + dtype=tf.float32))[-1].numpy()[0] + + # Get discounted rewards + discounted_rewards = [] + for reward in memory.rewards[::-1]: # reverse buffer r + reward_sum = reward + gamma * reward_sum + discounted_rewards.append(reward_sum) + discounted_rewards.reverse() + + logits, values = self.local_model( + tf.convert_to_tensor(np.vstack(memory.states), + dtype=tf.float32)) + # Get our advantages + advantage = tf.convert_to_tensor(np.array(discounted_rewards)[:, None], + dtype=tf.float32) - values + # Value loss + value_loss = advantage ** 2 + + # Calculate our policy loss + policy = tf.nn.softmax(logits) + entropy = tf.nn.softmax_cross_entropy_with_logits(labels=policy, logits=logits) + + policy_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=memory.actions, + logits=logits) + policy_loss *= tf.stop_gradient(advantage) + policy_loss -= 0.01 * entropy + total_loss = tf.reduce_mean((0.5 * value_loss + policy_loss)) + return total_loss + + +if __name__ == '__main__': + print(args) + master = MasterAgent() + if args.train: + master.train() + else: + master.play() + diff --git a/models/research/adv_imagenet_models/README.md b/models/research/adv_imagenet_models/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6129f7347effe09ef0272de9ac42d4872726fcd1 --- /dev/null +++ b/models/research/adv_imagenet_models/README.md @@ -0,0 +1,91 @@ +![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +# Adversarially trained ImageNet models + +Pre-trained ImageNet models from the following papers: + +* [Adversarial Machine Learning at Scale](https://arxiv.org/abs/1611.01236) +* [Ensemble Adversarial Training: Attacks and Defenses](https://arxiv.org/abs/1705.07204) + +## Contact + +Author: Alexey Kurakin, +github: [AlexeyKurakin](https://github.com/AlexeyKurakin) + +## Pre-requesites and installation + +Ensure that you have installed TensorFlow 1.1 or greater +([instructions](https://www.tensorflow.org/install/)). + +You also need copy of ImageNet dataset if you want to run provided example. +Follow +[Preparing the dataset](https://github.com/tensorflow/models/tree/master/research/slim#Data) +instructions in TF-Slim library to get and preprocess ImageNet data. + +## Available models + +Following pre-trained models are available: + +Network Architecture | Adversarial training | Checkpoint +---------------------|----------------------|---------------- +Inception v3 | Step L.L. | [adv_inception_v3_2017_08_18.tar.gz](http://download.tensorflow.org/models/adv_inception_v3_2017_08_18.tar.gz) +Inception v3 | Step L.L. on ensemble of 3 models | [ens3_adv_inception_v3_2017_08_18.tar.gz](http://download.tensorflow.org/models/ens3_adv_inception_v3_2017_08_18.tar.gz) +Inception v3 | Step L.L. on ensemble of 4 models| [ens4_adv_inception_v3_2017_08_18.tar.gz](http://download.tensorflow.org/models/ens4_adv_inception_v3_2017_08_18.tar.gz) +Inception ResNet v2 | Step L.L. | [adv_inception_resnet_v2_2017_12_18.tar.gz](http://download.tensorflow.org/models/adv_inception_resnet_v2_2017_12_18.tar.gz) +Inception ResNet v2 | Step L.L. 
on ensemble of 3 models | [ens_adv_inception_resnet_v2_2017_08_18.tar.gz](http://download.tensorflow.org/models/ens_adv_inception_resnet_v2_2017_08_18.tar.gz) + +All checkpoints are compatible with +[TF-Slim](https://github.com/tensorflow/models/tree/master/research/slim) +implementation of Inception v3 and Inception Resnet v2. + +## How to evaluate models on ImageNet test data + +Python script `eval_on_adversarial.py` allow you to evaluate provided models +on white-box adversarial examples generated from ImageNet test set. + +Usage is following: + +```bash +# ${MODEL_NAME} - type of network architecture, +# either "inception_v3" or "inception_resnet_v2" +# ${CHECKPOINT_PATH} - path to model checkpoint +# ${DATASET_DIR} - directory with ImageNet test set +# ${ADV_METHOD} - which method to use to generate adversarial images, +# supported method: +# "none" - use clean images from the dataset +# "stepll" - one step towards least likely class method (StepLL), +# see https://arxiv.org/abs/1611.01236 for details +# "stepllnoise" - RAND+StepLL method from https://arxiv.org/abs/1705.07204 +# ${ADV_EPS} - size of adversarial perturbation, ignored when method is none +python eval_on_adversarial.py \ + --model_name=${MODEL_NAME} \ + --checkpoint_path=${CHECKPOINT_PATH} \ + --dataset_dir=${DATASET_DIR} \ + --batch_size=50 \ + --adversarial_method=${ADV_METHOD} \ + --adversarial_eps=${ADV_EPS} +``` + +Below is an example how to evaluate one of the models on RAND+StepLL adversarial +examples: + +```bash +# Download checkpoint +CHECKPOINT_DIR=/tmp/checkpoints +mkdir ${CHECKPOINT_DIR} +wget http://download.tensorflow.org/models/ens_adv_inception_resnet_v2_2017_08_18.tar.gz +tar -xvf ens_adv_inception_resnet_v2_2017_08_18.tar.gz +mv ens_adv_inception_resnet_v2.ckpt* ${CHECKPOINT_DIR} +rm ens_adv_inception_resnet_v2_2017_08_18.tar.gz + +# Run evaluation +python eval_on_adversarial.py \ + --model_name=inception_v3 \ + --checkpoint_path=${CHECKPOINT_DIR}/ens_adv_inception_resnet_v2.ckpt \ + --dataset_dir=${DATASET_DIR} \ + --batch_size=50 \ + --adversarial_method=stepllnoise \ + --adversarial_eps=16 +``` diff --git a/models/research/adv_imagenet_models/eval_on_adversarial.py b/models/research/adv_imagenet_models/eval_on_adversarial.py new file mode 100644 index 0000000000000000000000000000000000000000..f9188845c6c4e10484f9b24797d9ece3b730ffb0 --- /dev/null +++ b/models/research/adv_imagenet_models/eval_on_adversarial.py @@ -0,0 +1,331 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Script which evaluates model on adversarial examples.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import imagenet +import inception_resnet_v2 + +import tensorflow as tf +from tensorflow.contrib.slim.nets import inception + + +slim = tf.contrib.slim + +tf.app.flags.DEFINE_integer( + 'batch_size', 50, 'The number of samples in each batch.') + +tf.app.flags.DEFINE_integer( + 'max_num_batches', None, + 'Max number of batches to evaluate by default use all.') + +tf.app.flags.DEFINE_string( + 'master', '', 'The address of the TensorFlow master to use.') + +tf.app.flags.DEFINE_string( + 'checkpoint_path', '/tmp/tfmodel/', + 'The directory where the model was written to or an absolute path to a ' + 'checkpoint file.') + +tf.app.flags.DEFINE_integer( + 'num_preprocessing_threads', 4, + 'The number of threads used to create the batches.') + +tf.app.flags.DEFINE_string( + 'split_name', 'validation', 'The name of the train/test split.') + +tf.app.flags.DEFINE_string( + 'dataset_dir', None, 'The directory where the dataset files are stored.') + +tf.app.flags.DEFINE_string( + 'model_name', 'inception_v3', + 'Name of the model to use, either "inception_v3" or "inception_resnet_v2"') + +tf.app.flags.DEFINE_float( + 'moving_average_decay', None, + 'The decay to use for the moving average.' + 'If left as None, then moving averages are not used.') + +tf.app.flags.DEFINE_string( + 'adversarial_method', 'none', + 'What kind of adversarial examples to use for evaluation. ' + 'Could be one of: "none", "stepll", "stepllnoise".') + +tf.app.flags.DEFINE_float( + 'adversarial_eps', 0.0, + 'Size of adversarial perturbation in range [0, 255].') + + +FLAGS = tf.app.flags.FLAGS + + +IMAGE_SIZE = 299 +NUM_CLASSES = 1001 + + +def preprocess_for_eval(image, height, width, + central_fraction=0.875, scope=None): + """Prepare one image for evaluation. + + If height and width are specified it would output an image with that size by + applying resize_bilinear. + If central_fraction is specified it would crop the central fraction of the + input image. + + Args: + image: 3-D Tensor of image. If dtype is tf.float32 then the range should be + [0, 1], otherwise it would converted to tf.float32 assuming that the range + is [0, MAX], where MAX is largest positive representable number for + int(8/16/32) data type (see `tf.image.convert_image_dtype` for details) + height: integer + width: integer + central_fraction: Optional Float, fraction of the image to crop. + scope: Optional scope for name_scope. + Returns: + 3-D float Tensor of prepared image. + """ + with tf.name_scope(scope, 'eval_image', [image, height, width]): + if image.dtype != tf.float32: + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + # Crop the central region of the image with an area containing 87.5% of + # the original image. + if central_fraction: + image = tf.image.central_crop(image, central_fraction=central_fraction) + + if height and width: + # Resize the image to the specified height and width. + image = tf.expand_dims(image, 0) + image = tf.image.resize_bilinear(image, [height, width], + align_corners=False) + image = tf.squeeze(image, [0]) + image = tf.subtract(image, 0.5) + image = tf.multiply(image, 2.0) + return image + + +def create_model(x, reuse=None): + """Create model graph. 
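+
+  The graph is built once (reuse=None) so the variables are created, and is
+  then called again with reuse=True, both when crafting adversarial examples
+  and when computing the evaluation logits.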
+ + Args: + x: input images + reuse: reuse parameter which will be passed to underlying variable scopes. + Should be None first call and True every subsequent call. + + Returns: + (logits, end_points) - tuple of model logits and enpoints + + Raises: + ValueError: if model type specified by --model_name flag is invalid. + """ + if FLAGS.model_name == 'inception_v3': + with slim.arg_scope(inception.inception_v3_arg_scope()): + return inception.inception_v3( + x, num_classes=NUM_CLASSES, is_training=False, reuse=reuse) + elif FLAGS.model_name == 'inception_resnet_v2': + with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()): + return inception_resnet_v2.inception_resnet_v2( + x, num_classes=NUM_CLASSES, is_training=False, reuse=reuse) + else: + raise ValueError('Invalid model name: %s' % (FLAGS.model_name)) + + +def step_target_class_adversarial_images(x, eps, one_hot_target_class): + """Base code for one step towards target class methods. + + Args: + x: source images + eps: size of adversarial perturbation + one_hot_target_class: one hot encoded target classes for all images + + Returns: + tensor with adversarial images + """ + logits, end_points = create_model(x, reuse=True) + cross_entropy = tf.losses.softmax_cross_entropy(one_hot_target_class, + logits, + label_smoothing=0.1, + weights=1.0) + cross_entropy += tf.losses.softmax_cross_entropy(one_hot_target_class, + end_points['AuxLogits'], + label_smoothing=0.1, + weights=0.4) + x_adv = x - eps * tf.sign(tf.gradients(cross_entropy, x)[0]) + x_adv = tf.clip_by_value(x_adv, -1.0, 1.0) + return tf.stop_gradient(x_adv) + + +def stepll_adversarial_images(x, eps): + """One step towards least likely class (Step L.L.) adversarial examples. + + This method is an alternative to FGSM which does not use true classes. + Method is described in the "Adversarial Machine Learning at Scale" paper, + https://arxiv.org/abs/1611.01236 + + Args: + x: source images + eps: size of adversarial perturbation + + Returns: + adversarial images + """ + logits, _ = create_model(x, reuse=True) + least_likely_class = tf.argmin(logits, 1) + one_hot_ll_class = tf.one_hot(least_likely_class, NUM_CLASSES) + return step_target_class_adversarial_images(x, eps, one_hot_ll_class) + + +def stepllnoise_adversarial_images(x, eps): + """Step L.L. with noise method. + + This is an imporvement of Step L.L. method. This method is better against + adversarially trained models which learn to mask gradient. + Method is described in the section "New randomized one shot attack" of + "Ensemble Adversarial Training: Attacks and Defenses" paper, + https://arxiv.org/abs/1705.07204 + + Args: + x: source images + eps: size of adversarial perturbation + + Returns: + adversarial images + """ + logits, _ = create_model(x, reuse=True) + least_likely_class = tf.argmin(logits, 1) + one_hot_ll_class = tf.one_hot(least_likely_class, NUM_CLASSES) + x_noise = x + eps / 2 * tf.sign(tf.random_normal(x.shape)) + return step_target_class_adversarial_images(x_noise, eps / 2, + one_hot_ll_class) + + +def get_input_images(dataset_images): + """Gets input images for the evaluation. + + Args: + dataset_images: tensor with dataset images + + Returns: + tensor with input images, which is either dataset images or adversarial + images. + + Raises: + ValueError: if adversarial method specified by --adversarial_method flag + is invalid. + """ + # adversarial_eps defines max difference of values of pixels if + # pixels are in range [0, 255]. 
However values of dataset pixels are + # in range [-1, 1], so converting epsilon. + eps = FLAGS.adversarial_eps / 255 * 2.0 + + if FLAGS.adversarial_method == 'stepll': + return stepll_adversarial_images(dataset_images, eps) + elif FLAGS.adversarial_method == 'stepllnoise': + return stepllnoise_adversarial_images(dataset_images, eps) + elif FLAGS.adversarial_method == 'none': + return dataset_images + else: + raise ValueError('Invalid adversarial method: %s' + % (FLAGS.adversarial_method)) + + +def main(_): + if not FLAGS.dataset_dir: + raise ValueError('You must supply the dataset directory with --dataset_dir') + + tf.logging.set_verbosity(tf.logging.INFO) + with tf.Graph().as_default(): + tf_global_step = tf.train.get_or_create_global_step() + + ################### + # Prepare dataset # + ################### + dataset = imagenet.get_split(FLAGS.split_name, FLAGS.dataset_dir) + provider = slim.dataset_data_provider.DatasetDataProvider( + dataset, + shuffle=False, + common_queue_capacity=2 * FLAGS.batch_size, + common_queue_min=FLAGS.batch_size) + [dataset_image, label] = provider.get(['image', 'label']) + dataset_image = preprocess_for_eval(dataset_image, IMAGE_SIZE, IMAGE_SIZE) + dataset_images, labels = tf.train.batch( + [dataset_image, label], + batch_size=FLAGS.batch_size, + num_threads=FLAGS.num_preprocessing_threads, + capacity=5 * FLAGS.batch_size) + + ######################################## + # Define the model and input exampeles # + ######################################## + create_model(tf.placeholder(tf.float32, shape=dataset_images.shape)) + input_images = get_input_images(dataset_images) + logits, _ = create_model(input_images, reuse=True) + + if FLAGS.moving_average_decay > 0: + variable_averages = tf.train.ExponentialMovingAverage( + FLAGS.moving_average_decay, tf_global_step) + variables_to_restore = variable_averages.variables_to_restore( + slim.get_model_variables()) + variables_to_restore[tf_global_step.op.name] = tf_global_step + else: + variables_to_restore = slim.get_variables_to_restore() + + ###################### + # Define the metrics # + ###################### + predictions = tf.argmax(logits, 1) + labels = tf.squeeze(labels) + names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({ + 'Accuracy': slim.metrics.streaming_accuracy(predictions, labels), + 'Recall_5': slim.metrics.streaming_sparse_recall_at_k( + logits, tf.reshape(labels, [-1, 1]), 5), + }) + + ###################### + # Run evaluation # + ###################### + if FLAGS.max_num_batches: + num_batches = FLAGS.max_num_batches + else: + # This ensures that we make a single pass over all of the data. 
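+      # For example, the 50,000-image ImageNet validation split with the
+      # default batch_size of 50 gives ceil(50000 / 50) = 1000 batches.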
+ num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size)) + + if tf.gfile.IsDirectory(FLAGS.checkpoint_path): + checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path) + else: + checkpoint_path = FLAGS.checkpoint_path + + tf.logging.info('Evaluating %s' % checkpoint_path) + + top1_accuracy, top5_accuracy = slim.evaluation.evaluate_once( + master=FLAGS.master, + checkpoint_path=checkpoint_path, + logdir=None, + summary_op=None, + num_evals=num_batches, + eval_op=list(names_to_updates.values()), + final_op=[names_to_values['Accuracy'], names_to_values['Recall_5']], + variables_to_restore=variables_to_restore) + + print('Top1 Accuracy: ', top1_accuracy) + print('Top5 Accuracy: ', top5_accuracy) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/adv_imagenet_models/imagenet.py b/models/research/adv_imagenet_models/imagenet.py new file mode 100644 index 0000000000000000000000000000000000000000..26c4c7a388a234f647e446951a0765d1c53184cb --- /dev/null +++ b/models/research/adv_imagenet_models/imagenet.py @@ -0,0 +1,118 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Provides data for the ImageNet ILSVRC 2012 Dataset plus some bounding boxes. + +Some images have one or more bounding boxes associated with the label of the +image. See details here: http://image-net.org/download-bboxes + +WARNING: Don't use for object detection, in this case all the bounding boxes +of the image belong to just one class. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import tensorflow as tf + +slim = tf.contrib.slim + +_FILE_PATTERN = '%s-*' + +_SPLITS_TO_SIZES = { + 'train': 1281167, + 'validation': 50000, +} + +_ITEMS_TO_DESCRIPTIONS = { + 'image': 'A color image of varying height and width.', + 'label': 'The label id of the image, integer between 0 and 999', + 'label_text': 'The text of the label.', + 'object/bbox': 'A list of bounding boxes.', + 'object/label': 'A list of labels, one per each object.', +} + +_NUM_CLASSES = 1001 + + +def get_split(split_name, dataset_dir, file_pattern=None, reader=None): + """Gets a dataset tuple with instructions for reading ImageNet. + + Args: + split_name: A train/test split name. + dataset_dir: The base directory of the dataset sources. + file_pattern: The file pattern to use when matching the dataset sources. + It is assumed that the pattern contains a '%s' string so that the split + name can be inserted. + reader: The TensorFlow reader type. + + Returns: + A `Dataset` namedtuple. + + Raises: + ValueError: if `split_name` is not a valid train/test split. + """ + if split_name not in _SPLITS_TO_SIZES: + raise ValueError('split name %s was not recognized.' 
% split_name) + + if not file_pattern: + file_pattern = _FILE_PATTERN + file_pattern = os.path.join(dataset_dir, file_pattern % split_name) + + # Allowing None in the signature so that dataset_factory can use the default. + if reader is None: + reader = tf.TFRecordReader + + keys_to_features = { + 'image/encoded': tf.FixedLenFeature( + (), tf.string, default_value=''), + 'image/format': tf.FixedLenFeature( + (), tf.string, default_value='jpeg'), + 'image/class/label': tf.FixedLenFeature( + [], dtype=tf.int64, default_value=-1), + 'image/class/text': tf.FixedLenFeature( + [], dtype=tf.string, default_value=''), + 'image/object/bbox/xmin': tf.VarLenFeature( + dtype=tf.float32), + 'image/object/bbox/ymin': tf.VarLenFeature( + dtype=tf.float32), + 'image/object/bbox/xmax': tf.VarLenFeature( + dtype=tf.float32), + 'image/object/bbox/ymax': tf.VarLenFeature( + dtype=tf.float32), + 'image/object/class/label': tf.VarLenFeature( + dtype=tf.int64), + } + + items_to_handlers = { + 'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'), + 'label': slim.tfexample_decoder.Tensor('image/class/label'), + 'label_text': slim.tfexample_decoder.Tensor('image/class/text'), + 'object/bbox': slim.tfexample_decoder.BoundingBox( + ['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/'), + 'object/label': slim.tfexample_decoder.Tensor('image/object/class/label'), + } + + decoder = slim.tfexample_decoder.TFExampleDecoder( + keys_to_features, items_to_handlers) + + return slim.dataset.Dataset( + data_sources=file_pattern, + reader=reader, + decoder=decoder, + num_samples=_SPLITS_TO_SIZES[split_name], + items_to_descriptions=_ITEMS_TO_DESCRIPTIONS, + num_classes=_NUM_CLASSES) diff --git a/models/research/adv_imagenet_models/inception_resnet_v2.py b/models/research/adv_imagenet_models/inception_resnet_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..2f690e8d2f70ecde9a55f40375a7f74cd25651c7 --- /dev/null +++ b/models/research/adv_imagenet_models/inception_resnet_v2.py @@ -0,0 +1,358 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Contains the definition of the Inception Resnet V2 architecture. + +As described in http://arxiv.org/abs/1602.07261. 
+ + Inception-v4, Inception-ResNet and the Impact of Residual Connections + on Learning + Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, Alex Alemi +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import tensorflow as tf + +slim = tf.contrib.slim + + +def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None): + """Builds the 35x35 resnet block.""" + with tf.variable_scope(scope, 'Block35', [net], reuse=reuse): + with tf.variable_scope('Branch_0'): + tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1') + with tf.variable_scope('Branch_1'): + tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1') + tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3') + with tf.variable_scope('Branch_2'): + tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1') + tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3') + tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3') + mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_1, tower_conv2_2]) + up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None, + activation_fn=None, scope='Conv2d_1x1') + net += scale * up + if activation_fn: + net = activation_fn(net) + return net + + +def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None): + """Builds the 17x17 resnet block.""" + with tf.variable_scope(scope, 'Block17', [net], reuse=reuse): + with tf.variable_scope('Branch_0'): + tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1') + with tf.variable_scope('Branch_1'): + tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1') + tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7], + scope='Conv2d_0b_1x7') + tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1], + scope='Conv2d_0c_7x1') + mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2]) + up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None, + activation_fn=None, scope='Conv2d_1x1') + net += scale * up + if activation_fn: + net = activation_fn(net) + return net + + +def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None): + """Builds the 8x8 resnet block.""" + with tf.variable_scope(scope, 'Block8', [net], reuse=reuse): + with tf.variable_scope('Branch_0'): + tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1') + with tf.variable_scope('Branch_1'): + tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1') + tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3], + scope='Conv2d_0b_1x3') + tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1], + scope='Conv2d_0c_3x1') + mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2]) + up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None, + activation_fn=None, scope='Conv2d_1x1') + net += scale * up + if activation_fn: + net = activation_fn(net) + return net + + +def inception_resnet_v2_base(inputs, + final_endpoint='Conv2d_7b_1x1', + output_stride=16, + align_feature_maps=False, + scope=None): + """Inception model from http://arxiv.org/abs/1602.07261. + + Constructs an Inception Resnet v2 network from inputs to the given final + endpoint. This method can construct the network up to the final inception + block Conv2d_7b_1x1. + + Args: + inputs: a tensor of size [batch_size, height, width, channels]. + final_endpoint: specifies the endpoint to construct the network up to. 
It + can be one of ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', + 'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3', + 'Mixed_5b', 'Mixed_6a', 'PreAuxLogits', 'Mixed_7a', 'Conv2d_7b_1x1'] + output_stride: A scalar that specifies the requested ratio of input to + output spatial resolution. Only supports 8 and 16. + align_feature_maps: When true, changes all the VALID paddings in the network + to SAME padding so that the feature maps are aligned. + scope: Optional variable_scope. + + Returns: + tensor_out: output tensor corresponding to the final_endpoint. + end_points: a set of activations for external use, for example summaries or + losses. + + Raises: + ValueError: if final_endpoint is not set to one of the predefined values, + or if the output_stride is not 8 or 16, or if the output_stride is 8 and + we request an end point after 'PreAuxLogits'. + """ + if output_stride != 8 and output_stride != 16: + raise ValueError('output_stride must be 8 or 16.') + + padding = 'SAME' if align_feature_maps else 'VALID' + + end_points = {} + + def add_and_check_final(name, net): + end_points[name] = net + return name == final_endpoint + + with tf.variable_scope(scope, 'InceptionResnetV2', [inputs]): + with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], + stride=1, padding='SAME'): + # 149 x 149 x 32 + net = slim.conv2d(inputs, 32, 3, stride=2, padding=padding, + scope='Conv2d_1a_3x3') + if add_and_check_final('Conv2d_1a_3x3', net): return net, end_points + + # 147 x 147 x 32 + net = slim.conv2d(net, 32, 3, padding=padding, + scope='Conv2d_2a_3x3') + if add_and_check_final('Conv2d_2a_3x3', net): return net, end_points + # 147 x 147 x 64 + net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3') + if add_and_check_final('Conv2d_2b_3x3', net): return net, end_points + # 73 x 73 x 64 + net = slim.max_pool2d(net, 3, stride=2, padding=padding, + scope='MaxPool_3a_3x3') + if add_and_check_final('MaxPool_3a_3x3', net): return net, end_points + # 73 x 73 x 80 + net = slim.conv2d(net, 80, 1, padding=padding, + scope='Conv2d_3b_1x1') + if add_and_check_final('Conv2d_3b_1x1', net): return net, end_points + # 71 x 71 x 192 + net = slim.conv2d(net, 192, 3, padding=padding, + scope='Conv2d_4a_3x3') + if add_and_check_final('Conv2d_4a_3x3', net): return net, end_points + # 35 x 35 x 192 + net = slim.max_pool2d(net, 3, stride=2, padding=padding, + scope='MaxPool_5a_3x3') + if add_and_check_final('MaxPool_5a_3x3', net): return net, end_points + + # 35 x 35 x 320 + with tf.variable_scope('Mixed_5b'): + with tf.variable_scope('Branch_0'): + tower_conv = slim.conv2d(net, 96, 1, scope='Conv2d_1x1') + with tf.variable_scope('Branch_1'): + tower_conv1_0 = slim.conv2d(net, 48, 1, scope='Conv2d_0a_1x1') + tower_conv1_1 = slim.conv2d(tower_conv1_0, 64, 5, + scope='Conv2d_0b_5x5') + with tf.variable_scope('Branch_2'): + tower_conv2_0 = slim.conv2d(net, 64, 1, scope='Conv2d_0a_1x1') + tower_conv2_1 = slim.conv2d(tower_conv2_0, 96, 3, + scope='Conv2d_0b_3x3') + tower_conv2_2 = slim.conv2d(tower_conv2_1, 96, 3, + scope='Conv2d_0c_3x3') + with tf.variable_scope('Branch_3'): + tower_pool = slim.avg_pool2d(net, 3, stride=1, padding='SAME', + scope='AvgPool_0a_3x3') + tower_pool_1 = slim.conv2d(tower_pool, 64, 1, + scope='Conv2d_0b_1x1') + net = tf.concat( + [tower_conv, tower_conv1_1, tower_conv2_2, tower_pool_1], 3) + + if add_and_check_final('Mixed_5b', net): return net, end_points + # TODO(alemi): Register intermediate endpoints + net = slim.repeat(net, 10, block35, scale=0.17) + + # 17 x 17 x 
1088 if output_stride == 8, + # 33 x 33 x 1088 if output_stride == 16 + use_atrous = output_stride == 8 + + with tf.variable_scope('Mixed_6a'): + with tf.variable_scope('Branch_0'): + tower_conv = slim.conv2d(net, 384, 3, stride=1 if use_atrous else 2, + padding=padding, + scope='Conv2d_1a_3x3') + with tf.variable_scope('Branch_1'): + tower_conv1_0 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1') + tower_conv1_1 = slim.conv2d(tower_conv1_0, 256, 3, + scope='Conv2d_0b_3x3') + tower_conv1_2 = slim.conv2d(tower_conv1_1, 384, 3, + stride=1 if use_atrous else 2, + padding=padding, + scope='Conv2d_1a_3x3') + with tf.variable_scope('Branch_2'): + tower_pool = slim.max_pool2d(net, 3, stride=1 if use_atrous else 2, + padding=padding, + scope='MaxPool_1a_3x3') + net = tf.concat([tower_conv, tower_conv1_2, tower_pool], 3) + + if add_and_check_final('Mixed_6a', net): return net, end_points + + # TODO(alemi): register intermediate endpoints + with slim.arg_scope([slim.conv2d], rate=2 if use_atrous else 1): + net = slim.repeat(net, 20, block17, scale=0.10) + if add_and_check_final('PreAuxLogits', net): return net, end_points + + if output_stride == 8: + # TODO(gpapan): Properly support output_stride for the rest of the net. + raise ValueError('output_stride==8 is only supported up to the ' + 'PreAuxlogits end_point for now.') + + # 8 x 8 x 2080 + with tf.variable_scope('Mixed_7a'): + with tf.variable_scope('Branch_0'): + tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1') + tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2, + padding=padding, + scope='Conv2d_1a_3x3') + with tf.variable_scope('Branch_1'): + tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1') + tower_conv1_1 = slim.conv2d(tower_conv1, 288, 3, stride=2, + padding=padding, + scope='Conv2d_1a_3x3') + with tf.variable_scope('Branch_2'): + tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1') + tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3, + scope='Conv2d_0b_3x3') + tower_conv2_2 = slim.conv2d(tower_conv2_1, 320, 3, stride=2, + padding=padding, + scope='Conv2d_1a_3x3') + with tf.variable_scope('Branch_3'): + tower_pool = slim.max_pool2d(net, 3, stride=2, + padding=padding, + scope='MaxPool_1a_3x3') + net = tf.concat( + [tower_conv_1, tower_conv1_1, tower_conv2_2, tower_pool], 3) + + if add_and_check_final('Mixed_7a', net): return net, end_points + + # TODO(alemi): register intermediate endpoints + net = slim.repeat(net, 9, block8, scale=0.20) + net = block8(net, activation_fn=None) + + # 8 x 8 x 1536 + net = slim.conv2d(net, 1536, 1, scope='Conv2d_7b_1x1') + if add_and_check_final('Conv2d_7b_1x1', net): return net, end_points + + raise ValueError('final_endpoint (%s) not recognized', final_endpoint) + + +def inception_resnet_v2(inputs, num_classes=1001, is_training=True, + dropout_keep_prob=0.8, + reuse=None, + scope='InceptionResnetV2', + create_aux_logits=True): + """Creates the Inception Resnet V2 model. + + Args: + inputs: a 4-D tensor of size [batch_size, height, width, 3]. + num_classes: number of predicted classes. + is_training: whether is training or not. + dropout_keep_prob: float, the fraction to keep before final layer. + reuse: whether or not the network and its variables should be reused. To be + able to reuse 'scope' must be given. + scope: Optional variable_scope. + create_aux_logits: Whether to include the auxilliary logits. + + Returns: + logits: the logits outputs of the model. + end_points: the set of end_points from the inception model. 
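+      Useful keys include 'PreAuxLogits', 'AuxLogits' (when create_aux_logits
+      is True), 'PreLogitsFlatten', 'Logits' and 'Predictions'.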
+ """ + end_points = {} + + with tf.variable_scope(scope, 'InceptionResnetV2', [inputs, num_classes], + reuse=reuse) as scope: + with slim.arg_scope([slim.batch_norm, slim.dropout], + is_training=is_training): + + net, end_points = inception_resnet_v2_base(inputs, scope=scope) + + if create_aux_logits: + with tf.variable_scope('AuxLogits'): + aux = end_points['PreAuxLogits'] + aux = slim.avg_pool2d(aux, 5, stride=3, padding='VALID', + scope='Conv2d_1a_3x3') + aux = slim.conv2d(aux, 128, 1, scope='Conv2d_1b_1x1') + aux = slim.conv2d(aux, 768, aux.get_shape()[1:3], + padding='VALID', scope='Conv2d_2a_5x5') + aux = slim.flatten(aux) + aux = slim.fully_connected(aux, num_classes, activation_fn=None, + scope='Logits') + end_points['AuxLogits'] = aux + + with tf.variable_scope('Logits'): + net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID', + scope='AvgPool_1a_8x8') + net = slim.flatten(net) + + net = slim.dropout(net, dropout_keep_prob, is_training=is_training, + scope='Dropout') + + end_points['PreLogitsFlatten'] = net + logits = slim.fully_connected(net, num_classes, activation_fn=None, + scope='Logits') + end_points['Logits'] = logits + end_points['Predictions'] = tf.nn.softmax(logits, name='Predictions') + + return logits, end_points +inception_resnet_v2.default_image_size = 299 + + +def inception_resnet_v2_arg_scope(weight_decay=0.00004, + batch_norm_decay=0.9997, + batch_norm_epsilon=0.001): + """Returns the scope with the default parameters for inception_resnet_v2. + + Args: + weight_decay: the weight decay for weights variables. + batch_norm_decay: decay for the moving average of batch_norm momentums. + batch_norm_epsilon: small float added to variance to avoid dividing by zero. + + Returns: + a arg_scope with the parameters needed for inception_resnet_v2. + """ + # Set weight_decay for weights in conv2d and fully_connected layers. + with slim.arg_scope([slim.conv2d, slim.fully_connected], + weights_regularizer=slim.l2_regularizer(weight_decay), + biases_regularizer=slim.l2_regularizer(weight_decay)): + + batch_norm_params = { + 'decay': batch_norm_decay, + 'epsilon': batch_norm_epsilon, + } + # Set activation_fn and parameters for batch_norm. + with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu, + normalizer_fn=slim.batch_norm, + normalizer_params=batch_norm_params) as scope: + return scope diff --git a/models/research/adversarial_crypto/README.md b/models/research/adversarial_crypto/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3822def1325b8d4eb1fd31335f2f8ce053ff747a --- /dev/null +++ b/models/research/adversarial_crypto/README.md @@ -0,0 +1,62 @@ +![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +# Learning to Protect Communications with Adversarial Neural Cryptography + +This is a slightly-updated model used for the paper +["Learning to Protect Communications with Adversarial Neural +Cryptography"](https://arxiv.org/abs/1610.06918). + +> We ask whether neural networks can learn to use secret keys to protect +> information from other neural networks. Specifically, we focus on ensuring +> confidentiality properties in a multiagent system, and we specify those +> properties in terms of an adversary. 
Thus, a system may consist of neural +> networks named Alice and Bob, and we aim to limit what a third neural +> network named Eve learns from eavesdropping on the communication between +> Alice and Bob. We do not prescribe specific cryptographic algorithms to +> these neural networks; instead, we train end-to-end, adversarially. +> We demonstrate that the neural networks can learn how to perform forms of +> encryption and decryption, and also how to apply these operations +> selectively in order to meet confidentiality goals. + +This code allows you to train encoder/decoder/adversary network triplets +and evaluate their effectiveness on randomly generated input and key +pairs. + +## Prerequisites + +The only software requirements for running the encoder and decoder is having +TensorFlow installed. + +Requires TensorFlow r0.12 or later. + +## Training and evaluating + +After installing TensorFlow and ensuring that your paths are configured +appropriately: + +``` +python train_eval.py +``` + +This will begin training a fresh model. If and when the model becomes +sufficiently well-trained, it will reset the Eve model multiple times +and retrain it from scratch, outputting the accuracy thus obtained +in each run. + +## Model differences from the paper + +The model has been simplified slightly from the one described in +the paper - the convolutional layer width was reduced by a factor +of two. In the version in the paper, there was a nonlinear unit +after the fully-connected layer; that nonlinear has been removed +here. These changes improve the robustness of training. The +initializer for the convolution layers has switched to the +`tf.contrib.layers default` of `xavier_initializer` instead of +a simpler `truncated_normal`. + +## Contact information + +This model repository is maintained by David G. Andersen +([dave-andersen](https://github.com/dave-andersen)). diff --git a/models/research/adversarial_crypto/train_eval.py b/models/research/adversarial_crypto/train_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..df7a00ad50f2ec01b37d8c162309a928207088d6 --- /dev/null +++ b/models/research/adversarial_crypto/train_eval.py @@ -0,0 +1,276 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Adversarial training to learn trivial encryption functions, +from the paper "Learning to Protect Communications with +Adversarial Neural Cryptography", Abadi & Andersen, 2016. + +https://arxiv.org/abs/1610.06918 + +This program creates and trains three neural networks, +termed Alice, Bob, and Eve. Alice takes inputs +in_m (message), in_k (key) and outputs 'ciphertext'. + +Bob takes inputs in_k, ciphertext and tries to reconstruct +the message. + +Eve is an adversarial network that takes input ciphertext +and also tries to reconstruct the message. 
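+
+Messages and keys are random vectors of -1/1 values (TEXT_SIZE and KEY_SIZE
+bits, both 16), and the reconstruction losses are measured in bits recovered
+incorrectly.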
+ +The main function attempts to train these networks and then +evaluates them, all on random plaintext and key values. + +""" + +# TensorFlow Python 3 compatibility +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import signal +import sys +from six.moves import xrange # pylint: disable=redefined-builtin +import tensorflow as tf + +flags = tf.app.flags + +flags.DEFINE_float('learning_rate', 0.0008, 'Constant learning rate') +flags.DEFINE_integer('batch_size', 4096, 'Batch size') + +FLAGS = flags.FLAGS + +# Input and output configuration. +TEXT_SIZE = 16 +KEY_SIZE = 16 + +# Training parameters. +ITERS_PER_ACTOR = 1 +EVE_MULTIPLIER = 2 # Train Eve 2x for every step of Alice/Bob +# Train until either max loops or Alice/Bob "good enough": +MAX_TRAINING_LOOPS = 850000 +BOB_LOSS_THRESH = 0.02 # Exit when Bob loss < 0.02 and Eve > 7.7 bits +EVE_LOSS_THRESH = 7.7 + +# Logging and evaluation. +PRINT_EVERY = 200 # In training, log every 200 steps. +EVE_EXTRA_ROUNDS = 2000 # At end, train eve a bit more. +RETRAIN_EVE_ITERS = 10000 # Retrain eve up to ITERS*LOOPS times. +RETRAIN_EVE_LOOPS = 25 # With an evaluation each loop +NUMBER_OF_EVE_RESETS = 5 # And do this up to 5 times with a fresh eve. +# Use EVAL_BATCHES samples each time we check accuracy. +EVAL_BATCHES = 1 + + +def batch_of_random_bools(batch_size, n): + """Return a batch of random "boolean" numbers. + + Args: + batch_size: Batch size dimension of returned tensor. + n: number of entries per batch. + + Returns: + A [batch_size, n] tensor of "boolean" numbers, where each number is + preresented as -1 or 1. + """ + + as_int = tf.random.uniform( + [batch_size, n], minval=0, maxval=2, dtype=tf.int32) + expanded_range = (as_int * 2) - 1 + return tf.cast(expanded_range, tf.float32) + + +class AdversarialCrypto(object): + """Primary model implementation class for Adversarial Neural Crypto. + + This class contains the code for the model itself, + and when created, plumbs the pathways from Alice to Bob and + Eve, creates the optimizers and loss functions, etc. + + Attributes: + eve_loss: Eve's loss function. + bob_loss: Bob's loss function. Different units from eve_loss. + eve_optimizer: A tf op that runs Eve's optimizer. + bob_optimizer: A tf op that runs Bob's optimizer. + bob_reconstruction_loss: Bob's message reconstruction loss, + which is comparable to eve_loss. + reset_eve_vars: Execute this op to completely reset Eve. + """ + + def get_message_and_key(self): + """Generate random pseudo-boolean key and message values.""" + + batch_size = tf.compat.v1.placeholder_with_default(FLAGS.batch_size, shape=[]) + + in_m = batch_of_random_bools(batch_size, TEXT_SIZE) + in_k = batch_of_random_bools(batch_size, KEY_SIZE) + return in_m, in_k + + def model(self, collection, message, key=None): + """The model for Alice, Bob, and Eve. If key=None, the first fully connected layer + takes only the message as inputs. Otherwise, it uses both the key + and the message. + + Args: + collection: The graph keys collection to add new vars to. + message: The input message to process. + key: The input key (if any) to use. + """ + + if key is not None: + combined_message = tf.concat(axis=1, values=[message, key]) + else: + combined_message = message + + # Ensure that all variables created are in the specified collection. 
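+    # arg_scope tags every fully_connected/conv2d variable created below with
+    # the given collection, so Alice, Bob and Eve can later be trained or
+    # reset independently via tf.compat.v1.get_collection('alice'/'bob'/'eve').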
+ with tf.contrib.framework.arg_scope( + [tf.contrib.layers.fully_connected, tf.contrib.layers.conv2d], + variables_collections=[collection]): + + fc = tf.contrib.layers.fully_connected( + combined_message, + TEXT_SIZE + KEY_SIZE, + biases_initializer=tf.constant_initializer(0.0), + activation_fn=None) + + # Perform a sequence of 1D convolutions (by expanding the message out to 2D + # and then squeezing it back down). + fc = tf.expand_dims(fc, 2) # 2D + fc = tf.expand_dims(fc, 3) # 3D -- conv2d needs a depth + # 2,1 -> 1,2 + conv = tf.contrib.layers.conv2d( + fc, 2, 2, 2, 'SAME', activation_fn=tf.nn.sigmoid) + # 1,2 -> 1, 2 + conv = tf.contrib.layers.conv2d( + conv, 2, 1, 1, 'SAME', activation_fn=tf.nn.sigmoid) + # 1,2 -> 1, 1 + conv = tf.contrib.layers.conv2d( + conv, 1, 1, 1, 'SAME', activation_fn=tf.nn.tanh) + conv = tf.squeeze(conv, 3) + conv = tf.squeeze(conv, 2) + return conv + + def __init__(self): + in_m, in_k = self.get_message_and_key() + encrypted = self.model('alice', in_m, in_k) + decrypted = self.model('bob', encrypted, in_k) + eve_out = self.model('eve', encrypted, None) + + self.reset_eve_vars = tf.group( + *[w.initializer for w in tf.compat.v1.get_collection('eve')]) + + optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=FLAGS.learning_rate) + + # Eve's goal is to decrypt the entire message: + eve_bits_wrong = tf.reduce_sum( + tf.abs((eve_out + 1.0) / 2.0 - (in_m + 1.0) / 2.0), [1]) + self.eve_loss = tf.reduce_sum(eve_bits_wrong) + self.eve_optimizer = optimizer.minimize( + self.eve_loss, var_list=tf.compat.v1.get_collection('eve')) + + # Alice and Bob want to be accurate... + self.bob_bits_wrong = tf.reduce_sum( + tf.abs((decrypted + 1.0) / 2.0 - (in_m + 1.0) / 2.0), [1]) + # ... and to not let Eve do better than guessing. + self.bob_reconstruction_loss = tf.reduce_sum(self.bob_bits_wrong) + bob_eve_error_deviation = tf.abs(float(TEXT_SIZE) / 2.0 - eve_bits_wrong) + # 7-9 bits wrong is OK too, so we squish the error function a bit. + # Without doing this, we often tend to hang out at 0.25 / 7.5 error, + # and it seems bad to have continued, high communication error. + bob_eve_loss = tf.reduce_sum( + tf.square(bob_eve_error_deviation) / (TEXT_SIZE / 2)**2) + + # Rescale the losses to [0, 1] per example and combine. + self.bob_loss = (self.bob_reconstruction_loss / TEXT_SIZE + bob_eve_loss) + + self.bob_optimizer = optimizer.minimize( + self.bob_loss, + var_list=(tf.compat.v1.get_collection('alice') + tf.compat.v1.get_collection('bob'))) + + +def doeval(s, ac, n, itercount): + """Evaluate the current network on n batches of random examples. + + Args: + s: The current TensorFlow session + ac: an instance of the AdversarialCrypto class + n: The number of iterations to run. + itercount: Iteration count label for logging. + + Returns: + Bob and Eve's loss, as a percent of bits incorrect. 
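+    With 16-bit messages, an Eve that does no better than random guessing
+    reconstructs about 8 bits incorrectly per message.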
+ """ + + bob_loss_accum = 0 + eve_loss_accum = 0 + for _ in xrange(n): + bl, el = s.run([ac.bob_reconstruction_loss, ac.eve_loss]) + bob_loss_accum += bl + eve_loss_accum += el + bob_loss_percent = bob_loss_accum / (n * FLAGS.batch_size) + eve_loss_percent = eve_loss_accum / (n * FLAGS.batch_size) + print('%10d\t%20.2f\t%20.2f'%(itercount, bob_loss_percent, eve_loss_percent)) + sys.stdout.flush() + return bob_loss_percent, eve_loss_percent + + +def train_until_thresh(s, ac): + for j in xrange(MAX_TRAINING_LOOPS): + for _ in xrange(ITERS_PER_ACTOR): + s.run(ac.bob_optimizer) + for _ in xrange(ITERS_PER_ACTOR * EVE_MULTIPLIER): + s.run(ac.eve_optimizer) + if j % PRINT_EVERY == 0: + bob_avg_loss, eve_avg_loss = doeval(s, ac, EVAL_BATCHES, j) + if (bob_avg_loss < BOB_LOSS_THRESH and eve_avg_loss > EVE_LOSS_THRESH): + print('Target losses achieved.') + return True + return False + + +def train_and_evaluate(): + """Run the full training and evaluation loop.""" + + ac = AdversarialCrypto() + init = tf.compat.v1.global_variables_initializer() + + with tf.compat.v1.Session() as s: + s.run(init) + print('# Batch size: ', FLAGS.batch_size) + print('# %10s\t%20s\t%20s'%("Iter","Bob_Recon_Error","Eve_Recon_Error")) + + if train_until_thresh(s, ac): + for _ in xrange(EVE_EXTRA_ROUNDS): + s.run(ac.eve_optimizer) + print('Loss after eve extra training:') + doeval(s, ac, EVAL_BATCHES * 2, 0) + for _ in xrange(NUMBER_OF_EVE_RESETS): + print('Resetting Eve') + s.run(ac.reset_eve_vars) + eve_counter = 0 + for _ in xrange(RETRAIN_EVE_LOOPS): + for _ in xrange(RETRAIN_EVE_ITERS): + eve_counter += 1 + s.run(ac.eve_optimizer) + doeval(s, ac, EVAL_BATCHES, eve_counter) + doeval(s, ac, EVAL_BATCHES, eve_counter) + + +def main(unused_argv): + # Exit more quietly with Ctrl-C. + signal.signal(signal.SIGINT, signal.SIG_DFL) + train_and_evaluate() + + +if __name__ == '__main__': + tf.compat.v1.app.run() diff --git a/models/research/adversarial_logit_pairing/README.md b/models/research/adversarial_logit_pairing/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d3f576836c4e0fb28eee9882906b18d88a90c564 --- /dev/null +++ b/models/research/adversarial_logit_pairing/README.md @@ -0,0 +1,281 @@ +![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +# Adversarial logit pairing + +This directory contains implementation of +[Adversarial logit pairing](https://arxiv.org/abs/1803.06373) paper as well as +few models pre-trained on ImageNet and Tiny ImageNet. + +Please contact [Alexey Kurakin](https://github.com/AlexeyKurakin) regarding +this code. + +## Pre-requesites + +Code dependencies: + +* TensorFlow 1.8 and Python 2.7 (other versions may work, but were not tested) +* [Abseil Python](https://github.com/abseil/abseil-py). +* Script which converts Tiny Imagenet dataset into TFRecord format also + depends on [Pandas](https://pandas.pydata.org/). + +## Datasets + +To use this code you need to download datasets. You only need to download +those datasets which you're going to use. Following list of datasets is +supported: + +* [ImageNet](http://www.image-net.org/). 
Follow + [Preparing the datasets](https://github.com/tensorflow/models/tree/master/research/slim#Data) + instructions in TF-Slim documentation to download and convert ImageNet dataset + to TFRecord format. + +* [Tiny ImageNet](https://tiny-imagenet.herokuapp.com/). + To obtain Tiny ImageNet dataset do following: + + ``` + # Download zip archive with TinyImagenet + curl -O http://cs231n.stanford.edu/tiny-imagenet-200.zip + + # Extract archive + unzip tiny-imagenet-200.zip + + # Convert dataset to TFRecord format + mkdir tiny-imagenet-tfrecord + python tiny_imagenet_converter/converter.py \ + --input_dir=tiny-imagenet-200 \ + --output_dir=tiny-imagenet-tfrecord + ``` + +## Running the code + +NOTE: Provided code supports distributed training on multiple machines, +and all provided checkpoints were trained in a distributed way. However it is +beyond the scope of this document to describe how to do distributed training. +Readed should refer to +[other material](https://www.tensorflow.org/deploy/distributed) to learn +about it. + +### Training + +Following command runs training: + +``` +# Following arguments has to be specified for training: +# - MAX_NUMBER_OF_TRAINING_STEPS - maximum number of training steps, +# omit this flag or set it to -1 to have unlimited number of training steps. +# - MODEL_NAME - name of the model, now only "resnet_v2_50" is supported. +# - MOVING_AVG_DECAY - decay rate for exponential moving average of the +# trainable variables. Training with exponential moving average usually +# leads to better accuracy. Default of 0.9999. -1 disable exponential moving +# average. Default works well, so typically you set it only if you want +# to disable this feature. +# - HYPERPARAMETERS - string with hyperparameters, +# see model_lib.py for full list of hyperparameters. +# - DATASET - dataset, either "imagenet" or "tiny_imagenet". +# - IMAGE_SIZE - size of the image (single number). +# - OUTPUT_DIRECTORY - directory where to write results. +# - IMAGENET_DIR - directory with ImageNet dataset in TFRecord format. +# - TINY_IMAGENET_DIR - directory with Tiny ImageNet dataset in TFRecord format. +# +# Note that only one of IMAGENET_DIR or TINY_IMAGENET_DIR has to be provided +# depending on which dataset you use. +# +python train.py \ + --max_steps="${MAX_NUMBER_OF_TRAINING_STEPS}" \ + --model_name="${MODEL_NAME}" \ + --moving_average_decay="${MOVING_AVG_DECAY}" \ + --hparams="${HYPERPARAMETERS}" \ + --dataset="${DATASET}" \ + --dataset_image_size="${IMAGE_SIZE}" \ + --output_dir="${OUTPUT_DIRECTORY}" \ + --imagenet_data_dir="${IMAGENET_DIR}" \ + --tiny_imagenet_data_dir="${TINY_IMAGENET_DIR}" +``` + +Full list of training hyperparameters could be found in `model_lib.py`. +These hyperparameters control learning rate schedule, optimizer, weight decay, +label smoothing and adversarial training. + +Adversarial training is controlled by following hyperparameters: + +* `train_adv_method` - method which is used to craft adversarial examples during + training. Could be one of the following: + + * `clean` - perform regular training with clean examples; + * `pgd_EPS_STEP_NITER` - use non targeted PGD with maximum size of + perturbation equal to `EPS`, step size equal to `STEP` + and number of iterations equal to `NITER`. Size of perturbation and step + size are expected to be integers between 1 and 255. + * `pgdll_EPS_STEP_NITER` - use targeted PGD, where target class is least + likely prediction of the network. 
+ * `pgdrnd_EPS_STEP_NITER` - use targeted PGD, where target class is chosen + randomly. + +* `train_lp_weight` - weight of adversarial logit pairing loss. If zero or + negarive, then no logit pairing is performed and training is done using + mixed minibatch PGD. If positive then adversarial logit pairing term is added + to the loss. + +Below is example of how to run training with adversarial logit pairing on +ImageNet 64x64: + +``` +python train.py \ + --model_name="resnet_v2_50" \ + --hparams="train_adv_method=pgdll_16_2_10,train_lp_weight=0.5" \ + --dataset="imagenet" \ + --dataset_image_size=64 \ + --output_dir="/tmp/adv_train" \ + --imagenet_data_dir="${IMAGENET_DIR}" +``` + +### Fine tuning + +Provided trainin script could be used to fine tune pre-trained checkpoint. +Following command does this: + +``` +# Fine tuning adds following additional arguments: +# - SCOPES_DO_NOT_LOAD_FROM_CHECKPOINT - comma separates list of scopes of +# variables, which should not be loadeded from checkpoint (and default +# initialization should be used instead). +# SCOPES_DO_NOT_LOAD_FROM_CHECKPOINT should be either same or a subset of +# LIST_OF_SCOPES_OF_TRAINABLE_VARS. +# - LIST_OF_SCOPES_OF_TRAINABLE_VARS - comma separated list of scopes of +# trainable variables. Only variables which are prefixed with these scopes +# will be trained. +# - PATH_TO_PRETRAINED_CHECKPOINT - directory with pretrained checkpoint which +# is used as initialization for fine tuning. +# +python train.py \ + --max_steps="${MAX_NUMBER_OF_TRAINING_STEPS}" \ + --model_name="${MODEL_NAME}" \ + --moving_average_decay="${MOVING_AVG_DECAY}" \ + --hparams="${HYPERPARAMETERS}" \ + --dataset="${DATASET}" \ + --dataset_image_size="${IMAGE_SIZE}" \ + --output_dir="${OUTPUT_DIRECTORY}" \ + --imagenet_data_dir="${IMAGENET_DIR}" \ + --tiny_imagenet_data_dir="${TINY_IMAGENET_DIR}" \ + --finetune_exclude_pretrained_scopes="${SCOPES_DO_NOT_LOAD_FROM_CHECKPOINT}" \ + --finetune_trainable_scopes="${LIST_OF_SCOPES_OF_TRAINABLE_VARS}" \ + --finetune_checkpoint_path="${PATH_TO_PRETRAINED_CHECKPOINT}" +``` + +Below is an example of how to fine tune last few layers of the model on +Tiny Imagenet dataset: + +``` +python train.py \ + --model_name="resnet_v2_50" \ + --hparams="train_adv_method=pgdll_16_2_10,train_lp_weight=0.5,learning_rate=0.02" \ + --dataset="tiny_imagenet" \ + --dataset_image_size=64 \ + --output_dir="/tmp/adv_finetune" \ + --tiny_imagenet_data_dir="${TINY_IMAGENET_DIR}" \ + --finetune_exclude_pretrained_scopes="resnet_v2_50/logits" \ + --finetune_trainable_scopes="resnet_v2_50/logits,resnet_v2_50/postnorm" \ + --finetune_checkpoint_path="/tmp/adv_train" +``` + +### Evaluation + +Following command runs evaluation: + +``` +# Following arguments should be provided for eval: +# - TRAINING_DIRECTORY - directory where training checkpoints are saved. +# - TRAINABLE_SCOPES - when loading checkpoint which was obtained by fine tuning +# this argument should be the same as LIST_OF_SCOPES_OF_TRAINABLE_VARS +# during training. Otherwise it should be empty. +# This is needed to properly load exponential moving average variables. +# If exponential moving averages are disabled then this flag could be +# omitted. +# - EVAL_SUBDIR_NAME - name of the subdirectory inside TRAINING_DIRECTORY +# where evaluation code will be saving event files. +# - DATASET - name of the dataset. +# - IMAGE_SIZE - size of the image in the dataset. +# - DATSET_SPLIT_NAME - name of the split in the dataset, +# either 'train' or 'validation'. Default is 'validation'. 
+# - MODEL_NAME - name of the model.
+# - MOVING_AVG_DECAY - decay rate for the exponential moving average.
+# - ADV_METHOD_FOR_EVAL - should be "clean" to evaluate on clean examples, or
+# a description of the adversarial method to evaluate on adversarial examples.
+# - HYPERPARAMETERS - hyperparameters; only "eval_batch_size" matters for eval.
+# - NUMBER_OF_EXAMPLES - how many examples from the dataset to use for
+# evaluation; specify -1 to use all examples.
+# - EVAL_ONCE - if True then evaluate only once, otherwise keep evaluation
+# running repeatedly on new checkpoints. Repeated evaluation might be useful
+# when running concurrently with training.
+# - IMAGENET_DIR - directory with the ImageNet dataset in TFRecord format.
+# - TINY_IMAGENET_DIR - directory with the Tiny ImageNet dataset in TFRecord format.
+#
+python eval.py \
+ --train_dir="${TRAINING_DIRECTORY}" \
+ --trainable_scopes="${TRAINABLE_SCOPES}" \
+ --eval_name="${EVAL_SUBDIR_NAME}" \
+ --dataset="${DATASET}" \
+ --dataset_image_size="${IMAGE_SIZE}" \
+ --split_name="${DATASET_SPLIT_NAME}" \
+ --model_name="${MODEL_NAME}" \
+ --moving_average_decay="${MOVING_AVG_DECAY}" \
+ --adv_method="${ADV_METHOD_FOR_EVAL}" \
+ --hparams="${HYPERPARAMETERS}" \
+ --num_examples="${NUMBER_OF_EXAMPLES}" \
+ --eval_once="${EVAL_ONCE}" \
+ --imagenet_data_dir="${IMAGENET_DIR}" \
+ --tiny_imagenet_data_dir="${TINY_IMAGENET_DIR}"
+```
+
+Example of running evaluation on 10000 clean examples from the ImageNet
+training set:
+
+```
+python eval.py \
+ --train_dir=/tmp/adv_train \
+ --dataset=imagenet \
+ --dataset_image_size=64 \
+ --split_name=train \
+ --adv_method=clean \
+ --hparams="eval_batch_size=50" \
+ --num_examples=10000 \
+ --eval_once=True \
+ --imagenet_data_dir="${IMAGENET_DIR}"
+```
+
+Example of running evaluation on adversarial images generated from the Tiny
+ImageNet validation set using the fine-tuned checkpoint:
+
+```
+python eval.py \
+ --train_dir=/tmp/adv_finetune \
+ --trainable_scopes="resnet_v2_50/logits,resnet_v2_50/postnorm" \
+ --dataset=tiny_imagenet \
+ --dataset_image_size=64 \
+ --adv_method=pgdrnd_16_2_10 \
+ --hparams="eval_batch_size=50" \
+ --eval_once=True \
+ --tiny_imagenet_data_dir="${TINY_IMAGENET_DIR}"
+```
+
+### Pre-trained models
+
+The following set of pre-trained checkpoints is released with this code:
+
+| Model | Dataset | Accuracy on
clean images | Accuracy on
`pgdll_16_1_20` | Accuracy on
`pgdll_16_2_10` | +| ----------- | ------------ | --------------- | --------------------------- | -------------- | +| [Baseline ResNet-v2-50](http://download.tensorflow.org/models/adversarial_logit_pairing/imagenet64_base_2018_06_26.ckpt.tar.gz) | ImageNet 64x64 | 60.5% | 1.8% | 3.5% | +| [ALP-trained ResNet-v2-50](http://download.tensorflow.org/models/adversarial_logit_pairing/imagenet64_alp025_2018_06_26.ckpt.tar.gz) | ImageNet 64x64 | 55.7% | 27.5% | 27.8% | +| [Baseline ResNet-v2-50](http://download.tensorflow.org/models/adversarial_logit_pairing/tiny_imagenet_base_2018_06_26.ckpt.tar.gz) | Tiny ImageNet | 69.2% | 0.1% | 0.3% | +| [ALP-trained ResNet-v2-50](http://download.tensorflow.org/models/adversarial_logit_pairing/tiny_imagenet_alp05_2018_06_26.ckpt.tar.gz) | Tiny ImageNet | 72.0% | 41.3% | 40.8% | + + +* All provided checkpoints were initially trained with exponential moving + average. However for ease of use they were re-saved without it. + So to load and use provided checkpoints you need to specify + `--moving_average_decay=-1` flag. +* All ALP models were trained with `pgdll_16_2_10` adversarial examples. +* All Tiny Imagenet models were obtained by fine tuning corresponding + ImageNet 64x64 models. ALP-trained models were fine tuned with ALP. diff --git a/models/research/adversarial_logit_pairing/adversarial_attack.py b/models/research/adversarial_logit_pairing/adversarial_attack.py new file mode 100644 index 0000000000000000000000000000000000000000..804bd64bcf4444007638f9802a83973ee68eb3cf --- /dev/null +++ b/models/research/adversarial_logit_pairing/adversarial_attack.py @@ -0,0 +1,219 @@ +# Copyright 2018 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Library with adversarial attacks. + +This library designed to be self-contained and have no dependencies other +than TensorFlow. It only contains PGD / Iterative FGSM attacks, +see https://arxiv.org/abs/1706.06083 and https://arxiv.org/abs/1607.02533 +for details. + +For wider set of adversarial attacks refer to Cleverhans library: +https://github.com/tensorflow/cleverhans +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + + +def generate_pgd_common(x, + bounds, + model_fn, + attack_params, + one_hot_labels, + perturbation_multiplier): + """Common code for generating PGD adversarial examples. + + Args: + x: original examples. + bounds: tuple with bounds of image values, bounds[0] < bounds[1]. + model_fn: model function with signature model_fn(images). + attack_params: parameters of the attack. + one_hot_labels: one hot label vector to use in the loss. + perturbation_multiplier: multiplier of adversarial perturbation, + either +1.0 or -1.0. + + Returns: + Tensor with adversarial examples. + + Raises: + ValueError: if attack parameters are invalid. 
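+
+ Note: attack_params is a string of the form 'EPS_STEP_NITER', for example
+ '16_2_10' means an epsilon of 16 and a step size of 2 (both on the 0..255
+ pixel scale, rescaled to the image bounds below) and 10 PGD iterations.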
+ """ + # parse attack_params + # Format of attack_params: 'EPS_STEP_NITER' + # where EPS - epsilon, STEP - step size, NITER - number of iterations + params_list = attack_params.split('_') + if len(params_list) != 3: + raise ValueError('Invalid parameters of PGD attack: %s' % attack_params) + epsilon = int(params_list[0]) + step_size = int(params_list[1]) + niter = int(params_list[2]) + + # rescale epsilon and step size to image bounds + epsilon = float(epsilon) / 255.0 * (bounds[1] - bounds[0]) + step_size = float(step_size) / 255.0 * (bounds[1] - bounds[0]) + + # clipping boundaries + clip_min = tf.maximum(x - epsilon, bounds[0]) + clip_max = tf.minimum(x + epsilon, bounds[1]) + + # compute starting point + start_x = x + tf.random_uniform(tf.shape(x), -epsilon, epsilon) + start_x = tf.clip_by_value(start_x, clip_min, clip_max) + + # main iteration of PGD + loop_vars = [0, start_x] + + def loop_cond(index, _): + return index < niter + + def loop_body(index, adv_images): + logits = model_fn(adv_images) + loss = tf.reduce_sum( + tf.nn.softmax_cross_entropy_with_logits_v2( + labels=one_hot_labels, + logits=logits)) + perturbation = step_size * tf.sign(tf.gradients(loss, adv_images)[0]) + new_adv_images = adv_images + perturbation_multiplier * perturbation + new_adv_images = tf.clip_by_value(new_adv_images, clip_min, clip_max) + return index + 1, new_adv_images + + with tf.control_dependencies([start_x]): + _, result = tf.while_loop( + loop_cond, + loop_body, + loop_vars, + back_prop=False, + parallel_iterations=1) + return result + + +def generate_pgd_ll(x, bounds, model_fn, attack_params): + # pylint: disable=g-doc-args + """Generats targeted PGD adversarial examples with least likely target class. + + See generate_pgd_common for description of arguments. + + Returns: + Tensor with adversarial examples. + """ + # pylint: enable=g-doc-args + + # compute one hot least likely class + logits = model_fn(x) + num_classes = tf.shape(logits)[1] + one_hot_labels = tf.one_hot(tf.argmin(model_fn(x), axis=1), num_classes) + + return generate_pgd_common(x, bounds, model_fn, attack_params, + one_hot_labels=one_hot_labels, + perturbation_multiplier=-1.0) + + +def generate_pgd_rand(x, bounds, model_fn, attack_params): + # pylint: disable=g-doc-args + """Generats targeted PGD adversarial examples with random target class. + + See generate_pgd_common for description of arguments. + + Returns: + Tensor with adversarial examples. + """ + # pylint: enable=g-doc-args + + # compute one hot random class + logits = model_fn(x) + batch_size = tf.shape(logits)[0] + num_classes = tf.shape(logits)[1] + random_labels = tf.random_uniform(shape=[batch_size], + minval=0, + maxval=num_classes, + dtype=tf.int32) + one_hot_labels = tf.one_hot(random_labels, num_classes) + + return generate_pgd_common(x, bounds, model_fn, attack_params, + one_hot_labels=one_hot_labels, + perturbation_multiplier=-1.0) + + +def generate_pgd(x, bounds, model_fn, attack_params): + # pylint: disable=g-doc-args + """Generats non-targeted PGD adversarial examples. + + See generate_pgd_common for description of arguments. + + Returns: + tensor with adversarial examples. 
+ """ + # pylint: enable=g-doc-args + + # compute one hot predicted class + logits = model_fn(x) + num_classes = tf.shape(logits)[1] + one_hot_labels = tf.one_hot(tf.argmax(model_fn(x), axis=1), num_classes) + + return generate_pgd_common(x, bounds, model_fn, attack_params, + one_hot_labels=one_hot_labels, + perturbation_multiplier=1.0) + + +def generate_adversarial_examples(x, bounds, model_fn, attack_description): + """Generates adversarial examples. + + Args: + x: original examples. + bounds: tuple with bounds of image values, bounds[0] < bounds[1] + model_fn: model function with signature model_fn(images). + attack_description: string which describes an attack, see notes below for + details. + + Returns: + Tensor with adversarial examples. + + Raises: + ValueError: if attack description is invalid. + + + Attack description could be one of the following strings: + - "clean" - no attack, return original images. + - "pgd_EPS_STEP_NITER" - non-targeted PGD attack. + - "pgdll_EPS_STEP_NITER" - tageted PGD attack with least likely target class. + - "pgdrnd_EPS_STEP_NITER" - targetd PGD attack with random target class. + + Meaning of attack parameters is following: + - EPS - maximum size of adversarial perturbation, between 0 and 255. + - STEP - step size of one iteration of PGD, between 0 and 255. + - NITER - number of iterations. + """ + if attack_description == 'clean': + return x + idx = attack_description.find('_') + if idx < 0: + raise ValueError('Invalid value of attack description %s' + % attack_description) + attack_name = attack_description[:idx] + attack_params = attack_description[idx+1:] + if attack_name == 'pgdll': + return generate_pgd_ll(x, bounds, model_fn, attack_params) + elif attack_name == 'pgdrnd': + return generate_pgd_rand(x, bounds, model_fn, attack_params) + elif attack_name == 'pgd': + return generate_pgd(x, bounds, model_fn, attack_params) + else: + raise ValueError('Invalid value of attack description %s' + % attack_description) + diff --git a/models/research/adversarial_logit_pairing/datasets/__init__.py b/models/research/adversarial_logit_pairing/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/adversarial_logit_pairing/datasets/dataset_factory.py b/models/research/adversarial_logit_pairing/datasets/dataset_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..01c36d4ff4710e1742e989b20a3daef75a6922e1 --- /dev/null +++ b/models/research/adversarial_logit_pairing/datasets/dataset_factory.py @@ -0,0 +1,62 @@ +# Copyright 2018 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Library which creates datasets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from datasets import imagenet_input +from datasets import tiny_imagenet_input + + +def get_dataset(dataset_name, split, batch_size, image_size, is_training): + """Returns dataset. + + Args: + dataset_name: name of the dataset, "imagenet" or "tiny_imagenet". + split: name of the split, "train" or "validation". + batch_size: size of the minibatch. + image_size: size of the one side of the image. Output images will be + resized to square shape image_size*image_size. + is_training: if True then training preprocessing is done, otherwise eval + preprocessing is done. + + Raises: + ValueError: if dataset_name is invalid. + + Returns: + dataset: instance of tf.data.Dataset with the dataset. + num_examples: number of examples in given split of the dataset. + num_classes: number of classes in the dataset. + bounds: tuple with bounds of image values. All returned image pixels + are between bounds[0] and bounds[1]. + """ + if dataset_name == 'tiny_imagenet': + dataset = tiny_imagenet_input.tiny_imagenet_input( + split, batch_size, image_size, is_training) + num_examples = tiny_imagenet_input.num_examples_per_epoch(split) + num_classes = 200 + bounds = (-1, 1) + elif dataset_name == 'imagenet': + dataset = imagenet_input.imagenet_input( + split, batch_size, image_size, is_training) + num_examples = imagenet_input.num_examples_per_epoch(split) + num_classes = 1001 + bounds = (-1, 1) + else: + raise ValueError('Invalid dataset %s' % dataset_name) + return dataset, num_examples, num_classes, bounds diff --git a/models/research/adversarial_logit_pairing/datasets/imagenet_input.py b/models/research/adversarial_logit_pairing/datasets/imagenet_input.py new file mode 100644 index 0000000000000000000000000000000000000000..0b210b8ce11f3dbf1f14482b1b4f3a95da02a48a --- /dev/null +++ b/models/research/adversarial_logit_pairing/datasets/imagenet_input.py @@ -0,0 +1,255 @@ +# Copyright 2018 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Imagenet input.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +from absl import flags +import tensorflow as tf + +FLAGS = flags.FLAGS + + +flags.DEFINE_string('imagenet_data_dir', None, + 'Directory with Imagenet dataset in TFRecord format.') + + +def _decode_and_random_crop(image_buffer, bbox, image_size): + """Randomly crops image and then scales to target size.""" + with tf.name_scope('distorted_bounding_box_crop', + values=[image_buffer, bbox]): + sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( + tf.image.extract_jpeg_shape(image_buffer), + bounding_boxes=bbox, + min_object_covered=0.1, + aspect_ratio_range=[0.75, 1.33], + area_range=[0.08, 1.0], + max_attempts=10, + use_image_if_no_bounding_boxes=True) + bbox_begin, bbox_size, _ = sample_distorted_bounding_box + + # Crop the image to the specified bounding box. + offset_y, offset_x, _ = tf.unstack(bbox_begin) + target_height, target_width, _ = tf.unstack(bbox_size) + crop_window = tf.stack([offset_y, offset_x, target_height, target_width]) + image = tf.image.decode_and_crop_jpeg(image_buffer, crop_window, channels=3) + image = tf.image.convert_image_dtype( + image, dtype=tf.float32) + + image = tf.image.resize_bicubic([image], + [image_size, image_size])[0] + + return image + + +def _decode_and_center_crop(image_buffer, image_size): + """Crops to center of image with padding then scales to target size.""" + shape = tf.image.extract_jpeg_shape(image_buffer) + image_height = shape[0] + image_width = shape[1] + + padded_center_crop_size = tf.cast( + 0.875 * tf.cast(tf.minimum(image_height, image_width), tf.float32), + tf.int32) + + offset_height = ((image_height - padded_center_crop_size) + 1) // 2 + offset_width = ((image_width - padded_center_crop_size) + 1) // 2 + crop_window = tf.stack([offset_height, offset_width, + padded_center_crop_size, padded_center_crop_size]) + image = tf.image.decode_and_crop_jpeg(image_buffer, crop_window, channels=3) + image = tf.image.convert_image_dtype( + image, dtype=tf.float32) + + image = tf.image.resize_bicubic([image], + [image_size, image_size])[0] + + return image + + +def _normalize(image): + """Rescale image to [-1, 1] range.""" + return tf.multiply(tf.subtract(image, 0.5), 2.0) + + +def image_preprocessing(image_buffer, bbox, image_size, is_training): + """Does image decoding and preprocessing. + + Args: + image_buffer: string tensor with encoded image. + bbox: bounding box of the object at the image. + image_size: image size. + is_training: whether to do training or eval preprocessing. + + Returns: + Tensor with the image. + """ + if is_training: + image = _decode_and_random_crop(image_buffer, bbox, image_size) + image = _normalize(image) + image = tf.image.random_flip_left_right(image) + else: + image = _decode_and_center_crop(image_buffer, image_size) + image = _normalize(image) + image = tf.reshape(image, [image_size, image_size, 3]) + return image + + +def imagenet_parser(value, image_size, is_training): + """Parse an ImageNet record from a serialized string Tensor. + + Args: + value: encoded example. + image_size: size of the output image. + is_training: if True then do training preprocessing, + otherwise do eval preprocessing. + + Returns: + image: tensor with the image. + label: true label of the image. 
+ """ + keys_to_features = { + 'image/encoded': + tf.FixedLenFeature((), tf.string, ''), + 'image/format': + tf.FixedLenFeature((), tf.string, 'jpeg'), + 'image/class/label': + tf.FixedLenFeature([], tf.int64, -1), + 'image/class/text': + tf.FixedLenFeature([], tf.string, ''), + 'image/object/bbox/xmin': + tf.VarLenFeature(dtype=tf.float32), + 'image/object/bbox/ymin': + tf.VarLenFeature(dtype=tf.float32), + 'image/object/bbox/xmax': + tf.VarLenFeature(dtype=tf.float32), + 'image/object/bbox/ymax': + tf.VarLenFeature(dtype=tf.float32), + 'image/object/class/label': + tf.VarLenFeature(dtype=tf.int64), + } + + parsed = tf.parse_single_example(value, keys_to_features) + + image_buffer = tf.reshape(parsed['image/encoded'], shape=[]) + + xmin = tf.expand_dims(parsed['image/object/bbox/xmin'].values, 0) + ymin = tf.expand_dims(parsed['image/object/bbox/ymin'].values, 0) + xmax = tf.expand_dims(parsed['image/object/bbox/xmax'].values, 0) + ymax = tf.expand_dims(parsed['image/object/bbox/ymax'].values, 0) + # Note that ordering is (y, x) + bbox = tf.concat([ymin, xmin, ymax, xmax], 0) + # Force the variable number of bounding boxes into the shape + # [1, num_boxes, coords]. + bbox = tf.expand_dims(bbox, 0) + bbox = tf.transpose(bbox, [0, 2, 1]) + + image = image_preprocessing( + image_buffer=image_buffer, + bbox=bbox, + image_size=image_size, + is_training=is_training + ) + + # Labels are in [1, 1000] range + label = tf.cast( + tf.reshape(parsed['image/class/label'], shape=[]), dtype=tf.int32) + + return image, label + + +def imagenet_input(split, batch_size, image_size, is_training): + """Returns ImageNet dataset. + + Args: + split: name of the split, "train" or "validation". + batch_size: size of the minibatch. + image_size: size of the one side of the image. Output images will be + resized to square shape image_size*image_size. + is_training: if True then training preprocessing is done, otherwise eval + preprocessing is done. + + Raises: + ValueError: if name of the split is incorrect. + + Returns: + Instance of tf.data.Dataset with the dataset. 
+ """ + if split.lower().startswith('train'): + file_pattern = os.path.join(FLAGS.imagenet_data_dir, 'train-*') + elif split.lower().startswith('validation'): + file_pattern = os.path.join(FLAGS.imagenet_data_dir, 'validation-*') + else: + raise ValueError('Invalid split: %s' % split) + + dataset = tf.data.Dataset.list_files(file_pattern, shuffle=is_training) + + if is_training: + dataset = dataset.repeat() + + def fetch_dataset(filename): + return tf.data.TFRecordDataset(filename, buffer_size=8*1024*1024) + + # Read the data from disk in parallel + dataset = dataset.apply( + tf.data.experimental.parallel_interleave( + fetch_dataset, cycle_length=4, sloppy=True)) + dataset = dataset.shuffle(1024) + + # Parse, preprocess, and batch the data in parallel + dataset = dataset.apply( + tf.data.experimental.map_and_batch( + lambda value: imagenet_parser(value, image_size, is_training), + batch_size=batch_size, + num_parallel_batches=4, + drop_remainder=True)) + + def set_shapes(images, labels): + """Statically set the batch_size dimension.""" + images.set_shape(images.get_shape().merge_with( + tf.TensorShape([batch_size, None, None, None]))) + labels.set_shape(labels.get_shape().merge_with( + tf.TensorShape([batch_size]))) + return images, labels + + # Assign static batch size dimension + dataset = dataset.map(set_shapes) + + # Prefetch overlaps in-feed with training + dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) + return dataset + + +def num_examples_per_epoch(split): + """Returns the number of examples in the data set. + + Args: + split: name of the split, "train" or "validation". + + Raises: + ValueError: if split name is incorrect. + + Returns: + Number of example in the split. + """ + if split.lower().startswith('train'): + return 1281167 + elif split.lower().startswith('validation'): + return 50000 + else: + raise ValueError('Invalid split: %s' % split) diff --git a/models/research/adversarial_logit_pairing/datasets/tiny_imagenet_input.py b/models/research/adversarial_logit_pairing/datasets/tiny_imagenet_input.py new file mode 100644 index 0000000000000000000000000000000000000000..6d216d53ed0bd9f6e7a5770510cedc7f3d9f0a42 --- /dev/null +++ b/models/research/adversarial_logit_pairing/datasets/tiny_imagenet_input.py @@ -0,0 +1,157 @@ +# Copyright 2018 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tiny imagenet input.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +from absl import flags +import tensorflow as tf + +FLAGS = flags.FLAGS + + +flags.DEFINE_string('tiny_imagenet_data_dir', None, + 'Directory with Tiny Imagenet dataset in TFRecord format.') + + +def tiny_imagenet_parser(value, image_size, is_training): + """Parses tiny imagenet example. + + Args: + value: encoded example. + image_size: size of the image. 
+ is_training: if True then do training preprocessing (which includes + random cropping), otherwise do eval preprocessing. + + Returns: + image: tensor with the image. + label: true label of the image. + """ + keys_to_features = { + 'image/encoded': tf.FixedLenFeature((), tf.string, ''), + 'label/tiny_imagenet': tf.FixedLenFeature([], tf.int64, -1), + } + + parsed = tf.parse_single_example(value, keys_to_features) + + image_buffer = tf.reshape(parsed['image/encoded'], shape=[]) + image = tf.image.decode_image(image_buffer, channels=3) + image = tf.image.convert_image_dtype( + image, dtype=tf.float32) + + # Crop image + if is_training: + bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box( + tf.shape(image), + bounding_boxes=tf.constant([0.0, 0.0, 1.0, 1.0], + dtype=tf.float32, + shape=[1, 1, 4]), + min_object_covered=0.5, + aspect_ratio_range=[0.75, 1.33], + area_range=[0.5, 1.0], + max_attempts=20, + use_image_if_no_bounding_boxes=True) + image = tf.slice(image, bbox_begin, bbox_size) + + # resize image + image = tf.image.resize_bicubic([image], [image_size, image_size])[0] + + # Rescale image to [-1, 1] range. + image = tf.multiply(tf.subtract(image, 0.5), 2.0) + + image = tf.reshape(image, [image_size, image_size, 3]) + + # Labels are in [0, 199] range + label = tf.cast( + tf.reshape(parsed['label/tiny_imagenet'], shape=[]), dtype=tf.int32) + + return image, label + + +def tiny_imagenet_input(split, batch_size, image_size, is_training): + """Returns Tiny Imagenet Dataset. + + Args: + split: name of the split, "train" or "validation". + batch_size: size of the minibatch. + image_size: size of the one side of the image. Output images will be + resized to square shape image_size*image_size. + is_training: if True then training preprocessing is done, otherwise eval + preprocessing is done.instance of tf.data.Dataset with the dataset. + + Raises: + ValueError: if name of the split is incorrect. + + Returns: + Instance of tf.data.Dataset with the dataset. + """ + if split.lower().startswith('train'): + filepath = os.path.join(FLAGS.tiny_imagenet_data_dir, 'train.tfrecord') + elif split.lower().startswith('validation'): + filepath = os.path.join(FLAGS.tiny_imagenet_data_dir, 'validation.tfrecord') + else: + raise ValueError('Invalid split: %s' % split) + + dataset = tf.data.TFRecordDataset(filepath, buffer_size=8*1024*1024) + + if is_training: + dataset = dataset.shuffle(10000) + dataset = dataset.repeat() + + dataset = dataset.apply( + tf.data.experimental.map_and_batch( + lambda value: tiny_imagenet_parser(value, image_size, is_training), + batch_size=batch_size, + num_parallel_batches=4, + drop_remainder=True)) + + def set_shapes(images, labels): + """Statically set the batch_size dimension.""" + images.set_shape(images.get_shape().merge_with( + tf.TensorShape([batch_size, None, None, None]))) + labels.set_shape(labels.get_shape().merge_with( + tf.TensorShape([batch_size]))) + return images, labels + + # Assign static batch size dimension + dataset = dataset.map(set_shapes) + + dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) + + return dataset + + +def num_examples_per_epoch(split): + """Returns the number of examples in the data set. + + Args: + split: name of the split, "train" or "validation". + + Raises: + ValueError: if split name is incorrect. + + Returns: + Number of example in the split. 
+ """ + if split.lower().startswith('train'): + return 100000 + elif split.lower().startswith('validation'): + return 10000 + else: + raise ValueError('Invalid split: %s' % split) diff --git a/models/research/adversarial_logit_pairing/eval.py b/models/research/adversarial_logit_pairing/eval.py new file mode 100644 index 0000000000000000000000000000000000000000..504cc0b0bcf52edff9e7aaa2c0d051079ba521aa --- /dev/null +++ b/models/research/adversarial_logit_pairing/eval.py @@ -0,0 +1,181 @@ +# Copyright 2018 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Program which runs evaluation of Imagenet 64x64 and TinyImagenet models.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from absl import app +from absl import flags + +import tensorflow as tf + +import adversarial_attack +import model_lib +from datasets import dataset_factory + +FLAGS = flags.FLAGS + + +flags.DEFINE_string('train_dir', None, + 'Training directory. If specified then this program ' + 'runs in continuous evaluation mode.') + +flags.DEFINE_string('checkpoint_path', None, + 'Path to the file with checkpoint. If specified then ' + 'this program evaluates only provided checkpoint one time.') + +flags.DEFINE_string('output_file', None, + 'Name of output file. 
Used only in single evaluation mode.') + +flags.DEFINE_string('eval_name', 'default', 'Name for eval subdirectory.') + +flags.DEFINE_string('master', '', 'Tensorflow master.') + +flags.DEFINE_string('model_name', 'resnet_v2_50', 'Name of the model.') + +flags.DEFINE_string('adv_method', 'clean', + 'Method which is used to generate adversarial examples.') + +flags.DEFINE_string('dataset', 'imagenet', + 'Dataset: "tiny_imagenet" or "imagenet".') + +flags.DEFINE_integer('dataset_image_size', 64, + 'Size of the images in the dataset.') + +flags.DEFINE_string('hparams', '', 'Hyper parameters.') + +flags.DEFINE_string('split_name', 'validation', 'Name of the split.') + +flags.DEFINE_float('moving_average_decay', 0.9999, + 'The decay to use for the moving average.') + +flags.DEFINE_integer('eval_interval_secs', 120, + 'The frequency, in seconds, with which evaluation is run.') + +flags.DEFINE_integer( + 'num_examples', -1, + 'If positive - maximum number of example to use for evaluation.') + +flags.DEFINE_bool('eval_once', False, + 'If true then evaluate model only once.') + +flags.DEFINE_string('trainable_scopes', None, + 'If set then it defines list of variable scopes for ' + 'trainable variables.') + + +def main(_): + if not FLAGS.train_dir and not FLAGS.checkpoint_path: + print('Either --train_dir or --checkpoint_path flags has to be provided.') + if FLAGS.train_dir and FLAGS.checkpoint_path: + print('Only one of --train_dir or --checkpoint_path should be provided.') + params = model_lib.default_hparams() + params.parse(FLAGS.hparams) + tf.logging.info('User provided hparams: %s', FLAGS.hparams) + tf.logging.info('All hyper parameters: %s', params) + batch_size = params.eval_batch_size + graph = tf.Graph() + with graph.as_default(): + # dataset + dataset, num_examples, num_classes, bounds = dataset_factory.get_dataset( + FLAGS.dataset, + FLAGS.split_name, + batch_size, + FLAGS.dataset_image_size, + is_training=False) + dataset_iterator = dataset.make_one_shot_iterator() + images, labels = dataset_iterator.get_next() + if FLAGS.num_examples > 0: + num_examples = min(num_examples, FLAGS.num_examples) + + # setup model + global_step = tf.train.get_or_create_global_step() + model_fn_two_args = model_lib.get_model(FLAGS.model_name, num_classes) + model_fn = lambda x: model_fn_two_args(x, is_training=False) + if not FLAGS.adv_method or FLAGS.adv_method == 'clean': + logits = model_fn(images) + else: + adv_examples = adversarial_attack.generate_adversarial_examples( + images, bounds, model_fn, FLAGS.adv_method) + logits = model_fn(adv_examples) + + # update trainable variables if fine tuning is used + model_lib.filter_trainable_variables(FLAGS.trainable_scopes) + + # Setup the moving averages + if FLAGS.moving_average_decay and (FLAGS.moving_average_decay > 0): + variable_averages = tf.train.ExponentialMovingAverage( + FLAGS.moving_average_decay, global_step) + variables_to_restore = variable_averages.variables_to_restore( + tf.contrib.framework.get_model_variables()) + variables_to_restore[global_step.op.name] = global_step + else: + variables_to_restore = tf.contrib.framework.get_variables_to_restore() + + # Setup evaluation metric + with tf.name_scope('Eval'): + names_to_values, names_to_updates = ( + tf.contrib.metrics.aggregate_metric_map({ + 'Accuracy': tf.metrics.accuracy(labels, tf.argmax(logits, 1)), + 'Top5': tf.metrics.recall_at_k(tf.to_int64(labels), logits, 5) + })) + + for name, value in names_to_values.iteritems(): + tf.summary.scalar(name, value) + + # Run evaluation + num_batches = 
int(num_examples / batch_size) + if FLAGS.train_dir: + output_dir = os.path.join(FLAGS.train_dir, FLAGS.eval_name) + if not tf.gfile.Exists(output_dir): + tf.gfile.MakeDirs(output_dir) + tf.contrib.training.evaluate_repeatedly( + FLAGS.train_dir, + master=FLAGS.master, + scaffold=tf.train.Scaffold( + saver=tf.train.Saver(variables_to_restore)), + eval_ops=names_to_updates.values(), + eval_interval_secs=FLAGS.eval_interval_secs, + hooks=[ + tf.contrib.training.StopAfterNEvalsHook(num_batches), + tf.contrib.training.SummaryAtEndHook(output_dir), + tf.train.LoggingTensorHook(names_to_values, at_end=True), + ], + max_number_of_evaluations=1 if FLAGS.eval_once else None) + else: + result = tf.contrib.training.evaluate_once( + FLAGS.checkpoint_path, + master=FLAGS.master, + scaffold=tf.train.Scaffold( + saver=tf.train.Saver(variables_to_restore)), + eval_ops=names_to_updates.values(), + final_ops=names_to_values, + hooks=[ + tf.contrib.training.StopAfterNEvalsHook(num_batches), + tf.train.LoggingTensorHook(names_to_values, at_end=True), + ]) + if FLAGS.output_file: + with tf.gfile.Open(FLAGS.output_file, 'a') as f: + f.write('%s,%.3f,%.3f\n' + % (FLAGS.eval_name, result['Accuracy'], result['Top5'])) + + +if __name__ == '__main__': + app.run(main) diff --git a/models/research/adversarial_logit_pairing/model_lib.py b/models/research/adversarial_logit_pairing/model_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..1499a378ea1ba6511122ebe54ceed1226d38d649 --- /dev/null +++ b/models/research/adversarial_logit_pairing/model_lib.py @@ -0,0 +1,189 @@ +# Copyright 2018 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Library with common functions for training and eval.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import six + +import tensorflow as tf + +from tensorflow.contrib.slim.nets import resnet_v2 + + +def default_hparams(): + """Returns default hyperparameters.""" + return tf.contrib.training.HParams( + # Batch size for training and evaluation. + batch_size=32, + eval_batch_size=50, + + # General training parameters. + weight_decay=0.0001, + label_smoothing=0.1, + + # Parameters of the adversarial training. + train_adv_method='clean', # adversarial training method + train_lp_weight=0.0, # Weight of adversarial logit pairing loss + + # Parameters of the optimizer. + optimizer='rms', # possible values are: 'rms', 'momentum', 'adam' + momentum=0.9, # momentum + rmsprop_decay=0.9, # Decay term for RMSProp + rmsprop_epsilon=1.0, # Epsilon term for RMSProp + + # Parameters of learning rate schedule. 
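+ # 'exp_decay' multiplies the learning rate by lr_decay_factor every
+ # lr_num_epochs_per_decay epochs, 'step' switches between the values in
+ # lr_list at the epoch boundaries given by lr_decay_epochs, and 'fixed'
+ # keeps learning_rate constant (see get_lr_schedule below).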
+ lr_schedule='exp_decay', # Possible values: 'exp_decay', 'step', 'fixed' + learning_rate=0.045, + lr_decay_factor=0.94, # Learning exponential decay + lr_num_epochs_per_decay=2.0, # Number of epochs per lr decay + lr_list=[1.0 / 6, 2.0 / 6, 3.0 / 6, + 4.0 / 6, 5.0 / 6, 1.0, 0.1, 0.01, + 0.001, 0.0001], + lr_decay_epochs=[1, 2, 3, 4, 5, 30, 60, 80, + 90]) + + +def get_lr_schedule(hparams, examples_per_epoch, replicas_to_aggregate=1): + """Returns TensorFlow op which compute learning rate. + + Args: + hparams: hyper parameters. + examples_per_epoch: number of training examples per epoch. + replicas_to_aggregate: number of training replicas running in parallel. + + Raises: + ValueError: if learning rate schedule specified in hparams is incorrect. + + Returns: + learning_rate: tensor with learning rate. + steps_per_epoch: number of training steps per epoch. + """ + global_step = tf.train.get_or_create_global_step() + steps_per_epoch = float(examples_per_epoch) / float(hparams.batch_size) + if replicas_to_aggregate > 0: + steps_per_epoch /= replicas_to_aggregate + + if hparams.lr_schedule == 'exp_decay': + decay_steps = long(steps_per_epoch * hparams.lr_num_epochs_per_decay) + learning_rate = tf.train.exponential_decay( + hparams.learning_rate, + global_step, + decay_steps, + hparams.lr_decay_factor, + staircase=True) + elif hparams.lr_schedule == 'step': + lr_decay_steps = [long(epoch * steps_per_epoch) + for epoch in hparams.lr_decay_epochs] + learning_rate = tf.train.piecewise_constant( + global_step, lr_decay_steps, hparams.lr_list) + elif hparams.lr_schedule == 'fixed': + learning_rate = hparams.learning_rate + else: + raise ValueError('Invalid value of lr_schedule: %s' % hparams.lr_schedule) + + if replicas_to_aggregate > 0: + learning_rate *= replicas_to_aggregate + + return learning_rate, steps_per_epoch + + +def get_optimizer(hparams, learning_rate): + """Returns optimizer. + + Args: + hparams: hyper parameters. + learning_rate: learning rate tensor. + + Raises: + ValueError: if type of optimizer specified in hparams is incorrect. + + Returns: + Instance of optimizer class. + """ + if hparams.optimizer == 'rms': + optimizer = tf.train.RMSPropOptimizer(learning_rate, + hparams.rmsprop_decay, + hparams.momentum, + hparams.rmsprop_epsilon) + elif hparams.optimizer == 'momentum': + optimizer = tf.train.MomentumOptimizer(learning_rate, + hparams.momentum) + elif hparams.optimizer == 'adam': + optimizer = tf.train.AdamOptimizer(learning_rate) + else: + raise ValueError('Invalid value of optimizer: %s' % hparams.optimizer) + return optimizer + + +RESNET_MODELS = {'resnet_v2_50': resnet_v2.resnet_v2_50} + + +def get_model(model_name, num_classes): + """Returns function which creates model. + + Args: + model_name: Name of the model. + num_classes: Number of classes. + + Raises: + ValueError: If model_name is invalid. + + Returns: + Function, which creates model when called. + """ + if model_name.startswith('resnet'): + def resnet_model(images, is_training, reuse=tf.AUTO_REUSE): + with tf.contrib.framework.arg_scope(resnet_v2.resnet_arg_scope()): + resnet_fn = RESNET_MODELS[model_name] + logits, _ = resnet_fn(images, num_classes, is_training=is_training, + reuse=reuse) + logits = tf.reshape(logits, [-1, num_classes]) + return logits + return resnet_model + else: + raise ValueError('Invalid model: %s' % model_name) + + +def filter_trainable_variables(trainable_scopes): + """Keep only trainable variables which are prefixed with given scopes. 
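+
+ For example, trainable_scopes='resnet_v2_50/logits,resnet_v2_50/postnorm'
+ keeps only the final logits and post-normalization variables trainable, as in
+ the fine-tuning example from the README.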
+ + Args: + trainable_scopes: either list of trainable scopes or string with comma + separated list of trainable scopes. + + This function removes all variables which are not prefixed with given + trainable_scopes from collection of trainable variables. + Useful during network fine tuning, when you only need to train subset of + variables. + """ + if not trainable_scopes: + return + if isinstance(trainable_scopes, six.string_types): + trainable_scopes = [scope.strip() for scope in trainable_scopes.split(',')] + trainable_scopes = {scope for scope in trainable_scopes if scope} + if not trainable_scopes: + return + trainable_collection = tf.get_collection_ref( + tf.GraphKeys.TRAINABLE_VARIABLES) + non_trainable_vars = [ + v for v in trainable_collection + if not any([v.op.name.startswith(s) for s in trainable_scopes]) + ] + for v in non_trainable_vars: + trainable_collection.remove(v) diff --git a/models/research/adversarial_logit_pairing/tiny_imagenet_converter/converter.py b/models/research/adversarial_logit_pairing/tiny_imagenet_converter/converter.py new file mode 100644 index 0000000000000000000000000000000000000000..4fdccc32071f8c677bb1395e324c6b94aa7e85af --- /dev/null +++ b/models/research/adversarial_logit_pairing/tiny_imagenet_converter/converter.py @@ -0,0 +1,241 @@ +# Copyright 2018 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Converts Tiny Imagenet dataset into TFRecord format. + +As an output this program generates following files in TFRecord format: +- train.tfrecord +- validation.tfrecord +- test.tfrecord + +Generated train and validation files will contain tf.Example entries with +following features: +- image/encoded - encoded image +- image/format - image format +- label/wnid - label WordNet ID +- label/imagenet - imagenet label [1 ... 1000] +- label/tiny_imagenet - tiny imagenet label [0 ... 199] +- bbox/xmin +- bbox/ymin +- bbox/xmax +- bbox/ymax + +Test file will contain entries with 'image/encoded' and 'image/format' features. 
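+
+Example usage, matching the conversion step in the README (the optional
+--imagenet_synsets_path flag additionally writes full ImageNet labels):
+
+ python converter.py \
+ --input_dir=tiny-imagenet-200 \
+ --output_dir=tiny-imagenet-tfrecord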
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from collections import namedtuple +import os +import random + +from absl import app +from absl import flags +from absl import logging + +import pandas as pd + +import tensorflow as tf + + +FLAGS = flags.FLAGS + +flags.DEFINE_string('input_dir', '', 'Input directory') +flags.DEFINE_string('output_dir', '', 'Output directory') + +flags.DEFINE_string('imagenet_synsets_path', '', + 'Optional path to /imagenet_lsvrc_2015_synsets.txt') + + +ImageMetadata = namedtuple('ImageMetadata', ['label', 'x1', 'y1', 'x2', 'y2']) + + +class WnIdToNodeIdConverter(object): + """Converts WordNet IDs to numerical labels.""" + + def __init__(self, wnids_path, background_class): + self._wnid_to_node_id = {} + self._node_id_to_wnid = {} + with tf.gfile.Open(wnids_path) as f: + wnids_sequence = [wnid.strip() for wnid in f.readlines() if wnid.strip()] + node_id_offset = 1 if background_class else 0 + for i, label in enumerate(wnids_sequence): + self._wnid_to_node_id[label] = i + node_id_offset + self._node_id_to_wnid[i + node_id_offset] = label + + def to_node_id(self, wnid): + return self._wnid_to_node_id[wnid] + + def to_wnid(self, node_id): + return self._node_id_to_wnid[node_id] + + def all_wnids(self): + return self._wnid_to_node_id.keys() + + +def read_tiny_imagenet_annotations(annotations_filename, + images_dir, + one_label=None): + """Reads one file with Tiny Imagenet annotations.""" + result = [] + if one_label: + column_names = ['filename', 'x1', 'y1', 'x2', 'y2'] + else: + column_names = ['filename', 'label', 'x1', 'y1', 'x2', 'y2'] + with tf.gfile.Open(annotations_filename) as f: + data = pd.read_csv(f, sep='\t', names=column_names) + for row in data.itertuples(): + label = one_label if one_label else getattr(row, 'label') + full_filename = os.path.join(images_dir, getattr(row, 'filename')) + result.append((full_filename, + ImageMetadata(label=label, + x1=getattr(row, 'x1'), + y1=getattr(row, 'y1'), + x2=getattr(row, 'x2'), + y2=getattr(row, 'y2')))) + return result + + +def read_validation_annotations(validation_dir): + """Reads validation data annotations.""" + return read_tiny_imagenet_annotations( + os.path.join(validation_dir, 'val_annotations.txt'), + os.path.join(validation_dir, 'images')) + + +def read_training_annotations(training_dir): + """Reads training data annotations.""" + result = [] + sub_dirs = tf.gfile.ListDirectory(training_dir) + for sub_dir in sub_dirs: + if not sub_dir.startswith('n'): + logging.warning('Found non-class directory in training dir: %s', sub_dir) + continue + sub_dir_results = read_tiny_imagenet_annotations( + os.path.join(training_dir, sub_dir, sub_dir + '_boxes.txt'), + os.path.join(training_dir, sub_dir, 'images'), + one_label=sub_dir) + result.extend(sub_dir_results) + return result + + +def read_test_annotations(test_dir): + """Reads test data annotations.""" + files = tf.gfile.ListDirectory(os.path.join(test_dir, 'images')) + return [(os.path.join(test_dir, 'images', f), None) + for f in files if f.endswith('.JPEG')] + + +def get_image_format(filename): + """Returns image format from filename.""" + filename = filename.lower() + if filename.endswith('jpeg') or filename.endswith('jpg'): + return 'jpeg' + elif filename.endswith('png'): + return 'png' + else: + raise ValueError('Unrecognized file format: %s' % filename) + + +class TinyImagenetWriter(object): + """Helper class which writes Tiny Imagenet dataset into TFRecord file.""" + + def 
__init__(self, tiny_imagenet_wnid_conveter, imagenet_wnid_converter): + self.tiny_imagenet_wnid_conveter = tiny_imagenet_wnid_conveter + self.imagenet_wnid_converter = imagenet_wnid_converter + + def write_tf_record(self, + annotations, + output_file): + """Generates TFRecord file from given list of annotations.""" + with tf.python_io.TFRecordWriter(output_file) as writer: + for image_filename, image_metadata in annotations: + with tf.gfile.Open(image_filename) as f: + image_buffer = f.read() + image_format = get_image_format(image_filename) + features = { + 'image/encoded': tf.train.Feature( + bytes_list=tf.train.BytesList(value=[image_buffer])), + 'image/format': tf.train.Feature( + bytes_list=tf.train.BytesList(value=[image_format])) + } + if image_metadata: + # bounding box features + features['bbox/xmin'] = tf.train.Feature( + int64_list=tf.train.Int64List(value=[image_metadata.x1])) + features['bbox/ymin'] = tf.train.Feature( + int64_list=tf.train.Int64List(value=[image_metadata.y1])) + features['bbox/xmax'] = tf.train.Feature( + int64_list=tf.train.Int64List(value=[image_metadata.x2])) + features['bbox/ymax'] = tf.train.Feature( + int64_list=tf.train.Int64List(value=[image_metadata.y2])) + # tiny imagenet label, from [0, 200) iterval + tiny_imagenet_label = self.tiny_imagenet_wnid_conveter.to_node_id( + image_metadata.label) + features['label/wnid'] = tf.train.Feature( + bytes_list=tf.train.BytesList(value=image_metadata.label)) + features['label/tiny_imagenet'] = tf.train.Feature( + int64_list=tf.train.Int64List(value=[tiny_imagenet_label])) + # full imagenet label, from [1, 1001) interval + if self.imagenet_wnid_converter: + imagenet_label = self.imagenet_wnid_converter.to_node_id( + image_metadata.label) + features['label/imagenet'] = tf.train.Feature( + int64_list=tf.train.Int64List(value=[imagenet_label])) + example = tf.train.Example(features=tf.train.Features(feature=features)) + writer.write(example.SerializeToString()) + + +def main(_): + assert FLAGS.input_dir, 'Input directory must be provided' + assert FLAGS.output_dir, 'Output directory must be provided' + + # Create WordNet ID conveters for tiny imagenet and possibly for imagenet + tiny_imagenet_wnid_conveter = WnIdToNodeIdConverter( + os.path.join(FLAGS.input_dir, 'wnids.txt'), + background_class=False) + if FLAGS.imagenet_synsets_path: + imagenet_wnid_converter = WnIdToNodeIdConverter(FLAGS.imagenet_synsets_path, + background_class=True) + else: + imagenet_wnid_converter = None + + # read tiny imagenet annotations + train_annotations = read_training_annotations( + os.path.join(FLAGS.input_dir, 'train')) + random.shuffle(train_annotations) + val_annotations = read_validation_annotations( + os.path.join(FLAGS.input_dir, 'val')) + test_filenames = read_test_annotations(os.path.join(FLAGS.input_dir, 'test')) + + # Generate TFRecord files + writer = TinyImagenetWriter(tiny_imagenet_wnid_conveter, + imagenet_wnid_converter) + tf.logging.info('Converting %d training images', len(train_annotations)) + writer.write_tf_record(train_annotations, + os.path.join(FLAGS.output_dir, 'train.tfrecord')) + tf.logging.info('Converting %d validation images ', len(val_annotations)) + writer.write_tf_record(val_annotations, + os.path.join(FLAGS.output_dir, 'validation.tfrecord')) + tf.logging.info('Converting %d test images', len(test_filenames)) + writer.write_tf_record(test_filenames, + os.path.join(FLAGS.output_dir, 'test.tfrecord')) + tf.logging.info('All files are converted') + + +if __name__ == '__main__': + app.run(main) diff --git 
a/models/research/adversarial_logit_pairing/train.py b/models/research/adversarial_logit_pairing/train.py new file mode 100644 index 0000000000000000000000000000000000000000..dd20969f8d09c59f7d294ee34a9e41bd44f86b39 --- /dev/null +++ b/models/research/adversarial_logit_pairing/train.py @@ -0,0 +1,288 @@ +# Copyright 2018 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Program which train models.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import app +from absl import flags + +import tensorflow as tf + +import adversarial_attack +import model_lib +from datasets import dataset_factory + +FLAGS = flags.FLAGS + + +flags.DEFINE_integer('max_steps', -1, 'Number of steps to stop at.') + +flags.DEFINE_string('output_dir', None, + 'Training directory where checkpoints will be saved.') + +flags.DEFINE_integer('ps_tasks', 0, 'Number of parameter servers.') + +flags.DEFINE_integer('task', 0, 'Task ID for running distributed training.') + +flags.DEFINE_string('master', '', 'Tensorflow master.') + +flags.DEFINE_string('model_name', 'resnet_v2_50', 'Name of the model.') + +flags.DEFINE_string('dataset', 'imagenet', + 'Dataset: "tiny_imagenet" or "imagenet".') + +flags.DEFINE_integer('dataset_image_size', 64, + 'Size of the images in the dataset.') + +flags.DEFINE_integer('num_summary_images', 3, + 'Number of images to display in Tensorboard.') + +flags.DEFINE_integer( + 'save_summaries_steps', 100, + 'The frequency with which summaries are saved, in steps.') + +flags.DEFINE_integer( + 'save_summaries_secs', None, + 'The frequency with which summaries are saved, in seconds.') + +flags.DEFINE_integer( + 'save_model_steps', 500, + 'The frequency with which the model is saved, in steps.') + +flags.DEFINE_string('hparams', '', 'Hyper parameters.') + +flags.DEFINE_integer('replicas_to_aggregate', 1, + 'Number of gradients to collect before param updates.') + +flags.DEFINE_integer('worker_replicas', 1, 'Number of worker replicas.') + +flags.DEFINE_float('moving_average_decay', 0.9999, + 'The decay to use for the moving average.') + +# Flags to control fine tuning + +flags.DEFINE_string('finetune_checkpoint_path', None, + 'Path to checkpoint for fine tuning. 
' + 'If None then no fine tuning is done.') + +flags.DEFINE_string('finetune_exclude_pretrained_scopes', '', + 'Variable scopes to exclude when loading checkpoint for ' + 'fine tuning.') + +flags.DEFINE_string('finetune_trainable_scopes', None, + 'If set then it defines list of variable scopes for ' + 'trainable variables.') + + +def _get_finetuning_init_fn(variable_averages): + """Returns an init functions, used for fine tuning.""" + if not FLAGS.finetune_checkpoint_path: + return None + + if tf.train.latest_checkpoint(FLAGS.output_dir): + return None + + if tf.gfile.IsDirectory(FLAGS.finetune_checkpoint_path): + checkpoint_path = tf.train.latest_checkpoint(FLAGS.finetune_checkpoint_path) + else: + checkpoint_path = FLAGS.finetune_checkpoint_path + + if not checkpoint_path: + tf.logging.warning('Not doing fine tuning, can not find checkpoint in %s', + FLAGS.finetune_checkpoint_path) + return None + + tf.logging.info('Fine-tuning from %s', checkpoint_path) + + if FLAGS.finetune_exclude_pretrained_scopes: + exclusions = { + scope.strip() + for scope in FLAGS.finetune_exclude_pretrained_scopes.split(',') + } + else: + exclusions = set() + + filtered_model_variables = [ + v for v in tf.contrib.framework.get_model_variables() + if not any([v.op.name.startswith(e) for e in exclusions]) + ] + + if variable_averages: + variables_to_restore = {} + for v in filtered_model_variables: + # variables_to_restore[variable_averages.average_name(v)] = v + if v in tf.trainable_variables(): + variables_to_restore[variable_averages.average_name(v)] = v + else: + variables_to_restore[v.op.name] = v + else: + variables_to_restore = {v.op.name: v for v in filtered_model_variables} + + assign_fn = tf.contrib.framework.assign_from_checkpoint_fn( + checkpoint_path, + variables_to_restore) + if assign_fn: + return lambda _, sess: assign_fn(sess) + else: + return None + + +def main(_): + assert FLAGS.output_dir, '--output_dir has to be provided' + if not tf.gfile.Exists(FLAGS.output_dir): + tf.gfile.MakeDirs(FLAGS.output_dir) + params = model_lib.default_hparams() + params.parse(FLAGS.hparams) + tf.logging.info('User provided hparams: %s', FLAGS.hparams) + tf.logging.info('All hyper parameters: %s', params) + batch_size = params.batch_size + graph = tf.Graph() + with graph.as_default(): + with tf.device(tf.train.replica_device_setter(ps_tasks=FLAGS.ps_tasks)): + # dataset + dataset, examples_per_epoch, num_classes, bounds = ( + dataset_factory.get_dataset( + FLAGS.dataset, + 'train', + batch_size, + FLAGS.dataset_image_size, + is_training=True)) + dataset_iterator = dataset.make_one_shot_iterator() + images, labels = dataset_iterator.get_next() + one_hot_labels = tf.one_hot(labels, num_classes) + + # set up model + global_step = tf.train.get_or_create_global_step() + model_fn = model_lib.get_model(FLAGS.model_name, num_classes) + if params.train_adv_method == 'clean': + logits = model_fn(images, is_training=True) + adv_examples = None + else: + model_fn_eval_mode = lambda x: model_fn(x, is_training=False) + adv_examples = adversarial_attack.generate_adversarial_examples( + images, bounds, model_fn_eval_mode, params.train_adv_method) + all_examples = tf.concat([images, adv_examples], axis=0) + logits = model_fn(all_examples, is_training=True) + one_hot_labels = tf.concat([one_hot_labels, one_hot_labels], axis=0) + + # update trainable variables if fine tuning is used + model_lib.filter_trainable_variables( + FLAGS.finetune_trainable_scopes) + + # set up losses + total_loss = tf.losses.softmax_cross_entropy( + 
onehot_labels=one_hot_labels, + logits=logits, + label_smoothing=params.label_smoothing) + tf.summary.scalar('loss_xent', total_loss) + + if params.train_lp_weight > 0: + images1, images2 = tf.split(logits, 2) + loss_lp = tf.losses.mean_squared_error( + images1, images2, weights=params.train_lp_weight) + tf.summary.scalar('loss_lp', loss_lp) + total_loss += loss_lp + + if params.weight_decay > 0: + loss_wd = ( + params.weight_decay + * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()]) + ) + tf.summary.scalar('loss_wd', loss_wd) + total_loss += loss_wd + + # Setup the moving averages: + if FLAGS.moving_average_decay and (FLAGS.moving_average_decay > 0): + with tf.name_scope('moving_average'): + moving_average_variables = tf.contrib.framework.get_model_variables() + variable_averages = tf.train.ExponentialMovingAverage( + FLAGS.moving_average_decay, global_step) + else: + moving_average_variables = None + variable_averages = None + + # set up optimizer and training op + learning_rate, steps_per_epoch = model_lib.get_lr_schedule( + params, examples_per_epoch, FLAGS.replicas_to_aggregate) + + optimizer = model_lib.get_optimizer(params, learning_rate) + + optimizer = tf.train.SyncReplicasOptimizer( + opt=optimizer, + replicas_to_aggregate=FLAGS.replicas_to_aggregate, + total_num_replicas=FLAGS.worker_replicas, + variable_averages=variable_averages, + variables_to_average=moving_average_variables) + + train_op = tf.contrib.training.create_train_op( + total_loss, optimizer, + update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS)) + + tf.summary.image('images', images[0:FLAGS.num_summary_images]) + if adv_examples is not None: + tf.summary.image('adv_images', adv_examples[0:FLAGS.num_summary_images]) + tf.summary.scalar('total_loss', total_loss) + tf.summary.scalar('learning_rate', learning_rate) + tf.summary.scalar('current_epoch', + tf.to_double(global_step) / steps_per_epoch) + + # Training + is_chief = FLAGS.task == 0 + + scaffold = tf.train.Scaffold( + init_fn=_get_finetuning_init_fn(variable_averages)) + hooks = [ + tf.train.LoggingTensorHook({'total_loss': total_loss, + 'global_step': global_step}, + every_n_iter=1), + tf.train.NanTensorHook(total_loss), + ] + chief_only_hooks = [ + tf.train.SummarySaverHook(save_steps=FLAGS.save_summaries_steps, + save_secs=FLAGS.save_summaries_secs, + output_dir=FLAGS.output_dir, + scaffold=scaffold), + tf.train.CheckpointSaverHook(FLAGS.output_dir, + save_steps=FLAGS.save_model_steps, + scaffold=scaffold), + ] + + if FLAGS.max_steps > 0: + hooks.append( + tf.train.StopAtStepHook(last_step=FLAGS.max_steps)) + + # hook for sync replica training + hooks.append(optimizer.make_session_run_hook(is_chief)) + + with tf.train.MonitoredTrainingSession( + master=FLAGS.master, + is_chief=is_chief, + checkpoint_dir=FLAGS.output_dir, + scaffold=scaffold, + hooks=hooks, + chief_only_hooks=chief_only_hooks, + save_checkpoint_secs=None, + save_summaries_steps=None, + save_summaries_secs=None) as session: + while not session.should_stop(): + session.run([train_op]) + + +if __name__ == '__main__': + app.run(main) diff --git a/models/research/adversarial_text/README.md b/models/research/adversarial_text/README.md new file mode 100644 index 0000000000000000000000000000000000000000..36b5d657615882908719c32ecdabcd75c2668382 --- /dev/null +++ b/models/research/adversarial_text/README.md @@ -0,0 +1,160 @@ +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not 
Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +# Adversarial Text Classification + +Code for [*Adversarial Training Methods for Semi-Supervised Text Classification*](https://arxiv.org/abs/1605.07725) and [*Semi-Supervised Sequence Learning*](https://arxiv.org/abs/1511.01432). + +## Requirements + +* TensorFlow >= v1.3 + +## End-to-end IMDB Sentiment Classification + +### Fetch data + +```bash +$ wget http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz \ + -O /tmp/imdb.tar.gz +$ tar -xf /tmp/imdb.tar.gz -C /tmp +``` + +The directory `/tmp/aclImdb` contains the raw IMDB data. + +### Generate vocabulary + +```bash +$ IMDB_DATA_DIR=/tmp/imdb +$ python gen_vocab.py \ + --output_dir=$IMDB_DATA_DIR \ + --dataset=imdb \ + --imdb_input_dir=/tmp/aclImdb \ + --lowercase=False +``` + +Vocabulary and frequency files will be generated in `$IMDB_DATA_DIR`. + +###  Generate training, validation, and test data + +```bash +$ python gen_data.py \ + --output_dir=$IMDB_DATA_DIR \ + --dataset=imdb \ + --imdb_input_dir=/tmp/aclImdb \ + --lowercase=False \ + --label_gain=False +``` + +`$IMDB_DATA_DIR` contains TFRecords files. + +### Pretrain IMDB Language Model + +```bash +$ PRETRAIN_DIR=/tmp/models/imdb_pretrain +$ python pretrain.py \ + --train_dir=$PRETRAIN_DIR \ + --data_dir=$IMDB_DATA_DIR \ + --vocab_size=86934 \ + --embedding_dims=256 \ + --rnn_cell_size=1024 \ + --num_candidate_samples=1024 \ + --batch_size=256 \ + --learning_rate=0.001 \ + --learning_rate_decay_factor=0.9999 \ + --max_steps=100000 \ + --max_grad_norm=1.0 \ + --num_timesteps=400 \ + --keep_prob_emb=0.5 \ + --normalize_embeddings +``` + +`$PRETRAIN_DIR` contains checkpoints of the pretrained language model. + +### Train classifier + +Most flags stay the same, save for the removal of candidate sampling and the +addition of `pretrained_model_dir`, from which the classifier will load the +pretrained embedding and LSTM variables, and flags related to adversarial +training and classification. + +```bash +$ TRAIN_DIR=/tmp/models/imdb_classify +$ python train_classifier.py \ + --train_dir=$TRAIN_DIR \ + --pretrained_model_dir=$PRETRAIN_DIR \ + --data_dir=$IMDB_DATA_DIR \ + --vocab_size=86934 \ + --embedding_dims=256 \ + --rnn_cell_size=1024 \ + --cl_num_layers=1 \ + --cl_hidden_size=30 \ + --batch_size=64 \ + --learning_rate=0.0005 \ + --learning_rate_decay_factor=0.9998 \ + --max_steps=15000 \ + --max_grad_norm=1.0 \ + --num_timesteps=400 \ + --keep_prob_emb=0.5 \ + --normalize_embeddings \ + --adv_training_method=vat \ + --perturb_norm_length=5.0 +``` + +### Evaluate on test data + +```bash +$ EVAL_DIR=/tmp/models/imdb_eval +$ python evaluate.py \ + --eval_dir=$EVAL_DIR \ + --checkpoint_dir=$TRAIN_DIR \ + --eval_data=test \ + --run_once \ + --num_examples=25000 \ + --data_dir=$IMDB_DATA_DIR \ + --vocab_size=86934 \ + --embedding_dims=256 \ + --rnn_cell_size=1024 \ + --batch_size=256 \ + --num_timesteps=400 \ + --normalize_embeddings +``` + +## Code Overview + +The main entry points are the binaries listed below. Each training binary builds +a `VatxtModel`, defined in `graphs.py`, which in turn uses graph building blocks +defined in `inputs.py` (defines input data reading and parsing), `layers.py` +(defines core model components), and `adversarial_losses.py` (defines +adversarial training losses). The training loop itself is defined in +`train_utils.py`. 
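The adversarial methods perturb the word *embeddings* rather than the raw tokens. As a rough standalone illustration of the `at` perturbation (a NumPy sketch with made-up names, not the TensorFlow implementation in `adversarial_losses.py`), the gradient of the classification loss with respect to the embeddings is rescaled to a fixed L2 norm; `--perturb_norm_length` controls that norm:

```python
import numpy as np

def scale_l2(x, norm_length):
    # Rescale x to L2 norm `norm_length`; factoring out max|x| keeps the sum of squares stable.
    alpha = np.max(np.abs(x)) + 1e-12
    l2 = alpha * np.sqrt(np.sum((x / alpha) ** 2) + 1e-6)
    return norm_length * x / l2

def adversarial_perturbation(grad_wrt_embedding, norm_length=5.0):
    # 'at': move the embeddings a fixed L2 distance along the loss gradient.
    return scale_l2(grad_wrt_embedding, norm_length)

rng = np.random.default_rng(0)
g = rng.normal(size=(3, 4))  # stand-in for a (num_timesteps, embedding_dims) gradient
print(np.linalg.norm(adversarial_perturbation(g)))  # ~5.0
```

Virtual adversarial training (`vat`) chooses the perturbation direction without using labels: it runs a power-iteration step on the KL divergence between predictions on clean and perturbed embeddings, as implemented in `adversarial_losses.py`.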
+ +### Binaries + +* Pretraining: `pretrain.py` +* Classifier Training: `train_classifier.py` +* Evaluation: `evaluate.py` + +### Command-Line Flags + +Flags related to distributed training and the training loop itself are defined +in [`train_utils.py`](https://github.com/tensorflow/models/tree/master/research/adversarial_text/train_utils.py). + +Flags related to model hyperparameters are defined in [`graphs.py`](https://github.com/tensorflow/models/tree/master/research/adversarial_text/graphs.py). + +Flags related to adversarial training are defined in [`adversarial_losses.py`](https://github.com/tensorflow/models/tree/master/research/adversarial_text/adversarial_losses.py). + +Flags particular to each job are defined in the main binary files. + +### Data Generation + +* Vocabulary generation: [`gen_vocab.py`](https://github.com/tensorflow/models/tree/master/research/adversarial_text/gen_vocab.py) +* Data generation: [`gen_data.py`](https://github.com/tensorflow/models/tree/master/research/adversarial_text/gen_data.py) + +Command-line flags defined in [`document_generators.py`](https://github.com/tensorflow/models/tree/master/research/adversarial_text/data/document_generators.py) +control which dataset is processed and how. + +## Contact for Issues + +* Ryan Sepassi, @rsepassi +* Andrew M. Dai, @a-dai +* Takeru Miyato, @takerum (Original implementation) diff --git a/models/research/adversarial_text/__init__.py b/models/research/adversarial_text/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/adversarial_text/adversarial_losses.py b/models/research/adversarial_text/adversarial_losses.py new file mode 100644 index 0000000000000000000000000000000000000000..671315e8a99c6e68679daa592514f29bc67bbc80 --- /dev/null +++ b/models/research/adversarial_text/adversarial_losses.py @@ -0,0 +1,236 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Adversarial losses for text models.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# Dependency imports + +from six.moves import xrange +import tensorflow as tf + +flags = tf.app.flags +FLAGS = flags.FLAGS + +# Adversarial and virtual adversarial training parameters. +flags.DEFINE_float('perturb_norm_length', 5.0, + 'Norm length of adversarial perturbation to be ' + 'optimized with validation. ' + '5.0 is optimal on IMDB with virtual adversarial training. ') + +# Virtual adversarial training parameters +flags.DEFINE_integer('num_power_iteration', 1, 'The number of power iteration') +flags.DEFINE_float('small_constant_for_finite_diff', 1e-1, + 'Small constant for finite difference method') + +# Parameters for building the graph +flags.DEFINE_string('adv_training_method', None, + 'The flag which specifies training method. ' + '"" : non-adversarial training (e.g. 
for running the ' + ' semi-supervised sequence learning model) ' + '"rp" : random perturbation training ' + '"at" : adversarial training ' + '"vat" : virtual adversarial training ' + '"atvat" : at + vat ') +flags.DEFINE_float('adv_reg_coeff', 1.0, + 'Regularization coefficient of adversarial loss.') + + +def random_perturbation_loss(embedded, length, loss_fn): + """Adds noise to embeddings and recomputes classification loss.""" + noise = tf.random_normal(shape=tf.shape(embedded)) + perturb = _scale_l2(_mask_by_length(noise, length), FLAGS.perturb_norm_length) + return loss_fn(embedded + perturb) + + +def adversarial_loss(embedded, loss, loss_fn): + """Adds gradient to embedding and recomputes classification loss.""" + grad, = tf.gradients( + loss, + embedded, + aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N) + grad = tf.stop_gradient(grad) + perturb = _scale_l2(grad, FLAGS.perturb_norm_length) + return loss_fn(embedded + perturb) + + +def virtual_adversarial_loss(logits, embedded, inputs, + logits_from_embedding_fn): + """Virtual adversarial loss. + + Computes virtual adversarial perturbation by finite difference method and + power iteration, adds it to the embedding, and computes the KL divergence + between the new logits and the original logits. + + Args: + logits: 3-D float Tensor, [batch_size, num_timesteps, m], where m=1 if + num_classes=2, otherwise m=num_classes. + embedded: 3-D float Tensor, [batch_size, num_timesteps, embedding_dim]. + inputs: VatxtInput. + logits_from_embedding_fn: callable that takes embeddings and returns + classifier logits. + + Returns: + kl: float scalar. + """ + # Stop gradient of logits. See https://arxiv.org/abs/1507.00677 for details. + logits = tf.stop_gradient(logits) + + # Only care about the KL divergence on the final timestep. + weights = inputs.eos_weights + assert weights is not None + if FLAGS.single_label: + indices = tf.stack([tf.range(FLAGS.batch_size), inputs.length - 1], 1) + weights = tf.expand_dims(tf.gather_nd(inputs.eos_weights, indices), 1) + + # Initialize perturbation with random noise. + # shape(embedded) = (batch_size, num_timesteps, embedding_dim) + d = tf.random_normal(shape=tf.shape(embedded)) + + # Perform finite difference method and power iteration. + # See Eq.(8) in the paper http://arxiv.org/pdf/1507.00677.pdf, + # Adding small noise to input and taking gradient with respect to the noise + # corresponds to 1 power iteration. 
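  # One pass through the loop below, written out:
  #   d  <- xi * d / ||d||_2        with xi = FLAGS.small_constant_for_finite_diff
  #   kl <- KL( p(.|embedded) || p(.|embedded + d) )
  #   d  <- grad_d kl               (one power-iteration step, per the reference above)
  # After the loop, d is rescaled to FLAGS.perturb_norm_length and the returned loss
  # is the KL divergence between predictions on the clean and perturbed embeddings.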
+ for _ in xrange(FLAGS.num_power_iteration): + d = _scale_l2( + _mask_by_length(d, inputs.length), FLAGS.small_constant_for_finite_diff) + + d_logits = logits_from_embedding_fn(embedded + d) + kl = _kl_divergence_with_logits(logits, d_logits, weights) + d, = tf.gradients( + kl, + d, + aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N) + d = tf.stop_gradient(d) + + perturb = _scale_l2(d, FLAGS.perturb_norm_length) + vadv_logits = logits_from_embedding_fn(embedded + perturb) + return _kl_divergence_with_logits(logits, vadv_logits, weights) + + +def random_perturbation_loss_bidir(embedded, length, loss_fn): + """Adds noise to embeddings and recomputes classification loss.""" + noise = [tf.random_normal(shape=tf.shape(emb)) for emb in embedded] + masked = [_mask_by_length(n, length) for n in noise] + scaled = [_scale_l2(m, FLAGS.perturb_norm_length) for m in masked] + return loss_fn([e + s for (e, s) in zip(embedded, scaled)]) + + +def adversarial_loss_bidir(embedded, loss, loss_fn): + """Adds gradient to embeddings and recomputes classification loss.""" + grads = tf.gradients( + loss, + embedded, + aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N) + adv_exs = [ + emb + _scale_l2(tf.stop_gradient(g), FLAGS.perturb_norm_length) + for emb, g in zip(embedded, grads) + ] + return loss_fn(adv_exs) + + +def virtual_adversarial_loss_bidir(logits, embedded, inputs, + logits_from_embedding_fn): + """Virtual adversarial loss for bidirectional models.""" + logits = tf.stop_gradient(logits) + f_inputs, _ = inputs + weights = f_inputs.eos_weights + if FLAGS.single_label: + indices = tf.stack([tf.range(FLAGS.batch_size), f_inputs.length - 1], 1) + weights = tf.expand_dims(tf.gather_nd(f_inputs.eos_weights, indices), 1) + assert weights is not None + + perturbs = [ + _mask_by_length(tf.random_normal(shape=tf.shape(emb)), f_inputs.length) + for emb in embedded + ] + for _ in xrange(FLAGS.num_power_iteration): + perturbs = [ + _scale_l2(d, FLAGS.small_constant_for_finite_diff) for d in perturbs + ] + d_logits = logits_from_embedding_fn( + [emb + d for (emb, d) in zip(embedded, perturbs)]) + kl = _kl_divergence_with_logits(logits, d_logits, weights) + perturbs = tf.gradients( + kl, + perturbs, + aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N) + perturbs = [tf.stop_gradient(d) for d in perturbs] + + perturbs = [_scale_l2(d, FLAGS.perturb_norm_length) for d in perturbs] + vadv_logits = logits_from_embedding_fn( + [emb + d for (emb, d) in zip(embedded, perturbs)]) + return _kl_divergence_with_logits(logits, vadv_logits, weights) + + +def _mask_by_length(t, length): + """Mask t, 3-D [batch, time, dim], by length, 1-D [batch,].""" + maxlen = t.get_shape().as_list()[1] + + # Subtract 1 from length to prevent the perturbation from going on 'eos' + mask = tf.sequence_mask(length - 1, maxlen=maxlen) + mask = tf.expand_dims(tf.cast(mask, tf.float32), -1) + # shape(mask) = (batch, num_timesteps, 1) + return t * mask + + +def _scale_l2(x, norm_length): + # shape(x) = (batch, num_timesteps, d) + # Divide x by max(abs(x)) for a numerically stable L2 norm. 
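  # (each entry of x/alpha lies in [-1, 1], so squaring and summing cannot overflow;
  #  the identity below shows the rescaling is exact, not an approximation)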
+ # 2norm(x) = a * 2norm(x/a) + # Scale over the full sequence, dims (1, 2) + alpha = tf.reduce_max(tf.abs(x), (1, 2), keep_dims=True) + 1e-12 + l2_norm = alpha * tf.sqrt( + tf.reduce_sum(tf.pow(x / alpha, 2), (1, 2), keep_dims=True) + 1e-6) + x_unit = x / l2_norm + return norm_length * x_unit + + +def _kl_divergence_with_logits(q_logits, p_logits, weights): + """Returns weighted KL divergence between distributions q and p. + + Args: + q_logits: logits for 1st argument of KL divergence shape + [batch_size, num_timesteps, num_classes] if num_classes > 2, and + [batch_size, num_timesteps] if num_classes == 2. + p_logits: logits for 2nd argument of KL divergence with same shape q_logits. + weights: 1-D float tensor with shape [batch_size, num_timesteps]. + Elements should be 1.0 only on end of sequences + + Returns: + KL: float scalar. + """ + # For logistic regression + if FLAGS.num_classes == 2: + q = tf.nn.sigmoid(q_logits) + kl = (-tf.nn.sigmoid_cross_entropy_with_logits(logits=q_logits, labels=q) + + tf.nn.sigmoid_cross_entropy_with_logits(logits=p_logits, labels=q)) + kl = tf.squeeze(kl, 2) + + # For softmax regression + else: + q = tf.nn.softmax(q_logits) + kl = tf.reduce_sum( + q * (tf.nn.log_softmax(q_logits) - tf.nn.log_softmax(p_logits)), -1) + + num_labels = tf.reduce_sum(weights) + num_labels = tf.where(tf.equal(num_labels, 0.), 1., num_labels) + + kl.get_shape().assert_has_rank(2) + weights.get_shape().assert_has_rank(2) + + loss = tf.identity(tf.reduce_sum(weights * kl) / num_labels, name='kl') + return loss diff --git a/models/research/adversarial_text/data/__init__.py b/models/research/adversarial_text/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/adversarial_text/data/data_utils.py b/models/research/adversarial_text/data/data_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..55d9e3a0922d4ff54a4c6deb38b62be9bfa76c2e --- /dev/null +++ b/models/research/adversarial_text/data/data_utils.py @@ -0,0 +1,332 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utilities for generating/preprocessing data for adversarial text models.""" + +import operator +import os +import random +import re + +# Dependency imports + +import tensorflow as tf + +EOS_TOKEN = '
' + +# Data filenames +# Sequence Autoencoder +ALL_SA = 'all_sa.tfrecords' +TRAIN_SA = 'train_sa.tfrecords' +TEST_SA = 'test_sa.tfrecords' +# Language Model +ALL_LM = 'all_lm.tfrecords' +TRAIN_LM = 'train_lm.tfrecords' +TEST_LM = 'test_lm.tfrecords' +# Classification +TRAIN_CLASS = 'train_classification.tfrecords' +TEST_CLASS = 'test_classification.tfrecords' +VALID_CLASS = 'validate_classification.tfrecords' +# LM with bidirectional LSTM +TRAIN_REV_LM = 'train_reverse_lm.tfrecords' +TEST_REV_LM = 'test_reverse_lm.tfrecords' +# Classification with bidirectional LSTM +TRAIN_BD_CLASS = 'train_bidir_classification.tfrecords' +TEST_BD_CLASS = 'test_bidir_classification.tfrecords' +VALID_BD_CLASS = 'validate_bidir_classification.tfrecords' + + +class ShufflingTFRecordWriter(object): + """Thin wrapper around TFRecordWriter that shuffles records.""" + + def __init__(self, path): + self._path = path + self._records = [] + self._closed = False + + def write(self, record): + assert not self._closed + self._records.append(record) + + def close(self): + assert not self._closed + random.shuffle(self._records) + with tf.python_io.TFRecordWriter(self._path) as f: + for record in self._records: + f.write(record) + self._closed = True + + def __enter__(self): + return self + + def __exit__(self, unused_type, unused_value, unused_traceback): + self.close() + + +class Timestep(object): + """Represents a single timestep in a SequenceWrapper.""" + + def __init__(self, token, label, weight, multivalent_tokens=False): + """Constructs Timestep from empty Features.""" + self._token = token + self._label = label + self._weight = weight + self._multivalent_tokens = multivalent_tokens + self._fill_with_defaults() + + @property + def token(self): + if self._multivalent_tokens: + raise TypeError('Timestep may contain multiple values; use `tokens`') + return self._token.int64_list.value[0] + + @property + def tokens(self): + return self._token.int64_list.value + + @property + def label(self): + return self._label.int64_list.value[0] + + @property + def weight(self): + return self._weight.float_list.value[0] + + def set_token(self, token): + if self._multivalent_tokens: + raise TypeError('Timestep may contain multiple values; use `add_token`') + self._token.int64_list.value[0] = token + return self + + def add_token(self, token): + self._token.int64_list.value.append(token) + return self + + def set_label(self, label): + self._label.int64_list.value[0] = label + return self + + def set_weight(self, weight): + self._weight.float_list.value[0] = weight + return self + + def copy_from(self, timestep): + self.set_token(timestep.token).set_label(timestep.label).set_weight( + timestep.weight) + return self + + def _fill_with_defaults(self): + if not self._multivalent_tokens: + self._token.int64_list.value.append(0) + self._label.int64_list.value.append(0) + self._weight.float_list.value.append(0.0) + + +class SequenceWrapper(object): + """Wrapper around tf.SequenceExample.""" + + F_TOKEN_ID = 'token_id' + F_LABEL = 'label' + F_WEIGHT = 'weight' + + def __init__(self, multivalent_tokens=False): + self._seq = tf.train.SequenceExample() + self._flist = self._seq.feature_lists.feature_list + self._timesteps = [] + self._multivalent_tokens = multivalent_tokens + + @property + def seq(self): + return self._seq + + @property + def multivalent_tokens(self): + return self._multivalent_tokens + + @property + def _tokens(self): + return self._flist[SequenceWrapper.F_TOKEN_ID].feature + + @property + def _labels(self): + return 
self._flist[SequenceWrapper.F_LABEL].feature + + @property + def _weights(self): + return self._flist[SequenceWrapper.F_WEIGHT].feature + + def add_timestep(self): + timestep = Timestep( + self._tokens.add(), + self._labels.add(), + self._weights.add(), + multivalent_tokens=self._multivalent_tokens) + self._timesteps.append(timestep) + return timestep + + def __iter__(self): + for timestep in self._timesteps: + yield timestep + + def __len__(self): + return len(self._timesteps) + + def __getitem__(self, idx): + return self._timesteps[idx] + + +def build_reverse_sequence(seq): + """Builds a sequence that is the reverse of the input sequence.""" + reverse_seq = SequenceWrapper() + + # Copy all but last timestep + for timestep in reversed(seq[:-1]): + reverse_seq.add_timestep().copy_from(timestep) + + # Copy final timestep + reverse_seq.add_timestep().copy_from(seq[-1]) + + return reverse_seq + + +def build_bidirectional_seq(seq, rev_seq): + bidir_seq = SequenceWrapper(multivalent_tokens=True) + for forward_ts, reverse_ts in zip(seq, rev_seq): + bidir_seq.add_timestep().add_token(forward_ts.token).add_token( + reverse_ts.token) + + return bidir_seq + + +def build_lm_sequence(seq): + """Builds language model sequence from input sequence. + + Args: + seq: SequenceWrapper. + + Returns: + SequenceWrapper with `seq` tokens copied over to output sequence tokens and + labels (offset by 1, i.e. predict next token) with weights set to 1.0, + except for token. + """ + lm_seq = SequenceWrapper() + for i, timestep in enumerate(seq): + if i == len(seq) - 1: + lm_seq.add_timestep().set_token(timestep.token).set_label( + seq[i].token).set_weight(0.0) + else: + lm_seq.add_timestep().set_token(timestep.token).set_label( + seq[i + 1].token).set_weight(1.0) + return lm_seq + + +def build_seq_ae_sequence(seq): + """Builds seq_ae sequence from input sequence. + + Args: + seq: SequenceWrapper. + + Returns: + SequenceWrapper with `seq` inputs copied and concatenated, and with labels + copied in on the right-hand (i.e. decoder) side with weights set to 1.0. + The new sequence will have length `len(seq) * 2 - 1`, as the last timestep + of the encoder section and the first step of the decoder section will + overlap. + """ + seq_ae_seq = SequenceWrapper() + + for i in range(len(seq) * 2 - 1): + ts = seq_ae_seq.add_timestep() + + if i < len(seq) - 1: + # Encoder + ts.set_token(seq[i].token) + elif i == len(seq) - 1: + # Transition step + ts.set_token(seq[i].token) + ts.set_label(seq[0].token) + ts.set_weight(1.0) + else: + # Decoder + ts.set_token(seq[i % len(seq)].token) + ts.set_label(seq[(i + 1) % len(seq)].token) + ts.set_weight(1.0) + + return seq_ae_seq + + +def build_labeled_sequence(seq, class_label, label_gain=False): + """Builds labeled sequence from input sequence. + + Args: + seq: SequenceWrapper. + class_label: integer, starting from 0. + label_gain: bool. If True, class_label will be put on every timestep and + weight will increase linearly from 0 to 1. + + Returns: + SequenceWrapper with `seq` copied in and `class_label` added as label to + final timestep. 
+ """ + label_seq = SequenceWrapper(multivalent_tokens=seq.multivalent_tokens) + + # Copy sequence without labels + seq_len = len(seq) + final_timestep = None + for i, timestep in enumerate(seq): + label_timestep = label_seq.add_timestep() + if seq.multivalent_tokens: + for token in timestep.tokens: + label_timestep.add_token(token) + else: + label_timestep.set_token(timestep.token) + if label_gain: + label_timestep.set_label(int(class_label)) + weight = 1.0 if seq_len < 2 else float(i) / (seq_len - 1) + label_timestep.set_weight(weight) + if i == (seq_len - 1): + final_timestep = label_timestep + + # Edit final timestep to have class label and weight = 1. + final_timestep.set_label(int(class_label)).set_weight(1.0) + + return label_seq + + +def split_by_punct(segment): + """Splits str segment by punctuation, filters our empties and spaces.""" + return [s for s in re.split(r'\W+', segment) if s and not s.isspace()] + + +def sort_vocab_by_frequency(vocab_freq_map): + """Sorts vocab_freq_map by count. + + Args: + vocab_freq_map: dict, vocabulary terms with counts. + + Returns: + list> sorted by count, descending. + """ + return sorted( + vocab_freq_map.items(), key=operator.itemgetter(1), reverse=True) + + +def write_vocab_and_frequency(ordered_vocab_freqs, output_dir): + """Writes ordered_vocab_freqs into vocab.txt and vocab_freq.txt.""" + tf.gfile.MakeDirs(output_dir) + with open(os.path.join(output_dir, 'vocab.txt'), 'w', encoding='utf-8') as vocab_f: + with open(os.path.join(output_dir, 'vocab_freq.txt'), 'w', encoding='utf-8') as freq_f: + for word, freq in ordered_vocab_freqs: + vocab_f.write('{}\n'.format(word)) + freq_f.write('{}\n'.format(freq)) diff --git a/models/research/adversarial_text/data/data_utils_test.py b/models/research/adversarial_text/data/data_utils_test.py new file mode 100644 index 0000000000000000000000000000000000000000..7d225ef08c0bfaa36b2ae32469ca1e3946e3b41a --- /dev/null +++ b/models/research/adversarial_text/data/data_utils_test.py @@ -0,0 +1,200 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for data_utils.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# Dependency imports + +import tensorflow as tf + +from data import data_utils + +data = data_utils + + +class SequenceWrapperTest(tf.test.TestCase): + + def testDefaultTimesteps(self): + seq = data.SequenceWrapper() + t1 = seq.add_timestep() + _ = seq.add_timestep() + self.assertEqual(len(seq), 2) + + self.assertEqual(t1.weight, 0.0) + self.assertEqual(t1.label, 0) + self.assertEqual(t1.token, 0) + + def testSettersAndGetters(self): + ts = data.SequenceWrapper().add_timestep() + ts.set_token(3) + ts.set_label(4) + ts.set_weight(2.0) + self.assertEqual(ts.token, 3) + self.assertEqual(ts.label, 4) + self.assertEqual(ts.weight, 2.0) + + def testTimestepIteration(self): + seq = data.SequenceWrapper() + seq.add_timestep().set_token(0) + seq.add_timestep().set_token(1) + seq.add_timestep().set_token(2) + for i, ts in enumerate(seq): + self.assertEqual(ts.token, i) + + def testFillsSequenceExampleCorrectly(self): + seq = data.SequenceWrapper() + seq.add_timestep().set_token(1).set_label(2).set_weight(3.0) + seq.add_timestep().set_token(10).set_label(20).set_weight(30.0) + + seq_ex = seq.seq + fl = seq_ex.feature_lists.feature_list + fl_token = fl[data.SequenceWrapper.F_TOKEN_ID].feature + fl_label = fl[data.SequenceWrapper.F_LABEL].feature + fl_weight = fl[data.SequenceWrapper.F_WEIGHT].feature + _ = [self.assertEqual(len(f), 2) for f in [fl_token, fl_label, fl_weight]] + self.assertAllEqual([f.int64_list.value[0] for f in fl_token], [1, 10]) + self.assertAllEqual([f.int64_list.value[0] for f in fl_label], [2, 20]) + self.assertAllEqual([f.float_list.value[0] for f in fl_weight], [3.0, 30.0]) + + +class DataUtilsTest(tf.test.TestCase): + + def testSplitByPunct(self): + output = data.split_by_punct( + 'hello! world, i\'ve been\nwaiting\tfor\ryou for.a long time') + expected = [ + 'hello', 'world', 'i', 've', 'been', 'waiting', 'for', 'you', 'for', + 'a', 'long', 'time' + ] + self.assertListEqual(output, expected) + + def _buildDummySequence(self): + seq = data.SequenceWrapper() + for i in range(10): + seq.add_timestep().set_token(i) + return seq + + def testBuildLMSeq(self): + seq = self._buildDummySequence() + lm_seq = data.build_lm_sequence(seq) + for i, ts in enumerate(lm_seq): + # For end of sequence, the token and label should be same, and weight + # should be 0.0. + if i == len(lm_seq) - 1: + self.assertEqual(ts.token, i) + self.assertEqual(ts.label, i) + self.assertEqual(ts.weight, 0.0) + else: + self.assertEqual(ts.token, i) + self.assertEqual(ts.label, i + 1) + self.assertEqual(ts.weight, 1.0) + + def testBuildSAESeq(self): + seq = self._buildDummySequence() + sa_seq = data.build_seq_ae_sequence(seq) + + self.assertEqual(len(sa_seq), len(seq) * 2 - 1) + + # Tokens should be sequence twice, minus the EOS token at the end + for i, ts in enumerate(sa_seq): + self.assertEqual(ts.token, seq[i % 10].token) + + # Weights should be len-1 0.0's and len 1.0's. 
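    # (the encoder half of the seq-AE sequence carries no loss; only the
    #  transition step and the decoder half are weighted.)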
+ for i in range(len(seq) - 1): + self.assertEqual(sa_seq[i].weight, 0.0) + for i in range(len(seq) - 1, len(sa_seq)): + self.assertEqual(sa_seq[i].weight, 1.0) + + # Labels should be len-1 0's, and then the sequence + for i in range(len(seq) - 1): + self.assertEqual(sa_seq[i].label, 0) + for i in range(len(seq) - 1, len(sa_seq)): + self.assertEqual(sa_seq[i].label, seq[i - (len(seq) - 1)].token) + + def testBuildLabelSeq(self): + seq = self._buildDummySequence() + eos_id = len(seq) - 1 + label_seq = data.build_labeled_sequence(seq, True) + for i, ts in enumerate(label_seq[:-1]): + self.assertEqual(ts.token, i) + self.assertEqual(ts.label, 0) + self.assertEqual(ts.weight, 0.0) + + final_timestep = label_seq[-1] + self.assertEqual(final_timestep.token, eos_id) + self.assertEqual(final_timestep.label, 1) + self.assertEqual(final_timestep.weight, 1.0) + + def testBuildBidirLabelSeq(self): + seq = self._buildDummySequence() + reverse_seq = data.build_reverse_sequence(seq) + bidir_seq = data.build_bidirectional_seq(seq, reverse_seq) + label_seq = data.build_labeled_sequence(bidir_seq, True) + + for (i, ts), j in zip( + enumerate(label_seq[:-1]), reversed(range(len(seq) - 1))): + self.assertAllEqual(ts.tokens, [i, j]) + self.assertEqual(ts.label, 0) + self.assertEqual(ts.weight, 0.0) + + final_timestep = label_seq[-1] + eos_id = len(seq) - 1 + self.assertAllEqual(final_timestep.tokens, [eos_id, eos_id]) + self.assertEqual(final_timestep.label, 1) + self.assertEqual(final_timestep.weight, 1.0) + + def testReverseSeq(self): + seq = self._buildDummySequence() + reverse_seq = data.build_reverse_sequence(seq) + for i, ts in enumerate(reversed(reverse_seq[:-1])): + self.assertEqual(ts.token, i) + self.assertEqual(ts.label, 0) + self.assertEqual(ts.weight, 0.0) + + final_timestep = reverse_seq[-1] + eos_id = len(seq) - 1 + self.assertEqual(final_timestep.token, eos_id) + self.assertEqual(final_timestep.label, 0) + self.assertEqual(final_timestep.weight, 0.0) + + def testBidirSeq(self): + seq = self._buildDummySequence() + reverse_seq = data.build_reverse_sequence(seq) + bidir_seq = data.build_bidirectional_seq(seq, reverse_seq) + for (i, ts), j in zip( + enumerate(bidir_seq[:-1]), reversed(range(len(seq) - 1))): + self.assertAllEqual(ts.tokens, [i, j]) + self.assertEqual(ts.label, 0) + self.assertEqual(ts.weight, 0.0) + + final_timestep = bidir_seq[-1] + eos_id = len(seq) - 1 + self.assertAllEqual(final_timestep.tokens, [eos_id, eos_id]) + self.assertEqual(final_timestep.label, 0) + self.assertEqual(final_timestep.weight, 0.0) + + def testLabelGain(self): + seq = self._buildDummySequence() + label_seq = data.build_labeled_sequence(seq, True, label_gain=True) + for i, ts in enumerate(label_seq): + self.assertEqual(ts.token, i) + self.assertEqual(ts.label, 1) + self.assertNear(ts.weight, float(i) / (len(seq) - 1), 1e-3) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/adversarial_text/data/document_generators.py b/models/research/adversarial_text/data/document_generators.py new file mode 100644 index 0000000000000000000000000000000000000000..00d515bff7b8a505d4a2086f4258c453e78b278c --- /dev/null +++ b/models/research/adversarial_text/data/document_generators.py @@ -0,0 +1,383 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Input readers and document/token generators for datasets.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from collections import namedtuple +import csv +import os +import random + +# Dependency imports + +import tensorflow as tf + +from data import data_utils + +flags = tf.app.flags +FLAGS = flags.FLAGS + +flags.DEFINE_string('dataset', '', 'Which dataset to generate data for') + +# Preprocessing config +flags.DEFINE_boolean('output_unigrams', True, 'Whether to output unigrams.') +flags.DEFINE_boolean('output_bigrams', False, 'Whether to output bigrams.') +flags.DEFINE_boolean('output_char', False, 'Whether to output characters.') +flags.DEFINE_boolean('lowercase', True, 'Whether to lowercase document terms.') + +# IMDB +flags.DEFINE_string('imdb_input_dir', '', 'The input directory containing the ' + 'IMDB sentiment dataset.') +flags.DEFINE_integer('imdb_validation_pos_start_id', 10621, 'File id of the ' + 'first file in the pos sentiment validation set.') +flags.DEFINE_integer('imdb_validation_neg_start_id', 10625, 'File id of the ' + 'first file in the neg sentiment validation set.') + +# DBpedia +flags.DEFINE_string('dbpedia_input_dir', '', + 'Path to DBpedia directory containing train.csv and ' + 'test.csv.') + +# Reuters Corpus (rcv1) +flags.DEFINE_string('rcv1_input_dir', '', + 'Path to rcv1 directory containing train.csv, unlab.csv, ' + 'and test.csv.') + +# Rotten Tomatoes +flags.DEFINE_string('rt_input_dir', '', + 'The Rotten Tomatoes dataset input directory.') + +# The amazon reviews input file to use in either the RT or IMDB datasets. +flags.DEFINE_string('amazon_unlabeled_input_file', '', + 'The unlabeled Amazon Reviews dataset input file. If set, ' + 'the input file is used to augment RT and IMDB vocab.') + +Document = namedtuple('Document', + 'content is_validation is_test label add_tokens') + + +def documents(dataset='train', + include_unlabeled=False, + include_validation=False): + """Generates Documents based on FLAGS.dataset. + + Args: + dataset: str, identifies folder within IMDB data directory, test or train. + include_unlabeled: bool, whether to include the unsup directory. Only valid + when dataset=train. + include_validation: bool, whether to include validation data. + + Yields: + Document + + Raises: + ValueError: if include_unlabeled is true but dataset is not 'train' + """ + + if include_unlabeled and dataset != 'train': + raise ValueError('If include_unlabeled=True, must use train dataset') + + # Set the random seed so that we have the same validation set when running + # gen_data and gen_vocab. 
+ random.seed(302) + + ds = FLAGS.dataset + if ds == 'imdb': + docs_gen = imdb_documents + elif ds == 'dbpedia': + docs_gen = dbpedia_documents + elif ds == 'rcv1': + docs_gen = rcv1_documents + elif ds == 'rt': + docs_gen = rt_documents + else: + raise ValueError('Unrecognized dataset %s' % FLAGS.dataset) + + for doc in docs_gen(dataset, include_unlabeled, include_validation): + yield doc + + +def tokens(doc): + """Given a Document, produces character or word tokens. + + Tokens can be either characters, or word-level tokens (unigrams and/or + bigrams). + + Args: + doc: Document to produce tokens from. + + Yields: + token + + Raises: + ValueError: if all FLAGS.{output_unigrams, output_bigrams, output_char} + are False. + """ + if not (FLAGS.output_unigrams or FLAGS.output_bigrams or FLAGS.output_char): + raise ValueError( + 'At least one of {FLAGS.output_unigrams, FLAGS.output_bigrams, ' + 'FLAGS.output_char} must be true') + + content = doc.content.strip() + if FLAGS.lowercase: + content = content.lower() + + if FLAGS.output_char: + for char in content: + yield char + + else: + tokens_ = data_utils.split_by_punct(content) + for i, token in enumerate(tokens_): + if FLAGS.output_unigrams: + yield token + + if FLAGS.output_bigrams: + previous_token = (tokens_[i - 1] if i > 0 else data_utils.EOS_TOKEN) + bigram = '_'.join([previous_token, token]) + yield bigram + if (i + 1) == len(tokens_): + bigram = '_'.join([token, data_utils.EOS_TOKEN]) + yield bigram + + +def imdb_documents(dataset='train', + include_unlabeled=False, + include_validation=False): + """Generates Documents for IMDB dataset. + + Data from http://ai.stanford.edu/~amaas/data/sentiment/ + + Args: + dataset: str, identifies folder within IMDB data directory, test or train. + include_unlabeled: bool, whether to include the unsup directory. Only valid + when dataset=train. + include_validation: bool, whether to include validation data. + + Yields: + Document + + Raises: + ValueError: if FLAGS.imdb_input_dir is empty. + """ + if not FLAGS.imdb_input_dir: + raise ValueError('Must provide FLAGS.imdb_input_dir') + + tf.logging.info('Generating IMDB documents...') + + def check_is_validation(filename, class_label): + if class_label is None: + return False + file_idx = int(filename.split('_')[0]) + is_pos_valid = (class_label and + file_idx >= FLAGS.imdb_validation_pos_start_id) + is_neg_valid = (not class_label and + file_idx >= FLAGS.imdb_validation_neg_start_id) + return is_pos_valid or is_neg_valid + + dirs = [(dataset + '/pos', True), (dataset + '/neg', False)] + if include_unlabeled: + dirs.append(('train/unsup', None)) + + for d, class_label in dirs: + for filename in os.listdir(os.path.join(FLAGS.imdb_input_dir, d)): + is_validation = check_is_validation(filename, class_label) + if is_validation and not include_validation: + continue + + with open(os.path.join(FLAGS.imdb_input_dir, d, filename), encoding='utf-8') as imdb_f: + content = imdb_f.read() + yield Document( + content=content, + is_validation=is_validation, + is_test=False, + label=class_label, + add_tokens=True) + + if FLAGS.amazon_unlabeled_input_file and include_unlabeled: + with open(FLAGS.amazon_unlabeled_input_file, encoding='utf-8') as rt_f: + for content in rt_f: + yield Document( + content=content, + is_validation=False, + is_test=False, + label=None, + add_tokens=False) + + +def dbpedia_documents(dataset='train', + include_unlabeled=False, + include_validation=False): + """Generates Documents for DBpedia dataset. 
+ + Dataset linked to at https://github.com/zhangxiangxiao/Crepe. + + Args: + dataset: str, identifies the csv file within the DBpedia data directory, + test or train. + include_unlabeled: bool, unused. + include_validation: bool, whether to include validation data, which is a + randomly selected 10% of the data. + + Yields: + Document + + Raises: + ValueError: if FLAGS.dbpedia_input_dir is empty. + """ + del include_unlabeled + + if not FLAGS.dbpedia_input_dir: + raise ValueError('Must provide FLAGS.dbpedia_input_dir') + + tf.logging.info('Generating DBpedia documents...') + + with open(os.path.join(FLAGS.dbpedia_input_dir, dataset + '.csv')) as db_f: + reader = csv.reader(db_f) + for row in reader: + # 10% of the data is randomly held out + is_validation = random.randint(1, 10) == 1 + if is_validation and not include_validation: + continue + + content = row[1] + ' ' + row[2] + yield Document( + content=content, + is_validation=is_validation, + is_test=False, + label=int(row[0]) - 1, # Labels should start from 0 + add_tokens=True) + + +def rcv1_documents(dataset='train', + include_unlabeled=True, + include_validation=False): + # pylint:disable=line-too-long + """Generates Documents for Reuters Corpus (rcv1) dataset. + + Dataset described at + http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/lyrl2004_rcv1v2_README.htm + + Args: + dataset: str, identifies the csv file within the rcv1 data directory. + include_unlabeled: bool, whether to include the unlab file. Only valid + when dataset=train. + include_validation: bool, whether to include validation data, which is a + randomly selected 10% of the data. + + Yields: + Document + + Raises: + ValueError: if FLAGS.rcv1_input_dir is empty. + """ + # pylint:enable=line-too-long + + if not FLAGS.rcv1_input_dir: + raise ValueError('Must provide FLAGS.rcv1_input_dir') + + tf.logging.info('Generating rcv1 documents...') + + datasets = [dataset] + if include_unlabeled: + if dataset == 'train': + datasets.append('unlab') + for dset in datasets: + with open(os.path.join(FLAGS.rcv1_input_dir, dset + '.csv')) as db_f: + reader = csv.reader(db_f) + for row in reader: + # 10% of the data is randomly held out + is_validation = random.randint(1, 10) == 1 + if is_validation and not include_validation: + continue + + content = row[1] + yield Document( + content=content, + is_validation=is_validation, + is_test=False, + label=int(row[0]), + add_tokens=True) + + +def rt_documents(dataset='train', + include_unlabeled=True, + include_validation=False): + # pylint:disable=line-too-long + """Generates Documents for the Rotten Tomatoes dataset. + + Dataset available at http://www.cs.cornell.edu/people/pabo/movie-review-data/ + In this dataset, amazon reviews are used for the unlabeled data. + + Args: + dataset: str, identifies the data subdirectory. + include_unlabeled: bool, whether to include the unlabeled data. Only valid + when dataset=train. + include_validation: bool, whether to include validation data, which is a + randomly selected 10% of the data. + + Yields: + Document + + Raises: + ValueError: if FLAGS.rt_input_dir is empty. 
+ """ + # pylint:enable=line-too-long + + if not FLAGS.rt_input_dir: + raise ValueError('Must provide FLAGS.rt_input_dir') + + tf.logging.info('Generating rt documents...') + + data_files = [] + input_filenames = os.listdir(FLAGS.rt_input_dir) + for inp_fname in input_filenames: + if inp_fname.endswith('.pos'): + data_files.append((os.path.join(FLAGS.rt_input_dir, inp_fname), True)) + elif inp_fname.endswith('.neg'): + data_files.append((os.path.join(FLAGS.rt_input_dir, inp_fname), False)) + if include_unlabeled and FLAGS.amazon_unlabeled_input_file: + data_files.append((FLAGS.amazon_unlabeled_input_file, None)) + + for filename, class_label in data_files: + with open(filename) as rt_f: + for content in rt_f: + if class_label is None: + # Process Amazon Review data for unlabeled dataset + if content.startswith('review/text'): + yield Document( + content=content, + is_validation=False, + is_test=False, + label=None, + add_tokens=False) + else: + # 10% of the data is randomly held out for the validation set and + # another 10% of it is randomly held out for the test set + random_int = random.randint(1, 10) + is_validation = random_int == 1 + is_test = random_int == 2 + if (is_test and dataset != 'test') or (is_validation and + not include_validation): + continue + + yield Document( + content=content, + is_validation=is_validation, + is_test=is_test, + label=class_label, + add_tokens=True) diff --git a/models/research/adversarial_text/evaluate.py b/models/research/adversarial_text/evaluate.py new file mode 100644 index 0000000000000000000000000000000000000000..d7ea8c0188f2ba3780a3d3644cbe53514cef2f1e --- /dev/null +++ b/models/research/adversarial_text/evaluate.py @@ -0,0 +1,140 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Evaluates text classification model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import time + +# Dependency imports + +import tensorflow as tf + +import graphs + +flags = tf.app.flags +FLAGS = flags.FLAGS + +flags.DEFINE_string('master', '', + 'BNS name prefix of the Tensorflow eval master, ' + 'or "local".') +flags.DEFINE_string('eval_dir', '/tmp/text_eval', + 'Directory where to write event logs.') +flags.DEFINE_string('eval_data', 'test', 'Specify which dataset is used. ' + '("train", "valid", "test") ') + +flags.DEFINE_string('checkpoint_dir', '/tmp/text_train', + 'Directory where to read model checkpoints.') +flags.DEFINE_integer('eval_interval_secs', 60, 'How often to run the eval.') +flags.DEFINE_integer('num_examples', 32, 'Number of examples to run.') +flags.DEFINE_bool('run_once', False, 'Whether to run eval only once.') + + +def restore_from_checkpoint(sess, saver): + """Restore model from checkpoint. + + Args: + sess: Session. + saver: Saver for restoring the checkpoint. 
+ + Returns: + bool: Whether the checkpoint was found and restored + """ + ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir) + if not ckpt or not ckpt.model_checkpoint_path: + tf.logging.info('No checkpoint found at %s', FLAGS.checkpoint_dir) + return False + + saver.restore(sess, ckpt.model_checkpoint_path) + return True + + +def run_eval(eval_ops, summary_writer, saver): + """Runs evaluation over FLAGS.num_examples examples. + + Args: + eval_ops: dict + summary_writer: Summary writer. + saver: Saver. + + Returns: + dict, with value being the average over all examples. + """ + sv = tf.train.Supervisor( + logdir=FLAGS.eval_dir, saver=None, summary_op=None, summary_writer=None) + with sv.managed_session( + master=FLAGS.master, start_standard_services=False) as sess: + if not restore_from_checkpoint(sess, saver): + return + sv.start_queue_runners(sess) + + metric_names, ops = zip(*eval_ops.items()) + value_ops, update_ops = zip(*ops) + + value_ops_dict = dict(zip(metric_names, value_ops)) + + # Run update ops + num_batches = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size)) + tf.logging.info('Running %d batches for evaluation.', num_batches) + for i in range(num_batches): + if (i + 1) % 10 == 0: + tf.logging.info('Running batch %d/%d...', i + 1, num_batches) + if (i + 1) % 50 == 0: + _log_values(sess, value_ops_dict) + sess.run(update_ops) + + _log_values(sess, value_ops_dict, summary_writer=summary_writer) + + +def _log_values(sess, value_ops, summary_writer=None): + """Evaluate, log, and write summaries of the eval metrics in value_ops.""" + metric_names, value_ops = zip(*value_ops.items()) + values = sess.run(value_ops) + + tf.logging.info('Eval metric values:') + summary = tf.summary.Summary() + for name, val in zip(metric_names, values): + summary.value.add(tag=name, simple_value=val) + tf.logging.info('%s = %.3f', name, val) + + if summary_writer is not None: + global_step_val = sess.run(tf.train.get_global_step()) + tf.logging.info('Finished eval for step ' + str(global_step_val)) + summary_writer.add_summary(summary, global_step_val) + + +def main(_): + tf.logging.set_verbosity(tf.logging.INFO) + tf.gfile.MakeDirs(FLAGS.eval_dir) + tf.logging.info('Building eval graph...') + output = graphs.get_model().eval_graph(FLAGS.eval_data) + eval_ops, moving_averaged_variables = output + + saver = tf.train.Saver(moving_averaged_variables) + summary_writer = tf.summary.FileWriter( + FLAGS.eval_dir, graph=tf.get_default_graph()) + + while True: + run_eval(eval_ops, summary_writer, saver) + if FLAGS.run_once: + break + time.sleep(FLAGS.eval_interval_secs) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/adversarial_text/gen_data.py b/models/research/adversarial_text/gen_data.py new file mode 100644 index 0000000000000000000000000000000000000000..2c3de65b7999a3986bf98c613db27e72c67aabc0 --- /dev/null +++ b/models/research/adversarial_text/gen_data.py @@ -0,0 +1,217 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Create TFRecord files of SequenceExample protos from dataset. + +Constructs 3 datasets: + 1. Labeled data for the LSTM classification model, optionally with label gain. + "*_classification.tfrecords" (for both unidirectional and bidirectional + models). + 2. Data for the unsupervised LM-LSTM model that predicts the next token. + "*_lm.tfrecords" (generates forward and reverse data). + 3. Data for the unsupervised SA-LSTM model that uses Seq2Seq. + "*_sa.tfrecords". +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import string + +# Dependency imports + +import tensorflow as tf + +from data import data_utils +from data import document_generators + +data = data_utils +flags = tf.app.flags +FLAGS = flags.FLAGS + +# Flags for input data are in document_generators.py +flags.DEFINE_string('vocab_file', '', 'Path to the vocabulary file. Defaults ' + 'to FLAGS.output_dir/vocab.txt.') +flags.DEFINE_string('output_dir', '', 'Path to save tfrecords.') + +# Config +flags.DEFINE_boolean('label_gain', False, + 'Enable linear label gain. If True, sentiment label will ' + 'be included at each timestep with linear weight ' + 'increase.') + + +def build_shuffling_tf_record_writer(fname): + return data.ShufflingTFRecordWriter(os.path.join(FLAGS.output_dir, fname)) + + +def build_tf_record_writer(fname): + return tf.python_io.TFRecordWriter(os.path.join(FLAGS.output_dir, fname)) + + +def build_input_sequence(doc, vocab_ids): + """Builds input sequence from file. + + Splits lines on whitespace. Treats punctuation as whitespace. For word-level + sequences, only keeps terms that are in the vocab. + + Terms are added as token in the SequenceExample. The EOS_TOKEN is also + appended. Label and weight features are set to 0. + + Args: + doc: Document (defined in `document_generators`) from which to build the + sequence. + vocab_ids: dict. + + Returns: + SequenceExampleWrapper. 
+ """ + seq = data.SequenceWrapper() + for token in document_generators.tokens(doc): + if token in vocab_ids: + seq.add_timestep().set_token(vocab_ids[token]) + + # Add EOS token to end + seq.add_timestep().set_token(vocab_ids[data.EOS_TOKEN]) + + return seq + + +def make_vocab_ids(vocab_filename): + if FLAGS.output_char: + ret = dict([(char, i) for i, char in enumerate(string.printable)]) + ret[data.EOS_TOKEN] = len(string.printable) + return ret + else: + with open(vocab_filename, encoding='utf-8') as vocab_f: + return dict([(line.strip(), i) for i, line in enumerate(vocab_f)]) + + +def generate_training_data(vocab_ids, writer_lm_all, writer_seq_ae_all): + """Generates training data.""" + + # Construct training data writers + writer_lm = build_shuffling_tf_record_writer(data.TRAIN_LM) + writer_seq_ae = build_shuffling_tf_record_writer(data.TRAIN_SA) + writer_class = build_shuffling_tf_record_writer(data.TRAIN_CLASS) + writer_valid_class = build_tf_record_writer(data.VALID_CLASS) + writer_rev_lm = build_shuffling_tf_record_writer(data.TRAIN_REV_LM) + writer_bd_class = build_shuffling_tf_record_writer(data.TRAIN_BD_CLASS) + writer_bd_valid_class = build_shuffling_tf_record_writer(data.VALID_BD_CLASS) + + for doc in document_generators.documents( + dataset='train', include_unlabeled=True, include_validation=True): + input_seq = build_input_sequence(doc, vocab_ids) + if len(input_seq) < 2: + continue + rev_seq = data.build_reverse_sequence(input_seq) + lm_seq = data.build_lm_sequence(input_seq) + rev_lm_seq = data.build_lm_sequence(rev_seq) + seq_ae_seq = data.build_seq_ae_sequence(input_seq) + if doc.label is not None: + # Used for sentiment classification. + label_seq = data.build_labeled_sequence( + input_seq, + doc.label, + label_gain=(FLAGS.label_gain and not doc.is_validation)) + bd_label_seq = data.build_labeled_sequence( + data.build_bidirectional_seq(input_seq, rev_seq), + doc.label, + label_gain=(FLAGS.label_gain and not doc.is_validation)) + class_writer = writer_valid_class if doc.is_validation else writer_class + bd_class_writer = (writer_bd_valid_class + if doc.is_validation else writer_bd_class) + class_writer.write(label_seq.seq.SerializeToString()) + bd_class_writer.write(bd_label_seq.seq.SerializeToString()) + + # Write + lm_seq_ser = lm_seq.seq.SerializeToString() + seq_ae_seq_ser = seq_ae_seq.seq.SerializeToString() + writer_lm_all.write(lm_seq_ser) + writer_seq_ae_all.write(seq_ae_seq_ser) + if not doc.is_validation: + writer_lm.write(lm_seq_ser) + writer_rev_lm.write(rev_lm_seq.seq.SerializeToString()) + writer_seq_ae.write(seq_ae_seq_ser) + + # Close writers + writer_lm.close() + writer_seq_ae.close() + writer_class.close() + writer_valid_class.close() + writer_rev_lm.close() + writer_bd_class.close() + writer_bd_valid_class.close() + + +def generate_test_data(vocab_ids, writer_lm_all, writer_seq_ae_all): + """Generates test data.""" + # Construct test data writers + writer_lm = build_shuffling_tf_record_writer(data.TEST_LM) + writer_rev_lm = build_shuffling_tf_record_writer(data.TEST_REV_LM) + writer_seq_ae = build_shuffling_tf_record_writer(data.TEST_SA) + writer_class = build_tf_record_writer(data.TEST_CLASS) + writer_bd_class = build_shuffling_tf_record_writer(data.TEST_BD_CLASS) + + for doc in document_generators.documents( + dataset='test', include_unlabeled=False, include_validation=True): + input_seq = build_input_sequence(doc, vocab_ids) + if len(input_seq) < 2: + continue + rev_seq = data.build_reverse_sequence(input_seq) + lm_seq = 
data.build_lm_sequence(input_seq) + rev_lm_seq = data.build_lm_sequence(rev_seq) + seq_ae_seq = data.build_seq_ae_sequence(input_seq) + label_seq = data.build_labeled_sequence(input_seq, doc.label) + bd_label_seq = data.build_labeled_sequence( + data.build_bidirectional_seq(input_seq, rev_seq), doc.label) + + # Write + writer_class.write(label_seq.seq.SerializeToString()) + writer_bd_class.write(bd_label_seq.seq.SerializeToString()) + lm_seq_ser = lm_seq.seq.SerializeToString() + seq_ae_seq_ser = seq_ae_seq.seq.SerializeToString() + writer_lm.write(lm_seq_ser) + writer_rev_lm.write(rev_lm_seq.seq.SerializeToString()) + writer_seq_ae.write(seq_ae_seq_ser) + writer_lm_all.write(lm_seq_ser) + writer_seq_ae_all.write(seq_ae_seq_ser) + + # Close test writers + writer_lm.close() + writer_rev_lm.close() + writer_seq_ae.close() + writer_class.close() + writer_bd_class.close() + + +def main(_): + tf.logging.set_verbosity(tf.logging.INFO) + tf.logging.info('Assigning vocabulary ids...') + vocab_ids = make_vocab_ids( + FLAGS.vocab_file or os.path.join(FLAGS.output_dir, 'vocab.txt')) + + with build_shuffling_tf_record_writer(data.ALL_LM) as writer_lm_all: + with build_shuffling_tf_record_writer(data.ALL_SA) as writer_seq_ae_all: + + tf.logging.info('Generating training data...') + generate_training_data(vocab_ids, writer_lm_all, writer_seq_ae_all) + + tf.logging.info('Generating test data...') + generate_test_data(vocab_ids, writer_lm_all, writer_seq_ae_all) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/adversarial_text/gen_vocab.py b/models/research/adversarial_text/gen_vocab.py new file mode 100644 index 0000000000000000000000000000000000000000..17b91864ce727886adaafa5402fd9e62897563ca --- /dev/null +++ b/models/research/adversarial_text/gen_vocab.py @@ -0,0 +1,101 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Generates vocabulary and term frequency files for datasets.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from six import iteritems + +from collections import defaultdict + +# Dependency imports + +import tensorflow as tf + +from data import data_utils +from data import document_generators + +flags = tf.app.flags +FLAGS = flags.FLAGS + +# Flags controlling input are in document_generators.py + +flags.DEFINE_string('output_dir', '', + 'Path to save vocab.txt and vocab_freq.txt.') + +flags.DEFINE_boolean('use_unlabeled', True, 'Whether to use the ' + 'unlabeled sentiment dataset in the vocabulary.') +flags.DEFINE_boolean('include_validation', False, 'Whether to include the ' + 'validation set in the vocabulary.') +flags.DEFINE_integer('doc_count_threshold', 1, 'The minimum number of ' + 'documents a word or bigram should occur in to keep ' + 'it in the vocabulary.') + +MAX_VOCAB_SIZE = 100 * 1000 + + +def fill_vocab_from_doc(doc, vocab_freqs, doc_counts): + """Fills vocabulary and doc counts with tokens from doc. + + Args: + doc: Document to read tokens from. + vocab_freqs: dict + doc_counts: dict + + Returns: + None + """ + doc_seen = set() + + for token in document_generators.tokens(doc): + if doc.add_tokens or token in vocab_freqs: + vocab_freqs[token] += 1 + if token not in doc_seen: + doc_counts[token] += 1 + doc_seen.add(token) + + +def main(_): + tf.logging.set_verbosity(tf.logging.INFO) + vocab_freqs = defaultdict(int) + doc_counts = defaultdict(int) + + # Fill vocabulary frequencies map and document counts map + for doc in document_generators.documents( + dataset='train', + include_unlabeled=FLAGS.use_unlabeled, + include_validation=FLAGS.include_validation): + fill_vocab_from_doc(doc, vocab_freqs, doc_counts) + + # Filter out low-occurring terms + vocab_freqs = dict((term, freq) for term, freq in iteritems(vocab_freqs) + if doc_counts[term] > FLAGS.doc_count_threshold) + + # Sort by frequency + ordered_vocab_freqs = data_utils.sort_vocab_by_frequency(vocab_freqs) + + # Limit vocab size + ordered_vocab_freqs = ordered_vocab_freqs[:MAX_VOCAB_SIZE] + + # Add EOS token + ordered_vocab_freqs.append((data_utils.EOS_TOKEN, 1)) + + # Write + tf.gfile.MakeDirs(FLAGS.output_dir) + data_utils.write_vocab_and_frequency(ordered_vocab_freqs, FLAGS.output_dir) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/adversarial_text/graphs.py b/models/research/adversarial_text/graphs.py new file mode 100644 index 0000000000000000000000000000000000000000..9610a698dd0420177a0dead4904830d661e9d3bf --- /dev/null +++ b/models/research/adversarial_text/graphs.py @@ -0,0 +1,687 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Virtual adversarial text models.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import csv +import os + +# Dependency imports + +import tensorflow as tf + +import adversarial_losses as adv_lib +import inputs as inputs_lib +import layers as layers_lib + +flags = tf.app.flags +FLAGS = flags.FLAGS + +# Flags governing adversarial training are defined in adversarial_losses.py. + +# Classifier +flags.DEFINE_integer('num_classes', 2, 'Number of classes for classification') + +# Data path +flags.DEFINE_string('data_dir', '/tmp/IMDB', + 'Directory path to preprocessed text dataset.') +flags.DEFINE_string('vocab_freq_path', None, + 'Path to pre-calculated vocab frequency data. If ' + 'None, use FLAGS.data_dir/vocab_freq.txt.') +flags.DEFINE_integer('batch_size', 64, 'Size of the batch.') +flags.DEFINE_integer('num_timesteps', 100, 'Number of timesteps for BPTT') + +# Model architechture +flags.DEFINE_bool('bidir_lstm', False, 'Whether to build a bidirectional LSTM.') +flags.DEFINE_bool('single_label', True, 'Whether the sequence has a single ' + 'label, for optimization.') +flags.DEFINE_integer('rnn_num_layers', 1, 'Number of LSTM layers.') +flags.DEFINE_integer('rnn_cell_size', 512, + 'Number of hidden units in the LSTM.') +flags.DEFINE_integer('cl_num_layers', 1, + 'Number of hidden layers of classification model.') +flags.DEFINE_integer('cl_hidden_size', 30, + 'Number of hidden units in classification layer.') +flags.DEFINE_integer('num_candidate_samples', -1, + 'Num samples used in the sampled output layer.') +flags.DEFINE_bool('use_seq2seq_autoencoder', False, + 'If True, seq2seq auto-encoder is used to pretrain. ' + 'If False, standard language model is used.') + +# Vocabulary and embeddings +flags.DEFINE_integer('embedding_dims', 256, 'Dimensions of embedded vector.') +flags.DEFINE_integer('vocab_size', 86934, + 'The size of the vocaburary. This value ' + 'should be exactly same as the number of the ' + 'vocabulary used in dataset. Because the last ' + 'indexed vocabulary of the dataset preprocessed by ' + 'my preprocessed code, is always and here we ' + 'specify the with the the index.') +flags.DEFINE_bool('normalize_embeddings', True, + 'Normalize word embeddings by vocab frequency') + +# Optimization +flags.DEFINE_float('learning_rate', 0.001, 'Learning rate while fine-tuning.') +flags.DEFINE_float('learning_rate_decay_factor', 1.0, + 'Learning rate decay factor') +flags.DEFINE_boolean('sync_replicas', False, 'sync_replica or not') +flags.DEFINE_integer('replicas_to_aggregate', 1, + 'The number of replicas to aggregate') + +# Regularization +flags.DEFINE_float('max_grad_norm', 1.0, + 'Clip the global gradient norm to this value.') +flags.DEFINE_float('keep_prob_emb', 1.0, 'keep probability on embedding layer. ' + '0.5 is optimal on IMDB with virtual adversarial training.') +flags.DEFINE_float('keep_prob_lstm_out', 1.0, + 'keep probability on lstm output.') +flags.DEFINE_float('keep_prob_cl_hidden', 1.0, + 'keep probability on classification hidden layer') + + +def get_model(): + if FLAGS.bidir_lstm: + return VatxtBidirModel() + else: + return VatxtModel() + + +class VatxtModel(object): + """Constructs training and evaluation graphs. + + Main methods: `classifier_training()`, `language_model_training()`, + and `eval_graph()`. 
+ + Variable reuse is a critical part of the model, both for sharing variables + between the language model and the classifier, and for reusing variables for + the adversarial loss calculation. To ensure correct variable reuse, all + variables are created in Keras-style layers, wherein stateful layers (i.e. + layers with variables) are represented as callable instances of the Layer + class. Each time the Layer instance is called, it is using the same variables. + + All Layers are constructed in the __init__ method and reused in the various + graph-building functions. + """ + + def __init__(self, cl_logits_input_dim=None): + self.global_step = tf.train.get_or_create_global_step() + self.vocab_freqs = _get_vocab_freqs() + + # Cache VatxtInput objects + self.cl_inputs = None + self.lm_inputs = None + + # Cache intermediate Tensors that are reused + self.tensors = {} + + # Construct layers which are reused in constructing the LM and + # Classification graphs. Instantiating them all once here ensures that + # variable reuse works correctly. + self.layers = {} + self.layers['embedding'] = layers_lib.Embedding( + FLAGS.vocab_size, FLAGS.embedding_dims, FLAGS.normalize_embeddings, + self.vocab_freqs, FLAGS.keep_prob_emb) + self.layers['lstm'] = layers_lib.LSTM( + FLAGS.rnn_cell_size, FLAGS.rnn_num_layers, FLAGS.keep_prob_lstm_out) + self.layers['lm_loss'] = layers_lib.SoftmaxLoss( + FLAGS.vocab_size, + FLAGS.num_candidate_samples, + self.vocab_freqs, + name='LM_loss') + + cl_logits_input_dim = cl_logits_input_dim or FLAGS.rnn_cell_size + self.layers['cl_logits'] = layers_lib.cl_logits_subgraph( + [FLAGS.cl_hidden_size] * FLAGS.cl_num_layers, cl_logits_input_dim, + FLAGS.num_classes, FLAGS.keep_prob_cl_hidden) + + @property + def pretrained_variables(self): + return (self.layers['embedding'].trainable_weights + + self.layers['lstm'].trainable_weights) + + def classifier_training(self): + loss = self.classifier_graph() + train_op = optimize(loss, self.global_step) + return train_op, loss, self.global_step + + def language_model_training(self): + loss = self.language_model_graph() + train_op = optimize(loss, self.global_step) + return train_op, loss, self.global_step + + def classifier_graph(self): + """Constructs classifier graph from inputs to classifier loss. + + * Caches the VatxtInput object in `self.cl_inputs` + * Caches tensors: `cl_embedded`, `cl_logits`, `cl_loss` + + Returns: + loss: scalar float. 
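# A small sketch of the variable-reuse convention described in the class
# docstring above: a single Keras-style layer instance owns its variables, so
# calling that same instance on different tensors (e.g. clean embeddings and
# perturbed embeddings) reuses one set of weights. The Dense layer below is
# only a stand-in for the Embedding/LSTM/loss layers built in __init__.
import tensorflow as tf

shared = tf.keras.layers.Dense(4, name='shared_layer')
out_a = shared(tf.placeholder(tf.float32, [None, 8]))
out_b = shared(tf.placeholder(tf.float32, [None, 8]))  # same kernel and bias

# Two calls, but still exactly one kernel and one bias variable.
print([w.name for w in shared.trainable_weights])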
+ """ + inputs = _inputs('train', pretrain=False) + self.cl_inputs = inputs + embedded = self.layers['embedding'](inputs.tokens) + self.tensors['cl_embedded'] = embedded + + _, next_state, logits, loss = self.cl_loss_from_embedding( + embedded, return_intermediates=True) + tf.summary.scalar('classification_loss', loss) + self.tensors['cl_logits'] = logits + self.tensors['cl_loss'] = loss + + if FLAGS.single_label: + indices = tf.stack([tf.range(FLAGS.batch_size), inputs.length - 1], 1) + labels = tf.expand_dims(tf.gather_nd(inputs.labels, indices), 1) + weights = tf.expand_dims(tf.gather_nd(inputs.weights, indices), 1) + else: + labels = inputs.labels + weights = inputs.weights + acc = layers_lib.accuracy(logits, labels, weights) + tf.summary.scalar('accuracy', acc) + + adv_loss = (self.adversarial_loss() * tf.constant( + FLAGS.adv_reg_coeff, name='adv_reg_coeff')) + tf.summary.scalar('adversarial_loss', adv_loss) + + total_loss = loss + adv_loss + + with tf.control_dependencies([inputs.save_state(next_state)]): + total_loss = tf.identity(total_loss) + tf.summary.scalar('total_classification_loss', total_loss) + return total_loss + + def language_model_graph(self, compute_loss=True): + """Constructs LM graph from inputs to LM loss. + + * Caches the VatxtInput object in `self.lm_inputs` + * Caches tensors: `lm_embedded` + + Args: + compute_loss: bool, whether to compute and return the loss or stop after + the LSTM computation. + + Returns: + loss: scalar float. + """ + inputs = _inputs('train', pretrain=True) + self.lm_inputs = inputs + return self._lm_loss(inputs, compute_loss=compute_loss) + + def _lm_loss(self, + inputs, + emb_key='lm_embedded', + lstm_layer='lstm', + lm_loss_layer='lm_loss', + loss_name='lm_loss', + compute_loss=True): + embedded = self.layers['embedding'](inputs.tokens) + self.tensors[emb_key] = embedded + lstm_out, next_state = self.layers[lstm_layer](embedded, inputs.state, + inputs.length) + if compute_loss: + loss = self.layers[lm_loss_layer]( + [lstm_out, inputs.labels, inputs.weights]) + with tf.control_dependencies([inputs.save_state(next_state)]): + loss = tf.identity(loss) + tf.summary.scalar(loss_name, loss) + + return loss + + def eval_graph(self, dataset='test'): + """Constructs classifier evaluation graph. + + Args: + dataset: the labeled dataset to evaluate, {'train', 'test', 'valid'}. + + Returns: + eval_ops: dict + var_restore_dict: dict mapping variable restoration names to variables. + Trainable variables will be mapped to their moving average names. 
+ """ + inputs = _inputs(dataset, pretrain=False) + embedded = self.layers['embedding'](inputs.tokens) + _, next_state, logits, _ = self.cl_loss_from_embedding( + embedded, inputs=inputs, return_intermediates=True) + + if FLAGS.single_label: + indices = tf.stack([tf.range(FLAGS.batch_size), inputs.length - 1], 1) + labels = tf.expand_dims(tf.gather_nd(inputs.labels, indices), 1) + weights = tf.expand_dims(tf.gather_nd(inputs.weights, indices), 1) + else: + labels = inputs.labels + weights = inputs.weights + eval_ops = { + 'accuracy': + tf.contrib.metrics.streaming_accuracy( + layers_lib.predictions(logits), labels, weights) + } + + with tf.control_dependencies([inputs.save_state(next_state)]): + acc, acc_update = eval_ops['accuracy'] + acc_update = tf.identity(acc_update) + eval_ops['accuracy'] = (acc, acc_update) + + var_restore_dict = make_restore_average_vars_dict() + return eval_ops, var_restore_dict + + def cl_loss_from_embedding(self, + embedded, + inputs=None, + return_intermediates=False): + """Compute classification loss from embedding. + + Args: + embedded: 3-D float Tensor [batch_size, num_timesteps, embedding_dim] + inputs: VatxtInput, defaults to self.cl_inputs. + return_intermediates: bool, whether to return intermediate tensors or only + the final loss. + + Returns: + If return_intermediates is True: + lstm_out, next_state, logits, loss + Else: + loss + """ + if inputs is None: + inputs = self.cl_inputs + + lstm_out, next_state = self.layers['lstm'](embedded, inputs.state, + inputs.length) + if FLAGS.single_label: + indices = tf.stack([tf.range(FLAGS.batch_size), inputs.length - 1], 1) + lstm_out = tf.expand_dims(tf.gather_nd(lstm_out, indices), 1) + labels = tf.expand_dims(tf.gather_nd(inputs.labels, indices), 1) + weights = tf.expand_dims(tf.gather_nd(inputs.weights, indices), 1) + else: + labels = inputs.labels + weights = inputs.weights + logits = self.layers['cl_logits'](lstm_out) + loss = layers_lib.classification_loss(logits, labels, weights) + + if return_intermediates: + return lstm_out, next_state, logits, loss + else: + return loss + + def adversarial_loss(self): + """Compute adversarial loss based on FLAGS.adv_training_method.""" + + def random_perturbation_loss(): + return adv_lib.random_perturbation_loss(self.tensors['cl_embedded'], + self.cl_inputs.length, + self.cl_loss_from_embedding) + + def adversarial_loss(): + return adv_lib.adversarial_loss(self.tensors['cl_embedded'], + self.tensors['cl_loss'], + self.cl_loss_from_embedding) + + def virtual_adversarial_loss(): + """Computes virtual adversarial loss. + + Uses lm_inputs and constructs the language model graph if it hasn't yet + been constructed. + + Also ensures that the LM input states are saved for LSTM state-saving + BPTT. + + Returns: + loss: float scalar. 
+ """ + if self.lm_inputs is None: + self.language_model_graph(compute_loss=False) + + def logits_from_embedding(embedded, return_next_state=False): + _, next_state, logits, _ = self.cl_loss_from_embedding( + embedded, inputs=self.lm_inputs, return_intermediates=True) + if return_next_state: + return next_state, logits + else: + return logits + + next_state, lm_cl_logits = logits_from_embedding( + self.tensors['lm_embedded'], return_next_state=True) + + va_loss = adv_lib.virtual_adversarial_loss( + lm_cl_logits, self.tensors['lm_embedded'], self.lm_inputs, + logits_from_embedding) + + with tf.control_dependencies([self.lm_inputs.save_state(next_state)]): + va_loss = tf.identity(va_loss) + + return va_loss + + def combo_loss(): + return adversarial_loss() + virtual_adversarial_loss() + + adv_training_methods = { + # Random perturbation + 'rp': random_perturbation_loss, + # Adversarial training + 'at': adversarial_loss, + # Virtual adversarial training + 'vat': virtual_adversarial_loss, + # Both at and vat + 'atvat': combo_loss, + '': lambda: tf.constant(0.), + None: lambda: tf.constant(0.), + } + + with tf.name_scope('adversarial_loss'): + return adv_training_methods[FLAGS.adv_training_method]() + + +class VatxtBidirModel(VatxtModel): + """Extension of VatxtModel that supports bidirectional input.""" + + def __init__(self): + super(VatxtBidirModel, + self).__init__(cl_logits_input_dim=FLAGS.rnn_cell_size * 2) + + # Reverse LSTM and LM loss for bidirectional models + self.layers['lstm_reverse'] = layers_lib.LSTM( + FLAGS.rnn_cell_size, + FLAGS.rnn_num_layers, + FLAGS.keep_prob_lstm_out, + name='LSTM_Reverse') + self.layers['lm_loss_reverse'] = layers_lib.SoftmaxLoss( + FLAGS.vocab_size, + FLAGS.num_candidate_samples, + self.vocab_freqs, + name='LM_loss_reverse') + + @property + def pretrained_variables(self): + variables = super(VatxtBidirModel, self).pretrained_variables + variables.extend(self.layers['lstm_reverse'].trainable_weights) + return variables + + def classifier_graph(self): + """Constructs classifier graph from inputs to classifier loss. + + * Caches the VatxtInput objects in `self.cl_inputs` + * Caches tensors: `cl_embedded` (tuple of forward and reverse), `cl_logits`, + `cl_loss` + + Returns: + loss: scalar float. + """ + inputs = _inputs('train', pretrain=False, bidir=True) + self.cl_inputs = inputs + f_inputs, _ = inputs + + # Embed both forward and reverse with a shared embedding + embedded = [self.layers['embedding'](inp.tokens) for inp in inputs] + self.tensors['cl_embedded'] = embedded + + _, next_states, logits, loss = self.cl_loss_from_embedding( + embedded, return_intermediates=True) + tf.summary.scalar('classification_loss', loss) + self.tensors['cl_logits'] = logits + self.tensors['cl_loss'] = loss + + acc = layers_lib.accuracy(logits, f_inputs.labels, f_inputs.weights) + tf.summary.scalar('accuracy', acc) + + adv_loss = (self.adversarial_loss() * tf.constant( + FLAGS.adv_reg_coeff, name='adv_reg_coeff')) + tf.summary.scalar('adversarial_loss', adv_loss) + + total_loss = loss + adv_loss + + + saves = [inp.save_state(state) for (inp, state) in zip(inputs, next_states)] + with tf.control_dependencies(saves): + total_loss = tf.identity(total_loss) + tf.summary.scalar('total_classification_loss', total_loss) + return total_loss + + def language_model_graph(self, compute_loss=True): + """Constructs forward and reverse LM graphs from inputs to LM losses. 
+ + * Caches the VatxtInput objects in `self.lm_inputs` + * Caches tensors: `lm_embedded`, `lm_embedded_reverse` + + Args: + compute_loss: bool, whether to compute and return the loss or stop after + the LSTM computation. + + Returns: + loss: scalar float, sum of forward and reverse losses. + """ + inputs = _inputs('train', pretrain=True, bidir=True) + self.lm_inputs = inputs + f_inputs, r_inputs = inputs + f_loss = self._lm_loss(f_inputs, compute_loss=compute_loss) + r_loss = self._lm_loss( + r_inputs, + emb_key='lm_embedded_reverse', + lstm_layer='lstm_reverse', + lm_loss_layer='lm_loss_reverse', + loss_name='lm_loss_reverse', + compute_loss=compute_loss) + if compute_loss: + return f_loss + r_loss + + def eval_graph(self, dataset='test'): + """Constructs classifier evaluation graph. + + Args: + dataset: the labeled dataset to evaluate, {'train', 'test', 'valid'}. + + Returns: + eval_ops: dict + var_restore_dict: dict mapping variable restoration names to variables. + Trainable variables will be mapped to their moving average names. + """ + inputs = _inputs(dataset, pretrain=False, bidir=True) + embedded = [self.layers['embedding'](inp.tokens) for inp in inputs] + _, next_states, logits, _ = self.cl_loss_from_embedding( + embedded, inputs=inputs, return_intermediates=True) + f_inputs, _ = inputs + + eval_ops = { + 'accuracy': + tf.contrib.metrics.streaming_accuracy( + layers_lib.predictions(logits), f_inputs.labels, + f_inputs.weights) + } + + # Save states on accuracy update + saves = [inp.save_state(state) for (inp, state) in zip(inputs, next_states)] + with tf.control_dependencies(saves): + acc, acc_update = eval_ops['accuracy'] + acc_update = tf.identity(acc_update) + eval_ops['accuracy'] = (acc, acc_update) + + var_restore_dict = make_restore_average_vars_dict() + return eval_ops, var_restore_dict + + def cl_loss_from_embedding(self, + embedded, + inputs=None, + return_intermediates=False): + """Compute classification loss from embedding. + + Args: + embedded: Length 2 tuple of 3-D float Tensor + [batch_size, num_timesteps, embedding_dim]. + inputs: Length 2 tuple of VatxtInput, defaults to self.cl_inputs. + return_intermediates: bool, whether to return intermediate tensors or only + the final loss. + + Returns: + If return_intermediates is True: + lstm_out, next_states, logits, loss + Else: + loss + """ + if inputs is None: + inputs = self.cl_inputs + + out = [] + for (layer_name, emb, inp) in zip(['lstm', 'lstm_reverse'], embedded, + inputs): + out.append(self.layers[layer_name](emb, inp.state, inp.length)) + lstm_outs, next_states = zip(*out) + + # Concatenate output of forward and reverse LSTMs + lstm_out = tf.concat(lstm_outs, 1) + + logits = self.layers['cl_logits'](lstm_out) + f_inputs, _ = inputs # pylint: disable=unpacking-non-sequence + loss = layers_lib.classification_loss(logits, f_inputs.labels, + f_inputs.weights) + + if return_intermediates: + return lstm_out, next_states, logits, loss + else: + return loss + + def adversarial_loss(self): + """Compute adversarial loss based on FLAGS.adv_training_method.""" + + def random_perturbation_loss(): + return adv_lib.random_perturbation_loss_bidir(self.tensors['cl_embedded'], + self.cl_inputs[0].length, + self.cl_loss_from_embedding) + + def adversarial_loss(): + return adv_lib.adversarial_loss_bidir(self.tensors['cl_embedded'], + self.tensors['cl_loss'], + self.cl_loss_from_embedding) + + def virtual_adversarial_loss(): + """Computes virtual adversarial loss. 
+ + Uses lm_inputs and constructs the language model graph if it hasn't yet + been constructed. + + Also ensures that the LM input states are saved for LSTM state-saving + BPTT. + + Returns: + loss: float scalar. + """ + if self.lm_inputs is None: + self.language_model_graph(compute_loss=False) + + def logits_from_embedding(embedded, return_next_state=False): + _, next_states, logits, _ = self.cl_loss_from_embedding( + embedded, inputs=self.lm_inputs, return_intermediates=True) + if return_next_state: + return next_states, logits + else: + return logits + + lm_embedded = (self.tensors['lm_embedded'], + self.tensors['lm_embedded_reverse']) + next_states, lm_cl_logits = logits_from_embedding( + lm_embedded, return_next_state=True) + + va_loss = adv_lib.virtual_adversarial_loss_bidir( + lm_cl_logits, lm_embedded, self.lm_inputs, logits_from_embedding) + + saves = [ + inp.save_state(state) + for (inp, state) in zip(self.lm_inputs, next_states) + ] + with tf.control_dependencies(saves): + va_loss = tf.identity(va_loss) + + return va_loss + + def combo_loss(): + return adversarial_loss() + virtual_adversarial_loss() + + adv_training_methods = { + # Random perturbation + 'rp': random_perturbation_loss, + # Adversarial training + 'at': adversarial_loss, + # Virtual adversarial training + 'vat': virtual_adversarial_loss, + # Both at and vat + 'atvat': combo_loss, + '': lambda: tf.constant(0.), + None: lambda: tf.constant(0.), + } + + with tf.name_scope('adversarial_loss'): + return adv_training_methods[FLAGS.adv_training_method]() + + +def _inputs(dataset='train', pretrain=False, bidir=False): + return inputs_lib.inputs( + data_dir=FLAGS.data_dir, + phase=dataset, + bidir=bidir, + pretrain=pretrain, + use_seq2seq=pretrain and FLAGS.use_seq2seq_autoencoder, + state_size=FLAGS.rnn_cell_size, + num_layers=FLAGS.rnn_num_layers, + batch_size=FLAGS.batch_size, + unroll_steps=FLAGS.num_timesteps, + eos_id=FLAGS.vocab_size - 1) + + +def _get_vocab_freqs(): + """Returns vocab frequencies. + + Returns: + List of integers, length=FLAGS.vocab_size. + + Raises: + ValueError: if the length of the frequency file is not equal to the vocab + size, or if the file is not found. + """ + path = FLAGS.vocab_freq_path or os.path.join(FLAGS.data_dir, 'vocab_freq.txt') + + if tf.gfile.Exists(path): + with tf.gfile.Open(path) as f: + # Get pre-calculated frequencies of words. 
+ reader = csv.reader(f, quoting=csv.QUOTE_NONE) + freqs = [int(row[-1]) for row in reader] + if len(freqs) != FLAGS.vocab_size: + raise ValueError('Frequency file length %d != vocab size %d' % + (len(freqs), FLAGS.vocab_size)) + else: + if FLAGS.vocab_freq_path: + raise ValueError('vocab_freq_path not found') + freqs = [1] * FLAGS.vocab_size + + return freqs + + +def make_restore_average_vars_dict(): + """Returns dict mapping moving average names to variables.""" + var_restore_dict = {} + variable_averages = tf.train.ExponentialMovingAverage(0.999) + for v in tf.global_variables(): + if v in tf.trainable_variables(): + name = variable_averages.average_name(v) + else: + name = v.op.name + var_restore_dict[name] = v + return var_restore_dict + + +def optimize(loss, global_step): + return layers_lib.optimize( + loss, global_step, FLAGS.max_grad_norm, FLAGS.learning_rate, + FLAGS.learning_rate_decay_factor, FLAGS.sync_replicas, + FLAGS.replicas_to_aggregate, FLAGS.task) diff --git a/models/research/adversarial_text/graphs_test.py b/models/research/adversarial_text/graphs_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b04765a316f35a2232bb836012e2978da7c3c128 --- /dev/null +++ b/models/research/adversarial_text/graphs_test.py @@ -0,0 +1,225 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
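# A short sketch of the moving-average restore trick implemented by
# make_restore_average_vars_dict() above: training keeps exponential moving
# averages (shadow copies) of the trainable variables, and at eval time a
# Saver keyed by average_name() loads those shadow values into the live
# variables. The variable name below is illustrative.
import tensorflow as tf

w = tf.get_variable('lstm_kernel', shape=[4, 4])
ema = tf.train.ExponentialMovingAverage(0.999)

# average_name() is the checkpoint key of the shadow value, e.g.
# 'lstm_kernel/ExponentialMovingAverage'.
print(ema.average_name(w))

# Restoring with {average_name: variable} overwrites w with its average.
saver = tf.train.Saver({ema.average_name(w): w})
# saver.restore(sess, checkpoint_path)  # given a session and a checkpoint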
+# ============================================================================== +"""Tests for graphs.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from collections import defaultdict +import operator +import os +import random +import shutil +import string +import tempfile + +# Dependency imports + +import tensorflow as tf + +import graphs +from data import data_utils + +flags = tf.app.flags +FLAGS = flags.FLAGS +data = data_utils + +flags.DEFINE_integer('task', 0, 'Task id; needed for SyncReplicas test') + + +def _build_random_vocabulary(vocab_size=100): + """Builds and returns a dict.""" + vocab = set() + while len(vocab) < (vocab_size - 1): + rand_word = ''.join( + random.choice(string.ascii_lowercase) + for _ in range(random.randint(1, 10))) + vocab.add(rand_word) + + vocab_ids = dict([(word, i) for i, word in enumerate(vocab)]) + vocab_ids[data.EOS_TOKEN] = vocab_size - 1 + return vocab_ids + + +def _build_random_sequence(vocab_ids): + seq_len = random.randint(10, 200) + ids = vocab_ids.values() + seq = data.SequenceWrapper() + for token_id in [random.choice(ids) for _ in range(seq_len)]: + seq.add_timestep().set_token(token_id) + return seq + + +def _build_vocab_frequencies(seqs, vocab_ids): + vocab_freqs = defaultdict(int) + ids_to_words = dict([(i, word) for word, i in vocab_ids.iteritems()]) + for seq in seqs: + for timestep in seq: + vocab_freqs[ids_to_words[timestep.token]] += 1 + + vocab_freqs[data.EOS_TOKEN] = 0 + return vocab_freqs + + +class GraphsTest(tf.test.TestCase): + """Test graph construction methods.""" + + @classmethod + def setUpClass(cls): + # Make model small + FLAGS.batch_size = 2 + FLAGS.num_timesteps = 3 + FLAGS.embedding_dims = 4 + FLAGS.rnn_num_layers = 2 + FLAGS.rnn_cell_size = 4 + FLAGS.cl_num_layers = 2 + FLAGS.cl_hidden_size = 4 + FLAGS.vocab_size = 10 + + # Set input/output flags + FLAGS.data_dir = tempfile.mkdtemp() + + # Build and write sequence files. 
+ vocab_ids = _build_random_vocabulary(FLAGS.vocab_size) + seqs = [_build_random_sequence(vocab_ids) for _ in range(5)] + seqs_label = [ + data.build_labeled_sequence(seq, random.choice([True, False])) + for seq in seqs + ] + seqs_lm = [data.build_lm_sequence(seq) for seq in seqs] + seqs_ae = [data.build_seq_ae_sequence(seq) for seq in seqs] + seqs_rev = [data.build_reverse_sequence(seq) for seq in seqs] + seqs_bidir = [ + data.build_bidirectional_seq(seq, rev) + for seq, rev in zip(seqs, seqs_rev) + ] + seqs_bidir_label = [ + data.build_labeled_sequence(bd_seq, random.choice([True, False])) + for bd_seq in seqs_bidir + ] + + filenames = [ + data.TRAIN_CLASS, data.TRAIN_LM, data.TRAIN_SA, data.TEST_CLASS, + data.TRAIN_REV_LM, data.TRAIN_BD_CLASS, data.TEST_BD_CLASS + ] + seq_lists = [ + seqs_label, seqs_lm, seqs_ae, seqs_label, seqs_rev, seqs_bidir, + seqs_bidir_label + ] + for fname, seq_list in zip(filenames, seq_lists): + with tf.python_io.TFRecordWriter( + os.path.join(FLAGS.data_dir, fname)) as writer: + for seq in seq_list: + writer.write(seq.seq.SerializeToString()) + + # Write vocab.txt and vocab_freq.txt + vocab_freqs = _build_vocab_frequencies(seqs, vocab_ids) + ordered_vocab_freqs = sorted( + vocab_freqs.items(), key=operator.itemgetter(1), reverse=True) + with open(os.path.join(FLAGS.data_dir, 'vocab.txt'), 'w') as vocab_f: + with open(os.path.join(FLAGS.data_dir, 'vocab_freq.txt'), 'w') as freq_f: + for word, freq in ordered_vocab_freqs: + vocab_f.write('{}\n'.format(word)) + freq_f.write('{}\n'.format(freq)) + + @classmethod + def tearDownClass(cls): + shutil.rmtree(FLAGS.data_dir) + + def setUp(self): + # Reset FLAGS + FLAGS.rnn_num_layers = 1 + FLAGS.sync_replicas = False + FLAGS.adv_training_method = None + FLAGS.num_candidate_samples = -1 + FLAGS.num_classes = 2 + FLAGS.use_seq2seq_autoencoder = False + + # Reset Graph + tf.reset_default_graph() + + def testClassifierGraph(self): + FLAGS.rnn_num_layers = 2 + model = graphs.VatxtModel() + train_op, _, _ = model.classifier_training() + # Pretrained vars: embedding + LSTM layers + self.assertEqual( + len(model.pretrained_variables), 1 + 2 * FLAGS.rnn_num_layers) + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + tf.train.start_queue_runners(sess) + sess.run(train_op) + + def testLanguageModelGraph(self): + train_op, _, _ = graphs.VatxtModel().language_model_training() + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + tf.train.start_queue_runners(sess) + sess.run(train_op) + + def testMulticlass(self): + FLAGS.num_classes = 10 + graphs.VatxtModel().classifier_graph() + + def testATMethods(self): + at_methods = [None, 'rp', 'at', 'vat', 'atvat'] + for method in at_methods: + FLAGS.adv_training_method = method + with tf.Graph().as_default(): + graphs.VatxtModel().classifier_graph() + + # Ensure variables have been reused + # Embedding + LSTM layers + hidden layers + logits layer + expected_num_vars = 1 + 2 * FLAGS.rnn_num_layers + 2 * ( + FLAGS.cl_num_layers) + 2 + self.assertEqual(len(tf.trainable_variables()), expected_num_vars) + + def testSyncReplicas(self): + FLAGS.sync_replicas = True + graphs.VatxtModel().language_model_training() + + def testCandidateSampling(self): + FLAGS.num_candidate_samples = 10 + graphs.VatxtModel().language_model_training() + + def testSeqAE(self): + FLAGS.use_seq2seq_autoencoder = True + graphs.VatxtModel().language_model_training() + + def testBidirLM(self): + graphs.VatxtBidirModel().language_model_graph() + + def 
testBidirClassifier(self): + at_methods = [None, 'rp', 'at', 'vat', 'atvat'] + for method in at_methods: + FLAGS.adv_training_method = method + with tf.Graph().as_default(): + graphs.VatxtBidirModel().classifier_graph() + + # Ensure variables have been reused + # Embedding + 2 LSTM layers + hidden layers + logits layer + expected_num_vars = 1 + 2 * 2 * FLAGS.rnn_num_layers + 2 * ( + FLAGS.cl_num_layers) + 2 + self.assertEqual(len(tf.trainable_variables()), expected_num_vars) + + def testEvalGraph(self): + _, _ = graphs.VatxtModel().eval_graph() + + def testBidirEvalGraph(self): + _, _ = graphs.VatxtBidirModel().eval_graph() + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/adversarial_text/inputs.py b/models/research/adversarial_text/inputs.py new file mode 100644 index 0000000000000000000000000000000000000000..48a523d8d489ec03a10f68847fd263cc1641e678 --- /dev/null +++ b/models/research/adversarial_text/inputs.py @@ -0,0 +1,342 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Input utils for virtual adversarial text classification.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +# Dependency imports + +import tensorflow as tf + +from data import data_utils + + +class VatxtInput(object): + """Wrapper around NextQueuedSequenceBatch.""" + + def __init__(self, + batch, + state_name=None, + tokens=None, + num_states=0, + eos_id=None): + """Construct VatxtInput. + + Args: + batch: NextQueuedSequenceBatch. + state_name: str, name of state to fetch and save. + tokens: int Tensor, tokens. Defaults to batch's F_TOKEN_ID sequence. + num_states: int The number of states to store. + eos_id: int Id of end of Sequence. 
+ """ + self._batch = batch + self._state_name = state_name + self._tokens = (tokens if tokens is not None else + batch.sequences[data_utils.SequenceWrapper.F_TOKEN_ID]) + self._num_states = num_states + + w = batch.sequences[data_utils.SequenceWrapper.F_WEIGHT] + self._weights = w + + l = batch.sequences[data_utils.SequenceWrapper.F_LABEL] + self._labels = l + + # eos weights + self._eos_weights = None + if eos_id: + ew = tf.cast(tf.equal(self._tokens, eos_id), tf.float32) + self._eos_weights = ew + + @property + def tokens(self): + return self._tokens + + @property + def weights(self): + return self._weights + + @property + def eos_weights(self): + return self._eos_weights + + @property + def labels(self): + return self._labels + + @property + def length(self): + return self._batch.length + + @property + def state_name(self): + return self._state_name + + @property + def state(self): + # LSTM tuple states + state_names = _get_tuple_state_names(self._num_states, self._state_name) + return tuple([ + tf.contrib.rnn.LSTMStateTuple( + self._batch.state(c_name), self._batch.state(h_name)) + for c_name, h_name in state_names + ]) + + def save_state(self, value): + # LSTM tuple states + state_names = _get_tuple_state_names(self._num_states, self._state_name) + save_ops = [] + for (c_state, h_state), (c_name, h_name) in zip(value, state_names): + save_ops.append(self._batch.save_state(c_name, c_state)) + save_ops.append(self._batch.save_state(h_name, h_state)) + return tf.group(*save_ops) + + +def _get_tuple_state_names(num_states, base_name): + """Returns state names for use with LSTM tuple state.""" + state_names = [('{}_{}_c'.format(i, base_name), '{}_{}_h'.format( + i, base_name)) for i in range(num_states)] + return state_names + + +def _split_bidir_tokens(batch): + tokens = batch.sequences[data_utils.SequenceWrapper.F_TOKEN_ID] + # Tokens have shape [batch, time, 2] + # forward and reverse have shape [batch, time]. + forward, reverse = [ + tf.squeeze(t, axis=[2]) for t in tf.split(tokens, 2, axis=2) + ] + return forward, reverse + + +def _filenames_for_data_spec(phase, bidir, pretrain, use_seq2seq): + """Returns input filenames for configuration. + + Args: + phase: str, 'train', 'test', or 'valid'. + bidir: bool, bidirectional model. + pretrain: bool, pretraining or classification. + use_seq2seq: bool, seq2seq data, only valid if pretrain=True. + + Returns: + Tuple of filenames. + + Raises: + ValueError: if an invalid combination of arguments is provided that does not + map to any data files (e.g. pretrain=False, use_seq2seq=True). 
+ """ + data_spec = (phase, bidir, pretrain, use_seq2seq) + data_specs = { + ('train', True, True, False): (data_utils.TRAIN_LM, + data_utils.TRAIN_REV_LM), + ('train', True, False, False): (data_utils.TRAIN_BD_CLASS,), + ('train', False, True, False): (data_utils.TRAIN_LM,), + ('train', False, True, True): (data_utils.TRAIN_SA,), + ('train', False, False, False): (data_utils.TRAIN_CLASS,), + ('test', True, True, False): (data_utils.TEST_LM, + data_utils.TRAIN_REV_LM), + ('test', True, False, False): (data_utils.TEST_BD_CLASS,), + ('test', False, True, False): (data_utils.TEST_LM,), + ('test', False, True, True): (data_utils.TEST_SA,), + ('test', False, False, False): (data_utils.TEST_CLASS,), + ('valid', True, False, False): (data_utils.VALID_BD_CLASS,), + ('valid', False, False, False): (data_utils.VALID_CLASS,), + } + if data_spec not in data_specs: + raise ValueError( + 'Data specification (phase, bidir, pretrain, use_seq2seq) %s not ' + 'supported' % str(data_spec)) + + return data_specs[data_spec] + + +def _read_single_sequence_example(file_list, tokens_shape=None): + """Reads and parses SequenceExamples from TFRecord-encoded file_list.""" + tf.logging.info('Constructing TFRecordReader from files: %s', file_list) + file_queue = tf.train.string_input_producer(file_list) + reader = tf.TFRecordReader() + seq_key, serialized_record = reader.read(file_queue) + ctx, sequence = tf.parse_single_sequence_example( + serialized_record, + sequence_features={ + data_utils.SequenceWrapper.F_TOKEN_ID: + tf.FixedLenSequenceFeature(tokens_shape or [], dtype=tf.int64), + data_utils.SequenceWrapper.F_LABEL: + tf.FixedLenSequenceFeature([], dtype=tf.int64), + data_utils.SequenceWrapper.F_WEIGHT: + tf.FixedLenSequenceFeature([], dtype=tf.float32), + }) + return seq_key, ctx, sequence + + +def _read_and_batch(data_dir, + fname, + state_name, + state_size, + num_layers, + unroll_steps, + batch_size, + bidir_input=False): + """Inputs for text model. + + Args: + data_dir: str, directory containing TFRecord files of SequenceExample. + fname: str, input file name. + state_name: string, key for saved state of LSTM. + state_size: int, size of LSTM state. + num_layers: int, the number of layers in the LSTM. + unroll_steps: int, number of timesteps to unroll for TBTT. + batch_size: int, batch size. + bidir_input: bool, whether the input is bidirectional. If True, creates 2 + states, state_name and state_name + '_reverse'. + + Returns: + Instance of NextQueuedSequenceBatch + + Raises: + ValueError: if file for input specification is not found. + """ + data_path = os.path.join(data_dir, fname) + if not tf.gfile.Exists(data_path): + raise ValueError('Failed to find file: %s' % data_path) + + tokens_shape = [2] if bidir_input else [] + seq_key, ctx, sequence = _read_single_sequence_example( + [data_path], tokens_shape=tokens_shape) + # Set up stateful queue reader. 
+ state_names = _get_tuple_state_names(num_layers, state_name) + initial_states = {} + for c_state, h_state in state_names: + initial_states[c_state] = tf.zeros(state_size) + initial_states[h_state] = tf.zeros(state_size) + if bidir_input: + rev_state_names = _get_tuple_state_names(num_layers, + '{}_reverse'.format(state_name)) + for rev_c_state, rev_h_state in rev_state_names: + initial_states[rev_c_state] = tf.zeros(state_size) + initial_states[rev_h_state] = tf.zeros(state_size) + batch = tf.contrib.training.batch_sequences_with_states( + input_key=seq_key, + input_sequences=sequence, + input_context=ctx, + input_length=tf.shape(sequence['token_id'])[0], + initial_states=initial_states, + num_unroll=unroll_steps, + batch_size=batch_size, + allow_small_batch=False, + num_threads=4, + capacity=batch_size * 10, + make_keys_unique=True, + make_keys_unique_seed=29392) + return batch + + +def inputs(data_dir=None, + phase='train', + bidir=False, + pretrain=False, + use_seq2seq=False, + state_name='lstm', + state_size=None, + num_layers=0, + batch_size=32, + unroll_steps=100, + eos_id=None): + """Inputs for text model. + + Args: + data_dir: str, directory containing TFRecord files of SequenceExample. + phase: str, dataset for evaluation {'train', 'valid', 'test'}. + bidir: bool, bidirectional LSTM. + pretrain: bool, whether to read pretraining data or classification data. + use_seq2seq: bool, whether to read seq2seq data or the language model data. + state_name: string, key for saved state of LSTM. + state_size: int, size of LSTM state. + num_layers: int, the number of LSTM layers. + batch_size: int, batch size. + unroll_steps: int, number of timesteps to unroll for TBTT. + eos_id: int, id of end of sequence. used for the kl weights on vat + Returns: + Instance of VatxtInput (x2 if bidir=True and pretrain=True, i.e. forward and + reverse). + """ + with tf.name_scope('inputs'): + filenames = _filenames_for_data_spec(phase, bidir, pretrain, use_seq2seq) + + if bidir and pretrain: + # Bidirectional pretraining + # Requires separate forward and reverse language model data. 
+ forward_fname, reverse_fname = filenames + forward_batch = _read_and_batch(data_dir, forward_fname, state_name, + state_size, num_layers, unroll_steps, + batch_size) + state_name_rev = state_name + '_reverse' + reverse_batch = _read_and_batch(data_dir, reverse_fname, state_name_rev, + state_size, num_layers, unroll_steps, + batch_size) + forward_input = VatxtInput( + forward_batch, + state_name=state_name, + num_states=num_layers, + eos_id=eos_id) + reverse_input = VatxtInput( + reverse_batch, + state_name=state_name_rev, + num_states=num_layers, + eos_id=eos_id) + return forward_input, reverse_input + + elif bidir: + # Classifier bidirectional LSTM + # Shared data source, but separate token/state streams + fname, = filenames + batch = _read_and_batch( + data_dir, + fname, + state_name, + state_size, + num_layers, + unroll_steps, + batch_size, + bidir_input=True) + forward_tokens, reverse_tokens = _split_bidir_tokens(batch) + forward_input = VatxtInput( + batch, + state_name=state_name, + tokens=forward_tokens, + num_states=num_layers) + reverse_input = VatxtInput( + batch, + state_name=state_name + '_reverse', + tokens=reverse_tokens, + num_states=num_layers) + return forward_input, reverse_input + else: + # Unidirectional LM or classifier + fname, = filenames + batch = _read_and_batch( + data_dir, + fname, + state_name, + state_size, + num_layers, + unroll_steps, + batch_size, + bidir_input=False) + return VatxtInput( + batch, state_name=state_name, num_states=num_layers, eos_id=eos_id) diff --git a/models/research/adversarial_text/layers.py b/models/research/adversarial_text/layers.py new file mode 100644 index 0000000000000000000000000000000000000000..be4c7a47e0871182d82310e07e5739c2fc9f8744 --- /dev/null +++ b/models/research/adversarial_text/layers.py @@ -0,0 +1,397 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Layers for VatxtModel.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# Dependency imports + +from six.moves import xrange +import tensorflow as tf +K = tf.keras + + +def cl_logits_subgraph(layer_sizes, input_size, num_classes, keep_prob=1.): + """Construct multiple ReLU layers with dropout and a linear layer.""" + subgraph = K.models.Sequential(name='cl_logits') + for i, layer_size in enumerate(layer_sizes): + if i == 0: + subgraph.add( + K.layers.Dense(layer_size, activation='relu', input_dim=input_size)) + else: + subgraph.add(K.layers.Dense(layer_size, activation='relu')) + + if keep_prob < 1.: + subgraph.add(K.layers.Dropout(1. 
- keep_prob)) + subgraph.add(K.layers.Dense(1 if num_classes == 2 else num_classes)) + return subgraph + + +class Embedding(K.layers.Layer): + """Embedding layer with frequency-based normalization and dropout.""" + + def __init__(self, + vocab_size, + embedding_dim, + normalize=False, + vocab_freqs=None, + keep_prob=1., + **kwargs): + self.vocab_size = vocab_size + self.embedding_dim = embedding_dim + self.normalized = normalize + self.keep_prob = keep_prob + + if normalize: + assert vocab_freqs is not None + self.vocab_freqs = tf.constant( + vocab_freqs, dtype=tf.float32, shape=(vocab_size, 1)) + + super(Embedding, self).__init__(**kwargs) + + def build(self, input_shape): + with tf.device('/cpu:0'): + self.var = self.add_weight( + shape=(self.vocab_size, self.embedding_dim), + initializer=tf.random_uniform_initializer(-1., 1.), + name='embedding', + dtype=tf.float32) + + if self.normalized: + self.var = self._normalize(self.var) + + super(Embedding, self).build(input_shape) + + def call(self, x): + embedded = tf.nn.embedding_lookup(self.var, x) + if self.keep_prob < 1.: + shape = embedded.get_shape().as_list() + + # Use same dropout masks at each timestep with specifying noise_shape. + # This slightly improves performance. + # Please see https://arxiv.org/abs/1512.05287 for the theoretical + # explanation. + embedded = tf.nn.dropout( + embedded, self.keep_prob, noise_shape=(shape[0], 1, shape[2])) + return embedded + + def _normalize(self, emb): + weights = self.vocab_freqs / tf.reduce_sum(self.vocab_freqs) + mean = tf.reduce_sum(weights * emb, 0, keep_dims=True) + var = tf.reduce_sum(weights * tf.pow(emb - mean, 2.), 0, keep_dims=True) + stddev = tf.sqrt(1e-6 + var) + return (emb - mean) / stddev + + +class LSTM(object): + """LSTM layer using dynamic_rnn. + + Exposes variables in `trainable_weights` property. 
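# A small demonstration of the noise_shape trick used in Embedding.call()
# above: a noise_shape of (batch, 1, dim) makes tf.nn.dropout broadcast one
# dropout mask over all timesteps of a sequence, so the same embedding
# dimensions are dropped at every step (see arXiv:1512.05287). Shapes are
# illustrative.
import tensorflow as tf

embedded = tf.ones([2, 5, 3])  # [batch, num_timesteps, embedding_dim]
dropped = tf.nn.dropout(embedded, keep_prob=0.5, noise_shape=[2, 1, 3])

with tf.Session() as sess:
  out = sess.run(dropped)
  # Every timestep of a sequence shares the same zero pattern.
  print(out[0, 0], out[0, 1])  # identical vectors (entries are 0. or 2.)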
+ """ + + def __init__(self, cell_size, num_layers=1, keep_prob=1., name='LSTM'): + self.cell_size = cell_size + self.num_layers = num_layers + self.keep_prob = keep_prob + self.reuse = None + self.trainable_weights = None + self.name = name + + def __call__(self, x, initial_state, seq_length): + with tf.variable_scope(self.name, reuse=self.reuse) as vs: + cell = tf.contrib.rnn.MultiRNNCell([ + tf.contrib.rnn.BasicLSTMCell( + self.cell_size, + forget_bias=0.0, + reuse=tf.get_variable_scope().reuse) + for _ in xrange(self.num_layers) + ]) + + # shape(x) = (batch_size, num_timesteps, embedding_dim) + + lstm_out, next_state = tf.nn.dynamic_rnn( + cell, x, initial_state=initial_state, sequence_length=seq_length) + + # shape(lstm_out) = (batch_size, timesteps, cell_size) + + if self.keep_prob < 1.: + lstm_out = tf.nn.dropout(lstm_out, self.keep_prob) + + if self.reuse is None: + self.trainable_weights = vs.global_variables() + + self.reuse = True + + return lstm_out, next_state + + +class SoftmaxLoss(K.layers.Layer): + """Softmax xentropy loss with candidate sampling.""" + + def __init__(self, + vocab_size, + num_candidate_samples=-1, + vocab_freqs=None, + **kwargs): + self.vocab_size = vocab_size + self.num_candidate_samples = num_candidate_samples + self.vocab_freqs = vocab_freqs + super(SoftmaxLoss, self).__init__(**kwargs) + self.multiclass_dense_layer = K.layers.Dense(self.vocab_size) + + def build(self, input_shape): + input_shape = input_shape[0].as_list() + with tf.device('/cpu:0'): + self.lin_w = self.add_weight( + shape=(input_shape[-1], self.vocab_size), + name='lm_lin_w', + initializer=K.initializers.glorot_uniform()) + self.lin_b = self.add_weight( + shape=(self.vocab_size,), + name='lm_lin_b', + initializer=K.initializers.glorot_uniform()) + self.multiclass_dense_layer.build(input_shape) + + super(SoftmaxLoss, self).build(input_shape) + + def call(self, inputs): + x, labels, weights = inputs + if self.num_candidate_samples > -1: + assert self.vocab_freqs is not None + labels_reshaped = tf.reshape(labels, [-1]) + labels_reshaped = tf.expand_dims(labels_reshaped, -1) + sampled = tf.nn.fixed_unigram_candidate_sampler( + true_classes=labels_reshaped, + num_true=1, + num_sampled=self.num_candidate_samples, + unique=True, + range_max=self.vocab_size, + unigrams=self.vocab_freqs) + inputs_reshaped = tf.reshape(x, [-1, int(x.get_shape()[2])]) + + lm_loss = tf.nn.sampled_softmax_loss( + weights=tf.transpose(self.lin_w), + biases=self.lin_b, + labels=labels_reshaped, + inputs=inputs_reshaped, + num_sampled=self.num_candidate_samples, + num_classes=self.vocab_size, + sampled_values=sampled) + lm_loss = tf.reshape( + lm_loss, + [int(x.get_shape()[0]), int(x.get_shape()[1])]) + else: + logits = self.multiclass_dense_layer(x) + lm_loss = tf.nn.sparse_softmax_cross_entropy_with_logits( + logits=logits, labels=labels) + + lm_loss = tf.identity( + tf.reduce_sum(lm_loss * weights) / _num_labels(weights), + name='lm_xentropy_loss') + return lm_loss + + +def classification_loss(logits, labels, weights): + """Computes cross entropy loss between logits and labels. + + Args: + logits: 2-D [timesteps*batch_size, m] float tensor, where m=1 if + num_classes=2, otherwise m=num_classes. + labels: 1-D [timesteps*batch_size] integer tensor. + weights: 1-D [timesteps*batch_size] float tensor. + + Returns: + Loss scalar of type float. 
+ """ + inner_dim = logits.get_shape().as_list()[-1] + with tf.name_scope('classifier_loss'): + # Logistic loss + if inner_dim == 1: + loss = tf.nn.sigmoid_cross_entropy_with_logits( + logits=tf.squeeze(logits, -1), labels=tf.cast(labels, tf.float32)) + # Softmax loss + else: + loss = tf.nn.sparse_softmax_cross_entropy_with_logits( + logits=logits, labels=labels) + + num_lab = _num_labels(weights) + tf.summary.scalar('num_labels', num_lab) + return tf.identity( + tf.reduce_sum(weights * loss) / num_lab, name='classification_xentropy') + + +def accuracy(logits, targets, weights): + """Computes prediction accuracy. + + Args: + logits: 2-D classifier logits [timesteps*batch_size, num_classes] + targets: 1-D [timesteps*batch_size] integer tensor. + weights: 1-D [timesteps*batch_size] float tensor. + + Returns: + Accuracy: float scalar. + """ + with tf.name_scope('accuracy'): + eq = tf.cast(tf.equal(predictions(logits), targets), tf.float32) + return tf.identity( + tf.reduce_sum(weights * eq) / _num_labels(weights), name='accuracy') + + +def predictions(logits): + """Class prediction from logits.""" + inner_dim = logits.get_shape().as_list()[-1] + with tf.name_scope('predictions'): + # For binary classification + if inner_dim == 1: + pred = tf.cast(tf.greater(tf.squeeze(logits, -1), 0.), tf.int64) + # For multi-class classification + else: + pred = tf.argmax(logits, 2) + return pred + + +def _num_labels(weights): + """Number of 1's in weights. Returns 1. if 0.""" + num_labels = tf.reduce_sum(weights) + num_labels = tf.where(tf.equal(num_labels, 0.), 1., num_labels) + return num_labels + + +def optimize(loss, + global_step, + max_grad_norm, + lr, + lr_decay, + sync_replicas=False, + replicas_to_aggregate=1, + task_id=0): + """Builds optimization graph. + + * Creates an optimizer, and optionally wraps with SyncReplicasOptimizer + * Computes, clips, and applies gradients + * Maintains moving averages for all trainable variables + * Summarizes variables and gradients + + Args: + loss: scalar loss to minimize. + global_step: integer scalar Variable. + max_grad_norm: float scalar. Grads will be clipped to this value. + lr: float scalar, learning rate. + lr_decay: float scalar, learning rate decay rate. + sync_replicas: bool, whether to use SyncReplicasOptimizer. + replicas_to_aggregate: int, number of replicas to aggregate when using + SyncReplicasOptimizer. + task_id: int, id of the current task; used to ensure proper initialization + of SyncReplicasOptimizer. + + Returns: + train_op + """ + with tf.name_scope('optimization'): + # Compute gradients. + tvars = tf.trainable_variables() + grads = tf.gradients( + loss, + tvars, + aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N) + + # Clip non-embedding grads + non_embedding_grads_and_vars = [(g, v) for (g, v) in zip(grads, tvars) + if 'embedding' not in v.op.name] + embedding_grads_and_vars = [(g, v) for (g, v) in zip(grads, tvars) + if 'embedding' in v.op.name] + + ne_grads, ne_vars = zip(*non_embedding_grads_and_vars) + ne_grads, _ = tf.clip_by_global_norm(ne_grads, max_grad_norm) + non_embedding_grads_and_vars = zip(ne_grads, ne_vars) + + grads_and_vars = embedding_grads_and_vars + list(non_embedding_grads_and_vars) + + # Summarize + _summarize_vars_and_grads(grads_and_vars) + + # Decaying learning rate + lr = tf.train.exponential_decay( + lr, global_step, 1, lr_decay, staircase=True) + tf.summary.scalar('learning_rate', lr) + opt = tf.train.AdamOptimizer(lr) + + # Track the moving averages of all trainable variables. 
+ variable_averages = tf.train.ExponentialMovingAverage(0.999, global_step) + + # Apply gradients + if sync_replicas: + opt = tf.train.SyncReplicasOptimizer( + opt, + replicas_to_aggregate, + variable_averages=variable_averages, + variables_to_average=tvars, + total_num_replicas=replicas_to_aggregate) + apply_gradient_op = opt.apply_gradients( + grads_and_vars, global_step=global_step) + with tf.control_dependencies([apply_gradient_op]): + train_op = tf.no_op(name='train_op') + + # Initialization ops + tf.add_to_collection(tf.GraphKeys.QUEUE_RUNNERS, + opt.get_chief_queue_runner()) + if task_id == 0: # Chief task + local_init_op = opt.chief_init_op + tf.add_to_collection('chief_init_op', opt.get_init_tokens_op()) + else: + local_init_op = opt.local_step_init_op + tf.add_to_collection('local_init_op', local_init_op) + tf.add_to_collection('ready_for_local_init_op', + opt.ready_for_local_init_op) + else: + # Non-sync optimizer + apply_gradient_op = opt.apply_gradients(grads_and_vars, global_step) + with tf.control_dependencies([apply_gradient_op]): + train_op = variable_averages.apply(tvars) + + return train_op + + +def _summarize_vars_and_grads(grads_and_vars): + tf.logging.info('Trainable variables:') + tf.logging.info('-' * 60) + for grad, var in grads_and_vars: + tf.logging.info(var) + + def tag(name, v=var): + return v.op.name + '_' + name + + # Variable summary + mean = tf.reduce_mean(var) + tf.summary.scalar(tag('mean'), mean) + with tf.name_scope(tag('stddev')): + stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean))) + tf.summary.scalar(tag('stddev'), stddev) + tf.summary.scalar(tag('max'), tf.reduce_max(var)) + tf.summary.scalar(tag('min'), tf.reduce_min(var)) + tf.summary.histogram(tag('histogram'), var) + + # Gradient summary + if grad is not None: + if isinstance(grad, tf.IndexedSlices): + grad_values = grad.values + else: + grad_values = grad + + tf.summary.histogram(tag('gradient'), grad_values) + tf.summary.scalar(tag('gradient_norm'), tf.global_norm([grad_values])) + else: + tf.logging.info('Var %s has no gradient', var.op.name) diff --git a/models/research/adversarial_text/pretrain.py b/models/research/adversarial_text/pretrain.py new file mode 100644 index 0000000000000000000000000000000000000000..4e1fa6a4cbbfd1b9f5086036555627f3453acc70 --- /dev/null +++ b/models/research/adversarial_text/pretrain.py @@ -0,0 +1,46 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Pretrains a recurrent language model. + +Computational time: + 2 days to train 100000 steps on 1 layer 1024 hidden units LSTM, + 256 embeddings, 400 truncated BP, 256 minibatch and on single GPU (Pascal + Titan X, cuDNNv5). 
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# Dependency imports + +import tensorflow as tf + +import graphs +import train_utils + +FLAGS = tf.app.flags.FLAGS + + +def main(_): + """Trains Language Model.""" + tf.logging.set_verbosity(tf.logging.INFO) + with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)): + model = graphs.get_model() + train_op, loss, global_step = model.language_model_training() + train_utils.run_training(train_op, loss, global_step) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/adversarial_text/train_classifier.py b/models/research/adversarial_text/train_classifier.py new file mode 100644 index 0000000000000000000000000000000000000000..f498d2c2fb9fd16f5c38bc10e9d80c124e127cb4 --- /dev/null +++ b/models/research/adversarial_text/train_classifier.py @@ -0,0 +1,63 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Trains LSTM text classification model. + +Model trains with adversarial or virtual adversarial training. + +Computational time: + 1.8 hours to train 10000 steps without adversarial or virtual adversarial + training, on 1 layer 1024 hidden units LSTM, 256 embeddings, 400 truncated + BP, 64 minibatch and on single GPU (Pascal Titan X, cuDNNv5). + + 4 hours to train 10000 steps with adversarial or virtual adversarial + training, with above condition. + +To initialize embedding and LSTM cell weights from a pretrained model, set +FLAGS.pretrained_model_dir to the pretrained model's checkpoint directory. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# Dependency imports + +import tensorflow as tf + +import graphs +import train_utils + +flags = tf.app.flags +FLAGS = flags.FLAGS + +flags.DEFINE_string('pretrained_model_dir', None, + 'Directory path to pretrained model to restore from') + + +def main(_): + """Trains LSTM classification model.""" + tf.logging.set_verbosity(tf.logging.INFO) + with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)): + model = graphs.get_model() + train_op, loss, global_step = model.classifier_training() + train_utils.run_training( + train_op, + loss, + global_step, + variables_to_restore=model.pretrained_variables, + pretrained_model_dir=FLAGS.pretrained_model_dir) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/adversarial_text/train_utils.py b/models/research/adversarial_text/train_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..577237967d0bb26b073f7146eb42106fc630da5e --- /dev/null +++ b/models/research/adversarial_text/train_utils.py @@ -0,0 +1,133 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utilities for training adversarial text models.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import time + +# Dependency imports + +import numpy as np +import tensorflow as tf + +flags = tf.app.flags +FLAGS = flags.FLAGS + +flags.DEFINE_string('master', '', 'Master address.') +flags.DEFINE_integer('task', 0, 'Task id of the replica running the training.') +flags.DEFINE_integer('ps_tasks', 0, 'Number of parameter servers.') +flags.DEFINE_string('train_dir', '/tmp/text_train', + 'Directory for logs and checkpoints.') +flags.DEFINE_integer('max_steps', 1000000, 'Number of batches to run.') +flags.DEFINE_boolean('log_device_placement', False, + 'Whether to log device placement.') + + +def run_training(train_op, + loss, + global_step, + variables_to_restore=None, + pretrained_model_dir=None): + """Sets up and runs training loop.""" + tf.gfile.MakeDirs(FLAGS.train_dir) + + # Create pretrain Saver + if pretrained_model_dir: + assert variables_to_restore + tf.logging.info('Will attempt restore from %s: %s', pretrained_model_dir, + variables_to_restore) + saver_for_restore = tf.train.Saver(variables_to_restore) + + # Init ops + if FLAGS.sync_replicas: + local_init_op = tf.get_collection('local_init_op')[0] + ready_for_local_init_op = tf.get_collection('ready_for_local_init_op')[0] + else: + local_init_op = tf.train.Supervisor.USE_DEFAULT + ready_for_local_init_op = tf.train.Supervisor.USE_DEFAULT + + is_chief = FLAGS.task == 0 + sv = tf.train.Supervisor( + logdir=FLAGS.train_dir, + is_chief=is_chief, + save_summaries_secs=30, + save_model_secs=30, + local_init_op=local_init_op, + ready_for_local_init_op=ready_for_local_init_op, + global_step=global_step) + + # Delay starting standard services to allow possible pretrained model restore. 
+ with sv.managed_session( + master=FLAGS.master, + config=tf.ConfigProto(log_device_placement=FLAGS.log_device_placement), + start_standard_services=False) as sess: + # Initialization + if is_chief: + if pretrained_model_dir: + maybe_restore_pretrained_model(sess, saver_for_restore, + pretrained_model_dir) + if FLAGS.sync_replicas: + sess.run(tf.get_collection('chief_init_op')[0]) + sv.start_standard_services(sess) + + sv.start_queue_runners(sess) + + # Training loop + global_step_val = 0 + while not sv.should_stop() and global_step_val < FLAGS.max_steps: + global_step_val = train_step(sess, train_op, loss, global_step) + + # Final checkpoint + if is_chief and global_step_val >= FLAGS.max_steps: + sv.saver.save(sess, sv.save_path, global_step=global_step) + + +def maybe_restore_pretrained_model(sess, saver_for_restore, model_dir): + """Restores pretrained model if there is no ckpt model.""" + ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir) + checkpoint_exists = ckpt and ckpt.model_checkpoint_path + if checkpoint_exists: + tf.logging.info('Checkpoint exists in FLAGS.train_dir; skipping ' + 'pretraining restore') + return + + pretrain_ckpt = tf.train.get_checkpoint_state(model_dir) + if not (pretrain_ckpt and pretrain_ckpt.model_checkpoint_path): + raise ValueError( + 'Asked to restore model from %s but no checkpoint found.' % model_dir) + saver_for_restore.restore(sess, pretrain_ckpt.model_checkpoint_path) + + +def train_step(sess, train_op, loss, global_step): + """Runs a single training step.""" + start_time = time.time() + _, loss_val, global_step_val = sess.run([train_op, loss, global_step]) + duration = time.time() - start_time + + # Logging + if global_step_val % 10 == 0: + examples_per_sec = FLAGS.batch_size / duration + sec_per_batch = float(duration) + + format_str = ('step %d, loss = %.2f (%.1f examples/sec; %.3f ' 'sec/batch)') + tf.logging.info(format_str % (global_step_val, loss_val, examples_per_sec, + sec_per_batch)) + + if np.isnan(loss_val): + raise OverflowError('Loss is nan') + + return global_step_val diff --git a/models/research/attention_ocr/README.md b/models/research/attention_ocr/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0056262343d9e486ecb5a83cb65bed3af948e426 --- /dev/null +++ b/models/research/attention_ocr/README.md @@ -0,0 +1,201 @@ +## Attention-based Extraction of Structured Information from Street View Imagery + +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/attention-based-extraction-of-structured/optical-character-recognition-on-fsns-test)](https://paperswithcode.com/sota/optical-character-recognition-on-fsns-test?p=attention-based-extraction-of-structured) +[![Paper](http://img.shields.io/badge/paper-arXiv.1704.03549-B3181B.svg)](https://arxiv.org/abs/1704.03549) +[![TensorFlow 1.15](https://img.shields.io/badge/tensorflow-1.15-brightgreen)](https://github.com/tensorflow/tensorflow/releases/tag/v1.15.0) + +*A TensorFlow model for real-world image text extraction problems.* + +This folder contains the code needed to train a new Attention OCR model on the +[FSNS dataset][FSNS] dataset to transcribe street names in France. You can +also use it to train it on your own data. 
+ +More details can be found in our paper: + +["Attention-based Extraction of Structured Information from Street View +Imagery"](https://arxiv.org/abs/1704.03549) + +## Contacts + +Authors + +* Zbigniew Wojna (zbigniewwojna@gmail.com) +* Alexander Gorban (gorban@google.com) + +Maintainer: Xavier Gibert [@xavigibert](https://github.com/xavigibert) + +## Requirements + +1. Install the TensorFlow library ([instructions][TF]). For example: + +``` +python3 -m venv ~/.tensorflow +source ~/.tensorflow/bin/activate +pip install --upgrade pip +pip install --upgrade tensorflow-gpu=1.15 +``` + +2. At least 158GB of free disk space to download the FSNS dataset: + +``` +cd research/attention_ocr/python/datasets +aria2c -c -j 20 -i ../../../street/python/fsns_urls.txt +cd .. +``` + +3. 16GB of RAM or more; 32GB is recommended. +4. `train.py` works with both CPU and GPU, though using GPU is preferable. It has been tested with a Titan X and with a GTX980. + +[TF]: https://www.tensorflow.org/install/ +[FSNS]: https://github.com/tensorflow/models/tree/master/research/street + +## How to use this code + +To run all unit tests: + +``` +cd research/attention_ocr/python +find . -name "*_test.py" -printf '%P\n' | xargs python3 -m unittest +``` + +To train from scratch: + +``` +python train.py +``` + +To train a model using pre-trained Inception weights as initialization: + +``` +wget http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz +tar xf inception_v3_2016_08_28.tar.gz +python train.py --checkpoint_inception=./inception_v3.ckpt +``` + +To fine tune the Attention OCR model using a checkpoint: + +``` +wget http://download.tensorflow.org/models/attention_ocr_2017_08_09.tar.gz +tar xf attention_ocr_2017_08_09.tar.gz +python train.py --checkpoint=model.ckpt-399731 +``` + +## How to use your own image data to train the model + +You need to define a new dataset. There are two options: + +1. Store data in the same format as the FSNS dataset and just reuse the +[python/datasets/fsns.py](https://github.com/tensorflow/models/blob/master/research/attention_ocr/python/datasets/fsns.py) +module. E.g., create a file datasets/newtextdataset.py: +``` +import fsns + +DEFAULT_DATASET_DIR = 'path/to/the/dataset' + +DEFAULT_CONFIG = { + 'name': + 'MYDATASET', + 'splits': { + 'train': { + 'size': 123, + 'pattern': 'tfexample_train*' + }, + 'test': { + 'size': 123, + 'pattern': 'tfexample_test*' + } + }, + 'charset_filename': + 'charset_size.txt', + 'image_shape': (150, 600, 3), + 'num_of_views': + 4, + 'max_sequence_length': + 37, + 'null_code': + 42, + 'items_to_descriptions': { + 'image': + 'A [150 x 600 x 3] color image.', + 'label': + 'Characters codes.', + 'text': + 'A unicode string.', + 'length': + 'A length of the encoded text.', + 'num_of_views': + 'A number of different views stored within the image.' + } +} + + +def get_split(split_name, dataset_dir=None, config=None): + if not dataset_dir: + dataset_dir = DEFAULT_DATASET_DIR + if not config: + config = DEFAULT_CONFIG + + return fsns.get_split(split_name, dataset_dir, config) +``` +You will also need to include it into the `datasets/__init__.py` and specify the +dataset name in the command line. + +``` +python train.py --dataset_name=newtextdataset +``` + +Please note that eval.py will also require the same flag. + +To learn how to store a data in the FSNS + format please refer to the https://stackoverflow.com/a/44461910/743658. + +2. Define a new dataset format. 
The model needs the following data to train:
+
+- images: input images, shape [batch_size x H x W x 3];
+- labels: ground truth label ids, shape=[batch_size x seq_length];
+- labels_one_hot: labels in one-hot encoding, shape [batch_size x seq_length x num_char_classes];
+
+Refer to
+[python/data_provider.py](https://github.com/tensorflow/models/blob/master/research/attention_ocr/python/data_provider.py#L33)
+for more details. You can use
+[python/datasets/fsns.py](https://github.com/tensorflow/models/blob/master/research/attention_ocr/python/datasets/fsns.py)
+as an example.
+
+## How to use a pre-trained model
+
+The inference code has not been released yet, but it is straightforward to
+implement in Python or C++.
+
+The recommended way is to use the [Serving infrastructure][serving].
+
+Alternatively you can:
+1. define a placeholder for images (or use a numpy array directly)
+2. [create a graph](https://github.com/tensorflow/models/blob/master/research/attention_ocr/python/eval.py#L60)
+```
+endpoints = model.create_base(images_placeholder, labels_one_hot=None)
+```
+3. [load a pretrained model](https://github.com/tensorflow/models/blob/master/research/attention_ocr/python/model.py#L494)
+4. run computations through the graph:
+```
+predictions = sess.run(endpoints.predicted_chars,
+                       feed_dict={images_placeholder: images_actual_data})
+```
+5. convert character IDs (predictions) to UTF-8 using the provided charset file.
+
+Please note that tensor names may change over time and old stored checkpoints
+can become unloadable. In many cases such backward-incompatible changes can be
+fixed with a [string substitution][1] to update the checkpoint itself, or by
+using a custom var_list with [assign_from_checkpoint_fn][2]. For anything other
+than a one-time experiment, please use [TensorFlow Serving][serving].
+
+[1]: https://github.com/tensorflow/tensorflow/blob/aaf7adc/tensorflow/contrib/rnn/python/tools/checkpoint_convert.py
+[2]: https://www.tensorflow.org/api_docs/python/tf/contrib/framework/assign_from_checkpoint_fn
+[serving]: https://tensorflow.github.io/serving/serving_basic
+
+## Disclaimer
+
+This code is a modified version of the internal model we used for our paper.
+It currently reaches 83.79% full sequence accuracy after 400k steps of training.
+The main differences from the version used in the paper are: for the paper we
+used distributed training with 50 GPU (K80) workers and asynchronous updates,
+whereas the provided checkpoint was created with this code after ~6 days of
+training on a single GPU (Titan X), reaching 81% after 24 hours of training;
+also, the coordinate encoding is disabled by default.
diff --git a/models/research/attention_ocr/python/all_jobs.screenrc b/models/research/attention_ocr/python/all_jobs.screenrc
new file mode 100644
index 0000000000000000000000000000000000000000..ef7fdf237387c95eeb9a61e507b1c74db212502d
--- /dev/null
+++ b/models/research/attention_ocr/python/all_jobs.screenrc
@@ -0,0 +1,9 @@
+# A GPU/screen config to run all jobs for training and evaluation in parallel.
+# Execute: +# source /path/to/your/virtualenv/bin/activate +# screen -R TF -c all_jobs.screenrc + +screen -t train 0 python train.py --train_log_dir=workdir/train +screen -t eval_train 1 python eval.py --split_name=train --train_log_dir=workdir/train --eval_log_dir=workdir/eval_train +screen -t eval_test 2 python eval.py --split_name=test --train_log_dir=workdir/train --eval_log_dir=workdir/eval_test +screen -t tensorboard 3 tensorboard --logdir=workdir diff --git a/models/research/attention_ocr/python/common_flags.py b/models/research/attention_ocr/python/common_flags.py new file mode 100644 index 0000000000000000000000000000000000000000..1cb67179f5e8c772070a6f5c44a88838ee91e6e9 --- /dev/null +++ b/models/research/attention_ocr/python/common_flags.py @@ -0,0 +1,149 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Define flags are common for both train.py and eval.py scripts.""" +import sys + +from tensorflow.python.platform import flags +import logging + +import datasets +import model + +FLAGS = flags.FLAGS + +logging.basicConfig( + level=logging.DEBUG, + stream=sys.stderr, + format='%(levelname)s ' + '%(asctime)s.%(msecs)06d: ' + '%(filename)s: ' + '%(lineno)d ' + '%(message)s', + datefmt='%Y-%m-%d %H:%M:%S') + + +def define(): + """Define common flags.""" + # yapf: disable + flags.DEFINE_integer('batch_size', 32, + 'Batch size.') + + flags.DEFINE_integer('crop_width', None, + 'Width of the central crop for images.') + + flags.DEFINE_integer('crop_height', None, + 'Height of the central crop for images.') + + flags.DEFINE_string('train_log_dir', '/tmp/attention_ocr/train', + 'Directory where to write event logs.') + + flags.DEFINE_string('dataset_name', 'fsns', + 'Name of the dataset. 
Supported: fsns') + + flags.DEFINE_string('split_name', 'train', + 'Dataset split name to run evaluation for: test,train.') + + flags.DEFINE_string('dataset_dir', None, + 'Dataset root folder.') + + flags.DEFINE_string('checkpoint', '', + 'Path for checkpoint to restore weights from.') + + flags.DEFINE_string('master', + '', + 'BNS name of the TensorFlow master to use.') + + # Model hyper parameters + flags.DEFINE_float('learning_rate', 0.004, + 'learning rate') + + flags.DEFINE_string('optimizer', 'momentum', + 'the optimizer to use') + + flags.DEFINE_float('momentum', 0.9, + 'momentum value for the momentum optimizer if used') + + flags.DEFINE_bool('use_augment_input', True, + 'If True will use image augmentation') + + # Method hyper parameters + # conv_tower_fn + flags.DEFINE_string('final_endpoint', 'Mixed_5d', + 'Endpoint to cut inception tower') + + # sequence_logit_fn + flags.DEFINE_bool('use_attention', True, + 'If True will use the attention mechanism') + + flags.DEFINE_bool('use_autoregression', True, + 'If True will use autoregression (a feedback link)') + + flags.DEFINE_integer('num_lstm_units', 256, + 'number of LSTM units for sequence LSTM') + + flags.DEFINE_float('weight_decay', 0.00004, + 'weight decay for char prediction FC layers') + + flags.DEFINE_float('lstm_state_clip_value', 10.0, + 'cell state is clipped by this value prior to the cell' + ' output activation') + + # 'sequence_loss_fn' + flags.DEFINE_float('label_smoothing', 0.1, + 'weight for label smoothing') + + flags.DEFINE_bool('ignore_nulls', True, + 'ignore null characters for computing the loss') + + flags.DEFINE_bool('average_across_timesteps', False, + 'divide the returned cost by the total label weight') + # yapf: enable + + +def get_crop_size(): + if FLAGS.crop_width and FLAGS.crop_height: + return (FLAGS.crop_width, FLAGS.crop_height) + else: + return None + + +def create_dataset(split_name): + ds_module = getattr(datasets, FLAGS.dataset_name) + return ds_module.get_split(split_name, dataset_dir=FLAGS.dataset_dir) + + +def create_mparams(): + return { + 'conv_tower_fn': + model.ConvTowerParams(final_endpoint=FLAGS.final_endpoint), + 'sequence_logit_fn': + model.SequenceLogitsParams( + use_attention=FLAGS.use_attention, + use_autoregression=FLAGS.use_autoregression, + num_lstm_units=FLAGS.num_lstm_units, + weight_decay=FLAGS.weight_decay, + lstm_state_clip_value=FLAGS.lstm_state_clip_value), + 'sequence_loss_fn': + model.SequenceLossParams( + label_smoothing=FLAGS.label_smoothing, + ignore_nulls=FLAGS.ignore_nulls, + average_across_timesteps=FLAGS.average_across_timesteps) + } + + +def create_model(*args, **kwargs): + ocr_model = model.Model(mparams=create_mparams(), *args, **kwargs) + return ocr_model diff --git a/models/research/attention_ocr/python/data_provider.py b/models/research/attention_ocr/python/data_provider.py new file mode 100644 index 0000000000000000000000000000000000000000..6beba17f6d07de1afb7fa90aa7e051fda402df3e --- /dev/null +++ b/models/research/attention_ocr/python/data_provider.py @@ -0,0 +1,199 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Functions to read, decode and pre-process input data for the Model. +""" +import collections +import functools +import tensorflow as tf +from tensorflow.contrib import slim + +import inception_preprocessing + +# Tuple to store input data endpoints for the Model. +# It has following fields (tensors): +# images: input images, +# shape [batch_size x H x W x 3]; +# labels: ground truth label ids, +# shape=[batch_size x seq_length]; +# labels_one_hot: labels in one-hot encoding, +# shape [batch_size x seq_length x num_char_classes]; +InputEndpoints = collections.namedtuple( + 'InputEndpoints', ['images', 'images_orig', 'labels', 'labels_one_hot']) + +# A namedtuple to define a configuration for shuffled batch fetching. +# num_batching_threads: A number of parallel threads to fetch data. +# queue_capacity: a max number of elements in the batch shuffling queue. +# min_after_dequeue: a min number elements in the queue after a dequeue, used +# to ensure a level of mixing of elements. +ShuffleBatchConfig = collections.namedtuple('ShuffleBatchConfig', [ + 'num_batching_threads', 'queue_capacity', 'min_after_dequeue' +]) + +DEFAULT_SHUFFLE_CONFIG = ShuffleBatchConfig( + num_batching_threads=8, queue_capacity=3000, min_after_dequeue=1000) + + +def augment_image(image): + """Augmentation the image with a random modification. + + Args: + image: input Tensor image of rank 3, with the last dimension + of size 3. + + Returns: + Distorted Tensor image of the same shape. + """ + with tf.variable_scope('AugmentImage'): + height = image.get_shape().dims[0].value + width = image.get_shape().dims[1].value + + # Random crop cut from the street sign image, resized to the same size. + # Assures that the crop is covers at least 0.8 area of the input image. + bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box( + tf.shape(image), + bounding_boxes=tf.zeros([0, 0, 4]), + min_object_covered=0.8, + aspect_ratio_range=[0.8, 1.2], + area_range=[0.8, 1.0], + use_image_if_no_bounding_boxes=True) + distorted_image = tf.slice(image, bbox_begin, bbox_size) + + # Randomly chooses one of the 4 interpolation methods + distorted_image = inception_preprocessing.apply_with_random_selector( + distorted_image, + lambda x, method: tf.image.resize_images(x, [height, width], method), + num_cases=4) + distorted_image.set_shape([height, width, 3]) + + # Color distortion + distorted_image = inception_preprocessing.apply_with_random_selector( + distorted_image, + functools.partial( + inception_preprocessing.distort_color, fast_mode=False), + num_cases=4) + distorted_image = tf.clip_by_value(distorted_image, -1.5, 1.5) + + return distorted_image + + +def central_crop(image, crop_size): + """Returns a central crop for the specified size of an image. + + Args: + image: A tensor with shape [height, width, channels] + crop_size: A tuple (crop_width, crop_height) + + Returns: + A tensor of shape [crop_height, crop_width, channels]. 
+ """ + with tf.variable_scope('CentralCrop'): + target_width, target_height = crop_size + image_height, image_width = tf.shape(image)[0], tf.shape(image)[1] + assert_op1 = tf.Assert( + tf.greater_equal(image_height, target_height), + ['image_height < target_height', image_height, target_height]) + assert_op2 = tf.Assert( + tf.greater_equal(image_width, target_width), + ['image_width < target_width', image_width, target_width]) + with tf.control_dependencies([assert_op1, assert_op2]): + offset_width = tf.cast((image_width - target_width) / 2, tf.int32) + offset_height = tf.cast((image_height - target_height) / 2, tf.int32) + return tf.image.crop_to_bounding_box(image, offset_height, offset_width, + target_height, target_width) + + +def preprocess_image(image, augment=False, central_crop_size=None, + num_towers=4): + """Normalizes image to have values in a narrow range around zero. + + Args: + image: a [H x W x 3] uint8 tensor. + augment: optional, if True do random image distortion. + central_crop_size: A tuple (crop_width, crop_height). + num_towers: optional, number of shots of the same image in the input image. + + Returns: + A float32 tensor of shape [H x W x 3] with RGB values in the required + range. + """ + with tf.variable_scope('PreprocessImage'): + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + if augment or central_crop_size: + if num_towers == 1: + images = [image] + else: + images = tf.split(value=image, num_or_size_splits=num_towers, axis=1) + if central_crop_size: + view_crop_size = (int(central_crop_size[0] / num_towers), + central_crop_size[1]) + images = [central_crop(img, view_crop_size) for img in images] + if augment: + images = [augment_image(img) for img in images] + image = tf.concat(images, 1) + + image = tf.subtract(image, 0.5) + image = tf.multiply(image, 2.5) + + return image + + +def get_data(dataset, + batch_size, + augment=False, + central_crop_size=None, + shuffle_config=None, + shuffle=True): + """Wraps calls to DatasetDataProviders and shuffle_batch. + + For more details about supported Dataset objects refer to datasets/fsns.py. + + Args: + dataset: a slim.data.dataset.Dataset object. + batch_size: number of samples per batch. + augment: optional, if True does random image distortion. + central_crop_size: A CharLogittuple (crop_width, crop_height). + shuffle_config: A namedtuple ShuffleBatchConfig. + shuffle: if True use data shuffling. 
+ + Returns: + + """ + if not shuffle_config: + shuffle_config = DEFAULT_SHUFFLE_CONFIG + + provider = slim.dataset_data_provider.DatasetDataProvider( + dataset, + shuffle=shuffle, + common_queue_capacity=2 * batch_size, + common_queue_min=batch_size) + image_orig, label = provider.get(['image', 'label']) + + image = preprocess_image( + image_orig, augment, central_crop_size, num_towers=dataset.num_of_views) + label_one_hot = slim.one_hot_encoding(label, dataset.num_char_classes) + + images, images_orig, labels, labels_one_hot = (tf.train.shuffle_batch( + [image, image_orig, label, label_one_hot], + batch_size=batch_size, + num_threads=shuffle_config.num_batching_threads, + capacity=shuffle_config.queue_capacity, + min_after_dequeue=shuffle_config.min_after_dequeue)) + + return InputEndpoints( + images=images, + images_orig=images_orig, + labels=labels, + labels_one_hot=labels_one_hot) diff --git a/models/research/attention_ocr/python/data_provider_test.py b/models/research/attention_ocr/python/data_provider_test.py new file mode 100644 index 0000000000000000000000000000000000000000..551bc75e02cc470c40aad8a4066b6bba7ceeb62c --- /dev/null +++ b/models/research/attention_ocr/python/data_provider_test.py @@ -0,0 +1,72 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for data_provider.""" + +import numpy as np +import tensorflow as tf +from tensorflow.contrib.slim import queues + +import datasets +import data_provider + + +class DataProviderTest(tf.test.TestCase): + def setUp(self): + tf.test.TestCase.setUp(self) + + def test_preprocessed_image_values_are_in_range(self): + image_shape = (5, 4, 3) + fake_image = np.random.randint(low=0, high=255, size=image_shape) + image_tf = data_provider.preprocess_image(fake_image) + + with self.test_session() as sess: + image_np = sess.run(image_tf) + + self.assertEqual(image_np.shape, image_shape) + min_value, max_value = np.min(image_np), np.max(image_np) + self.assertTrue((-1.28 < min_value) and (min_value < 1.27)) + self.assertTrue((-1.28 < max_value) and (max_value < 1.27)) + + def test_provided_data_has_correct_shape(self): + batch_size = 4 + data = data_provider.get_data( + dataset=datasets.fsns_test.get_test_split(), + batch_size=batch_size, + augment=True, + central_crop_size=None) + + with self.test_session() as sess, queues.QueueRunners(sess): + images_np, labels_np = sess.run([data.images, data.labels_one_hot]) + + self.assertEqual(images_np.shape, (batch_size, 150, 600, 3)) + self.assertEqual(labels_np.shape, (batch_size, 37, 134)) + + def test_optionally_applies_central_crop(self): + batch_size = 4 + data = data_provider.get_data( + dataset=datasets.fsns_test.get_test_split(), + batch_size=batch_size, + augment=True, + central_crop_size=(500, 100)) + + with self.test_session() as sess, queues.QueueRunners(sess): + images_np = sess.run(data.images) + + self.assertEqual(images_np.shape, (batch_size, 100, 500, 3)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/attention_ocr/python/datasets/__init__.py b/models/research/attention_ocr/python/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5d9a20dc7b960b6cf7464317095c465fee5b9f1b --- /dev/null +++ b/models/research/attention_ocr/python/datasets/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +from datasets import fsns +from datasets import fsns_test + +__all__ = [fsns, fsns_test] diff --git a/models/research/attention_ocr/python/datasets/fsns.py b/models/research/attention_ocr/python/datasets/fsns.py new file mode 100644 index 0000000000000000000000000000000000000000..c7203ffcff972207795b4ef5b1e755d35559033a --- /dev/null +++ b/models/research/attention_ocr/python/datasets/fsns.py @@ -0,0 +1,183 @@ +# -*- coding: utf-8 -*- +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Configuration to read FSNS dataset https://goo.gl/3Ldm8v.""" + +import os +import re +import tensorflow as tf +from tensorflow.contrib import slim +import logging + +DEFAULT_DATASET_DIR = os.path.join(os.path.dirname(__file__), 'data', 'fsns') + +# The dataset configuration, should be used only as a default value. +DEFAULT_CONFIG = { + 'name': 'FSNS', + 'splits': { + 'train': { + 'size': 1044868, + 'pattern': 'train/train*' + }, + 'test': { + 'size': 20404, + 'pattern': 'test/test*' + }, + 'validation': { + 'size': 16150, + 'pattern': 'validation/validation*' + } + }, + 'charset_filename': 'charset_size=134.txt', + 'image_shape': (150, 600, 3), + 'num_of_views': 4, + 'max_sequence_length': 37, + 'null_code': 133, + 'items_to_descriptions': { + 'image': 'A [150 x 600 x 3] color image.', + 'label': 'Characters codes.', + 'text': 'A unicode string.', + 'length': 'A length of the encoded text.', + 'num_of_views': 'A number of different views stored within the image.' + } +} + + +def read_charset(filename, null_character=u'\u2591'): + """Reads a charset definition from a tab separated text file. + + charset file has to have format compatible with the FSNS dataset. + + Args: + filename: a path to the charset file. + null_character: a unicode character used to replace '' character. the + default value is a light shade block '░'. + + Returns: + a dictionary with keys equal to character codes and values - unicode + characters. + """ + pattern = re.compile(r'(\d+)\t(.+)') + charset = {} + with tf.gfile.GFile(filename) as f: + for i, line in enumerate(f): + m = pattern.match(line) + if m is None: + logging.warning('incorrect charset file. line #%d: %s', i, line) + continue + code = int(m.group(1)) + char = m.group(2) + if char == '': + char = null_character + charset[code] = char + return charset + + +class _NumOfViewsHandler(slim.tfexample_decoder.ItemHandler): + """Convenience handler to determine number of views stored in an image.""" + + def __init__(self, width_key, original_width_key, num_of_views): + super(_NumOfViewsHandler, self).__init__([width_key, original_width_key]) + self._width_key = width_key + self._original_width_key = original_width_key + self._num_of_views = num_of_views + + def tensors_to_item(self, keys_to_tensors): + return tf.to_int64( + self._num_of_views * keys_to_tensors[self._original_width_key] / + keys_to_tensors[self._width_key]) + + +def get_split(split_name, dataset_dir=None, config=None): + """Returns a dataset tuple for FSNS dataset. + + Args: + split_name: A train/test split name. + dataset_dir: The base directory of the dataset sources, by default it uses + a predefined CNS path (see DEFAULT_DATASET_DIR). + config: A dictionary with dataset configuration. If None - will use the + DEFAULT_CONFIG. + + Returns: + A `Dataset` namedtuple. + + Raises: + ValueError: if `split_name` is not a valid train/test split. 
+ """ + if not dataset_dir: + dataset_dir = DEFAULT_DATASET_DIR + + if not config: + config = DEFAULT_CONFIG + + if split_name not in config['splits']: + raise ValueError('split name %s was not recognized.' % split_name) + + logging.info('Using %s dataset split_name=%s dataset_dir=%s', config['name'], + split_name, dataset_dir) + + # Ignores the 'image/height' feature. + zero = tf.zeros([1], dtype=tf.int64) + keys_to_features = { + 'image/encoded': + tf.FixedLenFeature((), tf.string, default_value=''), + 'image/format': + tf.FixedLenFeature((), tf.string, default_value='png'), + 'image/width': + tf.FixedLenFeature([1], tf.int64, default_value=zero), + 'image/orig_width': + tf.FixedLenFeature([1], tf.int64, default_value=zero), + 'image/class': + tf.FixedLenFeature([config['max_sequence_length']], tf.int64), + 'image/unpadded_class': + tf.VarLenFeature(tf.int64), + 'image/text': + tf.FixedLenFeature([1], tf.string, default_value=''), + } + items_to_handlers = { + 'image': + slim.tfexample_decoder.Image( + shape=config['image_shape'], + image_key='image/encoded', + format_key='image/format'), + 'label': + slim.tfexample_decoder.Tensor(tensor_key='image/class'), + 'text': + slim.tfexample_decoder.Tensor(tensor_key='image/text'), + 'num_of_views': + _NumOfViewsHandler( + width_key='image/width', + original_width_key='image/orig_width', + num_of_views=config['num_of_views']) + } + decoder = slim.tfexample_decoder.TFExampleDecoder(keys_to_features, + items_to_handlers) + charset_file = os.path.join(dataset_dir, config['charset_filename']) + charset = read_charset(charset_file) + file_pattern = os.path.join(dataset_dir, + config['splits'][split_name]['pattern']) + return slim.dataset.Dataset( + data_sources=file_pattern, + reader=tf.TFRecordReader, + decoder=decoder, + num_samples=config['splits'][split_name]['size'], + items_to_descriptions=config['items_to_descriptions'], + # additional parameters for convenience. + charset=charset, + num_char_classes=len(charset), + num_of_views=config['num_of_views'], + max_sequence_length=config['max_sequence_length'], + null_code=config['null_code']) diff --git a/models/research/attention_ocr/python/datasets/fsns_test.py b/models/research/attention_ocr/python/datasets/fsns_test.py new file mode 100644 index 0000000000000000000000000000000000000000..4daedfbd12a58b6635cefed2bdc02bc84fc2c9ef --- /dev/null +++ b/models/research/attention_ocr/python/datasets/fsns_test.py @@ -0,0 +1,103 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for FSNS datasets module.""" + +import collections +import os +import tensorflow as tf +from tensorflow.contrib import slim + +from datasets import fsns +from datasets import unittest_utils + +FLAGS = tf.flags.FLAGS + + +def get_test_split(): + config = fsns.DEFAULT_CONFIG.copy() + config['splits'] = {'test': {'size': 5, 'pattern': 'fsns-00000-of-00001'}} + return fsns.get_split('test', dataset_dir(), config) + + +def dataset_dir(): + return os.path.join(os.path.dirname(__file__), 'testdata/fsns') + + +class FsnsTest(tf.test.TestCase): + def test_decodes_example_proto(self): + expected_label = range(37) + expected_image, encoded = unittest_utils.create_random_image( + 'PNG', shape=(150, 600, 3)) + serialized = unittest_utils.create_serialized_example({ + 'image/encoded': [encoded], + 'image/format': [b'PNG'], + 'image/class': + expected_label, + 'image/unpadded_class': + range(10), + 'image/text': [b'Raw text'], + 'image/orig_width': [150], + 'image/width': [600] + }) + + decoder = fsns.get_split('train', dataset_dir()).decoder + with self.test_session() as sess: + data_tuple = collections.namedtuple('DecodedData', decoder.list_items()) + data = sess.run(data_tuple(*decoder.decode(serialized))) + + self.assertAllEqual(expected_image, data.image) + self.assertAllEqual(expected_label, data.label) + self.assertEqual([b'Raw text'], data.text) + self.assertEqual([1], data.num_of_views) + + def test_label_has_shape_defined(self): + serialized = 'fake' + decoder = fsns.get_split('train', dataset_dir()).decoder + + [label_tf] = decoder.decode(serialized, ['label']) + + self.assertEqual(label_tf.get_shape().dims[0], 37) + + def test_dataset_tuple_has_all_extra_attributes(self): + dataset = fsns.get_split('train', dataset_dir()) + + self.assertTrue(dataset.charset) + self.assertTrue(dataset.num_char_classes) + self.assertTrue(dataset.num_of_views) + self.assertTrue(dataset.max_sequence_length) + self.assertTrue(dataset.null_code) + + def test_can_use_the_test_data(self): + batch_size = 1 + dataset = get_test_split() + provider = slim.dataset_data_provider.DatasetDataProvider( + dataset, + shuffle=True, + common_queue_capacity=2 * batch_size, + common_queue_min=batch_size) + image_tf, label_tf = provider.get(['image', 'label']) + + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + with slim.queues.QueueRunners(sess): + image_np, label_np = sess.run([image_tf, label_tf]) + + self.assertEqual((150, 600, 3), image_np.shape) + self.assertEqual((37, ), label_np.shape) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/attention_ocr/python/datasets/testdata/fsns/charset_size=134.txt b/models/research/attention_ocr/python/datasets/testdata/fsns/charset_size=134.txt new file mode 100644 index 0000000000000000000000000000000000000000..5c7fcde2ae0ab679f279a083d6de1c50d33ff90b --- /dev/null +++ b/models/research/attention_ocr/python/datasets/testdata/fsns/charset_size=134.txt @@ -0,0 +1,139 @@ +0 +133 +1 l +2 ’ +3 é +4 t +5 e +6 i +7 n +8 s +9 x +10 g +11 u +12 o +13 1 +14 8 +15 7 +16 0 +17 - +18 . 
+19 p +20 a +21 r +22 è +23 d +24 c +25 V +26 v +27 b +28 m +29 ) +30 C +31 z +32 S +33 y +34 , +35 k +36 É +37 A +38 h +39 E +40 » +41 D +42 / +43 H +44 M +45 ( +46 G +47 P +48 ç +2 ' +49 R +50 f +51 " +52 2 +53 j +54 | +55 N +56 6 +57 ° +58 5 +59 T +60 O +61 U +62 3 +63 % +64 9 +65 q +66 Z +67 B +68 K +69 w +70 W +71 : +72 4 +73 L +74 F +75 ] +76 ï +2 ‘ +77 I +78 J +79 ä +80 î +81 ; +82 à +83 ê +84 X +85 ü +86 Y +87 ô +88 = +89 + +90 \ +91 { +92 } +93 _ +94 Q +95 œ +96 ñ +97 * +98 ! +99 Ü +51 “ +100 â +101 Ç +102 Œ +103 û +104 ? +105 $ +106 ë +107 « +108 € +109 & +110 < +51 ” +111 æ +112 # +113 ® +114  +115 È +116 > +117 [ +17 — +118 Æ +119 ù +120 Î +121 Ô +122 ÿ +123 À +124 Ê +125 @ +126 Ï +127 © +128 Ë +129 Ù +130 £ +131 Ÿ +132 Û diff --git a/models/research/attention_ocr/python/datasets/testdata/fsns/download_data.py b/models/research/attention_ocr/python/datasets/testdata/fsns/download_data.py new file mode 100644 index 0000000000000000000000000000000000000000..559e3195f2156af3be97395b5bc8c0d8ea62f174 --- /dev/null +++ b/models/research/attention_ocr/python/datasets/testdata/fsns/download_data.py @@ -0,0 +1,16 @@ +import urllib.request +import tensorflow as tf +import itertools + +URL = 'http://download.tensorflow.org/data/fsns-20160927/testdata/fsns-00000-of-00001' +DST_ORIG = 'fsns-00000-of-00001.orig' +DST = 'fsns-00000-of-00001' +KEEP_NUM_RECORDS = 5 + +print('Downloading %s ...' % URL) +urllib.request.urlretrieve(URL, DST_ORIG) + +print('Writing %d records from %s to %s ...' % (KEEP_NUM_RECORDS, DST_ORIG, DST)) +with tf.io.TFRecordWriter(DST) as writer: + for raw_record in itertools.islice(tf.python_io.tf_record_iterator(DST_ORIG), KEEP_NUM_RECORDS): + writer.write(raw_record) diff --git a/models/research/attention_ocr/python/datasets/testdata/fsns/fsns-00000-of-00001 b/models/research/attention_ocr/python/datasets/testdata/fsns/fsns-00000-of-00001 new file mode 100644 index 0000000000000000000000000000000000000000..4f2f188529777fc0c58b850bac62f1a92f32fb42 Binary files /dev/null and b/models/research/attention_ocr/python/datasets/testdata/fsns/fsns-00000-of-00001 differ diff --git a/models/research/attention_ocr/python/datasets/testdata/fsns/links.txt b/models/research/attention_ocr/python/datasets/testdata/fsns/links.txt new file mode 100644 index 0000000000000000000000000000000000000000..da98d305fa02a61a9ac42b5e5490aa4e0c709b7e --- /dev/null +++ b/models/research/attention_ocr/python/datasets/testdata/fsns/links.txt @@ -0,0 +1 @@ +http://download.tensorflow.org/data/fsns-20160927/testdata/fsns-00000-of-00001 diff --git a/models/research/attention_ocr/python/datasets/unittest_utils.py b/models/research/attention_ocr/python/datasets/unittest_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7f483dbfaf6d47c3292c4b623b0cc4b46f7c38c2 --- /dev/null +++ b/models/research/attention_ocr/python/datasets/unittest_utils.py @@ -0,0 +1,63 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+
+"""Functions to make unit testing easier."""
+
+import numpy as np
+import io
+from PIL import Image as PILImage
+import tensorflow as tf
+
+def create_random_image(image_format, shape):
+  """Creates an image with random values.
+
+  Args:
+    image_format: An image format (PNG or JPEG).
+    shape: A tuple with image shape (including channels).
+
+  Returns:
+    A tuple (image as a numpy array, encoded image bytes).
+  """
+  image = np.random.randint(low=0, high=255, size=shape, dtype='uint8')
+  fd = io.BytesIO()
+  image_pil = PILImage.fromarray(image)
+  image_pil.save(fd, image_format, subsampling=0, quality=100)
+  return image, fd.getvalue()
+
+
+def create_serialized_example(name_to_values):
+  """Creates a tf.Example proto using a dictionary.
+
+  It automatically detects the type of values and defines a corresponding
+  feature.
+
+  Args:
+    name_to_values: A dictionary.
+
+  Returns:
+    tf.Example proto.
+  """
+  example = tf.train.Example()
+  for name, values in name_to_values.items():
+    feature = example.features.feature[name]
+    if isinstance(values[0], str) or isinstance(values[0], bytes):
+      add = feature.bytes_list.value.extend
+    elif isinstance(values[0], float):
+      # The tf.train.Feature proto stores float values in `float_list`.
+      add = feature.float_list.value.extend
+    elif isinstance(values[0], int):
+      add = feature.int64_list.value.extend
+    else:
+      raise AssertionError('Unsupported type: %s' % type(values[0]))
+    add(values)
+  return example.SerializeToString()
diff --git a/models/research/attention_ocr/python/datasets/unittest_utils_test.py b/models/research/attention_ocr/python/datasets/unittest_utils_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..c241387463720c16c6d6b96c236c15e709209ee7
--- /dev/null
+++ b/models/research/attention_ocr/python/datasets/unittest_utils_test.py
@@ -0,0 +1,64 @@
+# Copyright 2017 The TensorFlow Authors All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================== + +"""Tests for unittest_utils.""" + +import numpy as np +import io +from PIL import Image as PILImage +import tensorflow as tf + +from datasets import unittest_utils + + +class UnittestUtilsTest(tf.test.TestCase): + def test_creates_an_image_of_specified_shape(self): + image, _ = unittest_utils.create_random_image('PNG', (10, 20, 3)) + self.assertEqual(image.shape, (10, 20, 3)) + + def test_encoded_image_corresponds_to_numpy_array(self): + image, encoded = unittest_utils.create_random_image('PNG', (20, 10, 3)) + pil_image = PILImage.open(io.BytesIO(encoded)) + self.assertAllEqual(image, np.array(pil_image)) + + def test_created_example_has_correct_values(self): + example_serialized = unittest_utils.create_serialized_example({ + 'labels': [1, 2, 3], + 'data': [b'FAKE'] + }) + example = tf.train.Example() + example.ParseFromString(example_serialized) + self.assertProtoEquals(""" + features { + feature { + key: "labels" + value { int64_list { + value: 1 + value: 2 + value: 3 + }} + } + feature { + key: "data" + value { bytes_list { + value: "FAKE" + }} + } + } + """, example) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/attention_ocr/python/demo_inference.py b/models/research/attention_ocr/python/demo_inference.py new file mode 100644 index 0000000000000000000000000000000000000000..d5fcf2515b85412aad272749cc50f5e81752b35d --- /dev/null +++ b/models/research/attention_ocr/python/demo_inference.py @@ -0,0 +1,96 @@ +"""A script to run inference on a set of image files. + +NOTE #1: The Attention OCR model was trained only using FSNS train dataset and +it will work only for images which look more or less similar to french street +names. In order to apply it to images from a different distribution you need +to retrain (or at least fine-tune) it using images from that distribution. + +NOTE #2: This script exists for demo purposes only. It is highly recommended +to use tools and mechanisms provided by the TensorFlow Serving system to run +inference on TensorFlow models in production: +https://www.tensorflow.org/serving/serving_basic + +Usage: +python demo_inference.py --batch_size=32 \ + --checkpoint=model.ckpt-399731\ + --image_path_pattern=./datasets/data/fsns/temp/fsns_train_%02d.png +""" +import numpy as np +import PIL.Image + +import tensorflow as tf +from tensorflow.python.platform import flags +from tensorflow.python.training import monitored_session + +import common_flags +import datasets +import data_provider + +FLAGS = flags.FLAGS +common_flags.define() + +# e.g. ./datasets/data/fsns/temp/fsns_train_%02d.png +flags.DEFINE_string('image_path_pattern', '', + 'A file pattern with a placeholder for the image index.') + + +def get_dataset_image_size(dataset_name): + # Ideally this info should be exposed through the dataset interface itself. + # But currently it is not available by other means. + ds_module = getattr(datasets, dataset_name) + height, width, _ = ds_module.DEFAULT_CONFIG['image_shape'] + return width, height + + +def load_images(file_pattern, batch_size, dataset_name): + width, height = get_dataset_image_size(dataset_name) + images_actual_data = np.ndarray(shape=(batch_size, height, width, 3), + dtype='uint8') + for i in range(batch_size): + path = file_pattern % i + print("Reading %s" % path) + pil_image = PIL.Image.open(tf.gfile.GFile(path, 'rb')) + images_actual_data[i, ...] 
= np.asarray(pil_image) + return images_actual_data + + +def create_model(batch_size, dataset_name): + width, height = get_dataset_image_size(dataset_name) + dataset = common_flags.create_dataset(split_name=FLAGS.split_name) + model = common_flags.create_model( + num_char_classes=dataset.num_char_classes, + seq_length=dataset.max_sequence_length, + num_views=dataset.num_of_views, + null_code=dataset.null_code, + charset=dataset.charset) + raw_images = tf.placeholder(tf.uint8, shape=[batch_size, height, width, 3]) + images = tf.map_fn(data_provider.preprocess_image, raw_images, + dtype=tf.float32) + endpoints = model.create_base(images, labels_one_hot=None) + return raw_images, endpoints + + +def run(checkpoint, batch_size, dataset_name, image_path_pattern): + images_placeholder, endpoints = create_model(batch_size, + dataset_name) + images_data = load_images(image_path_pattern, batch_size, + dataset_name) + session_creator = monitored_session.ChiefSessionCreator( + checkpoint_filename_with_path=checkpoint) + with monitored_session.MonitoredSession( + session_creator=session_creator) as sess: + predictions = sess.run(endpoints.predicted_text, + feed_dict={images_placeholder: images_data}) + return [pr_bytes.decode('utf-8') for pr_bytes in predictions.tolist()] + + +def main(_): + print("Predicted strings:") + predictions = run(FLAGS.checkpoint, FLAGS.batch_size, FLAGS.dataset_name, + FLAGS.image_path_pattern) + for line in predictions: + print(line) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/attention_ocr/python/demo_inference_test.py b/models/research/attention_ocr/python/demo_inference_test.py new file mode 100644 index 0000000000000000000000000000000000000000..457fb5ab9ef5dbcb326585c2dc8281ee23d319d1 --- /dev/null +++ b/models/research/attention_ocr/python/demo_inference_test.py @@ -0,0 +1,89 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- +import os +import demo_inference +import tensorflow as tf +from tensorflow.python.training import monitored_session + +_CHECKPOINT = 'model.ckpt-399731' +_CHECKPOINT_URL = 'http://download.tensorflow.org/models/attention_ocr_2017_08_09.tar.gz' + + +class DemoInferenceTest(tf.test.TestCase): + def setUp(self): + super(DemoInferenceTest, self).setUp() + for suffix in ['.meta', '.index', '.data-00000-of-00001']: + filename = _CHECKPOINT + suffix + self.assertTrue(tf.gfile.Exists(filename), + msg='Missing checkpoint file %s. 
' + 'Please download and extract it from %s' % + (filename, _CHECKPOINT_URL)) + self._batch_size = 32 + tf.flags.FLAGS.dataset_dir = os.path.join(os.path.dirname(__file__), 'datasets/testdata/fsns') + + def test_moving_variables_properly_loaded_from_a_checkpoint(self): + batch_size = 32 + dataset_name = 'fsns' + images_placeholder, endpoints = demo_inference.create_model(batch_size, + dataset_name) + image_path_pattern = 'testdata/fsns_train_%02d.png' + images_data = demo_inference.load_images(image_path_pattern, batch_size, + dataset_name) + tensor_name = 'AttentionOcr_v1/conv_tower_fn/INCE/InceptionV3/Conv2d_2a_3x3/BatchNorm/moving_mean' + moving_mean_tf = tf.get_default_graph().get_tensor_by_name( + tensor_name + ':0') + reader = tf.train.NewCheckpointReader(_CHECKPOINT) + moving_mean_expected = reader.get_tensor(tensor_name) + + session_creator = monitored_session.ChiefSessionCreator( + checkpoint_filename_with_path=_CHECKPOINT) + with monitored_session.MonitoredSession( + session_creator=session_creator) as sess: + moving_mean_np = sess.run(moving_mean_tf, + feed_dict={images_placeholder: images_data}) + + self.assertAllEqual(moving_mean_expected, moving_mean_np) + + def test_correct_results_on_test_data(self): + image_path_pattern = 'testdata/fsns_train_%02d.png' + predictions = demo_inference.run(_CHECKPOINT, self._batch_size, + 'fsns', + image_path_pattern) + self.assertEqual([ + u'Boulevard de Lunel░░░░░░░░░░░░░░░░░░░', + 'Rue de Provence░░░░░░░░░░░░░░░░░░░░░░', + 'Rue de Port Maria░░░░░░░░░░░░░░░░░░░░', + 'Avenue Charles Gounod░░░░░░░░░░░░░░░░', + 'Rue de l‘Aurore░░░░░░░░░░░░░░░░░░░░░░', + 'Rue de Beuzeville░░░░░░░░░░░░░░░░░░░░', + 'Rue d‘Orbey░░░░░░░░░░░░░░░░░░░░░░░░░░', + 'Rue Victor Schoulcher░░░░░░░░░░░░░░░░', + 'Rue de la Gare░░░░░░░░░░░░░░░░░░░░░░░', + 'Rue des Tulipes░░░░░░░░░░░░░░░░░░░░░░', + 'Rue André Maginot░░░░░░░░░░░░░░░░░░░░', + 'Route de Pringy░░░░░░░░░░░░░░░░░░░░░░', + 'Rue des Landelles░░░░░░░░░░░░░░░░░░░░', + 'Rue des Ilettes░░░░░░░░░░░░░░░░░░░░░░', + 'Avenue de Maurin░░░░░░░░░░░░░░░░░░░░░', + 'Rue Théresa░░░░░░░░░░░░░░░░░░░░░░░░░░', # GT='Rue Thérésa' + 'Route de la Balme░░░░░░░░░░░░░░░░░░░░', + 'Rue Hélène Roederer░░░░░░░░░░░░░░░░░░', + 'Rue Emile Bernard░░░░░░░░░░░░░░░░░░░░', + 'Place de la Mairie░░░░░░░░░░░░░░░░░░░', + 'Rue des Perrots░░░░░░░░░░░░░░░░░░░░░░', + 'Rue de la Libération░░░░░░░░░░░░░░░░░', + 'Impasse du Capcir░░░░░░░░░░░░░░░░░░░░', + 'Avenue de la Grand Mare░░░░░░░░░░░░░░', + 'Rue Pierre Brossolette░░░░░░░░░░░░░░░', + 'Rue de Provence░░░░░░░░░░░░░░░░░░░░░░', + 'Rue du Docteur Mourre░░░░░░░░░░░░░░░░', + 'Rue d‘Ortheuil░░░░░░░░░░░░░░░░░░░░░░░', + 'Rue des Sarments░░░░░░░░░░░░░░░░░░░░░', + 'Rue du Centre░░░░░░░░░░░░░░░░░░░░░░░░', + 'Impasse Pierre Mourgues░░░░░░░░░░░░░░', + 'Rue Marcel Dassault░░░░░░░░░░░░░░░░░░' + ], predictions) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/attention_ocr/python/eval.py b/models/research/attention_ocr/python/eval.py new file mode 100644 index 0000000000000000000000000000000000000000..ec68ad50bc25cd8528f4e9fd7976adad72782641 --- /dev/null +++ b/models/research/attention_ocr/python/eval.py @@ -0,0 +1,78 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Script to evaluate a trained Attention OCR model. + +A simple usage example: +python eval.py +""" +import tensorflow as tf +from tensorflow.contrib import slim +from tensorflow import app +from tensorflow.python.platform import flags + +import data_provider +import common_flags + +FLAGS = flags.FLAGS +common_flags.define() + +# yapf: disable +flags.DEFINE_integer('num_batches', 100, + 'Number of batches to run eval for.') + +flags.DEFINE_string('eval_log_dir', '/tmp/attention_ocr/eval', + 'Directory where the evaluation results are saved to.') + +flags.DEFINE_integer('eval_interval_secs', 60, + 'Frequency in seconds to run evaluations.') + +flags.DEFINE_integer('number_of_steps', None, + 'Number of times to run evaluation.') +# yapf: enable + + +def main(_): + if not tf.gfile.Exists(FLAGS.eval_log_dir): + tf.gfile.MakeDirs(FLAGS.eval_log_dir) + + dataset = common_flags.create_dataset(split_name=FLAGS.split_name) + model = common_flags.create_model(dataset.num_char_classes, + dataset.max_sequence_length, + dataset.num_of_views, dataset.null_code) + data = data_provider.get_data( + dataset, + FLAGS.batch_size, + augment=False, + central_crop_size=common_flags.get_crop_size()) + endpoints = model.create_base(data.images, labels_one_hot=None) + model.create_loss(data, endpoints) + eval_ops = model.create_summaries( + data, endpoints, dataset.charset, is_training=False) + slim.get_or_create_global_step() + session_config = tf.ConfigProto(device_count={"GPU": 0}) + slim.evaluation.evaluation_loop( + master=FLAGS.master, + checkpoint_dir=FLAGS.train_log_dir, + logdir=FLAGS.eval_log_dir, + eval_op=eval_ops, + num_evals=FLAGS.num_batches, + eval_interval_secs=FLAGS.eval_interval_secs, + max_number_of_evaluations=FLAGS.number_of_steps, + session_config=session_config) + + +if __name__ == '__main__': + app.run() diff --git a/models/research/attention_ocr/python/inception_preprocessing.py b/models/research/attention_ocr/python/inception_preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..a4827f2cab742340da2d8d4972c41b35c9862a1e --- /dev/null +++ b/models/research/attention_ocr/python/inception_preprocessing.py @@ -0,0 +1,315 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Provides utilities to preprocess images for the Inception networks.""" + +# TODO(gorban): add as a dependency, when slim or tensorflow/models are pipfied +# Source: +# https://raw.githubusercontent.com/tensorflow/models/a9d0e6e8923a4/slim/preprocessing/inception_preprocessing.py +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from tensorflow.python.ops import control_flow_ops + + +def apply_with_random_selector(x, func, num_cases): + """Computes func(x, sel), with sel sampled from [0...num_cases-1]. + + Args: + x: input Tensor. + func: Python function to apply. + num_cases: Python int32, number of cases to sample sel from. + + Returns: + The result of func(x, sel), where func receives the value of the + selector as a python integer, but sel is sampled dynamically. + """ + sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32) + # Pass the real x only to one of the func calls. + return control_flow_ops.merge([ + func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case) + for case in range(num_cases) + ])[0] + + +def distort_color(image, color_ordering=0, fast_mode=True, scope=None): + """Distort the color of a Tensor image. + + Each color distortion is non-commutative and thus ordering of the color ops + matters. Ideally we would randomly permute the ordering of the color ops. + Rather than adding that level of complication, we select a distinct ordering + of color ops for each preprocessing thread. + + Args: + image: 3-D Tensor containing single image in [0, 1]. + color_ordering: Python int, a type of distortion (valid values: 0-3). + fast_mode: Avoids slower ops (random_hue and random_contrast) + scope: Optional scope for name_scope. + Returns: + 3-D Tensor color-distorted image on range [0, 1] + Raises: + ValueError: if color_ordering not in [0, 3] + """ + with tf.name_scope(scope, 'distort_color', [image]): + if fast_mode: + if color_ordering == 0: + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + else: + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_brightness(image, max_delta=32. / 255.) + else: + if color_ordering == 0: + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.2) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + elif color_ordering == 1: + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.2) + elif color_ordering == 2: + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.2) + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + elif color_ordering == 3: + image = tf.image.random_hue(image, max_delta=0.2) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + image = tf.image.random_brightness(image, max_delta=32. / 255.) 
+ else: + raise ValueError('color_ordering must be in [0, 3]') + + # The random_* ops do not necessarily clamp. + return tf.clip_by_value(image, 0.0, 1.0) + + +def distorted_bounding_box_crop(image, + bbox, + min_object_covered=0.1, + aspect_ratio_range=(0.75, 1.33), + area_range=(0.05, 1.0), + max_attempts=100, + scope=None): + """Generates cropped_image using a one of the bboxes randomly distorted. + + See `tf.image.sample_distorted_bounding_box` for more documentation. + + Args: + image: 3-D Tensor of image (it will be converted to floats in [0, 1]). + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged + as [ymin, xmin, ymax, xmax]. If num_boxes is 0 then it would use the + whole image. + min_object_covered: An optional `float`. Defaults to `0.1`. The cropped + area of the image must contain at least this fraction of any bounding box + supplied. + aspect_ratio_range: An optional list of `floats`. The cropped area of the + image must have an aspect ratio = width / height within this range. + area_range: An optional list of `floats`. The cropped area of the image + must contain a fraction of the supplied image within in this range. + max_attempts: An optional `int`. Number of attempts at generating a cropped + region of the image of the specified constraints. After `max_attempts` + failures, return the entire image. + scope: Optional scope for name_scope. + Returns: + A tuple, a 3-D Tensor cropped_image and the distorted bbox + """ + with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]): + # Each bounding box has shape [1, num_boxes, box coords] and + # the coordinates are ordered [ymin, xmin, ymax, xmax]. + + # A large fraction of image datasets contain a human-annotated bounding + # box delineating the region of the image containing the object of interest. + # We choose to create a new bounding box for the object which is a randomly + # distorted version of the human-annotated bounding box that obeys an + # allowed range of aspect ratios, sizes and overlap with the human-annotated + # bounding box. If no box is supplied, then we assume the bounding box is + # the entire image. + sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( + tf.shape(image), + bounding_boxes=bbox, + min_object_covered=min_object_covered, + aspect_ratio_range=aspect_ratio_range, + area_range=area_range, + max_attempts=max_attempts, + use_image_if_no_bounding_boxes=True) + bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box + + # Crop the image to the specified bounding box. + cropped_image = tf.slice(image, bbox_begin, bbox_size) + return cropped_image, distort_bbox + + +def preprocess_for_train(image, + height, + width, + bbox, + fast_mode=True, + scope=None): + """Distort one image for training a network. + + Distorting images provides a useful technique for augmenting the data + set during training in order to make the network invariant to aspects + of the image that do not effect the label. + + Additionally it would create image_summaries to display the different + transformations applied to the image. + + Args: + image: 3-D Tensor of image. If dtype is tf.float32 then the range should be + [0, 1], otherwise it would converted to tf.float32 assuming that the range + is [0, MAX], where MAX is largest positive representable number for + int(8/16/32) data type (see `tf.image.convert_image_dtype` for details). 
+ height: integer + width: integer + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged + as [ymin, xmin, ymax, xmax]. + fast_mode: Optional boolean, if True avoids slower transformations (i.e. + bi-cubic resizing, random_hue or random_contrast). + scope: Optional scope for name_scope. + Returns: + 3-D float Tensor of distorted image used for training with range [-1, 1]. + """ + with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]): + if bbox is None: + bbox = tf.constant( + [0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) + if image.dtype != tf.float32: + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + # Each bounding box has shape [1, num_boxes, box coords] and + # the coordinates are ordered [ymin, xmin, ymax, xmax]. + image_with_box = tf.image.draw_bounding_boxes( + tf.expand_dims(image, 0), bbox) + tf.summary.image('image_with_bounding_boxes', image_with_box) + + distorted_image, distorted_bbox = distorted_bounding_box_crop(image, bbox) + # Restore the shape since the dynamic slice based upon the bbox_size loses + # the third dimension. + distorted_image.set_shape([None, None, 3]) + image_with_distorted_box = tf.image.draw_bounding_boxes( + tf.expand_dims(image, 0), distorted_bbox) + tf.summary.image('images_with_distorted_bounding_box', + image_with_distorted_box) + + # This resizing operation may distort the images because the aspect + # ratio is not respected. We select a resize method in a round robin + # fashion based on the thread number. + # Note that ResizeMethod contains 4 enumerated resizing methods. + + # We select only 1 case for fast_mode bilinear. + num_resize_cases = 1 if fast_mode else 4 + distorted_image = apply_with_random_selector( + distorted_image, + lambda x, method: tf.image.resize_images(x, [height, width], method=method), + num_cases=num_resize_cases) + + tf.summary.image('cropped_resized_image', + tf.expand_dims(distorted_image, 0)) + + # Randomly flip the image horizontally. + distorted_image = tf.image.random_flip_left_right(distorted_image) + + # Randomly distort the colors. There are 4 ways to do it. + distorted_image = apply_with_random_selector( + distorted_image, + lambda x, ordering: distort_color(x, ordering, fast_mode), + num_cases=4) + + tf.summary.image('final_distorted_image', + tf.expand_dims(distorted_image, 0)) + distorted_image = tf.subtract(distorted_image, 0.5) + distorted_image = tf.multiply(distorted_image, 2.0) + return distorted_image + + +def preprocess_for_eval(image, + height, + width, + central_fraction=0.875, + scope=None): + """Prepare one image for evaluation. + + If height and width are specified it would output an image with that size by + applying resize_bilinear. + + If central_fraction is specified it would cropt the central fraction of the + input image. + + Args: + image: 3-D Tensor of image. If dtype is tf.float32 then the range should be + [0, 1], otherwise it would converted to tf.float32 assuming that the range + is [0, MAX], where MAX is largest positive representable number for + int(8/16/32) data type (see `tf.image.convert_image_dtype` for details) + height: integer + width: integer + central_fraction: Optional Float, fraction of the image to crop. + scope: Optional scope for name_scope. + Returns: + 3-D float Tensor of prepared image. 
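+ The output values are shifted and rescaled from [0, 1] to the [-1, 1] range
+ expected by the Inception-style convolutional tower.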
+ """ + with tf.name_scope(scope, 'eval_image', [image, height, width]): + if image.dtype != tf.float32: + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + # Crop the central region of the image with an area containing 87.5% of + # the original image. + if central_fraction: + image = tf.image.central_crop(image, central_fraction=central_fraction) + + if height and width: + # Resize the image to the specified height and width. + image = tf.expand_dims(image, 0) + image = tf.image.resize_bilinear( + image, [height, width], align_corners=False) + image = tf.squeeze(image, [0]) + image = tf.subtract(image, 0.5) + image = tf.multiply(image, 2.0) + return image + + +def preprocess_image(image, + height, + width, + is_training=False, + bbox=None, + fast_mode=True): + """Pre-process one image for training or evaluation. + + Args: + image: 3-D Tensor [height, width, channels] with the image. + height: integer, image expected height. + width: integer, image expected width. + is_training: Boolean. If true it would transform an image for train, + otherwise it would transform it for evaluation. + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged as + [ymin, xmin, ymax, xmax]. + fast_mode: Optional boolean, if True avoids slower transformations. + + Returns: + 3-D float Tensor containing an appropriately scaled image + + Raises: + ValueError: if user does not provide bounding box + """ + if is_training: + return preprocess_for_train(image, height, width, bbox, fast_mode) + else: + return preprocess_for_eval(image, height, width) diff --git a/models/research/attention_ocr/python/metrics.py b/models/research/attention_ocr/python/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..9e2a6a7579812583dc60546f97976f05befe07ff --- /dev/null +++ b/models/research/attention_ocr/python/metrics.py @@ -0,0 +1,90 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Quality metrics for the model.""" + +import tensorflow as tf + + +def char_accuracy(predictions, targets, rej_char, streaming=False): + """Computes character level accuracy. + + Both predictions and targets should have the same shape + [batch_size x seq_length]. + + Args: + predictions: predicted characters ids. + targets: ground truth character ids. + rej_char: the character id used to mark an empty element (end of sequence). + streaming: if True, uses the streaming mean from the slim.metric module. + + Returns: + a update_ops for execution and value tensor whose value on evaluation + returns the total character accuracy. 
+ """ + with tf.variable_scope('CharAccuracy'): + predictions.get_shape().assert_is_compatible_with(targets.get_shape()) + + targets = tf.to_int32(targets) + const_rej_char = tf.constant(rej_char, shape=targets.get_shape()) + weights = tf.to_float(tf.not_equal(targets, const_rej_char)) + correct_chars = tf.to_float(tf.equal(predictions, targets)) + accuracy_per_example = tf.div( + tf.reduce_sum(tf.multiply(correct_chars, weights), 1), + tf.reduce_sum(weights, 1)) + if streaming: + return tf.contrib.metrics.streaming_mean(accuracy_per_example) + else: + return tf.reduce_mean(accuracy_per_example) + + +def sequence_accuracy(predictions, targets, rej_char, streaming=False): + """Computes sequence level accuracy. + + Both input tensors should have the same shape: [batch_size x seq_length]. + + Args: + predictions: predicted character classes. + targets: ground truth character classes. + rej_char: the character id used to mark empty element (end of sequence). + streaming: if True, uses the streaming mean from the slim.metric module. + + Returns: + a update_ops for execution and value tensor whose value on evaluation + returns the total sequence accuracy. + """ + + with tf.variable_scope('SequenceAccuracy'): + predictions.get_shape().assert_is_compatible_with(targets.get_shape()) + + targets = tf.to_int32(targets) + const_rej_char = tf.constant( + rej_char, shape=targets.get_shape(), dtype=tf.int32) + include_mask = tf.not_equal(targets, const_rej_char) + include_predictions = tf.to_int32( + tf.where(include_mask, predictions, + tf.zeros_like(predictions) + rej_char)) + correct_chars = tf.to_float(tf.equal(include_predictions, targets)) + correct_chars_counts = tf.cast( + tf.reduce_sum(correct_chars, reduction_indices=[1]), dtype=tf.int32) + target_length = targets.get_shape().dims[1].value + target_chars_counts = tf.constant( + target_length, shape=correct_chars_counts.get_shape()) + accuracy_per_example = tf.to_float( + tf.equal(correct_chars_counts, target_chars_counts)) + if streaming: + return tf.contrib.metrics.streaming_mean(accuracy_per_example) + else: + return tf.reduce_mean(accuracy_per_example) diff --git a/models/research/attention_ocr/python/metrics_test.py b/models/research/attention_ocr/python/metrics_test.py new file mode 100644 index 0000000000000000000000000000000000000000..5560ec2c898fe7674715ec54daa08ba9e7471adf --- /dev/null +++ b/models/research/attention_ocr/python/metrics_test.py @@ -0,0 +1,97 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for the metrics module.""" +import contextlib +import numpy as np +import tensorflow as tf + +import metrics + + +class AccuracyTest(tf.test.TestCase): + def setUp(self): + tf.test.TestCase.setUp(self) + self.rng = np.random.RandomState([11, 23, 50]) + self.num_char_classes = 3 + self.batch_size = 4 + self.seq_length = 5 + self.rej_char = 42 + + @contextlib.contextmanager + def initialized_session(self): + """Wrapper for test session context manager with required initialization. + + Yields: + A session object that should be used as a context manager. + """ + with self.cached_session() as sess: + sess.run(tf.global_variables_initializer()) + sess.run(tf.local_variables_initializer()) + yield sess + + def _fake_labels(self): + return self.rng.randint( + low=0, + high=self.num_char_classes, + size=(self.batch_size, self.seq_length), + dtype='int32') + + def _incorrect_copy(self, values, bad_indexes): + incorrect = np.copy(values) + incorrect[bad_indexes] = values[bad_indexes] + 1 + return incorrect + + def test_sequence_accuracy_identical_samples(self): + labels_tf = tf.convert_to_tensor(self._fake_labels()) + + accuracy_tf = metrics.sequence_accuracy(labels_tf, labels_tf, + self.rej_char) + with self.initialized_session() as sess: + accuracy_np = sess.run(accuracy_tf) + + self.assertAlmostEqual(accuracy_np, 1.0) + + def test_sequence_accuracy_one_char_difference(self): + ground_truth_np = self._fake_labels() + ground_truth_tf = tf.convert_to_tensor(ground_truth_np) + prediction_tf = tf.convert_to_tensor( + self._incorrect_copy(ground_truth_np, bad_indexes=((0, 0)))) + + accuracy_tf = metrics.sequence_accuracy(prediction_tf, ground_truth_tf, + self.rej_char) + with self.initialized_session() as sess: + accuracy_np = sess.run(accuracy_tf) + + # 1 of 4 sequences is incorrect. + self.assertAlmostEqual(accuracy_np, 1.0 - 1.0 / self.batch_size) + + def test_char_accuracy_one_char_difference_with_padding(self): + ground_truth_np = self._fake_labels() + ground_truth_tf = tf.convert_to_tensor(ground_truth_np) + prediction_tf = tf.convert_to_tensor( + self._incorrect_copy(ground_truth_np, bad_indexes=((0, 0)))) + + accuracy_tf = metrics.char_accuracy(prediction_tf, ground_truth_tf, + self.rej_char) + with self.initialized_session() as sess: + accuracy_np = sess.run(accuracy_tf) + + chars_count = self.seq_length * self.batch_size + self.assertAlmostEqual(accuracy_np, 1.0 - 1.0 / chars_count) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/attention_ocr/python/model.py b/models/research/attention_ocr/python/model.py new file mode 100644 index 0000000000000000000000000000000000000000..c633c5c39a0463c026cc944218cd2cc0ea7ebfb0 --- /dev/null +++ b/models/research/attention_ocr/python/model.py @@ -0,0 +1,583 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Functions to build the Attention OCR model. + +Usage example: + ocr_model = model.Model(num_char_classes, seq_length, num_of_views) + + data = ... # create namedtuple InputEndpoints + endpoints = model.create_base(data.images, data.labels_one_hot) + # endpoints.predicted_chars is a tensor with predicted character codes. + total_loss = model.create_loss(data, endpoints) +""" +import sys +import collections +import logging +import tensorflow as tf +from tensorflow.contrib import slim +from tensorflow.contrib.slim.nets import inception + +import metrics +import sequence_layers +import utils + +OutputEndpoints = collections.namedtuple('OutputEndpoints', [ + 'chars_logit', 'chars_log_prob', 'predicted_chars', 'predicted_scores', + 'predicted_text' +]) + +# TODO(gorban): replace with tf.HParams when it is released. +ModelParams = collections.namedtuple('ModelParams', [ + 'num_char_classes', 'seq_length', 'num_views', 'null_code' +]) + +ConvTowerParams = collections.namedtuple('ConvTowerParams', ['final_endpoint']) + +SequenceLogitsParams = collections.namedtuple('SequenceLogitsParams', [ + 'use_attention', 'use_autoregression', 'num_lstm_units', 'weight_decay', + 'lstm_state_clip_value' +]) + +SequenceLossParams = collections.namedtuple('SequenceLossParams', [ + 'label_smoothing', 'ignore_nulls', 'average_across_timesteps' +]) + +EncodeCoordinatesParams = collections.namedtuple('EncodeCoordinatesParams', [ + 'enabled' +]) + + +def _dict_to_array(id_to_char, default_character): + num_char_classes = max(id_to_char.keys()) + 1 + array = [default_character] * num_char_classes + for k, v in id_to_char.items(): + array[k] = v + return array + + +class CharsetMapper(object): + """A simple class to map tensor ids into strings. + + It works only when the character set is 1:1 mapping between individual + characters and individual ids. + + Make sure you call tf.tables_initializer().run() as part of the init op. + """ + + def __init__(self, charset, default_character='?'): + """Creates a lookup table. + + Args: + charset: a dictionary with id-to-character mapping. + """ + mapping_strings = tf.constant(_dict_to_array(charset, default_character)) + self.table = tf.contrib.lookup.index_to_string_table_from_tensor( + mapping=mapping_strings, default_value=default_character) + + def get_text(self, ids): + """Returns a string corresponding to a sequence of character ids. + + Args: + ids: a tensor with shape [batch_size, max_sequence_length] + """ + return tf.reduce_join( + self.table.lookup(tf.to_int64(ids)), reduction_indices=1) + + +def get_softmax_loss_fn(label_smoothing): + """Returns sparse or dense loss function depending on the label_smoothing. + + Args: + label_smoothing: weight for label smoothing + + Returns: + a function which takes labels and predictions as arguments and returns + a softmax loss for the selected type of labels (sparse or dense). + """ + if label_smoothing > 0: + + def loss_fn(labels, logits): + return (tf.nn.softmax_cross_entropy_with_logits( + logits=logits, labels=labels)) + else: + + def loss_fn(labels, logits): + return tf.nn.sparse_softmax_cross_entropy_with_logits( + logits=logits, labels=labels) + + return loss_fn + + +class Model(object): + """Class to create the Attention OCR Model.""" + + def __init__(self, + num_char_classes, + seq_length, + num_views, + null_code, + mparams=None, + charset=None): + """Initialized model parameters. + + Args: + num_char_classes: size of character set. 
+ seq_length: number of characters in a sequence. + num_views: Number of views (conv towers) to use. + null_code: A character code corresponding to a character which + indicates end of a sequence. + mparams: a dictionary with hyper parameters for methods, keys - + function names, values - corresponding namedtuples. + charset: an optional dictionary with a mapping between character ids and + utf8 strings. If specified the OutputEndpoints.predicted_text will + utf8 encoded strings corresponding to the character ids returned by + OutputEndpoints.predicted_chars (by default the predicted_text contains + an empty vector). + NOTE: Make sure you call tf.tables_initializer().run() if the charset + specified. + """ + super(Model, self).__init__() + self._params = ModelParams( + num_char_classes=num_char_classes, + seq_length=seq_length, + num_views=num_views, + null_code=null_code) + self._mparams = self.default_mparams() + if mparams: + self._mparams.update(mparams) + self._charset = charset + + def default_mparams(self): + return { + 'conv_tower_fn': + ConvTowerParams(final_endpoint='Mixed_5d'), + 'sequence_logit_fn': + SequenceLogitsParams( + use_attention=True, + use_autoregression=True, + num_lstm_units=256, + weight_decay=0.00004, + lstm_state_clip_value=10.0), + 'sequence_loss_fn': + SequenceLossParams( + label_smoothing=0.1, + ignore_nulls=True, + average_across_timesteps=False), + 'encode_coordinates_fn': EncodeCoordinatesParams(enabled=False) + } + + def set_mparam(self, function, **kwargs): + self._mparams[function] = self._mparams[function]._replace(**kwargs) + + def conv_tower_fn(self, images, is_training=True, reuse=None): + """Computes convolutional features using the InceptionV3 model. + + Args: + images: A tensor of shape [batch_size, height, width, channels]. + is_training: whether is training or not. + reuse: whether or not the network and its variables should be reused. To + be able to reuse 'scope' must be given. + + Returns: + A tensor of shape [batch_size, OH, OW, N], where OWxOH is resolution of + output feature map and N is number of output features (depends on the + network architecture). + """ + mparams = self._mparams['conv_tower_fn'] + logging.debug('Using final_endpoint=%s', mparams.final_endpoint) + with tf.variable_scope('conv_tower_fn/INCE'): + if reuse: + tf.get_variable_scope().reuse_variables() + with slim.arg_scope(inception.inception_v3_arg_scope()): + with slim.arg_scope([slim.batch_norm, slim.dropout], + is_training=is_training): + net, _ = inception.inception_v3_base( + images, final_endpoint=mparams.final_endpoint) + return net + + def _create_lstm_inputs(self, net): + """Splits an input tensor into a list of tensors (features). + + Args: + net: A feature map of shape [batch_size, num_features, feature_size]. + + Raises: + AssertionError: if num_features is less than seq_length. 
+ + Returns: + A list with seq_length tensors of shape [batch_size, feature_size] + """ + num_features = net.get_shape().dims[1].value + if num_features < self._params.seq_length: + raise AssertionError('Incorrect dimension #1 of input tensor' + ' %d should be bigger than %d (shape=%s)' % + (num_features, self._params.seq_length, + net.get_shape())) + elif num_features > self._params.seq_length: + logging.warning('Ignoring some features: use %d of %d (shape=%s)', + self._params.seq_length, num_features, net.get_shape()) + net = tf.slice(net, [0, 0, 0], [-1, self._params.seq_length, -1]) + + return tf.unstack(net, axis=1) + + def sequence_logit_fn(self, net, labels_one_hot): + mparams = self._mparams['sequence_logit_fn'] + # TODO(gorban): remove /alias suffixes from the scopes. + with tf.variable_scope('sequence_logit_fn/SQLR'): + layer_class = sequence_layers.get_layer_class(mparams.use_attention, + mparams.use_autoregression) + layer = layer_class(net, labels_one_hot, self._params, mparams) + return layer.create_logits() + + def max_pool_views(self, nets_list): + """Max pool across all nets in spatial dimensions. + + Args: + nets_list: A list of 4D tensors with identical size. + + Returns: + A tensor with the same size as any input tensors. + """ + batch_size, height, width, num_features = [ + d.value for d in nets_list[0].get_shape().dims + ] + xy_flat_shape = (batch_size, 1, height * width, num_features) + nets_for_merge = [] + with tf.variable_scope('max_pool_views', values=nets_list): + for net in nets_list: + nets_for_merge.append(tf.reshape(net, xy_flat_shape)) + merged_net = tf.concat(nets_for_merge, 1) + net = slim.max_pool2d( + merged_net, kernel_size=[len(nets_list), 1], stride=1) + net = tf.reshape(net, (batch_size, height, width, num_features)) + return net + + def pool_views_fn(self, nets): + """Combines output of multiple convolutional towers into a single tensor. + + It stacks towers one on top another (in height dim) in a 4x1 grid. + The order is arbitrary design choice and shouldn't matter much. + + Args: + nets: list of tensors of shape=[batch_size, height, width, num_features]. + + Returns: + A tensor of shape [batch_size, seq_length, features_size]. + """ + with tf.variable_scope('pool_views_fn/STCK'): + net = tf.concat(nets, 1) + batch_size = net.get_shape().dims[0].value + feature_size = net.get_shape().dims[3].value + return tf.reshape(net, [batch_size, -1, feature_size]) + + def char_predictions(self, chars_logit): + """Returns confidence scores (softmax values) for predicted characters. + + Args: + chars_logit: chars logits, a tensor with shape + [batch_size x seq_length x num_char_classes] + + Returns: + A tuple (ids, log_prob, scores), where: + ids - predicted characters, a int32 tensor with shape + [batch_size x seq_length]; + log_prob - a log probability of all characters, a float tensor with + shape [batch_size, seq_length, num_char_classes]; + scores - corresponding confidence scores for characters, a float + tensor + with shape [batch_size x seq_length]. 
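+
+ The scores are obtained by taking the softmax of chars_logit and gathering,
+ for each sequence position, the probability of the predicted (argmax)
+ character id.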
+ """ + log_prob = utils.logits_to_log_prob(chars_logit) + ids = tf.to_int32(tf.argmax(log_prob, axis=2), name='predicted_chars') + mask = tf.cast( + slim.one_hot_encoding(ids, self._params.num_char_classes), tf.bool) + all_scores = tf.nn.softmax(chars_logit) + selected_scores = tf.boolean_mask(all_scores, mask, name='char_scores') + scores = tf.reshape(selected_scores, shape=(-1, self._params.seq_length)) + return ids, log_prob, scores + + def encode_coordinates_fn(self, net): + """Adds one-hot encoding of coordinates to different views in the networks. + + For each "pixel" of a feature map it adds a onehot encoded x and y + coordinates. + + Args: + net: a tensor of shape=[batch_size, height, width, num_features] + + Returns: + a tensor with the same height and width, but altered feature_size. + """ + mparams = self._mparams['encode_coordinates_fn'] + if mparams.enabled: + batch_size, h, w, _ = net.shape.as_list() + x, y = tf.meshgrid(tf.range(w), tf.range(h)) + w_loc = slim.one_hot_encoding(x, num_classes=w) + h_loc = slim.one_hot_encoding(y, num_classes=h) + loc = tf.concat([h_loc, w_loc], 2) + loc = tf.tile(tf.expand_dims(loc, 0), [batch_size, 1, 1, 1]) + return tf.concat([net, loc], 3) + else: + return net + + def create_base(self, + images, + labels_one_hot, + scope='AttentionOcr_v1', + reuse=None): + """Creates a base part of the Model (no gradients, losses or summaries). + + Args: + images: A tensor of shape [batch_size, height, width, channels]. + labels_one_hot: Optional (can be None) one-hot encoding for ground truth + labels. If provided the function will create a model for training. + scope: Optional variable_scope. + reuse: whether or not the network and its variables should be reused. To + be able to reuse 'scope' must be given. + + Returns: + A named tuple OutputEndpoints. + """ + logging.debug('images: %s', images) + is_training = labels_one_hot is not None + with tf.variable_scope(scope, reuse=reuse): + views = tf.split( + value=images, num_or_size_splits=self._params.num_views, axis=2) + logging.debug('Views=%d single view: %s', len(views), views[0]) + + nets = [ + self.conv_tower_fn(v, is_training, reuse=(i != 0)) + for i, v in enumerate(views) + ] + logging.debug('Conv tower: %s', nets[0]) + + nets = [self.encode_coordinates_fn(net) for net in nets] + logging.debug('Conv tower w/ encoded coordinates: %s', nets[0]) + + net = self.pool_views_fn(nets) + logging.debug('Pooled views: %s', net) + + chars_logit = self.sequence_logit_fn(net, labels_one_hot) + logging.debug('chars_logit: %s', chars_logit) + + predicted_chars, chars_log_prob, predicted_scores = ( + self.char_predictions(chars_logit)) + if self._charset: + character_mapper = CharsetMapper(self._charset) + predicted_text = character_mapper.get_text(predicted_chars) + else: + predicted_text = tf.constant([]) + return OutputEndpoints( + chars_logit=chars_logit, + chars_log_prob=chars_log_prob, + predicted_chars=predicted_chars, + predicted_scores=predicted_scores, + predicted_text=predicted_text) + + def create_loss(self, data, endpoints): + """Creates all losses required to train the model. + + Args: + data: InputEndpoints namedtuple. + endpoints: Model namedtuple. + + Returns: + Total loss. + """ + # NOTE: the return value of ModelLoss is not used directly for the + # gradient computation because under the hood it calls slim.losses.AddLoss, + # which registers the loss in an internal collection and later returns it + # as part of GetTotalLoss. 
We need to use total loss because model may have + # multiple losses including regularization losses. + self.sequence_loss_fn(endpoints.chars_logit, data.labels) + total_loss = slim.losses.get_total_loss() + tf.summary.scalar('TotalLoss', total_loss) + return total_loss + + def label_smoothing_regularization(self, chars_labels, weight=0.1): + """Applies a label smoothing regularization. + + Uses the same method as in https://arxiv.org/abs/1512.00567. + + Args: + chars_labels: ground truth ids of charactes, + shape=[batch_size, seq_length]; + weight: label-smoothing regularization weight. + + Returns: + A sensor with the same shape as the input. + """ + one_hot_labels = tf.one_hot( + chars_labels, depth=self._params.num_char_classes, axis=-1) + pos_weight = 1.0 - weight + neg_weight = weight / self._params.num_char_classes + return one_hot_labels * pos_weight + neg_weight + + def sequence_loss_fn(self, chars_logits, chars_labels): + """Loss function for char sequence. + + Depending on values of hyper parameters it applies label smoothing and can + also ignore all null chars after the first one. + + Args: + chars_logits: logits for predicted characters, + shape=[batch_size, seq_length, num_char_classes]; + chars_labels: ground truth ids of characters, + shape=[batch_size, seq_length]; + mparams: method hyper parameters. + + Returns: + A Tensor with shape [batch_size] - the log-perplexity for each sequence. + """ + mparams = self._mparams['sequence_loss_fn'] + with tf.variable_scope('sequence_loss_fn/SLF'): + if mparams.label_smoothing > 0: + smoothed_one_hot_labels = self.label_smoothing_regularization( + chars_labels, mparams.label_smoothing) + labels_list = tf.unstack(smoothed_one_hot_labels, axis=1) + else: + # NOTE: in case of sparse softmax we are not using one-hot + # encoding. + labels_list = tf.unstack(chars_labels, axis=1) + + batch_size, seq_length, _ = chars_logits.shape.as_list() + if mparams.ignore_nulls: + weights = tf.ones((batch_size, seq_length), dtype=tf.float32) + else: + # Suppose that reject character is the last in the charset. + reject_char = tf.constant( + self._params.num_char_classes - 1, + shape=(batch_size, seq_length), + dtype=tf.int64) + known_char = tf.not_equal(chars_labels, reject_char) + weights = tf.to_float(known_char) + + logits_list = tf.unstack(chars_logits, axis=1) + weights_list = tf.unstack(weights, axis=1) + loss = tf.contrib.legacy_seq2seq.sequence_loss( + logits_list, + labels_list, + weights_list, + softmax_loss_function=get_softmax_loss_fn(mparams.label_smoothing), + average_across_timesteps=mparams.average_across_timesteps) + tf.losses.add_loss(loss) + return loss + + def create_summaries(self, data, endpoints, charset, is_training): + """Creates all summaries for the model. + + Args: + data: InputEndpoints namedtuple. + endpoints: OutputEndpoints namedtuple. + charset: A dictionary with mapping between character codes and + unicode characters. Use the one provided by a dataset.charset. + is_training: If True will create summary prefixes for training job, + otherwise - for evaluation. + + Returns: + A list of evaluation ops + """ + + def sname(label): + prefix = 'train' if is_training else 'eval' + return '%s/%s' % (prefix, label) + + max_outputs = 4 + # TODO(gorban): uncomment, when tf.summary.text released. 
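+ # The commented-out block below illustrates how predicted and ground truth
+ # strings could be added as text summaries once tf.summary.text is available.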
+ # charset_mapper = CharsetMapper(charset) + # pr_text = charset_mapper.get_text( + # endpoints.predicted_chars[:max_outputs,:]) + # tf.summary.text(sname('text/pr'), pr_text) + # gt_text = charset_mapper.get_text(data.labels[:max_outputs,:]) + # tf.summary.text(sname('text/gt'), gt_text) + tf.summary.image(sname('image'), data.images, max_outputs=max_outputs) + + if is_training: + tf.summary.image( + sname('image/orig'), data.images_orig, max_outputs=max_outputs) + for var in tf.trainable_variables(): + tf.summary.histogram(var.op.name, var) + return None + + else: + names_to_values = {} + names_to_updates = {} + + def use_metric(name, value_update_tuple): + names_to_values[name] = value_update_tuple[0] + names_to_updates[name] = value_update_tuple[1] + + use_metric('CharacterAccuracy', + metrics.char_accuracy( + endpoints.predicted_chars, + data.labels, + streaming=True, + rej_char=self._params.null_code)) + # Sequence accuracy computed by cutting sequence at the first null char + use_metric('SequenceAccuracy', + metrics.sequence_accuracy( + endpoints.predicted_chars, + data.labels, + streaming=True, + rej_char=self._params.null_code)) + + for name, value in names_to_values.items(): + summary_name = 'eval/' + name + tf.summary.scalar(summary_name, tf.Print(value, [value], summary_name)) + return list(names_to_updates.values()) + + def create_init_fn_to_restore(self, master_checkpoint, + inception_checkpoint=None): + """Creates an init operations to restore weights from various checkpoints. + + Args: + master_checkpoint: path to a checkpoint which contains all weights for + the whole model. + inception_checkpoint: path to a checkpoint which contains weights for the + inception part only. + + Returns: + a function to run initialization ops. + """ + all_assign_ops = [] + all_feed_dict = {} + + def assign_from_checkpoint(variables, checkpoint): + logging.info('Request to re-store %d weights from %s', + len(variables), checkpoint) + if not variables: + logging.error('Can\'t find any variables to restore.') + sys.exit(1) + assign_op, feed_dict = slim.assign_from_checkpoint(checkpoint, variables) + all_assign_ops.append(assign_op) + all_feed_dict.update(feed_dict) + + logging.info('variables_to_restore:\n%s' % utils.variables_to_restore().keys()) + logging.info('moving_average_variables:\n%s' % [v.op.name for v in tf.moving_average_variables()]) + logging.info('trainable_variables:\n%s' % [v.op.name for v in tf.trainable_variables()]) + if master_checkpoint: + assign_from_checkpoint(utils.variables_to_restore(), master_checkpoint) + + if inception_checkpoint: + variables = utils.variables_to_restore( + 'AttentionOcr_v1/conv_tower_fn/INCE', strip_scope=True) + assign_from_checkpoint(variables, inception_checkpoint) + + def init_assign_fn(sess): + logging.info('Restoring checkpoint(s)') + sess.run(all_assign_ops, all_feed_dict) + + return init_assign_fn diff --git a/models/research/attention_ocr/python/model_test.py b/models/research/attention_ocr/python/model_test.py new file mode 100644 index 0000000000000000000000000000000000000000..9b47d2b06e50ea57cf8de2109102c3e0c60606a4 --- /dev/null +++ b/models/research/attention_ocr/python/model_test.py @@ -0,0 +1,278 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for the model.""" + +import numpy as np +import string +import tensorflow as tf +from tensorflow.contrib import slim + +import model +import data_provider + + +def create_fake_charset(num_char_classes): + charset = {} + for i in range(num_char_classes): + charset[i] = string.printable[i % len(string.printable)] + return charset + + +class ModelTest(tf.test.TestCase): + def setUp(self): + tf.test.TestCase.setUp(self) + + self.rng = np.random.RandomState([11, 23, 50]) + + self.batch_size = 4 + self.image_width = 600 + self.image_height = 30 + self.seq_length = 40 + self.num_char_classes = 72 + self.null_code = 62 + self.num_views = 4 + + feature_size = 288 + self.conv_tower_shape = (self.batch_size, 1, 72, feature_size) + self.features_shape = (self.batch_size, self.seq_length, feature_size) + self.chars_logit_shape = (self.batch_size, self.seq_length, + self.num_char_classes) + self.length_logit_shape = (self.batch_size, self.seq_length + 1) + + self.initialize_fakes() + + def initialize_fakes(self): + self.images_shape = (self.batch_size, self.image_height, self.image_width, + 3) + self.fake_images = tf.constant( + self.rng.randint(low=0, high=255, + size=self.images_shape).astype('float32'), + name='input_node') + self.fake_conv_tower_np = self.rng.randn( + *self.conv_tower_shape).astype('float32') + self.fake_conv_tower = tf.constant(self.fake_conv_tower_np) + self.fake_logits = tf.constant( + self.rng.randn(*self.chars_logit_shape).astype('float32')) + self.fake_labels = tf.constant( + self.rng.randint( + low=0, + high=self.num_char_classes, + size=(self.batch_size, self.seq_length)).astype('int64')) + + def create_model(self, charset=None): + return model.Model( + self.num_char_classes, self.seq_length, num_views=4, null_code=62, + charset=charset) + + def test_char_related_shapes(self): + ocr_model = self.create_model() + with self.test_session() as sess: + endpoints_tf = ocr_model.create_base( + images=self.fake_images, labels_one_hot=None) + + sess.run(tf.global_variables_initializer()) + endpoints = sess.run(endpoints_tf) + + self.assertEqual((self.batch_size, self.seq_length, + self.num_char_classes), endpoints.chars_logit.shape) + self.assertEqual((self.batch_size, self.seq_length, + self.num_char_classes), endpoints.chars_log_prob.shape) + self.assertEqual((self.batch_size, self.seq_length), + endpoints.predicted_chars.shape) + self.assertEqual((self.batch_size, self.seq_length), + endpoints.predicted_scores.shape) + + def test_predicted_scores_are_within_range(self): + ocr_model = self.create_model() + + _, _, scores = ocr_model.char_predictions(self.fake_logits) + with self.test_session() as sess: + scores_np = sess.run(scores) + + values_in_range = (scores_np >= 0.0) & (scores_np <= 1.0) + self.assertTrue( + np.all(values_in_range), + msg=('Scores contains out of the range values %s' % + scores_np[np.logical_not(values_in_range)])) + + def test_conv_tower_shape(self): + with self.test_session() as sess: + ocr_model = self.create_model() + conv_tower = 
ocr_model.conv_tower_fn(self.fake_images) + + sess.run(tf.global_variables_initializer()) + conv_tower_np = sess.run(conv_tower) + + self.assertEqual(self.conv_tower_shape, conv_tower_np.shape) + + def test_model_size_less_then1_gb(self): + # NOTE: Actual amount of memory occupied my TF during training will be at + # least 4X times bigger because of space need to store original weights, + # updates, gradients and variances. It also depends on the type of used + # optimizer. + ocr_model = self.create_model() + ocr_model.create_base(images=self.fake_images, labels_one_hot=None) + with self.test_session() as sess: + tfprof_root = tf.profiler.profile( + sess.graph, + options=tf.profiler.ProfileOptionBuilder.trainable_variables_parameter()) + + model_size_bytes = 4 * tfprof_root.total_parameters + self.assertLess(model_size_bytes, 1 * 2**30) + + def test_create_summaries_is_runnable(self): + ocr_model = self.create_model() + data = data_provider.InputEndpoints( + images=self.fake_images, + images_orig=self.fake_images, + labels=self.fake_labels, + labels_one_hot=slim.one_hot_encoding(self.fake_labels, + self.num_char_classes)) + endpoints = ocr_model.create_base( + images=self.fake_images, labels_one_hot=None) + charset = create_fake_charset(self.num_char_classes) + summaries = ocr_model.create_summaries( + data, endpoints, charset, is_training=False) + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + sess.run(tf.local_variables_initializer()) + tf.tables_initializer().run() + sess.run(summaries) # just check it is runnable + + def test_sequence_loss_function_without_label_smoothing(self): + model = self.create_model() + model.set_mparam('sequence_loss_fn', label_smoothing=0) + + loss = model.sequence_loss_fn(self.fake_logits, self.fake_labels) + with self.test_session() as sess: + loss_np = sess.run(loss) + + # This test checks that the loss function is 'runnable'. + self.assertEqual(loss_np.shape, tuple()) + + def encode_coordinates_alt(self, net): + """An alternative implemenation for the encoding coordinates. + + Args: + net: a tensor of shape=[batch_size, height, width, num_features] + + Returns: + a list of tensors with encoded image coordinates in them. 
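+
+ This slower, loop-based construction is kept only as an independent
+ cross-check for Model.encode_coordinates_fn in the tests below.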
+ """ + batch_size, h, w, _ = net.shape.as_list() + h_loc = [ + tf.tile( + tf.reshape( + tf.contrib.layers.one_hot_encoding( + tf.constant([i]), num_classes=h), [h, 1]), [1, w]) + for i in range(h) + ] + h_loc = tf.concat([tf.expand_dims(t, 2) for t in h_loc], 2) + w_loc = [ + tf.tile( + tf.contrib.layers.one_hot_encoding(tf.constant([i]), num_classes=w), + [h, 1]) for i in range(w) + ] + w_loc = tf.concat([tf.expand_dims(t, 2) for t in w_loc], 2) + loc = tf.concat([h_loc, w_loc], 2) + loc = tf.tile(tf.expand_dims(loc, 0), [batch_size, 1, 1, 1]) + return tf.concat([net, loc], 3) + + def test_encoded_coordinates_have_correct_shape(self): + model = self.create_model() + model.set_mparam('encode_coordinates_fn', enabled=True) + conv_w_coords_tf = model.encode_coordinates_fn(self.fake_conv_tower) + + with self.test_session() as sess: + conv_w_coords = sess.run(conv_w_coords_tf) + + batch_size, height, width, feature_size = self.conv_tower_shape + self.assertEqual(conv_w_coords.shape, (batch_size, height, width, + feature_size + height + width)) + + def test_disabled_coordinate_encoding_returns_features_unchanged(self): + model = self.create_model() + model.set_mparam('encode_coordinates_fn', enabled=False) + conv_w_coords_tf = model.encode_coordinates_fn(self.fake_conv_tower) + + with self.test_session() as sess: + conv_w_coords = sess.run(conv_w_coords_tf) + + self.assertAllEqual(conv_w_coords, self.fake_conv_tower_np) + + def test_coordinate_encoding_is_correct_for_simple_example(self): + shape = (1, 2, 3, 4) # batch_size, height, width, feature_size + fake_conv_tower = tf.constant(2 * np.ones(shape), dtype=tf.float32) + model = self.create_model() + model.set_mparam('encode_coordinates_fn', enabled=True) + conv_w_coords_tf = model.encode_coordinates_fn(fake_conv_tower) + + with self.test_session() as sess: + conv_w_coords = sess.run(conv_w_coords_tf) + + # Original features + self.assertAllEqual(conv_w_coords[0, :, :, :4], + [[[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]], + [[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]]]) + # Encoded coordinates + self.assertAllEqual(conv_w_coords[0, :, :, 4:], + [[[1, 0, 1, 0, 0], [1, 0, 0, 1, 0], [1, 0, 0, 0, 1]], + [[0, 1, 1, 0, 0], [0, 1, 0, 1, 0], [0, 1, 0, 0, 1]]]) + + def test_alt_implementation_of_coordinate_encoding_returns_same_values(self): + model = self.create_model() + model.set_mparam('encode_coordinates_fn', enabled=True) + conv_w_coords_tf = model.encode_coordinates_fn(self.fake_conv_tower) + conv_w_coords_alt_tf = self.encode_coordinates_alt(self.fake_conv_tower) + + with self.test_session() as sess: + conv_w_coords_tf, conv_w_coords_alt_tf = sess.run( + [conv_w_coords_tf, conv_w_coords_alt_tf]) + + self.assertAllEqual(conv_w_coords_tf, conv_w_coords_alt_tf) + + def test_predicted_text_has_correct_shape_w_charset(self): + charset = create_fake_charset(self.num_char_classes) + ocr_model = self.create_model(charset=charset) + + with self.test_session() as sess: + endpoints_tf = ocr_model.create_base( + images=self.fake_images, labels_one_hot=None) + + sess.run(tf.global_variables_initializer()) + tf.tables_initializer().run() + endpoints = sess.run(endpoints_tf) + + self.assertEqual(endpoints.predicted_text.shape, (self.batch_size,)) + self.assertEqual(len(endpoints.predicted_text[0]), self.seq_length) + + +class CharsetMapperTest(tf.test.TestCase): + def test_text_corresponds_to_ids(self): + charset = create_fake_charset(36) + ids = tf.constant( + [[17, 14, 21, 21, 24], [32, 24, 27, 21, 13]], dtype=tf.int64) + charset_mapper = 
model.CharsetMapper(charset) + + with self.test_session() as sess: + tf.tables_initializer().run() + text = sess.run(charset_mapper.get_text(ids)) + + self.assertAllEqual(text, [b'hello', b'world']) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/attention_ocr/python/sequence_layers.py b/models/research/attention_ocr/python/sequence_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..9261f210ba5c28cc243098de17db850e3f90c2c4 --- /dev/null +++ b/models/research/attention_ocr/python/sequence_layers.py @@ -0,0 +1,422 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Various implementations of sequence layers for character prediction. + +A 'sequence layer' is a part of a computation graph which is responsible of +producing a sequence of characters using extracted image features. There are +many reasonable ways to implement such layers. All of them are using RNNs. +This module provides implementations which uses 'attention' mechanism to +spatially 'pool' image features and also can use a previously predicted +character to predict the next (aka auto regression). + +Usage: + Select one of available classes, e.g. Attention or use a wrapper function to + pick one based on your requirements: + layer_class = sequence_layers.get_layer_class(use_attention=True, + use_autoregression=True) + layer = layer_class(net, labels_one_hot, model_params, method_params) + char_logits = layer.create_logits() +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import abc +import logging +import numpy as np + +import tensorflow as tf + +from tensorflow.contrib import slim + + +def orthogonal_initializer(shape, dtype=tf.float32, *args, **kwargs): + """Generates orthonormal matrices with random values. + + Orthonormal initialization is important for RNNs: + http://arxiv.org/abs/1312.6120 + http://smerity.com/articles/2016/orthogonal_init.html + + For non-square shapes the returned matrix will be semi-orthonormal: if the + number of columns exceeds the number of rows, then the rows are orthonormal + vectors; but if the number of rows exceeds the number of columns, then the + columns are orthonormal vectors. + + We use SVD decomposition to generate an orthonormal matrix with random + values. The same way as it is done in the Lasagne library for Theano. Note + that both u and v returned by the svd are orthogonal and random. We just need + to pick one with the right shape. + + Args: + shape: a shape of the tensor matrix to initialize. + dtype: a dtype of the initialized tensor. + *args: not used. + **kwargs: not used. + + Returns: + An initialized tensor. 
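+
+ A hypothetical usage sketch:
+   w = orthogonal_initializer([256, 72])
+ Since the rows outnumber the columns here, the 72 columns of w are
+ orthonormal, so tf.matmul(w, w, transpose_a=True) is approximately the
+ 72x72 identity matrix.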
+ """ + del args + del kwargs + flat_shape = (shape[0], np.prod(shape[1:])) + w = np.random.randn(*flat_shape) + u, _, v = np.linalg.svd(w, full_matrices=False) + w = u if u.shape == flat_shape else v + return tf.constant(w.reshape(shape), dtype=dtype) + + +SequenceLayerParams = collections.namedtuple('SequenceLogitsParams', [ + 'num_lstm_units', 'weight_decay', 'lstm_state_clip_value' +]) + + +class SequenceLayerBase(object): + """A base abstruct class for all sequence layers. + + A child class has to define following methods: + get_train_input + get_eval_input + unroll_cell + """ + __metaclass__ = abc.ABCMeta + + def __init__(self, net, labels_one_hot, model_params, method_params): + """Stores argument in member variable for further use. + + Args: + net: A tensor with shape [batch_size, num_features, feature_size] which + contains some extracted image features. + labels_one_hot: An optional (can be None) ground truth labels for the + input features. Is a tensor with shape + [batch_size, seq_length, num_char_classes] + model_params: A namedtuple with model parameters (model.ModelParams). + method_params: A SequenceLayerParams instance. + """ + self._params = model_params + self._mparams = method_params + self._net = net + self._labels_one_hot = labels_one_hot + self._batch_size = net.get_shape().dims[0].value + + # Initialize parameters for char logits which will be computed on the fly + # inside an LSTM decoder. + self._char_logits = {} + regularizer = slim.l2_regularizer(self._mparams.weight_decay) + self._softmax_w = slim.model_variable( + 'softmax_w', + [self._mparams.num_lstm_units, self._params.num_char_classes], + initializer=orthogonal_initializer, + regularizer=regularizer) + self._softmax_b = slim.model_variable( + 'softmax_b', [self._params.num_char_classes], + initializer=tf.zeros_initializer(), + regularizer=regularizer) + + @abc.abstractmethod + def get_train_input(self, prev, i): + """Returns a sample to be used to predict a character during training. + + This function is used as a loop_function for an RNN decoder. + + Args: + prev: output tensor from previous step of the RNN. A tensor with shape: + [batch_size, num_char_classes]. + i: index of a character in the output sequence. + + Returns: + A tensor with shape [batch_size, ?] - depth depends on implementation + details. + """ + pass + + @abc.abstractmethod + def get_eval_input(self, prev, i): + """Returns a sample to be used to predict a character during inference. + + This function is used as a loop_function for an RNN decoder. + + Args: + prev: output tensor from previous step of the RNN. A tensor with shape: + [batch_size, num_char_classes]. + i: index of a character in the output sequence. + + Returns: + A tensor with shape [batch_size, ?] - depth depends on implementation + details. + """ + raise AssertionError('Not implemented') + + @abc.abstractmethod + def unroll_cell(self, decoder_inputs, initial_state, loop_function, cell): + """Unrolls an RNN cell for all inputs. + + This is a placeholder to call some RNN decoder. It has a similar to + tf.seq2seq.rnn_decode interface. + + Args: + decoder_inputs: A list of 2D Tensors* [batch_size x input_size]. In fact, + most of existing decoders in presence of a loop_function use only the + first element to determine batch_size and length of the list to + determine number of steps. + initial_state: 2D Tensor with shape [batch_size x cell.state_size]. + loop_function: function will be applied to the i-th output in order to + generate the i+1-st input (see self.get_input). 
+ cell: rnn_cell.RNNCell defining the cell function and size. + + Returns: + A tuple of the form (outputs, state), where: + outputs: A list of character logits of the same length as + decoder_inputs of 2D Tensors with shape [batch_size x num_characters]. + state: The state of each cell at the final time-step. + It is a 2D Tensor of shape [batch_size x cell.state_size]. + """ + pass + + def is_training(self): + """Returns True if the layer is created for training stage.""" + return self._labels_one_hot is not None + + def char_logit(self, inputs, char_index): + """Creates logits for a character if required. + + Args: + inputs: A tensor with shape [batch_size, ?] (depth is implementation + dependent). + char_index: A integer index of a character in the output sequence. + + Returns: + A tensor with shape [batch_size, num_char_classes] + """ + if char_index not in self._char_logits: + self._char_logits[char_index] = tf.nn.xw_plus_b(inputs, self._softmax_w, + self._softmax_b) + return self._char_logits[char_index] + + def char_one_hot(self, logit): + """Creates one hot encoding for a logit of a character. + + Args: + logit: A tensor with shape [batch_size, num_char_classes]. + + Returns: + A tensor with shape [batch_size, num_char_classes] + """ + prediction = tf.argmax(logit, axis=1) + return slim.one_hot_encoding(prediction, self._params.num_char_classes) + + def get_input(self, prev, i): + """A wrapper for get_train_input and get_eval_input. + + Args: + prev: output tensor from previous step of the RNN. A tensor with shape: + [batch_size, num_char_classes]. + i: index of a character in the output sequence. + + Returns: + A tensor with shape [batch_size, ?] - depth depends on implementation + details. + """ + if self.is_training(): + return self.get_train_input(prev, i) + else: + return self.get_eval_input(prev, i) + + def create_logits(self): + """Creates character sequence logits for a net specified in the constructor. + + A "main" method for the sequence layer which glues together all pieces. + + Returns: + A tensor with shape [batch_size, seq_length, num_char_classes]. + """ + with tf.variable_scope('LSTM'): + first_label = self.get_input(prev=None, i=0) + decoder_inputs = [first_label] + [None] * (self._params.seq_length - 1) + lstm_cell = tf.contrib.rnn.LSTMCell( + self._mparams.num_lstm_units, + use_peepholes=False, + cell_clip=self._mparams.lstm_state_clip_value, + state_is_tuple=True, + initializer=orthogonal_initializer) + lstm_outputs, _ = self.unroll_cell( + decoder_inputs=decoder_inputs, + initial_state=lstm_cell.zero_state(self._batch_size, tf.float32), + loop_function=self.get_input, + cell=lstm_cell) + + with tf.variable_scope('logits'): + logits_list = [ + tf.expand_dims(self.char_logit(logit, i), dim=1) + for i, logit in enumerate(lstm_outputs) + ] + + return tf.concat(logits_list, 1) + + +class NetSlice(SequenceLayerBase): + """A layer which uses a subset of image features to predict each character. + """ + + def __init__(self, *args, **kwargs): + super(NetSlice, self).__init__(*args, **kwargs) + self._zero_label = tf.zeros( + [self._batch_size, self._params.num_char_classes]) + + def get_image_feature(self, char_index): + """Returns a subset of image features for a character. + + Args: + char_index: an index of a character. + + Returns: + A tensor with shape [batch_size, ?]. The output depth depends on the + depth of input net. 
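+
+ For example (hypothetical sizes): with features_num=200 and seq_length=40,
+ slice_len is 5 and character i uses image features [i, i + 5).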
+ """ + batch_size, features_num, _ = [d.value for d in self._net.get_shape()] + slice_len = int(features_num / self._params.seq_length) + # In case when features_num != seq_length, we just pick a subset of image + # features, this choice is arbitrary and there is no intuitive geometrical + # interpretation. If features_num is not dividable by seq_length there will + # be unused image features. + net_slice = self._net[:, char_index:char_index + slice_len, :] + feature = tf.reshape(net_slice, [batch_size, -1]) + logging.debug('Image feature: %s', feature) + return feature + + def get_eval_input(self, prev, i): + """See SequenceLayerBase.get_eval_input for details.""" + del prev + return self.get_image_feature(i) + + def get_train_input(self, prev, i): + """See SequenceLayerBase.get_train_input for details.""" + return self.get_eval_input(prev, i) + + def unroll_cell(self, decoder_inputs, initial_state, loop_function, cell): + """See SequenceLayerBase.unroll_cell for details.""" + return tf.contrib.legacy_seq2seq.rnn_decoder( + decoder_inputs=decoder_inputs, + initial_state=initial_state, + cell=cell, + loop_function=self.get_input) + + +class NetSliceWithAutoregression(NetSlice): + """A layer similar to NetSlice, but it also uses auto regression. + + The "auto regression" means that we use network output for previous character + as a part of input for the current character. + """ + + def __init__(self, *args, **kwargs): + super(NetSliceWithAutoregression, self).__init__(*args, **kwargs) + + def get_eval_input(self, prev, i): + """See SequenceLayerBase.get_eval_input for details.""" + if i == 0: + prev = self._zero_label + else: + logit = self.char_logit(prev, char_index=i - 1) + prev = self.char_one_hot(logit) + image_feature = self.get_image_feature(char_index=i) + return tf.concat([image_feature, prev], 1) + + def get_train_input(self, prev, i): + """See SequenceLayerBase.get_train_input for details.""" + if i == 0: + prev = self._zero_label + else: + prev = self._labels_one_hot[:, i - 1, :] + image_feature = self.get_image_feature(i) + return tf.concat([image_feature, prev], 1) + + +class Attention(SequenceLayerBase): + """A layer which uses attention mechanism to select image features.""" + + def __init__(self, *args, **kwargs): + super(Attention, self).__init__(*args, **kwargs) + self._zero_label = tf.zeros( + [self._batch_size, self._params.num_char_classes]) + + def get_eval_input(self, prev, i): + """See SequenceLayerBase.get_eval_input for details.""" + del prev, i + # The attention_decoder will fetch image features from the net, no need for + # extra inputs. + return self._zero_label + + def get_train_input(self, prev, i): + """See SequenceLayerBase.get_train_input for details.""" + return self.get_eval_input(prev, i) + + def unroll_cell(self, decoder_inputs, initial_state, loop_function, cell): + return tf.contrib.legacy_seq2seq.attention_decoder( + decoder_inputs=decoder_inputs, + initial_state=initial_state, + attention_states=self._net, + cell=cell, + loop_function=self.get_input) + + +class AttentionWithAutoregression(Attention): + """A layer which uses both attention and auto regression.""" + + def __init__(self, *args, **kwargs): + super(AttentionWithAutoregression, self).__init__(*args, **kwargs) + + def get_train_input(self, prev, i): + """See SequenceLayerBase.get_train_input for details.""" + if i == 0: + return self._zero_label + else: + # TODO(gorban): update to gradually introduce gt labels. 
+ return self._labels_one_hot[:, i - 1, :] + + def get_eval_input(self, prev, i): + """See SequenceLayerBase.get_eval_input for details.""" + if i == 0: + return self._zero_label + else: + logit = self.char_logit(prev, char_index=i - 1) + return self.char_one_hot(logit) + + +def get_layer_class(use_attention, use_autoregression): + """A convenience function to get a layer class based on requirements. + + Args: + use_attention: if True a returned class will use attention. + use_autoregression: if True a returned class will use auto regression. + + Returns: + One of available sequence layers (child classes for SequenceLayerBase). + """ + if use_attention and use_autoregression: + layer_class = AttentionWithAutoregression + elif use_attention and not use_autoregression: + layer_class = Attention + elif not use_attention and not use_autoregression: + layer_class = NetSlice + elif not use_attention and use_autoregression: + layer_class = NetSliceWithAutoregression + else: + raise AssertionError('Unsupported sequence layer class') + + logging.debug('Use %s as a layer class', layer_class.__name__) + return layer_class diff --git a/models/research/attention_ocr/python/sequence_layers_test.py b/models/research/attention_ocr/python/sequence_layers_test.py new file mode 100644 index 0000000000000000000000000000000000000000..fd41e2d824c014084129707631d45de334ec741b --- /dev/null +++ b/models/research/attention_ocr/python/sequence_layers_test.py @@ -0,0 +1,112 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for sequence_layers.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf +from tensorflow.contrib import slim + +import model +import sequence_layers + + +def fake_net(batch_size, num_features, feature_size): + return tf.convert_to_tensor( + np.random.uniform(size=(batch_size, num_features, feature_size)), + dtype=tf.float32) + + +def fake_labels(batch_size, seq_length, num_char_classes): + labels_np = tf.convert_to_tensor( + np.random.randint( + low=0, high=num_char_classes, size=(batch_size, seq_length))) + return slim.one_hot_encoding(labels_np, num_classes=num_char_classes) + + +def create_layer(layer_class, batch_size, seq_length, num_char_classes): + model_params = model.ModelParams( + num_char_classes=num_char_classes, + seq_length=seq_length, + num_views=1, + null_code=num_char_classes) + net = fake_net( + batch_size=batch_size, num_features=seq_length * 5, feature_size=6) + labels_one_hot = fake_labels(batch_size, seq_length, num_char_classes) + layer_params = sequence_layers.SequenceLayerParams( + num_lstm_units=10, weight_decay=0.00004, lstm_state_clip_value=10.0) + return layer_class(net, labels_one_hot, model_params, layer_params) + + +class SequenceLayersTest(tf.test.TestCase): + def test_net_slice_char_logits_with_correct_shape(self): + batch_size = 2 + seq_length = 4 + num_char_classes = 3 + + layer = create_layer(sequence_layers.NetSlice, batch_size, seq_length, + num_char_classes) + char_logits = layer.create_logits() + + self.assertEqual( + tf.TensorShape([batch_size, seq_length, num_char_classes]), + char_logits.get_shape()) + + def test_net_slice_with_autoregression_char_logits_with_correct_shape(self): + batch_size = 2 + seq_length = 4 + num_char_classes = 3 + + layer = create_layer(sequence_layers.NetSliceWithAutoregression, + batch_size, seq_length, num_char_classes) + char_logits = layer.create_logits() + + self.assertEqual( + tf.TensorShape([batch_size, seq_length, num_char_classes]), + char_logits.get_shape()) + + def test_attention_char_logits_with_correct_shape(self): + batch_size = 2 + seq_length = 4 + num_char_classes = 3 + + layer = create_layer(sequence_layers.Attention, batch_size, seq_length, + num_char_classes) + char_logits = layer.create_logits() + + self.assertEqual( + tf.TensorShape([batch_size, seq_length, num_char_classes]), + char_logits.get_shape()) + + def test_attention_with_autoregression_char_logits_with_correct_shape(self): + batch_size = 2 + seq_length = 4 + num_char_classes = 3 + + layer = create_layer(sequence_layers.AttentionWithAutoregression, + batch_size, seq_length, num_char_classes) + char_logits = layer.create_logits() + + self.assertEqual( + tf.TensorShape([batch_size, seq_length, num_char_classes]), + char_logits.get_shape()) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/attention_ocr/python/testdata/fsns_train_00.png b/models/research/attention_ocr/python/testdata/fsns_train_00.png new file mode 100644 index 0000000000000000000000000000000000000000..17b59aeb8bd81c58d775245b8d03dc528e7421f6 Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_00.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_01.png b/models/research/attention_ocr/python/testdata/fsns_train_01.png new file mode 100644 index 
0000000000000000000000000000000000000000..808ba29539f08e05e9d8602580dc77a9235c3ead Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_01.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_02.png b/models/research/attention_ocr/python/testdata/fsns_train_02.png new file mode 100644 index 0000000000000000000000000000000000000000..d2afa3273206b35415ad88a6438ce10aeca90c94 Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_02.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_03.png b/models/research/attention_ocr/python/testdata/fsns_train_03.png new file mode 100644 index 0000000000000000000000000000000000000000..2db55919f727ea47463cbe03ebc2346c4ac3f186 Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_03.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_04.png b/models/research/attention_ocr/python/testdata/fsns_train_04.png new file mode 100644 index 0000000000000000000000000000000000000000..d18b4f2483f912a59e1248755953f42e06d2df32 Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_04.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_05.png b/models/research/attention_ocr/python/testdata/fsns_train_05.png new file mode 100644 index 0000000000000000000000000000000000000000..f2fe0b62f4c69187a04a6369544d8275cd132773 Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_05.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_06.png b/models/research/attention_ocr/python/testdata/fsns_train_06.png new file mode 100644 index 0000000000000000000000000000000000000000..94280ba572fa290c1d4fca39dae8e44ebc372ae6 Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_06.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_07.png b/models/research/attention_ocr/python/testdata/fsns_train_07.png new file mode 100644 index 0000000000000000000000000000000000000000..8fc25f851ffbff2e420ce4d0e88bae4fd175a053 Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_07.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_08.png b/models/research/attention_ocr/python/testdata/fsns_train_08.png new file mode 100644 index 0000000000000000000000000000000000000000..df94c054e8d5ad2e66f4295132e1ff4ece43500f Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_08.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_09.png b/models/research/attention_ocr/python/testdata/fsns_train_09.png new file mode 100644 index 0000000000000000000000000000000000000000..fa9a58d9b587fc5d6749af25404e98bf41e1ec97 Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_09.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_10.png b/models/research/attention_ocr/python/testdata/fsns_train_10.png new file mode 100644 index 0000000000000000000000000000000000000000..f32e32e3b428c6751345187463c29bab87b2a3cd Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_10.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_11.png b/models/research/attention_ocr/python/testdata/fsns_train_11.png new file mode 100644 index 
0000000000000000000000000000000000000000..d0e984cd178c4b055f8add908403937791d045ff Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_11.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_12.png b/models/research/attention_ocr/python/testdata/fsns_train_12.png new file mode 100644 index 0000000000000000000000000000000000000000..0be37f903351a03bbf08d1532e7780241cd34848 Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_12.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_13.png b/models/research/attention_ocr/python/testdata/fsns_train_13.png new file mode 100644 index 0000000000000000000000000000000000000000..bbc63da2d843d2c4af54f6f7dcc9be8c4550bc0a Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_13.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_14.png b/models/research/attention_ocr/python/testdata/fsns_train_14.png new file mode 100644 index 0000000000000000000000000000000000000000..a4dabe1748f888300e9a9916a4ab2c544f0c4ede Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_14.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_15.png b/models/research/attention_ocr/python/testdata/fsns_train_15.png new file mode 100644 index 0000000000000000000000000000000000000000..695553a68f8a99e7cf938bf0685579a492974c1b Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_15.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_16.png b/models/research/attention_ocr/python/testdata/fsns_train_16.png new file mode 100644 index 0000000000000000000000000000000000000000..7aebe189ba842531f2cb3ea2d773efc463f9c724 Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_16.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_17.png b/models/research/attention_ocr/python/testdata/fsns_train_17.png new file mode 100644 index 0000000000000000000000000000000000000000..407ee6bd082f2bac55456cd748a1fa68fe735b71 Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_17.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_18.png b/models/research/attention_ocr/python/testdata/fsns_train_18.png new file mode 100644 index 0000000000000000000000000000000000000000..14dfacdd3392473224e3df5d9452e50b5875a184 Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_18.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_19.png b/models/research/attention_ocr/python/testdata/fsns_train_19.png new file mode 100644 index 0000000000000000000000000000000000000000..d874db7d4a054ac112f7db6fba30fe7972b9cfd6 Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_19.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_20.png b/models/research/attention_ocr/python/testdata/fsns_train_20.png new file mode 100644 index 0000000000000000000000000000000000000000..c0af3bcd0d157dd75bbef31e9139612d63231f44 Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_20.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_21.png b/models/research/attention_ocr/python/testdata/fsns_train_21.png new file mode 100644 index 
0000000000000000000000000000000000000000..f57e191e3e62fb6fb92f1648c4b4911a26f0f30c Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_21.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_22.png b/models/research/attention_ocr/python/testdata/fsns_train_22.png new file mode 100644 index 0000000000000000000000000000000000000000..dd02ef39a629ecb1bd2427674fd4c25d9332b975 Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_22.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_23.png b/models/research/attention_ocr/python/testdata/fsns_train_23.png new file mode 100644 index 0000000000000000000000000000000000000000..b7669c4bd529f616cc4fd10e8dccd7d24e8f1095 Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_23.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_24.png b/models/research/attention_ocr/python/testdata/fsns_train_24.png new file mode 100644 index 0000000000000000000000000000000000000000..95ec53f81331f640431d2ce5065c16ccf51b6dbe Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_24.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_25.png b/models/research/attention_ocr/python/testdata/fsns_train_25.png new file mode 100644 index 0000000000000000000000000000000000000000..15b21a9a9e1205b18cb093cf235123321722783d Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_25.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_26.png b/models/research/attention_ocr/python/testdata/fsns_train_26.png new file mode 100644 index 0000000000000000000000000000000000000000..ca4eaea80bbc18465caef0e4e9a91e93972020a3 Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_26.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_27.png b/models/research/attention_ocr/python/testdata/fsns_train_27.png new file mode 100644 index 0000000000000000000000000000000000000000..cf1949479f5c7a290fb585a8477e41998e035bde Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_27.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_28.png b/models/research/attention_ocr/python/testdata/fsns_train_28.png new file mode 100644 index 0000000000000000000000000000000000000000..c95a96e446c0d8a907f99f5456dfc634708fdf7f Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_28.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_29.png b/models/research/attention_ocr/python/testdata/fsns_train_29.png new file mode 100644 index 0000000000000000000000000000000000000000..301bb1300fd97d7f9e94aa805f12ee9a4536f837 Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_29.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_30.png b/models/research/attention_ocr/python/testdata/fsns_train_30.png new file mode 100644 index 0000000000000000000000000000000000000000..d01325cf22653565a90ebd9e125a72bb42726051 Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_30.png differ diff --git a/models/research/attention_ocr/python/testdata/fsns_train_31.png b/models/research/attention_ocr/python/testdata/fsns_train_31.png new file mode 100644 index 
0000000000000000000000000000000000000000..30f8ff43ef68735560c168280121ecc23d289d14 Binary files /dev/null and b/models/research/attention_ocr/python/testdata/fsns_train_31.png differ diff --git a/models/research/attention_ocr/python/train.py b/models/research/attention_ocr/python/train.py new file mode 100644 index 0000000000000000000000000000000000000000..fa91fb73b412287889f05d0af5875e269f1ce367 --- /dev/null +++ b/models/research/attention_ocr/python/train.py @@ -0,0 +1,209 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Script to train the Attention OCR model. + +A simple usage example: +python train.py +""" +import collections +import logging +import tensorflow as tf +from tensorflow.contrib import slim +from tensorflow import app +from tensorflow.python.platform import flags +from tensorflow.contrib.tfprof import model_analyzer + +import data_provider +import common_flags + +FLAGS = flags.FLAGS +common_flags.define() + +# yapf: disable +flags.DEFINE_integer('task', 0, + 'The Task ID. This value is used when training with ' + 'multiple workers to identify each worker.') + +flags.DEFINE_integer('ps_tasks', 0, + 'The number of parameter servers. 
If the value is 0, then' + ' the parameters are handled locally by the worker.') + +flags.DEFINE_integer('save_summaries_secs', 60, + 'The frequency with which summaries are saved, in ' + 'seconds.') + +flags.DEFINE_integer('save_interval_secs', 600, + 'Frequency in seconds of saving the model.') + +flags.DEFINE_integer('max_number_of_steps', int(1e10), + 'The maximum number of gradient steps.') + +flags.DEFINE_string('checkpoint_inception', '', + 'Checkpoint to recover inception weights from.') + +flags.DEFINE_float('clip_gradient_norm', 2.0, + 'If greater than 0 then the gradients would be clipped by ' + 'it.') + +flags.DEFINE_bool('sync_replicas', False, + 'If True will synchronize replicas during training.') + +flags.DEFINE_integer('replicas_to_aggregate', 1, + 'The number of gradients updates before updating params.') + +flags.DEFINE_integer('total_num_replicas', 1, + 'Total number of worker replicas.') + +flags.DEFINE_integer('startup_delay_steps', 15, + 'Number of training steps between replicas startup.') + +flags.DEFINE_boolean('reset_train_dir', False, + 'If true will delete all files in the train_log_dir') + +flags.DEFINE_boolean('show_graph_stats', False, + 'Output model size stats to stderr.') +# yapf: enable + +TrainingHParams = collections.namedtuple('TrainingHParams', [ + 'learning_rate', + 'optimizer', + 'momentum', + 'use_augment_input', +]) + + +def get_training_hparams(): + return TrainingHParams( + learning_rate=FLAGS.learning_rate, + optimizer=FLAGS.optimizer, + momentum=FLAGS.momentum, + use_augment_input=FLAGS.use_augment_input) + + +def create_optimizer(hparams): + """Creates optimized based on the specified flags.""" + if hparams.optimizer == 'momentum': + optimizer = tf.train.MomentumOptimizer( + hparams.learning_rate, momentum=hparams.momentum) + elif hparams.optimizer == 'adam': + optimizer = tf.train.AdamOptimizer(hparams.learning_rate) + elif hparams.optimizer == 'adadelta': + optimizer = tf.train.AdadeltaOptimizer(hparams.learning_rate) + elif hparams.optimizer == 'adagrad': + optimizer = tf.train.AdagradOptimizer(hparams.learning_rate) + elif hparams.optimizer == 'rmsprop': + optimizer = tf.train.RMSPropOptimizer( + hparams.learning_rate, momentum=hparams.momentum) + return optimizer + + +def train(loss, init_fn, hparams): + """Wraps slim.learning.train to run a training loop. + + Args: + loss: a loss tensor + init_fn: A callable to be executed after all other initialization is done. 
+ hparams: a model hyper parameters + """ + optimizer = create_optimizer(hparams) + + if FLAGS.sync_replicas: + replica_id = tf.constant(FLAGS.task, tf.int32, shape=()) + optimizer = tf.LegacySyncReplicasOptimizer( + opt=optimizer, + replicas_to_aggregate=FLAGS.replicas_to_aggregate, + replica_id=replica_id, + total_num_replicas=FLAGS.total_num_replicas) + sync_optimizer = optimizer + startup_delay_steps = 0 + else: + startup_delay_steps = 0 + sync_optimizer = None + + train_op = slim.learning.create_train_op( + loss, + optimizer, + summarize_gradients=True, + clip_gradient_norm=FLAGS.clip_gradient_norm) + + slim.learning.train( + train_op=train_op, + logdir=FLAGS.train_log_dir, + graph=loss.graph, + master=FLAGS.master, + is_chief=(FLAGS.task == 0), + number_of_steps=FLAGS.max_number_of_steps, + save_summaries_secs=FLAGS.save_summaries_secs, + save_interval_secs=FLAGS.save_interval_secs, + startup_delay_steps=startup_delay_steps, + sync_optimizer=sync_optimizer, + init_fn=init_fn) + + +def prepare_training_dir(): + if not tf.gfile.Exists(FLAGS.train_log_dir): + logging.info('Create a new training directory %s', FLAGS.train_log_dir) + tf.gfile.MakeDirs(FLAGS.train_log_dir) + else: + if FLAGS.reset_train_dir: + logging.info('Reset the training directory %s', FLAGS.train_log_dir) + tf.gfile.DeleteRecursively(FLAGS.train_log_dir) + tf.gfile.MakeDirs(FLAGS.train_log_dir) + else: + logging.info('Use already existing training directory %s', + FLAGS.train_log_dir) + + +def calculate_graph_metrics(): + param_stats = model_analyzer.print_model_analysis( + tf.get_default_graph(), + tfprof_options=model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS) + return param_stats.total_parameters + + +def main(_): + prepare_training_dir() + + dataset = common_flags.create_dataset(split_name=FLAGS.split_name) + model = common_flags.create_model(dataset.num_char_classes, + dataset.max_sequence_length, + dataset.num_of_views, dataset.null_code) + hparams = get_training_hparams() + + # If ps_tasks is zero, the local device is used. When using multiple + # (non-local) replicas, the ReplicaDeviceSetter distributes the variables + # across the different devices. + device_setter = tf.train.replica_device_setter( + FLAGS.ps_tasks, merge_devices=True) + with tf.device(device_setter): + data = data_provider.get_data( + dataset, + FLAGS.batch_size, + augment=hparams.use_augment_input, + central_crop_size=common_flags.get_crop_size()) + endpoints = model.create_base(data.images, data.labels_one_hot) + total_loss = model.create_loss(data, endpoints) + model.create_summaries(data, endpoints, dataset.charset, is_training=True) + init_fn = model.create_init_fn_to_restore(FLAGS.checkpoint, + FLAGS.checkpoint_inception) + if FLAGS.show_graph_stats: + logging.info('Total number of weights in the graph: %s', + calculate_graph_metrics()) + train(total_loss, init_fn, hparams) + + +if __name__ == '__main__': + app.run() diff --git a/models/research/attention_ocr/python/utils.py b/models/research/attention_ocr/python/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..10d93ad21e1444736bf4562ef0df1c939617a5c1 --- /dev/null +++ b/models/research/attention_ocr/python/utils.py @@ -0,0 +1,80 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Functions to support building models for StreetView text transcription.""" + +import tensorflow as tf +from tensorflow.contrib import slim + + +def logits_to_log_prob(logits): + """Computes log probabilities using numerically stable trick. + + This uses two numerical stability tricks: + 1) softmax(x) = softmax(x - c) where c is a constant applied to all + arguments. If we set c = max(x) then the softmax is more numerically + stable. + 2) log softmax(x) is not numerically stable, but we can stabilize it + by using the identity log softmax(x) = x - log sum exp(x) + + Args: + logits: Tensor of arbitrary shape whose last dimension contains logits. + + Returns: + A tensor of the same shape as the input, but with corresponding log + probabilities. + """ + + with tf.variable_scope('log_probabilities'): + reduction_indices = len(logits.shape.as_list()) - 1 + max_logits = tf.reduce_max( + logits, reduction_indices=reduction_indices, keep_dims=True) + safe_logits = tf.subtract(logits, max_logits) + sum_exp = tf.reduce_sum( + tf.exp(safe_logits), + reduction_indices=reduction_indices, + keep_dims=True) + log_probs = tf.subtract(safe_logits, tf.log(sum_exp)) + return log_probs + + +def variables_to_restore(scope=None, strip_scope=False): + """Returns a list of variables to restore for the specified list of methods. + + It is supposed that variable name starts with the method's scope (a prefix + returned by _method_scope function). + + Args: + methods_names: a list of names of configurable methods. + strip_scope: if True will return variable names without method's scope. + If methods_names is None will return names unchanged. + model_scope: a scope for a whole model. + + Returns: + a dictionary mapping variable names to variables for restore. + """ + if scope: + variable_map = {} + method_variables = slim.get_variables_to_restore(include=[scope]) + for var in method_variables: + if strip_scope: + var_name = var.op.name[len(scope) + 1:] + else: + var_name = var.op.name + variable_map[var_name] = var + + return variable_map + else: + return {v.op.name: v for v in slim.get_variables_to_restore()} diff --git a/models/research/audioset/README.md b/models/research/audioset/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c5a39b28ec1778a558a74692817b40d5d906e18c --- /dev/null +++ b/models/research/audioset/README.md @@ -0,0 +1,55 @@ +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +# Models for AudioSet: A Large Scale Dataset of Audio Events + +This repository provides models and supporting code associated with +[AudioSet](http://g.co/audioset), a dataset of over 2 million human-labeled +10-second YouTube video soundtracks, with labels taken from an ontology of +more than 600 audio event classes. 
+ +AudioSet was +[released](https://research.googleblog.com/2017/03/announcing-audioset-dataset-for-audio.html) +in March 2017 by Google's Sound Understanding team to provide a common +large-scale evaluation task for audio event detection as well as a starting +point for a comprehensive vocabulary of sound events. + +For more details about AudioSet and the various models we have trained, please +visit the [AudioSet website](http://g.co/audioset) and read our papers: + +* Gemmeke, J. et. al., + [AudioSet: An ontology and human-labelled dataset for audio events](https://research.google.com/pubs/pub45857.html), + ICASSP 2017 + +* Hershey, S. et. al., + [CNN Architectures for Large-Scale Audio Classification](https://research.google.com/pubs/pub45611.html), + ICASSP 2017 + +If you use any of our pre-trained models in your published research, we ask that +you cite [CNN Architectures for Large-Scale Audio Classification](https://research.google.com/pubs/pub45611.html). +If you use the AudioSet dataset or the released embeddings of AudioSet segments, +please cite +[AudioSet: An ontology and human-labelled dataset for audio events](https://research.google.com/pubs/pub45857.html). + +## Contact + +For general questions about AudioSet and these models, please use the +[audioset-users@googlegroups.com](https://groups.google.com/forum/#!forum/audioset-users) +mailing list. + +For technical problems with the released model and code, please open an issue on +the [tensorflow/models issue tracker](https://github.com/tensorflow/models/issues) +and __*assign to @plakal and @dpwe*__. Please note that because the issue tracker +is shared across all models released by Google, we won't be notified about an +issue unless you explicitly @-mention us (@plakal and @dpwe) or assign the issue +to us. + +## Credits + +Original authors and reviewers of the code in this package include (in +alphabetical order): + +* DAn Ellis +* Shawn Hershey +* Aren Jansen +* Manoj Plakal diff --git a/models/research/audioset/vggish/README.md b/models/research/audioset/vggish/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0be0ae86687b81f2074d9011f4c16b5402a138bf --- /dev/null +++ b/models/research/audioset/vggish/README.md @@ -0,0 +1,184 @@ +# VGGish + +The initial AudioSet release included 128-dimensional embeddings of each +AudioSet segment produced from a VGG-like audio classification model that was +trained on a large YouTube dataset (a preliminary version of what later became +[YouTube-8M](https://research.google.com/youtube8m)). + +We provide a TensorFlow definition of this model, which we call __*VGGish*__, as +well as supporting code to extract input features for the model from audio +waveforms and to post-process the model embedding output into the same format as +the released embedding features. + +## Installation + +VGGish depends on the following Python packages: + +* [`numpy`](http://www.numpy.org/) +* [`resampy`](http://resampy.readthedocs.io/en/latest/) +* [`tensorflow`](http://www.tensorflow.org/) (currently, only TF v1.x) +* [`tf_slim`](https://github.com/google-research/tf-slim) +* [`six`](https://pythonhosted.org/six/) +* [`soundfile`](https://pysoundfile.readthedocs.io/) + +These are all easily installable via, e.g., `pip install numpy` (as in the +sample installation session below). + +Any reasonably recent version of these packages shold work. Note that we currently only support +TensorFlow v1.x due to a [`tf_slim` limitation](https://github.com/google-research/tf-slim/pull/1). 
+TensorFlow v1.15 (the latest version as of Jan 2020) has been tested to work. + +VGGish also requires downloading two data files: + +* [VGGish model checkpoint](https://storage.googleapis.com/audioset/vggish_model.ckpt), + in TensorFlow checkpoint format. +* [Embedding PCA parameters](https://storage.googleapis.com/audioset/vggish_pca_params.npz), + in NumPy compressed archive format. + +After downloading these files into the same directory as this README, the +installation can be tested by running `python vggish_smoke_test.py` which +runs a known signal through the model and checks the output. + +Here's a sample installation and test session: + +```shell +# You can optionally install and test VGGish within a Python virtualenv, which +# is useful for isolating changes from the rest of your system. For example, you +# may have an existing version of some packages that you do not want to upgrade, +# or you want to try Python 3 instead of Python 2. If you decide to use a +# virtualenv, you can create one by running +# $ virtualenv vggish # For Python 2 +# or +# $ python3 -m venv vggish # For Python 3 +# and then enter the virtual environment by running +# $ source vggish/bin/activate # Assuming you use bash +# Leave the virtual environment at the end of the session by running +# $ deactivate +# Within the virtual environment, do not use 'sudo'. + +# Upgrade pip first. Also make sure wheel is installed. +$ sudo python -m pip install --upgrade pip wheel + +# Install all dependences. +$ sudo pip install numpy resampy tensorflow==1.15 tf_slim six soundfile + +# Clone TensorFlow models repo into a 'models' directory. +$ git clone https://github.com/tensorflow/models.git +$ cd models/research/audioset/vggish +# Download data files into same directory as code. +$ curl -O https://storage.googleapis.com/audioset/vggish_model.ckpt +$ curl -O https://storage.googleapis.com/audioset/vggish_pca_params.npz + +# Installation ready, let's test it. +$ python vggish_smoke_test.py +# If we see "Looks Good To Me", then we're all set. +``` + +## Usage + +VGGish can be used in two ways: + +* *As a feature extractor*: VGGish converts audio input features into a + semantically meaningful, high-level 128-D embedding which can be fed as input + to a downstream classification model. The downstream model can be shallower + than usual because the VGGish embedding is more semantically compact than raw + audio features. + + So, for example, you could train a classifier for 10 of the AudioSet classes + by using the released embeddings as features. Then, you could use that + trained classifier with any arbitrary audio input by running the audio through + the audio feature extractor and VGGish model provided here, passing the + resulting embedding features as input to your trained model. + `vggish_inference_demo.py` shows how to produce VGGish embeddings from + arbitrary audio. + +* *As part of a larger model*: Here, we treat VGGish as a "warm start" for the + lower layers of a model that takes audio features as input and adds more + layers on top of the VGGish embedding. This can be used to fine-tune VGGish + (or parts thereof) if you have large datasets that might be very different + from the typical YouTube video clip. `vggish_train_demo.py` shows how to add + layers on top of VGGish and train the whole model. + +## About the Model + +The VGGish code layout is as follows: + +* `vggish_slim.py`: Model definition in TensorFlow Slim notation. +* `vggish_params.py`: Hyperparameters. 
+* `vggish_input.py`: Converter from audio waveform into input examples. +* `mel_features.py`: Audio feature extraction helpers. +* `vggish_postprocess.py`: Embedding postprocessing. +* `vggish_inference_demo.py`: Demo of VGGish in inference mode. +* `vggish_train_demo.py`: Demo of VGGish in training mode. +* `vggish_smoke_test.py`: Simple test of a VGGish installation + +### Architecture + +See `vggish_slim.py` and `vggish_params.py`. + +VGGish is a variant of the [VGG](https://arxiv.org/abs/1409.1556) model, in +particular Configuration A with 11 weight layers. Specifically, here are the +changes we made: + +* The input size was changed to 96x64 for log mel spectrogram audio inputs. + +* We drop the last group of convolutional and maxpool layers, so we now have + only four groups of convolution/maxpool layers instead of five. + +* Instead of a 1000-wide fully connected layer at the end, we use a 128-wide + fully connected layer. This acts as a compact embedding layer. + +The model definition provided here defines layers up to and including the +128-wide embedding layer. + +### Input: Audio Features + +See `vggish_input.py` and `mel_features.py`. + +VGGish was trained with audio features computed as follows: + +* All audio is resampled to 16 kHz mono. +* A spectrogram is computed using magnitudes of the Short-Time Fourier Transform + with a window size of 25 ms, a window hop of 10 ms, and a periodic Hann + window. +* A mel spectrogram is computed by mapping the spectrogram to 64 mel bins + covering the range 125-7500 Hz. +* A stabilized log mel spectrogram is computed by applying + log(mel-spectrum + 0.01) where the offset is used to avoid taking a logarithm + of zero. +* These features are then framed into non-overlapping examples of 0.96 seconds, + where each example covers 64 mel bands and 96 frames of 10 ms each. + +We provide our own NumPy implementation that produces features that are very +similar to those produced by our internal production code. This results in +embedding outputs that are closely match the embeddings that we have already +released. Note that these embeddings will *not* be bit-for-bit identical to the +released embeddings due to small differences between the feature computation +code paths, and even between two different installations of VGGish with +different underlying libraries and hardware. However, we expect that the +embeddings will be equivalent in the context of a downstream classification +task. + +### Output: Embeddings + +See `vggish_postprocess.py`. + +The released AudioSet embeddings were postprocessed before release by applying a +PCA transformation (which performs both PCA and whitening) as well as +quantization to 8 bits per embedding element. This was done to be compatible +with the [YouTube-8M](https://research.google.com/youtube8m) project which has +released visual and audio embeddings for millions of YouTube videos in the same +PCA/whitened/quantized format. + +We provide a Python implementation of the postprocessing which can be applied to +batches of embeddings produced by VGGish. `vggish_inference_demo.py` shows how +the postprocessor can be run after inference. + +If you don't need to use the released embeddings or YouTube-8M, then you could +skip postprocessing and use raw embeddings. 
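+
+As a quick reference, here is a minimal sketch of running the released
+postprocessor on a batch of raw embeddings. It assumes `vggish_pca_params.npz`
+has been downloaded into the current directory and that `embedding_batch` is a
+`[batch_size, 128]` array of raw VGGish embeddings (as produced in
+`vggish_inference_demo.py`):
+
+```python
+import vggish_postprocess
+
+# Load the PCA/whitening parameters released with AudioSet.
+pproc = vggish_postprocess.Postprocessor('vggish_pca_params.npz')
+
+# Apply PCA, whitening and 8-bit quantization; returns a uint8 array of
+# shape [batch_size, 128].
+postprocessed_batch = pproc.postprocess(embedding_batch)
+```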
+ +A [Colab](https://colab.research.google.com/) +showing how to download the model and calculate the embeddings on your +own sound data is available here: +[AudioSet Embedding Colab](https://colab.research.google.com/drive/1TbX92UL9sYWbdwdGE0rJ9owmezB-Rl1C). + diff --git a/models/research/audioset/vggish/mel_features.py b/models/research/audioset/vggish/mel_features.py new file mode 100644 index 0000000000000000000000000000000000000000..ac58fb5427f772fcced9cbd3cec3373ffbe5908c --- /dev/null +++ b/models/research/audioset/vggish/mel_features.py @@ -0,0 +1,223 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Defines routines to compute mel spectrogram features from audio waveform.""" + +import numpy as np + + +def frame(data, window_length, hop_length): + """Convert array into a sequence of successive possibly overlapping frames. + + An n-dimensional array of shape (num_samples, ...) is converted into an + (n+1)-D array of shape (num_frames, window_length, ...), where each frame + starts hop_length points after the preceding one. + + This is accomplished using stride_tricks, so the original data is not + copied. However, there is no zero-padding, so any incomplete frames at the + end are not included. + + Args: + data: np.array of dimension N >= 1. + window_length: Number of samples in each frame. + hop_length: Advance (in samples) between each window. + + Returns: + (N+1)-D np.array with as many rows as there are complete frames that can be + extracted. + """ + num_samples = data.shape[0] + num_frames = 1 + int(np.floor((num_samples - window_length) / hop_length)) + shape = (num_frames, window_length) + data.shape[1:] + strides = (data.strides[0] * hop_length,) + data.strides + return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides) + + +def periodic_hann(window_length): + """Calculate a "periodic" Hann window. + + The classic Hann window is defined as a raised cosine that starts and + ends on zero, and where every value appears twice, except the middle + point for an odd-length window. Matlab calls this a "symmetric" window + and np.hanning() returns it. However, for Fourier analysis, this + actually represents just over one cycle of a period N-1 cosine, and + thus is not compactly expressed on a length-N Fourier basis. Instead, + it's better to use a raised cosine that ends just before the final + zero value - i.e. a complete cycle of a period-N cosine. Matlab + calls this a "periodic" window. This routine calculates it. + + Args: + window_length: The number of points in the returned window. + + Returns: + A 1D np.array containing the periodic hann window. + """ + return 0.5 - (0.5 * np.cos(2 * np.pi / window_length * + np.arange(window_length))) + + +def stft_magnitude(signal, fft_length, + hop_length=None, + window_length=None): + """Calculate the short-time Fourier transform magnitude. 
+ + Args: + signal: 1D np.array of the input time-domain signal. + fft_length: Size of the FFT to apply. + hop_length: Advance (in samples) between each frame passed to FFT. + window_length: Length of each block of samples to pass to FFT. + + Returns: + 2D np.array where each row contains the magnitudes of the fft_length/2+1 + unique values of the FFT for the corresponding frame of input samples. + """ + frames = frame(signal, window_length, hop_length) + # Apply frame window to each frame. We use a periodic Hann (cosine of period + # window_length) instead of the symmetric Hann of np.hanning (period + # window_length-1). + window = periodic_hann(window_length) + windowed_frames = frames * window + return np.abs(np.fft.rfft(windowed_frames, int(fft_length))) + + +# Mel spectrum constants and functions. +_MEL_BREAK_FREQUENCY_HERTZ = 700.0 +_MEL_HIGH_FREQUENCY_Q = 1127.0 + + +def hertz_to_mel(frequencies_hertz): + """Convert frequencies to mel scale using HTK formula. + + Args: + frequencies_hertz: Scalar or np.array of frequencies in hertz. + + Returns: + Object of same size as frequencies_hertz containing corresponding values + on the mel scale. + """ + return _MEL_HIGH_FREQUENCY_Q * np.log( + 1.0 + (frequencies_hertz / _MEL_BREAK_FREQUENCY_HERTZ)) + + +def spectrogram_to_mel_matrix(num_mel_bins=20, + num_spectrogram_bins=129, + audio_sample_rate=8000, + lower_edge_hertz=125.0, + upper_edge_hertz=3800.0): + """Return a matrix that can post-multiply spectrogram rows to make mel. + + Returns a np.array matrix A that can be used to post-multiply a matrix S of + spectrogram values (STFT magnitudes) arranged as frames x bins to generate a + "mel spectrogram" M of frames x num_mel_bins. M = S A. + + The classic HTK algorithm exploits the complementarity of adjacent mel bands + to multiply each FFT bin by only one mel weight, then add it, with positive + and negative signs, to the two adjacent mel bands to which that bin + contributes. Here, by expressing this operation as a matrix multiply, we go + from num_fft multiplies per frame (plus around 2*num_fft adds) to around + num_fft^2 multiplies and adds. However, because these are all presumably + accomplished in a single call to np.dot(), it's not clear which approach is + faster in Python. The matrix multiplication has the attraction of being more + general and flexible, and much easier to read. + + Args: + num_mel_bins: How many bands in the resulting mel spectrum. This is + the number of columns in the output matrix. + num_spectrogram_bins: How many bins there are in the source spectrogram + data, which is understood to be fft_size/2 + 1, i.e. the spectrogram + only contains the nonredundant FFT bins. + audio_sample_rate: Samples per second of the audio at the input to the + spectrogram. We need this to figure out the actual frequencies for + each spectrogram bin, which dictates how they are mapped into mel. + lower_edge_hertz: Lower bound on the frequencies to be included in the mel + spectrum. This corresponds to the lower edge of the lowest triangular + band. + upper_edge_hertz: The desired top edge of the highest frequency band. + + Returns: + An np.array with shape (num_spectrogram_bins, num_mel_bins). + + Raises: + ValueError: if frequency edges are incorrectly ordered or out of range. + """ + nyquist_hertz = audio_sample_rate / 2. 
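+  # Note (added comment): the mel band edges must lie inside [0, Nyquist];
+  # frequencies outside that range cannot be represented by the spectrogram
+  # bins, so the checks below reject such configurations.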
+ if lower_edge_hertz < 0.0: + raise ValueError("lower_edge_hertz %.1f must be >= 0" % lower_edge_hertz) + if lower_edge_hertz >= upper_edge_hertz: + raise ValueError("lower_edge_hertz %.1f >= upper_edge_hertz %.1f" % + (lower_edge_hertz, upper_edge_hertz)) + if upper_edge_hertz > nyquist_hertz: + raise ValueError("upper_edge_hertz %.1f is greater than Nyquist %.1f" % + (upper_edge_hertz, nyquist_hertz)) + spectrogram_bins_hertz = np.linspace(0.0, nyquist_hertz, num_spectrogram_bins) + spectrogram_bins_mel = hertz_to_mel(spectrogram_bins_hertz) + # The i'th mel band (starting from i=1) has center frequency + # band_edges_mel[i], lower edge band_edges_mel[i-1], and higher edge + # band_edges_mel[i+1]. Thus, we need num_mel_bins + 2 values in + # the band_edges_mel arrays. + band_edges_mel = np.linspace(hertz_to_mel(lower_edge_hertz), + hertz_to_mel(upper_edge_hertz), num_mel_bins + 2) + # Matrix to post-multiply feature arrays whose rows are num_spectrogram_bins + # of spectrogram values. + mel_weights_matrix = np.empty((num_spectrogram_bins, num_mel_bins)) + for i in range(num_mel_bins): + lower_edge_mel, center_mel, upper_edge_mel = band_edges_mel[i:i + 3] + # Calculate lower and upper slopes for every spectrogram bin. + # Line segments are linear in the *mel* domain, not hertz. + lower_slope = ((spectrogram_bins_mel - lower_edge_mel) / + (center_mel - lower_edge_mel)) + upper_slope = ((upper_edge_mel - spectrogram_bins_mel) / + (upper_edge_mel - center_mel)) + # .. then intersect them with each other and zero. + mel_weights_matrix[:, i] = np.maximum(0.0, np.minimum(lower_slope, + upper_slope)) + # HTK excludes the spectrogram DC bin; make sure it always gets a zero + # coefficient. + mel_weights_matrix[0, :] = 0.0 + return mel_weights_matrix + + +def log_mel_spectrogram(data, + audio_sample_rate=8000, + log_offset=0.0, + window_length_secs=0.025, + hop_length_secs=0.010, + **kwargs): + """Convert waveform to a log magnitude mel-frequency spectrogram. + + Args: + data: 1D np.array of waveform data. + audio_sample_rate: The sampling rate of data. + log_offset: Add this to values when taking log to avoid -Infs. + window_length_secs: Duration of each window to analyze. + hop_length_secs: Advance between successive analysis windows. + **kwargs: Additional arguments to pass to spectrogram_to_mel_matrix. + + Returns: + 2D np.array of (num_frames, num_mel_bins) consisting of log mel filterbank + magnitudes for successive frames. + """ + window_length_samples = int(round(audio_sample_rate * window_length_secs)) + hop_length_samples = int(round(audio_sample_rate * hop_length_secs)) + fft_length = 2 ** int(np.ceil(np.log(window_length_samples) / np.log(2.0))) + spectrogram = stft_magnitude( + data, + fft_length=fft_length, + hop_length=hop_length_samples, + window_length=window_length_samples) + mel_spectrogram = np.dot(spectrogram, spectrogram_to_mel_matrix( + num_spectrogram_bins=spectrogram.shape[1], + audio_sample_rate=audio_sample_rate, **kwargs)) + return np.log(mel_spectrogram + log_offset) diff --git a/models/research/audioset/vggish/vggish_inference_demo.py b/models/research/audioset/vggish/vggish_inference_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..6d9d631b36d8eeac68ea23b59bd0938b5dbbd30c --- /dev/null +++ b/models/research/audioset/vggish/vggish_inference_demo.py @@ -0,0 +1,154 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""A simple demonstration of running VGGish in inference mode. + +This is intended as a toy example that demonstrates how the various building +blocks (feature extraction, model definition and loading, postprocessing) work +together in an inference context. + +A WAV file (assumed to contain signed 16-bit PCM samples) is read in, converted +into log mel spectrogram examples, fed into VGGish, the raw embedding output is +whitened and quantized, and the postprocessed embeddings are optionally written +in a SequenceExample to a TFRecord file (using the same format as the embedding +features released in AudioSet). + +Usage: + # Run a WAV file through the model and print the embeddings. The model + # checkpoint is loaded from vggish_model.ckpt and the PCA parameters are + # loaded from vggish_pca_params.npz in the current directory. + $ python vggish_inference_demo.py --wav_file /path/to/a/wav/file + + # Run a WAV file through the model and also write the embeddings to + # a TFRecord file. The model checkpoint and PCA parameters are explicitly + # passed in as well. + $ python vggish_inference_demo.py --wav_file /path/to/a/wav/file \ + --tfrecord_file /path/to/tfrecord/file \ + --checkpoint /path/to/model/checkpoint \ + --pca_params /path/to/pca/params + + # Run a built-in input (a sine wav) through the model and print the + # embeddings. Associated model files are read from the current directory. + $ python vggish_inference_demo.py +""" + +from __future__ import print_function + +import numpy as np +import six +import soundfile +import tensorflow.compat.v1 as tf +tf.disable_v2_behavior() + +import vggish_input +import vggish_params +import vggish_postprocess +import vggish_slim + +flags = tf.app.flags + +flags.DEFINE_string( + 'wav_file', None, + 'Path to a wav file. Should contain signed 16-bit PCM samples. ' + 'If none is provided, a synthetic sound is used.') + +flags.DEFINE_string( + 'checkpoint', 'vggish_model.ckpt', + 'Path to the VGGish checkpoint file.') + +flags.DEFINE_string( + 'pca_params', 'vggish_pca_params.npz', + 'Path to the VGGish PCA parameters file.') + +flags.DEFINE_string( + 'tfrecord_file', None, + 'Path to a TFRecord file where embeddings will be written.') + +FLAGS = flags.FLAGS + + +def main(_): + # In this simple example, we run the examples from a single audio file through + # the model. If none is provided, we generate a synthetic input. + if FLAGS.wav_file: + wav_file = FLAGS.wav_file + else: + # Write a WAV of a sine wav into an in-memory file object. + num_secs = 5 + freq = 1000 + sr = 44100 + t = np.linspace(0, num_secs, int(num_secs * sr)) + x = np.sin(2 * np.pi * freq * t) + # Convert to signed 16-bit samples. 
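+      # (Added note) Full-scale floats in [-1.0, +1.0] are scaled by 32768 and
+      # clipped into the int16 range to avoid overflow before casting.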
+ samples = np.clip(x * 32768, -32768, 32767).astype(np.int16) + wav_file = six.BytesIO() + soundfile.write(wav_file, samples, sr, format='WAV', subtype='PCM_16') + wav_file.seek(0) + examples_batch = vggish_input.wavfile_to_examples(wav_file) + print(examples_batch) + + # Prepare a postprocessor to munge the model embeddings. + pproc = vggish_postprocess.Postprocessor(FLAGS.pca_params) + + # If needed, prepare a record writer to store the postprocessed embeddings. + writer = tf.python_io.TFRecordWriter( + FLAGS.tfrecord_file) if FLAGS.tfrecord_file else None + + with tf.Graph().as_default(), tf.Session() as sess: + # Define the model in inference mode, load the checkpoint, and + # locate input and output tensors. + vggish_slim.define_vggish_slim(training=False) + vggish_slim.load_vggish_slim_checkpoint(sess, FLAGS.checkpoint) + features_tensor = sess.graph.get_tensor_by_name( + vggish_params.INPUT_TENSOR_NAME) + embedding_tensor = sess.graph.get_tensor_by_name( + vggish_params.OUTPUT_TENSOR_NAME) + + # Run inference and postprocessing. + [embedding_batch] = sess.run([embedding_tensor], + feed_dict={features_tensor: examples_batch}) + print(embedding_batch) + postprocessed_batch = pproc.postprocess(embedding_batch) + print(postprocessed_batch) + + # Write the postprocessed embeddings as a SequenceExample, in a similar + # format as the features released in AudioSet. Each row of the batch of + # embeddings corresponds to roughly a second of audio (96 10ms frames), and + # the rows are written as a sequence of bytes-valued features, where each + # feature value contains the 128 bytes of the whitened quantized embedding. + seq_example = tf.train.SequenceExample( + feature_lists=tf.train.FeatureLists( + feature_list={ + vggish_params.AUDIO_EMBEDDING_FEATURE_NAME: + tf.train.FeatureList( + feature=[ + tf.train.Feature( + bytes_list=tf.train.BytesList( + value=[embedding.tobytes()])) + for embedding in postprocessed_batch + ] + ) + } + ) + ) + print(seq_example) + if writer: + writer.write(seq_example.SerializeToString()) + + if writer: + writer.close() + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/audioset/vggish/vggish_input.py b/models/research/audioset/vggish/vggish_input.py new file mode 100644 index 0000000000000000000000000000000000000000..f283afbcffacece33e3e3a6886fbe7609be8c77e --- /dev/null +++ b/models/research/audioset/vggish/vggish_input.py @@ -0,0 +1,97 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Compute input examples for VGGish from audio waveform.""" + +import numpy as np +import resampy + +import mel_features +import vggish_params + +try: + import soundfile as sf + + def wav_read(wav_file): + wav_data, sr = sf.read(wav_file, dtype='int16') + return wav_data, sr + +except ImportError: + + def wav_read(wav_file): + raise NotImplementedError('WAV file reading requires soundfile package.') + + +def waveform_to_examples(data, sample_rate): + """Converts audio waveform into an array of examples for VGGish. + + Args: + data: np.array of either one dimension (mono) or two dimensions + (multi-channel, with the outer dimension representing channels). + Each sample is generally expected to lie in the range [-1.0, +1.0], + although this is not required. + sample_rate: Sample rate of data. + + Returns: + 3-D np.array of shape [num_examples, num_frames, num_bands] which represents + a sequence of examples, each of which contains a patch of log mel + spectrogram, covering num_frames frames of audio and num_bands mel frequency + bands, where the frame length is vggish_params.STFT_HOP_LENGTH_SECONDS. + """ + # Convert to mono. + if len(data.shape) > 1: + data = np.mean(data, axis=1) + # Resample to the rate assumed by VGGish. + if sample_rate != vggish_params.SAMPLE_RATE: + data = resampy.resample(data, sample_rate, vggish_params.SAMPLE_RATE) + + # Compute log mel spectrogram features. + log_mel = mel_features.log_mel_spectrogram( + data, + audio_sample_rate=vggish_params.SAMPLE_RATE, + log_offset=vggish_params.LOG_OFFSET, + window_length_secs=vggish_params.STFT_WINDOW_LENGTH_SECONDS, + hop_length_secs=vggish_params.STFT_HOP_LENGTH_SECONDS, + num_mel_bins=vggish_params.NUM_MEL_BINS, + lower_edge_hertz=vggish_params.MEL_MIN_HZ, + upper_edge_hertz=vggish_params.MEL_MAX_HZ) + + # Frame features into examples. + features_sample_rate = 1.0 / vggish_params.STFT_HOP_LENGTH_SECONDS + example_window_length = int(round( + vggish_params.EXAMPLE_WINDOW_SECONDS * features_sample_rate)) + example_hop_length = int(round( + vggish_params.EXAMPLE_HOP_SECONDS * features_sample_rate)) + log_mel_examples = mel_features.frame( + log_mel, + window_length=example_window_length, + hop_length=example_hop_length) + return log_mel_examples + + +def wavfile_to_examples(wav_file): + """Convenience wrapper around waveform_to_examples() for a common WAV format. + + Args: + wav_file: String path to a file, or a file-like object. The file + is assumed to contain WAV audio data with signed 16-bit PCM samples. + + Returns: + See waveform_to_examples. + """ + wav_data, sr = wav_read(wav_file) + assert wav_data.dtype == np.int16, 'Bad sample type: %r' % wav_data.dtype + samples = wav_data / 32768.0 # Convert to [-1.0, +1.0] + return waveform_to_examples(samples, sr) diff --git a/models/research/audioset/vggish/vggish_params.py b/models/research/audioset/vggish/vggish_params.py new file mode 100644 index 0000000000000000000000000000000000000000..a38ce26c9d6a8c53509e66988c33b646b54f9ad8 --- /dev/null +++ b/models/research/audioset/vggish/vggish_params.py @@ -0,0 +1,53 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Global parameters for the VGGish model. + +See vggish_slim.py for more information. +""" + +# Architectural constants. +NUM_FRAMES = 96 # Frames in input mel-spectrogram patch. +NUM_BANDS = 64 # Frequency bands in input mel-spectrogram patch. +EMBEDDING_SIZE = 128 # Size of embedding layer. + +# Hyperparameters used in feature and example generation. +SAMPLE_RATE = 16000 +STFT_WINDOW_LENGTH_SECONDS = 0.025 +STFT_HOP_LENGTH_SECONDS = 0.010 +NUM_MEL_BINS = NUM_BANDS +MEL_MIN_HZ = 125 +MEL_MAX_HZ = 7500 +LOG_OFFSET = 0.01 # Offset used for stabilized log of input mel-spectrogram. +EXAMPLE_WINDOW_SECONDS = 0.96 # Each example contains 96 10ms frames +EXAMPLE_HOP_SECONDS = 0.96 # with zero overlap. + +# Parameters used for embedding postprocessing. +PCA_EIGEN_VECTORS_NAME = 'pca_eigen_vectors' +PCA_MEANS_NAME = 'pca_means' +QUANTIZE_MIN_VAL = -2.0 +QUANTIZE_MAX_VAL = +2.0 + +# Hyperparameters used in training. +INIT_STDDEV = 0.01 # Standard deviation used to initialize weights. +LEARNING_RATE = 1e-4 # Learning rate for the Adam optimizer. +ADAM_EPSILON = 1e-8 # Epsilon for the Adam optimizer. + +# Names of ops, tensors, and features. +INPUT_OP_NAME = 'vggish/input_features' +INPUT_TENSOR_NAME = INPUT_OP_NAME + ':0' +OUTPUT_OP_NAME = 'vggish/embedding' +OUTPUT_TENSOR_NAME = OUTPUT_OP_NAME + ':0' +AUDIO_EMBEDDING_FEATURE_NAME = 'audio_embedding' diff --git a/models/research/audioset/vggish/vggish_postprocess.py b/models/research/audioset/vggish/vggish_postprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..aef23babef6a44aa4c1546539bbc41ccf57d59c7 --- /dev/null +++ b/models/research/audioset/vggish/vggish_postprocess.py @@ -0,0 +1,91 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Post-process embeddings from VGGish.""" + +import numpy as np + +import vggish_params + + +class Postprocessor(object): + """Post-processes VGGish embeddings. + + The initial release of AudioSet included 128-D VGGish embeddings for each + segment of AudioSet. These released embeddings were produced by applying + a PCA transformation (technically, a whitening transform is included as well) + and 8-bit quantization to the raw embedding output from VGGish, in order to + stay compatible with the YouTube-8M project which provides visual embeddings + in the same format for a large set of YouTube videos. 
This class implements + the same PCA (with whitening) and quantization transformations. + """ + + def __init__(self, pca_params_npz_path): + """Constructs a postprocessor. + + Args: + pca_params_npz_path: Path to a NumPy-format .npz file that + contains the PCA parameters used in postprocessing. + """ + params = np.load(pca_params_npz_path) + self._pca_matrix = params[vggish_params.PCA_EIGEN_VECTORS_NAME] + # Load means into a column vector for easier broadcasting later. + self._pca_means = params[vggish_params.PCA_MEANS_NAME].reshape(-1, 1) + assert self._pca_matrix.shape == ( + vggish_params.EMBEDDING_SIZE, vggish_params.EMBEDDING_SIZE), ( + 'Bad PCA matrix shape: %r' % (self._pca_matrix.shape,)) + assert self._pca_means.shape == (vggish_params.EMBEDDING_SIZE, 1), ( + 'Bad PCA means shape: %r' % (self._pca_means.shape,)) + + def postprocess(self, embeddings_batch): + """Applies postprocessing to a batch of embeddings. + + Args: + embeddings_batch: An nparray of shape [batch_size, embedding_size] + containing output from the embedding layer of VGGish. + + Returns: + An nparray of the same shape as the input but of type uint8, + containing the PCA-transformed and quantized version of the input. + """ + assert len(embeddings_batch.shape) == 2, ( + 'Expected 2-d batch, got %r' % (embeddings_batch.shape,)) + assert embeddings_batch.shape[1] == vggish_params.EMBEDDING_SIZE, ( + 'Bad batch shape: %r' % (embeddings_batch.shape,)) + + # Apply PCA. + # - Embeddings come in as [batch_size, embedding_size]. + # - Transpose to [embedding_size, batch_size]. + # - Subtract pca_means column vector from each column. + # - Premultiply by PCA matrix of shape [output_dims, input_dims] + # where both are are equal to embedding_size in our case. + # - Transpose result back to [batch_size, embedding_size]. + pca_applied = np.dot(self._pca_matrix, + (embeddings_batch.T - self._pca_means)).T + + # Quantize by: + # - clipping to [min, max] range + clipped_embeddings = np.clip( + pca_applied, vggish_params.QUANTIZE_MIN_VAL, + vggish_params.QUANTIZE_MAX_VAL) + # - convert to 8-bit in range [0.0, 255.0] + quantized_embeddings = ( + (clipped_embeddings - vggish_params.QUANTIZE_MIN_VAL) * + (255.0 / + (vggish_params.QUANTIZE_MAX_VAL - vggish_params.QUANTIZE_MIN_VAL))) + # - cast 8-bit float to uint8 + quantized_embeddings = quantized_embeddings.astype(np.uint8) + + return quantized_embeddings diff --git a/models/research/audioset/vggish/vggish_slim.py b/models/research/audioset/vggish/vggish_slim.py new file mode 100644 index 0000000000000000000000000000000000000000..0a838c4b8e2619b2573c490f546044b113f3bb55 --- /dev/null +++ b/models/research/audioset/vggish/vggish_slim.py @@ -0,0 +1,130 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Defines the 'VGGish' model used to generate AudioSet embedding features. 
+ +The public AudioSet release (https://research.google.com/audioset/download.html) +includes 128-D features extracted from the embedding layer of a VGG-like model +that was trained on a large Google-internal YouTube dataset. Here we provide +a TF-Slim definition of the same model, without any dependences on libraries +internal to Google. We call it 'VGGish'. + +Note that we only define the model up to the embedding layer, which is the +penultimate layer before the final classifier layer. We also provide various +hyperparameter values (in vggish_params.py) that were used to train this model +internally. + +For comparison, here is TF-Slim's VGG definition: +https://github.com/tensorflow/models/blob/master/research/slim/nets/vgg.py +""" + +import tensorflow.compat.v1 as tf +tf.disable_v2_behavior() +import tf_slim as slim + +import vggish_params as params + + +def define_vggish_slim(training=False): + """Defines the VGGish TensorFlow model. + + All ops are created in the current default graph, under the scope 'vggish/'. + + The input is a placeholder named 'vggish/input_features' of type float32 and + shape [batch_size, num_frames, num_bands] where batch_size is variable and + num_frames and num_bands are constants, and [num_frames, num_bands] represents + a log-mel-scale spectrogram patch covering num_bands frequency bands and + num_frames time frames (where each frame step is usually 10ms). This is + produced by computing the stabilized log(mel-spectrogram + params.LOG_OFFSET). + The output is an op named 'vggish/embedding' which produces the activations of + a 128-D embedding layer, which is usually the penultimate layer when used as + part of a full model with a final classifier layer. + + Args: + training: If true, all parameters are marked trainable. + + Returns: + The op 'vggish/embeddings'. + """ + # Defaults: + # - All weights are initialized to N(0, INIT_STDDEV). + # - All biases are initialized to 0. + # - All activations are ReLU. + # - All convolutions are 3x3 with stride 1 and SAME padding. + # - All max-pools are 2x2 with stride 2 and SAME padding. + with slim.arg_scope([slim.conv2d, slim.fully_connected], + weights_initializer=tf.truncated_normal_initializer( + stddev=params.INIT_STDDEV), + biases_initializer=tf.zeros_initializer(), + activation_fn=tf.nn.relu, + trainable=training), \ + slim.arg_scope([slim.conv2d], + kernel_size=[3, 3], stride=1, padding='SAME'), \ + slim.arg_scope([slim.max_pool2d], + kernel_size=[2, 2], stride=2, padding='SAME'), \ + tf.variable_scope('vggish'): + # Input: a batch of 2-D log-mel-spectrogram patches. + features = tf.placeholder( + tf.float32, shape=(None, params.NUM_FRAMES, params.NUM_BANDS), + name='input_features') + # Reshape to 4-D so that we can convolve a batch with conv2d(). + net = tf.reshape(features, [-1, params.NUM_FRAMES, params.NUM_BANDS, 1]) + + # The VGG stack of alternating convolutions and max-pools. + net = slim.conv2d(net, 64, scope='conv1') + net = slim.max_pool2d(net, scope='pool1') + net = slim.conv2d(net, 128, scope='conv2') + net = slim.max_pool2d(net, scope='pool2') + net = slim.repeat(net, 2, slim.conv2d, 256, scope='conv3') + net = slim.max_pool2d(net, scope='pool3') + net = slim.repeat(net, 2, slim.conv2d, 512, scope='conv4') + net = slim.max_pool2d(net, scope='pool4') + + # Flatten before entering fully-connected layers + net = slim.flatten(net) + net = slim.repeat(net, 2, slim.fully_connected, 4096, scope='fc1') + # The embedding layer. 
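+    # fc2 maps the 4096-D fc1 activations down to EMBEDDING_SIZE (128) units. Like
+    # the other layers it inherits the ReLU activation from the arg_scope defaults
+    # above, so the raw (un-postprocessed) embeddings are non-negative.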
+ net = slim.fully_connected(net, params.EMBEDDING_SIZE, scope='fc2') + return tf.identity(net, name='embedding') + + +def load_vggish_slim_checkpoint(session, checkpoint_path): + """Loads a pre-trained VGGish-compatible checkpoint. + + This function can be used as an initialization function (referred to as + init_fn in TensorFlow documentation) which is called in a Session after + initializating all variables. When used as an init_fn, this will load + a pre-trained checkpoint that is compatible with the VGGish model + definition. Only variables defined by VGGish will be loaded. + + Args: + session: an active TensorFlow session. + checkpoint_path: path to a file containing a checkpoint that is + compatible with the VGGish model definition. + """ + # Get the list of names of all VGGish variables that exist in + # the checkpoint (i.e., all inference-mode VGGish variables). + with tf.Graph().as_default(): + define_vggish_slim(training=False) + vggish_var_names = [v.name for v in tf.global_variables()] + + # Get the list of all currently existing variables that match + # the list of variable names we just computed. + vggish_vars = [v for v in tf.global_variables() if v.name in vggish_var_names] + + # Use a Saver to restore just the variables selected above. + saver = tf.train.Saver(vggish_vars, name='vggish_load_pretrained', + write_version=1) + saver.restore(session, checkpoint_path) diff --git a/models/research/audioset/vggish/vggish_smoke_test.py b/models/research/audioset/vggish/vggish_smoke_test.py new file mode 100644 index 0000000000000000000000000000000000000000..f27e583aee473c6a04a5af20fd101c7a54871e94 --- /dev/null +++ b/models/research/audioset/vggish/vggish_smoke_test.py @@ -0,0 +1,98 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A smoke test for VGGish. + +This is a simple smoke test of a local install of VGGish and its associated +downloaded files. We create a synthetic sound, extract log mel spectrogram +features, run them through VGGish, post-process the embedding ouputs, and +check some simple statistics of the results, allowing for variations that +might occur due to platform/version differences in the libraries we use. + +Usage: +- Download the VGGish checkpoint and PCA parameters into the same directory as + the VGGish source code. If you keep them elsewhere, update the checkpoint_path + and pca_params_path variables below. +- Run: + $ python vggish_smoke_test.py +""" + +from __future__ import print_function + +import numpy as np +import tensorflow.compat.v1 as tf +tf.disable_v2_behavior() + +import vggish_input +import vggish_params +import vggish_postprocess +import vggish_slim + +print('\nTesting your install of VGGish\n') + +# Paths to downloaded VGGish files. +checkpoint_path = 'vggish_model.ckpt' +pca_params_path = 'vggish_pca_params.npz' + +# Relative tolerance of errors in mean and standard deviation of embeddings. 
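+# The expected means/stds hard-coded further below were measured on a reference
+# setup; rel_error leaves headroom for small numerical differences across
+# platforms and library versions.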
+rel_error = 0.1 # Up to 10% + +# Generate a 1 kHz sine wave at 44.1 kHz (we use a high sampling rate +# to test resampling to 16 kHz during feature extraction). +num_secs = 3 +freq = 1000 +sr = 44100 +t = np.linspace(0, num_secs, int(num_secs * sr)) +x = np.sin(2 * np.pi * freq * t) + +# Produce a batch of log mel spectrogram examples. +input_batch = vggish_input.waveform_to_examples(x, sr) +print('Log Mel Spectrogram example: ', input_batch[0]) +np.testing.assert_equal( + input_batch.shape, + [num_secs, vggish_params.NUM_FRAMES, vggish_params.NUM_BANDS]) + +# Define VGGish, load the checkpoint, and run the batch through the model to +# produce embeddings. +with tf.Graph().as_default(), tf.Session() as sess: + vggish_slim.define_vggish_slim() + vggish_slim.load_vggish_slim_checkpoint(sess, checkpoint_path) + + features_tensor = sess.graph.get_tensor_by_name( + vggish_params.INPUT_TENSOR_NAME) + embedding_tensor = sess.graph.get_tensor_by_name( + vggish_params.OUTPUT_TENSOR_NAME) + [embedding_batch] = sess.run([embedding_tensor], + feed_dict={features_tensor: input_batch}) + print('VGGish embedding: ', embedding_batch[0]) + expected_embedding_mean = 0.131 + expected_embedding_std = 0.238 + np.testing.assert_allclose( + [np.mean(embedding_batch), np.std(embedding_batch)], + [expected_embedding_mean, expected_embedding_std], + rtol=rel_error) + +# Postprocess the results to produce whitened quantized embeddings. +pproc = vggish_postprocess.Postprocessor(pca_params_path) +postprocessed_batch = pproc.postprocess(embedding_batch) +print('Postprocessed VGGish embedding: ', postprocessed_batch[0]) +expected_postprocessed_mean = 123.0 +expected_postprocessed_std = 75.0 +np.testing.assert_allclose( + [np.mean(postprocessed_batch), np.std(postprocessed_batch)], + [expected_postprocessed_mean, expected_postprocessed_std], + rtol=rel_error) + +print('\nLooks Good To Me!\n') diff --git a/models/research/audioset/vggish/vggish_train_demo.py b/models/research/audioset/vggish/vggish_train_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..d8be0f1774549b0b0ec4bdbcf840a16696fa6322 --- /dev/null +++ b/models/research/audioset/vggish/vggish_train_demo.py @@ -0,0 +1,194 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""A simple demonstration of running VGGish in training mode. + +This is intended as a toy example that demonstrates how to use the VGGish model +definition within a larger model that adds more layers on top, and then train +the larger model. If you let VGGish train as well, then this allows you to +fine-tune the VGGish model parameters for your application. If you don't let +VGGish train, then you use VGGish as a feature extractor for the layers above +it. + +For this toy task, we are training a classifier to distinguish between three +classes: sine waves, constant signals, and white noise. 
We generate synthetic +waveforms from each of these classes, convert into shuffled batches of log mel +spectrogram examples with associated labels, and feed the batches into a model +that includes VGGish at the bottom and a couple of additional layers on top. We +also plumb in labels that are associated with the examples, which feed a label +loss used for training. + +Usage: + # Run training for 100 steps using a model checkpoint in the default + # location (vggish_model.ckpt in the current directory). Allow VGGish + # to get fine-tuned. + $ python vggish_train_demo.py --num_batches 100 + + # Same as before but run for fewer steps and don't change VGGish parameters + # and use a checkpoint in a different location + $ python vggish_train_demo.py --num_batches 50 \ + --train_vggish=False \ + --checkpoint /path/to/model/checkpoint +""" + +from __future__ import print_function + +from random import shuffle + +import numpy as np +import tensorflow.compat.v1 as tf +tf.disable_v2_behavior() +import tf_slim as slim + +import vggish_input +import vggish_params +import vggish_slim + +flags = tf.app.flags + +flags.DEFINE_integer( + 'num_batches', 30, + 'Number of batches of examples to feed into the model. Each batch is of ' + 'variable size and contains shuffled examples of each class of audio.') + +flags.DEFINE_boolean( + 'train_vggish', True, + 'If True, allow VGGish parameters to change during training, thus ' + 'fine-tuning VGGish. If False, VGGish parameters are fixed, thus using ' + 'VGGish as a fixed feature extractor.') + +flags.DEFINE_string( + 'checkpoint', 'vggish_model.ckpt', + 'Path to the VGGish checkpoint file.') + +FLAGS = flags.FLAGS + +_NUM_CLASSES = 3 + + +def _get_examples_batch(): + """Returns a shuffled batch of examples of all audio classes. + + Note that this is just a toy function because this is a simple demo intended + to illustrate how the training code might work. + + Returns: + a tuple (features, labels) where features is a NumPy array of shape + [batch_size, num_frames, num_bands] where the batch_size is variable and + each row is a log mel spectrogram patch of shape [num_frames, num_bands] + suitable for feeding VGGish, while labels is a NumPy array of shape + [batch_size, num_classes] where each row is a multi-hot label vector that + provides the labels for corresponding rows in features. + """ + # Make a waveform for each class. + num_seconds = 5 + sr = 44100 # Sampling rate. + t = np.linspace(0, num_seconds, int(num_seconds * sr)) # Time axis. + # Random sine wave. + freq = np.random.uniform(100, 1000) + sine = np.sin(2 * np.pi * freq * t) + # Random constant signal. + magnitude = np.random.uniform(-1, 1) + const = magnitude * t + # White noise. + noise = np.random.normal(-1, 1, size=t.shape) + + # Make examples of each signal and corresponding labels. + # Sine is class index 0, Const class index 1, Noise class index 2. + sine_examples = vggish_input.waveform_to_examples(sine, sr) + sine_labels = np.array([[1, 0, 0]] * sine_examples.shape[0]) + const_examples = vggish_input.waveform_to_examples(const, sr) + const_labels = np.array([[0, 1, 0]] * const_examples.shape[0]) + noise_examples = vggish_input.waveform_to_examples(noise, sr) + noise_labels = np.array([[0, 0, 1]] * noise_examples.shape[0]) + + # Shuffle (example, label) pairs across all classes. 
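+  # With 5 seconds of audio per class and non-overlapping 0.96 s examples, each
+  # class contributes roughly 5 examples, so a "batch" here is roughly 15
+  # shuffled (example, label) pairs.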
+ all_examples = np.concatenate((sine_examples, const_examples, noise_examples)) + all_labels = np.concatenate((sine_labels, const_labels, noise_labels)) + labeled_examples = list(zip(all_examples, all_labels)) + shuffle(labeled_examples) + + # Separate and return the features and labels. + features = [example for (example, _) in labeled_examples] + labels = [label for (_, label) in labeled_examples] + return (features, labels) + + +def main(_): + with tf.Graph().as_default(), tf.Session() as sess: + # Define VGGish. + embeddings = vggish_slim.define_vggish_slim(FLAGS.train_vggish) + + # Define a shallow classification model and associated training ops on top + # of VGGish. + with tf.variable_scope('mymodel'): + # Add a fully connected layer with 100 units. + num_units = 100 + fc = slim.fully_connected(embeddings, num_units) + + # Add a classifier layer at the end, consisting of parallel logistic + # classifiers, one per class. This allows for multi-class tasks. + logits = slim.fully_connected( + fc, _NUM_CLASSES, activation_fn=None, scope='logits') + tf.sigmoid(logits, name='prediction') + + # Add training ops. + with tf.variable_scope('train'): + global_step = tf.Variable( + 0, name='global_step', trainable=False, + collections=[tf.GraphKeys.GLOBAL_VARIABLES, + tf.GraphKeys.GLOBAL_STEP]) + + # Labels are assumed to be fed as a batch multi-hot vectors, with + # a 1 in the position of each positive class label, and 0 elsewhere. + labels = tf.placeholder( + tf.float32, shape=(None, _NUM_CLASSES), name='labels') + + # Cross-entropy label loss. + xent = tf.nn.sigmoid_cross_entropy_with_logits( + logits=logits, labels=labels, name='xent') + loss = tf.reduce_mean(xent, name='loss_op') + tf.summary.scalar('loss', loss) + + # We use the same optimizer and hyperparameters as used to train VGGish. + optimizer = tf.train.AdamOptimizer( + learning_rate=vggish_params.LEARNING_RATE, + epsilon=vggish_params.ADAM_EPSILON) + optimizer.minimize(loss, global_step=global_step, name='train_op') + + # Initialize all variables in the model, and then load the pre-trained + # VGGish checkpoint. + sess.run(tf.global_variables_initializer()) + vggish_slim.load_vggish_slim_checkpoint(sess, FLAGS.checkpoint) + + # Locate all the tensors and ops we need for the training loop. + features_tensor = sess.graph.get_tensor_by_name( + vggish_params.INPUT_TENSOR_NAME) + labels_tensor = sess.graph.get_tensor_by_name('mymodel/train/labels:0') + global_step_tensor = sess.graph.get_tensor_by_name( + 'mymodel/train/global_step:0') + loss_tensor = sess.graph.get_tensor_by_name('mymodel/train/loss_op:0') + train_op = sess.graph.get_operation_by_name('mymodel/train/train_op') + + # The training loop. 
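+    # Each iteration builds a fresh synthetic batch, runs a single Adam step on
+    # the classifier head (and on VGGish itself when --train_vggish=True), and
+    # prints the resulting loss.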
+ for _ in range(FLAGS.num_batches): + (features, labels) = _get_examples_batch() + [num_steps, loss, _] = sess.run( + [global_step_tensor, loss_tensor, train_op], + feed_dict={features_tensor: features, labels_tensor: labels}) + print('Step %d: loss %g' % (num_steps, loss)) + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/audioset/yamnet/README.md b/models/research/audioset/yamnet/README.md new file mode 100644 index 0000000000000000000000000000000000000000..983724c0d3b526a288f564d5c90b8f8330ac14ee --- /dev/null +++ b/models/research/audioset/yamnet/README.md @@ -0,0 +1,138 @@ +# YAMNet + +YAMNet is a pretrained deep net that predicts 521 audio event classes based on +the [AudioSet-YouTube corpus](http://g.co/audioset), and employing the +[Mobilenet_v1](https://arxiv.org/pdf/1704.04861.pdf) depthwise-separable +convolution architecture. + +This directory contains the Keras code to construct the model, and example code +for applying the model to input sound files. + +## Installation + +YAMNet depends on the following Python packages: + +* [`numpy`](http://www.numpy.org/) +* [`resampy`](http://resampy.readthedocs.io/en/latest/) +* [`tensorflow`](http://www.tensorflow.org/) +* [`pysoundfile`](https://pysoundfile.readthedocs.io/) + +These are all easily installable via, e.g., `pip install numpy` (as in the +example command sequence below). + +Any reasonably recent version of these packages should work. TensorFlow should +be at least version 1.8 to ensure Keras support is included. Note that while +the code works fine with TensorFlow v1.x or v2.x, we explicitly enable v1.x +behavior. + +YAMNet also requires downloading the following data file: + +* [YAMNet model weights](https://storage.googleapis.com/audioset/yamnet.h5) + in Keras saved weights in HDF5 format. + +After downloading this file into the same directory as this README, the +installation can be tested by running `python yamnet_test.py` which +runs some synthetic signals through the model and checks the outputs. + +Here's a sample installation and test session: + +```shell +# Upgrade pip first. Also make sure wheel is installed. +python -m pip install --upgrade pip wheel + +# Install dependences. +pip install numpy resampy tensorflow soundfile + +# Clone TensorFlow models repo into a 'models' directory. +git clone https://github.com/tensorflow/models.git +cd models/research/audioset/yamnet +# Download data file into same directory as code. +curl -O https://storage.googleapis.com/audioset/yamnet.h5 + +# Installation ready, let's test it. +python yamnet_test.py +# If we see "Ran 4 tests ... OK ...", then we're all set. +``` + +## Usage + +You can run the model over existing soundfiles using inference.py: + +```shell +python inference.py input_sound.wav +``` +The code will report the top-5 highest-scoring classes averaged over all the +frames of the input. You can access greater detail by modifying the example +code in inference.py. + +See the jupyter notebook `yamnet_visualization.ipynb` for an example of +displaying the per-frame model output scores. + + +## About the Model + +The YAMNet code layout is as follows: + +* `yamnet.py`: Model definition in Keras. +* `params.py`: Hyperparameters. You can usefully modify PATCH_HOP_SECONDS. +* `features.py`: Audio feature extraction helpers. +* `inference.py`: Example code to classify input wav files. +* `yamnet_test.py`: Simple test of YAMNet installation + +### Input: Audio Features + +See `features.py`. 
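+
+A minimal sketch of calling these helpers directly (an illustration only; it
+assumes TensorFlow 2.x eager execution so that the printed tensor shapes are
+concrete, and the approximate shapes in the comments are estimates):
+
+```python
+import numpy as np
+import tensorflow as tf
+
+import features as features_lib
+import params
+
+# Three seconds of silence at the model's 16 kHz input rate.
+waveform = np.zeros(3 * params.SAMPLE_RATE, dtype=np.float32)
+spectrogram = features_lib.waveform_to_log_mel_spectrogram(
+    tf.constant(waveform), params)
+patches = features_lib.spectrogram_to_patches(spectrogram, params)
+print(spectrogram.shape)  # about (298, 64): 10 ms frames x 64 mel bands
+print(patches.shape)      # about (5, 96, 64): 0.96 s patches with a 0.48 s hop
+```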
+ +As with our previous release +[VGGish](https://github.com/tensorflow/models/tree/master/research/audioset/vggish), +YAMNet was trained with audio features computed as follows: + +* All audio is resampled to 16 kHz mono. +* A spectrogram is computed using magnitudes of the Short-Time Fourier Transform + with a window size of 25 ms, a window hop of 10 ms, and a periodic Hann + window. +* A mel spectrogram is computed by mapping the spectrogram to 64 mel bins + covering the range 125-7500 Hz. +* A stabilized log mel spectrogram is computed by applying + log(mel-spectrum + 0.001) where the offset is used to avoid taking a logarithm + of zero. +* These features are then framed into 50%-overlapping examples of 0.96 seconds, + where each example covers 64 mel bands and 96 frames of 10 ms each. + +These 96x64 patches are then fed into the Mobilenet_v1 model to yield a 3x2 +array of activations for 1024 kernels at the top of the convolution. These are +averaged to give a 1024-dimension embedding, then put through a single logistic +layer to get the 521 per-class output scores corresponding to the 960 ms input +waveform segment. (Because of the window framing, you need at least 975 ms of +input waveform to get the first frame of output scores.) + +### Class vocabulary + +The file `yamnet_class_map.csv` describes the audio event classes associated +with each of the 521 outputs of the network. Its format is: + +```text +index,mid,display_name +``` + +where `index` is the model output index (0..520), `mid` is the machine +identifier for that class (e.g. `/m/09x0r`), and display_name is a +human-readable description of the class (e.g. `Speech`). + +The original Audioset data release had 527 classes. This model drops six of +them on the recommendation of our Fairness reviewers to avoid potentially +offensive mislabelings. We dropped the gendered versions (Male/Female) of +Speech and Singing. We also dropped Battle cry and Funny music. + +### Performance + +On the 20,366-segment AudioSet eval set, over the 521 included classes, the +balanced average d-prime is 2.318, balanced mAP is 0.306, and the balanced +average lwlrap is 0.393. + +According to our calculations, the classifier has 3.7M weights and performs +69.2M multiplies for each 960ms input frame. + +### Contact information + +This model repository is maintained by [Manoj Plakal](https://github.com/plakal) and [Dan Ellis](https://github.com/dpwe). diff --git a/models/research/audioset/yamnet/features.py b/models/research/audioset/yamnet/features.py new file mode 100644 index 0000000000000000000000000000000000000000..98661124787c1b3f672185483c5715edb375cb2a --- /dev/null +++ b/models/research/audioset/yamnet/features.py @@ -0,0 +1,79 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Feature computation for YAMNet.""" + +import numpy as np +import tensorflow as tf + + +def waveform_to_log_mel_spectrogram(waveform, params): + """Compute log mel spectrogram of a 1-D waveform.""" + with tf.name_scope('log_mel_features'): + # waveform has shape [<# samples>] + + # Convert waveform into spectrogram using a Short-Time Fourier Transform. + # Note that tf.signal.stft() uses a periodic Hann window by default. + window_length_samples = int( + round(params.SAMPLE_RATE * params.STFT_WINDOW_SECONDS)) + hop_length_samples = int( + round(params.SAMPLE_RATE * params.STFT_HOP_SECONDS)) + fft_length = 2 ** int(np.ceil(np.log(window_length_samples) / np.log(2.0))) + num_spectrogram_bins = fft_length // 2 + 1 + magnitude_spectrogram = tf.abs(tf.signal.stft( + signals=waveform, + frame_length=window_length_samples, + frame_step=hop_length_samples, + fft_length=fft_length)) + # magnitude_spectrogram has shape [<# STFT frames>, num_spectrogram_bins] + + # Convert spectrogram into log mel spectrogram. + linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix( + num_mel_bins=params.MEL_BANDS, + num_spectrogram_bins=num_spectrogram_bins, + sample_rate=params.SAMPLE_RATE, + lower_edge_hertz=params.MEL_MIN_HZ, + upper_edge_hertz=params.MEL_MAX_HZ) + mel_spectrogram = tf.matmul( + magnitude_spectrogram, linear_to_mel_weight_matrix) + log_mel_spectrogram = tf.math.log(mel_spectrogram + params.LOG_OFFSET) + # log_mel_spectrogram has shape [<# STFT frames>, MEL_BANDS] + + return log_mel_spectrogram + + +def spectrogram_to_patches(spectrogram, params): + """Break up a spectrogram into a stack of fixed-size patches.""" + with tf.name_scope('feature_patches'): + # Frame spectrogram (shape [<# STFT frames>, MEL_BANDS]) into patches + # (the input examples). + # Only complete frames are emitted, so if there is less than + # PATCH_WINDOW_SECONDS of waveform then nothing is emitted + # (to avoid this, zero-pad before processing). + hop_length_samples = int( + round(params.SAMPLE_RATE * params.STFT_HOP_SECONDS)) + spectrogram_sr = params.SAMPLE_RATE / hop_length_samples + patch_window_length_samples = int( + round(spectrogram_sr * params.PATCH_WINDOW_SECONDS)) + patch_hop_length_samples = int( + round(spectrogram_sr * params.PATCH_HOP_SECONDS)) + features = tf.signal.frame( + signal=spectrogram, + frame_length=patch_window_length_samples, + frame_step=patch_hop_length_samples, + axis=0) + # features has shape [<# patches>, <# STFT frames in an patch>, MEL_BANDS] + + return features diff --git a/models/research/audioset/yamnet/inference.py b/models/research/audioset/yamnet/inference.py new file mode 100644 index 0000000000000000000000000000000000000000..1aa015550933c8696e56f92bdedd4de61ac518cb --- /dev/null +++ b/models/research/audioset/yamnet/inference.py @@ -0,0 +1,67 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Inference demo for YAMNet.""" +from __future__ import division, print_function + +import sys + +import numpy as np +import resampy +import soundfile as sf +import tensorflow as tf + +import params +import yamnet as yamnet_model + + +def main(argv): + assert argv + + graph = tf.Graph() + with graph.as_default(): + yamnet = yamnet_model.yamnet_frames_model(params) + yamnet.load_weights('yamnet.h5') + yamnet_classes = yamnet_model.class_names('yamnet_class_map.csv') + + for file_name in argv: + # Decode the WAV file. + wav_data, sr = sf.read(file_name, dtype=np.int16) + assert wav_data.dtype == np.int16, 'Bad sample type: %r' % wav_data.dtype + waveform = wav_data / 32768.0 # Convert to [-1.0, +1.0] + + # Convert to mono and the sample rate expected by YAMNet. + if len(waveform.shape) > 1: + waveform = np.mean(waveform, axis=1) + if sr != params.SAMPLE_RATE: + waveform = resampy.resample(waveform, sr, params.SAMPLE_RATE) + + # Predict YAMNet classes. + # Second output is log-mel-spectrogram array (used for visualizations). + # (steps=1 is a work around for Keras batching limitations.) + with graph.as_default(): + scores, _ = yamnet.predict(np.reshape(waveform, [1, -1]), steps=1) + # Scores is a matrix of (time_frames, num_classes) classifier scores. + # Average them along time to get an overall classifier output for the clip. + prediction = np.mean(scores, axis=0) + # Report the highest-scoring classes and their scores. + top5_i = np.argsort(prediction)[::-1][:5] + print(file_name, ':\n' + + '\n'.join(' {:12s}: {:.3f}'.format(yamnet_classes[i], prediction[i]) + for i in top5_i)) + + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/models/research/audioset/yamnet/params.py b/models/research/audioset/yamnet/params.py new file mode 100644 index 0000000000000000000000000000000000000000..5d848ad71695f2fdb29eddea5b7c135509fa5fe2 --- /dev/null +++ b/models/research/audioset/yamnet/params.py @@ -0,0 +1,42 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Hyperparameters for YAMNet.""" + +# The following hyperparameters (except PATCH_HOP_SECONDS) were used to train YAMNet, +# so expect some variability in performance if you change these. The patch hop can +# be changed arbitrarily: a smaller hop should give you more patches from the same +# clip and possibly better performance at a larger computational cost. 
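+# For example, the default PATCH_HOP_SECONDS = 0.48 below (50% patch overlap)
+# yields roughly twice as many scored patches per clip as a non-overlapping
+# 0.96 s hop would.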
+SAMPLE_RATE = 16000 +STFT_WINDOW_SECONDS = 0.025 +STFT_HOP_SECONDS = 0.010 +MEL_BANDS = 64 +MEL_MIN_HZ = 125 +MEL_MAX_HZ = 7500 +LOG_OFFSET = 0.001 +PATCH_WINDOW_SECONDS = 0.96 +PATCH_HOP_SECONDS = 0.48 + +PATCH_FRAMES = int(round(PATCH_WINDOW_SECONDS / STFT_HOP_SECONDS)) +PATCH_BANDS = MEL_BANDS +NUM_CLASSES = 521 +CONV_PADDING = 'same' +BATCHNORM_CENTER = True +BATCHNORM_SCALE = False +BATCHNORM_EPSILON = 1e-4 +CLASSIFIER_ACTIVATION = 'sigmoid' + +FEATURES_LAYER_NAME = 'features' +EXAMPLE_PREDICTIONS_LAYER_NAME = 'predictions' diff --git a/models/research/audioset/yamnet/yamnet.py b/models/research/audioset/yamnet/yamnet.py new file mode 100644 index 0000000000000000000000000000000000000000..ce36ff8cc462bc3a37bcaacd615d7c997d46f6ef --- /dev/null +++ b/models/research/audioset/yamnet/yamnet.py @@ -0,0 +1,140 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Core model definition of YAMNet.""" + +import csv + +import numpy as np +import tensorflow as tf +from tensorflow.keras import Model, layers + +import features as features_lib +import params + + +def _batch_norm(name): + def _bn_layer(layer_input): + return layers.BatchNormalization( + name=name, + center=params.BATCHNORM_CENTER, + scale=params.BATCHNORM_SCALE, + epsilon=params.BATCHNORM_EPSILON)(layer_input) + return _bn_layer + + +def _conv(name, kernel, stride, filters): + def _conv_layer(layer_input): + output = layers.Conv2D(name='{}/conv'.format(name), + filters=filters, + kernel_size=kernel, + strides=stride, + padding=params.CONV_PADDING, + use_bias=False, + activation=None)(layer_input) + output = _batch_norm(name='{}/conv/bn'.format(name))(output) + output = layers.ReLU(name='{}/relu'.format(name))(output) + return output + return _conv_layer + + +def _separable_conv(name, kernel, stride, filters): + def _separable_conv_layer(layer_input): + output = layers.DepthwiseConv2D(name='{}/depthwise_conv'.format(name), + kernel_size=kernel, + strides=stride, + depth_multiplier=1, + padding=params.CONV_PADDING, + use_bias=False, + activation=None)(layer_input) + output = _batch_norm(name='{}/depthwise_conv/bn'.format(name))(output) + output = layers.ReLU(name='{}/depthwise_conv/relu'.format(name))(output) + output = layers.Conv2D(name='{}/pointwise_conv'.format(name), + filters=filters, + kernel_size=(1, 1), + strides=1, + padding=params.CONV_PADDING, + use_bias=False, + activation=None)(output) + output = _batch_norm(name='{}/pointwise_conv/bn'.format(name))(output) + output = layers.ReLU(name='{}/pointwise_conv/relu'.format(name))(output) + return output + return _separable_conv_layer + + +_YAMNET_LAYER_DEFS = [ + # (layer_function, kernel, stride, num_filters) + (_conv, [3, 3], 2, 32), + (_separable_conv, [3, 3], 1, 64), + (_separable_conv, [3, 3], 2, 128), + (_separable_conv, [3, 3], 1, 128), + (_separable_conv, [3, 3], 2, 256), + (_separable_conv, [3, 3], 1, 256), + (_separable_conv, 
[3, 3], 2, 512), + (_separable_conv, [3, 3], 1, 512), + (_separable_conv, [3, 3], 1, 512), + (_separable_conv, [3, 3], 1, 512), + (_separable_conv, [3, 3], 1, 512), + (_separable_conv, [3, 3], 1, 512), + (_separable_conv, [3, 3], 2, 1024), + (_separable_conv, [3, 3], 1, 1024) +] + + +def yamnet(features): + """Define the core YAMNet mode in Keras.""" + net = layers.Reshape( + (params.PATCH_FRAMES, params.PATCH_BANDS, 1), + input_shape=(params.PATCH_FRAMES, params.PATCH_BANDS))(features) + for (i, (layer_fun, kernel, stride, filters)) in enumerate(_YAMNET_LAYER_DEFS): + net = layer_fun('layer{}'.format(i + 1), kernel, stride, filters)(net) + net = layers.GlobalAveragePooling2D()(net) + logits = layers.Dense(units=params.NUM_CLASSES, use_bias=True)(net) + predictions = layers.Activation( + name=params.EXAMPLE_PREDICTIONS_LAYER_NAME, + activation=params.CLASSIFIER_ACTIVATION)(logits) + return predictions + + +def yamnet_frames_model(feature_params): + """Defines the YAMNet waveform-to-class-scores model. + + Args: + feature_params: An object with parameter fields to control the feature + calculation. + + Returns: + A model accepting (1, num_samples) waveform input and emitting a + (num_patches, num_classes) matrix of class scores per time frame as + well as a (num_spectrogram_frames, num_mel_bins) spectrogram feature + matrix. + """ + waveform = layers.Input(batch_shape=(1, None)) + # Store the intermediate spectrogram features to use in visualization. + spectrogram = features_lib.waveform_to_log_mel_spectrogram( + tf.squeeze(waveform, axis=0), feature_params) + patches = features_lib.spectrogram_to_patches(spectrogram, feature_params) + predictions = yamnet(patches) + frames_model = Model(name='yamnet_frames', + inputs=waveform, outputs=[predictions, spectrogram]) + return frames_model + + +def class_names(class_map_csv): + """Read the class name definition file and return a list of strings.""" + with open(class_map_csv) as csv_file: + reader = csv.reader(csv_file) + next(reader) # Skip header + return np.array([display_name for (_, _, display_name) in reader]) diff --git a/models/research/audioset/yamnet/yamnet_class_map.csv b/models/research/audioset/yamnet/yamnet_class_map.csv new file mode 100644 index 0000000000000000000000000000000000000000..a5309539c85dde56bdc660a01c08e9a7aacba09a --- /dev/null +++ b/models/research/audioset/yamnet/yamnet_class_map.csv @@ -0,0 +1,522 @@ +index,mid,display_name +0,/m/09x0r,Speech +1,/m/0ytgt,"Child speech, kid speaking" +2,/m/01h8n0,Conversation +3,/m/02qldy,"Narration, monologue" +4,/m/0261r1,Babbling +5,/m/0brhx,Speech synthesizer +6,/m/07p6fty,Shout +7,/m/07q4ntr,Bellow +8,/m/07rwj3x,Whoop +9,/m/07sr1lc,Yell +10,/t/dd00135,Children shouting +11,/m/03qc9zr,Screaming +12,/m/02rtxlg,Whispering +13,/m/01j3sz,Laughter +14,/t/dd00001,Baby laughter +15,/m/07r660_,Giggle +16,/m/07s04w4,Snicker +17,/m/07sq110,Belly laugh +18,/m/07rgt08,"Chuckle, chortle" +19,/m/0463cq4,"Crying, sobbing" +20,/t/dd00002,"Baby cry, infant cry" +21,/m/07qz6j3,Whimper +22,/m/07qw_06,"Wail, moan" +23,/m/07plz5l,Sigh +24,/m/015lz1,Singing +25,/m/0l14jd,Choir +26,/m/01swy6,Yodeling +27,/m/02bk07,Chant +28,/m/01c194,Mantra +29,/t/dd00005,Child singing +30,/t/dd00006,Synthetic singing +31,/m/06bxc,Rapping +32,/m/02fxyj,Humming +33,/m/07s2xch,Groan +34,/m/07r4k75,Grunt +35,/m/01w250,Whistling +36,/m/0lyf6,Breathing +37,/m/07mzm6,Wheeze +38,/m/01d3sd,Snoring +39,/m/07s0dtb,Gasp +40,/m/07pyy8b,Pant +41,/m/07q0yl5,Snort +42,/m/01b_21,Cough +43,/m/0dl9sf8,Throat clearing 
+44,/m/01hsr_,Sneeze +45,/m/07ppn3j,Sniff +46,/m/06h7j,Run +47,/m/07qv_x_,Shuffle +48,/m/07pbtc8,"Walk, footsteps" +49,/m/03cczk,"Chewing, mastication" +50,/m/07pdhp0,Biting +51,/m/0939n_,Gargling +52,/m/01g90h,Stomach rumble +53,/m/03q5_w,"Burping, eructation" +54,/m/02p3nc,Hiccup +55,/m/02_nn,Fart +56,/m/0k65p,Hands +57,/m/025_jnm,Finger snapping +58,/m/0l15bq,Clapping +59,/m/01jg02,"Heart sounds, heartbeat" +60,/m/01jg1z,Heart murmur +61,/m/053hz1,Cheering +62,/m/028ght,Applause +63,/m/07rkbfh,Chatter +64,/m/03qtwd,Crowd +65,/m/07qfr4h,"Hubbub, speech noise, speech babble" +66,/t/dd00013,Children playing +67,/m/0jbk,Animal +68,/m/068hy,"Domestic animals, pets" +69,/m/0bt9lr,Dog +70,/m/05tny_,Bark +71,/m/07r_k2n,Yip +72,/m/07qf0zm,Howl +73,/m/07rc7d9,Bow-wow +74,/m/0ghcn6,Growling +75,/t/dd00136,Whimper (dog) +76,/m/01yrx,Cat +77,/m/02yds9,Purr +78,/m/07qrkrw,Meow +79,/m/07rjwbb,Hiss +80,/m/07r81j2,Caterwaul +81,/m/0ch8v,"Livestock, farm animals, working animals" +82,/m/03k3r,Horse +83,/m/07rv9rh,Clip-clop +84,/m/07q5rw0,"Neigh, whinny" +85,/m/01xq0k1,"Cattle, bovinae" +86,/m/07rpkh9,Moo +87,/m/0239kh,Cowbell +88,/m/068zj,Pig +89,/t/dd00018,Oink +90,/m/03fwl,Goat +91,/m/07q0h5t,Bleat +92,/m/07bgp,Sheep +93,/m/025rv6n,Fowl +94,/m/09b5t,"Chicken, rooster" +95,/m/07st89h,Cluck +96,/m/07qn5dc,"Crowing, cock-a-doodle-doo" +97,/m/01rd7k,Turkey +98,/m/07svc2k,Gobble +99,/m/09ddx,Duck +100,/m/07qdb04,Quack +101,/m/0dbvp,Goose +102,/m/07qwf61,Honk +103,/m/01280g,Wild animals +104,/m/0cdnk,"Roaring cats (lions, tigers)" +105,/m/04cvmfc,Roar +106,/m/015p6,Bird +107,/m/020bb7,"Bird vocalization, bird call, bird song" +108,/m/07pggtn,"Chirp, tweet" +109,/m/07sx8x_,Squawk +110,/m/0h0rv,"Pigeon, dove" +111,/m/07r_25d,Coo +112,/m/04s8yn,Crow +113,/m/07r5c2p,Caw +114,/m/09d5_,Owl +115,/m/07r_80w,Hoot +116,/m/05_wcq,"Bird flight, flapping wings" +117,/m/01z5f,"Canidae, dogs, wolves" +118,/m/06hps,"Rodents, rats, mice" +119,/m/04rmv,Mouse +120,/m/07r4gkf,Patter +121,/m/03vt0,Insect +122,/m/09xqv,Cricket +123,/m/09f96,Mosquito +124,/m/0h2mp,"Fly, housefly" +125,/m/07pjwq1,Buzz +126,/m/01h3n,"Bee, wasp, etc." 
+127,/m/09ld4,Frog +128,/m/07st88b,Croak +129,/m/078jl,Snake +130,/m/07qn4z3,Rattle +131,/m/032n05,Whale vocalization +132,/m/04rlf,Music +133,/m/04szw,Musical instrument +134,/m/0fx80y,Plucked string instrument +135,/m/0342h,Guitar +136,/m/02sgy,Electric guitar +137,/m/018vs,Bass guitar +138,/m/042v_gx,Acoustic guitar +139,/m/06w87,"Steel guitar, slide guitar" +140,/m/01glhc,Tapping (guitar technique) +141,/m/07s0s5r,Strum +142,/m/018j2,Banjo +143,/m/0jtg0,Sitar +144,/m/04rzd,Mandolin +145,/m/01bns_,Zither +146,/m/07xzm,Ukulele +147,/m/05148p4,Keyboard (musical) +148,/m/05r5c,Piano +149,/m/01s0ps,Electric piano +150,/m/013y1f,Organ +151,/m/03xq_f,Electronic organ +152,/m/03gvt,Hammond organ +153,/m/0l14qv,Synthesizer +154,/m/01v1d8,Sampler +155,/m/03q5t,Harpsichord +156,/m/0l14md,Percussion +157,/m/02hnl,Drum kit +158,/m/0cfdd,Drum machine +159,/m/026t6,Drum +160,/m/06rvn,Snare drum +161,/m/03t3fj,Rimshot +162,/m/02k_mr,Drum roll +163,/m/0bm02,Bass drum +164,/m/011k_j,Timpani +165,/m/01p970,Tabla +166,/m/01qbl,Cymbal +167,/m/03qtq,Hi-hat +168,/m/01sm1g,Wood block +169,/m/07brj,Tambourine +170,/m/05r5wn,Rattle (instrument) +171,/m/0xzly,Maraca +172,/m/0mbct,Gong +173,/m/016622,Tubular bells +174,/m/0j45pbj,Mallet percussion +175,/m/0dwsp,"Marimba, xylophone" +176,/m/0dwtp,Glockenspiel +177,/m/0dwt5,Vibraphone +178,/m/0l156b,Steelpan +179,/m/05pd6,Orchestra +180,/m/01kcd,Brass instrument +181,/m/0319l,French horn +182,/m/07gql,Trumpet +183,/m/07c6l,Trombone +184,/m/0l14_3,Bowed string instrument +185,/m/02qmj0d,String section +186,/m/07y_7,"Violin, fiddle" +187,/m/0d8_n,Pizzicato +188,/m/01xqw,Cello +189,/m/02fsn,Double bass +190,/m/085jw,"Wind instrument, woodwind instrument" +191,/m/0l14j_,Flute +192,/m/06ncr,Saxophone +193,/m/01wy6,Clarinet +194,/m/03m5k,Harp +195,/m/0395lw,Bell +196,/m/03w41f,Church bell +197,/m/027m70_,Jingle bell +198,/m/0gy1t2s,Bicycle bell +199,/m/07n_g,Tuning fork +200,/m/0f8s22,Chime +201,/m/026fgl,Wind chime +202,/m/0150b9,Change ringing (campanology) +203,/m/03qjg,Harmonica +204,/m/0mkg,Accordion +205,/m/0192l,Bagpipes +206,/m/02bxd,Didgeridoo +207,/m/0l14l2,Shofar +208,/m/07kc_,Theremin +209,/m/0l14t7,Singing bowl +210,/m/01hgjl,Scratching (performance technique) +211,/m/064t9,Pop music +212,/m/0glt670,Hip hop music +213,/m/02cz_7,Beatboxing +214,/m/06by7,Rock music +215,/m/03lty,Heavy metal +216,/m/05r6t,Punk rock +217,/m/0dls3,Grunge +218,/m/0dl5d,Progressive rock +219,/m/07sbbz2,Rock and roll +220,/m/05w3f,Psychedelic rock +221,/m/06j6l,Rhythm and blues +222,/m/0gywn,Soul music +223,/m/06cqb,Reggae +224,/m/01lyv,Country +225,/m/015y_n,Swing music +226,/m/0gg8l,Bluegrass +227,/m/02x8m,Funk +228,/m/02w4v,Folk music +229,/m/06j64v,Middle Eastern music +230,/m/03_d0,Jazz +231,/m/026z9,Disco +232,/m/0ggq0m,Classical music +233,/m/05lls,Opera +234,/m/02lkt,Electronic music +235,/m/03mb9,House music +236,/m/07gxw,Techno +237,/m/07s72n,Dubstep +238,/m/0283d,Drum and bass +239,/m/0m0jc,Electronica +240,/m/08cyft,Electronic dance music +241,/m/0fd3y,Ambient music +242,/m/07lnk,Trance music +243,/m/0g293,Music of Latin America +244,/m/0ln16,Salsa music +245,/m/0326g,Flamenco +246,/m/0155w,Blues +247,/m/05fw6t,Music for children +248,/m/02v2lh,New-age music +249,/m/0y4f8,Vocal music +250,/m/0z9c,A capella +251,/m/0164x2,Music of Africa +252,/m/0145m,Afrobeat +253,/m/02mscn,Christian music +254,/m/016cjb,Gospel music +255,/m/028sqc,Music of Asia +256,/m/015vgc,Carnatic music +257,/m/0dq0md,Music of Bollywood +258,/m/06rqw,Ska +259,/m/02p0sh1,Traditional music 
+260,/m/05rwpb,Independent music +261,/m/074ft,Song +262,/m/025td0t,Background music +263,/m/02cjck,Theme music +264,/m/03r5q_,Jingle (music) +265,/m/0l14gg,Soundtrack music +266,/m/07pkxdp,Lullaby +267,/m/01z7dr,Video game music +268,/m/0140xf,Christmas music +269,/m/0ggx5q,Dance music +270,/m/04wptg,Wedding music +271,/t/dd00031,Happy music +272,/t/dd00033,Sad music +273,/t/dd00034,Tender music +274,/t/dd00035,Exciting music +275,/t/dd00036,Angry music +276,/t/dd00037,Scary music +277,/m/03m9d0z,Wind +278,/m/09t49,Rustling leaves +279,/t/dd00092,Wind noise (microphone) +280,/m/0jb2l,Thunderstorm +281,/m/0ngt1,Thunder +282,/m/0838f,Water +283,/m/06mb1,Rain +284,/m/07r10fb,Raindrop +285,/t/dd00038,Rain on surface +286,/m/0j6m2,Stream +287,/m/0j2kx,Waterfall +288,/m/05kq4,Ocean +289,/m/034srq,"Waves, surf" +290,/m/06wzb,Steam +291,/m/07swgks,Gurgling +292,/m/02_41,Fire +293,/m/07pzfmf,Crackle +294,/m/07yv9,Vehicle +295,/m/019jd,"Boat, Water vehicle" +296,/m/0hsrw,"Sailboat, sailing ship" +297,/m/056ks2,"Rowboat, canoe, kayak" +298,/m/02rlv9,"Motorboat, speedboat" +299,/m/06q74,Ship +300,/m/012f08,Motor vehicle (road) +301,/m/0k4j,Car +302,/m/0912c9,"Vehicle horn, car horn, honking" +303,/m/07qv_d5,Toot +304,/m/02mfyn,Car alarm +305,/m/04gxbd,"Power windows, electric windows" +306,/m/07rknqz,Skidding +307,/m/0h9mv,Tire squeal +308,/t/dd00134,Car passing by +309,/m/0ltv,"Race car, auto racing" +310,/m/07r04,Truck +311,/m/0gvgw0,Air brake +312,/m/05x_td,"Air horn, truck horn" +313,/m/02rhddq,Reversing beeps +314,/m/03cl9h,"Ice cream truck, ice cream van" +315,/m/01bjv,Bus +316,/m/03j1ly,Emergency vehicle +317,/m/04qvtq,Police car (siren) +318,/m/012n7d,Ambulance (siren) +319,/m/012ndj,"Fire engine, fire truck (siren)" +320,/m/04_sv,Motorcycle +321,/m/0btp2,"Traffic noise, roadway noise" +322,/m/06d_3,Rail transport +323,/m/07jdr,Train +324,/m/04zmvq,Train whistle +325,/m/0284vy3,Train horn +326,/m/01g50p,"Railroad car, train wagon" +327,/t/dd00048,Train wheels squealing +328,/m/0195fx,"Subway, metro, underground" +329,/m/0k5j,Aircraft +330,/m/014yck,Aircraft engine +331,/m/04229,Jet engine +332,/m/02l6bg,"Propeller, airscrew" +333,/m/09ct_,Helicopter +334,/m/0cmf2,"Fixed-wing aircraft, airplane" +335,/m/0199g,Bicycle +336,/m/06_fw,Skateboard +337,/m/02mk9,Engine +338,/t/dd00065,Light engine (high frequency) +339,/m/08j51y,"Dental drill, dentist's drill" +340,/m/01yg9g,Lawn mower +341,/m/01j4z9,Chainsaw +342,/t/dd00066,Medium engine (mid frequency) +343,/t/dd00067,Heavy engine (low frequency) +344,/m/01h82_,Engine knocking +345,/t/dd00130,Engine starting +346,/m/07pb8fc,Idling +347,/m/07q2z82,"Accelerating, revving, vroom" +348,/m/02dgv,Door +349,/m/03wwcy,Doorbell +350,/m/07r67yg,Ding-dong +351,/m/02y_763,Sliding door +352,/m/07rjzl8,Slam +353,/m/07r4wb8,Knock +354,/m/07qcpgn,Tap +355,/m/07q6cd_,Squeak +356,/m/0642b4,Cupboard open or close +357,/m/0fqfqc,Drawer open or close +358,/m/04brg2,"Dishes, pots, and pans" +359,/m/023pjk,"Cutlery, silverware" +360,/m/07pn_8q,Chopping (food) +361,/m/0dxrf,Frying (food) +362,/m/0fx9l,Microwave oven +363,/m/02pjr4,Blender +364,/m/02jz0l,"Water tap, faucet" +365,/m/0130jx,Sink (filling or washing) +366,/m/03dnzn,Bathtub (filling or washing) +367,/m/03wvsk,Hair dryer +368,/m/01jt3m,Toilet flush +369,/m/012xff,Toothbrush +370,/m/04fgwm,Electric toothbrush +371,/m/0d31p,Vacuum cleaner +372,/m/01s0vc,Zipper (clothing) +373,/m/03v3yw,Keys jangling +374,/m/0242l,Coin (dropping) +375,/m/01lsmm,Scissors +376,/m/02g901,"Electric shaver, electric razor" 
+377,/m/05rj2,Shuffling cards +378,/m/0316dw,Typing +379,/m/0c2wf,Typewriter +380,/m/01m2v,Computer keyboard +381,/m/081rb,Writing +382,/m/07pp_mv,Alarm +383,/m/07cx4,Telephone +384,/m/07pp8cl,Telephone bell ringing +385,/m/01hnzm,Ringtone +386,/m/02c8p,"Telephone dialing, DTMF" +387,/m/015jpf,Dial tone +388,/m/01z47d,Busy signal +389,/m/046dlr,Alarm clock +390,/m/03kmc9,Siren +391,/m/0dgbq,Civil defense siren +392,/m/030rvx,Buzzer +393,/m/01y3hg,"Smoke detector, smoke alarm" +394,/m/0c3f7m,Fire alarm +395,/m/04fq5q,Foghorn +396,/m/0l156k,Whistle +397,/m/06hck5,Steam whistle +398,/t/dd00077,Mechanisms +399,/m/02bm9n,"Ratchet, pawl" +400,/m/01x3z,Clock +401,/m/07qjznt,Tick +402,/m/07qjznl,Tick-tock +403,/m/0l7xg,Gears +404,/m/05zc1,Pulleys +405,/m/0llzx,Sewing machine +406,/m/02x984l,Mechanical fan +407,/m/025wky1,Air conditioning +408,/m/024dl,Cash register +409,/m/01m4t,Printer +410,/m/0dv5r,Camera +411,/m/07bjf,Single-lens reflex camera +412,/m/07k1x,Tools +413,/m/03l9g,Hammer +414,/m/03p19w,Jackhammer +415,/m/01b82r,Sawing +416,/m/02p01q,Filing (rasp) +417,/m/023vsd,Sanding +418,/m/0_ksk,Power tool +419,/m/01d380,Drill +420,/m/014zdl,Explosion +421,/m/032s66,"Gunshot, gunfire" +422,/m/04zjc,Machine gun +423,/m/02z32qm,Fusillade +424,/m/0_1c,Artillery fire +425,/m/073cg4,Cap gun +426,/m/0g6b5,Fireworks +427,/g/122z_qxw,Firecracker +428,/m/07qsvvw,"Burst, pop" +429,/m/07pxg6y,Eruption +430,/m/07qqyl4,Boom +431,/m/083vt,Wood +432,/m/07pczhz,Chop +433,/m/07pl1bw,Splinter +434,/m/07qs1cx,Crack +435,/m/039jq,Glass +436,/m/07q7njn,"Chink, clink" +437,/m/07rn7sz,Shatter +438,/m/04k94,Liquid +439,/m/07rrlb6,"Splash, splatter" +440,/m/07p6mqd,Slosh +441,/m/07qlwh6,Squish +442,/m/07r5v4s,Drip +443,/m/07prgkl,Pour +444,/m/07pqc89,"Trickle, dribble" +445,/t/dd00088,Gush +446,/m/07p7b8y,Fill (with liquid) +447,/m/07qlf79,Spray +448,/m/07ptzwd,Pump (liquid) +449,/m/07ptfmf,Stir +450,/m/0dv3j,Boiling +451,/m/0790c,Sonar +452,/m/0dl83,Arrow +453,/m/07rqsjt,"Whoosh, swoosh, swish" +454,/m/07qnq_y,"Thump, thud" +455,/m/07rrh0c,Thunk +456,/m/0b_fwt,Electronic tuner +457,/m/02rr_,Effects unit +458,/m/07m2kt,Chorus effect +459,/m/018w8,Basketball bounce +460,/m/07pws3f,Bang +461,/m/07ryjzk,"Slap, smack" +462,/m/07rdhzs,"Whack, thwack" +463,/m/07pjjrj,"Smash, crash" +464,/m/07pc8lb,Breaking +465,/m/07pqn27,Bouncing +466,/m/07rbp7_,Whip +467,/m/07pyf11,Flap +468,/m/07qb_dv,Scratch +469,/m/07qv4k0,Scrape +470,/m/07pdjhy,Rub +471,/m/07s8j8t,Roll +472,/m/07plct2,Crushing +473,/t/dd00112,"Crumpling, crinkling" +474,/m/07qcx4z,Tearing +475,/m/02fs_r,"Beep, bleep" +476,/m/07qwdck,Ping +477,/m/07phxs1,Ding +478,/m/07rv4dm,Clang +479,/m/07s02z0,Squeal +480,/m/07qh7jl,Creak +481,/m/07qwyj0,Rustle +482,/m/07s34ls,Whir +483,/m/07qmpdm,Clatter +484,/m/07p9k1k,Sizzle +485,/m/07qc9xj,Clicking +486,/m/07rwm0c,Clickety-clack +487,/m/07phhsh,Rumble +488,/m/07qyrcz,Plop +489,/m/07qfgpx,"Jingle, tinkle" +490,/m/07rcgpl,Hum +491,/m/07p78v5,Zing +492,/t/dd00121,Boing +493,/m/07s12q4,Crunch +494,/m/028v0c,Silence +495,/m/01v_m0,Sine wave +496,/m/0b9m1,Harmonic +497,/m/0hdsk,Chirp tone +498,/m/0c1dj,Sound effect +499,/m/07pt_g0,Pulse +500,/t/dd00125,"Inside, small room" +501,/t/dd00126,"Inside, large room or hall" +502,/t/dd00127,"Inside, public space" +503,/t/dd00128,"Outside, urban or manmade" +504,/t/dd00129,"Outside, rural or natural" +505,/m/01b9nn,Reverberation +506,/m/01jnbd,Echo +507,/m/096m7z,Noise +508,/m/06_y0by,Environmental noise +509,/m/07rgkc5,Static +510,/m/06xkwv,Mains hum +511,/m/0g12c5,Distortion 
+512,/m/08p9q4,Sidetone +513,/m/07szfh9,Cacophony +514,/m/0chx_,White noise +515,/m/0cj0r,Pink noise +516,/m/07p_0gm,Throbbing +517,/m/01jwx6,Vibration +518,/m/07c52,Television +519,/m/06bz3,Radio +520,/m/07hvw1,Field recording diff --git a/models/research/audioset/yamnet/yamnet_test.py b/models/research/audioset/yamnet/yamnet_test.py new file mode 100644 index 0000000000000000000000000000000000000000..c3f64859949ce4bc7cc83529334a9e29da0d0124 --- /dev/null +++ b/models/research/audioset/yamnet/yamnet_test.py @@ -0,0 +1,70 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Installation test for YAMNet.""" + +import numpy as np +import tensorflow as tf + +import params +import yamnet + +class YAMNetTest(tf.test.TestCase): + + _yamnet_graph = None + _yamnet = None + _yamnet_classes = None + + @classmethod + def setUpClass(cls): + super(YAMNetTest, cls).setUpClass() + cls._yamnet_graph = tf.Graph() + with cls._yamnet_graph.as_default(): + cls._yamnet = yamnet.yamnet_frames_model(params) + cls._yamnet.load_weights('yamnet.h5') + cls._yamnet_classes = yamnet.class_names('yamnet_class_map.csv') + + def clip_test(self, waveform, expected_class_name, top_n=10): + """Run the model on the waveform, check that expected class is in top-n.""" + with YAMNetTest._yamnet_graph.as_default(): + prediction = np.mean(YAMNetTest._yamnet.predict( + np.reshape(waveform, [1, -1]), steps=1)[0], axis=0) + top_n_class_names = YAMNetTest._yamnet_classes[ + np.argsort(prediction)[-top_n:]] + self.assertIn(expected_class_name, top_n_class_names) + + def testZeros(self): + self.clip_test( + waveform=np.zeros((1, int(3 * params.SAMPLE_RATE))), + expected_class_name='Silence') + + def testRandom(self): + np.random.seed(51773) # Ensure repeatability. 
+ self.clip_test( + waveform=np.random.uniform(-1.0, +1.0, + (1, int(3 * params.SAMPLE_RATE))), + expected_class_name='White noise') + + def testSine(self): + self.clip_test( + waveform=np.reshape( + np.sin(2 * np.pi * 440 * np.linspace( + 0, 3, int(3 *params.SAMPLE_RATE))), + [1, -1]), + expected_class_name='Sine wave') + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/audioset/yamnet/yamnet_visualization.ipynb b/models/research/audioset/yamnet/yamnet_visualization.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..49e2186f2c7df022903f0c74e3937947663dabea --- /dev/null +++ b/models/research/audioset/yamnet/yamnet_visualization.ipynb @@ -0,0 +1,198 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Copyright 2019 The TensorFlow Authors All Rights Reserved.\n", + "#\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# http://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License.\n", + "# ==============================================================================" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Visualization of the YAMNet audio event classification model.\n", + "# See https://github.com/tensorflow/models/tree/master/research/audioset/yamnet/" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "# Imports.\n", + "import numpy as np\n", + "import soundfile as sf\n", + "\n", + "import matplotlib.pyplot as plt\n", + "\n", + "import params\n", + "import yamnet as yamnet_model\n", + "import tensorflow as tf" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Sample rate = 16000\n" + ] + } + ], + "source": [ + "# Read in the audio.\n", + "# You can get this example waveform via:\n", + "# curl -O https://storage.googleapis.com/audioset/speech_whistling2.wav\n", + "\n", + "wav_file_name = 'speech_whistling2.wav'\n", + "\n", + "wav_data, sr = sf.read(wav_file_name, dtype=np.int16)\n", + "waveform = wav_data / 32768.0\n", + "# The graph is designed for a sampling rate of 16 kHz, but higher rates \n", + "# should work too.\n", + "params.SAMPLE_RATE = sr\n", + "print(\"Sample rate =\", params.SAMPLE_RATE)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /Users/dpwe/google/vggish/lib/python3.7/site-packages/tensorflow_core/python/ops/resource_variable_ops.py:1635: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "If using Keras pass *_constraint arguments to layers.\n" + ] + } + ], + "source": [ + "# Set up the YAMNet model.\n", + "class_names = 
yamnet_model.class_names('yamnet_class_map.csv')\n", + "params.PATCH_HOP_SECONDS = 0.1 # 10 Hz scores frame rate.\n", + "graph = tf.Graph()\n", + "with graph.as_default():\n", + " yamnet = yamnet_model.yamnet_frames_model(params)\n", + " yamnet.load_weights('yamnet.h5')" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:When passing input data as arrays, do not specify `steps_per_epoch`/`steps` argument. Please use `batch_size` instead.\n" + ] + } + ], + "source": [ + "# Run the model.\n", + "with graph.as_default():\n", + " scores, spectrogram = yamnet.predict(np.reshape(waveform, [1, -1]), steps=1)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "<base64-encoded PNG output of this cell omitted>"
ve7cST5JY5VmD8q7HL3Z3ctadqnP5oth2Os73Z1W3+wmZ7WUH65RJz4IUFnLYZGSS9RgX5rmdWP3s062jeZ70+0iPiNhV2PNE7CrMcSLXFokXPrikjDvrmpNqo9708h6hVTaz62F8zDq2o74fwLTRZNHqPJMqi2QIjy3+02k9Z8v4y67sZKa0+pRB17+w3RI6y9wJ/Tv9bjp/XIgIGZmF+U5JGeMHmG8FWxTY2Ol3YHa6HogoISZWeoxVLzz9GzUmQLSZ+T5SPdkLJ7zKmFGYN+APEGoheWHzQ1i97eONLWltDpvyRcVdeOilcl3xl9VVwb3Wta51rWtd61rXutYvqq4K7i+kRMQC/wD4Luf8t0Xkj4D/DXgE/iHwn+acp7/sNUiJ5mlmeOdXgTYb8Ce9ozYvR+14P+hdNADWXpTXYdTfB+VQLp2dyRtS7XBPFz9t6qq1Kz3XXpW5pB49SVnvupdOemeUb+osqfXIEFTpWvh/MRdWKsiUSJsa+SJxCWfJInr37wxmztrxnoTcqHcpWvVO2Tqy2/bI7ZlTX3O77deX+fTjrSqJRw+VpvnEzaIAW5qfLNtu4DhXiJS0mQ+qNEwPBr+3yGRI7yfmVKvXsCgksYO+M6pM/WCwTjj+2uGPmeZFj3F8rImV0DzPpPoBcqb63fN6HsRbZAykribVyh51b+NKo8hCyU/P5F2HHHtVYRel9TzqdbTqXWPxapXBkFqvaT7eYnPWx4qo0rKe70qz143B7I/QtRfaAcDLHu5vVb0NUX1yxQML6OMKD9YcR2g8MkyYqShj3qkim72OM8dKyIDC2U1K5GDsVzUn1/7Cy3x6hZuterjeNOknbmvGW/394TeG6U6z2F2vSqQdLxxHN+i5dEdVcLNRxXxR1KYboX3KhI0lOcH1CQlp5UKvytIU9JhyZr5XZWLx2Fb7iWnn2f+x4/lv7FSxm2X1bCcLblYFZbhTRTA0Zk0qG2+F9pOqyskJtjbKUC0K8Oa7YfXwpdrh9gOp9asXLnQQthnaiDlZ+m8C1En9pr7MO4F81DQmOwrTjXKyF8qImYTpVjh/VeHGRGgspjerfz9VlmzUdxxbx/AucfebVz5sj2VaG24rnX87P3IKFSEZpuR4HfR8HYaafvBMpqEdiuc9XxTW5IT51uAGo/77p6C7Ig+FTBJV0ZQEbjsTZ0NODnev3r4YDUyF63o05C5i3yyxXlR3pUa0P45kr+mC7qDq3aKWS9BdBvd8Im0b0n2N7SO2+IBD41dP8flj8dV+EuxYdhwikGDze+H0jQGj49J80VA+fbUjOaH+3JO3HXZ/ulBkjJDutsgUGL++4fzBYieYQLnfQNzqtWXvkbsZXj3pJiBjWTtMJviIzMpNZtTj8K9lvLSZbFV1x2akK5zckoiZkiC9JW8D800mVQk32UvnfYbmKWPnwkv+/qg7ULsaYxfYOJiXt1XFkzkglXK1AfX8GyVmmJcjuWt0bVjWABFle7/syQ93+hrngfBBCQfNa2S8NZhZd94W3/x6jGXHIsvy76qizxu3snrFWGwfVJWdI/G2ITYWdyge3AyIJrVlIwx3ltDIei3njWivhqf0vej7jXeXBK/shPlekwfNlHAv/UrCyc7ortCpL5QYo+u3lQtpIeguZ6zLurFVBXlhZodtxu8N4Q5kNsgkqrpOZdckCG4STSHrdAdv8dkCzFkT/sJO+b0SdN1adjPnbcYZMEFIldJAkhNe/9ivJBqkKNln/b7BCAkQs/Ctx+IxvpBE/ry6Kri/nPovgX/0xf//74H/Ief8rwEvwH/+L+WornWta13rWte61rX+BddVwf0FlIj8GvgPgP8O+K9ERIB/D/iPy0P+V+C/Bf7nv/SFYsQfZ84f/XpHGVrl3GIMcjwTv36HOfYXX1RdAfbSpT7O5E55tSsFoa7xn47qvfRW77pK4hVo1/7CK0w3LeY0QluvqmBqHHYKpE49kxggZtJGzX7qx1U2a/YWcxrXO1qAtKnVs2WAnOk/eGIlmCBIUXDrbmY4VsSDp/cBYzIxWD79oHf3ZCBKUTAE92YgsSYqKZwRrElMwWFMglFI+3JnveSZV4ntzUDvMt4k5r0qm+5mQraZMDrSs/5bHAVp4Lyk3CyyOjDdemWr3u9+fpta+LaL2ogI9lA6TVdSgVevcqMEBbNf2lsjeZqRulq7U5cksuUUSMqkch3FqgInMSPLa3inirsIWEO66TAvB6CoWTc7HU+Ff5udVZVloWrkrKSGQucgZxgnTWADiIl4t0ViRAbdScjTDHvleYpzmrT2doSmXlV/+93nNREup4jMlTJDvSPtWk3emxbWJPg37Q62I9z8WeTtDy3utKj1qlhoMpWmz2UjK4HADlA/j8w3FWbOuMOsnunX02VMblvSrsGeJpJRv1nybo04cAAAIABJREFUQiwcyPmmY9wZJMD8WOgJov4/YO1AHu6V9BArTaVbu7E9DO+E+kU7s10fNBWoqFHTfYXp3Ko6x01Fat2qEI+PierrE483J6b3jtrpXN74iVAG89vQMN5YTruGCKTeYc4WCct7gGTh8K3BTobd7yLj3YZ6X7ranYA02HNgvPOQhZAMPx3Vu7/fd9TtTO0DtQ+Ms6OrJ+ZosaVL/9u7V576jr1L9HTUP/18vp3fG9rn4r/PYAf1PubiBxzvPCZoYlIG9Z92F2k0B6N+0jqRg6wqrz0WT/GoCuvwoabaB8iqTNsxYoqyKP1MeGx1Z2mcsWdLqi3DY/F0z5loBInqmzazpiou6VUITO8Tw68y7qDrzrQV3FDGa8ycv6rwp6RUjDFgUtJ1ePkczpB9rQlhY2Z8EFJ9WU/cwRCrTGoTnB1yO+vyXBRcM2iSWdhGcpehSkhvmUuqY24S5mxJTYK+ED7e7Jr6mPcVZoaYhPmrCQZL3CT9PECsc+kb0O758eOW+scTZgjrupc6S7q/UVUyJNKmId5U2DedFLFrMJtadxrbWkkLQ1j9pzIp2UVudjr3jcA0Y8tOot0pqQQjq/fa3sykJ71O40Ok/mwZPign1vWWm386qN95ER6zMq5JCfs2ELc1/jyrPxgI77ZkJ9g+loQ5i+8z03b5HlGahqbj6fxX9bNcp6Nw+spRHQz+nIi1wT+ldbzb/Yl0u1Ez+jgh2eu6VxjzAMYZ3E9vyLt3jPdC8pnhMa9499iW3hCbtc9E9HtLgr6JG4XhY5kjNpMThR2vv5+6pPMjAVaJDLHJFx77rGM7eV0Dphv9ufvx4iv354Q/RkxMjPcV9fOoPSaF4GIah/3xFRZV+i+oq4L7y6j/Efg7XFpxHoHXnPPSXfQ74Js/74ki8l+IyD8QkX8w5eHPe8i1rnWta13rWte61l+puiq4f8VLRP428FPO+R+KyL/zz/v8nPPfA/4ewK19l0Pn6X4KuEMhEFjBP5+VVWot9tOrqnNz+f2ixunBoLdzercsxTfp+0m9uaceO87acR+SduujvktN28nYH15U6W1qUunyXnyfUjpnZUnSMkvqSUWunHavnsqd3qxd+HosgewMcVtjpkholoxsCN/rY6aqgk
3EnA1T7MhdgN5ixvIeTaJ6sqrQFH9UtZdVUfNHGB5hKgk+/bHGu7wyFLNRhYIonPatkhiyA1dSeJbu7Mkw3Sl3OHlDVcnK2nVnZfiG1lLtZ7ITwm2tzM1SqaswL0ds6caPj7vVp5QaVShkCmCFdNupJ62otfEPvsLue1KrSWAyTqp0FfXUnEbmxw3++UyqHOmmxX7aqydu6dLNGRlmUqdkBJkjabfBfH7RX+82q8or+6BMW7ikjFFU48W3m9LPOme1QzwqySOhxA4RxF/SjiREpG1Wjq96IO06ZuX2hvxWFF8a5Psnun5i+rX68rZ/diZs1G887ywSoX7JtE+qWswbQ/0aOb93xaOe8YeZVBdO5lF3PewYdX4IhG21ek+zMcSbSo8/ZeLGY2JCkhCbomh5JQFko5QAyapumUWMdzDcC5ufEq9/bFX9SoIt96jjY2LzO8N0K/i3TKwN3e/7lb9r+0AqSnuqjPphRWif9To+d4ldN+JNoqpHahuI2ZAQTIFVvt8ccSbx2rbELByGmvO5Jh70WtiDZbxT66J/E4Y7g0RVgEBpFfPGEKsKBG7/BPr9PYdfFzrIi0XeWt6+SqRO/Z9vo1DtDeOjXoufohIBwjYroKOwf79UPwGa54Ad4trt7gpFIRt4+41Tv+dzjZmEZDJh+Wo8OOxgiDvAQDh6JF4UYkTJEfrZdKxIa5CXjFvoJrsGmVXlx1rc60BuvdIUgPpV/dPTrSBByRjzNq/Hnh3Uny/eUH8CO11S6cjgxoTrI6G1SG0xjVs76t1h1B2RMVC1XpVi55h3ghkXnzCIlVWplRdLbLXbHYoPVaB6sYQ2IwdL3ERY09YEexbs4AhdwoxmTYnUYzDUT8LZQjZWz3OT1td3Z/V9S8iEzmJCZnzfgZHVV+4PE+G+VYU6JOWgW8EUWo7MUVO8KlUtzWFApnn15+N0xylvLTLMunYYs35PnL721G+Z/kNJ53uzxMkoNaWcg2zBjII/anJgFik7HyUBcD+RaoMbZ90925ddqLKGute+0DQsWMGfPMOdYd7p77sfE/2jKeq8rh3z7pK4iVGfbraG5inoPO4qzLmo2HdbVYut0TU1RO2VCBH7VvyrcyBtCzO57FYN79PKsaVOultZFHl7NqrOF7HU9eCOSvUwwZYEvnz5riw6W2r1uTkb7FlWxq0ZzZpuNt6X8durmusK+10iHH9V0T7pWjB8qKmf5pV+kssO4fr98RfU9Q/cv/r1bwP/oYj8+0AD3AD/E3AnIq6ouL8GvvuXeIzXuta1rnWta13rWv/C6voH7l/xyjn/XeDvAhQF97/OOf8nIvK/A/8RSlL4z4D/4//Hi+EPE8kb/A+aGKIM2axqXEqqis0zuMtdMTGpl7GRS7LINJM+3ANg3s6q1ha1V449uak1Hxv0LnwIyP6od9mVJ5XMcQDpg6azGO3cJQFz0DtTVHmQOeLmqN7bcodsn1Sly11Dah32ODI9doRG/YkI3P4/+h7Do2F4p2k1boDzR0PcakoSsCqxALFL5Cbh3zzjffGHeeWAhuIPFKsd5f6tdJbeQGw0Hcf+VCFJO4+/7AHNXcS9OpLLuLOqBGZkNRJJonSIC9OtR5IqfP7z5VXMWw91RUa9y+oH1LteexzVi2ctZioEhNpiWvXqmfNMbitVvGunP/fTRZ0Vwb/0MM2YnMnJruo7fulsjvq8WZWDLAK1U5oCrKq7Pq5WisEXqWQSEzln5dkC6aYjbmvcSyFuLGpuQr142atXd1FpvFvv7hcFyzy96Wv7L/iY3kNdkT49Ye5u4XjGDLvy2gn/+UzcKYP2/HVN+xTxBx1vc1eRvFC/ReyQGO8c4b2/+KyNIKmCrGQFe55JlVFFCt0VWXyFsa6pP51X3/KitofWMDwYxgdNyUuen3kmSYI/CeO9ZXxMuLM+7/wrfYw7L3n2aEdyJfQfm4tPuA/rfLR9ILZK3Wh/VEWk+9OW58MDT3XGPfaEp5b6J8vwB9PKNjX3RTU6eNyrJTUZMwjSLl5mTQesn8EfM5sfA+4UL15yA2aMTHc1EhKxtYTGMRV/a7asqpHdOzCZ2CYmA+6kj2k+C7HWdKlYq0fZny+eRjdkqlelrtj9QNzVpFrVagA7J6YbCDdJk9u6iH1xxLbMKZuJD7P6cBNIb0HymlAYGvVcNy8JOyTmrdXOdyOaLFjGmz1rEpOO2ax9Bu913jXPgeQNh994YpcV8WvzSjiQSFGNhfpFcL0m59nSub+k1JEzsXO6XntD6Ip33qrP2VSO7HSM9R+Uo7xwuDWJUTC9qpfJF1b37YWfWr2qAn//jzLPfwvss2Uuv89tZH4ATFbKxDZCE7GfS1JZ2YW6+RMd02GTkbOylZfPGFodE/4YVLVNefUxgzKjl3OXNjXmeVBaTlkrhKJWpkSuK8x5uFBVAMTp7t9x0NTLyhO/uiN2JcHwnSF5VZOHj7pjIBH8cZHSSyJlp3MqWTh/1HRMW+ZVFt0ls4Mj54w5J/3uPC/fi0kpNr7THaJOGB6F8UGf338QskvETcK/qDpavQnzTSEQ3CgvPTbC2x9UbH7U5Dy3+FtHJcosCW8yzcoIt1bZ50B8f0euLcevLW9/I5JtprofuNlcLIoxCXO0iGRCsPRvDakwj8NDxm1nDOBcJCXB2szYl50bl3A+4gFrEyEYjMkYo5/hfKhBoG+r9TveDjouX8uYtaPyvbNx+D7jzol567BFqTYHpV/Y0wS/5S+sqwf3l1v/Ddpw9ieoJ/d/+Zd8PNe61rWuda1rXeta/0LqquD+girn/PeBv19+/lPg3/rnej5gjgNG5JIi9nogDyPS1KoMzrP6Gxd26uEMlSeVdKule57KX7r2N23x/dSq3izdxbHIIKNy+ai88korh8yRuNXh6U6jeoCdJvXkxiPZXVJMFiGj9phB88WTt5jCXg03Dbaflc0aE+O9EOtM7DLuXDyP5aXmXWZ60FQzfCJL8Va92rVT1h8McRayu6TbZFM4s8Dr6wb7Q40/aDoPgJ2E0FiGb2fkZDCj4GBVgrIAJ4c/CqkSbK93sd2niD8uiUcJM6mPMLSW5vuz3qIuyqG1qg4UNXBJ1FkZtSKEXY09Kf9UpvAzhqxMsxr+5oBMmbTr1Dv9Zd63d+pZK15brKafybEorG2NZK/XYNMiPz4hHx4uPu2SqGSGSTmdzpKb6uKlyllpGo83pRsZ/cyFYZu9XTuS9TOLfoaFs5szvL4hm05JHFb0PQqtQQdKIoeINIJ885G4aTCHM1/K6blxxM4xPKpS/vatw4TiV8yQnCF0QnKW0ArVPtO86nUab4VpZ6kOkdNXHr7ybL6fmO4uXFPXJyUbGIGYsU+v5Nst6bEp1zoz3sP47YRvZ4xNNFVQLitgTKbvK+p6xgZLKP9eV/oZp8lx3FdUT5bQCdNRcL3QPC9KU0N26mXLJbWs++mSMtY8aVf0+dvA3HskCDd/mgkbT9hdKAPxzat33Gv6kRsuJ7HaqxoosWTOv0yahpcu7GgzBJrvZ86/2THeGvU4ljkVukz9JOtr68mDtAu4U/FcW5hvVFHNA
s2zzkVfkp+SFeadw5+icq4PI2ay65oxt40q41GQ3UwerfKGm6IAvxnitzNp1DlrZmV2Lr54PV7ovh8wU8S/WUxQaoNZ/IK1hZAwv/ukDGhjSDftyhfv31dMW4M/g/8nhukWhq/DSmj5cm2JNdz+adTUvDVtLeHeBmJX0f7jT8y/usceR8xY1NOUsS9nwuOGZGVNlPJH+WJNE6Zbnf79NwEzKDc5dfom7tUy3SfaHw3P/7qOGS6N85iDMojjVr3kVAmz99TPxW/dqBrff4D5Rrv+3Ukuux6OVfkeHiv8MWL7iPnT7y/++rbGTDMyB+w4kxvtvVjOg0Sl6KjfeCJ1DTJO69qSvdPvKGO0v2DTInPi8K2uHeM9ZKu81/Z7x7zJhMfAXPiv4TEo23c0zBtLtRfmrfJjfSGsaKJfIuwqYm1xfYU9zdCWa3EeSfdb5psKd5rZ/zXD/DfOtG3h5Iru6hnJhK8MKYnO+TK/4/ctsdbrdvqVMO88/pCxk54jf87ULzPVp5N+D1hTdlcrTFGysze4H/eEbou5mxBRdXUq61uIprTZWDbtiKsScpuxtqxvo0ckk5JgTCZnISWh2+q67G1kjpaclX8MEGaLL2tTs5mYJ0e4mTX502VCFOZ7u+5a2LOhfjble0PIYmg/zZcEu13DfFOpx/kvqauCe61rXeta17rWta51rV9UXRXca10q59UjSfH0xI+P2P3p4mXq7mCYoC9+nboiO1UoSFn9l2MgNzXmuXhgbzaFX2sgRlLnCduK6rPKmxKTdrQCctSu/vSbD5j54r9CRNVDY4ibGttrFvdS7vOw3q1lK7inI2lXOJCiJAXJmlZWvbWETr204/1FGcoG0lY7e/MuYFwizYvEANmDOwjDh0SuE7a/JEs1T6rqHI8N/p/WuLP+/9CV4zuVTtgg+P3lPRfv5LzJ+KOsfrvtd2ntCF8+2PjgaD5nzBipRiUJSEjqkwVMiOqZxihvU4S48aogAKl2mClipqCKqzEXFaT83p6mlUWrL0phHZcKUf2zzhRFZFKfV/HG5sarV7F4fGW3gX7UMQLk9ovXqjz5UDy4ZTzlx7tCzJjUR9s4zOtJs+NRTy4pYY5nHRPDSA4RbhQUKedBx1lKpMbpa4WE7I/ku+KJnGZNQXs7wLsHPR8l3Qeg/3qDHVSlnDaqqMcGxk5/X73B3P38+k53wvignzHW6ilrPwmnb4TqDeo3S/edfsbUFCV4Stgp6Dn2DqYZV963f++wE8jRkp4docqaPLUMxzpBEMYbwZhEVQXCbL9QeBOxTpjgNOEvKmFgulm8p7KmNNkhq+9x1iRBgPYpYYIhtpbpTrC9+jaTS1Sf9finqiTxmUxyhupV1D9eVFzJ6Hiddf70XzXYITGU89T9qHPYFU5zNoIJrN3cqcqFB5uJ26ipYbcT4a0idIuHOdN8Nsy7jB0KS9YJVdn1mDtlkyYrpK4iebsqyADHbyrGd6ruShZV6BL4l8L0fB/wNpEL5UTTnNSvCnDzTxKStcu7+64HI4StJ3QWiTqH/H7CDUGVyH6EEJi/uV2JGdPGMG+F4V2GrJ5LczarP9YOmvhkBx1bh28c3edE6Mq1DkLY7Kj2gfT5GbtrtYP/7bLTkbaFt30KxNbSfFbVdLrR3/cf9fzFOuOfLXZSPqqE8j3QZrid6bOn2hvcC0x3GSnqZuoiprfIaHRMverBL9dJqTD6mtkoVzX39ovEObA9mEkT75IVXEwwjuSH2/VzYMxKeSEm9eUuOzM5k7d6ztNti/vpDb7c3QlRd2+6Bjmckf0Rebzh+G1Zj//mgboKeBtJydBI5r7riX9Q/N5u5qfjlmHyDNuKoanwb8J0dxmz7ScIjaP9bGg/B53jn99I2+K/b2riFwl3kiAePafp8l2WzxaJQnYZ2QR2Nz1tpWv4i8kMdUv1VBTdWb2rzZM+t3pT9Vb6URmx3qmCXZjnoDti4atbqn3m9LnW+flmePuNfo/YV+Vh520gfN+RnSab9Xfl+zgI/qD++vE+UT8bbA/Hv7ZcB1VgU63jJ1slLJzLzs/uHzvSDdBk7VvZaCqo31vdOUWJMNNNZvOdEkP8KWlCXFHjDeC9Xf9u+IvqquBe61rXuta1rnWta13rF1VXBfdaP6+cIabVd2mPvWZYl872XLy1eVcUsy+UL6y5vIY1pHflzjslBKcJM95hcsbP6ZJU1lXI2GDGGXk7IdZgjuOq9mVXmH5ZM7Xd01EVimPxAnq3HjfO6l29KJkBtGM/bWrl6BpD90mZfy9bVX6gdClbsEeDCWDaonpSJNrSNbuot6BqyuLFM8W+mo5eO5Kj+qSWZClJRfURVT6qV6F+EuaioMw3GX9Srm5yejzVPq6kBID6JawcQHuaNKHr8xvxvZ5nmYLyHaew+pj9S1g78yVnUuOIuxppPabXzm5zKD4m6co5tnptigd2eX72ltiqOiQhKfliEnJdqWIA5K4ulICoqWO3GyU2FJXFvOq1y5tW3+vxTq9vviRqYQQ7R5gD9kUV5aVT2jy9Ed/fYfcnfc2mRqaZ2JVUqLcT+eFWu+QT2JczPO9VhV7u9o360sRt9N9m9SxPt3qt543h9JWOzf6rxUOYV7WerIlg/qi+wukuE24j+JKyU0fSYOk/OqpXPY7zO0usmnIdoPk8EzaOalJCyOpXdxcObrXPhPYLZcfmtZs6W8EdLN3/7Tj+QUJOQmoy1WvpdG4zsku4E9SvqpqZGW7+ybiOBZkuOyS2nxk+blZ/5vCgKUrupP63VMPpm0R+mJFj8RP2FnsyhPdzmROmzJOLHzFZVYNtn/DnQGgszateS9frf2NX4c4RszOY6ZIOaD9pV7tkIYuqheHosSezKov1s+AP6iXefK9UAH9KTJuiur1Eqr1SLMLGl3lrvqBVQPU40NQzbz/skNEwP4aSzARyLqq4ZFINac6rCrlcyyzK8Dx/0+Lfos7x4sEGiK3DDI78Bx+wb6rif7k7lRzMW11fTID+q/Qz6SlVRQnzKE/bCaEWXBFok4rKmDkhv/5IqhzETNwVX+ZLT+gqMJCsYe403W28EWRTzsNG38NMwvSQCNuEPxi63+n4O/2xkiTyXWBODgmqMC/Kpe2VCR0eAzHqc9zpssNlwrKmCanK5KhEnKmogmZWRXC6sZrQ1RrsdyN883ElXsgcSF1JqvMtsuxiuUUG1rV+/e+i3i5JZsW3K/2oJKDi61/6IFISchasyVRuxpvEa9+Qkr7+j9OO4anF7a0uISflTqv3e6GXgDsrM9sNuluWK0/cqYqcyq6bPSub3Z1h+ydeucfod8qy2+KPwvDOcvqx5tCW8zQKBpAguAHqp0yzT9hxeb7RNXSjNIfYemw/Yw7DSi2SY0+6vaP7lJh+a5Wtu81lIAFZiUB58koWMZl5mzGlX0UKC97MSh0iQdiyzkmJQryNIBn32WN7nddz8fiOD5nmk/rZh3fQfSeEja43y3edJF23koNmn7RfwBmGP3ws463syMYvdnn/nLoquNe61rWuda1rXeta1/pF1VXBvdbP66cn7UAvldtaVdq+J4cA
XUPuGr1LRP08ch61Q93Z4uvUBCuZVLVLXUWuwMREtlYVm7eBcKOKlnsb1rvsfLNR2sHTK6CZ9OFhgxtmpQSEwhUcxtUbKqceRJO5zGmEBPG2XbuYF0+mxIicEq5vmTaWaq9pLPoiIEsjv4P5uUGyrApGtupDc2dD8kJssqaYlRvI6UbvvO3JYAf1Z1YHVuanCXrn6/YWM+l7hI2+LihjMgv0X2XcUTu1553FH+J6G5qcIDEx3zWY86ykiE27Uimmxw53nLCvZ8KuECuMYIYvkn5EyM5gUlZ/6rEnLZn1IanHdpwwMRLf7VSBMIVN+HJC5vLYpN3hMqtCsXBoU+PU55gzYavXWU79xQdnjP5cfLDxbot9OaxEDDPpuMneIskoz3fTYI5FZa68HpM1yIwqMnNQxaaMH+agXcuuXPcvPcQsPuFUjqVChpl4266eSDtl7CxMNxcKwLzLl652B6HJDB8yqVa1zfRmxW2yd/jx0qGOqJKv6UTqRZScia3jvNlQv85k2SIxM28LYzbCXKm3bvv7yPFjIYgUkcW8avf43KkvHMDvzfp7fxAkG9wJzATVKamPvCiX5hyxZ90FSLVlbhqmG7tmwVdvmVgL1VtmfFSvK0COypvV8aQ+QUajyVV+yZEvBIKTdsZLzJiYIWaq/XTJpO8D41cdyWlnf7JCdcwX/nWEeSuQLdNt4ffuHdl+6UvXLmt/1MfHTnDnRP2ia48dgqql2WpamxNcHxnvL+zlufdMx0p3I6qkSpZc+K757KBJ2IMlA/YsmNLt/fLXheYp43opCWp6bSWxKo+xNshdje0D4a7FTFGVtuLFXnjFJqon30y60xMb/cyhy9hZ1dvlvMQGwheecH9OHL9taBtLbAz+qGq5ngP9rG4/8vbXb3j7I1XLUsU6RiUKYaM+3NRFpNd0vOGxPCAJMprCABbC6nm+jPHhVzMU76h/K2p+GTduMKsfN7vCB88C/x97b/IzS5amef3OYKMPn3/THSIiMyIqK7ursquoQkAjIdESEjuWSPSaP4klvWTHmgWbXkILqWl1Q5FkDZmVU8Sdv8Enm87E4j1ufhOqa0+Wv1Lo3vjcP3M3s2PHrj3neX/PCY/tRcmOhWJcG+pHz3S3wAx+9hLHZSWK5BhkHGuNW5UUT1kV73vUdpJ7SGTuIzkptamRlRqiMNpVNxIWFe17+V7TLxeMQG8hrMI5hTKvSBTPhjJ7xEOTWP464VvF4v2ZKlLsA9olyocOgnBoheKi5/EYK4M5yM2mOMo2ymf5rOkKqsdMB1kK31n6Ms73oeKQx4tPLN959BixXV4hcxH9dCBeL9H7QQgt26PMk6d77LKZr8FyL551MyiKnYwTv4T6k+zPtFL4VtR1dYLQZPVWByAKG3haQ/NGvqNbpUwCMud+E85M+GkjKX22F2KF7RJWIDxyvSPbLA55Lu4j2iW0C/P8FRor512ft/931UXBvdSlLnWpS13qUpe61O9VXRTcS/1uBVFI53Sp7H9MyxZCELVUa8z0mTp6Sk3Z9tBUxCtJnzLHnHS0KDCdhxDQkwOrhWeaPbJhVZGMnhVX8+5hTkEDUZ1SWxFrK4lHtcU+Iwod4F9fC+9VZT/v4PP2cmd+YaEpUbsj8X5D/XFgWrZUDwqbeZfjjXQwxyoR64TpNWHj6X6YlaOjwCiVScJ69KLi1h9z/rYVxSUsItFqUdCC5G2D+Ii6V4bqSeGWCd+IAuKzSqMSVM8KlRTFDuyYqJ49xMR4nfmpUTxP0SrCuhJ/rtXE9qxGxdKg1k32RUsaUKxzwowL2OcOv2nRXc5ir0owJ6qDGI2TbVC7I2Y/yhNzVohTXaIPA6mpiKUQCojxdzi0yZz9jSokwqJCuYDfyKqA3Wnh4H7mn4pXi1mBJSX48ED48VewrDCHEX9VUebVgGQ1qsv+7LKAcSJuVjNJIlUluh+Jixrz4Zl4tSCuG/Fl589TLqB8IG4aYmnwty3jteXwZaYDrMid8ZF0M2FsxKiEd5kTuS1E1VGATpSfbO4WzsqkU9SfRI1p3id0gPIQiTZ3Wr+TcVk+O7QL6MNEWFdMm5LuPn9Gq/At1B8T3Z3Grc5qPyBqSspEh5RTvEbptJffFxW3OCbcQtF+iiSlmNYylkqE5hBKjXZJViVK6G+yB7gS1mn/QtLDUpNBs0486gDh9YQyieK3tSjtZRKm7gnE4hVJK0nOcwbjLLZPcyKcBXyrKQ6B/rbg+IW8f7rJ7NWjdN1rB/HFSPIavbWijma1WA8Kt5Lkv1DJqsh0ZWaSw3RlsV2gfifElrAohSNdZa/xrUI9FRinJGmwCbATWguAXnjiaOS6v0qo0WDfn7u9i14xXitUTAy3muKQJNlqZFZQm08et7SEyhALRXHwTBtLKPN3uBP1dPFbhVtBMcgx9NLmgD0qzCjd6MVBcfwqSaJZZmz7GkIlqVfxVVbhajmuAMN9Q7F3TLcN/a1iuI8sfqNnNi0gbOME/grwCq4cbqHAn05mPg+lfB+3SsQ2Ees8N9gEZQSv0ZNmuguYo57HwvDKi0czSoe8WyWK/Xk8T7cB32ghd1QwPJYUx0TSJXYQmbd6DhRHL6rtfkJNnvKDE84wzD521Y/EdY0uC9TuQPjqPl8zeYnFGmF4lwUqRKqt/Hz9C810pRjuE/aDwKHdOgpdA1h8p5jWmbO8l/mJYa/BAAAgAElEQVS1eYjYPs4reckoUkjo7ZG4bklthZo8xYOcrFgVKKUITYH2EV9nFTzvQnGA4V6uIzMq6k+JUCtufyoq9uMfV/hG1NPl20j1cZB+jNyDoLedpJZtO+KqFu75037ukQDwm4bdDyt8C75R+ObcTwDiI+7vwfYw3p7vdeX2NK/LamYMCt/ItWezFxnAX+XEzkExvBTaUNJnNV85xfGriGs1xRGmK/H83v3FyO6Hsh+3/9eB4UVN+TQRaqH/KB/RY+7FyPSZtDyvNv9ddVFwL3WpS13qUpe61KUu9XtVFwX3UufKaWJ8eoLX9/OPw9VCErKMwrx5kM71rIal3QFlLamtRUmbnChkWjHd5aerBMZH4mZBqC0YUWqKx+yrdKL8YTX6MEBTowY3J2yZY+72H/yZfWvO3hs9iQdT5xSsVFjscyfMVIQuEGuLVgrVT6RFia9V5tDKNo5f5/1pA6oz8Gpk0UwzVzRcK+J3LemHAyoolIb4WIqfCCifFKFBOJoqMW3ElzduZPuxFL+vf5E/xyR8K2oRQPPOoEdR5oxL+EoxfVlIStZBPmO8UpipoTj4zCuN4qfNHlv7cU+4yXSLEAmNpfxwnJVud9NSvt1hugl312IPdvZJA+Aj7qbFPg+oE/dYfSYbOk+yBnfdYA8TjEG8s0YT6zyVpIQ9OkhJPJ6VJZUWk1mnc4qZD6RC/p5KO5M4lAuo5QL7eCS2FbEpKJ6HswJhjXh36wL7/lkIDimJ/xvEm60U5uOzrDgMTpQb99l+KkUq7HzsDv94yeOfgL/NHMjWQ1K07YjVkdFZ+m2NOsg+1h800yaJb7CQFC0Vmb2pKPGg1Z/SuePYJ/R4IiBoVEziT+4
n3Ms1KftQTx40kLHgsg84ltB/4VE5D36qAtUHgx3ArU782LPqN13J97JHSdzr7izlMVLuMt/3yqJiQk+JfmOZ1opppdj/OHtXD4ZkEqmQ/0wTCINBFZHpPquXOZnIbWSbJ49myL5K3ypUJd/PjLB4m1Ax4RfnRDgVE+OV4fCVwq0TyaRZHR2/9DBqKBJVpppMvSEt/XzdRAtxlSi3huFefL+hUITi9BmJxXcydvSHJ9KXd9B7hj+RL9n94YRpPMFr6nbCO4PfJBjl99ernr4sCN7QbEYOu4b4pJleZcrKQyEs1CtR4qKR461iYryRc3n1C0f3qhJffatwi5JQKD7+x9nfagK61/gWputEqBObnykOf5BXNTR4r9C9ZnztKFYjfdlSPZyoNcJptp0QV8wkqYfFTr5j/7qm3Iq/e/uPEuZVT3y/wC1ltQpArxxKJ9bLnufnBWnSqNHAQs5xsxrodIMqIkMhaq39WMwe3vJmYDqUrO4PTFeW2ka6Q0XK47Vaj4z7CnSSbU8av4CwlH3cvNqz/e6KaSW0kqTF+5nM2Q/qa0v7QY4vqqR6N+Ukw3ycyoJkDKkWdTS1Faob5tfV4AjXLebQy/y4qlEhzuSQUOXeiWehI/SvEnER5nSt4VZj8y0rlOAXQlOpntLMr3YLS7E7paQF/HXFZzMosS1ICh5/0uRVokT1KMo9wLSOxGWQ61xp+hdybb/9z6p5Tij3zEryeFdTNBazyxx554nrFtVP6G6i8JHUVLjbBfY5L+8oxeEHiv4PJuynglBHhhfndD49iWIbKkVYnxATirE4M+FjI570WEfCCvhgZs94shG1Cky1YXl/5PDYwqhndrTyEBYyjtKjmX3c7/5plRMUYf/tAtcqklKYIRBLIzz3E9ToxH8v/v5/wl4U3Etd6lKXutSlLnWpS/1e1UXBvdS5UkKVJSmmOd1K9ZN0Bbsg3cVWlL1T17sqCukSzV6j1Fbo5wPh9TUxP/EVe4cKgViV6OnEvixmn1syGrMfpOs/RHkCPZEZAL+psYdJVNqcZa8mT8rdseb9M2nZoj4bzn7Tok9P7l2QbVsjSUONQUUY7hLjq8+UPUDtLSqBsYHCBMbcgdy0I/tlpK0c3acW3RtRI/IjolsJMxYFaeXxWJIyMwfXrZR03F85Umdp749YE9k5IUWEWuPW4iNMz8L9DJX6HWVQBVFmVEyYwTPc1VRPafYyp7Yi1FYUTaB87InLEnJyU/HUo4aRuKzo7wv0TcHqr55J2RuqJofpjXT++oAaHSoM8++n9QJ8oHjshHQxOVJTihc3K7DFuy3u5RVKZYVwcOjtkXC3zvuQUAehKijnSdsd6mZz9nzDnKYXayspd4czhUEde1HoP2zzQVGox+3MZUYp8eIpJSsNzhPLAhY1+sOTvKepSfsjOqu4oRRFXR1l/ISsOh22BcppMAl7lK7wU1WPopqVWzlnKkqaHUiXcCiFerH5RaC/1rhGk/JXjKXCHgMqFCirGe5L8VzWavbZ+lYUmukqkqqE3Wr0oIlt9qC1nlAbtj+WBKjpOjBt1Jx+lYpI9dGKctxLN7LtP2OvGjAnS+IQGTdG+JR3IlENqhHfZetJTlNWjr6zUETaW1n2cJPFPYsqR5Gg9ZJRv5dzObyOKKcwnUY7hVsoVNAzqcG9LKgfA+PK0H87oQYjXs5K9nF93RGi5vjcMPUFZeNg5agXE0NW04uDZlg7Dj+SlZP0wc5+ZDkQ4BcF4aaCVwvKp5HptmbIC1SvvnjiMFQcdzXGRO5WR45Twf4gCu+L5YHv/BXXq46UFFNtGV8aylX2fC8c/VNF/d5iuxMPFMJGUexkP7d/0GDHxLhWuTteMdwm2h+ICbU/luhDTWjArQPYxPbHZmbxYhLFQ4FbR7ARYxL+1jGm7LdtlXhyexmHbgmhNIxXsg/GwXhTMVwbFt8+Y1Ri/6IhvRopq/P8F4KitIF2OXLV9rx5c8PmWryjV83AsXI871pWX25JSXH8eE3xUsbL1bLnU19QmEAqFHXhGWyBziq/MRFlI8urnsOugSoSgsI28vrd8oj+YeQpXAsNxCTGG7m+xs2ZXgIGMyVCqfHtFeWzw5x452NPakpCW8x0n7Ro5lWqVFjhEFclaXIko3Gbiu7+rEy6hVArxJ/6GRsWoUTUbwt8IxzzpMVbb6aI7uUzipCkv+HY419cYTqHGh0+r6zpweFXFcOt+JDdreAj3FW+GDWYnUFFhV9E9CSeZJ85uaZX2E5WC7p7zfrXETXF8wraZomKkVQX8z0cpYQ8kd8zXRX0X3ruXuz4xDoTM+J8n0mdJl55Um/QR0NsoqxKXWWJN0riXygjOKFhDN+M8/xZXI24p5rqtmdRTRxoMZ2WOZScigekRWCwCZP5urGAIr+nu1MUXWLcaFa/dvjWEJblvHKsJi+9Fcezt/jvqouCe6lLXepSl7rUpS51qd+ruii4l/rdihE2q5nhmJaZpzpOqBPDNCeVAWA0qa1JWnyFqhvFj1sZ7PGcEY7WqDGIH9RHyo/HmaWbrKShJKVIVy2hMhirOSekqaz4lqiQsB934r3JXyF8cYvuJlQ/ijKYEvbTfk5Co7Dgo/guS4tfGGKRu7TX8gS4Wgw8PayorgeGh4apK6Rr/oM81u43ol5120ZSfGrpnD35h8qdcE9DawgLhR515qXmXZiENWmLgL5xfLHZsR1qVJ/TYaKifJaOWciMSwV2nyi6zJJUolxMK0P1pGf/2SkRzi8KklboPoCWrv6k1cwKjLpA9SX96wXTSmOmRPfNmvqdKHIqp3/NXFvn4dihmpzv3hSgSsy7J1g2pFbGQiwM9lm2Ea4XWfEX7nCsC4iLs8qslDAat0eICbVakj4+kL56nc+Vgcnh7pfi803pd9Td1NbiBS5E3T3xbOcktaqAYSSFiKorUt+j6hKSnjtu42YhFJCnHdxcUe0iq1+amVl8/MoQrbA5p1vpBtf+MxasURTHMy0AJV3IJw9bqISAUBzBtYp6G8UP24U8FiLl91viVUusLNWjY7gt8JWie5UTiVbyubGOqIXHIwleMStKsbOkNoo/MCIc2iYK7QNgNEw3ATMY2vdCSZhWZlZx7SB+2FgoFr/a091tmO4960LO01gH0qSJk0HpRP/UUG8GptGyauRYv39swSSwefUiQUoKctIfLpMIJjk2binKk3aZdTnK5x++UmzuD+x2DdbGOTmqHwqqrDDaymNMFJKCTrCUnw8moRtPzN3+9YMi6cRwc0rvU4zXtaTauUS1zqs3L+X3v10/8hfDa0nWyvXlesexEb/iohj58d0nfD7ZpQn07cihl3nhq7sntuuaT/GGEajfG5QRTu2JknD8gahUy++SpB1amF47rms5jsePLRrof+BQtayU6a/Guc3A9QXTSydt6qNhVCU/+eYN4w/k9v23b+5wqwrfaNp3iXGjsB3zXGJGiVsbrxSreuTj04qwDlid+OGNrGr85vGaH9w9M3rLzaLj1WLH4abiKh8HpRJN4Yirjhg1hQ2s/skDRV4GCFHzox984L4+8LZbc98cWJQTRs
vrlfE8Ni2buueDjhid+PRxNR/zw1Ty5/dv+KvC8/7fvUQlUfrSXhHzHGp7IYn4WtHfKapnhVto2vd59Sjzs2NlcOsSuyjEt5mvaxUjuChzY1NijhPdV+286hFLpOt/UCx/KSsxdj3hh7ya2RmGF17m8bcG2wtJ5fi6pOjkPbaLoBqKGIXnHCOptJ+tXJaE2lDuYPjJgAamu8989wtPKEU5TYvAlNXSU2knXu1yL/OTWxmqpzQTIlJlMB+OMDnCq2vcuqJ6u0Mdetyrq7yfivJmICbhaScr1KB0Wv2xCbyiuO+ZDqUwkIvz6k9KoA6WtMiMdh0xZSQamQCb2uHqAu8N/VSgTCK9HBmPMo+rUYONMGlJKdTCFrbdmcSQKogOSIr+RYntI9OmoHw+TbKR0BTE4nOH8/+3LgrupS51qUtd6lKXutSlfq/qouBe6lwKUe6skU53kG73Re42bSvxxoZIzOqo0lr8mKMTVVcpwqJEj2H2gqoxEBaldMbX5aw6zk+1ucs/toV0lx+mvI3srfJpTl4xvTzB+et27uDVPs6d+GqYSHVmpJ68nKUWHmdVEhYlvsm8w7uEymrR/eKID4YXqwPfq0SMihgVbnHy8eYkmYUn2SjpVIM5BR6hJ+Ge2oNsr/kgiu7pdRWEE1rayI9ffATgl8938/tNLx3z402i2EoyU2hE9aqfZSOu1agkXFUzeOlIX1WzBw2ke9t0E+o4EDcLYmkoTt2zPhJXNSp7aqNRKJ8Iy6x0a43O6VZMnrBZUXTDTKPQ+4FUWMLdFXrfiYJ6UlJPTNvSkowS9VVzTpM7ldUzz1g9bknrjZAP3IlzW6KOPdqv0M8H+Wylzkl3hUEPUSgSKUnKXoxzqp18hgU/5JWGnK5nFOpzn29ZkPZCc6i2AZTi+CqrdFvwDYQ2YbeG0Ea0U3PHeRxEaddeVFw9wnid0KekMwPFXlT74VqzfBvEp+eyF/pjN6f96Zj3BVGQ4ktR9Yoi4EyJPhjMjSfoJFzVMSv+gxFCw9KTvEVPighzIlIsREWMlawG2GNCa5hW8vu2T9SPnv6+ZHi9xC8U9c0wK25p0sJDVaBrLx69oEjhM3pJ44mdxdSioCqVZiUVEM/wIuCuwUyGYp+JD3lIFF2kvzH0XwVeLTp2uwZ3KLGL/IYs51zf7UlJMTpL0TqMjlQLmZ+GrqFqHNNYEIMo4OVWuLIgKyt6hNgCneL4Sjyc2OzvT6KKv3r9xD//4b/hT+vfcm+ObDLst1VyXI8xsU+W3/oN/7b7hn/57o8AuK2PNNZx/cc9UzSEH2t6VzB6w3Ev0qPSQn9wV4UkU0UoFo6PD6Jg2q0lLCN3X2x5udzzB8tPNMbxOIlv82lq+MXjHVfNwOAtu67GBcMfX70DoHcF3w+3hFqx/UOZA9wVc4qU7RXJiPK37RrcsWB5f+S//fH/xp81vwZg8e3ElR5Z6Egepjx/WfIQ5Dt0qeIYK55Dy9a31NrxVfnArTnIcdAdrfYUeVJ2KFw6j4WQ/99lk/n3/pp/+9XX/PXhhYzXpPi2/cTDuKD5T75j9Jb9UHF4URPymEqdRQ8aPSiqZ1kdc63GTHJdh8pASvjWyGqi19AU83znF5b618/n/gwf6W/0TDA48Z0T0N8pUh3R5qxc6l4TFgHTa6a1cJ5PKv05ES7l+40mWo19GvH3a0nrBA4/bNE+SXpXGfCTodieucppmUg5DU6XIad3qpk3bEa5p/g2E4BSwi0LioNcM6Gx6GWN8iX6OFIgq1oKZvKQdokQNE+PS8ykMAfFdCWMZRAPt91apiTqsaoDyWlUTnQzz5bYiMorv6CJvSHl/oDDoUbbSFEEdp8WspLTJvQyk0eMpVg43LYiVQFzKCQ9tD0r1faoKIDFO8d4bSnfTgz31dxXE1c12oWLgnupS13qUpe61KUudal/WHVRcC/1WSniZildpyf/qxaCQmoq9K6DcZIEmPeP8vqiRcWI6gZSPxB/+FJUxOMwe0NjaYTV6jxaa1JOcineSyd8skswCj0F9K4TlmOMkjYDqKZEvfkE99fi19Va8smz8qvefiK9viOVFrU7omLEfXlzTlJrCsq/fU+aHLp6QdLgVhCuHeSO7+/qDc4ZtkVNCJoYNHUzEZfnJ0TzyxoXLSpKLnfz/pzUEyrpnvZXCeUUfgH1h7NPMxYQm4DWkZgUv3y4IewL1KnRNYpP0fSZnFBnJu6QZpWg/eAxY8A3Jj/N5vS1MqvAYxC/mZJkJb0fYFl+lvJlxYs4RpqHQPUoHcgzRzJG8AHzIKqt7hzxZjUnodm9EAxiY0G3mKcjdANpfUfK70lGYXcDYVVhjpmEEYVJCcK5TW0h42W1ICmFMka+G6COw8yQTPuDJJ75MKvxyjmhI5B5siFCU88pRvF6RVJg3zySxglVV8TCoI4DKifbmZ0GnRVd5yl2E/bgcG07n0vtFOko6V5h0uK7yyp+qCWtqjgK3zZpBdmDDdJJr52cU9slolFEqxhfnZSmFaHWtN93xNLw/OOa6UoShU6817EvUL3GjIrwrhUF5yxwgE3ogyYZQ/1BE0vx56nTe04BVGNmxRYwVmf1lATT2uJaBYgnfbPseHhe5uOsSa2nqD1+tCw3HeNYiCduOpFFJgadZrhEnAyqM6hNXv25DSwWE/2xZBor/EJhOjWzRA9fGewxYa5HRm+FvtCE2WeslKIpRbENUdOPBcZG9ruGxSqvSpgkynFQcux+5Ogea/EFA7612E5R7hTTSlR1t1IzT/j/fPMF46Hi65snbsyBW91REE+2TQYSLs1BVWx0x4tiN5+Gp6HFJ8111dH5khA1UzCkZFGnc6AjIVn8MpC0ltS1982cxlY+KaakuV8c+PPNd7wsdlTa8W1OW/vkV3zdPtIYx6+6W/5ies3P397zlBO+nDdUbwr8IhIrIX5Ew8zoBuG6Vs+Jw1+vKQPUrzx3dsdGy8lolWehoyjWSoZPqyZuchTjSZF9jhVDKjAkauVo84BaKE+rwCjFlBJGsu3yn1JBKULyODQbc+QP6/cccut+Hwo+TCuWxYhPGk3iqAu0iYTs5aaMRECPhlDJipc9ChEEMhs1icc9aUUs9cxaBrBH6byfy0fMCO37rDq3iuatlgS/L4V/Ox1L4f0i7FY9aTmPyqCCktTKYyLlk10cAqGWpM5kNeFuhXYBdZAehfHqRu4FRjFtK3RnICdUAgy2wg6yOuRDRXGQ+0Eczvch24mvWgVZiVGZFAIQKk0RZYXT7IfMgC+gNMS8UuQWhvCsaL63ed6QFaeTF9n2kiBYv7MMLz3pIP0uaSaTKPBgBkNoogyWBHp3+uekRXkYFgXKySpQ6i20IpHrvcFPNXrM/molPHGirJzJRyTKHRxfFSQDw32FGc6rXX5RUL4/oqqLgnupS13qUpe61KUudal/QHVRcC/1OxVLQ7iuKZ5y96zzqIdn8TWeyAk+CP8WxAfZDUJS2Cwxn3aSFGXNrOwopUiVQTlPWIlKZj/tia141PS2EyZqZp36+zWmm2bfpd4eSW5CdQPxxFN1YfZtKmtxq4ri4Sj+W
62F3ZpOySqK8PoGNXhCWxKtdGyqo6V6lH06mgb7UPBxldUfBZOJxKweJK/QWlJezCg529Eys1G1F3+U+aDmJ9r6KdLnYzZthBUaguYv37yUX4unTHNo3wlTt3oSf5V24p00E+IRBcwQsNuRpGqSNpghyhP7KcUmJEJrxWe8qii/f0I78U7LlwSUwu4nTK8xh5FUGFSXCQQnj6rz6H2W2cYJruWYx0VDuKrxjcFoBSzQhxE9ecJCPsOMWW3VSogV6xq7G2Ymo35+xhhFXLakpkA/H0lNNavEOvtl9eBJX74Q9dVH9KfMvS0KSEloCUEUZz4bO0mB2faiYhtNWrXi/z70JP+ZV7nOxyQEQmOx+2n2S5sBmofA8aXBDDB9DsAF7EFRPyTMJIrDdAXrX0XxdgLuoBluFLEQpcUtFKDzn+JBMy7x9JMlm7/pSSo3yN9GYpc7jR9KioOMMT0olDOSKpY7qlOZ/ywSbind+dWTIpwuy/w+HWQ1wDei3lbPmdJQKqalxvaJaSHq8b6vMTb7vasAvcFNwgF2zmJMxK4GfCYOnGgKSiVJrfKKVEbIr5ftREqwWA0cFYSPp3F4Sq0TJStMhsdDy+LVkT+6f8+rWviwt+UBTaLWDq0itfJ0sUSryNbLPPL+izU/391xdfeJ3hdEFN2q4DDIZ3VNha0chw8L6UZPCnPUFFkxm0ILK89fv33Bfz/+M9rC0bmCmM7nfHSWlBRaR0obqEzgzYN0pLunSuYLr7A7I77zSXzYZZc9sEcRr7ST9Ltxo2QeySJ0uU+oqPir717y3fZKKAUmYLP/c/SGRTVRW89hrNjvGopfVzzk46md4uo3idBokpLtxUJoHgDth0i59YRGUz9pXKP49GrNv7D/DJ0lf6MjlfGUJuCjpjYOq+P8uo8arRI+ajpfolWi92dPe6kDJq9OdU788DEpbPZ0K5XmbY3eMgVDiIrJy7xw3NVwkO2ZQ1ZeHeig2LzL19VKUe5k1cqMCTsk2g+TpDUCqh9xL68onmVVKrYl5qmb5z89THn1T+4TcdmweDsx3mT1sxC2dTJyzUWrMuc17+OjZrqOUEbSqHFX4nEHNR9rO1iKYwSjUF4Ux6QU8atbAIpjonuhcUtQvWH5S03/Ks2ecdsrymfFtBaFNlq5F5w8un4BtjulvCUOlWHzi3DuqygV08sFegyEZSUM3MPIeN/O81u0oCfhehdHmNYJMyg48WkNhCYSyyQs7M6QbidSL2ppcZD5Ai2UkqQiyQglCJDVJp1QvQGTMAdRxVMmBiUr85hCYfcGFHJP7c4rTMnIKutwl33HwVD0GpdXVG0XGb9YUr0/8vfVRcG91KUudalLXepSl7rU71VdFNxLzaWUEr/sFOf0F1ISj+PJB2kMFHb2wigfSEUmGPRCMFCjQ/XjWZWbPBGIVwvCssTuR+KyQR9EJUxVCU/bmXeqc850WmZDTj+hVivSogGl8OuaVGiUy0zPys4cQPd6I0+z8TN24BTRvSM2Bf3LksOXiuE+oiKU2U437a2w+I7iNwplwk0anVNWbKfkiRZR8IpD9tVmEaP+mIglNA8R20f6G0vRR7rsE3arhKk9brRokwiTQY8alQ/zcCeKX/s2oZ0wLMtdIhqhSJz3xaNSotw67H4ktAWhsfN+JqVIpcI+j4TrBfbTYU6Ei6uGZLUk+uwH9K6ThLGc561CIC4rtNHEVmgXSiv0Pj8lay3Egxdr8buNgen1GtM5Uu7QTQHM9oh+2BGvlpDTc9Qoj+apriQdL0TpelYKNbmZmKG3R1JZCO3BR/kO/TR7cEmJNExQV/jb5XxcTnQN83REDVn9ryvUscdoLXSNrOCq1ZJkDWmzJFmNbw1Q4k5JY4WivxdKgfKieh6/ks5eEOatWyqCl+Qjt0r4WmU/qyh0dhBFfbgRf6xbKuGRAtNaUW0T/Z1CBUm52r2A+ts93S4zhxcBtStw1158bEm8eCeygPIQ2ghFJJZGErsacKt8TRwlOcgtRR0yXU5Jy+ppsUsM98JNjYWsQBzfL6CW1YL/6B//ilf1ni+qZwrtqZWn0g5DJGRd5BBq9qHmECrGaPk0Lvk4LFlYWVHofcHH4wJrIvevHvjb8QXqscC9OtFRFMVWo54Lhr3lJ3/6G/7L25/xRfE0n9eIZpV9ooUKlAQKFWZv53NseLe5olCBnw1f8L98+hF/tPnAXz1Ld/7d8shV2fM3+p5lPaJVYtfXc0JhXYjnNyXFm7fX2PclYRFFiQbpIB/NzNq1H0vh/eaO8nqvMcMpZVA816dEwkIAAyzfiMJWvx+ZNiWLd4rxSlM/yrH2rSZaAz9t6G5r6gfFcZNmxrYZFM9KzlEyUBYJ5RVVTs6zHRR9pH6O2C6SrCTlzQmFPmHGICrYPhBfFzS/Knm7fXGWuM52XfSkxFuZgGxxTCph94ZkksyDSdS2E9PYLyLKy8qWuw6ohYfdZ9SSIL5SokIHGcban32fVS9+WhVlbLv1qScBysOJYwvtJzkvzdsBvyrQQ5C+DCBeL4SW4qOsTIVEuG5nvjU+wG4ru7peENsC4yLj6sRMTvhW5etekVYe+2jxGzkR1a8sbq0k7TLIMdNO4ZZnBvawEb74cN8wXRnMmKieHNPm1GwhymT3tcPsLONN/tk603qcns+7duKF1YPGZQ6t8sLY7n7gaX9jsT18+tOK6unkOQd7ZWg/eEKtMVMk3pckLWMCYLxSlI/ihXcrSW5zV+cxf6pk5Dwnm+CpROVVoVCJ79etA6iE3VpimWbSA0nUae0Bp8Rrnpg958mk2U8dGyie5L22O/vGzSgq8fDKU7+19C8Vo5OVMwAdEscbi1us4d/x762LgnupS13qUpe61KUudanfq7oouJeaK6UkNIPDhNpL12daNqSUUJMDsnrb9aiyPP1SVnSVME+HkTRNqPUK/SQSRrwSSkIyFnvyR6Ykym0uVVWkGKEk/FEAACAASURBVEl1ifm4JS7bufuflEirVjy30WL6CD24jahd7rbF9JJig0aeVp/H2VtabI+kqkRNCtdq/EKeQE2n6F+e1VEVpcs6KVFsUZpid85B106enpOB8VpUm1Lsgrilotwn6apXiuFWMa0t3ev8xHk3crXq6IYKN1nSqFl+J94kOUiiVoRSvLdJSc55UlBus8fWR/y6Ro8hq0iZaziE/KdHFZpYWcy7B+L9hlTZmR4Q2hLtRTnVnSFcr4ilOVMUlHwZtT2gQ4u/WWBdgGNW2tdL1OQwO6EjpMJSPPWofsKkk1GtIK4a1PtHUmUoPh6IywrzIAcqVaUoyimhu0n82mUxpw2dmLfKR9SbT6hFI+llmcucDgdQoiSbvkR/2pJW7TnJrCzm5Ly4bEUdftqRxhG1zBKt86hhxL9Ys/+6IVSKpK147hByRfeF+N7KR81UKKJJs1o/rYRZK+8Vxat/cSZmjNeiZpheY48w/HBCHQ2rv80sZQP2vfgxh1sl18O3B765eeRnx1fyHjTjbUAvHfFYiB+vCnxuB1Y2wmBEPaki/jahhuwtLYOk6WlY/GjL5CzDx4ZkcupdI97C
PqcoVY/CDx3vZdt/fvUd/8Xy/2alJwyJQklHfEDN6unn6kiXDM+x4p3fYLLZ78Ev+Z8+/BnXVUdIiv2rik92xd1tnhcSPD2sSE6YuzEp/s3+G/5V/JFs05d0vqTUAZ80m7JnYeU8f1GJJ7tQgTFZNImf7l+TkuIvn1/w8USDUPDOrpjGgr4TL2Y4WklTApxJ6Px3rUSJ01tNaOS4hKTQg0btDCp7FNu3Crc6KeHic9aTJA7aIQGaykP9LNeV7SPKJ+xzR6zEc1h9CvOqw/GbJavvPdtvLPWDEFrqDwq3zuc5QPsxiX/zpRZm9uqsGi6/D6KA9ZFi7xjuSpJm9uZPmwodovBXj45yZSh2GtCz9zMhKwJm0KKcDprpNmK3WR0tskJ9zOSYk2c8r2rVH82sxtbvLdNaC4kkK92k7C29jhAVxss1VD2euc0A9ij9D/pJVjyKg6TdAdhBrrHq0WMfj+J59mFelUi2RE9B5r/nDgpLbIp57mEYZSWoqYlW072umBaa45dZhV7mvo0kzHJ9NPjrs2//+I1HTZJSaTvF+IUjfuGp/rqZ5/HxWjFeW9r3SUg4rSJU5ZzQ5RtZPSw3I/pNwXgviYWniymtPFOpJQ1QJerlxLCrMI8ncz0MLyLFZqTTifpNQbGH4xcn5rFQI8ZrQ3evKXeiKMdCMa7lQ05Jd4evA1w5muXIop64rvt5X33ShKhxmRPfjeXsS5+cZdmMVNbL9aESRqX5vT4YSit+bpAUO2D2ZveuoBsLuqcG03q8l+tyumY+Dg7xQbcvjozrEve2ymMvjxdr6F4pfHOhKFzqUpe61KUudalLXeofUF0U3Ev9TiWtQGvCnXQJm+eD+BlDIO0GVNui2obU5ae99VL8tN0oT9NVibJWEtCq7KndHkiuRk2OuGjQ270osrnCuiYV13PSiukGwrrCdCdqQ0Qfe+mUz3xevR8o9MnfWmJ2gzytHybxBys1vze1NUyOVDX4RtQCFTJfMD9Zm0GJj64XtdaMknl+quIAzcfIpz8TL66elOTK5wdr5WHx1jFdWQ7fiDLrKggvREUpbKApPPtjTfQKQu7OPXW2lqJONJ8SoZFtj2tN+zFw+IF4ketHj+k9uvfExgoRotRU32WecGXRoxM2rBG+YqqMkCXIPtWUQJXit60LtIvorXhs47oVtm5Vog4delnJ+zNxIFVW+LEhzL5e9dyD0XKMgWS1+GbrKn+++KPD7SqPhU7Oo/PEZY2apOM5ZZ5hKJboYRIe8vUa9kdUiKT1Ih9nP1My9PYo6ufoZgJEqgswGvW8l3FXFSTvRb09+Xh9IO72HL/6lsNX0k0c8zkHUdBVSIRNxAU1d8XPaUXx1Aks59+vIs6pswqjAAV+Lb7HdtNT3Xu2NzLm07YklJJHP97I++vKCQXAZUUiKH7yp7/hdSMm8bXtqbSn0Dn9L2l2vubDuOJX2xtc0GyaYX5dq8TfvH1BAl6vd7ho+NunGn+fZb+PBcmK59D2immTx3Lex192d1zbr9n6FpcMdZYLt75hmQ/Uygy0emRjOp5Di1GRIRbsY15ZiRafNH/zfM9xLHHBkAbDoc+KzWgpvi9RQY7vz37+JT8LX6GzCh3XHnUw2E7jbr0wb21E6YQ+0STe1IRVQFWRly+f+fCwJj1UVA96PlflM7CS/Sv34ps+sU9DKd53lPCPQ+aChqzWlp8M9SfFtIaxiDTvfze5Soezxzbm+at+joRK0byVOdJtKordiOpHyg+KsK4x+1ESA4H210fGFw1mEJrCiW9qhuzpvpbPC6WMRdvL66cKlaLcebQXZvniV3vpU8h9EvW7I/ppT1o0JK1Z/NwRiiviB8X2R1nVuxMPrZ5kHiqOCt9oSWREjt10lYhWOu+LoyjVbpm794uE7SQR60SZQQt5AHJX/FWaVznKZ4VbMTORQ5S/b34uVINoEtEqql2YexDk3gTlQ0dcN6jeQWEJaxlP9rknaY0Kkm6ougFzum8gRKDU1pKCNXhUEM9qOCVoRYhfDsTRUK4mYtAQFXUzzcfamogPmqEv0cCruy1vo6L+uYz50/aSVUQj4823Cl/nDSgZPDFo/CZy9YMtr9c77mpZ1bgvD9wWR1ZmoNKOQ6j5zXjD//H4JQDvnte09UhTeKa14WO4ZrxV1O9l3hjupW8DrYhlzPc1RbFXmP48ZmMBP/kPfsN/fvtz/rj+nld2O+9jSWRI8k/DgCKgGWKBy4bskDS35kCRzcIxabSKDClTMIgUKhCTnt8DzNscUsFHv+a37gYXLV0sGaOlDyXvBrlP/GZ3TW09RkemhWHbNHT7CvVLOZCHryOpTMS3f/8/YS8K7qUudalLXepSl7rUpX6v6qLgXupcSfyjSYHODNpkDSgrZALnSH2PahpYZUWtG+QpSalZZSNGUmkl+QxIXQdVAUoR2wI1ltLlesqVLgzGR9Tg0YdOmLr6M7OhRlipTYF5/yxP5NacOblTyF6sSnydMeKvW/RnOdU6JdyqFAWkU0SbKPaKUMvT++I7xXQlOdzGiadOJen4BaieEq5VVE9CUKieJc1qVnCDHAPXCksxWnDLRLWQp/+XV3vePcvTqTIJNUo2/EnBIEpX8bQWRcktJF1mXGvseOo+lQ7h2BboKaCOE0lrUiMKrXKSlGSehC2rUhKh6USneD7gX1zN7zWjF4ZxJ+eJZUsqDLFcYN72koSWzycgqmpVijqwrM+KaE5Ag8yaTIk0jJhPW6av71AhoQdRfNNCUseU88I/nhzpcIRvv5Df72UlQDiVNWZ3EAV6yjnm9xvxdheWVFjUMJKGEXUaj4+DfL4XFYes3qAUcSVKuHYetVriGsVwnyi20jk93MpxLvZq9hP6daD5rcWtIraXMdl9ESUrftCgE6mMpOJ3WbkkUE4RqoR7bPnqD97yxVrU2J92X9F9E0m/LdAThAZ271bs0oryMY/ZPzzy55vv+Ef1WxZ6YqV76kwxAGY/7HNseXezoVCejemIWbP43l3zP6s/warIp34hDfELz3ol5/qJFeahIFaJqUzENoovNXdC/6+/+ZZ/pb5lOJTgNWbhCLsSu55QmWfqnivMlZjWUwRTBGIwskIBlI0jRoX5mfhhY5Fojwr3JAe33ipJg5sS05WieVfglkKlADCZCWw7RcJKN3dmBvv2dByg+U1BaBOfPrwgVYn2rZ49j7aH4iBpgKcEKJKamcTaZTUygm4UqhYlP9RnBVh7qB/BLxTFPjHeKMoseC3fhMwTjlRPnmlj8bWi3Me5u98eHLp3cu2ECDGhHrfwlRieU6Up9o72kyEUSuYAKzxbEAWw2kbcQqO9+PyL7pxwWD9JwiGAOYwwOUxhZg44Xq7PVNqZZqKDKLUnJXvaQLHThCahoqi0JidqgRwj3yiMk7nOtfLaia3qW6HImE4Rq0T1LP0Kp/lVeUX5JKqtmSTdis/mz8WbyLRWuJVF+wRIGqNv9JlDuw+oKTHdtqiU0FYTazMzYN39guLDAX/donzEvn2Se0n2ncf7TV7hShAjtguAZvOnnwD4D++/47Y4cpfxF5V2bEw3Uzx
qJcfujb/mp92X/PJ4y8FVvIkK/xNZBXPHAjUa/FL2eXglnl57OKVNKuxR4T7WpGXgv/r6p/zTxS94YcQnvNITOu9wnQ/ufmn51+03chC+kmS7f/38NX/58SV65VAmMaS8gtYGiuVECJqqdnivce9b3FVETSefrvz5x+t3/KftL3hpDqx0JN9m2MeCgGJIBbVybPTAwhz4fIqbk/6SJiqFJrHRZ6XbJVkZG5Ih5MaBk5q7UI5VOcy0FIOovy5ZPni5R/5i/ZK/ObzA6sDz1HJVDXynr9i/zFSehYejZdr8Lvnh/10XBfdSl7rUpS51qUtd6lK/V3VRcC91LqWwhwk9+pkrG69aUVqtQcUo3tsYhTV6+rX9EapS1N+6QD93oNuzgnCzEQqDD+jOkZqs4ObPKB47YmnFg5uSMFhjwl3LU6ndT6QSSEm8weZ31bLyVx9F5UsJ1Y+kpiJpNVMG3LqkjJFiP9E8GUItHZjTlfjGQNQM24tyerINlVvx5YEoDUkLp7baR1yrZyXlVL7RTKuchqPAX3viIJfYR71g0Yz0Y4kfLcaJCpQDmYil+HyjFfWmOED7EBiuDPqYE402Ft9oyp0nlhr7HNH7jnAjCplxARWy+rmoUL0jLOVYA6jSYo7iUY6LrHYbLYo84K4byt98Im6WpKuVvHbsJc0MSCGitCaeVBGrUT0yNp5zp/KJjHF3DYcOMwZiaYht9shqRfFhP/t8CRFe3s3EjbRsUJy9vPLD9JlaLJ5qNYykVYNytXjrTlxma0iHI6ooSM4RF6ICA7/Ldl4v8Y0wHWsnDM5pIy9rj6guzwZ/4+l/4NFLR5+7fWMTKa8HpmNJuZi4WfYUJlDk1Ka75kDnS6yO/PZ5w/PbNW/3KzaNKOLFesQ914Q6oZ0ilIn1X1l8Az57P6dDyc+P93w/bCi1Z2lGKu1xuV37yvYscxSWIXFlPB/9mk6ilXjv1nzZPvM0tXz/3Y1IGV6xIw84pyi3Cu2VkDucrGaoTrbvdgtIUB4UsYIwaMygiV2NOnmR64T6bUOyCQUEA3av0FmV08capWQsJwMqKsrndKZ/OLlOmodIeVB0LzUqnjvrQy3vUUHUxFDJdywOiiHTH1CJopNt6yDJaOU2EbJ/3jdQPUO5TZJQ5aDosk9Rfp3iGIlWtl8cRMFcfC+vjzeKaSUrNvWDqJLNe1Ez4cRzTZS7gO0DsdI07xwYhTnm6yarqamy6H2PORq5TvxpBUq4re3bkVAb7GgZNnqmMGivsUNCxciErOhUTw57OFNmQlNgjk6up7IQ3nW+JnQECps55QWpMBSHgDGK/jb7JgeZ71zuTQgVlLs0e32Pr4SuQJLXtAM9nhXc6kHjrpKwTEs5Lkkn6o8nTq68t9irmYZQ7JjpA7FQNA8pq6rgF2Zm+YYye7KNotpNQqKIMkckrbA78YSHtiRVhcxxVoMX0ovOFJi4avI9YoLnHeVVQyhLfnL9EYD/+uZ/58fFE/fGUqkCjcIoTRflOD/Gicdo2ceGFxmg/rPdK9r1wBcb+f/eFWwzZ9mPFl0G4mTgKPPidC18Ydsr4p2nUIF3fsNjkDm8UIF9qNmGlqUZWOmeW3tgY/IqG+Jh3U4NhQkMJhEOxcyvVp3BhQq7NQwvNWnS2FGWPU7KLRHcOvJ9v+Ff6n/Cztdoldg7ud8+TQ2Pg6yI1dZxVx/ZFD1re6YsdFGoCu+GNVolrIpUWe6PKPauEub0VBOy8fpEV9lUPY1xWBUpdGBjOwIaQ2TMS2fPrmUIlm2/4v1uxTAUhH1x7svpSsonLeSLv6cuCu6lLnWpS13qUpe61KV+r+qi4P7/vJRSPwD+B+Al4lb6Fyml/04pdQP8j8A3wK+A/yal9PTv245Uwn7cEdctYSVKkNkNxKogLi0mJZRSoqCd0q2sDKFUlajtHliRhhGWLXGZ1SKjhLQwOvT+SLxaiJc2Uxb8/Rr7eASjQWv0rkeVBTp3GftlSfHYiY+yssTSkAqN3WcFoyxQxx5VV9KZ7yPlm/P2lY+icAwePSWSlhQeX0LInMZQi/fVTKJQnBJj3PrMZz110+ug0S5Jp/SDPDlPa8PiuyPD9ZJprUglkvCykKfa/s0S/6LHmEgaDLFI9C/PST4KUXOLvWTUV0+J/trgW8UpTqj95DF9RLlIrIwkwe2P2FNK2KqFmIirCvPhGf/6GnOY0IfMiG3KrOwoVB9FVcqJXgCxkKQ6vT2C84QX11AV6MxEVqMjXC8wn3bE69X/w96b9Mq2relZzyhnEdVaa6+19z7VPbdwWkliQNBCQkJI/AB69BANJHfoINEw4hfQ8g+wRIMGHSSQANG0RAPZsoRkLGQneZ15M8+95+5ylVHNahQ0vhEzzk0y06JjK4/ik7bOPjtizZjFmCPWfMf7PS9MUbZhLFyJd4qUUfsjWSmoK8z9FrVuZz8iReVHKdLdBv1yJK4qzHRWV7PVxEWFe/9EXi1QuwP5lcir6sRkVko8wdaAE0VXxpoRFWvhhOQAQlFISf4dyH1PvloVf6aiv804r9BlE92bRFwmSFBd9bT1SOMn6pLAtfEdG9+zcR2v3Y4pG/7B/c/5evEspwCF15HKBMJa0w2OlDSPhzM5hCzJaNMyg4b+JmM7NSdDVb91/KPwC8gKNZY0IJ3hxBUN5XzahGkDdT0xDpbpeE6PMnXE+QCTxj0ZqmdFLPdE9KJUmo45HSg0+nc85emkxB2BJ0P1lOlvNbaISf1dxj+LmTLW4resH+Tvct+VNKox43eZ4UrjD5mwPa/A6DHjDgl3hFBJl/xwdSYV1I9Zkq1WCjNA6hTN54wuPt9slKSHLRR2K/fQtFLzPtbHjN8nmg8Dh69rqueIHs++vVjrOfFpXIt6Oa5kmyDKL1kYrNVzYvF+pHvty30pBIPF+xG7nwo/W2O3/ZwECKIsmscXSYp8fY069KAU5lFUv+wsqvLEhcPuRuxhgtzM9IBqGyHB4mNHtZIkNf9pT1yWOfrxgAo1eteRljVhXWO6Se5BQO87uU/Kilq2mmQVOmTqp7JSV8m1d4dy3Y1QG07UA7TMjaKwZ6alJE3V92fKQvWdMKRB5tLF97B8J/fM9luZ6FKhVEiPg5w/gFDDq//zmem2JbSGxW+O6ONIWNdChQHMrkc9bcnLllxXYBRxWaELTzguPaRChYmJfL1GDSN5K6tLyjty44XcohT2t4+gl9wXtfJX42sSmnexY1X8pMfk+BBvAPjHx2/5k8Md9/2Cx64lJsVu3xCj5rtR3qN0ZuotbB16UOjJYyf5PgGZ31WUVaLdleN/+/5fZ3uoSQXnM20r3GokZUUaDdpHmmZkXVZ/vIks3MjH3ZLd/QLVGdr3ZibATAuwnaShjX2FHoXkkDUs35VrbeHlZ5p/+E9+j3/o/oas2pgsHneAU6LZpNBt4I9GI8zt/XluyXVEuQTPTnpkXMLsC2XhKkBSqE6jgiJdT9CbmeHt7w3JQ7gbZR6bFPiE3p1/Hc0uo65G0mCov/PUo3wnnnznAIuPiWHz53of/lxdFNy//hWA/zLn/AfAvwv850qpPw
D+K+Dv55x/D/j75f8vdalLXepSl7rUpX70dVFw/5pXzvk98L78faeU+kPgK+A/Av6D8rb/Dvjfgb/zV29NEW+WxKVnakunc0iEpZf0LKUIX96IB28lMo36f/4M/fpWOtatdLbrpp5TyQDM/YukmRUFQT9shclYuvtjY7HWoLpBOLkleevERs1a6AHq0BF+esu4KvxWX16/XWF2jtw48QcPhdFatqOV/Pz4qimKjahNsRF1CIQ1mW3x/2VRJI4/neYzYwZHWolnNxtFrEtH9ea0j+L1JYvSYTs4LjPplMe+iCiVsVbSqFKVCEqj+x96+eTpWmtJuEoO6vs8+w1jbWnuE+2HiHvsRU21BvbFGzVOswc2e4d5OoJWwq8FwqZCdwH3eY86dMS7K/KmmYkV/uEHft7PL6iTh/pFPKwsW0kI2yxEET+OqG4QEoI/RREZ8tUKvT+C1uTak7WeVWSM/h1PrQpRVgnKeNL7gXjdokMiV14IETnPPmC0himQN0vx/TaVqDpFQVa9+IqFA5yE6nBzBbsD+bq858WQnaHaJUiasIpUf+PAdSvn8a458GX7ws+az9yYA6sCkOyLvHliwnoVeY4tf9h9yVPfUFkZTO93a3bHCu8D42iZBsuwN1D4rv7BUBevozsIlWNawPUv4+zt3H+t0ZMjtGef+HiVsIeiyg3y89M6k7Uj7VtYZKqTNT6rWb3xJXlKfq68vlSsfp3w+8TUCLd5+S7QXxcPbqvoW1FNT77UaSnq6VhSvKonRfWQCQtZdchGFL8f8jazKulKEdqPMoe8/QeiXPZvGpIV3qrbRxafJKFPj2Uflgrb5VnxGjfir8367P10u4ztwG/FC7t4J/t3sgs2DxG/k875+n7CDAm77WdfevV9x/RaxsXxtsEfxJ9rC0Bk2CgWHxLHNxq/ldUgHTKLj4Va0GdRWkPCPu3QV0uyM0INCWcGaHr7StIe+wDeEZYV5kQpUYq48OIrNZr+dYVKefY625eJ4ZVnvCmrU9sR9bTFKCGipI3cl7mp0Pserht0H2YKiipEkXizJjUW9+GFsHiFHhOukzG5eC/Haga59vm0slTmx+oxo6fSK3BMgMbvYFyWzvyiyIaFon7MRC++2uGqsMqXMpe1nxLDWpMtLH8bmNpTUppiumvRfURbhT4MqH5ErSp0XxTaVY2JCRWiePT3Haqy88oMCfTTXnz8x15U3EWNjhIJlxqP/vBA+OkbbKG16AH+6JfCmP27H28Jk8FVgbGshLg6YF3xBQfN1Dn0k8NvNdEL1UNZqAov2O0zw7UQJm7+mbCJu1dniHY2Svo6EjS/tRy+u0M5oNAmfIb8YKm24lUerjODa3g4NQggvvFYZ3yUFMXNnyQOXxSf6yh+6sXHxPgs+6RSScU8zfPbxPo7cEfL1MJ4k2ASrz2Ae3Ci5u8UsbaEhSQl+qfi6Z4kyU9FOc7kIGzOUHnzZMVzHxTNR0U/evyLOqd2KpkjwsrgngyxyfiPllhnTPEJJ5+x7xoZdzETnaK5T4yrkx9bEtuq57/ag3v5BfdHVEqpnwL/NvCPgDfll1+AD4iF4S/6mb8N/G2AmvYvesulLnWpS13qUpe61F+ruvyC+yMppdQS+B+B/yLnvFXq7E3JOWd1glf+uco5/z3g7wFs3F0e7oQheCy8udgsiF6xeD8QlxVx4dBjIjl5wvV1dU6xqjy5MsRmg5qieERBVDxnUFMg3q5RQ0QNI7oog3bphX5QezBKnjSdJZSEGrcdyN4S19dkJSlTOmSyKyk5TqMmj34+CGvSaBgSeSF0AMaJ6fWS5DShklSZ5CG1EYrvKL9YVJKEs/EmY/cKVSX0vRzntMhkIz5JMwgL0+34HQ9QNo7hRpS1/jajBwVfiKTWNCN95wk6Y5YTOSpyUoSjnGdzFC9SNuJtVFnSfrrXalaTpqVCJU3WFe7gsMco/rRWVFu968Vf2wemN2tMH8Tr91mgnS4kprsF5kk6rfWhJzTLmSMJwscNVzXZXgtlAeBGlKK4rFBjEFZuZbCfe+HtHjriW1EY9H4kVwada0mSGyfxGp4U2KYm3K3R/YT++EhetsLiLX7C7Cy6m1BTFFU3Z9LNcu4It796T77ZnNWx0zgv/5+9g9evYHck12VcOQvWoo4iXyqtyZVhaoR/+/anD/z7b/+Er71Y1DfmwJU5sihy53Nc8Bxbftm/La93fJpWGBKfxyX/4Fe/oGkH/vC38rqxkeGhYbAZ+2gxEbQF/1RUnoMoY2aUbv/1d4FpoWk+9oSycqKDI3rYfqupH+R9WWvMCU3sTp3oMt5OSv+J+iFpV+Jt9LtcOM38Dh92WGtRRh4jh7eGxfvE4r2oYU9/02N64Z+ufh3Yf21RUZTY9XfFd77UuGOelRmVES9s6bxXSZiyJ46zO5zY2nItq4eBbDTDjcceAgELSrH6vvgfX8u913yeGK8ssdKYUTyyJyU6VuIF9dvyGceM7UX1Aajve/R+RA0jsCJVhvFVi3sqq0S1k3kkQ1W4s/VTnr3IKLknk5U/3ZuK0Ciq50KAOQTcxy25rUjLFrXviK9FMTSFH61ylu79IZFqW7yiEFbrcq0T5hgYXjnsPmKGjD0EYiXnKZYxEStF1SWhgTT1+R7QXvzz3pIXQrOZrhvsroz3Y096tSa1QhiIGyFk2Kdu9rcev6xRqXhu5zGS5/EiG4L6KdFvNOvfTIwrw3B1wiAoopWViNAqSELBGNdljrZw/EKRnMYM4m1OVs1KvPQ0GNxjIR4sKpTVZKUI14VfXVi/4zfX2IdOVO6UCTeL8npg+MVrkteQ1phJVEv7A+pO3qxkjjIaFSLt58j0p3J+uzcanWHyDlwW/+ijo68KK/3XhvBNpHrQqAxZCfPXHs9e5VjLyohKYPqI6SOLMdG/Ol1DoYqERpTX/kYTPdiykmcPMFyJEpu1/PHPalbzT6sLI4rldxkzJkKtivdZKDz1UyJUcm7NmNFTuf/LilzzsUOHWjzvSRFWalbqQVThZDPVI/S3ChGwzUwWSQ7MoJlWwpK3Rxj27sx0z3LPxKoo/oeT/1heDq3MDc33lmkj29CjfO5pG8mrOeWz/Rw5vjIc3pj5vg61wkya5uEHO/4X1MWD+yMopZRDfrn973PO/1P5549KqS/K618An/5V7d+lLnWpS13qUpe61L/Muii4f81LiVT73wJ/mHP+uz946X8B/lPgvyn//Z//hRszlmFj8FvmpB/bF6NYBQAAIABJREFUydOpGhO7ny3E9/JyzgdXbUsGUuuFfhANmUTYVLhTUpm3ZKdJ60YYmFqTlh7zUBTcjy9ko8neFqXXyc8Uz9DJS5YLS9V24mM97cNw47FG42MkFTXTjNPss0vrmtAYbB9BGVKV8U+K2GqyLftYJengvlaEtnirnt3sCRpfRczBiHe3EgWpesl0t8VX+SIdntELT1JlGG8iDMWjWyucDzTVSEqK4VikNn168gblINeptK7DeCUe4SC2WKoHRX8jPMPqJaPHSDZavL+A9Rb7sGd6syY2BtMHUWBOHrVxwhwDaSmKiIrF81h4m
uG6wfQBFTLmpZPzl5if/GPjiv/LYPrI9HaD+7xn+skdpjv7lVVIhCvx9vo/3ZGX7ZymhlJCcvCW+NWt/F0p7Is8uodNgzmOhKtG/MzHcSZnAOS7G1Q/oA4dOUSSt+hlez6mlFDHQVL1hhG0FuJHXcGjUA5Yr8QLWRTN1k2EpPk4iaL26+GG1ozsQ8Xz1PJ5WPKb5yuskfO4PdSSJd9blEmoJ0+8b1CFyagPilqLSuuORTVVZ5Vx2CgM4B8zUxJlrr6fmNYe08t4dPtIXhvMCK/+sOf+bzXU9zBIszb2KIxXlU6kgqL27M63s9tJt/u4Eb941ueEtuo5yyBVku6lInR3DtvJMVaFHuB3meHasPw+8Px7roxVuSeW7yb2XznMANUusfvaYIY8q1n1c2bYKJJV+IN8vop55l/HWtjX9hCFbaoUdj8xXst4rrYRFcHtJ+HD9rlwbM+kBbcXdXvxKTCsDTpkxqWm+c1Q9lVjcpbVJatxzz3TVS1jH9CfnrDGkCuDSo7m88jhi2pWod0hYwahQEiqWcYMnD2qKYOzpQehUGC6ILSPeErxSkL88I7pVlK4wsKeexKCEEzsMZGcZvHPPhBv1sTCp66/34JSjHcLSLLKEjeLmcKgYyTdrdG7o9AFtJLzWSovGlF9c6GoWI3dj8R1xbgpnv0TuEIhPQhWxtXJ/68H8Lsk16PL6CkBBj2djwHEk2m6Mz9cn+EoUIZctjLmho2Zz5EdyjZP5IdtR1pW6CkSi+KvBqG2xMpgGicCX8zoKB8Sl17GZsokr3HbkfG6wpeVqFw52WYfhNvuHe6Q8Fs9j5WwyjTvNdM6C4d5OKeQqQj1J4M7wHAjCq89KNz2fG9nI35X02e5l5ViuLYz+7n9FHDPA8evWqrniWQqDl+qmYKQnNzDq+8jh9eaaSUe/XFTzudY+jVGZE7ZJ0yfcHs5B4cvK5JTjGu5d82Qad/19G8q6nuZo8PSkRX4cu80H0UB7st3mdtK4lpy4m1XUZUVjUL1eE6Ma039Sc0Saf1wJgJNy7Jy00HzWQgn04qzp7yTfdcT6ChUiWQpXvrzeFJJvLeh0lTbRPdKz9SN9rPMISdu8l9Wl19w//rXvwf8J8D/rZT6v8q//dfIL7b/g1LqPwO+A/7jf0X7d6lLXepSl7rUpS71L7Uuv+D+Na+c8//B7Jr6/9R/+P9rY1rR3WqGKzUnbIVa4Y6J/bcN/ZXCHRTJKaqX4of69hb73MuTckjEhWNaOdw+MN3KRsx+xOx64qqmv62xx4iOiXwn3cvu/TM4S1aK4aaiQrpNT2pWrsSbZ5/PKt945Wk/iFw13HjsfqT/Yok9BFETvZPOZOSJVSV58pcub0X/OsFqomrkqXY0Dl5EocxVIg9GOKWbkjY0aPxW0d9F3NZg++JRK0JJey9P0LtvHYOVDtTpSgkrEPA2oFQmZ0XlA0OucauBaV8UlKYoFUmUv5yR7vnMrCKHVpS76BTjykDKuP00Pxlnr8m1w3QTyWlUH4hXtfBxEQ+t7ifp1m4c402N244z81iFRKwt7uOWtKpFbeuHHyhNCRXE16aHQFh50rL63dSxlFDHiVRZsJq8WYrnsigzqZFrofrAdCdeyNg6cqEq2G1P2NQyhnYT2Vvsc0dalBSxpcd8+AxNQ359jT70pHUjSXiA/bwl3shnuvc92TthN3uHqotX3BoICTNkTK/59acb7vcL9rtCBjEZsiL2BkaNigq700zXpXP+IHzHZquE31qL4nBi2CYriqrbAwqa+8TxtZ5XRUQZyYRaFC+3i0IgMBRlDLKxoupHOL6pROHoM/7lnNBlelFik5Ox4XfnVQ13hOhE6XF7UWh/6LVGyViqdhG7n6ifNDqK4gPQfDyy/XmL7TPjQpMqIRigzgQDHSTpKvqi0r5k8RgWhmwyRcXuMtVzEFas0zI2AFOS9vxjR1h6QmNI3swqjenB70bxOjpF9Aq/j/Q3BrcvqwpeocfM8c7S3Eeqx4FsKtxTMfOFWDzYGrMfUccB68w8HvN6KYl8xdc/LS065PkY/VaO2e2zUAE2htVvBnR/WvkxhKsGPUb0y5G0qNH9SKo98UoUWPvckTYL9P0LyV9Rvdtjy8oFQGod9ulI1gtshtzWZKfnBC9AaCWxBc28OqJi8Z7ue1RMhLs1qTboIaLGRCyrWfbQo6Y830dh4XAvA8noWYEzU2b5LrB/a/H7TD7KOLUHOc+mWOhFactMCzlPiw/Fi7wLbL/12INc7+5G4w555hEnL8pfrIpi5xTVNs7H6LeRWGshUEyRcLMQFTwk6SUAdD+SFzX2ENDHUVbrQhJPM/Kd4Z9EoXe7gB4jto/nJMfjIF+UKZGuFnPPQnNfVi2eFS+/EMpO9aQ4vpGkwVOy5XCTad+rQvoQlrpK8ue0atHeyzG57STzYs7YPkEZjtWnoyjoxyipe/vEuDe4cp7HtWL1myge01HU8GzAP5/vORTooAi13Hf1bsIUSs3qTwPDqxq/07ISNyTc+yd0v5oTQMPKUz30uKNFJU9/rcVXXij5p+8ZPckKUPsgZJKCBiZU0n8Sa5lHdBaucVqcqQ06ntXm4ZrfIZ9MC1F8sz35kcVXrCf5I9uQtMDqJVE/yLV0B8e0KCrzIf0L1Vu4eHAvdalLXepSl7rUpS71I6uLgnupubKSDk7U+akVBcc3ophMK+nSXr7P9KV7Vk+Z5BeERhN+shB1L2TGKzc/YaWrmuo4ir82Z45vHGbKuH1JqDm0HL9ZYcZEbDTjdUX1bs/4WhSKbBtiZeSp/2Ege/07fq2sYbypMX0kLCzmMIlSMXcfS5f0tNRMrWTWk8H6yKKRJ9/hvsEMGtMrdKcJ64gKkiIF4kUerxLZiWIWG+iv9exdG9aS1BKbzFhlcpUw6xGt5RhD0ty0HUplYtKkW4VSmTD84BactPhCbYakSL6kQZ18bAamtXCB6xfoXznplD3KG8LKkW4X2OOEjgnVD2TXzolHYeUxVuM+vDCtK3QQz19sZB+qD3sIEVW8rWY3oIaR8KakiMVMWHnsbmRaV7jdKIlkTjPdtOU9CftwQGUwLz2pcaC1+C1BvLZaSxpZBr09kvxqVlmI4lszY1GKjyOME6oovOGuhZ9/Kd3grccoJQpO8fJReVEqQyItGlEt8kL28+bEyp3Qxx7brameIP5Jw9E3+JPfeiNd0K6wZnVQRJ+xz2XMB/Gl1Y/iU01GERtOFjXcTq5VWMDit4lkoX8lfrbTdcxa7rf6JWKGxLQSduS4Ll7lfFaH3D7i9kq838XzSKto7oXZWj9mhiuFPWRcd/LJRbY/EfLB8l3E7QNhYWZfe6gVfidpdiiF30XsbmK8EjVMeKuJaanJGg6vDe19YveVmZWY7tYV9VaUvWqXOLR6VvvMkFBJxrQeE7GxVO+2xE3xSw+RtNEMt43YgVNmvLKzihxaTTZe/ME3RW3MlO774t3sMv6QOd5qzEqTbUX1cPaDp7ZC5czwusU9D2A0apg4/kT81tXjQNaK7DTLXz7z+O/csPrNQCweVpU0/ZUpXkhRwrNW85qZ/7Sn
/3JF/XQkbVpZJXL6d9TybDWxccSf3JKsIq4q7Oct+vQevWD4cs20NOgxM26uSO48B4erBhUqUTJzJlXS0xALOzrVZVVkCow3Hhq5zs2HIp96R3IG1Y3i3/WafFPjn4ZZ4upu/cwbNmPGdonQaKbCZVZR1MVYadqPET3B1GpMGY/DtcUOkhoXao3tc1mxOPs23THRX9uy2lFWMYpX93hnWbyfhBiRoL/1rH75LNSJkmhJ8Ge121vUMIHW4mcGqoeeuPRErwmNKMHmGGbmdlo3mPePxNeFEJPEu3ryfe6/rKgeRXHUk5ANzMDsj1VJMS3ES6qns6dURRiLJ9xMhvoxoscovQpDmSNc+a5yhtha3G4iVYb6fiRWFc1nGbP7rzyhVjMP23biV55/XstnL55kXskahlcVvhCF3P0R/3JiDzuS16SrJal1UMab7oW+o0Jm2AgJJbRnf76KosiaUby/WZ3nLBnPcuy2E/U1+aJonxjcnjOFpCjXkhp5uq+lV+Xk/xded1GJT771KB7hbBTuqUdFOZ/1UH5nOAbQimn5V/8Ke1FwL3WpS13qUpe61KUu9aOqi4J7qR+UIraZ6kHNvM1TN+zJQ5MtHG/17D2tthplM7uvLLaXDur2c2T3laW9L0+Vu8jx51eMS8PUKsyUGWpNqE+exJUw7oIRVmaj4csloTk9f4lqdHhtmBamdItHum8KR3LKTAuNfx4Iiwo0jBsvPESgvzbUT5FxKU/Gw00mrQN5sGyzKI9XX215eWmZciXq2aDJVZo7aMcvJsnMVtB9EXEvmnFVfIkI19AdMrHKYkqqIjkqvroTBm0uUm9jp/m/Q7SkVJigbmJXlTQvnRmea6IW1erkT20/KvqFPNUe3mhsB+OVpTy8M64N0SmqrcZtAypl4QaXJ3fTBezDgbSspUP+N0+E1+uZLwmQVjVh6TFH2c/py5s50Uw/HclrT6ossdbo0RSqgp6f7v3zRGqlO9tU0iWt+zB3zqdWOJ3pqsYeJqa3G/H97uTxX40BcxghOXQfhJKQ0qzwxsqgR6E0JKdRMdHfeBa/FiJHapz4gUvndWocaV1LelXx6aohkNoaMyaaxyQEDw2nsCC7l+5gPSlMV5jGAdy+dHgHmJbiPbVdPvtfy2msHxK7nwgrM7Qy5rIRlQTEd1a9ZPwuzcpMcnKdTFEoUKJgytjStA/xd5Sg6Mt9sBePn/mkmFo1UxDGQmCAfPayKzV3WyfnSFZRPQlz2HTimzZj+fmritBq+itFbES5ipXch6HgpadWvLHt50R0iu5GGNZD6fhWwVC/JKpHSRLLWtF/s5mVJNU6YiXeX/9hz3i3IFaaw+tCzDDQPCiyF76oOwghwR7PSvW4VPOcYiZ5XU+W2KzKedJUD4OoltcVtrFMSzufW3eQ8aT6SLiWuSDUhmllyva1qFRFLbN9Zve1Z/1dke9iIiwNh19ckaycf/eSMYd+9q4TxTcZlo7qcRAO67JhvJL7vbuTbv6sJU1Kj/L+9s9E8k9Lj4qJ8bqWbvxCmzjNb3Fpqd/L+NeT8GX1JAmUAKpxuMcjad3Iyk3MJKPQ247h1bWM+T6JIjtmQvFbK59xp5DEVWG9Zhg2mqkFf5BUNxBVu/oUSFbRf6GEjNLlmYGsI4wrg+3EgymrFYboTymNYIZ47hF47YmrSs5JmeC000wrR2hWuO0oSVrbDvKpYSSVMVq84lNkuqrx96JkZ6sJX71C9xPhusW9e5KVwUIFcV0ia4iNZrgSmsDp2sNpNUUoAfYoymVWFO4yZW7INL89CHN4SNiXDtNZhldyrcPSkYzCdYG49uQa6vtp3gfbJXQUYsW40nO65WkshVYVX7xi9dtJVk5rhZ5kJ12IkLLM4X1kWmti6yDnmXSTaitjyYpPWodc5jQ5huaTqM7DtaL5KH70rOR4Ze4Q9rQZyupiFtZzc1988U7ILWbIHN/ISkT9kOfve3vgB8dViEMr4feexoNS4PZJeLhG0b9eUj0OMzFI5UzO4Lbn1Zq/qC4K7qUudalLXepSl7rUpX5UdVFwLzWXJO6ISjtz9ybF1a8iLz8z8uR6EH/NyX8UK0V/ZUgVjF7RfE4c3pj56RbkST1Z8XAlpwhJldx1ed2MevbgjEtN8xg5vraz188MmfpxgjeG/lqx+BCZWi0eQsTrNy3Fd6aCdN6GhRFfLHB8o5gWBj2JShcWicVNx6rp52PXKvOSFoRVxD0JQQGl6b6RJ0S/GBkf5Sm8ejTyZBvPT+7RU/xoCjUp1LMjNYltLxL4bXvkuj7SmIkpGd4f17x0NX1XSA+nnHObiApIilwlsJlcUm5MB/W9nLtxLYpKshozFIUjCDfQ9IUzer0gNhpUVX4+EG4WxNaii6Kqhoi7FxpFriv6uwY9JYxWqCjd/fbzQQ4yZ+xuJFuNfx5JlcHsJ3Sw+OfCHa2sKHRaFMlUGcxuwO5ERQmv16LYPolfToWEjkk4ooDaH0k3a0w3SSrbFIUteujLtRYSRDYat+uFmzuk2adnukm8uYcBfejItQVnUCGh+pKs54S/6j93kGqi85gpE2s5j/VB0b3OhFqupzsoss5zh++4yTSfxCNNx5w0N3tHa4V/YfZpZiXX7cRWBVj9WcfxK0kTsp2Zr5l7KcxnZ2jHxLCp2X9pUBEWn+L8GW6fqZ8SbhdIXggNOjJ7dE80g/opCWljiiSrZg+u6RP1p272LuthQr8coaRwJa9JBpbvI8PaFJVIvIAnJUY8dTJnmEnS98aN5NQDXP3ziD0KeWNaOSEANHq+r8nQfBIPbFjX2N2IipmpdP+rBNVTYFqZwqEtfs5toilMT7exdNeG6kUUIr8XJXL+DCWkg2wgJ4UeIroyNJ9lxSA5je4j09qBUrOiNS5lLJhRxkByojgNG1ndmlby1RkW10yNnH8zio+4mqIQEk60iUPHdNeixySsY2dwD4dZgU1WEVpN9RRk/0IiLNy8cmI/vpCbivS2lQ79XSAsrHC9EfV0vFvgXgb0lLCHhDmMM6Uhrr2sZHgjqzjbnlRbiHFeEbDHiJ4yY5kzTwlltiRk5bVi/WcT/Y2Vc2rVPBZB1HyQFRshhMD1H5/n1/7Go0Omv1LUz+Idd/uInorGtodsFbqbIGfhQCtRomNZMTC7nnxXY/qiUHYBc+ypHkti2xTRU0KPqfhGhXusytyhvCW2FvN0IL1qhTrh1Oyd12OGBSzeJ/ZfF/ZtFvURZJVOT5nNrxLuIB7l0MiY0+P5XIRNhS1scRUz09Kf+0WU9KikygqRpvB6TeGAx9+/of31QQgSXy1QUVYv5mtZyc8LySGhvHwP2oOszAxfbXC7Uebwg3ia9ZQgpHOSo1K4lBlvanQUIoF/0TPxQk/Q3yj8LnP1xz39rSTlne57PWXqg3x/x9MqrFXUT4VH7DUqS7/LaZsqQvNQvpf24sFVCTmOIKq+SufUQzNkklfUnwamdYUZEmHp5u+t0DpZPfqjvzq/6qLgXupSl7rUpS51qUtd6kdVFwX3UufKwnW1+3Ni0lTD4++LIpuKj9DtIZ0ejTJ0d+IdU0GemvU
k3c0nnuXxtahdthNmqDvAeH32NHZ38qRXP2b6G4UOQlsw5anY9onnn1cMN+KJnNriAS6pTcc3jmGtsMeKWCncwosX8Ors71JJujzHTcLedWidGIOZPbBTNOSjQbWRdNDol9JVGmUb02BRSUEhMIRWfGoz89Mqjre6JBgp4s2EX57aSiFkjSYzJcOV60iNQqtMW8l7joPH+0D3yyvSFz1UEaXEenXygO1+Jiq73auSBy7qzf5t4YpO0HwOjGuLShm9sgxrgyp+QtuVpCkDdDB8e4PpAuqTPNmPX1+TrcIcE9PSobtJ1KTbYs5KmdhYUZ5yxh6CKARjon8jMot/EmU3NJppUaOnjB4rcsmTn5YW/zKKh8oosi2e2rWozNlcSYfvS8f0zbWwU0NCFYajmhKpdoSFpf5+EG+hL6xd4Pj1AjNk7P2O+Ep8mCc1ONenz9AlMS+iY6Z5DER/7v5XQXzo/W2WJKMjDK9gWhd1tBZOrttlqhfxncf67Fv3h0S/0WQrfrTV95Fhc1YVk4PjVzXDSlO/iBJkhiS+t7u27KO8V0+gTWa4VpjxvA1h5mZJSSoJZe54VqFVEu9c9IrxqqJ66DFTYlyVNDKvSF46/nUXUJ/25EUzK7wnGkp9P3J83czd28mdVT2VhY/qi9o2rrX4kO1pHxXV5yOxcdg+cnzjC1f75NuE0BjQ0p3vdoFpZWkeSsrYKCsxbhfRYyZbxfHOoEP5OeT8umOmfpgYri3+JTBcO0KZe+qnyHDtcAfhjobWYXcT/evCfo6ga0Pycg3sMRIag9+fTrSouNNSPs/thQ5wosicVPoTScJ2iuG2keMuHlhYzrxXFTOhNeipJp3GdIKp0bidItaO+pMMpP4Lue/qd5njzza4l4l0K2lkodHnVbRaOMXJ1rKSZRQ6VFSP03wtOVFM9iPJC5kgLq8Y1rJfw1q66XXxMdsuz4SE03lCidfZHhLZFi7xTq6VakXxNxE2//zA8asWcwyz9xTOqlyoRIVXMc+ebxXF420WFfpZKCzDq6oo0qIMZi9UkHSi6GhFfH2NKt7S/usV1YMw2ZNVNO9GSXArPQxZSfd9vF4I+eVNI/dZOcxxJauAfifKcnLQfMyYsnLjDqnQH4TZq5LMt2FhqB7LyotShJWTnoOU6L9aoadEfy3jpXo5+0b9y8i08nKObuU8Ra84/mQxq5v1/ZHui3b2v7YfRkn1OwRUTIDFduHcb3JjJY0yJkLx0ZM8OmZsGW/TppJzMUSiM0yvLaE5p4h1d+fv8GltZ+/1aU5yW0lQi15xWClUkLngNMajF99ttmWeCuCO6cywPQqhYWqVJAKGcxLjD+c3MyRMNxEWFv8y0b39wVjqpe+m//kd/Cl/aV0U3Etd6lKXutSlLnWpS/2o6qLgXupcSpV86IwZzvzDbEoi0iBKUXOfGdfFh3dIqGxQpZsyG7DbPPtvQTpoc5Jcb+XFb+Nf5L8gyljWivqxdKZqaD9G6nclqez1QtTgLD6e0Cjq54TtCiAWg99npqWQDbJpxNt4UpoexC/WvZG0qNWiZ1WNhKQZozyWpqzQo6Z6faTL0GmH7RT1p9J5/95iBkl50SVhpn5ONB9E/cyqJnpF9SDPjN2VIkbNFys5hpvqwNoOJBRDsmiVcToSkyhJMSsW1chuHWEyaBfRJhM6K15cIKwSqExYWsxeo8OJvVjOwpjFO6coSoR0OZ+UmPrzIPzMyjBu5LiaPpJenWgUCdNFUsmrj61H5TyrA/7Tgdgs545eM0RI4n+NlSiDeoxkp7GHSFgY/MtEaC2xLopbrajvE2qUdBr5Nys+XGC6qvCPPTiLe+yYbhrpgC/KojsGCBm7mxjerjBdEJW5bD/Ueh4XceHwf/wRtCYvm5nEcFJ7k5eO61BrklP455Li9RQZFxp3VIRalIjlr5nHvGkkgezqVxPJKZIzsMvzeFv8tiMrUaxjpZha8ZWfKAsnj6g/ZGxRNvSYyPZMJ8laiZpYy5hPXjrOm8cyFqqS9KcMeowkp/G7iVCYxm43lWSwMh7fNthDnBVHlTLmMAp1IkMOgbRuZslDpVyoKKV7Xol66w6ZxQc5v+PalDS4RKw1i3cjejqnDekfKIDj2lI9R9IrQ138s6YP6CnJeNMePYmn+MS21KOoOLHWhFbjtxF30LhjmpVq22eiR7rqU0aPifZdR2jdPKbBUn0QXq2ekvgwi9ev+nik/6IVNXGI2JeB7svFTC7RIaOHTPWcqB8jw0ZIEtVWLuawEUKLjBVFcy/nvX/bUn0u3s8pUn84EFYV2WlylusbFufPkPMdS1e//p0x3f1kIwSNyoii3EfcdmT3beEJJ1HFxrXB7UVdHVeaptxf2WlMH+hfN4zXnqzF/2q6MK+SqSTXy+8mYqUxY6K6H2fvqJ4c9fc77KsG3QXM04HpzXp+XW2M0ASmTHYGMyTGjZ+/B9xB+Lvtp4TtEtXDSFi62acs5IcsKWudl3nIKFHx+tM8L+NajxFyRh8Gws2CbP38evIG/9Axvmrk/rYa/d1HGW9KoV52xK/vhMtcadrvj+y/lS8iO2RSEMJE9SgkBrKojwDDSpdkLsvyN73MaVlU+RPn23//hH22oLUwfL2WlL/SL6JH8caqGNEvB8LiTu7n5rTyIisfekpUjwOpsthDRIWifj51qJUwkVUf0GsvSaHqlHBY5gNvsLuRZBTjRlLnbOmTcC8DYelxzz3VQogiemImk3SIett+KH0QCeqXhC29IO1HuX9jo+dkOpRcY9kH+TwzyMqpKp7204qDfwmMa4OeFFGVNMWjfPecvO1ZK/zjKMxnLb0D1dPEVJjH40rmg5NP/S+ri4J7qUtd6lKXutSlLnWpH1VdFNxLzXVSycaNIhUladxI/nZohRRge+nQPvFxH/7AEVrpKu9vQO+li1ySTwqrcgXJZerP4l2d1uK/DIvSwT0qUpXZ/kIyrpMX1t7Tv1EStJL4t8JSsfuJxh4gtIbjXWELNgrTn1h+wsVEMTNqlZH0sdBm2MjT59+6fo/VkftBfG7/7P4N5u0RYxLaJdSbgfFocQ9yi7TvRMnKC8qTqeStRy/7MCsVO5hWoA+G3/+971k6eWo2KqNVYm0GNrYjZs337povGmFdTlnzPLbknygOg+e4r4iTRtkE7tQSDkpnUhNJo5rVtVN3+eGtpn7MRAcoqJ6LMqDOT7mxMgwbMzMvp41Hj6KQbH8qKvSJXZqNYriu8M+Fn1hIBSoW/6xRpMoxXnvcVrbRfdGiQhZ6Q5YUuf7W0Xwq2/CW41cNeqxJXhGdov0wcvhSBpQ7JKZNhbUafRwZ15ZpoWfFzQwajCjF09oQK43tRC0+lR4TeCdKwje32PdP4sMrKsd45UUdqcVPPK707+TJJ6Pwh0Q2Bj1lYiXX/DSeTszb2a+qFMv3I8c3cn5CK6ooWfxoZDjemll9XX7fEysjKmRRIPzzwPCqRp083wvN7itDWCiGqLEHGd/Vc0kHvDXE2nB4a9j8ScSMgXFMfysXAAAgAElEQVTlzp7GKcLComKm+X7H9vevaN71xLokzgXQLw
fi8oZpbamGm5mbCqK4miHz+K/VdHeKaZnxL+L9nukQWjzhOiSOa4fbBhk/ZR/9c2D38yXuKExP489cVBBlMYeEHgN6smSrhI08qzgyfqOXsR69xg6J452ZFbGsFN0rTTYLms8jeozobUeqCge3kQTEXDlJRlPyOarwW+PSS4d2K8rpcNvQX5t5PyXdUc/jqv2U2H/p6V6VFQkvvlszFcVqSvghsvtpg4qFuhITetczfbnEHYOoald+9uBWL1F87Dee9rdHwsoTKz37fkH4qMmKWta99jSfx3m8wmkOULTfvYhS3QgzFkQhtkE4u2NRIevPA8kbqtL5rqOo38O1bLu/cfR3Fas/epLXp7qkYUlqpF553GNP/7Yt50ERGk39GNl/I977ZjuBPq0EZtw+kI1FT5n+dYXpzxxoKJSLMTLdtkxLUQGzURy/ks+oHifMfiRcVdjtQKo95qVn/zcF+dN86DH7QVZsCkUhVRrzA+99ur1muG2EI2wVsXFUzyUJsjV018L2/qFPfGrPJ3rcSL9If+uFfKKUpM9dlTS1X2dyiKixZ/j6DSrmkvoln2G3A6mxxLpGXzeS1HUM6LKSmJWQPaaFRkWPPcgqx1ToFqkR2kesLWlTYfoonPGjrBZU3jBtKvznA3FVU306EJu1cIeLH9o/9NjdQNhUwpfeR4w7pyS6gy4MW8fy3Ug2iv1Xdu4x6G8dfisEjsX7if2XDn/IHMr8FytmRnhWWXpv2rPH9zRPVtvI8U7Y7cqLr/w0p6qcGa88ZkzoKc29AuH2xMEVNf+Hvty/qC4K7qUudalLXepSl7rUpX5UdVFwL3WunDGdqLcn0S8bCAvwz6IS6GKHCiU8RkXx2UQvyta0LikvjpmBZ0aILUwrhe1hLOqWOfFdewBRfZvPuahmZ95mrBWxkqfp5MQTOTXgdicGH6SVov2Qxder5I8/FGWz+H9UyuSouG2PfOhF4TkGeSI89p6UNPv3S0mxWk4QFLp4kZvPie3PNP5Fjnm4EZ9Wd1u8gMc8kyOiz3A7cFMd+P2F+L++9E88hiX/dP8lf3Z8xTftE0O0NMVA240tVkWsTliduLo6cOgqrE0YLWrV9qklDwYSmEGRlXSodq9K92oN0Qkvdf+FITRqVn5AUnSOd+Jpcp2wKGOt5s796OUaDGtN8z5i73cMt7fz8NCHgf5nC8wo16h7W+OfA9X9cO4mL8lUDOJxi43B9qKQAfO1TUaYyONKYSY3f4Z/EQXPPZZO+pBRhdUI4HYj43VF1MVz20pHtSo817ooyeSM7QLZaMZvXmG6aU5Ty1qRjZYx8rnDXlv8NjKUTudkJX2s2kayVpguYfs4H0N/ZfCHH6SQWVFeZq/ejcX0kvMO0jE+rvQ5pWeKmJTRoyIsrPggYyY5IS9A8Z42Vsa2ESUiOWZOaXMfRd10isOXjuo5iTdzL+NpeFVjBuFskrN0gO96TnpHbBxpsyCrwuq8acUTW/idw81G8umdIrS5pLLJvbz9pnhkAyx/MwkZY5/o7zy2zyx/+QzA+GZJcuJBPjGAbX9Wo+qPQTyT3z2RnSEsHMNXDW53TtZTURTO6DXTUtia2YhPEcBMSTrwPfQ3jsVhIq1qYlXGR6Pn1QZ7TOLv1rIyAUL1aH67I3y7xu5GDt+0uGOeE+FsFxjXjnGpMX0ktkIoOV1Ld5Tr3v7xE/03G7LTJCUe7Xl81E6UwqdB/OYhi3JWxosQMRIqZ2IrCXPT0szzn5BRDG1hHqsyVpa/Ldfq2kEuXe+vFmSri5p9VvP1047aG9zBERojtJshoidVtuGpxpH+2sysVT3l2YscWofdj5huYrxy2EMirv3M4l2+i3R3HpRcY/8iSYpuW3jDlcG9eyH5m6KkM8/TMhYGxmuPfe6Y7haynS5AEva6jIVErowkmylFXMu84fayD9PKobuAPg5Uj2ZONcu1n+eEXAkT2/QBvy0e9fJdt/jVC8P6msUHIXKYPtPfmHk1UweoniRpzO0j9jDRvalldalsY/riSuae+z3Ja+wx4j8eGF+Lzzd5K95jgAjTyqAH4YqDKJdm23P8dkO2CvfUEa7OdJPhpsIdgnDSjax4hHWNnWTeM9szV9p92pEbWaH4YSJcaqyQU6Y0c6ZDpbHFEL36TWD/hSVbWc1qvt/RvbqW1RtAdTK31u/7mfTRX6l5RaF6EVKPjlDdSx/IoTJU23S+JxWYLuG3soJWPY2MG3tOdDv1z6RMbIqn+7qifS8ysu4CaGZ++F9WFwX3Upe61KUudalLXepSP6q6KLiXmku8QnD4Js3sVXsQ351KkuQ0rRTTSniyAP5FE2phgZpePGLjRjqbZ8Uqg3uRJ7z+9tQpCsmXp7QoPLz6Xvw7hy81bnferxO5oH7IHL4S5TKXrm4QCoOelDB0o3h+UXAs6uq0FO+wiuLN/enqgUOo+Kef3/KLm3sAvn31xC9/+aXkm29G4qjRg5738eHfUoQ20nyQzllhopbEKpjTfUIrfN/1lVATXJG8r8yRO7ul0hP/64d/k5Xr+ecvd3y9FLXr17trxmjoJ0tKGgeEyaJUYBxFQjBVhCoSj1ZYrEgm+Ek9cDs5J7FSoliXfz91fPc3lmkhCTXRw7hUuCOzslltRZUcF6IsZr8RNal4IvXUkKySruCSGBUqX7Lf5Vr7bWRaGuxRlGN7THRrMye+yfXOhfQg/99vzMw+HV45TJ+FBRnBdpF4Y+dkJYoGmUsql0rQvXYzocCWPPnpbkGyGjtM9G9r7N6Isoyovio62fZCPHixPvt8p4UwPbtr6ZhfHCJk5q54qDneWuqXktSVC8u1KHY6lrz2Vgl5o1G0n8JMF+i+aLBdksSfsk+7v7FkXGraezmQ3dcG02WmpSpKoKSInZTQ0GqGlezztFCopKlinpXJ0Ioq1N01pJ9fiTL/zRXuWY5B+MWRbDXmGEiV4fhFgzuI2pUMJflM5oDsZIUAxdk7+pwZrxwq5TPnN0NcVj+41qK+i1daxle/OflXG5a/PnL8g7fERktSmlV0d24eS7IzQhnIWvz9OmSGQrQY1gZ3EB51qBXjTS3vnWkRZa6pDKaPTGtP86sHpuWrMo40cVWLGr5wolyOeU5oci+J+mPH1C6EG6zk2vp96R+YgFQSpPYTw3VF8qrQOQqLNwohY7yqyAb880S2zGM2eo01cl+h4Pja4bo0j6fqoWdaiarpt5OoceZ8X56SHOW6V6JoZmZyiXIadbchtg41JSzFm2xFTQbxGce6or9VcK/RIbN8NzC+EeVR7g9DqIVbPW4cpo8zpQPAjELByFqhim+yvy7X8iUQ7lZU7/akb1fYfWTc2HkOiI0V3/GrBd2tl3HdWrJR+JfCub2rMIN4hcebGnMMjNd+VqrdbmK8azCDx30+EKvClG6LB1drUi37TRTvuDkGpnUZ862sQFTfvxDra/SQsJ2av8eqF1Eju1emqODnazYU1ng2wh/uf3I1K/H24GdiTP+mwh4T48pQRVF+Y6MZkX2wXSTWsoron4RZfHxbzUx5lRRp0CQnCWi7n7Ysf9OTFmVedDKnh
IUl/vRaWNKtqNQzh9sbUmWw2wH7dCT+/Aqj80xySDbPaaWp0mQvKzTjqpAc9gkVMtN1I2q8Br8/35NmythOPL16klQ6dWfm+c+MmWGtyeYEzIZxY3H7yFC8zPXngeMX9bxilbWak+QA7MOBtG5Ezf4r6qLgXupSl7rUpS51qUtd6kdVFwX3UucKkeQhLuMZC9AZ/EshK1hRp/q3cSYkTEkRq0zYJOzOkHXxFl5F/Msprz1jg6ibWYvaOq4z6HMylB5E8Qq1YtxkVFDzE2d/l3EvSry4PqNrhd+e09ZiJVzabCAVX1dyMEpzLclDUEJycB8d4x9Yfn/5gSvXsbBCOfgnT1/hb3rG5wrnAzlbUp1IJ9OiTzApxo0wA6PPVKOaOZJ6gmmhCItEWECrMnd+x7HgJv7x8Vtu7Z4pG143O0Iy/GT1RFUU3reLLSEZtEqEbBij4fVyj1KZT3vxOT39diPe4CRKeX+X8c9qZqc29wkU9Ncav8tMjfhhc/Grdq80zX1iXCrGjZzfZNXcATysNNNK0X5KdHeiZvldUQBKnbi640r8n6ktfsiiFrcfpVN6Wohf0r+URLtynqIX1dgdEkEp6kfhGlcPch36N5K+My5FdaiMXM+TijItdfF9Z8yQxFP8AzVYR4PtEqO32KModuNC1MFQFLHowGfxX6qcGZei7p78X8OVwh4yyUvqXncr6VonFeaU8pWsKGnNQ6S7PfvHyDL+hivF8ntRsseVwQ6F9dtqhrXwU6eVePzGlfg2j3emHKeivhf/sT1mhhs5xydVfmoUdsjkEVyXON4a/E7Nvk+VoX9dMa4Vqvj1slH0NyWVTin0KPuWvKO/tjJWyiGYSRTjrAvHOp8T3GxXxttDYthobJ8ZV4r1d4Fpaeb0KrcPQlooiVX+sef4dTuvDg0bhfqqkeSjBIungVh5oaAgxIxkITkrHthJlFphXZcBqUqinxHf36z4/ID3a/9f9t49WLKzPO/9vWutXn3d19l7ZjSjEYMAgwQISQyEW2LZiX0gcQyxqQDOceAcKOyqkGPw8fElSZXjOKecHGIT42sBAWwfbJJwCwe7bJwQ2QkIw4ABSdwRQhKSRjOz9+xL39btPX+8X180zGh6C81lb72/qqndvfrr1V+vtbrn6+d7vucdKIPl4EWMhezw0jgHevQ6WUfI2nVT8LLScr0xpTtbSBCF/mqNxnpB40w58bU3LM82m4/RqGZV7ep2TYx8wvl8St42b2syqFCBrBNR36jGn4mkm5OmEduH05BYM/WZy0rzJSt0D9ZpnsoZ7KuNPxP2+Rhl2UJt2zb3V0w9jcMqfxXLVtZIqFKxKochxSPrCMP5iKIBg2Vh/p6KrSP1cR+j3PKwyzSi+e0ttr5ngaJVGyvAGpniHg8KymZCtphSNmRcUS7pRagI+VzNZiJ6SvOhjGwhVNarWebtcLlmqTiZqfbDxXg8SxbllvWbWvAMRTsBtZkp+9yaZ72xJlQH56yaX16N1xhA8PSH5I4qS6jSmHiUm90yRTtf7RANq7FC3lgPn/uwbiDObAYqyiuiMmKwEE/WrESW75yvNGicyiibMf0DDeqjJJpYyOZjsk5kyQk6SaUByOeSsSqcLabovrplbceTtInhYkKZCnnHUm/6++t0vmYzM/lSg/qJbfL5uVDZz/zrta2SpB9yatuxzR4VymB/K1QGncxAde4dUDQbViXzUEJ/eQ5RfZhnvL+/bklCSxGt0yXxYPL/jIRqn82TOWUzHs/+jj6TWUjFGeWKt07abEXZiMaZ7UUrIRlUZJ2YWrcyZbtuCShgMybxcGpa8Dy4gus4juM4juPsKVzBdSaUJXlHkWYJW/bLeuR1LZsgOXSvKaBREW0GdXa+RDKBekV0OraV/A1TZ0e+yygzJahKTWXRCqpmRdQPnp96RW0zokxDSoLAYEUpW+EXY6pIGRMPhaQvpsi2TZEF8/6WDauw1l8RoswqpQ32hbdVV8omJFvm9f3y+n7mkgHtZEhRWR++vbFAWUZE7YIij6my2BTmcvSzMyLZME+mlOatjArGK2zBPIpSmv/19HqH3qGUa+qnAfhK7yD39JcZlglZFVOp0KkN2SpM4c2qhEqFBKuqllcxzSQnL2MWmvbrfLg/Ic9j6s2cvjaBmKIjVHGoQLMSUdvW8ar+kco4klctV1TGP2uTvlI0ZewFLJqm0if9ytSdyvyoI6Vo62jLqrVt5OQtU9fSLfslnretzcbRUTax+bnzjnkkR7/Mh/OWvxtlFTQjugdj5u8xDyiYulmmluRRNoUqCRWSiknFpaxj1az6y7GtbO5WDEJiwWj1e9EUaltK0Tb/b39fMlaZo9JU5eFCTOe+jOHhhDiTsQ94dICizI5hWRNQZbAazlUnYrgUUQyUVqFIYUp486QpQRtPrFGEKn1FQ0i75nnO43Dg1VQyjWLqW5brWt+ozNvdGXntrHpQWbe0hyizbo0UsVrPjkm6EdSnwwnD+Wic32lKlY6TNrKGfQZHKk2cqan0JVZ1rim0TpZjL3WyXdJbiUn6SrYgNE5DpkL9jOW9jo61Rqa2SwXDxZjmQ/lYHc07QY2VhPYDGVtPbCM6qbxXJYzVqmRoObBlOpkVSfoVw8WY/rxQ6ymDRaF9wrzLIxW5CvXu7bZ5X7OOUN8cZXBbDrRVVqoYLibknWTsj5XK8m01hrwVZjXqlm0Kppb1VyI695dkc9G44lY/JG7UtyqrJDasGM5H49zjeFBR2574A7sHatR6FVGubB1tEBWM+wBWaU4qu9ZqXfs7WnVetlNa92yy/ZQFsnkhGVjCQn1rUmFLKvvMJ4NqvB5gpISnm6V51ms2+zBK6BDVsTIYZ6aCpxvB47xgPtxRBnbeSYiGJbVCKeYb1NcLhksJjdO2+n+wL6VoRiTbQhn8zv196fhc5u2Y9j3bdK/p2LWZV4hCfd2eXzZiokHJYF9qx29ola10JRmrvFIqqFWsrBJTw/NWNFYWq8TWYFR1AbHPTePBIVVIW4hKJelm5HMpwwMd0oe6DA+2x8om2Gch2RqitSa1bknejmk8ZBfb5pM6tB7KLIsbSLYyS+tYhEb47tDE8nZFdZy3HOU6rjCYbpnP1P6fkzDbKOP8ao1sjUmtW5F3zLee9CuqeFIdsIotd7ms2VqIqFCGh2xmJpuPEW3TX0nG3wWo2rqDkJeetyyFIh4UbF9dDzN5jGdWBqspta75cPOWIInNStTPhOtt3hTrqLDPskaWRT1S+6VS6pumSkdZRfdgajOBzZAqUpus9ymamCd5NaFKonE1teFyjc49PaKhZYoPl+z7ezSbGOUha/0CuILrOI7jOI7j7ClcwXUmxKai1uoFWRayVRWKvmWv5guKdAo0i9AkKEWdnGqrhiQV+WJFvB1ZZidQBP9q0VKqhtK6P2K4pORzFczlVGq/rJPtiKKpJAPL3Kx1BSmgWAom3LSyDD8Nv5DFkhpGfUi6EZIHVSyB+qmQR9sIvs+mrVaNY6FoKpu9Bvd0l6mYKCgrnS73Zwl5kVCupSS9iLJTjb3G6amQ41oKSdfUgnig
[... base64-encoded image/png data omitted; the rendered figure shows the input waveform, the log-mel spectrogram returned by the model, and the scores for the top-10 classes over time ...]\n", +
"text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "# Visualize the results.\n", + "plt.figure(figsize=(10, 8))\n", + "\n", + "# Plot the waveform.\n", + "plt.subplot(3, 1, 1)\n", + "plt.plot(waveform)\n", + "plt.xlim([0, len(waveform)])\n", + "# Plot the log-mel spectrogram (returned by the model).\n", + "plt.subplot(3, 1, 2)\n", + "plt.imshow(spectrogram.T, aspect='auto', interpolation='nearest', origin='bottom')\n", + "\n", + "# Plot and label the model output scores for the top-scoring classes.\n", + "mean_scores = np.mean(scores, axis=0)\n", + "top_N = 10\n", + "top_class_indices = np.argsort(mean_scores)[::-1][:top_N]\n", + "plt.subplot(3, 1, 3)\n", + "plt.imshow(scores[:, top_class_indices].T, aspect='auto', interpolation='nearest', cmap='gray_r')\n", + "# Compensate for the PATCH_WINDOW_SECONDS (0.96 s) context window to align with spectrogram.\n", + "patch_padding = (params.PATCH_WINDOW_SECONDS / 2) / params.PATCH_HOP_SECONDS\n", + "plt.xlim([-patch_padding, scores.shape[0] + patch_padding])\n", + "# Label the top_N classes.\n", + "yticks = range(0, top_N, 1)\n", + "plt.yticks(yticks, [class_names[top_class_indices[x]] for x in yticks])\n", + "_ = plt.ylim(-0.5 + np.array([top_N, 0]))\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/models/research/autoaugment/README.md b/models/research/autoaugment/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a26f4872cc3d30adb6d951fbd7dae53e7530a77f --- /dev/null +++ b/models/research/autoaugment/README.md @@ -0,0 +1,70 @@ +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +Train Wide-ResNet, Shake-Shake and ShakeDrop models on CIFAR-10 +and CIFAR-100 dataset with AutoAugment. + +The CIFAR-10/CIFAR-100 data can be downloaded from: +https://www.cs.toronto.edu/~kriz/cifar.html. Use the Python version instead of the binary version. + +The code replicates the results from Tables 1 and 2 on CIFAR-10/100 with the +following models: Wide-ResNet-28-10, Shake-Shake (26 2x32d), Shake-Shake (26 +2x96d) and PyramidNet+ShakeDrop. + +Related papers: + +AutoAugment: Learning Augmentation Policies from Data + +https://arxiv.org/abs/1805.09501 + +Wide Residual Networks + +https://arxiv.org/abs/1605.07146 + +Shake-Shake regularization + +https://arxiv.org/abs/1705.07485 + +ShakeDrop regularization + +https://arxiv.org/abs/1802.02375 + +Settings: + +CIFAR-10 Model | Learning Rate | Weight Decay | Num. Epochs | Batch Size +---------------------- | ------------- | ------------ | ----------- | ---------- +Wide-ResNet-28-10 | 0.1 | 5e-4 | 200 | 128 +Shake-Shake (26 2x32d) | 0.01 | 1e-3 | 1800 | 128 +Shake-Shake (26 2x96d) | 0.01 | 1e-3 | 1800 | 128 +PyramidNet + ShakeDrop | 0.05 | 5e-5 | 1800 | 64 + +Prerequisite: + +1. Install TensorFlow. Be sure to run the code using python2 and not python3. + +2. 
Download CIFAR-10/CIFAR-100 dataset. + +```shell +curl -o cifar-10-binary.tar.gz https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz +curl -o cifar-100-binary.tar.gz https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz +``` + +How to run: + +```shell +# cd to the your workspace. +# Specify the directory where dataset is located using the data_path flag. +# Note: User can split samples from training set into the eval set by changing train_size and validation_size. + +# For example, to train the Wide-ResNet-28-10 model on a GPU. +python train_cifar.py --model_name=wrn \ + --checkpoint_dir=/tmp/training \ + --data_path=/tmp/data \ + --dataset='cifar10' \ + --use_cpu=0 +``` + +## Contact for Issues + +* Barret Zoph, @barretzoph +* Ekin Dogus Cubuk, diff --git a/models/research/autoaugment/augmentation_transforms.py b/models/research/autoaugment/augmentation_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..584cce45eb57d4796653d2dc8264cdb6a6953fbe --- /dev/null +++ b/models/research/autoaugment/augmentation_transforms.py @@ -0,0 +1,451 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Transforms used in the Augmentation Policies.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import random +import numpy as np +# pylint:disable=g-multiple-import +from PIL import ImageOps, ImageEnhance, ImageFilter, Image +# pylint:enable=g-multiple-import + + +IMAGE_SIZE = 32 +# What is the dataset mean and std of the images on the training set +MEANS = [0.49139968, 0.48215841, 0.44653091] +STDS = [0.24703223, 0.24348513, 0.26158784] +PARAMETER_MAX = 10 # What is the max 'level' a transform could be predicted + + +def random_flip(x): + """Flip the input x horizontally with 50% probability.""" + if np.random.rand(1)[0] > 0.5: + return np.fliplr(x) + return x + + +def zero_pad_and_crop(img, amount=4): + """Zero pad by `amount` zero pixels on each side then take a random crop. + + Args: + img: numpy image that will be zero padded and cropped. + amount: amount of zeros to pad `img` with horizontally and verically. + + Returns: + The cropped zero padded img. The returned numpy array will be of the same + shape as `img`. + """ + padded_img = np.zeros((img.shape[0] + amount * 2, img.shape[1] + amount * 2, + img.shape[2])) + padded_img[amount:img.shape[0] + amount, amount: + img.shape[1] + amount, :] = img + top = np.random.randint(low=0, high=2 * amount) + left = np.random.randint(low=0, high=2 * amount) + new_img = padded_img[top:top + img.shape[0], left:left + img.shape[1], :] + return new_img + + +def create_cutout_mask(img_height, img_width, num_channels, size): + """Creates a zero mask used for cutout of shape `img_height` x `img_width`. + + Args: + img_height: Height of image cutout mask will be applied to. 
+ img_width: Width of image cutout mask will be applied to. + num_channels: Number of channels in the image. + size: Size of the zeros mask. + + Returns: + A mask of shape `img_height` x `img_width` with all ones except for a + square of zeros of shape `size` x `size`. This mask is meant to be + elementwise multiplied with the original image. Additionally returns + the `upper_coord` and `lower_coord` which specify where the cutout mask + will be applied. + """ + assert img_height == img_width + + # Sample center where cutout mask will be applied + height_loc = np.random.randint(low=0, high=img_height) + width_loc = np.random.randint(low=0, high=img_width) + + # Determine upper right and lower left corners of patch + upper_coord = (max(0, height_loc - size // 2), max(0, width_loc - size // 2)) + lower_coord = (min(img_height, height_loc + size // 2), + min(img_width, width_loc + size // 2)) + mask_height = lower_coord[0] - upper_coord[0] + mask_width = lower_coord[1] - upper_coord[1] + assert mask_height > 0 + assert mask_width > 0 + + mask = np.ones((img_height, img_width, num_channels)) + zeros = np.zeros((mask_height, mask_width, num_channels)) + mask[upper_coord[0]:lower_coord[0], upper_coord[1]:lower_coord[1], :] = ( + zeros) + return mask, upper_coord, lower_coord + + +def cutout_numpy(img, size=16): + """Apply cutout with mask of shape `size` x `size` to `img`. + + The cutout operation is from the paper https://arxiv.org/abs/1708.04552. + This operation applies a `size`x`size` mask of zeros to a random location + within `img`. + + Args: + img: Numpy image that cutout will be applied to. + size: Height/width of the cutout mask that will be + + Returns: + A numpy tensor that is the result of applying the cutout mask to `img`. + """ + img_height, img_width, num_channels = (img.shape[0], img.shape[1], + img.shape[2]) + assert len(img.shape) == 3 + mask, _, _ = create_cutout_mask(img_height, img_width, num_channels, size) + return img * mask + + +def float_parameter(level, maxval): + """Helper function to scale `val` between 0 and maxval . + + Args: + level: Level of the operation that will be between [0, `PARAMETER_MAX`]. + maxval: Maximum value that the operation can have. This will be scaled + to level/PARAMETER_MAX. + + Returns: + A float that results from scaling `maxval` according to `level`. + """ + return float(level) * maxval / PARAMETER_MAX + + +def int_parameter(level, maxval): + """Helper function to scale `val` between 0 and maxval . + + Args: + level: Level of the operation that will be between [0, `PARAMETER_MAX`]. + maxval: Maximum value that the operation can have. This will be scaled + to level/PARAMETER_MAX. + + Returns: + An int that results from scaling `maxval` according to `level`. + """ + return int(level * maxval / PARAMETER_MAX) + + +def pil_wrap(img): + """Convert the `img` numpy tensor to a PIL Image.""" + return Image.fromarray( + np.uint8((img * STDS + MEANS) * 255.0)).convert('RGBA') + + +def pil_unwrap(pil_img): + """Converts the PIL img to a numpy array.""" + pic_array = (np.array(pil_img.getdata()).reshape((32, 32, 4)) / 255.0) + i1, i2 = np.where(pic_array[:, :, 3] == 0) + pic_array = (pic_array[:, :, :3] - MEANS) / STDS + pic_array[i1, i2] = [0, 0, 0] + return pic_array + + +def apply_policy(policy, img): + """Apply the `policy` to the numpy `img`. 
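+
+  For example, a policy such as [('Rotate', 0.7, 2), ('TranslateX', 0.3, 9)]
+  first rotates the image with probability 0.7 at magnitude 2, then
+  translates it horizontally with probability 0.3 at magnitude 9; each
+  magnitude is an integer in [0, PARAMETER_MAX].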
+ + Args: + policy: A list of tuples with the form (name, probability, level) where + `name` is the name of the augmentation operation to apply, `probability` + is the probability of applying the operation and `level` is what strength + the operation to apply. + img: Numpy image that will have `policy` applied to it. + + Returns: + The result of applying `policy` to `img`. + """ + pil_img = pil_wrap(img) + for xform in policy: + assert len(xform) == 3 + name, probability, level = xform + xform_fn = NAME_TO_TRANSFORM[name].pil_transformer(probability, level) + pil_img = xform_fn(pil_img) + return pil_unwrap(pil_img) + + +class TransformFunction(object): + """Wraps the Transform function for pretty printing options.""" + + def __init__(self, func, name): + self.f = func + self.name = name + + def __repr__(self): + return '<' + self.name + '>' + + def __call__(self, pil_img): + return self.f(pil_img) + + +class TransformT(object): + """Each instance of this class represents a specific transform.""" + + def __init__(self, name, xform_fn): + self.name = name + self.xform = xform_fn + + def pil_transformer(self, probability, level): + + def return_function(im): + if random.random() < probability: + im = self.xform(im, level) + return im + + name = self.name + '({:.1f},{})'.format(probability, level) + return TransformFunction(return_function, name) + + def do_transform(self, image, level): + f = self.pil_transformer(PARAMETER_MAX, level) + return pil_unwrap(f(pil_wrap(image))) + + +################## Transform Functions ################## +identity = TransformT('identity', lambda pil_img, level: pil_img) +flip_lr = TransformT( + 'FlipLR', + lambda pil_img, level: pil_img.transpose(Image.FLIP_LEFT_RIGHT)) +flip_ud = TransformT( + 'FlipUD', + lambda pil_img, level: pil_img.transpose(Image.FLIP_TOP_BOTTOM)) +# pylint:disable=g-long-lambda +auto_contrast = TransformT( + 'AutoContrast', + lambda pil_img, level: ImageOps.autocontrast( + pil_img.convert('RGB')).convert('RGBA')) +equalize = TransformT( + 'Equalize', + lambda pil_img, level: ImageOps.equalize( + pil_img.convert('RGB')).convert('RGBA')) +invert = TransformT( + 'Invert', + lambda pil_img, level: ImageOps.invert( + pil_img.convert('RGB')).convert('RGBA')) +# pylint:enable=g-long-lambda +blur = TransformT( + 'Blur', lambda pil_img, level: pil_img.filter(ImageFilter.BLUR)) +smooth = TransformT( + 'Smooth', + lambda pil_img, level: pil_img.filter(ImageFilter.SMOOTH)) + + +def _rotate_impl(pil_img, level): + """Rotates `pil_img` from -30 to 30 degrees depending on `level`.""" + degrees = int_parameter(level, 30) + if random.random() > 0.5: + degrees = -degrees + return pil_img.rotate(degrees) + + +rotate = TransformT('Rotate', _rotate_impl) + + +def _posterize_impl(pil_img, level): + """Applies PIL Posterize to `pil_img`.""" + level = int_parameter(level, 4) + return ImageOps.posterize(pil_img.convert('RGB'), 4 - level).convert('RGBA') + + +posterize = TransformT('Posterize', _posterize_impl) + + +def _shear_x_impl(pil_img, level): + """Applies PIL ShearX to `pil_img`. + + The ShearX operation shears the image along the horizontal axis with `level` + magnitude. + + Args: + pil_img: Image in PIL object. + level: Strength of the operation specified as an Integer from + [0, `PARAMETER_MAX`]. + + Returns: + A PIL Image that has had ShearX applied to it. 
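+
+  Example (illustrative): at level 5 the shear factor is
+  float_parameter(5, 0.3) = 0.15, applied in a randomly chosen direction.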
+ """ + level = float_parameter(level, 0.3) + if random.random() > 0.5: + level = -level + return pil_img.transform((32, 32), Image.AFFINE, (1, level, 0, 0, 1, 0)) + + +shear_x = TransformT('ShearX', _shear_x_impl) + + +def _shear_y_impl(pil_img, level): + """Applies PIL ShearY to `pil_img`. + + The ShearY operation shears the image along the vertical axis with `level` + magnitude. + + Args: + pil_img: Image in PIL object. + level: Strength of the operation specified as an Integer from + [0, `PARAMETER_MAX`]. + + Returns: + A PIL Image that has had ShearX applied to it. + """ + level = float_parameter(level, 0.3) + if random.random() > 0.5: + level = -level + return pil_img.transform((32, 32), Image.AFFINE, (1, 0, 0, level, 1, 0)) + + +shear_y = TransformT('ShearY', _shear_y_impl) + + +def _translate_x_impl(pil_img, level): + """Applies PIL TranslateX to `pil_img`. + + Translate the image in the horizontal direction by `level` + number of pixels. + + Args: + pil_img: Image in PIL object. + level: Strength of the operation specified as an Integer from + [0, `PARAMETER_MAX`]. + + Returns: + A PIL Image that has had TranslateX applied to it. + """ + level = int_parameter(level, 10) + if random.random() > 0.5: + level = -level + return pil_img.transform((32, 32), Image.AFFINE, (1, 0, level, 0, 1, 0)) + + +translate_x = TransformT('TranslateX', _translate_x_impl) + + +def _translate_y_impl(pil_img, level): + """Applies PIL TranslateY to `pil_img`. + + Translate the image in the vertical direction by `level` + number of pixels. + + Args: + pil_img: Image in PIL object. + level: Strength of the operation specified as an Integer from + [0, `PARAMETER_MAX`]. + + Returns: + A PIL Image that has had TranslateY applied to it. + """ + level = int_parameter(level, 10) + if random.random() > 0.5: + level = -level + return pil_img.transform((32, 32), Image.AFFINE, (1, 0, 0, 0, 1, level)) + + +translate_y = TransformT('TranslateY', _translate_y_impl) + + +def _crop_impl(pil_img, level, interpolation=Image.BILINEAR): + """Applies a crop to `pil_img` with the size depending on the `level`.""" + cropped = pil_img.crop((level, level, IMAGE_SIZE - level, IMAGE_SIZE - level)) + resized = cropped.resize((IMAGE_SIZE, IMAGE_SIZE), interpolation) + return resized + + +crop_bilinear = TransformT('CropBilinear', _crop_impl) + + +def _solarize_impl(pil_img, level): + """Applies PIL Solarize to `pil_img`. + + Translate the image in the vertical direction by `level` + number of pixels. + + Args: + pil_img: Image in PIL object. + level: Strength of the operation specified as an Integer from + [0, `PARAMETER_MAX`]. + + Returns: + A PIL Image that has had Solarize applied to it. 
+ """ + level = int_parameter(level, 256) + return ImageOps.solarize(pil_img.convert('RGB'), 256 - level).convert('RGBA') + + +solarize = TransformT('Solarize', _solarize_impl) + + +def _cutout_pil_impl(pil_img, level): + """Apply cutout to pil_img at the specified level.""" + size = int_parameter(level, 20) + if size <= 0: + return pil_img + img_height, img_width, num_channels = (32, 32, 3) + _, upper_coord, lower_coord = ( + create_cutout_mask(img_height, img_width, num_channels, size)) + pixels = pil_img.load() # create the pixel map + for i in range(upper_coord[0], lower_coord[0]): # for every col: + for j in range(upper_coord[1], lower_coord[1]): # For every row + pixels[i, j] = (125, 122, 113, 0) # set the colour accordingly + return pil_img + +cutout = TransformT('Cutout', _cutout_pil_impl) + + +def _enhancer_impl(enhancer): + """Sets level to be between 0.1 and 1.8 for ImageEnhance transforms of PIL.""" + def impl(pil_img, level): + v = float_parameter(level, 1.8) + .1 # going to 0 just destroys it + return enhancer(pil_img).enhance(v) + return impl + + +color = TransformT('Color', _enhancer_impl(ImageEnhance.Color)) +contrast = TransformT('Contrast', _enhancer_impl(ImageEnhance.Contrast)) +brightness = TransformT('Brightness', _enhancer_impl( + ImageEnhance.Brightness)) +sharpness = TransformT('Sharpness', _enhancer_impl(ImageEnhance.Sharpness)) + +ALL_TRANSFORMS = [ + flip_lr, + flip_ud, + auto_contrast, + equalize, + invert, + rotate, + posterize, + crop_bilinear, + solarize, + color, + contrast, + brightness, + sharpness, + shear_x, + shear_y, + translate_x, + translate_y, + cutout, + blur, + smooth +] + +NAME_TO_TRANSFORM = {t.name: t for t in ALL_TRANSFORMS} +TRANSFORM_NAMES = NAME_TO_TRANSFORM.keys() diff --git a/models/research/autoaugment/custom_ops.py b/models/research/autoaugment/custom_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..d2a690738608cd4b1a7f03e69af8be02648f86f5 --- /dev/null +++ b/models/research/autoaugment/custom_ops.py @@ -0,0 +1,197 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Contains convenience wrappers for typical Neural Network TensorFlow layers. + + Ops that have different behavior during training or eval have an is_training + parameter. 
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import numpy as np +import tensorflow as tf + + +arg_scope = tf.contrib.framework.arg_scope + + +def variable(name, shape, dtype, initializer, trainable): + """Returns a TF variable with the passed in specifications.""" + var = tf.get_variable( + name, + shape=shape, + dtype=dtype, + initializer=initializer, + trainable=trainable) + return var + + +def global_avg_pool(x, scope=None): + """Average pools away spatial height and width dimension of 4D tensor.""" + assert x.get_shape().ndims == 4 + with tf.name_scope(scope, 'global_avg_pool', [x]): + kernel_size = (1, int(x.shape[1]), int(x.shape[2]), 1) + squeeze_dims = (1, 2) + result = tf.nn.avg_pool( + x, + ksize=kernel_size, + strides=(1, 1, 1, 1), + padding='VALID', + data_format='NHWC') + return tf.squeeze(result, squeeze_dims) + + +def zero_pad(inputs, in_filter, out_filter): + """Zero pads `input` tensor to have `out_filter` number of filters.""" + outputs = tf.pad(inputs, [[0, 0], [0, 0], [0, 0], + [(out_filter - in_filter) // 2, + (out_filter - in_filter) // 2]]) + return outputs + + +@tf.contrib.framework.add_arg_scope +def batch_norm(inputs, + decay=0.999, + center=True, + scale=False, + epsilon=0.001, + is_training=True, + reuse=None, + scope=None): + """Small wrapper around tf.contrib.layers.batch_norm.""" + return tf.contrib.layers.batch_norm( + inputs, + decay=decay, + center=center, + scale=scale, + epsilon=epsilon, + activation_fn=None, + param_initializers=None, + updates_collections=tf.GraphKeys.UPDATE_OPS, + is_training=is_training, + reuse=reuse, + trainable=True, + fused=True, + data_format='NHWC', + zero_debias_moving_mean=False, + scope=scope) + + +def stride_arr(stride_h, stride_w): + return [1, stride_h, stride_w, 1] + + +@tf.contrib.framework.add_arg_scope +def conv2d(inputs, + num_filters_out, + kernel_size, + stride=1, + scope=None, + reuse=None): + """Adds a 2D convolution. + + conv2d creates a variable called 'weights', representing the convolutional + kernel, that is convolved with the input. + + Args: + inputs: a 4D tensor in NHWC format. + num_filters_out: the number of output filters. + kernel_size: an int specifying the kernel height and width size. + stride: an int specifying the height and width stride. + scope: Optional scope for variable_scope. + reuse: whether or not the layer and its variables should be reused. + Returns: + a tensor that is the result of a convolution being applied to `inputs`. + """ + with tf.variable_scope(scope, 'Conv', [inputs], reuse=reuse): + num_filters_in = int(inputs.shape[3]) + weights_shape = [kernel_size, kernel_size, num_filters_in, num_filters_out] + + # Initialization + n = int(weights_shape[0] * weights_shape[1] * weights_shape[3]) + weights_initializer = tf.random_normal_initializer( + stddev=np.sqrt(2.0 / n)) + + weights = variable( + name='weights', + shape=weights_shape, + dtype=tf.float32, + initializer=weights_initializer, + trainable=True) + strides = stride_arr(stride, stride) + outputs = tf.nn.conv2d( + inputs, weights, strides, padding='SAME', data_format='NHWC') + return outputs + + +@tf.contrib.framework.add_arg_scope +def fc(inputs, + num_units_out, + scope=None, + reuse=None): + """Creates a fully connected layer applied to `inputs`. + + Args: + inputs: a tensor that the fully connected layer will be applied to. It + will be reshaped if it is not 2D. + num_units_out: the number of output units in the layer. 
+ scope: Optional scope for variable_scope. + reuse: whether or not the layer and its variables should be reused. + + Returns: + a tensor that is the result of applying a linear matrix to `inputs`. + """ + if len(inputs.shape) > 2: + inputs = tf.reshape(inputs, [int(inputs.shape[0]), -1]) + + with tf.variable_scope(scope, 'FC', [inputs], reuse=reuse): + num_units_in = inputs.shape[1] + weights_shape = [num_units_in, num_units_out] + unif_init_range = 1.0 / (num_units_out)**(0.5) + weights_initializer = tf.random_uniform_initializer( + -unif_init_range, unif_init_range) + weights = variable( + name='weights', + shape=weights_shape, + dtype=tf.float32, + initializer=weights_initializer, + trainable=True) + bias_initializer = tf.constant_initializer(0.0) + biases = variable( + name='biases', + shape=[num_units_out,], + dtype=tf.float32, + initializer=bias_initializer, + trainable=True) + outputs = tf.nn.xw_plus_b(inputs, weights, biases) + return outputs + + +@tf.contrib.framework.add_arg_scope +def avg_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None): + """Wrapper around tf.nn.avg_pool.""" + with tf.name_scope(scope, 'AvgPool', [inputs]): + kernel = stride_arr(kernel_size, kernel_size) + strides = stride_arr(stride, stride) + return tf.nn.avg_pool( + inputs, + ksize=kernel, + strides=strides, + padding=padding, + data_format='NHWC') + diff --git a/models/research/autoaugment/data_utils.py b/models/research/autoaugment/data_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9bf911560d10065a7b2acebb417828d47c176ea5 --- /dev/null +++ b/models/research/autoaugment/data_utils.py @@ -0,0 +1,184 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Data utils for CIFAR-10 and CIFAR-100.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import cPickle +import os +import augmentation_transforms +import numpy as np +import policies as found_policies +import tensorflow as tf + + +# pylint:disable=logging-format-interpolation + + +class DataSet(object): + """Dataset object that produces augmented training and eval data.""" + + def __init__(self, hparams): + self.hparams = hparams + self.epochs = 0 + self.curr_train_index = 0 + + all_labels = [] + + self.good_policies = found_policies.good_policies() + + # Determine how many databatched to load + num_data_batches_to_load = 5 + total_batches_to_load = num_data_batches_to_load + train_batches_to_load = total_batches_to_load + assert hparams.train_size + hparams.validation_size <= 50000 + if hparams.eval_test: + total_batches_to_load += 1 + # Determine how many images we have loaded + total_dataset_size = 10000 * num_data_batches_to_load + train_dataset_size = total_dataset_size + if hparams.eval_test: + total_dataset_size += 10000 + + if hparams.dataset == 'cifar10': + all_data = np.empty((total_batches_to_load, 10000, 3072), dtype=np.uint8) + elif hparams.dataset == 'cifar100': + assert num_data_batches_to_load == 5 + all_data = np.empty((1, 50000, 3072), dtype=np.uint8) + if hparams.eval_test: + test_data = np.empty((1, 10000, 3072), dtype=np.uint8) + if hparams.dataset == 'cifar10': + tf.logging.info('Cifar10') + datafiles = [ + 'data_batch_1', 'data_batch_2', 'data_batch_3', 'data_batch_4', + 'data_batch_5'] + + datafiles = datafiles[:train_batches_to_load] + if hparams.eval_test: + datafiles.append('test_batch') + num_classes = 10 + elif hparams.dataset == 'cifar100': + datafiles = ['train'] + if hparams.eval_test: + datafiles.append('test') + num_classes = 100 + else: + raise NotImplementedError('Unimplemented dataset: ', hparams.dataset) + if hparams.dataset != 'test': + for file_num, f in enumerate(datafiles): + d = unpickle(os.path.join(hparams.data_path, f)) + if f == 'test': + test_data[0] = copy.deepcopy(d['data']) + all_data = np.concatenate([all_data, test_data], axis=1) + else: + all_data[file_num] = copy.deepcopy(d['data']) + if hparams.dataset == 'cifar10': + labels = np.array(d['labels']) + else: + labels = np.array(d['fine_labels']) + nsamples = len(labels) + for idx in range(nsamples): + all_labels.append(labels[idx]) + + all_data = all_data.reshape(total_dataset_size, 3072) + all_data = all_data.reshape(-1, 3, 32, 32) + all_data = all_data.transpose(0, 2, 3, 1).copy() + all_data = all_data / 255.0 + mean = augmentation_transforms.MEANS + std = augmentation_transforms.STDS + tf.logging.info('mean:{} std: {}'.format(mean, std)) + + all_data = (all_data - mean) / std + all_labels = np.eye(num_classes)[np.array(all_labels, dtype=np.int32)] + assert len(all_data) == len(all_labels) + tf.logging.info( + 'In CIFAR10 loader, number of images: {}'.format(len(all_data))) + + # Break off test data + if hparams.eval_test: + self.test_images = all_data[train_dataset_size:] + self.test_labels = all_labels[train_dataset_size:] + + # Shuffle the rest of the data + all_data = all_data[:train_dataset_size] + all_labels = all_labels[:train_dataset_size] + np.random.seed(0) + perm = np.arange(len(all_data)) + np.random.shuffle(perm) + all_data = all_data[perm] + all_labels = all_labels[perm] + + # Break into train and val 
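+    # The first `train_size` shuffled examples become the training split and
+    # the next `validation_size` examples become the held-out validation
+    # split.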
+ train_size, val_size = hparams.train_size, hparams.validation_size + assert 50000 >= train_size + val_size + self.train_images = all_data[:train_size] + self.train_labels = all_labels[:train_size] + self.val_images = all_data[train_size:train_size + val_size] + self.val_labels = all_labels[train_size:train_size + val_size] + self.num_train = self.train_images.shape[0] + + def next_batch(self): + """Return the next minibatch of augmented data.""" + next_train_index = self.curr_train_index + self.hparams.batch_size + if next_train_index > self.num_train: + # Increase epoch number + epoch = self.epochs + 1 + self.reset() + self.epochs = epoch + batched_data = ( + self.train_images[self.curr_train_index: + self.curr_train_index + self.hparams.batch_size], + self.train_labels[self.curr_train_index: + self.curr_train_index + self.hparams.batch_size]) + final_imgs = [] + + images, labels = batched_data + for data in images: + epoch_policy = self.good_policies[np.random.choice( + len(self.good_policies))] + final_img = augmentation_transforms.apply_policy( + epoch_policy, data) + final_img = augmentation_transforms.random_flip( + augmentation_transforms.zero_pad_and_crop(final_img, 4)) + # Apply cutout + final_img = augmentation_transforms.cutout_numpy(final_img) + final_imgs.append(final_img) + batched_data = (np.array(final_imgs, np.float32), labels) + self.curr_train_index += self.hparams.batch_size + return batched_data + + def reset(self): + """Reset training data and index into the training data.""" + self.epochs = 0 + # Shuffle the training data + perm = np.arange(self.num_train) + np.random.shuffle(perm) + assert self.num_train == self.train_images.shape[ + 0], 'Error incorrect shuffling mask' + self.train_images = self.train_images[perm] + self.train_labels = self.train_labels[perm] + self.curr_train_index = 0 + + +def unpickle(f): + tf.logging.info('loading file: {}'.format(f)) + fo = tf.gfile.Open(f, 'r') + d = cPickle.load(fo) + fo.close() + return d diff --git a/models/research/autoaugment/helper_utils.py b/models/research/autoaugment/helper_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e896874383fb1abc3e9f6f0f452964b681a9c6c0 --- /dev/null +++ b/models/research/autoaugment/helper_utils.py @@ -0,0 +1,149 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Helper functions used for training AutoAugment models.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf + + +def setup_loss(logits, labels): + """Returns the cross entropy for the given `logits` and `labels`.""" + predictions = tf.nn.softmax(logits) + cost = tf.losses.softmax_cross_entropy(onehot_labels=labels, + logits=logits) + return predictions, cost + + +def decay_weights(cost, weight_decay_rate): + """Calculates the loss for l2 weight decay and adds it to `cost`.""" + costs = [] + for var in tf.trainable_variables(): + costs.append(tf.nn.l2_loss(var)) + cost += tf.multiply(weight_decay_rate, tf.add_n(costs)) + return cost + + +def eval_child_model(session, model, data_loader, mode): + """Evaluates `model` on held out data depending on `mode`. + + Args: + session: TensorFlow session the model will be run with. + model: TensorFlow model that will be evaluated. + data_loader: DataSet object that contains data that `model` will + evaluate. + mode: Will `model` either evaluate validation or test data. + + Returns: + Accuracy of `model` when evaluated on the specified dataset. + + Raises: + ValueError: if invalid dataset `mode` is specified. + """ + if mode == 'val': + images = data_loader.val_images + labels = data_loader.val_labels + elif mode == 'test': + images = data_loader.test_images + labels = data_loader.test_labels + else: + raise ValueError('Not valid eval mode') + assert len(images) == len(labels) + tf.logging.info('model.batch_size is {}'.format(model.batch_size)) + assert len(images) % model.batch_size == 0 + eval_batches = int(len(images) / model.batch_size) + for i in range(eval_batches): + eval_images = images[i * model.batch_size:(i + 1) * model.batch_size] + eval_labels = labels[i * model.batch_size:(i + 1) * model.batch_size] + _ = session.run( + model.eval_op, + feed_dict={ + model.images: eval_images, + model.labels: eval_labels, + }) + return session.run(model.accuracy) + + +def cosine_lr(learning_rate, epoch, iteration, batches_per_epoch, total_epochs): + """Cosine Learning rate. + + Args: + learning_rate: Initial learning rate. + epoch: Current epoch we are one. This is one based. + iteration: Current batch in this epoch. + batches_per_epoch: Batches per epoch. + total_epochs: Total epochs you are training for. + + Returns: + The learning rate to be used for this current batch. + """ + t_total = total_epochs * batches_per_epoch + t_cur = float(epoch * batches_per_epoch + iteration) + return 0.5 * learning_rate * (1 + np.cos(np.pi * t_cur / t_total)) + + +def get_lr(curr_epoch, hparams, iteration=None): + """Returns the learning rate during training based on the current epoch.""" + assert iteration is not None + batches_per_epoch = int(hparams.train_size / hparams.batch_size) + lr = cosine_lr(hparams.lr, curr_epoch, iteration, batches_per_epoch, + hparams.num_epochs) + return lr + + +def run_epoch_training(session, model, data_loader, curr_epoch): + """Runs one epoch of training for the model passed in. + + Args: + session: TensorFlow session the model will be run with. + model: TensorFlow model that will be evaluated. + data_loader: DataSet object that contains data that `model` will + evaluate. + curr_epoch: How many of epochs of training have been done so far. 
+ + Returns: + The accuracy of 'model' on the training set + """ + steps_per_epoch = int(model.hparams.train_size / model.hparams.batch_size) + tf.logging.info('steps per epoch: {}'.format(steps_per_epoch)) + curr_step = session.run(model.global_step) + assert curr_step % steps_per_epoch == 0 + + # Get the current learning rate for the model based on the current epoch + curr_lr = get_lr(curr_epoch, model.hparams, iteration=0) + tf.logging.info('lr of {} for epoch {}'.format(curr_lr, curr_epoch)) + + for step in xrange(steps_per_epoch): + curr_lr = get_lr(curr_epoch, model.hparams, iteration=(step + 1)) + # Update the lr rate variable to the current LR. + model.lr_rate_ph.load(curr_lr, session=session) + if step % 20 == 0: + tf.logging.info('Training {}/{}'.format(step, steps_per_epoch)) + + train_images, train_labels = data_loader.next_batch() + _, step, _ = session.run( + [model.train_op, model.global_step, model.eval_op], + feed_dict={ + model.images: train_images, + model.labels: train_labels, + }) + + train_accuracy = session.run(model.accuracy) + tf.logging.info('Train accuracy: {}'.format(train_accuracy)) + return train_accuracy diff --git a/models/research/autoaugment/policies.py b/models/research/autoaugment/policies.py new file mode 100644 index 0000000000000000000000000000000000000000..36b10b0ee4c0a1b65d6cc0181bce8924322f1390 --- /dev/null +++ b/models/research/autoaugment/policies.py @@ -0,0 +1,140 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +def good_policies(): + """AutoAugment policies found on Cifar.""" + exp0_0 = [ + [('Invert', 0.1, 7), ('Contrast', 0.2, 6)], + [('Rotate', 0.7, 2), ('TranslateX', 0.3, 9)], + [('Sharpness', 0.8, 1), ('Sharpness', 0.9, 3)], + [('ShearY', 0.5, 8), ('TranslateY', 0.7, 9)], + [('AutoContrast', 0.5, 8), ('Equalize', 0.9, 2)]] + exp0_1 = [ + [('Solarize', 0.4, 5), ('AutoContrast', 0.9, 3)], + [('TranslateY', 0.9, 9), ('TranslateY', 0.7, 9)], + [('AutoContrast', 0.9, 2), ('Solarize', 0.8, 3)], + [('Equalize', 0.8, 8), ('Invert', 0.1, 3)], + [('TranslateY', 0.7, 9), ('AutoContrast', 0.9, 1)]] + exp0_2 = [ + [('Solarize', 0.4, 5), ('AutoContrast', 0.0, 2)], + [('TranslateY', 0.7, 9), ('TranslateY', 0.7, 9)], + [('AutoContrast', 0.9, 0), ('Solarize', 0.4, 3)], + [('Equalize', 0.7, 5), ('Invert', 0.1, 3)], + [('TranslateY', 0.7, 9), ('TranslateY', 0.7, 9)]] + exp0_3 = [ + [('Solarize', 0.4, 5), ('AutoContrast', 0.9, 1)], + [('TranslateY', 0.8, 9), ('TranslateY', 0.9, 9)], + [('AutoContrast', 0.8, 0), ('TranslateY', 0.7, 9)], + [('TranslateY', 0.2, 7), ('Color', 0.9, 6)], + [('Equalize', 0.7, 6), ('Color', 0.4, 9)]] + exp1_0 = [ + [('ShearY', 0.2, 7), ('Posterize', 0.3, 7)], + [('Color', 0.4, 3), ('Brightness', 0.6, 7)], + [('Sharpness', 0.3, 9), ('Brightness', 0.7, 9)], + [('Equalize', 0.6, 5), ('Equalize', 0.5, 1)], + [('Contrast', 0.6, 7), ('Sharpness', 0.6, 5)]] + exp1_1 = [ + [('Brightness', 0.3, 7), ('AutoContrast', 0.5, 8)], + [('AutoContrast', 0.9, 4), ('AutoContrast', 0.5, 6)], + [('Solarize', 0.3, 5), ('Equalize', 0.6, 5)], + [('TranslateY', 0.2, 4), ('Sharpness', 0.3, 3)], + [('Brightness', 0.0, 8), ('Color', 0.8, 8)]] + exp1_2 = [ + [('Solarize', 0.2, 6), ('Color', 0.8, 6)], + [('Solarize', 0.2, 6), ('AutoContrast', 0.8, 1)], + [('Solarize', 0.4, 1), ('Equalize', 0.6, 5)], + [('Brightness', 0.0, 0), ('Solarize', 0.5, 2)], + [('AutoContrast', 0.9, 5), ('Brightness', 0.5, 3)]] + exp1_3 = [ + [('Contrast', 0.7, 5), ('Brightness', 0.0, 2)], + [('Solarize', 0.2, 8), ('Solarize', 0.1, 5)], + [('Contrast', 0.5, 1), ('TranslateY', 0.2, 9)], + [('AutoContrast', 0.6, 5), ('TranslateY', 0.0, 9)], + [('AutoContrast', 0.9, 4), ('Equalize', 0.8, 4)]] + exp1_4 = [ + [('Brightness', 0.0, 7), ('Equalize', 0.4, 7)], + [('Solarize', 0.2, 5), ('Equalize', 0.7, 5)], + [('Equalize', 0.6, 8), ('Color', 0.6, 2)], + [('Color', 0.3, 7), ('Color', 0.2, 4)], + [('AutoContrast', 0.5, 2), ('Solarize', 0.7, 2)]] + exp1_5 = [ + [('AutoContrast', 0.2, 0), ('Equalize', 0.1, 0)], + [('ShearY', 0.6, 5), ('Equalize', 0.6, 5)], + [('Brightness', 0.9, 3), ('AutoContrast', 0.4, 1)], + [('Equalize', 0.8, 8), ('Equalize', 0.7, 7)], + [('Equalize', 0.7, 7), ('Solarize', 0.5, 0)]] + exp1_6 = [ + [('Equalize', 0.8, 4), ('TranslateY', 0.8, 9)], + [('TranslateY', 0.8, 9), ('TranslateY', 0.6, 9)], + [('TranslateY', 0.9, 0), ('TranslateY', 0.5, 9)], + [('AutoContrast', 0.5, 3), ('Solarize', 0.3, 4)], + [('Solarize', 0.5, 3), ('Equalize', 0.4, 4)]] + exp2_0 = [ + [('Color', 0.7, 7), ('TranslateX', 0.5, 8)], + [('Equalize', 0.3, 7), ('AutoContrast', 0.4, 8)], + [('TranslateY', 0.4, 3), ('Sharpness', 0.2, 6)], + [('Brightness', 0.9, 6), ('Color', 0.2, 8)], + [('Solarize', 0.5, 2), ('Invert', 0.0, 3)]] + exp2_1 = [ + [('AutoContrast', 0.1, 5), ('Brightness', 0.0, 0)], + [('Cutout', 0.2, 4), ('Equalize', 0.1, 1)], + [('Equalize', 0.7, 7), 
('AutoContrast', 0.6, 4)], + [('Color', 0.1, 8), ('ShearY', 0.2, 3)], + [('ShearY', 0.4, 2), ('Rotate', 0.7, 0)]] + exp2_2 = [ + [('ShearY', 0.1, 3), ('AutoContrast', 0.9, 5)], + [('TranslateY', 0.3, 6), ('Cutout', 0.3, 3)], + [('Equalize', 0.5, 0), ('Solarize', 0.6, 6)], + [('AutoContrast', 0.3, 5), ('Rotate', 0.2, 7)], + [('Equalize', 0.8, 2), ('Invert', 0.4, 0)]] + exp2_3 = [ + [('Equalize', 0.9, 5), ('Color', 0.7, 0)], + [('Equalize', 0.1, 1), ('ShearY', 0.1, 3)], + [('AutoContrast', 0.7, 3), ('Equalize', 0.7, 0)], + [('Brightness', 0.5, 1), ('Contrast', 0.1, 7)], + [('Contrast', 0.1, 4), ('Solarize', 0.6, 5)]] + exp2_4 = [ + [('Solarize', 0.2, 3), ('ShearX', 0.0, 0)], + [('TranslateX', 0.3, 0), ('TranslateX', 0.6, 0)], + [('Equalize', 0.5, 9), ('TranslateY', 0.6, 7)], + [('ShearX', 0.1, 0), ('Sharpness', 0.5, 1)], + [('Equalize', 0.8, 6), ('Invert', 0.3, 6)]] + exp2_5 = [ + [('AutoContrast', 0.3, 9), ('Cutout', 0.5, 3)], + [('ShearX', 0.4, 4), ('AutoContrast', 0.9, 2)], + [('ShearX', 0.0, 3), ('Posterize', 0.0, 3)], + [('Solarize', 0.4, 3), ('Color', 0.2, 4)], + [('Equalize', 0.1, 4), ('Equalize', 0.7, 6)]] + exp2_6 = [ + [('Equalize', 0.3, 8), ('AutoContrast', 0.4, 3)], + [('Solarize', 0.6, 4), ('AutoContrast', 0.7, 6)], + [('AutoContrast', 0.2, 9), ('Brightness', 0.4, 8)], + [('Equalize', 0.1, 0), ('Equalize', 0.0, 6)], + [('Equalize', 0.8, 4), ('Equalize', 0.0, 4)]] + exp2_7 = [ + [('Equalize', 0.5, 5), ('AutoContrast', 0.1, 2)], + [('Solarize', 0.5, 5), ('AutoContrast', 0.9, 5)], + [('AutoContrast', 0.6, 1), ('AutoContrast', 0.7, 8)], + [('Equalize', 0.2, 0), ('AutoContrast', 0.1, 2)], + [('Equalize', 0.6, 9), ('Equalize', 0.4, 4)]] + exp0s = exp0_0 + exp0_1 + exp0_2 + exp0_3 + exp1s = exp1_0 + exp1_1 + exp1_2 + exp1_3 + exp1_4 + exp1_5 + exp1_6 + exp2s = exp2_0 + exp2_1 + exp2_2 + exp2_3 + exp2_4 + exp2_5 + exp2_6 + exp2_7 + return exp0s + exp1s + exp2s diff --git a/models/research/autoaugment/shake_drop.py b/models/research/autoaugment/shake_drop.py new file mode 100644 index 0000000000000000000000000000000000000000..b6d3bcdb6c7fe38c64cfadd728bba8d71b72a518 --- /dev/null +++ b/models/research/autoaugment/shake_drop.py @@ -0,0 +1,178 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
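# [Editor's note -- illustration, not part of the diff above.] Each sub-policy
# in good_policies() is a pair of (operation name, probability, magnitude)
# tuples: the named operation is applied to an image with the given
# probability, at a strength selected by the integer magnitude. A hedged
# sketch of how such a sub-policy might be consumed; `op_table` (a dict mapping
# operation names to augmentation functions) is a hypothetical stand-in, since
# the real augmentation ops are defined elsewhere in this package:
import random

def apply_sub_policy(image, sub_policy, op_table):
  """Applies a sub-policy such as [('Invert', 0.1, 7), ('Contrast', 0.2, 6)]."""
  for op_name, prob, magnitude in sub_policy:
    if random.random() < prob:
      image = op_table[op_name](image, magnitude)
  return image
# [End of editor's note.]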
+# ============================================================================== + +"""Builds the Shake-Shake Model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import custom_ops as ops +import tensorflow as tf + + +def round_int(x): + """Rounds `x` and then converts to an int.""" + return int(math.floor(x + 0.5)) + + +def shortcut(x, output_filters, stride): + """Applies strided avg pool or zero padding to make output_filters match x.""" + num_filters = int(x.shape[3]) + if stride == 2: + x = ops.avg_pool(x, 2, stride=stride, padding='SAME') + if num_filters != output_filters: + diff = output_filters - num_filters + assert diff > 0 + # Zero padd diff zeros + padding = [[0, 0], [0, 0], [0, 0], [0, diff]] + x = tf.pad(x, padding) + return x + + +def calc_prob(curr_layer, total_layers, p_l): + """Calculates drop prob depending on the current layer.""" + return 1 - (float(curr_layer) / total_layers) * p_l + + +def bottleneck_layer(x, n, stride, prob, is_training, alpha, beta): + """Bottleneck layer for shake drop model.""" + assert alpha[1] > alpha[0] + assert beta[1] > beta[0] + with tf.variable_scope('bottleneck_{}'.format(prob)): + input_layer = x + x = ops.batch_norm(x, scope='bn_1_pre') + x = ops.conv2d(x, n, 1, scope='1x1_conv_contract') + x = ops.batch_norm(x, scope='bn_1_post') + x = tf.nn.relu(x) + x = ops.conv2d(x, n, 3, stride=stride, scope='3x3') + x = ops.batch_norm(x, scope='bn_2') + x = tf.nn.relu(x) + x = ops.conv2d(x, n * 4, 1, scope='1x1_conv_expand') + x = ops.batch_norm(x, scope='bn_3') + + # Apply regularization here + # Sample bernoulli with prob + if is_training: + batch_size = tf.shape(x)[0] + bern_shape = [batch_size, 1, 1, 1] + random_tensor = prob + random_tensor += tf.random_uniform(bern_shape, dtype=tf.float32) + binary_tensor = tf.floor(random_tensor) + + alpha_values = tf.random_uniform( + [batch_size, 1, 1, 1], minval=alpha[0], maxval=alpha[1], + dtype=tf.float32) + beta_values = tf.random_uniform( + [batch_size, 1, 1, 1], minval=beta[0], maxval=beta[1], + dtype=tf.float32) + rand_forward = ( + binary_tensor + alpha_values - binary_tensor * alpha_values) + rand_backward = ( + binary_tensor + beta_values - binary_tensor * beta_values) + x = x * rand_backward + tf.stop_gradient(x * rand_forward - + x * rand_backward) + else: + expected_alpha = (alpha[1] + alpha[0])/2 + # prob is the expectation of the bernoulli variable + x = (prob + expected_alpha - prob * expected_alpha) * x + + res = shortcut(input_layer, n * 4, stride) + return x + res + + +def build_shake_drop_model(images, num_classes, is_training): + """Builds the PyramidNet Shake-Drop model. + + Build the PyramidNet Shake-Drop model from https://arxiv.org/abs/1802.02375. + + Args: + images: Tensor of images that will be fed into the Wide ResNet Model. + num_classes: Number of classed that the model needs to predict. + is_training: Is the model training or not. + + Returns: + The logits of the PyramidNet Shake-Drop model. 
+ """ + # ShakeDrop Hparams + p_l = 0.5 + alpha_shake = [-1, 1] + beta_shake = [0, 1] + + # PyramidNet Hparams + alpha = 200 + depth = 272 + # This is for the bottleneck architecture specifically + n = int((depth - 2) / 9) + start_channel = 16 + add_channel = alpha / (3 * n) + + # Building the models + x = images + x = ops.conv2d(x, 16, 3, scope='init_conv') + x = ops.batch_norm(x, scope='init_bn') + + layer_num = 1 + total_layers = n * 3 + start_channel += add_channel + prob = calc_prob(layer_num, total_layers, p_l) + x = bottleneck_layer( + x, round_int(start_channel), 1, prob, is_training, alpha_shake, + beta_shake) + layer_num += 1 + for _ in range(1, n): + start_channel += add_channel + prob = calc_prob(layer_num, total_layers, p_l) + x = bottleneck_layer( + x, round_int(start_channel), 1, prob, is_training, alpha_shake, + beta_shake) + layer_num += 1 + + start_channel += add_channel + prob = calc_prob(layer_num, total_layers, p_l) + x = bottleneck_layer( + x, round_int(start_channel), 2, prob, is_training, alpha_shake, + beta_shake) + layer_num += 1 + for _ in range(1, n): + start_channel += add_channel + prob = calc_prob(layer_num, total_layers, p_l) + x = bottleneck_layer( + x, round_int(start_channel), 1, prob, is_training, alpha_shake, + beta_shake) + layer_num += 1 + + start_channel += add_channel + prob = calc_prob(layer_num, total_layers, p_l) + x = bottleneck_layer( + x, round_int(start_channel), 2, prob, is_training, alpha_shake, + beta_shake) + layer_num += 1 + for _ in range(1, n): + start_channel += add_channel + prob = calc_prob(layer_num, total_layers, p_l) + x = bottleneck_layer( + x, round_int(start_channel), 1, prob, is_training, alpha_shake, + beta_shake) + layer_num += 1 + + assert layer_num - 1 == total_layers + x = ops.batch_norm(x, scope='final_bn') + x = tf.nn.relu(x) + x = ops.global_avg_pool(x) + # Fully connected + logits = ops.fc(x, num_classes) + return logits diff --git a/models/research/autoaugment/shake_shake.py b/models/research/autoaugment/shake_shake.py new file mode 100644 index 0000000000000000000000000000000000000000..b937372c5e52fc269baa53e4b59e52864acd2e69 --- /dev/null +++ b/models/research/autoaugment/shake_shake.py @@ -0,0 +1,147 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
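# [Editor's note -- illustration, not part of the diff above.] In shake_drop.py
# above, calc_prob linearly anneals the per-layer "keep" probability from
# roughly 1 at the first bottleneck layer down to 1 - p_l at the last one.
# With the values hard-coded in build_shake_drop_model (depth 272 -> n = 30 ->
# 90 bottleneck layers, p_l = 0.5):
def calc_prob(curr_layer, total_layers, p_l):
  # Same formula as shake_drop.calc_prob above.
  return 1 - (float(curr_layer) / total_layers) * p_l

print(calc_prob(1, 90, 0.5))   # ~0.994: the first layer is almost always kept
print(calc_prob(45, 90, 0.5))  # 0.75
print(calc_prob(90, 90, 0.5))  # 0.5: the last layer is dropped half the time
# [End of editor's note.]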
+# ============================================================================== + +"""Builds the Shake-Shake Model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import custom_ops as ops +import tensorflow as tf + + +def _shake_shake_skip_connection(x, output_filters, stride): + """Adds a residual connection to the filter x for the shake-shake model.""" + curr_filters = int(x.shape[3]) + if curr_filters == output_filters: + return x + stride_spec = ops.stride_arr(stride, stride) + # Skip path 1 + path1 = tf.nn.avg_pool( + x, [1, 1, 1, 1], stride_spec, 'VALID', data_format='NHWC') + path1 = ops.conv2d(path1, int(output_filters / 2), 1, scope='path1_conv') + + # Skip path 2 + # First pad with 0's then crop + pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]] + path2 = tf.pad(x, pad_arr)[:, 1:, 1:, :] + concat_axis = 3 + + path2 = tf.nn.avg_pool( + path2, [1, 1, 1, 1], stride_spec, 'VALID', data_format='NHWC') + path2 = ops.conv2d(path2, int(output_filters / 2), 1, scope='path2_conv') + + # Concat and apply BN + final_path = tf.concat(values=[path1, path2], axis=concat_axis) + final_path = ops.batch_norm(final_path, scope='final_path_bn') + return final_path + + +def _shake_shake_branch(x, output_filters, stride, rand_forward, rand_backward, + is_training): + """Building a 2 branching convnet.""" + x = tf.nn.relu(x) + x = ops.conv2d(x, output_filters, 3, stride=stride, scope='conv1') + x = ops.batch_norm(x, scope='bn1') + x = tf.nn.relu(x) + x = ops.conv2d(x, output_filters, 3, scope='conv2') + x = ops.batch_norm(x, scope='bn2') + if is_training: + x = x * rand_backward + tf.stop_gradient(x * rand_forward - + x * rand_backward) + else: + x *= 1.0 / 2 + return x + + +def _shake_shake_block(x, output_filters, stride, is_training): + """Builds a full shake-shake sub layer.""" + batch_size = tf.shape(x)[0] + + # Generate random numbers for scaling the branches + rand_forward = [ + tf.random_uniform( + [batch_size, 1, 1, 1], minval=0, maxval=1, dtype=tf.float32) + for _ in range(2) + ] + rand_backward = [ + tf.random_uniform( + [batch_size, 1, 1, 1], minval=0, maxval=1, dtype=tf.float32) + for _ in range(2) + ] + # Normalize so that all sum to 1 + total_forward = tf.add_n(rand_forward) + total_backward = tf.add_n(rand_backward) + rand_forward = [samp / total_forward for samp in rand_forward] + rand_backward = [samp / total_backward for samp in rand_backward] + zipped_rand = zip(rand_forward, rand_backward) + + branches = [] + for branch, (r_forward, r_backward) in enumerate(zipped_rand): + with tf.variable_scope('branch_{}'.format(branch)): + b = _shake_shake_branch(x, output_filters, stride, r_forward, r_backward, + is_training) + branches.append(b) + res = _shake_shake_skip_connection(x, output_filters, stride) + return res + tf.add_n(branches) + + +def _shake_shake_layer(x, output_filters, num_blocks, stride, + is_training): + """Builds many sub layers into one full layer.""" + for block_num in range(num_blocks): + curr_stride = stride if (block_num == 0) else 1 + with tf.variable_scope('layer_{}'.format(block_num)): + x = _shake_shake_block(x, output_filters, curr_stride, + is_training) + return x + + +def build_shake_shake_model(images, num_classes, hparams, is_training): + """Builds the Shake-Shake model. + + Build the Shake-Shake model from https://arxiv.org/abs/1705.07485. + + Args: + images: Tensor of images that will be fed into the Wide ResNet Model. + num_classes: Number of classed that the model needs to predict. 
+ hparams: tf.HParams object that contains additional hparams needed to + construct the model. In this case it is the `shake_shake_widen_factor` + that is used to determine how many filters the model has. + is_training: Is the model training or not. + + Returns: + The logits of the Shake-Shake model. + """ + depth = 26 + k = hparams.shake_shake_widen_factor # The widen factor + n = int((depth - 2) / 6) + x = images + + x = ops.conv2d(x, 16, 3, scope='init_conv') + x = ops.batch_norm(x, scope='init_bn') + with tf.variable_scope('L1'): + x = _shake_shake_layer(x, 16 * k, n, 1, is_training) + with tf.variable_scope('L2'): + x = _shake_shake_layer(x, 32 * k, n, 2, is_training) + with tf.variable_scope('L3'): + x = _shake_shake_layer(x, 64 * k, n, 2, is_training) + x = tf.nn.relu(x) + x = ops.global_avg_pool(x) + + # Fully connected + logits = ops.fc(x, num_classes) + return logits diff --git a/models/research/autoaugment/train_cifar.py b/models/research/autoaugment/train_cifar.py new file mode 100644 index 0000000000000000000000000000000000000000..9e3942ee26b1bd68234d34b17a818e058d9c881a --- /dev/null +++ b/models/research/autoaugment/train_cifar.py @@ -0,0 +1,452 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""AutoAugment Train/Eval module. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import contextlib +import os +import time + +import custom_ops as ops +import data_utils +import helper_utils +import numpy as np +from shake_drop import build_shake_drop_model +from shake_shake import build_shake_shake_model +import tensorflow as tf +from wrn import build_wrn_model + +tf.flags.DEFINE_string('model_name', 'wrn', + 'wrn, shake_shake_32, shake_shake_96, shake_shake_112, ' + 'pyramid_net') +tf.flags.DEFINE_string('checkpoint_dir', '/tmp/training', 'Training Directory.') +tf.flags.DEFINE_string('data_path', '/tmp/data', + 'Directory where dataset is located.') +tf.flags.DEFINE_string('dataset', 'cifar10', + 'Dataset to train with. Either cifar10 or cifar100') +tf.flags.DEFINE_integer('use_cpu', 1, '1 if use CPU, else GPU.') + +FLAGS = tf.flags.FLAGS + +arg_scope = tf.contrib.framework.arg_scope + + +def setup_arg_scopes(is_training): + """Sets up the argscopes that will be used when building an image model. + + Args: + is_training: Is the model training or not. + + Returns: + Arg scopes to be put around the model being constructed. + """ + + batch_norm_decay = 0.9 + batch_norm_epsilon = 1e-5 + batch_norm_params = { + # Decay for the moving averages. + 'decay': batch_norm_decay, + # epsilon to prevent 0s in variance. + 'epsilon': batch_norm_epsilon, + 'scale': True, + # collection containing the moving mean and moving variance. 
+ 'is_training': is_training, + } + + scopes = [] + + scopes.append(arg_scope([ops.batch_norm], **batch_norm_params)) + return scopes + + +def build_model(inputs, num_classes, is_training, hparams): + """Constructs the vision model being trained/evaled. + + Args: + inputs: input features/images being fed to the image model build built. + num_classes: number of output classes being predicted. + is_training: is the model training or not. + hparams: additional hyperparameters associated with the image model. + + Returns: + The logits of the image model. + """ + scopes = setup_arg_scopes(is_training) + with contextlib.nested(*scopes): + if hparams.model_name == 'pyramid_net': + logits = build_shake_drop_model( + inputs, num_classes, is_training) + elif hparams.model_name == 'wrn': + logits = build_wrn_model( + inputs, num_classes, hparams.wrn_size) + elif hparams.model_name == 'shake_shake': + logits = build_shake_shake_model( + inputs, num_classes, hparams, is_training) + return logits + + +class CifarModel(object): + """Builds an image model for Cifar10/Cifar100.""" + + def __init__(self, hparams): + self.hparams = hparams + + def build(self, mode): + """Construct the cifar model.""" + assert mode in ['train', 'eval'] + self.mode = mode + self._setup_misc(mode) + self._setup_images_and_labels() + self._build_graph(self.images, self.labels, mode) + + self.init = tf.group(tf.global_variables_initializer(), + tf.local_variables_initializer()) + + def _setup_misc(self, mode): + """Sets up miscellaneous in the cifar model constructor.""" + self.lr_rate_ph = tf.Variable(0.0, name='lrn_rate', trainable=False) + self.reuse = None if (mode == 'train') else True + self.batch_size = self.hparams.batch_size + if mode == 'eval': + self.batch_size = 25 + + def _setup_images_and_labels(self): + """Sets up image and label placeholders for the cifar model.""" + if FLAGS.dataset == 'cifar10': + self.num_classes = 10 + else: + self.num_classes = 100 + self.images = tf.placeholder(tf.float32, [self.batch_size, 32, 32, 3]) + self.labels = tf.placeholder(tf.float32, + [self.batch_size, self.num_classes]) + + def assign_epoch(self, session, epoch_value): + session.run(self._epoch_update, feed_dict={self._new_epoch: epoch_value}) + + def _build_graph(self, images, labels, mode): + """Constructs the TF graph for the cifar model. + + Args: + images: A 4-D image Tensor + labels: A 2-D labels Tensor. + mode: string indicating training mode ( e.g., 'train', 'valid', 'test'). + """ + is_training = 'train' in mode + if is_training: + self.global_step = tf.train.get_or_create_global_step() + + logits = build_model( + images, + self.num_classes, + is_training, + self.hparams) + self.predictions, self.cost = helper_utils.setup_loss( + logits, labels) + self.accuracy, self.eval_op = tf.metrics.accuracy( + tf.argmax(labels, 1), tf.argmax(self.predictions, 1)) + self._calc_num_trainable_params() + + # Adds L2 weight decay to the cost + self.cost = helper_utils.decay_weights(self.cost, + self.hparams.weight_decay_rate) + + if is_training: + self._build_train_op() + + # Setup checkpointing for this child model + # Keep 2 or more checkpoints around during training. 
+ with tf.device('/cpu:0'): + self.saver = tf.train.Saver(max_to_keep=2) + + self.init = tf.group(tf.global_variables_initializer(), + tf.local_variables_initializer()) + + def _calc_num_trainable_params(self): + self.num_trainable_params = np.sum([ + np.prod(var.get_shape().as_list()) for var in tf.trainable_variables() + ]) + tf.logging.info('number of trainable params: {}'.format( + self.num_trainable_params)) + + def _build_train_op(self): + """Builds the train op for the cifar model.""" + hparams = self.hparams + tvars = tf.trainable_variables() + grads = tf.gradients(self.cost, tvars) + if hparams.gradient_clipping_by_global_norm > 0.0: + grads, norm = tf.clip_by_global_norm( + grads, hparams.gradient_clipping_by_global_norm) + tf.summary.scalar('grad_norm', norm) + + # Setup the initial learning rate + initial_lr = self.lr_rate_ph + optimizer = tf.train.MomentumOptimizer( + initial_lr, + 0.9, + use_nesterov=True) + + self.optimizer = optimizer + apply_op = optimizer.apply_gradients( + zip(grads, tvars), global_step=self.global_step, name='train_step') + train_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) + with tf.control_dependencies([apply_op]): + self.train_op = tf.group(*train_ops) + + +class CifarModelTrainer(object): + """Trains an instance of the CifarModel class.""" + + def __init__(self, hparams): + self._session = None + self.hparams = hparams + + self.model_dir = os.path.join(FLAGS.checkpoint_dir, 'model') + self.log_dir = os.path.join(FLAGS.checkpoint_dir, 'log') + # Set the random seed to be sure the same validation set + # is used for each model + np.random.seed(0) + self.data_loader = data_utils.DataSet(hparams) + np.random.seed() # Put the random seed back to random + self.data_loader.reset() + + def save_model(self, step=None): + """Dumps model into the backup_dir. + + Args: + step: If provided, creates a checkpoint with the given step + number, instead of overwriting the existing checkpoints. + """ + model_save_name = os.path.join(self.model_dir, 'model.ckpt') + if not tf.gfile.IsDirectory(self.model_dir): + tf.gfile.MakeDirs(self.model_dir) + self.saver.save(self.session, model_save_name, global_step=step) + tf.logging.info('Saved child model') + + def extract_model_spec(self): + """Loads a checkpoint with the architecture structure stored in the name.""" + checkpoint_path = tf.train.latest_checkpoint(self.model_dir) + if checkpoint_path is not None: + self.saver.restore(self.session, checkpoint_path) + tf.logging.info('Loaded child model checkpoint from %s', + checkpoint_path) + else: + self.save_model(step=0) + + def eval_child_model(self, model, data_loader, mode): + """Evaluate the child model. + + Args: + model: image model that will be evaluated. + data_loader: dataset object to extract eval data from. + mode: will the model be evalled on train, val or test. + + Returns: + Accuracy of the model on the specified dataset. + """ + tf.logging.info('Evaluating child model in mode %s', mode) + while True: + try: + with self._new_session(model): + accuracy = helper_utils.eval_child_model( + self.session, + model, + data_loader, + mode) + tf.logging.info('Eval child model accuracy: {}'.format(accuracy)) + # If epoch trained without raising the below errors, break + # from loop. + break + except (tf.errors.AbortedError, tf.errors.UnavailableError) as e: + tf.logging.info('Retryable error caught: %s. 
Retrying.', e) + + return accuracy + + @contextlib.contextmanager + def _new_session(self, m): + """Creates a new session for model m.""" + # Create a new session for this model, initialize + # variables, and save / restore from + # checkpoint. + self._session = tf.Session( + '', + config=tf.ConfigProto( + allow_soft_placement=True, log_device_placement=False)) + self.session.run(m.init) + + # Load in a previous checkpoint, or save this one + self.extract_model_spec() + try: + yield + finally: + tf.Session.reset('') + self._session = None + + def _build_models(self): + """Builds the image models for train and eval.""" + # Determine if we should build the train and eval model. When using + # distributed training we only want to build one or the other and not both. + with tf.variable_scope('model', use_resource=False): + m = CifarModel(self.hparams) + m.build('train') + self._num_trainable_params = m.num_trainable_params + self._saver = m.saver + with tf.variable_scope('model', reuse=True, use_resource=False): + meval = CifarModel(self.hparams) + meval.build('eval') + return m, meval + + def _calc_starting_epoch(self, m): + """Calculates the starting epoch for model m based on global step.""" + hparams = self.hparams + batch_size = hparams.batch_size + steps_per_epoch = int(hparams.train_size / batch_size) + with self._new_session(m): + curr_step = self.session.run(m.global_step) + total_steps = steps_per_epoch * hparams.num_epochs + epochs_left = (total_steps - curr_step) // steps_per_epoch + starting_epoch = hparams.num_epochs - epochs_left + return starting_epoch + + def _run_training_loop(self, m, curr_epoch): + """Trains the cifar model `m` for one epoch.""" + start_time = time.time() + while True: + try: + with self._new_session(m): + train_accuracy = helper_utils.run_epoch_training( + self.session, m, self.data_loader, curr_epoch) + tf.logging.info('Saving model after epoch') + self.save_model(step=curr_epoch) + break + except (tf.errors.AbortedError, tf.errors.UnavailableError) as e: + tf.logging.info('Retryable error caught: %s. 
Retrying.', e) + tf.logging.info('Finished epoch: {}'.format(curr_epoch)) + tf.logging.info('Epoch time(min): {}'.format( + (time.time() - start_time) / 60.0)) + return train_accuracy + + def _compute_final_accuracies(self, meval): + """Run once training is finished to compute final val/test accuracies.""" + valid_accuracy = self.eval_child_model(meval, self.data_loader, 'val') + if self.hparams.eval_test: + test_accuracy = self.eval_child_model(meval, self.data_loader, 'test') + else: + test_accuracy = 0 + tf.logging.info('Test Accuracy: {}'.format(test_accuracy)) + return valid_accuracy, test_accuracy + + def run_model(self): + """Trains and evalutes the image model.""" + hparams = self.hparams + + # Build the child graph + with tf.Graph().as_default(), tf.device( + '/cpu:0' if FLAGS.use_cpu else '/gpu:0'): + m, meval = self._build_models() + + # Figure out what epoch we are on + starting_epoch = self._calc_starting_epoch(m) + + # Run the validation error right at the beginning + valid_accuracy = self.eval_child_model( + meval, self.data_loader, 'val') + tf.logging.info('Before Training Epoch: {} Val Acc: {}'.format( + starting_epoch, valid_accuracy)) + training_accuracy = None + + for curr_epoch in xrange(starting_epoch, hparams.num_epochs): + + # Run one training epoch + training_accuracy = self._run_training_loop(m, curr_epoch) + + valid_accuracy = self.eval_child_model( + meval, self.data_loader, 'val') + tf.logging.info('Epoch: {} Valid Acc: {}'.format( + curr_epoch, valid_accuracy)) + + valid_accuracy, test_accuracy = self._compute_final_accuracies( + meval) + + tf.logging.info( + 'Train Acc: {} Valid Acc: {} Test Acc: {}'.format( + training_accuracy, valid_accuracy, test_accuracy)) + + @property + def saver(self): + return self._saver + + @property + def session(self): + return self._session + + @property + def num_trainable_params(self): + return self._num_trainable_params + + +def main(_): + if FLAGS.dataset not in ['cifar10', 'cifar100']: + raise ValueError('Invalid dataset: %s' % FLAGS.dataset) + hparams = tf.contrib.training.HParams( + train_size=50000, + validation_size=0, + eval_test=1, + dataset=FLAGS.dataset, + data_path=FLAGS.data_path, + batch_size=128, + gradient_clipping_by_global_norm=5.0) + if FLAGS.model_name == 'wrn': + hparams.add_hparam('model_name', 'wrn') + hparams.add_hparam('num_epochs', 200) + hparams.add_hparam('wrn_size', 160) + hparams.add_hparam('lr', 0.1) + hparams.add_hparam('weight_decay_rate', 5e-4) + elif FLAGS.model_name == 'shake_shake_32': + hparams.add_hparam('model_name', 'shake_shake') + hparams.add_hparam('num_epochs', 1800) + hparams.add_hparam('shake_shake_widen_factor', 2) + hparams.add_hparam('lr', 0.01) + hparams.add_hparam('weight_decay_rate', 0.001) + elif FLAGS.model_name == 'shake_shake_96': + hparams.add_hparam('model_name', 'shake_shake') + hparams.add_hparam('num_epochs', 1800) + hparams.add_hparam('shake_shake_widen_factor', 6) + hparams.add_hparam('lr', 0.01) + hparams.add_hparam('weight_decay_rate', 0.001) + elif FLAGS.model_name == 'shake_shake_112': + hparams.add_hparam('model_name', 'shake_shake') + hparams.add_hparam('num_epochs', 1800) + hparams.add_hparam('shake_shake_widen_factor', 7) + hparams.add_hparam('lr', 0.01) + hparams.add_hparam('weight_decay_rate', 0.001) + elif FLAGS.model_name == 'pyramid_net': + hparams.add_hparam('model_name', 'pyramid_net') + hparams.add_hparam('num_epochs', 1800) + hparams.add_hparam('lr', 0.05) + hparams.add_hparam('weight_decay_rate', 5e-5) + hparams.batch_size = 64 + else: + raise 
ValueError('Not Valid Model Name: %s' % FLAGS.model_name) + cifar_trainer = CifarModelTrainer(hparams) + cifar_trainer.run_model() + +if __name__ == '__main__': + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/models/research/autoaugment/wrn.py b/models/research/autoaugment/wrn.py new file mode 100644 index 0000000000000000000000000000000000000000..ea04e19cfc30f52fe49c475b6ed35610e7c87aa4 --- /dev/null +++ b/models/research/autoaugment/wrn.py @@ -0,0 +1,158 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Builds the Wide-ResNet Model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import custom_ops as ops +import numpy as np +import tensorflow as tf + + + +def residual_block( + x, in_filter, out_filter, stride, activate_before_residual=False): + """Adds residual connection to `x` in addition to applying BN->ReLU->3x3 Conv. + + Args: + x: Tensor that is the output of the previous layer in the model. + in_filter: Number of filters `x` has. + out_filter: Number of filters that the output of this layer will have. + stride: Integer that specified what stride should be applied to `x`. + activate_before_residual: Boolean on whether a BN->ReLU should be applied + to x before the convolution is applied. + + Returns: + A Tensor that is the result of applying two sequences of BN->ReLU->3x3 Conv + and then adding that Tensor to `x`. + """ + + if activate_before_residual: # Pass up RELU and BN activation for resnet + with tf.variable_scope('shared_activation'): + x = ops.batch_norm(x, scope='init_bn') + x = tf.nn.relu(x) + orig_x = x + else: + orig_x = x + + block_x = x + if not activate_before_residual: + with tf.variable_scope('residual_only_activation'): + block_x = ops.batch_norm(block_x, scope='init_bn') + block_x = tf.nn.relu(block_x) + + with tf.variable_scope('sub1'): + block_x = ops.conv2d( + block_x, out_filter, 3, stride=stride, scope='conv1') + + with tf.variable_scope('sub2'): + block_x = ops.batch_norm(block_x, scope='bn2') + block_x = tf.nn.relu(block_x) + block_x = ops.conv2d( + block_x, out_filter, 3, stride=1, scope='conv2') + + with tf.variable_scope( + 'sub_add'): # If number of filters do not agree then zero pad them + if in_filter != out_filter: + orig_x = ops.avg_pool(orig_x, stride, stride) + orig_x = ops.zero_pad(orig_x, in_filter, out_filter) + x = orig_x + block_x + return x + + +def _res_add(in_filter, out_filter, stride, x, orig_x): + """Adds `x` with `orig_x`, both of which are layers in the model. + + Args: + in_filter: Number of filters in `orig_x`. + out_filter: Number of filters in `x`. + stride: Integer specifying the stide that should be applied `orig_x`. + x: Tensor that is the output of the previous layer. + orig_x: Tensor that is the output of an earlier layer in the network. 
+ + Returns: + A Tensor that is the result of `x` and `orig_x` being added after + zero padding and striding are applied to `orig_x` to get the shapes + to match. + """ + if in_filter != out_filter: + orig_x = ops.avg_pool(orig_x, stride, stride) + orig_x = ops.zero_pad(orig_x, in_filter, out_filter) + x = x + orig_x + orig_x = x + return x, orig_x + + +def build_wrn_model(images, num_classes, wrn_size): + """Builds the WRN model. + + Build the Wide ResNet model from https://arxiv.org/abs/1605.07146. + + Args: + images: Tensor of images that will be fed into the Wide ResNet Model. + num_classes: Number of classed that the model needs to predict. + wrn_size: Parameter that scales the number of filters in the Wide ResNet + model. + + Returns: + The logits of the Wide ResNet model. + """ + kernel_size = wrn_size + filter_size = 3 + num_blocks_per_resnet = 4 + filters = [ + min(kernel_size, 16), kernel_size, kernel_size * 2, kernel_size * 4 + ] + strides = [1, 2, 2] # stride for each resblock + + # Run the first conv + with tf.variable_scope('init'): + x = images + output_filters = filters[0] + x = ops.conv2d(x, output_filters, filter_size, scope='init_conv') + + first_x = x # Res from the beginning + orig_x = x # Res from previous block + + for block_num in range(1, 4): + with tf.variable_scope('unit_{}_0'.format(block_num)): + activate_before_residual = True if block_num == 1 else False + x = residual_block( + x, + filters[block_num - 1], + filters[block_num], + strides[block_num - 1], + activate_before_residual=activate_before_residual) + for i in range(1, num_blocks_per_resnet): + with tf.variable_scope('unit_{}_{}'.format(block_num, i)): + x = residual_block( + x, + filters[block_num], + filters[block_num], + 1, + activate_before_residual=False) + x, orig_x = _res_add(filters[block_num - 1], filters[block_num], + strides[block_num - 1], x, orig_x) + final_stride_val = np.prod(strides) + x, _ = _res_add(filters[0], filters[3], final_stride_val, x, first_x) + with tf.variable_scope('unit_last'): + x = ops.batch_norm(x, scope='final_bn') + x = tf.nn.relu(x) + x = ops.global_avg_pool(x) + logits = ops.fc(x, num_classes) + return logits diff --git a/models/research/autoencoder/AdditiveGaussianNoiseAutoencoderRunner.py b/models/research/autoencoder/AdditiveGaussianNoiseAutoencoderRunner.py new file mode 100644 index 0000000000000000000000000000000000000000..8d8ee08654985250ac61415df96889b4a4cf5f1b --- /dev/null +++ b/models/research/autoencoder/AdditiveGaussianNoiseAutoencoderRunner.py @@ -0,0 +1,58 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import sklearn.preprocessing as prep +import tensorflow as tf +from tensorflow.examples.tutorials.mnist import input_data + +from autoencoder_models.DenoisingAutoencoder import AdditiveGaussianNoiseAutoencoder + +mnist = input_data.read_data_sets('MNIST_data', one_hot=True) + + +def standard_scale(X_train, X_test): + preprocessor = prep.StandardScaler().fit(X_train) + X_train = preprocessor.transform(X_train) + X_test = preprocessor.transform(X_test) + return X_train, X_test + + +def get_random_block_from_data(data, batch_size): + start_index = np.random.randint(0, len(data) - batch_size) + return data[start_index:(start_index + batch_size)] + + +X_train, X_test = standard_scale(mnist.train.images, mnist.test.images) + +n_samples = int(mnist.train.num_examples) +training_epochs = 20 +batch_size = 128 +display_step = 1 + +autoencoder = 
AdditiveGaussianNoiseAutoencoder( + n_input=784, + n_hidden=200, + transfer_function=tf.nn.softplus, + optimizer=tf.train.AdamOptimizer(learning_rate = 0.001), + scale=0.01) + +for epoch in range(training_epochs): + avg_cost = 0. + total_batch = int(n_samples / batch_size) + # Loop over all batches + for i in range(total_batch): + batch_xs = get_random_block_from_data(X_train, batch_size) + + # Fit training using batch data + cost = autoencoder.partial_fit(batch_xs) + # Compute average loss + avg_cost += cost / n_samples * batch_size + + # Display logs per epoch step + if epoch % display_step == 0: + print("Epoch:", '%d,' % (epoch + 1), + "Cost:", "{:.9f}".format(avg_cost)) + +print("Total cost: " + str(autoencoder.calc_total_cost(X_test))) diff --git a/models/research/autoencoder/AutoencoderRunner.py b/models/research/autoencoder/AutoencoderRunner.py new file mode 100644 index 0000000000000000000000000000000000000000..7f1ab2ecd5a91c12960714ea79a864631e634f8c --- /dev/null +++ b/models/research/autoencoder/AutoencoderRunner.py @@ -0,0 +1,55 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import sklearn.preprocessing as prep +import tensorflow as tf +from tensorflow.examples.tutorials.mnist import input_data + +from autoencoder_models.Autoencoder import Autoencoder + +mnist = input_data.read_data_sets('MNIST_data', one_hot=True) + + +def standard_scale(X_train, X_test): + preprocessor = prep.StandardScaler().fit(X_train) + X_train = preprocessor.transform(X_train) + X_test = preprocessor.transform(X_test) + return X_train, X_test + + +def get_random_block_from_data(data, batch_size): + start_index = np.random.randint(0, len(data) - batch_size) + return data[start_index:(start_index + batch_size)] + + +X_train, X_test = standard_scale(mnist.train.images, mnist.test.images) + +n_samples = int(mnist.train.num_examples) +training_epochs = 20 +batch_size = 128 +display_step = 1 + +autoencoder = Autoencoder(n_layers=[784, 200], + transfer_function = tf.nn.softplus, + optimizer = tf.train.AdamOptimizer(learning_rate = 0.001)) + +for epoch in range(training_epochs): + avg_cost = 0. 
+ total_batch = int(n_samples / batch_size) + # Loop over all batches + for i in range(total_batch): + batch_xs = get_random_block_from_data(X_train, batch_size) + + # Fit training using batch data + cost = autoencoder.partial_fit(batch_xs) + # Compute average loss + avg_cost += cost / n_samples * batch_size + + # Display logs per epoch step + if epoch % display_step == 0: + print("Epoch:", '%d,' % (epoch + 1), + "Cost:", "{:.9f}".format(avg_cost)) + +print("Total cost: " + str(autoencoder.calc_total_cost(X_test))) diff --git a/models/research/autoencoder/MaskingNoiseAutoencoderRunner.py b/models/research/autoencoder/MaskingNoiseAutoencoderRunner.py new file mode 100644 index 0000000000000000000000000000000000000000..b776302e286ff740ba7b8e6f679a54b23944df12 --- /dev/null +++ b/models/research/autoencoder/MaskingNoiseAutoencoderRunner.py @@ -0,0 +1,55 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import sklearn.preprocessing as prep +import tensorflow as tf +from tensorflow.examples.tutorials.mnist import input_data + +from autoencoder_models.DenoisingAutoencoder import MaskingNoiseAutoencoder + +mnist = input_data.read_data_sets('MNIST_data', one_hot=True) + + +def standard_scale(X_train, X_test): + preprocessor = prep.StandardScaler().fit(X_train) + X_train = preprocessor.transform(X_train) + X_test = preprocessor.transform(X_test) + return X_train, X_test + + +def get_random_block_from_data(data, batch_size): + start_index = np.random.randint(0, len(data) - batch_size) + return data[start_index:(start_index + batch_size)] + + +X_train, X_test = standard_scale(mnist.train.images, mnist.test.images) + +n_samples = int(mnist.train.num_examples) +training_epochs = 100 +batch_size = 128 +display_step = 1 + +autoencoder = MaskingNoiseAutoencoder( + n_input=784, + n_hidden=200, + transfer_function=tf.nn.softplus, + optimizer=tf.train.AdamOptimizer(learning_rate=0.001), + dropout_probability=0.95) + +for epoch in range(training_epochs): + avg_cost = 0. 
+ total_batch = int(n_samples / batch_size) + for i in range(total_batch): + batch_xs = get_random_block_from_data(X_train, batch_size) + + cost = autoencoder.partial_fit(batch_xs) + + avg_cost += cost / n_samples * batch_size + + if epoch % display_step == 0: + print("Epoch:", '%d,' % (epoch + 1), + "Cost:", "{:.9f}".format(avg_cost)) + +print("Total cost: " + str(autoencoder.calc_total_cost(X_test))) diff --git a/models/research/autoencoder/README.md b/models/research/autoencoder/README.md new file mode 100644 index 0000000000000000000000000000000000000000..cba7b3b66f59ac9e3810ee1b98d67133296aea25 --- /dev/null +++ b/models/research/autoencoder/README.md @@ -0,0 +1,3 @@ +![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) diff --git a/models/research/autoencoder/VariationalAutoencoderRunner.py b/models/research/autoencoder/VariationalAutoencoderRunner.py new file mode 100644 index 0000000000000000000000000000000000000000..f5ce0045f3c6dfdd357cd874f8ee24df0d8cb3d9 --- /dev/null +++ b/models/research/autoencoder/VariationalAutoencoderRunner.py @@ -0,0 +1,56 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import sklearn.preprocessing as prep +import tensorflow as tf +from tensorflow.examples.tutorials.mnist import input_data + +from autoencoder_models.VariationalAutoencoder import VariationalAutoencoder + +mnist = input_data.read_data_sets('MNIST_data', one_hot=True) + + +def min_max_scale(X_train, X_test): + preprocessor = prep.MinMaxScaler().fit(X_train) + X_train = preprocessor.transform(X_train) + X_test = preprocessor.transform(X_test) + return X_train, X_test + + +def get_random_block_from_data(data, batch_size): + start_index = np.random.randint(0, len(data) - batch_size) + return data[start_index:(start_index + batch_size)] + + +X_train, X_test = min_max_scale(mnist.train.images, mnist.test.images) + +n_samples = int(mnist.train.num_examples) +training_epochs = 20 +batch_size = 128 +display_step = 1 + +autoencoder = VariationalAutoencoder( + n_input=784, + n_hidden=200, + optimizer=tf.train.AdamOptimizer(learning_rate = 0.001)) + +for epoch in range(training_epochs): + avg_cost = 0. 
+ total_batch = int(n_samples / batch_size) + # Loop over all batches + for i in range(total_batch): + batch_xs = get_random_block_from_data(X_train, batch_size) + + # Fit training using batch data + cost = autoencoder.partial_fit(batch_xs) + # Compute average loss + avg_cost += cost / n_samples * batch_size + + # Display logs per epoch step + if epoch % display_step == 0: + print("Epoch:", '%d,' % (epoch + 1), + "Cost:", "{:.9f}".format(avg_cost)) + +print("Total cost: " + str(autoencoder.calc_total_cost(X_test))) diff --git a/models/research/autoencoder/__init__.py b/models/research/autoencoder/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/autoencoder/autoencoder_models/Autoencoder.py b/models/research/autoencoder/autoencoder_models/Autoencoder.py new file mode 100644 index 0000000000000000000000000000000000000000..788a14642306ece056fc53a85ba8c60d87d31826 --- /dev/null +++ b/models/research/autoencoder/autoencoder_models/Autoencoder.py @@ -0,0 +1,91 @@ +import numpy as np +import tensorflow as tf + + +class Autoencoder(object): + + def __init__(self, n_layers, transfer_function=tf.nn.softplus, optimizer=tf.train.AdamOptimizer()): + self.n_layers = n_layers + self.transfer = transfer_function + + network_weights = self._initialize_weights() + self.weights = network_weights + + # model + self.x = tf.placeholder(tf.float32, [None, self.n_layers[0]]) + self.hidden_encode = [] + h = self.x + for layer in range(len(self.n_layers)-1): + h = self.transfer( + tf.add(tf.matmul(h, self.weights['encode'][layer]['w']), + self.weights['encode'][layer]['b'])) + self.hidden_encode.append(h) + + self.hidden_recon = [] + for layer in range(len(self.n_layers)-1): + h = self.transfer( + tf.add(tf.matmul(h, self.weights['recon'][layer]['w']), + self.weights['recon'][layer]['b'])) + self.hidden_recon.append(h) + self.reconstruction = self.hidden_recon[-1] + + # cost + self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0)) + self.optimizer = optimizer.minimize(self.cost) + + init = tf.global_variables_initializer() + self.sess = tf.Session() + self.sess.run(init) + + + def _initialize_weights(self): + all_weights = dict() + initializer = tf.contrib.layers.xavier_initializer() + # Encoding network weights + encoder_weights = [] + for layer in range(len(self.n_layers)-1): + w = tf.Variable( + initializer((self.n_layers[layer], self.n_layers[layer + 1]), + dtype=tf.float32)) + b = tf.Variable( + tf.zeros([self.n_layers[layer + 1]], dtype=tf.float32)) + encoder_weights.append({'w': w, 'b': b}) + # Recon network weights + recon_weights = [] + for layer in range(len(self.n_layers)-1, 0, -1): + w = tf.Variable( + initializer((self.n_layers[layer], self.n_layers[layer - 1]), + dtype=tf.float32)) + b = tf.Variable( + tf.zeros([self.n_layers[layer - 1]], dtype=tf.float32)) + recon_weights.append({'w': w, 'b': b}) + all_weights['encode'] = encoder_weights + all_weights['recon'] = recon_weights + return all_weights + + def partial_fit(self, X): + cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict={self.x: X}) + return cost + + def calc_total_cost(self, X): + return self.sess.run(self.cost, feed_dict={self.x: X}) + + def transform(self, X): + return self.sess.run(self.hidden_encode[-1], feed_dict={self.x: X}) + + def generate(self, hidden=None): + if hidden is None: + hidden = np.random.normal(size=self.weights['encode'][-1]['b']) + return self.sess.run(self.reconstruction, 
feed_dict={self.hidden_encode[-1]: hidden}) + + def reconstruct(self, X): + return self.sess.run(self.reconstruction, feed_dict={self.x: X}) + + def getWeights(self): + raise NotImplementedError + return self.sess.run(self.weights) + + def getBiases(self): + raise NotImplementedError + return self.sess.run(self.weights) + diff --git a/models/research/autoencoder/autoencoder_models/DenoisingAutoencoder.py b/models/research/autoencoder/autoencoder_models/DenoisingAutoencoder.py new file mode 100644 index 0000000000000000000000000000000000000000..22b5dcb44a4079b80bfcfc16e3dcda5b21ca8c1b --- /dev/null +++ b/models/research/autoencoder/autoencoder_models/DenoisingAutoencoder.py @@ -0,0 +1,129 @@ +import tensorflow as tf + +class AdditiveGaussianNoiseAutoencoder(object): + def __init__(self, n_input, n_hidden, transfer_function = tf.nn.softplus, optimizer = tf.train.AdamOptimizer(), + scale = 0.1): + self.n_input = n_input + self.n_hidden = n_hidden + self.transfer = transfer_function + self.scale = tf.placeholder(tf.float32) + self.training_scale = scale + network_weights = self._initialize_weights() + self.weights = network_weights + + # model + self.x = tf.placeholder(tf.float32, [None, self.n_input]) + self.hidden = self.transfer(tf.add(tf.matmul(self.x + scale * tf.random_normal((n_input,)), + self.weights['w1']), + self.weights['b1'])) + self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2']) + + # cost + self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0)) + self.optimizer = optimizer.minimize(self.cost) + + init = tf.global_variables_initializer() + self.sess = tf.Session() + self.sess.run(init) + + def _initialize_weights(self): + all_weights = dict() + all_weights['w1'] = tf.get_variable("w1", shape=[self.n_input, self.n_hidden], + initializer=tf.contrib.layers.xavier_initializer()) + all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype = tf.float32)) + all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype = tf.float32)) + all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype = tf.float32)) + return all_weights + + def partial_fit(self, X): + cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict = {self.x: X, + self.scale: self.training_scale + }) + return cost + + def calc_total_cost(self, X): + return self.sess.run(self.cost, feed_dict = {self.x: X, + self.scale: self.training_scale + }) + + def transform(self, X): + return self.sess.run(self.hidden, feed_dict = {self.x: X, + self.scale: self.training_scale + }) + + def generate(self, hidden=None): + if hidden is None: + hidden = self.sess.run(tf.random_normal([1, self.n_hidden])) + return self.sess.run(self.reconstruction, feed_dict = {self.hidden: hidden}) + + def reconstruct(self, X): + return self.sess.run(self.reconstruction, feed_dict = {self.x: X, + self.scale: self.training_scale + }) + + def getWeights(self): + return self.sess.run(self.weights['w1']) + + def getBiases(self): + return self.sess.run(self.weights['b1']) + + +class MaskingNoiseAutoencoder(object): + def __init__(self, n_input, n_hidden, transfer_function = tf.nn.softplus, optimizer = tf.train.AdamOptimizer(), + dropout_probability = 0.95): + self.n_input = n_input + self.n_hidden = n_hidden + self.transfer = transfer_function + self.dropout_probability = dropout_probability + self.keep_prob = tf.placeholder(tf.float32) + + network_weights = self._initialize_weights() + self.weights = network_weights + + # model + self.x = 
tf.placeholder(tf.float32, [None, self.n_input]) + self.hidden = self.transfer(tf.add(tf.matmul(tf.nn.dropout(self.x, self.keep_prob), self.weights['w1']), + self.weights['b1'])) + self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2']) + + # cost + self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0)) + self.optimizer = optimizer.minimize(self.cost) + + init = tf.global_variables_initializer() + self.sess = tf.Session() + self.sess.run(init) + + def _initialize_weights(self): + all_weights = dict() + all_weights['w1'] = tf.get_variable("w1", shape=[self.n_input, self.n_hidden], + initializer=tf.contrib.layers.xavier_initializer()) + all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype = tf.float32)) + all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype = tf.float32)) + all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype = tf.float32)) + return all_weights + + def partial_fit(self, X): + cost, opt = self.sess.run((self.cost, self.optimizer), + feed_dict = {self.x: X, self.keep_prob: self.dropout_probability}) + return cost + + def calc_total_cost(self, X): + return self.sess.run(self.cost, feed_dict = {self.x: X, self.keep_prob: 1.0}) + + def transform(self, X): + return self.sess.run(self.hidden, feed_dict = {self.x: X, self.keep_prob: 1.0}) + + def generate(self, hidden=None): + if hidden is None: + hidden = self.sess.run(tf.random_normal([1, self.n_hidden])) + return self.sess.run(self.reconstruction, feed_dict = {self.hidden: hidden}) + + def reconstruct(self, X): + return self.sess.run(self.reconstruction, feed_dict = {self.x: X, self.keep_prob: 1.0}) + + def getWeights(self): + return self.sess.run(self.weights['w1']) + + def getBiases(self): + return self.sess.run(self.weights['b1']) diff --git a/models/research/autoencoder/autoencoder_models/VariationalAutoencoder.py b/models/research/autoencoder/autoencoder_models/VariationalAutoencoder.py new file mode 100644 index 0000000000000000000000000000000000000000..3c2556ab89c2d32be0af5e61099aa12f91c1f176 --- /dev/null +++ b/models/research/autoencoder/autoencoder_models/VariationalAutoencoder.py @@ -0,0 +1,70 @@ +import tensorflow as tf + +class VariationalAutoencoder(object): + + def __init__(self, n_input, n_hidden, optimizer = tf.train.AdamOptimizer()): + self.n_input = n_input + self.n_hidden = n_hidden + + network_weights = self._initialize_weights() + self.weights = network_weights + + # model + self.x = tf.placeholder(tf.float32, [None, self.n_input]) + self.z_mean = tf.add(tf.matmul(self.x, self.weights['w1']), self.weights['b1']) + self.z_log_sigma_sq = tf.add(tf.matmul(self.x, self.weights['log_sigma_w1']), self.weights['log_sigma_b1']) + + # sample from gaussian distribution + eps = tf.random_normal(tf.stack([tf.shape(self.x)[0], self.n_hidden]), 0, 1, dtype = tf.float32) + self.z = tf.add(self.z_mean, tf.multiply(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps)) + + self.reconstruction = tf.add(tf.matmul(self.z, self.weights['w2']), self.weights['b2']) + + # cost + reconstr_loss = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0), 1) + latent_loss = -0.5 * tf.reduce_sum(1 + self.z_log_sigma_sq + - tf.square(self.z_mean) + - tf.exp(self.z_log_sigma_sq), 1) + self.cost = tf.reduce_mean(reconstr_loss + latent_loss) + self.optimizer = optimizer.minimize(self.cost) + + init = tf.global_variables_initializer() + self.sess = tf.Session() + self.sess.run(init) + + def _initialize_weights(self): + 
all_weights = dict()
+        all_weights['w1'] = tf.get_variable("w1", shape=[self.n_input, self.n_hidden],
+            initializer=tf.contrib.layers.xavier_initializer())
+        all_weights['log_sigma_w1'] = tf.get_variable("log_sigma_w1", shape=[self.n_input, self.n_hidden],
+            initializer=tf.contrib.layers.xavier_initializer())
+        all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
+        all_weights['log_sigma_b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
+        all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
+        all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
+        return all_weights
+
+    def partial_fit(self, X):
+        cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict={self.x: X})
+        return cost
+
+    def calc_total_cost(self, X):
+        return self.sess.run(self.cost, feed_dict = {self.x: X})
+
+    def transform(self, X):
+        return self.sess.run(self.z_mean, feed_dict={self.x: X})
+
+    def generate(self, hidden = None):
+        if hidden is None:
+            hidden = self.sess.run(tf.random_normal([1, self.n_hidden]))
+        return self.sess.run(self.reconstruction, feed_dict={self.z: hidden})
+
+    def reconstruct(self, X):
+        return self.sess.run(self.reconstruction, feed_dict={self.x: X})
+
+    def getWeights(self):
+        return self.sess.run(self.weights['w1'])
+
+    def getBiases(self):
+        return self.sess.run(self.weights['b1'])
+
diff --git a/models/research/autoencoder/autoencoder_models/__init__.py b/models/research/autoencoder/autoencoder_models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/models/research/brain_coder/README.md b/models/research/brain_coder/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3e2a1656d8f145569266c19c64b41779ccbf308c
--- /dev/null
+++ b/models/research/brain_coder/README.md
@@ -0,0 +1,34 @@
+![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg)
+![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen)
+![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg)
+
+# Brain Coder
+
+*Authors: Daniel Abolafia, Mohammad Norouzi, Quoc Le*
+
+Brain Coder is an experimental environment for code synthesis. We provide code that reproduces the results from our recent paper [Neural Program Synthesis with Priority Queue Training](https://arxiv.org/abs/1801.03526). See single_task/README.md for details on how to build and reproduce those experiments.
+
+## Installation
+
+First install dependencies separately:
+
+* [bazel](https://docs.bazel.build/versions/master/install.html)
+* [TensorFlow](https://www.tensorflow.org/install/)
+* [scipy](https://www.scipy.org/install.html)
+* [absl-py](https://github.com/abseil/abseil-py)
+
+Note: even if you already have these dependencies installed, make sure they are
+up-to-date to avoid unnecessary debugging.
+
+
+## Building
+
+Use bazel from the top-level repo directory.
+
+For example:
+
+```bash
+bazel build single_task:run
+```
+
+View README.md files in subdirectories for more details.
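A possible quick sanity check of the toolchain (an editor's suggestion, not part of the original README) is to run one of the unit tests declared in `common/BUILD` later in this diff, for example:

```bash
# Assumes the working directory is models/research/brain_coder (where WORKSPACE lives).
bazel test common:bf_test
```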
diff --git a/models/research/brain_coder/WORKSPACE b/models/research/brain_coder/WORKSPACE new file mode 100644 index 0000000000000000000000000000000000000000..7c07b5325e71a1684fb38089adeaaa9f4f00a775 --- /dev/null +++ b/models/research/brain_coder/WORKSPACE @@ -0,0 +1,5 @@ +git_repository( + name = "subpar", + remote = "https://github.com/google/subpar", + tag = "1.0.0", +) diff --git a/models/research/brain_coder/common/BUILD b/models/research/brain_coder/common/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..b5f79c25096ca574d4a133f871343eedd985b25e --- /dev/null +++ b/models/research/brain_coder/common/BUILD @@ -0,0 +1,106 @@ +licenses(["notice"]) + +package(default_visibility = [ + "//:__subpackages__", +]) + +py_library( + name = "bf", + srcs = ["bf.py"], +) + +py_test( + name = "bf_test", + srcs = ["bf_test.py"], + deps = [ + ":bf", + # tensorflow dep + ], +) + +py_library( + name = "config_lib", + srcs = ["config_lib.py"], +) + +py_test( + name = "config_lib_test", + srcs = ["config_lib_test.py"], + deps = [ + ":config_lib", + # tensorflow dep + ], +) + +py_library( + name = "reward", + srcs = ["reward.py"], +) + +py_test( + name = "reward_test", + srcs = ["reward_test.py"], + deps = [ + ":reward", + # numpy dep + # tensorflow dep + ], +) + +py_library( + name = "rollout", + srcs = ["rollout.py"], + deps = [ + ":utils", + # numpy dep + # scipy dep + ], +) + +py_test( + name = "rollout_test", + srcs = ["rollout_test.py"], + deps = [ + ":rollout", + # numpy dep + # tensorflow dep + ], +) + +py_library( + name = "schedules", + srcs = ["schedules.py"], + deps = [":config_lib"], +) + +py_test( + name = "schedules_test", + srcs = ["schedules_test.py"], + deps = [ + ":config_lib", + ":schedules", + # numpy dep + # tensorflow dep + ], +) + +py_library( + name = "utils", + srcs = ["utils.py"], + deps = [ + # file dep + # absl dep /logging + # numpy dep + # tensorflow dep + ], +) + +py_test( + name = "utils_test", + srcs = ["utils_test.py"], + deps = [ + ":utils", + # numpy dep + # tensorflow dep + ], +) diff --git a/models/research/brain_coder/common/bf.py b/models/research/brain_coder/common/bf.py new file mode 100644 index 0000000000000000000000000000000000000000..f049c45258f7b78a25b5492108b2f8b37c8a55cd --- /dev/null +++ b/models/research/brain_coder/common/bf.py @@ -0,0 +1,234 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""BrainF**k interpreter. 
+ +Language info: https://en.wikipedia.org/wiki/Brainfuck + +Based on public implementation: +https://github.com/pocmo/Python-Brainfuck/blob/master/brainfuck.py +""" + +from collections import namedtuple +import time + + +EvalResult = namedtuple( + 'EvalResult', ['output', 'success', 'failure_reason', 'steps', 'time', + 'memory', 'program_trace']) + + +ExecutionSnapshot = namedtuple( + 'ExecutionSnapshot', + ['codeptr', 'codechar', 'memptr', 'memval', 'memory', 'next_input', + 'output_buffer']) + + +class Status(object): + SUCCESS = 'success' + TIMEOUT = 'timeout' + STEP_LIMIT = 'step-limit' + SYNTAX_ERROR = 'syntax-error' + + +CHARS = INT_TO_CHAR = ['>', '<', '+', '-', '[', ']', '.', ','] +CHAR_TO_INT = dict([(c, i) for i, c in enumerate(INT_TO_CHAR)]) + + +class LookAheadIterator(object): + """Same API as Python iterator, with additional peek method.""" + + def __init__(self, iterable): + self._it = iter(iterable) + self._current_element = None + self._done = False + self._preload_next() + + def _preload_next(self): + try: + self._current_element = self._it.next() + except StopIteration: + self._done = True + + def next(self): + if self._done: + raise StopIteration + element = self._current_element + self._preload_next() + return element + + def peek(self, default_value=None): + if self._done: + if default_value is None: + raise StopIteration + return default_value + return self._current_element + + +def buildbracemap(code): + """Build jump map. + + Args: + code: List or string or BF chars. + + Returns: + bracemap: dict mapping open and close brace positions in the code to their + destination jumps. Specifically, positions of matching open/close braces + if they exist. + correct_syntax: True if all braces match. False if there are unmatched + braces in the code. Even if there are unmatched braces, a bracemap will + be built, and unmatched braces will map to themselves. + """ + bracestack, bracemap = [], {} + + correct_syntax = True + for position, command in enumerate(code): + if command == '[': + bracestack.append(position) + if command == ']': + if not bracestack: # Unmatched closing brace. + bracemap[position] = position # Don't jump to any position. + correct_syntax = False + continue + start = bracestack.pop() + bracemap[start] = position + bracemap[position] = start + if bracestack: # Unmatched opening braces. + for pos in bracestack: + bracemap[pos] = pos # Don't jump to any position. + correct_syntax = False + return bracemap, correct_syntax + + +def evaluate(code, input_buffer=None, init_memory=None, base=256, timeout=1.0, + max_steps=None, require_correct_syntax=True, output_memory=False, + debug=False): + """Execute BF code. + + Args: + code: String or list of BF characters. Any character not in CHARS will be + ignored. + input_buffer: A list of ints which will be used as the program's input + stream. Each read op "," will read an int from this list. 0's will be + read once the end of the list is reached, or if no input buffer is + given. + init_memory: A list of ints. Memory for first k positions will be + initialized to this list (where k = len(init_memory)). Memory positions + are initialized to 0 by default. + base: Integer base for the memory. When a memory value is incremented to + `base` it will overflow to 0. When a memory value is decremented to -1 + it will underflow to `base` - 1. + timeout: Time limit for program execution in seconds. Set to None to + disable. + max_steps: Execution step limit. 
An execution step is the execution of one + operation (code character), even if that op has been executed before. + Execution exits when this many steps are reached. Set to None to + disable. Disabled by default. + require_correct_syntax: If True, unmatched braces will cause `evaluate` to + return without executing the code. The failure reason will be + `Status.SYNTAX_ERROR`. If False, unmatched braces are ignored + and execution will continue. + output_memory: If True, the state of the memory at the end of execution is + returned. + debug: If True, then a full program trace will be returned. + + Returns: + EvalResult namedtuple containing + output: List of ints which were written out by the program with the "." + operation. + success: Boolean. Whether execution completed successfully. + failure_reason: One of the attributes of `Status`. Gives extra info + about why execution was not successful. + steps: Number of execution steps the program ran for. + time: Amount of time in seconds the program ran for. + memory: If `output_memory` is True, a list of memory cells up to the last + one written to. otherwise, None. + """ + input_iter = ( + LookAheadIterator(input_buffer) if input_buffer is not None + else LookAheadIterator([])) + + # Null memory value. This is the value of an empty memory. Also the value + # returned by the read operation when the input buffer is empty, or the + # end of the buffer is reached. + null_value = 0 + + code = list(code) + bracemap, correct_syntax = buildbracemap(code) # will modify code list + if require_correct_syntax and not correct_syntax: + return EvalResult([], False, Status.SYNTAX_ERROR, 0, 0.0, + [] if output_memory else None, [] if debug else None) + + output_buffer = [] + + codeptr, cellptr = 0, 0 + + cells = list(init_memory) if init_memory else [0] + + program_trace = [] if debug else None + success = True + reason = Status.SUCCESS + start_time = time.time() + steps = 0 + while codeptr < len(code): + command = code[codeptr] + + if debug: + # Add step to program trace. + program_trace.append(ExecutionSnapshot( + codeptr=codeptr, codechar=command, memptr=cellptr, + memval=cells[cellptr], memory=list(cells), + next_input=input_iter.peek(null_value), + output_buffer=list(output_buffer))) + + if command == '>': + cellptr += 1 + if cellptr == len(cells): cells.append(null_value) + + if command == '<': + cellptr = 0 if cellptr <= 0 else cellptr - 1 + + if command == '+': + cells[cellptr] = cells[cellptr] + 1 if cells[cellptr] < (base - 1) else 0 + + if command == '-': + cells[cellptr] = cells[cellptr] - 1 if cells[cellptr] > 0 else (base - 1) + + if command == '[' and cells[cellptr] == 0: codeptr = bracemap[codeptr] + if command == ']' and cells[cellptr] != 0: codeptr = bracemap[codeptr] + + if command == '.': output_buffer.append(cells[cellptr]) + if command == ',': cells[cellptr] = next(input_iter, null_value) + + codeptr += 1 + steps += 1 + + if timeout is not None and time.time() - start_time > timeout: + success = False + reason = Status.TIMEOUT + break + if max_steps is not None and steps >= max_steps: + success = False + reason = Status.STEP_LIMIT + break + + if debug: + # Add step to program trace. 
+ command = code[codeptr] if codeptr < len(code) else '' + program_trace.append(ExecutionSnapshot( + codeptr=codeptr, codechar=command, memptr=cellptr, + memval=cells[cellptr], memory=list(cells), + next_input=input_iter.peek(null_value), + output_buffer=list(output_buffer))) + + return EvalResult( + output=output_buffer, + success=success, + failure_reason=reason, + steps=steps, + time=time.time() - start_time, + memory=cells if output_memory else None, + program_trace=program_trace) + + diff --git a/models/research/brain_coder/common/bf_test.py b/models/research/brain_coder/common/bf_test.py new file mode 100644 index 0000000000000000000000000000000000000000..2cbf505601a96ec1fc819f1d01fe551f2fae4a5d --- /dev/null +++ b/models/research/brain_coder/common/bf_test.py @@ -0,0 +1,137 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""Tests for common.bf.""" + +import tensorflow as tf + +from common import bf # brain coder + + +class BfTest(tf.test.TestCase): + + def assertCorrectOutput(self, target_output, eval_result): + self.assertEqual(target_output, eval_result.output) + self.assertTrue(eval_result.success) + self.assertEqual(bf.Status.SUCCESS, eval_result.failure_reason) + + def testBasicOps(self): + self.assertCorrectOutput( + [3, 1, 2], + bf.evaluate('+++.--.+.')) + self.assertCorrectOutput( + [1, 1, 2], + bf.evaluate('+.<.>++.')) + self.assertCorrectOutput( + [0], + bf.evaluate('+,.')) + self.assertCorrectOutput( + [ord(char) for char in 'Hello World!\n'], + bf.evaluate( + '>++++++++[-<+++++++++>]<.>>+>-[+]++>++>+++[>[->+++<<+++>]<<]>-----' + '.>->+++..+++.>-.<<+[>[+>+]>>]<--------------.>>.+++.------.-------' + '-.>+.>+.')) + + def testBase(self): + self.assertCorrectOutput( + [1, 4], + bf.evaluate('+.--.', base=5, input_buffer=[])) + + def testInputBuffer(self): + self.assertCorrectOutput( + [2, 3, 4], + bf.evaluate('>,[>,]<[.<]', input_buffer=[4, 3, 2])) + + def testBadChars(self): + self.assertCorrectOutput( + [2, 3, 4], + bf.evaluate('>,[>,]hello----.[[[[[>+.', + input_buffer=[], + base=10, + require_correct_syntax=False)) + + eval_result = bf.evaluate( + '+++.]]]]>----.[[[[[>+.', + input_buffer=[], + base=10, + require_correct_syntax=True) + self.assertEqual([], eval_result.output) + self.assertFalse(eval_result.success) + self.assertEqual(bf.Status.SYNTAX_ERROR, + eval_result.failure_reason) + + def testTimeout(self): + er = bf.evaluate('+.[].', base=5, input_buffer=[], timeout=0.1) + self.assertEqual( + ([1], False, bf.Status.TIMEOUT), + (er.output, er.success, er.failure_reason)) + self.assertTrue(0.07 < er.time < 0.21) + + er = bf.evaluate('+.[-].', base=5, input_buffer=[], timeout=0.1) + self.assertEqual( + ([1, 0], True, bf.Status.SUCCESS), + (er.output, er.success, er.failure_reason)) + self.assertTrue(er.time < 0.15) + + def testMaxSteps(self): + er = bf.evaluate('+.[].', base=5, input_buffer=[], timeout=None, + max_steps=100) + self.assertEqual( + ([1], False, bf.Status.STEP_LIMIT, 100), + (er.output, er.success, er.failure_reason, er.steps)) + + er = bf.evaluate('+.[-].', base=5, input_buffer=[], timeout=None, + max_steps=100) + self.assertEqual( + ([1, 0], True, bf.Status.SUCCESS), + (er.output, er.success, er.failure_reason)) + self.assertTrue(er.steps < 100) + + def testOutputMemory(self): + er = bf.evaluate('+>++>+++>++++.', base=256, input_buffer=[], + output_memory=True) + self.assertEqual( + ([4], True, bf.Status.SUCCESS), + (er.output, er.success, er.failure_reason)) + self.assertEqual([1, 2, 
3, 4], er.memory) + + def testProgramTrace(self): + es = bf.ExecutionSnapshot + er = bf.evaluate(',[.>,].', base=256, input_buffer=[2, 1], debug=True) + self.assertEqual( + [es(codeptr=0, codechar=',', memptr=0, memval=0, memory=[0], + next_input=2, output_buffer=[]), + es(codeptr=1, codechar='[', memptr=0, memval=2, memory=[2], + next_input=1, output_buffer=[]), + es(codeptr=2, codechar='.', memptr=0, memval=2, memory=[2], + next_input=1, output_buffer=[]), + es(codeptr=3, codechar='>', memptr=0, memval=2, memory=[2], + next_input=1, output_buffer=[2]), + es(codeptr=4, codechar=',', memptr=1, memval=0, memory=[2, 0], + next_input=1, output_buffer=[2]), + es(codeptr=5, codechar=']', memptr=1, memval=1, memory=[2, 1], + next_input=0, output_buffer=[2]), + es(codeptr=2, codechar='.', memptr=1, memval=1, memory=[2, 1], + next_input=0, output_buffer=[2]), + es(codeptr=3, codechar='>', memptr=1, memval=1, memory=[2, 1], + next_input=0, output_buffer=[2, 1]), + es(codeptr=4, codechar=',', memptr=2, memval=0, memory=[2, 1, 0], + next_input=0, output_buffer=[2, 1]), + es(codeptr=5, codechar=']', memptr=2, memval=0, memory=[2, 1, 0], + next_input=0, output_buffer=[2, 1]), + es(codeptr=6, codechar='.', memptr=2, memval=0, memory=[2, 1, 0], + next_input=0, output_buffer=[2, 1]), + es(codeptr=7, codechar='', memptr=2, memval=0, memory=[2, 1, 0], + next_input=0, output_buffer=[2, 1, 0])], + er.program_trace) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/brain_coder/common/config_lib.py b/models/research/brain_coder/common/config_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..733fa202f2e500f964beff2111cb7445fa66a9e1 --- /dev/null +++ b/models/research/brain_coder/common/config_lib.py @@ -0,0 +1,337 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""Objects for storing configuration and passing config into binaries. + +Config class stores settings and hyperparameters for models, data, and anything +else that may be specific to a particular run. +""" + +import ast +import itertools +from six.moves import xrange + + +class Config(dict): + """Stores model configuration, hyperparameters, or dataset parameters.""" + + def __getattr__(self, attr): + return self[attr] + + def __setattr__(self, attr, value): + self[attr] = value + + def pretty_str(self, new_lines=True, indent=2, final_indent=0): + prefix = (' ' * indent) if new_lines else '' + final_prefix = (' ' * final_indent) if new_lines else '' + kv = ['%s%s=%s' % (prefix, k, + (repr(v) if not isinstance(v, Config) + else v.pretty_str(new_lines=new_lines, + indent=indent+2, + final_indent=indent))) + for k, v in self.items()] + if new_lines: + return 'Config(\n%s\n%s)' % (',\n'.join(kv), final_prefix) + else: + return 'Config(%s)' % ', '.join(kv) + + def _update_iterator(self, *args, **kwargs): + """Convert mixed input into an iterator over (key, value) tuples. + + Follows the dict.update call signature. + + Args: + *args: (Optional) Pass a dict or iterable of (key, value) 2-tuples as + an unnamed argument. Only one unnamed argument allowed. + **kwargs: (Optional) Pass (key, value) pairs as named arguments, where the + argument name is the key and the argument value is the value. + + Returns: + An iterator over (key, value) tuples given in the input. + + Raises: + TypeError: If more than one unnamed argument is given. 
+ """ + if len(args) > 1: + raise TypeError('Expected at most 1 unnamed arguments, got %d' + % len(args)) + obj = args[0] if args else dict() + if isinstance(obj, dict): + return itertools.chain(obj.items(), kwargs.items()) + # Assume obj is an iterable of 2-tuples. + return itertools.chain(obj, kwargs.items()) + + def make_default(self, keys=None): + """Convert OneOf objects into their default configs. + + Recursively calls into Config objects. + + Args: + keys: Iterable of key names to check. If None, all keys in self will be + used. + """ + if keys is None: + keys = self.keys() + for k in keys: + # Replace OneOf with its default value. + if isinstance(self[k], OneOf): + self[k] = self[k].default() + # Recursively call into all Config objects, even those that came from + # OneOf objects in the previous code line (for nested OneOf objects). + if isinstance(self[k], Config): + self[k].make_default() + + def update(self, *args, **kwargs): + """Same as dict.update except nested Config objects are updated. + + Args: + *args: (Optional) Pass a dict or list of (key, value) 2-tuples as unnamed + argument. + **kwargs: (Optional) Pass (key, value) pairs as named arguments, where the + argument name is the key and the argument value is the value. + """ + key_set = set(self.keys()) + for k, v in self._update_iterator(*args, **kwargs): + if k in key_set: + key_set.remove(k) # This key is updated so exclude from make_default. + if k in self and isinstance(self[k], Config) and isinstance(v, dict): + self[k].update(v) + elif k in self and isinstance(self[k], OneOf) and isinstance(v, dict): + # Replace OneOf with the chosen config. + self[k] = self[k].update(v) + else: + self[k] = v + self.make_default(key_set) + + def strict_update(self, *args, **kwargs): + """Same as Config.update except keys and types are not allowed to change. + + If a given key is not already in this instance, an exception is raised. If a + given value does not have the same type as the existing value for the same + key, an exception is raised. Use this method to catch config mistakes. + + Args: + *args: (Optional) Pass a dict or list of (key, value) 2-tuples as unnamed + argument. + **kwargs: (Optional) Pass (key, value) pairs as named arguments, where the + argument name is the key and the argument value is the value. + + Raises: + TypeError: If more than one unnamed argument is given. + TypeError: If new value type does not match existing type. + KeyError: If a given key is not already defined in this instance. + """ + key_set = set(self.keys()) + for k, v in self._update_iterator(*args, **kwargs): + if k in self: + key_set.remove(k) # This key is updated so exclude from make_default. + if isinstance(self[k], Config): + if not isinstance(v, dict): + raise TypeError('dict required for Config value, got %s' % type(v)) + self[k].strict_update(v) + elif isinstance(self[k], OneOf): + if not isinstance(v, dict): + raise TypeError('dict required for OneOf value, got %s' % type(v)) + # Replace OneOf with the chosen config. + self[k] = self[k].strict_update(v) + else: + if not isinstance(v, type(self[k])): + raise TypeError('Expecting type %s for key %s, got type %s' + % (type(self[k]), k, type(v))) + self[k] = v + else: + raise KeyError( + 'Key %s does not exist. New key creation not allowed in ' + 'strict_update.' 
% k) + self.make_default(key_set) + + @staticmethod + def from_str(config_str): + """Inverse of Config.__str__.""" + parsed = ast.literal_eval(config_str) + assert isinstance(parsed, dict) + + def _make_config(dictionary): + for k, v in dictionary.items(): + if isinstance(v, dict): + dictionary[k] = _make_config(v) + return Config(**dictionary) + return _make_config(parsed) + + @staticmethod + def parse(key_val_string): + """Parse hyperparameter string into Config object. + + Format is 'key=val,key=val,...' + Values can be any python literal, or another Config object encoded as + 'c(key=val,key=val,...)'. + c(...) expressions can be arbitrarily nested. + + Example: + 'a=1,b=3e-5,c=[1,2,3],d="hello world",e={"a":1,"b":2},f=c(x=1,y=[10,20])' + + Args: + key_val_string: The hyperparameter string. + + Returns: + Config object parsed from the input string. + """ + if not key_val_string.strip(): + return Config() + def _pair_to_kv(pair): + split_index = pair.find('=') + key, val = pair[:split_index].strip(), pair[split_index+1:].strip() + if val.startswith('c(') and val.endswith(')'): + val = Config.parse(val[2:-1]) + else: + val = ast.literal_eval(val) + return key, val + return Config(**dict([_pair_to_kv(pair) + for pair in _comma_iterator(key_val_string)])) + + +class OneOf(object): + """Stores branching config. + + In some cases there may be options which each have their own set of config + params. For example, if specifying config for an environment, each environment + can have custom config options. OneOf is a way to organize branching config. + + Usage example: + one_of = OneOf( + [Config(a=1, b=2), + Config(a=2, c='hello'), + Config(a=3, d=10, e=-10)], + a=1) + config = one_of.strict_update(Config(a=3, d=20)) + config == {'a': 3, 'd': 20, 'e': -10} + """ + + def __init__(self, choices, **kwargs): + """Constructor. + + Usage: OneOf([Config(...), Config(...), ...], attribute=default_value) + + Args: + choices: An iterable of Config objects. When update/strict_update is + called on this OneOf, one of these Config will be selected. + **kwargs: Give exactly one config attribute to branch on. The value of + this attribute during update/strict_update will determine which + Config is used. + + Raises: + ValueError: If kwargs does not contain exactly one entry. Should give one + named argument which is used as the attribute to condition on. + """ + if len(kwargs) != 1: + raise ValueError( + 'Incorrect usage. Must give exactly one named argument. The argument ' + 'name is the config attribute to condition on, and the argument ' + 'value is the default choice. Got %d named arguments.' % len(kwargs)) + key, default_value = kwargs.items()[0] + self.key = key + self.default_value = default_value + + # Make sure each choice is a Config object. + for config in choices: + if not isinstance(config, Config): + raise TypeError('choices must be a list of Config objects. Got %s.' + % type(config)) + + # Map value for key to the config with that value. + self.value_map = {config[key]: config for config in choices} + self.default_config = self.value_map[self.default_value] + + # Make sure there are no duplicate values. + if len(self.value_map) != len(choices): + raise ValueError('Multiple choices given for the same value of %s.' % key) + + # Check that the default value is valid. + if self.default_value not in self.value_map: + raise ValueError( + 'Default value is not an available choice. Got %s=%s. Choices are %s.' 
+ % (key, self.default_value, self.value_map.keys())) + + def default(self): + return self.default_config + + def update(self, other): + """Choose a config and update it. + + If `other` is a Config, one of the config choices is selected and updated. + Otherwise `other` is returned. + + Args: + other: Will update chosen config with this value by calling `update` on + the config. + + Returns: + The chosen config after updating it, or `other` if no config could be + selected. + """ + if not isinstance(other, Config): + return other + if self.key not in other or other[self.key] not in self.value_map: + return other + target = self.value_map[other[self.key]] + target.update(other) + return target + + def strict_update(self, config): + """Choose a config and update it. + + `config` must be a Config object. `config` must have the key used to select + among the config choices, and that key must have a value which one of the + config choices has. + + Args: + config: A Config object. the chosen config will be update by calling + `strict_update`. + + Returns: + The chosen config after updating it. + + Raises: + TypeError: If `config` is not a Config instance. + ValueError: If `config` does not have the branching key in its key set. + ValueError: If the value of the config's branching key is not one of the + valid choices. + """ + if not isinstance(config, Config): + raise TypeError('Expecting Config instance, got %s.' % type(config)) + if self.key not in config: + raise ValueError( + 'Branching key %s required but not found in %s' % (self.key, config)) + if config[self.key] not in self.value_map: + raise ValueError( + 'Value %s for key %s is not a possible choice. Choices are %s.' + % (config[self.key], self.key, self.value_map.keys())) + target = self.value_map[config[self.key]] + target.strict_update(config) + return target + + +def _next_comma(string, start_index): + """Finds the position of the next comma not used in a literal collection.""" + paren_count = 0 + for i in xrange(start_index, len(string)): + c = string[i] + if c == '(' or c == '[' or c == '{': + paren_count += 1 + elif c == ')' or c == ']' or c == '}': + paren_count -= 1 + if paren_count == 0 and c == ',': + return i + return -1 + + +def _comma_iterator(string): + index = 0 + while 1: + next_index = _next_comma(string, index) + if next_index == -1: + yield string[index:] + return + yield string[index:next_index] + index = next_index + 1 diff --git a/models/research/brain_coder/common/config_lib_test.py b/models/research/brain_coder/common/config_lib_test.py new file mode 100644 index 0000000000000000000000000000000000000000..cdc96f92d2428f06e780930979662fdfda92e3f5 --- /dev/null +++ b/models/research/brain_coder/common/config_lib_test.py @@ -0,0 +1,425 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""Tests for common.config_lib.""" + +import tensorflow as tf + +from common import config_lib # brain coder + + +class ConfigLibTest(tf.test.TestCase): + + def testConfig(self): + config = config_lib.Config(hello='world', foo='bar', num=123, f=56.7) + self.assertEqual('world', config.hello) + self.assertEqual('bar', config['foo']) + config.hello = 'everyone' + config['bar'] = 9000 + self.assertEqual('everyone', config['hello']) + self.assertEqual(9000, config.bar) + self.assertEqual(5, len(config)) + + def testConfigUpdate(self): + config = config_lib.Config(a=1, b=2, c=3) + config.update({'b': 10, 'd': 4}) + self.assertEqual({'a': 1, 'b': 10, 'c': 3, 'd': 4}, config) 
+ + config = config_lib.Config(a=1, b=2, c=3) + config.update(b=10, d=4) + self.assertEqual({'a': 1, 'b': 10, 'c': 3, 'd': 4}, config) + + config = config_lib.Config(a=1, b=2, c=3) + config.update({'e': 5}, b=10, d=4) + self.assertEqual({'a': 1, 'b': 10, 'c': 3, 'd': 4, 'e': 5}, config) + + config = config_lib.Config( + a=1, + b=2, + x=config_lib.Config( + l='a', + y=config_lib.Config(m=1, n=2), + z=config_lib.Config( + q=config_lib.Config(a=10, b=20), + r=config_lib.Config(s=1, t=2)))) + config.update(x={'y': {'m': 10}, 'z': {'r': {'s': 5}}}) + self.assertEqual( + config_lib.Config( + a=1, b=2, + x=config_lib.Config( + l='a', + y=config_lib.Config(m=10, n=2), + z=config_lib.Config( + q=config_lib.Config(a=10, b=20), + r=config_lib.Config(s=5, t=2)))), + config) + + config = config_lib.Config( + foo='bar', + num=100, + x=config_lib.Config(a=1, b=2, c=config_lib.Config(h=10, i=20, j=30)), + y=config_lib.Config(qrs=5, tuv=10), + d={'a': 1, 'b': 2}, + l=[1, 2, 3]) + config.update( + config_lib.Config( + foo='hat', + num=50.5, + x={'a': 5, 'z': -10}, + y=config_lib.Config(wxyz=-1)), + d={'a': 10, 'c': 20}, + l=[3, 4, 5, 6]) + self.assertEqual( + config_lib.Config( + foo='hat', + num=50.5, + x=config_lib.Config(a=5, b=2, z=-10, + c=config_lib.Config(h=10, i=20, j=30)), + y=config_lib.Config(qrs=5, tuv=10, wxyz=-1), + d={'a': 10, 'c': 20}, + l=[3, 4, 5, 6]), + config) + self.assertTrue(isinstance(config.x, config_lib.Config)) + self.assertTrue(isinstance(config.x.c, config_lib.Config)) + self.assertTrue(isinstance(config.y, config_lib.Config)) + + config = config_lib.Config( + foo='bar', + num=100, + x=config_lib.Config(a=1, b=2, c=config_lib.Config(h=10, i=20, j=30)), + y=config_lib.Config(qrs=5, tuv=10), + d={'a': 1, 'b': 2}, + l=[1, 2, 3]) + config.update( + config_lib.Config( + foo=1234, + num='hello', + x={'a': 5, 'z': -10, 'c': {'h': -5, 'k': 40}}, + y=[1, 2, 3, 4], + d='stuff', + l={'a': 1, 'b': 2})) + self.assertEqual( + config_lib.Config( + foo=1234, + num='hello', + x=config_lib.Config(a=5, b=2, z=-10, + c=config_lib.Config(h=-5, i=20, j=30, k=40)), + y=[1, 2, 3, 4], + d='stuff', + l={'a': 1, 'b': 2}), + config) + self.assertTrue(isinstance(config.x, config_lib.Config)) + self.assertTrue(isinstance(config.x.c, config_lib.Config)) + self.assertTrue(isinstance(config.y, list)) + + def testConfigStrictUpdate(self): + config = config_lib.Config(a=1, b=2, c=3) + config.strict_update({'b': 10, 'c': 20}) + self.assertEqual({'a': 1, 'b': 10, 'c': 20}, config) + + config = config_lib.Config(a=1, b=2, c=3) + config.strict_update(b=10, c=20) + self.assertEqual({'a': 1, 'b': 10, 'c': 20}, config) + + config = config_lib.Config(a=1, b=2, c=3, d=4) + config.strict_update({'d': 100}, b=10, a=20) + self.assertEqual({'a': 20, 'b': 10, 'c': 3, 'd': 100}, config) + + config = config_lib.Config( + a=1, + b=2, + x=config_lib.Config( + l='a', + y=config_lib.Config(m=1, n=2), + z=config_lib.Config( + q=config_lib.Config(a=10, b=20), + r=config_lib.Config(s=1, t=2)))) + config.strict_update(x={'y': {'m': 10}, 'z': {'r': {'s': 5}}}) + self.assertEqual( + config_lib.Config( + a=1, b=2, + x=config_lib.Config( + l='a', + y=config_lib.Config(m=10, n=2), + z=config_lib.Config( + q=config_lib.Config(a=10, b=20), + r=config_lib.Config(s=5, t=2)))), + config) + + config = config_lib.Config( + foo='bar', + num=100, + x=config_lib.Config(a=1, b=2, c=config_lib.Config(h=10, i=20, j=30)), + y=config_lib.Config(qrs=5, tuv=10), + d={'a': 1, 'b': 2}, + l=[1, 2, 3]) + config.strict_update( + config_lib.Config( + foo='hat', + 
num=50, + x={'a': 5, 'c': {'h': 100}}, + y=config_lib.Config(tuv=-1)), + d={'a': 10, 'c': 20}, + l=[3, 4, 5, 6]) + self.assertEqual( + config_lib.Config( + foo='hat', + num=50, + x=config_lib.Config(a=5, b=2, + c=config_lib.Config(h=100, i=20, j=30)), + y=config_lib.Config(qrs=5, tuv=-1), + d={'a': 10, 'c': 20}, + l=[3, 4, 5, 6]), + config) + + def testConfigStrictUpdateFail(self): + config = config_lib.Config(a=1, b=2, c=3, x=config_lib.Config(a=1, b=2)) + with self.assertRaises(KeyError): + config.strict_update({'b': 10, 'c': 20, 'd': 50}) + with self.assertRaises(KeyError): + config.strict_update(b=10, d=50) + with self.assertRaises(KeyError): + config.strict_update(x={'c': 3}) + with self.assertRaises(TypeError): + config.strict_update(a='string') + with self.assertRaises(TypeError): + config.strict_update(x={'a': 'string'}) + with self.assertRaises(TypeError): + config.strict_update(x=[1, 2, 3]) + + def testConfigFromStr(self): + config = config_lib.Config.from_str("{'c': {'d': 5}, 'b': 2, 'a': 1}") + self.assertEqual( + {'c': {'d': 5}, 'b': 2, 'a': 1}, config) + self.assertTrue(isinstance(config, config_lib.Config)) + self.assertTrue(isinstance(config.c, config_lib.Config)) + + def testConfigParse(self): + config = config_lib.Config.parse( + 'hello="world",num=1234.5,lst=[10,20.5,True,"hi",("a","b","c")],' + 'dct={9:10,"stuff":"qwerty","subdict":{1:True,2:False}},' + 'subconfig=c(a=1,b=[1,2,[3,4]],c=c(f="f",g="g"))') + self.assertEqual( + {'hello': 'world', 'num': 1234.5, + 'lst': [10, 20.5, True, 'hi', ('a', 'b', 'c')], + 'dct': {9: 10, 'stuff': 'qwerty', 'subdict': {1: True, 2: False}}, + 'subconfig': {'a': 1, 'b': [1, 2, [3, 4]], 'c': {'f': 'f', 'g': 'g'}}}, + config) + self.assertTrue(isinstance(config, config_lib.Config)) + self.assertTrue(isinstance(config.subconfig, config_lib.Config)) + self.assertTrue(isinstance(config.subconfig.c, config_lib.Config)) + self.assertFalse(isinstance(config.dct, config_lib.Config)) + self.assertFalse(isinstance(config.dct['subdict'], config_lib.Config)) + self.assertTrue(isinstance(config.lst[4], tuple)) + + def testConfigParseErrors(self): + with self.assertRaises(SyntaxError): + config_lib.Config.parse('a=[1,2,b="hello"') + with self.assertRaises(SyntaxError): + config_lib.Config.parse('a=1,b=c(x="a",y="b"') + with self.assertRaises(SyntaxError): + config_lib.Config.parse('a=1,b=c(x="a")y="b"') + with self.assertRaises(SyntaxError): + config_lib.Config.parse('a=1,b=c(x="a"),y="b",') + + def testOneOf(self): + def make_config(): + return config_lib.Config( + data=config_lib.OneOf( + [config_lib.Config(task=1, a='hello'), + config_lib.Config(task=2, a='world', b='stuff'), + config_lib.Config(task=3, c=1234)], + task=2), + model=config_lib.Config(stuff=1)) + + config = make_config() + config.update(config_lib.Config.parse( + 'model=c(stuff=2),data=c(task=1,a="hi")')) + self.assertEqual( + config_lib.Config( + data=config_lib.Config(task=1, a='hi'), + model=config_lib.Config(stuff=2)), + config) + + config = make_config() + config.update(config_lib.Config.parse( + 'model=c(stuff=2),data=c(task=2,a="hi")')) + self.assertEqual( + config_lib.Config( + data=config_lib.Config(task=2, a='hi', b='stuff'), + model=config_lib.Config(stuff=2)), + config) + + config = make_config() + config.update(config_lib.Config.parse( + 'model=c(stuff=2),data=c(task=3)')) + self.assertEqual( + config_lib.Config( + data=config_lib.Config(task=3, c=1234), + model=config_lib.Config(stuff=2)), + config) + + config = make_config() + config.update(config_lib.Config.parse( + 
'model=c(stuff=2)')) + self.assertEqual( + config_lib.Config( + data=config_lib.Config(task=2, a='world', b='stuff'), + model=config_lib.Config(stuff=2)), + config) + + config = make_config() + config.update(config_lib.Config.parse( + 'model=c(stuff=2),data=c(task=4,d=9999)')) + self.assertEqual( + config_lib.Config( + data=config_lib.Config(task=4, d=9999), + model=config_lib.Config(stuff=2)), + config) + + config = make_config() + config.update(config_lib.Config.parse( + 'model=c(stuff=2),data=5')) + self.assertEqual( + config_lib.Config( + data=5, + model=config_lib.Config(stuff=2)), + config) + + def testOneOfStrict(self): + def make_config(): + return config_lib.Config( + data=config_lib.OneOf( + [config_lib.Config(task=1, a='hello'), + config_lib.Config(task=2, a='world', b='stuff'), + config_lib.Config(task=3, c=1234)], + task=2), + model=config_lib.Config(stuff=1)) + + config = make_config() + config.strict_update(config_lib.Config.parse( + 'model=c(stuff=2),data=c(task=1,a="hi")')) + self.assertEqual( + config_lib.Config( + data=config_lib.Config(task=1, a='hi'), + model=config_lib.Config(stuff=2)), + config) + + config = make_config() + config.strict_update(config_lib.Config.parse( + 'model=c(stuff=2),data=c(task=2,a="hi")')) + self.assertEqual( + config_lib.Config( + data=config_lib.Config(task=2, a='hi', b='stuff'), + model=config_lib.Config(stuff=2)), + config) + + config = make_config() + config.strict_update(config_lib.Config.parse( + 'model=c(stuff=2),data=c(task=3)')) + self.assertEqual( + config_lib.Config( + data=config_lib.Config(task=3, c=1234), + model=config_lib.Config(stuff=2)), + config) + + config = make_config() + config.strict_update(config_lib.Config.parse( + 'model=c(stuff=2)')) + self.assertEqual( + config_lib.Config( + data=config_lib.Config(task=2, a='world', b='stuff'), + model=config_lib.Config(stuff=2)), + config) + + def testNestedOneOf(self): + def make_config(): + return config_lib.Config( + data=config_lib.OneOf( + [config_lib.Config(task=1, a='hello'), + config_lib.Config( + task=2, + a=config_lib.OneOf( + [config_lib.Config(x=1, y=2), + config_lib.Config(x=-1, y=1000, z=4)], + x=1)), + config_lib.Config(task=3, c=1234)], + task=2), + model=config_lib.Config(stuff=1)) + + config = make_config() + config.update(config_lib.Config.parse( + 'model=c(stuff=2),data=c(task=2,a=c(x=-1,z=8))')) + self.assertEqual( + config_lib.Config( + data=config_lib.Config( + task=2, + a=config_lib.Config(x=-1, y=1000, z=8)), + model=config_lib.Config(stuff=2)), + config) + + config = make_config() + config.strict_update(config_lib.Config.parse( + 'model=c(stuff=2),data=c(task=2,a=c(x=-1,z=8))')) + self.assertEqual( + config_lib.Config( + data=config_lib.Config( + task=2, + a=config_lib.Config(x=-1, y=1000, z=8)), + model=config_lib.Config(stuff=2)), + config) + + config = make_config() + config.update(config_lib.Config.parse('model=c(stuff=2)')) + self.assertEqual( + config_lib.Config( + data=config_lib.Config( + task=2, + a=config_lib.Config(x=1, y=2)), + model=config_lib.Config(stuff=2)), + config) + + config = make_config() + config.strict_update(config_lib.Config.parse('model=c(stuff=2)')) + self.assertEqual( + config_lib.Config( + data=config_lib.Config( + task=2, + a=config_lib.Config(x=1, y=2)), + model=config_lib.Config(stuff=2)), + config) + + def testOneOfStrictErrors(self): + def make_config(): + return config_lib.Config( + data=config_lib.OneOf( + [config_lib.Config(task=1, a='hello'), + config_lib.Config(task=2, a='world', b='stuff'), + 
config_lib.Config(task=3, c=1234)], + task=2), + model=config_lib.Config(stuff=1)) + + config = make_config() + with self.assertRaises(TypeError): + config.strict_update(config_lib.Config.parse( + 'model=c(stuff=2),data=[1,2,3]')) + + config = make_config() + with self.assertRaises(KeyError): + config.strict_update(config_lib.Config.parse( + 'model=c(stuff=2),data=c(task=3,c=5678,d=9999)')) + + config = make_config() + with self.assertRaises(ValueError): + config.strict_update(config_lib.Config.parse( + 'model=c(stuff=2),data=c(task=4,d=9999)')) + + config = make_config() + with self.assertRaises(TypeError): + config.strict_update(config_lib.Config.parse( + 'model=c(stuff=2),data=5')) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/brain_coder/common/reward.py b/models/research/brain_coder/common/reward.py new file mode 100644 index 0000000000000000000000000000000000000000..87e01c9c52e1ee22f2745dce12bc5e2726711ff7 --- /dev/null +++ b/models/research/brain_coder/common/reward.py @@ -0,0 +1,390 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""Reward functions, distance functions, and reward managers.""" + +from abc import ABCMeta +from abc import abstractmethod +from math import log + + +# All sequences here are assumed to be lists of ints bounded +# between 0 and `base`-1 (inclusive). + + +################################# +### Scalar Distance Functions ### +################################# + + +def abs_diff(a, b, base=0): + """Absolute value of difference between scalars. + + abs_diff is symmetric, i.e. `a` and `b` are interchangeable. + + Args: + a: First argument. An int. + b: Seconds argument. An int. + base: Dummy argument so that the argument signature matches other scalar + diff functions. abs_diff is the same in all bases. + + Returns: + abs(a - b). + """ + del base # Unused. + return abs(a - b) + + +def mod_abs_diff(a, b, base): + """Shortest distance between `a` and `b` in the modular integers base `base`. + + The smallest distance between a and b is returned. + Example: mod_abs_diff(1, 99, 100) ==> 2. It is not 98. + + mod_abs_diff is symmetric, i.e. `a` and `b` are interchangeable. + + Args: + a: First argument. An int. + b: Seconds argument. An int. + base: The modulo base. A positive int. + + Returns: + Shortest distance. + """ + diff = abs(a - b) + if diff >= base: + diff %= base + return min(diff, (-diff) + base) + + +############################### +### List Distance Functions ### +############################### + + +def absolute_distance(pred, target, base, scalar_diff_fn=abs_diff): + """Asymmetric list distance function. + + List distance is the sum of element-wise distances, like Hamming distance, but + where `pred` can be longer or shorter than `target`. For each position in both + `pred` and `target`, distance between those elements is computed with + `scalar_diff_fn`. For missing or extra elements in `pred`, the maximum + distance is assigned, which is equal to `base`. + + Distance is 0 when `pred` and `target` are identical, and will be a positive + integer when they are not. + + Args: + pred: Prediction list. Distance from this list is computed. + target: Target list. Distance to this list is computed. + base: The integer base to use. For example, a list of chars would use base + 256. + scalar_diff_fn: Element-wise distance function. + + Returns: + List distance between `pred` and `target`. 
+ """ + d = 0 + for i, target_t in enumerate(target): + if i >= len(pred): + d += base # A missing slot is worth the max distance. + else: + # Add element-wise distance for this slot. + d += scalar_diff_fn(pred[i], target_t, base) + if len(pred) > len(target): + # Each extra slot is worth the max distance. + d += (len(pred) - len(target)) * base + return d + + +def log_absolute_distance(pred, target, base): + """Asymmetric list distance function that uses log distance. + + A list distance which computes sum of element-wise distances, similar to + `absolute_distance`. Unlike `absolute_distance`, this scales the resulting + distance to be a float. + + Element-wise distance are log-scale. Distance between two list changes + relatively less for elements that are far apart, but changes a lot (goes to 0 + faster) when values get close together. + + Args: + pred: List of ints. Computes distance from this list to the target. + target: List of ints. This is the "correct" list which the prediction list + is trying to match. + base: Integer base. + + Returns: + Float distance normalized so that when `pred` is at most as long as `target` + the distance is between 0.0 and 1.0. Distance grows unboundedly large + as `pred` grows past `target` in length. + """ + if not target: + length_normalizer = 1.0 + if not pred: + # Distance between [] and [] is 0.0 since they are equal. + return 0.0 + else: + length_normalizer = float(len(target)) + # max_dist is the maximum element-wise distance, before taking log and + # scaling. Since we use `mod_abs_diff`, it would be (base // 2), but we add + # 1 to it so that missing or extra positions get the maximum penalty. + max_dist = base // 2 + 1 + + # The log-distance will be scaled by a factor. + # Note: +1 is added to the numerator and denominator to avoid log(0). This + # only has a translational effect, i.e. log(dist + 1) / log(max_dist + 1). + factor = log(max_dist + 1) + + d = 0.0 # Total distance to be computed. + for i, target_t in enumerate(target): + if i >= len(pred): + # Assign the max element-wise distance for missing positions. This is 1.0 + # after scaling. + d += 1.0 + else: + # Add the log-dist divided by a scaling factor. + d += log(mod_abs_diff(pred[i], target_t, base) + 1) / factor + if len(pred) > len(target): + # Add the max element-wise distance for each extra position. + # Since max dist after scaling is 1, this is just the difference in list + # lengths. + d += (len(pred) - len(target)) + return d / length_normalizer # Normalize again by the target length. + + +######################## +### Reward Functions ### +######################## + +# Reward functions assign reward based on program output. +# Warning: only use these functions as the terminal rewards in episodes, i.e. +# for the "final" programs. + + +def absolute_distance_reward(pred, target, base, scalar_diff_fn=abs_diff): + """Reward function based on absolute_distance function. + + Maximum reward, 1.0, is given when the lists are equal. Reward is scaled + so that 0.0 reward is given when `pred` is the empty list (assuming `target` + is not empty). Reward can go negative when `pred` is longer than `target`. + + This is an asymmetric reward function, so which list is the prediction and + which is the target matters. + + Args: + pred: Prediction sequence. This should be the sequence outputted by the + generated code. List of ints n, where 0 <= n < base. + target: Target sequence. The correct sequence that the generated code needs + to output. List of ints n, where 0 <= n < base. 
+ base: Base of the computation. + scalar_diff_fn: Element-wise distance function. + + Returns: + Reward computed based on `pred` and `target`. A float. + """ + unit_dist = float(base * len(target)) + if unit_dist == 0: + unit_dist = base + dist = absolute_distance(pred, target, base, scalar_diff_fn=scalar_diff_fn) + return (unit_dist - dist) / unit_dist + + +def absolute_mod_distance_reward(pred, target, base): + """Same as `absolute_distance_reward` but `mod_abs_diff` scalar diff is used. + + Args: + pred: Prediction sequence. This should be the sequence outputted by the + generated code. List of ints n, where 0 <= n < base. + target: Target sequence. The correct sequence that the generated code needs + to output. List of ints n, where 0 <= n < base. + base: Base of the computation. + + Returns: + Reward computed based on `pred` and `target`. A float. + """ + return absolute_distance_reward(pred, target, base, mod_abs_diff) + + +def absolute_log_distance_reward(pred, target, base): + """Compute reward using `log_absolute_distance`. + + Maximum reward, 1.0, is given when the lists are equal. Reward is scaled + so that 0.0 reward is given when `pred` is the empty list (assuming `target` + is not empty). Reward can go negative when `pred` is longer than `target`. + + This is an asymmetric reward function, so which list is the prediction and + which is the target matters. + + This reward function has the nice property that much more reward is given + for getting the correct value (at each position) than for there being any + value at all. For example, in base 100, lets say pred = [1] * 1000 + and target = [10] * 1000. A lot of reward would be given for being 80% + accurate (worst element-wise distance is 50, distances here are 9) using + `absolute_distance`. `log_absolute_distance` on the other hand will give + greater and greater reward increments the closer each predicted value gets to + the target. That makes the reward given for accuracy somewhat independant of + the base. + + Args: + pred: Prediction sequence. This should be the sequence outputted by the + generated code. List of ints n, where 0 <= n < base. + target: Target sequence. The correct sequence that the generated code needs + to output. List of ints n, where 0 <= n < base. + base: Base of the computation. + + Returns: + Reward computed based on `pred` and `target`. A float. + """ + return 1.0 - log_absolute_distance(pred, target, base) + + +####################### +### Reward Managers ### +####################### + +# Reward managers assign reward to many code attempts throughout an episode. + + +class RewardManager(object): + """Reward managers administer reward across an episode. + + Reward managers are used for "editor" environments. These are environments + where the agent has some way to edit its code over time, and run its code + many time in the same episode, so that it can make incremental improvements. + + Reward managers are instantiated with a target sequence, which is the known + correct program output. The manager is called on the output from a proposed + code, and returns reward. If many proposal outputs are tried, reward may be + some stateful function that takes previous tries into account. This is done, + in part, so that an agent cannot accumulate unbounded reward just by trying + junk programs as often as possible. So reward managers should not give the + same reward twice if the next proposal is not better than the last. 
+ """ + __metaclass__ = ABCMeta + + def __init__(self, target, base, distance_fn=absolute_distance): + self._target = list(target) + self._base = base + self._distance_fn = distance_fn + + @abstractmethod + def __call__(self, sequence): + """Call this reward manager like a function to get reward. + + Calls to reward manager are stateful, and will take previous sequences + into account. Repeated calls with the same sequence may produce different + rewards. + + Args: + sequence: List of integers (each between 0 and base - 1). This is the + proposal sequence. Reward will be computed based on the distance + from this sequence to the target (distance function and target are + given in the constructor), as well as previous sequences tried during + the lifetime of this object. + + Returns: + Float value. The reward received from this call. + """ + return 0.0 + + +class DeltaRewardManager(RewardManager): + """Simple reward manager that assigns reward for the net change in distance. + + Given some (possibly asymmetric) list distance function, gives reward for + relative changes in prediction distance to the target. + + For example, if on the first call the distance is 3.0, the change in distance + is -3 (from starting distance of 0). That relative change will be scaled to + produce a negative reward for this step. On the next call, the distance is 2.0 + which is a +1 change, and that will be scaled to give a positive reward. + If the final call has distance 0 (the target is achieved), that is another + positive change of +2. The total reward across all 3 calls is then 0, which is + the highest posible episode total. + + Reward is scaled so that the maximum element-wise distance is worth 1.0. + Maximum total episode reward attainable is 0. + """ + + def __init__(self, target, base, distance_fn=absolute_distance): + super(DeltaRewardManager, self).__init__(target, base, distance_fn) + self._last_diff = 0 + + def _diff(self, seq): + return self._distance_fn(seq, self._target, self._base) + + def _delta_reward(self, seq): + # Reward is relative to previous sequence diff. + # Reward is scaled so that maximum token difference is worth 1.0. + # Reward = (last_diff - this_diff) / self.base. + # Reward is positive if this sequence is closer to the target than the + # previous sequence, and negative if this sequence is further away. + diff = self._diff(seq) + reward = (self._last_diff - diff) / float(self._base) + self._last_diff = diff + return reward + + def __call__(self, seq): + return self._delta_reward(seq) + + +class FloorRewardManager(RewardManager): + """Assigns positive reward for each step taken closer to the target. + + Given some (possibly asymmetric) list distance function, gives reward for + whenever a new episode minimum distance is reached. No reward is given if + the distance regresses to a higher value, so that the sum of rewards + for the episode is positive. + + Reward is scaled so that the maximum element-wise distance is worth 1.0. + Maximum total episode reward attainable is len(target). + + If the prediction sequence is longer than the target, a reward of -1 is given. + Subsequence predictions which are also longer get 0 reward. The -1 penalty + will be canceled out with a +1 reward when a prediction is given which is at + most the length of the target. 
+ """ + + def __init__(self, target, base, distance_fn=absolute_distance): + super(FloorRewardManager, self).__init__(target, base, distance_fn) + self._last_diff = 0 + self._min_diff = self._max_diff() + self._too_long_penality_given = False + + def _max_diff(self): + return self._distance_fn([], self._target, self._base) + + def _diff(self, seq): + return self._distance_fn(seq, self._target, self._base) + + def _delta_reward(self, seq): + # Reward is only given if this sequence is closer to the target than any + # previous sequence. + # Reward is scaled so that maximum token difference is worth 1.0 + # Reward = (min_diff - this_diff) / self.base + # Reward is always positive. + diff = self._diff(seq) + if diff < self._min_diff: + reward = (self._min_diff - diff) / float(self._base) + self._min_diff = diff + else: + reward = 0.0 + return reward + + def __call__(self, seq): + if len(seq) > len(self._target): # Output is too long. + if not self._too_long_penality_given: + self._too_long_penality_given = True + reward = -1.0 + else: + reward = 0.0 # Don't give this penalty more than once. + return reward + + reward = self._delta_reward(seq) + if self._too_long_penality_given: + reward += 1.0 # Return the subtracted reward. + self._too_long_penality_given = False + return reward + diff --git a/models/research/brain_coder/common/reward_test.py b/models/research/brain_coder/common/reward_test.py new file mode 100644 index 0000000000000000000000000000000000000000..38a1d4ace38cbc945362e52adb90cc9dd62f1be7 --- /dev/null +++ b/models/research/brain_coder/common/reward_test.py @@ -0,0 +1,311 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""Tests for common.reward.""" + +from math import log +import numpy as np +import tensorflow as tf + +from common import reward # brain coder + + +class RewardTest(tf.test.TestCase): + + def testAbsDiff(self): + self.assertEqual(5, reward.abs_diff(15, 20)) + self.assertEqual(5, reward.abs_diff(20, 15)) + + def testModAbsDiff(self): + self.assertEqual(5, reward.mod_abs_diff(15, 20, 25)) + self.assertEqual(5, reward.mod_abs_diff(20, 15, 25)) + self.assertEqual(2, reward.mod_abs_diff(1, 24, 25)) + self.assertEqual(2, reward.mod_abs_diff(24, 1, 25)) + + self.assertEqual(0, reward.mod_abs_diff(0, 0, 5)) + self.assertEqual(1, reward.mod_abs_diff(0, 1, 5)) + self.assertEqual(2, reward.mod_abs_diff(0, 2, 5)) + self.assertEqual(2, reward.mod_abs_diff(0, 3, 5)) + self.assertEqual(1, reward.mod_abs_diff(0, 4, 5)) + + self.assertEqual(0, reward.mod_abs_diff(-1, 4, 5)) + self.assertEqual(1, reward.mod_abs_diff(-5, 4, 5)) + self.assertEqual(1, reward.mod_abs_diff(-7, 4, 5)) + self.assertEqual(1, reward.mod_abs_diff(13, 4, 5)) + self.assertEqual(1, reward.mod_abs_diff(15, 4, 5)) + + def testAbsoluteDistance_AbsDiffMethod(self): + self.assertEqual( + 4, + reward.absolute_distance([0], [4], 5, scalar_diff_fn=reward.abs_diff)) + self.assertEqual( + 0, + reward.absolute_distance([4], [4], 5, scalar_diff_fn=reward.abs_diff)) + self.assertEqual( + 0, + reward.absolute_distance([], [], 5, scalar_diff_fn=reward.abs_diff)) + self.assertEqual( + 5, + reward.absolute_distance([1], [], 5, scalar_diff_fn=reward.abs_diff)) + self.assertEqual( + 5, + reward.absolute_distance([], [1], 5, scalar_diff_fn=reward.abs_diff)) + self.assertEqual( + 0, + reward.absolute_distance([1, 2, 3], [1, 2, 3], 5, + scalar_diff_fn=reward.abs_diff)) + self.assertEqual( + 1, + reward.absolute_distance([1, 2, 4], [1, 2, 3], 5, + 
scalar_diff_fn=reward.abs_diff)) + self.assertEqual( + 1, + reward.absolute_distance([1, 2, 2], [1, 2, 3], 5, + scalar_diff_fn=reward.abs_diff)) + self.assertEqual( + 5, + reward.absolute_distance([1, 2], [1, 2, 3], 5, + scalar_diff_fn=reward.abs_diff)) + self.assertEqual( + 5, + reward.absolute_distance([1, 2, 3, 4], [1, 2, 3], 5, + scalar_diff_fn=reward.abs_diff)) + self.assertEqual( + 6, + reward.absolute_distance([4, 4, 4], [1, 2, 3], 5, + scalar_diff_fn=reward.abs_diff)) + + def testAbsoluteDistance_ModDiffMethod(self): + self.assertEqual( + 1, + reward.absolute_distance([0], [4], 5, + scalar_diff_fn=reward.mod_abs_diff)) + self.assertEqual( + 0, + reward.absolute_distance([4], [4], 5, + scalar_diff_fn=reward.mod_abs_diff)) + self.assertEqual( + 0, + reward.absolute_distance([], [], 5, + scalar_diff_fn=reward.mod_abs_diff)) + self.assertEqual( + 5, + reward.absolute_distance([1], [], 5, + scalar_diff_fn=reward.mod_abs_diff)) + self.assertEqual( + 5, + reward.absolute_distance([], [1], 5, + scalar_diff_fn=reward.mod_abs_diff)) + self.assertEqual( + 0, + reward.absolute_distance([1, 2, 3], [1, 2, 3], 5, + scalar_diff_fn=reward.mod_abs_diff)) + self.assertEqual( + 1, + reward.absolute_distance([1, 2, 4], [1, 2, 3], 5, + scalar_diff_fn=reward.mod_abs_diff)) + self.assertEqual( + 1, + reward.absolute_distance([1, 2, 2], [1, 2, 3], 5, + scalar_diff_fn=reward.mod_abs_diff)) + self.assertEqual( + 5, + reward.absolute_distance([1, 2], [1, 2, 3], 5, + scalar_diff_fn=reward.mod_abs_diff)) + self.assertEqual( + 5, + reward.absolute_distance([1, 2, 3, 4], [1, 2, 3], 5, + scalar_diff_fn=reward.mod_abs_diff)) + self.assertEqual( + 5, + reward.absolute_distance([4, 4, 4], [1, 2, 3], 5, + scalar_diff_fn=reward.mod_abs_diff)) + + def testLogAbsoluteDistance(self): + def log_diff(diff, base): + return log(diff + 1) / log(base // 2 + 2) + + self.assertEqual( + log_diff(1, 5), + reward.log_absolute_distance([0], [4], 5)) + self.assertEqual( + log_diff(2, 5), + reward.log_absolute_distance([1], [4], 5)) + self.assertEqual( + log_diff(2, 5), + reward.log_absolute_distance([2], [4], 5)) + self.assertEqual( + log_diff(1, 5), + reward.log_absolute_distance([3], [4], 5)) + self.assertEqual( + log_diff(3, 5), # max_dist = base // 2 + 1 = 3 + reward.log_absolute_distance([], [4], 5)) + self.assertEqual( + 0 + log_diff(3, 5), # max_dist = base // 2 + 1 = 3 + reward.log_absolute_distance([4, 4], [4], 5)) + self.assertEqual( + 0, + reward.log_absolute_distance([4], [4], 5)) + self.assertEqual( + 0, + reward.log_absolute_distance([], [], 5)) + self.assertEqual( + 1, + reward.log_absolute_distance([1], [], 5)) + self.assertEqual( + 1, + reward.log_absolute_distance([], [1], 5)) + + self.assertEqual( + 0, + reward.log_absolute_distance([1, 2, 3], [1, 2, 3], 5)) + self.assertEqual( + log_diff(1, 5) / 3, # divided by target length. + reward.log_absolute_distance([1, 2, 4], [1, 2, 3], 5)) + self.assertEqual( + log_diff(1, 5) / 3, + reward.log_absolute_distance([1, 2, 2], [1, 2, 3], 5)) + self.assertEqual( + log_diff(3, 5) / 3, # max_dist + reward.log_absolute_distance([1, 2], [1, 2, 3], 5)) + self.assertEqual( + log_diff(3, 5) / 3, # max_dist + reward.log_absolute_distance([1, 2, 3, 4], [1, 2, 3], 5)) + # Add log differences for each position. 
+ self.assertEqual( + (log_diff(2, 5) + log_diff(2, 5) + log_diff(1, 5)) / 3, + reward.log_absolute_distance([4, 4, 4], [1, 2, 3], 5)) + + def testAbsoluteDistanceReward(self): + self.assertEqual( + 1, + reward.absolute_distance_reward([1, 2, 3], [1, 2, 3], 5)) + self.assertEqual( + 1 - 1 / (5 * 3.), # 1 - distance / (base * target_len) + reward.absolute_distance_reward([1, 2, 4], [1, 2, 3], 5)) + self.assertEqual( + 1 - 1 / (5 * 3.), + reward.absolute_distance_reward([1, 2, 2], [1, 2, 3], 5)) + self.assertTrue(np.isclose( + 1 - 5 / (5 * 3.), + reward.absolute_distance_reward([1, 2], [1, 2, 3], 5))) + self.assertTrue(np.isclose( + 1 - 5 / (5 * 3.), + reward.absolute_distance_reward([1, 2, 3, 4], [1, 2, 3], 5))) + # Add log differences for each position. + self.assertEqual( + 1 - (3 + 2 + 1) / (5 * 3.), + reward.absolute_distance_reward([4, 4, 4], [1, 2, 3], 5)) + self.assertEqual( + 1, + reward.absolute_distance_reward([], [], 5)) + + def testAbsoluteModDistanceReward(self): + self.assertEqual( + 1, + reward.absolute_mod_distance_reward([1, 2, 3], [1, 2, 3], 5)) + self.assertEqual( + 1 - 1 / (5 * 3.), # 1 - distance / (base * target_len) + reward.absolute_mod_distance_reward([1, 2, 4], [1, 2, 3], 5)) + self.assertEqual( + 1 - 1 / (5 * 3.), + reward.absolute_mod_distance_reward([1, 2, 2], [1, 2, 3], 5)) + self.assertTrue(np.isclose( + 1 - 5 / (5 * 3.), + reward.absolute_mod_distance_reward([1, 2], [1, 2, 3], 5))) + self.assertTrue(np.isclose( + 1 - 5 / (5 * 3.), + reward.absolute_mod_distance_reward([1, 2, 3, 4], [1, 2, 3], 5))) + # Add log differences for each position. + self.assertTrue(np.isclose( + 1 - (2 + 2 + 1) / (5 * 3.), + reward.absolute_mod_distance_reward([4, 4, 4], [1, 2, 3], 5))) + self.assertTrue(np.isclose( + 1 - (1 + 2 + 2) / (5 * 3.), + reward.absolute_mod_distance_reward([0, 1, 2], [4, 4, 4], 5))) + self.assertEqual( + 1, + reward.absolute_mod_distance_reward([], [], 5)) + + def testAbsoluteLogDistanceReward(self): + def log_diff(diff, base): + return log(diff + 1) / log(base // 2 + 2) + + self.assertEqual( + 1, + reward.absolute_log_distance_reward([1, 2, 3], [1, 2, 3], 5)) + self.assertEqual( + 1 - log_diff(1, 5) / 3, # divided by target length. + reward.absolute_log_distance_reward([1, 2, 4], [1, 2, 3], 5)) + self.assertEqual( + 1 - log_diff(1, 5) / 3, + reward.absolute_log_distance_reward([1, 2, 2], [1, 2, 3], 5)) + self.assertEqual( + 1 - log_diff(3, 5) / 3, # max_dist + reward.absolute_log_distance_reward([1, 2], [1, 2, 3], 5)) + self.assertEqual( + 1 - log_diff(3, 5) / 3, # max_dist + reward.absolute_log_distance_reward([1, 2, 3, 4], [1, 2, 3], 5)) + # Add log differences for each position. 
+ self.assertEqual( + 1 - (log_diff(2, 5) + log_diff(2, 5) + log_diff(1, 5)) / 3, + reward.absolute_log_distance_reward([4, 4, 4], [1, 2, 3], 5)) + self.assertEqual( + 1 - (log_diff(1, 5) + log_diff(2, 5) + log_diff(2, 5)) / 3, + reward.absolute_log_distance_reward([0, 1, 2], [4, 4, 4], 5)) + self.assertEqual( + 1, + reward.absolute_log_distance_reward([], [], 5)) + + def testDeltaRewardManager(self): + reward_manager = reward.DeltaRewardManager( + [1, 2, 3, 4], base=5, distance_fn=reward.absolute_distance) + self.assertEqual(-3, reward_manager([1])) + self.assertEqual(0, reward_manager([1])) + self.assertEqual(4 / 5., reward_manager([1, 3])) + self.assertEqual(-4 / 5, reward_manager([1])) + self.assertEqual(3, reward_manager([1, 2, 3, 4])) + self.assertEqual(-1, reward_manager([1, 2, 3])) + self.assertEqual(0, reward_manager([1, 2, 3, 4, 3])) + self.assertEqual(-1, reward_manager([1, 2, 3, 4, 3, 2])) + self.assertEqual(2, reward_manager([1, 2, 3, 4])) + self.assertEqual(0, reward_manager([1, 2, 3, 4])) + self.assertEqual(0, reward_manager([1, 2, 3, 4])) + + def testFloorRewardMananger(self): + reward_manager = reward.FloorRewardManager( + [1, 2, 3, 4], base=5, distance_fn=reward.absolute_distance) + self.assertEqual(1, reward_manager([1])) + self.assertEqual(0, reward_manager([1])) + self.assertEqual(4 / 5., reward_manager([1, 3])) + self.assertEqual(0, reward_manager([1])) + self.assertEqual(1 / 5., reward_manager([1, 2])) + self.assertEqual(0, reward_manager([0, 1])) + self.assertEqual(0, reward_manager([])) + self.assertEqual(0, reward_manager([1, 2])) + self.assertEqual(2, reward_manager([1, 2, 3, 4])) + self.assertEqual(0, reward_manager([1, 2, 3])) + self.assertEqual(-1, reward_manager([1, 2, 3, 4, 3])) + self.assertEqual(0, reward_manager([1, 2, 3, 4, 3, 2])) + self.assertEqual(1, reward_manager([1, 2, 3, 4])) + self.assertEqual(0, reward_manager([1, 2, 3, 4])) + self.assertEqual(0, reward_manager([1, 2, 3, 4])) + + reward_manager = reward.FloorRewardManager( + [1, 2, 3, 4], base=5, distance_fn=reward.absolute_distance) + self.assertEqual(1, reward_manager([1])) + self.assertEqual(-1, reward_manager([1, 0, 0, 0, 0, 0])) + self.assertEqual(0, reward_manager([1, 2, 3, 4, 0, 0])) + self.assertEqual(0, reward_manager([1, 2, 3, 4, 0])) + self.assertEqual(1, reward_manager([])) + self.assertEqual(0, reward_manager([])) + self.assertEqual(0, reward_manager([1])) + self.assertEqual(1, reward_manager([1, 2])) + self.assertEqual(-1, reward_manager([1, 2, 3, 4, 0, 0])) + self.assertEqual(0, reward_manager([1, 1, 1, 1, 1])) + self.assertEqual(1 + 2, reward_manager([1, 2, 3, 4])) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/brain_coder/common/rollout.py b/models/research/brain_coder/common/rollout.py new file mode 100644 index 0000000000000000000000000000000000000000..e377aa662db640dfa907de83d32875cc096c4295 --- /dev/null +++ b/models/research/brain_coder/common/rollout.py @@ -0,0 +1,306 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""Utilities related to computing training batches from episode rollouts. + +Implementations here are based on code from Open AI: +https://github.com/openai/universe-starter-agent/blob/master/a3c.py. +""" + +from collections import namedtuple +import numpy as np +import scipy.signal + +from common import utils # brain coder + + +class Rollout(object): + """Holds a rollout for an episode. 
+ + A rollout is a record of the states observed in some environment and actions + taken by the agent to arrive at those states. Other information includes + rewards received after each action, values estimated for each state, whether + the rollout concluded the episide, and total reward received. Everything + should be given in time order. + + At each time t, the agent sees state s_t, takes action a_t, and then receives + reward r_t. The agent may optionally estimate a state value V(s_t) for each + state. + + For an episode of length T: + states = [s_0, ..., s_(T-1)] + actions = [a_0, ..., a_(T-1)] + rewards = [r_0, ..., r_(T-1)] + values = [V(s_0), ..., V(s_(T-1))] + + Note that there is an extra state s_T observed after taking action a_(T-1), + but this is not included in the rollout. + + Rollouts have an `terminated` attribute which is True when the rollout is + "finalized", i.e. it holds a full episode. terminated will be False when + time steps are still being added to it. + """ + + def __init__(self): + self.states = [] + self.actions = [] + self.rewards = [] + self.values = [] + self.total_reward = 0.0 + self.terminated = False + + def add(self, state, action, reward, value=0.0, terminated=False): + """Add the next timestep to this rollout. + + Args: + state: The state observed at the start of this timestep. + action: The action taken after observing the given state. + reward: The reward received for taking the given action. + value: The value estimated for the given state. + terminated: Whether this timestep ends the episode. + + Raises: + ValueError: If this.terminated is already True, meaning that the episode + has already ended. + """ + if self.terminated: + raise ValueError( + 'Trying to add timestep to an already terminal rollout.') + self.states += [state] + self.actions += [action] + self.rewards += [reward] + self.values += [value] + self.terminated = terminated + self.total_reward += reward + + def add_many(self, states, actions, rewards, values=None, terminated=False): + """Add many timesteps to this rollout. + + Arguments are the same as `add`, but are lists of equal size. + + Args: + states: The states observed. + actions: The actions taken. + rewards: The rewards received. + values: The values estimated for the given states. + terminated: Whether this sequence ends the episode. + + Raises: + ValueError: If the lengths of all the input lists are not equal. + ValueError: If this.terminated is already True, meaning that the episode + has already ended. + """ + if len(states) != len(actions): + raise ValueError( + 'Number of states and actions must be the same. Got %d states and ' + '%d actions' % (len(states), len(actions))) + if len(states) != len(rewards): + raise ValueError( + 'Number of states and rewards must be the same. Got %d states and ' + '%d rewards' % (len(states), len(rewards))) + if values is not None and len(states) != len(values): + raise ValueError( + 'Number of states and values must be the same. 
Got %d states and ' + '%d values' % (len(states), len(values))) + if self.terminated: + raise ValueError( + 'Trying to add timesteps to an already terminal rollout.') + self.states += states + self.actions += actions + self.rewards += rewards + self.values += values if values is not None else [0.0] * len(states) + self.terminated = terminated + self.total_reward += sum(rewards) + + def extend(self, other): + """Append another rollout to this rollout.""" + assert not self.terminated + self.states.extend(other.states) + self.actions.extend(other.actions) + self.rewards.extend(other.rewards) + self.values.extend(other.values) + self.terminated = other.terminated + self.total_reward += other.total_reward + + +def discount(x, gamma): + """Returns discounted sums for each value in x, with discount factor gamma. + + This can be used to compute the return (discounted sum of rewards) at each + timestep given a sequence of rewards. See the definitions for return and + REINFORCE in section 3 of https://arxiv.org/pdf/1602.01783.pdf. + + Let g^k mean gamma ** k. + For list [x_0, ..., x_N], the following list of discounted sums is computed: + [x_0 + g^1 * x_1 + g^2 * x_2 + ... g^N * x_N, + x_1 + g^1 * x_2 + g^2 * x_3 + ... g^(N-1) * x_N, + x_2 + g^1 * x_3 + g^2 * x_4 + ... g^(N-2) * x_N, + ..., + x_(N-1) + g^1 * x_N, + x_N] + + Args: + x: List of numbers [x_0, ..., x_N]. + gamma: Float between 0 and 1 (inclusive). This is the discount factor. + + Returns: + List of discounted sums. + """ + return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1] + + +def discounted_advantage_and_rewards(rewards, values, gamma, lambda_=1.0): + """Compute advantages and returns (discounted sum of rewards). + + For an episode of length T, rewards = [r_0, ..., r_(T-1)]. + Each reward r_t is observed after taking action a_t at state s_t. A final + state s_T is observed but no reward is given at this state since no action + a_T is taken (otherwise there would be a new state s_(T+1)). + + `rewards` and `values` are for a single episode. Return R_t is the discounted + sum of future rewards starting at time t, where `gamma` is the discount + factor. + R_t = r_t + gamma * r_(t+1) + gamma**2 * r_(t+2) + ... + + gamma**(T-1-t) * r_(T-1) + + Advantage A(a_t, s_t) is approximated by computing A(a_t, s_t) = R_t - V(s_t) + where V(s_t) is an approximation of the value at that state, given in the + `values` list. Returns R_t are needed for all REINFORCE algorithms. Advantage + is used for the advantage actor critic variant of REINFORCE. + See algorithm S3 in https://arxiv.org/pdf/1602.01783.pdf. + + Additionally another parameter `lambda_` controls the bias-variance tradeoff. + See "Generalized Advantage Estimation": https://arxiv.org/abs/1506.02438. + lambda_ = 1 reduces to regular advantage. + 0 <= lambda_ < 1 trades off variance for bias, with lambda_ = 0 being the + most biased. + + Bootstrapping is also supported. If an episode does not end in a terminal + state (either because the episode was ended early, or the environment does not + have end states), the true return cannot be computed from the rewards alone. + However, it can be estimated by computing the value (an approximation of + return) of the last state s_T. Thus the `values` list will have an extra item: + values = [V(s_0), ..., V(s_(T-1)), V(s_T)]. + + Args: + rewards: List of observed rewards [r_0, ..., r_(T-1)]. + values: List of estimated values [V(s_0), ..., V(s_(T-1))] with an optional + extra V(s_T) item. + gamma: Discount factor. 
Number between 0 and 1. 1 means no discount. + If not 1, gamma is typically near 1, like 0.99. + lambda_: Bias-variance tradeoff factor. Between 0 and 1. + + Returns: + empirical_values: Returns at each timestep. + generalized_advantage: Avantages at each timestep. + + Raises: + ValueError: If shapes of `rewards` and `values` are not rank 1. + ValueError: If len(values) not in (len(rewards), len(rewards) + 1). + """ + rewards = np.asarray(rewards, dtype=np.float32) + values = np.asarray(values, dtype=np.float32) + if rewards.ndim != 1: + raise ValueError('Single episode only. rewards must be rank 1.') + if values.ndim != 1: + raise ValueError('Single episode only. values must be rank 1.') + if len(values) == len(rewards): + # No bootstrapping. + values = np.append(values, 0) + empirical_values = discount(rewards, gamma) + elif len(values) == len(rewards) + 1: + # With bootstrapping. + # Last value is for the terminal state (final state after last action was + # taken). + empirical_values = discount(np.append(rewards, values[-1]), gamma)[:-1] + else: + raise ValueError('values should contain the same number of items or one ' + 'more item than rewards') + delta = rewards + gamma * values[1:] - values[:-1] + generalized_advantage = discount(delta, gamma * lambda_) + + # empirical_values is the discounted sum of rewards into the future. + # generalized_advantage is the target for each policy update. + return empirical_values, generalized_advantage + + +"""Batch holds a minibatch of episodes. + +Let bi = batch_index, i.e. the index of each episode in the minibatch. +Let t = time. + +Attributes: + states: States for each timestep in each episode. Indexed by states[bi, t]. + actions: Actions for each timestep in each episode. Indexed by actions[bi, t]. + discounted_adv: Advantages (computed by discounted_advantage_and_rewards) + for each timestep in each episode. Indexed by discounted_adv[bi, t]. + discounted_r: Returns (discounted sum of rewards computed by + discounted_advantage_and_rewards) for each timestep in each episode. + Indexed by discounted_r[bi, t]. + total_rewards: Total reward for each episode, i.e. sum of rewards across all + timesteps (not discounted). Indexed by total_rewards[bi]. + episode_lengths: Number of timesteps in each episode. If an episode has + N actions, N rewards, and N states, then its length is N. Indexed by + episode_lengths[bi]. + batch_size: Number of episodes in this minibatch. An integer. + max_time: Maximum episode length in the batch. An integer. +""" # pylint: disable=pointless-string-statement +Batch = namedtuple( + 'Batch', + ['states', 'actions', 'discounted_adv', 'discounted_r', 'total_rewards', + 'episode_lengths', 'batch_size', 'max_time']) + + +def process_rollouts(rollouts, gamma, lambda_=1.0): + """Convert a batch of rollouts into tensors ready to be fed into a model. + + Lists from each episode are stacked into 2D tensors and padded with 0s up to + the maximum timestep in the batch. + + Args: + rollouts: A list of Rollout instances. + gamma: The discount factor. A number between 0 and 1 (inclusive). See gamma + argument in discounted_advantage_and_rewards. + lambda_: See lambda_ argument in discounted_advantage_and_rewards. + + Returns: + Batch instance. states, actions, discounted_adv, and discounted_r are + numpy arrays with shape (batch_size, max_episode_length). episode_lengths + is a list of ints. total_rewards is a list of floats (total reward in each + episode). batch_size and max_time are ints. 
+ + Raises: + ValueError: If any of the rollouts are not terminal. + """ + for ro in rollouts: + if not ro.terminated: + raise ValueError('Can only process terminal rollouts.') + + episode_lengths = [len(ro.states) for ro in rollouts] + batch_size = len(rollouts) + max_time = max(episode_lengths) + + states = utils.stack_pad([ro.states for ro in rollouts], 0, max_time) + actions = utils.stack_pad([ro.actions for ro in rollouts], 0, max_time) + + discounted_rewards = [None] * batch_size + discounted_adv = [None] * batch_size + for i, ro in enumerate(rollouts): + disc_r, disc_adv = discounted_advantage_and_rewards( + ro.rewards, ro.values, gamma, lambda_) + discounted_rewards[i] = disc_r + discounted_adv[i] = disc_adv + discounted_rewards = utils.stack_pad(discounted_rewards, 0, max_time) + discounted_adv = utils.stack_pad(discounted_adv, 0, max_time) + + total_rewards = [sum(ro.rewards) for ro in rollouts] + + return Batch(states=states, + actions=actions, + discounted_adv=discounted_adv, + discounted_r=discounted_rewards, + total_rewards=total_rewards, + episode_lengths=episode_lengths, + batch_size=batch_size, + max_time=max_time) diff --git a/models/research/brain_coder/common/rollout_test.py b/models/research/brain_coder/common/rollout_test.py new file mode 100644 index 0000000000000000000000000000000000000000..5be4cb0fafd8a2e94004c17b41e189d989a3a851 --- /dev/null +++ b/models/research/brain_coder/common/rollout_test.py @@ -0,0 +1,129 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""Tests for common.rollout.""" + +import numpy as np +import tensorflow as tf + +from common import rollout as rollout_lib # brain coder + + +class RolloutTest(tf.test.TestCase): + + def MakeRollout(self, states, actions, rewards, values=None, terminated=True): + rollout = rollout_lib.Rollout() + rollout.add_many( + states=states, actions=actions, rewards=rewards, values=values, + terminated=terminated) + return rollout + + def testDiscount(self): + discounted = np.array([1.0 / 2 ** n for n in range(4, -1, -1)]) + discounted[:2] += [1.0 / 2 ** n for n in range(1, -1, -1)] + + self.assertTrue(np.array_equal( + rollout_lib.discount([0.0, 1.0, 0.0, 0.0, 1.0], 0.50), + discounted)) + self.assertTrue(np.array_equal( + rollout_lib.discount(np.array([0.0, 1.0, 0.0, 0.0, 1.0]), 0.50), + discounted)) + + def testDiscountedAdvantageAndRewards(self): + # lambda=1, No bootstrapping. + values = [0.1, 0.5, 0.5, 0.25] + (empirical_values, + generalized_advantage) = rollout_lib.discounted_advantage_and_rewards( + [0.0, 0.0, 0.0, 1.0], + values, + gamma=0.75, + lambda_=1.0) + expected_discounted_r = ( + np.array([1.0 * 0.75 ** n for n in range(3, -1, -1)])) + expected_adv = expected_discounted_r - values + self.assertTrue(np.array_equal(empirical_values, expected_discounted_r)) + self.assertTrue(np.allclose(generalized_advantage, expected_adv)) + + # lambda=1, With bootstrapping. + values = [0.1, 0.5, 0.5, 0.25, 0.75] + (empirical_values, + generalized_advantage) = rollout_lib.discounted_advantage_and_rewards( + [0.0, 0.0, 0.0, 1.0], + values, + gamma=0.75, + lambda_=1.0) + expected_discounted_r = ( + np.array([0.75 * 0.75 ** n for n in range(4, 0, -1)]) + + np.array([1.0 * 0.75 ** n for n in range(3, -1, -1)])) + expected_adv = expected_discounted_r - values[:-1] + self.assertTrue(np.array_equal(empirical_values, expected_discounted_r)) + self.assertTrue(np.allclose(generalized_advantage, expected_adv)) + + # lambda=0.5, With bootstrapping. 
+ values = [0.1, 0.5, 0.5, 0.25, 0.75] + rewards = [0.0, 0.0, 0.0, 1.0] + l = 0.5 # lambda + g = 0.75 # gamma + (empirical_values, + generalized_advantage) = rollout_lib.discounted_advantage_and_rewards( + rewards, + values, + gamma=g, + lambda_=l) + expected_discounted_r = ( + np.array([0.75 * g ** n for n in range(4, 0, -1)]) + + np.array([1.0 * g ** n for n in range(3, -1, -1)])) + expected_adv = [0.0] * len(values) + for t in range(3, -1, -1): + delta_t = rewards[t] + g * values[t + 1] - values[t] + expected_adv[t] = delta_t + g * l * expected_adv[t + 1] + expected_adv = expected_adv[:-1] + self.assertTrue(np.array_equal(empirical_values, expected_discounted_r)) + self.assertTrue(np.allclose(generalized_advantage, expected_adv)) + + def testProcessRollouts(self): + g = 0.95 + rollouts = [ + self.MakeRollout( + states=[3, 6, 9], + actions=[1, 2, 3], + rewards=[1.0, -1.0, 0.5], + values=[0.5, 0.5, 0.1]), + self.MakeRollout( + states=[10], + actions=[5], + rewards=[1.0], + values=[0.5])] + batch = rollout_lib.process_rollouts(rollouts, gamma=g) + + self.assertEqual(2, batch.batch_size) + self.assertEqual(3, batch.max_time) + self.assertEqual([3, 1], batch.episode_lengths) + self.assertEqual([0.5, 1.0], batch.total_rewards) + self.assertEqual( + [[3, 6, 9], [10, 0, 0]], + batch.states.tolist()) + self.assertEqual( + [[1, 2, 3], [5, 0, 0]], + batch.actions.tolist()) + + rew1, rew2 = rollouts[0].rewards, rollouts[1].rewards + expected_discounted_rewards = [ + [rew1[0] + g * rew1[1] + g * g * rew1[2], + rew1[1] + g * rew1[2], + rew1[2]], + [rew2[0], 0.0, 0.0]] + expected_advantages = [ + [dr - v + for dr, v + in zip(expected_discounted_rewards[0], rollouts[0].values)], + [expected_discounted_rewards[1][0] - rollouts[1].values[0], 0.0, 0.0]] + self.assertTrue( + np.allclose(expected_discounted_rewards, batch.discounted_r)) + self.assertTrue( + np.allclose(expected_advantages, batch.discounted_adv)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/brain_coder/common/schedules.py b/models/research/brain_coder/common/schedules.py new file mode 100644 index 0000000000000000000000000000000000000000..fff2481e536d65f154ad2d9dc3972657d860abf8 --- /dev/null +++ b/models/research/brain_coder/common/schedules.py @@ -0,0 +1,301 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""Schedule functions for controlling hparams over time.""" + +from abc import ABCMeta +from abc import abstractmethod +import math + +from common import config_lib # brain coder + + +class Schedule(object): + """Schedule is a function which sets a hyperparameter's value over time. + + For example, a schedule can be used to decay an hparams, or oscillate it over + time. + + This object is constructed with an instance of config_lib.Config (will be + specific to each class implementation). For example if this is a decay + schedule, the config may specify the rate of decay and decay start time. Then + the object instance is called like a function, mapping global step (an integer + counting how many calls to the train op have been made) to the hparam value. + + Properties of a schedule function f(t): + 0) Domain of t is the non-negative integers (t may be 0). + 1) Range of f is the reals. + 2) Schedule functions can assume that they will be called in time order. This + allows schedules to be stateful. + 3) Schedule functions should be deterministic. 
Two schedule instances with the + same config must always give the same value for each t, and regardless of + what t's it was previously called on. Users may call f(t) on arbitrary + (positive) time jumps. Essentially, multiple schedule instances used in + replica training will behave the same. + 4) Duplicate successive calls on the same time are allowed. + """ + __metaclass__ = ABCMeta + + @abstractmethod + def __init__(self, config): + """Construct this schedule with a config specific to each class impl. + + Args: + config: An instance of config_lib.Config. + """ + pass + + @abstractmethod + def __call__(self, global_step): + """Map `global_step` to a value. + + `global_step` is an integer counting how many calls to the train op have + been made across all replicas (hence why it is global). Implementations + may assume calls to be made in time order, i.e. `global_step` now >= + previous `global_step` values. + + Args: + global_step: Non-negative integer. + + Returns: + Hparam value at this step. A number. + """ + pass + + +class ConstSchedule(Schedule): + """Constant function. + + config: + const: Constant value at every step. + + f(t) = const. + """ + + def __init__(self, config): + super(ConstSchedule, self).__init__(config) + self.const = config.const + + def __call__(self, global_step): + return self.const + + +class LinearDecaySchedule(Schedule): + """Linear decay function. + + config: + initial: Decay starts from this value. + final: Decay ends at this value. + start_time: Step when decay starts. Constant before it. + end_time: When decay ends. Constant after it. + + f(t) is a linear function when start_time <= t <= end_time, with slope of + (final - initial) / (end_time - start_time). f(t) = initial + when t <= start_time. f(t) = final when t >= end_time. + + If start_time == end_time, this becomes a step function. + """ + + def __init__(self, config): + super(LinearDecaySchedule, self).__init__(config) + self.initial = config.initial + self.final = config.final + self.start_time = config.start_time + self.end_time = config.end_time + + if self.end_time < self.start_time: + raise ValueError('start_time must be before end_time.') + + # Linear interpolation. + self._time_diff = float(self.end_time - self.start_time) + self._diff = float(self.final - self.initial) + self._slope = ( + self._diff / self._time_diff if self._time_diff > 0 else float('inf')) + + def __call__(self, global_step): + if global_step <= self.start_time: + return self.initial + if global_step > self.end_time: + return self.final + return self.initial + (global_step - self.start_time) * self._slope + + +class ExponentialDecaySchedule(Schedule): + """Exponential decay function. + + See https://en.wikipedia.org/wiki/Exponential_decay. + + Use this decay function to decay over orders of magnitude. For example, to + decay learning rate from 1e-2 to 1e-6. Exponential decay will decay the + exponent linearly. + + config: + initial: Decay starts from this value. + final: Decay ends at this value. + start_time: Step when decay starts. Constant before it. + end_time: When decay ends. Constant after it. + + f(t) is an exponential decay function when start_time <= t <= end_time. The + decay rate and amplitude are chosen so that f(t) = initial when + t = start_time, and f(t) = final when t = end_time. f(t) is constant for + t < start_time or t > end_time. initial and final must be positive values. + + If start_time == end_time, this becomes a step function. 
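+
+  Illustrative example (values mirror schedules_test.py below): with
+  initial=math.exp(-1), final=math.exp(-6), start_time=10, end_time=20,
+  f(10) == exp(-1), f(15) == exp(-5/2. - 1), and f(20) == exp(-6); the
+  exponent decays linearly between start_time and end_time.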
+ """ + + def __init__(self, config): + super(ExponentialDecaySchedule, self).__init__(config) + self.initial = config.initial + self.final = config.final + self.start_time = config.start_time + self.end_time = config.end_time + + if self.initial <= 0 or self.final <= 0: + raise ValueError('initial and final must be positive numbers.') + + # Linear interpolation in log space. + self._linear_fn = LinearDecaySchedule( + config_lib.Config( + initial=math.log(self.initial), + final=math.log(self.final), + start_time=self.start_time, + end_time=self.end_time)) + + def __call__(self, global_step): + return math.exp(self._linear_fn(global_step)) + + +class SmootherstepDecaySchedule(Schedule): + """Smootherstep decay function. + + A sigmoidal like transition from initial to final values. A smoother + transition than linear and exponential decays, hence the name. + See https://en.wikipedia.org/wiki/Smoothstep. + + config: + initial: Decay starts from this value. + final: Decay ends at this value. + start_time: Step when decay starts. Constant before it. + end_time: When decay ends. Constant after it. + + f(t) is fully defined here: + https://en.wikipedia.org/wiki/Smoothstep#Variations. + + f(t) is smooth, as in its first-derivative exists everywhere. + """ + + def __init__(self, config): + super(SmootherstepDecaySchedule, self).__init__(config) + self.initial = config.initial + self.final = config.final + self.start_time = config.start_time + self.end_time = config.end_time + + if self.end_time < self.start_time: + raise ValueError('start_time must be before end_time.') + + self._time_diff = float(self.end_time - self.start_time) + self._diff = float(self.final - self.initial) + + def __call__(self, global_step): + if global_step <= self.start_time: + return self.initial + if global_step > self.end_time: + return self.final + x = (global_step - self.start_time) / self._time_diff + + # Smootherstep + return self.initial + x * x * x * (x * (x * 6 - 15) + 10) * self._diff + + +class HardOscillatorSchedule(Schedule): + """Hard oscillator function. + + config: + high: Max value of the oscillator. Value at constant plateaus. + low: Min value of the oscillator. Value at constant valleys. + start_time: Global step when oscillation starts. Constant before this. + period: Width of one oscillation, i.e. number of steps over which the + oscillation takes place. + transition_fraction: Fraction of the period spent transitioning between high + and low values. 50% of this time is spent rising, and 50% of this time + is spent falling. 50% of the remaining time is spent constant at the + high value, and 50% of the remaining time is spent constant at the low + value. transition_fraction = 1.0 means the entire period is spent + rising and falling. transition_fraction = 0.0 means no time is spent + rising and falling, i.e. the function jumps instantaneously between + high and low. + + f(t) = high when t < start_time. + f(t) is periodic when t >= start_time, with f(t + period) = f(t). + f(t) is linear with positive slope when rising, and negative slope when + falling. At the start of the period t0, f(t0) = high and begins to descend. + At the middle of the period f is low and is constant until the ascension + begins. f then rises from low to high and is constant again until the period + repeats. + + Note: when transition_fraction is 0, f starts the period low and ends high. 
+ """ + + def __init__(self, config): + super(HardOscillatorSchedule, self).__init__(config) + self.high = config.high + self.low = config.low + self.start_time = config.start_time + self.period = float(config.period) + self.transition_fraction = config.transition_fraction + self.half_transition_fraction = config.transition_fraction / 2.0 + + if self.transition_fraction < 0 or self.transition_fraction > 1.0: + raise ValueError('transition_fraction must be between 0 and 1.0') + if self.period <= 0: + raise ValueError('period must be positive') + + self._slope = ( + float(self.high - self.low) / self.half_transition_fraction + if self.half_transition_fraction > 0 else float('inf')) + + def __call__(self, global_step): + if global_step < self.start_time: + return self.high + period_pos = ((global_step - self.start_time) / self.period) % 1.0 + if period_pos >= 0.5: + # ascending + period_pos -= 0.5 + if period_pos < self.half_transition_fraction: + return self.low + period_pos * self._slope + else: + return self.high + else: + # descending + if period_pos < self.half_transition_fraction: + return self.high - period_pos * self._slope + else: + return self.low + + +_NAME_TO_CONFIG = { + 'const': ConstSchedule, + 'linear_decay': LinearDecaySchedule, + 'exp_decay': ExponentialDecaySchedule, + 'smooth_decay': SmootherstepDecaySchedule, + 'hard_osc': HardOscillatorSchedule, +} + + +def make_schedule(config): + """Schedule factory. + + Given `config` containing a `fn` property, a Schedule implementation is + instantiated with `config`. See `_NAME_TO_CONFIG` for `fn` options. + + Args: + config: Config with a `fn` option that specifies which Schedule + implementation to use. `config` is passed into the constructor. + + Returns: + A Schedule impl instance. + """ + schedule_class = _NAME_TO_CONFIG[config.fn] + return schedule_class(config) diff --git a/models/research/brain_coder/common/schedules_test.py b/models/research/brain_coder/common/schedules_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b17022f45a833fb3aa219fd06225f77fbd1b1055 --- /dev/null +++ b/models/research/brain_coder/common/schedules_test.py @@ -0,0 +1,139 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""Tests for common.schedules.""" + +from math import exp +from math import sqrt +import numpy as np +from six.moves import xrange +import tensorflow as tf + +from common import config_lib # brain coder +from common import schedules # brain coder + + +class SchedulesTest(tf.test.TestCase): + + def ScheduleTestHelper(self, config, schedule_subtype, io_values): + """Run common checks for schedules. + + Args: + config: Config object which is passed into schedules.make_schedule. + schedule_subtype: The expected schedule type to be instantiated. + io_values: List of (input, output) pairs. Must be in ascending input + order. No duplicate inputs. + """ + + # Check that make_schedule makes the correct type. + f = schedules.make_schedule(config) + self.assertTrue(isinstance(f, schedule_subtype)) + + # Check that multiple instances returned from make_schedule behave the same. + fns = [schedules.make_schedule(config) for _ in xrange(3)] + + # Check that all the inputs map to the right outputs. + for i, o in io_values: + for f in fns: + f_out = f(i) + self.assertTrue( + np.isclose(o, f_out), + 'Wrong value at input %d. Expected %s, got %s' % (i, o, f_out)) + + # Check that a subset of the io_values are still correct. 
+ f = schedules.make_schedule(config) + subseq = [io_values[i**2] for i in xrange(int(sqrt(len(io_values))))] + if subseq[-1] != io_values[-1]: + subseq.append(io_values[-1]) + for i, o in subseq: + f_out = f(i) + self.assertTrue( + np.isclose(o, f_out), + 'Wrong value at input %d. Expected %s, got %s' % (i, o, f_out)) + + # Check duplicate calls. + f = schedules.make_schedule(config) + for i, o in io_values: + for _ in xrange(3): + f_out = f(i) + self.assertTrue( + np.isclose(o, f_out), + 'Duplicate calls at input %d are not equal. Expected %s, got %s' + % (i, o, f_out)) + + def testConstSchedule(self): + self.ScheduleTestHelper( + config_lib.Config(fn='const', const=5), + schedules.ConstSchedule, + [(0, 5), (1, 5), (10, 5), (20, 5), (100, 5), (1000000, 5)]) + + def testLinearDecaySchedule(self): + self.ScheduleTestHelper( + config_lib.Config(fn='linear_decay', initial=2, final=0, start_time=10, + end_time=20), + schedules.LinearDecaySchedule, + [(0, 2), (1, 2), (10, 2), (11, 1.8), (15, 1), (19, 0.2), (20, 0), + (100000, 0)]) + + # Test step function. + self.ScheduleTestHelper( + config_lib.Config(fn='linear_decay', initial=2, final=0, start_time=10, + end_time=10), + schedules.LinearDecaySchedule, + [(0, 2), (1, 2), (10, 2), (11, 0), (15, 0)]) + + def testExponentialDecaySchedule(self): + self.ScheduleTestHelper( + config_lib.Config(fn='exp_decay', initial=exp(-1), final=exp(-6), + start_time=10, end_time=20), + schedules.ExponentialDecaySchedule, + [(0, exp(-1)), (1, exp(-1)), (10, exp(-1)), (11, exp(-1/2. - 1)), + (15, exp(-5/2. - 1)), (19, exp(-9/2. - 1)), (20, exp(-6)), + (100000, exp(-6))]) + + # Test step function. + self.ScheduleTestHelper( + config_lib.Config(fn='exp_decay', initial=exp(-1), final=exp(-6), + start_time=10, end_time=10), + schedules.ExponentialDecaySchedule, + [(0, exp(-1)), (1, exp(-1)), (10, exp(-1)), (11, exp(-6)), + (15, exp(-6))]) + + def testSmootherstepDecaySchedule(self): + self.ScheduleTestHelper( + config_lib.Config(fn='smooth_decay', initial=2, final=0, start_time=10, + end_time=20), + schedules.SmootherstepDecaySchedule, + [(0, 2), (1, 2), (10, 2), (11, 1.98288), (15, 1), (19, 0.01712), + (20, 0), (100000, 0)]) + + # Test step function. + self.ScheduleTestHelper( + config_lib.Config(fn='smooth_decay', initial=2, final=0, start_time=10, + end_time=10), + schedules.SmootherstepDecaySchedule, + [(0, 2), (1, 2), (10, 2), (11, 0), (15, 0)]) + + def testHardOscillatorSchedule(self): + self.ScheduleTestHelper( + config_lib.Config(fn='hard_osc', high=2, low=0, start_time=100, + period=10, transition_fraction=0.5), + schedules.HardOscillatorSchedule, + [(0, 2), (1, 2), (10, 2), (100, 2), (101, 1.2), (102, 0.4), (103, 0), + (104, 0), (105, 0), (106, 0.8), (107, 1.6), (108, 2), (109, 2), + (110, 2), (111, 1.2), (112, 0.4), (115, 0), (116, 0.8), (119, 2), + (120, 2), (100001, 1.2), (100002, 0.4), (100005, 0), (100006, 0.8), + (100010, 2)]) + + # Test instantaneous step. 
+ self.ScheduleTestHelper( + config_lib.Config(fn='hard_osc', high=2, low=0, start_time=100, + period=10, transition_fraction=0), + schedules.HardOscillatorSchedule, + [(0, 2), (1, 2), (10, 2), (99, 2), (100, 0), (104, 0), (105, 2), + (106, 2), (109, 2), (110, 0)]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/brain_coder/common/utils.py b/models/research/brain_coder/common/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..fa5f1c50768986ee10eee6120a0bca392b1d9d0e --- /dev/null +++ b/models/research/brain_coder/common/utils.py @@ -0,0 +1,558 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""Configuration class.""" + +import bisect +from collections import deque +import cPickle +import heapq +import random + +from absl import logging +import numpy as np +import six +from six.moves import xrange +import tensorflow as tf + + +def tuple_to_record(tuple_, record_type): + return record_type(**dict(zip(record_type.__slots__, tuple_))) + + +def make_record(type_name, attributes, defaults=None): + """Factory for mutable record classes. + + A record acts just like a collections.namedtuple except slots are writable. + One exception is that record classes are not equivalent to tuples or other + record classes of the same length. + + Note, each call to `make_record` produces a unique type. Two calls will make + different types even if `type_name` is the same each time. + + Args: + type_name: Name of the record type to create. + attributes: List of names of each record attribute. The order of the list + is preserved. + defaults: (optional) default values for attributes. A dict mapping attribute + names to values. + + Returns: + A new record type. + + Raises: + ValueError: If, + `defaults` is not a dict, + `attributes` contains duplicate names, + `defaults` keys are not contained in `attributes`. + """ + if defaults is None: + defaults = {} + if not isinstance(defaults, dict): + raise ValueError('defaults must be a dict.') + attr_set = set(attributes) + if len(attr_set) < len(attributes): + raise ValueError('No duplicate attributes allowed.') + if not set(defaults.keys()).issubset(attr_set): + raise ValueError('Default attributes must be given in the attributes list.') + + class RecordClass(object): + """A record type. + + Acts like mutable tuple with named slots. + """ + __slots__ = list(attributes) + _defaults = dict(defaults) + + def __init__(self, *args, **kwargs): + if len(args) > len(self.__slots__): + raise ValueError('Too many arguments. %s has length %d.' + % (type(self).__name__, len(self.__slots__))) + for attr, val in self._defaults.items(): + setattr(self, attr, val) + for i, arg in enumerate(args): + setattr(self, self.__slots__[i], arg) + for attr, val in kwargs.items(): + setattr(self, attr, val) + for attr in self.__slots__: + if not hasattr(self, attr): + raise ValueError('Required attr "%s" is not set.' % attr) + + def __len__(self): + return len(self.__slots__) + + def __iter__(self): + for attr in self.__slots__: + yield getattr(self, attr) + + def __getitem__(self, index): + return getattr(self, self.__slots__[index]) + + def __setitem__(self, index, value): + return setattr(self, self.__slots__[index], value) + + def __eq__(self, other): + # Types must be equal as well as values. 
+ return (isinstance(other, type(self)) + and all(a == b for a, b in zip(self, other))) + + def __str__(self): + return '%s(%s)' % ( + type(self).__name__, + ', '.join(attr + '=' + str(getattr(self, attr)) + for attr in self.__slots__)) + + def __repr__(self): + return str(self) + + RecordClass.__name__ = type_name + return RecordClass + + +# Making minibatches. +def stack_pad(tensors, pad_axes=None, pad_to_lengths=None, dtype=np.float32, + pad_value=0): + """Stack tensors along 0-th dim and pad them to be the same shape. + + Args: + tensors: Any list of iterables (python list, numpy array, etc). Can be 1D + or multi-D iterables. + pad_axes: An int or list of ints. Axes to pad along. + pad_to_lengths: Length in each dimension. If pad_axes was an int, this is an + int or None. If pad_axes was a list of ints, this is a list of mixed int + and None types with the same length, or None. A None length means the + maximum length among the given tensors is used. + dtype: Type of output numpy array. Defaults to np.float32. + pad_value: Value to use for padding. Defaults to 0. + + Returns: + Numpy array containing the tensors stacked along the 0-th dimension and + padded along the specified dimensions. + + Raises: + ValueError: If the tensors do not have equal shapes along non-padded + dimensions. + """ + tensors = [np.asarray(t) for t in tensors] + max_lengths = [max(l) for l in zip(*[t.shape for t in tensors])] + same_axes = dict(enumerate(max_lengths)) + if pad_axes is None: + pad_axes = [] + if isinstance(pad_axes, six.integer_types): + if pad_to_lengths is not None: + max_lengths[pad_axes] = pad_to_lengths + del same_axes[pad_axes] + else: + if pad_to_lengths is None: + pad_to_lengths = [None] * len(pad_axes) + for i, l in zip(pad_axes, pad_to_lengths): + if l is not None: + max_lengths[i] = l + del same_axes[i] + same_axes_items = same_axes.items() + dest = np.full([len(tensors)] + max_lengths, pad_value, dtype=dtype) + for i, t in enumerate(tensors): + for j, l in same_axes_items: + if t.shape[j] != l: + raise ValueError( + 'Tensor at index %d does not have size %d along axis %d' + % (i, l, j)) + dest[[i] + [slice(0, d) for d in t.shape]] = t + return dest + + +class RandomQueue(deque): + + def __init__(self, capacity): + super(RandomQueue, self).__init__([], capacity) + self.capacity = capacity + + def random_sample(self, sample_size): + idx = np.random.choice(len(self), sample_size) + return [self[i] for i in idx] + + def push(self, item): + # Append to right. Oldest element will be popped from left. + self.append(item) + + +class MPQItemContainer(object): + """Class for holding an item with its score. + + Defines a comparison function for use in the heap-queue. + """ + + def __init__(self, score, item, extra_data): + self.item = item + self.score = score + self.extra_data = extra_data + + def __cmp__(self, other): + assert isinstance(other, type(self)) + return cmp(self.score, other.score) + + def __iter__(self): + """Allows unpacking like a tuple.""" + yield self.score + yield self.item + yield self.extra_data + + def __repr__(self): + """String representation of this item. + + `extra_data` is not included in the representation. We are assuming that + `extra_data` is not easily interpreted by a human (if it was, it should be + hashable, like a string or tuple). + + Returns: + String representation of `self`. 
+ """ + return str((self.score, self.item)) + + def __str__(self): + return repr(self) + + +class MaxUniquePriorityQueue(object): + """A maximum priority queue where duplicates are not added. + + The top items by score remain in the queue. When the capacity is reached, + the lowest scored item in the queue will be dropped. + + This implementation differs from a typical priority queue, in that the minimum + score is popped, instead of the maximum. Largest scores remain stuck in the + queue. This is useful for accumulating the best known items from a population. + + The items used to determine uniqueness must be hashable, but additional + non-hashable data may be stored with each item. + """ + + def __init__(self, capacity): + self.capacity = capacity + self.heap = [] + self.unique_items = set() + + def push(self, score, item, extra_data=None): + """Push an item onto the queue. + + If the queue is at capacity, the item with the smallest score will be + dropped. Note that it is assumed each item has exactly one score. The same + item with a different score will still be dropped. + + Args: + score: Number used to prioritize items in the queue. Largest scores are + kept in the queue. + item: A hashable item to be stored. Duplicates of this item will not be + added to the queue. + extra_data: An extra (possible not hashable) data to store with the item. + """ + if item in self.unique_items: + return + if len(self.heap) >= self.capacity: + _, popped_item, _ = heapq.heappushpop( + self.heap, MPQItemContainer(score, item, extra_data)) + self.unique_items.add(item) + self.unique_items.remove(popped_item) + else: + heapq.heappush(self.heap, MPQItemContainer(score, item, extra_data)) + self.unique_items.add(item) + + def pop(self): + """Pop the item with the lowest score. + + Returns: + score: Item's score. + item: The item that was popped. + extra_data: Any extra data stored with the item. + """ + if not self.heap: + return () + score, item, extra_data = heapq.heappop(self.heap) + self.unique_items.remove(item) + return score, item, extra_data + + def get_max(self): + """Peek at the item with the highest score. + + Returns: + Same as `pop`. + """ + if not self.heap: + return () + score, item, extra_data = heapq.nlargest(1, self.heap)[0] + return score, item, extra_data + + def get_min(self): + """Peek at the item with the lowest score. + + Returns: + Same as `pop`. + """ + if not self.heap: + return () + score, item, extra_data = heapq.nsmallest(1, self.heap)[0] + return score, item, extra_data + + def random_sample(self, sample_size): + """Randomly select items from the queue. + + This does not modify the queue. + + Items are drawn from a uniform distribution, and not weighted by score. + + Args: + sample_size: Number of random samples to draw. The same item can be + sampled multiple times. + + Returns: + List of sampled items (of length `sample_size`). Each element in the list + is a tuple: (item, extra_data). + """ + idx = np.random.choice(len(self.heap), sample_size) + return [(self.heap[i].item, self.heap[i].extra_data) for i in idx] + + def iter_in_order(self): + """Iterate over items in the queue from largest score to smallest. + + Yields: + item: Hashable item. + extra_data: Extra data stored with the item. 
+ """ + for _, item, extra_data in heapq.nlargest(len(self.heap), self.heap): + yield item, extra_data + + def __len__(self): + return len(self.heap) + + def __iter__(self): + for _, item, _ in self.heap: + yield item + + def __repr__(self): + return '[' + ', '.join(repr(c) for c in self.heap) + ']' + + def __str__(self): + return repr(self) + + +class RouletteWheel(object): + """Randomly samples stored objects proportionally to their given weights. + + Stores objects and weights. Acts like a roulette wheel where each object is + given a slice of the roulette disk proportional to its weight. + + This can be used as a replay buffer where past experiences are sampled + proportionally to their weights. A good choice of "weight" for reinforcement + learning is exp(reward / temperature) where temperature -> inf makes the + distribution more uniform and temperature -> 0 makes the distribution more + peaky. + + To prevent experiences from being overweighted by appearing in the replay + buffer multiple times, a "unique mode" is supported where duplicate + experiences are ignored. In unique mode, weights can be quickly retrieved from + keys. + """ + + def __init__(self, unique_mode=False, save_file=None): + """Construct empty RouletteWheel. + + If `save_file` is not None, and the file already exists on disk, whatever + is in the file will be loaded into this instance. This allows jobs using + RouletteWheel to resume after preemption. + + Args: + unique_mode: If True, puts this RouletteWheel into unique mode, where + objects are added with hashable keys, so that duplicates are ignored. + save_file: Optional file path to save to. Must be a string containing + an absolute path to a file, or None. File will be Python pickle + format. + """ + self.unique_mode = unique_mode + self.objects = [] + self.weights = [] + self.partial_sums = [] + if self.unique_mode: + self.keys_to_weights = {} + self.save_file = save_file + self.save_to_disk_buffer = [] + + if save_file is not None and tf.gfile.Exists(save_file): + # Load from disk. + with tf.gfile.OpenFast(save_file, 'r') as f: + count = 0 + while 1: + try: + obj, weight, key = cPickle.load(f) + except EOFError: + break + else: + self.add(obj, weight, key) + count += 1 + logging.info('Loaded %d samples from disk.', count) + # Clear buffer since these items are already on disk. + self.save_to_disk_buffer = [] + + def __iter__(self): + return iter(zip(self.objects, self.weights)) + + def __len__(self): + return len(self.objects) + + def is_empty(self): + """Returns whether there is anything in the roulette wheel.""" + return not self.partial_sums + + @property + def total_weight(self): + """Total cumulative weight across all objects.""" + if self.partial_sums: + return self.partial_sums[-1] + return 0.0 + + def has_key(self, key): + if self.unique_mode: + RuntimeError('has_key method can only be called in unique mode.') + return key in self.keys_to_weights + + def get_weight(self, key): + if self.unique_mode: + RuntimeError('get_weight method can only be called in unique mode.') + return self.keys_to_weights[key] + + def add(self, obj, weight, key=None): + """Add one object and its weight to the roulette wheel. + + Args: + obj: Any object to be stored. + weight: A non-negative float. The given object will be drawn with + probability proportional to this weight when sampling. + key: This argument is only used when in unique mode. To allow `obj` to + be an unhashable type, like list, a separate hashable key is given. + Each `key` should be unique to each `obj`. 
`key` is used to check if + `obj` has been added to the roulette wheel before. + + Returns: + True if the object was added, False if it was not added due to it being + a duplicate (this only happens in unique mode). + + Raises: + ValueError: If `weight` is negative. + ValueError: If `key` is not given when in unique mode, or if `key` is + given when not in unique mode. + """ + if weight < 0: + raise ValueError('Weight must be non-negative') + if self.unique_mode: + if key is None: + raise ValueError( + 'Hashable key required for objects when unique mode is enabled.') + if key in self.keys_to_weights: + # Weight updates are not allowed. Ignore the given value of `weight`. + return False + self.keys_to_weights[key] = weight + elif key is not None: + raise ValueError( + 'key argument should not be used when unique mode is disabled.') + self.objects.append(obj) + self.weights.append(weight) + self.partial_sums.append(self.total_weight + weight) + if self.save_file is not None: + # Record new item in buffer. + self.save_to_disk_buffer.append((obj, weight, key)) + return True + + def add_many(self, objs, weights, keys=None): + """Add many object and their weights to the roulette wheel. + + Arguments are the same as the `add` method, except each is a list. Lists + must all be the same length. + + Args: + objs: List of objects to be stored. + weights: List of non-negative floats. See `add` method. + keys: List of hashable keys. This argument is only used when in unique + mode. See `add` method. + + Returns: + Number of objects added. This number will be less than the number of + objects provided if we are in unique mode and some keys are already + in the roulette wheel. + + Raises: + ValueError: If `keys` argument is provided when unique_mode == False, or + is not provided when unique_mode == True. + ValueError: If any of the lists are not the same length. + ValueError: If any of the weights are negative. + """ + if keys is not None and not self.unique_mode: + raise ValueError('Not in unique mode. Do not provide keys.') + elif keys is None and self.unique_mode: + raise ValueError('In unique mode. You must provide hashable keys.') + if keys and len(objs) != len(keys): + raise ValueError('Number of objects does not equal number of keys.') + if len(objs) != len(weights): + raise ValueError('Number of objects does not equal number of weights.') + return sum([self.add(obj, weights[i], key=keys[i] if keys else None) + for i, obj in enumerate(objs)]) + + def sample(self): + """Spin the roulette wheel. + + Randomly select an object with probability proportional to its weight. + + Returns: + object: The selected object. + weight: The weight of the selected object. + + Raises: + RuntimeError: If the roulette wheel is empty. + """ + if self.is_empty(): + raise RuntimeError('Trying to sample from empty roulette wheel.') + spin = random.random() * self.total_weight + + # Binary search. + i = bisect.bisect_right(self.partial_sums, spin) + if i == len(self.partial_sums): + # This should not happen since random.random() will always be strictly + # less than 1.0, and the last partial sum equals self.total_weight(). + # However it may happen due to rounding error. In that case it is easy to + # handle this, just select the last object. 
+ i -= 1 + + return self.objects[i], self.weights[i] + + def sample_many(self, count): + """Spin the roulette wheel `count` times and return the results.""" + if self.is_empty(): + raise RuntimeError('Trying to sample from empty roulette wheel.') + return [self.sample() for _ in xrange(count)] + + def incremental_save(self, log_info=False): + """Write new entries to disk. + + This performs an append operation on the `save_file` given in the + constructor. Any entries added since the last call to `incremental_save` + will be appended to the file. + + If a new RouletteWheel is constructed with the same `save_file`, all the + entries written there will be automatically loaded into the instance. + This is useful when a job resumes after preemption. + + Args: + log_info: If True, info about this operation will be logged. + + Raises: + RuntimeError: If `save_file` given in the constructor is None. + """ + if self.save_file is None: + raise RuntimeError('Cannot call incremental_save. `save_file` is None.') + if log_info: + logging.info('Saving %d new samples to disk.', + len(self.save_to_disk_buffer)) + with tf.gfile.OpenFast(self.save_file, 'a') as f: + for entry in self.save_to_disk_buffer: + cPickle.dump(entry, f) + # Clear the buffer. + self.save_to_disk_buffer = [] diff --git a/models/research/brain_coder/common/utils_test.py b/models/research/brain_coder/common/utils_test.py new file mode 100644 index 0000000000000000000000000000000000000000..569c2877d17bf7707616029cdd2a5eac55df7f60 --- /dev/null +++ b/models/research/brain_coder/common/utils_test.py @@ -0,0 +1,382 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""Tests for common.utils. +""" + +from collections import Counter +import random +import tempfile +import numpy as np +import tensorflow as tf + +from common import utils # brain coder + + +class UtilsTest(tf.test.TestCase): + + def testStackPad(self): + # 1D. + tensors = [[1, 2, 3], [4, 5, 6, 7, 8], [9]] + result = utils.stack_pad(tensors, pad_axes=0, pad_to_lengths=6) + self.assertTrue(np.array_equal( + result, + np.asarray([[1, 2, 3, 0, 0, 0], + [4, 5, 6, 7, 8, 0], + [9, 0, 0, 0, 0, 0]], dtype=np.float32))) + + # 3D. + tensors = [[[[1, 2, 3], [4, 5, 6]]], + [[[7, 8, 9], [0, 1, 2]], [[3, 4, 5], [6, 7, 8]]], + [[[0, 1, 2]], [[3, 4, 5]]]] + result = utils.stack_pad(tensors, pad_axes=[0, 1], pad_to_lengths=[2, 2]) + self.assertTrue(np.array_equal( + result, + np.asarray([[[[1, 2, 3], [4, 5, 6]], + [[0, 0, 0], [0, 0, 0]]], + [[[7, 8, 9], [0, 1, 2]], + [[3, 4, 5], [6, 7, 8]]], + [[[0, 1, 2], [0, 0, 0]], + [[3, 4, 5], [0, 0, 0]]]], dtype=np.float32))) + + def testStackPadNoAxes(self): + # 2D. + tensors = [[[1, 2, 3], [4, 5, 6]], + [[7, 8, 9], [1, 2, 3]], + [[4, 5, 6], [7, 8, 9]]] + result = utils.stack_pad(tensors) + self.assertTrue(np.array_equal( + result, + np.asarray(tensors))) + + def testStackPadNoneLength(self): + # 1D. + tensors = [[1, 2, 3], [4, 5, 6, 7, 8], [9]] + result = utils.stack_pad(tensors, pad_axes=0, pad_to_lengths=None) + self.assertTrue(np.array_equal( + result, + np.asarray([[1, 2, 3, 0, 0], + [4, 5, 6, 7, 8], + [9, 0, 0, 0, 0]], dtype=np.float32))) + + # 3D. 
+ tensors = [[[[1, 2, 3], [4, 5, 6]]], + [[[7, 8, 9], [0, 1, 2]], [[3, 4, 5], [6, 7, 8]]], + [[[0, 1, 2]], [[3, 4, 5]]]] + result = utils.stack_pad(tensors, pad_axes=[0, 1], pad_to_lengths=None) + self.assertTrue(np.array_equal( + result, + np.asarray([[[[1, 2, 3], [4, 5, 6]], + [[0, 0, 0], [0, 0, 0]]], + [[[7, 8, 9], [0, 1, 2]], + [[3, 4, 5], [6, 7, 8]]], + [[[0, 1, 2], [0, 0, 0]], + [[3, 4, 5], [0, 0, 0]]]], dtype=np.float32))) + + # 3D with partial pad_to_lengths. + tensors = [[[[1, 2, 3], [4, 5, 6]]], + [[[7, 8, 9], [0, 1, 2]], [[3, 4, 5], [6, 7, 8]]], + [[[0, 1, 2]], [[3, 4, 5]]]] + result = utils.stack_pad(tensors, pad_axes=[0, 1], pad_to_lengths=[None, 3]) + self.assertTrue(np.array_equal( + result, + np.asarray([[[[1, 2, 3], [4, 5, 6], [0, 0, 0]], + [[0, 0, 0], [0, 0, 0], [0, 0, 0]]], + [[[7, 8, 9], [0, 1, 2], [0, 0, 0]], + [[3, 4, 5], [6, 7, 8], [0, 0, 0]]], + [[[0, 1, 2], [0, 0, 0], [0, 0, 0]], + [[3, 4, 5], [0, 0, 0], [0, 0, 0]]]], dtype=np.float32))) + + def testStackPadValueError(self): + # 3D. + tensors = [[[[1, 2, 3], [4, 5, 6]]], + [[[7, 8, 9], [0, 1, 2]], [[3, 4, 5], [6, 7, 8]]], + [[[0, 1, 2]], [[3, 4, 5]]], + [[[1, 2, 3, 4]]]] + + # Not all tensors have the same shape along axis 2. + with self.assertRaises(ValueError): + utils.stack_pad(tensors, pad_axes=[0, 1], pad_to_lengths=[2, 2]) + + def testRecord(self): + my_record = utils.make_record('my_record', ['a', 'b', 'c'], {'b': 55}) + inst = my_record(a=1, b=2, c=3) + self.assertEqual(1, inst.a) + self.assertEqual(2, inst.b) + self.assertEqual(3, inst.c) + self.assertEqual(1, inst[0]) + self.assertEqual(2, inst[1]) + self.assertEqual(3, inst[2]) + self.assertEqual([1, 2, 3], list(iter(inst))) + self.assertEqual(3, len(inst)) + + inst.b = 999 + self.assertEqual(999, inst.b) + self.assertEqual(999, inst[1]) + + inst2 = my_record(1, 999, 3) + self.assertTrue(inst == inst2) + inst2[1] = 3 + self.assertFalse(inst == inst2) + + inst3 = my_record(a=1, c=3) + inst.b = 55 + self.assertEqual(inst, inst3) + + def testRecordUnique(self): + record1 = utils.make_record('record1', ['a', 'b', 'c']) + record2 = utils.make_record('record2', ['a', 'b', 'c']) + self.assertNotEqual(record1(1, 2, 3), record2(1, 2, 3)) + self.assertEqual(record1(1, 2, 3), record1(1, 2, 3)) + + def testTupleToRecord(self): + my_record = utils.make_record('my_record', ['a', 'b', 'c']) + inst = utils.tuple_to_record((5, 6, 7), my_record) + self.assertEqual(my_record(5, 6, 7), inst) + + def testRecordErrors(self): + my_record = utils.make_record('my_record', ['a', 'b', 'c'], {'b': 10}) + + with self.assertRaises(ValueError): + my_record(c=5) # Did not provide required argument 'a'. + with self.assertRaises(ValueError): + my_record(1, 2, 3, 4) # Too many arguments. 
+ + def testRandomQueue(self): + np.random.seed(567890) + queue = utils.RandomQueue(5) + queue.push(5) + queue.push(6) + queue.push(7) + queue.push(8) + queue.push(9) + queue.push(10) + self.assertTrue(5 not in queue) + sample = queue.random_sample(1000) + self.assertEqual(1000, len(sample)) + self.assertEqual([6, 7, 8, 9, 10], sorted(np.unique(sample).tolist())) + + def testMaxUniquePriorityQueue(self): + queue = utils.MaxUniquePriorityQueue(5) + queue.push(1.0, 'string 1') + queue.push(-0.5, 'string 2') + queue.push(0.5, 'string 3') + self.assertEqual((-0.5, 'string 2', None), queue.pop()) + queue.push(0.1, 'string 4') + queue.push(1.5, 'string 5') + queue.push(0.0, 'string 6') + queue.push(0.2, 'string 7') + self.assertEqual((1.5, 'string 5', None), queue.get_max()) + self.assertEqual((0.1, 'string 4', None), queue.get_min()) + self.assertEqual( + [('string 5', None), ('string 1', None), ('string 3', None), + ('string 7', None), ('string 4', None)], + list(queue.iter_in_order())) + + def testMaxUniquePriorityQueue_Duplicates(self): + queue = utils.MaxUniquePriorityQueue(5) + queue.push(0.0, 'string 1') + queue.push(0.0, 'string 2') + queue.push(0.0, 'string 3') + self.assertEqual((0.0, 'string 1', None), queue.pop()) + self.assertEqual((0.0, 'string 2', None), queue.pop()) + self.assertEqual((0.0, 'string 3', None), queue.pop()) + self.assertEqual(0, len(queue)) + queue.push(0.1, 'string 4') + queue.push(1.5, 'string 5') + queue.push(0.3, 'string 6') + queue.push(0.2, 'string 7') + queue.push(0.0, 'string 8') + queue.push(1.5, 'string 5') + queue.push(1.5, 'string 5') + self.assertEqual((1.5, 'string 5', None), queue.get_max()) + self.assertEqual((0.0, 'string 8', None), queue.get_min()) + self.assertEqual( + [('string 5', None), ('string 6', None), ('string 7', None), + ('string 4', None), ('string 8', None)], + list(queue.iter_in_order())) + + def testMaxUniquePriorityQueue_ExtraData(self): + queue = utils.MaxUniquePriorityQueue(5) + queue.push(1.0, 'string 1', [1, 2, 3]) + queue.push(0.5, 'string 2', [4, 5, 6]) + queue.push(0.5, 'string 3', [7, 8, 9]) + queue.push(0.5, 'string 2', [10, 11, 12]) + self.assertEqual((0.5, 'string 2', [4, 5, 6]), queue.pop()) + self.assertEqual((0.5, 'string 3', [7, 8, 9]), queue.pop()) + self.assertEqual((1.0, 'string 1', [1, 2, 3]), queue.pop()) + self.assertEqual(0, len(queue)) + queue.push(0.5, 'string 2', [10, 11, 12]) + self.assertEqual((0.5, 'string 2', [10, 11, 12]), queue.pop()) + + def testRouletteWheel(self): + random.seed(12345678987654321) + r = utils.RouletteWheel() + self.assertTrue(r.is_empty()) + with self.assertRaises(RuntimeError): + r.sample() # Cannot sample when empty. + self.assertEqual(0, r.total_weight) + self.assertEqual(True, r.add('a', 0.1)) + self.assertFalse(r.is_empty()) + self.assertEqual(0.1, r.total_weight) + self.assertEqual(True, r.add('b', 0.01)) + self.assertEqual(0.11, r.total_weight) + self.assertEqual(True, r.add('c', 0.5)) + self.assertEqual(True, r.add('d', 0.1)) + self.assertEqual(True, r.add('e', 0.05)) + self.assertEqual(True, r.add('f', 0.03)) + self.assertEqual(True, r.add('g', 0.001)) + self.assertEqual(0.791, r.total_weight) + self.assertFalse(r.is_empty()) + + # Check that sampling is correct. 
+ obj, weight = r.sample() + self.assertTrue(isinstance(weight, float), 'Type: %s' % type(weight)) + self.assertTrue((obj, weight) in r) + for obj, weight in r.sample_many(100): + self.assertTrue(isinstance(weight, float), 'Type: %s' % type(weight)) + self.assertTrue((obj, weight) in r) + + # Check that sampling distribution is correct. + n = 1000000 + c = Counter(r.sample_many(n)) + for obj, w in r: + estimated_w = c[(obj, w)] / float(n) * r.total_weight + self.assertTrue( + np.isclose(w, estimated_w, atol=1e-3), + 'Expected %s, got %s, for object %s' % (w, estimated_w, obj)) + + def testRouletteWheel_AddMany(self): + random.seed(12345678987654321) + r = utils.RouletteWheel() + self.assertTrue(r.is_empty()) + with self.assertRaises(RuntimeError): + r.sample() # Cannot sample when empty. + self.assertEqual(0, r.total_weight) + count = r.add_many( + ['a', 'b', 'c', 'd', 'e', 'f', 'g'], + [0.1, 0.01, 0.5, 0.1, 0.05, 0.03, 0.001]) + self.assertEqual(7, count) + self.assertFalse(r.is_empty()) + self.assertEqual(0.791, r.total_weight) + + # Adding no items is allowed. + count = r.add_many([], []) + self.assertEqual(0, count) + self.assertFalse(r.is_empty()) + self.assertEqual(0.791, r.total_weight) + + # Check that sampling is correct. + obj, weight = r.sample() + self.assertTrue(isinstance(weight, float), 'Type: %s' % type(weight)) + self.assertTrue((obj, weight) in r) + for obj, weight in r.sample_many(100): + self.assertTrue(isinstance(weight, float), 'Type: %s' % type(weight)) + self.assertTrue((obj, weight) in r) + + # Check that sampling distribution is correct. + n = 1000000 + c = Counter(r.sample_many(n)) + for obj, w in r: + estimated_w = c[(obj, w)] / float(n) * r.total_weight + self.assertTrue( + np.isclose(w, estimated_w, atol=1e-3), + 'Expected %s, got %s, for object %s' % (w, estimated_w, obj)) + + def testRouletteWheel_AddZeroWeights(self): + r = utils.RouletteWheel() + self.assertEqual(True, r.add('a', 0)) + self.assertFalse(r.is_empty()) + self.assertEqual(4, r.add_many(['b', 'c', 'd', 'e'], [0, 0.1, 0, 0])) + self.assertEqual( + [('a', 0.0), ('b', 0.0), ('c', 0.1), ('d', 0.0), ('e', 0.0)], + list(r)) + + def testRouletteWheel_UniqueMode(self): + random.seed(12345678987654321) + r = utils.RouletteWheel(unique_mode=True) + self.assertEqual(True, r.add([1, 2, 3], 1, 'a')) + self.assertEqual(True, r.add([4, 5], 0.5, 'b')) + self.assertEqual(False, r.add([1, 2, 3], 1.5, 'a')) + self.assertEqual( + [([1, 2, 3], 1.0), ([4, 5], 0.5)], + list(r)) + self.assertEqual(1.5, r.total_weight) + self.assertEqual( + 2, + r.add_many( + [[5, 6, 2, 3], [1, 2, 3], [8], [1, 2, 3]], + [0.1, 0.2, 0.1, 2.0], + ['c', 'a', 'd', 'a'])) + self.assertEqual( + [([1, 2, 3], 1.0), ([4, 5], 0.5), ([5, 6, 2, 3], 0.1), ([8], 0.1)], + list(r)) + self.assertTrue(np.isclose(1.7, r.total_weight)) + self.assertEqual(0, r.add_many([], [], [])) # Adding no items is allowed. + with self.assertRaises(ValueError): + # Key not given. + r.add([7, 8, 9], 2.0) + with self.assertRaises(ValueError): + # Keys not given. 
+ r.add_many([[7, 8, 9], [10]], [2.0, 2.0]) + self.assertEqual(True, r.has_key('a')) + self.assertEqual(True, r.has_key('b')) + self.assertEqual(False, r.has_key('z')) + self.assertEqual(1.0, r.get_weight('a')) + self.assertEqual(0.5, r.get_weight('b')) + + r = utils.RouletteWheel(unique_mode=False) + self.assertEqual(True, r.add([1, 2, 3], 1)) + self.assertEqual(True, r.add([4, 5], 0.5)) + self.assertEqual(True, r.add([1, 2, 3], 1.5)) + self.assertEqual( + [([1, 2, 3], 1.0), ([4, 5], 0.5), ([1, 2, 3], 1.5)], + list(r)) + self.assertEqual(3, r.total_weight) + self.assertEqual( + 4, + r.add_many( + [[5, 6, 2, 3], [1, 2, 3], [8], [1, 2, 3]], + [0.1, 0.2, 0.1, 0.2])) + self.assertEqual( + [([1, 2, 3], 1.0), ([4, 5], 0.5), ([1, 2, 3], 1.5), + ([5, 6, 2, 3], 0.1), ([1, 2, 3], 0.2), ([8], 0.1), ([1, 2, 3], 0.2)], + list(r)) + self.assertTrue(np.isclose(3.6, r.total_weight)) + with self.assertRaises(ValueError): + # Key is given. + r.add([7, 8, 9], 2.0, 'a') + with self.assertRaises(ValueError): + # Keys are given. + r.add_many([[7, 8, 9], [10]], [2.0, 2.0], ['a', 'b']) + + def testRouletteWheel_IncrementalSave(self): + f = tempfile.NamedTemporaryFile() + r = utils.RouletteWheel(unique_mode=True, save_file=f.name) + entries = [ + ([1, 2, 3], 0.1, 'a'), + ([4, 5], 0.2, 'b'), + ([6], 0.3, 'c'), + ([7, 8, 9, 10], 0.25, 'd'), + ([-1, -2], 0.15, 'e'), + ([-3, -4, -5], 0.5, 'f')] + + self.assertTrue(r.is_empty()) + for i in range(0, len(entries), 2): + r.add(*entries[i]) + r.add(*entries[i + 1]) + r.incremental_save() + + r2 = utils.RouletteWheel(unique_mode=True, save_file=f.name) + self.assertEqual(i + 2, len(r2)) + count = 0 + for j, (obj, weight) in enumerate(r2): + self.assertEqual(entries[j][0], obj) + self.assertEqual(entries[j][1], weight) + self.assertEqual(weight, r2.get_weight(entries[j][2])) + count += 1 + self.assertEqual(i + 2, count) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/brain_coder/single_task/BUILD b/models/research/brain_coder/single_task/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..47e91b12b8ba40a2a9916a89375fbb773758d7cf --- /dev/null +++ b/models/research/brain_coder/single_task/BUILD @@ -0,0 +1,244 @@ +licenses(["notice"]) + +package(default_visibility = [ + "//learning/brain/research/neural_coder:__subpackages__", +]) + +load("@subpar//:subpar.bzl", "par_binary") + +par_binary( + name = "run", + srcs = ["run.py"], + deps = [ + ":defaults", + ":ga_train", + ":pg_train", + # absl dep :app + # absl dep /flags + # absl dep /logging + ], +) + +par_binary( + name = "tune", + srcs = ["tune.py"], + deps = [ + ":defaults", + ":run", + # file dep + # absl dep :app + # absl dep /flags + # absl dep /logging + # numpy dep + # tensorflow dep + ], +) + +py_library( + name = "ga_train", + srcs = ["ga_train.py"], + deps = [ + ":data", + ":defaults", + ":ga_lib", + ":results_lib", + # file dep + # absl dep /flags + # absl dep /logging + # numpy dep + # tensorflow dep + "//common:utils", # project + ], +) + +py_library( + name = "ga_lib", + srcs = ["ga_lib.py"], + deps = [ + ":misc", + # absl dep /flags + # absl dep /logging + # numpy dep + "//common:bf", # project + "//common:utils", # project + ], +) + +py_test( + name = "ga_train_test", + srcs = ["ga_train_test.py"], + deps = [ + ":defaults", + ":run", + # absl dep /flags + # tensorflow dep + ], +) + +py_library( + name = "pg_train", + srcs = ["pg_train.py"], + deps = [ + ":data", + ":defaults", + ":pg_agent", + ":results_lib", + # file dep + # absl dep /flags + # absl dep 
/logging + # tensorflow dep + # tensorflow internal dep # build_cleaner: keep + ], +) + +py_library( + name = "pg_agent", + srcs = ["pg_agent.py"], + deps = [ + ":misc", + # file dep + # absl dep /logging + # numpy dep + # tensorflow dep + "//common:rollout", # project + "//common:utils", # project + ], +) + +py_test( + name = "pg_agent_test", + srcs = ["pg_agent_test.py"], + deps = [ + ":data", + ":defaults", + ":misc", + ":pg_agent", + ":pg_train", + # absl dep /logging + # numpy dep + # tensorflow dep + "//common:utils", # project + ], +) + +py_library( + name = "defaults", + srcs = ["defaults.py"], + deps = [ + # absl dep /logging + "//common:config_lib", # project + ], +) + +py_library( + name = "misc", + srcs = ["misc.py"], +) + +py_library( + name = "data", + srcs = ["data.py"], + deps = [ + ":code_tasks", + # absl dep /logging + ], +) + +py_library( + name = "code_tasks", + srcs = ["code_tasks.py"], + deps = [ + ":misc", + ":test_tasks", + # absl dep /logging + # numpy dep + "//common:bf", # project + "//common:reward", # project + ], +) + +py_test( + name = "code_tasks_test", + srcs = ["code_tasks_test.py"], + deps = [ + ":code_tasks", + ":defaults", + # numpy dep + # tensorflow dep + ], +) + +py_library( + name = "test_tasks", + srcs = ["test_tasks.py"], + deps = [ + ":misc", + "//common:reward", # project + ], +) + +py_test( + name = "test_tasks_test", + srcs = ["test_tasks_test.py"], + deps = [ + ":misc", + ":test_tasks", + # numpy dep + # tensorflow dep + ], +) + +py_test( + name = "pg_train_test", + size = "large", + srcs = ["pg_train_test.py"], + deps = [ + ":defaults", + ":run", + # absl dep /logging + # tensorflow dep + ], +) + +py_library( + name = "results_lib", + srcs = ["results_lib.py"], + deps = [ + # file dep + # tensorflow dep + ], +) + +py_test( + name = "results_lib_test", + srcs = ["results_lib_test.py"], + deps = [ + ":results_lib", + # tensorflow dep + ], +) + +par_binary( + name = "aggregate_experiment_results", + srcs = ["aggregate_experiment_results.py"], + deps = [ + ":misc", + ":results_lib", + # file dep + # absl dep :app + # absl dep /flags + # numpy dep + # tensorflow dep + ], +) + +par_binary( + name = "aggregate_tuning_results", + srcs = ["aggregate_tuning_results.py"], + deps = [ + # file dep + # absl dep :app + # absl dep /flags + # tensorflow dep + ], +) diff --git a/models/research/brain_coder/single_task/README.md b/models/research/brain_coder/single_task/README.md new file mode 100644 index 0000000000000000000000000000000000000000..69eaabcc6ccabada838a0a2a3f12fd7eed69744c --- /dev/null +++ b/models/research/brain_coder/single_task/README.md @@ -0,0 +1,192 @@ +# Experiments for ICLR 2018 paper. + +[Neural Program Synthesis with Priority Queue Training](https://arxiv.org/abs/1801.03526). + +Runs policy gradient (REINFORCE), priority queue training, genetic algorithm, +and uniform random search. + +Run all examples below out of your top-level repo directory, i.e. where your git +clone resides. + + +## Just tell me how to run something and see results +```bash +# These tasks are the fastest to learn. 'echo' and 'count-down' are very +# easy. run_eval_tasks.py will do most of the work to run all the jobs. +# Should take between 10 and 30 minutes. + +# How many repetitions each experiment will run. In the paper, we use 25. Less +# reps means faster experiments, but noisier results. +REPS=25 + +# Extra description in the job names for these experiments. Use this description +# to distinguish between multiple runs of the same experiment. 
+DESC="demo"
+
+# The tasks to run.
+TASKS="echo count-down"
+
+# The model types and max NPE.
+EXPS=( pg-20M topk-20M ga-20M rand-20M )
+
+# Where training data is saved. This is chosen by launch_training.sh. Custom
+# implementations of launch_training.sh may use different locations.
+MODELS_DIR="/tmp/models"
+
+# Run run_eval_tasks.py for each experiment name in EXPS.
+for exp in "${EXPS[@]}"
+do
+  ./single_task/run_eval_tasks.py \
+    --exp "$exp" --tasks $TASKS --desc "$DESC" --reps $REPS
+done
+
+# During training or after completion, run this to aggregate results into a
+# table. This is also useful for seeing how much progress has been made.
+# Make sure the arguments here match the settings used above.
+# Note: This can take a few minutes because it reads from every experiment
+# directory.
+bazel run single_task:aggregate_experiment_results -- \
+  --models_dir="$MODELS_DIR" \
+  --max_npe="20M" \
+  --task_list="$TASKS" \
+  --model_types="[('pg', '$DESC'), ('topk', '$DESC'), ('ga', '$DESC'),
+                  ('rand', '$DESC')]" \
+  --csv_file="/tmp/results_table.csv"
+```
+
+
+## Reproduce tuning results in paper
+```bash
+bazel build -c opt single_task:tune.par
+
+# PG and TopK Tuning.
+MAX_NPE=5000000
+CONFIG="
+env=c(task_cycle=['reverse-tune','remove-char-tune']),
+agent=c(
+  algorithm='pg',
+  grad_clip_threshold=50.0,param_init_factor=0.5,entropy_beta=0.05,lr=1e-5,
+  optimizer='rmsprop',ema_baseline_decay=0.99,topk_loss_hparam=0.0,topk=0,
+  replay_temperature=1.0,alpha=0.0,eos_token=False),
+timestep_limit=50,batch_size=64"
+
+./single_task/launch_tuning.sh \
+  --job_name="iclr_pg_gridsearch.reverse-remove" \
+  --config="$CONFIG" \
+  --max_npe="$MAX_NPE" \
+  --num_workers_per_tuner=1 \
+  --num_ps_per_tuner=0 \
+  --num_tuners=1 \
+  --num_repetitions=50 \
+  --hparam_space_type="pg" \
+  --stop_on_success=true
+./single_task/launch_tuning.sh \
+  --job_name="iclr_pg_topk_gridsearch.reverse-remove" \
+  --config="$CONFIG" \
+  --max_npe="$MAX_NPE" \
+  --num_workers_per_tuner=1 \
+  --num_ps_per_tuner=0 \
+  --num_tuners=1 \
+  --num_repetitions=50 \
+  --hparam_space_type="pg-topk" \
+  --fixed_hparams="topk=10" \
+  --stop_on_success=true
+./single_task/launch_tuning.sh \
+  --job_name="iclr_topk_gridsearch.reverse-remove" \
+  --config="$CONFIG" \
+  --max_npe="$MAX_NPE" \
+  --num_workers_per_tuner=1 \
+  --num_ps_per_tuner=0 \
+  --num_tuners=1 \
+  --num_repetitions=50 \
+  --hparam_space_type="topk" \
+  --fixed_hparams="topk=10" \
+  --stop_on_success=true
+
+# GA Tuning.
+CONFIG="
+env=c(task_cycle=['reverse-tune','remove-char-tune']),
+agent=c(algorithm='ga'),
+timestep_limit=50"
+./single_task/launch_tuning.sh \
+  --job_name="iclr_ga_gridsearch.reverse-remove" \
+  --config="$CONFIG" \
+  --max_npe="$MAX_NPE" \
+  --num_workers_per_tuner=25 \
+  --num_ps_per_tuner=0 \
+  --num_tuners=1 \
+  --num_repetitions=50 \
+  --hparam_space_type="ga" \
+  --stop_on_success=true
+
+# Aggregate tuning results. Run after tuning jobs complete.
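+# Each command below prints the completed trials for one grid search, sorted
+# by tuning objective (highest first).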
+bazel run -c opt single_task:aggregate_tuning_results -- \ + --tuning_dir="$MODELS_DIR/iclr_pg_gridsearch.reverse-remove" +bazel run -c opt single_task:aggregate_tuning_results -- \ + --tuning_dir="$MODELS_DIR/iclr_pg_topk_gridsearch.reverse-remove" +bazel run -c opt single_task:aggregate_tuning_results -- \ + --tuning_dir="$MODELS_DIR/iclr_topk_gridsearch.reverse-remove" +bazel run -c opt single_task:aggregate_tuning_results -- \ + --tuning_dir="$MODELS_DIR/iclr_ga_gridsearch.reverse-remove" +``` + +## Reproduce eval results in paper +```bash +DESC="v0" # Description for each experiment. "Version 0" is a good default. +EXPS=( pg-5M topk-5M ga-5M rand-5M pg-20M topk-20M ga-20M rand-20M ) +for exp in "${EXPS[@]}" +do + ./single_task/run_eval_tasks.py \ + --exp "$exp" --iclr_tasks --desc "$DESC" +done +``` + +## Run single experiment +```bash +EXP="topk-20M" # Learning algorithm + max-NPE +TASK="reverse" # Coding task +DESC="v0" # Description for each experiment. "Version 0" is a good default. +./single_task/run_eval_tasks.py \ + --exp "$EXP" --task "$TASK" --desc "$DESC" +``` + +## Fetch eval results into a table +```bash +# These arguments should match the settings you used to run the experiments. +MODELS_DIR="/tmp/models" +MAX_NPE="20M" +DESC="v0" # Same description used in the experiments. +# MODEL_TYPES specifies each model type and the description used in their +# experiments. +MODEL_TYPES="[('pg', '$DESC'), ('topk', '$DESC'), + ('ga', '$DESC'), ('rand', '$DESC')]" +TASKS="" # Empty string will default to all ICLR tasks. +# To specify custom task list, give task names separated by spaces. Example: +# TASKS="reverse remove-char" +bazel run single_task:aggregate_experiment_results -- \ + --models_dir="$MODELS_DIR" \ + --max_npe="$MAX_NPE" \ + --task_list="$TASKS" \ + --model_types="$MODEL_TYPES" \ + --csv_file="/tmp/results_table.csv" +``` + +## Reproduce shortest code examples in paper +```bash +# Maximum NPE is higher here. We only do 1 repetition, and the algorithm needs +# time to simplify its solution. +MODELS_DIR="/tmp/models" +NPE="500M" +DESC="short-code" +./single_task/run_eval_tasks.py \ + --exp "simpl-$NPE" --desc "$DESC" --iclr_tasks --reps 1 + +# Aggregate best code strings. Run after training completes. +TASKS="" # Empty string. Will default to all ICLR tasks. +bazel run single_task:aggregate_experiment_results -- \ + --models_dir="$MODELS_DIR" \ + --max_npe="$NPE" \ + --task_list="$TASKS" \ + --model_types="[('topk', '$DESC')]" \ + --data=code +``` diff --git a/models/research/brain_coder/single_task/aggregate_experiment_results.py b/models/research/brain_coder/single_task/aggregate_experiment_results.py new file mode 100644 index 0000000000000000000000000000000000000000..f106253004b3bbe1ff32443c41b8999b1c9e96f6 --- /dev/null +++ b/models/research/brain_coder/single_task/aggregate_experiment_results.py @@ -0,0 +1,380 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +r"""This script crawls experiment directories for results and aggregates them. 
+ +Usage example: + +MODELS_DIR="/tmp/models" +bazel run single_task:aggregate_experiment_results -- \ + --models_dir="$MODELS_DIR" \ + --max_npe="20M" \ + --task_list="add echo" \ + --model_types="[('topk', 'v0'), ('ga', 'v0')]" \ + --csv_file=/tmp/results_table.csv +""" + +import ast +from collections import namedtuple +import csv +import os +import re +import StringIO +import sys + +from absl import app +from absl import flags +import numpy as np +import tensorflow as tf + +from single_task import misc # brain coder +from single_task import results_lib # brain coder + +DEFAULT_MODELS = [('pg', 'v0'), ('topk', 'v0'), ('ga', 'v0'), ('rand', 'v0')] +DEFAULT_TASKS = [ + 'reverse', 'remove-char', 'count-char', 'add', 'bool-logic', 'print-hello', + 'echo-twice', 'echo-thrice', 'copy-reverse', 'zero-cascade', 'cascade', + 'shift-left', 'shift-right', 'riffle', 'unriffle', 'middle-char', + 'remove-last', 'remove-last-two', 'echo-alternating', 'echo-half', 'length', + 'echo-second-seq', 'echo-nth-seq', 'substring', 'divide-2', 'dedup'] + +FLAGS = flags.FLAGS +flags.DEFINE_string( + 'models_dir', '', + 'Absolute path where results folders are found.') +flags.DEFINE_string( + 'exp_prefix', 'bf_rl_iclr', + 'Prefix for all experiment folders.') +flags.DEFINE_string( + 'max_npe', '5M', + 'String representation of max NPE of the experiments.') +flags.DEFINE_spaceseplist( + 'task_list', DEFAULT_TASKS, + 'List of task names separated by spaces. If empty string, defaults to ' + '`DEFAULT_TASKS`. These are the rows of the results table.') +flags.DEFINE_string( + 'model_types', str(DEFAULT_MODELS), + 'String representation of a python list of 2-tuples, each a model_type + ' + 'job description pair. Descriptions allow you to choose among different ' + 'runs of the same experiment. These are the columns of the results table.') +flags.DEFINE_string( + 'csv_file', '/tmp/results_table.csv', + 'Where to write results table. Format is CSV.') +flags.DEFINE_enum( + 'data', 'success_rates', ['success_rates', 'code'], + 'What type of data to aggregate.') + + +def make_csv_string(table): + """Convert 2D list to CSV string.""" + s = StringIO.StringIO() + writer = csv.writer(s) + writer.writerows(table) + value = s.getvalue() + s.close() + return value + + +def process_results(metrics): + """Extract useful information from given metrics. + + Args: + metrics: List of results dicts. These should have been written to disk by + training jobs. + + Returns: + Dict mapping stats names to values. + + Raises: + ValueError: If max_npe or max_global_repetitions values are inconsistant + across dicts in the `metrics` list. + """ + count = len(metrics) + success_count = 0 + total_npe = 0 # Counting NPE across all runs. + success_npe = 0 # Counting NPE in successful runs only. + max_npe = 0 + max_repetitions = 0 + for metric_dict in metrics: + if not max_npe: + max_npe = metric_dict['max_npe'] + elif max_npe != metric_dict['max_npe']: + raise ValueError( + 'Invalid experiment. Different reps have different max-NPE settings.') + if not max_repetitions: + max_repetitions = metric_dict['max_global_repetitions'] + elif max_repetitions != metric_dict['max_global_repetitions']: + raise ValueError( + 'Invalid experiment. 
Different reps have different num-repetition ' + 'settings.') + if metric_dict['found_solution']: + success_count += 1 + success_npe += metric_dict['npe'] + total_npe += metric_dict['npe'] + stats = {} + stats['max_npe'] = max_npe + stats['max_repetitions'] = max_repetitions + stats['repetitions'] = count + stats['successes'] = success_count # successful reps + stats['failures'] = count - success_count # failed reps + stats['success_npe'] = success_npe + stats['total_npe'] = total_npe + if success_count: + # Only successful runs counted. + stats['avg_success_npe'] = stats['success_npe'] / float(success_count) + else: + stats['avg_success_npe'] = 0.0 + if count: + stats['success_rate'] = success_count / float(count) + stats['avg_total_npe'] = stats['total_npe'] / float(count) + else: + stats['success_rate'] = 0.0 + stats['avg_total_npe'] = 0.0 + + return stats + + +ProcessedResults = namedtuple('ProcessedResults', ['metrics', 'processed']) + + +def get_results_for_experiment( + models_dir, task_name, model_type='pg', max_npe='5M', desc='v0', + name_prefix='bf_rl_paper', extra_desc=''): + """Get and process results for a given experiment. + + An experiment is a set of runs with the same hyperparameters and environment. + It is uniquely specified by a (task_name, model_type, max_npe) triple, as + well as an optional description. + + We assume that each experiment has a folder with the same name as the job that + ran the experiment. The name is computed by + "%name_prefix%.%desc%-%max_npe%_%task_name%". + + Args: + models_dir: Parent directory containing experiment folders. + task_name: String name of task (the coding env). See code_tasks.py or + run_eval_tasks.py + model_type: Name of the algorithm, such as 'pg', 'topk', 'ga', 'rand'. + max_npe: String SI unit representation of the maximum NPE threshold for the + experiment. For example, "5M" means 5 million. + desc: Description. + name_prefix: Prefix of job names. Normally leave this as default. + extra_desc: Optional extra description at the end of the job name. + + Returns: + ProcessedResults namedtuple instance, containing + metrics: Raw dicts read from disk. + processed: Stats computed by `process_results`. + + Raises: + ValueError: If max_npe in the metrics does not match NPE in the experiment + folder name. + """ + folder = name_prefix + '.{0}.{1}-{2}_{3}'.format(desc, model_type, max_npe, + task_name) + if extra_desc: + folder += '.' + extra_desc + + results = results_lib.Results(os.path.join(models_dir, folder)) + metrics, _ = results.read_all() + processed = process_results(metrics) + if (not np.isclose(processed['max_npe'], misc.si_to_int(max_npe)) + and processed['repetitions']): + raise ValueError( + 'Invalid experiment. Max-NPE setting does not match expected max-NPE ' + 'in experiment name.') + return ProcessedResults(metrics=metrics, processed=processed) + + +BestCodeResults = namedtuple( + 'BestCodeResults', + ['code', 'reward', 'npe', 'folder', 'finished', 'error']) + + +class BestCodeResultError(object): + success = 0 + no_solution_found = 1 + experiment_does_not_exist = 2 + + +def get_best_code_for_experiment( + models_dir, task_name, model_type='pg', max_npe='5M', desc=0, + name_prefix='bf_rl_paper', extra_desc=''): + """Like `get_results_for_experiment`, but fetches the code solutions.""" + folder = name_prefix + '.{0}.{1}-{2}_{3}'.format(desc, model_type, max_npe, + task_name) + if extra_desc: + folder += '.' 
+ extra_desc + + log_dir = os.path.join(models_dir, folder, 'logs') + search_regex = r'^solutions_([0-9])+\.txt$' + try: + all_children = tf.gfile.ListDirectory(log_dir) + except tf.errors.NotFoundError: + return BestCodeResults( + code=None, reward=0.0, npe=0, folder=folder, finished=False, + error=BestCodeResultError.experiment_does_not_exist) + solution_files = [ + fname for fname in all_children if re.search(search_regex, fname)] + max_reward = 0.0 + npe = 0 + best_code = None + for fname in solution_files: + with tf.gfile.FastGFile(os.path.join(log_dir, fname), 'r') as reader: + results = [ast.literal_eval(entry) for entry in reader] + for res in results: + if res['reward'] > max_reward: + best_code = res['code'] + max_reward = res['reward'] + npe = res['npe'] + error = ( + BestCodeResultError.success if best_code + else BestCodeResultError.no_solution_found) + try: + # If there is a status.txt file, check if it contains the status of the job. + with tf.gfile.FastGFile(os.path.join(log_dir, 'status.txt'), 'r') as f: + # Job is done, so mark this experiment as finished. + finished = f.read().lower().strip() == 'done' + except tf.errors.NotFoundError: + # No status file has been written, so the experiment is not done. No need to + # report an error here, because we do not require that experiment jobs write + # out a status.txt file until they have finished. + finished = False + return BestCodeResults( + code=best_code, reward=max_reward, npe=npe, folder=folder, + finished=finished, error=error) + + +def make_results_table( + models=None, + tasks=None, + max_npe='5M', + name_prefix='bf_rl_paper', + extra_desc='', + models_dir='/tmp'): + """Creates a table of results: algorithm + version by tasks. + + Args: + models: The table columns. A list of (algorithm, desc) tuples. + tasks: The table rows. List of task names. + max_npe: String SI unit representation of the maximum NPE threshold for the + experiment. For example, "5M" means 5 million. All entries in the table + share the same max-NPE. + name_prefix: Name prefix used in logging directory for the experiment. + extra_desc: Extra description added to name of logging directory for the + experiment. + models_dir: Parent directory containing all experiment folders. + + Returns: + A 2D list holding the table cells. 
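+    The first row holds the max-NPE setting and one '<model> (<desc>)' header
+    per model; the second row labels each model's three columns (reps, success
+    rate, avg NPE); each remaining row holds those three stats for one task.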
+ """ + if models is None: + models = DEFAULT_MODELS + if tasks is None: + tasks = DEFAULT_TASKS + model_results = {} + for model_type, desc in models: + model_results[model_type] = { + tname: get_results_for_experiment( + models_dir, tname, model_type, max_npe, desc, + name_prefix=name_prefix, extra_desc=extra_desc + ).processed + for tname in tasks} + + def info(stats): + return [str(stats['repetitions']), + '%.2f' % stats['success_rate'], + str(int(stats['avg_total_npe']))] + + rows = [['max NPE: ' + max_npe] + + misc.flatten([['{0} ({1})'.format(m, d), '', ''] + for m, d in models])] + rows.append( + [''] + misc.flatten([['reps', 'success rate', 'avg NPE'] + for _ in models])) + for tname in tasks: + rows.append( + [tname] + + misc.flatten([info(model_results[model][tname]) + for model, _ in models])) + + return rows + + +def print_results_table(results_table): + """Print human readable results table to stdout.""" + print('') + print('=== Results Table ===') + print('Format: # reps [success rate, avg total NPE]') + + def info_str(info_row): + # num_runs (success_rate, avg_total_npe) + if not info_row[0]: + return '0' + return '%s [%s, %s]' % (str(info_row[0]).ljust(2), info_row[1], info_row[2]) + + nc = len(results_table[0]) # num cols + out_table = [ + [results_table[0][0]] + [results_table[0][i] for i in range(1, nc, 3)]] + for row in results_table[2:]: + out_table.append([row[0]] + [info_str(row[i:i+3]) for i in range(1, nc, 3)]) + + nc = len(out_table[0]) # num cols + col_widths = [max(len(row[col]) for row in out_table) for col in range(nc)] + + table_string = '' + for row in out_table: + table_string += ''.join( + [row[c].ljust(col_widths[c] + 2) for c in range(nc)]) + '\n' + + print(table_string) + + +def main(argv): + del argv # Unused. + + name_prefix = FLAGS.exp_prefix + print('Experiments prefix: %s' % name_prefix) + + model_types = ast.literal_eval(FLAGS.model_types) + + if FLAGS.data == 'success_rates': + results_table = make_results_table( + models=model_types, tasks=FLAGS.task_list, max_npe=FLAGS.max_npe, + models_dir=FLAGS.models_dir, + name_prefix=name_prefix, extra_desc='') + with tf.gfile.FastGFile(FLAGS.csv_file, 'w') as f: + f.write(make_csv_string(results_table)) + + print_results_table(results_table) + else: + # Best code + print('* = experiment is still running') + print('') + print('=== Best Synthesized Code ===') + for model_type, desc in model_types: + print('%s (%s)' % (model_type, desc)) + sys.stdout.flush() + for tname in FLAGS.task_list: + res = get_best_code_for_experiment( + FLAGS.models_dir, tname, model_type, FLAGS.max_npe, desc, + name_prefix=name_prefix, extra_desc='') + unfinished_mark = '' if res.finished else ' *' + tname += unfinished_mark + if res.error == BestCodeResultError.success: + print(' %s' % tname) + print(' %s' % res.code) + print(' R=%.6f, NPE=%s' % (res.reward, misc.int_to_si(res.npe))) + elif res.error == BestCodeResultError.experiment_does_not_exist: + print(' Experiment does not exist. 
Check arguments.') + print(' Experiment folder: %s' % res.folder) + break + else: + print(' %s' % tname) + print(' (none)') + sys.stdout.flush() + + +if __name__ == '__main__': + app.run(main) diff --git a/models/research/brain_coder/single_task/aggregate_tuning_results.py b/models/research/brain_coder/single_task/aggregate_tuning_results.py new file mode 100644 index 0000000000000000000000000000000000000000..bb2e008ce583afbea8acabfe1ed8ccf264698f5e --- /dev/null +++ b/models/research/brain_coder/single_task/aggregate_tuning_results.py @@ -0,0 +1,71 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +r"""After running tuning, use this script to aggregate the results. + +Usage: + +OUT_DIR="" +bazel run -c opt single_task:aggregate_tuning_results -- \ + --alsologtostderr \ + --tuning_dir="$OUT_DIR" +""" + +import ast +import os + +from absl import app +from absl import flags +import tensorflow as tf + + +FLAGS = flags.FLAGS +flags.DEFINE_string( + 'tuning_dir', '', + 'Absolute path where results tuning trial folders are found.') + + +def main(argv): + del argv # Unused. + + try: + trial_dirs = tf.gfile.ListDirectory(FLAGS.tuning_dir) + except tf.errors.NotFoundError: + print('Tuning directory %s does not exist.' % (FLAGS.tuning_dir,)) + return + + metrics = [] + for trial_dir in trial_dirs: + tuning_results_file = os.path.join( + FLAGS.tuning_dir, trial_dir, 'tuning_results.txt') + if tf.gfile.Exists(tuning_results_file): + with tf.gfile.FastGFile(tuning_results_file, 'r') as reader: + for line in reader: + metrics.append(ast.literal_eval(line.replace(': nan,', ': 0.0,'))) + + if not metrics: + print('No trials found.') + return + + num_trials = [m['num_trials'] for m in metrics] + assert all(n == num_trials[0] for n in num_trials) + num_trials = num_trials[0] + print('Found %d completed trials out of %d' % (len(metrics), num_trials)) + + # Sort by objective descending. + sorted_trials = sorted(metrics, key=lambda m: -m['objective']) + + for i, metrics in enumerate(sorted_trials): + hparams = metrics['hparams'] + keys = sorted(hparams.keys()) + print( + str(i).ljust(4) + ': ' + + '{0:.2f}'.format(metrics['objective']).ljust(10) + + '[' + + ','.join(['{}={}'.format(k, hparams[k]).ljust(24) for k in keys]) + + ']') + + +if __name__ == '__main__': + app.run(main) diff --git a/models/research/brain_coder/single_task/code_tasks.py b/models/research/brain_coder/single_task/code_tasks.py new file mode 100644 index 0000000000000000000000000000000000000000..27cc7ecd1c76f2d765692ce0a94acd1df04ff681 --- /dev/null +++ b/models/research/brain_coder/single_task/code_tasks.py @@ -0,0 +1,1381 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""Tasks for RL.""" + +import abc +import copy +import itertools +import random + +from absl import logging +import numpy as np +from six.moves import xrange + +from common import bf # brain coder +from common import reward as r # brain coder +from single_task import misc # brain coder +from single_task import test_tasks # brain coder + + +MAX_EXECUTION_STEPS = 5000 + + +def make_task(task_name, override_kwargs=None, max_code_length=100, + require_correct_syntax=False, + do_code_simplification=False, + correct_bonus=2.0, code_length_bonus=1.0): + """Make tasks with setting from paper.""" + logging.info('Making paper-config task.') + n = 16 # Number of test cases. 
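+  # Maps task name -> (task class, constructor kwargs). Tasks that generate
+  # their test cases dynamically take `n` (above) as the number of I/O
+  # examples to generate.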
+ task_mapping = { + 'print-hello': ( + PrintTask, dict(base=27, fixed_string=[8, 5, 12, 12, 15])), + 'print': (PrintIntTask, dict(base=256, fixed_string=[1, 2, 3, 4, 5])), + 'echo': (EchoTask, dict(base=27, min_length=1, max_length=6)), + 'remove-char': ( + RemoveCharTask, dict(base=256, n=n, min_len=1, max_len=6)), + 'reverse': ( + ReverseTask, dict(base=256, n=n, min_len=1, max_len=6)), + 'reverse-tune': ( + ReverseTaskV2, dict(base=256, reward_type='static-bylen')), + 'remove-char-tune': (RemoveCharTaskV2, dict(base=27)), + 'prefix': (CommonPrefixTask, dict(base=27)), + 'find': (FindSubStrTask, dict(base=27)), + 'sort3': (SortFixedTaskV2, dict(base=27, n=150, length=3)), + 'count-char': (CountCharTaskV2, dict(n=n, max_len=6)), + 'bool-logic': (BooleanLogicTask, dict()), + 'add': (AddTask, dict(n=9)), + 'echo-twice': (EchoTwiceTask, dict(n=n)), + 'echo-thrice': (EchoThriceTask, dict(n=n)), + 'copy-reverse': (CopyReverseTask, dict(n=n)), + 'zero-cascade': (EchoZeroCascadeTask, dict(n=n)), + 'cascade': (EchoCascadeTask, dict(n=n)), + 'shift-left': (ShiftLeftTask, dict(n=n)), + 'shift-right': (ShiftRightTask, dict(n=n)), + 'riffle': (RiffleTask, dict(n=n)), + 'unriffle': (UnriffleTask, dict(n=n)), + 'middle-char': (MiddleCharTask, dict(n=n)), + 'remove-last': (RemoveLastTask, dict(n=n)), + 'remove-last-two': (RemoveLastTwoTask, dict(n=n)), + 'echo-alternating': (EchoAlternatingTask, dict(n=n)), + 'echo-half': (EchoHalfTask, dict(n=n)), + 'length': (LengthTask, dict(n=n)), + 'echo-second-seq': (EchoSecondSequenceTask, dict(n=n)), + 'echo-nth-seq': (EchoNthSequenceTask, dict(n=n)), + 'substring': (SubstringTask, dict(n=n)), + 'divide-2': (Divide2Task, dict(n=n)), + 'dedup': (DedupTask, dict(n=n)), + 'remove-target-char': (RemoveTargetCharTask, dict(n=n)), + 'list-index': (ListIndexTask, dict(n=n)), + 'fib': (FibonacciTask, dict()), + 'count-down': (BottlesOfBeerTask, dict()), + 'split': (SplitTask, dict()), + 'trim-left': (TrimLeftTask, dict()), + 'circle-route': ( + JudgeRouteCircleTask, dict(n=100, max_len=32)), + 'multiply': (MultiplyTask, dict(n=100)), + 'divmod': (DivModTask, dict(n=100)), + } + + if task_name not in task_mapping: + # Test tasks. + if task_name == 'test-hill-climb': + return test_tasks.BasicTaskManager(test_tasks.HillClimbingTask()) + raise ValueError('Unknown task type "%s"' % task_name) + task_cls, kwargs = task_mapping[task_name] + + if override_kwargs: + if not isinstance(override_kwargs, dict): + raise ValueError( + 'override_kwargs must be a dict, got: %s', override_kwargs) + kwargs.update(override_kwargs) + + task = task_cls(**kwargs) + + reward_fn = r.absolute_distance_reward + # reward_fn = r.absolute_mod_distance_reward + # reward_fn = r.absolute_log_distance_reward + logging.info('Using reward function: %s', reward_fn.__name__) + + # We want reward with and without code simplification to be scaled the same + # way. Without code simplification, give the maximum code length bonus + # every time. 
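+  # (When min_code_length == max_code_length, MultiIOTaskManager treats the
+  # code length bonus as a constant; see _score_code below.)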
+ min_code_length = 0.0 if do_code_simplification else max_code_length + + return MultiIOTaskManager( + task=task, correct_bonus=correct_bonus, + code_length_bonus=code_length_bonus, + max_code_length=max_code_length, min_code_length=min_code_length, + reward_fn=reward_fn, require_correct_syntax=require_correct_syntax) + + +def concat(lists): + if not lists: + return [] + l = lists[0] + for k in lists[1:]: + l += k + return l + + +def concat_join(lists, sep): + if not lists: + return [] + l = lists[0] + for k in lists[1:]: + l += [sep] + k + return l + + +def clipped_linear(x, x0, y0, slope, y_range): + min_y, max_y = y_range + return min(max(slope * (x - x0) + y0, min_y), max_y) + + +class MultiIOTaskManager(object): + """Supports tasks which test the code with multiple I/O examples.""" + + def __init__(self, task, max_code_length=32, min_code_length=0, + max_execution_steps=MAX_EXECUTION_STEPS, correct_bonus=1.0, + code_length_bonus=1.0, failure_reward=-2.0, reward_fn=None, + require_correct_syntax=False): + assert isinstance(task, BaseTask) + self.task = task + self.max_code_length = max_code_length + self.min_code_length = min_code_length + self.max_execution_steps = max_execution_steps + self.require_correct_syntax = require_correct_syntax + self.correct_bonus = correct_bonus + self.code_length_bonus = code_length_bonus + self.failure_reward = failure_reward + self.time_penalty = ( + 1.0 / (max_code_length - min_code_length) + if max_code_length > min_code_length else 0.0) + if reward_fn is None: + self.reward_fn = r.absolute_distance_reward + else: + self.reward_fn = reward_fn + self.input_type = ( + task.input_type if hasattr(task, 'input_type') else misc.IOType.integer) + self.output_type = ( + task.output_type if hasattr(task, 'output_type') + else misc.IOType.integer) + self._compute_best_reward() + + def _compute_best_reward(self): + io_seqs = self.task.make_io_set() + reward = 0.0 + for _, output_seq in io_seqs: + reward += self.reward_fn(output_seq, output_seq, self.task.base) + reward += self.correct_bonus + reward += self.code_length_bonus # Bonus for shortest code. + self.best_reward = reward + self.good_reward = 0.75 * reward + logging.info('Known best reward: %.4f', self.best_reward) + + def _score_batch(self, code_strings): + return [self._score_code(code) for code in code_strings] + + def _score_code(self, code): + """Run test cases on code and compute reward. + + Args: + code: A single BF code string. + + Returns: + misc.RewardInfo namedtuple instance containing reward and code execution + information, including inputs, expected outputs, code outputs, input + and output types, and reason for the reward obtained. + """ + # Get list of 2-tuples, each containing an input sequence and an output + # sequence. + io_seqs = self.task.make_io_set() + terminal_reward = 0.0 + results = [] + reason = 'correct' + for input_seq, output_seq in io_seqs: + eval_result = bf.evaluate( + code, input_buffer=input_seq, timeout=0.1, + max_steps=self.max_execution_steps, + base=self.task.base, + require_correct_syntax=self.require_correct_syntax) + result, success = eval_result.output, eval_result.success + if not success: + # Code execution timed out. + terminal_reward = self.failure_reward + results = [] + reason = eval_result.failure_reason + break + else: + terminal_reward += self.reward_fn(result, output_seq, self.task.base) + if result == output_seq: + terminal_reward += self.correct_bonus # Bonus for correct answer. + + # Only add additional reward for shorter code. 
Subtracting reward + # interferes with the main objective. Only optimize for length once + # any solution is found. + if self.min_code_length == self.max_code_length: + terminal_reward += self.code_length_bonus + else: + terminal_reward += self.code_length_bonus * clipped_linear( + x=len(code), x0=self.min_code_length, y0=1.0, + slope=-self.time_penalty, y_range=(0.0, 1.0)) + + # reason remains 'correct' if it is already + elif reason == 'correct': + reason = 'wrong' + results.append(result) + + # Return list of rewards, one for each char in the code. All are 0 except + # for the terminal reward. + terminal_reward /= self.best_reward + return misc.RewardInfo( + episode_rewards=[0.0] * (len(code) - 1) + [terminal_reward], + input_case=misc.IOTuple(i for i, o in io_seqs), + correct_output=misc.IOTuple(o for i, o in io_seqs), + code_output=misc.IOTuple(results), + input_type=self.input_type, + output_type=self.output_type, + reason=reason) + + def rl_batch(self, batch_size): + """Produces list of reward functions. One for each program in the batch.""" + return [self._score_code] * batch_size + + +def conditional_overwrite(current_value, new_value, allowed_overwrite_values): + if current_value in allowed_overwrite_values: + return new_value + return current_value + + +class BaseTask(object): + """A coding task. + + All coding tasks should inherit this class. + """ + __metaclass__ = abc.ABCMeta + + def __init__(self, base=256): + self.base = base # All tasks must set the integer base that the expect. + + @abc.abstractmethod + def make_io_set(self): + """Generate a set of test cases for the task. + + Returns: + List of tuples, where each tuple is (input_case, output_case). + input_case and output_case are lists of integers. + """ + pass + + +# ============================================================================== +# ICLR tasks. +# ============================================================================== + + +class PrintTask(BaseTask): + """Print string coding task. + + Code needs to output a fixed string (given as a hyperparameter to the + task constructor). Program input is ignored. + """ + + def __init__(self, base, fixed_string=None): + super(type(self), self).__init__() + self.base = base # base includes EOS + self.eos = 0 + if fixed_string: + self.fixed_string = fixed_string + else: + self.fixed_string = [1, 2, 3, 0] # ABC + self.min_length = self.max_length = len(self.fixed_string) + + def make_io_set(self): + return [(list(), list(self.fixed_string))] + + +class RemoveCharTaskV2(BaseTask): + """Remove character coding task (version 2). + + Code needs to pipe input to output, but with all the 'A' (value 1) chars + removed. 'A' appears exactly once in each input. + + Test cases are hard-coded. + """ + + def __init__(self, base): + super(type(self), self).__init__() + self.base = base + self.eos = 0 + self.remove_char = 1 + assert base >= 27 + + def make_io_set(self): + rm = self.remove_char + return [ + ([rm, 0], [0]), + ([20, rm, 0], [20, 0]), + ([rm, 13, 0], [13, 0]), + ([6, rm, 17, 0], [6, 17, 0]), + ([rm, 11, 24, 0], [11, 24, 0]), + ([2, 16, 21, rm, 0], [2, 16, 21, 0]), + ([18, rm, 12, 26, 7, 0], [18, 12, 26, 7, 0]), + ([9, 10, 22, rm, 4, 0], [9, 10, 22, 4, 0])] + + +class RemoveCharTask(BaseTask): + """Remove character coding task. + + Code needs to pipe input to output, but with all the 'A' (value 1) chars + removed. 'A' appears at least once in each input. + + Test cases are dynamically generated, allowing for the number of test cases + to be a hyperparameter. 
+ """ + + def __init__(self, base, n, min_len, max_len): + super(type(self), self).__init__() + self.base = base + self.eos = 0 + self.remove_char = 1 + assert base >= 27 + self._io_pairs = self._make_io_examples(n, min_len, max_len) + + def _make_io_examples(self, n, min_len, max_len): + """Generate test cases for the task.""" + rand = random.Random(6849275409234) # Test cases are fixed, but varied. + io_examples = [] + for _ in xrange(n): + length = rand.randrange(min_len, max_len + 1) + rm_char_pos = rand.randrange(0, length) + input_seq = [rand.randrange(1, self.base) for _ in xrange(length)] + input_seq[rm_char_pos] = self.remove_char + output_seq = list(input_seq) + del output_seq[rm_char_pos] + output_seq.append(0) + io_examples.append((input_seq, output_seq)) + return io_examples + + def make_io_set(self): + return copy.deepcopy(self._io_pairs) + + +class ReverseTaskV2(BaseTask): + """Reverse string coding task (version 2). + + Code needs to pipe input to output, but in reverse order. + + Stochastic test case = new test case randomly generated for every run of + `make_io_set`, i.e. different test cases every time code is scored. + + Task supports different types of test cases: + rand-one: Code is scored on one stochastic test case. + rand-many: Code is scored on 5 stochastic test cases. + static-bylen: Code is scored on 5 static test cases. There is one test + case for string lengths 1 through 5. + rand-bylen: Code is scored on 5 stochastic test cases, where there is one + test case for string lengths 1 through 5. + """ + + def __init__(self, base, reward_type): + super(type(self), self).__init__() + self.base = base # base includes EOS + assert base >= 27 + self.eos = 0 + self.io_pair_fn = { + # One random example at a time. + 'rand-one': lambda: self._io_rand(1), + # K randomy examples at a time (any lengths). + 'rand-many': lambda: self._io_rand(5), + # Static examples, one for each length. + 'static-bylen': self._io_static_by_len, + # Random examples, one for each length. + 'rand-bylen': self._io_rand_by_len}[reward_type] + + def _make_io_examples(self, sequences): + outputs = [list(i) for i in sequences] + for o in outputs: + o.reverse() + o.append(0) + inputs = [i + [0] for i in sequences] + return zip(inputs, outputs) + + def _io_rand(self, k): + inputs = [(np.random.choice(26, random.randrange(1, 6)) + 1).tolist() + for _ in xrange(k)] + return self._make_io_examples(inputs) + + def _io_rand_by_len(self, k=5): + inputs = [(np.random.choice(26, length) + 1).tolist() + for length in xrange(1, k + 1)] + return self._make_io_examples(inputs) + + def _io_static_by_len(self): + return [ + ([7, 0], [7, 0]), + ([6, 2, 0], [2, 6, 0]), + ([5, 1, 10, 0], [10, 1, 5, 0]), + ([8, 6, 5, 15, 0], [15, 5, 6, 8, 0]), + ([10, 12, 5, 2, 7, 0], [7, 2, 5, 12, 10, 0])] + + def make_io_set(self): + return self.io_pair_fn() + + +class ReverseTask(BaseTask): + """Reverse string coding task. + + Code needs to pipe input to output, but in reverse order. + + Test cases are dynamically generated, allowing for the number of test cases + to be a hyperparameter. + """ + + def __init__(self, base, n, min_len, max_len): + super(type(self), self).__init__() + self.base = base # base includes EOS + assert base >= 27 + self.eos = 0 + self._io_pairs = self._make_io_examples(n, min_len, max_len) + + def _make_io_examples(self, n, min_len, max_len): + """Generate test cases for the task.""" + rand = random.Random(6849275409234) # Test cases are fixed, but varied. 
+ io_examples = [] + for _ in xrange(n): + length = rand.randrange(min_len, max_len + 1) + input_seq = [rand.randrange(1, self.base) for _ in xrange(length)] + output_seq = list(input_seq) + output_seq.reverse() + output_seq.append(0) + io_examples.append((input_seq, output_seq)) + return io_examples + + def make_io_set(self): + return copy.deepcopy(self._io_pairs) + + +class CommonPrefixTask(BaseTask): + """Common prefix coding task. + + Code needs to output the common prefix between two input lists. Input lists + are variable length, where each list ends with a 0. A common prefix is a + sequence which both lists start with. + """ + + def __init__(self, base): + super(type(self), self).__init__() + assert base >= 27 + self.base = base + self.eos = 0 + + def make_io_set(self): + return [ + ([12, 24, 18, 0, 12, 5, 0], [12, 0]), + ([1, 2, 3, 0, 1, 2, 17, 14, 0], [1, 2, 0]), + ([15, 2, 1, 9, 2, 0, 15, 2, 1, 25, 8, 14, 0], [15, 2, 1, 0]), + ([14, 9, 7, 8, 6, 16, 0, 14, 9, 7, 8, 8, 6, 8, 26, 0], + [14, 9, 7, 8, 0]), + ([12, 4, 16, 22, 1, 17, 0, 12, 4, 16, 22, 1, 8, 10, 0], + [12, 4, 16, 22, 1, 0])] + + +class CountCharTask(BaseTask): + + def __init__(self): + super(type(self), self).__init__() + self.base = 27 + self.eos = 0 + self.char = 1 + self.input_type = misc.IOType.string + self.output_type = misc.IOType.integer + + def make_io_set(self): + return [ + ([10, 0], [0]), + ([1, 0], [1]), + ([1, 1, 0], [2]), + ([11, 1, 0], [1]), + ([1, 24, 0], [1]), + ([13, 6, 0], [0]), + ([9, 2, 7, 0], [0]), + ([1, 24, 11, 0], [1]), + ([19, 1, 1, 0], [2]), + ([1, 6, 1, 0], [2]), + ([22, 16, 17, 9, 0], [0]), + ([1, 1, 1, 19, 0], [3]), + ([1, 1, 1, 1, 0], [4]), + ([9, 4, 19, 11, 5, 0], [0]), + ([24, 11, 26, 1, 15, 0], [1]), + ([1, 1, 20, 1, 1, 0], [4]), + ([1, 1, 1, 1, 1, 0], [5])] + + +class CountCharTaskV2(BaseTask): + """Count char coding task (version 2). + + Code must output the number of occurances of character 'A' (value 1) in an + input string. + + Test cases are dynamically generated, allowing for the number of test cases + to be a hyperparameter. + """ + + def __init__(self, n, max_len): + super(type(self), self).__init__() + self.base = 27 + self.eos = 0 + self.char = 1 + self.other_chars = [c for c in xrange(self.base) + if c not in (self.eos, self.char)] + self.input_type = misc.IOType.string + self.output_type = misc.IOType.integer + self._io_pairs = self._make_io_examples(n, max_len) + + def _make_io_examples(self, n, max_len): + """Generate test cases for the task.""" + rand = random.Random(6849275409234) # Test cases are fixed, but varied. + io_examples = [] + io_examples.append(([10, 0], [0])) + io_examples.append(([1, 0], [1])) + io_examples.append(([1, 1, 0], [2])) + io_examples.append(([9, 4, 19, 11, 5, 0], [0])) + io_examples.append(([24, 11, 26, 1, 15, 0], [1])) + for _ in xrange(n - 5): + length = rand.randrange(2, max_len + 1) + num_chars = rand.randrange(0, max_len + 1) + input_seq = [self.char] * num_chars + [0] * (length - num_chars) + rand.shuffle(input_seq) + for i in xrange(len(input_seq)): + if not input_seq[i]: + input_seq[i] = self.other_chars[rand.randrange(len(self.other_chars))] + output_seq = [num_chars] + io_examples.append((input_seq, output_seq)) + return io_examples + + def make_io_set(self): + return copy.deepcopy(self._io_pairs) + + +class AddTask(BaseTask): + """Addition coding task. + + Code needs to read in two integers and output their sum mod the BF base, + followed by a terminating 0. 
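+
+  For example, with base 256 the inputs [250, 10] should produce [4, 0],
+  since (250 + 10) mod 256 = 4.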
+ """ + + def __init__(self, n=16): + super(type(self), self).__init__() + self.base = 256 + self.input_type = misc.IOType.integer + self.output_type = misc.IOType.integer + self._io_pairs = self._make_io_examples(n) + + def _make_io_examples(self, n): + """Generate test cases for the task.""" + rand = random.Random(6849275409234) # Test cases are fixed, but varied. + io_examples = [ + ([4, 0], [4, 0]), + ([0, 5], [5, 0]), + ([1, 2], [3, 0]), + ([67, 21], [88, 0]), + ([55, 56], [111, 0]), + ([128, 33], [161, 0]), + ([221, 251], [216, 0]), + ([130, 127], [1, 0]), + ([255, 1], [0, 0])] + extra_examples = max(n - len(io_examples), 0) + for _ in xrange(extra_examples): + a = rand.randrange(256) + b = rand.randrange(256) + input_seq = [a, b] + output_seq = [(a + b) % 256, 0] + io_examples.append((input_seq, output_seq)) + return io_examples + + def make_io_set(self): + return copy.deepcopy(self._io_pairs) + + +class BooleanLogicTask(BaseTask): + """Boolean logic (truth table) coding task. + + Code needs to memorize a boolean truth table. Specifically, it must encode a + mapping from triple of bools to a single bool. + """ + + def __init__(self): + super(type(self), self).__init__() + self.base = 2 + self.input_type = misc.IOType.boolean + self.output_type = misc.IOType.boolean + # X(~Z) + (~Y)(~Z) + (~X)YZ + self._truth_fn = ( + lambda x, y, z: # pylint: disable=g-long-lambda + (x and not z) or (not y and not z) or (not x and y and z)) + self._test_cases = [ + ([x, y, z], [int(self._truth_fn(x, y, z))]) + for x, y, z in itertools.product(range(2), range(2), range(2))] + + def make_io_set(self): + return copy.deepcopy(self._test_cases) + + +# ------------------------------------------------------------------------------ +# The following tasks are generated from known BF solutions. This guarantees +# that each task can be solved within the maximum code length, and maximum +# execution steps. +# ------------------------------------------------------------------------------ + + +def default_input_fn_factory(min_length=1, max_length=6, base=256): + def _input_gen(rand): + l = rand.randrange(min_length, max_length + 1) + return [rand.randrange(base) for _ in xrange(l)] + return _input_gen + + +class KnownCodeBaseTask(BaseTask): + """These tasks generate their test cases from a known BF solution. + + This ensures that each task has a solution which is under the max character + length, and that it solves the test cases under the max number of execution + steps. + """ + + def __init__(self, code_solution, make_input_fn, n=100, base=256, + max_steps=5000, seed=6849275409234): + super(KnownCodeBaseTask, self).__init__() + # Make sure known solution is less than the code length used in experiments. + assert len(code_solution) < 100 + self.code_solution = code_solution + self.make_input_fn = make_input_fn + self.n = n + self.base = base + self.max_steps = max_steps + self.seed = seed + self._test_cases = list(self._test_case_generator(code_solution)) + + def _test_case_generator(self, code_solution): + rand = random.Random(self.seed) + for _ in xrange(self.n): + input_case = self.make_input_fn(rand) + result = bf.evaluate( + code_solution, input_buffer=input_case, max_steps=self.max_steps, + base=self.base, require_correct_syntax=False) + if not result.success: + raise RuntimeError( + 'Program must succeed. 
Failed on input: %s' % input_case) + yield input_case, result.output + + def make_io_set(self): + return copy.deepcopy(self._test_cases) + + +class EchoTwiceTask(KnownCodeBaseTask): + """Echo twice.""" + + def __init__(self, **kwargs): + super(type(self), self).__init__( + '>,.[>,.]<[<]>[.>].', + default_input_fn_factory(), + **kwargs) + + +class EchoThriceTask(KnownCodeBaseTask): + """Echo three times.""" + + def __init__(self, **kwargs): + super(type(self), self).__init__( + '>,.[>,.]<[<]>[.>].<[<]>[.>].', + default_input_fn_factory(), + **kwargs) + + +class CopyReverseTask(KnownCodeBaseTask): + """Echo forwards, backwards, and then forwards again.""" + + def __init__(self, **kwargs): + super(type(self), self).__init__( + '>,.[>,.]<[.<].>[.>].', + default_input_fn_factory(), + **kwargs) + + +class EchoZeroCascadeTask(KnownCodeBaseTask): + """Print k-th char with k zeros inbetween (1-indexed).""" + + def __init__(self, **kwargs): + super(type(self), self).__init__( + ',[.>[->+>.<<]>+[-<+>]<<,]', + default_input_fn_factory(), + **kwargs) + + +class EchoCascadeTask(KnownCodeBaseTask): + """Print k-th char k times (1-indexed).""" + + def __init__(self, **kwargs): + super(type(self), self).__init__( + ',>>+<<[>>[-<+>]<[->+<<.>]>+<<,].', + default_input_fn_factory(base=20), + **kwargs) + + +class ShiftLeftTask(KnownCodeBaseTask): + """Circulate shift input left.""" + + def __init__(self, **kwargs): + super(type(self), self).__init__( + ',>,[.,]<.,.', + default_input_fn_factory(), + **kwargs) + + +class ShiftRightTask(KnownCodeBaseTask): + """Circular shift input right.""" + + def __init__(self, **kwargs): + super(type(self), self).__init__( + '>,[>,]<.[-]<[<]>[.>].', + default_input_fn_factory(), + **kwargs) + + +class RiffleTask(KnownCodeBaseTask): + """Shuffle like a deck of cards. + + For input of length N, output values in the following index order: + N-1, 0, N-2, 1, N-3, 2, ... 
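+
+  For example, for an input of length 4 the values are emitted in index order
+  3, 0, 2, 1.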
+ """ + + def __init__(self, **kwargs): + super(type(self), self).__init__( + '>,[>,]<[.[-]<[<]>.[-]>[>]<]', + default_input_fn_factory(base=20, max_length=8), + **kwargs) + + +class UnriffleTask(KnownCodeBaseTask): + """Inverse of riffle.""" + + def __init__(self, **kwargs): + super(type(self), self).__init__( + '>,[>,[.[-]],]<[.<].', + default_input_fn_factory(base=20, max_length=8), + **kwargs) + + +class MiddleCharTask(KnownCodeBaseTask): + """Print middle char if length is odd, or 0 if even.""" + + def __init__(self, **kwargs): + super(type(self), self).__init__( + '>,[>,]<<[[>]<[,<[<]>,>[>]][>]<<]>.', + default_input_fn_factory(max_length=10), + **kwargs) + + +class RemoveLastTask(KnownCodeBaseTask): + """Remove last character.""" + + def __init__(self, **kwargs): + super(type(self), self).__init__( + ',>,[[<.[-]>[-<+>]],].', + default_input_fn_factory(base=20), + **kwargs) + + +class RemoveLastTwoTask(KnownCodeBaseTask): + """Remove last two characters.""" + + def __init__(self, **kwargs): + super(type(self), self).__init__( + ',>,>,[[<<.[-]>[-<+>]>[-<+>]],].', + default_input_fn_factory(base=10), + **kwargs) + + +class EchoAlternatingTask(KnownCodeBaseTask): + # Print even numbered chars first (0-indexed), then odd numbered chars + + def __init__(self, **kwargs): + super(type(self), self).__init__( + '>,[.,>,]<<[<]>[.>].', + default_input_fn_factory(base=20, max_length=8), + **kwargs) + + +class EchoHalfTask(KnownCodeBaseTask): + """Echo only first half of the input (round down when odd lengthed).""" + + def __init__(self, **kwargs): + super(type(self), self).__init__( + '>>+>,[[<]>+[>],]<[<]>-[-[-<<+>]<[>]>]<<[->+<]>[[>]>.,<+[<]>-].', + default_input_fn_factory(base=20, max_length=9), + **kwargs) + + +class LengthTask(KnownCodeBaseTask): + """Print length of the input sequence.""" + + def __init__(self, **kwargs): + super(type(self), self).__init__( + '>+>,[[<]>+[>],]<[<]>-.', + default_input_fn_factory(max_length=14), + **kwargs) + + +class EchoSecondSequenceTask(KnownCodeBaseTask): + """Echo second sequence. Sequences are separated by 0.""" + + def __init__(self, **kwargs): + def echo_second_gen(rand): + l = rand.randrange(1, 6) + x = [rand.randrange(256) for _ in xrange(l)] + l = rand.randrange(1, 6) + y = [rand.randrange(256) for _ in xrange(l)] + return x + [0] + y + [0] + super(type(self), self).__init__( + ',[,],[.,].', + echo_second_gen, + **kwargs) + + +class EchoNthSequenceTask(KnownCodeBaseTask): + """Echo n-th sequence (1-indexed). Sequences are separated by 0.""" + + def __init__(self, **kwargs): + def echo_nth_gen(rand): + k = rand.randrange(1, 7) + n = rand.randrange(1, k + 1) + x = [] + for _ in xrange(k): + l = rand.randrange(0, 4) + x += [rand.randrange(256) for _ in xrange(l)] + [0] + return [n] + x + super(type(self), self).__init__( + ',-[->,[,]<],[.,].', + echo_nth_gen, + **kwargs) + + +class SubstringTask(KnownCodeBaseTask): + """Echo substring. + + First two inputs are i and l, where i is the starting index (0-indexed) + and l is the length of the substring. 
+ """ + + def __init__(self, **kwargs): + def substring_gen(rand): + l = rand.randrange(2, 16) + i, j = sorted([rand.randrange(l), rand.randrange(l)]) + n = j - i + x = [rand.randrange(256) for _ in xrange(l)] + [0] + return [i, n] + x + super(type(self), self).__init__( + '>,<,>[->,<]>,<<[->>.,<<]', + substring_gen, + **kwargs) + + +class Divide2Task(KnownCodeBaseTask): + """Divide by 2 (integer floor division).""" + + def __init__(self, **kwargs): + def int_input_gen(rand): + return [rand.randrange(256)] + super(type(self), self).__init__( + ',[-[->>+<]>[<]<]>>.', + int_input_gen, + **kwargs) + + +class DedupTask(KnownCodeBaseTask): + """Deduplicate adjacent duplicate chars.""" + + def __init__(self, **kwargs): + def dedup_input_gen(rand): + np_random = np.random.RandomState(rand.randrange(2147483647)) + num_unique = rand.randrange(1, 5) + unique = np_random.choice(6, num_unique, replace=False) + 1 + return [v for v in unique for _ in xrange(rand.randrange(1, 5))] + [0] + super(type(self), self).__init__( + '>>,.[[-<+<+>>],[-<->]<[[-<->]<.>]<[->>+<<]>>]', + dedup_input_gen, + **kwargs) + + +# ============================================================================== +# Extra tasks. +# ============================================================================== + + +class PrintIntTask(BaseTask): + """Print integer coding task. + + Code needs to output a fixed single value (given as a hyperparameter to the + task constructor). Program input is ignored. + """ + + def __init__(self, base, fixed_string): + super(type(self), self).__init__() + self.base = base + self.eos = 0 + self.fixed_string = fixed_string + self.input_type = misc.IOType.integer + self.output_type = misc.IOType.integer + + def make_io_set(self): + return [(list(), list(self.fixed_string))] + + +class EchoTask(BaseTask): + """Echo string coding task. + + Code needs to pipe input to putput (without any modifications). + """ + + def __init__(self, base, min_length=1, max_length=5): + super(type(self), self).__init__() + self.base = base # base includes EOS + self.eos = 0 + self.min_length = min_length + self.max_length = max_length + self._io_pairs = self._make_io_examples(25) + + def _make_io_examples(self, n): + # Test cases are fixed, but varied. + np_random = np.random.RandomState(1234567890) + io_pairs = [] + for _ in xrange(n): + length = np_random.randint(self.min_length, self.max_length + 1) + input_seq = np_random.randint(1, self.base, length).tolist() + [self.eos] + output_seq = list(input_seq) + io_pairs.append((input_seq, output_seq)) + return io_pairs + + def make_io_set(self): + return copy.deepcopy(self._io_pairs) + + +class JudgeRouteCircleTask(BaseTask): + """Judge route circle coding task. + + Code needs to determine if the given route makes a closed loop. + Encoding: U = 1, R = 2, D = 3, L = 4. + + Based on + https://leetcode.com/problems/judge-route-circle/description/ + """ + base = 256 + input_type = misc.IOType.integer + output_type = misc.IOType.integer + + def __init__(self, n, max_len=12): + super(type(self), self).__init__() + self.eos = 0 + self._io_pairs = self._make_io_examples(n, max_len) + self.input_type = misc.IOType.integer + self.output_type = misc.IOType.integer + + def _solve(self, input_seq): + assert input_seq[-1] == 0 + pos = [0, 0] # (x, y) + for move in input_seq[:-1]: + assert 0 < move <= 4 + if move & 1 == 0: # Left or Right. + pos[0] += 3 - move # Add or subtract 1. + else: + pos[1] += 2 - move # Add or subtract 1. 
+ return [int(not pos[0] and not pos[1])] + + def _make_io_examples(self, n, max_len): + """Generate test cases for the task.""" + rand = random.Random(6849275409234) # Test cases are fixed, but varied. + io_examples = [] + io_examples.append(([0], [1])) + io_examples.append(([4, 2, 0], [1])) + io_examples.append(([2, 4, 0], [1])) + io_examples.append(([3, 1, 0], [1])) + io_examples.append(([1, 3, 0], [1])) + io_examples.append(([1, 0], [0])) + io_examples.append(([2, 0], [0])) + io_examples.append(([3, 0], [0])) + io_examples.append(([4, 0], [0])) + for _ in xrange(n): + is_true = rand.randrange(2) + length = rand.randrange(1, max_len + 1) + if is_true: + # Make a true case. + length = (length >> 1) << 1 # Make even. + partition = (rand.randrange(length + 1) >> 1) << 1 + a = partition >> 1 + b = (length - partition) >> 1 + counts = {1: a, 2: b, 3: a, 4: b} + else: + # Make a false case. + partitions = ( + [0] + + sorted([rand.randrange(length + 1) for _ in range(3)]) + + [length]) + counts = {n: partitions[n] - partitions[n - 1] for n in range(1, 5)} + if counts[1] == counts[3] and counts[2] == counts[4]: + # By chance we sampled a true case. Make it false by exchanging + # one count between even and odd pairs. + base = 1 + 2 * rand.randrange(2) + a, b = (base, base + 1) if rand.randrange(2) else (base + 1, base) + if counts[a] == length or counts[b] == 0: + # If counts are at their extreme values, then swap who gets + # incremented and decremented. + a, b = b, a + counts[a] += 1 + counts[b] -= 1 + assert counts[a] <= length and counts[b] >= 0 + assert sum(counts.values()) == length + input_seq = [n for n in xrange(1, 5) for _ in xrange(counts[n])] + rand.shuffle(input_seq) + input_seq += [0] + output_seq = self._solve(input_seq) + assert output_seq[0] == is_true + io_examples.append((input_seq, output_seq)) + return io_examples + + def make_io_set(self): + return copy.deepcopy(self._io_pairs) + + +class MultiplyTask(BaseTask): + """Multiply coding task. + + Code needs to multiple two ints. + + Solution: + http://robl.co/brief-look-at-brainfuck/ + ,>,><<[->[->+>+<<]>>[-<<+>>]<<<]>>. + """ + base = 512 + input_type = misc.IOType.integer + output_type = misc.IOType.integer + + def __init__(self, n): + super(type(self), self).__init__() + self.eos = 0 + self._io_pairs = self._make_io_examples(n) + self.input_type = misc.IOType.integer + self.output_type = misc.IOType.integer + + def _factors(self, n): + return set(i for i in range(1, int(n**0.5) + 1) if n % i == 0) + + def _make_io_examples(self, n): + """Generate test cases for the task.""" + rand = random.Random(6849275409234) # Test cases are fixed, but varied. + io_examples = [] + for _ in xrange(n): + n = rand.randrange(self.base) + if n == 0: + a, b = 0, rand.randrange(self.base) + else: + f = list(self._factors(n)) + a = f[rand.randrange(len(f))] + b = n // a + if rand.randrange(2): + a, b = b, a + io_examples.append(([a, b], [n])) + return io_examples + + def make_io_set(self): + return copy.deepcopy(self._io_pairs) + + +class DivModTask(BaseTask): + """Divmod coding task. + + Code needs to take the quotient and remainder of two ints. 
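+  For example, the input [7, 3] should produce the output [2, 1]
+  (quotient 2, remainder 1).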
+ + Solution: + http://robl.co/brief-look-at-brainfuck/ + ,>,><<[>[->+>+<<]>[-<<-[>]>>>[<[-<->]<[>]>>[[-]>>+<]>-<]<<]>>>+<<[-<<+>>]<<<]> + >>>>[-<<<<<+>>>>>]<<<<<.>.> + """ + base = 512 + input_type = misc.IOType.integer + output_type = misc.IOType.integer + + def __init__(self, n): + super(type(self), self).__init__() + self.eos = 0 + self._io_pairs = self._make_io_examples(n) + self.input_type = misc.IOType.integer + self.output_type = misc.IOType.integer + + def _make_io_examples(self, n): + rand = random.Random(6849275409234) # Test cases are fixed, but varied. + io_examples = [] + for _ in xrange(n): + n = rand.randrange(0, self.base) + k = rand.randrange(1, self.base) # Divisor cannot be 0. + io_examples.append(([n, k], list(divmod(n, k)))) + return io_examples + + def make_io_set(self): + return copy.deepcopy(self._io_pairs) + + +class FibonacciTask(BaseTask): + + def __init__(self): + super(type(self), self).__init__() + self.base = 256 + self.input_type = misc.IOType.integer + self.output_type = misc.IOType.integer + + def make_io_set(self): + return [ + ([0], [0, 1]), + ([1], [1, 1]), + ([2], [1, 2]), + ([3], [2, 3]), + ([4], [3, 5]), + ([5], [5, 8]), + ([6], [8, 13]), + ([7], [13, 21]), + ([8], [21, 34]), + ([9], [34, 55]), + ([10], [55, 89]), + ([11], [89, 144]), + ([12], [144, 233]), + ([13], [233, 121])] + + +class FindSubStrTask(BaseTask): + """Find sub-string coding task. + + Code needs to output a bool: True if the input string contains a hard-coded + substring, 'AB' (values [1, 2]). + """ + + def __init__(self, base): + super(type(self), self).__init__() + assert base >= 27 + self.base = base + self.eos = 0 + self.find_str = [1, 2] + self.input_type = misc.IOType.string + self.output_type = misc.IOType.boolean + + def make_io_set(self): + return [ + ([1, 1, 23, 0], [0]), + ([21, 3, 2, 0], [0]), + ([2, 1, 19, 0], [0]), + ([2, 24, 15, 3, 0], [0]), + ([24, 6, 10, 16, 4, 0], [0]), + ([1, 2, 12, 0], [1]), + ([7, 1, 2, 0], [1]), + ([1, 2, 11, 3, 0], [1]), + ([1, 1, 2, 18, 0], [1]), + ([7, 25, 1, 2, 0], [1]), + ([3, 1, 2, 11, 8, 0], [1]), + ([15, 16, 20, 1, 2, 0], [1])] + + +class SortFixedTask(BaseTask): + """Sort list coding task. + + Code needs to output a sorted input list. The task consists of lists of the + same length L, where L is provided to this task's constructor as a + hyperparameter. + """ + + def __init__(self, base, length=3): + super(type(self), self).__init__() + assert base >= 27 + self.base = base + self.eos = 0 + self.length = length + assert length == 3 # More lengths will be supported. + + def make_io_set(self): + if self.length == 3: + return [ + ([1, 20, 6], [1, 6, 20]), + ([13, 6, 7], [6, 7, 13]), + ([24, 2, 23], [2, 23, 24]), + ([16, 12, 3], [3, 12, 16]), + ([11, 24, 4], [4, 11, 24]), + ([10, 1, 19], [1, 10, 19])] + + +class SortFixedTaskV2(BaseTask): + """Sort list coding task (version 2). + + Code needs to output a sorted input list. The task consists of lists of the + same length L, where L is provided to this task's constructor as a + hyperparameter. + + Test cases are dynamically generated, allowing for the number of test cases + to be a hyperparameter. + """ + + def __init__(self, base, n, length=3): + super(type(self), self).__init__() + assert base >= 27 + self.base = base + self.eos = 0 + self._io_pairs = self._make_io_examples(n, length) + self.input_type = misc.IOType.integer + self.output_type = misc.IOType.integer + + def _make_io_examples(self, n, length): + rand = random.Random(6849275409234) # Test cases are fixed, but varied. 
+ io_examples = [] + for _ in xrange(n): + input_seq = [rand.randrange(1, self.base) for _ in xrange(length)] + output_seq = sorted(input_seq) + io_examples.append((input_seq, output_seq)) + return io_examples + + def make_io_set(self): + return copy.deepcopy(self._io_pairs) + + +class RemoveTargetCharTask(KnownCodeBaseTask): + """Remove target character from string, where first input is the target. + + Target can appear multiple times. + """ + + def __init__(self, **kwargs): + def randrange_hole(rand, a, hole, b): + x = rand.randrange(a, b - 1) + if x >= hole: + return x + 1 + return x + def remove_target_char_gen(rand): + char = rand.randrange(1, 6) + l = rand.randrange(1, 8) + input_seq = [randrange_hole(rand, 1, char, 256) for _ in xrange(l)] + idx = range(l) + rand.shuffle(idx) + num_targets = rand.randrange(0, l) + for pos in idx[:num_targets]: + input_seq[pos] = char + return [char] + input_seq + [0] + super(type(self), self).__init__( + ',>>>,[<<<[->+>+<<]>>[->->+<<]>[>[-<+>]<.[-]]>[-]<<<[-<+>]>>,].', + remove_target_char_gen, + **kwargs) + + +class ListIndexTask(KnownCodeBaseTask): + """Echo i-th value in the given list.""" + + def __init__(self, **kwargs): + def array_index_gen(rand): + l = rand.randrange(1, 16) + i = rand.randrange(l) + return [i] + [rand.randrange(256) for _ in xrange(l)] + [0] + super(type(self), self).__init__( + ',[->,<]>,.', + array_index_gen, + **kwargs) + + +# ============================================================================== +# Tasks based on primaryobjects paper. +# ============================================================================== + + +def string2tokens(string): + return [ord(c) for c in string] + + +def stringlist2tokens(strings): + return [string2tokens(string) for string in strings] + + +def string2tokens_b27(string): + return [ord(c.lower()) - ord('a') + 1 for c in string] + + +def stringlist2tokens_b27(strings): + return [string2tokens_b27(string) for string in strings] + + +class BottlesOfBeerTask(BaseTask): + """Bottles of beer coding task. + + This is a counting task. Code needs to read in an int N and then output + every int from N to 0, each separated by a 0. + """ + base = 256 + input_type = misc.IOType.integer + output_type = misc.IOType.integer + + def make_io_set(self): + return [ + ([1], [1, 0]), + ([2], [2, 0, 1, 0]), + ([3], [3, 0, 2, 0, 1, 0]), + ([4], [4, 0, 3, 0, 2, 0, 1, 0]), + ([5], [5, 0, 4, 0, 3, 0, 2, 0, 1, 0]), + ([6], [6, 0, 5, 0, 4, 0, 3, 0, 2, 0, 1, 0])] + + +class SplitTask(BaseTask): + """Split coding task. + + Code needs to pipe input strings to output, but insert a 0 after every 3 + characters. This is in essence splitting the string into intervals of length + 3. + """ + base = 28 + input_type = misc.IOType.string + output_type = misc.IOType.integer + + def _splicer(self, lst, insert, interval=3): + for i, item in enumerate(lst): + yield item + if (i + 1) % interval == 0 and i < len(lst) - 1: + yield insert + + def __init__(self): + super(type(self), self).__init__() + inputs = stringlist2tokens_b27( + ['hello', 'orange', 'spaghetti', 'wins', 'one']) + targets = [list(self._splicer(i, 27)) for i in inputs] + self._test_cases = list(zip(inputs, targets)) + + def make_io_set(self): + return copy.deepcopy(self._test_cases) + + +class TrimLeftTask(BaseTask): + """Trim left coding task. + + Code needs to pipe input strings to output, but remove everything before the + first quotation char ("). 
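+  For example, 'xy "test" rights' becomes '"test" rights'.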
+ """ + base = 256 + input_type = misc.IOType.integer + output_type = misc.IOType.integer + + def __init__(self): + super(type(self), self).__init__() + inputs = stringlist2tokens( + ['a "inside" over', 'xy "test" rights', 'ca6 "foresting" service', + 'abc"def"yz.', 'A"B"']) + targets = stringlist2tokens( + ['"inside" over', '"test" rights', '"foresting" service', '"def"yz.', + '"B"']) + self._test_cases = list(zip(inputs, targets)) + + def make_io_set(self): + return copy.deepcopy(self._test_cases) diff --git a/models/research/brain_coder/single_task/code_tasks_test.py b/models/research/brain_coder/single_task/code_tasks_test.py new file mode 100644 index 0000000000000000000000000000000000000000..d3260a1a56ec0f7c36363d558122f7f7e49198e6 --- /dev/null +++ b/models/research/brain_coder/single_task/code_tasks_test.py @@ -0,0 +1,108 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""Tests for code_tasks.""" + +import numpy as np +import tensorflow as tf + +from single_task import code_tasks # brain coder +from single_task import defaults # brain coder + + +def pad(string, pad_length, pad_char): + return string + pad_char * (pad_length - len(string)) + + +class CodeTasksTest(tf.test.TestCase): + + def assertClose(self, a, b): + self.assertTrue( + np.isclose(a, b, atol=1e-4), + 'Expecting approximately equal values. Got: %s, %s' % (a, b)) + + def testMultiIOTaskManager(self): + maxlen = 100 + padchr = '[' + task = code_tasks.make_paper_task( + 'print', timestep_limit=maxlen, do_code_simplification=False) + reward_fns = task.rl_batch(1) + r = reward_fns[0] + self.assertClose( + r(pad('++++++++.---.+++++++...', maxlen, padchr)).episode_rewards[-1], + 0.2444) + self.assertClose( + r(pad('++++++++.---.+++++++..+++.', + maxlen, padchr)).episode_rewards[-1], + 1.0) + + task = code_tasks.make_paper_task( + 'print', timestep_limit=maxlen, do_code_simplification=True) + reward_fns = task.rl_batch(1) + r = reward_fns[0] + self.assertClose( + r('++++++++.---.+++++++...').episode_rewards[-1], + 0.2444) + self.assertClose( + r('++++++++.---.+++++++..+++.').episode_rewards[-1], + 0.935) + self.assertClose( + r(pad('++++++++.---.+++++++..+++.', + maxlen, padchr)).episode_rewards[-1], + 0.75) + + task = code_tasks.make_paper_task( + 'reverse', timestep_limit=maxlen, do_code_simplification=False) + reward_fns = task.rl_batch(1) + r = reward_fns[0] + self.assertClose( + r(pad('>,>,>,.<.<.<.', maxlen, padchr)).episode_rewards[-1], + 0.1345) + self.assertClose( + r(pad(',[>,]+[,<.]', maxlen, padchr)).episode_rewards[-1], + 1.0) + + task = code_tasks.make_paper_task( + 'reverse', timestep_limit=maxlen, do_code_simplification=True) + reward_fns = task.rl_batch(1) + r = reward_fns[0] + self.assertClose(r('>,>,>,.<.<.<.').episode_rewards[-1], 0.1324) + self.assertClose(r(',[>,]+[,<.]').episode_rewards[-1], 0.9725) + self.assertClose( + r(pad(',[>,]+[,<.]', maxlen, padchr)).episode_rewards[-1], + 0.75) + + def testMakeTask(self): + maxlen = 100 + padchr = '[' + config = defaults.default_config_with_updates( + 'env=c(config_for_iclr=False,fixed_string=[8,5,12,12,15])') + task = code_tasks.make_task(config.env, 'print', timestep_limit=maxlen) + reward_fns = task.rl_batch(1) + r = reward_fns[0] + self.assertClose( + r('++++++++.---.+++++++...').episode_rewards[-1], + 0.2444) + self.assertClose( + r('++++++++.---.+++++++..+++.').episode_rewards[-1], + 0.935) + self.assertClose( + r(pad('++++++++.---.+++++++..+++.', + maxlen, padchr)).episode_rewards[-1], 
+ 0.75) + + def testKnownCodeBaseTask(self): + maxlen = 100 + padchr = '[' + task = code_tasks.make_paper_task( + 'shift-left', timestep_limit=maxlen, do_code_simplification=False) + reward_fns = task.rl_batch(1) + r = reward_fns[0] + self.assertClose( + r(pad(',>,[.,]<.,.', maxlen, padchr)).episode_rewards[-1], + 1.0) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/brain_coder/single_task/data.py b/models/research/brain_coder/single_task/data.py new file mode 100644 index 0000000000000000000000000000000000000000..8f34464f5a3e1c403b0f253f1520920c303b0819 --- /dev/null +++ b/models/research/brain_coder/single_task/data.py @@ -0,0 +1,89 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""Manage data for pretraining and RL tasks.""" + +import ast +from collections import namedtuple + +from absl import logging + +from single_task import code_tasks # brain coder + + +RLBatch = namedtuple('RLBatch', ['reward_fns', 'batch_size', 'good_reward']) + + +class DataManager(object): + """Interface between environment and model.""" + + def __init__(self, global_config, run_number=None, + do_code_simplification=False): + """Constructs a DataManager. + + Args: + global_config: A config_lib.Config instance containing all config. See + config in defaults.py. + run_number: Which run this is (of the same experiment). This should be set + when a task cycle is defined in the config. A task cycle is a list of + tasks to cycle through repeatedly, and the selected task is a function + of the run number, i.e. 0-th run, 1-st run, 2-nd run, etc... + This can be None if only a single task is set in the config. + do_code_simplification: When global_config.env.config_for_iclr is True, + use this option to create code simplification (code golf) tasks, vs + fixed length coding tasks. If True, a task with code simplification + reward will be constructed. + + Raises: + ValueError: If global_config.env.task and global_config.env.task_cycle + are both set, or both not set. Only one should be given. + ValueError: If global_config.env.task_cycle is set but run_number is None. + """ + env_config = global_config.env + self.batch_size = global_config.batch_size + + if env_config.task_cycle: + if env_config.task: + raise ValueError('Do not set both `task` and `task_cycle`.') + if run_number is None: + raise ValueError('Do not use task_cycle for single-run experiment.') + index = run_number % len(env_config.task_cycle) + self.task_name = env_config.task_cycle[index] + logging.info('run_number: %d, task_cycle index: %d', run_number, index) + logging.info('task_cycle: %s', env_config.task_cycle) + elif env_config.task: + self.task_name = env_config.task + else: + raise ValueError('Either `task` or `task_cycle` must be set.') + logging.info('Task for this run: "%s"', self.task_name) + + logging.info('config_for_iclr=True; do_code_simplification=%s', + do_code_simplification) + self.rl_task = code_tasks.make_task( + task_name=self.task_name, + override_kwargs=ast.literal_eval(env_config.task_kwargs), + max_code_length=global_config.timestep_limit, + require_correct_syntax=env_config.correct_syntax, + do_code_simplification=do_code_simplification, + correct_bonus=env_config.task_manager_config.correct_bonus, + code_length_bonus=env_config.task_manager_config.code_length_bonus) + + def sample_rl_batch(self): + """Create reward functions from the current task. 
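+
+    Illustrative usage (variable names are hypothetical):
+      rl_batch = data_manager.sample_rl_batch()
+      rewards = [fn(code) for fn, code in zip(rl_batch.reward_fns, codes)]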
+ + Returns: + RLBatch namedtuple instance, which holds functions and information for + a minibatch of episodes. + * reward_fns: A reward function for each episode. Maps code string to + reward. + * batch_size: Number of episodes in this minibatch. + * good_reward: Estimated threshold of rewards which indicate the algorithm + is starting to solve the task. This is a heuristic that tries to + reduce the amount of stuff written to disk. + """ + reward_fns = self.rl_task.rl_batch(self.batch_size) + return RLBatch( + reward_fns=reward_fns, + batch_size=self.batch_size, + good_reward=self.rl_task.good_reward) diff --git a/models/research/brain_coder/single_task/defaults.py b/models/research/brain_coder/single_task/defaults.py new file mode 100644 index 0000000000000000000000000000000000000000..d9bd8b942532dfffcf06d90d331e58725c4d82a9 --- /dev/null +++ b/models/research/brain_coder/single_task/defaults.py @@ -0,0 +1,82 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""Default configuration for agent and environment.""" + +from absl import logging + +from common import config_lib # brain coder + + +def default_config(): + return config_lib.Config( + agent=config_lib.OneOf( + [config_lib.Config( + algorithm='pg', + policy_lstm_sizes=[35,35], + # Set value_lstm_sizes to None to share weights with policy. + value_lstm_sizes=[35,35], + obs_embedding_size=10, + grad_clip_threshold=10.0, + param_init_factor=1.0, + lr=5e-5, + pi_loss_hparam=1.0, + vf_loss_hparam=0.5, + entropy_beta=1e-2, + regularizer=0.0, + softmax_tr=1.0, # Reciprocal temperature. + optimizer='rmsprop', # 'adam', 'sgd', 'rmsprop' + topk=0, # Top-k unique codes will be stored. + topk_loss_hparam=0.0, # off policy loss multiplier. + # Uniformly sample this many episodes from topk buffer per batch. + # If topk is 0, this has no effect. + topk_batch_size=1, + # Exponential moving average baseline for REINFORCE. + # If zero, A2C is used. + # If non-zero, should be close to 1, like .99, .999, etc. + ema_baseline_decay=0.99, + # Whether agent can emit EOS token. If true, agent can emit EOS + # token which ends the episode early (ends the sequence). + # If false, agent must emit tokens until the timestep limit is + # reached. e.g. True means variable length code, False means fixed + # length code. + # WARNING: Making this false slows things down. + eos_token=False, + replay_temperature=1.0, + # Replay probability. 1 = always replay, 0 = always on policy. + alpha=0.0, + # Whether to normalize importance weights in each minibatch. + iw_normalize=True), + config_lib.Config( + algorithm='ga', + crossover_rate=0.99, + mutation_rate=0.086), + config_lib.Config( + algorithm='rand')], + algorithm='pg', + ), + env=config_lib.Config( + # If True, task-specific settings are not needed. + task='', # 'print', 'echo', 'reverse', 'remove', ... + task_cycle=[], # If non-empty, reptitions will cycle through tasks. + task_kwargs='{}', # Python dict literal. + task_manager_config=config_lib.Config( + # Reward recieved per test case. These bonuses will be scaled + # based on how many test cases there are. + correct_bonus=2.0, # Bonus for code getting correct answer. + code_length_bonus=1.0), # Maximum bonus for short code. 
+ correct_syntax=False, + ), + batch_size=64, + timestep_limit=32) + + +def default_config_with_updates(config_string, do_logging=True): + if do_logging: + logging.info('Config string: "%s"', config_string) + config = default_config() + config.strict_update(config_lib.Config.parse(config_string)) + if do_logging: + logging.info('Config:\n%s', config.pretty_str()) + return config diff --git a/models/research/brain_coder/single_task/ga_lib.py b/models/research/brain_coder/single_task/ga_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..fadb96482b21a5c65c0d6d6cf4a3aec3b5708235 --- /dev/null +++ b/models/research/brain_coder/single_task/ga_lib.py @@ -0,0 +1,472 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""Genetic algorithm for BF tasks. + +Inspired by https://github.com/primaryobjects/AI-Programmer. +GA function code borrowed from https://github.com/DEAP/deap. +""" + +from collections import namedtuple +import random + +from absl import flags +from absl import logging +import numpy as np +from six.moves import xrange + +from common import bf # brain coder +from common import utils # brain coder +from single_task import misc # brain coder + +FLAGS = flags.FLAGS + +# Saving reward of previous programs saves computation if a program appears +# again. +USE_REWARD_CACHE = True # Disable this if GA is using up too much memory. +GENES = bf.CHARS +MAX_PROGRAM_STEPS = 500 +STEP_BONUS = True + +ALPHANUM_CHARS = ( + ['_'] + + [chr(ord('a') + i_) for i_ in range(26)] + + [chr(ord('A') + i_) for i_ in range(26)] + + [chr(ord('0') + i_) for i_ in range(10)]) + +Result = namedtuple( + 'Result', + ['reward', 'inputs', 'code_outputs', 'target_outputs', 'type_in', + 'type_out', 'base', 'correct']) + + +class IOType(object): + string = 'string' + integer = 'integer' + + +class CustomType(object): + + def __init__(self, to_str_fn): + self.to_str_fn = to_str_fn + + def __call__(self, obj): + return self.to_str_fn(obj) + + +def tokens_list_repr(tokens, repr_type, base): + """Make human readable representation of program IO.""" + if isinstance(repr_type, CustomType): + return repr_type(tokens) + elif repr_type == IOType.string: + chars = ( + [ALPHANUM_CHARS[t] for t in tokens] if base < len(ALPHANUM_CHARS) + else [chr(t) for t in tokens]) + return ''.join(chars) + elif repr_type == IOType.integer: + return str(tokens) + raise ValueError('No such representation type "%s"', repr_type) + + +def io_repr(result): + """Make human readable representation of test cases.""" + inputs = ','.join( + tokens_list_repr(tokens, result.type_in, result.base) + for tokens in result.inputs) + code_outputs = ','.join( + tokens_list_repr(tokens, result.type_out, result.base) + for tokens in result.code_outputs) + target_outputs = ','.join( + tokens_list_repr(tokens, result.type_out, result.base) + for tokens in result.target_outputs) + return inputs, target_outputs, code_outputs + + +def make_task_eval_fn(task_manager): + """Returns a wrapper that converts an RL task into a GA task. + + Args: + task_manager: Is a task manager object from code_tasks.py + + Returns: + A function that takes as input a single list of a code chars, and outputs + a Result namedtuple instance containing the reward and information about + code execution. 
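+    For example (illustrative), `eval_fn = make_task_eval_fn(task_manager)`
+    followed by `eval_fn(list(',[.,]'))` returns a Result whose `reward` and
+    `correct` fields describe how well that BF program did on the task.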
+ """ + def to_data_list(single_or_tuple): + if isinstance(single_or_tuple, misc.IOTuple): + return list(single_or_tuple) + return [single_or_tuple] + + def to_ga_type(rl_type): + if rl_type == misc.IOType.string: + return IOType.string + return IOType.integer + + # Wrapper function. + def evalbf(bf_chars): + result = task_manager._score_code(''.join(bf_chars)) + reward = sum(result.episode_rewards) + correct = result.reason == 'correct' + return Result( + reward=reward, + inputs=to_data_list(result.input_case), + code_outputs=to_data_list(result.code_output), + target_outputs=to_data_list(result.correct_output), + type_in=to_ga_type(result.input_type), + type_out=to_ga_type(result.output_type), + correct=correct, + base=task_manager.task.base) + + return evalbf + + +def debug_str(individual, task_eval_fn): + res = task_eval_fn(individual) + input_str, target_output_str, code_output_str = io_repr(res) + return ( + ''.join(individual) + + ' | ' + input_str + + ' | ' + target_output_str + + ' | ' + code_output_str + + ' | ' + str(res.reward) + + ' | ' + str(res.correct)) + + +def mutate_single(code_tokens, mutation_rate): + """Mutate a single code string. + + Args: + code_tokens: A string/list/Individual of BF code chars. Must end with EOS + symbol '_'. + mutation_rate: Float between 0 and 1 which sets the probability of each char + being mutated. + + Returns: + An Individual instance containing the mutated code string. + + Raises: + ValueError: If `code_tokens` does not end with EOS symbol. + """ + if len(code_tokens) <= 1: + return code_tokens + if code_tokens[-1] == '_': + # Do this check to ensure that the code strings have not been corrupted. + raise ValueError('`code_tokens` must end with EOS symbol.') + else: + cs = Individual(code_tokens) + eos = [] + mutated = False + for pos in range(len(cs)): + if random.random() < mutation_rate: + mutated = True + new_char = GENES[random.randrange(len(GENES))] + x = random.random() + if x < 0.25 and pos != 0 and pos != len(cs) - 1: + # Insertion mutation. + if random.random() < 0.50: + # Shift up. + cs = cs[:pos] + [new_char] + cs[pos:-1] + else: + # Shift down. + cs = cs[1:pos] + [new_char] + cs[pos:] + elif x < 0.50: + # Deletion mutation. + if random.random() < 0.50: + # Shift down. + cs = cs[:pos] + cs[pos + 1:] + [new_char] + else: + # Shift up. + cs = [new_char] + cs[:pos] + cs[pos + 1:] + elif x < 0.75: + # Shift rotate mutation (position invariant). + if random.random() < 0.50: + # Shift down. + cs = cs[1:] + [cs[0]] + else: + # Shift up. + cs = [cs[-1]] + cs[:-1] + else: + # Replacement mutation. + cs = cs[:pos] + [new_char] + cs[pos + 1:] + assert len(cs) + len(eos) == len(code_tokens) + if mutated: + return Individual(cs + eos) + else: + return Individual(code_tokens) + + +def crossover(parent1, parent2): + """Performs crossover mating between two code strings. + + Crossover mating is where a random position is selected, and the chars + after that point are swapped. The resulting new code strings are returned. + + Args: + parent1: First code string. + parent2: Second code string. + + Returns: + A 2-tuple of children, i.e. the resulting code strings after swapping. 
+ """ + max_parent, min_parent = ( + (parent1, parent2) if len(parent1) > len(parent2) + else (parent2, parent1)) + pos = random.randrange(len(max_parent)) + if pos >= len(min_parent): + child1 = max_parent[:pos] + child2 = min_parent + max_parent[pos:] + else: + child1 = max_parent[:pos] + min_parent[pos:] + child2 = min_parent[:pos] + max_parent[pos:] + return Individual(child1), Individual(child2) + + +def _make_even(n): + """Return largest even integer less than or equal to `n`.""" + return (n >> 1) << 1 + + +def mutate_and_crossover(population, mutation_rate, crossover_rate): + """Take a generational step over a population. + + Transforms population of parents into population of children (of the same + size) via crossover mating and then mutation on the resulting children. + + Args: + population: Parent population. A list of Individual objects. + mutation_rate: Probability of mutation. See `mutate_single`. + crossover_rate: Probability that two parents will mate. + + Returns: + Child population. A list of Individual objects. + """ + children = [None] * len(population) + for i in xrange(0, _make_even(len(population)), 2): + p1 = population[i] + p2 = population[i + 1] + if random.random() < crossover_rate: + p1, p2 = crossover(p1, p2) + c1 = mutate_single(p1, mutation_rate) + c2 = mutate_single(p2, mutation_rate) + children[i] = c1 + children[i + 1] = c2 + if children[-1] is None: + children[-1] = population[-1] + return children + + +def ga_loop(population, cxpb, mutpb, ngen, task_eval_fn, halloffame=None, + checkpoint_writer=None): + """A bare bones genetic algorithm. + + Similar to chapter 7 of Back, Fogel and Michalewicz, "Evolutionary + Computation 1 : Basic Algorithms and Operators", 2000. + + Args: + population: A list of individuals. + cxpb: The probability of mating two individuals. + mutpb: The probability of mutating a gene. + ngen: The number of generation. Unlimited if zero. + task_eval_fn: A python function which maps an Individual to a Result + namedtuple. + halloffame: (optional) a utils.MaxUniquePriorityQueue object that will be + used to aggregate the best individuals found during search. + checkpoint_writer: (optional) an object that can save and load populations. + Needs to have `write`, `load`, and `has_checkpoint` methods. Used to + periodically save progress. In event of a restart, the population will + be loaded from disk. + + Returns: + GaResult namedtuple instance. This contains information about the GA run, + including the resulting population, best reward (fitness) obtained, and + the best code string found. + """ + + has_checkpoint = False + if checkpoint_writer and checkpoint_writer.has_checkpoint(): + try: + gen, population, halloffame = checkpoint_writer.load() + except EOFError: # Data was corrupted. Start over. + pass + else: + has_checkpoint = True + logging.info( + 'Loaded population from checkpoint. 
Starting at generation %d', gen) + + # Evaluate the individuals with an invalid fitness + invalid_ind = [ind for ind in population if not ind.fitness.valid] + for ind in invalid_ind: + ind.fitness.values = task_eval_fn(ind).reward, + for _, ind in halloffame.iter_in_order(): + ind.fitness.values = task_eval_fn(ind).reward, + + if not has_checkpoint: + # Evaluate the individuals with an invalid fitness + invalid_ind = [ind for ind in population if not ind.fitness.valid] + for ind in invalid_ind: + ind.fitness.values = task_eval_fn(ind).reward, + + if halloffame is not None: + for ind in population: + halloffame.push(ind.fitness.values, tuple(ind), ind) + + logging.info('Initialized new population.') + + gen = 1 + + pop_size = len(population) + program_reward_cache = {} if USE_REWARD_CACHE else None + + # Begin the generational process + while ngen == 0 or gen <= ngen: + # Select the next generation individuals + offspring = roulette_selection(population, pop_size - len(halloffame)) + + # Vary the pool of individuals + # offspring = varAnd(offspring, toolbox, cxpb, mutpb) + offspring = mutate_and_crossover( + offspring, mutation_rate=mutpb, crossover_rate=cxpb) + + # Evaluate the individuals with an invalid fitness + invalid_ind = [ind for ind in offspring if not ind.fitness.valid] + for ind in invalid_ind: + str_repr = ''.join(ind) + if program_reward_cache is not None and str_repr in program_reward_cache: + ind.fitness.values = (program_reward_cache[str_repr],) + else: + eval_result = task_eval_fn(ind) + ind.fitness.values = (eval_result.reward,) + if program_reward_cache is not None: + program_reward_cache[str_repr] = eval_result.reward + + # Replace the current population by the offspring + population = list(offspring) + + # Update the hall of fame with the generated individuals + if halloffame is not None: + for ind in population: + halloffame.push(ind.fitness.values, tuple(ind), ind) + + # elitism + population.extend([ind for _, ind in halloffame.iter_in_order()]) + + if gen % 100 == 0: + top_code = '\n'.join([debug_str(ind, task_eval_fn) + for ind in topk(population, k=4)]) + logging.info('gen: %d\nNPE: %d\n%s\n\n', gen, gen * pop_size, top_code) + + best_code = ''.join(halloffame.get_max()[1]) + res = task_eval_fn(best_code) + + # Write population and hall-of-fame to disk. + if checkpoint_writer: + checkpoint_writer.write(gen, population, halloffame) + + if res.correct: + logging.info('Solution found:\n%s\nreward = %s\n', + best_code, res.reward) + break + + gen += 1 + + best_code = ''.join(halloffame.get_max()[1]) + res = task_eval_fn(best_code) + + return GaResult( + population=population, best_code=best_code, reward=res.reward, + solution_found=res.correct, generations=gen, + num_programs=gen * len(population), + max_generations=ngen, max_num_programs=ngen * len(population)) + + +GaResult = namedtuple( + 'GaResult', + ['population', 'best_code', 'reward', 'generations', 'num_programs', + 'solution_found', 'max_generations', 'max_num_programs']) + + +def reward_conversion(reward): + """Convert real value into positive value.""" + if reward <= 0: + return 0.05 + return reward + 0.05 + + +def roulette_selection(population, k): + """Select `k` individuals with prob proportional to fitness. + + Each of the `k` selections is independent. + + Warning: + The roulette selection by definition cannot be used for minimization + or when the fitness can be smaller or equal to 0. + + Args: + population: A list of Individual objects to select from. + k: The number of individuals to select. 
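+      Note: each draw samples u uniformly from [0, total converted fitness)
+      and walks the cumulative fitness sum until it exceeds u.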
+ + Returns: + A list of selected individuals. + """ + fitnesses = np.asarray( + [reward_conversion(ind.fitness.values[0]) + for ind in population]) + assert np.all(fitnesses > 0) + + sum_fits = fitnesses.sum() + chosen = [None] * k + for i in xrange(k): + u = random.random() * sum_fits + sum_ = 0 + for ind, fitness in zip(population, fitnesses): + sum_ += fitness + if sum_ > u: + chosen[i] = Individual(ind) + break + if not chosen[i]: + chosen[i] = Individual(population[-1]) + + return chosen + + +def make_population(make_individual_fn, n): + return [make_individual_fn() for _ in xrange(n)] + + +def best(population): + best_ind = None + for ind in population: + if best_ind is None or best_ind.fitness.values < ind.fitness.values: + best_ind = ind + return best_ind + + +def topk(population, k): + q = utils.MaxUniquePriorityQueue(k) + for ind in population: + q.push(ind.fitness.values, tuple(ind), ind) + return [ind for _, ind in q.iter_in_order()] + + +class Fitness(object): + + def __init__(self): + self.values = () + + @property + def valid(self): + """Assess if a fitness is valid or not.""" + return bool(self.values) + + +class Individual(list): + + def __init__(self, *args): + super(Individual, self).__init__(*args) + self.fitness = Fitness() + + +def random_individual(genome_size): + return lambda: Individual(np.random.choice(GENES, genome_size).tolist()) diff --git a/models/research/brain_coder/single_task/ga_train.py b/models/research/brain_coder/single_task/ga_train.py new file mode 100644 index 0000000000000000000000000000000000000000..630eca427e478dbadad58bd94b56e89a5a747526 --- /dev/null +++ b/models/research/brain_coder/single_task/ga_train.py @@ -0,0 +1,324 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""Genetic algorithm for BF tasks. + +Also contains the uniform random search algorithm. + +Inspired by https://github.com/primaryobjects/AI-Programmer. +GA function code borrowed from https://github.com/DEAP/deap. +""" + +import cPickle +import os +import sys +from time import sleep + +from absl import flags +from absl import logging +import numpy as np +from six.moves import xrange +import tensorflow as tf + +from common import utils # brain coder +from single_task import data # brain coder +from single_task import defaults # brain coder +from single_task import ga_lib # brain coder +from single_task import results_lib # brain coder + +FLAGS = flags.FLAGS + + +def define_tuner_hparam_space(hparam_space_type): + """Define tunable hparams for grid search.""" + if hparam_space_type != 'ga': + raise ValueError('Hparam space is not valid: "%s"' % hparam_space_type) + return { + 'population_size': [10, 25, 50, 100, 500], + 'crossover_rate': [0.2, 0.5, 0.7, 0.9, 0.95], + 'mutation_rate': [0.01, 0.03, 0.05, 0.1, 0.15]} + + +def write_hparams_to_config(config, hparams, hparam_space_type): + """Write hparams given by the tuner into the Config object.""" + if hparam_space_type != 'ga': + raise ValueError('Hparam space is not valid: "%s"' % hparam_space_type) + config.batch_size = hparams.population_size + config.agent.crossover_rate = hparams.crossover_rate + config.agent.mutation_rate = hparams.mutation_rate + + +class CheckpointWriter(object): + """Manages loading and saving GA populations to disk. + + This object is used by the genetic algorithm to save progress periodically + so that a recent population can be loaded from disk in the event of a restart. 
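+
+  Illustrative usage (the directory name is hypothetical):
+
+    writer = CheckpointWriter('/tmp/models/run_0', population_size=64)
+    writer.write(gen, population, halloffame)
+    if writer.has_checkpoint():
+      gen, population, halloffame = writer.load()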
+ """ + + def __init__(self, checkpoint_dir, population_size): + self.checkpoint_file = os.path.join(checkpoint_dir, 'checkpoint.pickle') + self.population_size = population_size + + def write(self, gen, population, halloffame): + """Write GA state to disk. + + Overwrites previous saved state. + + Args: + gen: Generation number. + population: List of Individual objects. + halloffame: Hall-of-fame buffer. Typically a priority queue. + """ + raw = cPickle.dumps((gen, population, halloffame)) + with tf.gfile.FastGFile(self.checkpoint_file, 'w') as f: + f.write(raw) + + def load(self): + """Loads GA state from disk. + + Loads whatever is on disk, which will be whatever the most recent call + to `write` wrote. + + Returns: + gen: Generation number. + population: List of Individual objects. + halloffame: Hall-of-fame buffer. Typically a priority queue. + """ + with tf.gfile.FastGFile(self.checkpoint_file, 'r') as f: + raw = f.read() + objs = cPickle.loads(raw) + # Validate data. + assert isinstance(objs, tuple) and len(objs) == 3, ( + 'Expecting a 3-tuple, but got %s instead.' % (objs,)) + gen, population, halloffame = objs + assert isinstance(gen, int), ( + 'Expecting `gen` to be an integer, got %s' % (gen,)) + assert ( + isinstance(population, list) + and len(population) == self.population_size + ), ( + 'Expecting `population` to be a list with size %d, got %s' + % (self.population_size, population)) + assert halloffame is None or len(halloffame) == 2, ( + 'Expecting hall-of-fame object to have length two, got length %d' + % len(halloffame)) + logging.info('Loaded pop from checkpoint file: "%s".', + self.checkpoint_file) + return gen, population, halloffame + + def has_checkpoint(self): + """Checks if a checkpoint exists on disk, and if so returns True.""" + return tf.gfile.Exists(self.checkpoint_file) + + +def run_training(config=None, tuner=None, logdir=None, trial_name=None, # pylint: disable=unused-argument + is_chief=True): + """Do all training runs. + + This is the top level training function for policy gradient based models. + Run this from the main function. + + Args: + config: config_lib.Config instance containing global config (agent and + environment hparams). If None, config will be parsed from FLAGS.config. + tuner: (unused) A tuner instance. Leave as None if not tuning. + logdir: Parent directory where all data from all runs will be written. If + None, FLAGS.logdir will be used. + trial_name: (unused) If tuning, set this to a unique string that identifies + this trial. If `tuner` is not None, this also must be set. + is_chief: True if this worker is the chief. + + Returns: + List of results dicts which were written to disk. Each training run gets a + results dict. Results dict contains metrics, i.e. (name, value) pairs which + give information about the training run. + + Raises: + ValueError: If FLAGS.num_workers does not divide FLAGS.num_repetitions. + ValueError: If results dicts read from disk contain invalid data. + """ + if not config: + # If custom config is not given, get it from flags. 
+ config = defaults.default_config_with_updates(FLAGS.config) + if not logdir: + logdir = FLAGS.logdir + + if FLAGS.num_repetitions % FLAGS.num_workers != 0: + raise ValueError('Number of workers must divide number of repetitions') + num_local_reps = FLAGS.num_repetitions // FLAGS.num_workers + logging.info('Running %d reps globally.', FLAGS.num_repetitions) + logging.info('This worker will run %d local reps.', num_local_reps) + if FLAGS.max_npe: + max_generations = FLAGS.max_npe // config.batch_size + logging.info('Max samples per rep: %d', FLAGS.max_npe) + logging.info('Max generations per rep: %d', max_generations) + else: + max_generations = sys.maxint + logging.info('Running unlimited generations.') + + assert FLAGS.num_workers > 0 + logging.info('Starting experiment. Directory: "%s"', logdir) + results = results_lib.Results(logdir, FLAGS.task_id) + local_results_list = results.read_this_shard() + if local_results_list: + if local_results_list[0]['max_npe'] != FLAGS.max_npe: + raise ValueError( + 'Cannot resume training. Max-NPE changed. Was %s, now %s', + local_results_list[0]['max_npe'], FLAGS.max_npe) + if local_results_list[0]['max_global_repetitions'] != FLAGS.num_repetitions: + raise ValueError( + 'Cannot resume training. Number of repetitions changed. Was %s, ' + 'now %s', + local_results_list[0]['max_global_repetitions'], + FLAGS.num_repetitions) + start_rep = len(local_results_list) + + for rep in xrange(start_rep, num_local_reps): + global_rep = num_local_reps * FLAGS.task_id + rep + logging.info( + 'Starting repetition: Rep = %d. (global rep = %d)', + rep, global_rep) + + # Save data for each rep, like checkpoints, goes into separate folders. + run_dir = os.path.join(logdir, 'run_%d' % global_rep) + + if not tf.gfile.IsDirectory(run_dir): + tf.gfile.MakeDirs(run_dir) + checkpoint_writer = CheckpointWriter(run_dir, + population_size=config.batch_size) + + data_manager = data.DataManager(config, run_number=global_rep) + task_eval_fn = ga_lib.make_task_eval_fn(data_manager.rl_task) + + if config.agent.algorithm == 'rand': + logging.info('Running random search.') + assert FLAGS.max_npe + result = run_random_search( + FLAGS.max_npe, run_dir, task_eval_fn, config.timestep_limit) + else: + assert config.agent.algorithm == 'ga' + logging.info('Running genetic algorithm.') + pop = ga_lib.make_population( + ga_lib.random_individual(config.timestep_limit), + n=config.batch_size) + hof = utils.MaxUniquePriorityQueue(2) # Hall of fame. + result = ga_lib.ga_loop( + pop, + cxpb=config.agent.crossover_rate, mutpb=config.agent.mutation_rate, + task_eval_fn=task_eval_fn, + ngen=max_generations, halloffame=hof, + checkpoint_writer=checkpoint_writer) + + logging.info('Finished rep. Num gens: %d', result.generations) + + results_dict = { + 'max_npe': FLAGS.max_npe, + 'batch_size': config.batch_size, + 'max_batches': FLAGS.max_npe // config.batch_size, + 'npe': result.num_programs, + 'max_global_repetitions': FLAGS.num_repetitions, + 'max_local_repetitions': num_local_reps, + 'code_solution': result.best_code if result.solution_found else '', + 'best_reward': result.reward, + 'num_batches': result.generations, + 'found_solution': result.solution_found, + 'task': data_manager.task_name, + 'global_rep': global_rep} + logging.info('results_dict: %s', results_dict) + results.append(results_dict) + + if is_chief: + logging.info( + 'Worker is chief. 
Waiting for all workers to finish so that results ' + 'can be reported to the tuner.') + + global_results_list, shard_stats = results.read_all( + num_shards=FLAGS.num_workers) + while not all(s.finished for s in shard_stats): + logging.info( + 'Still waiting on these workers: %s', + ', '.join( + ['%d (%d reps left)' + % (i, s.max_local_reps - s.num_local_reps_completed) + for i, s in enumerate(shard_stats) + if not s.finished])) + sleep(60) + global_results_list, shard_stats = results.read_all( + num_shards=FLAGS.num_workers) + + logging.info( + '%d results obtained. Chief worker is exiting the experiment.', + len(global_results_list)) + + return global_results_list + + +def run_random_search(max_num_programs, checkpoint_dir, task_eval_fn, + timestep_limit): + """Run uniform random search routine. + + Randomly samples programs from a uniform distribution until either a valid + program is found, or the maximum NPE is reached. Results are written to disk + and returned. + + Args: + max_num_programs: Maximum NPE (number of programs executed). If no solution + is found after this many programs are tried, the run is stopped and + considered a failure. + checkpoint_dir: Where to save state during the run. + task_eval_fn: Function that maps code string to result containing total + reward and info about success. + timestep_limit: Maximum length of code strings. + + Returns: + ga_lib.GaResult namedtuple instance. This contains the best code and highest + reward found. + """ + checkpoint_file = os.path.join(checkpoint_dir, 'random_search.txt') + num_programs_seen = 0 + found_solution = False + best_code = '' + best_reward = 0.0 + if tf.gfile.Exists(checkpoint_file): + try: + with tf.gfile.FastGFile(checkpoint_file, 'r') as f: + lines = list(f) + num_programs_seen = int(lines[0]) + found_solution = bool(int(lines[1])) + if found_solution: + best_code = lines[2] + best_reward = float(lines[3]) + except: # pylint: disable=bare-except + pass + + while not found_solution and num_programs_seen < max_num_programs: + if num_programs_seen % 1000 == 0: + logging.info('num_programs_seen = %d', num_programs_seen) + with tf.gfile.FastGFile(checkpoint_file, 'w') as f: + f.write(str(num_programs_seen) + '\n') + f.write(str(int(found_solution)) + '\n') + + code = np.random.choice(ga_lib.GENES, timestep_limit).tolist() + res = task_eval_fn(code) + found_solution = res.correct + num_programs_seen += 1 + + if found_solution: + best_code = ''.join(code) + best_reward = res.reward + + logging.info('num_programs_seen = %d', num_programs_seen) + logging.info('found solution: %s', found_solution) + with tf.gfile.FastGFile(checkpoint_file, 'w') as f: + f.write(str(num_programs_seen) + '\n') + f.write(str(int(found_solution)) + '\n') + if found_solution: + f.write(best_code + '\n') + f.write(str(best_reward) + '\n') + + return ga_lib.GaResult( + population=[], best_code=best_code, reward=best_reward, + solution_found=found_solution, generations=num_programs_seen, + num_programs=num_programs_seen, max_generations=max_num_programs, + max_num_programs=max_num_programs) diff --git a/models/research/brain_coder/single_task/ga_train_test.py b/models/research/brain_coder/single_task/ga_train_test.py new file mode 100644 index 0000000000000000000000000000000000000000..ff69ad84952a3fb90cad28b3cf8e67ff55c96e95 --- /dev/null +++ b/models/research/brain_coder/single_task/ga_train_test.py @@ -0,0 +1,51 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""Tests for 
ga_train. + +Tests that ga runs for a few generations without crashing. +""" + +from absl import flags +import tensorflow as tf + +from single_task import defaults # brain coder +from single_task import run # brain coder + +FLAGS = flags.FLAGS + + +class GaTest(tf.test.TestCase): + + def RunTrainingSteps(self, config_string, num_steps=10): + """Run a few training steps with the given config. + + Just check that nothing crashes. + + Args: + config_string: Config encoded in a string. See + $REPO_PATH/common/config_lib.py + num_steps: Number of training steps to run. Defaults to 10. + """ + config = defaults.default_config_with_updates(config_string) + FLAGS.max_npe = num_steps * config.batch_size + FLAGS.logdir = tf.test.get_temp_dir() + FLAGS.config = config_string + run.main(None) + + def testGeneticAlgorithm(self): + self.RunTrainingSteps( + 'env=c(task="reverse"),' + 'agent=c(algorithm="ga"),' + 'timestep_limit=40,batch_size=64') + + def testUniformRandomSearch(self): + self.RunTrainingSteps( + 'env=c(task="reverse"),' + 'agent=c(algorithm="rand"),' + 'timestep_limit=40,batch_size=64') + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/brain_coder/single_task/launch_training.sh b/models/research/brain_coder/single_task/launch_training.sh new file mode 100644 index 0000000000000000000000000000000000000000..a4a4688ed2912792185aa8f3134b1680fed6f006 --- /dev/null +++ b/models/research/brain_coder/single_task/launch_training.sh @@ -0,0 +1,72 @@ +#!/bin/bash +# Launches training jobs. +# Modify this file to launch workers with your prefered cloud API. +# The following implementation runs each worker as a subprocess on the local +# machine. + +MODELS_DIR="/tmp/models" + +# Get command line options. +OPTS=$(getopt -n "$0" -o "" --long "job_name:,config:,num_workers:,num_ps:,max_npe:,num_repetitions:,stop_on_success:" -- "$@") +if [ $? != 0 ] ; then echo "Failed parsing options." >&2 ; exit 1 ; fi + +eval set -- "$OPTS" + +JOB_NAME="" # Name of the process and the logs directory. +CONFIG="" # Model and environment hparams. +# NUM_WORKERS: Number of workers to launch for this training job. If using +# neural networks, each worker will be 1 replica. +NUM_WORKERS=1 +# NUM_PS: Number of parameter servers to launch for this training job. Only set +# this if using neural networks. For 1 worker, no parameter servers are needed. +# For more than 1 worker, at least 1 parameter server is needed to store the +# global model. +NUM_PS=0 +# MAX_NPE: Maximum number of programs executed. Training will quit once this +# threshold is reached. If 0, the threshold is infinite. +MAX_NPE=0 +NUM_REPETITIONS=1 # How many times to run this experiment. +STOP_ON_SUCCESS=true # Whether to halt training when a solution is found. + +# Parse options into variables. +while true; do + case "$1" in + --job_name ) JOB_NAME="$2"; shift; shift ;; + --config ) CONFIG="$2"; shift; shift ;; + --num_workers ) NUM_WORKERS="$2"; shift; shift ;; + --num_ps ) NUM_PS="$2"; shift; shift ;; + --max_npe ) MAX_NPE="$2"; shift; shift ;; + --num_repetitions ) NUM_REPETITIONS="$2"; shift; shift ;; + --stop_on_success ) STOP_ON_SUCCESS="$2"; shift; shift ;; + -- ) shift; break ;; + * ) break ;; + esac +done + +# Launch jobs. +# TODO: multi-worker RL training + +LOGDIR="$MODELS_DIR/$JOB_NAME" +mkdir -p $LOGDIR + +BIN_DIR="bazel-bin/single_task" +for (( i=0; i "$LOGDIR/task_$i.log" & # Run as subprocess + echo "Launched task $i. Logs: $LOGDIR/task_$i.log" +done + + +# Use "pidof run.par" to find jobs. 
+# Kill with "pkill run.par" diff --git a/models/research/brain_coder/single_task/launch_tuning.sh b/models/research/brain_coder/single_task/launch_tuning.sh new file mode 100644 index 0000000000000000000000000000000000000000..97ce51b543e13d4b1c412656a93197b5b47373bb --- /dev/null +++ b/models/research/brain_coder/single_task/launch_tuning.sh @@ -0,0 +1,87 @@ +#!/bin/bash +# Launches tuning jobs. +# Modify this file to launch workers with your prefered cloud API. +# The following implementation runs each worker as a subprocess on the local +# machine. + +MODELS_DIR="/tmp/models" + +# Get command line options. +OPTS=$(getopt -n "$0" -o "" --long "job_name:,config:,num_tuners:,num_workers_per_tuner:,num_ps_per_tuner:,max_npe:,num_repetitions:,stop_on_success:,fixed_hparams:,hparam_space_type:" -- "$@") +if [ $? != 0 ] ; then echo "Failed parsing options." >&2 ; exit 1 ; fi + +eval set -- "$OPTS" + +JOB_NAME="" # Name of the process and the logs directory. +CONFIG="" # Model and environment hparams. +# NUM_TUNERS: Number of tuning jobs to launch. Each tuning job can train a +# hparam combination. So more tuners means more hparams tried in parallel. +NUM_TUNERS=1 +# NUM_WORKERS_PER_TUNER: Number of workers to launch for each tuning job. If +# using neural networks, each worker will be 1 replica. +NUM_WORKERS_PER_TUNER=1 +# NUM_PS_PER_TUNER: Number of parameter servers to launch for this tuning job. +# Only set this if using neural networks. For 1 worker per tuner, no parameter +# servers are needed. For more than 1 worker per tuner, at least 1 parameter +# server per tuner is needed to store the global model for each tuner. +NUM_PS_PER_TUNER=0 +# MAX_NPE: Maximum number of programs executed. Training will quit once this +# threshold is reached. If 0, the threshold is infinite. +MAX_NPE=0 +NUM_REPETITIONS=25 # How many times to run this experiment. +STOP_ON_SUCCESS=true # Whether to halt training when a solution is found. +# FIXED_HPARAMS: Hold hparams fixed in the grid search. This reduces the search +# space. +FIXED_HPARAMS="" +# HPARAM_SPACE_TYPE: Specifies the hparam search space. See +# `define_tuner_hparam_space` functions defined in pg_train.py and ga_train.py. +HPARAM_SPACE_TYPE="pg" + +# Parse options into variables. +while true; do + case "$1" in + --job_name ) JOB_NAME="$2"; shift; shift ;; + --config ) CONFIG="$2"; shift; shift ;; + --num_tuners ) NUM_TUNERS="$2"; shift; shift ;; + --num_workers_per_tuner ) NUM_WORKERS_PER_TUNER="$2"; shift; shift ;; + --num_ps_per_tuner ) NUM_PS_PER_TUNER="$2"; shift; shift ;; + --max_npe ) MAX_NPE="$2"; shift; shift ;; + --num_repetitions ) NUM_REPETITIONS="$2"; shift; shift ;; + --stop_on_success ) STOP_ON_SUCCESS="$2"; shift; shift ;; + --fixed_hparams ) FIXED_HPARAMS="$2"; shift; shift ;; + --hparam_space_type ) HPARAM_SPACE_TYPE="$2"; shift; shift ;; + -- ) shift; break ;; + * ) break ;; + esac +done + +# Launch jobs. +# TODO: multi-worker RL training + +LOGDIR="$MODELS_DIR/$JOB_NAME" +mkdir -p $LOGDIR + +BIN_DIR="bazel-bin/single_task" +for ((tuner=0;tuner "$LOGDIR/tuner_$tuner.task_$i.log" & # Run as subprocess + echo "Launched tuner $tuner, task $i. Logs: $LOGDIR/tuner_$tuner.task_$i.log" + done +done + +# Use "pidof tune.par" to find jobs. 
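+#
+# Example invocation (hypothetical settings; assumes tune.par has been built
+# with bazel and that this script is run from the brain_coder root):
+#   single_task/launch_tuning.sh \
+#     --job_name=reverse_ga_tune \
+#     --config='env=c(task="reverse"),agent=c(algorithm="ga")' \
+#     --num_tuners=10 --num_workers_per_tuner=1 \
+#     --hparam_space_type=ga --num_repetitions=25
+# Each worker logs to /tmp/models/reverse_ga_tune/tuner_<t>.task_<i>.log.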
+# Kill with "pkill tune.par" diff --git a/models/research/brain_coder/single_task/misc.py b/models/research/brain_coder/single_task/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..07061d81c8aaafd4d97efc11ecca451528c6e9dd --- /dev/null +++ b/models/research/brain_coder/single_task/misc.py @@ -0,0 +1,149 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""Utilities specific to this project.""" + +from collections import namedtuple +from six import string_types + + +##################### +# BF-lang utilities # +##################### + + +BF_EOS_INT = 0 # Also used as SOS (start of sequence). +BF_EOS_CHAR = TEXT_EOS_CHAR = '_' +BF_LANG_INTS = range(1, 9) +BF_INT_TO_CHAR = [BF_EOS_CHAR, '>', '<', '+', '-', '[', ']', '.', ','] +BF_CHAR_TO_INT = dict([(c, i) for i, c in enumerate(BF_INT_TO_CHAR)]) + + +RewardInfo = namedtuple('RewardInfo', ['episode_rewards', 'input_case', + 'correct_output', + 'code_output', 'reason', 'input_type', + 'output_type']) + + +class IOType(object): + string = 'string' + integer = 'integer' + boolean = 'boolean' + + +class IOTuple(tuple): + pass + + +def flatten(lst): + return [item for row in lst for item in row] + + +def bf_num_tokens(): + # BF tokens plus EOS. + return len(BF_INT_TO_CHAR) + + +def bf_char2int(bf_char): + """Convert BF code char to int token.""" + return BF_CHAR_TO_INT[bf_char] + + +def bf_int2char(bf_int): + """Convert BF int token to code char.""" + return BF_INT_TO_CHAR[bf_int] + + +def bf_tokens_to_string(bf_tokens, truncate=True): + """Convert token list to code string. Will truncate at EOS token. + + Args: + bf_tokens: Python list of ints representing the code string. + truncate: If true, the output string will end at the first EOS token. + If false, the entire token list is converted to string. + + Returns: + String representation of the tokens. + + Raises: + ValueError: If bf_tokens is not a python list. + """ + if not isinstance(bf_tokens, list): + raise ValueError('Only python list supported here.') + if truncate: + try: + eos_index = bf_tokens.index(BF_EOS_INT) + except ValueError: + eos_index = len(bf_tokens) + else: + eos_index = len(bf_tokens) + return ''.join([BF_INT_TO_CHAR[t] for t in bf_tokens[:eos_index]]) + + +def bf_string_to_tokens(bf_string): + """Convert string to token list. Will strip and append EOS token.""" + tokens = [BF_CHAR_TO_INT[char] for char in bf_string.strip()] + tokens.append(BF_EOS_INT) + return tokens + + +def tokens_to_text(tokens): + """Convert token list to human readable text.""" + return ''.join( + [TEXT_EOS_CHAR if t == 0 else chr(t - 1 + ord('A')) for t in tokens]) + + +################################### +# Number representation utilities # +################################### + + +# https://en.wikipedia.org/wiki/Metric_prefix +si_magnitudes = { + 'k': 1e3, + 'm': 1e6, + 'g': 1e9} + + +def si_to_int(s): + """Convert string ending with SI magnitude to int. + + Examples: 5K ==> 5000, 12M ==> 12000000. + + Args: + s: String in the form 'xx..xP' where x is a digit and P is an SI prefix. + + Returns: + Integer equivalent to the string. + """ + if isinstance(s, string_types) and s[-1].lower() in si_magnitudes.keys(): + return int(int(s[:-1]) * si_magnitudes[s[-1].lower()]) + return int(s) + + +def int_to_si(n): + """Convert integer to string with SI magnitude. + + `n` will be truncated. + + Examples: 5432 ==> 5k, 12345678 ==> 12M + + Args: + n: Integer to represent as a string. 
+ + Returns: + String representation of `n` containing SI magnitude. + """ + m = abs(n) + sign = -1 if n < 0 else 1 + if m < 1e3: + return str(n) + if m < 1e6: + return '{0}K'.format(sign*int(m / 1e3)) + if m < 1e9: + return '{0}M'.format(sign*int(m / 1e6)) + if m < 1e12: + return '{0}G'.format(sign*int(m / 1e9)) + return str(m) + diff --git a/models/research/brain_coder/single_task/pg_agent.py b/models/research/brain_coder/single_task/pg_agent.py new file mode 100644 index 0000000000000000000000000000000000000000..13fc7da2dc89a1fbcc7fa5efbbce87008580aa92 --- /dev/null +++ b/models/research/brain_coder/single_task/pg_agent.py @@ -0,0 +1,1297 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""Language model agent. + +Agent outputs code in a sequence just like a language model. Can be trained +as a language model or using RL, or a combination of the two. +""" + +from collections import namedtuple +from math import exp +from math import log +import time + +from absl import logging +import numpy as np +from six.moves import xrange +import tensorflow as tf + +from common import rollout as rollout_lib # brain coder +from common import utils # brain coder +from single_task import misc # brain coder + + +# Experiments in the ICLR 2018 paper used reduce_sum instead of reduce_mean for +# some losses. We make all loses be batch_size independent, and multiply the +# changed losses by 64, which was the fixed batch_size when the experiments +# where run. The loss hyperparameters still match what is reported in the paper. +MAGIC_LOSS_MULTIPLIER = 64 + + +def rshift_time(tensor_2d, fill=misc.BF_EOS_INT): + """Right shifts a 2D tensor along the time dimension (axis-1).""" + dim_0 = tf.shape(tensor_2d)[0] + fill_tensor = tf.fill([dim_0, 1], fill) + return tf.concat([fill_tensor, tensor_2d[:, :-1]], axis=1) + + +def join(a, b): + # Concat a and b along 0-th dim. + if a is None or len(a) == 0: # pylint: disable=g-explicit-length-test + return b + if b is None or len(b) == 0: # pylint: disable=g-explicit-length-test + return a + return np.concatenate((a, b)) + + +def make_optimizer(kind, lr): + if kind == 'sgd': + return tf.train.GradientDescentOptimizer(lr) + elif kind == 'adam': + return tf.train.AdamOptimizer(lr) + elif kind == 'rmsprop': + return tf.train.RMSPropOptimizer(learning_rate=lr, decay=0.99) + else: + raise ValueError('Optimizer type "%s" not recognized.' % kind) + + +class LinearWrapper(tf.contrib.rnn.RNNCell): + """RNNCell wrapper that adds a linear layer to the output.""" + + def __init__(self, cell, output_size, dtype=tf.float32, suppress_index=None): + self.cell = cell + self._output_size = output_size + self._dtype = dtype + self._suppress_index = suppress_index + self.smallest_float = -2.4e38 + + def __call__(self, inputs, state, scope=None): + with tf.variable_scope(type(self).__name__): + outputs, state = self.cell(inputs, state, scope=scope) + logits = tf.matmul( + outputs, + tf.get_variable('w_output', + [self.cell.output_size, self.output_size], + dtype=self._dtype)) + if self._suppress_index is not None: + # Replace the target index with -inf, so that it never gets selected. 
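+        # Illustrative sketch (hypothetical numbers): with suppress_index=0,
+        # which is the EOS token whenever config.eos_token is False, logits
+        # [[1.2, 0.3, -0.5]] become [[-2.4e38, 0.3, -0.5]], so tf.multinomial
+        # can never sample the suppressed token.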
+ batch_size = tf.shape(logits)[0] + logits = tf.concat( + [logits[:, :self._suppress_index], + tf.fill([batch_size, 1], self.smallest_float), + logits[:, self._suppress_index + 1:]], + axis=1) + + return logits, state + + @property + def output_size(self): + return self._output_size + + @property + def state_size(self): + return self.cell.state_size + + def zero_state(self, batch_size, dtype): + return self.cell.zero_state(batch_size, dtype) + + +UpdateStepResult = namedtuple( + 'UpdateStepResult', + ['global_step', 'global_npe', 'summaries_list', 'gradients_dict']) + + +class AttrDict(dict): + """Dict with attributes as keys. + + https://stackoverflow.com/a/14620633 + """ + + def __init__(self, *args, **kwargs): + super(AttrDict, self).__init__(*args, **kwargs) + self.__dict__ = self + + +class LMAgent(object): + """Language model agent.""" + action_space = misc.bf_num_tokens() + observation_space = misc.bf_num_tokens() + + def __init__(self, global_config, task_id=0, + logging_file=None, + experience_replay_file=None, + global_best_reward_fn=None, + found_solution_op=None, + assign_code_solution_fn=None, + program_count=None, + do_iw_summaries=False, + stop_on_success=True, + dtype=tf.float32, + verbose_level=0, + is_local=True): + self.config = config = global_config.agent + self.logging_file = logging_file + self.experience_replay_file = experience_replay_file + self.task_id = task_id + self.verbose_level = verbose_level + self.global_best_reward_fn = global_best_reward_fn + self.found_solution_op = found_solution_op + self.assign_code_solution_fn = assign_code_solution_fn + self.parent_scope_name = tf.get_variable_scope().name + self.dtype = dtype + self.allow_eos_token = config.eos_token + self.stop_on_success = stop_on_success + self.pi_loss_hparam = config.pi_loss_hparam + self.vf_loss_hparam = config.vf_loss_hparam + self.is_local = is_local + + self.top_reward = 0.0 + self.embeddings_trainable = True + + self.no_op = tf.no_op() + + self.learning_rate = tf.constant( + config.lr, dtype=dtype, name='learning_rate') + self.initializer = tf.contrib.layers.variance_scaling_initializer( + factor=config.param_init_factor, + mode='FAN_AVG', + uniform=True, + dtype=dtype) # TF's default initializer. + tf.get_variable_scope().set_initializer(self.initializer) + + self.a2c = config.ema_baseline_decay == 0 + if not self.a2c: + logging.info('Using exponential moving average REINFORCE baselines.') + self.ema_baseline_decay = config.ema_baseline_decay + self.ema_by_len = [0.0] * global_config.timestep_limit + else: + logging.info('Using advantage (a2c) with learned value function.') + self.ema_baseline_decay = 0.0 + self.ema_by_len = None + + # Top-k + if config.topk and config.topk_loss_hparam: + self.topk_loss_hparam = config.topk_loss_hparam + self.topk_batch_size = config.topk_batch_size + if self.topk_batch_size <= 0: + raise ValueError('topk_batch_size must be a positive integer. Got %s', + self.topk_batch_size) + self.top_episodes = utils.MaxUniquePriorityQueue(config.topk) + logging.info('Made max-priorty-queue with capacity %d', + self.top_episodes.capacity) + else: + self.top_episodes = None + self.topk_loss_hparam = 0.0 + logging.info('No max-priorty-queue') + + # Experience replay. 
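+    # Each training batch mixes episodes replayed from the buffer with fresh
+    # on-policy samples. For example (hypothetical hparams), batch_size=64 and
+    # alpha=0.5 give num_replay_per_batch = 32 and num_on_policy_per_batch = 32,
+    # so replay_alpha = 32/64 = 0.5.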
+ self.replay_temperature = config.replay_temperature + self.num_replay_per_batch = int(global_config.batch_size * config.alpha) + self.num_on_policy_per_batch = ( + global_config.batch_size - self.num_replay_per_batch) + self.replay_alpha = ( + self.num_replay_per_batch / float(global_config.batch_size)) + logging.info('num_replay_per_batch: %d', self.num_replay_per_batch) + logging.info('num_on_policy_per_batch: %d', self.num_on_policy_per_batch) + logging.info('replay_alpha: %s', self.replay_alpha) + if self.num_replay_per_batch > 0: + # Train with off-policy episodes from replay buffer. + start_time = time.time() + self.experience_replay = utils.RouletteWheel( + unique_mode=True, save_file=experience_replay_file) + logging.info('Took %s sec to load replay buffer from disk.', + int(time.time() - start_time)) + logging.info('Replay buffer file location: "%s"', + self.experience_replay.save_file) + else: + # Only train on-policy. + self.experience_replay = None + + if program_count is not None: + self.program_count = program_count + self.program_count_add_ph = tf.placeholder( + tf.int64, [], 'program_count_add_ph') + self.program_count_add_op = self.program_count.assign_add( + self.program_count_add_ph) + + ################################ + # RL policy and value networks # + ################################ + batch_size = global_config.batch_size + logging.info('batch_size: %d', batch_size) + + self.policy_cell = LinearWrapper( + tf.contrib.rnn.MultiRNNCell( + [tf.contrib.rnn.BasicLSTMCell(cell_size) + for cell_size in config.policy_lstm_sizes]), + self.action_space, + dtype=dtype, + suppress_index=None if self.allow_eos_token else misc.BF_EOS_INT) + self.value_cell = LinearWrapper( + tf.contrib.rnn.MultiRNNCell( + [tf.contrib.rnn.BasicLSTMCell(cell_size) + for cell_size in config.value_lstm_sizes]), + 1, + dtype=dtype) + + obs_embedding_scope = 'obs_embed' + with tf.variable_scope( + obs_embedding_scope, + initializer=tf.random_uniform_initializer(minval=-1.0, maxval=1.0)): + obs_embeddings = tf.get_variable( + 'embeddings', + [self.observation_space, config.obs_embedding_size], + dtype=dtype, trainable=self.embeddings_trainable) + self.obs_embeddings = obs_embeddings + + ################################ + # RL policy and value networks # + ################################ + + initial_state = tf.fill([batch_size], misc.BF_EOS_INT) + def loop_fn(loop_time, cell_output, cell_state, loop_state): + """Function called by tf.nn.raw_rnn to instantiate body of the while_loop. + + See https://www.tensorflow.org/api_docs/python/tf/nn/raw_rnn for more + information. + + When time is 0, and cell_output, cell_state, loop_state are all None, + `loop_fn` will create the initial input, internal cell state, and loop + state. When time > 0, `loop_fn` will operate on previous cell output, + state, and loop state. + + Args: + loop_time: A scalar tensor holding the current timestep (zero based + counting). + cell_output: Output of the raw_rnn cell at the current timestep. + cell_state: Cell internal state at the current timestep. + loop_state: Additional loop state. These tensors were returned by the + previous call to `loop_fn`. + + Returns: + elements_finished: Bool tensor of shape [batch_size] which marks each + sequence in the batch as being finished or not finished. + next_input: A tensor containing input to be fed into the cell at the + next timestep. + next_cell_state: Cell internal state to be fed into the cell at the + next timestep. 
+ emit_output: Tensor to be added to the TensorArray returned by raw_rnn + as output from the while_loop. + next_loop_state: Additional loop state. These tensors will be fed back + into the next call to `loop_fn` as `loop_state`. + """ + if cell_output is None: # 0th time step. + next_cell_state = self.policy_cell.zero_state(batch_size, dtype) + elements_finished = tf.zeros([batch_size], tf.bool) + output_lengths = tf.ones([batch_size], dtype=tf.int32) + next_input = tf.gather(obs_embeddings, initial_state) + emit_output = None + next_loop_state = ( + tf.TensorArray(dtype=tf.int32, size=0, dynamic_size=True), + output_lengths, + elements_finished + ) + else: + scaled_logits = cell_output * config.softmax_tr # Scale temperature. + prev_chosen, prev_output_lengths, prev_elements_finished = loop_state + next_cell_state = cell_state + chosen_outputs = tf.to_int32(tf.where( + tf.logical_not(prev_elements_finished), + tf.multinomial(logits=scaled_logits, num_samples=1)[:, 0], + tf.zeros([batch_size], dtype=tf.int64))) + elements_finished = tf.logical_or( + tf.equal(chosen_outputs, misc.BF_EOS_INT), + loop_time >= global_config.timestep_limit) + output_lengths = tf.where( + elements_finished, + prev_output_lengths, + # length includes EOS token. empty seq has len 1. + tf.tile(tf.expand_dims(loop_time + 1, 0), [batch_size]) + ) + next_input = tf.gather(obs_embeddings, chosen_outputs) + emit_output = scaled_logits + next_loop_state = (prev_chosen.write(loop_time - 1, chosen_outputs), + output_lengths, + tf.logical_or(prev_elements_finished, + elements_finished)) + return (elements_finished, next_input, next_cell_state, emit_output, + next_loop_state) + + with tf.variable_scope('policy'): + (decoder_outputs_ta, + _, # decoder_state + (sampled_output_ta, output_lengths, _)) = tf.nn.raw_rnn( + cell=self.policy_cell, + loop_fn=loop_fn) + policy_logits = tf.transpose(decoder_outputs_ta.stack(), (1, 0, 2), + name='policy_logits') + sampled_tokens = tf.transpose(sampled_output_ta.stack(), (1, 0), + name='sampled_tokens') + # Add SOS to beginning of the sequence. + rshift_sampled_tokens = rshift_time(sampled_tokens, fill=misc.BF_EOS_INT) + + # Initial state is 0, 2nd state is first token. + # Note: If value of last state is computed, this will be used as bootstrap. + if self.a2c: + with tf.variable_scope('value'): + value_output, _ = tf.nn.dynamic_rnn( + self.value_cell, + tf.gather(obs_embeddings, rshift_sampled_tokens), + sequence_length=output_lengths, + dtype=dtype) + value = tf.squeeze(value_output, axis=[2]) + else: + value = tf.zeros([], dtype=dtype) + + # for sampling actions from the agent, and which told tensors for doing + # gradient updates on the agent. + self.sampled_batch = AttrDict( + logits=policy_logits, + value=value, + tokens=sampled_tokens, + episode_lengths=output_lengths, + probs=tf.nn.softmax(policy_logits), + log_probs=tf.nn.log_softmax(policy_logits)) + + # adjusted_lengths can be less than the full length of each episode. + # Use this to train on only part of an episode (starting from t=0). + self.adjusted_lengths = tf.placeholder( + tf.int32, [None], name='adjusted_lengths') + self.policy_multipliers = tf.placeholder( + dtype, + [None, None], + name='policy_multipliers') + # Empirical value, i.e. discounted sum of observed future rewards from each + # time step in the episode. + self.empirical_values = tf.placeholder( + dtype, + [None, None], + name='empirical_values') + + # Off-policy training. Just add supervised loss to the RL loss. 
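+    # The placeholders below carry code sequences drawn from the top-k queue
+    # of best episodes seen so far. They are trained on with an ordinary
+    # cross-entropy (maximum likelihood) loss, scaled by topk_loss_hparam and
+    # added to the REINFORCE loss below.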
+ self.off_policy_targets = tf.placeholder( + tf.int32, + [None, None], + name='off_policy_targets') + self.off_policy_target_lengths = tf.placeholder( + tf.int32, [None], name='off_policy_target_lengths') + + self.actions = tf.placeholder(tf.int32, [None, None], name='actions') + # Add SOS to beginning of the sequence. + inputs = rshift_time(self.actions, fill=misc.BF_EOS_INT) + with tf.variable_scope('policy', reuse=True): + logits, _ = tf.nn.dynamic_rnn( + self.policy_cell, tf.gather(obs_embeddings, inputs), + sequence_length=self.adjusted_lengths, + dtype=dtype) + + if self.a2c: + with tf.variable_scope('value', reuse=True): + value_output, _ = tf.nn.dynamic_rnn( + self.value_cell, + tf.gather(obs_embeddings, inputs), + sequence_length=self.adjusted_lengths, + dtype=dtype) + value2 = tf.squeeze(value_output, axis=[2]) + else: + value2 = tf.zeros([], dtype=dtype) + + self.given_batch = AttrDict( + logits=logits, + value=value2, + tokens=sampled_tokens, + episode_lengths=self.adjusted_lengths, + probs=tf.nn.softmax(logits), + log_probs=tf.nn.log_softmax(logits)) + + # Episode masks. + max_episode_length = tf.shape(self.actions)[1] + # range_row shape: [1, max_episode_length] + range_row = tf.expand_dims(tf.range(max_episode_length), 0) + episode_masks = tf.cast( + tf.less(range_row, tf.expand_dims(self.given_batch.episode_lengths, 1)), + dtype=dtype) + episode_masks_3d = tf.expand_dims(episode_masks, 2) + + # Length adjusted episodes. + self.a_probs = a_probs = self.given_batch.probs * episode_masks_3d + self.a_log_probs = a_log_probs = ( + self.given_batch.log_probs * episode_masks_3d) + self.a_value = a_value = self.given_batch.value * episode_masks + self.a_policy_multipliers = a_policy_multipliers = ( + self.policy_multipliers * episode_masks) + if self.a2c: + self.a_empirical_values = a_empirical_values = ( + self.empirical_values * episode_masks) + + # pi_loss is scalar + acs_onehot = tf.one_hot(self.actions, self.action_space, dtype=dtype) + self.acs_onehot = acs_onehot + chosen_masked_log_probs = acs_onehot * a_log_probs + pi_target = tf.expand_dims(a_policy_multipliers, -1) + pi_loss_per_step = chosen_masked_log_probs * pi_target # Maximize. + self.pi_loss = pi_loss = ( + -tf.reduce_mean(tf.reduce_sum(pi_loss_per_step, axis=[1, 2]), axis=0) + * MAGIC_LOSS_MULTIPLIER) # Minimize. + assert len(self.pi_loss.shape) == 0 # pylint: disable=g-explicit-length-test + + # shape: [batch_size, time] + self.chosen_log_probs = tf.reduce_sum(chosen_masked_log_probs, axis=2) + self.chosen_probs = tf.reduce_sum(acs_onehot * a_probs, axis=2) + + # loss of value function + if self.a2c: + vf_loss_per_step = tf.square(a_value - a_empirical_values) + self.vf_loss = vf_loss = ( + tf.reduce_mean(tf.reduce_sum(vf_loss_per_step, axis=1), axis=0) + * MAGIC_LOSS_MULTIPLIER) # Minimize. + assert len(self.vf_loss.shape) == 0 # pylint: disable=g-explicit-length-test + else: + self.vf_loss = vf_loss = 0.0 + + # Maximize entropy regularizer + self.entropy = entropy = ( + -tf.reduce_mean( + tf.reduce_sum(a_probs * a_log_probs, axis=[1, 2]), axis=0) + * MAGIC_LOSS_MULTIPLIER) # Maximize + self.negentropy = -entropy # Minimize negentropy. + assert len(self.negentropy.shape) == 0 # pylint: disable=g-explicit-length-test + + # off-policy loss + self.offp_switch = tf.placeholder(dtype, [], name='offp_switch') + if self.top_episodes is not None: + # Add SOS to beginning of the sequence. 
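+      # rshift_time drops the final token and prepends EOS (reused as SOS), so
+      # the model predicts token t from tokens < t. E.g. (hypothetical tokens)
+      # a target row [3, 5, 7, 0] is fed in as [0, 3, 5, 7].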
+ offp_inputs = tf.gather(obs_embeddings, + rshift_time(self.off_policy_targets, + fill=misc.BF_EOS_INT)) + with tf.variable_scope('policy', reuse=True): + offp_logits, _ = tf.nn.dynamic_rnn( + self.policy_cell, offp_inputs, self.off_policy_target_lengths, + dtype=dtype) # shape: [batch_size, time, action_space] + topk_loss_per_step = tf.nn.sparse_softmax_cross_entropy_with_logits( + labels=self.off_policy_targets, + logits=offp_logits, + name='topk_loss_per_logit') + # Take mean over batch dimension so that the loss multiplier strength is + # independent of batch size. Sum over time dimension. + topk_loss = tf.reduce_mean( + tf.reduce_sum(topk_loss_per_step, axis=1), axis=0) + assert len(topk_loss.shape) == 0 # pylint: disable=g-explicit-length-test + self.topk_loss = topk_loss * self.offp_switch + logging.info('Including off policy loss.') + else: + self.topk_loss = topk_loss = 0.0 + + self.entropy_hparam = tf.constant( + config.entropy_beta, dtype=dtype, name='entropy_beta') + + self.pi_loss_term = pi_loss * self.pi_loss_hparam + self.vf_loss_term = vf_loss * self.vf_loss_hparam + self.entropy_loss_term = self.negentropy * self.entropy_hparam + self.topk_loss_term = self.topk_loss_hparam * topk_loss + self.loss = ( + self.pi_loss_term + + self.vf_loss_term + + self.entropy_loss_term + + self.topk_loss_term) + + params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, + tf.get_variable_scope().name) + self.trainable_variables = params + self.sync_variables = self.trainable_variables + non_embedding_params = [p for p in params + if obs_embedding_scope not in p.name] + self.non_embedding_params = non_embedding_params + self.params = params + + if config.regularizer: + logging.info('Adding L2 regularizer with scale %.2f.', + config.regularizer) + self.regularizer = config.regularizer * sum( + tf.nn.l2_loss(w) for w in non_embedding_params) + self.loss += self.regularizer + else: + logging.info('Skipping regularizer.') + self.regularizer = 0.0 + + # Only build gradients graph for local model. 
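+    # Each worker computes gradients on its local replica, clips them by
+    # global norm (config.grad_clip_threshold), and applies them to the shared
+    # parameters through a training op constructed by the trainer (see
+    # update_step).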
+ if self.is_local: + unclipped_grads = tf.gradients(self.loss, params) + self.dense_unclipped_grads = [ + tf.convert_to_tensor(g) for g in unclipped_grads] + self.grads, self.global_grad_norm = tf.clip_by_global_norm( + unclipped_grads, config.grad_clip_threshold) + self.gradients_dict = dict(zip(params, self.grads)) + self.optimizer = make_optimizer(config.optimizer, self.learning_rate) + self.all_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, + tf.get_variable_scope().name) + + self.do_iw_summaries = do_iw_summaries + if self.do_iw_summaries: + b = None + self.log_iw_replay_ph = tf.placeholder(tf.float32, [b], + 'log_iw_replay_ph') + self.log_iw_policy_ph = tf.placeholder(tf.float32, [b], + 'log_iw_policy_ph') + self.log_prob_replay_ph = tf.placeholder(tf.float32, [b], + 'log_prob_replay_ph') + self.log_prob_policy_ph = tf.placeholder(tf.float32, [b], + 'log_prob_policy_ph') + self.log_norm_replay_weights_ph = tf.placeholder( + tf.float32, [b], 'log_norm_replay_weights_ph') + self.iw_summary_op = tf.summary.merge([ + tf.summary.histogram('is/log_iw_replay', self.log_iw_replay_ph), + tf.summary.histogram('is/log_iw_policy', self.log_iw_policy_ph), + tf.summary.histogram('is/log_prob_replay', self.log_prob_replay_ph), + tf.summary.histogram('is/log_prob_policy', self.log_prob_policy_ph), + tf.summary.histogram( + 'is/log_norm_replay_weights', self.log_norm_replay_weights_ph), + ]) + + def make_summary_ops(self): + """Construct summary ops for the model.""" + # size = number of timesteps across entire batch. Number normalized by size + # will not be affected by the amount of padding at the ends of sequences + # in the batch. + size = tf.cast( + tf.reduce_sum(self.given_batch.episode_lengths), dtype=self.dtype) + offp_size = tf.cast(tf.reduce_sum(self.off_policy_target_lengths), + dtype=self.dtype) + scope_prefix = self.parent_scope_name + + def _remove_prefix(prefix, name): + assert name.startswith(prefix) + return name[len(prefix):] + + # RL summaries. 
+ self.rl_summary_op = tf.summary.merge( + [tf.summary.scalar('model/policy_loss', self.pi_loss / size), + tf.summary.scalar('model/value_loss', self.vf_loss / size), + tf.summary.scalar('model/topk_loss', self.topk_loss / offp_size), + tf.summary.scalar('model/entropy', self.entropy / size), + tf.summary.scalar('model/loss', self.loss / size), + tf.summary.scalar('model/grad_norm', + tf.global_norm(self.grads)), + tf.summary.scalar('model/unclipped_grad_norm', self.global_grad_norm), + tf.summary.scalar('model/non_embedding_var_norm', + tf.global_norm(self.non_embedding_params)), + tf.summary.scalar('hparams/entropy_beta', self.entropy_hparam), + tf.summary.scalar('hparams/topk_loss_hparam', self.topk_loss_hparam), + tf.summary.scalar('hparams/learning_rate', self.learning_rate), + tf.summary.scalar('model/trainable_var_norm', + tf.global_norm(self.trainable_variables)), + tf.summary.scalar('loss/loss', self.loss), + tf.summary.scalar('loss/entropy', self.entropy_loss_term), + tf.summary.scalar('loss/vf', self.vf_loss_term), + tf.summary.scalar('loss/policy', self.pi_loss_term), + tf.summary.scalar('loss/offp', self.topk_loss_term)] + + [tf.summary.scalar( + 'param_norms/' + _remove_prefix(scope_prefix + '/', p.name), + tf.norm(p)) + for p in self.params] + + [tf.summary.scalar( + 'grad_norms/' + _remove_prefix(scope_prefix + '/', p.name), + tf.norm(g)) + for p, g in zip(self.params, self.grads)] + + [tf.summary.scalar( + 'unclipped_grad_norms/' + _remove_prefix(scope_prefix + '/', + p.name), + tf.norm(g)) + for p, g in zip(self.params, self.dense_unclipped_grads)]) + + self.text_summary_placeholder = tf.placeholder(tf.string, shape=[]) + self.rl_text_summary_op = tf.summary.text('rl', + self.text_summary_placeholder) + + def _rl_text_summary(self, session, step, npe, tot_r, num_steps, + input_case, code_output, code, reason): + """Logs summary about a single episode and creates a text_summary for TB. + + Args: + session: tf.Session instance. + step: Global training step. + npe: Number of programs executed so far. + tot_r: Total reward. + num_steps: Number of timesteps in the episode (i.e. code length). + input_case: Inputs for test cases. + code_output: Outputs produced by running the code on the inputs. + code: String representation of the code. + reason: Reason for the reward assigned by the task. + + Returns: + Serialized text summary data for tensorboard. + """ + if not input_case: + input_case = ' ' + if not code_output: + code_output = ' ' + if not code: + code = ' ' + text = ( + 'Tot R: **%.2f**; Len: **%d**; Reason: **%s**\n\n' + 'Input: **`%s`**; Output: **`%s`**\n\nCode: **`%s`**' + % (tot_r, num_steps, reason, input_case, code_output, code)) + text_summary = session.run(self.rl_text_summary_op, + {self.text_summary_placeholder: text}) + logging.info( + 'Step %d.\t NPE: %d\t Reason: %s.\t Tot R: %.2f.\t Length: %d. ' + '\tInput: %s \tOutput: %s \tProgram: %s', + step, npe, reason, tot_r, num_steps, input_case, + code_output, code) + return text_summary + + def _rl_reward_summary(self, total_rewards): + """Create summary ops that report on episode rewards. + + Creates summaries for average, median, max, and min rewards in the batch. + + Args: + total_rewards: Tensor of shape [batch_size] containing the total reward + from each episode in the batch. + + Returns: + tf.Summary op. 
+ """ + tr = np.asarray(total_rewards) + reward_summary = tf.Summary(value=[ + tf.Summary.Value( + tag='reward/avg', + simple_value=np.mean(tr)), + tf.Summary.Value( + tag='reward/med', + simple_value=np.median(tr)), + tf.Summary.Value( + tag='reward/max', + simple_value=np.max(tr)), + tf.Summary.Value( + tag='reward/min', + simple_value=np.min(tr))]) + return reward_summary + + def _iw_summary(self, session, replay_iw, replay_log_probs, + norm_replay_weights, on_policy_iw, + on_policy_log_probs): + """Compute summaries for importance weights at a given batch. + + Args: + session: tf.Session instance. + replay_iw: Importance weights for episodes from replay buffer. + replay_log_probs: Total log probabilities of the replay episodes under the + current policy. + norm_replay_weights: Normalized replay weights, i.e. values in `replay_iw` + divided by the total weight in the entire replay buffer. Note, this is + also the probability of selecting each episode from the replay buffer + (in a roulette wheel replay buffer). + on_policy_iw: Importance weights for episodes sampled from the current + policy. + on_policy_log_probs: Total log probabilities of the on-policy episodes + under the current policy. + + Returns: + Serialized TF summaries. Use a summary writer to write these summaries to + disk. + """ + return session.run( + self.iw_summary_op, + {self.log_iw_replay_ph: np.log(replay_iw), + self.log_iw_policy_ph: np.log(on_policy_iw), + self.log_norm_replay_weights_ph: np.log(norm_replay_weights), + self.log_prob_replay_ph: replay_log_probs, + self.log_prob_policy_ph: on_policy_log_probs}) + + def _compute_iw(self, policy_log_probs, replay_weights): + """Compute importance weights for a batch of episodes. + + Arguments are iterables of length batch_size. + + Args: + policy_log_probs: Log probability of each episode under the current + policy. + replay_weights: Weight of each episode in the replay buffer. 0 for + episodes not sampled from the replay buffer (i.e. sampled from the + policy). + + Returns: + Numpy array of shape [batch_size] containing the importance weight for + each episode in the batch. + """ + log_total_replay_weight = log(self.experience_replay.total_weight) + + # importance weight + # = 1 / [(1 - a) + a * exp(log(replay_weight / total_weight / p))] + # = 1 / ((1-a) + a*q/p) + a = float(self.replay_alpha) + a_com = 1.0 - a # compliment of a + importance_weights = np.asarray( + [1.0 / (a_com + + a * exp((log(replay_weight) - log_total_replay_weight) + - log_p)) + if replay_weight > 0 else 1.0 / a_com + for log_p, replay_weight + in zip(policy_log_probs, replay_weights)]) + return importance_weights + + def update_step(self, session, rl_batch, train_op, global_step_op, + return_gradients=False): + """Perform gradient update on the model. + + Args: + session: tf.Session instance. + rl_batch: RLBatch instance from data.py. Use DataManager to create a + RLBatch for each call to update_step. RLBatch contains a batch of + tasks. + train_op: A TF op which will perform the gradient update. LMAgent does not + own its training op, so that trainers can do distributed training + and construct a specialized training op. + global_step_op: A TF op which will return the current global step when + run (should not increment it). + return_gradients: If True, the gradients will be saved and returned from + this method call. This is useful for testing. 
+ + Returns: + Results from the update step in a UpdateStepResult namedtuple, including + global step, global NPE, serialized summaries, and optionally gradients. + """ + assert self.is_local + + # Do update for REINFORCE or REINFORCE + replay buffer. + if self.experience_replay is None: + # Train with on-policy REINFORCE. + + # Sample new programs from the policy. + num_programs_from_policy = rl_batch.batch_size + (batch_actions, + batch_values, + episode_lengths) = session.run( + [self.sampled_batch.tokens, self.sampled_batch.value, + self.sampled_batch.episode_lengths]) + if episode_lengths.size == 0: + # This should not happen. + logging.warn( + 'Shapes:\n' + 'batch_actions.shape: %s\n' + 'batch_values.shape: %s\n' + 'episode_lengths.shape: %s\n', + batch_actions.shape, batch_values.shape, episode_lengths.shape) + + # Compute rewards. + code_scores = compute_rewards( + rl_batch, batch_actions, episode_lengths) + code_strings = code_scores.code_strings + batch_tot_r = code_scores.total_rewards + test_cases = code_scores.test_cases + code_outputs = code_scores.code_outputs + reasons = code_scores.reasons + + # Process on-policy samples. + batch_targets, batch_returns = process_episodes( + code_scores.batch_rewards, episode_lengths, a2c=self.a2c, + baselines=self.ema_by_len, + batch_values=batch_values) + batch_policy_multipliers = batch_targets + batch_emp_values = batch_returns if self.a2c else [[]] + adjusted_lengths = episode_lengths + + if self.top_episodes: + assert len(self.top_episodes) > 0 # pylint: disable=g-explicit-length-test + off_policy_targets = [ + item for item, _ + in self.top_episodes.random_sample(self.topk_batch_size)] + off_policy_target_lengths = [len(t) for t in off_policy_targets] + off_policy_targets = utils.stack_pad(off_policy_targets, pad_axes=0, + dtype=np.int32) + offp_switch = 1 + else: + off_policy_targets = [[0]] + off_policy_target_lengths = [1] + offp_switch = 0 + + fetches = { + 'global_step': global_step_op, + 'program_count': self.program_count, + 'summaries': self.rl_summary_op, + 'train_op': train_op, + 'gradients': self.gradients_dict if return_gradients else self.no_op} + fetched = session.run( + fetches, + {self.actions: batch_actions, + self.empirical_values: batch_emp_values, + self.policy_multipliers: batch_policy_multipliers, + self.adjusted_lengths: adjusted_lengths, + self.off_policy_targets: off_policy_targets, + self.off_policy_target_lengths: off_policy_target_lengths, + self.offp_switch: offp_switch}) + + combined_adjusted_lengths = adjusted_lengths + combined_returns = batch_returns + else: + # Train with REINFORCE + off-policy replay buffer by using importance + # sampling. + + # Sample new programs from the policy. + # Note: batch size is constant. A full batch will be sampled, but not all + # programs will be executed and added to the replay buffer. Those which + # are not executed will be discarded and not counted. + batch_actions, batch_values, episode_lengths, log_probs = session.run( + [self.sampled_batch.tokens, self.sampled_batch.value, + self.sampled_batch.episode_lengths, self.sampled_batch.log_probs]) + if episode_lengths.size == 0: + # This should not happen. 
+ logging.warn( + 'Shapes:\n' + 'batch_actions.shape: %s\n' + 'batch_values.shape: %s\n' + 'episode_lengths.shape: %s\n', + batch_actions.shape, batch_values.shape, episode_lengths.shape) + + # Sample from experince replay buffer + empty_replay_buffer = ( + self.experience_replay.is_empty() + if self.experience_replay is not None else True) + num_programs_from_replay_buff = ( + self.num_replay_per_batch if not empty_replay_buffer else 0) + num_programs_from_policy = ( + rl_batch.batch_size - num_programs_from_replay_buff) + if (not empty_replay_buffer) and num_programs_from_replay_buff: + result = self.experience_replay.sample_many( + num_programs_from_replay_buff) + experience_samples, replay_weights = zip(*result) + (replay_actions, + replay_rewards, + _, # log probs + replay_adjusted_lengths) = zip(*experience_samples) + + replay_batch_actions = utils.stack_pad(replay_actions, pad_axes=0, + dtype=np.int32) + + # compute log probs for replay samples under current policy + all_replay_log_probs, = session.run( + [self.given_batch.log_probs], + {self.actions: replay_batch_actions, + self.adjusted_lengths: replay_adjusted_lengths}) + replay_log_probs = [ + np.choose(replay_actions[i], all_replay_log_probs[i, :l].T).sum() + for i, l in enumerate(replay_adjusted_lengths)] + else: + # Replay buffer is empty. Do not sample from it. + replay_actions = None + replay_policy_multipliers = None + replay_adjusted_lengths = None + replay_log_probs = None + replay_weights = None + replay_returns = None + on_policy_weights = [0] * num_programs_from_replay_buff + + assert not self.a2c # TODO(danabo): Support A2C with importance sampling. + + # Compute rewards. + code_scores = compute_rewards( + rl_batch, batch_actions, episode_lengths, + batch_size=num_programs_from_policy) + code_strings = code_scores.code_strings + batch_tot_r = code_scores.total_rewards + test_cases = code_scores.test_cases + code_outputs = code_scores.code_outputs + reasons = code_scores.reasons + + # Process on-policy samples. + p = num_programs_from_policy + batch_targets, batch_returns = process_episodes( + code_scores.batch_rewards, episode_lengths[:p], a2c=False, + baselines=self.ema_by_len) + batch_policy_multipliers = batch_targets + batch_emp_values = [[]] + on_policy_returns = batch_returns + + # Process off-policy samples. + if (not empty_replay_buffer) and num_programs_from_replay_buff: + offp_batch_rewards = [ + [0.0] * (l - 1) + [r] + for l, r in zip(replay_adjusted_lengths, replay_rewards)] + assert len(offp_batch_rewards) == num_programs_from_replay_buff + assert len(replay_adjusted_lengths) == num_programs_from_replay_buff + replay_batch_targets, replay_returns = process_episodes( + offp_batch_rewards, replay_adjusted_lengths, a2c=False, + baselines=self.ema_by_len) + # Convert 2D array back into ragged 2D list. + replay_policy_multipliers = [ + replay_batch_targets[i, :l] + for i, l + in enumerate( + replay_adjusted_lengths[:num_programs_from_replay_buff])] + + adjusted_lengths = episode_lengths[:num_programs_from_policy] + + if self.top_episodes: + assert len(self.top_episodes) > 0 # pylint: disable=g-explicit-length-test + off_policy_targets = [ + item for item, _ + in self.top_episodes.random_sample(self.topk_batch_size)] + off_policy_target_lengths = [len(t) for t in off_policy_targets] + off_policy_targets = utils.stack_pad(off_policy_targets, pad_axes=0, + dtype=np.int32) + offp_switch = 1 + else: + off_policy_targets = [[0]] + off_policy_target_lengths = [1] + offp_switch = 0 + + # On-policy episodes. 
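+      # Each freshly sampled episode is unpacked into a
+      # (tokens, total_reward, sum_log_prob, length) tuple; these become the
+      # candidate entries added to the replay buffer below via add_many.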
+ if num_programs_from_policy: + separate_actions = [ + batch_actions[i, :l] + for i, l in enumerate(adjusted_lengths)] + chosen_log_probs = [ + np.choose(separate_actions[i], log_probs[i, :l].T) + for i, l in enumerate(adjusted_lengths)] + new_experiences = [ + (separate_actions[i], + batch_tot_r[i], + chosen_log_probs[i].sum(), l) + for i, l in enumerate(adjusted_lengths)] + on_policy_policy_multipliers = [ + batch_policy_multipliers[i, :l] + for i, l in enumerate(adjusted_lengths)] + (on_policy_actions, + _, # rewards + on_policy_log_probs, + on_policy_adjusted_lengths) = zip(*new_experiences) + else: + new_experiences = [] + on_policy_policy_multipliers = [] + on_policy_actions = [] + on_policy_log_probs = [] + on_policy_adjusted_lengths = [] + + if (not empty_replay_buffer) and num_programs_from_replay_buff: + # Look for new experiences in replay buffer. Assign weight if an episode + # is in the buffer. + on_policy_weights = [0] * num_programs_from_policy + for i, cs in enumerate(code_strings): + if self.experience_replay.has_key(cs): + on_policy_weights[i] = self.experience_replay.get_weight(cs) + + # Randomly select on-policy or off policy episodes to train on. + combined_actions = join(replay_actions, on_policy_actions) + combined_policy_multipliers = join( + replay_policy_multipliers, on_policy_policy_multipliers) + combined_adjusted_lengths = join( + replay_adjusted_lengths, on_policy_adjusted_lengths) + combined_returns = join(replay_returns, on_policy_returns) + combined_actions = utils.stack_pad(combined_actions, pad_axes=0) + combined_policy_multipliers = utils.stack_pad(combined_policy_multipliers, + pad_axes=0) + # P + combined_on_policy_log_probs = join(replay_log_probs, on_policy_log_probs) + # Q + # Assume weight is zero for all sequences sampled from the policy. + combined_q_weights = join(replay_weights, on_policy_weights) + + # Importance adjustment. Naive formulation: + # E_{x~p}[f(x)] ~= 1/N sum_{x~p}(f(x)) ~= 1/N sum_{x~q}(f(x) * p(x)/q(x)). + # p(x) is the policy, and q(x) is the off-policy distribution, i.e. replay + # buffer distribution. Importance weight w(x) = p(x) / q(x). + + # Instead of sampling from the replay buffer only, we sample from a + # mixture distribution of the policy and replay buffer. + # We are sampling from the mixture a*q(x) + (1-a)*p(x), where 0 <= a <= 1. + # Thus the importance weight w(x) = p(x) / (a*q(x) + (1-a)*p(x)) + # = 1 / ((1-a) + a*q(x)/p(x)) where q(x) is 0 for x sampled from the + # policy. + # Note: a = self.replay_alpha + if empty_replay_buffer: + # The replay buffer is empty. + # Do no gradient update this step. The replay buffer will have stuff in + # it next time. + combined_policy_multipliers *= 0 + elif not num_programs_from_replay_buff: + combined_policy_multipliers = np.ones([len(combined_actions), 1], + dtype=np.float32) + else: + # If a < 1 compute importance weights + # importance weight + # = 1 / [(1 - a) + a * exp(log(replay_weight / total_weight / p))] + # = 1 / ((1-a) + a*q/p) + importance_weights = self._compute_iw(combined_on_policy_log_probs, + combined_q_weights) + if self.config.iw_normalize: + importance_weights *= ( + float(rl_batch.batch_size) / importance_weights.sum()) + combined_policy_multipliers *= importance_weights.reshape(-1, 1) + + # Train on replay batch, top-k MLE. 
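+      # The combined (replay + on-policy) batch goes through the same
+      # placeholders as the pure on-policy path above; the importance weights
+      # folded into policy_multipliers correct for sampling from the mixture,
+      # and offp_switch enables the top-k MLE term when the queue is non-empty.
+      # Worked example of the correction (hypothetical values): with
+      # a = replay_alpha = 0.5, an episode equally likely under policy and
+      # buffer (q/p = 1) keeps weight 1 / (0.5 + 0.5*1) = 1.0, while an episode
+      # the buffer over-represents (q/p = 3) is down-weighted to
+      # 1 / (0.5 + 0.5*3) = 0.5.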
+ assert self.program_count is not None + fetches = { + 'global_step': global_step_op, + 'program_count': self.program_count, + 'summaries': self.rl_summary_op, + 'train_op': train_op, + 'gradients': self.gradients_dict if return_gradients else self.no_op} + fetched = session.run( + fetches, + {self.actions: combined_actions, + self.empirical_values: [[]], # replay_emp_values, + self.policy_multipliers: combined_policy_multipliers, + self.adjusted_lengths: combined_adjusted_lengths, + self.off_policy_targets: off_policy_targets, + self.off_policy_target_lengths: off_policy_target_lengths, + self.offp_switch: offp_switch}) + + # Add to experience replay buffer. + self.experience_replay.add_many( + objs=new_experiences, + weights=[exp(r / self.replay_temperature) for r in batch_tot_r], + keys=code_strings) + + # Update program count. + session.run( + [self.program_count_add_op], + {self.program_count_add_ph: num_programs_from_policy}) + + # Update EMA baselines on the mini-batch which we just did traning on. + if not self.a2c: + for i in xrange(rl_batch.batch_size): + episode_length = combined_adjusted_lengths[i] + empirical_returns = combined_returns[i, :episode_length] + for j in xrange(episode_length): + # Update ema_baselines in place. + self.ema_by_len[j] = ( + self.ema_baseline_decay * self.ema_by_len[j] + + (1 - self.ema_baseline_decay) * empirical_returns[j]) + + global_step = fetched['global_step'] + global_npe = fetched['program_count'] + core_summaries = fetched['summaries'] + summaries_list = [core_summaries] + + if num_programs_from_policy: + s_i = 0 + text_summary = self._rl_text_summary( + session, + global_step, + global_npe, + batch_tot_r[s_i], + episode_lengths[s_i], test_cases[s_i], + code_outputs[s_i], code_strings[s_i], reasons[s_i]) + reward_summary = self._rl_reward_summary(batch_tot_r) + + is_best = False + if self.global_best_reward_fn: + # Save best reward. + best_reward = np.max(batch_tot_r) + is_best = self.global_best_reward_fn(session, best_reward) + + if self.found_solution_op is not None and 'correct' in reasons: + session.run(self.found_solution_op) + + # Save program to disk for record keeping. + if self.stop_on_success: + solutions = [ + {'code': code_strings[i], 'reward': batch_tot_r[i], + 'npe': global_npe} + for i in xrange(len(reasons)) if reasons[i] == 'correct'] + elif is_best: + solutions = [ + {'code': code_strings[np.argmax(batch_tot_r)], + 'reward': np.max(batch_tot_r), + 'npe': global_npe}] + else: + solutions = [] + if solutions: + if self.assign_code_solution_fn: + self.assign_code_solution_fn(session, solutions[0]['code']) + with tf.gfile.FastGFile(self.logging_file, 'a') as writer: + for solution_dict in solutions: + writer.write(str(solution_dict) + '\n') + + max_i = np.argmax(batch_tot_r) + max_tot_r = batch_tot_r[max_i] + if max_tot_r >= self.top_reward: + if max_tot_r >= self.top_reward: + self.top_reward = max_tot_r + logging.info('Top code: r=%.2f, \t%s', max_tot_r, code_strings[max_i]) + if self.top_episodes is not None: + self.top_episodes.push( + max_tot_r, tuple(batch_actions[max_i, :episode_lengths[max_i]])) + + summaries_list += [text_summary, reward_summary] + + if self.do_iw_summaries and not empty_replay_buffer: + # prob of replay samples under replay buffer sampling. 
+ norm_replay_weights = [ + w / self.experience_replay.total_weight + for w in replay_weights] + replay_iw = self._compute_iw(replay_log_probs, replay_weights) + on_policy_iw = self._compute_iw(on_policy_log_probs, on_policy_weights) + summaries_list.append( + self._iw_summary( + session, replay_iw, replay_log_probs, norm_replay_weights, + on_policy_iw, on_policy_log_probs)) + + return UpdateStepResult( + global_step=global_step, + global_npe=global_npe, + summaries_list=summaries_list, + gradients_dict=fetched['gradients']) + + +def io_to_text(io_case, io_type): + if isinstance(io_case, misc.IOTuple): + # If there are many strings, join them with ','. + return ','.join([io_to_text(e, io_type) for e in io_case]) + if io_type == misc.IOType.string: + # There is one string. Return it. + return misc.tokens_to_text(io_case) + if (io_type == misc.IOType.integer + or io_type == misc.IOType.boolean): + if len(io_case) == 1: + return str(io_case[0]) + return str(io_case) + + +CodeScoreInfo = namedtuple( + 'CodeScoreInfo', + ['code_strings', 'batch_rewards', 'total_rewards', 'test_cases', + 'code_outputs', 'reasons']) + + +def compute_rewards(rl_batch, batch_actions, episode_lengths, batch_size=None): + """Compute rewards for each episode in the batch. + + Args: + rl_batch: A data.RLBatch instance. This holds information about the task + each episode is solving, and a reward function for each episode. + batch_actions: Contains batch of episodes. Each sequence of actions will be + converted into a BF program and then scored. A numpy array of shape + [batch_size, max_sequence_length]. + episode_lengths: The sequence length of each episode in the batch. Iterable + of length batch_size. + batch_size: (optional) number of programs to score. Use this to limit the + number of programs executed from this batch. For example, when doing + importance sampling some of the on-policy episodes will be discarded + and they should not be executed. `batch_size` can be less than or equal + to the size of the input batch. + + Returns: + CodeScoreInfo namedtuple instance. This holds not just the computed rewards, + but additional information computed during code execution which can be used + for debugging and monitoring. this includes: BF code strings, test cases + the code was executed on, code outputs from those test cases, and reasons + for success or failure. + """ + code_strings = [ + ''.join([misc.bf_int2char(a) for a in action_sequence[:l]]) + for action_sequence, l in zip(batch_actions, episode_lengths)] + if batch_size is None: + batch_size = len(code_strings) + else: + assert batch_size <= len(code_strings) + code_strings = code_strings[:batch_size] + + if isinstance(rl_batch.reward_fns, (list, tuple)): + # reward_fns is a list of functions, same length as code_strings. + assert len(rl_batch.reward_fns) >= batch_size + r_fn_results = [ + rl_batch.reward_fns[i](code_strings[i]) for i in xrange(batch_size)] + else: + # reward_fns is allowed to be one function which processes a batch of code + # strings. This is useful for efficiency and batch level computation. + r_fn_results = rl_batch.reward_fns(code_strings) + + # Expecting that r_fn returns a list of rewards. Length of list equals + # length of the code string (including EOS char). 
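+  # For example (hypothetical task): a program of three BF instructions plus
+  # the EOS marker yields a list of four per-timestep rewards, and its sum is
+  # that episode's total reward below.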
+ + batch_rewards = [r.episode_rewards for r in r_fn_results] + total_rewards = [sum(b) for b in batch_rewards] + test_cases = [io_to_text(r.input_case, r.input_type) for r in r_fn_results] + code_outputs = [io_to_text(r.code_output, r.output_type) + for r in r_fn_results] + reasons = [r.reason for r in r_fn_results] + return CodeScoreInfo( + code_strings=code_strings, + batch_rewards=batch_rewards, + total_rewards=total_rewards, + test_cases=test_cases, + code_outputs=code_outputs, + reasons=reasons) + + +def process_episodes( + batch_rewards, episode_lengths, a2c=False, baselines=None, + batch_values=None): + """Compute REINFORCE targets. + + REINFORCE here takes the form: + grad_t = grad[log(pi(a_t|c_t))*target_t] + where c_t is context: i.e. RNN state or environment state (or both). + + Two types of targets are supported: + 1) Advantage actor critic (a2c). + 2) Vanilla REINFORCE with baseline. + + Args: + batch_rewards: Rewards received in each episode in the batch. A numpy array + of shape [batch_size, max_sequence_length]. Note, these are per-timestep + rewards, not total reward. + episode_lengths: Length of each episode. An iterable of length batch_size. + a2c: A bool. Whether to compute a2c targets (True) or vanilla targets + (False). + baselines: If a2c is False, provide baselines for each timestep. This is a + list (or indexable container) of length max_time. Note: baselines are + shared across all episodes, which is why there is no batch dimension. + It is up to the caller to update baselines accordingly. + batch_values: If a2c is True, provide values computed by a value estimator. + A numpy array of shape [batch_size, max_sequence_length]. + + Returns: + batch_targets: REINFORCE targets for each episode and timestep. A numpy + array of shape [batch_size, max_sequence_length]. + batch_returns: Returns computed for each episode and timestep. This is for + reference, and is not used in the REINFORCE gradient update (but was + used to compute the targets). A numpy array of shape + [batch_size, max_sequence_length]. + """ + num_programs = len(batch_rewards) + assert num_programs <= len(episode_lengths) + batch_returns = [None] * num_programs + batch_targets = [None] * num_programs + for i in xrange(num_programs): + episode_length = episode_lengths[i] + assert len(batch_rewards[i]) == episode_length + # Compute target for each timestep. + # If we are computing A2C: + # target_t = advantage_t = R_t - V(c_t) + # where V(c_t) is a learned value function (provided as `values`). + # Otherwise: + # target_t = R_t - baselines[t] + # where `baselines` are provided. + # In practice we use a more generalized formulation of advantage. See docs + # for `discounted_advantage_and_rewards`. + if a2c: + # Compute advantage. + assert batch_values is not None + episode_values = batch_values[i, :episode_length] + episode_rewards = batch_rewards[i] + emp_val, gen_adv = rollout_lib.discounted_advantage_and_rewards( + episode_rewards, episode_values, gamma=1.0, lambda_=1.0) + batch_returns[i] = emp_val + batch_targets[i] = gen_adv + else: + # Compute return for each timestep. 
See section 3 of + # https://arxiv.org/pdf/1602.01783.pdf + assert baselines is not None + empirical_returns = rollout_lib.discount(batch_rewards[i], gamma=1.0) + targets = [None] * episode_length + for j in xrange(episode_length): + targets[j] = empirical_returns[j] - baselines[j] + batch_returns[i] = empirical_returns + batch_targets[i] = targets + batch_returns = utils.stack_pad(batch_returns, 0) + if num_programs: + batch_targets = utils.stack_pad(batch_targets, 0) + else: + batch_targets = np.array([], dtype=np.float32) + + return (batch_targets, batch_returns) diff --git a/models/research/brain_coder/single_task/pg_agent_test.py b/models/research/brain_coder/single_task/pg_agent_test.py new file mode 100644 index 0000000000000000000000000000000000000000..503d37ecacbf968b0786b3553e6a97667569bf7d --- /dev/null +++ b/models/research/brain_coder/single_task/pg_agent_test.py @@ -0,0 +1,395 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""Tests for pg_agent.""" + +from collections import Counter + +from absl import logging +import numpy as np +from six.moves import xrange +import tensorflow as tf + +from common import utils # brain coder +from single_task import data # brain coder +from single_task import defaults # brain coder +from single_task import misc # brain coder +from single_task import pg_agent as agent_lib # brain coder +from single_task import pg_train # brain coder + + +# Symmetric mean absolute percentage error (SMAPE). +# https://en.wikipedia.org/wiki/Symmetric_mean_absolute_percentage_error +def smape(a, b): + return 2.0 * abs(a - b) / float(a + b) + + +def onehot(dim, num_dims): + value = np.zeros(num_dims, dtype=np.float32) + value[dim] = 1 + return value + + +def random_sequence(max_length, num_tokens, eos=0): + length = np.random.randint(1, max_length - 1) + return np.append(np.random.randint(1, num_tokens, length), eos) + + +def repeat_and_pad(v, rep, total_len): + return [v] * rep + [0.0] * (total_len - rep) + + +class AgentTest(tf.test.TestCase): + + def testProcessEpisodes(self): + batch_size = 3 + + def reward_fn(code_string): + return misc.RewardInfo( + episode_rewards=[float(ord(c)) for c in code_string], + input_case=[], + correct_output=[], + code_output=[], + input_type=misc.IOType.integer, + output_type=misc.IOType.integer, + reason='none') + + rl_batch = data.RLBatch( + reward_fns=[reward_fn for _ in range(batch_size)], + batch_size=batch_size, + good_reward=10.0) + batch_actions = np.asarray([ + [4, 5, 3, 6, 8, 1, 0, 0], + [1, 2, 3, 4, 0, 0, 0, 0], + [8, 7, 6, 5, 4, 3, 2, 1]], dtype=np.int32) + batch_values = np.asarray([ + [0, 1, 2, 1, 0, 1, 1, 0], + [0, 2, 1, 2, 1, 0, 0, 0], + [0, 1, 1, 0, 0, 0, 1, 1]], dtype=np.float32) + episode_lengths = np.asarray([7, 5, 8], dtype=np.int32) + + scores = agent_lib.compute_rewards( + rl_batch, batch_actions, episode_lengths) + batch_targets, batch_returns = agent_lib.process_episodes( + scores.batch_rewards, episode_lengths, a2c=True, + batch_values=batch_values) + self.assertEqual( + [[473.0, 428.0, 337.0, 294.0, 201.0, 157.0, 95.0, 0.0], + [305.0, 243.0, 183.0, 140.0, 95.0, 0.0, 0.0, 0.0], + [484.0, 440.0, 394.0, 301.0, 210.0, 165.0, 122.0, 62.0]], + batch_returns.tolist()) + self.assertEqual( + [[473.0, 427.0, 335.0, 293.0, 201.0, 156.0, 94.0, 0.0], + [305.0, 241.0, 182.0, 138.0, 94.0, 0.0, 0.0, 0.0], + [484.0, 439.0, 393.0, 301.0, 210.0, 165.0, 121.0, 61.0]], + batch_targets.tolist()) + + def testVarUpdates(self): + """Tests that variables get 
updated as expected. + + For the RL update, check that gradients are non-zero and that the global + model gets updated. + """ + config = defaults.default_config_with_updates( + 'env=c(task="reverse"),' + 'agent=c(algorithm="pg",eos_token=True,optimizer="sgd",lr=1.0)') + lr = config.agent.lr + + tf.reset_default_graph() + trainer = pg_train.AsyncTrainer( + config, task_id=0, ps_tasks=0, num_workers=1) + global_init_op = tf.variables_initializer( + tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'global')) + with tf.Session() as sess: + sess.run(global_init_op) # Initialize global copy. + trainer.initialize(sess) + model = trainer.model + global_vars = sess.run(trainer.global_model.trainable_variables) + local_vars = sess.run(model.trainable_variables) + + # Make sure names match. + g_prefix = 'global/' + l_prefix = 'local/' + for g, l in zip(trainer.global_model.trainable_variables, + model.trainable_variables): + self.assertEqual(g.name[len(g_prefix):], l.name[len(l_prefix):]) + + # Assert that shapes and values are the same between global and local + # models. + for g, l in zip(global_vars, local_vars): + self.assertEqual(g.shape, l.shape) + self.assertTrue(np.array_equal(g, l)) + + # Make all gradients dense tensors. + for param, grad in model.gradients_dict.items(): + if isinstance(grad, tf.IndexedSlices): + # Converts to dense tensor. + model.gradients_dict[param] = tf.multiply(grad, 1.0) + + # Perform update. + results = model.update_step( + sess, trainer.data_manager.sample_rl_batch(), trainer.train_op, + trainer.global_step, return_gradients=True) + grads_dict = results.gradients_dict + for grad in grads_dict.values(): + self.assertIsNotNone(grad) + self.assertTrue(np.count_nonzero(grad) > 0) + global_update = sess.run(trainer.global_model.trainable_variables) + for tf_var, var_before, var_after in zip( + model.trainable_variables, local_vars, global_update): + # Check that the params were updated. + self.assertTrue(np.allclose( + var_after, + var_before - grads_dict[tf_var] * lr)) + + # Test that global to local sync works. + sess.run(trainer.sync_op) + global_vars = sess.run(trainer.global_model.trainable_variables) + local_vars = sess.run(model.trainable_variables) + for l, g in zip(local_vars, global_vars): + self.assertTrue(np.allclose(l, g)) + + def testMonteCarloGradients(self): + """Test Monte Carlo estimate of REINFORCE gradient. + + Test that the Monte Carlo estimate of the REINFORCE gradient is + approximately equal to the true gradient. We compute the true gradient for a + toy environment with a very small action space. + + Similar to section 5 of https://arxiv.org/pdf/1505.00521.pdf. + """ + # Test may have different outcome on different machines due to different + # rounding behavior of float arithmetic. + tf.reset_default_graph() + tf.set_random_seed(12345678987654321) + np.random.seed(1294024302) + max_length = 2 + num_tokens = misc.bf_num_tokens() + eos = misc.BF_EOS_INT + assert eos == 0 + def sequence_iterator(max_length): + """Iterates through all sequences up to the given length.""" + yield [eos] + for a in xrange(1, num_tokens): + if max_length > 1: + for sub_seq in sequence_iterator(max_length - 1): + yield [a] + sub_seq + else: + yield [a] + actions = list(sequence_iterator(max_length)) + + # This batch contains all possible episodes up to max_length. 
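    # Illustrative note: because the action space here is small enough to
    # enumerate, the expected REINFORCE gradient,
    #   E_{a~P}[grad(log P(a)) * R(a)],
    # can be evaluated exactly by summing over every enumerated episode
    # weighted by P(a); the Monte Carlo estimate computed further below
    # should approach this exact sum as the sample size n grows.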
+ actions_batch = utils.stack_pad(actions, 0) + lengths_batch = [len(s) for s in actions] + + reward_map = {tuple(a): np.random.randint(-1, 7) for a in actions_batch} + # reward_map = {tuple(a): np.random.normal(3, 1) + # for a in actions_batch} # normal distribution + # reward_map = {tuple(a): 1.0 + # for a in actions_batch} # expected reward is 1 + + n = 100000 # MC sample size. + config = defaults.default_config_with_updates( + 'env=c(task="print"),' + 'agent=c(algorithm="pg",optimizer="sgd",lr=1.0,ema_baseline_decay=0.99,' + 'entropy_beta=0.0,topk_loss_hparam=0.0,regularizer=0.0,' + 'policy_lstm_sizes=[10],eos_token=True),' + 'batch_size='+str(n)+',timestep_limit='+str(max_length)) + + dtype = tf.float64 + trainer = pg_train.AsyncTrainer( + config, task_id=0, ps_tasks=0, num_workers=1, dtype=dtype) + model = trainer.model + actions_ph = model.actions + lengths_ph = model.adjusted_lengths + multipliers_ph = model.policy_multipliers + + global_init_op = tf.variables_initializer( + tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'global')) + with tf.Session() as sess, sess.graph.as_default(): + sess.run(global_init_op) # Initialize global copy. + trainer.initialize(sess) + + # Compute exact gradients. + # exact_grads = sum(P(a) * grad(log P(a)) * R(a) for a in actions_batch) + true_loss_unnormalized = 0.0 + exact_grads = [np.zeros(v.shape) for v in model.trainable_variables] + episode_probs_map = {} + grads_map = {} + for a_idx in xrange(len(actions_batch)): + a = actions_batch[a_idx] + grads_result, probs_result, loss = sess.run( + [model.dense_unclipped_grads, model.chosen_probs, model.loss], + {actions_ph: [a], + lengths_ph: [lengths_batch[a_idx]], + multipliers_ph: [ + repeat_and_pad(reward_map[tuple(a)], + lengths_batch[a_idx], + max_length)]}) + # Take product over time axis. + episode_probs_result = np.prod(probs_result[0, :lengths_batch[a_idx]]) + for i in range(0, len(exact_grads)): + exact_grads[i] += grads_result[i] * episode_probs_result + episode_probs_map[tuple(a)] = episode_probs_result + reward_map[tuple(a)] = reward_map[tuple(a)] + grads_map[tuple(a)] = grads_result + true_loss_unnormalized += loss + # Normalize loss. Since each episode is feed into the model one at a time, + # normalization needs to be done manually. + true_loss = true_loss_unnormalized / float(len(actions_batch)) + + # Compute Monte Carlo gradients. + # E_a~P[grad(log P(a)) R(a)] is aprox. eq. to + # sum(grad(log P(a)) R(a) for a in actions_sampled_from_P) / n + # where len(actions_sampled_from_P) == n. + # + # In other words, sample from the policy and compute the gradients of the + # log probs weighted by the returns. This will excersize the code in + # agent.py + sampled_actions, sampled_lengths = sess.run( + [model.sampled_tokens, model.episode_lengths]) + pi_multipliers = [ + repeat_and_pad(reward_map[tuple(a)], l, max_length) + for a, l in zip(sampled_actions, sampled_lengths)] + mc_grads_unnormalized, sampled_probs, mc_loss_unnormalized = sess.run( + [model.dense_unclipped_grads, model.chosen_probs, model.loss], + {actions_ph: sampled_actions, + multipliers_ph: pi_multipliers, + lengths_ph: sampled_lengths}) + # Loss is already normalized across the minibatch, so no normalization + # is needed. + mc_grads = mc_grads_unnormalized + mc_loss = mc_loss_unnormalized + + # Make sure true loss and MC loss are similar. 
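      # Recall that smape (defined at the top of this file) is the symmetric
      # mean absolute percentage error, smape(a, b) = 2 * |a - b| / (a + b),
      # so the 0.15 threshold below tolerates roughly a 15% discrepancy
      # relative to the mean of the two losses.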
+ loss_error = smape(true_loss, mc_loss) + self.assertTrue(loss_error < 0.15, msg='actual: %s' % loss_error) + + # Check that probs computed for episodes sampled from the model are the same + # as the recorded true probs. + for i in range(100): + acs = tuple(sampled_actions[i].tolist()) + sampled_prob = np.prod(sampled_probs[i, :sampled_lengths[i]]) + self.assertTrue(np.isclose(episode_probs_map[acs], sampled_prob)) + + # Make sure MC estimates of true probs are close. + counter = Counter(tuple(e) for e in sampled_actions) + for acs, count in counter.iteritems(): + mc_prob = count / float(len(sampled_actions)) + true_prob = episode_probs_map[acs] + error = smape(mc_prob, true_prob) + self.assertTrue( + error < 0.15, + msg='actual: %s; count: %s; mc_prob: %s; true_prob: %s' + % (error, count, mc_prob, true_prob)) + + # Manually recompute MC gradients and make sure they match MC gradients + # computed in TF. + mc_grads_recompute = [np.zeros(v.shape) for v in model.trainable_variables] + for i in range(n): + acs = tuple(sampled_actions[i].tolist()) + for i in range(0, len(mc_grads_recompute)): + mc_grads_recompute[i] += grads_map[acs][i] + for i in range(0, len(mc_grads_recompute)): + self.assertTrue(np.allclose(mc_grads[i], mc_grads_recompute[i] / n)) + + # Check angle between gradients as fraction of pi. + for index in range(len(mc_grads)): + v1 = mc_grads[index].reshape(-1) + v2 = exact_grads[index].reshape(-1) + # angle = arccos(v1 . v2 / (|v1|*|v2|)) + angle_rad = np.arccos( + np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))) + logging.info('angle / pi: %s', angle_rad / np.pi) + angle_frac = angle_rad / np.pi + self.assertTrue(angle_frac < 0.02, msg='actual: %s' % angle_frac) + # Check norms. + for index in range(len(mc_grads)): + v1_norm = np.linalg.norm(mc_grads[index].reshape(-1)) + v2_norm = np.linalg.norm(exact_grads[index].reshape(-1)) + error = smape(v1_norm, v2_norm) + self.assertTrue(error < 0.02, msg='actual: %s' % error) + + # Check expected rewards. + # E_a~P[R(a)] approx eq sum(P(a) * R(a) for a in actions) + mc_expected_reward = np.mean( + [reward_map[tuple(a)] for a in sampled_actions]) + exact_expected_reward = np.sum( + [episode_probs_map[k] * reward_map[k] for k in reward_map]) + error = smape(mc_expected_reward, exact_expected_reward) + self.assertTrue(error < 0.005, msg='actual: %s' % angle_frac) + + def testNumericalGradChecking(self): + # Similar to + # http://ufldl.stanford.edu/wiki/index.php/Gradient_checking_and_advanced_optimization. 
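    # A sketch of the standard central-difference check used below: each
    # partial derivative of the loss J is estimated as
    #   dJ/dtheta_i ~= (J(theta + eps * e_i) - J(theta - eps * e_i)) / (2 * eps)
    # where e_i is a one-hot perturbation of a single parameter, and the
    # estimates are then compared against the analytic gradients the model
    # computes via backprop.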
+ epsilon = 1e-4 + eos = misc.BF_EOS_INT + self.assertEqual(0, eos) + config = defaults.default_config_with_updates( + 'env=c(task="print"),' + 'agent=c(algorithm="pg",optimizer="sgd",lr=1.0,ema_baseline_decay=0.99,' + 'entropy_beta=0.0,topk_loss_hparam=0.0,policy_lstm_sizes=[10],' + 'eos_token=True),' + 'batch_size=64') + dtype = tf.float64 + tf.reset_default_graph() + tf.set_random_seed(12345678987654321) + np.random.seed(1294024302) + trainer = pg_train.AsyncTrainer( + config, task_id=0, ps_tasks=0, num_workers=1, dtype=dtype) + model = trainer.model + actions_ph = model.actions + lengths_ph = model.adjusted_lengths + multipliers_ph = model.policy_multipliers + loss = model.pi_loss + global_init_op = tf.variables_initializer( + tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'global')) + + assign_add_placeholders = [None] * len(model.trainable_variables) + assign_add_ops = [None] * len(model.trainable_variables) + param_shapes = [None] * len(model.trainable_variables) + for i, param in enumerate(model.trainable_variables): + param_shapes[i] = param.get_shape().as_list() + assign_add_placeholders[i] = tf.placeholder(dtype, + np.prod(param_shapes[i])) + assign_add_ops[i] = param.assign_add( + tf.reshape(assign_add_placeholders[i], param_shapes[i])) + + with tf.Session() as sess: + sess.run(global_init_op) # Initialize global copy. + trainer.initialize(sess) + + actions_raw = [random_sequence(10, 9) for _ in xrange(16)] + actions_batch = utils.stack_pad(actions_raw, 0) + lengths_batch = [len(l) for l in actions_raw] + feed = {actions_ph: actions_batch, + multipliers_ph: np.ones_like(actions_batch), + lengths_ph: lengths_batch} + + estimated_grads = [None] * len(model.trainable_variables) + for i, param in enumerate(model.trainable_variables): + param_size = np.prod(param_shapes[i]) + estimated_grads[i] = np.zeros(param_size, dtype=np.float64) + for index in xrange(param_size): + e = onehot(index, param_size) * epsilon + sess.run(assign_add_ops[i], + {assign_add_placeholders[i]: e}) + j_plus = sess.run(loss, feed) + sess.run(assign_add_ops[i], + {assign_add_placeholders[i]: -2 * e}) + j_minus = sess.run(loss, feed) + sess.run(assign_add_ops[i], + {assign_add_placeholders[i]: e}) + estimated_grads[i][index] = (j_plus - j_minus) / (2 * epsilon) + estimated_grads[i] = estimated_grads[i].reshape(param_shapes[i]) + + analytic_grads = sess.run(model.dense_unclipped_grads, feed) + + for g1, g2 in zip(estimated_grads[1:], analytic_grads[1:]): + logging.info('norm (g1-g2): %s', np.abs(g1 - g2).mean()) + self.assertTrue(np.allclose(g1, g2)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/brain_coder/single_task/pg_train.py b/models/research/brain_coder/single_task/pg_train.py new file mode 100644 index 0000000000000000000000000000000000000000..fde7cc84729a56002e8688d268a2085432ee124e --- /dev/null +++ b/models/research/brain_coder/single_task/pg_train.py @@ -0,0 +1,782 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +r"""Train RL agent on coding tasks.""" + +import contextlib +import cPickle +import cProfile +import marshal +import os +import time + +from absl import flags +from absl import logging +import tensorflow as tf + +# internal session lib import + +from single_task import data # brain coder +from single_task import defaults # brain coder +from single_task import pg_agent as agent_lib # brain coder +from single_task import results_lib # brain coder + + +FLAGS = flags.FLAGS 
+flags.DEFINE_string( + 'master', '', + 'URL of the TensorFlow master to use.') +flags.DEFINE_integer( + 'ps_tasks', 0, + 'Number of parameter server tasks. Only set to 0 for ' + 'single worker training.') +flags.DEFINE_integer( + 'summary_interval', 10, + 'How often to write summaries.') +flags.DEFINE_integer( + 'summary_tasks', 16, + 'If greater than 0 only tasks 0 through summary_tasks - 1 ' + 'will write summaries. If 0, all tasks will write ' + 'summaries.') +flags.DEFINE_bool( + 'stop_on_success', True, + 'If True, training will stop as soon as a solution is found. ' + 'If False, training will continue indefinitely until another ' + 'stopping condition is reached.') +flags.DEFINE_bool( + 'do_profiling', False, + 'If True, cProfile profiler will run and results will be ' + 'written to logdir. WARNING: Results will not be written if ' + 'the code crashes. Make sure it exists successfully.') +flags.DEFINE_integer('model_v', 0, 'Model verbosity level.') +flags.DEFINE_bool( + 'delayed_graph_cleanup', True, + 'If true, container for n-th run will not be reset until the (n+1)-th run ' + 'is complete. This greatly reduces the chance that a worker is still ' + 'using the n-th container when it is cleared.') + + +def define_tuner_hparam_space(hparam_space_type): + """Define tunable hparams for grid search.""" + if hparam_space_type not in ('pg', 'pg-topk', 'topk', 'is'): + raise ValueError('Hparam space is not valid: "%s"' % hparam_space_type) + + # Discrete hparam space is stored as a dict from hparam name to discrete + # values. + hparam_space = {} + + if hparam_space_type in ('pg', 'pg-topk', 'is'): + # Add a floating point parameter named learning rate. + hparam_space['lr'] = [1e-5, 1e-4, 1e-3] + hparam_space['entropy_beta'] = [0.005, 0.01, 0.05, 0.10] + else: # 'topk' + # Add a floating point parameter named learning rate. + hparam_space['lr'] = [1e-5, 1e-4, 1e-3] + hparam_space['entropy_beta'] = [0.0, 0.005, 0.01, 0.05, 0.10] + + if hparam_space_type in ('topk', 'pg-topk'): + # topk tuning will be enabled. + hparam_space['topk'] = [10] + hparam_space['topk_loss_hparam'] = [1.0, 10.0, 50.0, 200.0] + + elif hparam_space_type == 'is': + # importance sampling tuning will be enabled. + hparam_space['replay_temperature'] = [0.25, 0.5, 1.0, 2.0] + hparam_space['alpha'] = [0.5, 0.75, 63/64.] + + return hparam_space + + +def write_hparams_to_config(config, hparams, hparam_space_type): + """Write hparams given by the tuner into the Config object.""" + if hparam_space_type not in ('pg', 'pg-topk', 'topk', 'is'): + raise ValueError('Hparam space is not valid: "%s"' % hparam_space_type) + + config.agent.lr = hparams.lr + config.agent.entropy_beta = hparams.entropy_beta + + if hparam_space_type in ('topk', 'pg-topk'): + # topk tuning will be enabled. + config.agent.topk = hparams.topk + config.agent.topk_loss_hparam = hparams.topk_loss_hparam + elif hparam_space_type == 'is': + # importance sampling tuning will be enabled. + config.agent.replay_temperature = hparams.replay_temperature + config.agent.alpha = hparams.alpha + + +def make_initialized_variable(value, name, shape=None, dtype=tf.float32): + """Create a tf.Variable with a constant initializer. + + Args: + value: Constant value to initialize the variable with. This is the value + that the variable starts with. + name: Name of the variable in the TF graph. + shape: Shape of the variable. If None, variable will be a scalar. + dtype: Data type of the variable. Should be a TF dtype. Defaults to + tf.float32. + + Returns: + tf.Variable instance. 
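
  Example (mirroring the usage in AsyncTrainer below):
    global_step = make_initialized_variable(0, 'global_step', dtype=tf.int64)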
+ """ + if shape is None: + shape = [] + return tf.get_variable( + name=name, shape=shape, initializer=tf.constant_initializer(value), + dtype=dtype, trainable=False) + + +class AsyncTrainer(object): + """Manages graph creation and training. + + This async trainer creates a global model on the parameter server, and a local + model (for this worker). Gradient updates are sent to the global model, and + the updated weights are synced to the local copy. + """ + + def __init__(self, config, task_id, ps_tasks, num_workers, is_chief=True, + summary_writer=None, + dtype=tf.float32, + summary_interval=1, + run_number=0, + logging_dir='/tmp', model_v=0): + self.config = config + self.data_manager = data.DataManager( + config, run_number=run_number, + do_code_simplification=not FLAGS.stop_on_success) + self.task_id = task_id + self.ps_tasks = ps_tasks + self.is_chief = is_chief + if ps_tasks == 0: + assert task_id == 0, 'No parameter servers specified. Expecting 1 task.' + assert num_workers == 1, ( + 'No parameter servers specified. Expecting 1 task.') + worker_device = '/job:localhost/replica:%d/task:0/cpu:0' % task_id + # worker_device = '/cpu:0' + # ps_device = '/cpu:0' + else: + assert num_workers > 0, 'There must be at least 1 training worker.' + worker_device = '/job:worker/replica:%d/task:0/cpu:0' % task_id + # ps_device = '/job:ps/replica:0/task:0/cpu:0' + logging.info('worker_device: %s', worker_device) + + logging_file = os.path.join( + logging_dir, 'solutions_%d.txt' % task_id) + experience_replay_file = os.path.join( + logging_dir, 'replay_buffer_%d.pickle' % task_id) + self.topk_file = os.path.join( + logging_dir, 'topk_buffer_%d.pickle' % task_id) + + tf.get_variable_scope().set_use_resource(True) + + # global model + with tf.device(tf.train.replica_device_setter(ps_tasks, + ps_device='/job:ps/replica:0', + worker_device=worker_device)): + with tf.variable_scope('global'): + global_model = agent_lib.LMAgent(config, dtype=dtype, is_local=False) + global_params_dict = {p.name: p + for p in global_model.sync_variables} + self.global_model = global_model + self.global_step = make_initialized_variable( + 0, 'global_step', dtype=tf.int64) + + self.global_best_reward = make_initialized_variable( + -10.0, 'global_best_reward', dtype=tf.float64) + self.is_best_model = make_initialized_variable( + False, 'is_best_model', dtype=tf.bool) + self.reset_is_best_model = self.is_best_model.assign(False) + self.global_best_reward_placeholder = tf.placeholder( + tf.float64, [], name='global_best_reward_placeholder') + self.assign_global_best_reward_op = tf.group( + self.global_best_reward.assign( + self.global_best_reward_placeholder), + self.is_best_model.assign(True)) + def assign_global_best_reward_fn(session, reward): + reward = round(reward, 10) + best_reward = round(session.run(self.global_best_reward), 10) + is_best = reward > best_reward + if is_best: + session.run(self.assign_global_best_reward_op, + {self.global_best_reward_placeholder: reward}) + return is_best + self.assign_global_best_reward_fn = assign_global_best_reward_fn + + # Any worker will set to true when it finds a solution. + self.found_solution_flag = make_initialized_variable( + False, 'found_solution_flag', dtype=tf.bool) + self.found_solution_op = self.found_solution_flag.assign(True) + + self.run_number = make_initialized_variable( + run_number, 'run_number', dtype=tf.int32) + + # Store a solution when found. 
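      # As with the best-reward tracking above, the solution string is stored
      # via a variable, a feed placeholder, and an assign op that are all
      # created once at graph-construction time; the small helper closure
      # just runs the pre-built op, so the training loop never adds new ops
      # to the graph.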
+ self.code_solution_variable = tf.get_variable( + 'code_solution', [], tf.string, + initializer=tf.constant_initializer('')) + self.code_solution_ph = tf.placeholder( + tf.string, [], name='code_solution_ph') + self.code_solution_assign_op = self.code_solution_variable.assign( + self.code_solution_ph) + def assign_code_solution_fn(session, code_solution_string): + session.run(self.code_solution_assign_op, + {self.code_solution_ph: code_solution_string}) + self.assign_code_solution_fn = assign_code_solution_fn + + # Count all programs sampled from policy. This does not include + # programs sampled from replay buffer. + # This equals NPE (number of programs executed). Only programs sampled + # from the policy need to be executed. + self.program_count = make_initialized_variable( + 0, 'program_count', dtype=tf.int64) + + # local model + with tf.device(worker_device): + with tf.variable_scope('local'): + self.model = model = agent_lib.LMAgent( + config, + task_id=task_id, + logging_file=logging_file, + experience_replay_file=experience_replay_file, + dtype=dtype, + global_best_reward_fn=self.assign_global_best_reward_fn, + found_solution_op=self.found_solution_op, + assign_code_solution_fn=self.assign_code_solution_fn, + program_count=self.program_count, + stop_on_success=FLAGS.stop_on_success, + verbose_level=model_v) + local_params = model.trainable_variables + local_params_dict = {p.name: p for p in local_params} + + # Pull global params to local model. + def _global_to_local_scope(name): + assert name.startswith('global/') + return 'local' + name[6:] + sync_dict = { + local_params_dict[_global_to_local_scope(p_name)]: p + for p_name, p in global_params_dict.items()} + self.sync_op = tf.group(*[v_local.assign(v_global) + for v_local, v_global + in sync_dict.items()]) + + # Pair local gradients with global params. + grad_var_dict = { + gradient: sync_dict[local_var] + for local_var, gradient in model.gradients_dict.items()} + + # local model + model.make_summary_ops() # Don't put summaries under 'local' scope. + with tf.variable_scope('local'): + self.train_op = model.optimizer.apply_gradients( + grad_var_dict.items(), global_step=self.global_step) + self.local_init_op = tf.variables_initializer( + tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, + tf.get_variable_scope().name)) + + self.local_step = 0 + self.last_summary_time = time.time() + self.summary_interval = summary_interval + self.summary_writer = summary_writer + self.cached_global_step = -1 + self.cached_global_npe = -1 + + logging.info('summary_interval: %d', self.summary_interval) + + # Load top-k buffer. + if self.model.top_episodes is not None and tf.gfile.Exists(self.topk_file): + try: + with tf.gfile.FastGFile(self.topk_file, 'r') as f: + self.model.top_episodes = cPickle.loads(f.read()) + logging.info( + 'Loaded top-k buffer from disk with %d items. Location: "%s"', + len(self.model.top_episodes), self.topk_file) + except (cPickle.UnpicklingError, EOFError) as e: + logging.warn( + 'Failed to load existing top-k buffer from disk. Removing bad file.' + '\nLocation: "%s"\nException: %s', self.topk_file, str(e)) + tf.gfile.Remove(self.topk_file) + + def initialize(self, session): + """Run initialization ops.""" + session.run(self.local_init_op) + session.run(self.sync_op) + self.cached_global_step, self.cached_global_npe = session.run( + [self.global_step, self.program_count]) + + def update_global_model(self, session): + """Run an update step. + + 1) Asynchronously copy global weights to local model. 
+ 2) Call into local model's update_step method, which does the following: + a) Sample batch of programs from policy. + b) Compute rewards. + c) Compute gradients and update the global model asynchronously. + 3) Write tensorboard summaries to disk. + + Args: + session: tf.Session instance. + """ + session.run(self.sync_op) # Copy weights from global to local. + + with session.as_default(): + result = self.model.update_step( + session, self.data_manager.sample_rl_batch(), self.train_op, + self.global_step) + global_step = result.global_step + global_npe = result.global_npe + summaries = result.summaries_list + self.cached_global_step = global_step + self.cached_global_npe = global_npe + self.local_step += 1 + + if self.summary_writer and self.local_step % self.summary_interval == 0: + if not isinstance(summaries, (tuple, list)): + summaries = [summaries] + summaries.append(self._local_step_summary()) + if self.is_chief: + (global_best_reward, + found_solution_flag, + program_count) = session.run( + [self.global_best_reward, + self.found_solution_flag, + self.program_count]) + summaries.append( + tf.Summary( + value=[tf.Summary.Value( + tag='model/best_reward', + simple_value=global_best_reward)])) + summaries.append( + tf.Summary( + value=[tf.Summary.Value( + tag='model/solution_found', + simple_value=int(found_solution_flag))])) + summaries.append( + tf.Summary( + value=[tf.Summary.Value( + tag='model/program_count', + simple_value=program_count)])) + for s in summaries: + self.summary_writer.add_summary(s, global_step) + self.last_summary_time = time.time() + + def _local_step_summary(self): + """Compute number of local steps per time increment.""" + dt = time.time() - self.last_summary_time + steps_per_time = self.summary_interval / float(dt) + return tf.Summary(value=[ + tf.Summary.Value( + tag='local_step/per_sec', + simple_value=steps_per_time), + tf.Summary.Value( + tag='local_step/step', + simple_value=self.local_step)]) + + def maybe_save_best_model(self, session, saver, checkpoint_file): + """Check if this model got the highest reward and save to disk if so.""" + if self.is_chief and session.run(self.is_best_model): + logging.info('Saving best model to "%s"', checkpoint_file) + saver.save(session, checkpoint_file) + session.run(self.reset_is_best_model) + + def save_replay_buffer(self): + """Save replay buffer to disk. + + Call this periodically so that training can recover if jobs go down. + """ + if self.model.experience_replay is not None: + logging.info('Saving experience replay buffer to "%s".', + self.model.experience_replay.save_file) + self.model.experience_replay.incremental_save(True) + + def delete_replay_buffer(self): + """Delete replay buffer from disk. + + Call this at the end of training to clean up. Replay buffer can get very + large. + """ + if self.model.experience_replay is not None: + logging.info('Deleting experience replay buffer at "%s".', + self.model.experience_replay.save_file) + tf.gfile.Remove(self.model.experience_replay.save_file) + + def save_topk_buffer(self): + """Save top-k buffer to disk. + + Call this periodically so that training can recover if jobs go down. + """ + if self.model.top_episodes is not None: + logging.info('Saving top-k buffer to "%s".', self.topk_file) + # Overwrite previous data each time. 
+ with tf.gfile.FastGFile(self.topk_file, 'w') as f: + f.write(cPickle.dumps(self.model.top_episodes)) + + +@contextlib.contextmanager +def managed_session(sv, master='', config=None, + start_standard_services=True, + close_summary_writer=True, + max_wait_secs=7200): + # Same as Supervisor.managed_session, but with configurable timeout. + try: + sess = sv.prepare_or_wait_for_session( + master=master, config=config, + start_standard_services=start_standard_services, + max_wait_secs=max_wait_secs) + yield sess + except tf.errors.DeadlineExceededError: + raise + except Exception as e: # pylint: disable=broad-except + sv.request_stop(e) + finally: + try: + # Request all the threads to stop and wait for them to do so. Any + # exception raised by the threads is raised again from stop(). + # Passing stop_grace_period_secs is for blocked enqueue/dequeue + # threads which are not checking for `should_stop()`. They + # will be stopped when we close the session further down. + sv.stop(close_summary_writer=close_summary_writer) + finally: + # Close the session to finish up all pending calls. We do not care + # about exceptions raised when closing. This takes care of + # blocked enqueue/dequeue calls. + try: + sess.close() + except Exception: # pylint: disable=broad-except + # Silently ignore exceptions raised by close(). + pass + + +def train(config, is_chief, tuner=None, run_dir=None, run_number=0, + results_writer=None): + """Run training loop. + + Args: + config: config_lib.Config instance containing global config (agent and env). + is_chief: True if this worker is chief. Chief worker manages writing some + data to disk and initialization of the global model. + tuner: A tuner instance. If not tuning, leave as None. + run_dir: Directory where all data for this run will be written. If None, + run_dir = FLAGS.logdir. Set this argument when doing multiple runs. + run_number: Which run is this. + results_writer: Managest writing training results to disk. Results are a + dict of metric names and values. + + Returns: + The trainer object used to run training updates. + """ + logging.info('Will run asynchronous training.') + + if run_dir is None: + run_dir = FLAGS.logdir + train_dir = os.path.join(run_dir, 'train') + best_model_checkpoint = os.path.join(train_dir, 'best.ckpt') + events_dir = '%s/events_%d' % (run_dir, FLAGS.task_id) + logging.info('Events directory: %s', events_dir) + + logging_dir = os.path.join(run_dir, 'logs') + if not tf.gfile.Exists(logging_dir): + tf.gfile.MakeDirs(logging_dir) + status_file = os.path.join(logging_dir, 'status.txt') + + if FLAGS.summary_tasks and FLAGS.task_id < FLAGS.summary_tasks: + summary_writer = tf.summary.FileWriter(events_dir) + else: + summary_writer = None + + # Only profile task 0. 
+ if FLAGS.do_profiling: + logging.info('Profiling enabled') + profiler = cProfile.Profile() + profiler.enable() + else: + profiler = None + + trainer = AsyncTrainer( + config, FLAGS.task_id, FLAGS.ps_tasks, FLAGS.num_workers, + is_chief=is_chief, + summary_interval=FLAGS.summary_interval, + summary_writer=summary_writer, + logging_dir=logging_dir, + run_number=run_number, + model_v=FLAGS.model_v) + + variables_to_save = [v for v in tf.global_variables() + if v.name.startswith('global')] + global_init_op = tf.variables_initializer(variables_to_save) + saver = tf.train.Saver(variables_to_save) + + var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, + tf.get_variable_scope().name) + logging.info('Trainable vars:') + for v in var_list: + logging.info(' %s, %s, %s', v.name, v.device, v.get_shape()) + + logging.info('All vars:') + for v in tf.global_variables(): + logging.info(' %s, %s, %s', v.name, v.device, v.get_shape()) + + def init_fn(unused_sess): + logging.info('No checkpoint found. Initialized global params.') + + sv = tf.train.Supervisor(is_chief=is_chief, + logdir=train_dir, + saver=saver, + summary_op=None, + init_op=global_init_op, + init_fn=init_fn, + summary_writer=summary_writer, + ready_op=tf.report_uninitialized_variables( + variables_to_save), + ready_for_local_init_op=None, + global_step=trainer.global_step, + save_model_secs=30, + save_summaries_secs=30) + + # Add a thread that periodically checks if this Trial should stop + # based on an early stopping policy. + if tuner: + sv.Loop(60, tuner.check_for_stop, (sv.coord,)) + + last_replay_save_time = time.time() + + global_step = -1 + logging.info( + 'Starting session. ' + 'If this hangs, we\'re mostly likely waiting to connect ' + 'to the parameter server. One common cause is that the parameter ' + 'server DNS name isn\'t resolving yet, or is misspecified.') + should_retry = True + supervisor_deadline_exceeded = False + while should_retry: + try: + with managed_session( + sv, FLAGS.master, max_wait_secs=60) as session, session.as_default(): + should_retry = False + do_training = True + + try: + trainer.initialize(session) + if session.run(trainer.run_number) != run_number: + # If we loaded existing model from disk, and the saved run number is + # different, throw an exception. + raise RuntimeError( + 'Expecting to be on run %d, but is actually on run %d. ' + 'run_dir: "%s"' + % (run_number, session.run(trainer.run_number), run_dir)) + global_step = trainer.cached_global_step + logging.info('Starting training at step=%d', global_step) + while do_training: + trainer.update_global_model(session) + + if is_chief: + trainer.maybe_save_best_model( + session, saver, best_model_checkpoint) + global_step = trainer.cached_global_step + global_npe = trainer.cached_global_npe + + if time.time() - last_replay_save_time >= 30: + trainer.save_replay_buffer() + trainer.save_topk_buffer() + last_replay_save_time = time.time() + + # Stopping conditions. + if tuner and tuner.should_trial_stop(): + logging.info('Tuner requested early stopping. Finishing.') + do_training = False + if is_chief and FLAGS.stop_on_success: + found_solution = session.run(trainer.found_solution_flag) + if found_solution: + do_training = False + logging.info('Solution found. Finishing.') + if FLAGS.max_npe and global_npe >= FLAGS.max_npe: + # Max NPE (number of programs executed) reached. + logging.info('Max NPE reached. Finishing.') + do_training = False + if sv.should_stop(): + logging.info('Supervisor issued stop. 
Finishing.') + do_training = False + + except tf.errors.NotFoundError: + # Catch "Error while reading resource variable". + # The chief worker likely destroyed the container, so do not retry. + logging.info('Caught NotFoundError. Quitting.') + do_training = False + should_retry = False + break + except tf.errors.InternalError as e: + # Catch "Invalid variable reference." + if str(e).startswith('Invalid variable reference.'): + # The chief worker likely destroyed the container, so do not + # retry. + logging.info( + 'Caught "InternalError: Invalid variable reference.". ' + 'Quitting.') + do_training = False + should_retry = False + break + else: + # Pass exception through. + raise + + # Exited training loop. Write results to disk. + if is_chief and results_writer: + assert not should_retry + with tf.gfile.FastGFile(status_file, 'w') as f: + f.write('done') + (program_count, + found_solution, + code_solution, + best_reward, + global_step) = session.run( + [trainer.program_count, + trainer.found_solution_flag, + trainer.code_solution_variable, + trainer.global_best_reward, + trainer.global_step]) + results_dict = { + 'max_npe': FLAGS.max_npe, + 'batch_size': config.batch_size, + 'max_batches': FLAGS.max_npe // config.batch_size, + 'npe': program_count, + 'max_global_repetitions': FLAGS.num_repetitions, + 'max_local_repetitions': FLAGS.num_repetitions, + 'code_solution': code_solution, + 'best_reward': best_reward, + 'num_batches': global_step, + 'found_solution': found_solution, + 'task': trainer.data_manager.task_name, + 'global_rep': run_number} + logging.info('results_dict: %s', results_dict) + results_writer.append(results_dict) + + except tf.errors.AbortedError: + # Catch "Graph handle is not found" error due to preempted jobs. + logging.info('Caught AbortedError. Retying.') + should_retry = True + except tf.errors.DeadlineExceededError: + supervisor_deadline_exceeded = True + should_retry = False + + if is_chief: + logging.info('This is chief worker. Stopping all workers.') + sv.stop() + + if supervisor_deadline_exceeded: + logging.info('Supervisor timed out. Quitting.') + else: + logging.info('Reached %s steps. Worker stopped.', global_step) + + # Dump profiling. + """ + How to use profiling data. + + Download the profiler dump to your local machine, say to PROF_FILE_PATH. + In a separate script, run something like the following: + + import pstats + p = pstats.Stats(PROF_FILE_PATH) + p.strip_dirs().sort_stats('cumtime').print_stats() + + This will sort by 'cumtime', which "is the cumulative time spent in this and + all subfunctions (from invocation till exit)." + https://docs.python.org/2/library/profile.html#instant-user-s-manual + """ # pylint: disable=pointless-string-statement + if profiler: + prof_file = os.path.join(run_dir, 'task_%d.prof' % FLAGS.task_id) + logging.info('Done profiling.\nDumping to "%s".', prof_file) + profiler.create_stats() + with tf.gfile.Open(prof_file, 'w') as f: + f.write(marshal.dumps(profiler.stats)) + + return trainer + + +def run_training(config=None, tuner=None, logdir=None, trial_name=None, + is_chief=True): + """Do all training runs. + + This is the top level training function for policy gradient based models. + Run this from the main function. + + Args: + config: config_lib.Config instance containing global config (agent and + environment hparams). If None, config will be parsed from FLAGS.config. + tuner: A tuner instance. Leave as None if not tuning. + logdir: Parent directory where all data from all runs will be written. 
If + None, FLAGS.logdir will be used. + trial_name: If tuning, set this to a unique string that identifies this + trial. If `tuner` is not None, this also must be set. + is_chief: True if this worker is the chief. + + Returns: + List of results dicts which were written to disk. Each training run gets a + results dict. Results dict contains metrics, i.e. (name, value) pairs which + give information about the training run. + + Raises: + ValueError: If results dicts read from disk contain invalid data. + """ + if not config: + # If custom config is not given, get it from flags. + config = defaults.default_config_with_updates(FLAGS.config) + if not logdir: + logdir = FLAGS.logdir + if not tf.gfile.Exists(logdir): + tf.gfile.MakeDirs(logdir) + assert FLAGS.num_repetitions > 0 + results = results_lib.Results(logdir) + results_list, _ = results.read_all() + + logging.info('Starting experiment. Directory: "%s"', logdir) + + if results_list: + if results_list[0]['max_npe'] != FLAGS.max_npe: + raise ValueError( + 'Cannot resume training. Max-NPE changed. Was %s, now %s', + results_list[0]['max_npe'], FLAGS.max_npe) + if results_list[0]['max_global_repetitions'] != FLAGS.num_repetitions: + raise ValueError( + 'Cannot resume training. Number of repetitions changed. Was %s, ' + 'now %s', + results_list[0]['max_global_repetitions'], + FLAGS.num_repetitions) + + while len(results_list) < FLAGS.num_repetitions: + run_number = len(results_list) + rep_container_name = trial_name if trial_name else 'container' + if FLAGS.num_repetitions > 1: + rep_dir = os.path.join(logdir, 'run_%d' % run_number) + rep_container_name = rep_container_name + '_run_' + str(run_number) + else: + rep_dir = logdir + + logging.info( + 'Starting repetition %d (%d out of %d)', run_number, run_number + 1, + FLAGS.num_repetitions) + + # Train will write result to disk. + with tf.container(rep_container_name): + trainer = train(config, is_chief, tuner, rep_dir, run_number, results) + logging.info('Done training.') + + if is_chief: + # Destroy current container immediately (clears current graph). + logging.info('Clearing shared variables.') + tf.Session.reset(FLAGS.master, containers=[rep_container_name]) + logging.info('Shared variables cleared.') + + # Delete replay buffer on disk. + assert trainer + trainer.delete_replay_buffer() + else: + # Give chief worker time to clean up. + sleep_sec = 30.0 + logging.info('Sleeping for %s sec.', sleep_sec) + time.sleep(sleep_sec) + tf.reset_default_graph() + logging.info('Default graph reset.') + + # Expecting that train wrote new result to disk before returning. + results_list, _ = results.read_all() + return results_list diff --git a/models/research/brain_coder/single_task/pg_train_test.py b/models/research/brain_coder/single_task/pg_train_test.py new file mode 100644 index 0000000000000000000000000000000000000000..0a562e5331e638cab82bc8033bfa2c1fc355e960 --- /dev/null +++ b/models/research/brain_coder/single_task/pg_train_test.py @@ -0,0 +1,87 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""Tests for pg_train. + +These tests excersize code paths available through configuration options. +Training will be run for just a few steps with the goal being to check that +nothing crashes. 
+""" + +from absl import flags +import tensorflow as tf + +from single_task import defaults # brain coder +from single_task import run # brain coder + +FLAGS = flags.FLAGS + + +class TrainTest(tf.test.TestCase): + + def RunTrainingSteps(self, config_string, num_steps=10): + """Run a few training steps with the given config. + + Just check that nothing crashes. + + Args: + config_string: Config encoded in a string. See + $REPO_PATH/common/config_lib.py + num_steps: Number of training steps to run. Defaults to 10. + """ + config = defaults.default_config_with_updates(config_string) + FLAGS.master = '' + FLAGS.max_npe = num_steps * config.batch_size + FLAGS.summary_interval = 1 + FLAGS.logdir = tf.test.get_temp_dir() + FLAGS.config = config_string + tf.reset_default_graph() + run.main(None) + + def testVanillaPolicyGradient(self): + self.RunTrainingSteps( + 'env=c(task="reverse"),' + 'agent=c(algorithm="pg"),' + 'timestep_limit=90,batch_size=64') + + def testVanillaPolicyGradient_VariableLengthSequences(self): + self.RunTrainingSteps( + 'env=c(task="reverse"),' + 'agent=c(algorithm="pg",eos_token=False),' + 'timestep_limit=90,batch_size=64') + + def testVanillaActorCritic(self): + self.RunTrainingSteps( + 'env=c(task="reverse"),' + 'agent=c(algorithm="pg",ema_baseline_decay=0.0),' + 'timestep_limit=90,batch_size=64') + + def testPolicyGradientWithTopK(self): + self.RunTrainingSteps( + 'env=c(task="reverse"),' + 'agent=c(algorithm="pg",topk_loss_hparam=1.0,topk=10),' + 'timestep_limit=90,batch_size=64') + + def testVanillaActorCriticWithTopK(self): + self.RunTrainingSteps( + 'env=c(task="reverse"),' + 'agent=c(algorithm="pg",ema_baseline_decay=0.0,topk_loss_hparam=1.0,' + 'topk=10),' + 'timestep_limit=90,batch_size=64') + + def testPolicyGradientWithTopK_VariableLengthSequences(self): + self.RunTrainingSteps( + 'env=c(task="reverse"),' + 'agent=c(algorithm="pg",topk_loss_hparam=1.0,topk=10,eos_token=False),' + 'timestep_limit=90,batch_size=64') + + def testPolicyGradientWithImportanceSampling(self): + self.RunTrainingSteps( + 'env=c(task="reverse"),' + 'agent=c(algorithm="pg",alpha=0.5),' + 'timestep_limit=90,batch_size=64') + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/brain_coder/single_task/results_lib.py b/models/research/brain_coder/single_task/results_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..fd28fdd49ba3200dc9faa18d1722235ee4bf2ac2 --- /dev/null +++ b/models/research/brain_coder/single_task/results_lib.py @@ -0,0 +1,155 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""Results object manages distributed reading and writing of results to disk.""" + +import ast +from collections import namedtuple +import os +import re +from six.moves import xrange +import tensorflow as tf + + +ShardStats = namedtuple( + 'ShardStats', + ['num_local_reps_completed', 'max_local_reps', 'finished']) + + +def ge_non_zero(a, b): + return a >= b and b > 0 + + +def get_shard_id(file_name): + assert file_name[-4:].lower() == '.txt' + return int(file_name[file_name.rfind('_') + 1: -4]) + + +class Results(object): + """Manages reading and writing training results to disk asynchronously. + + Each worker writes to its own file, so that there are no race conditions when + writing happens. However any worker may read any file, as is the case for + `read_all`. 
Writes are expected to be atomic so that workers will never + read incomplete data, and this is likely to be the case on Unix systems. + Reading out of date data is fine, as workers calling `read_all` will wait + until data from every worker has been written before proceeding. + """ + file_template = 'experiment_results_{0}.txt' + search_regex = r'^experiment_results_([0-9])+\.txt$' + + def __init__(self, log_dir, shard_id=0): + """Construct `Results` instance. + + Args: + log_dir: Where to write results files. + shard_id: Unique id for this file (i.e. shard). Each worker that will + be writing results should use a different shard id. If there are + N shards, each shard should be numbered 0 through N-1. + """ + # Use different files for workers so that they can write to disk async. + assert 0 <= shard_id + self.file_name = self.file_template.format(shard_id) + self.log_dir = log_dir + self.results_file = os.path.join(self.log_dir, self.file_name) + + def append(self, metrics): + """Append results to results list on disk.""" + with tf.gfile.FastGFile(self.results_file, 'a') as writer: + writer.write(str(metrics) + '\n') + + def read_this_shard(self): + """Read only from this shard.""" + return self._read_shard(self.results_file) + + def _read_shard(self, results_file): + """Read only from the given shard file.""" + try: + with tf.gfile.FastGFile(results_file, 'r') as reader: + results = [ast.literal_eval(entry) for entry in reader] + except tf.errors.NotFoundError: + # No results written to disk yet. Return empty list. + return [] + return results + + def _get_max_local_reps(self, shard_results): + """Get maximum number of repetitions the given shard needs to complete. + + Worker working on each shard needs to complete a certain number of runs + before it finishes. This method will return that number so that we can + determine which shards are still not done. + + We assume that workers are including a 'max_local_repetitions' value in + their results, which should be the total number of repetitions it needs to + run. + + Args: + shard_results: Dict mapping metric names to values. This should be read + from a shard on disk. + + Returns: + Maximum number of repetitions the given shard needs to complete. + """ + mlrs = [r['max_local_repetitions'] for r in shard_results] + if not mlrs: + return 0 + for n in mlrs[1:]: + assert n == mlrs[0], 'Some reps have different max rep.' + return mlrs[0] + + def read_all(self, num_shards=None): + """Read results across all shards, i.e. get global results list. + + Args: + num_shards: (optional) specifies total number of shards. If the caller + wants information about which shards are incomplete, provide this + argument (so that shards which have yet to be created are still + counted as incomplete shards). Otherwise, no information about + incomplete shards will be returned. + + Returns: + aggregate: Global list of results (across all shards). + shard_stats: List of ShardStats instances, one for each shard. Or None if + `num_shards` is None. 
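
    Example (illustrative; assumes two worker shards have written results):
      aggregate, shard_stats = results.read_all(num_shards=2)
    Here `aggregate` is the concatenated list of result dicts from both
    shards, and `shard_stats[i].finished` says whether shard i has completed
    all of its repetitions.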
+ """ + try: + all_children = tf.gfile.ListDirectory(self.log_dir) + except tf.errors.NotFoundError: + if num_shards is None: + return [], None + return [], [[] for _ in xrange(num_shards)] + shard_ids = { + get_shard_id(fname): fname + for fname in all_children if re.search(self.search_regex, fname)} + + if num_shards is None: + aggregate = [] + shard_stats = None + for results_file in shard_ids.values(): + aggregate.extend(self._read_shard( + os.path.join(self.log_dir, results_file))) + else: + results_per_shard = [None] * num_shards + for shard_id in xrange(num_shards): + if shard_id in shard_ids: + results_file = shard_ids[shard_id] + results_per_shard[shard_id] = self._read_shard( + os.path.join(self.log_dir, results_file)) + else: + results_per_shard[shard_id] = [] + + # Compute shard stats. + shard_stats = [] + for shard_results in results_per_shard: + max_local_reps = self._get_max_local_reps(shard_results) + shard_stats.append(ShardStats( + num_local_reps_completed=len(shard_results), + max_local_reps=max_local_reps, + finished=ge_non_zero(len(shard_results), max_local_reps))) + + # Compute aggregate. + aggregate = [ + r for shard_results in results_per_shard for r in shard_results] + + return aggregate, shard_stats diff --git a/models/research/brain_coder/single_task/results_lib_test.py b/models/research/brain_coder/single_task/results_lib_test.py new file mode 100644 index 0000000000000000000000000000000000000000..6fe838d74d6a3bdea4c3b219a4d3ceea4385a97e --- /dev/null +++ b/models/research/brain_coder/single_task/results_lib_test.py @@ -0,0 +1,84 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""Tests for results_lib.""" + +import contextlib +import os +import shutil +import tempfile +from six.moves import xrange +import tensorflow as tf + +from single_task import results_lib # brain coder + + +@contextlib.contextmanager +def temporary_directory(suffix='', prefix='tmp', base_path=None): + """A context manager to create a temporary directory and clean up on exit. + + The parameters are the same ones expected by tempfile.mkdtemp. + The directory will be securely and atomically created. + Everything under it will be removed when exiting the context. + + Args: + suffix: optional suffix. + prefix: options prefix. + base_path: the base path under which to create the temporary directory. + Yields: + The absolute path of the new temporary directory. + """ + temp_dir_path = tempfile.mkdtemp(suffix, prefix, base_path) + try: + yield temp_dir_path + finally: + try: + shutil.rmtree(temp_dir_path) + except OSError as e: + if e.message == 'Cannot call rmtree on a symbolic link': + # Interesting synthetic exception made up by shutil.rmtree. + # Means we received a symlink from mkdtemp. + # Also means must clean up the symlink instead. 
+ os.unlink(temp_dir_path) + else: + raise + + +def freeze(dictionary): + """Convert dict to hashable frozenset.""" + return frozenset(dictionary.iteritems()) + + +class ResultsLibTest(tf.test.TestCase): + + def testResults(self): + with temporary_directory() as logdir: + results_obj = results_lib.Results(logdir) + self.assertEqual(results_obj.read_this_shard(), []) + results_obj.append( + {'foo': 1.5, 'bar': 2.5, 'baz': 0}) + results_obj.append( + {'foo': 5.5, 'bar': -1, 'baz': 2}) + self.assertEqual( + results_obj.read_this_shard(), + [{'foo': 1.5, 'bar': 2.5, 'baz': 0}, + {'foo': 5.5, 'bar': -1, 'baz': 2}]) + + def testShardedResults(self): + with temporary_directory() as logdir: + n = 4 # Number of shards. + results_objs = [ + results_lib.Results(logdir, shard_id=i) for i in xrange(n)] + for i, robj in enumerate(results_objs): + robj.append({'foo': i, 'bar': 1 + i * 2}) + results_list, _ = results_objs[0].read_all() + + # Check results. Order does not matter here. + self.assertEqual( + set(freeze(r) for r in results_list), + set(freeze({'foo': i, 'bar': 1 + i * 2}) for i in xrange(n))) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/brain_coder/single_task/run.py b/models/research/brain_coder/single_task/run.py new file mode 100644 index 0000000000000000000000000000000000000000..9d8f37c973dcca3bbf8e25bce3d181e5405c6167 --- /dev/null +++ b/models/research/brain_coder/single_task/run.py @@ -0,0 +1,142 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +r"""Run training. + +Choose training algorithm and task(s) and follow these examples. + +Run synchronous policy gradient training locally: + +CONFIG="agent=c(algorithm='pg'),env=c(task='reverse')" +OUT_DIR="/tmp/bf_pg_local" +rm -rf $OUT_DIR +bazel run -c opt single_task:run -- \ + --alsologtostderr \ + --config="$CONFIG" \ + --max_npe=0 \ + --logdir="$OUT_DIR" \ + --summary_interval=1 \ + --model_v=0 +learning/brain/tensorboard/tensorboard.sh --port 12345 --logdir "$OUT_DIR" + + +Run genetic algorithm locally: + +CONFIG="agent=c(algorithm='ga'),env=c(task='reverse')" +OUT_DIR="/tmp/bf_ga_local" +rm -rf $OUT_DIR +bazel run -c opt single_task:run -- \ + --alsologtostderr \ + --config="$CONFIG" \ + --max_npe=0 \ + --logdir="$OUT_DIR" + + +Run uniform random search locally: + +CONFIG="agent=c(algorithm='rand'),env=c(task='reverse')" +OUT_DIR="/tmp/bf_rand_local" +rm -rf $OUT_DIR +bazel run -c opt single_task:run -- \ + --alsologtostderr \ + --config="$CONFIG" \ + --max_npe=0 \ + --logdir="$OUT_DIR" +""" + +from absl import app +from absl import flags +from absl import logging + +from single_task import defaults # brain coder +from single_task import ga_train # brain coder +from single_task import pg_train # brain coder + +FLAGS = flags.FLAGS +flags.DEFINE_string('config', '', 'Configuration.') +flags.DEFINE_string( + 'logdir', None, 'Absolute path where to write results.') +flags.DEFINE_integer('task_id', 0, 'ID for this worker.') +flags.DEFINE_integer('num_workers', 1, 'How many workers there are.') +flags.DEFINE_integer( + 'max_npe', 0, + 'NPE = number of programs executed. Maximum number of programs to execute ' + 'in each run. Training will complete when this threshold is reached. Set ' + 'to 0 for unlimited training.') +flags.DEFINE_integer( + 'num_repetitions', 1, + 'Number of times the same experiment will be run (globally across all ' + 'workers). 
Each run is independent.') +flags.DEFINE_string( + 'log_level', 'INFO', + 'The threshold for what messages will be logged. One of DEBUG, INFO, WARN, ' + 'ERROR, or FATAL.') + + +# To register an algorithm: +# 1) Add dependency in the BUILD file to this build rule. +# 2) Import the algorithm's module at the top of this file. +# 3) Add a new entry in the following dict. The key is the algorithm name +# (used to select the algorithm in the config). The value is the module +# defining the expected functions for training and tuning. See the docstring +# for `get_namespace` for further details. +ALGORITHM_REGISTRATION = { + 'pg': pg_train, + 'ga': ga_train, + 'rand': ga_train, +} + + +def get_namespace(config_string): + """Get namespace for the selected algorithm. + + Users who want to add additional algorithm types should modify this function. + The algorithm's namespace should contain the following functions: + run_training: Run the main training loop. + define_tuner_hparam_space: Return the hparam tuning space for the algo. + write_hparams_to_config: Helper for tuning. Write hparams chosen for tuning + to the Config object. + Look at pg_train.py and ga_train.py for function signatures and + implementations. + + Args: + config_string: String representation of a Config object. This will get + parsed into a Config in order to determine what algorithm to use. + + Returns: + algorithm_namespace: The module corresponding to the algorithm given in the + config. + config: The Config object resulting from parsing `config_string`. + + Raises: + ValueError: If config.agent.algorithm is not one of the registered + algorithms. + """ + config = defaults.default_config_with_updates(config_string) + if config.agent.algorithm not in ALGORITHM_REGISTRATION: + raise ValueError('Unknown algorithm type "%s"' % (config.agent.algorithm,)) + else: + return ALGORITHM_REGISTRATION[config.agent.algorithm], config + + +def main(argv): + del argv # Unused. + + logging.set_verbosity(FLAGS.log_level) + + flags.mark_flag_as_required('logdir') + if FLAGS.num_workers <= 0: + raise ValueError('num_workers flag must be greater than 0.') + if FLAGS.task_id < 0: + raise ValueError('task_id flag must be greater than or equal to 0.') + if FLAGS.task_id >= FLAGS.num_workers: + raise ValueError( + 'task_id flag must be strictly less than num_workers flag.') + + ns, _ = get_namespace(FLAGS.config) + ns.run_training(is_chief=FLAGS.task_id == 0) + + +if __name__ == '__main__': + app.run(main) diff --git a/models/research/brain_coder/single_task/run_eval_tasks.py b/models/research/brain_coder/single_task/run_eval_tasks.py new file mode 100644 index 0000000000000000000000000000000000000000..eb684c344381462cd3626404b5d7fd7cf5d72b22 --- /dev/null +++ b/models/research/brain_coder/single_task/run_eval_tasks.py @@ -0,0 +1,296 @@ +#!/usr/bin/env python +from __future__ import print_function + +r"""This script can launch any eval experiments from the paper. + +This is a script. Run with python, not bazel. + +Usage: +./single_task/run_eval_tasks.py \ + --exp EXP --desc DESC [--tuning_tasks] [--iclr_tasks] [--task TASK] \ + [--tasks TASK1 TASK2 ...] + +where EXP is one of the keys in `experiments`, +and DESC is a string description of the set of experiments (such as "v0") + +Set only one of these flags: +--tuning_tasks flag only runs tuning tasks. +--iclr_tasks flag only runs the tasks included in the paper. +--regression_tests flag runs tasks which function as regression tests. +--task flag manually selects a single task to run. 
+--tasks flag takes a custom list of tasks. + +Other flags: +--reps N specifies N repetitions per experiment, Default is 25. +--training_replicas R specifies that R workers will be launched to train one + task (for neural network algorithms). These workers will update a global + model stored on a parameter server. Defaults to 1. If R > 1, a parameter + server will also be launched. + + +Run everything: +exps=( pg-20M pg-topk-20M topk-20M ga-20M rand-20M ) +BIN_DIR="single_task" +for exp in "${exps[@]}" +do + ./$BIN_DIR/run_eval_tasks.py \ + --exp "$exp" --iclr_tasks +done +""" + +import argparse +from collections import namedtuple +import subprocess + + +S = namedtuple('S', ['length']) +default_length = 100 + + +iclr_tasks = [ + 'reverse', 'remove-char', 'count-char', 'add', 'bool-logic', 'print-hello', + 'echo-twice', 'echo-thrice', 'copy-reverse', 'zero-cascade', 'cascade', + 'shift-left', 'shift-right', 'riffle', 'unriffle', 'middle-char', + 'remove-last', 'remove-last-two', 'echo-alternating', 'echo-half', 'length', + 'echo-second-seq', 'echo-nth-seq', 'substring', 'divide-2', 'dedup'] + + +regression_test_tasks = ['reverse', 'test-hill-climb'] + + +E = namedtuple( + 'E', + ['name', 'method_type', 'config', 'simplify', 'batch_size', 'max_npe']) + + +def make_experiment_settings(name, **kwargs): + # Unpack experiment info from name. + def split_last(string, char): + i = string.rindex(char) + return string[:i], string[i+1:] + def si_to_int(si_string): + return int( + si_string.upper().replace('K', '0'*3).replace('M', '0'*6) + .replace('G', '0'*9)) + method_type, max_npe = split_last(name, '-') + assert method_type + assert max_npe + return E( + name=name, method_type=method_type, max_npe=si_to_int(max_npe), **kwargs) + + +experiments_set = { + make_experiment_settings( + 'pg-20M', + config='entropy_beta=0.05,lr=0.0001,topk_loss_hparam=0.0,topk=0,' + 'pi_loss_hparam=1.0,alpha=0.0', + simplify=False, + batch_size=64), + make_experiment_settings( + 'pg-topk-20M', + config='entropy_beta=0.01,lr=0.0001,topk_loss_hparam=50.0,topk=10,' + 'pi_loss_hparam=1.0,alpha=0.0', + simplify=False, + batch_size=64), + make_experiment_settings( + 'topk-20M', + config='entropy_beta=0.01,lr=0.0001,topk_loss_hparam=200.0,topk=10,' + 'pi_loss_hparam=0.0,alpha=0.0', + simplify=False, + batch_size=64), + make_experiment_settings( + 'topk-0ent-20M', + config='entropy_beta=0.000,lr=0.0001,topk_loss_hparam=200.0,topk=10,' + 'pi_loss_hparam=0.0,alpha=0.0', + simplify=False, + batch_size=64), + make_experiment_settings( + 'ga-20M', + config='crossover_rate=0.95,mutation_rate=0.15', + simplify=False, + batch_size=100), # Population size. 
+ make_experiment_settings( + 'rand-20M', + config='', + simplify=False, + batch_size=1), + make_experiment_settings( + 'simpl-500M', + config='entropy_beta=0.05,lr=0.0001,topk_loss_hparam=0.5,topk=10,' + 'pi_loss_hparam=1.0,alpha=0.0', + simplify=True, + batch_size=64), +} + +experiments = {e.name: e for e in experiments_set} + + +# pylint: disable=redefined-outer-name +def parse_args(extra_args=()): + """Parse arguments and extract task and experiment info.""" + parser = argparse.ArgumentParser(description='Run all eval tasks.') + parser.add_argument('--exp', required=True) + parser.add_argument('--tuning_tasks', action='store_true') + parser.add_argument('--iclr_tasks', action='store_true') + parser.add_argument('--regression_tests', action='store_true') + parser.add_argument('--desc', default='v0') + parser.add_argument('--reps', default=25) + parser.add_argument('--task') + parser.add_argument('--tasks', nargs='+') + for arg_string, default in extra_args: + parser.add_argument(arg_string, default=default) + args = parser.parse_args() + + print('Running experiment: %s' % (args.exp,)) + if args.desc: + print('Extra description: "%s"' % (args.desc,)) + if args.exp not in experiments: + raise ValueError('Experiment name is not valid') + experiment_name = args.exp + experiment_settings = experiments[experiment_name] + assert experiment_settings.name == experiment_name + + if args.tasks: + print('Launching tasks from args: %s' % (args.tasks,)) + tasks = {t: S(length=default_length) for t in args.tasks} + elif args.task: + print('Launching single task "%s"' % args.task) + tasks = {args.task: S(length=default_length)} + elif args.tuning_tasks: + print('Only running tuning tasks') + tasks = {name: S(length=default_length) + for name in ['reverse-tune', 'remove-char-tune']} + elif args.iclr_tasks: + print('Running eval tasks from ICLR paper.') + tasks = {name: S(length=default_length) for name in iclr_tasks} + elif args.regression_tests: + tasks = {name: S(length=default_length) for name in regression_test_tasks} + print('Tasks: %s' % tasks.keys()) + + print('reps = %d' % (int(args.reps),)) + + return args, tasks, experiment_settings + + +def run(command_string): + subprocess.call(command_string, shell=True) + + +if __name__ == '__main__': + LAUNCH_TRAINING_COMMAND = 'single_task/launch_training.sh' + COMPILE_COMMAND = 'bazel build -c opt single_task:run.par' + + args, tasks, experiment_settings = parse_args( + extra_args=(('--training_replicas', 1),)) + + if experiment_settings.method_type in ( + 'pg', 'pg-topk', 'topk', 'topk-0ent', 'simpl'): + # Runs PG and TopK. + + def make_run_cmd(job_name, task, max_npe, num_reps, code_length, + batch_size, do_simplify, custom_config_str): + """Constructs terminal command for launching NN based algorithms. + + The arguments to this function will be used to create config for the + experiment. + + Args: + job_name: Name of the job to launch. Should uniquely identify this + experiment run. + task: Name of the coding task to solve. + max_npe: Maximum number of programs executed. An integer. + num_reps: Number of times to run the experiment. An integer. + code_length: Maximum allowed length of synthesized code. + batch_size: Minibatch size for gradient descent. + do_simplify: Whether to run the experiment in code simplification mode. + A bool. + custom_config_str: Additional config for the model config string. + + Returns: + The terminal command that launches the specified experiment. 
+ """ + config = """ + env=c(task='{0}',correct_syntax=False), + agent=c( + algorithm='pg', + policy_lstm_sizes=[35,35],value_lstm_sizes=[35,35], + grad_clip_threshold=50.0,param_init_factor=0.5,regularizer=0.0, + softmax_tr=1.0,optimizer='rmsprop',ema_baseline_decay=0.99, + eos_token={3},{4}), + timestep_limit={1},batch_size={2} + """.replace(' ', '').replace('\n', '').format( + task, code_length, batch_size, do_simplify, custom_config_str) + num_ps = 0 if args.training_replicas == 1 else 1 + return ( + r'{0} --job_name={1} --config="{2}" --max_npe={3} ' + '--num_repetitions={4} --num_workers={5} --num_ps={6} ' + '--stop_on_success={7}' + .format(LAUNCH_TRAINING_COMMAND, job_name, config, max_npe, num_reps, + args.training_replicas, num_ps, str(not do_simplify).lower())) + + else: + # Runs GA and Rand. + assert experiment_settings.method_type in ('ga', 'rand') + + def make_run_cmd(job_name, task, max_npe, num_reps, code_length, + batch_size, do_simplify, custom_config_str): + """Constructs terminal command for launching GA or uniform random search. + + The arguments to this function will be used to create config for the + experiment. + + Args: + job_name: Name of the job to launch. Should uniquely identify this + experiment run. + task: Name of the coding task to solve. + max_npe: Maximum number of programs executed. An integer. + num_reps: Number of times to run the experiment. An integer. + code_length: Maximum allowed length of synthesized code. + batch_size: Minibatch size for gradient descent. + do_simplify: Whether to run the experiment in code simplification mode. + A bool. + custom_config_str: Additional config for the model config string. + + Returns: + The terminal command that launches the specified experiment. + """ + assert not do_simplify + if custom_config_str: + custom_config_str = ',' + custom_config_str + config = """ + env=c(task='{0}',correct_syntax=False), + agent=c( + algorithm='{4}' + {3}), + timestep_limit={1},batch_size={2} + """.replace(' ', '').replace('\n', '').format( + task, code_length, batch_size, custom_config_str, + experiment_settings.method_type) + num_workers = num_reps # Do each rep in parallel. + return ( + r'{0} --job_name={1} --config="{2}" --max_npe={3} ' + '--num_repetitions={4} --num_workers={5} --num_ps={6} ' + '--stop_on_success={7}' + .format(LAUNCH_TRAINING_COMMAND, job_name, config, max_npe, num_reps, + num_workers, 0, str(not do_simplify).lower())) + + print('Compiling...') + run(COMPILE_COMMAND) + + print('Launching %d coding tasks...' 
% len(tasks)) + for task, task_settings in tasks.iteritems(): + name = 'bf_rl_iclr' + desc = '{0}.{1}_{2}'.format(args.desc, experiment_settings.name, task) + job_name = '{}.{}'.format(name, desc) + print('Job name: %s' % job_name) + reps = int(args.reps) if not experiment_settings.simplify else 1 + run_cmd = make_run_cmd( + job_name, task, experiment_settings.max_npe, reps, + task_settings.length, experiment_settings.batch_size, + experiment_settings.simplify, + experiment_settings.config) + print('Running command:\n' + run_cmd) + run(run_cmd) + + print('Done.') +# pylint: enable=redefined-outer-name diff --git a/models/research/brain_coder/single_task/test_tasks.py b/models/research/brain_coder/single_task/test_tasks.py new file mode 100644 index 0000000000000000000000000000000000000000..fb07a12653ebad6b38dc3786e749d3e8bf2b2072 --- /dev/null +++ b/models/research/brain_coder/single_task/test_tasks.py @@ -0,0 +1,127 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""Tasks that test correctness of algorithms.""" + +from six.moves import xrange +from common import reward as reward_lib # brain coder +from single_task import misc # brain coder + + +class BasicTaskManager(object): + """Wraps a generic reward function.""" + + def __init__(self, reward_fn): + self.reward_fn = reward_fn + self.good_reward = 1.0 + + def _score_string(self, string): + actions = misc.bf_string_to_tokens(string) + reward, correct = self.reward_fn(actions) + return misc.RewardInfo( + episode_rewards=[0.0] * (len(string) - 1) + [reward], + input_case=None, + correct_output=None, + code_output=actions, + input_type=None, + output_type=misc.IOType.integer, + reason='correct' if correct else 'wrong') + + def rl_batch(self, batch_size): + reward_fns = [self._score_string] * batch_size + return reward_fns + + +class Trie(object): + """Trie for sequences.""" + EOS = () + + def __init__(self): + self.trie = {} + + def insert(self, sequence): + d = self.trie + for e in sequence: + if e not in d: + d[e] = {} + d = d[e] + d[self.EOS] = True # Terminate sequence. + + def prefix_match(self, sequence): + """Return prefix of `sequence` which exists in the trie.""" + d = self.trie + index = 0 + for i, e in enumerate(sequence + [self.EOS]): + index = i + if e in d: + d = d[e] + if e == self.EOS: + return sequence, True + else: + break + return sequence[:index], False + + def next_choices(self, sequence): + d = self.trie + for e in sequence: + if e in d: + d = d[e] + else: + raise ValueError('Sequence not a prefix: %s' % (sequence,)) + return d.keys() + + +class HillClimbingTask(object): + """Simple task that tests reward hill climbing ability. + + There are a set of paths (sequences of tokens) which are rewarded. The total + reward for a path is proportional to its length, so the longest path is the + target. Shorter paths can be dead ends. + """ + + def __init__(self): + # Paths are sequences of sub-sequences. Here we form unique sub-sequences + # out of 3 arbitrary ints. We use sub-sequences instead of single entities + # to make the task harder by making the episodes last longer, i.e. more + # for the agent to remember. 
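+ # For example (see test_tasks_test.py): emitting tokens 1,2,3,4,5,6 forms the
+ # sub-sequences [a, b] defined below, which prefix-match a stored path and
+ # earn reward 2.0. A wrong next sub-sequence adds only a fraction of a point,
+ # larger the closer it is to a valid next choice.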
+ a = (1, 2, 3) + b = (4, 5, 6) + c = (7, 8, 7) + d = (6, 5, 4) + e = (3, 2, 1) + f = (8, 5, 1) + g = (6, 4, 2) + h = (1, 8, 3) + self.paths = Trie() + self.paths.insert([a, b, h]) + self.paths.insert([a, b, c, d, e, f, g, h]) + self.paths.insert([a, b, c, d, e, b, a]) + self.paths.insert([a, b, g, h]) + self.paths.insert([a, e, f, g]) + self.correct_sequence = misc.flatten([a, b, c, d, e, f, g, h]) + + def distance_fn(a, b): + len_diff = abs(len(a) - len(b)) + return sum(reward_lib.mod_abs_diff(ai - 1, bi - 1, 8) + for ai, bi in zip(a, b)) + len_diff * 4 # 8 / 2 = 4 + self.distance_fn = distance_fn + + def __call__(self, actions): + # Compute reward for action sequence. + actions = [a for a in actions if a > 0] + sequence = [tuple(actions[i: i + 3]) for i in xrange(0, len(actions), 3)] + prefix, complete = self.paths.prefix_match(sequence) + if complete: + return float(len(prefix)), actions == self.correct_sequence + if len(prefix) == len(sequence): + return float(len(prefix)), False + next_pred = sequence[len(prefix)] + choices = self.paths.next_choices(prefix) + if choices == [()]: + return (len(prefix) - len(next_pred) / 3.0), False + min_dist = min(self.distance_fn(c, next_pred) for c in choices) + # +1 reward for each element in the sequence correct, plus fraction torwards + # closest next element. + # Maximum distance possible is num_actions * base / 2 = 3 * 8 / 2 = 12 + return (len(prefix) + (1 - min_dist / 12.0)), False diff --git a/models/research/brain_coder/single_task/test_tasks_test.py b/models/research/brain_coder/single_task/test_tasks_test.py new file mode 100644 index 0000000000000000000000000000000000000000..bc905c6936de4c686e6cac1203c65c36bd7a0b16 --- /dev/null +++ b/models/research/brain_coder/single_task/test_tasks_test.py @@ -0,0 +1,63 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +"""Tests for test_tasks.""" + +import numpy as np +import tensorflow as tf + +from single_task import misc # brain coder +from single_task import test_tasks # brain coder + + +def get_reward(reward_fn, candidate): + return sum(reward_fn(misc.bf_tokens_to_string(candidate)).episode_rewards) + + +class TestTasksTest(tf.test.TestCase): + + def testHillClimbingTask(self): + task = test_tasks.BasicTaskManager(test_tasks.HillClimbingTask()) + reward_fns = task.rl_batch(1) + reward_fn = reward_fns[0] + self.assertTrue(np.isclose(get_reward(reward_fn, [1, 2, 0]), 8 / 12.)) + self.assertTrue(np.isclose(get_reward(reward_fn, [1, 2, 2, 0]), 11 / 12.)) + self.assertTrue(np.isclose(get_reward(reward_fn, [1, 2, 3, 0]), 1.0)) + self.assertTrue( + np.isclose(get_reward(reward_fn, [1, 2, 3, 4, 5, 2, 0]), 1. 
+ 8 / 12.)) + self.assertTrue( + np.isclose(get_reward(reward_fn, [1, 2, 3, 4, 5, 6, 0]), 2.0)) + self.assertTrue( + np.isclose(get_reward(reward_fn, [1, 2, 3, 4, 5, 6, 1, 8, 3, 0]), 3.0)) + self.assertTrue( + np.isclose(get_reward(reward_fn, [1, 2, 3, 4, 5, 6, 7, 8, 7, 0]), 3.0)) + self.assertTrue( + np.isclose(get_reward(reward_fn, [1, 2, 3, 4, 5, 6, 1, 8, 3, 1, 0]), + 3.0 - 4 / 12.)) + self.assertTrue( + np.isclose( + get_reward(reward_fn, [1, 2, 3, 4, 5, 6, 1, 8, 3, 1, 1, 1, 1, 0]), + 2.0)) + self.assertTrue( + np.isclose(get_reward(reward_fn, [1, 2, 3, 4, 5, 6, 7, 8, 7, 3, 0]), + 3.0 + 1 / 12.)) + self.assertTrue( + np.isclose( + get_reward(reward_fn, [1, 2, 3, 4, 5, 6, 7, 8, 7, 6, 5, 4, 3, 2, 1, + 8, 5, 1, 6, 4, 2, 1, 8, 3, 0]), + 8.0)) + self.assertTrue( + np.isclose( + get_reward(reward_fn, [1, 2, 3, 4, 5, 6, 7, 8, 7, 6, 5, 4, 3, 2, 1, + 8, 5, 1, 6, 4, 2, 1, 8, 3, 1, 1, 0]), + 8.0 - 8 / 12.)) + self.assertTrue( + np.isclose(get_reward(reward_fn, [1, 2, 3, 4, 5, 6, 7, 8, 7, 6, 5, 4, 3, + 2, 1, 8, 5, 1, 6, 4, 2, 1, 8, 3, 1, 1, + 1, 1, 1, 1, 1, 0]), + 7.0)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/brain_coder/single_task/tune.py b/models/research/brain_coder/single_task/tune.py new file mode 100644 index 0000000000000000000000000000000000000000..3473b5e94bd3c1f737a18f0187790d5df2d7a2aa --- /dev/null +++ b/models/research/brain_coder/single_task/tune.py @@ -0,0 +1,262 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +r"""Run grid search. + +Look at launch_tuning.sh for details on how to tune at scale. + +Usage example: +Tune with one worker on the local machine. + +CONFIG="agent=c(algorithm='pg')," +CONFIG+="env=c(task_cycle=['reverse-tune', 'remove-tune'])" +HPARAM_SPACE_TYPE="pg" +OUT_DIR="/tmp/bf_pg_tune" +MAX_NPE=5000000 +NUM_REPETITIONS=50 +rm -rf $OUT_DIR +mkdir $OUT_DIR +bazel run -c opt single_task:tune -- \ + --alsologtostderr \ + --config="$CONFIG" \ + --max_npe="$MAX_NPE" \ + --num_repetitions="$NUM_REPETITIONS" \ + --logdir="$OUT_DIR" \ + --summary_interval=1 \ + --model_v=0 \ + --hparam_space="$HPARAM_SPACE_TYPE" \ + --tuner_id=0 \ + --num_tuners=1 \ + 2>&1 >"$OUT_DIR/tuner_0.log" +learning/brain/tensorboard/tensorboard.sh --port 12345 --logdir "$OUT_DIR" +""" + +import ast +import os + +from absl import app +from absl import flags +from absl import logging +import numpy as np +from six.moves import xrange +import tensorflow as tf + +from single_task import defaults # brain coder +from single_task import run as run_lib # brain coder + +FLAGS = flags.FLAGS +flags.DEFINE_integer( + 'tuner_id', 0, + 'The unique ID for this tuning worker.') +flags.DEFINE_integer( + 'num_tuners', 1, + 'How many tuners are there.') +flags.DEFINE_string( + 'hparam_space', 'default', + 'String name which denotes the hparam space to tune over. This is ' + 'algorithm dependent.') +flags.DEFINE_string( + 'fixed_hparams', '', + 'HParams string. Used to fix hparams during tuning.') +flags.DEFINE_float( + 'success_rate_objective_weight', 1.0, + 'How much to weight success rate vs num programs seen. 
By default, only ' + 'success rate is optimized (this is the setting used in the paper).') + + +def parse_hparams_string(hparams_str): + hparams = {} + for term in hparams_str.split(','): + if not term: + continue + name, value = term.split('=') + hparams[name.strip()] = ast.literal_eval(value) + return hparams + + +def int_to_multibase(n, bases): + digits = [0] * len(bases) + for i, b in enumerate(bases): + n, d = divmod(n, b) + digits[i] = d + return digits + + +def hparams_for_index(index, tuning_space): + keys = sorted(tuning_space.keys()) + indices = int_to_multibase(index, [len(tuning_space[k]) for k in keys]) + return tf.contrib.training.HParams( + **{k: tuning_space[k][i] for k, i in zip(keys, indices)}) + + +def run_tuner_loop(ns): + """Run tuning loop for this worker.""" + is_chief = FLAGS.task_id == 0 + tuning_space = ns.define_tuner_hparam_space( + hparam_space_type=FLAGS.hparam_space) + fixed_hparams = parse_hparams_string(FLAGS.fixed_hparams) + for name, value in fixed_hparams.iteritems(): + tuning_space[name] = [value] + tuning_space_size = np.prod([len(values) for values in tuning_space.values()]) + + num_local_trials, remainder = divmod(tuning_space_size, FLAGS.num_tuners) + if FLAGS.tuner_id < remainder: + num_local_trials += 1 + starting_trial_id = ( + num_local_trials * FLAGS.tuner_id + min(remainder, FLAGS.tuner_id)) + + logging.info('tuning_space_size: %d', tuning_space_size) + logging.info('num_local_trials: %d', num_local_trials) + logging.info('starting_trial_id: %d', starting_trial_id) + + for local_trial_index in xrange(num_local_trials): + trial_config = defaults.default_config_with_updates(FLAGS.config) + global_trial_index = local_trial_index + starting_trial_id + trial_name = 'trial_' + str(global_trial_index) + trial_dir = os.path.join(FLAGS.logdir, trial_name) + hparams = hparams_for_index(global_trial_index, tuning_space) + ns.write_hparams_to_config( + trial_config, hparams, hparam_space_type=FLAGS.hparam_space) + + results_list = ns.run_training( + config=trial_config, tuner=None, logdir=trial_dir, is_chief=is_chief, + trial_name=trial_name) + + if not is_chief: + # Only chief worker needs to write tuning results to disk. + continue + + objective, metrics = compute_tuning_objective( + results_list, hparams, trial_name, num_trials=tuning_space_size) + logging.info('metrics:\n%s', metrics) + logging.info('objective: %s', objective) + logging.info('programs_seen_fraction: %s', + metrics['programs_seen_fraction']) + logging.info('success_rate: %s', metrics['success_rate']) + logging.info('success_rate_objective_weight: %s', + FLAGS.success_rate_objective_weight) + + tuning_results_file = os.path.join(trial_dir, 'tuning_results.txt') + with tf.gfile.FastGFile(tuning_results_file, 'a') as writer: + writer.write(str(metrics) + '\n') + + logging.info('Trial %s complete.', trial_name) + + +def compute_tuning_objective(results_list, hparams, trial_name, num_trials): + """Compute tuning objective and metrics given results and trial information. + + Args: + results_list: List of results dicts read from disk. These are written by + workers. + hparams: tf.contrib.training.HParams instance containing the hparams used + in this trial (only the hparams which are being tuned). + trial_name: Name of this trial. Used to create a trial directory. + num_trials: Total number of trials that need to be run. This is saved in the + metrics dict for future reference. + + Returns: + objective: The objective computed for this trial. 
Choose the hparams for the + trial with the largest objective value. + metrics: Information about this trial. A dict. + """ + found_solution = [r['found_solution'] for r in results_list] + successful_program_counts = [ + r['npe'] for r in results_list if r['found_solution']] + + success_rate = sum(found_solution) / float(len(results_list)) + + max_programs = FLAGS.max_npe # Per run. + all_program_counts = [ + r['npe'] if r['found_solution'] else max_programs + for r in results_list] + programs_seen_fraction = ( + float(sum(all_program_counts)) + / (max_programs * len(all_program_counts))) + + # min/max/avg stats are over successful runs. + metrics = { + 'num_runs': len(results_list), + 'num_succeeded': sum(found_solution), + 'success_rate': success_rate, + 'programs_seen_fraction': programs_seen_fraction, + 'avg_programs': np.mean(successful_program_counts), + 'max_possible_programs_per_run': max_programs, + 'global_step': sum([r['num_batches'] for r in results_list]), + 'hparams': hparams.values(), + 'trial_name': trial_name, + 'num_trials': num_trials} + + # Report stats per tasks. + tasks = [r['task'] for r in results_list] + for task in set(tasks): + task_list = [r for r in results_list if r['task'] == task] + found_solution = [r['found_solution'] for r in task_list] + successful_rewards = [ + r['best_reward'] for r in task_list + if r['found_solution']] + successful_num_batches = [ + r['num_batches'] + for r in task_list if r['found_solution']] + successful_program_counts = [ + r['npe'] for r in task_list if r['found_solution']] + metrics_append = { + task + '__num_runs': len(task_list), + task + '__num_succeeded': sum(found_solution), + task + '__success_rate': ( + sum(found_solution) / float(len(task_list)))} + metrics.update(metrics_append) + if any(found_solution): + metrics_append = { + task + '__min_reward': min(successful_rewards), + task + '__max_reward': max(successful_rewards), + task + '__avg_reward': np.median(successful_rewards), + task + '__min_programs': min(successful_program_counts), + task + '__max_programs': max(successful_program_counts), + task + '__avg_programs': np.mean(successful_program_counts), + task + '__min_batches': min(successful_num_batches), + task + '__max_batches': max(successful_num_batches), + task + '__avg_batches': np.mean(successful_num_batches)} + metrics.update(metrics_append) + + # Objective will be maximized. + # Maximize success rate, minimize num programs seen. + # Max objective is always 1. 
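+ # Worked example: with success_rate_objective_weight = 0.5, a trial with
+ # success_rate = 0.6 and programs_seen_fraction = 0.3 scores
+ # 0.5 * 0.6 + 0.5 * (1 - 0.3) = 0.65. With the default weight of 1.0 the
+ # objective is simply the success rate.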
+ weight = FLAGS.success_rate_objective_weight + objective = ( + weight * success_rate + + (1 - weight) * (1 - programs_seen_fraction)) + metrics['objective'] = objective + + return objective, metrics + + +def main(argv): + del argv + + logging.set_verbosity(FLAGS.log_level) + + if not FLAGS.logdir: + raise ValueError('logdir flag must be provided.') + if FLAGS.num_workers <= 0: + raise ValueError('num_workers flag must be greater than 0.') + if FLAGS.task_id < 0: + raise ValueError('task_id flag must be greater than or equal to 0.') + if FLAGS.task_id >= FLAGS.num_workers: + raise ValueError( + 'task_id flag must be strictly less than num_workers flag.') + if FLAGS.num_tuners <= 0: + raise ValueError('num_tuners flag must be greater than 0.') + if FLAGS.tuner_id < 0: + raise ValueError('tuner_id flag must be greater than or equal to 0.') + if FLAGS.tuner_id >= FLAGS.num_tuners: + raise ValueError( + 'tuner_id flag must be strictly less than num_tuners flag.') + + ns, _ = run_lib.get_namespace(FLAGS.config) + run_tuner_loop(ns) + + +if __name__ == '__main__': + app.run(main) diff --git a/models/research/cognitive_mapping_and_planning/.gitignore b/models/research/cognitive_mapping_and_planning/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..cbc6a8f0271075171ffdf3c2bc5fb9c528b08fc6 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/.gitignore @@ -0,0 +1,4 @@ +deps +*.pyc +lib*.so +lib*.so* diff --git a/models/research/cognitive_mapping_and_planning/README.md b/models/research/cognitive_mapping_and_planning/README.md new file mode 100644 index 0000000000000000000000000000000000000000..4457bafbb4d229998a01dadc46efe41f4ba1a3e0 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/README.md @@ -0,0 +1,127 @@ +![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +# Cognitive Mapping and Planning for Visual Navigation +**Saurabh Gupta, James Davidson, Sergey Levine, Rahul Sukthankar, Jitendra Malik** + +**Computer Vision and Pattern Recognition (CVPR) 2017.** + +**[ArXiv](https://arxiv.org/abs/1702.03920), +[Project Website](https://sites.google.com/corp/view/cognitive-mapping-and-planning/)** + +### Citing +If you find this code base and models useful in your research, please consider +citing the following paper: + ``` + @inproceedings{gupta2017cognitive, + title={Cognitive Mapping and Planning for Visual Navigation}, + author={Gupta, Saurabh and Davidson, James and Levine, Sergey and + Sukthankar, Rahul and Malik, Jitendra}, + booktitle={CVPR}, + year={2017} + } + ``` + +### Contents +1. [Requirements: software](#requirements-software) +2. [Requirements: data](#requirements-data) +3. [Test Pre-trained Models](#test-pre-trained-models) +4. [Train your Own Models](#train-your-own-models) + +### Requirements: software +1. Python Virtual Env Setup: All code is implemented in Python but depends on a + small number of python packages and a couple of C libraries. We recommend + using virtual environment for installing these python packages and python + bindings for these C libraries. + ```Shell + VENV_DIR=venv + pip install virtualenv + virtualenv $VENV_DIR + source $VENV_DIR/bin/activate + + # You may need to upgrade pip for installing openv-python. 
+ pip install --upgrade pip + # Install simple dependencies. + pip install -r requirements.txt + + # Patch bugs in dependencies. + sh patches/apply_patches.sh + ``` + +2. Install [Tensorflow](https://www.tensorflow.org/) inside this virtual + environment. You will need to use one of the latest nightly builds + (see instructions [here](https://github.com/tensorflow/tensorflow#installation)). + +3. Swiftshader: We use + [Swiftshader](https://github.com/google/swiftshader.git), a CPU based + renderer to render the meshes. It is possible to use other renderers, + replace `SwiftshaderRenderer` in `render/swiftshader_renderer.py` with + bindings to your renderer. + ```Shell + mkdir -p deps + git clone --recursive https://github.com/google/swiftshader.git deps/swiftshader-src + cd deps/swiftshader-src && git checkout 91da6b00584afd7dcaed66da88e2b617429b3950 + git submodule update + mkdir build && cd build && cmake .. && make -j 16 libEGL libGLESv2 + cd ../../../ + cp deps/swiftshader-src/build/libEGL* libEGL.so.1 + cp deps/swiftshader-src/build/libGLESv2* libGLESv2.so.2 + ``` + +4. PyAssimp: We use [PyAssimp](https://github.com/assimp/assimp.git) to load + meshes. It is possible to use other libraries to load meshes, replace + `Shape` `render/swiftshader_renderer.py` with bindings to your library for + loading meshes. + ```Shell + mkdir -p deps + git clone https://github.com/assimp/assimp.git deps/assimp-src + cd deps/assimp-src + git checkout 2afeddd5cb63d14bc77b53740b38a54a97d94ee8 + cmake CMakeLists.txt -G 'Unix Makefiles' && make -j 16 + cd port/PyAssimp && python setup.py install + cd ../../../.. + cp deps/assimp-src/lib/libassimp* . + ``` + +5. graph-tool: We use [graph-tool](https://git.skewed.de/count0/graph-tool) + library for graph processing. + ```Shell + mkdir -p deps + # If the following git clone command fails, you can also download the source + # from https://downloads.skewed.de/graph-tool/graph-tool-2.2.44.tar.bz2 + git clone https://git.skewed.de/count0/graph-tool deps/graph-tool-src + cd deps/graph-tool-src && git checkout 178add3a571feb6666f4f119027705d95d2951ab + bash autogen.sh + ./configure --disable-cairo --disable-sparsehash --prefix=$HOME/.local + make -j 16 + make install + cd ../../ + ``` + +### Requirements: data +1. Download the Stanford 3D Indoor Spaces Dataset (S3DIS Dataset) and ImageNet + Pre-trained models for initializing different models. Follow instructions in + `data/README.md` + +### Test Pre-trained Models +1. Download pre-trained models. See `output/README.md`. + +2. Test models using `scripts/script_test_pretrained_models.sh`. + +### Train Your Own Models +All models were trained asynchronously with 16 workers each worker using data +from a single floor. The default hyper-parameters correspond to this setting. +See [distributed training with +Tensorflow](https://www.tensorflow.org/deploy/distributed) for setting up +distributed training. Training with a single worker is possible with the current +code base but will require some minor changes to allow each worker to load all +training environments. + +### Contact +For questions or issues open an issue on the tensorflow/models [issues +tracker](https://github.com/tensorflow/models/issues). Please assign issues to +@s-gupta. + +### Credits +This code was written by Saurabh Gupta (@s-gupta). 
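+### Distributed Training Sketch
+The "Train Your Own Models" section above defers to TensorFlow's distributed
+training documentation. The snippet below is only a minimal, hypothetical
+sketch of that between-graph setup (one parameter server plus asynchronous
+workers); the host:port strings, task index and model-building step are
+placeholders and not part of this code base.
+```python
+import tensorflow as tf
+
+# Hypothetical cluster: one parameter server and two of the 16 workers.
+cluster = tf.train.ClusterSpec({
+    'ps': ['localhost:2222'],
+    'worker': ['localhost:2223', 'localhost:2224'],
+})
+# A separate process must run the parameter server:
+# tf.train.Server(cluster, job_name='ps', task_index=0).join()
+server = tf.train.Server(cluster, job_name='worker', task_index=0)
+
+# Variables are placed on the parameter server, ops on this worker.
+with tf.device(tf.train.replica_device_setter(
+    cluster=cluster, worker_device='/job:worker/task:0')):
+  global_step = tf.train.get_or_create_global_step()
+  # ... build the model, loss and train_op here ...
+
+with tf.train.MonitoredTrainingSession(master=server.target,
+                                       is_chief=True) as sess:
+  pass  # Run sess.run(train_op) in a loop until convergence.
+```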
diff --git a/models/research/cognitive_mapping_and_planning/__init__.py b/models/research/cognitive_mapping_and_planning/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/cognitive_mapping_and_planning/cfgs/__init__.py b/models/research/cognitive_mapping_and_planning/cfgs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/cognitive_mapping_and_planning/cfgs/config_cmp.py b/models/research/cognitive_mapping_and_planning/cfgs/config_cmp.py new file mode 100644 index 0000000000000000000000000000000000000000..715eee2b973cb66f816ecdb65bbcc3abdd8a9483 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/cfgs/config_cmp.py @@ -0,0 +1,283 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +import os, sys +import numpy as np +from tensorflow.python.platform import app +from tensorflow.python.platform import flags +import logging +import src.utils as utils +import cfgs.config_common as cc + + +import tensorflow as tf + + +rgb_resnet_v2_50_path = 'data/init_models/resnet_v2_50/model.ckpt-5136169' +d_resnet_v2_50_path = 'data/init_models/distill_rgb_to_d_resnet_v2_50/model.ckpt-120002' + +def get_default_args(): + summary_args = utils.Foo(display_interval=1, test_iters=26, + arop_full_summary_iters=14) + + control_args = utils.Foo(train=False, test=False, + force_batchnorm_is_training_at_test=False, + reset_rng_seed=False, only_eval_when_done=False, + test_mode=None) + return summary_args, control_args + +def get_default_cmp_args(): + batch_norm_param = {'center': True, 'scale': True, + 'activation_fn':tf.nn.relu} + + mapper_arch_args = utils.Foo( + dim_reduce_neurons=64, + fc_neurons=[1024, 1024], + fc_out_size=8, + fc_out_neurons=64, + encoder='resnet_v2_50', + deconv_neurons=[64, 32, 16, 8, 4, 2], + deconv_strides=[2, 2, 2, 2, 2, 2], + deconv_layers_per_block=2, + deconv_kernel_size=4, + fc_dropout=0.5, + combine_type='wt_avg_logits', + batch_norm_param=batch_norm_param) + + readout_maps_arch_args = utils.Foo( + num_neurons=[], + strides=[], + kernel_size=None, + layers_per_block=None) + + arch_args = utils.Foo( + vin_val_neurons=8, vin_action_neurons=8, vin_ks=3, vin_share_wts=False, + pred_neurons=[64, 64], pred_batch_norm_param=batch_norm_param, + conv_on_value_map=0, fr_neurons=16, fr_ver='v2', fr_inside_neurons=64, + fr_stride=1, crop_remove_each=30, value_crop_size=4, + action_sample_type='sample', action_sample_combine_type='one_or_other', + sample_gt_prob_type='inverse_sigmoid_decay', dagger_sample_bn_false=True, + vin_num_iters=36, isd_k=750., use_agent_loc=False, multi_scale=True, + readout_maps=False, rom_arch=readout_maps_arch_args) + + return arch_args, mapper_arch_args + +def get_arch_vars(arch_str): + if arch_str == '': vals = [] + 
else: vals = arch_str.split('_') + ks = ['var1', 'var2', 'var3'] + ks = ks[:len(vals)] + + # Exp Ver. + if len(vals) == 0: ks.append('var1'); vals.append('v0') + # custom arch. + if len(vals) == 1: ks.append('var2'); vals.append('') + # map scape for projection baseline. + if len(vals) == 2: ks.append('var3'); vals.append('fr2') + + assert(len(vals) == 3) + + vars = utils.Foo() + for k, v in zip(ks, vals): + setattr(vars, k, v) + + logging.error('arch_vars: %s', vars) + return vars + +def process_arch_str(args, arch_str): + # This function modifies args. + args.arch, args.mapper_arch = get_default_cmp_args() + + arch_vars = get_arch_vars(arch_str) + + args.navtask.task_params.outputs.ego_maps = True + args.navtask.task_params.outputs.ego_goal_imgs = True + args.navtask.task_params.outputs.egomotion = True + args.navtask.task_params.toy_problem = False + + if arch_vars.var1 == 'lmap': + args = process_arch_learned_map(args, arch_vars) + + elif arch_vars.var1 == 'pmap': + args = process_arch_projected_map(args, arch_vars) + + else: + logging.fatal('arch_vars.var1 should be lmap or pmap, but is %s', arch_vars.var1) + assert(False) + + return args + +def process_arch_learned_map(args, arch_vars): + # Multiscale vision based system. + args.navtask.task_params.input_type = 'vision' + args.navtask.task_params.outputs.images = True + + if args.navtask.camera_param.modalities[0] == 'rgb': + args.solver.pretrained_path = rgb_resnet_v2_50_path + elif args.navtask.camera_param.modalities[0] == 'depth': + args.solver.pretrained_path = d_resnet_v2_50_path + + if arch_vars.var2 == 'Ssc': + sc = 1./args.navtask.task_params.step_size + args.arch.vin_num_iters = 40 + args.navtask.task_params.map_scales = [sc] + max_dist = args.navtask.task_params.max_dist * \ + args.navtask.task_params.num_goals + args.navtask.task_params.map_crop_sizes = [2*max_dist] + + args.arch.fr_stride = 1 + args.arch.vin_action_neurons = 8 + args.arch.vin_val_neurons = 3 + args.arch.fr_inside_neurons = 32 + + args.mapper_arch.pad_map_with_zeros_each = [24] + args.mapper_arch.deconv_neurons = [64, 32, 16] + args.mapper_arch.deconv_strides = [1, 2, 1] + + elif (arch_vars.var2 == 'Msc' or arch_vars.var2 == 'MscROMms' or + arch_vars.var2 == 'MscROMss' or arch_vars.var2 == 'MscNoVin'): + # Code for multi-scale planner. + args.arch.vin_num_iters = 8 + args.arch.crop_remove_each = 4 + args.arch.value_crop_size = 8 + + sc = 1./args.navtask.task_params.step_size + max_dist = args.navtask.task_params.max_dist * \ + args.navtask.task_params.num_goals + n_scales = np.log2(float(max_dist) / float(args.arch.vin_num_iters)) + n_scales = int(np.ceil(n_scales)+1) + + args.navtask.task_params.map_scales = \ + list(sc*(0.5**(np.arange(n_scales))[::-1])) + args.navtask.task_params.map_crop_sizes = [16 for x in range(n_scales)] + + args.arch.fr_stride = 1 + args.arch.vin_action_neurons = 8 + args.arch.vin_val_neurons = 3 + args.arch.fr_inside_neurons = 32 + + args.mapper_arch.pad_map_with_zeros_each = [0 for _ in range(n_scales)] + args.mapper_arch.deconv_neurons = [64*n_scales, 32*n_scales, 16*n_scales] + args.mapper_arch.deconv_strides = [1, 2, 1] + + if arch_vars.var2 == 'MscNoVin': + # No planning version. 
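+ # ('MscNoVin' keeps the multi-scale mapper but removes the value-iteration
+ # planner: vin_num_iters is set to 0 and vin_action_neurons to None below.)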
+ args.arch.fr_stride = [1, 2, 1, 2] + args.arch.vin_action_neurons = None + args.arch.vin_val_neurons = 16 + args.arch.fr_inside_neurons = 32 + + args.arch.crop_remove_each = 0 + args.arch.value_crop_size = 4 + args.arch.vin_num_iters = 0 + + elif arch_vars.var2 == 'MscROMms' or arch_vars.var2 == 'MscROMss': + # Code with read outs, MscROMms flattens and reads out, + # MscROMss does not flatten and produces output at multiple scales. + args.navtask.task_params.outputs.readout_maps = True + args.navtask.task_params.map_resize_method = 'antialiasing' + args.arch.readout_maps = True + + if arch_vars.var2 == 'MscROMms': + args.arch.rom_arch.num_neurons = [64, 1] + args.arch.rom_arch.kernel_size = 4 + args.arch.rom_arch.strides = [2,2] + args.arch.rom_arch.layers_per_block = 2 + + args.navtask.task_params.readout_maps_crop_sizes = [64] + args.navtask.task_params.readout_maps_scales = [sc] + + elif arch_vars.var2 == 'MscROMss': + args.arch.rom_arch.num_neurons = \ + [64, len(args.navtask.task_params.map_scales)] + args.arch.rom_arch.kernel_size = 4 + args.arch.rom_arch.strides = [1,1] + args.arch.rom_arch.layers_per_block = 1 + + args.navtask.task_params.readout_maps_crop_sizes = \ + args.navtask.task_params.map_crop_sizes + args.navtask.task_params.readout_maps_scales = \ + args.navtask.task_params.map_scales + + else: + logging.fatal('arch_vars.var2 not one of Msc, MscROMms, MscROMss, MscNoVin.') + assert(False) + + map_channels = args.mapper_arch.deconv_neurons[-1] / \ + (2*len(args.navtask.task_params.map_scales)) + args.navtask.task_params.map_channels = map_channels + + return args + +def process_arch_projected_map(args, arch_vars): + # Single scale vision based system which does not use a mapper but instead + # uses an analytically estimated map. + ds = int(arch_vars.var3[2]) + args.navtask.task_params.input_type = 'analytical_counts' + args.navtask.task_params.outputs.analytical_counts = True + + assert(args.navtask.task_params.modalities[0] == 'depth') + args.navtask.camera_param.img_channels = None + + analytical_counts = utils.Foo(map_sizes=[512/ds], + xy_resolution=[5.*ds], + z_bins=[[-10, 10, 150, 200]], + non_linearity=[arch_vars.var2]) + args.navtask.task_params.analytical_counts = analytical_counts + + sc = 1./ds + args.arch.vin_num_iters = 36 + args.navtask.task_params.map_scales = [sc] + args.navtask.task_params.map_crop_sizes = [512/ds] + + args.arch.fr_stride = [1,2] + args.arch.vin_action_neurons = 8 + args.arch.vin_val_neurons = 3 + args.arch.fr_inside_neurons = 32 + + map_channels = len(analytical_counts.z_bins[0]) + 1 + args.navtask.task_params.map_channels = map_channels + args.solver.freeze_conv = False + + return args + +def get_args_for_config(config_name): + args = utils.Foo() + + args.summary, args.control = get_default_args() + + exp_name, mode_str = config_name.split('+') + arch_str, solver_str, navtask_str = exp_name.split('.') + logging.error('config_name: %s', config_name) + logging.error('arch_str: %s', arch_str) + logging.error('navtask_str: %s', navtask_str) + logging.error('solver_str: %s', solver_str) + logging.error('mode_str: %s', mode_str) + + args.solver = cc.process_solver_str(solver_str) + args.navtask = cc.process_navtask_str(navtask_str) + + args = process_arch_str(args, arch_str) + args.arch.isd_k = args.solver.isd_k + + # Train, test, etc. 
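+ # mode_str has the form '<mode>_<imset>', e.g. 'train_train1' trains on the
+ # 'train1' split and 'bench_test' benchmarks on the test split.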
+ mode, imset = mode_str.split('_') + args = cc.adjust_args_for_mode(args, mode) + args.navtask.building_names = args.navtask.dataset.get_split(imset) + args.control.test_name = '{:s}_on_{:s}'.format(mode, imset) + + # Log the arguments + logging.error('%s', args) + return args diff --git a/models/research/cognitive_mapping_and_planning/cfgs/config_common.py b/models/research/cognitive_mapping_and_planning/cfgs/config_common.py new file mode 100644 index 0000000000000000000000000000000000000000..440bf5b72f87a1eeca38e22f33b22e82de7345c0 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/cfgs/config_common.py @@ -0,0 +1,261 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +import os +import numpy as np +import logging +import src.utils as utils +import datasets.nav_env_config as nec +from datasets import factory + +def adjust_args_for_mode(args, mode): + if mode == 'train': + args.control.train = True + + elif mode == 'val1': + # Same settings as for training, to make sure nothing wonky is happening + # there. + args.control.test = True + args.control.test_mode = 'val' + args.navtask.task_params.batch_size = 32 + + elif mode == 'val2': + # No data augmentation, not sampling but taking the argmax action, not + # sampling from the ground truth at all. + args.control.test = True + args.arch.action_sample_type = 'argmax' + args.arch.sample_gt_prob_type = 'zero' + args.navtask.task_params.data_augment = \ + utils.Foo(lr_flip=0, delta_angle=0, delta_xy=0, relight=False, + relight_fast=False, structured=False) + args.control.test_mode = 'val' + args.navtask.task_params.batch_size = 32 + + elif mode == 'bench': + # Actually testing the agent in settings that are kept same between + # different runs. + args.navtask.task_params.batch_size = 16 + args.control.test = True + args.arch.action_sample_type = 'argmax' + args.arch.sample_gt_prob_type = 'zero' + args.navtask.task_params.data_augment = \ + utils.Foo(lr_flip=0, delta_angle=0, delta_xy=0, relight=False, + relight_fast=False, structured=False) + args.summary.test_iters = 250 + args.control.only_eval_when_done = True + args.control.reset_rng_seed = True + args.control.test_mode = 'test' + else: + logging.fatal('Unknown mode: %s.', mode) + assert(False) + return args + +def get_solver_vars(solver_str): + if solver_str == '': vals = []; + else: vals = solver_str.split('_') + ks = ['clip', 'dlw', 'long', 'typ', 'isdk', 'adam_eps', 'init_lr']; + ks = ks[:len(vals)] + + # Gradient clipping or not. + if len(vals) == 0: ks.append('clip'); vals.append('noclip'); + # data loss weight. + if len(vals) == 1: ks.append('dlw'); vals.append('dlw20') + # how long to train for. 
+ if len(vals) == 2: ks.append('long'); vals.append('nolong') + # Adam + if len(vals) == 3: ks.append('typ'); vals.append('adam2') + # reg loss wt + if len(vals) == 4: ks.append('rlw'); vals.append('rlw1') + # isd_k + if len(vals) == 5: ks.append('isdk'); vals.append('isdk415') # 415, inflexion at 2.5k. + # adam eps + if len(vals) == 6: ks.append('adam_eps'); vals.append('aeps1en8') + # init lr + if len(vals) == 7: ks.append('init_lr'); vals.append('lr1en3') + + assert(len(vals) == 8) + + vars = utils.Foo() + for k, v in zip(ks, vals): + setattr(vars, k, v) + logging.error('solver_vars: %s', vars) + return vars + +def process_solver_str(solver_str): + solver = utils.Foo( + seed=0, learning_rate_decay=None, clip_gradient_norm=None, max_steps=None, + initial_learning_rate=None, momentum=None, steps_per_decay=None, + logdir=None, sync=False, adjust_lr_sync=True, wt_decay=0.0001, + data_loss_wt=None, reg_loss_wt=None, freeze_conv=True, num_workers=1, + task=0, ps_tasks=0, master='local', typ=None, momentum2=None, + adam_eps=None) + + # Clobber with overrides from solver str. + solver_vars = get_solver_vars(solver_str) + + solver.data_loss_wt = float(solver_vars.dlw[3:].replace('x', '.')) + solver.adam_eps = float(solver_vars.adam_eps[4:].replace('x', '.').replace('n', '-')) + solver.initial_learning_rate = float(solver_vars.init_lr[2:].replace('x', '.').replace('n', '-')) + solver.reg_loss_wt = float(solver_vars.rlw[3:].replace('x', '.')) + solver.isd_k = float(solver_vars.isdk[4:].replace('x', '.')) + + long = solver_vars.long + if long == 'long': + solver.steps_per_decay = 40000 + solver.max_steps = 120000 + elif long == 'long2': + solver.steps_per_decay = 80000 + solver.max_steps = 120000 + elif long == 'nolong' or long == 'nol': + solver.steps_per_decay = 20000 + solver.max_steps = 60000 + else: + logging.fatal('solver_vars.long should be long, long2, nolong or nol.') + assert(False) + + clip = solver_vars.clip + if clip == 'noclip' or clip == 'nocl': + solver.clip_gradient_norm = 0 + elif clip[:4] == 'clip': + solver.clip_gradient_norm = float(clip[4:].replace('x', '.')) + else: + logging.fatal('Unknown solver_vars.clip: %s', clip) + assert(False) + + typ = solver_vars.typ + if typ == 'adam': + solver.typ = 'adam' + solver.momentum = 0.9 + solver.momentum2 = 0.999 + solver.learning_rate_decay = 1.0 + elif typ == 'adam2': + solver.typ = 'adam' + solver.momentum = 0.9 + solver.momentum2 = 0.999 + solver.learning_rate_decay = 0.1 + elif typ == 'sgd': + solver.typ = 'sgd' + solver.momentum = 0.99 + solver.momentum2 = None + solver.learning_rate_decay = 0.1 + else: + logging.fatal('Unknown solver_vars.typ: %s', typ) + assert(False) + + logging.error('solver: %s', solver) + return solver + +def get_navtask_vars(navtask_str): + if navtask_str == '': vals = [] + else: vals = navtask_str.split('_') + + ks_all = ['dataset_name', 'modality', 'task', 'history', 'max_dist', + 'num_steps', 'step_size', 'n_ori', 'aux_views', 'data_aug'] + ks = ks_all[:len(vals)] + + # All data or not. + if len(vals) == 0: ks.append('dataset_name'); vals.append('sbpd') + # modality + if len(vals) == 1: ks.append('modality'); vals.append('rgb') + # semantic task? + if len(vals) == 2: ks.append('task'); vals.append('r2r') + # number of history frames. 
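+ # ('h0' means no history frames; 'h<k>' keeps k previous frames, giving
+ # n_views = 1 + k in process_navtask_str below.)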
+ if len(vals) == 3: ks.append('history'); vals.append('h0') + # max steps + if len(vals) == 4: ks.append('max_dist'); vals.append('32') + # num steps + if len(vals) == 5: ks.append('num_steps'); vals.append('40') + # step size + if len(vals) == 6: ks.append('step_size'); vals.append('8') + # n_ori + if len(vals) == 7: ks.append('n_ori'); vals.append('4') + # Auxiliary views. + if len(vals) == 8: ks.append('aux_views'); vals.append('nv0') + # Normal data augmentation as opposed to structured data augmentation (if set + # to straug. + if len(vals) == 9: ks.append('data_aug'); vals.append('straug') + + assert(len(vals) == 10) + for i in range(len(ks)): + assert(ks[i] == ks_all[i]) + + vars = utils.Foo() + for k, v in zip(ks, vals): + setattr(vars, k, v) + logging.error('navtask_vars: %s', vals) + return vars + +def process_navtask_str(navtask_str): + navtask = nec.nav_env_base_config() + + # Clobber with overrides from strings. + navtask_vars = get_navtask_vars(navtask_str) + + navtask.task_params.n_ori = int(navtask_vars.n_ori) + navtask.task_params.max_dist = int(navtask_vars.max_dist) + navtask.task_params.num_steps = int(navtask_vars.num_steps) + navtask.task_params.step_size = int(navtask_vars.step_size) + navtask.task_params.data_augment.delta_xy = int(navtask_vars.step_size)/2. + n_aux_views_each = int(navtask_vars.aux_views[2]) + aux_delta_thetas = np.concatenate((np.arange(n_aux_views_each) + 1, + -1 -np.arange(n_aux_views_each))) + aux_delta_thetas = aux_delta_thetas*np.deg2rad(navtask.camera_param.fov) + navtask.task_params.aux_delta_thetas = aux_delta_thetas + + if navtask_vars.data_aug == 'aug': + navtask.task_params.data_augment.structured = False + elif navtask_vars.data_aug == 'straug': + navtask.task_params.data_augment.structured = True + else: + logging.fatal('Unknown navtask_vars.data_aug %s.', navtask_vars.data_aug) + assert(False) + + navtask.task_params.num_history_frames = int(navtask_vars.history[1:]) + navtask.task_params.n_views = 1+navtask.task_params.num_history_frames + + navtask.task_params.goal_channels = int(navtask_vars.n_ori) + + if navtask_vars.task == 'hard': + navtask.task_params.type = 'rng_rejection_sampling_many' + navtask.task_params.rejection_sampling_M = 2000 + navtask.task_params.min_dist = 10 + elif navtask_vars.task == 'r2r': + navtask.task_params.type = 'room_to_room_many' + elif navtask_vars.task == 'ST': + # Semantic task at hand. 
+ navtask.task_params.goal_channels = \ + len(navtask.task_params.semantic_task.class_map_names) + navtask.task_params.rel_goal_loc_dim = \ + len(navtask.task_params.semantic_task.class_map_names) + navtask.task_params.type = 'to_nearest_obj_acc' + else: + logging.fatal('navtask_vars.task: should be hard or r2r, ST') + assert(False) + + if navtask_vars.modality == 'rgb': + navtask.camera_param.modalities = ['rgb'] + navtask.camera_param.img_channels = 3 + elif navtask_vars.modality == 'd': + navtask.camera_param.modalities = ['depth'] + navtask.camera_param.img_channels = 2 + + navtask.task_params.img_height = navtask.camera_param.height + navtask.task_params.img_width = navtask.camera_param.width + navtask.task_params.modalities = navtask.camera_param.modalities + navtask.task_params.img_channels = navtask.camera_param.img_channels + navtask.task_params.img_fov = navtask.camera_param.fov + + navtask.dataset = factory.get_dataset(navtask_vars.dataset_name) + return navtask diff --git a/models/research/cognitive_mapping_and_planning/cfgs/config_distill.py b/models/research/cognitive_mapping_and_planning/cfgs/config_distill.py new file mode 100644 index 0000000000000000000000000000000000000000..53be2f8a5f12ee701a53c1c354079659da6958d4 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/cfgs/config_distill.py @@ -0,0 +1,114 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +import pprint +import copy +import os +from tensorflow.python.platform import app +from tensorflow.python.platform import flags +import logging +import src.utils as utils +import cfgs.config_common as cc + + +import tensorflow as tf + +rgb_resnet_v2_50_path = 'cache/resnet_v2_50_inception_preprocessed/model.ckpt-5136169' + +def get_default_args(): + robot = utils.Foo(radius=15, base=10, height=140, sensor_height=120, + camera_elevation_degree=-15) + + camera_param = utils.Foo(width=225, height=225, z_near=0.05, z_far=20.0, + fov=60., modalities=['rgb', 'depth']) + + env = utils.Foo(padding=10, resolution=5, num_point_threshold=2, + valid_min=-10, valid_max=200, n_samples_per_face=200) + + data_augment = utils.Foo(lr_flip=0, delta_angle=1, delta_xy=4, relight=False, + relight_fast=False, structured=False) + + task_params = utils.Foo(num_actions=4, step_size=4, num_steps=0, + batch_size=32, room_seed=0, base_class='Building', + task='mapping', n_ori=6, data_augment=data_augment, + output_transform_to_global_map=False, + output_canonical_map=False, + output_incremental_transform=False, + output_free_space=False, move_type='shortest_path', + toy_problem=0) + + buildinger_args = utils.Foo(building_names=['area1_gates_wingA_floor1_westpart'], + env_class=None, robot=robot, + task_params=task_params, env=env, + camera_param=camera_param) + + solver_args = utils.Foo(seed=0, learning_rate_decay=0.1, + clip_gradient_norm=0, max_steps=120000, + initial_learning_rate=0.001, momentum=0.99, + steps_per_decay=40000, logdir=None, sync=False, + adjust_lr_sync=True, wt_decay=0.0001, + data_loss_wt=1.0, reg_loss_wt=1.0, + num_workers=1, task=0, ps_tasks=0, master='local') + + summary_args = utils.Foo(display_interval=1, test_iters=100) + + control_args = utils.Foo(train=False, test=False, + force_batchnorm_is_training_at_test=False) + + arch_args = utils.Foo(rgb_encoder='resnet_v2_50', d_encoder='resnet_v2_50') + + return utils.Foo(solver=solver_args, + summary=summary_args, control=control_args, arch=arch_args, + buildinger=buildinger_args) + +def get_vars(config_name): + vars = config_name.split('_') + if len(vars) == 1: # All data or not. 
+ vars.append('noall') + if len(vars) == 2: # n_ori + vars.append('4') + logging.error('vars: %s', vars) + return vars + +def get_args_for_config(config_name): + args = get_default_args() + config_name, mode = config_name.split('+') + vars = get_vars(config_name) + + logging.info('config_name: %s, mode: %s', config_name, mode) + + args.buildinger.task_params.n_ori = int(vars[2]) + args.solver.freeze_conv = True + args.solver.pretrained_path = rgb_resnet_v2_50_path + args.buildinger.task_params.img_channels = 5 + args.solver.data_loss_wt = 0.00001 + + if vars[0] == 'v0': + None + else: + logging.error('config_name: %s undefined', config_name) + + args.buildinger.task_params.height = args.buildinger.camera_param.height + args.buildinger.task_params.width = args.buildinger.camera_param.width + args.buildinger.task_params.modalities = args.buildinger.camera_param.modalities + + if vars[1] == 'all': + args = cc.get_args_for_mode_building_all(args, mode) + elif vars[1] == 'noall': + args = cc.get_args_for_mode_building(args, mode) + + # Log the arguments + logging.error('%s', args) + return args diff --git a/models/research/cognitive_mapping_and_planning/cfgs/config_vision_baseline.py b/models/research/cognitive_mapping_and_planning/cfgs/config_vision_baseline.py new file mode 100644 index 0000000000000000000000000000000000000000..3cc64fe594ab025fbcfb41543302fa42c7fc0074 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/cfgs/config_vision_baseline.py @@ -0,0 +1,173 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +import pprint +import os +import numpy as np +from tensorflow.python.platform import app +from tensorflow.python.platform import flags +import logging +import src.utils as utils +import cfgs.config_common as cc +import datasets.nav_env_config as nec + + +import tensorflow as tf + +FLAGS = flags.FLAGS + +get_solver_vars = cc.get_solver_vars +get_navtask_vars = cc.get_navtask_vars + + +rgb_resnet_v2_50_path = 'data/init_models/resnet_v2_50/model.ckpt-5136169' +d_resnet_v2_50_path = 'data/init_models/distill_rgb_to_d_resnet_v2_50/model.ckpt-120002' + +def get_default_args(): + summary_args = utils.Foo(display_interval=1, test_iters=26, + arop_full_summary_iters=14) + + control_args = utils.Foo(train=False, test=False, + force_batchnorm_is_training_at_test=False, + reset_rng_seed=False, only_eval_when_done=False, + test_mode=None) + return summary_args, control_args + +def get_default_baseline_args(): + batch_norm_param = {'center': True, 'scale': True, + 'activation_fn':tf.nn.relu} + arch_args = utils.Foo( + pred_neurons=[], goal_embed_neurons=[], img_embed_neurons=[], + batch_norm_param=batch_norm_param, dim_reduce_neurons=64, combine_type='', + encoder='resnet_v2_50', action_sample_type='sample', + action_sample_combine_type='one_or_other', + sample_gt_prob_type='inverse_sigmoid_decay', dagger_sample_bn_false=True, + isd_k=750., use_visit_count=False, lstm_output=False, lstm_ego=False, + lstm_img=False, fc_dropout=0.0, embed_goal_for_state=False, + lstm_output_init_state_from_goal=False) + return arch_args + +def get_arch_vars(arch_str): + if arch_str == '': vals = [] + else: vals = arch_str.split('_') + + ks = ['ver', 'lstm_dim', 'dropout'] + + # Exp Ver + if len(vals) == 0: vals.append('v0') + # LSTM dimentsions + if len(vals) == 1: vals.append('lstm2048') + # Dropout + if len(vals) == 2: vals.append('noDO') + + assert(len(vals) == 3) + + vars = utils.Foo() + for k, v in zip(ks, vals): + setattr(vars, k, v) + + logging.error('arch_vars: %s', vars) + return vars + +def process_arch_str(args, arch_str): + # This function modifies args. + args.arch = get_default_baseline_args() + arch_vars = get_arch_vars(arch_str) + + args.navtask.task_params.outputs.rel_goal_loc = True + args.navtask.task_params.input_type = 'vision' + args.navtask.task_params.outputs.images = True + + if args.navtask.camera_param.modalities[0] == 'rgb': + args.solver.pretrained_path = rgb_resnet_v2_50_path + elif args.navtask.camera_param.modalities[0] == 'depth': + args.solver.pretrained_path = d_resnet_v2_50_path + else: + logging.fatal('Neither of rgb or d') + + if arch_vars.dropout == 'DO': + args.arch.fc_dropout = 0.5 + + args.tfcode = 'B' + + exp_ver = arch_vars.ver + if exp_ver == 'v0': + # Multiplicative interaction between goal loc and image features. + args.arch.combine_type = 'multiply' + args.arch.pred_neurons = [256, 256] + args.arch.goal_embed_neurons = [64, 8] + args.arch.img_embed_neurons = [1024, 512, 256*8] + + elif exp_ver == 'v1': + # Additive interaction between goal and image features. + args.arch.combine_type = 'add' + args.arch.pred_neurons = [256, 256] + args.arch.goal_embed_neurons = [64, 256] + args.arch.img_embed_neurons = [1024, 512, 256] + + elif exp_ver == 'v2': + # LSTM at the output on top of multiple interactions. 
+ args.arch.combine_type = 'multiply' + args.arch.goal_embed_neurons = [64, 8] + args.arch.img_embed_neurons = [1024, 512, 256*8] + args.arch.lstm_output = True + args.arch.lstm_output_dim = int(arch_vars.lstm_dim[4:]) + args.arch.pred_neurons = [256] # The other is inside the LSTM. + + elif exp_ver == 'v0blind': + # LSTM only on the goal location. + args.arch.combine_type = 'goalonly' + args.arch.goal_embed_neurons = [64, 256] + args.arch.img_embed_neurons = [2] # I dont know what it will do otherwise. + args.arch.lstm_output = True + args.arch.lstm_output_dim = 256 + args.arch.pred_neurons = [256] # The other is inside the LSTM. + + else: + logging.fatal('exp_ver: %s undefined', exp_ver) + assert(False) + + # Log the arguments + logging.error('%s', args) + return args + +def get_args_for_config(config_name): + args = utils.Foo() + + args.summary, args.control = get_default_args() + + exp_name, mode_str = config_name.split('+') + arch_str, solver_str, navtask_str = exp_name.split('.') + logging.error('config_name: %s', config_name) + logging.error('arch_str: %s', arch_str) + logging.error('navtask_str: %s', navtask_str) + logging.error('solver_str: %s', solver_str) + logging.error('mode_str: %s', mode_str) + + args.solver = cc.process_solver_str(solver_str) + args.navtask = cc.process_navtask_str(navtask_str) + + args = process_arch_str(args, arch_str) + args.arch.isd_k = args.solver.isd_k + + # Train, test, etc. + mode, imset = mode_str.split('_') + args = cc.adjust_args_for_mode(args, mode) + args.navtask.building_names = args.navtask.dataset.get_split(imset) + args.control.test_name = '{:s}_on_{:s}'.format(mode, imset) + + # Log the arguments + logging.error('%s', args) + return args diff --git a/models/research/cognitive_mapping_and_planning/data/.gitignore b/models/research/cognitive_mapping_and_planning/data/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..2b6d5e46652d14a9c0a8025dbcccfc2dd4376e4a --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/data/.gitignore @@ -0,0 +1,3 @@ +stanford_building_parser_dataset_raw +stanford_building_parser_dataset +init_models diff --git a/models/research/cognitive_mapping_and_planning/data/README.md b/models/research/cognitive_mapping_and_planning/data/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a8928345351dac19c0e12fd33f99dd2aa600e23b --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/data/README.md @@ -0,0 +1,33 @@ +This directory contains the data needed for training and benchmarking various +navigation models. + +1. Download the data from the [dataset website] + (http://buildingparser.stanford.edu/dataset.html). + 1. [Raw meshes](https://goo.gl/forms/2YSPaO2UKmn5Td5m2). We need the meshes + which are in the noXYZ folder. Download the tar files and place them in + the `stanford_building_parser_dataset_raw` folder. You need to download + `area_1_noXYZ.tar`, `area_3_noXYZ.tar`, `area_5a_noXYZ.tar`, + `area_5b_noXYZ.tar`, `area_6_noXYZ.tar` for training and + `area_4_noXYZ.tar` for evaluation. + 2. [Annotations](https://goo.gl/forms/4SoGp4KtH1jfRqEj2) for setting up + tasks. We will need the file called `Stanford3dDataset_v1.2.zip`. Place + the file in the directory `stanford_building_parser_dataset_raw`. + +2. Preprocess the data. + 1. Extract meshes using `scripts/script_preprocess_meshes_S3DIS.sh`. 
+After this `ls data/stanford_building_parser_dataset/mesh` should have 6
+     folders `area1`, `area3`, `area4`, `area5a`, `area5b`, `area6`, with
+     textures and obj files within each directory.
+  2. Extract room information and semantics from the zip file using
+     `scripts/script_preprocess_annoations_S3DIS.sh`. After this there should
+     be `room-dimension` and `class-maps` folders in
+     `data/stanford_building_parser_dataset`. (If this script crashes because
+     of an exception in np.loadtxt while processing
+     `Area_5/office_19/Annotations/ceiling_1.txt`, there is a special
+     character on line 323474 that should be removed manually.)
+
+3. Download ImageNet Pre-trained models. We used ResNet-v2-50 for representing
+   images. For RGB images this is pre-trained on ImageNet. For Depth images we
+   [distill](https://arxiv.org/abs/1507.00448) the RGB model to depth images
+   using paired RGB-D images. Both these models are available through
+   `scripts/script_download_init_models.sh`.
diff --git a/models/research/cognitive_mapping_and_planning/datasets/__init__.py b/models/research/cognitive_mapping_and_planning/datasets/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/models/research/cognitive_mapping_and_planning/datasets/factory.py b/models/research/cognitive_mapping_and_planning/datasets/factory.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f7b5c0a602dbacf9619dc1c2ec98e94200428b6
--- /dev/null
+++ b/models/research/cognitive_mapping_and_planning/datasets/factory.py
@@ -0,0 +1,113 @@
+# Copyright 2016 The TensorFlow Authors All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+r"""Wrapper for selecting the navigation environment that we want to train and
+test on.
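+
+Example usage (illustrative sketch):
+  dataset = get_dataset('sbpd')
+  train_buildings = dataset.get_split('train')
+  # e.g. ['area1', 'area5a', 'area5b', 'area6'] for the 'train' split.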
+""" +import numpy as np +import os, glob +import platform + +import logging +from tensorflow.python.platform import app +from tensorflow.python.platform import flags + +import render.swiftshader_renderer as renderer +import src.file_utils as fu +import src.utils as utils + +def get_dataset(dataset_name): + if dataset_name == 'sbpd': + dataset = StanfordBuildingParserDataset(dataset_name) + else: + logging.fatal('Not one of sbpd') + return dataset + +class Loader(): + def get_data_dir(): + pass + + def get_meta_data(self, file_name, data_dir=None): + if data_dir is None: + data_dir = self.get_data_dir() + full_file_name = os.path.join(data_dir, 'meta', file_name) + assert(fu.exists(full_file_name)), \ + '{:s} does not exist'.format(full_file_name) + ext = os.path.splitext(full_file_name)[1] + if ext == '.txt': + ls = [] + with fu.fopen(full_file_name, 'r') as f: + for l in f: + ls.append(l.rstrip()) + elif ext == '.pkl': + ls = utils.load_variables(full_file_name) + return ls + + def load_building(self, name, data_dir=None): + if data_dir is None: + data_dir = self.get_data_dir() + out = {} + out['name'] = name + out['data_dir'] = data_dir + out['room_dimension_file'] = os.path.join(data_dir, 'room-dimension', + name+'.pkl') + out['class_map_folder'] = os.path.join(data_dir, 'class-maps') + return out + + def load_building_meshes(self, building): + dir_name = os.path.join(building['data_dir'], 'mesh', building['name']) + mesh_file_name = glob.glob1(dir_name, '*.obj')[0] + mesh_file_name_full = os.path.join(dir_name, mesh_file_name) + logging.error('Loading building from obj file: %s', mesh_file_name_full) + shape = renderer.Shape(mesh_file_name_full, load_materials=True, + name_prefix=building['name']+'_') + return [shape] + +class StanfordBuildingParserDataset(Loader): + def __init__(self, ver): + self.ver = ver + self.data_dir = None + + def get_data_dir(self): + if self.data_dir is None: + self.data_dir = 'data/stanford_building_parser_dataset/' + return self.data_dir + + def get_benchmark_sets(self): + return self._get_benchmark_sets() + + def get_split(self, split_name): + if self.ver == 'sbpd': + return self._get_split(split_name) + else: + logging.fatal('Unknown version.') + + def _get_benchmark_sets(self): + sets = ['train1', 'val', 'test'] + return sets + + def _get_split(self, split_name): + train = ['area1', 'area5a', 'area5b', 'area6'] + train1 = ['area1'] + val = ['area3'] + test = ['area4'] + + sets = {} + sets['train'] = train + sets['train1'] = train1 + sets['val'] = val + sets['test'] = test + sets['all'] = sorted(list(set(train + val + test))) + return sets[split_name] diff --git a/models/research/cognitive_mapping_and_planning/datasets/nav_env.py b/models/research/cognitive_mapping_and_planning/datasets/nav_env.py new file mode 100644 index 0000000000000000000000000000000000000000..5710e26dcb113121d99400cb060104224dd91749 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/datasets/nav_env.py @@ -0,0 +1,1465 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+r"""Navigation Environment. Includes the following classes along with some
+helper functions.
+  Building: Loads buildings, computes traversibility, exposes functionality for
+  rendering images.
+
+  GridWorld: Base class which implements functionality for moving an agent on a
+  grid world.
+
+  NavigationEnv: Base class which generates navigation problems on a grid world.
+
+  VisualNavigationEnv: Builds upon NavigationEnv and Building to provide
+  interface that is used externally to train the agent.
+
+  MeshMapper: Class used for distilling the model, testing the mapper.
+
+  BuildingMultiplexer: Wrapper class that instantiates a VisualNavigationEnv for
+  each building and multiplexes between them as needed.
+"""
+
+import numpy as np
+import os
+import re
+import matplotlib.pyplot as plt
+
+import graph_tool as gt
+import graph_tool.topology
+
+from tensorflow.python.platform import gfile
+import logging
+import src.file_utils as fu
+import src.utils as utils
+import src.graph_utils as gu
+import src.map_utils as mu
+import src.depth_utils as du
+import render.swiftshader_renderer as sru
+from render.swiftshader_renderer import SwiftshaderRenderer
+import cv2
+
+label_nodes_with_class = gu.label_nodes_with_class
+label_nodes_with_class_geodesic = gu.label_nodes_with_class_geodesic
+get_distance_node_list = gu.get_distance_node_list
+convert_to_graph_tool = gu.convert_to_graph_tool
+generate_graph = gu.generate_graph
+get_hardness_distribution = gu.get_hardness_distribution
+rng_next_goal_rejection_sampling = gu.rng_next_goal_rejection_sampling
+rng_next_goal = gu.rng_next_goal
+rng_room_to_room = gu.rng_room_to_room
+rng_target_dist_field = gu.rng_target_dist_field
+
+compute_traversibility = mu.compute_traversibility
+make_map = mu.make_map
+resize_maps = mu.resize_maps
+pick_largest_cc = mu.pick_largest_cc
+get_graph_origin_loc = mu.get_graph_origin_loc
+generate_egocentric_maps = mu.generate_egocentric_maps
+generate_goal_images = mu.generate_goal_images
+get_map_to_predict = mu.get_map_to_predict
+
+bin_points = du.bin_points
+make_geocentric = du.make_geocentric
+get_point_cloud_from_z = du.get_point_cloud_from_z
+get_camera_matrix = du.get_camera_matrix
+
+def _get_semantic_maps(folder_name, building_name, map, flip):
+  # Load file from the cache.
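+  # The cache file name encodes the building name, map size, map origin,
+  # resolution and flip flag, e.g. (hypothetical values)
+  # 'area1_800_600_0_0_5_0.pkl'.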
+ file_name = '{:s}_{:d}_{:d}_{:d}_{:d}_{:d}_{:d}.pkl' + file_name = file_name.format(building_name, map.size[0], map.size[1], + map.origin[0], map.origin[1], map.resolution, + flip) + file_name = os.path.join(folder_name, file_name) + logging.info('Loading semantic maps from %s.', file_name) + + if fu.exists(file_name): + a = utils.load_variables(file_name) + maps = a['maps'] #HxWx#C + cats = a['cats'] + else: + logging.error('file_name: %s not found.', file_name) + maps = None + cats = None + return maps, cats + +def _select_classes(all_maps, all_cats, cats_to_use): + inds = [] + for c in cats_to_use: + ind = all_cats.index(c) + inds.append(ind) + out_maps = all_maps[:,:,inds] + return out_maps + +def _get_room_dimensions(file_name, resolution, origin, flip=False): + if fu.exists(file_name): + a = utils.load_variables(file_name)['room_dimension'] + names = a.keys() + dims = np.concatenate(a.values(), axis=0).reshape((-1,6)) + ind = np.argsort(names) + dims = dims[ind,:] + names = [names[x] for x in ind] + if flip: + dims_new = dims*1 + dims_new[:,1] = -dims[:,4] + dims_new[:,4] = -dims[:,1] + dims = dims_new*1 + + dims = dims*100. + dims[:,0] = dims[:,0] - origin[0] + dims[:,1] = dims[:,1] - origin[1] + dims[:,3] = dims[:,3] - origin[0] + dims[:,4] = dims[:,4] - origin[1] + dims = dims / resolution + out = {'names': names, 'dims': dims} + else: + out = None + return out + +def _filter_rooms(room_dims, room_regex): + pattern = re.compile(room_regex) + ind = [] + for i, name in enumerate(room_dims['names']): + if pattern.match(name): + ind.append(i) + new_room_dims = {} + new_room_dims['names'] = [room_dims['names'][i] for i in ind] + new_room_dims['dims'] = room_dims['dims'][ind,:]*1 + return new_room_dims + +def _label_nodes_with_room_id(xyt, room_dims): + # Label the room with the ID into things. + node_room_id = -1*np.ones((xyt.shape[0], 1)) + dims = room_dims['dims'] + for x, name in enumerate(room_dims['names']): + all_ = np.concatenate((xyt[:,[0]] >= dims[x,0], + xyt[:,[0]] <= dims[x,3], + xyt[:,[1]] >= dims[x,1], + xyt[:,[1]] <= dims[x,4]), axis=1) + node_room_id[np.all(all_, axis=1), 0] = x + return node_room_id + +def get_path_ids(start_node_id, end_node_id, pred_map): + id = start_node_id + path = [id] + while id != end_node_id: + id = pred_map[id] + path.append(id) + return path + +def image_pre(images, modalities): + # Assumes images are ...xHxWxC. + # We always assume images are RGB followed by Depth. + if 'depth' in modalities: + d = images[...,-1][...,np.newaxis]*1. + d[d < 0.01] = np.NaN; isnan = np.isnan(d); + d = 100./d; d[isnan] = 0.; + images = np.concatenate((images[...,:-1], d, isnan), axis=images.ndim-1) + if 'rgb' in modalities: + images[...,:3] = images[...,:3]*1. - 128 + return images + +def _get_relative_goal_loc(goal_loc, loc, theta): + r = np.sqrt(np.sum(np.square(goal_loc - loc), axis=1)) + t = np.arctan2(goal_loc[:,1] - loc[:,1], goal_loc[:,0] - loc[:,0]) + t = t-theta[:,0] + np.pi/2 + return np.expand_dims(r,axis=1), np.expand_dims(t, axis=1) + +def _gen_perturbs(rng, batch_size, num_steps, lr_flip, delta_angle, delta_xy, + structured): + perturbs = [] + for i in range(batch_size): + # Doing things one by one for each episode in this batch. This way this + # remains replicatable even when we change the batch size. + p = np.zeros((num_steps+1, 4)) + if lr_flip: + # Flip the whole trajectory. 
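+      # A positive value in column 3 marks this episode as left-right flipped;
+      # rendered images and sampled maps are mirrored accordingly later on.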
+ p[:,3] = rng.rand(1)-0.5 + if delta_angle > 0: + if structured: + p[:,2] = (rng.rand(1)-0.5)* delta_angle + else: + p[:,2] = (rng.rand(p.shape[0])-0.5)* delta_angle + if delta_xy > 0: + if structured: + p[:,:2] = (rng.rand(1, 2)-0.5)*delta_xy + else: + p[:,:2] = (rng.rand(p.shape[0], 2)-0.5)*delta_xy + perturbs.append(p) + return perturbs + +def get_multiplexer_class(args, task_number): + assert(args.task_params.base_class == 'Building') + logging.info('Returning BuildingMultiplexer') + R = BuildingMultiplexer(args, task_number) + return R + +class GridWorld(): + def __init__(self): + """Class members that will be assigned by any class that actually uses this + class.""" + self.restrict_to_largest_cc = None + self.robot = None + self.env = None + self.category_list = None + self.traversible = None + + def get_loc_axis(self, node, delta_theta, perturb=None): + """Based on the node orientation returns X, and Y axis. Used to sample the + map in egocentric coordinate frame. + """ + if type(node) == tuple: + node = np.array([node]) + if perturb is None: + perturb = np.zeros((node.shape[0], 4)) + xyt = self.to_actual_xyt_vec(node) + x = xyt[:,[0]] + perturb[:,[0]] + y = xyt[:,[1]] + perturb[:,[1]] + t = xyt[:,[2]] + perturb[:,[2]] + theta = t*delta_theta + loc = np.concatenate((x,y), axis=1) + x_axis = np.concatenate((np.cos(theta), np.sin(theta)), axis=1) + y_axis = np.concatenate((np.cos(theta+np.pi/2.), np.sin(theta+np.pi/2.)), + axis=1) + # Flip the sampled map where need be. + y_axis[np.where(perturb[:,3] > 0)[0], :] *= -1. + return loc, x_axis, y_axis, theta + + def to_actual_xyt(self, pqr): + """Converts from node to location on the map.""" + (p, q, r) = pqr + if self.task.n_ori == 6: + out = (p - q * 0.5 + self.task.origin_loc[0], + q * np.sqrt(3.) / 2. + self.task.origin_loc[1], r) + elif self.task.n_ori == 4: + out = (p + self.task.origin_loc[0], + q + self.task.origin_loc[1], r) + return out + + def to_actual_xyt_vec(self, pqr): + """Converts from node array to location array on the map.""" + p = pqr[:,0][:, np.newaxis] + q = pqr[:,1][:, np.newaxis] + r = pqr[:,2][:, np.newaxis] + if self.task.n_ori == 6: + out = np.concatenate((p - q * 0.5 + self.task.origin_loc[0], + q * np.sqrt(3.) / 2. 
+ self.task.origin_loc[1], + r), axis=1) + elif self.task.n_ori == 4: + out = np.concatenate((p + self.task.origin_loc[0], + q + self.task.origin_loc[1], + r), axis=1) + return out + + def raw_valid_fn_vec(self, xyt): + """Returns if the given set of nodes is valid or not.""" + height = self.traversible.shape[0] + width = self.traversible.shape[1] + x = np.round(xyt[:,[0]]).astype(np.int32) + y = np.round(xyt[:,[1]]).astype(np.int32) + is_inside = np.all(np.concatenate((x >= 0, y >= 0, + x < width, y < height), axis=1), axis=1) + x = np.minimum(np.maximum(x, 0), width-1) + y = np.minimum(np.maximum(y, 0), height-1) + ind = np.ravel_multi_index((y,x), self.traversible.shape) + is_traversible = self.traversible.ravel()[ind] + + is_valid = np.all(np.concatenate((is_inside[:,np.newaxis], is_traversible), + axis=1), axis=1) + return is_valid + + + def valid_fn_vec(self, pqr): + """Returns if the given set of nodes is valid or not.""" + xyt = self.to_actual_xyt_vec(np.array(pqr)) + height = self.traversible.shape[0] + width = self.traversible.shape[1] + x = np.round(xyt[:,[0]]).astype(np.int32) + y = np.round(xyt[:,[1]]).astype(np.int32) + is_inside = np.all(np.concatenate((x >= 0, y >= 0, + x < width, y < height), axis=1), axis=1) + x = np.minimum(np.maximum(x, 0), width-1) + y = np.minimum(np.maximum(y, 0), height-1) + ind = np.ravel_multi_index((y,x), self.traversible.shape) + is_traversible = self.traversible.ravel()[ind] + + is_valid = np.all(np.concatenate((is_inside[:,np.newaxis], is_traversible), + axis=1), axis=1) + return is_valid + + def get_feasible_actions(self, node_ids): + """Returns the feasible set of actions from the current node.""" + a = np.zeros((len(node_ids), self.task_params.num_actions), dtype=np.int32) + gtG = self.task.gtG + next_node = [] + for i, c in enumerate(node_ids): + neigh = gtG.vertex(c).out_neighbours() + neigh_edge = gtG.vertex(c).out_edges() + nn = {} + for n, e in zip(neigh, neigh_edge): + _ = gtG.ep['action'][e] + a[i,_] = 1 + nn[_] = int(n) + next_node.append(nn) + return a, next_node + + def take_action(self, current_node_ids, action): + """Returns the new node after taking the action action. Stays at the current + node if the action is invalid.""" + actions, next_node_ids = self.get_feasible_actions(current_node_ids) + new_node_ids = [] + for i, (c,a) in enumerate(zip(current_node_ids, action)): + if actions[i,a] == 1: + new_node_ids.append(next_node_ids[i][a]) + else: + new_node_ids.append(c) + return new_node_ids + + def set_r_obj(self, r_obj): + """Sets the SwiftshaderRenderer object used for rendering.""" + self.r_obj = r_obj + +class Building(GridWorld): + def __init__(self, building_name, robot, env, + category_list=None, small=False, flip=False, logdir=None, + building_loader=None): + + self.restrict_to_largest_cc = True + self.robot = robot + self.env = env + self.logdir = logdir + + # Load the building meta data. + building = building_loader.load_building(building_name) + if small: + building['mesh_names'] = building['mesh_names'][:5] + + # New code. + shapess = building_loader.load_building_meshes(building) + if flip: + for shapes in shapess: + shapes.flip_shape() + + vs = [] + for shapes in shapess: + vs.append(shapes.get_vertices()[0]) + vs = np.concatenate(vs, axis=0) + map = make_map(env.padding, env.resolution, vertex=vs, sc=100.) 
+ map = compute_traversibility( + map, robot.base, robot.height, robot.radius, env.valid_min, + env.valid_max, env.num_point_threshold, shapess=shapess, sc=100., + n_samples_per_face=env.n_samples_per_face) + + room_dims = _get_room_dimensions(building['room_dimension_file'], + env.resolution, map.origin, flip=flip) + class_maps, class_map_names = _get_semantic_maps( + building['class_map_folder'], building_name, map, flip) + + self.class_maps = class_maps + self.class_map_names = class_map_names + self.building = building + self.shapess = shapess + self.map = map + self.traversible = map.traversible*1 + self.building_name = building_name + self.room_dims = room_dims + self.flipped = flip + self.renderer_entitiy_ids = [] + + if self.restrict_to_largest_cc: + self.traversible = pick_largest_cc(self.traversible) + + def load_building_into_scene(self): + # Loads the scene. + self.renderer_entitiy_ids += self.r_obj.load_shapes(self.shapess) + # Free up memory, we dont need the mesh or the materials anymore. + self.shapess = None + + def add_entity_at_nodes(self, nodes, height, shape): + xyt = self.to_actual_xyt_vec(nodes) + nxy = xyt[:,:2]*1. + nxy = nxy * self.map.resolution + nxy = nxy + self.map.origin + Ts = np.concatenate((nxy, nxy[:,:1]), axis=1) + Ts[:,2] = height; Ts = Ts / 100.; + + # Merge all the shapes into a single shape and add that shape. + shape.replicate_shape(Ts) + entity_ids = self.r_obj.load_shapes([shape]) + self.renderer_entitiy_ids += entity_ids + return entity_ids + + def add_shapes(self, shapes): + scene = self.r_obj.viz.scene() + for shape in shapes: + scene.AddShape(shape) + + def add_materials(self, materials): + scene = self.r_obj.viz.scene() + for material in materials: + scene.AddOrUpdateMaterial(material) + + def set_building_visibility(self, visibility): + self.r_obj.set_entity_visible(self.renderer_entitiy_ids, visibility) + + def render_nodes(self, nodes, perturb=None, aux_delta_theta=0.): + self.set_building_visibility(True) + if perturb is None: + perturb = np.zeros((len(nodes), 4)) + + imgs = [] + r = 2 + elevation_z = r * np.tan(np.deg2rad(self.robot.camera_elevation_degree)) + + for i in range(len(nodes)): + xyt = self.to_actual_xyt(nodes[i]) + lookat_theta = 3.0 * np.pi / 2.0 - (xyt[2]+perturb[i,2]+aux_delta_theta) * (self.task.delta_theta) + nxy = np.array([xyt[0]+perturb[i,0], xyt[1]+perturb[i,1]]).reshape(1, -1) + nxy = nxy * self.map.resolution + nxy = nxy + self.map.origin + camera_xyz = np.zeros((1, 3)) + camera_xyz[...] = [nxy[0, 0], nxy[0, 1], self.robot.sensor_height] + camera_xyz = camera_xyz / 100. 
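+      # The division by 100. appears to convert map units (centimetres) into
+      # the metres expected by the renderer.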
+ lookat_xyz = np.array([-r * np.sin(lookat_theta), + -r * np.cos(lookat_theta), elevation_z]) + lookat_xyz = lookat_xyz + camera_xyz[0, :] + self.r_obj.position_camera(camera_xyz[0, :].tolist(), + lookat_xyz.tolist(), [0.0, 0.0, 1.0]) + img = self.r_obj.render(take_screenshot=True, output_type=0) + img = [x for x in img if x is not None] + img = np.concatenate(img, axis=2).astype(np.float32) + if perturb[i,3]>0: + img = img[:,::-1,:] + imgs.append(img) + + self.set_building_visibility(False) + return imgs + + +class MeshMapper(Building): + def __init__(self, robot, env, task_params, building_name, category_list, + flip, logdir=None, building_loader=None): + Building.__init__(self, building_name, robot, env, category_list, + small=task_params.toy_problem, flip=flip, logdir=logdir, + building_loader=building_loader) + self.task_params = task_params + self.task = None + self._preprocess_for_task(self.task_params.building_seed) + + def _preprocess_for_task(self, seed): + if self.task is None or self.task.seed != seed: + rng = np.random.RandomState(seed) + origin_loc = get_graph_origin_loc(rng, self.traversible) + self.task = utils.Foo(seed=seed, origin_loc=origin_loc, + n_ori=self.task_params.n_ori) + G = generate_graph(self.valid_fn_vec, + self.task_params.step_size, self.task.n_ori, + (0, 0, 0)) + gtG, nodes, nodes_to_id = convert_to_graph_tool(G) + self.task.gtG = gtG + self.task.nodes = nodes + self.task.delta_theta = 2.0*np.pi/(self.task.n_ori*1.) + self.task.nodes_to_id = nodes_to_id + logging.info('Building %s, #V=%d, #E=%d', self.building_name, + self.task.nodes.shape[0], self.task.gtG.num_edges()) + + if self.logdir is not None: + write_traversible = cv2.applyColorMap(self.traversible.astype(np.uint8)*255, cv2.COLORMAP_JET) + img_path = os.path.join(self.logdir, + '{:s}_{:d}_graph.png'.format(self.building_name, + seed)) + node_xyt = self.to_actual_xyt_vec(self.task.nodes) + plt.set_cmap('jet'); + fig, ax = utils.subplot(plt, (1,1), (12,12)) + ax.plot(node_xyt[:,0], node_xyt[:,1], 'm.') + ax.imshow(self.traversible, origin='lower'); + ax.set_axis_off(); ax.axis('equal'); + ax.set_title('{:s}, {:d}, {:d}'.format(self.building_name, + self.task.nodes.shape[0], + self.task.gtG.num_edges())) + if self.room_dims is not None: + for i, r in enumerate(self.room_dims['dims']*1): + min_ = r[:3]*1 + max_ = r[3:]*1 + xmin, ymin, zmin = min_ + xmax, ymax, zmax = max_ + + ax.plot([xmin, xmax, xmax, xmin, xmin], + [ymin, ymin, ymax, ymax, ymin], 'g') + with fu.fopen(img_path, 'w') as f: + fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0) + plt.close(fig) + + + def _gen_rng(self, rng): + # instances is a list of list of node_ids. 
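+    # move_type selects how the ground-truth trajectories are generated below:
+    # 'circle', 'shortest_path', or 'circle+forward'.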
+ if self.task_params.move_type == 'circle': + _, _, _, _, paths = rng_target_dist_field(self.task_params.batch_size, + self.task.gtG, rng, 0, 1, + compute_path=True) + instances_ = paths + + instances = [] + for instance_ in instances_: + instance = instance_ + for i in range(self.task_params.num_steps): + instance.append(self.take_action([instance[-1]], [1])[0]) + instances.append(instance) + + elif self.task_params.move_type == 'shortest_path': + _, _, _, _, paths = rng_target_dist_field(self.task_params.batch_size, + self.task.gtG, rng, + self.task_params.num_steps, + self.task_params.num_steps+1, + compute_path=True) + instances = paths + + elif self.task_params.move_type == 'circle+forward': + _, _, _, _, paths = rng_target_dist_field(self.task_params.batch_size, + self.task.gtG, rng, 0, 1, + compute_path=True) + instances_ = paths + instances = [] + for instance_ in instances_: + instance = instance_ + for i in range(self.task_params.n_ori-1): + instance.append(self.take_action([instance[-1]], [1])[0]) + while len(instance) <= self.task_params.num_steps: + while self.take_action([instance[-1]], [3])[0] == instance[-1] and len(instance) <= self.task_params.num_steps: + instance.append(self.take_action([instance[-1]], [2])[0]) + if len(instance) <= self.task_params.num_steps: + instance.append(self.take_action([instance[-1]], [3])[0]) + instances.append(instance) + + # Do random perturbation if needed. + perturbs = _gen_perturbs(rng, self.task_params.batch_size, + self.task_params.num_steps, + self.task_params.data_augment.lr_flip, + self.task_params.data_augment.delta_angle, + self.task_params.data_augment.delta_xy, + self.task_params.data_augment.structured) + return instances, perturbs + + def worker(self, instances, perturbs): + # Output the images and the free space. + + # Make the instances be all the same length. + for i in range(len(instances)): + for j in range(self.task_params.num_steps - len(instances[i]) + 1): + instances[i].append(instances[i][-1]) + if perturbs[i].shape[0] < self.task_params.num_steps+1: + p = np.zeros((self.task_params.num_steps+1, 4)) + p[:perturbs[i].shape[0], :] = perturbs[i] + p[perturbs[i].shape[0]:, :] = perturbs[i][-1,:] + perturbs[i] = p + + instances_ = [] + for instance in instances: + instances_ = instances_ + instance + perturbs_ = np.concatenate(perturbs, axis=0) + + instances_nodes = self.task.nodes[instances_,:] + instances_nodes = [tuple(x) for x in instances_nodes] + + imgs_ = self.render_nodes(instances_nodes, perturbs_) + imgs = []; next = 0; + for instance in instances: + img_i = [] + for _ in instance: + img_i.append(imgs_[next]) + next = next+1 + imgs.append(img_i) + imgs = np.array(imgs) + + # Render out the maps in the egocentric view for all nodes and not just the + # last node. 
+ all_nodes = [] + for x in instances: + all_nodes = all_nodes + x + all_perturbs = np.concatenate(perturbs, axis=0) + loc, x_axis, y_axis, theta = self.get_loc_axis( + self.task.nodes[all_nodes, :]*1, delta_theta=self.task.delta_theta, + perturb=all_perturbs) + fss = None + valids = None + loc_on_map = None + theta_on_map = None + cum_fs = None + cum_valid = None + incremental_locs = None + incremental_thetas = None + + if self.task_params.output_free_space: + fss, valids = get_map_to_predict(loc, x_axis, y_axis, + map=self.traversible*1., + map_size=self.task_params.map_size) + fss = np.array(fss) > 0.5 + fss = np.reshape(fss, [self.task_params.batch_size, + self.task_params.num_steps+1, + self.task_params.map_size, + self.task_params.map_size]) + valids = np.reshape(np.array(valids), fss.shape) + + if self.task_params.output_transform_to_global_map: + # Output the transform to the global map. + loc_on_map = np.reshape(loc*1, [self.task_params.batch_size, + self.task_params.num_steps+1, -1]) + # Converting to location wrt to first location so that warping happens + # properly. + theta_on_map = np.reshape(theta*1, [self.task_params.batch_size, + self.task_params.num_steps+1, -1]) + + if self.task_params.output_incremental_transform: + # Output the transform to the global map. + incremental_locs_ = np.reshape(loc*1, [self.task_params.batch_size, + self.task_params.num_steps+1, -1]) + incremental_locs_[:,1:,:] -= incremental_locs_[:,:-1,:] + t0 = -np.pi/2+np.reshape(theta*1, [self.task_params.batch_size, + self.task_params.num_steps+1, -1]) + t = t0*1 + incremental_locs = incremental_locs_*1 + incremental_locs[:,:,0] = np.sum(incremental_locs_ * np.concatenate((np.cos(t), np.sin(t)), axis=-1), axis=-1) + incremental_locs[:,:,1] = np.sum(incremental_locs_ * np.concatenate((np.cos(t+np.pi/2), np.sin(t+np.pi/2)), axis=-1), axis=-1) + incremental_locs[:,0,:] = incremental_locs_[:,0,:] + # print incremental_locs_[0,:,:], incremental_locs[0,:,:], t0[0,:,:] + + incremental_thetas = np.reshape(theta*1, [self.task_params.batch_size, + self.task_params.num_steps+1, + -1]) + incremental_thetas[:,1:,:] += -incremental_thetas[:,:-1,:] + + if self.task_params.output_canonical_map: + loc_ = loc[0::(self.task_params.num_steps+1), :] + x_axis = np.zeros_like(loc_); x_axis[:,1] = 1 + y_axis = np.zeros_like(loc_); y_axis[:,0] = -1 + cum_fs, cum_valid = get_map_to_predict(loc_, x_axis, y_axis, + map=self.traversible*1., + map_size=self.task_params.map_size) + cum_fs = np.array(cum_fs) > 0.5 + cum_fs = np.reshape(cum_fs, [self.task_params.batch_size, 1, + self.task_params.map_size, + self.task_params.map_size]) + cum_valid = np.reshape(np.array(cum_valid), cum_fs.shape) + + + inputs = {'fs_maps': fss, + 'valid_maps': valids, + 'imgs': imgs, + 'loc_on_map': loc_on_map, + 'theta_on_map': theta_on_map, + 'cum_fs_maps': cum_fs, + 'cum_valid_maps': cum_valid, + 'incremental_thetas': incremental_thetas, + 'incremental_locs': incremental_locs} + return inputs + + def pre(self, inputs): + inputs['imgs'] = image_pre(inputs['imgs'], self.task_params.modalities) + if inputs['loc_on_map'] is not None: + inputs['loc_on_map'] = inputs['loc_on_map'] - inputs['loc_on_map'][:,[0],:] + if inputs['theta_on_map'] is not None: + inputs['theta_on_map'] = np.pi/2. 
- inputs['theta_on_map'] + return inputs + +def _nav_env_reset_helper(type, rng, nodes, batch_size, gtG, max_dist, + num_steps, num_goals, data_augment, **kwargs): + """Generates and returns a new episode.""" + max_compute = max_dist + 4*num_steps + if type == 'general': + start_node_ids, end_node_ids, dist, pred_map, paths = \ + rng_target_dist_field(batch_size, gtG, rng, max_dist, max_compute, + nodes=nodes, compute_path=False) + target_class = None + + elif type == 'room_to_room_many': + goal_node_ids = []; dists = []; + node_room_ids = kwargs['node_room_ids'] + # Sample the first one + start_node_ids_, end_node_ids_, dist_, _, _ = rng_room_to_room( + batch_size, gtG, rng, max_dist, max_compute, + node_room_ids=node_room_ids, nodes=nodes) + start_node_ids = start_node_ids_ + goal_node_ids.append(end_node_ids_) + dists.append(dist_) + for n in range(num_goals-1): + start_node_ids_, end_node_ids_, dist_, _, _ = rng_next_goal( + goal_node_ids[n], batch_size, gtG, rng, max_dist, + max_compute, node_room_ids=node_room_ids, nodes=nodes, + dists_from_start_node=dists[n]) + goal_node_ids.append(end_node_ids_) + dists.append(dist_) + target_class = None + + elif type == 'rng_rejection_sampling_many': + num_goals = num_goals + goal_node_ids = []; dists = []; + + n_ori = kwargs['n_ori'] + step_size = kwargs['step_size'] + min_dist = kwargs['min_dist'] + sampling_distribution = kwargs['sampling_distribution'] + target_distribution = kwargs['target_distribution'] + rejection_sampling_M = kwargs['rejection_sampling_M'] + distribution_bins = kwargs['distribution_bins'] + + for n in range(num_goals): + if n == 0: input_nodes = None + else: input_nodes = goal_node_ids[n-1] + start_node_ids_, end_node_ids_, dist_, _, _, _, _ = rng_next_goal_rejection_sampling( + input_nodes, batch_size, gtG, rng, max_dist, min_dist, + max_compute, sampling_distribution, target_distribution, nodes, + n_ori, step_size, distribution_bins, rejection_sampling_M) + if n == 0: start_node_ids = start_node_ids_ + goal_node_ids.append(end_node_ids_) + dists.append(dist_) + target_class = None + + elif type == 'room_to_room_back': + num_goals = num_goals + assert(num_goals == 2), 'num_goals must be 2.' + goal_node_ids = []; dists = []; + node_room_ids = kwargs['node_room_ids'] + # Sample the first one. + start_node_ids_, end_node_ids_, dist_, _, _ = rng_room_to_room( + batch_size, gtG, rng, max_dist, max_compute, + node_room_ids=node_room_ids, nodes=nodes) + start_node_ids = start_node_ids_ + goal_node_ids.append(end_node_ids_) + dists.append(dist_) + + # Set second goal to be starting position, and compute distance to the start node. + goal_node_ids.append(start_node_ids) + dist = [] + for i in range(batch_size): + dist_ = gt.topology.shortest_distance( + gt.GraphView(gtG, reversed=True), + source=gtG.vertex(start_node_ids[i]), target=None) + dist_ = np.array(dist_.get_array()) + dist.append(dist_) + dists.append(dist) + target_class = None + + elif type[:14] == 'to_nearest_obj': + # Generate an episode by sampling one of the target classes (with + # probability proportional to the number of nodes in the world). + # With the sampled class sample a node that is within some distance from + # the sampled class. + class_nodes = kwargs['class_nodes'] + sampling = kwargs['sampling'] + dist_to_class = kwargs['dist_to_class'] + + assert(num_goals == 1), 'Only supports a single goal.' 
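+    # class_nodes has one row per (node, class) pair: column 0 is the node id,
+    # column 1 the class index, so sampling rows uniformly picks classes in
+    # proportion to how many nodes carry them.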
+ ind = rng.choice(class_nodes.shape[0], size=batch_size) + target_class = class_nodes[ind,1] + start_node_ids = []; dists = []; goal_node_ids = []; + + for t in target_class: + if sampling == 'uniform': + max_dist = max_dist + cnts = np.bincount(dist_to_class[t], minlength=max_dist+1)*1. + cnts[max_dist+1:] = 0 + p_each = 1./ cnts / (max_dist+1.) + p_each[cnts == 0] = 0 + p = p_each[dist_to_class[t]]*1.; p = p/np.sum(p) + start_node_id = rng.choice(p.shape[0], size=1, p=p)[0] + else: + logging.fatal('Sampling not one of uniform.') + start_node_ids.append(start_node_id) + dists.append(dist_to_class[t]) + # Dummy goal node, same as the start node, so that vis is better. + goal_node_ids.append(start_node_id) + dists = [dists] + goal_node_ids = [goal_node_ids] + + return start_node_ids, goal_node_ids, dists, target_class + + +class NavigationEnv(GridWorld, Building): + """Wrapper around GridWorld which sets up navigation tasks. + """ + def _debug_save_hardness(self, seed): + out_path = os.path.join(self.logdir, '{:s}_{:d}_hardness.png'.format(self.building_name, seed)) + batch_size = 4000 + rng = np.random.RandomState(0) + start_node_ids, end_node_ids, dists, pred_maps, paths, hardnesss, gt_dists = \ + rng_next_goal_rejection_sampling( + None, batch_size, self.task.gtG, rng, self.task_params.max_dist, + self.task_params.min_dist, self.task_params.max_dist, + self.task.sampling_distribution, self.task.target_distribution, + self.task.nodes, self.task_params.n_ori, self.task_params.step_size, + self.task.distribution_bins, self.task.rejection_sampling_M) + bins = self.task.distribution_bins + n_bins = self.task.n_bins + with plt.style.context('ggplot'): + fig, axes = utils.subplot(plt, (1,2), (10,10)) + ax = axes[0] + _ = ax.hist(hardnesss, bins=bins, weights=np.ones_like(hardnesss)/len(hardnesss)) + ax.plot(bins[:-1]+0.5/n_bins, self.task.target_distribution, 'g') + ax.plot(bins[:-1]+0.5/n_bins, self.task.sampling_distribution, 'b') + ax.grid('on') + + ax = axes[1] + _ = ax.hist(gt_dists, bins=np.arange(self.task_params.max_dist+1)) + ax.grid('on') + ax.set_title('Mean: {:0.2f}, Median: {:0.2f}'.format(np.mean(gt_dists), + np.median(gt_dists))) + with fu.fopen(out_path, 'w') as f: + fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0) + + def _debug_save_map_nodes(self, seed): + """Saves traversible space along with nodes generated on the graph. Takes + the seed as input.""" + img_path = os.path.join(self.logdir, '{:s}_{:d}_graph.png'.format(self.building_name, seed)) + node_xyt = self.to_actual_xyt_vec(self.task.nodes) + plt.set_cmap('jet'); + fig, ax = utils.subplot(plt, (1,1), (12,12)) + ax.plot(node_xyt[:,0], node_xyt[:,1], 'm.') + ax.set_axis_off(); ax.axis('equal'); + + if self.room_dims is not None: + for i, r in enumerate(self.room_dims['dims']*1): + min_ = r[:3]*1 + max_ = r[3:]*1 + xmin, ymin, zmin = min_ + xmax, ymax, zmax = max_ + + ax.plot([xmin, xmax, xmax, xmin, xmin], + [ymin, ymin, ymax, ymax, ymin], 'g') + ax.imshow(self.traversible, origin='lower'); + with fu.fopen(img_path, 'w') as f: + fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0) + + def _debug_semantic_maps(self, seed): + """Saves traversible space along with nodes generated on the graph. Takes + the seed as input.""" + for i, cls in enumerate(self.task_params.semantic_task.class_map_names): + img_path = os.path.join(self.logdir, '{:s}_flip{:d}_{:s}_graph.png'.format(self.building_name, seed, cls)) + maps = self.traversible*1. 
+ maps += 0.5*(self.task.class_maps_dilated[:,:,i]) + write_traversible = (maps*1.+1.)/3.0 + write_traversible = (write_traversible*255.).astype(np.uint8)[:,:,np.newaxis] + write_traversible = write_traversible + np.zeros((1,1,3), dtype=np.uint8) + fu.write_image(img_path, write_traversible[::-1,:,:]) + + def _preprocess_for_task(self, seed): + """Sets up the task field for doing navigation on the grid world.""" + if self.task is None or self.task.seed != seed: + rng = np.random.RandomState(seed) + origin_loc = get_graph_origin_loc(rng, self.traversible) + self.task = utils.Foo(seed=seed, origin_loc=origin_loc, + n_ori=self.task_params.n_ori) + G = generate_graph(self.valid_fn_vec, self.task_params.step_size, + self.task.n_ori, (0, 0, 0)) + gtG, nodes, nodes_to_id = convert_to_graph_tool(G) + self.task.gtG = gtG + self.task.nodes = nodes + self.task.delta_theta = 2.0*np.pi/(self.task.n_ori*1.) + self.task.nodes_to_id = nodes_to_id + + logging.info('Building %s, #V=%d, #E=%d', self.building_name, + self.task.nodes.shape[0], self.task.gtG.num_edges()) + type = self.task_params.type + if type == 'general': + # Do nothing + _ = None + + elif type == 'room_to_room_many' or type == 'room_to_room_back': + if type == 'room_to_room_back': + assert(self.task_params.num_goals == 2), 'num_goals must be 2.' + + self.room_dims = _filter_rooms(self.room_dims, self.task_params.room_regex) + xyt = self.to_actual_xyt_vec(self.task.nodes) + self.task.node_room_ids = _label_nodes_with_room_id(xyt, self.room_dims) + self.task.reset_kwargs = {'node_room_ids': self.task.node_room_ids} + + elif type == 'rng_rejection_sampling_many': + n_bins = 20 + rejection_sampling_M = self.task_params.rejection_sampling_M + min_dist = self.task_params.min_dist + bins = np.arange(n_bins+1)/(n_bins*1.) + target_d = np.zeros(n_bins); target_d[...] = 1./n_bins; + + sampling_d = get_hardness_distribution( + self.task.gtG, self.task_params.max_dist, self.task_params.min_dist, + np.random.RandomState(0), 4000, bins, self.task.nodes, + self.task_params.n_ori, self.task_params.step_size) + + self.task.reset_kwargs = {'distribution_bins': bins, + 'target_distribution': target_d, + 'sampling_distribution': sampling_d, + 'rejection_sampling_M': rejection_sampling_M, + 'n_bins': n_bins, + 'n_ori': self.task_params.n_ori, + 'step_size': self.task_params.step_size, + 'min_dist': self.task_params.min_dist} + self.task.n_bins = n_bins + self.task.distribution_bins = bins + self.task.target_distribution = target_d + self.task.sampling_distribution = sampling_d + self.task.rejection_sampling_M = rejection_sampling_M + + if self.logdir is not None: + self._debug_save_hardness(seed) + + elif type[:14] == 'to_nearest_obj': + self.room_dims = _filter_rooms(self.room_dims, self.task_params.room_regex) + xyt = self.to_actual_xyt_vec(self.task.nodes) + + self.class_maps = _select_classes(self.class_maps, + self.class_map_names, + self.task_params.semantic_task.class_map_names)*1 + self.class_map_names = self.task_params.semantic_task.class_map_names + nodes_xyt = self.to_actual_xyt_vec(np.array(self.task.nodes)) + + tt = utils.Timer(); tt.tic(); + if self.task_params.type == 'to_nearest_obj_acc': + self.task.class_maps_dilated, self.task.node_class_label = label_nodes_with_class_geodesic( + nodes_xyt, self.class_maps, + self.task_params.semantic_task.pix_distance+8, self.map.traversible, + ff_cost=1., fo_cost=1., oo_cost=4., connectivity=8.) 
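+        # For every target class, pre-compute the distance from each graph node
+        # to the nearest node labelled with that class; episode generation later
+        # samples start nodes using these distances.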
+ + dists = [] + for i in range(len(self.class_map_names)): + class_nodes_ = np.where(self.task.node_class_label[:,i])[0] + dists.append(get_distance_node_list(gtG, source_nodes=class_nodes_, direction='to')) + self.task.dist_to_class = dists + a_, b_ = np.where(self.task.node_class_label) + self.task.class_nodes = np.concatenate((a_[:,np.newaxis], b_[:,np.newaxis]), axis=1) + + if self.logdir is not None: + self._debug_semantic_maps(seed) + + self.task.reset_kwargs = {'sampling': self.task_params.semantic_task.sampling, + 'class_nodes': self.task.class_nodes, + 'dist_to_class': self.task.dist_to_class} + + if self.logdir is not None: + self._debug_save_map_nodes(seed) + + def reset(self, rngs): + rng = rngs[0]; rng_perturb = rngs[1]; + nodes = self.task.nodes + tp = self.task_params + + start_node_ids, goal_node_ids, dists, target_class = \ + _nav_env_reset_helper(tp.type, rng, self.task.nodes, tp.batch_size, + self.task.gtG, tp.max_dist, tp.num_steps, + tp.num_goals, tp.data_augment, + **(self.task.reset_kwargs)) + + start_nodes = [tuple(nodes[_,:]) for _ in start_node_ids] + goal_nodes = [[tuple(nodes[_,:]) for _ in __] for __ in goal_node_ids] + data_augment = tp.data_augment + perturbs = _gen_perturbs(rng_perturb, tp.batch_size, + (tp.num_steps+1)*tp.num_goals, + data_augment.lr_flip, data_augment.delta_angle, + data_augment.delta_xy, data_augment.structured) + perturbs = np.array(perturbs) # batch x steps x 4 + end_perturbs = perturbs[:,-(tp.num_goals):,:]*1 # fixed perturb for the goal. + perturbs = perturbs[:,:-(tp.num_goals),:]*1 + + history = -np.ones((tp.batch_size, tp.num_steps*tp.num_goals), dtype=np.int32) + self.episode = utils.Foo( + start_nodes=start_nodes, start_node_ids=start_node_ids, + goal_nodes=goal_nodes, goal_node_ids=goal_node_ids, dist_to_goal=dists, + perturbs=perturbs, goal_perturbs=end_perturbs, history=history, + target_class=target_class, history_frames=[]) + return start_node_ids + + def take_action(self, current_node_ids, action, step_number): + """In addition to returning the action, also returns the reward that the + agent receives.""" + goal_number = step_number / self.task_params.num_steps + new_node_ids = GridWorld.take_action(self, current_node_ids, action) + rewards = [] + for i, n in enumerate(new_node_ids): + reward = 0 + if n == self.episode.goal_node_ids[goal_number][i]: + reward = self.task_params.reward_at_goal + reward = reward - self.task_params.reward_time_penalty + rewards.append(reward) + return new_node_ids, rewards + + + def get_optimal_action(self, current_node_ids, step_number): + """Returns the optimal action from the current node.""" + goal_number = step_number / self.task_params.num_steps + gtG = self.task.gtG + a = np.zeros((len(current_node_ids), self.task_params.num_actions), dtype=np.int32) + d_dict = self.episode.dist_to_goal[goal_number] + for i, c in enumerate(current_node_ids): + neigh = gtG.vertex(c).out_neighbours() + neigh_edge = gtG.vertex(c).out_edges() + ds = np.array([d_dict[i][int(x)] for x in neigh]) + ds_min = np.min(ds) + for i_, e in enumerate(neigh_edge): + if ds[i_] == ds_min: + _ = gtG.ep['action'][e] + a[i, _] = 1 + return a + + def get_targets(self, current_node_ids, step_number): + """Returns the target actions from the current node.""" + action = self.get_optimal_action(current_node_ids, step_number) + action = np.expand_dims(action, axis=1) + return vars(utils.Foo(action=action)) + + def get_targets_name(self): + """Returns the list of names of the targets.""" + return ['action'] + + def cleanup(self): + 
self.episode = None + +class VisualNavigationEnv(NavigationEnv): + """Class for doing visual navigation in environments. Functions for computing + features on states, etc. + """ + def __init__(self, robot, env, task_params, category_list=None, + building_name=None, flip=False, logdir=None, + building_loader=None, r_obj=None): + tt = utils.Timer() + tt.tic() + Building.__init__(self, building_name, robot, env, category_list, + small=task_params.toy_problem, flip=flip, logdir=logdir, + building_loader=building_loader) + + self.set_r_obj(r_obj) + self.task_params = task_params + self.task = None + self.episode = None + self._preprocess_for_task(self.task_params.building_seed) + if hasattr(self.task_params, 'map_scales'): + self.task.scaled_maps = resize_maps( + self.traversible.astype(np.float32)*1, self.task_params.map_scales, + self.task_params.map_resize_method) + else: + logging.fatal('VisualNavigationEnv does not support scale_f anymore.') + self.task.readout_maps_scaled = resize_maps( + self.traversible.astype(np.float32)*1, + self.task_params.readout_maps_scales, + self.task_params.map_resize_method) + tt.toc(log_at=1, log_str='VisualNavigationEnv __init__: ') + + def get_weight(self): + return self.task.nodes.shape[0] + + def get_common_data(self): + goal_nodes = self.episode.goal_nodes + start_nodes = self.episode.start_nodes + perturbs = self.episode.perturbs + goal_perturbs = self.episode.goal_perturbs + target_class = self.episode.target_class + + goal_locs = []; rel_goal_locs = []; + for i in range(len(goal_nodes)): + end_nodes = goal_nodes[i] + goal_loc, _, _, goal_theta = self.get_loc_axis( + np.array(end_nodes), delta_theta=self.task.delta_theta, + perturb=goal_perturbs[:,i,:]) + + # Compute the relative location to all goals from the starting location. + loc, _, _, theta = self.get_loc_axis(np.array(start_nodes), + delta_theta=self.task.delta_theta, + perturb=perturbs[:,0,:]) + r_goal, t_goal = _get_relative_goal_loc(goal_loc*1., loc, theta) + rel_goal_loc = np.concatenate((r_goal*np.cos(t_goal), r_goal*np.sin(t_goal), + np.cos(goal_theta-theta), + np.sin(goal_theta-theta)), axis=1) + rel_goal_locs.append(np.expand_dims(rel_goal_loc, axis=1)) + goal_locs.append(np.expand_dims(goal_loc, axis=1)) + + map = self.traversible*1. + maps = np.repeat(np.expand_dims(np.expand_dims(map, axis=0), axis=0), + self.task_params.batch_size, axis=0)*1 + if self.task_params.type[:14] == 'to_nearest_obj': + for i in range(self.task_params.batch_size): + maps[i,0,:,:] += 0.5*(self.task.class_maps_dilated[:,:,target_class[i]]) + + rel_goal_locs = np.concatenate(rel_goal_locs, axis=1) + goal_locs = np.concatenate(goal_locs, axis=1) + maps = np.expand_dims(maps, axis=-1) + + if self.task_params.type[:14] == 'to_nearest_obj': + rel_goal_locs = np.zeros((self.task_params.batch_size, 1, + len(self.task_params.semantic_task.class_map_names)), + dtype=np.float32) + goal_locs = np.zeros((self.task_params.batch_size, 1, 2), + dtype=np.float32) + for i in range(self.task_params.batch_size): + t = target_class[i] + rel_goal_locs[i,0,t] = 1. 
+ goal_locs[i,0,0] = t + goal_locs[i,0,1] = np.NaN + + return vars(utils.Foo(orig_maps=maps, goal_loc=goal_locs, + rel_goal_loc_at_start=rel_goal_locs)) + + def pre_common_data(self, inputs): + return inputs + + + def get_features(self, current_node_ids, step_number): + task_params = self.task_params + goal_number = step_number / self.task_params.num_steps + end_nodes = self.task.nodes[self.episode.goal_node_ids[goal_number],:]*1 + current_nodes = self.task.nodes[current_node_ids,:]*1 + end_perturbs = self.episode.goal_perturbs[:,goal_number,:][:,np.newaxis,:] + perturbs = self.episode.perturbs + target_class = self.episode.target_class + + # Append to history. + self.episode.history[:,step_number] = np.array(current_node_ids) + + # Render out the images from current node. + outs = {} + + if self.task_params.outputs.images: + imgs_all = [] + imgs = self.render_nodes([tuple(x) for x in current_nodes], + perturb=perturbs[:,step_number,:]) + imgs_all.append(imgs) + aux_delta_thetas = self.task_params.aux_delta_thetas + for i in range(len(aux_delta_thetas)): + imgs = self.render_nodes([tuple(x) for x in current_nodes], + perturb=perturbs[:,step_number,:], + aux_delta_theta=aux_delta_thetas[i]) + imgs_all.append(imgs) + imgs_all = np.array(imgs_all) # A x B x H x W x C + imgs_all = np.transpose(imgs_all, axes=[1,0,2,3,4]) + imgs_all = np.expand_dims(imgs_all, axis=1) # B x N x A x H x W x C + if task_params.num_history_frames > 0: + if step_number == 0: + # Append the same frame 4 times + for i in range(task_params.num_history_frames+1): + self.episode.history_frames.insert(0, imgs_all*1.) + self.episode.history_frames.insert(0, imgs_all) + self.episode.history_frames.pop() + imgs_all_with_history = np.concatenate(self.episode.history_frames, axis=2) + else: + imgs_all_with_history = imgs_all + outs['imgs'] = imgs_all_with_history # B x N x A x H x W x C + + if self.task_params.outputs.node_ids: + outs['node_ids'] = np.array(current_node_ids).reshape((-1,1,1)) + outs['perturbs'] = np.expand_dims(perturbs[:,step_number, :]*1., axis=1) + + if self.task_params.outputs.analytical_counts: + assert(self.task_params.modalities == ['depth']) + d = image_pre(outs['imgs']*1., self.task_params.modalities) + cm = get_camera_matrix(self.task_params.img_width, + self.task_params.img_height, + self.task_params.img_fov) + XYZ = get_point_cloud_from_z(100./d[...,0], cm) + XYZ = make_geocentric(XYZ*100., self.robot.sensor_height, + self.robot.camera_elevation_degree) + for i in range(len(self.task_params.analytical_counts.map_sizes)): + non_linearity = self.task_params.analytical_counts.non_linearity[i] + count, isvalid = bin_points(XYZ*1., + map_size=self.task_params.analytical_counts.map_sizes[i], + xy_resolution=self.task_params.analytical_counts.xy_resolution[i], + z_bins=self.task_params.analytical_counts.z_bins[i]) + assert(count.shape[2] == 1), 'only works for n_views equal to 1.' + count = count[:,:,0,:,:,:] + isvalid = isvalid[:,:,0,:,:,:] + if non_linearity == 'none': + None + elif non_linearity == 'min10': + count = np.minimum(count, 10.) + elif non_linearity == 'sqrt': + count = np.sqrt(count) + else: + logging.fatal('Undefined non_linearity.') + outs['analytical_counts_{:d}'.format(i)] = count + + # Compute the goal location in the cordinate frame of the robot. 
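+    # For point-goal tasks rel_goal_loc is [r*cos(t), r*sin(t),
+    # cos(goal_theta-theta), sin(goal_theta-theta)] in the robot's frame; for
+    # 'to_nearest_obj' tasks it is a one-hot vector over the semantic classes.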
+ if self.task_params.outputs.rel_goal_loc: + if self.task_params.type[:14] != 'to_nearest_obj': + loc, _, _, theta = self.get_loc_axis(current_nodes, + delta_theta=self.task.delta_theta, + perturb=perturbs[:,step_number,:]) + goal_loc, _, _, goal_theta = self.get_loc_axis(end_nodes, + delta_theta=self.task.delta_theta, + perturb=end_perturbs[:,0,:]) + r_goal, t_goal = _get_relative_goal_loc(goal_loc, loc, theta) + + rel_goal_loc = np.concatenate((r_goal*np.cos(t_goal), r_goal*np.sin(t_goal), + np.cos(goal_theta-theta), + np.sin(goal_theta-theta)), axis=1) + outs['rel_goal_loc'] = np.expand_dims(rel_goal_loc, axis=1) + elif self.task_params.type[:14] == 'to_nearest_obj': + rel_goal_loc = np.zeros((self.task_params.batch_size, 1, + len(self.task_params.semantic_task.class_map_names)), + dtype=np.float32) + for i in range(self.task_params.batch_size): + t = target_class[i] + rel_goal_loc[i,0,t] = 1. + outs['rel_goal_loc'] = rel_goal_loc + + # Location on map to plot the trajectory during validation. + if self.task_params.outputs.loc_on_map: + loc, x_axis, y_axis, theta = self.get_loc_axis(current_nodes, + delta_theta=self.task.delta_theta, + perturb=perturbs[:,step_number,:]) + outs['loc_on_map'] = np.expand_dims(loc, axis=1) + + # Compute gt_dist to goal + if self.task_params.outputs.gt_dist_to_goal: + gt_dist_to_goal = np.zeros((len(current_node_ids), 1), dtype=np.float32) + for i, n in enumerate(current_node_ids): + gt_dist_to_goal[i,0] = self.episode.dist_to_goal[goal_number][i][n] + outs['gt_dist_to_goal'] = np.expand_dims(gt_dist_to_goal, axis=1) + + # Free space in front of you, map and goal as images. + if self.task_params.outputs.ego_maps: + loc, x_axis, y_axis, theta = self.get_loc_axis(current_nodes, + delta_theta=self.task.delta_theta, + perturb=perturbs[:,step_number,:]) + maps = generate_egocentric_maps(self.task.scaled_maps, + self.task_params.map_scales, + self.task_params.map_crop_sizes, loc, + x_axis, y_axis, theta) + + for i in range(len(self.task_params.map_scales)): + outs['ego_maps_{:d}'.format(i)] = \ + np.expand_dims(np.expand_dims(maps[i], axis=1), axis=-1) + + if self.task_params.outputs.readout_maps: + loc, x_axis, y_axis, theta = self.get_loc_axis(current_nodes, + delta_theta=self.task.delta_theta, + perturb=perturbs[:,step_number,:]) + maps = generate_egocentric_maps(self.task.readout_maps_scaled, + self.task_params.readout_maps_scales, + self.task_params.readout_maps_crop_sizes, + loc, x_axis, y_axis, theta) + for i in range(len(self.task_params.readout_maps_scales)): + outs['readout_maps_{:d}'.format(i)] = \ + np.expand_dims(np.expand_dims(maps[i], axis=1), axis=-1) + + # Images for the goal. 
+ if self.task_params.outputs.ego_goal_imgs: + if self.task_params.type[:14] != 'to_nearest_obj': + loc, x_axis, y_axis, theta = self.get_loc_axis(current_nodes, + delta_theta=self.task.delta_theta, + perturb=perturbs[:,step_number,:]) + goal_loc, _, _, _ = self.get_loc_axis(end_nodes, + delta_theta=self.task.delta_theta, + perturb=end_perturbs[:,0,:]) + rel_goal_orientation = np.mod( + np.int32(current_nodes[:,2:] - end_nodes[:,2:]), self.task_params.n_ori) + goal_dist, goal_theta = _get_relative_goal_loc(goal_loc, loc, theta) + goals = generate_goal_images(self.task_params.map_scales, + self.task_params.map_crop_sizes, + self.task_params.n_ori, goal_dist, + goal_theta, rel_goal_orientation) + for i in range(len(self.task_params.map_scales)): + outs['ego_goal_imgs_{:d}'.format(i)] = np.expand_dims(goals[i], axis=1) + + elif self.task_params.type[:14] == 'to_nearest_obj': + for i in range(len(self.task_params.map_scales)): + num_classes = len(self.task_params.semantic_task.class_map_names) + outs['ego_goal_imgs_{:d}'.format(i)] = np.zeros((self.task_params.batch_size, 1, + self.task_params.map_crop_sizes[i], + self.task_params.map_crop_sizes[i], + self.task_params.goal_channels)) + for i in range(self.task_params.batch_size): + t = target_class[i] + for j in range(len(self.task_params.map_scales)): + outs['ego_goal_imgs_{:d}'.format(j)][i,:,:,:,t] = 1. + + # Incremental locs and theta (for map warping), always in the original scale + # of the map, the subequent steps in the tf code scale appropriately. + # Scaling is done by just multiplying incremental_locs appropriately. + if self.task_params.outputs.egomotion: + if step_number == 0: + # Zero Ego Motion + incremental_locs = np.zeros((self.task_params.batch_size, 1, 2), dtype=np.float32) + incremental_thetas = np.zeros((self.task_params.batch_size, 1, 1), dtype=np.float32) + else: + previous_nodes = self.task.nodes[self.episode.history[:,step_number-1], :]*1 + loc, _, _, theta = self.get_loc_axis(current_nodes, + delta_theta=self.task.delta_theta, + perturb=perturbs[:,step_number,:]) + previous_loc, _, _, previous_theta = self.get_loc_axis( + previous_nodes, delta_theta=self.task.delta_theta, + perturb=perturbs[:,step_number-1,:]) + + incremental_locs_ = np.reshape(loc-previous_loc, [self.task_params.batch_size, 1, -1]) + + t = -np.pi/2+np.reshape(theta*1, [self.task_params.batch_size, 1, -1]) + incremental_locs = incremental_locs_*1 + incremental_locs[:,:,0] = np.sum(incremental_locs_ * + np.concatenate((np.cos(t), np.sin(t)), + axis=-1), axis=-1) + incremental_locs[:,:,1] = np.sum(incremental_locs_ * + np.concatenate((np.cos(t+np.pi/2), + np.sin(t+np.pi/2)), + axis=-1), axis=-1) + incremental_thetas = np.reshape(theta-previous_theta, + [self.task_params.batch_size, 1, -1]) + outs['incremental_locs'] = incremental_locs + outs['incremental_thetas'] = incremental_thetas + + if self.task_params.outputs.visit_count: + # Output the visit count for this state, how many times has the current + # state been visited, and how far in the history was the last visit + # (except this one) + visit_count = np.zeros((self.task_params.batch_size, 1), dtype=np.int32) + last_visit = -np.ones((self.task_params.batch_size, 1), dtype=np.int32) + if step_number >= 1: + h = self.episode.history[:,:(step_number)] + visit_count[:,0] = np.sum(h == np.array(current_node_ids).reshape([-1,1]), + axis=1) + last_visit[:,0] = np.argmax(h[:,::-1] == np.array(current_node_ids).reshape([-1,1]), + axis=1) + 1 + last_visit[visit_count == 0] = -1 # -1 if not visited. 
+ outs['visit_count'] = np.expand_dims(visit_count, axis=1) + outs['last_visit'] = np.expand_dims(last_visit, axis=1) + return outs + + def get_features_name(self): + f = [] + if self.task_params.outputs.images: + f.append('imgs') + if self.task_params.outputs.rel_goal_loc: + f.append('rel_goal_loc') + if self.task_params.outputs.loc_on_map: + f.append('loc_on_map') + if self.task_params.outputs.gt_dist_to_goal: + f.append('gt_dist_to_goal') + if self.task_params.outputs.ego_maps: + for i in range(len(self.task_params.map_scales)): + f.append('ego_maps_{:d}'.format(i)) + if self.task_params.outputs.readout_maps: + for i in range(len(self.task_params.readout_maps_scales)): + f.append('readout_maps_{:d}'.format(i)) + if self.task_params.outputs.ego_goal_imgs: + for i in range(len(self.task_params.map_scales)): + f.append('ego_goal_imgs_{:d}'.format(i)) + if self.task_params.outputs.egomotion: + f.append('incremental_locs') + f.append('incremental_thetas') + if self.task_params.outputs.visit_count: + f.append('visit_count') + f.append('last_visit') + if self.task_params.outputs.analytical_counts: + for i in range(len(self.task_params.analytical_counts.map_sizes)): + f.append('analytical_counts_{:d}'.format(i)) + if self.task_params.outputs.node_ids: + f.append('node_ids') + f.append('perturbs') + return f + + def pre_features(self, inputs): + if self.task_params.outputs.images: + inputs['imgs'] = image_pre(inputs['imgs'], self.task_params.modalities) + return inputs + +class BuildingMultiplexer(): + def __init__(self, args, task_number): + params = vars(args) + for k in params.keys(): + setattr(self, k, params[k]) + self.task_number = task_number + self._pick_data(task_number) + logging.info('Env Class: %s.', self.env_class) + if self.task_params.task == 'planning': + self._setup_planner() + elif self.task_params.task == 'mapping': + self._setup_mapper() + elif self.task_params.task == 'map+plan': + self._setup_mapper() + else: + logging.error('Undefined task: %s'.format(self.task_params.task)) + + def _pick_data(self, task_number): + logging.error('Input Building Names: %s', self.building_names) + self.flip = [np.mod(task_number / len(self.building_names), 2) == 1] + id = np.mod(task_number, len(self.building_names)) + self.building_names = [self.building_names[id]] + self.task_params.building_seed = task_number + logging.error('BuildingMultiplexer: Picked Building Name: %s', self.building_names) + self.building_names = self.building_names[0].split('+') + self.flip = [self.flip[0] for _ in self.building_names] + logging.error('BuildingMultiplexer: Picked Building Name: %s', self.building_names) + logging.error('BuildingMultiplexer: Flipping Buildings: %s', self.flip) + logging.error('BuildingMultiplexer: Set building_seed: %d', self.task_params.building_seed) + self.num_buildings = len(self.building_names) + logging.error('BuildingMultiplexer: Num buildings: %d', self.num_buildings) + + def _setup_planner(self): + # Load building env class. + self.buildings = [] + for i, building_name in enumerate(self.building_names): + b = self.env_class(robot=self.robot, env=self.env, + task_params=self.task_params, + building_name=building_name, flip=self.flip[i], + logdir=self.logdir, building_loader=self.dataset) + self.buildings.append(b) + + def _setup_mapper(self): + # Set up the renderer. 
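+    # A single SwiftshaderRenderer is shared across all buildings: each
+    # building's meshes are loaded into the scene once and are made visible
+    # only while that building is being rendered.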
+ cp = self.camera_param + rgb_shader, d_shader = sru.get_shaders(cp.modalities) + r_obj = SwiftshaderRenderer() + r_obj.init_display(width=cp.width, height=cp.height, fov=cp.fov, + z_near=cp.z_near, z_far=cp.z_far, rgb_shader=rgb_shader, + d_shader=d_shader) + self.r_obj = r_obj + r_obj.clear_scene() + + # Load building env class. + self.buildings = [] + wt = [] + for i, building_name in enumerate(self.building_names): + b = self.env_class(robot=self.robot, env=self.env, + task_params=self.task_params, + building_name=building_name, flip=self.flip[i], + logdir=self.logdir, building_loader=self.dataset, + r_obj=r_obj) + wt.append(b.get_weight()) + b.load_building_into_scene() + b.set_building_visibility(False) + self.buildings.append(b) + wt = np.array(wt).astype(np.float32) + wt = wt / np.sum(wt+0.0001) + self.building_sampling_weights = wt + + def sample_building(self, rng): + if self.num_buildings == 1: + building_id = rng.choice(range(len(self.building_names))) + else: + building_id = rng.choice(self.num_buildings, + p=self.building_sampling_weights) + b = self.buildings[building_id] + instances = b._gen_rng(rng) + self._building_id = building_id + return self.buildings[building_id], instances + + def sample_env(self, rngs): + rng = rngs[0]; + if self.num_buildings == 1: + building_id = rng.choice(range(len(self.building_names))) + else: + building_id = rng.choice(self.num_buildings, + p=self.building_sampling_weights) + return self.buildings[building_id] + + def pre(self, inputs): + return self.buildings[self._building_id].pre(inputs) + + def __del__(self): + self.r_obj.clear_scene() + logging.error('Clearing scene.') diff --git a/models/research/cognitive_mapping_and_planning/datasets/nav_env_config.py b/models/research/cognitive_mapping_and_planning/datasets/nav_env_config.py new file mode 100644 index 0000000000000000000000000000000000000000..3d71c5767c4dc0ed9f05cce5c1790f11ede3778a --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/datasets/nav_env_config.py @@ -0,0 +1,127 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Configs for stanford navigation environment. + +Base config for stanford navigation enviornment. +""" +import numpy as np +import src.utils as utils +import datasets.nav_env as nav_env + +def nav_env_base_config(): + """Returns the base config for stanford navigation environment. + + Returns: + Base config for stanford navigation environment. 
+ """ + robot = utils.Foo(radius=15, + base=10, + height=140, + sensor_height=120, + camera_elevation_degree=-15) + + env = utils.Foo(padding=10, + resolution=5, + num_point_threshold=2, + valid_min=-10, + valid_max=200, + n_samples_per_face=200) + + camera_param = utils.Foo(width=225, + height=225, + z_near=0.05, + z_far=20.0, + fov=60., + modalities=['rgb'], + img_channels=3) + + data_augment = utils.Foo(lr_flip=0, + delta_angle=0.5, + delta_xy=4, + relight=True, + relight_fast=False, + structured=False) # if True, uses the same perturb for the whole episode. + + outputs = utils.Foo(images=True, + rel_goal_loc=False, + loc_on_map=True, + gt_dist_to_goal=True, + ego_maps=False, + ego_goal_imgs=False, + egomotion=False, + visit_count=False, + analytical_counts=False, + node_ids=True, + readout_maps=False) + + # class_map_names=['board', 'chair', 'door', 'sofa', 'table'] + class_map_names = ['chair', 'door', 'table'] + semantic_task = utils.Foo(class_map_names=class_map_names, pix_distance=16, + sampling='uniform') + + # time per iteration for cmp is 0.82 seconds per episode with 3.4s overhead per batch. + task_params = utils.Foo(max_dist=32, + step_size=8, + num_steps=40, + num_actions=4, + batch_size=4, + building_seed=0, + num_goals=1, + img_height=None, + img_width=None, + img_channels=None, + modalities=None, + outputs=outputs, + map_scales=[1.], + map_crop_sizes=[64], + rel_goal_loc_dim=4, + base_class='Building', + task='map+plan', + n_ori=4, + type='room_to_room_many', + data_augment=data_augment, + room_regex='^((?!hallway).)*$', + toy_problem=False, + map_channels=1, + gt_coverage=False, + input_type='maps', + full_information=False, + aux_delta_thetas=[], + semantic_task=semantic_task, + num_history_frames=0, + node_ids_dim=1, + perturbs_dim=4, + map_resize_method='linear_noantialiasing', + readout_maps_channels=1, + readout_maps_scales=[], + readout_maps_crop_sizes=[], + n_views=1, + reward_time_penalty=0.1, + reward_at_goal=1., + discount_factor=0.99, + rejection_sampling_M=100, + min_dist=None) + + navtask_args = utils.Foo( + building_names=['area1_gates_wingA_floor1_westpart'], + env_class=nav_env.VisualNavigationEnv, + robot=robot, + task_params=task_params, + env=env, + camera_param=camera_param, + cache_rooms=True) + return navtask_args + diff --git a/models/research/cognitive_mapping_and_planning/matplotlibrc b/models/research/cognitive_mapping_and_planning/matplotlibrc new file mode 100644 index 0000000000000000000000000000000000000000..ed5097572ae68680d0c9afdf510968e1c3d175d4 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/matplotlibrc @@ -0,0 +1 @@ +backend : agg diff --git a/models/research/cognitive_mapping_and_planning/output/.gitignore b/models/research/cognitive_mapping_and_planning/output/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..a767cafbbd864d0baf76530294598e4c2be60a24 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/output/.gitignore @@ -0,0 +1 @@ +* diff --git a/models/research/cognitive_mapping_and_planning/output/README.md b/models/research/cognitive_mapping_and_planning/output/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7518c3874390da7e2aa65a89ccdec035ca7610e8 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/output/README.md @@ -0,0 +1,16 @@ +### Pre-Trained Models + +We provide the following pre-trained models: + +Config Name | Checkpoint | Mean Dist. | 50%ile Dist. | 75%ile Dist. 
| Success %age | +:-: | :-: | :-: | :-: | :-: | :-: | +cmp.lmap_Msc.clip5.sbpd_d_r2r | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/cmp.lmap_Msc.clip5.sbpd_d_r2r.tar) | 4.79 | 0 | 1 | 78.9 | +cmp.lmap_Msc.clip5.sbpd_rgb_r2r | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/cmp.lmap_Msc.clip5.sbpd_rgb_r2r.tar) | 7.74 | 0 | 14 | 62.4 | +cmp.lmap_Msc.clip5.sbpd_d_ST | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/cmp.lmap_Msc.clip5.sbpd_d_ST.tar) | 10.67 | 9 | 19 | 39.7 | +cmp.lmap_Msc.clip5.sbpd_rgb_ST | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/cmp.lmap_Msc.clip5.sbpd_rgb_ST.tar) | 11.27 | 10 | 19 | 35.6 | +cmp.lmap_Msc.clip5.sbpd_d_r2r_h0_64_80 | [ckpt](http:////download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/cmp.lmap_Msc.clip5.sbpd_d_r2r_h0_64_80.tar) | 11.6 | 0 | 19 | 66.9 | +bl.v2.noclip.sbpd_d_r2r | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/bl.v2.noclip.sbpd_d_r2r.tar) | 5.90 | 0 | 6 | 71.2 | +bl.v2.noclip.sbpd_rgb_r2r | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/bl.v2.noclip.sbpd_rgb_r2r.tar) | 10.21 | 1 | 21 | 53.4 | +bl.v2.noclip.sbpd_d_ST | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/bl.v2.noclip.sbpd_d_ST.tar) | 13.29 | 14 | 23 | 28.0 | +bl.v2.noclip.sbpd_rgb_ST | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/bl.v2.noclip.sbpd_rgb_ST.tar) | 13.37 | 13 | 20 | 24.2 | +bl.v2.noclip.sbpd_d_r2r_h0_64_80 | [ckpt](http:////download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/bl.v2.noclip.sbpd_d_r2r_h0_64_80.tar) | 15.30 | 0 | 29 | 57.9 | diff --git a/models/research/cognitive_mapping_and_planning/patches/GLES2_2_0.py.patch b/models/research/cognitive_mapping_and_planning/patches/GLES2_2_0.py.patch new file mode 100644 index 0000000000000000000000000000000000000000..de1be442d5b9fff44862d37b9329e32face2b663 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/patches/GLES2_2_0.py.patch @@ -0,0 +1,14 @@ +10c10 +< from OpenGL import platform, constant, arrays +--- +> from OpenGL import platform, constant, arrays, contextdata +249a250 +> from OpenGL._bytes import _NULL_8_BYTE +399c400 +< array = ArrayDatatype.asArray( pointer, type ) +--- +> array = arrays.ArrayDatatype.asArray( pointer, type ) +405c406 +< ArrayDatatype.voidDataPointer( array ) +--- +> arrays.ArrayDatatype.voidDataPointer( array ) diff --git a/models/research/cognitive_mapping_and_planning/patches/apply_patches.sh b/models/research/cognitive_mapping_and_planning/patches/apply_patches.sh new file mode 100644 index 0000000000000000000000000000000000000000..4a786058258decdfb381eff25684183d92788ebe --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/patches/apply_patches.sh @@ -0,0 +1,18 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +echo $VIRTUAL_ENV +patch $VIRTUAL_ENV/local/lib/python2.7/site-packages/OpenGL/GLES2/VERSION/GLES2_2_0.py patches/GLES2_2_0.py.patch +patch $VIRTUAL_ENV/local/lib/python2.7/site-packages/OpenGL/platform/ctypesloader.py patches/ctypesloader.py.patch diff --git a/models/research/cognitive_mapping_and_planning/patches/ctypesloader.py.patch b/models/research/cognitive_mapping_and_planning/patches/ctypesloader.py.patch new file mode 100644 index 0000000000000000000000000000000000000000..27dd43b18010dc5fdcd605b9a5d470abaa19151f --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/patches/ctypesloader.py.patch @@ -0,0 +1,15 @@ +45c45,46 +< return dllType( name, mode ) +--- +> print './' + name +> return dllType( './' + name, mode ) +47,48c48,53 +< err.args += (name,fullName) +< raise +--- +> try: +> print name +> return dllType( name, mode ) +> except: +> err.args += (name,fullName) +> raise diff --git a/models/research/cognitive_mapping_and_planning/render/__init__.py b/models/research/cognitive_mapping_and_planning/render/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/cognitive_mapping_and_planning/render/depth_rgb_encoded.fp b/models/research/cognitive_mapping_and_planning/render/depth_rgb_encoded.fp new file mode 100644 index 0000000000000000000000000000000000000000..23e93d27f585e93896799f177888e9c50fa03eed --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/render/depth_rgb_encoded.fp @@ -0,0 +1,30 @@ +// This shader computes per-pixel depth (-z coordinate in the camera space, or +// orthogonal distance to the camera plane). The result is multiplied by the +// `kFixedPointFraction` constant and is encoded to RGB channels as an integer +// (R being the least significant byte). + +#ifdef GL_ES +#ifdef GL_FRAGMENT_PRECISION_HIGH +precision highp float; +#else +precision mediump float; +#endif +#endif + +const float kFixedPointFraction = 1000.0; + +varying float vDepth; + +void main(void) { + float d = vDepth; + + // Encode the depth to RGB. + d *= (kFixedPointFraction / 255.0); + gl_FragColor.r = mod(d, 1.0); + d = (d - gl_FragColor.r) / 255.0; + gl_FragColor.g = mod(d, 1.0); + d = (d - gl_FragColor.g) / 255.0; + gl_FragColor.b = mod(d, 1.0); + + gl_FragColor.a = 1.0; +} diff --git a/models/research/cognitive_mapping_and_planning/render/depth_rgb_encoded.vp b/models/research/cognitive_mapping_and_planning/render/depth_rgb_encoded.vp new file mode 100644 index 0000000000000000000000000000000000000000..2db74f14aa7f253b8f544ec1ab519129f13426a0 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/render/depth_rgb_encoded.vp @@ -0,0 +1,15 @@ +uniform mat4 uViewMatrix; +uniform mat4 uProjectionMatrix; + +attribute vec3 aPosition; + +varying float vDepth; + +void main(void) { + vec4 worldPosition = vec4(aPosition, 1.0); + vec4 viewPosition = uViewMatrix * worldPosition; + gl_Position = uProjectionMatrix * viewPosition; + + // Orthogonal depth is simply -z in the camera space. 
+ vDepth = -viewPosition.z; +} diff --git a/models/research/cognitive_mapping_and_planning/render/rgb_flat_color.fp b/models/research/cognitive_mapping_and_planning/render/rgb_flat_color.fp new file mode 100644 index 0000000000000000000000000000000000000000..c8c24d76103793d9cfa9166517177cb332d1a92c --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/render/rgb_flat_color.fp @@ -0,0 +1,11 @@ +precision highp float; +varying vec4 vColor; +varying vec2 vTextureCoord; + +uniform sampler2D uTexture; + +void main(void) { + vec4 color = vColor; + color = texture2D(uTexture, vTextureCoord); + gl_FragColor = color; +} diff --git a/models/research/cognitive_mapping_and_planning/render/rgb_flat_color.vp b/models/research/cognitive_mapping_and_planning/render/rgb_flat_color.vp new file mode 100644 index 0000000000000000000000000000000000000000..ebc79173405f7449921fd40f778fe3695aab5ea8 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/render/rgb_flat_color.vp @@ -0,0 +1,18 @@ +uniform mat4 uViewMatrix; +uniform mat4 uProjectionMatrix; +uniform vec4 uColor; + +attribute vec4 aColor; +attribute vec3 aPosition; +attribute vec2 aTextureCoord; + +varying vec4 vColor; +varying vec2 vTextureCoord; + +void main(void) { + vec4 worldPosition = vec4(aPosition, 1.0); + gl_Position = uProjectionMatrix * (uViewMatrix * worldPosition); + + vColor = aColor * uColor; + vTextureCoord = aTextureCoord; +} diff --git a/models/research/cognitive_mapping_and_planning/render/swiftshader_renderer.py b/models/research/cognitive_mapping_and_planning/render/swiftshader_renderer.py new file mode 100644 index 0000000000000000000000000000000000000000..74b1be72c11a2877231a66886d02babfd4793ce8 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/render/swiftshader_renderer.py @@ -0,0 +1,427 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""Implements loading and rendering of meshes. Contains 2 classes: + Shape: Class that exposes high level functions for loading and manipulating + shapes. This currently is bound to assimp + (https://github.com/assimp/assimp). If you want to interface to a different + library, reimplement this class with bindings to your mesh loading library. + + SwiftshaderRenderer: Class that renders Shapes. Currently this uses python + bindings to OpenGL (EGL), bindings to an alternate renderer may be implemented + here. 
+""" + +import numpy as np, os +import cv2, ctypes, logging, os, numpy as np +import pyassimp as assimp +from OpenGL.GLES2 import * +from OpenGL.EGL import * +import src.rotation_utils as ru + +__version__ = 'swiftshader_renderer' + +def get_shaders(modalities): + rgb_shader = 'rgb_flat_color' if 'rgb' in modalities else None + d_shader = 'depth_rgb_encoded' if 'depth' in modalities else None + return rgb_shader, d_shader + +def sample_points_on_faces(vs, fs, rng, n_samples_per_face): + idx = np.repeat(np.arange(fs.shape[0]), n_samples_per_face) + + r = rng.rand(idx.size, 2) + r1 = r[:,:1]; r2 = r[:,1:]; sqrt_r1 = np.sqrt(r1); + + v1 = vs[fs[idx, 0], :]; v2 = vs[fs[idx, 1], :]; v3 = vs[fs[idx, 2], :]; + pts = (1-sqrt_r1)*v1 + sqrt_r1*(1-r2)*v2 + sqrt_r1*r2*v3 + + v1 = vs[fs[:,0], :]; v2 = vs[fs[:, 1], :]; v3 = vs[fs[:, 2], :]; + ar = 0.5*np.sqrt(np.sum(np.cross(v1-v3, v2-v3)**2, 1)) + + return pts, ar, idx + +class Shape(): + def get_pyassimp_load_options(self): + load_flags = assimp.postprocess.aiProcess_Triangulate; + load_flags = load_flags | assimp.postprocess.aiProcess_SortByPType; + load_flags = load_flags | assimp.postprocess.aiProcess_OptimizeMeshes; + load_flags = load_flags | assimp.postprocess.aiProcess_RemoveRedundantMaterials; + load_flags = load_flags | assimp.postprocess.aiProcess_FindDegenerates; + load_flags = load_flags | assimp.postprocess.aiProcess_GenSmoothNormals; + load_flags = load_flags | assimp.postprocess.aiProcess_JoinIdenticalVertices; + load_flags = load_flags | assimp.postprocess.aiProcess_ImproveCacheLocality; + load_flags = load_flags | assimp.postprocess.aiProcess_GenUVCoords; + load_flags = load_flags | assimp.postprocess.aiProcess_FindInvalidData; + return load_flags + + def __init__(self, obj_file, material_file=None, load_materials=True, + name_prefix='', name_suffix=''): + if material_file is not None: + logging.error('Ignoring material file input, reading them off obj file.') + load_flags = self.get_pyassimp_load_options() + scene = assimp.load(obj_file, processing=load_flags) + filter_ind = self._filter_triangles(scene.meshes) + self.meshes = [scene.meshes[i] for i in filter_ind] + for m in self.meshes: + m.name = name_prefix + m.name + name_suffix + + dir_name = os.path.dirname(obj_file) + # Load materials + materials = None + if load_materials: + materials = [] + for m in self.meshes: + file_name = os.path.join(dir_name, m.material.properties[('file', 1)]) + assert(os.path.exists(file_name)), \ + 'Texture file {:s} foes not exist.'.format(file_name) + img_rgb = cv2.imread(file_name)[::-1,:,::-1] + if img_rgb.shape[0] != img_rgb.shape[1]: + logging.warn('Texture image not square.') + sz = np.maximum(img_rgb.shape[0], img_rgb.shape[1]) + sz = int(np.power(2., np.ceil(np.log2(sz)))) + img_rgb = cv2.resize(img_rgb, (sz,sz), interpolation=cv2.INTER_LINEAR) + else: + sz = img_rgb.shape[0] + sz_ = int(np.power(2., np.ceil(np.log2(sz)))) + if sz != sz_: + logging.warn('Texture image not square of power of 2 size. 
' + + 'Changing size from %d to %d.', sz, sz_) + sz = sz_ + img_rgb = cv2.resize(img_rgb, (sz,sz), interpolation=cv2.INTER_LINEAR) + materials.append(img_rgb) + self.scene = scene + self.materials = materials + + def _filter_triangles(self, meshes): + select = [] + for i in range(len(meshes)): + if meshes[i].primitivetypes == 4: + select.append(i) + return select + + def flip_shape(self): + for m in self.meshes: + m.vertices[:,1] = -m.vertices[:,1] + bb = m.faces*1 + bb[:,1] = m.faces[:,2] + bb[:,2] = m.faces[:,1] + m.faces = bb + # m.vertices[:,[0,1]] = m.vertices[:,[1,0]] + + def get_vertices(self): + vs = [] + for m in self.meshes: + vs.append(m.vertices) + vss = np.concatenate(vs, axis=0) + return vss, vs + + def get_faces(self): + vs = [] + for m in self.meshes: + v = m.faces + vs.append(v) + return vs + + def get_number_of_meshes(self): + return len(self.meshes) + + def scale(self, sx=1., sy=1., sz=1.): + pass + + def sample_points_on_face_of_shape(self, i, n_samples_per_face, sc): + v = self.meshes[i].vertices*sc + f = self.meshes[i].faces + p, face_areas, face_idx = sample_points_on_faces( + v, f, np.random.RandomState(0), n_samples_per_face) + return p, face_areas, face_idx + + def __del__(self): + scene = self.scene + assimp.release(scene) + +class SwiftshaderRenderer(): + def __init__(self): + self.entities = {} + + def init_display(self, width, height, fov, z_near, z_far, rgb_shader, + d_shader): + self.init_renderer_egl(width, height) + dir_path = os.path.dirname(os.path.realpath(__file__)) + if d_shader is not None and rgb_shader is not None: + logging.fatal('Does not support setting both rgb_shader and d_shader.') + + if d_shader is not None: + assert rgb_shader is None + shader = d_shader + self.modality = 'depth' + + if rgb_shader is not None: + assert d_shader is None + shader = rgb_shader + self.modality = 'rgb' + + self.create_shaders(os.path.join(dir_path, shader+'.vp'), + os.path.join(dir_path, shader + '.fp')) + aspect = width*1./(height*1.) 
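+    # set_camera (below) builds the perspective projection from the vertical
+    # field of view: the near-plane height is 2*z_near*tan(fov/2) and its width
+    # is that height times the aspect ratio. Illustrative numbers: with the
+    # config's fov=60 degrees and z_near=0.05, the near plane is ~0.0577 units tall.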
+ self.set_camera(fov, z_near, z_far, aspect) + + def init_renderer_egl(self, width, height): + major,minor = ctypes.c_long(),ctypes.c_long() + logging.info('init_renderer_egl: EGL_DEFAULT_DISPLAY: %s', EGL_DEFAULT_DISPLAY) + + egl_display = eglGetDisplay(EGL_DEFAULT_DISPLAY) + logging.info('init_renderer_egl: egl_display: %s', egl_display) + + eglInitialize(egl_display, major, minor) + logging.info('init_renderer_egl: EGL_OPENGL_API, EGL_OPENGL_ES_API: %s, %s', + EGL_OPENGL_API, EGL_OPENGL_ES_API) + eglBindAPI(EGL_OPENGL_ES_API) + + num_configs = ctypes.c_long() + configs = (EGLConfig*1)() + local_attributes = [EGL_RED_SIZE, 8, EGL_GREEN_SIZE, 8, EGL_BLUE_SIZE, 8, + EGL_DEPTH_SIZE, 16, EGL_SURFACE_TYPE, EGL_PBUFFER_BIT, + EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT, EGL_NONE,] + logging.error('init_renderer_egl: local attributes: %s', local_attributes) + local_attributes = arrays.GLintArray.asArray(local_attributes) + success = eglChooseConfig(egl_display, local_attributes, configs, 1, num_configs) + logging.error('init_renderer_egl: eglChooseConfig success, num_configs: %d, %d', success, num_configs.value) + egl_config = configs[0] + + + context_attributes = [EGL_CONTEXT_CLIENT_VERSION, 2, EGL_NONE] + context_attributes = arrays.GLintArray.asArray(context_attributes) + egl_context = eglCreateContext(egl_display, egl_config, EGL_NO_CONTEXT, context_attributes) + + buffer_attributes = [EGL_WIDTH, width, EGL_HEIGHT, height, EGL_NONE] + buffer_attributes = arrays.GLintArray.asArray(buffer_attributes) + egl_surface = eglCreatePbufferSurface(egl_display, egl_config, buffer_attributes) + + + eglMakeCurrent(egl_display, egl_surface, egl_surface, egl_context) + logging.error("init_renderer_egl: egl_display: %s egl_surface: %s, egl_config: %s", egl_display, egl_surface, egl_context) + + glViewport(0, 0, width, height); + + self.egl_display = egl_display + self.egl_surface = egl_surface + self.egl_config = egl_config + self.egl_mapping = {} + self.render_timer = None + self.load_timer = None + self.height = height + self.width = width + + def create_shaders(self, v_shader_file, f_shader_file): + v_shader = glCreateShader(GL_VERTEX_SHADER) + with open(v_shader_file, 'r') as f: + ls = '' + for l in f: + ls = ls + l + glShaderSource(v_shader, ls) + glCompileShader(v_shader); + assert(glGetShaderiv(v_shader, GL_COMPILE_STATUS) == 1) + + f_shader = glCreateShader(GL_FRAGMENT_SHADER) + with open(f_shader_file, 'r') as f: + ls = '' + for l in f: + ls = ls + l + glShaderSource(f_shader, ls) + glCompileShader(f_shader); + assert(glGetShaderiv(f_shader, GL_COMPILE_STATUS) == 1) + + egl_program = glCreateProgram(); + assert(egl_program) + glAttachShader(egl_program, v_shader) + glAttachShader(egl_program, f_shader) + glLinkProgram(egl_program); + assert(glGetProgramiv(egl_program, GL_LINK_STATUS) == 1) + glUseProgram(egl_program) + + glBindAttribLocation(egl_program, 0, "aPosition") + glBindAttribLocation(egl_program, 1, "aColor") + glBindAttribLocation(egl_program, 2, "aTextureCoord") + + self.egl_program = egl_program + self.egl_mapping['vertexs'] = 0 + self.egl_mapping['vertexs_color'] = 1 + self.egl_mapping['vertexs_tc'] = 2 + + glClearColor(0.0, 0.0, 0.0, 1.0); + # glEnable(GL_CULL_FACE); glCullFace(GL_BACK); + glEnable(GL_DEPTH_TEST); + + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) + + def set_camera(self, fov_vertical, z_near, z_far, aspect): + width = 2*np.tan(np.deg2rad(fov_vertical)/2.0)*z_near*aspect; + height = 2*np.tan(np.deg2rad(fov_vertical)/2.0)*z_near; + egl_program = self.egl_program + c 
= np.eye(4, dtype=np.float32) + c[3,3] = 0 + c[3,2] = -1 + c[2,2] = -(z_near+z_far)/(z_far-z_near) + c[2,3] = -2.0*(z_near*z_far)/(z_far-z_near) + c[0,0] = 2.0*z_near/width + c[1,1] = 2.0*z_near/height + c = c.T + + projection_matrix_o = glGetUniformLocation(egl_program, 'uProjectionMatrix') + projection_matrix = np.eye(4, dtype=np.float32) + projection_matrix[...] = c + projection_matrix = np.reshape(projection_matrix, (-1)) + glUniformMatrix4fv(projection_matrix_o, 1, GL_FALSE, projection_matrix) + + + def load_default_object(self): + v = np.array([[0.0, 0.5, 0.0, 1.0, 1.0, 0.0, 1.0], + [-0.5, -0.5, 0.0, 1.0, 0.0, 1.0, 1.0], + [0.5, -0.5, 0.0, 1.0, 1.0, 1.0, 1.0]], dtype=np.float32) + v = np.concatenate((v,v+0.1), axis=0) + v = np.ascontiguousarray(v, dtype=np.float32) + + vbo = glGenBuffers(1) + glBindBuffer (GL_ARRAY_BUFFER, vbo) + glBufferData (GL_ARRAY_BUFFER, v.dtype.itemsize*v.size, v, GL_STATIC_DRAW) + glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 28, ctypes.c_void_p(0)) + glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 28, ctypes.c_void_p(12)) + glEnableVertexAttribArray(0); + glEnableVertexAttribArray(1); + + self.num_to_render = 6; + + def _actual_render(self): + for entity_id, entity in self.entities.iteritems(): + if entity['visible']: + vbo = entity['vbo'] + tbo = entity['tbo'] + num = entity['num'] + + glBindBuffer(GL_ARRAY_BUFFER, vbo) + glVertexAttribPointer(self.egl_mapping['vertexs'], 3, GL_FLOAT, GL_FALSE, + 20, ctypes.c_void_p(0)) + glVertexAttribPointer(self.egl_mapping['vertexs_tc'], 2, GL_FLOAT, + GL_FALSE, 20, ctypes.c_void_p(12)) + glEnableVertexAttribArray(self.egl_mapping['vertexs']); + glEnableVertexAttribArray(self.egl_mapping['vertexs_tc']); + + glBindTexture(GL_TEXTURE_2D, tbo) + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + glDrawArrays(GL_TRIANGLES, 0, num) + + def render(self, take_screenshot=False, output_type=0): + # self.render_timer.tic() + self._actual_render() + # self.render_timer.toc(log_at=1000, log_str='render timer', type='time') + + np_rgb_img = None + np_d_img = None + c = 1000. 
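+    # c mirrors kFixedPointFraction in depth_rgb_encoded.fp: the shader spreads
+    # depth_in_meters * 1000 across the R (least significant), G and B bytes,
+    # and the block below undoes that encoding. Illustrative example: a depth
+    # of 2.5m becomes 2500 counts, i.e. R=205, G=9, B=0, which decodes back as
+    # (205 + 9*255 + 0*255*255) / 1000 = 2.5m.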
+ if take_screenshot: + if self.modality == 'rgb': + screenshot_rgba = np.zeros((self.height, self.width, 4), dtype=np.uint8) + glReadPixels(0, 0, self.width, self.height, GL_RGBA, GL_UNSIGNED_BYTE, screenshot_rgba) + np_rgb_img = screenshot_rgba[::-1,:,:3]; + + if self.modality == 'depth': + screenshot_d = np.zeros((self.height, self.width, 4), dtype=np.uint8) + glReadPixels(0, 0, self.width, self.height, GL_RGBA, GL_UNSIGNED_BYTE, screenshot_d) + np_d_img = screenshot_d[::-1,:,:3]; + np_d_img = np_d_img[:,:,2]*(255.*255./c) + np_d_img[:,:,1]*(255./c) + np_d_img[:,:,0]*(1./c) + np_d_img = np_d_img.astype(np.float32) + np_d_img[np_d_img == 0] = np.NaN + np_d_img = np_d_img[:,:,np.newaxis] + + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) + return np_rgb_img, np_d_img + + def _load_mesh_into_gl(self, mesh, material): + vvt = np.concatenate((mesh.vertices, mesh.texturecoords[0,:,:2]), axis=1) + vvt = np.ascontiguousarray(vvt[mesh.faces.reshape((-1)),:], dtype=np.float32) + num = vvt.shape[0] + vvt = np.reshape(vvt, (-1)) + + vbo = glGenBuffers(1) + glBindBuffer(GL_ARRAY_BUFFER, vbo) + glBufferData(GL_ARRAY_BUFFER, vvt.dtype.itemsize*vvt.size, vvt, GL_STATIC_DRAW) + + tbo = glGenTextures(1) + glBindTexture(GL_TEXTURE_2D, tbo) + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, material.shape[1], + material.shape[0], 0, GL_RGB, GL_UNSIGNED_BYTE, + np.reshape(material, (-1))) + return num, vbo, tbo + + def load_shapes(self, shapes): + entities = self.entities + entity_ids = [] + for i, shape in enumerate(shapes): + for j in range(len(shape.meshes)): + name = shape.meshes[j].name + assert name not in entities, '{:s} entity already exists.'.format(name) + num, vbo, tbo = self._load_mesh_into_gl(shape.meshes[j], shape.materials[j]) + entities[name] = {'num': num, 'vbo': vbo, 'tbo': tbo, 'visible': False} + entity_ids.append(name) + return entity_ids + + def set_entity_visible(self, entity_ids, visibility): + for entity_id in entity_ids: + self.entities[entity_id]['visible'] = visibility + + def position_camera(self, camera_xyz, lookat_xyz, up): + camera_xyz = np.array(camera_xyz) + lookat_xyz = np.array(lookat_xyz) + up = np.array(up) + lookat_to = lookat_xyz - camera_xyz + lookat_from = np.array([0, 1., 0.]) + up_from = np.array([0, 0., 1.]) + up_to = up * 1. 
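+    # Descriptive note on what follows: the renderer's default camera looks
+    # along +y with +z up (lookat_from, up_from). rotate_camera_to_point_at
+    # returns the rotation taking that default frame to the requested
+    # lookat/up; the view matrix composes its transpose with a translation by
+    # -camera_xyz, and flip_yz then permutes axes so the look direction maps to
+    # -z and up maps to +y, the GL camera convention.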
+ # np.set_printoptions(precision=2, suppress=True) + # print up_from, lookat_from, up_to, lookat_to + r = ru.rotate_camera_to_point_at(up_from, lookat_from, up_to, lookat_to) + R = np.eye(4, dtype=np.float32) + R[:3,:3] = r + + t = np.eye(4, dtype=np.float32) + t[:3,3] = -camera_xyz + + view_matrix = np.dot(R.T, t) + flip_yz = np.eye(4, dtype=np.float32) + flip_yz[1,1] = 0; flip_yz[2,2] = 0; flip_yz[1,2] = 1; flip_yz[2,1] = -1; + view_matrix = np.dot(flip_yz, view_matrix) + view_matrix = view_matrix.T + # print np.concatenate((R, t, view_matrix), axis=1) + view_matrix = np.reshape(view_matrix, (-1)) + view_matrix_o = glGetUniformLocation(self.egl_program, 'uViewMatrix') + glUniformMatrix4fv(view_matrix_o, 1, GL_FALSE, view_matrix) + return None, None #camera_xyz, q + + def clear_scene(self): + keys = self.entities.keys() + for entity_id in keys: + entity = self.entities.pop(entity_id, None) + vbo = entity['vbo'] + tbo = entity['tbo'] + num = entity['num'] + glDeleteBuffers(1, [vbo]) + glDeleteTextures(1, [tbo]) + + def __del__(self): + self.clear_scene() + eglMakeCurrent(self.egl_display, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT) + eglDestroySurface(self.egl_display, self.egl_surface) + eglTerminate(self.egl_display) diff --git a/models/research/cognitive_mapping_and_planning/requirements.txt b/models/research/cognitive_mapping_and_planning/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..306c807a6c9fd9404afa1c05108e5e835e84edc6 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/requirements.txt @@ -0,0 +1,9 @@ +numpy +pillow +PyOpenGL +PyOpenGL-accelerate +six +networkx +scikit-image +scipy +opencv-python diff --git a/models/research/cognitive_mapping_and_planning/scripts/__init__.py b/models/research/cognitive_mapping_and_planning/scripts/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/cognitive_mapping_and_planning/scripts/script_distill.py b/models/research/cognitive_mapping_and_planning/scripts/script_distill.py new file mode 100644 index 0000000000000000000000000000000000000000..010c690412ed28011146ab44109dc099d02324e7 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/scripts/script_distill.py @@ -0,0 +1,177 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r""" Script to setup the grid moving agent. 
+ +blaze build --define=ION_GFX_OGLES20=1 -c opt --copt=-mavx --config=cuda_clang \ + learning/brain/public/tensorflow_std_server{,_gpu} \ + experimental/users/saurabhgupta/navigation/cmp/scripts/script_distill.par \ + experimental/users/saurabhgupta/navigation/cmp/scripts/script_distill + + +./blaze-bin/experimental/users/saurabhgupta/navigation/cmp/scripts/script_distill \ + --logdir=/cns/iq-d/home/saurabhgupta/output/stanford-distill/local/v0/ \ + --config_name 'v0+train' --gfs_user robot-intelligence-gpu + +""" +import sys, os, numpy as np +import copy +import argparse, pprint +import time +import cProfile + + +import tensorflow as tf +from tensorflow.contrib import slim +from tensorflow.python.framework import ops +from tensorflow.contrib.framework.python.ops import variables + +import logging +from tensorflow.python.platform import gfile +from tensorflow.python.platform import app +from tensorflow.python.platform import flags +from cfgs import config_distill +from tfcode import tf_utils +import src.utils as utils +import src.file_utils as fu +import tfcode.distillation as distill +import datasets.nav_env as nav_env + +FLAGS = flags.FLAGS + +flags.DEFINE_string('master', 'local', + 'The name of the TensorFlow master to use.') +flags.DEFINE_integer('ps_tasks', 0, 'The number of parameter servers. If the ' + 'value is 0, then the parameters are handled locally by ' + 'the worker.') +flags.DEFINE_integer('task', 0, 'The Task ID. This value is used when training ' + 'with multiple workers to identify each worker.') + +flags.DEFINE_integer('num_workers', 1, '') + +flags.DEFINE_string('config_name', '', '') + +flags.DEFINE_string('logdir', '', '') + +def main(_): + args = config_distill.get_args_for_config(FLAGS.config_name) + args.logdir = FLAGS.logdir + args.solver.num_workers = FLAGS.num_workers + args.solver.task = FLAGS.task + args.solver.ps_tasks = FLAGS.ps_tasks + args.solver.master = FLAGS.master + + args.buildinger.env_class = nav_env.MeshMapper + fu.makedirs(args.logdir) + args.buildinger.logdir = args.logdir + R = nav_env.get_multiplexor_class(args.buildinger, args.solver.task) + + if False: + pr = cProfile.Profile() + pr.enable() + rng = np.random.RandomState(0) + for i in range(1): + b, instances_perturbs = R.sample_building(rng) + inputs = b.worker(*(instances_perturbs)) + for j in range(inputs['imgs'].shape[0]): + p = os.path.join('tmp', '{:d}.png'.format(j)) + img = inputs['imgs'][j,0,:,:,:3]*1 + img = (img).astype(np.uint8) + fu.write_image(p, img) + print(inputs['imgs'].shape) + inputs = R.pre(inputs) + pr.disable() + pr.print_stats(2) + + if args.control.train: + if not gfile.Exists(args.logdir): + gfile.MakeDirs(args.logdir) + + m = utils.Foo() + m.tf_graph = tf.Graph() + + config = tf.ConfigProto() + config.device_count['GPU'] = 1 + config.gpu_options.allow_growth = True + config.gpu_options.per_process_gpu_memory_fraction = 0.8 + + with m.tf_graph.as_default(): + with tf.device(tf.train.replica_device_setter(args.solver.ps_tasks)): + m = distill.setup_to_run(m, args, is_training=True, + batch_norm_is_training=True) + + train_step_kwargs = distill.setup_train_step_kwargs_mesh( + m, R, os.path.join(args.logdir, 'train'), + rng_seed=args.solver.task, is_chief=args.solver.task==0, iters=1, + train_display_interval=args.summary.display_interval) + + final_loss = slim.learning.train( + train_op=m.train_op, + logdir=args.logdir, + master=args.solver.master, + is_chief=args.solver.task == 0, + number_of_steps=args.solver.max_steps, + train_step_fn=tf_utils.train_step_custom, + 
train_step_kwargs=train_step_kwargs, + global_step=m.global_step_op, + init_op=m.init_op, + init_fn=m.init_fn, + sync_optimizer=m.sync_optimizer, + saver=m.saver_op, + summary_op=None, session_config=config) + + if args.control.test: + m = utils.Foo() + m.tf_graph = tf.Graph() + checkpoint_dir = os.path.join(format(args.logdir)) + with m.tf_graph.as_default(): + m = distill.setup_to_run(m, args, is_training=False, + batch_norm_is_training=args.control.force_batchnorm_is_training_at_test) + + train_step_kwargs = distill.setup_train_step_kwargs_mesh( + m, R, os.path.join(args.logdir, args.control.test_name), + rng_seed=args.solver.task+1, is_chief=args.solver.task==0, + iters=args.summary.test_iters, train_display_interval=None) + + sv = slim.learning.supervisor.Supervisor( + graph=ops.get_default_graph(), logdir=None, init_op=m.init_op, + summary_op=None, summary_writer=None, global_step=None, saver=m.saver_op) + + last_checkpoint = None + while True: + last_checkpoint = slim.evaluation.wait_for_new_checkpoint(checkpoint_dir, last_checkpoint) + checkpoint_iter = int(os.path.basename(last_checkpoint).split('-')[1]) + start = time.time() + logging.info('Starting evaluation at %s using checkpoint %s.', + time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime()), + last_checkpoint) + + config = tf.ConfigProto() + config.device_count['GPU'] = 1 + config.gpu_options.allow_growth = True + config.gpu_options.per_process_gpu_memory_fraction = 0.8 + + with sv.managed_session(args.solver.master,config=config, + start_standard_services=False) as sess: + sess.run(m.init_op) + sv.saver.restore(sess, last_checkpoint) + sv.start_queue_runners(sess) + vals, _ = tf_utils.train_step_custom( + sess, None, m.global_step_op, train_step_kwargs, mode='val') + if checkpoint_iter >= args.solver.max_steps: + break + +if __name__ == '__main__': + app.run() diff --git a/models/research/cognitive_mapping_and_planning/scripts/script_download_init_models.sh b/models/research/cognitive_mapping_and_planning/scripts/script_download_init_models.sh new file mode 100644 index 0000000000000000000000000000000000000000..1900bd0b03566d29dac8a8de5f4fce623be98a92 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/scripts/script_download_init_models.sh @@ -0,0 +1,18 @@ +# Script to download models to initialize the RGB and D models for training.We +# use ResNet-v2-50 for both modalities. + +mkdir -p data/init_models +cd data/init_models + +# RGB Models are initialized by pre-training on ImageNet. +mkdir -p resnet_v2_50 +RGB_URL="http://download.tensorflow.org/models/resnet_v2_50_2017_04_14.tar.gz" +wget $RGB_URL +tar -xf resnet_v2_50_2017_04_14.tar.gz -C resnet_v2_50 + +# Depth models are initialized by distilling the RGB model to D images using +# Cross-Modal Distillation (https://arxiv.org/abs/1507.00448). +mkdir -p distill_rgb_to_d_resnet_v2_50 +D_URL="http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/distill_rgb_to_d_resnet_v2_50.tar" +wget $D_URL +tar -xf distill_rgb_to_d_resnet_v2_50.tar -C distill_rgb_to_d_resnet_v2_50 diff --git a/models/research/cognitive_mapping_and_planning/scripts/script_env_vis.py b/models/research/cognitive_mapping_and_planning/scripts/script_env_vis.py new file mode 100644 index 0000000000000000000000000000000000000000..3690ff484fea9344db6fbe20ac54731200f0c84e --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/scripts/script_env_vis.py @@ -0,0 +1,186 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A simple python function to walk in the enviornments that we have created. +PYTHONPATH='.' PYOPENGL_PLATFORM=egl python scripts/script_env_vis.py \ + --dataset_name sbpd --building_name area3 +""" +import sys +import numpy as np +import matplotlib +matplotlib.use('TkAgg') +from PIL import ImageTk, Image +import Tkinter as tk +import logging +from tensorflow.python.platform import app +from tensorflow.python.platform import flags + +import datasets.nav_env_config as nec +import datasets.nav_env as nav_env +import cv2 +from datasets import factory +import render.swiftshader_renderer as renderer + +SwiftshaderRenderer = renderer.SwiftshaderRenderer +VisualNavigationEnv = nav_env.VisualNavigationEnv + +FLAGS = flags.FLAGS +flags.DEFINE_string('dataset_name', 'sbpd', 'Name of the dataset.') +flags.DEFINE_float('fov', 60., 'Field of view') +flags.DEFINE_integer('image_size', 512, 'Size of the image.') +flags.DEFINE_string('building_name', '', 'Name of the building.') + +def get_args(): + navtask = nec.nav_env_base_config() + navtask.task_params.type = 'rng_rejection_sampling_many' + navtask.task_params.rejection_sampling_M = 2000 + navtask.task_params.min_dist = 10 + sz = FLAGS.image_size + navtask.camera_param.fov = FLAGS.fov + navtask.camera_param.height = sz + navtask.camera_param.width = sz + navtask.task_params.img_height = sz + navtask.task_params.img_width = sz + + # navtask.task_params.semantic_task.class_map_names = ['chair', 'door', 'table'] + # navtask.task_params.type = 'to_nearest_obj_acc' + + logging.info('navtask: %s', navtask) + return navtask + +def load_building(dataset_name, building_name): + dataset = factory.get_dataset(dataset_name) + + navtask = get_args() + cp = navtask.camera_param + rgb_shader, d_shader = renderer.get_shaders(cp.modalities) + r_obj = SwiftshaderRenderer() + r_obj.init_display(width=cp.width, height=cp.height, + fov=cp.fov, z_near=cp.z_near, z_far=cp.z_far, + rgb_shader=rgb_shader, d_shader=d_shader) + r_obj.clear_scene() + b = VisualNavigationEnv(robot=navtask.robot, env=navtask.env, + task_params=navtask.task_params, + building_name=building_name, flip=False, + logdir=None, building_loader=dataset, + r_obj=r_obj) + b.load_building_into_scene() + b.set_building_visibility(False) + return b + +def walk_through(b): + # init agent at a random location in the environment. + init_env_state = b.reset([np.random.RandomState(0), np.random.RandomState(0)]) + + global current_node + rng = np.random.RandomState(0) + current_node = rng.choice(b.task.nodes.shape[0]) + + root = tk.Tk() + image = b.render_nodes(b.task.nodes[[current_node],:])[0] + print(image.shape) + image = image.astype(np.uint8) + im = Image.fromarray(image) + im = ImageTk.PhotoImage(im) + panel = tk.Label(root, image=im) + + map_size = b.traversible.shape + sc = np.max(map_size)/256. 
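+  # Descriptive note: the block below renders a 256x256 overhead view of the
+  # traversible map. The sampling grid is centred on the map midpoint and
+  # scaled by sc so the whole building fits, then colour-mapped with
+  # cv2.applyColorMap for display in the side panel.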
+  loc = np.array([[map_size[1]/2., map_size[0]/2.]])
+  x_axis = np.zeros_like(loc); x_axis[:,1] = sc
+  y_axis = np.zeros_like(loc); y_axis[:,0] = -sc
+  cum_fs, cum_valid = nav_env.get_map_to_predict(loc, x_axis, y_axis,
+                                                 map=b.traversible*1.,
+                                                 map_size=256)
+  cum_fs = cum_fs[0]
+  cum_fs = cv2.applyColorMap((cum_fs*255).astype(np.uint8), cv2.COLORMAP_JET)
+  im = Image.fromarray(cum_fs)
+  im = ImageTk.PhotoImage(im)
+  panel_overhead = tk.Label(root, image=im)
+
+  def refresh():
+    global current_node
+    image = b.render_nodes(b.task.nodes[[current_node],:])[0]
+    image = image.astype(np.uint8)
+    im = Image.fromarray(image)
+    im = ImageTk.PhotoImage(im)
+    panel.configure(image=im)
+    panel.image = im
+
+  def left_key(event):
+    global current_node
+    current_node = b.take_action([current_node], [2], 1)[0][0]
+    refresh()
+
+  def up_key(event):
+    global current_node
+    current_node = b.take_action([current_node], [3], 1)[0][0]
+    refresh()
+
+  def right_key(event):
+    global current_node
+    current_node = b.take_action([current_node], [1], 1)[0][0]
+    refresh()
+
+  def quit(event):
+    root.destroy()
+
+  panel_overhead.grid(row=4, column=5, rowspan=1, columnspan=1,
+                      sticky=tk.W+tk.E+tk.N+tk.S)
+  panel.bind('<Left>', left_key)
+  panel.bind('<Up>', up_key)
+  panel.bind('<Right>', right_key)
+  panel.bind('q', quit)
+  panel.focus_set()
+  panel.grid(row=0, column=0, rowspan=5, columnspan=5,
+             sticky=tk.W+tk.E+tk.N+tk.S)
+  root.mainloop()
+
+def simple_window():
+  root = tk.Tk()
+
+  image = np.zeros((128, 128, 3), dtype=np.uint8)
+  image[32:96, 32:96, 0] = 255
+  im = Image.fromarray(image)
+  im = ImageTk.PhotoImage(im)
+
+  image = np.zeros((128, 128, 3), dtype=np.uint8)
+  image[32:96, 32:96, 1] = 255
+  im2 = Image.fromarray(image)
+  im2 = ImageTk.PhotoImage(im2)
+
+  panel = tk.Label(root, image=im)
+
+  def left_key(event):
+    panel.configure(image=im2)
+    panel.image = im2
+
+  def quit(event):
+    sys.exit()
+
+  panel.bind('<Left>', left_key)
+  panel.bind('<Up>', left_key)
+  panel.bind('<Right>', left_key)
+  panel.bind('q', quit)
+  panel.focus_set()
+  panel.pack(side = "bottom", fill = "both", expand = "yes")
+  root.mainloop()
+
+def main(_):
+  b = load_building(FLAGS.dataset_name, FLAGS.building_name)
+  walk_through(b)
+
+if __name__ == '__main__':
+  app.run()
diff --git a/models/research/cognitive_mapping_and_planning/scripts/script_nav_agent_release.py b/models/research/cognitive_mapping_and_planning/scripts/script_nav_agent_release.py
new file mode 100644
index 0000000000000000000000000000000000000000..dab2819a6fcf100cb2e385e45b7aa694c4c5f033
--- /dev/null
+++ b/models/research/cognitive_mapping_and_planning/scripts/script_nav_agent_release.py
@@ -0,0 +1,253 @@
+# Copyright 2016 The TensorFlow Authors All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+r""" Script to train and test the grid navigation agent.
+Usage:
+  1. Testing a model.
+  CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 \
+  PYTHONPATH='.' 
PYOPENGL_PLATFORM=egl python scripts/script_nav_agent_release.py \ + --config_name cmp.lmap_Msc.clip5.sbpd_d_r2r+bench_test \ + --logdir output/cmp.lmap_Msc.clip5.sbpd_d_r2r + + 2. Training a model (locally). + CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 \ + PYTHONPATH='.' PYOPENGL_PLATFORM=egl python scripts/script_nav_agent_release.py \ + --config_name cmp.lmap_Msc.clip5.sbpd_d_r2r+train_train \ + --logdir output/cmp.lmap_Msc.clip5.sbpd_d_r2r_ + + 3. Training a model (distributed). + # See https://www.tensorflow.org/deploy/distributed on how to setup distributed + # training. + CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 \ + PYTHONPATH='.' PYOPENGL_PLATFORM=egl python scripts/script_nav_agent_release.py \ + --config_name cmp.lmap_Msc.clip5.sbpd_d_r2r+train_train \ + --logdir output/cmp.lmap_Msc.clip5.sbpd_d_r2r_ \ + --ps_tasks $num_ps --master $master_name --task $worker_id +""" + +import sys, os, numpy as np +import copy +import argparse, pprint +import time +import cProfile +import platform + + +import tensorflow as tf +from tensorflow.contrib import slim +from tensorflow.python.framework import ops +from tensorflow.contrib.framework.python.ops import variables + +import logging +from tensorflow.python.platform import gfile +from tensorflow.python.platform import app +from tensorflow.python.platform import flags +from cfgs import config_cmp +from cfgs import config_vision_baseline +import datasets.nav_env as nav_env +import src.file_utils as fu +import src.utils as utils +import tfcode.cmp as cmp +from tfcode import tf_utils +from tfcode import vision_baseline_lstm + +FLAGS = flags.FLAGS + +flags.DEFINE_string('master', '', + 'The address of the tensorflow master') +flags.DEFINE_integer('ps_tasks', 0, 'The number of parameter servers. If the ' + 'value is 0, then the parameters are handled locally by ' + 'the worker.') +flags.DEFINE_integer('task', 0, 'The Task ID. 
This value is used when training ' + 'with multiple workers to identify each worker.') + +flags.DEFINE_integer('num_workers', 1, '') + +flags.DEFINE_string('config_name', '', '') + +flags.DEFINE_string('logdir', '', '') + +flags.DEFINE_integer('solver_seed', 0, '') + +flags.DEFINE_integer('delay_start_iters', 20, '') + +logging.basicConfig(level=logging.INFO) + +def main(_): + _launcher(FLAGS.config_name, FLAGS.logdir) + +def _launcher(config_name, logdir): + args = _setup_args(config_name, logdir) + + fu.makedirs(args.logdir) + + if args.control.train: + _train(args) + + if args.control.test: + _test(args) + +def get_args_for_config(config_name): + configs = config_name.split('.') + type = configs[0] + config_name = '.'.join(configs[1:]) + if type == 'cmp': + args = config_cmp.get_args_for_config(config_name) + args.setup_to_run = cmp.setup_to_run + args.setup_train_step_kwargs = cmp.setup_train_step_kwargs + + elif type == 'bl': + args = config_vision_baseline.get_args_for_config(config_name) + args.setup_to_run = vision_baseline_lstm.setup_to_run + args.setup_train_step_kwargs = vision_baseline_lstm.setup_train_step_kwargs + + else: + logging.fatal('Unknown type: {:s}'.format(type)) + return args + +def _setup_args(config_name, logdir): + args = get_args_for_config(config_name) + args.solver.num_workers = FLAGS.num_workers + args.solver.task = FLAGS.task + args.solver.ps_tasks = FLAGS.ps_tasks + args.solver.master = FLAGS.master + args.solver.seed = FLAGS.solver_seed + args.logdir = logdir + args.navtask.logdir = None + return args + +def _train(args): + container_name = "" + + R = lambda: nav_env.get_multiplexer_class(args.navtask, args.solver.task) + m = utils.Foo() + m.tf_graph = tf.Graph() + + config = tf.ConfigProto() + config.device_count['GPU'] = 1 + + with m.tf_graph.as_default(): + with tf.device(tf.train.replica_device_setter(args.solver.ps_tasks, + merge_devices=True)): + with tf.container(container_name): + m = args.setup_to_run(m, args, is_training=True, + batch_norm_is_training=True, summary_mode='train') + + train_step_kwargs = args.setup_train_step_kwargs( + m, R(), os.path.join(args.logdir, 'train'), rng_seed=args.solver.task, + is_chief=args.solver.task==0, + num_steps=args.navtask.task_params.num_steps*args.navtask.task_params.num_goals, iters=1, + train_display_interval=args.summary.display_interval, + dagger_sample_bn_false=args.arch.dagger_sample_bn_false) + + delay_start = (args.solver.task*(args.solver.task+1))/2 * FLAGS.delay_start_iters + logging.error('delaying start for task %d by %d steps.', + args.solver.task, delay_start) + + additional_args = {} + final_loss = slim.learning.train( + train_op=m.train_op, + logdir=args.logdir, + master=args.solver.master, + is_chief=args.solver.task == 0, + number_of_steps=args.solver.max_steps, + train_step_fn=tf_utils.train_step_custom_online_sampling, + train_step_kwargs=train_step_kwargs, + global_step=m.global_step_op, + init_op=m.init_op, + init_fn=m.init_fn, + sync_optimizer=m.sync_optimizer, + saver=m.saver_op, + startup_delay_steps=delay_start, + summary_op=None, session_config=config, **additional_args) + +def _test(args): + args.solver.master = '' + container_name = "" + checkpoint_dir = os.path.join(format(args.logdir)) + logging.error('Checkpoint_dir: %s', args.logdir) + + config = tf.ConfigProto(); + config.device_count['GPU'] = 1; + + m = utils.Foo() + m.tf_graph = tf.Graph() + + rng_data_seed = 0; rng_action_seed = 0; + R = lambda: nav_env.get_multiplexer_class(args.navtask, rng_data_seed) + with 
m.tf_graph.as_default(): + with tf.container(container_name): + m = args.setup_to_run( + m, args, is_training=False, + batch_norm_is_training=args.control.force_batchnorm_is_training_at_test, + summary_mode=args.control.test_mode) + train_step_kwargs = args.setup_train_step_kwargs( + m, R(), os.path.join(args.logdir, args.control.test_name), + rng_seed=rng_data_seed, is_chief=True, + num_steps=args.navtask.task_params.num_steps*args.navtask.task_params.num_goals, + iters=args.summary.test_iters, train_display_interval=None, + dagger_sample_bn_false=args.arch.dagger_sample_bn_false) + + saver = slim.learning.tf_saver.Saver(variables.get_variables_to_restore()) + + sv = slim.learning.supervisor.Supervisor( + graph=ops.get_default_graph(), logdir=None, init_op=m.init_op, + summary_op=None, summary_writer=None, global_step=None, saver=m.saver_op) + + last_checkpoint = None + reported = False + while True: + last_checkpoint_ = None + while last_checkpoint_ is None: + last_checkpoint_ = slim.evaluation.wait_for_new_checkpoint( + checkpoint_dir, last_checkpoint, seconds_to_sleep=10, timeout=60) + if last_checkpoint_ is None: break + + last_checkpoint = last_checkpoint_ + checkpoint_iter = int(os.path.basename(last_checkpoint).split('-')[1]) + + logging.info('Starting evaluation at %s using checkpoint %s.', + time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime()), + last_checkpoint) + + if (args.control.only_eval_when_done == False or + checkpoint_iter >= args.solver.max_steps): + start = time.time() + logging.info('Starting evaluation at %s using checkpoint %s.', + time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime()), + last_checkpoint) + + with sv.managed_session(args.solver.master, config=config, + start_standard_services=False) as sess: + sess.run(m.init_op) + sv.saver.restore(sess, last_checkpoint) + sv.start_queue_runners(sess) + if args.control.reset_rng_seed: + train_step_kwargs['rng_data'] = [np.random.RandomState(rng_data_seed), + np.random.RandomState(rng_data_seed)] + train_step_kwargs['rng_action'] = np.random.RandomState(rng_action_seed) + vals, _ = tf_utils.train_step_custom_online_sampling( + sess, None, m.global_step_op, train_step_kwargs, + mode=args.control.test_mode) + should_stop = False + + if checkpoint_iter >= args.solver.max_steps: + should_stop = True + + if should_stop: + break + +if __name__ == '__main__': + app.run() diff --git a/models/research/cognitive_mapping_and_planning/scripts/script_plot_trajectory.py b/models/research/cognitive_mapping_and_planning/scripts/script_plot_trajectory.py new file mode 100644 index 0000000000000000000000000000000000000000..08273a83b512fa3100f7df6e20d41d666b037aad --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/scripts/script_plot_trajectory.py @@ -0,0 +1,339 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +r""" +Code for plotting trajectories in the top view, and also plot first person views +from saved trajectories. Does not run the network but only loads the mesh data +to plot the view points. + CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 + PYTHONPATH='.' PYOPENGL_PLATFORM=egl python scripts/script_plot_trajectory.py \ + --first_person --num_steps 40 \ + --config_name cmp.lmap_Msc.clip5.sbpd_d_r2r \ + --imset test --alsologtostderr --base_dir output --out_dir vis + +""" +import os, sys, numpy as np, copy +import matplotlib +matplotlib.use("Agg") +import matplotlib.pyplot as plt +import matplotlib.animation as animation +from matplotlib.gridspec import GridSpec + +import tensorflow as tf +from tensorflow.contrib import slim +import cv2 +import logging +from tensorflow.python.platform import gfile +from tensorflow.python.platform import app +from tensorflow.python.platform import flags + +from datasets import nav_env +import scripts.script_nav_agent_release as sna +import src.file_utils as fu +from src import graph_utils +from src import utils +FLAGS = flags.FLAGS + +flags.DEFINE_string('out_dir', 'vis', 'Directory where to store the output') +flags.DEFINE_string('type', '', 'Optional type.') +flags.DEFINE_bool('first_person', False, 'Visualize the first person view.') +flags.DEFINE_bool('top_view', False, 'Visualize the trajectory in the top view.') +flags.DEFINE_integer('num_steps', 40, 'Number of steps to run the model for.') +flags.DEFINE_string('imset', 'test', '') +flags.DEFINE_string('base_dir', 'output', 'Cache directory.') + +def _get_suffix_str(): + return '' + + +def _load_trajectory(): + base_dir = FLAGS.base_dir + config_name = FLAGS.config_name+_get_suffix_str() + + dir_name = os.path.join(base_dir, FLAGS.type, config_name) + logging.info('Waiting for snapshot in directory %s.', dir_name) + last_checkpoint = slim.evaluation.wait_for_new_checkpoint(dir_name, None) + checkpoint_iter = int(os.path.basename(last_checkpoint).split('-')[1]) + + # Load the distances. + a = utils.load_variables(os.path.join(dir_name, 'bench_on_'+FLAGS.imset, + 'all_locs_at_t_{:d}.pkl'.format(checkpoint_iter))) + return a + +def _compute_hardness(): + # Load the stanford data to compute the hardness. + if FLAGS.type == '': + args = sna.get_args_for_config(FLAGS.config_name+'+bench_'+FLAGS.imset) + else: + args = sna.get_args_for_config(FLAGS.type+'.'+FLAGS.config_name+'+bench_'+FLAGS.imset) + + args.navtask.logdir = None + R = lambda: nav_env.get_multiplexer_class(args.navtask, 0) + R = R() + + rng_data = [np.random.RandomState(0), np.random.RandomState(0)] + + # Sample a room. + h_dists = [] + gt_dists = [] + for i in range(250): + e = R.sample_env(rng_data) + nodes = e.task.nodes + + # Initialize the agent. 
+ init_env_state = e.reset(rng_data) + + gt_dist_to_goal = [e.episode.dist_to_goal[0][j][s] + for j, s in enumerate(e.episode.start_node_ids)] + + for j in range(args.navtask.task_params.batch_size): + start_node_id = e.episode.start_node_ids[j] + end_node_id =e.episode.goal_node_ids[0][j] + h_dist = graph_utils.heuristic_fn_vec( + nodes[[start_node_id],:], nodes[[end_node_id], :], + n_ori=args.navtask.task_params.n_ori, + step_size=args.navtask.task_params.step_size)[0][0] + gt_dist = e.episode.dist_to_goal[0][j][start_node_id] + h_dists.append(h_dist) + gt_dists.append(gt_dist) + + h_dists = np.array(h_dists) + gt_dists = np.array(gt_dists) + e = R.sample_env([np.random.RandomState(0), np.random.RandomState(0)]) + input = e.get_common_data() + orig_maps = input['orig_maps'][0,0,:,:,0] + return h_dists, gt_dists, orig_maps + +def plot_trajectory_first_person(dt, orig_maps, out_dir): + out_dir = os.path.join(out_dir, FLAGS.config_name+_get_suffix_str(), + FLAGS.imset) + fu.makedirs(out_dir) + + # Load the model so that we can render. + plt.set_cmap('gray') + samples_per_action = 8; wait_at_action = 0; + + Writer = animation.writers['mencoder'] + writer = Writer(fps=3*(samples_per_action+wait_at_action), + metadata=dict(artist='anonymous'), bitrate=1800) + + args = sna.get_args_for_config(FLAGS.config_name + '+bench_'+FLAGS.imset) + args.navtask.logdir = None + navtask_ = copy.deepcopy(args.navtask) + navtask_.camera_param.modalities = ['rgb'] + navtask_.task_params.modalities = ['rgb'] + sz = 512 + navtask_.camera_param.height = sz + navtask_.camera_param.width = sz + navtask_.task_params.img_height = sz + navtask_.task_params.img_width = sz + R = lambda: nav_env.get_multiplexer_class(navtask_, 0) + R = R() + b = R.buildings[0] + + f = [0 for _ in range(wait_at_action)] + \ + [float(_)/samples_per_action for _ in range(samples_per_action)]; + + # Generate things for it to render. + inds_to_do = [] + inds_to_do += [1, 4, 10] #1291, 1268, 1273, 1289, 1302, 1426, 1413, 1449, 1399, 1390] + + for i in inds_to_do: + fig = plt.figure(figsize=(10,8)) + gs = GridSpec(3,4) + gs.update(wspace=0.05, hspace=0.05, left=0.0, top=0.97, right=1.0, bottom=0.) + ax = fig.add_subplot(gs[:,:-1]) + ax1 = fig.add_subplot(gs[0,-1]) + ax2 = fig.add_subplot(gs[1,-1]) + ax3 = fig.add_subplot(gs[2,-1]) + axes = [ax, ax1, ax2, ax3] + # ax = fig.add_subplot(gs[:,:]) + # axes = [ax] + for ax in axes: + ax.set_axis_off() + + node_ids = dt['all_node_ids'][i, :, 0]*1 + # Prune so that last node is not repeated more than 3 times? + if np.all(node_ids[-4:] == node_ids[-1]): + while node_ids[-4] == node_ids[-1]: + node_ids = node_ids[:-1] + num_steps = np.minimum(FLAGS.num_steps, len(node_ids)) + + xyt = b.to_actual_xyt_vec(b.task.nodes[node_ids]) + xyt_diff = xyt[1:,:] - xyt[:-1:,:] + xyt_diff[:,2] = np.mod(xyt_diff[:,2], 4) + ind = np.where(xyt_diff[:,2] == 3)[0] + xyt_diff[ind, 2] = -1 + xyt_diff = np.expand_dims(xyt_diff, axis=1) + to_cat = [xyt_diff*_ for _ in f] + perturbs_all = np.concatenate(to_cat, axis=1) + perturbs_all = np.concatenate([perturbs_all, np.zeros_like(perturbs_all[:,:,:1])], axis=2) + node_ids_all = np.expand_dims(node_ids, axis=1)*1 + node_ids_all = np.concatenate([node_ids_all for _ in f], axis=1) + node_ids_all = np.reshape(node_ids_all[:-1,:], -1) + perturbs_all = np.reshape(perturbs_all, [-1, 4]) + imgs = b.render_nodes(b.task.nodes[node_ids_all,:], perturb=perturbs_all) + + # Get action at each node. 
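# Illustrative sketch (synthetic numbers, not benchmark output): the episode
# "hardness" later computed in main() is 1 - heuristic / geodesic, using the
# h_dists and gt_dists gathered in the loop above.
import numpy as np

h_dists = np.array([10., 10., 40.])   # heuristic (lower-bound) distances
gt_dists = np.array([10., 40., 40.])  # geodesic distances on the graph

hardness = 1. - h_dists / gt_dists
# A straight shot to the goal gives hardness 0; a goal needing a long detour
# relative to the heuristic approaches 1.
print(hardness)  # [0.   0.75 0.  ]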
+ actions = [] + _, action_to_nodes = b.get_feasible_actions(node_ids) + for j in range(num_steps-1): + action_to_node = action_to_nodes[j] + node_to_action = dict(zip(action_to_node.values(), action_to_node.keys())) + actions.append(node_to_action[node_ids[j+1]]) + + def init_fn(): + return fig, + gt_dist_to_goal = [] + + # Render trajectories. + def worker(j): + # Plot the image. + step_number = j/(samples_per_action + wait_at_action) + img = imgs[j]; ax = axes[0]; ax.clear(); ax.set_axis_off(); + img = img.astype(np.uint8); ax.imshow(img); + tt = ax.set_title( + "First Person View\n" + + "Top corners show diagnostics (distance, agents' action) not input to agent.", + fontsize=12) + plt.setp(tt, color='white') + + # Distance to goal. + t = 'Dist to Goal:\n{:2d} steps'.format(int(dt['all_d_at_t'][i, step_number])) + t = ax.text(0.01, 0.99, t, + horizontalalignment='left', + verticalalignment='top', + fontsize=20, color='red', + transform=ax.transAxes, alpha=1.0) + t.set_bbox(dict(color='white', alpha=0.85, pad=-0.1)) + + # Action to take. + action_latex = ['$\odot$ ', '$\curvearrowright$ ', '$\curvearrowleft$ ', r'$\Uparrow$ '] + t = ax.text(0.99, 0.99, action_latex[actions[step_number]], + horizontalalignment='right', + verticalalignment='top', + fontsize=40, color='green', + transform=ax.transAxes, alpha=1.0) + t.set_bbox(dict(color='white', alpha=0.85, pad=-0.1)) + + + # Plot the map top view. + ax = axes[-1] + if j == 0: + # Plot the map + locs = dt['all_locs'][i,:num_steps,:] + goal_loc = dt['all_goal_locs'][i,:,:] + xymin = np.minimum(np.min(goal_loc, axis=0), np.min(locs, axis=0)) + xymax = np.maximum(np.max(goal_loc, axis=0), np.max(locs, axis=0)) + xy1 = (xymax+xymin)/2. - 0.7*np.maximum(np.max(xymax-xymin), 24) + xy2 = (xymax+xymin)/2. + 0.7*np.maximum(np.max(xymax-xymin), 24) + + ax.set_axis_on() + ax.patch.set_facecolor((0.333, 0.333, 0.333)) + ax.set_xticks([]); ax.set_yticks([]); + ax.imshow(orig_maps, origin='lower', vmin=-1.0, vmax=2.0) + ax.plot(goal_loc[:,0], goal_loc[:,1], 'g*', markersize=12) + + locs = dt['all_locs'][i,:1,:] + ax.plot(locs[:,0], locs[:,1], 'b.', markersize=12) + + ax.set_xlim([xy1[0], xy2[0]]) + ax.set_ylim([xy1[1], xy2[1]]) + + locs = dt['all_locs'][i,step_number,:] + locs = np.expand_dims(locs, axis=0) + ax.plot(locs[:,0], locs[:,1], 'r.', alpha=1.0, linewidth=0, markersize=4) + tt = ax.set_title('Trajectory in topview', fontsize=14) + plt.setp(tt, color='white') + return fig, + + line_ani = animation.FuncAnimation(fig, worker, + (num_steps-1)*(wait_at_action+samples_per_action), + interval=500, blit=True, init_func=init_fn) + tmp_file_name = 'tmp.mp4' + line_ani.save(tmp_file_name, writer=writer, savefig_kwargs={'facecolor':'black'}) + out_file_name = os.path.join(out_dir, 'vis_{:04d}.mp4'.format(i)) + print(out_file_name) + + if fu.exists(out_file_name): + gfile.Remove(out_file_name) + gfile.Copy(tmp_file_name, out_file_name) + gfile.Remove(tmp_file_name) + plt.close(fig) + +def plot_trajectory(dt, hardness, orig_maps, out_dir): + out_dir = os.path.join(out_dir, FLAGS.config_name+_get_suffix_str(), + FLAGS.imset) + fu.makedirs(out_dir) + out_file = os.path.join(out_dir, 'all_locs_at_t.pkl') + dt['hardness'] = hardness + utils.save_variables(out_file, dt.values(), dt.keys(), overwrite=True) + + #Plot trajectories onto the maps + plt.set_cmap('gray') + for i in range(4000): + goal_loc = dt['all_goal_locs'][i, :, :] + locs = np.concatenate((dt['all_locs'][i,:,:], + dt['all_locs'][i,:,:]), axis=0) + xymin = np.minimum(np.min(goal_loc, axis=0), 
np.min(locs, axis=0)) + xymax = np.maximum(np.max(goal_loc, axis=0), np.max(locs, axis=0)) + xy1 = (xymax+xymin)/2. - 1.*np.maximum(np.max(xymax-xymin), 24) + xy2 = (xymax+xymin)/2. + 1.*np.maximum(np.max(xymax-xymin), 24) + + fig, ax = utils.tight_imshow_figure(plt, figsize=(6,6)) + ax.set_axis_on() + ax.patch.set_facecolor((0.333, 0.333, 0.333)) + ax.set_xticks([]) + ax.set_yticks([]) + + all_locs = dt['all_locs'][i,:,:]*1 + uniq = np.where(np.any(all_locs[1:,:] != all_locs[:-1,:], axis=1))[0]+1 + uniq = np.sort(uniq).tolist() + uniq.insert(0,0) + uniq = np.array(uniq) + all_locs = all_locs[uniq, :] + + ax.plot(dt['all_locs'][i, 0, 0], + dt['all_locs'][i, 0, 1], 'b.', markersize=24) + ax.plot(dt['all_goal_locs'][i, 0, 0], + dt['all_goal_locs'][i, 0, 1], 'g*', markersize=19) + ax.plot(all_locs[:,0], all_locs[:,1], 'r', alpha=0.4, linewidth=2) + ax.scatter(all_locs[:,0], all_locs[:,1], + c=5+np.arange(all_locs.shape[0])*1./all_locs.shape[0], + cmap='Reds', s=30, linewidth=0) + ax.imshow(orig_maps, origin='lower', vmin=-1.0, vmax=2.0, aspect='equal') + ax.set_xlim([xy1[0], xy2[0]]) + ax.set_ylim([xy1[1], xy2[1]]) + + file_name = os.path.join(out_dir, 'trajectory_{:04d}.png'.format(i)) + print(file_name) + with fu.fopen(file_name, 'w') as f: + plt.savefig(f) + plt.close(fig) + + +def main(_): + a = _load_trajectory() + h_dists, gt_dists, orig_maps = _compute_hardness() + hardness = 1.-h_dists*1./ gt_dists + + if FLAGS.top_view: + plot_trajectory(a, hardness, orig_maps, out_dir=FLAGS.out_dir) + + if FLAGS.first_person: + plot_trajectory_first_person(a, orig_maps, out_dir=FLAGS.out_dir) + +if __name__ == '__main__': + app.run() diff --git a/models/research/cognitive_mapping_and_planning/scripts/script_preprocess_annoations_S3DIS.py b/models/research/cognitive_mapping_and_planning/scripts/script_preprocess_annoations_S3DIS.py new file mode 100644 index 0000000000000000000000000000000000000000..58f32d121acf4c638625079907b02161e808af68 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/scripts/script_preprocess_annoations_S3DIS.py @@ -0,0 +1,197 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +import os +import glob +import numpy as np +import logging +import cPickle +from datasets import nav_env +from datasets import factory +from src import utils +from src import map_utils as mu + +logging.basicConfig(level=logging.INFO) +DATA_DIR = 'data/stanford_building_parser_dataset_raw/' + +mkdir_if_missing = utils.mkdir_if_missing +save_variables = utils.save_variables + +def _get_semantic_maps(building_name, transform, map_, flip, cats): + rooms = get_room_in_building(building_name) + maps = [] + for cat in cats: + maps.append(np.zeros((map_.size[1], map_.size[0]))) + + for r in rooms: + room = load_room(building_name, r, category_list=cats) + classes = room['class_id'] + for i, cat in enumerate(cats): + c_ind = cats.index(cat) + ind = [_ for _, c in enumerate(classes) if c == c_ind] + if len(ind) > 0: + vs = [room['vertexs'][x]*1 for x in ind] + vs = np.concatenate(vs, axis=0) + if transform: + vs = np.array([vs[:,1], vs[:,0], vs[:,2]]).T + vs[:,0] = -vs[:,0] + vs[:,1] += 4.20 + vs[:,0] += 6.20 + vs = vs*100. + if flip: + vs[:,1] = -vs[:,1] + maps[i] = maps[i] + \ + mu._project_to_map(map_, vs, ignore_points_outside_map=True) + return maps + +def _map_building_name(building_name): + b = int(building_name.split('_')[0][4]) + out_name = 'Area_{:d}'.format(b) + if b == 5: + if int(building_name.split('_')[0][5]) == 1: + transform = True + else: + transform = False + else: + transform = False + return out_name, transform + +def get_categories(): + cats = ['beam', 'board', 'bookcase', 'ceiling', 'chair', 'clutter', 'column', + 'door', 'floor', 'sofa', 'table', 'wall', 'window'] + return cats + +def _write_map_files(b_in, b_out, transform): + cats = get_categories() + + env = utils.Foo(padding=10, resolution=5, num_point_threshold=2, + valid_min=-10, valid_max=200, n_samples_per_face=200) + robot = utils.Foo(radius=15, base=10, height=140, sensor_height=120, + camera_elevation_degree=-15) + + building_loader = factory.get_dataset('sbpd') + for flip in [False, True]: + b = nav_env.Building(b_out, robot, env, flip=flip, + building_loader=building_loader) + logging.info("building_in: %s, building_out: %s, transform: %d", b_in, + b_out, transform) + maps = _get_semantic_maps(b_in, transform, b.map, flip, cats) + maps = np.transpose(np.array(maps), axes=[1,2,0]) + + # Load file from the cache. + file_name = '{:s}_{:d}_{:d}_{:d}_{:d}_{:d}_{:d}.pkl' + file_name = file_name.format(b.building_name, b.map.size[0], b.map.size[1], + b.map.origin[0], b.map.origin[1], + b.map.resolution, flip) + out_file = os.path.join(DATA_DIR, 'processing', 'class-maps', file_name) + logging.info('Writing semantic maps to %s.', out_file) + save_variables(out_file, [maps, cats], ['maps', 'cats'], overwrite=True) + +def _transform_area5b(room_dimension): + for a in room_dimension.keys(): + r = room_dimension[a]*1 + r[[0,1,3,4]] = r[[1,0,4,3]] + r[[0,3]] = -r[[3,0]] + r[[1,4]] += 4.20 + r[[0,3]] += 6.20 + room_dimension[a] = r + return room_dimension + +def collect_room(building_name, room_name): + room_dir = os.path.join(DATA_DIR, 'Stanford3dDataset_v1.2', building_name, + room_name, 'Annotations') + files = glob.glob1(room_dir, '*.txt') + files = sorted(files, key=lambda s: s.lower()) + vertexs = []; colors = []; + for f in files: + file_name = os.path.join(room_dir, f) + logging.info(' %s', file_name) + a = np.loadtxt(file_name) + vertex = a[:,:3]*1. 
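# Illustrative sketch (made-up rows, not dataset content): the per-annotation
# parsing in collect_room reads "x y z r g b" rows and splits them into float
# vertices and uint8 colours.
import numpy as np

rows = ["0.10 0.20 0.30 255 0 0",
        "0.40 0.50 0.60 0 255 0"]
a = np.loadtxt(rows)
vertex = a[:, :3] * 1.              # xyz coordinates
color = a[:, 3:].astype(np.uint8)   # rgb colour per point
print(vertex.shape)                 # (2, 3)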
+ color = a[:,3:]*1 + color = color.astype(np.uint8) + vertexs.append(vertex) + colors.append(color) + files = [f.split('.')[0] for f in files] + out = {'vertexs': vertexs, 'colors': colors, 'names': files} + return out + +def load_room(building_name, room_name, category_list=None): + room = collect_room(building_name, room_name) + room['building_name'] = building_name + room['room_name'] = room_name + instance_id = range(len(room['names'])) + room['instance_id'] = instance_id + if category_list is not None: + name = [r.split('_')[0] for r in room['names']] + class_id = [] + for n in name: + if n in category_list: + class_id.append(category_list.index(n)) + else: + class_id.append(len(category_list)) + room['class_id'] = class_id + room['category_list'] = category_list + return room + +def get_room_in_building(building_name): + building_dir = os.path.join(DATA_DIR, 'Stanford3dDataset_v1.2', building_name) + rn = os.listdir(building_dir) + rn = [x for x in rn if os.path.isdir(os.path.join(building_dir, x))] + rn = sorted(rn, key=lambda s: s.lower()) + return rn + +def write_room_dimensions(b_in, b_out, transform): + rooms = get_room_in_building(b_in) + room_dimension = {} + for r in rooms: + room = load_room(b_in, r, category_list=None) + vertex = np.concatenate(room['vertexs'], axis=0) + room_dimension[r] = np.concatenate((np.min(vertex, axis=0), np.max(vertex, axis=0)), axis=0) + if transform == 1: + room_dimension = _transform_area5b(room_dimension) + + out_file = os.path.join(DATA_DIR, 'processing', 'room-dimension', b_out+'.pkl') + save_variables(out_file, [room_dimension], ['room_dimension'], overwrite=True) + +def write_room_dimensions_all(I): + mkdir_if_missing(os.path.join(DATA_DIR, 'processing', 'room-dimension')) + bs_in = ['Area_1', 'Area_2', 'Area_3', 'Area_4', 'Area_5', 'Area_5', 'Area_6'] + bs_out = ['area1', 'area2', 'area3', 'area4', 'area5a', 'area5b', 'area6'] + transforms = [0, 0, 0, 0, 0, 1, 0] + + for i in I: + b_in = bs_in[i] + b_out = bs_out[i] + t = transforms[i] + write_room_dimensions(b_in, b_out, t) + +def write_class_maps_all(I): + mkdir_if_missing(os.path.join(DATA_DIR, 'processing', 'class-maps')) + bs_in = ['Area_1', 'Area_2', 'Area_3', 'Area_4', 'Area_5', 'Area_5', 'Area_6'] + bs_out = ['area1', 'area2', 'area3', 'area4', 'area5a', 'area5b', 'area6'] + transforms = [0, 0, 0, 0, 0, 1, 0] + + for i in I: + b_in = bs_in[i] + b_out = bs_out[i] + t = transforms[i] + _write_map_files(b_in, b_out, t) + + +if __name__ == '__main__': + write_room_dimensions_all([0, 2, 3, 4, 5, 6]) + write_class_maps_all([0, 2, 3, 4, 5, 6]) + diff --git a/models/research/cognitive_mapping_and_planning/scripts/script_preprocess_annoations_S3DIS.sh b/models/research/cognitive_mapping_and_planning/scripts/script_preprocess_annoations_S3DIS.sh new file mode 100644 index 0000000000000000000000000000000000000000..1384fabe69259ccc514a14d62aee358d1909bffb --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/scripts/script_preprocess_annoations_S3DIS.sh @@ -0,0 +1,24 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +cd data/stanford_building_parser_dataset_raw +unzip Stanford3dDataset_v1.2.zip +cd ../../ +PYOPENGL_PLATFORM=egl PYTHONPATH='.' python scripts/script_preprocess_annoations_S3DIS.py + +mv data/stanford_building_parser_dataset_raw/processing/room-dimension data/stanford_building_parser_dataset/. +mv data/stanford_building_parser_dataset_raw/processing/class-maps data/stanford_building_parser_dataset/. + +echo "You may now delete data/stanford_building_parser_dataset_raw if needed." diff --git a/models/research/cognitive_mapping_and_planning/scripts/script_preprocess_meshes_S3DIS.sh b/models/research/cognitive_mapping_and_planning/scripts/script_preprocess_meshes_S3DIS.sh new file mode 100644 index 0000000000000000000000000000000000000000..557a4dde611d42e71d71dd1589abf96f55e6eec6 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/scripts/script_preprocess_meshes_S3DIS.sh @@ -0,0 +1,37 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +mkdir -p data/stanford_building_parser_dataset +mkdir -p data/stanford_building_parser_dataset/mesh +cd data/stanford_building_parser_dataset_raw + +# Untar the files and extract the meshes. +for t in "1" "3" "4" "5a" "5b" "6"; do + tar -xf area_"$t"_noXYZ.tar area_$t/3d/rgb_textures + mv area_$t/3d/rgb_textures ../stanford_building_parser_dataset/mesh/area$t + rmdir area_$t/3d + rmdir area_$t +done + +cd ../../ + +# Preprocess meshes to remove the group and chunk information. +cd data/stanford_building_parser_dataset/ +for t in "1" "3" "4" "5a" "5b" "6"; do + obj_name=`ls mesh/area$t/*.obj` + cp $obj_name "$obj_name".bck + cat $obj_name.bck | grep -v '^g' | grep -v '^o' > $obj_name +done +cd ../../ diff --git a/models/research/cognitive_mapping_and_planning/scripts/script_test_pretrained_models.sh b/models/research/cognitive_mapping_and_planning/scripts/script_test_pretrained_models.sh new file mode 100644 index 0000000000000000000000000000000000000000..a4299fff5346afb53783a61de5c3e84f102a6304 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/scripts/script_test_pretrained_models.sh @@ -0,0 +1,63 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# Test CMP models. +CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 PYTHONPATH='.' PYOPENGL_PLATFORM=egl \ + python scripts/script_nav_agent_release.py --config_name cmp.lmap_Msc.clip5.sbpd_d_r2r+bench_test \ + --logdir output/cmp.lmap_Msc.clip5.sbpd_d_r2r + +CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 PYTHONPATH='.' PYOPENGL_PLATFORM=egl \ + python scripts/script_nav_agent_release.py --config_name cmp.lmap_Msc.clip5.sbpd_rgb_r2r+bench_test \ + --logdir output/cmp.lmap_Msc.clip5.sbpd_rgb_r2r + +CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 PYTHONPATH='.' PYOPENGL_PLATFORM=egl \ + python scripts/script_nav_agent_release.py --config_name cmp.lmap_Msc.clip5.sbpd_d_ST+bench_test \ + --logdir output/cmp.lmap_Msc.clip5.sbpd_d_ST + +CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 PYTHONPATH='.' PYOPENGL_PLATFORM=egl \ + python scripts/script_nav_agent_release.py --config_name cmp.lmap_Msc.clip5.sbpd_rgb_ST+bench_test \ + --logdir output/cmp.lmap_Msc.clip5.sbpd_rgb_ST + +CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 PYTHONPATH='.' PYOPENGL_PLATFORM=egl \ + python scripts/script_nav_agent_release.py --config_name cmp.lmap_Msc.clip5.sbpd_d_r2r_h0_64_80+bench_test \ + --logdir output/cmp.lmap_Msc.clip5.sbpd_d_r2r_h0_64_80 + +# Test LSTM baseline models. +CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 PYTHONPATH='.' PYOPENGL_PLATFORM=egl \ + python scripts/script_nav_agent_release.py --config_name bl.v2.noclip.sbpd_d_r2r+bench_test \ + --logdir output/bl.v2.noclip.sbpd_d_r2r + +CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 PYTHONPATH='.' PYOPENGL_PLATFORM=egl \ + python scripts/script_nav_agent_release.py --config_name bl.v2.noclip.sbpd_rgb_r2r+bench_test \ + --logdir output/bl.v2.noclip.sbpd_rgb_r2r + +CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 PYTHONPATH='.' PYOPENGL_PLATFORM=egl \ + python scripts/script_nav_agent_release.py --config_name bl.v2.noclip.sbpd_d_ST+bench_test \ + --logdir output/bl.v2.noclip.sbpd_d_ST + +CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 PYTHONPATH='.' PYOPENGL_PLATFORM=egl \ + python scripts/script_nav_agent_release.py --config_name bl.v2.noclip.sbpd_rgb_ST+bench_test \ + --logdir output/bl.v2.noclip.sbpd_rgb_ST + +CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 PYTHONPATH='.' PYOPENGL_PLATFORM=egl \ + python scripts/script_nav_agent_release.py --config_name bl.v2.noclip.sbpd_d_r2r_h0_64_80+bench_test \ + --logdir output/bl.v2.noclip.sbpd_d_r2r_h0_64_80 + +# Visualize test trajectories in top view. +# CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 PYTHONPATH='.' 
PYOPENGL_PLATFORM=egl \ +# python scripts/script_plot_trajectory.py \ +# --first_person --num_steps 40 \ +# --config_name cmp.lmap_Msc.clip5.sbpd_d_r2r \ +# --imset test --alsologtostderr diff --git a/models/research/cognitive_mapping_and_planning/src/__init__.py b/models/research/cognitive_mapping_and_planning/src/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/cognitive_mapping_and_planning/src/depth_utils.py b/models/research/cognitive_mapping_and_planning/src/depth_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..35f14fc7c37fffb2a408decede11e378867a2834 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/src/depth_utils.py @@ -0,0 +1,96 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Utilities for processing depth images. +""" +import numpy as np +import src.rotation_utils as ru +import src.utils as utils + +def get_camera_matrix(width, height, fov): + """Returns a camera matrix from image size and fov.""" + xc = (width-1.) / 2. + zc = (height-1.) / 2. + f = (width / 2.) / np.tan(np.deg2rad(fov / 2.)) + camera_matrix = utils.Foo(xc=xc, zc=zc, f=f) + return camera_matrix + +def get_point_cloud_from_z(Y, camera_matrix): + """Projects the depth image Y into a 3D point cloud. + Inputs: + Y is ...xHxW + camera_matrix + Outputs: + X is positive going right + Y is positive into the image + Z is positive up in the image + XYZ is ...xHxWx3 + """ + x, z = np.meshgrid(np.arange(Y.shape[-1]), + np.arange(Y.shape[-2]-1, -1, -1)) + for i in range(Y.ndim-2): + x = np.expand_dims(x, axis=0) + z = np.expand_dims(z, axis=0) + X = (x-camera_matrix.xc) * Y / camera_matrix.f + Z = (z-camera_matrix.zc) * Y / camera_matrix.f + XYZ = np.concatenate((X[...,np.newaxis], Y[...,np.newaxis], + Z[...,np.newaxis]), axis=X.ndim) + return XYZ + +def make_geocentric(XYZ, sensor_height, camera_elevation_degree): + """Transforms the point cloud into geocentric coordinate frame. + Input: + XYZ : ...x3 + sensor_height : height of the sensor + camera_elevation_degree : camera elevation to rectify. + Output: + XYZ : ...x3 + """ + R = ru.get_r_matrix([1.,0.,0.], angle=np.deg2rad(camera_elevation_degree)) + XYZ = np.matmul(XYZ.reshape(-1,3), R.T).reshape(XYZ.shape) + XYZ[...,2] = XYZ[...,2] + sensor_height + return XYZ + +def bin_points(XYZ_cms, map_size, z_bins, xy_resolution): + """Bins points into xy-z bins + XYZ_cms is ... x H x W x3 + Outputs is ... x map_size x map_size x (len(z_bins)+1) + """ + sh = XYZ_cms.shape + XYZ_cms = XYZ_cms.reshape([-1, sh[-3], sh[-2], sh[-1]]) + n_z_bins = len(z_bins)+1 + map_center = (map_size-1.)/2. 
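# Illustrative sketch (synthetic constant-depth image): the pinhole
# unprojection implemented by get_camera_matrix / get_point_cloud_from_z
# above, with X to the right, Y (depth) into the image and Z up.
import numpy as np

def unproject(depth, fov_deg):
  h, w = depth.shape
  xc, zc = (w - 1.) / 2., (h - 1.) / 2.
  f = (w / 2.) / np.tan(np.deg2rad(fov_deg / 2.))
  x, z = np.meshgrid(np.arange(w), np.arange(h - 1, -1, -1))
  X = (x - xc) * depth / f
  Z = (z - zc) * depth / f
  return np.stack([X, depth, Z], axis=-1)

xyz = unproject(np.full((4, 4), 2.0), fov_deg=90.)
print(xyz.shape)  # (4, 4, 3)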
+ counts = [] + isvalids = [] + for XYZ_cm in XYZ_cms: + isnotnan = np.logical_not(np.isnan(XYZ_cm[:,:,0])) + X_bin = np.round(XYZ_cm[:,:,0] / xy_resolution + map_center).astype(np.int32) + Y_bin = np.round(XYZ_cm[:,:,1] / xy_resolution + map_center).astype(np.int32) + Z_bin = np.digitize(XYZ_cm[:,:,2], bins=z_bins).astype(np.int32) + + isvalid = np.array([X_bin >= 0, X_bin < map_size, Y_bin >= 0, Y_bin < map_size, + Z_bin >= 0, Z_bin < n_z_bins, isnotnan]) + isvalid = np.all(isvalid, axis=0) + + ind = (Y_bin * map_size + X_bin) * n_z_bins + Z_bin + ind[np.logical_not(isvalid)] = 0 + count = np.bincount(ind.ravel(), isvalid.ravel().astype(np.int32), + minlength=map_size*map_size*n_z_bins) + count = np.reshape(count, [map_size, map_size, n_z_bins]) + counts.append(count) + isvalids.append(isvalid) + counts = np.array(counts).reshape(list(sh[:-3]) + [map_size, map_size, n_z_bins]) + isvalids = np.array(isvalids).reshape(list(sh[:-3]) + [sh[-3], sh[-2], 1]) + return counts, isvalids diff --git a/models/research/cognitive_mapping_and_planning/src/file_utils.py b/models/research/cognitive_mapping_and_planning/src/file_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b386236ca6e04c9fa1e452b6ad3e70c6ab9bb88a --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/src/file_utils.py @@ -0,0 +1,42 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Utilities for manipulating files. +""" +import os +import numpy as np +import PIL +from tensorflow.python.platform import gfile +import cv2 + +exists = lambda path: gfile.Exists(path) +fopen = lambda path, mode: gfile.Open(path, mode) +makedirs = lambda path: gfile.MakeDirs(path) +listdir = lambda path: gfile.ListDir(path) +copyfile = lambda a, b, o: gfile.Copy(a,b,o) + +def write_image(image_path, rgb): + ext = os.path.splitext(image_path)[1] + with gfile.GFile(image_path, 'w') as f: + img_str = cv2.imencode(ext, rgb[:,:,::-1])[1].tostring() + f.write(img_str) + +def read_image(image_path, type='rgb'): + with fopen(image_path, 'r') as f: + I = PIL.Image.open(f) + II = np.array(I) + if type == 'rgb': + II = II[:,:,:3] + return II diff --git a/models/research/cognitive_mapping_and_planning/src/graph_utils.py b/models/research/cognitive_mapping_and_planning/src/graph_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..cd99fd22a2f630438f31eecd7fbfece2c6008ead --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/src/graph_utils.py @@ -0,0 +1,552 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Various function to manipulate graphs for computing distances. +""" +import skimage.morphology +import numpy as np +import networkx as nx +import itertools +import logging +from datasets.nav_env import get_path_ids +import graph_tool as gt +import graph_tool.topology +import graph_tool.generation +import src.utils as utils + +# Compute shortest path from all nodes to or from all source nodes +def get_distance_node_list(gtG, source_nodes, direction, weights=None): + gtG_ = gt.Graph(gtG) + v = gtG_.add_vertex() + + if weights is not None: + weights = gtG_.edge_properties[weights] + + for s in source_nodes: + e = gtG_.add_edge(s, int(v)) + if weights is not None: + weights[e] = 0. + + if direction == 'to': + dist = gt.topology.shortest_distance( + gt.GraphView(gtG_, reversed=True), source=gtG_.vertex(int(v)), + target=None, weights=weights) + elif direction == 'from': + dist = gt.topology.shortest_distance( + gt.GraphView(gtG_, reversed=False), source=gtG_.vertex(int(v)), + target=None, weights=weights) + dist = np.array(dist.get_array()) + dist = dist[:-1] + if weights is None: + dist = dist-1 + return dist + +# Functions for semantically labelling nodes in the traversal graph. +def generate_lattice(sz_x, sz_y): + """Generates a lattice with sz_x vertices along x and sz_y vertices along y + direction Each of these vertices is step_size distance apart. Origin is at + (0,0). """ + g = gt.generation.lattice([sz_x, sz_y]) + x, y = np.meshgrid(np.arange(sz_x), np.arange(sz_y)) + x = np.reshape(x, [-1,1]); y = np.reshape(y, [-1,1]); + nodes = np.concatenate((x,y), axis=1) + return g, nodes + +def add_diagonal_edges(g, nodes, sz_x, sz_y, edge_len): + offset = [sz_x+1, sz_x-1] + for o in offset: + s = np.arange(nodes.shape[0]-o-1) + t = s + o + ind = np.all(np.abs(nodes[s,:] - nodes[t,:]) == np.array([[1,1]]), axis=1) + s = s[ind][:,np.newaxis] + t = t[ind][:,np.newaxis] + st = np.concatenate((s,t), axis=1) + for i in range(st.shape[0]): + e = g.add_edge(st[i,0], st[i,1], add_missing=False) + g.ep['wts'][e] = edge_len + +def convert_traversible_to_graph(traversible, ff_cost=1., fo_cost=1., + oo_cost=1., connectivity=4): + assert(connectivity == 4 or connectivity == 8) + + sz_x = traversible.shape[1] + sz_y = traversible.shape[0] + g, nodes = generate_lattice(sz_x, sz_y) + + # Assign costs. 
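# Illustrative sketch of the virtual-source trick in get_distance_node_list
# above, written with networkx instead of graph_tool so it runs standalone:
# connect a dummy vertex to every source with zero-cost edges and run one
# shortest-path query to get, for every node, the distance to its nearest
# source. The 4x4 grid is synthetic.
import networkx as nx

g = nx.grid_2d_graph(4, 4)          # edges without a 'wts' attribute count as 1
sources = [(0, 0), (3, 3)]

virtual = 'virtual_source'
for s in sources:
  g.add_edge(virtual, s, wts=0.0)   # zero-cost edges into the sources

dist = nx.single_source_dijkstra_path_length(g, virtual, weight='wts')
del dist[virtual]
print(dist[(1, 2)])  # 3: hops to the nearer of the two sources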
+ edge_wts = g.new_edge_property('float') + g.edge_properties['wts'] = edge_wts + wts = np.ones(g.num_edges(), dtype=np.float32) + edge_wts.get_array()[:] = wts + + if connectivity == 8: + add_diagonal_edges(g, nodes, sz_x, sz_y, np.sqrt(2.)) + + se = np.array([[int(e.source()), int(e.target())] for e in g.edges()]) + s_xy = nodes[se[:,0]] + t_xy = nodes[se[:,1]] + s_t = np.ravel_multi_index((s_xy[:,1], s_xy[:,0]), traversible.shape) + t_t = np.ravel_multi_index((t_xy[:,1], t_xy[:,0]), traversible.shape) + s_t = traversible.ravel()[s_t] + t_t = traversible.ravel()[t_t] + + wts = np.zeros(g.num_edges(), dtype=np.float32) + wts[np.logical_and(s_t == True, t_t == True)] = ff_cost + wts[np.logical_and(s_t == False, t_t == False)] = oo_cost + wts[np.logical_xor(s_t, t_t)] = fo_cost + + edge_wts = g.edge_properties['wts'] + for i, e in enumerate(g.edges()): + edge_wts[e] = edge_wts[e] * wts[i] + # d = edge_wts.get_array()*1. + # edge_wts.get_array()[:] = d*wts + return g, nodes + +def label_nodes_with_class(nodes_xyt, class_maps, pix): + """ + Returns: + class_maps__: one-hot class_map for each class. + node_class_label: one-hot class_map for each class, nodes_xyt.shape[0] x n_classes + """ + # Assign each pixel to a node. + selem = skimage.morphology.disk(pix) + class_maps_ = class_maps*1. + for i in range(class_maps.shape[2]): + class_maps_[:,:,i] = skimage.morphology.dilation(class_maps[:,:,i]*1, selem) + class_maps__ = np.argmax(class_maps_, axis=2) + class_maps__[np.max(class_maps_, axis=2) == 0] = -1 + + # For each node pick out the label from this class map. + x = np.round(nodes_xyt[:,[0]]).astype(np.int32) + y = np.round(nodes_xyt[:,[1]]).astype(np.int32) + ind = np.ravel_multi_index((y,x), class_maps__.shape) + node_class_label = class_maps__.ravel()[ind][:,0] + + # Convert to one hot versions. + class_maps_one_hot = np.zeros(class_maps.shape, dtype=np.bool) + node_class_label_one_hot = np.zeros((node_class_label.shape[0], class_maps.shape[2]), dtype=np.bool) + for i in range(class_maps.shape[2]): + class_maps_one_hot[:,:,i] = class_maps__ == i + node_class_label_one_hot[:,i] = node_class_label == i + return class_maps_one_hot, node_class_label_one_hot + +def label_nodes_with_class_geodesic(nodes_xyt, class_maps, pix, traversible, + ff_cost=1., fo_cost=1., oo_cost=1., + connectivity=4): + """Labels nodes in nodes_xyt with class labels using geodesic distance as + defined by traversible from class_maps. + Inputs: + nodes_xyt + class_maps: counts for each class. + pix: distance threshold to consider close enough to target. + traversible: binary map of whether traversible or not. + Output: + labels: For each node in nodes_xyt returns a label of the class or -1 is + unlabelled. + """ + g, nodes = convert_traversible_to_graph(traversible, ff_cost=ff_cost, + fo_cost=fo_cost, oo_cost=oo_cost, + connectivity=connectivity) + + class_dist = np.zeros_like(class_maps*1.) + n_classes = class_maps.shape[2] + if False: + # Assign each pixel to a class based on number of points. + selem = skimage.morphology.disk(pix) + class_maps_ = class_maps*1. + class_maps__ = np.argmax(class_maps_, axis=2) + class_maps__[np.max(class_maps_, axis=2) == 0] = -1 + + # Label nodes with classes. 
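# Illustrative sketch (numpy only, synthetic 4x4 map) of the lookup in
# label_nodes_with_class above: collapse the per-class count map with argmax,
# mark empty pixels as -1, and read the label under each node's (x, y).
import numpy as np

class_maps = np.zeros((4, 4, 2))
class_maps[0, 1, 0] = 3.    # evidence for class 0 at (x=1, y=0)
class_maps[2, 3, 1] = 5.    # evidence for class 1 at (x=3, y=2)

labels = np.argmax(class_maps, axis=2)
labels[np.max(class_maps, axis=2) == 0] = -1   # unlabelled pixels

nodes_xy = np.array([[1, 0], [3, 2]])          # (x, y) per node
node_labels = labels[nodes_xy[:, 1], nodes_xy[:, 0]]
print(node_labels)  # [0 1]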
+ for i in range(n_classes): + # class_node_ids = np.where(class_maps__.ravel() == i)[0] + class_node_ids = np.where(class_maps[:,:,i].ravel() > 0)[0] + dist_i = get_distance_node_list(g, class_node_ids, 'to', weights='wts') + class_dist[:,:,i] = np.reshape(dist_i, class_dist[:,:,i].shape) + class_map_geodesic = (class_dist <= pix) + class_map_geodesic = np.reshape(class_map_geodesic, [-1, n_classes]) + + # For each node pick out the label from this class map. + x = np.round(nodes_xyt[:,[0]]).astype(np.int32) + y = np.round(nodes_xyt[:,[1]]).astype(np.int32) + ind = np.ravel_multi_index((y,x), class_dist[:,:,0].shape) + node_class_label = class_map_geodesic[ind[:,0],:] + class_map_geodesic = class_dist <= pix + return class_map_geodesic, node_class_label + +def _get_next_nodes_undirected(n, sc, n_ori): + nodes_to_add = [] + nodes_to_validate = [] + (p, q, r) = n + nodes_to_add.append((n, (p, q, r), 0)) + if n_ori == 4: + for _ in [1, 2, 3, 4]: + if _ == 1: + v = (p - sc, q, r) + elif _ == 2: + v = (p + sc, q, r) + elif _ == 3: + v = (p, q - sc, r) + elif _ == 4: + v = (p, q + sc, r) + nodes_to_validate.append((n, v, _)) + return nodes_to_add, nodes_to_validate + +def _get_next_nodes(n, sc, n_ori): + nodes_to_add = [] + nodes_to_validate = [] + (p, q, r) = n + for r_, a_ in zip([-1, 0, 1], [1, 0, 2]): + nodes_to_add.append((n, (p, q, np.mod(r+r_, n_ori)), a_)) + + if n_ori == 6: + if r == 0: + v = (p + sc, q, r) + elif r == 1: + v = (p + sc, q + sc, r) + elif r == 2: + v = (p, q + sc, r) + elif r == 3: + v = (p - sc, q, r) + elif r == 4: + v = (p - sc, q - sc, r) + elif r == 5: + v = (p, q - sc, r) + elif n_ori == 4: + if r == 0: + v = (p + sc, q, r) + elif r == 1: + v = (p, q + sc, r) + elif r == 2: + v = (p - sc, q, r) + elif r == 3: + v = (p, q - sc, r) + nodes_to_validate.append((n,v,3)) + + return nodes_to_add, nodes_to_validate + +def generate_graph(valid_fn_vec=None, sc=1., n_ori=6, + starting_location=(0, 0, 0), vis=False, directed=True): + timer = utils.Timer() + timer.tic() + if directed: G = nx.DiGraph(directed=True) + else: G = nx.Graph() + G.add_node(starting_location) + new_nodes = G.nodes() + while len(new_nodes) != 0: + nodes_to_add = [] + nodes_to_validate = [] + for n in new_nodes: + if directed: + na, nv = _get_next_nodes(n, sc, n_ori) + else: + na, nv = _get_next_nodes_undirected(n, sc, n_ori) + nodes_to_add = nodes_to_add + na + if valid_fn_vec is not None: + nodes_to_validate = nodes_to_validate + nv + else: + node_to_add = nodes_to_add + nv + + # Validate nodes. 
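# Illustrative sketch (synthetic 5x5 free-space mask, orientation dropped) of
# the expansion loop in generate_graph: starting from one location, propose
# 4-connected neighbours, keep those a validity function accepts, and push
# unseen ones onto the frontier.
import collections
import numpy as np

free = np.ones((5, 5), dtype=bool)
free[2, 1:4] = False                  # a short wall across the middle

def valid(p):
  x, y = p
  return 0 <= x < 5 and 0 <= y < 5 and free[y, x]

start = (0, 0)
edges, seen, frontier = [], {start}, collections.deque([start])
while frontier:
  x, y = frontier.popleft()
  for v in [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]:
    if valid(v):
      edges.append(((x, y), v))
      if v not in seen:
        seen.add(v)
        frontier.append(v)

print(len(seen))  # 22 reachable free cells out of 25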
+ vs = [_[1] for _ in nodes_to_validate] + valids = valid_fn_vec(vs) + + for nva, valid in zip(nodes_to_validate, valids): + if valid: + nodes_to_add.append(nva) + + new_nodes = [] + for n,v,a in nodes_to_add: + if not G.has_node(v): + new_nodes.append(v) + G.add_edge(n, v, action=a) + + timer.toc(average=True, log_at=1, log_str='src.graph_utils.generate_graph') + return (G) + +def vis_G(G, ax, vertex_color='r', edge_color='b', r=None): + if edge_color is not None: + for e in G.edges(): + XYT = zip(*e) + x = XYT[-3] + y = XYT[-2] + t = XYT[-1] + if r is None or t[0] == r: + ax.plot(x, y, edge_color) + if vertex_color is not None: + XYT = zip(*G.nodes()) + x = XYT[-3] + y = XYT[-2] + t = XYT[-1] + ax.plot(x, y, vertex_color + '.') + +def convert_to_graph_tool(G): + timer = utils.Timer() + timer.tic() + gtG = gt.Graph(directed=G.is_directed()) + gtG.ep['action'] = gtG.new_edge_property('int') + + nodes_list = G.nodes() + nodes_array = np.array(nodes_list) + + nodes_id = np.zeros((nodes_array.shape[0],), dtype=np.int64) + + for i in range(nodes_array.shape[0]): + v = gtG.add_vertex() + nodes_id[i] = int(v) + + # d = {key: value for (key, value) in zip(nodes_list, nodes_id)} + d = dict(itertools.izip(nodes_list, nodes_id)) + + for src, dst, data in G.edges_iter(data=True): + e = gtG.add_edge(d[src], d[dst]) + gtG.ep['action'][e] = data['action'] + nodes_to_id = d + timer.toc(average=True, log_at=1, log_str='src.graph_utils.convert_to_graph_tool') + return gtG, nodes_array, nodes_to_id + + +def _rejection_sampling(rng, sampling_d, target_d, bins, hardness, M): + bin_ind = np.digitize(hardness, bins)-1 + i = 0 + ratio = target_d[bin_ind] / (M*sampling_d[bin_ind]) + while i < ratio.size and rng.rand() > ratio[i]: + i = i+1 + return i + +def heuristic_fn_vec(n1, n2, n_ori, step_size): + # n1 is a vector and n2 is a single point. + dx = (n1[:,0] - n2[0,0])/step_size + dy = (n1[:,1] - n2[0,1])/step_size + dt = n1[:,2] - n2[0,2] + dt = np.mod(dt, n_ori) + dt = np.minimum(dt, n_ori-dt) + + if n_ori == 6: + if dx*dy > 0: + d = np.maximum(np.abs(dx), np.abs(dy)) + else: + d = np.abs(dy-dx) + elif n_ori == 4: + d = np.abs(dx) + np.abs(dy) + + return (d + dt).reshape((-1,1)) + +def get_hardness_distribution(gtG, max_dist, min_dist, rng, trials, bins, nodes, + n_ori, step_size): + heuristic_fn = lambda node_ids, node_id: \ + heuristic_fn_vec(nodes[node_ids, :], nodes[[node_id], :], n_ori, step_size) + num_nodes = gtG.num_vertices() + gt_dists = []; h_dists = []; + for i in range(trials): + end_node_id = rng.choice(num_nodes) + gt_dist = gt.topology.shortest_distance(gt.GraphView(gtG, reversed=True), + source=gtG.vertex(end_node_id), + target=None, max_dist=max_dist) + gt_dist = np.array(gt_dist.get_array()) + ind = np.where(np.logical_and(gt_dist <= max_dist, gt_dist >= min_dist))[0] + gt_dist = gt_dist[ind] + h_dist = heuristic_fn(ind, end_node_id)[:,0] + gt_dists.append(gt_dist) + h_dists.append(h_dist) + gt_dists = np.concatenate(gt_dists) + h_dists = np.concatenate(h_dists) + hardness = 1. 
- h_dists*1./gt_dists + hist, _ = np.histogram(hardness, bins) + hist = hist.astype(np.float64) + hist = hist / np.sum(hist) + return hist + +def rng_next_goal_rejection_sampling(start_node_ids, batch_size, gtG, rng, + max_dist, min_dist, max_dist_to_compute, + sampling_d, target_d, + nodes, n_ori, step_size, bins, M): + sample_start_nodes = start_node_ids is None + dists = []; pred_maps = []; end_node_ids = []; start_node_ids_ = []; + hardnesss = []; gt_dists = []; + num_nodes = gtG.num_vertices() + for i in range(batch_size): + done = False + while not done: + if sample_start_nodes: + start_node_id = rng.choice(num_nodes) + else: + start_node_id = start_node_ids[i] + + gt_dist = gt.topology.shortest_distance( + gt.GraphView(gtG, reversed=False), source=start_node_id, target=None, + max_dist=max_dist) + gt_dist = np.array(gt_dist.get_array()) + ind = np.where(np.logical_and(gt_dist <= max_dist, gt_dist >= min_dist))[0] + ind = rng.permutation(ind) + gt_dist = gt_dist[ind]*1. + h_dist = heuristic_fn_vec(nodes[ind, :], nodes[[start_node_id], :], + n_ori, step_size)[:,0] + hardness = 1. - h_dist / gt_dist + sampled_ind = _rejection_sampling(rng, sampling_d, target_d, bins, + hardness, M) + if sampled_ind < ind.size: + # print sampled_ind + end_node_id = ind[sampled_ind] + hardness = hardness[sampled_ind] + gt_dist = gt_dist[sampled_ind] + done = True + + # Compute distance from end node to all nodes, to return. + dist, pred_map = gt.topology.shortest_distance( + gt.GraphView(gtG, reversed=True), source=end_node_id, target=None, + max_dist=max_dist_to_compute, pred_map=True) + dist = np.array(dist.get_array()) + pred_map = np.array(pred_map.get_array()) + + hardnesss.append(hardness); dists.append(dist); pred_maps.append(pred_map); + start_node_ids_.append(start_node_id); end_node_ids.append(end_node_id); + gt_dists.append(gt_dist); + paths = None + return start_node_ids_, end_node_ids, dists, pred_maps, paths, hardnesss, gt_dists + + +def rng_next_goal(start_node_ids, batch_size, gtG, rng, max_dist, + max_dist_to_compute, node_room_ids, nodes=None, + compute_path=False, dists_from_start_node=None): + # Compute the distance field from the starting location, and then pick a + # destination in another room if possible otherwise anywhere outside this + # room. + dists = []; pred_maps = []; paths = []; end_node_ids = []; + for i in range(batch_size): + room_id = node_room_ids[start_node_ids[i]] + # Compute distances. + if dists_from_start_node == None: + dist, pred_map = gt.topology.shortest_distance( + gt.GraphView(gtG, reversed=False), source=gtG.vertex(start_node_ids[i]), + target=None, max_dist=max_dist_to_compute, pred_map=True) + dist = np.array(dist.get_array()) + else: + dist = dists_from_start_node[i] + + # Randomly sample nodes which are within max_dist. + near_ids = dist <= max_dist + near_ids = near_ids[:, np.newaxis] + # Check to see if there is a non-negative node which is close enough. 
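# Illustrative sketch (synthetic distributions) of the rejection-sampling step
# used by rng_next_goal_rejection_sampling above: a candidate in hardness bin
# b is accepted with probability target_d[b] / (M * sampling_d[b]), so the
# accepted episodes follow target_d rather than the natural sampling_d.
import numpy as np

bins = np.array([0., 0.5, 1.01])
sampling_d = np.array([0.8, 0.2])    # what sampling naturally yields
target_d = np.array([0.5, 0.5])      # what we want to train on
M = np.max(target_d / sampling_d)    # keeps acceptance probabilities <= 1

rng = np.random.RandomState(0)
hardness = np.concatenate([rng.uniform(0.0, 0.5, 800),
                           rng.uniform(0.5, 1.0, 200)])
bin_ind = np.digitize(hardness, bins) - 1
accept_prob = target_d[bin_ind] / (M * sampling_d[bin_ind])
accepted = hardness[rng.rand(hardness.size) < accept_prob]
# The two hardness bins should now hold roughly equal counts.
print(np.histogram(accepted, bins)[0])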
+ non_same_room_ids = node_room_ids != room_id + non_hallway_ids = node_room_ids != -1 + good1_ids = np.logical_and(near_ids, np.logical_and(non_same_room_ids, non_hallway_ids)) + good2_ids = np.logical_and(near_ids, non_hallway_ids) + good3_ids = near_ids + if np.any(good1_ids): + end_node_id = rng.choice(np.where(good1_ids)[0]) + elif np.any(good2_ids): + end_node_id = rng.choice(np.where(good2_ids)[0]) + elif np.any(good3_ids): + end_node_id = rng.choice(np.where(good3_ids)[0]) + else: + logging.error('Did not find any good nodes.') + + # Compute distance to this new goal for doing distance queries. + dist, pred_map = gt.topology.shortest_distance( + gt.GraphView(gtG, reversed=True), source=gtG.vertex(end_node_id), + target=None, max_dist=max_dist_to_compute, pred_map=True) + dist = np.array(dist.get_array()) + pred_map = np.array(pred_map.get_array()) + + dists.append(dist) + pred_maps.append(pred_map) + end_node_ids.append(end_node_id) + + path = None + if compute_path: + path = get_path_ids(start_node_ids[i], end_node_ids[i], pred_map) + paths.append(path) + + return start_node_ids, end_node_ids, dists, pred_maps, paths + + +def rng_room_to_room(batch_size, gtG, rng, max_dist, max_dist_to_compute, + node_room_ids, nodes=None, compute_path=False): + # Sample one of the rooms, compute the distance field. Pick a destination in + # another room if possible otherwise anywhere outside this room. + dists = []; pred_maps = []; paths = []; start_node_ids = []; end_node_ids = []; + room_ids = np.unique(node_room_ids[node_room_ids[:,0] >= 0, 0]) + for i in range(batch_size): + room_id = rng.choice(room_ids) + end_node_id = rng.choice(np.where(node_room_ids[:,0] == room_id)[0]) + end_node_ids.append(end_node_id) + + # Compute distances. + dist, pred_map = gt.topology.shortest_distance( + gt.GraphView(gtG, reversed=True), source=gtG.vertex(end_node_id), + target=None, max_dist=max_dist_to_compute, pred_map=True) + dist = np.array(dist.get_array()) + pred_map = np.array(pred_map.get_array()) + dists.append(dist) + pred_maps.append(pred_map) + + # Randomly sample nodes which are within max_dist. + near_ids = dist <= max_dist + near_ids = near_ids[:, np.newaxis] + + # Check to see if there is a non-negative node which is close enough. + non_same_room_ids = node_room_ids != room_id + non_hallway_ids = node_room_ids != -1 + good1_ids = np.logical_and(near_ids, np.logical_and(non_same_room_ids, non_hallway_ids)) + good2_ids = np.logical_and(near_ids, non_hallway_ids) + good3_ids = near_ids + if np.any(good1_ids): + start_node_id = rng.choice(np.where(good1_ids)[0]) + elif np.any(good2_ids): + start_node_id = rng.choice(np.where(good2_ids)[0]) + elif np.any(good3_ids): + start_node_id = rng.choice(np.where(good3_ids)[0]) + else: + logging.error('Did not find any good nodes.') + + start_node_ids.append(start_node_id) + + path = None + if compute_path: + path = get_path_ids(start_node_ids[i], end_node_ids[i], pred_map) + paths.append(path) + + return start_node_ids, end_node_ids, dists, pred_maps, paths + + +def rng_target_dist_field(batch_size, gtG, rng, max_dist, max_dist_to_compute, + nodes=None, compute_path=False): + # Sample a single node, compute distance to all nodes less than max_dist, + # sample nodes which are a particular distance away. 
+ dists = []; pred_maps = []; paths = []; start_node_ids = [] + end_node_ids = rng.choice(gtG.num_vertices(), size=(batch_size,), + replace=False).tolist() + + for i in range(batch_size): + dist, pred_map = gt.topology.shortest_distance( + gt.GraphView(gtG, reversed=True), source=gtG.vertex(end_node_ids[i]), + target=None, max_dist=max_dist_to_compute, pred_map=True) + dist = np.array(dist.get_array()) + pred_map = np.array(pred_map.get_array()) + dists.append(dist) + pred_maps.append(pred_map) + + # Randomly sample nodes which are withing max_dist + near_ids = np.where(dist <= max_dist)[0] + start_node_id = rng.choice(near_ids, size=(1,), replace=False)[0] + start_node_ids.append(start_node_id) + + path = None + if compute_path: + path = get_path_ids(start_node_ids[i], end_node_ids[i], pred_map) + paths.append(path) + + return start_node_ids, end_node_ids, dists, pred_maps, paths diff --git a/models/research/cognitive_mapping_and_planning/src/map_utils.py b/models/research/cognitive_mapping_and_planning/src/map_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6756131a9eac161e7633ef089ed573e324f859e1 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/src/map_utils.py @@ -0,0 +1,245 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Various function to compute the ground truth map for training etc. +""" +import copy +import skimage.morphology +import logging +import numpy as np +import scipy.ndimage +import matplotlib.pyplot as plt +import PIL + +import src.utils as utils +import cv2 + +def _get_xy_bounding_box(vertex, padding): + """Returns the xy bounding box of the environment.""" + min_ = np.floor(np.min(vertex[:, :2], axis=0) - padding).astype(np.int) + max_ = np.ceil(np.max(vertex[:, :2], axis=0) + padding).astype(np.int) + return min_, max_ + +def _project_to_map(map, vertex, wt=None, ignore_points_outside_map=False): + """Projects points to map, returns how many points are present at each + location.""" + num_points = np.zeros((map.size[1], map.size[0])) + vertex_ = vertex[:, :2] - map.origin + vertex_ = np.round(vertex_ / map.resolution).astype(np.int) + if ignore_points_outside_map: + good_ind = np.all(np.array([vertex_[:,1] >= 0, vertex_[:,1] < map.size[1], + vertex_[:,0] >= 0, vertex_[:,0] < map.size[0]]), + axis=0) + vertex_ = vertex_[good_ind, :] + if wt is not None: + wt = wt[good_ind, :] + if wt is None: + np.add.at(num_points, (vertex_[:, 1], vertex_[:, 0]), 1) + else: + assert(wt.shape[0] == vertex.shape[0]), \ + 'number of weights should be same as vertices.' 
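# Illustrative sketch (synthetic map extent and points) of the counting
# projection in _project_to_map above: shift points by the map origin,
# quantise by the resolution, and accumulate per-cell counts with np.add.at,
# which handles repeated indices correctly.
import numpy as np

origin = np.array([0., 0.])
resolution = 5.                      # units per cell
size = (4, 4)                        # (x, y) cells
points = np.array([[2., 3.], [12., 3.], [12., 3.5]])

counts = np.zeros((size[1], size[0]))
ij = np.round((points - origin) / resolution).astype(int)
np.add.at(counts, (ij[:, 1], ij[:, 0]), 1)
print(counts)  # one point in cell (x=0, y=1), two in cell (x=2, y=1)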
+ np.add.at(num_points, (vertex_[:, 1], vertex_[:, 0]), wt) + return num_points + +def make_map(padding, resolution, vertex=None, sc=1.): + """Returns a map structure.""" + min_, max_ = _get_xy_bounding_box(vertex*sc, padding=padding) + sz = np.ceil((max_ - min_ + 1) / resolution).astype(np.int32) + max_ = min_ + sz * resolution - 1 + map = utils.Foo(origin=min_, size=sz, max=max_, resolution=resolution, + padding=padding) + return map + +def _fill_holes(img, thresh): + """Fills holes less than thresh area (assumes 4 connectivity when computing + hole area.""" + l, n = scipy.ndimage.label(np.logical_not(img)) + img_ = img == True + cnts = np.bincount(l.reshape(-1)) + for i, cnt in enumerate(cnts): + if cnt < thresh: + l[l == i] = -1 + img_[l == -1] = True + return img_ + +def compute_traversibility(map, robot_base, robot_height, robot_radius, + valid_min, valid_max, num_point_threshold, shapess, + sc=100., n_samples_per_face=200): + """Returns a bit map with pixels that are traversible or not as long as the + robot center is inside this volume we are good colisions can be detected by + doing a line search on things, or walking from current location to final + location in the bitmap, or doing bwlabel on the traversibility map.""" + + tt = utils.Timer() + tt.tic() + num_obstcale_points = np.zeros((map.size[1], map.size[0])) + num_points = np.zeros((map.size[1], map.size[0])) + + for i, shapes in enumerate(shapess): + for j in range(shapes.get_number_of_meshes()): + p, face_areas, face_idx = shapes.sample_points_on_face_of_shape( + j, n_samples_per_face, sc) + wt = face_areas[face_idx]/n_samples_per_face + + ind = np.all(np.concatenate( + (p[:, [2]] > robot_base, + p[:, [2]] < robot_base + robot_height), axis=1),axis=1) + num_obstcale_points += _project_to_map(map, p[ind, :], wt[ind]) + + ind = np.all(np.concatenate( + (p[:, [2]] > valid_min, + p[:, [2]] < valid_max), axis=1),axis=1) + num_points += _project_to_map(map, p[ind, :], wt[ind]) + + selem = skimage.morphology.disk(robot_radius / map.resolution) + obstacle_free = skimage.morphology.binary_dilation( + _fill_holes(num_obstcale_points > num_point_threshold, 20), selem) != True + valid_space = _fill_holes(num_points > num_point_threshold, 20) + traversible = np.all(np.concatenate((obstacle_free[...,np.newaxis], + valid_space[...,np.newaxis]), axis=2), + axis=2) + # plt.imshow(np.concatenate((obstacle_free, valid_space, traversible), axis=1)) + # plt.show() + + map_out = copy.deepcopy(map) + map_out.num_obstcale_points = num_obstcale_points + map_out.num_points = num_points + map_out.traversible = traversible + map_out.obstacle_free = obstacle_free + map_out.valid_space = valid_space + tt.toc(log_at=1, log_str='src.map_utils.compute_traversibility: ') + return map_out + + +def resize_maps(map, map_scales, resize_method): + scaled_maps = [] + for i, sc in enumerate(map_scales): + if resize_method == 'antialiasing': + # Resize using open cv so that we can compute the size. + # Use PIL resize to use anti aliasing feature. + map_ = cv2.resize(map*1, None, None, fx=sc, fy=sc, interpolation=cv2.INTER_LINEAR) + w = map_.shape[1]; h = map_.shape[0] + + map_img = PIL.Image.fromarray((map*255).astype(np.uint8)) + map__img = map_img.resize((w,h), PIL.Image.ANTIALIAS) + map_ = np.asarray(map__img).astype(np.float32) + map_ = map_/255. 
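# Illustrative sketch (synthetic 5x5 mask) of the small-hole filling done by
# _fill_holes above: label connected components of the non-traversible
# complement and flip any component smaller than a threshold back to
# traversible.
import numpy as np
import scipy.ndimage

img = np.ones((5, 5), dtype=bool)
img[2, 2] = False                     # a one-pixel hole
img[:, 0] = False                     # a full column of genuine obstacle

labels, _ = scipy.ndimage.label(~img)
sizes = np.bincount(labels.ravel())
filled = img.copy()
for comp in range(1, sizes.size):
  if sizes[comp] < 3:                 # threshold on hole area
    filled[labels == comp] = True
print(filled[2, 2])  # True: the isolated hole is filled
print(filled[2, 0])  # False: the large obstacle is kept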
+ map_ = np.minimum(map_, 1.0) + map_ = np.maximum(map_, 0.0) + elif resize_method == 'linear_noantialiasing': + map_ = cv2.resize(map*1, None, None, fx=sc, fy=sc, interpolation=cv2.INTER_LINEAR) + else: + logging.error('Unknown resizing method') + scaled_maps.append(map_) + return scaled_maps + + +def pick_largest_cc(traversible): + out = scipy.ndimage.label(traversible)[0] + cnt = np.bincount(out.reshape(-1))[1:] + return out == np.argmax(cnt) + 1 + +def get_graph_origin_loc(rng, traversible): + """Erode the traversibility mask so that we get points in the bulk of the + graph, and not end up with a situation where the graph is localized in the + corner of a cramped room. Output Locs is in the coordinate frame of the + map.""" + + aa = pick_largest_cc(skimage.morphology.binary_erosion(traversible == True, + selem=np.ones((15,15)))) + y, x = np.where(aa > 0) + ind = rng.choice(y.size) + locs = np.array([x[ind], y[ind]]) + locs = locs + rng.rand(*(locs.shape)) - 0.5 + return locs + + +def generate_egocentric_maps(scaled_maps, map_scales, map_crop_sizes, loc, + x_axis, y_axis, theta): + maps = [] + for i, (map_, sc, map_crop_size) in enumerate(zip(scaled_maps, map_scales, map_crop_sizes)): + maps_i = np.array(get_map_to_predict(loc*sc, x_axis, y_axis, map_, + map_crop_size, + interpolation=cv2.INTER_LINEAR)[0]) + maps_i[np.isnan(maps_i)] = 0 + maps.append(maps_i) + return maps + +def generate_goal_images(map_scales, map_crop_sizes, n_ori, goal_dist, + goal_theta, rel_goal_orientation): + goal_dist = goal_dist[:,0] + goal_theta = goal_theta[:,0] + rel_goal_orientation = rel_goal_orientation[:,0] + + goals = []; + # Generate the map images. + for i, (sc, map_crop_size) in enumerate(zip(map_scales, map_crop_sizes)): + goal_i = np.zeros((goal_dist.shape[0], map_crop_size, map_crop_size, n_ori), + dtype=np.float32) + x = goal_dist*np.cos(goal_theta)*sc + (map_crop_size-1.)/2. + y = goal_dist*np.sin(goal_theta)*sc + (map_crop_size-1.)/2. 
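# Illustrative sketch (synthetic crop size and coordinates) of the bilinear
# "splat" used by generate_goal_images: a continuous goal position (x, y) is
# spread over its four surrounding cells with weights that sum to one.
import numpy as np

crop = 8
goal_img = np.zeros((crop, crop), dtype=np.float32)
x, y = 2.25, 4.75

x0, y0 = int(np.floor(x)), int(np.floor(y))
x1, y1 = x0 + 1, y0 + 1
for yy, wy in [(y0, y1 - y), (y1, y - y0)]:
  for xx, wx in [(x0, x1 - x), (x1, x - x0)]:
    if 0 <= xx < crop and 0 <= yy < crop:
      goal_img[yy, xx] = wy * wx

print(goal_img.sum())   # 1.0
print(goal_img[4, 2])   # 0.1875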
+ + for j in range(goal_dist.shape[0]): + gc = rel_goal_orientation[j] + x0 = np.floor(x[j]).astype(np.int32); x1 = x0 + 1; + y0 = np.floor(y[j]).astype(np.int32); y1 = y0 + 1; + if x0 >= 0 and x0 <= map_crop_size-1: + if y0 >= 0 and y0 <= map_crop_size-1: + goal_i[j, y0, x0, gc] = (x1-x[j])*(y1-y[j]) + if y1 >= 0 and y1 <= map_crop_size-1: + goal_i[j, y1, x0, gc] = (x1-x[j])*(y[j]-y0) + + if x1 >= 0 and x1 <= map_crop_size-1: + if y0 >= 0 and y0 <= map_crop_size-1: + goal_i[j, y0, x1, gc] = (x[j]-x0)*(y1-y[j]) + if y1 >= 0 and y1 <= map_crop_size-1: + goal_i[j, y1, x1, gc] = (x[j]-x0)*(y[j]-y0) + + goals.append(goal_i) + return goals + +def get_map_to_predict(src_locs, src_x_axiss, src_y_axiss, map, map_size, + interpolation=cv2.INTER_LINEAR): + fss = [] + valids = [] + + center = (map_size-1.0)/2.0 + dst_theta = np.pi/2.0 + dst_loc = np.array([center, center]) + dst_x_axis = np.array([np.cos(dst_theta), np.sin(dst_theta)]) + dst_y_axis = np.array([np.cos(dst_theta+np.pi/2), np.sin(dst_theta+np.pi/2)]) + + def compute_points(center, x_axis, y_axis): + points = np.zeros((3,2),dtype=np.float32) + points[0,:] = center + points[1,:] = center + x_axis + points[2,:] = center + y_axis + return points + + dst_points = compute_points(dst_loc, dst_x_axis, dst_y_axis) + for i in range(src_locs.shape[0]): + src_loc = src_locs[i,:] + src_x_axis = src_x_axiss[i,:] + src_y_axis = src_y_axiss[i,:] + src_points = compute_points(src_loc, src_x_axis, src_y_axis) + M = cv2.getAffineTransform(src_points, dst_points) + + fs = cv2.warpAffine(map, M, (map_size, map_size), None, flags=interpolation, + borderValue=np.NaN) + valid = np.invert(np.isnan(fs)) + valids.append(valid) + fss.append(fs) + return fss, valids + diff --git a/models/research/cognitive_mapping_and_planning/src/rotation_utils.py b/models/research/cognitive_mapping_and_planning/src/rotation_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8d6d4f3cbdb1f808d210dce8b22fa3ba831d45a9 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/src/rotation_utils.py @@ -0,0 +1,73 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Utilities for generating and applying rotation matrices. 
+""" +import numpy as np + +ANGLE_EPS = 0.001 + + +def normalize(v): + return v / np.linalg.norm(v) + + +def get_r_matrix(ax_, angle): + ax = normalize(ax_) + if np.abs(angle) > ANGLE_EPS: + S_hat = np.array( + [[0.0, -ax[2], ax[1]], [ax[2], 0.0, -ax[0]], [-ax[1], ax[0], 0.0]], + dtype=np.float32) + R = np.eye(3) + np.sin(angle)*S_hat + \ + (1-np.cos(angle))*(np.linalg.matrix_power(S_hat, 2)) + else: + R = np.eye(3) + return R + + +def r_between(v_from_, v_to_): + v_from = normalize(v_from_) + v_to = normalize(v_to_) + ax = normalize(np.cross(v_from, v_to)) + angle = np.arccos(np.dot(v_from, v_to)) + return get_r_matrix(ax, angle) + + +def rotate_camera_to_point_at(up_from, lookat_from, up_to, lookat_to): + inputs = [up_from, lookat_from, up_to, lookat_to] + for i in range(4): + inputs[i] = normalize(np.array(inputs[i]).reshape((-1,))) + up_from, lookat_from, up_to, lookat_to = inputs + r1 = r_between(lookat_from, lookat_to) + + new_x = np.dot(r1, np.array([1, 0, 0]).reshape((-1, 1))).reshape((-1)) + to_x = normalize(np.cross(lookat_to, up_to)) + angle = np.arccos(np.dot(new_x, to_x)) + if angle > ANGLE_EPS: + if angle < np.pi - ANGLE_EPS: + ax = normalize(np.cross(new_x, to_x)) + flip = np.dot(lookat_to, ax) + if flip > 0: + r2 = get_r_matrix(lookat_to, angle) + elif flip < 0: + r2 = get_r_matrix(lookat_to, -1. * angle) + else: + # Angle of rotation is too close to 180 degrees, direction of rotation + # does not matter. + r2 = get_r_matrix(lookat_to, angle) + else: + r2 = np.eye(3) + return np.dot(r2, r1) + diff --git a/models/research/cognitive_mapping_and_planning/src/utils.py b/models/research/cognitive_mapping_and_planning/src/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a1b9e44260b7c7884855761f56ac60d6f508c2fb --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/src/utils.py @@ -0,0 +1,168 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""Generaly Utilities. +""" + +import numpy as np, cPickle, os, time +from six.moves import xrange +import src.file_utils as fu +import logging + +class Timer(): + def __init__(self): + self.calls = 0. + self.start_time = 0. + self.time_per_call = 0. + self.total_time = 0. + self.last_log_time = 0. + + def tic(self): + self.start_time = time.time() + + def toc(self, average=True, log_at=-1, log_str='', type='calls'): + if self.start_time == 0: + logging.error('Timer not started by calling tic().') + t = time.time() + diff = time.time() - self.start_time + self.total_time += diff + self.calls += 1. 
+ self.time_per_call = self.total_time/self.calls + + if type == 'calls' and log_at > 0 and np.mod(self.calls, log_at) == 0: + _ = [] + logging.info('%s: %f seconds.', log_str, self.time_per_call) + elif type == 'time' and log_at > 0 and t - self.last_log_time >= log_at: + _ = [] + logging.info('%s: %f seconds.', log_str, self.time_per_call) + self.last_log_time = t + + if average: + return self.time_per_call + else: + return diff + +class Foo(object): + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + def __str__(self): + str_ = '' + for v in vars(self).keys(): + a = getattr(self, v) + if True: #isinstance(v, object): + str__ = str(a) + str__ = str__.replace('\n', '\n ') + else: + str__ = str(a) + str_ += '{:s}: {:s}'.format(v, str__) + str_ += '\n' + return str_ + + +def dict_equal(dict1, dict2): + assert(set(dict1.keys()) == set(dict2.keys())), "Sets of keys between 2 dictionaries are different." + for k in dict1.keys(): + assert(type(dict1[k]) == type(dict2[k])), "Type of key '{:s}' if different.".format(k) + if type(dict1[k]) == np.ndarray: + assert(dict1[k].dtype == dict2[k].dtype), "Numpy Type of key '{:s}' if different.".format(k) + assert(np.allclose(dict1[k], dict2[k])), "Value for key '{:s}' do not match.".format(k) + else: + assert(dict1[k] == dict2[k]), "Value for key '{:s}' do not match.".format(k) + return True + +def subplot(plt, Y_X, sz_y_sz_x = (10, 10)): + Y,X = Y_X + sz_y, sz_x = sz_y_sz_x + plt.rcParams['figure.figsize'] = (X*sz_x, Y*sz_y) + fig, axes = plt.subplots(Y, X) + plt.subplots_adjust(wspace=0.1, hspace=0.1) + return fig, axes + +def tic_toc_print(interval, string): + global tic_toc_print_time_old + if 'tic_toc_print_time_old' not in globals(): + tic_toc_print_time_old = time.time() + print(string) + else: + new_time = time.time() + if new_time - tic_toc_print_time_old > interval: + tic_toc_print_time_old = new_time; + print(string) + +def mkdir_if_missing(output_dir): + if not fu.exists(output_dir): + fu.makedirs(output_dir) + +def save_variables(pickle_file_name, var, info, overwrite = False): + if fu.exists(pickle_file_name) and overwrite == False: + raise Exception('{:s} exists and over write is false.'.format(pickle_file_name)) + # Construct the dictionary + assert(type(var) == list); assert(type(info) == list); + d = {} + for i in xrange(len(var)): + d[info[i]] = var[i] + with fu.fopen(pickle_file_name, 'w') as f: + cPickle.dump(d, f, cPickle.HIGHEST_PROTOCOL) + +def load_variables(pickle_file_name): + if fu.exists(pickle_file_name): + with fu.fopen(pickle_file_name, 'r') as f: + d = cPickle.load(f) + return d + else: + raise Exception('{:s} does not exists.'.format(pickle_file_name)) + +def voc_ap(rec, prec): + rec = rec.reshape((-1,1)) + prec = prec.reshape((-1,1)) + z = np.zeros((1,1)) + o = np.ones((1,1)) + mrec = np.vstack((z, rec, o)) + mpre = np.vstack((z, prec, z)) + for i in range(len(mpre)-2, -1, -1): + mpre[i] = max(mpre[i], mpre[i+1]) + + I = np.where(mrec[1:] != mrec[0:-1])[0]+1; + ap = 0; + for i in I: + ap = ap + (mrec[i] - mrec[i-1])*mpre[i]; + return ap + +def tight_imshow_figure(plt, figsize=None): + fig = plt.figure(figsize=figsize) + ax = plt.Axes(fig, [0,0,1,1]) + ax.set_axis_off() + fig.add_axes(ax) + return fig, ax + +def calc_pr(gt, out, wt=None): + if wt is None: + wt = np.ones((gt.size,1)) + + gt = gt.astype(np.float64).reshape((-1,1)) + wt = wt.astype(np.float64).reshape((-1,1)) + out = out.astype(np.float64).reshape((-1,1)) + + gt = gt*wt + tog = np.concatenate([gt, wt, out], axis=1)*1. 
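+  # Each row of tog is [weighted label, weight, score]. Sorting by score in
+  # descending order and taking cumulative sums gives, at every score
+  # threshold, precision = cumsum(gt*wt) / cumsum(wt) and
+  # recall = cumsum(gt*wt) / sum(gt*wt); voc_ap then integrates the resulting
+  # precision-recall curve into a single average-precision value.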
+ ind = np.argsort(tog[:,2], axis=0)[::-1] + tog = tog[ind,:] + cumsumsortgt = np.cumsum(tog[:,0]) + cumsumsortwt = np.cumsum(tog[:,1]) + prec = cumsumsortgt / cumsumsortwt + rec = cumsumsortgt / np.sum(tog[:,0]) + + ap = voc_ap(rec, prec) + return ap, rec, prec diff --git a/models/research/cognitive_mapping_and_planning/tfcode/__init__.py b/models/research/cognitive_mapping_and_planning/tfcode/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/cognitive_mapping_and_planning/tfcode/cmp.py b/models/research/cognitive_mapping_and_planning/tfcode/cmp.py new file mode 100644 index 0000000000000000000000000000000000000000..228ef90fddcd9ff41b26795544d93a1f18466158 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/tfcode/cmp.py @@ -0,0 +1,553 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Code for setting up the network for CMP. + +Sets up the mapper and the planner. +""" + +import sys, os, numpy as np +import matplotlib.pyplot as plt +import copy +import argparse, pprint +import time + + +import tensorflow as tf + +from tensorflow.contrib import slim +from tensorflow.contrib.slim import arg_scope + +import logging +from tensorflow.python.platform import app +from tensorflow.python.platform import flags +from src import utils +import src.file_utils as fu +import tfcode.nav_utils as nu +import tfcode.cmp_utils as cu +import tfcode.cmp_summary as cmp_s +from tfcode import tf_utils + +value_iteration_network = cu.value_iteration_network +rotate_preds = cu.rotate_preds +deconv = cu.deconv +get_visual_frustum = cu.get_visual_frustum +fr_v2 = cu.fr_v2 + +setup_train_step_kwargs = nu.default_train_step_kwargs +compute_losses_multi_or = nu.compute_losses_multi_or + +get_repr_from_image = nu.get_repr_from_image + +_save_d_at_t = nu.save_d_at_t +_save_all = nu.save_all +_eval_ap = nu.eval_ap +_eval_dist = nu.eval_dist +_plot_trajectories = nu.plot_trajectories + +_vis_readout_maps = cmp_s._vis_readout_maps +_vis = cmp_s._vis +_summary_vis = cmp_s._summary_vis +_summary_readout_maps = cmp_s._summary_readout_maps +_add_summaries = cmp_s._add_summaries + +def _inputs(problem): + # Set up inputs. + with tf.name_scope('inputs'): + inputs = [] + inputs.append(('orig_maps', tf.float32, + (problem.batch_size, 1, None, None, 1))) + inputs.append(('goal_loc', tf.float32, + (problem.batch_size, problem.num_goals, 2))) + common_input_data, _ = tf_utils.setup_inputs(inputs) + + inputs = [] + if problem.input_type == 'vision': + # Multiple images from an array of cameras. 
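+      # The 'imgs' placeholder below is batch x time x views x height x width
+      # x channels, where views = len(aux_delta_thetas) + 1: the primary view
+      # plus one additional view per entry in aux_delta_thetas. The time
+      # dimension is left as None so the same graph can be fed one step at a
+      # time at test time or a full unrolled trajectory during training.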
+ inputs.append(('imgs', tf.float32, + (problem.batch_size, None, len(problem.aux_delta_thetas)+1, + problem.img_height, problem.img_width, + problem.img_channels))) + elif problem.input_type == 'analytical_counts': + for i in range(len(problem.map_crop_sizes)): + inputs.append(('analytical_counts_{:d}'.format(i), tf.float32, + (problem.batch_size, None, problem.map_crop_sizes[i], + problem.map_crop_sizes[i], problem.map_channels))) + + if problem.outputs.readout_maps: + for i in range(len(problem.readout_maps_crop_sizes)): + inputs.append(('readout_maps_{:d}'.format(i), tf.float32, + (problem.batch_size, None, + problem.readout_maps_crop_sizes[i], + problem.readout_maps_crop_sizes[i], + problem.readout_maps_channels))) + + for i in range(len(problem.map_crop_sizes)): + inputs.append(('ego_goal_imgs_{:d}'.format(i), tf.float32, + (problem.batch_size, None, problem.map_crop_sizes[i], + problem.map_crop_sizes[i], problem.goal_channels))) + for s in ['sum_num', 'sum_denom', 'max_denom']: + inputs.append(('running_'+s+'_{:d}'.format(i), tf.float32, + (problem.batch_size, 1, problem.map_crop_sizes[i], + problem.map_crop_sizes[i], problem.map_channels))) + + inputs.append(('incremental_locs', tf.float32, + (problem.batch_size, None, 2))) + inputs.append(('incremental_thetas', tf.float32, + (problem.batch_size, None, 1))) + inputs.append(('step_number', tf.int32, (1, None, 1))) + inputs.append(('node_ids', tf.int32, (problem.batch_size, None, + problem.node_ids_dim))) + inputs.append(('perturbs', tf.float32, (problem.batch_size, None, + problem.perturbs_dim))) + + # For plotting result plots + inputs.append(('loc_on_map', tf.float32, (problem.batch_size, None, 2))) + inputs.append(('gt_dist_to_goal', tf.float32, (problem.batch_size, None, 1))) + + step_input_data, _ = tf_utils.setup_inputs(inputs) + + inputs = [] + inputs.append(('action', tf.int32, (problem.batch_size, None, problem.num_actions))) + train_data, _ = tf_utils.setup_inputs(inputs) + train_data.update(step_input_data) + train_data.update(common_input_data) + return common_input_data, step_input_data, train_data + +def readout_general(multi_scale_belief, num_neurons, strides, layers_per_block, + kernel_size, batch_norm_is_training_op, wt_decay): + multi_scale_belief = tf.stop_gradient(multi_scale_belief) + with tf.variable_scope('readout_maps_deconv'): + x, outs = deconv(multi_scale_belief, batch_norm_is_training_op, + wt_decay=wt_decay, neurons=num_neurons, strides=strides, + layers_per_block=layers_per_block, kernel_size=kernel_size, + conv_fn=slim.conv2d_transpose, offset=0, + name='readout_maps_deconv') + probs = tf.sigmoid(x) + return x, probs + + +def running_combine(fss_logits, confs_probs, incremental_locs, + incremental_thetas, previous_sum_num, previous_sum_denom, + previous_max_denom, map_size, num_steps): + # fss_logits is B x N x H x W x C + # confs_logits is B x N x H x W x C + # incremental_locs is B x N x 2 + # incremental_thetas is B x N x 1 + # previous_sum_num etc is B x 1 x H x W x C + + with tf.name_scope('combine_{:d}'.format(num_steps)): + running_sum_nums_ = []; running_sum_denoms_ = []; + running_max_denoms_ = []; + + fss_logits_ = tf.unstack(fss_logits, axis=1, num=num_steps) + confs_probs_ = tf.unstack(confs_probs, axis=1, num=num_steps) + incremental_locs_ = tf.unstack(incremental_locs, axis=1, num=num_steps) + incremental_thetas_ = tf.unstack(incremental_thetas, axis=1, num=num_steps) + running_sum_num = tf.unstack(previous_sum_num, axis=1, num=1)[0] + running_sum_denom = tf.unstack(previous_sum_denom, 
axis=1, num=1)[0] + running_max_denom = tf.unstack(previous_max_denom, axis=1, num=1)[0] + + for i in range(num_steps): + # Rotate the previous running_num and running_denom + running_sum_num, running_sum_denom, running_max_denom = rotate_preds( + incremental_locs_[i], incremental_thetas_[i], map_size, + [running_sum_num, running_sum_denom, running_max_denom], + output_valid_mask=False)[0] + # print i, num_steps, running_sum_num.get_shape().as_list() + running_sum_num = running_sum_num + fss_logits_[i] * confs_probs_[i] + running_sum_denom = running_sum_denom + confs_probs_[i] + running_max_denom = tf.maximum(running_max_denom, confs_probs_[i]) + running_sum_nums_.append(running_sum_num) + running_sum_denoms_.append(running_sum_denom) + running_max_denoms_.append(running_max_denom) + + running_sum_nums = tf.stack(running_sum_nums_, axis=1) + running_sum_denoms = tf.stack(running_sum_denoms_, axis=1) + running_max_denoms = tf.stack(running_max_denoms_, axis=1) + return running_sum_nums, running_sum_denoms, running_max_denoms + +def get_map_from_images(imgs, mapper_arch, task_params, freeze_conv, wt_decay, + is_training, batch_norm_is_training_op, num_maps, + split_maps=True): + # Hit image with a resnet. + n_views = len(task_params.aux_delta_thetas) + 1 + out = utils.Foo() + + images_reshaped = tf.reshape(imgs, + shape=[-1, task_params.img_height, + task_params.img_width, + task_params.img_channels], name='re_image') + + x, out.vars_to_restore = get_repr_from_image( + images_reshaped, task_params.modalities, task_params.data_augment, + mapper_arch.encoder, freeze_conv, wt_decay, is_training) + + # Reshape into nice things so that these can be accumulated over time steps + # for faster backprop. + sh_before = x.get_shape().as_list() + out.encoder_output = tf.reshape(x, shape=[task_params.batch_size, -1, n_views] + sh_before[1:]) + x = tf.reshape(out.encoder_output, shape=[-1] + sh_before[1:]) + + # Add a layer to reduce dimensions for a fc layer. + if mapper_arch.dim_reduce_neurons > 0: + ks = 1; neurons = mapper_arch.dim_reduce_neurons; + init_var = np.sqrt(2.0/(ks**2)/neurons) + batch_norm_param = mapper_arch.batch_norm_param + batch_norm_param['is_training'] = batch_norm_is_training_op + out.conv_feat = slim.conv2d(x, neurons, kernel_size=ks, stride=1, + normalizer_fn=slim.batch_norm, normalizer_params=batch_norm_param, + padding='SAME', scope='dim_reduce', + weights_regularizer=slim.l2_regularizer(wt_decay), + weights_initializer=tf.random_normal_initializer(stddev=init_var)) + reshape_conv_feat = slim.flatten(out.conv_feat) + sh = reshape_conv_feat.get_shape().as_list() + out.reshape_conv_feat = tf.reshape(reshape_conv_feat, shape=[-1, sh[1]*n_views]) + + with tf.variable_scope('fc'): + # Fully connected layers to compute the representation in top-view space. + fc_batch_norm_param = {'center': True, 'scale': True, + 'activation_fn':tf.nn.relu, + 'is_training': batch_norm_is_training_op} + f = out.reshape_conv_feat + out_neurons = (mapper_arch.fc_out_size**2)*mapper_arch.fc_out_neurons + neurons = mapper_arch.fc_neurons + [out_neurons] + f, _ = tf_utils.fc_network(f, neurons=neurons, wt_decay=wt_decay, + name='fc', offset=0, + batch_norm_param=fc_batch_norm_param, + is_training=is_training, + dropout_ratio=mapper_arch.fc_dropout) + f = tf.reshape(f, shape=[-1, mapper_arch.fc_out_size, + mapper_arch.fc_out_size, + mapper_arch.fc_out_neurons], name='re_fc') + + # Use pool5 to predict the free space map via deconv layers. 
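+  # The fully connected output f (fc_out_size x fc_out_size x fc_out_neurons)
+  # is upsampled to the egocentric map resolution by a stack of
+  # transposed-convolution residual blocks; the resulting channels are then
+  # split into num_maps free-space logit maps and num_maps confidence logits,
+  # with the confidences squashed through a sigmoid.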
+ with tf.variable_scope('deconv'): + x, outs = deconv(f, batch_norm_is_training_op, wt_decay=wt_decay, + neurons=mapper_arch.deconv_neurons, + strides=mapper_arch.deconv_strides, + layers_per_block=mapper_arch.deconv_layers_per_block, + kernel_size=mapper_arch.deconv_kernel_size, + conv_fn=slim.conv2d_transpose, offset=0, name='deconv') + + # Reshape x the right way. + sh = x.get_shape().as_list() + x = tf.reshape(x, shape=[task_params.batch_size, -1] + sh[1:]) + out.deconv_output = x + + # Separate out the map and the confidence predictions, pass the confidence + # through a sigmoid. + if split_maps: + with tf.name_scope('split'): + out_all = tf.split(value=x, axis=4, num_or_size_splits=2*num_maps) + out.fss_logits = out_all[:num_maps] + out.confs_logits = out_all[num_maps:] + with tf.name_scope('sigmoid'): + out.confs_probs = [tf.nn.sigmoid(x) for x in out.confs_logits] + return out + +def setup_to_run(m, args, is_training, batch_norm_is_training, summary_mode): + assert(args.arch.multi_scale), 'removed support for old single scale code.' + # Set up the model. + tf.set_random_seed(args.solver.seed) + task_params = args.navtask.task_params + + batch_norm_is_training_op = \ + tf.placeholder_with_default(batch_norm_is_training, shape=[], + name='batch_norm_is_training_op') + + # Setup the inputs + m.input_tensors = {} + m.train_ops = {} + m.input_tensors['common'], m.input_tensors['step'], m.input_tensors['train'] = \ + _inputs(task_params) + + m.init_fn = None + + if task_params.input_type == 'vision': + m.vision_ops = get_map_from_images( + m.input_tensors['step']['imgs'], args.mapper_arch, + task_params, args.solver.freeze_conv, + args.solver.wt_decay, is_training, batch_norm_is_training_op, + num_maps=len(task_params.map_crop_sizes)) + + # Load variables from snapshot if needed. + if args.solver.pretrained_path is not None: + m.init_fn = slim.assign_from_checkpoint_fn(args.solver.pretrained_path, + m.vision_ops.vars_to_restore) + + # Set up caching of vision features if needed. + if args.solver.freeze_conv: + m.train_ops['step_data_cache'] = [m.vision_ops.encoder_output] + else: + m.train_ops['step_data_cache'] = [] + + # Set up blobs that are needed for the computation in rest of the graph. + m.ego_map_ops = m.vision_ops.fss_logits + m.coverage_ops = m.vision_ops.confs_probs + + # Zero pad these to make them same size as what the planner expects. 
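+    # paddings has one row per dimension of the B x T x H x W x C map tensor;
+    # only rows 2 and 3 (height and width) are non-zero, so each crop is padded
+    # symmetrically on all four sides. For example, a pad of 4 grows a 32x32
+    # mapper output to 40x40.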
+ for i in range(len(m.ego_map_ops)): + if args.mapper_arch.pad_map_with_zeros_each[i] > 0: + paddings = np.zeros((5,2), dtype=np.int32) + paddings[2:4,:] = args.mapper_arch.pad_map_with_zeros_each[i] + paddings_op = tf.constant(paddings, dtype=tf.int32) + m.ego_map_ops[i] = tf.pad(m.ego_map_ops[i], paddings=paddings_op) + m.coverage_ops[i] = tf.pad(m.coverage_ops[i], paddings=paddings_op) + + elif task_params.input_type == 'analytical_counts': + m.ego_map_ops = []; m.coverage_ops = [] + for i in range(len(task_params.map_crop_sizes)): + ego_map_op = m.input_tensors['step']['analytical_counts_{:d}'.format(i)] + coverage_op = tf.cast(tf.greater_equal( + tf.reduce_max(ego_map_op, reduction_indices=[4], + keep_dims=True), 1), tf.float32) + coverage_op = tf.ones_like(ego_map_op) * coverage_op + m.ego_map_ops.append(ego_map_op) + m.coverage_ops.append(coverage_op) + m.train_ops['step_data_cache'] = [] + + num_steps = task_params.num_steps + num_goals = task_params.num_goals + + map_crop_size_ops = [] + for map_crop_size in task_params.map_crop_sizes: + map_crop_size_ops.append(tf.constant(map_crop_size, dtype=tf.int32, shape=(2,))) + + with tf.name_scope('check_size'): + is_single_step = tf.equal(tf.unstack(tf.shape(m.ego_map_ops[0]), num=5)[1], 1) + + fr_ops = []; value_ops = []; + fr_intermediate_ops = []; value_intermediate_ops = []; + crop_value_ops = []; + resize_crop_value_ops = []; + confs = []; occupancys = []; + + previous_value_op = None + updated_state = []; state_names = []; + + for i in range(len(task_params.map_crop_sizes)): + map_crop_size = task_params.map_crop_sizes[i] + with tf.variable_scope('scale_{:d}'.format(i)): + # Accumulate the map. + fn = lambda ns: running_combine( + m.ego_map_ops[i], + m.coverage_ops[i], + m.input_tensors['step']['incremental_locs'] * task_params.map_scales[i], + m.input_tensors['step']['incremental_thetas'], + m.input_tensors['step']['running_sum_num_{:d}'.format(i)], + m.input_tensors['step']['running_sum_denom_{:d}'.format(i)], + m.input_tensors['step']['running_max_denom_{:d}'.format(i)], + map_crop_size, ns) + + running_sum_num, running_sum_denom, running_max_denom = \ + tf.cond(is_single_step, lambda: fn(1), lambda: fn(num_steps*num_goals)) + updated_state += [running_sum_num, running_sum_denom, running_max_denom] + state_names += ['running_sum_num_{:d}'.format(i), + 'running_sum_denom_{:d}'.format(i), + 'running_max_denom_{:d}'.format(i)] + + # Concat the accumulated map and goal + occupancy = running_sum_num / tf.maximum(running_sum_denom, 0.001) + conf = running_max_denom + # print occupancy.get_shape().as_list() + + # Concat occupancy, how much occupied and goal. + with tf.name_scope('concat'): + sh = [-1, map_crop_size, map_crop_size, task_params.map_channels] + occupancy = tf.reshape(occupancy, shape=sh) + conf = tf.reshape(conf, shape=sh) + + sh = [-1, map_crop_size, map_crop_size, task_params.goal_channels] + goal = tf.reshape(m.input_tensors['step']['ego_goal_imgs_{:d}'.format(i)], shape=sh) + to_concat = [occupancy, conf, goal] + + if previous_value_op is not None: + to_concat.append(previous_value_op) + + x = tf.concat(to_concat, 3) + + # Pass the map, previous rewards and the goal through a few convolutional + # layers to get fR. 
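+        # fr_v2 below is a small residual bottleneck stack that fuses the
+        # accumulated occupancy belief, its confidence, the egocentric goal
+        # image and, for every scale after the first, the value map cropped and
+        # upsampled from the preceding scale; its output feeds the
+        # value-iteration module below (or is used directly as the value map
+        # when vin_num_iters is 0).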
+ fr_op, fr_intermediate_op = fr_v2( + x, output_neurons=args.arch.fr_neurons, + inside_neurons=args.arch.fr_inside_neurons, + is_training=batch_norm_is_training_op, name='fr', + wt_decay=args.solver.wt_decay, stride=args.arch.fr_stride) + + # Do Value Iteration on the fR + if args.arch.vin_num_iters > 0: + value_op, value_intermediate_op = value_iteration_network( + fr_op, num_iters=args.arch.vin_num_iters, + val_neurons=args.arch.vin_val_neurons, + action_neurons=args.arch.vin_action_neurons, + kernel_size=args.arch.vin_ks, share_wts=args.arch.vin_share_wts, + name='vin', wt_decay=args.solver.wt_decay) + else: + value_op = fr_op + value_intermediate_op = [] + + # Crop out and upsample the previous value map. + remove = args.arch.crop_remove_each + if remove > 0: + crop_value_op = value_op[:, remove:-remove, remove:-remove,:] + else: + crop_value_op = value_op + crop_value_op = tf.reshape(crop_value_op, shape=[-1, args.arch.value_crop_size, + args.arch.value_crop_size, + args.arch.vin_val_neurons]) + if i < len(task_params.map_crop_sizes)-1: + # Reshape it to shape of the next scale. + previous_value_op = tf.image.resize_bilinear(crop_value_op, + map_crop_size_ops[i+1], + align_corners=True) + resize_crop_value_ops.append(previous_value_op) + + occupancys.append(occupancy) + confs.append(conf) + value_ops.append(value_op) + crop_value_ops.append(crop_value_op) + fr_ops.append(fr_op) + fr_intermediate_ops.append(fr_intermediate_op) + + m.value_ops = value_ops + m.value_intermediate_ops = value_intermediate_ops + m.fr_ops = fr_ops + m.fr_intermediate_ops = fr_intermediate_ops + m.final_value_op = crop_value_op + m.crop_value_ops = crop_value_ops + m.resize_crop_value_ops = resize_crop_value_ops + m.confs = confs + m.occupancys = occupancys + + sh = [-1, args.arch.vin_val_neurons*((args.arch.value_crop_size)**2)] + m.value_features_op = tf.reshape(m.final_value_op, sh, name='reshape_value_op') + + # Determine what action to take. 
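+  # The cropped value map from the last scale is flattened into
+  # value_features_op and passed through a small fully connected head that
+  # produces one logit per action; its softmax, action_prob_op, is exposed as
+  # the network's per-step output.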
+ with tf.variable_scope('action_pred'): + batch_norm_param = args.arch.pred_batch_norm_param + if batch_norm_param is not None: + batch_norm_param['is_training'] = batch_norm_is_training_op + m.action_logits_op, _ = tf_utils.fc_network( + m.value_features_op, neurons=args.arch.pred_neurons, + wt_decay=args.solver.wt_decay, name='pred', offset=0, + num_pred=task_params.num_actions, + batch_norm_param=batch_norm_param) + m.action_prob_op = tf.nn.softmax(m.action_logits_op) + + init_state = tf.constant(0., dtype=tf.float32, shape=[ + task_params.batch_size, 1, map_crop_size, map_crop_size, + task_params.map_channels]) + + m.train_ops['state_names'] = state_names + m.train_ops['updated_state'] = updated_state + m.train_ops['init_state'] = [init_state for _ in updated_state] + + m.train_ops['step'] = m.action_prob_op + m.train_ops['common'] = [m.input_tensors['common']['orig_maps'], + m.input_tensors['common']['goal_loc']] + m.train_ops['batch_norm_is_training_op'] = batch_norm_is_training_op + m.loss_ops = []; m.loss_ops_names = []; + + if args.arch.readout_maps: + with tf.name_scope('readout_maps'): + all_occupancys = tf.concat(m.occupancys + m.confs, 3) + readout_maps, probs = readout_general( + all_occupancys, num_neurons=args.arch.rom_arch.num_neurons, + strides=args.arch.rom_arch.strides, + layers_per_block=args.arch.rom_arch.layers_per_block, + kernel_size=args.arch.rom_arch.kernel_size, + batch_norm_is_training_op=batch_norm_is_training_op, + wt_decay=args.solver.wt_decay) + + gt_ego_maps = [m.input_tensors['step']['readout_maps_{:d}'.format(i)] + for i in range(len(task_params.readout_maps_crop_sizes))] + m.readout_maps_gt = tf.concat(gt_ego_maps, 4) + gt_shape = tf.shape(m.readout_maps_gt) + m.readout_maps_logits = tf.reshape(readout_maps, gt_shape) + m.readout_maps_probs = tf.reshape(probs, gt_shape) + + # Add a loss op + m.readout_maps_loss_op = tf.losses.sigmoid_cross_entropy( + tf.reshape(m.readout_maps_gt, [-1, len(task_params.readout_maps_crop_sizes)]), + tf.reshape(readout_maps, [-1, len(task_params.readout_maps_crop_sizes)]), + scope='loss') + m.readout_maps_loss_op = 10.*m.readout_maps_loss_op + + ewma_decay = 0.99 if is_training else 0.0 + weight = tf.ones_like(m.input_tensors['train']['action'], dtype=tf.float32, + name='weight') + m.reg_loss_op, m.data_loss_op, m.total_loss_op, m.acc_ops = \ + compute_losses_multi_or(m.action_logits_op, + m.input_tensors['train']['action'], weights=weight, + num_actions=task_params.num_actions, + data_loss_wt=args.solver.data_loss_wt, + reg_loss_wt=args.solver.reg_loss_wt, + ewma_decay=ewma_decay) + + if args.arch.readout_maps: + m.total_loss_op = m.total_loss_op + m.readout_maps_loss_op + m.loss_ops += [m.readout_maps_loss_op] + m.loss_ops_names += ['readout_maps_loss'] + + m.loss_ops += [m.reg_loss_op, m.data_loss_op, m.total_loss_op] + m.loss_ops_names += ['reg_loss', 'data_loss', 'total_loss'] + + if args.solver.freeze_conv: + vars_to_optimize = list(set(tf.trainable_variables()) - + set(m.vision_ops.vars_to_restore)) + else: + vars_to_optimize = None + + m.lr_op, m.global_step_op, m.train_op, m.should_stop_op, m.optimizer, \ + m.sync_optimizer = tf_utils.setup_training( + m.total_loss_op, + args.solver.initial_learning_rate, + args.solver.steps_per_decay, + args.solver.learning_rate_decay, + args.solver.momentum, + args.solver.max_steps, + args.solver.sync, + args.solver.adjust_lr_sync, + args.solver.num_workers, + args.solver.task, + vars_to_optimize=vars_to_optimize, + clip_gradient_norm=args.solver.clip_gradient_norm, + 
typ=args.solver.typ, momentum2=args.solver.momentum2, + adam_eps=args.solver.adam_eps) + + if args.arch.sample_gt_prob_type == 'inverse_sigmoid_decay': + m.sample_gt_prob_op = tf_utils.inverse_sigmoid_decay(args.arch.isd_k, + m.global_step_op) + elif args.arch.sample_gt_prob_type == 'zero': + m.sample_gt_prob_op = tf.constant(-1.0, dtype=tf.float32) + + elif args.arch.sample_gt_prob_type.split('_')[0] == 'step': + step = int(args.arch.sample_gt_prob_type.split('_')[1]) + m.sample_gt_prob_op = tf_utils.step_gt_prob( + step, m.input_tensors['step']['step_number'][0,0,0]) + + m.sample_action_type = args.arch.action_sample_type + m.sample_action_combine_type = args.arch.action_sample_combine_type + + m.summary_ops = { + summary_mode: _add_summaries(m, args, summary_mode, + args.summary.arop_full_summary_iters)} + + m.init_op = tf.group(tf.global_variables_initializer(), + tf.local_variables_initializer()) + m.saver_op = tf.train.Saver(keep_checkpoint_every_n_hours=4, + write_version=tf.train.SaverDef.V2) + return m diff --git a/models/research/cognitive_mapping_and_planning/tfcode/cmp_summary.py b/models/research/cognitive_mapping_and_planning/tfcode/cmp_summary.py new file mode 100644 index 0000000000000000000000000000000000000000..55313bfbd52a9e079e1de5093ae1882a9bf1d858 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/tfcode/cmp_summary.py @@ -0,0 +1,213 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Code for setting up summaries for CMP. +""" + +import sys, os, numpy as np +import matplotlib.pyplot as plt + + +import tensorflow as tf + +from tensorflow.contrib import slim +from tensorflow.contrib.slim import arg_scope + +import logging +from tensorflow.python.platform import app +from tensorflow.python.platform import flags +from src import utils +import src.file_utils as fu +import tfcode.nav_utils as nu + +def _vis_readout_maps(outputs, global_step, output_dir, metric_summary, N): + # outputs is [gt_map, pred_map]: + if N >= 0: + outputs = outputs[:N] + N = len(outputs) + + plt.set_cmap('jet') + fig, axes = utils.subplot(plt, (N, outputs[0][0].shape[4]*2), (5,5)) + axes = axes.ravel()[::-1].tolist() + for i in range(N): + gt_map, pred_map = outputs[i] + for j in [0]: + for k in range(gt_map.shape[4]): + # Display something like the midpoint of the trajectory. + id = np.int(gt_map.shape[1]/2) + + ax = axes.pop(); + ax.imshow(gt_map[j,id,:,:,k], origin='lower', interpolation='none', + vmin=0., vmax=1.) + ax.set_axis_off(); + if i == 0: ax.set_title('gt_map') + + ax = axes.pop(); + ax.imshow(pred_map[j,id,:,:,k], origin='lower', interpolation='none', + vmin=0., vmax=1.) 
+ ax.set_axis_off(); + if i == 0: ax.set_title('pred_map') + + file_name = os.path.join(output_dir, 'readout_map_{:d}.png'.format(global_step)) + with fu.fopen(file_name, 'w') as f: + fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0) + plt.close(fig) + +def _vis(outputs, global_step, output_dir, metric_summary, N): + # Plot the value map, goal for various maps to see what if the model is + # learning anything useful. + # + # outputs is [values, goals, maps, occupancy, conf]. + # + if N >= 0: + outputs = outputs[:N] + N = len(outputs) + + plt.set_cmap('jet') + fig, axes = utils.subplot(plt, (N, outputs[0][0].shape[4]*5), (5,5)) + axes = axes.ravel()[::-1].tolist() + for i in range(N): + values, goals, maps, occupancy, conf = outputs[i] + for j in [0]: + for k in range(values.shape[4]): + # Display something like the midpoint of the trajectory. + id = np.int(values.shape[1]/2) + + ax = axes.pop(); + ax.imshow(goals[j,id,:,:,k], origin='lower', interpolation='none') + ax.set_axis_off(); + if i == 0: ax.set_title('goal') + + ax = axes.pop(); + ax.imshow(occupancy[j,id,:,:,k], origin='lower', interpolation='none') + ax.set_axis_off(); + if i == 0: ax.set_title('occupancy') + + ax = axes.pop(); + ax.imshow(conf[j,id,:,:,k], origin='lower', interpolation='none', + vmin=0., vmax=1.) + ax.set_axis_off(); + if i == 0: ax.set_title('conf') + + ax = axes.pop(); + ax.imshow(values[j,id,:,:,k], origin='lower', interpolation='none') + ax.set_axis_off(); + if i == 0: ax.set_title('value') + + ax = axes.pop(); + ax.imshow(maps[j,id,:,:,k], origin='lower', interpolation='none') + ax.set_axis_off(); + if i == 0: ax.set_title('incr map') + + file_name = os.path.join(output_dir, 'value_vis_{:d}.png'.format(global_step)) + with fu.fopen(file_name, 'w') as f: + fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0) + plt.close(fig) + +def _summary_vis(m, batch_size, num_steps, arop_full_summary_iters): + arop = []; arop_summary_iters = []; arop_eval_fns = []; + vis_value_ops = []; vis_goal_ops = []; vis_map_ops = []; + vis_occupancy_ops = []; vis_conf_ops = []; + for i, val_op in enumerate(m.value_ops): + vis_value_op = tf.reduce_mean(tf.abs(val_op), axis=3, keep_dims=True) + vis_value_ops.append(vis_value_op) + + vis_occupancy_op = tf.reduce_mean(tf.abs(m.occupancys[i]), 3, True) + vis_occupancy_ops.append(vis_occupancy_op) + + vis_conf_op = tf.reduce_max(tf.abs(m.confs[i]), axis=3, keep_dims=True) + vis_conf_ops.append(vis_conf_op) + + ego_goal_imgs_i_op = m.input_tensors['step']['ego_goal_imgs_{:d}'.format(i)] + vis_goal_op = tf.reduce_max(ego_goal_imgs_i_op, 4, True) + vis_goal_ops.append(vis_goal_op) + + vis_map_op = tf.reduce_mean(tf.abs(m.ego_map_ops[i]), 4, True) + vis_map_ops.append(vis_map_op) + + vis_goal_ops = tf.concat(vis_goal_ops, 4) + vis_map_ops = tf.concat(vis_map_ops, 4) + vis_value_ops = tf.concat(vis_value_ops, 3) + vis_occupancy_ops = tf.concat(vis_occupancy_ops, 3) + vis_conf_ops = tf.concat(vis_conf_ops, 3) + + sh = tf.unstack(tf.shape(vis_value_ops))[1:] + vis_value_ops = tf.reshape(vis_value_ops, shape=[batch_size, -1] + sh) + + sh = tf.unstack(tf.shape(vis_conf_ops))[1:] + vis_conf_ops = tf.reshape(vis_conf_ops, shape=[batch_size, -1] + sh) + + sh = tf.unstack(tf.shape(vis_occupancy_ops))[1:] + vis_occupancy_ops = tf.reshape(vis_occupancy_ops, shape=[batch_size,-1] + sh) + + # Save memory, only return time steps that need to be visualized, factor of + # 32 CPU memory saving. 
+ id = np.int(num_steps/2) + vis_goal_ops = tf.expand_dims(vis_goal_ops[:,id,:,:,:], axis=1) + vis_map_ops = tf.expand_dims(vis_map_ops[:,id,:,:,:], axis=1) + vis_value_ops = tf.expand_dims(vis_value_ops[:,id,:,:,:], axis=1) + vis_conf_ops = tf.expand_dims(vis_conf_ops[:,id,:,:,:], axis=1) + vis_occupancy_ops = tf.expand_dims(vis_occupancy_ops[:,id,:,:,:], axis=1) + + arop += [[vis_value_ops, vis_goal_ops, vis_map_ops, vis_occupancy_ops, + vis_conf_ops]] + arop_summary_iters += [arop_full_summary_iters] + arop_eval_fns += [_vis] + return arop, arop_summary_iters, arop_eval_fns + +def _summary_readout_maps(m, num_steps, arop_full_summary_iters): + arop = []; arop_summary_iters = []; arop_eval_fns = []; + id = np.int(num_steps-1) + vis_readout_maps_gt = m.readout_maps_gt + vis_readout_maps_prob = tf.reshape(m.readout_maps_probs, + shape=tf.shape(vis_readout_maps_gt)) + vis_readout_maps_gt = tf.expand_dims(vis_readout_maps_gt[:,id,:,:,:], 1) + vis_readout_maps_prob = tf.expand_dims(vis_readout_maps_prob[:,id,:,:,:], 1) + arop += [[vis_readout_maps_gt, vis_readout_maps_prob]] + arop_summary_iters += [arop_full_summary_iters] + arop_eval_fns += [_vis_readout_maps] + return arop, arop_summary_iters, arop_eval_fns + +def _add_summaries(m, args, summary_mode, arop_full_summary_iters): + task_params = args.navtask.task_params + + summarize_ops = [m.lr_op, m.global_step_op, m.sample_gt_prob_op] + \ + m.loss_ops + m.acc_ops + summarize_names = ['lr', 'global_step', 'sample_gt_prob_op'] + \ + m.loss_ops_names + ['acc_{:d}'.format(i) for i in range(len(m.acc_ops))] + to_aggregate = [0, 0, 0] + [1]*len(m.loss_ops_names) + [1]*len(m.acc_ops) + + scope_name = 'summary' + with tf.name_scope(scope_name): + s_ops = nu.add_default_summaries(summary_mode, arop_full_summary_iters, + summarize_ops, summarize_names, + to_aggregate, m.action_prob_op, + m.input_tensors, scope_name=scope_name) + if summary_mode == 'val': + arop, arop_summary_iters, arop_eval_fns = _summary_vis( + m, task_params.batch_size, task_params.num_steps, + arop_full_summary_iters) + s_ops.additional_return_ops += arop + s_ops.arop_summary_iters += arop_summary_iters + s_ops.arop_eval_fns += arop_eval_fns + + if args.arch.readout_maps: + arop, arop_summary_iters, arop_eval_fns = _summary_readout_maps( + m, task_params.num_steps, arop_full_summary_iters) + s_ops.additional_return_ops += arop + s_ops.arop_summary_iters += arop_summary_iters + s_ops.arop_eval_fns += arop_eval_fns + + return s_ops diff --git a/models/research/cognitive_mapping_and_planning/tfcode/cmp_utils.py b/models/research/cognitive_mapping_and_planning/tfcode/cmp_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6d87c697b4b29128c8b8a42caac27aeb4d657ec6 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/tfcode/cmp_utils.py @@ -0,0 +1,164 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Utility functions for setting up the CMP graph. +""" + +import os, numpy as np +import matplotlib.pyplot as plt + + +import tensorflow as tf + +from tensorflow.contrib import slim +from tensorflow.contrib.slim import arg_scope +import logging +from src import utils +import src.file_utils as fu +from tfcode import tf_utils + +resnet_v2 = tf_utils.resnet_v2 +custom_residual_block = tf_utils.custom_residual_block + +def value_iteration_network( + fr, num_iters, val_neurons, action_neurons, kernel_size, share_wts=False, + name='vin', wt_decay=0.0001, activation_fn=None, shape_aware=False): + """ + Constructs a Value Iteration Network, convolutions and max pooling across + channels. + Input: + fr: NxWxHxC + val_neurons: Number of channels for maintaining the value. + action_neurons: Computes action_neurons * val_neurons at each iteration to + max pool over. + Output: + value image: NxHxWx(val_neurons) + """ + init_var = np.sqrt(2.0/(kernel_size**2)/(val_neurons*action_neurons)) + vals = [] + with tf.variable_scope(name) as varscope: + if shape_aware == False: + fr_shape = tf.unstack(tf.shape(fr)) + val_shape = tf.stack(fr_shape[:-1] + [val_neurons]) + val = tf.zeros(val_shape, name='val_init') + else: + val = tf.expand_dims(tf.zeros_like(fr[:,:,:,0]), dim=-1) * \ + tf.constant(0., dtype=tf.float32, shape=[1,1,1,val_neurons]) + val_shape = tf.shape(val) + vals.append(val) + for i in range(num_iters): + if share_wts: + # The first Value Iteration maybe special, so it can have its own + # paramterss. + scope = 'conv' + if i == 0: scope = 'conv_0' + if i > 1: varscope.reuse_variables() + else: + scope = 'conv_{:d}'.format(i) + val = slim.conv2d(tf.concat([val, fr], 3, name='concat_{:d}'.format(i)), + num_outputs=action_neurons*val_neurons, + kernel_size=kernel_size, stride=1, activation_fn=activation_fn, + scope=scope, normalizer_fn=None, + weights_regularizer=slim.l2_regularizer(wt_decay), + weights_initializer=tf.random_normal_initializer(stddev=init_var), + biases_initializer=tf.zeros_initializer()) + val = tf.reshape(val, [-1, action_neurons*val_neurons, 1, 1], + name='re_{:d}'.format(i)) + val = slim.max_pool2d(val, kernel_size=[action_neurons,1], + stride=[action_neurons,1], padding='VALID', + scope='val_{:d}'.format(i)) + val = tf.reshape(val, val_shape, name='unre_{:d}'.format(i)) + vals.append(val) + return val, vals + + +def rotate_preds(loc_on_map, relative_theta, map_size, preds, + output_valid_mask): + with tf.name_scope('rotate'): + flow_op = tf_utils.get_flow(loc_on_map, relative_theta, map_size=map_size) + if type(preds) != list: + rotated_preds, valid_mask_warps = tf_utils.dense_resample(preds, flow_op, + output_valid_mask) + else: + rotated_preds = [] ;valid_mask_warps = [] + for pred in preds: + rotated_pred, valid_mask_warp = tf_utils.dense_resample(pred, flow_op, + output_valid_mask) + rotated_preds.append(rotated_pred) + valid_mask_warps.append(valid_mask_warp) + return rotated_preds, valid_mask_warps + +def get_visual_frustum(map_size, shape_like, expand_dims=[0,0]): + with tf.name_scope('visual_frustum'): + l = np.tril(np.ones(map_size)) ;l = l + l[:,::-1] + l = (l == 2).astype(np.float32) + for e in expand_dims: + l = np.expand_dims(l, axis=e) + confs_probs = tf.constant(l, dtype=tf.float32) + confs_probs = tf.ones_like(shape_like, dtype=tf.float32) * confs_probs + return confs_probs + +def deconv(x, is_training, wt_decay, neurons, strides, layers_per_block, + kernel_size, conv_fn, name, 
offset=0): + """Generates a up sampling network with residual connections. + """ + batch_norm_param = {'center': True, 'scale': True, + 'activation_fn': tf.nn.relu, + 'is_training': is_training} + outs = [] + for i, (neuron, stride) in enumerate(zip(neurons, strides)): + for s in range(layers_per_block): + scope = '{:s}_{:d}_{:d}'.format(name, i+1+offset,s+1) + x = custom_residual_block(x, neuron, kernel_size, stride, scope, + is_training, wt_decay, use_residual=True, + residual_stride_conv=True, conv_fn=conv_fn, + batch_norm_param=batch_norm_param) + stride = 1 + outs.append((x,True)) + return x, outs + +def fr_v2(x, output_neurons, inside_neurons, is_training, name='fr', + wt_decay=0.0001, stride=1, updates_collections=tf.GraphKeys.UPDATE_OPS): + """Performs fusion of information between the map and the reward map. + Inputs + x: NxHxWxC1 + + Outputs + fr map: NxHxWx(output_neurons) + """ + if type(stride) != list: + stride = [stride] + with slim.arg_scope(resnet_v2.resnet_utils.resnet_arg_scope( + is_training=is_training, weight_decay=wt_decay)): + with slim.arg_scope([slim.batch_norm], updates_collections=updates_collections) as arg_sc: + # Change the updates_collections for the conv normalizer_params to None + for i in range(len(arg_sc.keys())): + if 'convolution' in arg_sc.keys()[i]: + arg_sc.values()[i]['normalizer_params']['updates_collections'] = updates_collections + with slim.arg_scope(arg_sc): + bottleneck = resnet_v2.bottleneck + blocks = [] + for i, s in enumerate(stride): + b = resnet_v2.resnet_utils.Block( + 'block{:d}'.format(i + 1), bottleneck, [{ + 'depth': output_neurons, + 'depth_bottleneck': inside_neurons, + 'stride': stride[i] + }]) + blocks.append(b) + x, outs = resnet_v2.resnet_v2(x, blocks, num_classes=None, global_pool=False, + output_stride=None, include_root_block=False, + reuse=False, scope=name) + return x, outs diff --git a/models/research/cognitive_mapping_and_planning/tfcode/nav_utils.py b/models/research/cognitive_mapping_and_planning/tfcode/nav_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2f764f33df91a80f6539dcbae1e0fa7093becd29 --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/tfcode/nav_utils.py @@ -0,0 +1,435 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Various losses for training navigation agents. + +Defines various loss functions for navigation agents, +compute_losses_multi_or. 
+""" + +import os, numpy as np +import matplotlib.pyplot as plt + + +import tensorflow as tf + +from tensorflow.contrib import slim +from tensorflow.contrib.slim import arg_scope +from tensorflow.contrib.slim.nets import resnet_v2 +from tensorflow.python.training import moving_averages +import logging +from src import utils +import src.file_utils as fu +from tfcode import tf_utils + + +def compute_losses_multi_or(logits, actions_one_hot, weights=None, + num_actions=-1, data_loss_wt=1., reg_loss_wt=1., + ewma_decay=0.99, reg_loss_op=None): + assert(num_actions > 0), 'num_actions must be specified and must be > 0.' + + with tf.name_scope('loss'): + if weights is None: + weight = tf.ones_like(actions_one_hot, dtype=tf.float32, name='weight') + + actions_one_hot = tf.cast(tf.reshape(actions_one_hot, [-1, num_actions], + 're_actions_one_hot'), tf.float32) + weights = tf.reduce_sum(tf.reshape(weights, [-1, num_actions], 're_weight'), + reduction_indices=1) + total = tf.reduce_sum(weights) + + action_prob = tf.nn.softmax(logits) + action_prob = tf.reduce_sum(tf.multiply(action_prob, actions_one_hot), + reduction_indices=1) + example_loss = -tf.log(tf.maximum(tf.constant(1e-4), action_prob)) + + data_loss_op = tf.reduce_sum(example_loss * weights) / total + if reg_loss_op is None: + if reg_loss_wt > 0: + reg_loss_op = tf.add_n(tf.losses.get_regularization_losses()) + else: + reg_loss_op = tf.constant(0.) + + if reg_loss_wt > 0: + total_loss_op = data_loss_wt*data_loss_op + reg_loss_wt*reg_loss_op + else: + total_loss_op = data_loss_wt*data_loss_op + + is_correct = tf.cast(tf.greater(action_prob, 0.5, name='pred_class'), tf.float32) + acc_op = tf.reduce_sum(is_correct*weights) / total + + ewma_acc_op = moving_averages.weighted_moving_average( + acc_op, ewma_decay, weight=total, name='ewma_acc') + + acc_ops = [ewma_acc_op] + + return reg_loss_op, data_loss_op, total_loss_op, acc_ops + + +def get_repr_from_image(images_reshaped, modalities, data_augment, encoder, + freeze_conv, wt_decay, is_training): + # Pass image through lots of convolutional layers, to obtain pool5 + if modalities == ['rgb']: + with tf.name_scope('pre_rgb'): + x = (images_reshaped + 128.) / 255. # Convert to brightness between 0 and 1. + if data_augment.relight and is_training: + x = tf_utils.distort_image(x, fast_mode=data_augment.relight_fast) + x = (x-0.5)*2.0 + scope_name = encoder + elif modalities == ['depth']: + with tf.name_scope('pre_d'): + d_image = images_reshaped + x = 2*(d_image[...,0] - 80.0)/100.0 + y = d_image[...,1] + d_image = tf.concat([tf.expand_dims(x, -1), tf.expand_dims(y, -1)], 3) + x = d_image + scope_name = 'd_'+encoder + + resnet_is_training = is_training and (not freeze_conv) + with slim.arg_scope(resnet_v2.resnet_utils.resnet_arg_scope(resnet_is_training)): + fn = getattr(tf_utils, encoder) + x, end_points = fn(x, num_classes=None, global_pool=False, + output_stride=None, reuse=None, + scope=scope_name) + vars_ = slim.get_variables_to_restore() + + conv_feat = x + return conv_feat, vars_ + +def default_train_step_kwargs(m, obj, logdir, rng_seed, is_chief, num_steps, + iters, train_display_interval, + dagger_sample_bn_false): + train_step_kwargs = {} + train_step_kwargs['obj'] = obj + train_step_kwargs['m'] = m + + # rng_data has 2 independent rngs, one for sampling episodes and one for + # sampling perturbs (so that we can make results reproducible. 
+ train_step_kwargs['rng_data'] = [np.random.RandomState(rng_seed), + np.random.RandomState(rng_seed)] + train_step_kwargs['rng_action'] = np.random.RandomState(rng_seed) + if is_chief: + train_step_kwargs['writer'] = tf.summary.FileWriter(logdir) #, m.tf_graph) + else: + train_step_kwargs['writer'] = None + train_step_kwargs['iters'] = iters + train_step_kwargs['train_display_interval'] = train_display_interval + train_step_kwargs['num_steps'] = num_steps + train_step_kwargs['logdir'] = logdir + train_step_kwargs['dagger_sample_bn_false'] = dagger_sample_bn_false + return train_step_kwargs + +# Utilities for visualizing and analysing validation output. +def save_d_at_t(outputs, global_step, output_dir, metric_summary, N): + """Save distance to goal at all time steps. + + Args: + outputs : [gt_dist_to_goal]. + global_step : number of iterations. + output_dir : output directory. + metric_summary : to append scalars to summary. + N : number of outputs to process. + + """ + d_at_t = np.concatenate(map(lambda x: x[0][:,:,0]*1, outputs), axis=0) + fig, axes = utils.subplot(plt, (1,1), (5,5)) + axes.plot(np.arange(d_at_t.shape[1]), np.mean(d_at_t, axis=0), 'r.') + axes.set_xlabel('time step') + axes.set_ylabel('dist to next goal') + axes.grid('on') + file_name = os.path.join(output_dir, 'dist_at_t_{:d}.png'.format(global_step)) + with fu.fopen(file_name, 'w') as f: + fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0) + file_name = os.path.join(output_dir, 'dist_at_t_{:d}.pkl'.format(global_step)) + utils.save_variables(file_name, [d_at_t], ['d_at_t'], overwrite=True) + plt.close(fig) + return None + +def save_all(outputs, global_step, output_dir, metric_summary, N): + """Save numerous statistics. + + Args: + outputs : [locs, goal_loc, gt_dist_to_goal, node_ids, perturbs] + global_step : number of iterations. + output_dir : output directory. + metric_summary : to append scalars to summary. + N : number of outputs to process. + """ + all_locs = np.concatenate(map(lambda x: x[0], outputs), axis=0) + all_goal_locs = np.concatenate(map(lambda x: x[1], outputs), axis=0) + all_d_at_t = np.concatenate(map(lambda x: x[2][:,:,0]*1, outputs), axis=0) + all_node_ids = np.concatenate(map(lambda x: x[3], outputs), axis=0) + all_perturbs = np.concatenate(map(lambda x: x[4], outputs), axis=0) + + file_name = os.path.join(output_dir, 'all_locs_at_t_{:d}.pkl'.format(global_step)) + vars = [all_locs, all_goal_locs, all_d_at_t, all_node_ids, all_perturbs] + var_names = ['all_locs', 'all_goal_locs', 'all_d_at_t', 'all_node_ids', 'all_perturbs'] + utils.save_variables(file_name, vars, var_names, overwrite=True) + return None + +def eval_ap(outputs, global_step, output_dir, metric_summary, N, num_classes=4): + """Processes the collected outputs to compute AP for action prediction. + + Args: + outputs : [logits, labels] + global_step : global_step. + output_dir : where to store results. + metric_summary : summary object to add summaries to. + N : number of outputs to process. + num_classes : number of classes to compute AP over, and to reshape tensors. 
+ """ + if N >= 0: + outputs = outputs[:N] + logits = np.concatenate(map(lambda x: x[0], outputs), axis=0).reshape((-1, num_classes)) + labels = np.concatenate(map(lambda x: x[1], outputs), axis=0).reshape((-1, num_classes)) + aps = [] + for i in range(logits.shape[1]): + ap, rec, prec = utils.calc_pr(labels[:,i], logits[:,i]) + ap = ap[0] + tf_utils.add_value_to_summary(metric_summary, 'aps/ap_{:d}: '.format(i), ap) + aps.append(ap) + return aps + +def eval_dist(outputs, global_step, output_dir, metric_summary, N): + """Processes the collected outputs during validation to + 1. Plot the distance over time curve. + 2. Compute mean and median distances. + 3. Plots histogram of end distances. + + Args: + outputs : [locs, goal_loc, gt_dist_to_goal]. + global_step : global_step. + output_dir : where to store results. + metric_summary : summary object to add summaries to. + N : number of outputs to process. + """ + SUCCESS_THRESH = 3 + if N >= 0: + outputs = outputs[:N] + + # Plot distance at time t. + d_at_t = [] + for i in range(len(outputs)): + locs, goal_loc, gt_dist_to_goal = outputs[i] + d_at_t.append(gt_dist_to_goal[:,:,0]*1) + + # Plot the distance. + fig, axes = utils.subplot(plt, (1,1), (5,5)) + d_at_t = np.concatenate(d_at_t, axis=0) + axes.plot(np.arange(d_at_t.shape[1]), np.mean(d_at_t, axis=0), 'r.') + axes.set_xlabel('time step') + axes.set_ylabel('dist to next goal') + axes.grid('on') + file_name = os.path.join(output_dir, 'dist_at_t_{:d}.png'.format(global_step)) + with fu.fopen(file_name, 'w') as f: + fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0) + file_name = os.path.join(output_dir, 'dist_at_t_{:d}.pkl'.format(global_step)) + utils.save_variables(file_name, [d_at_t], ['d_at_t'], overwrite=True) + plt.close(fig) + + # Plot the trajectories and the init_distance and final distance. + d_inits = [] + d_ends = [] + for i in range(len(outputs)): + locs, goal_loc, gt_dist_to_goal = outputs[i] + d_inits.append(gt_dist_to_goal[:,0,0]*1) + d_ends.append(gt_dist_to_goal[:,-1,0]*1) + + # Plot the distance. + fig, axes = utils.subplot(plt, (1,1), (5,5)) + d_inits = np.concatenate(d_inits, axis=0) + d_ends = np.concatenate(d_ends, axis=0) + axes.plot(d_inits+np.random.rand(*(d_inits.shape))-0.5, + d_ends+np.random.rand(*(d_ends.shape))-0.5, '.', mec='red', mew=1.0) + axes.set_xlabel('init dist'); axes.set_ylabel('final dist'); + axes.grid('on'); axes.axis('equal'); + title_str = 'mean: {:0.1f}, 50: {:0.1f}, 75: {:0.2f}, s: {:0.1f}' + title_str = title_str.format( + np.mean(d_ends), np.median(d_ends), np.percentile(d_ends, q=75), + 100*(np.mean(d_ends <= SUCCESS_THRESH))) + axes.set_title(title_str) + file_name = os.path.join(output_dir, 'dist_{:d}.png'.format(global_step)) + with fu.fopen(file_name, 'w') as f: + fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0) + + file_name = os.path.join(output_dir, 'dist_{:d}.pkl'.format(global_step)) + utils.save_variables(file_name, [d_inits, d_ends], ['d_inits', 'd_ends'], + overwrite=True) + plt.close(fig) + + # Plot the histogram of the end_distance. 
+ with plt.style.context('seaborn-white'): + d_ends_ = np.sort(d_ends) + d_inits_ = np.sort(d_inits) + leg = []; + fig, ax = utils.subplot(plt, (1,1), (5,5)) + ax.grid('on') + ax.set_xlabel('Distance from goal'); ax.xaxis.label.set_fontsize(16); + ax.set_ylabel('Fraction of data'); ax.yaxis.label.set_fontsize(16); + ax.plot(d_ends_, np.arange(d_ends_.size)*1./d_ends_.size, 'r') + ax.plot(d_inits_, np.arange(d_inits_.size)*1./d_inits_.size, 'k') + leg.append('Final'); leg.append('Init'); + ax.legend(leg, fontsize='x-large'); + ax.set_axis_on() + title_str = 'mean: {:0.1f}, 50: {:0.1f}, 75: {:0.2f}, s: {:0.1f}' + title_str = title_str.format( + np.mean(d_ends), np.median(d_ends), np.percentile(d_ends, q=75), + 100*(np.mean(d_ends <= SUCCESS_THRESH))) + ax.set_title(title_str) + file_name = os.path.join(output_dir, 'dist_hist_{:d}.png'.format(global_step)) + with fu.fopen(file_name, 'w') as f: + fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0) + + # Log distance metrics. + tf_utils.add_value_to_summary(metric_summary, 'dists/success_init: ', + 100*(np.mean(d_inits <= SUCCESS_THRESH))) + tf_utils.add_value_to_summary(metric_summary, 'dists/success_end: ', + 100*(np.mean(d_ends <= SUCCESS_THRESH))) + tf_utils.add_value_to_summary(metric_summary, 'dists/dist_init (75): ', + np.percentile(d_inits, q=75)) + tf_utils.add_value_to_summary(metric_summary, 'dists/dist_end (75): ', + np.percentile(d_ends, q=75)) + tf_utils.add_value_to_summary(metric_summary, 'dists/dist_init (median): ', + np.median(d_inits)) + tf_utils.add_value_to_summary(metric_summary, 'dists/dist_end (median): ', + np.median(d_ends)) + tf_utils.add_value_to_summary(metric_summary, 'dists/dist_init (mean): ', + np.mean(d_inits)) + tf_utils.add_value_to_summary(metric_summary, 'dists/dist_end (mean): ', + np.mean(d_ends)) + return np.median(d_inits), np.median(d_ends), np.mean(d_inits), np.mean(d_ends), \ + np.percentile(d_inits, q=75), np.percentile(d_ends, q=75), \ + 100*(np.mean(d_inits) <= SUCCESS_THRESH), 100*(np.mean(d_ends) <= SUCCESS_THRESH) + +def plot_trajectories(outputs, global_step, output_dir, metric_summary, N): + """Processes the collected outputs during validation to plot the trajectories + in the top view. + + Args: + outputs : [locs, orig_maps, goal_loc]. + global_step : global_step. + output_dir : where to store results. + metric_summary : summary object to add summaries to. + N : number of outputs to process. + """ + if N >= 0: + outputs = outputs[:N] + N = len(outputs) + + plt.set_cmap('gray') + fig, axes = utils.subplot(plt, (N, outputs[0][1].shape[0]), (5,5)) + axes = axes.ravel()[::-1].tolist() + for i in range(N): + locs, orig_maps, goal_loc = outputs[i] + is_semantic = np.isnan(goal_loc[0,0,1]) + for j in range(orig_maps.shape[0]): + ax = axes.pop(); + ax.plot(locs[j,0,0], locs[j,0,1], 'ys') + # Plot one by one, so that they come in different colors. 
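+      # Goal markers are only drawn for point-goal episodes; a NaN goal
+      # y-coordinate (is_semantic above) marks semantic-goal episodes, for
+      # which the axis limits are fit to the trajectory alone. The trajectory
+      # itself is drawn as a scatter coloured by time step.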
+ for k in range(goal_loc.shape[1]): + if not is_semantic: + ax.plot(goal_loc[j,k,0], goal_loc[j,k,1], 's') + if False: + ax.plot(locs[j,:,0], locs[j,:,1], 'r.', ms=3) + ax.imshow(orig_maps[j,0,:,:,0], origin='lower') + ax.set_axis_off(); + else: + ax.scatter(locs[j,:,0], locs[j,:,1], c=np.arange(locs.shape[1]), + cmap='jet', s=10, lw=0) + ax.imshow(orig_maps[j,0,:,:,0], origin='lower', vmin=-1.0, vmax=2.0) + if not is_semantic: + xymin = np.minimum(np.min(goal_loc[j,:,:], axis=0), np.min(locs[j,:,:], axis=0)) + xymax = np.maximum(np.max(goal_loc[j,:,:], axis=0), np.max(locs[j,:,:], axis=0)) + else: + xymin = np.min(locs[j,:,:], axis=0) + xymax = np.max(locs[j,:,:], axis=0) + xy1 = (xymax+xymin)/2. - np.maximum(np.max(xymax-xymin), 12) + xy2 = (xymax+xymin)/2. + np.maximum(np.max(xymax-xymin), 12) + ax.set_xlim([xy1[0], xy2[0]]) + ax.set_ylim([xy1[1], xy2[1]]) + ax.set_axis_off() + file_name = os.path.join(output_dir, 'trajectory_{:d}.png'.format(global_step)) + with fu.fopen(file_name, 'w') as f: + fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0) + plt.close(fig) + return None + +def add_default_summaries(mode, arop_full_summary_iters, summarize_ops, + summarize_names, to_aggregate, action_prob_op, + input_tensors, scope_name): + assert(mode == 'train' or mode == 'val' or mode == 'test'), \ + 'add_default_summaries mode is neither train or val or test.' + + s_ops = tf_utils.get_default_summary_ops() + + if mode == 'train': + s_ops.summary_ops, s_ops.print_summary_ops, additional_return_ops, \ + arop_summary_iters, arop_eval_fns = tf_utils.simple_summaries( + summarize_ops, summarize_names, mode, to_aggregate=False, + scope_name=scope_name) + s_ops.additional_return_ops += additional_return_ops + s_ops.arop_summary_iters += arop_summary_iters + s_ops.arop_eval_fns += arop_eval_fns + elif mode == 'val': + s_ops.summary_ops, s_ops.print_summary_ops, additional_return_ops, \ + arop_summary_iters, arop_eval_fns = tf_utils.simple_summaries( + summarize_ops, summarize_names, mode, to_aggregate=to_aggregate, + scope_name=scope_name) + s_ops.additional_return_ops += additional_return_ops + s_ops.arop_summary_iters += arop_summary_iters + s_ops.arop_eval_fns += arop_eval_fns + + elif mode == 'test': + s_ops.summary_ops, s_ops.print_summary_ops, additional_return_ops, \ + arop_summary_iters, arop_eval_fns = tf_utils.simple_summaries( + [], [], mode, to_aggregate=[], scope_name=scope_name) + s_ops.additional_return_ops += additional_return_ops + s_ops.arop_summary_iters += arop_summary_iters + s_ops.arop_eval_fns += arop_eval_fns + + + if mode == 'val': + arop = s_ops.additional_return_ops + arop += [[action_prob_op, input_tensors['train']['action']]] + arop += [[input_tensors['step']['loc_on_map'], + input_tensors['common']['goal_loc'], + input_tensors['step']['gt_dist_to_goal']]] + arop += [[input_tensors['step']['loc_on_map'], + input_tensors['common']['orig_maps'], + input_tensors['common']['goal_loc']]] + s_ops.arop_summary_iters += [-1, arop_full_summary_iters, + arop_full_summary_iters] + s_ops.arop_eval_fns += [eval_ap, eval_dist, plot_trajectories] + + elif mode == 'test': + arop = s_ops.additional_return_ops + arop += [[input_tensors['step']['loc_on_map'], + input_tensors['common']['goal_loc'], + input_tensors['step']['gt_dist_to_goal']]] + arop += [[input_tensors['step']['gt_dist_to_goal']]] + arop += [[input_tensors['step']['loc_on_map'], + input_tensors['common']['goal_loc'], + input_tensors['step']['gt_dist_to_goal'], + input_tensors['step']['node_ids'], + 
input_tensors['step']['perturbs']]] + arop += [[input_tensors['step']['loc_on_map'], + input_tensors['common']['orig_maps'], + input_tensors['common']['goal_loc']]] + s_ops.arop_summary_iters += [-1, -1, -1, arop_full_summary_iters] + s_ops.arop_eval_fns += [eval_dist, save_d_at_t, save_all, + plot_trajectories] + return s_ops + + diff --git a/models/research/cognitive_mapping_and_planning/tfcode/tf_utils.py b/models/research/cognitive_mapping_and_planning/tfcode/tf_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5f96d8ff5ce7473f0ec49096abcbac274e6c4fcc --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/tfcode/tf_utils.py @@ -0,0 +1,840 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +import numpy as np +import sys +import tensorflow as tf +import src.utils as utils +import logging +from tensorflow.contrib import slim +from tensorflow.contrib.metrics.python.ops import confusion_matrix_ops +from tensorflow.contrib.slim import arg_scope +from tensorflow.contrib.slim.nets import resnet_v2 +from tensorflow.python.framework import dtypes +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import variable_scope +sys.path.insert(0, '../slim') +from preprocessing import inception_preprocessing as ip + +resnet_v2_50 = resnet_v2.resnet_v2_50 + + +def custom_residual_block(x, neurons, kernel_size, stride, name, is_training, + wt_decay=0.0001, use_residual=True, + residual_stride_conv=True, conv_fn=slim.conv2d, + batch_norm_param=None): + + # batch norm x and relu + init_var = np.sqrt(2.0/(kernel_size**2)/neurons) + with arg_scope([conv_fn], + weights_regularizer=slim.l2_regularizer(wt_decay), + weights_initializer=tf.random_normal_initializer(stddev=init_var), + biases_initializer=tf.zeros_initializer()): + + if batch_norm_param is None: + batch_norm_param = {'center': True, 'scale': False, + 'activation_fn':tf.nn.relu, + 'is_training': is_training} + + y = slim.batch_norm(x, scope=name+'_bn', **batch_norm_param) + + y = conv_fn(y, num_outputs=neurons, kernel_size=kernel_size, stride=stride, + activation_fn=None, scope=name+'_1', + normalizer_fn=slim.batch_norm, + normalizer_params=batch_norm_param) + + y = conv_fn(y, num_outputs=neurons, kernel_size=kernel_size, + stride=1, activation_fn=None, scope=name+'_2') + + if use_residual: + if stride != 1 or x.get_shape().as_list()[-1] != neurons: + batch_norm_param_ = dict(batch_norm_param) + batch_norm_param_['activation_fn'] = None + x = conv_fn(x, num_outputs=neurons, kernel_size=1, + stride=stride if residual_stride_conv else 1, + activation_fn=None, scope=name+'_0_1x1', + normalizer_fn=slim.batch_norm, + normalizer_params=batch_norm_param_) + if not residual_stride_conv: + x = slim.avg_pool2d(x, 1, stride=stride, scope=name+'_0_avg') + + y = tf.add(x, y, 
name=name+'_add') + + return y + +def step_gt_prob(step, step_number_op): + # Change samping probability from 1 to -1 at step steps. + with tf.name_scope('step_gt_prob'): + out = tf.cond(tf.less(step_number_op, step), + lambda: tf.constant(1.), lambda: tf.constant(-1.)) + return out + +def inverse_sigmoid_decay(k, global_step_op): + with tf.name_scope('inverse_sigmoid_decay'): + k = tf.constant(k, dtype=tf.float32) + tmp = k*tf.exp(-tf.cast(global_step_op, tf.float32)/k) + tmp = tmp / (1. + tmp) + return tmp + +def dense_resample(im, flow_im, output_valid_mask, name='dense_resample'): + """ Resample reward at particular locations. + Args: + im: ...xHxWxC matrix to sample from. + flow_im: ...xHxWx2 matrix, samples the image using absolute offsets as given + by the flow_im. + """ + with tf.name_scope(name): + valid_mask = None + + x, y = tf.unstack(flow_im, axis=-1) + x = tf.cast(tf.reshape(x, [-1]), tf.float32) + y = tf.cast(tf.reshape(y, [-1]), tf.float32) + + # constants + shape = tf.unstack(tf.shape(im)) + channels = shape[-1] + width = shape[-2] + height = shape[-3] + num_batch = tf.cast(tf.reduce_prod(tf.stack(shape[:-3])), 'int32') + zero = tf.constant(0, dtype=tf.int32) + + # Round up and down. + x0 = tf.cast(tf.floor(x), 'int32'); x1 = x0 + 1; + y0 = tf.cast(tf.floor(y), 'int32'); y1 = y0 + 1; + + if output_valid_mask: + valid_mask = tf.logical_and( + tf.logical_and(tf.less_equal(x, tf.cast(width, tf.float32)-1.), tf.greater_equal(x, 0.)), + tf.logical_and(tf.less_equal(y, tf.cast(height, tf.float32)-1.), tf.greater_equal(y, 0.))) + valid_mask = tf.reshape(valid_mask, shape=shape[:-1] + [1]) + + x0 = tf.clip_by_value(x0, zero, width-1) + x1 = tf.clip_by_value(x1, zero, width-1) + y0 = tf.clip_by_value(y0, zero, height-1) + y1 = tf.clip_by_value(y1, zero, height-1) + + dim2 = width; dim1 = width * height; + + # Create base index + base = tf.reshape(tf.range(num_batch) * dim1, shape=[-1,1]) + base = tf.reshape(tf.tile(base, [1, height*width]), shape=[-1]) + + base_y0 = base + y0 * dim2 + base_y1 = base + y1 * dim2 + idx_a = base_y0 + x0 + idx_b = base_y1 + x0 + idx_c = base_y0 + x1 + idx_d = base_y1 + x1 + + # use indices to lookup pixels in the flat image and restore channels dim + sh = tf.stack([tf.constant(-1,dtype=tf.int32), channels]) + im_flat = tf.cast(tf.reshape(im, sh), dtype=tf.float32) + pixel_a = tf.gather(im_flat, idx_a) + pixel_b = tf.gather(im_flat, idx_b) + pixel_c = tf.gather(im_flat, idx_c) + pixel_d = tf.gather(im_flat, idx_d) + + # and finally calculate interpolated values + x1_f = tf.to_float(x1) + y1_f = tf.to_float(y1) + + wa = tf.expand_dims(((x1_f - x) * (y1_f - y)), 1) + wb = tf.expand_dims((x1_f - x) * (1.0 - (y1_f - y)), 1) + wc = tf.expand_dims(((1.0 - (x1_f - x)) * (y1_f - y)), 1) + wd = tf.expand_dims(((1.0 - (x1_f - x)) * (1.0 - (y1_f - y))), 1) + + output = tf.add_n([wa * pixel_a, wb * pixel_b, wc * pixel_c, wd * pixel_d]) + output = tf.reshape(output, shape=tf.shape(im)) + return output, valid_mask + +def get_flow(t, theta, map_size, name_scope='gen_flow'): + """ + Rotates the map by theta and translates the rotated map by t. + + Assume that the robot rotates by an angle theta and then moves forward by + translation t. This function returns the flow field field. For every pixel in + the new image it tells us which pixel in the original image it came from: + NewI(x, y) = OldI(flow_x(x,y), flow_y(x,y)). + + Assume there is a point p in the original image. Robot rotates by R and moves + forward by t. 
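+  In the robot's new frame the point is then seen at: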
p1 = Rt*p; p2 = p1 - t; (the world moves in opposite direction. + So, p2 = Rt*p - t, thus p2 came from R*(p2+t), which is what this function + calculates. + + t: ... x 2 (translation for B batches of N motions each). + theta: ... x 1 (rotation for B batches of N motions each). + + Output: ... x map_size x map_size x 2 + """ + + with tf.name_scope(name_scope): + tx, ty = tf.unstack(tf.reshape(t, shape=[-1, 1, 1, 1, 2]), axis=4) + theta = tf.reshape(theta, shape=[-1, 1, 1, 1]) + c = tf.constant((map_size-1.)/2., dtype=tf.float32) + + x, y = np.meshgrid(np.arange(map_size), np.arange(map_size)) + x = tf.constant(x[np.newaxis, :, :, np.newaxis], dtype=tf.float32, name='x', + shape=[1, map_size, map_size, 1]) + y = tf.constant(y[np.newaxis, :, :, np.newaxis], dtype=tf.float32, name='y', + shape=[1,map_size, map_size, 1]) + + x = x-(-tx+c) + y = y-(-ty+c) + + sin_theta = tf.sin(theta) + cos_theta = tf.cos(theta) + xr = cos_theta*x - sin_theta*y + yr = sin_theta*x + cos_theta*y + + xr = xr + c + yr = yr + c + + flow = tf.stack([xr, yr], axis=-1) + sh = tf.unstack(tf.shape(t), axis=0) + sh = tf.stack(sh[:-1]+[tf.constant(_, dtype=tf.int32) for _ in [map_size, map_size, 2]]) + flow = tf.reshape(flow, shape=sh) + return flow + +def distort_image(im, fast_mode=False): + # All images in the same batch are transformed the same way, but over + # iterations you see different distortions. + # im should be float with values between 0 and 1. + im_ = tf.reshape(im, shape=(-1,1,3)) + im_ = ip.apply_with_random_selector( + im_, lambda x, ordering: ip.distort_color(x, ordering, fast_mode), + num_cases=4) + im_ = tf.reshape(im_, tf.shape(im)) + return im_ + +def fc_network(x, neurons, wt_decay, name, num_pred=None, offset=0, + batch_norm_param=None, dropout_ratio=0.0, is_training=None): + if dropout_ratio > 0: + assert(is_training is not None), \ + 'is_training needs to be defined when trainnig with dropout.' 
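+  # Stack of fully connected layers; each layer optionally uses batch norm and
+  # can be followed by dropout. Intermediate activations are collected in
+  # `repr`.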
+ + repr = [] + for i, neuron in enumerate(neurons): + init_var = np.sqrt(2.0/neuron) + if batch_norm_param is not None: + x = slim.fully_connected(x, neuron, activation_fn=None, + weights_initializer=tf.random_normal_initializer(stddev=init_var), + weights_regularizer=slim.l2_regularizer(wt_decay), + normalizer_fn=slim.batch_norm, + normalizer_params=batch_norm_param, + biases_initializer=tf.zeros_initializer(), + scope='{:s}_{:d}'.format(name, offset+i)) + else: + x = slim.fully_connected(x, neuron, activation_fn=tf.nn.relu, + weights_initializer=tf.random_normal_initializer(stddev=init_var), + weights_regularizer=slim.l2_regularizer(wt_decay), + biases_initializer=tf.zeros_initializer(), + scope='{:s}_{:d}'.format(name, offset+i)) + if dropout_ratio > 0: + x = slim.dropout(x, keep_prob=1-dropout_ratio, is_training=is_training, + scope='{:s}_{:d}'.format('dropout_'+name, offset+i)) + repr.append(x) + + if num_pred is not None: + init_var = np.sqrt(2.0/num_pred) + x = slim.fully_connected(x, num_pred, + weights_regularizer=slim.l2_regularizer(wt_decay), + weights_initializer=tf.random_normal_initializer(stddev=init_var), + biases_initializer=tf.zeros_initializer(), + activation_fn=None, + scope='{:s}_pred'.format(name)) + return x, repr + +def concat_state_x_list(f, names): + af = {} + for i, k in enumerate(names): + af[k] = np.concatenate([x[i] for x in f], axis=1) + return af + +def concat_state_x(f, names): + af = {} + for k in names: + af[k] = np.concatenate([x[k] for x in f], axis=1) + # af[k] = np.swapaxes(af[k], 0, 1) + return af + +def sample_action(rng, action_probs, optimal_action, sample_gt_prob, + type='sample', combine_type='one_or_other'): + optimal_action_ = optimal_action/np.sum(optimal_action+0., 1, keepdims=True) + action_probs_ = action_probs/np.sum(action_probs+0.001, 1, keepdims=True) + batch_size = action_probs_.shape[0] + + action = np.zeros((batch_size), dtype=np.int32) + action_sample_wt = np.zeros((batch_size), dtype=np.float32) + if combine_type == 'add': + sample_gt_prob_ = np.minimum(np.maximum(sample_gt_prob, 0.), 1.) + + for i in range(batch_size): + if combine_type == 'one_or_other': + sample_gt = rng.rand() < sample_gt_prob + if sample_gt: distr_ = optimal_action_[i,:]*1. + else: distr_ = action_probs_[i,:]*1. + elif combine_type == 'add': + distr_ = optimal_action_[i,:]*sample_gt_prob_ + \ + (1.-sample_gt_prob_)*action_probs_[i,:] + distr_ = distr_ / np.sum(distr_) + + if type == 'sample': + action[i] = np.argmax(rng.multinomial(1, distr_, size=1)) + elif type == 'argmax': + action[i] = np.argmax(distr_) + action_sample_wt[i] = action_probs_[i, action[i]] / distr_[action[i]] + return action, action_sample_wt + +def train_step_custom_online_sampling(sess, train_op, global_step, + train_step_kwargs, mode='train'): + m = train_step_kwargs['m'] + obj = train_step_kwargs['obj'] + rng_data = train_step_kwargs['rng_data'] + rng_action = train_step_kwargs['rng_action'] + writer = train_step_kwargs['writer'] + iters = train_step_kwargs['iters'] + num_steps = train_step_kwargs['num_steps'] + logdir = train_step_kwargs['logdir'] + dagger_sample_bn_false = train_step_kwargs['dagger_sample_bn_false'] + train_display_interval = train_step_kwargs['train_display_interval'] + if 'outputs' not in m.train_ops: + m.train_ops['outputs'] = [] + + s_ops = m.summary_ops[mode] + val_additional_ops = [] + + # Print all variables here. 
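+  # The block below is debug-only (disabled via `if False:`); when enabled it
+  # logs, for every Adam variable, whether it contains NaNs along with its norm.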
+ if False: + v = tf.get_collection(tf.GraphKeys.VARIABLES) + v_op = [_.value() for _ in v] + v_op_value = sess.run(v_op) + + filter = lambda x, y: 'Adam' in x.name + # filter = lambda x, y: np.is_any_nan(y) + ind = [i for i, (_, __) in enumerate(zip(v, v_op_value)) if filter(_, __)] + v = [v[i] for i in ind] + v_op_value = [v_op_value[i] for i in ind] + + for i in range(len(v)): + logging.info('XXXX: variable: %30s, is_any_nan: %5s, norm: %f.', + v[i].name, np.any(np.isnan(v_op_value[i])), + np.linalg.norm(v_op_value[i])) + + tt = utils.Timer() + for i in range(iters): + tt.tic() + # Sample a room. + e = obj.sample_env(rng_data) + + # Initialize the agent. + init_env_state = e.reset(rng_data) + + # Get and process the common data. + input = e.get_common_data() + input = e.pre_common_data(input) + feed_dict = prepare_feed_dict(m.input_tensors['common'], input) + if dagger_sample_bn_false: + feed_dict[m.train_ops['batch_norm_is_training_op']] = False + common_data = sess.run(m.train_ops['common'], feed_dict=feed_dict) + + states = [] + state_features = [] + state_targets = [] + net_state_to_input = [] + step_data_cache = [] + executed_actions = [] + rewards = [] + action_sample_wts = [] + states.append(init_env_state) + + net_state = sess.run(m.train_ops['init_state'], feed_dict=feed_dict) + net_state = dict(zip(m.train_ops['state_names'], net_state)) + net_state_to_input.append(net_state) + for j in range(num_steps): + f = e.get_features(states[j], j) + f = e.pre_features(f) + f.update(net_state) + f['step_number'] = np.ones((1,1,1), dtype=np.int32)*j + state_features.append(f) + + feed_dict = prepare_feed_dict(m.input_tensors['step'], state_features[-1]) + optimal_action = e.get_optimal_action(states[j], j) + for x, v in zip(m.train_ops['common'], common_data): + feed_dict[x] = v + if dagger_sample_bn_false: + feed_dict[m.train_ops['batch_norm_is_training_op']] = False + outs = sess.run([m.train_ops['step'], m.sample_gt_prob_op, + m.train_ops['step_data_cache'], + m.train_ops['updated_state'], + m.train_ops['outputs']], feed_dict=feed_dict) + action_probs = outs[0] + sample_gt_prob = outs[1] + step_data_cache.append(dict(zip(m.train_ops['step_data_cache'], outs[2]))) + net_state = outs[3] + if hasattr(e, 'update_state'): + outputs = outs[4] + outputs = dict(zip(m.train_ops['output_names'], outputs)) + e.update_state(outputs, j) + state_targets.append(e.get_targets(states[j], j)) + + if j < num_steps-1: + # Sample from action_probs and optimal action. + action, action_sample_wt = sample_action( + rng_action, action_probs, optimal_action, sample_gt_prob, + m.sample_action_type, m.sample_action_combine_type) + next_state, reward = e.take_action(states[j], action, j) + executed_actions.append(action) + states.append(next_state) + rewards.append(reward) + action_sample_wts.append(action_sample_wt) + net_state = dict(zip(m.train_ops['state_names'], net_state)) + net_state_to_input.append(net_state) + + # Concatenate things together for training. 
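+    # Stack the per-step lists into (batch x time) arrays and merge everything
+    # into a single feed dict for the training update.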
+ rewards = np.array(rewards).T + action_sample_wts = np.array(action_sample_wts).T + executed_actions = np.array(executed_actions).T + all_state_targets = concat_state_x(state_targets, e.get_targets_name()) + all_state_features = concat_state_x(state_features, + e.get_features_name()+['step_number']) + # all_state_net = concat_state_x(net_state_to_input, + # m.train_ops['state_names']) + all_step_data_cache = concat_state_x(step_data_cache, + m.train_ops['step_data_cache']) + + dict_train = dict(input) + dict_train.update(all_state_features) + dict_train.update(all_state_targets) + # dict_train.update(all_state_net) + dict_train.update(net_state_to_input[0]) + dict_train.update(all_step_data_cache) + dict_train.update({'rewards': rewards, + 'action_sample_wts': action_sample_wts, + 'executed_actions': executed_actions}) + feed_dict = prepare_feed_dict(m.input_tensors['train'], dict_train) + for x in m.train_ops['step_data_cache']: + feed_dict[x] = all_step_data_cache[x] + if mode == 'train': + n_step = sess.run(global_step) + + if np.mod(n_step, train_display_interval) == 0: + total_loss, np_global_step, summary, print_summary = sess.run( + [train_op, global_step, s_ops.summary_ops, s_ops.print_summary_ops], + feed_dict=feed_dict) + logging.error("") + else: + total_loss, np_global_step, summary = sess.run( + [train_op, global_step, s_ops.summary_ops], feed_dict=feed_dict) + + if writer is not None and summary is not None: + writer.add_summary(summary, np_global_step) + + should_stop = sess.run(m.should_stop_op) + + if mode != 'train': + arop = [[] for j in range(len(s_ops.additional_return_ops))] + for j in range(len(s_ops.additional_return_ops)): + if s_ops.arop_summary_iters[j] < 0 or i < s_ops.arop_summary_iters[j]: + arop[j] = s_ops.additional_return_ops[j] + val = sess.run(arop, feed_dict=feed_dict) + val_additional_ops.append(val) + tt.toc(log_at=60, log_str='val timer {:d} / {:d}: '.format(i, iters), + type='time') + + if mode != 'train': + # Write the default val summaries. + summary, print_summary, np_global_step = sess.run( + [s_ops.summary_ops, s_ops.print_summary_ops, global_step]) + if writer is not None and summary is not None: + writer.add_summary(summary, np_global_step) + + # write custom validation ops + val_summarys = [] + val_additional_ops = zip(*val_additional_ops) + if len(s_ops.arop_eval_fns) > 0: + val_metric_summary = tf.summary.Summary() + for i in range(len(s_ops.arop_eval_fns)): + val_summary = None + if s_ops.arop_eval_fns[i] is not None: + val_summary = s_ops.arop_eval_fns[i](val_additional_ops[i], + np_global_step, logdir, + val_metric_summary, + s_ops.arop_summary_iters[i]) + val_summarys.append(val_summary) + if writer is not None: + writer.add_summary(val_metric_summary, np_global_step) + + # Return the additional val_ops + total_loss = (val_additional_ops, val_summarys) + should_stop = None + + return total_loss, should_stop + +def train_step_custom_v2(sess, train_op, global_step, train_step_kwargs, + mode='train'): + m = train_step_kwargs['m'] + obj = train_step_kwargs['obj'] + rng = train_step_kwargs['rng'] + writer = train_step_kwargs['writer'] + iters = train_step_kwargs['iters'] + logdir = train_step_kwargs['logdir'] + train_display_interval = train_step_kwargs['train_display_interval'] + + s_ops = m.summary_ops[mode] + val_additional_ops = [] + + # Print all variables here. 
+ if False: + v = tf.get_collection(tf.GraphKeys.VARIABLES) + v_op = [_.value() for _ in v] + v_op_value = sess.run(v_op) + + filter = lambda x, y: 'Adam' in x.name + # filter = lambda x, y: np.is_any_nan(y) + ind = [i for i, (_, __) in enumerate(zip(v, v_op_value)) if filter(_, __)] + v = [v[i] for i in ind] + v_op_value = [v_op_value[i] for i in ind] + + for i in range(len(v)): + logging.info('XXXX: variable: %30s, is_any_nan: %5s, norm: %f.', + v[i].name, np.any(np.isnan(v_op_value[i])), + np.linalg.norm(v_op_value[i])) + + tt = utils.Timer() + for i in range(iters): + tt.tic() + e = obj.sample_env(rng) + rngs = e.gen_rng(rng) + input_data = e.gen_data(*rngs) + input_data = e.pre_data(input_data) + feed_dict = prepare_feed_dict(m.input_tensors, input_data) + + if mode == 'train': + n_step = sess.run(global_step) + + if np.mod(n_step, train_display_interval) == 0: + total_loss, np_global_step, summary, print_summary = sess.run( + [train_op, global_step, s_ops.summary_ops, s_ops.print_summary_ops], + feed_dict=feed_dict) + else: + total_loss, np_global_step, summary = sess.run( + [train_op, global_step, s_ops.summary_ops], + feed_dict=feed_dict) + + if writer is not None and summary is not None: + writer.add_summary(summary, np_global_step) + + should_stop = sess.run(m.should_stop_op) + + if mode != 'train': + arop = [[] for j in range(len(s_ops.additional_return_ops))] + for j in range(len(s_ops.additional_return_ops)): + if s_ops.arop_summary_iters[j] < 0 or i < s_ops.arop_summary_iters[j]: + arop[j] = s_ops.additional_return_ops[j] + val = sess.run(arop, feed_dict=feed_dict) + val_additional_ops.append(val) + tt.toc(log_at=60, log_str='val timer {:d} / {:d}: '.format(i, iters), + type='time') + + if mode != 'train': + # Write the default val summaries. + summary, print_summary, np_global_step = sess.run( + [s_ops.summary_ops, s_ops.print_summary_ops, global_step]) + if writer is not None and summary is not None: + writer.add_summary(summary, np_global_step) + + # write custom validation ops + val_summarys = [] + val_additional_ops = zip(*val_additional_ops) + if len(s_ops.arop_eval_fns) > 0: + val_metric_summary = tf.summary.Summary() + for i in range(len(s_ops.arop_eval_fns)): + val_summary = None + if s_ops.arop_eval_fns[i] is not None: + val_summary = s_ops.arop_eval_fns[i](val_additional_ops[i], + np_global_step, logdir, + val_metric_summary, + s_ops.arop_summary_iters[i]) + val_summarys.append(val_summary) + if writer is not None: + writer.add_summary(val_metric_summary, np_global_step) + + # Return the additional val_ops + total_loss = (val_additional_ops, val_summarys) + should_stop = None + + return total_loss, should_stop + +def train_step_custom(sess, train_op, global_step, train_step_kwargs, + mode='train'): + m = train_step_kwargs['m'] + params = train_step_kwargs['params'] + rng = train_step_kwargs['rng'] + writer = train_step_kwargs['writer'] + iters = train_step_kwargs['iters'] + gen_rng = train_step_kwargs['gen_rng'] + logdir = train_step_kwargs['logdir'] + gen_data = train_step_kwargs['gen_data'] + pre_data = train_step_kwargs['pre_data'] + train_display_interval = train_step_kwargs['train_display_interval'] + + val_additional_ops = [] + # Print all variables here. 
+ if False: + v = tf.get_collection(tf.GraphKeys.VARIABLES) + for _ in v: + val = sess.run(_.value()) + logging.info('variable: %30s, is_any_nan: %5s, norm: %f.', _.name, + np.any(np.isnan(val)), np.linalg.norm(val)) + + for i in range(iters): + rngs = gen_rng(params, rng) + input_data = gen_data(params, *rngs) + input_data = pre_data(params, input_data) + feed_dict = prepare_feed_dict(m.input_tensors, input_data) + + if mode == 'train': + n_step = sess.run(global_step) + + if np.mod(n_step, train_display_interval) == 0: + total_loss, np_global_step, summary, print_summary = sess.run( + [train_op, global_step, m.summary_op[mode], m.print_summary_op[mode]], + feed_dict=feed_dict) + else: + total_loss, np_global_step, summary = sess.run( + [train_op, global_step, m.summary_op[mode]], + feed_dict=feed_dict) + + if writer is not None: + writer.add_summary(summary, np_global_step) + + should_stop = sess.run(m.should_stop_op) + + if mode == 'val': + val = sess.run(m.agg_update_op[mode] + m.additional_return_op[mode], + feed_dict=feed_dict) + val_additional_ops.append(val[len(m.agg_update_op[mode]):]) + + if mode == 'val': + summary, print_summary, np_global_step = sess.run( + [m.summary_op[mode], m.print_summary_op[mode], global_step]) + if writer is not None: + writer.add_summary(summary, np_global_step) + sess.run([m.agg_reset_op[mode]]) + + # write custom validation ops + if m.eval_metrics_fn[mode] is not None: + val_metric_summary = m.eval_metrics_fn[mode](val_additional_ops, + np_global_step, logdir) + if writer is not None: + writer.add_summary(val_metric_summary, np_global_step) + + total_loss = val_additional_ops + should_stop = None + + return total_loss, should_stop + +def setup_training(loss_op, initial_learning_rate, steps_per_decay, + learning_rate_decay, momentum, max_steps, + sync=False, adjust_lr_sync=True, + num_workers=1, replica_id=0, vars_to_optimize=None, + clip_gradient_norm=0, typ=None, momentum2=0.999, + adam_eps=1e-8): + if sync and adjust_lr_sync: + initial_learning_rate = initial_learning_rate * num_workers + max_steps = np.int(max_steps / num_workers) + steps_per_decay = np.int(steps_per_decay / num_workers) + + global_step_op = slim.get_or_create_global_step() + lr_op = tf.train.exponential_decay(initial_learning_rate, + global_step_op, steps_per_decay, learning_rate_decay, staircase=True) + if typ == 'sgd': + optimizer = tf.train.MomentumOptimizer(lr_op, momentum) + elif typ == 'adam': + optimizer = tf.train.AdamOptimizer(learning_rate=lr_op, beta1=momentum, + beta2=momentum2, epsilon=adam_eps) + + if sync: + + sync_optimizer = tf.train.SyncReplicasOptimizer(optimizer, + replicas_to_aggregate=num_workers, + replica_id=replica_id, + total_num_replicas=num_workers) + train_op = slim.learning.create_train_op(loss_op, sync_optimizer, + variables_to_train=vars_to_optimize, + clip_gradient_norm=clip_gradient_norm) + else: + sync_optimizer = None + train_op = slim.learning.create_train_op(loss_op, optimizer, + variables_to_train=vars_to_optimize, + clip_gradient_norm=clip_gradient_norm) + should_stop_op = tf.greater_equal(global_step_op, max_steps) + return lr_op, global_step_op, train_op, should_stop_op, optimizer, sync_optimizer + +def add_value_to_summary(metric_summary, tag, val, log=True, tag_str=None): + """Adds a scalar summary to the summary object. 
Optionally also logs to + logging.""" + new_value = metric_summary.value.add(); + new_value.tag = tag + new_value.simple_value = val + if log: + if tag_str is None: + tag_str = tag + '%f' + logging.info(tag_str, val) + +def add_scalar_summary_op(tensor, name=None, + summary_key='summaries', print_summary_key='print_summaries', prefix=''): + collections = [] + op = tf.summary.scalar(name, tensor, collections=collections) + if summary_key != print_summary_key: + tf.add_to_collection(summary_key, op) + + op = tf.Print(op, [tensor], ' {:-<25s}: '.format(name) + prefix) + tf.add_to_collection(print_summary_key, op) + return op + +def setup_inputs(inputs): + input_tensors = {} + input_shapes = {} + for (name, typ, sz) in inputs: + _ = tf.placeholder(typ, shape=sz, name=name) + input_tensors[name] = _ + input_shapes[name] = sz + return input_tensors, input_shapes + +def prepare_feed_dict(input_tensors, inputs): + feed_dict = {} + for n in input_tensors.keys(): + feed_dict[input_tensors[n]] = inputs[n].astype(input_tensors[n].dtype.as_numpy_dtype) + return feed_dict + +def simple_add_summaries(summarize_ops, summarize_names, + summary_key='summaries', + print_summary_key='print_summaries', prefix=''): + for op, name, in zip(summarize_ops, summarize_names): + add_scalar_summary_op(op, name, summary_key, print_summary_key, prefix) + + summary_op = tf.summary.merge_all(summary_key) + print_summary_op = tf.summary.merge_all(print_summary_key) + return summary_op, print_summary_op + +def add_summary_ops(m, summarize_ops, summarize_names, to_aggregate=None, + summary_key='summaries', + print_summary_key='print_summaries', prefix=''): + if type(to_aggregate) != list: + to_aggregate = [to_aggregate for _ in summarize_ops] + + # set up aggregating metrics + if np.any(to_aggregate): + agg_ops = [] + for op, name, to_agg in zip(summarize_ops, summarize_names, to_aggregate): + if to_agg: + # agg_ops.append(slim.metrics.streaming_mean(op, return_reset_op=True)) + agg_ops.append(tf.contrib.metrics.streaming_mean(op)) + # agg_ops.append(tf.contrib.metrics.streaming_mean(op, return_reset_op=True)) + else: + agg_ops.append([None, None, None]) + + # agg_values_op, agg_update_op, agg_reset_op = zip(*agg_ops) + # agg_update_op = [x for x in agg_update_op if x is not None] + # agg_reset_op = [x for x in agg_reset_op if x is not None] + agg_values_op, agg_update_op = zip(*agg_ops) + agg_update_op = [x for x in agg_update_op if x is not None] + agg_reset_op = [tf.no_op()] + else: + agg_values_op = [None for _ in to_aggregate] + agg_update_op = [tf.no_op()] + agg_reset_op = [tf.no_op()] + + for op, name, to_agg, agg_op in zip(summarize_ops, summarize_names, to_aggregate, agg_values_op): + if to_agg: + add_scalar_summary_op(agg_op, name, summary_key, print_summary_key, prefix) + else: + add_scalar_summary_op(op, name, summary_key, print_summary_key, prefix) + + summary_op = tf.summary.merge_all(summary_key) + print_summary_op = tf.summary.merge_all(print_summary_key) + return summary_op, print_summary_op, agg_update_op, agg_reset_op + + + +def accum_val_ops(outputs, names, global_step, output_dir, metric_summary, N): + """Processes the collected outputs to compute AP for action prediction. + + Args: + outputs : List of scalar ops to summarize. + names : Name of the scalar ops. + global_step : global_step. + output_dir : where to store results. + metric_summary : summary object to add summaries to. + N : number of outputs to process. 
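+
+  Returns:
+    outs: List containing the mean of each scalar op over the processed
+      outputs.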
+ """ + outs = [] + if N >= 0: + outputs = outputs[:N] + for i in range(len(outputs[0])): + scalar = np.array(map(lambda x: x[i], outputs)) + assert(scalar.ndim == 1) + add_value_to_summary(metric_summary, names[i], np.mean(scalar), + tag_str='{:>27s}: [{:s}]: %f'.format(names[i], '')) + outs.append(np.mean(scalar)) + return outs + +def get_default_summary_ops(): + return utils.Foo(summary_ops=None, print_summary_ops=None, + additional_return_ops=[], arop_summary_iters=[], + arop_eval_fns=[]) + + +def simple_summaries(summarize_ops, summarize_names, mode, to_aggregate=False, + scope_name='summary'): + + if type(to_aggregate) != list: + to_aggregate = [to_aggregate for _ in summarize_ops] + + summary_key = '{:s}_summaries'.format(mode) + print_summary_key = '{:s}_print_summaries'.format(mode) + prefix=' [{:s}]: '.format(mode) + + # Default ops for things that dont need to be aggregated. + if not np.all(to_aggregate): + for op, name, to_agg in zip(summarize_ops, summarize_names, to_aggregate): + if not to_agg: + add_scalar_summary_op(op, name, summary_key, print_summary_key, prefix) + summary_ops = tf.summary.merge_all(summary_key) + print_summary_ops = tf.summary.merge_all(print_summary_key) + else: + summary_ops = tf.no_op() + print_summary_ops = tf.no_op() + + # Default ops for things that dont need to be aggregated. + if np.any(to_aggregate): + additional_return_ops = [[summarize_ops[i] + for i, x in enumerate(to_aggregate )if x]] + arop_summary_iters = [-1] + s_names = ['{:s}/{:s}'.format(scope_name, summarize_names[i]) + for i, x in enumerate(to_aggregate) if x] + fn = lambda outputs, global_step, output_dir, metric_summary, N: \ + accum_val_ops(outputs, s_names, global_step, output_dir, metric_summary, + N) + arop_eval_fns = [fn] + else: + additional_return_ops = [] + arop_summary_iters = [] + arop_eval_fns = [] + return summary_ops, print_summary_ops, additional_return_ops, \ + arop_summary_iters, arop_eval_fns diff --git a/models/research/cognitive_mapping_and_planning/tfcode/vision_baseline_lstm.py b/models/research/cognitive_mapping_and_planning/tfcode/vision_baseline_lstm.py new file mode 100644 index 0000000000000000000000000000000000000000..ccf3ab23b06b71ed2a6d300b9a7d2a67a396c52e --- /dev/null +++ b/models/research/cognitive_mapping_and_planning/tfcode/vision_baseline_lstm.py @@ -0,0 +1,533 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +import numpy as np + + +import tensorflow as tf + +from tensorflow.contrib import slim + +import logging +from tensorflow.python.platform import app +from tensorflow.python.platform import flags +from src import utils +import src.file_utils as fu +import tfcode.nav_utils as nu +from tfcode import tf_utils + +setup_train_step_kwargs = nu.default_train_step_kwargs +compute_losses_multi_or = nu.compute_losses_multi_or +get_repr_from_image = nu.get_repr_from_image + +_save_d_at_t = nu.save_d_at_t +_save_all = nu.save_all +_eval_ap = nu.eval_ap +_eval_dist = nu.eval_dist +_plot_trajectories = nu.plot_trajectories + +def lstm_online(cell_fn, num_steps, inputs, state, varscope): + # inputs is B x num_steps x C, C channels. + # state is 2 tuple with B x 1 x C1, B x 1 x C2 + # Output state is always B x 1 x C + inputs = tf.unstack(inputs, axis=1, num=num_steps) + state = tf.unstack(state, axis=1, num=1)[0] + outputs = [] + + if num_steps > 1: + varscope.reuse_variables() + + for s in range(num_steps): + output, state = cell_fn(inputs[s], state) + outputs.append(output) + outputs = tf.stack(outputs, axis=1) + state = tf.stack([state], axis=1) + return outputs, state + +def _inputs(problem, lstm_states, lstm_state_dims): + # Set up inputs. + with tf.name_scope('inputs'): + n_views = problem.n_views + + inputs = [] + inputs.append(('orig_maps', tf.float32, + (problem.batch_size, 1, None, None, 1))) + inputs.append(('goal_loc', tf.float32, + (problem.batch_size, problem.num_goals, 2))) + + # For initing LSTM. + inputs.append(('rel_goal_loc_at_start', tf.float32, + (problem.batch_size, problem.num_goals, + problem.rel_goal_loc_dim))) + common_input_data, _ = tf_utils.setup_inputs(inputs) + + inputs = [] + inputs.append(('imgs', tf.float32, (problem.batch_size, None, n_views, + problem.img_height, problem.img_width, + problem.img_channels))) + # Goal location as a tuple of delta location and delta theta. 
+ inputs.append(('rel_goal_loc', tf.float32, (problem.batch_size, None, + problem.rel_goal_loc_dim))) + if problem.outputs.visit_count: + inputs.append(('visit_count', tf.int32, (problem.batch_size, None, 1))) + inputs.append(('last_visit', tf.int32, (problem.batch_size, None, 1))) + + for i, (state, dim) in enumerate(zip(lstm_states, lstm_state_dims)): + inputs.append((state, tf.float32, (problem.batch_size, 1, dim))) + + if problem.outputs.egomotion: + inputs.append(('incremental_locs', tf.float32, + (problem.batch_size, None, 2))) + inputs.append(('incremental_thetas', tf.float32, + (problem.batch_size, None, 1))) + + inputs.append(('step_number', tf.int32, (1, None, 1))) + inputs.append(('node_ids', tf.int32, (problem.batch_size, None, + problem.node_ids_dim))) + inputs.append(('perturbs', tf.float32, (problem.batch_size, None, + problem.perturbs_dim))) + + # For plotting result plots + inputs.append(('loc_on_map', tf.float32, (problem.batch_size, None, 2))) + inputs.append(('gt_dist_to_goal', tf.float32, (problem.batch_size, None, 1))) + step_input_data, _ = tf_utils.setup_inputs(inputs) + + inputs = [] + inputs.append(('executed_actions', tf.int32, (problem.batch_size, None))) + inputs.append(('rewards', tf.float32, (problem.batch_size, None))) + inputs.append(('action_sample_wts', tf.float32, (problem.batch_size, None))) + inputs.append(('action', tf.int32, (problem.batch_size, None, + problem.num_actions))) + train_data, _ = tf_utils.setup_inputs(inputs) + train_data.update(step_input_data) + train_data.update(common_input_data) + return common_input_data, step_input_data, train_data + + +def _add_summaries(m, summary_mode, arop_full_summary_iters): + summarize_ops = [m.lr_op, m.global_step_op, m.sample_gt_prob_op, + m.total_loss_op, m.data_loss_op, m.reg_loss_op] + m.acc_ops + summarize_names = ['lr', 'global_step', 'sample_gt_prob_op', 'total_loss', + 'data_loss', 'reg_loss'] + \ + ['acc_{:d}'.format(i) for i in range(len(m.acc_ops))] + to_aggregate = [0, 0, 0, 1, 1, 1] + [1]*len(m.acc_ops) + + scope_name = 'summary' + with tf.name_scope(scope_name): + s_ops = nu.add_default_summaries(summary_mode, arop_full_summary_iters, + summarize_ops, summarize_names, + to_aggregate, m.action_prob_op, + m.input_tensors, scope_name=scope_name) + m.summary_ops = {summary_mode: s_ops} + +def visit_count_fc(visit_count, last_visit, embed_neurons, wt_decay, fc_dropout): + with tf.variable_scope('embed_visit_count'): + visit_count = tf.reshape(visit_count, shape=[-1]) + last_visit = tf.reshape(last_visit, shape=[-1]) + + visit_count = tf.clip_by_value(visit_count, clip_value_min=-1, + clip_value_max=15) + last_visit = tf.clip_by_value(last_visit, clip_value_min=-1, + clip_value_max=15) + visit_count = tf.one_hot(visit_count, depth=16, axis=1, dtype=tf.float32, + on_value=10., off_value=0.) + last_visit = tf.one_hot(last_visit, depth=16, axis=1, dtype=tf.float32, + on_value=10., off_value=0.) 
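+    # Concatenate the two one-hot encodings and embed them with a small fully
+    # connected network.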
+ f = tf.concat([visit_count, last_visit], 1) + x, _ = tf_utils.fc_network( + f, neurons=embed_neurons, wt_decay=wt_decay, name='visit_count_embed', + offset=0, batch_norm_param=None, dropout_ratio=fc_dropout, + is_training=is_training) + return x + +def lstm_setup(name, x, batch_size, is_single_step, lstm_dim, lstm_out, + num_steps, state_input_op): + # returns state_name, state_init_op, updated_state_op, out_op + with tf.name_scope('reshape_'+name): + sh = x.get_shape().as_list() + x = tf.reshape(x, shape=[batch_size, -1, sh[-1]]) + + with tf.variable_scope(name) as varscope: + cell = tf.contrib.rnn.LSTMCell( + num_units=lstm_dim, forget_bias=1.0, state_is_tuple=False, + num_proj=lstm_out, use_peepholes=True, + initializer=tf.random_uniform_initializer(-0.01, 0.01, seed=0), + cell_clip=None, proj_clip=None) + + sh = [batch_size, 1, lstm_dim+lstm_out] + state_init_op = tf.constant(0., dtype=tf.float32, shape=sh) + + fn = lambda ns: lstm_online(cell, ns, x, state_input_op, varscope) + out_op, updated_state_op = tf.cond(is_single_step, lambda: fn(1), lambda: + fn(num_steps)) + + return name, state_init_op, updated_state_op, out_op + +def combine_setup(name, combine_type, embed_img, embed_goal, num_img_neuorons=None, + num_goal_neurons=None): + with tf.name_scope(name + '_' + combine_type): + if combine_type == 'add': + # Simple concat features from goal and image + out = embed_img + embed_goal + + elif combine_type == 'multiply': + # Multiply things together + re_embed_img = tf.reshape( + embed_img, shape=[-1, num_img_neuorons / num_goal_neurons, + num_goal_neurons]) + re_embed_goal = tf.reshape(embed_goal, shape=[-1, num_goal_neurons, 1]) + x = tf.matmul(re_embed_img, re_embed_goal, transpose_a=False, transpose_b=False) + out = slim.flatten(x) + elif combine_type == 'none' or combine_type == 'imgonly': + out = embed_img + elif combine_type == 'goalonly': + out = embed_goal + else: + logging.fatal('Undefined combine_type: %s', combine_type) + return out + + +def preprocess_egomotion(locs, thetas): + with tf.name_scope('pre_ego'): + pre_ego = tf.concat([locs, tf.sin(thetas), tf.cos(thetas)], 2) + sh = pre_ego.get_shape().as_list() + pre_ego = tf.reshape(pre_ego, [-1, sh[-1]]) + return pre_ego + +def setup_to_run(m, args, is_training, batch_norm_is_training, summary_mode): + # Set up the model. + tf.set_random_seed(args.solver.seed) + task_params = args.navtask.task_params + num_steps = task_params.num_steps + num_goals = task_params.num_goals + num_actions = task_params.num_actions + num_actions_ = num_actions + + n_views = task_params.n_views + + batch_norm_is_training_op = \ + tf.placeholder_with_default(batch_norm_is_training, shape=[], + name='batch_norm_is_training_op') + # Setup the inputs + m.input_tensors = {} + lstm_states = []; lstm_state_dims = []; + state_names = []; updated_state_ops = []; init_state_ops = []; + if args.arch.lstm_output: + lstm_states += ['lstm_output'] + lstm_state_dims += [args.arch.lstm_output_dim+task_params.num_actions] + if args.arch.lstm_ego: + lstm_states += ['lstm_ego'] + lstm_state_dims += [args.arch.lstm_ego_dim + args.arch.lstm_ego_out] + lstm_states += ['lstm_img'] + lstm_state_dims += [args.arch.lstm_img_dim + args.arch.lstm_img_out] + elif args.arch.lstm_img: + # An LSTM only on the image + lstm_states += ['lstm_img'] + lstm_state_dims += [args.arch.lstm_img_dim + args.arch.lstm_img_out] + else: + # No LSTMs involved here. 
+ None + + m.input_tensors['common'], m.input_tensors['step'], m.input_tensors['train'] = \ + _inputs(task_params, lstm_states, lstm_state_dims) + + with tf.name_scope('check_size'): + is_single_step = tf.equal(tf.unstack(tf.shape(m.input_tensors['step']['imgs']), + num=6)[1], 1) + + images_reshaped = tf.reshape(m.input_tensors['step']['imgs'], + shape=[-1, task_params.img_height, task_params.img_width, + task_params.img_channels], name='re_image') + + rel_goal_loc_reshaped = tf.reshape(m.input_tensors['step']['rel_goal_loc'], + shape=[-1, task_params.rel_goal_loc_dim], name='re_rel_goal_loc') + + x, vars_ = get_repr_from_image( + images_reshaped, task_params.modalities, task_params.data_augment, + args.arch.encoder, args.solver.freeze_conv, args.solver.wt_decay, + is_training) + + # Reshape into nice things so that these can be accumulated over time steps + # for faster backprop. + sh_before = x.get_shape().as_list() + m.encoder_output = tf.reshape( + x, shape=[task_params.batch_size, -1, n_views] + sh_before[1:]) + x = tf.reshape(m.encoder_output, shape=[-1] + sh_before[1:]) + + # Add a layer to reduce dimensions for a fc layer. + if args.arch.dim_reduce_neurons > 0: + ks = 1; neurons = args.arch.dim_reduce_neurons; + init_var = np.sqrt(2.0/(ks**2)/neurons) + batch_norm_param = args.arch.batch_norm_param + batch_norm_param['is_training'] = batch_norm_is_training_op + m.conv_feat = slim.conv2d( + x, neurons, kernel_size=ks, stride=1, normalizer_fn=slim.batch_norm, + normalizer_params=batch_norm_param, padding='SAME', scope='dim_reduce', + weights_regularizer=slim.l2_regularizer(args.solver.wt_decay), + weights_initializer=tf.random_normal_initializer(stddev=init_var)) + reshape_conv_feat = slim.flatten(m.conv_feat) + sh = reshape_conv_feat.get_shape().as_list() + m.reshape_conv_feat = tf.reshape(reshape_conv_feat, + shape=[-1, sh[1]*n_views]) + + # Restore these from a checkpoint. + if args.solver.pretrained_path is not None: + m.init_fn = slim.assign_from_checkpoint_fn(args.solver.pretrained_path, + vars_) + else: + m.init_fn = None + + # Hit the goal_location with a bunch of fully connected layers, to embed it + # into some space. + with tf.variable_scope('embed_goal'): + batch_norm_param = args.arch.batch_norm_param + batch_norm_param['is_training'] = batch_norm_is_training_op + m.embed_goal, _ = tf_utils.fc_network( + rel_goal_loc_reshaped, neurons=args.arch.goal_embed_neurons, + wt_decay=args.solver.wt_decay, name='goal_embed', offset=0, + batch_norm_param=batch_norm_param, dropout_ratio=args.arch.fc_dropout, + is_training=is_training) + + if args.arch.embed_goal_for_state: + with tf.variable_scope('embed_goal_for_state'): + batch_norm_param = args.arch.batch_norm_param + batch_norm_param['is_training'] = batch_norm_is_training_op + m.embed_goal_for_state, _ = tf_utils.fc_network( + m.input_tensors['common']['rel_goal_loc_at_start'][:,0,:], + neurons=args.arch.goal_embed_neurons, wt_decay=args.solver.wt_decay, + name='goal_embed', offset=0, batch_norm_param=batch_norm_param, + dropout_ratio=args.arch.fc_dropout, is_training=is_training) + + # Hit the goal_location with a bunch of fully connected layers, to embed it + # into some space. 
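+  # (Analogously, embed the convolutional image features with fully connected
+  # layers.)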
+ with tf.variable_scope('embed_img'): + batch_norm_param = args.arch.batch_norm_param + batch_norm_param['is_training'] = batch_norm_is_training_op + m.embed_img, _ = tf_utils.fc_network( + m.reshape_conv_feat, neurons=args.arch.img_embed_neurons, + wt_decay=args.solver.wt_decay, name='img_embed', offset=0, + batch_norm_param=batch_norm_param, dropout_ratio=args.arch.fc_dropout, + is_training=is_training) + + # For lstm_ego, and lstm_image, embed the ego motion, accumulate it into an + # LSTM, combine with image features and accumulate those in an LSTM. Finally + # combine what you get from the image LSTM with the goal to output an action. + if args.arch.lstm_ego: + ego_reshaped = preprocess_egomotion(m.input_tensors['step']['incremental_locs'], + m.input_tensors['step']['incremental_thetas']) + with tf.variable_scope('embed_ego'): + batch_norm_param = args.arch.batch_norm_param + batch_norm_param['is_training'] = batch_norm_is_training_op + m.embed_ego, _ = tf_utils.fc_network( + ego_reshaped, neurons=args.arch.ego_embed_neurons, + wt_decay=args.solver.wt_decay, name='ego_embed', offset=0, + batch_norm_param=batch_norm_param, dropout_ratio=args.arch.fc_dropout, + is_training=is_training) + + state_name, state_init_op, updated_state_op, out_op = lstm_setup( + 'lstm_ego', m.embed_ego, task_params.batch_size, is_single_step, + args.arch.lstm_ego_dim, args.arch.lstm_ego_out, num_steps*num_goals, + m.input_tensors['step']['lstm_ego']) + state_names += [state_name] + init_state_ops += [state_init_op] + updated_state_ops += [updated_state_op] + + # Combine the output with the vision features. + m.img_ego_op = combine_setup('img_ego', args.arch.combine_type_ego, + m.embed_img, out_op, + args.arch.img_embed_neurons[-1], + args.arch.lstm_ego_out) + + # LSTM on these vision features. + state_name, state_init_op, updated_state_op, out_op = lstm_setup( + 'lstm_img', m.img_ego_op, task_params.batch_size, is_single_step, + args.arch.lstm_img_dim, args.arch.lstm_img_out, num_steps*num_goals, + m.input_tensors['step']['lstm_img']) + state_names += [state_name] + init_state_ops += [state_init_op] + updated_state_ops += [updated_state_op] + + m.img_for_goal = out_op + num_img_for_goal_neurons = args.arch.lstm_img_out + + elif args.arch.lstm_img: + # LSTM on just the image features. + state_name, state_init_op, updated_state_op, out_op = lstm_setup( + 'lstm_img', m.embed_img, task_params.batch_size, is_single_step, + args.arch.lstm_img_dim, args.arch.lstm_img_out, num_steps*num_goals, + m.input_tensors['step']['lstm_img']) + state_names += [state_name] + init_state_ops += [state_init_op] + updated_state_ops += [updated_state_op] + m.img_for_goal = out_op + num_img_for_goal_neurons = args.arch.lstm_img_out + + else: + m.img_for_goal = m.embed_img + num_img_for_goal_neurons = args.arch.img_embed_neurons[-1] + + + if args.arch.use_visit_count: + m.embed_visit_count = visit_count_fc( + m.input_tensors['step']['visit_count'], + m.input_tensors['step']['last_visit'], args.arch.goal_embed_neurons, + args.solver.wt_decay, args.arch.fc_dropout, is_training=is_training) + m.embed_goal = m.embed_goal + m.embed_visit_count + + m.combined_f = combine_setup('img_goal', args.arch.combine_type, + m.img_for_goal, m.embed_goal, + num_img_for_goal_neurons, + args.arch.goal_embed_neurons[-1]) + + # LSTM on the combined representation. + if args.arch.lstm_output: + name = 'lstm_output' + # A few fully connected layers here. 
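+    # These layers produce the features that are fed to the output LSTM below.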
+ with tf.variable_scope('action_pred'): + batch_norm_param = args.arch.batch_norm_param + batch_norm_param['is_training'] = batch_norm_is_training_op + x, _ = tf_utils.fc_network( + m.combined_f, neurons=args.arch.pred_neurons, + wt_decay=args.solver.wt_decay, name='pred', offset=0, + batch_norm_param=batch_norm_param, dropout_ratio=args.arch.fc_dropout) + + if args.arch.lstm_output_init_state_from_goal: + # Use the goal embedding to initialize the LSTM state. + # UGLY CLUGGY HACK: if this is doing computation for a single time step + # then this will not involve back prop, so we can use the state input from + # the feed dict, otherwise we compute the state representation from the + # goal and feed that in. Necessary for using goal location to generate the + # state representation. + m.embed_goal_for_state = tf.expand_dims(m.embed_goal_for_state, dim=1) + state_op = tf.cond(is_single_step, lambda: m.input_tensors['step'][name], + lambda: m.embed_goal_for_state) + state_name, state_init_op, updated_state_op, out_op = lstm_setup( + name, x, task_params.batch_size, is_single_step, + args.arch.lstm_output_dim, + num_actions_, + num_steps*num_goals, state_op) + init_state_ops += [m.embed_goal_for_state] + else: + state_op = m.input_tensors['step'][name] + state_name, state_init_op, updated_state_op, out_op = lstm_setup( + name, x, task_params.batch_size, is_single_step, + args.arch.lstm_output_dim, + num_actions_, num_steps*num_goals, state_op) + init_state_ops += [state_init_op] + + state_names += [state_name] + updated_state_ops += [updated_state_op] + + out_op = tf.reshape(out_op, shape=[-1, num_actions_]) + if num_actions_ > num_actions: + m.action_logits_op = out_op[:,:num_actions] + m.baseline_op = out_op[:,num_actions:] + else: + m.action_logits_op = out_op + m.baseline_op = None + m.action_prob_op = tf.nn.softmax(m.action_logits_op) + + else: + # A few fully connected layers here. + with tf.variable_scope('action_pred'): + batch_norm_param = args.arch.batch_norm_param + batch_norm_param['is_training'] = batch_norm_is_training_op + out_op, _ = tf_utils.fc_network( + m.combined_f, neurons=args.arch.pred_neurons, + wt_decay=args.solver.wt_decay, name='pred', offset=0, + num_pred=num_actions_, + batch_norm_param=batch_norm_param, + dropout_ratio=args.arch.fc_dropout, is_training=is_training) + if num_actions_ > num_actions: + m.action_logits_op = out_op[:,:num_actions] + m.baseline_op = out_op[:,num_actions:] + else: + m.action_logits_op = out_op + m.baseline_op = None + m.action_prob_op = tf.nn.softmax(m.action_logits_op) + + m.train_ops = {} + m.train_ops['step'] = m.action_prob_op + m.train_ops['common'] = [m.input_tensors['common']['orig_maps'], + m.input_tensors['common']['goal_loc'], + m.input_tensors['common']['rel_goal_loc_at_start']] + m.train_ops['state_names'] = state_names + m.train_ops['init_state'] = init_state_ops + m.train_ops['updated_state'] = updated_state_ops + m.train_ops['batch_norm_is_training_op'] = batch_norm_is_training_op + + # Flat list of ops which cache the step data. 
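+  # When the convolutional encoder is frozen its output is cached for each
+  # step, so that it is not recomputed inside the training pass.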
+ m.train_ops['step_data_cache'] = [tf.no_op()] + + if args.solver.freeze_conv: + m.train_ops['step_data_cache'] = [m.encoder_output] + else: + m.train_ops['step_data_cache'] = [] + + ewma_decay = 0.99 if is_training else 0.0 + weight = tf.ones_like(m.input_tensors['train']['action'], dtype=tf.float32, + name='weight') + + m.reg_loss_op, m.data_loss_op, m.total_loss_op, m.acc_ops = \ + compute_losses_multi_or( + m.action_logits_op, m.input_tensors['train']['action'], + weights=weight, num_actions=num_actions, + data_loss_wt=args.solver.data_loss_wt, + reg_loss_wt=args.solver.reg_loss_wt, ewma_decay=ewma_decay) + + + if args.solver.freeze_conv: + vars_to_optimize = list(set(tf.trainable_variables()) - set(vars_)) + else: + vars_to_optimize = None + + m.lr_op, m.global_step_op, m.train_op, m.should_stop_op, m.optimizer, \ + m.sync_optimizer = tf_utils.setup_training( + m.total_loss_op, + args.solver.initial_learning_rate, + args.solver.steps_per_decay, + args.solver.learning_rate_decay, + args.solver.momentum, + args.solver.max_steps, + args.solver.sync, + args.solver.adjust_lr_sync, + args.solver.num_workers, + args.solver.task, + vars_to_optimize=vars_to_optimize, + clip_gradient_norm=args.solver.clip_gradient_norm, + typ=args.solver.typ, momentum2=args.solver.momentum2, + adam_eps=args.solver.adam_eps) + + + if args.arch.sample_gt_prob_type == 'inverse_sigmoid_decay': + m.sample_gt_prob_op = tf_utils.inverse_sigmoid_decay(args.arch.isd_k, + m.global_step_op) + elif args.arch.sample_gt_prob_type == 'zero': + m.sample_gt_prob_op = tf.constant(-1.0, dtype=tf.float32) + elif args.arch.sample_gt_prob_type.split('_')[0] == 'step': + step = int(args.arch.sample_gt_prob_type.split('_')[1]) + m.sample_gt_prob_op = tf_utils.step_gt_prob( + step, m.input_tensors['step']['step_number'][0,0,0]) + + m.sample_action_type = args.arch.action_sample_type + m.sample_action_combine_type = args.arch.action_sample_combine_type + _add_summaries(m, summary_mode, args.summary.arop_full_summary_iters) + + m.init_op = tf.group(tf.global_variables_initializer(), + tf.local_variables_initializer()) + m.saver_op = tf.train.Saver(keep_checkpoint_every_n_hours=4, + write_version=tf.train.SaverDef.V2) + + return m diff --git a/models/research/cognitive_planning/BUILD b/models/research/cognitive_planning/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..3561987df8ae43df60c78690b6c3a52fcb887a3e --- /dev/null +++ b/models/research/cognitive_planning/BUILD @@ -0,0 +1,19 @@ +package(default_visibility = [":internal"]) + +licenses(["notice"]) # Apache 2.0 + +exports_files(["LICENSE"]) + +package_group( + name = "internal", + packages = [ + "//cognitive_planning/...", + ], +) + +py_binary( + name = "train_supervised_active_vision", + srcs = [ + "train_supervised_active_vision.py", + ], +) diff --git a/models/research/cognitive_planning/README.md b/models/research/cognitive_planning/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1c63ddc3f906a74141e7dccd4dee161eb095e546 --- /dev/null +++ b/models/research/cognitive_planning/README.md @@ -0,0 +1,157 @@ +# cognitive_planning + +**Visual Representation for Semantic Target Driven Navigation** + +Arsalan Mousavian, Alexander Toshev, Marek Fiser, Jana Kosecka, James Davidson + +This is the implementation of semantic target driven navigation training and evaluation on +Active Vision dataset. + +ECCV Workshop on Visual Learning and Embodied Agents in Simulation Environments +2018. + +
+<!-- Example target-driven navigation episodes (animated GIFs omitted here):
+     Target: Fridge | Target: Television
+     Target: Microwave | Target: Couch -->
+
+
+Paper: [https://arxiv.org/abs/1805.06066](https://arxiv.org/abs/1805.06066)
+
+
+## 1. Installation
+
+### Requirements
+
+#### Python Packages
+
+```shell
+networkx
+gin-config
+```
+
+### Download cognitive_planning
+
+```shell
+git clone --depth 1 https://github.com/tensorflow/models.git
+```
+
+## 2. Datasets
+
+### Download ActiveVision Dataset
+We used the Active Vision Dataset (AVD), which can be downloaded from [here](http://cs.unc.edu/~ammirato/active_vision_dataset_website/). To make our code faster and reduce its memory footprint, we created the AVD Minimal dataset. AVD Minimal consists of low-resolution images from the original AVD dataset. In addition, we added annotations for target views, object detections predicted by a detector pre-trained on the MS-COCO dataset, and semantic segmentations predicted by a model pre-trained on the NYU-v2 dataset. AVD Minimal can be downloaded from [here](https://storage.googleapis.com/active-vision-dataset/AVD_Minimal.zip). Set `$AVD_DIR` to the path of the downloaded AVD Minimal.
+
+### TODO: SUNCG Dataset
+The current version of the code does not support the SUNCG dataset. It can be added by
+implementing the necessary functions of `envs/task_env.py` using the publicly
+released code of a SUNCG environment such as
+[House3d](https://github.com/facebookresearch/House3D) or
+[MINOS](https://github.com/minosworld/minos).
+
+### ActiveVisionDataset Demo
+
+If you wish to navigate the environment interactively and see what the AVD looks like, you can use the following command:
+```shell
+python viz_active_vision_dataset_main.py \
+  --mode=human \
+  --gin_config=envs/configs/active_vision_config.gin \
+  --gin_params='ActiveVisionDatasetEnv.dataset_root=$AVD_DIR'
+```
+
+## 3. Training
+Right now, the released version only supports training and inference using the real data from the Active Vision Dataset.
+
+When the RGB image modality is used, the ResNet embeddings are initialized from a pre-trained ResNet50 checkpoint. Before starting training, download the checkpoint and extract it so that it is available at ./resnet_v2_50_checkpoint/resnet_v2_50.ckpt in the working directory:
+
+```
+wget http://download.tensorflow.org/models/resnet_v2_50_2017_04_14.tar.gz
+```
+### Run training
+Use the following command for training:
+```shell
+# Train
+python train_supervised_active_vision.py \
+  --mode='train' \
+  --logdir=$CHECKPOINT_DIR \
+  --modality_types='det' \
+  --batch_size=8 \
+  --train_iters=200000 \
+  --lstm_cell_size=2048 \
+  --policy_fc_size=2048 \
+  --sequence_length=20 \
+  --max_eval_episode_length=100 \
+  --test_iters=194 \
+  --gin_config=envs/configs/active_vision_config.gin \
+  --gin_params='ActiveVisionDatasetEnv.dataset_root=$AVD_DIR' \
+  --logtostderr
+```
+
+Training can be run with different modalities and modality combinations, including semantic segmentation, object detections, RGB images, and depth images. Low-resolution images, the outputs of a detector pre-trained on the COCO dataset, and semantic segmentations from a model pre-trained on the NYU dataset are provided as part of this distribution and can be found in the Meta directory of AVD_Minimal.
+Additional details are described in the comments of the code and in the paper.
+
+### Run Evaluation
+Use the following command for unrolling the policy on the eval environments. The inference code periodically checks the checkpoint folder for new checkpoints and uses each one to unroll the policy on the eval environments. After each evaluation, it creates a folder $CHECKPOINT_DIR/evals/$ITER, where $ITER is the iteration number at which the checkpoint was stored.
+```shell +# Eval +python train_supervised_active_vision.py \ + --mode='eval' \ + --logdir=$CHECKPOINT_DIR \ + --modality_types='det' \ + --batch_size=8 \ + --train_iters=200000 \ + --lstm_cell_size=2048 \ + --policy_fc_size=2048 \ + --sequence_length=20 \ + --max_eval_episode_length=100 \ + --test_iters=194 \ + --gin_config=envs/configs/active_vision_config.gin \ + --gin_params='ActiveVisionDatasetEnv.dataset_root=$AVD_DIR' \ + --logtostderr +``` +At any point, you can run the following command to compute statistics such as success rate over all the evaluations so far. It also generates gif images for unrolling of the best policy. +```shell +# Visualize and Compute Stats +python viz_active_vision_dataset_main.py \ + --mode=eval \ + --eval_folder=$CHECKPOINT_DIR/evals/ \ + --output_folder=$OUTPUT_GIFS_FOLDER \ + --gin_config=envs/configs/active_vision_config.gin \ + --gin_params='ActiveVisionDatasetEnv.dataset_root=$AVD_DIR' +``` +## Contact + +To ask questions or report issues please open an issue on the tensorflow/models +[issues tracker](https://github.com/tensorflow/models/issues). +Please assign issues to @arsalan-mousavian. + +## Reference +The details of the training and experiments can be found in the following paper. If you find our work useful in your research please consider citing our paper: + +``` +@inproceedings{MousavianECCVW18, + author = {A. Mousavian and A. Toshev and M. Fiser and J. Kosecka and J. Davidson}, + title = {Visual Representations for Semantic Target Driven Navigation}, + booktitle = {ECCV Workshop on Visual Learning and Embodied Agents in Simulation Environments}, + year = {2018}, +} +``` + + diff --git a/models/research/cognitive_planning/__init__.py b/models/research/cognitive_planning/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/cognitive_planning/command b/models/research/cognitive_planning/command new file mode 100644 index 0000000000000000000000000000000000000000..daf634b3c6e6eabba5df88e77b3c3bd81d163be1 --- /dev/null +++ b/models/research/cognitive_planning/command @@ -0,0 +1,14 @@ +python train_supervised_active_vision \ + --mode='train' \ + --logdir=/usr/local/google/home/kosecka/checkin_log_det/ \ + --modality_types='det' \ + --batch_size=8 \ + --train_iters=200000 \ + --lstm_cell_size=2048 \ + --policy_fc_size=2048 \ + --sequence_length=20 \ + --max_eval_episode_length=100 \ + --test_iters=194 \ + --gin_config=robotics/cognitive_planning/envs/configs/active_vision_config.gin \ + --gin_params='ActiveVisionDatasetEnv.dataset_root="/usr/local/google/home/kosecka/AVD_minimal/"' \ + --logtostderr diff --git a/models/research/cognitive_planning/embedders.py b/models/research/cognitive_planning/embedders.py new file mode 100644 index 0000000000000000000000000000000000000000..91ed9f45e2f7f0f9388041c1e624a6c0393ab5a1 --- /dev/null +++ b/models/research/cognitive_planning/embedders.py @@ -0,0 +1,547 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Interface for different embedders for modalities.""" + +import abc +import numpy as np +import tensorflow as tf +import preprocessing +from tensorflow.contrib.slim.nets import resnet_v2 + +slim = tf.contrib.slim + + +class Embedder(object): + """Represents the embedder for different modalities. + + Modalities can be semantic segmentation, depth channel, object detection and + so on, which require specific embedder for them. + """ + __metaclass__ = abc.ABCMeta + + @abc.abstractmethod + def build(self, observation): + """Builds the model to embed the observation modality. + + Args: + observation: tensor that contains the raw observation from modality. + Returns: + Embedding tensor for the given observation tensor. + """ + raise NotImplementedError( + 'Needs to be implemented as part of Embedder Interface') + + +class DetectionBoxEmbedder(Embedder): + """Represents the model that encodes the detection boxes from images.""" + + def __init__(self, rnn_state_size, scope=None): + self._rnn_state_size = rnn_state_size + self._scope = scope + + def build(self, observations): + """Builds the model to embed object detection observations. + + Args: + observations: a tuple of (dets, det_num). + dets is a tensor of BxTxLxE that has the detection boxes in all the + images of the batch. B is the batch size, T is the maximum length of + episode, L is the maximum number of detections per image in the batch + and E is the size of each detection embedding. + det_num is a tensor of BxT that contains the number of detected boxes + each image of each sequence in the batch. + Returns: + For each image in the batch, returns the accumulative embedding of all the + detection boxes in that image. + """ + with tf.variable_scope(self._scope, default_name=''): + shape = observations[0].shape + dets = tf.reshape(observations[0], [-1, shape[-2], shape[-1]]) + det_num = tf.reshape(observations[1], [-1]) + lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(self._rnn_state_size) + batch_size = tf.shape(dets)[0] + lstm_outputs, _ = tf.nn.dynamic_rnn( + cell=lstm_cell, + inputs=dets, + sequence_length=det_num, + initial_state=lstm_cell.zero_state(batch_size, dtype=tf.float32), + dtype=tf.float32) + # Gathering the last state of each sequence in the batch. + batch_range = tf.range(batch_size) + indices = tf.stack([batch_range, det_num - 1], axis=1) + last_lstm_outputs = tf.gather_nd(lstm_outputs, indices) + last_lstm_outputs = tf.reshape(last_lstm_outputs, + [-1, shape[1], self._rnn_state_size]) + return last_lstm_outputs + + +class ResNet(Embedder): + """Residual net embedder for image data.""" + + def __init__(self, params, *args, **kwargs): + super(ResNet, self).__init__(*args, **kwargs) + self._params = params + self._extra_train_ops = [] + + def build(self, images): + shape = images.get_shape().as_list() + if len(shape) == 5: + images = tf.reshape(images, + [shape[0] * shape[1], shape[2], shape[3], shape[4]]) + embedding = self._build_model(images) + if len(shape) == 5: + embedding = tf.reshape(embedding, [shape[0], shape[1], -1]) + + return embedding + + @property + def extra_train_ops(self): + return self._extra_train_ops + + def _build_model(self, images): + """Builds the model.""" + + # Convert images to floats and normalize them. 
+ images = tf.to_float(images) + bs = images.get_shape().as_list()[0] + images = [ + tf.image.per_image_standardization(tf.squeeze(i)) + for i in tf.split(images, bs) + ] + images = tf.concat([tf.expand_dims(i, axis=0) for i in images], axis=0) + + with tf.variable_scope('init'): + x = self._conv('init_conv', images, 3, 3, 16, self._stride_arr(1)) + + strides = [1, 2, 2] + activate_before_residual = [True, False, False] + if self._params.use_bottleneck: + res_func = self._bottleneck_residual + filters = [16, 64, 128, 256] + else: + res_func = self._residual + filters = [16, 16, 32, 128] + + with tf.variable_scope('unit_1_0'): + x = res_func(x, filters[0], filters[1], self._stride_arr(strides[0]), + activate_before_residual[0]) + for i in xrange(1, self._params.num_residual_units): + with tf.variable_scope('unit_1_%d' % i): + x = res_func(x, filters[1], filters[1], self._stride_arr(1), False) + + with tf.variable_scope('unit_2_0'): + x = res_func(x, filters[1], filters[2], self._stride_arr(strides[1]), + activate_before_residual[1]) + for i in xrange(1, self._params.num_residual_units): + with tf.variable_scope('unit_2_%d' % i): + x = res_func(x, filters[2], filters[2], self._stride_arr(1), False) + + with tf.variable_scope('unit_3_0'): + x = res_func(x, filters[2], filters[3], self._stride_arr(strides[2]), + activate_before_residual[2]) + for i in xrange(1, self._params.num_residual_units): + with tf.variable_scope('unit_3_%d' % i): + x = res_func(x, filters[3], filters[3], self._stride_arr(1), False) + + with tf.variable_scope('unit_last'): + x = self._batch_norm('final_bn', x) + x = self._relu(x, self._params.relu_leakiness) + + with tf.variable_scope('pool_logit'): + x = self._global_avg_pooling(x) + + return x + + def _stride_arr(self, stride): + return [1, stride, stride, 1] + + def _batch_norm(self, name, x): + """batch norm implementation.""" + with tf.variable_scope(name): + params_shape = [x.shape[-1]] + + beta = tf.get_variable( + 'beta', + params_shape, + tf.float32, + initializer=tf.constant_initializer(0.0, tf.float32)) + gamma = tf.get_variable( + 'gamma', + params_shape, + tf.float32, + initializer=tf.constant_initializer(1.0, tf.float32)) + + if self._params.is_train: + mean, variance = tf.nn.moments(x, [0, 1, 2], name='moments') + + moving_mean = tf.get_variable( + 'moving_mean', + params_shape, + tf.float32, + initializer=tf.constant_initializer(0.0, tf.float32), + trainable=False) + moving_variance = tf.get_variable( + 'moving_variance', + params_shape, + tf.float32, + initializer=tf.constant_initializer(1.0, tf.float32), + trainable=False) + + self._extra_train_ops.append( + tf.assign_moving_average(moving_mean, mean, 0.9)) + self._extra_train_ops.append( + tf.assign_moving_average(moving_variance, variance, 0.9)) + else: + mean = tf.get_variable( + 'moving_mean', + params_shape, + tf.float32, + initializer=tf.constant_initializer(0.0, tf.float32), + trainable=False) + variance = tf.get_variable( + 'moving_variance', + params_shape, + tf.float32, + initializer=tf.constant_initializer(1.0, tf.float32), + trainable=False) + tf.summary.histogram(mean.op.name, mean) + tf.summary.histogram(variance.op.name, variance) + # elipson used to be 1e-5. Maybe 0.001 solves NaN problem in deeper net. 
+ y = tf.nn.batch_normalization(x, mean, variance, beta, gamma, 0.001) + y.set_shape(x.shape) + return y + + def _residual(self, + x, + in_filter, + out_filter, + stride, + activate_before_residual=False): + """Residual unit with 2 sub layers.""" + + if activate_before_residual: + with tf.variable_scope('shared_activation'): + x = self._batch_norm('init_bn', x) + x = self._relu(x, self._params.relu_leakiness) + orig_x = x + else: + with tf.variable_scope('residual_only_activation'): + orig_x = x + x = self._batch_norm('init_bn', x) + x = self._relu(x, self._params.relu_leakiness) + + with tf.variable_scope('sub1'): + x = self._conv('conv1', x, 3, in_filter, out_filter, stride) + + with tf.variable_scope('sub2'): + x = self._batch_norm('bn2', x) + x = self._relu(x, self._params.relu_leakiness) + x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1]) + + with tf.variable_scope('sub_add'): + if in_filter != out_filter: + orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID') + orig_x = tf.pad( + orig_x, [[0, 0], [0, 0], [0, 0], [(out_filter - in_filter) // 2, + (out_filter - in_filter) // 2]]) + x += orig_x + + return x + + def _bottleneck_residual(self, + x, + in_filter, + out_filter, + stride, + activate_before_residual=False): + """A residual convolutional layer with a bottleneck. + + The layer is a composite of three convolutional layers with a ReLU non- + linearity and batch normalization after each linear convolution. The depth + if the second and third layer is out_filter / 4 (hence it is a bottleneck). + + Args: + x: a float 4 rank Tensor representing the input to the layer. + in_filter: a python integer representing depth of the input. + out_filter: a python integer representing depth of the output. + stride: a python integer denoting the stride of the layer applied before + the first convolution. + activate_before_residual: a python boolean. If True, then a ReLU is + applied as a first operation on the input x before everything else. + Returns: + A 4 rank Tensor with batch_size = batch size of input, width and height = + width / stride and height / stride of the input and depth = out_filter. 
+ """ + if activate_before_residual: + with tf.variable_scope('common_bn_relu'): + x = self._batch_norm('init_bn', x) + x = self._relu(x, self._params.relu_leakiness) + orig_x = x + else: + with tf.variable_scope('residual_bn_relu'): + orig_x = x + x = self._batch_norm('init_bn', x) + x = self._relu(x, self._params.relu_leakiness) + + with tf.variable_scope('sub1'): + x = self._conv('conv1', x, 1, in_filter, out_filter / 4, stride) + + with tf.variable_scope('sub2'): + x = self._batch_norm('bn2', x) + x = self._relu(x, self._params.relu_leakiness) + x = self._conv('conv2', x, 3, out_filter / 4, out_filter / 4, + [1, 1, 1, 1]) + + with tf.variable_scope('sub3'): + x = self._batch_norm('bn3', x) + x = self._relu(x, self._params.relu_leakiness) + x = self._conv('conv3', x, 1, out_filter / 4, out_filter, [1, 1, 1, 1]) + + with tf.variable_scope('sub_add'): + if in_filter != out_filter: + orig_x = self._conv('project', orig_x, 1, in_filter, out_filter, stride) + x += orig_x + + return x + + def _decay(self): + costs = [] + for var in tf.trainable_variables(): + if var.op.name.find(r'DW') > 0: + costs.append(tf.nn.l2_loss(var)) + + return tf.mul(self._params.weight_decay_rate, tf.add_n(costs)) + + def _conv(self, name, x, filter_size, in_filters, out_filters, strides): + """Convolution.""" + with tf.variable_scope(name): + n = filter_size * filter_size * out_filters + kernel = tf.get_variable( + 'DW', [filter_size, filter_size, in_filters, out_filters], + tf.float32, + initializer=tf.random_normal_initializer(stddev=np.sqrt(2.0 / n))) + return tf.nn.conv2d(x, kernel, strides, padding='SAME') + + def _relu(self, x, leakiness=0.0): + return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu') + + def _fully_connected(self, x, out_dim): + x = tf.reshape(x, [self._params.batch_size, -1]) + w = tf.get_variable( + 'DW', [x.get_shape()[1], out_dim], + initializer=tf.uniform_unit_scaling_initializer(factor=1.0)) + b = tf.get_variable( + 'biases', [out_dim], initializer=tf.constant_initializer()) + return tf.nn.xw_plus_b(x, w, b) + + def _global_avg_pooling(self, x): + assert x.get_shape().ndims == 4 + return tf.reduce_mean(x, [1, 2]) + + +class MLPEmbedder(Embedder): + """Embedder of vectorial data. + + The net is a multi-layer perceptron, with ReLU nonlinearities in all layers + except the last one. + """ + + def __init__(self, layers, *args, **kwargs): + """Constructs MLPEmbedder. + + Args: + layers: a list of python integers representing layer sizes. + *args: arguments for super constructor. + **kwargs: keyed arguments for super constructor. 
+ """ + super(MLPEmbedder, self).__init__(*args, **kwargs) + self._layers = layers + + def build(self, features): + shape = features.get_shape().as_list() + if len(shape) == 3: + features = tf.reshape(features, [shape[0] * shape[1], shape[2]]) + x = features + for i, dim in enumerate(self._layers): + with tf.variable_scope('layer_%i' % i): + x = self._fully_connected(x, dim) + if i < len(self._layers) - 1: + x = self._relu(x) + + if len(shape) == 3: + x = tf.reshape(x, shape[:-1] + [self._layers[-1]]) + return x + + def _fully_connected(self, x, out_dim): + w = tf.get_variable( + 'DW', [x.get_shape()[1], out_dim], + initializer=tf.variance_scaling_initializer(distribution='uniform')) + b = tf.get_variable( + 'biases', [out_dim], initializer=tf.constant_initializer()) + return tf.nn.xw_plus_b(x, w, b) + + def _relu(self, x, leakiness=0.0): + return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu') + + +class SmallNetworkEmbedder(Embedder): + """Embedder for image like observations. + + The network is comprised of multiple conv layers and a fully connected layer + at the end. The number of conv layers and the parameters are configured from + params. + """ + + def __init__(self, params, *args, **kwargs): + """Constructs the small network. + + Args: + params: params should be tf.hparams type. params need to have a list of + conv_sizes, conv_strides, conv_channels. The length of these lists + should be equal to each other and to the number of conv layers in the + network. Plus, it also needs to have boolean variable named to_one_hot + which indicates whether the input should be converted to one hot or not. + The size of the fully connected layer is specified by + params.embedding_size. + + *args: The rest of the parameters. + **kwargs: the reset of the parameters. + + Raises: + ValueError: If the length of params.conv_strides, params.conv_sizes, and + params.conv_channels are not equal. + + """ + + super(SmallNetworkEmbedder, self).__init__(*args, **kwargs) + self._params = params + if len(self._params.conv_sizes) != len(self._params.conv_strides): + raise ValueError( + 'Conv sizes and strides should have the same length: {} != {}'.format( + len(self._params.conv_sizes), len(self._params.conv_strides))) + + if len(self._params.conv_sizes) != len(self._params.conv_channels): + raise ValueError( + 'Conv sizes and channels should have the same length: {} != {}'. + format(len(self._params.conv_sizes), len(self._params.conv_channels))) + + def build(self, images): + """Builds the embedder with the given speicifcation. + + Args: + images: a tensor that contains the input images which has the shape of + NxTxHxWxC where N is the batch size, T is the maximum length of the + sequence, H and W are the height and width of the images and C is the + number of channels. + + Returns: + A tensor that is the embedding of the images. + """ + + shape = images.get_shape().as_list() + images = tf.reshape(images, + [shape[0] * shape[1], shape[2], shape[3], shape[4]]) + + with slim.arg_scope( + [slim.conv2d, slim.fully_connected], + activation_fn=tf.nn.relu, + weights_regularizer=slim.l2_regularizer(self._params.weight_decay_rate), + biases_initializer=tf.zeros_initializer()): + with slim.arg_scope([slim.conv2d], padding='SAME'): + # convert the image to one hot if needed. + if self._params.to_one_hot: + net = tf.one_hot( + tf.squeeze(tf.to_int32(images), axis=[-1]), + self._params.one_hot_length) + else: + net = images + + p = self._params + # Adding conv layers with the specified configurations. 
+ for conv_id, kernel_stride_channel in enumerate( + zip(p.conv_sizes, p.conv_strides, p.conv_channels)): + kernel_size, stride, channels = kernel_stride_channel + net = slim.conv2d( + net, + channels, [kernel_size, kernel_size], + stride, + scope='conv_{}'.format(conv_id + 1)) + + net = slim.flatten(net) + net = slim.fully_connected(net, self._params.embedding_size, scope='fc') + + output = tf.reshape(net, [shape[0], shape[1], -1]) + return output + + +class ResNet50Embedder(Embedder): + """Uses ResNet50 to embed input images.""" + + def build(self, images): + """Builds a ResNet50 embedder for the input images. + + It assumes that the range of the pixel values in the images tensor is + [0,255] and should be castable to tf.uint8. + + Args: + images: a tensor that contains the input images which has the shape of + NxTxHxWx3 where N is the batch size, T is the maximum length of the + sequence, H and W are the height and width of the images and C is the + number of channels. + Returns: + The embedding of the input image with the shape of NxTxL where L is the + embedding size of the output. + + Raises: + ValueError: if the shape of the input does not agree with the expected + shape explained in the Args section. + """ + shape = images.get_shape().as_list() + if len(shape) != 5: + raise ValueError( + 'The tensor shape should have 5 elements, {} is provided'.format( + len(shape))) + if shape[4] != 3: + raise ValueError('Three channels are expected for the input image') + + images = tf.cast(images, tf.uint8) + images = tf.reshape(images, + [shape[0] * shape[1], shape[2], shape[3], shape[4]]) + with slim.arg_scope(resnet_v2.resnet_arg_scope()): + + def preprocess_fn(x): + x = tf.expand_dims(x, 0) + x = tf.image.resize_bilinear(x, [299, 299], + align_corners=False) + return(tf.squeeze(x, [0])) + + images = tf.map_fn(preprocess_fn, images, dtype=tf.float32) + + net, _ = resnet_v2.resnet_v2_50( + images, is_training=False, global_pool=True) + output = tf.reshape(net, [shape[0], shape[1], -1]) + return output + + +class IdentityEmbedder(Embedder): + """This embedder just returns the input as the output. + + Used for modalitites that the embedding of the modality is the same as the + modality itself. For example, it can be used for one_hot goal. + """ + + def build(self, images): + return images diff --git a/models/research/cognitive_planning/envs/__init__.py b/models/research/cognitive_planning/envs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/cognitive_planning/envs/active_vision_dataset_env.py b/models/research/cognitive_planning/envs/active_vision_dataset_env.py new file mode 100644 index 0000000000000000000000000000000000000000..507cde76890369a0e407ceac074866c84fdfda5b --- /dev/null +++ b/models/research/cognitive_planning/envs/active_vision_dataset_env.py @@ -0,0 +1,1097 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Gym environment for the ActiveVision Dataset. + + The dataset is captured with a robot moving around and taking picture in + multiple directions. The actions are moving in four directions, and rotate + clockwise or counter clockwise. The observations are the output of vision + pipelines such as object detectors. The goal is to find objects of interest + in each environment. For more details, refer: + http://cs.unc.edu/~ammirato/active_vision_dataset_website/. +""" +import tensorflow as tf +import collections +import copy +import json +import os +from StringIO import StringIO +import time +import gym +from gym.envs.registration import register +import gym.spaces +import networkx as nx +import numpy as np +import scipy.io as sio +from absl import logging +import gin +import cv2 +import label_map_util +import visualization_utils as vis_util +from envs import task_env + + +register( + id='active-vision-env-v0', + entry_point= + 'cognitive_planning.envs.active_vision_dataset_env:ActiveVisionDatasetEnv', # pylint: disable=line-too-long +) + +_MAX_DEPTH_VALUE = 12102 + +SUPPORTED_ACTIONS = [ + 'right', 'rotate_cw', 'rotate_ccw', 'forward', 'left', 'backward', 'stop' +] +SUPPORTED_MODALITIES = [ + task_env.ModalityTypes.SEMANTIC_SEGMENTATION, + task_env.ModalityTypes.DEPTH, + task_env.ModalityTypes.OBJECT_DETECTION, + task_env.ModalityTypes.IMAGE, + task_env.ModalityTypes.GOAL, + task_env.ModalityTypes.PREV_ACTION, + task_env.ModalityTypes.DISTANCE, +] + +# Data structure for storing the information related to the graph of the world. +_Graph = collections.namedtuple('_Graph', [ + 'graph', 'id_to_index', 'index_to_id', 'target_indexes', 'distance_to_goal' +]) + + +def _init_category_index(label_map_path): + """Creates category index from class indexes to name of the classes. + + Args: + label_map_path: path to the mapping. + Returns: + A map for mapping int keys to string categories. + """ + + label_map = label_map_util.load_labelmap(label_map_path) + num_classes = np.max(x.id for x in label_map.item) + categories = label_map_util.convert_label_map_to_categories( + label_map, max_num_classes=num_classes, use_display_name=True) + category_index = label_map_util.create_category_index(categories) + return category_index + + +def _draw_detections(image_np, detections, category_index): + """Draws detections on to the image. + + Args: + image_np: Image in the form of uint8 numpy array. + detections: a dictionary that contains the detection outputs. + category_index: contains the mapping between indexes and the category names. + + Returns: + Does not return anything but draws the boxes on the + """ + vis_util.visualize_boxes_and_labels_on_image_array( + image_np, + detections['detection_boxes'], + detections['detection_classes'], + detections['detection_scores'], + category_index, + use_normalized_coordinates=True, + max_boxes_to_draw=1000, + min_score_thresh=.0, + agnostic_mode=False) + + +def generate_detection_image(detections, + image_size, + category_map, + num_classes, + is_binary=True): + """Generates one_hot vector of the image using the detection boxes. + + Args: + detections: 2D object detections from the image. It's a dictionary that + contains detection_boxes, detection_classes, and detection_scores with + dimensions of nx4, nx1, nx1 where n is the number of detections. + image_size: The resolution of the output image. + category_map: dictionary that maps label names to index. 
+ num_classes: Number of classes. + is_binary: If true, it sets the corresponding channels to 0 and 1. + Otherwise, sets the score in the corresponding channel. + Returns: + Returns image_size x image_size x num_classes image for the detection boxes. + """ + res = np.zeros((image_size, image_size, num_classes), dtype=np.float32) + boxes = detections['detection_boxes'] + labels = detections['detection_classes'] + scores = detections['detection_scores'] + for box, label, score in zip(boxes, labels, scores): + transformed_boxes = [int(round(t)) for t in box * image_size] + y1, x1, y2, x2 = transformed_boxes + # Detector returns fixed number of detections. Boxes with area of zero + # are equivalent of boxes that don't correspond to any detection box. + # So, we need to skip the boxes with area 0. + if (y2 - y1) * (x2 - x1) == 0: + continue + assert category_map[label] < num_classes, 'label = {}'.format(label) + value = score + if is_binary: + value = 1 + res[y1:y2, x1:x2, category_map[label]] = value + return res + + +def _get_detection_path(root, detection_folder_name, world): + return os.path.join(root, 'Meta', detection_folder_name, world + '.npy') + + +def _get_image_folder(root, world): + return os.path.join(root, world, 'jpg_rgb') + + +def _get_json_path(root, world): + return os.path.join(root, world, 'annotations.json') + + +def _get_image_path(root, world, image_id): + return os.path.join(_get_image_folder(root, world), image_id + '.jpg') + + +def _get_image_list(path, worlds): + """Builds a dictionary for all the worlds. + + Args: + path: the path to the dataset on cns. + worlds: list of the worlds. + + Returns: + dictionary where the key is the world names and the values + are the image_ids of that world. + """ + world_id_dict = {} + for loc in worlds: + files = [t[:-4] for t in tf.gfile.ListDir(_get_image_folder(path, loc))] + world_id_dict[loc] = files + return world_id_dict + + +def read_all_poses(dataset_root, world): + """Reads all the poses for each world. + + Args: + dataset_root: the path to the root of the dataset. + world: string, name of the world. + + Returns: + Dictionary of poses for all the images in each world. The key is the image + id of each view and the values are tuple of (x, z, R, scale). Where x and z + are the first and third coordinate of translation. R is the 3x3 rotation + matrix and scale is a float scalar that indicates the scale that needs to + be multipled to x and z in order to get the real world coordinates. + + Raises: + ValueError: if the number of images do not match the number of poses read. 
+ """ + path = os.path.join(dataset_root, world, 'image_structs.mat') + with tf.gfile.Open(path) as f: + data = sio.loadmat(f) + xyz = data['image_structs']['world_pos'] + image_names = data['image_structs']['image_name'][0] + rot = data['image_structs']['R'][0] + scale = data['scale'][0][0] + n = xyz.shape[1] + x = [xyz[0][i][0][0] for i in range(n)] + z = [xyz[0][i][2][0] for i in range(n)] + names = [name[0][:-4] for name in image_names] + if len(names) != len(x): + raise ValueError('number of image names are not equal to the number of ' + 'poses {} != {}'.format(len(names), len(x))) + output = {} + for i in range(n): + if rot[i].shape[0] != 0: + assert rot[i].shape[0] == 3 + assert rot[i].shape[1] == 3 + output[names[i]] = (x[i], z[i], rot[i], scale) + else: + output[names[i]] = (x[i], z[i], None, scale) + + return output + + +def read_cached_data(should_load_images, dataset_root, segmentation_file_name, + targets_file_name, output_size): + """Reads all the necessary cached data. + + Args: + should_load_images: whether to load the images or not. + dataset_root: path to the root of the dataset. + segmentation_file_name: The name of the file that contains semantic + segmentation annotations. + targets_file_name: The name of the file the contains targets annotated for + each world. + output_size: Size of the output images. This is used for pre-processing the + loaded images. + Returns: + Dictionary of all the cached data. + """ + + load_start = time.time() + result_data = {} + + annotated_target_path = os.path.join(dataset_root, 'Meta', + targets_file_name + '.npy') + + logging.info('loading targets: %s', annotated_target_path) + with tf.gfile.Open(annotated_target_path) as f: + result_data['targets'] = np.load(f).item() + + depth_image_path = os.path.join(dataset_root, 'Meta/depth_imgs.npy') + logging.info('loading depth: %s', depth_image_path) + with tf.gfile.Open(depth_image_path) as f: + depth_data = np.load(f).item() + + logging.info('processing depth') + for home_id in depth_data: + images = depth_data[home_id] + for image_id in images: + depth = images[image_id] + depth = cv2.resize( + depth / _MAX_DEPTH_VALUE, (output_size, output_size), + interpolation=cv2.INTER_NEAREST) + depth_mask = (depth > 0).astype(np.float32) + depth = np.dstack((depth, depth_mask)) + images[image_id] = depth + result_data[task_env.ModalityTypes.DEPTH] = depth_data + + sseg_path = os.path.join(dataset_root, 'Meta', + segmentation_file_name + '.npy') + logging.info('loading sseg: %s', sseg_path) + with tf.gfile.Open(sseg_path) as f: + sseg_data = np.load(f).item() + + logging.info('processing sseg') + for home_id in sseg_data: + images = sseg_data[home_id] + for image_id in images: + sseg = images[image_id] + sseg = cv2.resize( + sseg, (output_size, output_size), interpolation=cv2.INTER_NEAREST) + images[image_id] = np.expand_dims(sseg, axis=-1).astype(np.float32) + result_data[task_env.ModalityTypes.SEMANTIC_SEGMENTATION] = sseg_data + + if should_load_images: + image_path = os.path.join(dataset_root, 'Meta/imgs.npy') + logging.info('loading imgs: %s', image_path) + with tf.gfile.Open(image_path) as f: + image_data = np.load(f).item() + + result_data[task_env.ModalityTypes.IMAGE] = image_data + + with tf.gfile.Open(os.path.join(dataset_root, 'Meta/world_id_dict.npy')) as f: + result_data['world_id_dict'] = np.load(f).item() + + logging.info('logging done in %f seconds', time.time() - load_start) + return result_data + + +@gin.configurable +def get_spec_dtype_map(): + return {gym.spaces.Box: np.float32} + + 
+@gin.configurable +class ActiveVisionDatasetEnv(task_env.TaskEnv): + """Simulates the environment from ActiveVisionDataset.""" + cached_data = None + + def __init__( + self, + episode_length, + modality_types, + confidence_threshold, + output_size, + worlds, + targets, + compute_distance, + should_draw_detections, + dataset_root, + labelmap_path, + reward_collision, + reward_goal_range, + num_detection_classes, + segmentation_file_name, + detection_folder_name, + actions, + targets_file_name, + eval_init_points_file_name=None, + shaped_reward=False, + ): + """Instantiates the environment for ActiveVision Dataset. + + Args: + episode_length: the length of each episode. + modality_types: a list of the strings where each entry indicates the name + of the modalities to be loaded. Valid entries are "sseg", "det", + "depth", "image", "distance", and "prev_action". "distance" should be + used for computing metrics in tf agents. + confidence_threshold: Consider detections more than confidence_threshold + for potential targets. + output_size: Resolution of the output image. + worlds: List of the name of the worlds. + targets: List of the target names. Each entry is a string label of the + target category (e.g. 'fridge', 'microwave', so on). + compute_distance: If True, outputs the distance of the view to the goal. + should_draw_detections (bool): If True, the image returned for the + observation will contains the bounding boxes. + dataset_root: the path to the root folder of the dataset. + labelmap_path: path to the dictionary that converts label strings to + indexes. + reward_collision: the reward the agents get after hitting an obstacle. + It should be a non-positive number. + reward_goal_range: the number of steps from goal, such that the agent is + considered to have reached the goal. If the agent's distance is less + than the specified goal range, the episode is also finishes by setting + done = True. + num_detection_classes: number of classes that detector outputs. + segmentation_file_name: the name of the file that contains the semantic + information. The file should be in the dataset_root/Meta/ folder. + detection_folder_name: Name of the folder that contains the detections + for each world. The folder should be under dataset_root/Meta/ folder. + actions: The list of the action names. Valid entries are listed in + SUPPORTED_ACTIONS. + targets_file_name: the name of the file that contains the annotated + targets. The file should be in the dataset_root/Meta/Folder + eval_init_points_file_name: The name of the file that contains the initial + points for evaluating the performance of the agent. If set to None, + episodes start at random locations. Should be only set for evaluation. + shaped_reward: Whether to add delta goal distance to the reward each step. + + Raises: + ValueError: If one of the targets are not available in the annotated + targets or the modality names are not from the domain specified above. + ValueError: If one of the actions is not in SUPPORTED_ACTIONS. + ValueError: If the reward_collision is a positive number. + ValueError: If there is no action other than stop provided. 
+ """ + if reward_collision > 0: + raise ValueError('"reward" for collision should be non positive') + + if reward_goal_range < 0: + logging.warning('environment does not terminate the episode if the agent ' + 'is too close to the environment') + + if not modality_types: + raise ValueError('modality names can not be empty') + + for name in modality_types: + if name not in SUPPORTED_MODALITIES: + raise ValueError('invalid modality type: {}'.format(name)) + + actions_other_than_stop_found = False + for a in actions: + if a != 'stop': + actions_other_than_stop_found = True + if a not in SUPPORTED_ACTIONS: + raise ValueError('invalid action %s', a) + + if not actions_other_than_stop_found: + raise ValueError('environment needs to have actions other than stop.') + + super(ActiveVisionDatasetEnv, self).__init__() + + self._episode_length = episode_length + self._modality_types = set(modality_types) + self._confidence_threshold = confidence_threshold + self._output_size = output_size + self._dataset_root = dataset_root + self._worlds = worlds + self._targets = targets + self._all_graph = {} + for world in self._worlds: + with tf.gfile.Open(_get_json_path(self._dataset_root, world), 'r') as f: + file_content = f.read() + file_content = file_content.replace('.jpg', '') + io = StringIO(file_content) + self._all_graph[world] = json.load(io) + + self._cur_world = '' + self._cur_image_id = '' + self._cur_graph = None # Loaded by _update_graph + self._steps_taken = 0 + self._last_action_success = True + self._category_index = _init_category_index(labelmap_path) + self._category_map = dict( + [(c, i) for i, c in enumerate(self._category_index)]) + self._detection_cache = {} + if not ActiveVisionDatasetEnv.cached_data: + ActiveVisionDatasetEnv.cached_data = read_cached_data( + True, self._dataset_root, segmentation_file_name, targets_file_name, + self._output_size) + cached_data = ActiveVisionDatasetEnv.cached_data + + self._world_id_dict = cached_data['world_id_dict'] + self._depth_images = cached_data[task_env.ModalityTypes.DEPTH] + self._semantic_segmentations = cached_data[ + task_env.ModalityTypes.SEMANTIC_SEGMENTATION] + self._annotated_targets = cached_data['targets'] + self._cached_imgs = cached_data[task_env.ModalityTypes.IMAGE] + self._graph_cache = {} + self._compute_distance = compute_distance + self._should_draw_detections = should_draw_detections + self._reward_collision = reward_collision + self._reward_goal_range = reward_goal_range + self._num_detection_classes = num_detection_classes + self._actions = actions + self._detection_folder_name = detection_folder_name + self._shaped_reward = shaped_reward + + self._eval_init_points = None + if eval_init_points_file_name is not None: + self._eval_init_index = 0 + init_points_path = os.path.join(self._dataset_root, 'Meta', + eval_init_points_file_name + '.npy') + with tf.gfile.Open(init_points_path) as points_file: + data = np.load(points_file).item() + self._eval_init_points = [] + for world in self._worlds: + for goal in self._targets: + if world in self._annotated_targets[goal]: + for image_id in data[world]: + self._eval_init_points.append((world, image_id[0], goal)) + logging.info('loaded %d eval init points', len(self._eval_init_points)) + + self.action_space = gym.spaces.Discrete(len(self._actions)) + + obs_shapes = {} + if task_env.ModalityTypes.SEMANTIC_SEGMENTATION in self._modality_types: + obs_shapes[task_env.ModalityTypes.SEMANTIC_SEGMENTATION] = gym.spaces.Box( + low=0, high=255, shape=(self._output_size, self._output_size, 1)) + 
if task_env.ModalityTypes.OBJECT_DETECTION in self._modality_types: + obs_shapes[task_env.ModalityTypes.OBJECT_DETECTION] = gym.spaces.Box( + low=0, + high=255, + shape=(self._output_size, self._output_size, + self._num_detection_classes)) + if task_env.ModalityTypes.DEPTH in self._modality_types: + obs_shapes[task_env.ModalityTypes.DEPTH] = gym.spaces.Box( + low=0, + high=_MAX_DEPTH_VALUE, + shape=(self._output_size, self._output_size, 2)) + if task_env.ModalityTypes.IMAGE in self._modality_types: + obs_shapes[task_env.ModalityTypes.IMAGE] = gym.spaces.Box( + low=0, high=255, shape=(self._output_size, self._output_size, 3)) + if task_env.ModalityTypes.GOAL in self._modality_types: + obs_shapes[task_env.ModalityTypes.GOAL] = gym.spaces.Box( + low=0, high=1., shape=(len(self._targets),)) + if task_env.ModalityTypes.PREV_ACTION in self._modality_types: + obs_shapes[task_env.ModalityTypes.PREV_ACTION] = gym.spaces.Box( + low=0, high=1., shape=(len(self._actions) + 1,)) + if task_env.ModalityTypes.DISTANCE in self._modality_types: + obs_shapes[task_env.ModalityTypes.DISTANCE] = gym.spaces.Box( + low=0, high=255, shape=(1,)) + self.observation_space = gym.spaces.Dict(obs_shapes) + + self._prev_action = np.zeros((len(self._actions) + 1), dtype=np.float32) + + # Loading all the poses. + all_poses = {} + for world in self._worlds: + all_poses[world] = read_all_poses(self._dataset_root, world) + self._cached_poses = all_poses + self._vertex_to_pose = {} + self._pose_to_vertex = {} + + @property + def actions(self): + """Returns list of actions for the env.""" + return self._actions + + def _next_image(self, image_id, action): + """Given the action, returns the name of the image that agent ends up in. + + Args: + image_id: The image id of the current view. + action: valid actions are ['right', 'rotate_cw', 'rotate_ccw', + 'forward', 'left']. Each rotation is 30 degrees. + + Returns: + The image name for the next location of the agent. If the action results + in collision or it is not possible for the agent to execute that action, + returns empty string. + """ + assert action in self._actions, 'invalid action : {}'.format(action) + assert self._cur_world in self._all_graph, 'invalid world {}'.format( + self._cur_world) + assert image_id in self._all_graph[ + self._cur_world], 'image_id {} is not in {}'.format( + image_id, self._cur_world) + return self._all_graph[self._cur_world][image_id][action] + + def _largest_detection_for_image(self, image_id, detections_dict): + """Assigns area of the largest box for the view with given image id. + + Args: + image_id: Image id of the view. + detections_dict: Detections for the view. + """ + for cls, box, score in zip(detections_dict['detection_classes'], + detections_dict['detection_boxes'], + detections_dict['detection_scores']): + if cls not in self._targets: + continue + if score < self._confidence_threshold: + continue + ymin, xmin, ymax, xmax = box + area = (ymax - ymin) * (xmax - xmin) + if abs(area) < 1e-5: + continue + if image_id not in self._detection_area: + self._detection_area[image_id] = area + else: + self._detection_area[image_id] = max(self._detection_area[image_id], + area) + + def _compute_goal_indexes(self): + """Computes the goal indexes for the environment. + + Returns: + The indexes of the goals that are closest to target categories. A vertex + is goal vertice if the desired objects are detected in the image and the + target categories are not seen by moving forward from that vertice. 
+ """ + for image_id in self._world_id_dict[self._cur_world]: + detections_dict = self._detection_table[image_id] + self._largest_detection_for_image(image_id, detections_dict) + goal_indexes = [] + for image_id in self._world_id_dict[self._cur_world]: + if image_id not in self._detection_area: + continue + # Detection box is large enough. + if self._detection_area[image_id] < 0.01: + continue + ok = True + next_image_id = self._next_image(image_id, 'forward') + if next_image_id: + if next_image_id in self._detection_area: + ok = False + if ok: + goal_indexes.append(self._cur_graph.id_to_index[image_id]) + return goal_indexes + + def to_image_id(self, vid): + """Converts vertex id to the image id. + + Args: + vid: vertex id of the view. + Returns: + image id of the input vertex id. + """ + return self._cur_graph.index_to_id[vid] + + def to_vertex(self, image_id): + return self._cur_graph.id_to_index[image_id] + + def observation(self, view_pose): + """Returns the observation at the given the vertex. + + Args: + view_pose: pose of the view of interest. + + Returns: + Observation at the given view point. + + Raises: + ValueError: if the given view pose is not similar to any of the poses in + the current world. + """ + vertex = self.pose_to_vertex(view_pose) + if vertex is None: + raise ValueError('The given found is not close enough to any of the poses' + ' in the environment.') + image_id = self._cur_graph.index_to_id[vertex] + output = collections.OrderedDict() + + if task_env.ModalityTypes.SEMANTIC_SEGMENTATION in self._modality_types: + output[task_env.ModalityTypes. + SEMANTIC_SEGMENTATION] = self._semantic_segmentations[ + self._cur_world][image_id] + + detection = None + need_det = ( + task_env.ModalityTypes.OBJECT_DETECTION in self._modality_types or + (task_env.ModalityTypes.IMAGE in self._modality_types and + self._should_draw_detections)) + if need_det: + detection = self._detection_table[image_id] + detection_image = generate_detection_image( + detection, + self._output_size, + self._category_map, + num_classes=self._num_detection_classes) + + if task_env.ModalityTypes.OBJECT_DETECTION in self._modality_types: + output[task_env.ModalityTypes.OBJECT_DETECTION] = detection_image + + if task_env.ModalityTypes.DEPTH in self._modality_types: + output[task_env.ModalityTypes.DEPTH] = self._depth_images[ + self._cur_world][image_id] + + if task_env.ModalityTypes.IMAGE in self._modality_types: + output_img = self._cached_imgs[self._cur_world][image_id] + if self._should_draw_detections: + output_img = output_img.copy() + _draw_detections(output_img, detection, self._category_index) + output[task_env.ModalityTypes.IMAGE] = output_img + + if task_env.ModalityTypes.GOAL in self._modality_types: + goal = np.zeros((len(self._targets),), dtype=np.float32) + goal[self._targets.index(self._cur_goal)] = 1. + output[task_env.ModalityTypes.GOAL] = goal + + if task_env.ModalityTypes.PREV_ACTION in self._modality_types: + output[task_env.ModalityTypes.PREV_ACTION] = self._prev_action + + if task_env.ModalityTypes.DISTANCE in self._modality_types: + output[task_env.ModalityTypes.DISTANCE] = np.asarray( + [self.gt_value(self._cur_goal, vertex)], dtype=np.float32) + + return output + + def _step_no_reward(self, action): + """Performs a step in the environment with given action. + + Args: + action: Action that is used to step in the environment. Action can be + string or integer. If the type is integer then it uses the ith element + from self._actions list. Otherwise, uses the string value as the action. 
+ + Returns: + observation, done, info + observation: dictonary that contains all the observations specified in + modality_types. + observation[task_env.ModalityTypes.OBJECT_DETECTION]: contains the + detection of the current view. + observation[task_env.ModalityTypes.IMAGE]: contains the + image of the current view. Note that if using the images for training, + should_load_images should be set to false. + observation[task_env.ModalityTypes.SEMANTIC_SEGMENTATION]: contains the + semantic segmentation of the current view. + observation[task_env.ModalityTypes.DEPTH]: If selected, returns the + depth map for the current view. + observation[task_env.ModalityTypes.PREV_ACTION]: If selected, returns + a numpy of (action_size + 1,). The first action_size elements indicate + the action and the last element indicates whether the previous action + was successful or not. + done: True after episode_length steps have been taken, False otherwise. + info: Empty dictionary. + + Raises: + ValueError: for invalid actions. + """ + # Primarily used for gym interface. + if not isinstance(action, str): + if not self.action_space.contains(action): + raise ValueError('Not a valid actions: %d', action) + + action = self._actions[action] + + if action not in self._actions: + raise ValueError('Not a valid action: %s', action) + + action_index = self._actions.index(action) + + if action == 'stop': + next_image_id = self._cur_image_id + done = True + success = True + else: + next_image_id = self._next_image(self._cur_image_id, action) + self._steps_taken += 1 + done = False + success = True + if not next_image_id: + success = False + else: + self._cur_image_id = next_image_id + + if self._steps_taken >= self._episode_length: + done = True + + cur_vertex = self._cur_graph.id_to_index[self._cur_image_id] + observation = self.observation(self.vertex_to_pose(cur_vertex)) + + # Concatenation of one-hot prev action + a binary number for success of + # previous actions. + self._prev_action = np.zeros((len(self._actions) + 1,), dtype=np.float32) + self._prev_action[action_index] = 1. + self._prev_action[-1] = float(success) + + distance_to_goal = self.gt_value(self._cur_goal, cur_vertex) + if success: + if distance_to_goal <= self._reward_goal_range: + done = True + + return observation, done, {'success': success} + + @property + def graph(self): + return self._cur_graph.graph + + def state(self): + return self.vertex_to_pose(self.to_vertex(self._cur_image_id)) + + def gt_value(self, goal, v): + """Computes the distance to the goal from vertex v. + + Args: + goal: name of the goal. + v: vertex id. + + Returns: + Minimmum number of steps to the given goal. 
+ """ + assert goal in self._cur_graph.distance_to_goal, 'goal: {}'.format(goal) + assert v in self._cur_graph.distance_to_goal[goal] + res = self._cur_graph.distance_to_goal[goal][v] + return res + + def _update_graph(self): + """Creates the graph for each environment and updates the _cur_graph.""" + if self._cur_world not in self._graph_cache: + graph = nx.DiGraph() + id_to_index = {} + index_to_id = {} + image_list = self._world_id_dict[self._cur_world] + for i, image_id in enumerate(image_list): + id_to_index[image_id] = i + index_to_id[i] = image_id + graph.add_node(i) + + for image_id in image_list: + for action in self._actions: + if action == 'stop': + continue + next_image = self._all_graph[self._cur_world][image_id][action] + if next_image: + graph.add_edge( + id_to_index[image_id], id_to_index[next_image], action=action) + target_indexes = {} + number_of_nodes_without_targets = graph.number_of_nodes() + distance_to_goal = {} + for goal in self._targets: + if self._cur_world not in self._annotated_targets[goal]: + continue + goal_indexes = [ + id_to_index[i] + for i in self._annotated_targets[goal][self._cur_world] + if i + ] + super_source_index = graph.number_of_nodes() + target_indexes[goal] = super_source_index + graph.add_node(super_source_index) + index_to_id[super_source_index] = goal + id_to_index[goal] = super_source_index + for v in goal_indexes: + graph.add_edge(v, super_source_index, action='stop') + graph.add_edge(super_source_index, v, action='stop') + distance_to_goal[goal] = {} + for v in range(number_of_nodes_without_targets): + distance_to_goal[goal][v] = len( + nx.shortest_path(graph, v, super_source_index)) - 2 + + self._graph_cache[self._cur_world] = _Graph( + graph, id_to_index, index_to_id, target_indexes, distance_to_goal) + self._cur_graph = self._graph_cache[self._cur_world] + + def reset_for_eval(self, new_world, new_goal, new_image_id): + """Resets to the given goal and image_id.""" + return self._reset_env(new_world=new_world, new_goal=new_goal, new_image_id=new_image_id) + + def get_init_config(self, path): + """Exposes the initial state of the agent for the given path. + + Args: + path: sequences of the vertexes that the agent moves. + + Returns: + image_id of the first view, world, and the goal. + """ + return self._cur_graph.index_to_id[path[0]], self._cur_world, self._cur_goal + + def _reset_env( + self, + new_world=None, + new_goal=None, + new_image_id=None, + ): + """Resets the agent in a random world and random id. + + Args: + new_world: If not None, sets the new world to new_world. + new_goal: If not None, sets the new goal to new_goal. + new_image_id: If not None, sets the first image id to new_image_id. + + Returns: + observation: dictionary of the observations. Content of the observation + is similar to that of the step function. + Raises: + ValueError: if it can't find a world and annotated goal. + """ + self._steps_taken = 0 + # The first prev_action is special all zero vector + success=1. + self._prev_action = np.zeros((len(self._actions) + 1,), dtype=np.float32) + self._prev_action[len(self._actions)] = 1. 
+ if self._eval_init_points is not None: + if self._eval_init_index >= len(self._eval_init_points): + self._eval_init_index = 0 + a = self._eval_init_points[self._eval_init_index] + self._cur_world, self._cur_image_id, self._cur_goal = a + self._eval_init_index += 1 + elif not new_world: + attempts = 100 + found = False + while attempts >= 0: + attempts -= 1 + self._cur_goal = np.random.choice(self._targets) + available_worlds = list( + set(self._annotated_targets[self._cur_goal].keys()).intersection( + set(self._worlds))) + if available_worlds: + found = True + break + if not found: + raise ValueError('could not find a world that has a target annotated') + self._cur_world = np.random.choice(available_worlds) + else: + self._cur_world = new_world + self._cur_goal = new_goal + if new_world not in self._annotated_targets[new_goal]: + return None + + self._cur_goal_index = self._targets.index(self._cur_goal) + if new_image_id: + self._cur_image_id = new_image_id + else: + self._cur_image_id = np.random.choice( + self._world_id_dict[self._cur_world]) + if self._cur_world not in self._detection_cache: + with tf.gfile.Open( + _get_detection_path(self._dataset_root, self._detection_folder_name, + self._cur_world)) as f: + # Each file contains a dictionary with image ids as keys and detection + # dicts as values. + self._detection_cache[self._cur_world] = np.load(f).item() + self._detection_table = self._detection_cache[self._cur_world] + self._detection_area = {} + self._update_graph() + if self._cur_world not in self._vertex_to_pose: + # adding fake pose for the super nodes of each target categories. + self._vertex_to_pose[self._cur_world] = { + index: (-index,) for index in self._cur_graph.target_indexes.values() + } + # Calling vetex_to_pose for each vertex results in filling out the + # dictionaries that contain pose related data. + for image_id in self._world_id_dict[self._cur_world]: + self.vertex_to_pose(self.to_vertex(image_id)) + + # Filling out pose_to_vertex from vertex_to_pose. + self._pose_to_vertex[self._cur_world] = { + tuple(v): k + for k, v in self._vertex_to_pose[self._cur_world].iteritems() + } + + cur_vertex = self._cur_graph.id_to_index[self._cur_image_id] + observation = self.observation(self.vertex_to_pose(cur_vertex)) + return observation + + def cur_vertex(self): + return self._cur_graph.id_to_index[self._cur_image_id] + + def cur_image_id(self): + return self._cur_image_id + + def path_to_goal(self, image_id=None): + """Returns the path from image_id to the self._cur_goal. + + Args: + image_id: If set to None, computes the path from the current view. + Otherwise, sets the current view to the given image_id. + Returns: + The path to the goal. + Raises: + Exception if there's no path from the view to the goal. + """ + if image_id is None: + image_id = self._cur_image_id + super_source = self._cur_graph.target_indexes[self._cur_goal] + try: + path = nx.shortest_path(self._cur_graph.graph, + self._cur_graph.id_to_index[image_id], + super_source) + except: + print 'path not found, image_id = ', self._cur_world, self._cur_image_id + raise + return path[:-1] + + def targets(self): + return [self.vertex_to_pose(self._cur_graph.target_indexes[self._cur_goal])] + + def vertex_to_pose(self, v): + """Returns pose of the view for a given vertex. + + Args: + v: integer, vertex index. + + Returns: + (x, z, dir_x, dir_z) where x and z are the tranlation and dir_x, dir_z are + a vector giving direction of the view. 
+ """ + if v in self._vertex_to_pose[self._cur_world]: + return np.copy(self._vertex_to_pose[self._cur_world][v]) + + x, z, rot, scale = self._cached_poses[self._cur_world][self.to_image_id( + v)] + if rot is None: # if rotation is not provided for the given vertex. + self._vertex_to_pose[self._cur_world][v] = np.asarray( + [x * scale, z * scale, v]) + return np.copy(self._vertex_to_pose[self._cur_world][v]) + # Multiply rotation matrix by [0,0,1] to get a vector of length 1 in the + # direction of the ray. + direction = np.zeros((3, 1), dtype=np.float32) + direction[2][0] = 1 + direction = np.matmul(np.transpose(rot), direction) + direction = [direction[0][0], direction[2][0]] + self._vertex_to_pose[self._cur_world][v] = np.asarray( + [x * scale, z * scale, direction[0], direction[1]]) + return np.copy(self._vertex_to_pose[self._cur_world][v]) + + def pose_to_vertex(self, pose): + """Returns the vertex id for the given pose.""" + if tuple(pose) not in self._pose_to_vertex[self._cur_world]: + raise ValueError( + 'The given pose is not present in the dictionary: {}'.format( + tuple(pose))) + + return self._pose_to_vertex[self._cur_world][tuple(pose)] + + def check_scene_graph(self, world, goal): + """Checks the connectivity of the scene graph. + + Goes over all the views. computes the shortest path to the goal. If it + crashes it means that it's not connected. Otherwise, the env graph is fine. + + Args: + world: the string name of the world. + goal: the string label for the goal. + Returns: + Nothing. + """ + obs = self._reset_env(new_world=world, new_goal=goal) + if not obs: + print '{} is not availble in {}'.format(goal, world) + return True + for image_id in self._world_id_dict[self._cur_world]: + print 'check image_id = {}'.format(image_id) + self._cur_image_id = image_id + path = self.path_to_goal() + actions = [] + for i in range(len(path) - 2): + actions.append(self.action(path[i], path[i + 1])) + actions.append('stop') + + @property + def goal_one_hot(self): + res = np.zeros((len(self._targets),), dtype=np.float32) + res[self._cur_goal_index] = 1. + return res + + @property + def goal_index(self): + return self._cur_goal_index + + @property + def goal_string(self): + return self._cur_goal + + @property + def worlds(self): + return self._worlds + + @property + def possible_targets(self): + return self._targets + + def action(self, from_pose, to_pose): + """Returns the action that takes source vertex to destination vertex. + + Args: + from_pose: pose of the source. + to_pose: pose of the destination. + Returns: + Returns the index of the action. + Raises: + ValueError: If it is not possible to go from the first vertice to second + vertice with one action, it raises value error. + """ + from_index = self.pose_to_vertex(from_pose) + to_index = self.pose_to_vertex(to_pose) + if to_index not in self.graph[from_index]: + from_image_id = self.to_image_id(from_index) + to_image_id = self.to_image_id(to_index) + raise ValueError('{},{} is not connected to {},{}'.format( + from_index, from_image_id, to_index, to_image_id)) + return self._actions.index(self.graph[from_index][to_index]['action']) + + def random_step_sequence(self, min_len=None, max_len=None): + """Generates random step sequence that takes agent to the goal. + + Args: + min_len: integer, minimum length of a step sequence. Not yet implemented. + max_len: integer, should be set to an integer and it is the maximum number + of observations and path length to be max_len. + Returns: + Tuple of (path, actions, states, step_outputs). 
+ path: a random path from a random starting point and random environment. + actions: actions of the returned path. + states: viewpoints of all the states in between. + step_outputs: list of step() return tuples. + Raises: + ValueError: if first_n is not greater than zero; if min_len is different + from None. + """ + if max_len is None: + raise ValueError('max_len can not be set as None') + if max_len < 1: + raise ValueError('first_n must be greater or equal to 1.') + if min_len is not None: + raise ValueError('min_len is not yet implemented.') + + path = [] + actions = [] + states = [] + step_outputs = [] + obs = self.reset() + last_obs_tuple = [obs, 0, False, {}] + for _ in xrange(max_len): + action = np.random.choice(self._actions) + # We don't want to sample stop action because stop does not add new + # information. + while action == 'stop': + action = np.random.choice(self._actions) + path.append(self.to_vertex(self._cur_image_id)) + onehot = np.zeros((len(self._actions),), dtype=np.float32) + onehot[self._actions.index(action)] = 1. + actions.append(onehot) + states.append(self.vertex_to_pose(path[-1])) + step_outputs.append(copy.deepcopy(last_obs_tuple)) + last_obs_tuple = self.step(action) + + return path, actions, states, step_outputs diff --git a/models/research/cognitive_planning/envs/configs/active_vision_config.gin b/models/research/cognitive_planning/envs/configs/active_vision_config.gin new file mode 100644 index 0000000000000000000000000000000000000000..edb10dc1f5339fc6fc2857ae8870d1df8f497263 --- /dev/null +++ b/models/research/cognitive_planning/envs/configs/active_vision_config.gin @@ -0,0 +1,27 @@ +#-*-Python-*- +ActiveVisionDatasetEnv.episode_length = 200 +ActiveVisionDatasetEnv.actions = [ + 'right', 'rotate_cw', 'rotate_ccw', 'forward', 'left', 'backward', 'stop' +] +ActiveVisionDatasetEnv.confidence_threshold = 0.5 +ActiveVisionDatasetEnv.output_size = 64 +ActiveVisionDatasetEnv.worlds = [ + 'Home_001_1', 'Home_001_2', 'Home_002_1', 'Home_003_1', 'Home_003_2', + 'Home_004_1', 'Home_004_2', 'Home_005_1', 'Home_005_2', 'Home_006_1', + 'Home_007_1', 'Home_010_1', 'Home_011_1', 'Home_013_1', 'Home_014_1', + 'Home_014_2', 'Home_015_1', 'Home_016_1' +] +ActiveVisionDatasetEnv.targets = [ + 'tv', 'dining_table', 'fridge', 'microwave', 'couch' +] +ActiveVisionDatasetEnv.compute_distance = False +ActiveVisionDatasetEnv.should_draw_detections = False +ActiveVisionDatasetEnv.dataset_root = '/usr/local/google/home/kosecka/AVD_Minimal/' +ActiveVisionDatasetEnv.labelmap_path = 'label_map.txt' +ActiveVisionDatasetEnv.reward_collision = 0 +ActiveVisionDatasetEnv.reward_goal_range = 2 +ActiveVisionDatasetEnv.num_detection_classes = 90 +ActiveVisionDatasetEnv.segmentation_file_name='sseg_crf' +ActiveVisionDatasetEnv.detection_folder_name='Detections' +ActiveVisionDatasetEnv.targets_file_name='annotated_targets' +ActiveVisionDatasetEnv.shaped_reward=False diff --git a/models/research/cognitive_planning/envs/task_env.py b/models/research/cognitive_planning/envs/task_env.py new file mode 100644 index 0000000000000000000000000000000000000000..84d527cd2e4e09b587fa47f2a98a6df1592915e9 --- /dev/null +++ b/models/research/cognitive_planning/envs/task_env.py @@ -0,0 +1,218 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
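A gin file such as the one above only declares constructor bindings; the sketch below shows one plausible way it would be consumed at startup. The script layout and the import path of ActiveVisionDatasetEnv are assumptions for illustration, not part of this change.

import gin

from envs import active_vision_dataset_env  # assumed location of the env class

gin.parse_config_file(
    'models/research/cognitive_planning/envs/configs/active_vision_config.gin')
# Constructor arguments (episode_length, actions, worlds, targets, ...) are now
# supplied by the gin bindings above.
env = active_vision_dataset_env.ActiveVisionDatasetEnv()
first_observation = env.reset()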
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""An interface representing the topology of an environment. + +Allows for high level planning and high level instruction generation for +navigation tasks. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import abc +import enum +import gym +import gin + + +@gin.config.constants_from_enum +class ModalityTypes(enum.Enum): + """Types of the modalities that can be used.""" + IMAGE = 0 + SEMANTIC_SEGMENTATION = 1 + OBJECT_DETECTION = 2 + DEPTH = 3 + GOAL = 4 + PREV_ACTION = 5 + PREV_SUCCESS = 6 + STATE = 7 + DISTANCE = 8 + CAN_STEP = 9 + + def __lt__(self, other): + if self.__class__ is other.__class__: + return self.value < other.value + return NotImplemented + + +class TaskEnvInterface(object): + """Interface for an environment topology. + + An environment can implement this interface if there is a topological graph + underlying this environment. All paths below are defined as paths in this + graph. Using path_to_actions function one can translate a topological path + to a geometric path in the environment. + """ + + __metaclass__ = abc.ABCMeta + + @abc.abstractmethod + def random_step_sequence(self, min_len=None, max_len=None): + """Generates a random sequence of actions and executes them. + + Args: + min_len: integer, minimum length of a step sequence. + max_len: integer, if it is set to non-None, the method returns only + the first n steps of a random sequence. If the environment is + computationally heavy this argument should be set to speed up the + training and avoid unnecessary computations by the environment. + + Returns: + A path, defined as a list of vertex indices, a list of actions, a list of + states, and a list of step() return tuples. + """ + raise NotImplementedError( + 'Needs implementation as part of EnvTopology interface.') + + @abc.abstractmethod + def targets(self): + """A list of targets in the environment. + + Returns: + A list of target locations. + """ + raise NotImplementedError( + 'Needs implementation as part of EnvTopology interface.') + + @abc.abstractproperty + def state(self): + """Returns the position for the current location of agent.""" + raise NotImplementedError( + 'Needs implementation as part of EnvTopology interface.') + + @abc.abstractproperty + def graph(self): + """Returns a graph representing the environment topology. + + Returns: + nx.Graph object. + """ + raise NotImplementedError( + 'Needs implementation as part of EnvTopology interface.') + + @abc.abstractmethod + def vertex_to_pose(self, vertex_index): + """Maps a vertex index to a pose in the environment. + + Pose of the camera can be represented by (x,y,theta) or (x,y,z,theta). + Args: + vertex_index: index of a vertex in the topology graph. + + Returns: + A np.array of floats of size 3 or 4 representing the pose of the vertex. 
+ """ + raise NotImplementedError( + 'Needs implementation as part of EnvTopology interface.') + + @abc.abstractmethod + def pose_to_vertex(self, pose): + """Maps a coordinate in the maze to the closest vertex in topology graph. + + Args: + pose: np.array of floats containing a the pose of the view. + + Returns: + index of a vertex. + """ + raise NotImplementedError( + 'Needs implementation as part of EnvTopology interface.') + + @abc.abstractmethod + def observation(self, state): + """Returns observation at location xy and orientation theta. + + Args: + state: a np.array of floats containing coordinates of a location and + orientation. + + Returns: + Dictionary of observations in the case of multiple observations. + The keys are the modality names and the values are the np.array of float + of observations for corresponding modality. + """ + raise NotImplementedError( + 'Needs implementation as part of EnvTopology interface.') + + def action(self, init_state, final_state): + """Computes the transition action from state1 to state2. + + If the environment is discrete and the views are not adjacent in the + environment. i.e. it is not possible to move from the first view to the + second view with one action it should return None. In the continuous case, + it will be the continuous difference of first view and second view. + + Args: + init_state: numpy array, the initial view of the agent. + final_state: numpy array, the final view of the agent. + """ + raise NotImplementedError( + 'Needs implementation as part of EnvTopology interface.') + + +@gin.configurable +class TaskEnv(gym.Env, TaskEnvInterface): + """An environment which uses a Task to compute reward. + + The environment implements a a gym interface, as well as EnvTopology. The + former makes sure it can be used within an RL training, while the latter + makes sure it can be used by a Task. + + This environment requires _step_no_reward to be implemented, which steps + through it but does not return reward. Instead, the reward calculation is + delegated to the Task object, which in return can access needed properties + of the environment. These properties are exposed via the EnvTopology + interface. + """ + + def __init__(self, task=None): + self._task = task + + def set_task(self, task): + self._task = task + + @abc.abstractmethod + def _step_no_reward(self, action): + """Same as _step without returning reward. + + Args: + action: see _step. + + Returns: + state, done, info as defined in _step. + """ + raise NotImplementedError('Implement step.') + + @abc.abstractmethod + def _reset_env(self): + """Resets the environment. Returns initial observation.""" + raise NotImplementedError('Implement _reset. Must call super!') + + def step(self, action): + obs, done, info = self._step_no_reward(action) + + reward = 0.0 + if self._task is not None: + obs, reward, done, info = self._task.reward(obs, done, info) + + return obs, reward, done, info + + def reset(self): + """Resets the environment. Gym API.""" + obs = self._reset_env() + if self._task is not None: + self._task.reset(obs) + return obs diff --git a/models/research/cognitive_planning/envs/util.py b/models/research/cognitive_planning/envs/util.py new file mode 100644 index 0000000000000000000000000000000000000000..32a384bc5fab8f75ed62e1c2fa3b1f8f832b228d --- /dev/null +++ b/models/research/cognitive_planning/envs/util.py @@ -0,0 +1,55 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A module with utility functions. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + + +def trajectory_to_deltas(trajectory, state): + """Computes a sequence of deltas of a state to traverse a trajectory in 2D. + + The initial state of the agent contains its pose -- location in 2D and + orientation. When the computed deltas are incrementally added to it, it + traverses the specified trajectory while keeping its orientation parallel to + the trajectory. + + Args: + trajectory: a np.array of floats of shape n x 2. The n-th row contains the + n-th point. + state: a 3 element np.array of floats containing agent's location and + orientation in radians. + + Returns: + A np.array of floats of size n x 3. + """ + state = np.reshape(state, [-1]) + init_xy = state[0:2] + init_theta = state[2] + + delta_xy = trajectory - np.concatenate( + [np.reshape(init_xy, [1, 2]), trajectory[:-1, :]], axis=0) + + thetas = np.reshape(np.arctan2(delta_xy[:, 1], delta_xy[:, 0]), [-1, 1]) + thetas = np.concatenate([np.reshape(init_theta, [1, 1]), thetas], axis=0) + delta_thetas = thetas[1:] - thetas[:-1] + + deltas = np.concatenate([delta_xy, delta_thetas], axis=1) + return deltas diff --git a/models/research/cognitive_planning/label_map.txt b/models/research/cognitive_planning/label_map.txt new file mode 100644 index 0000000000000000000000000000000000000000..1f4872bd0c7f53e70beecf88af005c07a5df9e08 --- /dev/null +++ b/models/research/cognitive_planning/label_map.txt @@ -0,0 +1,400 @@ +item { + name: "/m/01g317" + id: 1 + display_name: "person" +} +item { + name: "/m/0199g" + id: 2 + display_name: "bicycle" +} +item { + name: "/m/0k4j" + id: 3 + display_name: "car" +} +item { + name: "/m/04_sv" + id: 4 + display_name: "motorcycle" +} +item { + name: "/m/05czz6l" + id: 5 + display_name: "airplane" +} +item { + name: "/m/01bjv" + id: 6 + display_name: "bus" +} +item { + name: "/m/07jdr" + id: 7 + display_name: "train" +} +item { + name: "/m/07r04" + id: 8 + display_name: "truck" +} +item { + name: "/m/019jd" + id: 9 + display_name: "boat" +} +item { + name: "/m/015qff" + id: 10 + display_name: "traffic light" +} +item { + name: "/m/01pns0" + id: 11 + display_name: "fire hydrant" +} +item { + name: "/m/02pv19" + id: 13 + display_name: "stop sign" +} +item { + name: "/m/015qbp" + id: 14 + display_name: "parking meter" +} +item { + name: "/m/0cvnqh" + id: 15 + display_name: "bench" +} +item { + name: "/m/015p6" + id: 16 + display_name: "bird" +} +item { + name: "/m/01yrx" + id: 17 + display_name: "cat" +} +item { + name: "/m/0bt9lr" + id: 18 + display_name: "dog" +} +item { + name: "/m/03k3r" + id: 19 + display_name: "horse" +} +item { + name: "/m/07bgp" + id: 20 + display_name: "sheep" +} +item { + name: "/m/01xq0k1" + id: 21 + display_name: "cow" +} +item { + name: "/m/0bwd_0j" + id: 22 + 
display_name: "elephant" +} +item { + name: "/m/01dws" + id: 23 + display_name: "bear" +} +item { + name: "/m/0898b" + id: 24 + display_name: "zebra" +} +item { + name: "/m/03bk1" + id: 25 + display_name: "giraffe" +} +item { + name: "/m/01940j" + id: 27 + display_name: "backpack" +} +item { + name: "/m/0hnnb" + id: 28 + display_name: "umbrella" +} +item { + name: "/m/080hkjn" + id: 31 + display_name: "handbag" +} +item { + name: "/m/01rkbr" + id: 32 + display_name: "tie" +} +item { + name: "/m/01s55n" + id: 33 + display_name: "suitcase" +} +item { + name: "/m/02wmf" + id: 34 + display_name: "frisbee" +} +item { + name: "/m/071p9" + id: 35 + display_name: "skis" +} +item { + name: "/m/06__v" + id: 36 + display_name: "snowboard" +} +item { + name: "/m/018xm" + id: 37 + display_name: "sports ball" +} +item { + name: "/m/02zt3" + id: 38 + display_name: "kite" +} +item { + name: "/m/03g8mr" + id: 39 + display_name: "baseball bat" +} +item { + name: "/m/03grzl" + id: 40 + display_name: "baseball glove" +} +item { + name: "/m/06_fw" + id: 41 + display_name: "skateboard" +} +item { + name: "/m/019w40" + id: 42 + display_name: "surfboard" +} +item { + name: "/m/0dv9c" + id: 43 + display_name: "tennis racket" +} +item { + name: "/m/04dr76w" + id: 44 + display_name: "bottle" +} +item { + name: "/m/09tvcd" + id: 46 + display_name: "wine glass" +} +item { + name: "/m/08gqpm" + id: 47 + display_name: "cup" +} +item { + name: "/m/0dt3t" + id: 48 + display_name: "fork" +} +item { + name: "/m/04ctx" + id: 49 + display_name: "knife" +} +item { + name: "/m/0cmx8" + id: 50 + display_name: "spoon" +} +item { + name: "/m/04kkgm" + id: 51 + display_name: "bowl" +} +item { + name: "/m/09qck" + id: 52 + display_name: "banana" +} +item { + name: "/m/014j1m" + id: 53 + display_name: "apple" +} +item { + name: "/m/0l515" + id: 54 + display_name: "sandwich" +} +item { + name: "/m/0cyhj_" + id: 55 + display_name: "orange" +} +item { + name: "/m/0hkxq" + id: 56 + display_name: "broccoli" +} +item { + name: "/m/0fj52s" + id: 57 + display_name: "carrot" +} +item { + name: "/m/01b9xk" + id: 58 + display_name: "hot dog" +} +item { + name: "/m/0663v" + id: 59 + display_name: "pizza" +} +item { + name: "/m/0jy4k" + id: 60 + display_name: "donut" +} +item { + name: "/m/0fszt" + id: 61 + display_name: "cake" +} +item { + name: "/m/01mzpv" + id: 62 + display_name: "chair" +} +item { + name: "/m/02crq1" + id: 63 + display_name: "couch" +} +item { + name: "/m/03fp41" + id: 64 + display_name: "potted plant" +} +item { + name: "/m/03ssj5" + id: 65 + display_name: "bed" +} +item { + name: "/m/04bcr3" + id: 67 + display_name: "dining table" +} +item { + name: "/m/09g1w" + id: 70 + display_name: "toilet" +} +item { + name: "/m/07c52" + id: 72 + display_name: "tv" +} +item { + name: "/m/01c648" + id: 73 + display_name: "laptop" +} +item { + name: "/m/020lf" + id: 74 + display_name: "mouse" +} +item { + name: "/m/0qjjc" + id: 75 + display_name: "remote" +} +item { + name: "/m/01m2v" + id: 76 + display_name: "keyboard" +} +item { + name: "/m/050k8" + id: 77 + display_name: "cell phone" +} +item { + name: "/m/0fx9l" + id: 78 + display_name: "microwave" +} +item { + name: "/m/029bxz" + id: 79 + display_name: "oven" +} +item { + name: "/m/01k6s3" + id: 80 + display_name: "toaster" +} +item { + name: "/m/0130jx" + id: 81 + display_name: "sink" +} +item { + name: "/m/040b_t" + id: 82 + display_name: "refrigerator" +} +item { + name: "/m/0bt_c3" + id: 84 + display_name: "book" +} +item { + name: "/m/01x3z" + id: 85 + display_name: "clock" +} 
+item { + name: "/m/02s195" + id: 86 + display_name: "vase" +} +item { + name: "/m/01lsmm" + id: 87 + display_name: "scissors" +} +item { + name: "/m/0kmg4" + id: 88 + display_name: "teddy bear" +} +item { + name: "/m/03wvsk" + id: 89 + display_name: "hair drier" +} +item { + name: "/m/012xff" + id: 90 + display_name: "toothbrush" +} diff --git a/models/research/cognitive_planning/label_map_util.py b/models/research/cognitive_planning/label_map_util.py new file mode 100644 index 0000000000000000000000000000000000000000..e258e3ab57fbe0de3aeb664e64f5df5a6dc5111d --- /dev/null +++ b/models/research/cognitive_planning/label_map_util.py @@ -0,0 +1,181 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Label map utility functions.""" + +import logging + +import tensorflow as tf +from google.protobuf import text_format +import string_int_label_map_pb2 + + +def _validate_label_map(label_map): + """Checks if a label map is valid. + + Args: + label_map: StringIntLabelMap to validate. + + Raises: + ValueError: if label map is invalid. + """ + for item in label_map.item: + if item.id < 0: + raise ValueError('Label map ids should be >= 0.') + if (item.id == 0 and item.name != 'background' and + item.display_name != 'background'): + raise ValueError('Label map id 0 is reserved for the background label') + + +def create_category_index(categories): + """Creates dictionary of COCO compatible categories keyed by category id. + + Args: + categories: a list of dicts, each of which has the following keys: + 'id': (required) an integer id uniquely identifying this category. + 'name': (required) string representing category name + e.g., 'cat', 'dog', 'pizza'. + + Returns: + category_index: a dict containing the same entries as categories, but keyed + by the 'id' field of each category. + """ + category_index = {} + for cat in categories: + category_index[cat['id']] = cat + return category_index + + +def get_max_label_map_index(label_map): + """Get maximum index in label map. + + Args: + label_map: a StringIntLabelMapProto + + Returns: + an integer + """ + return max([item.id for item in label_map.item]) + + +def convert_label_map_to_categories(label_map, + max_num_classes, + use_display_name=True): + """Loads label map proto and returns categories list compatible with eval. + + This function loads a label map and returns a list of dicts, each of which + has the following keys: + 'id': (required) an integer id uniquely identifying this category. + 'name': (required) string representing category name + e.g., 'cat', 'dog', 'pizza'. + We only allow class into the list if its id-label_id_offset is + between 0 (inclusive) and max_num_classes (exclusive). + If there are several items mapping to the same id in the label map, + we will only keep the first one in the categories list. + + Args: + label_map: a StringIntLabelMapProto or None. 
If None, a default categories + list is created with max_num_classes categories. + max_num_classes: maximum number of (consecutive) label indices to include. + use_display_name: (boolean) choose whether to load 'display_name' field + as category name. If False or if the display_name field does not exist, + uses 'name' field as category names instead. + Returns: + categories: a list of dictionaries representing all possible categories. + """ + categories = [] + list_of_ids_already_added = [] + if not label_map: + label_id_offset = 1 + for class_id in range(max_num_classes): + categories.append({ + 'id': class_id + label_id_offset, + 'name': 'category_{}'.format(class_id + label_id_offset) + }) + return categories + for item in label_map.item: + if not 0 < item.id <= max_num_classes: + logging.info('Ignore item %d since it falls outside of requested ' + 'label range.', item.id) + continue + if use_display_name and item.HasField('display_name'): + name = item.display_name + else: + name = item.name + if item.id not in list_of_ids_already_added: + list_of_ids_already_added.append(item.id) + categories.append({'id': item.id, 'name': name}) + return categories + + +def load_labelmap(path): + """Loads label map proto. + + Args: + path: path to StringIntLabelMap proto text file. + Returns: + a StringIntLabelMapProto + """ + with tf.gfile.GFile(path, 'r') as fid: + label_map_string = fid.read() + label_map = string_int_label_map_pb2.StringIntLabelMap() + try: + text_format.Merge(label_map_string, label_map) + except text_format.ParseError: + label_map.ParseFromString(label_map_string) + _validate_label_map(label_map) + return label_map + + +def get_label_map_dict(label_map_path, use_display_name=False): + """Reads a label map and returns a dictionary of label names to id. + + Args: + label_map_path: path to label_map. + use_display_name: whether to use the label map items' display names as keys. + + Returns: + A dictionary mapping label names to id. + """ + label_map = load_labelmap(label_map_path) + label_map_dict = {} + for item in label_map.item: + if use_display_name: + label_map_dict[item.display_name] = item.id + else: + label_map_dict[item.name] = item.id + return label_map_dict + + +def create_category_index_from_labelmap(label_map_path): + """Reads a label map and returns a category index. + + Args: + label_map_path: Path to `StringIntLabelMap` proto text file. + + Returns: + A category index, which is a dictionary that maps integer ids to dicts + containing categories, e.g. + {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}, ...} + """ + label_map = load_labelmap(label_map_path) + max_num_classes = max(item.id for item in label_map.item) + categories = convert_label_map_to_categories(label_map, max_num_classes) + return create_category_index(categories) + + +def create_class_agnostic_category_index(): + """Creates a category index with a single `object` class.""" + return {1: {'id': 1, 'name': 'object'}} diff --git a/models/research/cognitive_planning/policies.py b/models/research/cognitive_planning/policies.py new file mode 100644 index 0000000000000000000000000000000000000000..5c7e2207db1302c3fd1d8bff3e30eaba022480fd --- /dev/null +++ b/models/research/cognitive_planning/policies.py @@ -0,0 +1,474 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
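A short usage sketch for the helpers above, using the label_map.txt shipped in this directory (the relative path mirrors the labelmap_path gin binding and is an assumption here):

import label_map_util

# Display name -> class id, e.g. 'tv' -> 72, 'refrigerator' -> 82.
name_to_id = label_map_util.get_label_map_dict(
    'label_map.txt', use_display_name=True)

# Category index keyed by class id, as used when decoding detector outputs.
category_index = label_map_util.create_category_index_from_labelmap(
    'label_map.txt')
assert category_index[72]['name'] == 'tv'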
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Interface for the policy of the agents use for navigation.""" + +import abc +import tensorflow as tf +from absl import logging +import embedders +from envs import task_env + +slim = tf.contrib.slim + +def _print_debug_ios(history, goal, output): + """Prints sizes of history, goal and outputs.""" + if history is not None: + shape = history.get_shape().as_list() + # logging.info('history embedding shape ') + # logging.info(shape) + if len(shape) != 3: + raise ValueError('history Tensor must have rank=3') + if goal is not None: + logging.info('goal embedding shape ') + logging.info(goal.get_shape().as_list()) + if output is not None: + logging.info('targets shape ') + logging.info(output.get_shape().as_list()) + + +class Policy(object): + """Represents the policy of the agent for navigation tasks. + + Instantiates a policy that takes embedders for each modality and builds a + model to infer the actions. + """ + __metaclass__ = abc.ABCMeta + + def __init__(self, embedders_dict, action_size): + """Instantiates the policy. + + Args: + embedders_dict: Dictionary of embedders for different modalities. Keys + should be identical to keys of observation modality. + action_size: Number of possible actions. + """ + self._embedders = embedders_dict + self._action_size = action_size + + @abc.abstractmethod + def build(self, observations, prev_state): + """Builds the model that represents the policy of the agent. + + Args: + observations: Dictionary of observations from different modalities. Keys + are the name of the modalities. + prev_state: The tensor of the previous state of the model. Should be set + to None if the policy is stateless + Returns: + Tuple of (action, state) where action is the action logits and state is + the state of the model after taking new observation. + """ + raise NotImplementedError( + 'Needs implementation as part of Policy interface') + + +class LSTMPolicy(Policy): + """Represents the implementation of the LSTM based policy. + + The architecture of the model is as follows. It embeds all the observations + using the embedders, concatenates the embeddings of all the modalities. Feed + them through two fully connected layers. The lstm takes the features from + fully connected layer and the previous action and success of previous action + and feed them to LSTM. The value for each action is predicted afterwards. + Although the class name has the word LSTM in it, it also supports a mode that + builds the network without LSTM just for comparison purposes. + """ + + def __init__(self, + modality_names, + embedders_dict, + action_size, + params, + max_episode_length, + feedforward_mode=False): + """Instantiates the LSTM policy. + + Args: + modality_names: List of modality names. Makes sure the ordering in + concatenation remains the same as modality_names list. Each modality + needs to be in the embedders_dict. + embedders_dict: Dictionary of embedders for different modalities. Keys + should be identical to keys of observation modality. Values should be + instance of Embedder class. 
All the observations except PREV_ACTION + requires embedder. + action_size: Number of possible actions. + params: is instance of tf.hparams and contains the hyperparameters for the + policy network. + max_episode_length: integer, specifying the maximum length of each + episode. + feedforward_mode: If True, it does not add LSTM to the model. It should + only be set True for comparison between LSTM and feedforward models. + """ + super(LSTMPolicy, self).__init__(embedders_dict, action_size) + + self._modality_names = modality_names + + self._lstm_state_size = params.lstm_state_size + self._fc_channels = params.fc_channels + self._weight_decay = params.weight_decay + self._target_embedding_size = params.target_embedding_size + self._max_episode_length = max_episode_length + self._feedforward_mode = feedforward_mode + + def _build_lstm(self, encoded_inputs, prev_state, episode_length, + prev_action=None): + """Builds an LSTM on top of the encoded inputs. + + If prev_action is not None then it concatenates them to the input of LSTM. + + Args: + encoded_inputs: The embedding of the observations and goal. + prev_state: previous state of LSTM. + episode_length: The tensor that contains the length of the sequence for + each element of the batch. + prev_action: tensor to previous chosen action and additional bit for + indicating whether the previous action was successful or not. + + Returns: + a tuple of (lstm output, lstm state). + """ + + # Adding prev action and success in addition to the embeddings of the + # modalities. + if prev_action is not None: + encoded_inputs = tf.concat([encoded_inputs, prev_action], axis=-1) + + with tf.variable_scope('LSTM'): + lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(self._lstm_state_size) + if prev_state is None: + # If prev state is set to None, a state of all zeros will be + # passed as a previous value for the cell. Should be used for the + # first step of each episode. + tf_prev_state = lstm_cell.zero_state( + encoded_inputs.get_shape().as_list()[0], dtype=tf.float32) + else: + tf_prev_state = tf.nn.rnn_cell.LSTMStateTuple(prev_state[0], + prev_state[1]) + + lstm_outputs, lstm_state = tf.nn.dynamic_rnn( + cell=lstm_cell, + inputs=encoded_inputs, + sequence_length=episode_length, + initial_state=tf_prev_state, + dtype=tf.float32, + ) + lstm_outputs = tf.reshape(lstm_outputs, [-1, lstm_cell.output_size]) + return lstm_outputs, lstm_state + + def build( + self, + observations, + prev_state, + ): + """Builds the model that represents the policy of the agent. + + Args: + observations: Dictionary of observations from different modalities. Keys + are the name of the modalities. Observation should have the following + key-values. + observations['goal']: One-hot tensor that indicates the semantic + category of the goal. The shape should be + (batch_size x max_sequence_length x goals). + observations[task_env.ModalityTypes.PREV_ACTION]: has action_size + 1 + elements where the first action_size numbers are the one hot vector + of the previous action and the last element indicates whether the + previous action was successful or not. If + task_env.ModalityTypes.PREV_ACTION is not in the observation, it + will not be used in the policy. + prev_state: Previous state of the model. It should be a tuple of (c,h) + where c and h are the previous cell value and hidden state of the lstm. + Each element of tuple has shape of (batch_size x lstm_cell_size). + If it is set to None, then it initializes the state of the lstm with all + zeros. 
+ + Returns: + Tuple of (action, state) where action is the action logits and state is + the state of the model after taking new observation. + Raises: + ValueError: If any of the modality names is not in observations or + embedders_dict. + ValueError: If 'goal' is not in the observations. + """ + + for modality_name in self._modality_names: + if modality_name not in observations: + raise ValueError('modality name does not exist in observations: {} not ' + 'in {}'.format(modality_name, observations.keys())) + if modality_name not in self._embedders: + if modality_name == task_env.ModalityTypes.PREV_ACTION: + continue + raise ValueError('modality name does not have corresponding embedder' + ' {} not in {}'.format(modality_name, + self._embedders.keys())) + + if task_env.ModalityTypes.GOAL not in observations: + raise ValueError('goal should be provided in the observations') + + goal = observations[task_env.ModalityTypes.GOAL] + prev_action = None + if task_env.ModalityTypes.PREV_ACTION in observations: + prev_action = observations[task_env.ModalityTypes.PREV_ACTION] + + with tf.variable_scope('policy'): + with slim.arg_scope( + [slim.fully_connected], + activation_fn=tf.nn.relu, + weights_initializer=tf.truncated_normal_initializer(stddev=0.01), + weights_regularizer=slim.l2_regularizer(self._weight_decay)): + all_inputs = [] + + # Concatenating the embedding of each modality by applying the embedders + # to corresponding observations. + def embed(name): + with tf.variable_scope('embed_{}'.format(name)): + # logging.info('Policy uses embedding %s', name) + return self._embedders[name].build(observations[name]) + + all_inputs = map(embed, [ + x for x in self._modality_names + if x != task_env.ModalityTypes.PREV_ACTION + ]) + + # Computing goal embedding. + shape = goal.get_shape().as_list() + with tf.variable_scope('embed_goal'): + encoded_goal = tf.reshape(goal, [shape[0] * shape[1], -1]) + encoded_goal = slim.fully_connected(encoded_goal, + self._target_embedding_size) + encoded_goal = tf.reshape(encoded_goal, [shape[0], shape[1], -1]) + all_inputs.append(encoded_goal) + + # Concatenating all the modalities and goal. + all_inputs = tf.concat(all_inputs, axis=-1, name='concat_embeddings') + + shape = all_inputs.get_shape().as_list() + all_inputs = tf.reshape(all_inputs, [shape[0] * shape[1], shape[2]]) + + # Applying fully connected layers. + encoded_inputs = slim.fully_connected(all_inputs, self._fc_channels) + encoded_inputs = slim.fully_connected(encoded_inputs, self._fc_channels) + + if not self._feedforward_mode: + encoded_inputs = tf.reshape(encoded_inputs, + [shape[0], shape[1], self._fc_channels]) + lstm_outputs, lstm_state = self._build_lstm( + encoded_inputs=encoded_inputs, + prev_state=prev_state, + episode_length=tf.ones((shape[0],), dtype=tf.float32) * + self._max_episode_length, + prev_action=prev_action, + ) + else: + # If feedforward_mode=True, directly compute bypass the whole LSTM + # computations. 
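        # (That is, the fully connected features above are passed straight to
        # the output layers; no recurrent state is created or returned.)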
+ lstm_outputs = encoded_inputs + + lstm_outputs = slim.fully_connected(lstm_outputs, self._fc_channels) + action_values = slim.fully_connected( + lstm_outputs, self._action_size, activation_fn=None) + action_values = tf.reshape(action_values, [shape[0], shape[1], -1]) + if not self._feedforward_mode: + return action_values, lstm_state + else: + return action_values, None + + +class TaskPolicy(Policy): + """A covenience abstract class providing functionality to deal with Tasks.""" + + def __init__(self, + task_config, + model_hparams=None, + embedder_hparams=None, + train_hparams=None): + """Constructs a policy which knows how to work with tasks (see tasks.py). + + It allows to read task history, goal and outputs in consistency with the + task config. + + Args: + task_config: an object of type tasks.TaskIOConfig (see tasks.py) + model_hparams: a tf.HParams object containing parameter pertaining to + model (these are implementation specific) + embedder_hparams: a tf.HParams object containing parameter pertaining to + history, goal embedders (these are implementation specific) + train_hparams: a tf.HParams object containing parameter pertaining to + trainin (these are implementation specific)` + """ + super(TaskPolicy, self).__init__(None, None) + self._model_hparams = model_hparams + self._embedder_hparams = embedder_hparams + self._train_hparams = train_hparams + self._task_config = task_config + self._extra_train_ops = [] + + @property + def extra_train_ops(self): + """Training ops in addition to the loss, e.g. batch norm updates. + + Returns: + A list of tf ops. + """ + return self._extra_train_ops + + def _embed_task_ios(self, streams): + """Embeds a list of heterogenous streams. + + These streams correspond to task history, goal and output. The number of + streams is equal to the total number of history, plus one for the goal if + present, plus one for the output. If the number of history is k, then the + first k streams are the history. + + The used embedders depend on the input (or goal) types. If an input is an + image, then a ResNet embedder is used, otherwise + MLPEmbedder (see embedders.py). + + Args: + streams: a list of Tensors. + Returns: + Three float Tensors history, goal, output. If there are no history, or no + goal, then the corresponding returned values are None. The shape of the + embedded history is batch_size x sequence_length x sum of all embedding + dimensions for all history. The shape of the goal is embedding dimension. + """ + # EMBED history. + index = 0 + inps = [] + scopes = [] + for c in self._task_config.inputs: + if c == task_env.ModalityTypes.IMAGE: + scope_name = 'image_embedder/image' + reuse = scope_name in scopes + scopes.append(scope_name) + with tf.variable_scope(scope_name, reuse=reuse): + resnet_embedder = embedders.ResNet(self._embedder_hparams.image) + image_embeddings = resnet_embedder.build(streams[index]) + # Uncover batch norm ops. + if self._embedder_hparams.image.is_train: + self._extra_train_ops += resnet_embedder.extra_train_ops + inps.append(image_embeddings) + index += 1 + else: + scope_name = 'input_embedder/vector' + reuse = scope_name in scopes + scopes.append(scope_name) + with tf.variable_scope(scope_name, reuse=reuse): + input_vector_embedder = embedders.MLPEmbedder( + layers=self._embedder_hparams.vector) + vector_embedder = input_vector_embedder.build(streams[index]) + inps.append(vector_embedder) + index += 1 + history = tf.concat(inps, axis=2) if inps else None + + # EMBED goal. 
+ goal = None + if self._task_config.query is not None: + scope_name = 'image_embedder/query' + reuse = scope_name in scopes + scopes.append(scope_name) + with tf.variable_scope(scope_name, reuse=reuse): + resnet_goal_embedder = embedders.ResNet(self._embedder_hparams.goal) + goal = resnet_goal_embedder.build(streams[index]) + if self._embedder_hparams.goal.is_train: + self._extra_train_ops += resnet_goal_embedder.extra_train_ops + index += 1 + + # Embed true targets if needed (tbd). + true_target = streams[index] + + return history, goal, true_target + + @abc.abstractmethod + def build(self, feeds, prev_state): + pass + + +class ReactivePolicy(TaskPolicy): + """A policy which ignores history. + + It processes only the current observation (last element in history) and the + goal to output a prediction. + """ + + def __init__(self, *args, **kwargs): + super(ReactivePolicy, self).__init__(*args, **kwargs) + + # The current implementation ignores the prev_state as it is purely reactive. + # It returns None for the current state. + def build(self, feeds, prev_state): + history, goal, _ = self._embed_task_ios(feeds) + _print_debug_ios(history, goal, None) + + with tf.variable_scope('output_decoder'): + # Concatenate the embeddings of the current observation and the goal. + reactive_input = tf.concat([tf.squeeze(history[:, -1, :]), goal], axis=1) + oconfig = self._task_config.output.shape + assert len(oconfig) == 1 + decoder = embedders.MLPEmbedder( + layers=self._embedder_hparams.predictions.layer_sizes + oconfig) + predictions = decoder.build(reactive_input) + + return predictions, None + + +class RNNPolicy(TaskPolicy): + """A policy which takes into account the full history via RNN. + + The implementation might and will change. + The history, together with the goal, is processed using a stacked LSTM. The + output of the last LSTM step is used to produce a prediction. Currently, only + a single step output is supported. + """ + + def __init__(self, lstm_hparams, *args, **kwargs): + super(RNNPolicy, self).__init__(*args, **kwargs) + self._lstm_hparams = lstm_hparams + + # The prev_state is ignored as for now the full history is specified as first + # element of the feeds. It might turn out to be beneficial to keep the state + # as part of the policy object. + def build(self, feeds, state): + history, goal, _ = self._embed_task_ios(feeds) + _print_debug_ios(history, goal, None) + + params = self._lstm_hparams + cell = lambda: tf.contrib.rnn.BasicLSTMCell(params.cell_size) + stacked_lstm = tf.contrib.rnn.MultiRNNCell( + [cell() for _ in range(params.num_layers)]) + # history is of shape batch_size x seq_len x embedding_dimension + batch_size, seq_len, _ = tuple(history.get_shape().as_list()) + + if state is None: + state = stacked_lstm.zero_state(batch_size, tf.float32) + for t in range(seq_len): + if params.concat_goal_everywhere: + lstm_input = tf.concat([tf.squeeze(history[:, t, :]), goal], axis=1) + else: + lstm_input = tf.squeeze(history[:, t, :]) + output, state = stacked_lstm(lstm_input, state) + + with tf.variable_scope('output_decoder'): + oconfig = self._task_config.output.shape + assert len(oconfig) == 1 + features = tf.concat([output, goal], axis=1) + assert len(output.get_shape().as_list()) == 2 + assert len(goal.get_shape().as_list()) == 2 + decoder = embedders.MLPEmbedder( + layers=self._embedder_hparams.predictions.layer_sizes + oconfig) + # Prediction is done off the last step lstm output and the goal. 
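      # `features` concatenates the final LSTM output [batch_size, cell_size]
      # with the goal embedding [batch_size, goal_dim]; the decoder MLP maps it
      # to the task output shape given by `oconfig`.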
+ predictions = decoder.build(features) + + return predictions, state diff --git a/models/research/cognitive_planning/preprocessing/__init__.py b/models/research/cognitive_planning/preprocessing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/models/research/cognitive_planning/preprocessing/__init__.py @@ -0,0 +1 @@ + diff --git a/models/research/cognitive_planning/preprocessing/cifarnet_preprocessing.py b/models/research/cognitive_planning/preprocessing/cifarnet_preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..0b5a88fa4c31c13dca4cf92a5ee7614f08a935af --- /dev/null +++ b/models/research/cognitive_planning/preprocessing/cifarnet_preprocessing.py @@ -0,0 +1,128 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Provides utilities to preprocess images in CIFAR-10. + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +_PADDING = 4 + +slim = tf.contrib.slim + + +def preprocess_for_train(image, + output_height, + output_width, + padding=_PADDING, + add_image_summaries=True): + """Preprocesses the given image for training. + + Note that the actual resizing scale is sampled from + [`resize_size_min`, `resize_size_max`]. + + Args: + image: A `Tensor` representing an image of arbitrary size. + output_height: The height of the image after preprocessing. + output_width: The width of the image after preprocessing. + padding: The amound of padding before and after each dimension of the image. + add_image_summaries: Enable image summaries. + + Returns: + A preprocessed image. + """ + if add_image_summaries: + tf.summary.image('image', tf.expand_dims(image, 0)) + + # Transform the image to floats. + image = tf.to_float(image) + if padding > 0: + image = tf.pad(image, [[padding, padding], [padding, padding], [0, 0]]) + # Randomly crop a [height, width] section of the image. + distorted_image = tf.random_crop(image, + [output_height, output_width, 3]) + + # Randomly flip the image horizontally. + distorted_image = tf.image.random_flip_left_right(distorted_image) + + if add_image_summaries: + tf.summary.image('distorted_image', tf.expand_dims(distorted_image, 0)) + + # Because these operations are not commutative, consider randomizing + # the order their operation. + distorted_image = tf.image.random_brightness(distorted_image, + max_delta=63) + distorted_image = tf.image.random_contrast(distorted_image, + lower=0.2, upper=1.8) + # Subtract off the mean and divide by the variance of the pixels. + return tf.image.per_image_standardization(distorted_image) + + +def preprocess_for_eval(image, output_height, output_width, + add_image_summaries=True): + """Preprocesses the given image for evaluation. 
+ + Args: + image: A `Tensor` representing an image of arbitrary size. + output_height: The height of the image after preprocessing. + output_width: The width of the image after preprocessing. + add_image_summaries: Enable image summaries. + + Returns: + A preprocessed image. + """ + if add_image_summaries: + tf.summary.image('image', tf.expand_dims(image, 0)) + # Transform the image to floats. + image = tf.to_float(image) + + # Resize and crop if needed. + resized_image = tf.image.resize_image_with_crop_or_pad(image, + output_width, + output_height) + if add_image_summaries: + tf.summary.image('resized_image', tf.expand_dims(resized_image, 0)) + + # Subtract off the mean and divide by the variance of the pixels. + return tf.image.per_image_standardization(resized_image) + + +def preprocess_image(image, output_height, output_width, is_training=False, + add_image_summaries=True): + """Preprocesses the given image. + + Args: + image: A `Tensor` representing an image of arbitrary size. + output_height: The height of the image after preprocessing. + output_width: The width of the image after preprocessing. + is_training: `True` if we're preprocessing the image for training and + `False` otherwise. + add_image_summaries: Enable image summaries. + + Returns: + A preprocessed image. + """ + if is_training: + return preprocess_for_train( + image, output_height, output_width, + add_image_summaries=add_image_summaries) + else: + return preprocess_for_eval( + image, output_height, output_width, + add_image_summaries=add_image_summaries) diff --git a/models/research/cognitive_planning/preprocessing/inception_preprocessing.py b/models/research/cognitive_planning/preprocessing/inception_preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..846b81b3cd30985f2aa55e6f7c471195ac6e046b --- /dev/null +++ b/models/research/cognitive_planning/preprocessing/inception_preprocessing.py @@ -0,0 +1,318 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Provides utilities to preprocess images for the Inception networks.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from tensorflow.python.ops import control_flow_ops + + +def apply_with_random_selector(x, func, num_cases): + """Computes func(x, sel), with sel sampled from [0...num_cases-1]. + + Args: + x: input Tensor. + func: Python function to apply. + num_cases: Python int32, number of cases to sample sel from. + + Returns: + The result of func(x, sel), where func receives the value of the + selector as a python integer, but sel is sampled dynamically. + """ + sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32) + # Pass the real x only to one of the func calls. 
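  # `switch` routes `x` into exactly one branch (the one where sel == case),
  # `func(x, case)` runs only in that branch, and `merge` returns the single
  # branch that actually executed, so one randomly chosen transform is applied.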
+ return control_flow_ops.merge([ + func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case) + for case in range(num_cases)])[0] + + +def distort_color(image, color_ordering=0, fast_mode=True, scope=None): + """Distort the color of a Tensor image. + + Each color distortion is non-commutative and thus ordering of the color ops + matters. Ideally we would randomly permute the ordering of the color ops. + Rather then adding that level of complication, we select a distinct ordering + of color ops for each preprocessing thread. + + Args: + image: 3-D Tensor containing single image in [0, 1]. + color_ordering: Python int, a type of distortion (valid values: 0-3). + fast_mode: Avoids slower ops (random_hue and random_contrast) + scope: Optional scope for name_scope. + Returns: + 3-D Tensor color-distorted image on range [0, 1] + Raises: + ValueError: if color_ordering not in [0, 3] + """ + with tf.name_scope(scope, 'distort_color', [image]): + if fast_mode: + if color_ordering == 0: + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + else: + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_brightness(image, max_delta=32. / 255.) + else: + if color_ordering == 0: + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.2) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + elif color_ordering == 1: + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.2) + elif color_ordering == 2: + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.2) + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + elif color_ordering == 3: + image = tf.image.random_hue(image, max_delta=0.2) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + image = tf.image.random_brightness(image, max_delta=32. / 255.) + else: + raise ValueError('color_ordering must be in [0, 3]') + + # The random_* ops do not necessarily clamp. + return tf.clip_by_value(image, 0.0, 1.0) + + +def distorted_bounding_box_crop(image, + bbox, + min_object_covered=0.1, + aspect_ratio_range=(0.75, 1.33), + area_range=(0.05, 1.0), + max_attempts=100, + scope=None): + """Generates cropped_image using a one of the bboxes randomly distorted. + + See `tf.image.sample_distorted_bounding_box` for more documentation. + + Args: + image: 3-D Tensor of image (it will be converted to floats in [0, 1]). + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged + as [ymin, xmin, ymax, xmax]. If num_boxes is 0 then it would use the whole + image. + min_object_covered: An optional `float`. Defaults to `0.1`. The cropped + area of the image must contain at least this fraction of any bounding box + supplied. + aspect_ratio_range: An optional list of `floats`. The cropped area of the + image must have an aspect ratio = width / height within this range. + area_range: An optional list of `floats`. 
The cropped area of the image + must contain a fraction of the supplied image within in this range. + max_attempts: An optional `int`. Number of attempts at generating a cropped + region of the image of the specified constraints. After `max_attempts` + failures, return the entire image. + scope: Optional scope for name_scope. + Returns: + A tuple, a 3-D Tensor cropped_image and the distorted bbox + """ + with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]): + # Each bounding box has shape [1, num_boxes, box coords] and + # the coordinates are ordered [ymin, xmin, ymax, xmax]. + + # A large fraction of image datasets contain a human-annotated bounding + # box delineating the region of the image containing the object of interest. + # We choose to create a new bounding box for the object which is a randomly + # distorted version of the human-annotated bounding box that obeys an + # allowed range of aspect ratios, sizes and overlap with the human-annotated + # bounding box. If no box is supplied, then we assume the bounding box is + # the entire image. + sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( + tf.shape(image), + bounding_boxes=bbox, + min_object_covered=min_object_covered, + aspect_ratio_range=aspect_ratio_range, + area_range=area_range, + max_attempts=max_attempts, + use_image_if_no_bounding_boxes=True) + bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box + + # Crop the image to the specified bounding box. + cropped_image = tf.slice(image, bbox_begin, bbox_size) + return cropped_image, distort_bbox + + +def preprocess_for_train(image, height, width, bbox, + fast_mode=True, + scope=None, + add_image_summaries=True): + """Distort one image for training a network. + + Distorting images provides a useful technique for augmenting the data + set during training in order to make the network invariant to aspects + of the image that do not effect the label. + + Additionally it would create image_summaries to display the different + transformations applied to the image. + + Args: + image: 3-D Tensor of image. If dtype is tf.float32 then the range should be + [0, 1], otherwise it would converted to tf.float32 assuming that the range + is [0, MAX], where MAX is largest positive representable number for + int(8/16/32) data type (see `tf.image.convert_image_dtype` for details). + height: integer + width: integer + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged + as [ymin, xmin, ymax, xmax]. + fast_mode: Optional boolean, if True avoids slower transformations (i.e. + bi-cubic resizing, random_hue or random_contrast). + scope: Optional scope for name_scope. + add_image_summaries: Enable image summaries. + Returns: + 3-D float Tensor of distorted image used for training with range [-1, 1]. + """ + with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]): + if bbox is None: + bbox = tf.constant([0.0, 0.0, 1.0, 1.0], + dtype=tf.float32, + shape=[1, 1, 4]) + if image.dtype != tf.float32: + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + # Each bounding box has shape [1, num_boxes, box coords] and + # the coordinates are ordered [ymin, xmin, ymax, xmax]. 
+ image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), + bbox) + if add_image_summaries: + tf.summary.image('image_with_bounding_boxes', image_with_box) + + distorted_image, distorted_bbox = distorted_bounding_box_crop(image, bbox) + # Restore the shape since the dynamic slice based upon the bbox_size loses + # the third dimension. + distorted_image.set_shape([None, None, 3]) + image_with_distorted_box = tf.image.draw_bounding_boxes( + tf.expand_dims(image, 0), distorted_bbox) + if add_image_summaries: + tf.summary.image('images_with_distorted_bounding_box', + image_with_distorted_box) + + # This resizing operation may distort the images because the aspect + # ratio is not respected. We select a resize method in a round robin + # fashion based on the thread number. + # Note that ResizeMethod contains 4 enumerated resizing methods. + + # We select only 1 case for fast_mode bilinear. + num_resize_cases = 1 if fast_mode else 4 + distorted_image = apply_with_random_selector( + distorted_image, + lambda x, method: tf.image.resize_images(x, [height, width], method), + num_cases=num_resize_cases) + + if add_image_summaries: + tf.summary.image('cropped_resized_image', + tf.expand_dims(distorted_image, 0)) + + # Randomly flip the image horizontally. + distorted_image = tf.image.random_flip_left_right(distorted_image) + + # Randomly distort the colors. There are 1 or 4 ways to do it. + num_distort_cases = 1 if fast_mode else 4 + distorted_image = apply_with_random_selector( + distorted_image, + lambda x, ordering: distort_color(x, ordering, fast_mode), + num_cases=num_distort_cases) + + if add_image_summaries: + tf.summary.image('final_distorted_image', + tf.expand_dims(distorted_image, 0)) + distorted_image = tf.subtract(distorted_image, 0.5) + distorted_image = tf.multiply(distorted_image, 2.0) + return distorted_image + + +def preprocess_for_eval(image, height, width, + central_fraction=0.875, scope=None): + """Prepare one image for evaluation. + + If height and width are specified it would output an image with that size by + applying resize_bilinear. + + If central_fraction is specified it would crop the central fraction of the + input image. + + Args: + image: 3-D Tensor of image. If dtype is tf.float32 then the range should be + [0, 1], otherwise it would converted to tf.float32 assuming that the range + is [0, MAX], where MAX is largest positive representable number for + int(8/16/32) data type (see `tf.image.convert_image_dtype` for details). + height: integer + width: integer + central_fraction: Optional Float, fraction of the image to crop. + scope: Optional scope for name_scope. + Returns: + 3-D float Tensor of prepared image. + """ + with tf.name_scope(scope, 'eval_image', [image, height, width]): + if image.dtype != tf.float32: + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + # Crop the central region of the image with an area containing 87.5% of + # the original image. + if central_fraction: + image = tf.image.central_crop(image, central_fraction=central_fraction) + + if height and width: + # Resize the image to the specified height and width. 
+ image = tf.expand_dims(image, 0) + image = tf.image.resize_bilinear(image, [height, width], + align_corners=False) + image = tf.squeeze(image, [0]) + image = tf.subtract(image, 0.5) + image = tf.multiply(image, 2.0) + return image + + +def preprocess_image(image, height, width, + is_training=False, + bbox=None, + fast_mode=True, + add_image_summaries=True): + """Pre-process one image for training or evaluation. + + Args: + image: 3-D Tensor [height, width, channels] with the image. If dtype is + tf.float32 then the range should be [0, 1], otherwise it would converted + to tf.float32 assuming that the range is [0, MAX], where MAX is largest + positive representable number for int(8/16/32) data type (see + `tf.image.convert_image_dtype` for details). + height: integer, image expected height. + width: integer, image expected width. + is_training: Boolean. If true it would transform an image for train, + otherwise it would transform it for evaluation. + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged as + [ymin, xmin, ymax, xmax]. + fast_mode: Optional boolean, if True avoids slower transformations. + add_image_summaries: Enable image summaries. + + Returns: + 3-D float Tensor containing an appropriately scaled image + + Raises: + ValueError: if user does not provide bounding box + """ + if is_training: + return preprocess_for_train(image, height, width, bbox, fast_mode, + add_image_summaries=add_image_summaries) + else: + return preprocess_for_eval(image, height, width) diff --git a/models/research/cognitive_planning/preprocessing/lenet_preprocessing.py b/models/research/cognitive_planning/preprocessing/lenet_preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..ac5e71af889866312a0896f35023d85b2c260b25 --- /dev/null +++ b/models/research/cognitive_planning/preprocessing/lenet_preprocessing.py @@ -0,0 +1,44 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Provides utilities for preprocessing.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +slim = tf.contrib.slim + + +def preprocess_image(image, output_height, output_width, is_training): + """Preprocesses the given image. + + Args: + image: A `Tensor` representing an image of arbitrary size. + output_height: The height of the image after preprocessing. + output_width: The width of the image after preprocessing. + is_training: `True` if we're preprocessing the image for training and + `False` otherwise. + + Returns: + A preprocessed image. 
+  """
+  image = tf.to_float(image)
+  image = tf.image.resize_image_with_crop_or_pad(
+      image, output_width, output_height)
+  image = tf.subtract(image, 128.0)
+  image = tf.div(image, 128.0)
+  return image
diff --git a/models/research/cognitive_planning/preprocessing/preprocessing_factory.py b/models/research/cognitive_planning/preprocessing/preprocessing_factory.py
new file mode 100644
index 0000000000000000000000000000000000000000..1bd04c252a628fb016ef89bf65e7e5c498516f0c
--- /dev/null
+++ b/models/research/cognitive_planning/preprocessing/preprocessing_factory.py
@@ -0,0 +1,81 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Contains a factory for building various preprocessing functions."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import tensorflow as tf
+
+from preprocessing import cifarnet_preprocessing
+from preprocessing import inception_preprocessing
+from preprocessing import lenet_preprocessing
+from preprocessing import vgg_preprocessing
+
+slim = tf.contrib.slim
+
+
+def get_preprocessing(name, is_training=False):
+  """Returns preprocessing_fn(image, height, width, **kwargs).
+
+  Args:
+    name: The name of the preprocessing function.
+    is_training: `True` if the model is being used for training and `False`
+      otherwise.
+
+  Returns:
+    preprocessing_fn: A function that preprocesses a single image (pre-batch).
+      It has the following signature:
+        image = preprocessing_fn(image, output_height, output_width, ...).
+
+  Raises:
+    ValueError: If preprocessing `name` is not recognized.
+ """ + preprocessing_fn_map = { + 'cifarnet': cifarnet_preprocessing, + 'inception': inception_preprocessing, + 'inception_v1': inception_preprocessing, + 'inception_v2': inception_preprocessing, + 'inception_v3': inception_preprocessing, + 'inception_v4': inception_preprocessing, + 'inception_resnet_v2': inception_preprocessing, + 'lenet': lenet_preprocessing, + 'mobilenet_v1': inception_preprocessing, + 'nasnet_mobile': inception_preprocessing, + 'nasnet_large': inception_preprocessing, + 'pnasnet_large': inception_preprocessing, + 'resnet_v1_50': vgg_preprocessing, + 'resnet_v1_101': vgg_preprocessing, + 'resnet_v1_152': vgg_preprocessing, + 'resnet_v1_200': vgg_preprocessing, + 'resnet_v2_50': vgg_preprocessing, + 'resnet_v2_101': vgg_preprocessing, + 'resnet_v2_152': vgg_preprocessing, + 'resnet_v2_200': vgg_preprocessing, + 'vgg': vgg_preprocessing, + 'vgg_a': vgg_preprocessing, + 'vgg_16': vgg_preprocessing, + 'vgg_19': vgg_preprocessing, + } + + if name not in preprocessing_fn_map: + raise ValueError('Preprocessing name [%s] was not recognized' % name) + + def preprocessing_fn(image, output_height, output_width, **kwargs): + return preprocessing_fn_map[name].preprocess_image( + image, output_height, output_width, is_training=is_training, **kwargs) + + return preprocessing_fn diff --git a/models/research/cognitive_planning/preprocessing/vgg_preprocessing.py b/models/research/cognitive_planning/preprocessing/vgg_preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..3bd50598cde0e1ebf0bcaba38052300cedeb34e8 --- /dev/null +++ b/models/research/cognitive_planning/preprocessing/vgg_preprocessing.py @@ -0,0 +1,365 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Provides utilities to preprocess images. + +The preprocessing steps for VGG were introduced in the following technical +report: + + Very Deep Convolutional Networks For Large-Scale Image Recognition + Karen Simonyan and Andrew Zisserman + arXiv technical report, 2015 + PDF: http://arxiv.org/pdf/1409.1556.pdf + ILSVRC 2014 Slides: http://www.robots.ox.ac.uk/~karen/pdf/ILSVRC_2014.pdf + CC-BY-4.0 + +More information can be obtained from the VGG website: +www.robots.ox.ac.uk/~vgg/research/very_deep/ +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +slim = tf.contrib.slim + +_R_MEAN = 123.68 +_G_MEAN = 116.78 +_B_MEAN = 103.94 + +_RESIZE_SIDE_MIN = 256 +_RESIZE_SIDE_MAX = 512 + + +def _crop(image, offset_height, offset_width, crop_height, crop_width): + """Crops the given image using the provided offsets and sizes. + + Note that the method doesn't assume we know the input image size but it does + assume we know the input image rank. + + Args: + image: an image of shape [height, width, channels]. + offset_height: a scalar tensor indicating the height offset. 
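A minimal usage sketch for the factory above, assuming the slim-style `from preprocessing import preprocessing_factory` import path that this patch sets up and a TF 1.x runtime; the model names and output sizes below are just examples.

```python
import tensorflow as tf
from preprocessing import preprocessing_factory

# Raw decoded image, e.g. from a tf.data pipeline or a TF-Example decoder.
raw_image = tf.placeholder(tf.uint8, shape=[None, None, 3])

# Inception-family names route to the Inception path, which emits [-1, 1] inputs.
train_fn = preprocessing_factory.get_preprocessing(
    'inception_v3', is_training=True)
train_image = train_fn(raw_image, 299, 299)

# vgg_* and resnet_v1_* names route to the VGG path (per-channel mean subtraction).
eval_fn = preprocessing_factory.get_preprocessing('vgg_16', is_training=False)
eval_image = eval_fn(raw_image, 224, 224)
```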
+ offset_width: a scalar tensor indicating the width offset. + crop_height: the height of the cropped image. + crop_width: the width of the cropped image. + + Returns: + the cropped (and resized) image. + + Raises: + InvalidArgumentError: if the rank is not 3 or if the image dimensions are + less than the crop size. + """ + original_shape = tf.shape(image) + + rank_assertion = tf.Assert( + tf.equal(tf.rank(image), 3), + ['Rank of image must be equal to 3.']) + with tf.control_dependencies([rank_assertion]): + cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]]) + + size_assertion = tf.Assert( + tf.logical_and( + tf.greater_equal(original_shape[0], crop_height), + tf.greater_equal(original_shape[1], crop_width)), + ['Crop size greater than the image size.']) + + offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0])) + + # Use tf.slice instead of crop_to_bounding box as it accepts tensors to + # define the crop size. + with tf.control_dependencies([size_assertion]): + image = tf.slice(image, offsets, cropped_shape) + return tf.reshape(image, cropped_shape) + + +def _random_crop(image_list, crop_height, crop_width): + """Crops the given list of images. + + The function applies the same crop to each image in the list. This can be + effectively applied when there are multiple image inputs of the same + dimension such as: + + image, depths, normals = _random_crop([image, depths, normals], 120, 150) + + Args: + image_list: a list of image tensors of the same dimension but possibly + varying channel. + crop_height: the new height. + crop_width: the new width. + + Returns: + the image_list with cropped images. + + Raises: + ValueError: if there are multiple image inputs provided with different size + or the images are smaller than the crop dimensions. + """ + if not image_list: + raise ValueError('Empty image_list.') + + # Compute the rank assertions. + rank_assertions = [] + for i in range(len(image_list)): + image_rank = tf.rank(image_list[i]) + rank_assert = tf.Assert( + tf.equal(image_rank, 3), + ['Wrong rank for tensor %s [expected] [actual]', + image_list[i].name, 3, image_rank]) + rank_assertions.append(rank_assert) + + with tf.control_dependencies([rank_assertions[0]]): + image_shape = tf.shape(image_list[0]) + image_height = image_shape[0] + image_width = image_shape[1] + crop_size_assert = tf.Assert( + tf.logical_and( + tf.greater_equal(image_height, crop_height), + tf.greater_equal(image_width, crop_width)), + ['Crop size greater than the image size.']) + + asserts = [rank_assertions[0], crop_size_assert] + + for i in range(1, len(image_list)): + image = image_list[i] + asserts.append(rank_assertions[i]) + with tf.control_dependencies([rank_assertions[i]]): + shape = tf.shape(image) + height = shape[0] + width = shape[1] + + height_assert = tf.Assert( + tf.equal(height, image_height), + ['Wrong height for tensor %s [expected][actual]', + image.name, height, image_height]) + width_assert = tf.Assert( + tf.equal(width, image_width), + ['Wrong width for tensor %s [expected][actual]', + image.name, width, image_width]) + asserts.extend([height_assert, width_assert]) + + # Create a random bounding box. + # + # Use tf.random_uniform and not numpy.random.rand as doing the former would + # generate random numbers at graph eval time, unlike the latter which + # generates random numbers at graph definition time. 
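To make the comment above concrete: a numpy random value is frozen into the graph when it is built, whereas a `tf.random_uniform` op draws a new value on every `session.run`, which is what a per-batch random crop needs. A tiny TF 1.x sketch, illustrative only:

```python
import numpy as np
import tensorflow as tf

np_offset = tf.constant(np.random.randint(0, 10))             # fixed at graph build
tf_offset = tf.random_uniform([], maxval=10, dtype=tf.int32)  # re-sampled per run

with tf.Session() as sess:
  print([sess.run(np_offset) for _ in range(3)])  # same value three times
  print([sess.run(tf_offset) for _ in range(3)])  # typically three different values
```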
+ with tf.control_dependencies(asserts): + max_offset_height = tf.reshape(image_height - crop_height + 1, []) + with tf.control_dependencies(asserts): + max_offset_width = tf.reshape(image_width - crop_width + 1, []) + offset_height = tf.random_uniform( + [], maxval=max_offset_height, dtype=tf.int32) + offset_width = tf.random_uniform( + [], maxval=max_offset_width, dtype=tf.int32) + + return [_crop(image, offset_height, offset_width, + crop_height, crop_width) for image in image_list] + + +def _central_crop(image_list, crop_height, crop_width): + """Performs central crops of the given image list. + + Args: + image_list: a list of image tensors of the same dimension but possibly + varying channel. + crop_height: the height of the image following the crop. + crop_width: the width of the image following the crop. + + Returns: + the list of cropped images. + """ + outputs = [] + for image in image_list: + image_height = tf.shape(image)[0] + image_width = tf.shape(image)[1] + + offset_height = (image_height - crop_height) / 2 + offset_width = (image_width - crop_width) / 2 + + outputs.append(_crop(image, offset_height, offset_width, + crop_height, crop_width)) + return outputs + + +def _mean_image_subtraction(image, means): + """Subtracts the given means from each image channel. + + For example: + means = [123.68, 116.779, 103.939] + image = _mean_image_subtraction(image, means) + + Note that the rank of `image` must be known. + + Args: + image: a tensor of size [height, width, C]. + means: a C-vector of values to subtract from each channel. + + Returns: + the centered image. + + Raises: + ValueError: If the rank of `image` is unknown, if `image` has a rank other + than three or if the number of channels in `image` doesn't match the + number of values in `means`. + """ + if image.get_shape().ndims != 3: + raise ValueError('Input must be of size [height, width, C>0]') + num_channels = image.get_shape().as_list()[-1] + if len(means) != num_channels: + raise ValueError('len(means) must match the number of channels') + + channels = tf.split(axis=2, num_or_size_splits=num_channels, value=image) + for i in range(num_channels): + channels[i] -= means[i] + return tf.concat(axis=2, values=channels) + + +def _smallest_size_at_least(height, width, smallest_side): + """Computes new shape with the smallest side equal to `smallest_side`. + + Computes new shape with the smallest side equal to `smallest_side` while + preserving the original aspect ratio. + + Args: + height: an int32 scalar tensor indicating the current height. + width: an int32 scalar tensor indicating the current width. + smallest_side: A python integer or scalar `Tensor` indicating the size of + the smallest side after resize. + + Returns: + new_height: an int32 scalar tensor indicating the new height. + new_width: and int32 scalar tensor indicating the new width. + """ + smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32) + + height = tf.to_float(height) + width = tf.to_float(width) + smallest_side = tf.to_float(smallest_side) + + scale = tf.cond(tf.greater(height, width), + lambda: smallest_side / width, + lambda: smallest_side / height) + new_height = tf.to_int32(tf.rint(height * scale)) + new_width = tf.to_int32(tf.rint(width * scale)) + return new_height, new_width + + +def _aspect_preserving_resize(image, smallest_side): + """Resize images preserving the original aspect ratio. + + Args: + image: A 3-D image `Tensor`. 
+ smallest_side: A python integer or scalar `Tensor` indicating the size of + the smallest side after resize. + + Returns: + resized_image: A 3-D tensor containing the resized image. + """ + smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32) + + shape = tf.shape(image) + height = shape[0] + width = shape[1] + new_height, new_width = _smallest_size_at_least(height, width, smallest_side) + image = tf.expand_dims(image, 0) + resized_image = tf.image.resize_bilinear(image, [new_height, new_width], + align_corners=False) + resized_image = tf.squeeze(resized_image) + resized_image.set_shape([None, None, 3]) + return resized_image + + +def preprocess_for_train(image, + output_height, + output_width, + resize_side_min=_RESIZE_SIDE_MIN, + resize_side_max=_RESIZE_SIDE_MAX): + """Preprocesses the given image for training. + + Note that the actual resizing scale is sampled from + [`resize_size_min`, `resize_size_max`]. + + Args: + image: A `Tensor` representing an image of arbitrary size. + output_height: The height of the image after preprocessing. + output_width: The width of the image after preprocessing. + resize_side_min: The lower bound for the smallest side of the image for + aspect-preserving resizing. + resize_side_max: The upper bound for the smallest side of the image for + aspect-preserving resizing. + + Returns: + A preprocessed image. + """ + resize_side = tf.random_uniform( + [], minval=resize_side_min, maxval=resize_side_max+1, dtype=tf.int32) + + image = _aspect_preserving_resize(image, resize_side) + image = _random_crop([image], output_height, output_width)[0] + image.set_shape([output_height, output_width, 3]) + image = tf.to_float(image) + image = tf.image.random_flip_left_right(image) + return _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN]) + + +def preprocess_for_eval(image, output_height, output_width, resize_side): + """Preprocesses the given image for evaluation. + + Args: + image: A `Tensor` representing an image of arbitrary size. + output_height: The height of the image after preprocessing. + output_width: The width of the image after preprocessing. + resize_side: The smallest side of the image for aspect-preserving resizing. + + Returns: + A preprocessed image. + """ + image = _aspect_preserving_resize(image, resize_side) + image = _central_crop([image], output_height, output_width)[0] + image.set_shape([output_height, output_width, 3]) + image = tf.to_float(image) + return _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN]) + + +def preprocess_image(image, output_height, output_width, is_training=False, + resize_side_min=_RESIZE_SIDE_MIN, + resize_side_max=_RESIZE_SIDE_MAX): + """Preprocesses the given image. + + Args: + image: A `Tensor` representing an image of arbitrary size. + output_height: The height of the image after preprocessing. + output_width: The width of the image after preprocessing. + is_training: `True` if we're preprocessing the image for training and + `False` otherwise. + resize_side_min: The lower bound for the smallest side of the image for + aspect-preserving resizing. If `is_training` is `False`, then this value + is used for rescaling. + resize_side_max: The upper bound for the smallest side of the image for + aspect-preserving resizing. If `is_training` is `False`, this value is + ignored. Otherwise, the resize side is sampled from + [resize_size_min, resize_size_max]. + + Returns: + A preprocessed image. 
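As a quick sanity check of `_mean_image_subtraction` above (illustrative only, TF 1.x, synthetic values): a constant gray image shifted by the per-channel means defined at the top of this file.

```python
import tensorflow as tf

gray = tf.fill([2, 2, 3], 128.0)  # a tiny all-128 RGB image
centered = _mean_image_subtraction(gray, [_R_MEAN, _G_MEAN, _B_MEAN])

with tf.Session() as sess:
  print(sess.run(centered)[0, 0])  # approximately [4.32, 11.22, 24.06]
```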
+ """ + if is_training: + return preprocess_for_train(image, output_height, output_width, + resize_side_min, resize_side_max) + else: + return preprocess_for_eval(image, output_height, output_width, + resize_side_min) diff --git a/models/research/cognitive_planning/standard_fields.py b/models/research/cognitive_planning/standard_fields.py new file mode 100644 index 0000000000000000000000000000000000000000..99e04e66c56527e2c7be03aaf48836e077832c1f --- /dev/null +++ b/models/research/cognitive_planning/standard_fields.py @@ -0,0 +1,224 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Contains classes specifying naming conventions used for object detection. + + +Specifies: + InputDataFields: standard fields used by reader/preprocessor/batcher. + DetectionResultFields: standard fields returned by object detector. + BoxListFields: standard field used by BoxList + TfExampleFields: standard fields for tf-example data format (go/tf-example). +""" + + +class InputDataFields(object): + """Names for the input tensors. + + Holds the standard data field names to use for identifying input tensors. This + should be used by the decoder to identify keys for the returned tensor_dict + containing input tensors. And it should be used by the model to identify the + tensors it needs. + + Attributes: + image: image. + image_additional_channels: additional channels. + original_image: image in the original input size. + key: unique key corresponding to image. + source_id: source of the original image. + filename: original filename of the dataset (without common path). + groundtruth_image_classes: image-level class labels. + groundtruth_boxes: coordinates of the ground truth boxes in the image. + groundtruth_classes: box-level class labels. + groundtruth_label_types: box-level label types (e.g. explicit negative). + groundtruth_is_crowd: [DEPRECATED, use groundtruth_group_of instead] + is the groundtruth a single object or a crowd. + groundtruth_area: area of a groundtruth segment. + groundtruth_difficult: is a `difficult` object + groundtruth_group_of: is a `group_of` objects, e.g. multiple objects of the + same class, forming a connected group, where instances are heavily + occluding each other. + proposal_boxes: coordinates of object proposal boxes. + proposal_objectness: objectness score of each proposal. + groundtruth_instance_masks: ground truth instance masks. + groundtruth_instance_boundaries: ground truth instance boundaries. + groundtruth_instance_classes: instance mask-level class labels. + groundtruth_keypoints: ground truth keypoints. + groundtruth_keypoint_visibilities: ground truth keypoint visibilities. + groundtruth_label_scores: groundtruth label scores. + groundtruth_weights: groundtruth weight factor for bounding boxes. + num_groundtruth_boxes: number of groundtruth boxes. 
+ true_image_shapes: true shapes of images in the resized images, as resized + images can be padded with zeros. + multiclass_scores: the label score per class for each box. + """ + image = 'image' + image_additional_channels = 'image_additional_channels' + original_image = 'original_image' + key = 'key' + source_id = 'source_id' + filename = 'filename' + groundtruth_image_classes = 'groundtruth_image_classes' + groundtruth_boxes = 'groundtruth_boxes' + groundtruth_classes = 'groundtruth_classes' + groundtruth_label_types = 'groundtruth_label_types' + groundtruth_is_crowd = 'groundtruth_is_crowd' + groundtruth_area = 'groundtruth_area' + groundtruth_difficult = 'groundtruth_difficult' + groundtruth_group_of = 'groundtruth_group_of' + proposal_boxes = 'proposal_boxes' + proposal_objectness = 'proposal_objectness' + groundtruth_instance_masks = 'groundtruth_instance_masks' + groundtruth_instance_boundaries = 'groundtruth_instance_boundaries' + groundtruth_instance_classes = 'groundtruth_instance_classes' + groundtruth_keypoints = 'groundtruth_keypoints' + groundtruth_keypoint_visibilities = 'groundtruth_keypoint_visibilities' + groundtruth_label_scores = 'groundtruth_label_scores' + groundtruth_weights = 'groundtruth_weights' + num_groundtruth_boxes = 'num_groundtruth_boxes' + true_image_shape = 'true_image_shape' + multiclass_scores = 'multiclass_scores' + + +class DetectionResultFields(object): + """Naming conventions for storing the output of the detector. + + Attributes: + source_id: source of the original image. + key: unique key corresponding to image. + detection_boxes: coordinates of the detection boxes in the image. + detection_scores: detection scores for the detection boxes in the image. + detection_classes: detection-level class labels. + detection_masks: contains a segmentation mask for each detection box. + detection_boundaries: contains an object boundary for each detection box. + detection_keypoints: contains detection keypoints for each detection box. + num_detections: number of detections in the batch. + """ + + source_id = 'source_id' + key = 'key' + detection_boxes = 'detection_boxes' + detection_scores = 'detection_scores' + detection_classes = 'detection_classes' + detection_masks = 'detection_masks' + detection_boundaries = 'detection_boundaries' + detection_keypoints = 'detection_keypoints' + num_detections = 'num_detections' + + +class BoxListFields(object): + """Naming conventions for BoxLists. + + Attributes: + boxes: bounding box coordinates. + classes: classes per bounding box. + scores: scores per bounding box. + weights: sample weights per bounding box. + objectness: objectness score per bounding box. + masks: masks per bounding box. + boundaries: boundaries per bounding box. + keypoints: keypoints per bounding box. + keypoint_heatmaps: keypoint heatmaps per bounding box. + is_crowd: is_crowd annotation per bounding box. + """ + boxes = 'boxes' + classes = 'classes' + scores = 'scores' + weights = 'weights' + objectness = 'objectness' + masks = 'masks' + boundaries = 'boundaries' + keypoints = 'keypoints' + keypoint_heatmaps = 'keypoint_heatmaps' + is_crowd = 'is_crowd' + + +class TfExampleFields(object): + """TF-example proto feature names for object detection. + + Holds the standard feature names to load from an Example proto for object + detection. + + Attributes: + image_encoded: JPEG encoded string + image_format: image format, e.g. "JPEG" + filename: filename + channels: number of channels of image + colorspace: colorspace, e.g. 
"RGB" + height: height of image in pixels, e.g. 462 + width: width of image in pixels, e.g. 581 + source_id: original source of the image + image_class_text: image-level label in text format + image_class_label: image-level label in numerical format + object_class_text: labels in text format, e.g. ["person", "cat"] + object_class_label: labels in numbers, e.g. [16, 8] + object_bbox_xmin: xmin coordinates of groundtruth box, e.g. 10, 30 + object_bbox_xmax: xmax coordinates of groundtruth box, e.g. 50, 40 + object_bbox_ymin: ymin coordinates of groundtruth box, e.g. 40, 50 + object_bbox_ymax: ymax coordinates of groundtruth box, e.g. 80, 70 + object_view: viewpoint of object, e.g. ["frontal", "left"] + object_truncated: is object truncated, e.g. [true, false] + object_occluded: is object occluded, e.g. [true, false] + object_difficult: is object difficult, e.g. [true, false] + object_group_of: is object a single object or a group of objects + object_depiction: is object a depiction + object_is_crowd: [DEPRECATED, use object_group_of instead] + is the object a single object or a crowd + object_segment_area: the area of the segment. + object_weight: a weight factor for the object's bounding box. + instance_masks: instance segmentation masks. + instance_boundaries: instance boundaries. + instance_classes: Classes for each instance segmentation mask. + detection_class_label: class label in numbers. + detection_bbox_ymin: ymin coordinates of a detection box. + detection_bbox_xmin: xmin coordinates of a detection box. + detection_bbox_ymax: ymax coordinates of a detection box. + detection_bbox_xmax: xmax coordinates of a detection box. + detection_score: detection score for the class label and box. + """ + image_encoded = 'image/encoded' + image_format = 'image/format' # format is reserved keyword + filename = 'image/filename' + channels = 'image/channels' + colorspace = 'image/colorspace' + height = 'image/height' + width = 'image/width' + source_id = 'image/source_id' + image_class_text = 'image/class/text' + image_class_label = 'image/class/label' + object_class_text = 'image/object/class/text' + object_class_label = 'image/object/class/label' + object_bbox_ymin = 'image/object/bbox/ymin' + object_bbox_xmin = 'image/object/bbox/xmin' + object_bbox_ymax = 'image/object/bbox/ymax' + object_bbox_xmax = 'image/object/bbox/xmax' + object_view = 'image/object/view' + object_truncated = 'image/object/truncated' + object_occluded = 'image/object/occluded' + object_difficult = 'image/object/difficult' + object_group_of = 'image/object/group_of' + object_depiction = 'image/object/depiction' + object_is_crowd = 'image/object/is_crowd' + object_segment_area = 'image/object/segment/area' + object_weight = 'image/object/weight' + instance_masks = 'image/segmentation/object' + instance_boundaries = 'image/boundaries/object' + instance_classes = 'image/segmentation/object/class' + detection_class_label = 'image/detection/label' + detection_bbox_ymin = 'image/detection/bbox/ymin' + detection_bbox_xmin = 'image/detection/bbox/xmin' + detection_bbox_ymax = 'image/detection/bbox/ymax' + detection_bbox_xmax = 'image/detection/bbox/xmax' + detection_score = 'image/detection/score' diff --git a/models/research/cognitive_planning/string_int_label_map_pb2.py b/models/research/cognitive_planning/string_int_label_map_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..44a46d7abb400f580aec065f7d82a88c695a48da --- /dev/null +++ b/models/research/cognitive_planning/string_int_label_map_pb2.py @@ -0,0 
+1,138 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: object_detection/protos/string_int_label_map.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='object_detection/protos/string_int_label_map.proto', + package='object_detection.protos', + syntax='proto2', + serialized_pb=_b('\n2object_detection/protos/string_int_label_map.proto\x12\x17object_detection.protos\"G\n\x15StringIntLabelMapItem\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\x05\x12\x14\n\x0c\x64isplay_name\x18\x03 \x01(\t\"Q\n\x11StringIntLabelMap\x12<\n\x04item\x18\x01 \x03(\x0b\x32..object_detection.protos.StringIntLabelMapItem') +) + + + + +_STRINGINTLABELMAPITEM = _descriptor.Descriptor( + name='StringIntLabelMapItem', + full_name='object_detection.protos.StringIntLabelMapItem', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='object_detection.protos.StringIntLabelMapItem.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='id', full_name='object_detection.protos.StringIntLabelMapItem.id', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='display_name', full_name='object_detection.protos.StringIntLabelMapItem.display_name', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=79, + serialized_end=150, +) + + +_STRINGINTLABELMAP = _descriptor.Descriptor( + name='StringIntLabelMap', + full_name='object_detection.protos.StringIntLabelMap', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='item', 
full_name='object_detection.protos.StringIntLabelMap.item', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=152, + serialized_end=233, +) + +_STRINGINTLABELMAP.fields_by_name['item'].message_type = _STRINGINTLABELMAPITEM +DESCRIPTOR.message_types_by_name['StringIntLabelMapItem'] = _STRINGINTLABELMAPITEM +DESCRIPTOR.message_types_by_name['StringIntLabelMap'] = _STRINGINTLABELMAP +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +StringIntLabelMapItem = _reflection.GeneratedProtocolMessageType('StringIntLabelMapItem', (_message.Message,), dict( + DESCRIPTOR = _STRINGINTLABELMAPITEM, + __module__ = 'object_detection.protos.string_int_label_map_pb2' + # @@protoc_insertion_point(class_scope:object_detection.protos.StringIntLabelMapItem) + )) +_sym_db.RegisterMessage(StringIntLabelMapItem) + +StringIntLabelMap = _reflection.GeneratedProtocolMessageType('StringIntLabelMap', (_message.Message,), dict( + DESCRIPTOR = _STRINGINTLABELMAP, + __module__ = 'object_detection.protos.string_int_label_map_pb2' + # @@protoc_insertion_point(class_scope:object_detection.protos.StringIntLabelMap) + )) +_sym_db.RegisterMessage(StringIntLabelMap) + + +# @@protoc_insertion_point(module_scope) diff --git a/models/research/cognitive_planning/tasks.py b/models/research/cognitive_planning/tasks.py new file mode 100644 index 0000000000000000000000000000000000000000..c3ef6ca328f7454ffe9aec61a704d1322d680d31 --- /dev/null +++ b/models/research/cognitive_planning/tasks.py @@ -0,0 +1,1507 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A library of tasks. + +This interface is intended to implement a wide variety of navigation +tasks. See go/navigation_tasks for a list. +""" + +import abc +import collections +import math +import threading +import networkx as nx +import numpy as np +import tensorflow as tf +#from pyglib import logging +#import gin +from envs import task_env +from envs import util as envs_util + + +# Utility functions. +def _pad_or_clip_array(np_arr, arr_len, is_front_clip=True, output_mask=False): + """Make np_arr array to have length arr_len. + + If the array is shorter than arr_len, then it is padded from the front with + zeros. If it is longer, then it is clipped either from the back or from the + front. Only the first dimension is modified. + + Args: + np_arr: numpy array. + arr_len: integer scalar. + is_front_clip: a boolean. If true then clipping is done in the front, + otherwise in the back. 
+ output_mask: If True, outputs a numpy array of rank 1 which represents + a mask of which values have been added (0 - added, 1 - actual output). + + Returns: + A numpy array and the size of padding (as a python int32). This size is + negative is the array is clipped. + """ + shape = list(np_arr.shape) + pad_size = arr_len - shape[0] + padded_or_clipped = None + if pad_size < 0: + if is_front_clip: + padded_or_clipped = np_arr[-pad_size:, :] + else: + padded_or_clipped = np_arr[:arr_len, :] + elif pad_size > 0: + padding = np.zeros([pad_size] + shape[1:], dtype=np_arr.dtype) + padded_or_clipped = np.concatenate([np_arr, padding], axis=0) + else: + padded_or_clipped = np_arr + + if output_mask: + mask = np.ones((arr_len,), dtype=np.int) + if pad_size > 0: + mask[-pad_size:] = 0 + return padded_or_clipped, pad_size, mask + else: + return padded_or_clipped, pad_size + + +def classification_loss(truth, predicted, weights=None, is_one_hot=True): + """A cross entropy loss. + + Computes the mean of cross entropy losses for all pairs of true labels and + predictions. It wraps around a tf implementation of the cross entropy loss + with additional reformating of the inputs. If the truth and predicted are + n-rank Tensors with n > 2, then these are reshaped to 2-rank Tensors. It + allows for truth to be specified as one hot vector or class indices. Finally, + a weight can be specified for each element in truth and predicted. + + Args: + truth: an n-rank or (n-1)-rank Tensor containing labels. If is_one_hot is + True, then n-rank Tensor is expected, otherwise (n-1) rank one. + predicted: an n-rank float Tensor containing prediction probabilities. + weights: an (n-1)-rank float Tensor of weights + is_one_hot: a boolean. + + Returns: + A TF float scalar. + """ + num_labels = predicted.get_shape().as_list()[-1] + if not is_one_hot: + truth = tf.reshape(truth, [-1]) + truth = tf.one_hot( + truth, depth=num_labels, on_value=1.0, off_value=0.0, axis=-1) + else: + truth = tf.reshape(truth, [-1, num_labels]) + predicted = tf.reshape(predicted, [-1, num_labels]) + losses = tf.nn.softmax_cross_entropy_with_logits( + labels=truth, logits=predicted) + if weights is not None: + losses = tf.boolean_mask(losses, + tf.cast(tf.reshape(weights, [-1]), dtype=tf.bool)) + return tf.reduce_mean(losses) + + +class UnrolledTaskIOConfig(object): + """Configuration of task inputs and outputs. + + A task can have multiple inputs, which define the context, and a task query + which defines what is to be executed in this context. The desired execution + is encoded in an output. The config defines the shapes of the inputs, the + query and the outputs. + """ + + def __init__(self, inputs, output, query=None): + """Constructs a Task input/output config. + + Args: + inputs: a list of tuples. Each tuple represents the configuration of an + input, with first element being the type (a string value) and the second + element the shape. + output: a tuple representing the configuration of the output. + query: a tuple representing the configuration of the query. If no query, + then None. + """ + # A configuration of a single input, output or query. Consists of the type, + # which can be one of the three specified above, and a shape. The shape must + # be consistent with the type, e.g. if type == 'image', then shape is a 3 + # valued list. + io_config = collections.namedtuple('IOConfig', ['type', 'shape']) + + def assert_config(config): + if not isinstance(config, tuple): + raise ValueError('config must be a tuple. 
Received {}'.format( + type(config))) + if len(config) != 2: + raise ValueError('config must have 2 elements, has %d' % len(config)) + if not isinstance(config[0], tf.DType): + raise ValueError('First element of config must be a tf.DType.') + if not isinstance(config[1], list): + raise ValueError('Second element of config must be a list.') + + assert isinstance(inputs, collections.OrderedDict) + for modality_type in inputs: + assert_config(inputs[modality_type]) + self._inputs = collections.OrderedDict( + [(k, io_config(*value)) for k, value in inputs.iteritems()]) + + if query is not None: + assert_config(query) + self._query = io_config(*query) + else: + self._query = None + + assert_config(output) + self._output = io_config(*output) + + @property + def inputs(self): + return self._inputs + + @property + def output(self): + return self._output + + @property + def query(self): + return self._query + + +class UnrolledTask(object): + """An interface for a Task which can be unrolled during training. + + Each example is called episode and consists of inputs and target output, where + the output can be considered as desired unrolled sequence of actions for the + inputs. For the specified tasks, these action sequences are to be + unambiguously definable. + """ + __metaclass__ = abc.ABCMeta + + def __init__(self, config): + assert isinstance(config, UnrolledTaskIOConfig) + self._config = config + # A dict of bookkeeping variables. + self.info = {} + # Tensorflow input is multithreaded and this lock is needed to prevent + # race condition in the environment. Without the lock, non-thread safe + # environments crash. + self._lock = threading.Lock() + + @property + def config(self): + return self._config + + @abc.abstractmethod + def episode(self): + """Returns data needed to train and test a single episode. + + Each episode consists of inputs, which define the context of the task, a + query which defines the task, and a target output, which defines a + sequence of actions to be executed for this query. This sequence should not + require feedback, i.e. can be predicted purely from input and query.] + + Returns: + inputs, query, output, where inputs is a list of numpy arrays and query + and output are numpy arrays. These arrays must be of shape and type as + specified in the task configuration. + """ + pass + + def reset(self, observation): + """Called after the environment is reset.""" + pass + + def episode_batch(self, batch_size): + """Returns a batch of episodes. + + Args: + batch_size: size of batch. + + Returns: + (inputs, query, output, masks) where inputs is list of numpy arrays and + query, output, and mask are numpy arrays. These arrays must be of shape + and type as specified in the task configuration with one additional + preceding dimension corresponding to the batch. + + Raises: + ValueError: if self.episode() returns illegal values. + """ + batched_inputs = collections.OrderedDict( + [[mtype, []] for mtype in self.config.inputs]) + batched_queries = [] + batched_outputs = [] + batched_masks = [] + for _ in range(int(batch_size)): + with self._lock: + # The episode function needs to be thread-safe. Since the current + # implementation for the envs are not thread safe we need to have lock + # the operations here. 
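For illustration, this is how a task could declare its inputs, query and output through `UnrolledTaskIOConfig` so that `assert_config` above accepts them; the modality name and shapes here are invented, not taken from the patch.

```python
import collections
import tensorflow as tf

io_config = UnrolledTaskIOConfig(
    inputs=collections.OrderedDict(
        [('images', (tf.float32, [20, 64, 64, 3]))]),  # 20-step image context
    query=(tf.float32, [64, 64, 3]),                   # one query image
    output=(tf.float32, [20, 3]))                      # a 3-way label per step

print(io_config.inputs['images'].shape)  # [20, 64, 64, 3]
print(io_config.output.type)             # <dtype: 'float32'>
```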
+ inputs, query, outputs = self.episode() + if not isinstance(outputs, tuple): + raise ValueError('Outputs return value must be tuple.') + if len(outputs) != 2: + raise ValueError('Output tuple must be of size 2.') + if inputs is not None: + for modality_type in batched_inputs: + batched_inputs[modality_type].append( + np.expand_dims(inputs[modality_type], axis=0)) + + if query is not None: + batched_queries.append(np.expand_dims(query, axis=0)) + batched_outputs.append(np.expand_dims(outputs[0], axis=0)) + if outputs[1] is not None: + batched_masks.append(np.expand_dims(outputs[1], axis=0)) + + batched_inputs = { + k: np.concatenate(i, axis=0) for k, i in batched_inputs.iteritems() + } + if batched_queries: + batched_queries = np.concatenate(batched_queries, axis=0) + batched_outputs = np.concatenate(batched_outputs, axis=0) + if batched_masks: + batched_masks = np.concatenate(batched_masks, axis=0).astype(np.float32) + else: + # When the array is empty, the default np.dtype is float64 which causes + # py_func to crash in the tests. + batched_masks = np.array([], dtype=np.float32) + batched_inputs = [batched_inputs[k] for k in self._config.inputs] + return batched_inputs, batched_queries, batched_outputs, batched_masks + + def tf_episode_batch(self, batch_size): + """A batch of episodes as TF Tensors. + + Same as episode_batch with the difference that the return values are TF + Tensors. + + Args: + batch_size: a python float for the batch size. + + Returns: + inputs, query, output, mask where inputs is a dictionary of tf.Tensor + where the keys are the modality types specified in the config.inputs. + query, output, and mask are TF Tensors. These tensors must + be of shape and type as specified in the task configuration with one + additional preceding dimension corresponding to the batch. Both mask and + output have the same shape as output. + """ + + # Define TF outputs. + touts = [] + shapes = [] + for _, i in self._config.inputs.iteritems(): + touts.append(i.type) + shapes.append(i.shape) + if self._config.query is not None: + touts.append(self._config.query.type) + shapes.append(self._config.query.shape) + # Shapes and types for batched_outputs. + touts.append(self._config.output.type) + shapes.append(self._config.output.shape) + # Shapes and types for batched_masks. + touts.append(self._config.output.type) + shapes.append(self._config.output.shape[0:1]) + + def episode_batch_func(): + if self.config.query is None: + inp, _, output, masks = self.episode_batch(int(batch_size)) + return tuple(inp) + (output, masks) + else: + inp, query, output, masks = self.episode_batch(int(batch_size)) + return tuple(inp) + (query, output, masks) + + tf_episode_batch = tf.py_func(episode_batch_func, [], touts, + stateful=True, name='taskdata') + for episode, shape in zip(tf_episode_batch, shapes): + episode.set_shape([batch_size] + shape) + + tf_episode_batch_dict = collections.OrderedDict([ + (mtype, episode) + for mtype, episode in zip(self.config.inputs.keys(), tf_episode_batch) + ]) + cur_index = len(self.config.inputs.keys()) + tf_query = None + if self.config.query is not None: + tf_query = tf_episode_batch[cur_index] + cur_index += 1 + tf_outputs = tf_episode_batch[cur_index] + tf_masks = tf_episode_batch[cur_index + 1] + + return tf_episode_batch_dict, tf_query, tf_outputs, tf_masks + + @abc.abstractmethod + def target_loss(self, true_targets, targets, weights=None): + """A loss for training a task model. 
+ + This loss measures the discrepancy between the task outputs, the true and + predicted ones. + + Args: + true_targets: tf.Tensor of shape and type as defined in the task config + containing the true outputs. + targets: tf.Tensor of shape and type as defined in the task config + containing the predicted outputs. + weights: a bool tf.Tensor of shape as targets. Only true values are + considered when formulating the loss. + """ + pass + + def reward(self, obs, done, info): + """Returns a reward. + + The tasks has to compute a reward based on the state of the environment. The + reward computation, though, is task specific. The task is to use the + environment interface, as defined in task_env.py, to compute the reward. If + this interface does not expose enough information, it is to be updated. + + Args: + obs: Observation from environment's step function. + done: Done flag from environment's step function. + info: Info dict from environment's step function. + + Returns: + obs: Observation. + reward: Floating point value. + done: Done flag. + info: Info dict. + """ + # Default implementation does not do anything. + return obs, 0.0, done, info + + +class RandomExplorationBasedTask(UnrolledTask): + """A Task which starts with a random exploration of the environment.""" + + def __init__(self, + env, + seed, + add_query_noise=False, + query_noise_var=0.0, + *args, + **kwargs): # pylint: disable=keyword-arg-before-vararg + """Initializes a Task using a random exploration runs. + + Args: + env: an instance of type TaskEnv and gym.Env. + seed: a random seed. + add_query_noise: boolean, if True then whatever queries are generated, + they are randomly perturbed. The semantics of the queries depends on the + concrete task implementation. + query_noise_var: float, the variance of Gaussian noise used for query + perturbation. Used iff add_query_noise==True. + *args: see super class. + **kwargs: see super class. + """ + super(RandomExplorationBasedTask, self).__init__(*args, **kwargs) + assert isinstance(env, task_env.TaskEnv) + self._env = env + self._env.set_task(self) + self._rng = np.random.RandomState(seed) + self._add_query_noise = add_query_noise + self._query_noise_var = query_noise_var + + # GoToStaticXTask can also take empty config but for the rest of the classes + # the number of modality types is 1. + if len(self.config.inputs.keys()) > 1: + raise NotImplementedError('current implementation supports input ' + 'with only one modality type or less.') + + def _exploration(self): + """Generates a random exploration run. + + The function uses the environment to generate a run. + + Returns: + A tuple of numpy arrays. The i-th array contains observation of type and + shape as specified in config.inputs[i]. + A list of states along the exploration path. + A list of vertex indices corresponding to the path of the exploration. + """ + in_seq_len = self._config.inputs.values()[0].shape[0] + path, _, states, step_outputs = self._env.random_step_sequence( + min_len=in_seq_len) + obs = {modality_type: [] for modality_type in self._config.inputs} + for o in step_outputs: + step_obs, _, done, _ = o + # It is expected that each value of step_obs is a dict of observations, + # whose dimensions are consistent with the config.inputs sizes. 
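To show how the abstract interface above is meant to be implemented, here is a hypothetical minimal subclass (a sketch, not part of the patch): `episode` returns `(inputs, query, (output, mask))` matching its config, and `target_loss` reuses `classification_loss` from earlier in this file.

```python
import collections
import numpy as np
import tensorflow as tf


class ToyTask(UnrolledTask):
  """Hypothetical task emitting random vectors and random one-hot labels."""

  def __init__(self, seq_len=4, num_labels=3):
    config = UnrolledTaskIOConfig(
        inputs=collections.OrderedDict(
            [('vector', (tf.float32, [seq_len, 8]))]),
        output=(tf.float32, [seq_len, num_labels]))
    super(ToyTask, self).__init__(config)
    self._seq_len = seq_len
    self._num_labels = num_labels

  def episode(self):
    inputs = {'vector': np.random.rand(self._seq_len, 8).astype(np.float32)}
    labels = np.eye(self._num_labels, dtype=np.float32)[
        np.random.randint(self._num_labels, size=self._seq_len)]
    mask = np.ones((self._seq_len,), dtype=np.float32)
    return inputs, None, (labels, mask)  # no query for this toy task

  def target_loss(self, true_targets, targets, weights=None):
    return classification_loss(true_targets, targets, weights, is_one_hot=True)
```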
+ for modality_type in self._config.inputs: + assert modality_type in step_obs, '{}'.format(type(step_obs)) + o = step_obs[modality_type] + i = self._config.inputs[modality_type] + assert len(o.shape) == len(i.shape) - 1 + for dim_o, dim_i in zip(o.shape, i.shape[1:]): + assert dim_o == dim_i, '{} != {}'.format(dim_o, dim_i) + obs[modality_type].append(o) + if done: + break + + if not obs: + return obs, states, path + + max_path_len = int( + round(in_seq_len * float(len(path)) / float(len(obs.values()[0])))) + path = path[-max_path_len:] + states = states[-in_seq_len:] + + # The above obs is a list of tuples of np,array. Re-format them as tuple of + # np.array, each array containing all observations from all steps. + def regroup(obs, i): + """Regroups observations. + + Args: + obs: a list of tuples of same size. The k-th tuple contains all the + observations from k-th step. Each observation is a numpy array. + i: the index of the observation in each tuple to be grouped. + + Returns: + A numpy array of shape config.inputs[i] which contains all i-th + observations from all steps. These are concatenated along the first + dimension. In addition, if the number of observations is different from + the one specified in config.inputs[i].shape[0], then the array is either + padded from front or clipped. + """ + grouped_obs = np.concatenate( + [np.expand_dims(o, axis=0) for o in obs[i]], axis=0) + in_seq_len = self._config.inputs[i].shape[0] + # pylint: disable=unbalanced-tuple-unpacking + grouped_obs, _ = _pad_or_clip_array( + grouped_obs, in_seq_len, is_front_clip=True) + return grouped_obs + + all_obs = {i: regroup(obs, i) for i in self._config.inputs} + + return all_obs, states, path + + def _obs_to_state(self, path, states): + """Computes mapping between path nodes and states.""" + # Generate a numpy array of locations corresponding to the path vertices. + path_coordinates = map(self._env.vertex_to_pose, path) + path_coordinates = np.concatenate( + [np.reshape(p, [1, 2]) for p in path_coordinates]) + + # The observations are taken along a smoothed trajectory following the path. + # We compute a mapping between the obeservations and the map vertices. + path_to_obs = collections.defaultdict(list) + obs_to_state = [] + for i, s in enumerate(states): + location = np.reshape(s[0:2], [1, 2]) + index = np.argmin( + np.reshape( + np.sum(np.power(path_coordinates - location, 2), axis=1), [-1])) + index = path[index] + path_to_obs[index].append(i) + obs_to_state.append(index) + return path_to_obs, obs_to_state + + def _perturb_state(self, state, noise_var): + """Perturbes the state. + + The location are purturbed using a Gaussian noise with variance + noise_var. The orientation is uniformly sampled. + + Args: + state: a numpy array containing an env state (x, y locations). + noise_var: float + Returns: + The perturbed state. + """ + + def normal(v, std): + if std > 0: + n = self._rng.normal(0.0, std) + n = min(n, 2.0 * std) + n = max(n, -2.0 * std) + return v + n + else: + return v + + state = state.copy() + state[0] = normal(state[0], noise_var) + state[1] = normal(state[1], noise_var) + if state.size > 2: + state[2] = self._rng.uniform(-math.pi, math.pi) + return state + + def _sample_obs(self, + indices, + observations, + observation_states, + path_to_obs, + max_obs_index=None, + use_exploration_obs=True): + """Samples one observation which corresponds to vertex_index in path. + + In addition, the sampled observation must have index in observations less + than max_obs_index. 
If these two conditions cannot be satisfied the + function returns None. + + Args: + indices: a list of integers. + observations: a list of numpy arrays containing all the observations. + observation_states: a list of numpy arrays, each array representing the + state of the observation. + path_to_obs: a dict of path indices to lists of observation indices. + max_obs_index: an integer. + use_exploration_obs: if True, then the observation is sampled among the + specified observations, otherwise it is obtained from the environment. + Returns: + A tuple of: + -- A numpy array of size width x height x 3 representing the sampled + observation. + -- The index of the sampld observation among the input observations. + -- The state at which the observation is captured. + Raises: + ValueError: if the observation and observation_states lists are of + different lengths. + """ + if len(observations) != len(observation_states): + raise ValueError('observation and observation_states lists must have ' + 'equal lengths') + if not indices: + return None, None, None + vertex_index = self._rng.choice(indices) + if use_exploration_obs: + obs_indices = path_to_obs[vertex_index] + + if max_obs_index is not None: + obs_indices = [i for i in obs_indices if i < max_obs_index] + + if obs_indices: + index = self._rng.choice(obs_indices) + if self._add_query_noise: + xytheta = self._perturb_state(observation_states[index], + self._query_noise_var) + return self._env.observation(xytheta), index, xytheta + else: + return observations[index], index, observation_states[index] + else: + return None, None, None + else: + xy = self._env.vertex_to_pose(vertex_index) + xytheta = np.array([xy[0], xy[1], 0.0]) + xytheta = self._perturb_state(xytheta, self._query_noise_var) + return self._env.observation(xytheta), None, xytheta + + +class AreNearbyTask(RandomExplorationBasedTask): + """A task of identifying whether a query is nearby current location or not. + + The query is guaranteed to be in proximity of an already visited location, + i.e. close to one of the observations. For each observation we have one + query, which is either close or not to this observation. + """ + + def __init__( + self, + max_distance=0, + *args, + **kwargs): # pylint: disable=keyword-arg-before-vararg + super(AreNearbyTask, self).__init__(*args, **kwargs) + self._max_distance = max_distance + + if len(self.config.inputs.keys()) != 1: + raise NotImplementedError('current implementation supports input ' + 'with only one modality type') + + def episode(self): + """Episode data. + + Returns: + observations: a tuple with one element. This element is a numpy array of + size in_seq_len x observation_size x observation_size x 3 containing + in_seq_len images. + query: a numpy array of size + in_seq_len x observation_size X observation_size x 3 containing a query + image. + A tuple of size two. First element is a in_seq_len x 2 numpy array of + either 1.0 or 0.0. The i-th element denotes whether the i-th query + image is neraby (value 1.0) or not (value 0.0) to the i-th observation. + The second element in the tuple is a mask, a numpy array of size + in_seq_len x 1 and values 1.0 or 0.0 denoting whether the query is + valid or not (it can happen that the query is not valid, e.g. there are + not enough observations to have a meaningful queries). + """ + observations, states, path = self._exploration() + assert len(observations.values()[0]) == len(states) + + # The observations are taken along a smoothed trajectory following the path. 
+    # We compute a mapping between the observations and the map vertices.
+    path_to_obs, obs_to_path = self._obs_to_state(path, states)
+
+    # Go over all observations, and sample a query. With probability 0.5 this
+    # query is a nearby observation (defined as belonging to the same vertex
+    # in path).
+    g = self._env.graph
+    queries = []
+    labels = []
+    validity_masks = []
+    query_index_in_observations = []
+    for i, curr_o in enumerate(observations.values()[0]):
+      p = obs_to_path[i]
+      low = max(0, i - self._max_distance)
+
+      # A list of lists of vertex indices. Each list in this group corresponds
+      # to one possible label.
+      index_groups = [[], [], []]
+      # Nearby visited indices, label 1.
+      nearby_visited = [
+          ii for ii in path[low:i + 1] + g[p].keys() if ii in obs_to_path[:i]
+      ]
+      nearby_visited = [ii for ii in nearby_visited if ii in path_to_obs]
+      # NOT Nearby visited indices, label 0.
+      not_nearby_visited = [ii for ii in path[:low] if ii not in g[p].keys()]
+      not_nearby_visited = [
+          ii for ii in not_nearby_visited if ii in path_to_obs
+      ]
+      # NOT visited indices, label 2.
+      not_visited = [
+          ii for ii in range(g.number_of_nodes()) if ii not in path[:i + 1]
+      ]
+
+      index_groups = [not_nearby_visited, nearby_visited, not_visited]
+
+      # Consider only labels for which there are indices.
+      allowed_labels = [ii for ii, group in enumerate(index_groups) if group]
+      label = self._rng.choice(allowed_labels)
+
+      indices = list(set(index_groups[label]))
+      max_obs_index = None if label == 2 else i
+      use_exploration_obs = False if label == 2 else True
+      o, obs_index, _ = self._sample_obs(
+          indices=indices,
+          observations=observations.values()[0],
+          observation_states=states,
+          path_to_obs=path_to_obs,
+          max_obs_index=max_obs_index,
+          use_exploration_obs=use_exploration_obs)
+      query_index_in_observations.append(obs_index)
+
+      # If we cannot sample a valid query, we mark it as not valid in mask.
+      if o is None:
+        label = 0.0
+        o = curr_o
+        validity_masks.append(0)
+      else:
+        validity_masks.append(1)
+
+      queries.append(o.values()[0])
+      labels.append(label)
+
+    query = np.concatenate([np.expand_dims(q, axis=0) for q in queries], axis=0)
+
+    def one_hot(label, num_labels=3):
+      a = np.zeros((num_labels,), dtype=np.float)
+      a[int(label)] = 1.0
+      return a
+
+    outputs = np.stack([one_hot(l) for l in labels], axis=0)
+    validity_mask = np.reshape(
+        np.array(validity_masks, dtype=np.int32), [-1, 1])
+
+    self.info['query_index_in_observations'] = query_index_in_observations
+    self.info['observation_states'] = states
+
+    return observations, query, (outputs, validity_mask)
+
+  def target_loss(self, truth, predicted, weights=None):
+    pass
+
+
+class NeighboringQueriesTask(RandomExplorationBasedTask):
+  """A task of identifying whether two queries are close by or not.
+
+  The proximity between queries is defined by the length of the shortest path
+  between them.
+  """
+
+  def __init__(
+      self,
+      max_distance=1,
+      *args,
+      **kwargs):  # pylint: disable=keyword-arg-before-vararg
+    """Initializes a NeighboringQueriesTask.
+
+    Args:
+      max_distance: integer, the maximum distance, in number of vertices,
+        between the two queries for them to be considered neighboring.
+      *args: for super class.
+      **kwargs: for super class.
+    """
+    super(NeighboringQueriesTask, self).__init__(*args, **kwargs)
+    self._max_distance = max_distance
+    if len(self.config.inputs.keys()) != 1:
+      raise NotImplementedError('current implementation supports input '
+                                'with only one modality type')
+
+  def episode(self):
+    """Episode data.
+
+    Returns:
+      observations: a tuple with one element. This element is a numpy array of
+        size in_seq_len x observation_size x observation_size x 3 containing
+        in_seq_len images.
+      query: a numpy array of size
+        2 x observation_size x observation_size x 3 containing a pair of query
+        images.
+      A tuple of size two. First element is a numpy array of size 2 containing
+        a one hot vector of whether the two observations are neighboring.
+        Second element is a boolean numpy value denoting whether this is a
+        valid episode.
+    """
+    observations, states, path = self._exploration()
+    assert len(observations.values()[0]) == len(states)
+    path_to_obs, _ = self._obs_to_state(path, states)
+    # Restrict path to ones for which observations have been generated.
+    path = [p for p in path if p in path_to_obs]
+    # Sample first query.
+    query1_index = self._rng.choice(path)
+    # Sample label.
+    label = self._rng.randint(2)
+    # Sample second query.
+    # If label == 1, then second query must be nearby, otherwise not.
+    closest_indices = nx.single_source_shortest_path(
+        self._env.graph, query1_index, self._max_distance).keys()
+    if label == 0:
+      # Indices on the path which are not close to the first query.
+      indices = [p for p in path if p not in closest_indices]
+    else:
+      # Close indices which are on the path.
+      indices = [p for p in closest_indices if p in path]
+
+    query2_index = self._rng.choice(indices)
+    # Generate an observation.
+    query1, query1_index, _ = self._sample_obs(
+        [query1_index],
+        observations.values()[0],
+        states,
+        path_to_obs,
+        max_obs_index=None,
+        use_exploration_obs=True)
+    query2, query2_index, _ = self._sample_obs(
+        [query2_index],
+        observations.values()[0],
+        states,
+        path_to_obs,
+        max_obs_index=None,
+        use_exploration_obs=True)
+
+    queries = np.concatenate(
+        [np.expand_dims(q, axis=0) for q in [query1, query2]])
+    labels = np.array([0, 0])
+    labels[label] = 1
+    is_valid = np.array([1])
+
+    self.info['observation_states'] = states
+    self.info['query_indices_in_observations'] = [query1_index, query2_index]
+
+    return observations, queries, (labels, is_valid)
+
+  def target_loss(self, truth, predicted, weights=None):
+    pass
+
+
+#@gin.configurable
+class GotoStaticXTask(RandomExplorationBasedTask):
+  """Task to go to a static X.
+
+  If continuous reward is used, only one goal is allowed so that the reward
+  can be computed as a delta-distance to that goal.
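+
+  As a rough sketch of the reward structure (see reward() below): hitting a
+  wall yields hit_wall_reward, reaching the goal yields goal_reward, and every
+  other step yields step_reward, unless use_continuous_reward is set, in which
+  case the per-step reward is the decrease in shortest-path length to the
+  single goal, measured in the same pixel units as _compute_path_length().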
+ """ + + def __init__(self, + step_reward=0.0, + goal_reward=1.0, + hit_wall_reward=-1.0, + done_at_target=False, + use_continuous_reward=False, + *args, + **kwargs): # pylint: disable=keyword-arg-before-vararg + super(GotoStaticXTask, self).__init__(*args, **kwargs) + if len(self.config.inputs.keys()) > 1: + raise NotImplementedError('current implementation supports input ' + 'with only one modality type or less.') + + self._step_reward = step_reward + self._goal_reward = goal_reward + self._hit_wall_reward = hit_wall_reward + self._done_at_target = done_at_target + self._use_continuous_reward = use_continuous_reward + + self._previous_path_length = None + + def episode(self): + observations, _, path = self._exploration() + if len(path) < 2: + raise ValueError('The exploration path has only one node.') + + g = self._env.graph + start = path[-1] + while True: + goal = self._rng.choice(path[:-1]) + if goal != start: + break + goal_path = nx.shortest_path(g, start, goal) + + init_orientation = self._rng.uniform(0, np.pi, (1,)) + trajectory = np.array( + [list(self._env.vertex_to_pose(p)) for p in goal_path]) + init_xy = np.reshape(trajectory[0, :], [-1]) + init_state = np.concatenate([init_xy, init_orientation], 0) + + trajectory = trajectory[1:, :] + deltas = envs_util.trajectory_to_deltas(trajectory, init_state) + output_seq_len = self._config.output.shape[0] + arr = _pad_or_clip_array(deltas, output_seq_len, output_mask=True) + # pylint: disable=unbalanced-tuple-unpacking + thetas, _, thetas_mask = arr + + query = self._env.observation(self._env.vertex_to_pose(goal)).values()[0] + + return observations, query, (thetas, thetas_mask) + + def reward(self, obs, done, info): + if 'wall_collision' in info and info['wall_collision']: + return obs, self._hit_wall_reward, done, info + + reward = 0.0 + current_vertex = self._env.pose_to_vertex(self._env.state) + + if current_vertex in self._env.targets(): + if self._done_at_target: + done = True + else: + obs = self._env.reset() + reward = self._goal_reward + else: + if self._use_continuous_reward: + if len(self._env.targets()) != 1: + raise ValueError( + 'FindX task with continuous reward is assuming only one target.') + goal_vertex = self._env.targets()[0] + path_length = self._compute_path_length(goal_vertex) + reward = self._previous_path_length - path_length + self._previous_path_length = path_length + else: + reward = self._step_reward + + return obs, reward, done, info + + def _compute_path_length(self, goal_vertex): + current_vertex = self._env.pose_to_vertex(self._env.state) + path = nx.shortest_path(self._env.graph, current_vertex, goal_vertex) + assert len(path) >= 2 + curr_xy = np.array(self._env.state[:2]) + next_xy = np.array(self._env.vertex_to_pose(path[1])) + last_step_distance = np.linalg.norm(next_xy - curr_xy) + return (len(path) - 2) * self._env.cell_size_px + last_step_distance + + def reset(self, observation): + if self._use_continuous_reward: + if len(self._env.targets()) != 1: + raise ValueError( + 'FindX task with continuous reward is assuming only one target.') + goal_vertex = self._env.targets()[0] + self._previous_path_length = self._compute_path_length(goal_vertex) + + def target_loss(self, truth, predicted, weights=None): + """Action classification loss. + + Args: + truth: a batch_size x sequence length x number of labels float + Tensor containing a one hot vector for each label in each batch and + time. 
+ predicted: a batch_size x sequence length x number of labels float + Tensor containing a predicted distribution over all actions. + weights: a batch_size x sequence_length float Tensor of bool + denoting which actions are valid. + + Returns: + An average cross entropy over all batches and elements in sequence. + """ + return classification_loss( + truth=truth, predicted=predicted, weights=weights, is_one_hot=True) + + +class RelativeLocationTask(RandomExplorationBasedTask): + """A task of estimating the relative location of a query w.r.t current. + + It is to be used for debugging. It is designed such that the output is a + single value, out of a discrete set of values, so that it can be phrased as + a classification problem. + """ + + def __init__(self, num_labels, *args, **kwargs): + """Initializes a relative location task. + + Args: + num_labels: integer, number of orientations to bin the relative + orientation into. + *args: see super class. + **kwargs: see super class. + """ + super(RelativeLocationTask, self).__init__(*args, **kwargs) + self._num_labels = num_labels + if len(self.config.inputs.keys()) != 1: + raise NotImplementedError('current implementation supports input ' + 'with only one modality type') + + def episode(self): + observations, states, path = self._exploration() + + # Select a random element from history. + path_to_obs, _ = self._obs_to_state(path, states) + use_exploration_obs = not self._add_query_noise + query, _, query_state = self._sample_obs( + path[:-1], + observations.values()[0], + states, + path_to_obs, + max_obs_index=None, + use_exploration_obs=use_exploration_obs) + + x, y, theta = tuple(states[-1]) + q_x, q_y, _ = tuple(query_state) + t_x, t_y = q_x - x, q_y - y + (rt_x, rt_y) = (np.sin(theta) * t_x - np.cos(theta) * t_y, + np.cos(theta) * t_x + np.sin(theta) * t_y) + # Bins are [a(i), a(i+1)] for a(i) = -pi + 0.5 * bin_size + i * bin_size. + shift = np.pi * (1 - 1.0 / (2.0 * self._num_labels)) + orientation = np.arctan2(rt_y, rt_x) + shift + if orientation < 0: + orientation += 2 * np.pi + label = int(np.floor(self._num_labels * orientation / (2 * np.pi))) + + out_shape = self._config.output.shape + if len(out_shape) != 1: + raise ValueError('Output shape should be of rank 1.') + if out_shape[0] != self._num_labels: + raise ValueError('Output shape must be of size %d' % self._num_labels) + output = np.zeros(out_shape, dtype=np.float32) + output[label] = 1 + + return observations, query, (output, None) + + def target_loss(self, truth, predicted, weights=None): + return classification_loss( + truth=truth, predicted=predicted, weights=weights, is_one_hot=True) + + +class LocationClassificationTask(UnrolledTask): + """A task of classifying a location as one of several classes. + + The task does not have an input, but just a query and an output. The query + is an observation of the current location, e.g. an image taken from the + current state. The output is a label classifying this location in one of + predefined set of locations (or landmarks). + + The current implementation classifies locations as intersections based on the + number and directions of biforcations. It is expected that a location can have + at most 4 different directions, aligned with the axes. As each of these four + directions might be present or not, the number of possible intersections are + 2^4 = 16. 
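+
+  As a worked example of this encoding: if a location has exactly two
+  corridors and they fall into bins 0 and 2 of the agent-centric binning done
+  in episode(), its label is 2^0 + 2^2 = 5 (which bin a corridor falls into
+  depends on the agent's orientation).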
+ """ + + def __init__(self, env, seed, *args, **kwargs): + super(LocationClassificationTask, self).__init__(*args, **kwargs) + self._env = env + self._rng = np.random.RandomState(seed) + # A location property which can be set. If not set, a random one is + # generated. + self._location = None + if len(self.config.inputs.keys()) > 1: + raise NotImplementedError('current implementation supports input ' + 'with only one modality type or less.') + + @property + def location(self): + return self._location + + @location.setter + def location(self, location): + self._location = location + + def episode(self): + # Get a location. If not set, sample on at a vertex with a random + # orientation + location = self._location + if location is None: + num_nodes = self._env.graph.number_of_nodes() + vertex = int(math.floor(self._rng.uniform(0, num_nodes))) + xy = self._env.vertex_to_pose(vertex) + theta = self._rng.uniform(0, 2 * math.pi) + location = np.concatenate( + [np.reshape(xy, [-1]), np.array([theta])], axis=0) + else: + vertex = self._env.pose_to_vertex(location) + + theta = location[2] + neighbors = self._env.graph.neighbors(vertex) + xy_s = [self._env.vertex_to_pose(n) for n in neighbors] + + def rotate(xy, theta): + """Rotates a vector around the origin by angle theta. + + Args: + xy: a numpy darray of shape (2, ) of floats containing the x and y + coordinates of a vector. + theta: a python float containing the rotation angle in radians. + + Returns: + A numpy darray of floats of shape (2,) containing the x and y + coordinates rotated xy. + """ + rotated_x = np.cos(theta) * xy[0] - np.sin(theta) * xy[1] + rotated_y = np.sin(theta) * xy[0] + np.cos(theta) * xy[1] + return np.array([rotated_x, rotated_y]) + + # Rotate all intersection biforcation by the orientation of the agent as the + # intersection label is defined in an agent centered fashion. + xy_s = [ + rotate(xy - location[0:2], -location[2] - math.pi / 4) for xy in xy_s + ] + th_s = [np.arctan2(xy[1], xy[0]) for xy in xy_s] + + out_shape = self._config.output.shape + if len(out_shape) != 1: + raise ValueError('Output shape should be of rank 1.') + num_labels = out_shape[0] + if num_labels != 16: + raise ValueError('Currently only 16 labels are supported ' + '(there are 16 different 4 way intersection types).') + + th_s = set([int(math.floor(4 * (th / (2 * np.pi) + 0.5))) for th in th_s]) + one_hot_label = np.zeros((num_labels,), dtype=np.float32) + label = 0 + for th in th_s: + label += pow(2, th) + one_hot_label[int(label)] = 1.0 + + query = self._env.observation(location).values()[0] + return [], query, (one_hot_label, None) + + def reward(self, obs, done, info): + raise ValueError('Do not call.') + + def target_loss(self, truth, predicted, weights=None): + return classification_loss( + truth=truth, predicted=predicted, weights=weights, is_one_hot=True) + + +class GotoStaticXNoExplorationTask(UnrolledTask): + """An interface for findX tasks without exploration. + + The agent is initialized a random location in a random world and a random goal + and the objective is for the agent to move toward the goal. This class + generates episode for such task. Each generates a sequence of observations x + and target outputs y. x is the observations and is an OrderedDict with keys + provided from config.inputs.keys() and the shapes provided in the + config.inputs. The output is a numpy arrays with the shape specified in the + config.output. 
The shape of the array is (sequence_length x action_size) where + action is the number of actions that can be done in the environment. Note that + config.output.shape should be set according to the number of actions that can + be done in the env. + target outputs y are the groundtruth value of each action that is computed + from the environment graph. The target output for each action is proportional + to the progress that each action makes. Target value of 1 means that the + action takes the agent one step closer, -1 means the action takes the agent + one step farther. Value of -2 means that action should not take place at all. + This can be because the action leads to collision or it wants to terminate the + episode prematurely. + """ + + def __init__(self, env, *args, **kwargs): + super(GotoStaticXNoExplorationTask, self).__init__(*args, **kwargs) + + if self._config.query is not None: + raise ValueError('query should be None.') + if len(self._config.output.shape) != 2: + raise ValueError('output should only have two dimensions:' + '(sequence_length x number_of_actions)') + for input_config in self._config.inputs.values(): + if input_config.shape[0] != self._config.output.shape[0]: + raise ValueError('the first dimension of the input and output should' + 'be the same.') + if len(self._config.output.shape) != 2: + raise ValueError('output shape should be ' + '(sequence_length x number_of_actions)') + + self._env = env + + def _compute_shortest_path_length(self, vertex, target_vertices): + """Computes length of the shortest path from vertex to any target vertexes. + + Args: + vertex: integer, index of the vertex in the environment graph. + target_vertices: list of the target vertexes + + Returns: + integer, minimum distance from the vertex to any of the target_vertices. + + Raises: + ValueError: if there is no path between the vertex and at least one of + the target_vertices. + """ + try: + return np.min([ + len(nx.shortest_path(self._env.graph, vertex, t)) + for t in target_vertices + ]) + except: + #logging.error('there is no path between vertex %d and at least one of ' + # 'the targets %r', vertex, target_vertices) + raise + + def _compute_gt_value(self, vertex, target_vertices): + """Computes groundtruth value of all the actions at the vertex. + + The value of each action is the difference each action makes in the length + of the shortest path to the goal. If an action takes the agent one step + closer to the goal the value is 1. In case, it takes the agent one step away + from the goal it would be -1. If it leads to collision or if the agent uses + action stop before reaching to the goal it is -2. To avoid scale issues the + gt_values are multipled by 0.5. + + Args: + vertex: integer, the index of current vertex. + target_vertices: list of the integer indexes of the target views. + + Returns: + numpy array with shape (action_size,) and each element is the groundtruth + value of each action based on the progress each action makes. + """ + action_size = self._config.output.shape[1] + output_value = np.ones((action_size), dtype=np.float32) * -2 + my_distance = self._compute_shortest_path_length(vertex, target_vertices) + for adj in self._env.graph[vertex]: + adj_distance = self._compute_shortest_path_length(adj, target_vertices) + if adj_distance is None: + continue + action_index = self._env.action( + self._env.vertex_to_pose(vertex), self._env.vertex_to_pose(adj)) + assert action_index is not None, ('{} is not adjacent to {}. 
There might ' + 'be a problem in environment graph ' + 'connectivity because there is no ' + 'direct edge between the given ' + 'vertices').format( + self._env.vertex_to_pose(vertex), + self._env.vertex_to_pose(adj)) + output_value[action_index] = my_distance - adj_distance + + return output_value * 0.5 + + def episode(self): + """Returns data needed to train and test a single episode. + + Returns: + (inputs, None, output) where inputs is a dictionary of modality types to + numpy arrays. The second element is query but we assume that the goal + is also given as part of observation so it should be None for this task, + and the outputs is the tuple of ground truth action values with the + shape of (sequence_length x action_size) that is coming from + config.output.shape and a numpy array with the shape of + (sequence_length,) that is 1 if the corresponding element of the + input and output should be used in the training optimization. + + Raises: + ValueError: If the output values for env.random_step_sequence is not + valid. + ValueError: If the shape of observations coming from the env is not + consistent with the config. + ValueError: If there is a modality type specified in the config but the + environment does not return that. + """ + # Sequence length is the first dimension of any of the input tensors. + sequence_length = self._config.inputs.values()[0].shape[0] + modality_types = self._config.inputs.keys() + + path, _, _, step_outputs = self._env.random_step_sequence( + max_len=sequence_length) + target_vertices = [self._env.pose_to_vertex(x) for x in self._env.targets()] + + if len(path) != len(step_outputs): + raise ValueError('path, and step_outputs should have equal length' + ' {}!={}'.format(len(path), len(step_outputs))) + + # Building up observations. observations will be a OrderedDict of + # modality types. The values are numpy arrays that follow the given shape + # in the input config for each modality type. + observations = collections.OrderedDict([k, []] for k in modality_types) + for step_output in step_outputs: + obs_dict = step_output[0] + # Only going over the modality types that are specified in the input + # config. + for modality_type in modality_types: + if modality_type not in obs_dict: + raise ValueError('modality type is not returned from the environment.' + '{} not in {}'.format(modality_type, + obs_dict.keys())) + obs = obs_dict[modality_type] + if np.any( + obs.shape != tuple(self._config.inputs[modality_type].shape[1:])): + raise ValueError( + 'The observations should have the same size as speicifed in' + 'config for modality type {}. {} != {}'.format( + modality_type, obs.shape, + self._config.inputs[modality_type].shape[1:])) + observations[modality_type].append(obs) + + gt_value = [self._compute_gt_value(v, target_vertices) for v in path] + + # pylint: disable=unbalanced-tuple-unpacking + gt_value, _, value_mask = _pad_or_clip_array( + np.array(gt_value), + sequence_length, + is_front_clip=False, + output_mask=True, + ) + for modality_type, obs in observations.iteritems(): + observations[modality_type], _, mask = _pad_or_clip_array( + np.array(obs), sequence_length, is_front_clip=False, output_mask=True) + assert np.all(mask == value_mask) + + return observations, None, (gt_value, value_mask) + + def reset(self, observation): + """Called after the environment is reset.""" + pass + + def target_loss(self, true_targets, targets, weights=None): + """A loss for training a task model. 
+ + This loss measures the discrepancy between the task outputs, the true and + predicted ones. + + Args: + true_targets: tf.Tensor of tf.float32 with the shape of + (batch_size x sequence_length x action_size). + targets: tf.Tensor of tf.float32 with the shape of + (batch_size x sequence_length x action_size). + weights: tf.Tensor of tf.bool with the shape of + (batch_size x sequence_length). + + Raises: + ValueError: if the shapes of the input tensors are not consistent. + + Returns: + L2 loss between the predicted action values and true action values. + """ + targets_shape = targets.get_shape().as_list() + true_targets_shape = true_targets.get_shape().as_list() + if len(targets_shape) != 3 or len(true_targets_shape) != 3: + raise ValueError('invalid shape for targets or true_targets_shape') + if np.any(targets_shape != true_targets_shape): + raise ValueError('the shape of targets and true_targets are not the same' + '{} != {}'.format(targets_shape, true_targets_shape)) + + if weights is not None: + # Filtering targets and true_targets using weights. + weights_shape = weights.get_shape().as_list() + if np.any(weights_shape != targets_shape[0:2]): + raise ValueError('The first two elements of weights shape should match' + 'target. {} != {}'.format(weights_shape, + targets_shape)) + true_targets = tf.boolean_mask(true_targets, weights) + targets = tf.boolean_mask(targets, weights) + + return tf.losses.mean_squared_error(tf.reshape(targets, [-1]), + tf.reshape(true_targets, [-1])) + + def reward(self, obs, done, info): + raise NotImplementedError('reward is not implemented for this task') + + +################################################################################ +class NewTask(UnrolledTask): + def __init__(self, env, *args, **kwargs): + super(NewTask, self).__init__(*args, **kwargs) + self._env = env + + def _compute_shortest_path_length(self, vertex, target_vertices): + """Computes length of the shortest path from vertex to any target vertexes. + + Args: + vertex: integer, index of the vertex in the environment graph. + target_vertices: list of the target vertexes + + Returns: + integer, minimum distance from the vertex to any of the target_vertices. + + Raises: + ValueError: if there is no path between the vertex and at least one of + the target_vertices. + """ + try: + return np.min([ + len(nx.shortest_path(self._env.graph, vertex, t)) + for t in target_vertices + ]) + except: + logging.error('there is no path between vertex %d and at least one of ' + 'the targets %r', vertex, target_vertices) + raise + + def _compute_gt_value(self, vertex, target_vertices): + """Computes groundtruth value of all the actions at the vertex. + + The value of each action is the difference each action makes in the length + of the shortest path to the goal. If an action takes the agent one step + closer to the goal the value is 1. In case, it takes the agent one step away + from the goal it would be -1. If it leads to collision or if the agent uses + action stop before reaching to the goal it is -2. To avoid scale issues the + gt_values are multipled by 0.5. + + Args: + vertex: integer, the index of current vertex. + target_vertices: list of the integer indexes of the target views. + + Returns: + numpy array with shape (action_size,) and each element is the groundtruth + value of each action based on the progress each action makes. 
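+
+      For example, if the shortest-path length from the current vertex to the
+      goal is 5 and an adjacent vertex is at distance 4, the entry for the
+      action leading to that vertex is (5 - 4) * 0.5 = 0.5; actions that would
+      collide or stop prematurely keep the initial value of -2, i.e. -1 after
+      the 0.5 scaling.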
+ """ + action_size = self._config.output.shape[1] + output_value = np.ones((action_size), dtype=np.float32) * -2 + # own compute _compute_shortest_path_length - returnts float + my_distance = self._compute_shortest_path_length(vertex, target_vertices) + for adj in self._env.graph[vertex]: + adj_distance = self._compute_shortest_path_length(adj, target_vertices) + if adj_distance is None: + continue + action_index = self._env.action( + self._env.vertex_to_pose(vertex), self._env.vertex_to_pose(adj)) + assert action_index is not None, ('{} is not adjacent to {}. There might ' + 'be a problem in environment graph ' + 'connectivity because there is no ' + 'direct edge between the given ' + 'vertices').format( + self._env.vertex_to_pose(vertex), + self._env.vertex_to_pose(adj)) + output_value[action_index] = my_distance - adj_distance + + return output_value * 0.5 + + def episode(self): + """Returns data needed to train and test a single episode. + + Returns: + (inputs, None, output) where inputs is a dictionary of modality types to + numpy arrays. The second element is query but we assume that the goal + is also given as part of observation so it should be None for this task, + and the outputs is the tuple of ground truth action values with the + shape of (sequence_length x action_size) that is coming from + config.output.shape and a numpy array with the shape of + (sequence_length,) that is 1 if the corresponding element of the + input and output should be used in the training optimization. + + Raises: + ValueError: If the output values for env.random_step_sequence is not + valid. + ValueError: If the shape of observations coming from the env is not + consistent with the config. + ValueError: If there is a modality type specified in the config but the + environment does not return that. + """ + # Sequence length is the first dimension of any of the input tensors. + sequence_length = self._config.inputs.values()[0].shape[0] + modality_types = self._config.inputs.keys() + + path, _, _, step_outputs = self._env.random_step_sequence( + max_len=sequence_length) + target_vertices = [self._env.pose_to_vertex(x) for x in self._env.targets()] + + if len(path) != len(step_outputs): + raise ValueError('path, and step_outputs should have equal length' + ' {}!={}'.format(len(path), len(step_outputs))) + + # Building up observations. observations will be a OrderedDict of + # modality types. The values are numpy arrays that follow the given shape + # in the input config for each modality type. + observations = collections.OrderedDict([k, []] for k in modality_types) + for step_output in step_outputs: + obs_dict = step_output[0] + # Only going over the modality types that are specified in the input + # config. + for modality_type in modality_types: + if modality_type not in obs_dict: + raise ValueError('modality type is not returned from the environment.' + '{} not in {}'.format(modality_type, + obs_dict.keys())) + obs = obs_dict[modality_type] + if np.any( + obs.shape != tuple(self._config.inputs[modality_type].shape[1:])): + raise ValueError( + 'The observations should have the same size as speicifed in' + 'config for modality type {}. 
{} != {}'.format( + modality_type, obs.shape, + self._config.inputs[modality_type].shape[1:])) + observations[modality_type].append(obs) + + gt_value = [self._compute_gt_value(v, target_vertices) for v in path] + + # pylint: disable=unbalanced-tuple-unpacking + gt_value, _, value_mask = _pad_or_clip_array( + np.array(gt_value), + sequence_length, + is_front_clip=False, + output_mask=True, + ) + for modality_type, obs in observations.iteritems(): + observations[modality_type], _, mask = _pad_or_clip_array( + np.array(obs), sequence_length, is_front_clip=False, output_mask=True) + assert np.all(mask == value_mask) + + return observations, None, (gt_value, value_mask) + + def reset(self, observation): + """Called after the environment is reset.""" + pass + + def target_loss(self, true_targets, targets, weights=None): + """A loss for training a task model. + + This loss measures the discrepancy between the task outputs, the true and + predicted ones. + + Args: + true_targets: tf.Tensor of tf.float32 with the shape of + (batch_size x sequence_length x action_size). + targets: tf.Tensor of tf.float32 with the shape of + (batch_size x sequence_length x action_size). + weights: tf.Tensor of tf.bool with the shape of + (batch_size x sequence_length). + + Raises: + ValueError: if the shapes of the input tensors are not consistent. + + Returns: + L2 loss between the predicted action values and true action values. + """ + targets_shape = targets.get_shape().as_list() + true_targets_shape = true_targets.get_shape().as_list() + if len(targets_shape) != 3 or len(true_targets_shape) != 3: + raise ValueError('invalid shape for targets or true_targets_shape') + if np.any(targets_shape != true_targets_shape): + raise ValueError('the shape of targets and true_targets are not the same' + '{} != {}'.format(targets_shape, true_targets_shape)) + + if weights is not None: + # Filtering targets and true_targets using weights. + weights_shape = weights.get_shape().as_list() + if np.any(weights_shape != targets_shape[0:2]): + raise ValueError('The first two elements of weights shape should match' + 'target. {} != {}'.format(weights_shape, + targets_shape)) + true_targets = tf.boolean_mask(true_targets, weights) + targets = tf.boolean_mask(targets, weights) + + return tf.losses.mean_squared_error(tf.reshape(targets, [-1]), + tf.reshape(true_targets, [-1])) + + def reward(self, obs, done, info): + raise NotImplementedError('reward is not implemented for this task') diff --git a/models/research/cognitive_planning/train_supervised_active_vision.py b/models/research/cognitive_planning/train_supervised_active_vision.py new file mode 100644 index 0000000000000000000000000000000000000000..5931a24e15b2402e07e774c1e78b03374cddfdce --- /dev/null +++ b/models/research/cognitive_planning/train_supervised_active_vision.py @@ -0,0 +1,503 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +# pylint: disable=line-too-long +# pyformat: disable +"""Train and eval for supervised navigation training. + +For training: +python train_supervised_active_vision.py \ + --mode='train' \ + --logdir=$logdir/checkin_log_det/ \ + --modality_types='det' \ + --batch_size=8 \ + --train_iters=200000 \ + --lstm_cell_size=2048 \ + --policy_fc_size=2048 \ + --sequence_length=20 \ + --max_eval_episode_length=100 \ + --test_iters=194 \ + --gin_config=envs/configs/active_vision_config.gin \ + --gin_params='ActiveVisionDatasetEnv.dataset_root="$datadir"' \ + --logtostderr + +For testing: +python train_supervised_active_vision.py + --mode='eval' \ + --logdir=$logdir/checkin_log_det/ \ + --modality_types='det' \ + --batch_size=8 \ + --train_iters=200000 \ + --lstm_cell_size=2048 \ + --policy_fc_size=2048 \ + --sequence_length=20 \ + --max_eval_episode_length=100 \ + --test_iters=194 \ + --gin_config=envs/configs/active_vision_config.gin \ + --gin_params='ActiveVisionDatasetEnv.dataset_root="$datadir"' \ + --logtostderr +""" + +import collections +import os +import time +from absl import app +from absl import flags +from absl import logging +import networkx as nx +import numpy as np +import tensorflow as tf +import gin +import embedders +import policies +import tasks +from envs import active_vision_dataset_env +from envs import task_env + +slim = tf.contrib.slim + +flags.DEFINE_string('logdir', '', + 'Path to a directory to write summaries and checkpoints') +# Parameters controlling the training setup. In general one would not need to +# modify them. +flags.DEFINE_string('master', 'local', + 'BNS name of the TensorFlow master, or local.') +flags.DEFINE_integer('task_id', 0, + 'Task id of the replica running the training.') +flags.DEFINE_integer('ps_tasks', 0, + 'Number of tasks in the ps job. 
If 0 no ps job is used.') + +flags.DEFINE_integer('decay_steps', 1000, + 'Number of steps for exponential decay.') +flags.DEFINE_float('learning_rate', 0.0001, 'Learning rate.') +flags.DEFINE_integer('batch_size', 8, 'Batch size.') +flags.DEFINE_integer('sequence_length', 20, 'sequence length') +flags.DEFINE_integer('train_iters', 200000, 'number of training iterations.') +flags.DEFINE_integer('save_summaries_secs', 300, + 'number of seconds between saving summaries') +flags.DEFINE_integer('save_interval_secs', 300, + 'numer of seconds between saving variables') +flags.DEFINE_integer('log_every_n_steps', 20, 'number of steps between logging') +flags.DEFINE_string('modality_types', '', + 'modality names in _ separated format') +flags.DEFINE_string('conv_window_sizes', '8_4_3', + 'conv window size in separated by _') +flags.DEFINE_string('conv_strides', '4_2_1', '') +flags.DEFINE_string('conv_channels', '8_16_16', '') +flags.DEFINE_integer('embedding_fc_size', 128, + 'size of embedding for each modality') +flags.DEFINE_integer('obs_resolution', 64, + 'resolution of the input observations') +flags.DEFINE_integer('lstm_cell_size', 2048, 'size of lstm cell size') +flags.DEFINE_integer('policy_fc_size', 2048, + 'size of fully connected layers for policy part') +flags.DEFINE_float('weight_decay', 0.0002, 'weight decay') +flags.DEFINE_integer('goal_category_count', 5, 'number of goal categories') +flags.DEFINE_integer('action_size', 7, 'number of possible actions') +flags.DEFINE_integer('max_eval_episode_length', 100, + 'maximum sequence length for evaluation.') +flags.DEFINE_enum('mode', 'train', ['train', 'eval'], + 'indicates whether it is in training or evaluation') +flags.DEFINE_integer('test_iters', 194, + 'number of iterations that the eval needs to be run') +flags.DEFINE_multi_string('gin_config', [], + 'List of paths to a gin config files for the env.') +flags.DEFINE_multi_string('gin_params', [], + 'Newline separated list of Gin parameter bindings.') +flags.DEFINE_string( + 'resnet50_path', './resnet_v2_50_checkpoint/resnet_v2_50.ckpt', 'path to resnet50' + 'checkpoint') +flags.DEFINE_bool('freeze_resnet_weights', True, '') +flags.DEFINE_string( + 'eval_init_points_file_name', '', + 'Name of the file that containts the initial locations and' + 'worlds for each evalution point') + +FLAGS = flags.FLAGS +TRAIN_WORLDS = [ + 'Home_001_1', 'Home_001_2', 'Home_002_1', 'Home_003_1', 'Home_003_2', + 'Home_004_1', 'Home_004_2', 'Home_005_1', 'Home_005_2', 'Home_006_1', + 'Home_010_1' +] + +TEST_WORLDS = ['Home_011_1', 'Home_013_1', 'Home_016_1'] + + +def create_modality_types(): + """Parses the modality_types and returns a list of task_env.ModalityType.""" + if not FLAGS.modality_types: + raise ValueError('there needs to be at least one modality type') + modality_types = FLAGS.modality_types.split('_') + for x in modality_types: + if x not in ['image', 'sseg', 'det', 'depth']: + raise ValueError('invalid modality type: {}'.format(x)) + + conversion_dict = { + 'image': task_env.ModalityTypes.IMAGE, + 'sseg': task_env.ModalityTypes.SEMANTIC_SEGMENTATION, + 'depth': task_env.ModalityTypes.DEPTH, + 'det': task_env.ModalityTypes.OBJECT_DETECTION, + } + return [conversion_dict[k] for k in modality_types] + + +def create_task_io_config( + modality_types, + goal_category_count, + action_size, + sequence_length, +): + """Generates task io config.""" + shape_prefix = [sequence_length, FLAGS.obs_resolution, FLAGS.obs_resolution] + shapes = { + task_env.ModalityTypes.IMAGE: [sequence_length, 224, 224, 3], + 
task_env.ModalityTypes.DEPTH: shape_prefix + [ + 2, + ], + task_env.ModalityTypes.SEMANTIC_SEGMENTATION: shape_prefix + [ + 1, + ], + task_env.ModalityTypes.OBJECT_DETECTION: shape_prefix + [ + 90, + ] + } + types = {k: tf.float32 for k in shapes} + types[task_env.ModalityTypes.IMAGE] = tf.uint8 + inputs = collections.OrderedDict( + [[mtype, (types[mtype], shapes[mtype])] for mtype in modality_types]) + inputs[task_env.ModalityTypes.GOAL] = (tf.float32, + [sequence_length, goal_category_count]) + inputs[task_env.ModalityTypes.PREV_ACTION] = (tf.float32, [ + sequence_length, action_size + 1 + ]) + print inputs + return tasks.UnrolledTaskIOConfig( + inputs=inputs, + output=(tf.float32, [sequence_length, action_size]), + query=None) + + +def map_to_embedder(modality_type): + """Maps modality_type to its corresponding embedder.""" + if modality_type == task_env.ModalityTypes.PREV_ACTION: + return None + if modality_type == task_env.ModalityTypes.GOAL: + return embedders.IdentityEmbedder() + if modality_type == task_env.ModalityTypes.IMAGE: + return embedders.ResNet50Embedder() + conv_window_sizes = [int(x) for x in FLAGS.conv_window_sizes.split('_')] + conv_channels = [int(x) for x in FLAGS.conv_channels.split('_')] + conv_strides = [int(x) for x in FLAGS.conv_strides.split('_')] + params = tf.contrib.training.HParams( + to_one_hot=modality_type == task_env.ModalityTypes.SEMANTIC_SEGMENTATION, + one_hot_length=10, + conv_sizes=conv_window_sizes, + conv_strides=conv_strides, + conv_channels=conv_channels, + embedding_size=FLAGS.embedding_fc_size, + weight_decay_rate=FLAGS.weight_decay, + ) + return embedders.SmallNetworkEmbedder(params) + + +def create_train_and_init_ops(policy, task): + """Creates training ops given the arguments. + + Args: + policy: the policy for the task. + task: the task instance. + + Returns: + train_op: the op that needs to be runned at each step. + summaries_op: the summary op that is executed. + init_fn: the op that initializes the variables if there is no previous + checkpoint. If Resnet50 is not used in the model it is None, otherwise + it reads the weights from FLAGS.resnet50_path and sets the init_fn + to the op that initializes the ResNet50 with the pre-trained weights. + """ + assert isinstance(task, tasks.GotoStaticXNoExplorationTask) + assert isinstance(policy, policies.Policy) + + inputs, _, gt_outputs, masks = task.tf_episode_batch(FLAGS.batch_size) + outputs, _ = policy.build(inputs, None) + loss = task.target_loss(gt_outputs, outputs, masks) + + init_fn = None + + # If resnet is added to the graph, init_fn should initialize resnet weights + # if there is no previous checkpoint. 
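+  # As an illustration of the name mapping built below (the exact scope prefix
+  # is model dependent): a model variable named e.g.
+  # 'LSTMPolicy/resnet_v2_50/conv1/weights:0' would be stored under the key
+  # 'resnet_v2_50/conv1/weights', i.e. everything before 'resnet' and the
+  # trailing ':0' is stripped so that the key matches the variable names in
+  # the pre-trained ResNet50 checkpoint.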
+ variables_assign_dict = {} + vars_list = [] + for v in slim.get_model_variables(): + if v.name.find('resnet') >= 0: + if not FLAGS.freeze_resnet_weights: + vars_list.append(v) + variables_assign_dict[v.name[v.name.find('resnet'):-2]] = v + else: + vars_list.append(v) + + global_step = tf.train.get_or_create_global_step() + learning_rate = tf.train.exponential_decay( + FLAGS.learning_rate, + global_step, + decay_steps=FLAGS.decay_steps, + decay_rate=0.98, + staircase=True) + optimizer = tf.train.AdamOptimizer(learning_rate) + train_op = slim.learning.create_train_op( + loss, + optimizer, + global_step=global_step, + variables_to_train=vars_list, + ) + + if variables_assign_dict: + init_fn = slim.assign_from_checkpoint_fn( + FLAGS.resnet50_path, + variables_assign_dict, + ignore_missing_vars=False) + scalar_summaries = {} + scalar_summaries['LR'] = learning_rate + scalar_summaries['loss'] = loss + + for name, summary in scalar_summaries.iteritems(): + tf.summary.scalar(name, summary) + + return train_op, init_fn + + +def create_eval_ops(policy, config, possible_targets): + """Creates the necessary ops for evaluation.""" + inputs_feed = collections.OrderedDict([[ + mtype, + tf.placeholder(config.inputs[mtype].type, + [1] + config.inputs[mtype].shape) + ] for mtype in config.inputs]) + inputs_feed[task_env.ModalityTypes.PREV_ACTION] = tf.placeholder( + tf.float32, [1, 1] + [ + config.output.shape[-1] + 1, + ]) + prev_state_feed = [ + tf.placeholder( + tf.float32, [1, FLAGS.lstm_cell_size], name='prev_state_{}'.format(i)) + for i in range(2) + ] + policy_outputs = policy.build(inputs_feed, prev_state_feed) + summary_feed = {} + for c in possible_targets + ['mean']: + summary_feed[c] = tf.placeholder( + tf.float32, [], name='eval_in_range_{}_input'.format(c)) + tf.summary.scalar('eval_in_range_{}'.format(c), summary_feed[c]) + + return inputs_feed, prev_state_feed, policy_outputs, (tf.summary.merge_all(), + summary_feed) + + +def unroll_policy_for_eval( + sess, + env, + inputs_feed, + prev_state_feed, + policy_outputs, + number_of_steps, + output_folder, +): + """unrolls the policy for testing. + + Args: + sess: tf.Session + env: The environment. + inputs_feed: dictionary of placeholder for the input modalities. + prev_state_feed: placeholder for the input to the prev_state of the model. + policy_outputs: tensor that contains outputs of the policy. + number_of_steps: maximum number of unrolling steps. + output_folder: output_folder where the function writes a dictionary of + detailed information about the path. The dictionary keys are 'states' and + 'distance'. The value for 'states' is the list of states that the agent + goes along the path. The value for 'distance' contains the length of + shortest path to the goal at each step. + + Returns: + states: list of states along the path. + distance: list of distances along the path. 
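+
+  The saved .npy file holds a single pickled dict, so it can be read back
+  with something like np.load(path).item(); with newer NumPy versions this
+  requires allow_pickle=True (the exact call is an assumption about the NumPy
+  version in use).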
+  """
+  prev_state = [
+      np.zeros((1, FLAGS.lstm_cell_size), dtype=np.float32) for _ in range(2)
+  ]
+  prev_action = np.zeros((1, 1, FLAGS.action_size + 1), dtype=np.float32)
+  obs = env.reset()
+  distances_to_goal = []
+  states = []
+  unique_id = '{}_{}'.format(env.cur_image_id(), env.goal_string)
+  for _ in range(number_of_steps):
+    distances_to_goal.append(
+        np.min([
+            len(
+                nx.shortest_path(env.graph, env.pose_to_vertex(env.state()),
+                                 env.pose_to_vertex(target_view)))
+            for target_view in env.targets()
+        ]))
+    states.append(env.state())
+    feed_dict = {inputs_feed[mtype]: [[obs[mtype]]] for mtype in inputs_feed}
+    feed_dict[prev_state_feed[0]] = prev_state[0]
+    feed_dict[prev_state_feed[1]] = prev_state[1]
+    action_values, prev_state = sess.run(policy_outputs, feed_dict=feed_dict)
+    chosen_action = np.argmax(action_values[0])
+    obs, _, done, info = env.step(np.int32(chosen_action))
+    prev_action[0][0][chosen_action] = 1.
+    prev_action[0][0][-1] = float(info['success'])
+    # Stop unrolling if the agent chooses the stop action or the number of
+    # steps exceeds env._episode_length.
+    if done:
+      break
+
+  # logging.info('distance = %d, id = %s, #steps = %d', distances_to_goal[-1],
+  output_path = os.path.join(output_folder, unique_id + '.npy')
+  with tf.gfile.Open(output_path, 'w') as f:
+    print 'saving path information to {}'.format(output_path)
+    np.save(f, {'states': states, 'distance': distances_to_goal})
+  return states, distances_to_goal
+
+
+def init(sequence_length, eval_init_points_file_name, worlds):
+  """Initializes the common operations between train and test."""
+  modality_types = create_modality_types()
+  logging.info('modality types: %r', modality_types)
+  # A negative reward_goal_range prevents the env from terminating early when
+  # the agent is close to the goal. The policy should keep the agent near the
+  # goal until the end of the 100 steps, either by choosing the stop action or
+  # by oscillating around the target.
+ + env = active_vision_dataset_env.ActiveVisionDatasetEnv( + modality_types=modality_types + + [task_env.ModalityTypes.GOAL, task_env.ModalityTypes.PREV_ACTION], + reward_goal_range=-1, + eval_init_points_file_name=eval_init_points_file_name, + worlds=worlds, + output_size=FLAGS.obs_resolution, + ) + + config = create_task_io_config( + modality_types=modality_types, + goal_category_count=FLAGS.goal_category_count, + action_size=FLAGS.action_size, + sequence_length=sequence_length, + ) + task = tasks.GotoStaticXNoExplorationTask(env=env, config=config) + embedders_dict = {mtype: map_to_embedder(mtype) for mtype in config.inputs} + policy_params = tf.contrib.training.HParams( + lstm_state_size=FLAGS.lstm_cell_size, + fc_channels=FLAGS.policy_fc_size, + weight_decay=FLAGS.weight_decay, + target_embedding_size=FLAGS.embedding_fc_size, + ) + policy = policies.LSTMPolicy( + modality_names=config.inputs.keys(), + embedders_dict=embedders_dict, + action_size=FLAGS.action_size, + params=policy_params, + max_episode_length=sequence_length) + return env, config, task, policy + + +def test(): + """Contains all the operations for testing policies.""" + env, config, _, policy = init(1, 'all_init_configs', TEST_WORLDS) + inputs_feed, prev_state_feed, policy_outputs, summary_op = create_eval_ops( + policy, config, env.possible_targets) + sv = tf.train.Supervisor(logdir=FLAGS.logdir) + prev_checkpoint = None + with sv.managed_session( + start_standard_services=False, + config=tf.ConfigProto(allow_soft_placement=True)) as sess: + while not sv.should_stop(): + while True: + new_checkpoint = tf.train.latest_checkpoint(FLAGS.logdir) + print 'new_checkpoint ', new_checkpoint + if not new_checkpoint: + time.sleep(1) + continue + if prev_checkpoint is None: + prev_checkpoint = new_checkpoint + break + if prev_checkpoint != new_checkpoint: + prev_checkpoint = new_checkpoint + break + else: # if prev_checkpoint == new_checkpoint, we have to wait more. 
+ time.sleep(1) + + checkpoint_step = int(new_checkpoint[new_checkpoint.rfind('-') + 1:]) + sv.saver.restore(sess, new_checkpoint) + print '--------------------' + print 'evaluating checkpoint {}'.format(new_checkpoint) + folder_path = os.path.join(FLAGS.logdir, 'evals', str(checkpoint_step)) + if not tf.gfile.Exists(folder_path): + tf.gfile.MakeDirs(folder_path) + eval_stats = {c: [] for c in env.possible_targets} + for test_iter in range(FLAGS.test_iters): + print 'evaluating {} of {}'.format(test_iter, FLAGS.test_iters) + _, distance_to_goal = unroll_policy_for_eval( + sess, + env, + inputs_feed, + prev_state_feed, + policy_outputs, + FLAGS.max_eval_episode_length, + folder_path, + ) + print 'goal = {}'.format(env.goal_string) + eval_stats[env.goal_string].append(float(distance_to_goal[-1] <= 7)) + eval_stats = {k: np.mean(v) for k, v in eval_stats.iteritems()} + eval_stats['mean'] = np.mean(eval_stats.values()) + print eval_stats + feed_dict = {summary_op[1][c]: eval_stats[c] for c in eval_stats} + summary_str = sess.run(summary_op[0], feed_dict=feed_dict) + writer = sv.summary_writer + writer.add_summary(summary_str, checkpoint_step) + writer.flush() + + +def train(): + _, _, task, policy = init(FLAGS.sequence_length, None, TRAIN_WORLDS) + print(FLAGS.save_summaries_secs) + print(FLAGS.save_interval_secs) + print(FLAGS.logdir) + + with tf.device( + tf.train.replica_device_setter(ps_tasks=FLAGS.ps_tasks, merge_devices=True)): + train_op, init_fn = create_train_and_init_ops(policy=policy, task=task) + print(FLAGS.logdir) + slim.learning.train( + train_op=train_op, + init_fn=init_fn, + logdir=FLAGS.logdir, + is_chief=FLAGS.task_id == 0, + number_of_steps=FLAGS.train_iters, + save_summaries_secs=FLAGS.save_summaries_secs, + save_interval_secs=FLAGS.save_interval_secs, + session_config=tf.ConfigProto(allow_soft_placement=True), + ) + + +def main(_): + gin.parse_config_files_and_bindings(FLAGS.gin_config, FLAGS.gin_params) + if FLAGS.mode == 'train': + train() + else: + test() + + +if __name__ == '__main__': + app.run(main) diff --git a/models/research/cognitive_planning/train_supervised_active_vision.sh b/models/research/cognitive_planning/train_supervised_active_vision.sh new file mode 100644 index 0000000000000000000000000000000000000000..f2ea22753443cce89bf1e78a3d8fae920e5e7266 --- /dev/null +++ b/models/research/cognitive_planning/train_supervised_active_vision.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +# blaze build -c opt train_supervised_active_vision +# bazel build -c opt --config=cuda --copt=-mavx train_supervised_active_vision && \ +bazel-bin/research/cognitive_planning/train_supervised_active_vision \ + --mode='train' \ + --logdir=/usr/local/google/home/kosecka/local_avd_train/ \ + --modality_types='det' \ + --batch_size=8 \ + --train_iters=200000 \ + --lstm_cell_size=2048 \ + --policy_fc_size=2048 \ + --sequence_length=20 \ + --max_eval_episode_length=100 \ + --test_iters=194 \ + --gin_config=envs/configs/active_vision_config.gin \ + --gin_params='ActiveVisionDatasetEnv.dataset_root="/cns/jn-d/home/kosecka/AVD_Minimal/"' \ + --logtostderr diff --git a/models/research/cognitive_planning/visualization_utils.py b/models/research/cognitive_planning/visualization_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7a7aeb50561dba9f8713d12a184ddd824c3c0e19 --- /dev/null +++ b/models/research/cognitive_planning/visualization_utils.py @@ -0,0 +1,733 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A set of functions that are used for visualization. + +These functions often receive an image, perform some visualization on the image. +The functions do not return a value, instead they modify the image itself. + +""" +import collections +import functools +# Set headless-friendly backend. 
+import matplotlib; matplotlib.use('Agg') # pylint: disable=multiple-statements +import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top +import numpy as np +import PIL.Image as Image +import PIL.ImageColor as ImageColor +import PIL.ImageDraw as ImageDraw +import PIL.ImageFont as ImageFont +import six +import tensorflow as tf + +import standard_fields as fields + + +_TITLE_LEFT_MARGIN = 10 +_TITLE_TOP_MARGIN = 10 +STANDARD_COLORS = [ + 'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque', + 'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite', + 'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan', + 'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange', + 'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet', + 'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite', + 'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod', + 'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki', + 'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue', + 'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey', + 'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue', + 'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime', + 'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid', + 'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen', + 'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin', + 'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed', + 'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed', + 'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple', + 'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown', + 'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue', + 'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow', + 'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White', + 'WhiteSmoke', 'Yellow', 'YellowGreen' +] + + +def save_image_array_as_png(image, output_path): + """Saves an image (represented as a numpy array) to PNG. + + Args: + image: a numpy array with shape [height, width, 3]. + output_path: path to which image should be written. + """ + image_pil = Image.fromarray(np.uint8(image)).convert('RGB') + with tf.gfile.Open(output_path, 'w') as fid: + image_pil.save(fid, 'PNG') + + +def encode_image_array_as_png_str(image): + """Encodes a numpy array into a PNG string. + + Args: + image: a numpy array with shape [height, width, 3]. + + Returns: + PNG encoded image string. + """ + image_pil = Image.fromarray(np.uint8(image)) + output = six.BytesIO() + image_pil.save(output, format='PNG') + png_string = output.getvalue() + output.close() + return png_string + + +def draw_bounding_box_on_image_array(image, + ymin, + xmin, + ymax, + xmax, + color='red', + thickness=4, + display_str_list=(), + use_normalized_coordinates=True): + """Adds a bounding box to an image (numpy array). + + Bounding box coordinates can be specified in either absolute (pixel) or + normalized coordinates by setting the use_normalized_coordinates argument. + + Args: + image: a numpy array with shape [height, width, 3]. + ymin: ymin of bounding box. + xmin: xmin of bounding box. + ymax: ymax of bounding box. + xmax: xmax of bounding box. + color: color to draw bounding box. Default is red. + thickness: line thickness. 
Default value is 4. + display_str_list: list of strings to display in box + (each to be shown on its own line). + use_normalized_coordinates: If True (default), treat coordinates + ymin, xmin, ymax, xmax as relative to the image. Otherwise treat + coordinates as absolute. + """ + image_pil = Image.fromarray(np.uint8(image)).convert('RGB') + draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, color, + thickness, display_str_list, + use_normalized_coordinates) + np.copyto(image, np.array(image_pil)) + + +def draw_bounding_box_on_image(image, + ymin, + xmin, + ymax, + xmax, + color='red', + thickness=4, + display_str_list=(), + use_normalized_coordinates=True): + """Adds a bounding box to an image. + + Bounding box coordinates can be specified in either absolute (pixel) or + normalized coordinates by setting the use_normalized_coordinates argument. + + Each string in display_str_list is displayed on a separate line above the + bounding box in black text on a rectangle filled with the input 'color'. + If the top of the bounding box extends to the edge of the image, the strings + are displayed below the bounding box. + + Args: + image: a PIL.Image object. + ymin: ymin of bounding box. + xmin: xmin of bounding box. + ymax: ymax of bounding box. + xmax: xmax of bounding box. + color: color to draw bounding box. Default is red. + thickness: line thickness. Default value is 4. + display_str_list: list of strings to display in box + (each to be shown on its own line). + use_normalized_coordinates: If True (default), treat coordinates + ymin, xmin, ymax, xmax as relative to the image. Otherwise treat + coordinates as absolute. + """ + draw = ImageDraw.Draw(image) + im_width, im_height = image.size + if use_normalized_coordinates: + (left, right, top, bottom) = (xmin * im_width, xmax * im_width, + ymin * im_height, ymax * im_height) + else: + (left, right, top, bottom) = (xmin, xmax, ymin, ymax) + draw.line([(left, top), (left, bottom), (right, bottom), + (right, top), (left, top)], width=thickness, fill=color) + try: + font = ImageFont.truetype('arial.ttf', 24) + except IOError: + font = ImageFont.load_default() + + # If the total height of the display strings added to the top of the bounding + # box exceeds the top of the image, stack the strings below the bounding box + # instead of above. + display_str_heights = [font.getsize(ds)[1] for ds in display_str_list] + # Each display_str has a top and bottom margin of 0.05x. + total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights) + + if top > total_display_str_height: + text_bottom = top + else: + text_bottom = bottom + total_display_str_height + # Reverse list and print from bottom to top. + for display_str in display_str_list[::-1]: + text_width, text_height = font.getsize(display_str) + margin = np.ceil(0.05 * text_height) + draw.rectangle( + [(left, text_bottom - text_height - 2 * margin), (left + text_width, + text_bottom)], + fill=color) + draw.text( + (left + margin, text_bottom - text_height - margin), + display_str, + fill='black', + font=font) + text_bottom -= text_height - 2 * margin + + +def draw_bounding_boxes_on_image_array(image, + boxes, + color='red', + thickness=4, + display_str_list_list=()): + """Draws bounding boxes on image (numpy array). + + Args: + image: a numpy array object. + boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax). + The coordinates are in normalized format between [0, 1]. + color: color to draw bounding box. Default is red. + thickness: line thickness. 
Default value is 4. + display_str_list_list: list of list of strings. + a list of strings for each bounding box. + The reason to pass a list of strings for a + bounding box is that it might contain + multiple labels. + + Raises: + ValueError: if boxes is not a [N, 4] array + """ + image_pil = Image.fromarray(image) + draw_bounding_boxes_on_image(image_pil, boxes, color, thickness, + display_str_list_list) + np.copyto(image, np.array(image_pil)) + + +def draw_bounding_boxes_on_image(image, + boxes, + color='red', + thickness=4, + display_str_list_list=()): + """Draws bounding boxes on image. + + Args: + image: a PIL.Image object. + boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax). + The coordinates are in normalized format between [0, 1]. + color: color to draw bounding box. Default is red. + thickness: line thickness. Default value is 4. + display_str_list_list: list of list of strings. + a list of strings for each bounding box. + The reason to pass a list of strings for a + bounding box is that it might contain + multiple labels. + + Raises: + ValueError: if boxes is not a [N, 4] array + """ + boxes_shape = boxes.shape + if not boxes_shape: + return + if len(boxes_shape) != 2 or boxes_shape[1] != 4: + raise ValueError('Input must be of size [N, 4]') + for i in range(boxes_shape[0]): + display_str_list = () + if display_str_list_list: + display_str_list = display_str_list_list[i] + draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2], + boxes[i, 3], color, thickness, display_str_list) + + +def _visualize_boxes(image, boxes, classes, scores, category_index, **kwargs): + return visualize_boxes_and_labels_on_image_array( + image, boxes, classes, scores, category_index=category_index, **kwargs) + + +def _visualize_boxes_and_masks(image, boxes, classes, scores, masks, + category_index, **kwargs): + return visualize_boxes_and_labels_on_image_array( + image, + boxes, + classes, + scores, + category_index=category_index, + instance_masks=masks, + **kwargs) + + +def _visualize_boxes_and_keypoints(image, boxes, classes, scores, keypoints, + category_index, **kwargs): + return visualize_boxes_and_labels_on_image_array( + image, + boxes, + classes, + scores, + category_index=category_index, + keypoints=keypoints, + **kwargs) + + +def _visualize_boxes_and_masks_and_keypoints( + image, boxes, classes, scores, masks, keypoints, category_index, **kwargs): + return visualize_boxes_and_labels_on_image_array( + image, + boxes, + classes, + scores, + category_index=category_index, + instance_masks=masks, + keypoints=keypoints, + **kwargs) + + +def draw_bounding_boxes_on_image_tensors(images, + boxes, + classes, + scores, + category_index, + instance_masks=None, + keypoints=None, + max_boxes_to_draw=20, + min_score_thresh=0.2, + use_normalized_coordinates=True): + """Draws bounding boxes, masks, and keypoints on batch of image tensors. + + Args: + images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional + channels will be ignored. + boxes: [N, max_detections, 4] float32 tensor of detection boxes. + classes: [N, max_detections] int tensor of detection classes. Note that + classes are 1-indexed. + scores: [N, max_detections] float32 tensor of detection scores. + category_index: a dict that maps integer ids to category dicts. e.g. + {1: {1: 'dog'}, 2: {2: 'cat'}, ...} + instance_masks: A 4D uint8 tensor of shape [N, max_detection, H, W] with + instance masks. + keypoints: A 4D float32 tensor of shape [N, max_detection, num_keypoints, 2] + with keypoints. 
+ max_boxes_to_draw: Maximum number of boxes to draw on an image. Default 20. + min_score_thresh: Minimum score threshold for visualization. Default 0.2. + use_normalized_coordinates: Whether to assume boxes and kepoints are in + normalized coordinates (as opposed to absolute coordiantes). + Default is True. + + Returns: + 4D image tensor of type uint8, with boxes drawn on top. + """ + # Additional channels are being ignored. + images = images[:, :, :, 0:3] + visualization_keyword_args = { + 'use_normalized_coordinates': use_normalized_coordinates, + 'max_boxes_to_draw': max_boxes_to_draw, + 'min_score_thresh': min_score_thresh, + 'agnostic_mode': False, + 'line_thickness': 4 + } + + if instance_masks is not None and keypoints is None: + visualize_boxes_fn = functools.partial( + _visualize_boxes_and_masks, + category_index=category_index, + **visualization_keyword_args) + elems = [images, boxes, classes, scores, instance_masks] + elif instance_masks is None and keypoints is not None: + visualize_boxes_fn = functools.partial( + _visualize_boxes_and_keypoints, + category_index=category_index, + **visualization_keyword_args) + elems = [images, boxes, classes, scores, keypoints] + elif instance_masks is not None and keypoints is not None: + visualize_boxes_fn = functools.partial( + _visualize_boxes_and_masks_and_keypoints, + category_index=category_index, + **visualization_keyword_args) + elems = [images, boxes, classes, scores, instance_masks, keypoints] + else: + visualize_boxes_fn = functools.partial( + _visualize_boxes, + category_index=category_index, + **visualization_keyword_args) + elems = [images, boxes, classes, scores] + + def draw_boxes(image_and_detections): + """Draws boxes on image.""" + image_with_boxes = tf.py_func(visualize_boxes_fn, image_and_detections, + tf.uint8) + return image_with_boxes + + images = tf.map_fn(draw_boxes, elems, dtype=tf.uint8, back_prop=False) + return images + + +def draw_side_by_side_evaluation_image(eval_dict, + category_index, + max_boxes_to_draw=20, + min_score_thresh=0.2, + use_normalized_coordinates=True): + """Creates a side-by-side image with detections and groundtruth. + + Bounding boxes (and instance masks, if available) are visualized on both + subimages. + + Args: + eval_dict: The evaluation dictionary returned by + eval_util.result_dict_for_single_example(). + category_index: A category index (dictionary) produced from a labelmap. + max_boxes_to_draw: The maximum number of boxes to draw for detections. + min_score_thresh: The minimum score threshold for showing detections. + use_normalized_coordinates: Whether to assume boxes and kepoints are in + normalized coordinates (as opposed to absolute coordiantes). + Default is True. + + Returns: + A [1, H, 2 * W, C] uint8 tensor. The subimage on the left corresponds to + detections, while the subimage on the right corresponds to groundtruth. 
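+
+  Example (illustrative sketch only; assumes `eval_dict` was produced by
+  eval_util.result_dict_for_single_example() and `category_index` was built
+  from the label map used during evaluation):
+
+    side_by_side = draw_side_by_side_evaluation_image(
+        eval_dict, category_index, max_boxes_to_draw=20, min_score_thresh=0.2)
+    tf.summary.image('Detections_Left_Groundtruth_Right', side_by_side)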
+ """ + detection_fields = fields.DetectionResultFields() + input_data_fields = fields.InputDataFields() + instance_masks = None + if detection_fields.detection_masks in eval_dict: + instance_masks = tf.cast( + tf.expand_dims(eval_dict[detection_fields.detection_masks], axis=0), + tf.uint8) + keypoints = None + if detection_fields.detection_keypoints in eval_dict: + keypoints = tf.expand_dims( + eval_dict[detection_fields.detection_keypoints], axis=0) + groundtruth_instance_masks = None + if input_data_fields.groundtruth_instance_masks in eval_dict: + groundtruth_instance_masks = tf.cast( + tf.expand_dims( + eval_dict[input_data_fields.groundtruth_instance_masks], axis=0), + tf.uint8) + images_with_detections = draw_bounding_boxes_on_image_tensors( + eval_dict[input_data_fields.original_image], + tf.expand_dims(eval_dict[detection_fields.detection_boxes], axis=0), + tf.expand_dims(eval_dict[detection_fields.detection_classes], axis=0), + tf.expand_dims(eval_dict[detection_fields.detection_scores], axis=0), + category_index, + instance_masks=instance_masks, + keypoints=keypoints, + max_boxes_to_draw=max_boxes_to_draw, + min_score_thresh=min_score_thresh, + use_normalized_coordinates=use_normalized_coordinates) + images_with_groundtruth = draw_bounding_boxes_on_image_tensors( + eval_dict[input_data_fields.original_image], + tf.expand_dims(eval_dict[input_data_fields.groundtruth_boxes], axis=0), + tf.expand_dims(eval_dict[input_data_fields.groundtruth_classes], axis=0), + tf.expand_dims( + tf.ones_like( + eval_dict[input_data_fields.groundtruth_classes], + dtype=tf.float32), + axis=0), + category_index, + instance_masks=groundtruth_instance_masks, + keypoints=None, + max_boxes_to_draw=None, + min_score_thresh=0.0, + use_normalized_coordinates=use_normalized_coordinates) + return tf.concat([images_with_detections, images_with_groundtruth], axis=2) + + +def draw_keypoints_on_image_array(image, + keypoints, + color='red', + radius=2, + use_normalized_coordinates=True): + """Draws keypoints on an image (numpy array). + + Args: + image: a numpy array with shape [height, width, 3]. + keypoints: a numpy array with shape [num_keypoints, 2]. + color: color to draw the keypoints with. Default is red. + radius: keypoint radius. Default value is 2. + use_normalized_coordinates: if True (default), treat keypoint values as + relative to the image. Otherwise treat them as absolute. + """ + image_pil = Image.fromarray(np.uint8(image)).convert('RGB') + draw_keypoints_on_image(image_pil, keypoints, color, radius, + use_normalized_coordinates) + np.copyto(image, np.array(image_pil)) + + +def draw_keypoints_on_image(image, + keypoints, + color='red', + radius=2, + use_normalized_coordinates=True): + """Draws keypoints on an image. + + Args: + image: a PIL.Image object. + keypoints: a numpy array with shape [num_keypoints, 2]. + color: color to draw the keypoints with. Default is red. + radius: keypoint radius. Default value is 2. + use_normalized_coordinates: if True (default), treat keypoint values as + relative to the image. Otherwise treat them as absolute. 
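+
+  Example (illustrative sketch; the file path and keypoint values below are
+  made up for demonstration):
+
+    image = Image.open('/tmp/example.jpg')
+    keypoints = np.array([[0.25, 0.5], [0.75, 0.5]])  # (y, x) pairs in [0, 1]
+    draw_keypoints_on_image(image, keypoints, color='red', radius=2,
+                            use_normalized_coordinates=True)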
+ """ + draw = ImageDraw.Draw(image) + im_width, im_height = image.size + keypoints_x = [k[1] for k in keypoints] + keypoints_y = [k[0] for k in keypoints] + if use_normalized_coordinates: + keypoints_x = tuple([im_width * x for x in keypoints_x]) + keypoints_y = tuple([im_height * y for y in keypoints_y]) + for keypoint_x, keypoint_y in zip(keypoints_x, keypoints_y): + draw.ellipse([(keypoint_x - radius, keypoint_y - radius), + (keypoint_x + radius, keypoint_y + radius)], + outline=color, fill=color) + + +def draw_mask_on_image_array(image, mask, color='red', alpha=0.4): + """Draws mask on an image. + + Args: + image: uint8 numpy array with shape (img_height, img_height, 3) + mask: a uint8 numpy array of shape (img_height, img_height) with + values between either 0 or 1. + color: color to draw the keypoints with. Default is red. + alpha: transparency value between 0 and 1. (default: 0.4) + + Raises: + ValueError: On incorrect data type for image or masks. + """ + if image.dtype != np.uint8: + raise ValueError('`image` not of type np.uint8') + if mask.dtype != np.uint8: + raise ValueError('`mask` not of type np.uint8') + if np.any(np.logical_and(mask != 1, mask != 0)): + raise ValueError('`mask` elements should be in [0, 1]') + if image.shape[:2] != mask.shape: + raise ValueError('The image has spatial dimensions %s but the mask has ' + 'dimensions %s' % (image.shape[:2], mask.shape)) + rgb = ImageColor.getrgb(color) + pil_image = Image.fromarray(image) + + solid_color = np.expand_dims( + np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3]) + pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA') + pil_mask = Image.fromarray(np.uint8(255.0*alpha*mask)).convert('L') + pil_image = Image.composite(pil_solid_color, pil_image, pil_mask) + np.copyto(image, np.array(pil_image.convert('RGB'))) + + +def visualize_boxes_and_labels_on_image_array( + image, + boxes, + classes, + scores, + category_index, + instance_masks=None, + instance_boundaries=None, + keypoints=None, + use_normalized_coordinates=False, + max_boxes_to_draw=20, + min_score_thresh=.5, + agnostic_mode=False, + line_thickness=4, + groundtruth_box_visualization_color='black', + skip_scores=False, + skip_labels=False): + """Overlay labeled boxes on an image with formatted scores and label names. + + This function groups boxes that correspond to the same location + and creates a display string for each detection and overlays these + on the image. Note that this function modifies the image in place, and returns + that same image. + + Args: + image: uint8 numpy array with shape (img_height, img_width, 3) + boxes: a numpy array of shape [N, 4] + classes: a numpy array of shape [N]. Note that class indices are 1-based, + and match the keys in the label map. + scores: a numpy array of shape [N] or None. If scores=None, then + this function assumes that the boxes to be plotted are groundtruth + boxes and plot all boxes as black with no classes or scores. + category_index: a dict containing category dictionaries (each holding + category index `id` and category name `name`) keyed by category indices. + instance_masks: a numpy array of shape [N, image_height, image_width] with + values ranging between 0 and 1, can be None. + instance_boundaries: a numpy array of shape [N, image_height, image_width] + with values ranging between 0 and 1, can be None. 
+ keypoints: a numpy array of shape [N, num_keypoints, 2], can + be None + use_normalized_coordinates: whether boxes is to be interpreted as + normalized coordinates or not. + max_boxes_to_draw: maximum number of boxes to visualize. If None, draw + all boxes. + min_score_thresh: minimum score threshold for a box to be visualized + agnostic_mode: boolean (default: False) controlling whether to evaluate in + class-agnostic mode or not. This mode will display scores but ignore + classes. + line_thickness: integer (default: 4) controlling line width of the boxes. + groundtruth_box_visualization_color: box color for visualizing groundtruth + boxes + skip_scores: whether to skip score when drawing a single detection + skip_labels: whether to skip label when drawing a single detection + + Returns: + uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes. + """ + # Create a display string (and color) for every box location, group any boxes + # that correspond to the same location. + box_to_display_str_map = collections.defaultdict(list) + box_to_color_map = collections.defaultdict(str) + box_to_instance_masks_map = {} + box_to_instance_boundaries_map = {} + box_to_keypoints_map = collections.defaultdict(list) + if not max_boxes_to_draw: + max_boxes_to_draw = boxes.shape[0] + for i in range(min(max_boxes_to_draw, boxes.shape[0])): + if scores is None or scores[i] > min_score_thresh: + box = tuple(boxes[i].tolist()) + if instance_masks is not None: + box_to_instance_masks_map[box] = instance_masks[i] + if instance_boundaries is not None: + box_to_instance_boundaries_map[box] = instance_boundaries[i] + if keypoints is not None: + box_to_keypoints_map[box].extend(keypoints[i]) + if scores is None: + box_to_color_map[box] = groundtruth_box_visualization_color + else: + display_str = '' + if not skip_labels: + if not agnostic_mode: + if classes[i] in category_index.keys(): + class_name = category_index[classes[i]]['name'] + else: + class_name = 'N/A' + display_str = str(class_name) + if not skip_scores: + if not display_str: + display_str = '{}%'.format(int(100*scores[i])) + else: + display_str = '{}: {}%'.format(display_str, int(100*scores[i])) + box_to_display_str_map[box].append(display_str) + if agnostic_mode: + box_to_color_map[box] = 'DarkOrange' + else: + box_to_color_map[box] = STANDARD_COLORS[ + classes[i] % len(STANDARD_COLORS)] + + # Draw all boxes onto image. + for box, color in box_to_color_map.items(): + ymin, xmin, ymax, xmax = box + if instance_masks is not None: + draw_mask_on_image_array( + image, + box_to_instance_masks_map[box], + color=color + ) + if instance_boundaries is not None: + draw_mask_on_image_array( + image, + box_to_instance_boundaries_map[box], + color='red', + alpha=1.0 + ) + draw_bounding_box_on_image_array( + image, + ymin, + xmin, + ymax, + xmax, + color=color, + thickness=line_thickness, + display_str_list=box_to_display_str_map[box], + use_normalized_coordinates=use_normalized_coordinates) + if keypoints is not None: + draw_keypoints_on_image_array( + image, + box_to_keypoints_map[box], + color=color, + radius=line_thickness / 2, + use_normalized_coordinates=use_normalized_coordinates) + + return image + + +def add_cdf_image_summary(values, name): + """Adds a tf.summary.image for a CDF plot of the values. + + Normalizes `values` such that they sum to 1, plots the cumulative distribution + function and creates a tf image summary. + + Args: + values: a 1-D float32 tensor containing the values. + name: name for the image summary. 
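+
+  Example (illustrative sketch; the values below are placeholders):
+
+    losses = tf.constant([0.1, 0.3, 0.2, 0.9], dtype=tf.float32)
+    add_cdf_image_summary(losses, 'LossCDF')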
+ """ + def cdf_plot(values): + """Numpy function to plot CDF.""" + normalized_values = values / np.sum(values) + sorted_values = np.sort(normalized_values) + cumulative_values = np.cumsum(sorted_values) + fraction_of_examples = (np.arange(cumulative_values.size, dtype=np.float32) + / cumulative_values.size) + fig = plt.figure(frameon=False) + ax = fig.add_subplot('111') + ax.plot(fraction_of_examples, cumulative_values) + ax.set_ylabel('cumulative normalized values') + ax.set_xlabel('fraction of examples') + fig.canvas.draw() + width, height = fig.get_size_inches() * fig.get_dpi() + image = np.fromstring(fig.canvas.tostring_rgb(), dtype='uint8').reshape( + 1, int(height), int(width), 3) + return image + cdf_plot = tf.py_func(cdf_plot, [values], tf.uint8) + tf.summary.image(name, cdf_plot) + + +def add_hist_image_summary(values, bins, name): + """Adds a tf.summary.image for a histogram plot of the values. + + Plots the histogram of values and creates a tf image summary. + + Args: + values: a 1-D float32 tensor containing the values. + bins: bin edges which will be directly passed to np.histogram. + name: name for the image summary. + """ + + def hist_plot(values, bins): + """Numpy function to plot hist.""" + fig = plt.figure(frameon=False) + ax = fig.add_subplot('111') + y, x = np.histogram(values, bins=bins) + ax.plot(x[:-1], y) + ax.set_ylabel('count') + ax.set_xlabel('value') + fig.canvas.draw() + width, height = fig.get_size_inches() * fig.get_dpi() + image = np.fromstring( + fig.canvas.tostring_rgb(), dtype='uint8').reshape( + 1, int(height), int(width), 3) + return image + hist_plot = tf.py_func(hist_plot, [values, bins], tf.uint8) + tf.summary.image(name, hist_plot) diff --git a/models/research/cognitive_planning/viz_active_vision_dataset_main.py b/models/research/cognitive_planning/viz_active_vision_dataset_main.py new file mode 100644 index 0000000000000000000000000000000000000000..e6b7deef63e4675fe0c1c05f0c0f4139af9c34de --- /dev/null +++ b/models/research/cognitive_planning/viz_active_vision_dataset_main.py @@ -0,0 +1,379 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Initializes at random location and visualizes the optimal path. + +Different modes of execution: +1) benchmark: It generates benchmark_iter sample trajectory to random goals + and plots the histogram of path lengths. It can be also used to see how fast + it runs. +2) vis: It visualizes the generated paths by image, semantic segmentation, and + so on. +3) human: allows the user to navigate through environment from keyboard input. 
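+4) graph: iterates over the environment worlds and checks their scene graphs
+   for a sample target class.
+5) eval: reads the .npy evaluation files under --eval_folder, reports the best
+   checkpoint by success rate, and writes per-step images and gifs for that
+   checkpoint to --output_folder.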
+ +python viz_active_vision_dataset_main -- \ + --mode=benchmark --benchmark_iter=1000 --gin_config=envs/configs/active_vision_config.gin + +python viz_active_vision_dataset_main -- \ + --mode=vis \ + --gin_config=envs/configs/active_vision_config.gin + +python viz_active_vision_dataset_main -- \ + --mode=human \ + --gin_config=envs/configs/active_vision_config.gin + +python viz_active_vision_dataset_main.py --mode=eval --eval_folder=/usr/local/google/home/$USER/checkin_log_det/evals/ --output_folder=/usr/local/google/home/$USER/test_imgs/ --gin_config=envs/configs/active_vision_config.gin + +""" + +import matplotlib +# pylint: disable=g-import-not-at-top +# Need Tk for interactive plots. +matplotlib.use('TkAgg') +import tensorflow as tf +from matplotlib import pyplot as plt +import numpy as np +import os +from pyglib import app +from pyglib import flags +import gin +import cv2 +from envs import active_vision_dataset_env +from envs import task_env + + +VIS_MODE = 'vis' +HUMAN_MODE = 'human' +BENCHMARK_MODE = 'benchmark' +GRAPH_MODE = 'graph' +EVAL_MODE = 'eval' + +flags.DEFINE_enum('mode', VIS_MODE, + [VIS_MODE, HUMAN_MODE, BENCHMARK_MODE, GRAPH_MODE, EVAL_MODE], + 'mode of the execution') +flags.DEFINE_integer('benchmark_iter', 1000, + 'number of iterations for benchmarking') +flags.DEFINE_string('eval_folder', '', 'the path to the eval folder') +flags.DEFINE_string('output_folder', '', + 'the path to which the images and gifs are written') +flags.DEFINE_multi_string('gin_config', [], + 'List of paths to a gin config files for the env.') +flags.DEFINE_multi_string('gin_params', [], + 'Newline separated list of Gin parameter bindings.') + +mt = task_env.ModalityTypes +FLAGS = flags.FLAGS + +def benchmark(env, targets): + """Benchmarks the speed of sequence generation by env. + + Args: + env: environment. + targets: list of target classes. + """ + episode_lengths = {} + all_init_configs = {} + all_actions = dict([(a, 0.) for a in env.actions]) + for i in range(FLAGS.benchmark_iter): + path, actions, _, _ = env.random_step_sequence() + selected_actions = np.argmax(actions, axis=-1) + new_actions = dict([(a, 0.) for a in env.actions]) + for a in selected_actions: + new_actions[env.actions[a]] += 1. 
/ selected_actions.shape[0] + for a in new_actions: + all_actions[a] += new_actions[a] / FLAGS.benchmark_iter + start_image_id, world, goal = env.get_init_config(path) + print world + if world not in all_init_configs: + all_init_configs[world] = set() + all_init_configs[world].add((start_image_id, goal, len(actions))) + if env.goal_index not in episode_lengths: + episode_lengths[env.goal_index] = [] + episode_lengths[env.goal_index].append(len(actions)) + for i, cls in enumerate(episode_lengths): + plt.subplot(231 + i) + plt.hist(episode_lengths[cls]) + plt.title(targets[cls]) + plt.show() + + +def human(env, targets): + """Lets user play around the env manually.""" + string_key_map = { + 'a': 'left', + 'd': 'right', + 'w': 'forward', + 's': 'backward', + 'j': 'rotate_ccw', + 'l': 'rotate_cw', + 'n': 'stop' + } + integer_key_map = { + 'a': env.actions.index('left'), + 'd': env.actions.index('right'), + 'w': env.actions.index('forward'), + 's': env.actions.index('backward'), + 'j': env.actions.index('rotate_ccw'), + 'l': env.actions.index('rotate_cw'), + 'n': env.actions.index('stop') + } + for k in integer_key_map: + integer_key_map[k] = np.int32(integer_key_map[k]) + plt.ion() + for _ in range(20): + obs = env.reset() + steps = -1 + action = None + while True: + print 'distance = ', obs[task_env.ModalityTypes.DISTANCE] + steps += 1 + depth_value = obs[task_env.ModalityTypes.DEPTH][:, :, 0] + depth_mask = obs[task_env.ModalityTypes.DEPTH][:, :, 1] + seg_mask = np.squeeze(obs[task_env.ModalityTypes.SEMANTIC_SEGMENTATION]) + det_mask = np.argmax( + obs[task_env.ModalityTypes.OBJECT_DETECTION], axis=-1) + img = obs[task_env.ModalityTypes.IMAGE] + plt.subplot(231) + plt.title('steps = {}'.format(steps)) + plt.imshow(img.astype(np.uint8)) + plt.subplot(232) + plt.imshow(depth_value) + plt.title('depth value') + plt.subplot(233) + plt.imshow(depth_mask) + plt.title('depth mask') + plt.subplot(234) + plt.imshow(seg_mask) + plt.title('seg') + plt.subplot(235) + plt.imshow(det_mask) + plt.title('det') + plt.subplot(236) + plt.title('goal={}'.format(targets[env.goal_index])) + plt.draw() + while True: + s = raw_input('key = ') + if np.random.rand() > 0.5: + key_map = string_key_map + else: + key_map = integer_key_map + if s in key_map: + action = key_map[s] + break + else: + print 'invalid action' + print 'action = {}'.format(action) + if action == 'stop': + print 'dist to goal: {}'.format(len(env.path_to_goal()) - 2) + break + obs, reward, done, info = env.step(action) + print 'reward = {}, done = {}, success = {}'.format( + reward, done, info['success']) + + +def visualize_random_step_sequence(env): + """Visualizes random sequence of steps.""" + plt.ion() + for _ in range(20): + path, actions, _, step_outputs = env.random_step_sequence(max_len=30) + print 'path = {}'.format(path) + for action, step_output in zip(actions, step_outputs): + obs, _, done, _ = step_output + depth_value = obs[task_env.ModalityTypes.DEPTH][:, :, 0] + depth_mask = obs[task_env.ModalityTypes.DEPTH][:, :, 1] + seg_mask = np.squeeze(obs[task_env.ModalityTypes.SEMANTIC_SEGMENTATION]) + det_mask = np.argmax( + obs[task_env.ModalityTypes.OBJECT_DETECTION], axis=-1) + img = obs[task_env.ModalityTypes.IMAGE] + plt.subplot(231) + plt.imshow(img.astype(np.uint8)) + plt.subplot(232) + plt.imshow(depth_value) + plt.title('depth value') + plt.subplot(233) + plt.imshow(depth_mask) + plt.title('depth mask') + plt.subplot(234) + plt.imshow(seg_mask) + plt.title('seg') + plt.subplot(235) + plt.imshow(det_mask) + plt.title('det') + 
plt.subplot(236) + print 'action = {}'.format(action) + print 'done = {}'.format(done) + plt.draw() + if raw_input('press \'n\' to go to the next random sequence. Otherwise, ' + 'press any key to continue...') == 'n': + break + + +def visualize(env, input_folder, output_root_folder): + """visualizes images for sequence of steps from the evals folder.""" + def which_env(file_name): + img_name = file_name.split('_')[0][2:5] + env_dict = {'161': 'Home_016_1', '131': 'Home_013_1', '111': 'Home_011_1'} + if img_name in env_dict: + return env_dict[img_name] + else: + raise ValueError('could not resolve env: {} {}'.format( + img_name, file_name)) + + def which_goal(file_name): + return file_name[file_name.find('_')+1:] + + output_images_folder = os.path.join(output_root_folder, 'images') + output_gifs_folder = os.path.join(output_root_folder, 'gifs') + if not tf.gfile.IsDirectory(output_images_folder): + tf.gfile.MakeDirs(output_images_folder) + if not tf.gfile.IsDirectory(output_gifs_folder): + tf.gfile.MakeDirs(output_gifs_folder) + npy_files = [ + os.path.join(input_folder, name) + for name in tf.gfile.ListDirectory(input_folder) + if name.find('npy') >= 0 + ] + for i, npy_file in enumerate(npy_files): + print 'saving images {}/{}'.format(i, len(npy_files)) + pure_name = npy_file[npy_file.rfind('/') + 1:-4] + output_folder = os.path.join(output_images_folder, pure_name) + if not tf.gfile.IsDirectory(output_folder): + tf.gfile.MakeDirs(output_folder) + print '*******' + print pure_name[0:pure_name.find('_')] + env.reset_for_eval(which_env(pure_name), + which_goal(pure_name), + pure_name[0:pure_name.find('_')], + ) + with tf.gfile.Open(npy_file) as h: + states = np.load(h).item()['states'] + images = [ + env.observation(state)[mt.IMAGE] for state in states + ] + for j, img in enumerate(images): + cv2.imwrite(os.path.join(output_folder, '{0:03d}'.format(j) + '.jpg'), + img[:, :, ::-1]) + print 'converting to gif' + os.system( + 'convert -set delay 20 -colors 256 -dispose 1 {}/*.jpg {}.gif'.format( + output_folder, + os.path.join(output_gifs_folder, pure_name + '.gif') + ) + ) + +def evaluate_folder(env, folder_path): + """Evaluates the performance from the evals folder.""" + targets = ['fridge', 'dining_table', 'microwave', 'tv', 'couch'] + + def compute_acc(npy_file): + with tf.gfile.Open(npy_file) as h: + data = np.load(h).item() + if npy_file.find('dining_table') >= 0: + category = 'dining_table' + else: + category = npy_file[npy_file.rfind('_') + 1:-4] + return category, data['distance'][-1] - 2 + + def evaluate_iteration(folder): + """Evaluates the data from the folder of certain eval iteration.""" + print folder + npy_files = [ + os.path.join(folder, name) + for name in tf.gfile.ListDirectory(folder) + if name.find('npy') >= 0 + ] + eval_stats = {c: [] for c in targets} + for npy_file in npy_files: + try: + category, dist = compute_acc(npy_file) + except: # pylint: disable=bare-except + continue + eval_stats[category].append(float(dist <= 5)) + for c in eval_stats: + if not eval_stats[c]: + print 'incomplete eval {}: empty class {}'.format(folder_path, c) + return None + eval_stats[c] = np.mean(eval_stats[c]) + + eval_stats['mean'] = np.mean(eval_stats.values()) + return eval_stats + + checkpoint_folders = [ + folder_path + x + for x in tf.gfile.ListDirectory(folder_path) + if tf.gfile.IsDirectory(folder_path + x) + ] + + print '{} folders found'.format(len(checkpoint_folders)) + print '------------------------' + all_iters = [] + all_accs = [] + for i, folder in 
enumerate(checkpoint_folders): + print 'processing {}/{}'.format(i, len(checkpoint_folders)) + eval_stats = evaluate_iteration(folder) + if eval_stats is None: + continue + else: + iter_no = int(folder[folder.rfind('/') + 1:]) + print 'result ', iter_no, eval_stats['mean'] + all_accs.append(eval_stats['mean']) + all_iters.append(iter_no) + + all_accs = np.asarray(all_accs) + all_iters = np.asarray(all_iters) + idx = np.argmax(all_accs) + print 'best result at iteration {} was {}'.format(all_iters[idx], + all_accs[idx]) + order = np.argsort(all_iters) + all_iters = all_iters[order] + all_accs = all_accs[order] + #plt.plot(all_iters, all_accs) + #plt.show() + #print 'done plotting' + + best_iteration_folder = os.path.join(folder_path, str(all_iters[idx])) + + print 'generating gifs and images for {}'.format(best_iteration_folder) + visualize(env, best_iteration_folder, FLAGS.output_folder) + + +def main(_): + gin.parse_config_files_and_bindings(FLAGS.gin_config, FLAGS.gin_params) + print('********') + print(FLAGS.mode) + print(FLAGS.gin_config) + print(FLAGS.gin_params) + + env = active_vision_dataset_env.ActiveVisionDatasetEnv(modality_types=[ + task_env.ModalityTypes.IMAGE, + task_env.ModalityTypes.SEMANTIC_SEGMENTATION, + task_env.ModalityTypes.OBJECT_DETECTION, task_env.ModalityTypes.DEPTH, + task_env.ModalityTypes.DISTANCE + ]) + + if FLAGS.mode == BENCHMARK_MODE: + benchmark(env, env.possible_targets) + elif FLAGS.mode == GRAPH_MODE: + for loc in env.worlds: + env.check_scene_graph(loc, 'fridge') + elif FLAGS.mode == HUMAN_MODE: + human(env, env.possible_targets) + elif FLAGS.mode == VIS_MODE: + visualize_random_step_sequence(env) + elif FLAGS.mode == EVAL_MODE: + evaluate_folder(env, FLAGS.eval_folder) + +if __name__ == '__main__': + app.run(main) diff --git a/models/research/compression/README.md b/models/research/compression/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7f431b5eac6805fbecc276783cef2bc6c62068e5 --- /dev/null +++ b/models/research/compression/README.md @@ -0,0 +1,19 @@ +![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +# Compression with Neural Networks + +This is a [TensorFlow](http://www.tensorflow.org/) model repo containing +research on compression with neural networks. This repo currently contains +code for the following papers: + +[Full Resolution Image Compression with Recurrent Neural Networks](https://arxiv.org/abs/1608.05148) + +## Organization +[Image Encoder](image_encoder/): Encoding and decoding images into their binary representation. + +[Entropy Coder](entropy_coder/): Lossless compression of the binary representation. + +## Contact Info +Model repository maintained by Nick Johnston ([nmjohn](https://github.com/nmjohn)). 
diff --git a/models/research/compression/entropy_coder/README.md b/models/research/compression/entropy_coder/README.md new file mode 100644 index 0000000000000000000000000000000000000000..59e889990aab71e12ed13122c9b5a796a048402a --- /dev/null +++ b/models/research/compression/entropy_coder/README.md @@ -0,0 +1,109 @@ +# Neural net based entropy coding + +This is a [TensorFlow](http://www.tensorflow.org/) model for additional +lossless compression of bitstreams generated by neural net based image +encoders as described in +[https://arxiv.org/abs/1703.10114](https://arxiv.org/abs/1703.10114). + +To be more specific, the entropy coder aims at compressing further binary +codes which have a 3D tensor structure with: + +* the first two dimensions of the tensors corresponding to the height and +the width of the binary codes, +* the last dimension being the depth of the codes. The last dimension can be +sliced into N groups of K, where each additional group is used by the image +decoder to add more details to the reconstructed image. + +The code in this directory only contains the underlying code probability model +but does not perform the actual compression using arithmetic coding. +The code probability model is enough to compute the theoretical compression +ratio. + + +## Prerequisites +The only software requirements for running the encoder and decoder is having +Tensorflow installed. + +You will also need to add the top level source directory of the entropy coder +to your `PYTHONPATH`, for example: + +`export PYTHONPATH=${PYTHONPATH}:/tmp/models/compression` + + +## Training the entropy coder + +### Synthetic dataset +If you do not have a training dataset, there is a simple code generative model +that you can use to generate a dataset and play with the entropy coder. +The generative model is located under dataset/gen\_synthetic\_dataset.py. Note +that this simple generative model is not going to give good results on real +images as it is not supposed to be close to the statistics of the binary +representation of encoded images. Consider it as a toy dataset, no more, no +less. + +To generate a synthetic dataset with 20000 samples: + +`mkdir -p /tmp/dataset` + +`python ./dataset/gen_synthetic_dataset.py --dataset_dir=/tmp/dataset/ +--count=20000` + +Note that the generator has not been optimized at all, generating the synthetic +dataset is currently pretty slow. + +### Training + +If you just want to play with the entropy coder trainer, here is the command +line that can be used to train the entropy coder on the synthetic dataset: + +`mkdir -p /tmp/entropy_coder_train` + +`python ./core/entropy_coder_train.py --task=0 +--train_dir=/tmp/entropy_coder_train/ +--model=progressive +--model_config=./configs/synthetic/model_config.json +--train_config=./configs/synthetic/train_config.json +--input_config=./configs/synthetic/input_config.json +` + +Training is configured using 3 files formatted using JSON: + +* One file is used to configure the underlying entropy coder model. + Currently, only the *progressive* model is supported. + This model takes 2 mandatory parameters and an optional one: + * `layer_depth`: the number of bits per layer (a.k.a. iteration). + Background: the image decoder takes each layer to add more detail + to the image. + * `layer_count`: the maximum number of layers that should be supported + by the model. This should be equal or greater than the maximum number + of layers in the input binary codes. 
+ * `coded_layer_count`: This can be used to consider only partial codes, + keeping only the first `coded_layer_count` layers and ignoring the + remaining layers. If left empty, the binary codes are left unchanged. +* One file to configure the training, including the learning rate, ... + The meaning of the parameters are pretty straightforward. Note that this + file is only used during training and is not needed during inference. +* One file to specify the input dataset to use during training. + The dataset is formatted using tf.RecordIO. + + +## Inference: file size after entropy coding. + +### Using a synthetic sample + +Here is the command line to generate a single synthetic sample formatted +in the same way as what is provided by the image encoder: + +`python ./dataset/gen_synthetic_single.py +--sample_filename=/tmp/dataset/sample_0000.npz` + +To actually compute the additional compression ratio using the entropy coder +trained in the previous step: + +`python ./core/entropy_coder_single.py +--model=progressive +--model_config=./configs/synthetic/model_config.json +--input_codes=/tmp/dataset/sample_0000.npz +--checkpoint=/tmp/entropy_coder_train/model.ckpt-209078` + +where the checkpoint number should be adjusted accordingly. diff --git a/models/research/compression/entropy_coder/__init__.py b/models/research/compression/entropy_coder/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/compression/entropy_coder/all_models/__init__.py b/models/research/compression/entropy_coder/all_models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/compression/entropy_coder/all_models/all_models.py b/models/research/compression/entropy_coder/all_models/all_models.py new file mode 100644 index 0000000000000000000000000000000000000000..e376dac737667a348065eec622920b0a81ed1ac9 --- /dev/null +++ b/models/research/compression/entropy_coder/all_models/all_models.py @@ -0,0 +1,19 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Import and register all the entropy coder models.""" + +# pylint: disable=unused-import +from entropy_coder.progressive import progressive diff --git a/models/research/compression/entropy_coder/all_models/all_models_test.py b/models/research/compression/entropy_coder/all_models/all_models_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b8aff504a0a00d579d1b2768164b78b6c095b235 --- /dev/null +++ b/models/research/compression/entropy_coder/all_models/all_models_test.py @@ -0,0 +1,68 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Basic test of all registered models.""" + +import tensorflow as tf + +# pylint: disable=unused-import +import all_models +# pylint: enable=unused-import +from entropy_coder.model import model_factory + + +class AllModelsTest(tf.test.TestCase): + + def testBuildModelForTraining(self): + factory = model_factory.GetModelRegistry() + model_names = factory.GetAvailableModels() + + for m in model_names: + tf.reset_default_graph() + + global_step = tf.Variable(tf.zeros([], dtype=tf.int64), + trainable=False, + name='global_step') + + optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1) + + batch_size = 3 + height = 40 + width = 20 + depth = 5 + binary_codes = tf.placeholder(dtype=tf.float32, + shape=[batch_size, height, width, depth]) + + # Create a model with the default configuration. + print('Creating model: {}'.format(m)) + model = factory.CreateModel(m) + model.Initialize(global_step, + optimizer, + model.GetConfigStringForUnitTest()) + self.assertTrue(model.loss is None, 'model: {}'.format(m)) + self.assertTrue(model.train_op is None, 'model: {}'.format(m)) + self.assertTrue(model.average_code_length is None, 'model: {}'.format(m)) + + # Build the Tensorflow graph corresponding to the model. + model.BuildGraph(binary_codes) + self.assertTrue(model.loss is not None, 'model: {}'.format(m)) + self.assertTrue(model.average_code_length is not None, + 'model: {}'.format(m)) + if model.train_op is None: + print('Model {} is not trainable'.format(m)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/compression/entropy_coder/configs/gru_prime3/model_config.json b/models/research/compression/entropy_coder/configs/gru_prime3/model_config.json new file mode 100644 index 0000000000000000000000000000000000000000..cf63a4c454df5c47c732c5eaeea481b2aa714665 --- /dev/null +++ b/models/research/compression/entropy_coder/configs/gru_prime3/model_config.json @@ -0,0 +1,4 @@ +{ + "layer_count": 16, + "layer_depth": 32 +} diff --git a/models/research/compression/entropy_coder/configs/synthetic/input_config.json b/models/research/compression/entropy_coder/configs/synthetic/input_config.json new file mode 100644 index 0000000000000000000000000000000000000000..18455e65120cd45cb04106ed8b6b2d6641e1d49a --- /dev/null +++ b/models/research/compression/entropy_coder/configs/synthetic/input_config.json @@ -0,0 +1,4 @@ +{ + "data": "/tmp/dataset/synthetic_dataset", + "unique_code_size": true +} diff --git a/models/research/compression/entropy_coder/configs/synthetic/model_config.json b/models/research/compression/entropy_coder/configs/synthetic/model_config.json new file mode 100644 index 0000000000000000000000000000000000000000..c6f1f3e11547a75c05019e24c59a7fc6d2a29e3b --- /dev/null +++ b/models/research/compression/entropy_coder/configs/synthetic/model_config.json @@ -0,0 +1,4 @@ +{ + "layer_depth": 2, + "layer_count": 8 +} diff --git a/models/research/compression/entropy_coder/configs/synthetic/train_config.json 
b/models/research/compression/entropy_coder/configs/synthetic/train_config.json new file mode 100644 index 0000000000000000000000000000000000000000..79e4909fd3f93df983d79890e25b7b61ba14aa40 --- /dev/null +++ b/models/research/compression/entropy_coder/configs/synthetic/train_config.json @@ -0,0 +1,6 @@ +{ + "batch_size": 4, + "learning_rate": 0.1, + "decay_rate": 0.9, + "samples_per_decay": 20000 +} diff --git a/models/research/compression/entropy_coder/core/code_loader.py b/models/research/compression/entropy_coder/core/code_loader.py new file mode 100644 index 0000000000000000000000000000000000000000..603ab724afb0e6c4e94db9c121d7799eaf30fa02 --- /dev/null +++ b/models/research/compression/entropy_coder/core/code_loader.py @@ -0,0 +1,73 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Load binary codes stored as tf.Example in a TFRecord table.""" + +import tensorflow as tf + + +def ReadFirstCode(dataset): + """Read the first example from a binary code RecordIO table.""" + for record in tf.python_io.tf_record_iterator(dataset): + tf_example = tf.train.Example() + tf_example.ParseFromString(record) + break + return tf_example + + +def LoadBinaryCode(input_config, batch_size): + """Load a batch of binary codes from a tf.Example dataset. + + Args: + input_config: An InputConfig proto containing the input configuration. + batch_size: Output batch size of examples. + + Returns: + A batched tensor of binary codes. + """ + data = input_config.data + + # TODO: Possibly use multiple files (instead of just one). + file_list = [data] + filename_queue = tf.train.string_input_producer(file_list, + capacity=4) + reader = tf.TFRecordReader() + _, values = reader.read(filename_queue) + + serialized_example = tf.reshape(values, shape=[1]) + serialized_features = { + 'code_shape': tf.FixedLenFeature([3], + dtype=tf.int64), + 'code': tf.VarLenFeature(tf.float32), + } + example = tf.parse_example(serialized_example, serialized_features) + + # 3D shape: height x width x binary_code_depth + z = example['code_shape'] + code_shape = tf.reshape(tf.cast(z, tf.int32), [3]) + # Un-flatten the binary codes. 
+ code = tf.reshape(tf.sparse_tensor_to_dense(example['code']), code_shape) + + queue_size = 10 + queue = tf.PaddingFIFOQueue( + queue_size + 3 * batch_size, + dtypes=[code.dtype], + shapes=[[None, None, None]]) + enqueue_op = queue.enqueue([code]) + dequeue_code = queue.dequeue_many(batch_size) + queue_runner = tf.train.queue_runner.QueueRunner(queue, [enqueue_op]) + tf.add_to_collection(tf.GraphKeys.QUEUE_RUNNERS, queue_runner) + + return dequeue_code diff --git a/models/research/compression/entropy_coder/core/config_helper.py b/models/research/compression/entropy_coder/core/config_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..a7d949e329b93f33d330d1ba494f71ae1704fa3f --- /dev/null +++ b/models/research/compression/entropy_coder/core/config_helper.py @@ -0,0 +1,52 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Helper functions used in both train and inference.""" + +import json +import os.path + +import tensorflow as tf + + +def GetConfigString(config_file): + config_string = '' + if config_file is not None: + config_string = open(config_file).read() + return config_string + + +class InputConfig(object): + + def __init__(self, config_string): + config = json.loads(config_string) + self.data = config["data"] + self.unique_code_size = config["unique_code_size"] + + +class TrainConfig(object): + + def __init__(self, config_string): + config = json.loads(config_string) + self.batch_size = config["batch_size"] + self.learning_rate = config["learning_rate"] + self.decay_rate = config["decay_rate"] + self.samples_per_decay = config["samples_per_decay"] + + +def SaveConfig(directory, filename, config_string): + path = os.path.join(directory, filename) + with tf.gfile.Open(path, mode='w') as f: + f.write(config_string) diff --git a/models/research/compression/entropy_coder/core/entropy_coder_single.py b/models/research/compression/entropy_coder/core/entropy_coder_single.py new file mode 100644 index 0000000000000000000000000000000000000000..8a61b488b6bdd11e1cff4a2da672129240eb7240 --- /dev/null +++ b/models/research/compression/entropy_coder/core/entropy_coder_single.py @@ -0,0 +1,116 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Compute the additional compression ratio after entropy coding.""" + +import io +import os + +import numpy as np +import tensorflow as tf + +import config_helper + +# pylint: disable=unused-import +from entropy_coder.all_models import all_models +# pylint: enable=unused-import +from entropy_coder.model import model_factory + + +# Checkpoint used to restore the model parameters. +tf.app.flags.DEFINE_string('checkpoint', None, + """Model checkpoint.""") + +# Model selection and configuration. +tf.app.flags.DEFINE_string('model', None, """Underlying encoder model.""") +tf.app.flags.DEFINE_string('model_config', None, + """Model config protobuf given as text file.""") + +# File holding the binary codes. +tf.flags.DEFINE_string('input_codes', None, 'Location of binary code file.') + +FLAGS = tf.flags.FLAGS + + +def main(_): + if (FLAGS.input_codes is None or FLAGS.model is None): + print ('\nUsage: python entropy_coder_single.py --model=progressive ' + '--model_config=model_config.json' + '--iteration=15\n\n') + return + + #if FLAGS.iteration < -1 or FLAGS.iteration > 15: + # print ('\n--iteration must be between 0 and 15 inclusive, or -1 to infer ' + # 'from file.\n') + # return + #iteration = FLAGS.iteration + + if not tf.gfile.Exists(FLAGS.input_codes): + print('\nInput codes not found.\n') + return + + with tf.gfile.FastGFile(FLAGS.input_codes, 'rb') as code_file: + contents = code_file.read() + loaded_codes = np.load(io.BytesIO(contents)) + assert ['codes', 'shape'] not in loaded_codes.files + loaded_shape = loaded_codes['shape'] + loaded_array = loaded_codes['codes'] + + # Unpack and recover code shapes. + unpacked_codes = np.reshape(np.unpackbits(loaded_array) + [:np.prod(loaded_shape)], + loaded_shape) + + numpy_int_codes = unpacked_codes.transpose([1, 2, 3, 0, 4]) + numpy_int_codes = numpy_int_codes.reshape([numpy_int_codes.shape[0], + numpy_int_codes.shape[1], + numpy_int_codes.shape[2], + -1]) + numpy_codes = numpy_int_codes.astype(np.float32) * 2.0 - 1.0 + + with tf.Graph().as_default() as graph: + # TF tensor to hold the binary codes to losslessly compress. + batch_size = 1 + codes = tf.placeholder(tf.float32, shape=numpy_codes.shape) + + # Create the entropy coder model. + global_step = None + optimizer = None + model = model_factory.GetModelRegistry().CreateModel(FLAGS.model) + model_config_string = config_helper.GetConfigString(FLAGS.model_config) + model.Initialize(global_step, optimizer, model_config_string) + model.BuildGraph(codes) + + saver = tf.train.Saver(sharded=True, keep_checkpoint_every_n_hours=12.0) + + with tf.Session(graph=graph) as sess: + # Initialize local variables. + sess.run(tf.local_variables_initializer()) + + # Restore model variables. 
+ saver.restore(sess, FLAGS.checkpoint) + + tf_tensors = { + 'code_length': model.average_code_length + } + feed_dict = {codes: numpy_codes} + np_tensors = sess.run(tf_tensors, feed_dict=feed_dict) + + print('Additional compression ratio: {}'.format( + np_tensors['code_length'])) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/compression/entropy_coder/core/entropy_coder_train.py b/models/research/compression/entropy_coder/core/entropy_coder_train.py new file mode 100644 index 0000000000000000000000000000000000000000..27c489037d27095b578aed6ad10a5a190ec49b18 --- /dev/null +++ b/models/research/compression/entropy_coder/core/entropy_coder_train.py @@ -0,0 +1,184 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Train an entropy coder model.""" + +import time + +import tensorflow as tf + +import code_loader +import config_helper + +# pylint: disable=unused-import +from entropy_coder.all_models import all_models +# pylint: enable=unused-import +from entropy_coder.model import model_factory + + +FLAGS = tf.app.flags.FLAGS + +# Hardware resources configuration. +tf.app.flags.DEFINE_string('master', '', + """Name of the TensorFlow master to use.""") +tf.app.flags.DEFINE_string('train_dir', None, + """Directory where to write event logs.""") +tf.app.flags.DEFINE_integer('task', None, + """Task id of the replica running the training.""") +tf.app.flags.DEFINE_integer('ps_tasks', 0, """Number of tasks in the ps job. + If 0 no ps job is used.""") + +# Model selection and configuration. +tf.app.flags.DEFINE_string('model', None, """Underlying encoder model.""") +tf.app.flags.DEFINE_string('model_config', None, + """Model config protobuf given as text file.""") + +# Training data and parameters configuration. +tf.app.flags.DEFINE_string('input_config', None, + """Path to the training input config file.""") +tf.app.flags.DEFINE_string('train_config', None, + """Path to the training experiment config file.""") + + +def train(): + if FLAGS.train_dir is None: + raise ValueError('Parameter train_dir must be provided') + if FLAGS.task is None: + raise ValueError('Parameter task must be provided') + if FLAGS.model is None: + raise ValueError('Parameter model must be provided') + + input_config_string = config_helper.GetConfigString(FLAGS.input_config) + input_config = config_helper.InputConfig(input_config_string) + + # Training parameters. + train_config_string = config_helper.GetConfigString(FLAGS.train_config) + train_config = config_helper.TrainConfig(train_config_string) + + batch_size = train_config.batch_size + initial_learning_rate = train_config.learning_rate + decay_rate = train_config.decay_rate + samples_per_decay = train_config.samples_per_decay + + # Parameters for learning-rate decay. + # The formula is decay_rate ** floor(steps / decay_steps). 
+ decay_steps = samples_per_decay / batch_size + decay_steps = max(decay_steps, 1) + + first_code = code_loader.ReadFirstCode(input_config.data) + first_code_height = ( + first_code.features.feature['code_shape'].int64_list.value[0]) + first_code_width = ( + first_code.features.feature['code_shape'].int64_list.value[1]) + max_bit_depth = ( + first_code.features.feature['code_shape'].int64_list.value[2]) + print('Maximum code depth: {}'.format(max_bit_depth)) + + with tf.Graph().as_default(): + ps_ops = ["Variable", "VariableV2", "AutoReloadVariable", "VarHandleOp"] + with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks, + ps_ops=ps_ops)): + codes = code_loader.LoadBinaryCode( + input_config=input_config, + batch_size=batch_size) + if input_config.unique_code_size: + print('Input code size: {} x {}'.format(first_code_height, + first_code_width)) + codes.set_shape( + [batch_size, first_code_height, first_code_width, max_bit_depth]) + else: + codes.set_shape([batch_size, None, None, max_bit_depth]) + codes_effective_shape = tf.shape(codes) + + global_step = tf.contrib.framework.create_global_step() + + # Apply learning-rate decay. + learning_rate = tf.train.exponential_decay( + learning_rate=initial_learning_rate, + global_step=global_step, + decay_steps=decay_steps, + decay_rate=decay_rate, + staircase=True) + tf.summary.scalar('Learning Rate', learning_rate) + optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, + epsilon=1.0) + + # Create the entropy coder model. + model = model_factory.GetModelRegistry().CreateModel(FLAGS.model) + model_config_string = config_helper.GetConfigString(FLAGS.model_config) + model.Initialize(global_step, optimizer, model_config_string) + model.BuildGraph(codes) + + summary_op = tf.summary.merge_all() + + # Verify that the model can actually be trained. + if model.train_op is None: + raise ValueError('Input model {} is not trainable'.format(FLAGS.model)) + + # We disable the summary thread run by Supervisor class by passing + # summary_op=None. We still pass save_summaries_secs because it is used by + # the global step counter thread. + is_chief = (FLAGS.task == 0) + sv = tf.train.Supervisor(logdir=FLAGS.train_dir, + is_chief=is_chief, + global_step=global_step, + # saver=model.saver, + summary_op=None, + save_summaries_secs=120, + save_model_secs=600, + recovery_wait_secs=30) + + sess = sv.PrepareSession(FLAGS.master) + sv.StartQueueRunners(sess) + + step = sess.run(global_step) + print('Trainer initial step: {}.'.format(step)) + + # Once everything has been setup properly, save the configs. + if is_chief: + config_helper.SaveConfig(FLAGS.train_dir, 'input_config.json', + input_config_string) + config_helper.SaveConfig(FLAGS.train_dir, 'model_config.json', + model_config_string) + config_helper.SaveConfig(FLAGS.train_dir, 'train_config.json', + train_config_string) + + # Train the model. + next_summary_time = time.time() + while not sv.ShouldStop(): + feed_dict = None + + # Once in a while, update the summaries on the chief worker. 
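+      # All other iterations (and all non-chief workers) run a single
+      # training step and print the resulting average code length.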
+ if is_chief and next_summary_time < time.time(): + summary_str = sess.run(summary_op, feed_dict=feed_dict) + sv.SummaryComputed(sess, summary_str) + next_summary_time = time.time() + sv.save_summaries_secs + else: + tf_tensors = { + 'train': model.train_op, + 'code_length': model.average_code_length + } + np_tensors = sess.run(tf_tensors, feed_dict=feed_dict) + print(np_tensors['code_length']) + + sv.Stop() + + +def main(argv=None): # pylint: disable=unused-argument + train() + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/compression/entropy_coder/dataset/gen_synthetic_dataset.py b/models/research/compression/entropy_coder/dataset/gen_synthetic_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..de60aee324d4a6209d00a873ee681aa59aae0d8e --- /dev/null +++ b/models/research/compression/entropy_coder/dataset/gen_synthetic_dataset.py @@ -0,0 +1,89 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Generate a synthetic dataset.""" + +import os + +import numpy as np +from six.moves import xrange +import tensorflow as tf + +import synthetic_model + + +FLAGS = tf.app.flags.FLAGS + +tf.app.flags.DEFINE_string( + 'dataset_dir', None, + """Directory where to write the dataset and the configs.""") +tf.app.flags.DEFINE_integer( + 'count', 1000, + """Number of samples to generate.""") + + +def int64_feature(values): + """Returns a TF-Feature of int64s. + + Args: + values: A scalar or list of values. + + Returns: + A TF-Feature. + """ + if not isinstance(values, (tuple, list)): + values = [values] + return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) + + +def float_feature(values): + """Returns a TF-Feature of floats. + + Args: + values: A scalar of list of values. + + Returns: + A TF-Feature. + """ + if not isinstance(values, (tuple, list)): + values = [values] + return tf.train.Feature(float_list=tf.train.FloatList(value=values)) + + +def AddToTFRecord(code, tfrecord_writer): + example = tf.train.Example(features=tf.train.Features(feature={ + 'code_shape': int64_feature(code.shape), + 'code': float_feature(code.flatten().tolist()), + })) + tfrecord_writer.write(example.SerializeToString()) + + +def GenerateDataset(filename, count, code_shape): + with tf.python_io.TFRecordWriter(filename) as tfrecord_writer: + for _ in xrange(count): + code = synthetic_model.GenerateSingleCode(code_shape) + # Convert {0,1} codes to {-1,+1} codes. 
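+      # For example, a bit equal to 0 is mapped to -1.0 and a bit equal
+      # to 1 is mapped to +1.0.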
+      code = 2.0 * code - 1.0
+      AddToTFRecord(code, tfrecord_writer)
+
+
+def main(argv=None):  # pylint: disable=unused-argument
+  GenerateDataset(os.path.join(FLAGS.dataset_dir, 'synthetic_dataset'),
+                  FLAGS.count,
+                  [35, 48, 8])
+
+
+if __name__ == '__main__':
+  tf.app.run()
diff --git a/models/research/compression/entropy_coder/dataset/gen_synthetic_single.py b/models/research/compression/entropy_coder/dataset/gen_synthetic_single.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8c3821c38b6a0b95f01ad7ffb283cca4beb34b3
--- /dev/null
+++ b/models/research/compression/entropy_coder/dataset/gen_synthetic_single.py
@@ -0,0 +1,72 @@
+# Copyright 2016 The TensorFlow Authors All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+"""Generate a single synthetic sample."""
+
+import io
+import os
+
+import numpy as np
+import tensorflow as tf
+
+import synthetic_model
+
+
+FLAGS = tf.app.flags.FLAGS
+
+tf.app.flags.DEFINE_string(
+    'sample_filename', None,
+    """Output file to store the generated binary code.""")
+
+
+def GenerateSample(filename, code_shape, layer_depth):
+  # {0, +1} binary codes.
+  # No conversion since the output file is expected to store
+  # codes using {0, +1} codes (and not {-1, +1}).
+  code = synthetic_model.GenerateSingleCode(code_shape)
+  code = np.round(code)
+
+  # Reformat the code so as to be compatible with what is generated
+  # by the image encoder.
+  # The image encoder generates a tensor of size:
+  # iteration_count x batch_size x height x width x iteration_depth.
+  # Here: batch_size = 1
+  if code_shape[-1] % layer_depth != 0:
+    raise ValueError('The code depth must be a multiple of layer_depth.')
+  height = code_shape[0]
+  width = code_shape[1]
+  code = code.reshape([1, height, width, -1, layer_depth])
+  code = np.transpose(code, [3, 0, 1, 2, 4])
+
+  int_codes = code.astype(np.int8)
+  exported_codes = np.packbits(int_codes.reshape(-1))
+
+  output = io.BytesIO()
+  np.savez_compressed(output, shape=int_codes.shape, codes=exported_codes)
+  with tf.gfile.FastGFile(filename, 'wb') as code_file:
+    code_file.write(output.getvalue())
+
+
+def main(argv=None):  # pylint: disable=unused-argument
+  # Note: the height and the width are different from the training dataset.
+  # The main purpose is to show that the entropy coder model is fully
+  # convolutional and can be used on any image size.
+  layer_depth = 2
+  GenerateSample(FLAGS.sample_filename, [31, 36, 8], layer_depth)
+
+
+if __name__ == '__main__':
+  tf.app.run()
+
diff --git a/models/research/compression/entropy_coder/dataset/synthetic_model.py b/models/research/compression/entropy_coder/dataset/synthetic_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..9cccb64a136aba5a623c95e7c2dede2191d2cd62
--- /dev/null
+++ b/models/research/compression/entropy_coder/dataset/synthetic_model.py
@@ -0,0 +1,75 @@
+# Copyright 2016 The TensorFlow Authors All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Binary code sample generator.""" + +import numpy as np +from six.moves import xrange + + +_CRC_LINE = [ + [0, 1, 0], + [1, 1, 0], + [1, 0, 0] +] + +_CRC_DEPTH = [1, 1, 0, 1] + + +def ComputeLineCrc(code, width, y, x, d): + crc = 0 + for dy in xrange(len(_CRC_LINE)): + i = y - 1 - dy + if i < 0: + continue + for dx in xrange(len(_CRC_LINE[dy])): + j = x - 2 + dx + if j < 0 or j >= width: + continue + crc += 1 if (code[i, j, d] != _CRC_LINE[dy][dx]) else 0 + return crc + + +def ComputeDepthCrc(code, y, x, d): + crc = 0 + for delta in xrange(len(_CRC_DEPTH)): + k = d - 1 - delta + if k < 0: + continue + crc += 1 if (code[y, x, k] != _CRC_DEPTH[delta]) else 0 + return crc + + +def GenerateSingleCode(code_shape): + code = np.zeros(code_shape, dtype=np.int) + + keep_value_proba = 0.8 + + height = code_shape[0] + width = code_shape[1] + depth = code_shape[2] + + for d in xrange(depth): + for y in xrange(height): + for x in xrange(width): + v1 = ComputeLineCrc(code, width, y, x, d) + v2 = ComputeDepthCrc(code, y, x, d) + v = 1 if (v1 + v2 >= 6) else 0 + if np.random.rand() < keep_value_proba: + code[y, x, d] = v + else: + code[y, x, d] = 1 - v + + return code diff --git a/models/research/compression/entropy_coder/lib/__init__.py b/models/research/compression/entropy_coder/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/compression/entropy_coder/lib/block_base.py b/models/research/compression/entropy_coder/lib/block_base.py new file mode 100644 index 0000000000000000000000000000000000000000..615dff82829dbbcab46c7217cd35f6259de01161 --- /dev/null +++ b/models/research/compression/entropy_coder/lib/block_base.py @@ -0,0 +1,258 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Base class for Tensorflow building blocks.""" + +import collections +import contextlib +import itertools + +import tensorflow as tf + +_block_stacks = collections.defaultdict(lambda: []) + + +class BlockBase(object): + """Base class for transform wrappers of Tensorflow. + + To implement a Tensorflow transform block, inherit this class. + + 1. To create a variable, use NewVar() method. Do not overload this method! + For example, use as follows. 
+ a_variable = self.NewVar(initial_value) + + 2. All Tensorflow-related code must be done inside 'with self._BlockScope().' + Otherwise, name scoping and block hierarchy will not work. An exception + is _Apply() method, which is already called inside the context manager + by __call__() method. + + 3. Override and implement _Apply() method. This method is called by + __call__() method. + + The users would use blocks like the following. + nn1 = NN(128, bias=Bias(0), act=tf.nn.relu) + y = nn1(x) + + Some things to consider. + + - Use lazy-initialization if possible. That is, initialize at first Apply() + rather than at __init__(). + + Note: if needed, the variables can be created on a specific parameter + server by creating blocks in a scope like: + with g.device(device): + linear = Linear(...) + """ + + def __init__(self, name): + self._variables = [] + self._subblocks = [] + self._called = False + + # Intentionally distinguishing empty string and None. + # If name is an empty string, then do not use name scope. + self.name = name if name is not None else self.__class__.__name__ + self._graph = tf.get_default_graph() + + if self.name: + # Capture the scope string at the init time. + with self._graph.name_scope(self.name) as scope: + self._scope_str = scope + else: + self._scope_str = '' + + # Maintain hierarchy structure of blocks. + self._stack = _block_stacks[self._graph] + if self.__class__ is BlockBase: + # This code is only executed to create the root, which starts in the + # initialized state. + assert not self._stack + self._parent = None + self._called = True # The root is initialized. + return + + # Create a fake root if a root is not already present. + if not self._stack: + self._stack.append(BlockBase('NoOpRoot')) + + self._parent = self._stack[-1] + self._parent._subblocks.append(self) # pylint: disable=protected-access + + def __repr__(self): + return '"{}" ({})'.format(self._scope_str, self.__class__.__name__) + + @contextlib.contextmanager + def _OptionalNameScope(self, scope_str): + if scope_str: + with self._graph.name_scope(scope_str): + yield + else: + yield + + @contextlib.contextmanager + def _BlockScope(self): + """Context manager that handles graph, namescope, and nested blocks.""" + self._stack.append(self) + + try: + with self._graph.as_default(): + with self._OptionalNameScope(self._scope_str): + yield self + finally: # Pop from the stack no matter exception is raised or not. + # The following line is executed when leaving 'with self._BlockScope()' + self._stack.pop() + + def __call__(self, *args, **kwargs): + assert self._stack is _block_stacks[self._graph] + + with self._BlockScope(): + ret = self._Apply(*args, **kwargs) + + self._called = True + return ret + + def _Apply(self, *args, **kwargs): + """Implementation of __call__().""" + raise NotImplementedError() + + # Redirect all variable creation to this single function, so that we can + # switch to better variable creation scheme. + def NewVar(self, value, **kwargs): + """Creates a new variable. + + This function creates a variable, then returns a local copy created by + Identity operation. To get the Variable class object, use LookupRef() + method. + + Note that each time Variable class object is used as an input to an + operation, Tensorflow will create a new Send/Recv pair. This hurts + performance. + + If not for assign operations, use the local copy returned by this method. + + Args: + value: Initialization value of the variable. 
The shape and the data type + of the variable is determined by this initial value. + **kwargs: Extra named arguments passed to Variable.__init__(). + + Returns: + A local copy of the new variable. + """ + v = tf.Variable(value, **kwargs) + + self._variables.append(v) + return v + + @property + def initialized(self): + """Returns bool if the block is initialized. + + By default, BlockBase assumes that a block is initialized when __call__() + is executed for the first time. If this is an incorrect assumption for some + subclasses, override this property in those subclasses. + + Returns: + True if initialized, False otherwise. + """ + return self._called + + def AssertInitialized(self): + """Asserts initialized property.""" + if not self.initialized: + raise RuntimeError('{} has not been initialized.'.format(self)) + + def VariableList(self): + """Returns the list of all tensorflow variables used inside this block.""" + variables = list(itertools.chain( + itertools.chain.from_iterable( + t.VariableList() for t in self._subblocks), + self._VariableList())) + return variables + + def _VariableList(self): + """Returns the list of all tensorflow variables owned by this block.""" + self.AssertInitialized() + return self._variables + + def CreateWeightLoss(self): + """Returns L2 loss list of (almost) all variables used inside this block. + + When this method needs to be overridden, there are two choices. + + 1. Override CreateWeightLoss() to change the weight loss of all variables + that belong to this block, both directly and indirectly. + 2. Override _CreateWeightLoss() to change the weight loss of all + variables that directly belong to this block but not to the sub-blocks. + + Returns: + A Tensor object or None. + """ + losses = list(itertools.chain( + itertools.chain.from_iterable( + t.CreateWeightLoss() for t in self._subblocks), + self._CreateWeightLoss())) + return losses + + def _CreateWeightLoss(self): + """Returns weight loss list of variables that belong to this block.""" + self.AssertInitialized() + with self._BlockScope(): + return [tf.nn.l2_loss(v) for v in self._variables] + + def CreateUpdateOps(self): + """Creates update operations for this block and its sub-blocks.""" + ops = list(itertools.chain( + itertools.chain.from_iterable( + t.CreateUpdateOps() for t in self._subblocks), + self._CreateUpdateOps())) + return ops + + def _CreateUpdateOps(self): + """Creates update operations for this block.""" + self.AssertInitialized() + return [] + + def MarkAsNonTrainable(self): + """Mark all the variables of this block as non-trainable. + + All the variables owned directly or indirectly (through subblocks) are + marked as non trainable. + + This function along with CheckpointInitOp can be used to load a pretrained + model that consists in only one part of the whole graph. 
+ """ + assert self._called + + all_variables = self.VariableList() + collection = tf.get_collection_ref(tf.GraphKeys.TRAINABLE_VARIABLES) + for v in all_variables: + if v in collection: + collection.remove(v) + + +def CreateWeightLoss(): + """Returns all weight losses from the blocks in the graph.""" + stack = _block_stacks[tf.get_default_graph()] + if not stack: + return [] + return stack[0].CreateWeightLoss() + + +def CreateBlockUpdates(): + """Combines all updates from the blocks in the graph.""" + stack = _block_stacks[tf.get_default_graph()] + if not stack: + return [] + return stack[0].CreateUpdateOps() diff --git a/models/research/compression/entropy_coder/lib/block_util.py b/models/research/compression/entropy_coder/lib/block_util.py new file mode 100644 index 0000000000000000000000000000000000000000..80479cc66df95338aa119ba1216cd213ecfbe08d --- /dev/null +++ b/models/research/compression/entropy_coder/lib/block_util.py @@ -0,0 +1,101 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Utility functions for blocks.""" + +from __future__ import division +from __future__ import unicode_literals + +import math + +import numpy as np +import six +import tensorflow as tf + + +class RsqrtInitializer(object): + """Gaussian initializer with standard deviation 1/sqrt(n). + + Note that tf.truncated_normal is used internally. Therefore any random sample + outside two-sigma will be discarded and re-sampled. + """ + + def __init__(self, dims=(0,), **kwargs): + """Creates an initializer. + + Args: + dims: Dimension(s) index to compute standard deviation: + 1.0 / sqrt(product(shape[dims])) + **kwargs: Extra keyword arguments to pass to tf.truncated_normal. + """ + if isinstance(dims, six.integer_types): + self._dims = [dims] + else: + self._dims = dims + self._kwargs = kwargs + + def __call__(self, shape, dtype): + stddev = 1.0 / np.sqrt(np.prod([shape[x] for x in self._dims])) + return tf.truncated_normal( + shape=shape, dtype=dtype, stddev=stddev, **self._kwargs) + + +class RectifierInitializer(object): + """Gaussian initializer with standard deviation sqrt(2/fan_in). + + Note that tf.random_normal is used internally to ensure the expected weight + distribution. This is intended to be used with ReLU activations, specially + in ResNets. + + For details please refer to: + Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet + Classification + """ + + def __init__(self, dims=(0,), scale=2.0, **kwargs): + """Creates an initializer. + + Args: + dims: Dimension(s) index to compute standard deviation: + sqrt(scale / product(shape[dims])) + scale: A constant scaling for the initialization used as + sqrt(scale / product(shape[dims])). + **kwargs: Extra keyword arguments to pass to tf.truncated_normal. 
+ """ + if isinstance(dims, six.integer_types): + self._dims = [dims] + else: + self._dims = dims + self._kwargs = kwargs + self._scale = scale + + def __call__(self, shape, dtype): + stddev = np.sqrt(self._scale / np.prod([shape[x] for x in self._dims])) + return tf.random_normal( + shape=shape, dtype=dtype, stddev=stddev, **self._kwargs) + + +class GaussianInitializer(object): + """Gaussian initializer with a given standard deviation. + + Note that tf.truncated_normal is used internally. Therefore any random sample + outside two-sigma will be discarded and re-sampled. + """ + + def __init__(self, stddev=1.0): + self._stddev = stddev + + def __call__(self, shape, dtype): + return tf.truncated_normal(shape=shape, dtype=dtype, stddev=self._stddev) diff --git a/models/research/compression/entropy_coder/lib/blocks.py b/models/research/compression/entropy_coder/lib/blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..002384eb07045f1cad963d217a205ade51ba03b6 --- /dev/null +++ b/models/research/compression/entropy_coder/lib/blocks.py @@ -0,0 +1,24 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +from block_base import * +from block_util import * +from blocks_binarizer import * +from blocks_entropy_coding import * +from blocks_lstm import * +from blocks_masked_conv2d import * +from blocks_masked_conv2d_lstm import * +from blocks_operator import * +from blocks_std import * diff --git a/models/research/compression/entropy_coder/lib/blocks_binarizer.py b/models/research/compression/entropy_coder/lib/blocks_binarizer.py new file mode 100644 index 0000000000000000000000000000000000000000..8206731610613af2cf3ec15210fd5b9977f4a916 --- /dev/null +++ b/models/research/compression/entropy_coder/lib/blocks_binarizer.py @@ -0,0 +1,35 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Activation and weight binarizer implementations.""" + +import math + +import numpy as np +import tensorflow as tf + + +def ConvertSignCodeToZeroOneCode(x): + """Conversion from codes {-1, +1} to codes {0, 1}.""" + return 0.5 * (x + 1.0) + + +def ConvertZeroOneCodeToSignCode(x): + """Convert from codes {0, 1} to codes {-1, +1}.""" + return 2.0 * x - 1.0 + + +def CheckZeroOneCode(x): + return tf.reduce_all(tf.equal(x * (x - 1.0), 0)) diff --git a/models/research/compression/entropy_coder/lib/blocks_entropy_coding.py b/models/research/compression/entropy_coder/lib/blocks_entropy_coding.py new file mode 100644 index 0000000000000000000000000000000000000000..6ee5d97926c1b50b12cb9853d16caa25ba31e8d7 --- /dev/null +++ b/models/research/compression/entropy_coder/lib/blocks_entropy_coding.py @@ -0,0 +1,49 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Set of blocks related to entropy coding.""" + +import math + +import tensorflow as tf + +import block_base + +# pylint does not recognize block_base.BlockBase.__call__(). +# pylint: disable=not-callable + + +class CodeLength(block_base.BlockBase): + """Theoretical bound for a code length given a probability distribution. + """ + + def __init__(self, name=None): + super(CodeLength, self).__init__(name) + + def _Apply(self, c, p): + """Theoretical bound of the coded length given a probability distribution. + + Args: + c: The binary codes. Belong to {0, 1}. + p: The probability of: P(code==+1) + + Returns: + The average code length. + Note: the average code length can be greater than 1 bit (e.g. when + encoding the least likely symbol). + """ + entropy = ((1.0 - c) * tf.log(1.0 - p) + c * tf.log(p)) / (-math.log(2)) + entropy = tf.reduce_mean(entropy) + return entropy diff --git a/models/research/compression/entropy_coder/lib/blocks_entropy_coding_test.py b/models/research/compression/entropy_coder/lib/blocks_entropy_coding_test.py new file mode 100644 index 0000000000000000000000000000000000000000..5209865f5991598ee873ed24a4be572e3f9fc515 --- /dev/null +++ b/models/research/compression/entropy_coder/lib/blocks_entropy_coding_test.py @@ -0,0 +1,56 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for basic tensorflow blocks_entropy_coding.""" + +from __future__ import division +from __future__ import unicode_literals + +import math + +import numpy as np +import tensorflow as tf + +import blocks_entropy_coding + + +class BlocksEntropyCodingTest(tf.test.TestCase): + + def testCodeLength(self): + shape = [2, 4] + proba_feed = [[0.65, 0.25, 0.70, 0.10], + [0.28, 0.20, 0.44, 0.54]] + symbol_feed = [[1.0, 0.0, 1.0, 0.0], + [0.0, 0.0, 0.0, 1.0]] + mean_code_length = - ( + (math.log(0.65) + math.log(0.75) + math.log(0.70) + math.log(0.90) + + math.log(0.72) + math.log(0.80) + math.log(0.56) + math.log(0.54)) / + math.log(2.0)) / (shape[0] * shape[1]) + + symbol = tf.placeholder(dtype=tf.float32, shape=shape) + proba = tf.placeholder(dtype=tf.float32, shape=shape) + code_length_calculator = blocks_entropy_coding.CodeLength() + code_length = code_length_calculator(symbol, proba) + + with self.test_session(): + tf.global_variables_initializer().run() + code_length_eval = code_length.eval( + feed_dict={symbol: symbol_feed, proba: proba_feed}) + + self.assertAllClose(mean_code_length, code_length_eval) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/compression/entropy_coder/lib/blocks_lstm.py b/models/research/compression/entropy_coder/lib/blocks_lstm.py new file mode 100644 index 0000000000000000000000000000000000000000..6e474e3e3fcb6eeb3f18daf320e21a3acc88a2bf --- /dev/null +++ b/models/research/compression/entropy_coder/lib/blocks_lstm.py @@ -0,0 +1,263 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Blocks of LSTM and its variants.""" + +import numpy as np +import tensorflow as tf + +import block_base +import block_util +import blocks_std + +# pylint does not recognize block_base.BlockBase.__call__(). +# pylint: disable=not-callable + + +def LSTMBiasInit(shape, dtype): + """Returns ones for forget-gate, and zeros for the others.""" + shape = np.array(shape) + + # Check internal consistencies. + assert shape.shape == (1,), shape + assert shape[0] % 4 == 0, shape + + n = shape[0] // 4 + ones = tf.fill([n], tf.constant(1, dtype=dtype)) + zeros = tf.fill([3 * n], tf.constant(0, dtype=dtype)) + return tf.concat([ones, zeros], 0) + + +class LSTMBase(block_base.BlockBase): + """Base class for LSTM implementations. + + These LSTM implementations use the pattern found in [1]. No peephole + connection, i.e., cell content is not used in recurrence computation. + Hidden units are also output units. + + [1] Zaremba, Sutskever, Vinyals. Recurrent Neural Network Regularization, + 2015. arxiv:1409.2329. + """ + + def __init__(self, output_shape, name): + """Initializes LSTMBase class object. + + Args: + output_shape: List representing the LSTM output shape. This argument + does not include batch dimension. 
For example, if the LSTM output has + shape [batch, depth], then pass [depth]. + name: Name of this block. + """ + super(LSTMBase, self).__init__(name) + + with self._BlockScope(): + self._output_shape = [None] + list(output_shape) + self._hidden = None + self._cell = None + + @property + def hidden(self): + """Returns the hidden units of this LSTM.""" + return self._hidden + + @hidden.setter + def hidden(self, value): + """Assigns to the hidden units of this LSTM. + + Args: + value: The new value for the hidden units. If None, the hidden units are + considered to be filled with zeros. + """ + if value is not None: + value.get_shape().assert_is_compatible_with(self._output_shape) + self._hidden = value + + @property + def cell(self): + """Returns the cell units of this LSTM.""" + return self._cell + + @cell.setter + def cell(self, value): + """Assigns to the cell units of this LSTM. + + Args: + value: The new value for the cell units. If None, the cell units are + considered to be filled with zeros. + """ + if value is not None: + value.get_shape().assert_is_compatible_with(self._output_shape) + self._cell = value + + # Consider moving bias terms to the base, and require this method to be + # linear. + def _TransformInputs(self, _): + """Transforms the input units to (4 * depth) units. + + The forget-gate, input-gate, output-gate, and cell update is computed as + f, i, j, o = T(h) + R(x) + where h is hidden units, x is input units, and T, R are transforms of + h, x, respectively. + + This method implements R. Note that T is strictly linear, so if LSTM is + going to use bias, this method must include the bias to the transformation. + + Subclasses must implement this method. See _Apply() for more details. + """ + raise NotImplementedError() + + def _TransformHidden(self, _): + """Transforms the hidden units to (4 * depth) units. + + The forget-gate, input-gate, output-gate, and cell update is computed as + f, i, j, o = T(h) + R(x) + where h is hidden units, x is input units, and T, R are transforms of + h, x, respectively. + + This method implements T in the equation. The method must implement a + strictly linear transformation. For example, it may use MatMul or Conv2D, + but must not add bias. This is because when hidden units are zeros, then + the LSTM implementation will skip calling this method, instead of passing + zeros to this function. + + Subclasses must implement this method. See _Apply() for more details. + """ + raise NotImplementedError() + + def _Apply(self, *args): + xtransform = self._TransformInputs(*args) + depth_axis = len(self._output_shape) - 1 + + if self.hidden is not None: + htransform = self._TransformHidden(self.hidden) + f, i, j, o = tf.split( + value=htransform + xtransform, num_or_size_splits=4, axis=depth_axis) + else: + f, i, j, o = tf.split( + value=xtransform, num_or_size_splits=4, axis=depth_axis) + + if self.cell is not None: + self.cell = tf.sigmoid(f) * self.cell + tf.sigmoid(i) * tf.tanh(j) + else: + self.cell = tf.sigmoid(i) * tf.tanh(j) + + self.hidden = tf.sigmoid(o) * tf.tanh(self.cell) + return self.hidden + + +class LSTM(LSTMBase): + """Efficient LSTM implementation used in [1]. + + [1] Zaremba, Sutskever, Vinyals. Recurrent Neural Network Regularization, + 2015. arxiv:1409.2329. 
+ """ + + def __init__(self, + depth, + bias=LSTMBiasInit, + initializer=block_util.RsqrtInitializer(), + name=None): + super(LSTM, self).__init__([depth], name) + + with self._BlockScope(): + self._depth = depth + self._nn = blocks_std.NN( + 4 * depth, bias=bias, act=None, initializer=initializer) + self._hidden_linear = blocks_std.Linear( + 4 * depth, initializer=initializer) + + def _TransformInputs(self, *args): + return self._nn(*args) + + def _TransformHidden(self, h): + return self._hidden_linear(h) + + +class Conv2DLSTM(LSTMBase): + """Convolutional LSTM implementation with optimizations inspired by [1]. + + Note that when using the batch normalization feature, the bias initializer + will not be used, since BN effectively cancels its effect out. + + [1] Zaremba, Sutskever, Vinyals. Recurrent Neural Network Regularization, + 2015. arxiv:1409.2329. + """ + + def __init__(self, + depth, + filter_size, + hidden_filter_size, + strides, + padding, + bias=LSTMBiasInit, + initializer=block_util.RsqrtInitializer(dims=(0, 1, 2)), + use_moving_average=False, + name=None): + super(Conv2DLSTM, self).__init__([None, None, depth], name) + self._iter = 0 + + with self._BlockScope(): + self._input_conv = blocks_std.Conv2D( + 4 * depth, + filter_size, + strides, + padding, + bias=None, + act=None, + initializer=initializer, + name='input_conv2d') + + self._hidden_conv = blocks_std.Conv2D( + 4 * depth, + hidden_filter_size, + [1, 1], + 'SAME', + bias=None, + act=None, + initializer=initializer, + name='hidden_conv2d') + + if bias is not None: + self._bias = blocks_std.BiasAdd(bias, name='biases') + else: + self._bias = blocks_std.PassThrough() + + def _TransformInputs(self, x): + return self._bias(self._input_conv(x)) + + def _TransformHidden(self, h): + return self._hidden_conv(h) + + def _Apply(self, *args): + xtransform = self._TransformInputs(*args) + depth_axis = len(self._output_shape) - 1 + + if self.hidden is not None: + htransform = self._TransformHidden(self.hidden) + f, i, j, o = tf.split( + value=htransform + xtransform, num_or_size_splits=4, axis=depth_axis) + else: + f, i, j, o = tf.split( + value=xtransform, num_or_size_splits=4, axis=depth_axis) + + if self.cell is not None: + self.cell = tf.sigmoid(f) * self.cell + tf.sigmoid(i) * tf.tanh(j) + else: + self.cell = tf.sigmoid(i) * tf.tanh(j) + + self.hidden = tf.sigmoid(o) * tf.tanh(self.cell) + + self._iter += 1 + return self.hidden diff --git a/models/research/compression/entropy_coder/lib/blocks_lstm_test.py b/models/research/compression/entropy_coder/lib/blocks_lstm_test.py new file mode 100644 index 0000000000000000000000000000000000000000..03c32dc136effda11163f2e35c5a48496f0187c0 --- /dev/null +++ b/models/research/compression/entropy_coder/lib/blocks_lstm_test.py @@ -0,0 +1,113 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for LSTM tensorflow blocks.""" +from __future__ import division + +import numpy as np +import tensorflow as tf + +import block_base +import blocks_std +import blocks_lstm + + +class BlocksLSTMTest(tf.test.TestCase): + + def CheckUnary(self, y, op_type): + self.assertEqual(op_type, y.op.type) + self.assertEqual(1, len(y.op.inputs)) + return y.op.inputs[0] + + def CheckBinary(self, y, op_type): + self.assertEqual(op_type, y.op.type) + self.assertEqual(2, len(y.op.inputs)) + return y.op.inputs + + def testLSTM(self): + lstm = blocks_lstm.LSTM(10) + lstm.hidden = tf.zeros(shape=[10, 10], dtype=tf.float32) + lstm.cell = tf.zeros(shape=[10, 10], dtype=tf.float32) + x = tf.placeholder(dtype=tf.float32, shape=[10, 11]) + y = lstm(x) + + o, tanhc = self.CheckBinary(y, 'Mul') + self.assertEqual(self.CheckUnary(o, 'Sigmoid').name, 'LSTM/split:3') + + self.assertIs(lstm.cell, self.CheckUnary(tanhc, 'Tanh')) + fc, ij = self.CheckBinary(lstm.cell, 'Add') + + f, _ = self.CheckBinary(fc, 'Mul') + self.assertEqual(self.CheckUnary(f, 'Sigmoid').name, 'LSTM/split:0') + + i, j = self.CheckBinary(ij, 'Mul') + self.assertEqual(self.CheckUnary(i, 'Sigmoid').name, 'LSTM/split:1') + j = self.CheckUnary(j, 'Tanh') + self.assertEqual(j.name, 'LSTM/split:2') + + def testLSTMBiasInit(self): + lstm = blocks_lstm.LSTM(9) + x = tf.placeholder(dtype=tf.float32, shape=[15, 7]) + lstm(x) + b = lstm._nn._bias + + with self.test_session(): + tf.global_variables_initializer().run() + bias_var = b._bias.eval() + + comp = ([1.0] * 9) + ([0.0] * 27) + self.assertAllEqual(bias_var, comp) + + def testConv2DLSTM(self): + lstm = blocks_lstm.Conv2DLSTM(depth=10, + filter_size=[1, 1], + hidden_filter_size=[1, 1], + strides=[1, 1], + padding='SAME') + lstm.hidden = tf.zeros(shape=[10, 11, 11, 10], dtype=tf.float32) + lstm.cell = tf.zeros(shape=[10, 11, 11, 10], dtype=tf.float32) + x = tf.placeholder(dtype=tf.float32, shape=[10, 11, 11, 1]) + y = lstm(x) + + o, tanhc = self.CheckBinary(y, 'Mul') + self.assertEqual(self.CheckUnary(o, 'Sigmoid').name, 'Conv2DLSTM/split:3') + + self.assertIs(lstm.cell, self.CheckUnary(tanhc, 'Tanh')) + fc, ij = self.CheckBinary(lstm.cell, 'Add') + + f, _ = self.CheckBinary(fc, 'Mul') + self.assertEqual(self.CheckUnary(f, 'Sigmoid').name, 'Conv2DLSTM/split:0') + + i, j = self.CheckBinary(ij, 'Mul') + self.assertEqual(self.CheckUnary(i, 'Sigmoid').name, 'Conv2DLSTM/split:1') + j = self.CheckUnary(j, 'Tanh') + self.assertEqual(j.name, 'Conv2DLSTM/split:2') + + def testConv2DLSTMBiasInit(self): + lstm = blocks_lstm.Conv2DLSTM(9, 1, 1, [1, 1], 'SAME') + x = tf.placeholder(dtype=tf.float32, shape=[1, 7, 7, 7]) + lstm(x) + b = lstm._bias + + with self.test_session(): + tf.global_variables_initializer().run() + bias_var = b._bias.eval() + + comp = ([1.0] * 9) + ([0.0] * 27) + self.assertAllEqual(bias_var, comp) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/compression/entropy_coder/lib/blocks_masked_conv2d.py b/models/research/compression/entropy_coder/lib/blocks_masked_conv2d.py new file mode 100644 index 0000000000000000000000000000000000000000..3f562384a681964554ead02477da24c13715d4d1 --- /dev/null +++ b/models/research/compression/entropy_coder/lib/blocks_masked_conv2d.py @@ -0,0 +1,226 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Define some typical masked 2D convolutions.""" + +import numpy as np +from six.moves import xrange +import tensorflow as tf + +import block_util +import blocks_std + +# pylint does not recognize block_base.BlockBase.__call__(). +# pylint: disable=not-callable + + +class RasterScanConv2D(blocks_std.Conv2DBase): + """Conv2D with no dependency on future pixels (in raster scan order). + + For example, assuming a 5 x 5 kernel, the kernel is applied a spatial mask: + T T T T T + T T T T T + T T x F F + F F F F F + F F F F F + where 'T' are pixels which are available when computing the convolution + for pixel 'x'. All the pixels marked with 'F' are not available. + 'x' itself is not available if strict_order is True, otherwise, it is + available. + """ + + def __init__(self, depth, filter_size, strides, padding, + strict_order=True, + bias=None, act=None, initializer=None, name=None): + super(RasterScanConv2D, self).__init__( + depth, filter_size, strides, padding, bias, act, name=name) + + if (filter_size[0] % 2) != 1 or (filter_size[1] % 2) != 1: + raise ValueError('Kernel size should be odd.') + + with self._BlockScope(): + if initializer is None: + initializer = block_util.RsqrtInitializer(dims=(0, 1, 2)) + self._initializer = initializer + self._strict_order = strict_order + + def _CreateKernel(self, shape, dtype): + init = self._initializer(shape, dtype) + kernel = self.NewVar(init) + + mask = np.ones(shape[:2], dtype=dtype.as_numpy_dtype) + center = shape[:2] // 2 + mask[center[0] + 1:, :] = 0 + if not self._strict_order: + mask[center[0], center[1] + 1:] = 0 + else: + mask[center[0], center[1]:] = 0 + mask = mask.reshape(mask.shape + (1, 1)) + + return tf.convert_to_tensor(mask, dtype) * kernel + + +class DepthOrderConv2D(blocks_std.Conv2DBase): + """Conv2D with no dependency on higher depth dimensions. + + More precisely, the output depth #n has only dependencies on input depths #k + for k < n (if strict_order is True) or for k <= n (if strict_order is False). + """ + + def __init__(self, depth, filter_size, strides, padding, + strict_order=True, + bias=None, act=None, initializer=None, name=None): + super(DepthOrderConv2D, self).__init__( + depth, filter_size, strides, padding, bias, act, name=name) + + with self._BlockScope(): + if initializer is None: + initializer = block_util.RsqrtInitializer(dims=(0, 1, 2)) + self._initializer = initializer + self._strict_order = strict_order + + def _CreateKernel(self, shape, dtype): + init = self._initializer(shape, dtype) + kernel = self.NewVar(init) + + mask = np.ones(shape[2:], dtype=dtype.as_numpy_dtype) + depth_output = shape[3] + for d in xrange(depth_output): + if self._strict_order: + mask[d:, d] = 0 + else: + mask[d + 1:, d] = 0 + mask = mask.reshape((1, 1) + mask.shape) + + return tf.convert_to_tensor(mask, dtype) * kernel + + +class GroupRasterScanConv2D(blocks_std.Conv2DBase): + """Conv2D with no dependency on future pixels (in raster scan order). + + This version only introduces dependencies on previous pixels in raster scan + order. 
It can also introduce some dependencies on previous depth positions + of the current pixel (current pixel = center pixel of the kernel) in the + following way: + the depth dimension of the input is split into Ki groups of size + |input_group_size|, the output dimension is split into Ko groups of size + |output_group_size| (usually Ki == Ko). Each output group ko of the current + pixel position can only depend on previous input groups ki + (i.e. ki < ko if strict_order is True or ki <= ko if strict_order is False). + + Notes: + - Block RasterScanConv2D is a special case of GroupRasterScanConv2D + where Ki == Ko == 1 (i.e. input_group_size == input_depth and + output_group_size == output_depth). + - For 1x1 convolution, block DepthOrderConv2D is a special case of + GroupRasterScanConv2D where input_group_size == 1 and + output_group_size == 1. + """ + + def __init__(self, depth, filter_size, strides, padding, + strict_order=True, + input_group_size=1, + output_group_size=1, + bias=None, act=None, initializer=None, name=None): + super(GroupRasterScanConv2D, self).__init__( + depth, filter_size, strides, padding, bias, act, name=name) + + if (filter_size[0] % 2) != 1 or (filter_size[1] % 2) != 1: + raise ValueError('Kernel size should be odd.') + + with self._BlockScope(): + if initializer is None: + initializer = block_util.RsqrtInitializer(dims=(0, 1, 2)) + self._initializer = initializer + self._input_group_size = input_group_size + self._output_group_size = output_group_size + self._strict_order = strict_order + + if depth % self._output_group_size != 0: + raise ValueError( + 'Invalid depth group size: {} for depth {}'.format( + self._output_group_size, depth)) + self._output_group_count = depth // self._output_group_size + + def _CreateKernel(self, shape, dtype): + init = self._initializer(shape, dtype) + kernel = self.NewVar(init) + + depth_input = shape[2] + if depth_input % self._input_group_size != 0: + raise ValueError( + 'Invalid depth group size: {} for depth {}'.format( + self._input_group_size, depth_input)) + input_group_count = depth_input // self._input_group_size + output_group_count = self._output_group_count + + # Set the mask to 0 for future pixels in raster scan order. + center = shape[:2] // 2 + mask = np.ones([shape[0], shape[1], + input_group_count, self._input_group_size, + output_group_count, self._output_group_size], + dtype=dtype.as_numpy_dtype) + mask[center[0] + 1:, :, :, :, :, :] = 0 + mask[center[0], center[1] + 1:, :, :, :, :] = 0 + + # Adjust the mask for the current position (the center position). + depth_output = shape[3] + for d in xrange(output_group_count): + mask[center[0], center[1], d + 1:, :, d:d + 1, :] = 0 + if self._strict_order: + mask[center[0], center[1], d, :, d:d + 1, :] = 0 + + mask = mask.reshape([shape[0], shape[1], depth_input, depth_output]) + return tf.convert_to_tensor(mask, dtype) * kernel + + +class InFillingConv2D(blocks_std.Conv2DBase): + """Conv2D with kernel having no dependency on the current pixel. + + For example, assuming a 5 x 5 kernel, the kernel is applied a spatial mask: + T T T T T + T T T T T + T T x T T + T T T T T + T T T T T + where 'T' marks a pixel which is available when computing the convolution + for pixel 'x'. 'x' itself is not available. 
+ """ + + def __init__(self, depth, filter_size, strides, padding, + bias=None, act=None, initializer=None, name=None): + super(InFillingConv2D, self).__init__( + depth, filter_size, strides, padding, bias, act, name=name) + + if (filter_size[0] % 2) != 1 or (filter_size[1] % 2) != 1: + raise ValueError('Kernel size should be odd.') + if filter_size[0] == 1 and filter_size[1] == 1: + raise ValueError('Kernel size should be larger than 1x1.') + + with self._BlockScope(): + if initializer is None: + initializer = block_util.RsqrtInitializer(dims=(0, 1, 2)) + self._initializer = initializer + + def _CreateKernel(self, shape, dtype): + init = self._initializer(shape, dtype) + kernel = self.NewVar(init) + + mask = np.ones(shape[:2], dtype=dtype.as_numpy_dtype) + center = shape[:2] // 2 + mask[center[0], center[1]] = 0 + mask = mask.reshape(mask.shape + (1, 1)) + + return tf.convert_to_tensor(mask, dtype) * kernel diff --git a/models/research/compression/entropy_coder/lib/blocks_masked_conv2d_lstm.py b/models/research/compression/entropy_coder/lib/blocks_masked_conv2d_lstm.py new file mode 100644 index 0000000000000000000000000000000000000000..2d6dfeffcaff1289adf3bdec33cb0560db6b0416 --- /dev/null +++ b/models/research/compression/entropy_coder/lib/blocks_masked_conv2d_lstm.py @@ -0,0 +1,79 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Masked conv2d LSTM.""" + +import block_base +import block_util +import blocks_masked_conv2d +import blocks_lstm +import blocks_std + +# pylint: disable=not-callable + + +class RasterScanConv2DLSTM(blocks_lstm.LSTMBase): + """Convolutional LSTM implementation with optimizations inspired by [1]. + + Note that when using the batch normalization feature, the bias initializer + will not be used, since BN effectively cancels its effect out. + + [1] Zaremba, Sutskever, Vinyals. Recurrent Neural Network Regularization, + 2015. arxiv:1409.2329. 
+ """ + + def __init__(self, + depth, + filter_size, + hidden_filter_size, + strides, + padding, + bias=blocks_lstm.LSTMBiasInit, + initializer=block_util.RsqrtInitializer(dims=(0, 1, 2)), + name=None): + super(RasterScanConv2DLSTM, self).__init__([None, None, depth], name) + + with self._BlockScope(): + self._input_conv = blocks_masked_conv2d.RasterScanConv2D( + 4 * depth, + filter_size, + strides, + padding, + strict_order=False, + bias=None, + act=None, + initializer=initializer, + name='input_conv2d') + + self._hidden_conv = blocks_std.Conv2D( + 4 * depth, + hidden_filter_size, + [1, 1], + 'SAME', + bias=None, + act=None, + initializer=initializer, + name='hidden_conv2d') + + if bias is not None: + self._bias = blocks_std.BiasAdd(bias, name='biases') + else: + self._bias = blocks_std.PassThrough() + + def _TransformInputs(self, x): + return self._bias(self._input_conv(x)) + + def _TransformHidden(self, h): + return self._hidden_conv(h) diff --git a/models/research/compression/entropy_coder/lib/blocks_masked_conv2d_test.py b/models/research/compression/entropy_coder/lib/blocks_masked_conv2d_test.py new file mode 100644 index 0000000000000000000000000000000000000000..1d284ebffe5a24b91c96936c17d6c23febdf76d5 --- /dev/null +++ b/models/research/compression/entropy_coder/lib/blocks_masked_conv2d_test.py @@ -0,0 +1,207 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests of the 2D masked convolution blocks.""" + +from __future__ import division +from __future__ import unicode_literals + +import numpy as np +from six.moves import xrange +import tensorflow as tf + +import blocks_masked_conv2d + + +class MaskedConv2DTest(tf.test.TestCase): + + def testRasterScanKernel(self): + kernel_size = 5 + input_depth = 1 + output_depth = 1 + kernel_shape = [kernel_size, kernel_size, input_depth, output_depth] + + # pylint: disable=bad-whitespace + kernel_feed = [[ 1.0, 2.0, 3.0, 4.0, 5.0], + [ 6.0, 7.0, 8.0, 9.0, 10.0], + [11.0, 12.0, 13.0, 14.0, 15.0], + [16.0, 17.0, 18.0, 19.0, 20.0], + [21.0, 22.0, 23.0, 24.0, 25.0]] + kernel_feed = np.reshape(kernel_feed, kernel_shape) + kernel_expected = [[ 1.0, 2.0, 3.0, 4.0, 5.0], + [ 6.0, 7.0, 8.0, 9.0, 10.0], + [11.0, 12.0, 0.0, 0.0, 0.0], + [ 0.0, 0.0, 0.0, 0.0, 0.0], + [ 0.0, 0.0, 0.0, 0.0, 0.0]] + kernel_expected = np.reshape(kernel_expected, kernel_shape) + # pylint: enable=bad-whitespace + + init_kernel = lambda s, t: tf.constant(kernel_feed, dtype=t, shape=s) + masked_conv2d = blocks_masked_conv2d.RasterScanConv2D( + output_depth, [kernel_size] * 2, [1] * 2, 'SAME', + initializer=init_kernel) + x = tf.placeholder(dtype=tf.float32, shape=[10] * 3 + [input_depth]) + _ = masked_conv2d(x) + + with self.test_session(): + tf.global_variables_initializer().run() + kernel_value = masked_conv2d._kernel.eval() + + self.assertAllEqual(kernel_expected, kernel_value) + + def testDepthOrderKernel(self): + kernel_size = 1 + input_depth = 7 + output_depth = input_depth + kernel_shape = [kernel_size, kernel_size, input_depth, output_depth] + + kernel_feed = np.ones(kernel_shape) + x_shape = [5] * 3 + [input_depth] + x_feed = np.ones(x_shape) + y_expected = np.zeros(x_shape[0:3] + [output_depth]) + y_expected[:, :, :] = np.arange(output_depth) + + init_kernel = lambda s, t: tf.constant(kernel_feed, dtype=t, shape=s) + masked_conv2d = blocks_masked_conv2d.DepthOrderConv2D( + output_depth, [kernel_size] * 2, [1] * 2, 'SAME', + strict_order=True, + initializer=init_kernel) + x = tf.placeholder(dtype=tf.float32, shape=x_shape) + y = masked_conv2d(x) + + with self.test_session(): + tf.global_variables_initializer().run() + y_value = y.eval(feed_dict={x: x_feed}) + + self.assertAllEqual(y_expected, y_value) + + def testGroupRasterScanKernel(self): + kernel_size = 3 + input_depth = 4 + input_group_size = 2 + output_depth = 2 + output_group_size = 1 + kernel_shape = [kernel_size, kernel_size, input_depth, output_depth] + kernel_feed = np.ones(shape=kernel_shape) + + height = 5 + width = 5 + x_shape = [1, height, width, input_depth] + x_feed = np.ones(shape=x_shape) + + # pylint: disable=bad-whitespace + y_expected = [ + [[ 0, 2], [ 4, 6], [ 4, 6], [ 4, 6], [ 4, 6]], + [[ 8, 10], [16, 18], [16, 18], [16, 18], [12, 14]], + [[ 8, 10], [16, 18], [16, 18], [16, 18], [12, 14]], + [[ 8, 10], [16, 18], [16, 18], [16, 18], [12, 14]], + [[ 8, 10], [16, 18], [16, 18], [16, 18], [12, 14]], + ] + y_expected = np.reshape(y_expected, [1, height, width, output_depth]) + # pylint: enable=bad-whitespace + + init_kernel = lambda s, t: tf.constant(kernel_feed, dtype=t, shape=s) + masked_conv2d = blocks_masked_conv2d.GroupRasterScanConv2D( + output_depth, [kernel_size] * 2, [1] * 2, 'SAME', + strict_order=True, + input_group_size=input_group_size, + output_group_size=output_group_size, + initializer=init_kernel) + x = tf.placeholder(dtype=tf.float32, shape=x_shape) + y = 
masked_conv2d(x) + + with self.test_session(): + tf.global_variables_initializer().run() + y_value = y.eval(feed_dict={x: x_feed}) + + self.assertAllEqual(y_expected, y_value) + + def testInFillingKernel(self): + kernel_size = 5 + input_depth = 1 + output_depth = 1 + kernel_shape = [kernel_size, kernel_size, input_depth, output_depth] + + # pylint: disable=bad-whitespace + kernel_feed = [[ 1.0, 2.0, 3.0, 4.0, 5.0], + [ 6.0, 7.0, 8.0, 9.0, 10.0], + [11.0, 12.0, 13.0, 14.0, 15.0], + [16.0, 17.0, 18.0, 19.0, 20.0], + [21.0, 22.0, 23.0, 24.0, 25.0]] + kernel_feed = np.reshape(kernel_feed, kernel_shape) + kernel_expected = [[ 1.0, 2.0, 3.0, 4.0, 5.0], + [ 6.0, 7.0, 8.0, 9.0, 10.0], + [11.0, 12.0, 0.0, 14.0, 15.0], + [16.0, 17.0, 18.0, 19.0, 20.0], + [21.0, 22.0, 23.0, 24.0, 25.0]] + kernel_expected = np.reshape(kernel_expected, kernel_shape) + # pylint: enable=bad-whitespace + + init_kernel = lambda s, t: tf.constant(kernel_feed, dtype=t, shape=s) + masked_conv2d = blocks_masked_conv2d.InFillingConv2D( + output_depth, [kernel_size] * 2, [1] * 2, 'SAME', + initializer=init_kernel) + x = tf.placeholder(dtype=tf.float32, shape=[10] * 3 + [input_depth]) + _ = masked_conv2d(x) + + with self.test_session(): + tf.global_variables_initializer().run() + kernel_value = masked_conv2d._kernel.eval() + + self.assertAllEqual(kernel_expected, kernel_value) + + def testConv2DMaskedNumerics(self): + kernel_size = 5 + input_shape = [1, 10, 10, 1] + filter_shape = [kernel_size, kernel_size, 1, 1] + strides = [1, 1, 1, 1] + output_shape = [1, 10, 10, 1] + + conv = blocks_masked_conv2d.RasterScanConv2D( + depth=filter_shape[-1], + filter_size=filter_shape[0:2], + strides=strides[1:3], + padding='SAME', + initializer=tf.constant_initializer(value=1.0)) + x = tf.placeholder(dtype=tf.float32, shape=input_shape) + y = conv(x) + + x_feed = - np.ones(input_shape, dtype=float) + y_expected = np.ones(output_shape, dtype=float) + for i in xrange(input_shape[1]): + for j in xrange(input_shape[2]): + x_feed[0, i, j, 0] = 10 * (j + 1) + i + v = 0 + ki_start = max(i - kernel_size // 2, 0) + kj_start = max(j - kernel_size // 2, 0) + kj_end = min(j + kernel_size // 2, input_shape[2] - 1) + for ki in range(ki_start, i + 1): + for kj in range(kj_start, kj_end + 1): + if ki > i: + continue + if ki == i and kj >= j: + continue + v += 10 * (kj + 1) + ki + y_expected[0, i, j, 0] = v + + with self.test_session(): + tf.global_variables_initializer().run() + y_value = y.eval(feed_dict={x: x_feed}) + + self.assertAllEqual(y_expected, y_value) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/compression/entropy_coder/lib/blocks_operator.py b/models/research/compression/entropy_coder/lib/blocks_operator.py new file mode 100644 index 0000000000000000000000000000000000000000..e35e37b27aa416ed48f91eda866d372601741cba --- /dev/null +++ b/models/research/compression/entropy_coder/lib/blocks_operator.py @@ -0,0 +1,87 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Common blocks which work as operators on other blocks.""" + +import tensorflow as tf + +import block_base + +# pylint: disable=not-callable + + +class CompositionOperator(block_base.BlockBase): + """Composition of several blocks.""" + + def __init__(self, block_list, name=None): + """Initialization of the composition operator. + + Args: + block_list: List of blocks.BlockBase that are chained to create + a new blocks.BlockBase. + name: Name of this block. + """ + super(CompositionOperator, self).__init__(name) + self._blocks = block_list + + def _Apply(self, x): + """Apply successively all the blocks on the given input tensor.""" + h = x + for layer in self._blocks: + h = layer(h) + return h + + +class LineOperator(block_base.BlockBase): + """Repeat the same block over all the lines of an input tensor.""" + + def __init__(self, block, name=None): + super(LineOperator, self).__init__(name) + self._block = block + + def _Apply(self, x): + height = x.get_shape()[1].value + if height is None: + raise ValueError('Unknown tensor height') + all_line_x = tf.split(value=x, num_or_size_splits=height, axis=1) + + y = [] + for line_x in all_line_x: + y.append(self._block(line_x)) + y = tf.concat(values=y, axis=1) + + return y + + +class TowerOperator(block_base.BlockBase): + """Parallel execution with concatenation of several blocks.""" + + def __init__(self, block_list, dim=3, name=None): + """Initialization of the parallel exec + concat (Tower). + + Args: + block_list: List of blocks.BlockBase that are chained to create + a new blocks.BlockBase. + dim: the dimension on which to concat. + name: Name of this block. + """ + super(TowerOperator, self).__init__(name) + self._blocks = block_list + self._concat_dim = dim + + def _Apply(self, x): + """Apply successively all the blocks on the given input tensor.""" + outputs = [layer(x) for layer in self._blocks] + return tf.concat(outputs, self._concat_dim) diff --git a/models/research/compression/entropy_coder/lib/blocks_operator_test.py b/models/research/compression/entropy_coder/lib/blocks_operator_test.py new file mode 100644 index 0000000000000000000000000000000000000000..8b6d80da1d09102585e4725dd5c59f48d48eafcd --- /dev/null +++ b/models/research/compression/entropy_coder/lib/blocks_operator_test.py @@ -0,0 +1,64 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests of the block operators.""" + +import numpy as np +import tensorflow as tf + +import block_base +import blocks_operator + + +class AddOneBlock(block_base.BlockBase): + + def __init__(self, name=None): + super(AddOneBlock, self).__init__(name) + + def _Apply(self, x): + return x + 1.0 + + +class SquareBlock(block_base.BlockBase): + + def __init__(self, name=None): + super(SquareBlock, self).__init__(name) + + def _Apply(self, x): + return x * x + + +class BlocksOperatorTest(tf.test.TestCase): + + def testComposition(self): + x_value = np.array([[1.0, 2.0, 3.0], + [-1.0, -2.0, -3.0]]) + y_expected_value = np.array([[4.0, 9.0, 16.0], + [0.0, 1.0, 4.0]]) + + x = tf.placeholder(dtype=tf.float32, shape=[2, 3]) + complex_block = blocks_operator.CompositionOperator( + [AddOneBlock(), + SquareBlock()]) + y = complex_block(x) + + with self.test_session(): + y_value = y.eval(feed_dict={x: x_value}) + + self.assertAllClose(y_expected_value, y_value) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/compression/entropy_coder/lib/blocks_std.py b/models/research/compression/entropy_coder/lib/blocks_std.py new file mode 100644 index 0000000000000000000000000000000000000000..2c617485342452f500d4b1b0b18e33b07d51e487 --- /dev/null +++ b/models/research/compression/entropy_coder/lib/blocks_std.py @@ -0,0 +1,363 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Basic blocks for building tensorflow models.""" + +import numpy as np +import tensorflow as tf + +import block_base +import block_util + +# pylint does not recognize block_base.BlockBase.__call__(). +# pylint: disable=not-callable + + +def HandleConvPaddingModes(x, padding, kernel_shape, strides): + """Returns an updated tensor and padding type for REFLECT and SYMMETRIC. + + Args: + x: A 4D tensor with shape [batch_size, height, width, depth]. + padding: Padding mode (SAME, VALID, REFLECT, or SYMMETRIC). + kernel_shape: Shape of convolution kernel that will be applied. + strides: Convolution stride that will be used. + + Returns: + x and padding after adjustments for REFLECT and SYMMETRIC. + """ + # For 1x1 convolution, all padding modes are the same. + if np.all(kernel_shape[:2] == 1): + return x, 'VALID' + + if padding == 'REFLECT' or padding == 'SYMMETRIC': + # We manually compute the number of paddings as if 'SAME'. + # From Tensorflow kernel, the formulas are as follows. + # output_shape = ceil(input_shape / strides) + # paddings = (output_shape - 1) * strides + filter_size - input_shape + # Let x, y, s be a shorthand notations for input_shape, output_shape, and + # strides, respectively. Let (x - 1) = sn + r where 0 <= r < s. Note that + # y - 1 = ceil(x / s) - 1 = floor((x - 1) / s) = n + # provided that x > 0. 
Therefore + # paddings = n * s + filter_size - (sn + r + 1) + # = filter_size - r - 1. + input_shape = x.get_shape() # shape at graph construction time + img_shape = tf.shape(x)[1:3] # image shape (no batch) at run time + remainder = tf.mod(img_shape - 1, strides[1:3]) + pad_sizes = kernel_shape[:2] - remainder - 1 + + pad_rows = pad_sizes[0] + pad_cols = pad_sizes[1] + pad = tf.stack([[0, 0], tf.stack([pad_rows // 2, (pad_rows + 1) // 2]), + tf.stack([pad_cols // 2, (pad_cols + 1) // 2]), [0, 0]]) + + # Manually pad the input and switch the padding mode to 'VALID'. + x = tf.pad(x, pad, mode=padding) + x.set_shape([input_shape[0], x.get_shape()[1], + x.get_shape()[2], input_shape[3]]) + padding = 'VALID' + + return x, padding + + +class PassThrough(block_base.BlockBase): + """A dummy transform block that does nothing.""" + + def __init__(self): + # Pass an empty string to disable name scoping. + super(PassThrough, self).__init__(name='') + + def _Apply(self, inp): + return inp + + @property + def initialized(self): + """Always returns True.""" + return True + + +class Bias(object): + """An initialization helper class for BiasAdd block below.""" + + def __init__(self, value=0): + self.value = value + + +class BiasAdd(block_base.BlockBase): + """A tf.nn.bias_add wrapper. + + This wrapper may act as a PassThrough block depending on the initializer + provided, to make easier optional bias applications in NN blocks, etc. + See __init__() for the details. + """ + + def __init__(self, initializer=Bias(0), name=None): + """Initializes Bias block. + + |initializer| parameter have two special cases. + + 1. If initializer is None, then this block works as a PassThrough. + 2. If initializer is a Bias class object, then tf.constant_initializer is + used with the stored value. + + Args: + initializer: An initializer for the bias variable. + name: Name of this block. + """ + super(BiasAdd, self).__init__(name) + + with self._BlockScope(): + if isinstance(initializer, Bias): + self._initializer = tf.constant_initializer(value=initializer.value) + else: + self._initializer = initializer + + self._bias = None + + def _Apply(self, x): + if not self._bias: + init = self._initializer([int(x.get_shape()[-1])], x.dtype) + self._bias = self.NewVar(init) + + return tf.nn.bias_add(x, self._bias) + + def CreateWeightLoss(self): + return [] + + +class LinearBase(block_base.BlockBase): + """A matmul wrapper. + + Returns input * W, where matrix W can be customized through derivation. + """ + + def __init__(self, depth, name=None): + super(LinearBase, self).__init__(name) + + with self._BlockScope(): + self._depth = depth + self._matrix = None + + def _CreateKernel(self, shape, dtype): + raise NotImplementedError('This method must be sub-classed.') + + def _Apply(self, x): + if not self._matrix: + shape = [int(x.get_shape()[-1]), self._depth] + self._matrix = self._CreateKernel(shape, x.dtype) + + return tf.matmul(x, self._matrix) + + +class Linear(LinearBase): + """A matmul wrapper. + + Returns input * W, where matrix W is learned. + """ + + def __init__(self, + depth, + initializer=block_util.RsqrtInitializer(), + name=None): + super(Linear, self).__init__(depth, name) + + with self._BlockScope(): + self._initializer = initializer + + def _CreateKernel(self, shape, dtype): + init = self._initializer(shape, dtype) + return self.NewVar(init) + + +class NN(block_base.BlockBase): + """A neural network layer wrapper. 
+ + Returns act(input * W + b), where matrix W, bias b are learned, and act is an + optional activation function (i.e., nonlinearity). + + This transform block can handle multiple inputs. If x_1, x_2, ..., x_m are + the inputs, then returns act(x_1 * W_1 + ... + x_m * W_m + b). + + Attributes: + nunits: The dimension of the output. + """ + + def __init__(self, + depth, + bias=Bias(0), + act=None, # e.g., tf.nn.relu + initializer=block_util.RsqrtInitializer(), + linear_block_factory=(lambda d, i: Linear(d, initializer=i)), + name=None): + """Initializes NN block. + + Args: + depth: The depth of the output. + bias: An initializer for the bias, or a Bias class object. If None, there + will be no bias term for this NN block. See BiasAdd block. + act: Optional activation function. If None, no activation is applied. + initializer: The initialization method for the matrix weights. + linear_block_factory: A function used to create a linear block. + name: The name of this block. + """ + super(NN, self).__init__(name) + + with self._BlockScope(): + self._linear_block_factory = linear_block_factory + self._depth = depth + self._initializer = initializer + self._matrices = None + + self._bias = BiasAdd(bias) if bias else PassThrough() + self._act = act if act else PassThrough() + + def _Apply(self, *args): + if not self._matrices: + self._matrices = [ + self._linear_block_factory(self._depth, self._initializer) + for _ in args] + + if len(self._matrices) != len(args): + raise ValueError('{} expected {} inputs, but observed {} inputs'.format( + self.name, len(self._matrices), len(args))) + + if len(args) > 1: + y = tf.add_n([m(x) for m, x in zip(self._matrices, args)]) + else: + y = self._matrices[0](args[0]) + + return self._act(self._bias(y)) + + +class Conv2DBase(block_base.BlockBase): + """A tf.nn.conv2d operator.""" + + def __init__(self, depth, filter_size, strides, padding, + bias=None, act=None, atrous_rate=None, conv=tf.nn.conv2d, + name=None): + """Initializes a Conv2DBase block. + + Arguments: + depth: The output depth of the block (i.e. #filters); if negative, the + output depth will be set to be the same as the input depth. + filter_size: The size of the 2D filter. If it's specified as an integer, + it's going to create a square filter. Otherwise, this is a tuple + specifying the height x width of the filter. + strides: A tuple specifying the y and x stride. + padding: One of the valid padding modes allowed by tf.nn.conv2d, or + 'REFLECT'/'SYMMETRIC' for mirror padding. + bias: An initializer for the bias, or a Bias class object. If None, there + will be no bias in this block. See BiasAdd block. + act: Optional activation function applied to the output. + atrous_rate: optional input rate for ATrous convolution. If not None, this + will be used and the strides will be ignored. + conv: The convolution function to use (e.g. tf.nn.conv2d). + name: The name for this conv2d op. 
+ """ + super(Conv2DBase, self).__init__(name) + + with self._BlockScope(): + self._act = act if act else PassThrough() + self._bias = BiasAdd(bias) if bias else PassThrough() + + self._kernel_shape = np.zeros((4,), dtype=np.int32) + self._kernel_shape[:2] = filter_size + self._kernel_shape[3] = depth + + self._strides = np.ones((4,), dtype=np.int32) + self._strides[1:3] = strides + self._strides = list(self._strides) + + self._padding = padding + + self._kernel = None + self._conv = conv + + self._atrous_rate = atrous_rate + + def _CreateKernel(self, shape, dtype): + raise NotImplementedError('This method must be sub-classed') + + def _Apply(self, x): + """Apply the self._conv op. + + Arguments: + x: input tensor. It needs to be a 4D tensor of the form + [batch, height, width, channels]. + Returns: + The output of the convolution of x with the current convolutional + kernel. + Raises: + ValueError: if number of channels is not defined at graph construction. + """ + input_shape = x.get_shape().with_rank(4) + input_shape[3:].assert_is_fully_defined() # channels must be defined + if self._kernel is None: + assert self._kernel_shape[2] == 0, self._kernel_shape + self._kernel_shape[2] = input_shape[3].value + if self._kernel_shape[3] < 0: + # Make output depth be the same as input depth. + self._kernel_shape[3] = self._kernel_shape[2] + self._kernel = self._CreateKernel(self._kernel_shape, x.dtype) + + x, padding = HandleConvPaddingModes( + x, self._padding, self._kernel_shape, self._strides) + if self._atrous_rate is None: + x = self._conv(x, self._kernel, strides=self._strides, padding=padding) + else: + x = self._conv(x, self._kernel, rate=self._atrous_rate, padding=padding) + + if self._padding != 'VALID': + # Manually update shape. Known shape information can be lost by tf.pad(). + height = (1 + (input_shape[1].value - 1) // self._strides[1] + if input_shape[1].value else None) + width = (1 + (input_shape[2].value - 1) // self._strides[2] + if input_shape[2].value else None) + shape = x.get_shape() + x.set_shape([shape[0], height, width, shape[3]]) + + return self._act(self._bias(x)) + + +class Conv2D(Conv2DBase): + """A tf.nn.conv2d operator.""" + + def __init__(self, depth, filter_size, strides, padding, + bias=None, act=None, initializer=None, name=None): + """Initializes a Conv2D block. + + Arguments: + depth: The output depth of the block (i.e., #filters) + filter_size: The size of the 2D filter. If it's specified as an integer, + it's going to create a square filter. Otherwise, this is a tuple + specifying the height x width of the filter. + strides: A tuple specifying the y and x stride. + padding: One of the valid padding modes allowed by tf.nn.conv2d, or + 'REFLECT'/'SYMMETRIC' for mirror padding. + bias: An initializer for the bias, or a Bias class object. If None, there + will be no bias in this block. See BiasAdd block. + act: Optional activation function applied to the output. + initializer: Optional initializer for weights. + name: The name for this conv2d op. 
+ """ + super(Conv2D, self).__init__(depth, filter_size, strides, padding, bias, + act, conv=tf.nn.conv2d, name=name) + + with self._BlockScope(): + if initializer is None: + initializer = block_util.RsqrtInitializer(dims=(0, 1, 2)) + self._initializer = initializer + + def _CreateKernel(self, shape, dtype): + return self.NewVar(self._initializer(shape, dtype)) diff --git a/models/research/compression/entropy_coder/lib/blocks_std_test.py b/models/research/compression/entropy_coder/lib/blocks_std_test.py new file mode 100644 index 0000000000000000000000000000000000000000..328ebc9d2173436b2108b343b98650128a4613e3 --- /dev/null +++ b/models/research/compression/entropy_coder/lib/blocks_std_test.py @@ -0,0 +1,340 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for basic tensorflow blocks_std.""" + +from __future__ import division +from __future__ import unicode_literals + +import math +import os + +import numpy as np +from six.moves import xrange +import tensorflow as tf + +import blocks_std + + +def _NumpyConv2D(x, f, strides, padding, rate=1): + assert strides[0] == 1 and strides[3] == 1, strides + + if rate > 1: + f_shape = f.shape + expand_f = np.zeros([f_shape[0], ((f_shape[1] - 1) * rate + 1), + f_shape[2], f_shape[3]]) + expand_f[:, [y * rate for y in range(f_shape[1])], :, :] = f + f = np.zeros([((f_shape[0] - 1) * rate + 1), expand_f.shape[1], + f_shape[2], f_shape[3]]) + f[[y * rate for y in range(f_shape[0])], :, :, :] = expand_f + + if padding != 'VALID': + assert x.shape[1] > 0 and x.shape[2] > 0, x.shape + # Compute the number of padded rows and cols. + # See Conv2D block comments for a math explanation. + remainder = ((x.shape[1] - 1) % strides[1], (x.shape[2] - 1) % strides[2]) + pad_rows = f.shape[0] - remainder[0] - 1 + pad_cols = f.shape[1] - remainder[1] - 1 + pad = ((0, 0), + (pad_rows // 2, (pad_rows + 1) // 2), + (pad_cols // 2, (pad_cols + 1) // 2), + (0, 0)) + + # Pad the input using numpy.pad(). + mode = None + if padding == 'SAME': + mode = str('constant') + if padding == 'REFLECT': + mode = str('reflect') + if padding == 'SYMMETRIC': + mode = str('symmetric') + x = np.pad(x, pad, mode=mode) + + # Since x is now properly padded, proceed as if padding mode is VALID. + x_window = np.empty( + (x.shape[0], + int(math.ceil((x.shape[1] - f.shape[0] + 1) / strides[1])), + int(math.ceil((x.shape[2] - f.shape[1] + 1) / strides[2])), + np.prod(f.shape[:3]))) + + # The output at pixel location (i, j) is the result of linear transformation + # applied to the window whose top-left corner is at + # (i * row_stride, j * col_stride). 
+ for i in xrange(x_window.shape[1]): + k = i * strides[1] + for j in xrange(x_window.shape[2]): + l = j * strides[2] + x_window[:, i, j, :] = x[:, + k:(k + f.shape[0]), + l:(l + f.shape[1]), + :].reshape((x_window.shape[0], -1)) + + y = np.tensordot(x_window, f.reshape((-1, f.shape[3])), axes=1) + return y + + +class BlocksStdTest(tf.test.TestCase): + + def CheckUnary(self, y, op_type): + self.assertEqual(op_type, y.op.type) + self.assertEqual(1, len(y.op.inputs)) + return y.op.inputs[0] + + def CheckBinary(self, y, op_type): + self.assertEqual(op_type, y.op.type) + self.assertEqual(2, len(y.op.inputs)) + return y.op.inputs + + def testPassThrough(self): + p = blocks_std.PassThrough() + x = tf.placeholder(dtype=tf.float32, shape=[1]) + self.assertIs(p(x), x) + + def CheckBiasAdd(self, y, b): + x, u = self.CheckBinary(y, 'BiasAdd') + self.assertIs(u, b._bias.value()) + self.assertEqual(x.dtype, u.dtype.base_dtype) + return x + + def testBiasAdd(self): + b = blocks_std.BiasAdd() + x = tf.placeholder(dtype=tf.float32, shape=[4, 8]) + y = b(x) + self.assertEqual(b._bias.get_shape(), x.get_shape()[-1:]) + self.assertIs(x, self.CheckBiasAdd(y, b)) + + def testBiasRankTest(self): + b = blocks_std.BiasAdd() + x = tf.placeholder(dtype=tf.float32, shape=[10]) + with self.assertRaises(ValueError): + b(x) + + def CheckLinear(self, y, m): + x, w = self.CheckBinary(y, 'MatMul') + self.assertIs(w, m._matrix.value()) + self.assertEqual(x.dtype, w.dtype.base_dtype) + return x + + def testLinear(self): + m = blocks_std.Linear(10) + x = tf.placeholder(dtype=tf.float32, shape=[8, 9]) + y = m(x) + self.assertEqual(m._matrix.get_shape(), [9, 10]) + self.assertIs(x, self.CheckLinear(y, m)) + + def testLinearShared(self): + # Create a linear map which is applied twice on different inputs + # (i.e. the weights of the map are shared). + linear_map = blocks_std.Linear(6) + x1 = tf.random_normal(shape=[1, 5]) + x2 = tf.random_normal(shape=[1, 5]) + xs = x1 + x2 + + # Apply the transform with the same weights. + y1 = linear_map(x1) + y2 = linear_map(x2) + ys = linear_map(xs) + + with self.test_session() as sess: + # Initialize all the variables of the graph. 
+ tf.global_variables_initializer().run() + + y1_res, y2_res, ys_res = sess.run([y1, y2, ys]) + self.assertAllClose(y1_res + y2_res, ys_res) + + def CheckNN(self, y, nn, act=None): + if act: + pre_act = self.CheckUnary(y, act) + else: + pre_act = y + + if not isinstance(nn._bias, blocks_std.PassThrough): + pre_bias = self.CheckBiasAdd(pre_act, nn._bias) + else: + pre_bias = pre_act + + if len(nn._matrices) > 1: + self.assertEqual('AddN', pre_bias.op.type) + pre_bias = pre_bias.op.inputs + else: + pre_bias = [pre_bias] + + self.assertEqual(len(pre_bias), len(nn._matrices)) + return [self.CheckLinear(u, m) for u, m in zip(pre_bias, nn._matrices)] + + def testNNWithoutActWithoutBias(self): + nn = blocks_std.NN(10, act=None, bias=None) + x = tf.placeholder(dtype=tf.float32, shape=[5, 7]) + y = nn(x) + self.assertIs(x, self.CheckNN(y, nn)[0]) + + def testNNWithoutBiasWithAct(self): + nn = blocks_std.NN(10, act=tf.nn.relu, bias=None) + x = tf.placeholder(dtype=tf.float32, shape=[5, 7]) + y = nn(x) + self.assertIs(x, self.CheckNN(y, nn, 'Relu')[0]) + + def testNNWithBiasWithoutAct(self): + nn = blocks_std.NN(10, bias=blocks_std.Bias(0), act=None) + x = tf.placeholder(dtype=tf.float32, shape=[5, 7]) + y = nn(x) + self.assertIs(x, self.CheckNN(y, nn)[0]) + + def testNNWithBiasWithAct(self): + nn = blocks_std.NN(10, bias=blocks_std.Bias(0), act=tf.square) + x = tf.placeholder(dtype=tf.float32, shape=[5, 7]) + y = nn(x) + self.assertIs(x, self.CheckNN(y, nn, 'Square')[0]) + + def testNNMultipleInputs(self): + nn = blocks_std.NN(10, bias=blocks_std.Bias(0), act=tf.tanh) + x = [tf.placeholder(dtype=tf.float32, shape=[5, 7]), + tf.placeholder(dtype=tf.float32, shape=[5, 3]), + tf.placeholder(dtype=tf.float32, shape=[5, 5])] + y = nn(*x) + xs = self.CheckNN(y, nn, 'Tanh') + self.assertEqual(len(x), len(xs)) + for u, v in zip(x, xs): + self.assertIs(u, v) + + def testConv2DSAME(self): + np.random.seed(142536) + + x_shape = [4, 16, 11, 5] + f_shape = [4, 3, 5, 6] + strides = [1, 2, 2, 1] + padding = 'SAME' + + conv = blocks_std.Conv2D(depth=f_shape[-1], + filter_size=f_shape[0:2], + strides=strides[1:3], + padding=padding, + act=None, + bias=None) + x_value = np.random.normal(size=x_shape) + x = tf.convert_to_tensor(x_value, dtype=tf.float32) + y = conv(x) + + with self.test_session(): + tf.global_variables_initializer().run() + f_value = conv._kernel.eval() + y_value = y.eval() + + y_expected = _NumpyConv2D(x_value, f_value, + strides=strides, padding=padding) + self.assertAllClose(y_expected, y_value) + + def testConv2DValid(self): + np.random.seed(253647) + + x_shape = [4, 11, 12, 5] + f_shape = [5, 2, 5, 5] + strides = [1, 2, 2, 1] + padding = 'VALID' + + conv = blocks_std.Conv2D(depth=f_shape[-1], + filter_size=f_shape[0:2], + strides=strides[1:3], + padding=padding, + act=None, + bias=None) + x_value = np.random.normal(size=x_shape) + x = tf.convert_to_tensor(x_value, dtype=tf.float32) + y = conv(x) + + with self.test_session(): + tf.global_variables_initializer().run() + f_value = conv._kernel.eval() + y_value = y.eval() + + y_expected = _NumpyConv2D(x_value, f_value, + strides=strides, padding=padding) + self.assertAllClose(y_expected, y_value) + + def testConv2DSymmetric(self): + np.random.seed(364758) + + x_shape = [4, 10, 12, 6] + f_shape = [3, 4, 6, 5] + strides = [1, 1, 1, 1] + padding = 'SYMMETRIC' + + conv = blocks_std.Conv2D(depth=f_shape[-1], + filter_size=f_shape[0:2], + strides=strides[1:3], + padding=padding, + act=None, + bias=None) + x_value = np.random.normal(size=x_shape) + x = 
tf.convert_to_tensor(x_value, dtype=tf.float32) + y = conv(x) + + with self.test_session(): + tf.global_variables_initializer().run() + f_value = conv._kernel.eval() + y_value = y.eval() + + y_expected = _NumpyConv2D(x_value, f_value, + strides=strides, padding=padding) + self.assertAllClose(y_expected, y_value) + + def testConv2DReflect(self): + np.random.seed(768798) + + x_shape = [4, 10, 12, 6] + f_shape = [3, 4, 6, 5] + strides = [1, 2, 2, 1] + padding = 'REFLECT' + + conv = blocks_std.Conv2D(depth=f_shape[-1], + filter_size=f_shape[0:2], + strides=strides[1:3], + padding=padding, + act=None, + bias=None) + x_value = np.random.normal(size=x_shape) + x = tf.convert_to_tensor(x_value, dtype=tf.float32) + y = conv(x) + + with self.test_session(): + tf.global_variables_initializer().run() + f_value = conv._kernel.eval() + y_value = y.eval() + + y_expected = _NumpyConv2D(x_value, f_value, + strides=strides, padding=padding) + self.assertAllClose(y_expected, y_value) + + def testConv2DBias(self): + input_shape = [19, 14, 14, 64] + filter_shape = [3, 7, 64, 128] + strides = [1, 2, 2, 1] + output_shape = [19, 6, 4, 128] + + conv = blocks_std.Conv2D(depth=filter_shape[-1], + filter_size=filter_shape[0:2], + strides=strides[1:3], + padding='VALID', + act=None, + bias=blocks_std.Bias(1)) + x = tf.placeholder(dtype=tf.float32, shape=input_shape) + + y = conv(x) + self.CheckBiasAdd(y, conv._bias) + self.assertEqual(output_shape, y.get_shape().as_list()) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/compression/entropy_coder/model/__init__.py b/models/research/compression/entropy_coder/model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/compression/entropy_coder/model/entropy_coder_model.py b/models/research/compression/entropy_coder/model/entropy_coder_model.py new file mode 100644 index 0000000000000000000000000000000000000000..67f7eb5bc05f3df7363529c19fa77d176caaabc1 --- /dev/null +++ b/models/research/compression/entropy_coder/model/entropy_coder_model.py @@ -0,0 +1,55 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Entropy coder model.""" + + +class EntropyCoderModel(object): + """Entropy coder model.""" + + def __init__(self): + # Loss used for training the model. + self.loss = None + + # Tensorflow op to run to train the model. + self.train_op = None + + # Tensor corresponding to the average code length of the input bit field + # tensor. The average code length is a number of output bits per input bit. + # To get an effective compression, this number should be between 0.0 + # and 1.0 (1.0 corresponds to no compression). 
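+    # As a hypothetical illustration: an average code length of 0.5 means the
+    # entropy coder emits on average 0.5 output bit per input bit, i.e.
+    # roughly a 2x compression of the binary codes.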
+ self.average_code_length = None + + def Initialize(self, global_step, optimizer, config_string): + raise NotImplementedError() + + def BuildGraph(self, input_codes): + """Build the Tensorflow graph corresponding to the entropy coder model. + + Args: + input_codes: Tensor of size: batch_size x height x width x bit_depth + corresponding to the codes to compress. + The input codes are {-1, +1} codes. + """ + # TODO: + # - consider switching to {0, 1} codes. + # - consider passing an extra tensor which gives for each (b, y, x) + # what is the actual depth (which would allow to use more or less bits + # for each (y, x) location. + raise NotImplementedError() + + def GetConfigStringForUnitTest(self): + """Returns a default model configuration to be used for unit tests.""" + return None diff --git a/models/research/compression/entropy_coder/model/model_factory.py b/models/research/compression/entropy_coder/model/model_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..e6f9902f3bb720e76f228f2774a9eaf7774ef191 --- /dev/null +++ b/models/research/compression/entropy_coder/model/model_factory.py @@ -0,0 +1,53 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Entropy coder model registrar.""" + + +class ModelFactory(object): + """Factory of encoder/decoder models.""" + + def __init__(self): + self._model_dictionary = dict() + + def RegisterModel(self, + entropy_coder_model_name, + entropy_coder_model_factory): + self._model_dictionary[entropy_coder_model_name] = ( + entropy_coder_model_factory) + + def CreateModel(self, model_name): + current_model_factory = self._model_dictionary[model_name] + return current_model_factory() + + def GetAvailableModels(self): + return self._model_dictionary.keys() + + +_model_registry = ModelFactory() + + +def GetModelRegistry(): + return _model_registry + + +class RegisterEntropyCoderModel(object): + + def __init__(self, model_name): + self._model_name = model_name + + def __call__(self, f): + _model_registry.RegisterModel(self._model_name, f) + return f diff --git a/models/research/compression/entropy_coder/progressive/__init__.py b/models/research/compression/entropy_coder/progressive/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/compression/entropy_coder/progressive/progressive.py b/models/research/compression/entropy_coder/progressive/progressive.py new file mode 100644 index 0000000000000000000000000000000000000000..7b03a07db055b62aa1c0f9cc89ddd2472899db3c --- /dev/null +++ b/models/research/compression/entropy_coder/progressive/progressive.py @@ -0,0 +1,242 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Code probability model used for entropy coding.""" + +import json + +from six.moves import xrange +import tensorflow as tf + +from entropy_coder.lib import blocks +from entropy_coder.model import entropy_coder_model +from entropy_coder.model import model_factory + +# pylint: disable=not-callable + + +class BrnnPredictor(blocks.BlockBase): + """BRNN prediction applied on one layer.""" + + def __init__(self, code_depth, name=None): + super(BrnnPredictor, self).__init__(name) + + with self._BlockScope(): + hidden_depth = 2 * code_depth + + # What is coming from the previous layer/iteration + # is going through a regular Conv2D layer as opposed to the binary codes + # of the current layer/iteration which are going through a masked + # convolution. + self._adaptation0 = blocks.RasterScanConv2D( + hidden_depth, [7, 7], [1, 1], 'SAME', + strict_order=True, + bias=blocks.Bias(0), act=tf.tanh) + self._adaptation1 = blocks.Conv2D( + hidden_depth, [3, 3], [1, 1], 'SAME', + bias=blocks.Bias(0), act=tf.tanh) + self._predictor = blocks.CompositionOperator([ + blocks.LineOperator( + blocks.RasterScanConv2DLSTM( + depth=hidden_depth, + filter_size=[1, 3], + hidden_filter_size=[1, 3], + strides=[1, 1], + padding='SAME')), + blocks.Conv2D(hidden_depth, [1, 1], [1, 1], 'SAME', + bias=blocks.Bias(0), act=tf.tanh), + blocks.Conv2D(code_depth, [1, 1], [1, 1], 'SAME', + bias=blocks.Bias(0), act=tf.tanh) + ]) + + def _Apply(self, x, s): + # Code estimation using both: + # - the state from the previous iteration/layer, + # - the binary codes that are before in raster scan order. + h = tf.concat(values=[self._adaptation0(x), self._adaptation1(s)], axis=3) + + estimated_codes = self._predictor(h) + + return estimated_codes + + +class LayerPrediction(blocks.BlockBase): + """Binary code prediction for one layer.""" + + def __init__(self, layer_count, code_depth, name=None): + super(LayerPrediction, self).__init__(name) + + self._layer_count = layer_count + + # No previous layer. + self._layer_state = None + self._current_layer = 0 + + with self._BlockScope(): + # Layers used to do the conditional code prediction. + self._brnn_predictors = [] + for _ in xrange(layer_count): + self._brnn_predictors.append(BrnnPredictor(code_depth)) + + # Layers used to generate the input of the LSTM operating on the + # iteration/depth domain. + hidden_depth = 2 * code_depth + self._state_blocks = [] + for _ in xrange(layer_count): + self._state_blocks.append(blocks.CompositionOperator([ + blocks.Conv2D( + hidden_depth, [3, 3], [1, 1], 'SAME', + bias=blocks.Bias(0), act=tf.tanh), + blocks.Conv2D( + code_depth, [3, 3], [1, 1], 'SAME', + bias=blocks.Bias(0), act=tf.tanh) + ])) + + # Memory of the RNN is equivalent to the size of 2 layers of binary + # codes. 
+ hidden_depth = 2 * code_depth + self._layer_rnn = blocks.CompositionOperator([ + blocks.Conv2DLSTM( + depth=hidden_depth, + filter_size=[1, 1], + hidden_filter_size=[1, 1], + strides=[1, 1], + padding='SAME'), + blocks.Conv2D(hidden_depth, [1, 1], [1, 1], 'SAME', + bias=blocks.Bias(0), act=tf.tanh), + blocks.Conv2D(code_depth, [1, 1], [1, 1], 'SAME', + bias=blocks.Bias(0), act=tf.tanh) + ]) + + def _Apply(self, x): + assert self._current_layer < self._layer_count + + # Layer state is set to 0 when there is no previous iteration. + if self._layer_state is None: + self._layer_state = tf.zeros_like(x, dtype=tf.float32) + + # Code estimation using both: + # - the state from the previous iteration/layer, + # - the binary codes that are before in raster scan order. + estimated_codes = self._brnn_predictors[self._current_layer]( + x, self._layer_state) + + # Compute the updated layer state. + h = self._state_blocks[self._current_layer](x) + self._layer_state = self._layer_rnn(h) + self._current_layer += 1 + + return estimated_codes + + +class ProgressiveModel(entropy_coder_model.EntropyCoderModel): + """Progressive BRNN entropy coder model.""" + + def __init__(self): + super(ProgressiveModel, self).__init__() + + def Initialize(self, global_step, optimizer, config_string): + if config_string is None: + raise ValueError('The progressive model requires a configuration.') + config = json.loads(config_string) + if 'coded_layer_count' not in config: + config['coded_layer_count'] = 0 + + self._config = config + self._optimizer = optimizer + self._global_step = global_step + + def BuildGraph(self, input_codes): + """Build the graph corresponding to the progressive BRNN model.""" + layer_depth = self._config['layer_depth'] + layer_count = self._config['layer_count'] + + code_shape = input_codes.get_shape() + code_depth = code_shape[-1].value + if self._config['coded_layer_count'] > 0: + prefix_depth = self._config['coded_layer_count'] * layer_depth + if code_depth < prefix_depth: + raise ValueError('Invalid prefix depth: {} VS {}'.format( + prefix_depth, code_depth)) + input_codes = input_codes[:, :, :, :prefix_depth] + + code_shape = input_codes.get_shape() + code_depth = code_shape[-1].value + if code_depth % layer_depth != 0: + raise ValueError( + 'Code depth must be a multiple of the layer depth: {} vs {}'.format( + code_depth, layer_depth)) + code_layer_count = code_depth // layer_depth + if code_layer_count > layer_count: + raise ValueError('Input codes have too many layers: {}, max={}'.format( + code_layer_count, layer_count)) + + # Block used to estimate binary codes. + layer_prediction = LayerPrediction(layer_count, layer_depth) + + # Block used to compute code lengths. + code_length_block = blocks.CodeLength() + + # Loop over all the layers. + code_length = [] + code_layers = tf.split( + value=input_codes, num_or_size_splits=code_layer_count, axis=3) + for k in xrange(code_layer_count): + x = code_layers[k] + predicted_x = layer_prediction(x) + # Saturate the prediction to avoid infinite code length. 
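+      # (A prediction saturated at exactly -1 or +1 assigns probability 0 to
+      # one of the two code values; a mispredicted code at such a position
+      # would then have an unbounded code length, hence the clipping below.)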
+      epsilon = 0.001
+      predicted_x = tf.clip_by_value(
+          predicted_x, -1 + epsilon, +1 - epsilon)
+      code_length.append(code_length_block(
+          blocks.ConvertSignCodeToZeroOneCode(x),
+          blocks.ConvertSignCodeToZeroOneCode(predicted_x)))
+      tf.summary.scalar('code_length_layer_{:02d}'.format(k), code_length[-1])
+    code_length = tf.stack(code_length)
+    self.loss = tf.reduce_mean(code_length)
+    tf.summary.scalar('loss', self.loss)
+
+    # Loop over all the remaining layers just to make sure they are
+    # instantiated. Otherwise, loading model params could fail.
+    dummy_x = tf.zeros_like(code_layers[0])
+    for _ in xrange(layer_count - code_layer_count):
+      dummy_predicted_x = layer_prediction(dummy_x)
+
+    # Average bitrate over total_line_count.
+    self.average_code_length = tf.reduce_mean(code_length)
+
+    if self._optimizer:
+      optim_op = self._optimizer.minimize(self.loss,
+                                          global_step=self._global_step)
+      block_updates = blocks.CreateBlockUpdates()
+      if block_updates:
+        with tf.get_default_graph().control_dependencies([optim_op]):
+          self.train_op = tf.group(*block_updates)
+      else:
+        self.train_op = optim_op
+    else:
+      self.train_op = None
+
+  def GetConfigStringForUnitTest(self):
+    s = '{\n'
+    s += '"layer_depth": 1,\n'
+    s += '"layer_count": 8\n'
+    s += '}\n'
+    return s
+
+
+@model_factory.RegisterEntropyCoderModel('progressive')
+def CreateProgressiveModel():
+  return ProgressiveModel()
diff --git a/models/research/compression/image_encoder/README.md b/models/research/compression/image_encoder/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..a47da977aa4db4be26528c5ebfe030024f31291b
--- /dev/null
+++ b/models/research/compression/image_encoder/README.md
@@ -0,0 +1,105 @@
+# Image Compression with Neural Networks
+
+This is a [TensorFlow](http://www.tensorflow.org/) model for compressing and
+decompressing images using an already trained Residual GRU model as described
+in [Full Resolution Image Compression with Recurrent Neural Networks](https://arxiv.org/abs/1608.05148). Please consult the paper for more details
+on the architecture and compression results.
+
+This code allows you to perform lossy compression with a model that has
+already been trained for compression. It does not currently include the
+entropy coding portion of our paper.
+
+
+## Prerequisites
+The only software requirement for running the encoder and decoder is a
+working TensorFlow installation. You will also need to [download](http://download.tensorflow.org/models/compression_residual_gru-2016-08-23.tar.gz)
+and extract the model residual_gru.pb.
+
+If you want to measure perceptual similarity with MS-SSIM, you will also
+need to [install SciPy](https://www.scipy.org/install.html).
+
+## Encoding
+The Residual GRU network is fully convolutional, but requires the image's
+height and width in pixels to be a multiple of 32. An example image,
+example.png (768x1024), is included in this folder for testing. We also
+rely on TensorFlow's built-in decoding ops, which support only PNG and JPEG
+at the time of release.
+
+To encode an image, simply run the following command:
+
+`python encoder.py --input_image=/your/image/here.png
+--output_codes=output_codes.npz --iteration=15
+--model=/path/to/model/residual_gru.pb
+`
+
+The iteration parameter specifies the quality level to target for lossy
+compression. The quality level can range from 0 to 15, where 0 corresponds
+to a target of 1/8 bits per pixel (bpp) and every increment adds another
+1/8 bpp.
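+
+As a rough guide (assuming the uncompressed input is 24-bit RGB), the target
+bitrate and the compression ratios in the table below follow from
+`bpp = (iteration + 1) / 8.0` and `compression ratio = 24 / bpp`.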
+
+| Iteration | BPP | Compression Ratio |
+|---: |---: |---: |
+|0 | 0.125 | 192:1|
+|1 | 0.250 | 96:1|
+|2 | 0.375 | 64:1|
+|3 | 0.500 | 48:1|
+|4 | 0.625 | 38.4:1|
+|5 | 0.750 | 32:1|
+|6 | 0.875 | 27.4:1|
+|7 | 1.000 | 24:1|
+|8 | 1.125 | 21.3:1|
+|9 | 1.250 | 19.2:1|
+|10 | 1.375 | 17.4:1|
+|11 | 1.500 | 16:1|
+|12 | 1.625 | 14.7:1|
+|13 | 1.750 | 13.7:1|
+|14 | 1.875 | 12.8:1|
+|15 | 2.000 | 12:1|
+
+The output_codes file contains the numpy shape of the codes and a flattened,
+bit-packed array of the codes themselves. Both can be inspected in Python
+using numpy.load().
+
+
+## Decoding
+After generating codes for an image, the lossy reconstructions for that image
+can be produced as follows:
+
+`python decoder.py --input_codes=codes.npz --output_directory=/tmp/decoded/
+--model=residual_gru.pb`
+
+The output_directory will contain images decoded at each quality level.
+
+
+## Comparing Similarity
+One of our primary metrics for measuring how similar two images are
+is MS-SSIM.
+
+To compute this metric on your images, run:
+`python msssim.py --original_image=/path/to/your/image.png
+--compared_image=/tmp/decoded/image_15.png`
+
+
+## Results
+CSV results containing the post-entropy bitrates and MS-SSIM scores over the
+Kodak dataset are available for reference. Each row of the CSV corresponds to
+one of the 24 Kodak images (numbered 1-24), and each column corresponds to
+one iteration of the model (1-16).
+
+[Post Entropy Bitrates](https://storage.googleapis.com/compression-ml/residual_gru_results/bitrate.csv)
+
+[MS-SSIM](https://storage.googleapis.com/compression-ml/residual_gru_results/msssim.csv)
+
+
+## FAQ
+
+#### How do I train my own compression network?
+We currently don't provide the code to build and train a compression
+graph from scratch.
+
+#### I get an InvalidArgumentError: Incompatible shapes.
+This usually happens because the network only supports images whose height
+and width are divisible by 32 pixels. Try padding your images to 32-pixel
+boundaries.
+
+
+## Contact Info
+Model repository maintained by Nick Johnston ([nmjohn](https://github.com/nmjohn)).
diff --git a/models/research/compression/image_encoder/decoder.py b/models/research/compression/image_encoder/decoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..75bc18cad0fdd4055df7b42d5440635365504774
--- /dev/null
+++ b/models/research/compression/image_encoder/decoder.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+#
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+r"""Neural Network Image Compression Decoder.
+
+Decompress an image from the numpy npz format generated by the encoder.
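+
+The npz file written by encoder.py stores two arrays: 'shape' (the shape of
+the original code tensor) and 'codes' (the bit-packed binary codes).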
+ +Example usage: +python decoder.py --input_codes=output_codes.pkl --iteration=15 \ +--output_directory=/tmp/compression_output/ --model=residual_gru.pb +""" +import io +import os + +import numpy as np +import tensorflow as tf + +tf.flags.DEFINE_string('input_codes', None, 'Location of binary code file.') +tf.flags.DEFINE_integer('iteration', -1, 'The max quality level of ' + 'the images to output. Use -1 to infer from loaded ' + ' codes.') +tf.flags.DEFINE_string('output_directory', None, 'Directory to save decoded ' + 'images.') +tf.flags.DEFINE_string('model', None, 'Location of compression model.') + +FLAGS = tf.flags.FLAGS + + +def get_input_tensor_names(): + name_list = ['GruBinarizer/SignBinarizer/Sign:0'] + for i in range(1, 16): + name_list.append('GruBinarizer/SignBinarizer/Sign_{}:0'.format(i)) + return name_list + + +def get_output_tensor_names(): + return ['loop_{0:02d}/add:0'.format(i) for i in range(0, 16)] + + +def main(_): + if (FLAGS.input_codes is None or FLAGS.output_directory is None or + FLAGS.model is None): + print('\nUsage: python decoder.py --input_codes=output_codes.pkl ' + '--iteration=15 --output_directory=/tmp/compression_output/ ' + '--model=residual_gru.pb\n\n') + return + + if FLAGS.iteration < -1 or FLAGS.iteration > 15: + print('\n--iteration must be between 0 and 15 inclusive, or -1 to infer ' + 'from file.\n') + return + iteration = FLAGS.iteration + + if not tf.gfile.Exists(FLAGS.output_directory): + tf.gfile.MkDir(FLAGS.output_directory) + + if not tf.gfile.Exists(FLAGS.input_codes): + print('\nInput codes not found.\n') + return + + contents = '' + with tf.gfile.FastGFile(FLAGS.input_codes, 'rb') as code_file: + contents = code_file.read() + loaded_codes = np.load(io.BytesIO(contents)) + assert ['codes', 'shape'] not in loaded_codes.files + loaded_shape = loaded_codes['shape'] + loaded_array = loaded_codes['codes'] + + # Unpack and recover code shapes. + unpacked_codes = np.reshape(np.unpackbits(loaded_array) + [:np.prod(loaded_shape)], + loaded_shape) + + numpy_int_codes = np.split(unpacked_codes, len(unpacked_codes)) + if iteration == -1: + iteration = len(unpacked_codes) - 1 + # Convert back to float and recover scale. + numpy_codes = [np.squeeze(x.astype(np.float32), 0) * 2 - 1 for x in + numpy_int_codes] + + with tf.Graph().as_default() as graph: + # Load the inference model for decoding. + with tf.gfile.FastGFile(FLAGS.model, 'rb') as model_file: + graph_def = tf.GraphDef() + graph_def.ParseFromString(model_file.read()) + _ = tf.import_graph_def(graph_def, name='') + + # For encoding the tensors into PNGs. 
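+    # (The floating point reconstructions computed below are clipped, cast to
+    # uint8, and then fed through this small side graph to produce PNG bytes.)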
+ input_image = tf.placeholder(tf.uint8) + encoded_image = tf.image.encode_png(input_image) + + input_tensors = [graph.get_tensor_by_name(name) for name in + get_input_tensor_names()][0:iteration+1] + outputs = [graph.get_tensor_by_name(name) for name in + get_output_tensor_names()][0:iteration+1] + + feed_dict = {key: value for (key, value) in zip(input_tensors, + numpy_codes)} + + with tf.Session(graph=graph) as sess: + results = sess.run(outputs, feed_dict=feed_dict) + + for index, result in enumerate(results): + img = np.uint8(np.clip(result + 0.5, 0, 255)) + img = img.squeeze() + png_img = sess.run(encoded_image, feed_dict={input_image: img}) + + with tf.gfile.FastGFile(os.path.join(FLAGS.output_directory, + 'image_{0:02d}.png'.format(index)), + 'w') as output_image: + output_image.write(png_img) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/compression/image_encoder/encoder.py b/models/research/compression/image_encoder/encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..27754bdaea19779cea653408d17ed2e6a051f0c5 --- /dev/null +++ b/models/research/compression/image_encoder/encoder.py @@ -0,0 +1,105 @@ +#!/usr/bin/python +# +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Neural Network Image Compression Encoder. + +Compresses an image to a binarized numpy array. The image must be padded to a +multiple of 32 pixels in height and width. + +Example usage: +python encoder.py --input_image=/your/image/here.png \ +--output_codes=output_codes.pkl --iteration=15 --model=residual_gru.pb +""" +import io +import os + +import numpy as np +import tensorflow as tf + +tf.flags.DEFINE_string('input_image', None, 'Location of input image. We rely ' + 'on tf.image to decode the image, so only PNG and JPEG ' + 'formats are currently supported.') +tf.flags.DEFINE_integer('iteration', 15, 'Quality level for encoding image. 
' + 'Must be between 0 and 15 inclusive.') +tf.flags.DEFINE_string('output_codes', None, 'File to save output encoding.') +tf.flags.DEFINE_string('model', None, 'Location of compression model.') + +FLAGS = tf.flags.FLAGS + + +def get_output_tensor_names(): + name_list = ['GruBinarizer/SignBinarizer/Sign:0'] + for i in range(1, 16): + name_list.append('GruBinarizer/SignBinarizer/Sign_{}:0'.format(i)) + return name_list + + +def main(_): + if (FLAGS.input_image is None or FLAGS.output_codes is None or + FLAGS.model is None): + print('\nUsage: python encoder.py --input_image=/your/image/here.png ' + '--output_codes=output_codes.pkl --iteration=15 ' + '--model=residual_gru.pb\n\n') + return + + if FLAGS.iteration < 0 or FLAGS.iteration > 15: + print('\n--iteration must be between 0 and 15 inclusive.\n') + return + + with tf.gfile.FastGFile(FLAGS.input_image, 'rb') as input_image: + input_image_str = input_image.read() + + with tf.Graph().as_default() as graph: + # Load the inference model for encoding. + with tf.gfile.FastGFile(FLAGS.model, 'rb') as model_file: + graph_def = tf.GraphDef() + graph_def.ParseFromString(model_file.read()) + _ = tf.import_graph_def(graph_def, name='') + + input_tensor = graph.get_tensor_by_name('Placeholder:0') + outputs = [graph.get_tensor_by_name(name) for name in + get_output_tensor_names()] + + input_image = tf.placeholder(tf.string) + _, ext = os.path.splitext(FLAGS.input_image) + if ext == '.png': + decoded_image = tf.image.decode_png(input_image, channels=3) + elif ext == '.jpeg' or ext == '.jpg': + decoded_image = tf.image.decode_jpeg(input_image, channels=3) + else: + assert False, 'Unsupported file format {}'.format(ext) + decoded_image = tf.expand_dims(decoded_image, 0) + + with tf.Session(graph=graph) as sess: + img_array = sess.run(decoded_image, feed_dict={input_image: + input_image_str}) + results = sess.run(outputs, feed_dict={input_tensor: img_array}) + + results = results[0:FLAGS.iteration + 1] + int_codes = np.asarray([x.astype(np.int8) for x in results]) + + # Convert int codes to binary. + int_codes = (int_codes + 1)//2 + export = np.packbits(int_codes.reshape(-1)) + + output = io.BytesIO() + np.savez_compressed(output, shape=int_codes.shape, codes=export) + with tf.gfile.FastGFile(FLAGS.output_codes, 'w') as code_file: + code_file.write(output.getvalue()) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/compression/image_encoder/example.png b/models/research/compression/image_encoder/example.png new file mode 100644 index 0000000000000000000000000000000000000000..2dbfa1a674973b84f8e633862de09bc00915b1f7 --- /dev/null +++ b/models/research/compression/image_encoder/example.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6aebce63dbd1e8cc6a13aaa2a924335b85a252c504b4f28e924e129de0fee2f0 +size 3155141 diff --git a/models/research/compression/image_encoder/msssim.py b/models/research/compression/image_encoder/msssim.py new file mode 100644 index 0000000000000000000000000000000000000000..f07a3712785c62feb261feb90016e0f621a3ee1d --- /dev/null +++ b/models/research/compression/image_encoder/msssim.py @@ -0,0 +1,217 @@ +#!/usr/bin/python +# +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Python implementation of MS-SSIM. + +Usage: + +python msssim.py --original_image=original.png --compared_image=distorted.png +""" +import numpy as np +from scipy import signal +from scipy.ndimage.filters import convolve +import tensorflow as tf + + +tf.flags.DEFINE_string('original_image', None, 'Path to PNG image.') +tf.flags.DEFINE_string('compared_image', None, 'Path to PNG image.') +FLAGS = tf.flags.FLAGS + + +def _FSpecialGauss(size, sigma): + """Function to mimic the 'fspecial' gaussian MATLAB function.""" + radius = size // 2 + offset = 0.0 + start, stop = -radius, radius + 1 + if size % 2 == 0: + offset = 0.5 + stop -= 1 + x, y = np.mgrid[offset + start:stop, offset + start:stop] + assert len(x) == size + g = np.exp(-((x**2 + y**2)/(2.0 * sigma**2))) + return g / g.sum() + + +def _SSIMForMultiScale(img1, img2, max_val=255, filter_size=11, + filter_sigma=1.5, k1=0.01, k2=0.03): + """Return the Structural Similarity Map between `img1` and `img2`. + + This function attempts to match the functionality of ssim_index_new.m by + Zhou Wang: http://www.cns.nyu.edu/~lcv/ssim/msssim.zip + + Arguments: + img1: Numpy array holding the first RGB image batch. + img2: Numpy array holding the second RGB image batch. + max_val: the dynamic range of the images (i.e., the difference between the + maximum the and minimum allowed values). + filter_size: Size of blur kernel to use (will be reduced for small images). + filter_sigma: Standard deviation for Gaussian blur kernel (will be reduced + for small images). + k1: Constant used to maintain stability in the SSIM calculation (0.01 in + the original paper). + k2: Constant used to maintain stability in the SSIM calculation (0.03 in + the original paper). + + Returns: + Pair containing the mean SSIM and contrast sensitivity between `img1` and + `img2`. + + Raises: + RuntimeError: If input images don't have the same shape or don't have four + dimensions: [batch_size, height, width, depth]. + """ + if img1.shape != img2.shape: + raise RuntimeError('Input images must have the same shape (%s vs. %s).', + img1.shape, img2.shape) + if img1.ndim != 4: + raise RuntimeError('Input images must have four dimensions, not %d', + img1.ndim) + + img1 = img1.astype(np.float64) + img2 = img2.astype(np.float64) + _, height, width, _ = img1.shape + + # Filter size can't be larger than height or width of images. + size = min(filter_size, height, width) + + # Scale down sigma if a smaller filter size is used. + sigma = size * filter_sigma / filter_size if filter_size else 0 + + if filter_size: + window = np.reshape(_FSpecialGauss(size, sigma), (1, size, size, 1)) + mu1 = signal.fftconvolve(img1, window, mode='valid') + mu2 = signal.fftconvolve(img2, window, mode='valid') + sigma11 = signal.fftconvolve(img1 * img1, window, mode='valid') + sigma22 = signal.fftconvolve(img2 * img2, window, mode='valid') + sigma12 = signal.fftconvolve(img1 * img2, window, mode='valid') + else: + # Empty blur kernel so no need to convolve. 
+ mu1, mu2 = img1, img2 + sigma11 = img1 * img1 + sigma22 = img2 * img2 + sigma12 = img1 * img2 + + mu11 = mu1 * mu1 + mu22 = mu2 * mu2 + mu12 = mu1 * mu2 + sigma11 -= mu11 + sigma22 -= mu22 + sigma12 -= mu12 + + # Calculate intermediate values used by both ssim and cs_map. + c1 = (k1 * max_val) ** 2 + c2 = (k2 * max_val) ** 2 + v1 = 2.0 * sigma12 + c2 + v2 = sigma11 + sigma22 + c2 + ssim = np.mean((((2.0 * mu12 + c1) * v1) / ((mu11 + mu22 + c1) * v2))) + cs = np.mean(v1 / v2) + return ssim, cs + + +def MultiScaleSSIM(img1, img2, max_val=255, filter_size=11, filter_sigma=1.5, + k1=0.01, k2=0.03, weights=None): + """Return the MS-SSIM score between `img1` and `img2`. + + This function implements Multi-Scale Structural Similarity (MS-SSIM) Image + Quality Assessment according to Zhou Wang's paper, "Multi-scale structural + similarity for image quality assessment" (2003). + Link: https://ece.uwaterloo.ca/~z70wang/publications/msssim.pdf + + Author's MATLAB implementation: + http://www.cns.nyu.edu/~lcv/ssim/msssim.zip + + Arguments: + img1: Numpy array holding the first RGB image batch. + img2: Numpy array holding the second RGB image batch. + max_val: the dynamic range of the images (i.e., the difference between the + maximum the and minimum allowed values). + filter_size: Size of blur kernel to use (will be reduced for small images). + filter_sigma: Standard deviation for Gaussian blur kernel (will be reduced + for small images). + k1: Constant used to maintain stability in the SSIM calculation (0.01 in + the original paper). + k2: Constant used to maintain stability in the SSIM calculation (0.03 in + the original paper). + weights: List of weights for each level; if none, use five levels and the + weights from the original paper. + + Returns: + MS-SSIM score between `img1` and `img2`. + + Raises: + RuntimeError: If input images don't have the same shape or don't have four + dimensions: [batch_size, height, width, depth]. + """ + if img1.shape != img2.shape: + raise RuntimeError('Input images must have the same shape (%s vs. %s).', + img1.shape, img2.shape) + if img1.ndim != 4: + raise RuntimeError('Input images must have four dimensions, not %d', + img1.ndim) + + # Note: default weights don't sum to 1.0 but do match the paper / matlab code. 
+  weights = np.array(weights if weights else
+                     [0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
+  levels = weights.size
+  downsample_filter = np.ones((1, 2, 2, 1)) / 4.0
+  im1, im2 = [x.astype(np.float64) for x in [img1, img2]]
+  mssim = np.array([])
+  mcs = np.array([])
+  for _ in range(levels):
+    ssim, cs = _SSIMForMultiScale(
+        im1, im2, max_val=max_val, filter_size=filter_size,
+        filter_sigma=filter_sigma, k1=k1, k2=k2)
+    mssim = np.append(mssim, ssim)
+    mcs = np.append(mcs, cs)
+    filtered = [convolve(im, downsample_filter, mode='reflect')
+                for im in [im1, im2]]
+    im1, im2 = [x[:, ::2, ::2, :] for x in filtered]
+  return (np.prod(mcs[0:levels-1] ** weights[0:levels-1]) *
+          (mssim[levels-1] ** weights[levels-1]))
+
+
+def main(_):
+  if FLAGS.original_image is None or FLAGS.compared_image is None:
+    print('\nUsage: python msssim.py --original_image=original.png '
+          '--compared_image=distorted.png\n\n')
+    return
+
+  if not tf.gfile.Exists(FLAGS.original_image):
+    print('\nCannot find --original_image.\n')
+    return
+
+  if not tf.gfile.Exists(FLAGS.compared_image):
+    print('\nCannot find --compared_image.\n')
+    return
+
+  with tf.gfile.FastGFile(FLAGS.original_image, 'rb') as image_file:
+    img1_str = image_file.read()
+  with tf.gfile.FastGFile(FLAGS.compared_image, 'rb') as image_file:
+    img2_str = image_file.read()
+
+  input_img = tf.placeholder(tf.string)
+  decoded_image = tf.expand_dims(tf.image.decode_png(input_img, channels=3), 0)
+
+  with tf.Session() as sess:
+    img1 = sess.run(decoded_image, feed_dict={input_img: img1_str})
+    img2 = sess.run(decoded_image, feed_dict={input_img: img2_str})
+
+  print((MultiScaleSSIM(img1, img2, max_val=255)))
+
+
+if __name__ == '__main__':
+  tf.app.run()
diff --git a/models/research/cvt_text/README.md b/models/research/cvt_text/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1c0a415b164192fe0175048575f875d055167d5d
--- /dev/null
+++ b/models/research/cvt_text/README.md
@@ -0,0 +1,38 @@
+# Cross-View Training
+
+This repository contains code for *Semi-Supervised Sequence Modeling with Cross-View Training*. Currently sequence tagging and dependency parsing tasks are supported.
+
+## Requirements
+* [Tensorflow](https://www.tensorflow.org/)
+* [Numpy](http://www.numpy.org/)
+
+This code has been run with TensorFlow 1.10.1 and Numpy 1.14.5; other versions may work, but have not been tested.
+
+## Fetching and Preprocessing Data
+Run `fetch_data.sh` to download and extract pretrained [GloVe](https://nlp.stanford.edu/projects/glove/) vectors, the [1 Billion Word Language Model Benchmark](http://www.statmt.org/lm-benchmark/) corpus of unlabeled data, and the CoNLL-2000 [text chunking](https://www.clips.uantwerpen.be/conll2000/chunking/) dataset. Unfortunately the other datasets from our paper are not freely available and so can't be included in this repository.
+
+To apply CVT to other datasets, the data should be placed in `data/raw_data/<task_name>/(train|dev|test).txt`. For sequence tagging data, each line should contain a word followed by a space followed by that word's tag. Sentences should be separated by empty lines. For dependency parsing, each tag should be of the form `<index_of_head>-<relation>` (e.g., `0-root`).
+
+After all of the data has been downloaded, run `preprocessing.py`.
+
+## Training a Model
+Run `python cvt.py --mode=train --model_name=chunking_model`. By default this trains a model on the chunking data downloaded with `fetch_data.sh`. To change which task(s) are trained on or model hyperparameters, modify [base/configure.py](base/configure.py).
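For example (a sketch, not part of this repository), the same settings can also be overridden programmatically: `configure.Config` accepts keyword arguments for any of its attributes and rejects unknown names. The task list and model name below are hypothetical:

```
from base import configure

# Hypothetical run: train chunking and POS tagging jointly with a lower
# base learning rate. Unknown keyword arguments raise a ValueError.
config = configure.Config(
    mode='train',
    model_name='chunk_pos_model',
    task_names=['chunk', 'pos'],
    lr=0.3,
)
config.write()  # saves config.json under data/models/chunk_pos_model/
```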
Models are automatically checkpointed every 1000 steps; training will continue from the latest checkpoint if training is interrupted and restarted. Model checkpoints and other data such as dev set accuracy over time are stored in `data/models/`. + +## Evaluating a Model +Run `python cvt.py --mode=eval --model_name=chunking_model`. A CVT model trained on the chunking data for 200k steps should get at least 97.1 F1 on the dev set and 96.6 F1 on the test set. + +## Citation +If you use this code for your publication, please cite the original paper: +``` +@inproceedings{clark2018semi, + title = {Semi-Supervised Sequence Modeling with Cross-View Training}, + author = {Kevin Clark and Minh-Thang Luong and Christopher D. Manning and Quoc V. Le}, + booktitle = {EMNLP}, + year = {2018} +} +``` + +## Contact +* [Kevin Clark](https://cs.stanford.edu/~kevclark/) (@clarkkev). +* [Thang Luong](https://nlp.stanford.edu/~lmthang/) (@lmthang). + diff --git a/models/research/cvt_text/__init__.py b/models/research/cvt_text/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/cvt_text/base/__init__.py b/models/research/cvt_text/base/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/cvt_text/base/configure.py b/models/research/cvt_text/base/configure.py new file mode 100644 index 0000000000000000000000000000000000000000..38a69859412ab8b23f8399df105ba5d403b7de5b --- /dev/null +++ b/models/research/cvt_text/base/configure.py @@ -0,0 +1,139 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Classes for storing hyperparameters, data locations, etc.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import json +from os.path import join +import tensorflow as tf + + +class Config(object): + """Stores everything needed to train a model.""" + + def __init__(self, **kwargs): + # general + self.data_dir = './data' # top directory for data (corpora, models, etc.) 
+ self.model_name = 'default_model' # name identifying the current model + + # mode + self.mode = 'train' # either "train" or "eval" + self.task_names = ['chunk'] # list of tasks this model will learn + # more than one trains a multi-task model + self.is_semisup = True # whether to use CVT or train purely supervised + self.for_preprocessing = False # is this for the preprocessing script + + # embeddings + self.pretrained_embeddings = 'glove.6B.300d.txt' # which pretrained + # embeddings to use + self.word_embedding_size = 300 # size of each word embedding + + # encoder + self.use_chars = True # whether to include a character-level cnn + self.char_embedding_size = 50 # size of character embeddings + self.char_cnn_filter_widths = [2, 3, 4] # filter widths for the char cnn + self.char_cnn_n_filters = 100 # number of filters for each filter width + self.unidirectional_sizes = [1024] # size of first Bi-LSTM + self.bidirectional_sizes = [512] # size of second Bi-LSTM + self.projection_size = 512 # projections size for LSTMs and hidden layers + + # dependency parsing + self.depparse_projection_size = 128 # size of the representations used in + # the bilinear classifier for parsing + + # tagging + self.label_encoding = 'BIOES' # label encoding scheme for entity-level + # tagging tasks + self.label_smoothing = 0.1 # label smoothing rate for tagging tasks + + # optimization + self.lr = 0.5 # base learning rate + self.momentum = 0.9 # momentum + self.grad_clip = 1.0 # maximum gradient norm during optimization + self.warm_up_steps = 5000.0 # linearly ramp up the lr for this many steps + self.lr_decay = 0.005 # factor for gradually decaying the lr + + # EMA + self.ema_decay = 0.998 # EMA coefficient for averaged model weights + self.ema_test = True # whether to use EMA weights at test time + self.ema_teacher = False # whether to use EMA weights for the teacher model + + # regularization + self.labeled_keep_prob = 0.5 # 1 - dropout on labeled examples + self.unlabeled_keep_prob = 0.8 # 1 - dropout on unlabeled examples + + # sizing + self.max_sentence_length = 100 # maximum length of unlabeled sentences + self.max_word_length = 20 # maximum length of words for char cnn + self.train_batch_size = 64 # train batch size + self.test_batch_size = 64 # test batch size + self.buckets = [(0, 15), (15, 40), (40, 1000)] # buckets for binning + # sentences by length + + # training + self.print_every = 25 # how often to print out training progress + self.eval_dev_every = 500 # how often to evaluate on the dev set + self.eval_train_every = 2000 # how often to evaluate on the train set + self.save_model_every = 1000 # how often to checkpoint the model + + # data set + self.train_set_percent = 100 # how much of the train set to use + + for k, v in kwargs.iteritems(): + if k not in self.__dict__: + raise ValueError("Unknown argument", k) + self.__dict__[k] = v + + self.dev_set = self.mode == "train" # whether to evaluate on the dev or + # test set + + # locations of various data files + self.raw_data_topdir = join(self.data_dir, 'raw_data') + self.unsupervised_data = join( + self.raw_data_topdir, + 'unlabeled_data', + '1-billion-word-language-modeling-benchmark-r13output', + 'training-monolingual.tokenized.shuffled') + self.pretrained_embeddings_file = join( + self.raw_data_topdir, 'pretrained_embeddings', + self.pretrained_embeddings) + + self.preprocessed_data_topdir = join(self.data_dir, 'preprocessed_data') + self.embeddings_dir = join(self.preprocessed_data_topdir, + self.pretrained_embeddings.rsplit('.', 1)[0]) + 
self.word_vocabulary = join(self.embeddings_dir, 'word_vocabulary.pkl') + self.word_embeddings = join(self.embeddings_dir, 'word_embeddings.pkl') + + self.model_dir = join(self.data_dir, "models", self.model_name) + self.checkpoints_dir = join(self.model_dir, 'checkpoints') + self.checkpoint = join(self.checkpoints_dir, 'checkpoint.ckpt') + self.best_model_checkpoints_dir = join( + self.model_dir, 'best_model_checkpoints') + self.best_model_checkpoint = join( + self.best_model_checkpoints_dir, 'checkpoint.ckpt') + self.progress = join(self.checkpoints_dir, 'progress.pkl') + self.summaries_dir = join(self.model_dir, 'summaries') + self.history_file = join(self.model_dir, 'history.pkl') + + def write(self): + tf.gfile.MakeDirs(self.model_dir) + with open(join(self.model_dir, 'config.json'), 'w') as f: + f.write(json.dumps(self.__dict__, sort_keys=True, indent=4, + separators=(',', ': '))) + diff --git a/models/research/cvt_text/base/embeddings.py b/models/research/cvt_text/base/embeddings.py new file mode 100644 index 0000000000000000000000000000000000000000..8863f547efb560d627a01c150c451e5221b9df77 --- /dev/null +++ b/models/research/cvt_text/base/embeddings.py @@ -0,0 +1,167 @@ +# coding=utf-8 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + + +"""Utilities for handling word embeddings.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import re +import numpy as np +import tensorflow as tf + +from base import utils + + +_CHARS = [ + # punctuation + '!', '\'', '#', '$', '%', '&', '"', '(', ')', '*', '+', ',', '-', '.', + '/', '\\', '_', '`', '{', '}', '[', ']', '<', '>', ':', ';', '?', '@', + # digits + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + # letters + 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', + 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', + 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', + 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', + # special characters + '£', '€', '®', '™', '�', '½', '»', '•', '—', '“', '”', '°', '‘', '’' +] + +# words not in GloVe that still should have embeddings +_EXTRA_WORDS = [ + # common digit patterns + '0/0', '0/00', '00/00', '0/000', + '00/00/00', '0/00/00', '00/00/0000', '0/00/0000', + '00-00', '00-00-00', '0-00-00', '00-00-0000', '0-00-0000', '0000-00-00', + '00-0-00-0', '00000000', '0:00.000', '00:00.000', + '0%', '00%', '00.' 
'0000.', '0.0bn', '0.0m', '0-', '00-', + # ontonotes uses **f to represent formulas and -amp- instead of amperstands + '**f', '-amp-' +] +SPECIAL_TOKENS = ['', '', '', '', ''] +NUM_CHARS = len(_CHARS) + len(SPECIAL_TOKENS) +PAD, UNK, START, END, MISSING = 0, 1, 2, 3, 4 + + +class Vocabulary(collections.OrderedDict): + def __getitem__(self, w): + return self.get(w, UNK) + + +@utils.Memoize +def get_char_vocab(): + characters = _CHARS + for i, special in enumerate(SPECIAL_TOKENS): + characters.insert(i, special) + return Vocabulary({c: i for i, c in enumerate(characters)}) + + +@utils.Memoize +def get_inv_char_vocab(): + return {i: c for c, i in get_char_vocab().items()} + + +def get_word_vocab(config): + return Vocabulary(utils.load_cpickle(config.word_vocabulary)) + + +def get_word_embeddings(config): + return utils.load_cpickle(config.word_embeddings) + + +@utils.Memoize +def _punctuation_ids(vocab_path): + vocab = Vocabulary(utils.load_cpickle(vocab_path)) + return set(i for w, i in vocab.iteritems() if w in [ + '!', '...', '``', '{', '}', '(', ')', '[', ']', '--', '-', ',', '.', + "''", '`', ';', ':', '?']) + + +def get_punctuation_ids(config): + return _punctuation_ids(config.word_vocabulary) + + +class PretrainedEmbeddingLoader(object): + def __init__(self, config): + self.config = config + self.vocabulary = {} + self.vectors = [] + self.vector_size = config.word_embedding_size + + def _add_vector(self, w): + if w not in self.vocabulary: + self.vocabulary[w] = len(self.vectors) + self.vectors.append(np.zeros(self.vector_size, dtype='float32')) + + def build(self): + utils.log('loading pretrained embeddings from', + self.config.pretrained_embeddings_file) + for special in SPECIAL_TOKENS: + self._add_vector(special) + for extra in _EXTRA_WORDS: + self._add_vector(extra) + with tf.gfile.GFile( + self.config.pretrained_embeddings_file, 'r') as f: + for i, line in enumerate(f): + if i % 10000 == 0: + utils.log('on line', i) + + split = line.decode('utf8').split() + w = normalize_word(split[0]) + + try: + vec = np.array(map(float, split[1:]), dtype='float32') + if vec.size != self.vector_size: + utils.log('vector for line', i, 'has size', vec.size, 'so skipping') + utils.log(line[:100] + '...') + continue + except: + utils.log('can\'t parse line', i, 'so skipping') + utils.log(line[:100] + '...') + continue + if w not in self.vocabulary: + self.vocabulary[w] = len(self.vectors) + self.vectors.append(vec) + utils.log('writing vectors!') + self._write() + + def _write(self): + utils.write_cpickle(np.vstack(self.vectors), self.config.word_embeddings) + utils.write_cpickle(self.vocabulary, self.config.word_vocabulary) + + +def normalize_chars(w): + if w == '-LRB-': + return '(' + elif w == '-RRB-': + return ')' + elif w == '-LCB-': + return '{' + elif w == '-RCB-': + return '}' + elif w == '-LSB-': + return '[' + elif w == '-RSB-': + return ']' + return w.replace(r'\/', '/').replace(r'\*', '*') + + +def normalize_word(w): + return re.sub(r'\d', '0', normalize_chars(w).lower()) diff --git a/models/research/cvt_text/base/utils.py b/models/research/cvt_text/base/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3a9ee40d5a50b98ec06a315b8c23e2fce3b9e6f5 --- /dev/null +++ b/models/research/cvt_text/base/utils.py @@ -0,0 +1,68 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
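As a quick illustration (not part of the original files) of the normalization helpers in `base/embeddings.py` above: PTB-style bracket tokens are mapped back to bracket characters, escapes are removed, and words are lower-cased with digits collapsed to `0` before the vocabulary lookup:

```
from base import embeddings

# Bracket tokens and escapes from PTB-style tokenization are undone.
assert embeddings.normalize_chars('-LRB-') == '('
assert embeddings.normalize_chars(r'\*') == '*'
# Words are lower-cased and digits are collapsed to 0 before vocab lookup.
assert embeddings.normalize_word('McDonald') == 'mcdonald'
assert embeddings.normalize_word('1984') == '0000'
```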
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Various utilities.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import cPickle +import sys +import tensorflow as tf + + +class Memoize(object): + def __init__(self, f): + self.f = f + self.cache = {} + + def __call__(self, *args): + if args not in self.cache: + self.cache[args] = self.f(*args) + return self.cache[args] + + +def load_cpickle(path, memoized=True): + return _load_cpickle_memoize(path) if memoized else _load_cpickle(path) + + +def _load_cpickle(path): + with tf.gfile.GFile(path, 'r') as f: + return cPickle.load(f) + + +@Memoize +def _load_cpickle_memoize(path): + return _load_cpickle(path) + + +def write_cpickle(o, path): + tf.gfile.MakeDirs(path.rsplit('/', 1)[0]) + with tf.gfile.GFile(path, 'w') as f: + cPickle.dump(o, f, -1) + + +def log(*args): + msg = ' '.join(map(str, args)) + sys.stdout.write(msg + '\n') + sys.stdout.flush() + + +def heading(*args): + log() + log(80 * '=') + log(*args) + log(80 * '=') diff --git a/models/research/cvt_text/corpus_processing/__init__.py b/models/research/cvt_text/corpus_processing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/cvt_text/corpus_processing/example.py b/models/research/cvt_text/corpus_processing/example.py new file mode 100644 index 0000000000000000000000000000000000000000..023d2fa07cf02f2b672901b0c378ae4a56ef8271 --- /dev/null +++ b/models/research/cvt_text/corpus_processing/example.py @@ -0,0 +1,52 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Base class for training examples.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from base import embeddings + + +CONTRACTION_WORDS = set(w + 'n' for w in + ['do', 'does', 'did', 'is', 'are', 'was', 'were', 'has', + 'have', 'had', 'could', 'would', 'should', 'ca', 'wo', + 'ai', 'might']) + + +class Example(object): + def __init__(self, words, word_vocab, char_vocab): + words = words[:] + # Fix inconsistent tokenization between datasets + for i in range(len(words)): + if (words[i].lower() == '\'t' and i > 0 and + words[i - 1].lower() in CONTRACTION_WORDS): + words[i] = words[i - 1][-1] + words[i] + words[i - 1] = words[i - 1][:-1] + + self.words = ([embeddings.START] + + [word_vocab[embeddings.normalize_word(w)] for w in words] + + [embeddings.END]) + self.chars = ([[embeddings.MISSING]] + + [[char_vocab[c] for c in embeddings.normalize_chars(w)] + for w in words] + + [[embeddings.MISSING]]) + + def __repr__(self,): + inv_char_vocab = embeddings.get_inv_char_vocab() + return ' '.join([''.join([inv_char_vocab[c] for c in w]) + for w in self.chars]) diff --git a/models/research/cvt_text/corpus_processing/minibatching.py b/models/research/cvt_text/corpus_processing/minibatching.py new file mode 100644 index 0000000000000000000000000000000000000000..c0ebbf723db74b7214bdf0b98284b1222dddbc2c --- /dev/null +++ b/models/research/cvt_text/corpus_processing/minibatching.py @@ -0,0 +1,143 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
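To illustrate the tokenization fix-up in `corpus_processing/example.py` above (a sketch using a hypothetical toy vocabulary): datasets that split "don't" as `don` + `'t` are re-joined to the `do` + `n't` convention before words are mapped to ids:

```
from base import embeddings
from corpus_processing import example

# Hypothetical toy word vocabulary; unknown words map to UNK.
word_vocab = embeddings.Vocabulary({'i': 12, 'do': 10, "n't": 11, 'know': 13})
char_vocab = embeddings.get_char_vocab()

# "don" + "'t" is re-split into "do" + "n't", so both tokenizations map to
# the same word ids.
ex = example.Example(['I', 'don', "'t", 'know'], word_vocab, char_vocab)
assert ex.words[1:-1] == [12, 10, 11, 13]  # START/END ids surround the words
```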
+# ============================================================================== + +"""Utilities for constructing minibatches.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import random +import numpy as np + +from base import embeddings + + +def get_bucket(config, l): + for i, (s, e) in enumerate(config.buckets): + if s <= l < e: + return config.buckets[i] + + +def build_array(nested_lists, dtype='int32'): + depth_to_sizes = collections.defaultdict(set) + _get_sizes(nested_lists, depth_to_sizes) + shape = [max(depth_to_sizes[depth]) for depth in range(len(depth_to_sizes))] + + copy_depth = len(depth_to_sizes) - 1 + while copy_depth > 0 and len(depth_to_sizes[copy_depth]) == 1: + copy_depth -= 1 + + arr = np.zeros(shape, dtype=dtype) + _fill_array(nested_lists, arr, copy_depth) + + return arr + + +def _get_sizes(nested_lists, depth_to_sizes, depth=0): + depth_to_sizes[depth].add(len(nested_lists)) + first_elem = nested_lists[0] + if (isinstance(first_elem, collections.Sequence) or + isinstance(first_elem, np.ndarray)): + for sublist in nested_lists: + _get_sizes(sublist, depth_to_sizes, depth + 1) + + +def _fill_array(nested_lists, arr, copy_depth, depth=0): + if depth == copy_depth: + for i in range(len(nested_lists)): + if isinstance(nested_lists[i], np.ndarray): + arr[i] = nested_lists[i] + else: + arr[i] = np.array(nested_lists[i]) + else: + for i in range(len(nested_lists)): + _fill_array(nested_lists[i], arr[i], copy_depth, depth + 1) + + +class Dataset(object): + def __init__(self, config, examples, task_name='unlabeled', is_training=False): + self._config = config + self.examples = examples + self.size = len(examples) + self.task_name = task_name + self.is_training = is_training + + def get_minibatches(self, minibatch_size): + by_bucket = collections.defaultdict(list) + for i, e in enumerate(self.examples): + by_bucket[get_bucket(self._config, len(e.words))].append(i) + + # save memory by weighting examples so longer sentences have + # smaller minibatches. 
+ weight = lambda ind: np.sqrt(len(self.examples[ind].words)) + total_weight = float(sum(weight(i) for i in range(len(self.examples)))) + weight_per_batch = minibatch_size * total_weight / len(self.examples) + cumulative_weight = 0.0 + id_batches = [] + for _, ids in by_bucket.iteritems(): + ids = np.array(ids) + np.random.shuffle(ids) + curr_batch, curr_weight = [], 0.0 + for i, curr_id in enumerate(ids): + curr_batch.append(curr_id) + curr_weight += weight(curr_id) + if (i == len(ids) - 1 or cumulative_weight + curr_weight >= + (len(id_batches) + 1) * weight_per_batch): + cumulative_weight += curr_weight + id_batches.append(np.array(curr_batch)) + curr_batch, curr_weight = [], 0.0 + random.shuffle(id_batches) + + for id_batch in id_batches: + yield self._make_minibatch(id_batch) + + def endless_minibatches(self, minibatch_size): + while True: + for mb in self.get_minibatches(minibatch_size): + yield mb + + def _make_minibatch(self, ids): + examples = [self.examples[i] for i in ids] + sentence_lengths = np.array([len(e.words) for e in examples]) + max_word_length = min(max(max(len(word) for word in e.chars) + for e in examples), + self._config.max_word_length) + characters = [[[embeddings.PAD] + [embeddings.START] + w[:max_word_length] + + [embeddings.END] + [embeddings.PAD] for w in e.chars] + for e in examples] + # the first and last words are masked because they are start/end tokens + mask = build_array([[0] + [1] * (length - 2) + [0] + for length in sentence_lengths]) + words = build_array([e.words for e in examples]) + chars = build_array(characters, dtype='int16') + return Minibatch( + task_name=self.task_name, + size=ids.size, + examples=examples, + ids=ids, + teacher_predictions={}, + words=words, + chars=chars, + lengths=sentence_lengths, + mask=mask, + ) + + +Minibatch = collections.namedtuple('Minibatch', [ + 'task_name', 'size', 'examples', 'ids', 'teacher_predictions', + 'words', 'chars', 'lengths', 'mask' +]) diff --git a/models/research/cvt_text/corpus_processing/scorer.py b/models/research/cvt_text/corpus_processing/scorer.py new file mode 100644 index 0000000000000000000000000000000000000000..8173dae36d83face5c2f01e804b3a46b043259a7 --- /dev/null +++ b/models/research/cvt_text/corpus_processing/scorer.py @@ -0,0 +1,52 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
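A small illustration (not part of the original files) of how `build_array` in `corpus_processing/minibatching.py` pads ragged nested lists into a dense array, which is how variable-length sentences in a minibatch become fixed-shape `words`/`chars` tensors:

```
from corpus_processing import minibatching

# Two "sentences" of different lengths are zero-padded to the longest one.
arr = minibatching.build_array([[4, 7, 9], [5, 6]])
# arr == [[4, 7, 9],
#         [5, 6, 0]]
assert arr.shape == (2, 3)
```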
+# ============================================================================== + +"""Abstract base class for evaluation.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import abc + + +class Scorer(object): + __metaclass__ = abc.ABCMeta + + def __init__(self): + self._updated = False + self._cached_results = {} + + @abc.abstractmethod + def update(self, examples, predictions, loss): + self._updated = True + + @abc.abstractmethod + def get_loss(self): + pass + + @abc.abstractmethod + def _get_results(self): + return [] + + def get_results(self, prefix=""): + results = self._get_results() if self._updated else self._cached_results + self._cached_results = results + self._updated = False + return [(prefix + k, v) for k, v in results] + + def results_str(self): + return " - ".join(["{:}: {:.2f}".format(k, v) + for k, v in self.get_results()]) diff --git a/models/research/cvt_text/corpus_processing/unlabeled_data.py b/models/research/cvt_text/corpus_processing/unlabeled_data.py new file mode 100644 index 0000000000000000000000000000000000000000..0021c50618a453a0c319c511556412781268c6ed --- /dev/null +++ b/models/research/cvt_text/corpus_processing/unlabeled_data.py @@ -0,0 +1,81 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
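As a sketch of the `Scorer` contract above (not part of the original files): a concrete scorer accumulates statistics in `update` and reports named metrics from `_get_results`. The toy accuracy scorer below is hypothetical and assumes each example exposes a `labels` sequence aligned with its predictions:

```
from corpus_processing import scorer

class ToyAccuracyScorer(scorer.Scorer):
  """Hypothetical scorer: token accuracy plus total loss."""

  def __init__(self):
    super(ToyAccuracyScorer, self).__init__()
    self._total_loss, self._correct, self._count = 0.0, 0, 0

  def update(self, examples, predictions, loss):
    # The base-class update marks the cached results as stale.
    super(ToyAccuracyScorer, self).update(examples, predictions, loss)
    self._total_loss += loss
    for ex, preds in zip(examples, predictions):
      for label, pred in zip(ex.labels, preds):
        self._correct += int(label == pred)
        self._count += 1

  def get_loss(self):
    return self._total_loss

  def _get_results(self):
    return [('accuracy', 100.0 * self._correct / max(1, self._count)),
            ('loss', self.get_loss())]
```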
+# ============================================================================== + +"""Reads data from a large unlabeled corpus.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import tensorflow as tf + +from base import embeddings +from corpus_processing import example +from corpus_processing import minibatching + + +class UnlabeledDataReader(object): + def __init__(self, config, starting_file=0, starting_line=0, one_pass=False): + self.config = config + self.current_file = starting_file + self.current_line = starting_line + self._one_pass = one_pass + + def endless_minibatches(self): + for examples in self.get_unlabeled_examples(): + d = minibatching.Dataset(self.config, examples, 'unlabeled') + for mb in d.get_minibatches(self.config.train_batch_size): + yield mb + + def _make_examples(self, sentences): + word_vocab = embeddings.get_word_vocab(self.config) + char_vocab = embeddings.get_char_vocab() + return [ + example.Example(sentence, word_vocab, char_vocab) + for sentence in sentences + ] + + def get_unlabeled_examples(self): + lines = [] + for words in self.get_unlabeled_sentences(): + lines.append(words) + if len(lines) >= 10000: + yield self._make_examples(lines) + lines = [] + + def get_unlabeled_sentences(self): + while True: + file_ids_and_names = sorted([ + (int(fname.split('-')[1].replace('.txt', '')), fname) for fname in + tf.gfile.ListDirectory(self.config.unsupervised_data)]) + for fid, fname in file_ids_and_names: + if fid < self.current_file: + continue + self.current_file = fid + self.current_line = 0 + with tf.gfile.FastGFile(os.path.join(self.config.unsupervised_data, + fname), 'r') as f: + for i, line in enumerate(f): + if i < self.current_line: + continue + self.current_line = i + words = line.strip().split() + if len(words) < self.config.max_sentence_length: + yield words + self.current_file = 0 + self.current_line = 0 + if self._one_pass: + break diff --git a/models/research/cvt_text/cvt.py b/models/research/cvt_text/cvt.py new file mode 100644 index 0000000000000000000000000000000000000000..593ce5bb62e197d3392fb0e602afc1db896edd7a --- /dev/null +++ b/models/research/cvt_text/cvt.py @@ -0,0 +1,67 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
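Note (an illustration, not part of the original files): the unlabeled-data reader above orders shards by the number embedded in the file name, which matches the lm1b shard naming downloaded by `fetch_data.sh`:

```
# For a shard named like the lm1b training files:
fname = 'news.en-00001-of-00100'
file_id = int(fname.split('-')[1].replace('.txt', ''))
assert file_id == 1
```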
+# ============================================================================== + +"""Run training and evaluation for CVT text models.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from base import configure +from base import utils +from training import trainer +from training import training_progress + + +FLAGS = tf.app.flags.FLAGS +tf.app.flags.DEFINE_string('mode', 'train', '"train" or "eval') +tf.app.flags.DEFINE_string('model_name', 'default_model', + 'A name identifying the model being ' + 'trained/evaluated') + + +def main(): + utils.heading('SETUP') + config = configure.Config(mode=FLAGS.mode, model_name=FLAGS.model_name) + config.write() + with tf.Graph().as_default() as graph: + model_trainer = trainer.Trainer(config) + summary_writer = tf.summary.FileWriter(config.summaries_dir) + checkpoints_saver = tf.train.Saver(max_to_keep=1) + best_model_saver = tf.train.Saver(max_to_keep=1) + init_op = tf.global_variables_initializer() + graph.finalize() + with tf.Session() as sess: + sess.run(init_op) + progress = training_progress.TrainingProgress( + config, sess, checkpoints_saver, best_model_saver, + config.mode == 'train') + utils.log() + if config.mode == 'train': + utils.heading('START TRAINING ({:})'.format(config.model_name)) + model_trainer.train(sess, progress, summary_writer) + elif config.mode == 'eval': + utils.heading('RUN EVALUATION ({:})'.format(config.model_name)) + progress.best_model_saver.restore(sess, tf.train.latest_checkpoint( + config.checkpoints_dir)) + model_trainer.evaluate_all_tasks(sess, summary_writer, None) + else: + raise ValueError('Mode must be "train" or "eval"') + + +if __name__ == '__main__': + main() diff --git a/models/research/cvt_text/fetch_data.sh b/models/research/cvt_text/fetch_data.sh new file mode 100644 index 0000000000000000000000000000000000000000..dcdb54cf88387b6e2f9e01d493b8be1f9249e7f5 --- /dev/null +++ b/models/research/cvt_text/fetch_data.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +TOPDIR='./data' +RUNDIR=${PWD} + +mkdir -p ${TOPDIR} +cd ${TOPDIR} +mkdir -p raw_data +mkdir -p raw_data/pretrained_embeddings +mkdir -p raw_data/unlabeled_data +mkdir -p raw_data/chunk +cd ${RUNDIR} + +echo "Preparing GloVe embeddings" +cd "${TOPDIR}/raw_data/pretrained_embeddings" +curl -OL http://nlp.stanford.edu/data/glove.6B.zip +unzip glove.6B.zip +cd ${RUNDIR} +echo + +echo "Preparing lm1b corpus" +cd "${TOPDIR}/raw_data/unlabeled_data" +curl -OL http://www.statmt.org/lm-benchmark/1-billion-word-language-modeling-benchmark-r13output.tar.gz +tar xzf 1-billion-word-language-modeling-benchmark-r13output.tar.gz +cd ${RUNDIR} +echo + +echo "Preparing chunking corpus" +cd "${TOPDIR}/raw_data/chunk" +curl -OL https://www.clips.uantwerpen.be/conll2000/chunking/train.txt.gz +curl -OL http://www.clips.uantwerpen.be/conll2000/chunking/test.txt.gz +gunzip * +cd ${RUNDIR} +echo + +echo "Done with data fetching!" + diff --git a/models/research/cvt_text/model/__init__.py b/models/research/cvt_text/model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/cvt_text/model/encoder.py b/models/research/cvt_text/model/encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..4b6da1255f57545995c4c4ce7079fc132b21585c --- /dev/null +++ b/models/research/cvt_text/model/encoder.py @@ -0,0 +1,110 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""CNN-BiLSTM sentence encoder.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf +from base import embeddings +from model import model_helpers + + +class Encoder(object): + def __init__(self, config, inputs, pretrained_embeddings): + self._config = config + self._inputs = inputs + + self.word_reprs = self._get_word_reprs(pretrained_embeddings) + self.uni_fw, self.uni_bw = self._get_unidirectional_reprs(self.word_reprs) + self.uni_reprs = tf.concat([self.uni_fw, self.uni_bw], axis=-1) + self.bi_fw, self.bi_bw, self.bi_reprs = self._get_bidirectional_reprs( + self.uni_reprs) + + def _get_word_reprs(self, pretrained_embeddings): + with tf.variable_scope('word_embeddings'): + word_embedding_matrix = tf.get_variable( + 'word_embedding_matrix', initializer=pretrained_embeddings) + word_embeddings = tf.nn.embedding_lookup( + word_embedding_matrix, self._inputs.words) + word_embeddings = tf.nn.dropout(word_embeddings, self._inputs.keep_prob) + word_embeddings *= tf.get_variable('emb_scale', initializer=1.0) + + if not self._config.use_chars: + return word_embeddings + + with tf.variable_scope('char_embeddings'): + char_embedding_matrix = tf.get_variable( + 'char_embeddings', + shape=[embeddings.NUM_CHARS, self._config.char_embedding_size]) + char_embeddings = tf.nn.embedding_lookup(char_embedding_matrix, + self._inputs.chars) + shape = tf.shape(char_embeddings) + char_embeddings = tf.reshape( + char_embeddings, + shape=[-1, shape[-2], self._config.char_embedding_size]) + char_reprs = [] + for filter_width in self._config.char_cnn_filter_widths: + conv = tf.layers.conv1d( + char_embeddings, self._config.char_cnn_n_filters, filter_width) + conv = tf.nn.relu(conv) + conv = tf.nn.dropout(tf.reduce_max(conv, axis=1), + self._inputs.keep_prob) + conv = tf.reshape(conv, shape=[-1, shape[1], + self._config.char_cnn_n_filters]) + char_reprs.append(conv) + return tf.concat([word_embeddings] + char_reprs, axis=-1) + + def _get_unidirectional_reprs(self, word_reprs): + with tf.variable_scope('unidirectional_reprs'): + word_lstm_input_size = ( + self._config.word_embedding_size if not self._config.use_chars else + (self._config.word_embedding_size + + len(self._config.char_cnn_filter_widths) + * self._config.char_cnn_n_filters)) + word_reprs.set_shape([None, None, word_lstm_input_size]) + (outputs_fw, outputs_bw), _ = tf.nn.bidirectional_dynamic_rnn( + model_helpers.multi_lstm_cell(self._config.unidirectional_sizes, + self._inputs.keep_prob, + self._config.projection_size), + model_helpers.multi_lstm_cell(self._config.unidirectional_sizes, + self._inputs.keep_prob, + self._config.projection_size), + word_reprs, + dtype=tf.float32, + sequence_length=self._inputs.lengths, + scope='unilstm' + ) + return outputs_fw, outputs_bw + + def _get_bidirectional_reprs(self, uni_reprs): + with tf.variable_scope('bidirectional_reprs'): + current_outputs = uni_reprs + outputs_fw, outputs_bw = None, None + for size in self._config.bidirectional_sizes: + (outputs_fw, outputs_bw), _ = tf.nn.bidirectional_dynamic_rnn( + model_helpers.lstm_cell(size, self._inputs.keep_prob, + self._config.projection_size), + model_helpers.lstm_cell(size, self._inputs.keep_prob, + self._config.projection_size), + current_outputs, + dtype=tf.float32, + sequence_length=self._inputs.lengths, + scope='bilstm' + ) + current_outputs = tf.concat([outputs_fw, outputs_bw], 
axis=-1) + return outputs_fw, outputs_bw, current_outputs diff --git a/models/research/cvt_text/model/model_helpers.py b/models/research/cvt_text/model/model_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..3c0cb670c455b8c4fcdcd6b05b5586cb979552b9 --- /dev/null +++ b/models/research/cvt_text/model/model_helpers.py @@ -0,0 +1,54 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Utilities for building the model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + + +def project(input_layers, size, name='projection'): + return tf.add_n([tf.layers.dense(layer, size, name=name + '_' + str(i)) + for i, layer in enumerate(input_layers)]) + + +def lstm_cell(cell_size, keep_prob, num_proj): + return tf.contrib.rnn.DropoutWrapper( + tf.contrib.rnn.LSTMCell(cell_size, num_proj=min(cell_size, num_proj)), + output_keep_prob=keep_prob) + + +def multi_lstm_cell(cell_sizes, keep_prob, num_proj): + return tf.contrib.rnn.MultiRNNCell([lstm_cell(cell_size, keep_prob, num_proj) + for cell_size in cell_sizes]) + + +def masked_ce_loss(logits, labels, mask, sparse=False, roll_direction=0): + if roll_direction != 0: + labels = _roll(labels, roll_direction, sparse) + mask *= _roll(mask, roll_direction, True) + ce = ((tf.nn.sparse_softmax_cross_entropy_with_logits if sparse + else tf.nn.softmax_cross_entropy_with_logits_v2) + (logits=logits, labels=labels)) + return tf.reduce_sum(mask * ce) / tf.to_float(tf.reduce_sum(mask)) + + +def _roll(arr, direction, sparse=False): + if sparse: + return tf.concat([arr[:, direction:], arr[:, :direction]], axis=1) + return tf.concat([arr[:, direction:, :], arr[:, :direction, :]], axis=1) diff --git a/models/research/cvt_text/model/multitask_model.py b/models/research/cvt_text/model/multitask_model.py new file mode 100644 index 0000000000000000000000000000000000000000..16dfdf7da6d9bb6c531f60d88224593155af19e7 --- /dev/null +++ b/models/research/cvt_text/model/multitask_model.py @@ -0,0 +1,132 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
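To make the `roll_direction` argument of `masked_ce_loss` above concrete (an illustration, not part of the original files): rolling shifts the label sequence along the time axis, so a module can be trained to predict the label of the next (or previous) token from a purely forward (or backward) representation:

```
import numpy as np

labels = np.array([[10, 11, 12, 13]])  # one sentence, four time steps
# roll_direction=1 pairs position t with the label from position t + 1.
rolled = np.concatenate([labels[:, 1:], labels[:, :1]], axis=1)
# rolled == [[11, 12, 13, 10]]; the wrapped-around final position is
# zeroed out by the rolled mask inside masked_ce_loss.
```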
+# ============================================================================== + +"""A multi-task and semi-supervised NLP model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from model import encoder +from model import shared_inputs + + +class Inference(object): + def __init__(self, config, inputs, pretrained_embeddings, tasks): + with tf.variable_scope('encoder'): + self.encoder = encoder.Encoder(config, inputs, pretrained_embeddings) + self.modules = {} + for task in tasks: + with tf.variable_scope(task.name): + self.modules[task.name] = task.get_module(inputs, self.encoder) + + +class Model(object): + def __init__(self, config, pretrained_embeddings, tasks): + self._config = config + self._tasks = tasks + + self._global_step, self._optimizer = self._get_optimizer() + self._inputs = shared_inputs.Inputs(config) + with tf.variable_scope('model', reuse=tf.AUTO_REUSE) as scope: + inference = Inference(config, self._inputs, pretrained_embeddings, + tasks) + self._trainer = inference + self._tester = inference + self._teacher = inference + if config.ema_test or config.ema_teacher: + ema = tf.train.ExponentialMovingAverage(config.ema_decay) + model_vars = tf.get_collection("trainable_variables", "model") + ema_op = ema.apply(model_vars) + tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, ema_op) + + def ema_getter(getter, name, *args, **kwargs): + var = getter(name, *args, **kwargs) + return ema.average(var) + + scope.set_custom_getter(ema_getter) + inference_ema = Inference( + config, self._inputs, pretrained_embeddings, tasks) + if config.ema_teacher: + self._teacher = inference_ema + if config.ema_test: + self._tester = inference_ema + + self._unlabeled_loss = self._get_consistency_loss(tasks) + self._unlabeled_train_op = self._get_train_op(self._unlabeled_loss) + self._labeled_train_ops = {} + for task in self._tasks: + task_loss = self._trainer.modules[task.name].supervised_loss + self._labeled_train_ops[task.name] = self._get_train_op(task_loss) + + def _get_consistency_loss(self, tasks): + return sum([self._trainer.modules[task.name].unsupervised_loss + for task in tasks]) + + def _get_optimizer(self): + global_step = tf.get_variable('global_step', initializer=0, trainable=False) + warm_up_multiplier = (tf.minimum(tf.to_float(global_step), + self._config.warm_up_steps) + / self._config.warm_up_steps) + decay_multiplier = 1.0 / (1 + self._config.lr_decay * + tf.sqrt(tf.to_float(global_step))) + lr = self._config.lr * warm_up_multiplier * decay_multiplier + optimizer = tf.train.MomentumOptimizer(lr, self._config.momentum) + return global_step, optimizer + + def _get_train_op(self, loss): + grads, vs = zip(*self._optimizer.compute_gradients(loss)) + grads, _ = tf.clip_by_global_norm(grads, self._config.grad_clip) + update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) + with tf.control_dependencies(update_ops): + return self._optimizer.apply_gradients( + zip(grads, vs), global_step=self._global_step) + + def _create_feed_dict(self, mb, model, is_training=True): + feed = self._inputs.create_feed_dict(mb, is_training) + if mb.task_name in model.modules: + model.modules[mb.task_name].update_feed_dict(feed, mb) + else: + for module in model.modules.values(): + module.update_feed_dict(feed, mb) + return feed + + def train_unlabeled(self, sess, mb): + return sess.run([self._unlabeled_train_op, self._unlabeled_loss], + feed_dict=self._create_feed_dict(mb, self._trainer))[1] + + def 
train_labeled(self, sess, mb): + return sess.run([self._labeled_train_ops[mb.task_name], + self._trainer.modules[mb.task_name].supervised_loss,], + feed_dict=self._create_feed_dict(mb, self._trainer))[1] + + def run_teacher(self, sess, mb): + result = sess.run({task.name: self._teacher.modules[task.name].probs + for task in self._tasks}, + feed_dict=self._create_feed_dict(mb, self._teacher, + False)) + for task_name, probs in result.iteritems(): + mb.teacher_predictions[task_name] = probs.astype('float16') + + def test(self, sess, mb): + return sess.run( + [self._tester.modules[mb.task_name].supervised_loss, + self._tester.modules[mb.task_name].preds], + feed_dict=self._create_feed_dict(mb, self._tester, False)) + + def get_global_step(self, sess): + return sess.run(self._global_step) diff --git a/models/research/cvt_text/model/shared_inputs.py b/models/research/cvt_text/model/shared_inputs.py new file mode 100644 index 0000000000000000000000000000000000000000..2a97004b3270eb01c3eb459eb892514b3584bf5a --- /dev/null +++ b/models/research/cvt_text/model/shared_inputs.py @@ -0,0 +1,48 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Placeholders for non-task-specific model inputs.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + + +class Inputs(object): + def __init__(self, config): + self._config = config + self.keep_prob = tf.placeholder(tf.float32, name='keep_prob') + self.label_smoothing = tf.placeholder(tf.float32, name='label_smoothing') + self.lengths = tf.placeholder(tf.int32, shape=[None], name='lengths') + self.mask = tf.placeholder(tf.float32, [None, None], name='mask') + self.words = tf.placeholder(tf.int32, shape=[None, None], name='words') + self.chars = tf.placeholder(tf.int32, shape=[None, None, None], + name='chars') + + def create_feed_dict(self, mb, is_training): + cvt = mb.task_name == 'unlabeled' + return { + self.keep_prob: 1.0 if not is_training else + (self._config.unlabeled_keep_prob if cvt else + self._config.labeled_keep_prob), + self.label_smoothing: self._config.label_smoothing + if (is_training and not cvt) else 0.0, + self.lengths: mb.lengths, + self.words: mb.words, + self.chars: mb.chars, + self.mask: mb.mask.astype('float32') + } diff --git a/models/research/cvt_text/model/task_module.py b/models/research/cvt_text/model/task_module.py new file mode 100644 index 0000000000000000000000000000000000000000..92440b4d98ad8feaa7db6f9e6a642a42805c08a4 --- /dev/null +++ b/models/research/cvt_text/model/task_module.py @@ -0,0 +1,44 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Base classes for task-specific modules.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import abc + + +class SupervisedModule(object): + __metaclass__ = abc.ABCMeta + + def __init__(self): + self.supervised_loss = NotImplemented + self.probs = NotImplemented + self.preds = NotImplemented + + @abc.abstractmethod + def update_feed_dict(self, feed, mb): + pass + + +class SemiSupervisedModule(SupervisedModule): + __metaclass__ = abc.ABCMeta + + def __init__(self): + super(SemiSupervisedModule, self).__init__() + self.unsupervised_loss = NotImplemented + diff --git a/models/research/cvt_text/preprocessing.py b/models/research/cvt_text/preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..3dbf106be571a0e2098baab030e831ca72ea73aa --- /dev/null +++ b/models/research/cvt_text/preprocessing.py @@ -0,0 +1,87 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +""" +Preprocesses pretrained word embeddings, creates dev sets for tasks without a +provided one, and figures out the set of output classes for each task. 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import random + +from base import configure +from base import embeddings +from base import utils +from task_specific.word_level import word_level_data + + +def main(data_dir='./data'): + random.seed(0) + + utils.log("BUILDING WORD VOCABULARY/EMBEDDINGS") + for pretrained in ['glove.6B.300d.txt']: + config = configure.Config(data_dir=data_dir, + for_preprocessing=True, + pretrained_embeddings=pretrained, + word_embedding_size=300) + embeddings.PretrainedEmbeddingLoader(config).build() + + utils.log("CONSTRUCTING DEV SETS") + for task_name in ["chunk"]: + # chunking does not come with a provided dev split, so create one by + # selecting a random subset of the data + config = configure.Config(data_dir=data_dir, + for_preprocessing=True) + task_data_dir = os.path.join(config.raw_data_topdir, task_name) + '/' + train_sentences = word_level_data.TaggedDataLoader( + config, task_name, False).get_labeled_sentences("train") + random.shuffle(train_sentences) + write_sentences(task_data_dir + 'train_subset.txt', train_sentences[1500:]) + write_sentences(task_data_dir + 'dev.txt', train_sentences[:1500]) + + utils.log("WRITING LABEL MAPPINGS") + for task_name in ["chunk"]: + for i, label_encoding in enumerate(["BIOES"]): + config = configure.Config(data_dir=data_dir, + for_preprocessing=True, + label_encoding=label_encoding) + token_level = task_name in ["ccg", "pos", "depparse"] + loader = word_level_data.TaggedDataLoader(config, task_name, token_level) + if token_level: + if i != 0: + continue + utils.log("WRITING LABEL MAPPING FOR", task_name.upper()) + else: + utils.log(" Writing label mapping for", task_name.upper(), + label_encoding) + utils.log(" ", len(loader.label_mapping), "classes") + utils.write_cpickle(loader.label_mapping, + loader.label_mapping_path) + + +def write_sentences(fname, sentences): + with open(fname, 'w') as f: + for words, tags in sentences: + for word, tag in zip(words, tags): + f.write(word + " " + tag + "\n") + f.write("\n") + + +if __name__ == '__main__': + main() diff --git a/models/research/cvt_text/task_specific/__init__.py b/models/research/cvt_text/task_specific/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/cvt_text/task_specific/task_definitions.py b/models/research/cvt_text/task_specific/task_definitions.py new file mode 100644 index 0000000000000000000000000000000000000000..f13fb559285fa7919c4af42ce60848fefeeeefdd --- /dev/null +++ b/models/research/cvt_text/task_specific/task_definitions.py @@ -0,0 +1,91 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Defines all the tasks the model can learn.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import abc + +from base import embeddings +from task_specific.word_level import depparse_module +from task_specific.word_level import depparse_scorer +from task_specific.word_level import tagging_module +from task_specific.word_level import tagging_scorers +from task_specific.word_level import word_level_data + + +class Task(object): + __metaclass__ = abc.ABCMeta + + def __init__(self, config, name, loader): + self.config = config + self.name = name + self.loader = loader + self.train_set = self.loader.get_dataset("train") + self.val_set = self.loader.get_dataset("dev" if config.dev_set else "test") + + @abc.abstractmethod + def get_module(self, inputs, encoder): + pass + + @abc.abstractmethod + def get_scorer(self): + pass + + +class Tagging(Task): + def __init__(self, config, name, is_token_level=True): + super(Tagging, self).__init__( + config, name, word_level_data.TaggedDataLoader( + config, name, is_token_level)) + self.n_classes = len(set(self.loader.label_mapping.values())) + self.is_token_level = is_token_level + + def get_module(self, inputs, encoder): + return tagging_module.TaggingModule( + self.config, self.name, self.n_classes, inputs, encoder) + + def get_scorer(self): + if self.is_token_level: + return tagging_scorers.AccuracyScorer() + else: + return tagging_scorers.EntityLevelF1Scorer(self.loader.label_mapping) + + +class DependencyParsing(Tagging): + def __init__(self, config, name): + super(DependencyParsing, self).__init__(config, name, True) + + def get_module(self, inputs, encoder): + return depparse_module.DepparseModule( + self.config, self.name, self.n_classes, inputs, encoder) + + def get_scorer(self): + return depparse_scorer.DepparseScorer( + self.n_classes, (embeddings.get_punctuation_ids(self.config))) + + +def get_task(config, name): + if name in ["ccg", "pos"]: + return Tagging(config, name, True) + elif name in ["chunk", "ner", "er"]: + return Tagging(config, name, False) + elif name == "depparse": + return DependencyParsing(config, name) + else: + raise ValueError("Unknown task", name) diff --git a/models/research/cvt_text/task_specific/word_level/__init__.py b/models/research/cvt_text/task_specific/word_level/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/cvt_text/task_specific/word_level/depparse_module.py b/models/research/cvt_text/task_specific/word_level/depparse_module.py new file mode 100644 index 0000000000000000000000000000000000000000..b1207a9815219ca3f53e2a958bf8df8656c009fa --- /dev/null +++ b/models/research/cvt_text/task_specific/word_level/depparse_module.py @@ -0,0 +1,126 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Dependency parsing module.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf + +from corpus_processing import minibatching +from model import model_helpers +from model import task_module + + +class DepparseModule(task_module.SemiSupervisedModule): + def __init__(self, config, task_name, n_classes, inputs, encoder): + super(DepparseModule, self).__init__() + + self.task_name = task_name + self.n_classes = n_classes + self.labels = labels = tf.placeholder(tf.float32, [None, None, None], + name=task_name + '_labels') + + class PredictionModule(object): + def __init__(self, name, dep_reprs, head_reprs, roll_direction=0): + self.name = name + with tf.variable_scope(name + '/predictions'): + # apply hidden layers to the input representations + arc_dep_hidden = model_helpers.project( + dep_reprs, config.projection_size, 'arc_dep_hidden') + arc_head_hidden = model_helpers.project( + head_reprs, config.projection_size, 'arc_head_hidden') + arc_dep_hidden = tf.nn.relu(arc_dep_hidden) + arc_head_hidden = tf.nn.relu(arc_head_hidden) + arc_head_hidden = tf.nn.dropout(arc_head_hidden, inputs.keep_prob) + arc_dep_hidden = tf.nn.dropout(arc_dep_hidden, inputs.keep_prob) + + # bilinear classifier excluding the final dot product + arc_head = tf.layers.dense( + arc_head_hidden, config.depparse_projection_size, name='arc_head') + W = tf.get_variable('shared_W', + shape=[config.projection_size, n_classes, + config.depparse_projection_size]) + Wr = tf.get_variable('relation_specific_W', + shape=[config.projection_size, + config.depparse_projection_size]) + Wr_proj = tf.tile(tf.expand_dims(Wr, axis=-2), [1, n_classes, 1]) + W += Wr_proj + arc_dep = tf.tensordot(arc_dep_hidden, W, axes=[[-1], [0]]) + shape = tf.shape(arc_dep) + arc_dep = tf.reshape(arc_dep, + [shape[0], -1, config.depparse_projection_size]) + + # apply the transformer scaling trick to prevent dot products from + # getting too large (possibly not necessary) + scale = np.power( + config.depparse_projection_size, 0.25).astype('float32') + scale = tf.get_variable('scale', initializer=scale, dtype=tf.float32) + arc_dep /= scale + arc_head /= scale + + # compute the scores for each candidate arc + word_scores = tf.matmul(arc_head, arc_dep, transpose_b=True) + root_scores = tf.layers.dense(arc_head, n_classes, name='root_score') + arc_scores = tf.concat([root_scores, word_scores], axis=-1) + + # disallow the model from making impossible predictions + mask = inputs.mask + mask_shape = tf.shape(mask) + mask = tf.tile(tf.expand_dims(mask, -1), [1, 1, n_classes]) + mask = tf.reshape(mask, [-1, mask_shape[1] * n_classes]) + mask = tf.concat([tf.ones((mask_shape[0], 1)), + tf.zeros((mask_shape[0], n_classes - 1)), mask], + axis=1) + mask = tf.tile(tf.expand_dims(mask, 1), [1, mask_shape[1], 1]) + arc_scores += (mask - 1) * 100.0 + + self.logits = arc_scores + self.loss = model_helpers.masked_ce_loss( + self.logits, labels, inputs.mask, + roll_direction=roll_direction) + + primary = PredictionModule( + 'primary', + [encoder.uni_reprs, encoder.bi_reprs], + [encoder.uni_reprs, encoder.bi_reprs]) + ps = [ + PredictionModule( + 'full', + [encoder.uni_reprs, encoder.bi_reprs], + [encoder.uni_reprs, encoder.bi_reprs]), + PredictionModule('fw_fw', [encoder.uni_fw], [encoder.uni_fw]), + PredictionModule('fw_bw', [encoder.uni_fw], [encoder.uni_bw]), + 
PredictionModule('bw_fw', [encoder.uni_bw], [encoder.uni_fw]), + PredictionModule('bw_bw', [encoder.uni_bw], [encoder.uni_bw]), + ] + + self.unsupervised_loss = sum(p.loss for p in ps) + self.supervised_loss = primary.loss + self.probs = tf.nn.softmax(primary.logits) + self.preds = tf.argmax(primary.logits, axis=-1) + + def update_feed_dict(self, feed, mb): + if self.task_name in mb.teacher_predictions: + feed[self.labels] = mb.teacher_predictions[self.task_name] + elif mb.task_name != 'unlabeled': + labels = minibatching.build_array( + [[0] + e.labels + [0] for e in mb.examples]) + feed[self.labels] = np.eye( + (1 + mb.words.shape[1]) * self.n_classes)[labels] + diff --git a/models/research/cvt_text/task_specific/word_level/depparse_scorer.py b/models/research/cvt_text/task_specific/word_level/depparse_scorer.py new file mode 100644 index 0000000000000000000000000000000000000000..142cf79f9b3c9a7c172c1ea642683fa69b46dd4c --- /dev/null +++ b/models/research/cvt_text/task_specific/word_level/depparse_scorer.py @@ -0,0 +1,45 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Dependency parsing evaluation (computes UAS/LAS).""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from task_specific.word_level import word_level_scorer + + +class DepparseScorer(word_level_scorer.WordLevelScorer): + def __init__(self, n_relations, punctuation): + super(DepparseScorer, self).__init__() + self._n_relations = n_relations + self._punctuation = punctuation if punctuation else None + + def _get_results(self): + correct_unlabeled, correct_labeled, count = 0, 0, 0 + for example, preds in zip(self._examples, self._preds): + for w, y_true, y_pred in zip(example.words[1:-1], example.labels, preds): + if w in self._punctuation: + continue + count += 1 + correct_labeled += (1 if y_pred == y_true else 0) + correct_unlabeled += (1 if int(y_pred // self._n_relations) == + int(y_true // self._n_relations) else 0) + return [ + ("las", 100.0 * correct_labeled / count), + ("uas", 100.0 * correct_unlabeled / count), + ("loss", self.get_loss()), + ] diff --git a/models/research/cvt_text/task_specific/word_level/tagging_module.py b/models/research/cvt_text/task_specific/word_level/tagging_module.py new file mode 100644 index 0000000000000000000000000000000000000000..f1d85f333dbab5318f87cf39ec256e3f6c5832f6 --- /dev/null +++ b/models/research/cvt_text/task_specific/word_level/tagging_module.py @@ -0,0 +1,76 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Sequence tagging module.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf + +from corpus_processing import minibatching +from model import model_helpers +from model import task_module + + +class TaggingModule(task_module.SemiSupervisedModule): + def __init__(self, config, task_name, n_classes, inputs, + encoder): + super(TaggingModule, self).__init__() + self.task_name = task_name + self.n_classes = n_classes + self.labels = labels = tf.placeholder(tf.float32, [None, None, None], + name=task_name + '_labels') + + class PredictionModule(object): + def __init__(self, name, input_reprs, roll_direction=0, activate=True): + self.name = name + with tf.variable_scope(name + '/predictions'): + projected = model_helpers.project(input_reprs, config.projection_size) + if activate: + projected = tf.nn.relu(projected) + self.logits = tf.layers.dense(projected, n_classes, name='predict') + + targets = labels + targets *= (1 - inputs.label_smoothing) + targets += inputs.label_smoothing / n_classes + self.loss = model_helpers.masked_ce_loss( + self.logits, targets, inputs.mask, roll_direction=roll_direction) + + primary = PredictionModule('primary', + ([encoder.uni_reprs, encoder.bi_reprs])) + ps = [ + PredictionModule('full', ([encoder.uni_reprs, encoder.bi_reprs]), + activate=False), + PredictionModule('forwards', [encoder.uni_fw]), + PredictionModule('backwards', [encoder.uni_bw]), + PredictionModule('future', [encoder.uni_fw], roll_direction=1), + PredictionModule('past', [encoder.uni_bw], roll_direction=-1), + ] + + self.unsupervised_loss = sum(p.loss for p in ps) + self.supervised_loss = primary.loss + self.probs = tf.nn.softmax(primary.logits) + self.preds = tf.argmax(primary.logits, axis=-1) + + def update_feed_dict(self, feed, mb): + if self.task_name in mb.teacher_predictions: + feed[self.labels] = mb.teacher_predictions[self.task_name] + elif mb.task_name != 'unlabeled': + labels = minibatching.build_array( + [[0] + e.labels + [0] for e in mb.examples]) + feed[self.labels] = np.eye(self.n_classes)[labels] diff --git a/models/research/cvt_text/task_specific/word_level/tagging_scorers.py b/models/research/cvt_text/task_specific/word_level/tagging_scorers.py new file mode 100644 index 0000000000000000000000000000000000000000..ee8a7c74f8181618a9c81389d8b24bc12c70b32f --- /dev/null +++ b/models/research/cvt_text/task_specific/word_level/tagging_scorers.py @@ -0,0 +1,83 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Sequence tagging evaluation.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import abc + +from task_specific.word_level import tagging_utils +from task_specific.word_level import word_level_scorer + + +class AccuracyScorer(word_level_scorer.WordLevelScorer): + def __init__(self, auto_fail_label=None): + super(AccuracyScorer, self).__init__() + self._auto_fail_label = auto_fail_label + + def _get_results(self): + correct, count = 0, 0 + for example, preds in zip(self._examples, self._preds): + for y_true, y_pred in zip(example.labels, preds): + count += 1 + correct += (1 if y_pred == y_true and y_true != self._auto_fail_label + else 0) + return [ + ("accuracy", 100.0 * correct / count), + ("loss", self.get_loss()) + ] + + +class F1Scorer(word_level_scorer.WordLevelScorer): + __metaclass__ = abc.ABCMeta + + def __init__(self): + super(F1Scorer, self).__init__() + self._n_correct, self._n_predicted, self._n_gold = 0, 0, 0 + + def _get_results(self): + if self._n_correct == 0: + p, r, f1 = 0, 0, 0 + else: + p = 100.0 * self._n_correct / self._n_predicted + r = 100.0 * self._n_correct / self._n_gold + f1 = 2 * p * r / (p + r) + return [ + ("precision", p), + ("recall", r), + ("f1", f1), + ("loss", self.get_loss()), + ] + + +class EntityLevelF1Scorer(F1Scorer): + def __init__(self, label_mapping): + super(EntityLevelF1Scorer, self).__init__() + self._inv_label_mapping = {v: k for k, v in label_mapping.iteritems()} + + def _get_results(self): + self._n_correct, self._n_predicted, self._n_gold = 0, 0, 0 + for example, preds in zip(self._examples, self._preds): + sent_spans = set(tagging_utils.get_span_labels( + example.labels, self._inv_label_mapping)) + span_preds = set(tagging_utils.get_span_labels( + preds, self._inv_label_mapping)) + self._n_correct += len(sent_spans & span_preds) + self._n_gold += len(sent_spans) + self._n_predicted += len(span_preds) + return super(EntityLevelF1Scorer, self)._get_results() diff --git a/models/research/cvt_text/task_specific/word_level/tagging_utils.py b/models/research/cvt_text/task_specific/word_level/tagging_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b300e492592f185c0ea169a4c7149e86669232f8 --- /dev/null +++ b/models/research/cvt_text/task_specific/word_level/tagging_utils.py @@ -0,0 +1,59 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Utilities for sequence tagging tasks for entity-level tasks (e.g., NER).""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +def get_span_labels(sentence_tags, inv_label_mapping=None): + """Go from token-level labels to list of entities (start, end, class).""" + + if inv_label_mapping: + sentence_tags = [inv_label_mapping[i] for i in sentence_tags] + span_labels = [] + last = 'O' + start = -1 + for i, tag in enumerate(sentence_tags): + pos, _ = (None, 'O') if tag == 'O' else tag.split('-') + if (pos == 'S' or pos == 'B' or tag == 'O') and last != 'O': + span_labels.append((start, i - 1, last.split('-')[-1])) + if pos == 'B' or pos == 'S' or last == 'O': + start = i + last = tag + if sentence_tags[-1] != 'O': + span_labels.append((start, len(sentence_tags) - 1, + sentence_tags[-1].split('-')[-1])) + return span_labels + + +def get_tags(span_labels, length, encoding): + """Converts a list of entities to token-label labels based on the provided + encoding (e.g., BIOES). + """ + + tags = ['O' for _ in range(length)] + for s, e, t in span_labels: + for i in range(s, e + 1): + tags[i] = 'I-' + t + if 'E' in encoding: + tags[e] = 'E-' + t + if 'B' in encoding: + tags[s] = 'B-' + t + if 'S' in encoding and s == e: + tags[s] = 'S-' + t + return tags diff --git a/models/research/cvt_text/task_specific/word_level/word_level_data.py b/models/research/cvt_text/task_specific/word_level/word_level_data.py new file mode 100644 index 0000000000000000000000000000000000000000..40fa27b188c26099dfed48da19a2925f0d13fd93 --- /dev/null +++ b/models/research/cvt_text/task_specific/word_level/word_level_data.py @@ -0,0 +1,161 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Utilities for processing word-level datasets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import os +import random +import tensorflow as tf + +from base import embeddings +from base import utils +from corpus_processing import example +from corpus_processing import minibatching +from task_specific.word_level import tagging_utils + + +class TaggedDataLoader(object): + def __init__(self, config, name, is_token_level): + self._config = config + self._task_name = name + self._raw_data_path = os.path.join(config.raw_data_topdir, name) + self._is_token_level = is_token_level + self.label_mapping_path = os.path.join( + config.preprocessed_data_topdir, + (name if is_token_level else + name + '_' + config.label_encoding) + '_label_mapping.pkl') + + if self.label_mapping: + self._n_classes = len(set(self.label_mapping.values())) + else: + self._n_classes = None + + def get_dataset(self, split): + if (split == 'train' and not self._config.for_preprocessing and + tf.gfile.Exists(os.path.join(self._raw_data_path, 'train_subset.txt'))): + split = 'train_subset' + return minibatching.Dataset( + self._config, self._get_examples(split), self._task_name) + + def get_labeled_sentences(self, split): + sentences = [] + path = os.path.join(self._raw_data_path, split + '.txt') + if not tf.gfile.Exists(path): + if self._config.for_preprocessing: + return [] + else: + raise ValueError('Unable to load data from', path) + + with tf.gfile.GFile(path, 'r') as f: + sentence = [] + for line in f: + line = line.strip().split() + if not line: + if sentence: + words, tags = zip(*sentence) + sentences.append((words, tags)) + sentence = [] + continue + if line[0] == '-DOCSTART-': + continue + word, tag = line[0], line[-1] + sentence.append((word, tag)) + return sentences + + @property + def label_mapping(self): + if not self._config.for_preprocessing: + return utils.load_cpickle(self.label_mapping_path) + + tag_counts = collections.Counter() + train_tags = set() + for split in ['train', 'dev', 'test']: + for words, tags in self.get_labeled_sentences(split): + if not self._is_token_level: + span_labels = tagging_utils.get_span_labels(tags) + tags = tagging_utils.get_tags( + span_labels, len(words), self._config.label_encoding) + for tag in tags: + if self._task_name == 'depparse': + tag = tag.split('-')[1] + tag_counts[tag] += 1 + if split == 'train': + train_tags.add(tag) + if self._task_name == 'ccg': + # for CCG, there are tags in the test sets that aren't in the train set + # all tags not in the train set get mapped to a special label + # the model will never predict this label because it never sees it in the + # training set + not_in_train_tags = [] + for tag, count in tag_counts.items(): + if tag not in train_tags: + not_in_train_tags.append(tag) + label_mapping = { + label: i for i, label in enumerate(sorted(filter( + lambda t: t not in not_in_train_tags, tag_counts.keys()))) + } + n = len(label_mapping) + for tag in not_in_train_tags: + label_mapping[tag] = n + else: + labels = sorted(tag_counts.keys()) + if self._task_name == 'depparse': + labels.remove('root') + labels.insert(0, 'root') + label_mapping = {label: i for i, label in enumerate(labels)} + return label_mapping + + def _get_examples(self, split): + word_vocab = embeddings.get_word_vocab(self._config) + char_vocab = embeddings.get_char_vocab() + examples = [ + TaggingExample( + 
self._config, self._is_token_level, words, tags, + word_vocab, char_vocab, self.label_mapping, self._task_name) + for words, tags in self.get_labeled_sentences(split)] + if self._config.train_set_percent < 100: + utils.log('using reduced train set ({:}%)'.format( + self._config.train_set_percent)) + random.shuffle(examples) + examples = examples[:int(len(examples) * + self._config.train_set_percent / 100.0)] + return examples + + +class TaggingExample(example.Example): + def __init__(self, config, is_token_level, words, original_tags, + word_vocab, char_vocab, label_mapping, task_name): + super(TaggingExample, self).__init__(words, word_vocab, char_vocab) + if is_token_level: + labels = original_tags + else: + span_labels = tagging_utils.get_span_labels(original_tags) + labels = tagging_utils.get_tags( + span_labels, len(words), config.label_encoding) + + if task_name == 'depparse': + self.labels = [] + for l in labels: + split = l.split('-') + self.labels.append( + len(label_mapping) * (0 if split[0] == '0' else 1 + int(split[0])) + + label_mapping[split[1]]) + else: + self.labels = [label_mapping[l] for l in labels] diff --git a/models/research/cvt_text/task_specific/word_level/word_level_scorer.py b/models/research/cvt_text/task_specific/word_level/word_level_scorer.py new file mode 100644 index 0000000000000000000000000000000000000000..e29848d9ca1043fbba86a5bdf98b83269ecdb528 --- /dev/null +++ b/models/research/cvt_text/task_specific/word_level/word_level_scorer.py @@ -0,0 +1,48 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Base class for word-level scorers.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import abc + +from corpus_processing import scorer + + +class WordLevelScorer(scorer.Scorer): + __metaclass__ = abc.ABCMeta + + def __init__(self): + super(WordLevelScorer, self).__init__() + self._total_loss = 0 + self._total_words = 0 + self._examples = [] + self._preds = [] + + def update(self, examples, predictions, loss): + super(WordLevelScorer, self).update(examples, predictions, loss) + n_words = 0 + for example, preds in zip(examples, predictions): + self._examples.append(example) + self._preds.append(list(preds)[1:len(example.words) - 1]) + n_words += len(example.words) - 2 + self._total_loss += loss * n_words + self._total_words += n_words + + def get_loss(self): + return self._total_loss / max(1, self._total_words) diff --git a/models/research/cvt_text/training/__init__.py b/models/research/cvt_text/training/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/cvt_text/training/trainer.py b/models/research/cvt_text/training/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..23dc4dad2c1f577ca87f264a640562e092a3ec96 --- /dev/null +++ b/models/research/cvt_text/training/trainer.py @@ -0,0 +1,139 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Runs training for CVT text models.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import bisect +import time +import numpy as np +import tensorflow as tf + +from base import utils +from model import multitask_model +from task_specific import task_definitions + + +class Trainer(object): + def __init__(self, config): + self._config = config + self.tasks = [task_definitions.get_task(self._config, task_name) + for task_name in self._config.task_names] + + utils.log('Loading Pretrained Embeddings') + pretrained_embeddings = utils.load_cpickle(self._config.word_embeddings) + + utils.log('Building Model') + self._model = multitask_model.Model( + self._config, pretrained_embeddings, self.tasks) + utils.log() + + def train(self, sess, progress, summary_writer): + heading = lambda s: utils.heading(s, '(' + self._config.model_name + ')') + trained_on_sentences = 0 + start_time = time.time() + unsupervised_loss_total, unsupervised_loss_count = 0, 0 + supervised_loss_total, supervised_loss_count = 0, 0 + for mb in self._get_training_mbs(progress.unlabeled_data_reader): + if mb.task_name != 'unlabeled': + loss = self._model.train_labeled(sess, mb) + supervised_loss_total += loss + supervised_loss_count += 1 + + if mb.task_name == 'unlabeled': + self._model.run_teacher(sess, mb) + loss = self._model.train_unlabeled(sess, mb) + unsupervised_loss_total += loss + unsupervised_loss_count += 1 + mb.teacher_predictions.clear() + + trained_on_sentences += mb.size + global_step = self._model.get_global_step(sess) + + if global_step % self._config.print_every == 0: + utils.log('step {:} - ' + 'supervised loss: {:.2f} - ' + 'unsupervised loss: {:.2f} - ' + '{:.1f} sentences per second'.format( + global_step, + supervised_loss_total / max(1, supervised_loss_count), + unsupervised_loss_total / max(1, unsupervised_loss_count), + trained_on_sentences / (time.time() - start_time))) + unsupervised_loss_total, unsupervised_loss_count = 0, 0 + supervised_loss_total, supervised_loss_count = 0, 0 + + if global_step % self._config.eval_dev_every == 0: + heading('EVAL ON DEV') + self.evaluate_all_tasks(sess, summary_writer, progress.history) + progress.save_if_best_dev_model(sess, global_step) + utils.log() + + if global_step % self._config.eval_train_every == 0: + heading('EVAL ON TRAIN') + self.evaluate_all_tasks(sess, summary_writer, progress.history, True) + utils.log() + + if global_step % self._config.save_model_every == 0: + heading('CHECKPOINTING MODEL') + progress.write(sess, global_step) + utils.log() + + def evaluate_all_tasks(self, sess, summary_writer, history, train_set=False): + for task in self.tasks: + results = self._evaluate_task(sess, task, summary_writer, train_set) + if history is not None: + results.append(('step', self._model.get_global_step(sess))) + history.append(results) + if history is not None: + utils.write_cpickle(history, self._config.history_file) + + def _evaluate_task(self, sess, task, summary_writer, train_set): + scorer = task.get_scorer() + data = task.train_set if train_set else task.val_set + for i, mb in enumerate(data.get_minibatches(self._config.test_batch_size)): + loss, batch_preds = self._model.test(sess, mb) + scorer.update(mb.examples, batch_preds, loss) + + results = scorer.get_results(task.name + + ('_train_' if train_set else '_dev_')) + utils.log(task.name.upper() + ': ' + scorer.results_str()) + 
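+    # Also export the f1/accuracy/loss metrics as TensorBoard summaries.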
write_summary(summary_writer, results, + global_step=self._model.get_global_step(sess)) + return results + + def _get_training_mbs(self, unlabeled_data_reader): + datasets = [task.train_set for task in self.tasks] + weights = [np.sqrt(dataset.size) for dataset in datasets] + thresholds = np.cumsum([w / np.sum(weights) for w in weights]) + + labeled_mbs = [dataset.endless_minibatches(self._config.train_batch_size) + for dataset in datasets] + unlabeled_mbs = unlabeled_data_reader.endless_minibatches() + while True: + dataset_ind = bisect.bisect(thresholds, np.random.random()) + yield next(labeled_mbs[dataset_ind]) + if self._config.is_semisup: + yield next(unlabeled_mbs) + + +def write_summary(writer, results, global_step): + for k, v in results: + if 'f1' in k or 'acc' in k or 'loss' in k: + writer.add_summary(tf.Summary( + value=[tf.Summary.Value(tag=k, simple_value=v)]), global_step) + writer.flush() diff --git a/models/research/cvt_text/training/training_progress.py b/models/research/cvt_text/training/training_progress.py new file mode 100644 index 0000000000000000000000000000000000000000..a9ec96d5df8308e0d68302b529f348fa24ba0f22 --- /dev/null +++ b/models/research/cvt_text/training/training_progress.py @@ -0,0 +1,79 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +""" +Tracks and saves training progress (models and other data such as the current +location in the lm1b corpus) for later reloading. 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from base import utils +from corpus_processing import unlabeled_data + + +class TrainingProgress(object): + def __init__(self, config, sess, checkpoint_saver, best_model_saver, + restore_if_possible=True): + self.config = config + self.checkpoint_saver = checkpoint_saver + self.best_model_saver = best_model_saver + + tf.gfile.MakeDirs(config.checkpoints_dir) + if restore_if_possible and tf.gfile.Exists(config.progress): + history, current_file, current_line = utils.load_cpickle( + config.progress, memoized=False) + self.history = history + self.unlabeled_data_reader = unlabeled_data.UnlabeledDataReader( + config, current_file, current_line) + utils.log("Continuing from global step", dict(self.history[-1])["step"], + "(lm1b file {:}, line {:})".format(current_file, current_line)) + self.checkpoint_saver.restore(sess, tf.train.latest_checkpoint( + self.config.checkpoints_dir)) + else: + utils.log("No previous checkpoint found - starting from scratch") + self.history = [] + self.unlabeled_data_reader = ( + unlabeled_data.UnlabeledDataReader(config)) + + def write(self, sess, global_step): + self.checkpoint_saver.save(sess, self.config.checkpoint, + global_step=global_step) + utils.write_cpickle( + (self.history, self.unlabeled_data_reader.current_file, + self.unlabeled_data_reader.current_line), + self.config.progress) + + def save_if_best_dev_model(self, sess, global_step): + best_avg_score = 0 + for i, results in enumerate(self.history): + if any("train" in metric for metric, value in results): + continue + total, count = 0, 0 + for metric, value in results: + if "f1" in metric or "las" in metric or "accuracy" in metric: + total += value + count += 1 + avg_score = total / count + if avg_score >= best_avg_score: + best_avg_score = avg_score + if i == len(self.history) - 1: + utils.log("New best model! Saving...") + self.best_model_saver.save(sess, self.config.best_model_checkpoint, + global_step=global_step) diff --git a/models/research/deep_contextual_bandits/README.md b/models/research/deep_contextual_bandits/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b81309af5b08003eb727e079e70c3dd08eedb6f6 --- /dev/null +++ b/models/research/deep_contextual_bandits/README.md @@ -0,0 +1,444 @@ +![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +# Deep Bayesian Bandits Library + +This library corresponds to the *[Deep Bayesian Bandits Showdown: An Empirical +Comparison of Bayesian Deep Networks for Thompson +Sampling](https://arxiv.org/abs/1802.09127)* paper, published in +[ICLR](https://iclr.cc/) 2018. We provide a benchmark to test decision-making +algorithms for contextual-bandits. In particular, the current library implements +a variety of algorithms (many of them based on approximate Bayesian Neural +Networks and Thompson sampling), and a number of real and syntethic data +problems exhibiting a diverse set of properties. + +It is a Python library that uses [TensorFlow](https://www.tensorflow.org/). + +We encourage contributors to add new approximate Bayesian Neural Networks or, +more generally, contextual bandits algorithms to the library. 
Also, we would
+like to extend the data sources over time, so we warmly encourage contributions
+on this front too!
+
+Please use the following when citing the code or the paper:
+
+```
+@article{riquelme2018deep, title={Deep Bayesian Bandits Showdown: An Empirical
+Comparison of Bayesian Deep Networks for Thompson Sampling},
+author={Riquelme, Carlos and Tucker, George and Snoek, Jasper},
+journal={International Conference on Learning Representations, ICLR.}, year={2018}}
+```
+
+**Contact**. This repository is maintained by [Carlos Riquelme](http://rikel.me) ([rikel](https://github.com/rikel)). Feel free to reach out directly at [rikel@google.com](mailto:rikel@google.com) with any questions or comments.
+
+
+We first briefly introduce contextual bandits and Thompson sampling, then
+enumerate the implemented algorithms and the available data sources. Finally,
+we provide a simple complete example illustrating how to use the library.
+
+## Contextual Bandits
+
+Contextual bandits are a rich decision-making framework where an algorithm has
+to choose among a set of *k* actions at every time step *t*, after observing
+a context (or side-information) denoted by *X_t*. The general pseudocode for
+the process, if we use algorithm **A**, is as follows:
+
+```
+At time t = 1, ..., T:
+  1. Observe new context: X_t
+  2. Choose action: a_t = A.action(X_t)
+  3. Observe reward: r_t
+  4. Update internal state of the algorithm: A.update((X_t, a_t, r_t))
+```
+
+The goal is to maximize the total sum of rewards: ∑_t r_t
+
+For example, each *X_t* could encode the properties of a specific user (and
+the time or day), and we may have to choose an ad, discount coupon, treatment,
+hyper-parameters, or version of a website to show or provide to the user.
+Hopefully, over time, we will learn how to match each type of user to the most
+beneficial personalized action under some metric (the reward).
+
+## Thompson Sampling
+
+Thompson Sampling is a meta-algorithm that chooses an action for the contextual
+bandit in a statistically efficient manner, simultaneously finding the best arm
+while attempting to incur low cost. Informally speaking, we assume the expected
+reward is given by some function
+**E**[r_t | X_t, a_t] = f(X_t, a_t).
+Unfortunately, function **f** is unknown, as otherwise we could just choose the
+action with the highest expected value:
+a_t* = arg max_i f(X_t, a_i).
+
+The idea behind Thompson Sampling is based on keeping a posterior distribution
+π_t over functions in some family f ∈ F after observing the first
+*t-1* datapoints. Then, at time *t*, we sample one potential explanation of
+the underlying process: f_t ∼ π_t, and act optimally (i.e., greedily)
+*according to f_t*. In other words, we choose
+a_t = arg max_i f_t(X_t, a_i).
+Finally, we update our posterior distribution with the newly collected
+datapoint (X_t, a_t, r_t).
+
+The main issue is that keeping an updated posterior π_t (or even sampling
+from it) is often intractable for highly parameterized models like deep
+neural networks. The algorithms we list in the next section provide tractable
+*approximations* that can be used in combination with Thompson Sampling to solve
+the contextual bandit problem.
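+
+To make the loop above concrete, below is a minimal, self-contained sketch of
+Thompson Sampling with an exact Bayesian linear regression posterior per action
+(NumPy only). It is purely illustrative: the class and argument names are made
+up for this sketch and do not correspond to this library's API, which is
+described in the next section.
+
+```
+import numpy as np
+
+class ToyLinearTS(object):
+  """Per-action Bayesian linear regression with known noise variance."""
+
+  def __init__(self, num_actions, context_dim, noise_var=1.0, prior_var=4.0):
+    self.noise_var = noise_var
+    # Posterior precision matrix and X^T y accumulator for each action.
+    self.precisions = [np.eye(context_dim) / prior_var
+                       for _ in range(num_actions)]
+    self.b = [np.zeros(context_dim) for _ in range(num_actions)]
+
+  def action(self, x):
+    # Sample one weight vector per action from its posterior, then act greedily.
+    sampled_values = []
+    for precision, b in zip(self.precisions, self.b):
+      cov = np.linalg.inv(precision)
+      mean = cov.dot(b) / self.noise_var
+      w = np.random.multivariate_normal(mean, cov)
+      sampled_values.append(w.dot(x))
+    return int(np.argmax(sampled_values))
+
+  def update(self, x, a, r):
+    # Conjugate update of the chosen action's posterior.
+    self.precisions[a] += np.outer(x, x) / self.noise_var
+    self.b[a] += r * x
+
+# Toy environment with noisy linear rewards, mirroring the pseudocode above.
+rng = np.random.RandomState(0)
+num_actions, context_dim, horizon = 4, 8, 2000
+true_w = rng.randn(num_actions, context_dim)
+agent = ToyLinearTS(num_actions, context_dim)
+total_reward = 0.0
+for t in range(horizon):
+  x_t = rng.randn(context_dim)               # 1. observe context
+  a_t = agent.action(x_t)                    # 2. choose action
+  r_t = true_w[a_t].dot(x_t) + rng.randn()   # 3. observe reward
+  agent.update(x_t, a_t, r_t)                # 4. update the algorithm
+  total_reward += r_t
+print('total reward:', total_reward)
+```
+
+The algorithms below follow this same pattern, differing in how they represent
+and update the posterior (from exact linear regression to various approximate
+posteriors over neural networks).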
+
+## Algorithms
+
+The Deep Bayesian Bandits library includes the following algorithms (see the
+[paper](https://arxiv.org/abs/1802.09127) for further details):
+
+1. **Linear Algorithms**. As a powerful baseline, we provide linear algorithms.
+    In particular, we focus on the exact Bayesian linear regression
+    implementation, while it is easy to derive the greedy OLS version (possibly,
+    with epsilon-greedy exploration). The algorithm is implemented in
+    *linear_full_posterior_sampling.py*, and it is instantiated as follows:
+
+    ```
+    linear_full = LinearFullPosteriorSampling('MyLinearTS', my_hparams)
+    ```
+
+2. **Neural Linear**. We introduce an algorithm we call Neural Linear, which
+    operates by learning a neural network to map contexts to rewards for each
+    action, and ---simultaneously--- it updates a Bayesian linear regression in
+    the last layer (i.e., the one that maps the final representation **z** to
+    the rewards **r**). Thompson Sampling samples the linear parameters
+    β_i for each action *i*, but keeps the network that computes the
+    representation. Then, both parts (network and Bayesian linear regression)
+    are updated, possibly at different frequencies. The algorithm is implemented
+    in *neural_linear_sampling.py*, and we create an algorithm instance like
+    this:
+
+    ```
+    neural_linear = NeuralLinearPosteriorSampling('MyNLinear', my_hparams)
+    ```
+
+3. **Neural Greedy**. Another standard benchmark is to train a neural network
+    that maps contexts to rewards, and at each time *t* just acts greedily
+    according to the current model. In particular, this approach does *not*
+    explicitly use Thompson Sampling. However, due to stochastic gradient
+    descent, there is still some randomness in its output. It is
+    straightforward to add epsilon-greedy exploration to choose random
+    actions with probability ε ∈ (0, 1). The algorithm is
+    implemented in *neural_bandit_model.py*, and it is used together with
+    *PosteriorBNNSampling* (defined in *posterior_bnn_sampling.py*) by calling:
+
+    ```
+    neural_greedy = PosteriorBNNSampling('MyNGreedy', my_hparams, 'RMSProp')
+    ```
+
+4. **Stochastic Variational Inference**, Bayes by Backpropagation. We implement
+    a Bayesian neural network by modeling each individual weight posterior as a
+    univariate Gaussian distribution: w_ij ∼ N(μ_ij, σ_ij²).
+    Thompson sampling then samples a network at each time step
+    by sampling each weight independently. The variational approach consists in
+    maximizing a proxy for maximum likelihood of the observed data, the ELBO or
+    variational lower bound, to fit the values of μ_ij, σ_ij²
+    for every *i, j*.
+
+    See [Weight Uncertainty in Neural
+    Networks](https://arxiv.org/abs/1505.05424).
+
+    The BNN algorithm is implemented in *variational_neural_bandit_model.py*,
+    and it is used together with *PosteriorBNNSampling* (defined in
+    *posterior_bnn_sampling.py*) by calling:
+
+    ```
+    bbb = PosteriorBNNSampling('myBBB', my_hparams, 'Variational')
+    ```
+
+5. **Expectation-Propagation**, Black-box alpha-divergence minimization.
+    The family of expectation-propagation algorithms is based on the message
+    passing framework. They iteratively approximate the posterior by updating a
+    single approximation factor (or site) at a time, which usually corresponds
+    to the likelihood of one data point. We focus on methods that directly
+    optimize the global EP objective via stochastic gradient descent, as, for
+    instance, Power EP. For further details, see the original paper below.
+
+    See [Black-box alpha-divergence
+    Minimization](https://arxiv.org/abs/1511.03243).
+
+    We create an instance of the algorithm like this:
+
+    ```
+    bb_adiv = PosteriorBNNSampling('MyEP', my_hparams, 'AlphaDiv')
+    ```
+
+6. **Dropout**.
    Dropout is a training technique where the output of each neuron
+    is independently zeroed out with probability *p* at each forward pass.
+    Once the network has been trained, dropout can still be used to obtain a
+    distribution of predictions for a specific input. Following the best action
+    with respect to the random dropout prediction can be interpreted as an
+    implicit form of Thompson sampling. The code for dropout is the same as for
+    Neural Greedy (see above), but we need to set two hyper-parameters:
+    *use_dropout=True* and *keep_prob=p*, where *p* takes the desired value in
+    (0, 1). Then:
+
+    ```
+    dropout = PosteriorBNNSampling('MyDropout', my_hparams, 'RMSProp')
+    ```
+
+7. **Monte Carlo Methods**. To be added soon.
+
+8. **Bootstrapped Networks**. This algorithm simultaneously trains **q**
+    neural networks in parallel, based on different datasets D_1, ..., D_q.
+    Those datasets are collected by adding each new datapoint
+    (X_t, a_t, r_t) to each dataset *D_i* independently and with
+    probability p ∈ (0, 1]. Therefore, the main hyperparameters of the
+    algorithm are **(q, p)**. In order to choose an action for a new context,
+    one of the **q** networks is first selected with uniform probability (i.e.,
+    *1/q*). Then, the best action according to the *selected* network is
+    played.
+
+    See [Deep Exploration via Bootstrapped
+    DQN](https://arxiv.org/abs/1602.04621).
+
+    The algorithm is implemented in *bootstrapped_bnn_sampling.py*, and we
+    instantiate it as follows (where *my_hparams* contains both **q** and **p**):
+
+    ```
+    bootstrap = BootstrappedBNNSampling('MyBoot', my_hparams)
+    ```
+
+9. **Parameter-Noise**. Another approach to approximating a distribution over
+    neural networks (or, more generally, models) that map contexts to rewards
+    consists in randomly perturbing a point estimate trained by Stochastic
+    Gradient Descent on the data. The Parameter-Noise algorithm uses a heuristic
+    to control the amount of noise σ_t² it adds independently to the
+    parameters representing a neural network: θ_t' = θ_t + ε, where
+    ε ∼ N(0, σ_t² I_d).
+    After using θ_t' for decision making, the following SGD
+    training steps start again from θ_t. The key hyperparameters to set
+    are those controlling the noise heuristic.
+
+    See [Parameter Space Noise for
+    Exploration](https://arxiv.org/abs/1706.01905).
+
+    The algorithm is implemented in *parameter_noise_sampling.py*, and we create
+    an instance by calling:
+
+    ```
+    parameter_noise = ParameterNoiseSampling('MyParamNoise', my_hparams)
+    ```
+
+10. **Gaussian Processes**. Gaussian Processes are another standard benchmark;
+    see *Gaussian Processes for Machine Learning* by Rasmussen and Williams for
+    an introduction. To model the expected reward of different actions, we fit a
+    multitask GP.
+
+    See [Multi-task Gaussian Process
+    Prediction](http://papers.nips.cc/paper/3189-multi-task-gaussian-process-prediction.pdf).
+
+    Our implementation is provided in *multitask_gp.py*, and it is instantiated
+    as follows:
+
+    ```
+    gp = PosteriorBNNSampling('MyMultitaskGP', my_hparams, 'GP')
+    ```
+
+In the code snippet at the bottom, we show how to instantiate some of these
+algorithms, run the contextual bandit simulator, and display the
+high-level results.
+
+## Data
+
+In the paper we use two types of contextual datasets: synthetic and based on
+real-world data.
+
+We provide functions that sample problems from those datasets; the sketch
+below illustrates the kind of problem they produce.
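+
+As a rough illustration (this is a hypothetical stand-in, not the library's
+*synthetic_data_sampler.py*, and it assumes a problem can be represented as a
+table of contexts together with the reward every action would have produced):
+
+```
+import numpy as np
+
+def sample_toy_linear_problem(num_contexts, context_dim, num_actions,
+                              noise_std=0.1):
+  """Hypothetical sampler: Gaussian contexts, linear arms."""
+  betas = np.random.randn(context_dim, num_actions)
+  betas /= np.linalg.norm(betas, axis=0)        # unit-norm arm parameters
+  contexts = np.random.randn(num_contexts, context_dim)
+  mean_rewards = contexts.dot(betas)
+  rewards = mean_rewards + noise_std * np.random.randn(num_contexts, num_actions)
+  opt_actions = np.argmax(mean_rewards, axis=1)
+  opt_rewards = mean_rewards[np.arange(num_contexts), opt_actions]
+  # One row per time step: the first context_dim columns are the context, the
+  # remaining num_actions columns hold the reward of each arm for that context.
+  dataset = np.hstack((contexts, rewards))
+  return dataset, opt_rewards, opt_actions
+
+dataset, opt_rewards, opt_actions = sample_toy_linear_problem(2000, 10, 5)
+```
+
+A simulator can then replay these rows one at a time, revealing to the
+algorithm only the reward of the action it actually chose, while the stored
+optimal rewards and actions make it easy to report regret afterwards.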
In the case of +real-world data, you first need to download the raw datasets, and pass the route +to the functions. Links for the datasets are provided below. + +### Synthetic Datasets + +Synthetic datasets are contained in the *synthetic_data_sampler.py* file. In +particular, it includes: + +1. **Linear data**. Provides a number of linear arms, and Gaussian contexts. + +2. **Sparse linear data**. Provides a number of sparse linear arms, and + Gaussian contexts. + +3. **Wheel bandit data**. Provides sampled data from the wheel bandit data, see + [Section 5.4](https://arxiv.org/abs/1802.09127) in the paper. + +### Real-World Datasets + +Real-world data generating functions are contained in the *data_sampler.py* +file. + +In particular, it includes: + +1. **Mushroom data**. Each incoming context represents a different type of + mushroom, and the actions are eat or no-eat. Eating an edible mushroom + provides positive reward, while eating a poisonous one provides positive + reward with probability *p*, and a large negative reward with probability + *1-p*. All the rewards, and the value of *p* are customizable. The + [dataset](https://archive.ics.uci.edu/ml/datasets/mushroom) is part of the + UCI repository, and the bandit problem was proposed in Blundell et al. + (2015). Data is available [here](https://storage.googleapis.com/bandits_datasets/mushroom.data) + or alternatively [here](https://archive.ics.uci.edu/ml/machine-learning-databases/mushroom/), + use the *agaricus-lepiota.data* file. + +2. **Stock data**. We created the Financial Dataset by pulling the stock prices + of *d = 21* publicly traded companies in NYSE and Nasdaq, for the last 14 + years (*n = 3713*). For each day, the context was the price difference + between the beginning and end of the session for each stock. We + synthetically created the arms to be a linear combination of the contexts, + representing *k = 8* different potential portfolios. Data is available + [here](https://storage.googleapis.com/bandits_datasets/raw_stock_contexts). + +3. **Jester data**. We create a recommendation system bandit problem as + follows. The Jester Dataset (Goldberg et al., 2001) provides continuous + ratings in *[-10, 10]* for 100 jokes from a total of 73421 users. We find + a *complete* subset of *n = 19181* users rating all 40 jokes. Following + Riquelme et al. (2017), we take *d = 32* of the ratings as the context of + the user, and *k = 8* as the arms. The agent recommends one joke, and + obtains the reward corresponding to the rating of the user for the selected + joke. Data is available [here](https://storage.googleapis.com/bandits_datasets/jester_data_40jokes_19181users.npy). + +4. **Statlog data**. The Shuttle Statlog Dataset (Asuncion & Newman, 2007) + provides the value of *d = 9* indicators during a space shuttle flight, + and the goal is to predict the state of the radiator subsystem of the + shuttle. There are *k = 7* possible states, and if the agent selects the + right state, then reward 1 is generated. Otherwise, the agent obtains no + reward (*r = 0*). The most interesting aspect of the dataset is that one + action is the optimal one in 80% of the cases, and some algorithms may + commit to this action instead of further exploring. In this case, the number + of contexts is *n = 43500*. Data is available [here](https://storage.googleapis.com/bandits_datasets/shuttle.trn) or alternatively + [here](https://archive.ics.uci.edu/ml/datasets/Statlog+\(Shuttle\)), use + *shuttle.trn* file. + +5. **Adult data**. 
The Adult Dataset (Kohavi, 1996; Asuncion & Newman, 2007) + comprises personal information from the US Census Bureau database, and the + standard prediction task is to determine if a person makes over 50K a year + or not. However, we consider the *k = 14* different occupations as + feasible actions, based on *d = 94* covariates (many of them binarized). + As in previous datasets, the agent obtains a reward of 1 for making the + right prediction, and 0 otherwise. The total number of contexts is *n = + 45222*. Data is available [here](https://storage.googleapis.com/bandits_datasets/adult.full) or alternatively + [here](https://archive.ics.uci.edu/ml/datasets/adult), use *adult.data* + file. + +6. **Census data**. The US Census (1990) Dataset (Asuncion & Newman, 2007) + contains a number of personal features (age, native language, education...) + which we summarize in *d = 389* covariates, including binary dummy + variables for categorical features. Our goal again is to predict the + occupation of the individual among *k = 9* classes. The agent obtains + reward 1 for making the right prediction, and 0 otherwise. Data is available + [here](https://storage.googleapis.com/bandits_datasets/USCensus1990.data.txt) or alternatively [here](https://archive.ics.uci.edu/ml/datasets/US+Census+Data+\(1990\)), use + *USCensus1990.data.txt* file. + +7. **Covertype data**. The Covertype Dataset (Asuncion & Newman, 2007) + classifies the cover type of northern Colorado forest areas in *k = 7* + classes, based on *d = 54* features, including elevation, slope, aspect, + and soil type. Again, the agent obtains reward 1 if the correct class is + selected, and 0 otherwise. Data is available [here](https://storage.googleapis.com/bandits_datasets/covtype.data) or alternatively + [here](https://archive.ics.uci.edu/ml/datasets/covertype), use + *covtype.data* file. + +In datasets 4-7, each feature of the dataset is normalized first. + +## Usage: Basic Example + +This library requires Tensorflow, Numpy, and Pandas. + +The file *example_main.py* provides a complete example on how to use the +library. We run the code: + +``` + python example_main.py +``` + +**Do not forget to** configure the routes to the data files at the top of *example_main.py*. + +For example, we can run the Mushroom bandit for 2000 contexts on a few +algorithms as follows: + +``` + # Problem parameters + num_contexts = 2000 + + # Choose data source among: + # {linear, sparse_linear, mushroom, financial, jester, + # statlog, adult, covertype, census, wheel} + data_type = 'mushroom' + + # Create dataset + sampled_vals = sample_data(data_type, num_contexts) + dataset, opt_rewards, opt_actions, num_actions, context_dim = sampled_vals + + # Define hyperparameters and algorithms + hparams_linear = tf.contrib.training.HParams(num_actions=num_actions, + context_dim=context_dim, + a0=6, + b0=6, + lambda_prior=0.25, + initial_pulls=2) + + hparams_dropout = tf.contrib.training.HParams(num_actions=num_actions, + context_dim=context_dim, + init_scale=0.3, + activation=tf.nn.relu, + layer_sizes=[50], + batch_size=512, + activate_decay=True, + initial_lr=0.1, + max_grad_norm=5.0, + show_training=False, + freq_summary=1000, + buffer_s=-1, + initial_pulls=2, + optimizer='RMS', + reset_lr=True, + lr_decay_rate=0.5, + training_freq=50, + training_epochs=100, + keep_prob=0.80, + use_dropout=True) + + ### Create hyper-parameter configurations for other algorithms + [...] 
+ + algos = [ + UniformSampling('Uniform Sampling', hparams), + PosteriorBNNSampling('Dropout', hparams_dropout, 'RMSProp'), + PosteriorBNNSampling('BBB', hparams_bbb, 'Variational'), + NeuralLinearPosteriorSampling('NeuralLinear', hparams_nlinear), + LinearFullPosteriorSampling('LinFullPost', hparams_linear), + BootstrappedBNNSampling('BootRMS', hparams_boot), + ParameterNoiseSampling('ParamNoise', hparams_pnoise), + ] + + # Run contextual bandit problem + t_init = time.time() + results = run_contextual_bandit(context_dim, num_actions, dataset, algos) + _, h_rewards = results + + # Display results + display_results(algos, opt_rewards, opt_actions, h_rewards, t_init, data_type) + +``` + +The previous code leads to final results that look like: + +``` +--------------------------------------------------- +--------------------------------------------------- +mushroom bandit completed after 69.8401839733 seconds. +--------------------------------------------------- + 0) LinFullPost | total reward = 4365.0. + 1) NeuralLinear | total reward = 4110.0. + 2) Dropout | total reward = 3430.0. + 3) ParamNoise | total reward = 3270.0. + 4) BootRMS | total reward = 3050.0. + 5) BBB | total reward = 2505.0. + 6) Uniform Sampling | total reward = -4930.0. +--------------------------------------------------- +Optimal total reward = 5235. +Frequency of optimal actions (action, frequency): +[[0, 953], [1, 1047]] +--------------------------------------------------- +--------------------------------------------------- +``` diff --git a/models/research/deep_contextual_bandits/bandits/algorithms/bb_alpha_divergence_model.py b/models/research/deep_contextual_bandits/bandits/algorithms/bb_alpha_divergence_model.py new file mode 100644 index 0000000000000000000000000000000000000000..5b9c0ebd0988873eaf97d8d68d25dae5e5b9cd71 --- /dev/null +++ b/models/research/deep_contextual_bandits/bandits/algorithms/bb_alpha_divergence_model.py @@ -0,0 +1,373 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Bayesian NN using expectation propagation (Black-Box Alpha-Divergence). + +See https://arxiv.org/abs/1511.03243 for details. +All formulas used in this implementation are derived in: +https://www.overleaf.com/12837696kwzjxkyhdytk#/49028744/. 
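+
+In this implementation the training loss is the BB-alpha energy, estimated with
+K Monte Carlo samples w_1, ..., w_K of the network weights drawn from the
+posterior q:
+
+  loss = log Z(prior) - log Z(q)
+         - N / (B * alpha) * sum_i log [ (1/K) sum_k exp(alpha * (log p(y_i | w_k) - log f(w_k))) ]
+
+where f is the approximate likelihood-site factor, N is the dataset size and B
+is the batch size (see build_model and log_alpha_likelihood_ratio below).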
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sys +import numpy as np +import tensorflow as tf +from absl import flags + +from bandits.core.bayesian_nn import BayesianNN + + +FLAGS = flags.FLAGS +tfd = tf.contrib.distributions # update to: tensorflow_probability.distributions + + +def log_gaussian(x, mu, sigma, reduce_sum=True): + res = tfd.Normal(mu, sigma).log_prob(x) + if reduce_sum: + return tf.reduce_sum(res) + else: + return res + + +class BBAlphaDivergence(BayesianNN): + """Implements an approximate Bayesian NN via Black-Box Alpha-Divergence.""" + + def __init__(self, hparams, name): + + self.name = name + self.hparams = hparams + + self.alpha = getattr(self.hparams, 'alpha', 1.0) + self.num_mc_nn_samples = getattr(self.hparams, 'num_mc_nn_samples', 10) + + self.n_in = self.hparams.context_dim + self.n_out = self.hparams.num_actions + self.layers = self.hparams.layer_sizes + self.batch_size = self.hparams.batch_size + + self.show_training = self.hparams.show_training + self.freq_summary = self.hparams.freq_summary + self.verbose = getattr(self.hparams, 'verbose', True) + + self.cleared_times_trained = self.hparams.cleared_times_trained + self.initial_training_steps = self.hparams.initial_training_steps + self.training_schedule = np.linspace(self.initial_training_steps, + self.hparams.training_epochs, + self.cleared_times_trained) + + self.times_trained = 0 + self.initialize_model() + + def initialize_model(self): + """Builds and initialize the model.""" + + self.num_w = 0 + self.num_b = 0 + + self.weights_m = {} + self.weights_std = {} + self.biases_m = {} + self.biases_std = {} + + self.h_max_var = [] + + if self.hparams.use_sigma_exp_transform: + self.sigma_transform = tfd.bijectors.Exp() + else: + self.sigma_transform = tfd.bijectors.Softplus() + + # Build the graph corresponding to the Bayesian NN instance. + self.graph = tf.Graph() + + with self.graph.as_default(): + + self.sess = tf.Session() + self.x = tf.placeholder(shape=[None, self.n_in], + dtype=tf.float32, name='x') + self.y = tf.placeholder(shape=[None, self.n_out], + dtype=tf.float32, name='y') + self.weights = tf.placeholder(shape=[None, self.n_out], + dtype=tf.float32, name='w') + self.data_size = tf.placeholder(tf.float32, shape=(), name='data_size') + + self.prior_variance = self.hparams.prior_variance + if self.prior_variance < 0: + # if not fixed, we learn the prior. 
+ self.prior_variance = self.sigma_transform.forward( + self.build_mu_variable([1, 1])) + + self.build_model() + self.sess.run(tf.global_variables_initializer()) + + def build_mu_variable(self, shape): + """Returns a mean variable initialized as N(0, 0.05).""" + return tf.Variable(tf.random_normal(shape, 0.0, 0.05)) + + def build_sigma_variable(self, shape, init=-5.): + """Returns a sigma variable initialized as N(init, 0.05).""" + # Initialize sigma to be very small initially to encourage MAP opt first + return tf.Variable(tf.random_normal(shape, init, 0.05)) + + def build_layer(self, input_x, shape, layer_id, activation_fn=tf.nn.relu): + """Builds a layer with N(mean, std) for each weight, and samples from it.""" + + w_mu = self.build_mu_variable(shape) + w_sigma = self.sigma_transform.forward(self.build_sigma_variable(shape)) + + w_noise = tf.random_normal(shape) + w = w_mu + w_sigma * w_noise + + b_mu = self.build_mu_variable([1, shape[1]]) + b_sigma = self.sigma_transform.forward( + self.build_sigma_variable([1, shape[1]])) + + b_noise = tf.random_normal([1, shape[1]]) + b = b_mu + b_sigma * b_noise + + # Create outputs + output_h = activation_fn(tf.matmul(input_x, w) + b) + + # Store means and stds + self.weights_m[layer_id] = w_mu + self.weights_std[layer_id] = w_sigma + self.biases_m[layer_id] = b_mu + self.biases_std[layer_id] = b_sigma + + return output_h + + def sample_neural_network(self, activation_fn=tf.nn.relu): + """Samples a nn from posterior, computes data log lk and log f factor.""" + + with self.graph.as_default(): + + log_f = 0 + n = self.data_size + input_x = self.x + + for layer_id in range(self.total_layers): + + # load mean and std of each weight + w_mu = self.weights_m[layer_id] + w_sigma = self.weights_std[layer_id] + b_mu = self.biases_m[layer_id] + b_sigma = self.biases_std[layer_id] + + # sample weights from Gaussian distribution + shape = w_mu.shape + w_noise = tf.random_normal(shape) + b_noise = tf.random_normal([1, int(shape[1])]) + w = w_mu + w_sigma * w_noise + b = b_mu + b_sigma * b_noise + + # compute contribution to log_f + t1 = w * w_mu / (n * w_sigma ** 2) + t2 = (0.5 * w ** 2 / n) * (1 / self.prior_variance - 1 / w_sigma ** 2) + log_f += tf.reduce_sum(t1 + t2) + + t1 = b * b_mu / (n * b_sigma ** 2) + t2 = (0.5 * b ** 2 / n) * (1 / self.prior_variance - 1 / b_sigma ** 2) + log_f += tf.reduce_sum(t1 + t2) + + if layer_id < self.total_layers - 1: + output_h = activation_fn(tf.matmul(input_x, w) + b) + else: + output_h = tf.matmul(input_x, w) + b + + input_x = output_h + + # compute log likelihood of the observed reward under the sampled nn + log_likelihood = log_gaussian( + self.y, output_h, self.noise_sigma, reduce_sum=False) + weighted_log_likelihood = tf.reduce_sum(log_likelihood * self.weights, -1) + + return log_f, weighted_log_likelihood + + def log_z_q(self): + """Computes log-partition function of current posterior parameters.""" + + with self.graph.as_default(): + + log_z_q = 0 + + for layer_id in range(self.total_layers): + + w_mu = self.weights_m[layer_id] + w_sigma = self.weights_std[layer_id] + b_mu = self.biases_m[layer_id] + b_sigma = self.biases_std[layer_id] + + w_term = 0.5 * tf.reduce_sum(w_mu ** 2 / w_sigma ** 2) + w_term += 0.5 * tf.reduce_sum(tf.log(2 * np.pi) + 2 * tf.log(w_sigma)) + + b_term = 0.5 * tf.reduce_sum(b_mu ** 2 / b_sigma ** 2) + b_term += 0.5 * tf.reduce_sum(tf.log(2 * np.pi) + 2 * tf.log(b_sigma)) + + log_z_q += w_term + b_term + + return log_z_q + + def log_z_prior(self): + """Computes log-partition function of the 
prior parameters.""" + num_params = self.num_w + self.num_b + return num_params * 0.5 * tf.log(2 * np.pi * self.prior_variance) + + def log_alpha_likelihood_ratio(self, activation_fn=tf.nn.relu): + + # each nn sample returns (log f, log likelihoods) + nn_samples = [ + self.sample_neural_network(activation_fn) + for _ in range(self.num_mc_nn_samples) + ] + nn_log_f_samples = [elt[0] for elt in nn_samples] + nn_log_lk_samples = [elt[1] for elt in nn_samples] + + # we stack the (log f, log likelihoods) from the k nn samples + nn_log_f_stack = tf.stack(nn_log_f_samples) # k x 1 + nn_log_lk_stack = tf.stack(nn_log_lk_samples) # k x N + nn_f_tile = tf.tile(nn_log_f_stack, [self.batch_size]) + nn_f_tile = tf.reshape(nn_f_tile, + [self.num_mc_nn_samples, self.batch_size]) + + # now both the log f and log likelihood terms have shape: k x N + # apply formula in https://www.overleaf.com/12837696kwzjxkyhdytk#/49028744/ + nn_log_ratio = nn_log_lk_stack - nn_f_tile + nn_log_ratio = self.alpha * tf.transpose(nn_log_ratio) + logsumexp_value = tf.reduce_logsumexp(nn_log_ratio, -1) + log_k_scalar = tf.log(tf.cast(self.num_mc_nn_samples, tf.float32)) + log_k = log_k_scalar * tf.ones([self.batch_size]) + + return tf.reduce_sum(logsumexp_value - log_k, -1) + + def build_model(self, activation_fn=tf.nn.relu): + """Defines the actual NN model with fully connected layers. + + Args: + activation_fn: Activation function for the neural network. + + The loss is computed for partial feedback settings (bandits), so only + the observed outcome is backpropagated (see weighted loss). + Selects the optimizer and, finally, it also initializes the graph. + """ + + print('Initializing model {}.'.format(self.name)) + + # Build terms for the noise sigma estimation for each action. + noise_sigma_mu = (self.build_mu_variable([1, self.n_out]) + + self.sigma_transform.inverse(self.hparams.noise_sigma)) + noise_sigma_sigma = self.sigma_transform.forward( + self.build_sigma_variable([1, self.n_out])) + + pre_noise_sigma = noise_sigma_mu + tf.random_normal( + [1, self.n_out]) * noise_sigma_sigma + self.noise_sigma = self.sigma_transform.forward(pre_noise_sigma) + + # Build network + input_x = self.x + n_in = self.n_in + self.total_layers = len(self.layers) + 1 + if self.layers[0] == 0: + self.total_layers = 1 + + for l_number, n_nodes in enumerate(self.layers): + if n_nodes > 0: + h = self.build_layer(input_x, [n_in, n_nodes], l_number) + input_x = h + n_in = n_nodes + self.num_w += n_in * n_nodes + self.num_b += n_nodes + + self.y_pred = self.build_layer(input_x, [n_in, self.n_out], + self.total_layers - 1, + activation_fn=lambda x: x) + + # Compute energy function based on sampled nn's + log_coeff = self.data_size / (self.batch_size * self.alpha) + log_ratio = log_coeff * self.log_alpha_likelihood_ratio(activation_fn) + logzprior = self.log_z_prior() + logzq = self.log_z_q() + energy = logzprior - logzq - log_ratio + + self.loss = energy + self.global_step = tf.train.get_or_create_global_step() + self.train_op = tf.train.AdamOptimizer(self.hparams.initial_lr).minimize( + self.loss, global_step=self.global_step) + + # Useful for debugging + sq_loss = tf.squared_difference(self.y_pred, self.y) + weighted_sq_loss = self.weights * sq_loss + self.cost = tf.reduce_sum(weighted_sq_loss) / self.batch_size + + # Create tensorboard metrics + self.create_summaries() + self.summary_writer = tf.summary.FileWriter('{}/graph_{}'.format( + FLAGS.logdir, self.name), self.sess.graph) + + def create_summaries(self): + tf.summary.scalar('loss', 
self.loss) + tf.summary.scalar('cost', self.cost) + self.summary_op = tf.summary.merge_all() + + def assign_lr(self): + """Resets the learning rate in dynamic schedules for subsequent trainings. + + In bandits settings, we do expand our dataset over time. Then, we need to + re-train the network with the new data. Those algorithms that do not keep + the step constant, can reset it at the start of each training process. + """ + + decay_steps = 1 + if self.hparams.activate_decay: + current_gs = self.sess.run(self.global_step) + with self.graph.as_default(): + self.lr = tf.train.inverse_time_decay(self.hparams.initial_lr, + self.global_step - current_gs, + decay_steps, + self.hparams.lr_decay_rate) + + def train(self, data, num_steps): + """Trains the BNN for num_steps, using the data in 'data'. + + Args: + data: ContextualDataset object that provides the data. + num_steps: Number of minibatches to train the network for. + """ + + if self.times_trained < self.cleared_times_trained: + num_steps = int(self.training_schedule[self.times_trained]) + self.times_trained += 1 + + if self.verbose: + print('Training {} for {} steps...'.format(self.name, num_steps)) + + with self.graph.as_default(): + + for step in range(num_steps): + x, y, w = data.get_batch_with_weights(self.hparams.batch_size) + _, summary, global_step, loss = self.sess.run( + [self.train_op, self.summary_op, self.global_step, self.loss], + feed_dict={self.x: x, self.y: y, self.weights: w, + self.data_size: data.num_points()}) + + weights_l = self.sess.run(self.weights_std[0]) + self.h_max_var.append(np.max(weights_l)) + + if step % self.freq_summary == 0: + if self.show_training: + print('step: {}, loss: {}'.format(step, loss)) + sys.stdout.flush() + self.summary_writer.add_summary(summary, global_step) diff --git a/models/research/deep_contextual_bandits/bandits/algorithms/bf_variational_neural_bandit_model.py b/models/research/deep_contextual_bandits/bandits/algorithms/bf_variational_neural_bandit_model.py new file mode 100644 index 0000000000000000000000000000000000000000..cb87c23358f27bd93e30528b20f7a3bb3ba876dd --- /dev/null +++ b/models/research/deep_contextual_bandits/bandits/algorithms/bf_variational_neural_bandit_model.py @@ -0,0 +1,352 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Bayesian NN using factorized VI (Bayes By Backprop. Blundell et al. 2014). + +See https://arxiv.org/abs/1505.05424 for details. 
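+
+The training objective built below is the negative of a minibatch ELBO
+estimate,
+
+  loss = -( (1/B) * sum_i log p(y_i | x_i, w) + neg_kl / N )
+
+where only the reward of the selected action enters the likelihood (via the
+one-hot `weights` placeholder), neg_kl collects the negative KL terms of the
+variational layers and of the learned observation noise, N is the dataset size
+and B is the batch size.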
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf +# import tensorflow_probability as tfp + +from absl import flags +from bandits.core.bayesian_nn import BayesianNN + + +FLAGS = flags.FLAGS +# tfd = tfp.distributions +tfd = tf.contrib.distributions +tfl = tf.contrib.layers + + +def log_gaussian(x, mu, sigma, reduce_sum=True): + """Returns log Gaussian pdf.""" + res = tfd.Normal(mu, sigma).log_prob(x) + if reduce_sum: + return tf.reduce_sum(res) + else: + return res + + +def analytic_kl(mu_1, sigma_1, mu_2, sigma_2): + """KL for two Gaussian distributions with diagonal covariance matrix.""" + kl = tfd.kl_divergence(tfd.MVNDiag(mu_1, sigma_1), tfd.MVNDiag(mu_2, sigma_2)) + return kl + + +class BfVariationalNeuralBanditModel(BayesianNN): + """Implements an approximate Bayesian NN using Variational Inference.""" + + def __init__(self, hparams, name="BBBNN"): + + self.name = name + self.hparams = hparams + + self.n_in = self.hparams.context_dim + self.n_out = self.hparams.num_actions + self.layers = self.hparams.layer_sizes + self.init_scale = self.hparams.init_scale + self.f_num_points = None + if "f_num_points" in hparams: + self.f_num_points = self.hparams.f_num_points + + self.cleared_times_trained = self.hparams.cleared_times_trained + self.initial_training_steps = self.hparams.initial_training_steps + self.training_schedule = np.linspace(self.initial_training_steps, + self.hparams.training_epochs, + self.cleared_times_trained) + self.verbose = getattr(self.hparams, "verbose", True) + + self.weights_m = {} + self.weights_std = {} + self.biases_m = {} + self.biases_std = {} + + self.times_trained = 0 + + if self.hparams.use_sigma_exp_transform: + self.sigma_transform = tf.exp + self.inverse_sigma_transform = np.log + else: + self.sigma_transform = tf.nn.softplus + self.inverse_sigma_transform = lambda y: y + np.log(1. - np.exp(-y)) + + # Whether to use the local reparameterization trick to compute the loss. + # See details in https://arxiv.org/abs/1506.02557 + self.use_local_reparameterization = True + + self.build_graph() + + def build_mu_variable(self, shape): + """Returns a mean variable initialized as N(0, 0.05).""" + return tf.Variable(tf.random_normal(shape, 0.0, 0.05)) + + def build_sigma_variable(self, shape, init=-5.): + """Returns a sigma variable initialized as N(init, 0.05).""" + # Initialize sigma to be very small initially to encourage MAP opt first + return tf.Variable(tf.random_normal(shape, init, 0.05)) + + def build_layer(self, input_x, input_x_local, shape, + layer_id, activation_fn=tf.nn.relu): + """Builds a variational layer, and computes KL term. + + Args: + input_x: Input to the variational layer. + input_x_local: Input when the local reparameterization trick was applied. + shape: [number_inputs, number_outputs] for the layer. + layer_id: Number of layer in the architecture. + activation_fn: Activation function to apply. + + Returns: + output_h: Output of the variational layer. + output_h_local: Output when local reparameterization trick was applied. + neg_kl: Negative KL term for the layer. 
+ """ + + w_mu = self.build_mu_variable(shape) + w_sigma = self.sigma_transform(self.build_sigma_variable(shape)) + w_noise = tf.random_normal(shape) + w = w_mu + w_sigma * w_noise + + b_mu = self.build_mu_variable([1, shape[1]]) + b_sigma = self.sigma_transform(self.build_sigma_variable([1, shape[1]])) + b = b_mu + + # Store means and stds + self.weights_m[layer_id] = w_mu + self.weights_std[layer_id] = w_sigma + self.biases_m[layer_id] = b_mu + self.biases_std[layer_id] = b_sigma + + # Create outputs + output_h = activation_fn(tf.matmul(input_x, w) + b) + + if self.use_local_reparameterization: + # Use analytic KL divergence wrt the prior + neg_kl = -analytic_kl(w_mu, w_sigma, + 0., tf.to_float(np.sqrt(2./shape[0]))) + else: + # Create empirical KL loss terms + log_p = log_gaussian(w, 0., tf.to_float(np.sqrt(2./shape[0]))) + log_q = log_gaussian(w, tf.stop_gradient(w_mu), tf.stop_gradient(w_sigma)) + neg_kl = log_p - log_q + + # Apply local reparameterization trick: sample activations pre nonlinearity + m_h = tf.matmul(input_x_local, w_mu) + b + v_h = tf.matmul(tf.square(input_x_local), tf.square(w_sigma)) + output_h_local = m_h + tf.sqrt(v_h + 1e-6) * tf.random_normal(tf.shape(v_h)) + output_h_local = activation_fn(output_h_local) + + return output_h, output_h_local, neg_kl + + def build_action_noise(self): + """Defines a model for additive noise per action, and its KL term.""" + + # Define mean and std variables (log-normal dist) for each action. + noise_sigma_mu = (self.build_mu_variable([1, self.n_out]) + + self.inverse_sigma_transform(self.hparams.noise_sigma)) + noise_sigma_sigma = self.sigma_transform( + self.build_sigma_variable([1, self.n_out])) + + pre_noise_sigma = (noise_sigma_mu + + tf.random_normal([1, self.n_out]) * noise_sigma_sigma) + self.noise_sigma = self.sigma_transform(pre_noise_sigma) + + # Compute KL for additive noise sigma terms. + if getattr(self.hparams, "infer_noise_sigma", False): + neg_kl_term = log_gaussian( + pre_noise_sigma, + self.inverse_sigma_transform(self.hparams.noise_sigma), + self.hparams.prior_sigma + ) + neg_kl_term -= log_gaussian(pre_noise_sigma, + noise_sigma_mu, + noise_sigma_sigma) + else: + neg_kl_term = 0. + + return neg_kl_term + + def build_model(self, activation_fn=tf.nn.relu): + """Defines the actual NN model with fully connected layers. + + The loss is computed for partial feedback settings (bandits), so only + the observed outcome is backpropagated (see weighted loss). + Selects the optimizer and, finally, it also initializes the graph. + + Args: + activation_fn: the activation function used in the nn layers. + """ + + def weight_prior(dtype, shape, c, d, e): + del c, d, e + return tfd.Independent( + tfd.Normal(loc=tf.zeros(shape, dtype), + scale=tf.to_float(np.sqrt(2) / shape[0])), + reinterpreted_batch_ndims=tf.size(shape)) + + if self.verbose: + print("Initializing model {}.".format(self.name)) + + # Compute model additive noise for each action with log-normal distribution + neg_kl_term = self.build_action_noise() + + # Build variational network using self.x as input. + input_x = self.x + + # Create Keras model using DenseLocalReparameterization (prior N(0, 1)). 
+ model_layers = [ + tfl.DenseLocalReparameterization( + n_nodes, + activation=tf.nn.relu, + kernel_prior_fn=weight_prior + ) + for n_nodes in self.layers if n_nodes > 0 + ] + + output_layer = tfl.DenseLocalReparameterization( + self.n_out, + activation=lambda x: x, + kernel_prior_fn=weight_prior + ) + model_layers.append(output_layer) + + model = tf.keras.Sequential(model_layers) + self.y_pred = model(input_x) + + # Compute KL term + neg_kl_term -= tf.add_n(model.losses) + + # Compute log likelihood (with learned or fixed noise level) + if getattr(self.hparams, "infer_noise_sigma", False): + log_likelihood = log_gaussian( + self.y, self.y_pred, self.noise_sigma, reduce_sum=False) + else: + log_likelihood = log_gaussian( + self.y, self.y_pred, self.hparams.noise_sigma, reduce_sum=False) + + # Only take into account observed outcomes (bandits setting) + batch_size = tf.to_float(tf.shape(self.x)[0]) + weighted_log_likelihood = tf.reduce_sum( + log_likelihood * self.weights) / batch_size + + # The objective is 1/n * (\sum_i log_like_i - KL); neg_kl_term estimates -KL + elbo = weighted_log_likelihood + (neg_kl_term / self.n) + + self.loss = -elbo + self.global_step = tf.train.get_or_create_global_step() + self.train_op = tf.train.AdamOptimizer(self.hparams.initial_lr).minimize( + self.loss, global_step=self.global_step) + + # Create tensorboard metrics + self.create_summaries() + self.summary_writer = tf.summary.FileWriter( + "{}/graph_{}".format(FLAGS.logdir, self.name), self.sess.graph) + + def build_graph(self): + """Defines graph, session, placeholders, and model. + + Placeholders are: n (size of the dataset), x and y (context and observed + reward for each action), and weights (one-hot encoding of selected action + for each context, i.e., only possibly non-zero element in each y). + """ + + self.graph = tf.Graph() + with self.graph.as_default(): + + self.sess = tf.Session() + + self.n = tf.placeholder(shape=[], dtype=tf.float32) + + self.x = tf.placeholder(shape=[None, self.n_in], dtype=tf.float32) + self.y = tf.placeholder(shape=[None, self.n_out], dtype=tf.float32) + self.weights = tf.placeholder(shape=[None, self.n_out], dtype=tf.float32) + + self.build_model() + self.sess.run(tf.global_variables_initializer()) + + def create_summaries(self): + """Defines summaries including mean loss, and global step.""" + + with self.graph.as_default(): + with tf.name_scope(self.name + "_summaries"): + tf.summary.scalar("loss", self.loss) + tf.summary.scalar("global_step", self.global_step) + self.summary_op = tf.summary.merge_all() + + def assign_lr(self): + """Resets the learning rate in dynamic schedules for subsequent trainings. + + In bandits settings, we do expand our dataset over time. Then, we need to + re-train the network with the new data. The algorithms that do not keep + the step constant, can reset it at the start of each *training* process. + """ + + decay_steps = 1 + if self.hparams.activate_decay: + current_gs = self.sess.run(self.global_step) + with self.graph.as_default(): + self.lr = tf.train.inverse_time_decay(self.hparams.initial_lr, + self.global_step - current_gs, + decay_steps, + self.hparams.lr_decay_rate) + + def train(self, data, num_steps): + """Trains the BNN for num_steps, using the data in 'data'. + + Args: + data: ContextualDataset object that provides the data. + num_steps: Number of minibatches to train the network for. + + Returns: + losses: Loss history during training. 
+ """ + + if self.times_trained < self.cleared_times_trained: + num_steps = int(self.training_schedule[self.times_trained]) + self.times_trained += 1 + + losses = [] + + with self.graph.as_default(): + + if self.verbose: + print("Training {} for {} steps...".format(self.name, num_steps)) + + for step in range(num_steps): + x, y, weights = data.get_batch_with_weights(self.hparams.batch_size) + _, summary, global_step, loss = self.sess.run( + [self.train_op, self.summary_op, self.global_step, self.loss], + feed_dict={ + self.x: x, + self.y: y, + self.weights: weights, + self.n: data.num_points(self.f_num_points), + }) + + losses.append(loss) + + if step % self.hparams.freq_summary == 0: + if self.hparams.show_training: + print("{} | step: {}, loss: {}".format( + self.name, global_step, loss)) + self.summary_writer.add_summary(summary, global_step) + + return losses diff --git a/models/research/deep_contextual_bandits/bandits/algorithms/bootstrapped_bnn_sampling.py b/models/research/deep_contextual_bandits/bandits/algorithms/bootstrapped_bnn_sampling.py new file mode 100644 index 0000000000000000000000000000000000000000..7c44b681c7bd1da113ec29c1bb6d370c88d7053f --- /dev/null +++ b/models/research/deep_contextual_bandits/bandits/algorithms/bootstrapped_bnn_sampling.py @@ -0,0 +1,98 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Contextual algorithm based on boostrapping neural networks.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + +from bandits.core.bandit_algorithm import BanditAlgorithm +from bandits.core.contextual_dataset import ContextualDataset +from bandits.algorithms.neural_bandit_model import NeuralBanditModel + + +class BootstrappedBNNSampling(BanditAlgorithm): + """Thompson Sampling algorithm based on training several neural networks.""" + + def __init__(self, name, hparams, optimizer='RMS'): + """Creates a BootstrappedSGDSampling object based on a specific optimizer. + + hparams.q: Number of models that are independently trained. + hparams.p: Prob of independently including each datapoint in each model. + + Args: + name: Name given to the instance. + hparams: Hyperparameters for each individual model. + optimizer: Neural network optimization algorithm. 
+ """ + + self.name = name + self.hparams = hparams + self.optimizer_n = optimizer + + self.training_freq = hparams.training_freq + self.training_epochs = hparams.training_epochs + self.t = 0 + + self.q = hparams.q + self.p = hparams.p + + self.datasets = [ + ContextualDataset(hparams.context_dim, + hparams.num_actions, + hparams.buffer_s) + for _ in range(self.q) + ] + + self.bnn_boot = [ + NeuralBanditModel(optimizer, hparams, '{}-{}-bnn'.format(name, i)) + for i in range(self.q) + ] + + def action(self, context): + """Selects action for context based on Thompson Sampling using one BNN.""" + + if self.t < self.hparams.num_actions * self.hparams.initial_pulls: + # round robin until each action has been taken "initial_pulls" times + return self.t % self.hparams.num_actions + + # choose model uniformly at random + model_index = np.random.randint(self.q) + + with self.bnn_boot[model_index].graph.as_default(): + c = context.reshape((1, self.hparams.context_dim)) + output = self.bnn_boot[model_index].sess.run( + self.bnn_boot[model_index].y_pred, + feed_dict={self.bnn_boot[model_index].x: c}) + return np.argmax(output) + + def update(self, context, action, reward): + """Updates the data buffer, and re-trains the BNN every self.freq_update.""" + + self.t += 1 + for i in range(self.q): + # include the data point with probability p independently in each dataset + if np.random.random() < self.p or self.t < 2: + self.datasets[i].add(context, action, reward) + + if self.t % self.training_freq == 0: + # update all the models: + for i in range(self.q): + if self.hparams.reset_lr: + self.bnn_boot[i].assign_lr() + self.bnn_boot[i].train(self.datasets[i], self.training_epochs) diff --git a/models/research/deep_contextual_bandits/bandits/algorithms/fixed_policy_sampling.py b/models/research/deep_contextual_bandits/bandits/algorithms/fixed_policy_sampling.py new file mode 100644 index 0000000000000000000000000000000000000000..d5ad6e3ed9ed9d1478e6ac132b41cfb5ae1bb47a --- /dev/null +++ b/models/research/deep_contextual_bandits/bandits/algorithms/fixed_policy_sampling.py @@ -0,0 +1,51 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Contextual bandit algorithm that selects an action at random.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + +from bandits.core.bandit_algorithm import BanditAlgorithm + + +class FixedPolicySampling(BanditAlgorithm): + """Defines a baseline; returns an action at random with probs given by p.""" + + def __init__(self, name, p, hparams): + """Creates a FixedPolicySampling object. + + Args: + name: Name of the algorithm. + p: Vector of normalized probabilities corresponding to sampling each arm. + hparams: Hyper-parameters, including the number of arms (num_actions). + + Raises: + ValueError: when p dimension does not match the number of actions. 
+ """ + + self.name = name + self.p = p + self.hparams = hparams + + if len(p) != self.hparams.num_actions: + raise ValueError('Policy needs k probabilities.') + + def action(self, context): + """Selects an action at random according to distribution p.""" + return np.random.choice(range(self.hparams.num_actions), p=self.p) diff --git a/models/research/deep_contextual_bandits/bandits/algorithms/linear_full_posterior_sampling.py b/models/research/deep_contextual_bandits/bandits/algorithms/linear_full_posterior_sampling.py new file mode 100644 index 0000000000000000000000000000000000000000..15ef8fa9b562101111042dc2ce7b17174018ab6e --- /dev/null +++ b/models/research/deep_contextual_bandits/bandits/algorithms/linear_full_posterior_sampling.py @@ -0,0 +1,164 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Contextual algorithm that keeps a full linear posterior for each arm.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from scipy.stats import invgamma + +from bandits.core.bandit_algorithm import BanditAlgorithm +from bandits.core.contextual_dataset import ContextualDataset + + +class LinearFullPosteriorSampling(BanditAlgorithm): + """Thompson Sampling with independent linear models and unknown noise var.""" + + def __init__(self, name, hparams): + """Initialize posterior distributions and hyperparameters. + + Assume a linear model for each action i: reward = context^T beta_i + noise + Each beta_i has a Gaussian prior (lambda parameter), each sigma2_i (noise + level) has an inverse Gamma prior (a0, b0 parameters). Mean, covariance, + and precision matrices are initialized, and the ContextualDataset created. + + Args: + name: Name of the algorithm. + hparams: Hyper-parameters of the algorithm. + """ + + self.name = name + self.hparams = hparams + + # Gaussian prior for each beta_i + self._lambda_prior = self.hparams.lambda_prior + + self.mu = [ + np.zeros(self.hparams.context_dim + 1) + for _ in range(self.hparams.num_actions) + ] + + self.cov = [(1.0 / self.lambda_prior) * np.eye(self.hparams.context_dim + 1) + for _ in range(self.hparams.num_actions)] + + self.precision = [ + self.lambda_prior * np.eye(self.hparams.context_dim + 1) + for _ in range(self.hparams.num_actions) + ] + + # Inverse Gamma prior for each sigma2_i + self._a0 = self.hparams.a0 + self._b0 = self.hparams.b0 + + self.a = [self._a0 for _ in range(self.hparams.num_actions)] + self.b = [self._b0 for _ in range(self.hparams.num_actions)] + + self.t = 0 + self.data_h = ContextualDataset(hparams.context_dim, + hparams.num_actions, + intercept=True) + + def action(self, context): + """Samples beta's from posterior, and chooses best action accordingly. + + Args: + context: Context for which the action need to be chosen. + + Returns: + action: Selected action for the context. 
+ """ + + # Round robin until each action has been selected "initial_pulls" times + if self.t < self.hparams.num_actions * self.hparams.initial_pulls: + return self.t % self.hparams.num_actions + + # Sample sigma2, and beta conditional on sigma2 + sigma2_s = [ + self.b[i] * invgamma.rvs(self.a[i]) + for i in range(self.hparams.num_actions) + ] + + try: + beta_s = [ + np.random.multivariate_normal(self.mu[i], sigma2_s[i] * self.cov[i]) + for i in range(self.hparams.num_actions) + ] + except np.linalg.LinAlgError as e: + # Sampling could fail if covariance is not positive definite + print('Exception when sampling from {}.'.format(self.name)) + print('Details: {} | {}.'.format(e.message, e.args)) + d = self.hparams.context_dim + 1 + beta_s = [ + np.random.multivariate_normal(np.zeros((d)), np.eye(d)) + for i in range(self.hparams.num_actions) + ] + + # Compute sampled expected values, intercept is last component of beta + vals = [ + np.dot(beta_s[i][:-1], context.T) + beta_s[i][-1] + for i in range(self.hparams.num_actions) + ] + + return np.argmax(vals) + + def update(self, context, action, reward): + """Updates action posterior using the linear Bayesian regression formula. + + Args: + context: Last observed context. + action: Last observed action. + reward: Last observed reward. + """ + + self.t += 1 + self.data_h.add(context, action, reward) + + # Update posterior of action with formulas: \beta | x,y ~ N(mu_q, cov_q) + x, y = self.data_h.get_data(action) + + # The algorithm could be improved with sequential update formulas (cheaper) + s = np.dot(x.T, x) + + # Some terms are removed as we assume prior mu_0 = 0. + precision_a = s + self.lambda_prior * np.eye(self.hparams.context_dim + 1) + cov_a = np.linalg.inv(precision_a) + mu_a = np.dot(cov_a, np.dot(x.T, y)) + + # Inverse Gamma posterior update + a_post = self.a0 + x.shape[0] / 2.0 + b_upd = 0.5 * (np.dot(y.T, y) - np.dot(mu_a.T, np.dot(precision_a, mu_a))) + b_post = self.b0 + b_upd + + # Store new posterior distributions + self.mu[action] = mu_a + self.cov[action] = cov_a + self.precision[action] = precision_a + self.a[action] = a_post + self.b[action] = b_post + + @property + def a0(self): + return self._a0 + + @property + def b0(self): + return self._b0 + + @property + def lambda_prior(self): + return self._lambda_prior diff --git a/models/research/deep_contextual_bandits/bandits/algorithms/multitask_gp.py b/models/research/deep_contextual_bandits/bandits/algorithms/multitask_gp.py new file mode 100644 index 0000000000000000000000000000000000000000..0c35dfaeaf9e30993d49d807f16dd64e15d3fc66 --- /dev/null +++ b/models/research/deep_contextual_bandits/bandits/algorithms/multitask_gp.py @@ -0,0 +1,374 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""A Multitask Gaussian process.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import flags +from absl import logging + +import numpy as np +import tensorflow as tf +from bandits.core.bayesian_nn import BayesianNN + +FLAGS = flags.FLAGS +tfd = tf.contrib.distributions + +class MultitaskGP(BayesianNN): + """Implements a Gaussian process with multi-task outputs. + + Optimizes the hyperparameters over the log marginal likelihood. + Uses a Matern 3/2 + linear covariance and returns + sampled predictions for test inputs. The outputs are optionally + correlated where the correlation structure is learned through latent + embeddings of the tasks. + """ + + def __init__(self, hparams): + self.name = "MultiTaskGP" + self.hparams = hparams + + self.n_in = self.hparams.context_dim + self.n_out = self.hparams.num_outputs + self.keep_fixed_after_max_obs = self.hparams.keep_fixed_after_max_obs + + self._show_training = self.hparams.show_training + self._freq_summary = self.hparams.freq_summary + + # Dimensionality of the latent task vectors + self.task_latent_dim = self.hparams.task_latent_dim + + # Maximum number of observations to include + self.max_num_points = self.hparams.max_num_points + + if self.hparams.learn_embeddings: + self.learn_embeddings = self.hparams.learn_embeddings + else: + self.learn_embeddings = False + + # create the graph corresponding to the BNN instance + self.graph = tf.Graph() + with self.graph.as_default(): + # store a new session for the graph + self.sess = tf.Session() + + with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE): + self.n = tf.placeholder(shape=[], dtype=tf.float64) + self.x = tf.placeholder(shape=[None, self.n_in], dtype=tf.float64) + self.x_in = tf.placeholder(shape=[None, self.n_in], dtype=tf.float64) + self.y = tf.placeholder(shape=[None, self.n_out], dtype=tf.float64) + self.weights = tf.placeholder(shape=[None, self.n_out], + dtype=tf.float64) + + self.build_model() + self.sess.run(tf.global_variables_initializer()) + + def atleast_2d(self, x, dims): + return tf.reshape(tf.expand_dims(x, axis=0), (-1, dims)) + + def sq_dist(self, x, x2): + a2 = tf.reduce_sum(tf.square(x), 1) + b2 = tf.reduce_sum(tf.square(x2), 1) + sqdists = tf.expand_dims(a2, 1) + b2 - 2.0 * tf.matmul(x, tf.transpose(x2)) + return sqdists + + # Covariance between outputs + def task_cov(self, x, x2): + """Squared Exponential Covariance Kernel over latent task embeddings.""" + # Index into latent task vectors + x_vecs = tf.gather(self.task_vectors, tf.argmax(x, axis=1), axis=0) + x2_vecs = tf.gather(self.task_vectors, tf.argmax(x2, axis=1), axis=0) + r = self.sq_dist(self.atleast_2d(x_vecs, self.task_latent_dim), + self.atleast_2d(x2_vecs, self.task_latent_dim)) + return tf.exp(-r) + + def cov(self, x, x2): + """Matern 3/2 + Linear Gaussian Process Covariance Function.""" + ls = tf.clip_by_value(self.length_scales, -5.0, 5.0) + ls_lin = tf.clip_by_value(self.length_scales_lin, -5.0, 5.0) + r = self.sq_dist(self.atleast_2d(x, self.n_in)/tf.nn.softplus(ls), + self.atleast_2d(x2, self.n_in)/tf.nn.softplus(ls)) + r = tf.clip_by_value(r, 0, 1e8) + + # Matern 3/2 Covariance + matern = (1.0 + tf.sqrt(3.0*r + 1e-16)) * tf.exp(-tf.sqrt(3.0*r + 1e-16)) + # Linear Covariance + lin = tf.matmul(x / tf.nn.softplus(ls_lin), + x2 / tf.nn.softplus(ls_lin), transpose_b=True) + return (tf.nn.softplus(self.amplitude) * matern + + 
tf.nn.softplus(self.amplitude_linear) * lin) + + def build_model(self): + """Defines the GP model. + + The loss is computed for partial feedback settings (bandits), so only + the observed outcome is backpropagated (see weighted loss). + Selects the optimizer and, finally, it also initializes the graph. + """ + + logging.info("Initializing model %s.", self.name) + self.global_step = tf.train.get_or_create_global_step() + + # Define state for the model (inputs, etc.) + self.x_train = tf.get_variable( + "training_data", + initializer=tf.ones( + [self.hparams.batch_size, self.n_in], dtype=tf.float64), + validate_shape=False, + trainable=False) + self.y_train = tf.get_variable( + "training_labels", + initializer=tf.zeros([self.hparams.batch_size, 1], dtype=tf.float64), + validate_shape=False, + trainable=False) + self.weights_train = tf.get_variable( + "weights_train", + initializer=tf.ones( + [self.hparams.batch_size, self.n_out], dtype=tf.float64), + validate_shape=False, + trainable=False) + self.input_op = tf.assign(self.x_train, self.x_in, validate_shape=False) + self.input_w_op = tf.assign( + self.weights_train, self.weights, validate_shape=False) + + self.input_std = tf.get_variable( + "data_standard_deviation", + initializer=tf.ones([1, self.n_out], dtype=tf.float64), + dtype=tf.float64, + trainable=False) + self.input_mean = tf.get_variable( + "data_mean", + initializer=tf.zeros([1, self.n_out], dtype=tf.float64), + dtype=tf.float64, + trainable=True) + + # GP Hyperparameters + self.noise = tf.get_variable( + "noise", initializer=tf.cast(0.0, dtype=tf.float64)) + self.amplitude = tf.get_variable( + "amplitude", initializer=tf.cast(1.0, dtype=tf.float64)) + self.amplitude_linear = tf.get_variable( + "linear_amplitude", initializer=tf.cast(1.0, dtype=tf.float64)) + self.length_scales = tf.get_variable( + "length_scales", initializer=tf.zeros([1, self.n_in], dtype=tf.float64)) + self.length_scales_lin = tf.get_variable( + "length_scales_linear", + initializer=tf.zeros([1, self.n_in], dtype=tf.float64)) + + # Latent embeddings of the different outputs for task covariance + self.task_vectors = tf.get_variable( + "latent_task_vectors", + initializer=tf.random_normal( + [self.n_out, self.task_latent_dim], dtype=tf.float64)) + + # Normalize outputs across each dimension + # Since we have different numbers of observations across each task, we + # normalize by their respective counts. 
+ index_counts = self.atleast_2d(tf.reduce_sum(self.weights, axis=0), + self.n_out) + index_counts = tf.where(index_counts > 0, index_counts, + tf.ones(tf.shape(index_counts), dtype=tf.float64)) + self.mean_op = tf.assign(self.input_mean, + tf.reduce_sum(self.y, axis=0) / index_counts) + self.var_op = tf.assign( + self.input_std, tf.sqrt(1e-4 + tf.reduce_sum(tf.square( + self.y - tf.reduce_sum(self.y, axis=0) / index_counts), axis=0) + / index_counts)) + + with tf.control_dependencies([self.var_op]): + y_normed = self.atleast_2d( + (self.y - self.input_mean) / self.input_std, self.n_out) + y_normed = self.atleast_2d(tf.boolean_mask(y_normed, self.weights > 0), 1) + self.out_op = tf.assign(self.y_train, y_normed, validate_shape=False) + + # Observation noise + alpha = tf.nn.softplus(self.noise) + 1e-6 + + # Covariance + with tf.control_dependencies([self.input_op, self.input_w_op, self.out_op]): + self.self_cov = (self.cov(self.x_in, self.x_in) * + self.task_cov(self.weights, self.weights) + + tf.eye(tf.shape(self.x_in)[0], dtype=tf.float64) * alpha) + + self.chol = tf.cholesky(self.self_cov) + self.kinv = tf.cholesky_solve(self.chol, tf.eye(tf.shape(self.x_in)[0], + dtype=tf.float64)) + + self.input_inv = tf.Variable( + tf.eye(self.hparams.batch_size, dtype=tf.float64), + validate_shape=False, + trainable=False) + self.input_cov_op = tf.assign(self.input_inv, self.kinv, + validate_shape=False) + + # Log determinant by taking the singular values along the diagonal + # of self.chol + with tf.control_dependencies([self.input_cov_op]): + logdet = 2.0 * tf.reduce_sum(tf.log(tf.diag_part(self.chol) + 1e-16)) + + # Log Marginal likelihood + self.marginal_ll = -tf.reduce_sum(-0.5 * tf.matmul( + tf.transpose(y_normed), tf.matmul(self.kinv, y_normed)) - 0.5 * logdet - + 0.5 * self.n * np.log(2 * np.pi)) + + zero = tf.cast(0., dtype=tf.float64) + one = tf.cast(1., dtype=tf.float64) + standard_normal = tfd.Normal(loc=zero, scale=one) + + # Loss is marginal likelihood and priors + self.loss = tf.reduce_sum( + self.marginal_ll - + (standard_normal.log_prob(self.amplitude) + + standard_normal.log_prob(tf.exp(self.noise)) + + standard_normal.log_prob(self.amplitude_linear) + + tfd.Normal(loc=zero, scale=one * 10.).log_prob( + self.task_vectors)) + ) + + # Optimizer for hyperparameters + optimizer = tf.train.AdamOptimizer(learning_rate=self.hparams.lr) + vars_to_optimize = [ + self.amplitude, self.length_scales, self.length_scales_lin, + self.amplitude_linear, self.noise, self.input_mean + ] + + if self.learn_embeddings: + vars_to_optimize.append(self.task_vectors) + grads = optimizer.compute_gradients(self.loss, vars_to_optimize) + self.train_op = optimizer.apply_gradients(grads, + global_step=self.global_step) + + # Predictions for test data + self.y_mean, self.y_pred = self.posterior_mean_and_sample(self.x) + + # create tensorboard metrics + self.create_summaries() + self.summary_writer = tf.summary.FileWriter("{}/graph_{}".format( + FLAGS.logdir, self.name), self.sess.graph) + self.check = tf.add_check_numerics_ops() + + def posterior_mean_and_sample(self, candidates): + """Draw samples for test predictions. + + Given a Tensor of 'candidates' inputs, returns samples from the posterior + and the posterior mean prediction for those inputs. + + Args: + candidates: A (num-examples x num-dims) Tensor containing the inputs for + which to return predictions. 
+ Returns: + y_mean: The posterior mean prediction given these inputs + y_sample: A sample from the posterior of the outputs given these inputs + """ + # Cross-covariance for test predictions + w = tf.identity(self.weights_train) + inds = tf.squeeze( + tf.reshape( + tf.tile( + tf.reshape(tf.range(self.n_out), (self.n_out, 1)), + (1, tf.shape(candidates)[0])), (-1, 1))) + + cross_cov = self.cov(tf.tile(candidates, [self.n_out, 1]), self.x_train) + cross_task_cov = self.task_cov(tf.one_hot(inds, self.n_out), w) + cross_cov *= cross_task_cov + + # Test mean prediction + y_mean = tf.matmul(cross_cov, tf.matmul(self.input_inv, self.y_train)) + + # Test sample predictions + # Note this can be done much more efficiently using Kronecker products + # if all tasks are fully observed (which we won't assume) + test_cov = ( + self.cov(tf.tile(candidates, [self.n_out, 1]), + tf.tile(candidates, [self.n_out, 1])) * + self.task_cov(tf.one_hot(inds, self.n_out), + tf.one_hot(inds, self.n_out)) - + tf.matmul(cross_cov, + tf.matmul(self.input_inv, + tf.transpose(cross_cov)))) + + # Get the matrix square root through an SVD for drawing samples + # This seems more numerically stable than the Cholesky + s, _, v = tf.svd(test_cov, full_matrices=True) + test_sqrt = tf.matmul(v, tf.matmul(tf.diag(s), tf.transpose(v))) + + y_sample = ( + tf.matmul( + test_sqrt, + tf.random_normal([tf.shape(test_sqrt)[0], 1], dtype=tf.float64)) + + y_mean) + + y_sample = ( + tf.transpose(tf.reshape(y_sample, + (self.n_out, -1))) * self.input_std + + self.input_mean) + + return y_mean, y_sample + + def create_summaries(self): + with self.graph.as_default(): + tf.summary.scalar("loss", self.loss) + tf.summary.scalar("log_noise", self.noise) + tf.summary.scalar("log_amp", self.amplitude) + tf.summary.scalar("log_amp_lin", self.amplitude_linear) + tf.summary.histogram("length_scales", self.length_scales) + tf.summary.histogram("length_scales_lin", self.length_scales_lin) + self.summary_op = tf.summary.merge_all() + + def train(self, data, num_steps): + """Trains the GP for num_steps, using the data in 'data'. + + Args: + data: ContextualDataset object that provides the data. + num_steps: Number of minibatches to train the network for. 
+ """ + + logging.info("Training %s for %d steps...", self.name, num_steps) + for step in range(num_steps): + numpts = min(data.num_points(None), self.max_num_points) + if numpts >= self.max_num_points and self.keep_fixed_after_max_obs: + x = data.contexts[:numpts, :] + y = data.rewards[:numpts, :] + weights = np.zeros((x.shape[0], self.n_out)) + for i, val in enumerate(data.actions[:numpts]): + weights[i, val] = 1.0 + else: + x, y, weights = data.get_batch_with_weights(numpts) + + ops = [ + self.global_step, self.summary_op, self.loss, self.noise, + self.amplitude, self.amplitude_linear, self.length_scales, + self.length_scales_lin, self.input_cov_op, self.input_op, self.var_op, + self.input_w_op, self.out_op, self.train_op + ] + + res = self.sess.run(ops, + feed_dict={self.x: x, + self.x_in: x, + self.y: y, + self.weights: weights, + self.n: numpts, + }) + + if step % self._freq_summary == 0: + if self._show_training: + logging.info("step: %d, loss: %g noise: %f amp: %f amp_lin: %f", + step, res[2], res[3], res[4], res[5]) + summary = res[1] + global_step = res[0] + self.summary_writer.add_summary(summary, global_step=global_step) diff --git a/models/research/deep_contextual_bandits/bandits/algorithms/neural_bandit_model.py b/models/research/deep_contextual_bandits/bandits/algorithms/neural_bandit_model.py new file mode 100644 index 0000000000000000000000000000000000000000..99d7cd4dc8e2c35571f82bbb79ea1564a148ff5d --- /dev/null +++ b/models/research/deep_contextual_bandits/bandits/algorithms/neural_bandit_model.py @@ -0,0 +1,220 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Define a family of neural network architectures for bandits. + +The network accepts different type of optimizers that could lead to different +approximations of the posterior distribution or simply to point estimates. 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from absl import flags +from bandits.core.bayesian_nn import BayesianNN + +FLAGS = flags.FLAGS + + +class NeuralBanditModel(BayesianNN): + """Implements a neural network for bandit problems.""" + + def __init__(self, optimizer, hparams, name): + """Saves hyper-params and builds the Tensorflow graph.""" + + self.opt_name = optimizer + self.name = name + self.hparams = hparams + self.verbose = getattr(self.hparams, "verbose", True) + self.times_trained = 0 + self.build_model() + + def build_layer(self, x, num_units): + """Builds a layer with input x; dropout and layer norm if specified.""" + + init_s = self.hparams.init_scale + + layer_n = getattr(self.hparams, "layer_norm", False) + dropout = getattr(self.hparams, "use_dropout", False) + + nn = tf.contrib.layers.fully_connected( + x, + num_units, + activation_fn=self.hparams.activation, + normalizer_fn=None if not layer_n else tf.contrib.layers.layer_norm, + normalizer_params={}, + weights_initializer=tf.random_uniform_initializer(-init_s, init_s) + ) + + if dropout: + nn = tf.nn.dropout(nn, self.hparams.keep_prob) + + return nn + + def forward_pass(self): + + init_s = self.hparams.init_scale + + scope_name = "prediction_{}".format(self.name) + with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE): + nn = self.x + for num_units in self.hparams.layer_sizes: + if num_units > 0: + nn = self.build_layer(nn, num_units) + + y_pred = tf.layers.dense( + nn, + self.hparams.num_actions, + kernel_initializer=tf.random_uniform_initializer(-init_s, init_s)) + + return nn, y_pred + + def build_model(self): + """Defines the actual NN model with fully connected layers. + + The loss is computed for partial feedback settings (bandits), so only + the observed outcome is backpropagated (see weighted loss). + Selects the optimizer and, finally, it also initializes the graph. 
+ """ + + # create and store the graph corresponding to the BNN instance + self.graph = tf.Graph() + + with self.graph.as_default(): + + # create and store a new session for the graph + self.sess = tf.Session() + + with tf.name_scope(self.name): + + self.global_step = tf.train.get_or_create_global_step() + + # context + self.x = tf.placeholder( + shape=[None, self.hparams.context_dim], + dtype=tf.float32, + name="{}_x".format(self.name)) + + # reward vector + self.y = tf.placeholder( + shape=[None, self.hparams.num_actions], + dtype=tf.float32, + name="{}_y".format(self.name)) + + # weights (1 for selected action, 0 otherwise) + self.weights = tf.placeholder( + shape=[None, self.hparams.num_actions], + dtype=tf.float32, + name="{}_w".format(self.name)) + + # with tf.variable_scope("prediction_{}".format(self.name)): + self.nn, self.y_pred = self.forward_pass() + self.loss = tf.squared_difference(self.y_pred, self.y) + self.weighted_loss = tf.multiply(self.weights, self.loss) + self.cost = tf.reduce_sum(self.weighted_loss) / self.hparams.batch_size + + if self.hparams.activate_decay: + self.lr = tf.train.inverse_time_decay( + self.hparams.initial_lr, self.global_step, + 1, self.hparams.lr_decay_rate) + else: + self.lr = tf.Variable(self.hparams.initial_lr, trainable=False) + + # create tensorboard metrics + self.create_summaries() + self.summary_writer = tf.summary.FileWriter( + "{}/graph_{}".format(FLAGS.logdir, self.name), self.sess.graph) + + tvars = tf.trainable_variables() + grads, _ = tf.clip_by_global_norm( + tf.gradients(self.cost, tvars), self.hparams.max_grad_norm) + + self.optimizer = self.select_optimizer() + + self.train_op = self.optimizer.apply_gradients( + zip(grads, tvars), global_step=self.global_step) + + self.init = tf.global_variables_initializer() + + self.initialize_graph() + + def initialize_graph(self): + """Initializes all variables.""" + + with self.graph.as_default(): + if self.verbose: + print("Initializing model {}.".format(self.name)) + self.sess.run(self.init) + + def assign_lr(self): + """Resets the learning rate in dynamic schedules for subsequent trainings. + + In bandits settings, we do expand our dataset over time. Then, we need to + re-train the network with the new data. The algorithms that do not keep + the step constant, can reset it at the start of each *training* process. + """ + + decay_steps = 1 + if self.hparams.activate_decay: + current_gs = self.sess.run(self.global_step) + with self.graph.as_default(): + self.lr = tf.train.inverse_time_decay(self.hparams.initial_lr, + self.global_step - current_gs, + decay_steps, + self.hparams.lr_decay_rate) + + def select_optimizer(self): + """Selects optimizer. To be extended (SGLD, KFAC, etc).""" + return tf.train.RMSPropOptimizer(self.lr) + + def create_summaries(self): + """Defines summaries including mean loss, learning rate, and global step.""" + + with self.graph.as_default(): + with tf.name_scope(self.name + "_summaries"): + tf.summary.scalar("cost", self.cost) + tf.summary.scalar("lr", self.lr) + tf.summary.scalar("global_step", self.global_step) + self.summary_op = tf.summary.merge_all() + + def train(self, data, num_steps): + """Trains the network for num_steps, using the provided data. + + Args: + data: ContextualDataset object that provides the data. + num_steps: Number of minibatches to train the network for. 
+ """ + + if self.verbose: + print("Training {} for {} steps...".format(self.name, num_steps)) + + with self.graph.as_default(): + + for step in range(num_steps): + x, y, w = data.get_batch_with_weights(self.hparams.batch_size) + _, cost, summary, lr = self.sess.run( + [self.train_op, self.cost, self.summary_op, self.lr], + feed_dict={self.x: x, self.y: y, self.weights: w}) + + if step % self.hparams.freq_summary == 0: + if self.hparams.show_training: + print("{} | step: {}, lr: {}, loss: {}".format( + self.name, step, lr, cost)) + self.summary_writer.add_summary(summary, step) + + self.times_trained += 1 diff --git a/models/research/deep_contextual_bandits/bandits/algorithms/neural_linear_sampling.py b/models/research/deep_contextual_bandits/bandits/algorithms/neural_linear_sampling.py new file mode 100644 index 0000000000000000000000000000000000000000..43fc551614b49ad34538aa64090bcda5f823a60f --- /dev/null +++ b/models/research/deep_contextual_bandits/bandits/algorithms/neural_linear_sampling.py @@ -0,0 +1,180 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Thompson Sampling with linear posterior over a learnt deep representation.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from scipy.stats import invgamma + +from bandits.core.bandit_algorithm import BanditAlgorithm +from bandits.core.contextual_dataset import ContextualDataset +from bandits.algorithms.neural_bandit_model import NeuralBanditModel + + +class NeuralLinearPosteriorSampling(BanditAlgorithm): + """Full Bayesian linear regression on the last layer of a deep neural net.""" + + def __init__(self, name, hparams, optimizer='RMS'): + + self.name = name + self.hparams = hparams + self.latent_dim = self.hparams.layer_sizes[-1] + + # Gaussian prior for each beta_i + self._lambda_prior = self.hparams.lambda_prior + + self.mu = [ + np.zeros(self.latent_dim) + for _ in range(self.hparams.num_actions) + ] + + self.cov = [(1.0 / self.lambda_prior) * np.eye(self.latent_dim) + for _ in range(self.hparams.num_actions)] + + self.precision = [ + self.lambda_prior * np.eye(self.latent_dim) + for _ in range(self.hparams.num_actions) + ] + + # Inverse Gamma prior for each sigma2_i + self._a0 = self.hparams.a0 + self._b0 = self.hparams.b0 + + self.a = [self._a0 for _ in range(self.hparams.num_actions)] + self.b = [self._b0 for _ in range(self.hparams.num_actions)] + + # Regression and NN Update Frequency + self.update_freq_lr = hparams.training_freq + self.update_freq_nn = hparams.training_freq_network + + self.t = 0 + self.optimizer_n = optimizer + + self.num_epochs = hparams.training_epochs + self.data_h = ContextualDataset(hparams.context_dim, + hparams.num_actions, + intercept=False) + self.latent_h = ContextualDataset(self.latent_dim, + hparams.num_actions, + intercept=False) + self.bnn = 
NeuralBanditModel(optimizer, hparams, '{}-bnn'.format(name)) + + def action(self, context): + """Samples beta's from posterior, and chooses best action accordingly.""" + + # Round robin until each action has been selected "initial_pulls" times + if self.t < self.hparams.num_actions * self.hparams.initial_pulls: + return self.t % self.hparams.num_actions + + # Sample sigma2, and beta conditional on sigma2 + sigma2_s = [ + self.b[i] * invgamma.rvs(self.a[i]) + for i in range(self.hparams.num_actions) + ] + + try: + beta_s = [ + np.random.multivariate_normal(self.mu[i], sigma2_s[i] * self.cov[i]) + for i in range(self.hparams.num_actions) + ] + except np.linalg.LinAlgError as e: + # Sampling could fail if covariance is not positive definite + print('Exception when sampling for {}.'.format(self.name)) + print('Details: {} | {}.'.format(e, e.args)) + d = self.latent_dim + beta_s = [ + np.random.multivariate_normal(np.zeros((d)), np.eye(d)) + for i in range(self.hparams.num_actions) + ] + + # Compute last-layer representation for the current context + with self.bnn.graph.as_default(): + c = context.reshape((1, self.hparams.context_dim)) + z_context = self.bnn.sess.run(self.bnn.nn, feed_dict={self.bnn.x: c}) + + # Apply Thompson Sampling to last-layer representation + vals = [ + np.dot(beta_s[i], z_context.T) for i in range(self.hparams.num_actions) + ] + return np.argmax(vals) + + def update(self, context, action, reward): + """Updates the posterior using linear Bayesian regression formulas.""" + + self.t += 1 + self.data_h.add(context, action, reward) + c = context.reshape((1, self.hparams.context_dim)) + z_context = self.bnn.sess.run(self.bnn.nn, feed_dict={self.bnn.x: c}) + self.latent_h.add(z_context, action, reward) + + # Retrain the network on the original data (data_h) + if self.t % self.update_freq_nn == 0: + + if self.hparams.reset_lr: + self.bnn.assign_lr() + self.bnn.train(self.data_h, self.num_epochs) + + # Update the latent representation of every datapoint collected so far + new_z = self.bnn.sess.run(self.bnn.nn, + feed_dict={self.bnn.x: self.data_h.contexts}) + self.latent_h.replace_data(contexts=new_z) + + # Update the Bayesian Linear Regression + if self.t % self.update_freq_lr == 0: + + # Find all the actions to update + actions_to_update = self.latent_h.actions[:-self.update_freq_lr] + + for action_v in np.unique(actions_to_update): + + # Update action posterior with formulas: \beta | z,y ~ N(mu_q, cov_q) + z, y = self.latent_h.get_data(action_v) + + # The algorithm could be improved with sequential formulas (cheaper) + s = np.dot(z.T, z) + + # Some terms are removed as we assume prior mu_0 = 0.
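+          # Concretely, with prior beta ~ N(0, lambda_prior^{-1} I) and
+          # sigma^2 ~ InvGamma(a0, b0), the update below computes:
+          #   precision_a = Z^T Z + lambda_prior * I
+          #   mu_a        = precision_a^{-1} Z^T y
+          #   a_post      = a0 + n / 2
+          #   b_post      = b0 + (y^T y - mu_a^T precision_a mu_a) / 2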
+ precision_a = s + self.lambda_prior * np.eye(self.latent_dim) + cov_a = np.linalg.inv(precision_a) + mu_a = np.dot(cov_a, np.dot(z.T, y)) + + # Inverse Gamma posterior update + a_post = self.a0 + z.shape[0] / 2.0 + b_upd = 0.5 * np.dot(y.T, y) + b_upd -= 0.5 * np.dot(mu_a.T, np.dot(precision_a, mu_a)) + b_post = self.b0 + b_upd + + # Store new posterior distributions + self.mu[action_v] = mu_a + self.cov[action_v] = cov_a + self.precision[action_v] = precision_a + self.a[action_v] = a_post + self.b[action_v] = b_post + + @property + def a0(self): + return self._a0 + + @property + def b0(self): + return self._b0 + + @property + def lambda_prior(self): + return self._lambda_prior diff --git a/models/research/deep_contextual_bandits/bandits/algorithms/parameter_noise_sampling.py b/models/research/deep_contextual_bandits/bandits/algorithms/parameter_noise_sampling.py new file mode 100644 index 0000000000000000000000000000000000000000..19944ad577372b6971f03f1117fc33d5a2a276b1 --- /dev/null +++ b/models/research/deep_contextual_bandits/bandits/algorithms/parameter_noise_sampling.py @@ -0,0 +1,187 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Contextual algorithm based on Thompson Sampling + direct noise injection.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from scipy.special import logsumexp +import tensorflow as tf + +from absl import flags + +from bandits.core.bandit_algorithm import BanditAlgorithm +from bandits.core.contextual_dataset import ContextualDataset +from bandits.algorithms.neural_bandit_model import NeuralBanditModel + +FLAGS = flags.FLAGS + + +class ParameterNoiseSampling(BanditAlgorithm): + """Parameter Noise Sampling algorithm based on adding noise to net params. 
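+  Noise is injected directly into the network weights when selecting an
+  action, and its standard deviation is adapted over time from the KL
+  divergence between the original and perturbed action distributions
+  (see update_noise below).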
+ + Described in https://arxiv.org/abs/1706.01905 + """ + + def __init__(self, name, hparams): + """Creates the algorithm, and sets up the adaptive Gaussian noise.""" + + self.name = name + self.hparams = hparams + self.verbose = getattr(self.hparams, 'verbose', True) + self.noise_std = getattr(self.hparams, 'noise_std', 0.005) + self.eps = getattr(self.hparams, 'eps', 0.05) + self.d_samples = getattr(self.hparams, 'd_samples', 300) + self.optimizer = getattr(self.hparams, 'optimizer', 'RMS') + + # keep track of noise heuristic statistics + self.std_h = [self.noise_std] + self.eps_h = [self.eps] + self.kl_h = [] + self.t = 0 + + self.freq_update = hparams.training_freq + self.num_epochs = hparams.training_epochs + + self.data_h = ContextualDataset(hparams.context_dim, hparams.num_actions, + hparams.buffer_s) + self.bnn = NeuralBanditModel(self.optimizer, hparams, '{}-bnn'.format(name)) + + with self.bnn.graph.as_default(): + + # noise-injection std placeholder + self.bnn.noise_std_ph = tf.placeholder(tf.float32, shape=()) + + # create noise corruption op; adds noise to all weights + tvars = tf.trainable_variables() + self.bnn.noisy_grads = [ + tf.random_normal(v.get_shape(), 0, self.bnn.noise_std_ph) + for v in tvars + ] + + # add noise to all params, then compute prediction, then subtract. + with tf.control_dependencies(self.bnn.noisy_grads): + self.bnn.noise_add_ops = [ + tvars[i].assign_add(n) for i, n in enumerate(self.bnn.noisy_grads) + ] + with tf.control_dependencies(self.bnn.noise_add_ops): + # we force the prediction for 'y' to be recomputed after adding noise + self.bnn.noisy_nn, self.bnn.noisy_pred_val = self.bnn.forward_pass() + + self.bnn.noisy_pred = tf.identity(self.bnn.noisy_pred_val) + with tf.control_dependencies([tf.identity(self.bnn.noisy_pred)]): + self.bnn.noise_sub_ops = [ + tvars[i].assign_add(-n) + for i, n in enumerate(self.bnn.noisy_grads) + ] + + def action(self, context): + """Selects action based on Thompson Sampling *after* adding noise.""" + + if self.t < self.hparams.num_actions * self.hparams.initial_pulls: + # round robin until each action has been taken "initial_pulls" times + return self.t % self.hparams.num_actions + + with self.bnn.graph.as_default(): + # run noise prediction op to choose action, and subtract noise op after. + c = context.reshape((1, self.hparams.context_dim)) + output, _ = self.bnn.sess.run( + [self.bnn.noisy_pred, self.bnn.noise_sub_ops], + feed_dict={self.bnn.x: c, + self.bnn.noise_std_ph: self.noise_std}) + return np.argmax(output) + + def update(self, context, action, reward): + """Updates the data buffer, and re-trains the BNN and noise level.""" + + self.t += 1 + self.data_h.add(context, action, reward) + + if self.t % self.freq_update == 0: + self.bnn.train(self.data_h, self.num_epochs) + self.update_noise() + + def update_noise(self): + """Increase noise if distance btw original and corrupted distrib small.""" + + kl = self.compute_distance() + delta = -np.log1p(- self.eps + self.eps / self.hparams.num_actions) + + if kl < delta: + self.noise_std *= 1.01 + else: + self.noise_std /= 1.01 + + self.eps *= 0.99 + + if self.verbose: + print('Update eps={} | kl={} | std={} | delta={} | increase={}.'.format( + self.eps, kl, self.noise_std, delta, kl < delta)) + + # store noise-injection statistics for inspection: std, KL, eps. 
+ self.std_h.append(self.noise_std) + self.kl_h.append(kl) + self.eps_h.append(self.eps) + + def compute_distance(self): + """Computes empirical KL for original and corrupted output distributions.""" + + random_inputs, _ = self.data_h.get_batch(self.d_samples) + y_model = self.bnn.sess.run( + self.bnn.y_pred, + feed_dict={ + self.bnn.x: random_inputs, + self.bnn.noise_std_ph: self.noise_std + }) + y_noisy, _ = self.bnn.sess.run( + [self.bnn.noisy_pred, self.bnn.noise_sub_ops], + feed_dict={ + self.bnn.x: random_inputs, + self.bnn.noise_std_ph: self.noise_std + }) + + if self.verbose: + # display how often original & perturbed models propose different actions + s = np.sum([np.argmax(y_model[i, :]) == np.argmax(y_noisy[i, :]) + for i in range(y_model.shape[0])]) + print('{} | % of agreement btw original / corrupted actions: {}.'.format( + self.name, s / self.d_samples)) + + kl = self.compute_kl_with_logits(y_model, y_noisy) + return kl + + def compute_kl_with_logits(self, logits1, logits2): + """Computes KL from logits samples from two distributions.""" + + def exp_times_diff(a, b): + return np.multiply(np.exp(a), a - b) + + logsumexp1 = logsumexp(logits1, axis=1) + logsumexp2 = logsumexp(logits2, axis=1) + logsumexp_diff = logsumexp2 - logsumexp1 + + exp_diff = exp_times_diff(logits1, logits2) + exp_diff = np.sum(exp_diff, axis=1) + + inv_exp_sum = np.sum(np.exp(logits1), axis=1) + term1 = np.divide(exp_diff, inv_exp_sum) + + kl = term1 + logsumexp_diff + kl = np.maximum(kl, 0.0) + kl = np.nan_to_num(kl) + return np.mean(kl) diff --git a/models/research/deep_contextual_bandits/bandits/algorithms/posterior_bnn_sampling.py b/models/research/deep_contextual_bandits/bandits/algorithms/posterior_bnn_sampling.py new file mode 100644 index 0000000000000000000000000000000000000000..0f0c5d365a3a3e48006fe6b4e7e47ab73ea756cf --- /dev/null +++ b/models/research/deep_contextual_bandits/bandits/algorithms/posterior_bnn_sampling.py @@ -0,0 +1,92 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Contextual bandit algorithm based on Thompson Sampling and a Bayesian NN.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + +from bandits.core.bandit_algorithm import BanditAlgorithm +from bandits.algorithms.bb_alpha_divergence_model import BBAlphaDivergence +from bandits.algorithms.bf_variational_neural_bandit_model import BfVariationalNeuralBanditModel +from bandits.core.contextual_dataset import ContextualDataset +from bandits.algorithms.multitask_gp import MultitaskGP +from bandits.algorithms.neural_bandit_model import NeuralBanditModel +from bandits.algorithms.variational_neural_bandit_model import VariationalNeuralBanditModel + + +class PosteriorBNNSampling(BanditAlgorithm): + """Posterior Sampling algorithm based on a Bayesian neural network.""" + + def __init__(self, name, hparams, bnn_model='RMSProp'): + """Creates a PosteriorBNNSampling object based on a specific optimizer. + + The algorithm has two basic tools: an Approx BNN and a Contextual Dataset. + The Bayesian Network keeps the posterior based on the optimizer iterations. + + Args: + name: Name of the algorithm. + hparams: Hyper-parameters of the algorithm. + bnn_model: Type of BNN. By default RMSProp (point estimate). + """ + + self.name = name + self.hparams = hparams + self.optimizer_n = hparams.optimizer + + self.training_freq = hparams.training_freq + self.training_epochs = hparams.training_epochs + self.t = 0 + self.data_h = ContextualDataset(hparams.context_dim, hparams.num_actions, + hparams.buffer_s) + + # to be extended with more BNNs (BB alpha-div, GPs, SGFS, constSGD...) + bnn_name = '{}-bnn'.format(name) + if bnn_model == 'Variational': + self.bnn = VariationalNeuralBanditModel(hparams, bnn_name) + elif bnn_model == 'AlphaDiv': + self.bnn = BBAlphaDivergence(hparams, bnn_name) + elif bnn_model == 'Variational_BF': + self.bnn = BfVariationalNeuralBanditModel(hparams, bnn_name) + elif bnn_model == 'GP': + self.bnn = MultitaskGP(hparams) + else: + self.bnn = NeuralBanditModel(self.optimizer_n, hparams, bnn_name) + + def action(self, context): + """Selects action for context based on Thompson Sampling using the BNN.""" + + if self.t < self.hparams.num_actions * self.hparams.initial_pulls: + # round robin until each action has been taken "initial_pulls" times + return self.t % self.hparams.num_actions + + with self.bnn.graph.as_default(): + c = context.reshape((1, self.hparams.context_dim)) + output = self.bnn.sess.run(self.bnn.y_pred, feed_dict={self.bnn.x: c}) + return np.argmax(output) + + def update(self, context, action, reward): + """Updates data buffer, and re-trains the BNN every training_freq steps.""" + + self.t += 1 + self.data_h.add(context, action, reward) + + if self.t % self.training_freq == 0: + if self.hparams.reset_lr: + self.bnn.assign_lr() + self.bnn.train(self.data_h, self.training_epochs) diff --git a/models/research/deep_contextual_bandits/bandits/algorithms/uniform_sampling.py b/models/research/deep_contextual_bandits/bandits/algorithms/uniform_sampling.py new file mode 100644 index 0000000000000000000000000000000000000000..15c073fbe89da4e9aef595c8772ceaa3667e1952 --- /dev/null +++ b/models/research/deep_contextual_bandits/bandits/algorithms/uniform_sampling.py @@ -0,0 +1,43 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Contextual bandit algorithm that selects an action uniformly at random.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + +from bandits.core.bandit_algorithm import BanditAlgorithm + + +class UniformSampling(BanditAlgorithm): + """Defines a baseline; returns one action uniformly at random.""" + + def __init__(self, name, hparams): + """Creates a UniformSampling object. + + Args: + name: Name of the algorithm. + hparams: Hyper-parameters, including the number of arms (num_actions). + """ + + self.name = name + self.hparams = hparams + + def action(self, context): + """Selects an action uniformly at random.""" + return np.random.choice(range(self.hparams.num_actions)) diff --git a/models/research/deep_contextual_bandits/bandits/algorithms/variational_neural_bandit_model.py b/models/research/deep_contextual_bandits/bandits/algorithms/variational_neural_bandit_model.py new file mode 100644 index 0000000000000000000000000000000000000000..7700c08ba9f7861aac522ba6da9f7371b5e203af --- /dev/null +++ b/models/research/deep_contextual_bandits/bandits/algorithms/variational_neural_bandit_model.py @@ -0,0 +1,346 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Bayesian NN using factorized VI (Bayes By Backprop. Blundell et al. 2014). + +See https://arxiv.org/abs/1505.05424 for details. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf + +from absl import flags +from bandits.core.bayesian_nn import BayesianNN + +FLAGS = flags.FLAGS + + +def log_gaussian(x, mu, sigma, reduce_sum=True): + """Returns log Gaussian pdf.""" + res = (-0.5 * np.log(2 * np.pi) - tf.log(sigma) - tf.square(x - mu) / + (2 * tf.square(sigma))) + if reduce_sum: + return tf.reduce_sum(res) + else: + return res + + +def analytic_kl(mu_1, sigma_1, mu_2, sigma_2): + """KL for two Gaussian distributions with diagonal covariance matrix.""" + sigma_1_sq = tf.square(sigma_1) + sigma_2_sq = tf.square(sigma_2) + + t1 = tf.square(mu_1 - mu_2) / (2. * sigma_2_sq) + t2 = (sigma_1_sq/sigma_2_sq - 1. - tf.log(sigma_1_sq) + tf.log(sigma_2_sq))/2. 
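+  # t1 + t2 is the closed-form KL between univariate Gaussians, summed over
+  # the (independent) dimensions:
+  #   KL(N(mu_1, sigma_1^2) || N(mu_2, sigma_2^2)) =
+  #     (mu_1 - mu_2)^2 / (2 sigma_2^2)
+  #     + (sigma_1^2 / sigma_2^2 - 1 - log(sigma_1^2 / sigma_2^2)) / 2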
+ return tf.reduce_sum(t1 + t2) + + +class VariationalNeuralBanditModel(BayesianNN): + """Implements an approximate Bayesian NN using Variational Inference.""" + + def __init__(self, hparams, name="BBBNN"): + + self.name = name + self.hparams = hparams + + self.n_in = self.hparams.context_dim + self.n_out = self.hparams.num_actions + self.layers = self.hparams.layer_sizes + self.init_scale = self.hparams.init_scale + self.f_num_points = None + if "f_num_points" in hparams: + self.f_num_points = self.hparams.f_num_points + + self.cleared_times_trained = self.hparams.cleared_times_trained + self.initial_training_steps = self.hparams.initial_training_steps + self.training_schedule = np.linspace(self.initial_training_steps, + self.hparams.training_epochs, + self.cleared_times_trained) + self.verbose = getattr(self.hparams, "verbose", True) + + self.weights_m = {} + self.weights_std = {} + self.biases_m = {} + self.biases_std = {} + + self.times_trained = 0 + + if self.hparams.use_sigma_exp_transform: + self.sigma_transform = tf.exp + self.inverse_sigma_transform = np.log + else: + self.sigma_transform = tf.nn.softplus + self.inverse_sigma_transform = lambda y: y + np.log(1. - np.exp(-y)) + + # Whether to use the local reparameterization trick to compute the loss. + # See details in https://arxiv.org/abs/1506.02557 + self.use_local_reparameterization = True + + self.build_graph() + + def build_mu_variable(self, shape): + """Returns a mean variable initialized as N(0, 0.05).""" + return tf.Variable(tf.random_normal(shape, 0.0, 0.05)) + + def build_sigma_variable(self, shape, init=-5.): + """Returns a sigma variable initialized as N(init, 0.05).""" + # Initialize sigma to be very small initially to encourage MAP opt first + return tf.Variable(tf.random_normal(shape, init, 0.05)) + + def build_layer(self, input_x, input_x_local, shape, + layer_id, activation_fn=tf.nn.relu): + """Builds a variational layer, and computes KL term. + + Args: + input_x: Input to the variational layer. + input_x_local: Input when the local reparameterization trick was applied. + shape: [number_inputs, number_outputs] for the layer. + layer_id: Number of layer in the architecture. + activation_fn: Activation function to apply. + + Returns: + output_h: Output of the variational layer. + output_h_local: Output when local reparameterization trick was applied. + neg_kl: Negative KL term for the layer. 
+ """ + + w_mu = self.build_mu_variable(shape) + w_sigma = self.sigma_transform(self.build_sigma_variable(shape)) + w_noise = tf.random_normal(shape) + w = w_mu + w_sigma * w_noise + + b_mu = self.build_mu_variable([1, shape[1]]) + b_sigma = self.sigma_transform(self.build_sigma_variable([1, shape[1]])) + b = b_mu + + # Store means and stds + self.weights_m[layer_id] = w_mu + self.weights_std[layer_id] = w_sigma + self.biases_m[layer_id] = b_mu + self.biases_std[layer_id] = b_sigma + + # Create outputs + output_h = activation_fn(tf.matmul(input_x, w) + b) + + if self.use_local_reparameterization: + # Use analytic KL divergence wrt the prior + neg_kl = -analytic_kl(w_mu, w_sigma, + 0., tf.to_float(np.sqrt(2./shape[0]))) + else: + # Create empirical KL loss terms + log_p = log_gaussian(w, 0., tf.to_float(np.sqrt(2./shape[0]))) + log_q = log_gaussian(w, tf.stop_gradient(w_mu), tf.stop_gradient(w_sigma)) + neg_kl = log_p - log_q + + # Apply local reparameterization trick: sample activations pre nonlinearity + m_h = tf.matmul(input_x_local, w_mu) + b + v_h = tf.matmul(tf.square(input_x_local), tf.square(w_sigma)) + output_h_local = m_h + tf.sqrt(v_h + 1e-6) * tf.random_normal(tf.shape(v_h)) + output_h_local = activation_fn(output_h_local) + + return output_h, output_h_local, neg_kl + + def build_action_noise(self): + """Defines a model for additive noise per action, and its KL term.""" + + # Define mean and std variables (log-normal dist) for each action. + noise_sigma_mu = (self.build_mu_variable([1, self.n_out]) + + self.inverse_sigma_transform(self.hparams.noise_sigma)) + noise_sigma_sigma = self.sigma_transform( + self.build_sigma_variable([1, self.n_out])) + + pre_noise_sigma = (noise_sigma_mu + + tf.random_normal([1, self.n_out]) * noise_sigma_sigma) + self.noise_sigma = self.sigma_transform(pre_noise_sigma) + + # Compute KL for additive noise sigma terms. + if getattr(self.hparams, "infer_noise_sigma", False): + neg_kl_term = log_gaussian( + pre_noise_sigma, + self.inverse_sigma_transform(self.hparams.noise_sigma), + self.hparams.prior_sigma + ) + neg_kl_term -= log_gaussian(pre_noise_sigma, + noise_sigma_mu, + noise_sigma_sigma) + else: + neg_kl_term = 0. + + return neg_kl_term + + def build_model(self, activation_fn=tf.nn.relu): + """Defines the actual NN model with fully connected layers. + + The loss is computed for partial feedback settings (bandits), so only + the observed outcome is backpropagated (see weighted loss). + Selects the optimizer and, finally, it also initializes the graph. + + Args: + activation_fn: the activation function used in the nn layers. + """ + + if self.verbose: + print("Initializing model {}.".format(self.name)) + neg_kl_term, l_number = 0, 0 + use_local_reparameterization = self.use_local_reparameterization + + # Compute model additive noise for each action with log-normal distribution + neg_kl_term += self.build_action_noise() + + # Build network. 
+ input_x = self.x + input_local = self.x + n_in = self.n_in + + for l_number, n_nodes in enumerate(self.layers): + if n_nodes > 0: + h, h_local, neg_kl = self.build_layer(input_x, input_local, + [n_in, n_nodes], l_number) + + neg_kl_term += neg_kl + input_x, input_local = h, h_local + n_in = n_nodes + + # Create last linear layer + h, h_local, neg_kl = self.build_layer(input_x, input_local, + [n_in, self.n_out], + l_number + 1, + activation_fn=lambda x: x) + neg_kl_term += neg_kl + + self.y_pred = h + self.y_pred_local = h_local + + # Compute log likelihood (with learned or fixed noise level) + if getattr(self.hparams, "infer_noise_sigma", False): + log_likelihood = log_gaussian( + self.y, self.y_pred_local, self.noise_sigma, reduce_sum=False) + else: + y_hat = self.y_pred_local if use_local_reparameterization else self.y_pred + log_likelihood = log_gaussian( + self.y, y_hat, self.hparams.noise_sigma, reduce_sum=False) + + # Only take into account observed outcomes (bandits setting) + batch_size = tf.to_float(tf.shape(self.x)[0]) + weighted_log_likelihood = tf.reduce_sum( + log_likelihood * self.weights) / batch_size + + # The objective is 1/n * (\sum_i log_like_i - KL); neg_kl_term estimates -KL + elbo = weighted_log_likelihood + (neg_kl_term / self.n) + + self.loss = -elbo + self.global_step = tf.train.get_or_create_global_step() + self.train_op = tf.train.AdamOptimizer(self.hparams.initial_lr).minimize( + self.loss, global_step=self.global_step) + + # Create tensorboard metrics + self.create_summaries() + self.summary_writer = tf.summary.FileWriter( + "{}/graph_{}".format(FLAGS.logdir, self.name), self.sess.graph) + + def build_graph(self): + """Defines graph, session, placeholders, and model. + + Placeholders are: n (size of the dataset), x and y (context and observed + reward for each action), and weights (one-hot encoding of selected action + for each context, i.e., only possibly non-zero element in each y). + """ + + self.graph = tf.Graph() + with self.graph.as_default(): + + self.sess = tf.Session() + + self.n = tf.placeholder(shape=[], dtype=tf.float32) + + self.x = tf.placeholder(shape=[None, self.n_in], dtype=tf.float32) + self.y = tf.placeholder(shape=[None, self.n_out], dtype=tf.float32) + self.weights = tf.placeholder(shape=[None, self.n_out], dtype=tf.float32) + + self.build_model() + self.sess.run(tf.global_variables_initializer()) + + def create_summaries(self): + """Defines summaries including mean loss, and global step.""" + + with self.graph.as_default(): + with tf.name_scope(self.name + "_summaries"): + tf.summary.scalar("loss", self.loss) + tf.summary.scalar("global_step", self.global_step) + self.summary_op = tf.summary.merge_all() + + def assign_lr(self): + """Resets the learning rate in dynamic schedules for subsequent trainings. + + In bandits settings, we do expand our dataset over time. Then, we need to + re-train the network with the new data. The algorithms that do not keep + the step constant, can reset it at the start of each *training* process. + """ + + decay_steps = 1 + if self.hparams.activate_decay: + current_gs = self.sess.run(self.global_step) + with self.graph.as_default(): + self.lr = tf.train.inverse_time_decay(self.hparams.initial_lr, + self.global_step - current_gs, + decay_steps, + self.hparams.lr_decay_rate) + + def train(self, data, num_steps): + """Trains the BNN for num_steps, using the data in 'data'. + + Args: + data: ContextualDataset object that provides the data. + num_steps: Number of minibatches to train the network for. 
+ + Returns: + losses: Loss history during training. + """ + + if self.times_trained < self.cleared_times_trained: + num_steps = int(self.training_schedule[self.times_trained]) + self.times_trained += 1 + + losses = [] + + with self.graph.as_default(): + + if self.verbose: + print("Training {} for {} steps...".format(self.name, num_steps)) + + for step in range(num_steps): + x, y, weights = data.get_batch_with_weights(self.hparams.batch_size) + _, summary, global_step, loss = self.sess.run( + [self.train_op, self.summary_op, self.global_step, self.loss], + feed_dict={ + self.x: x, + self.y: y, + self.weights: weights, + self.n: data.num_points(self.f_num_points), + }) + + losses.append(loss) + + if step % self.hparams.freq_summary == 0: + if self.hparams.show_training: + print("{} | step: {}, loss: {}".format( + self.name, global_step, loss)) + self.summary_writer.add_summary(summary, global_step) + + return losses diff --git a/models/research/deep_contextual_bandits/bandits/core/bandit_algorithm.py b/models/research/deep_contextual_bandits/bandits/core/bandit_algorithm.py new file mode 100644 index 0000000000000000000000000000000000000000..cae4e1676a865d538fa41936feb9118283b92a2c --- /dev/null +++ b/models/research/deep_contextual_bandits/bandits/core/bandit_algorithm.py @@ -0,0 +1,34 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Define the abstract class for contextual bandit algorithms.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +class BanditAlgorithm(object): + """A bandit algorithm must be able to do two basic operations. + + 1. Choose an action given a context. + 2. Update its internal model given a triple (context, played action, reward). + """ + + def action(self, context): + pass + + def update(self, context, action, reward): + pass diff --git a/models/research/deep_contextual_bandits/bandits/core/bayesian_nn.py b/models/research/deep_contextual_bandits/bandits/core/bayesian_nn.py new file mode 100644 index 0000000000000000000000000000000000000000..310961591317f8c9ff958a5178e81e0422385baf --- /dev/null +++ b/models/research/deep_contextual_bandits/bandits/core/bayesian_nn.py @@ -0,0 +1,36 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Define the abstract class for Bayesian Neural Networks.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +class BayesianNN(object): + """A Bayesian neural network keeps a distribution over neural nets.""" + + def __init__(self, optimizer): + pass + + def build_model(self): + pass + + def train(self, data): + pass + + def sample(self, steps): + pass diff --git a/models/research/deep_contextual_bandits/bandits/core/contextual_bandit.py b/models/research/deep_contextual_bandits/bandits/core/contextual_bandit.py new file mode 100644 index 0000000000000000000000000000000000000000..98467378953b9f3e38057be8a0068fdbc7b59a84 --- /dev/null +++ b/models/research/deep_contextual_bandits/bandits/core/contextual_bandit.py @@ -0,0 +1,125 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Define a contextual bandit from which we can sample and compute rewards. + +We can feed the data, sample a context, its reward for a specific action, and +also the optimal action for a given context. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + + +def run_contextual_bandit(context_dim, num_actions, dataset, algos): + """Run a contextual bandit problem on a set of algorithms. + + Args: + context_dim: Dimension of the context. + num_actions: Number of available actions. + dataset: Matrix where every row is a context + num_actions rewards. + algos: List of algorithms to use in the contextual bandit instance. + + Returns: + h_actions: Matrix with actions: size (num_context, num_algorithms). + h_rewards: Matrix with rewards: size (num_context, num_algorithms). + """ + + num_contexts = dataset.shape[0] + + # Create contextual bandit + cmab = ContextualBandit(context_dim, num_actions) + cmab.feed_data(dataset) + + h_actions = np.empty((0, len(algos)), float) + h_rewards = np.empty((0, len(algos)), float) + + # Run the contextual bandit process + for i in range(num_contexts): + context = cmab.context(i) + actions = [a.action(context) for a in algos] + rewards = [cmab.reward(i, action) for action in actions] + + for j, a in enumerate(algos): + a.update(context, actions[j], rewards[j]) + + h_actions = np.vstack((h_actions, np.array(actions))) + h_rewards = np.vstack((h_rewards, np.array(rewards))) + + return h_actions, h_rewards + + +class ContextualBandit(object): + """Implements a Contextual Bandit with d-dimensional contexts and k arms.""" + + def __init__(self, context_dim, num_actions): + """Creates a contextual bandit object. + + Args: + context_dim: Dimension of the contexts. + num_actions: Number of arms for the multi-armed bandit. 
+ """ + + self._context_dim = context_dim + self._num_actions = num_actions + + def feed_data(self, data): + """Feeds the data (contexts + rewards) to the bandit object. + + Args: + data: Numpy array with shape [n, d+k], where n is the number of contexts, + d is the dimension of each context, and k the number of arms (rewards). + + Raises: + ValueError: when data dimensions do not correspond to the object values. + """ + + if data.shape[1] != self.context_dim + self.num_actions: + raise ValueError('Data dimensions do not match.') + + self._number_contexts = data.shape[0] + self.data = data + self.order = range(self.number_contexts) + + def reset(self): + """Randomly shuffle the order of the contexts to deliver.""" + self.order = np.random.permutation(self.number_contexts) + + def context(self, number): + """Returns the number-th context.""" + return self.data[self.order[number]][:self.context_dim] + + def reward(self, number, action): + """Returns the reward for the number-th context and action.""" + return self.data[self.order[number]][self.context_dim + action] + + def optimal(self, number): + """Returns the optimal action (in hindsight) for the number-th context.""" + return np.argmax(self.data[self.order[number]][self.context_dim:]) + + @property + def context_dim(self): + return self._context_dim + + @property + def num_actions(self): + return self._num_actions + + @property + def number_contexts(self): + return self._number_contexts diff --git a/models/research/deep_contextual_bandits/bandits/core/contextual_dataset.py b/models/research/deep_contextual_bandits/bandits/core/contextual_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..9fae7629c7c2ee39ab6b98ddac73876b5fca421a --- /dev/null +++ b/models/research/deep_contextual_bandits/bandits/core/contextual_dataset.py @@ -0,0 +1,166 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Define a data buffer for contextual bandit algorithms.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + + +class ContextualDataset(object): + """The buffer is able to append new data, and sample random minibatches.""" + + def __init__(self, context_dim, num_actions, buffer_s=-1, intercept=False): + """Creates a ContextualDataset object. + + The data is stored in attributes: contexts and rewards. + The sequence of taken actions are stored in attribute actions. + + Args: + context_dim: Dimension of the contexts. + num_actions: Number of arms for the multi-armed bandit. + buffer_s: Size of buffer for training. Only last buffer_s will be + returned as minibatch. If buffer_s = -1, all data will be used. + intercept: If True, it adds a constant (1.0) dimension to each context X, + at the end. 
+ """ + + self._context_dim = context_dim + self._num_actions = num_actions + self._contexts = None + self._rewards = None + self.actions = [] + self.buffer_s = buffer_s + self.intercept = intercept + + def add(self, context, action, reward): + """Adds a new triplet (context, action, reward) to the dataset. + + The reward for the actions that weren't played is assumed to be zero. + + Args: + context: A d-dimensional vector with the context. + action: Integer between 0 and k-1 representing the chosen arm. + reward: Real number representing the reward for the (context, action). + """ + + if self.intercept: + c = np.array(context[:]) + c = np.append(c, 1.0).reshape((1, self.context_dim + 1)) + else: + c = np.array(context[:]).reshape((1, self.context_dim)) + + if self.contexts is None: + self.contexts = c + else: + self.contexts = np.vstack((self.contexts, c)) + + r = np.zeros((1, self.num_actions)) + r[0, action] = reward + if self.rewards is None: + self.rewards = r + else: + self.rewards = np.vstack((self.rewards, r)) + + self.actions.append(action) + + def replace_data(self, contexts=None, actions=None, rewards=None): + if contexts is not None: + self.contexts = contexts + if actions is not None: + self.actions = actions + if rewards is not None: + self.rewards = rewards + + def get_batch(self, batch_size): + """Returns a random minibatch of (contexts, rewards) with batch_size.""" + n, _ = self.contexts.shape + if self.buffer_s == -1: + # use all the data + ind = np.random.choice(range(n), batch_size) + else: + # use only buffer (last buffer_s observations) + ind = np.random.choice(range(max(0, n - self.buffer_s), n), batch_size) + return self.contexts[ind, :], self.rewards[ind, :] + + def get_data(self, action): + """Returns all (context, reward) where the action was played.""" + n, _ = self.contexts.shape + ind = np.array([i for i in range(n) if self.actions[i] == action]) + return self.contexts[ind, :], self.rewards[ind, action] + + def get_data_with_weights(self): + """Returns all observations with one-hot weights for actions.""" + weights = np.zeros((self.contexts.shape[0], self.num_actions)) + a_ind = np.array([(i, val) for i, val in enumerate(self.actions)]) + weights[a_ind[:, 0], a_ind[:, 1]] = 1.0 + return self.contexts, self.rewards, weights + + def get_batch_with_weights(self, batch_size): + """Returns a random mini-batch with one-hot weights for actions.""" + n, _ = self.contexts.shape + if self.buffer_s == -1: + # use all the data + ind = np.random.choice(range(n), batch_size) + else: + # use only buffer (last buffer_s obs) + ind = np.random.choice(range(max(0, n - self.buffer_s), n), batch_size) + + weights = np.zeros((batch_size, self.num_actions)) + sampled_actions = np.array(self.actions)[ind] + a_ind = np.array([(i, val) for i, val in enumerate(sampled_actions)]) + weights[a_ind[:, 0], a_ind[:, 1]] = 1.0 + return self.contexts[ind, :], self.rewards[ind, :], weights + + def num_points(self, f=None): + """Returns number of points in the buffer (after applying function f).""" + if f is not None: + return f(self.contexts.shape[0]) + return self.contexts.shape[0] + + @property + def context_dim(self): + return self._context_dim + + @property + def num_actions(self): + return self._num_actions + + @property + def contexts(self): + return self._contexts + + @contexts.setter + def contexts(self, value): + self._contexts = value + + @property + def actions(self): + return self._actions + + @actions.setter + def actions(self, value): + self._actions = value + + @property + def 
rewards(self): + return self._rewards + + @rewards.setter + def rewards(self, value): + self._rewards = value diff --git a/models/research/deep_contextual_bandits/bandits/data/data_sampler.py b/models/research/deep_contextual_bandits/bandits/data/data_sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..55d1bae383637485182a9524ba8a3cb37b76bd0d --- /dev/null +++ b/models/research/deep_contextual_bandits/bandits/data/data_sampler.py @@ -0,0 +1,374 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Functions to create bandit problems from datasets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import pandas as pd +import tensorflow as tf + + +def one_hot(df, cols): + """Returns one-hot encoding of DataFrame df including columns in cols.""" + for col in cols: + dummies = pd.get_dummies(df[col], prefix=col, drop_first=False) + df = pd.concat([df, dummies], axis=1) + df = df.drop(col, axis=1) + return df + + +def sample_mushroom_data(file_name, + num_contexts, + r_noeat=0, + r_eat_safe=5, + r_eat_poison_bad=-35, + r_eat_poison_good=5, + prob_poison_bad=0.5): + """Samples bandit game from Mushroom UCI Dataset. + + Args: + file_name: Route of file containing the original Mushroom UCI dataset. + num_contexts: Number of points to sample, i.e. (context, action rewards). + r_noeat: Reward for not eating a mushroom. + r_eat_safe: Reward for eating a non-poisonous mushroom. + r_eat_poison_bad: Reward for eating a poisonous mushroom if harmed. + r_eat_poison_good: Reward for eating a poisonous mushroom if not harmed. + prob_poison_bad: Probability of being harmed by eating a poisonous mushroom. + + Returns: + dataset: Sampled matrix with n rows: (context, eat_reward, no_eat_reward). + opt_vals: Vector of expected optimal (reward, action) for each context. + + We assume r_eat_safe > r_noeat, and r_eat_poison_good > r_eat_poison_bad. 
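+  The expected reward of eating a poisonous mushroom is
+  prob_poison_bad * r_eat_poison_bad + (1 - prob_poison_bad) * r_eat_poison_good,
+  so eating a poisonous mushroom is only optimal when this value exceeds
+  r_noeat.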
+ """ + + # first two cols of df encode whether mushroom is edible or poisonous + df = pd.read_csv(file_name, header=None) + df = one_hot(df, df.columns) + ind = np.random.choice(range(df.shape[0]), num_contexts, replace=True) + + contexts = df.iloc[ind, 2:] + no_eat_reward = r_noeat * np.ones((num_contexts, 1)) + random_poison = np.random.choice( + [r_eat_poison_bad, r_eat_poison_good], + p=[prob_poison_bad, 1 - prob_poison_bad], + size=num_contexts) + eat_reward = r_eat_safe * df.iloc[ind, 0] + eat_reward += np.multiply(random_poison, df.iloc[ind, 1]) + eat_reward = eat_reward.values.reshape((num_contexts, 1)) + + # compute optimal expected reward and optimal actions + exp_eat_poison_reward = r_eat_poison_bad * prob_poison_bad + exp_eat_poison_reward += r_eat_poison_good * (1 - prob_poison_bad) + opt_exp_reward = r_eat_safe * df.iloc[ind, 0] + max( + r_noeat, exp_eat_poison_reward) * df.iloc[ind, 1] + + if r_noeat > exp_eat_poison_reward: + # actions: no eat = 0 ; eat = 1 + opt_actions = df.iloc[ind, 0] # indicator of edible + else: + # should always eat (higher expected reward) + opt_actions = np.ones((num_contexts, 1)) + + opt_vals = (opt_exp_reward.values, opt_actions.values) + + return np.hstack((contexts, no_eat_reward, eat_reward)), opt_vals + + +def sample_stock_data(file_name, context_dim, num_actions, num_contexts, + sigma, shuffle_rows=True): + """Samples linear bandit game from stock prices dataset. + + Args: + file_name: Route of file containing the stock prices dataset. + context_dim: Context dimension (i.e. vector with the price of each stock). + num_actions: Number of actions (different linear portfolio strategies). + num_contexts: Number of contexts to sample. + sigma: Vector with additive noise levels for each action. + shuffle_rows: If True, rows from original dataset are shuffled. + + Returns: + dataset: Sampled matrix with rows: (context, reward_1, ..., reward_k). + opt_vals: Vector of expected optimal (reward, action) for each context. + """ + + with tf.gfile.Open(file_name, 'r') as f: + contexts = np.loadtxt(f, skiprows=1) + + if shuffle_rows: + np.random.shuffle(contexts) + contexts = contexts[:num_contexts, :] + + betas = np.random.uniform(-1, 1, (context_dim, num_actions)) + betas /= np.linalg.norm(betas, axis=0) + + mean_rewards = np.dot(contexts, betas) + noise = np.random.normal(scale=sigma, size=mean_rewards.shape) + rewards = mean_rewards + noise + + opt_actions = np.argmax(mean_rewards, axis=1) + opt_rewards = [mean_rewards[i, a] for i, a in enumerate(opt_actions)] + return np.hstack((contexts, rewards)), (np.array(opt_rewards), opt_actions) + + +def sample_jester_data(file_name, context_dim, num_actions, num_contexts, + shuffle_rows=True, shuffle_cols=False): + """Samples bandit game from (user, joke) dense subset of Jester dataset. + + Args: + file_name: Route of file containing the modified Jester dataset. + context_dim: Context dimension (i.e. vector with some ratings from a user). + num_actions: Number of actions (number of joke ratings to predict). + num_contexts: Number of contexts to sample. + shuffle_rows: If True, rows from original dataset are shuffled. + shuffle_cols: Whether or not context/action jokes are randomly shuffled. + + Returns: + dataset: Sampled matrix with rows: (context, rating_1, ..., rating_k). + opt_vals: Vector of deterministic optimal (reward, action) for each context. 
+ """ + + with tf.gfile.Open(file_name, 'rb') as f: + dataset = np.load(f) + + if shuffle_cols: + dataset = dataset[:, np.random.permutation(dataset.shape[1])] + if shuffle_rows: + np.random.shuffle(dataset) + dataset = dataset[:num_contexts, :] + + assert context_dim + num_actions == dataset.shape[1], 'Wrong data dimensions.' + + opt_actions = np.argmax(dataset[:, context_dim:], axis=1) + opt_rewards = np.array([dataset[i, context_dim + a] + for i, a in enumerate(opt_actions)]) + + return dataset, (opt_rewards, opt_actions) + + +def sample_statlog_data(file_name, num_contexts, shuffle_rows=True, + remove_underrepresented=False): + """Returns bandit problem dataset based on the UCI statlog data. + + Args: + file_name: Route of file containing the Statlog dataset. + num_contexts: Number of contexts to sample. + shuffle_rows: If True, rows from original dataset are shuffled. + remove_underrepresented: If True, removes arms with very few rewards. + + Returns: + dataset: Sampled matrix with rows: (context, action rewards). + opt_vals: Vector of deterministic optimal (reward, action) for each context. + + https://archive.ics.uci.edu/ml/datasets/Statlog+(Shuttle) + """ + + with tf.gfile.Open(file_name, 'r') as f: + data = np.loadtxt(f) + + num_actions = 7 # some of the actions are very rarely optimal. + + # Shuffle data + if shuffle_rows: + np.random.shuffle(data) + data = data[:num_contexts, :] + + # Last column is label, rest are features + contexts = data[:, :-1] + labels = data[:, -1].astype(int) - 1 # convert to 0 based index + + if remove_underrepresented: + contexts, labels = remove_underrepresented_classes(contexts, labels) + + return classification_to_bandit_problem(contexts, labels, num_actions) + + +def sample_adult_data(file_name, num_contexts, shuffle_rows=True, + remove_underrepresented=False): + """Returns bandit problem dataset based on the UCI adult data. + + Args: + file_name: Route of file containing the Adult dataset. + num_contexts: Number of contexts to sample. + shuffle_rows: If True, rows from original dataset are shuffled. + remove_underrepresented: If True, removes arms with very few rewards. + + Returns: + dataset: Sampled matrix with rows: (context, action rewards). + opt_vals: Vector of deterministic optimal (reward, action) for each context. + + Preprocessing: + * drop rows with missing values + * convert categorical variables to 1 hot encoding + + https://archive.ics.uci.edu/ml/datasets/census+income + """ + with tf.gfile.Open(file_name, 'r') as f: + df = pd.read_csv(f, header=None, + na_values=[' ?']).dropna() + + num_actions = 14 + + if shuffle_rows: + df = df.sample(frac=1) + df = df.iloc[:num_contexts, :] + + labels = df[6].astype('category').cat.codes.as_matrix() + df = df.drop([6], axis=1) + + # Convert categorical variables to 1 hot encoding + cols_to_transform = [1, 3, 5, 7, 8, 9, 13, 14] + df = pd.get_dummies(df, columns=cols_to_transform) + + if remove_underrepresented: + df, labels = remove_underrepresented_classes(df, labels) + contexts = df.as_matrix() + + return classification_to_bandit_problem(contexts, labels, num_actions) + + +def sample_census_data(file_name, num_contexts, shuffle_rows=True, + remove_underrepresented=False): + """Returns bandit problem dataset based on the UCI census data. + + Args: + file_name: Route of file containing the Census dataset. + num_contexts: Number of contexts to sample. + shuffle_rows: If True, rows from original dataset are shuffled. + remove_underrepresented: If True, removes arms with very few rewards. 
+ + Returns: + dataset: Sampled matrix with rows: (context, action rewards). + opt_vals: Vector of deterministic optimal (reward, action) for each context. + + Preprocessing: + * drop rows with missing labels + * convert categorical variables to 1 hot encoding + + Note: this is the processed (not the 'raw') dataset. It contains a subset + of the raw features and they've all been discretized. + + https://archive.ics.uci.edu/ml/datasets/US+Census+Data+%281990%29 + """ + # Note: this dataset is quite large. It will be slow to load and preprocess. + with tf.gfile.Open(file_name, 'r') as f: + df = (pd.read_csv(f, header=0, na_values=['?']) + .dropna()) + + num_actions = 9 + + if shuffle_rows: + df = df.sample(frac=1) + df = df.iloc[:num_contexts, :] + + # Assuming what the paper calls response variable is the label? + labels = df['dOccup'].astype('category').cat.codes.as_matrix() + # In addition to label, also drop the (unique?) key. + df = df.drop(['dOccup', 'caseid'], axis=1) + + # All columns are categorical. Convert to 1 hot encoding. + df = pd.get_dummies(df, columns=df.columns) + + if remove_underrepresented: + df, labels = remove_underrepresented_classes(df, labels) + contexts = df.as_matrix() + + return classification_to_bandit_problem(contexts, labels, num_actions) + + +def sample_covertype_data(file_name, num_contexts, shuffle_rows=True, + remove_underrepresented=False): + """Returns bandit problem dataset based on the UCI Cover_Type data. + + Args: + file_name: Route of file containing the Covertype dataset. + num_contexts: Number of contexts to sample. + shuffle_rows: If True, rows from original dataset are shuffled. + remove_underrepresented: If True, removes arms with very few rewards. + + Returns: + dataset: Sampled matrix with rows: (context, action rewards). + opt_vals: Vector of deterministic optimal (reward, action) for each context. + + Preprocessing: + * drop rows with missing labels + * convert categorical variables to 1 hot encoding + + https://archive.ics.uci.edu/ml/datasets/Covertype + """ + with tf.gfile.Open(file_name, 'r') as f: + df = (pd.read_csv(f, header=0, na_values=['?']) + .dropna()) + + num_actions = 7 + + if shuffle_rows: + df = df.sample(frac=1) + df = df.iloc[:num_contexts, :] + + # Assuming what the paper calls response variable is the label? + # Last column is label. + labels = df[df.columns[-1]].astype('category').cat.codes.as_matrix() + df = df.drop([df.columns[-1]], axis=1) + + # All columns are either quantitative or already converted to 1 hot. 
+ if remove_underrepresented: + df, labels = remove_underrepresented_classes(df, labels) + contexts = df.as_matrix() + + return classification_to_bandit_problem(contexts, labels, num_actions) + + +def classification_to_bandit_problem(contexts, labels, num_actions=None): + """Normalize contexts and encode deterministic rewards.""" + + if num_actions is None: + num_actions = np.max(labels) + 1 + num_contexts = contexts.shape[0] + + # Due to random subsampling in small problems, some features may be constant + sstd = safe_std(np.std(contexts, axis=0, keepdims=True)[0, :]) + + # Normalize features + contexts = ((contexts - np.mean(contexts, axis=0, keepdims=True)) / sstd) + + # One hot encode labels as rewards + rewards = np.zeros((num_contexts, num_actions)) + rewards[np.arange(num_contexts), labels] = 1.0 + + return contexts, rewards, (np.ones(num_contexts), labels) + + +def safe_std(values): + """Remove zero std values for ones.""" + return np.array([val if val != 0.0 else 1.0 for val in values]) + + +def remove_underrepresented_classes(features, labels, thresh=0.0005): + """Removes classes when number of datapoints fraction is below a threshold.""" + + # Threshold doesn't seem to agree with https://arxiv.org/pdf/1706.04687.pdf + # Example: for Covertype, they report 4 classes after filtering, we get 7? + total_count = labels.shape[0] + unique, counts = np.unique(labels, return_counts=True) + ratios = counts.astype('float') / total_count + vals_and_ratios = dict(zip(unique, ratios)) + print('Unique classes and their ratio of total: %s' % vals_and_ratios) + keep = [vals_and_ratios[v] >= thresh for v in labels] + return features[keep], labels[np.array(keep)] diff --git a/models/research/deep_contextual_bandits/bandits/data/synthetic_data_sampler.py b/models/research/deep_contextual_bandits/bandits/data/synthetic_data_sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..c7de48aba4de109392aa8efad06071886cf67964 --- /dev/null +++ b/models/research/deep_contextual_bandits/bandits/data/synthetic_data_sampler.py @@ -0,0 +1,179 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Several functions to sample contextual data.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + + +def sample_contextual_data(num_contexts, dim_context, num_actions, sigma): + """Samples independent Gaussian data. + + There is nothing to learn here as the rewards do not depend on the context. + + Args: + num_contexts: Number of contexts to sample. + dim_context: Dimension of the contexts. + num_actions: Number of arms for the multi-armed bandit. + sigma: Standard deviation of the independent Gaussian samples. + + Returns: + data: A [num_contexts, dim_context + num_actions] numpy array with the data. 
+ """ + size_data = [num_contexts, dim_context + num_actions] + return np.random.normal(scale=sigma, size=size_data) + + +def sample_linear_data(num_contexts, dim_context, num_actions, sigma=0.0): + """Samples data from linearly parameterized arms. + + The reward for context X and arm j is given by X^T beta_j, for some latent + set of parameters {beta_j : j = 1, ..., k}. The beta's are sampled uniformly + at random, the contexts are Gaussian, and sigma-noise is added to the rewards. + + Args: + num_contexts: Number of contexts to sample. + dim_context: Dimension of the contexts. + num_actions: Number of arms for the multi-armed bandit. + sigma: Standard deviation of the additive noise. Set to zero for no noise. + + Returns: + data: A [n, d+k] numpy array with the data. + betas: Latent parameters that determine expected reward for each arm. + opt: (optimal_rewards, optimal_actions) for all contexts. + """ + + betas = np.random.uniform(-1, 1, (dim_context, num_actions)) + betas /= np.linalg.norm(betas, axis=0) + contexts = np.random.normal(size=[num_contexts, dim_context]) + rewards = np.dot(contexts, betas) + opt_actions = np.argmax(rewards, axis=1) + rewards += np.random.normal(scale=sigma, size=rewards.shape) + opt_rewards = np.array([rewards[i, act] for i, act in enumerate(opt_actions)]) + return np.hstack((contexts, rewards)), betas, (opt_rewards, opt_actions) + + +def sample_sparse_linear_data(num_contexts, dim_context, num_actions, + sparse_dim, sigma=0.0): + """Samples data from sparse linearly parameterized arms. + + The reward for context X and arm j is given by X^T beta_j, for some latent + set of parameters {beta_j : j = 1, ..., k}. The beta's are sampled uniformly + at random, the contexts are Gaussian, and sigma-noise is added to the rewards. + Only s components out of d are non-zero for each arm's beta. + + Args: + num_contexts: Number of contexts to sample. + dim_context: Dimension of the contexts. + num_actions: Number of arms for the multi-armed bandit. + sparse_dim: Dimension of the latent subspace (sparsity pattern dimension). + sigma: Standard deviation of the additive noise. Set to zero for no noise. + + Returns: + data: A [num_contexts, dim_context+num_actions] numpy array with the data. + betas: Latent parameters that determine expected reward for each arm. + opt: (optimal_rewards, optimal_actions) for all contexts. + """ + + flatten = lambda l: [item for sublist in l for item in sublist] + sparse_pattern = flatten( + [[(j, i) for j in np.random.choice(range(dim_context), + sparse_dim, + replace=False)] + for i in range(num_actions)]) + betas = np.random.uniform(-1, 1, (dim_context, num_actions)) + mask = np.zeros((dim_context, num_actions)) + for elt in sparse_pattern: + mask[elt] = 1 + betas = np.multiply(betas, mask) + betas /= np.linalg.norm(betas, axis=0) + contexts = np.random.normal(size=[num_contexts, dim_context]) + rewards = np.dot(contexts, betas) + opt_actions = np.argmax(rewards, axis=1) + rewards += np.random.normal(scale=sigma, size=rewards.shape) + opt_rewards = np.array([rewards[i, act] for i, act in enumerate(opt_actions)]) + return np.hstack((contexts, rewards)), betas, (opt_rewards, opt_actions) + + +def sample_wheel_bandit_data(num_contexts, delta, mean_v, std_v, + mu_large, std_large): + """Samples from Wheel bandit game (see https://arxiv.org/abs/1802.09127). + + Args: + num_contexts: Number of points to sample, i.e. (context, action rewards). + delta: Exploration parameter: high reward in one region if norm above delta. 
+ mean_v: Mean reward for each action if context norm is below delta. + std_v: Gaussian reward std for each action if context norm is below delta. + mu_large: Mean reward for optimal action if context norm is above delta. + std_large: Reward std for optimal action if context norm is above delta. + + Returns: + dataset: Sampled matrix with n rows: (context, action rewards). + opt_vals: Vector of expected optimal (reward, action) for each context. + """ + + context_dim = 2 + num_actions = 5 + + data = [] + rewards = [] + opt_actions = [] + opt_rewards = [] + + # sample uniform contexts in unit ball + while len(data) < num_contexts: + raw_data = np.random.uniform(-1, 1, (int(num_contexts / 3), context_dim)) + + for i in range(raw_data.shape[0]): + if np.linalg.norm(raw_data[i, :]) <= 1: + data.append(raw_data[i, :]) + + contexts = np.stack(data)[:num_contexts, :] + + # sample rewards + for i in range(num_contexts): + r = [np.random.normal(mean_v[j], std_v[j]) for j in range(num_actions)] + if np.linalg.norm(contexts[i, :]) >= delta: + # large reward in the right region for the context + r_big = np.random.normal(mu_large, std_large) + if contexts[i, 0] > 0: + if contexts[i, 1] > 0: + r[0] = r_big + opt_actions.append(0) + else: + r[1] = r_big + opt_actions.append(1) + else: + if contexts[i, 1] > 0: + r[2] = r_big + opt_actions.append(2) + else: + r[3] = r_big + opt_actions.append(3) + else: + opt_actions.append(np.argmax(mean_v)) + + opt_rewards.append(r[opt_actions[-1]]) + rewards.append(r) + + rewards = np.stack(rewards) + opt_rewards = np.array(opt_rewards) + opt_actions = np.array(opt_actions) + + return np.hstack((contexts, rewards)), (opt_rewards, opt_actions) diff --git a/models/research/deep_contextual_bandits/example_main.py b/models/research/deep_contextual_bandits/example_main.py new file mode 100644 index 0000000000000000000000000000000000000000..c71a5aa26f94adbf5989d002fd5c768582c14e14 --- /dev/null +++ b/models/research/deep_contextual_bandits/example_main.py @@ -0,0 +1,454 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Simple example of contextual bandits simulation. + +Code corresponding to: +Deep Bayesian Bandits Showdown: An Empirical Comparison of Bayesian Deep Networks +for Thompson Sampling, by Carlos Riquelme, George Tucker, and Jasper Snoek. 
+https://arxiv.org/abs/1802.09127 +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import time +from absl import app +from absl import flags +import numpy as np +import os +import tensorflow as tf + +from bandits.algorithms.bootstrapped_bnn_sampling import BootstrappedBNNSampling +from bandits.core.contextual_bandit import run_contextual_bandit +from bandits.data.data_sampler import sample_adult_data +from bandits.data.data_sampler import sample_census_data +from bandits.data.data_sampler import sample_covertype_data +from bandits.data.data_sampler import sample_jester_data +from bandits.data.data_sampler import sample_mushroom_data +from bandits.data.data_sampler import sample_statlog_data +from bandits.data.data_sampler import sample_stock_data +from bandits.algorithms.fixed_policy_sampling import FixedPolicySampling +from bandits.algorithms.linear_full_posterior_sampling import LinearFullPosteriorSampling +from bandits.algorithms.neural_linear_sampling import NeuralLinearPosteriorSampling +from bandits.algorithms.parameter_noise_sampling import ParameterNoiseSampling +from bandits.algorithms.posterior_bnn_sampling import PosteriorBNNSampling +from bandits.data.synthetic_data_sampler import sample_linear_data +from bandits.data.synthetic_data_sampler import sample_sparse_linear_data +from bandits.data.synthetic_data_sampler import sample_wheel_bandit_data +from bandits.algorithms.uniform_sampling import UniformSampling + +# Set up your file routes to the data files. +base_route = os.getcwd() +data_route = 'contextual_bandits/datasets' + +FLAGS = flags.FLAGS +FLAGS.set_default('alsologtostderr', True) +flags.DEFINE_string('logdir', '/tmp/bandits/', 'Base directory to save output') +flags.DEFINE_string( + 'mushroom_data', + os.path.join(base_route, data_route, 'mushroom.data'), + 'Directory where Mushroom data is stored.') +flags.DEFINE_string( + 'financial_data', + os.path.join(base_route, data_route, 'raw_stock_contexts'), + 'Directory where Financial data is stored.') +flags.DEFINE_string( + 'jester_data', + os.path.join(base_route, data_route, 'jester_data_40jokes_19181users.npy'), + 'Directory where Jester data is stored.') +flags.DEFINE_string( + 'statlog_data', + os.path.join(base_route, data_route, 'shuttle.trn'), + 'Directory where Statlog data is stored.') +flags.DEFINE_string( + 'adult_data', + os.path.join(base_route, data_route, 'adult.full'), + 'Directory where Adult data is stored.') +flags.DEFINE_string( + 'covertype_data', + os.path.join(base_route, data_route, 'covtype.data'), + 'Directory where Covertype data is stored.') +flags.DEFINE_string( + 'census_data', + os.path.join(base_route, data_route, 'USCensus1990.data.txt'), + 'Directory where Census data is stored.') + + +def sample_data(data_type, num_contexts=None): + """Sample data from given 'data_type'. + + Args: + data_type: Dataset from which to sample. + num_contexts: Number of contexts to sample. + + Returns: + dataset: Sampled matrix with rows: (context, reward_1, ..., reward_num_act). + opt_rewards: Vector of expected optimal reward for each context. + opt_actions: Vector of optimal action for each context. + num_actions: Number of available actions. + context_dim: Dimension of each context. 
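+
+  Note: for the real-world datasets, num_contexts is capped at a
+  dataset-specific maximum (e.g. 3713 for financial, 19181 for jester).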
+ """ + + if data_type == 'linear': + # Create linear dataset + num_actions = 8 + context_dim = 10 + noise_stds = [0.01 * (i + 1) for i in range(num_actions)] + dataset, _, opt_linear = sample_linear_data(num_contexts, context_dim, + num_actions, sigma=noise_stds) + opt_rewards, opt_actions = opt_linear + elif data_type == 'sparse_linear': + # Create sparse linear dataset + num_actions = 7 + context_dim = 10 + noise_stds = [0.01 * (i + 1) for i in range(num_actions)] + num_nnz_dims = int(context_dim / 3.0) + dataset, _, opt_sparse_linear = sample_sparse_linear_data( + num_contexts, context_dim, num_actions, num_nnz_dims, sigma=noise_stds) + opt_rewards, opt_actions = opt_sparse_linear + elif data_type == 'mushroom': + # Create mushroom dataset + num_actions = 2 + context_dim = 117 + file_name = FLAGS.mushroom_data + dataset, opt_mushroom = sample_mushroom_data(file_name, num_contexts) + opt_rewards, opt_actions = opt_mushroom + elif data_type == 'financial': + num_actions = 8 + context_dim = 21 + num_contexts = min(3713, num_contexts) + noise_stds = [0.01 * (i + 1) for i in range(num_actions)] + file_name = FLAGS.financial_data + dataset, opt_financial = sample_stock_data(file_name, context_dim, + num_actions, num_contexts, + noise_stds, shuffle_rows=True) + opt_rewards, opt_actions = opt_financial + elif data_type == 'jester': + num_actions = 8 + context_dim = 32 + num_contexts = min(19181, num_contexts) + file_name = FLAGS.jester_data + dataset, opt_jester = sample_jester_data(file_name, context_dim, + num_actions, num_contexts, + shuffle_rows=True, + shuffle_cols=True) + opt_rewards, opt_actions = opt_jester + elif data_type == 'statlog': + file_name = FLAGS.statlog_data + num_actions = 7 + num_contexts = min(43500, num_contexts) + sampled_vals = sample_statlog_data(file_name, num_contexts, + shuffle_rows=True) + contexts, rewards, (opt_rewards, opt_actions) = sampled_vals + dataset = np.hstack((contexts, rewards)) + context_dim = contexts.shape[1] + elif data_type == 'adult': + file_name = FLAGS.adult_data + num_actions = 14 + num_contexts = min(45222, num_contexts) + sampled_vals = sample_adult_data(file_name, num_contexts, + shuffle_rows=True) + contexts, rewards, (opt_rewards, opt_actions) = sampled_vals + dataset = np.hstack((contexts, rewards)) + context_dim = contexts.shape[1] + elif data_type == 'covertype': + file_name = FLAGS.covertype_data + num_actions = 7 + num_contexts = min(150000, num_contexts) + sampled_vals = sample_covertype_data(file_name, num_contexts, + shuffle_rows=True) + contexts, rewards, (opt_rewards, opt_actions) = sampled_vals + dataset = np.hstack((contexts, rewards)) + context_dim = contexts.shape[1] + elif data_type == 'census': + file_name = FLAGS.census_data + num_actions = 9 + num_contexts = min(150000, num_contexts) + sampled_vals = sample_census_data(file_name, num_contexts, + shuffle_rows=True) + contexts, rewards, (opt_rewards, opt_actions) = sampled_vals + dataset = np.hstack((contexts, rewards)) + context_dim = contexts.shape[1] + elif data_type == 'wheel': + delta = 0.95 + num_actions = 5 + context_dim = 2 + mean_v = [1.0, 1.0, 1.0, 1.0, 1.2] + std_v = [0.05, 0.05, 0.05, 0.05, 0.05] + mu_large = 50 + std_large = 0.01 + dataset, opt_wheel = sample_wheel_bandit_data(num_contexts, delta, + mean_v, std_v, + mu_large, std_large) + opt_rewards, opt_actions = opt_wheel + + return dataset, opt_rewards, opt_actions, num_actions, context_dim + + +def display_results(algos, opt_rewards, opt_actions, h_rewards, t_init, name): + """Displays summary 
statistics of the performance of each algorithm.""" + + print('---------------------------------------------------') + print('---------------------------------------------------') + print('{} bandit completed after {} seconds.'.format( + name, time.time() - t_init)) + print('---------------------------------------------------') + + performance_pairs = [] + for j, a in enumerate(algos): + performance_pairs.append((a.name, np.sum(h_rewards[:, j]))) + performance_pairs = sorted(performance_pairs, + key=lambda elt: elt[1], + reverse=True) + for i, (name, reward) in enumerate(performance_pairs): + print('{:3}) {:20}| \t \t total reward = {:10}.'.format(i, name, reward)) + + print('---------------------------------------------------') + print('Optimal total reward = {}.'.format(np.sum(opt_rewards))) + print('Frequency of optimal actions (action, frequency):') + print([[elt, list(opt_actions).count(elt)] for elt in set(opt_actions)]) + print('---------------------------------------------------') + print('---------------------------------------------------') + + +def main(_): + + # Problem parameters + num_contexts = 2000 + + # Data type in {linear, sparse_linear, mushroom, financial, jester, + # statlog, adult, covertype, census, wheel} + data_type = 'mushroom' + + # Create dataset + sampled_vals = sample_data(data_type, num_contexts) + dataset, opt_rewards, opt_actions, num_actions, context_dim = sampled_vals + + # Define hyperparameters and algorithms + hparams = tf.contrib.training.HParams(num_actions=num_actions) + + hparams_linear = tf.contrib.training.HParams(num_actions=num_actions, + context_dim=context_dim, + a0=6, + b0=6, + lambda_prior=0.25, + initial_pulls=2) + + hparams_rms = tf.contrib.training.HParams(num_actions=num_actions, + context_dim=context_dim, + init_scale=0.3, + activation=tf.nn.relu, + layer_sizes=[50], + batch_size=512, + activate_decay=True, + initial_lr=0.1, + max_grad_norm=5.0, + show_training=False, + freq_summary=1000, + buffer_s=-1, + initial_pulls=2, + optimizer='RMS', + reset_lr=True, + lr_decay_rate=0.5, + training_freq=50, + training_epochs=100, + p=0.95, + q=3) + + hparams_dropout = tf.contrib.training.HParams(num_actions=num_actions, + context_dim=context_dim, + init_scale=0.3, + activation=tf.nn.relu, + layer_sizes=[50], + batch_size=512, + activate_decay=True, + initial_lr=0.1, + max_grad_norm=5.0, + show_training=False, + freq_summary=1000, + buffer_s=-1, + initial_pulls=2, + optimizer='RMS', + reset_lr=True, + lr_decay_rate=0.5, + training_freq=50, + training_epochs=100, + use_dropout=True, + keep_prob=0.80) + + hparams_bbb = tf.contrib.training.HParams(num_actions=num_actions, + context_dim=context_dim, + init_scale=0.3, + activation=tf.nn.relu, + layer_sizes=[50], + batch_size=512, + activate_decay=True, + initial_lr=0.1, + max_grad_norm=5.0, + show_training=False, + freq_summary=1000, + buffer_s=-1, + initial_pulls=2, + optimizer='RMS', + use_sigma_exp_transform=True, + cleared_times_trained=10, + initial_training_steps=100, + noise_sigma=0.1, + reset_lr=False, + training_freq=50, + training_epochs=100) + + hparams_nlinear = tf.contrib.training.HParams(num_actions=num_actions, + context_dim=context_dim, + init_scale=0.3, + activation=tf.nn.relu, + layer_sizes=[50], + batch_size=512, + activate_decay=True, + initial_lr=0.1, + max_grad_norm=5.0, + show_training=False, + freq_summary=1000, + buffer_s=-1, + initial_pulls=2, + reset_lr=True, + lr_decay_rate=0.5, + training_freq=1, + training_freq_network=50, + training_epochs=100, + a0=6, + b0=6, + 
lambda_prior=0.25) + + hparams_nlinear2 = tf.contrib.training.HParams(num_actions=num_actions, + context_dim=context_dim, + init_scale=0.3, + activation=tf.nn.relu, + layer_sizes=[50], + batch_size=512, + activate_decay=True, + initial_lr=0.1, + max_grad_norm=5.0, + show_training=False, + freq_summary=1000, + buffer_s=-1, + initial_pulls=2, + reset_lr=True, + lr_decay_rate=0.5, + training_freq=10, + training_freq_network=50, + training_epochs=100, + a0=6, + b0=6, + lambda_prior=0.25) + + hparams_pnoise = tf.contrib.training.HParams(num_actions=num_actions, + context_dim=context_dim, + init_scale=0.3, + activation=tf.nn.relu, + layer_sizes=[50], + batch_size=512, + activate_decay=True, + initial_lr=0.1, + max_grad_norm=5.0, + show_training=False, + freq_summary=1000, + buffer_s=-1, + initial_pulls=2, + optimizer='RMS', + reset_lr=True, + lr_decay_rate=0.5, + training_freq=50, + training_epochs=100, + noise_std=0.05, + eps=0.1, + d_samples=300, + ) + + hparams_alpha_div = tf.contrib.training.HParams(num_actions=num_actions, + context_dim=context_dim, + init_scale=0.3, + activation=tf.nn.relu, + layer_sizes=[50], + batch_size=512, + activate_decay=True, + initial_lr=0.1, + max_grad_norm=5.0, + show_training=False, + freq_summary=1000, + buffer_s=-1, + initial_pulls=2, + optimizer='RMS', + use_sigma_exp_transform=True, + cleared_times_trained=10, + initial_training_steps=100, + noise_sigma=0.1, + reset_lr=False, + training_freq=50, + training_epochs=100, + alpha=1.0, + k=20, + prior_variance=0.1) + + hparams_gp = tf.contrib.training.HParams(num_actions=num_actions, + num_outputs=num_actions, + context_dim=context_dim, + reset_lr=False, + learn_embeddings=True, + max_num_points=1000, + show_training=False, + freq_summary=1000, + batch_size=512, + keep_fixed_after_max_obs=True, + training_freq=50, + initial_pulls=2, + training_epochs=100, + lr=0.01, + buffer_s=-1, + initial_lr=0.001, + lr_decay_rate=0.0, + optimizer='RMS', + task_latent_dim=5, + activate_decay=False) + + algos = [ + UniformSampling('Uniform Sampling', hparams), + UniformSampling('Uniform Sampling 2', hparams), + FixedPolicySampling('fixed1', [0.75, 0.25], hparams), + FixedPolicySampling('fixed2', [0.25, 0.75], hparams), + PosteriorBNNSampling('RMS', hparams_rms, 'RMSProp'), + PosteriorBNNSampling('Dropout', hparams_dropout, 'RMSProp'), + PosteriorBNNSampling('BBB', hparams_bbb, 'Variational'), + NeuralLinearPosteriorSampling('NeuralLinear', hparams_nlinear), + NeuralLinearPosteriorSampling('NeuralLinear2', hparams_nlinear2), + LinearFullPosteriorSampling('LinFullPost', hparams_linear), + BootstrappedBNNSampling('BootRMS', hparams_rms), + ParameterNoiseSampling('ParamNoise', hparams_pnoise), + PosteriorBNNSampling('BBAlphaDiv', hparams_alpha_div, 'AlphaDiv'), + PosteriorBNNSampling('MultitaskGP', hparams_gp, 'GP'), + ] + + # Run contextual bandit problem + t_init = time.time() + results = run_contextual_bandit(context_dim, num_actions, dataset, algos) + _, h_rewards = results + + # Display results + display_results(algos, opt_rewards, opt_actions, h_rewards, t_init, data_type) + +if __name__ == '__main__': + app.run(main) diff --git a/models/research/deep_speech/README.md b/models/research/deep_speech/README.md new file mode 100644 index 0000000000000000000000000000000000000000..59a9dea7f81e3963372b23d8c4436e3e04d763ac --- /dev/null +++ b/models/research/deep_speech/README.md @@ -0,0 +1,74 @@ +![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) +![TensorFlow Requirement: 
1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen)
+![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg)
+
+# DeepSpeech2 Model
+
+## Overview
+This is an implementation of the [DeepSpeech2](https://arxiv.org/pdf/1512.02595.pdf) model. The current implementation is based on the code from the authors' [DeepSpeech code](https://github.com/PaddlePaddle/DeepSpeech) and the implementation in the [MLPerf Repo](https://github.com/mlperf/reference/tree/master/speech_recognition).
+
+DeepSpeech2 is an end-to-end deep neural network for automatic speech
+recognition (ASR). It consists of 2 convolutional layers, 5 bidirectional RNN
+layers and a fully connected layer. The input features are linear spectrograms
+extracted from the audio. The network uses Connectionist Temporal Classification [CTC](https://www.cs.toronto.edu/~graves/icml_2006.pdf) as the loss function.
+
+## Dataset
+The [OpenSLR LibriSpeech Corpus](http://www.openslr.org/12/) is used for model training and evaluation.
+
+The training data is a combination of train-clean-100 and train-clean-360 (~130k
+examples in total). The validation set is dev-clean, which has about 2.7K examples.
+The download script will preprocess the data into three columns: wav_filename,
+wav_filesize, transcript. data/dataset.py will parse the csv file and build a
+tf.data.Dataset object to feed data. Within each epoch (except for the
+first if sortagrad is enabled), the training data will be shuffled batch-wise.
+
+## Running Code
+
+### Configure Python path
+Add the top-level /models folder to the Python path with the command:
+```
+export PYTHONPATH="$PYTHONPATH:/path/to/models"
+```
+
+### Install dependencies
+
+First, install the shared dependencies before running the code. Issue the following command:
+```
+pip3 install -r requirements.txt
+```
+or
+```
+pip install -r requirements.txt
+```
+
+### Run each step individually
+
+#### Download and preprocess dataset
+To download the dataset, issue the following command:
+```
+python data/download.py
+```
+Arguments:
+  * `--data_dir`: Directory in which to download and save the preprocessed data. By default, it is `/tmp/librispeech_data`.
+
+Use the `--help` or `-h` flag to get a full list of possible arguments.
+
+#### Train and evaluate model
+To train and evaluate the model, issue the following command:
+```
+python deep_speech.py
+```
+Arguments:
+  * `--model_dir`: Directory to save model training checkpoints. By default, it is `/tmp/deep_speech_model/`.
+  * `--train_data_dir`: Path to the csv manifest of the training dataset.
+  * `--eval_data_dir`: Path to the csv manifest of the evaluation dataset.
+  * `--num_gpus`: Number of GPUs to use (specify -1 if you want to use all available GPUs).
+
+There are other arguments for the DeepSpeech2 model and the training/evaluation process. Use the `--help` or `-h` flag to get a full list of possible arguments with detailed descriptions.
+
+### Run the benchmark
+A shell script [run_deep_speech.sh](run_deep_speech.sh) is provided to run the whole pipeline with default parameters. Issue the following command to run the benchmark:
+```
+sh run_deep_speech.sh
+```
+Note that by default, the training dataset in the benchmark includes train-clean-100, train-clean-360 and train-other-500, and the evaluation dataset includes dev-clean and dev-other.
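+
+If you want to sanity-check the preprocessed data, each split is written as a tab-separated csv manifest with the columns wav_filename, wav_filesize and transcript. For example, the dev-clean manifest can be loaded as follows (a minimal sketch, assuming the default `--data_dir` of `/tmp/librispeech_data`):
+```
+import pandas
+
+# Path produced by data/download.py with the default --data_dir.
+manifest = pandas.read_csv(
+    "/tmp/librispeech_data/dev-clean/LibriSpeech/dev-clean.csv", sep="\t")
+print(manifest.head())
+```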
diff --git a/models/research/deep_speech/__init__.py b/models/research/deep_speech/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/deep_speech/data/__init__.py b/models/research/deep_speech/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/deep_speech/data/dataset.py b/models/research/deep_speech/data/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..4a8cb59955c4608e20dcec6d9c5e38d705f24311 --- /dev/null +++ b/models/research/deep_speech/data/dataset.py @@ -0,0 +1,274 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Generate tf.data.Dataset object for deep speech training/evaluation.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import random +# pylint: disable=g-bad-import-order +import numpy as np +from six.moves import xrange # pylint: disable=redefined-builtin +import soundfile +import tensorflow as tf +# pylint: enable=g-bad-import-order + +import data.featurizer as featurizer # pylint: disable=g-bad-import-order + + +class AudioConfig(object): + """Configs for spectrogram extraction from audio.""" + + def __init__(self, + sample_rate, + window_ms, + stride_ms, + normalize=False): + """Initialize the AudioConfig class. + + Args: + sample_rate: an integer denoting the sample rate of the input waveform. + window_ms: an integer for the length of a spectrogram frame, in ms. + stride_ms: an integer for the frame stride, in ms. + normalize: a boolean for whether apply normalization on the audio feature. + """ + + self.sample_rate = sample_rate + self.window_ms = window_ms + self.stride_ms = stride_ms + self.normalize = normalize + + +class DatasetConfig(object): + """Config class for generating the DeepSpeechDataset.""" + + def __init__(self, audio_config, data_path, vocab_file_path, sortagrad): + """Initialize the configs for deep speech dataset. + + Args: + audio_config: AudioConfig object specifying the audio-related configs. + data_path: a string denoting the full path of a manifest file. + vocab_file_path: a string specifying the vocabulary file path. + sortagrad: a boolean, if set to true, audio sequences will be fed by + increasing length in the first training epoch, which will + expedite network convergence. + + Raises: + RuntimeError: file path not exist. + """ + + self.audio_config = audio_config + assert tf.gfile.Exists(data_path) + assert tf.gfile.Exists(vocab_file_path) + self.data_path = data_path + self.vocab_file_path = vocab_file_path + self.sortagrad = sortagrad + + +def _normalize_audio_feature(audio_feature): + """Perform mean and variance normalization on the spectrogram feature. 
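+
+  The mean and variance are computed per frequency bin, over the time axis of
+  the spectrogram.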
+ + Args: + audio_feature: a numpy array for the spectrogram feature. + + Returns: + a numpy array of the normalized spectrogram. + """ + mean = np.mean(audio_feature, axis=0) + var = np.var(audio_feature, axis=0) + normalized = (audio_feature - mean) / (np.sqrt(var) + 1e-6) + + return normalized + + +def _preprocess_audio(audio_file_path, audio_featurizer, normalize): + """Load the audio file and compute spectrogram feature.""" + data, _ = soundfile.read(audio_file_path) + feature = featurizer.compute_spectrogram_feature( + data, audio_featurizer.sample_rate, audio_featurizer.stride_ms, + audio_featurizer.window_ms) + # Feature normalization + if normalize: + feature = _normalize_audio_feature(feature) + + # Adding Channel dimension for conv2D input. + feature = np.expand_dims(feature, axis=2) + return feature + + +def _preprocess_data(file_path): + """Generate a list of tuples (wav_filename, wav_filesize, transcript). + + Each dataset file contains three columns: "wav_filename", "wav_filesize", + and "transcript". This function parses the csv file and stores each example + by the increasing order of audio length (indicated by wav_filesize). + AS the waveforms are ordered in increasing length, audio samples in a + mini-batch have similar length. + + Args: + file_path: a string specifying the csv file path for a dataset. + + Returns: + A list of tuples (wav_filename, wav_filesize, transcript) sorted by + file_size. + """ + tf.logging.info("Loading data set {}".format(file_path)) + with tf.gfile.Open(file_path, "r") as f: + lines = f.read().splitlines() + # Skip the csv header in lines[0]. + lines = lines[1:] + # The metadata file is tab separated. + lines = [line.split("\t", 2) for line in lines] + # Sort input data by the length of audio sequence. + lines.sort(key=lambda item: int(item[1])) + + return [tuple(line) for line in lines] + + +class DeepSpeechDataset(object): + """Dataset class for training/evaluation of DeepSpeech model.""" + + def __init__(self, dataset_config): + """Initialize the DeepSpeechDataset class. + + Args: + dataset_config: DatasetConfig object. + """ + self.config = dataset_config + # Instantiate audio feature extractor. + self.audio_featurizer = featurizer.AudioFeaturizer( + sample_rate=self.config.audio_config.sample_rate, + window_ms=self.config.audio_config.window_ms, + stride_ms=self.config.audio_config.stride_ms) + # Instantiate text feature extractor. + self.text_featurizer = featurizer.TextFeaturizer( + vocab_file=self.config.vocab_file_path) + + self.speech_labels = self.text_featurizer.speech_labels + self.entries = _preprocess_data(self.config.data_path) + # The generated spectrogram will have 161 feature bins. + self.num_feature_bins = 161 + + +def batch_wise_dataset_shuffle(entries, epoch_index, sortagrad, batch_size): + """Batch-wise shuffling of the data entries. + + Each data entry is in the format of (audio_file, file_size, transcript). + If epoch_index is 0 and sortagrad is true, we don't perform shuffling and + return entries in sorted file_size order. Otherwise, do batch_wise shuffling. + + Args: + entries: a list of data entries. + epoch_index: an integer of epoch index + sortagrad: a boolean to control whether sorting the audio in the first + training epoch. + batch_size: an integer for the batch size. + + Returns: + The shuffled data entries. + """ + shuffled_entries = [] + if epoch_index == 0 and sortagrad: + # No need to shuffle. + shuffled_entries = entries + else: + # Shuffle entries batch-wise. 
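+    # Keep each block of batch_size consecutive entries together (they have
+    # similar audio length thanks to the initial sort) and only shuffle the
+    # order of the blocks.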
+ max_buckets = int(math.floor(len(entries) / batch_size)) + total_buckets = [i for i in xrange(max_buckets)] + random.shuffle(total_buckets) + shuffled_entries = [] + for i in total_buckets: + shuffled_entries.extend(entries[i * batch_size : (i + 1) * batch_size]) + # If the last batch doesn't contain enough batch_size examples, + # just append it to the shuffled_entries. + shuffled_entries.extend(entries[max_buckets * batch_size:]) + + return shuffled_entries + + +def input_fn(batch_size, deep_speech_dataset, repeat=1): + """Input function for model training and evaluation. + + Args: + batch_size: an integer denoting the size of a batch. + deep_speech_dataset: DeepSpeechDataset object. + repeat: an integer for how many times to repeat the dataset. + + Returns: + a tf.data.Dataset object for model to consume. + """ + # Dataset properties + data_entries = deep_speech_dataset.entries + num_feature_bins = deep_speech_dataset.num_feature_bins + audio_featurizer = deep_speech_dataset.audio_featurizer + feature_normalize = deep_speech_dataset.config.audio_config.normalize + text_featurizer = deep_speech_dataset.text_featurizer + + def _gen_data(): + """Dataset generator function.""" + for audio_file, _, transcript in data_entries: + features = _preprocess_audio( + audio_file, audio_featurizer, feature_normalize) + labels = featurizer.compute_label_feature( + transcript, text_featurizer.token_to_index) + input_length = [features.shape[0]] + label_length = [len(labels)] + # Yield a tuple of (features, labels) where features is a dict containing + # all info about the actual data features. + yield ( + { + "features": features, + "input_length": input_length, + "label_length": label_length + }, + labels) + + dataset = tf.data.Dataset.from_generator( + _gen_data, + output_types=( + { + "features": tf.float32, + "input_length": tf.int32, + "label_length": tf.int32 + }, + tf.int32), + output_shapes=( + { + "features": tf.TensorShape([None, num_feature_bins, 1]), + "input_length": tf.TensorShape([1]), + "label_length": tf.TensorShape([1]) + }, + tf.TensorShape([None])) + ) + + # Repeat and batch the dataset + dataset = dataset.repeat(repeat) + + # Padding the features to its max length dimensions. + dataset = dataset.padded_batch( + batch_size=batch_size, + padded_shapes=( + { + "features": tf.TensorShape([None, num_feature_bins, 1]), + "input_length": tf.TensorShape([1]), + "label_length": tf.TensorShape([1]) + }, + tf.TensorShape([None])) + ) + + # Prefetch to improve speed of input pipeline. + dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) + return dataset diff --git a/models/research/deep_speech/data/download.py b/models/research/deep_speech/data/download.py new file mode 100644 index 0000000000000000000000000000000000000000..5ded03762138a0006c36585e2e8da450baaccac7 --- /dev/null +++ b/models/research/deep_speech/data/download.py @@ -0,0 +1,208 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Download and preprocess LibriSpeech dataset for DeepSpeech model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import codecs +import fnmatch +import os +import sys +import tarfile +import tempfile +import unicodedata + +from absl import app as absl_app +from absl import flags as absl_flags +import pandas +from six.moves import urllib +from sox import Transformer +import tensorflow as tf + +LIBRI_SPEECH_URLS = { + "train-clean-100": + "http://www.openslr.org/resources/12/train-clean-100.tar.gz", + "train-clean-360": + "http://www.openslr.org/resources/12/train-clean-360.tar.gz", + "train-other-500": + "http://www.openslr.org/resources/12/train-other-500.tar.gz", + "dev-clean": + "http://www.openslr.org/resources/12/dev-clean.tar.gz", + "dev-other": + "http://www.openslr.org/resources/12/dev-other.tar.gz", + "test-clean": + "http://www.openslr.org/resources/12/test-clean.tar.gz", + "test-other": + "http://www.openslr.org/resources/12/test-other.tar.gz" +} + + +def download_and_extract(directory, url): + """Download and extract the given split of dataset. + + Args: + directory: the directory where to extract the tarball. + url: the url to download the data file. + """ + + if not tf.gfile.Exists(directory): + tf.gfile.MakeDirs(directory) + + _, tar_filepath = tempfile.mkstemp(suffix=".tar.gz") + + try: + tf.logging.info("Downloading %s to %s" % (url, tar_filepath)) + + def _progress(count, block_size, total_size): + sys.stdout.write("\r>> Downloading {} {:.1f}%".format( + tar_filepath, 100.0 * count * block_size / total_size)) + sys.stdout.flush() + + urllib.request.urlretrieve(url, tar_filepath, _progress) + print() + statinfo = os.stat(tar_filepath) + tf.logging.info( + "Successfully downloaded %s, size(bytes): %d" % (url, statinfo.st_size)) + with tarfile.open(tar_filepath, "r") as tar: + tar.extractall(directory) + finally: + tf.gfile.Remove(tar_filepath) + + +def convert_audio_and_split_transcript(input_dir, source_name, target_name, + output_dir, output_file): + """Convert FLAC to WAV and split the transcript. + + For audio file, convert the format from FLAC to WAV using the sox.Transformer + library. + For transcripts, each line contains the sequence id and the corresponding + transcript (separated by space): + Input data format: seq-id transcript_of_seq-id + For example: + 1-2-0 transcript_of_1-2-0.flac + 1-2-1 transcript_of_1-2-1.flac + ... + + Each sequence id has a corresponding .flac file. + Parse the transcript file and generate a new csv file which has three columns: + "wav_filename": the absolute path to a wav file. + "wav_filesize": the size of the corresponding wav file. + "transcript": the transcript for this audio segement. + + Args: + input_dir: the directory which holds the input dataset. + source_name: the name of the specified dataset. e.g. test-clean + target_name: the directory name for the newly generated audio files. + e.g. test-clean-wav + output_dir: the directory to place the newly generated csv files. + output_file: the name of the newly generated csv file. e.g. 
test-clean.csv + """ + + tf.logging.info("Preprocessing audio and transcript for %s" % source_name) + source_dir = os.path.join(input_dir, source_name) + target_dir = os.path.join(input_dir, target_name) + + if not tf.gfile.Exists(target_dir): + tf.gfile.MakeDirs(target_dir) + + files = [] + tfm = Transformer() + # Convert all FLAC file into WAV format. At the same time, generate the csv + # file. + for root, _, filenames in tf.gfile.Walk(source_dir): + for filename in fnmatch.filter(filenames, "*.trans.txt"): + trans_file = os.path.join(root, filename) + with codecs.open(trans_file, "r", "utf-8") as fin: + for line in fin: + seqid, transcript = line.split(" ", 1) + # We do a encode-decode transformation here because the output type + # of encode is a bytes object, we need convert it to string. + transcript = unicodedata.normalize("NFKD", transcript).encode( + "ascii", "ignore").decode("ascii", "ignore").strip().lower() + + # Convert FLAC to WAV. + flac_file = os.path.join(root, seqid + ".flac") + wav_file = os.path.join(target_dir, seqid + ".wav") + if not tf.gfile.Exists(wav_file): + tfm.build(flac_file, wav_file) + wav_filesize = os.path.getsize(wav_file) + + files.append((os.path.abspath(wav_file), wav_filesize, transcript)) + + # Write to CSV file which contains three columns: + # "wav_filename", "wav_filesize", "transcript". + csv_file_path = os.path.join(output_dir, output_file) + df = pandas.DataFrame( + data=files, columns=["wav_filename", "wav_filesize", "transcript"]) + df.to_csv(csv_file_path, index=False, sep="\t") + tf.logging.info("Successfully generated csv file {}".format(csv_file_path)) + + +def download_and_process_datasets(directory, datasets): + """Download and pre-process the specified list of LibriSpeech dataset. + + Args: + directory: the directory to put all the downloaded and preprocessed data. + datasets: list of dataset names that will be downloaded and processed. + """ + + tf.logging.info("Preparing LibriSpeech dataset: {}".format( + ",".join(datasets))) + for dataset in datasets: + tf.logging.info("Preparing dataset %s", dataset) + dataset_dir = os.path.join(directory, dataset) + download_and_extract(dataset_dir, LIBRI_SPEECH_URLS[dataset]) + convert_audio_and_split_transcript( + dataset_dir + "/LibriSpeech", dataset, dataset + "-wav", + dataset_dir + "/LibriSpeech", dataset + ".csv") + + +def define_data_download_flags(): + """Define flags for data downloading.""" + absl_flags.DEFINE_string( + "data_dir", "/tmp/librispeech_data", + "Directory to download data and extract the tarball") + absl_flags.DEFINE_bool("train_only", False, + "If true, only download the training set") + absl_flags.DEFINE_bool("dev_only", False, + "If true, only download the dev set") + absl_flags.DEFINE_bool("test_only", False, + "If true, only download the test set") + + +def main(_): + if not tf.gfile.Exists(FLAGS.data_dir): + tf.gfile.MakeDirs(FLAGS.data_dir) + + if FLAGS.train_only: + download_and_process_datasets( + FLAGS.data_dir, + ["train-clean-100", "train-clean-360", "train-other-500"]) + elif FLAGS.dev_only: + download_and_process_datasets(FLAGS.data_dir, ["dev-clean", "dev-other"]) + elif FLAGS.test_only: + download_and_process_datasets(FLAGS.data_dir, ["test-clean", "test-other"]) + else: + # By default we download the entire dataset. 
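+    # That is, all seven LibriSpeech splits listed in LIBRI_SPEECH_URLS.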
+ download_and_process_datasets(FLAGS.data_dir, LIBRI_SPEECH_URLS.keys()) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + define_data_download_flags() + FLAGS = absl_flags.FLAGS + absl_app.run(main) diff --git a/models/research/deep_speech/data/featurizer.py b/models/research/deep_speech/data/featurizer.py new file mode 100644 index 0000000000000000000000000000000000000000..10b7069d3136d306075e9770685061749e634023 --- /dev/null +++ b/models/research/deep_speech/data/featurizer.py @@ -0,0 +1,118 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utility class for extracting features from the text and audio input.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import codecs +import numpy as np + + +def compute_spectrogram_feature(samples, sample_rate, stride_ms=10.0, + window_ms=20.0, max_freq=None, eps=1e-14): + """Compute the spectrograms for the input samples(waveforms). + + More about spectrogram computation, please refer to: + https://en.wikipedia.org/wiki/Short-time_Fourier_transform. + """ + if max_freq is None: + max_freq = sample_rate / 2 + if max_freq > sample_rate / 2: + raise ValueError("max_freq must not be greater than half of sample rate.") + + if stride_ms > window_ms: + raise ValueError("Stride size must not be greater than window size.") + + stride_size = int(0.001 * sample_rate * stride_ms) + window_size = int(0.001 * sample_rate * window_ms) + + # Extract strided windows + truncate_size = (len(samples) - window_size) % stride_size + samples = samples[:len(samples) - truncate_size] + nshape = (window_size, (len(samples) - window_size) // stride_size + 1) + nstrides = (samples.strides[0], samples.strides[0] * stride_size) + windows = np.lib.stride_tricks.as_strided( + samples, shape=nshape, strides=nstrides) + assert np.all( + windows[:, 1] == samples[stride_size:(stride_size + window_size)]) + + # Window weighting, squared Fast Fourier Transform (fft), scaling + weighting = np.hanning(window_size)[:, None] + fft = np.fft.rfft(windows * weighting, axis=0) + fft = np.absolute(fft) + fft = fft**2 + scale = np.sum(weighting**2) * sample_rate + fft[1:-1, :] *= (2.0 / scale) + fft[(0, -1), :] /= scale + # Prepare fft frequency list + freqs = float(sample_rate) / window_size * np.arange(fft.shape[0]) + + # Compute spectrogram feature + ind = np.where(freqs <= max_freq)[0][-1] + 1 + specgram = np.log(fft[:ind, :] + eps) + return np.transpose(specgram, (1, 0)) + + +class AudioFeaturizer(object): + """Class to extract spectrogram features from the audio input.""" + + def __init__(self, + sample_rate=16000, + window_ms=20.0, + stride_ms=10.0): + """Initialize the audio featurizer class according to the configs. + + Args: + sample_rate: an integer specifying the sample rate of the input waveform. 
+ window_ms: an integer for the length of a spectrogram frame, in ms. + stride_ms: an integer for the frame stride, in ms. + """ + self.sample_rate = sample_rate + self.window_ms = window_ms + self.stride_ms = stride_ms + + +def compute_label_feature(text, token_to_idx): + """Convert string to a list of integers.""" + tokens = list(text.strip().lower()) + feats = [token_to_idx[token] for token in tokens] + return feats + + +class TextFeaturizer(object): + """Extract text feature based on char-level granularity. + + By looking up the vocabulary table, each input string (one line of transcript) + will be converted to a sequence of integer indexes. + """ + + def __init__(self, vocab_file): + lines = [] + with codecs.open(vocab_file, "r", "utf-8") as fin: + lines.extend(fin.readlines()) + self.token_to_index = {} + self.index_to_token = {} + self.speech_labels = "" + index = 0 + for line in lines: + line = line[:-1] # Strip the '\n' char. + if line.startswith("#"): + # Skip from reading comment line. + continue + self.token_to_index[line] = index + self.index_to_token[index] = line + self.speech_labels += line + index += 1 diff --git a/models/research/deep_speech/data/vocabulary.txt b/models/research/deep_speech/data/vocabulary.txt new file mode 100644 index 0000000000000000000000000000000000000000..51852b3a78b0b14e19b15782002cf8c401ac71c9 --- /dev/null +++ b/models/research/deep_speech/data/vocabulary.txt @@ -0,0 +1,33 @@ +# List of alphabets (utf-8 encoded). Note that '#' starts a comment line, which +# will be ignored by the parser. +# begin of vocabulary + +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +' +- +# end of vocabulary diff --git a/models/research/deep_speech/decoder.py b/models/research/deep_speech/decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..f46983170f5885385942be1b24e262f30b4be8e0 --- /dev/null +++ b/models/research/deep_speech/decoder.py @@ -0,0 +1,95 @@ + +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Deep speech decoder.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import itertools + +from nltk.metrics import distance +import numpy as np + + +class DeepSpeechDecoder(object): + """Greedy decoder implementation for Deep Speech model.""" + + def __init__(self, labels, blank_index=28): + """Decoder initialization. + + Arguments: + labels: a string specifying the speech labels for the decoder to use. + blank_index: an integer specifying index for the blank character. + Defaults to 28. + """ + # e.g. 
labels = "[a-z]' _" + self.labels = labels + self.blank_index = blank_index + self.int_to_char = dict([(i, c) for (i, c) in enumerate(labels)]) + + def convert_to_string(self, sequence): + """Convert a sequence of indexes into corresponding string.""" + return ''.join([self.int_to_char[i] for i in sequence]) + + def wer(self, decode, target): + """Computes the Word Error Rate (WER). + + WER is defined as the edit distance between the two provided sentences after + tokenizing to words. + + Args: + decode: string of the decoded output. + target: a string for the ground truth label. + + Returns: + A float number for the WER of the current decode-target pair. + """ + # Map each word to a new char. + words = set(decode.split() + target.split()) + word2char = dict(zip(words, range(len(words)))) + + new_decode = [chr(word2char[w]) for w in decode.split()] + new_target = [chr(word2char[w]) for w in target.split()] + + return distance.edit_distance(''.join(new_decode), ''.join(new_target)) + + def cer(self, decode, target): + """Computes the Character Error Rate (CER). + + CER is defined as the edit distance between the two given strings. + + Args: + decode: a string of the decoded output. + target: a string for the ground truth label. + + Returns: + A float number denoting the CER for the current sentence pair. + """ + return distance.edit_distance(decode, target) + + def decode(self, logits): + """Decode the best guess from logits using greedy algorithm.""" + # Choose the class with maximimum probability. + best = list(np.argmax(logits, axis=1)) + # Merge repeated chars. + merge = [k for k, _ in itertools.groupby(best)] + # Remove the blank index in the decoded sequence. + merge_remove_blank = [] + for k in merge: + if k != self.blank_index: + merge_remove_blank.append(k) + + return self.convert_to_string(merge_remove_blank) diff --git a/models/research/deep_speech/deep_speech.py b/models/research/deep_speech/deep_speech.py new file mode 100644 index 0000000000000000000000000000000000000000..3d809c3cbc245e20b752bf2d2e45823f33521d64 --- /dev/null +++ b/models/research/deep_speech/deep_speech.py @@ -0,0 +1,432 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Main entry to train and evaluate DeepSpeech model.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +# pylint: disable=g-bad-import-order +from absl import app as absl_app +from absl import flags +import tensorflow as tf +# pylint: enable=g-bad-import-order + +import data.dataset as dataset +import decoder +import deep_speech_model +from official.utils.flags import core as flags_core +from official.utils.misc import distribution_utils +from official.utils.misc import model_helpers + +# Default vocabulary file +_VOCABULARY_FILE = os.path.join( + os.path.dirname(__file__), "data/vocabulary.txt") +# Evaluation metrics +_WER_KEY = "WER" +_CER_KEY = "CER" + + +def compute_length_after_conv(max_time_steps, ctc_time_steps, input_length): + """Computes the time_steps/ctc_input_length after convolution. + + Suppose that the original feature contains two parts: + 1) Real spectrogram signals, spanning input_length steps. + 2) Padded part with all 0s. + The total length of those two parts is denoted as max_time_steps, which is + the padded length of the current batch. After convolution layers, the time + steps of a spectrogram feature will be decreased. As we know the percentage + of its original length within the entire length, we can compute the time steps + for the signal after conv as follows (using ctc_input_length to denote): + ctc_input_length = (input_length / max_time_steps) * output_length_of_conv. + This length is then fed into ctc loss function to compute loss. + + Args: + max_time_steps: max_time_steps for the batch, after padding. + ctc_time_steps: number of timesteps after convolution. + input_length: actual length of the original spectrogram, without padding. + + Returns: + the ctc_input_length after convolution layer. + """ + ctc_input_length = tf.to_float(tf.multiply( + input_length, ctc_time_steps)) + return tf.to_int32(tf.floordiv( + ctc_input_length, tf.to_float(max_time_steps))) + + +def ctc_loss(label_length, ctc_input_length, labels, logits): + """Computes the ctc loss for the current batch of predictions.""" + label_length = tf.to_int32(tf.squeeze(label_length)) + ctc_input_length = tf.to_int32(tf.squeeze(ctc_input_length)) + sparse_labels = tf.to_int32( + tf.keras.backend.ctc_label_dense_to_sparse(labels, label_length)) + y_pred = tf.log(tf.transpose( + logits, perm=[1, 0, 2]) + tf.keras.backend.epsilon()) + + return tf.expand_dims( + tf.nn.ctc_loss(labels=sparse_labels, inputs=y_pred, + sequence_length=ctc_input_length), + axis=1) + + +def evaluate_model(estimator, speech_labels, entries, input_fn_eval): + """Evaluate the model performance using WER anc CER as metrics. + + WER: Word Error Rate + CER: Character Error Rate + + Args: + estimator: estimator to evaluate. + speech_labels: a string specifying all the character in the vocabulary. + entries: a list of data entries (audio_file, file_size, transcript) for the + given dataset. + input_fn_eval: data input function for evaluation. + + Returns: + Evaluation result containing 'wer' and 'cer' as two metrics. 
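+    Both metrics are averaged over the evaluated examples, after normalizing
+    each example's edit distance by the length of its reference transcript
+    (words for WER, characters for CER).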
+ """ + # Get predictions + predictions = estimator.predict(input_fn=input_fn_eval) + + # Get probabilities of each predicted class + probs = [pred["probabilities"] for pred in predictions] + + num_of_examples = len(probs) + targets = [entry[2] for entry in entries] # The ground truth transcript + + total_wer, total_cer = 0, 0 + greedy_decoder = decoder.DeepSpeechDecoder(speech_labels) + for i in range(num_of_examples): + # Decode string. + decoded_str = greedy_decoder.decode(probs[i]) + # Compute CER. + total_cer += greedy_decoder.cer(decoded_str, targets[i]) / float( + len(targets[i])) + # Compute WER. + total_wer += greedy_decoder.wer(decoded_str, targets[i]) / float( + len(targets[i].split())) + + # Get mean value + total_cer /= num_of_examples + total_wer /= num_of_examples + + global_step = estimator.get_variable_value(tf.GraphKeys.GLOBAL_STEP) + eval_results = { + _WER_KEY: total_wer, + _CER_KEY: total_cer, + tf.GraphKeys.GLOBAL_STEP: global_step, + } + + return eval_results + + +def model_fn(features, labels, mode, params): + """Define model function for deep speech model. + + Args: + features: a dictionary of input_data features. It includes the data + input_length, label_length and the spectrogram features. + labels: a list of labels for the input data. + mode: current estimator mode; should be one of + `tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT`. + params: a dict of hyper parameters to be passed to model_fn. + + Returns: + EstimatorSpec parameterized according to the input params and the + current mode. + """ + num_classes = params["num_classes"] + input_length = features["input_length"] + label_length = features["label_length"] + features = features["features"] + + # Create DeepSpeech2 model. + model = deep_speech_model.DeepSpeech2( + flags_obj.rnn_hidden_layers, flags_obj.rnn_type, + flags_obj.is_bidirectional, flags_obj.rnn_hidden_size, + num_classes, flags_obj.use_bias) + + if mode == tf.estimator.ModeKeys.PREDICT: + logits = model(features, training=False) + predictions = { + "classes": tf.argmax(logits, axis=2), + "probabilities": tf.nn.softmax(logits), + "logits": logits + } + return tf.estimator.EstimatorSpec( + mode=mode, + predictions=predictions) + + # In training mode. + logits = model(features, training=True) + probs = tf.nn.softmax(logits) + ctc_input_length = compute_length_after_conv( + tf.shape(features)[1], tf.shape(probs)[1], input_length) + # Compute CTC loss + loss = tf.reduce_mean(ctc_loss( + label_length, ctc_input_length, labels, probs)) + + optimizer = tf.train.AdamOptimizer(learning_rate=flags_obj.learning_rate) + global_step = tf.train.get_or_create_global_step() + minimize_op = optimizer.minimize(loss, global_step=global_step) + update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) + # Create the train_op that groups both minimize_ops and update_ops + train_op = tf.group(minimize_op, update_ops) + + return tf.estimator.EstimatorSpec( + mode=mode, + loss=loss, + train_op=train_op) + + +def generate_dataset(data_dir): + """Generate a speech dataset.""" + audio_conf = dataset.AudioConfig(sample_rate=flags_obj.sample_rate, + window_ms=flags_obj.window_ms, + stride_ms=flags_obj.stride_ms, + normalize=True) + train_data_conf = dataset.DatasetConfig( + audio_conf, + data_dir, + flags_obj.vocabulary_file, + flags_obj.sortagrad + ) + speech_dataset = dataset.DeepSpeechDataset(train_data_conf) + return speech_dataset + +def per_device_batch_size(batch_size, num_gpus): + """For multi-gpu, batch-size must be a multiple of the number of GPUs. 
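+  For example, a global batch_size of 128 on 4 GPUs yields a per-device batch
+  size of 32.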
+ + + Note that distribution strategy handles this automatically when used with + Keras. For using with Estimator, we need to get per GPU batch. + + Args: + batch_size: Global batch size to be divided among devices. This should be + equal to num_gpus times the single-GPU batch_size for multi-gpu training. + num_gpus: How many GPUs are used with DistributionStrategies. + + Returns: + Batch size per device. + + Raises: + ValueError: if batch_size is not divisible by number of devices + """ + if num_gpus <= 1: + return batch_size + + remainder = batch_size % num_gpus + if remainder: + err = ('When running with multiple GPUs, batch size ' + 'must be a multiple of the number of available GPUs. Found {} ' + 'GPUs with a batch size of {}; try --batch_size={} instead.' + ).format(num_gpus, batch_size, batch_size - remainder) + raise ValueError(err) + return int(batch_size / num_gpus) + +def run_deep_speech(_): + """Run deep speech training and eval loop.""" + tf.set_random_seed(flags_obj.seed) + # Data preprocessing + tf.logging.info("Data preprocessing...") + train_speech_dataset = generate_dataset(flags_obj.train_data_dir) + eval_speech_dataset = generate_dataset(flags_obj.eval_data_dir) + + # Number of label classes. Label string is "[a-z]' -" + num_classes = len(train_speech_dataset.speech_labels) + + # Use distribution strategy for multi-gpu training + num_gpus = flags_core.get_num_gpus(flags_obj) + distribution_strategy = distribution_utils.get_distribution_strategy(num_gpus=num_gpus) + run_config = tf.estimator.RunConfig( + train_distribute=distribution_strategy) + + estimator = tf.estimator.Estimator( + model_fn=model_fn, + model_dir=flags_obj.model_dir, + config=run_config, + params={ + "num_classes": num_classes, + } + ) + + # Benchmark logging + run_params = { + "batch_size": flags_obj.batch_size, + "train_epochs": flags_obj.train_epochs, + "rnn_hidden_size": flags_obj.rnn_hidden_size, + "rnn_hidden_layers": flags_obj.rnn_hidden_layers, + "rnn_type": flags_obj.rnn_type, + "is_bidirectional": flags_obj.is_bidirectional, + "use_bias": flags_obj.use_bias + } + + per_replica_batch_size = per_device_batch_size(flags_obj.batch_size, num_gpus) + + def input_fn_train(): + return dataset.input_fn( + per_replica_batch_size, train_speech_dataset) + + def input_fn_eval(): + return dataset.input_fn( + per_replica_batch_size, eval_speech_dataset) + + total_training_cycle = (flags_obj.train_epochs // + flags_obj.epochs_between_evals) + for cycle_index in range(total_training_cycle): + tf.logging.info("Starting a training cycle: %d/%d", + cycle_index + 1, total_training_cycle) + + # Perform batch_wise dataset shuffling + train_speech_dataset.entries = dataset.batch_wise_dataset_shuffle( + train_speech_dataset.entries, cycle_index, flags_obj.sortagrad, + flags_obj.batch_size) + + estimator.train(input_fn=input_fn_train) + + # Evaluation + tf.logging.info("Starting to evaluate...") + + eval_results = evaluate_model( + estimator, eval_speech_dataset.speech_labels, + eval_speech_dataset.entries, input_fn_eval) + + # Log the WER and CER results. 
+ benchmark_logger.log_evaluation_result(eval_results) + tf.logging.info( + "Iteration {}: WER = {:.2f}, CER = {:.2f}".format( + cycle_index + 1, eval_results[_WER_KEY], eval_results[_CER_KEY])) + + # If some evaluation threshold is met + if model_helpers.past_stop_threshold( + flags_obj.wer_threshold, eval_results[_WER_KEY]): + break + + +def define_deep_speech_flags(): + """Add flags for run_deep_speech.""" + # Add common flags + flags_core.define_base( + data_dir=False, # we use train_data_dir and eval_data_dir instead + export_dir=True, + train_epochs=True, + hooks=True, + num_gpu=True, + epochs_between_evals=True + ) + flags_core.define_performance( + num_parallel_calls=False, + inter_op=False, + intra_op=False, + synthetic_data=False, + max_train_steps=False, + dtype=False + ) + flags_core.define_benchmark() + flags.adopt_module_key_flags(flags_core) + + flags_core.set_defaults( + model_dir="/tmp/deep_speech_model/", + export_dir="/tmp/deep_speech_saved_model/", + train_epochs=10, + batch_size=128, + hooks="") + + # Deep speech flags + flags.DEFINE_integer( + name="seed", default=1, + help=flags_core.help_wrap("The random seed.")) + + flags.DEFINE_string( + name="train_data_dir", + default="/tmp/librispeech_data/test-clean/LibriSpeech/test-clean.csv", + help=flags_core.help_wrap("The csv file path of train dataset.")) + + flags.DEFINE_string( + name="eval_data_dir", + default="/tmp/librispeech_data/test-clean/LibriSpeech/test-clean.csv", + help=flags_core.help_wrap("The csv file path of evaluation dataset.")) + + flags.DEFINE_bool( + name="sortagrad", default=True, + help=flags_core.help_wrap( + "If true, sort examples by audio length and perform no " + "batch_wise shuffling for the first epoch.")) + + flags.DEFINE_integer( + name="sample_rate", default=16000, + help=flags_core.help_wrap("The sample rate for audio.")) + + flags.DEFINE_integer( + name="window_ms", default=20, + help=flags_core.help_wrap("The frame length for spectrogram.")) + + flags.DEFINE_integer( + name="stride_ms", default=10, + help=flags_core.help_wrap("The frame step.")) + + flags.DEFINE_string( + name="vocabulary_file", default=_VOCABULARY_FILE, + help=flags_core.help_wrap("The file path of vocabulary file.")) + + # RNN related flags + flags.DEFINE_integer( + name="rnn_hidden_size", default=800, + help=flags_core.help_wrap("The hidden size of RNNs.")) + + flags.DEFINE_integer( + name="rnn_hidden_layers", default=5, + help=flags_core.help_wrap("The number of RNN layers.")) + + flags.DEFINE_bool( + name="use_bias", default=True, + help=flags_core.help_wrap("Use bias in the last fully-connected layer")) + + flags.DEFINE_bool( + name="is_bidirectional", default=True, + help=flags_core.help_wrap("If rnn unit is bidirectional")) + + flags.DEFINE_enum( + name="rnn_type", default="gru", + enum_values=deep_speech_model.SUPPORTED_RNNS.keys(), + case_sensitive=False, + help=flags_core.help_wrap("Type of RNN cell.")) + + # Training related flags + flags.DEFINE_float( + name="learning_rate", default=5e-4, + help=flags_core.help_wrap("The initial learning rate.")) + + # Evaluation metrics threshold + flags.DEFINE_float( + name="wer_threshold", default=None, + help=flags_core.help_wrap( + "If passed, training will stop when the evaluation metric WER is " + "greater than or equal to wer_threshold. 
For libri speech dataset " + "the desired wer_threshold is 0.23 which is the result achieved by " + "MLPerf implementation.")) + + +def main(_): + run_deep_speech(flags_obj) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + define_deep_speech_flags() + flags_obj = flags.FLAGS + absl_app.run(main) + diff --git a/models/research/deep_speech/deep_speech_model.py b/models/research/deep_speech/deep_speech_model.py new file mode 100644 index 0000000000000000000000000000000000000000..dd768f825c792eb9f8f8ca7dbef6a25f5ce81091 --- /dev/null +++ b/models/research/deep_speech/deep_speech_model.py @@ -0,0 +1,185 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Network structure for DeepSpeech2 model.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import xrange # pylint: disable=redefined-builtin +import tensorflow as tf + +# Supported rnn cells. +SUPPORTED_RNNS = { + "lstm": tf.contrib.rnn.BasicLSTMCell, + "rnn": tf.contrib.rnn.RNNCell, + "gru": tf.contrib.rnn.GRUCell, +} + +# Parameters for batch normalization. +_BATCH_NORM_EPSILON = 1e-5 +_BATCH_NORM_DECAY = 0.997 + +# Filters of convolution layer +_CONV_FILTERS = 32 + + +def batch_norm(inputs, training): + """Batch normalization layer. + + Note that the momentum to use will affect validation accuracy over time. + Batch norm has different behaviors during training/evaluation. With a large + momentum, the model takes longer to get a near-accurate estimation of the + moving mean/variance over the entire training dataset, which means we need + more iterations to see good evaluation results. If the training data is evenly + distributed over the feature space, we can also try setting a smaller momentum + (such as 0.1) to get good evaluation result sooner. + + Args: + inputs: input data for batch norm layer. + training: a boolean to indicate if it is in training stage. + + Returns: + tensor output from batch norm layer. + """ + return tf.layers.batch_normalization( + inputs=inputs, momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, + fused=True, training=training) + + +def _conv_bn_layer(inputs, padding, filters, kernel_size, strides, layer_id, + training): + """Defines 2D convolutional + batch normalization layer. + + Args: + inputs: input data for convolution layer. + padding: padding to be applied before convolution layer. + filters: an integer, number of output filters in the convolution. + kernel_size: a tuple specifying the height and width of the 2D convolution + window. + strides: a tuple specifying the stride length of the convolution. + layer_id: an integer specifying the layer index. + training: a boolean to indicate which stage we are in (training/eval). + + Returns: + tensor output from the current layer. 
+ """ + # Perform symmetric padding on the feature dimension of time_step + # This step is required to avoid issues when RNN output sequence is shorter + # than the label length. + inputs = tf.pad( + inputs, + [[0, 0], [padding[0], padding[0]], [padding[1], padding[1]], [0, 0]]) + inputs = tf.layers.conv2d( + inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides, + padding="valid", use_bias=False, activation=tf.nn.relu6, + name="cnn_{}".format(layer_id)) + return batch_norm(inputs, training) + + +def _rnn_layer(inputs, rnn_cell, rnn_hidden_size, layer_id, is_batch_norm, + is_bidirectional, training): + """Defines a batch normalization + rnn layer. + + Args: + inputs: input tensors for the current layer. + rnn_cell: RNN cell instance to use. + rnn_hidden_size: an integer for the dimensionality of the rnn output space. + layer_id: an integer for the index of current layer. + is_batch_norm: a boolean specifying whether to perform batch normalization + on input states. + is_bidirectional: a boolean specifying whether the rnn layer is + bi-directional. + training: a boolean to indicate which stage we are in (training/eval). + + Returns: + tensor output for the current layer. + """ + if is_batch_norm: + inputs = batch_norm(inputs, training) + + # Construct forward/backward RNN cells. + fw_cell = rnn_cell(num_units=rnn_hidden_size, + name="rnn_fw_{}".format(layer_id)) + bw_cell = rnn_cell(num_units=rnn_hidden_size, + name="rnn_bw_{}".format(layer_id)) + + if is_bidirectional: + outputs, _ = tf.nn.bidirectional_dynamic_rnn( + cell_fw=fw_cell, cell_bw=bw_cell, inputs=inputs, dtype=tf.float32, + swap_memory=True) + rnn_outputs = tf.concat(outputs, -1) + else: + rnn_outputs = tf.nn.dynamic_rnn( + fw_cell, inputs, dtype=tf.float32, swap_memory=True) + + return rnn_outputs + + +class DeepSpeech2(object): + """Define DeepSpeech2 model.""" + + def __init__(self, num_rnn_layers, rnn_type, is_bidirectional, + rnn_hidden_size, num_classes, use_bias): + """Initialize DeepSpeech2 model. + + Args: + num_rnn_layers: an integer, the number of rnn layers. By default, it's 5. + rnn_type: a string, one of the supported rnn cells: gru, rnn and lstm. + is_bidirectional: a boolean to indicate if the rnn layer is bidirectional. + rnn_hidden_size: an integer for the number of hidden states in each unit. + num_classes: an integer, the number of output classes/labels. + use_bias: a boolean specifying whether to use bias in the last fc layer. + """ + self.num_rnn_layers = num_rnn_layers + self.rnn_type = rnn_type + self.is_bidirectional = is_bidirectional + self.rnn_hidden_size = rnn_hidden_size + self.num_classes = num_classes + self.use_bias = use_bias + + def __call__(self, inputs, training): + # Two cnn layers. + inputs = _conv_bn_layer( + inputs, padding=(20, 5), filters=_CONV_FILTERS, kernel_size=(41, 11), + strides=(2, 2), layer_id=1, training=training) + + inputs = _conv_bn_layer( + inputs, padding=(10, 5), filters=_CONV_FILTERS, kernel_size=(21, 11), + strides=(2, 1), layer_id=2, training=training) + + # output of conv_layer2 with the shape of + # [batch_size (N), times (T), features (F), channels (C)]. + # Convert the conv output to rnn input. + batch_size = tf.shape(inputs)[0] + feat_size = inputs.get_shape().as_list()[2] + inputs = tf.reshape( + inputs, + [batch_size, -1, feat_size * _CONV_FILTERS]) + + # RNN layers. + rnn_cell = SUPPORTED_RNNS[self.rnn_type] + for layer_counter in xrange(self.num_rnn_layers): + # No batch normalization on the first layer. 
+ is_batch_norm = (layer_counter != 0) + inputs = _rnn_layer( + inputs, rnn_cell, self.rnn_hidden_size, layer_counter + 1, + is_batch_norm, self.is_bidirectional, training) + + # FC layer with batch norm. + inputs = batch_norm(inputs, training) + logits = tf.layers.dense(inputs, self.num_classes, use_bias=self.use_bias) + + return logits + diff --git a/models/research/deep_speech/requirements.txt b/models/research/deep_speech/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d951b6c439294c37eabb65e20f2a043eecf3bb5e --- /dev/null +++ b/models/research/deep_speech/requirements.txt @@ -0,0 +1,4 @@ +nltk>=3.3 +pandas>=0.23.3 +soundfile>=0.10.2 +sox>=1.3.3 diff --git a/models/research/deep_speech/run_deep_speech.sh b/models/research/deep_speech/run_deep_speech.sh new file mode 100644 index 0000000000000000000000000000000000000000..f1559aa614e084d17bcad93a739481cfda545bc6 --- /dev/null +++ b/models/research/deep_speech/run_deep_speech.sh @@ -0,0 +1,50 @@ +#!/bin/bash +# Script to run deep speech model to achieve the MLPerf target (WER = 0.23) +# Step 1: download the LibriSpeech dataset. +echo "Data downloading..." +python data/download.py + +## After data downloading, the dataset directories are: +train_clean_100="/tmp/librispeech_data/train-clean-100/LibriSpeech/train-clean-100.csv" +train_clean_360="/tmp/librispeech_data/train-clean-360/LibriSpeech/train-clean-360.csv" +train_other_500="/tmp/librispeech_data/train-other-500/LibriSpeech/train-other-500.csv" +dev_clean="/tmp/librispeech_data/dev-clean/LibriSpeech/dev-clean.csv" +dev_other="/tmp/librispeech_data/dev-other/LibriSpeech/dev-other.csv" +test_clean="/tmp/librispeech_data/test-clean/LibriSpeech/test-clean.csv" +test_other="/tmp/librispeech_data/test-other/LibriSpeech/test-other.csv" + +# Step 2: generate train dataset and evaluation dataset +echo "Data preprocessing..." +train_file="/tmp/librispeech_data/train_dataset.csv" +eval_file="/tmp/librispeech_data/eval_dataset.csv" + +head -1 $train_clean_100 > $train_file +for filename in $train_clean_100 $train_clean_360 $train_other_500 +do + sed 1d $filename >> $train_file +done + +head -1 $dev_clean > $eval_file +for filename in $dev_clean $dev_other +do + sed 1d $filename >> $eval_file +done + +# Step 3: filter out the audio files that exceed max time duration. +final_train_file="/tmp/librispeech_data/final_train_dataset.csv" +final_eval_file="/tmp/librispeech_data/final_eval_dataset.csv" + +MAX_AUDIO_LEN=27.0 +awk -v maxlen="$MAX_AUDIO_LEN" 'BEGIN{FS="\t";} NR==1{print $0} NR>1{cmd="soxi -D "$1""; cmd|getline x; if(x<=maxlen) {print $0}; close(cmd);}' $train_file > $final_train_file +awk -v maxlen="$MAX_AUDIO_LEN" 'BEGIN{FS="\t";} NR==1{print $0} NR>1{cmd="soxi -D "$1""; cmd|getline x; if(x<=maxlen) {print $0}; close(cmd);}' $eval_file > $final_eval_file + +# Step 4: run the training and evaluation loop in background, and save the running info to a log file +echo "Model training and evaluation..." +start=`date +%s` + +log_file=log_`date +%Y-%m-%d` +nohup python deep_speech.py --train_data_dir=$final_train_file --eval_data_dir=$final_eval_file --num_gpus=-1 --wer_threshold=0.23 --seed=1 >$log_file 2>&1& + +end=`date +%s` +runtime=$((end-start)) +echo "Model training time is" $runtime "seconds." 
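Step 3 above (dropping audio clips longer than MAX_AUDIO_LEN) can also be expressed in Python with the pandas and soundfile packages listed in requirements.txt. The snippet below is only an illustrative sketch, not part of the patch; it assumes the generated CSVs are tab-separated with the audio file path in the first column, which is the same assumption the awk commands make.

```python
# Illustrative sketch of the duration filter in Step 3 (not part of the patch).
# Assumes tab-separated CSVs whose first column holds the audio file path.
import pandas as pd
import soundfile as sf

MAX_AUDIO_LEN = 27.0  # maximum clip length in seconds, as in the script


def filter_by_duration(in_csv, out_csv, max_len=MAX_AUDIO_LEN):
  """Copies in_csv to out_csv, keeping only clips of at most max_len seconds."""
  frame = pd.read_csv(in_csv, sep="\t")
  durations = frame.iloc[:, 0].map(lambda path: sf.info(path).duration)
  frame[durations <= max_len].to_csv(out_csv, sep="\t", index=False)


# Example usage with the paths defined in the script above:
# filter_by_duration("/tmp/librispeech_data/train_dataset.csv",
#                    "/tmp/librispeech_data/final_train_dataset.csv")
```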
diff --git a/models/research/deeplab/README.md b/models/research/deeplab/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8609432b5bd78b56bf18a557dd5b813ed6a3fc77 --- /dev/null +++ b/models/research/deeplab/README.md @@ -0,0 +1,321 @@ +# DeepLab: Deep Labelling for Semantic Image Segmentation + +DeepLab is a state-of-art deep learning model for semantic image segmentation, +where the goal is to assign semantic labels (e.g., person, dog, cat and so on) +to every pixel in the input image. Current implementation includes the following +features: + +1. DeepLabv1 [1]: We use *atrous convolution* to explicitly control the + resolution at which feature responses are computed within Deep Convolutional + Neural Networks. + +2. DeepLabv2 [2]: We use *atrous spatial pyramid pooling* (ASPP) to robustly + segment objects at multiple scales with filters at multiple sampling rates + and effective fields-of-views. + +3. DeepLabv3 [3]: We augment the ASPP module with *image-level feature* [5, 6] + to capture longer range information. We also include *batch normalization* + [7] parameters to facilitate the training. In particular, we applying atrous + convolution to extract output features at different output strides during + training and evaluation, which efficiently enables training BN at output + stride = 16 and attains a high performance at output stride = 8 during + evaluation. + +4. DeepLabv3+ [4]: We extend DeepLabv3 to include a simple yet effective + decoder module to refine the segmentation results especially along object + boundaries. Furthermore, in this encoder-decoder structure one can + arbitrarily control the resolution of extracted encoder features by atrous + convolution to trade-off precision and runtime. + +If you find the code useful for your research, please consider citing our latest +works: + +* DeepLabv3+: + +``` +@inproceedings{deeplabv3plus2018, + title={Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation}, + author={Liang-Chieh Chen and Yukun Zhu and George Papandreou and Florian Schroff and Hartwig Adam}, + booktitle={ECCV}, + year={2018} +} +``` + +* MobileNetv2: + +``` +@inproceedings{mobilenetv22018, + title={MobileNetV2: Inverted Residuals and Linear Bottlenecks}, + author={Mark Sandler and Andrew Howard and Menglong Zhu and Andrey Zhmoginov and Liang-Chieh Chen}, + booktitle={CVPR}, + year={2018} +} +``` + +* MobileNetv3: + +``` +@inproceedings{mobilenetv32019, + title={Searching for MobileNetV3}, + author={Andrew Howard and Mark Sandler and Grace Chu and Liang-Chieh Chen and Bo Chen and Mingxing Tan and Weijun Wang and Yukun Zhu and Ruoming Pang and Vijay Vasudevan and Quoc V. Le and Hartwig Adam}, + booktitle={ICCV}, + year={2019} +} +``` + +* Architecture search for dense prediction cell: + +``` +@inproceedings{dpc2018, + title={Searching for Efficient Multi-Scale Architectures for Dense Image Prediction}, + author={Liang-Chieh Chen and Maxwell D. 
Collins and Yukun Zhu and George Papandreou and Barret Zoph and Florian Schroff and Hartwig Adam and Jonathon Shlens}, + booktitle={NIPS}, + year={2018} +} + +``` + +* Auto-DeepLab (also called hnasnet in core/nas_network.py): + +``` +@inproceedings{autodeeplab2019, + title={Auto-DeepLab: Hierarchical Neural Architecture Search for Semantic +Image Segmentation}, + author={Chenxi Liu and Liang-Chieh Chen and Florian Schroff and Hartwig Adam + and Wei Hua and Alan Yuille and Li Fei-Fei}, + booktitle={CVPR}, + year={2019} +} + +``` + + +In the current implementation, we support adopting the following network +backbones: + +1. MobileNetv2 [8] and MobileNetv3 [16]: A fast network structure designed + for mobile devices. + +2. Xception [9, 10]: A powerful network structure intended for server-side + deployment. + +3. ResNet-v1-{50,101} [14]: We provide both the original ResNet-v1 and its + 'beta' variant where the 'stem' is modified for semantic segmentation. + +4. PNASNet [15]: A Powerful network structure found by neural architecture + search. + +5. Auto-DeepLab (called HNASNet in the code): A segmentation-specific network + backbone found by neural architecture search. + +This directory contains our TensorFlow [11] implementation. We provide codes +allowing users to train the model, evaluate results in terms of mIOU (mean +intersection-over-union), and visualize segmentation results. We use PASCAL VOC +2012 [12] and Cityscapes [13] semantic segmentation benchmarks as an example in +the code. + +Some segmentation results on Flickr images: +

+[Example segmentation results on Flickr images]
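The atrous convolution used throughout these models is an ordinary convolution with a dilation rate greater than one: it enlarges the field of view of each filter without downsampling the feature map, which is what lets DeepLab extract features at different output strides. A minimal TF 1.x illustration (not code from this repository; the tensor shape is hypothetical):

```python
# Illustrative only: a 3x3 convolution with atrous (dilation) rate 2 covers a
# 5x5 receptive field while keeping the input's 65x65 spatial resolution.
import tensorflow as tf

features = tf.placeholder(tf.float32, [None, 65, 65, 256])  # hypothetical shape
atrous_out = tf.layers.conv2d(
    features, filters=256, kernel_size=3, dilation_rate=2,
    padding="same", activation=tf.nn.relu)
print(atrous_out.shape)  # (?, 65, 65, 256): resolution is unchanged
```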

+ +## Contacts (Maintainers) + +* Liang-Chieh Chen, github: [aquariusjay](https://github.com/aquariusjay) +* YuKun Zhu, github: [yknzhu](https://github.com/YknZhu) +* George Papandreou, github: [gpapan](https://github.com/gpapan) +* Hui Hui, github: [huihui-personal](https://github.com/huihui-personal) +* Maxwell D. Collins, github: [mcollinswisc](https://github.com/mcollinswisc) +* Ting Liu: github: [tingliu](https://github.com/tingliu) + +## Tables of Contents + +Demo: + +*
Colab notebook for off-the-shelf inference.
+ +Running: + +* Installation.
+* Running DeepLab on PASCAL VOC 2012 semantic segmentation dataset.
+* Running DeepLab on Cityscapes semantic segmentation dataset.
+* Running DeepLab on ADE20K semantic segmentation dataset.
+ +Models: + +* Checkpoints and frozen inference graphs.
+ +Misc: + +* Please check FAQ if you have some questions before reporting the issues.
+ +## Getting Help + +To get help with issues you may encounter while using the DeepLab Tensorflow +implementation, create a new question on +[StackOverflow](https://stackoverflow.com/) with the tag "tensorflow". + +Please report bugs (i.e., broken code, not usage questions) to the +tensorflow/models GitHub [issue +tracker](https://github.com/tensorflow/models/issues), prefixing the issue name +with "deeplab". + +## License + +All the codes in deeplab folder is covered by the [LICENSE](https://github.com/tensorflow/models/blob/master/LICENSE) +under tensorflow/models. Please refer to the LICENSE for details. + +## Change Logs + +### March 26, 2020 +* Supported EdgeTPU-DeepLab and EdgeTPU-DeepLab-slim on Cityscapes. +**Contributor**: Yun Long. + +### November 20, 2019 +* Supported MobileNetV3 large and small model variants on Cityscapes. +**Contributor**: Yukun Zhu. + + +### March 27, 2019 + +* Supported using different loss weights on different classes during training. +**Contributor**: Yuwei Yang. + + +### March 26, 2019 + +* Supported ResNet-v1-18. **Contributor**: Michalis Raptis. + + +### March 6, 2019 + +* Released the evaluation code (under the `evaluation` folder) for image +parsing, a.k.a. panoptic segmentation. In particular, the released code supports +evaluating the parsing results in terms of both the parsing covering and +panoptic quality metrics. **Contributors**: Maxwell Collins and Ting Liu. + + +### February 6, 2019 + +* Updated decoder module to exploit multiple low-level features with different +output_strides. + +### December 3, 2018 + +* Released the MobileNet-v2 checkpoint on ADE20K. + + +### November 19, 2018 + +* Supported NAS architecture for feature extraction. **Contributor**: Chenxi Liu. + +* Supported hard pixel mining during training. + + +### October 1, 2018 + +* Released MobileNet-v2 depth-multiplier = 0.5 COCO-pretrained checkpoints on +PASCAL VOC 2012, and Xception-65 COCO pretrained checkpoint (i.e., no PASCAL +pretrained). + + +### September 5, 2018 + +* Released Cityscapes pretrained checkpoints with found best dense prediction cell. + + +### May 26, 2018 + +* Updated ADE20K pretrained checkpoint. + + +### May 18, 2018 +* Added builders for ResNet-v1 and Xception model variants. +* Added ADE20K support, including colormap and pretrained Xception_65 checkpoint. +* Fixed a bug on using non-default depth_multiplier for MobileNet-v2. + + +### March 22, 2018 + +* Released checkpoints using MobileNet-V2 as network backbone and pretrained on +PASCAL VOC 2012 and Cityscapes. + + +### March 5, 2018 + +* First release of DeepLab in TensorFlow including deeper Xception network +backbone. Included chekcpoints that have been pretrained on PASCAL VOC 2012 +and Cityscapes. + +## References + +1. **Semantic Image Segmentation with Deep Convolutional Nets and Fully Connected CRFs**
+ Liang-Chieh Chen+, George Papandreou+, Iasonas Kokkinos, Kevin Murphy, Alan L. Yuille (+ equal + contribution).
+ [[link]](https://arxiv.org/abs/1412.7062). In ICLR, 2015. + +2. **DeepLab: Semantic Image Segmentation with Deep Convolutional Nets,** + **Atrous Convolution, and Fully Connected CRFs**
+ Liang-Chieh Chen+, George Papandreou+, Iasonas Kokkinos, Kevin Murphy, and Alan L. Yuille (+ equal + contribution).
+ [[link]](http://arxiv.org/abs/1606.00915). TPAMI 2017. + +3. **Rethinking Atrous Convolution for Semantic Image Segmentation**
+ Liang-Chieh Chen, George Papandreou, Florian Schroff, Hartwig Adam.
+ [[link]](http://arxiv.org/abs/1706.05587). arXiv:1706.05587, 2017. + +4. **Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation**
+ Liang-Chieh Chen, Yukun Zhu, George Papandreou, Florian Schroff, Hartwig Adam.
+ [[link]](https://arxiv.org/abs/1802.02611). In ECCV, 2018. + +5. **ParseNet: Looking Wider to See Better**
+ Wei Liu, Andrew Rabinovich, Alexander C. Berg
+ [[link]](https://arxiv.org/abs/1506.04579). arXiv:1506.04579, 2015. + +6. **Pyramid Scene Parsing Network**
+ Hengshuang Zhao, Jianping Shi, Xiaojuan Qi, Xiaogang Wang, Jiaya Jia
+ [[link]](https://arxiv.org/abs/1612.01105). In CVPR, 2017. + +7. **Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift**
+ Sergey Ioffe, Christian Szegedy
+ [[link]](https://arxiv.org/abs/1502.03167). In ICML, 2015. + +8. **MobileNetV2: Inverted Residuals and Linear Bottlenecks**
+ Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen
+ [[link]](https://arxiv.org/abs/1801.04381). In CVPR, 2018. + +9. **Xception: Deep Learning with Depthwise Separable Convolutions**
+ François Chollet
+ [[link]](https://arxiv.org/abs/1610.02357). In CVPR, 2017. + +10. **Deformable Convolutional Networks -- COCO Detection and Segmentation Challenge 2017 Entry**
+ Haozhi Qi, Zheng Zhang, Bin Xiao, Han Hu, Bowen Cheng, Yichen Wei, Jifeng Dai
+ [[link]](http://presentations.cocodataset.org/COCO17-Detect-MSRA.pdf). ICCV COCO Challenge + Workshop, 2017. + +11. **TensorFlow: Large-Scale Machine Learning on Heterogeneous Distributed Systems**
+ M. Abadi, A. Agarwal, et al.
+ [[link]](https://arxiv.org/abs/1603.04467). arXiv:1603.04467, 2016. + +12. **The Pascal Visual Object Classes Challenge – A Retrospective,**
+ Mark Everingham, S. M. Ali Eslami, Luc Van Gool, Christopher K. I. Williams, John + Winn, and Andrew Zisserman.
+ [[link]](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/). IJCV, 2014. + +13. **The Cityscapes Dataset for Semantic Urban Scene Understanding**
+ Cordts, Marius, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, Bernt Schiele.
+ [[link]](https://www.cityscapes-dataset.com/). In CVPR, 2016. + +14. **Deep Residual Learning for Image Recognition**
+ Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
+ [[link]](https://arxiv.org/abs/1512.03385). In CVPR, 2016. + +15. **Progressive Neural Architecture Search**
+ Chenxi Liu, Barret Zoph, Maxim Neumann, Jonathon Shlens, Wei Hua, Li-Jia Li, Li Fei-Fei, Alan Yuille, Jonathan Huang, Kevin Murphy.
+ [[link]](https://arxiv.org/abs/1712.00559). In ECCV, 2018. + +16. **Searching for MobileNetV3**
+ Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan, Weijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V. Le, Hartwig Adam.
+ [[link]](https://arxiv.org/abs/1905.02244). In ICCV, 2019. diff --git a/models/research/deeplab/__init__.py b/models/research/deeplab/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/deeplab/common.py b/models/research/deeplab/common.py new file mode 100644 index 0000000000000000000000000000000000000000..928f7176c377e69aa2c5b8bc676f092cf97819c9 --- /dev/null +++ b/models/research/deeplab/common.py @@ -0,0 +1,295 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Provides flags that are common to scripts. + +Common flags from train/eval/vis/export_model.py are collected in this script. +""" +import collections +import copy +import json +import tensorflow as tf + +flags = tf.app.flags + +# Flags for input preprocessing. + +flags.DEFINE_integer('min_resize_value', None, + 'Desired size of the smaller image side.') + +flags.DEFINE_integer('max_resize_value', None, + 'Maximum allowed size of the larger image side.') + +flags.DEFINE_integer('resize_factor', None, + 'Resized dimensions are multiple of factor plus one.') + +flags.DEFINE_boolean('keep_aspect_ratio', True, + 'Keep aspect ratio after resizing or not.') + +# Model dependent flags. + +flags.DEFINE_integer('logits_kernel_size', 1, + 'The kernel size for the convolutional kernel that ' + 'generates logits.') + +# When using 'mobilent_v2', we set atrous_rates = decoder_output_stride = None. +# When using 'xception_65' or 'resnet_v1' model variants, we set +# atrous_rates = [6, 12, 18] (output stride 16) and decoder_output_stride = 4. +# See core/feature_extractor.py for supported model variants. +flags.DEFINE_string('model_variant', 'mobilenet_v2', 'DeepLab model variant.') + +flags.DEFINE_multi_float('image_pyramid', None, + 'Input scales for multi-scale feature extraction.') + +flags.DEFINE_boolean('add_image_level_feature', True, + 'Add image level feature.') + +flags.DEFINE_list( + 'image_pooling_crop_size', None, + 'Image pooling crop size [height, width] used in the ASPP module. When ' + 'value is None, the model performs image pooling with "crop_size". This' + 'flag is useful when one likes to use different image pooling sizes.') + +flags.DEFINE_list( + 'image_pooling_stride', '1,1', + 'Image pooling stride [height, width] used in the ASPP image pooling. ') + +flags.DEFINE_boolean('aspp_with_batch_norm', True, + 'Use batch norm parameters for ASPP or not.') + +flags.DEFINE_boolean('aspp_with_separable_conv', True, + 'Use separable convolution for ASPP or not.') + +# Defaults to None. Set multi_grid = [1, 2, 4] when using provided +# 'resnet_v1_{50,101}_beta' checkpoints. 
+flags.DEFINE_multi_integer('multi_grid', None, + 'Employ a hierarchy of atrous rates for ResNet.') + +flags.DEFINE_float('depth_multiplier', 1.0, + 'Multiplier for the depth (number of channels) for all ' + 'convolution ops used in MobileNet.') + +flags.DEFINE_integer('divisible_by', None, + 'An integer that ensures the layer # channels are ' + 'divisible by this value. Used in MobileNet.') + +# For `xception_65`, use decoder_output_stride = 4. For `mobilenet_v2`, use +# decoder_output_stride = None. +flags.DEFINE_list('decoder_output_stride', None, + 'Comma-separated list of strings with the number specifying ' + 'output stride of low-level features at each network level.' + 'Current semantic segmentation implementation assumes at ' + 'most one output stride (i.e., either None or a list with ' + 'only one element.') + +flags.DEFINE_boolean('decoder_use_separable_conv', True, + 'Employ separable convolution for decoder or not.') + +flags.DEFINE_enum('merge_method', 'max', ['max', 'avg'], + 'Scheme to merge multi scale features.') + +flags.DEFINE_boolean( + 'prediction_with_upsampled_logits', True, + 'When performing prediction, there are two options: (1) bilinear ' + 'upsampling the logits followed by softmax, or (2) softmax followed by ' + 'bilinear upsampling.') + +flags.DEFINE_string( + 'dense_prediction_cell_json', + '', + 'A JSON file that specifies the dense prediction cell.') + +flags.DEFINE_integer( + 'nas_stem_output_num_conv_filters', 20, + 'Number of filters of the stem output tensor in NAS models.') + +flags.DEFINE_bool('nas_use_classification_head', False, + 'Use image classification head for NAS model variants.') + +flags.DEFINE_bool('nas_remove_os32_stride', False, + 'Remove the stride in the output stride 32 branch.') + +flags.DEFINE_bool('use_bounded_activation', False, + 'Whether or not to use bounded activations. Bounded ' + 'activations better lend themselves to quantized inference.') + +flags.DEFINE_boolean('aspp_with_concat_projection', True, + 'ASPP with concat projection.') + +flags.DEFINE_boolean('aspp_with_squeeze_and_excitation', False, + 'ASPP with squeeze and excitation.') + +flags.DEFINE_integer('aspp_convs_filters', 256, 'ASPP convolution filters.') + +flags.DEFINE_boolean('decoder_use_sum_merge', False, + 'Decoder uses simply sum merge.') + +flags.DEFINE_integer('decoder_filters', 256, 'Decoder filters.') + +flags.DEFINE_boolean('decoder_output_is_logits', False, + 'Use decoder output as logits or not.') + +flags.DEFINE_boolean('image_se_uses_qsigmoid', False, 'Use q-sigmoid.') + +flags.DEFINE_multi_float( + 'label_weights', None, + 'A list of label weights, each element represents the weight for the label ' + 'of its index, for example, label_weights = [0.1, 0.5] means the weight ' + 'for label 0 is 0.1 and the weight for label 1 is 0.5. If set as None, all ' + 'the labels have the same weight 1.0.') + +flags.DEFINE_float('batch_norm_decay', 0.9997, 'Batchnorm decay.') + +FLAGS = flags.FLAGS + +# Constants + +# Perform semantic segmentation predictions. +OUTPUT_TYPE = 'semantic' + +# Semantic segmentation item names. +LABELS_CLASS = 'labels_class' +IMAGE = 'image' +HEIGHT = 'height' +WIDTH = 'width' +IMAGE_NAME = 'image_name' +LABEL = 'label' +ORIGINAL_IMAGE = 'original_image' + +# Test set name. 
+TEST_SET = 'test' + + +class ModelOptions( + collections.namedtuple('ModelOptions', [ + 'outputs_to_num_classes', + 'crop_size', + 'atrous_rates', + 'output_stride', + 'preprocessed_images_dtype', + 'merge_method', + 'add_image_level_feature', + 'image_pooling_crop_size', + 'image_pooling_stride', + 'aspp_with_batch_norm', + 'aspp_with_separable_conv', + 'multi_grid', + 'decoder_output_stride', + 'decoder_use_separable_conv', + 'logits_kernel_size', + 'model_variant', + 'depth_multiplier', + 'divisible_by', + 'prediction_with_upsampled_logits', + 'dense_prediction_cell_config', + 'nas_architecture_options', + 'use_bounded_activation', + 'aspp_with_concat_projection', + 'aspp_with_squeeze_and_excitation', + 'aspp_convs_filters', + 'decoder_use_sum_merge', + 'decoder_filters', + 'decoder_output_is_logits', + 'image_se_uses_qsigmoid', + 'label_weights', + 'sync_batch_norm_method', + 'batch_norm_decay', + ])): + """Immutable class to hold model options.""" + + __slots__ = () + + def __new__(cls, + outputs_to_num_classes, + crop_size=None, + atrous_rates=None, + output_stride=8, + preprocessed_images_dtype=tf.float32): + """Constructor to set default values. + + Args: + outputs_to_num_classes: A dictionary from output type to the number of + classes. For example, for the task of semantic segmentation with 21 + semantic classes, we would have outputs_to_num_classes['semantic'] = 21. + crop_size: A tuple [crop_height, crop_width]. + atrous_rates: A list of atrous convolution rates for ASPP. + output_stride: The ratio of input to output spatial resolution. + preprocessed_images_dtype: The type after the preprocessing function. + + Returns: + A new ModelOptions instance. + """ + dense_prediction_cell_config = None + if FLAGS.dense_prediction_cell_json: + with tf.gfile.Open(FLAGS.dense_prediction_cell_json, 'r') as f: + dense_prediction_cell_config = json.load(f) + decoder_output_stride = None + if FLAGS.decoder_output_stride: + decoder_output_stride = [ + int(x) for x in FLAGS.decoder_output_stride] + if sorted(decoder_output_stride, reverse=True) != decoder_output_stride: + raise ValueError('Decoder output stride need to be sorted in the ' + 'descending order.') + image_pooling_crop_size = None + if FLAGS.image_pooling_crop_size: + image_pooling_crop_size = [int(x) for x in FLAGS.image_pooling_crop_size] + image_pooling_stride = [1, 1] + if FLAGS.image_pooling_stride: + image_pooling_stride = [int(x) for x in FLAGS.image_pooling_stride] + label_weights = FLAGS.label_weights + if label_weights is None: + label_weights = 1.0 + nas_architecture_options = { + 'nas_stem_output_num_conv_filters': ( + FLAGS.nas_stem_output_num_conv_filters), + 'nas_use_classification_head': FLAGS.nas_use_classification_head, + 'nas_remove_os32_stride': FLAGS.nas_remove_os32_stride, + } + return super(ModelOptions, cls).__new__( + cls, outputs_to_num_classes, crop_size, atrous_rates, output_stride, + preprocessed_images_dtype, + FLAGS.merge_method, + FLAGS.add_image_level_feature, + image_pooling_crop_size, + image_pooling_stride, + FLAGS.aspp_with_batch_norm, + FLAGS.aspp_with_separable_conv, + FLAGS.multi_grid, + decoder_output_stride, + FLAGS.decoder_use_separable_conv, + FLAGS.logits_kernel_size, + FLAGS.model_variant, + FLAGS.depth_multiplier, + FLAGS.divisible_by, + FLAGS.prediction_with_upsampled_logits, + dense_prediction_cell_config, + nas_architecture_options, + FLAGS.use_bounded_activation, + FLAGS.aspp_with_concat_projection, + FLAGS.aspp_with_squeeze_and_excitation, + FLAGS.aspp_convs_filters, + 
FLAGS.decoder_use_sum_merge, + FLAGS.decoder_filters, + FLAGS.decoder_output_is_logits, + FLAGS.image_se_uses_qsigmoid, + label_weights, + 'None', + FLAGS.batch_norm_decay) + + def __deepcopy__(self, memo): + return ModelOptions(copy.deepcopy(self.outputs_to_num_classes), + self.crop_size, + self.atrous_rates, + self.output_stride, + self.preprocessed_images_dtype) diff --git a/models/research/deeplab/common_test.py b/models/research/deeplab/common_test.py new file mode 100644 index 0000000000000000000000000000000000000000..45b64e50e3bb0a574c0ec230075e6fddff3ae996 --- /dev/null +++ b/models/research/deeplab/common_test.py @@ -0,0 +1,52 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for common.py.""" +import copy + +import tensorflow as tf + +from deeplab import common + + +class CommonTest(tf.test.TestCase): + + def testOutputsToNumClasses(self): + num_classes = 21 + model_options = common.ModelOptions( + outputs_to_num_classes={common.OUTPUT_TYPE: num_classes}) + self.assertEqual(model_options.outputs_to_num_classes[common.OUTPUT_TYPE], + num_classes) + + def testDeepcopy(self): + num_classes = 21 + model_options = common.ModelOptions( + outputs_to_num_classes={common.OUTPUT_TYPE: num_classes}) + model_options_new = copy.deepcopy(model_options) + self.assertEqual((model_options_new. + outputs_to_num_classes[common.OUTPUT_TYPE]), + num_classes) + + num_classes_new = 22 + model_options_new.outputs_to_num_classes[common.OUTPUT_TYPE] = ( + num_classes_new) + self.assertEqual(model_options.outputs_to_num_classes[common.OUTPUT_TYPE], + num_classes) + self.assertEqual((model_options_new. + outputs_to_num_classes[common.OUTPUT_TYPE]), + num_classes_new) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/deeplab/core/__init__.py b/models/research/deeplab/core/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/deeplab/core/conv2d_ws.py b/models/research/deeplab/core/conv2d_ws.py new file mode 100644 index 0000000000000000000000000000000000000000..9aaaf33dd3c2e098d7d5e815b4918c436ee1796c --- /dev/null +++ b/models/research/deeplab/core/conv2d_ws.py @@ -0,0 +1,369 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Augment slim.conv2d with optional Weight Standardization (WS). + +WS is a normalization method to accelerate micro-batch training. When used with +Group Normalization and trained with 1 image/GPU, WS is able to match or +outperform the performances of BN trained with large batch sizes. +[1] Siyuan Qiao, Huiyu Wang, Chenxi Liu, Wei Shen, Alan Yuille + Weight Standardization. arXiv:1903.10520 +[2] Lei Huang, Xianglong Liu, Yang Liu, Bo Lang, Dacheng Tao + Centered Weight Normalization in Accelerating Training of Deep Neural + Networks. ICCV 2017 +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf +from tensorflow.contrib import framework as contrib_framework +from tensorflow.contrib import layers as contrib_layers + +from tensorflow.contrib.layers.python.layers import layers +from tensorflow.contrib.layers.python.layers import utils + + +class Conv2D(tf.keras.layers.Conv2D, tf.layers.Layer): + """2D convolution layer (e.g. spatial convolution over images). + + This layer creates a convolution kernel that is convolved + (actually cross-correlated) with the layer input to produce a tensor of + outputs. If `use_bias` is True (and a `bias_initializer` is provided), + a bias vector is created and added to the outputs. Finally, if + `activation` is not `None`, it is applied to the outputs as well. + """ + + def __init__(self, + filters, + kernel_size, + strides=(1, 1), + padding='valid', + data_format='channels_last', + dilation_rate=(1, 1), + activation=None, + use_bias=True, + kernel_initializer=None, + bias_initializer=tf.zeros_initializer(), + kernel_regularizer=None, + bias_regularizer=None, + use_weight_standardization=False, + activity_regularizer=None, + kernel_constraint=None, + bias_constraint=None, + trainable=True, + name=None, + **kwargs): + """Constructs the 2D convolution layer. + + Args: + filters: Integer, the dimensionality of the output space (i.e. the number + of filters in the convolution). + kernel_size: An integer or tuple/list of 2 integers, specifying the height + and width of the 2D convolution window. Can be a single integer to + specify the same value for all spatial dimensions. + strides: An integer or tuple/list of 2 integers, specifying the strides of + the convolution along the height and width. Can be a single integer to + specify the same value for all spatial dimensions. Specifying any stride + value != 1 is incompatible with specifying any `dilation_rate` value != + 1. + padding: One of `"valid"` or `"same"` (case-insensitive). + data_format: A string, one of `channels_last` (default) or + `channels_first`. The ordering of the dimensions in the inputs. + `channels_last` corresponds to inputs with shape `(batch, height, width, + channels)` while `channels_first` corresponds to inputs with shape + `(batch, channels, height, width)`. + dilation_rate: An integer or tuple/list of 2 integers, specifying the + dilation rate to use for dilated convolution. Can be a single integer to + specify the same value for all spatial dimensions. Currently, specifying + any `dilation_rate` value != 1 is incompatible with specifying any + stride value != 1. + activation: Activation function. Set it to None to maintain a linear + activation. + use_bias: Boolean, whether the layer uses a bias. + kernel_initializer: An initializer for the convolution kernel. 
+ bias_initializer: An initializer for the bias vector. If None, the default + initializer will be used. + kernel_regularizer: Optional regularizer for the convolution kernel. + bias_regularizer: Optional regularizer for the bias vector. + use_weight_standardization: Boolean, whether the layer uses weight + standardization. + activity_regularizer: Optional regularizer function for the output. + kernel_constraint: Optional projection function to be applied to the + kernel after being updated by an `Optimizer` (e.g. used to implement + norm constraints or value constraints for layer weights). The function + must take as input the unprojected variable and must return the + projected variable (which must have the same shape). Constraints are not + safe to use when doing asynchronous distributed training. + bias_constraint: Optional projection function to be applied to the bias + after being updated by an `Optimizer`. + trainable: Boolean, if `True` also add variables to the graph collection + `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). + name: A string, the name of the layer. + **kwargs: Arbitrary keyword arguments passed to tf.keras.layers.Conv2D + """ + + super(Conv2D, self).__init__( + filters=filters, + kernel_size=kernel_size, + strides=strides, + padding=padding, + data_format=data_format, + dilation_rate=dilation_rate, + activation=activation, + use_bias=use_bias, + kernel_initializer=kernel_initializer, + bias_initializer=bias_initializer, + kernel_regularizer=kernel_regularizer, + bias_regularizer=bias_regularizer, + activity_regularizer=activity_regularizer, + kernel_constraint=kernel_constraint, + bias_constraint=bias_constraint, + trainable=trainable, + name=name, + **kwargs) + self.use_weight_standardization = use_weight_standardization + + def call(self, inputs): + if self.use_weight_standardization: + mean, var = tf.nn.moments(self.kernel, [0, 1, 2], keep_dims=True) + kernel = (self.kernel - mean) / tf.sqrt(var + 1e-5) + outputs = self._convolution_op(inputs, kernel) + else: + outputs = self._convolution_op(inputs, self.kernel) + + if self.use_bias: + if self.data_format == 'channels_first': + if self.rank == 1: + # tf.nn.bias_add does not accept a 1D input tensor. + bias = tf.reshape(self.bias, (1, self.filters, 1)) + outputs += bias + else: + outputs = tf.nn.bias_add(outputs, self.bias, data_format='NCHW') + else: + outputs = tf.nn.bias_add(outputs, self.bias, data_format='NHWC') + + if self.activation is not None: + return self.activation(outputs) + return outputs + + +@contrib_framework.add_arg_scope +def conv2d(inputs, + num_outputs, + kernel_size, + stride=1, + padding='SAME', + data_format=None, + rate=1, + activation_fn=tf.nn.relu, + normalizer_fn=None, + normalizer_params=None, + weights_initializer=contrib_layers.xavier_initializer(), + weights_regularizer=None, + biases_initializer=tf.zeros_initializer(), + biases_regularizer=None, + use_weight_standardization=False, + reuse=None, + variables_collections=None, + outputs_collections=None, + trainable=True, + scope=None): + """Adds a 2D convolution followed by an optional batch_norm layer. + + `convolution` creates a variable called `weights`, representing the + convolutional kernel, that is convolved (actually cross-correlated) with the + `inputs` to produce a `Tensor` of activations. If a `normalizer_fn` is + provided (such as `batch_norm`), it is then applied. Otherwise, if + `normalizer_fn` is None and a `biases_initializer` is provided then a `biases` + variable would be created and added the activations. 
Finally, if + `activation_fn` is not `None`, it is applied to the activations as well. + + Performs atrous convolution with input stride/dilation rate equal to `rate` + if a value > 1 for any dimension of `rate` is specified. In this case + `stride` values != 1 are not supported. + + Args: + inputs: A Tensor of rank N+2 of shape `[batch_size] + input_spatial_shape + + [in_channels]` if data_format does not start with "NC" (default), or + `[batch_size, in_channels] + input_spatial_shape` if data_format starts + with "NC". + num_outputs: Integer, the number of output filters. + kernel_size: A sequence of N positive integers specifying the spatial + dimensions of the filters. Can be a single integer to specify the same + value for all spatial dimensions. + stride: A sequence of N positive integers specifying the stride at which to + compute output. Can be a single integer to specify the same value for all + spatial dimensions. Specifying any `stride` value != 1 is incompatible + with specifying any `rate` value != 1. + padding: One of `"VALID"` or `"SAME"`. + data_format: A string or None. Specifies whether the channel dimension of + the `input` and output is the last dimension (default, or if `data_format` + does not start with "NC"), or the second dimension (if `data_format` + starts with "NC"). For N=1, the valid values are "NWC" (default) and + "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For + N=3, the valid values are "NDHWC" (default) and "NCDHW". + rate: A sequence of N positive integers specifying the dilation rate to use + for atrous convolution. Can be a single integer to specify the same value + for all spatial dimensions. Specifying any `rate` value != 1 is + incompatible with specifying any `stride` value != 1. + activation_fn: Activation function. The default value is a ReLU function. + Explicitly set it to None to skip it and maintain a linear activation. + normalizer_fn: Normalization function to use instead of `biases`. If + `normalizer_fn` is provided then `biases_initializer` and + `biases_regularizer` are ignored and `biases` are not created nor added. + default set to None for no normalizer function + normalizer_params: Normalization function parameters. + weights_initializer: An initializer for the weights. + weights_regularizer: Optional regularizer for the weights. + biases_initializer: An initializer for the biases. If None skip biases. + biases_regularizer: Optional regularizer for the biases. + use_weight_standardization: Boolean, whether the layer uses weight + standardization. + reuse: Whether or not the layer and its variables should be reused. To be + able to reuse the layer scope must be given. + variables_collections: Optional list of collections for all the variables or + a dictionary containing a different list of collection per variable. + outputs_collections: Collection to add the outputs. + trainable: If `True` also add variables to the graph collection + `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable). + scope: Optional scope for `variable_scope`. + + Returns: + A tensor representing the output of the operation. + + Raises: + ValueError: If `data_format` is invalid. + ValueError: Both 'rate' and `stride` are not uniformly 1. 
+ """ + if data_format not in [None, 'NWC', 'NCW', 'NHWC', 'NCHW', 'NDHWC', 'NCDHW']: + raise ValueError('Invalid data_format: %r' % (data_format,)) + + # pylint: disable=protected-access + layer_variable_getter = layers._build_variable_getter({ + 'bias': 'biases', + 'kernel': 'weights' + }) + # pylint: enable=protected-access + with tf.variable_scope( + scope, 'Conv', [inputs], reuse=reuse, + custom_getter=layer_variable_getter) as sc: + inputs = tf.convert_to_tensor(inputs) + input_rank = inputs.get_shape().ndims + + if input_rank != 4: + raise ValueError('Convolution expects input with rank %d, got %d' % + (4, input_rank)) + + data_format = ('channels_first' if data_format and + data_format.startswith('NC') else 'channels_last') + layer = Conv2D( + filters=num_outputs, + kernel_size=kernel_size, + strides=stride, + padding=padding, + data_format=data_format, + dilation_rate=rate, + activation=None, + use_bias=not normalizer_fn and biases_initializer, + kernel_initializer=weights_initializer, + bias_initializer=biases_initializer, + kernel_regularizer=weights_regularizer, + bias_regularizer=biases_regularizer, + use_weight_standardization=use_weight_standardization, + activity_regularizer=None, + trainable=trainable, + name=sc.name, + dtype=inputs.dtype.base_dtype, + _scope=sc, + _reuse=reuse) + outputs = layer.apply(inputs) + + # Add variables to collections. + # pylint: disable=protected-access + layers._add_variable_to_collections(layer.kernel, variables_collections, + 'weights') + if layer.use_bias: + layers._add_variable_to_collections(layer.bias, variables_collections, + 'biases') + # pylint: enable=protected-access + if normalizer_fn is not None: + normalizer_params = normalizer_params or {} + outputs = normalizer_fn(outputs, **normalizer_params) + + if activation_fn is not None: + outputs = activation_fn(outputs) + return utils.collect_named_outputs(outputs_collections, sc.name, outputs) + + +def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None): + """Strided 2-D convolution with 'SAME' padding. + + When stride > 1, then we do explicit zero-padding, followed by conv2d with + 'VALID' padding. + + Note that + + net = conv2d_same(inputs, num_outputs, 3, stride=stride) + + is equivalent to + + net = conv2d(inputs, num_outputs, 3, stride=1, padding='SAME') + net = subsample(net, factor=stride) + + whereas + + net = conv2d(inputs, num_outputs, 3, stride=stride, padding='SAME') + + is different when the input's height or width is even, which is why we add the + current function. For more details, see ResnetUtilsTest.testConv2DSameEven(). + + Args: + inputs: A 4-D tensor of size [batch, height_in, width_in, channels]. + num_outputs: An integer, the number of output filters. + kernel_size: An int with the kernel_size of the filters. + stride: An integer, the output stride. + rate: An integer, rate for atrous convolution. + scope: Scope. + + Returns: + output: A 4-D tensor of size [batch, height_out, width_out, channels] with + the convolution output. 
+ """ + if stride == 1: + return conv2d( + inputs, + num_outputs, + kernel_size, + stride=1, + rate=rate, + padding='SAME', + scope=scope) + else: + kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1) + pad_total = kernel_size_effective - 1 + pad_beg = pad_total // 2 + pad_end = pad_total - pad_beg + inputs = tf.pad(inputs, + [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]) + return conv2d( + inputs, + num_outputs, + kernel_size, + stride=stride, + rate=rate, + padding='VALID', + scope=scope) diff --git a/models/research/deeplab/core/conv2d_ws_test.py b/models/research/deeplab/core/conv2d_ws_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b6bea85ee034779ff5bf87b88cd912aa8dc863f2 --- /dev/null +++ b/models/research/deeplab/core/conv2d_ws_test.py @@ -0,0 +1,420 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for conv2d_ws.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf +from tensorflow.contrib import framework as contrib_framework +from tensorflow.contrib import layers as contrib_layers +from deeplab.core import conv2d_ws + + +class ConvolutionTest(tf.test.TestCase): + + def testInvalidShape(self): + with self.cached_session(): + images_3d = tf.random_uniform((5, 6, 7, 9, 3), seed=1) + with self.assertRaisesRegexp( + ValueError, 'Convolution expects input with rank 4, got 5'): + conv2d_ws.conv2d(images_3d, 32, 3) + + def testInvalidDataFormat(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + with self.assertRaisesRegexp(ValueError, 'data_format'): + conv2d_ws.conv2d(images, 32, 3, data_format='CHWN') + + def testCreateConv(self): + height, width = 7, 9 + with self.cached_session(): + images = np.random.uniform(size=(5, height, width, 4)).astype(np.float32) + output = conv2d_ws.conv2d(images, 32, [3, 3]) + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32]) + weights = contrib_framework.get_variables_by_name('weights')[0] + self.assertListEqual(weights.get_shape().as_list(), [3, 3, 4, 32]) + biases = contrib_framework.get_variables_by_name('biases')[0] + self.assertListEqual(biases.get_shape().as_list(), [32]) + + def testCreateConvWithWS(self): + height, width = 7, 9 + with self.cached_session(): + images = np.random.uniform(size=(5, height, width, 4)).astype(np.float32) + output = conv2d_ws.conv2d( + images, 32, [3, 3], use_weight_standardization=True) + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32]) + weights = contrib_framework.get_variables_by_name('weights')[0] + self.assertListEqual(weights.get_shape().as_list(), [3, 3, 
4, 32]) + biases = contrib_framework.get_variables_by_name('biases')[0] + self.assertListEqual(biases.get_shape().as_list(), [32]) + + def testCreateConvNCHW(self): + height, width = 7, 9 + with self.cached_session(): + images = np.random.uniform(size=(5, 4, height, width)).astype(np.float32) + output = conv2d_ws.conv2d(images, 32, [3, 3], data_format='NCHW') + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), [5, 32, height, width]) + weights = contrib_framework.get_variables_by_name('weights')[0] + self.assertListEqual(weights.get_shape().as_list(), [3, 3, 4, 32]) + biases = contrib_framework.get_variables_by_name('biases')[0] + self.assertListEqual(biases.get_shape().as_list(), [32]) + + def testCreateSquareConv(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = conv2d_ws.conv2d(images, 32, 3) + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32]) + + def testCreateConvWithTensorShape(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = conv2d_ws.conv2d(images, 32, images.get_shape()[1:3]) + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32]) + + def testCreateFullyConv(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 32), seed=1) + output = conv2d_ws.conv2d( + images, 64, images.get_shape()[1:3], padding='VALID') + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 64]) + biases = contrib_framework.get_variables_by_name('biases')[0] + self.assertListEqual(biases.get_shape().as_list(), [64]) + + def testFullyConvWithCustomGetter(self): + height, width = 7, 9 + with self.cached_session(): + called = [0] + + def custom_getter(getter, *args, **kwargs): + called[0] += 1 + return getter(*args, **kwargs) + + with tf.variable_scope('test', custom_getter=custom_getter): + images = tf.random_uniform((5, height, width, 32), seed=1) + conv2d_ws.conv2d(images, 64, images.get_shape()[1:3]) + self.assertEqual(called[0], 2) # Custom getter called twice. 
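The expected shapes asserted throughout these tests follow from the standard convolution output-size arithmetic for SAME and VALID padding. The helper below is a minimal, self-contained sketch of that arithmetic (a hypothetical function, not part of conv2d_ws or its tests), shown only to make the expected sizes easier to verify:

import math


def conv_output_size(size, kernel, stride=1, rate=1, padding='SAME'):
  """Spatial output size of a 2-D convolution along one dimension."""
  # Atrous convolution enlarges the kernel's effective extent.
  effective_kernel = kernel + (kernel - 1) * (rate - 1)
  if padding == 'SAME':
    return int(math.ceil(size / float(stride)))
  # VALID keeps only positions where the (effective) kernel fully fits.
  return int(math.ceil((size - effective_kernel + 1) / float(stride)))


# A 3x3 VALID convolution on a 7x9 image -> 5x7, as in testCreateConvValid.
assert (conv_output_size(7, 3, padding='VALID'),
        conv_output_size(9, 3, padding='VALID')) == (5, 7)
# The same kernel with rate=2 on a 10x12 image -> 6x8, as in
# testOutputSizeWithRateTwoValidPadding.
assert (conv_output_size(10, 3, rate=2, padding='VALID'),
        conv_output_size(12, 3, rate=2, padding='VALID')) == (6, 8)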
+ + def testCreateVerticalConv(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 4), seed=1) + output = conv2d_ws.conv2d(images, 32, [3, 1]) + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32]) + weights = contrib_framework.get_variables_by_name('weights')[0] + self.assertListEqual(weights.get_shape().as_list(), [3, 1, 4, 32]) + biases = contrib_framework.get_variables_by_name('biases')[0] + self.assertListEqual(biases.get_shape().as_list(), [32]) + + def testCreateHorizontalConv(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 4), seed=1) + output = conv2d_ws.conv2d(images, 32, [1, 3]) + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32]) + weights = contrib_framework.get_variables_by_name('weights')[0] + self.assertListEqual(weights.get_shape().as_list(), [1, 3, 4, 32]) + + def testCreateConvWithStride(self): + height, width = 6, 8 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = conv2d_ws.conv2d(images, 32, [3, 3], stride=2) + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), + [5, height / 2, width / 2, 32]) + + def testCreateConvCreatesWeightsAndBiasesVars(self): + height, width = 7, 9 + images = tf.random_uniform((5, height, width, 3), seed=1) + with self.cached_session(): + self.assertFalse(contrib_framework.get_variables('conv1/weights')) + self.assertFalse(contrib_framework.get_variables('conv1/biases')) + conv2d_ws.conv2d(images, 32, [3, 3], scope='conv1') + self.assertTrue(contrib_framework.get_variables('conv1/weights')) + self.assertTrue(contrib_framework.get_variables('conv1/biases')) + + def testCreateConvWithScope(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = conv2d_ws.conv2d(images, 32, [3, 3], scope='conv1') + self.assertEqual(output.op.name, 'conv1/Relu') + + def testCreateConvWithCollection(self): + height, width = 7, 9 + images = tf.random_uniform((5, height, width, 3), seed=1) + with tf.name_scope('fe'): + conv = conv2d_ws.conv2d( + images, 32, [3, 3], outputs_collections='outputs', scope='Conv') + output_collected = tf.get_collection('outputs')[0] + self.assertEqual(output_collected.aliases, ['Conv']) + self.assertEqual(output_collected, conv) + + def testCreateConvWithoutActivation(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = conv2d_ws.conv2d(images, 32, [3, 3], activation_fn=None) + self.assertEqual(output.op.name, 'Conv/BiasAdd') + + def testCreateConvValid(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = conv2d_ws.conv2d(images, 32, [3, 3], padding='VALID') + self.assertListEqual(output.get_shape().as_list(), [5, 5, 7, 32]) + + def testCreateConvWithWD(self): + height, width = 7, 9 + weight_decay = 0.01 + with self.cached_session() as sess: + images = tf.random_uniform((5, height, width, 3), seed=1) + regularizer = contrib_layers.l2_regularizer(weight_decay) + conv2d_ws.conv2d(images, 32, [3, 3], weights_regularizer=regularizer) + l2_loss = tf.nn.l2_loss( + contrib_framework.get_variables_by_name('weights')[0]) + wd = 
tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0] + self.assertEqual(wd.op.name, 'Conv/kernel/Regularizer/l2_regularizer') + sess.run(tf.global_variables_initializer()) + self.assertAlmostEqual(sess.run(wd), weight_decay * l2_loss.eval()) + + def testCreateConvNoRegularizers(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + conv2d_ws.conv2d(images, 32, [3, 3]) + self.assertEqual( + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), []) + + def testReuseVars(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + conv2d_ws.conv2d(images, 32, [3, 3], scope='conv1') + self.assertEqual(len(contrib_framework.get_variables()), 2) + conv2d_ws.conv2d(images, 32, [3, 3], scope='conv1', reuse=True) + self.assertEqual(len(contrib_framework.get_variables()), 2) + + def testNonReuseVars(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + conv2d_ws.conv2d(images, 32, [3, 3]) + self.assertEqual(len(contrib_framework.get_variables()), 2) + conv2d_ws.conv2d(images, 32, [3, 3]) + self.assertEqual(len(contrib_framework.get_variables()), 4) + + def testReuseConvWithWD(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + weight_decay = contrib_layers.l2_regularizer(0.01) + with contrib_framework.arg_scope([conv2d_ws.conv2d], + weights_regularizer=weight_decay): + conv2d_ws.conv2d(images, 32, [3, 3], scope='conv1') + self.assertEqual(len(contrib_framework.get_variables()), 2) + self.assertEqual( + len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1) + conv2d_ws.conv2d(images, 32, [3, 3], scope='conv1', reuse=True) + self.assertEqual(len(contrib_framework.get_variables()), 2) + self.assertEqual( + len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1) + + def testConvWithBatchNorm(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 32), seed=1) + with contrib_framework.arg_scope([conv2d_ws.conv2d], + normalizer_fn=contrib_layers.batch_norm, + normalizer_params={'decay': 0.9}): + net = conv2d_ws.conv2d(images, 32, [3, 3]) + net = conv2d_ws.conv2d(net, 32, [3, 3]) + self.assertEqual(len(contrib_framework.get_variables()), 8) + self.assertEqual( + len(contrib_framework.get_variables('Conv/BatchNorm')), 3) + self.assertEqual( + len(contrib_framework.get_variables('Conv_1/BatchNorm')), 3) + + def testReuseConvWithBatchNorm(self): + height, width = 7, 9 + with self.cached_session(): + images = tf.random_uniform((5, height, width, 32), seed=1) + with contrib_framework.arg_scope([conv2d_ws.conv2d], + normalizer_fn=contrib_layers.batch_norm, + normalizer_params={'decay': 0.9}): + net = conv2d_ws.conv2d(images, 32, [3, 3], scope='Conv') + net = conv2d_ws.conv2d(net, 32, [3, 3], scope='Conv', reuse=True) + self.assertEqual(len(contrib_framework.get_variables()), 4) + self.assertEqual( + len(contrib_framework.get_variables('Conv/BatchNorm')), 3) + self.assertEqual( + len(contrib_framework.get_variables('Conv_1/BatchNorm')), 0) + + def testCreateConvCreatesWeightsAndBiasesVarsWithRateTwo(self): + height, width = 7, 9 + images = tf.random_uniform((5, height, width, 3), seed=1) + with self.cached_session(): + self.assertFalse(contrib_framework.get_variables('conv1/weights')) + self.assertFalse(contrib_framework.get_variables('conv1/biases')) + conv2d_ws.conv2d(images, 32, 
[3, 3], rate=2, scope='conv1') + self.assertTrue(contrib_framework.get_variables('conv1/weights')) + self.assertTrue(contrib_framework.get_variables('conv1/biases')) + + def testOutputSizeWithRateTwoSamePadding(self): + num_filters = 32 + input_size = [5, 10, 12, 3] + expected_size = [5, 10, 12, num_filters] + + images = tf.random_uniform(input_size, seed=1) + output = conv2d_ws.conv2d( + images, num_filters, [3, 3], rate=2, padding='SAME') + self.assertListEqual(list(output.get_shape().as_list()), expected_size) + with self.cached_session() as sess: + sess.run(tf.global_variables_initializer()) + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(list(output.eval().shape), expected_size) + + def testOutputSizeWithRateTwoValidPadding(self): + num_filters = 32 + input_size = [5, 10, 12, 3] + expected_size = [5, 6, 8, num_filters] + + images = tf.random_uniform(input_size, seed=1) + output = conv2d_ws.conv2d( + images, num_filters, [3, 3], rate=2, padding='VALID') + self.assertListEqual(list(output.get_shape().as_list()), expected_size) + with self.cached_session() as sess: + sess.run(tf.global_variables_initializer()) + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(list(output.eval().shape), expected_size) + + def testOutputSizeWithRateTwoThreeValidPadding(self): + num_filters = 32 + input_size = [5, 10, 12, 3] + expected_size = [5, 6, 6, num_filters] + + images = tf.random_uniform(input_size, seed=1) + output = conv2d_ws.conv2d( + images, num_filters, [3, 3], rate=[2, 3], padding='VALID') + self.assertListEqual(list(output.get_shape().as_list()), expected_size) + with self.cached_session() as sess: + sess.run(tf.global_variables_initializer()) + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(list(output.eval().shape), expected_size) + + def testDynamicOutputSizeWithRateOneValidPadding(self): + num_filters = 32 + input_size = [5, 9, 11, 3] + expected_size = [None, None, None, num_filters] + expected_size_dynamic = [5, 7, 9, num_filters] + + with self.cached_session(): + images = tf.placeholder(np.float32, [None, None, None, input_size[3]]) + output = conv2d_ws.conv2d( + images, num_filters, [3, 3], rate=1, padding='VALID') + tf.global_variables_initializer().run() + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), expected_size) + eval_output = output.eval({images: np.zeros(input_size, np.float32)}) + self.assertListEqual(list(eval_output.shape), expected_size_dynamic) + + def testDynamicOutputSizeWithRateOneValidPaddingNCHW(self): + if tf.test.is_gpu_available(cuda_only=True): + num_filters = 32 + input_size = [5, 3, 9, 11] + expected_size = [None, num_filters, None, None] + expected_size_dynamic = [5, num_filters, 7, 9] + + with self.session(use_gpu=True): + images = tf.placeholder(np.float32, [None, input_size[1], None, None]) + output = conv2d_ws.conv2d( + images, + num_filters, [3, 3], + rate=1, + padding='VALID', + data_format='NCHW') + tf.global_variables_initializer().run() + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), expected_size) + eval_output = output.eval({images: np.zeros(input_size, np.float32)}) + self.assertListEqual(list(eval_output.shape), expected_size_dynamic) + + def testDynamicOutputSizeWithRateTwoValidPadding(self): + num_filters = 32 + input_size = [5, 9, 11, 3] + expected_size = [None, None, None, num_filters] + expected_size_dynamic = [5, 5, 7, num_filters] + + with self.cached_session(): + images 
= tf.placeholder(np.float32, [None, None, None, input_size[3]]) + output = conv2d_ws.conv2d( + images, num_filters, [3, 3], rate=2, padding='VALID') + tf.global_variables_initializer().run() + self.assertEqual(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), expected_size) + eval_output = output.eval({images: np.zeros(input_size, np.float32)}) + self.assertListEqual(list(eval_output.shape), expected_size_dynamic) + + def testWithScope(self): + num_filters = 32 + input_size = [5, 9, 11, 3] + expected_size = [5, 5, 7, num_filters] + + images = tf.random_uniform(input_size, seed=1) + output = conv2d_ws.conv2d( + images, num_filters, [3, 3], rate=2, padding='VALID', scope='conv7') + with self.cached_session() as sess: + sess.run(tf.global_variables_initializer()) + self.assertEqual(output.op.name, 'conv7/Relu') + self.assertListEqual(list(output.eval().shape), expected_size) + + def testWithScopeWithoutActivation(self): + num_filters = 32 + input_size = [5, 9, 11, 3] + expected_size = [5, 5, 7, num_filters] + + images = tf.random_uniform(input_size, seed=1) + output = conv2d_ws.conv2d( + images, + num_filters, [3, 3], + rate=2, + padding='VALID', + activation_fn=None, + scope='conv7') + with self.cached_session() as sess: + sess.run(tf.global_variables_initializer()) + self.assertEqual(output.op.name, 'conv7/BiasAdd') + self.assertListEqual(list(output.eval().shape), expected_size) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/deeplab/core/dense_prediction_cell.py b/models/research/deeplab/core/dense_prediction_cell.py new file mode 100644 index 0000000000000000000000000000000000000000..8e32f8e227f0841d51df780618523a53c5eb4ae3 --- /dev/null +++ b/models/research/deeplab/core/dense_prediction_cell.py @@ -0,0 +1,290 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Dense Prediction Cell class that can be evolved in semantic segmentation. + +DensePredictionCell is used as a `layer` in semantic segmentation whose +architecture is determined by the `config`, a dictionary specifying +the architecture. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf +from tensorflow.contrib import slim as contrib_slim + +from deeplab.core import utils + +slim = contrib_slim + +# Local constants. +_META_ARCHITECTURE_SCOPE = 'meta_architecture' +_CONCAT_PROJECTION_SCOPE = 'concat_projection' +_OP = 'op' +_CONV = 'conv' +_PYRAMID_POOLING = 'pyramid_pooling' +_KERNEL = 'kernel' +_RATE = 'rate' +_GRID_SIZE = 'grid_size' +_TARGET_SIZE = 'target_size' +_INPUT = 'input' + + +def dense_prediction_cell_hparams(): + """DensePredictionCell HParams. 
+ + Returns: + A dictionary of hyper-parameters used for dense prediction cell with keys: + - reduction_size: Integer, the number of output filters for each operation + inside the cell. + - dropout_on_concat_features: Boolean, apply dropout on the concatenated + features or not. + - dropout_on_projection_features: Boolean, apply dropout on the projection + features or not. + - dropout_keep_prob: Float, when `dropout_on_concat_features' or + `dropout_on_projection_features' is True, the `keep_prob` value used + in the dropout operation. + - concat_channels: Integer, the concatenated features will be + channel-reduced to `concat_channels` channels. + - conv_rate_multiplier: Integer, used to multiply the convolution rates. + This is useful in the case when the output_stride is changed from 16 + to 8, we need to double the convolution rates correspondingly. + """ + return { + 'reduction_size': 256, + 'dropout_on_concat_features': True, + 'dropout_on_projection_features': False, + 'dropout_keep_prob': 0.9, + 'concat_channels': 256, + 'conv_rate_multiplier': 1, + } + + +class DensePredictionCell(object): + """DensePredictionCell class used as a 'layer' in semantic segmentation.""" + + def __init__(self, config, hparams=None): + """Initializes the dense prediction cell. + + Args: + config: A dictionary storing the architecture of a dense prediction cell. + hparams: A dictionary of hyper-parameters, provided by users. This + dictionary will be used to update the default dictionary returned by + dense_prediction_cell_hparams(). + + Raises: + ValueError: If `conv_rate_multiplier` has value < 1. + """ + self.hparams = dense_prediction_cell_hparams() + if hparams is not None: + self.hparams.update(hparams) + self.config = config + + # Check values in hparams are valid or not. + if self.hparams['conv_rate_multiplier'] < 1: + raise ValueError('conv_rate_multiplier cannot have value < 1.') + + def _get_pyramid_pooling_arguments( + self, crop_size, output_stride, image_grid, image_pooling_crop_size=None): + """Gets arguments for pyramid pooling. + + Args: + crop_size: A list of two integers, [crop_height, crop_width] specifying + whole patch crop size. + output_stride: Integer, output stride value for extracted features. + image_grid: A list of two integers, [image_grid_height, image_grid_width], + specifying the grid size of how the pyramid pooling will be performed. + image_pooling_crop_size: A list of two integers, [crop_height, crop_width] + specifying the crop size for image pooling operations. Note that we + decouple whole patch crop_size and image_pooling_crop_size as one could + perform the image_pooling with different crop sizes. + + Returns: + A list of (resize_value, pooled_kernel) + """ + resize_height = utils.scale_dimension(crop_size[0], 1. / output_stride) + resize_width = utils.scale_dimension(crop_size[1], 1. / output_stride) + # If image_pooling_crop_size is not specified, use crop_size. + if image_pooling_crop_size is None: + image_pooling_crop_size = crop_size + pooled_height = utils.scale_dimension( + image_pooling_crop_size[0], 1. / (output_stride * image_grid[0])) + pooled_width = utils.scale_dimension( + image_pooling_crop_size[1], 1. / (output_stride * image_grid[1])) + return ([resize_height, resize_width], [pooled_height, pooled_width]) + + def _parse_operation(self, config, crop_size, output_stride, + image_pooling_crop_size=None): + """Parses one operation. + + When 'operation' is 'pyramid_pooling', we compute the required + hyper-parameters and save in config. 
+ + Args: + config: A dictionary storing required hyper-parameters for one + operation. + crop_size: A list of two integers, [crop_height, crop_width] specifying + whole patch crop size. + output_stride: Integer, output stride value for extracted features. + image_pooling_crop_size: A list of two integers, [crop_height, crop_width] + specifying the crop size for image pooling operations. Note that we + decouple whole patch crop_size and image_pooling_crop_size as one could + perform the image_pooling with different crop sizes. + + Returns: + A dictionary stores the related information for the operation. + """ + if config[_OP] == _PYRAMID_POOLING: + (config[_TARGET_SIZE], + config[_KERNEL]) = self._get_pyramid_pooling_arguments( + crop_size=crop_size, + output_stride=output_stride, + image_grid=config[_GRID_SIZE], + image_pooling_crop_size=image_pooling_crop_size) + + return config + + def build_cell(self, + features, + output_stride=16, + crop_size=None, + image_pooling_crop_size=None, + weight_decay=0.00004, + reuse=None, + is_training=False, + fine_tune_batch_norm=False, + scope=None): + """Builds the dense prediction cell based on the config. + + Args: + features: Input feature map of size [batch, height, width, channels]. + output_stride: Int, output stride at which the features were extracted. + crop_size: A list [crop_height, crop_width], determining the input + features resolution. + image_pooling_crop_size: A list of two integers, [crop_height, crop_width] + specifying the crop size for image pooling operations. Note that we + decouple whole patch crop_size and image_pooling_crop_size as one could + perform the image_pooling with different crop sizes. + weight_decay: Float, the weight decay for model variables. + reuse: Reuse the model variables or not. + is_training: Boolean, is training or not. + fine_tune_batch_norm: Boolean, fine-tuning batch norm parameters or not. + scope: Optional string, specifying the variable scope. + + Returns: + Features after passing through the constructed dense prediction cell with + shape = [batch, height, width, channels] where channels are determined + by `reduction_size` returned by dense_prediction_cell_hparams(). + + Raises: + ValueError: Use Convolution with kernel size not equal to 1x1 or 3x3 or + the operation is not recognized. 
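+
+  For example, the config used in dense_prediction_cell_test.py,
+
+    config = [
+        {_INPUT: -1, _OP: _CONV, _KERNEL: 1},
+        {_INPUT: 0, _OP: _CONV, _KERNEL: 3, _RATE: [1, 3]},
+        {_INPUT: 1, _OP: _PYRAMID_POOLING, _GRID_SIZE: [1, 2]},
+    ]
+
+  builds three branches: a 1x1 convolution on the cell input, a 3x3 atrous
+  convolution (rate [1, 3]) on branch 0, and a pyramid pooling operation with
+  grid size [1, 2] on branch 1. The branch outputs are then concatenated and
+  projected to `concat_channels` channels.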
+ """ + batch_norm_params = { + 'is_training': is_training and fine_tune_batch_norm, + 'decay': 0.9997, + 'epsilon': 1e-5, + 'scale': True, + } + hparams = self.hparams + with slim.arg_scope( + [slim.conv2d, slim.separable_conv2d], + weights_regularizer=slim.l2_regularizer(weight_decay), + activation_fn=tf.nn.relu, + normalizer_fn=slim.batch_norm, + padding='SAME', + stride=1, + reuse=reuse): + with slim.arg_scope([slim.batch_norm], **batch_norm_params): + with tf.variable_scope(scope, _META_ARCHITECTURE_SCOPE, [features]): + depth = hparams['reduction_size'] + branch_logits = [] + for i, current_config in enumerate(self.config): + scope = 'branch%d' % i + current_config = self._parse_operation( + config=current_config, + crop_size=crop_size, + output_stride=output_stride, + image_pooling_crop_size=image_pooling_crop_size) + tf.logging.info(current_config) + if current_config[_INPUT] < 0: + operation_input = features + else: + operation_input = branch_logits[current_config[_INPUT]] + if current_config[_OP] == _CONV: + if current_config[_KERNEL] == [1, 1] or current_config[ + _KERNEL] == 1: + branch_logits.append( + slim.conv2d(operation_input, depth, 1, scope=scope)) + else: + conv_rate = [r * hparams['conv_rate_multiplier'] + for r in current_config[_RATE]] + branch_logits.append( + utils.split_separable_conv2d( + operation_input, + filters=depth, + kernel_size=current_config[_KERNEL], + rate=conv_rate, + weight_decay=weight_decay, + scope=scope)) + elif current_config[_OP] == _PYRAMID_POOLING: + pooled_features = slim.avg_pool2d( + operation_input, + kernel_size=current_config[_KERNEL], + stride=[1, 1], + padding='VALID') + pooled_features = slim.conv2d( + pooled_features, + depth, + 1, + scope=scope) + pooled_features = tf.image.resize_bilinear( + pooled_features, + current_config[_TARGET_SIZE], + align_corners=True) + # Set shape for resize_height/resize_width if they are not Tensor. + resize_height = current_config[_TARGET_SIZE][0] + resize_width = current_config[_TARGET_SIZE][1] + if isinstance(resize_height, tf.Tensor): + resize_height = None + if isinstance(resize_width, tf.Tensor): + resize_width = None + pooled_features.set_shape( + [None, resize_height, resize_width, depth]) + branch_logits.append(pooled_features) + else: + raise ValueError('Unrecognized operation.') + # Merge branch logits. 
+ concat_logits = tf.concat(branch_logits, 3) + if self.hparams['dropout_on_concat_features']: + concat_logits = slim.dropout( + concat_logits, + keep_prob=self.hparams['dropout_keep_prob'], + is_training=is_training, + scope=_CONCAT_PROJECTION_SCOPE + '_dropout') + concat_logits = slim.conv2d(concat_logits, + self.hparams['concat_channels'], + 1, + scope=_CONCAT_PROJECTION_SCOPE) + if self.hparams['dropout_on_projection_features']: + concat_logits = slim.dropout( + concat_logits, + keep_prob=self.hparams['dropout_keep_prob'], + is_training=is_training, + scope=_CONCAT_PROJECTION_SCOPE + '_dropout') + return concat_logits diff --git a/models/research/deeplab/core/dense_prediction_cell_branch5_top1_cityscapes.json b/models/research/deeplab/core/dense_prediction_cell_branch5_top1_cityscapes.json new file mode 100644 index 0000000000000000000000000000000000000000..12b093d07d1a696258cae7eaf4d793978433a69f --- /dev/null +++ b/models/research/deeplab/core/dense_prediction_cell_branch5_top1_cityscapes.json @@ -0,0 +1 @@ +[{"kernel": 3, "rate": [1, 6], "op": "conv", "input": -1}, {"kernel": 3, "rate": [18, 15], "op": "conv", "input": 0}, {"kernel": 3, "rate": [6, 3], "op": "conv", "input": 1}, {"kernel": 3, "rate": [1, 1], "op": "conv", "input": 0}, {"kernel": 3, "rate": [6, 21], "op": "conv", "input": 0}] \ No newline at end of file diff --git a/models/research/deeplab/core/dense_prediction_cell_test.py b/models/research/deeplab/core/dense_prediction_cell_test.py new file mode 100644 index 0000000000000000000000000000000000000000..1396a73626d90c5da1db3b187c5a927a0c9eeb11 --- /dev/null +++ b/models/research/deeplab/core/dense_prediction_cell_test.py @@ -0,0 +1,136 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for dense_prediction_cell.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from deeplab.core import dense_prediction_cell + + +class DensePredictionCellTest(tf.test.TestCase): + + def setUp(self): + self.segmentation_layer = dense_prediction_cell.DensePredictionCell( + config=[ + { + dense_prediction_cell._INPUT: -1, + dense_prediction_cell._OP: dense_prediction_cell._CONV, + dense_prediction_cell._KERNEL: 1, + }, + { + dense_prediction_cell._INPUT: 0, + dense_prediction_cell._OP: dense_prediction_cell._CONV, + dense_prediction_cell._KERNEL: 3, + dense_prediction_cell._RATE: [1, 3], + }, + { + dense_prediction_cell._INPUT: 1, + dense_prediction_cell._OP: ( + dense_prediction_cell._PYRAMID_POOLING), + dense_prediction_cell._GRID_SIZE: [1, 2], + }, + ], + hparams={'conv_rate_multiplier': 2}) + + def testPyramidPoolingArguments(self): + features_size, pooled_kernel = ( + self.segmentation_layer._get_pyramid_pooling_arguments( + crop_size=[513, 513], + output_stride=16, + image_grid=[4, 4])) + self.assertListEqual(features_size, [33, 33]) + self.assertListEqual(pooled_kernel, [9, 9]) + + def testPyramidPoolingArgumentsWithImageGrid1x1(self): + features_size, pooled_kernel = ( + self.segmentation_layer._get_pyramid_pooling_arguments( + crop_size=[257, 257], + output_stride=16, + image_grid=[1, 1])) + self.assertListEqual(features_size, [17, 17]) + self.assertListEqual(pooled_kernel, [17, 17]) + + def testParseOperationStringWithConv1x1(self): + operation = self.segmentation_layer._parse_operation( + config={ + dense_prediction_cell._OP: dense_prediction_cell._CONV, + dense_prediction_cell._KERNEL: [1, 1], + }, + crop_size=[513, 513], output_stride=16) + self.assertEqual(operation[dense_prediction_cell._OP], + dense_prediction_cell._CONV) + self.assertListEqual(operation[dense_prediction_cell._KERNEL], [1, 1]) + + def testParseOperationStringWithConv3x3(self): + operation = self.segmentation_layer._parse_operation( + config={ + dense_prediction_cell._OP: dense_prediction_cell._CONV, + dense_prediction_cell._KERNEL: [3, 3], + dense_prediction_cell._RATE: [9, 6], + }, + crop_size=[513, 513], output_stride=16) + self.assertEqual(operation[dense_prediction_cell._OP], + dense_prediction_cell._CONV) + self.assertListEqual(operation[dense_prediction_cell._KERNEL], [3, 3]) + self.assertEqual(operation[dense_prediction_cell._RATE], [9, 6]) + + def testParseOperationStringWithPyramidPooling2x2(self): + operation = self.segmentation_layer._parse_operation( + config={ + dense_prediction_cell._OP: dense_prediction_cell._PYRAMID_POOLING, + dense_prediction_cell._GRID_SIZE: [2, 2], + }, + crop_size=[513, 513], + output_stride=16) + self.assertEqual(operation[dense_prediction_cell._OP], + dense_prediction_cell._PYRAMID_POOLING) + # The feature maps of size [33, 33] should be covered by 2x2 kernels with + # size [17, 17]. 
+ self.assertListEqual( + operation[dense_prediction_cell._TARGET_SIZE], [33, 33]) + self.assertListEqual(operation[dense_prediction_cell._KERNEL], [17, 17]) + + def testBuildCell(self): + with self.test_session(graph=tf.Graph()) as sess: + features = tf.random_normal([2, 33, 33, 5]) + concat_logits = self.segmentation_layer.build_cell( + features, + output_stride=8, + crop_size=[257, 257]) + sess.run(tf.global_variables_initializer()) + concat_logits = sess.run(concat_logits) + self.assertTrue(concat_logits.any()) + + def testBuildCellWithImagePoolingCropSize(self): + with self.test_session(graph=tf.Graph()) as sess: + features = tf.random_normal([2, 33, 33, 5]) + concat_logits = self.segmentation_layer.build_cell( + features, + output_stride=8, + crop_size=[257, 257], + image_pooling_crop_size=[129, 129]) + sess.run(tf.global_variables_initializer()) + concat_logits = sess.run(concat_logits) + self.assertTrue(concat_logits.any()) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/deeplab/core/feature_extractor.py b/models/research/deeplab/core/feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..553bd9b6a7393dd7f3e0ebce80302919215a9bfe --- /dev/null +++ b/models/research/deeplab/core/feature_extractor.py @@ -0,0 +1,711 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Extracts features for different models.""" +import copy +import functools + +import tensorflow.compat.v1 as tf +from tensorflow.contrib import slim as contrib_slim + +from deeplab.core import nas_network +from deeplab.core import resnet_v1_beta +from deeplab.core import xception +from nets.mobilenet import conv_blocks +from nets.mobilenet import mobilenet +from nets.mobilenet import mobilenet_v2 +from nets.mobilenet import mobilenet_v3 + +slim = contrib_slim + +# Default end point for MobileNetv2 (one-based indexing). +_MOBILENET_V2_FINAL_ENDPOINT = 'layer_18' +# Default end point for MobileNetv3. +_MOBILENET_V3_LARGE_FINAL_ENDPOINT = 'layer_17' +_MOBILENET_V3_SMALL_FINAL_ENDPOINT = 'layer_13' +# Default end point for EdgeTPU Mobilenet. +_MOBILENET_EDGETPU = 'layer_24' + + +def _mobilenet_v2(net, + depth_multiplier, + output_stride, + conv_defs=None, + divisible_by=None, + reuse=None, + scope=None, + final_endpoint=None): + """Auxiliary function to add support for 'reuse' to mobilenet_v2. + + Args: + net: Input tensor of shape [batch_size, height, width, channels]. + depth_multiplier: Float multiplier for the depth (number of channels) + for all convolution ops. The value must be greater than zero. Typical + usage will be to set this value in (0, 1) to reduce the number of + parameters or computation cost of the model. + output_stride: An integer that specifies the requested ratio of input to + output spatial resolution. 
If not None, then we invoke atrous convolution + if necessary to prevent the network from reducing the spatial resolution + of the activation maps. Allowed values are 8 (accurate fully convolutional + mode), 16 (fast fully convolutional mode), 32 (classification mode). + conv_defs: MobileNet con def. + divisible_by: None (use default setting) or an integer that ensures all + layers # channels will be divisible by this number. Used in MobileNet. + reuse: Reuse model variables. + scope: Optional variable scope. + final_endpoint: The endpoint to construct the network up to. + + Returns: + Features extracted by MobileNetv2. + """ + if divisible_by is None: + divisible_by = 8 if depth_multiplier == 1.0 else 1 + if conv_defs is None: + conv_defs = mobilenet_v2.V2_DEF + with tf.variable_scope( + scope, 'MobilenetV2', [net], reuse=reuse) as scope: + return mobilenet_v2.mobilenet_base( + net, + conv_defs=conv_defs, + depth_multiplier=depth_multiplier, + min_depth=8 if depth_multiplier == 1.0 else 1, + divisible_by=divisible_by, + final_endpoint=final_endpoint or _MOBILENET_V2_FINAL_ENDPOINT, + output_stride=output_stride, + scope=scope) + + +def _mobilenet_v3(net, + depth_multiplier, + output_stride, + conv_defs=None, + divisible_by=None, + reuse=None, + scope=None, + final_endpoint=None): + """Auxiliary function to build mobilenet v3. + + Args: + net: Input tensor of shape [batch_size, height, width, channels]. + depth_multiplier: Float multiplier for the depth (number of channels) + for all convolution ops. The value must be greater than zero. Typical + usage will be to set this value in (0, 1) to reduce the number of + parameters or computation cost of the model. + output_stride: An integer that specifies the requested ratio of input to + output spatial resolution. If not None, then we invoke atrous convolution + if necessary to prevent the network from reducing the spatial resolution + of the activation maps. Allowed values are 8 (accurate fully convolutional + mode), 16 (fast fully convolutional mode), 32 (classification mode). + conv_defs: A list of ConvDef namedtuples specifying the net architecture. + divisible_by: None (use default setting) or an integer that ensures all + layers # channels will be divisible by this number. Used in MobileNet. + reuse: Reuse model variables. + scope: Optional variable scope. + final_endpoint: The endpoint to construct the network up to. + + Returns: + net: The output tensor. + end_points: A set of activations for external use. + + Raises: + ValueError: If conv_defs or final_endpoint is not specified. + """ + del divisible_by + with tf.variable_scope( + scope, 'MobilenetV3', [net], reuse=reuse) as scope: + if conv_defs is None: + raise ValueError('conv_defs must be specified for mobilenet v3.') + if final_endpoint is None: + raise ValueError('Final endpoint must be specified for mobilenet v3.') + net, end_points = mobilenet_v3.mobilenet_base( + net, + depth_multiplier=depth_multiplier, + conv_defs=conv_defs, + output_stride=output_stride, + final_endpoint=final_endpoint, + scope=scope) + + return net, end_points + + +def mobilenet_v3_large_seg(net, + depth_multiplier, + output_stride, + divisible_by=None, + reuse=None, + scope=None, + final_endpoint=None): + """Final mobilenet v3 large model for segmentation task.""" + del divisible_by + del final_endpoint + conv_defs = copy.deepcopy(mobilenet_v3.V3_LARGE) + + # Reduce the filters by a factor of 2 in the last block. 
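+  # The loop below halves `num_outputs` for layers 13-16 of the V3-Large spec
+  # and, for the layers that still expand, rewrites `expansion_size` relative
+  # to the previous layer's reduced `num_outputs` via
+  # mobilenet_v3.expand_input, so the expanded widths match the listed
+  # targets (336, 480, 480).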
+ for layer, expansion in [(13, 336), (14, 480), (15, 480), (16, None)]: + conv_defs['spec'][layer].params['num_outputs'] /= 2 + # Update expansion size + if expansion is not None: + factor = expansion / conv_defs['spec'][layer - 1].params['num_outputs'] + conv_defs['spec'][layer].params[ + 'expansion_size'] = mobilenet_v3.expand_input(factor) + + return _mobilenet_v3( + net, + depth_multiplier=depth_multiplier, + output_stride=output_stride, + divisible_by=8, + conv_defs=conv_defs, + reuse=reuse, + scope=scope, + final_endpoint=_MOBILENET_V3_LARGE_FINAL_ENDPOINT) + + +def mobilenet_edgetpu(net, + depth_multiplier, + output_stride, + divisible_by=None, + reuse=None, + scope=None, + final_endpoint=None): + """EdgeTPU version of mobilenet model for segmentation task.""" + del divisible_by + del final_endpoint + conv_defs = copy.deepcopy(mobilenet_v3.V3_EDGETPU) + + return _mobilenet_v3( + net, + depth_multiplier=depth_multiplier, + output_stride=output_stride, + divisible_by=8, + conv_defs=conv_defs, + reuse=reuse, + scope=scope, # the scope is 'MobilenetEdgeTPU' + final_endpoint=_MOBILENET_EDGETPU) + + +def mobilenet_v3_small_seg(net, + depth_multiplier, + output_stride, + divisible_by=None, + reuse=None, + scope=None, + final_endpoint=None): + """Final mobilenet v3 small model for segmentation task.""" + del divisible_by + del final_endpoint + conv_defs = copy.deepcopy(mobilenet_v3.V3_SMALL) + + # Reduce the filters by a factor of 2 in the last block. + for layer, expansion in [(9, 144), (10, 288), (11, 288), (12, None)]: + conv_defs['spec'][layer].params['num_outputs'] /= 2 + # Update expansion size + if expansion is not None: + factor = expansion / conv_defs['spec'][layer - 1].params['num_outputs'] + conv_defs['spec'][layer].params[ + 'expansion_size'] = mobilenet_v3.expand_input(factor) + + return _mobilenet_v3( + net, + depth_multiplier=depth_multiplier, + output_stride=output_stride, + divisible_by=8, + conv_defs=conv_defs, + reuse=reuse, + scope=scope, + final_endpoint=_MOBILENET_V3_SMALL_FINAL_ENDPOINT) + + +# A map from network name to network function. +networks_map = { + 'mobilenet_v2': _mobilenet_v2, + 'mobilenet_edgetpu': mobilenet_edgetpu, + 'mobilenet_v3_large_seg': mobilenet_v3_large_seg, + 'mobilenet_v3_small_seg': mobilenet_v3_small_seg, + 'resnet_v1_18': resnet_v1_beta.resnet_v1_18, + 'resnet_v1_18_beta': resnet_v1_beta.resnet_v1_18_beta, + 'resnet_v1_50': resnet_v1_beta.resnet_v1_50, + 'resnet_v1_50_beta': resnet_v1_beta.resnet_v1_50_beta, + 'resnet_v1_101': resnet_v1_beta.resnet_v1_101, + 'resnet_v1_101_beta': resnet_v1_beta.resnet_v1_101_beta, + 'xception_41': xception.xception_41, + 'xception_65': xception.xception_65, + 'xception_71': xception.xception_71, + 'nas_pnasnet': nas_network.pnasnet, + 'nas_hnasnet': nas_network.hnasnet, +} + + +def mobilenet_v2_arg_scope(is_training=True, + weight_decay=0.00004, + stddev=0.09, + activation=tf.nn.relu6, + bn_decay=0.997, + bn_epsilon=None, + bn_renorm=None): + """Defines the default MobilenetV2 arg scope. + + Args: + is_training: Whether or not we're training the model. If this is set to None + is_training parameter in batch_norm is not set. Please note that this also + sets the is_training parameter in dropout to None. + weight_decay: The weight decay to use for regularizing the model. + stddev: Standard deviation for initialization, if negative uses xavier. + activation: If True, a modified activation is used (initialized ~ReLU6). + bn_decay: decay for the batch norm moving averages. 
+ bn_epsilon: batch normalization epsilon. + bn_renorm: whether to use batchnorm renormalization + + Returns: + An `arg_scope` to use for the mobilenet v1 model. + """ + batch_norm_params = { + 'center': True, + 'scale': True, + 'decay': bn_decay, + } + if bn_epsilon is not None: + batch_norm_params['epsilon'] = bn_epsilon + if is_training is not None: + batch_norm_params['is_training'] = is_training + if bn_renorm is not None: + batch_norm_params['renorm'] = bn_renorm + dropout_params = {} + if is_training is not None: + dropout_params['is_training'] = is_training + + instance_norm_params = { + 'center': True, + 'scale': True, + 'epsilon': 0.001, + } + + if stddev < 0: + weight_intitializer = slim.initializers.xavier_initializer() + else: + weight_intitializer = tf.truncated_normal_initializer(stddev=stddev) + + # Set weight_decay for weights in Conv and FC layers. + with slim.arg_scope( + [slim.conv2d, slim.fully_connected, slim.separable_conv2d], + weights_initializer=weight_intitializer, + activation_fn=activation, + normalizer_fn=slim.batch_norm), \ + slim.arg_scope( + [conv_blocks.expanded_conv], normalizer_fn=slim.batch_norm), \ + slim.arg_scope([mobilenet.apply_activation], activation_fn=activation),\ + slim.arg_scope([slim.batch_norm], **batch_norm_params), \ + slim.arg_scope([mobilenet.mobilenet_base, mobilenet.mobilenet], + is_training=is_training),\ + slim.arg_scope([slim.dropout], **dropout_params), \ + slim.arg_scope([slim.instance_norm], **instance_norm_params), \ + slim.arg_scope([slim.conv2d], \ + weights_regularizer=slim.l2_regularizer(weight_decay)), \ + slim.arg_scope([slim.separable_conv2d], weights_regularizer=None), \ + slim.arg_scope([slim.conv2d, slim.separable_conv2d], padding='SAME') as s: + return s + + +# A map from network name to network arg scope. +arg_scopes_map = { + 'mobilenet_v2': mobilenet_v2.training_scope, + 'mobilenet_edgetpu': mobilenet_v2_arg_scope, + 'mobilenet_v3_large_seg': mobilenet_v2_arg_scope, + 'mobilenet_v3_small_seg': mobilenet_v2_arg_scope, + 'resnet_v1_18': resnet_v1_beta.resnet_arg_scope, + 'resnet_v1_18_beta': resnet_v1_beta.resnet_arg_scope, + 'resnet_v1_50': resnet_v1_beta.resnet_arg_scope, + 'resnet_v1_50_beta': resnet_v1_beta.resnet_arg_scope, + 'resnet_v1_101': resnet_v1_beta.resnet_arg_scope, + 'resnet_v1_101_beta': resnet_v1_beta.resnet_arg_scope, + 'xception_41': xception.xception_arg_scope, + 'xception_65': xception.xception_arg_scope, + 'xception_71': xception.xception_arg_scope, + 'nas_pnasnet': nas_network.nas_arg_scope, + 'nas_hnasnet': nas_network.nas_arg_scope, +} + +# Names for end point features. +DECODER_END_POINTS = 'decoder_end_points' + +# A dictionary from network name to a map of end point features. 
+networks_to_feature_maps = { + 'mobilenet_v2': { + DECODER_END_POINTS: { + 4: ['layer_4/depthwise_output'], + 8: ['layer_7/depthwise_output'], + 16: ['layer_14/depthwise_output'], + }, + }, + 'mobilenet_v3_large_seg': { + DECODER_END_POINTS: { + 4: ['layer_4/depthwise_output'], + 8: ['layer_7/depthwise_output'], + 16: ['layer_13/depthwise_output'], + }, + }, + 'mobilenet_v3_small_seg': { + DECODER_END_POINTS: { + 4: ['layer_2/depthwise_output'], + 8: ['layer_4/depthwise_output'], + 16: ['layer_9/depthwise_output'], + }, + }, + 'resnet_v1_18': { + DECODER_END_POINTS: { + 4: ['block1/unit_1/lite_bottleneck_v1/conv2'], + 8: ['block2/unit_1/lite_bottleneck_v1/conv2'], + 16: ['block3/unit_1/lite_bottleneck_v1/conv2'], + }, + }, + 'resnet_v1_18_beta': { + DECODER_END_POINTS: { + 4: ['block1/unit_1/lite_bottleneck_v1/conv2'], + 8: ['block2/unit_1/lite_bottleneck_v1/conv2'], + 16: ['block3/unit_1/lite_bottleneck_v1/conv2'], + }, + }, + 'resnet_v1_50': { + DECODER_END_POINTS: { + 4: ['block1/unit_2/bottleneck_v1/conv3'], + 8: ['block2/unit_3/bottleneck_v1/conv3'], + 16: ['block3/unit_5/bottleneck_v1/conv3'], + }, + }, + 'resnet_v1_50_beta': { + DECODER_END_POINTS: { + 4: ['block1/unit_2/bottleneck_v1/conv3'], + 8: ['block2/unit_3/bottleneck_v1/conv3'], + 16: ['block3/unit_5/bottleneck_v1/conv3'], + }, + }, + 'resnet_v1_101': { + DECODER_END_POINTS: { + 4: ['block1/unit_2/bottleneck_v1/conv3'], + 8: ['block2/unit_3/bottleneck_v1/conv3'], + 16: ['block3/unit_22/bottleneck_v1/conv3'], + }, + }, + 'resnet_v1_101_beta': { + DECODER_END_POINTS: { + 4: ['block1/unit_2/bottleneck_v1/conv3'], + 8: ['block2/unit_3/bottleneck_v1/conv3'], + 16: ['block3/unit_22/bottleneck_v1/conv3'], + }, + }, + 'xception_41': { + DECODER_END_POINTS: { + 4: ['entry_flow/block2/unit_1/xception_module/' + 'separable_conv2_pointwise'], + 8: ['entry_flow/block3/unit_1/xception_module/' + 'separable_conv2_pointwise'], + 16: ['exit_flow/block1/unit_1/xception_module/' + 'separable_conv2_pointwise'], + }, + }, + 'xception_65': { + DECODER_END_POINTS: { + 4: ['entry_flow/block2/unit_1/xception_module/' + 'separable_conv2_pointwise'], + 8: ['entry_flow/block3/unit_1/xception_module/' + 'separable_conv2_pointwise'], + 16: ['exit_flow/block1/unit_1/xception_module/' + 'separable_conv2_pointwise'], + }, + }, + 'xception_71': { + DECODER_END_POINTS: { + 4: ['entry_flow/block3/unit_1/xception_module/' + 'separable_conv2_pointwise'], + 8: ['entry_flow/block5/unit_1/xception_module/' + 'separable_conv2_pointwise'], + 16: ['exit_flow/block1/unit_1/xception_module/' + 'separable_conv2_pointwise'], + }, + }, + 'nas_pnasnet': { + DECODER_END_POINTS: { + 4: ['Stem'], + 8: ['Cell_3'], + 16: ['Cell_7'], + }, + }, + 'nas_hnasnet': { + DECODER_END_POINTS: { + 4: ['Cell_2'], + 8: ['Cell_5'], + 16: ['Cell_7'], + }, + }, +} + +# A map from feature extractor name to the network name scope used in the +# ImageNet pretrained versions of these models. +name_scope = { + 'mobilenet_v2': 'MobilenetV2', + 'mobilenet_edgetpu': 'MobilenetEdgeTPU', + 'mobilenet_v3_large_seg': 'MobilenetV3', + 'mobilenet_v3_small_seg': 'MobilenetV3', + 'resnet_v1_18': 'resnet_v1_18', + 'resnet_v1_18_beta': 'resnet_v1_18', + 'resnet_v1_50': 'resnet_v1_50', + 'resnet_v1_50_beta': 'resnet_v1_50', + 'resnet_v1_101': 'resnet_v1_101', + 'resnet_v1_101_beta': 'resnet_v1_101', + 'xception_41': 'xception_41', + 'xception_65': 'xception_65', + 'xception_71': 'xception_71', + 'nas_pnasnet': 'pnasnet', + 'nas_hnasnet': 'hnasnet', +} + +# Mean pixel value. 
+_MEAN_RGB = [123.15, 115.90, 103.06] + + +def _preprocess_subtract_imagenet_mean(inputs, dtype=tf.float32): + """Subtract Imagenet mean RGB value.""" + mean_rgb = tf.reshape(_MEAN_RGB, [1, 1, 1, 3]) + num_channels = tf.shape(inputs)[-1] + # We set mean pixel as 0 for the non-RGB channels. + mean_rgb_extended = tf.concat( + [mean_rgb, tf.zeros([1, 1, 1, num_channels - 3])], axis=3) + return tf.cast(inputs - mean_rgb_extended, dtype=dtype) + + +def _preprocess_zero_mean_unit_range(inputs, dtype=tf.float32): + """Map image values from [0, 255] to [-1, 1].""" + preprocessed_inputs = (2.0 / 255.0) * tf.to_float(inputs) - 1.0 + return tf.cast(preprocessed_inputs, dtype=dtype) + + +_PREPROCESS_FN = { + 'mobilenet_v2': _preprocess_zero_mean_unit_range, + 'mobilenet_edgetpu': _preprocess_zero_mean_unit_range, + 'mobilenet_v3_large_seg': _preprocess_zero_mean_unit_range, + 'mobilenet_v3_small_seg': _preprocess_zero_mean_unit_range, + 'resnet_v1_18': _preprocess_subtract_imagenet_mean, + 'resnet_v1_18_beta': _preprocess_zero_mean_unit_range, + 'resnet_v1_50': _preprocess_subtract_imagenet_mean, + 'resnet_v1_50_beta': _preprocess_zero_mean_unit_range, + 'resnet_v1_101': _preprocess_subtract_imagenet_mean, + 'resnet_v1_101_beta': _preprocess_zero_mean_unit_range, + 'xception_41': _preprocess_zero_mean_unit_range, + 'xception_65': _preprocess_zero_mean_unit_range, + 'xception_71': _preprocess_zero_mean_unit_range, + 'nas_pnasnet': _preprocess_zero_mean_unit_range, + 'nas_hnasnet': _preprocess_zero_mean_unit_range, +} + + +def mean_pixel(model_variant=None): + """Gets mean pixel value. + + This function returns different mean pixel value, depending on the input + model_variant which adopts different preprocessing functions. We currently + handle the following preprocessing functions: + (1) _preprocess_subtract_imagenet_mean. We simply return mean pixel value. + (2) _preprocess_zero_mean_unit_range. We return [127.5, 127.5, 127.5]. + The return values are used in a way that the padded regions after + pre-processing will contain value 0. + + Args: + model_variant: Model variant (string) for feature extraction. For + backwards compatibility, model_variant=None returns _MEAN_RGB. + + Returns: + Mean pixel value. + """ + if model_variant in ['resnet_v1_50', + 'resnet_v1_101'] or model_variant is None: + return _MEAN_RGB + else: + return [127.5, 127.5, 127.5] + + +def extract_features(images, + output_stride=8, + multi_grid=None, + depth_multiplier=1.0, + divisible_by=None, + final_endpoint=None, + model_variant=None, + weight_decay=0.0001, + reuse=None, + is_training=False, + fine_tune_batch_norm=False, + regularize_depthwise=False, + preprocess_images=True, + preprocessed_images_dtype=tf.float32, + num_classes=None, + global_pool=False, + nas_architecture_options=None, + nas_training_hyper_parameters=None, + use_bounded_activation=False): + """Extracts features by the particular model_variant. + + Args: + images: A tensor of size [batch, height, width, channels]. + output_stride: The ratio of input to output spatial resolution. + multi_grid: Employ a hierarchy of different atrous rates within network. + depth_multiplier: Float multiplier for the depth (number of channels) + for all convolution ops used in MobileNet. + divisible_by: None (use default setting) or an integer that ensures all + layers # channels will be divisible by this number. Used in MobileNet. + final_endpoint: The MobileNet endpoint to construct the network up to. + model_variant: Model variant for feature extraction. 
+ weight_decay: The weight decay for model variables. + reuse: Reuse the model variables or not. + is_training: Is training or not. + fine_tune_batch_norm: Fine-tune the batch norm parameters or not. + regularize_depthwise: Whether or not to apply L2-norm regularization on the + depthwise convolution weights. + preprocess_images: Performs preprocessing on images or not. Defaults to + True. Set to False if preprocessing will be done by other functions. We + support two types of preprocessing: (1) Mean pixel subtraction and (2) + Pixel values normalization to be [-1, 1]. + preprocessed_images_dtype: The type after the preprocessing function. + num_classes: Number of classes for image classification task. Defaults + to None for dense prediction tasks. + global_pool: Global pooling for image classification task. Defaults to + False, since dense prediction tasks do not use this. + nas_architecture_options: A dictionary storing NAS architecture options. + It is either None or its keys are: + - `nas_stem_output_num_conv_filters`: Number of filters of the NAS stem + output tensor. + - `nas_use_classification_head`: Boolean, use image classification head. + nas_training_hyper_parameters: A dictionary storing hyper-parameters for + training nas models. It is either None or its keys are: + - `drop_path_keep_prob`: Probability to keep each path in the cell when + training. + - `total_training_steps`: Total training steps to help drop path + probability calculation. + use_bounded_activation: Whether or not to use bounded activations. Bounded + activations better lend themselves to quantized inference. Currently, + bounded activation is only used in xception model. + + Returns: + features: A tensor of size [batch, feature_height, feature_width, + feature_channels], where feature_height/feature_width are determined + by the images height/width and output_stride. + end_points: A dictionary from components of the network to the corresponding + activation. + + Raises: + ValueError: Unrecognized model variant.
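+
+  For example, `extract_features(images, output_stride=16,
+  model_variant='mobilenet_v2')` maps `images` to the [-1, 1] range via
+  _preprocess_zero_mean_unit_range and runs the MobilenetV2 backbone defined
+  above, returning its final feature map together with its end points.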
+ """ + if 'resnet' in model_variant: + arg_scope = arg_scopes_map[model_variant]( + weight_decay=weight_decay, + batch_norm_decay=0.95, + batch_norm_epsilon=1e-5, + batch_norm_scale=True) + features, end_points = get_network( + model_variant, preprocess_images, preprocessed_images_dtype, arg_scope)( + inputs=images, + num_classes=num_classes, + is_training=(is_training and fine_tune_batch_norm), + global_pool=global_pool, + output_stride=output_stride, + multi_grid=multi_grid, + reuse=reuse, + scope=name_scope[model_variant]) + elif 'xception' in model_variant: + arg_scope = arg_scopes_map[model_variant]( + weight_decay=weight_decay, + batch_norm_decay=0.9997, + batch_norm_epsilon=1e-3, + batch_norm_scale=True, + regularize_depthwise=regularize_depthwise, + use_bounded_activation=use_bounded_activation) + features, end_points = get_network( + model_variant, preprocess_images, preprocessed_images_dtype, arg_scope)( + inputs=images, + num_classes=num_classes, + is_training=(is_training and fine_tune_batch_norm), + global_pool=global_pool, + output_stride=output_stride, + regularize_depthwise=regularize_depthwise, + multi_grid=multi_grid, + reuse=reuse, + scope=name_scope[model_variant]) + elif 'mobilenet' in model_variant or model_variant.startswith('mnas'): + arg_scope = arg_scopes_map[model_variant]( + is_training=(is_training and fine_tune_batch_norm), + weight_decay=weight_decay) + features, end_points = get_network( + model_variant, preprocess_images, preprocessed_images_dtype, arg_scope)( + inputs=images, + depth_multiplier=depth_multiplier, + divisible_by=divisible_by, + output_stride=output_stride, + reuse=reuse, + scope=name_scope[model_variant], + final_endpoint=final_endpoint) + elif model_variant.startswith('nas'): + arg_scope = arg_scopes_map[model_variant]( + weight_decay=weight_decay, + batch_norm_decay=0.9997, + batch_norm_epsilon=1e-3) + features, end_points = get_network( + model_variant, preprocess_images, preprocessed_images_dtype, arg_scope)( + inputs=images, + num_classes=num_classes, + is_training=(is_training and fine_tune_batch_norm), + global_pool=global_pool, + output_stride=output_stride, + nas_architecture_options=nas_architecture_options, + nas_training_hyper_parameters=nas_training_hyper_parameters, + reuse=reuse, + scope=name_scope[model_variant]) + else: + raise ValueError('Unknown model variant %s.' % model_variant) + + return features, end_points + + +def get_network(network_name, preprocess_images, + preprocessed_images_dtype=tf.float32, arg_scope=None): + """Gets the network. + + Args: + network_name: Network name. + preprocess_images: Preprocesses the images or not. + preprocessed_images_dtype: The type after the preprocessing function. + arg_scope: Optional, arg_scope to build the network. If not provided the + default arg_scope of the network would be used. + + Returns: + A network function that is used to extract features. + + Raises: + ValueError: network is not supported. + """ + if network_name not in networks_map: + raise ValueError('Unsupported network %s.' 
% network_name) + arg_scope = arg_scope or arg_scopes_map[network_name]() + def _identity_function(inputs, dtype=preprocessed_images_dtype): + return tf.cast(inputs, dtype=dtype) + if preprocess_images: + preprocess_function = _PREPROCESS_FN[network_name] + else: + preprocess_function = _identity_function + func = networks_map[network_name] + @functools.wraps(func) + def network_fn(inputs, *args, **kwargs): + with slim.arg_scope(arg_scope): + return func(preprocess_function(inputs, preprocessed_images_dtype), + *args, **kwargs) + return network_fn diff --git a/models/research/deeplab/core/nas_cell.py b/models/research/deeplab/core/nas_cell.py new file mode 100644 index 0000000000000000000000000000000000000000..d179082dc72b6692e96289ef9ed6964165023c33 --- /dev/null +++ b/models/research/deeplab/core/nas_cell.py @@ -0,0 +1,221 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Cell structure used by NAS.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +from six.moves import range +from six.moves import zip +import tensorflow as tf +from tensorflow.contrib import framework as contrib_framework +from tensorflow.contrib import slim as contrib_slim +from deeplab.core import xception as xception_utils +from deeplab.core.utils import resize_bilinear +from deeplab.core.utils import scale_dimension +from tensorflow.contrib.slim.nets import resnet_utils + +arg_scope = contrib_framework.arg_scope +slim = contrib_slim + +separable_conv2d_same = functools.partial(xception_utils.separable_conv2d_same, + regularize_depthwise=True) + + +class NASBaseCell(object): + """NASNet Cell class that is used as a 'layer' in image architectures.""" + + def __init__(self, num_conv_filters, operations, used_hiddenstates, + hiddenstate_indices, drop_path_keep_prob, total_num_cells, + total_training_steps, batch_norm_fn=slim.batch_norm): + """Init function. + + For more details about NAS cell, see + https://arxiv.org/abs/1707.07012 and https://arxiv.org/abs/1712.00559. + + Args: + num_conv_filters: The number of filters for each convolution operation. + operations: List of operations that are performed in the NASNet Cell in + order. + used_hiddenstates: Binary array that signals if the hiddenstate was used + within the cell. This is used to determine what outputs of the cell + should be concatenated together. + hiddenstate_indices: Determines what hiddenstates should be combined + together with the specified operations to create the NASNet cell. + drop_path_keep_prob: Float, drop path keep probability. + total_num_cells: Integer, total number of cells. + total_training_steps: Integer, total training steps. + batch_norm_fn: Function, batch norm function. Defaults to + slim.batch_norm. 
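+
+  For example, with ten operations the cell performs five combine iterations:
+  operations[2*i] and operations[2*i + 1] are applied to the hidden states
+  selected by hiddenstate_indices[2*i] and hiddenstate_indices[2*i + 1], and
+  their sum is appended to the list of hidden states (see __call__ below).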
+ """ + if len(hiddenstate_indices) != len(operations): + raise ValueError( + 'Number of hiddenstate_indices and operations should be the same.') + if len(operations) % 2: + raise ValueError('Number of operations should be even.') + self._num_conv_filters = num_conv_filters + self._operations = operations + self._used_hiddenstates = used_hiddenstates + self._hiddenstate_indices = hiddenstate_indices + self._drop_path_keep_prob = drop_path_keep_prob + self._total_num_cells = total_num_cells + self._total_training_steps = total_training_steps + self._batch_norm_fn = batch_norm_fn + + def __call__(self, net, scope, filter_scaling, stride, prev_layer, cell_num): + """Runs the conv cell.""" + self._cell_num = cell_num + self._filter_scaling = filter_scaling + self._filter_size = int(self._num_conv_filters * filter_scaling) + + with tf.variable_scope(scope): + net = self._cell_base(net, prev_layer) + for i in range(len(self._operations) // 2): + with tf.variable_scope('comb_iter_{}'.format(i)): + h1 = net[self._hiddenstate_indices[i * 2]] + h2 = net[self._hiddenstate_indices[i * 2 + 1]] + with tf.variable_scope('left'): + h1 = self._apply_conv_operation( + h1, self._operations[i * 2], stride, + self._hiddenstate_indices[i * 2] < 2) + with tf.variable_scope('right'): + h2 = self._apply_conv_operation( + h2, self._operations[i * 2 + 1], stride, + self._hiddenstate_indices[i * 2 + 1] < 2) + with tf.variable_scope('combine'): + h = h1 + h2 + net.append(h) + + with tf.variable_scope('cell_output'): + net = self._combine_unused_states(net) + + return net + + def _cell_base(self, net, prev_layer): + """Runs the beginning of the conv cell before the chosen ops are run.""" + filter_size = self._filter_size + + if prev_layer is None: + prev_layer = net + else: + if net.shape[2] != prev_layer.shape[2]: + prev_layer = resize_bilinear( + prev_layer, tf.shape(net)[1:3], prev_layer.dtype) + if filter_size != prev_layer.shape[3]: + prev_layer = tf.nn.relu(prev_layer) + prev_layer = slim.conv2d(prev_layer, filter_size, 1, scope='prev_1x1') + prev_layer = self._batch_norm_fn(prev_layer, scope='prev_bn') + + net = tf.nn.relu(net) + net = slim.conv2d(net, filter_size, 1, scope='1x1') + net = self._batch_norm_fn(net, scope='beginning_bn') + net = tf.split(axis=3, num_or_size_splits=1, value=net) + net.append(prev_layer) + return net + + def _apply_conv_operation(self, net, operation, stride, + is_from_original_input): + """Applies the predicted conv operation to net.""" + if stride > 1 and not is_from_original_input: + stride = 1 + input_filters = net.shape[3] + filter_size = self._filter_size + if 'separable' in operation: + num_layers = int(operation.split('_')[-1]) + kernel_size = int(operation.split('x')[0][-1]) + for layer_num in range(num_layers): + net = tf.nn.relu(net) + net = separable_conv2d_same( + net, + filter_size, + kernel_size, + depth_multiplier=1, + scope='separable_{0}x{0}_{1}'.format(kernel_size, layer_num + 1), + stride=stride) + net = self._batch_norm_fn( + net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, layer_num + 1)) + stride = 1 + elif 'atrous' in operation: + kernel_size = int(operation.split('x')[0][-1]) + net = tf.nn.relu(net) + if stride == 2: + scaled_height = scale_dimension(tf.shape(net)[1], 0.5) + scaled_width = scale_dimension(tf.shape(net)[2], 0.5) + net = resize_bilinear(net, [scaled_height, scaled_width], net.dtype) + net = resnet_utils.conv2d_same( + net, filter_size, kernel_size, rate=1, stride=1, + scope='atrous_{0}x{0}'.format(kernel_size)) + else: + net = 
resnet_utils.conv2d_same( + net, filter_size, kernel_size, rate=2, stride=1, + scope='atrous_{0}x{0}'.format(kernel_size)) + net = self._batch_norm_fn(net, scope='bn_atr_{0}x{0}'.format(kernel_size)) + elif operation in ['none']: + if stride > 1 or (input_filters != filter_size): + net = tf.nn.relu(net) + net = slim.conv2d(net, filter_size, 1, stride=stride, scope='1x1') + net = self._batch_norm_fn(net, scope='bn_1') + elif 'pool' in operation: + pooling_type = operation.split('_')[0] + pooling_shape = int(operation.split('_')[-1].split('x')[0]) + if pooling_type == 'avg': + net = slim.avg_pool2d(net, pooling_shape, stride=stride, padding='SAME') + elif pooling_type == 'max': + net = slim.max_pool2d(net, pooling_shape, stride=stride, padding='SAME') + else: + raise ValueError('Unimplemented pooling type: ', pooling_type) + if input_filters != filter_size: + net = slim.conv2d(net, filter_size, 1, stride=1, scope='1x1') + net = self._batch_norm_fn(net, scope='bn_1') + else: + raise ValueError('Unimplemented operation', operation) + + if operation != 'none': + net = self._apply_drop_path(net) + return net + + def _combine_unused_states(self, net): + """Concatenates the unused hidden states of the cell.""" + used_hiddenstates = self._used_hiddenstates + states_to_combine = ([ + h for h, is_used in zip(net, used_hiddenstates) if not is_used]) + net = tf.concat(values=states_to_combine, axis=3) + return net + + @contrib_framework.add_arg_scope + def _apply_drop_path(self, net): + """Apply drop_path regularization.""" + drop_path_keep_prob = self._drop_path_keep_prob + if drop_path_keep_prob < 1.0: + # Scale keep prob by layer number. + assert self._cell_num != -1 + layer_ratio = (self._cell_num + 1) / float(self._total_num_cells) + drop_path_keep_prob = 1 - layer_ratio * (1 - drop_path_keep_prob) + # Decrease keep prob over time. + current_step = tf.cast(tf.train.get_or_create_global_step(), tf.float32) + current_ratio = tf.minimum(1.0, current_step / self._total_training_steps) + drop_path_keep_prob = (1 - current_ratio * (1 - drop_path_keep_prob)) + # Drop path. + noise_shape = [tf.shape(net)[0], 1, 1, 1] + random_tensor = drop_path_keep_prob + random_tensor += tf.random_uniform(noise_shape, dtype=tf.float32) + binary_tensor = tf.cast(tf.floor(random_tensor), net.dtype) + keep_prob_inv = tf.cast(1.0 / drop_path_keep_prob, net.dtype) + net = net * keep_prob_inv * binary_tensor + return net diff --git a/models/research/deeplab/core/nas_genotypes.py b/models/research/deeplab/core/nas_genotypes.py new file mode 100644 index 0000000000000000000000000000000000000000..a2e6dd55b450658e10acaa420a6cc31635817a8a --- /dev/null +++ b/models/research/deeplab/core/nas_genotypes.py @@ -0,0 +1,45 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Genotypes used by NAS.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from tensorflow.contrib import slim as contrib_slim +from deeplab.core import nas_cell + +slim = contrib_slim + + +class PNASCell(nas_cell.NASBaseCell): + """Configuration and construction of the PNASNet-5 Cell.""" + + def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells, + total_training_steps, batch_norm_fn=slim.batch_norm): + # Name of operations: op_kernel-size_num-layers. + operations = [ + 'separable_5x5_2', 'max_pool_3x3', 'separable_7x7_2', 'max_pool_3x3', + 'separable_5x5_2', 'separable_3x3_2', 'separable_3x3_2', 'max_pool_3x3', + 'separable_3x3_2', 'none' + ] + used_hiddenstates = [1, 1, 0, 0, 0, 0, 0] + hiddenstate_indices = [1, 1, 0, 0, 0, 0, 4, 0, 1, 0] + + super(PNASCell, self).__init__( + num_conv_filters, operations, used_hiddenstates, hiddenstate_indices, + drop_path_keep_prob, total_num_cells, total_training_steps, + batch_norm_fn) diff --git a/models/research/deeplab/core/nas_network.py b/models/research/deeplab/core/nas_network.py new file mode 100644 index 0000000000000000000000000000000000000000..1da2e04dbaa5cfcb7db6f21266daf846000481fd --- /dev/null +++ b/models/research/deeplab/core/nas_network.py @@ -0,0 +1,368 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Network structure used by NAS. + +Here we provide a few NAS backbones for semantic segmentation. +Currently, we have + +1. pnasnet +"Progressive Neural Architecture Search", Chenxi Liu, Barret Zoph, +Maxim Neumann, Jonathon Shlens, Wei Hua, Li-Jia Li, Li Fei-Fei, +Alan Yuille, Jonathan Huang, Kevin Murphy. In ECCV, 2018. + +2. hnasnet (also called Auto-DeepLab) +"Auto-DeepLab: Hierarchical Neural Architecture Search for Semantic +Image Segmentation", Chenxi Liu, Liang-Chieh Chen, Florian Schroff, +Hartwig Adam, Wei Hua, Alan Yuille, Li Fei-Fei. In CVPR, 2019. 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range +import tensorflow as tf +from tensorflow.contrib import framework as contrib_framework +from tensorflow.contrib import layers as contrib_layers +from tensorflow.contrib import slim as contrib_slim +from tensorflow.contrib import training as contrib_training + +from deeplab.core import nas_genotypes +from deeplab.core import utils +from deeplab.core.nas_cell import NASBaseCell +from tensorflow.contrib.slim.nets import resnet_utils + +arg_scope = contrib_framework.arg_scope +slim = contrib_slim +resize_bilinear = utils.resize_bilinear +scale_dimension = utils.scale_dimension + + +def config(num_conv_filters=20, + total_training_steps=500000, + drop_path_keep_prob=1.0): + return contrib_training.HParams( + # Multiplier when spatial size is reduced by 2. + filter_scaling_rate=2.0, + # Number of filters of the stem output tensor. + num_conv_filters=num_conv_filters, + # Probability to keep each path in the cell when training. + drop_path_keep_prob=drop_path_keep_prob, + # Total training steps to help drop path probability calculation. + total_training_steps=total_training_steps, + ) + + +def nas_arg_scope(weight_decay=4e-5, + batch_norm_decay=0.9997, + batch_norm_epsilon=0.001, + sync_batch_norm_method='None'): + """Default arg scope for the NAS models.""" + batch_norm_params = { + # Decay for the moving averages. + 'decay': batch_norm_decay, + # epsilon to prevent 0s in variance. + 'epsilon': batch_norm_epsilon, + 'scale': True, + } + batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method) + weights_regularizer = contrib_layers.l2_regularizer(weight_decay) + weights_initializer = contrib_layers.variance_scaling_initializer( + factor=1 / 3.0, mode='FAN_IN', uniform=True) + with arg_scope([slim.fully_connected, slim.conv2d, slim.separable_conv2d], + weights_regularizer=weights_regularizer, + weights_initializer=weights_initializer): + with arg_scope([slim.fully_connected], + activation_fn=None, scope='FC'): + with arg_scope([slim.conv2d, slim.separable_conv2d], + activation_fn=None, biases_initializer=None): + with arg_scope([batch_norm], **batch_norm_params) as sc: + return sc + + +def _nas_stem(inputs, + batch_norm_fn=slim.batch_norm): + """Stem used for NAS models.""" + net = resnet_utils.conv2d_same(inputs, 64, 3, stride=2, scope='conv0') + net = batch_norm_fn(net, scope='conv0_bn') + net = tf.nn.relu(net) + net = resnet_utils.conv2d_same(net, 64, 3, stride=1, scope='conv1') + net = batch_norm_fn(net, scope='conv1_bn') + cell_outputs = [net] + net = tf.nn.relu(net) + net = resnet_utils.conv2d_same(net, 128, 3, stride=2, scope='conv2') + net = batch_norm_fn(net, scope='conv2_bn') + cell_outputs.append(net) + return net, cell_outputs + + +def _build_nas_base(images, + cell, + backbone, + num_classes, + hparams, + global_pool=False, + output_stride=16, + nas_use_classification_head=False, + reuse=None, + scope=None, + final_endpoint=None, + batch_norm_fn=slim.batch_norm, + nas_remove_os32_stride=False): + """Constructs a NAS model. + + Args: + images: A tensor of size [batch, height, width, channels]. + cell: Cell structure used in the network. + backbone: Backbone structure used in the network. A list of integers in + which value 0 means "output_stride=4", value 1 means "output_stride=8", + value 2 means "output_stride=16", and value 3 means "output_stride=32". + num_classes: Number of classes to predict. 
+ hparams: Hyperparameters needed to construct the network. + global_pool: If True, we perform global average pooling before computing the + logits. Set to True for image classification, False for dense prediction. + output_stride: Interger, the stride of output feature maps. + nas_use_classification_head: Boolean, use image classification head. + reuse: Whether or not the network and its variables should be reused. To be + able to reuse 'scope' must be given. + scope: Optional variable_scope. + final_endpoint: The endpoint to construct the network up to. + batch_norm_fn: Batch norm function. + nas_remove_os32_stride: Boolean, remove stride in output_stride 32 branch. + + Returns: + net: A rank-4 tensor of size [batch, height_out, width_out, channels_out]. + end_points: A dictionary from components of the network to the corresponding + activation. + + Raises: + ValueError: If output_stride is not a multiple of backbone output stride. + """ + with tf.variable_scope(scope, 'nas', [images], reuse=reuse): + end_points = {} + def add_and_check_endpoint(endpoint_name, net): + end_points[endpoint_name] = net + return final_endpoint and (endpoint_name == final_endpoint) + + net, cell_outputs = _nas_stem(images, + batch_norm_fn=batch_norm_fn) + if add_and_check_endpoint('Stem', net): + return net, end_points + + # Run the cells + filter_scaling = 1.0 + for cell_num in range(len(backbone)): + stride = 1 + if cell_num == 0: + if backbone[0] == 1: + stride = 2 + filter_scaling *= hparams.filter_scaling_rate + else: + if backbone[cell_num] == backbone[cell_num - 1] + 1: + stride = 2 + if backbone[cell_num] == 3 and nas_remove_os32_stride: + stride = 1 + filter_scaling *= hparams.filter_scaling_rate + elif backbone[cell_num] == backbone[cell_num - 1] - 1: + if backbone[cell_num - 1] == 3 and nas_remove_os32_stride: + # No need to rescale features. + pass + else: + # Scale features by a factor of 2. + scaled_height = scale_dimension(net.shape[1].value, 2) + scaled_width = scale_dimension(net.shape[2].value, 2) + net = resize_bilinear(net, [scaled_height, scaled_width], net.dtype) + filter_scaling /= hparams.filter_scaling_rate + net = cell( + net, + scope='cell_{}'.format(cell_num), + filter_scaling=filter_scaling, + stride=stride, + prev_layer=cell_outputs[-2], + cell_num=cell_num) + if add_and_check_endpoint('Cell_{}'.format(cell_num), net): + return net, end_points + cell_outputs.append(net) + net = tf.nn.relu(net) + + if nas_use_classification_head: + # Add image classification head. + # We will expand the filters for different output_strides. + output_stride_to_expanded_filters = {8: 256, 16: 512, 32: 1024} + current_output_scale = 2 + backbone[-1] + current_output_stride = 2 ** current_output_scale + if output_stride % current_output_stride != 0: + raise ValueError( + 'output_stride must be a multiple of backbone output stride.') + output_stride //= current_output_stride + rate = 1 + if current_output_stride != 32: + num_downsampling = 5 - current_output_scale + for i in range(num_downsampling): + # Gradually donwsample feature maps to output stride = 32. 
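+          # Each 3x3 conv below either strides by 2 (while the remaining
+          # output_stride budget allows it) or keeps stride 1 and doubles the
+          # atrous rate instead, mimicking output stride 32 without further
+          # spatial downsampling.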
+ target_output_stride = 2 ** (current_output_scale + 1 + i) + target_filters = output_stride_to_expanded_filters[ + target_output_stride] + scope = 'downsample_os{}'.format(target_output_stride) + if output_stride != 1: + stride = 2 + output_stride //= 2 + else: + stride = 1 + rate *= 2 + net = resnet_utils.conv2d_same( + net, target_filters, 3, stride=stride, rate=rate, + scope=scope + '_conv') + net = batch_norm_fn(net, scope=scope + '_bn') + add_and_check_endpoint(scope, net) + net = tf.nn.relu(net) + # Apply 1x1 convolution to expand dimension to 2048. + scope = 'classification_head' + net = slim.conv2d(net, 2048, 1, scope=scope + '_conv') + net = batch_norm_fn(net, scope=scope + '_bn') + add_and_check_endpoint(scope, net) + net = tf.nn.relu(net) + if global_pool: + # Global average pooling. + net = tf.reduce_mean(net, [1, 2], name='global_pool', keepdims=True) + if num_classes is not None: + net = slim.conv2d(net, num_classes, 1, activation_fn=None, + normalizer_fn=None, scope='logits') + end_points['predictions'] = slim.softmax(net, scope='predictions') + return net, end_points + + +def pnasnet(images, + num_classes, + is_training=True, + global_pool=False, + output_stride=16, + nas_architecture_options=None, + nas_training_hyper_parameters=None, + reuse=None, + scope='pnasnet', + final_endpoint=None, + sync_batch_norm_method='None'): + """Builds PNASNet model.""" + if nas_architecture_options is None: + raise ValueError( + 'Using NAS model variants. nas_architecture_options cannot be None.') + hparams = config(num_conv_filters=nas_architecture_options[ + 'nas_stem_output_num_conv_filters']) + if nas_training_hyper_parameters: + hparams.set_hparam('drop_path_keep_prob', + nas_training_hyper_parameters['drop_path_keep_prob']) + hparams.set_hparam('total_training_steps', + nas_training_hyper_parameters['total_training_steps']) + if not is_training: + tf.logging.info('During inference, setting drop_path_keep_prob = 1.0.') + hparams.set_hparam('drop_path_keep_prob', 1.0) + tf.logging.info(hparams) + if output_stride == 8: + backbone = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] + elif output_stride == 16: + backbone = [1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2] + elif output_stride == 32: + backbone = [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3] + else: + raise ValueError('Unsupported output_stride ', output_stride) + batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method) + cell = nas_genotypes.PNASCell(hparams.num_conv_filters, + hparams.drop_path_keep_prob, + len(backbone), + hparams.total_training_steps, + batch_norm_fn=batch_norm) + with arg_scope([slim.dropout, batch_norm], is_training=is_training): + return _build_nas_base( + images, + cell=cell, + backbone=backbone, + num_classes=num_classes, + hparams=hparams, + global_pool=global_pool, + output_stride=output_stride, + nas_use_classification_head=nas_architecture_options[ + 'nas_use_classification_head'], + reuse=reuse, + scope=scope, + final_endpoint=final_endpoint, + batch_norm_fn=batch_norm, + nas_remove_os32_stride=nas_architecture_options[ + 'nas_remove_os32_stride']) + + +# pylint: disable=unused-argument +def hnasnet(images, + num_classes, + is_training=True, + global_pool=False, + output_stride=8, + nas_architecture_options=None, + nas_training_hyper_parameters=None, + reuse=None, + scope='hnasnet', + final_endpoint=None, + sync_batch_norm_method='None'): + """Builds hierarchical model.""" + if nas_architecture_options is None: + raise ValueError( + 'Using NAS model variants. 
nas_architecture_options cannot be None.') + hparams = config(num_conv_filters=nas_architecture_options[ + 'nas_stem_output_num_conv_filters']) + if nas_training_hyper_parameters: + hparams.set_hparam('drop_path_keep_prob', + nas_training_hyper_parameters['drop_path_keep_prob']) + hparams.set_hparam('total_training_steps', + nas_training_hyper_parameters['total_training_steps']) + if not is_training: + tf.logging.info('During inference, setting drop_path_keep_prob = 1.0.') + hparams.set_hparam('drop_path_keep_prob', 1.0) + tf.logging.info(hparams) + operations = [ + 'atrous_5x5', 'separable_3x3_2', 'separable_3x3_2', 'atrous_3x3', + 'separable_3x3_2', 'separable_3x3_2', 'separable_5x5_2', + 'separable_5x5_2', 'separable_5x5_2', 'atrous_5x5' + ] + used_hiddenstates = [1, 1, 0, 0, 0, 0, 0] + hiddenstate_indices = [1, 0, 1, 0, 3, 1, 4, 2, 3, 5] + backbone = [0, 0, 0, 1, 2, 1, 2, 2, 3, 3, 2, 1] + batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method) + cell = NASBaseCell(hparams.num_conv_filters, + operations, + used_hiddenstates, + hiddenstate_indices, + hparams.drop_path_keep_prob, + len(backbone), + hparams.total_training_steps, + batch_norm_fn=batch_norm) + with arg_scope([slim.dropout, batch_norm], is_training=is_training): + return _build_nas_base( + images, + cell=cell, + backbone=backbone, + num_classes=num_classes, + hparams=hparams, + global_pool=global_pool, + output_stride=output_stride, + nas_use_classification_head=nas_architecture_options[ + 'nas_use_classification_head'], + reuse=reuse, + scope=scope, + final_endpoint=final_endpoint, + batch_norm_fn=batch_norm, + nas_remove_os32_stride=nas_architecture_options[ + 'nas_remove_os32_stride']) diff --git a/models/research/deeplab/core/nas_network_test.py b/models/research/deeplab/core/nas_network_test.py new file mode 100644 index 0000000000000000000000000000000000000000..18621b250ad7321f554b8d97449e19bde5ef4174 --- /dev/null +++ b/models/research/deeplab/core/nas_network_test.py @@ -0,0 +1,111 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for resnet_v1_beta module.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf +from tensorflow.contrib import framework as contrib_framework +from tensorflow.contrib import slim as contrib_slim +from tensorflow.contrib import training as contrib_training + +from deeplab.core import nas_genotypes +from deeplab.core import nas_network + +arg_scope = contrib_framework.arg_scope +slim = contrib_slim + + +def create_test_input(batch, height, width, channels): + """Creates test input tensor.""" + if None in [batch, height, width, channels]: + return tf.placeholder(tf.float32, (batch, height, width, channels)) + else: + return tf.to_float( + np.tile( + np.reshape( + np.reshape(np.arange(height), [height, 1]) + + np.reshape(np.arange(width), [1, width]), + [1, height, width, 1]), + [batch, 1, 1, channels])) + + +class NASNetworkTest(tf.test.TestCase): + """Tests with complete small NAS networks.""" + + def _pnasnet(self, + images, + backbone, + num_classes, + is_training=True, + output_stride=16, + final_endpoint=None): + """Build PNASNet model backbone.""" + hparams = contrib_training.HParams( + filter_scaling_rate=2.0, + num_conv_filters=10, + drop_path_keep_prob=1.0, + total_training_steps=200000, + ) + if not is_training: + hparams.set_hparam('drop_path_keep_prob', 1.0) + + cell = nas_genotypes.PNASCell(hparams.num_conv_filters, + hparams.drop_path_keep_prob, + len(backbone), + hparams.total_training_steps) + with arg_scope([slim.dropout, slim.batch_norm], is_training=is_training): + return nas_network._build_nas_base( + images, + cell=cell, + backbone=backbone, + num_classes=num_classes, + hparams=hparams, + reuse=tf.AUTO_REUSE, + scope='pnasnet_small', + final_endpoint=final_endpoint) + + def testFullyConvolutionalEndpointShapes(self): + num_classes = 10 + backbone = [0, 0, 0, 1, 2, 1, 2, 2, 3, 3, 2, 1] + inputs = create_test_input(None, 321, 321, 3) + with slim.arg_scope(nas_network.nas_arg_scope()): + _, end_points = self._pnasnet(inputs, backbone, num_classes) + endpoint_to_shape = { + 'Stem': [None, 81, 81, 128], + 'Cell_0': [None, 81, 81, 50], + 'Cell_1': [None, 81, 81, 50], + 'Cell_2': [None, 81, 81, 50], + 'Cell_3': [None, 41, 41, 100], + 'Cell_4': [None, 21, 21, 200], + 'Cell_5': [None, 41, 41, 100], + 'Cell_6': [None, 21, 21, 200], + 'Cell_7': [None, 21, 21, 200], + 'Cell_8': [None, 11, 11, 400], + 'Cell_9': [None, 11, 11, 400], + 'Cell_10': [None, 21, 21, 200], + 'Cell_11': [None, 41, 41, 100] + } + for endpoint, shape in endpoint_to_shape.items(): + self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/deeplab/core/preprocess_utils.py b/models/research/deeplab/core/preprocess_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..440717e414d1a6f67b0947eb78830ff84baa812d --- /dev/null +++ b/models/research/deeplab/core/preprocess_utils.py @@ -0,0 +1,533 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Utility functions related to preprocessing inputs.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from six.moves import range +from six.moves import zip +import tensorflow as tf + + +def flip_dim(tensor_list, prob=0.5, dim=1): + """Randomly flips a dimension of the given tensor. + + The decision to randomly flip the `Tensors` is made together. In other words, + all or none of the images pass in are flipped. + + Note that tf.random_flip_left_right and tf.random_flip_up_down isn't used so + that we can control for the probability as well as ensure the same decision + is applied across the images. + + Args: + tensor_list: A list of `Tensors` with the same number of dimensions. + prob: The probability of a left-right flip. + dim: The dimension to flip, 0, 1, .. + + Returns: + outputs: A list of the possibly flipped `Tensors` as well as an indicator + `Tensor` at the end whose value is `True` if the inputs were flipped and + `False` otherwise. + + Raises: + ValueError: If dim is negative or greater than the dimension of a `Tensor`. + """ + random_value = tf.random_uniform([]) + + def flip(): + flipped = [] + for tensor in tensor_list: + if dim < 0 or dim >= len(tensor.get_shape().as_list()): + raise ValueError('dim must represent a valid dimension.') + flipped.append(tf.reverse_v2(tensor, [dim])) + return flipped + + is_flipped = tf.less_equal(random_value, prob) + outputs = tf.cond(is_flipped, flip, lambda: tensor_list) + if not isinstance(outputs, (list, tuple)): + outputs = [outputs] + outputs.append(is_flipped) + + return outputs + + +def _image_dimensions(image, rank): + """Returns the dimensions of an image tensor. + + Args: + image: A rank-D Tensor. For 3-D of shape: `[height, width, channels]`. + rank: The expected rank of the image + + Returns: + A list of corresponding to the dimensions of the input image. Dimensions + that are statically known are python integers, otherwise they are integer + scalar tensors. + """ + if image.get_shape().is_fully_defined(): + return image.get_shape().as_list() + else: + static_shape = image.get_shape().with_rank(rank).as_list() + dynamic_shape = tf.unstack(tf.shape(image), rank) + return [ + s if s is not None else d for s, d in zip(static_shape, dynamic_shape) + ] + + +def get_label_resize_method(label): + """Returns the resize method of labels depending on label dtype. + + Args: + label: Groundtruth label tensor. + + Returns: + tf.image.ResizeMethod.BILINEAR, if label dtype is floating. + tf.image.ResizeMethod.NEAREST_NEIGHBOR, if label dtype is integer. + + Raises: + ValueError: If label is neither floating nor integer. 
+ """ + if label.dtype.is_floating: + return tf.image.ResizeMethod.BILINEAR + elif label.dtype.is_integer: + return tf.image.ResizeMethod.NEAREST_NEIGHBOR + else: + raise ValueError('Label type must be either floating or integer.') + + +def pad_to_bounding_box(image, offset_height, offset_width, target_height, + target_width, pad_value): + """Pads the given image with the given pad_value. + + Works like tf.image.pad_to_bounding_box, except it can pad the image + with any given arbitrary pad value and also handle images whose sizes are not + known during graph construction. + + Args: + image: 3-D tensor with shape [height, width, channels] + offset_height: Number of rows of zeros to add on top. + offset_width: Number of columns of zeros to add on the left. + target_height: Height of output image. + target_width: Width of output image. + pad_value: Value to pad the image tensor with. + + Returns: + 3-D tensor of shape [target_height, target_width, channels]. + + Raises: + ValueError: If the shape of image is incompatible with the offset_* or + target_* arguments. + """ + with tf.name_scope(None, 'pad_to_bounding_box', [image]): + image = tf.convert_to_tensor(image, name='image') + original_dtype = image.dtype + if original_dtype != tf.float32 and original_dtype != tf.float64: + # If image dtype is not float, we convert it to int32 to avoid overflow. + image = tf.cast(image, tf.int32) + image_rank_assert = tf.Assert( + tf.logical_or( + tf.equal(tf.rank(image), 3), + tf.equal(tf.rank(image), 4)), + ['Wrong image tensor rank.']) + with tf.control_dependencies([image_rank_assert]): + image -= pad_value + image_shape = image.get_shape() + is_batch = True + if image_shape.ndims == 3: + is_batch = False + image = tf.expand_dims(image, 0) + elif image_shape.ndims is None: + is_batch = False + image = tf.expand_dims(image, 0) + image.set_shape([None] * 4) + elif image.get_shape().ndims != 4: + raise ValueError('Input image must have either 3 or 4 dimensions.') + _, height, width, _ = _image_dimensions(image, rank=4) + target_width_assert = tf.Assert( + tf.greater_equal( + target_width, width), + ['target_width must be >= width']) + target_height_assert = tf.Assert( + tf.greater_equal(target_height, height), + ['target_height must be >= height']) + with tf.control_dependencies([target_width_assert]): + after_padding_width = target_width - offset_width - width + with tf.control_dependencies([target_height_assert]): + after_padding_height = target_height - offset_height - height + offset_assert = tf.Assert( + tf.logical_and( + tf.greater_equal(after_padding_width, 0), + tf.greater_equal(after_padding_height, 0)), + ['target size not possible with the given target offsets']) + batch_params = tf.stack([0, 0]) + height_params = tf.stack([offset_height, after_padding_height]) + width_params = tf.stack([offset_width, after_padding_width]) + channel_params = tf.stack([0, 0]) + with tf.control_dependencies([offset_assert]): + paddings = tf.stack([batch_params, height_params, width_params, + channel_params]) + padded = tf.pad(image, paddings) + if not is_batch: + padded = tf.squeeze(padded, axis=[0]) + outputs = padded + pad_value + if outputs.dtype != original_dtype: + outputs = tf.cast(outputs, original_dtype) + return outputs + + +def _crop(image, offset_height, offset_width, crop_height, crop_width): + """Crops the given image using the provided offsets and sizes. + + Note that the method doesn't assume we know the input image size but it does + assume we know the input image rank. 
+ + Args: + image: an image of shape [height, width, channels]. + offset_height: a scalar tensor indicating the height offset. + offset_width: a scalar tensor indicating the width offset. + crop_height: the height of the cropped image. + crop_width: the width of the cropped image. + + Returns: + The cropped (and resized) image. + + Raises: + ValueError: if `image` doesn't have rank of 3. + InvalidArgumentError: if the rank is not 3 or if the image dimensions are + less than the crop size. + """ + original_shape = tf.shape(image) + + if len(image.get_shape().as_list()) != 3: + raise ValueError('input must have rank of 3') + original_channels = image.get_shape().as_list()[2] + + rank_assertion = tf.Assert( + tf.equal(tf.rank(image), 3), + ['Rank of image must be equal to 3.']) + with tf.control_dependencies([rank_assertion]): + cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]]) + + size_assertion = tf.Assert( + tf.logical_and( + tf.greater_equal(original_shape[0], crop_height), + tf.greater_equal(original_shape[1], crop_width)), + ['Crop size greater than the image size.']) + + offsets = tf.cast(tf.stack([offset_height, offset_width, 0]), tf.int32) + + # Use tf.slice instead of crop_to_bounding box as it accepts tensors to + # define the crop size. + with tf.control_dependencies([size_assertion]): + image = tf.slice(image, offsets, cropped_shape) + image = tf.reshape(image, cropped_shape) + image.set_shape([crop_height, crop_width, original_channels]) + return image + + +def random_crop(image_list, crop_height, crop_width): + """Crops the given list of images. + + The function applies the same crop to each image in the list. This can be + effectively applied when there are multiple image inputs of the same + dimension such as: + + image, depths, normals = random_crop([image, depths, normals], 120, 150) + + Args: + image_list: a list of image tensors of the same dimension but possibly + varying channel. + crop_height: the new height. + crop_width: the new width. + + Returns: + the image_list with cropped images. + + Raises: + ValueError: if there are multiple image inputs provided with different size + or the images are smaller than the crop dimensions. + """ + if not image_list: + raise ValueError('Empty image_list.') + + # Compute the rank assertions. 
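+  # The assertions below are wired in as control dependencies, so rank and
+  # size mismatches surface as InvalidArgumentError at run time (the input
+  # shapes may be unknown at graph-construction time).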
+ rank_assertions = [] + for i in range(len(image_list)): + image_rank = tf.rank(image_list[i]) + rank_assert = tf.Assert( + tf.equal(image_rank, 3), + ['Wrong rank for tensor %s [expected] [actual]', + image_list[i].name, 3, image_rank]) + rank_assertions.append(rank_assert) + + with tf.control_dependencies([rank_assertions[0]]): + image_shape = tf.shape(image_list[0]) + image_height = image_shape[0] + image_width = image_shape[1] + crop_size_assert = tf.Assert( + tf.logical_and( + tf.greater_equal(image_height, crop_height), + tf.greater_equal(image_width, crop_width)), + ['Crop size greater than the image size.']) + + asserts = [rank_assertions[0], crop_size_assert] + + for i in range(1, len(image_list)): + image = image_list[i] + asserts.append(rank_assertions[i]) + with tf.control_dependencies([rank_assertions[i]]): + shape = tf.shape(image) + height = shape[0] + width = shape[1] + + height_assert = tf.Assert( + tf.equal(height, image_height), + ['Wrong height for tensor %s [expected][actual]', + image.name, height, image_height]) + width_assert = tf.Assert( + tf.equal(width, image_width), + ['Wrong width for tensor %s [expected][actual]', + image.name, width, image_width]) + asserts.extend([height_assert, width_assert]) + + # Create a random bounding box. + # + # Use tf.random_uniform and not numpy.random.rand as doing the former would + # generate random numbers at graph eval time, unlike the latter which + # generates random numbers at graph definition time. + with tf.control_dependencies(asserts): + max_offset_height = tf.reshape(image_height - crop_height + 1, []) + max_offset_width = tf.reshape(image_width - crop_width + 1, []) + offset_height = tf.random_uniform( + [], maxval=max_offset_height, dtype=tf.int32) + offset_width = tf.random_uniform( + [], maxval=max_offset_width, dtype=tf.int32) + + return [_crop(image, offset_height, offset_width, + crop_height, crop_width) for image in image_list] + + +def get_random_scale(min_scale_factor, max_scale_factor, step_size): + """Gets a random scale value. + + Args: + min_scale_factor: Minimum scale value. + max_scale_factor: Maximum scale value. + step_size: The step size from minimum to maximum value. + + Returns: + A random scale value selected between minimum and maximum value. + + Raises: + ValueError: min_scale_factor has unexpected value. + """ + if min_scale_factor < 0 or min_scale_factor > max_scale_factor: + raise ValueError('Unexpected value of min_scale_factor.') + + if min_scale_factor == max_scale_factor: + return tf.cast(min_scale_factor, tf.float32) + + # When step_size = 0, we sample the value uniformly from [min, max). + if step_size == 0: + return tf.random_uniform([1], + minval=min_scale_factor, + maxval=max_scale_factor) + + # When step_size != 0, we randomly select one discrete value from [min, max]. + num_steps = int((max_scale_factor - min_scale_factor) / step_size + 1) + scale_factors = tf.lin_space(min_scale_factor, max_scale_factor, num_steps) + shuffled_scale_factors = tf.random_shuffle(scale_factors) + return shuffled_scale_factors[0] + + +def randomly_scale_image_and_label(image, label=None, scale=1.0): + """Randomly scales image and label. + + Args: + image: Image with shape [height, width, 3]. + label: Label with shape [height, width, 1]. + scale: The value to scale image and label. + + Returns: + Scaled image and label. + """ + # No random scaling if scale == 1. 
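+  # Otherwise the image is resized bilinearly and the label with
+  # get_label_resize_method (nearest neighbor for integer labels), so class
+  # ids are not interpolated.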
+ if scale == 1.0: + return image, label + image_shape = tf.shape(image) + new_dim = tf.cast( + tf.cast([image_shape[0], image_shape[1]], tf.float32) * scale, + tf.int32) + + # Need squeeze and expand_dims because image interpolation takes + # 4D tensors as input. + image = tf.squeeze(tf.image.resize_bilinear( + tf.expand_dims(image, 0), + new_dim, + align_corners=True), [0]) + if label is not None: + label = tf.image.resize( + label, + new_dim, + method=get_label_resize_method(label), + align_corners=True) + + return image, label + + +def resolve_shape(tensor, rank=None, scope=None): + """Fully resolves the shape of a Tensor. + + Use as much as possible the shape components already known during graph + creation and resolve the remaining ones during runtime. + + Args: + tensor: Input tensor whose shape we query. + rank: The rank of the tensor, provided that we know it. + scope: Optional name scope. + + Returns: + shape: The full shape of the tensor. + """ + with tf.name_scope(scope, 'resolve_shape', [tensor]): + if rank is not None: + shape = tensor.get_shape().with_rank(rank).as_list() + else: + shape = tensor.get_shape().as_list() + + if None in shape: + shape_dynamic = tf.shape(tensor) + for i in range(len(shape)): + if shape[i] is None: + shape[i] = shape_dynamic[i] + + return shape + + +def resize_to_range(image, + label=None, + min_size=None, + max_size=None, + factor=None, + keep_aspect_ratio=True, + align_corners=True, + label_layout_is_chw=False, + scope=None, + method=tf.image.ResizeMethod.BILINEAR): + """Resizes image or label so their sides are within the provided range. + + The output size can be described by two cases: + 1. If the image can be rescaled so its minimum size is equal to min_size + without the other side exceeding max_size, then do so. + 2. Otherwise, resize so the largest side is equal to max_size. + + An integer in `range(factor)` is added to the computed sides so that the + final dimensions are multiples of `factor` plus one. + + Args: + image: A 3D tensor of shape [height, width, channels]. + label: (optional) A 3D tensor of shape [height, width, channels] (default) + or [channels, height, width] when label_layout_is_chw = True. + min_size: (scalar) desired size of the smaller image side. + max_size: (scalar) maximum allowed size of the larger image side. Note + that the output dimension is no larger than max_size and may be slightly + smaller than max_size when factor is not None. + factor: Make output size multiple of factor plus one. + keep_aspect_ratio: Boolean, keep aspect ratio or not. If True, the input + will be resized while keeping the original aspect ratio. If False, the + input will be resized to [max_resize_value, max_resize_value] without + keeping the original aspect ratio. + align_corners: If True, exactly align all 4 corners of input and output. + label_layout_is_chw: If true, the label has shape [channel, height, width]. + We support this case because for some instance segmentation dataset, the + instance segmentation is saved as [num_instances, height, width]. + scope: Optional name scope. + method: Image resize method. Defaults to tf.image.ResizeMethod.BILINEAR. + + Returns: + A 3-D tensor of shape [new_height, new_width, channels], where the image + has been resized (with the specified method) so that + min(new_height, new_width) == ceil(min_size) or + max(new_height, new_width) == ceil(max_size). + + Raises: + ValueError: If the image is not a 3D tensor. 
+ """ + with tf.name_scope(scope, 'resize_to_range', [image]): + new_tensor_list = [] + min_size = tf.cast(min_size, tf.float32) + if max_size is not None: + max_size = tf.cast(max_size, tf.float32) + # Modify the max_size to be a multiple of factor plus 1 and make sure the + # max dimension after resizing is no larger than max_size. + if factor is not None: + max_size = (max_size - (max_size - 1) % factor) + + [orig_height, orig_width, _] = resolve_shape(image, rank=3) + orig_height = tf.cast(orig_height, tf.float32) + orig_width = tf.cast(orig_width, tf.float32) + orig_min_size = tf.minimum(orig_height, orig_width) + + # Calculate the larger of the possible sizes + large_scale_factor = min_size / orig_min_size + large_height = tf.cast(tf.floor(orig_height * large_scale_factor), tf.int32) + large_width = tf.cast(tf.floor(orig_width * large_scale_factor), tf.int32) + large_size = tf.stack([large_height, large_width]) + + new_size = large_size + if max_size is not None: + # Calculate the smaller of the possible sizes, use that if the larger + # is too big. + orig_max_size = tf.maximum(orig_height, orig_width) + small_scale_factor = max_size / orig_max_size + small_height = tf.cast( + tf.floor(orig_height * small_scale_factor), tf.int32) + small_width = tf.cast(tf.floor(orig_width * small_scale_factor), tf.int32) + small_size = tf.stack([small_height, small_width]) + new_size = tf.cond( + tf.cast(tf.reduce_max(large_size), tf.float32) > max_size, + lambda: small_size, + lambda: large_size) + # Ensure that both output sides are multiples of factor plus one. + if factor is not None: + new_size += (factor - (new_size - 1) % factor) % factor + if not keep_aspect_ratio: + # If not keep the aspect ratio, we resize everything to max_size, allowing + # us to do pre-processing without extra padding. + new_size = [tf.reduce_max(new_size), tf.reduce_max(new_size)] + new_tensor_list.append(tf.image.resize( + image, new_size, method=method, align_corners=align_corners)) + if label is not None: + if label_layout_is_chw: + # Input label has shape [channel, height, width]. + resized_label = tf.expand_dims(label, 3) + resized_label = tf.image.resize( + resized_label, + new_size, + method=get_label_resize_method(label), + align_corners=align_corners) + resized_label = tf.squeeze(resized_label, 3) + else: + # Input label has shape [height, width, channel]. + resized_label = tf.image.resize( + label, + new_size, + method=get_label_resize_method(label), + align_corners=align_corners) + new_tensor_list.append(resized_label) + else: + new_tensor_list.append(None) + return new_tensor_list diff --git a/models/research/deeplab/core/preprocess_utils_test.py b/models/research/deeplab/core/preprocess_utils_test.py new file mode 100644 index 0000000000000000000000000000000000000000..606fe46dd62787cf1a8adfaa27121affc4a02498 --- /dev/null +++ b/models/research/deeplab/core/preprocess_utils_test.py @@ -0,0 +1,515 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for preprocess_utils.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from six.moves import range +import tensorflow as tf + +from deeplab.core import preprocess_utils + + +class PreprocessUtilsTest(tf.test.TestCase): + + def testNoFlipWhenProbIsZero(self): + numpy_image = np.dstack([[[5., 6.], + [9., 0.]], + [[4., 3.], + [3., 5.]]]) + image = tf.convert_to_tensor(numpy_image) + + with self.test_session(): + actual, is_flipped = preprocess_utils.flip_dim([image], prob=0, dim=0) + self.assertAllEqual(numpy_image, actual.eval()) + self.assertAllEqual(False, is_flipped.eval()) + actual, is_flipped = preprocess_utils.flip_dim([image], prob=0, dim=1) + self.assertAllEqual(numpy_image, actual.eval()) + self.assertAllEqual(False, is_flipped.eval()) + actual, is_flipped = preprocess_utils.flip_dim([image], prob=0, dim=2) + self.assertAllEqual(numpy_image, actual.eval()) + self.assertAllEqual(False, is_flipped.eval()) + + def testFlipWhenProbIsOne(self): + numpy_image = np.dstack([[[5., 6.], + [9., 0.]], + [[4., 3.], + [3., 5.]]]) + dim0_flipped = np.dstack([[[9., 0.], + [5., 6.]], + [[3., 5.], + [4., 3.]]]) + dim1_flipped = np.dstack([[[6., 5.], + [0., 9.]], + [[3., 4.], + [5., 3.]]]) + dim2_flipped = np.dstack([[[4., 3.], + [3., 5.]], + [[5., 6.], + [9., 0.]]]) + image = tf.convert_to_tensor(numpy_image) + + with self.test_session(): + actual, is_flipped = preprocess_utils.flip_dim([image], prob=1, dim=0) + self.assertAllEqual(dim0_flipped, actual.eval()) + self.assertAllEqual(True, is_flipped.eval()) + actual, is_flipped = preprocess_utils.flip_dim([image], prob=1, dim=1) + self.assertAllEqual(dim1_flipped, actual.eval()) + self.assertAllEqual(True, is_flipped.eval()) + actual, is_flipped = preprocess_utils.flip_dim([image], prob=1, dim=2) + self.assertAllEqual(dim2_flipped, actual.eval()) + self.assertAllEqual(True, is_flipped.eval()) + + def testFlipMultipleImagesConsistentlyWhenProbIsOne(self): + numpy_image = np.dstack([[[5., 6.], + [9., 0.]], + [[4., 3.], + [3., 5.]]]) + numpy_label = np.dstack([[[0., 1.], + [2., 3.]]]) + image_dim1_flipped = np.dstack([[[6., 5.], + [0., 9.]], + [[3., 4.], + [5., 3.]]]) + label_dim1_flipped = np.dstack([[[1., 0.], + [3., 2.]]]) + image = tf.convert_to_tensor(numpy_image) + label = tf.convert_to_tensor(numpy_label) + + with self.test_session() as sess: + image, label, is_flipped = preprocess_utils.flip_dim( + [image, label], prob=1, dim=1) + actual_image, actual_label = sess.run([image, label]) + self.assertAllEqual(image_dim1_flipped, actual_image) + self.assertAllEqual(label_dim1_flipped, actual_label) + self.assertEqual(True, is_flipped.eval()) + + def testReturnRandomFlipsOnMultipleEvals(self): + numpy_image = np.dstack([[[5., 6.], + [9., 0.]], + [[4., 3.], + [3., 5.]]]) + dim1_flipped = np.dstack([[[6., 5.], + [0., 9.]], + [[3., 4.], + [5., 3.]]]) + image = tf.convert_to_tensor(numpy_image) + tf.compat.v1.set_random_seed(53) + + with self.test_session() as sess: + actual, is_flipped = preprocess_utils.flip_dim( + [image], prob=0.5, dim=1) + actual_image, actual_is_flipped = sess.run([actual, is_flipped]) + self.assertAllEqual(numpy_image, actual_image) + self.assertEqual(False, actual_is_flipped) + actual_image, actual_is_flipped = sess.run([actual, is_flipped]) + 
self.assertAllEqual(dim1_flipped, actual_image) + self.assertEqual(True, actual_is_flipped) + + def testReturnCorrectCropOfSingleImage(self): + np.random.seed(0) + + height, width = 10, 20 + image = np.random.randint(0, 256, size=(height, width, 3)) + + crop_height, crop_width = 2, 4 + + image_placeholder = tf.placeholder(tf.int32, shape=(None, None, 3)) + [cropped] = preprocess_utils.random_crop([image_placeholder], + crop_height, + crop_width) + + with self.test_session(): + cropped_image = cropped.eval(feed_dict={image_placeholder: image}) + + # Ensure we can find the cropped image in the original: + is_found = False + for x in range(0, width - crop_width + 1): + for y in range(0, height - crop_height + 1): + if np.isclose(image[y:y+crop_height, x:x+crop_width, :], + cropped_image).all(): + is_found = True + break + + self.assertTrue(is_found) + + def testRandomCropMaintainsNumberOfChannels(self): + np.random.seed(0) + + crop_height, crop_width = 10, 20 + image = np.random.randint(0, 256, size=(100, 200, 3)) + + tf.compat.v1.set_random_seed(37) + image_placeholder = tf.placeholder(tf.int32, shape=(None, None, 3)) + [cropped] = preprocess_utils.random_crop( + [image_placeholder], crop_height, crop_width) + + with self.test_session(): + cropped_image = cropped.eval(feed_dict={image_placeholder: image}) + self.assertTupleEqual(cropped_image.shape, (crop_height, crop_width, 3)) + + def testReturnDifferentCropAreasOnTwoEvals(self): + tf.compat.v1.set_random_seed(0) + + crop_height, crop_width = 2, 3 + image = np.random.randint(0, 256, size=(100, 200, 3)) + image_placeholder = tf.placeholder(tf.int32, shape=(None, None, 3)) + [cropped] = preprocess_utils.random_crop( + [image_placeholder], crop_height, crop_width) + + with self.test_session(): + crop0 = cropped.eval(feed_dict={image_placeholder: image}) + crop1 = cropped.eval(feed_dict={image_placeholder: image}) + self.assertFalse(np.isclose(crop0, crop1).all()) + + def testReturnConsistenCropsOfImagesInTheList(self): + tf.compat.v1.set_random_seed(0) + + height, width = 10, 20 + crop_height, crop_width = 2, 3 + labels = np.linspace(0, height * width-1, height * width) + labels = labels.reshape((height, width, 1)) + image = np.tile(labels, (1, 1, 3)) + + image_placeholder = tf.placeholder(tf.int32, shape=(None, None, 3)) + label_placeholder = tf.placeholder(tf.int32, shape=(None, None, 1)) + [cropped_image, cropped_label] = preprocess_utils.random_crop( + [image_placeholder, label_placeholder], crop_height, crop_width) + + with self.test_session() as sess: + cropped_image, cropped_labels = sess.run([cropped_image, cropped_label], + feed_dict={ + image_placeholder: image, + label_placeholder: labels}) + for i in range(3): + self.assertAllEqual(cropped_image[:, :, i], cropped_labels.squeeze()) + + def testDieOnRandomCropWhenImagesWithDifferentWidth(self): + crop_height, crop_width = 2, 3 + image1 = tf.placeholder(tf.float32, name='image1', shape=(None, None, 3)) + image2 = tf.placeholder(tf.float32, name='image2', shape=(None, None, 1)) + cropped = preprocess_utils.random_crop( + [image1, image2], crop_height, crop_width) + + with self.test_session() as sess: + with self.assertRaises(tf.errors.InvalidArgumentError): + sess.run(cropped, feed_dict={image1: np.random.rand(4, 5, 3), + image2: np.random.rand(4, 6, 1)}) + + def testDieOnRandomCropWhenImagesWithDifferentHeight(self): + crop_height, crop_width = 2, 3 + image1 = tf.placeholder(tf.float32, name='image1', shape=(None, None, 3)) + image2 = tf.placeholder(tf.float32, name='image2', 
shape=(None, None, 1)) + cropped = preprocess_utils.random_crop( + [image1, image2], crop_height, crop_width) + + with self.test_session() as sess: + with self.assertRaisesWithPredicateMatch( + tf.errors.InvalidArgumentError, + 'Wrong height for tensor'): + sess.run(cropped, feed_dict={image1: np.random.rand(4, 5, 3), + image2: np.random.rand(3, 5, 1)}) + + def testDieOnRandomCropWhenCropSizeIsGreaterThanImage(self): + crop_height, crop_width = 5, 9 + image1 = tf.placeholder(tf.float32, name='image1', shape=(None, None, 3)) + image2 = tf.placeholder(tf.float32, name='image2', shape=(None, None, 1)) + cropped = preprocess_utils.random_crop( + [image1, image2], crop_height, crop_width) + + with self.test_session() as sess: + with self.assertRaisesWithPredicateMatch( + tf.errors.InvalidArgumentError, + 'Crop size greater than the image size.'): + sess.run(cropped, feed_dict={image1: np.random.rand(4, 5, 3), + image2: np.random.rand(4, 5, 1)}) + + def testReturnPaddedImageWithNonZeroPadValue(self): + for dtype in [np.int32, np.int64, np.float32, np.float64]: + image = np.dstack([[[5, 6], + [9, 0]], + [[4, 3], + [3, 5]]]).astype(dtype) + expected_image = np.dstack([[[255, 255, 255, 255, 255], + [255, 255, 255, 255, 255], + [255, 5, 6, 255, 255], + [255, 9, 0, 255, 255], + [255, 255, 255, 255, 255]], + [[255, 255, 255, 255, 255], + [255, 255, 255, 255, 255], + [255, 4, 3, 255, 255], + [255, 3, 5, 255, 255], + [255, 255, 255, 255, 255]]]).astype(dtype) + + with self.session() as sess: + padded_image = preprocess_utils.pad_to_bounding_box( + image, 2, 1, 5, 5, 255) + padded_image = sess.run(padded_image) + self.assertAllClose(padded_image, expected_image) + # Add batch size = 1 to image. + padded_image = preprocess_utils.pad_to_bounding_box( + np.expand_dims(image, 0), 2, 1, 5, 5, 255) + padded_image = sess.run(padded_image) + self.assertAllClose(padded_image, np.expand_dims(expected_image, 0)) + + def testReturnOriginalImageWhenTargetSizeIsEqualToImageSize(self): + image = np.dstack([[[5, 6], + [9, 0]], + [[4, 3], + [3, 5]]]) + with self.session() as sess: + padded_image = preprocess_utils.pad_to_bounding_box( + image, 0, 0, 2, 2, 255) + padded_image = sess.run(padded_image) + self.assertAllClose(padded_image, image) + + def testDieOnTargetSizeGreaterThanImageSize(self): + image = np.dstack([[[5, 6], + [9, 0]], + [[4, 3], + [3, 5]]]) + with self.test_session(): + image_placeholder = tf.placeholder(tf.float32) + padded_image = preprocess_utils.pad_to_bounding_box( + image_placeholder, 0, 0, 2, 1, 255) + with self.assertRaisesWithPredicateMatch( + tf.errors.InvalidArgumentError, + 'target_width must be >= width'): + padded_image.eval(feed_dict={image_placeholder: image}) + padded_image = preprocess_utils.pad_to_bounding_box( + image_placeholder, 0, 0, 1, 2, 255) + with self.assertRaisesWithPredicateMatch( + tf.errors.InvalidArgumentError, + 'target_height must be >= height'): + padded_image.eval(feed_dict={image_placeholder: image}) + + def testDieIfTargetSizeNotPossibleWithGivenOffset(self): + image = np.dstack([[[5, 6], + [9, 0]], + [[4, 3], + [3, 5]]]) + with self.test_session(): + image_placeholder = tf.placeholder(tf.float32) + padded_image = preprocess_utils.pad_to_bounding_box( + image_placeholder, 3, 0, 4, 4, 255) + with self.assertRaisesWithPredicateMatch( + tf.errors.InvalidArgumentError, + 'target size not possible with the given target offsets'): + padded_image.eval(feed_dict={image_placeholder: image}) + + def testDieIfImageTensorRankIsTwo(self): + image = np.vstack([[5, 6], + [9, 0]]) 
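+    # pad_to_bounding_box accepts only rank-3 or rank-4 image tensors, so this
+    # rank-2 input should trigger the 'Wrong image tensor rank' assertion.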
+ with self.test_session(): + image_placeholder = tf.placeholder(tf.float32) + padded_image = preprocess_utils.pad_to_bounding_box( + image_placeholder, 0, 0, 2, 2, 255) + with self.assertRaisesWithPredicateMatch( + tf.errors.InvalidArgumentError, + 'Wrong image tensor rank'): + padded_image.eval(feed_dict={image_placeholder: image}) + + def testResizeTensorsToRange(self): + test_shapes = [[60, 40], + [15, 30], + [15, 50]] + min_size = 50 + max_size = 100 + factor = None + expected_shape_list = [(75, 50, 3), + (50, 100, 3), + (30, 100, 3)] + for i, test_shape in enumerate(test_shapes): + image = tf.random.normal([test_shape[0], test_shape[1], 3]) + new_tensor_list = preprocess_utils.resize_to_range( + image=image, + label=None, + min_size=min_size, + max_size=max_size, + factor=factor, + align_corners=True) + with self.test_session() as session: + resized_image = session.run(new_tensor_list[0]) + self.assertEqual(resized_image.shape, expected_shape_list[i]) + + def testResizeTensorsToRangeWithFactor(self): + test_shapes = [[60, 40], + [15, 30], + [15, 50]] + min_size = 50 + max_size = 98 + factor = 8 + expected_image_shape_list = [(81, 57, 3), + (49, 97, 3), + (33, 97, 3)] + expected_label_shape_list = [(81, 57, 1), + (49, 97, 1), + (33, 97, 1)] + for i, test_shape in enumerate(test_shapes): + image = tf.random.normal([test_shape[0], test_shape[1], 3]) + label = tf.random.normal([test_shape[0], test_shape[1], 1]) + new_tensor_list = preprocess_utils.resize_to_range( + image=image, + label=label, + min_size=min_size, + max_size=max_size, + factor=factor, + align_corners=True) + with self.test_session() as session: + new_tensor_list = session.run(new_tensor_list) + self.assertEqual(new_tensor_list[0].shape, expected_image_shape_list[i]) + self.assertEqual(new_tensor_list[1].shape, expected_label_shape_list[i]) + + def testResizeTensorsToRangeWithFactorAndLabelShapeCHW(self): + test_shapes = [[60, 40], + [15, 30], + [15, 50]] + min_size = 50 + max_size = 98 + factor = 8 + expected_image_shape_list = [(81, 57, 3), + (49, 97, 3), + (33, 97, 3)] + expected_label_shape_list = [(5, 81, 57), + (5, 49, 97), + (5, 33, 97)] + for i, test_shape in enumerate(test_shapes): + image = tf.random.normal([test_shape[0], test_shape[1], 3]) + label = tf.random.normal([5, test_shape[0], test_shape[1]]) + new_tensor_list = preprocess_utils.resize_to_range( + image=image, + label=label, + min_size=min_size, + max_size=max_size, + factor=factor, + align_corners=True, + label_layout_is_chw=True) + with self.test_session() as session: + new_tensor_list = session.run(new_tensor_list) + self.assertEqual(new_tensor_list[0].shape, expected_image_shape_list[i]) + self.assertEqual(new_tensor_list[1].shape, expected_label_shape_list[i]) + + def testResizeTensorsToRangeWithSimilarMinMaxSizes(self): + test_shapes = [[60, 40], + [15, 30], + [15, 50]] + # Values set so that one of the side = 97. 
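+    # (97 = 8 * 12 + 1, i.e. a multiple of factor plus one.)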
+ min_size = 96 + max_size = 98 + factor = 8 + expected_image_shape_list = [(97, 65, 3), + (49, 97, 3), + (33, 97, 3)] + expected_label_shape_list = [(97, 65, 1), + (49, 97, 1), + (33, 97, 1)] + for i, test_shape in enumerate(test_shapes): + image = tf.random.normal([test_shape[0], test_shape[1], 3]) + label = tf.random.normal([test_shape[0], test_shape[1], 1]) + new_tensor_list = preprocess_utils.resize_to_range( + image=image, + label=label, + min_size=min_size, + max_size=max_size, + factor=factor, + align_corners=True) + with self.test_session() as session: + new_tensor_list = session.run(new_tensor_list) + self.assertEqual(new_tensor_list[0].shape, expected_image_shape_list[i]) + self.assertEqual(new_tensor_list[1].shape, expected_label_shape_list[i]) + + def testResizeTensorsToRangeWithEqualMaxSize(self): + test_shapes = [[97, 38], + [96, 97]] + # Make max_size equal to the larger value of test_shapes. + min_size = 97 + max_size = 97 + factor = 8 + expected_image_shape_list = [(97, 41, 3), + (97, 97, 3)] + expected_label_shape_list = [(97, 41, 1), + (97, 97, 1)] + for i, test_shape in enumerate(test_shapes): + image = tf.random.normal([test_shape[0], test_shape[1], 3]) + label = tf.random.normal([test_shape[0], test_shape[1], 1]) + new_tensor_list = preprocess_utils.resize_to_range( + image=image, + label=label, + min_size=min_size, + max_size=max_size, + factor=factor, + align_corners=True) + with self.test_session() as session: + new_tensor_list = session.run(new_tensor_list) + self.assertEqual(new_tensor_list[0].shape, expected_image_shape_list[i]) + self.assertEqual(new_tensor_list[1].shape, expected_label_shape_list[i]) + + def testResizeTensorsToRangeWithPotentialErrorInTFCeil(self): + test_shape = [3936, 5248] + # Make max_size equal to the larger value of test_shapes. + min_size = 1441 + max_size = 1441 + factor = 16 + expected_image_shape = (1089, 1441, 3) + expected_label_shape = (1089, 1441, 1) + image = tf.random.normal([test_shape[0], test_shape[1], 3]) + label = tf.random.normal([test_shape[0], test_shape[1], 1]) + new_tensor_list = preprocess_utils.resize_to_range( + image=image, + label=label, + min_size=min_size, + max_size=max_size, + factor=factor, + align_corners=True) + with self.test_session() as session: + new_tensor_list = session.run(new_tensor_list) + self.assertEqual(new_tensor_list[0].shape, expected_image_shape) + self.assertEqual(new_tensor_list[1].shape, expected_label_shape) + + def testResizeTensorsToRangeWithEqualMaxSizeWithoutAspectRatio(self): + test_shapes = [[97, 38], + [96, 97]] + # Make max_size equal to the larger value of test_shapes. 
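+    # Setting keep_aspect_ratio=False below lets each side be resized to the
+    # [min_size, max_size] target on its own, so both inputs are expected to
+    # come out as square 97x97 tensors regardless of their original aspect
+    # ratio (see expected_image_shape_list / expected_label_shape_list).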
+ min_size = 97 + max_size = 97 + factor = 8 + keep_aspect_ratio = False + expected_image_shape_list = [(97, 97, 3), + (97, 97, 3)] + expected_label_shape_list = [(97, 97, 1), + (97, 97, 1)] + for i, test_shape in enumerate(test_shapes): + image = tf.random.normal([test_shape[0], test_shape[1], 3]) + label = tf.random.normal([test_shape[0], test_shape[1], 1]) + new_tensor_list = preprocess_utils.resize_to_range( + image=image, + label=label, + min_size=min_size, + max_size=max_size, + factor=factor, + keep_aspect_ratio=keep_aspect_ratio, + align_corners=True) + with self.test_session() as session: + new_tensor_list = session.run(new_tensor_list) + self.assertEqual(new_tensor_list[0].shape, expected_image_shape_list[i]) + self.assertEqual(new_tensor_list[1].shape, expected_label_shape_list[i]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/deeplab/core/resnet_v1_beta.py b/models/research/deeplab/core/resnet_v1_beta.py new file mode 100644 index 0000000000000000000000000000000000000000..0d5f1f19a234fb13cbac2d7397d0948b64d3011b --- /dev/null +++ b/models/research/deeplab/core/resnet_v1_beta.py @@ -0,0 +1,827 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Resnet v1 model variants. + +Code branched out from slim/nets/resnet_v1.py, and please refer to it for +more details. + +The original version ResNets-v1 were proposed by: +[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun + Deep Residual Learning for Image Recognition. arXiv:1512.03385 +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +from six.moves import range +import tensorflow as tf +from tensorflow.contrib import slim as contrib_slim +from deeplab.core import conv2d_ws +from deeplab.core import utils +from tensorflow.contrib.slim.nets import resnet_utils + +slim = contrib_slim + +_DEFAULT_MULTI_GRID = [1, 1, 1] +_DEFAULT_MULTI_GRID_RESNET_18 = [1, 1] + + +@slim.add_arg_scope +def bottleneck(inputs, + depth, + depth_bottleneck, + stride, + unit_rate=1, + rate=1, + outputs_collections=None, + scope=None): + """Bottleneck residual unit variant with BN after convolutions. + + This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for + its definition. Note that we use here the bottleneck variant which has an + extra bottleneck layer. + + When putting together two consecutive ResNet blocks that use this unit, one + should use stride = 2 in the last unit of the first block. + + Args: + inputs: A tensor of size [batch, height, width, channels]. + depth: The depth of the ResNet unit output. + depth_bottleneck: The depth of the bottleneck layers. + stride: The ResNet unit's stride. Determines the amount of downsampling of + the units output compared to its input. + unit_rate: An integer, unit rate for atrous convolution. 
+ rate: An integer, rate for atrous convolution. + outputs_collections: Collection to add the ResNet unit output. + scope: Optional variable_scope. + + Returns: + The ResNet unit's output. + """ + with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc: + depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4) + if depth == depth_in: + shortcut = resnet_utils.subsample(inputs, stride, 'shortcut') + else: + shortcut = conv2d_ws.conv2d( + inputs, + depth, + [1, 1], + stride=stride, + activation_fn=None, + scope='shortcut') + + residual = conv2d_ws.conv2d(inputs, depth_bottleneck, [1, 1], stride=1, + scope='conv1') + residual = conv2d_ws.conv2d_same(residual, depth_bottleneck, 3, stride, + rate=rate*unit_rate, scope='conv2') + residual = conv2d_ws.conv2d(residual, depth, [1, 1], stride=1, + activation_fn=None, scope='conv3') + output = tf.nn.relu(shortcut + residual) + + return slim.utils.collect_named_outputs(outputs_collections, sc.name, + output) + + +@slim.add_arg_scope +def lite_bottleneck(inputs, + depth, + stride, + unit_rate=1, + rate=1, + outputs_collections=None, + scope=None): + """Bottleneck residual unit variant with BN after convolutions. + + This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for + its definition. Note that we use here the bottleneck variant which has an + extra bottleneck layer. + + When putting together two consecutive ResNet blocks that use this unit, one + should use stride = 2 in the last unit of the first block. + + Args: + inputs: A tensor of size [batch, height, width, channels]. + depth: The depth of the ResNet unit output. + stride: The ResNet unit's stride. Determines the amount of downsampling of + the units output compared to its input. + unit_rate: An integer, unit rate for atrous convolution. + rate: An integer, rate for atrous convolution. + outputs_collections: Collection to add the ResNet unit output. + scope: Optional variable_scope. + + Returns: + The ResNet unit's output. + """ + with tf.variable_scope(scope, 'lite_bottleneck_v1', [inputs]) as sc: + depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4) + if depth == depth_in: + shortcut = resnet_utils.subsample(inputs, stride, 'shortcut') + else: + shortcut = conv2d_ws.conv2d( + inputs, + depth, [1, 1], + stride=stride, + activation_fn=None, + scope='shortcut') + + residual = conv2d_ws.conv2d_same( + inputs, depth, 3, 1, rate=rate * unit_rate, scope='conv1') + with slim.arg_scope([conv2d_ws.conv2d], activation_fn=None): + residual = conv2d_ws.conv2d_same( + residual, depth, 3, stride, rate=rate * unit_rate, scope='conv2') + output = tf.nn.relu(shortcut + residual) + + return slim.utils.collect_named_outputs(outputs_collections, sc.name, + output) + + +def root_block_fn_for_beta_variant(net, depth_multiplier=1.0): + """Gets root_block_fn for beta variant. + + ResNet-v1 beta variant modifies the first original 7x7 convolution to three + 3x3 convolutions. + + Args: + net: A tensor of size [batch, height, width, channels], input to the model. + depth_multiplier: Controls the number of convolution output channels for + each input channel. The total number of depthwise convolution output + channels will be equal to `num_filters_out * depth_multiplier`. + + Returns: + A tensor after three 3x3 convolutions. 
+  """
+  net = conv2d_ws.conv2d_same(
+      net, int(64 * depth_multiplier), 3, stride=2, scope='conv1_1')
+  net = conv2d_ws.conv2d_same(
+      net, int(64 * depth_multiplier), 3, stride=1, scope='conv1_2')
+  net = conv2d_ws.conv2d_same(
+      net, int(128 * depth_multiplier), 3, stride=1, scope='conv1_3')
+
+  return net
+
+
+def resnet_v1_beta(inputs,
+                   blocks,
+                   num_classes=None,
+                   is_training=None,
+                   global_pool=True,
+                   output_stride=None,
+                   root_block_fn=None,
+                   reuse=None,
+                   scope=None,
+                   sync_batch_norm_method='None'):
+  """Generator for v1 ResNet models (beta variant).
+
+  This function generates a family of modified ResNet v1 models. In particular,
+  the first original 7x7 convolution is replaced with three 3x3 convolutions.
+  See the resnet_v1_*() methods for specific model instantiations, obtained by
+  selecting different block instantiations that produce ResNets of various
+  depths.
+
+  The code is modified from slim/nets/resnet_v1.py; please refer to it for
+  more details.
+
+  Args:
+    inputs: A tensor of size [batch, height_in, width_in, channels].
+    blocks: A list of length equal to the number of ResNet blocks. Each element
+      is a resnet_utils.Block object describing the units in the block.
+    num_classes: Number of predicted classes for classification tasks. If None
+      we return the features before the logit layer.
+    is_training: Enable/disable is_training for batch normalization.
+    global_pool: If True, we perform global average pooling before computing the
+      logits. Set to True for image classification, False for dense prediction.
+    output_stride: If None, then the output will be computed at the nominal
+      network stride. If output_stride is not None, it specifies the requested
+      ratio of input to output spatial resolution.
+    root_block_fn: The function consisting of convolution operations applied to
+      the root input. If root_block_fn is None, use the original setting of
+      ResNet-v1, which is simply one convolution with 7x7 kernel and stride=2.
+    reuse: whether or not the network and its variables should be reused. To be
+      able to reuse 'scope' must be given.
+    scope: Optional variable_scope.
+    sync_batch_norm_method: String, sync batchnorm method.
+
+  Returns:
+    net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
+      If global_pool is False, then height_out and width_out are reduced by a
+      factor of output_stride compared to the respective height_in and width_in,
+      else both height_out and width_out equal one. If num_classes is None, then
+      net is the output of the last ResNet block, potentially after global
+      average pooling. If num_classes is not None, net contains the pre-softmax
+      activations.
+    end_points: A dictionary from components of the network to the corresponding
+      activation.
+
+  Raises:
+    ValueError: If the target output_stride is not valid.
+ """ + if root_block_fn is None: + root_block_fn = functools.partial(conv2d_ws.conv2d_same, + num_outputs=64, + kernel_size=7, + stride=2, + scope='conv1') + batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method) + with tf.variable_scope(scope, 'resnet_v1', [inputs], reuse=reuse) as sc: + end_points_collection = sc.original_name_scope + '_end_points' + with slim.arg_scope([ + conv2d_ws.conv2d, bottleneck, lite_bottleneck, + resnet_utils.stack_blocks_dense + ], + outputs_collections=end_points_collection): + if is_training is not None: + arg_scope = slim.arg_scope([batch_norm], is_training=is_training) + else: + arg_scope = slim.arg_scope([]) + with arg_scope: + net = inputs + if output_stride is not None: + if output_stride % 4 != 0: + raise ValueError('The output_stride needs to be a multiple of 4.') + output_stride //= 4 + net = root_block_fn(net) + net = slim.max_pool2d(net, 3, stride=2, padding='SAME', scope='pool1') + net = resnet_utils.stack_blocks_dense(net, blocks, output_stride) + + if global_pool: + # Global average pooling. + net = tf.reduce_mean(net, [1, 2], name='pool5', keepdims=True) + if num_classes is not None: + net = conv2d_ws.conv2d(net, num_classes, [1, 1], activation_fn=None, + normalizer_fn=None, scope='logits', + use_weight_standardization=False) + # Convert end_points_collection into a dictionary of end_points. + end_points = slim.utils.convert_collection_to_dict( + end_points_collection) + if num_classes is not None: + end_points['predictions'] = slim.softmax(net, scope='predictions') + return net, end_points + + +def resnet_v1_beta_block(scope, base_depth, num_units, stride): + """Helper function for creating a resnet_v1 beta variant bottleneck block. + + Args: + scope: The scope of the block. + base_depth: The depth of the bottleneck layer for each unit. + num_units: The number of units in the block. + stride: The stride of the block, implemented as a stride in the last unit. + All other units have stride=1. + + Returns: + A resnet_v1 bottleneck block. + """ + return resnet_utils.Block(scope, bottleneck, [{ + 'depth': base_depth * 4, + 'depth_bottleneck': base_depth, + 'stride': 1, + 'unit_rate': 1 + }] * (num_units - 1) + [{ + 'depth': base_depth * 4, + 'depth_bottleneck': base_depth, + 'stride': stride, + 'unit_rate': 1 + }]) + + +def resnet_v1_small_beta_block(scope, base_depth, num_units, stride): + """Helper function for creating a resnet_18 beta variant bottleneck block. + + Args: + scope: The scope of the block. + base_depth: The depth of the bottleneck layer for each unit. + num_units: The number of units in the block. + stride: The stride of the block, implemented as a stride in the last unit. + All other units have stride=1. + + Returns: + A resnet_18 bottleneck block. + """ + block_args = [] + for _ in range(num_units - 1): + block_args.append({'depth': base_depth, 'stride': 1, 'unit_rate': 1}) + block_args.append({'depth': base_depth, 'stride': stride, 'unit_rate': 1}) + return resnet_utils.Block(scope, lite_bottleneck, block_args) + + +def resnet_v1_18(inputs, + num_classes=None, + is_training=None, + global_pool=False, + output_stride=None, + multi_grid=None, + reuse=None, + scope='resnet_v1_18', + sync_batch_norm_method='None'): + """Resnet v1 18. + + Args: + inputs: A tensor of size [batch, height_in, width_in, channels]. + num_classes: Number of predicted classes for classification tasks. If None + we return the features before the logit layer. + is_training: Enable/disable is_training for batch normalization. 
+    global_pool: If True, we perform global average pooling before computing the
+      logits. Set to True for image classification, False for dense prediction.
+    output_stride: If None, then the output will be computed at the nominal
+      network stride. If output_stride is not None, it specifies the requested
+      ratio of input to output spatial resolution.
+    multi_grid: Employ a hierarchy of different atrous rates within network.
+    reuse: whether or not the network and its variables should be reused. To be
+      able to reuse 'scope' must be given.
+    scope: Optional variable_scope.
+    sync_batch_norm_method: String, sync batchnorm method.
+
+  Returns:
+    net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
+      If global_pool is False, then height_out and width_out are reduced by a
+      factor of output_stride compared to the respective height_in and width_in,
+      else both height_out and width_out equal one. If num_classes is None, then
+      net is the output of the last ResNet block, potentially after global
+      average pooling. If num_classes is not None, net contains the pre-softmax
+      activations.
+    end_points: A dictionary from components of the network to the corresponding
+      activation.
+
+  Raises:
+    ValueError: if multi_grid is not None and does not have length = 2.
+  """
+  if multi_grid is None:
+    multi_grid = _DEFAULT_MULTI_GRID_RESNET_18
+  else:
+    if len(multi_grid) != 2:
+      raise ValueError('Expect multi_grid to have length 2.')
+
+  block4_args = []
+  for rate in multi_grid:
+    block4_args.append({'depth': 512, 'stride': 1, 'unit_rate': rate})
+
+  blocks = [
+      resnet_v1_small_beta_block(
+          'block1', base_depth=64, num_units=2, stride=2),
+      resnet_v1_small_beta_block(
+          'block2', base_depth=128, num_units=2, stride=2),
+      resnet_v1_small_beta_block(
+          'block3', base_depth=256, num_units=2, stride=2),
+      resnet_utils.Block('block4', lite_bottleneck, block4_args),
+  ]
+  return resnet_v1_beta(
+      inputs,
+      blocks=blocks,
+      num_classes=num_classes,
+      is_training=is_training,
+      global_pool=global_pool,
+      output_stride=output_stride,
+      reuse=reuse,
+      scope=scope,
+      sync_batch_norm_method=sync_batch_norm_method)
+
+
+def resnet_v1_18_beta(inputs,
+                      num_classes=None,
+                      is_training=None,
+                      global_pool=False,
+                      output_stride=None,
+                      multi_grid=None,
+                      root_depth_multiplier=0.25,
+                      reuse=None,
+                      scope='resnet_v1_18',
+                      sync_batch_norm_method='None'):
+  """Resnet v1 18 beta variant.
+
+  This variant modifies the first convolution layer of ResNet-v1-18. In
+  particular, it changes the original one 7x7 convolution to three 3x3
+  convolutions.
+
+  Args:
+    inputs: A tensor of size [batch, height_in, width_in, channels].
+    num_classes: Number of predicted classes for classification tasks. If None
+      we return the features before the logit layer.
+    is_training: Enable/disable is_training for batch normalization.
+    global_pool: If True, we perform global average pooling before computing the
+      logits. Set to True for image classification, False for dense prediction.
+    output_stride: If None, then the output will be computed at the nominal
+      network stride. If output_stride is not None, it specifies the requested
+      ratio of input to output spatial resolution.
+    multi_grid: Employ a hierarchy of different atrous rates within network.
+    root_depth_multiplier: Float, depth multiplier used for the first three
+      convolution layers that replace the 7x7 convolution.
+    reuse: whether or not the network and its variables should be reused. To be
+      able to reuse 'scope' must be given.
+    scope: Optional variable_scope.
+    sync_batch_norm_method: String, sync batchnorm method.
+
+  Returns:
+    net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
+      If global_pool is False, then height_out and width_out are reduced by a
+      factor of output_stride compared to the respective height_in and width_in,
+      else both height_out and width_out equal one. If num_classes is None, then
+      net is the output of the last ResNet block, potentially after global
+      average pooling. If num_classes is not None, net contains the pre-softmax
+      activations.
+    end_points: A dictionary from components of the network to the corresponding
+      activation.
+
+  Raises:
+    ValueError: if multi_grid is not None and does not have length = 2.
+  """
+  if multi_grid is None:
+    multi_grid = _DEFAULT_MULTI_GRID_RESNET_18
+  else:
+    if len(multi_grid) != 2:
+      raise ValueError('Expect multi_grid to have length 2.')
+
+  block4_args = []
+  for rate in multi_grid:
+    block4_args.append({'depth': 512, 'stride': 1, 'unit_rate': rate})
+
+  blocks = [
+      resnet_v1_small_beta_block(
+          'block1', base_depth=64, num_units=2, stride=2),
+      resnet_v1_small_beta_block(
+          'block2', base_depth=128, num_units=2, stride=2),
+      resnet_v1_small_beta_block(
+          'block3', base_depth=256, num_units=2, stride=2),
+      resnet_utils.Block('block4', lite_bottleneck, block4_args),
+  ]
+  return resnet_v1_beta(
+      inputs,
+      blocks=blocks,
+      num_classes=num_classes,
+      is_training=is_training,
+      global_pool=global_pool,
+      output_stride=output_stride,
+      root_block_fn=functools.partial(root_block_fn_for_beta_variant,
+                                      depth_multiplier=root_depth_multiplier),
+      reuse=reuse,
+      scope=scope,
+      sync_batch_norm_method=sync_batch_norm_method)
+
+
+def resnet_v1_50(inputs,
+                 num_classes=None,
+                 is_training=None,
+                 global_pool=False,
+                 output_stride=None,
+                 multi_grid=None,
+                 reuse=None,
+                 scope='resnet_v1_50',
+                 sync_batch_norm_method='None'):
+  """Resnet v1 50.
+
+  Args:
+    inputs: A tensor of size [batch, height_in, width_in, channels].
+    num_classes: Number of predicted classes for classification tasks. If None
+      we return the features before the logit layer.
+    is_training: Enable/disable is_training for batch normalization.
+    global_pool: If True, we perform global average pooling before computing the
+      logits. Set to True for image classification, False for dense prediction.
+    output_stride: If None, then the output will be computed at the nominal
+      network stride. If output_stride is not None, it specifies the requested
+      ratio of input to output spatial resolution.
+    multi_grid: Employ a hierarchy of different atrous rates within network.
+    reuse: whether or not the network and its variables should be reused. To be
+      able to reuse 'scope' must be given.
+    scope: Optional variable_scope.
+    sync_batch_norm_method: String, sync batchnorm method.
+
+  Returns:
+    net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
+      If global_pool is False, then height_out and width_out are reduced by a
+      factor of output_stride compared to the respective height_in and width_in,
+      else both height_out and width_out equal one. If num_classes is None, then
+      net is the output of the last ResNet block, potentially after global
+      average pooling. If num_classes is not None, net contains the pre-softmax
+      activations.
+    end_points: A dictionary from components of the network to the corresponding
+      activation.
+
+  Raises:
+    ValueError: if multi_grid is not None and does not have length = 3.
+ """ + if multi_grid is None: + multi_grid = _DEFAULT_MULTI_GRID + else: + if len(multi_grid) != 3: + raise ValueError('Expect multi_grid to have length 3.') + + blocks = [ + resnet_v1_beta_block( + 'block1', base_depth=64, num_units=3, stride=2), + resnet_v1_beta_block( + 'block2', base_depth=128, num_units=4, stride=2), + resnet_v1_beta_block( + 'block3', base_depth=256, num_units=6, stride=2), + resnet_utils.Block('block4', bottleneck, [ + {'depth': 2048, 'depth_bottleneck': 512, 'stride': 1, + 'unit_rate': rate} for rate in multi_grid]), + ] + return resnet_v1_beta( + inputs, + blocks=blocks, + num_classes=num_classes, + is_training=is_training, + global_pool=global_pool, + output_stride=output_stride, + reuse=reuse, + scope=scope, + sync_batch_norm_method=sync_batch_norm_method) + + +def resnet_v1_50_beta(inputs, + num_classes=None, + is_training=None, + global_pool=False, + output_stride=None, + multi_grid=None, + reuse=None, + scope='resnet_v1_50', + sync_batch_norm_method='None'): + """Resnet v1 50 beta variant. + + This variant modifies the first convolution layer of ResNet-v1-50. In + particular, it changes the original one 7x7 convolution to three 3x3 + convolutions. + + Args: + inputs: A tensor of size [batch, height_in, width_in, channels]. + num_classes: Number of predicted classes for classification tasks. If None + we return the features before the logit layer. + is_training: Enable/disable is_training for batch normalization. + global_pool: If True, we perform global average pooling before computing the + logits. Set to True for image classification, False for dense prediction. + output_stride: If None, then the output will be computed at the nominal + network stride. If output_stride is not None, it specifies the requested + ratio of input to output spatial resolution. + multi_grid: Employ a hierarchy of different atrous rates within network. + reuse: whether or not the network and its variables should be reused. To be + able to reuse 'scope' must be given. + scope: Optional variable_scope. + sync_batch_norm_method: String, sync batchnorm method. + + Returns: + net: A rank-4 tensor of size [batch, height_out, width_out, channels_out]. + If global_pool is False, then height_out and width_out are reduced by a + factor of output_stride compared to the respective height_in and width_in, + else both height_out and width_out equal one. If num_classes is None, then + net is the output of the last ResNet block, potentially after global + average pooling. If num_classes is not None, net contains the pre-softmax + activations. + end_points: A dictionary from components of the network to the corresponding + activation. + + Raises: + ValueError: if multi_grid is not None and does not have length = 3. 
+ """ + if multi_grid is None: + multi_grid = _DEFAULT_MULTI_GRID + else: + if len(multi_grid) != 3: + raise ValueError('Expect multi_grid to have length 3.') + + blocks = [ + resnet_v1_beta_block( + 'block1', base_depth=64, num_units=3, stride=2), + resnet_v1_beta_block( + 'block2', base_depth=128, num_units=4, stride=2), + resnet_v1_beta_block( + 'block3', base_depth=256, num_units=6, stride=2), + resnet_utils.Block('block4', bottleneck, [ + {'depth': 2048, 'depth_bottleneck': 512, 'stride': 1, + 'unit_rate': rate} for rate in multi_grid]), + ] + return resnet_v1_beta( + inputs, + blocks=blocks, + num_classes=num_classes, + is_training=is_training, + global_pool=global_pool, + output_stride=output_stride, + root_block_fn=functools.partial(root_block_fn_for_beta_variant), + reuse=reuse, + scope=scope, + sync_batch_norm_method=sync_batch_norm_method) + + +def resnet_v1_101(inputs, + num_classes=None, + is_training=None, + global_pool=False, + output_stride=None, + multi_grid=None, + reuse=None, + scope='resnet_v1_101', + sync_batch_norm_method='None'): + """Resnet v1 101. + + Args: + inputs: A tensor of size [batch, height_in, width_in, channels]. + num_classes: Number of predicted classes for classification tasks. If None + we return the features before the logit layer. + is_training: Enable/disable is_training for batch normalization. + global_pool: If True, we perform global average pooling before computing the + logits. Set to True for image classification, False for dense prediction. + output_stride: If None, then the output will be computed at the nominal + network stride. If output_stride is not None, it specifies the requested + ratio of input to output spatial resolution. + multi_grid: Employ a hierarchy of different atrous rates within network. + reuse: whether or not the network and its variables should be reused. To be + able to reuse 'scope' must be given. + scope: Optional variable_scope. + sync_batch_norm_method: String, sync batchnorm method. + + Returns: + net: A rank-4 tensor of size [batch, height_out, width_out, channels_out]. + If global_pool is False, then height_out and width_out are reduced by a + factor of output_stride compared to the respective height_in and width_in, + else both height_out and width_out equal one. If num_classes is None, then + net is the output of the last ResNet block, potentially after global + average pooling. If num_classes is not None, net contains the pre-softmax + activations. + end_points: A dictionary from components of the network to the corresponding + activation. + + Raises: + ValueError: if multi_grid is not None and does not have length = 3. 
+ """ + if multi_grid is None: + multi_grid = _DEFAULT_MULTI_GRID + else: + if len(multi_grid) != 3: + raise ValueError('Expect multi_grid to have length 3.') + + blocks = [ + resnet_v1_beta_block( + 'block1', base_depth=64, num_units=3, stride=2), + resnet_v1_beta_block( + 'block2', base_depth=128, num_units=4, stride=2), + resnet_v1_beta_block( + 'block3', base_depth=256, num_units=23, stride=2), + resnet_utils.Block('block4', bottleneck, [ + {'depth': 2048, 'depth_bottleneck': 512, 'stride': 1, + 'unit_rate': rate} for rate in multi_grid]), + ] + return resnet_v1_beta( + inputs, + blocks=blocks, + num_classes=num_classes, + is_training=is_training, + global_pool=global_pool, + output_stride=output_stride, + reuse=reuse, + scope=scope, + sync_batch_norm_method=sync_batch_norm_method) + + +def resnet_v1_101_beta(inputs, + num_classes=None, + is_training=None, + global_pool=False, + output_stride=None, + multi_grid=None, + reuse=None, + scope='resnet_v1_101', + sync_batch_norm_method='None'): + """Resnet v1 101 beta variant. + + This variant modifies the first convolution layer of ResNet-v1-101. In + particular, it changes the original one 7x7 convolution to three 3x3 + convolutions. + + Args: + inputs: A tensor of size [batch, height_in, width_in, channels]. + num_classes: Number of predicted classes for classification tasks. If None + we return the features before the logit layer. + is_training: Enable/disable is_training for batch normalization. + global_pool: If True, we perform global average pooling before computing the + logits. Set to True for image classification, False for dense prediction. + output_stride: If None, then the output will be computed at the nominal + network stride. If output_stride is not None, it specifies the requested + ratio of input to output spatial resolution. + multi_grid: Employ a hierarchy of different atrous rates within network. + reuse: whether or not the network and its variables should be reused. To be + able to reuse 'scope' must be given. + scope: Optional variable_scope. + sync_batch_norm_method: String, sync batchnorm method. + + Returns: + net: A rank-4 tensor of size [batch, height_out, width_out, channels_out]. + If global_pool is False, then height_out and width_out are reduced by a + factor of output_stride compared to the respective height_in and width_in, + else both height_out and width_out equal one. If num_classes is None, then + net is the output of the last ResNet block, potentially after global + average pooling. If num_classes is not None, net contains the pre-softmax + activations. + end_points: A dictionary from components of the network to the corresponding + activation. + + Raises: + ValueError: if multi_grid is not None and does not have length = 3. 
+ """ + if multi_grid is None: + multi_grid = _DEFAULT_MULTI_GRID + else: + if len(multi_grid) != 3: + raise ValueError('Expect multi_grid to have length 3.') + + blocks = [ + resnet_v1_beta_block( + 'block1', base_depth=64, num_units=3, stride=2), + resnet_v1_beta_block( + 'block2', base_depth=128, num_units=4, stride=2), + resnet_v1_beta_block( + 'block3', base_depth=256, num_units=23, stride=2), + resnet_utils.Block('block4', bottleneck, [ + {'depth': 2048, 'depth_bottleneck': 512, 'stride': 1, + 'unit_rate': rate} for rate in multi_grid]), + ] + return resnet_v1_beta( + inputs, + blocks=blocks, + num_classes=num_classes, + is_training=is_training, + global_pool=global_pool, + output_stride=output_stride, + root_block_fn=functools.partial(root_block_fn_for_beta_variant), + reuse=reuse, + scope=scope, + sync_batch_norm_method=sync_batch_norm_method) + + +def resnet_arg_scope(weight_decay=0.0001, + batch_norm_decay=0.997, + batch_norm_epsilon=1e-5, + batch_norm_scale=True, + activation_fn=tf.nn.relu, + use_batch_norm=True, + sync_batch_norm_method='None', + normalization_method='unspecified', + use_weight_standardization=False): + """Defines the default ResNet arg scope. + + Args: + weight_decay: The weight decay to use for regularizing the model. + batch_norm_decay: The moving average decay when estimating layer activation + statistics in batch normalization. + batch_norm_epsilon: Small constant to prevent division by zero when + normalizing activations by their variance in batch normalization. + batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the + activations in the batch normalization layer. + activation_fn: The activation function which is used in ResNet. + use_batch_norm: Deprecated in favor of normalization_method. + sync_batch_norm_method: String, sync batchnorm method. + normalization_method: String, one of `batch`, `none`, or `group`, to use + batch normalization, no normalization, or group normalization. + use_weight_standardization: Boolean, whether to use weight standardization. + + Returns: + An `arg_scope` to use for the resnet models. + """ + batch_norm_params = { + 'decay': batch_norm_decay, + 'epsilon': batch_norm_epsilon, + 'scale': batch_norm_scale, + } + batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method) + if normalization_method == 'batch': + normalizer_fn = batch_norm + elif normalization_method == 'none': + normalizer_fn = None + elif normalization_method == 'group': + normalizer_fn = slim.group_norm + elif normalization_method == 'unspecified': + normalizer_fn = batch_norm if use_batch_norm else None + else: + raise ValueError('Unrecognized normalization_method %s' % + normalization_method) + + with slim.arg_scope([conv2d_ws.conv2d], + weights_regularizer=slim.l2_regularizer(weight_decay), + weights_initializer=slim.variance_scaling_initializer(), + activation_fn=activation_fn, + normalizer_fn=normalizer_fn, + use_weight_standardization=use_weight_standardization): + with slim.arg_scope([batch_norm], **batch_norm_params): + # The following implies padding='SAME' for pool1, which makes feature + # alignment easier for dense prediction tasks. This is also used in + # https://github.com/facebook/fb.resnet.torch. However the accompanying + # code of 'Deep Residual Learning for Image Recognition' uses + # padding='VALID' for pool1. You can switch to that choice by setting + # slim.arg_scope([slim.max_pool2d], padding='VALID'). 
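+      # Illustrative usage sketch only (hedged; `images` is an assumed
+      # [batch, height, width, 3] input tensor, not defined in this file).
+      # These scopes are typically consumed together with the constructors
+      # above, e.g. for dense prediction with output stride 16:
+      #
+      #   with slim.arg_scope(resnet_arg_scope(weight_decay=1e-4)):
+      #     net, end_points = resnet_v1_50_beta(
+      #         images,
+      #         num_classes=None,
+      #         is_training=False,
+      #         global_pool=False,
+      #         output_stride=16,
+      #         multi_grid=[1, 2, 4])
+      #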
+ with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc: + return arg_sc diff --git a/models/research/deeplab/core/resnet_v1_beta_test.py b/models/research/deeplab/core/resnet_v1_beta_test.py new file mode 100644 index 0000000000000000000000000000000000000000..8b61edcce21803047ea047a0acb1bc9e7ae147da --- /dev/null +++ b/models/research/deeplab/core/resnet_v1_beta_test.py @@ -0,0 +1,564 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for resnet_v1_beta module.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools + +import numpy as np +import six +import tensorflow as tf +from tensorflow.contrib import slim as contrib_slim + +from deeplab.core import resnet_v1_beta +from tensorflow.contrib.slim.nets import resnet_utils + +slim = contrib_slim + + +def create_test_input(batch, height, width, channels): + """Create test input tensor.""" + if None in [batch, height, width, channels]: + return tf.placeholder(tf.float32, (batch, height, width, channels)) + else: + return tf.to_float( + np.tile( + np.reshape( + np.reshape(np.arange(height), [height, 1]) + + np.reshape(np.arange(width), [1, width]), + [1, height, width, 1]), + [batch, 1, 1, channels])) + + +class ResnetCompleteNetworkTest(tf.test.TestCase): + """Tests with complete small ResNet v1 networks.""" + + def _resnet_small_lite_bottleneck(self, + inputs, + num_classes=None, + is_training=True, + global_pool=True, + output_stride=None, + multi_grid=None, + reuse=None, + scope='resnet_v1_small'): + """A shallow and thin ResNet v1 with lite_bottleneck.""" + if multi_grid is None: + multi_grid = [1, 1] + else: + if len(multi_grid) != 2: + raise ValueError('Expect multi_grid to have length 2.') + block = resnet_v1_beta.resnet_v1_small_beta_block + blocks = [ + block('block1', base_depth=1, num_units=1, stride=2), + block('block2', base_depth=2, num_units=1, stride=2), + block('block3', base_depth=4, num_units=1, stride=2), + resnet_utils.Block('block4', resnet_v1_beta.lite_bottleneck, [ + {'depth': 8, + 'stride': 1, + 'unit_rate': rate} for rate in multi_grid])] + return resnet_v1_beta.resnet_v1_beta( + inputs, + blocks, + num_classes=num_classes, + is_training=is_training, + global_pool=global_pool, + output_stride=output_stride, + root_block_fn=functools.partial( + resnet_v1_beta.root_block_fn_for_beta_variant, + depth_multiplier=0.25), + reuse=reuse, + scope=scope) + + def _resnet_small(self, + inputs, + num_classes=None, + is_training=True, + global_pool=True, + output_stride=None, + multi_grid=None, + reuse=None, + scope='resnet_v1_small'): + """A shallow and thin ResNet v1 for faster tests.""" + if multi_grid is None: + multi_grid = [1, 1, 1] + else: + if len(multi_grid) != 3: + raise ValueError('Expect multi_grid to have length 3.') + + block = 
resnet_v1_beta.resnet_v1_beta_block + blocks = [ + block('block1', base_depth=1, num_units=1, stride=2), + block('block2', base_depth=2, num_units=1, stride=2), + block('block3', base_depth=4, num_units=1, stride=2), + resnet_utils.Block('block4', resnet_v1_beta.bottleneck, [ + {'depth': 32, 'depth_bottleneck': 8, 'stride': 1, + 'unit_rate': rate} for rate in multi_grid])] + + return resnet_v1_beta.resnet_v1_beta( + inputs, + blocks, + num_classes=num_classes, + is_training=is_training, + global_pool=global_pool, + output_stride=output_stride, + root_block_fn=functools.partial( + resnet_v1_beta.root_block_fn_for_beta_variant), + reuse=reuse, + scope=scope) + + def testClassificationEndPointsWithLiteBottleneck(self): + global_pool = True + num_classes = 10 + inputs = create_test_input(2, 224, 224, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + logits, end_points = self._resnet_small_lite_bottleneck( + inputs, + num_classes, + global_pool=global_pool, + scope='resnet') + + self.assertTrue(logits.op.name.startswith('resnet/logits')) + self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes]) + self.assertIn('predictions', end_points) + self.assertListEqual(end_points['predictions'].get_shape().as_list(), + [2, 1, 1, num_classes]) + + def testClassificationEndPointsWithMultigridAndLiteBottleneck(self): + global_pool = True + num_classes = 10 + inputs = create_test_input(2, 224, 224, 3) + multi_grid = [1, 2] + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + logits, end_points = self._resnet_small_lite_bottleneck( + inputs, + num_classes, + global_pool=global_pool, + multi_grid=multi_grid, + scope='resnet') + + self.assertTrue(logits.op.name.startswith('resnet/logits')) + self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes]) + self.assertIn('predictions', end_points) + self.assertListEqual(end_points['predictions'].get_shape().as_list(), + [2, 1, 1, num_classes]) + + def testClassificationShapesWithLiteBottleneck(self): + global_pool = True + num_classes = 10 + inputs = create_test_input(2, 224, 224, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + _, end_points = self._resnet_small_lite_bottleneck( + inputs, + num_classes, + global_pool=global_pool, + scope='resnet') + endpoint_to_shape = { + 'resnet/conv1_1': [2, 112, 112, 16], + 'resnet/conv1_2': [2, 112, 112, 16], + 'resnet/conv1_3': [2, 112, 112, 32], + 'resnet/block1': [2, 28, 28, 1], + 'resnet/block2': [2, 14, 14, 2], + 'resnet/block3': [2, 7, 7, 4], + 'resnet/block4': [2, 7, 7, 8]} + for endpoint, shape in six.iteritems(endpoint_to_shape): + self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape) + + def testFullyConvolutionalEndpointShapesWithLiteBottleneck(self): + global_pool = False + num_classes = 10 + inputs = create_test_input(2, 321, 321, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + _, end_points = self._resnet_small_lite_bottleneck( + inputs, + num_classes, + global_pool=global_pool, + scope='resnet') + endpoint_to_shape = { + 'resnet/conv1_1': [2, 161, 161, 16], + 'resnet/conv1_2': [2, 161, 161, 16], + 'resnet/conv1_3': [2, 161, 161, 32], + 'resnet/block1': [2, 41, 41, 1], + 'resnet/block2': [2, 21, 21, 2], + 'resnet/block3': [2, 11, 11, 4], + 'resnet/block4': [2, 11, 11, 8]} + for endpoint, shape in six.iteritems(endpoint_to_shape): + self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape) + + def testAtrousFullyConvolutionalEndpointShapesWithLiteBottleneck(self): + global_pool = False + num_classes = 10 + 
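+    # With output_stride=8, spatial downsampling stops once the feature maps
+    # reach 1/8 of the 321x321 input (41x41); the striding that block2-block4
+    # would otherwise apply is realized as atrous rates instead, so all four
+    # blocks in endpoint_to_shape below stay at 41x41.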
output_stride = 8 + inputs = create_test_input(2, 321, 321, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + _, end_points = self._resnet_small_lite_bottleneck( + inputs, + num_classes, + global_pool=global_pool, + output_stride=output_stride, + scope='resnet') + endpoint_to_shape = { + 'resnet/conv1_1': [2, 161, 161, 16], + 'resnet/conv1_2': [2, 161, 161, 16], + 'resnet/conv1_3': [2, 161, 161, 32], + 'resnet/block1': [2, 41, 41, 1], + 'resnet/block2': [2, 41, 41, 2], + 'resnet/block3': [2, 41, 41, 4], + 'resnet/block4': [2, 41, 41, 8]} + for endpoint, shape in six.iteritems(endpoint_to_shape): + self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape) + + def testAtrousFullyConvolutionalValuesWithLiteBottleneck(self): + """Verify dense feature extraction with atrous convolution.""" + nominal_stride = 32 + for output_stride in [4, 8, 16, 32, None]: + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + with tf.Graph().as_default(): + with self.test_session() as sess: + tf.set_random_seed(0) + inputs = create_test_input(2, 81, 81, 3) + # Dense feature extraction followed by subsampling. + output, _ = self._resnet_small_lite_bottleneck( + inputs, + None, + is_training=False, + global_pool=False, + output_stride=output_stride) + if output_stride is None: + factor = 1 + else: + factor = nominal_stride // output_stride + output = resnet_utils.subsample(output, factor) + # Make the two networks use the same weights. + tf.get_variable_scope().reuse_variables() + # Feature extraction at the nominal network rate. + expected, _ = self._resnet_small_lite_bottleneck( + inputs, + None, + is_training=False, + global_pool=False) + sess.run(tf.global_variables_initializer()) + self.assertAllClose(output.eval(), expected.eval(), + atol=1e-4, rtol=1e-4) + + def testUnknownBatchSizeWithLiteBottleneck(self): + batch = 2 + height, width = 65, 65 + global_pool = True + num_classes = 10 + inputs = create_test_input(None, height, width, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + logits, _ = self._resnet_small_lite_bottleneck( + inputs, + num_classes, + global_pool=global_pool, + scope='resnet') + self.assertTrue(logits.op.name.startswith('resnet/logits')) + self.assertListEqual(logits.get_shape().as_list(), + [None, 1, 1, num_classes]) + images = create_test_input(batch, height, width, 3) + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + output = sess.run(logits, {inputs: images.eval()}) + self.assertEqual(output.shape, (batch, 1, 1, num_classes)) + + def testFullyConvolutionalUnknownHeightWidthWithLiteBottleneck(self): + batch = 2 + height, width = 65, 65 + global_pool = False + inputs = create_test_input(batch, None, None, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + output, _ = self._resnet_small_lite_bottleneck( + inputs, + None, + global_pool=global_pool) + self.assertListEqual(output.get_shape().as_list(), + [batch, None, None, 8]) + images = create_test_input(batch, height, width, 3) + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + output = sess.run(output, {inputs: images.eval()}) + self.assertEqual(output.shape, (batch, 3, 3, 8)) + + def testAtrousFullyConvolutionalUnknownHeightWidthWithLiteBottleneck(self): + batch = 2 + height, width = 65, 65 + global_pool = False + output_stride = 8 + inputs = create_test_input(batch, None, None, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + output, _ = self._resnet_small_lite_bottleneck( + inputs, + None, + 
global_pool=global_pool, + output_stride=output_stride) + self.assertListEqual(output.get_shape().as_list(), + [batch, None, None, 8]) + images = create_test_input(batch, height, width, 3) + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + output = sess.run(output, {inputs: images.eval()}) + self.assertEqual(output.shape, (batch, 9, 9, 8)) + + def testClassificationEndPoints(self): + global_pool = True + num_classes = 10 + inputs = create_test_input(2, 224, 224, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + logits, end_points = self._resnet_small(inputs, + num_classes, + global_pool=global_pool, + scope='resnet') + + self.assertTrue(logits.op.name.startswith('resnet/logits')) + self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes]) + self.assertIn('predictions', end_points) + self.assertListEqual(end_points['predictions'].get_shape().as_list(), + [2, 1, 1, num_classes]) + + def testClassificationEndPointsWithWS(self): + global_pool = True + num_classes = 10 + inputs = create_test_input(2, 224, 224, 3) + with slim.arg_scope( + resnet_v1_beta.resnet_arg_scope(use_weight_standardization=True)): + logits, end_points = self._resnet_small( + inputs, num_classes, global_pool=global_pool, scope='resnet') + + self.assertTrue(logits.op.name.startswith('resnet/logits')) + self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes]) + self.assertIn('predictions', end_points) + self.assertListEqual(end_points['predictions'].get_shape().as_list(), + [2, 1, 1, num_classes]) + + def testClassificationEndPointsWithGN(self): + global_pool = True + num_classes = 10 + inputs = create_test_input(2, 224, 224, 3) + with slim.arg_scope( + resnet_v1_beta.resnet_arg_scope(normalization_method='group')): + with slim.arg_scope([slim.group_norm], groups=1): + logits, end_points = self._resnet_small( + inputs, num_classes, global_pool=global_pool, scope='resnet') + + self.assertTrue(logits.op.name.startswith('resnet/logits')) + self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes]) + self.assertIn('predictions', end_points) + self.assertListEqual(end_points['predictions'].get_shape().as_list(), + [2, 1, 1, num_classes]) + + def testInvalidGroupsWithGN(self): + global_pool = True + num_classes = 10 + inputs = create_test_input(2, 224, 224, 3) + with self.assertRaisesRegexp(ValueError, 'Invalid groups'): + with slim.arg_scope( + resnet_v1_beta.resnet_arg_scope(normalization_method='group')): + with slim.arg_scope([slim.group_norm], groups=32): + _, _ = self._resnet_small( + inputs, num_classes, global_pool=global_pool, scope='resnet') + + def testClassificationEndPointsWithGNWS(self): + global_pool = True + num_classes = 10 + inputs = create_test_input(2, 224, 224, 3) + with slim.arg_scope( + resnet_v1_beta.resnet_arg_scope( + normalization_method='group', use_weight_standardization=True)): + with slim.arg_scope([slim.group_norm], groups=1): + logits, end_points = self._resnet_small( + inputs, num_classes, global_pool=global_pool, scope='resnet') + + self.assertTrue(logits.op.name.startswith('resnet/logits')) + self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes]) + self.assertIn('predictions', end_points) + self.assertListEqual(end_points['predictions'].get_shape().as_list(), + [2, 1, 1, num_classes]) + + def testClassificationEndPointsWithMultigrid(self): + global_pool = True + num_classes = 10 + inputs = create_test_input(2, 224, 224, 3) + multi_grid = [1, 2, 4] + with 
slim.arg_scope(resnet_utils.resnet_arg_scope()): + logits, end_points = self._resnet_small(inputs, + num_classes, + global_pool=global_pool, + multi_grid=multi_grid, + scope='resnet') + + self.assertTrue(logits.op.name.startswith('resnet/logits')) + self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes]) + self.assertIn('predictions', end_points) + self.assertListEqual(end_points['predictions'].get_shape().as_list(), + [2, 1, 1, num_classes]) + + def testClassificationShapes(self): + global_pool = True + num_classes = 10 + inputs = create_test_input(2, 224, 224, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + _, end_points = self._resnet_small(inputs, + num_classes, + global_pool=global_pool, + scope='resnet') + endpoint_to_shape = { + 'resnet/conv1_1': [2, 112, 112, 64], + 'resnet/conv1_2': [2, 112, 112, 64], + 'resnet/conv1_3': [2, 112, 112, 128], + 'resnet/block1': [2, 28, 28, 4], + 'resnet/block2': [2, 14, 14, 8], + 'resnet/block3': [2, 7, 7, 16], + 'resnet/block4': [2, 7, 7, 32]} + for endpoint, shape in six.iteritems(endpoint_to_shape): + self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape) + + def testFullyConvolutionalEndpointShapes(self): + global_pool = False + num_classes = 10 + inputs = create_test_input(2, 321, 321, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + _, end_points = self._resnet_small(inputs, + num_classes, + global_pool=global_pool, + scope='resnet') + endpoint_to_shape = { + 'resnet/conv1_1': [2, 161, 161, 64], + 'resnet/conv1_2': [2, 161, 161, 64], + 'resnet/conv1_3': [2, 161, 161, 128], + 'resnet/block1': [2, 41, 41, 4], + 'resnet/block2': [2, 21, 21, 8], + 'resnet/block3': [2, 11, 11, 16], + 'resnet/block4': [2, 11, 11, 32]} + for endpoint, shape in six.iteritems(endpoint_to_shape): + self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape) + + def testAtrousFullyConvolutionalEndpointShapes(self): + global_pool = False + num_classes = 10 + output_stride = 8 + inputs = create_test_input(2, 321, 321, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + _, end_points = self._resnet_small(inputs, + num_classes, + global_pool=global_pool, + output_stride=output_stride, + scope='resnet') + endpoint_to_shape = { + 'resnet/conv1_1': [2, 161, 161, 64], + 'resnet/conv1_2': [2, 161, 161, 64], + 'resnet/conv1_3': [2, 161, 161, 128], + 'resnet/block1': [2, 41, 41, 4], + 'resnet/block2': [2, 41, 41, 8], + 'resnet/block3': [2, 41, 41, 16], + 'resnet/block4': [2, 41, 41, 32]} + for endpoint, shape in six.iteritems(endpoint_to_shape): + self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape) + + def testAtrousFullyConvolutionalValues(self): + """Verify dense feature extraction with atrous convolution.""" + nominal_stride = 32 + for output_stride in [4, 8, 16, 32, None]: + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + with tf.Graph().as_default(): + with self.test_session() as sess: + tf.set_random_seed(0) + inputs = create_test_input(2, 81, 81, 3) + # Dense feature extraction followed by subsampling. + output, _ = self._resnet_small(inputs, + None, + is_training=False, + global_pool=False, + output_stride=output_stride) + if output_stride is None: + factor = 1 + else: + factor = nominal_stride // output_stride + output = resnet_utils.subsample(output, factor) + # Make the two networks use the same weights. + tf.get_variable_scope().reuse_variables() + # Feature extraction at the nominal network rate. 
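+            # Subsampling the dense (atrous) output above should reproduce the
+            # output computed at the nominal stride here, since atrous
+            # convolution evaluates the same filters at the retained positions;
+            # that equivalence is what assertAllClose verifies below.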
+ expected, _ = self._resnet_small(inputs, + None, + is_training=False, + global_pool=False) + sess.run(tf.global_variables_initializer()) + self.assertAllClose(output.eval(), expected.eval(), + atol=1e-4, rtol=1e-4) + + def testUnknownBatchSize(self): + batch = 2 + height, width = 65, 65 + global_pool = True + num_classes = 10 + inputs = create_test_input(None, height, width, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + logits, _ = self._resnet_small(inputs, + num_classes, + global_pool=global_pool, + scope='resnet') + self.assertTrue(logits.op.name.startswith('resnet/logits')) + self.assertListEqual(logits.get_shape().as_list(), + [None, 1, 1, num_classes]) + images = create_test_input(batch, height, width, 3) + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + output = sess.run(logits, {inputs: images.eval()}) + self.assertEqual(output.shape, (batch, 1, 1, num_classes)) + + def testFullyConvolutionalUnknownHeightWidth(self): + batch = 2 + height, width = 65, 65 + global_pool = False + inputs = create_test_input(batch, None, None, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + output, _ = self._resnet_small(inputs, + None, + global_pool=global_pool) + self.assertListEqual(output.get_shape().as_list(), + [batch, None, None, 32]) + images = create_test_input(batch, height, width, 3) + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + output = sess.run(output, {inputs: images.eval()}) + self.assertEqual(output.shape, (batch, 3, 3, 32)) + + def testAtrousFullyConvolutionalUnknownHeightWidth(self): + batch = 2 + height, width = 65, 65 + global_pool = False + output_stride = 8 + inputs = create_test_input(batch, None, None, 3) + with slim.arg_scope(resnet_utils.resnet_arg_scope()): + output, _ = self._resnet_small(inputs, + None, + global_pool=global_pool, + output_stride=output_stride) + self.assertListEqual(output.get_shape().as_list(), + [batch, None, None, 32]) + images = create_test_input(batch, height, width, 3) + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + output = sess.run(output, {inputs: images.eval()}) + self.assertEqual(output.shape, (batch, 9, 9, 32)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/deeplab/core/utils.py b/models/research/deeplab/core/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4bf3d09ad4647c757da5f9ebb3c2f676e3ccc00c --- /dev/null +++ b/models/research/deeplab/core/utils.py @@ -0,0 +1,214 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""This script contains utility functions.""" +import tensorflow as tf +from tensorflow.contrib import framework as contrib_framework +from tensorflow.contrib import slim as contrib_slim + +slim = contrib_slim + + +# Quantized version of sigmoid function. 
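+# relu6(x + 3) * 0.16667 is the piecewise-linear 'hard' approximation of the
+# sigmoid: 0 for x <= -3, 1 for x >= 3, and roughly (x + 3) / 6 in between
+# (e.g. q_sigmoid(0) = 3 * 0.16667 ~= 0.5, matching sigmoid(0) = 0.5).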
+q_sigmoid = lambda x: tf.nn.relu6(x + 3) * 0.16667 + + +def resize_bilinear(images, size, output_dtype=tf.float32): + """Returns resized images as output_type. + + Args: + images: A tensor of size [batch, height_in, width_in, channels]. + size: A 1-D int32 Tensor of 2 elements: new_height, new_width. The new size + for the images. + output_dtype: The destination type. + Returns: + A tensor of size [batch, height_out, width_out, channels] as a dtype of + output_dtype. + """ + images = tf.image.resize_bilinear(images, size, align_corners=True) + return tf.cast(images, dtype=output_dtype) + + +def scale_dimension(dim, scale): + """Scales the input dimension. + + Args: + dim: Input dimension (a scalar or a scalar Tensor). + scale: The amount of scaling applied to the input. + + Returns: + Scaled dimension. + """ + if isinstance(dim, tf.Tensor): + return tf.cast((tf.to_float(dim) - 1.0) * scale + 1.0, dtype=tf.int32) + else: + return int((float(dim) - 1.0) * scale + 1.0) + + +def split_separable_conv2d(inputs, + filters, + kernel_size=3, + rate=1, + weight_decay=0.00004, + depthwise_weights_initializer_stddev=0.33, + pointwise_weights_initializer_stddev=0.06, + scope=None): + """Splits a separable conv2d into depthwise and pointwise conv2d. + + This operation differs from `tf.layers.separable_conv2d` as this operation + applies activation function between depthwise and pointwise conv2d. + + Args: + inputs: Input tensor with shape [batch, height, width, channels]. + filters: Number of filters in the 1x1 pointwise convolution. + kernel_size: A list of length 2: [kernel_height, kernel_width] of + of the filters. Can be an int if both values are the same. + rate: Atrous convolution rate for the depthwise convolution. + weight_decay: The weight decay to use for regularizing the model. + depthwise_weights_initializer_stddev: The standard deviation of the + truncated normal weight initializer for depthwise convolution. + pointwise_weights_initializer_stddev: The standard deviation of the + truncated normal weight initializer for pointwise convolution. + scope: Optional scope for the operation. + + Returns: + Computed features after split separable conv2d. + """ + outputs = slim.separable_conv2d( + inputs, + None, + kernel_size=kernel_size, + depth_multiplier=1, + rate=rate, + weights_initializer=tf.truncated_normal_initializer( + stddev=depthwise_weights_initializer_stddev), + weights_regularizer=None, + scope=scope + '_depthwise') + return slim.conv2d( + outputs, + filters, + 1, + weights_initializer=tf.truncated_normal_initializer( + stddev=pointwise_weights_initializer_stddev), + weights_regularizer=slim.l2_regularizer(weight_decay), + scope=scope + '_pointwise') + + +def get_label_weight_mask(labels, ignore_label, num_classes, label_weights=1.0): + """Gets the label weight mask. + + Args: + labels: A Tensor of labels with the shape of [-1]. + ignore_label: Integer, label to ignore. + num_classes: Integer, the number of semantic classes. + label_weights: A float or a list of weights. If it is a float, it means all + the labels have the same weight. If it is a list of weights, then each + element in the list represents the weight for the label of its index, for + example, label_weights = [0.1, 0.5] means the weight for label 0 is 0.1 + and the weight for label 1 is 0.5. + + Returns: + A Tensor of label weights with the same shape of labels, each element is the + weight for the label with the same index in labels and the element is 0.0 + if the label is to ignore. 
+ + Raises: + ValueError: If label_weights is neither a float nor a list, or if + label_weights is a list and its length is not equal to num_classes. + """ + if not isinstance(label_weights, (float, list)): + raise ValueError( + 'The type of label_weights is invalid, it must be a float or a list.') + + if isinstance(label_weights, list) and len(label_weights) != num_classes: + raise ValueError( + 'Length of label_weights must be equal to num_classes if it is a list, ' + 'label_weights: %s, num_classes: %d.' % (label_weights, num_classes)) + + not_ignore_mask = tf.not_equal(labels, ignore_label) + not_ignore_mask = tf.cast(not_ignore_mask, tf.float32) + if isinstance(label_weights, float): + return not_ignore_mask * label_weights + + label_weights = tf.constant(label_weights, tf.float32) + weight_mask = tf.einsum('...y,y->...', + tf.one_hot(labels, num_classes, dtype=tf.float32), + label_weights) + return tf.multiply(not_ignore_mask, weight_mask) + + +def get_batch_norm_fn(sync_batch_norm_method): + """Gets batch norm function. + + Currently we only support the following methods: + - `None` (no sync batch norm). We use slim.batch_norm in this case. + + Args: + sync_batch_norm_method: String, method used to sync batch norm. + + Returns: + Batchnorm function. + + Raises: + ValueError: If sync_batch_norm_method is not supported. + """ + if sync_batch_norm_method == 'None': + return slim.batch_norm + else: + raise ValueError('Unsupported sync_batch_norm_method.') + + +def get_batch_norm_params(decay=0.9997, + epsilon=1e-5, + center=True, + scale=True, + is_training=True, + sync_batch_norm_method='None', + initialize_gamma_as_zeros=False): + """Gets batch norm parameters. + + Args: + decay: Float, decay for the moving average. + epsilon: Float, value added to variance to avoid dividing by zero. + center: Boolean. If True, add offset of `beta` to normalized tensor. If + False,`beta` is ignored. + scale: Boolean. If True, multiply by `gamma`. If False, `gamma` is not used. + is_training: Boolean, whether or not the layer is in training mode. + sync_batch_norm_method: String, method used to sync batch norm. + initialize_gamma_as_zeros: Boolean, initializing `gamma` as zeros or not. + + Returns: + A dictionary for batchnorm parameters. + + Raises: + ValueError: If sync_batch_norm_method is not supported. + """ + batch_norm_params = { + 'is_training': is_training, + 'decay': decay, + 'epsilon': epsilon, + 'scale': scale, + 'center': center, + } + if initialize_gamma_as_zeros: + if sync_batch_norm_method == 'None': + # Slim-type gamma_initialier. + batch_norm_params['param_initializers'] = { + 'gamma': tf.zeros_initializer(), + } + else: + raise ValueError('Unsupported sync_batch_norm_method.') + return batch_norm_params diff --git a/models/research/deeplab/core/utils_test.py b/models/research/deeplab/core/utils_test.py new file mode 100644 index 0000000000000000000000000000000000000000..cfdb63ef2d3faaa8090867a5382876972b0cff3d --- /dev/null +++ b/models/research/deeplab/core/utils_test.py @@ -0,0 +1,90 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for utils.py.""" + +import numpy as np +import tensorflow as tf + +from deeplab.core import utils + + +class UtilsTest(tf.test.TestCase): + + def testScaleDimensionOutput(self): + self.assertEqual(161, utils.scale_dimension(321, 0.5)) + self.assertEqual(193, utils.scale_dimension(321, 0.6)) + self.assertEqual(241, utils.scale_dimension(321, 0.75)) + + def testGetLabelWeightMask_withFloatLabelWeights(self): + labels = tf.constant([0, 4, 1, 3, 2]) + ignore_label = 4 + num_classes = 5 + label_weights = 0.5 + expected_label_weight_mask = np.array([0.5, 0.0, 0.5, 0.5, 0.5], + dtype=np.float32) + + with self.test_session() as sess: + label_weight_mask = utils.get_label_weight_mask( + labels, ignore_label, num_classes, label_weights=label_weights) + label_weight_mask = sess.run(label_weight_mask) + self.assertAllEqual(label_weight_mask, expected_label_weight_mask) + + def testGetLabelWeightMask_withListLabelWeights(self): + labels = tf.constant([0, 4, 1, 3, 2]) + ignore_label = 4 + num_classes = 5 + label_weights = [0.0, 0.1, 0.2, 0.3, 0.4] + expected_label_weight_mask = np.array([0.0, 0.0, 0.1, 0.3, 0.2], + dtype=np.float32) + + with self.test_session() as sess: + label_weight_mask = utils.get_label_weight_mask( + labels, ignore_label, num_classes, label_weights=label_weights) + label_weight_mask = sess.run(label_weight_mask) + self.assertAllEqual(label_weight_mask, expected_label_weight_mask) + + def testGetLabelWeightMask_withInvalidLabelWeightsType(self): + labels = tf.constant([0, 4, 1, 3, 2]) + ignore_label = 4 + num_classes = 5 + + self.assertRaisesWithRegexpMatch( + ValueError, + '^The type of label_weights is invalid, it must be a float or a list', + utils.get_label_weight_mask, + labels=labels, + ignore_label=ignore_label, + num_classes=num_classes, + label_weights=None) + + def testGetLabelWeightMask_withInvalidLabelWeightsLength(self): + labels = tf.constant([0, 4, 1, 3, 2]) + ignore_label = 4 + num_classes = 5 + label_weights = [0.0, 0.1, 0.2] + + self.assertRaisesWithRegexpMatch( + ValueError, + '^Length of label_weights must be equal to num_classes if it is a list', + utils.get_label_weight_mask, + labels=labels, + ignore_label=ignore_label, + num_classes=num_classes, + label_weights=label_weights) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/deeplab/core/xception.py b/models/research/deeplab/core/xception.py new file mode 100644 index 0000000000000000000000000000000000000000..f9925714716ea0346dd8df75b956a876e52bde69 --- /dev/null +++ b/models/research/deeplab/core/xception.py @@ -0,0 +1,945 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""Xception model. + +"Xception: Deep Learning with Depthwise Separable Convolutions" +Fran{\c{c}}ois Chollet +https://arxiv.org/abs/1610.02357 + +We implement the modified version by Jifeng Dai et al. for their COCO 2017 +detection challenge submission, where the model is made deeper and has aligned +features for dense prediction tasks. See their slides for details: + +"Deformable Convolutional Networks -- COCO Detection and Segmentation Challenge +2017 Entry" +Haozhi Qi, Zheng Zhang, Bin Xiao, Han Hu, Bowen Cheng, Yichen Wei and Jifeng Dai +ICCV 2017 COCO Challenge workshop +http://presentations.cocodataset.org/COCO17-Detect-MSRA.pdf + +We made a few more changes on top of MSRA's modifications: +1. Fully convolutional: All the max-pooling layers are replaced with separable + conv2d with stride = 2. This allows us to use atrous convolution to extract + feature maps at any resolution. + +2. We support adding ReLU and BatchNorm after depthwise convolution, motivated + by the design of MobileNetv1. + +"MobileNets: Efficient Convolutional Neural Networks for Mobile Vision +Applications" +Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, +Tobias Weyand, Marco Andreetto, Hartwig Adam +https://arxiv.org/abs/1704.04861 +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +from six.moves import range +import tensorflow as tf +from tensorflow.contrib import slim as contrib_slim + +from deeplab.core import utils +from tensorflow.contrib.slim.nets import resnet_utils +from nets.mobilenet import conv_blocks as mobilenet_v3_ops + +slim = contrib_slim + + +_DEFAULT_MULTI_GRID = [1, 1, 1] +# The cap for tf.clip_by_value. +_CLIP_CAP = 6 + + +class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])): + """A named tuple describing an Xception block. + + Its parts are: + scope: The scope of the block. + unit_fn: The Xception unit function which takes as input a tensor and + returns another tensor with the output of the Xception unit. + args: A list of length equal to the number of units in the block. The list + contains one dictionary for each unit in the block to serve as argument to + unit_fn. + """ + + +def fixed_padding(inputs, kernel_size, rate=1): + """Pads the input along the spatial dimensions independently of input size. + + Args: + inputs: A tensor of size [batch, height_in, width_in, channels]. + kernel_size: The kernel to be used in the conv2d or max_pool2d operation. + Should be a positive integer. + rate: An integer, rate for atrous convolution. + + Returns: + output: A tensor of size [batch, height_out, width_out, channels] with the + input, either intact (if kernel_size == 1) or padded (if kernel_size > 1). 
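+
+  For example (an illustrative case, not part of the original docstring): with
+  kernel_size=3 and rate=2, the effective kernel size is 5, so two rows and two
+  columns of zeros are added on each spatial side of the input.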
+ """ + kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1) + pad_total = kernel_size_effective - 1 + pad_beg = pad_total // 2 + pad_end = pad_total - pad_beg + padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], + [pad_beg, pad_end], [0, 0]]) + return padded_inputs + + +@slim.add_arg_scope +def separable_conv2d_same(inputs, + num_outputs, + kernel_size, + depth_multiplier, + stride, + rate=1, + use_explicit_padding=True, + regularize_depthwise=False, + scope=None, + **kwargs): + """Strided 2-D separable convolution with 'SAME' padding. + + If stride > 1 and use_explicit_padding is True, then we do explicit zero- + padding, followed by conv2d with 'VALID' padding. + + Note that + + net = separable_conv2d_same(inputs, num_outputs, 3, + depth_multiplier=1, stride=stride) + + is equivalent to + + net = slim.separable_conv2d(inputs, num_outputs, 3, + depth_multiplier=1, stride=1, padding='SAME') + net = resnet_utils.subsample(net, factor=stride) + + whereas + + net = slim.separable_conv2d(inputs, num_outputs, 3, stride=stride, + depth_multiplier=1, padding='SAME') + + is different when the input's height or width is even, which is why we add the + current function. + + Consequently, if the input feature map has even height or width, setting + `use_explicit_padding=False` will result in feature misalignment by one pixel + along the corresponding dimension. + + Args: + inputs: A 4-D tensor of size [batch, height_in, width_in, channels]. + num_outputs: An integer, the number of output filters. + kernel_size: An int with the kernel_size of the filters. + depth_multiplier: The number of depthwise convolution output channels for + each input channel. The total number of depthwise convolution output + channels will be equal to `num_filters_in * depth_multiplier`. + stride: An integer, the output stride. + rate: An integer, rate for atrous convolution. + use_explicit_padding: If True, use explicit padding to make the model fully + compatible with the open source version, otherwise use the native + Tensorflow 'SAME' padding. + regularize_depthwise: Whether or not apply L2-norm regularization on the + depthwise convolution weights. + scope: Scope. + **kwargs: additional keyword arguments to pass to slim.conv2d + + Returns: + output: A 4-D tensor of size [batch, height_out, width_out, channels] with + the convolution output. 
+ """ + def _separable_conv2d(padding): + """Wrapper for separable conv2d.""" + return slim.separable_conv2d(inputs, + num_outputs, + kernel_size, + depth_multiplier=depth_multiplier, + stride=stride, + rate=rate, + padding=padding, + scope=scope, + **kwargs) + def _split_separable_conv2d(padding): + """Splits separable conv2d into depthwise and pointwise conv2d.""" + outputs = slim.separable_conv2d(inputs, + None, + kernel_size, + depth_multiplier=depth_multiplier, + stride=stride, + rate=rate, + padding=padding, + scope=scope + '_depthwise', + **kwargs) + return slim.conv2d(outputs, + num_outputs, + 1, + scope=scope + '_pointwise', + **kwargs) + if stride == 1 or not use_explicit_padding: + if regularize_depthwise: + outputs = _separable_conv2d(padding='SAME') + else: + outputs = _split_separable_conv2d(padding='SAME') + else: + inputs = fixed_padding(inputs, kernel_size, rate) + if regularize_depthwise: + outputs = _separable_conv2d(padding='VALID') + else: + outputs = _split_separable_conv2d(padding='VALID') + return outputs + + +@slim.add_arg_scope +def xception_module(inputs, + depth_list, + skip_connection_type, + stride, + kernel_size=3, + unit_rate_list=None, + rate=1, + activation_fn_in_separable_conv=False, + regularize_depthwise=False, + outputs_collections=None, + scope=None, + use_bounded_activation=False, + use_explicit_padding=True, + use_squeeze_excite=False, + se_pool_size=None): + """An Xception module. + + The output of one Xception module is equal to the sum of `residual` and + `shortcut`, where `residual` is the feature computed by three separable + convolution. The `shortcut` is the feature computed by 1x1 convolution with + or without striding. In some cases, the `shortcut` path could be a simple + identity function or none (i.e, no shortcut). + + Note that we replace the max pooling operations in the Xception module with + another separable convolution with striding, since atrous rate is not properly + supported in current TensorFlow max pooling implementation. + + Args: + inputs: A tensor of size [batch, height, width, channels]. + depth_list: A list of three integers specifying the depth values of one + Xception module. + skip_connection_type: Skip connection type for the residual path. Only + supports 'conv', 'sum', or 'none'. + stride: The block unit's stride. Determines the amount of downsampling of + the units output compared to its input. + kernel_size: Integer, convolution kernel size. + unit_rate_list: A list of three integers, determining the unit rate for + each separable convolution in the xception module. + rate: An integer, rate for atrous convolution. + activation_fn_in_separable_conv: Includes activation function in the + separable convolution or not. + regularize_depthwise: Whether or not apply L2-norm regularization on the + depthwise convolution weights. + outputs_collections: Collection to add the Xception unit output. + scope: Optional variable_scope. + use_bounded_activation: Whether or not to use bounded activations. Bounded + activations better lend themselves to quantized inference. + use_explicit_padding: If True, use explicit padding to make the model fully + compatible with the open source version, otherwise use the native + Tensorflow 'SAME' padding. + use_squeeze_excite: Boolean, use squeeze-and-excitation or not. + se_pool_size: None or integer specifying the pooling size used in SE module. + + Returns: + The Xception module's output. 
+ + Raises: + ValueError: If depth_list and unit_rate_list do not contain three elements, + or if stride != 1 for the third separable convolution operation in the + residual path, or unsupported skip connection type. + """ + if len(depth_list) != 3: + raise ValueError('Expect three elements in depth_list.') + if unit_rate_list: + if len(unit_rate_list) != 3: + raise ValueError('Expect three elements in unit_rate_list.') + + with tf.variable_scope(scope, 'xception_module', [inputs]) as sc: + residual = inputs + + def _separable_conv(features, depth, kernel_size, depth_multiplier, + regularize_depthwise, rate, stride, scope): + """Separable conv block.""" + if activation_fn_in_separable_conv: + activation_fn = tf.nn.relu6 if use_bounded_activation else tf.nn.relu + else: + if use_bounded_activation: + # When use_bounded_activation is True, we clip the feature values and + # apply relu6 for activation. + activation_fn = lambda x: tf.clip_by_value(x, -_CLIP_CAP, _CLIP_CAP) + features = tf.nn.relu6(features) + else: + # Original network design. + activation_fn = None + features = tf.nn.relu(features) + return separable_conv2d_same(features, + depth, + kernel_size, + depth_multiplier=depth_multiplier, + stride=stride, + rate=rate, + activation_fn=activation_fn, + use_explicit_padding=use_explicit_padding, + regularize_depthwise=regularize_depthwise, + scope=scope) + for i in range(3): + residual = _separable_conv(residual, + depth_list[i], + kernel_size=kernel_size, + depth_multiplier=1, + regularize_depthwise=regularize_depthwise, + rate=rate*unit_rate_list[i], + stride=stride if i == 2 else 1, + scope='separable_conv' + str(i+1)) + if use_squeeze_excite: + residual = mobilenet_v3_ops.squeeze_excite( + input_tensor=residual, + squeeze_factor=16, + inner_activation_fn=tf.nn.relu, + gating_fn=lambda x: tf.nn.relu6(x+3)*0.16667, + pool=se_pool_size) + + if skip_connection_type == 'conv': + shortcut = slim.conv2d(inputs, + depth_list[-1], + [1, 1], + stride=stride, + activation_fn=None, + scope='shortcut') + if use_bounded_activation: + residual = tf.clip_by_value(residual, -_CLIP_CAP, _CLIP_CAP) + shortcut = tf.clip_by_value(shortcut, -_CLIP_CAP, _CLIP_CAP) + outputs = residual + shortcut + if use_bounded_activation: + outputs = tf.nn.relu6(outputs) + elif skip_connection_type == 'sum': + if use_bounded_activation: + residual = tf.clip_by_value(residual, -_CLIP_CAP, _CLIP_CAP) + inputs = tf.clip_by_value(inputs, -_CLIP_CAP, _CLIP_CAP) + outputs = residual + inputs + if use_bounded_activation: + outputs = tf.nn.relu6(outputs) + elif skip_connection_type == 'none': + outputs = residual + else: + raise ValueError('Unsupported skip connection type.') + + return slim.utils.collect_named_outputs(outputs_collections, + sc.name, + outputs) + + +@slim.add_arg_scope +def stack_blocks_dense(net, + blocks, + output_stride=None, + outputs_collections=None): + """Stacks Xception blocks and controls output feature density. + + First, this function creates scopes for the Xception in the form of + 'block_name/unit_1', 'block_name/unit_2', etc. + + Second, this function allows the user to explicitly control the output + stride, which is the ratio of the input to output spatial resolution. This + is useful for dense prediction tasks such as semantic segmentation or + object detection. + + Control of the output feature density is implemented by atrous convolution. + + Args: + net: A tensor of size [batch, height, width, channels]. + blocks: A list of length equal to the number of Xception blocks. 
Each + element is an Xception Block object describing the units in the block. + output_stride: If None, then the output will be computed at the nominal + network stride. If output_stride is not None, it specifies the requested + ratio of input to output spatial resolution, which needs to be equal to + the product of unit strides from the start up to some level of Xception. + For example, if the Xception employs units with strides 1, 2, 1, 3, 4, 1, + then valid values for the output_stride are 1, 2, 6, 24 or None (which + is equivalent to output_stride=24). + outputs_collections: Collection to add the Xception block outputs. + + Returns: + net: Output tensor with stride equal to the specified output_stride. + + Raises: + ValueError: If the target output_stride is not valid. + """ + # The current_stride variable keeps track of the effective stride of the + # activations. This allows us to invoke atrous convolution whenever applying + # the next residual unit would result in the activations having stride larger + # than the target output_stride. + current_stride = 1 + + # The atrous convolution rate parameter. + rate = 1 + + for block in blocks: + with tf.variable_scope(block.scope, 'block', [net]) as sc: + for i, unit in enumerate(block.args): + if output_stride is not None and current_stride > output_stride: + raise ValueError('The target output_stride cannot be reached.') + with tf.variable_scope('unit_%d' % (i + 1), values=[net]): + # If we have reached the target output_stride, then we need to employ + # atrous convolution with stride=1 and multiply the atrous rate by the + # current unit's stride for use in subsequent layers. + if output_stride is not None and current_stride == output_stride: + net = block.unit_fn(net, rate=rate, **dict(unit, stride=1)) + rate *= unit.get('stride', 1) + else: + net = block.unit_fn(net, rate=1, **unit) + current_stride *= unit.get('stride', 1) + + # Collect activations at the block's end before performing subsampling. + net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net) + + if output_stride is not None and current_stride != output_stride: + raise ValueError('The target output_stride cannot be reached.') + + return net + + +def xception(inputs, + blocks, + num_classes=None, + is_training=True, + global_pool=True, + keep_prob=0.5, + output_stride=None, + reuse=None, + scope=None, + sync_batch_norm_method='None'): + """Generator for Xception models. + + This function generates a family of Xception models. See the xception_*() + methods for specific model instantiations, obtained by selecting different + block instantiations that produce Xception of various depths. + + Args: + inputs: A tensor of size [batch, height_in, width_in, channels]. Must be + floating point. If a pretrained checkpoint is used, pixel values should be + the same as during training (see go/slim-classification-models for + specifics). + blocks: A list of length equal to the number of Xception blocks. Each + element is an Xception Block object describing the units in the block. + num_classes: Number of predicted classes for classification tasks. + If 0 or None, we return the features before the logit layer. + is_training: whether batch_norm layers are in training mode. + global_pool: If True, we perform global average pooling before computing the + logits. Set to True for image classification, False for dense prediction. + keep_prob: Keep probability used in the pre-logits dropout layer. 
+ output_stride: If None, then the output will be computed at the nominal + network stride. If output_stride is not None, it specifies the requested + ratio of input to output spatial resolution. + reuse: whether or not the network and its variables should be reused. To be + able to reuse 'scope' must be given. + scope: Optional variable_scope. + sync_batch_norm_method: String, sync batchnorm method. Currently only + support `None`. + + Returns: + net: A rank-4 tensor of size [batch, height_out, width_out, channels_out]. + If global_pool is False, then height_out and width_out are reduced by a + factor of output_stride compared to the respective height_in and width_in, + else both height_out and width_out equal one. If num_classes is 0 or None, + then net is the output of the last Xception block, potentially after + global average pooling. If num_classes is a non-zero integer, net contains + the pre-softmax activations. + end_points: A dictionary from components of the network to the corresponding + activation. + + Raises: + ValueError: If the target output_stride is not valid. + """ + with tf.variable_scope( + scope, 'xception', [inputs], reuse=reuse) as sc: + end_points_collection = sc.original_name_scope + 'end_points' + batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method) + with slim.arg_scope([slim.conv2d, + slim.separable_conv2d, + xception_module, + stack_blocks_dense], + outputs_collections=end_points_collection): + with slim.arg_scope([batch_norm], is_training=is_training): + net = inputs + if output_stride is not None: + if output_stride % 2 != 0: + raise ValueError('The output_stride needs to be a multiple of 2.') + output_stride //= 2 + # Root block function operated on inputs. + net = resnet_utils.conv2d_same(net, 32, 3, stride=2, + scope='entry_flow/conv1_1') + net = resnet_utils.conv2d_same(net, 64, 3, stride=1, + scope='entry_flow/conv1_2') + + # Extract features for entry_flow, middle_flow, and exit_flow. + net = stack_blocks_dense(net, blocks, output_stride) + + # Convert end_points_collection into a dictionary of end_points. + end_points = slim.utils.convert_collection_to_dict( + end_points_collection, clear_collection=True) + + if global_pool: + # Global average pooling. + net = tf.reduce_mean(net, [1, 2], name='global_pool', keepdims=True) + end_points['global_pool'] = net + if num_classes: + net = slim.dropout(net, keep_prob=keep_prob, is_training=is_training, + scope='prelogits_dropout') + net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, + normalizer_fn=None, scope='logits') + end_points[sc.name + '/logits'] = net + end_points['predictions'] = slim.softmax(net, scope='predictions') + return net, end_points + + +def xception_block(scope, + depth_list, + skip_connection_type, + activation_fn_in_separable_conv, + regularize_depthwise, + num_units, + stride, + kernel_size=3, + unit_rate_list=None, + use_squeeze_excite=False, + se_pool_size=None): + """Helper function for creating a Xception block. + + Args: + scope: The scope of the block. + depth_list: The depth of the bottleneck layer for each unit. + skip_connection_type: Skip connection type for the residual path. Only + supports 'conv', 'sum', or 'none'. + activation_fn_in_separable_conv: Includes activation function in the + separable convolution or not. + regularize_depthwise: Whether or not apply L2-norm regularization on the + depthwise convolution weights. + num_units: The number of units in the block. + stride: The stride of the block, implemented as a stride in the last unit. 
+ All other units have stride=1. + kernel_size: Integer, convolution kernel size. + unit_rate_list: A list of three integers, determining the unit rate in the + corresponding xception block. + use_squeeze_excite: Boolean, use squeeze-and-excitation or not. + se_pool_size: None or integer specifying the pooling size used in SE module. + + Returns: + An Xception block. + """ + if unit_rate_list is None: + unit_rate_list = _DEFAULT_MULTI_GRID + return Block(scope, xception_module, [{ + 'depth_list': depth_list, + 'skip_connection_type': skip_connection_type, + 'activation_fn_in_separable_conv': activation_fn_in_separable_conv, + 'regularize_depthwise': regularize_depthwise, + 'stride': stride, + 'kernel_size': kernel_size, + 'unit_rate_list': unit_rate_list, + 'use_squeeze_excite': use_squeeze_excite, + 'se_pool_size': se_pool_size, + }] * num_units) + + +def xception_41(inputs, + num_classes=None, + is_training=True, + global_pool=True, + keep_prob=0.5, + output_stride=None, + regularize_depthwise=False, + multi_grid=None, + reuse=None, + scope='xception_41', + sync_batch_norm_method='None'): + """Xception-41 model.""" + blocks = [ + xception_block('entry_flow/block1', + depth_list=[128, 128, 128], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2), + xception_block('entry_flow/block2', + depth_list=[256, 256, 256], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2), + xception_block('entry_flow/block3', + depth_list=[728, 728, 728], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2), + xception_block('middle_flow/block1', + depth_list=[728, 728, 728], + skip_connection_type='sum', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=8, + stride=1), + xception_block('exit_flow/block1', + depth_list=[728, 1024, 1024], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2), + xception_block('exit_flow/block2', + depth_list=[1536, 1536, 2048], + skip_connection_type='none', + activation_fn_in_separable_conv=True, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=1, + unit_rate_list=multi_grid), + ] + return xception(inputs, + blocks=blocks, + num_classes=num_classes, + is_training=is_training, + global_pool=global_pool, + keep_prob=keep_prob, + output_stride=output_stride, + reuse=reuse, + scope=scope, + sync_batch_norm_method=sync_batch_norm_method) + + +def xception_65_factory(inputs, + num_classes=None, + is_training=True, + global_pool=True, + keep_prob=0.5, + output_stride=None, + regularize_depthwise=False, + kernel_size=3, + multi_grid=None, + reuse=None, + use_squeeze_excite=False, + se_pool_size=None, + scope='xception_65', + sync_batch_norm_method='None'): + """Xception-65 model factory.""" + blocks = [ + xception_block('entry_flow/block1', + depth_list=[128, 128, 128], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2, + kernel_size=kernel_size, + use_squeeze_excite=False, + se_pool_size=se_pool_size), + xception_block('entry_flow/block2', + depth_list=[256, 256, 256], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + 
regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2, + kernel_size=kernel_size, + use_squeeze_excite=False, + se_pool_size=se_pool_size), + xception_block('entry_flow/block3', + depth_list=[728, 728, 728], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2, + kernel_size=kernel_size, + use_squeeze_excite=use_squeeze_excite, + se_pool_size=se_pool_size), + xception_block('middle_flow/block1', + depth_list=[728, 728, 728], + skip_connection_type='sum', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=16, + stride=1, + kernel_size=kernel_size, + use_squeeze_excite=use_squeeze_excite, + se_pool_size=se_pool_size), + xception_block('exit_flow/block1', + depth_list=[728, 1024, 1024], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2, + kernel_size=kernel_size, + use_squeeze_excite=use_squeeze_excite, + se_pool_size=se_pool_size), + xception_block('exit_flow/block2', + depth_list=[1536, 1536, 2048], + skip_connection_type='none', + activation_fn_in_separable_conv=True, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=1, + kernel_size=kernel_size, + unit_rate_list=multi_grid, + use_squeeze_excite=False, + se_pool_size=se_pool_size), + ] + return xception(inputs, + blocks=blocks, + num_classes=num_classes, + is_training=is_training, + global_pool=global_pool, + keep_prob=keep_prob, + output_stride=output_stride, + reuse=reuse, + scope=scope, + sync_batch_norm_method=sync_batch_norm_method) + + +def xception_65(inputs, + num_classes=None, + is_training=True, + global_pool=True, + keep_prob=0.5, + output_stride=None, + regularize_depthwise=False, + multi_grid=None, + reuse=None, + scope='xception_65', + sync_batch_norm_method='None'): + """Xception-65 model.""" + return xception_65_factory( + inputs=inputs, + num_classes=num_classes, + is_training=is_training, + global_pool=global_pool, + keep_prob=keep_prob, + output_stride=output_stride, + regularize_depthwise=regularize_depthwise, + multi_grid=multi_grid, + reuse=reuse, + scope=scope, + use_squeeze_excite=False, + se_pool_size=None, + sync_batch_norm_method=sync_batch_norm_method) + + +def xception_71_factory(inputs, + num_classes=None, + is_training=True, + global_pool=True, + keep_prob=0.5, + output_stride=None, + regularize_depthwise=False, + kernel_size=3, + multi_grid=None, + reuse=None, + scope='xception_71', + use_squeeze_excite=False, + se_pool_size=None, + sync_batch_norm_method='None'): + """Xception-71 model factory.""" + blocks = [ + xception_block('entry_flow/block1', + depth_list=[128, 128, 128], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2, + kernel_size=kernel_size, + use_squeeze_excite=False, + se_pool_size=se_pool_size), + xception_block('entry_flow/block2', + depth_list=[256, 256, 256], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=1, + kernel_size=kernel_size, + use_squeeze_excite=False, + se_pool_size=se_pool_size), + xception_block('entry_flow/block3', + depth_list=[256, 256, 256], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2, + kernel_size=kernel_size, + 
use_squeeze_excite=False, + se_pool_size=se_pool_size), + xception_block('entry_flow/block4', + depth_list=[728, 728, 728], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=1, + kernel_size=kernel_size, + use_squeeze_excite=use_squeeze_excite, + se_pool_size=se_pool_size), + xception_block('entry_flow/block5', + depth_list=[728, 728, 728], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2, + kernel_size=kernel_size, + use_squeeze_excite=use_squeeze_excite, + se_pool_size=se_pool_size), + xception_block('middle_flow/block1', + depth_list=[728, 728, 728], + skip_connection_type='sum', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=16, + stride=1, + kernel_size=kernel_size, + use_squeeze_excite=use_squeeze_excite, + se_pool_size=se_pool_size), + xception_block('exit_flow/block1', + depth_list=[728, 1024, 1024], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2, + kernel_size=kernel_size, + use_squeeze_excite=use_squeeze_excite, + se_pool_size=se_pool_size), + xception_block('exit_flow/block2', + depth_list=[1536, 1536, 2048], + skip_connection_type='none', + activation_fn_in_separable_conv=True, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=1, + kernel_size=kernel_size, + unit_rate_list=multi_grid, + use_squeeze_excite=False, + se_pool_size=se_pool_size), + ] + return xception(inputs, + blocks=blocks, + num_classes=num_classes, + is_training=is_training, + global_pool=global_pool, + keep_prob=keep_prob, + output_stride=output_stride, + reuse=reuse, + scope=scope, + sync_batch_norm_method=sync_batch_norm_method) + + +def xception_71(inputs, + num_classes=None, + is_training=True, + global_pool=True, + keep_prob=0.5, + output_stride=None, + regularize_depthwise=False, + multi_grid=None, + reuse=None, + scope='xception_71', + sync_batch_norm_method='None'): + """Xception-71 model.""" + return xception_71_factory( + inputs=inputs, + num_classes=num_classes, + is_training=is_training, + global_pool=global_pool, + keep_prob=keep_prob, + output_stride=output_stride, + regularize_depthwise=regularize_depthwise, + multi_grid=multi_grid, + reuse=reuse, + scope=scope, + use_squeeze_excite=False, + se_pool_size=None, + sync_batch_norm_method=sync_batch_norm_method) + + +def xception_arg_scope(weight_decay=0.00004, + batch_norm_decay=0.9997, + batch_norm_epsilon=0.001, + batch_norm_scale=True, + weights_initializer_stddev=0.09, + regularize_depthwise=False, + use_batch_norm=True, + use_bounded_activation=False, + sync_batch_norm_method='None'): + """Defines the default Xception arg scope. + + Args: + weight_decay: The weight decay to use for regularizing the model. + batch_norm_decay: The moving average decay when estimating layer activation + statistics in batch normalization. + batch_norm_epsilon: Small constant to prevent division by zero when + normalizing activations by their variance in batch normalization. + batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the + activations in the batch normalization layer. + weights_initializer_stddev: The standard deviation of the trunctated normal + weight initializer. + regularize_depthwise: Whether or not apply L2-norm regularization on the + depthwise convolution weights. 
+ use_batch_norm: Whether or not to use batch normalization. + use_bounded_activation: Whether or not to use bounded activations. Bounded + activations better lend themselves to quantized inference. + sync_batch_norm_method: String, sync batchnorm method. Currently only + support `None`. Also, it is only effective for Xception. + + Returns: + An `arg_scope` to use for the Xception models. + """ + batch_norm_params = { + 'decay': batch_norm_decay, + 'epsilon': batch_norm_epsilon, + 'scale': batch_norm_scale, + } + if regularize_depthwise: + depthwise_regularizer = slim.l2_regularizer(weight_decay) + else: + depthwise_regularizer = None + activation_fn = tf.nn.relu6 if use_bounded_activation else tf.nn.relu + batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method) + with slim.arg_scope( + [slim.conv2d, slim.separable_conv2d], + weights_initializer=tf.truncated_normal_initializer( + stddev=weights_initializer_stddev), + activation_fn=activation_fn, + normalizer_fn=batch_norm if use_batch_norm else None): + with slim.arg_scope([batch_norm], **batch_norm_params): + with slim.arg_scope( + [slim.conv2d], + weights_regularizer=slim.l2_regularizer(weight_decay)): + with slim.arg_scope( + [slim.separable_conv2d], + weights_regularizer=depthwise_regularizer): + with slim.arg_scope( + [xception_module], + use_bounded_activation=use_bounded_activation, + use_explicit_padding=not use_bounded_activation) as arg_sc: + return arg_sc diff --git a/models/research/deeplab/core/xception_test.py b/models/research/deeplab/core/xception_test.py new file mode 100644 index 0000000000000000000000000000000000000000..fc338daa6e56d1290f5a9330a6728c1f8512881e --- /dev/null +++ b/models/research/deeplab/core/xception_test.py @@ -0,0 +1,488 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for xception.py.""" +import numpy as np +import six +import tensorflow as tf +from tensorflow.contrib import slim as contrib_slim + +from deeplab.core import xception +from tensorflow.contrib.slim.nets import resnet_utils + +slim = contrib_slim + + +def create_test_input(batch, height, width, channels): + """Create test input tensor.""" + if None in [batch, height, width, channels]: + return tf.placeholder(tf.float32, (batch, height, width, channels)) + else: + return tf.cast( + np.tile( + np.reshape( + np.reshape(np.arange(height), [height, 1]) + + np.reshape(np.arange(width), [1, width]), + [1, height, width, 1]), + [batch, 1, 1, channels]), + tf.float32) + + +class UtilityFunctionTest(tf.test.TestCase): + + def testSeparableConv2DSameWithInputEvenSize(self): + n, n2 = 4, 2 + + # Input image. + x = create_test_input(1, n, n, 1) + + # Convolution kernel. 
+ dw = create_test_input(1, 3, 3, 1) + dw = tf.reshape(dw, [3, 3, 1, 1]) + + tf.get_variable('Conv/depthwise_weights', initializer=dw) + tf.get_variable('Conv/pointwise_weights', + initializer=tf.ones([1, 1, 1, 1])) + tf.get_variable('Conv/biases', initializer=tf.zeros([1])) + tf.get_variable_scope().reuse_variables() + + y1 = slim.separable_conv2d(x, 1, [3, 3], depth_multiplier=1, + stride=1, scope='Conv') + y1_expected = tf.cast([[14, 28, 43, 26], + [28, 48, 66, 37], + [43, 66, 84, 46], + [26, 37, 46, 22]], tf.float32) + y1_expected = tf.reshape(y1_expected, [1, n, n, 1]) + + y2 = resnet_utils.subsample(y1, 2) + y2_expected = tf.cast([[14, 43], + [43, 84]], tf.float32) + y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1]) + + y3 = xception.separable_conv2d_same(x, 1, 3, depth_multiplier=1, + regularize_depthwise=True, + stride=2, scope='Conv') + y3_expected = y2_expected + + y4 = slim.separable_conv2d(x, 1, [3, 3], depth_multiplier=1, + stride=2, scope='Conv') + y4_expected = tf.cast([[48, 37], + [37, 22]], tf.float32) + y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1]) + + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + self.assertAllClose(y1.eval(), y1_expected.eval()) + self.assertAllClose(y2.eval(), y2_expected.eval()) + self.assertAllClose(y3.eval(), y3_expected.eval()) + self.assertAllClose(y4.eval(), y4_expected.eval()) + + def testSeparableConv2DSameWithInputOddSize(self): + n, n2 = 5, 3 + + # Input image. + x = create_test_input(1, n, n, 1) + + # Convolution kernel. + dw = create_test_input(1, 3, 3, 1) + dw = tf.reshape(dw, [3, 3, 1, 1]) + + tf.get_variable('Conv/depthwise_weights', initializer=dw) + tf.get_variable('Conv/pointwise_weights', + initializer=tf.ones([1, 1, 1, 1])) + tf.get_variable('Conv/biases', initializer=tf.zeros([1])) + tf.get_variable_scope().reuse_variables() + + y1 = slim.separable_conv2d(x, 1, [3, 3], depth_multiplier=1, + stride=1, scope='Conv') + y1_expected = tf.cast([[14, 28, 43, 58, 34], + [28, 48, 66, 84, 46], + [43, 66, 84, 102, 55], + [58, 84, 102, 120, 64], + [34, 46, 55, 64, 30]], tf.float32) + y1_expected = tf.reshape(y1_expected, [1, n, n, 1]) + + y2 = resnet_utils.subsample(y1, 2) + y2_expected = tf.cast([[14, 43, 34], + [43, 84, 55], + [34, 55, 30]], tf.float32) + y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1]) + + y3 = xception.separable_conv2d_same(x, 1, 3, depth_multiplier=1, + regularize_depthwise=True, + stride=2, scope='Conv') + y3_expected = y2_expected + + y4 = slim.separable_conv2d(x, 1, [3, 3], depth_multiplier=1, + stride=2, scope='Conv') + y4_expected = y2_expected + + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + self.assertAllClose(y1.eval(), y1_expected.eval()) + self.assertAllClose(y2.eval(), y2_expected.eval()) + self.assertAllClose(y3.eval(), y3_expected.eval()) + self.assertAllClose(y4.eval(), y4_expected.eval()) + + +class XceptionNetworkTest(tf.test.TestCase): + """Tests with small Xception network.""" + + def _xception_small(self, + inputs, + num_classes=None, + is_training=True, + global_pool=True, + output_stride=None, + regularize_depthwise=True, + reuse=None, + scope='xception_small'): + """A shallow and thin Xception for faster tests.""" + block = xception.xception_block + blocks = [ + block('entry_flow/block1', + depth_list=[1, 1, 1], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2), + block('entry_flow/block2', + depth_list=[2, 2, 2], + 
skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2), + block('entry_flow/block3', + depth_list=[4, 4, 4], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=1), + block('entry_flow/block4', + depth_list=[4, 4, 4], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2), + block('middle_flow/block1', + depth_list=[4, 4, 4], + skip_connection_type='sum', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=2, + stride=1), + block('exit_flow/block1', + depth_list=[8, 8, 8], + skip_connection_type='conv', + activation_fn_in_separable_conv=False, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=2), + block('exit_flow/block2', + depth_list=[16, 16, 16], + skip_connection_type='none', + activation_fn_in_separable_conv=True, + regularize_depthwise=regularize_depthwise, + num_units=1, + stride=1), + ] + return xception.xception(inputs, + blocks=blocks, + num_classes=num_classes, + is_training=is_training, + global_pool=global_pool, + output_stride=output_stride, + reuse=reuse, + scope=scope) + + def testClassificationEndPoints(self): + global_pool = True + num_classes = 3 + inputs = create_test_input(2, 32, 32, 3) + with slim.arg_scope(xception.xception_arg_scope()): + logits, end_points = self._xception_small( + inputs, + num_classes=num_classes, + global_pool=global_pool, + scope='xception') + self.assertTrue( + logits.op.name.startswith('xception/logits')) + self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes]) + self.assertTrue('predictions' in end_points) + self.assertListEqual(end_points['predictions'].get_shape().as_list(), + [2, 1, 1, num_classes]) + self.assertTrue('global_pool' in end_points) + self.assertListEqual(end_points['global_pool'].get_shape().as_list(), + [2, 1, 1, 16]) + + def testEndpointNames(self): + global_pool = True + num_classes = 3 + inputs = create_test_input(2, 32, 32, 3) + with slim.arg_scope(xception.xception_arg_scope()): + _, end_points = self._xception_small( + inputs, + num_classes=num_classes, + global_pool=global_pool, + scope='xception') + expected = [ + 'xception/entry_flow/conv1_1', + 'xception/entry_flow/conv1_2', + 'xception/entry_flow/block1/unit_1/xception_module/separable_conv1', + 'xception/entry_flow/block1/unit_1/xception_module/separable_conv2', + 'xception/entry_flow/block1/unit_1/xception_module/separable_conv3', + 'xception/entry_flow/block1/unit_1/xception_module/shortcut', + 'xception/entry_flow/block1/unit_1/xception_module', + 'xception/entry_flow/block1', + 'xception/entry_flow/block2/unit_1/xception_module/separable_conv1', + 'xception/entry_flow/block2/unit_1/xception_module/separable_conv2', + 'xception/entry_flow/block2/unit_1/xception_module/separable_conv3', + 'xception/entry_flow/block2/unit_1/xception_module/shortcut', + 'xception/entry_flow/block2/unit_1/xception_module', + 'xception/entry_flow/block2', + 'xception/entry_flow/block3/unit_1/xception_module/separable_conv1', + 'xception/entry_flow/block3/unit_1/xception_module/separable_conv2', + 'xception/entry_flow/block3/unit_1/xception_module/separable_conv3', + 'xception/entry_flow/block3/unit_1/xception_module/shortcut', + 'xception/entry_flow/block3/unit_1/xception_module', + 'xception/entry_flow/block3', + 
'xception/entry_flow/block4/unit_1/xception_module/separable_conv1', + 'xception/entry_flow/block4/unit_1/xception_module/separable_conv2', + 'xception/entry_flow/block4/unit_1/xception_module/separable_conv3', + 'xception/entry_flow/block4/unit_1/xception_module/shortcut', + 'xception/entry_flow/block4/unit_1/xception_module', + 'xception/entry_flow/block4', + 'xception/middle_flow/block1/unit_1/xception_module/separable_conv1', + 'xception/middle_flow/block1/unit_1/xception_module/separable_conv2', + 'xception/middle_flow/block1/unit_1/xception_module/separable_conv3', + 'xception/middle_flow/block1/unit_1/xception_module', + 'xception/middle_flow/block1/unit_2/xception_module/separable_conv1', + 'xception/middle_flow/block1/unit_2/xception_module/separable_conv2', + 'xception/middle_flow/block1/unit_2/xception_module/separable_conv3', + 'xception/middle_flow/block1/unit_2/xception_module', + 'xception/middle_flow/block1', + 'xception/exit_flow/block1/unit_1/xception_module/separable_conv1', + 'xception/exit_flow/block1/unit_1/xception_module/separable_conv2', + 'xception/exit_flow/block1/unit_1/xception_module/separable_conv3', + 'xception/exit_flow/block1/unit_1/xception_module/shortcut', + 'xception/exit_flow/block1/unit_1/xception_module', + 'xception/exit_flow/block1', + 'xception/exit_flow/block2/unit_1/xception_module/separable_conv1', + 'xception/exit_flow/block2/unit_1/xception_module/separable_conv2', + 'xception/exit_flow/block2/unit_1/xception_module/separable_conv3', + 'xception/exit_flow/block2/unit_1/xception_module', + 'xception/exit_flow/block2', + 'global_pool', + 'xception/logits', + 'predictions', + ] + self.assertItemsEqual(list(end_points.keys()), expected) + + def testClassificationShapes(self): + global_pool = True + num_classes = 3 + inputs = create_test_input(2, 64, 64, 3) + with slim.arg_scope(xception.xception_arg_scope()): + _, end_points = self._xception_small( + inputs, + num_classes, + global_pool=global_pool, + scope='xception') + endpoint_to_shape = { + 'xception/entry_flow/conv1_1': [2, 32, 32, 32], + 'xception/entry_flow/block1': [2, 16, 16, 1], + 'xception/entry_flow/block2': [2, 8, 8, 2], + 'xception/entry_flow/block4': [2, 4, 4, 4], + 'xception/middle_flow/block1': [2, 4, 4, 4], + 'xception/exit_flow/block1': [2, 2, 2, 8], + 'xception/exit_flow/block2': [2, 2, 2, 16]} + for endpoint, shape in six.iteritems(endpoint_to_shape): + self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape) + + def testFullyConvolutionalEndpointShapes(self): + global_pool = False + num_classes = 3 + inputs = create_test_input(2, 65, 65, 3) + with slim.arg_scope(xception.xception_arg_scope()): + _, end_points = self._xception_small( + inputs, + num_classes, + global_pool=global_pool, + scope='xception') + endpoint_to_shape = { + 'xception/entry_flow/conv1_1': [2, 33, 33, 32], + 'xception/entry_flow/block1': [2, 17, 17, 1], + 'xception/entry_flow/block2': [2, 9, 9, 2], + 'xception/entry_flow/block4': [2, 5, 5, 4], + 'xception/middle_flow/block1': [2, 5, 5, 4], + 'xception/exit_flow/block1': [2, 3, 3, 8], + 'xception/exit_flow/block2': [2, 3, 3, 16]} + for endpoint, shape in six.iteritems(endpoint_to_shape): + self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape) + + def testAtrousFullyConvolutionalEndpointShapes(self): + global_pool = False + num_classes = 3 + output_stride = 8 + inputs = create_test_input(2, 65, 65, 3) + with slim.arg_scope(xception.xception_arg_scope()): + _, end_points = self._xception_small( + inputs, + num_classes, + 
global_pool=global_pool, + output_stride=output_stride, + scope='xception') + endpoint_to_shape = { + 'xception/entry_flow/block1': [2, 17, 17, 1], + 'xception/entry_flow/block2': [2, 9, 9, 2], + 'xception/entry_flow/block4': [2, 9, 9, 4], + 'xception/middle_flow/block1': [2, 9, 9, 4], + 'xception/exit_flow/block1': [2, 9, 9, 8], + 'xception/exit_flow/block2': [2, 9, 9, 16]} + for endpoint, shape in six.iteritems(endpoint_to_shape): + self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape) + + def testAtrousFullyConvolutionalValues(self): + """Verify dense feature extraction with atrous convolution.""" + nominal_stride = 32 + for output_stride in [4, 8, 16, 32, None]: + with slim.arg_scope(xception.xception_arg_scope()): + with tf.Graph().as_default(): + with self.test_session() as sess: + tf.set_random_seed(0) + inputs = create_test_input(2, 96, 97, 3) + # Dense feature extraction followed by subsampling. + output, _ = self._xception_small( + inputs, + None, + is_training=False, + global_pool=False, + output_stride=output_stride) + if output_stride is None: + factor = 1 + else: + factor = nominal_stride // output_stride + output = resnet_utils.subsample(output, factor) + # Make the two networks use the same weights. + tf.get_variable_scope().reuse_variables() + # Feature extraction at the nominal network rate. + expected, _ = self._xception_small( + inputs, + None, + is_training=False, + global_pool=False) + sess.run(tf.global_variables_initializer()) + self.assertAllClose(output.eval(), expected.eval(), + atol=1e-5, rtol=1e-5) + + def testUnknownBatchSize(self): + batch = 2 + height, width = 65, 65 + global_pool = True + num_classes = 10 + inputs = create_test_input(None, height, width, 3) + with slim.arg_scope(xception.xception_arg_scope()): + logits, _ = self._xception_small( + inputs, + num_classes, + global_pool=global_pool, + scope='xception') + self.assertTrue(logits.op.name.startswith('xception/logits')) + self.assertListEqual(logits.get_shape().as_list(), + [None, 1, 1, num_classes]) + images = create_test_input(batch, height, width, 3) + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + output = sess.run(logits, {inputs: images.eval()}) + self.assertEquals(output.shape, (batch, 1, 1, num_classes)) + + def testFullyConvolutionalUnknownHeightWidth(self): + batch = 2 + height, width = 65, 65 + global_pool = False + inputs = create_test_input(batch, None, None, 3) + with slim.arg_scope(xception.xception_arg_scope()): + output, _ = self._xception_small( + inputs, + None, + global_pool=global_pool) + self.assertListEqual(output.get_shape().as_list(), + [batch, None, None, 16]) + images = create_test_input(batch, height, width, 3) + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + output = sess.run(output, {inputs: images.eval()}) + self.assertEquals(output.shape, (batch, 3, 3, 16)) + + def testAtrousFullyConvolutionalUnknownHeightWidth(self): + batch = 2 + height, width = 65, 65 + global_pool = False + output_stride = 8 + inputs = create_test_input(batch, None, None, 3) + with slim.arg_scope(xception.xception_arg_scope()): + output, _ = self._xception_small( + inputs, + None, + global_pool=global_pool, + output_stride=output_stride) + self.assertListEqual(output.get_shape().as_list(), + [batch, None, None, 16]) + images = create_test_input(batch, height, width, 3) + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + output = sess.run(output, {inputs: images.eval()}) + 
self.assertEquals(output.shape, (batch, 9, 9, 16)) + + def testEndpointsReuse(self): + inputs = create_test_input(2, 32, 32, 3) + with slim.arg_scope(xception.xception_arg_scope()): + _, end_points0 = xception.xception_65( + inputs, + num_classes=10, + reuse=False) + with slim.arg_scope(xception.xception_arg_scope()): + _, end_points1 = xception.xception_65( + inputs, + num_classes=10, + reuse=True) + self.assertItemsEqual(list(end_points0.keys()), list(end_points1.keys())) + + def testUseBoundedAcitvation(self): + global_pool = False + num_classes = 3 + output_stride = 16 + for use_bounded_activation in (True, False): + tf.reset_default_graph() + inputs = create_test_input(2, 65, 65, 3) + with slim.arg_scope(xception.xception_arg_scope( + use_bounded_activation=use_bounded_activation)): + _, _ = self._xception_small( + inputs, + num_classes, + global_pool=global_pool, + output_stride=output_stride, + scope='xception') + for node in tf.get_default_graph().as_graph_def().node: + if node.op.startswith('Relu'): + self.assertEqual(node.op == 'Relu6', use_bounded_activation) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/deeplab/datasets/__init__.py b/models/research/deeplab/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/deeplab/datasets/build_ade20k_data.py b/models/research/deeplab/datasets/build_ade20k_data.py new file mode 100644 index 0000000000000000000000000000000000000000..fc04ed0db04c83af6deaad8b59087624e8bd40e8 --- /dev/null +++ b/models/research/deeplab/datasets/build_ade20k_data.py @@ -0,0 +1,123 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+
+"""Converts ADE20K data to TFRecord file format with Example protos."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+import math
+import os
+import random
+import sys
+import build_data
+from six.moves import range
+import tensorflow as tf
+
+FLAGS = tf.app.flags.FLAGS
+
+tf.app.flags.DEFINE_string(
+    'train_image_folder',
+    './ADE20K/ADEChallengeData2016/images/training',
+    'Folder containing training images')
+tf.app.flags.DEFINE_string(
+    'train_image_label_folder',
+    './ADE20K/ADEChallengeData2016/annotations/training',
+    'Folder containing annotations for training images')
+
+tf.app.flags.DEFINE_string(
+    'val_image_folder',
+    './ADE20K/ADEChallengeData2016/images/validation',
+    'Folder containing validation images')
+
+tf.app.flags.DEFINE_string(
+    'val_image_label_folder',
+    './ADE20K/ADEChallengeData2016/annotations/validation',
+    'Folder containing annotations for validation')
+
+tf.app.flags.DEFINE_string(
+    'output_dir', './ADE20K/tfrecord',
+    'Path to save the converted TFRecord files of TensorFlow examples')
+
+_NUM_SHARDS = 4
+
+
+def _convert_dataset(dataset_split, dataset_dir, dataset_label_dir):
+  """Converts the ADE20K dataset into TFRecord format.
+
+  Args:
+    dataset_split: Dataset split (e.g., train, val).
+    dataset_dir: Directory in which the dataset images are located.
+    dataset_label_dir: Directory in which the annotations are located.
+
+  Raises:
+    RuntimeError: If a loaded image and its label have different shapes.
+  """
+
+  img_names = tf.gfile.Glob(os.path.join(dataset_dir, '*.jpg'))
+  random.shuffle(img_names)
+  seg_names = []
+  for f in img_names:
+    # Get the filename without the extension.
+    basename = os.path.basename(f).split('.')[0]
+    # Derive the path of the corresponding annotation *.png.
+    seg = os.path.join(dataset_label_dir, basename + '.png')
+    seg_names.append(seg)
+
+  num_images = len(img_names)
+  num_per_shard = int(math.ceil(num_images / _NUM_SHARDS))
+
+  image_reader = build_data.ImageReader('jpeg', channels=3)
+  label_reader = build_data.ImageReader('png', channels=1)
+
+  for shard_id in range(_NUM_SHARDS):
+    output_filename = os.path.join(
+        FLAGS.output_dir,
+        '%s-%05d-of-%05d.tfrecord' % (dataset_split, shard_id, _NUM_SHARDS))
+    with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
+      start_idx = shard_id * num_per_shard
+      end_idx = min((shard_id + 1) * num_per_shard, num_images)
+      for i in range(start_idx, end_idx):
+        sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
+            i + 1, num_images, shard_id))
+        sys.stdout.flush()
+        # Read the image.
+        image_filename = img_names[i]
+        image_data = tf.gfile.FastGFile(image_filename, 'rb').read()
+        height, width = image_reader.read_image_dims(image_data)
+        # Read the semantic segmentation annotation.
+        seg_filename = seg_names[i]
+        seg_data = tf.gfile.FastGFile(seg_filename, 'rb').read()
+        seg_height, seg_width = label_reader.read_image_dims(seg_data)
+        if height != seg_height or width != seg_width:
+          raise RuntimeError('Shape mismatched between image and label.')
+        # Convert to tf example.
+ example = build_data.image_seg_to_tfexample( + image_data, img_names[i], height, width, seg_data) + tfrecord_writer.write(example.SerializeToString()) + sys.stdout.write('\n') + sys.stdout.flush() + + +def main(unused_argv): + tf.gfile.MakeDirs(FLAGS.output_dir) + _convert_dataset( + 'train', FLAGS.train_image_folder, FLAGS.train_image_label_folder) + _convert_dataset('val', FLAGS.val_image_folder, FLAGS.val_image_label_folder) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/deeplab/datasets/build_cityscapes_data.py b/models/research/deeplab/datasets/build_cityscapes_data.py new file mode 100644 index 0000000000000000000000000000000000000000..ce81baef20a460abaa634d3f1dcb6760a0858dec --- /dev/null +++ b/models/research/deeplab/datasets/build_cityscapes_data.py @@ -0,0 +1,188 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Converts Cityscapes data to TFRecord file format with Example protos. + +The Cityscapes dataset is expected to have the following directory structure: + + + cityscapes + - build_cityscapes_data.py (current working directiory). + - build_data.py + + cityscapesscripts + + annotation + + evaluation + + helpers + + preparation + + viewer + + gtFine + + train + + val + + test + + leftImg8bit + + train + + val + + test + + tfrecord + +This script converts data into sharded data files and save at tfrecord folder. + +Note that before running this script, the users should (1) register the +Cityscapes dataset website at https://www.cityscapes-dataset.com to +download the dataset, and (2) run the script provided by Cityscapes +`preparation/createTrainIdLabelImgs.py` to generate the training groundtruth. + +Also note that the tensorflow model will be trained with `TrainId' instead +of `EvalId' used on the evaluation server. Thus, the users need to convert +the predicted labels to `EvalId` for evaluation on the server. See the +vis.py for more details. + +The Example proto contains the following fields: + + image/encoded: encoded image content. + image/filename: image filename. + image/format: image file format. + image/height: image height. + image/width: image width. + image/channels: image channels. + image/segmentation/class/encoded: encoded semantic segmentation content. + image/segmentation/class/format: semantic segmentation file format. 
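+
+A record produced by this script can be parsed back with standard TF 1.x ops.
+A minimal sketch (the name `serialized_example` is illustrative):
+
+  features = tf.parse_single_example(serialized_example, {
+      'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
+      'image/segmentation/class/encoded':
+          tf.FixedLenFeature((), tf.string, default_value=''),
+  })
+  image = tf.image.decode_png(features['image/encoded'], channels=3)
+  label = tf.image.decode_png(
+      features['image/segmentation/class/encoded'], channels=1)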
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import glob +import math +import os.path +import re +import sys +import build_data +from six.moves import range +import tensorflow as tf + +FLAGS = tf.app.flags.FLAGS + +tf.app.flags.DEFINE_string('cityscapes_root', + './cityscapes', + 'Cityscapes dataset root folder.') + +tf.app.flags.DEFINE_string( + 'output_dir', + './tfrecord', + 'Path to save converted SSTable of TensorFlow examples.') + + +_NUM_SHARDS = 10 + +# A map from data type to folder name that saves the data. +_FOLDERS_MAP = { + 'image': 'leftImg8bit', + 'label': 'gtFine', +} + +# A map from data type to filename postfix. +_POSTFIX_MAP = { + 'image': '_leftImg8bit', + 'label': '_gtFine_labelTrainIds', +} + +# A map from data type to data format. +_DATA_FORMAT_MAP = { + 'image': 'png', + 'label': 'png', +} + +# Image file pattern. +_IMAGE_FILENAME_RE = re.compile('(.+)' + _POSTFIX_MAP['image']) + + +def _get_files(data, dataset_split): + """Gets files for the specified data type and dataset split. + + Args: + data: String, desired data ('image' or 'label'). + dataset_split: String, dataset split ('train', 'val', 'test') + + Returns: + A list of sorted file names or None when getting label for + test set. + """ + if data == 'label' and dataset_split == 'test': + return None + pattern = '*%s.%s' % (_POSTFIX_MAP[data], _DATA_FORMAT_MAP[data]) + search_files = os.path.join( + FLAGS.cityscapes_root, _FOLDERS_MAP[data], dataset_split, '*', pattern) + filenames = glob.glob(search_files) + return sorted(filenames) + + +def _convert_dataset(dataset_split): + """Converts the specified dataset split to TFRecord format. + + Args: + dataset_split: The dataset split (e.g., train, val). + + Raises: + RuntimeError: If loaded image and label have different shape, or if the + image file with specified postfix could not be found. + """ + image_files = _get_files('image', dataset_split) + label_files = _get_files('label', dataset_split) + + num_images = len(image_files) + num_per_shard = int(math.ceil(num_images / _NUM_SHARDS)) + + image_reader = build_data.ImageReader('png', channels=3) + label_reader = build_data.ImageReader('png', channels=1) + + for shard_id in range(_NUM_SHARDS): + shard_filename = '%s-%05d-of-%05d.tfrecord' % ( + dataset_split, shard_id, _NUM_SHARDS) + output_filename = os.path.join(FLAGS.output_dir, shard_filename) + with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer: + start_idx = shard_id * num_per_shard + end_idx = min((shard_id + 1) * num_per_shard, num_images) + for i in range(start_idx, end_idx): + sys.stdout.write('\r>> Converting image %d/%d shard %d' % ( + i + 1, num_images, shard_id)) + sys.stdout.flush() + # Read the image. + image_data = tf.gfile.FastGFile(image_files[i], 'rb').read() + height, width = image_reader.read_image_dims(image_data) + # Read the semantic segmentation annotation. + seg_data = tf.gfile.FastGFile(label_files[i], 'rb').read() + seg_height, seg_width = label_reader.read_image_dims(seg_data) + if height != seg_height or width != seg_width: + raise RuntimeError('Shape mismatched between image and label.') + # Convert to tf example. 
+ re_match = _IMAGE_FILENAME_RE.search(image_files[i]) + if re_match is None: + raise RuntimeError('Invalid image filename: ' + image_files[i]) + filename = os.path.basename(re_match.group(1)) + example = build_data.image_seg_to_tfexample( + image_data, filename, height, width, seg_data) + tfrecord_writer.write(example.SerializeToString()) + sys.stdout.write('\n') + sys.stdout.flush() + + +def main(unused_argv): + # Only support converting 'train' and 'val' sets for now. + for dataset_split in ['train', 'val']: + _convert_dataset(dataset_split) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/deeplab/datasets/build_data.py b/models/research/deeplab/datasets/build_data.py new file mode 100644 index 0000000000000000000000000000000000000000..45628674dbf3653ca0ca20014a968794bb8cd861 --- /dev/null +++ b/models/research/deeplab/datasets/build_data.py @@ -0,0 +1,161 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Contains common utility functions and classes for building dataset. + +This script contains utility functions and classes to converts dataset to +TFRecord file format with Example protos. + +The Example proto contains the following fields: + + image/encoded: encoded image content. + image/filename: image filename. + image/format: image file format. + image/height: image height. + image/width: image width. + image/channels: image channels. + image/segmentation/class/encoded: encoded semantic segmentation content. + image/segmentation/class/format: semantic segmentation file format. +""" +import collections +import six +import tensorflow as tf + +FLAGS = tf.app.flags.FLAGS + +tf.app.flags.DEFINE_enum('image_format', 'png', ['jpg', 'jpeg', 'png'], + 'Image format.') + +tf.app.flags.DEFINE_enum('label_format', 'png', ['png'], + 'Segmentation label format.') + +# A map from image format to expected data format. +_IMAGE_FORMAT_MAP = { + 'jpg': 'jpeg', + 'jpeg': 'jpeg', + 'png': 'png', +} + + +class ImageReader(object): + """Helper class that provides TensorFlow image coding utilities.""" + + def __init__(self, image_format='jpeg', channels=3): + """Class constructor. + + Args: + image_format: Image format. Only 'jpeg', 'jpg', or 'png' are supported. + channels: Image channels. + """ + with tf.Graph().as_default(): + self._decode_data = tf.placeholder(dtype=tf.string) + self._image_format = image_format + self._session = tf.Session() + if self._image_format in ('jpeg', 'jpg'): + self._decode = tf.image.decode_jpeg(self._decode_data, + channels=channels) + elif self._image_format == 'png': + self._decode = tf.image.decode_png(self._decode_data, + channels=channels) + + def read_image_dims(self, image_data): + """Reads the image dimensions. + + Args: + image_data: string of image data. + + Returns: + image_height and image_width. 
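+
+    Example (a minimal sketch; `image_bytes` is assumed to hold the encoded
+    contents of a JPEG file read from disk):
+
+      reader = ImageReader('jpeg', channels=3)
+      height, width = reader.read_image_dims(image_bytes)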
+ """ + image = self.decode_image(image_data) + return image.shape[:2] + + def decode_image(self, image_data): + """Decodes the image data string. + + Args: + image_data: string of image data. + + Returns: + Decoded image data. + + Raises: + ValueError: Value of image channels not supported. + """ + image = self._session.run(self._decode, + feed_dict={self._decode_data: image_data}) + if len(image.shape) != 3 or image.shape[2] not in (1, 3): + raise ValueError('The image channels not supported.') + + return image + + +def _int64_list_feature(values): + """Returns a TF-Feature of int64_list. + + Args: + values: A scalar or list of values. + + Returns: + A TF-Feature. + """ + if not isinstance(values, collections.Iterable): + values = [values] + + return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) + + +def _bytes_list_feature(values): + """Returns a TF-Feature of bytes. + + Args: + values: A string. + + Returns: + A TF-Feature. + """ + def norm2bytes(value): + return value.encode() if isinstance(value, str) and six.PY3 else value + + return tf.train.Feature( + bytes_list=tf.train.BytesList(value=[norm2bytes(values)])) + + +def image_seg_to_tfexample(image_data, filename, height, width, seg_data): + """Converts one image/segmentation pair to tf example. + + Args: + image_data: string of image data. + filename: image filename. + height: image height. + width: image width. + seg_data: string of semantic segmentation data. + + Returns: + tf example of one image/segmentation pair. + """ + return tf.train.Example(features=tf.train.Features(feature={ + 'image/encoded': _bytes_list_feature(image_data), + 'image/filename': _bytes_list_feature(filename), + 'image/format': _bytes_list_feature( + _IMAGE_FORMAT_MAP[FLAGS.image_format]), + 'image/height': _int64_list_feature(height), + 'image/width': _int64_list_feature(width), + 'image/channels': _int64_list_feature(3), + 'image/segmentation/class/encoded': ( + _bytes_list_feature(seg_data)), + 'image/segmentation/class/format': _bytes_list_feature( + FLAGS.label_format), + })) diff --git a/models/research/deeplab/datasets/build_voc2012_data.py b/models/research/deeplab/datasets/build_voc2012_data.py new file mode 100644 index 0000000000000000000000000000000000000000..f0bdecb6a0f954d90164ac64b55966d0fe754557 --- /dev/null +++ b/models/research/deeplab/datasets/build_voc2012_data.py @@ -0,0 +1,146 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Converts PASCAL VOC 2012 data to TFRecord file format with Example protos. + +PASCAL VOC 2012 dataset is expected to have the following directory structure: + + + pascal_voc_seg + - build_data.py + - build_voc2012_data.py (current working directory). 
+ + VOCdevkit + + VOC2012 + + JPEGImages + + SegmentationClass + + ImageSets + + Segmentation + + tfrecord + +Image folder: + ./VOCdevkit/VOC2012/JPEGImages + +Semantic segmentation annotations: + ./VOCdevkit/VOC2012/SegmentationClass + +list folder: + ./VOCdevkit/VOC2012/ImageSets/Segmentation + +This script converts data into sharded data files and save at tfrecord folder. + +The Example proto contains the following fields: + + image/encoded: encoded image content. + image/filename: image filename. + image/format: image file format. + image/height: image height. + image/width: image width. + image/channels: image channels. + image/segmentation/class/encoded: encoded semantic segmentation content. + image/segmentation/class/format: semantic segmentation file format. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import math +import os.path +import sys +import build_data +from six.moves import range +import tensorflow as tf + +FLAGS = tf.app.flags.FLAGS + +tf.app.flags.DEFINE_string('image_folder', + './VOCdevkit/VOC2012/JPEGImages', + 'Folder containing images.') + +tf.app.flags.DEFINE_string( + 'semantic_segmentation_folder', + './VOCdevkit/VOC2012/SegmentationClassRaw', + 'Folder containing semantic segmentation annotations.') + +tf.app.flags.DEFINE_string( + 'list_folder', + './VOCdevkit/VOC2012/ImageSets/Segmentation', + 'Folder containing lists for training and validation') + +tf.app.flags.DEFINE_string( + 'output_dir', + './tfrecord', + 'Path to save converted SSTable of TensorFlow examples.') + + +_NUM_SHARDS = 4 + + +def _convert_dataset(dataset_split): + """Converts the specified dataset split to TFRecord format. + + Args: + dataset_split: The dataset split (e.g., train, test). + + Raises: + RuntimeError: If loaded image and label have different shape. + """ + dataset = os.path.basename(dataset_split)[:-4] + sys.stdout.write('Processing ' + dataset) + filenames = [x.strip('\n') for x in open(dataset_split, 'r')] + num_images = len(filenames) + num_per_shard = int(math.ceil(num_images / _NUM_SHARDS)) + + image_reader = build_data.ImageReader('jpeg', channels=3) + label_reader = build_data.ImageReader('png', channels=1) + + for shard_id in range(_NUM_SHARDS): + output_filename = os.path.join( + FLAGS.output_dir, + '%s-%05d-of-%05d.tfrecord' % (dataset, shard_id, _NUM_SHARDS)) + with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer: + start_idx = shard_id * num_per_shard + end_idx = min((shard_id + 1) * num_per_shard, num_images) + for i in range(start_idx, end_idx): + sys.stdout.write('\r>> Converting image %d/%d shard %d' % ( + i + 1, len(filenames), shard_id)) + sys.stdout.flush() + # Read the image. + image_filename = os.path.join( + FLAGS.image_folder, filenames[i] + '.' + FLAGS.image_format) + image_data = tf.gfile.GFile(image_filename, 'rb').read() + height, width = image_reader.read_image_dims(image_data) + # Read the semantic segmentation annotation. + seg_filename = os.path.join( + FLAGS.semantic_segmentation_folder, + filenames[i] + '.' + FLAGS.label_format) + seg_data = tf.gfile.GFile(seg_filename, 'rb').read() + seg_height, seg_width = label_reader.read_image_dims(seg_data) + if height != seg_height or width != seg_width: + raise RuntimeError('Shape mismatched between image and label.') + # Convert to tf example. 
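+          # The bare image id from the split list (e.g. '2007_000033') is what
+          # ends up in the image/filename field below; the image and label
+          # formats come from the flags defined in build_data.py.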
+ example = build_data.image_seg_to_tfexample( + image_data, filenames[i], height, width, seg_data) + tfrecord_writer.write(example.SerializeToString()) + sys.stdout.write('\n') + sys.stdout.flush() + + +def main(unused_argv): + dataset_splits = tf.gfile.Glob(os.path.join(FLAGS.list_folder, '*.txt')) + for dataset_split in dataset_splits: + _convert_dataset(dataset_split) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/deeplab/datasets/convert_cityscapes.sh b/models/research/deeplab/datasets/convert_cityscapes.sh new file mode 100644 index 0000000000000000000000000000000000000000..a95b5d66aad79ae7cbd6ad2d3ee60550ab7f6239 --- /dev/null +++ b/models/research/deeplab/datasets/convert_cityscapes.sh @@ -0,0 +1,58 @@ +#!/bin/bash +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# +# Script to preprocess the Cityscapes dataset. Note (1) the users should +# register the Cityscapes dataset website at +# https://www.cityscapes-dataset.com/downloads/ to download the dataset, +# and (2) the users should download the utility scripts provided by +# Cityscapes at https://github.com/mcordts/cityscapesScripts. +# +# Usage: +# bash ./convert_cityscapes.sh +# +# The folder structure is assumed to be: +# + datasets +# - build_cityscapes_data.py +# - convert_cityscapes.sh +# + cityscapes +# + cityscapesscripts (downloaded scripts) +# + gtFine +# + leftImg8bit +# + +# Exit immediately if a command exits with a non-zero status. +set -e + +CURRENT_DIR=$(pwd) +WORK_DIR="." + +# Root path for Cityscapes dataset. +CITYSCAPES_ROOT="${WORK_DIR}/cityscapes" + +# Create training labels. +python "${CITYSCAPES_ROOT}/cityscapesscripts/preparation/createTrainIdLabelImgs.py" + +# Build TFRecords of the dataset. +# First, create output directory for storing TFRecords. +OUTPUT_DIR="${CITYSCAPES_ROOT}/tfrecord" +mkdir -p "${OUTPUT_DIR}" + +BUILD_SCRIPT="${CURRENT_DIR}/build_cityscapes_data.py" + +echo "Converting Cityscapes dataset..." +python "${BUILD_SCRIPT}" \ + --cityscapes_root="${CITYSCAPES_ROOT}" \ + --output_dir="${OUTPUT_DIR}" \ diff --git a/models/research/deeplab/datasets/data_generator.py b/models/research/deeplab/datasets/data_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..d84e66f9c48181d579a027daa08206491d995b65 --- /dev/null +++ b/models/research/deeplab/datasets/data_generator.py @@ -0,0 +1,350 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Wrapper for providing semantic segmentation data.
+
+The SegmentationDataset class provides both images and annotations (semantic
+segmentation and/or instance segmentation) for TensorFlow. Currently, we
+support the following datasets:
+
+1. PASCAL VOC 2012 (http://host.robots.ox.ac.uk/pascal/VOC/voc2012/).
+
+PASCAL VOC 2012 semantic segmentation dataset annotates 20 foreground objects
+(e.g., bike, person, and so on) and leaves all the other semantic classes as
+one background class. The dataset contains 1464, 1449, and 1456 annotated
+images for training, validation, and test, respectively.
+
+2. Cityscapes dataset (https://www.cityscapes-dataset.com)
+
+The Cityscapes dataset contains 19 semantic labels (such as road, person, car,
+and so on) for urban street scenes.
+
+3. ADE20K dataset (http://groups.csail.mit.edu/vision/datasets/ADE20K)
+
+The ADE20K dataset contains 150 semantic labels for both urban street scenes
+and indoor scenes.
+
+References:
+  M. Everingham, S. M. A. Eslami, L. V. Gool, C. K. I. Williams, J. Winn,
+  and A. Zisserman, The pascal visual object classes challenge a retrospective.
+  IJCV, 2014.
+
+  M. Cordts, M. Omran, S. Ramos, T. Rehfeld, M. Enzweiler, R. Benenson,
+  U. Franke, S. Roth, and B. Schiele, "The cityscapes dataset for semantic urban
+  scene understanding," In Proc. of CVPR, 2016.
+
+  B. Zhou, H. Zhao, X. Puig, S. Fidler, A. Barriuso, A. Torralba, "Scene Parsing
+  through ADE20K dataset", In Proc. of CVPR, 2017.
+"""
+
+import collections
+import os
+import tensorflow as tf
+from deeplab import common
+from deeplab import input_preprocess
+
+# Named tuple to describe the dataset properties.
+DatasetDescriptor = collections.namedtuple(
+    'DatasetDescriptor',
+    [
+        'splits_to_sizes',  # Splits of the dataset into training, val and test.
+        'num_classes',  # Number of semantic classes, including the
+                        # background class (if exists). For example, there
+                        # are 20 foreground classes + 1 background class in
+                        # the PASCAL VOC 2012 dataset. Thus, we set
+                        # num_classes=21.
+        'ignore_label',  # Ignore label value.
+    ])
+
+_CITYSCAPES_INFORMATION = DatasetDescriptor(
+    splits_to_sizes={'train_fine': 2975,
+                     'train_coarse': 22973,
+                     'trainval_fine': 3475,
+                     'trainval_coarse': 23473,
+                     'val_fine': 500,
+                     'test_fine': 1525},
+    num_classes=19,
+    ignore_label=255,
+)
+
+_PASCAL_VOC_SEG_INFORMATION = DatasetDescriptor(
+    splits_to_sizes={
+        'train': 1464,
+        'train_aug': 10582,
+        'trainval': 2913,
+        'val': 1449,
+    },
+    num_classes=21,
+    ignore_label=255,
+)
+
+_ADE20K_INFORMATION = DatasetDescriptor(
+    splits_to_sizes={
+        'train': 20210,  # num of samples in images/training
+        'val': 2000,  # num of samples in images/validation
+    },
+    num_classes=151,
+    ignore_label=0,
+)
+
+_DATASETS_INFORMATION = {
+    'cityscapes': _CITYSCAPES_INFORMATION,
+    'pascal_voc_seg': _PASCAL_VOC_SEG_INFORMATION,
+    'ade20k': _ADE20K_INFORMATION,
+}
+
+# Default file pattern of TFRecord of TensorFlow Example.
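+# For example, split_name='train' yields the pattern 'train-*', which matches
+# shards such as 'train-00000-of-00004.tfrecord' written by the
+# build_*_data.py conversion scripts.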
+_FILE_PATTERN = '%s-*' + + +def get_cityscapes_dataset_name(): + return 'cityscapes' + + +class Dataset(object): + """Represents input dataset for deeplab model.""" + + def __init__(self, + dataset_name, + split_name, + dataset_dir, + batch_size, + crop_size, + min_resize_value=None, + max_resize_value=None, + resize_factor=None, + min_scale_factor=1., + max_scale_factor=1., + scale_factor_step_size=0, + model_variant=None, + num_readers=1, + is_training=False, + should_shuffle=False, + should_repeat=False): + """Initializes the dataset. + + Args: + dataset_name: Dataset name. + split_name: A train/val Split name. + dataset_dir: The directory of the dataset sources. + batch_size: Batch size. + crop_size: The size used to crop the image and label. + min_resize_value: Desired size of the smaller image side. + max_resize_value: Maximum allowed size of the larger image side. + resize_factor: Resized dimensions are multiple of factor plus one. + min_scale_factor: Minimum scale factor value. + max_scale_factor: Maximum scale factor value. + scale_factor_step_size: The step size from min scale factor to max scale + factor. The input is randomly scaled based on the value of + (min_scale_factor, max_scale_factor, scale_factor_step_size). + model_variant: Model variant (string) for choosing how to mean-subtract + the images. See feature_extractor.network_map for supported model + variants. + num_readers: Number of readers for data provider. + is_training: Boolean, if dataset is for training or not. + should_shuffle: Boolean, if should shuffle the input data. + should_repeat: Boolean, if should repeat the input data. + + Raises: + ValueError: Dataset name and split name are not supported. + """ + if dataset_name not in _DATASETS_INFORMATION: + raise ValueError('The specified dataset is not supported yet.') + self.dataset_name = dataset_name + + splits_to_sizes = _DATASETS_INFORMATION[dataset_name].splits_to_sizes + + if split_name not in splits_to_sizes: + raise ValueError('data split name %s not recognized' % split_name) + + if model_variant is None: + tf.logging.warning('Please specify a model_variant. See ' + 'feature_extractor.network_map for supported model ' + 'variants.') + + self.split_name = split_name + self.dataset_dir = dataset_dir + self.batch_size = batch_size + self.crop_size = crop_size + self.min_resize_value = min_resize_value + self.max_resize_value = max_resize_value + self.resize_factor = resize_factor + self.min_scale_factor = min_scale_factor + self.max_scale_factor = max_scale_factor + self.scale_factor_step_size = scale_factor_step_size + self.model_variant = model_variant + self.num_readers = num_readers + self.is_training = is_training + self.should_shuffle = should_shuffle + self.should_repeat = should_repeat + + self.num_of_classes = _DATASETS_INFORMATION[self.dataset_name].num_classes + self.ignore_label = _DATASETS_INFORMATION[self.dataset_name].ignore_label + + def _parse_function(self, example_proto): + """Function to parse the example proto. + + Args: + example_proto: Proto in the format of tf.Example. + + Returns: + A dictionary with parsed image, label, height, width and image name. + + Raises: + ValueError: Label is of wrong shape. + """ + + # Currently only supports jpeg and png. + # Need to use this logic because the shape is not known for + # tf.image.decode_image and we rely on this info to + # extend label if necessary. 
+ def _decode_image(content, channels): + return tf.cond( + tf.image.is_jpeg(content), + lambda: tf.image.decode_jpeg(content, channels), + lambda: tf.image.decode_png(content, channels)) + + features = { + 'image/encoded': + tf.FixedLenFeature((), tf.string, default_value=''), + 'image/filename': + tf.FixedLenFeature((), tf.string, default_value=''), + 'image/format': + tf.FixedLenFeature((), tf.string, default_value='jpeg'), + 'image/height': + tf.FixedLenFeature((), tf.int64, default_value=0), + 'image/width': + tf.FixedLenFeature((), tf.int64, default_value=0), + 'image/segmentation/class/encoded': + tf.FixedLenFeature((), tf.string, default_value=''), + 'image/segmentation/class/format': + tf.FixedLenFeature((), tf.string, default_value='png'), + } + + parsed_features = tf.parse_single_example(example_proto, features) + + image = _decode_image(parsed_features['image/encoded'], channels=3) + + label = None + if self.split_name != common.TEST_SET: + label = _decode_image( + parsed_features['image/segmentation/class/encoded'], channels=1) + + image_name = parsed_features['image/filename'] + if image_name is None: + image_name = tf.constant('') + + sample = { + common.IMAGE: image, + common.IMAGE_NAME: image_name, + common.HEIGHT: parsed_features['image/height'], + common.WIDTH: parsed_features['image/width'], + } + + if label is not None: + if label.get_shape().ndims == 2: + label = tf.expand_dims(label, 2) + elif label.get_shape().ndims == 3 and label.shape.dims[2] == 1: + pass + else: + raise ValueError('Input label shape must be [height, width], or ' + '[height, width, 1].') + + label.set_shape([None, None, 1]) + + sample[common.LABELS_CLASS] = label + + return sample + + def _preprocess_image(self, sample): + """Preprocesses the image and label. + + Args: + sample: A sample containing image and label. + + Returns: + sample: Sample with preprocessed image and label. + + Raises: + ValueError: Ground truth label not provided during training. + """ + image = sample[common.IMAGE] + label = sample[common.LABELS_CLASS] + + original_image, image, label = input_preprocess.preprocess_image_and_label( + image=image, + label=label, + crop_height=self.crop_size[0], + crop_width=self.crop_size[1], + min_resize_value=self.min_resize_value, + max_resize_value=self.max_resize_value, + resize_factor=self.resize_factor, + min_scale_factor=self.min_scale_factor, + max_scale_factor=self.max_scale_factor, + scale_factor_step_size=self.scale_factor_step_size, + ignore_label=self.ignore_label, + is_training=self.is_training, + model_variant=self.model_variant) + + sample[common.IMAGE] = image + + if not self.is_training: + # Original image is only used during visualization. + sample[common.ORIGINAL_IMAGE] = original_image + + if label is not None: + sample[common.LABEL] = label + + # Remove common.LABEL_CLASS key in the sample since it is only used to + # derive label and not used in training and evaluation. + sample.pop(common.LABELS_CLASS, None) + + return sample + + def get_one_shot_iterator(self): + """Gets an iterator that iterates across the dataset once. + + Returns: + An iterator of type tf.data.Iterator. 
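+
+    Example (a minimal sketch; the TFRecord directory path is a placeholder):
+
+      dataset = Dataset('pascal_voc_seg', 'val', '/path/to/tfrecord',
+                        batch_size=1, crop_size=[513, 513],
+                        model_variant='mobilenet_v2')
+      samples = dataset.get_one_shot_iterator().get_next()
+      with tf.Session() as sess:
+        batch = sess.run(samples)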
+ """ + + files = self._get_all_files() + + dataset = ( + tf.data.TFRecordDataset(files, num_parallel_reads=self.num_readers) + .map(self._parse_function, num_parallel_calls=self.num_readers) + .map(self._preprocess_image, num_parallel_calls=self.num_readers)) + + if self.should_shuffle: + dataset = dataset.shuffle(buffer_size=100) + + if self.should_repeat: + dataset = dataset.repeat() # Repeat forever for training. + else: + dataset = dataset.repeat(1) + + dataset = dataset.batch(self.batch_size).prefetch(self.batch_size) + return dataset.make_one_shot_iterator() + + def _get_all_files(self): + """Gets all the files to read data from. + + Returns: + A list of input files. + """ + file_pattern = _FILE_PATTERN + file_pattern = os.path.join(self.dataset_dir, + file_pattern % self.split_name) + return tf.gfile.Glob(file_pattern) diff --git a/models/research/deeplab/datasets/data_generator_test.py b/models/research/deeplab/datasets/data_generator_test.py new file mode 100644 index 0000000000000000000000000000000000000000..f4425d01da0c6f3bafaaff7c038498349a5c3f98 --- /dev/null +++ b/models/research/deeplab/datasets/data_generator_test.py @@ -0,0 +1,115 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for deeplab.datasets.data_generator.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +from six.moves import range +import tensorflow as tf + +from deeplab import common +from deeplab.datasets import data_generator + +ImageAttributes = collections.namedtuple( + 'ImageAttributes', ['image', 'label', 'height', 'width', 'image_name']) + + +class DatasetTest(tf.test.TestCase): + + # Note: training dataset cannot be tested since there is shuffle operation. + # When disabling the shuffle, training dataset is operated same as validation + # dataset. Therefore it is not tested again. + def testPascalVocSegTestData(self): + dataset = data_generator.Dataset( + dataset_name='pascal_voc_seg', + split_name='val', + dataset_dir= + 'deeplab/testing/pascal_voc_seg', + batch_size=1, + crop_size=[3, 3], # Use small size for testing. 
+ min_resize_value=3, + max_resize_value=3, + resize_factor=None, + min_scale_factor=0.01, + max_scale_factor=2.0, + scale_factor_step_size=0.25, + is_training=False, + model_variant='mobilenet_v2') + + self.assertAllEqual(dataset.num_of_classes, 21) + self.assertAllEqual(dataset.ignore_label, 255) + + num_of_images = 3 + with self.test_session() as sess: + iterator = dataset.get_one_shot_iterator() + + for i in range(num_of_images): + batch = iterator.get_next() + batch, = sess.run([batch]) + image_attributes = _get_attributes_of_image(i) + self.assertEqual(batch[common.HEIGHT][0], image_attributes.height) + self.assertEqual(batch[common.WIDTH][0], image_attributes.width) + self.assertEqual(batch[common.IMAGE_NAME][0], + image_attributes.image_name.encode()) + + # All data have been read. + with self.assertRaisesRegexp(tf.errors.OutOfRangeError, ''): + sess.run([iterator.get_next()]) + + +def _get_attributes_of_image(index): + """Gets the attributes of the image. + + Args: + index: Index of image in all images. + + Returns: + Attributes of the image in the format of ImageAttributes. + + Raises: + ValueError: If index is of wrong value. + """ + if index == 0: + return ImageAttributes( + image=None, + label=None, + height=366, + width=500, + image_name='2007_000033') + elif index == 1: + return ImageAttributes( + image=None, + label=None, + height=335, + width=500, + image_name='2007_000042') + elif index == 2: + return ImageAttributes( + image=None, + label=None, + height=333, + width=500, + image_name='2007_000061') + else: + raise ValueError('Index can only be 0, 1 or 2.') + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/deeplab/datasets/download_and_convert_ade20k.sh b/models/research/deeplab/datasets/download_and_convert_ade20k.sh new file mode 100644 index 0000000000000000000000000000000000000000..3614ae42c16e4f727a725066be8948b666995241 --- /dev/null +++ b/models/research/deeplab/datasets/download_and_convert_ade20k.sh @@ -0,0 +1,80 @@ +#!/bin/bash +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# +# Script to download and preprocess the ADE20K dataset. +# +# Usage: +# bash ./download_and_convert_ade20k.sh +# +# The folder structure is assumed to be: +# + datasets +# - build_data.py +# - build_ade20k_data.py +# - download_and_convert_ade20k.sh +# + ADE20K +# + tfrecord +# + ADEChallengeData2016 +# + annotations +# + training +# + validation +# + images +# + training +# + validation + +# Exit immediately if a command exits with a non-zero status. +set -e + +CURRENT_DIR=$(pwd) +WORK_DIR="./ADE20K" +mkdir -p "${WORK_DIR}" +cd "${WORK_DIR}" + +# Helper function to download and unpack ADE20K dataset. +download_and_uncompress() { + local BASE_URL=${1} + local FILENAME=${2} + + if [ ! 
-f "${FILENAME}" ]; then + echo "Downloading ${FILENAME} to ${WORK_DIR}" + wget -nd -c "${BASE_URL}/${FILENAME}" + fi + echo "Uncompressing ${FILENAME}" + unzip "${FILENAME}" +} + +# Download the images. +BASE_URL="http://data.csail.mit.edu/places/ADEchallenge" +FILENAME="ADEChallengeData2016.zip" + +download_and_uncompress "${BASE_URL}" "${FILENAME}" + +cd "${CURRENT_DIR}" + +# Root path for ADE20K dataset. +ADE20K_ROOT="${WORK_DIR}/ADEChallengeData2016" + +# Build TFRecords of the dataset. +# First, create output directory for storing TFRecords. +OUTPUT_DIR="${WORK_DIR}/tfrecord" +mkdir -p "${OUTPUT_DIR}" + +echo "Converting ADE20K dataset..." +python ./build_ade20k_data.py \ + --train_image_folder="${ADE20K_ROOT}/images/training/" \ + --train_image_label_folder="${ADE20K_ROOT}/annotations/training/" \ + --val_image_folder="${ADE20K_ROOT}/images/validation/" \ + --val_image_label_folder="${ADE20K_ROOT}/annotations/validation/" \ + --output_dir="${OUTPUT_DIR}" diff --git a/models/research/deeplab/datasets/download_and_convert_voc2012.sh b/models/research/deeplab/datasets/download_and_convert_voc2012.sh new file mode 100644 index 0000000000000000000000000000000000000000..c02235182d427dfb1d63154a8266ad37b0a1d53f --- /dev/null +++ b/models/research/deeplab/datasets/download_and_convert_voc2012.sh @@ -0,0 +1,91 @@ +#!/bin/bash +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# +# Script to download and preprocess the PASCAL VOC 2012 dataset. +# +# Usage: +# bash ./download_and_convert_voc2012.sh +# +# The folder structure is assumed to be: +# + datasets +# - build_data.py +# - build_voc2012_data.py +# - download_and_convert_voc2012.sh +# - remove_gt_colormap.py +# + pascal_voc_seg +# + VOCdevkit +# + VOC2012 +# + JPEGImages +# + SegmentationClass +# + +# Exit immediately if a command exits with a non-zero status. +set -e + +CURRENT_DIR=$(pwd) +WORK_DIR="./pascal_voc_seg" +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +mkdir -p "${WORK_DIR}" +cd "${WORK_DIR}" + +# Helper function to download and unpack VOC 2012 dataset. +download_and_uncompress() { + local BASE_URL=${1} + local FILENAME=${2} + + if [ ! -f "${FILENAME}" ]; then + echo "Downloading ${FILENAME} to ${WORK_DIR}" + wget -nd -c "${BASE_URL}/${FILENAME}" + fi + echo "Uncompressing ${FILENAME}" + tar -xf "${FILENAME}" +} + +# Download the images. +BASE_URL="http://host.robots.ox.ac.uk/pascal/VOC/voc2012/" +FILENAME="VOCtrainval_11-May-2012.tar" + +download_and_uncompress "${BASE_URL}" "${FILENAME}" + +cd "${CURRENT_DIR}" + +# Root path for PASCAL VOC 2012 dataset. +PASCAL_ROOT="${WORK_DIR}/VOCdevkit/VOC2012" + +# Remove the colormap in the ground truth annotations. +SEG_FOLDER="${PASCAL_ROOT}/SegmentationClass" +SEMANTIC_SEG_FOLDER="${PASCAL_ROOT}/SegmentationClassRaw" + +echo "Removing the color map in ground truth annotations..." 
+python3 "${SCRIPT_DIR}/remove_gt_colormap.py" \ + --original_gt_folder="${SEG_FOLDER}" \ + --output_dir="${SEMANTIC_SEG_FOLDER}" + +# Build TFRecords of the dataset. +# First, create output directory for storing TFRecords. +OUTPUT_DIR="${WORK_DIR}/tfrecord" +mkdir -p "${OUTPUT_DIR}" + +IMAGE_FOLDER="${PASCAL_ROOT}/JPEGImages" +LIST_FOLDER="${PASCAL_ROOT}/ImageSets/Segmentation" + +echo "Converting PASCAL VOC 2012 dataset..." +python3 "${SCRIPT_DIR}/build_voc2012_data.py" \ + --image_folder="${IMAGE_FOLDER}" \ + --semantic_segmentation_folder="${SEMANTIC_SEG_FOLDER}" \ + --list_folder="${LIST_FOLDER}" \ + --image_format="jpg" \ + --output_dir="${OUTPUT_DIR}" diff --git a/models/research/deeplab/datasets/remove_gt_colormap.py b/models/research/deeplab/datasets/remove_gt_colormap.py new file mode 100644 index 0000000000000000000000000000000000000000..900570038ed0f1add9d670157494d4cab6bf5324 --- /dev/null +++ b/models/research/deeplab/datasets/remove_gt_colormap.py @@ -0,0 +1,83 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Removes the color map from segmentation annotations. + +Removes the color map from the ground truth segmentation annotations and save +the results to output_dir. +""" +import glob +import os.path +import numpy as np + +from PIL import Image + +import tensorflow as tf + +FLAGS = tf.compat.v1.flags.FLAGS + +tf.compat.v1.flags.DEFINE_string('original_gt_folder', + './VOCdevkit/VOC2012/SegmentationClass', + 'Original ground truth annotations.') + +tf.compat.v1.flags.DEFINE_string('segmentation_format', 'png', 'Segmentation format.') + +tf.compat.v1.flags.DEFINE_string('output_dir', + './VOCdevkit/VOC2012/SegmentationClassRaw', + 'folder to save modified ground truth annotations.') + + +def _remove_colormap(filename): + """Removes the color map from the annotation. + + Args: + filename: Ground truth annotation filename. + + Returns: + Annotation without color map. + """ + return np.array(Image.open(filename)) + + +def _save_annotation(annotation, filename): + """Saves the annotation as png file. + + Args: + annotation: Segmentation annotation. + filename: Output filename. + """ + pil_image = Image.fromarray(annotation.astype(dtype=np.uint8)) + with tf.io.gfile.GFile(filename, mode='w') as f: + pil_image.save(f, 'PNG') + + +def main(unused_argv): + # Create the output directory if not exists. + if not tf.io.gfile.isdir(FLAGS.output_dir): + tf.io.gfile.makedirs(FLAGS.output_dir) + + annotations = glob.glob(os.path.join(FLAGS.original_gt_folder, + '*.' + FLAGS.segmentation_format)) + for annotation in annotations: + raw_annotation = _remove_colormap(annotation) + filename = os.path.basename(annotation)[:-4] + _save_annotation(raw_annotation, + os.path.join( + FLAGS.output_dir, + filename + '.' 
+ FLAGS.segmentation_format)) + + +if __name__ == '__main__': + tf.compat.v1.app.run() diff --git a/models/research/deeplab/deeplab_demo.ipynb b/models/research/deeplab/deeplab_demo.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..81ccfde1b6484625ad1e0662d9a4cf12941d262c --- /dev/null +++ b/models/research/deeplab/deeplab_demo.ipynb @@ -0,0 +1,369 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "KFPcBuVFw61h" + }, + "source": [ + "# Overview\n", + "\n", + "This colab demonstrates the steps to use the DeepLab model to perform semantic segmentation on a sample input image. Expected outputs are semantic labels overlayed on the sample image.\n", + "\n", + "### About DeepLab\n", + "The models used in this colab perform semantic segmentation. Semantic segmentation models focus on assigning semantic labels, such as sky, person, or car, to multiple objects and stuff in a single image." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "t3ozFsEEP-u_" + }, + "source": [ + "# Instructions\n", + "\u003ch3\u003e\u003ca href=\"https://cloud.google.com/tpu/\"\u003e\u003cimg valign=\"middle\" src=\"https://raw.githubusercontent.com/GoogleCloudPlatform/tensorflow-without-a-phd/master/tensorflow-rl-pong/images/tpu-hexagon.png\" width=\"50\"\u003e\u003c/a\u003e \u0026nbsp;\u0026nbsp;Use a free TPU device\u003c/h3\u003e\n", + "\n", + " 1. On the main menu, click Runtime and select **Change runtime type**. Set \"TPU\" as the hardware accelerator.\n", + " 1. Click Runtime again and select **Runtime \u003e Run All**. You can also run the cells manually with Shift-ENTER." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "7cRiapZ1P3wy" + }, + "source": [ + "## Import Libraries" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "code", + "colab": {}, + "colab_type": "code", + "id": "kAbdmRmvq0Je" + }, + "outputs": [], + "source": [ + "import os\n", + "from io import BytesIO\n", + "import tarfile\n", + "import tempfile\n", + "from six.moves import urllib\n", + "\n", + "from matplotlib import gridspec\n", + "from matplotlib import pyplot as plt\n", + "import numpy as np\n", + "from PIL import Image\n", + "\n", + "%tensorflow_version 1.x\n", + "import tensorflow as tf" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "p47cYGGOQE1W" + }, + "source": [ + "## Import helper methods\n", + "These methods help us perform the following tasks:\n", + "* Load the latest version of the pretrained DeepLab model\n", + "* Load the colormap from the PASCAL VOC dataset\n", + "* Adds colors to various labels, such as \"pink\" for people, \"green\" for bicycle and more\n", + "* Visualize an image, and add an overlay of colors on various regions" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "code", + "colab": {}, + "colab_type": "code", + "id": "vN0kU6NJ1Ye5" + }, + "outputs": [], + "source": [ + "class DeepLabModel(object):\n", + " \"\"\"Class to load deeplab model and run inference.\"\"\"\n", + "\n", + " INPUT_TENSOR_NAME = 'ImageTensor:0'\n", + " OUTPUT_TENSOR_NAME = 'SemanticPredictions:0'\n", + " INPUT_SIZE = 513\n", + " FROZEN_GRAPH_NAME = 'frozen_inference_graph'\n", + "\n", + " def __init__(self, tarball_path):\n", + " \"\"\"Creates and loads pretrained deeplab model.\"\"\"\n", + " self.graph = tf.Graph()\n", + "\n", + " graph_def = None\n", + " # Extract 
frozen graph from tar archive.\n", + " tar_file = tarfile.open(tarball_path)\n", + " for tar_info in tar_file.getmembers():\n", + " if self.FROZEN_GRAPH_NAME in os.path.basename(tar_info.name):\n", + " file_handle = tar_file.extractfile(tar_info)\n", + " graph_def = tf.GraphDef.FromString(file_handle.read())\n", + " break\n", + "\n", + " tar_file.close()\n", + "\n", + " if graph_def is None:\n", + " raise RuntimeError('Cannot find inference graph in tar archive.')\n", + "\n", + " with self.graph.as_default():\n", + " tf.import_graph_def(graph_def, name='')\n", + "\n", + " self.sess = tf.Session(graph=self.graph)\n", + "\n", + " def run(self, image):\n", + " \"\"\"Runs inference on a single image.\n", + "\n", + " Args:\n", + " image: A PIL.Image object, raw input image.\n", + "\n", + " Returns:\n", + " resized_image: RGB image resized from original input image.\n", + " seg_map: Segmentation map of `resized_image`.\n", + " \"\"\"\n", + " width, height = image.size\n", + " resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n", + " target_size = (int(resize_ratio * width), int(resize_ratio * height))\n", + " resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)\n", + " batch_seg_map = self.sess.run(\n", + " self.OUTPUT_TENSOR_NAME,\n", + " feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})\n", + " seg_map = batch_seg_map[0]\n", + " return resized_image, seg_map\n", + "\n", + "\n", + "def create_pascal_label_colormap():\n", + " \"\"\"Creates a label colormap used in PASCAL VOC segmentation benchmark.\n", + "\n", + " Returns:\n", + " A Colormap for visualizing segmentation results.\n", + " \"\"\"\n", + " colormap = np.zeros((256, 3), dtype=int)\n", + " ind = np.arange(256, dtype=int)\n", + "\n", + " for shift in reversed(range(8)):\n", + " for channel in range(3):\n", + " colormap[:, channel] |= ((ind \u003e\u003e channel) \u0026 1) \u003c\u003c shift\n", + " ind \u003e\u003e= 3\n", + "\n", + " return colormap\n", + "\n", + "\n", + "def label_to_color_image(label):\n", + " \"\"\"Adds color defined by the dataset colormap to the label.\n", + "\n", + " Args:\n", + " label: A 2D array with integer type, storing the segmentation label.\n", + "\n", + " Returns:\n", + " result: A 2D array with floating type. 
The element of the array\n", + " is the color indexed by the corresponding element in the input label\n", + " to the PASCAL color map.\n", + "\n", + " Raises:\n", + " ValueError: If label is not of rank 2 or its value is larger than color\n", + " map maximum entry.\n", + " \"\"\"\n", + " if label.ndim != 2:\n", + " raise ValueError('Expect 2-D input label')\n", + "\n", + " colormap = create_pascal_label_colormap()\n", + "\n", + " if np.max(label) \u003e= len(colormap):\n", + " raise ValueError('label value too large.')\n", + "\n", + " return colormap[label]\n", + "\n", + "\n", + "def vis_segmentation(image, seg_map):\n", + " \"\"\"Visualizes input image, segmentation map and overlay view.\"\"\"\n", + " plt.figure(figsize=(15, 5))\n", + " grid_spec = gridspec.GridSpec(1, 4, width_ratios=[6, 6, 6, 1])\n", + "\n", + " plt.subplot(grid_spec[0])\n", + " plt.imshow(image)\n", + " plt.axis('off')\n", + " plt.title('input image')\n", + "\n", + " plt.subplot(grid_spec[1])\n", + " seg_image = label_to_color_image(seg_map).astype(np.uint8)\n", + " plt.imshow(seg_image)\n", + " plt.axis('off')\n", + " plt.title('segmentation map')\n", + "\n", + " plt.subplot(grid_spec[2])\n", + " plt.imshow(image)\n", + " plt.imshow(seg_image, alpha=0.7)\n", + " plt.axis('off')\n", + " plt.title('segmentation overlay')\n", + "\n", + " unique_labels = np.unique(seg_map)\n", + " ax = plt.subplot(grid_spec[3])\n", + " plt.imshow(\n", + " FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation='nearest')\n", + " ax.yaxis.tick_right()\n", + " plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])\n", + " plt.xticks([], [])\n", + " ax.tick_params(width=0.0)\n", + " plt.grid('off')\n", + " plt.show()\n", + "\n", + "\n", + "LABEL_NAMES = np.asarray([\n", + " 'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',\n", + " 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',\n", + " 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tv'\n", + "])\n", + "\n", + "FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)\n", + "FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "nGcZzNkASG9A" + }, + "source": [ + "## Select a pretrained model\n", + "We have trained the DeepLab model using various backbone networks. Select one from the MODEL_NAME list." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "c4oXKmnjw6i_" + }, + "outputs": [], + "source": [ + "MODEL_NAME = 'mobilenetv2_coco_voctrainaug' # @param ['mobilenetv2_coco_voctrainaug', 'mobilenetv2_coco_voctrainval', 'xception_coco_voctrainaug', 'xception_coco_voctrainval']\n", + "\n", + "_DOWNLOAD_URL_PREFIX = 'http://download.tensorflow.org/models/'\n", + "_MODEL_URLS = {\n", + " 'mobilenetv2_coco_voctrainaug':\n", + " 'deeplabv3_mnv2_pascal_train_aug_2018_01_29.tar.gz',\n", + " 'mobilenetv2_coco_voctrainval':\n", + " 'deeplabv3_mnv2_pascal_trainval_2018_01_29.tar.gz',\n", + " 'xception_coco_voctrainaug':\n", + " 'deeplabv3_pascal_train_aug_2018_01_04.tar.gz',\n", + " 'xception_coco_voctrainval':\n", + " 'deeplabv3_pascal_trainval_2018_01_04.tar.gz',\n", + "}\n", + "_TARBALL_NAME = 'deeplab_model.tar.gz'\n", + "\n", + "model_dir = tempfile.mkdtemp()\n", + "tf.gfile.MakeDirs(model_dir)\n", + "\n", + "download_path = os.path.join(model_dir, _TARBALL_NAME)\n", + "print('downloading model, this might take a while...')\n", + "urllib.request.urlretrieve(_DOWNLOAD_URL_PREFIX + _MODEL_URLS[MODEL_NAME],\n", + " download_path)\n", + "print('download completed! loading DeepLab model...')\n", + "\n", + "MODEL = DeepLabModel(download_path)\n", + "print('model loaded successfully!')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "SZst78N-4OKO" + }, + "source": [ + "## Run on sample images\n", + "\n", + "Select one of sample images (leave `IMAGE_URL` empty) or feed any internet image\n", + "url for inference.\n", + "\n", + "Note that this colab uses single scale inference for fast computation,\n", + "so the results may slightly differ from the visualizations in the\n", + "[README](https://github.com/tensorflow/models/blob/master/research/deeplab/README.md) file,\n", + "which uses multi-scale and left-right flipped inputs." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "form", + "colab": {}, + "colab_type": "code", + "id": "edGukUHXyymr" + }, + "outputs": [], + "source": [ + "\n", + "SAMPLE_IMAGE = 'image1' # @param ['image1', 'image2', 'image3']\n", + "IMAGE_URL = '' #@param {type:\"string\"}\n", + "\n", + "_SAMPLE_URL = ('https://github.com/tensorflow/models/blob/master/research/'\n", + " 'deeplab/g3doc/img/%s.jpg?raw=true')\n", + "\n", + "\n", + "def run_visualization(url):\n", + " \"\"\"Inferences DeepLab model and visualizes result.\"\"\"\n", + " try:\n", + " f = urllib.request.urlopen(url)\n", + " jpeg_str = f.read()\n", + " original_im = Image.open(BytesIO(jpeg_str))\n", + " except IOError:\n", + " print('Cannot retrieve image. Please check url: ' + url)\n", + " return\n", + "\n", + " print('running deeplab on image %s...' 
% url)\n", + " resized_im, seg_map = MODEL.run(original_im)\n", + "\n", + " vis_segmentation(resized_im, seg_map)\n", + "\n", + "\n", + "image_url = IMAGE_URL or _SAMPLE_URL % SAMPLE_IMAGE\n", + "run_visualization(image_url)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "aUbVoHScTJYe" + }, + "source": [ + "## What's next\n", + "\n", + "* Learn about [Cloud TPUs](https://cloud.google.com/tpu/docs) that Google designed and optimized specifically to speed up and scale up ML workloads for training and inference and to enable ML engineers and researchers to iterate more quickly.\n", + "* Explore the range of [Cloud TPU tutorials and Colabs](https://cloud.google.com/tpu/docs/tutorials) to find other examples that can be used when implementing your ML project.\n", + "* For more information on running the DeepLab model on Cloud TPUs, see the [DeepLab tutorial](https://cloud.google.com/tpu/docs/tutorials/deeplab).\n" + ] + } + ], + "metadata": { + "colab": { + "collapsed_sections": [], + "name": "DeepLab Demo.ipynb", + "provenance": [], + "toc_visible": true, + "version": "0.3.2" + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/models/research/deeplab/deprecated/__init__.py b/models/research/deeplab/deprecated/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/deeplab/deprecated/segmentation_dataset.py b/models/research/deeplab/deprecated/segmentation_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..2a5980b1d940878cd1aead4a5d301cca7b4a642b --- /dev/null +++ b/models/research/deeplab/deprecated/segmentation_dataset.py @@ -0,0 +1,200 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Provides data from semantic segmentation datasets. + +The SegmentationDataset class provides both images and annotations (semantic +segmentation and/or instance segmentation) for TensorFlow. Currently, we +support the following datasets: + +1. PASCAL VOC 2012 (http://host.robots.ox.ac.uk/pascal/VOC/voc2012/). + +PASCAL VOC 2012 semantic segmentation dataset annotates 20 foreground objects +(e.g., bike, person, and so on) and leaves all the other semantic classes as +one background class. The dataset contains 1464, 1449, and 1456 annotated +images for the training, validation and test respectively. + +2. Cityscapes dataset (https://www.cityscapes-dataset.com) + +The Cityscapes dataset contains 19 semantic labels (such as road, person, car, +and so on) for urban street scenes. + +3. ADE20K dataset (http://groups.csail.mit.edu/vision/datasets/ADE20K) + +The ADE20K dataset contains 150 semantic labels both urban street scenes and +indoor scenes. + +References: + M. Everingham, S. M. A. Eslami, L. 
V. Gool, C. K. I. Williams, J. Winn, + and A. Zisserman, The pascal visual object classes challenge a retrospective. + IJCV, 2014. + + M. Cordts, M. Omran, S. Ramos, T. Rehfeld, M. Enzweiler, R. Benenson, + U. Franke, S. Roth, and B. Schiele, "The cityscapes dataset for semantic urban + scene understanding," In Proc. of CVPR, 2016. + + B. Zhou, H. Zhao, X. Puig, S. Fidler, A. Barriuso, A. Torralba, "Scene Parsing + through ADE20K dataset", In Proc. of CVPR, 2017. +""" +import collections +import os.path +import tensorflow as tf +from tensorflow.contrib import slim as contrib_slim + +slim = contrib_slim + +dataset = slim.dataset + +tfexample_decoder = slim.tfexample_decoder + + +_ITEMS_TO_DESCRIPTIONS = { + 'image': 'A color image of varying height and width.', + 'labels_class': ('A semantic segmentation label whose size matches image.' + 'Its values range from 0 (background) to num_classes.'), +} + +# Named tuple to describe the dataset properties. +DatasetDescriptor = collections.namedtuple( + 'DatasetDescriptor', + ['splits_to_sizes', # Splits of the dataset into training, val, and test. + 'num_classes', # Number of semantic classes, including the background + # class (if exists). For example, there are 20 + # foreground classes + 1 background class in the PASCAL + # VOC 2012 dataset. Thus, we set num_classes=21. + 'ignore_label', # Ignore label value. + ] +) + +_CITYSCAPES_INFORMATION = DatasetDescriptor( + splits_to_sizes={ + 'train': 2975, + 'val': 500, + }, + num_classes=19, + ignore_label=255, +) + +_PASCAL_VOC_SEG_INFORMATION = DatasetDescriptor( + splits_to_sizes={ + 'train': 1464, + 'train_aug': 10582, + 'trainval': 2913, + 'val': 1449, + }, + num_classes=21, + ignore_label=255, +) + +# These number (i.e., 'train'/'test') seems to have to be hard coded +# You are required to figure it out for your training/testing example. +_ADE20K_INFORMATION = DatasetDescriptor( + splits_to_sizes={ + 'train': 20210, # num of samples in images/training + 'val': 2000, # num of samples in images/validation + }, + num_classes=151, + ignore_label=0, +) + + +_DATASETS_INFORMATION = { + 'cityscapes': _CITYSCAPES_INFORMATION, + 'pascal_voc_seg': _PASCAL_VOC_SEG_INFORMATION, + 'ade20k': _ADE20K_INFORMATION, +} + +# Default file pattern of TFRecord of TensorFlow Example. +_FILE_PATTERN = '%s-*' + + +def get_cityscapes_dataset_name(): + return 'cityscapes' + + +def get_dataset(dataset_name, split_name, dataset_dir): + """Gets an instance of slim Dataset. + + Args: + dataset_name: Dataset name. + split_name: A train/val Split name. + dataset_dir: The directory of the dataset sources. + + Returns: + An instance of slim Dataset. + + Raises: + ValueError: if the dataset_name or split_name is not recognized. + """ + if dataset_name not in _DATASETS_INFORMATION: + raise ValueError('The specified dataset is not supported yet.') + + splits_to_sizes = _DATASETS_INFORMATION[dataset_name].splits_to_sizes + + if split_name not in splits_to_sizes: + raise ValueError('data split name %s not recognized' % split_name) + + # Prepare the variables for different datasets. + num_classes = _DATASETS_INFORMATION[dataset_name].num_classes + ignore_label = _DATASETS_INFORMATION[dataset_name].ignore_label + + file_pattern = _FILE_PATTERN + file_pattern = os.path.join(dataset_dir, file_pattern % split_name) + + # Specify how the TF-Examples are decoded. 
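+  # Each TF-Example carries the encoded image (JPEG by default), its filename
+  # and spatial size, plus the PNG-encoded semantic label map; slim's
+  # TFExampleDecoder turns these features into the dense tensors listed in
+  # items_to_handlers below.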
+ keys_to_features = { + 'image/encoded': tf.FixedLenFeature( + (), tf.string, default_value=''), + 'image/filename': tf.FixedLenFeature( + (), tf.string, default_value=''), + 'image/format': tf.FixedLenFeature( + (), tf.string, default_value='jpeg'), + 'image/height': tf.FixedLenFeature( + (), tf.int64, default_value=0), + 'image/width': tf.FixedLenFeature( + (), tf.int64, default_value=0), + 'image/segmentation/class/encoded': tf.FixedLenFeature( + (), tf.string, default_value=''), + 'image/segmentation/class/format': tf.FixedLenFeature( + (), tf.string, default_value='png'), + } + items_to_handlers = { + 'image': tfexample_decoder.Image( + image_key='image/encoded', + format_key='image/format', + channels=3), + 'image_name': tfexample_decoder.Tensor('image/filename'), + 'height': tfexample_decoder.Tensor('image/height'), + 'width': tfexample_decoder.Tensor('image/width'), + 'labels_class': tfexample_decoder.Image( + image_key='image/segmentation/class/encoded', + format_key='image/segmentation/class/format', + channels=1), + } + + decoder = tfexample_decoder.TFExampleDecoder( + keys_to_features, items_to_handlers) + + return dataset.Dataset( + data_sources=file_pattern, + reader=tf.TFRecordReader, + decoder=decoder, + num_samples=splits_to_sizes[split_name], + items_to_descriptions=_ITEMS_TO_DESCRIPTIONS, + ignore_label=ignore_label, + num_classes=num_classes, + name=dataset_name, + multi_label=True) diff --git a/models/research/deeplab/eval.py b/models/research/deeplab/eval.py new file mode 100644 index 0000000000000000000000000000000000000000..4f5fb8ba9c7493e45e567e9cd5ba9fe567dd9690 --- /dev/null +++ b/models/research/deeplab/eval.py @@ -0,0 +1,227 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Evaluation script for the DeepLab model. + +See model.py for more details and usage. +""" + +import numpy as np +import six +import tensorflow as tf +from tensorflow.contrib import metrics as contrib_metrics +from tensorflow.contrib import quantize as contrib_quantize +from tensorflow.contrib import tfprof as contrib_tfprof +from tensorflow.contrib import training as contrib_training +from deeplab import common +from deeplab import model +from deeplab.datasets import data_generator + +flags = tf.app.flags +FLAGS = flags.FLAGS + +flags.DEFINE_string('master', '', 'BNS name of the tensorflow server') + +# Settings for log directories. + +flags.DEFINE_string('eval_logdir', None, 'Where to write the event logs.') + +flags.DEFINE_string('checkpoint_dir', None, 'Directory of model checkpoints.') + +# Settings for evaluating the model. 
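+# Note that eval_crop_size must be at least as large as the largest image in
+# the evaluation split; the default of 513,513 covers PASCAL VOC 2012.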
+ +flags.DEFINE_integer('eval_batch_size', 1, + 'The number of images in each batch during evaluation.') + +flags.DEFINE_list('eval_crop_size', '513,513', + 'Image crop size [height, width] for evaluation.') + +flags.DEFINE_integer('eval_interval_secs', 60 * 5, + 'How often (in seconds) to run evaluation.') + +# For `xception_65`, use atrous_rates = [12, 24, 36] if output_stride = 8, or +# rates = [6, 12, 18] if output_stride = 16. For `mobilenet_v2`, use None. Note +# one could use different atrous_rates/output_stride during training/evaluation. +flags.DEFINE_multi_integer('atrous_rates', None, + 'Atrous rates for atrous spatial pyramid pooling.') + +flags.DEFINE_integer('output_stride', 16, + 'The ratio of input to output spatial resolution.') + +# Change to [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] for multi-scale test. +flags.DEFINE_multi_float('eval_scales', [1.0], + 'The scales to resize images for evaluation.') + +# Change to True for adding flipped images during test. +flags.DEFINE_bool('add_flipped_images', False, + 'Add flipped images for evaluation or not.') + +flags.DEFINE_integer( + 'quantize_delay_step', -1, + 'Steps to start quantized training. If < 0, will not quantize model.') + +# Dataset settings. + +flags.DEFINE_string('dataset', 'pascal_voc_seg', + 'Name of the segmentation dataset.') + +flags.DEFINE_string('eval_split', 'val', + 'Which split of the dataset used for evaluation') + +flags.DEFINE_string('dataset_dir', None, 'Where the dataset reside.') + +flags.DEFINE_integer('max_number_of_evaluations', 0, + 'Maximum number of eval iterations. Will loop ' + 'indefinitely upon nonpositive values.') + + +def main(unused_argv): + tf.logging.set_verbosity(tf.logging.INFO) + + dataset = data_generator.Dataset( + dataset_name=FLAGS.dataset, + split_name=FLAGS.eval_split, + dataset_dir=FLAGS.dataset_dir, + batch_size=FLAGS.eval_batch_size, + crop_size=[int(sz) for sz in FLAGS.eval_crop_size], + min_resize_value=FLAGS.min_resize_value, + max_resize_value=FLAGS.max_resize_value, + resize_factor=FLAGS.resize_factor, + model_variant=FLAGS.model_variant, + num_readers=2, + is_training=False, + should_shuffle=False, + should_repeat=False) + + tf.gfile.MakeDirs(FLAGS.eval_logdir) + tf.logging.info('Evaluating on %s set', FLAGS.eval_split) + + with tf.Graph().as_default(): + samples = dataset.get_one_shot_iterator().get_next() + + model_options = common.ModelOptions( + outputs_to_num_classes={common.OUTPUT_TYPE: dataset.num_of_classes}, + crop_size=[int(sz) for sz in FLAGS.eval_crop_size], + atrous_rates=FLAGS.atrous_rates, + output_stride=FLAGS.output_stride) + + # Set shape in order for tf.contrib.tfprof.model_analyzer to work properly. 
+ samples[common.IMAGE].set_shape( + [FLAGS.eval_batch_size, + int(FLAGS.eval_crop_size[0]), + int(FLAGS.eval_crop_size[1]), + 3]) + if tuple(FLAGS.eval_scales) == (1.0,): + tf.logging.info('Performing single-scale test.') + predictions = model.predict_labels(samples[common.IMAGE], model_options, + image_pyramid=FLAGS.image_pyramid) + else: + tf.logging.info('Performing multi-scale test.') + if FLAGS.quantize_delay_step >= 0: + raise ValueError( + 'Quantize mode is not supported with multi-scale test.') + + predictions = model.predict_labels_multi_scale( + samples[common.IMAGE], + model_options=model_options, + eval_scales=FLAGS.eval_scales, + add_flipped_images=FLAGS.add_flipped_images) + predictions = predictions[common.OUTPUT_TYPE] + predictions = tf.reshape(predictions, shape=[-1]) + labels = tf.reshape(samples[common.LABEL], shape=[-1]) + weights = tf.to_float(tf.not_equal(labels, dataset.ignore_label)) + + # Set ignore_label regions to label 0, because metrics.mean_iou requires + # range of labels = [0, dataset.num_classes). Note the ignore_label regions + # are not evaluated since the corresponding regions contain weights = 0. + labels = tf.where( + tf.equal(labels, dataset.ignore_label), tf.zeros_like(labels), labels) + + predictions_tag = 'miou' + for eval_scale in FLAGS.eval_scales: + predictions_tag += '_' + str(eval_scale) + if FLAGS.add_flipped_images: + predictions_tag += '_flipped' + + # Define the evaluation metric. + metric_map = {} + num_classes = dataset.num_of_classes + metric_map['eval/%s_overall' % predictions_tag] = tf.metrics.mean_iou( + labels=labels, predictions=predictions, num_classes=num_classes, + weights=weights) + # IoU for each class. + one_hot_predictions = tf.one_hot(predictions, num_classes) + one_hot_predictions = tf.reshape(one_hot_predictions, [-1, num_classes]) + one_hot_labels = tf.one_hot(labels, num_classes) + one_hot_labels = tf.reshape(one_hot_labels, [-1, num_classes]) + for c in range(num_classes): + predictions_tag_c = '%s_class_%d' % (predictions_tag, c) + tp, tp_op = tf.metrics.true_positives( + labels=one_hot_labels[:, c], predictions=one_hot_predictions[:, c], + weights=weights) + fp, fp_op = tf.metrics.false_positives( + labels=one_hot_labels[:, c], predictions=one_hot_predictions[:, c], + weights=weights) + fn, fn_op = tf.metrics.false_negatives( + labels=one_hot_labels[:, c], predictions=one_hot_predictions[:, c], + weights=weights) + tp_fp_fn_op = tf.group(tp_op, fp_op, fn_op) + iou = tf.where(tf.greater(tp + fn, 0.0), + tp / (tp + fn + fp), + tf.constant(np.NaN)) + metric_map['eval/%s' % predictions_tag_c] = (iou, tp_fp_fn_op) + + (metrics_to_values, + metrics_to_updates) = contrib_metrics.aggregate_metric_map(metric_map) + + summary_ops = [] + for metric_name, metric_value in six.iteritems(metrics_to_values): + op = tf.summary.scalar(metric_name, metric_value) + op = tf.Print(op, [metric_value], metric_name) + summary_ops.append(op) + + summary_op = tf.summary.merge(summary_ops) + summary_hook = contrib_training.SummaryAtEndHook( + log_dir=FLAGS.eval_logdir, summary_op=summary_op) + hooks = [summary_hook] + + num_eval_iters = None + if FLAGS.max_number_of_evaluations > 0: + num_eval_iters = FLAGS.max_number_of_evaluations + + if FLAGS.quantize_delay_step >= 0: + contrib_quantize.create_eval_graph() + + contrib_tfprof.model_analyzer.print_model_analysis( + tf.get_default_graph(), + tfprof_options=contrib_tfprof.model_analyzer + .TRAINABLE_VARS_PARAMS_STAT_OPTIONS) + contrib_tfprof.model_analyzer.print_model_analysis( + 
tf.get_default_graph(), + tfprof_options=contrib_tfprof.model_analyzer.FLOAT_OPS_OPTIONS) + contrib_training.evaluate_repeatedly( + checkpoint_dir=FLAGS.checkpoint_dir, + master=FLAGS.master, + eval_ops=list(metrics_to_updates.values()), + max_number_of_evaluations=num_eval_iters, + hooks=hooks, + eval_interval_secs=FLAGS.eval_interval_secs) + + +if __name__ == '__main__': + flags.mark_flag_as_required('checkpoint_dir') + flags.mark_flag_as_required('eval_logdir') + flags.mark_flag_as_required('dataset_dir') + tf.app.run() diff --git a/models/research/deeplab/evaluation/README.md b/models/research/deeplab/evaluation/README.md new file mode 100644 index 0000000000000000000000000000000000000000..69255384e9a293e7acada74b40bd4288ba121edb --- /dev/null +++ b/models/research/deeplab/evaluation/README.md @@ -0,0 +1,311 @@ +# Evaluation Metrics for Whole Image Parsing + +Whole Image Parsing [1], also known as Panoptic Segmentation [2], generalizes +the tasks of semantic segmentation for "stuff" classes and instance +segmentation for "thing" classes, assigning both semantic and instance labels +to every pixel in an image. + +Previous works evaluate the parsing result with separate metrics (e.g., one for +semantic segmentation result and one for object detection result). Recently, +Kirillov et al. propose the unified instance-based Panoptic Quality (PQ) metric +[2] into several benchmarks [3, 4]. + +However, we notice that the instance-based PQ metric often places +disproportionate emphasis on small instance parsing, as well as on "thing" over +"stuff" classes. To remedy these effects, we propose an alternative +region-based Parsing Covering (PC) metric [5], which adapts the Covering +metric [6], previously used for class-agnostics segmentation quality +evaluation, to the task of image parsing. + +Here, we provide implementation of both PQ and PC for evaluating the parsing +results. We briefly explain both metrics below for reference. + +## Panoptic Quality (PQ) + +Given a groundtruth segmentation S and a predicted segmentation S', PQ is +defined as follows: + +
+![Panoptic Quality (PQ) equation](g3doc/img/equation_pq.png)
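+
+(In text form, following [2], PQ is the sum of IoU(R, R') over matched pairs
+in TP, divided by |TP| + 0.5 |FP| + 0.5 |FN|.)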
+
+where R and R' are groundtruth regions and predicted regions respectively,
+and |TP|, |FP|, and |FN| are the number of true positives, false positives,
+and false negatives. The matching is determined by a threshold of 0.5
+Intersection-Over-Union (IOU).
+
+PQ treats all regions of the same "stuff" class as one instance, and the
+size of instances is not considered. For example, instances with 10 × 10
+pixels contribute equally to the metric as instances with 1000 × 1000 pixels.
+Therefore, PQ is sensitive to false positives with small regions, and some
+heuristics, such as removing those small regions, could improve the reported
+performance (as also pointed out in the open-sourced evaluation code from [2]).
+Thus, we argue that PQ is suitable in applications where one cares equally for
+the parsing quality of instances irrespective of their sizes.
+
+## Parsing Covering (PC)
+
+We notice that there are applications where one pays more attention to large
+objects, e.g., autonomous driving (where nearby objects are more important
+than far away ones). Motivated by this, we propose to also evaluate the
+quality of image parsing results by extending the existing Covering metric [6],
+which accounts for instance sizes. Specifically, our proposed metric, Parsing
+Covering (PC), is defined as follows:
+
+![Parsing Covering (PC) equation](g3doc/img/equation_pc.png)
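+
+(In text form: Cov_i is (1 / N_i) times the sum, over groundtruth regions R in
+S_i, of |R| multiplied by the best IoU between R and any predicted region in
+S_i', and PC is the mean of Cov_i over the C semantic classes, as spelled out
+below.)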
+ + +where Si and Si' are the groundtruth segmentation and +predicted segmentation for the i-th semantic class respectively, and +Ni is the total number of pixels of groundtruth regions from +Si . The Covering for class i, Covi , is computed in +the same way as the original Covering metric except that only groundtruth +regions from Si and predicted regions from Si' are +considered. PC is then obtained by computing the average of Covi +over C semantic classes. + +A notable difference between PQ and the proposed PC is that there is no +matching involved in PC and hence no matching threshold. As an attempt to +treat equally "thing" and "stuff", the segmentation of "stuff" classes still +receives partial PC score if the segmentation is only partially correct. For +example, if one out of three equally-sized trees is perfectly segmented, the +model will get the same partial score by using PC regardless of considering +"tree" as "stuff" or "thing". + +## Tutorial + +To evaluate the parsing results with PQ and PC, we provide two options: + +1. Python off-line evaluation with results saved in the [COCO format](http://cocodataset.org/#format-results). +2. TensorFlow on-line evaluation. + +Below, we explain each option in detail. + +#### 1. Python off-line evaluation with results saved in COCO format + +[COCO result format](http://cocodataset.org/#format-results) has been +adopted by several benchmarks [3, 4]. Therefore, we provide a convenient +function, `eval_coco_format`, to evaluate the results saved in COCO format +in terms of PC and re-implemented PQ. + +Before using the provided function, the users need to download the official COCO +panotpic segmentation task API. Please see [installation](../g3doc/installation.md#add-libraries-to-pythonpath) +for reference. + +Once the official COCO panoptic segmentation task API is downloaded, the +users should be able to run the `eval_coco_format.py` to evaluate the parsing +results in terms of both PC and reimplemented PQ. + +To be concrete, let's take a look at the function, `eval_coco_format` in +`eval_coco_format.py`: + +```python +eval_coco_format(gt_json_file, + pred_json_file, + gt_folder=None, + pred_folder=None, + metric='pq', + num_categories=201, + ignored_label=0, + max_instances_per_category=256, + intersection_offset=None, + normalize_by_image_size=True, + num_workers=0, + print_digits=3): + +``` +where + +1. `gt_json_file`: Path to a JSON file giving ground-truth annotations in COCO +format. +2. `pred_json_file`: Path to a JSON file for the predictions to evaluate. +3. `gt_folder`: Folder containing panoptic-format ID images to match +ground-truth annotations to image regions. +4. `pred_folder`: Path to a folder containing ID images for predictions. +5. `metric`: Name of a metric to compute. Set to `pc`, `pq` for evaluation in PC +or PQ, respectively. +6. `num_categories`: The number of segmentation categories (or "classes") in the +dataset. +7. `ignored_label`: A category id that is ignored in evaluation, e.g. the "void" +label in COCO panoptic segmentation dataset. +8. `max_instances_per_category`: The maximum number of instances for each +category to ensure unique instance labels. +9. `intersection_offset`: The maximum number of unique labels. +10. `normalize_by_image_size`: Whether to normalize groundtruth instance region +areas by image size when using PC. +11. `num_workers`: If set to a positive number, will spawn child processes to +compute parts of the metric in parallel by splitting the images between the +workers. 
If set to -1, will use the value of multiprocessing.cpu_count(). +12. `print_digits`: Number of significant digits to print in summary of computed +metrics. + +The input arguments have default values set for the COCO panoptic segmentation +dataset. Thus, users only need to provide the `gt_json_file` and the +`pred_json_file` (following the COCO format) to run the evaluation on COCO with +PQ. If users want to evaluate the results on other datasets, they may need +to change the default values. + +As an example, the interested users could take a look at the provided unit +test, `test_compare_pq_with_reference_eval`, in `eval_coco_format_test.py`. + +#### 2. TensorFlow on-line evaluation + +Users may also want to run the TensorFlow on-line evaluation, similar to the +[tf.contrib.metrics.streaming_mean_iou](https://www.tensorflow.org/api_docs/python/tf/contrib/metrics/streaming_mean_iou). + +Below, we provide a code snippet that shows how to use the provided +`streaming_panoptic_quality` and `streaming_parsing_covering`. + +```python +metric_map = {} +metric_map['panoptic_quality'] = streaming_metrics.streaming_panoptic_quality( + category_label, + instance_label, + category_prediction, + instance_prediction, + num_classes=201, + max_instances_per_category=256, + ignored_label=0, + offset=256*256) +metric_map['parsing_covering'] = streaming_metrics.streaming_parsing_covering( + category_label, + instance_label, + category_prediction, + instance_prediction, + num_classes=201, + max_instances_per_category=256, + ignored_label=0, + offset=256*256, + normalize_by_image_size=True) +metrics_to_values, metrics_to_updates = slim.metrics.aggregate_metric_map( + metric_map) +``` +where `metric_map` is a dictionary storing the streamed results of PQ and PC. + +The `category_label` and the `instance_label` are the semantic segmentation and +instance segmentation groundtruth, respectively. That is, in the panoptic +segmentation format: +panoptic_label = category_label * max_instances_per_category + instance_label. +Similarly, the `category_prediction` and the `instance_prediction` are the +predicted semantic segmentation and instance segmentation, respectively. + +Below, we provide a code snippet about how to summarize the results in the +context of tf.summary. + +```python +summary_ops = [] +for metric_name, metric_value in metrics_to_values.iteritems(): + if metric_name == 'panoptic_quality': + [pq, sq, rq, total_tp, total_fn, total_fp] = tf.unstack( + metric_value, 6, axis=0) + panoptic_metrics = { + # Panoptic quality. + 'pq': pq, + # Segmentation quality. + 'sq': sq, + # Recognition quality. + 'rq': rq, + # Total true positives. + 'total_tp': total_tp, + # Total false negatives. + 'total_fn': total_fn, + # Total false positives. + 'total_fp': total_fp, + } + # Find the valid classes that will be used for evaluation. We will + # ignore the `ignore_label` class and other classes which have (tp + fn + # + fp) equal to 0. 
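+    # (`num_classes` and `void_label` are assumed to be defined in the
+    # enclosing scope, e.g. 201 and 0 for the COCO panoptic setup above.)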
+ valid_classes = tf.logical_and( + tf.not_equal(tf.range(0, num_classes), void_label), + tf.not_equal(total_tp + total_fn + total_fp, 0)) + for target_metric, target_value in panoptic_metrics.iteritems(): + output_metric_name = '{}_{}'.format(metric_name, target_metric) + op = tf.summary.scalar( + output_metric_name, + tf.reduce_mean(tf.boolean_mask(target_value, valid_classes))) + op = tf.Print(op, [target_value], output_metric_name + '_classwise: ', + summarize=num_classes) + op = tf.Print( + op, + [tf.reduce_mean(tf.boolean_mask(target_value, valid_classes))], + output_metric_name + '_mean: ', + summarize=1) + summary_ops.append(op) + elif metric_name == 'parsing_covering': + [per_class_covering, + total_per_class_weighted_ious, + total_per_class_gt_areas] = tf.unstack(metric_value, 3, axis=0) + # Find the valid classes that will be used for evaluation. We will + # ignore the `void_label` class and other classes which have + # total_per_class_weighted_ious + total_per_class_gt_areas equal to 0. + valid_classes = tf.logical_and( + tf.not_equal(tf.range(0, num_classes), void_label), + tf.not_equal( + total_per_class_weighted_ious + total_per_class_gt_areas, 0)) + op = tf.summary.scalar( + metric_name, + tf.reduce_mean(tf.boolean_mask(per_class_covering, valid_classes))) + op = tf.Print(op, [per_class_covering], metric_name + '_classwise: ', + summarize=num_classes) + op = tf.Print( + op, + [tf.reduce_mean( + tf.boolean_mask(per_class_covering, valid_classes))], + metric_name + '_mean: ', + summarize=1) + summary_ops.append(op) + else: + raise ValueError('The metric_name "%s" is not supported.' % metric_name) +``` + +Afterwards, the users could use the following code to run the evaluation in +TensorFlow. + +Users can take a look at eval.py for reference which provides a simple +example to run the streaming evaluation of mIOU for semantic segmentation. + +```python +metric_values = slim.evaluation.evaluation_loop( + master=FLAGS.master, + checkpoint_dir=FLAGS.checkpoint_dir, + logdir=FLAGS.eval_logdir, + num_evals=num_batches, + eval_op=metrics_to_updates.values(), + final_op=metrics_to_values.values(), + summary_op=tf.summary.merge(summary_ops), + max_number_of_evaluations=FLAGS.max_number_of_evaluations, + eval_interval_secs=FLAGS.eval_interval_secs) +``` + + +### References + +1. **Image Parsing: Unifying Segmentation, Detection, and Recognition**
+ Zhuowen Tu, Xiangrong Chen, Alan L. Yuille, and Song-Chun Zhu
+ IJCV, 2005. + +2. **Panoptic Segmentation**
+ Alexander Kirillov, Kaiming He, Ross Girshick, Carsten Rother and Piotr + Dollár
+ arXiv:1801.00868, 2018. + +3. **Microsoft COCO: Common Objects in Context**
+ Tsung-Yi Lin, Michael Maire, Serge Belongie, Lubomir Bourdev, Ross + Girshick, James Hays, Pietro Perona, Deva Ramanan, C. Lawrence Zitnick, + Piotr Dollar
+ In the Proc. of ECCV, 2014. + +4. **The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes**
+ Gerhard Neuhold, Tobias Ollmann, Samuel Rota Bulò, and Peter Kontschieder
+ In the Proc. of ICCV, 2017. + +5. **DeeperLab: Single-Shot Image Parser**
+ Tien-Ju Yang, Maxwell D. Collins, Yukun Zhu, Jyh-Jing Hwang, Ting Liu, + Xiao Zhang, Vivienne Sze, George Papandreou, Liang-Chieh Chen
+ arXiv: 1902.05093, 2019. + +6. **Contour Detection and Hierarchical Image Segmentation**
+ Pablo Arbelaez, Michael Maire, Charless Fowlkes, and Jitendra Malik
+ PAMI, 2011 diff --git a/models/research/deeplab/evaluation/__init__.py b/models/research/deeplab/evaluation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/deeplab/evaluation/base_metric.py b/models/research/deeplab/evaluation/base_metric.py new file mode 100644 index 0000000000000000000000000000000000000000..ee7606ef44c1c2c027e593c494659f0dbcd455d3 --- /dev/null +++ b/models/research/deeplab/evaluation/base_metric.py @@ -0,0 +1,191 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Defines the top-level interface for evaluating segmentations.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import abc +import numpy as np +import six + + +_EPSILON = 1e-10 + + +def realdiv_maybe_zero(x, y): + """Element-wise x / y where y may contain zeros, for those returns 0 too.""" + return np.where( + np.less(np.abs(y), _EPSILON), np.zeros_like(x), np.divide(x, y)) + + +@six.add_metaclass(abc.ABCMeta) +class SegmentationMetric(object): + """Abstract base class for computers of segmentation metrics. + + Subclasses will implement both: + 1. Comparing the predicted segmentation for an image with the groundtruth. + 2. Computing the final metric over a set of images. + These are often done as separate steps, due to the need to accumulate + intermediate values other than the metric itself across images, computing the + actual metric value only on these accumulations after all the images have been + compared. + + A simple usage would be: + + metric = MetricImplementation(...) + for , in evaluation_set: + = run_segmentation() + metric.compare_and_accumulate(, ) + print(metric.result()) + + """ + + def __init__(self, num_categories, ignored_label, max_instances_per_category, + offset): + """Base initialization for SegmentationMetric. + + Args: + num_categories: The number of segmentation categories (or "classes" in the + dataset. + ignored_label: A category id that is ignored in evaluation, e.g. the void + label as defined in COCO panoptic segmentation dataset. + max_instances_per_category: The maximum number of instances for each + category. Used in ensuring unique instance labels. + offset: The maximum number of unique labels. This is used, by multiplying + the ground-truth labels, to generate unique ids for individual regions + of overlap between groundtruth and predicted segments. 
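+        For the region ids to be unique, `offset` should be at least
+        num_categories * max_instances_per_category; eval_coco_format uses
+        (num_categories + 1) * max_instances_per_category by default.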
+ """ + self.num_categories = num_categories + self.ignored_label = ignored_label + self.max_instances_per_category = max_instances_per_category + self.offset = offset + self.reset() + + def _naively_combine_labels(self, category_array, instance_array): + """Naively creates a combined label array from categories and instances.""" + return (category_array.astype(np.uint32) * self.max_instances_per_category + + instance_array.astype(np.uint32)) + + @abc.abstractmethod + def compare_and_accumulate( + self, groundtruth_category_array, groundtruth_instance_array, + predicted_category_array, predicted_instance_array): + """Compares predicted segmentation with groundtruth, accumulates its metric. + + It is not assumed that instance ids are unique across different categories. + See for example combine_semantic_and_instance_predictions.py in official + PanopticAPI evaluation code for issues to consider when fusing category + and instance labels. + + Instances ids of the ignored category have the meaning that id 0 is "void" + and remaining ones are crowd instances. + + Args: + groundtruth_category_array: A 2D numpy uint16 array of groundtruth + per-pixel category labels. + groundtruth_instance_array: A 2D numpy uint16 array of groundtruth + instance labels. + predicted_category_array: A 2D numpy uint16 array of predicted per-pixel + category labels. + predicted_instance_array: A 2D numpy uint16 array of predicted instance + labels. + + Returns: + The value of the metric over all comparisons done so far, including this + one, as a float scalar. + """ + raise NotImplementedError('Must be implemented in subclasses.') + + @abc.abstractmethod + def result(self): + """Computes the metric over all comparisons done so far.""" + raise NotImplementedError('Must be implemented in subclasses.') + + @abc.abstractmethod + def detailed_results(self, is_thing=None): + """Computes and returns the detailed final metric results. + + Args: + is_thing: A boolean array of length `num_categories`. The entry + `is_thing[category_id]` is True iff that category is a "thing" category + instead of "stuff." + + Returns: + A dictionary with a breakdown of metrics and/or metric factors by things, + stuff, and all categories. + """ + raise NotImplementedError('Not implemented in subclasses.') + + @abc.abstractmethod + def result_per_category(self): + """For supported metrics, return individual per-category metric values. + + Returns: + A numpy array of shape `[self.num_categories]`, where index `i` is the + metrics value over only that category. + """ + raise NotImplementedError('Not implemented in subclass.') + + def print_detailed_results(self, is_thing=None, print_digits=3): + """Prints out a detailed breakdown of metric results. + + Args: + is_thing: A boolean array of length num_categories. + `is_thing[category_id]` will say whether that category is a "thing" + rather than "stuff." + print_digits: Number of significant digits to print in computed metrics. + """ + raise NotImplementedError('Not implemented in subclass.') + + @abc.abstractmethod + def merge(self, other_instance): + """Combines the accumulated results of another instance into self. + + The following two cases should put `metric_a` into an equivalent state. + + Case 1 (with merge): + + metric_a = MetricsSubclass(...) + metric_a.compare_and_accumulate() + metric_a.compare_and_accumulate() + + metric_b = MetricsSubclass(...) 
+ metric_b.compare_and_accumulate() + metric_b.compare_and_accumulate() + + metric_a.merge(metric_b) + + Case 2 (without merge): + + metric_a = MetricsSubclass(...) + metric_a.compare_and_accumulate() + metric_a.compare_and_accumulate() + metric_a.compare_and_accumulate() + metric_a.compare_and_accumulate() + + Args: + other_instance: Another compatible instance of the same metric subclass. + """ + raise NotImplementedError('Not implemented in subclass.') + + @abc.abstractmethod + def reset(self): + """Resets the accumulation to the metric class's state at initialization. + + Note that this function will be called in SegmentationMetric.__init__. + """ + raise NotImplementedError('Must be implemented in subclasses.') diff --git a/models/research/deeplab/evaluation/eval_coco_format.py b/models/research/deeplab/evaluation/eval_coco_format.py new file mode 100644 index 0000000000000000000000000000000000000000..1a26446f16b9787f246034a58247eb36d0064f80 --- /dev/null +++ b/models/research/deeplab/evaluation/eval_coco_format.py @@ -0,0 +1,338 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Computes evaluation metrics on groundtruth and predictions in COCO format. + +The Common Objects in Context (COCO) dataset defines a format for specifying +combined semantic and instance segmentations as "panoptic" segmentations. This +is done with the combination of JSON and image files as specified at: +http://cocodataset.org/#format-results +where the JSON file specifies the overall structure of the result, +including the categories for each annotation, and the images specify the image +region for each annotation in that image by its ID. + +This script computes additional metrics such as Parsing Covering on datasets and +predictions in this format. An implementation of Panoptic Quality is also +provided for convenience. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import json +import multiprocessing +import os + +from absl import app +from absl import flags +from absl import logging +import numpy as np +from PIL import Image +import utils as panopticapi_utils +import six + +from deeplab.evaluation import panoptic_quality +from deeplab.evaluation import parsing_covering + +FLAGS = flags.FLAGS + +flags.DEFINE_string( + 'gt_json_file', None, + ' Path to a JSON file giving ground-truth annotations in COCO format.') +flags.DEFINE_string('pred_json_file', None, + 'Path to a JSON file for the predictions to evaluate.') +flags.DEFINE_string( + 'gt_folder', None, + 'Folder containing panoptic-format ID images to match ground-truth ' + 'annotations to image regions.') +flags.DEFINE_string('pred_folder', None, + 'Folder containing ID images for predictions.') +flags.DEFINE_enum( + 'metric', 'pq', ['pq', 'pc'], 'Shorthand name of a metric to compute. 
' + 'Supported values are:\n' + 'Panoptic Quality (pq)\n' + 'Parsing Covering (pc)') +flags.DEFINE_integer( + 'num_categories', 201, + 'The number of segmentation categories (or "classes") in the dataset.') +flags.DEFINE_integer( + 'ignored_label', 0, + 'A category id that is ignored in evaluation, e.g. the void label as ' + 'defined in COCO panoptic segmentation dataset.') +flags.DEFINE_integer( + 'max_instances_per_category', 256, + 'The maximum number of instances for each category. Used in ensuring ' + 'unique instance labels.') +flags.DEFINE_integer('intersection_offset', None, + 'The maximum number of unique labels.') +flags.DEFINE_bool( + 'normalize_by_image_size', True, + 'Whether to normalize groundtruth instance region areas by image size. If ' + 'True, groundtruth instance areas and weighted IoUs will be divided by the ' + 'size of the corresponding image before accumulated across the dataset. ' + 'Only used for Parsing Covering (pc) evaluation.') +flags.DEFINE_integer( + 'num_workers', 0, 'If set to a positive number, will spawn child processes ' + 'to compute parts of the metric in parallel by splitting ' + 'the images between the workers. If set to -1, will use ' + 'the value of multiprocessing.cpu_count().') +flags.DEFINE_integer('print_digits', 3, + 'Number of significant digits to print in metrics.') + + +def _build_metric(metric, + num_categories, + ignored_label, + max_instances_per_category, + intersection_offset=None, + normalize_by_image_size=True): + """Creates a metric aggregator objet of the given name.""" + if metric == 'pq': + logging.warning('One should check Panoptic Quality results against the ' + 'official COCO API code. Small numerical differences ' + '(< 0.1%) can be magnified by rounding.') + return panoptic_quality.PanopticQuality(num_categories, ignored_label, + max_instances_per_category, + intersection_offset) + elif metric == 'pc': + return parsing_covering.ParsingCovering( + num_categories, ignored_label, max_instances_per_category, + intersection_offset, normalize_by_image_size) + else: + raise ValueError('No implementation for metric "%s"' % metric) + + +def _matched_annotations(gt_json, pred_json): + """Yields a set of (groundtruth, prediction) image annotation pairs..""" + image_id_to_pred_ann = { + annotation['image_id']: annotation + for annotation in pred_json['annotations'] + } + for gt_ann in gt_json['annotations']: + image_id = gt_ann['image_id'] + pred_ann = image_id_to_pred_ann[image_id] + yield gt_ann, pred_ann + + +def _open_panoptic_id_image(image_path): + """Loads a COCO-format panoptic ID image from file.""" + return panopticapi_utils.rgb2id( + np.array(Image.open(image_path), dtype=np.uint32)) + + +def _split_panoptic(ann_json, id_array, ignored_label, allow_crowds): + """Given the COCO JSON and ID map, splits into categories and instances.""" + category = np.zeros(id_array.shape, np.uint16) + instance = np.zeros(id_array.shape, np.uint16) + next_instance_id = collections.defaultdict(int) + # Skip instance label 0 for ignored label. That is reserved for void. 
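+  # When crowd regions are allowed (groundtruth only), each one becomes an
+  # instance (id >= 1) of the ignored label instead of its own category.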
+ next_instance_id[ignored_label] = 1 + for segment_info in ann_json['segments_info']: + if allow_crowds and segment_info['iscrowd']: + category_id = ignored_label + else: + category_id = segment_info['category_id'] + mask = np.equal(id_array, segment_info['id']) + category[mask] = category_id + instance[mask] = next_instance_id[category_id] + next_instance_id[category_id] += 1 + return category, instance + + +def _category_and_instance_from_annotation(ann_json, folder, ignored_label, + allow_crowds): + """Given the COCO JSON annotations, finds maps of categories and instances.""" + panoptic_id_image = _open_panoptic_id_image( + os.path.join(folder, ann_json['file_name'])) + return _split_panoptic(ann_json, panoptic_id_image, ignored_label, + allow_crowds) + + +def _compute_metric(metric_aggregator, gt_folder, pred_folder, + annotation_pairs): + """Iterates over matched annotation pairs and computes a metric over them.""" + for gt_ann, pred_ann in annotation_pairs: + # We only expect "iscrowd" to appear in the ground-truth, and not in model + # output. In predicted JSON it is simply ignored, as done in official code. + gt_category, gt_instance = _category_and_instance_from_annotation( + gt_ann, gt_folder, metric_aggregator.ignored_label, True) + pred_category, pred_instance = _category_and_instance_from_annotation( + pred_ann, pred_folder, metric_aggregator.ignored_label, False) + + metric_aggregator.compare_and_accumulate(gt_category, gt_instance, + pred_category, pred_instance) + return metric_aggregator + + +def _iterate_work_queue(work_queue): + """Creates an iterable that retrieves items from a queue until one is None.""" + task = work_queue.get(block=True) + while task is not None: + yield task + task = work_queue.get(block=True) + + +def _run_metrics_worker(metric_aggregator, gt_folder, pred_folder, work_queue, + result_queue): + result = _compute_metric(metric_aggregator, gt_folder, pred_folder, + _iterate_work_queue(work_queue)) + result_queue.put(result, block=True) + + +def _is_thing_array(categories_json, ignored_label): + """is_thing[category_id] is a bool on if category is "thing" or "stuff".""" + is_thing_dict = {} + for category_json in categories_json: + is_thing_dict[category_json['id']] = bool(category_json['isthing']) + + # Check our assumption that the category ids are consecutive. + # Usually metrics should be able to handle this case, but adding a warning + # here. + max_category_id = max(six.iterkeys(is_thing_dict)) + if len(is_thing_dict) != max_category_id + 1: + seen_ids = six.viewkeys(is_thing_dict) + all_ids = set(six.moves.range(max_category_id + 1)) + unseen_ids = all_ids.difference(seen_ids) + if unseen_ids != {ignored_label}: + logging.warning( + 'Nonconsecutive category ids or no category JSON specified for ids: ' + '%s', unseen_ids) + + is_thing_array = np.zeros(max_category_id + 1) + for category_id, is_thing in six.iteritems(is_thing_dict): + is_thing_array[category_id] = is_thing + + return is_thing_array + + +def eval_coco_format(gt_json_file, + pred_json_file, + gt_folder=None, + pred_folder=None, + metric='pq', + num_categories=201, + ignored_label=0, + max_instances_per_category=256, + intersection_offset=None, + normalize_by_image_size=True, + num_workers=0, + print_digits=3): + """Top-level code to compute metrics on a COCO-format result. + + Note that the default values are set for COCO panoptic segmentation dataset, + and thus the users may want to change it for their own dataset evaluation. 
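+
+  For example, with COCO-format ground truth and predictions and the default
+  settings, it suffices to call:
+
+    eval_coco_format('coco_gt.json', 'coco_pred.json')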
+ + Args: + gt_json_file: Path to a JSON file giving ground-truth annotations in COCO + format. + pred_json_file: Path to a JSON file for the predictions to evaluate. + gt_folder: Folder containing panoptic-format ID images to match ground-truth + annotations to image regions. + pred_folder: Folder containing ID images for predictions. + metric: Name of a metric to compute. + num_categories: The number of segmentation categories (or "classes") in the + dataset. + ignored_label: A category id that is ignored in evaluation, e.g. the "void" + label as defined in the COCO panoptic segmentation dataset. + max_instances_per_category: The maximum number of instances for each + category. Used in ensuring unique instance labels. + intersection_offset: The maximum number of unique labels. + normalize_by_image_size: Whether to normalize groundtruth instance region + areas by image size. If True, groundtruth instance areas and weighted IoUs + will be divided by the size of the corresponding image before accumulated + across the dataset. Only used for Parsing Covering (pc) evaluation. + num_workers: If set to a positive number, will spawn child processes to + compute parts of the metric in parallel by splitting the images between + the workers. If set to -1, will use the value of + multiprocessing.cpu_count(). + print_digits: Number of significant digits to print in summary of computed + metrics. + + Returns: + The computed result of the metric as a float scalar. + """ + with open(gt_json_file, 'r') as gt_json_fo: + gt_json = json.load(gt_json_fo) + with open(pred_json_file, 'r') as pred_json_fo: + pred_json = json.load(pred_json_fo) + if gt_folder is None: + gt_folder = gt_json_file.replace('.json', '') + if pred_folder is None: + pred_folder = pred_json_file.replace('.json', '') + if intersection_offset is None: + intersection_offset = (num_categories + 1) * max_instances_per_category + + metric_aggregator = _build_metric( + metric, num_categories, ignored_label, max_instances_per_category, + intersection_offset, normalize_by_image_size) + + if num_workers == -1: + logging.info('Attempting to get the CPU count to set # workers.') + num_workers = multiprocessing.cpu_count() + + if num_workers > 0: + logging.info('Computing metric in parallel with %d workers.', num_workers) + work_queue = multiprocessing.Queue() + result_queue = multiprocessing.Queue() + workers = [] + worker_args = (metric_aggregator, gt_folder, pred_folder, work_queue, + result_queue) + for _ in six.moves.range(num_workers): + workers.append( + multiprocessing.Process(target=_run_metrics_worker, args=worker_args)) + for worker in workers: + worker.start() + for ann_pair in _matched_annotations(gt_json, pred_json): + work_queue.put(ann_pair, block=True) + + # Will cause each worker to return a result and terminate upon recieving a + # None task. + for _ in six.moves.range(num_workers): + work_queue.put(None, block=True) + + # Retrieve results. 
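+    # Each worker pushes exactly one partially accumulated metric object onto
+    # the result queue; fold them all into this process's aggregator.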
+ for _ in six.moves.range(num_workers): + metric_aggregator.merge(result_queue.get(block=True)) + + for worker in workers: + worker.join() + else: + logging.info('Computing metric in a single process.') + annotation_pairs = _matched_annotations(gt_json, pred_json) + _compute_metric(metric_aggregator, gt_folder, pred_folder, annotation_pairs) + + is_thing = _is_thing_array(gt_json['categories'], ignored_label) + metric_aggregator.print_detailed_results( + is_thing=is_thing, print_digits=print_digits) + return metric_aggregator.detailed_results(is_thing=is_thing) + + +def main(argv): + if len(argv) > 1: + raise app.UsageError('Too many command-line arguments.') + + eval_coco_format(FLAGS.gt_json_file, FLAGS.pred_json_file, FLAGS.gt_folder, + FLAGS.pred_folder, FLAGS.metric, FLAGS.num_categories, + FLAGS.ignored_label, FLAGS.max_instances_per_category, + FLAGS.intersection_offset, FLAGS.normalize_by_image_size, + FLAGS.num_workers, FLAGS.print_digits) + + +if __name__ == '__main__': + flags.mark_flags_as_required( + ['gt_json_file', 'gt_folder', 'pred_json_file', 'pred_folder']) + app.run(main) diff --git a/models/research/deeplab/evaluation/eval_coco_format_test.py b/models/research/deeplab/evaluation/eval_coco_format_test.py new file mode 100644 index 0000000000000000000000000000000000000000..d9093ff127e5dce27775421b9d136fc7cbc27c77 --- /dev/null +++ b/models/research/deeplab/evaluation/eval_coco_format_test.py @@ -0,0 +1,140 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for eval_coco_format script.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from absl import flags +from absl.testing import absltest +import evaluation as panopticapi_eval + +from deeplab.evaluation import eval_coco_format + +_TEST_DIR = 'deeplab/evaluation/testdata' + +FLAGS = flags.FLAGS + + +class EvalCocoFormatTest(absltest.TestCase): + + def test_compare_pq_with_reference_eval(self): + sample_data_dir = os.path.join(_TEST_DIR) + gt_json_file = os.path.join(sample_data_dir, 'coco_gt.json') + gt_folder = os.path.join(sample_data_dir, 'coco_gt') + pred_json_file = os.path.join(sample_data_dir, 'coco_pred.json') + pred_folder = os.path.join(sample_data_dir, 'coco_pred') + + panopticapi_results = panopticapi_eval.pq_compute( + gt_json_file, pred_json_file, gt_folder, pred_folder) + deeplab_results = eval_coco_format.eval_coco_format( + gt_json_file, + pred_json_file, + gt_folder, + pred_folder, + metric='pq', + num_categories=7, + ignored_label=0, + max_instances_per_category=256, + intersection_offset=(256 * 256)) + self.assertCountEqual( + list(deeplab_results.keys()), ['All', 'Things', 'Stuff']) + for cat_group in ['All', 'Things', 'Stuff']: + self.assertCountEqual(deeplab_results[cat_group], ['pq', 'sq', 'rq', 'n']) + for metric in ['pq', 'sq', 'rq', 'n']: + self.assertAlmostEqual(deeplab_results[cat_group][metric], + panopticapi_results[cat_group][metric]) + + def test_compare_pc_with_golden_value(self): + sample_data_dir = os.path.join(_TEST_DIR) + gt_json_file = os.path.join(sample_data_dir, 'coco_gt.json') + gt_folder = os.path.join(sample_data_dir, 'coco_gt') + pred_json_file = os.path.join(sample_data_dir, 'coco_pred.json') + pred_folder = os.path.join(sample_data_dir, 'coco_pred') + + deeplab_results = eval_coco_format.eval_coco_format( + gt_json_file, + pred_json_file, + gt_folder, + pred_folder, + metric='pc', + num_categories=7, + ignored_label=0, + max_instances_per_category=256, + intersection_offset=(256 * 256), + normalize_by_image_size=False) + self.assertCountEqual( + list(deeplab_results.keys()), ['All', 'Things', 'Stuff']) + for cat_group in ['All', 'Things', 'Stuff']: + self.assertCountEqual(deeplab_results[cat_group], ['pc', 'n']) + self.assertAlmostEqual(deeplab_results['All']['pc'], 0.68210561) + self.assertEqual(deeplab_results['All']['n'], 6) + self.assertAlmostEqual(deeplab_results['Things']['pc'], 0.5890529) + self.assertEqual(deeplab_results['Things']['n'], 4) + self.assertAlmostEqual(deeplab_results['Stuff']['pc'], 0.86821097) + self.assertEqual(deeplab_results['Stuff']['n'], 2) + + def test_compare_pc_with_golden_value_normalize_by_size(self): + sample_data_dir = os.path.join(_TEST_DIR) + gt_json_file = os.path.join(sample_data_dir, 'coco_gt.json') + gt_folder = os.path.join(sample_data_dir, 'coco_gt') + pred_json_file = os.path.join(sample_data_dir, 'coco_pred.json') + pred_folder = os.path.join(sample_data_dir, 'coco_pred') + + deeplab_results = eval_coco_format.eval_coco_format( + gt_json_file, + pred_json_file, + gt_folder, + pred_folder, + metric='pc', + num_categories=7, + ignored_label=0, + max_instances_per_category=256, + intersection_offset=(256 * 256), + normalize_by_image_size=True) + self.assertCountEqual( + list(deeplab_results.keys()), ['All', 'Things', 'Stuff']) + self.assertAlmostEqual(deeplab_results['All']['pc'], 0.68214908840) + + def 
test_pc_with_multiple_workers(self): + sample_data_dir = os.path.join(_TEST_DIR) + gt_json_file = os.path.join(sample_data_dir, 'coco_gt.json') + gt_folder = os.path.join(sample_data_dir, 'coco_gt') + pred_json_file = os.path.join(sample_data_dir, 'coco_pred.json') + pred_folder = os.path.join(sample_data_dir, 'coco_pred') + + deeplab_results = eval_coco_format.eval_coco_format( + gt_json_file, + pred_json_file, + gt_folder, + pred_folder, + metric='pc', + num_categories=7, + ignored_label=0, + max_instances_per_category=256, + intersection_offset=(256 * 256), + num_workers=3, + normalize_by_image_size=False) + self.assertCountEqual( + list(deeplab_results.keys()), ['All', 'Things', 'Stuff']) + self.assertAlmostEqual(deeplab_results['All']['pc'], 0.68210561668) + + +if __name__ == '__main__': + absltest.main() diff --git a/models/research/deeplab/evaluation/g3doc/img/equation_pc.png b/models/research/deeplab/evaluation/g3doc/img/equation_pc.png new file mode 100644 index 0000000000000000000000000000000000000000..90f15e7a461f929db9774f2c3c7e9dca549f433b Binary files /dev/null and b/models/research/deeplab/evaluation/g3doc/img/equation_pc.png differ diff --git a/models/research/deeplab/evaluation/g3doc/img/equation_pq.png b/models/research/deeplab/evaluation/g3doc/img/equation_pq.png new file mode 100644 index 0000000000000000000000000000000000000000..13a4393c181f27f5eb9be43f47e9cb132b28b924 Binary files /dev/null and b/models/research/deeplab/evaluation/g3doc/img/equation_pq.png differ diff --git a/models/research/deeplab/evaluation/panoptic_quality.py b/models/research/deeplab/evaluation/panoptic_quality.py new file mode 100644 index 0000000000000000000000000000000000000000..f7d0f3f98f09819feda52bc89069333665ff5d94 --- /dev/null +++ b/models/research/deeplab/evaluation/panoptic_quality.py @@ -0,0 +1,259 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Implementation of the Panoptic Quality metric. + +Panoptic Quality is an instance-based metric for evaluating the task of +image parsing, aka panoptic segmentation. + +Please see the paper for details: +"Panoptic Segmentation", Alexander Kirillov, Kaiming He, Ross Girshick, +Carsten Rother and Piotr Dollar. arXiv:1801.00868, 2018. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import numpy as np +import prettytable +import six + +from deeplab.evaluation import base_metric + + +def _ids_to_counts(id_array): + """Given a numpy array, a mapping from each unique entry to its count.""" + ids, counts = np.unique(id_array, return_counts=True) + return dict(six.moves.zip(ids, counts)) + + +class PanopticQuality(base_metric.SegmentationMetric): + """Metric class for Panoptic Quality. 
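+
+  Accumulates, per category, the number of matched segments (true positives),
+  unmatched groundtruth and predicted segments (false negatives and false
+  positives), and the summed IoU of matches; `result()` reports PQ averaged
+  over the valid categories.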
+ + "Panoptic Segmentation" by Alexander Kirillov, Kaiming He, Ross Girshick, + Carsten Rother, Piotr Dollar. + https://arxiv.org/abs/1801.00868 + """ + + def compare_and_accumulate( + self, groundtruth_category_array, groundtruth_instance_array, + predicted_category_array, predicted_instance_array): + """See base class.""" + # First, combine the category and instance labels so that every unique + # value for (category, instance) is assigned a unique integer label. + pred_segment_id = self._naively_combine_labels(predicted_category_array, + predicted_instance_array) + gt_segment_id = self._naively_combine_labels(groundtruth_category_array, + groundtruth_instance_array) + + # Pre-calculate areas for all groundtruth and predicted segments. + gt_segment_areas = _ids_to_counts(gt_segment_id) + pred_segment_areas = _ids_to_counts(pred_segment_id) + + # We assume there is only one void segment and it has instance id = 0. + void_segment_id = self.ignored_label * self.max_instances_per_category + + # There may be other ignored groundtruth segments with instance id > 0, find + # those ids using the unique segment ids extracted with the area computation + # above. + ignored_segment_ids = { + gt_segment_id for gt_segment_id in six.iterkeys(gt_segment_areas) + if (gt_segment_id // + self.max_instances_per_category) == self.ignored_label + } + + # Next, combine the groundtruth and predicted labels. Dividing up the pixels + # based on which groundtruth segment and which predicted segment they belong + # to, this will assign a different 32-bit integer label to each choice + # of (groundtruth segment, predicted segment), encoded as + # gt_segment_id * offset + pred_segment_id. + intersection_id_array = ( + gt_segment_id.astype(np.uint32) * self.offset + + pred_segment_id.astype(np.uint32)) + + # For every combination of (groundtruth segment, predicted segment) with a + # non-empty intersection, this counts the number of pixels in that + # intersection. + intersection_areas = _ids_to_counts(intersection_id_array) + + # Helper function that computes the area of the overlap between a predicted + # segment and the ground-truth void/ignored segment. + def prediction_void_overlap(pred_segment_id): + void_intersection_id = void_segment_id * self.offset + pred_segment_id + return intersection_areas.get(void_intersection_id, 0) + + # Compute overall ignored overlap. + def prediction_ignored_overlap(pred_segment_id): + total_ignored_overlap = 0 + for ignored_segment_id in ignored_segment_ids: + intersection_id = ignored_segment_id * self.offset + pred_segment_id + total_ignored_overlap += intersection_areas.get(intersection_id, 0) + return total_ignored_overlap + + # Sets that are populated with which segments groundtruth/predicted segments + # have been matched with overlapping predicted/groundtruth segments + # respectively. + gt_matched = set() + pred_matched = set() + + # Calculate IoU per pair of intersecting segments of the same category. + for intersection_id, intersection_area in six.iteritems(intersection_areas): + gt_segment_id = intersection_id // self.offset + pred_segment_id = intersection_id % self.offset + + gt_category = gt_segment_id // self.max_instances_per_category + pred_category = pred_segment_id // self.max_instances_per_category + if gt_category != pred_category: + continue + + # Union between the groundtruth and predicted segments being compared does + # not include the portion of the predicted segment that consists of + # groundtruth "void" pixels. 
+ union = ( + gt_segment_areas[gt_segment_id] + + pred_segment_areas[pred_segment_id] - intersection_area - + prediction_void_overlap(pred_segment_id)) + iou = intersection_area / union + if iou > 0.5: + self.tp_per_class[gt_category] += 1 + self.iou_per_class[gt_category] += iou + gt_matched.add(gt_segment_id) + pred_matched.add(pred_segment_id) + + # Count false negatives for each category. + for gt_segment_id in six.iterkeys(gt_segment_areas): + if gt_segment_id in gt_matched: + continue + category = gt_segment_id // self.max_instances_per_category + # Failing to detect a void segment is not a false negative. + if category == self.ignored_label: + continue + self.fn_per_class[category] += 1 + + # Count false positives for each category. + for pred_segment_id in six.iterkeys(pred_segment_areas): + if pred_segment_id in pred_matched: + continue + # A false positive is not penalized if is mostly ignored in the + # groundtruth. + if (prediction_ignored_overlap(pred_segment_id) / + pred_segment_areas[pred_segment_id]) > 0.5: + continue + category = pred_segment_id // self.max_instances_per_category + self.fp_per_class[category] += 1 + + return self.result() + + def _valid_categories(self): + """Categories with a "valid" value for the metric, have > 0 instances. + + We will ignore the `ignore_label` class and other classes which have + `tp + fn + fp = 0`. + + Returns: + Boolean array of shape `[num_categories]`. + """ + valid_categories = np.not_equal( + self.tp_per_class + self.fn_per_class + self.fp_per_class, 0) + if self.ignored_label >= 0 and self.ignored_label < self.num_categories: + valid_categories[self.ignored_label] = False + return valid_categories + + def detailed_results(self, is_thing=None): + """See base class.""" + valid_categories = self._valid_categories() + + # If known, break down which categories are valid _and_ things/stuff. + category_sets = collections.OrderedDict() + category_sets['All'] = valid_categories + if is_thing is not None: + category_sets['Things'] = np.logical_and(valid_categories, is_thing) + category_sets['Stuff'] = np.logical_and(valid_categories, + np.logical_not(is_thing)) + + # Compute individual per-class metrics that constitute factors of PQ. + sq = base_metric.realdiv_maybe_zero(self.iou_per_class, self.tp_per_class) + rq = base_metric.realdiv_maybe_zero( + self.tp_per_class, + self.tp_per_class + 0.5 * self.fn_per_class + 0.5 * self.fp_per_class) + pq = np.multiply(sq, rq) + + # Assemble detailed results dictionary. + results = {} + for category_set_name, in_category_set in six.iteritems(category_sets): + if np.any(in_category_set): + results[category_set_name] = { + 'pq': np.mean(pq[in_category_set]), + 'sq': np.mean(sq[in_category_set]), + 'rq': np.mean(rq[in_category_set]), + # The number of categories in this subset. 
+ 'n': np.sum(in_category_set.astype(np.int32)), + } + else: + results[category_set_name] = {'pq': 0, 'sq': 0, 'rq': 0, 'n': 0} + + return results + + def result_per_category(self): + """See base class.""" + sq = base_metric.realdiv_maybe_zero(self.iou_per_class, self.tp_per_class) + rq = base_metric.realdiv_maybe_zero( + self.tp_per_class, + self.tp_per_class + 0.5 * self.fn_per_class + 0.5 * self.fp_per_class) + return np.multiply(sq, rq) + + def print_detailed_results(self, is_thing=None, print_digits=3): + """See base class.""" + results = self.detailed_results(is_thing=is_thing) + + tab = prettytable.PrettyTable() + + tab.add_column('', [], align='l') + for fieldname in ['PQ', 'SQ', 'RQ', 'N']: + tab.add_column(fieldname, [], align='r') + + for category_set, subset_results in six.iteritems(results): + data_cols = [ + round(subset_results[col_key], print_digits) * 100 + for col_key in ['pq', 'sq', 'rq'] + ] + data_cols += [subset_results['n']] + tab.add_row([category_set] + data_cols) + + print(tab) + + def result(self): + """See base class.""" + pq_per_class = self.result_per_category() + valid_categories = self._valid_categories() + if not np.any(valid_categories): + return 0. + return np.mean(pq_per_class[valid_categories]) + + def merge(self, other_instance): + """See base class.""" + self.iou_per_class += other_instance.iou_per_class + self.tp_per_class += other_instance.tp_per_class + self.fn_per_class += other_instance.fn_per_class + self.fp_per_class += other_instance.fp_per_class + + def reset(self): + """See base class.""" + self.iou_per_class = np.zeros(self.num_categories, dtype=np.float64) + self.tp_per_class = np.zeros(self.num_categories, dtype=np.float64) + self.fn_per_class = np.zeros(self.num_categories, dtype=np.float64) + self.fp_per_class = np.zeros(self.num_categories, dtype=np.float64) diff --git a/models/research/deeplab/evaluation/panoptic_quality_test.py b/models/research/deeplab/evaluation/panoptic_quality_test.py new file mode 100644 index 0000000000000000000000000000000000000000..00c88c293b8edc39b7ceb28b2fc7fea4da1c3cb0 --- /dev/null +++ b/models/research/deeplab/evaluation/panoptic_quality_test.py @@ -0,0 +1,336 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for Panoptic Quality metric.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +from absl.testing import absltest +import numpy as np +import six + +from deeplab.evaluation import panoptic_quality +from deeplab.evaluation import test_utils + +# See the definition of the color names at: +# https://en.wikipedia.org/wiki/Web_colors. +_CLASS_COLOR_MAP = { + (0, 0, 0): 0, + (0, 0, 255): 1, # Person (blue). + (255, 0, 0): 2, # Bear (red). + (0, 255, 0): 3, # Tree (lime). + (255, 0, 255): 4, # Bird (fuchsia). + (0, 255, 255): 5, # Sky (aqua). 
+ (255, 255, 0): 6, # Cat (yellow). +} + + +class PanopticQualityTest(absltest.TestCase): + + def test_perfect_match(self): + categories = np.zeros([6, 6], np.uint16) + instances = np.array([ + [1, 1, 1, 1, 1, 1], + [1, 2, 2, 2, 2, 1], + [1, 2, 2, 2, 2, 1], + [1, 2, 2, 2, 2, 1], + [1, 2, 2, 1, 1, 1], + [1, 2, 1, 1, 1, 1], + ], + dtype=np.uint16) + + pq = panoptic_quality.PanopticQuality( + num_categories=1, + ignored_label=2, + max_instances_per_category=16, + offset=16) + pq.compare_and_accumulate(categories, instances, categories, instances) + np.testing.assert_array_equal(pq.iou_per_class, [2.0]) + np.testing.assert_array_equal(pq.tp_per_class, [2]) + np.testing.assert_array_equal(pq.fn_per_class, [0]) + np.testing.assert_array_equal(pq.fp_per_class, [0]) + np.testing.assert_array_equal(pq.result_per_category(), [1.0]) + self.assertEqual(pq.result(), 1.0) + + def test_totally_wrong(self): + det_categories = np.array([ + [0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 1, 0], + [0, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ], + dtype=np.uint16) + gt_categories = 1 - det_categories + instances = np.zeros([6, 6], np.uint16) + + pq = panoptic_quality.PanopticQuality( + num_categories=2, + ignored_label=2, + max_instances_per_category=1, + offset=16) + pq.compare_and_accumulate(gt_categories, instances, det_categories, + instances) + np.testing.assert_array_equal(pq.iou_per_class, [0.0, 0.0]) + np.testing.assert_array_equal(pq.tp_per_class, [0, 0]) + np.testing.assert_array_equal(pq.fn_per_class, [1, 1]) + np.testing.assert_array_equal(pq.fp_per_class, [1, 1]) + np.testing.assert_array_equal(pq.result_per_category(), [0.0, 0.0]) + self.assertEqual(pq.result(), 0.0) + + def test_matches_by_iou(self): + good_det_labels = np.array( + [ + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 2, 2, 2, 2, 1], + [1, 2, 2, 2, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + ], + dtype=np.uint16) + gt_labels = np.array( + [ + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 2, 2, 2, 1], + [1, 2, 2, 2, 2, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + ], + dtype=np.uint16) + + pq = panoptic_quality.PanopticQuality( + num_categories=1, + ignored_label=2, + max_instances_per_category=16, + offset=16) + pq.compare_and_accumulate( + np.zeros_like(gt_labels), gt_labels, np.zeros_like(good_det_labels), + good_det_labels) + + # iou(1, 1) = 28/30 + # iou(2, 2) = 6/8 + np.testing.assert_array_almost_equal(pq.iou_per_class, [28 / 30 + 6 / 8]) + np.testing.assert_array_equal(pq.tp_per_class, [2]) + np.testing.assert_array_equal(pq.fn_per_class, [0]) + np.testing.assert_array_equal(pq.fp_per_class, [0]) + self.assertAlmostEqual(pq.result(), (28 / 30 + 6 / 8) / 2) + + bad_det_labels = np.array( + [ + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 2, 2, 1], + [1, 1, 1, 2, 2, 1], + [1, 1, 1, 2, 2, 1], + [1, 1, 1, 1, 1, 1], + ], + dtype=np.uint16) + + pq.reset() + pq.compare_and_accumulate( + np.zeros_like(gt_labels), gt_labels, np.zeros_like(bad_det_labels), + bad_det_labels) + + # iou(1, 1) = 27/32 + np.testing.assert_array_almost_equal(pq.iou_per_class, [27 / 32]) + np.testing.assert_array_equal(pq.tp_per_class, [1]) + np.testing.assert_array_equal(pq.fn_per_class, [1]) + np.testing.assert_array_equal(pq.fp_per_class, [1]) + self.assertAlmostEqual(pq.result(), (27 / 32) * (1 / 2)) + + def test_wrong_instances(self): + categories = np.array([ + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 2, 2, 1, 2, 2], + [1, 2, 2, 1, 2, 2], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + ], 
+ dtype=np.uint16) + predicted_instances = np.array([ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ], + dtype=np.uint16) + groundtruth_instances = np.zeros([6, 6], dtype=np.uint16) + + pq = panoptic_quality.PanopticQuality( + num_categories=3, + ignored_label=0, + max_instances_per_category=10, + offset=100) + pq.compare_and_accumulate(categories, groundtruth_instances, categories, + predicted_instances) + + np.testing.assert_array_equal(pq.iou_per_class, [0.0, 1.0, 0.0]) + np.testing.assert_array_equal(pq.tp_per_class, [0, 1, 0]) + np.testing.assert_array_equal(pq.fn_per_class, [0, 0, 1]) + np.testing.assert_array_equal(pq.fp_per_class, [0, 0, 2]) + np.testing.assert_array_equal(pq.result_per_category(), [0, 1, 0]) + self.assertAlmostEqual(pq.result(), 0.5) + + def test_instance_order_is_arbitrary(self): + categories = np.array([ + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 2, 2, 1, 2, 2], + [1, 2, 2, 1, 2, 2], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + ], + dtype=np.uint16) + predicted_instances = np.array([ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ], + dtype=np.uint16) + groundtruth_instances = np.array([ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 1, 1, 0, 0, 0], + [0, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ], + dtype=np.uint16) + + pq = panoptic_quality.PanopticQuality( + num_categories=3, + ignored_label=0, + max_instances_per_category=10, + offset=100) + pq.compare_and_accumulate(categories, groundtruth_instances, categories, + predicted_instances) + + np.testing.assert_array_equal(pq.iou_per_class, [0.0, 1.0, 2.0]) + np.testing.assert_array_equal(pq.tp_per_class, [0, 1, 2]) + np.testing.assert_array_equal(pq.fn_per_class, [0, 0, 0]) + np.testing.assert_array_equal(pq.fp_per_class, [0, 0, 0]) + np.testing.assert_array_equal(pq.result_per_category(), [0, 1, 1]) + self.assertAlmostEqual(pq.result(), 1.0) + + def test_matches_expected(self): + pred_classes = test_utils.read_segmentation_with_rgb_color_map( + 'team_pred_class.png', _CLASS_COLOR_MAP) + pred_instances = test_utils.read_test_image( + 'team_pred_instance.png', mode='L') + + instance_class_map = { + 0: 0, + 47: 1, + 97: 1, + 133: 1, + 150: 1, + 174: 1, + 198: 2, + 215: 1, + 244: 1, + 255: 1, + } + gt_instances, gt_classes = test_utils.panoptic_segmentation_with_class_map( + 'team_gt_instance.png', instance_class_map) + + pq = panoptic_quality.PanopticQuality( + num_categories=3, + ignored_label=0, + max_instances_per_category=256, + offset=256 * 256) + pq.compare_and_accumulate(gt_classes, gt_instances, pred_classes, + pred_instances) + np.testing.assert_array_almost_equal( + pq.iou_per_class, [2.06104, 5.26827, 0.54069], decimal=4) + np.testing.assert_array_equal(pq.tp_per_class, [1, 7, 1]) + np.testing.assert_array_equal(pq.fn_per_class, [0, 1, 0]) + np.testing.assert_array_equal(pq.fp_per_class, [0, 0, 0]) + np.testing.assert_array_almost_equal(pq.result_per_category(), + [2.061038, 0.702436, 0.54069]) + self.assertAlmostEqual(pq.result(), 0.62156287) + + def test_merge_accumulates_all_across_instances(self): + categories = np.zeros([6, 6], np.uint16) + good_det_labels = np.array([ + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 2, 2, 2, 2, 1], + [1, 2, 2, 2, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + ], + dtype=np.uint16) + gt_labels = np.array([ + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + 
[1, 1, 2, 2, 2, 1], + [1, 2, 2, 2, 2, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + ], + dtype=np.uint16) + + good_pq = panoptic_quality.PanopticQuality( + num_categories=1, + ignored_label=2, + max_instances_per_category=16, + offset=16) + for _ in six.moves.range(2): + good_pq.compare_and_accumulate(categories, gt_labels, categories, + good_det_labels) + + bad_det_labels = np.array([ + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [1, 1, 1, 2, 2, 1], + [1, 1, 1, 2, 2, 1], + [1, 1, 1, 2, 2, 1], + [1, 1, 1, 1, 1, 1], + ], + dtype=np.uint16) + + bad_pq = panoptic_quality.PanopticQuality( + num_categories=1, + ignored_label=2, + max_instances_per_category=16, + offset=16) + for _ in six.moves.range(2): + bad_pq.compare_and_accumulate(categories, gt_labels, categories, + bad_det_labels) + + good_pq.merge(bad_pq) + + np.testing.assert_array_almost_equal( + good_pq.iou_per_class, [2 * (28 / 30 + 6 / 8) + 2 * (27 / 32)]) + np.testing.assert_array_equal(good_pq.tp_per_class, [2 * 2 + 2]) + np.testing.assert_array_equal(good_pq.fn_per_class, [2]) + np.testing.assert_array_equal(good_pq.fp_per_class, [2]) + self.assertAlmostEqual(good_pq.result(), 0.63177083) + + +if __name__ == '__main__': + absltest.main() diff --git a/models/research/deeplab/evaluation/parsing_covering.py b/models/research/deeplab/evaluation/parsing_covering.py new file mode 100644 index 0000000000000000000000000000000000000000..a40e55fc6be7a7563ceba75db17b188244dad832 --- /dev/null +++ b/models/research/deeplab/evaluation/parsing_covering.py @@ -0,0 +1,246 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Implementation of the Parsing Covering metric. + +Parsing Covering is a region-based metric for evaluating the task of +image parsing, aka panoptic segmentation. + +Please see the paper for details: +"DeeperLab: Single-Shot Image Parser", Tien-Ju Yang, Maxwell D. Collins, +Yukun Zhu, Jyh-Jing Hwang, Ting Liu, Xiao Zhang, Vivienne Sze, +George Papandreou, Liang-Chieh Chen. arXiv: 1902.05093, 2019. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +import numpy as np +import prettytable +import six + +from deeplab.evaluation import base_metric + + +class ParsingCovering(base_metric.SegmentationMetric): + r"""Metric class for Parsing Covering. + + Computes segmentation covering metric introduced in (Arbelaez, et al., 2010) + with extension to handle multi-class semantic labels (a.k.a. parsing + covering). Specifically, segmentation covering (SC) is defined in Eq. (8) in + (Arbelaez et al., 2010) as: + + SC(c) = \sum_{R\in S}(|R| * \max_{R'\in S'}O(R,R')) / \sum_{R\in S}|R|, + + where S are the groundtruth instance regions and S' are the predicted + instance regions. 
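+  Here |R| denotes the number of pixels in region R, and O(R,R') is the
+  intersection-over-union between regions R and R'.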
+  The parsing covering is simply:
+
+    PC = \sum_{c=1}^{C}SC(c) / C,
+
+  where C is the number of classes.
+  """
+
+  def __init__(self,
+               num_categories,
+               ignored_label,
+               max_instances_per_category,
+               offset,
+               normalize_by_image_size=True):
+    """Initialization for ParsingCovering.
+
+    Args:
+      num_categories: The number of segmentation categories (or "classes") in
+        the dataset.
+      ignored_label: A category id that is ignored in evaluation, e.g. the void
+        label as defined in the COCO panoptic segmentation dataset.
+      max_instances_per_category: The maximum number of instances for each
+        category. Used in ensuring unique instance labels.
+      offset: The maximum number of unique labels. This is used, by multiplying
+        the ground-truth labels, to generate unique ids for individual regions
+        of overlap between groundtruth and predicted segments.
+      normalize_by_image_size: Whether to normalize groundtruth instance region
+        areas by image size. If True, groundtruth instance areas and weighted
+        IoUs will be divided by the size of the corresponding image before
+        being accumulated across the dataset.
+    """
+    super(ParsingCovering, self).__init__(num_categories, ignored_label,
+                                          max_instances_per_category, offset)
+    self.normalize_by_image_size = normalize_by_image_size
+
+  def compare_and_accumulate(
+      self, groundtruth_category_array, groundtruth_instance_array,
+      predicted_category_array, predicted_instance_array):
+    """See base class."""
+    # Allocate intermediate data structures.
+    max_ious = np.zeros([self.num_categories, self.max_instances_per_category],
+                        dtype=np.float64)
+    gt_areas = np.zeros([self.num_categories, self.max_instances_per_category],
+                        dtype=np.float64)
+    pred_areas = np.zeros(
+        [self.num_categories, self.max_instances_per_category],
+        dtype=np.float64)
+    # This is a dictionary in the format:
+    #   {(category, gt_instance): [(pred_instance, intersection_area)]}.
+    intersections = collections.defaultdict(list)
+
+    # First, combine the category and instance labels so that every unique
+    # value for (category, instance) is assigned a unique integer label.
+    pred_segment_id = self._naively_combine_labels(predicted_category_array,
+                                                   predicted_instance_array)
+    gt_segment_id = self._naively_combine_labels(groundtruth_category_array,
+                                                 groundtruth_instance_array)
+
+    # Next, combine the groundtruth and predicted labels. Dividing up the
+    # pixels based on which groundtruth segment and which predicted segment
+    # they belong to, this will assign a different 32-bit integer label to
+    # each choice of (groundtruth segment, predicted segment), encoded as
+    #   gt_segment_id * offset + pred_segment_id.
+    intersection_id_array = (
+        gt_segment_id.astype(np.uint32) * self.offset +
+        pred_segment_id.astype(np.uint32))
+
+    # For every combination of (groundtruth segment, predicted segment) with a
+    # non-empty intersection, this counts the number of pixels in that
+    # intersection.
+    intersection_ids, intersection_areas = np.unique(
+        intersection_id_array, return_counts=True)
+
+    # Find areas of all groundtruth and predicted instances, as well as of
+    # their intersections.
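+    # Each intersection_id below decodes back into its two segment ids as
+    #   gt_segment_id = intersection_id // offset and
+    #   pred_segment_id = intersection_id % offset,
+    # and each segment id further splits into (category, instance) using
+    # max_instances_per_category, mirroring the encoding above.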
+ for intersection_id, intersection_area in six.moves.zip( + intersection_ids, intersection_areas): + gt_segment_id = intersection_id // self.offset + gt_category = gt_segment_id // self.max_instances_per_category + if gt_category == self.ignored_label: + continue + gt_instance = gt_segment_id % self.max_instances_per_category + gt_areas[gt_category, gt_instance] += intersection_area + + pred_segment_id = intersection_id % self.offset + pred_category = pred_segment_id // self.max_instances_per_category + pred_instance = pred_segment_id % self.max_instances_per_category + pred_areas[pred_category, pred_instance] += intersection_area + if pred_category != gt_category: + continue + + intersections[gt_category, gt_instance].append((pred_instance, + intersection_area)) + + # Find maximum IoU for every groundtruth instance. + for gt_label, instance_intersections in six.iteritems(intersections): + category, gt_instance = gt_label + gt_area = gt_areas[category, gt_instance] + ious = [] + for pred_instance, intersection_area in instance_intersections: + pred_area = pred_areas[category, pred_instance] + union = gt_area + pred_area - intersection_area + ious.append(intersection_area / union) + max_ious[category, gt_instance] = max(ious) + + # Normalize groundtruth instance areas by image size if necessary. + if self.normalize_by_image_size: + gt_areas /= groundtruth_category_array.size + + # Compute per-class weighted IoUs and areas summed over all groundtruth + # instances. + self.weighted_iou_per_class += np.sum(max_ious * gt_areas, axis=-1) + self.gt_area_per_class += np.sum(gt_areas, axis=-1) + + return self.result() + + def result_per_category(self): + """See base class.""" + return base_metric.realdiv_maybe_zero(self.weighted_iou_per_class, + self.gt_area_per_class) + + def _valid_categories(self): + """Categories with a "valid" value for the metric, have > 0 instances. + + We will ignore the `ignore_label` class and other classes which have + groundtruth area of 0. + + Returns: + Boolean array of shape `[num_categories]`. + """ + valid_categories = np.not_equal(self.gt_area_per_class, 0) + if self.ignored_label >= 0 and self.ignored_label < self.num_categories: + valid_categories[self.ignored_label] = False + return valid_categories + + def detailed_results(self, is_thing=None): + """See base class.""" + valid_categories = self._valid_categories() + + # If known, break down which categories are valid _and_ things/stuff. + category_sets = collections.OrderedDict() + category_sets['All'] = valid_categories + if is_thing is not None: + category_sets['Things'] = np.logical_and(valid_categories, is_thing) + category_sets['Stuff'] = np.logical_and(valid_categories, + np.logical_not(is_thing)) + + covering_per_class = self.result_per_category() + results = {} + for category_set_name, in_category_set in six.iteritems(category_sets): + if np.any(in_category_set): + results[category_set_name] = { + 'pc': np.mean(covering_per_class[in_category_set]), + # The number of valid categories in this subset. 
+ 'n': np.sum(in_category_set.astype(np.int32)), + } + else: + results[category_set_name] = {'pc': 0, 'n': 0} + + return results + + def print_detailed_results(self, is_thing=None, print_digits=3): + """See base class.""" + results = self.detailed_results(is_thing=is_thing) + + tab = prettytable.PrettyTable() + + tab.add_column('', [], align='l') + for fieldname in ['PC', 'N']: + tab.add_column(fieldname, [], align='r') + + for category_set, subset_results in six.iteritems(results): + data_cols = [ + round(subset_results['pc'], print_digits) * 100, subset_results['n'] + ] + tab.add_row([category_set] + data_cols) + + print(tab) + + def result(self): + """See base class.""" + covering_per_class = self.result_per_category() + valid_categories = self._valid_categories() + if not np.any(valid_categories): + return 0. + return np.mean(covering_per_class[valid_categories]) + + def merge(self, other_instance): + """See base class.""" + self.weighted_iou_per_class += other_instance.weighted_iou_per_class + self.gt_area_per_class += other_instance.gt_area_per_class + + def reset(self): + """See base class.""" + self.weighted_iou_per_class = np.zeros( + self.num_categories, dtype=np.float64) + self.gt_area_per_class = np.zeros(self.num_categories, dtype=np.float64) diff --git a/models/research/deeplab/evaluation/parsing_covering_test.py b/models/research/deeplab/evaluation/parsing_covering_test.py new file mode 100644 index 0000000000000000000000000000000000000000..124d1b372559ef672ea1c9f821eac3fec52c97ea --- /dev/null +++ b/models/research/deeplab/evaluation/parsing_covering_test.py @@ -0,0 +1,173 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for Parsing Covering metric.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + + +from absl.testing import absltest +import numpy as np + +from deeplab.evaluation import parsing_covering +from deeplab.evaluation import test_utils + +# See the definition of the color names at: +# https://en.wikipedia.org/wiki/Web_colors. +_CLASS_COLOR_MAP = { + (0, 0, 0): 0, + (0, 0, 255): 1, # Person (blue). + (255, 0, 0): 2, # Bear (red). + (0, 255, 0): 3, # Tree (lime). + (255, 0, 255): 4, # Bird (fuchsia). + (0, 255, 255): 5, # Sky (aqua). + (255, 255, 0): 6, # Cat (yellow). 
+} + + +class CoveringConveringTest(absltest.TestCase): + + def test_perfect_match(self): + categories = np.zeros([6, 6], np.uint16) + instances = np.array([ + [2, 2, 2, 2, 2, 2], + [2, 4, 4, 4, 4, 2], + [2, 4, 4, 4, 4, 2], + [2, 4, 4, 4, 4, 2], + [2, 4, 4, 2, 2, 2], + [2, 4, 2, 2, 2, 2], + ], + dtype=np.uint16) + + pc = parsing_covering.ParsingCovering( + num_categories=3, + ignored_label=2, + max_instances_per_category=2, + offset=16, + normalize_by_image_size=False) + pc.compare_and_accumulate(categories, instances, categories, instances) + np.testing.assert_array_equal(pc.weighted_iou_per_class, [0.0, 21.0, 0.0]) + np.testing.assert_array_equal(pc.gt_area_per_class, [0.0, 21.0, 0.0]) + np.testing.assert_array_equal(pc.result_per_category(), [0.0, 1.0, 0.0]) + self.assertEqual(pc.result(), 1.0) + + def test_totally_wrong(self): + categories = np.zeros([6, 6], np.uint16) + gt_instances = np.array([ + [0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 1, 0], + [0, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ], + dtype=np.uint16) + pred_instances = 1 - gt_instances + + pc = parsing_covering.ParsingCovering( + num_categories=2, + ignored_label=0, + max_instances_per_category=1, + offset=16, + normalize_by_image_size=False) + pc.compare_and_accumulate(categories, gt_instances, categories, + pred_instances) + np.testing.assert_array_equal(pc.weighted_iou_per_class, [0.0, 0.0]) + np.testing.assert_array_equal(pc.gt_area_per_class, [0.0, 10.0]) + np.testing.assert_array_equal(pc.result_per_category(), [0.0, 0.0]) + self.assertEqual(pc.result(), 0.0) + + def test_matches_expected(self): + pred_classes = test_utils.read_segmentation_with_rgb_color_map( + 'team_pred_class.png', _CLASS_COLOR_MAP) + pred_instances = test_utils.read_test_image( + 'team_pred_instance.png', mode='L') + + instance_class_map = { + 0: 0, + 47: 1, + 97: 1, + 133: 1, + 150: 1, + 174: 1, + 198: 2, + 215: 1, + 244: 1, + 255: 1, + } + gt_instances, gt_classes = test_utils.panoptic_segmentation_with_class_map( + 'team_gt_instance.png', instance_class_map) + + pc = parsing_covering.ParsingCovering( + num_categories=3, + ignored_label=0, + max_instances_per_category=256, + offset=256 * 256, + normalize_by_image_size=False) + pc.compare_and_accumulate(gt_classes, gt_instances, pred_classes, + pred_instances) + np.testing.assert_array_almost_equal( + pc.weighted_iou_per_class, [0.0, 39864.14634, 3136], decimal=4) + np.testing.assert_array_equal(pc.gt_area_per_class, [0.0, 56870, 5800]) + np.testing.assert_array_almost_equal( + pc.result_per_category(), [0.0, 0.70097, 0.54069], decimal=4) + self.assertAlmostEqual(pc.result(), 0.6208296732) + + def test_matches_expected_normalize_by_size(self): + pred_classes = test_utils.read_segmentation_with_rgb_color_map( + 'team_pred_class.png', _CLASS_COLOR_MAP) + pred_instances = test_utils.read_test_image( + 'team_pred_instance.png', mode='L') + + instance_class_map = { + 0: 0, + 47: 1, + 97: 1, + 133: 1, + 150: 1, + 174: 1, + 198: 2, + 215: 1, + 244: 1, + 255: 1, + } + gt_instances, gt_classes = test_utils.panoptic_segmentation_with_class_map( + 'team_gt_instance.png', instance_class_map) + + pc = parsing_covering.ParsingCovering( + num_categories=3, + ignored_label=0, + max_instances_per_category=256, + offset=256 * 256, + normalize_by_image_size=True) + pc.compare_and_accumulate(gt_classes, gt_instances, pred_classes, + pred_instances) + np.testing.assert_array_almost_equal( + pc.weighted_iou_per_class, [0.0, 0.5002088756, 0.03935002196], + decimal=4) + 
np.testing.assert_array_almost_equal( + pc.gt_area_per_class, [0.0, 0.7135955832, 0.07277746408], decimal=4) + # Note that the per-category and overall PCs are identical to those without + # normalization in the previous test, because we only have a single image. + np.testing.assert_array_almost_equal( + pc.result_per_category(), [0.0, 0.70097, 0.54069], decimal=4) + self.assertAlmostEqual(pc.result(), 0.6208296732) + + +if __name__ == '__main__': + absltest.main() diff --git a/models/research/deeplab/evaluation/streaming_metrics.py b/models/research/deeplab/evaluation/streaming_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..8313792676a62e58af70300a0cfa43528a904435 --- /dev/null +++ b/models/research/deeplab/evaluation/streaming_metrics.py @@ -0,0 +1,240 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Code to compute segmentation in a "streaming" pattern in Tensorflow. + +These aggregate the metric over examples of the evaluation set. Each example is +assumed to be fed in in a stream, and the metric implementation accumulates +across them. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from deeplab.evaluation import panoptic_quality +from deeplab.evaluation import parsing_covering + +_EPSILON = 1e-10 + + +def _realdiv_maybe_zero(x, y): + """Support tf.realdiv(x, y) where y may contain zeros.""" + return tf.where(tf.less(y, _EPSILON), tf.zeros_like(x), tf.realdiv(x, y)) + + +def _running_total(value, shape, name=None): + """Maintains a running total of tensor `value` between calls.""" + with tf.variable_scope(name, 'running_total', [value]): + total_var = tf.get_variable( + 'total', + shape, + value.dtype, + initializer=tf.zeros_initializer(), + trainable=False, + collections=[ + tf.GraphKeys.LOCAL_VARIABLES, tf.GraphKeys.METRIC_VARIABLES + ]) + updated_total = tf.assign_add(total_var, value, use_locking=True) + + return total_var, updated_total + + +def _panoptic_quality_helper( + groundtruth_category_array, groundtruth_instance_array, + predicted_category_array, predicted_instance_array, num_classes, + max_instances_per_category, ignored_label, offset): + """Helper function to compute panoptic quality.""" + pq = panoptic_quality.PanopticQuality(num_classes, ignored_label, + max_instances_per_category, offset) + pq.compare_and_accumulate(groundtruth_category_array, + groundtruth_instance_array, + predicted_category_array, predicted_instance_array) + return pq.iou_per_class, pq.tp_per_class, pq.fn_per_class, pq.fp_per_class + + +def streaming_panoptic_quality(groundtruth_categories, + groundtruth_instances, + predicted_categories, + predicted_instances, + num_classes, + max_instances_per_category, + ignored_label, + offset, + name=None): + """Aggregates the panoptic metric across calls 
with different input tensors. + + See tf.metrics.* functions for comparable functionality and usage. + + Args: + groundtruth_categories: A 2D uint16 tensor of groundtruth category labels. + groundtruth_instances: A 2D uint16 tensor of groundtruth instance labels. + predicted_categories: A 2D uint16 tensor of predicted category labels. + predicted_instances: A 2D uint16 tensor of predicted instance labels. + num_classes: Number of classes in the dataset as an integer. + max_instances_per_category: The maximum number of instances for each class + as an integer or integer tensor. + ignored_label: The class id to be ignored in evaluation as an integer or + integer tensor. + offset: The maximum number of unique labels as an integer or integer tensor. + name: An optional variable_scope name. + + Returns: + qualities: A tensor of shape `[6, num_classes]`, where (1) panoptic quality, + (2) segmentation quality, (3) recognition quality, (4) total_tp, + (5) total_fn and (6) total_fp are saved in the respective rows. + update_ops: List of operations that update the running overall panoptic + quality. + + Raises: + RuntimeError: If eager execution is enabled. + """ + if tf.executing_eagerly(): + raise RuntimeError('Cannot aggregate when eager execution is enabled.') + + input_args = [ + tf.convert_to_tensor(groundtruth_categories, tf.uint16), + tf.convert_to_tensor(groundtruth_instances, tf.uint16), + tf.convert_to_tensor(predicted_categories, tf.uint16), + tf.convert_to_tensor(predicted_instances, tf.uint16), + tf.convert_to_tensor(num_classes, tf.int32), + tf.convert_to_tensor(max_instances_per_category, tf.int32), + tf.convert_to_tensor(ignored_label, tf.int32), + tf.convert_to_tensor(offset, tf.int32), + ] + return_types = [ + tf.float64, + tf.float64, + tf.float64, + tf.float64, + ] + with tf.variable_scope(name, 'streaming_panoptic_quality', input_args): + panoptic_results = tf.py_func( + _panoptic_quality_helper, input_args, return_types, stateful=False) + iou, tp, fn, fp = tuple(panoptic_results) + + total_iou, updated_iou = _running_total( + iou, [num_classes], name='iou_total') + total_tp, updated_tp = _running_total(tp, [num_classes], name='tp_total') + total_fn, updated_fn = _running_total(fn, [num_classes], name='fn_total') + total_fp, updated_fp = _running_total(fp, [num_classes], name='fp_total') + update_ops = [updated_iou, updated_tp, updated_fn, updated_fp] + + sq = _realdiv_maybe_zero(total_iou, total_tp) + rq = _realdiv_maybe_zero(total_tp, + total_tp + 0.5 * total_fn + 0.5 * total_fp) + pq = tf.multiply(sq, rq) + qualities = tf.stack([pq, sq, rq, total_tp, total_fn, total_fp], axis=0) + return qualities, update_ops + + +def _parsing_covering_helper( + groundtruth_category_array, groundtruth_instance_array, + predicted_category_array, predicted_instance_array, num_classes, + max_instances_per_category, ignored_label, offset, normalize_by_image_size): + """Helper function to compute parsing covering.""" + pc = parsing_covering.ParsingCovering(num_classes, ignored_label, + max_instances_per_category, offset, + normalize_by_image_size) + pc.compare_and_accumulate(groundtruth_category_array, + groundtruth_instance_array, + predicted_category_array, predicted_instance_array) + return pc.weighted_iou_per_class, pc.gt_area_per_class + + +def streaming_parsing_covering(groundtruth_categories, + groundtruth_instances, + predicted_categories, + predicted_instances, + num_classes, + max_instances_per_category, + ignored_label, + offset, + normalize_by_image_size=True, + name=None): + 
"""Aggregates the covering across calls with different input tensors. + + See tf.metrics.* functions for comparable functionality and usage. + + Args: + groundtruth_categories: A 2D uint16 tensor of groundtruth category labels. + groundtruth_instances: A 2D uint16 tensor of groundtruth instance labels. + predicted_categories: A 2D uint16 tensor of predicted category labels. + predicted_instances: A 2D uint16 tensor of predicted instance labels. + num_classes: Number of classes in the dataset as an integer. + max_instances_per_category: The maximum number of instances for each class + as an integer or integer tensor. + ignored_label: The class id to be ignored in evaluation as an integer or + integer tensor. + offset: The maximum number of unique labels as an integer or integer tensor. + normalize_by_image_size: Whether to normalize groundtruth region areas by + image size. If True, groundtruth instance areas and weighted IoUs will be + divided by the size of the corresponding image before accumulated across + the dataset. + name: An optional variable_scope name. + + Returns: + coverings: A tensor of shape `[3, num_classes]`, where (1) per class + coverings, (2) per class sum of weighted IoUs, and (3) per class sum of + groundtruth region areas are saved in the perspective rows. + update_ops: List of operations that update the running overall parsing + covering. + + Raises: + RuntimeError: If eager execution is enabled. + """ + if tf.executing_eagerly(): + raise RuntimeError('Cannot aggregate when eager execution is enabled.') + + input_args = [ + tf.convert_to_tensor(groundtruth_categories, tf.uint16), + tf.convert_to_tensor(groundtruth_instances, tf.uint16), + tf.convert_to_tensor(predicted_categories, tf.uint16), + tf.convert_to_tensor(predicted_instances, tf.uint16), + tf.convert_to_tensor(num_classes, tf.int32), + tf.convert_to_tensor(max_instances_per_category, tf.int32), + tf.convert_to_tensor(ignored_label, tf.int32), + tf.convert_to_tensor(offset, tf.int32), + tf.convert_to_tensor(normalize_by_image_size, tf.bool), + ] + return_types = [ + tf.float64, + tf.float64, + ] + with tf.variable_scope(name, 'streaming_parsing_covering', input_args): + covering_results = tf.py_func( + _parsing_covering_helper, input_args, return_types, stateful=False) + weighted_iou_per_class, gt_area_per_class = tuple(covering_results) + + total_weighted_iou_per_class, updated_weighted_iou_per_class = ( + _running_total( + weighted_iou_per_class, [num_classes], + name='weighted_iou_per_class_total')) + total_gt_area_per_class, updated_gt_area_per_class = _running_total( + gt_area_per_class, [num_classes], name='gt_area_per_class_total') + + covering_per_class = _realdiv_maybe_zero(total_weighted_iou_per_class, + total_gt_area_per_class) + coverings = tf.stack([ + covering_per_class, + total_weighted_iou_per_class, + total_gt_area_per_class, + ], + axis=0) + update_ops = [updated_weighted_iou_per_class, updated_gt_area_per_class] + + return coverings, update_ops diff --git a/models/research/deeplab/evaluation/streaming_metrics_test.py b/models/research/deeplab/evaluation/streaming_metrics_test.py new file mode 100644 index 0000000000000000000000000000000000000000..656007e6238e5c106dd8eee08fe65e4ba7457801 --- /dev/null +++ b/models/research/deeplab/evaluation/streaming_metrics_test.py @@ -0,0 +1,549 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for segmentation "streaming" metrics.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + + + +import numpy as np +import six +import tensorflow as tf + +from deeplab.evaluation import streaming_metrics +from deeplab.evaluation import test_utils + +# See the definition of the color names at: +# https://en.wikipedia.org/wiki/Web_colors. +_CLASS_COLOR_MAP = { + (0, 0, 0): 0, + (0, 0, 255): 1, # Person (blue). + (255, 0, 0): 2, # Bear (red). + (0, 255, 0): 3, # Tree (lime). + (255, 0, 255): 4, # Bird (fuchsia). + (0, 255, 255): 5, # Sky (aqua). + (255, 255, 0): 6, # Cat (yellow). +} + + +class StreamingPanopticQualityTest(tf.test.TestCase): + + def test_streaming_metric_on_single_image(self): + offset = 256 * 256 + + instance_class_map = { + 0: 0, + 47: 1, + 97: 1, + 133: 1, + 150: 1, + 174: 1, + 198: 2, + 215: 1, + 244: 1, + 255: 1, + } + gt_instances, gt_classes = test_utils.panoptic_segmentation_with_class_map( + 'team_gt_instance.png', instance_class_map) + + pred_classes = test_utils.read_segmentation_with_rgb_color_map( + 'team_pred_class.png', _CLASS_COLOR_MAP) + pred_instances = test_utils.read_test_image( + 'team_pred_instance.png', mode='L') + + gt_class_tensor = tf.placeholder(tf.uint16) + gt_instance_tensor = tf.placeholder(tf.uint16) + pred_class_tensor = tf.placeholder(tf.uint16) + pred_instance_tensor = tf.placeholder(tf.uint16) + qualities, update_pq = streaming_metrics.streaming_panoptic_quality( + gt_class_tensor, + gt_instance_tensor, + pred_class_tensor, + pred_instance_tensor, + num_classes=3, + max_instances_per_category=256, + ignored_label=0, + offset=offset) + pq, sq, rq, total_tp, total_fn, total_fp = tf.unstack(qualities, 6, axis=0) + feed_dict = { + gt_class_tensor: gt_classes, + gt_instance_tensor: gt_instances, + pred_class_tensor: pred_classes, + pred_instance_tensor: pred_instances + } + + with self.session() as sess: + sess.run(tf.local_variables_initializer()) + sess.run(update_pq, feed_dict=feed_dict) + (result_pq, result_sq, result_rq, result_total_tp, result_total_fn, + result_total_fp) = sess.run([pq, sq, rq, total_tp, total_fn, total_fp], + feed_dict=feed_dict) + np.testing.assert_array_almost_equal( + result_pq, [2.06104, 0.7024, 0.54069], decimal=4) + np.testing.assert_array_almost_equal( + result_sq, [2.06104, 0.7526, 0.54069], decimal=4) + np.testing.assert_array_almost_equal(result_rq, [1., 0.9333, 1.], decimal=4) + np.testing.assert_array_almost_equal( + result_total_tp, [1., 7., 1.], decimal=4) + np.testing.assert_array_almost_equal( + result_total_fn, [0., 1., 0.], decimal=4) + np.testing.assert_array_almost_equal( + result_total_fp, [0., 0., 0.], decimal=4) + + def test_streaming_metric_on_multiple_images(self): + num_classes = 7 + offset = 256 * 256 + + bird_gt_instance_class_map = { + 92: 5, + 176: 3, + 255: 4, + } + 
cat_gt_instance_class_map = { + 0: 0, + 255: 6, + } + team_gt_instance_class_map = { + 0: 0, + 47: 1, + 97: 1, + 133: 1, + 150: 1, + 174: 1, + 198: 2, + 215: 1, + 244: 1, + 255: 1, + } + test_image = collections.namedtuple( + 'TestImage', + ['gt_class_map', 'gt_path', 'pred_inst_path', 'pred_class_path']) + test_images = [ + test_image(bird_gt_instance_class_map, 'bird_gt.png', + 'bird_pred_instance.png', 'bird_pred_class.png'), + test_image(cat_gt_instance_class_map, 'cat_gt.png', + 'cat_pred_instance.png', 'cat_pred_class.png'), + test_image(team_gt_instance_class_map, 'team_gt_instance.png', + 'team_pred_instance.png', 'team_pred_class.png'), + ] + + gt_classes = [] + gt_instances = [] + pred_classes = [] + pred_instances = [] + for test_image in test_images: + (image_gt_instances, + image_gt_classes) = test_utils.panoptic_segmentation_with_class_map( + test_image.gt_path, test_image.gt_class_map) + gt_classes.append(image_gt_classes) + gt_instances.append(image_gt_instances) + + pred_classes.append( + test_utils.read_segmentation_with_rgb_color_map( + test_image.pred_class_path, _CLASS_COLOR_MAP)) + pred_instances.append( + test_utils.read_test_image(test_image.pred_inst_path, mode='L')) + + gt_class_tensor = tf.placeholder(tf.uint16) + gt_instance_tensor = tf.placeholder(tf.uint16) + pred_class_tensor = tf.placeholder(tf.uint16) + pred_instance_tensor = tf.placeholder(tf.uint16) + qualities, update_pq = streaming_metrics.streaming_panoptic_quality( + gt_class_tensor, + gt_instance_tensor, + pred_class_tensor, + pred_instance_tensor, + num_classes=num_classes, + max_instances_per_category=256, + ignored_label=0, + offset=offset) + pq, sq, rq, total_tp, total_fn, total_fp = tf.unstack(qualities, 6, axis=0) + with self.session() as sess: + sess.run(tf.local_variables_initializer()) + for pred_class, pred_instance, gt_class, gt_instance in six.moves.zip( + pred_classes, pred_instances, gt_classes, gt_instances): + sess.run( + update_pq, + feed_dict={ + gt_class_tensor: gt_class, + gt_instance_tensor: gt_instance, + pred_class_tensor: pred_class, + pred_instance_tensor: pred_instance + }) + (result_pq, result_sq, result_rq, result_total_tp, result_total_fn, + result_total_fp) = sess.run( + [pq, sq, rq, total_tp, total_fn, total_fp], + feed_dict={ + gt_class_tensor: 0, + gt_instance_tensor: 0, + pred_class_tensor: 0, + pred_instance_tensor: 0 + }) + np.testing.assert_array_almost_equal( + result_pq, + [4.3107, 0.7024, 0.54069, 0.745353, 0.85768, 0.99107, 0.77410], + decimal=4) + np.testing.assert_array_almost_equal( + result_sq, [5.3883, 0.7526, 0.5407, 0.7454, 0.8577, 0.9911, 0.7741], + decimal=4) + np.testing.assert_array_almost_equal( + result_rq, [0.8, 0.9333, 1., 1., 1., 1., 1.], decimal=4) + np.testing.assert_array_almost_equal( + result_total_tp, [2., 7., 1., 1., 1., 1., 1.], decimal=4) + np.testing.assert_array_almost_equal( + result_total_fn, [0., 1., 0., 0., 0., 0., 0.], decimal=4) + np.testing.assert_array_almost_equal( + result_total_fp, [1., 0., 0., 0., 0., 0., 0.], decimal=4) + + +class StreamingParsingCoveringTest(tf.test.TestCase): + + def test_streaming_metric_on_single_image(self): + offset = 256 * 256 + + instance_class_map = { + 0: 0, + 47: 1, + 97: 1, + 133: 1, + 150: 1, + 174: 1, + 198: 2, + 215: 1, + 244: 1, + 255: 1, + } + gt_instances, gt_classes = test_utils.panoptic_segmentation_with_class_map( + 'team_gt_instance.png', instance_class_map) + + pred_classes = test_utils.read_segmentation_with_rgb_color_map( + 'team_pred_class.png', _CLASS_COLOR_MAP) + 
pred_instances = test_utils.read_test_image( + 'team_pred_instance.png', mode='L') + + gt_class_tensor = tf.placeholder(tf.uint16) + gt_instance_tensor = tf.placeholder(tf.uint16) + pred_class_tensor = tf.placeholder(tf.uint16) + pred_instance_tensor = tf.placeholder(tf.uint16) + coverings, update_ops = streaming_metrics.streaming_parsing_covering( + gt_class_tensor, + gt_instance_tensor, + pred_class_tensor, + pred_instance_tensor, + num_classes=3, + max_instances_per_category=256, + ignored_label=0, + offset=offset, + normalize_by_image_size=False) + (per_class_coverings, per_class_weighted_ious, per_class_gt_areas) = ( + tf.unstack(coverings, num=3, axis=0)) + feed_dict = { + gt_class_tensor: gt_classes, + gt_instance_tensor: gt_instances, + pred_class_tensor: pred_classes, + pred_instance_tensor: pred_instances + } + + with self.session() as sess: + sess.run(tf.local_variables_initializer()) + sess.run(update_ops, feed_dict=feed_dict) + (result_per_class_coverings, result_per_class_weighted_ious, + result_per_class_gt_areas) = ( + sess.run([ + per_class_coverings, + per_class_weighted_ious, + per_class_gt_areas, + ], + feed_dict=feed_dict)) + + np.testing.assert_array_almost_equal( + result_per_class_coverings, [0.0, 0.7009696912, 0.5406896552], + decimal=4) + np.testing.assert_array_almost_equal( + result_per_class_weighted_ious, [0.0, 39864.14634, 3136], decimal=4) + np.testing.assert_array_equal(result_per_class_gt_areas, [0, 56870, 5800]) + + def test_streaming_metric_on_multiple_images(self): + """Tests streaming parsing covering metric.""" + num_classes = 7 + offset = 256 * 256 + + bird_gt_instance_class_map = { + 92: 5, + 176: 3, + 255: 4, + } + cat_gt_instance_class_map = { + 0: 0, + 255: 6, + } + team_gt_instance_class_map = { + 0: 0, + 47: 1, + 97: 1, + 133: 1, + 150: 1, + 174: 1, + 198: 2, + 215: 1, + 244: 1, + 255: 1, + } + test_image = collections.namedtuple( + 'TestImage', + ['gt_class_map', 'gt_path', 'pred_inst_path', 'pred_class_path']) + test_images = [ + test_image(bird_gt_instance_class_map, 'bird_gt.png', + 'bird_pred_instance.png', 'bird_pred_class.png'), + test_image(cat_gt_instance_class_map, 'cat_gt.png', + 'cat_pred_instance.png', 'cat_pred_class.png'), + test_image(team_gt_instance_class_map, 'team_gt_instance.png', + 'team_pred_instance.png', 'team_pred_class.png'), + ] + + gt_classes = [] + gt_instances = [] + pred_classes = [] + pred_instances = [] + for test_image in test_images: + (image_gt_instances, + image_gt_classes) = test_utils.panoptic_segmentation_with_class_map( + test_image.gt_path, test_image.gt_class_map) + gt_classes.append(image_gt_classes) + gt_instances.append(image_gt_instances) + + pred_instances.append( + test_utils.read_test_image(test_image.pred_inst_path, mode='L')) + pred_classes.append( + test_utils.read_segmentation_with_rgb_color_map( + test_image.pred_class_path, _CLASS_COLOR_MAP)) + + gt_class_tensor = tf.placeholder(tf.uint16) + gt_instance_tensor = tf.placeholder(tf.uint16) + pred_class_tensor = tf.placeholder(tf.uint16) + pred_instance_tensor = tf.placeholder(tf.uint16) + coverings, update_ops = streaming_metrics.streaming_parsing_covering( + gt_class_tensor, + gt_instance_tensor, + pred_class_tensor, + pred_instance_tensor, + num_classes=num_classes, + max_instances_per_category=256, + ignored_label=0, + offset=offset, + normalize_by_image_size=False) + (per_class_coverings, per_class_weighted_ious, per_class_gt_areas) = ( + tf.unstack(coverings, num=3, axis=0)) + + with self.session() as sess: + 
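+      # The metric's running totals are TF local/metric variables (see
+      # _running_total in streaming_metrics.py), so they must be explicitly
+      # initialized before the update ops are run.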
sess.run(tf.local_variables_initializer()) + for pred_class, pred_instance, gt_class, gt_instance in six.moves.zip( + pred_classes, pred_instances, gt_classes, gt_instances): + sess.run( + update_ops, + feed_dict={ + gt_class_tensor: gt_class, + gt_instance_tensor: gt_instance, + pred_class_tensor: pred_class, + pred_instance_tensor: pred_instance + }) + (result_per_class_coverings, result_per_class_weighted_ious, + result_per_class_gt_areas) = ( + sess.run( + [ + per_class_coverings, + per_class_weighted_ious, + per_class_gt_areas, + ], + feed_dict={ + gt_class_tensor: 0, + gt_instance_tensor: 0, + pred_class_tensor: 0, + pred_instance_tensor: 0 + })) + + np.testing.assert_array_almost_equal( + result_per_class_coverings, [ + 0.0, + 0.7009696912, + 0.5406896552, + 0.7453531599, + 0.8576779026, + 0.9910687881, + 0.7741046032, + ], + decimal=4) + np.testing.assert_array_almost_equal( + result_per_class_weighted_ious, [ + 0.0, + 39864.14634, + 3136, + 1177.657993, + 2498.41573, + 33366.31289, + 26671, + ], + decimal=4) + np.testing.assert_array_equal(result_per_class_gt_areas, [ + 0.0, + 56870, + 5800, + 1580, + 2913, + 33667, + 34454, + ]) + + def test_streaming_metric_on_multiple_images_normalize_by_size(self): + """Tests streaming parsing covering metric with image size normalization.""" + num_classes = 7 + offset = 256 * 256 + + bird_gt_instance_class_map = { + 92: 5, + 176: 3, + 255: 4, + } + cat_gt_instance_class_map = { + 0: 0, + 255: 6, + } + team_gt_instance_class_map = { + 0: 0, + 47: 1, + 97: 1, + 133: 1, + 150: 1, + 174: 1, + 198: 2, + 215: 1, + 244: 1, + 255: 1, + } + test_image = collections.namedtuple( + 'TestImage', + ['gt_class_map', 'gt_path', 'pred_inst_path', 'pred_class_path']) + test_images = [ + test_image(bird_gt_instance_class_map, 'bird_gt.png', + 'bird_pred_instance.png', 'bird_pred_class.png'), + test_image(cat_gt_instance_class_map, 'cat_gt.png', + 'cat_pred_instance.png', 'cat_pred_class.png'), + test_image(team_gt_instance_class_map, 'team_gt_instance.png', + 'team_pred_instance.png', 'team_pred_class.png'), + ] + + gt_classes = [] + gt_instances = [] + pred_classes = [] + pred_instances = [] + for test_image in test_images: + (image_gt_instances, + image_gt_classes) = test_utils.panoptic_segmentation_with_class_map( + test_image.gt_path, test_image.gt_class_map) + gt_classes.append(image_gt_classes) + gt_instances.append(image_gt_instances) + + pred_instances.append( + test_utils.read_test_image(test_image.pred_inst_path, mode='L')) + pred_classes.append( + test_utils.read_segmentation_with_rgb_color_map( + test_image.pred_class_path, _CLASS_COLOR_MAP)) + + gt_class_tensor = tf.placeholder(tf.uint16) + gt_instance_tensor = tf.placeholder(tf.uint16) + pred_class_tensor = tf.placeholder(tf.uint16) + pred_instance_tensor = tf.placeholder(tf.uint16) + coverings, update_ops = streaming_metrics.streaming_parsing_covering( + gt_class_tensor, + gt_instance_tensor, + pred_class_tensor, + pred_instance_tensor, + num_classes=num_classes, + max_instances_per_category=256, + ignored_label=0, + offset=offset, + normalize_by_image_size=True) + (per_class_coverings, per_class_weighted_ious, per_class_gt_areas) = ( + tf.unstack(coverings, num=3, axis=0)) + + with self.session() as sess: + sess.run(tf.local_variables_initializer()) + for pred_class, pred_instance, gt_class, gt_instance in six.moves.zip( + pred_classes, pred_instances, gt_classes, gt_instances): + sess.run( + update_ops, + feed_dict={ + gt_class_tensor: gt_class, + gt_instance_tensor: gt_instance, + 
pred_class_tensor: pred_class, + pred_instance_tensor: pred_instance + }) + (result_per_class_coverings, result_per_class_weighted_ious, + result_per_class_gt_areas) = ( + sess.run( + [ + per_class_coverings, + per_class_weighted_ious, + per_class_gt_areas, + ], + feed_dict={ + gt_class_tensor: 0, + gt_instance_tensor: 0, + pred_class_tensor: 0, + pred_instance_tensor: 0 + })) + + np.testing.assert_array_almost_equal( + result_per_class_coverings, [ + 0.0, + 0.7009696912, + 0.5406896552, + 0.7453531599, + 0.8576779026, + 0.9910687881, + 0.7741046032, + ], + decimal=4) + np.testing.assert_array_almost_equal( + result_per_class_weighted_ious, [ + 0.0, + 0.5002088756, + 0.03935002196, + 0.03086105851, + 0.06547211033, + 0.8743792686, + 0.2549565051, + ], + decimal=4) + np.testing.assert_array_almost_equal( + result_per_class_gt_areas, [ + 0.0, + 0.7135955832, + 0.07277746408, + 0.04140461216, + 0.07633647799, + 0.8822589099, + 0.3293566581, + ], + decimal=4) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/deeplab/evaluation/test_utils.py b/models/research/deeplab/evaluation/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9ad4f551271527ef1d1990398de5523b074d5779 --- /dev/null +++ b/models/research/deeplab/evaluation/test_utils.py @@ -0,0 +1,119 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utility functions to set up unit tests on Panoptic Segmentation code.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + + + +from absl import flags +import numpy as np +import scipy.misc +import six +from six.moves import map + +FLAGS = flags.FLAGS + +_TEST_DIR = 'deeplab/evaluation/testdata' + + +def read_test_image(testdata_path, *args, **kwargs): + """Loads a test image. + + Args: + testdata_path: Image path relative to panoptic_segmentation/testdata as a + string. + *args: Additional positional arguments passed to `imread`. + **kwargs: Additional keyword arguments passed to `imread`. + + Returns: + The image, as a numpy array. + """ + image_path = os.path.join(_TEST_DIR, testdata_path) + return scipy.misc.imread(image_path, *args, **kwargs) + + +def read_segmentation_with_rgb_color_map(image_testdata_path, + rgb_to_semantic_label, + output_dtype=None): + """Reads a test segmentation as an image and a map from colors to labels. + + Args: + image_testdata_path: Image path relative to panoptic_segmentation/testdata + as a string. + rgb_to_semantic_label: Mapping from RGB colors to integer labels as a + dictionary. + output_dtype: Type of the output labels. If None, defaults to the type of + the provided color map. + + Returns: + A 2D numpy array of labels. + + Raises: + ValueError: On an incomplete `rgb_to_semantic_label`. 
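+
+  For example (illustrative values only): with
+  `rgb_to_semantic_label = {(0, 0, 0): 0, (255, 0, 0): 2}`, every black pixel
+  of the image is mapped to label 0 and every red pixel to label 2.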
+ """ + rgb_image = read_test_image(image_testdata_path, mode='RGB') + if len(rgb_image.shape) != 3 or rgb_image.shape[2] != 3: + raise AssertionError( + 'Expected RGB image, actual shape is %s' % rgb_image.sape) + + num_pixels = rgb_image.shape[0] * rgb_image.shape[1] + unique_colors = np.unique(np.reshape(rgb_image, [num_pixels, 3]), axis=0) + if not set(map(tuple, unique_colors)).issubset( + six.viewkeys(rgb_to_semantic_label)): + raise ValueError('RGB image has colors not in color map.') + + output_dtype = output_dtype or type( + next(six.itervalues(rgb_to_semantic_label))) + output_labels = np.empty(rgb_image.shape[:2], dtype=output_dtype) + for rgb_color, int_label in six.iteritems(rgb_to_semantic_label): + color_array = np.array(rgb_color, ndmin=3) + output_labels[np.all(rgb_image == color_array, axis=2)] = int_label + return output_labels + + +def panoptic_segmentation_with_class_map(instance_testdata_path, + instance_label_to_semantic_label): + """Reads in a panoptic segmentation with an instance map and a map to classes. + + Args: + instance_testdata_path: Path to a grayscale instance map, given as a string + and relative to panoptic_segmentation/testdata. + instance_label_to_semantic_label: A map from instance labels to class + labels. + + Returns: + A tuple `(instance_labels, class_labels)` of numpy arrays. + + Raises: + ValueError: On a mismatched set of instances in + the + `instance_label_to_semantic_label`. + """ + instance_labels = read_test_image(instance_testdata_path, mode='L') + if set(np.unique(instance_labels)) != set( + six.iterkeys(instance_label_to_semantic_label)): + raise ValueError('Provided class map does not match present instance ids.') + + class_labels = np.empty_like(instance_labels) + for instance_id, class_id in six.iteritems(instance_label_to_semantic_label): + class_labels[instance_labels == instance_id] = class_id + + return instance_labels, class_labels diff --git a/models/research/deeplab/evaluation/test_utils_test.py b/models/research/deeplab/evaluation/test_utils_test.py new file mode 100644 index 0000000000000000000000000000000000000000..9e9bed37e4bf721304e60d7fa12e6cfa9c4b7ef8 --- /dev/null +++ b/models/research/deeplab/evaluation/test_utils_test.py @@ -0,0 +1,74 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for test_utils.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + + +from absl.testing import absltest +import numpy as np + +from deeplab.evaluation import test_utils + + +class TestUtilsTest(absltest.TestCase): + + def test_read_test_image(self): + image_array = test_utils.read_test_image('team_pred_class.png') + self.assertSequenceEqual(image_array.shape, (231, 345, 4)) + + def test_reads_segmentation_with_color_map(self): + rgb_to_semantic_label = {(0, 0, 0): 0, (0, 0, 255): 1, (255, 0, 0): 23} + labels = test_utils.read_segmentation_with_rgb_color_map( + 'team_pred_class.png', rgb_to_semantic_label) + + input_image = test_utils.read_test_image('team_pred_class.png') + np.testing.assert_array_equal( + labels == 0, + np.logical_and(input_image[:, :, 0] == 0, input_image[:, :, 2] == 0)) + np.testing.assert_array_equal(labels == 1, input_image[:, :, 2] == 255) + np.testing.assert_array_equal(labels == 23, input_image[:, :, 0] == 255) + + def test_reads_gt_segmentation(self): + instance_label_to_semantic_label = { + 0: 0, + 47: 1, + 97: 1, + 133: 1, + 150: 1, + 174: 1, + 198: 23, + 215: 1, + 244: 1, + 255: 1, + } + instances, classes = test_utils.panoptic_segmentation_with_class_map( + 'team_gt_instance.png', instance_label_to_semantic_label) + + expected_label_shape = (231, 345) + self.assertSequenceEqual(instances.shape, expected_label_shape) + self.assertSequenceEqual(classes.shape, expected_label_shape) + np.testing.assert_array_equal(instances == 0, classes == 0) + np.testing.assert_array_equal(instances == 198, classes == 23) + np.testing.assert_array_equal( + np.logical_and(instances != 0, instances != 198), classes == 1) + + +if __name__ == '__main__': + absltest.main() diff --git a/models/research/deeplab/evaluation/testdata/README.md b/models/research/deeplab/evaluation/testdata/README.md new file mode 100644 index 0000000000000000000000000000000000000000..711b4767de830938b277d8d135175c2287d9c9db --- /dev/null +++ b/models/research/deeplab/evaluation/testdata/README.md @@ -0,0 +1,14 @@ +# Segmentation Evalaution Test Data + +## Source Images + +* [team_input.png](team_input.png) \ + Source: + https://ai.googleblog.com/2018/03/semantic-image-segmentation-with.html +* [cat_input.jpg](cat_input.jpg) \ + Source: https://www.flickr.com/photos/magdalena_b/4995858743 +* [bird_input.jpg](bird_input.jpg) \ + Source: https://www.flickr.com/photos/chivinskia/40619099560 +* [congress_input.jpg](congress_input.jpg) \ + Source: + https://cao.house.gov/sites/cao.house.gov/files/documents/SAR-Jan-Jun-2016.pdf diff --git a/models/research/deeplab/evaluation/testdata/bird_gt.png b/models/research/deeplab/evaluation/testdata/bird_gt.png new file mode 100644 index 0000000000000000000000000000000000000000..05d854915d1809abe3ba10f03c20e75706e0bb17 Binary files /dev/null and b/models/research/deeplab/evaluation/testdata/bird_gt.png differ diff --git a/models/research/deeplab/evaluation/testdata/bird_pred_class.png b/models/research/deeplab/evaluation/testdata/bird_pred_class.png new file mode 100644 index 0000000000000000000000000000000000000000..07351bf061115d0990486cbb086b6b9ec53e691b Binary files /dev/null and b/models/research/deeplab/evaluation/testdata/bird_pred_class.png differ diff --git a/models/research/deeplab/evaluation/testdata/bird_pred_instance.png b/models/research/deeplab/evaluation/testdata/bird_pred_instance.png new file 
mode 100644 index 0000000000000000000000000000000000000000..faa1371f52510fb6f15fecb0eecc3441b2c8eadb Binary files /dev/null and b/models/research/deeplab/evaluation/testdata/bird_pred_instance.png differ diff --git a/models/research/deeplab/evaluation/testdata/cat_gt.png b/models/research/deeplab/evaluation/testdata/cat_gt.png new file mode 100644 index 0000000000000000000000000000000000000000..41f60111f3de899a9e1ca3a646bea72d86b3009f Binary files /dev/null and b/models/research/deeplab/evaluation/testdata/cat_gt.png differ diff --git a/models/research/deeplab/evaluation/testdata/cat_pred_class.png b/models/research/deeplab/evaluation/testdata/cat_pred_class.png new file mode 100644 index 0000000000000000000000000000000000000000..3728c68ced20312567e70540b667b53269000318 Binary files /dev/null and b/models/research/deeplab/evaluation/testdata/cat_pred_class.png differ diff --git a/models/research/deeplab/evaluation/testdata/cat_pred_instance.png b/models/research/deeplab/evaluation/testdata/cat_pred_instance.png new file mode 100644 index 0000000000000000000000000000000000000000..ebd9ba4855f5c88a3b336d50e21d864a37175bbe Binary files /dev/null and b/models/research/deeplab/evaluation/testdata/cat_pred_instance.png differ diff --git a/models/research/deeplab/evaluation/testdata/coco_gt.json b/models/research/deeplab/evaluation/testdata/coco_gt.json new file mode 100644 index 0000000000000000000000000000000000000000..5f79bf184338b8ec1ed540fd388f2de1f6a9451b --- /dev/null +++ b/models/research/deeplab/evaluation/testdata/coco_gt.json @@ -0,0 +1,214 @@ +{ + "info": { + "description": "Test COCO-format dataset", + "url": "https://github.com/tensorflow/models/tree/master/research/deeplab", + "version": "1.0", + "year": 2019 + }, + "images": [ + { + "id": 1, + "file_name": "bird.jpg", + "height": 159, + "width": 240, + "flickr_url": "https://www.flickr.com/photos/chivinskia/40619099560" + }, + { + "id": 2, + "file_name": "cat.jpg", + "height": 330, + "width": 317, + "flickr_url": "https://www.flickr.com/photos/magdalena_b/4995858743" + }, + { + "id": 3, + "file_name": "team.jpg", + "height": 231, + "width": 345 + }, + { + "id": 4, + "file_name": "congress.jpg", + "height": 267, + "width": 525 + } + ], + "annotations": [ + { + "image_id": 1, + "file_name": "bird.png", + "segments_info": [ + { + "id": 255, + "area": 2913, + "category_id": 4, + "iscrowd": 0 + }, + { + "id": 2586368, + "area": 1580, + "category_id": 3, + "iscrowd": 0 + }, + { + "id": 16770360, + "area": 33667, + "category_id": 5, + "iscrowd": 0 + } + ] + }, + { + "image_id": 2, + "file_name": "cat.png", + "segments_info": [ + { + "id": 16711691, + "area": 34454, + "category_id": 6, + "iscrowd": 0 + } + ] + }, + { + "image_id": 3, + "file_name": "team.png", + "segments_info": [ + { + "id": 129, + "area": 5443, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 255, + "area": 3574, + "category_id": 2, + "iscrowd": 0 + }, + { + "id": 47615, + "area": 11483, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 65532, + "area": 7080, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 8585107, + "area": 11363, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 9011200, + "area": 7158, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 12858027, + "area": 6419, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 16053492, + "area": 4350, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 16711680, + "area": 5800, + "category_id": 1, + "iscrowd": 0 + } + ] + }, + { + "image_id": 4, + "file_name": "congress.png", + "segments_info": [ + { + 
"id": 255, + "area": 243, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 65315, + "area": 553, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 65516, + "area": 652, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 9895680, + "area": 82774, + "category_id": 1, + "iscrowd": 1 + }, + { + "id": 16711739, + "area": 137, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 16711868, + "area": 179, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 16762624, + "area": 2742, + "category_id": 1, + "iscrowd": 0 + } + ] + } + ], + "categories": [ + { + "id": 1, + "name": "person", + "isthing": 1 + }, + { + "id": 2, + "name": "umbrella", + "isthing": 1 + }, + { + "id": 3, + "name": "tree-merged", + "isthing": 0 + }, + { + "id": 4, + "name": "bird", + "isthing": 1 + }, + { + "id": 5, + "name": "sky", + "isthing": 0 + }, + { + "id": 6, + "name": "cat", + "isthing": 1 + } + ] +} diff --git a/models/research/deeplab/evaluation/testdata/coco_gt/bird.png b/models/research/deeplab/evaluation/testdata/coco_gt/bird.png new file mode 100644 index 0000000000000000000000000000000000000000..9ef4ad9504126213bf2e3f1f49cdb65b189e6b95 Binary files /dev/null and b/models/research/deeplab/evaluation/testdata/coco_gt/bird.png differ diff --git a/models/research/deeplab/evaluation/testdata/coco_gt/cat.png b/models/research/deeplab/evaluation/testdata/coco_gt/cat.png new file mode 100644 index 0000000000000000000000000000000000000000..cb02530f2f912ef0d8252e327c6324211152c760 Binary files /dev/null and b/models/research/deeplab/evaluation/testdata/coco_gt/cat.png differ diff --git a/models/research/deeplab/evaluation/testdata/coco_gt/congress.png b/models/research/deeplab/evaluation/testdata/coco_gt/congress.png new file mode 100644 index 0000000000000000000000000000000000000000..a56b98d336172288b2c68284f5cc1373f515c342 Binary files /dev/null and b/models/research/deeplab/evaluation/testdata/coco_gt/congress.png differ diff --git a/models/research/deeplab/evaluation/testdata/coco_gt/team.png b/models/research/deeplab/evaluation/testdata/coco_gt/team.png new file mode 100644 index 0000000000000000000000000000000000000000..bde358d151a576049e993a0fd9ebb9661c7060a9 Binary files /dev/null and b/models/research/deeplab/evaluation/testdata/coco_gt/team.png differ diff --git a/models/research/deeplab/evaluation/testdata/coco_pred.json b/models/research/deeplab/evaluation/testdata/coco_pred.json new file mode 100644 index 0000000000000000000000000000000000000000..4aead17a65d7d9203eabdb9f46c334d9e5aa402c --- /dev/null +++ b/models/research/deeplab/evaluation/testdata/coco_pred.json @@ -0,0 +1,208 @@ +{ + "info": { + "description": "Test COCO-format dataset", + "url": "https://github.com/tensorflow/models/tree/master/research/deeplab", + "version": "1.0", + "year": 2019 + }, + "images": [ + { + "id": 1, + "file_name": "bird.jpg", + "height": 159, + "width": 240, + "flickr_url": "https://www.flickr.com/photos/chivinskia/40619099560" + }, + { + "id": 2, + "file_name": "cat.jpg", + "height": 330, + "width": 317, + "flickr_url": "https://www.flickr.com/photos/magdalena_b/4995858743" + }, + { + "id": 3, + "file_name": "team.jpg", + "height": 231, + "width": 345 + }, + { + "id": 4, + "file_name": "congress.jpg", + "height": 267, + "width": 525 + } + ], + "annotations": [ + { + "image_id": 1, + "file_name": "bird.png", + "segments_info": [ + { + "id": 55551, + "area": 3039, + "category_id": 4, + "iscrowd": 0 + }, + { + "id": 16216831, + "area": 33659, + "category_id": 5, + "iscrowd": 0 + }, + { + "id": 16760832, + "area": 1237, + 
"category_id": 3, + "iscrowd": 0 + } + ] + }, + { + "image_id": 2, + "file_name": "cat.png", + "segments_info": [ + { + "id": 36493, + "area": 26910, + "category_id": 6, + "iscrowd": 0 + } + ] + }, + { + "image_id": 3, + "file_name": "team.png", + "segments_info": [ + { + "id": 0, + "area": 22164, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 129, + "area": 3418, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 255, + "area": 12827, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 740608, + "area": 8606, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 2555695, + "area": 7636, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 2883541, + "area": 6844, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 14408667, + "area": 4766, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 16711820, + "area": 4767, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 16768768, + "area": 8667, + "category_id": 1, + "iscrowd": 0 + } + ] + }, + { + "image_id": 4, + "file_name": "congress.png", + "segments_info": [ + { + "id": 255, + "area": 2599, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 37375, + "area": 386, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 62207, + "area": 384, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 5177088, + "area": 260, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 16711691, + "area": 1011, + "category_id": 1, + "iscrowd": 0 + }, + { + "id": 16774912, + "area": 803, + "category_id": 1, + "iscrowd": 0 + } + ] + } + ], + "categories": [ + { + "id": 1, + "name": "person", + "isthing": 1 + }, + { + "id": 2, + "name": "umbrella", + "isthing": 1 + }, + { + "id": 3, + "name": "tree-merged", + "isthing": 0 + }, + { + "id": 4, + "name": "bird", + "isthing": 1 + }, + { + "id": 5, + "name": "sky", + "isthing": 0 + }, + { + "id": 6, + "name": "cat", + "isthing": 1 + } + ] +} diff --git a/models/research/deeplab/evaluation/testdata/coco_pred/bird.png b/models/research/deeplab/evaluation/testdata/coco_pred/bird.png new file mode 100644 index 0000000000000000000000000000000000000000..c9b4cbcbf444a890e26ad9091f8496e2596c04ad Binary files /dev/null and b/models/research/deeplab/evaluation/testdata/coco_pred/bird.png differ diff --git a/models/research/deeplab/evaluation/testdata/coco_pred/cat.png b/models/research/deeplab/evaluation/testdata/coco_pred/cat.png new file mode 100644 index 0000000000000000000000000000000000000000..324583271c4b11ef28e845e1cafb853383faf506 Binary files /dev/null and b/models/research/deeplab/evaluation/testdata/coco_pred/cat.png differ diff --git a/models/research/deeplab/evaluation/testdata/coco_pred/congress.png b/models/research/deeplab/evaluation/testdata/coco_pred/congress.png new file mode 100644 index 0000000000000000000000000000000000000000..fc7bb06050ed40f5c022f3cd7f0060c7fa84751a Binary files /dev/null and b/models/research/deeplab/evaluation/testdata/coco_pred/congress.png differ diff --git a/models/research/deeplab/evaluation/testdata/coco_pred/team.png b/models/research/deeplab/evaluation/testdata/coco_pred/team.png new file mode 100644 index 0000000000000000000000000000000000000000..7300bf41f03a8ba08a1cb3f99821b69cddb318c2 Binary files /dev/null and b/models/research/deeplab/evaluation/testdata/coco_pred/team.png differ diff --git a/models/research/deeplab/evaluation/testdata/team_gt_instance.png b/models/research/deeplab/evaluation/testdata/team_gt_instance.png new file mode 100644 index 0000000000000000000000000000000000000000..97abb55273ce409a5fbaa85cb999f0725d457dbf Binary files /dev/null and 
b/models/research/deeplab/evaluation/testdata/team_gt_instance.png differ diff --git a/models/research/deeplab/evaluation/testdata/team_pred_class.png b/models/research/deeplab/evaluation/testdata/team_pred_class.png new file mode 100644 index 0000000000000000000000000000000000000000..2ed78de2cbd923e6530f08fc2c47bf8377cfaf69 Binary files /dev/null and b/models/research/deeplab/evaluation/testdata/team_pred_class.png differ diff --git a/models/research/deeplab/evaluation/testdata/team_pred_instance.png b/models/research/deeplab/evaluation/testdata/team_pred_instance.png new file mode 100644 index 0000000000000000000000000000000000000000..264606a4d8822108481132ff9e990d826c64a274 Binary files /dev/null and b/models/research/deeplab/evaluation/testdata/team_pred_instance.png differ diff --git a/models/research/deeplab/export_model.py b/models/research/deeplab/export_model.py new file mode 100644 index 0000000000000000000000000000000000000000..b7307b5a212f4445f78b31a933443b2dcbd505e6 --- /dev/null +++ b/models/research/deeplab/export_model.py @@ -0,0 +1,201 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Exports trained model to TensorFlow frozen graph.""" + +import os +import tensorflow as tf + +from tensorflow.contrib import quantize as contrib_quantize +from tensorflow.python.tools import freeze_graph +from deeplab import common +from deeplab import input_preprocess +from deeplab import model + +slim = tf.contrib.slim +flags = tf.app.flags + +FLAGS = flags.FLAGS + +flags.DEFINE_string('checkpoint_path', None, 'Checkpoint path') + +flags.DEFINE_string('export_path', None, + 'Path to output Tensorflow frozen graph.') + +flags.DEFINE_integer('num_classes', 21, 'Number of classes.') + +flags.DEFINE_multi_integer('crop_size', [513, 513], + 'Crop size [height, width].') + +# For `xception_65`, use atrous_rates = [12, 24, 36] if output_stride = 8, or +# rates = [6, 12, 18] if output_stride = 16. For `mobilenet_v2`, use None. Note +# one could use different atrous_rates/output_stride during training/evaluation. +flags.DEFINE_multi_integer('atrous_rates', None, + 'Atrous rates for atrous spatial pyramid pooling.') + +flags.DEFINE_integer('output_stride', 8, + 'The ratio of input to output spatial resolution.') + +# Change to [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] for multi-scale inference. +flags.DEFINE_multi_float('inference_scales', [1.0], + 'The scales to resize images for inference.') + +flags.DEFINE_bool('add_flipped_images', False, + 'Add flipped images during inference or not.') + +flags.DEFINE_integer( + 'quantize_delay_step', -1, + 'Steps to start quantized training. If < 0, will not quantize model.') + +flags.DEFINE_bool('save_inference_graph', False, + 'Save inference graph in text proto.') + +# Input name of the exported model. +_INPUT_NAME = 'ImageTensor' + +# Output name of the exported predictions. 
+_OUTPUT_NAME = 'SemanticPredictions' +_RAW_OUTPUT_NAME = 'RawSemanticPredictions' + +# Output name of the exported probabilities. +_OUTPUT_PROB_NAME = 'SemanticProbabilities' +_RAW_OUTPUT_PROB_NAME = 'RawSemanticProbabilities' + + +def _create_input_tensors(): + """Creates and prepares input tensors for DeepLab model. + + This method creates a 4-D uint8 image tensor 'ImageTensor' with shape + [1, None, None, 3]. The actual input tensor name to use during inference is + 'ImageTensor:0'. + + Returns: + image: Preprocessed 4-D float32 tensor with shape [1, crop_height, + crop_width, 3]. + original_image_size: Original image shape tensor [height, width]. + resized_image_size: Resized image shape tensor [height, width]. + """ + # input_preprocess takes 4-D image tensor as input. + input_image = tf.placeholder(tf.uint8, [1, None, None, 3], name=_INPUT_NAME) + original_image_size = tf.shape(input_image)[1:3] + + # Squeeze the dimension in axis=0 since `preprocess_image_and_label` assumes + # image to be 3-D. + image = tf.squeeze(input_image, axis=0) + resized_image, image, _ = input_preprocess.preprocess_image_and_label( + image, + label=None, + crop_height=FLAGS.crop_size[0], + crop_width=FLAGS.crop_size[1], + min_resize_value=FLAGS.min_resize_value, + max_resize_value=FLAGS.max_resize_value, + resize_factor=FLAGS.resize_factor, + is_training=False, + model_variant=FLAGS.model_variant) + resized_image_size = tf.shape(resized_image)[:2] + + # Expand the dimension in axis=0, since the following operations assume the + # image to be 4-D. + image = tf.expand_dims(image, 0) + + return image, original_image_size, resized_image_size + + +def main(unused_argv): + tf.logging.set_verbosity(tf.logging.INFO) + tf.logging.info('Prepare to export model to: %s', FLAGS.export_path) + + with tf.Graph().as_default(): + image, image_size, resized_image_size = _create_input_tensors() + + model_options = common.ModelOptions( + outputs_to_num_classes={common.OUTPUT_TYPE: FLAGS.num_classes}, + crop_size=FLAGS.crop_size, + atrous_rates=FLAGS.atrous_rates, + output_stride=FLAGS.output_stride) + + if tuple(FLAGS.inference_scales) == (1.0,): + tf.logging.info('Exported model performs single-scale inference.') + predictions = model.predict_labels( + image, + model_options=model_options, + image_pyramid=FLAGS.image_pyramid) + else: + tf.logging.info('Exported model performs multi-scale inference.') + if FLAGS.quantize_delay_step >= 0: + raise ValueError( + 'Quantize mode is not supported with multi-scale test.') + predictions = model.predict_labels_multi_scale( + image, + model_options=model_options, + eval_scales=FLAGS.inference_scales, + add_flipped_images=FLAGS.add_flipped_images) + raw_predictions = tf.identity( + tf.cast(predictions[common.OUTPUT_TYPE], tf.float32), + _RAW_OUTPUT_NAME) + raw_probabilities = tf.identity( + predictions[common.OUTPUT_TYPE + model.PROB_SUFFIX], + _RAW_OUTPUT_PROB_NAME) + + # Crop the valid regions from the predictions. + semantic_predictions = raw_predictions[ + :, :resized_image_size[0], :resized_image_size[1]] + semantic_probabilities = raw_probabilities[ + :, :resized_image_size[0], :resized_image_size[1]] + + # Resize back the prediction to the original image size. + def _resize_label(label, label_size): + # Expand dimension of label to [1, height, width, 1] for resize operation. 
+ label = tf.expand_dims(label, 3) + resized_label = tf.image.resize_images( + label, + label_size, + method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, + align_corners=True) + return tf.cast(tf.squeeze(resized_label, 3), tf.int32) + semantic_predictions = _resize_label(semantic_predictions, image_size) + semantic_predictions = tf.identity(semantic_predictions, name=_OUTPUT_NAME) + + semantic_probabilities = tf.image.resize_bilinear( + semantic_probabilities, image_size, align_corners=True, + name=_OUTPUT_PROB_NAME) + + if FLAGS.quantize_delay_step >= 0: + contrib_quantize.create_eval_graph() + + saver = tf.train.Saver(tf.all_variables()) + + dirname = os.path.dirname(FLAGS.export_path) + tf.gfile.MakeDirs(dirname) + graph_def = tf.get_default_graph().as_graph_def(add_shapes=True) + freeze_graph.freeze_graph_with_def_protos( + graph_def, + saver.as_saver_def(), + FLAGS.checkpoint_path, + _OUTPUT_NAME + ',' + _OUTPUT_PROB_NAME, + restore_op_name=None, + filename_tensor_name=None, + output_graph=FLAGS.export_path, + clear_devices=True, + initializer_nodes=None) + + if FLAGS.save_inference_graph: + tf.train.write_graph(graph_def, dirname, 'inference_graph.pbtxt') + + +if __name__ == '__main__': + flags.mark_flag_as_required('checkpoint_path') + flags.mark_flag_as_required('export_path') + tf.app.run() diff --git a/models/research/deeplab/g3doc/ade20k.md b/models/research/deeplab/g3doc/ade20k.md new file mode 100644 index 0000000000000000000000000000000000000000..9505ab2cd99ef1b9a7eb8a53a7f909aa4a32977b --- /dev/null +++ b/models/research/deeplab/g3doc/ade20k.md @@ -0,0 +1,107 @@ +# Running DeepLab on ADE20K Semantic Segmentation Dataset + +This page walks through the steps required to run DeepLab on ADE20K dataset on a +local machine. + +## Download dataset and convert to TFRecord + +We have prepared the script (under the folder `datasets`) to download and +convert ADE20K semantic segmentation dataset to TFRecord. + +```bash +# From the tensorflow/models/research/deeplab/datasets directory. +bash download_and_convert_ade20k.sh +``` + +The converted dataset will be saved at ./deeplab/datasets/ADE20K/tfrecord + +## Recommended Directory Structure for Training and Evaluation + +``` ++ datasets + - build_data.py + - build_ade20k_data.py + - download_and_convert_ade20k.sh + + ADE20K + + tfrecord + + exp + + train_on_train_set + + train + + eval + + vis + + ADEChallengeData2016 + + annotations + + training + + validation + + images + + training + + validation +``` + +where the folder `train_on_train_set` stores the train/eval/vis events and +results (when training DeepLab on the ADE20K train set). + +## Running the train/eval/vis jobs + +A local training job using `xception_65` can be run with the following command: + +```bash +# From tensorflow/models/research/ +python deeplab/train.py \ + --logtostderr \ + --training_number_of_steps=150000 \ + --train_split="train" \ + --model_variant="xception_65" \ + --atrous_rates=6 \ + --atrous_rates=12 \ + --atrous_rates=18 \ + --output_stride=16 \ + --decoder_output_stride=4 \ + --train_crop_size="513,513" \ + --train_batch_size=4 \ + --min_resize_value=513 \ + --max_resize_value=513 \ + --resize_factor=16 \ + --dataset="ade20k" \ + --tf_initial_checkpoint=${PATH_TO_INITIAL_CHECKPOINT} \ + --train_logdir=${PATH_TO_TRAIN_DIR}\ + --dataset_dir=${PATH_TO_DATASET} +``` + +where ${PATH\_TO\_INITIAL\_CHECKPOINT} is the path to the initial checkpoint. 
+${PATH\_TO\_TRAIN\_DIR} is the directory in which training checkpoints and
+events will be written to (it is recommended to set it to the
+`train_on_train_set/train` above), and ${PATH\_TO\_DATASET} is the directory in
+which the ADE20K dataset resides (the `tfrecord` above).
+
+**Note that for train.py:**
+
+1. In order to fine-tune the BN layers, one needs to use a large batch size (>
+   12) and set fine_tune_batch_norm = True. Here, we simply use a small batch
+   size during training for the purpose of demonstration. If the users have
+   limited GPU memory at hand, please fine-tune from our provided checkpoints
+   whose batch norm parameters have been trained, and use a smaller learning
+   rate with fine_tune_batch_norm = False.
+
+2. Users should fine-tune the `min_resize_value` and `max_resize_value` to get
+   better results. Note that `resize_factor` has to be equal to
+   `output_stride`.
+
+3. Users should change atrous_rates from [6, 12, 18] to [12, 24, 36] if
+   setting output_stride=8.
+
+4. Users can skip the flag `decoder_output_stride` if they do not want to use
+   the decoder structure.
+
+## Running Tensorboard
+
+Progress for training and evaluation jobs can be inspected using Tensorboard.
+If using the recommended directory structure, Tensorboard can be run using the
+following command:
+
+```bash
+tensorboard --logdir=${PATH_TO_LOG_DIRECTORY}
+```
+
+where `${PATH_TO_LOG_DIRECTORY}` points to the directory that contains the
+train directory (e.g., the folder `train_on_train_set` in the above example).
+Please note it may take Tensorboard a couple of minutes to populate with data.
diff --git a/models/research/deeplab/g3doc/cityscapes.md b/models/research/deeplab/g3doc/cityscapes.md
new file mode 100644
index 0000000000000000000000000000000000000000..af703088e61b49aa81bf62b536469b410f0fb352
--- /dev/null
+++ b/models/research/deeplab/g3doc/cityscapes.md
@@ -0,0 +1,159 @@
+# Running DeepLab on Cityscapes Semantic Segmentation Dataset
+
+This page walks through the steps required to run DeepLab on Cityscapes on a
+local machine.
+
+## Download dataset and convert to TFRecord
+
+We have prepared the script (under the folder `datasets`) to convert the
+Cityscapes dataset to TFRecord. The users are required to download the dataset
+beforehand by registering on the
+[website](https://www.cityscapes-dataset.com/).
+
+```bash
+# From the tensorflow/models/research/deeplab/datasets directory.
+sh convert_cityscapes.sh
+```
+
+The converted dataset will be saved at ./deeplab/datasets/cityscapes/tfrecord.
+
+## Recommended Directory Structure for Training and Evaluation
+
+```
++ datasets
+  + cityscapes
+    + leftImg8bit
+    + gtFine
+    + tfrecord
+    + exp
+      + train_on_train_set
+        + train
+        + eval
+        + vis
+```
+
+where the folder `train_on_train_set` stores the train/eval/vis events and
+results (when training DeepLab on the Cityscapes train set).
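+
+If you want to create the experiment folders up front, a minimal sketch is
+shown below; the location of the `exp` folder is only a suggested convention,
+so adjust the path to your own setup:
+
+```bash
+# From the tensorflow/models/research/deeplab/datasets directory.
+# Creates the suggested cityscapes/exp/train_on_train_set/{train,eval,vis} layout.
+mkdir -p cityscapes/exp/train_on_train_set/{train,eval,vis}
+```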
+ +## Running the train/eval/vis jobs + +A local training job using `xception_65` can be run with the following command: + +```bash +# From tensorflow/models/research/ +python deeplab/train.py \ + --logtostderr \ + --training_number_of_steps=90000 \ + --train_split="train" \ + --model_variant="xception_65" \ + --atrous_rates=6 \ + --atrous_rates=12 \ + --atrous_rates=18 \ + --output_stride=16 \ + --decoder_output_stride=4 \ + --train_crop_size="769,769" \ + --train_batch_size=1 \ + --dataset="cityscapes" \ + --tf_initial_checkpoint=${PATH_TO_INITIAL_CHECKPOINT} \ + --train_logdir=${PATH_TO_TRAIN_DIR} \ + --dataset_dir=${PATH_TO_DATASET} +``` + +where ${PATH_TO_INITIAL_CHECKPOINT} is the path to the initial checkpoint +(usually an ImageNet pretrained checkpoint), ${PATH_TO_TRAIN_DIR} is the +directory in which training checkpoints and events will be written to, and +${PATH_TO_DATASET} is the directory in which the Cityscapes dataset resides. + +**Note that for {train,eval,vis}.py**: + +1. In order to reproduce our results, one needs to use large batch size (> 8), + and set fine_tune_batch_norm = True. Here, we simply use small batch size + during training for the purpose of demonstration. If the users have limited + GPU memory at hand, please fine-tune from our provided checkpoints whose + batch norm parameters have been trained, and use smaller learning rate with + fine_tune_batch_norm = False. + +2. The users should change atrous_rates from [6, 12, 18] to [12, 24, 36] if + setting output_stride=8. + +3. The users could skip the flag, `decoder_output_stride`, if you do not want + to use the decoder structure. + +4. Change and add the following flags in order to use the provided dense + prediction cell. Note we need to set decoder_output_stride if you want to + use the provided checkpoints which include the decoder module. + +```bash +--model_variant="xception_71" +--dense_prediction_cell_json="deeplab/core/dense_prediction_cell_branch5_top1_cityscapes.json" +--decoder_output_stride=4 +``` + +A local evaluation job using `xception_65` can be run with the following +command: + +```bash +# From tensorflow/models/research/ +python deeplab/eval.py \ + --logtostderr \ + --eval_split="val" \ + --model_variant="xception_65" \ + --atrous_rates=6 \ + --atrous_rates=12 \ + --atrous_rates=18 \ + --output_stride=16 \ + --decoder_output_stride=4 \ + --eval_crop_size="1025,2049" \ + --dataset="cityscapes" \ + --checkpoint_dir=${PATH_TO_CHECKPOINT} \ + --eval_logdir=${PATH_TO_EVAL_DIR} \ + --dataset_dir=${PATH_TO_DATASET} +``` + +where ${PATH_TO_CHECKPOINT} is the path to the trained checkpoint (i.e., the +path to train_logdir), ${PATH_TO_EVAL_DIR} is the directory in which evaluation +events will be written to, and ${PATH_TO_DATASET} is the directory in which the +Cityscapes dataset resides. 
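+
+For example, with the recommended directory structure above, these placeholders
+could be set as follows (the paths are illustrative, not required):
+
+```bash
+# Trained checkpoints live in the train_logdir; eval events and the TFRecord
+# data follow the suggested layout under deeplab/datasets/cityscapes.
+PATH_TO_CHECKPOINT=deeplab/datasets/cityscapes/exp/train_on_train_set/train
+PATH_TO_EVAL_DIR=deeplab/datasets/cityscapes/exp/train_on_train_set/eval
+PATH_TO_DATASET=deeplab/datasets/cityscapes/tfrecord
+```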
+ +A local visualization job using `xception_65` can be run with the following +command: + +```bash +# From tensorflow/models/research/ +python deeplab/vis.py \ + --logtostderr \ + --vis_split="val" \ + --model_variant="xception_65" \ + --atrous_rates=6 \ + --atrous_rates=12 \ + --atrous_rates=18 \ + --output_stride=16 \ + --decoder_output_stride=4 \ + --vis_crop_size="1025,2049" \ + --dataset="cityscapes" \ + --colormap_type="cityscapes" \ + --checkpoint_dir=${PATH_TO_CHECKPOINT} \ + --vis_logdir=${PATH_TO_VIS_DIR} \ + --dataset_dir=${PATH_TO_DATASET} +``` + +where ${PATH_TO_CHECKPOINT} is the path to the trained checkpoint (i.e., the +path to train_logdir), ${PATH_TO_VIS_DIR} is the directory in which evaluation +events will be written to, and ${PATH_TO_DATASET} is the directory in which the +Cityscapes dataset resides. Note that if the users would like to save the +segmentation results for evaluation server, set also_save_raw_predictions = +True. + +## Running Tensorboard + +Progress for training and evaluation jobs can be inspected using Tensorboard. If +using the recommended directory structure, Tensorboard can be run using the +following command: + +```bash +tensorboard --logdir=${PATH_TO_LOG_DIRECTORY} +``` + +where `${PATH_TO_LOG_DIRECTORY}` points to the directory that contains the +train, eval, and vis directories (e.g., the folder `train_on_train_set` in the +above example). Please note it may take Tensorboard a couple minutes to populate +with data. diff --git a/models/research/deeplab/g3doc/export_model.md b/models/research/deeplab/g3doc/export_model.md new file mode 100644 index 0000000000000000000000000000000000000000..c41649e609a39ccb2e7c7622e1d4e25f86d20cb7 --- /dev/null +++ b/models/research/deeplab/g3doc/export_model.md @@ -0,0 +1,23 @@ +# Export trained deeplab model to frozen inference graph + +After model training finishes, you could export it to a frozen TensorFlow +inference graph proto. Your trained model checkpoint usually includes the +following files: + +* model.ckpt-${CHECKPOINT_NUMBER}.data-00000-of-00001, +* model.ckpt-${CHECKPOINT_NUMBER}.index +* model.ckpt-${CHECKPOINT_NUMBER}.meta + +After you have identified a candidate checkpoint to export, you can run the +following commandline to export to a frozen graph: + +```bash +# From tensorflow/models/research/ +# Assume all checkpoint files share the same path prefix `${CHECKPOINT_PATH}`. +python deeplab/export_model.py \ + --checkpoint_path=${CHECKPOINT_PATH} \ + --export_path=${OUTPUT_DIR}/frozen_inference_graph.pb +``` + +Please also add other model specific flags as you use for training, such as +`model_variant`, `add_image_level_feature`, etc. diff --git a/models/research/deeplab/g3doc/faq.md b/models/research/deeplab/g3doc/faq.md new file mode 100644 index 0000000000000000000000000000000000000000..26ff4b3281cd624cb25292d89ef3fad55b8851f2 --- /dev/null +++ b/models/research/deeplab/g3doc/faq.md @@ -0,0 +1,87 @@ +# FAQ +___ +Q1: What if I want to use other network backbones, such as ResNet [1], instead of only those provided ones (e.g., Xception)? + +A: The users could modify the provided core/feature_extractor.py to support more network backbones. +___ +Q2: What if I want to train the model on other datasets? + +A: The users could modify the provided dataset/build_{cityscapes,voc2012}_data.py and dataset/segmentation_dataset.py to build their own dataset. +___ +Q3: Where can I download the PASCAL VOC augmented training set? 
+
+A: The PASCAL VOC augmented training set is provided by Bharath Hariharan et
+al. [2] Please refer to their
+[website](http://home.bharathh.info/pubs/codes/SBD/download.html) for details
+and consider citing their paper if using the dataset.
+___
+Q4: Why does the implementation not include DenseCRF [3]?
+
+A: We have not tried this. The interested users could take a look at Philipp
+Krähenbühl's [website](http://graphics.stanford.edu/projects/densecrf/) and
+[paper](https://arxiv.org/abs/1210.5644) for details.
+___
+Q5: What if I want to train the model and fine-tune the batch normalization
+parameters?
+
+A: Given the limited resources at hand, we would suggest you simply fine-tune
+from our provided checkpoint whose batch-norm parameters have been trained
+(i.e., train with a smaller learning rate, set `fine_tune_batch_norm = false`,
+and employ longer training iterations since the learning rate is small). If
+you really would like to train by yourself, we would suggest:
+
+1. Set `output_stride = 16` or maybe even `32` (remember to change the flag
+`atrous_rates` accordingly, e.g., `atrous_rates = [3, 6, 9]` for
+`output_stride = 32`).
+
+2. Use as many GPUs as possible (change the flag `num_clones` in train.py) and
+set `train_batch_size` as large as possible.
+
+3. Adjust the `train_crop_size` in train.py. Maybe set it to be smaller, e.g.,
+513x513 (or even 321x321), so that you could use a larger batch size.
+
+4. Use a smaller network backbone, such as MobileNet-v2.
+
+___
+Q6: How can I train the model asynchronously?
+
+A: In train.py, the users could set `num_replicas` (number of machines for
+training) and `num_ps_tasks` (we usually set `num_ps_tasks` = `num_replicas` /
+2). See slim.deployment.model_deploy for more details.
+___
+Q7: I could not reproduce the performance even with the provided checkpoints.
+
+A: Please try running
+
+```bash
+# Run the simple test with Xception_65 as network backbone.
+sh local_test.sh
+```
+
+or
+
+```bash
+# Run the simple test with MobileNet-v2 as network backbone.
+sh local_test_mobilenetv2.sh
+```
+
+First, make sure you can reproduce the results with our provided setting. After
+that, you could start to make one change at a time to help debug.
+___
+Q8: What value of `eval_crop_size` should I use?
+
+A: Our model uses whole-image inference, meaning that we need to set
+`eval_crop_size` equal to `output_stride` * k + 1, where k is an integer chosen
+so that the resulting `eval_crop_size` is slightly larger than the largest
+image dimension in the dataset. For example, we have `eval_crop_size` = 513x513
+for the PASCAL dataset, whose largest image dimension is 512. Similarly, we set
+`eval_crop_size` = 1025x2049 for Cityscapes images, whose image dimensions are
+all equal to 1024x2048.
+___
+Q9: Why is multi-GPU training slow?
+
+A: Please try to use more threads to pre-process the inputs. For example,
+change [num_readers = 4](https://github.com/tensorflow/models/blob/master/research/deeplab/train.py#L457).
+___
+
+
+## References
+
+1. **Deep Residual Learning for Image Recognition**<br>
+ Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
+ [[link]](https://arxiv.org/abs/1512.03385), In CVPR, 2016. + +2. **Semantic Contours from Inverse Detectors**
+ Bharath Hariharan, Pablo Arbelaez, Lubomir Bourdev, Subhransu Maji, Jitendra Malik
+ [[link]](http://home.bharathh.info/pubs/codes/SBD/download.html), In ICCV, 2011. + +3. **Efficient Inference in Fully Connected CRFs with Gaussian Edge Potentials**
+ Philipp Krähenbühl, Vladlen Koltun
+ [[link]](http://graphics.stanford.edu/projects/densecrf/), In NIPS, 2011. diff --git a/models/research/deeplab/g3doc/img/image1.jpg b/models/research/deeplab/g3doc/img/image1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..939b6f9cef3da337e1279246090f20bd78920bc8 Binary files /dev/null and b/models/research/deeplab/g3doc/img/image1.jpg differ diff --git a/models/research/deeplab/g3doc/img/image2.jpg b/models/research/deeplab/g3doc/img/image2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5ec1b8ac278906921bd3b6efec8fbe2e9d8c429e Binary files /dev/null and b/models/research/deeplab/g3doc/img/image2.jpg differ diff --git a/models/research/deeplab/g3doc/img/image3.jpg b/models/research/deeplab/g3doc/img/image3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d788e3dc68d684ca6e282bdff66a32abc767214a Binary files /dev/null and b/models/research/deeplab/g3doc/img/image3.jpg differ diff --git a/models/research/deeplab/g3doc/img/image_info.txt b/models/research/deeplab/g3doc/img/image_info.txt new file mode 100644 index 0000000000000000000000000000000000000000..583d113e7ebb4d81ca1cdc51c3317243600809ee --- /dev/null +++ b/models/research/deeplab/g3doc/img/image_info.txt @@ -0,0 +1,13 @@ +Image provenance: + +image1.jpg: Philippe Put, + https://www.flickr.com/photos/34547181@N00/14499172124 + +image2.jpg: Peretz Partensky + https://www.flickr.com/photos/ifl/3926001309 + +image3.jpg: Peter Harrison + https://www.flickr.com/photos/devcentre/392585679 + + +vis[1-3].png: Showing original image together with DeepLab segmentation map. diff --git a/models/research/deeplab/g3doc/img/vis1.png b/models/research/deeplab/g3doc/img/vis1.png new file mode 100644 index 0000000000000000000000000000000000000000..41b8ecd89590dcf6b635e32c3af4d4b18fbafede Binary files /dev/null and b/models/research/deeplab/g3doc/img/vis1.png differ diff --git a/models/research/deeplab/g3doc/img/vis2.png b/models/research/deeplab/g3doc/img/vis2.png new file mode 100644 index 0000000000000000000000000000000000000000..7fa7a4cacc4807f2ab1a9c802757d76e932a41c1 Binary files /dev/null and b/models/research/deeplab/g3doc/img/vis2.png differ diff --git a/models/research/deeplab/g3doc/img/vis3.png b/models/research/deeplab/g3doc/img/vis3.png new file mode 100644 index 0000000000000000000000000000000000000000..813b6340a61f63e3b838a91562bc0b914191ba47 Binary files /dev/null and b/models/research/deeplab/g3doc/img/vis3.png differ diff --git a/models/research/deeplab/g3doc/installation.md b/models/research/deeplab/g3doc/installation.md new file mode 100644 index 0000000000000000000000000000000000000000..8629aba42207fc6e35c907024485c0e7f29f5e10 --- /dev/null +++ b/models/research/deeplab/g3doc/installation.md @@ -0,0 +1,73 @@ +# Installation + +## Dependencies + +DeepLab depends on the following libraries: + +* Numpy +* Pillow 1.0 +* tf Slim (which is included in the "tensorflow/models/research/" checkout) +* Jupyter notebook +* Matplotlib +* Tensorflow + +For detailed steps to install Tensorflow, follow the [Tensorflow installation +instructions](https://www.tensorflow.org/install/). 
A typical user can install
+Tensorflow using one of the following commands:
+
+```bash
+# For CPU
+pip install tensorflow
+# For GPU
+pip install tensorflow-gpu
+```
+
+The remaining libraries can be installed on Ubuntu 14.04 via apt-get and pip:
+
+```bash
+sudo apt-get install python-pil python-numpy
+pip install --user jupyter
+pip install --user matplotlib
+pip install --user PrettyTable
+```
+
+## Add Libraries to PYTHONPATH
+
+When running locally, the tensorflow/models/research/ directory should be
+appended to PYTHONPATH. This can be done by running the following from
+tensorflow/models/research/:
+
+```bash
+# From tensorflow/models/research/
+export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim
+
+# [Optional] for panoptic evaluation, you might need panopticapi:
+# https://github.com/cocodataset/panopticapi
+# Please clone it to a local directory ${PANOPTICAPI_DIR}
+touch ${PANOPTICAPI_DIR}/panopticapi/__init__.py
+export PYTHONPATH=$PYTHONPATH:${PANOPTICAPI_DIR}/panopticapi
+```
+
+Note: This command needs to run from every new terminal you start. If you wish
+to avoid running this manually, you can add it as a new line to the end of your
+~/.bashrc file.
+
+# Testing the Installation
+
+You can test whether you have successfully installed the TensorFlow DeepLab
+code by running the following commands:
+
+Quick test by running model_test.py:
+
+```bash
+# From tensorflow/models/research/
+python deeplab/model_test.py
+```
+
+Quickly run the whole code on the PASCAL VOC 2012 dataset:
+
+```bash
+# From tensorflow/models/research/deeplab
+sh local_test.sh
+```
+
diff --git a/models/research/deeplab/g3doc/model_zoo.md b/models/research/deeplab/g3doc/model_zoo.md
new file mode 100644
index 0000000000000000000000000000000000000000..76972dc796e77838004a6f36bef73ca5bb66aff5
--- /dev/null
+++ b/models/research/deeplab/g3doc/model_zoo.md
@@ -0,0 +1,254 @@
+# TensorFlow DeepLab Model Zoo
+
+We provide deeplab models pretrained on several datasets, including (1) PASCAL
+VOC 2012, (2) Cityscapes, and (3) ADE20K, for reproducing our results, as well
+as some checkpoints that are only pretrained on ImageNet for training your own
+models.
+
+## DeepLab models trained on PASCAL VOC 2012
+
+Un-tar'ed directory includes:
+
+* a frozen inference graph (`frozen_inference_graph.pb`). All frozen inference
+  graphs by default use output stride of 8, a single eval scale of 1.0 and
+  no left-right flips, unless otherwise specified. MobileNet-v2 based models
+  do not include the decoder module.
+
+* a checkpoint (`model.ckpt.data-00000-of-00001`, `model.ckpt.index`)
+
+### Model details
+
+We provide several checkpoints that have been pretrained on the VOC 2012
+train_aug set or train_aug + trainval set. In the former case, one could train
+their model with a smaller batch size and freeze batch normalization when
+limited GPU memory is available, since we have already fine-tuned the batch
+normalization for you. In the latter case, one could directly evaluate the
+checkpoints on the VOC 2012 test set or use this checkpoint for demo. Note
+*MobileNet-v2* based models do not employ ASPP and decoder modules for fast
+computation.
+
+Checkpoint name | Network backbone | Pretrained dataset | ASPP | Decoder
+--------------------------- | :--------------: | :-----------------: | :---: | :-----:
+mobilenetv2_dm05_coco_voc_trainaug | MobileNet-v2<br>
Depth-Multiplier = 0.5 | ImageNet
MS-COCO
VOC 2012 train_aug set| N/A | N/A +mobilenetv2_dm05_coco_voc_trainval | MobileNet-v2
Depth-Multiplier = 0.5 | ImageNet
MS-COCO
VOC 2012 train_aug + trainval sets | N/A | N/A +mobilenetv2_coco_voc_trainaug | MobileNet-v2 | ImageNet
MS-COCO
VOC 2012 train_aug set| N/A | N/A +mobilenetv2_coco_voc_trainval | MobileNet-v2 | ImageNet
MS-COCO
VOC 2012 train_aug + trainval sets | N/A | N/A +xception65_coco_voc_trainaug | Xception_65 | ImageNet
MS-COCO
VOC 2012 train_aug set| [6,12,18] for OS=16
[12,24,36] for OS=8 | OS = 4 +xception65_coco_voc_trainval | Xception_65 | ImageNet
MS-COCO
VOC 2012 train_aug + trainval sets | [6,12,18] for OS=16
[12,24,36] for OS=8 | OS = 4 + +In the table, **OS** denotes output stride. + +Checkpoint name | Eval OS | Eval scales | Left-right Flip | Multiply-Adds | Runtime (sec) | PASCAL mIOU | File Size +------------------------------------------------------------------------------------------------------------------------ | :-------: | :------------------------: | :-------------: | :------------------: | :------------: | :----------------------------: | :-------: +[mobilenetv2_dm05_coco_voc_trainaug](http://download.tensorflow.org/models/deeplabv3_mnv2_dm05_pascal_trainaug_2018_10_01.tar.gz) | 16 | [1.0] | No | 0.88B | - | 70.19% (val) | 7.6MB +[mobilenetv2_dm05_coco_voc_trainval](http://download.tensorflow.org/models/deeplabv3_mnv2_dm05_pascal_trainval_2018_10_01.tar.gz) | 8 | [1.0] | No | 2.84B | - | 71.83% (test) | 7.6MB +[mobilenetv2_coco_voc_trainaug](http://download.tensorflow.org/models/deeplabv3_mnv2_pascal_train_aug_2018_01_29.tar.gz) | 16
8 | [1.0]
[0.5:0.25:1.75] | No
Yes | 2.75B
152.59B | 0.1
26.9 | 75.32% (val)
77.33% (val) | 23MB
+[mobilenetv2_coco_voc_trainval](http://download.tensorflow.org/models/deeplabv3_mnv2_pascal_trainval_2018_01_29.tar.gz) | 8 | [0.5:0.25:1.75] | Yes | 152.59B | 26.9 | 80.25% (**test**) | 23MB
+[xception65_coco_voc_trainaug](http://download.tensorflow.org/models/deeplabv3_pascal_train_aug_2018_01_04.tar.gz) | 16<br>
8 | [1.0]
[0.5:0.25:1.75] | No
Yes | 54.17B
3055.35B | 0.7
223.2 | 82.20% (val)
83.58% (val) | 439MB +[xception65_coco_voc_trainval](http://download.tensorflow.org/models/deeplabv3_pascal_trainval_2018_01_04.tar.gz) | 8 | [0.5:0.25:1.75] | Yes | 3055.35B | 223.2 | 87.80% (**test**) | 439MB + +In the table, we report both computation complexity (in terms of Multiply-Adds +and CPU Runtime) and segmentation performance (in terms of mIOU) on the PASCAL +VOC val or test set. The reported runtime is calculated by tfprof on a +workstation with CPU E5-1650 v3 @ 3.50GHz and 32GB memory. Note that applying +multi-scale inputs and left-right flips increases the segmentation performance +but also significantly increases the computation and thus may not be suitable +for real-time applications. + +## DeepLab models trained on Cityscapes + +### Model details + +We provide several checkpoints that have been pretrained on Cityscapes +train_fine set. Note *MobileNet-v2* based model has been pretrained on MS-COCO +dataset and does not employ ASPP and decoder modules for fast computation. + +Checkpoint name | Network backbone | Pretrained dataset | ASPP | Decoder +------------------------------------- | :--------------: | :-------------------------------------: | :----------------------------------------------: | :-----: +mobilenetv2_coco_cityscapes_trainfine | MobileNet-v2 | ImageNet
MS-COCO
Cityscapes train_fine set | N/A | N/A +mobilenetv3_large_cityscapes_trainfine | MobileNet-v3 Large | Cityscapes train_fine set
(No ImageNet) | N/A | OS = 8 +mobilenetv3_small_cityscapes_trainfine | MobileNet-v3 Small | Cityscapes train_fine set
(No ImageNet) | N/A | OS = 8 +xception65_cityscapes_trainfine | Xception_65 | ImageNet
Cityscapes train_fine set | [6, 12, 18] for OS=16
[12, 24, 36] for OS=8 | OS = 4 +xception71_dpc_cityscapes_trainfine | Xception_71 | ImageNet
MS-COCO
Cityscapes train_fine set | Dense Prediction Cell | OS = 4 +xception71_dpc_cityscapes_trainval | Xception_71 | ImageNet
MS-COCO
Cityscapes trainval_fine and coarse set | Dense Prediction Cell | OS = 4 + +In the table, **OS** denotes output stride. + +Note for mobilenet v3 models, we use additional commandline flags as follows: + +``` +--model_variant={ mobilenet_v3_large_seg | mobilenet_v3_small_seg } +--image_pooling_crop_size=769,769 +--image_pooling_stride=4,5 +--add_image_level_feature=1 +--aspp_convs_filters=128 +--aspp_with_concat_projection=0 +--aspp_with_squeeze_and_excitation=1 +--decoder_use_sum_merge=1 +--decoder_filters=19 +--decoder_output_is_logits=1 +--image_se_uses_qsigmoid=1 +--decoder_output_stride=8 +--output_stride=32 +``` + +Checkpoint name | Eval OS | Eval scales | Left-right Flip | Multiply-Adds | Runtime (sec) | Cityscapes mIOU | File Size +-------------------------------------------------------------------------------------------------------------------------------- | :-------: | :-------------------------: | :-------------: | :-------------------: | :------------: | :----------------------------: | :-------: +[mobilenetv2_coco_cityscapes_trainfine](http://download.tensorflow.org/models/deeplabv3_mnv2_cityscapes_train_2018_02_05.tar.gz) | 16
8 | [1.0]
[0.75:0.25:1.25] | No
Yes | 21.27B
433.24B | 0.8
51.12 | 70.71% (val)
73.57% (val) | 23MB +[mobilenetv3_large_cityscapes_trainfine](http://download.tensorflow.org/models/deeplab_mnv3_large_cityscapes_trainfine_2019_11_15.tar.gz) | 32 | [1.0] | No | 15.95B | 0.6 | 72.41% (val) | 17MB +[mobilenetv3_small_cityscapes_trainfine](http://download.tensorflow.org/models/deeplab_mnv3_small_cityscapes_trainfine_2019_11_15.tar.gz) | 32 | [1.0] | No | 4.63B | 0.4 | 68.99% (val) | 5MB +[xception65_cityscapes_trainfine](http://download.tensorflow.org/models/deeplabv3_cityscapes_train_2018_02_06.tar.gz) | 16
8 | [1.0]
[0.75:0.25:1.25] | No
Yes | 418.64B
8677.92B | 5.0
422.8 | 78.79% (val)
80.42% (val) | 439MB
+[xception71_dpc_cityscapes_trainfine](http://download.tensorflow.org/models/deeplab_cityscapes_xception71_trainfine_2018_09_08.tar.gz) | 16 | [1.0] | No | 502.07B | - | 80.31% (val) | 445MB
+[xception71_dpc_cityscapes_trainval](http://download.tensorflow.org/models/deeplab_cityscapes_xception71_trainvalfine_2018_09_08.tar.gz) | 8 | [0.75:0.25:2] | Yes | - | - | 82.66% (**test**) | 446MB
+
+### EdgeTPU-DeepLab models on Cityscapes
+
+EdgeTPU is Google's machine learning accelerator architecture for edge devices
+(found in Coral devices and Pixel4's Neural Core). Leveraging neural
+architecture search (NAS, also known as Auto-ML) algorithms,
+[EdgeTPU-Mobilenet](https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet)
+has been released; it yields higher hardware utilization, lower latency, and
+better accuracy than Mobilenet-v2/v3. We use EdgeTPU-Mobilenet as the backbone
+and provide checkpoints that have been pretrained on the Cityscapes train_fine
+set. We name them EdgeTPU-DeepLab models.
+
+Checkpoint name | Network backbone | Pretrained dataset | ASPP | Decoder
+-------------------- | :----------------: | :----------------: | :--: | :-----:
+EdgeTPU-DeepLab | EdgeMobilenet-1.0 | ImageNet | N/A | N/A
+EdgeTPU-DeepLab-slim | EdgeMobilenet-0.75 | ImageNet | N/A | N/A
+
+For EdgeTPU-DeepLab-slim, the backbone feature extractor has depth multiplier =
+0.75 and aspp_convs_filters = 128. We do not employ ASPP nor decoder modules to
+further reduce the latency. We employ the same train/eval flags used for the
+MobileNet-v2 DeepLab model. Flags changed for the EdgeTPU-DeepLab model are
+listed here.
+
+```
+--decoder_output_stride=''
+--aspp_convs_filters=256
+--model_variant=mobilenet_edgetpu
+```
+
+For EdgeTPU-DeepLab-slim, also include the following flags.
+
+```
+--depth_multiplier=0.75
+--aspp_convs_filters=128
+```
+
+Checkpoint name | Eval OS | Eval scales | Cityscapes mIOU | Multiply-Adds | Simulator latency on Pixel 4 EdgeTPU
+---------------------------------------------------------------------------------------------------- | :--------: | :---------: | :--------------------------: | :------------: | :----------------------------------:
+[EdgeTPU-DeepLab](http://download.tensorflow.org/models/edgetpu-deeplab_2020_03_09.tar.gz) | 32<br>
16 | [1.0] | 70.6% (val)
74.1% (val) | 5.6B
7.1B | 13.8 ms
17.5 ms +[EdgeTPU-DeepLab-slim](http://download.tensorflow.org/models/edgetpu-deeplab-slim_2020_03_09.tar.gz) | 32
16 | [1.0] | 70.0% (val)
73.2% (val) | 3.5B
4.3B | 9.9 ms
13.2 ms + +## DeepLab models trained on ADE20K + +### Model details + +We provide some checkpoints that have been pretrained on ADE20K training set. +Note that the model has only been pretrained on ImageNet, following the +dataset rule. + +Checkpoint name | Network backbone | Pretrained dataset | ASPP | Decoder | Input size +------------------------------------- | :--------------: | :-------------------------------------: | :----------------------------------------------: | :-----: | :-----: +mobilenetv2_ade20k_train | MobileNet-v2 | ImageNet
ADE20K training set | N/A | OS = 4 | 257x257 +xception65_ade20k_train | Xception_65 | ImageNet
ADE20K training set | [6, 12, 18] for OS=16
[12, 24, 36] for OS=8 | OS = 4 | 513x513
+
+The input dimensions of ADE20K images vary widely. We resize inputs so that the
+longest side is 257 for MobileNet-v2 (faster inference) and 513 for Xception_65
+(better performance). Note that we also include the decoder module in the
+MobileNet-v2 checkpoint.
+
+Checkpoint name | Eval OS | Eval scales | Left-right Flip | mIOU | Pixel-wise Accuracy | File Size
+------------------------------------- | :-------: | :-------------------------: | :-------------: | :-------------------: | :-------------------: | :-------:
+[mobilenetv2_ade20k_train](http://download.tensorflow.org/models/deeplabv3_mnv2_ade20k_train_2018_12_03.tar.gz) | 16 | [1.0] | No | 32.04% (val) | 75.41% (val) | 24.8MB
+[xception65_ade20k_train](http://download.tensorflow.org/models/deeplabv3_xception_ade20k_train_2018_05_29.tar.gz) | 8 | [0.5:0.25:1.75] | Yes | 45.65% (val) | 82.52% (val) | 439MB
+
+
+## Checkpoints pretrained on ImageNet
+
+Un-tar'ed directory includes:
+
+* model checkpoint (`model.ckpt.data-00000-of-00001`, `model.ckpt.index`).
+
+### Model details
+
+We also provide some checkpoints that are pretrained on ImageNet and/or COCO (as
+suffixed in the model name) so that one could use them for training one's own
+models.
+
+* mobilenet_v2: We refer the interested users to the TensorFlow open source
+  [MobileNet-V2](https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet)
+  for details.
+
+* xception_{41,65,71}: We adapt the original Xception model to the task of
+  semantic segmentation with the following changes: (1) more layers, (2) all
+  max pooling operations are replaced by strided (atrous) separable
+  convolutions, and (3) extra batch-norm and ReLU after each 3x3 depthwise
+  convolution are added. We provide three Xception model variants with
+  different network depths.
+
+* resnet_v1_{50,101}_beta: We modify the original ResNet-101 [10], similar to
+  PSPNet [11], by replacing the first 7x7 convolution with three 3x3
+  convolutions. See resnet_v1_beta.py for more details.
+
+Model name | File Size
+-------------------------------------------------------------------------------------- | :-------:
+[xception_41_imagenet](http://download.tensorflow.org/models/xception_41_2018_05_09.tar.gz) | 288MB
+[xception_65_imagenet](http://download.tensorflow.org/models/deeplabv3_xception_2018_01_04.tar.gz) | 447MB
+[xception_65_imagenet_coco](http://download.tensorflow.org/models/xception_65_coco_pretrained_2018_10_02.tar.gz) | 292MB
+[xception_71_imagenet](http://download.tensorflow.org/models/xception_71_2018_05_09.tar.gz) | 474MB
+[resnet_v1_50_beta_imagenet](http://download.tensorflow.org/models/resnet_v1_50_2018_05_04.tar.gz) | 274MB
+[resnet_v1_101_beta_imagenet](http://download.tensorflow.org/models/resnet_v1_101_2018_05_04.tar.gz) | 477MB
+
+## References
+
+1. **Mobilenets: Efficient convolutional neural networks for mobile vision applications**<br>
+ Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, Hartwig Adam
+ [[link]](https://arxiv.org/abs/1704.04861). arXiv:1704.04861, 2017. + +2. **Inverted Residuals and Linear Bottlenecks: Mobile Networks for Classification, Detection and Segmentation**
+ Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen
+ [[link]](https://arxiv.org/abs/1801.04381). arXiv:1801.04381, 2018. + +3. **Xception: Deep Learning with Depthwise Separable Convolutions**
+ François Chollet
+ [[link]](https://arxiv.org/abs/1610.02357). In the Proc. of CVPR, 2017. + +4. **Deformable Convolutional Networks -- COCO Detection and Segmentation Challenge 2017 Entry**
+ Haozhi Qi, Zheng Zhang, Bin Xiao, Han Hu, Bowen Cheng, Yichen Wei, Jifeng Dai
+ [[link]](http://presentations.cocodataset.org/COCO17-Detect-MSRA.pdf). ICCV COCO Challenge + Workshop, 2017. + +5. **The Pascal Visual Object Classes Challenge: A Retrospective**
+ Mark Everingham, S. M. Ali Eslami, Luc Van Gool, Christopher K. I. Williams, John M. Winn, Andrew Zisserman
+ [[link]](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/). IJCV, 2014. + +6. **Semantic Contours from Inverse Detectors**
+ Bharath Hariharan, Pablo Arbelaez, Lubomir Bourdev, Subhransu Maji, Jitendra Malik
+ [[link]](http://home.bharathh.info/pubs/codes/SBD/download.html). In the Proc. of ICCV, 2011. + +7. **The Cityscapes Dataset for Semantic Urban Scene Understanding**
+    Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, Bernt Schiele<br>
+ [[link]](https://www.cityscapes-dataset.com/). In the Proc. of CVPR, 2016. + +8. **Microsoft COCO: Common Objects in Context**
+ Tsung-Yi Lin, Michael Maire, Serge Belongie, Lubomir Bourdev, Ross Girshick, James Hays, Pietro Perona, Deva Ramanan, C. Lawrence Zitnick, Piotr Dollar
+ [[link]](http://cocodataset.org/). In the Proc. of ECCV, 2014. + +9. **ImageNet Large Scale Visual Recognition Challenge**
+ Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, Alexander C. Berg, Li Fei-Fei
+ [[link]](http://www.image-net.org/). IJCV, 2015. + +10. **Deep Residual Learning for Image Recognition**
+ Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
+ [[link]](https://arxiv.org/abs/1512.03385). CVPR, 2016. + +11. **Pyramid Scene Parsing Network**
+ Hengshuang Zhao, Jianping Shi, Xiaojuan Qi, Xiaogang Wang, Jiaya Jia
+ [[link]](https://arxiv.org/abs/1612.01105). In CVPR, 2017. + +12. **Scene Parsing through ADE20K Dataset**
+ Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, Antonio Torralba
+ [[link]](http://groups.csail.mit.edu/vision/datasets/ADE20K/). In CVPR, + 2017. + +13. **Searching for MobileNetV3**
+ Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan, Weijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V. Le, Hartwig Adam
+ [[link]](https://arxiv.org/abs/1905.02244). In ICCV, 2019. diff --git a/models/research/deeplab/g3doc/pascal.md b/models/research/deeplab/g3doc/pascal.md new file mode 100644 index 0000000000000000000000000000000000000000..f4bc84eabb83e90ff192077749784fb2560057dd --- /dev/null +++ b/models/research/deeplab/g3doc/pascal.md @@ -0,0 +1,161 @@ +# Running DeepLab on PASCAL VOC 2012 Semantic Segmentation Dataset + +This page walks through the steps required to run DeepLab on PASCAL VOC 2012 on +a local machine. + +## Download dataset and convert to TFRecord + +We have prepared the script (under the folder `datasets`) to download and +convert PASCAL VOC 2012 semantic segmentation dataset to TFRecord. + +```bash +# From the tensorflow/models/research/deeplab/datasets directory. +sh download_and_convert_voc2012.sh +``` + +The converted dataset will be saved at +./deeplab/datasets/pascal_voc_seg/tfrecord + +## Recommended Directory Structure for Training and Evaluation + +``` ++ datasets + + pascal_voc_seg + + VOCdevkit + + VOC2012 + + JPEGImages + + SegmentationClass + + tfrecord + + exp + + train_on_train_set + + train + + eval + + vis +``` + +where the folder `train_on_train_set` stores the train/eval/vis events and +results (when training DeepLab on the PASCAL VOC 2012 train set). + +## Running the train/eval/vis jobs + +A local training job using `xception_65` can be run with the following command: + +```bash +# From tensorflow/models/research/ +python deeplab/train.py \ + --logtostderr \ + --training_number_of_steps=30000 \ + --train_split="train" \ + --model_variant="xception_65" \ + --atrous_rates=6 \ + --atrous_rates=12 \ + --atrous_rates=18 \ + --output_stride=16 \ + --decoder_output_stride=4 \ + --train_crop_size="513,513" \ + --train_batch_size=1 \ + --dataset="pascal_voc_seg" \ + --tf_initial_checkpoint=${PATH_TO_INITIAL_CHECKPOINT} \ + --train_logdir=${PATH_TO_TRAIN_DIR} \ + --dataset_dir=${PATH_TO_DATASET} +``` + +where ${PATH_TO_INITIAL_CHECKPOINT} is the path to the initial checkpoint +(usually an ImageNet pretrained checkpoint), ${PATH_TO_TRAIN_DIR} is the +directory in which training checkpoints and events will be written to, and +${PATH_TO_DATASET} is the directory in which the PASCAL VOC 2012 dataset +resides. + +**Note that for {train,eval,vis}.py:** + +1. In order to reproduce our results, one needs to use large batch size (> 12), + and set fine_tune_batch_norm = True. Here, we simply use small batch size + during training for the purpose of demonstration. If the users have limited + GPU memory at hand, please fine-tune from our provided checkpoints whose + batch norm parameters have been trained, and use smaller learning rate with + fine_tune_batch_norm = False. + +2. The users should change atrous_rates from [6, 12, 18] to [12, 24, 36] if + setting output_stride=8. + +3. The users could skip the flag, `decoder_output_stride`, if you do not want + to use the decoder structure. 
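
Before launching longer jobs, it can be worth confirming that the TFRecord
conversion above actually produced data. Below is a small sanity-check sketch;
it is hypothetical (not part of the repository) and assumes TF 1.x plus the
default output directory of `download_and_convert_voc2012.sh`:

```python
# count_voc_tfrecords.py -- hypothetical helper, not part of the DeepLab code base.
# Counts the examples in each converted PASCAL VOC 2012 split (TF 1.x APIs).
import os

import tensorflow as tf

# Assumption: the default output directory of download_and_convert_voc2012.sh.
DATASET_DIR = './deeplab/datasets/pascal_voc_seg/tfrecord'

for split in ('train', 'val', 'trainval'):
  shards = tf.gfile.Glob(os.path.join(DATASET_DIR, '%s-*' % split))
  num_examples = sum(
      1 for shard in shards
      for _ in tf.python_io.tf_record_iterator(shard))
  print('%s: %d shards, %d examples' % (split, len(shards), num_examples))
```

The `val` split should report 1449 examples (the full PASCAL VOC 2012
validation set), which is also the count quoted in `local_test.sh`.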
+ +A local evaluation job using `xception_65` can be run with the following +command: + +```bash +# From tensorflow/models/research/ +python deeplab/eval.py \ + --logtostderr \ + --eval_split="val" \ + --model_variant="xception_65" \ + --atrous_rates=6 \ + --atrous_rates=12 \ + --atrous_rates=18 \ + --output_stride=16 \ + --decoder_output_stride=4 \ + --eval_crop_size="513,513" \ + --dataset="pascal_voc_seg" \ + --checkpoint_dir=${PATH_TO_CHECKPOINT} \ + --eval_logdir=${PATH_TO_EVAL_DIR} \ + --dataset_dir=${PATH_TO_DATASET} +``` + +where ${PATH_TO_CHECKPOINT} is the path to the trained checkpoint (i.e., the +path to train_logdir), ${PATH_TO_EVAL_DIR} is the directory in which evaluation +events will be written to, and ${PATH_TO_DATASET} is the directory in which the +PASCAL VOC 2012 dataset resides. + +A local visualization job using `xception_65` can be run with the following +command: + +```bash +# From tensorflow/models/research/ +python deeplab/vis.py \ + --logtostderr \ + --vis_split="val" \ + --model_variant="xception_65" \ + --atrous_rates=6 \ + --atrous_rates=12 \ + --atrous_rates=18 \ + --output_stride=16 \ + --decoder_output_stride=4 \ + --vis_crop_size="513,513" \ + --dataset="pascal_voc_seg" \ + --checkpoint_dir=${PATH_TO_CHECKPOINT} \ + --vis_logdir=${PATH_TO_VIS_DIR} \ + --dataset_dir=${PATH_TO_DATASET} +``` + +where ${PATH_TO_CHECKPOINT} is the path to the trained checkpoint (i.e., the +path to train_logdir), ${PATH_TO_VIS_DIR} is the directory in which evaluation +events will be written to, and ${PATH_TO_DATASET} is the directory in which the +PASCAL VOC 2012 dataset resides. Note that if the users would like to save the +segmentation results for evaluation server, set also_save_raw_predictions = +True. + +## Running Tensorboard + +Progress for training and evaluation jobs can be inspected using Tensorboard. If +using the recommended directory structure, Tensorboard can be run using the +following command: + +```bash +tensorboard --logdir=${PATH_TO_LOG_DIRECTORY} +``` + +where `${PATH_TO_LOG_DIRECTORY}` points to the directory that contains the +train, eval, and vis directories (e.g., the folder `train_on_train_set` in the +above example). Please note it may take Tensorboard a couple minutes to populate +with data. + +## Example + +We provide a script to run the {train,eval,vis,export_model}.py on the PASCAL VOC +2012 dataset as an example. See the code in local_test.sh for details. + +```bash +# From tensorflow/models/research/deeplab +sh local_test.sh +``` diff --git a/models/research/deeplab/g3doc/quantize.md b/models/research/deeplab/g3doc/quantize.md new file mode 100644 index 0000000000000000000000000000000000000000..d88a2e9a8acbac4a0de6e3ea2bed65cb44535665 --- /dev/null +++ b/models/research/deeplab/g3doc/quantize.md @@ -0,0 +1,110 @@ +# Quantize DeepLab model for faster on-device inference + +This page describes the steps required to quantize DeepLab model and convert it +to TFLite for on-device inference. The main steps include: + +1. Quantization-aware training +1. Exporting model +1. Converting to TFLite FlatBuffer + +We provide details for each step below. + +## Quantization-aware training + +DeepLab supports two approaches to quantize your model. + +1. **[Recommended]** Training a non-quantized model until convergence. Then + fine-tune the trained float model with quantization using a small learning + rate (on PASCAL we use the value of 3e-5) . This fine-tuning step usually + takes 2k to 5k steps to converge. + +1. 
Training a deeplab float model with delayed quantization. Usually we delay + quantization until the last a few thousand steps in training. + +In the current implementation, quantization is only supported with 1) +`num_clones=1` for training and 2) single scale inference for evaluation, +visualization and model export. To get the best performance for the quantized +model, we strongly recommend to train the float model with larger `num_clones` +and then fine-tune the model with a single clone. + +Here shows the commandline to quantize deeplab model trained on PASCAL VOC +dataset using fine-tuning: + +``` +# From tensorflow/models/research/ +python deeplab/train.py \ + --logtostderr \ + --training_number_of_steps=3000 \ + --train_split="train" \ + --model_variant="mobilenet_v2" \ + --output_stride=16 \ + --train_crop_size="513,513" \ + --train_batch_size=8 \ + --base_learning_rate=3e-5 \ + --dataset="pascal_voc_seg" \ + --initialize_last_layer \ + --quantize_delay_step=0 \ + --tf_initial_checkpoint=${PATH_TO_TRAINED_FLOAT_MODEL} \ + --train_logdir=${PATH_TO_TRAIN_DIR} \ + --dataset_dir=${PATH_TO_DATASET} +``` + +## Converting to TFLite FlatBuffer + +First use the following commandline to export your trained model. + +``` +# From tensorflow/models/research/ +python deeplab/export_model.py \ + --checkpoint_path=${CHECKPOINT_PATH} \ + --quantize_delay_step=0 \ + --export_path=${OUTPUT_DIR}/frozen_inference_graph.pb + +``` + +Commandline below shows how to convert exported graphdef to TFlite model. + +``` +tflite_convert \ + --graph_def_file=${OUTPUT_DIR}/frozen_inference_graph.pb \ + --output_file=${OUTPUT_DIR}/frozen_inference_graph.tflite \ + --output_format=TFLITE \ + --input_shape=1,513,513,3 \ + --input_arrays="MobilenetV2/MobilenetV2/input" \ + --inference_type=QUANTIZED_UINT8 \ + --inference_input_type=QUANTIZED_UINT8 \ + --std_dev_values=128 \ + --mean_values=128 \ + --change_concat_input_ranges=true \ + --output_arrays="ArgMax" +``` + +**[Important]** Note that converted model expects 513x513 RGB input and doesn't +include preprocessing (resize and pad input image) and post processing (crop +padded region and resize to original input size). These steps can be implemented +outside of TFlite model. + +## Quantized model on PASCAL VOC + +We provide float and quantized checkpoints that have been pretrained on VOC 2012 +train_aug set, using MobileNet-v2 backbone with different depth multipliers. +Quantized model usually have 1% decay in mIoU. 
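
As noted in the conversion section above, the TFLite graph expects a fixed
513x513 RGB input and leaves pre-processing (resize and pad) and
post-processing (crop and resize back) to the caller. The following is a
minimal sketch of those steps, assuming a TensorFlow build that provides
`tf.lite.Interpreter` plus NumPy and Pillow; file names follow the
tflite_convert command above, and the script is illustrative rather than part
of the repository:

```python
# tflite_inference.py -- hypothetical example, not part of the DeepLab code base.
# Shows the resize/pad and crop/resize-back steps around the converted model.
import numpy as np
import tensorflow as tf
from PIL import Image

INPUT_SIZE = 513  # Must match the --input_shape passed to tflite_convert.

interpreter = tf.lite.Interpreter(model_path='frozen_inference_graph.tflite')
interpreter.allocate_tensors()
input_index = interpreter.get_input_details()[0]['index']
output_index = interpreter.get_output_details()[0]['index']

image = Image.open('input.jpg').convert('RGB')
width, height = image.size

# Pre-processing: resize so the longer side is INPUT_SIZE, then pad to a
# square. With mean/std of 128 the quantized input takes raw uint8 pixels.
ratio = float(INPUT_SIZE) / max(width, height)
resized = image.resize((int(ratio * width), int(ratio * height)), Image.BILINEAR)
padded = np.zeros((INPUT_SIZE, INPUT_SIZE, 3), dtype=np.uint8)
padded[:resized.size[1], :resized.size[0], :] = np.asarray(resized)

interpreter.set_tensor(input_index, padded[np.newaxis, ...])
interpreter.invoke()
seg_map = interpreter.get_tensor(output_index)[0]  # ArgMax label map, 513x513.

# Post-processing: drop the padded region and resize back to the input size.
seg_map = seg_map[:resized.size[1], :resized.size[0]].astype(np.uint8)
seg_map = np.asarray(Image.fromarray(seg_map).resize((width, height), Image.NEAREST))
```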
+ +For quantized (8bit) model, un-tar'ed directory includes: + +* a frozen inference graph (frozen_inference_graph.pb) + +* a checkpoint (model.ckpt.data*, model.ckpt.index) + +* a converted TFlite FlatBuffer file (frozen_inference_graph.tflite) + +Checkpoint name | Eval OS | Eval scales | Left-right Flip | Multiply-Adds | Quantize | PASCAL mIOU | Folder Size | TFLite File Size +-------------------------------------------------------------------------------------------------------------------------------------------- | :-----: | :---------: | :-------------: | :-----------: | :------: | :----------: | :-------: | :-------: +[mobilenetv2_dm05_coco_voc_trainaug](http://download.tensorflow.org/models/deeplabv3_mnv2_dm05_pascal_trainaug_2018_10_01.tar.gz) | 16 | [1.0] | No | 0.88B | No | 70.19% (val) | 7.6MB | N/A +[mobilenetv2_dm05_coco_voc_trainaug_8bit](http://download.tensorflow.org/models/deeplabv3_mnv2_dm05_pascal_train_aug_8bit_2019_04_26.tar.gz) | 16 | [1.0] | No | 0.88B | Yes | 69.65% (val) | 8.2MB | 751.1KB +[mobilenetv2_coco_voc_trainaug](http://download.tensorflow.org/models/deeplabv3_mnv2_pascal_train_aug_2018_01_29.tar.gz) | 16 | [1.0] | No | 2.75B | No | 75.32% (val) | 23MB | N/A +[mobilenetv2_coco_voc_trainaug_8bit](http://download.tensorflow.org/models/deeplabv3_mnv2_pascal_train_aug_8bit_2019_04_26.tar.gz) | 16 | [1.0] | No | 2.75B | Yes | 74.26% (val) | 24MB | 2.2MB + +Note that you might need the nightly build of TensorFlow (see +[here](https://www.tensorflow.org/install) for install instructions) to convert +above quantized model to TFLite. diff --git a/models/research/deeplab/input_preprocess.py b/models/research/deeplab/input_preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..9ca8bce4eb9104b22469419c4e6af4beaba9406a --- /dev/null +++ b/models/research/deeplab/input_preprocess.py @@ -0,0 +1,139 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Prepares the data used for DeepLab training/evaluation.""" +import tensorflow as tf +from deeplab.core import feature_extractor +from deeplab.core import preprocess_utils + + +# The probability of flipping the images and labels +# left-right during training +_PROB_OF_FLIP = 0.5 + + +def preprocess_image_and_label(image, + label, + crop_height, + crop_width, + min_resize_value=None, + max_resize_value=None, + resize_factor=None, + min_scale_factor=1., + max_scale_factor=1., + scale_factor_step_size=0, + ignore_label=255, + is_training=True, + model_variant=None): + """Preprocesses the image and label. + + Args: + image: Input image. + label: Ground truth annotation label. + crop_height: The height value used to crop the image and label. + crop_width: The width value used to crop the image and label. + min_resize_value: Desired size of the smaller image side. + max_resize_value: Maximum allowed size of the larger image side. 
+ resize_factor: Resized dimensions are multiple of factor plus one. + min_scale_factor: Minimum scale factor value. + max_scale_factor: Maximum scale factor value. + scale_factor_step_size: The step size from min scale factor to max scale + factor. The input is randomly scaled based on the value of + (min_scale_factor, max_scale_factor, scale_factor_step_size). + ignore_label: The label value which will be ignored for training and + evaluation. + is_training: If the preprocessing is used for training or not. + model_variant: Model variant (string) for choosing how to mean-subtract the + images. See feature_extractor.network_map for supported model variants. + + Returns: + original_image: Original image (could be resized). + processed_image: Preprocessed image. + label: Preprocessed ground truth segmentation label. + + Raises: + ValueError: Ground truth label not provided during training. + """ + if is_training and label is None: + raise ValueError('During training, label must be provided.') + if model_variant is None: + tf.logging.warning('Default mean-subtraction is performed. Please specify ' + 'a model_variant. See feature_extractor.network_map for ' + 'supported model variants.') + + # Keep reference to original image. + original_image = image + + processed_image = tf.cast(image, tf.float32) + + if label is not None: + label = tf.cast(label, tf.int32) + + # Resize image and label to the desired range. + if min_resize_value or max_resize_value: + [processed_image, label] = ( + preprocess_utils.resize_to_range( + image=processed_image, + label=label, + min_size=min_resize_value, + max_size=max_resize_value, + factor=resize_factor, + align_corners=True)) + # The `original_image` becomes the resized image. + original_image = tf.identity(processed_image) + + # Data augmentation by randomly scaling the inputs. + if is_training: + scale = preprocess_utils.get_random_scale( + min_scale_factor, max_scale_factor, scale_factor_step_size) + processed_image, label = preprocess_utils.randomly_scale_image_and_label( + processed_image, label, scale) + processed_image.set_shape([None, None, 3]) + + # Pad image and label to have dimensions >= [crop_height, crop_width] + image_shape = tf.shape(processed_image) + image_height = image_shape[0] + image_width = image_shape[1] + + target_height = image_height + tf.maximum(crop_height - image_height, 0) + target_width = image_width + tf.maximum(crop_width - image_width, 0) + + # Pad image with mean pixel value. + mean_pixel = tf.reshape( + feature_extractor.mean_pixel(model_variant), [1, 1, 3]) + processed_image = preprocess_utils.pad_to_bounding_box( + processed_image, 0, 0, target_height, target_width, mean_pixel) + + if label is not None: + label = preprocess_utils.pad_to_bounding_box( + label, 0, 0, target_height, target_width, ignore_label) + + # Randomly crop the image and label. + if is_training and label is not None: + processed_image, label = preprocess_utils.random_crop( + [processed_image, label], crop_height, crop_width) + + processed_image.set_shape([crop_height, crop_width, 3]) + + if label is not None: + label.set_shape([crop_height, crop_width, 1]) + + if is_training: + # Randomly left-right flip the image and label. 
+ processed_image, label, _ = preprocess_utils.flip_dim( + [processed_image, label], _PROB_OF_FLIP, dim=1) + + return original_image, processed_image, label diff --git a/models/research/deeplab/local_test.sh b/models/research/deeplab/local_test.sh new file mode 100644 index 0000000000000000000000000000000000000000..d5e4a5f42bb4241d4b6dd1b9d8a2619c4ca9dc8b --- /dev/null +++ b/models/research/deeplab/local_test.sh @@ -0,0 +1,147 @@ +#!/bin/bash +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# +# This script is used to run local test on PASCAL VOC 2012. Users could also +# modify from this script for their use case. +# +# Usage: +# # From the tensorflow/models/research/deeplab directory. +# sh ./local_test.sh +# +# + +# Exit immediately if a command exits with a non-zero status. +set -e + +# Move one-level up to tensorflow/models/research directory. +cd .. + +# Update PYTHONPATH. +export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim + +# Set up the working environment. +CURRENT_DIR=$(pwd) +WORK_DIR="${CURRENT_DIR}/deeplab" + +# Run model_test first to make sure the PYTHONPATH is correctly set. +python "${WORK_DIR}"/model_test.py + +# Go to datasets folder and download PASCAL VOC 2012 segmentation dataset. +DATASET_DIR="datasets" +cd "${WORK_DIR}/${DATASET_DIR}" +sh download_and_convert_voc2012.sh + +# Go back to original directory. +cd "${CURRENT_DIR}" + +# Set up the working directories. +PASCAL_FOLDER="pascal_voc_seg" +EXP_FOLDER="exp/train_on_trainval_set" +INIT_FOLDER="${WORK_DIR}/${DATASET_DIR}/${PASCAL_FOLDER}/init_models" +TRAIN_LOGDIR="${WORK_DIR}/${DATASET_DIR}/${PASCAL_FOLDER}/${EXP_FOLDER}/train" +EVAL_LOGDIR="${WORK_DIR}/${DATASET_DIR}/${PASCAL_FOLDER}/${EXP_FOLDER}/eval" +VIS_LOGDIR="${WORK_DIR}/${DATASET_DIR}/${PASCAL_FOLDER}/${EXP_FOLDER}/vis" +EXPORT_DIR="${WORK_DIR}/${DATASET_DIR}/${PASCAL_FOLDER}/${EXP_FOLDER}/export" +mkdir -p "${INIT_FOLDER}" +mkdir -p "${TRAIN_LOGDIR}" +mkdir -p "${EVAL_LOGDIR}" +mkdir -p "${VIS_LOGDIR}" +mkdir -p "${EXPORT_DIR}" + +# Copy locally the trained checkpoint as the initial checkpoint. +TF_INIT_ROOT="http://download.tensorflow.org/models" +TF_INIT_CKPT="deeplabv3_pascal_train_aug_2018_01_04.tar.gz" +cd "${INIT_FOLDER}" +wget -nd -c "${TF_INIT_ROOT}/${TF_INIT_CKPT}" +tar -xf "${TF_INIT_CKPT}" +cd "${CURRENT_DIR}" + +PASCAL_DATASET="${WORK_DIR}/${DATASET_DIR}/${PASCAL_FOLDER}/tfrecord" + +# Train 10 iterations. 
+NUM_ITERATIONS=10 +python "${WORK_DIR}"/train.py \ + --logtostderr \ + --train_split="trainval" \ + --model_variant="xception_65" \ + --atrous_rates=6 \ + --atrous_rates=12 \ + --atrous_rates=18 \ + --output_stride=16 \ + --decoder_output_stride=4 \ + --train_crop_size="513,513" \ + --train_batch_size=4 \ + --training_number_of_steps="${NUM_ITERATIONS}" \ + --fine_tune_batch_norm=true \ + --tf_initial_checkpoint="${INIT_FOLDER}/deeplabv3_pascal_train_aug/model.ckpt" \ + --train_logdir="${TRAIN_LOGDIR}" \ + --dataset_dir="${PASCAL_DATASET}" + +# Run evaluation. This performs eval over the full val split (1449 images) and +# will take a while. +# Using the provided checkpoint, one should expect mIOU=82.20%. +python "${WORK_DIR}"/eval.py \ + --logtostderr \ + --eval_split="val" \ + --model_variant="xception_65" \ + --atrous_rates=6 \ + --atrous_rates=12 \ + --atrous_rates=18 \ + --output_stride=16 \ + --decoder_output_stride=4 \ + --eval_crop_size="513,513" \ + --checkpoint_dir="${TRAIN_LOGDIR}" \ + --eval_logdir="${EVAL_LOGDIR}" \ + --dataset_dir="${PASCAL_DATASET}" \ + --max_number_of_evaluations=1 + +# Visualize the results. +python "${WORK_DIR}"/vis.py \ + --logtostderr \ + --vis_split="val" \ + --model_variant="xception_65" \ + --atrous_rates=6 \ + --atrous_rates=12 \ + --atrous_rates=18 \ + --output_stride=16 \ + --decoder_output_stride=4 \ + --vis_crop_size="513,513" \ + --checkpoint_dir="${TRAIN_LOGDIR}" \ + --vis_logdir="${VIS_LOGDIR}" \ + --dataset_dir="${PASCAL_DATASET}" \ + --max_number_of_iterations=1 + +# Export the trained checkpoint. +CKPT_PATH="${TRAIN_LOGDIR}/model.ckpt-${NUM_ITERATIONS}" +EXPORT_PATH="${EXPORT_DIR}/frozen_inference_graph.pb" + +python "${WORK_DIR}"/export_model.py \ + --logtostderr \ + --checkpoint_path="${CKPT_PATH}" \ + --export_path="${EXPORT_PATH}" \ + --model_variant="xception_65" \ + --atrous_rates=6 \ + --atrous_rates=12 \ + --atrous_rates=18 \ + --output_stride=16 \ + --decoder_output_stride=4 \ + --num_classes=21 \ + --crop_size=513 \ + --crop_size=513 \ + --inference_scales=1.0 + +# Run inference with the exported checkpoint. +# Please refer to the provided deeplab_demo.ipynb for an example. diff --git a/models/research/deeplab/local_test_mobilenetv2.sh b/models/research/deeplab/local_test_mobilenetv2.sh new file mode 100644 index 0000000000000000000000000000000000000000..c38646fdf6caa3934b7c8db66e53ffbd4f9fd8c6 --- /dev/null +++ b/models/research/deeplab/local_test_mobilenetv2.sh @@ -0,0 +1,129 @@ +#!/bin/bash +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# +# This script is used to run local test on PASCAL VOC 2012 using MobileNet-v2. +# Users could also modify from this script for their use case. +# +# Usage: +# # From the tensorflow/models/research/deeplab directory. +# sh ./local_test_mobilenetv2.sh +# +# + +# Exit immediately if a command exits with a non-zero status. 
+set -e + +# Move one-level up to tensorflow/models/research directory. +cd .. + +# Update PYTHONPATH. +export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim + +# Set up the working environment. +CURRENT_DIR=$(pwd) +WORK_DIR="${CURRENT_DIR}/deeplab" + +# Run model_test first to make sure the PYTHONPATH is correctly set. +python "${WORK_DIR}"/model_test.py -v + +# Go to datasets folder and download PASCAL VOC 2012 segmentation dataset. +DATASET_DIR="datasets" +cd "${WORK_DIR}/${DATASET_DIR}" +sh download_and_convert_voc2012.sh + +# Go back to original directory. +cd "${CURRENT_DIR}" + +# Set up the working directories. +PASCAL_FOLDER="pascal_voc_seg" +EXP_FOLDER="exp/train_on_trainval_set_mobilenetv2" +INIT_FOLDER="${WORK_DIR}/${DATASET_DIR}/${PASCAL_FOLDER}/init_models" +TRAIN_LOGDIR="${WORK_DIR}/${DATASET_DIR}/${PASCAL_FOLDER}/${EXP_FOLDER}/train" +EVAL_LOGDIR="${WORK_DIR}/${DATASET_DIR}/${PASCAL_FOLDER}/${EXP_FOLDER}/eval" +VIS_LOGDIR="${WORK_DIR}/${DATASET_DIR}/${PASCAL_FOLDER}/${EXP_FOLDER}/vis" +EXPORT_DIR="${WORK_DIR}/${DATASET_DIR}/${PASCAL_FOLDER}/${EXP_FOLDER}/export" +mkdir -p "${INIT_FOLDER}" +mkdir -p "${TRAIN_LOGDIR}" +mkdir -p "${EVAL_LOGDIR}" +mkdir -p "${VIS_LOGDIR}" +mkdir -p "${EXPORT_DIR}" + +# Copy locally the trained checkpoint as the initial checkpoint. +TF_INIT_ROOT="http://download.tensorflow.org/models" +CKPT_NAME="deeplabv3_mnv2_pascal_train_aug" +TF_INIT_CKPT="${CKPT_NAME}_2018_01_29.tar.gz" +cd "${INIT_FOLDER}" +wget -nd -c "${TF_INIT_ROOT}/${TF_INIT_CKPT}" +tar -xf "${TF_INIT_CKPT}" +cd "${CURRENT_DIR}" + +PASCAL_DATASET="${WORK_DIR}/${DATASET_DIR}/${PASCAL_FOLDER}/tfrecord" + +# Train 10 iterations. +NUM_ITERATIONS=10 +python "${WORK_DIR}"/train.py \ + --logtostderr \ + --train_split="trainval" \ + --model_variant="mobilenet_v2" \ + --output_stride=16 \ + --train_crop_size="513,513" \ + --train_batch_size=4 \ + --training_number_of_steps="${NUM_ITERATIONS}" \ + --fine_tune_batch_norm=true \ + --tf_initial_checkpoint="${INIT_FOLDER}/${CKPT_NAME}/model.ckpt-30000" \ + --train_logdir="${TRAIN_LOGDIR}" \ + --dataset_dir="${PASCAL_DATASET}" + +# Run evaluation. This performs eval over the full val split (1449 images) and +# will take a while. +# Using the provided checkpoint, one should expect mIOU=75.34%. +python "${WORK_DIR}"/eval.py \ + --logtostderr \ + --eval_split="val" \ + --model_variant="mobilenet_v2" \ + --eval_crop_size="513,513" \ + --checkpoint_dir="${TRAIN_LOGDIR}" \ + --eval_logdir="${EVAL_LOGDIR}" \ + --dataset_dir="${PASCAL_DATASET}" \ + --max_number_of_evaluations=1 + +# Visualize the results. +python "${WORK_DIR}"/vis.py \ + --logtostderr \ + --vis_split="val" \ + --model_variant="mobilenet_v2" \ + --vis_crop_size="513,513" \ + --checkpoint_dir="${TRAIN_LOGDIR}" \ + --vis_logdir="${VIS_LOGDIR}" \ + --dataset_dir="${PASCAL_DATASET}" \ + --max_number_of_iterations=1 + +# Export the trained checkpoint. +CKPT_PATH="${TRAIN_LOGDIR}/model.ckpt-${NUM_ITERATIONS}" +EXPORT_PATH="${EXPORT_DIR}/frozen_inference_graph.pb" + +python "${WORK_DIR}"/export_model.py \ + --logtostderr \ + --checkpoint_path="${CKPT_PATH}" \ + --export_path="${EXPORT_PATH}" \ + --model_variant="mobilenet_v2" \ + --num_classes=21 \ + --crop_size=513 \ + --crop_size=513 \ + --inference_scales=1.0 + +# Run inference with the exported checkpoint. +# Please refer to the provided deeplab_demo.ipynb for an example. 
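
The closing comment above points to `deeplab_demo.ipynb` for running inference
with the exported checkpoint. For reference, here is a minimal stand-alone
sketch along the same lines; it is hypothetical (not part of the repository)
and assumes the tensor names written by `export_model.py`
(`ImageTensor:0` / `SemanticPredictions:0`) together with NumPy and Pillow:

```python
# frozen_graph_inference.py -- hypothetical example, not part of the DeepLab code base.
import numpy as np
import tensorflow as tf
from PIL import Image

FROZEN_GRAPH = 'frozen_inference_graph.pb'  # ${EXPORT_PATH} from the script above.
INPUT_TENSOR = 'ImageTensor:0'              # uint8, shape [1, height, width, 3].
OUTPUT_TENSOR = 'SemanticPredictions:0'     # Integer label map, [1, height, width].
INPUT_SIZE = 513                            # Should match the export crop_size.

graph = tf.Graph()
with graph.as_default():
  graph_def = tf.GraphDef()
  with tf.gfile.GFile(FROZEN_GRAPH, 'rb') as f:
    graph_def.ParseFromString(f.read())
  tf.import_graph_def(graph_def, name='')

image = Image.open('input.jpg').convert('RGB')
width, height = image.size
# Resize so the longer side equals INPUT_SIZE, as the demo notebook does.
ratio = float(INPUT_SIZE) / max(width, height)
resized = image.resize((int(ratio * width), int(ratio * height)), Image.ANTIALIAS)

with tf.Session(graph=graph) as sess:
  seg_map = sess.run(
      OUTPUT_TENSOR,
      feed_dict={INPUT_TENSOR: [np.asarray(resized, dtype=np.uint8)]})[0]
print('Predicted classes:', np.unique(seg_map))
```

The returned `seg_map` has the resized image's spatial dimensions; resize it
back with nearest-neighbor interpolation if the original resolution is needed.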
diff --git a/models/research/deeplab/model.py b/models/research/deeplab/model.py new file mode 100644 index 0000000000000000000000000000000000000000..311aaa1acb13cb445053ac12fa09e354423e56df --- /dev/null +++ b/models/research/deeplab/model.py @@ -0,0 +1,911 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Provides DeepLab model definition and helper functions. + +DeepLab is a deep learning system for semantic image segmentation with +the following features: + +(1) Atrous convolution to explicitly control the resolution at which +feature responses are computed within Deep Convolutional Neural Networks. + +(2) Atrous spatial pyramid pooling (ASPP) to robustly segment objects at +multiple scales with filters at multiple sampling rates and effective +fields-of-views. + +(3) ASPP module augmented with image-level feature and batch normalization. + +(4) A simple yet effective decoder module to recover the object boundaries. + +See the following papers for more details: + +"Encoder-Decoder with Atrous Separable Convolution for Semantic Image +Segmentation" +Liang-Chieh Chen, Yukun Zhu, George Papandreou, Florian Schroff, Hartwig Adam. +(https://arxiv.org/abs/1802.02611) + +"Rethinking Atrous Convolution for Semantic Image Segmentation," +Liang-Chieh Chen, George Papandreou, Florian Schroff, Hartwig Adam +(https://arxiv.org/abs/1706.05587) + +"DeepLab: Semantic Image Segmentation with Deep Convolutional Nets, +Atrous Convolution, and Fully Connected CRFs", +Liang-Chieh Chen*, George Papandreou*, Iasonas Kokkinos, Kevin Murphy, +Alan L Yuille (* equal contribution) +(https://arxiv.org/abs/1606.00915) + +"Semantic Image Segmentation with Deep Convolutional Nets and Fully Connected +CRFs" +Liang-Chieh Chen*, George Papandreou*, Iasonas Kokkinos, Kevin Murphy, +Alan L. Yuille (* equal contribution) +(https://arxiv.org/abs/1412.7062) +""" +import tensorflow as tf +from tensorflow.contrib import slim as contrib_slim +from deeplab.core import dense_prediction_cell +from deeplab.core import feature_extractor +from deeplab.core import utils + +slim = contrib_slim + +LOGITS_SCOPE_NAME = 'logits' +MERGED_LOGITS_SCOPE = 'merged_logits' +IMAGE_POOLING_SCOPE = 'image_pooling' +ASPP_SCOPE = 'aspp' +CONCAT_PROJECTION_SCOPE = 'concat_projection' +DECODER_SCOPE = 'decoder' +META_ARCHITECTURE_SCOPE = 'meta_architecture' + +PROB_SUFFIX = '_prob' + +_resize_bilinear = utils.resize_bilinear +scale_dimension = utils.scale_dimension +split_separable_conv2d = utils.split_separable_conv2d + + +def get_extra_layer_scopes(last_layers_contain_logits_only=False): + """Gets the scopes for extra layers. + + Args: + last_layers_contain_logits_only: Boolean, True if only consider logits as + the last layer (i.e., exclude ASPP module, decoder module and so on) + + Returns: + A list of scopes for extra layers. 
+ """ + if last_layers_contain_logits_only: + return [LOGITS_SCOPE_NAME] + else: + return [ + LOGITS_SCOPE_NAME, + IMAGE_POOLING_SCOPE, + ASPP_SCOPE, + CONCAT_PROJECTION_SCOPE, + DECODER_SCOPE, + META_ARCHITECTURE_SCOPE, + ] + + +def predict_labels_multi_scale(images, + model_options, + eval_scales=(1.0,), + add_flipped_images=False): + """Predicts segmentation labels. + + Args: + images: A tensor of size [batch, height, width, channels]. + model_options: A ModelOptions instance to configure models. + eval_scales: The scales to resize images for evaluation. + add_flipped_images: Add flipped images for evaluation or not. + + Returns: + A dictionary with keys specifying the output_type (e.g., semantic + prediction) and values storing Tensors representing predictions (argmax + over channels). Each prediction has size [batch, height, width]. + """ + outputs_to_predictions = { + output: [] + for output in model_options.outputs_to_num_classes + } + + for i, image_scale in enumerate(eval_scales): + with tf.variable_scope(tf.get_variable_scope(), reuse=True if i else None): + outputs_to_scales_to_logits = multi_scale_logits( + images, + model_options=model_options, + image_pyramid=[image_scale], + is_training=False, + fine_tune_batch_norm=False) + + if add_flipped_images: + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + outputs_to_scales_to_logits_reversed = multi_scale_logits( + tf.reverse_v2(images, [2]), + model_options=model_options, + image_pyramid=[image_scale], + is_training=False, + fine_tune_batch_norm=False) + + for output in sorted(outputs_to_scales_to_logits): + scales_to_logits = outputs_to_scales_to_logits[output] + logits = _resize_bilinear( + scales_to_logits[MERGED_LOGITS_SCOPE], + tf.shape(images)[1:3], + scales_to_logits[MERGED_LOGITS_SCOPE].dtype) + outputs_to_predictions[output].append( + tf.expand_dims(tf.nn.softmax(logits), 4)) + + if add_flipped_images: + scales_to_logits_reversed = ( + outputs_to_scales_to_logits_reversed[output]) + logits_reversed = _resize_bilinear( + tf.reverse_v2(scales_to_logits_reversed[MERGED_LOGITS_SCOPE], [2]), + tf.shape(images)[1:3], + scales_to_logits_reversed[MERGED_LOGITS_SCOPE].dtype) + outputs_to_predictions[output].append( + tf.expand_dims(tf.nn.softmax(logits_reversed), 4)) + + for output in sorted(outputs_to_predictions): + predictions = outputs_to_predictions[output] + # Compute average prediction across different scales and flipped images. + predictions = tf.reduce_mean(tf.concat(predictions, 4), axis=4) + outputs_to_predictions[output] = tf.argmax(predictions, 3) + outputs_to_predictions[output + PROB_SUFFIX] = tf.nn.softmax(predictions) + + return outputs_to_predictions + + +def predict_labels(images, model_options, image_pyramid=None): + """Predicts segmentation labels. + + Args: + images: A tensor of size [batch, height, width, channels]. + model_options: A ModelOptions instance to configure models. + image_pyramid: Input image scales for multi-scale feature extraction. + + Returns: + A dictionary with keys specifying the output_type (e.g., semantic + prediction) and values storing Tensors representing predictions (argmax + over channels). Each prediction has size [batch, height, width]. 
+ """ + outputs_to_scales_to_logits = multi_scale_logits( + images, + model_options=model_options, + image_pyramid=image_pyramid, + is_training=False, + fine_tune_batch_norm=False) + + predictions = {} + for output in sorted(outputs_to_scales_to_logits): + scales_to_logits = outputs_to_scales_to_logits[output] + logits = scales_to_logits[MERGED_LOGITS_SCOPE] + # There are two ways to obtain the final prediction results: (1) bilinear + # upsampling the logits followed by argmax, or (2) argmax followed by + # nearest neighbor upsampling. The second option may introduce the "blocking + # effect" but is computationally efficient. + if model_options.prediction_with_upsampled_logits: + logits = _resize_bilinear(logits, + tf.shape(images)[1:3], + scales_to_logits[MERGED_LOGITS_SCOPE].dtype) + predictions[output] = tf.argmax(logits, 3) + predictions[output + PROB_SUFFIX] = tf.nn.softmax(logits) + else: + argmax_results = tf.argmax(logits, 3) + argmax_results = tf.image.resize_nearest_neighbor( + tf.expand_dims(argmax_results, 3), + tf.shape(images)[1:3], + align_corners=True, + name='resize_prediction') + predictions[output] = tf.squeeze(argmax_results, 3) + predictions[output + PROB_SUFFIX] = tf.image.resize_bilinear( + tf.nn.softmax(logits), + tf.shape(images)[1:3], + align_corners=True, + name='resize_prob') + return predictions + + +def multi_scale_logits(images, + model_options, + image_pyramid, + weight_decay=0.0001, + is_training=False, + fine_tune_batch_norm=False, + nas_training_hyper_parameters=None): + """Gets the logits for multi-scale inputs. + + The returned logits are all downsampled (due to max-pooling layers) + for both training and evaluation. + + Args: + images: A tensor of size [batch, height, width, channels]. + model_options: A ModelOptions instance to configure models. + image_pyramid: Input image scales for multi-scale feature extraction. + weight_decay: The weight decay for model variables. + is_training: Is training or not. + fine_tune_batch_norm: Fine-tune the batch norm parameters or not. + nas_training_hyper_parameters: A dictionary storing hyper-parameters for + training nas models. Its keys are: + - `drop_path_keep_prob`: Probability to keep each path in the cell when + training. + - `total_training_steps`: Total training steps to help drop path + probability calculation. + + Returns: + outputs_to_scales_to_logits: A map of maps from output_type (e.g., + semantic prediction) to a dictionary of multi-scale logits names to + logits. For each output_type, the dictionary has keys which + correspond to the scales and values which correspond to the logits. + For example, if `scales` equals [1.0, 1.5], then the keys would + include 'merged_logits', 'logits_1.00' and 'logits_1.50'. + + Raises: + ValueError: If model_options doesn't specify crop_size and its + add_image_level_feature = True, since add_image_level_feature requires + crop_size information. + """ + # Setup default values. + if not image_pyramid: + image_pyramid = [1.0] + crop_height = ( + model_options.crop_size[0] + if model_options.crop_size else tf.shape(images)[1]) + crop_width = ( + model_options.crop_size[1] + if model_options.crop_size else tf.shape(images)[2]) + if model_options.image_pooling_crop_size: + image_pooling_crop_height = model_options.image_pooling_crop_size[0] + image_pooling_crop_width = model_options.image_pooling_crop_size[1] + + # Compute the height, width for the output logits. 
+ if model_options.decoder_output_stride: + logits_output_stride = min(model_options.decoder_output_stride) + else: + logits_output_stride = model_options.output_stride + + logits_height = scale_dimension( + crop_height, + max(1.0, max(image_pyramid)) / logits_output_stride) + logits_width = scale_dimension( + crop_width, + max(1.0, max(image_pyramid)) / logits_output_stride) + + # Compute the logits for each scale in the image pyramid. + outputs_to_scales_to_logits = { + k: {} + for k in model_options.outputs_to_num_classes + } + + num_channels = images.get_shape().as_list()[-1] + + for image_scale in image_pyramid: + if image_scale != 1.0: + scaled_height = scale_dimension(crop_height, image_scale) + scaled_width = scale_dimension(crop_width, image_scale) + scaled_crop_size = [scaled_height, scaled_width] + scaled_images = _resize_bilinear(images, scaled_crop_size, images.dtype) + if model_options.crop_size: + scaled_images.set_shape( + [None, scaled_height, scaled_width, num_channels]) + # Adjust image_pooling_crop_size accordingly. + scaled_image_pooling_crop_size = None + if model_options.image_pooling_crop_size: + scaled_image_pooling_crop_size = [ + scale_dimension(image_pooling_crop_height, image_scale), + scale_dimension(image_pooling_crop_width, image_scale)] + else: + scaled_crop_size = model_options.crop_size + scaled_images = images + scaled_image_pooling_crop_size = model_options.image_pooling_crop_size + + updated_options = model_options._replace( + crop_size=scaled_crop_size, + image_pooling_crop_size=scaled_image_pooling_crop_size) + outputs_to_logits = _get_logits( + scaled_images, + updated_options, + weight_decay=weight_decay, + reuse=tf.AUTO_REUSE, + is_training=is_training, + fine_tune_batch_norm=fine_tune_batch_norm, + nas_training_hyper_parameters=nas_training_hyper_parameters) + + # Resize the logits to have the same dimension before merging. + for output in sorted(outputs_to_logits): + outputs_to_logits[output] = _resize_bilinear( + outputs_to_logits[output], [logits_height, logits_width], + outputs_to_logits[output].dtype) + + # Return when only one input scale. + if len(image_pyramid) == 1: + for output in sorted(model_options.outputs_to_num_classes): + outputs_to_scales_to_logits[output][ + MERGED_LOGITS_SCOPE] = outputs_to_logits[output] + return outputs_to_scales_to_logits + + # Save logits to the output map. + for output in sorted(model_options.outputs_to_num_classes): + outputs_to_scales_to_logits[output][ + 'logits_%.2f' % image_scale] = outputs_to_logits[output] + + # Merge the logits from all the multi-scale inputs. + for output in sorted(model_options.outputs_to_num_classes): + # Concatenate the multi-scale logits for each output type. + all_logits = [ + tf.expand_dims(logits, axis=4) + for logits in outputs_to_scales_to_logits[output].values() + ] + all_logits = tf.concat(all_logits, 4) + merge_fn = ( + tf.reduce_max + if model_options.merge_method == 'max' else tf.reduce_mean) + outputs_to_scales_to_logits[output][MERGED_LOGITS_SCOPE] = merge_fn( + all_logits, axis=4) + + return outputs_to_scales_to_logits + + +def extract_features(images, + model_options, + weight_decay=0.0001, + reuse=None, + is_training=False, + fine_tune_batch_norm=False, + nas_training_hyper_parameters=None): + """Extracts features by the particular model_variant. + + Args: + images: A tensor of size [batch, height, width, channels]. + model_options: A ModelOptions instance to configure models. + weight_decay: The weight decay for model variables. 
+ reuse: Reuse the model variables or not. + is_training: Is training or not. + fine_tune_batch_norm: Fine-tune the batch norm parameters or not. + nas_training_hyper_parameters: A dictionary storing hyper-parameters for + training nas models. Its keys are: + - `drop_path_keep_prob`: Probability to keep each path in the cell when + training. + - `total_training_steps`: Total training steps to help drop path + probability calculation. + + Returns: + concat_logits: A tensor of size [batch, feature_height, feature_width, + feature_channels], where feature_height/feature_width are determined by + the images height/width and output_stride. + end_points: A dictionary from components of the network to the corresponding + activation. + """ + features, end_points = feature_extractor.extract_features( + images, + output_stride=model_options.output_stride, + multi_grid=model_options.multi_grid, + model_variant=model_options.model_variant, + depth_multiplier=model_options.depth_multiplier, + divisible_by=model_options.divisible_by, + weight_decay=weight_decay, + reuse=reuse, + is_training=is_training, + preprocessed_images_dtype=model_options.preprocessed_images_dtype, + fine_tune_batch_norm=fine_tune_batch_norm, + nas_architecture_options=model_options.nas_architecture_options, + nas_training_hyper_parameters=nas_training_hyper_parameters, + use_bounded_activation=model_options.use_bounded_activation) + + if not model_options.aspp_with_batch_norm: + return features, end_points + else: + if model_options.dense_prediction_cell_config is not None: + tf.logging.info('Using dense prediction cell config.') + dense_prediction_layer = dense_prediction_cell.DensePredictionCell( + config=model_options.dense_prediction_cell_config, + hparams={ + 'conv_rate_multiplier': 16 // model_options.output_stride, + }) + concat_logits = dense_prediction_layer.build_cell( + features, + output_stride=model_options.output_stride, + crop_size=model_options.crop_size, + image_pooling_crop_size=model_options.image_pooling_crop_size, + weight_decay=weight_decay, + reuse=reuse, + is_training=is_training, + fine_tune_batch_norm=fine_tune_batch_norm) + return concat_logits, end_points + else: + # The following codes employ the DeepLabv3 ASPP module. Note that we + # could express the ASPP module as one particular dense prediction + # cell architecture. We do not do so but leave the following codes + # for backward compatibility. + batch_norm_params = utils.get_batch_norm_params( + decay=0.9997, + epsilon=1e-5, + scale=True, + is_training=(is_training and fine_tune_batch_norm), + sync_batch_norm_method=model_options.sync_batch_norm_method) + batch_norm = utils.get_batch_norm_fn( + model_options.sync_batch_norm_method) + activation_fn = ( + tf.nn.relu6 if model_options.use_bounded_activation else tf.nn.relu) + with slim.arg_scope( + [slim.conv2d, slim.separable_conv2d], + weights_regularizer=slim.l2_regularizer(weight_decay), + activation_fn=activation_fn, + normalizer_fn=batch_norm, + padding='SAME', + stride=1, + reuse=reuse): + with slim.arg_scope([batch_norm], **batch_norm_params): + depth = model_options.aspp_convs_filters + branch_logits = [] + + if model_options.add_image_level_feature: + if model_options.crop_size is not None: + image_pooling_crop_size = model_options.image_pooling_crop_size + # If image_pooling_crop_size is not specified, use crop_size. + if image_pooling_crop_size is None: + image_pooling_crop_size = model_options.crop_size + pool_height = scale_dimension( + image_pooling_crop_size[0], + 1. 
/ model_options.output_stride) + pool_width = scale_dimension( + image_pooling_crop_size[1], + 1. / model_options.output_stride) + image_feature = slim.avg_pool2d( + features, [pool_height, pool_width], + model_options.image_pooling_stride, padding='VALID') + resize_height = scale_dimension( + model_options.crop_size[0], + 1. / model_options.output_stride) + resize_width = scale_dimension( + model_options.crop_size[1], + 1. / model_options.output_stride) + else: + # If crop_size is None, we simply do global pooling. + pool_height = tf.shape(features)[1] + pool_width = tf.shape(features)[2] + image_feature = tf.reduce_mean( + features, axis=[1, 2], keepdims=True) + resize_height = pool_height + resize_width = pool_width + image_feature_activation_fn = tf.nn.relu + image_feature_normalizer_fn = batch_norm + if model_options.aspp_with_squeeze_and_excitation: + image_feature_activation_fn = tf.nn.sigmoid + if model_options.image_se_uses_qsigmoid: + image_feature_activation_fn = utils.q_sigmoid + image_feature_normalizer_fn = None + image_feature = slim.conv2d( + image_feature, depth, 1, + activation_fn=image_feature_activation_fn, + normalizer_fn=image_feature_normalizer_fn, + scope=IMAGE_POOLING_SCOPE) + image_feature = _resize_bilinear( + image_feature, + [resize_height, resize_width], + image_feature.dtype) + # Set shape for resize_height/resize_width if they are not Tensor. + if isinstance(resize_height, tf.Tensor): + resize_height = None + if isinstance(resize_width, tf.Tensor): + resize_width = None + image_feature.set_shape([None, resize_height, resize_width, depth]) + if not model_options.aspp_with_squeeze_and_excitation: + branch_logits.append(image_feature) + + # Employ a 1x1 convolution. + branch_logits.append(slim.conv2d(features, depth, 1, + scope=ASPP_SCOPE + str(0))) + + if model_options.atrous_rates: + # Employ 3x3 convolutions with different atrous rates. + for i, rate in enumerate(model_options.atrous_rates, 1): + scope = ASPP_SCOPE + str(i) + if model_options.aspp_with_separable_conv: + aspp_features = split_separable_conv2d( + features, + filters=depth, + rate=rate, + weight_decay=weight_decay, + scope=scope) + else: + aspp_features = slim.conv2d( + features, depth, 3, rate=rate, scope=scope) + branch_logits.append(aspp_features) + + # Merge branch logits. + concat_logits = tf.concat(branch_logits, 3) + if model_options.aspp_with_concat_projection: + concat_logits = slim.conv2d( + concat_logits, depth, 1, scope=CONCAT_PROJECTION_SCOPE) + concat_logits = slim.dropout( + concat_logits, + keep_prob=0.9, + is_training=is_training, + scope=CONCAT_PROJECTION_SCOPE + '_dropout') + if (model_options.add_image_level_feature and + model_options.aspp_with_squeeze_and_excitation): + concat_logits *= image_feature + + return concat_logits, end_points + + +def _get_logits(images, + model_options, + weight_decay=0.0001, + reuse=None, + is_training=False, + fine_tune_batch_norm=False, + nas_training_hyper_parameters=None): + """Gets the logits by atrous/image spatial pyramid pooling. + + Args: + images: A tensor of size [batch, height, width, channels]. + model_options: A ModelOptions instance to configure models. + weight_decay: The weight decay for model variables. + reuse: Reuse the model variables or not. + is_training: Is training or not. + fine_tune_batch_norm: Fine-tune the batch norm parameters or not. + nas_training_hyper_parameters: A dictionary storing hyper-parameters for + training nas models. 
Its keys are: + - `drop_path_keep_prob`: Probability to keep each path in the cell when + training. + - `total_training_steps`: Total training steps to help drop path + probability calculation. + + Returns: + outputs_to_logits: A map from output_type to logits. + """ + features, end_points = extract_features( + images, + model_options, + weight_decay=weight_decay, + reuse=reuse, + is_training=is_training, + fine_tune_batch_norm=fine_tune_batch_norm, + nas_training_hyper_parameters=nas_training_hyper_parameters) + + if model_options.decoder_output_stride: + crop_size = model_options.crop_size + if crop_size is None: + crop_size = [tf.shape(images)[1], tf.shape(images)[2]] + features = refine_by_decoder( + features, + end_points, + crop_size=crop_size, + decoder_output_stride=model_options.decoder_output_stride, + decoder_use_separable_conv=model_options.decoder_use_separable_conv, + decoder_use_sum_merge=model_options.decoder_use_sum_merge, + decoder_filters=model_options.decoder_filters, + decoder_output_is_logits=model_options.decoder_output_is_logits, + model_variant=model_options.model_variant, + weight_decay=weight_decay, + reuse=reuse, + is_training=is_training, + fine_tune_batch_norm=fine_tune_batch_norm, + use_bounded_activation=model_options.use_bounded_activation) + + outputs_to_logits = {} + for output in sorted(model_options.outputs_to_num_classes): + if model_options.decoder_output_is_logits: + outputs_to_logits[output] = tf.identity(features, + name=output) + else: + outputs_to_logits[output] = get_branch_logits( + features, + model_options.outputs_to_num_classes[output], + model_options.atrous_rates, + aspp_with_batch_norm=model_options.aspp_with_batch_norm, + kernel_size=model_options.logits_kernel_size, + weight_decay=weight_decay, + reuse=reuse, + scope_suffix=output) + + return outputs_to_logits + + +def refine_by_decoder(features, + end_points, + crop_size=None, + decoder_output_stride=None, + decoder_use_separable_conv=False, + decoder_use_sum_merge=False, + decoder_filters=256, + decoder_output_is_logits=False, + model_variant=None, + weight_decay=0.0001, + reuse=None, + is_training=False, + fine_tune_batch_norm=False, + use_bounded_activation=False, + sync_batch_norm_method='None'): + """Adds the decoder to obtain sharper segmentation results. + + Args: + features: A tensor of size [batch, features_height, features_width, + features_channels]. + end_points: A dictionary from components of the network to the corresponding + activation. + crop_size: A tuple [crop_height, crop_width] specifying whole patch crop + size. + decoder_output_stride: A list of integers specifying the output stride of + low-level features used in the decoder module. + decoder_use_separable_conv: Employ separable convolution for decoder or not. + decoder_use_sum_merge: Boolean, decoder uses simple sum merge or not. + decoder_filters: Integer, decoder filter size. + decoder_output_is_logits: Boolean, using decoder output as logits or not. + model_variant: Model variant for feature extraction. + weight_decay: The weight decay for model variables. + reuse: Reuse the model variables or not. + is_training: Is training or not. + fine_tune_batch_norm: Fine-tune the batch norm parameters or not. + use_bounded_activation: Whether or not to use bounded activations. Bounded + activations better lend themselves to quantized inference. + sync_batch_norm_method: String, method used to sync batch norm. Currently + only support `None` (no sync batch norm) and `tpu` (use tpu code to + sync batch norm). 
+ + Returns: + Decoder output with size [batch, decoder_height, decoder_width, + decoder_channels]. + + Raises: + ValueError: If crop_size is None. + """ + if crop_size is None: + raise ValueError('crop_size must be provided when using decoder.') + batch_norm_params = utils.get_batch_norm_params( + decay=0.9997, + epsilon=1e-5, + scale=True, + is_training=(is_training and fine_tune_batch_norm), + sync_batch_norm_method=sync_batch_norm_method) + batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method) + decoder_depth = decoder_filters + projected_filters = 48 + if decoder_use_sum_merge: + # When using sum merge, the projected filters must be equal to decoder + # filters. + projected_filters = decoder_filters + if decoder_output_is_logits: + # Overwrite the setting when decoder output is logits. + activation_fn = None + normalizer_fn = None + conv2d_kernel = 1 + # Use original conv instead of separable conv. + decoder_use_separable_conv = False + else: + # Default setting when decoder output is not logits. + activation_fn = tf.nn.relu6 if use_bounded_activation else tf.nn.relu + normalizer_fn = batch_norm + conv2d_kernel = 3 + with slim.arg_scope( + [slim.conv2d, slim.separable_conv2d], + weights_regularizer=slim.l2_regularizer(weight_decay), + activation_fn=activation_fn, + normalizer_fn=normalizer_fn, + padding='SAME', + stride=1, + reuse=reuse): + with slim.arg_scope([batch_norm], **batch_norm_params): + with tf.variable_scope(DECODER_SCOPE, DECODER_SCOPE, [features]): + decoder_features = features + decoder_stage = 0 + scope_suffix = '' + for output_stride in decoder_output_stride: + feature_list = feature_extractor.networks_to_feature_maps[ + model_variant][ + feature_extractor.DECODER_END_POINTS][output_stride] + # If only one decoder stage, we do not change the scope name in + # order for backward compactibility. + if decoder_stage: + scope_suffix = '_{}'.format(decoder_stage) + for i, name in enumerate(feature_list): + decoder_features_list = [decoder_features] + # MobileNet and NAS variants use different naming convention. + if ('mobilenet' in model_variant or + model_variant.startswith('mnas') or + model_variant.startswith('nas')): + feature_name = name + else: + feature_name = '{}/{}'.format( + feature_extractor.name_scope[model_variant], name) + decoder_features_list.append( + slim.conv2d( + end_points[feature_name], + projected_filters, + 1, + scope='feature_projection' + str(i) + scope_suffix)) + # Determine the output size. + decoder_height = scale_dimension(crop_size[0], 1.0 / output_stride) + decoder_width = scale_dimension(crop_size[1], 1.0 / output_stride) + # Resize to decoder_height/decoder_width. 
+ for j, feature in enumerate(decoder_features_list): + decoder_features_list[j] = _resize_bilinear( + feature, [decoder_height, decoder_width], feature.dtype) + h = (None if isinstance(decoder_height, tf.Tensor) + else decoder_height) + w = (None if isinstance(decoder_width, tf.Tensor) + else decoder_width) + decoder_features_list[j].set_shape([None, h, w, None]) + if decoder_use_sum_merge: + decoder_features = _decoder_with_sum_merge( + decoder_features_list, + decoder_depth, + conv2d_kernel=conv2d_kernel, + decoder_use_separable_conv=decoder_use_separable_conv, + weight_decay=weight_decay, + scope_suffix=scope_suffix) + else: + if not decoder_use_separable_conv: + scope_suffix = str(i) + scope_suffix + decoder_features = _decoder_with_concat_merge( + decoder_features_list, + decoder_depth, + decoder_use_separable_conv=decoder_use_separable_conv, + weight_decay=weight_decay, + scope_suffix=scope_suffix) + decoder_stage += 1 + return decoder_features + + +def _decoder_with_sum_merge(decoder_features_list, + decoder_depth, + conv2d_kernel=3, + decoder_use_separable_conv=True, + weight_decay=0.0001, + scope_suffix=''): + """Decoder with sum to merge features. + + Args: + decoder_features_list: A list of decoder features. + decoder_depth: Integer, the filters used in the convolution. + conv2d_kernel: Integer, the convolution kernel size. + decoder_use_separable_conv: Boolean, use separable conv or not. + weight_decay: Weight decay for the model variables. + scope_suffix: String, used in the scope suffix. + + Returns: + decoder features merged with sum. + + Raises: + RuntimeError: If decoder_features_list have length not equal to 2. + """ + if len(decoder_features_list) != 2: + raise RuntimeError('Expect decoder_features has length 2.') + # Only apply one convolution when decoder use sum merge. + if decoder_use_separable_conv: + decoder_features = split_separable_conv2d( + decoder_features_list[0], + filters=decoder_depth, + rate=1, + weight_decay=weight_decay, + scope='decoder_split_sep_conv0'+scope_suffix) + decoder_features_list[1] + else: + decoder_features = slim.conv2d( + decoder_features_list[0], + decoder_depth, + conv2d_kernel, + scope='decoder_conv0'+scope_suffix) + decoder_features_list[1] + return decoder_features + + +def _decoder_with_concat_merge(decoder_features_list, + decoder_depth, + decoder_use_separable_conv=True, + weight_decay=0.0001, + scope_suffix=''): + """Decoder with concatenation to merge features. + + This decoder method applies two convolutions to smooth the features obtained + by concatenating the input decoder_features_list. + + This decoder module is proposed in the DeepLabv3+ paper. + + Args: + decoder_features_list: A list of decoder features. + decoder_depth: Integer, the filters used in the convolution. + decoder_use_separable_conv: Boolean, use separable conv or not. + weight_decay: Weight decay for the model variables. + scope_suffix: String, used in the scope suffix. + + Returns: + decoder features merged with concatenation. 
+ """ + if decoder_use_separable_conv: + decoder_features = split_separable_conv2d( + tf.concat(decoder_features_list, 3), + filters=decoder_depth, + rate=1, + weight_decay=weight_decay, + scope='decoder_conv0'+scope_suffix) + decoder_features = split_separable_conv2d( + decoder_features, + filters=decoder_depth, + rate=1, + weight_decay=weight_decay, + scope='decoder_conv1'+scope_suffix) + else: + num_convs = 2 + decoder_features = slim.repeat( + tf.concat(decoder_features_list, 3), + num_convs, + slim.conv2d, + decoder_depth, + 3, + scope='decoder_conv'+scope_suffix) + return decoder_features + + +def get_branch_logits(features, + num_classes, + atrous_rates=None, + aspp_with_batch_norm=False, + kernel_size=1, + weight_decay=0.0001, + reuse=None, + scope_suffix=''): + """Gets the logits from each model's branch. + + The underlying model is branched out in the last layer when atrous + spatial pyramid pooling is employed, and all branches are sum-merged + to form the final logits. + + Args: + features: A float tensor of shape [batch, height, width, channels]. + num_classes: Number of classes to predict. + atrous_rates: A list of atrous convolution rates for last layer. + aspp_with_batch_norm: Use batch normalization layers for ASPP. + kernel_size: Kernel size for convolution. + weight_decay: Weight decay for the model variables. + reuse: Reuse model variables or not. + scope_suffix: Scope suffix for the model variables. + + Returns: + Merged logits with shape [batch, height, width, num_classes]. + + Raises: + ValueError: Upon invalid input kernel_size value. + """ + # When using batch normalization with ASPP, ASPP has been applied before + # in extract_features, and thus we simply apply 1x1 convolution here. + if aspp_with_batch_norm or atrous_rates is None: + if kernel_size != 1: + raise ValueError('Kernel size must be 1 when atrous_rates is None or ' + 'using aspp_with_batch_norm. Gets %d.' % kernel_size) + atrous_rates = [1] + + with slim.arg_scope( + [slim.conv2d], + weights_regularizer=slim.l2_regularizer(weight_decay), + weights_initializer=tf.truncated_normal_initializer(stddev=0.01), + reuse=reuse): + with tf.variable_scope(LOGITS_SCOPE_NAME, LOGITS_SCOPE_NAME, [features]): + branch_logits = [] + for i, rate in enumerate(atrous_rates): + scope = scope_suffix + if i: + scope += '_%d' % i + + branch_logits.append( + slim.conv2d( + features, + num_classes, + kernel_size=kernel_size, + rate=rate, + activation_fn=None, + normalizer_fn=None, + scope=scope)) + + return tf.add_n(branch_logits) diff --git a/models/research/deeplab/model_test.py b/models/research/deeplab/model_test.py new file mode 100644 index 0000000000000000000000000000000000000000..d8413d7395d022adb4f43223eb06a4bdc1aa53db --- /dev/null +++ b/models/research/deeplab/model_test.py @@ -0,0 +1,148 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for DeepLab model and some helper functions.""" + +import tensorflow as tf + +from deeplab import common +from deeplab import model + + +class DeeplabModelTest(tf.test.TestCase): + + def testWrongDeepLabVariant(self): + model_options = common.ModelOptions([])._replace( + model_variant='no_such_variant') + with self.assertRaises(ValueError): + model._get_logits(images=[], model_options=model_options) + + def testBuildDeepLabv2(self): + batch_size = 2 + crop_size = [41, 41] + + # Test with two image_pyramids. + image_pyramids = [[1], [0.5, 1]] + + # Test two model variants. + model_variants = ['xception_65', 'mobilenet_v2'] + + # Test with two output_types. + outputs_to_num_classes = {'semantic': 3, + 'direction': 2} + + expected_endpoints = [['merged_logits'], + ['merged_logits', + 'logits_0.50', + 'logits_1.00']] + expected_num_logits = [1, 3] + + for model_variant in model_variants: + model_options = common.ModelOptions(outputs_to_num_classes)._replace( + add_image_level_feature=False, + aspp_with_batch_norm=False, + aspp_with_separable_conv=False, + model_variant=model_variant) + + for i, image_pyramid in enumerate(image_pyramids): + g = tf.Graph() + with g.as_default(): + with self.test_session(graph=g): + inputs = tf.random_uniform( + (batch_size, crop_size[0], crop_size[1], 3)) + outputs_to_scales_to_logits = model.multi_scale_logits( + inputs, model_options, image_pyramid=image_pyramid) + + # Check computed results for each output type. + for output in outputs_to_num_classes: + scales_to_logits = outputs_to_scales_to_logits[output] + self.assertListEqual(sorted(scales_to_logits.keys()), + sorted(expected_endpoints[i])) + + # Expected number of logits = len(image_pyramid) + 1, since the + # last logits is merged from all the scales. + self.assertEqual(len(scales_to_logits), expected_num_logits[i]) + + def testForwardpassDeepLabv3plus(self): + crop_size = [33, 33] + outputs_to_num_classes = {'semantic': 3} + + model_options = common.ModelOptions( + outputs_to_num_classes, + crop_size, + output_stride=16 + )._replace( + add_image_level_feature=True, + aspp_with_batch_norm=True, + logits_kernel_size=1, + decoder_output_stride=[4], + model_variant='mobilenet_v2') # Employ MobileNetv2 for fast test. + + g = tf.Graph() + with g.as_default(): + with self.test_session(graph=g) as sess: + inputs = tf.random_uniform( + (1, crop_size[0], crop_size[1], 3)) + outputs_to_scales_to_logits = model.multi_scale_logits( + inputs, + model_options, + image_pyramid=[1.0]) + + sess.run(tf.global_variables_initializer()) + outputs_to_scales_to_logits = sess.run(outputs_to_scales_to_logits) + + # Check computed results for each output type. + for output in outputs_to_num_classes: + scales_to_logits = outputs_to_scales_to_logits[output] + # Expect only one output. 
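+        # With a single-scale image pyramid ([1.0]), multi_scale_logits
+        # returns only the merged logits for each output type.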
+ self.assertEqual(len(scales_to_logits), 1) + for logits in scales_to_logits.values(): + self.assertTrue(logits.any()) + + def testBuildDeepLabWithDensePredictionCell(self): + batch_size = 1 + crop_size = [33, 33] + outputs_to_num_classes = {'semantic': 2} + expected_endpoints = ['merged_logits'] + dense_prediction_cell_config = [ + {'kernel': 3, 'rate': [1, 6], 'op': 'conv', 'input': -1}, + {'kernel': 3, 'rate': [18, 15], 'op': 'conv', 'input': 0}, + ] + model_options = common.ModelOptions( + outputs_to_num_classes, + crop_size, + output_stride=16)._replace( + aspp_with_batch_norm=True, + model_variant='mobilenet_v2', + dense_prediction_cell_config=dense_prediction_cell_config) + g = tf.Graph() + with g.as_default(): + with self.test_session(graph=g): + inputs = tf.random_uniform( + (batch_size, crop_size[0], crop_size[1], 3)) + outputs_to_scales_to_model_results = model.multi_scale_logits( + inputs, + model_options, + image_pyramid=[1.0]) + for output in outputs_to_num_classes: + scales_to_model_results = outputs_to_scales_to_model_results[output] + self.assertListEqual( + list(scales_to_model_results), expected_endpoints) + self.assertEqual(len(scales_to_model_results), 1) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/deeplab/testing/info.md b/models/research/deeplab/testing/info.md new file mode 100644 index 0000000000000000000000000000000000000000..b84d2adb1c5088ed2a6ec4799de7764d64f867b7 --- /dev/null +++ b/models/research/deeplab/testing/info.md @@ -0,0 +1,6 @@ +This directory contains testing data. + +# pascal_voc_seg +This folder contains data specific to pascal_voc_seg dataset. val-00000-of-00001.tfrecord contains +three randomly generated images with format defined in +tensorflow/models/research/deeplab/datasets/build_voc2012_data.py. diff --git a/models/research/deeplab/testing/pascal_voc_seg/val-00000-of-00001.tfrecord b/models/research/deeplab/testing/pascal_voc_seg/val-00000-of-00001.tfrecord new file mode 100644 index 0000000000000000000000000000000000000000..de9dee50f7973c52305b2692f00a5d6f396f9fbe --- /dev/null +++ b/models/research/deeplab/testing/pascal_voc_seg/val-00000-of-00001.tfrecord @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88314133dce131cdb6a93f37ec2e96c3efdb2f9a111defae284d1530fee3207a +size 1137674 diff --git a/models/research/deeplab/train.py b/models/research/deeplab/train.py new file mode 100644 index 0000000000000000000000000000000000000000..fbe060dccd41793e3e843f4fcbe155576e42eb14 --- /dev/null +++ b/models/research/deeplab/train.py @@ -0,0 +1,464 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Training script for the DeepLab model. + +See model.py for more details and usage. 
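+
+A minimal single-machine invocation might look like the following (placeholder
+paths; flags are defined below and in common.py):
+
+  python deeplab/train.py \
+    --train_logdir=/tmp/deeplab_train \
+    --dataset_dir=/path/to/tfrecord \
+    --model_variant=mobilenet_v2 \
+    --training_number_of_steps=30000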
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import six +import tensorflow as tf +from tensorflow.contrib import quantize as contrib_quantize +from tensorflow.contrib import tfprof as contrib_tfprof +from deeplab import common +from deeplab import model +from deeplab.datasets import data_generator +from deeplab.utils import train_utils +from deployment import model_deploy + +slim = tf.contrib.slim +flags = tf.app.flags +FLAGS = flags.FLAGS + +# Settings for multi-GPUs/multi-replicas training. + +flags.DEFINE_integer('num_clones', 1, 'Number of clones to deploy.') + +flags.DEFINE_boolean('clone_on_cpu', False, 'Use CPUs to deploy clones.') + +flags.DEFINE_integer('num_replicas', 1, 'Number of worker replicas.') + +flags.DEFINE_integer('startup_delay_steps', 15, + 'Number of training steps between replicas startup.') + +flags.DEFINE_integer( + 'num_ps_tasks', 0, + 'The number of parameter servers. If the value is 0, then ' + 'the parameters are handled locally by the worker.') + +flags.DEFINE_string('master', '', 'BNS name of the tensorflow server') + +flags.DEFINE_integer('task', 0, 'The task ID.') + +# Settings for logging. + +flags.DEFINE_string('train_logdir', None, + 'Where the checkpoint and logs are stored.') + +flags.DEFINE_integer('log_steps', 10, + 'Display logging information at every log_steps.') + +flags.DEFINE_integer('save_interval_secs', 1200, + 'How often, in seconds, we save the model to disk.') + +flags.DEFINE_integer('save_summaries_secs', 600, + 'How often, in seconds, we compute the summaries.') + +flags.DEFINE_boolean( + 'save_summaries_images', False, + 'Save sample inputs, labels, and semantic predictions as ' + 'images to summary.') + +# Settings for profiling. + +flags.DEFINE_string('profile_logdir', None, + 'Where the profile files are stored.') + +# Settings for training strategy. + +flags.DEFINE_enum('optimizer', 'momentum', ['momentum', 'adam'], + 'Which optimizer to use.') + + +# Momentum optimizer flags + +flags.DEFINE_enum('learning_policy', 'poly', ['poly', 'step'], + 'Learning rate policy for training.') + +# Use 0.007 when training on PASCAL augmented training set, train_aug. When +# fine-tuning on PASCAL trainval set, use learning rate=0.0001. +flags.DEFINE_float('base_learning_rate', .0001, + 'The base learning rate for model training.') + +flags.DEFINE_float('decay_steps', 0.0, + 'Decay steps for polynomial learning rate schedule.') + +flags.DEFINE_float('end_learning_rate', 0.0, + 'End learning rate for polynomial learning rate schedule.') + +flags.DEFINE_float('learning_rate_decay_factor', 0.1, + 'The rate to decay the base learning rate.') + +flags.DEFINE_integer('learning_rate_decay_step', 2000, + 'Decay the base learning rate at a fixed step.') + +flags.DEFINE_float('learning_power', 0.9, + 'The power value used in the poly learning policy.') + +flags.DEFINE_integer('training_number_of_steps', 30000, + 'The number of steps used for training') + +flags.DEFINE_float('momentum', 0.9, 'The momentum value to use') + +# Adam optimizer flags +flags.DEFINE_float('adam_learning_rate', 0.001, + 'Learning rate for the adam optimizer.') +flags.DEFINE_float('adam_epsilon', 1e-08, 'Adam optimizer epsilon.') + +# When fine_tune_batch_norm=True, use at least batch size larger than 12 +# (batch size more than 16 is better). Otherwise, one could use smaller batch +# size and set fine_tune_batch_norm=False. 
+flags.DEFINE_integer('train_batch_size', 8,
+                     'The number of images in each batch during training.')
+
+# For weight_decay, use 0.00004 for MobileNet-V2 or Xception model variants.
+# Use 0.0001 for ResNet model variants.
+flags.DEFINE_float('weight_decay', 0.00004,
+                   'The value of the weight decay for training.')
+
+flags.DEFINE_list('train_crop_size', '513,513',
+                  'Image crop size [height, width] during training.')
+
+flags.DEFINE_float(
+    'last_layer_gradient_multiplier', 1.0,
+    'The gradient multiplier for last layers, which is used to '
+    'boost the gradient of last layers if the value > 1.')
+
+flags.DEFINE_boolean('upsample_logits', True,
+                     'Upsample logits during training.')
+
+# Hyper-parameters for NAS training strategy.
+
+flags.DEFINE_float(
+    'drop_path_keep_prob', 1.0,
+    'Probability to keep each path in the NAS cell when training.')
+
+# Settings for fine-tuning the network.
+
+flags.DEFINE_string('tf_initial_checkpoint', None,
+                    'The initial checkpoint in tensorflow format.')
+
+# Set to False if one does not want to re-use the trained classifier weights.
+flags.DEFINE_boolean('initialize_last_layer', True,
+                     'Initialize the last layer.')
+
+flags.DEFINE_boolean('last_layers_contain_logits_only', False,
+                     'Only consider logits as last layers or not.')
+
+flags.DEFINE_integer('slow_start_step', 0,
+                     'Training model with small learning rate for a few steps.')
+
+flags.DEFINE_float('slow_start_learning_rate', 1e-4,
+                   'Learning rate employed during slow start.')
+
+# Set to True if one wants to fine-tune the batch norm parameters in DeepLabv3.
+# Set to False and use small batch size to save GPU memory.
+flags.DEFINE_boolean('fine_tune_batch_norm', True,
+                     'Fine tune the batch norm parameters or not.')
+
+flags.DEFINE_float('min_scale_factor', 0.5,
+                   'Minimum scale factor for data augmentation.')
+
+flags.DEFINE_float('max_scale_factor', 2.,
+                   'Maximum scale factor for data augmentation.')
+
+flags.DEFINE_float('scale_factor_step_size', 0.25,
+                   'Scale factor step size for data augmentation.')
+
+# For `xception_65`, use atrous_rates = [12, 24, 36] if output_stride = 8, or
+# rates = [6, 12, 18] if output_stride = 16. For `mobilenet_v2`, use None. Note
+# one could use different atrous_rates/output_stride during training/evaluation.
+flags.DEFINE_multi_integer('atrous_rates', None,
+                           'Atrous rates for atrous spatial pyramid pooling.')
+
+flags.DEFINE_integer('output_stride', 16,
+                     'The ratio of input to output spatial resolution.')
+
+# Hard example mining related flags.
+flags.DEFINE_integer(
+    'hard_example_mining_step', 0,
+    'The training step in which exact hard example mining kicks off. Note we '
+    'gradually reduce the mining percent to the specified '
+    'top_k_percent_pixels. For example, if hard_example_mining_step=100K and '
+    'top_k_percent_pixels=0.25, then mining percent will gradually reduce from '
+    '100% to 25% until 100K steps after which we only mine top 25% pixels.')
+
+flags.DEFINE_float(
+    'top_k_percent_pixels', 1.0,
+    'The top k percent pixels (in terms of the loss values) used to compute '
+    'loss during training. This is useful for hard pixel mining.')
+
+# Quantization setting.
+flags.DEFINE_integer(
+    'quantize_delay_step', -1,
+    'Steps to start quantized training. If < 0, will not quantize model.')
+
+# Dataset settings.
+flags.DEFINE_string('dataset', 'pascal_voc_seg', + 'Name of the segmentation dataset.') + +flags.DEFINE_string('train_split', 'train', + 'Which split of the dataset to be used for training') + +flags.DEFINE_string('dataset_dir', None, 'Where the dataset reside.') + + +def _build_deeplab(iterator, outputs_to_num_classes, ignore_label): + """Builds a clone of DeepLab. + + Args: + iterator: An iterator of type tf.data.Iterator for images and labels. + outputs_to_num_classes: A map from output type to the number of classes. For + example, for the task of semantic segmentation with 21 semantic classes, + we would have outputs_to_num_classes['semantic'] = 21. + ignore_label: Ignore label. + """ + samples = iterator.get_next() + + # Add name to input and label nodes so we can add to summary. + samples[common.IMAGE] = tf.identity(samples[common.IMAGE], name=common.IMAGE) + samples[common.LABEL] = tf.identity(samples[common.LABEL], name=common.LABEL) + + model_options = common.ModelOptions( + outputs_to_num_classes=outputs_to_num_classes, + crop_size=[int(sz) for sz in FLAGS.train_crop_size], + atrous_rates=FLAGS.atrous_rates, + output_stride=FLAGS.output_stride) + + outputs_to_scales_to_logits = model.multi_scale_logits( + samples[common.IMAGE], + model_options=model_options, + image_pyramid=FLAGS.image_pyramid, + weight_decay=FLAGS.weight_decay, + is_training=True, + fine_tune_batch_norm=FLAGS.fine_tune_batch_norm, + nas_training_hyper_parameters={ + 'drop_path_keep_prob': FLAGS.drop_path_keep_prob, + 'total_training_steps': FLAGS.training_number_of_steps, + }) + + # Add name to graph node so we can add to summary. + output_type_dict = outputs_to_scales_to_logits[common.OUTPUT_TYPE] + output_type_dict[model.MERGED_LOGITS_SCOPE] = tf.identity( + output_type_dict[model.MERGED_LOGITS_SCOPE], name=common.OUTPUT_TYPE) + + for output, num_classes in six.iteritems(outputs_to_num_classes): + train_utils.add_softmax_cross_entropy_loss_for_each_scale( + outputs_to_scales_to_logits[output], + samples[common.LABEL], + num_classes, + ignore_label, + loss_weight=model_options.label_weights, + upsample_logits=FLAGS.upsample_logits, + hard_example_mining_step=FLAGS.hard_example_mining_step, + top_k_percent_pixels=FLAGS.top_k_percent_pixels, + scope=output) + + +def main(unused_argv): + tf.logging.set_verbosity(tf.logging.INFO) + # Set up deployment (i.e., multi-GPUs and/or multi-replicas). + config = model_deploy.DeploymentConfig( + num_clones=FLAGS.num_clones, + clone_on_cpu=FLAGS.clone_on_cpu, + replica_id=FLAGS.task, + num_replicas=FLAGS.num_replicas, + num_ps_tasks=FLAGS.num_ps_tasks) + + # Split the batch across GPUs. 
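+  # For example, train_batch_size=16 with num_clones=4 gives each clone a
+  # batch of 4 images.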
+ assert FLAGS.train_batch_size % config.num_clones == 0, ( + 'Training batch size not divisble by number of clones (GPUs).') + + clone_batch_size = FLAGS.train_batch_size // config.num_clones + + tf.gfile.MakeDirs(FLAGS.train_logdir) + tf.logging.info('Training on %s set', FLAGS.train_split) + + with tf.Graph().as_default() as graph: + with tf.device(config.inputs_device()): + dataset = data_generator.Dataset( + dataset_name=FLAGS.dataset, + split_name=FLAGS.train_split, + dataset_dir=FLAGS.dataset_dir, + batch_size=clone_batch_size, + crop_size=[int(sz) for sz in FLAGS.train_crop_size], + min_resize_value=FLAGS.min_resize_value, + max_resize_value=FLAGS.max_resize_value, + resize_factor=FLAGS.resize_factor, + min_scale_factor=FLAGS.min_scale_factor, + max_scale_factor=FLAGS.max_scale_factor, + scale_factor_step_size=FLAGS.scale_factor_step_size, + model_variant=FLAGS.model_variant, + num_readers=4, + is_training=True, + should_shuffle=True, + should_repeat=True) + + # Create the global step on the device storing the variables. + with tf.device(config.variables_device()): + global_step = tf.train.get_or_create_global_step() + + # Define the model and create clones. + model_fn = _build_deeplab + model_args = (dataset.get_one_shot_iterator(), { + common.OUTPUT_TYPE: dataset.num_of_classes + }, dataset.ignore_label) + clones = model_deploy.create_clones(config, model_fn, args=model_args) + + # Gather update_ops from the first clone. These contain, for example, + # the updates for the batch_norm variables created by model_fn. + first_clone_scope = config.clone_scope(0) + update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope) + + # Gather initial summaries. + summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES)) + + # Add summaries for model variables. + for model_var in tf.model_variables(): + summaries.add(tf.summary.histogram(model_var.op.name, model_var)) + + # Add summaries for images, labels, semantic predictions + if FLAGS.save_summaries_images: + summary_image = graph.get_tensor_by_name( + ('%s/%s:0' % (first_clone_scope, common.IMAGE)).strip('/')) + summaries.add( + tf.summary.image('samples/%s' % common.IMAGE, summary_image)) + + first_clone_label = graph.get_tensor_by_name( + ('%s/%s:0' % (first_clone_scope, common.LABEL)).strip('/')) + # Scale up summary image pixel values for better visualization. + pixel_scaling = max(1, 255 // dataset.num_of_classes) + summary_label = tf.cast(first_clone_label * pixel_scaling, tf.uint8) + summaries.add( + tf.summary.image('samples/%s' % common.LABEL, summary_label)) + + first_clone_output = graph.get_tensor_by_name( + ('%s/%s:0' % (first_clone_scope, common.OUTPUT_TYPE)).strip('/')) + predictions = tf.expand_dims(tf.argmax(first_clone_output, 3), -1) + + summary_predictions = tf.cast(predictions * pixel_scaling, tf.uint8) + summaries.add( + tf.summary.image( + 'samples/%s' % common.OUTPUT_TYPE, summary_predictions)) + + # Add summaries for losses. + for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope): + summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss)) + + # Build the optimizer based on the device specification. 
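+    # With the default 'poly' policy the learning rate roughly follows
+    # base_learning_rate * (1 - global_step / decay_steps) ** learning_power
+    # (see train_utils.get_model_learning_rate for the exact schedule).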
+ with tf.device(config.optimizer_device()): + learning_rate = train_utils.get_model_learning_rate( + FLAGS.learning_policy, + FLAGS.base_learning_rate, + FLAGS.learning_rate_decay_step, + FLAGS.learning_rate_decay_factor, + FLAGS.training_number_of_steps, + FLAGS.learning_power, + FLAGS.slow_start_step, + FLAGS.slow_start_learning_rate, + decay_steps=FLAGS.decay_steps, + end_learning_rate=FLAGS.end_learning_rate) + + summaries.add(tf.summary.scalar('learning_rate', learning_rate)) + + if FLAGS.optimizer == 'momentum': + optimizer = tf.train.MomentumOptimizer(learning_rate, FLAGS.momentum) + elif FLAGS.optimizer == 'adam': + optimizer = tf.train.AdamOptimizer( + learning_rate=FLAGS.adam_learning_rate, epsilon=FLAGS.adam_epsilon) + else: + raise ValueError('Unknown optimizer') + + if FLAGS.quantize_delay_step >= 0: + if FLAGS.num_clones > 1: + raise ValueError('Quantization doesn\'t support multi-clone yet.') + contrib_quantize.create_training_graph( + quant_delay=FLAGS.quantize_delay_step) + + startup_delay_steps = FLAGS.task * FLAGS.startup_delay_steps + + with tf.device(config.variables_device()): + total_loss, grads_and_vars = model_deploy.optimize_clones( + clones, optimizer) + total_loss = tf.check_numerics(total_loss, 'Loss is inf or nan.') + summaries.add(tf.summary.scalar('total_loss', total_loss)) + + # Modify the gradients for biases and last layer variables. + last_layers = model.get_extra_layer_scopes( + FLAGS.last_layers_contain_logits_only) + grad_mult = train_utils.get_model_gradient_multipliers( + last_layers, FLAGS.last_layer_gradient_multiplier) + if grad_mult: + grads_and_vars = slim.learning.multiply_gradients( + grads_and_vars, grad_mult) + + # Create gradient update op. + grad_updates = optimizer.apply_gradients( + grads_and_vars, global_step=global_step) + update_ops.append(grad_updates) + update_op = tf.group(*update_ops) + with tf.control_dependencies([update_op]): + train_tensor = tf.identity(total_loss, name='train_op') + + # Add the summaries from the first clone. These contain the summaries + # created by model_fn and either optimize_clones() or _gather_clone_loss(). + summaries |= set( + tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone_scope)) + + # Merge all summaries together. + summary_op = tf.summary.merge(list(summaries)) + + # Soft placement allows placing on CPU ops without GPU implementation. + session_config = tf.ConfigProto( + allow_soft_placement=True, log_device_placement=False) + + # Start the training. 
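+    # slim.learning.train below drives the loop: it restores variables via
+    # init_fn (if provided), writes summaries every save_summaries_secs
+    # seconds, checkpoints every save_interval_secs seconds, and stops after
+    # training_number_of_steps steps.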
+ profile_dir = FLAGS.profile_logdir + if profile_dir is not None: + tf.gfile.MakeDirs(profile_dir) + + with contrib_tfprof.ProfileContext( + enabled=profile_dir is not None, profile_dir=profile_dir): + init_fn = None + if FLAGS.tf_initial_checkpoint: + init_fn = train_utils.get_model_init_fn( + FLAGS.train_logdir, + FLAGS.tf_initial_checkpoint, + FLAGS.initialize_last_layer, + last_layers, + ignore_missing_vars=True) + + slim.learning.train( + train_tensor, + logdir=FLAGS.train_logdir, + log_every_n_steps=FLAGS.log_steps, + master=FLAGS.master, + number_of_steps=FLAGS.training_number_of_steps, + is_chief=(FLAGS.task == 0), + session_config=session_config, + startup_delay_steps=startup_delay_steps, + init_fn=init_fn, + summary_op=summary_op, + save_summaries_secs=FLAGS.save_summaries_secs, + save_interval_secs=FLAGS.save_interval_secs) + + +if __name__ == '__main__': + flags.mark_flag_as_required('train_logdir') + flags.mark_flag_as_required('dataset_dir') + tf.app.run() diff --git a/models/research/deeplab/utils/__init__.py b/models/research/deeplab/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/deeplab/utils/get_dataset_colormap.py b/models/research/deeplab/utils/get_dataset_colormap.py new file mode 100644 index 0000000000000000000000000000000000000000..c0502e3b3cdd4ee065701e5ee8d94d7f3701c576 --- /dev/null +++ b/models/research/deeplab/utils/get_dataset_colormap.py @@ -0,0 +1,416 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Visualizes the segmentation results via specified color map. + +Visualizes the semantic segmentation results by the color map +defined by the different datasets. Supported colormaps are: + +* ADE20K (http://groups.csail.mit.edu/vision/datasets/ADE20K/). + +* Cityscapes dataset (https://www.cityscapes-dataset.com). + +* Mapillary Vistas (https://research.mapillary.com). + +* PASCAL VOC 2012 (http://host.robots.ox.ac.uk/pascal/VOC/). +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np +from six.moves import range + +# Dataset names. +_ADE20K = 'ade20k' +_CITYSCAPES = 'cityscapes' +_MAPILLARY_VISTAS = 'mapillary_vistas' +_PASCAL = 'pascal' + +# Max number of entries in the colormap for each dataset. +_DATASET_MAX_ENTRIES = { + _ADE20K: 151, + _CITYSCAPES: 256, + _MAPILLARY_VISTAS: 66, + _PASCAL: 512, +} + + +def create_ade20k_label_colormap(): + """Creates a label colormap used in ADE20K segmentation benchmark. + + Returns: + A colormap for visualizing segmentation results. 
+ """ + return np.asarray([ + [0, 0, 0], + [120, 120, 120], + [180, 120, 120], + [6, 230, 230], + [80, 50, 50], + [4, 200, 3], + [120, 120, 80], + [140, 140, 140], + [204, 5, 255], + [230, 230, 230], + [4, 250, 7], + [224, 5, 255], + [235, 255, 7], + [150, 5, 61], + [120, 120, 70], + [8, 255, 51], + [255, 6, 82], + [143, 255, 140], + [204, 255, 4], + [255, 51, 7], + [204, 70, 3], + [0, 102, 200], + [61, 230, 250], + [255, 6, 51], + [11, 102, 255], + [255, 7, 71], + [255, 9, 224], + [9, 7, 230], + [220, 220, 220], + [255, 9, 92], + [112, 9, 255], + [8, 255, 214], + [7, 255, 224], + [255, 184, 6], + [10, 255, 71], + [255, 41, 10], + [7, 255, 255], + [224, 255, 8], + [102, 8, 255], + [255, 61, 6], + [255, 194, 7], + [255, 122, 8], + [0, 255, 20], + [255, 8, 41], + [255, 5, 153], + [6, 51, 255], + [235, 12, 255], + [160, 150, 20], + [0, 163, 255], + [140, 140, 140], + [250, 10, 15], + [20, 255, 0], + [31, 255, 0], + [255, 31, 0], + [255, 224, 0], + [153, 255, 0], + [0, 0, 255], + [255, 71, 0], + [0, 235, 255], + [0, 173, 255], + [31, 0, 255], + [11, 200, 200], + [255, 82, 0], + [0, 255, 245], + [0, 61, 255], + [0, 255, 112], + [0, 255, 133], + [255, 0, 0], + [255, 163, 0], + [255, 102, 0], + [194, 255, 0], + [0, 143, 255], + [51, 255, 0], + [0, 82, 255], + [0, 255, 41], + [0, 255, 173], + [10, 0, 255], + [173, 255, 0], + [0, 255, 153], + [255, 92, 0], + [255, 0, 255], + [255, 0, 245], + [255, 0, 102], + [255, 173, 0], + [255, 0, 20], + [255, 184, 184], + [0, 31, 255], + [0, 255, 61], + [0, 71, 255], + [255, 0, 204], + [0, 255, 194], + [0, 255, 82], + [0, 10, 255], + [0, 112, 255], + [51, 0, 255], + [0, 194, 255], + [0, 122, 255], + [0, 255, 163], + [255, 153, 0], + [0, 255, 10], + [255, 112, 0], + [143, 255, 0], + [82, 0, 255], + [163, 255, 0], + [255, 235, 0], + [8, 184, 170], + [133, 0, 255], + [0, 255, 92], + [184, 0, 255], + [255, 0, 31], + [0, 184, 255], + [0, 214, 255], + [255, 0, 112], + [92, 255, 0], + [0, 224, 255], + [112, 224, 255], + [70, 184, 160], + [163, 0, 255], + [153, 0, 255], + [71, 255, 0], + [255, 0, 163], + [255, 204, 0], + [255, 0, 143], + [0, 255, 235], + [133, 255, 0], + [255, 0, 235], + [245, 0, 255], + [255, 0, 122], + [255, 245, 0], + [10, 190, 212], + [214, 255, 0], + [0, 204, 255], + [20, 0, 255], + [255, 255, 0], + [0, 153, 255], + [0, 41, 255], + [0, 255, 204], + [41, 0, 255], + [41, 255, 0], + [173, 0, 255], + [0, 245, 255], + [71, 0, 255], + [122, 0, 255], + [0, 255, 184], + [0, 92, 255], + [184, 255, 0], + [0, 133, 255], + [255, 214, 0], + [25, 194, 194], + [102, 255, 0], + [92, 0, 255], + ]) + + +def create_cityscapes_label_colormap(): + """Creates a label colormap used in CITYSCAPES segmentation benchmark. + + Returns: + A colormap for visualizing segmentation results. + """ + colormap = np.zeros((256, 3), dtype=np.uint8) + colormap[0] = [128, 64, 128] + colormap[1] = [244, 35, 232] + colormap[2] = [70, 70, 70] + colormap[3] = [102, 102, 156] + colormap[4] = [190, 153, 153] + colormap[5] = [153, 153, 153] + colormap[6] = [250, 170, 30] + colormap[7] = [220, 220, 0] + colormap[8] = [107, 142, 35] + colormap[9] = [152, 251, 152] + colormap[10] = [70, 130, 180] + colormap[11] = [220, 20, 60] + colormap[12] = [255, 0, 0] + colormap[13] = [0, 0, 142] + colormap[14] = [0, 0, 70] + colormap[15] = [0, 60, 100] + colormap[16] = [0, 80, 100] + colormap[17] = [0, 0, 230] + colormap[18] = [119, 11, 32] + return colormap + + +def create_mapillary_vistas_label_colormap(): + """Creates a label colormap used in Mapillary Vistas segmentation benchmark. 
+ + Returns: + A colormap for visualizing segmentation results. + """ + return np.asarray([ + [165, 42, 42], + [0, 192, 0], + [196, 196, 196], + [190, 153, 153], + [180, 165, 180], + [102, 102, 156], + [102, 102, 156], + [128, 64, 255], + [140, 140, 200], + [170, 170, 170], + [250, 170, 160], + [96, 96, 96], + [230, 150, 140], + [128, 64, 128], + [110, 110, 110], + [244, 35, 232], + [150, 100, 100], + [70, 70, 70], + [150, 120, 90], + [220, 20, 60], + [255, 0, 0], + [255, 0, 0], + [255, 0, 0], + [200, 128, 128], + [255, 255, 255], + [64, 170, 64], + [128, 64, 64], + [70, 130, 180], + [255, 255, 255], + [152, 251, 152], + [107, 142, 35], + [0, 170, 30], + [255, 255, 128], + [250, 0, 30], + [0, 0, 0], + [220, 220, 220], + [170, 170, 170], + [222, 40, 40], + [100, 170, 30], + [40, 40, 40], + [33, 33, 33], + [170, 170, 170], + [0, 0, 142], + [170, 170, 170], + [210, 170, 100], + [153, 153, 153], + [128, 128, 128], + [0, 0, 142], + [250, 170, 30], + [192, 192, 192], + [220, 220, 0], + [180, 165, 180], + [119, 11, 32], + [0, 0, 142], + [0, 60, 100], + [0, 0, 142], + [0, 0, 90], + [0, 0, 230], + [0, 80, 100], + [128, 64, 64], + [0, 0, 110], + [0, 0, 70], + [0, 0, 192], + [32, 32, 32], + [0, 0, 0], + [0, 0, 0], + ]) + + +def create_pascal_label_colormap(): + """Creates a label colormap used in PASCAL VOC segmentation benchmark. + + Returns: + A colormap for visualizing segmentation results. + """ + colormap = np.zeros((_DATASET_MAX_ENTRIES[_PASCAL], 3), dtype=int) + ind = np.arange(_DATASET_MAX_ENTRIES[_PASCAL], dtype=int) + + for shift in reversed(list(range(8))): + for channel in range(3): + colormap[:, channel] |= bit_get(ind, channel) << shift + ind >>= 3 + + return colormap + + +def get_ade20k_name(): + return _ADE20K + + +def get_cityscapes_name(): + return _CITYSCAPES + + +def get_mapillary_vistas_name(): + return _MAPILLARY_VISTAS + + +def get_pascal_name(): + return _PASCAL + + +def bit_get(val, idx): + """Gets the bit value. + + Args: + val: Input value, int or numpy int array. + idx: Which bit of the input val. + + Returns: + The "idx"-th bit of input val. + """ + return (val >> idx) & 1 + + +def create_label_colormap(dataset=_PASCAL): + """Creates a label colormap for the specified dataset. + + Args: + dataset: The colormap used in the dataset. + + Returns: + A numpy array of the dataset colormap. + + Raises: + ValueError: If the dataset is not supported. + """ + if dataset == _ADE20K: + return create_ade20k_label_colormap() + elif dataset == _CITYSCAPES: + return create_cityscapes_label_colormap() + elif dataset == _MAPILLARY_VISTAS: + return create_mapillary_vistas_label_colormap() + elif dataset == _PASCAL: + return create_pascal_label_colormap() + else: + raise ValueError('Unsupported dataset.') + + +def label_to_color_image(label, dataset=_PASCAL): + """Adds color defined by the dataset colormap to the label. + + Args: + label: A 2D array with integer type, storing the segmentation label. + dataset: The colormap used in the dataset. + + Returns: + result: A 2D array with floating type. The element of the array + is the color indexed by the corresponding element in the input label + to the dataset color map. + + Raises: + ValueError: If label is not of rank 2 or its value is larger than color + map maximum entry. + """ + if label.ndim != 2: + raise ValueError('Expect 2-D input label. 
Got {}'.format(label.shape)) + + if np.max(label) >= _DATASET_MAX_ENTRIES[dataset]: + raise ValueError( + 'label value too large: {} >= {}.'.format( + np.max(label), _DATASET_MAX_ENTRIES[dataset])) + + colormap = create_label_colormap(dataset) + return colormap[label] + + +def get_dataset_colormap_max_entries(dataset): + return _DATASET_MAX_ENTRIES[dataset] diff --git a/models/research/deeplab/utils/get_dataset_colormap_test.py b/models/research/deeplab/utils/get_dataset_colormap_test.py new file mode 100644 index 0000000000000000000000000000000000000000..89adb2c7391ce087100558fcf256acb1ca45638b --- /dev/null +++ b/models/research/deeplab/utils/get_dataset_colormap_test.py @@ -0,0 +1,97 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for get_dataset_colormap.py.""" + +import numpy as np +import tensorflow as tf + +from deeplab.utils import get_dataset_colormap + + +class VisualizationUtilTest(tf.test.TestCase): + + def testBitGet(self): + """Test that if the returned bit value is correct.""" + self.assertEqual(1, get_dataset_colormap.bit_get(9, 0)) + self.assertEqual(0, get_dataset_colormap.bit_get(9, 1)) + self.assertEqual(0, get_dataset_colormap.bit_get(9, 2)) + self.assertEqual(1, get_dataset_colormap.bit_get(9, 3)) + + def testPASCALLabelColorMapValue(self): + """Test the getd color map value.""" + colormap = get_dataset_colormap.create_pascal_label_colormap() + + # Only test a few sampled entries in the color map. 
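+    # For example, label 5 = 0b101: bits 0, 1, 2 are 1, 0, 1, so the first
+    # (highest) shift sets bit 7 of the R, G, B channels to 128, 0, 128;
+    # later shifts add nothing because 5 >> 3 == 0.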
+ self.assertTrue(np.array_equal([128., 0., 128.], colormap[5, :])) + self.assertTrue(np.array_equal([128., 192., 128.], colormap[23, :])) + self.assertTrue(np.array_equal([128., 0., 192.], colormap[37, :])) + self.assertTrue(np.array_equal([224., 192., 192.], colormap[127, :])) + self.assertTrue(np.array_equal([192., 160., 192.], colormap[175, :])) + + def testLabelToPASCALColorImage(self): + """Test the value of the converted label value.""" + label = np.array([[0, 16, 16], [52, 7, 52]]) + expected_result = np.array([ + [[0, 0, 0], [0, 64, 0], [0, 64, 0]], + [[0, 64, 192], [128, 128, 128], [0, 64, 192]] + ]) + colored_label = get_dataset_colormap.label_to_color_image( + label, get_dataset_colormap.get_pascal_name()) + self.assertTrue(np.array_equal(expected_result, colored_label)) + + def testUnExpectedLabelValueForLabelToPASCALColorImage(self): + """Raise ValueError when input value exceeds range.""" + label = np.array([[120], [600]]) + with self.assertRaises(ValueError): + get_dataset_colormap.label_to_color_image( + label, get_dataset_colormap.get_pascal_name()) + + def testUnExpectedLabelDimensionForLabelToPASCALColorImage(self): + """Raise ValueError if input dimension is not correct.""" + label = np.array([120]) + with self.assertRaises(ValueError): + get_dataset_colormap.label_to_color_image( + label, get_dataset_colormap.get_pascal_name()) + + def testGetColormapForUnsupportedDataset(self): + with self.assertRaises(ValueError): + get_dataset_colormap.create_label_colormap('unsupported_dataset') + + def testUnExpectedLabelDimensionForLabelToADE20KColorImage(self): + label = np.array([250]) + with self.assertRaises(ValueError): + get_dataset_colormap.label_to_color_image( + label, get_dataset_colormap.get_ade20k_name()) + + def testFirstColorInADE20KColorMap(self): + label = np.array([[1, 3], [10, 20]]) + expected_result = np.array([ + [[120, 120, 120], [6, 230, 230]], + [[4, 250, 7], [204, 70, 3]] + ]) + colored_label = get_dataset_colormap.label_to_color_image( + label, get_dataset_colormap.get_ade20k_name()) + self.assertTrue(np.array_equal(colored_label, expected_result)) + + def testMapillaryVistasColorMapValue(self): + colormap = get_dataset_colormap.create_mapillary_vistas_label_colormap() + self.assertTrue(np.array_equal([190, 153, 153], colormap[3, :])) + self.assertTrue(np.array_equal([102, 102, 156], colormap[6, :])) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/deeplab/utils/save_annotation.py b/models/research/deeplab/utils/save_annotation.py new file mode 100644 index 0000000000000000000000000000000000000000..2444df79532d6ef999f470ab8eef5ab333491660 --- /dev/null +++ b/models/research/deeplab/utils/save_annotation.py @@ -0,0 +1,66 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Saves an annotation as one png image. 
+ +This script saves an annotation as one png image, and has the option to add +colormap to the png image for better visualization. +""" + +import numpy as np +import PIL.Image as img +import tensorflow as tf + +from deeplab.utils import get_dataset_colormap + + +def save_annotation(label, + save_dir, + filename, + add_colormap=True, + normalize_to_unit_values=False, + scale_values=False, + colormap_type=get_dataset_colormap.get_pascal_name()): + """Saves the given label to image on disk. + + Args: + label: The numpy array to be saved. The data will be converted + to uint8 and saved as png image. + save_dir: String, the directory to which the results will be saved. + filename: String, the image filename. + add_colormap: Boolean, add color map to the label or not. + normalize_to_unit_values: Boolean, normalize the input values to [0, 1]. + scale_values: Boolean, scale the input values to [0, 255] for visualization. + colormap_type: String, colormap type for visualization. + """ + # Add colormap for visualizing the prediction. + if add_colormap: + colored_label = get_dataset_colormap.label_to_color_image( + label, colormap_type) + else: + colored_label = label + if normalize_to_unit_values: + min_value = np.amin(colored_label) + max_value = np.amax(colored_label) + range_value = max_value - min_value + if range_value != 0: + colored_label = (colored_label - min_value) / range_value + + if scale_values: + colored_label = 255. * colored_label + + pil_image = img.fromarray(colored_label.astype(dtype=np.uint8)) + with tf.gfile.Open('%s/%s.png' % (save_dir, filename), mode='w') as f: + pil_image.save(f, 'PNG') diff --git a/models/research/deeplab/utils/train_utils.py b/models/research/deeplab/utils/train_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..14bbd6ee7e55533d94195fb4e7327e63e53a2800 --- /dev/null +++ b/models/research/deeplab/utils/train_utils.py @@ -0,0 +1,372 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utility functions for training.""" + +import six +import tensorflow as tf +from tensorflow.contrib import framework as contrib_framework + +from deeplab.core import preprocess_utils +from deeplab.core import utils + + +def _div_maybe_zero(total_loss, num_present): + """Normalizes the total loss with the number of present pixels.""" + return tf.to_float(num_present > 0) * tf.math.divide( + total_loss, + tf.maximum(1e-5, num_present)) + + +def add_softmax_cross_entropy_loss_for_each_scale(scales_to_logits, + labels, + num_classes, + ignore_label, + loss_weight=1.0, + upsample_logits=True, + hard_example_mining_step=0, + top_k_percent_pixels=1.0, + gt_is_matting_map=False, + scope=None): + """Adds softmax cross entropy loss for logits of each scale. + + Args: + scales_to_logits: A map from logits names for different scales to logits. 
+ The logits have shape [batch, logits_height, logits_width, num_classes]. + labels: Groundtruth labels with shape [batch, image_height, image_width, 1]. + num_classes: Integer, number of target classes. + ignore_label: Integer, label to ignore. + loss_weight: A float or a list of loss weights. If it is a float, it means + all the labels have the same weight. If it is a list of weights, then each + element in the list represents the weight for the label of its index, for + example, loss_weight = [0.1, 0.5] means the weight for label 0 is 0.1 and + the weight for label 1 is 0.5. + upsample_logits: Boolean, upsample logits or not. + hard_example_mining_step: An integer, the training step in which the hard + exampling mining kicks off. Note that we gradually reduce the mining + percent to the top_k_percent_pixels. For example, if + hard_example_mining_step = 100K and top_k_percent_pixels = 0.25, then + mining percent will gradually reduce from 100% to 25% until 100K steps + after which we only mine top 25% pixels. + top_k_percent_pixels: A float, the value lies in [0.0, 1.0]. When its value + < 1.0, only compute the loss for the top k percent pixels (e.g., the top + 20% pixels). This is useful for hard pixel mining. + gt_is_matting_map: If true, the groundtruth is a matting map of confidence + score. If false, the groundtruth is an integer valued class mask. + scope: String, the scope for the loss. + + Raises: + ValueError: Label or logits is None, or groundtruth is matting map while + label is not floating value. + """ + if labels is None: + raise ValueError('No label for softmax cross entropy loss.') + + # If input groundtruth is a matting map of confidence, check if the input + # labels are floating point values. + if gt_is_matting_map and not labels.dtype.is_floating: + raise ValueError('Labels must be floats if groundtruth is a matting map.') + + for scale, logits in six.iteritems(scales_to_logits): + loss_scope = None + if scope: + loss_scope = '%s_%s' % (scope, scale) + + if upsample_logits: + # Label is not downsampled, and instead we upsample logits. + logits = tf.image.resize_bilinear( + logits, + preprocess_utils.resolve_shape(labels, 4)[1:3], + align_corners=True) + scaled_labels = labels + else: + # Label is downsampled to the same size as logits. + # When gt_is_matting_map = true, label downsampling with nearest neighbor + # method may introduce artifacts. However, to avoid ignore_label from + # being interpolated with other labels, we still perform nearest neighbor + # interpolation. + # TODO(huizhongc): Change to bilinear interpolation by processing padded + # and non-padded label separately. + if gt_is_matting_map: + tf.logging.warning( + 'Label downsampling with nearest neighbor may introduce artifacts.') + + scaled_labels = tf.image.resize_nearest_neighbor( + labels, + preprocess_utils.resolve_shape(logits, 4)[1:3], + align_corners=True) + + scaled_labels = tf.reshape(scaled_labels, shape=[-1]) + weights = utils.get_label_weight_mask( + scaled_labels, ignore_label, num_classes, label_weights=loss_weight) + # Dimension of keep_mask is equal to the total number of pixels. + keep_mask = tf.cast( + tf.not_equal(scaled_labels, ignore_label), dtype=tf.float32) + + train_labels = None + logits = tf.reshape(logits, shape=[-1, num_classes]) + + if gt_is_matting_map: + # When the groundtruth is integer label mask, we can assign class + # dependent label weights to the loss. 
When the groundtruth is image + # matting confidence, we do not apply class-dependent label weight (i.e., + # label_weight = 1.0). + if loss_weight != 1.0: + raise ValueError( + 'loss_weight must equal to 1 if groundtruth is matting map.') + + # Assign label value 0 to ignore pixels. The exact label value of ignore + # pixel does not matter, because those ignore_value pixel losses will be + # multiplied to 0 weight. + train_labels = scaled_labels * keep_mask + + train_labels = tf.expand_dims(train_labels, 1) + train_labels = tf.concat([1 - train_labels, train_labels], axis=1) + else: + train_labels = tf.one_hot( + scaled_labels, num_classes, on_value=1.0, off_value=0.0) + + default_loss_scope = ('softmax_all_pixel_loss' + if top_k_percent_pixels == 1.0 else + 'softmax_hard_example_mining') + with tf.name_scope(loss_scope, default_loss_scope, + [logits, train_labels, weights]): + # Compute the loss for all pixels. + pixel_losses = tf.nn.softmax_cross_entropy_with_logits_v2( + labels=tf.stop_gradient( + train_labels, name='train_labels_stop_gradient'), + logits=logits, + name='pixel_losses') + weighted_pixel_losses = tf.multiply(pixel_losses, weights) + + if top_k_percent_pixels == 1.0: + total_loss = tf.reduce_sum(weighted_pixel_losses) + num_present = tf.reduce_sum(keep_mask) + loss = _div_maybe_zero(total_loss, num_present) + tf.losses.add_loss(loss) + else: + num_pixels = tf.to_float(tf.shape(logits)[0]) + # Compute the top_k_percent pixels based on current training step. + if hard_example_mining_step == 0: + # Directly focus on the top_k pixels. + top_k_pixels = tf.to_int32(top_k_percent_pixels * num_pixels) + else: + # Gradually reduce the mining percent to top_k_percent_pixels. + global_step = tf.to_float(tf.train.get_or_create_global_step()) + ratio = tf.minimum(1.0, global_step / hard_example_mining_step) + top_k_pixels = tf.to_int32( + (ratio * top_k_percent_pixels + (1.0 - ratio)) * num_pixels) + top_k_losses, _ = tf.nn.top_k(weighted_pixel_losses, + k=top_k_pixels, + sorted=True, + name='top_k_percent_pixels') + total_loss = tf.reduce_sum(top_k_losses) + num_present = tf.reduce_sum( + tf.to_float(tf.not_equal(top_k_losses, 0.0))) + loss = _div_maybe_zero(total_loss, num_present) + tf.losses.add_loss(loss) + + +def get_model_init_fn(train_logdir, + tf_initial_checkpoint, + initialize_last_layer, + last_layers, + ignore_missing_vars=False): + """Gets the function initializing model variables from a checkpoint. + + Args: + train_logdir: Log directory for training. + tf_initial_checkpoint: TensorFlow checkpoint for initialization. + initialize_last_layer: Initialize last layer or not. + last_layers: Last layers of the model. + ignore_missing_vars: Ignore missing variables in the checkpoint. + + Returns: + Initialization function. + """ + if tf_initial_checkpoint is None: + tf.logging.info('Not initializing the model from a checkpoint.') + return None + + if tf.train.latest_checkpoint(train_logdir): + tf.logging.info('Ignoring initialization; other checkpoint exists') + return None + + tf.logging.info('Initializing model from path: %s', tf_initial_checkpoint) + + # Variables that will not be restored. 
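+  # 'global_step' is always excluded so that fine-tuning restarts the step
+  # count; when initialize_last_layer is False the logit scopes in
+  # last_layers are excluded as well, which is the usual choice when the
+  # target dataset has a different number of classes than the checkpoint.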
+ exclude_list = ['global_step'] + if not initialize_last_layer: + exclude_list.extend(last_layers) + + variables_to_restore = contrib_framework.get_variables_to_restore( + exclude=exclude_list) + + if variables_to_restore: + init_op, init_feed_dict = contrib_framework.assign_from_checkpoint( + tf_initial_checkpoint, + variables_to_restore, + ignore_missing_vars=ignore_missing_vars) + global_step = tf.train.get_or_create_global_step() + + def restore_fn(sess): + sess.run(init_op, init_feed_dict) + sess.run([global_step]) + + return restore_fn + + return None + + +def get_model_gradient_multipliers(last_layers, last_layer_gradient_multiplier): + """Gets the gradient multipliers. + + The gradient multipliers will adjust the learning rates for model + variables. For the task of semantic segmentation, the models are + usually fine-tuned from the models trained on the task of image + classification. To fine-tune the models, we usually set larger (e.g., + 10 times larger) learning rate for the parameters of last layer. + + Args: + last_layers: Scopes of last layers. + last_layer_gradient_multiplier: The gradient multiplier for last layers. + + Returns: + The gradient multiplier map with variables as key, and multipliers as value. + """ + gradient_multipliers = {} + + for var in tf.model_variables(): + # Double the learning rate for biases. + if 'biases' in var.op.name: + gradient_multipliers[var.op.name] = 2. + + # Use larger learning rate for last layer variables. + for layer in last_layers: + if layer in var.op.name and 'biases' in var.op.name: + gradient_multipliers[var.op.name] = 2 * last_layer_gradient_multiplier + break + elif layer in var.op.name: + gradient_multipliers[var.op.name] = last_layer_gradient_multiplier + break + + return gradient_multipliers + + +def get_model_learning_rate(learning_policy, + base_learning_rate, + learning_rate_decay_step, + learning_rate_decay_factor, + training_number_of_steps, + learning_power, + slow_start_step, + slow_start_learning_rate, + slow_start_burnin_type='none', + decay_steps=0.0, + end_learning_rate=0.0, + boundaries=None, + boundary_learning_rates=None): + """Gets model's learning rate. + + Computes the model's learning rate for different learning policy. + Right now, only "step" and "poly" are supported. + (1) The learning policy for "step" is computed as follows: + current_learning_rate = base_learning_rate * + learning_rate_decay_factor ^ (global_step / learning_rate_decay_step) + See tf.train.exponential_decay for details. + (2) The learning policy for "poly" is computed as follows: + current_learning_rate = base_learning_rate * + (1 - global_step / training_number_of_steps) ^ learning_power + + Args: + learning_policy: Learning rate policy for training. + base_learning_rate: The base learning rate for model training. + learning_rate_decay_step: Decay the base learning rate at a fixed step. + learning_rate_decay_factor: The rate to decay the base learning rate. + training_number_of_steps: Number of steps for training. + learning_power: Power used for 'poly' learning policy. + slow_start_step: Training model with small learning rate for the first + few steps. + slow_start_learning_rate: The learning rate employed during slow start. + slow_start_burnin_type: The burnin type for the slow start stage. Can be + `none` which means no burnin or `linear` which means the learning rate + increases linearly from slow_start_learning_rate and reaches + base_learning_rate after slow_start_steps. 
+ decay_steps: Float, `decay_steps` for polynomial learning rate. + end_learning_rate: Float, `end_learning_rate` for polynomial learning rate. + boundaries: A list of `Tensor`s or `int`s or `float`s with strictly + increasing entries. + boundary_learning_rates: A list of `Tensor`s or `float`s or `int`s that + specifies the values for the intervals defined by `boundaries`. It should + have one more element than `boundaries`, and all elements should have the + same type. + + Returns: + Learning rate for the specified learning policy. + + Raises: + ValueError: If learning policy or slow start burnin type is not recognized. + ValueError: If `boundaries` and `boundary_learning_rates` are not set for + multi_steps learning rate decay. + """ + global_step = tf.train.get_or_create_global_step() + adjusted_global_step = tf.maximum(global_step - slow_start_step, 0) + if decay_steps == 0.0: + tf.logging.info('Setting decay_steps to total training steps.') + decay_steps = training_number_of_steps - slow_start_step + if learning_policy == 'step': + learning_rate = tf.train.exponential_decay( + base_learning_rate, + adjusted_global_step, + learning_rate_decay_step, + learning_rate_decay_factor, + staircase=True) + elif learning_policy == 'poly': + learning_rate = tf.train.polynomial_decay( + base_learning_rate, + adjusted_global_step, + decay_steps=decay_steps, + end_learning_rate=end_learning_rate, + power=learning_power) + elif learning_policy == 'cosine': + learning_rate = tf.train.cosine_decay( + base_learning_rate, + adjusted_global_step, + training_number_of_steps - slow_start_step) + elif learning_policy == 'multi_steps': + if boundaries is None or boundary_learning_rates is None: + raise ValueError('Must set `boundaries` and `boundary_learning_rates` ' + 'for multi_steps learning rate decay.') + learning_rate = tf.train.piecewise_constant_decay( + adjusted_global_step, + boundaries, + boundary_learning_rates) + else: + raise ValueError('Unknown learning policy.') + + adjusted_slow_start_learning_rate = slow_start_learning_rate + if slow_start_burnin_type == 'linear': + # Do linear burnin. Increase linearly from slow_start_learning_rate and + # reach base_learning_rate after (global_step >= slow_start_steps). + adjusted_slow_start_learning_rate = ( + slow_start_learning_rate + + (base_learning_rate - slow_start_learning_rate) * + tf.to_float(global_step) / slow_start_step) + elif slow_start_burnin_type != 'none': + raise ValueError('Unknown burnin type.') + + # Employ small learning rate at the first few steps for warm start. + return tf.where(global_step < slow_start_step, + adjusted_slow_start_learning_rate, learning_rate) diff --git a/models/research/deeplab/vis.py b/models/research/deeplab/vis.py new file mode 100644 index 0000000000000000000000000000000000000000..20808d37bf2f45f196a04391548c6745fcc6603b --- /dev/null +++ b/models/research/deeplab/vis.py @@ -0,0 +1,327 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Segmentation results visualization on a given set of images. + +See model.py for more details and usage. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import os.path +import time +import numpy as np +from six.moves import range +import tensorflow as tf +from tensorflow.contrib import quantize as contrib_quantize +from tensorflow.contrib import training as contrib_training +from deeplab import common +from deeplab import model +from deeplab.datasets import data_generator +from deeplab.utils import save_annotation + +flags = tf.app.flags + +FLAGS = flags.FLAGS + +flags.DEFINE_string('master', '', 'BNS name of the tensorflow server') + +# Settings for log directories. + +flags.DEFINE_string('vis_logdir', None, 'Where to write the event logs.') + +flags.DEFINE_string('checkpoint_dir', None, 'Directory of model checkpoints.') + +# Settings for visualizing the model. + +flags.DEFINE_integer('vis_batch_size', 1, + 'The number of images in each batch during evaluation.') + +flags.DEFINE_list('vis_crop_size', '513,513', + 'Crop size [height, width] for visualization.') + +flags.DEFINE_integer('eval_interval_secs', 60 * 5, + 'How often (in seconds) to run evaluation.') + +# For `xception_65`, use atrous_rates = [12, 24, 36] if output_stride = 8, or +# rates = [6, 12, 18] if output_stride = 16. For `mobilenet_v2`, use None. Note +# one could use different atrous_rates/output_stride during training/evaluation. +flags.DEFINE_multi_integer('atrous_rates', None, + 'Atrous rates for atrous spatial pyramid pooling.') + +flags.DEFINE_integer('output_stride', 16, + 'The ratio of input to output spatial resolution.') + +# Change to [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] for multi-scale test. +flags.DEFINE_multi_float('eval_scales', [1.0], + 'The scales to resize images for evaluation.') + +# Change to True for adding flipped images during test. +flags.DEFINE_bool('add_flipped_images', False, + 'Add flipped images for evaluation or not.') + +flags.DEFINE_integer( + 'quantize_delay_step', -1, + 'Steps to start quantized training. If < 0, will not quantize model.') + +# Dataset settings. + +flags.DEFINE_string('dataset', 'pascal_voc_seg', + 'Name of the segmentation dataset.') + +flags.DEFINE_string('vis_split', 'val', + 'Which split of the dataset used for visualizing results') + +flags.DEFINE_string('dataset_dir', None, 'Where the dataset reside.') + +flags.DEFINE_enum('colormap_type', 'pascal', ['pascal', 'cityscapes', 'ade20k'], + 'Visualization colormap type.') + +flags.DEFINE_boolean('also_save_raw_predictions', False, + 'Also save raw predictions.') + +flags.DEFINE_integer('max_number_of_iterations', 0, + 'Maximum number of visualization iterations. Will loop ' + 'indefinitely upon nonpositive values.') + +# The folder where semantic segmentation predictions are saved. +_SEMANTIC_PREDICTION_SAVE_FOLDER = 'segmentation_results' + +# The folder where raw semantic segmentation predictions are saved. +_RAW_SEMANTIC_PREDICTION_SAVE_FOLDER = 'raw_segmentation_results' + +# The format to save image. +_IMAGE_FORMAT = '%06d_image' + +# The format to save prediction +_PREDICTION_FORMAT = '%06d_prediction' + +# To evaluate Cityscapes results on the evaluation server, the labels used +# during training should be mapped to the labels for evaluation. 
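+# For example, with the mapping below a pixel predicted as train id 0 is
+# rewritten to eval id 7, train id 1 to eval id 8, and so on, before the raw
+# prediction is saved for submission.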
+_CITYSCAPES_TRAIN_ID_TO_EVAL_ID = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 31, 32, 33] + + +def _convert_train_id_to_eval_id(prediction, train_id_to_eval_id): + """Converts the predicted label for evaluation. + + There are cases where the training labels are not equal to the evaluation + labels. This function is used to perform the conversion so that we could + evaluate the results on the evaluation server. + + Args: + prediction: Semantic segmentation prediction. + train_id_to_eval_id: A list mapping from train id to evaluation id. + + Returns: + Semantic segmentation prediction whose labels have been changed. + """ + converted_prediction = prediction.copy() + for train_id, eval_id in enumerate(train_id_to_eval_id): + converted_prediction[prediction == train_id] = eval_id + + return converted_prediction + + +def _process_batch(sess, original_images, semantic_predictions, image_names, + image_heights, image_widths, image_id_offset, save_dir, + raw_save_dir, train_id_to_eval_id=None): + """Evaluates one single batch qualitatively. + + Args: + sess: TensorFlow session. + original_images: One batch of original images. + semantic_predictions: One batch of semantic segmentation predictions. + image_names: Image names. + image_heights: Image heights. + image_widths: Image widths. + image_id_offset: Image id offset for indexing images. + save_dir: The directory where the predictions will be saved. + raw_save_dir: The directory where the raw predictions will be saved. + train_id_to_eval_id: A list mapping from train id to eval id. + """ + (original_images, + semantic_predictions, + image_names, + image_heights, + image_widths) = sess.run([original_images, semantic_predictions, + image_names, image_heights, image_widths]) + + num_image = semantic_predictions.shape[0] + for i in range(num_image): + image_height = np.squeeze(image_heights[i]) + image_width = np.squeeze(image_widths[i]) + original_image = np.squeeze(original_images[i]) + semantic_prediction = np.squeeze(semantic_predictions[i]) + crop_semantic_prediction = semantic_prediction[:image_height, :image_width] + + # Save image. + save_annotation.save_annotation( + original_image, save_dir, _IMAGE_FORMAT % (image_id_offset + i), + add_colormap=False) + + # Save prediction. + save_annotation.save_annotation( + crop_semantic_prediction, save_dir, + _PREDICTION_FORMAT % (image_id_offset + i), add_colormap=True, + colormap_type=FLAGS.colormap_type) + + if FLAGS.also_save_raw_predictions: + image_filename = os.path.basename(image_names[i]) + + if train_id_to_eval_id is not None: + crop_semantic_prediction = _convert_train_id_to_eval_id( + crop_semantic_prediction, + train_id_to_eval_id) + save_annotation.save_annotation( + crop_semantic_prediction, raw_save_dir, image_filename, + add_colormap=False) + + +def main(unused_argv): + tf.logging.set_verbosity(tf.logging.INFO) + + # Get dataset-dependent information. 
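+  # Note: vis_crop_size should be at least as large as the largest image in
+  # the visualization split, since images are padded up to the crop size
+  # before inference (e.g., the default 513,513 covers PASCAL VOC, whose
+  # images are at most 500 pixels on each side).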
+ dataset = data_generator.Dataset( + dataset_name=FLAGS.dataset, + split_name=FLAGS.vis_split, + dataset_dir=FLAGS.dataset_dir, + batch_size=FLAGS.vis_batch_size, + crop_size=[int(sz) for sz in FLAGS.vis_crop_size], + min_resize_value=FLAGS.min_resize_value, + max_resize_value=FLAGS.max_resize_value, + resize_factor=FLAGS.resize_factor, + model_variant=FLAGS.model_variant, + is_training=False, + should_shuffle=False, + should_repeat=False) + + train_id_to_eval_id = None + if dataset.dataset_name == data_generator.get_cityscapes_dataset_name(): + tf.logging.info('Cityscapes requires converting train_id to eval_id.') + train_id_to_eval_id = _CITYSCAPES_TRAIN_ID_TO_EVAL_ID + + # Prepare for visualization. + tf.gfile.MakeDirs(FLAGS.vis_logdir) + save_dir = os.path.join(FLAGS.vis_logdir, _SEMANTIC_PREDICTION_SAVE_FOLDER) + tf.gfile.MakeDirs(save_dir) + raw_save_dir = os.path.join( + FLAGS.vis_logdir, _RAW_SEMANTIC_PREDICTION_SAVE_FOLDER) + tf.gfile.MakeDirs(raw_save_dir) + + tf.logging.info('Visualizing on %s set', FLAGS.vis_split) + + with tf.Graph().as_default(): + samples = dataset.get_one_shot_iterator().get_next() + + model_options = common.ModelOptions( + outputs_to_num_classes={common.OUTPUT_TYPE: dataset.num_of_classes}, + crop_size=[int(sz) for sz in FLAGS.vis_crop_size], + atrous_rates=FLAGS.atrous_rates, + output_stride=FLAGS.output_stride) + + if tuple(FLAGS.eval_scales) == (1.0,): + tf.logging.info('Performing single-scale test.') + predictions = model.predict_labels( + samples[common.IMAGE], + model_options=model_options, + image_pyramid=FLAGS.image_pyramid) + else: + tf.logging.info('Performing multi-scale test.') + if FLAGS.quantize_delay_step >= 0: + raise ValueError( + 'Quantize mode is not supported with multi-scale test.') + predictions = model.predict_labels_multi_scale( + samples[common.IMAGE], + model_options=model_options, + eval_scales=FLAGS.eval_scales, + add_flipped_images=FLAGS.add_flipped_images) + predictions = predictions[common.OUTPUT_TYPE] + + if FLAGS.min_resize_value and FLAGS.max_resize_value: + # Only support batch_size = 1, since we assume the dimensions of original + # image after tf.squeeze is [height, width, 3]. + assert FLAGS.vis_batch_size == 1 + + # Reverse the resizing and padding operations performed in preprocessing. + # First, we slice the valid regions (i.e., remove padded region) and then + # we resize the predictions back. 
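+      # Nearest-neighbor interpolation is used for the resize below so that
+      # the rescaled output still contains only valid (integer) label values.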
+ original_image = tf.squeeze(samples[common.ORIGINAL_IMAGE]) + original_image_shape = tf.shape(original_image) + predictions = tf.slice( + predictions, + [0, 0, 0], + [1, original_image_shape[0], original_image_shape[1]]) + resized_shape = tf.to_int32([tf.squeeze(samples[common.HEIGHT]), + tf.squeeze(samples[common.WIDTH])]) + predictions = tf.squeeze( + tf.image.resize_images(tf.expand_dims(predictions, 3), + resized_shape, + method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, + align_corners=True), 3) + + tf.train.get_or_create_global_step() + if FLAGS.quantize_delay_step >= 0: + contrib_quantize.create_eval_graph() + + num_iteration = 0 + max_num_iteration = FLAGS.max_number_of_iterations + + checkpoints_iterator = contrib_training.checkpoints_iterator( + FLAGS.checkpoint_dir, min_interval_secs=FLAGS.eval_interval_secs) + for checkpoint_path in checkpoints_iterator: + num_iteration += 1 + tf.logging.info( + 'Starting visualization at ' + time.strftime('%Y-%m-%d-%H:%M:%S', + time.gmtime())) + tf.logging.info('Visualizing with model %s', checkpoint_path) + + scaffold = tf.train.Scaffold(init_op=tf.global_variables_initializer()) + session_creator = tf.train.ChiefSessionCreator( + scaffold=scaffold, + master=FLAGS.master, + checkpoint_filename_with_path=checkpoint_path) + with tf.train.MonitoredSession( + session_creator=session_creator, hooks=None) as sess: + batch = 0 + image_id_offset = 0 + + while not sess.should_stop(): + tf.logging.info('Visualizing batch %d', batch + 1) + _process_batch(sess=sess, + original_images=samples[common.ORIGINAL_IMAGE], + semantic_predictions=predictions, + image_names=samples[common.IMAGE_NAME], + image_heights=samples[common.HEIGHT], + image_widths=samples[common.WIDTH], + image_id_offset=image_id_offset, + save_dir=save_dir, + raw_save_dir=raw_save_dir, + train_id_to_eval_id=train_id_to_eval_id) + image_id_offset += FLAGS.vis_batch_size + batch += 1 + + tf.logging.info( + 'Finished visualization at ' + time.strftime('%Y-%m-%d-%H:%M:%S', + time.gmtime())) + if max_num_iteration > 0 and num_iteration >= max_num_iteration: + break + +if __name__ == '__main__': + flags.mark_flag_as_required('checkpoint_dir') + flags.mark_flag_as_required('vis_logdir') + flags.mark_flag_as_required('dataset_dir') + tf.app.run() diff --git a/models/research/delf/.gitignore b/models/research/delf/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..b61ddd100012ab30e0bf438f3a5ab01ea3f44281 --- /dev/null +++ b/models/research/delf/.gitignore @@ -0,0 +1,4 @@ +*pyc +*~ +*pb2.py +*pb2.pyc diff --git a/models/research/delf/DETECTION.md b/models/research/delf/DETECTION.md new file mode 100644 index 0000000000000000000000000000000000000000..7fa7570f74dc58622151bee37f9a2a5697b896de --- /dev/null +++ b/models/research/delf/DETECTION.md @@ -0,0 +1,69 @@ +## Quick start: landmark detection + +[![Paper](http://img.shields.io/badge/paper-arXiv.1812.01584-B3181B.svg)](https://arxiv.org/abs/1812.01584) + +### Install DELF library + +To be able to use this code, please follow +[these instructions](INSTALL_INSTRUCTIONS.md) to properly install the DELF +library. + +### Download Oxford buildings dataset + +To illustrate detector usage, please download the Oxford buildings dataset, by +following the instructions +[here](EXTRACTION_MATCHING.md#download-oxford-buildings-dataset). 
Then, create
+the file `list_images_detector.txt` as follows:
+
+```bash
+# From tensorflow/models/research/delf/delf/python/examples/
+echo data/oxford5k_images/all_souls_000002.jpg >> list_images_detector.txt
+echo data/oxford5k_images/all_souls_000035.jpg >> list_images_detector.txt
+```
+
+### Download detector model
+
+Also, you will need to download the pre-trained detector model:
+
+```bash
+# From tensorflow/models/research/delf/delf/python/examples/
+mkdir parameters && cd parameters
+wget http://storage.googleapis.com/delf/d2r_frcnn_20190411.tar.gz
+tar -xvzf d2r_frcnn_20190411.tar.gz
+```
+
+**Note**: this is the Faster-RCNN based model. We also release a MobileNet-SSD
+model; see the [README](README.md#pre-trained-models) for the download link.
+The instructions should work seamlessly for both models.
+
+### Detecting landmarks
+
+Now that you have everything in place, running this command should detect boxes
+for the images `all_souls_000002.jpg` and `all_souls_000035.jpg`, with a
+threshold of 0.8, and produce visualizations.
+
+```bash
+# From tensorflow/models/research/delf/delf/python/examples/
+python3 extract_boxes.py \
+  --detector_path parameters/d2r_frcnn_20190411 \
+  --detector_thresh 0.8 \
+  --list_images_path list_images_detector.txt \
+  --output_dir data/oxford5k_boxes \
+  --output_viz_dir data/oxford5k_boxes_viz
+```
+
+Two images are generated in the `data/oxford5k_boxes_viz` directory; they
+should look similar to these:
+
+![DetectionExample1](delf/python/examples/detection_example_1.jpg)
+![DetectionExample2](delf/python/examples/detection_example_2.jpg)
+
+### Troubleshooting
+
+#### `matplotlib`
+
+`matplotlib` may complain with a message such as `no display name and no
+$DISPLAY environment variable`. To fix this, one option is to add the line
+`backend : Agg` to the file `.config/matplotlib/matplotlibrc`. For more on this
+problem, see the discussion
+[here](https://stackoverflow.com/questions/37604289/tkinter-tclerror-no-display-name-and-no-display-environment-variable).
diff --git a/models/research/delf/EXTRACTION_MATCHING.md b/models/research/delf/EXTRACTION_MATCHING.md
new file mode 100644
index 0000000000000000000000000000000000000000..53159638587282658129aa13ac165fbd7d3803ea
--- /dev/null
+++ b/models/research/delf/EXTRACTION_MATCHING.md
@@ -0,0 +1,87 @@
+## Quick start: DELF extraction and matching
+
+[![Paper](http://img.shields.io/badge/paper-arXiv.1612.06321-B3181B.svg)](https://arxiv.org/abs/1612.06321)
+
+### Install DELF library
+
+To be able to use this code, please follow
+[these instructions](INSTALL_INSTRUCTIONS.md) to properly install the DELF
+library.
+
+### Download Oxford buildings dataset
+
+To illustrate DELF usage, please download the Oxford buildings dataset. To
+follow these instructions closely, please download the dataset to the
+`tensorflow/models/research/delf/delf/python/examples` directory, as in the
+following commands:
+
+```bash
+# From tensorflow/models/research/delf/delf/python/examples/
+mkdir data && cd data
+wget http://www.robots.ox.ac.uk/~vgg/data/oxbuildings/oxbuild_images.tgz
+mkdir oxford5k_images oxford5k_features
+tar -xvzf oxbuild_images.tgz -C oxford5k_images/
+cd ../
+echo data/oxford5k_images/hertford_000056.jpg >> list_images.txt
+echo data/oxford5k_images/oxford_000317.jpg >> list_images.txt
+```
+
+### Download pre-trained DELF model
+
+Also, you will need to download the trained DELF model:
+
+```bash
+# From tensorflow/models/research/delf/delf/python/examples/
+mkdir parameters && cd parameters
+wget http://storage.googleapis.com/delf/delf_gld_20190411.tar.gz
+tar -xvzf delf_gld_20190411.tar.gz
+```
+
+### DELF feature extraction
+
+Now that you have everything in place, running this command should extract DELF
+features for the images `hertford_000056.jpg` and `oxford_000317.jpg`:
+
+```bash
+# From tensorflow/models/research/delf/delf/python/examples/
+python3 extract_features.py \
+  --config_path delf_config_example.pbtxt \
+  --list_images_path list_images.txt \
+  --output_dir data/oxford5k_features
+```
+
+### Image matching using DELF features
+
+After feature extraction, run this command to perform feature matching between
+the images `hertford_000056.jpg` and `oxford_000317.jpg`:
+
+```bash
+python3 match_images.py \
+  --image_1_path data/oxford5k_images/hertford_000056.jpg \
+  --image_2_path data/oxford5k_images/oxford_000317.jpg \
+  --features_1_path data/oxford5k_features/hertford_000056.delf \
+  --features_2_path data/oxford5k_features/oxford_000317.delf \
+  --output_image matched_images.png
+```
+
+The image `matched_images.png` is generated and should look similar to this one:
+
+![MatchedImagesExample](delf/python/examples/matched_images_example.jpg)
+
+### Troubleshooting
+
+#### `matplotlib`
+
+`matplotlib` may complain with a message such as `no display name and no
+$DISPLAY environment variable`. To fix this, one option is to add the line
+`backend : Agg` to the file `.config/matplotlib/matplotlibrc`. For more on this
+problem, see the discussion
+[here](https://stackoverflow.com/questions/37604289/tkinter-tclerror-no-display-name-and-no-display-environment-variable).
+
+#### `skimage`
+
+By default, skimage 0.13.XX or 0.14.1 is installed if you followed the
+instructions. If you run into scikit-image related issues, upgrading to a
+version above 0.14.1 with `pip3 install -U scikit-image` should fix them; see
+[this discussion](https://github.com/scikit-image/scikit-image/issues/3649#issuecomment-455273659)
+for details.
diff --git a/models/research/delf/INSTALL_INSTRUCTIONS.md b/models/research/delf/INSTALL_INSTRUCTIONS.md
new file mode 100644
index 0000000000000000000000000000000000000000..4f66e9389fdd126dd769a8282a69482b989c9c9e
--- /dev/null
+++ b/models/research/delf/INSTALL_INSTRUCTIONS.md
@@ -0,0 +1,122 @@
+## DELF installation
+
+### Tensorflow
+
+[![TensorFlow 2.1](https://img.shields.io/badge/tensorflow-2.1-brightgreen)](https://github.com/tensorflow/tensorflow/releases/tag/v2.1.0)
+[![Python 3.6](https://img.shields.io/badge/python-3.6-blue.svg)](https://www.python.org/downloads/release/python-360/)
+
+For detailed steps to install Tensorflow, follow the
+[Tensorflow installation instructions](https://www.tensorflow.org/install/).
A +typical user can install Tensorflow using one of the following commands: + +```bash +# For CPU: +pip3 install 'tensorflow' +# For GPU: +pip3 install 'tensorflow-gpu' +``` + +### TF-Slim + +Note: currently, we need to install the latest version from source, to avoid +using previous versions which relied on tf.contrib (which is now deprecated). + +```bash +git clone git@github.com:google-research/tf-slim.git +cd tf-slim +pip3 install . +``` + +Note that these commands assume you are cloning using SSH. If you are using +HTTPS instead, use `git clone https://github.com/google-research/tf-slim.git` +instead. See +[this link](https://help.github.com/en/github/using-git/which-remote-url-should-i-use) +for more information. + +### Protobuf + +The DELF library uses [protobuf](https://github.com/google/protobuf) (the python +version) to configure feature extraction and its format. You will need the +`protoc` compiler, version >= 3.3. The easiest way to get it is to download +directly. For Linux, this can be done as (see +[here](https://github.com/google/protobuf/releases) for other platforms): + +```bash +wget https://github.com/google/protobuf/releases/download/v3.3.0/protoc-3.3.0-linux-x86_64.zip +unzip protoc-3.3.0-linux-x86_64.zip +PATH_TO_PROTOC=`pwd` +``` + +### Python dependencies + +Install python library dependencies: + +```bash +pip3 install matplotlib numpy scikit-image scipy +sudo apt-get install python3-tk +``` + +### `tensorflow/models` + +Now, clone `tensorflow/models`, and install required libraries: (note that the +`object_detection` library requires you to add `tensorflow/models/research/` to +your `PYTHONPATH`, as instructed +[here](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md)) + +```bash +git clone git@github.com:tensorflow/models.git + +# Setup the object_detection module by editing PYTHONPATH. +cd .. +# From tensorflow/models/research/ +export PYTHONPATH=$PYTHONPATH:`pwd` +``` + +Note that these commands assume you are cloning using SSH. If you are using +HTTPS instead, use `git clone https://github.com/tensorflow/models.git` instead. +See +[this link](https://help.github.com/en/github/using-git/which-remote-url-should-i-use) +for more information. + +Then, compile DELF's protobufs. Use `PATH_TO_PROTOC` as the directory where you +downloaded the `protoc` compiler. + +```bash +# From tensorflow/models/research/delf/ +${PATH_TO_PROTOC?}/bin/protoc delf/protos/*.proto --python_out=. +``` + +Finally, install the DELF package. This may also install some other dependencies +under the hood. + +```bash +# From tensorflow/models/research/delf/ +pip3 install -e . # Install "delf" package. +``` + +At this point, running + +```bash +python3 -c 'import delf' +``` + +should just return without complaints. This indicates that the DELF package is +loaded successfully. + +### Troubleshooting + +#### `pip3 install` + +Issues might be observed if using `pip3 install` with `-e` option (editable +mode). You may try out to simply remove the `-e` from the commands above. Also, +depending on your machine setup, you might need to run the `sudo pip3 install` +command, that is with a `sudo` at the beginning. + +#### Cloning github repositories + +The default commands above assume you are cloning using SSH. If you are using +HTTPS instead, use for example `git clone +https://github.com/tensorflow/models.git` instead of `git clone +git@github.com:tensorflow/models.git`. 
See +[this link](https://help.github.com/en/github/using-git/which-remote-url-should-i-use) +for more information. diff --git a/models/research/delf/README.md b/models/research/delf/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f10852759c3455ae2990475ea917a4e45ee96264 --- /dev/null +++ b/models/research/delf/README.md @@ -0,0 +1,324 @@ +# Deep Local and Global Image Features + +[![TensorFlow 2.1](https://img.shields.io/badge/tensorflow-2.1-brightgreen)](https://github.com/tensorflow/tensorflow/releases/tag/v2.1.0) +[![Python 3.6](https://img.shields.io/badge/python-3.6-blue.svg)](https://www.python.org/downloads/release/python-360/) + +This project presents code for deep local and global image feature methods, +which are particularly useful for the computer vision tasks of instance-level +recognition and retrieval. These were introduced in the +[DELF](https://arxiv.org/abs/1612.06321), +[Detect-to-Retrieve](https://arxiv.org/abs/1812.01584), +[DELG](https://arxiv.org/abs/2001.05027) and +[Google Landmarks Dataset v2](https://arxiv.org/abs/2004.01804) papers. + +We provide Tensorflow code for building and training models, and python code for +image retrieval and local feature matching. Pre-trained models for the landmark +recognition domain are also provided. + +If you make use of this codebase, please consider citing the following papers: + +DELF: +[![Paper](http://img.shields.io/badge/paper-arXiv.1612.06321-B3181B.svg)](https://arxiv.org/abs/1612.06321) + +``` +"Large-Scale Image Retrieval with Attentive Deep Local Features", +H. Noh, A. Araujo, J. Sim, T. Weyand and B. Han, +Proc. ICCV'17 +``` + +Detect-to-Retrieve: +[![Paper](http://img.shields.io/badge/paper-arXiv.1812.01584-B3181B.svg)](https://arxiv.org/abs/1812.01584) + +``` +"Detect-to-Retrieve: Efficient Regional Aggregation for Image Search", +M. Teichmann*, A. Araujo*, M. Zhu and J. Sim, +Proc. CVPR'19 +``` + +DELG: +[![Paper](http://img.shields.io/badge/paper-arXiv.2001.05027-B3181B.svg)](https://arxiv.org/abs/2001.05027) + +``` +"Unifying Deep Local and Global Features for Image Search", +B. Cao*, A. Araujo* and J. Sim, +arxiv:2001.05027 +``` + +GLDv2: +[![Paper](http://img.shields.io/badge/paper-arXiv.2004.01804-B3181B.svg)](https://arxiv.org/abs/2004.01804) + +``` +"Google Landmarks Dataset v2 - A Large-Scale Benchmark for Instance-Level Recognition and Retrieval", +T. Weyand*, A. Araujo*, B. Cao and J. Sim, +Proc. CVPR'20 +``` + +## News + +- [Apr'20] Check out our CVPR'20 paper: ["Google Landmarks Dataset v2 - A + Large-Scale Benchmark for Instance-Level Recognition and + Retrieval"](https://arxiv.org/abs/2004.01804) +- [Jan'20] Check out our new paper: + ["Unifying Deep Local and Global Features for Image Search"](https://arxiv.org/abs/2001.05027) +- [Jun'19] DELF achieved 2nd place in + [CVPR Visual Localization challenge (Local Features track)](https://sites.google.com/corp/view/ltvl2019). + See our slides + [here](https://docs.google.com/presentation/d/e/2PACX-1vTswzoXelqFqI_pCEIVl2uazeyGr7aKNklWHQCX-CbQ7MB17gaycqIaDTguuUCRm6_lXHwCdrkP7n1x/pub?start=false&loop=false&delayms=3000). +- [Apr'19] Check out our CVPR'19 paper: + ["Detect-to-Retrieve: Efficient Regional Aggregation for Image Search"](https://arxiv.org/abs/1812.01584) +- [Jun'18] DELF achieved state-of-the-art results in a CVPR'18 image retrieval + paper: [Radenovic et al., "Revisiting Oxford and Paris: Large-Scale Image + Retrieval Benchmarking"](https://arxiv.org/abs/1803.11285). 
+- [Apr'18] DELF was featured in
+  [ModelDepot](https://modeldepot.io/mikeshi/delf/overview)
+- [Mar'18] DELF is now available in
+  [TF-Hub](https://www.tensorflow.org/hub/modules/google/delf/1)
+
+## Datasets
+
+We have two Google-Landmarks dataset versions:
+
+- Initial version (v1) can be found
+  [here](https://www.kaggle.com/google/google-landmarks-dataset). It includes
+  the Google Landmark Boxes, which were described in the Detect-to-Retrieve
+  paper.
+- Second version (v2) has been released as part of two Kaggle challenges:
+  [Landmark Recognition](https://www.kaggle.com/c/landmark-recognition-2019)
+  and [Landmark Retrieval](https://www.kaggle.com/c/landmark-retrieval-2019).
+  It can be downloaded from CVDF
+  [here](https://github.com/cvdfoundation/google-landmark). See also
+  [the CVPR'20 paper](https://arxiv.org/abs/2004.01804) on this new dataset
+  version.
+
+If you make use of these datasets in your research, please consider citing the
+papers mentioned above.
+
+## Installation
+
+To be able to use this code, please follow
+[these instructions](INSTALL_INSTRUCTIONS.md) to properly install the DELF
+library.
+
+## Quick start
+
+### Pre-trained models
+
+We release several pre-trained models. See instructions in the following
+sections for examples of how to use the models.
+
+**DELF pre-trained on the Google-Landmarks dataset v1**
+([link](http://storage.googleapis.com/delf/delf_gld_20190411.tar.gz)). Presented
+in the [Detect-to-Retrieve paper](https://arxiv.org/abs/1812.01584). Boosts
+performance by ~4% mAP compared to the ICCV'17 DELF model.
+
+**DELG pre-trained on the Google-Landmarks dataset v1**
+([link](http://storage.googleapis.com/delf/delg_gld_20200520.tar.gz)). Presented
+in the [DELG paper](https://arxiv.org/abs/2001.05027).
+
+**RN101-ArcFace pre-trained on the Google-Landmarks dataset v2 (train-clean)**
+([link](https://storage.googleapis.com/delf/rn101_af_gldv2clean_20200521.tar.gz)).
+Presented in the [GLDv2 paper](https://arxiv.org/abs/2004.01804).
+
+**DELF pre-trained on Landmarks-Clean/Landmarks-Full dataset**
+([link](http://storage.googleapis.com/delf/delf_v1_20171026.tar.gz)). Presented
+in the [DELF paper](https://arxiv.org/abs/1612.06321); the model was trained on
+the dataset released by the [DIR paper](https://arxiv.org/abs/1604.01325).
+
+**Faster-RCNN detector pre-trained on Google Landmark Boxes**
+([link](http://storage.googleapis.com/delf/d2r_frcnn_20190411.tar.gz)).
+Presented in the [Detect-to-Retrieve paper](https://arxiv.org/abs/1812.01584).
+
+**MobileNet-SSD detector pre-trained on Google Landmark Boxes**
+([link](http://storage.googleapis.com/delf/d2r_mnetssd_20190411.tar.gz)).
+Presented in the [Detect-to-Retrieve paper](https://arxiv.org/abs/1812.01584).
+
+Besides these, we also release pre-trained codebooks for local feature
+aggregation. See the
+[Detect-to-Retrieve instructions](delf/python/detect_to_retrieve/DETECT_TO_RETRIEVE_INSTRUCTIONS.md)
+for details.
+
+### DELF extraction and matching
+
+Please follow [these instructions](EXTRACTION_MATCHING.md). At the end, you
+should obtain a nice figure showing local feature matches, as:
+
+![MatchedImagesExample](delf/python/examples/matched_images_example.jpg)
+
+### DELF training
+
+Please follow [these instructions](delf/python/training/README.md).
+
+### DELG
+
+Please follow [these instructions](delf/python/delg/DELG_INSTRUCTIONS.md). At
+the end, you should obtain image retrieval results on the Revisited Oxford/Paris
+datasets.
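+
+For reference, the DELG checkpoint listed under
+[Pre-trained models](#pre-trained-models) above can be downloaded and unpacked
+in the same way as the other released models; the exact directory where it
+should be placed is given in the instructions linked above, and `parameters/`
+below simply mirrors the convention used in the quick starts:
+
+```bash
+mkdir -p parameters && cd parameters
+wget http://storage.googleapis.com/delf/delg_gld_20200520.tar.gz
+tar -xvzf delg_gld_20200520.tar.gz
+```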
+ +### GLDv2 baseline + +Please follow +[these instructions](delf/python/google_landmarks_dataset/README.md). At the +end, you should obtain image retrieval results on the Revisited Oxford/Paris +datasets. + +### Landmark detection + +Please follow [these instructions](DETECTION.md). At the end, you should obtain +a nice figure showing a detection, as: + +![DetectionExample1](delf/python/examples/detection_example_1.jpg) + +### Detect-to-Retrieve + +Please follow +[these instructions](delf/python/detect_to_retrieve/DETECT_TO_RETRIEVE_INSTRUCTIONS.md). +At the end, you should obtain image retrieval results on the Revisited +Oxford/Paris datasets. + +## Code overview + +DELF/D2R/DELG/GLD code is located under the `delf` directory. There are two +directories therein, `protos` and `python`. + +### `delf/protos` + +This directory contains protobufs: + +- `aggregation_config.proto`: protobuf for configuring local feature + aggregation. +- `box.proto`: protobuf for serializing detected boxes. +- `datum.proto`: general-purpose protobuf for serializing float tensors. +- `delf_config.proto`: protobuf for configuring DELF/DELG extraction. +- `feature.proto`: protobuf for serializing DELF features. + +### `delf/python` + +This directory contains files for several different purposes: + +- `box_io.py`, `datum_io.py`, `feature_io.py` are helper files for reading and + writing tensors and features. +- `delf_v1.py` contains code to create DELF models. +- `feature_aggregation_extractor.py` contains a module to perform local + feature aggregation. +- `feature_aggregation_similarity.py` contains a module to perform similarity + computation for aggregated local features. +- `feature_extractor.py` contains the code to extract features using DELF. + This is particularly useful for extracting features over multiple scales, + with keypoint selection based on attention scores, and PCA/whitening + post-processing. + +The subdirectory `delf/python/examples` contains sample scripts to run DELF +feature extraction/matching, and object detection: + +- `delf_config_example.pbtxt` shows an example instantiation of the DelfConfig + proto, used for DELF feature extraction. +- `detector.py` is a module to construct an object detector function. +- `extract_boxes.py` enables object detection from a list of images. +- `extract_features.py` enables DELF extraction from a list of images. +- `extractor.py` is a module to construct a DELF/DELG local feature extraction + function. +- `match_images.py` supports image matching using DELF features extracted + using `extract_features.py`. + +The subdirectory `delf/python/delg` contains sample scripts/configs related to +the DELG paper: + +- `delg_gld_config.pbtxt` gives the DelfConfig used in DELG paper. +- `extract_features.py` for local+global feature extraction on Revisited + datasets. +- `perform_retrieval.py` for performing retrieval/evaluating methods on + Revisited datasets. + +The subdirectory `delf/python/detect_to_retrieve` contains sample +scripts/configs related to the Detect-to-Retrieve paper: + +- `aggregation_extraction.py` is a library to extract/save feature + aggregation. +- `boxes_and_features_extraction.py` is a library to extract/save boxes and + DELF features. +- `cluster_delf_features.py` for local feature clustering. +- `dataset.py` for parsing/evaluating results on Revisited Oxford/Paris + datasets. +- `delf_gld_config.pbtxt` gives the DelfConfig used in Detect-to-Retrieve + paper. +- `extract_aggregation.py` for aggregated local feature extraction. 
+- `extract_index_boxes_and_features.py` for index image local feature + extraction / bounding box detection on Revisited datasets. +- `extract_query_features.py` for query image local feature extraction on + Revisited datasets. +- `image_reranking.py` is a module to re-rank images with geometric + verification. +- `perform_retrieval.py` for performing retrieval/evaluating methods using + aggregated local features on Revisited datasets. +- `index_aggregation_config.pbtxt`, `query_aggregation_config.pbtxt` give + AggregationConfig's for Detect-to-Retrieve experiments. + +The subdirectory `delf/python/google_landmarks_dataset` contains sample +scripts/modules for computing GLD metrics / reproducing results from the GLDv2 +paper: + +- `compute_recognition_metrics.py` performs recognition metric computation + given input predictions and solution files. +- `compute_retrieval_metrics.py` performs retrieval metric computation given + input predictions and solution files. +- `dataset_file_io.py` is a module for dataset-related file IO. +- `metrics.py` is a module for GLD metric computation. +- `rn101_af_gldv2clean_config.pbtxt` gives the DelfConfig used in the + ResNet101-ArcFace (trained on GLDv2-train-clean) baseline used in the GLDv2 + paper. + +The subdirectory `delf/python/training` contains sample scripts/modules for +performing DELF training: + +- `datasets/googlelandmarks.py` is the dataset module used for training. +- `model/delf_model.py` is the model module used for training. +- `model/export_model.py` is a script for exporting trained models in the + format used by the inference code. +- `model/export_model_utils.py` is a module with utilities for model + exporting. +- `model/resnet50.py` is a module with a backbone RN50 implementation. +- `build_image_dataset.py` converts downloaded dataset into TFRecords format + for training. +- `train.py` is the main training script. + +Besides these, other files in the different subdirectories contain tests for the +various modules. + +## Maintainers + +André Araujo (@andrefaraujo) + +## Release history + +### May, 2020 + +- Codebase is now Python3-first +- DELG model/code released +- GLDv2 baseline model released + +**Thanks to contributors**: Barbara Fusinska and André Araujo. + +### April, 2020 (version 2.0) + +- Initial DELF training code released. +- Codebase is now fully compatible with TF 2.1. + +**Thanks to contributors**: Arun Mukundan, Yuewei Na and André Araujo. + +### April, 2019 + +Detect-to-Retrieve code released. + +Includes pre-trained models to detect landmark boxes, and DELF model pre-trained +on Google Landmarks v1 dataset. + +**Thanks to contributors**: André Araujo, Marvin Teichmann, Menglong Zhu, +Jack Sim. + +### October, 2017 + +Initial release containing DELF-v1 code, including feature extraction and +matching examples. Pre-trained DELF model from ICCV'17 paper is released. + +**Thanks to contributors**: André Araujo, Hyeonwoo Noh, Youlong Cheng, +Jack Sim. diff --git a/models/research/delf/delf/__init__.py b/models/research/delf/delf/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a52df3c4546414e61f479357d06b65d4c132c753 --- /dev/null +++ b/models/research/delf/delf/__init__.py @@ -0,0 +1,39 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Module to extract deep local features.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# pylint: disable=unused-import +from delf.protos import aggregation_config_pb2 +from delf.protos import box_pb2 +from delf.protos import datum_pb2 +from delf.protos import delf_config_pb2 +from delf.protos import feature_pb2 +from delf.python import box_io +from delf.python import datum_io +from delf.python import feature_aggregation_extractor +from delf.python import feature_aggregation_similarity +from delf.python import feature_extractor +from delf.python import feature_io +from delf.python import utils +from delf.python.examples import detector +from delf.python.examples import extractor +from delf.python import detect_to_retrieve +from delf.python import training +from delf.python.training import model +from delf.python.training import datasets +# pylint: enable=unused-import diff --git a/models/research/delf/delf/protos/__init__.py b/models/research/delf/delf/protos/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/delf/delf/protos/aggregation_config.proto b/models/research/delf/delf/protos/aggregation_config.proto new file mode 100644 index 0000000000000000000000000000000000000000..b1d5953d43ffc84f435c57be7145c7c17ae01186 --- /dev/null +++ b/models/research/delf/delf/protos/aggregation_config.proto @@ -0,0 +1,63 @@ +// Protocol buffer for feature aggregation configuration. +// +// Used for both extraction and comparison of aggregated representations. Note +// that some options are only relevant for the former or the latter. +// +// For more details, please refer to the paper: +// "Detect-to-Retrieve: Efficient Regional Aggregation for Image Search", +// Proc. CVPR'19 (https://arxiv.org/abs/1812.01584). + +syntax = "proto2"; + +package delf.protos; + +message AggregationConfig { + // Number of codewords (ie, visual words) in the codebook. + optional int32 codebook_size = 1 [default = 65536]; + + // Dimensionality of local features (eg, 128 for DELF used in + // Detect-to-Retrieve paper). + optional int32 feature_dimensionality = 2 [default = 128]; + + // Type of aggregation to use. + // For example, to use R-ASMK*, `aggregation_type` should be set to ASMK_STAR + // and `use_regional_aggregation` should be set to true. + enum AggregationType { + INVALID = 0; + VLAD = 1; + ASMK = 2; + ASMK_STAR = 3; + } + optional AggregationType aggregation_type = 3 [default = ASMK_STAR]; + + // L2 normalization option. + // - For vanilla aggregated kernels (eg, VLAD/ASMK/ASMK*), this should be + // set to true. + // - For regional aggregated kernels (ie, if `use_regional_aggregation` is + // true, leading to R-VLAD/R-ASMK/R-ASMK*), this should be set to false. + // Note that it is used differently depending on the `aggregation_type`: + // - For VLAD, this option is only used for extraction. + // - For ASMK/ASMK*, this option is only used for comparisons. 
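+  //   For example, an R-ASMK* setup would combine aggregation_type = ASMK_STAR,
+  //   use_regional_aggregation = true and use_l2_normalization = false.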
+ optional bool use_l2_normalization = 4 [default = true]; + + // Additional options used only for extraction. + // - Path to codebook checkpoint for aggregation. + optional string codebook_path = 5; + // - Number of visual words to assign each feature. + optional int32 num_assignments = 6 [default = 1]; + // - Whether to use regional aggregation. + optional bool use_regional_aggregation = 7 [default = false]; + // - Batch size to use for local features when computing aggregated + // representations. Particularly useful if `codebook_size` and + // `feature_dimensionality` are large, to avoid OOM. A value of zero or + // lower indicates that no batching is used. + optional int32 feature_batch_size = 10 [default = 100]; + + // Additional options used only for comparison. + // Only relevant if `aggregation_type` is ASMK or ASMK_STAR. + // - Power-law exponent for similarity of visual word descriptors. + optional float alpha = 8 [default = 3.0]; + // - Threshold above which similarity of visual word descriptors are + // considered; below this, similarity is set to zero. + optional float tau = 9 [default = 0.0]; +} diff --git a/models/research/delf/delf/protos/box.proto b/models/research/delf/delf/protos/box.proto new file mode 100644 index 0000000000000000000000000000000000000000..28da7fb71410262f9be98e206e756c82ba3beb38 --- /dev/null +++ b/models/research/delf/delf/protos/box.proto @@ -0,0 +1,24 @@ +// Protocol buffer for serializing detected bounding boxes. + +syntax = "proto2"; + +package delf.protos; + +message Box { + // Coordinates: [ymin, xmin, ymax, xmax] corresponds to + // [top, left, bottom, right]. + optional float ymin = 1; + optional float xmin = 2; + optional float ymax = 3; + optional float xmax = 4; + + // Detection score. Usually, the higher the more confident. + optional float score = 5; + + // Indicates which class the box corresponds to. + optional int32 class_index = 6; +} + +message Boxes { + repeated Box box = 1; +} diff --git a/models/research/delf/delf/protos/datum.proto b/models/research/delf/delf/protos/datum.proto new file mode 100644 index 0000000000000000000000000000000000000000..6806e56b25e912bfb6a87280432c8566dba0c41a --- /dev/null +++ b/models/research/delf/delf/protos/datum.proto @@ -0,0 +1,66 @@ +// Protocol buffer for serializing arbitrary float tensors. +// Note: Currently only floating point feature is supported. + +syntax = "proto2"; + +package delf.protos; + +// A DatumProto is a data structure used to serialize tensor with arbitrary +// shape. DatumProto contains an array of floating point values and its shape +// is represented as a sequence of integer values. Values are contained in +// row major order. +// +// Example: +// 3 x 2 array +// +// [1.1, 2.2] +// [3.3, 4.4] +// [5.5, 6.6] +// +// can be represented with the following DatumProto: +// +// DatumProto { +// shape { +// dim: 3 +// dim: 2 +// } +// float_list { +// value: 1.1 +// value: 2.2 +// value: 3.3 +// value: 4.4 +// value: 5.5 +// value: 6.6 +// } +// } + +// DatumShape is array of dimension of the tensor. +message DatumShape { + repeated int64 dim = 1 [packed = true]; +} + +// FloatList is a container of tensor values, which are saved as a list of +// floating point values. +message FloatList { + repeated float value = 1 [packed = true]; +} + +// Uint32List is a container of tensor values, which are saved as a list of +// uint32 values. 
+message Uint32List { + repeated uint32 value = 1 [packed = true]; +} + +message DatumProto { + optional DatumShape shape = 1; + oneof kind_oneof { + FloatList float_list = 2; + Uint32List uint32_list = 3; + } +} + +// Groups two DatumProto's. +message DatumPairProto { + optional DatumProto first = 1; + optional DatumProto second = 2; +} diff --git a/models/research/delf/delf/protos/delf_config.proto b/models/research/delf/delf/protos/delf_config.proto new file mode 100644 index 0000000000000000000000000000000000000000..10ae0a614cbdd483f08f1f9f806a9d3adbe6b46d --- /dev/null +++ b/models/research/delf/delf/protos/delf_config.proto @@ -0,0 +1,121 @@ +// Protocol buffer for configuring DELF feature extraction. + +syntax = "proto2"; + +package delf.protos; + +message DelfPcaParameters { + // Path to PCA mean file. + optional string mean_path = 1; // Required. + + // Path to PCA matrix file. + optional string projection_matrix_path = 2; // Required. + + // Dimensionality of feature after PCA. + optional int32 pca_dim = 3; // Required. + + // If whitening is to be used, this must be set to true. + optional bool use_whitening = 4 [default = false]; + + // Path to PCA variances file, used for whitening. This is used only if + // use_whitening is set to true. + optional string pca_variances_path = 5; +} + +message DelfLocalFeatureConfig { + // If PCA is to be used, this must be set to true. + optional bool use_pca = 1 [default = true]; + + // Target layer name for DELF model. This is used to obtain receptive field + // parameters used for localizing features with respect to the input image. + optional string layer_name = 2 [default = ""]; + + // Intersection over union threshold for the non-max suppression (NMS) + // operation. If two features overlap by at most this amount, both are kept. + // Otherwise, the one with largest attention score is kept. This should be a + // number between 0.0 (no region is selected) and 1.0 (all regions are + // selected and NMS is not performed). + optional float iou_threshold = 3 [default = 1.0]; + + // Maximum number of features that will be selected. The features with largest + // scores (eg, largest attention score if score_type is "Att") are the + // selected ones. + optional int32 max_feature_num = 4 [default = 1000]; + + // Threshold to be used for feature selection: no feature with score lower + // than this number will be selected). + optional float score_threshold = 5 [default = 100.0]; + + // PCA parameters for DELF local feature. This is used only if use_pca is + // true. + optional DelfPcaParameters pca_parameters = 6; +} + +message DelfGlobalFeatureConfig { + // If PCA is to be used, this must be set to true. + optional bool use_pca = 1 [default = true]; + + // PCA parameters for DELF global feature. This is used only if use_pca is + // true. + optional DelfPcaParameters pca_parameters = 2; + + // Denotes indices of DelfConfig's scales that will be used for global + // descriptor extraction. For example, if DelfConfig's image_scales are + // [0.25, 0.5, 1.0] and image_scales_ind is [0, 2], global descriptor + // extraction will use solely scales [0.25, 1.0]. Note that local feature + // extraction will still use [0.25, 0.5, 1.0] in this case. If empty (default) + // , all scales are used. + repeated int32 image_scales_ind = 3; +} + +message DelfConfig { + // Whether to extract local features when using the model. + // At least one of {use_local_features, use_global_features} must be true. 
+ optional bool use_local_features = 7 [default = true]; + // Configuration used for local features. Note: this is used only if + // use_local_features is true. + optional DelfLocalFeatureConfig delf_local_config = 3; + + // Whether to extract global features when using the model. + // At least one of {use_local_features, use_global_features} must be true. + optional bool use_global_features = 8 [default = false]; + // Configuration used for global features. Note: this is used only if + // use_global_features is true. + optional DelfGlobalFeatureConfig delf_global_config = 9; + + // Path to DELF model. + optional string model_path = 1; // Required. + + // Image scales to be used. + repeated float image_scales = 2; + + // Image resizing options. + // - The maximum/minimum image size (in terms of height or width) to be used + // when extracting DELF features. If set to -1 (default), no upper/lower + // bound for image size. If use_square_images option is false (default): + // * If the height *OR* width is larger than max_image_size, it will be + // resized to max_image_size, and the other dimension will be resized by + // preserving the aspect ratio. + // * If both height *AND* width are smaller than min_image_size, the larger + // side is set to min_image_size. + // - If use_square_images option is true, it needs to be resized to square + // resolution. To be more specific: + // * If the height *OR* width is larger than max_image_size, it is resized + // to square resolution of max_image_size. + // * If both height *AND* width are smaller than min_image_size, it is + // resized to square resolution of min_image_size. + // * Else, if the input image's resolution is not square, it is resized to + // square resolution of the larger side. + // Image resizing is useful when we want to ensure that the input to the image + // pyramid has a reasonable number of pixels, which could have large impact in + // terms of image matching performance. + // When using local features, note that the feature locations and scales will + // be consistent with the original image input size. + // Note that when both max_image_size and min_image_size are specified + // (which is a valid and legit use case), as long as max_image_size >= + // min_image_size, there's no conflicting scenario (i.e. never triggers both + // enlarging / shrinking). Bilinear interpolation is used. + optional int32 max_image_size = 4 [default = -1]; + optional int32 min_image_size = 5 [default = -1]; + optional bool use_square_images = 6 [default = false]; +} diff --git a/models/research/delf/delf/protos/feature.proto b/models/research/delf/delf/protos/feature.proto new file mode 100644 index 0000000000000000000000000000000000000000..64c342fe2c36b9170de10628b8ddc83ee3cfb2c6 --- /dev/null +++ b/models/research/delf/delf/protos/feature.proto @@ -0,0 +1,22 @@ +// Protocol buffer for serializing the DELF feature information. + +syntax = "proto2"; + +package delf.protos; + +import "delf/protos/datum.proto"; + +// FloatList is the container of tensor values. The tensor values are saved as +// a list of floating point values. 
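+// A DelfFeature holds one local feature: its descriptor (a DatumProto float
+// tensor), the keypoint location (x, y) in image coordinates, and the scale,
+// orientation and strength of the detected keypoint.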
+message DelfFeature { + optional DatumProto descriptor = 1; + optional float x = 2; + optional float y = 3; + optional float scale = 4; + optional float orientation = 5; + optional float strength = 6; +} + +message DelfFeatures { + repeated DelfFeature feature = 1; +} diff --git a/models/research/delf/delf/python/__init__.py b/models/research/delf/delf/python/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/delf/delf/python/box_io.py b/models/research/delf/delf/python/box_io.py new file mode 100644 index 0000000000000000000000000000000000000000..8b0f0d2c973d5b83f9110f651f5c5541fad049b7 --- /dev/null +++ b/models/research/delf/delf/python/box_io.py @@ -0,0 +1,151 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Python interface for Boxes proto. + +Support read and write of Boxes from/to numpy arrays and file. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf + +from delf import box_pb2 + + +def ArraysToBoxes(boxes, scores, class_indices): + """Converts `boxes` to Boxes proto. + + Args: + boxes: [N, 4] float array denoting bounding box coordinates, in format [top, + left, bottom, right]. + scores: [N] float array with detection scores. + class_indices: [N] int array with class indices. + + Returns: + boxes_proto: Boxes object. + """ + num_boxes = len(scores) + assert num_boxes == boxes.shape[0] + assert num_boxes == len(class_indices) + + boxes_proto = box_pb2.Boxes() + for i in range(num_boxes): + boxes_proto.box.add( + ymin=boxes[i, 0], + xmin=boxes[i, 1], + ymax=boxes[i, 2], + xmax=boxes[i, 3], + score=scores[i], + class_index=class_indices[i]) + + return boxes_proto + + +def BoxesToArrays(boxes_proto): + """Converts data saved in Boxes proto to numpy arrays. + + If there are no boxes, the function returns three empty arrays. + + Args: + boxes_proto: Boxes proto object. + + Returns: + boxes: [N, 4] float array denoting bounding box coordinates, in format [top, + left, bottom, right]. + scores: [N] float array with detection scores. + class_indices: [N] int array with class indices. + """ + num_boxes = len(boxes_proto.box) + if num_boxes == 0: + return np.array([]), np.array([]), np.array([]) + + boxes = np.zeros([num_boxes, 4]) + scores = np.zeros([num_boxes]) + class_indices = np.zeros([num_boxes]) + + for i in range(num_boxes): + box_proto = boxes_proto.box[i] + boxes[i] = [box_proto.ymin, box_proto.xmin, box_proto.ymax, box_proto.xmax] + scores[i] = box_proto.score + class_indices[i] = box_proto.class_index + + return boxes, scores, class_indices + + +def SerializeToString(boxes, scores, class_indices): + """Converts numpy arrays to serialized Boxes. 
+ + Args: + boxes: [N, 4] float array denoting bounding box coordinates, in format [top, + left, bottom, right]. + scores: [N] float array with detection scores. + class_indices: [N] int array with class indices. + + Returns: + Serialized Boxes string. + """ + boxes_proto = ArraysToBoxes(boxes, scores, class_indices) + return boxes_proto.SerializeToString() + + +def ParseFromString(string): + """Converts serialized Boxes proto string to numpy arrays. + + Args: + string: Serialized Boxes string. + + Returns: + boxes: [N, 4] float array denoting bounding box coordinates, in format [top, + left, bottom, right]. + scores: [N] float array with detection scores. + class_indices: [N] int array with class indices. + """ + boxes_proto = box_pb2.Boxes() + boxes_proto.ParseFromString(string) + return BoxesToArrays(boxes_proto) + + +def ReadFromFile(file_path): + """Helper function to load data from a Boxes proto format in a file. + + Args: + file_path: Path to file containing data. + + Returns: + boxes: [N, 4] float array denoting bounding box coordinates, in format [top, + left, bottom, right]. + scores: [N] float array with detection scores. + class_indices: [N] int array with class indices. + """ + with tf.io.gfile.GFile(file_path, 'rb') as f: + return ParseFromString(f.read()) + + +def WriteToFile(file_path, boxes, scores, class_indices): + """Helper function to write data to a file in Boxes proto format. + + Args: + file_path: Path to file that will be written. + boxes: [N, 4] float array denoting bounding box coordinates, in format [top, + left, bottom, right]. + scores: [N] float array with detection scores. + class_indices: [N] int array with class indices. + """ + serialized_data = SerializeToString(boxes, scores, class_indices) + with tf.io.gfile.GFile(file_path, 'w') as f: + f.write(serialized_data) diff --git a/models/research/delf/delf/python/box_io_test.py b/models/research/delf/delf/python/box_io_test.py new file mode 100644 index 0000000000000000000000000000000000000000..c659185daeec7efc9097ff72637d0d8f7c38664b --- /dev/null +++ b/models/research/delf/delf/python/box_io_test.py @@ -0,0 +1,82 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for box_io, the python interface of Boxes proto.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from absl import flags +import numpy as np +import tensorflow as tf + +from delf import box_io + +FLAGS = flags.FLAGS + + +class BoxesIoTest(tf.test.TestCase): + + def _create_data(self): + """Creates data to be used in tests. + + Returns: + boxes: [N, 4] float array denoting bounding box coordinates, in format + [top, + left, bottom, right]. + scores: [N] float array with detection scores. + class_indices: [N] int array with class indices. 
+ """ + boxes = np.arange(24, dtype=np.float32).reshape(6, 4) + scores = np.arange(6, dtype=np.float32) + class_indices = np.arange(6, dtype=np.int32) + + return boxes, scores, class_indices + + def testConversionAndBack(self): + boxes, scores, class_indices = self._create_data() + + serialized = box_io.SerializeToString(boxes, scores, class_indices) + parsed_data = box_io.ParseFromString(serialized) + + self.assertAllEqual(boxes, parsed_data[0]) + self.assertAllEqual(scores, parsed_data[1]) + self.assertAllEqual(class_indices, parsed_data[2]) + + def testWriteAndReadToFile(self): + boxes, scores, class_indices = self._create_data() + + filename = os.path.join(FLAGS.test_tmpdir, 'test.boxes') + box_io.WriteToFile(filename, boxes, scores, class_indices) + data_read = box_io.ReadFromFile(filename) + + self.assertAllEqual(boxes, data_read[0]) + self.assertAllEqual(scores, data_read[1]) + self.assertAllEqual(class_indices, data_read[2]) + + def testWriteAndReadToFileEmptyFile(self): + filename = os.path.join(FLAGS.test_tmpdir, 'test.box') + box_io.WriteToFile(filename, np.array([]), np.array([]), np.array([])) + data_read = box_io.ReadFromFile(filename) + + self.assertAllEqual(np.array([]), data_read[0]) + self.assertAllEqual(np.array([]), data_read[1]) + self.assertAllEqual(np.array([]), data_read[2]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/delf/delf/python/datum_io.py b/models/research/delf/delf/python/datum_io.py new file mode 100644 index 0000000000000000000000000000000000000000..f0d4cbfd11a140c6805c1fa017b7328cf3d04e38 --- /dev/null +++ b/models/research/delf/delf/python/datum_io.py @@ -0,0 +1,221 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Python interface for DatumProto. + +DatumProto is protocol buffer used to serialize tensor with arbitrary shape. +Please refer to datum.proto for details. + +Support read and write of DatumProto from/to NumPy array and file. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf + +from delf import datum_pb2 + + +def ArrayToDatum(arr): + """Converts NumPy array to DatumProto. + + Supports arrays of types: + - float16 (it is converted into a float32 in DatumProto) + - float32 + - float64 (it is converted into a float32 in DatumProto) + - uint8 (it is converted into a uint32 in DatumProto) + - uint16 (it is converted into a uint32 in DatumProto) + - uint32 + - uint64 (it is converted into a uint32 in DatumProto) + + Args: + arr: NumPy array of arbitrary shape. + + Returns: + datum: DatumProto object. + + Raises: + ValueError: If array type is unsupported. 
+ """ + datum = datum_pb2.DatumProto() + if arr.dtype in ('float16', 'float32', 'float64'): + datum.float_list.value.extend(arr.astype('float32').flat) + elif arr.dtype in ('uint8', 'uint16', 'uint32', 'uint64'): + datum.uint32_list.value.extend(arr.astype('uint32').flat) + else: + raise ValueError('Unsupported array type: %s' % arr.dtype) + + datum.shape.dim.extend(arr.shape) + return datum + + +def ArraysToDatumPair(arr_1, arr_2): + """Converts numpy arrays to DatumPairProto. + + Supports same formats as `ArrayToDatum`, see documentation therein. + + Args: + arr_1: NumPy array of arbitrary shape. + arr_2: NumPy array of arbitrary shape. + + Returns: + datum_pair: DatumPairProto object. + """ + datum_pair = datum_pb2.DatumPairProto() + datum_pair.first.CopyFrom(ArrayToDatum(arr_1)) + datum_pair.second.CopyFrom(ArrayToDatum(arr_2)) + + return datum_pair + + +def DatumToArray(datum): + """Converts data saved in DatumProto to NumPy array. + + Args: + datum: DatumProto object. + + Returns: + NumPy array of arbitrary shape. + """ + if datum.HasField('float_list'): + return np.array(datum.float_list.value).astype('float32').reshape( + datum.shape.dim) + elif datum.HasField('uint32_list'): + return np.array(datum.uint32_list.value).astype('uint32').reshape( + datum.shape.dim) + else: + raise ValueError('Input DatumProto does not have float_list or uint32_list') + + +def DatumPairToArrays(datum_pair): + """Converts data saved in DatumPairProto to NumPy arrays. + + Args: + datum_pair: DatumPairProto object. + + Returns: + Two NumPy arrays of arbitrary shape. + """ + first_datum = DatumToArray(datum_pair.first) + second_datum = DatumToArray(datum_pair.second) + return first_datum, second_datum + + +def SerializeToString(arr): + """Converts NumPy array to serialized DatumProto. + + Args: + arr: NumPy array of arbitrary shape. + + Returns: + Serialized DatumProto string. + """ + datum = ArrayToDatum(arr) + return datum.SerializeToString() + + +def SerializePairToString(arr_1, arr_2): + """Converts pair of NumPy arrays to serialized DatumPairProto. + + Args: + arr_1: NumPy array of arbitrary shape. + arr_2: NumPy array of arbitrary shape. + + Returns: + Serialized DatumPairProto string. + """ + datum_pair = ArraysToDatumPair(arr_1, arr_2) + return datum_pair.SerializeToString() + + +def ParseFromString(string): + """Converts serialized DatumProto string to NumPy array. + + Args: + string: Serialized DatumProto string. + + Returns: + NumPy array. + """ + datum = datum_pb2.DatumProto() + datum.ParseFromString(string) + return DatumToArray(datum) + + +def ParsePairFromString(string): + """Converts serialized DatumPairProto string to NumPy arrays. + + Args: + string: Serialized DatumProto string. + + Returns: + Two NumPy arrays. + """ + datum_pair = datum_pb2.DatumPairProto() + datum_pair.ParseFromString(string) + return DatumPairToArrays(datum_pair) + + +def ReadFromFile(file_path): + """Helper function to load data from a DatumProto format in a file. + + Args: + file_path: Path to file containing data. + + Returns: + data: NumPy array. + """ + with tf.io.gfile.GFile(file_path, 'rb') as f: + return ParseFromString(f.read()) + + +def ReadPairFromFile(file_path): + """Helper function to load data from a DatumPairProto format in a file. + + Args: + file_path: Path to file containing data. + + Returns: + Two NumPy arrays. 
+ """ + with tf.io.gfile.GFile(file_path, 'rb') as f: + return ParsePairFromString(f.read()) + + +def WriteToFile(data, file_path): + """Helper function to write data to a file in DatumProto format. + + Args: + data: NumPy array. + file_path: Path to file that will be written. + """ + serialized_data = SerializeToString(data) + with tf.io.gfile.GFile(file_path, 'w') as f: + f.write(serialized_data) + + +def WritePairToFile(arr_1, arr_2, file_path): + """Helper function to write pair of arrays to a file in DatumPairProto format. + + Args: + arr_1: NumPy array of arbitrary shape. + arr_2: NumPy array of arbitrary shape. + file_path: Path to file that will be written. + """ + serialized_data = SerializePairToString(arr_1, arr_2) + with tf.io.gfile.GFile(file_path, 'w') as f: + f.write(serialized_data) diff --git a/models/research/delf/delf/python/datum_io_test.py b/models/research/delf/delf/python/datum_io_test.py new file mode 100644 index 0000000000000000000000000000000000000000..f3587a10017f93af49715a9becc1ed72da8ebe69 --- /dev/null +++ b/models/research/delf/delf/python/datum_io_test.py @@ -0,0 +1,97 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for datum_io, the python interface of DatumProto.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from absl import flags +import numpy as np +import tensorflow as tf + +from delf import datum_io + +FLAGS = flags.FLAGS + + +class DatumIoTest(tf.test.TestCase): + + def Conversion2dTestWithType(self, dtype): + original_data = np.arange(9).reshape(3, 3).astype(dtype) + serialized = datum_io.SerializeToString(original_data) + retrieved_data = datum_io.ParseFromString(serialized) + self.assertTrue(np.array_equal(original_data, retrieved_data)) + + def Conversion3dTestWithType(self, dtype): + original_data = np.arange(24).reshape(2, 3, 4).astype(dtype) + serialized = datum_io.SerializeToString(original_data) + retrieved_data = datum_io.ParseFromString(serialized) + self.assertTrue(np.array_equal(original_data, retrieved_data)) + + # This test covers the following functions: ArrayToDatum, SerializeToString, + # ParseFromString, DatumToArray. + def testConversion2dWithType(self): + self.Conversion2dTestWithType(np.uint16) + self.Conversion2dTestWithType(np.uint32) + self.Conversion2dTestWithType(np.uint64) + self.Conversion2dTestWithType(np.float16) + self.Conversion2dTestWithType(np.float32) + self.Conversion2dTestWithType(np.float64) + + # This test covers the following functions: ArrayToDatum, SerializeToString, + # ParseFromString, DatumToArray. 
+ def testConversion3dWithType(self): + self.Conversion3dTestWithType(np.uint16) + self.Conversion3dTestWithType(np.uint32) + self.Conversion3dTestWithType(np.uint64) + self.Conversion3dTestWithType(np.float16) + self.Conversion3dTestWithType(np.float32) + self.Conversion3dTestWithType(np.float64) + + def testConversionWithUnsupportedType(self): + with self.assertRaisesRegex(ValueError, 'Unsupported array type'): + self.Conversion3dTestWithType(int) + + # This test covers the following functions: ArrayToDatum, SerializeToString, + # WriteToFile, ReadFromFile, ParseFromString, DatumToArray. + def testWriteAndReadToFile(self): + data = np.array([[[-1.0, 125.0, -2.5], [14.5, 3.5, 0.0]], + [[20.0, 0.0, 30.0], [25.5, 36.0, 42.0]]]) + filename = os.path.join(FLAGS.test_tmpdir, 'test.datum') + datum_io.WriteToFile(data, filename) + data_read = datum_io.ReadFromFile(filename) + self.assertAllEqual(data_read, data) + + # This test covers the following functions: ArraysToDatumPair, + # SerializePairToString, WritePairToFile, ReadPairFromFile, + # ParsePairFromString, DatumPairToArrays. + def testWriteAndReadPairToFile(self): + data_1 = np.array([[[-1.0, 125.0, -2.5], [14.5, 3.5, 0.0]], + [[20.0, 0.0, 30.0], [25.5, 36.0, 42.0]]]) + data_2 = np.array( + [[[255, 0, 5], [10, 300, 0]], [[20, 1, 100], [255, 360, 420]]], + dtype='uint32') + filename = os.path.join(FLAGS.test_tmpdir, 'test.datum_pair') + datum_io.WritePairToFile(data_1, data_2, filename) + data_read_1, data_read_2 = datum_io.ReadPairFromFile(filename) + self.assertAllEqual(data_read_1, data_1) + self.assertAllEqual(data_read_2, data_2) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/delf/delf/python/delg/DELG_INSTRUCTIONS.md b/models/research/delf/delf/python/delg/DELG_INSTRUCTIONS.md new file mode 100644 index 0000000000000000000000000000000000000000..2b62ac29003e3d4b13a3ccc8fad5b43e236f96da --- /dev/null +++ b/models/research/delf/delf/python/delg/DELG_INSTRUCTIONS.md @@ -0,0 +1,159 @@ +## DELG instructions + +[![Paper](http://img.shields.io/badge/paper-arXiv.2001.05027-B3181B.svg)](https://arxiv.org/abs/2001.05027) + +These instructions can be used to reproduce the results from the +[DELG paper](https://arxiv.org/abs/2001.05027) for the Revisited Oxford/Paris +datasets. + +### Install DELF library + +To be able to use this code, please follow +[these instructions](../../../INSTALL_INSTRUCTIONS.md) to properly install the +DELF library. + +### Download datasets + +```bash +mkdir -p ~/delg/data && cd ~/delg/data + +# Oxford dataset. +wget http://www.robots.ox.ac.uk/~vgg/data/oxbuildings/oxbuild_images.tgz +mkdir oxford5k_images +tar -xvzf oxbuild_images.tgz -C oxford5k_images/ + +# Paris dataset. Download and move all images to same directory. +wget http://www.robots.ox.ac.uk/~vgg/data/parisbuildings/paris_1.tgz +wget http://www.robots.ox.ac.uk/~vgg/data/parisbuildings/paris_2.tgz +mkdir paris6k_images_tmp +tar -xvzf paris_1.tgz -C paris6k_images_tmp/ +tar -xvzf paris_2.tgz -C paris6k_images_tmp/ +mkdir paris6k_images +mv paris6k_images_tmp/paris/*/*.jpg paris6k_images/ + +# Revisited annotations. +wget http://cmp.felk.cvut.cz/revisitop/data/datasets/roxford5k/gnd_roxford5k.mat +wget http://cmp.felk.cvut.cz/revisitop/data/datasets/rparis6k/gnd_rparis6k.mat +``` + +### Download model + +This is necessary to reproduce the main paper results: + +```bash +# From models/research/delf/delf/python/delg +mkdir parameters && cd parameters + +# DELG-GLD model. 
+wget http://storage.googleapis.com/delf/delg_gld_20200520.tar.gz +tar -xvzf delg_gld_20200520.tar.gz +``` + +### Feature extraction + +We present here commands for extraction on `roxford5k`. To extract on `rparis6k` +instead, please edit the arguments accordingly (especially the +`dataset_file_path` argument). + +#### Query feature extraction + +For query feature extraction, the cropped query image should be used to extract +features, according to the Revisited Oxford/Paris experimental protocol. Note +that this is done in the `extract_features` script, when setting +`image_set=query`. + +Query feature extraction can be run as follows: + +```bash +# From models/research/delf/delf/python/delg +python3 extract_features.py \ + --delf_config_path delg_gld_config.pbtxt \ + --dataset_file_path ~/delg/data/gnd_roxford5k.mat \ + --images_dir ~/delg/data/oxford5k_images \ + --image_set query \ + --output_features_dir ~/delg/data/oxford5k_features/query +``` + +#### Index feature extraction + +Run index feature extraction as follows: + +```bash +# From models/research/delf/delf/python/delg +python3 extract_features.py \ + --delf_config_path delg_gld_config.pbtxt \ + --dataset_file_path ~/delg/data/gnd_roxford5k.mat \ + --images_dir ~/delg/data/oxford5k_images \ + --image_set index \ + --output_features_dir ~/delg/data/oxford5k_features/index +``` + +### Perform retrieval + +To run retrieval on `roxford5k`, the following command can be used: + +```bash +# From models/research/delf/delf/python/delg +python3 perform_retrieval.py \ + --dataset_file_path ~/delg/data/gnd_roxford5k.mat \ + --query_features_dir ~/delg/data/oxford5k_features/query \ + --index_features_dir ~/delg/data/oxford5k_features/index \ + --output_dir ~/delg/results/oxford5k +``` + +A file with named `metrics.txt` will be written to the path given in +`output_dir`, with retrieval metrics for an experiment where geometric +verification is not used. The contents should look approximately like: + +``` +hard + mAP=45.11 + mP@k[ 1 5 10] [85.71 72.29 60.14] + mR@k[ 1 5 10] [19.15 29.72 36.32] +medium + mAP=69.71 + mP@k[ 1 5 10] [95.71 92. 86.86] + mR@k[ 1 5 10] [10.17 25.94 33.83] +``` + +which are the results presented in Table 3 of the paper. + +If you want to run retrieval with geometric verification, set +`use_geometric_verification` to `True`. It's much slower since (1) in this code +example the re-ranking is loading DELF local features from disk, and (2) +re-ranking needs to be performed separately for each dataset protocol, since the +junk images from each protocol should be removed when re-ranking. Here is an +example command: + +```bash +# From models/research/delf/delf/python/delg +python3 perform_retrieval.py \ + --dataset_file_path ~/delg/data/gnd_roxford5k.mat \ + --query_features_dir ~/delg/data/oxford5k_features/query \ + --index_features_dir ~/delg/data/oxford5k_features/index \ + --use_geometric_verification \ + --output_dir ~/delg/results/oxford5k_with_gv +``` + +The `metrics.txt` should now show: + +``` +hard + mAP=45.11 + mP@k[ 1 5 10] [85.71 72.29 60.14] + mR@k[ 1 5 10] [19.15 29.72 36.32] +hard_after_gv + mAP=53.72 + mP@k[ 1 5 10] [91.43 83.81 74.38] + mR@k[ 1 5 10] [19.45 34.45 44.64] +medium + mAP=69.71 + mP@k[ 1 5 10] [95.71 92. 86.86] + mR@k[ 1 5 10] [10.17 25.94 33.83] +medium_after_gv + mAP=75.42 + mP@k[ 1 5 10] [97.14 95.24 93.81] + mR@k[ 1 5 10] [10.21 27.21 37.72] +``` + +which, again, are the results presented in Table 3 of the paper. 
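+### Inspecting extracted global features (optional)
+
+The `.delg_global` files produced by `extract_features.py` are serialized
+DatumProto's, so they can be loaded back with `datum_io` for quick sanity
+checks. The snippet below is a minimal sketch of this, assuming the feature
+extraction commands above have already been run; the two image names are
+hypothetical examples and should be replaced by actual dataset image names:
+
+```python
+import os
+
+import numpy as np
+
+from delf import datum_io
+
+features_dir = os.path.expanduser('~/delg/data/oxford5k_features')
+
+# Load one query and one index global descriptor (hypothetical image names).
+query_descriptor = datum_io.ReadFromFile(
+    os.path.join(features_dir, 'query', 'all_souls_000013.delg_global'))
+index_descriptor = datum_io.ReadFromFile(
+    os.path.join(features_dir, 'index', 'all_souls_000000.delg_global'))
+
+# perform_retrieval.py ranks index images by the dot product between global
+# descriptors; the same similarity can be computed directly.
+similarity = np.dot(index_descriptor, query_descriptor)
+print('Descriptor dimensionality: %d, similarity: %f' %
+      (query_descriptor.shape[0], similarity))
+```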
diff --git a/models/research/delf/delf/python/delg/delg_gld_config.pbtxt b/models/research/delf/delf/python/delg/delg_gld_config.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..a659a0a3ee502c31f7d4b71fd634803f94d425b7 --- /dev/null +++ b/models/research/delf/delf/python/delg/delg_gld_config.pbtxt @@ -0,0 +1,22 @@ +use_local_features: true +use_global_features: true +model_path: "parameters/delg_gld_20200520" +image_scales: 0.25 +image_scales: 0.35355338 +image_scales: 0.5 +image_scales: 0.70710677 +image_scales: 1.0 +image_scales: 1.4142135 +image_scales: 2.0 +delf_local_config { + use_pca: false + max_feature_num: 1000 + score_threshold: 175.0 +} +delf_global_config { + use_pca: false + image_scales_ind: 3 + image_scales_ind: 4 + image_scales_ind: 5 +} +max_image_size: 1024 diff --git a/models/research/delf/delf/python/delg/extract_features.py b/models/research/delf/delf/python/delg/extract_features.py new file mode 100644 index 0000000000000000000000000000000000000000..ad65d66e69ddaa032d1201b34a2f10a04fe61eb5 --- /dev/null +++ b/models/research/delf/delf/python/delg/extract_features.py @@ -0,0 +1,162 @@ +# Copyright 2020 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Extracts DELG features for images from Revisited Oxford/Paris datasets. + +Note that query images are cropped before feature extraction, as required by the +evaluation protocols of these datasets. + +The types of extracted features (local and/or global) depend on the input +DelfConfig. + +The program checks if features already exist, and skips computation for those. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import time + +from absl import app +from absl import flags +import numpy as np +import tensorflow as tf + +from google.protobuf import text_format +from delf import delf_config_pb2 +from delf import datum_io +from delf import feature_io +from delf import utils +from delf.python.detect_to_retrieve import dataset +from delf import extractor + +FLAGS = flags.FLAGS + +flags.DEFINE_string( + 'delf_config_path', '/tmp/delf_config_example.pbtxt', + 'Path to DelfConfig proto text file with configuration to be used for DELG ' + 'extraction. Local features are extracted if use_local_features is True; ' + 'global features are extracted if use_global_features is True.') +flags.DEFINE_string( + 'dataset_file_path', '/tmp/gnd_roxford5k.mat', + 'Dataset file for Revisited Oxford or Paris dataset, in .mat format.') +flags.DEFINE_string( + 'images_dir', '/tmp/images', + 'Directory where dataset images are located, all in .jpg format.') +flags.DEFINE_enum('image_set', 'query', ['query', 'index'], + 'Whether to extract features from query or index images.') +flags.DEFINE_string( + 'output_features_dir', '/tmp/features', + "Directory where DELG features will be written to. 
Each image's features " + 'will be written to files with same name but different extension: the ' + 'global feature is written to a file with extension .delg_global and the ' + 'local features are written to a file with extension .delg_local.') + +# Extensions. +_DELG_GLOBAL_EXTENSION = '.delg_global' +_DELG_LOCAL_EXTENSION = '.delg_local' +_IMAGE_EXTENSION = '.jpg' + +# Pace to report extraction log. +_STATUS_CHECK_ITERATIONS = 50 + + +def main(argv): + if len(argv) > 1: + raise RuntimeError('Too many command-line arguments.') + + # Read list of images from dataset file. + print('Reading list of images from dataset file...') + query_list, index_list, ground_truth = dataset.ReadDatasetFile( + FLAGS.dataset_file_path) + if FLAGS.image_set == 'query': + image_list = query_list + else: + image_list = index_list + num_images = len(image_list) + print('done! Found %d images' % num_images) + + # Parse DelfConfig proto. + config = delf_config_pb2.DelfConfig() + with tf.io.gfile.GFile(FLAGS.delf_config_path, 'r') as f: + text_format.Parse(f.read(), config) + + # Create output directory if necessary. + if not tf.io.gfile.exists(FLAGS.output_features_dir): + tf.io.gfile.makedirs(FLAGS.output_features_dir) + + extractor_fn = extractor.MakeExtractor(config) + + start = time.time() + for i in range(num_images): + if i == 0: + print('Starting to extract features...') + elif i % _STATUS_CHECK_ITERATIONS == 0: + elapsed = (time.time() - start) + print('Processing image %d out of %d, last %d ' + 'images took %f seconds' % + (i, num_images, _STATUS_CHECK_ITERATIONS, elapsed)) + start = time.time() + + image_name = image_list[i] + input_image_filename = os.path.join(FLAGS.images_dir, + image_name + _IMAGE_EXTENSION) + + # Compose output file name and decide if image should be skipped. + should_skip_global = True + should_skip_local = True + if config.use_global_features: + output_global_feature_filename = os.path.join( + FLAGS.output_features_dir, image_name + _DELG_GLOBAL_EXTENSION) + if not tf.io.gfile.exists(output_global_feature_filename): + should_skip_global = False + if config.use_local_features: + output_local_feature_filename = os.path.join( + FLAGS.output_features_dir, image_name + _DELG_LOCAL_EXTENSION) + if not tf.io.gfile.exists(output_local_feature_filename): + should_skip_local = False + if should_skip_global and should_skip_local: + print('Skipping %s' % image_name) + continue + + pil_im = utils.RgbLoader(input_image_filename) + resize_factor = 1.0 + if FLAGS.image_set == 'query': + # Crop query image according to bounding box. + original_image_size = max(pil_im.size) + bbox = [int(round(b)) for b in ground_truth[i]['bbx']] + pil_im = pil_im.crop(bbox) + cropped_image_size = max(pil_im.size) + resize_factor = cropped_image_size / original_image_size + + im = np.array(pil_im) + + # Extract and save features. 
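+    # Note: `resize_factor` (computed above when cropping query images) is
+    # forwarded to the extractor; the assumption is that it lets the extractor
+    # resize the cropped query consistently with the corresponding full image.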
+ extracted_features = extractor_fn(im, resize_factor) + if config.use_global_features: + global_descriptor = extracted_features['global_descriptor'] + datum_io.WriteToFile(global_descriptor, output_global_feature_filename) + if config.use_local_features: + locations = extracted_features['local_features']['locations'] + descriptors = extracted_features['local_features']['descriptors'] + feature_scales = extracted_features['local_features']['scales'] + attention = extracted_features['local_features']['attention'] + feature_io.WriteToFile(output_local_feature_filename, locations, + feature_scales, descriptors, attention) + + +if __name__ == '__main__': + app.run(main) diff --git a/models/research/delf/delf/python/delg/measure_latency.py b/models/research/delf/delf/python/delg/measure_latency.py new file mode 100644 index 0000000000000000000000000000000000000000..21ffbda4179a191139ae35244c8ae34693594fd9 --- /dev/null +++ b/models/research/delf/delf/python/delg/measure_latency.py @@ -0,0 +1,108 @@ +# Copyright 2020 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Times DELF/G extraction.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import time + +from absl import app +from absl import flags +import numpy as np +from six.moves import range +import tensorflow as tf + +from google.protobuf import text_format +from delf import delf_config_pb2 +from delf import utils +from delf import extractor + +FLAGS = flags.FLAGS + +flags.DEFINE_string( + 'delf_config_path', '/tmp/delf_config_example.pbtxt', + 'Path to DelfConfig proto text file with configuration to be used for DELG ' + 'extraction. Local features are extracted if use_local_features is True; ' + 'global features are extracted if use_global_features is True.') +flags.DEFINE_string('list_images_path', '/tmp/list_images.txt', + 'Path to list of images whose features will be extracted.') +flags.DEFINE_integer('repeat_per_image', 10, + 'Number of times to repeat extraction per image.') + +# Pace to report extraction log. +_STATUS_CHECK_ITERATIONS = 100 + + +def _ReadImageList(list_path): + """Helper function to read image paths. + + Args: + list_path: Path to list of images, one image path per line. + + Returns: + image_paths: List of image paths. + """ + with tf.io.gfile.GFile(list_path, 'r') as f: + image_paths = f.readlines() + image_paths = [entry.rstrip() for entry in image_paths] + return image_paths + + +def main(argv): + if len(argv) > 1: + raise RuntimeError('Too many command-line arguments.') + + # Read list of images. + print('Reading list of images...') + image_paths = _ReadImageList(FLAGS.list_images_path) + num_images = len(image_paths) + print(f'done! Found {num_images} images') + + # Load images in memory. + print('Loading images, %d times per image...' 
% FLAGS.repeat_per_image) + im_array = [] + for filename in image_paths: + im = np.array(utils.RgbLoader(filename)) + for _ in range(FLAGS.repeat_per_image): + im_array.append(im) + np.random.shuffle(im_array) + print('done!') + + # Parse DelfConfig proto. + config = delf_config_pb2.DelfConfig() + with tf.io.gfile.GFile(FLAGS.delf_config_path, 'r') as f: + text_format.Parse(f.read(), config) + + extractor_fn = extractor.MakeExtractor(config) + + start = time.time() + for i, im in enumerate(im_array): + if i == 0: + print('Starting to extract DELF features from images...') + elif i % _STATUS_CHECK_ITERATIONS == 0: + elapsed = (time.time() - start) + print(f'Processing image {i} out of {len(im_array)}, last ' + f'{_STATUS_CHECK_ITERATIONS} images took {elapsed} seconds,' + f'ie {elapsed/_STATUS_CHECK_ITERATIONS} secs/image.') + start = time.time() + + # Extract and save features. + extracted_features = extractor_fn(im) + + +if __name__ == '__main__': + app.run(main) diff --git a/models/research/delf/delf/python/delg/perform_retrieval.py b/models/research/delf/delf/python/delg/perform_retrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..fb53abb1a9e15a5d5a040be42213f325ab345163 --- /dev/null +++ b/models/research/delf/delf/python/delg/perform_retrieval.py @@ -0,0 +1,215 @@ +# Copyright 2020 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Performs DELG-based image retrieval on Revisited Oxford/Paris datasets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import time + +from absl import app +from absl import flags +import numpy as np +import tensorflow as tf + +from delf import datum_io +from delf.python.detect_to_retrieve import dataset +from delf.python.detect_to_retrieve import image_reranking + +FLAGS = flags.FLAGS + +flags.DEFINE_string( + 'dataset_file_path', '/tmp/gnd_roxford5k.mat', + 'Dataset file for Revisited Oxford or Paris dataset, in .mat format.') +flags.DEFINE_string('query_features_dir', '/tmp/features/query', + 'Directory where query DELG features are located.') +flags.DEFINE_string('index_features_dir', '/tmp/features/index', + 'Directory where index DELG features are located.') +flags.DEFINE_boolean( + 'use_geometric_verification', False, + 'If True, performs re-ranking using local feature-based geometric ' + 'verification.') +flags.DEFINE_float( + 'local_feature_distance_threshold', 1.0, + 'Optional, only used if `use_geometric_verification` is True. ' + 'Distance threshold below which a pair of local descriptors is considered ' + 'a potential match, and will be fed into RANSAC.') +flags.DEFINE_float( + 'ransac_residual_threshold', 20.0, + 'Optional, only used if `use_geometric_verification` is True. 
' + 'Residual error threshold for considering matches as inliers, used in ' + 'RANSAC algorithm.') +flags.DEFINE_string( + 'output_dir', '/tmp/retrieval', + 'Directory where retrieval output will be written to. A file containing ' + "metrics for this run is saved therein, with file name 'metrics.txt'.") + +# Extensions. +_DELG_GLOBAL_EXTENSION = '.delg_global' +_DELG_LOCAL_EXTENSION = '.delg_local' + +# Precision-recall ranks to use in metric computation. +_PR_RANKS = (1, 5, 10) + +# Pace to log. +_STATUS_CHECK_LOAD_ITERATIONS = 50 + +# Output file names. +_METRICS_FILENAME = 'metrics.txt' + + +def _ReadDelgGlobalDescriptors(input_dir, image_list): + """Reads DELG global features. + + Args: + input_dir: Directory where features are located. + image_list: List of image names for which to load features. + + Returns: + global_descriptors: NumPy array of shape (len(image_list), D), where D + corresponds to the global descriptor dimensionality. + """ + num_images = len(image_list) + global_descriptors = [] + print('Starting to collect global descriptors for %d images...' % num_images) + start = time.time() + for i in range(num_images): + if i > 0 and i % _STATUS_CHECK_LOAD_ITERATIONS == 0: + elapsed = (time.time() - start) + print('Reading global descriptors for image %d out of %d, last %d ' + 'images took %f seconds' % + (i, num_images, _STATUS_CHECK_LOAD_ITERATIONS, elapsed)) + start = time.time() + + descriptor_filename = image_list[i] + _DELG_GLOBAL_EXTENSION + descriptor_fullpath = os.path.join(input_dir, descriptor_filename) + global_descriptors.append(datum_io.ReadFromFile(descriptor_fullpath)) + + return np.array(global_descriptors) + + +def main(argv): + if len(argv) > 1: + raise RuntimeError('Too many command-line arguments.') + + # Parse dataset to obtain query/index images, and ground-truth. + print('Parsing dataset...') + query_list, index_list, ground_truth = dataset.ReadDatasetFile( + FLAGS.dataset_file_path) + num_query_images = len(query_list) + num_index_images = len(index_list) + (_, medium_ground_truth, + hard_ground_truth) = dataset.ParseEasyMediumHardGroundTruth(ground_truth) + print('done! Found %d queries and %d index images' % + (num_query_images, num_index_images)) + + # Read global features. + query_global_features = _ReadDelgGlobalDescriptors(FLAGS.query_features_dir, + query_list) + index_global_features = _ReadDelgGlobalDescriptors(FLAGS.index_features_dir, + index_list) + + # Compute similarity between query and index images, potentially re-ranking + # with geometric verification. + ranks_before_gv = np.zeros([num_query_images, num_index_images], + dtype='int32') + if FLAGS.use_geometric_verification: + medium_ranks_after_gv = np.zeros([num_query_images, num_index_images], + dtype='int32') + hard_ranks_after_gv = np.zeros([num_query_images, num_index_images], + dtype='int32') + for i in range(num_query_images): + print('Performing retrieval with query %d (%s)...' % (i, query_list[i])) + start = time.time() + + # Compute similarity between global descriptors. + similarities = np.dot(index_global_features, query_global_features[i]) + ranks_before_gv[i] = np.argsort(-similarities) + + # Re-rank using geometric verification. 
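+    # Re-ranking is performed separately for the medium and hard protocols,
+    # since each protocol defines its own set of junk images to exclude.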
+ if FLAGS.use_geometric_verification: + medium_ranks_after_gv[i] = image_reranking.RerankByGeometricVerification( + input_ranks=ranks_before_gv[i], + initial_scores=similarities, + query_name=query_list[i], + index_names=index_list, + query_features_dir=FLAGS.query_features_dir, + index_features_dir=FLAGS.index_features_dir, + junk_ids=set(medium_ground_truth[i]['junk']), + local_feature_extension=_DELG_LOCAL_EXTENSION, + ransac_seed=0, + feature_distance_threshold=FLAGS.local_feature_distance_threshold, + ransac_residual_threshold=FLAGS.ransac_residual_threshold) + hard_ranks_after_gv[i] = image_reranking.RerankByGeometricVerification( + input_ranks=ranks_before_gv[i], + initial_scores=similarities, + query_name=query_list[i], + index_names=index_list, + query_features_dir=FLAGS.query_features_dir, + index_features_dir=FLAGS.index_features_dir, + junk_ids=set(hard_ground_truth[i]['junk']), + local_feature_extension=_DELG_LOCAL_EXTENSION, + ransac_seed=0, + feature_distance_threshold=FLAGS.local_feature_distance_threshold, + ransac_residual_threshold=FLAGS.ransac_residual_threshold) + + elapsed = (time.time() - start) + print('done! Retrieval for query %d took %f seconds' % (i, elapsed)) + + # Create output directory if necessary. + if not tf.io.gfile.exists(FLAGS.output_dir): + tf.io.gfile.makedirs(FLAGS.output_dir) + + # Compute metrics. + medium_metrics = dataset.ComputeMetrics(ranks_before_gv, medium_ground_truth, + _PR_RANKS) + hard_metrics = dataset.ComputeMetrics(ranks_before_gv, hard_ground_truth, + _PR_RANKS) + if FLAGS.use_geometric_verification: + medium_metrics_after_gv = dataset.ComputeMetrics(medium_ranks_after_gv, + medium_ground_truth, + _PR_RANKS) + hard_metrics_after_gv = dataset.ComputeMetrics(hard_ranks_after_gv, + hard_ground_truth, _PR_RANKS) + + # Write metrics to file. + mean_average_precision_dict = { + 'medium': medium_metrics[0], + 'hard': hard_metrics[0] + } + mean_precisions_dict = {'medium': medium_metrics[1], 'hard': hard_metrics[1]} + mean_recalls_dict = {'medium': medium_metrics[2], 'hard': hard_metrics[2]} + if FLAGS.use_geometric_verification: + mean_average_precision_dict.update({ + 'medium_after_gv': medium_metrics_after_gv[0], + 'hard_after_gv': hard_metrics_after_gv[0] + }) + mean_precisions_dict.update({ + 'medium_after_gv': medium_metrics_after_gv[1], + 'hard_after_gv': hard_metrics_after_gv[1] + }) + mean_recalls_dict.update({ + 'medium_after_gv': medium_metrics_after_gv[2], + 'hard_after_gv': hard_metrics_after_gv[2] + }) + dataset.SaveMetricsFile(mean_average_precision_dict, mean_precisions_dict, + mean_recalls_dict, _PR_RANKS, + os.path.join(FLAGS.output_dir, _METRICS_FILENAME)) + + +if __name__ == '__main__': + app.run(main) diff --git a/models/research/delf/delf/python/detect_to_retrieve/DETECT_TO_RETRIEVE_INSTRUCTIONS.md b/models/research/delf/delf/python/detect_to_retrieve/DETECT_TO_RETRIEVE_INSTRUCTIONS.md new file mode 100644 index 0000000000000000000000000000000000000000..2d18a328997ace5ee01f60a4b2c95714a18eb7d9 --- /dev/null +++ b/models/research/delf/delf/python/detect_to_retrieve/DETECT_TO_RETRIEVE_INSTRUCTIONS.md @@ -0,0 +1,231 @@ +## Detect-to-Retrieve instructions + +[![Paper](http://img.shields.io/badge/paper-arXiv.1812.01584-B3181B.svg)](https://arxiv.org/abs/1812.01584) + +These instructions can be used to reproduce the results from the +[Detect-to-Retrieve paper](https://arxiv.org/abs/1812.01584) for the Revisited +Oxford/Paris datasets. 
+ +### Install DELF library + +To be able to use this code, please follow +[these instructions](../../../INSTALL_INSTRUCTIONS.md) to properly install the +DELF library. + +### Download datasets + +```bash +mkdir -p ~/detect_to_retrieve/data && cd ~/detect_to_retrieve/data + +# Oxford dataset. +wget http://www.robots.ox.ac.uk/~vgg/data/oxbuildings/oxbuild_images.tgz +mkdir oxford5k_images +tar -xvzf oxbuild_images.tgz -C oxford5k_images/ + +# Paris dataset. Download and move all images to same directory. +wget http://www.robots.ox.ac.uk/~vgg/data/parisbuildings/paris_1.tgz +wget http://www.robots.ox.ac.uk/~vgg/data/parisbuildings/paris_2.tgz +mkdir paris6k_images_tmp +tar -xvzf paris_1.tgz -C paris6k_images_tmp/ +tar -xvzf paris_2.tgz -C paris6k_images_tmp/ +mkdir paris6k_images +mv paris6k_images_tmp/paris/*/*.jpg paris6k_images/ + +# Revisited annotations. +wget http://cmp.felk.cvut.cz/revisitop/data/datasets/roxford5k/gnd_roxford5k.mat +wget http://cmp.felk.cvut.cz/revisitop/data/datasets/rparis6k/gnd_rparis6k.mat +``` + +### Download models + +These are necessary to reproduce the main paper results: + +```bash +# From models/research/delf/delf/python/detect_to_retrieve +mkdir parameters && cd parameters + +# DELF-GLD model. +wget http://storage.googleapis.com/delf/delf_gld_20190411.tar.gz +tar -xvzf delf_gld_20190411.tar.gz + +# Faster-RCNN detector model. +wget http://storage.googleapis.com/delf/d2r_frcnn_20190411.tar.gz +tar -xvzf d2r_frcnn_20190411.tar.gz + +# Codebooks. +# Note: you should use codebook trained on rparis6k for roxford5k retrieval +# experiments, and vice-versa. +wget http://storage.googleapis.com/delf/rparis6k_codebook_65536.tar.gz +mkdir rparis6k_codebook_65536 +tar -xvzf rparis6k_codebook_65536.tar.gz -C rparis6k_codebook_65536/ +wget http://storage.googleapis.com/delf/roxford5k_codebook_65536.tar.gz +mkdir roxford5k_codebook_65536 +tar -xvzf roxford5k_codebook_65536.tar.gz -C roxford5k_codebook_65536/ +``` + +We also make available other models/parameters that can be used to reproduce +more results from the paper: + +- [MobileNet-SSD trained detector](http://storage.googleapis.com/delf/d2r_mnetssd_20190411.tar.gz). +- Codebooks with 1024 centroids: + [rparis6k](http://storage.googleapis.com/delf/rparis6k_codebook_1024.tar.gz), + [roxford5k](http://storage.googleapis.com/delf/roxford5k_codebook_1024.tar.gz). + +### Feature extraction + +We present here commands for extraction on `roxford5k`. To extract on `rparis6k` +instead, please edit the arguments accordingly (especially the +`dataset_file_path` argument). + +#### Query feature extraction + +For query feature extraction, the cropped query image should be used to extract +features, according to the Revisited Oxford/Paris experimental protocol. Note +that this is done in the `extract_query_features` script. 
+ +Query feature extraction can be run as follows: + +```bash +# From models/research/delf/delf/python/detect_to_retrieve +python3 extract_query_features.py \ + --delf_config_path delf_gld_config.pbtxt \ + --dataset_file_path ~/detect_to_retrieve/data/gnd_roxford5k.mat \ + --images_dir ~/detect_to_retrieve/data/oxford5k_images \ + --output_features_dir ~/detect_to_retrieve/data/oxford5k_features/query +``` + +#### Index feature extraction and box detection + +Index feature extraction / box detection can be run as follows: + +```bash +# From models/research/delf/delf/python/detect_to_retrieve +python3 extract_index_boxes_and_features.py \ + --delf_config_path delf_gld_config.pbtxt \ + --detector_model_dir parameters/d2r_frcnn_20190411 \ + --detector_thresh 0.1 \ + --dataset_file_path ~/detect_to_retrieve/data/gnd_roxford5k.mat \ + --images_dir ~/detect_to_retrieve/data/oxford5k_images \ + --output_boxes_dir ~/detect_to_retrieve/data/oxford5k_boxes/index \ + --output_features_dir ~/detect_to_retrieve/data/oxford5k_features/index_0.1 \ + --output_index_mapping ~/detect_to_retrieve/data/oxford5k_features/index_mapping_0.1.csv +``` + +### R-ASMK* aggregation extraction + +We present here commands for aggregation extraction on `roxford5k`. To extract +on `rparis6k` instead, please edit the arguments accordingly. In particular, +note that feature aggregation on `roxford5k` should use a codebook trained on +`rparis6k`, and vice-versa (this can be edited in the +`query_aggregation_config.pbtxt` and `index_aggregation_config.pbtxt` files. + +#### Query + +Run query feature aggregation as follows: + +```bash +# From models/research/delf/delf/python/detect_to_retrieve +python3 extract_aggregation.py \ + --use_query_images True \ + --aggregation_config_path query_aggregation_config.pbtxt \ + --dataset_file_path ~/detect_to_retrieve/data/gnd_roxford5k.mat \ + --features_dir ~/detect_to_retrieve/data/oxford5k_features/query \ + --output_aggregation_dir ~/detect_to_retrieve/data/oxford5k_aggregation/query +``` + +#### Index + +Run index feature aggregation as follows: + +```bash +# From models/research/delf/delf/python/detect_to_retrieve +python3 extract_aggregation.py \ + --aggregation_config_path index_aggregation_config.pbtxt \ + --dataset_file_path ~/detect_to_retrieve/data/gnd_roxford5k.mat \ + --features_dir ~/detect_to_retrieve/data/oxford5k_features/index_0.1 \ + --index_mapping_path ~/detect_to_retrieve/data/oxford5k_features/index_mapping_0.1.csv \ + --output_aggregation_dir ~/detect_to_retrieve/data/oxford5k_aggregation/index_0.1 +``` + +### Perform retrieval + +Currently, we support retrieval via brute-force comparison of aggregated +features. + +To run retrieval on `roxford5k`, the following command can be used: + +```bash +# From models/research/delf/delf/python/detect_to_retrieve +python3 perform_retrieval.py \ + --index_aggregation_config_path index_aggregation_config.pbtxt \ + --query_aggregation_config_path query_aggregation_config.pbtxt \ + --dataset_file_path ~/detect_to_retrieve/data/gnd_roxford5k.mat \ + --index_aggregation_dir ~/detect_to_retrieve/data/oxford5k_aggregation/index_0.1 \ + --query_aggregation_dir ~/detect_to_retrieve/data/oxford5k_aggregation/query \ + --output_dir ~/detect_to_retrieve/results/oxford5k +``` + +A file with named `metrics.txt` will be written to the path given in +`output_dir`, with retrieval metrics for an experiment where geometric +verification is not used. 
The contents should look approximately like: + +``` +hard +mAP=47.61 +mP@k[ 1 5 10] [84.29 73.71 64.43] +mR@k[ 1 5 10] [18.84 29.44 36.82] +medium +mAP=73.3 +mP@k[ 1 5 10] [97.14 94.57 90.14] +mR@k[ 1 5 10] [10.14 26.2 34.75] +``` + +which are the results presented in Table 2 of the paper (with small numerical +precision differences). + +If you want to run retrieval with geometric verification, set +`use_geometric_verification` to `True` and the arguments +`index_features_dir`/`query_features_dir`. It's much slower since (1) in this +code example the re-ranking is loading DELF local features from disk, and (2) +re-ranking needs to be performed separately for each dataset protocol, since the +junk images from each protocol should be removed when re-ranking. Here is an +example command: + +```bash +# From models/research/delf/delf/python/detect_to_retrieve +python3 perform_retrieval.py \ + --index_aggregation_config_path index_aggregation_config.pbtxt \ + --query_aggregation_config_path query_aggregation_config.pbtxt \ + --dataset_file_path ~/detect_to_retrieve/data/gnd_roxford5k.mat \ + --index_aggregation_dir ~/detect_to_retrieve/data/oxford5k_aggregation/index_0.1 \ + --query_aggregation_dir ~/detect_to_retrieve/data/oxford5k_aggregation/query \ + --use_geometric_verification True \ + --index_features_dir ~/detect_to_retrieve/data/oxford5k_features/index_0.1 \ + --query_features_dir ~/detect_to_retrieve/data/oxford5k_features/query \ + --output_dir ~/detect_to_retrieve/results/oxford5k_with_gv +``` + +### Clustering + +In the code example above, we used a pre-trained DELF codebook. We also provide +code for re-training the codebook if desired. + +Note that for the time being this can only run on CPU, since the main ops in +K-means are not registered for GPU usage in Tensorflow. + +```bash +# From models/research/delf/delf/python/detect_to_retrieve +python3 cluster_delf_features.py \ + --dataset_file_path ~/detect_to_retrieve/data/gnd_rparis6k.mat \ + --features_dir ~/detect_to_retrieve/data/paris6k_features/index_0.1 \ + --num_clusters 1024 \ + --num_iterations 50 \ + --output_cluster_dir ~/detect_to_retrieve/data/paris6k_clusters_1024 +``` + +### Next steps + +To make retrieval more scalable and handle larger datasets more smoothly, we are +considering to provide code for inverted index building and retrieval. Please +reach out if you would like to help doing that -- feel free submit a pull +request. diff --git a/models/research/delf/delf/python/detect_to_retrieve/__init__.py b/models/research/delf/delf/python/detect_to_retrieve/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..06972a7d06738da1dc50e832c4e8443b0e6fb5b6 --- /dev/null +++ b/models/research/delf/delf/python/detect_to_retrieve/__init__.py @@ -0,0 +1,24 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Module for Detect-to-Retrieve technique.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# pylint: disable=unused-import +from delf.python.detect_to_retrieve import aggregation_extraction +from delf.python.detect_to_retrieve import boxes_and_features_extraction +from delf.python.detect_to_retrieve import dataset +# pylint: enable=unused-import diff --git a/models/research/delf/delf/python/detect_to_retrieve/aggregation_extraction.py b/models/research/delf/delf/python/detect_to_retrieve/aggregation_extraction.py new file mode 100644 index 0000000000000000000000000000000000000000..4ddab944b8a3365209b8e92af38241d297974122 --- /dev/null +++ b/models/research/delf/delf/python/detect_to_retrieve/aggregation_extraction.py @@ -0,0 +1,193 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Library to extract/save feature aggregation.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import csv +import os +import time + +import numpy as np +import tensorflow as tf + +from google.protobuf import text_format +from delf import aggregation_config_pb2 +from delf import datum_io +from delf import feature_aggregation_extractor +from delf import feature_io + +# Aliases for aggregation types. +_VLAD = aggregation_config_pb2.AggregationConfig.VLAD +_ASMK = aggregation_config_pb2.AggregationConfig.ASMK +_ASMK_STAR = aggregation_config_pb2.AggregationConfig.ASMK_STAR + +# Extensions. +_DELF_EXTENSION = '.delf' +_VLAD_EXTENSION_SUFFIX = 'vlad' +_ASMK_EXTENSION_SUFFIX = 'asmk' +_ASMK_STAR_EXTENSION_SUFFIX = 'asmk_star' + +# Pace to report extraction log. +_STATUS_CHECK_ITERATIONS = 50 + + +def _ReadMappingBasenameToBoxNames(input_path, index_image_names): + """Reads mapping from image name to DELF file names for each box. + + Args: + input_path: Path to CSV file containing mapping. + index_image_names: List containing index image names, in order, for the + dataset under consideration. + + Returns: + images_to_box_feature_files: Dict. key=string (image name); value=list of + strings (file names containing DELF features for boxes). 
+ """ + images_to_box_feature_files = {} + with tf.io.gfile.GFile(input_path, 'r') as f: + reader = csv.DictReader(f) + for row in reader: + index_image_name = index_image_names[int(row['index_image_id'])] + if index_image_name not in images_to_box_feature_files: + images_to_box_feature_files[index_image_name] = [] + + images_to_box_feature_files[index_image_name].append(row['name']) + + return images_to_box_feature_files + + +def ExtractAggregatedRepresentationsToFiles(image_names, features_dir, + aggregation_config_path, + mapping_path, + output_aggregation_dir): + """Extracts aggregated feature representations, saving them to files. + + It checks if the aggregated representation for an image already exists, + and skips computation for those. + + Args: + image_names: List of image names. These are used to compose input file names + for the feature files, and the output file names for aggregated + representations. + features_dir: Directory where DELF features are located. + aggregation_config_path: Path to AggregationConfig proto text file with + configuration to be used for extraction. + mapping_path: Optional CSV file which maps each .delf file name to the index + image ID and detected box ID. If regional aggregation is performed, this + should be set. Otherwise, this is ignored. + output_aggregation_dir: Directory where aggregation output will be written + to. + + Raises: + ValueError: If AggregationConfig is malformed, or `mapping_path` is + missing. + """ + num_images = len(image_names) + + # Parse AggregationConfig proto, and select output extension. + config = aggregation_config_pb2.AggregationConfig() + with tf.io.gfile.GFile(aggregation_config_path, 'r') as f: + text_format.Merge(f.read(), config) + output_extension = '.' + if config.use_regional_aggregation: + output_extension += 'r' + if config.aggregation_type == _VLAD: + output_extension += _VLAD_EXTENSION_SUFFIX + elif config.aggregation_type == _ASMK: + output_extension += _ASMK_EXTENSION_SUFFIX + elif config.aggregation_type == _ASMK_STAR: + output_extension += _ASMK_STAR_EXTENSION_SUFFIX + else: + raise ValueError('Invalid aggregation type: %d' % config.aggregation_type) + + # Read index mapping path, if provided. + if mapping_path: + images_to_box_feature_files = _ReadMappingBasenameToBoxNames( + mapping_path, image_names) + + # Create output directory if necessary. + if not tf.io.gfile.exists(output_aggregation_dir): + tf.io.gfile.makedirs(output_aggregation_dir) + + extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation( + config) + + start = time.time() + for i in range(num_images): + if i == 0: + print('Starting to extract aggregation from images...') + elif i % _STATUS_CHECK_ITERATIONS == 0: + elapsed = (time.time() - start) + print('Processing image %d out of %d, last %d ' + 'images took %f seconds' % + (i, num_images, _STATUS_CHECK_ITERATIONS, elapsed)) + start = time.time() + + image_name = image_names[i] + + # Compose output file name, skip extraction for this image if it already + # exists. + output_aggregation_filename = os.path.join(output_aggregation_dir, + image_name + output_extension) + if tf.io.gfile.exists(output_aggregation_filename): + print('Skipping %s' % image_name) + continue + + # Load DELF features. 
+ if config.use_regional_aggregation: + if not mapping_path: + raise ValueError( + 'Requested regional aggregation, but mapping_path was not ' + 'provided') + descriptors_list = [] + num_features_per_box = [] + for box_feature_file in images_to_box_feature_files[image_name]: + delf_filename = os.path.join(features_dir, + box_feature_file + _DELF_EXTENSION) + _, _, box_descriptors, _, _ = feature_io.ReadFromFile(delf_filename) + # If `box_descriptors` is empty, reshape it such that it can be + # concatenated with other descriptors. + if not box_descriptors.shape[0]: + box_descriptors = np.reshape(box_descriptors, + [0, config.feature_dimensionality]) + descriptors_list.append(box_descriptors) + num_features_per_box.append(box_descriptors.shape[0]) + + descriptors = np.concatenate(descriptors_list) + else: + input_delf_filename = os.path.join(features_dir, + image_name + _DELF_EXTENSION) + _, _, descriptors, _, _ = feature_io.ReadFromFile(input_delf_filename) + # If `descriptors` is empty, reshape it to avoid extraction failure. + if not descriptors.shape[0]: + descriptors = np.reshape(descriptors, + [0, config.feature_dimensionality]) + num_features_per_box = None + + # Extract and save aggregation. If using VLAD, only + # `aggregated_descriptors` needs to be saved. + (aggregated_descriptors, + feature_visual_words) = extractor.Extract(descriptors, + num_features_per_box) + if config.aggregation_type == _VLAD: + datum_io.WriteToFile(aggregated_descriptors, + output_aggregation_filename) + else: + datum_io.WritePairToFile(aggregated_descriptors, + feature_visual_words.astype('uint32'), + output_aggregation_filename) diff --git a/models/research/delf/delf/python/detect_to_retrieve/boxes_and_features_extraction.py b/models/research/delf/delf/python/detect_to_retrieve/boxes_and_features_extraction.py new file mode 100644 index 0000000000000000000000000000000000000000..1faef983b2e0413e2f2746c5d56b5e62045e5a39 --- /dev/null +++ b/models/research/delf/delf/python/detect_to_retrieve/boxes_and_features_extraction.py @@ -0,0 +1,202 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Library to extract/save boxes and DELF features.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import csv +import math +import os +import time + +import numpy as np +import tensorflow as tf + +from google.protobuf import text_format +from delf import delf_config_pb2 +from delf import box_io +from delf import feature_io +from delf import utils +from delf import detector +from delf import extractor + +# Extension of feature files. +_BOX_EXTENSION = '.boxes' +_DELF_EXTENSION = '.delf' + +# Pace to report extraction log. +_STATUS_CHECK_ITERATIONS = 100 + + +def _WriteMappingBasenameToIds(index_names_ids_and_boxes, output_path): + """Helper function to write CSV mapping from DELF file name to IDs. 
+ + Args: + index_names_ids_and_boxes: List containing 3-element lists with name, image + ID and box ID. + output_path: Output CSV path. + """ + with tf.io.gfile.GFile(output_path, 'w') as f: + csv_writer = csv.DictWriter( + f, fieldnames=['name', 'index_image_id', 'box_id']) + csv_writer.writeheader() + for name_imid_boxid in index_names_ids_and_boxes: + csv_writer.writerow({ + 'name': name_imid_boxid[0], + 'index_image_id': name_imid_boxid[1], + 'box_id': name_imid_boxid[2], + }) + + +def ExtractBoxesAndFeaturesToFiles(image_names, image_paths, delf_config_path, + detector_model_dir, detector_thresh, + output_features_dir, output_boxes_dir, + output_mapping): + """Extracts boxes and features, saving them to files. + + Boxes are saved to .boxes files. DELF features are extracted for + the entire image and saved into .delf files. In addition, DELF + features are extracted for each high-confidence bounding box in the image, and + saved into files named _0.delf, _1.delf, etc. + + It checks if descriptors/boxes already exist, and skips computation for those. + + Args: + image_names: List of image names. These are used to compose output file + names for boxes and features. + image_paths: List of image paths. image_paths[i] is the path for the image + named by image_names[i]. `image_names` and `image_paths` must have the + same number of elements. + delf_config_path: Path to DelfConfig proto text file. + detector_model_dir: Directory where detector SavedModel is located. + detector_thresh: Threshold used to decide if an image's detected box + undergoes feature extraction. + output_features_dir: Directory where DELF features will be written to. + output_boxes_dir: Directory where detected boxes will be written to. + output_mapping: CSV file which maps each .delf file name to the image ID and + detected box ID. + + Raises: + ValueError: If len(image_names) and len(image_paths) are different. + """ + num_images = len(image_names) + if len(image_paths) != num_images: + raise ValueError( + 'image_names and image_paths have different number of items') + + # Parse DelfConfig proto. + config = delf_config_pb2.DelfConfig() + with tf.io.gfile.GFile(delf_config_path, 'r') as f: + text_format.Merge(f.read(), config) + + # Create output directories if necessary. + if not tf.io.gfile.exists(output_features_dir): + tf.io.gfile.makedirs(output_features_dir) + if not tf.io.gfile.exists(output_boxes_dir): + tf.io.gfile.makedirs(output_boxes_dir) + if not tf.io.gfile.exists(os.path.dirname(output_mapping)): + tf.io.gfile.makedirs(os.path.dirname(output_mapping)) + + names_ids_and_boxes = [] + detector_fn = detector.MakeDetector(detector_model_dir) + delf_extractor_fn = extractor.MakeExtractor(config) + + start = time.time() + for i in range(num_images): + if i == 0: + print('Starting to extract features/boxes...') + elif i % _STATUS_CHECK_ITERATIONS == 0: + elapsed = (time.time() - start) + print('Processing image %d out of %d, last %d ' + 'images took %f seconds' % + (i, num_images, _STATUS_CHECK_ITERATIONS, elapsed)) + start = time.time() + + image_name = image_names[i] + output_feature_filename_whole_image = os.path.join( + output_features_dir, image_name + _DELF_EXTENSION) + output_box_filename = os.path.join(output_boxes_dir, + image_name + _BOX_EXTENSION) + + pil_im = utils.RgbLoader(image_paths[i]) + width, height = pil_im.size + + # Extract and save boxes. 
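+    # Boxes cached on disk by a previous run are reused; otherwise the
+    # detector is run on a single-image batch and its output is saved.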
+ if tf.io.gfile.exists(output_box_filename): + print('Skipping box computation for %s' % image_name) + (boxes_out, scores_out, + class_indices_out) = box_io.ReadFromFile(output_box_filename) + else: + (boxes_out, scores_out, + class_indices_out) = detector_fn(np.expand_dims(pil_im, 0)) + # Using only one image per batch. + boxes_out = boxes_out[0] + scores_out = scores_out[0] + class_indices_out = class_indices_out[0] + box_io.WriteToFile(output_box_filename, boxes_out, scores_out, + class_indices_out) + + # Select boxes with scores greater than threshold. Those will be the + # ones with extracted DELF features (besides the whole image, whose DELF + # features are extracted in all cases). + num_delf_files = 1 + selected_boxes = [] + for box_ind, box in enumerate(boxes_out): + if scores_out[box_ind] >= detector_thresh: + selected_boxes.append(box) + num_delf_files += len(selected_boxes) + + # Extract and save DELF features. + for delf_file_ind in range(num_delf_files): + if delf_file_ind == 0: + box_name = image_name + output_feature_filename = output_feature_filename_whole_image + else: + box_name = image_name + '_' + str(delf_file_ind - 1) + output_feature_filename = os.path.join(output_features_dir, + box_name + _DELF_EXTENSION) + + names_ids_and_boxes.append([box_name, i, delf_file_ind - 1]) + + if tf.io.gfile.exists(output_feature_filename): + print('Skipping DELF computation for %s' % box_name) + continue + + if delf_file_ind >= 1: + bbox_for_cropping = selected_boxes[delf_file_ind - 1] + bbox_for_cropping_pil_convention = [ + int(math.floor(bbox_for_cropping[1] * width)), + int(math.floor(bbox_for_cropping[0] * height)), + int(math.ceil(bbox_for_cropping[3] * width)), + int(math.ceil(bbox_for_cropping[2] * height)) + ] + pil_cropped_im = pil_im.crop(bbox_for_cropping_pil_convention) + im = np.array(pil_cropped_im) + else: + im = np.array(pil_im) + + extracted_features = delf_extractor_fn(im) + locations_out = extracted_features['local_features']['locations'] + descriptors_out = extracted_features['local_features']['descriptors'] + feature_scales_out = extracted_features['local_features']['scales'] + attention_out = extracted_features['local_features']['attention'] + + feature_io.WriteToFile(output_feature_filename, locations_out, + feature_scales_out, descriptors_out, attention_out) + + # Save mapping from output DELF name to image id and box id. + _WriteMappingBasenameToIds(names_ids_and_boxes, output_mapping) diff --git a/models/research/delf/delf/python/detect_to_retrieve/cluster_delf_features.py b/models/research/delf/delf/python/detect_to_retrieve/cluster_delf_features.py new file mode 100644 index 0000000000000000000000000000000000000000..9ddda8e4d0cae7950e76383950aab976249f3461 --- /dev/null +++ b/models/research/delf/delf/python/detect_to_retrieve/cluster_delf_features.py @@ -0,0 +1,213 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+"""Clusters DELF features using the K-means algorithm.
+
+All DELF local feature descriptors for a given dataset's index images are loaded
+as the input.
+
+Note that:
+- we only use features extracted from whole images (no features from boxes are
+  used).
+- the codebook should be trained on Paris images for Oxford retrieval
+  experiments, and vice-versa.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import argparse
+import os
+import sys
+import time
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.platform import app
+from delf import feature_io
+from delf.python.detect_to_retrieve import dataset
+
+cmd_args = None
+
+# Extensions.
+_DELF_EXTENSION = '.delf'
+
+# Default DELF dimensionality.
+_DELF_DIM = 128
+
+# Pace to report log when collecting features.
+_STATUS_CHECK_ITERATIONS = 100
+
+
+class _IteratorInitHook(tf.estimator.SessionRunHook):
+  """Hook to initialize data iterator after session is created."""
+
+  def __init__(self):
+    super(_IteratorInitHook, self).__init__()
+    self.iterator_initializer_fn = None
+
+  def after_create_session(self, session, coord):
+    """Initialize the iterator after the session has been created."""
+    del coord
+    self.iterator_initializer_fn(session)
+
+
+def main(argv):
+  if len(argv) > 1:
+    raise RuntimeError('Too many command-line arguments.')
+
+  # Process output directory.
+  if tf.io.gfile.exists(cmd_args.output_cluster_dir):
+    raise RuntimeError(
+        'output_cluster_dir = %s already exists. This may indicate that a '
+        'previous run already wrote checkpoints in this directory, which would '
+        'lead to incorrect training. Please re-run this script by specifying a '
+        'directory that does not yet exist.' % cmd_args.output_cluster_dir)
+  else:
+    tf.io.gfile.makedirs(cmd_args.output_cluster_dir)
+
+  # Read list of index images from dataset file.
+  print('Reading list of index images from dataset file...')
+  _, index_list, _ = dataset.ReadDatasetFile(cmd_args.dataset_file_path)
+  num_images = len(index_list)
+  print('done! Found %d images' % num_images)
+
+  # Loop over list of index images and collect DELF features.
+  features_for_clustering = []
+  start = time.clock()
+  print('Starting to collect features from index images...')
+  for i in range(num_images):
+    if i > 0 and i % _STATUS_CHECK_ITERATIONS == 0:
+      elapsed = (time.clock() - start)
+      print('Processing index image %d out of %d, last %d '
+            'images took %f seconds' %
+            (i, num_images, _STATUS_CHECK_ITERATIONS, elapsed))
+      start = time.clock()
+
+    features_filename = index_list[i] + _DELF_EXTENSION
+    features_fullpath = os.path.join(cmd_args.features_dir, features_filename)
+    _, _, features, _, _ = feature_io.ReadFromFile(features_fullpath)
+    if features.size != 0:
+      assert features.shape[1] == _DELF_DIM
+      for feature in features:
+        features_for_clustering.append(feature)
+
+  features_for_clustering = np.array(features_for_clustering, dtype=np.float32)
+  print('All features were loaded! There are %d features, each with %d '
+        'dimensions' %
+        (features_for_clustering.shape[0], features_for_clustering.shape[1]))
+
+  # Run K-means clustering.
+  def _get_input_fn():
+    """Helper function to create input function and hook for training.
+
+    Returns:
+      input_fn: Input function for k-means Estimator training.
+      init_hook: Hook used to load data during training.
+ """ + init_hook = _IteratorInitHook() + + def _input_fn(): + """Produces tf.data.Dataset object for k-means training. + + Returns: + Tensor with the data for training. + """ + features_placeholder = tf.compat.v1.placeholder( + tf.float32, features_for_clustering.shape) + delf_dataset = tf.data.Dataset.from_tensor_slices((features_placeholder)) + delf_dataset = delf_dataset.shuffle(1000).batch( + features_for_clustering.shape[0]) + iterator = delf_dataset.make_initializable_iterator() + + def _initializer_fn(sess): + """Initialize dataset iterator, feed in the data.""" + sess.run( + iterator.initializer, + feed_dict={features_placeholder: features_for_clustering}) + + init_hook.iterator_initializer_fn = _initializer_fn + return iterator.get_next() + + return _input_fn, init_hook + + input_fn, init_hook = _get_input_fn() + + kmeans = tf.compat.v1.estimator.experimental.KMeans( + num_clusters=cmd_args.num_clusters, + model_dir=cmd_args.output_cluster_dir, + use_mini_batch=False, + ) + + print('Starting K-means clustering...') + start = time.clock() + for i in range(cmd_args.num_iterations): + kmeans.train(input_fn, hooks=[init_hook]) + average_sum_squared_error = kmeans.evaluate( + input_fn, hooks=[init_hook])['score'] / features_for_clustering.shape[0] + elapsed = (time.clock() - start) + print('K-means iteration %d (out of %d) took %f seconds, ' + 'average-sum-of-squares: %f' % + (i, cmd_args.num_iterations, elapsed, average_sum_squared_error)) + start = time.clock() + + print('K-means clustering finished!') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.register('type', 'bool', lambda v: v.lower() == 'true') + parser.add_argument( + '--dataset_file_path', + type=str, + default='/tmp/gnd_roxford5k.mat', + help=""" + Dataset file for Revisited Oxford or Paris dataset, in .mat format. The + list of index images loaded from this file is used to collect local + features, which are assumed to be in .delf file format. + """) + parser.add_argument( + '--features_dir', + type=str, + default='/tmp/features', + help=""" + Directory where DELF feature files are to be found. + """) + parser.add_argument( + '--num_clusters', + type=int, + default=1024, + help=""" + Number of clusters to use. + """) + parser.add_argument( + '--num_iterations', + type=int, + default=50, + help=""" + Number of iterations to use. + """) + parser.add_argument( + '--output_cluster_dir', + type=str, + default='/tmp/cluster', + help=""" + Directory where clustering outputs are written to. This directory should + not exist before running this script; it will be created during + clustering. + """) + cmd_args, unparsed = parser.parse_known_args() + app.run(main=main, argv=[sys.argv[0]] + unparsed) diff --git a/models/research/delf/delf/python/detect_to_retrieve/dataset.py b/models/research/delf/delf/python/detect_to_retrieve/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..9a1e6b247895aa7bd8022d3a2fb87b878bbb3b38 --- /dev/null +++ b/models/research/delf/delf/python/detect_to_retrieve/dataset.py @@ -0,0 +1,469 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Python library to parse ground-truth/evaluate on Revisited datasets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from scipy.io import matlab +import tensorflow as tf + +_GROUND_TRUTH_KEYS = ['easy', 'hard', 'junk'] + + +def ReadDatasetFile(dataset_file_path): + """Reads dataset file in Revisited Oxford/Paris ".mat" format. + + Args: + dataset_file_path: Path to dataset file, in .mat format. + + Returns: + query_list: List of query image names. + index_list: List of index image names. + ground_truth: List containing ground-truth information for dataset. Each + entry is a dict corresponding to the ground-truth information for a query. + The dict may have keys 'easy', 'hard', or 'junk', mapping to a NumPy + array of integers; additionally, it has a key 'bbx' mapping to a NumPy + array of floats with bounding box coordinates. + """ + with tf.io.gfile.GFile(dataset_file_path, 'rb') as f: + cfg = matlab.loadmat(f) + + # Parse outputs according to the specificities of the dataset file. + query_list = [str(im_array[0]) for im_array in np.squeeze(cfg['qimlist'])] + index_list = [str(im_array[0]) for im_array in np.squeeze(cfg['imlist'])] + ground_truth_raw = np.squeeze(cfg['gnd']) + ground_truth = [] + for query_ground_truth_raw in ground_truth_raw: + query_ground_truth = {} + for ground_truth_key in _GROUND_TRUTH_KEYS: + if ground_truth_key in query_ground_truth_raw.dtype.names: + adjusted_labels = query_ground_truth_raw[ground_truth_key] - 1 + query_ground_truth[ground_truth_key] = adjusted_labels.flatten() + + query_ground_truth['bbx'] = np.squeeze(query_ground_truth_raw['bbx']) + ground_truth.append(query_ground_truth) + + return query_list, index_list, ground_truth + + +def _ParseGroundTruth(ok_list, junk_list): + """Constructs dictionary of ok/junk indices for a data subset and query. + + Args: + ok_list: List of NumPy arrays containing true positive indices for query. + junk_list: List of NumPy arrays containing ignored indices for query. + + Returns: + ok_junk_dict: Dict mapping 'ok' and 'junk' strings to NumPy array of + indices. + """ + ok_junk_dict = {} + ok_junk_dict['ok'] = np.concatenate(ok_list) + ok_junk_dict['junk'] = np.concatenate(junk_list) + return ok_junk_dict + + +def ParseEasyMediumHardGroundTruth(ground_truth): + """Parses easy/medium/hard ground-truth from Revisited datasets. + + Args: + ground_truth: Usually the output from ReadDatasetFile(). List containing + ground-truth information for dataset. Each entry is a dict corresponding + to the ground-truth information for a query. The dict must have keys + 'easy', 'hard', and 'junk', mapping to a NumPy array of integers. + + Returns: + easy_ground_truth: List containing ground-truth information for easy subset + of dataset. Each entry is a dict corresponding to the ground-truth + information for a query. The dict has keys 'ok' and 'junk', mapping to a + NumPy array of integers. 
+ medium_ground_truth: Same as `easy_ground_truth`, but for the medium subset. + hard_ground_truth: Same as `easy_ground_truth`, but for the hard subset. + """ + num_queries = len(ground_truth) + + easy_ground_truth = [] + medium_ground_truth = [] + hard_ground_truth = [] + for i in range(num_queries): + easy_ground_truth.append( + _ParseGroundTruth([ground_truth[i]['easy']], + [ground_truth[i]['junk'], ground_truth[i]['hard']])) + medium_ground_truth.append( + _ParseGroundTruth([ground_truth[i]['easy'], ground_truth[i]['hard']], + [ground_truth[i]['junk']])) + hard_ground_truth.append( + _ParseGroundTruth([ground_truth[i]['hard']], + [ground_truth[i]['junk'], ground_truth[i]['easy']])) + + return easy_ground_truth, medium_ground_truth, hard_ground_truth + + +def AdjustPositiveRanks(positive_ranks, junk_ranks): + """Adjusts positive ranks based on junk ranks. + + Args: + positive_ranks: Sorted 1D NumPy integer array. + junk_ranks: Sorted 1D NumPy integer array. + + Returns: + adjusted_positive_ranks: Sorted 1D NumPy array. + """ + if not junk_ranks.size: + return positive_ranks + + adjusted_positive_ranks = positive_ranks + j = 0 + for i, positive_index in enumerate(positive_ranks): + while (j < len(junk_ranks) and positive_index > junk_ranks[j]): + j += 1 + + adjusted_positive_ranks[i] -= j + + return adjusted_positive_ranks + + +def ComputeAveragePrecision(positive_ranks): + """Computes average precision according to dataset convention. + + It assumes that `positive_ranks` contains the ranks for all expected positive + index images to be retrieved. If `positive_ranks` is empty, returns + `average_precision` = 0. + + Note that average precision computation here does NOT use the finite sum + method (see + https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Average_precision) + which is common in information retrieval literature. Instead, the method + implemented here integrates over the precision-recall curve by averaging two + adjacent precision points, then multiplying by the recall step. This is the + convention for the Revisited Oxford/Paris datasets. + + Args: + positive_ranks: Sorted 1D NumPy integer array, zero-indexed. + + Returns: + average_precision: Float. + """ + average_precision = 0.0 + + num_expected_positives = len(positive_ranks) + if not num_expected_positives: + return average_precision + + recall_step = 1.0 / num_expected_positives + for i, rank in enumerate(positive_ranks): + if not rank: + left_precision = 1.0 + else: + left_precision = i / rank + + right_precision = (i + 1) / (rank + 1) + average_precision += (left_precision + right_precision) * recall_step / 2 + + return average_precision + + +def ComputePRAtRanks(positive_ranks, desired_pr_ranks): + """Computes precision/recall at desired ranks. + + It assumes that `positive_ranks` contains the ranks for all expected positive + index images to be retrieved. If `positive_ranks` is empty, return all-zeros + `precisions`/`recalls`. + + If a desired rank is larger than the last positive rank, its precision is + computed based on the last positive rank. For example, if `desired_pr_ranks` + is [10] and `positive_ranks` = [0, 7] --> `precisions` = [0.25], `recalls` = + [1.0]. + + Args: + positive_ranks: 1D NumPy integer array, zero-indexed. + desired_pr_ranks: List of integers containing the desired precision/recall + ranks to be reported. Eg, if precision@1/recall@1 and + precision@10/recall@10 are desired, this should be set to [1, 10]. 
+ + Returns: + precisions: Precision @ `desired_pr_ranks` (NumPy array of + floats, with shape [len(desired_pr_ranks)]). + recalls: Recall @ `desired_pr_ranks` (NumPy array of floats, with + shape [len(desired_pr_ranks)]). + """ + num_desired_pr_ranks = len(desired_pr_ranks) + precisions = np.zeros([num_desired_pr_ranks]) + recalls = np.zeros([num_desired_pr_ranks]) + + num_expected_positives = len(positive_ranks) + if not num_expected_positives: + return precisions, recalls + + positive_ranks_one_indexed = positive_ranks + 1 + for i, desired_pr_rank in enumerate(desired_pr_ranks): + recalls[i] = np.sum( + positive_ranks_one_indexed <= desired_pr_rank) / num_expected_positives + + # If `desired_pr_rank` is larger than last positive's rank, only compute + # precision with respect to last positive's position. + precision_rank = min(max(positive_ranks_one_indexed), desired_pr_rank) + precisions[i] = np.sum( + positive_ranks_one_indexed <= precision_rank) / precision_rank + + return precisions, recalls + + +def ComputeMetrics(sorted_index_ids, ground_truth, desired_pr_ranks): + """Computes metrics for retrieval results on the Revisited datasets. + + If there are no valid ground-truth index images for a given query, the metric + results for the given query (`average_precisions`, `precisions` and `recalls`) + are set to NaN, and they are not taken into account when computing the + aggregated metrics (`mean_average_precision`, `mean_precisions` and + `mean_recalls`) over all queries. + + Args: + sorted_index_ids: Integer NumPy array of shape [#queries, #index_images]. + For each query, contains an array denoting the most relevant index images, + sorted from most to least relevant. + ground_truth: List containing ground-truth information for dataset. Each + entry is a dict corresponding to the ground-truth information for a query. + The dict has keys 'ok' and 'junk', mapping to a NumPy array of integers. + desired_pr_ranks: List of integers containing the desired precision/recall + ranks to be reported. Eg, if precision@1/recall@1 and + precision@10/recall@10 are desired, this should be set to [1, 10]. The + largest item should be <= #index_images. + + Returns: + mean_average_precision: Mean average precision (float). + mean_precisions: Mean precision @ `desired_pr_ranks` (NumPy array of + floats, with shape [len(desired_pr_ranks)]). + mean_recalls: Mean recall @ `desired_pr_ranks` (NumPy array of floats, with + shape [len(desired_pr_ranks)]). + average_precisions: Average precision for each query (NumPy array of floats, + with shape [#queries]). + precisions: Precision @ `desired_pr_ranks`, for each query (NumPy array of + floats, with shape [#queries, len(desired_pr_ranks)]). + recalls: Recall @ `desired_pr_ranks`, for each query (NumPy array of + floats, with shape [#queries, len(desired_pr_ranks)]). + + Raises: + ValueError: If largest desired PR rank in `desired_pr_ranks` > + #index_images. + """ + num_queries, num_index_images = sorted_index_ids.shape + num_desired_pr_ranks = len(desired_pr_ranks) + + sorted_desired_pr_ranks = sorted(desired_pr_ranks) + + if sorted_desired_pr_ranks[-1] > num_index_images: + raise ValueError( + 'Requested PR ranks up to %d, however there are only %d images' % + (sorted_desired_pr_ranks[-1], num_index_images)) + + # Instantiate all outputs, then loop over each query and gather metrics. 
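+  # Queries whose 'ok' list is empty receive NaN metrics and are excluded from
+  # the aggregated means (tracked via num_empty_gt_queries); junk images only
+  # shift the ranks of true positives via AdjustPositiveRanks and are otherwise
+  # ignored.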
+ mean_average_precision = 0.0 + mean_precisions = np.zeros([num_desired_pr_ranks]) + mean_recalls = np.zeros([num_desired_pr_ranks]) + average_precisions = np.zeros([num_queries]) + precisions = np.zeros([num_queries, num_desired_pr_ranks]) + recalls = np.zeros([num_queries, num_desired_pr_ranks]) + num_empty_gt_queries = 0 + for i in range(num_queries): + ok_index_images = ground_truth[i]['ok'] + junk_index_images = ground_truth[i]['junk'] + + if not ok_index_images.size: + average_precisions[i] = float('nan') + precisions[i, :] = float('nan') + recalls[i, :] = float('nan') + num_empty_gt_queries += 1 + continue + + positive_ranks = np.arange(num_index_images)[np.in1d( + sorted_index_ids[i], ok_index_images)] + junk_ranks = np.arange(num_index_images)[np.in1d(sorted_index_ids[i], + junk_index_images)] + + adjusted_positive_ranks = AdjustPositiveRanks(positive_ranks, junk_ranks) + + average_precisions[i] = ComputeAveragePrecision(adjusted_positive_ranks) + precisions[i, :], recalls[i, :] = ComputePRAtRanks(adjusted_positive_ranks, + desired_pr_ranks) + + mean_average_precision += average_precisions[i] + mean_precisions += precisions[i, :] + mean_recalls += recalls[i, :] + + # Normalize aggregated metrics by number of queries. + num_valid_queries = num_queries - num_empty_gt_queries + mean_average_precision /= num_valid_queries + mean_precisions /= num_valid_queries + mean_recalls /= num_valid_queries + + return (mean_average_precision, mean_precisions, mean_recalls, + average_precisions, precisions, recalls) + + +def SaveMetricsFile(mean_average_precision, mean_precisions, mean_recalls, + pr_ranks, output_path): + """Saves aggregated retrieval metrics to text file. + + Args: + mean_average_precision: Dict mapping each dataset protocol to a float. + mean_precisions: Dict mapping each dataset protocol to a NumPy array of + floats with shape [len(pr_ranks)]. + mean_recalls: Dict mapping each dataset protocol to a NumPy array of floats + with shape [len(pr_ranks)]. + pr_ranks: List of integers. + output_path: Full file path. + """ + with tf.io.gfile.GFile(output_path, 'w') as f: + for k in sorted(mean_average_precision.keys()): + f.write('{}\n mAP={}\n mP@k{} {}\n mR@k{} {}\n'.format( + k, np.around(mean_average_precision[k] * 100, decimals=2), + np.array(pr_ranks), np.around(mean_precisions[k] * 100, decimals=2), + np.array(pr_ranks), np.around(mean_recalls[k] * 100, decimals=2))) + + +def _ParseSpaceSeparatedStringsInBrackets(line, prefixes, ind): + """Parses line containing space-separated strings in brackets. + + Args: + line: String, containing line in metrics file with mP@k or mR@k figures. + prefixes: Tuple/list of strings, containing valid prefixes. + ind: Integer indicating which field within brackets is parsed. + + Yields: + entry: String format entry. + + Raises: + ValueError: If input line does not contain a valid prefix. + """ + for prefix in prefixes: + if line.startswith(prefix): + line = line[len(prefix):] + break + else: + raise ValueError('Line %s is malformed, cannot find valid prefixes' % line) + + for entry in line.split('[')[ind].split(']')[0].split(): + yield entry + + +def _ParsePrRanks(line): + """Parses PR ranks from mP@k line in metrics file. + + Args: + line: String, containing line in metrics file with mP@k figures. + + Returns: + pr_ranks: List of integers, containing used ranks. + + Raises: + ValueError: If input line is malformed. 
+ """ + return [ + int(pr_rank) for pr_rank in _ParseSpaceSeparatedStringsInBrackets( + line, [' mP@k['], 0) if pr_rank + ] + + +def _ParsePrScores(line, num_pr_ranks): + """Parses PR scores from line in metrics file. + + Args: + line: String, containing line in metrics file with mP@k or mR@k figures. + num_pr_ranks: Integer, number of scores that should be in output list. + + Returns: + pr_scores: List of floats, containing scores. + + Raises: + ValueError: If input line is malformed. + """ + pr_scores = [ + float(pr_score) for pr_score in _ParseSpaceSeparatedStringsInBrackets( + line, (' mP@k[', ' mR@k['), 1) if pr_score + ] + + if len(pr_scores) != num_pr_ranks: + raise ValueError('Line %s is malformed, expected %d scores but found %d' % + (line, num_pr_ranks, len(pr_scores))) + + return pr_scores + + +def ReadMetricsFile(metrics_path): + """Reads aggregated retrieval metrics from text file. + + Args: + metrics_path: Full file path, containing aggregated retrieval metrics. + + Returns: + mean_average_precision: Dict mapping each dataset protocol to a float. + pr_ranks: List of integer ranks used in aggregated recall/precision metrics. + mean_precisions: Dict mapping each dataset protocol to a NumPy array of + floats with shape [len(`pr_ranks`)]. + mean_recalls: Dict mapping each dataset protocol to a NumPy array of floats + with shape [len(`pr_ranks`)]. + + Raises: + ValueError: If input file is malformed. + """ + with tf.io.gfile.GFile(metrics_path, 'r') as f: + file_contents_stripped = [l.rstrip() for l in f] + + if len(file_contents_stripped) % 4: + raise ValueError( + 'Malformed input %s: number of lines must be a multiple of 4, ' + 'but it is %d' % (metrics_path, len(file_contents_stripped))) + + mean_average_precision = {} + pr_ranks = [] + mean_precisions = {} + mean_recalls = {} + protocols = set() + for i in range(0, len(file_contents_stripped), 4): + protocol = file_contents_stripped[i] + if protocol in protocols: + raise ValueError( + 'Malformed input %s: protocol %s is found a second time' % + (metrics_path, protocol)) + protocols.add(protocol) + + # Parse mAP. + mean_average_precision[protocol] = float( + file_contents_stripped[i + 1].split('=')[1]) / 100.0 + + # Parse (or check consistency of) pr_ranks. + parsed_pr_ranks = _ParsePrRanks(file_contents_stripped[i + 2]) + if not pr_ranks: + pr_ranks = parsed_pr_ranks + else: + if parsed_pr_ranks != pr_ranks: + raise ValueError('Malformed input %s: inconsistent PR ranks' % + metrics_path) + + # Parse mean precisions. + mean_precisions[protocol] = np.array( + _ParsePrScores(file_contents_stripped[i + 2], len(pr_ranks)), + dtype=float) / 100.0 + + # Parse mean recalls. + mean_recalls[protocol] = np.array( + _ParsePrScores(file_contents_stripped[i + 3], len(pr_ranks)), + dtype=float) / 100.0 + + return mean_average_precision, pr_ranks, mean_precisions, mean_recalls diff --git a/models/research/delf/delf/python/detect_to_retrieve/dataset_test.py b/models/research/delf/delf/python/detect_to_retrieve/dataset_test.py new file mode 100644 index 0000000000000000000000000000000000000000..8e742703b04210787ede0bfc945a9f305d59efc7 --- /dev/null +++ b/models/research/delf/delf/python/detect_to_retrieve/dataset_test.py @@ -0,0 +1,288 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for the python library parsing Revisited Oxford/Paris datasets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from absl import flags +import numpy as np +import tensorflow as tf + +from delf.python.detect_to_retrieve import dataset + +FLAGS = flags.FLAGS + + +class DatasetTest(tf.test.TestCase): + + def testParseEasyMediumHardGroundTruth(self): + # Define input. + ground_truth = [{ + 'easy': np.array([10, 56, 100]), + 'hard': np.array([0]), + 'junk': np.array([6, 90]) + }, { + 'easy': np.array([], dtype='int64'), + 'hard': [5], + 'junk': [99, 100] + }, { + 'easy': [33], + 'hard': [66, 99], + 'junk': np.array([], dtype='int64') + }] + + # Run tested function. + (easy_ground_truth, medium_ground_truth, + hard_ground_truth) = dataset.ParseEasyMediumHardGroundTruth(ground_truth) + + # Define expected outputs. + expected_easy_ground_truth = [{ + 'ok': np.array([10, 56, 100]), + 'junk': np.array([6, 90, 0]) + }, { + 'ok': np.array([], dtype='int64'), + 'junk': np.array([99, 100, 5]) + }, { + 'ok': np.array([33]), + 'junk': np.array([66, 99]) + }] + expected_medium_ground_truth = [{ + 'ok': np.array([10, 56, 100, 0]), + 'junk': np.array([6, 90]) + }, { + 'ok': np.array([5]), + 'junk': np.array([99, 100]) + }, { + 'ok': np.array([33, 66, 99]), + 'junk': np.array([], dtype='int64') + }] + expected_hard_ground_truth = [{ + 'ok': np.array([0]), + 'junk': np.array([6, 90, 10, 56, 100]) + }, { + 'ok': np.array([5]), + 'junk': np.array([99, 100]) + }, { + 'ok': np.array([66, 99]), + 'junk': np.array([33]) + }] + + # Compare actual versus expected. + def _AssertListOfDictsOfArraysAreEqual(ground_truth, expected_ground_truth): + """Helper function to compare ground-truth data. + + Args: + ground_truth: List of dicts of arrays. + expected_ground_truth: List of dicts of arrays. + """ + self.assertEqual(len(ground_truth), len(expected_ground_truth)) + + for i, ground_truth_entry in enumerate(ground_truth): + self.assertEqual(sorted(ground_truth_entry.keys()), ['junk', 'ok']) + self.assertAllEqual(ground_truth_entry['junk'], + expected_ground_truth[i]['junk']) + self.assertAllEqual(ground_truth_entry['ok'], + expected_ground_truth[i]['ok']) + + _AssertListOfDictsOfArraysAreEqual(easy_ground_truth, + expected_easy_ground_truth) + _AssertListOfDictsOfArraysAreEqual(medium_ground_truth, + expected_medium_ground_truth) + _AssertListOfDictsOfArraysAreEqual(hard_ground_truth, + expected_hard_ground_truth) + + def testAdjustPositiveRanksWorks(self): + # Define inputs. + positive_ranks = np.array([0, 2, 6, 10, 20]) + junk_ranks = np.array([1, 8, 9, 30]) + + # Run tested function. + adjusted_positive_ranks = dataset.AdjustPositiveRanks( + positive_ranks, junk_ranks) + + # Define expected output. + expected_adjusted_positive_ranks = [0, 1, 5, 7, 17] + + # Compare actual versus expected. + self.assertAllEqual(adjusted_positive_ranks, + expected_adjusted_positive_ranks) + + def testComputeAveragePrecisionWorks(self): + # Define input. 
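+    # A hand computation of the convention implemented by
+    # ComputeAveragePrecision: for positive_ranks = [0, 2, 5] the recall step is
+    # 1/3 and the per-positive terms are (1.0 + 1.0)/2 * 1/3, (1/2 + 2/3)/2 * 1/3
+    # and (2/5 + 3/6)/2 * 1/3, which sum to ~0.677778, the value asserted below.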
+ positive_ranks = [0, 2, 5] + + # Run tested function. + average_precision = dataset.ComputeAveragePrecision(positive_ranks) + + # Define expected output. + expected_average_precision = 0.677778 + + # Compare actual versus expected. + self.assertAllClose(average_precision, expected_average_precision) + + def testComputePRAtRanksWorks(self): + # Define inputs. + positive_ranks = np.array([0, 2, 5]) + desired_pr_ranks = np.array([1, 5, 10]) + + # Run tested function. + precisions, recalls = dataset.ComputePRAtRanks(positive_ranks, + desired_pr_ranks) + + # Define expected outputs. + expected_precisions = [1.0, 0.4, 0.5] + expected_recalls = [0.333333, 0.666667, 1.0] + + # Compare actual versus expected. + self.assertAllClose(precisions, expected_precisions) + self.assertAllClose(recalls, expected_recalls) + + def testComputeMetricsWorks(self): + # Define inputs: 3 queries. For the last one, there are no expected images + # to be retrieved + sorted_index_ids = np.array([[4, 2, 0, 1, 3], [0, 2, 4, 1, 3], + [0, 1, 2, 3, 4]]) + ground_truth = [{ + 'ok': np.array([0, 1]), + 'junk': np.array([2]) + }, { + 'ok': np.array([0, 4]), + 'junk': np.array([], dtype='int64') + }, { + 'ok': np.array([], dtype='int64'), + 'junk': np.array([], dtype='int64') + }] + desired_pr_ranks = [1, 2, 5] + + # Run tested function. + (mean_average_precision, mean_precisions, mean_recalls, average_precisions, + precisions, recalls) = dataset.ComputeMetrics(sorted_index_ids, + ground_truth, + desired_pr_ranks) + + # Define expected outputs. + expected_mean_average_precision = 0.604167 + expected_mean_precisions = [0.5, 0.5, 0.666667] + expected_mean_recalls = [0.25, 0.5, 1.0] + expected_average_precisions = [0.416667, 0.791667, float('nan')] + expected_precisions = [[0.0, 0.5, 0.666667], [1.0, 0.5, 0.666667], + [float('nan'), + float('nan'), + float('nan')]] + expected_recalls = [[0.0, 0.5, 1.0], [0.5, 0.5, 1.0], + [float('nan'), float('nan'), + float('nan')]] + + # Compare actual versus expected. + self.assertAllClose(mean_average_precision, expected_mean_average_precision) + self.assertAllClose(mean_precisions, expected_mean_precisions) + self.assertAllClose(mean_recalls, expected_mean_recalls) + self.assertAllClose(average_precisions, expected_average_precisions) + self.assertAllClose(precisions, expected_precisions) + self.assertAllClose(recalls, expected_recalls) + + def testSaveMetricsFileWorks(self): + # Define inputs. + mean_average_precision = {'hard': 0.7, 'medium': 0.9} + mean_precisions = { + 'hard': np.array([1.0, 0.8]), + 'medium': np.array([1.0, 1.0]) + } + mean_recalls = { + 'hard': np.array([0.5, 0.8]), + 'medium': np.array([0.5, 1.0]) + } + pr_ranks = [1, 5] + output_path = os.path.join(FLAGS.test_tmpdir, 'metrics.txt') + + # Run tested function. + dataset.SaveMetricsFile(mean_average_precision, mean_precisions, + mean_recalls, pr_ranks, output_path) + + # Define expected results. + expected_metrics = ('hard\n' + ' mAP=70.0\n' + ' mP@k[1 5] [100. 80.]\n' + ' mR@k[1 5] [50. 80.]\n' + 'medium\n' + ' mAP=90.0\n' + ' mP@k[1 5] [100. 100.]\n' + ' mR@k[1 5] [ 50. 100.]\n') + + # Parse actual results, and compare to expected. + with tf.io.gfile.GFile(output_path) as f: + metrics = f.read() + + self.assertEqual(metrics, expected_metrics) + + def testSaveAndReadMetricsWorks(self): + # Define inputs. 
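+    # SaveMetricsFile writes percentages (e.g. mAP=70.0 for 0.7) and
+    # ReadMetricsFile divides by 100 when parsing, so the save/read round trip
+    # below should reproduce these inputs.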
+ mean_average_precision = {'hard': 0.7, 'medium': 0.9} + mean_precisions = { + 'hard': np.array([1.0, 0.8]), + 'medium': np.array([1.0, 1.0]) + } + mean_recalls = { + 'hard': np.array([0.5, 0.8]), + 'medium': np.array([0.5, 1.0]) + } + pr_ranks = [1, 5] + output_path = os.path.join(FLAGS.test_tmpdir, 'metrics.txt') + + # Run tested functions. + dataset.SaveMetricsFile(mean_average_precision, mean_precisions, + mean_recalls, pr_ranks, output_path) + (read_mean_average_precision, read_pr_ranks, read_mean_precisions, + read_mean_recalls) = dataset.ReadMetricsFile(output_path) + + # Compares actual and expected metrics. + self.assertEqual(read_mean_average_precision, mean_average_precision) + self.assertEqual(read_pr_ranks, pr_ranks) + self.assertEqual(read_mean_precisions.keys(), mean_precisions.keys()) + self.assertAllEqual(read_mean_precisions['hard'], mean_precisions['hard']) + self.assertAllEqual(read_mean_precisions['medium'], + mean_precisions['medium']) + self.assertEqual(read_mean_recalls.keys(), mean_recalls.keys()) + self.assertAllEqual(read_mean_recalls['hard'], mean_recalls['hard']) + self.assertAllEqual(read_mean_recalls['medium'], mean_recalls['medium']) + + def testReadMetricsWithRepeatedProtocolFails(self): + # Define inputs. + input_path = os.path.join(FLAGS.test_tmpdir, 'metrics.txt') + with tf.io.gfile.GFile(input_path, 'w') as f: + f.write('hard\n' + ' mAP=70.0\n' + ' mP@k[1 5] [ 100. 80.]\n' + ' mR@k[1 5] [ 50. 80.]\n' + 'medium\n' + ' mAP=90.0\n' + ' mP@k[1 5] [ 100. 100.]\n' + ' mR@k[1 5] [ 50. 100.]\n' + 'medium\n' + ' mAP=90.0\n' + ' mP@k[1 5] [ 100. 100.]\n' + ' mR@k[1 5] [ 50. 100.]\n') + + # Run tested functions. + with self.assertRaisesRegex(ValueError, 'Malformed input'): + dataset.ReadMetricsFile(input_path) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/delf/delf/python/detect_to_retrieve/delf_gld_config.pbtxt b/models/research/delf/delf/python/detect_to_retrieve/delf_gld_config.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..046aed766ce8cee4b6309c8385d451cf20ad633a --- /dev/null +++ b/models/research/delf/delf/python/detect_to_retrieve/delf_gld_config.pbtxt @@ -0,0 +1,25 @@ +model_path: "parameters/delf_gld_20190411/model" +image_scales: .25 +image_scales: .3536 +image_scales: .5 +image_scales: .7071 +image_scales: 1.0 +image_scales: 1.4142 +image_scales: 2.0 +delf_local_config { + use_pca: true + # Note that for the exported model provided as an example, layer_name and + # iou_threshold are hard-coded in the checkpoint. So, the layer_name and + # iou_threshold variables here have no effect on the provided + # extract_features.py script. + layer_name: "resnet_v1_50/block3" + iou_threshold: 1.0 + max_feature_num: 1000 + score_threshold: 100.0 + pca_parameters { + mean_path: "parameters/delf_gld_20190411/pca/mean.datum" + projection_matrix_path: "parameters/delf_gld_20190411/pca/pca_proj_mat.datum" + pca_dim: 128 + use_whitening: false + } +} diff --git a/models/research/delf/delf/python/detect_to_retrieve/extract_aggregation.py b/models/research/delf/delf/python/detect_to_retrieve/extract_aggregation.py new file mode 100644 index 0000000000000000000000000000000000000000..f9a0fb3e6c62c0adc583ad3b30b809f36742d586 --- /dev/null +++ b/models/research/delf/delf/python/detect_to_retrieve/extract_aggregation.py @@ -0,0 +1,113 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Extracts aggregation for images from Revisited Oxford/Paris datasets. + +The program checks if the aggregated representation for an image already exists, +and skips computation for those. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import sys + +from tensorflow.python.platform import app +from delf.python.detect_to_retrieve import aggregation_extraction +from delf.python.detect_to_retrieve import dataset + +cmd_args = None + + +def main(argv): + if len(argv) > 1: + raise RuntimeError('Too many command-line arguments.') + + # Read list of images from dataset file. + print('Reading list of images from dataset file...') + query_list, index_list, _ = dataset.ReadDatasetFile( + cmd_args.dataset_file_path) + if cmd_args.use_query_images: + image_list = query_list + else: + image_list = index_list + num_images = len(image_list) + print('done! Found %d images' % num_images) + + aggregation_extraction.ExtractAggregatedRepresentationsToFiles( + image_names=image_list, + features_dir=cmd_args.features_dir, + aggregation_config_path=cmd_args.aggregation_config_path, + mapping_path=cmd_args.index_mapping_path, + output_aggregation_dir=cmd_args.output_aggregation_dir) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.register('type', 'bool', lambda v: v.lower() == 'true') + parser.add_argument( + '--aggregation_config_path', + type=str, + default='/tmp/aggregation_config.pbtxt', + help=""" + Path to AggregationConfig proto text file with configuration to be used + for extraction. + """) + parser.add_argument( + '--dataset_file_path', + type=str, + default='/tmp/gnd_roxford5k.mat', + help=""" + Dataset file for Revisited Oxford or Paris dataset, in .mat format. + """) + parser.add_argument( + '--use_query_images', + type=lambda x: (str(x).lower() == 'true'), + default=False, + help=""" + If True, processes the query images of the dataset. If False, processes + the database (ie, index) images. + """) + parser.add_argument( + '--features_dir', + type=str, + default='/tmp/features', + help=""" + Directory where image features are located, all in .delf format. + """) + parser.add_argument( + '--index_mapping_path', + type=str, + default='', + help=""" + Optional CSV file which maps each .delf file name to the index image ID + and detected box ID. If regional aggregation is performed, this should be + set. Otherwise, this is ignored. + Usually this file is obtained as an output from the + `extract_index_boxes_and_features.py` script. + """) + parser.add_argument( + '--output_aggregation_dir', + type=str, + default='/tmp/aggregation', + help=""" + Directory where aggregation output will be written to. 
Each image's + features will be written to a file with same name, and extension replaced + by one of + ['.vlad', '.asmk', '.asmk_star', '.rvlad', '.rasmk', '.rasmk_star']. + """) + cmd_args, unparsed = parser.parse_known_args() + app.run(main=main, argv=[sys.argv[0]] + unparsed) diff --git a/models/research/delf/delf/python/detect_to_retrieve/extract_index_boxes_and_features.py b/models/research/delf/delf/python/detect_to_retrieve/extract_index_boxes_and_features.py new file mode 100644 index 0000000000000000000000000000000000000000..2b891de4b0b093aa723c0dce547c2722ee475d7e --- /dev/null +++ b/models/research/delf/delf/python/detect_to_retrieve/extract_index_boxes_and_features.py @@ -0,0 +1,151 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Extracts DELF and boxes from the Revisited Oxford/Paris index datasets. + +Boxes are saved to .boxes files. DELF features are extracted for the +entire image and saved into .delf files. In addition, DELF features +are extracted for each high-confidence bounding box in the image, and saved into +files named _0.delf, _1.delf, etc. + +The program checks if descriptors/boxes already exist, and skips computation for +those. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import os +import sys + +from tensorflow.python.platform import app +from delf.python.detect_to_retrieve import boxes_and_features_extraction +from delf.python.detect_to_retrieve import dataset + +cmd_args = None + +_IMAGE_EXTENSION = '.jpg' + + +def main(argv): + if len(argv) > 1: + raise RuntimeError('Too many command-line arguments.') + + # Read list of index images from dataset file. + print('Reading list of index images from dataset file...') + _, index_list, _ = dataset.ReadDatasetFile(cmd_args.dataset_file_path) + num_images = len(index_list) + print('done! Found %d images' % num_images) + + # Compose list of image paths. + image_paths = [ + os.path.join(cmd_args.images_dir, index_image_name + _IMAGE_EXTENSION) + for index_image_name in index_list + ] + + # Extract boxes/features and save them to files. + boxes_and_features_extraction.ExtractBoxesAndFeaturesToFiles( + image_names=index_list, + image_paths=image_paths, + delf_config_path=cmd_args.delf_config_path, + detector_model_dir=cmd_args.detector_model_dir, + detector_thresh=cmd_args.detector_thresh, + output_features_dir=cmd_args.output_features_dir, + output_boxes_dir=cmd_args.output_boxes_dir, + output_mapping=cmd_args.output_index_mapping) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.register('type', 'bool', lambda v: v.lower() == 'true') + parser.add_argument( + '--delf_config_path', + type=str, + default='/tmp/delf_config_example.pbtxt', + help=""" + Path to DelfConfig proto text file with configuration to be used for DELF + extraction. 
+ """) + parser.add_argument( + '--detector_model_dir', + type=str, + default='/tmp/detector_model', + help=""" + Directory where detector SavedModel is located. + """) + parser.add_argument( + '--detector_thresh', + type=float, + default=0.1, + help=""" + Threshold used to decide if an image's detected box undergoes feature + extraction. For all detected boxes with detection score larger than this, + a .delf file is saved containing the box features. Note that this + threshold is used only to select which boxes are used in feature + extraction; all detected boxes are actually saved in the .boxes file, even + those with score lower than detector_thresh. + """) + parser.add_argument( + '--dataset_file_path', + type=str, + default='/tmp/gnd_roxford5k.mat', + help=""" + Dataset file for Revisited Oxford or Paris dataset, in .mat format. + """) + parser.add_argument( + '--images_dir', + type=str, + default='/tmp/images', + help=""" + Directory where dataset images are located, all in .jpg format. + """) + parser.add_argument( + '--output_boxes_dir', + type=str, + default='/tmp/boxes', + help=""" + Directory where detected boxes will be written to. Each image's boxes + will be written to a file with same name, and extension replaced by + .boxes. + """) + parser.add_argument( + '--output_features_dir', + type=str, + default='/tmp/features', + help=""" + Directory where DELF features will be written to. Each image's features + will be written to a file with same name, and extension replaced by .delf, + eg: .delf. In addition, DELF features are extracted for each + high-confidence bounding box in the image, and saved into files named + _0.delf, _1.delf, etc. + """) + parser.add_argument( + '--output_index_mapping', + type=str, + default='/tmp/index_mapping.csv', + help=""" + CSV file which maps each .delf file name to the index image ID and + detected box ID. The format is 'name,index_image_id,box_id', including a + header. The 'name' refers to the .delf file name without extension. + + For example, a few lines may be like: + 'radcliffe_camera_000158,2,-1' + 'radcliffe_camera_000158_0,2,0' + 'radcliffe_camera_000158_1,2,1' + 'radcliffe_camera_000158_2,2,2' + """) + cmd_args, unparsed = parser.parse_known_args() + app.run(main=main, argv=[sys.argv[0]] + unparsed) diff --git a/models/research/delf/delf/python/detect_to_retrieve/extract_query_features.py b/models/research/delf/delf/python/detect_to_retrieve/extract_query_features.py new file mode 100644 index 0000000000000000000000000000000000000000..a0812b191265ec6e5350acf989432747d196a519 --- /dev/null +++ b/models/research/delf/delf/python/detect_to_retrieve/extract_query_features.py @@ -0,0 +1,137 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Extracts DELF features for query images from Revisited Oxford/Paris datasets. 
+ +Note that query images are cropped before feature extraction, as required by the +evaluation protocols of these datasets. + +The program checks if descriptors already exist, and skips computation for +those. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import os +import sys +import time + +import numpy as np +import tensorflow as tf + +from google.protobuf import text_format +from tensorflow.python.platform import app +from delf import delf_config_pb2 +from delf import feature_io +from delf import utils +from delf.python.detect_to_retrieve import dataset +from delf import extractor + +cmd_args = None + +# Extensions. +_DELF_EXTENSION = '.delf' +_IMAGE_EXTENSION = '.jpg' + + +def main(argv): + if len(argv) > 1: + raise RuntimeError('Too many command-line arguments.') + + # Read list of query images from dataset file. + print('Reading list of query images and boxes from dataset file...') + query_list, _, ground_truth = dataset.ReadDatasetFile( + cmd_args.dataset_file_path) + num_images = len(query_list) + print(f'done! Found {num_images} images') + + # Parse DelfConfig proto. + config = delf_config_pb2.DelfConfig() + with tf.io.gfile.GFile(cmd_args.delf_config_path, 'r') as f: + text_format.Merge(f.read(), config) + + # Create output directory if necessary. + if not tf.io.gfile.exists(cmd_args.output_features_dir): + tf.io.gfile.makedirs(cmd_args.output_features_dir) + + extractor_fn = extractor.MakeExtractor(config) + + start = time.time() + for i in range(num_images): + query_image_name = query_list[i] + input_image_filename = os.path.join(cmd_args.images_dir, + query_image_name + _IMAGE_EXTENSION) + output_feature_filename = os.path.join( + cmd_args.output_features_dir, query_image_name + _DELF_EXTENSION) + if tf.io.gfile.exists(output_feature_filename): + print(f'Skipping {query_image_name}') + continue + + # Crop query image according to bounding box. + bbox = [int(round(b)) for b in ground_truth[i]['bbx']] + im = np.array(utils.RgbLoader(input_image_filename).crop(bbox)) + + # Extract and save features. + extracted_features = extractor_fn(im) + locations_out = extracted_features['local_features']['locations'] + descriptors_out = extracted_features['local_features']['descriptors'] + feature_scales_out = extracted_features['local_features']['scales'] + attention_out = extracted_features['local_features']['attention'] + + feature_io.WriteToFile(output_feature_filename, locations_out, + feature_scales_out, descriptors_out, + attention_out) + + elapsed = (time.time() - start) + print('Processed %d query images in %f seconds' % (num_images, elapsed)) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.register('type', 'bool', lambda v: v.lower() == 'true') + parser.add_argument( + '--delf_config_path', + type=str, + default='/tmp/delf_config_example.pbtxt', + help=""" + Path to DelfConfig proto text file with configuration to be used for DELF + extraction. + """) + parser.add_argument( + '--dataset_file_path', + type=str, + default='/tmp/gnd_roxford5k.mat', + help=""" + Dataset file for Revisited Oxford or Paris dataset, in .mat format. + """) + parser.add_argument( + '--images_dir', + type=str, + default='/tmp/images', + help=""" + Directory where dataset images are located, all in .jpg format. + """) + parser.add_argument( + '--output_features_dir', + type=str, + default='/tmp/features', + help=""" + Directory where DELF features will be written to. 
Each image's features + will be written to a file with same name, and extension replaced by .delf. + """) + cmd_args, unparsed = parser.parse_known_args() + app.run(main=main, argv=[sys.argv[0]] + unparsed) diff --git a/models/research/delf/delf/python/detect_to_retrieve/image_reranking.py b/models/research/delf/delf/python/detect_to_retrieve/image_reranking.py new file mode 100644 index 0000000000000000000000000000000000000000..60c29cc18a4436815c721855da0ca4577b06e6c4 --- /dev/null +++ b/models/research/delf/delf/python/detect_to_retrieve/image_reranking.py @@ -0,0 +1,279 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Library to re-rank images based on geometric verification.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import io +import os + +import matplotlib.pyplot as plt +import numpy as np +from scipy import spatial +from skimage import feature +from skimage import measure +from skimage import transform + +from delf import feature_io + +# Extensions. +_DELF_EXTENSION = '.delf' + +# Pace to log. +_STATUS_CHECK_GV_ITERATIONS = 10 + +# Re-ranking / geometric verification parameters. +_NUM_TO_RERANK = 100 +_NUM_RANSAC_TRIALS = 1000 +_MIN_RANSAC_SAMPLES = 3 + + +def MatchFeatures(query_locations, + query_descriptors, + index_image_locations, + index_image_descriptors, + ransac_seed=None, + feature_distance_threshold=0.9, + ransac_residual_threshold=10.0, + query_im_array=None, + index_im_array=None, + query_im_scale_factors=None, + index_im_scale_factors=None): + """Matches local features using geometric verification. + + First, finds putative local feature matches by matching `query_descriptors` + against a KD-tree from the `index_image_descriptors`. Then, attempts to fit an + affine transformation between the putative feature corresponces using their + locations. + + Args: + query_locations: Locations of local features for query image. NumPy array of + shape [#query_features, 2]. + query_descriptors: Descriptors of local features for query image. NumPy + array of shape [#query_features, depth]. + index_image_locations: Locations of local features for index image. NumPy + array of shape [#index_image_features, 2]. + index_image_descriptors: Descriptors of local features for index image. + NumPy array of shape [#index_image_features, depth]. + ransac_seed: Seed used by RANSAC. If None (default), no seed is provided. + feature_distance_threshold: Distance threshold below which a pair of + features is considered a potential match, and will be fed into RANSAC. + ransac_residual_threshold: Residual error threshold for considering matches + as inliers, used in RANSAC algorithm. + query_im_array: Optional. If not None, contains a NumPy array with the query + image, used to produce match visualization, if there is a match. + index_im_array: Optional. 
Same as `query_im_array`, but for index image. + query_im_scale_factors: Optional. If not None, contains a NumPy array with + the query image scales, used to produce match visualization, if there is a + match. If None and a visualization will be produced, [1.0, 1.0] is used + (ie, feature locations are not scaled). + index_im_scale_factors: Optional. Same as `query_im_scale_factors`, but for + index image. + + Returns: + score: Number of inliers of match. If no match is found, returns 0. + match_viz_bytes: Encoded image bytes with visualization of the match, if + there is one, and if `query_im_array` and `index_im_array` are properly + set. Otherwise, it's an empty bytes string. + + Raises: + ValueError: If local descriptors from query and index images have different + dimensionalities. + """ + num_features_query = query_locations.shape[0] + num_features_index_image = index_image_locations.shape[0] + if not num_features_query or not num_features_index_image: + return 0, b'' + + local_feature_dim = query_descriptors.shape[1] + if index_image_descriptors.shape[1] != local_feature_dim: + raise ValueError( + 'Local feature dimensionality is not consistent for query and index ' + 'images.') + + # Find nearest-neighbor matches using a KD tree. + index_image_tree = spatial.cKDTree(index_image_descriptors) + _, indices = index_image_tree.query( + query_descriptors, distance_upper_bound=feature_distance_threshold) + + # Select feature locations for putative matches. + query_locations_to_use = np.array([ + query_locations[i,] + for i in range(num_features_query) + if indices[i] != num_features_index_image + ]) + index_image_locations_to_use = np.array([ + index_image_locations[indices[i],] + for i in range(num_features_query) + if indices[i] != num_features_index_image + ]) + + # If there are not enough putative matches, early return 0. + if query_locations_to_use.shape[0] <= _MIN_RANSAC_SAMPLES: + return 0, b'' + + # Perform geometric verification using RANSAC. 
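+  # skimage's RANSAC repeatedly samples _MIN_RANSAC_SAMPLES putative
+  # correspondences, fits an affine model from index-image locations to query
+  # locations, and keeps the model with the most inliers (residual below
+  # ransac_residual_threshold); the inlier count serves as the match score.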
+ _, inliers = measure.ransac( + (index_image_locations_to_use, query_locations_to_use), + transform.AffineTransform, + min_samples=_MIN_RANSAC_SAMPLES, + residual_threshold=ransac_residual_threshold, + max_trials=_NUM_RANSAC_TRIALS, + random_state=ransac_seed) + match_viz_bytes = b'' + + if inliers is None: + inliers = [] + elif query_im_array is not None and index_im_array is not None: + if query_im_scale_factors is None: + query_im_scale_factors = [1.0, 1.0] + if index_im_scale_factors is None: + index_im_scale_factors = [1.0, 1.0] + inlier_idxs = np.nonzero(inliers)[0] + _, ax = plt.subplots() + ax.axis('off') + ax.xaxis.set_major_locator(plt.NullLocator()) + ax.yaxis.set_major_locator(plt.NullLocator()) + plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0) + plt.margins(0, 0) + feature.plot_matches( + ax, + query_im_array, + index_im_array, + query_locations_to_use * query_im_scale_factors, + index_image_locations_to_use * index_im_scale_factors, + np.column_stack((inlier_idxs, inlier_idxs)), + only_matches=True) + + match_viz_io = io.BytesIO() + plt.savefig(match_viz_io, format='jpeg', bbox_inches='tight', pad_inches=0) + match_viz_bytes = match_viz_io.getvalue() + + return sum(inliers), match_viz_bytes + + +def RerankByGeometricVerification(input_ranks, + initial_scores, + query_name, + index_names, + query_features_dir, + index_features_dir, + junk_ids, + local_feature_extension=_DELF_EXTENSION, + ransac_seed=None, + feature_distance_threshold=0.9, + ransac_residual_threshold=10.0): + """Re-ranks retrieval results using geometric verification. + + Args: + input_ranks: 1D NumPy array with indices of top-ranked index images, sorted + from the most to the least similar. + initial_scores: 1D NumPy array with initial similarity scores between query + and index images. Entry i corresponds to score for image i. + query_name: Name for query image (string). + index_names: List of names for index images (strings). + query_features_dir: Directory where query local feature file is located + (string). + index_features_dir: Directory where index local feature files are located + (string). + junk_ids: Set with indices of junk images which should not be considered + during re-ranking. + local_feature_extension: String, extension to use for loading local feature + files. + ransac_seed: Seed used by RANSAC. If None (default), no seed is provided. + feature_distance_threshold: Distance threshold below which a pair of local + features is considered a potential match, and will be fed into RANSAC. + ransac_residual_threshold: Residual error threshold for considering matches + as inliers, used in RANSAC algorithm. + + Returns: + output_ranks: 1D NumPy array with index image indices, sorted from the most + to the least similar according to the geometric verification and initial + scores. + + Raises: + ValueError: If `input_ranks`, `initial_scores` and `index_names` do not have + the same number of entries. + """ + num_index_images = len(index_names) + if len(input_ranks) != num_index_images: + raise ValueError('input_ranks and index_names have different number of ' + 'elements: %d vs %d' % + (len(input_ranks), len(index_names))) + if len(initial_scores) != num_index_images: + raise ValueError('initial_scores and index_names have different number of ' + 'elements: %d vs %d' % + (len(initial_scores), len(index_names))) + + # Filter out junk images from list that will be re-ranked. 
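+  # Only the top _NUM_TO_RERANK non-junk candidates are geometrically verified;
+  # every other image keeps an inlier count of 0, so the final sort by
+  # (inlier count, initial score) orders them by their initial similarity.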
+ input_ranks_for_gv = [] + for ind in input_ranks: + if ind not in junk_ids: + input_ranks_for_gv.append(ind) + num_to_rerank = min(_NUM_TO_RERANK, len(input_ranks_for_gv)) + + # Load query image features. + query_features_path = os.path.join(query_features_dir, + query_name + local_feature_extension) + query_locations, _, query_descriptors, _, _ = feature_io.ReadFromFile( + query_features_path) + + # Initialize list containing number of inliers and initial similarity scores. + inliers_and_initial_scores = [] + for i in range(num_index_images): + inliers_and_initial_scores.append([0, initial_scores[i]]) + + # Loop over top-ranked images and get results. + print('Starting to re-rank') + for i in range(num_to_rerank): + if i > 0 and i % _STATUS_CHECK_GV_ITERATIONS == 0: + print('Re-ranking: i = %d out of %d' % (i, num_to_rerank)) + + index_image_id = input_ranks_for_gv[i] + + # Load index image features. + index_image_features_path = os.path.join( + index_features_dir, + index_names[index_image_id] + local_feature_extension) + (index_image_locations, _, index_image_descriptors, _, + _) = feature_io.ReadFromFile(index_image_features_path) + + inliers_and_initial_scores[index_image_id][0], _ = MatchFeatures( + query_locations, + query_descriptors, + index_image_locations, + index_image_descriptors, + ransac_seed=ransac_seed, + feature_distance_threshold=feature_distance_threshold, + ransac_residual_threshold=ransac_residual_threshold) + + # Sort based on (inliers_score, initial_score). + def _InliersInitialScoresSorting(k): + """Helper function to sort list based on two entries. + + Args: + k: Index into `inliers_and_initial_scores`. + + Returns: + Tuple containing inlier score and initial score. + """ + return (inliers_and_initial_scores[k][0], inliers_and_initial_scores[k][1]) + + output_ranks = sorted( + range(num_index_images), key=_InliersInitialScoresSorting, reverse=True) + + return output_ranks diff --git a/models/research/delf/delf/python/detect_to_retrieve/index_aggregation_config.pbtxt b/models/research/delf/delf/python/detect_to_retrieve/index_aggregation_config.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..ba7ba4e4956637152d952aff1cccd66da42800f4 --- /dev/null +++ b/models/research/delf/delf/python/detect_to_retrieve/index_aggregation_config.pbtxt @@ -0,0 +1,10 @@ +codebook_size: 65536 +feature_dimensionality: 128 +aggregation_type: ASMK_STAR +use_l2_normalization: false +codebook_path: "parameters/rparis6k_codebook_65536/k65536_codebook_tfckpt/codebook" +num_assignments: 1 +use_regional_aggregation: true +feature_batch_size: 100 +alpha: 3.0 +tau: 0.0 diff --git a/models/research/delf/delf/python/detect_to_retrieve/perform_retrieval.py b/models/research/delf/delf/python/detect_to_retrieve/perform_retrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..c2034dfb285118f4ed8928f996e031365a3ffbbf --- /dev/null +++ b/models/research/delf/delf/python/detect_to_retrieve/perform_retrieval.py @@ -0,0 +1,301 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
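The final ordering sorts index images by the pair (inlier count, initial score) in descending order, so geometrically verified images move to the front and the original similarity score only breaks ties. A toy illustration with hypothetical values; images that were never re-ranked keep an inlier count of 0 and fall back to their initial score:

```python
# Hypothetical (inlier_count, initial_score) pairs for 4 index images.
inliers_and_initial_scores = [[0, 0.91], [35, 0.40], [0, 0.87], [35, 0.55]]

output_ranks = sorted(
    range(len(inliers_and_initial_scores)),
    key=lambda k: (inliers_and_initial_scores[k][0],
                   inliers_and_initial_scores[k][1]),
    reverse=True)
print(output_ranks)  # [3, 1, 0, 2]: verified images first, score breaks ties
```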
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Performs image retrieval on Revisited Oxford/Paris datasets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import os +import sys +import time + +import numpy as np +import tensorflow as tf + +from google.protobuf import text_format +from tensorflow.python.platform import app +from delf import aggregation_config_pb2 +from delf import datum_io +from delf import feature_aggregation_similarity +from delf.python.detect_to_retrieve import dataset +from delf.python.detect_to_retrieve import image_reranking + +cmd_args = None + +# Aliases for aggregation types. +_VLAD = aggregation_config_pb2.AggregationConfig.VLAD +_ASMK = aggregation_config_pb2.AggregationConfig.ASMK +_ASMK_STAR = aggregation_config_pb2.AggregationConfig.ASMK_STAR + +# Extensions. +_VLAD_EXTENSION_SUFFIX = 'vlad' +_ASMK_EXTENSION_SUFFIX = 'asmk' +_ASMK_STAR_EXTENSION_SUFFIX = 'asmk_star' + +# Precision-recall ranks to use in metric computation. +_PR_RANKS = (1, 5, 10) + +# Pace to log. +_STATUS_CHECK_LOAD_ITERATIONS = 50 + +# Output file names. +_METRICS_FILENAME = 'metrics.txt' + + +def _ReadAggregatedDescriptors(input_dir, image_list, config): + """Reads aggregated descriptors. + + Args: + input_dir: Directory where aggregated descriptors are located. + image_list: List of image names for which to load descriptors. + config: AggregationConfig used for images. + + Returns: + aggregated_descriptors: List containing #images items, each a 1D NumPy + array. + visual_words: If using VLAD aggregation, returns an empty list. Otherwise, + returns a list containing #images items, each a 1D NumPy array. + """ + # Compose extension of aggregated descriptors. + extension = '.' + if config.use_regional_aggregation: + extension += 'r' + if config.aggregation_type == _VLAD: + extension += _VLAD_EXTENSION_SUFFIX + elif config.aggregation_type == _ASMK: + extension += _ASMK_EXTENSION_SUFFIX + elif config.aggregation_type == _ASMK_STAR: + extension += _ASMK_STAR_EXTENSION_SUFFIX + else: + raise ValueError('Invalid aggregation type: %d' % config.aggregation_type) + + num_images = len(image_list) + aggregated_descriptors = [] + visual_words = [] + print('Starting to collect descriptors for %d images...' % num_images) + start = time.clock() + for i in range(num_images): + if i > 0 and i % _STATUS_CHECK_LOAD_ITERATIONS == 0: + elapsed = (time.clock() - start) + print('Reading descriptors for image %d out of %d, last %d ' + 'images took %f seconds' % + (i, num_images, _STATUS_CHECK_LOAD_ITERATIONS, elapsed)) + start = time.clock() + + descriptors_filename = image_list[i] + extension + descriptors_fullpath = os.path.join(input_dir, descriptors_filename) + if config.aggregation_type == _VLAD: + aggregated_descriptors.append(datum_io.ReadFromFile(descriptors_fullpath)) + else: + d, v = datum_io.ReadPairFromFile(descriptors_fullpath) + if config.aggregation_type == _ASMK_STAR: + d = d.astype('uint8') + + aggregated_descriptors.append(d) + visual_words.append(v) + + return aggregated_descriptors, visual_words + + +def main(argv): + if len(argv) > 1: + raise RuntimeError('Too many command-line arguments.') + + # Parse dataset to obtain query/index images, and ground-truth. 
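The extension composed in `_ReadAggregatedDescriptors` determines the aggregated-descriptor filenames on disk (for example `.rasmk_star` for regional ASMK*). A small sketch that mirrors that logic, with plain strings standing in for the `AggregationConfig` enum values purely for readability (an assumption of this sketch, not the real proto API):

```python
def _AggregatedExtension(aggregation_type, use_regional_aggregation):
  """Mirrors the extension logic above, using plain strings for the enum."""
  suffixes = {'VLAD': 'vlad', 'ASMK': 'asmk', 'ASMK_STAR': 'asmk_star'}
  return '.' + ('r' if use_regional_aggregation else '') + suffixes[aggregation_type]

print(_AggregatedExtension('ASMK_STAR', True))  # .rasmk_star
print(_AggregatedExtension('VLAD', False))      # .vlad
```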
+ print('Parsing dataset...') + query_list, index_list, ground_truth = dataset.ReadDatasetFile( + cmd_args.dataset_file_path) + num_query_images = len(query_list) + num_index_images = len(index_list) + (_, medium_ground_truth, + hard_ground_truth) = dataset.ParseEasyMediumHardGroundTruth(ground_truth) + print('done! Found %d queries and %d index images' % + (num_query_images, num_index_images)) + + # Parse AggregationConfig protos. + query_config = aggregation_config_pb2.AggregationConfig() + with tf.io.gfile.GFile(cmd_args.query_aggregation_config_path, 'r') as f: + text_format.Merge(f.read(), query_config) + index_config = aggregation_config_pb2.AggregationConfig() + with tf.io.gfile.GFile(cmd_args.index_aggregation_config_path, 'r') as f: + text_format.Merge(f.read(), index_config) + + # Read aggregated descriptors. + query_aggregated_descriptors, query_visual_words = _ReadAggregatedDescriptors( + cmd_args.query_aggregation_dir, query_list, query_config) + index_aggregated_descriptors, index_visual_words = _ReadAggregatedDescriptors( + cmd_args.index_aggregation_dir, index_list, index_config) + + # Create similarity computer. + similarity_computer = ( + feature_aggregation_similarity.SimilarityAggregatedRepresentation( + index_config)) + + # Compute similarity between query and index images, potentially re-ranking + # with geometric verification. + ranks_before_gv = np.zeros([num_query_images, num_index_images], + dtype='int32') + if cmd_args.use_geometric_verification: + medium_ranks_after_gv = np.zeros([num_query_images, num_index_images], + dtype='int32') + hard_ranks_after_gv = np.zeros([num_query_images, num_index_images], + dtype='int32') + for i in range(num_query_images): + print('Performing retrieval with query %d (%s)...' % (i, query_list[i])) + start = time.clock() + + # Compute similarity between aggregated descriptors. + similarities = np.zeros([num_index_images]) + for j in range(num_index_images): + similarities[j] = similarity_computer.ComputeSimilarity( + query_aggregated_descriptors[i], index_aggregated_descriptors[j], + query_visual_words[i], index_visual_words[j]) + + ranks_before_gv[i] = np.argsort(-similarities) + + # Re-rank using geometric verification. + if cmd_args.use_geometric_verification: + medium_ranks_after_gv[i] = image_reranking.RerankByGeometricVerification( + ranks_before_gv[i], similarities, query_list[i], index_list, + cmd_args.query_features_dir, cmd_args.index_features_dir, + set(medium_ground_truth[i]['junk'])) + hard_ranks_after_gv[i] = image_reranking.RerankByGeometricVerification( + ranks_before_gv[i], similarities, query_list[i], index_list, + cmd_args.query_features_dir, cmd_args.index_features_dir, + set(hard_ground_truth[i]['junk'])) + + elapsed = (time.clock() - start) + print('done! Retrieval for query %d took %f seconds' % (i, elapsed)) + + # Create output directory if necessary. + if not tf.io.gfile.exists(cmd_args.output_dir): + tf.io.gfile.makedirs(cmd_args.output_dir) + + # Compute metrics. + medium_metrics = dataset.ComputeMetrics(ranks_before_gv, medium_ground_truth, + _PR_RANKS) + hard_metrics = dataset.ComputeMetrics(ranks_before_gv, hard_ground_truth, + _PR_RANKS) + if cmd_args.use_geometric_verification: + medium_metrics_after_gv = dataset.ComputeMetrics(medium_ranks_after_gv, + medium_ground_truth, + _PR_RANKS) + hard_metrics_after_gv = dataset.ComputeMetrics(hard_ranks_after_gv, + hard_ground_truth, _PR_RANKS) + + # Write metrics to file. 
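Before geometric verification, the ranking for each query is simply the index images sorted by decreasing similarity, obtained by arg-sorting the negated similarity vector:

```python
import numpy as np

# Similarities of one query against 4 index images.
similarities = np.array([0.12, 0.87, 0.45, 0.60])
ranks_before_gv = np.argsort(-similarities)  # most similar index image first
print(ranks_before_gv)  # [1 3 2 0]
```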
+ mean_average_precision_dict = { + 'medium': medium_metrics[0], + 'hard': hard_metrics[0] + } + mean_precisions_dict = {'medium': medium_metrics[1], 'hard': hard_metrics[1]} + mean_recalls_dict = {'medium': medium_metrics[2], 'hard': hard_metrics[2]} + if cmd_args.use_geometric_verification: + mean_average_precision_dict.update({ + 'medium_after_gv': medium_metrics_after_gv[0], + 'hard_after_gv': hard_metrics_after_gv[0] + }) + mean_precisions_dict.update({ + 'medium_after_gv': medium_metrics_after_gv[1], + 'hard_after_gv': hard_metrics_after_gv[1] + }) + mean_recalls_dict.update({ + 'medium_after_gv': medium_metrics_after_gv[2], + 'hard_after_gv': hard_metrics_after_gv[2] + }) + dataset.SaveMetricsFile(mean_average_precision_dict, mean_precisions_dict, + mean_recalls_dict, _PR_RANKS, + os.path.join(cmd_args.output_dir, _METRICS_FILENAME)) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.register('type', 'bool', lambda v: v.lower() == 'true') + parser.add_argument( + '--index_aggregation_config_path', + type=str, + default='/tmp/index_aggregation_config.pbtxt', + help=""" + Path to index AggregationConfig proto text file. This is used to load the + aggregated descriptors from the index, and to define the parameters used + in computing similarity for aggregated descriptors. + """) + parser.add_argument( + '--query_aggregation_config_path', + type=str, + default='/tmp/query_aggregation_config.pbtxt', + help=""" + Path to query AggregationConfig proto text file. This is only used to load + the aggregated descriptors for the queries. + """) + parser.add_argument( + '--dataset_file_path', + type=str, + default='/tmp/gnd_roxford5k.mat', + help=""" + Dataset file for Revisited Oxford or Paris dataset, in .mat format. + """) + parser.add_argument( + '--index_aggregation_dir', + type=str, + default='/tmp/index_aggregation', + help=""" + Directory where index aggregated descriptors are located. + """) + parser.add_argument( + '--query_aggregation_dir', + type=str, + default='/tmp/query_aggregation', + help=""" + Directory where query aggregated descriptors are located. + """) + parser.add_argument( + '--use_geometric_verification', + type=lambda x: (str(x).lower() == 'true'), + default=False, + help=""" + If True, performs re-ranking using local feature-based geometric + verification. + """) + parser.add_argument( + '--index_features_dir', + type=str, + default='/tmp/index_features', + help=""" + Only used if `use_geometric_verification` is True. + Directory where index local image features are located, all in .delf + format. + """) + parser.add_argument( + '--query_features_dir', + type=str, + default='/tmp/query_features', + help=""" + Only used if `use_geometric_verification` is True. + Directory where query local image features are located, all in .delf + format. + """) + parser.add_argument( + '--output_dir', + type=str, + default='/tmp/retrieval', + help=""" + Directory where retrieval output will be written to. A file containing + metrics for this run is saved therein, with file name "metrics.txt". 
+ """) + cmd_args, unparsed = parser.parse_known_args() + app.run(main=main, argv=[sys.argv[0]] + unparsed) diff --git a/models/research/delf/delf/python/detect_to_retrieve/query_aggregation_config.pbtxt b/models/research/delf/delf/python/detect_to_retrieve/query_aggregation_config.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..39a917eef4389baa9a7f08722aed0a2e0cb6dd1f --- /dev/null +++ b/models/research/delf/delf/python/detect_to_retrieve/query_aggregation_config.pbtxt @@ -0,0 +1,7 @@ +codebook_size: 65536 +feature_dimensionality: 128 +aggregation_type: ASMK_STAR +codebook_path: "parameters/rparis6k_codebook_65536/k65536_codebook_tfckpt/codebook" +num_assignments: 1 +use_regional_aggregation: false +feature_batch_size: 100 diff --git a/models/research/delf/delf/python/examples/__init__.py b/models/research/delf/delf/python/examples/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/delf/delf/python/examples/delf_config_example.pbtxt b/models/research/delf/delf/python/examples/delf_config_example.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..ff2d9c0023c41accd2cff51c0117d4ff01def2a0 --- /dev/null +++ b/models/research/delf/delf/python/examples/delf_config_example.pbtxt @@ -0,0 +1,25 @@ +model_path: "parameters/delf_gld_20190411/model/" +image_scales: .25 +image_scales: .3536 +image_scales: .5 +image_scales: .7071 +image_scales: 1.0 +image_scales: 1.4142 +image_scales: 2.0 +delf_local_config { + use_pca: true + # Note that for the exported model provided as an example, layer_name and + # iou_threshold are hard-coded in the checkpoint. So, the layer_name and + # iou_threshold variables here have no effect on the provided + # extract_features.py script. + layer_name: "resnet_v1_50/block3" + iou_threshold: 1.0 + max_feature_num: 1000 + score_threshold: 100.0 + pca_parameters { + mean_path: "parameters/delf_gld_20190411/pca/mean.datum" + projection_matrix_path: "parameters/delf_gld_20190411/pca/pca_proj_mat.datum" + pca_dim: 40 + use_whitening: false + } +} diff --git a/models/research/delf/delf/python/examples/detection_example_1.jpg b/models/research/delf/delf/python/examples/detection_example_1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..afdb388f0dea74de9ac259cfd8a0f9b3b17779e8 Binary files /dev/null and b/models/research/delf/delf/python/examples/detection_example_1.jpg differ diff --git a/models/research/delf/delf/python/examples/detection_example_2.jpg b/models/research/delf/delf/python/examples/detection_example_2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5baf54a80888810f0a0daa279eb29936f15c8457 Binary files /dev/null and b/models/research/delf/delf/python/examples/detection_example_2.jpg differ diff --git a/models/research/delf/delf/python/examples/detector.py b/models/research/delf/delf/python/examples/detector.py new file mode 100644 index 0000000000000000000000000000000000000000..fd8aef1cf7fef2aea7ae1e28e793f0ab172e915d --- /dev/null +++ b/models/research/delf/delf/python/examples/detector.py @@ -0,0 +1,55 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Module to construct object detector function.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + + +def MakeDetector(model_dir): + """Creates a function to detect objects in an image. + + Args: + model_dir: Directory where SavedModel is located. + + Returns: + Function that receives an image and returns detection results. + """ + model = tf.saved_model.load(model_dir) + + # Input and output tensors. + feeds = ['input_images:0'] + fetches = ['detection_boxes:0', 'detection_scores:0', 'detection_classes:0'] + + model = model.prune(feeds=feeds, fetches=fetches) + + def DetectorFn(images): + """Receives an image and returns detected boxes. + + Args: + images: Uint8 array with shape (batch, height, width 3) containing a batch + of RGB images. + + Returns: + Tuple (boxes, scores, class_indices). + """ + boxes, scores, class_indices = model(tf.convert_to_tensor(images)) + + return boxes.numpy(), scores.numpy(), class_indices.numpy() + + return DetectorFn diff --git a/models/research/delf/delf/python/examples/extract_boxes.py b/models/research/delf/delf/python/examples/extract_boxes.py new file mode 100644 index 0000000000000000000000000000000000000000..8851c44fb9a051104adde50a4c869a28cfd513da --- /dev/null +++ b/models/research/delf/delf/python/examples/extract_boxes.py @@ -0,0 +1,234 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Extracts bounding boxes from a list of images, saving them to files. + +The images must be in JPG format. The program checks if boxes already +exist, and skips computation for those. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import os +import sys +import time + +import matplotlib.patches as patches +import matplotlib.pyplot as plt +import numpy as np +import tensorflow as tf + +from tensorflow.python.platform import app +from delf import box_io +from delf import utils +from delf import detector + +cmd_args = None + +# Extension/suffix of produced files. +_BOX_EXT = '.boxes' +_VIZ_SUFFIX = '_viz.jpg' + +# Used for plotting boxes. +_BOX_EDGE_COLORS = ['r', 'y', 'b', 'm', 'k', 'g', 'c', 'w'] + +# Pace to report extraction log. +_STATUS_CHECK_ITERATIONS = 100 + + +def _ReadImageList(list_path): + """Helper function to read image paths. 
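A hedged usage sketch of `MakeDetector`: it assumes the `delf` package is installed and an exported detector SavedModel has been downloaded to the (hypothetical) path below, which matches the default used by `extract_boxes.py` further down:

```python
import numpy as np
from delf import detector  # assumes the delf package is installed

# Hypothetical path to a downloaded detector SavedModel.
detector_fn = detector.MakeDetector('/tmp/d2r_frcnn_20190411/')

images = np.zeros((1, 480, 640, 3), dtype=np.uint8)  # batch of one RGB image
boxes, scores, class_indices = detector_fn(images)
print(boxes.shape, scores.shape, class_indices.shape)
```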
+ + Args: + list_path: Path to list of images, one image path per line. + + Returns: + image_paths: List of image paths. + """ + with tf.io.gfile.GFile(list_path, 'r') as f: + image_paths = f.readlines() + image_paths = [entry.rstrip() for entry in image_paths] + return image_paths + + +def _FilterBoxesByScore(boxes, scores, class_indices, score_threshold): + """Filter boxes based on detection scores. + + Boxes with detection score >= score_threshold are returned. + + Args: + boxes: [N, 4] float array denoting bounding box coordinates, in format [top, + left, bottom, right]. + scores: [N] float array with detection scores. + class_indices: [N] int array with class indices. + score_threshold: Float detection score threshold to use. + + Returns: + selected_boxes: selected `boxes`. + selected_scores: selected `scores`. + selected_class_indices: selected `class_indices`. + """ + selected_boxes = [] + selected_scores = [] + selected_class_indices = [] + for i, box in enumerate(boxes): + if scores[i] >= score_threshold: + selected_boxes.append(box) + selected_scores.append(scores[i]) + selected_class_indices.append(class_indices[i]) + + return np.array(selected_boxes), np.array(selected_scores), np.array( + selected_class_indices) + + +def _PlotBoxesAndSaveImage(image, boxes, output_path): + """Plot boxes on image and save to output path. + + Args: + image: Numpy array containing image. + boxes: [N, 4] float array denoting bounding box coordinates, in format [top, + left, bottom, right]. + output_path: String containing output path. + """ + height = image.shape[0] + width = image.shape[1] + + fig, ax = plt.subplots(1) + ax.imshow(image) + for i, box in enumerate(boxes): + scaled_box = [ + box[0] * height, box[1] * width, box[2] * height, box[3] * width + ] + rect = patches.Rectangle([scaled_box[1], scaled_box[0]], + scaled_box[3] - scaled_box[1], + scaled_box[2] - scaled_box[0], + linewidth=3, + edgecolor=_BOX_EDGE_COLORS[i % + len(_BOX_EDGE_COLORS)], + facecolor='none') + ax.add_patch(rect) + + ax.axis('off') + plt.savefig(output_path, bbox_inches='tight') + plt.close(fig) + + +def main(argv): + if len(argv) > 1: + raise RuntimeError('Too many command-line arguments.') + + # Read list of images. + print('Reading list of images...') + image_paths = _ReadImageList(cmd_args.list_images_path) + num_images = len(image_paths) + print(f'done! Found {num_images} images') + + # Create output directories if necessary. + if not tf.io.gfile.exists(cmd_args.output_dir): + tf.io.gfile.makedirs(cmd_args.output_dir) + if cmd_args.output_viz_dir and not tf.io.gfile.exists( + cmd_args.output_viz_dir): + tf.io.gfile.makedirs(cmd_args.output_viz_dir) + + detector_fn = detector.MakeDetector(cmd_args.detector_path) + + start = time.time() + for i, image_path in enumerate(image_paths): + # Report progress once in a while. + if i == 0: + print('Starting to detect objects in images...') + elif i % _STATUS_CHECK_ITERATIONS == 0: + elapsed = (time.time() - start) + print( + f'Processing image {i} out of {num_images}, last ' + f'{_STATUS_CHECK_ITERATIONS} images took {elapsed} seconds' + ) + start = time.time() + + # If descriptor already exists, skip its computation. 
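`_FilterBoxesByScore` keeps only detections whose score is at or above the threshold, returning the corresponding boxes, scores, and class indices as NumPy arrays. A toy run, assuming the helper above is in scope (for example, executed inside `extract_boxes.py`):

```python
import numpy as np

boxes = np.array([[0.1, 0.1, 0.5, 0.5],
                  [0.2, 0.2, 0.9, 0.8],
                  [0.0, 0.0, 1.0, 1.0]])
scores = np.array([0.95, 0.40, 0.75])
class_indices = np.array([1, 2, 3])

selected_boxes, selected_scores, selected_classes = _FilterBoxesByScore(
    boxes, scores, class_indices, score_threshold=0.7)
print(selected_scores)   # [0.95 0.75]
print(selected_classes)  # [1 3]
```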
+ base_boxes_filename, _ = os.path.splitext(os.path.basename(image_path)) + out_boxes_filename = base_boxes_filename + _BOX_EXT + out_boxes_fullpath = os.path.join(cmd_args.output_dir, + out_boxes_filename) + if tf.io.gfile.exists(out_boxes_fullpath): + print(f'Skipping {image_path}') + continue + + im = np.expand_dims(np.array(utils.RgbLoader(image_paths[i])), 0) + + # Extract and save boxes. + (boxes_out, scores_out, class_indices_out) = detector_fn(im) + (selected_boxes, selected_scores, + selected_class_indices) = _FilterBoxesByScore(boxes_out[0], + scores_out[0], + class_indices_out[0], + cmd_args.detector_thresh) + + box_io.WriteToFile(out_boxes_fullpath, selected_boxes, selected_scores, + selected_class_indices) + if cmd_args.output_viz_dir: + out_viz_filename = base_boxes_filename + _VIZ_SUFFIX + out_viz_fullpath = os.path.join(cmd_args.output_viz_dir, + out_viz_filename) + _PlotBoxesAndSaveImage(im[0], selected_boxes, out_viz_fullpath) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.register('type', 'bool', lambda v: v.lower() == 'true') + parser.add_argument( + '--detector_path', + type=str, + default='/tmp/d2r_frcnn_20190411/', + help=""" + Path to exported detector model. + """) + parser.add_argument( + '--detector_thresh', + type=float, + default=.0, + help=""" + Detector threshold. Any box with confidence score lower than this is not + returned. + """) + parser.add_argument( + '--list_images_path', + type=str, + default='list_images.txt', + help=""" + Path to list of images to undergo object detection. + """) + parser.add_argument( + '--output_dir', + type=str, + default='test_boxes', + help=""" + Directory where bounding boxes will be written to. Each image's boxes + will be written to a file with same name, and extension replaced by + .boxes. + """) + parser.add_argument( + '--output_viz_dir', + type=str, + default='', + help=""" + Optional. If set, a visualization of the detected boxes overlaid on the + image is produced, and saved to this directory. Each image is saved with + _viz.jpg suffix. + """) + cmd_args, unparsed = parser.parse_known_args() + app.run(main=main, argv=[sys.argv[0]] + unparsed) diff --git a/models/research/delf/delf/python/examples/extract_features.py b/models/research/delf/delf/python/examples/extract_features.py new file mode 100644 index 0000000000000000000000000000000000000000..05fd77316070d39722e133dbd544f5b53791f6d0 --- /dev/null +++ b/models/research/delf/delf/python/examples/extract_features.py @@ -0,0 +1,144 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Extracts DELF features from a list of images, saving them to file. + +The images must be in JPG format. The program checks if descriptors already +exist, and skips computation for those. 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import os +import sys +import time + +import numpy as np +from six.moves import range +import tensorflow as tf + +from google.protobuf import text_format +from tensorflow.python.platform import app +from delf import delf_config_pb2 +from delf import feature_io +from delf import utils +from delf import extractor + +cmd_args = None + +# Extension of feature files. +_DELF_EXT = '.delf' + +# Pace to report extraction log. +_STATUS_CHECK_ITERATIONS = 100 + + +def _ReadImageList(list_path): + """Helper function to read image paths. + + Args: + list_path: Path to list of images, one image path per line. + + Returns: + image_paths: List of image paths. + """ + with tf.io.gfile.GFile(list_path, 'r') as f: + image_paths = f.readlines() + image_paths = [entry.rstrip() for entry in image_paths] + return image_paths + + +def main(unused_argv): + # Read list of images. + print('Reading list of images...') + image_paths = _ReadImageList(cmd_args.list_images_path) + num_images = len(image_paths) + print(f'done! Found {num_images} images') + + # Parse DelfConfig proto. + config = delf_config_pb2.DelfConfig() + with tf.io.gfile.GFile(cmd_args.config_path, 'r') as f: + text_format.Merge(f.read(), config) + + # Create output directory if necessary. + if not tf.io.gfile.exists(cmd_args.output_dir): + tf.io.gfile.makedirs(cmd_args.output_dir) + + extractor_fn = extractor.MakeExtractor(config) + + start = time.time() + for i in range(num_images): + # Report progress once in a while. + if i == 0: + print('Starting to extract DELF features from images...') + elif i % _STATUS_CHECK_ITERATIONS == 0: + elapsed = (time.time() - start) + print( + f'Processing image {i} out of {num_images}, last ' + f'{_STATUS_CHECK_ITERATIONS} images took {elapsed} seconds' + ) + start = time.time() + + # If descriptor already exists, skip its computation. + out_desc_filename = os.path.splitext(os.path.basename( + image_paths[i]))[0] + _DELF_EXT + out_desc_fullpath = os.path.join(cmd_args.output_dir, out_desc_filename) + if tf.io.gfile.exists(out_desc_fullpath): + print(f'Skipping {image_paths[i]}') + continue + + im = np.array(utils.RgbLoader(image_paths[i])) + + # Extract and save features. + extracted_features = extractor_fn(im) + locations_out = extracted_features['local_features']['locations'] + descriptors_out = extracted_features['local_features']['descriptors'] + feature_scales_out = extracted_features['local_features']['scales'] + attention_out = extracted_features['local_features']['attention'] + + feature_io.WriteToFile(out_desc_fullpath, locations_out, feature_scales_out, + descriptors_out, attention_out) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.register('type', 'bool', lambda v: v.lower() == 'true') + parser.add_argument( + '--config_path', + type=str, + default='delf_config_example.pbtxt', + help=""" + Path to DelfConfig proto text file with configuration to be used for DELF + extraction. + """) + parser.add_argument( + '--list_images_path', + type=str, + default='list_images.txt', + help=""" + Path to list of images whose DELF features will be extracted. + """) + parser.add_argument( + '--output_dir', + type=str, + default='test_features', + help=""" + Directory where DELF features will be written to. Each image's features + will be written to a file with same name, and extension replaced by .delf. 
+ """) + cmd_args, unparsed = parser.parse_known_args() + app.run(main=main, argv=[sys.argv[0]] + unparsed) diff --git a/models/research/delf/delf/python/examples/extractor.py b/models/research/delf/delf/python/examples/extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..bd63ab38362a9c6f9ccc5d3bfca1fd007045d261 --- /dev/null +++ b/models/research/delf/delf/python/examples/extractor.py @@ -0,0 +1,277 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Module to construct DELF feature extractor.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from PIL import Image +import tensorflow as tf + +from delf import datum_io +from delf import feature_extractor + +# Minimum dimensions below which DELF features are not extracted (empty +# features are returned). This applies after any resizing is performed. +_MIN_HEIGHT = 10 +_MIN_WIDTH = 10 + + +def ResizeImage(image, config, resize_factor=1.0): + """Resizes image according to config. + + Args: + image: Uint8 array with shape (height, width, 3). + config: DelfConfig proto containing the model configuration. + resize_factor: Optional float resize factor for the input image. If given, + the maximum and minimum allowed image sizes in `config` are scaled by this + factor. Must be non-negative. + + Returns: + resized_image: Uint8 array with resized image. + scale_factors: 2D float array, with factors used for resizing along height + and width (If upscaling, larger than 1; if downscaling, smaller than 1). + + Raises: + ValueError: If `image` has incorrect number of dimensions/channels. + """ + if resize_factor < 0.0: + raise ValueError('negative resize_factor is not allowed: %f' % + resize_factor) + if image.ndim != 3: + raise ValueError('image has incorrect number of dimensions: %d' % + image.ndims) + height, width, channels = image.shape + + # Take into account resize factor. + max_image_size = resize_factor * config.max_image_size + min_image_size = resize_factor * config.min_image_size + + if channels != 3: + raise ValueError('image has incorrect number of channels: %d' % channels) + + largest_side = max(width, height) + + if max_image_size >= 0 and largest_side > max_image_size: + scale_factor = max_image_size / largest_side + elif min_image_size >= 0 and largest_side < min_image_size: + scale_factor = min_image_size / largest_side + elif config.use_square_images and (height != width): + scale_factor = 1.0 + else: + # No resizing needed, early return. + return image, np.ones(2, dtype=float) + + # Note that new_shape is in (width, height) format (PIL convention), while + # scale_factors are in (height, width) convention (NumPy convention). 
+ if config.use_square_images: + new_shape = (int(round(largest_side * scale_factor)), + int(round(largest_side * scale_factor))) + else: + new_shape = (int(round(width * scale_factor)), + int(round(height * scale_factor))) + + scale_factors = np.array([new_shape[1] / height, new_shape[0] / width], + dtype=float) + + pil_image = Image.fromarray(image) + resized_image = np.array(pil_image.resize(new_shape, resample=Image.BILINEAR)) + + return resized_image, scale_factors + + +def MakeExtractor(config): + """Creates a function to extract global and/or local features from an image. + + Args: + config: DelfConfig proto containing the model configuration. + + Returns: + Function that receives an image and returns features. + """ + # Load model. + model = tf.saved_model.load(config.model_path) + + # Input/output end-points/tensors. + feeds = ['input_image:0', 'input_scales:0'] + fetches = [] + image_scales_tensor = tf.convert_to_tensor(list(config.image_scales)) + + # Custom configuration needed when local features are used. + if config.use_local_features: + # Extra input/output end-points/tensors. + feeds.append('input_abs_thres:0') + feeds.append('input_max_feature_num:0') + fetches.append('boxes:0') + fetches.append('features:0') + fetches.append('scales:0') + fetches.append('scores:0') + score_threshold_tensor = tf.constant( + config.delf_local_config.score_threshold) + max_feature_num_tensor = tf.constant( + config.delf_local_config.max_feature_num) + + # If using PCA, pre-load required parameters. + local_pca_parameters = {} + if config.delf_local_config.use_pca: + local_pca_parameters['mean'] = tf.constant( + datum_io.ReadFromFile( + config.delf_local_config.pca_parameters.mean_path), + dtype=tf.float32) + local_pca_parameters['matrix'] = tf.constant( + datum_io.ReadFromFile( + config.delf_local_config.pca_parameters.projection_matrix_path), + dtype=tf.float32) + local_pca_parameters[ + 'dim'] = config.delf_local_config.pca_parameters.pca_dim + local_pca_parameters['use_whitening'] = ( + config.delf_local_config.pca_parameters.use_whitening) + if config.delf_local_config.pca_parameters.use_whitening: + local_pca_parameters['variances'] = tf.squeeze( + tf.constant( + datum_io.ReadFromFile( + config.delf_local_config.pca_parameters.pca_variances_path), + dtype=tf.float32)) + else: + local_pca_parameters['variances'] = None + + # Custom configuration needed when global features are used. + if config.use_global_features: + # Extra output end-point. + fetches.append('global_descriptors:0') + + # If using PCA, pre-load required parameters. 
+ global_pca_parameters = {} + if config.delf_global_config.use_pca: + global_pca_parameters['mean'] = tf.constant( + datum_io.ReadFromFile( + config.delf_global_config.pca_parameters.mean_path), + dtype=tf.float32) + global_pca_parameters['matrix'] = tf.constant( + datum_io.ReadFromFile( + config.delf_global_config.pca_parameters.projection_matrix_path), + dtype=tf.float32) + global_pca_parameters[ + 'dim'] = config.delf_global_config.pca_parameters.pca_dim + global_pca_parameters['use_whitening'] = ( + config.delf_global_config.pca_parameters.use_whitening) + if config.delf_global_config.pca_parameters.use_whitening: + global_pca_parameters['variances'] = tf.squeeze( + tf.constant( + datum_io.ReadFromFile(config.delf_global_config.pca_parameters + .pca_variances_path), + dtype=tf.float32)) + else: + global_pca_parameters['variances'] = None + + model = model.prune(feeds=feeds, fetches=fetches) + + def ExtractorFn(image, resize_factor=1.0): + """Receives an image and returns DELF global and/or local features. + + If image is too small, returns empty features. + + Args: + image: Uint8 array with shape (height, width, 3) containing the RGB image. + resize_factor: Optional float resize factor for the input image. If given, + the maximum and minimum allowed image sizes in the config are scaled by + this factor. + + Returns: + extracted_features: A dict containing the extracted global descriptors + (key 'global_descriptor' mapping to a [D] float array), and/or local + features (key 'local_features' mapping to a dict with keys 'locations', + 'descriptors', 'scales', 'attention'). + """ + + resized_image, scale_factors = ResizeImage( + image, config, resize_factor=resize_factor) + + # If the image is too small, returns empty features. + if resized_image.shape[0] < _MIN_HEIGHT or resized_image.shape[ + 1] < _MIN_WIDTH: + extracted_features = {'global_descriptor': np.array([])} + if config.use_local_features: + extracted_features.update({ + 'local_features': { + 'locations': np.array([]), + 'descriptors': np.array([]), + 'scales': np.array([]), + 'attention': np.array([]), + } + }) + return extracted_features + + # Input tensors. + image_tensor = tf.convert_to_tensor(resized_image) + + # Extracted features. + extracted_features = {} + output = None + + if config.use_local_features: + output = model(image_tensor, image_scales_tensor, score_threshold_tensor, + max_feature_num_tensor) + else: + output = model(image_tensor, image_scales_tensor) + + # Post-process extracted features: normalize, PCA (optional), pooling. 
+ if config.use_global_features: + raw_global_descriptors = output[-1] + if config.delf_global_config.image_scales_ind: + raw_global_descriptors_selected_scales = tf.gather( + raw_global_descriptors, + list(config.delf_global_config.image_scales_ind)) + else: + raw_global_descriptors_selected_scales = raw_global_descriptors + global_descriptors_per_scale = feature_extractor.PostProcessDescriptors( + raw_global_descriptors_selected_scales, + config.delf_global_config.use_pca, global_pca_parameters) + unnormalized_global_descriptor = tf.reduce_sum( + global_descriptors_per_scale, axis=0, name='sum_pooling') + global_descriptor = tf.nn.l2_normalize( + unnormalized_global_descriptor, axis=0, name='final_l2_normalization') + extracted_features.update({ + 'global_descriptor': global_descriptor.numpy(), + }) + + if config.use_local_features: + boxes = output[0] + raw_local_descriptors = output[1] + feature_scales = output[2] + attention_with_extra_dim = output[3] + + attention = tf.reshape(attention_with_extra_dim, + [tf.shape(attention_with_extra_dim)[0]]) + locations, local_descriptors = ( + feature_extractor.DelfFeaturePostProcessing( + boxes, raw_local_descriptors, config.delf_local_config.use_pca, + local_pca_parameters)) + locations /= scale_factors + + extracted_features.update({ + 'local_features': { + 'locations': locations.numpy(), + 'descriptors': local_descriptors.numpy(), + 'scales': feature_scales.numpy(), + 'attention': attention.numpy(), + } + }) + + return extracted_features + + return ExtractorFn diff --git a/models/research/delf/delf/python/examples/extractor_test.py b/models/research/delf/delf/python/examples/extractor_test.py new file mode 100644 index 0000000000000000000000000000000000000000..aa560c75a5ca7f8a48247eb7636643e2369c0e5e --- /dev/null +++ b/models/research/delf/delf/python/examples/extractor_test.py @@ -0,0 +1,103 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
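Putting `MakeExtractor` together, the sketch below mirrors how `extract_features.py` drives it: parse a `DelfConfig`, build the extractor once, then call it per image. It assumes the `delf` package is installed, the SavedModel referenced by the config has been downloaded, the example config enables local features (as `delf_config_example.pbtxt` does for that script), and the image path exists; all paths are placeholders:

```python
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from delf import delf_config_pb2
from delf import extractor
from delf import utils

# Hypothetical paths: a DelfConfig pointing at a downloaded DELF SavedModel,
# and a local JPEG image.
config = delf_config_pb2.DelfConfig()
with tf.io.gfile.GFile('delf_config_example.pbtxt', 'r') as f:
  text_format.Merge(f.read(), config)

extractor_fn = extractor.MakeExtractor(config)

image = np.array(utils.RgbLoader('test_images/image_1.jpg'))
extracted = extractor_fn(image)
local_features = extracted['local_features']
print(local_features['locations'].shape)    # (num_features, 2)
print(local_features['descriptors'].shape)  # (num_features, descriptor_dim)
```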
+# ============================================================================== +"""Tests for DELF feature extractor.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +import numpy as np +import tensorflow as tf + +from delf import delf_config_pb2 +from delf import extractor + + +class ExtractorTest(tf.test.TestCase, parameterized.TestCase): + + @parameterized.named_parameters( + ('Max-1Min-1', -1, -1, 1.0, False, [4, 2, 3], [1.0, 1.0]), + ('Max-1Min-1Square', -1, -1, 1.0, True, [4, 4, 3], [1.0, 2.0]), + ('Max2Min-1', 2, -1, 1.0, False, [2, 1, 3], [0.5, 0.5]), + ('Max2Min-1Square', 2, -1, 1.0, True, [2, 2, 3], [0.5, 1.0]), + ('Max8Min-1', 8, -1, 1.0, False, [4, 2, 3], [1.0, 1.0]), + ('Max8Min-1Square', 8, -1, 1.0, True, [4, 4, 3], [1.0, 2.0]), + ('Max-1Min1', -1, 1, 1.0, False, [4, 2, 3], [1.0, 1.0]), + ('Max-1Min1Square', -1, 1, 1.0, True, [4, 4, 3], [1.0, 2.0]), + ('Max-1Min8', -1, 8, 1.0, False, [8, 4, 3], [2.0, 2.0]), + ('Max-1Min8Square', -1, 8, 1.0, True, [8, 8, 3], [2.0, 4.0]), + ('Max16Min8', 16, 8, 1.0, False, [8, 4, 3], [2.0, 2.0]), + ('Max16Min8Square', 16, 8, 1.0, True, [8, 8, 3], [2.0, 4.0]), + ('Max2Min2', 2, 2, 1.0, False, [2, 1, 3], [0.5, 0.5]), + ('Max2Min2Square', 2, 2, 1.0, True, [2, 2, 3], [0.5, 1.0]), + ('Max-1Min-1Factor0.5', -1, -1, 0.5, False, [4, 2, 3], [1.0, 1.0]), + ('Max-1Min-1Factor0.5Square', -1, -1, 0.5, True, [4, 4, 3], [1.0, 2.0]), + ('Max2Min-1Factor2.0', 2, -1, 2.0, False, [4, 2, 3], [1.0, 1.0]), + ('Max2Min-1Factor2.0Square', 2, -1, 2.0, True, [4, 4, 3], [1.0, 2.0]), + ('Max-1Min8Factor0.5', -1, 8, 0.5, False, [4, 2, 3], [1.0, 1.0]), + ('Max-1Min8Factor0.5Square', -1, 8, 0.5, True, [4, 4, 3], [1.0, 2.0]), + ('Max-1Min8Factor0.25', -1, 8, 0.25, False, [4, 2, 3], [1.0, 1.0]), + ('Max-1Min8Factor0.25Square', -1, 8, 0.25, True, [4, 4, 3], [1.0, 2.0]), + ('Max2Min2Factor2.0', 2, 2, 2.0, False, [4, 2, 3], [1.0, 1.0]), + ('Max2Min2Factor2.0Square', 2, 2, 2.0, True, [4, 4, 3], [1.0, 2.0]), + ('Max16Min8Factor0.5', 16, 8, 0.5, False, [4, 2, 3], [1.0, 1.0]), + ('Max16Min8Factor0.5Square', 16, 8, 0.5, True, [4, 4, 3], [1.0, 2.0]), + ) + def testResizeImageWorks(self, max_image_size, min_image_size, resize_factor, + square_output, expected_shape, + expected_scale_factors): + # Construct image of size 4x2x3. + image = np.array([[[0, 0, 0], [1, 1, 1]], [[2, 2, 2], [3, 3, 3]], + [[4, 4, 4], [5, 5, 5]], [[6, 6, 6], [7, 7, 7]]], + dtype='uint8') + + # Set up config. + config = delf_config_pb2.DelfConfig( + max_image_size=max_image_size, + min_image_size=min_image_size, + use_square_images=square_output) + + resized_image, scale_factors = extractor.ResizeImage( + image, config, resize_factor) + self.assertAllEqual(resized_image.shape, expected_shape) + self.assertAllClose(scale_factors, expected_scale_factors) + + @parameterized.named_parameters( + ('Max2Min2', 2, 2, 1.0, False, [2, 1, 3], [0.666666, 0.5]), + ('Max2Min2Square', 2, 2, 1.0, True, [2, 2, 3], [0.666666, 1.0]), + ) + def testResizeImageRoundingWorks(self, max_image_size, min_image_size, + resize_factor, square_output, expected_shape, + expected_scale_factors): + # Construct image of size 3x2x3. + image = np.array([[[0, 0, 0], [1, 1, 1]], [[2, 2, 2], [3, 3, 3]], + [[4, 4, 4], [5, 5, 5]]], + dtype='uint8') + + # Set up config. 
+ config = delf_config_pb2.DelfConfig( + max_image_size=max_image_size, + min_image_size=min_image_size, + use_square_images=square_output) + + resized_image, scale_factors = extractor.ResizeImage( + image, config, resize_factor) + self.assertAllEqual(resized_image.shape, expected_shape) + self.assertAllClose(scale_factors, expected_scale_factors) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/delf/delf/python/examples/match_images.py b/models/research/delf/delf/python/examples/match_images.py new file mode 100644 index 0000000000000000000000000000000000000000..bb030739cb9067bf3be50f999368af622f083b54 --- /dev/null +++ b/models/research/delf/delf/python/examples/match_images.py @@ -0,0 +1,143 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Matches two images using their DELF features. + +The matching is done using feature-based nearest-neighbor search, followed by +geometric verification using RANSAC. + +The DELF features can be extracted using the extract_features.py script. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import sys + +import matplotlib +# Needed before pyplot import for matplotlib to work properly. +matplotlib.use('Agg') +import matplotlib.image as mpimg # pylint: disable=g-import-not-at-top +import matplotlib.pyplot as plt +import numpy as np +from scipy import spatial +from skimage import feature +from skimage import measure +from skimage import transform + +from tensorflow.python.platform import app +from delf import feature_io + +cmd_args = None + +_DISTANCE_THRESHOLD = 0.8 + + +def main(unused_argv): + # Read features. + locations_1, _, descriptors_1, _, _ = feature_io.ReadFromFile( + cmd_args.features_1_path) + num_features_1 = locations_1.shape[0] + print(f"Loaded image 1's {num_features_1} features") + locations_2, _, descriptors_2, _, _ = feature_io.ReadFromFile( + cmd_args.features_2_path) + num_features_2 = locations_2.shape[0] + print(f"Loaded image 2's {num_features_2} features") + + # Find nearest-neighbor matches using a KD tree. + d1_tree = spatial.cKDTree(descriptors_1) + _, indices = d1_tree.query( + descriptors_2, distance_upper_bound=_DISTANCE_THRESHOLD) + + # Select feature locations for putative matches. + locations_2_to_use = np.array([ + locations_2[i,] + for i in range(num_features_2) + if indices[i] != num_features_1 + ]) + locations_1_to_use = np.array([ + locations_1[indices[i],] + for i in range(num_features_2) + if indices[i] != num_features_1 + ]) + + # Perform geometric verification using RANSAC. + _, inliers = measure.ransac((locations_1_to_use, locations_2_to_use), + transform.AffineTransform, + min_samples=3, + residual_threshold=20, + max_trials=1000) + + print(f'Found {sum(inliers)} inliers') + + # Visualize correspondences, and save to file. 
+ _, ax = plt.subplots() + img_1 = mpimg.imread(cmd_args.image_1_path) + img_2 = mpimg.imread(cmd_args.image_2_path) + inlier_idxs = np.nonzero(inliers)[0] + feature.plot_matches( + ax, + img_1, + img_2, + locations_1_to_use, + locations_2_to_use, + np.column_stack((inlier_idxs, inlier_idxs)), + matches_color='b') + ax.axis('off') + ax.set_title('DELF correspondences') + plt.savefig(cmd_args.output_image) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.register('type', 'bool', lambda v: v.lower() == 'true') + parser.add_argument( + '--image_1_path', + type=str, + default='test_images/image_1.jpg', + help=""" + Path to test image 1. + """) + parser.add_argument( + '--image_2_path', + type=str, + default='test_images/image_2.jpg', + help=""" + Path to test image 2. + """) + parser.add_argument( + '--features_1_path', + type=str, + default='test_features/image_1.delf', + help=""" + Path to DELF features from image 1. + """) + parser.add_argument( + '--features_2_path', + type=str, + default='test_features/image_2.delf', + help=""" + Path to DELF features from image 2. + """) + parser.add_argument( + '--output_image', + type=str, + default='test_match.png', + help=""" + Path where an image showing the matches will be saved. + """) + cmd_args, unparsed = parser.parse_known_args() + app.run(main=main, argv=[sys.argv[0]] + unparsed) diff --git a/models/research/delf/delf/python/examples/matched_images_example.jpg b/models/research/delf/delf/python/examples/matched_images_example.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bbd0061ac02d460be3bc24a3f7f736d418b1da88 Binary files /dev/null and b/models/research/delf/delf/python/examples/matched_images_example.jpg differ diff --git a/models/research/delf/delf/python/feature_aggregation_extractor.py b/models/research/delf/delf/python/feature_aggregation_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..f230642ea950d5393005583334836630328198c9 --- /dev/null +++ b/models/research/delf/delf/python/feature_aggregation_extractor.py @@ -0,0 +1,475 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Local feature aggregation extraction. + +For more details, please refer to the paper: +"Detect-to-Retrieve: Efficient Regional Aggregation for Image Search", +Proc. CVPR'19 (https://arxiv.org/abs/1812.01584). +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf + +from delf import aggregation_config_pb2 + +_CLUSTER_CENTERS_VAR_NAME = "clusters" +_NORM_SQUARED_TOLERANCE = 1e-12 + +# Aliases for aggregation types. 
+_VLAD = aggregation_config_pb2.AggregationConfig.VLAD +_ASMK = aggregation_config_pb2.AggregationConfig.ASMK +_ASMK_STAR = aggregation_config_pb2.AggregationConfig.ASMK_STAR + + +class ExtractAggregatedRepresentation(object): + """Class for extraction of aggregated local feature representation. + + Args: + aggregation_config: AggregationConfig object defining type of aggregation to + use. + + Raises: + ValueError: If aggregation type is invalid. + """ + + def __init__(self, aggregation_config): + self._codebook_size = aggregation_config.codebook_size + self._feature_dimensionality = aggregation_config.feature_dimensionality + self._aggregation_type = aggregation_config.aggregation_type + self._feature_batch_size = aggregation_config.feature_batch_size + self._codebook_path = aggregation_config.codebook_path + self._use_regional_aggregation = aggregation_config.use_regional_aggregation + self._use_l2_normalization = aggregation_config.use_l2_normalization + self._num_assignments = aggregation_config.num_assignments + + if self._aggregation_type not in [_VLAD, _ASMK, _ASMK_STAR]: + raise ValueError("Invalid aggregation type: %d" % self._aggregation_type) + + # Load codebook + codebook = tf.Variable( + tf.zeros([self._codebook_size, self._feature_dimensionality], + dtype=tf.float32), + name=_CLUSTER_CENTERS_VAR_NAME) + ckpt = tf.train.Checkpoint(codebook=codebook) + ckpt.restore(self._codebook_path) + + self._codebook = codebook + + def Extract(self, features, num_features_per_region=None): + """Extracts aggregated representation. + + Args: + features: [N, D] float numpy array with N local feature descriptors. + num_features_per_region: Required only if computing regional aggregated + representations, otherwise optional. List of number of features per + region, such that sum(num_features_per_region) = N. It indicates which + features correspond to each region. + + Returns: + aggregated_descriptors: 1-D numpy array. + feature_visual_words: Used only for ASMK/ASMK* aggregation type. 1-D + numpy array denoting visual words corresponding to the + `aggregated_descriptors`. + + Raises: + ValueError: If inputs are misconfigured. + """ + features = tf.cast(features, dtype=tf.float32) + + if num_features_per_region is None: + # Use dummy value since it is unused. + num_features_per_region = [] + else: + num_features_per_region = tf.cast(num_features_per_region, dtype=tf.int32) + if len(num_features_per_region + ) and sum(num_features_per_region) != features.shape[0]: + raise ValueError( + "Incorrect arguments: sum(num_features_per_region) and " + "features.shape[0] are different: %d vs %d" % + (sum(num_features_per_region), features.shape[0])) + + # Extract features based on desired options. + if self._aggregation_type == _VLAD: + # Feature visual words are unused in the case of VLAD, so just return + # dummy constant. 
+ feature_visual_words = tf.constant(-1, dtype=tf.int32) + if self._use_regional_aggregation: + aggregated_descriptors = self._ComputeRvlad( + features, + num_features_per_region, + self._codebook, + use_l2_normalization=self._use_l2_normalization, + num_assignments=self._num_assignments) + else: + aggregated_descriptors = self._ComputeVlad( + features, + self._codebook, + use_l2_normalization=self._use_l2_normalization, + num_assignments=self._num_assignments) + elif (self._aggregation_type == _ASMK or + self._aggregation_type == _ASMK_STAR): + if self._use_regional_aggregation: + (aggregated_descriptors, + feature_visual_words) = self._ComputeRasmk( + features, + num_features_per_region, + self._codebook, + num_assignments=self._num_assignments) + else: + (aggregated_descriptors, + feature_visual_words) = self._ComputeAsmk( + features, + self._codebook, + num_assignments=self._num_assignments) + + feature_visual_words_output = feature_visual_words.numpy() + + # If using ASMK*/RASMK*, binarize the aggregated descriptors. + if self._aggregation_type == _ASMK_STAR: + reshaped_aggregated_descriptors = np.reshape( + aggregated_descriptors, [-1, self._feature_dimensionality]) + packed_descriptors = np.packbits( + reshaped_aggregated_descriptors > 0, axis=1) + aggregated_descriptors_output = np.reshape(packed_descriptors, [-1]) + else: + aggregated_descriptors_output = aggregated_descriptors.numpy() + + return aggregated_descriptors_output, feature_visual_words_output + + def _ComputeVlad(self, + features, + codebook, + use_l2_normalization=True, + num_assignments=1): + """Compute VLAD representation. + + Args: + features: [N, D] float tensor. + codebook: [K, D] float tensor. + use_l2_normalization: If False, does not L2-normalize after aggregation. + num_assignments: Number of visual words to assign a feature to. + + Returns: + vlad: [K*D] float tensor. + """ + + def _ComputeVladEmptyFeatures(): + """Computes VLAD if `features` is empty. + + Returns: + [K*D] all-zeros tensor. + """ + return tf.zeros([self._codebook_size * self._feature_dimensionality], + dtype=tf.float32) + + def _ComputeVladNonEmptyFeatures(): + """Computes VLAD if `features` is not empty. + + Returns: + [K*D] tensor with VLAD descriptor. + """ + num_features = tf.shape(features)[0] + + # Find nearest visual words for each feature. Possibly batch the local + # features to avoid OOM. + if self._feature_batch_size <= 0: + actual_batch_size = num_features + else: + actual_batch_size = self._feature_batch_size + + def _BatchNearestVisualWords(ind, selected_visual_words): + """Compute nearest neighbor visual words for a batch of features. + + Args: + ind: Integer index denoting feature. + selected_visual_words: Partial set of visual words. + + Returns: + output_ind: Next index. + output_selected_visual_words: Updated set of visual words, including + the visual words for the new batch. + """ + # Handle case of last batch, where there may be fewer than + # `actual_batch_size` features. + batch_size_to_use = tf.cond( + tf.greater(ind + actual_batch_size, num_features), + true_fn=lambda: num_features - ind, + false_fn=lambda: actual_batch_size) + + # Denote B = batch_size_to_use. + # K*B x D. + tiled_features = tf.reshape( + tf.tile( + tf.slice(features, [ind, 0], + [batch_size_to_use, self._feature_dimensionality]), + [1, self._codebook_size]), [-1, self._feature_dimensionality]) + # K*B x D. 
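For ASMK*, the aggregated residuals are binarized with `np.packbits` exactly as above: the flat descriptor is reshaped to one row per selected visual word, thresholded at zero, and packed into bytes, so each dimension costs one bit instead of one float. A standalone illustration with a small feature dimensionality (the configs in this change use 128):

```python
import numpy as np

feature_dimensionality = 16  # small for illustration; the configs here use 128
rng = np.random.RandomState(0)
aggregated_descriptors = rng.randn(3 * feature_dimensionality)  # 3 visual words

reshaped = np.reshape(aggregated_descriptors, [-1, feature_dimensionality])
packed = np.packbits(reshaped > 0, axis=1)  # 1 bit per dimension -> uint8 bytes
binarized = np.reshape(packed, [-1])

print(reshaped.shape, packed.shape, binarized.shape)  # (3, 16) (3, 2) (6,)
```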
+ tiled_codebook = tf.reshape( + tf.tile(tf.reshape(codebook, [1, -1]), [batch_size_to_use, 1]), + [-1, self._feature_dimensionality]) + # B x K. + squared_distances = tf.reshape( + tf.reduce_sum( + tf.math.squared_difference(tiled_features, tiled_codebook), + axis=1), [batch_size_to_use, self._codebook_size]) + # B x K. + nearest_visual_words = tf.argsort(squared_distances) + # B x num_assignments. + batch_selected_visual_words = tf.slice( + nearest_visual_words, [0, 0], [batch_size_to_use, num_assignments]) + selected_visual_words = tf.concat( + [selected_visual_words, batch_selected_visual_words], axis=0) + + return ind + batch_size_to_use, selected_visual_words + + ind_batch = tf.constant(0, dtype=tf.int32) + keep_going = lambda j, selected_visual_words: tf.less(j, num_features) + selected_visual_words = tf.zeros([0, num_assignments], dtype=tf.int32) + _, selected_visual_words = tf.while_loop( + cond=keep_going, + body=_BatchNearestVisualWords, + loop_vars=[ind_batch, selected_visual_words], + shape_invariants=[ + ind_batch.get_shape(), + tf.TensorShape([None, num_assignments]) + ], + parallel_iterations=1, + back_prop=False) + + # Helper function to collect residuals for relevant visual words. + def _ConstructVladFromAssignments(ind, vlad): + """Add contributions of a feature to a VLAD descriptor. + + Args: + ind: Integer index denoting feature. + vlad: Partial VLAD descriptor. + + Returns: + output_ind: Next index (ie, ind+1). + output_vlad: VLAD descriptor updated to take into account contribution + from ind-th feature. + """ + diff = tf.tile( + tf.expand_dims(features[ind], + axis=0), [num_assignments, 1]) - tf.gather( + codebook, selected_visual_words[ind]) + return ind + 1, tf.tensor_scatter_nd_add( + vlad, tf.expand_dims(selected_visual_words[ind], axis=1), + tf.cast(diff, dtype=tf.float32)) + + ind_vlad = tf.constant(0, dtype=tf.int32) + keep_going = lambda j, vlad: tf.less(j, num_features) + vlad = tf.zeros([self._codebook_size, self._feature_dimensionality], + dtype=tf.float32) + _, vlad = tf.while_loop( + cond=keep_going, + body=_ConstructVladFromAssignments, + loop_vars=[ind_vlad, vlad], + back_prop=False) + + vlad = tf.reshape(vlad, + [self._codebook_size * self._feature_dimensionality]) + if use_l2_normalization: + vlad = tf.math.l2_normalize(vlad, epsilon=_NORM_SQUARED_TOLERANCE) + + return vlad + + return tf.cond( + tf.greater(tf.size(features), 0), + true_fn=_ComputeVladNonEmptyFeatures, + false_fn=_ComputeVladEmptyFeatures) + + def _ComputeRvlad(self, + features, + num_features_per_region, + codebook, + use_l2_normalization=False, + num_assignments=1): + """Compute R-VLAD representation. + + Args: + features: [N, D] float tensor. + num_features_per_region: [R] int tensor. Contains number of features per + region, such that sum(num_features_per_region) = N. It indicates which + features correspond to each region. + codebook: [K, D] float tensor. + use_l2_normalization: If True, performs L2-normalization after regional + aggregation; if False (default), performs componentwise division by R + after regional aggregation. + num_assignments: Number of visual words to assign a feature to. + + Returns: + rvlad: [K*D] float tensor. + """ + + def _ComputeRvladEmptyRegions(): + """Computes R-VLAD if `num_features_per_region` is empty. + + Returns: + [K*D] all-zeros tensor. + """ + return tf.zeros([self._codebook_size * self._feature_dimensionality], + dtype=tf.float32) + + def _ComputeRvladNonEmptyRegions(): + """Computes R-VLAD if `num_features_per_region` is not empty. 
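+
+      Per-region VLAD descriptors (each computed with its default L2
+      normalization) are summed over all regions; the sum is then either
+      L2-normalized or divided by the number of regions, depending on
+      `use_l2_normalization`.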
+ + Returns: + [K*D] tensor with R-VLAD descriptor. + """ + + # Helper function to compose initial R-VLAD from image regions. + def _ConstructRvladFromVlad(ind, rvlad): + """Add contributions from different regions into R-VLAD. + + Args: + ind: Integer index denoting region. + rvlad: Partial R-VLAD descriptor. + + Returns: + output_ind: Next index (ie, ind+1). + output_rvlad: R-VLAD descriptor updated to take into account + contribution from ind-th region. + """ + return ind + 1, rvlad + self._ComputeVlad( + tf.slice( + features, [tf.reduce_sum(num_features_per_region[:ind]), 0], + [num_features_per_region[ind], self._feature_dimensionality]), + codebook, + num_assignments=num_assignments) + + i = tf.constant(0, dtype=tf.int32) + num_regions = tf.shape(num_features_per_region)[0] + keep_going = lambda j, rvlad: tf.less(j, num_regions) + rvlad = tf.zeros([self._codebook_size * self._feature_dimensionality], + dtype=tf.float32) + _, rvlad = tf.while_loop( + cond=keep_going, + body=_ConstructRvladFromVlad, + loop_vars=[i, rvlad], + back_prop=False, + parallel_iterations=1) + + if use_l2_normalization: + rvlad = tf.math.l2_normalize(rvlad, epsilon=_NORM_SQUARED_TOLERANCE) + else: + rvlad /= tf.cast(num_regions, dtype=tf.float32) + + return rvlad + + return tf.cond( + tf.greater(tf.size(num_features_per_region), 0), + true_fn=_ComputeRvladNonEmptyRegions, + false_fn=_ComputeRvladEmptyRegions) + + def _PerCentroidNormalization(self, unnormalized_vector): + """Perform per-centroid normalization. + + Args: + unnormalized_vector: [KxD] float tensor. + + Returns: + per_centroid_normalized_vector: [KxD] float tensor, with normalized + aggregated residuals. Some residuals may be all-zero. + visual_words: Int tensor containing indices of visual words which are + present for the set of features. + """ + unnormalized_vector = tf.reshape( + unnormalized_vector, + [self._codebook_size, self._feature_dimensionality]) + per_centroid_norms = tf.norm(unnormalized_vector, axis=1) + + visual_words = tf.reshape( + tf.where( + tf.greater( + per_centroid_norms, + tf.cast(tf.sqrt(_NORM_SQUARED_TOLERANCE), dtype=tf.float32))), + [-1]) + + per_centroid_normalized_vector = tf.math.l2_normalize( + unnormalized_vector, axis=1, epsilon=_NORM_SQUARED_TOLERANCE) + + return per_centroid_normalized_vector, visual_words + + def _ComputeAsmk(self, features, codebook, num_assignments=1): + """Compute ASMK representation. + + Args: + features: [N, D] float tensor. + codebook: [K, D] float tensor. + num_assignments: Number of visual words to assign a feature to. + + Returns: + normalized_residuals: 1-dimensional float tensor with concatenated + residuals which are non-zero. Note that the dimensionality is + input-dependent. + visual_words: 1-dimensional int tensor of sorted visual word ids. + Dimensionality is shape(normalized_residuals)[0] / D. + """ + unnormalized_vlad = self._ComputeVlad( + features, + codebook, + use_l2_normalization=False, + num_assignments=num_assignments) + + per_centroid_normalized_vlad, visual_words = self._PerCentroidNormalization( + unnormalized_vlad) + + normalized_residuals = tf.reshape( + tf.gather(per_centroid_normalized_vlad, visual_words), + [tf.shape(visual_words)[0] * self._feature_dimensionality]) + + return normalized_residuals, visual_words + + def _ComputeRasmk(self, + features, + num_features_per_region, + codebook, + num_assignments=1): + """Compute R-ASMK representation. + + Args: + features: [N, D] float tensor. + num_features_per_region: [R] int tensor. 
Contains number of features per + region, such that sum(num_features_per_region) = N. It indicates which + features correspond to each region. + codebook: [K, D] float tensor. + num_assignments: Number of visual words to assign a feature to. + + Returns: + normalized_residuals: 1-dimensional float tensor with concatenated + residuals which are non-zero. Note that the dimensionality is + input-dependent. + visual_words: 1-dimensional int tensor of sorted visual word ids. + Dimensionality is shape(normalized_residuals)[0] / D. + """ + unnormalized_rvlad = self._ComputeRvlad( + features, + num_features_per_region, + codebook, + use_l2_normalization=False, + num_assignments=num_assignments) + + (per_centroid_normalized_rvlad, + visual_words) = self._PerCentroidNormalization(unnormalized_rvlad) + + normalized_residuals = tf.reshape( + tf.gather(per_centroid_normalized_rvlad, visual_words), + [tf.shape(visual_words)[0] * self._feature_dimensionality]) + + return normalized_residuals, visual_words diff --git a/models/research/delf/delf/python/feature_aggregation_extractor_test.py b/models/research/delf/delf/python/feature_aggregation_extractor_test.py new file mode 100644 index 0000000000000000000000000000000000000000..dfba92a2b1b4847460fefbba5ef41fa8cce1ba42 --- /dev/null +++ b/models/research/delf/delf/python/feature_aggregation_extractor_test.py @@ -0,0 +1,494 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for DELF feature aggregation.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from absl import flags +import numpy as np +import tensorflow as tf + +from delf import aggregation_config_pb2 +from delf import feature_aggregation_extractor + +FLAGS = flags.FLAGS + + +class FeatureAggregationTest(tf.test.TestCase): + + def _CreateCodebook(self, checkpoint_path): + """Creates codebook used in tests. + + Args: + checkpoint_path: Directory where codebook is saved to. + """ + codebook = tf.Variable( + [[0.5, 0.5], [0.0, 0.0], [1.0, 0.0], [-0.5, -0.5], [0.0, 1.0]], + name='clusters', + dtype=tf.float32) + ckpt = tf.train.Checkpoint(codebook=codebook) + ckpt.write(checkpoint_path) + + def setUp(self): + self._codebook_path = os.path.join(FLAGS.test_tmpdir, 'test_codebook') + self._CreateCodebook(self._codebook_path) + + def testComputeNormalizedVladWorks(self): + # Construct inputs. + # 3 2-D features. + features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float) + config = aggregation_config_pb2.AggregationConfig() + config.codebook_size = 5 + config.feature_dimensionality = 2 + config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD + config.use_l2_normalization = True + config.codebook_path = self._codebook_path + config.num_assignments = 1 + + # Run tested function. 
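+    # The extractor restores the 5x2 codebook written in setUp(); the three
+    # features are assigned to centroids 2, 3 and 4, and exp_vlad below is the
+    # L2-normalized concatenation of the per-centroid residuals.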
+ extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation( + config) + vlad, extra_output = extractor.Extract(features) + + # Define expected results. + exp_vlad = [ + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.316228, 0.316228, 0.632456, 0.632456 + ] + exp_extra_output = -1 + + # Compare actual and expected results. + self.assertAllClose(vlad, exp_vlad) + self.assertAllEqual(extra_output, exp_extra_output) + + def testComputeNormalizedVladWithBatchingWorks(self): + # Construct inputs. + # 3 2-D features. + features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float) + config = aggregation_config_pb2.AggregationConfig() + config.codebook_size = 5 + config.feature_dimensionality = 2 + config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD + config.use_l2_normalization = True + config.codebook_path = self._codebook_path + config.num_assignments = 1 + config.feature_batch_size = 2 + + # Run tested function. + extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation( + config) + vlad, extra_output = extractor.Extract(features) + + # Define expected results. + exp_vlad = [ + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.316228, 0.316228, 0.632456, 0.632456 + ] + exp_extra_output = -1 + + # Compare actual and expected results. + self.assertAllClose(vlad, exp_vlad) + self.assertAllEqual(extra_output, exp_extra_output) + + def testComputeUnnormalizedVladWorks(self): + # Construct inputs. + # 3 2-D features. + features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float) + config = aggregation_config_pb2.AggregationConfig() + config.codebook_size = 5 + config.feature_dimensionality = 2 + config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD + config.use_l2_normalization = False + config.codebook_path = self._codebook_path + config.num_assignments = 1 + + # Run tested function. + extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation( + config) + vlad, extra_output = extractor.Extract(features) + + # Define expected results. + exp_vlad = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5, 0.5, 1.0, 1.0] + exp_extra_output = -1 + + # Compare actual and expected results. + self.assertAllEqual(vlad, exp_vlad) + self.assertAllEqual(extra_output, exp_extra_output) + + def testComputeUnnormalizedVladMultipleAssignmentWorks(self): + # Construct inputs. + # 3 2-D features. + features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float) + config = aggregation_config_pb2.AggregationConfig() + config.codebook_size = 5 + config.feature_dimensionality = 2 + config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD + config.use_l2_normalization = False + config.codebook_path = self._codebook_path + config.num_assignments = 3 + + # Run tested function. + extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation( + config) + vlad, extra_output = extractor.Extract(features) + + # Define expected results. + exp_vlad = [1.0, 1.0, 0.0, 0.0, 0.0, 2.0, -0.5, 0.5, 0.0, 0.0] + exp_extra_output = -1 + + # Compare actual and expected results. + self.assertAllEqual(vlad, exp_vlad) + self.assertAllEqual(extra_output, exp_extra_output) + + def testComputeVladEmptyFeaturesWorks(self): + # Construct inputs. + # Empty feature array. + features = np.array([[]]) + config = aggregation_config_pb2.AggregationConfig() + config.codebook_size = 5 + config.feature_dimensionality = 2 + config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD + config.codebook_path = self._codebook_path + + # Run tested function. 
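+    # With no input features, Extract() should return an all-zeros VLAD of
+    # dimension K*D = 10 and the dummy visual word value -1.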
+ extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation( + config) + vlad, extra_output = extractor.Extract(features) + + # Define expected results. + exp_vlad = np.zeros([10], dtype=float) + exp_extra_output = -1 + + # Compare actual and expected results. + self.assertAllEqual(vlad, exp_vlad) + self.assertAllEqual(extra_output, exp_extra_output) + + def testComputeUnnormalizedRvladWorks(self): + # Construct inputs. + # 4 2-D features: 3 in first region, 1 in second region. + features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]], + dtype=float) + num_features_per_region = np.array([3, 1]) + config = aggregation_config_pb2.AggregationConfig() + config.codebook_size = 5 + config.feature_dimensionality = 2 + config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD + config.use_l2_normalization = False + config.codebook_path = self._codebook_path + config.num_assignments = 1 + config.use_regional_aggregation = True + + # Run tested function. + extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation( + config) + rvlad, extra_output = extractor.Extract(features, num_features_per_region) + + # Define expected results. + exp_rvlad = [ + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.158114, 0.158114, 0.316228, 0.816228 + ] + exp_extra_output = -1 + + # Compare actual and expected results. + self.assertAllClose(rvlad, exp_rvlad) + self.assertAllEqual(extra_output, exp_extra_output) + + def testComputeNormalizedRvladWorks(self): + # Construct inputs. + # 4 2-D features: 3 in first region, 1 in second region. + features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]], + dtype=float) + num_features_per_region = np.array([3, 1]) + config = aggregation_config_pb2.AggregationConfig() + config.codebook_size = 5 + config.feature_dimensionality = 2 + config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD + config.use_l2_normalization = True + config.codebook_path = self._codebook_path + config.num_assignments = 1 + config.use_regional_aggregation = True + + # Run tested function. + extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation( + config) + rvlad, extra_output = extractor.Extract(features, num_features_per_region) + + # Define expected results. + exp_rvlad = [ + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.175011, 0.175011, 0.350021, 0.903453 + ] + exp_extra_output = -1 + + # Compare actual and expected results. + self.assertAllClose(rvlad, exp_rvlad) + self.assertAllEqual(extra_output, exp_extra_output) + + def testComputeRvladEmptyRegionsWorks(self): + # Construct inputs. + # Empty feature array. + features = np.array([[]]) + num_features_per_region = np.array([]) + config = aggregation_config_pb2.AggregationConfig() + config.codebook_size = 5 + config.feature_dimensionality = 2 + config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD + config.codebook_path = self._codebook_path + config.use_regional_aggregation = True + + # Run tested function. + extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation( + config) + rvlad, extra_output = extractor.Extract(features, num_features_per_region) + + # Define expected results. + exp_rvlad = np.zeros([10], dtype=float) + exp_extra_output = -1 + + # Compare actual and expected results. + self.assertAllEqual(rvlad, exp_rvlad) + self.assertAllEqual(extra_output, exp_extra_output) + + def testComputeUnnormalizedRvladSomeEmptyRegionsWorks(self): + # Construct inputs. 
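+    # Regions with zero features contribute an all-zeros VLAD, but they still
+    # count in the division by the number of regions (4 in this test).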
+ # 4 2-D features: 0 in first region, 3 in second region, 0 in third region, + # 1 in fourth region. + features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]], + dtype=float) + num_features_per_region = np.array([0, 3, 0, 1]) + config = aggregation_config_pb2.AggregationConfig() + config.codebook_size = 5 + config.feature_dimensionality = 2 + config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD + config.use_l2_normalization = False + config.codebook_path = self._codebook_path + config.num_assignments = 1 + config.use_regional_aggregation = True + + # Run tested function. + extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation( + config) + rvlad, extra_output = extractor.Extract(features, num_features_per_region) + + # Define expected results. + exp_rvlad = [ + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.079057, 0.079057, 0.158114, 0.408114 + ] + exp_extra_output = -1 + + # Compare actual and expected results. + self.assertAllClose(rvlad, exp_rvlad) + self.assertAllEqual(extra_output, exp_extra_output) + + def testComputeNormalizedRvladSomeEmptyRegionsWorks(self): + # Construct inputs. + # 4 2-D features: 0 in first region, 3 in second region, 0 in third region, + # 1 in fourth region. + features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]], + dtype=float) + num_features_per_region = np.array([0, 3, 0, 1]) + config = aggregation_config_pb2.AggregationConfig() + config.codebook_size = 5 + config.feature_dimensionality = 2 + config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD + config.use_l2_normalization = True + config.codebook_path = self._codebook_path + config.num_assignments = 1 + config.use_regional_aggregation = True + + # Run tested function. + extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation( + config) + rvlad, extra_output = extractor.Extract(features, num_features_per_region) + + # Define expected results. + exp_rvlad = [ + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.175011, 0.175011, 0.350021, 0.903453 + ] + exp_extra_output = -1 + + # Compare actual and expected results. + self.assertAllClose(rvlad, exp_rvlad) + self.assertAllEqual(extra_output, exp_extra_output) + + def testComputeRvladMisconfiguredFeatures(self): + # Construct inputs. + # 4 2-D features: 3 in first region, 1 in second region. + features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]], + dtype=float) + # Misconfigured number of features; there are only 4 features, but + # sum(num_features_per_region) = 5. + num_features_per_region = np.array([3, 2]) + config = aggregation_config_pb2.AggregationConfig() + config.codebook_size = 5 + config.feature_dimensionality = 2 + config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD + config.codebook_path = self._codebook_path + config.use_regional_aggregation = True + + # Run tested function. + extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation( + config) + with self.assertRaisesRegex( + ValueError, + r'Incorrect arguments: sum\(num_features_per_region\) and ' + r'features.shape\[0\] are different'): + extractor.Extract(features, num_features_per_region) + + def testComputeAsmkWorks(self): + # Construct inputs. + # 3 2-D features. 
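+    # ASMK keeps, per visited centroid, the L2-normalized aggregated residual
+    # plus the centroid id. Centroid 2 receives a zero residual here and is
+    # dropped, so only visual words 3 and 4 appear in the expected output.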
+ features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float) + config = aggregation_config_pb2.AggregationConfig() + config.codebook_size = 5 + config.feature_dimensionality = 2 + config.aggregation_type = aggregation_config_pb2.AggregationConfig.ASMK + config.codebook_path = self._codebook_path + config.num_assignments = 1 + + # Run tested function. + extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation( + config) + asmk, visual_words = extractor.Extract(features) + + # Define expected results. + exp_asmk = [-0.707107, 0.707107, 0.707107, 0.707107] + exp_visual_words = [3, 4] + + # Compare actual and expected results. + self.assertAllClose(asmk, exp_asmk) + self.assertAllEqual(visual_words, exp_visual_words) + + def testComputeAsmkStarWorks(self): + # Construct inputs. + # 3 2-D features. + features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float) + config = aggregation_config_pb2.AggregationConfig() + config.codebook_size = 5 + config.feature_dimensionality = 2 + config.aggregation_type = aggregation_config_pb2.AggregationConfig.ASMK_STAR + config.codebook_path = self._codebook_path + config.num_assignments = 1 + + # Run tested function. + extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation( + config) + asmk_star, visual_words = extractor.Extract(features) + + # Define expected results. + exp_asmk_star = [64, 192] + exp_visual_words = [3, 4] + + # Compare actual and expected results. + self.assertAllEqual(asmk_star, exp_asmk_star) + self.assertAllEqual(visual_words, exp_visual_words) + + def testComputeAsmkMultipleAssignmentWorks(self): + # Construct inputs. + # 3 2-D features. + features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float) + config = aggregation_config_pb2.AggregationConfig() + config.codebook_size = 5 + config.feature_dimensionality = 2 + config.aggregation_type = aggregation_config_pb2.AggregationConfig.ASMK + config.codebook_path = self._codebook_path + config.num_assignments = 3 + + # Run tested function. + extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation( + config) + asmk, visual_words = extractor.Extract(features) + + # Define expected results. + exp_asmk = [0.707107, 0.707107, 0.0, 1.0, -0.707107, 0.707107] + exp_visual_words = [0, 2, 3] + + # Compare actual and expected results. + self.assertAllClose(asmk, exp_asmk) + self.assertAllEqual(visual_words, exp_visual_words) + + def testComputeRasmkWorks(self): + # Construct inputs. + # 4 2-D features: 3 in first region, 1 in second region. + features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]], + dtype=float) + num_features_per_region = np.array([3, 1]) + config = aggregation_config_pb2.AggregationConfig() + config.codebook_size = 5 + config.feature_dimensionality = 2 + config.aggregation_type = aggregation_config_pb2.AggregationConfig.ASMK + config.codebook_path = self._codebook_path + config.num_assignments = 1 + config.use_regional_aggregation = True + + # Run tested function. + extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation( + config) + rasmk, visual_words = extractor.Extract(features, num_features_per_region) + + # Define expected results. + exp_rasmk = [-0.707107, 0.707107, 0.361261, 0.932465] + exp_visual_words = [3, 4] + + # Compare actual and expected results. + self.assertAllClose(rasmk, exp_rasmk) + self.assertAllEqual(visual_words, exp_visual_words) + + def testComputeRasmkStarWorks(self): + # Construct inputs. 
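+    # ASMK* binarizes each per-centroid residual (one sign bit per dimension)
+    # and packs the bits MSB-first into uint8 values, which is why the
+    # expected descriptor below is the two bytes [64, 192].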
+ # 4 2-D features: 3 in first region, 1 in second region. + features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]], + dtype=float) + num_features_per_region = np.array([3, 1]) + config = aggregation_config_pb2.AggregationConfig() + config.codebook_size = 5 + config.feature_dimensionality = 2 + config.aggregation_type = aggregation_config_pb2.AggregationConfig.ASMK_STAR + config.codebook_path = self._codebook_path + config.num_assignments = 1 + config.use_regional_aggregation = True + + # Run tested function. + extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation( + config) + rasmk_star, visual_words = extractor.Extract(features, + num_features_per_region) + + # Define expected results. + exp_rasmk_star = [64, 192] + exp_visual_words = [3, 4] + + # Compare actual and expected results. + self.assertAllEqual(rasmk_star, exp_rasmk_star) + self.assertAllEqual(visual_words, exp_visual_words) + + def testComputeUnknownAggregation(self): + # Construct inputs. + config = aggregation_config_pb2.AggregationConfig() + config.codebook_size = 5 + config.feature_dimensionality = 2 + config.aggregation_type = 0 + config.codebook_path = self._codebook_path + config.use_regional_aggregation = True + + # Run tested function. + with self.assertRaisesRegex(ValueError, 'Invalid aggregation type'): + feature_aggregation_extractor.ExtractAggregatedRepresentation( + config) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/delf/delf/python/feature_aggregation_similarity.py b/models/research/delf/delf/python/feature_aggregation_similarity.py new file mode 100644 index 0000000000000000000000000000000000000000..991c95c767c6bed5d0db38226a0cf361eee18c2f --- /dev/null +++ b/models/research/delf/delf/python/feature_aggregation_similarity.py @@ -0,0 +1,265 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Local feature aggregation similarity computation. + +For more details, please refer to the paper: +"Detect-to-Retrieve: Efficient Regional Aggregation for Image Search", +Proc. CVPR'19 (https://arxiv.org/abs/1812.01584). +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + +from delf import aggregation_config_pb2 + +# Aliases for aggregation types. +_VLAD = aggregation_config_pb2.AggregationConfig.VLAD +_ASMK = aggregation_config_pb2.AggregationConfig.ASMK +_ASMK_STAR = aggregation_config_pb2.AggregationConfig.ASMK_STAR + + +class SimilarityAggregatedRepresentation(object): + """Class for computing similarity of aggregated local feature representations. + + Args: + aggregation_config: AggregationConfig object defining type of aggregation to + use. + + Raises: + ValueError: If aggregation type is invalid. 
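+
+  A minimal usage sketch (assuming the aggregated descriptors and visual words
+  were produced by ExtractAggregatedRepresentation with a matching config):
+    similarity_computer = SimilarityAggregatedRepresentation(config)
+    score = similarity_computer.ComputeSimilarity(
+        aggregated_descriptors_1, aggregated_descriptors_2,
+        feature_visual_words_1, feature_visual_words_2)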
+ """ + + def __init__(self, aggregation_config): + self._feature_dimensionality = aggregation_config.feature_dimensionality + self._aggregation_type = aggregation_config.aggregation_type + + # Only relevant if using ASMK/ASMK*. Otherwise, ignored. + self._use_l2_normalization = aggregation_config.use_l2_normalization + self._alpha = aggregation_config.alpha + self._tau = aggregation_config.tau + + # Only relevant if using ASMK*. Otherwise, ignored. + self._number_bits = np.array([bin(n).count('1') for n in range(256)]) + + def ComputeSimilarity(self, + aggregated_descriptors_1, + aggregated_descriptors_2, + feature_visual_words_1=None, + feature_visual_words_2=None): + """Computes similarity between aggregated descriptors. + + Args: + aggregated_descriptors_1: 1-D NumPy array. + aggregated_descriptors_2: 1-D NumPy array. + feature_visual_words_1: Used only for ASMK/ASMK* aggregation type. 1-D + sorted NumPy integer array denoting visual words corresponding to + `aggregated_descriptors_1`. + feature_visual_words_2: Used only for ASMK/ASMK* aggregation type. 1-D + sorted NumPy integer array denoting visual words corresponding to + `aggregated_descriptors_2`. + + Returns: + similarity: Float. The larger, the more similar. + + Raises: + ValueError: If aggregation type is invalid. + """ + if self._aggregation_type == _VLAD: + similarity = np.dot(aggregated_descriptors_1, aggregated_descriptors_2) + elif self._aggregation_type == _ASMK: + similarity = self._AsmkSimilarity( + aggregated_descriptors_1, + aggregated_descriptors_2, + feature_visual_words_1, + feature_visual_words_2, + binarized=False) + elif self._aggregation_type == _ASMK_STAR: + similarity = self._AsmkSimilarity( + aggregated_descriptors_1, + aggregated_descriptors_2, + feature_visual_words_1, + feature_visual_words_2, + binarized=True) + else: + raise ValueError('Invalid aggregation type: %d' % self._aggregation_type) + + return similarity + + def _CheckAsmkDimensionality(self, aggregated_descriptors, num_visual_words, + descriptor_name): + """Checks that ASMK dimensionality is as expected. + + Args: + aggregated_descriptors: 1-D NumPy array. + num_visual_words: Integer. + descriptor_name: String. + + Raises: + ValueError: If descriptor dimensionality is incorrect. + """ + if len(aggregated_descriptors + ) / num_visual_words != self._feature_dimensionality: + raise ValueError( + 'Feature dimensionality for aggregated descriptor %s is invalid: %d;' + ' expected %d.' % (descriptor_name, len(aggregated_descriptors) / + num_visual_words, self._feature_dimensionality)) + + def _SigmaFn(self, x): + """Selectivity ASMK/ASMK* similarity function. + + Args: + x: Scalar or 1-D NumPy array. + + Returns: + result: Same type as input, with output of selectivity function. + """ + if np.isscalar(x): + if x > self._tau: + result = np.sign(x) * np.power(np.absolute(x), self._alpha) + else: + result = 0.0 + else: + result = np.zeros_like(x) + above_tau = np.nonzero(x > self._tau) + result[above_tau] = np.sign(x[above_tau]) * np.power( + np.absolute(x[above_tau]), self._alpha) + + return result + + def _BinaryNormalizedInnerProduct(self, descriptors_1, descriptors_2): + """Computes normalized binary inner product. + + Args: + descriptors_1: 1-D NumPy integer array. + descriptors_2: 1-D NumPy integer array. + + Returns: + inner_product: Float. + + Raises: + ValueError: If the dimensionality of descriptors is different. 
+ """ + num_descriptors = len(descriptors_1) + if num_descriptors != len(descriptors_2): + raise ValueError( + 'Descriptors have incompatible dimensionality: %d vs %d' % + (len(descriptors_1), len(descriptors_2))) + + h = 0 + for i in range(num_descriptors): + h += self._number_bits[np.bitwise_xor(descriptors_1[i], descriptors_2[i])] + + # If local feature dimensionality is lower than 8, then use that to compute + # proper binarized inner product. + bits_per_descriptor = min(self._feature_dimensionality, 8) + + total_num_bits = bits_per_descriptor * num_descriptors + + return 1.0 - 2.0 * h / total_num_bits + + def _AsmkSimilarity(self, + aggregated_descriptors_1, + aggregated_descriptors_2, + visual_words_1, + visual_words_2, + binarized=False): + """Compute ASMK-based similarity. + + If `aggregated_descriptors_1` or `aggregated_descriptors_2` is empty, we + return a similarity of -1.0. + + If binarized is True, `aggregated_descriptors_1` and + `aggregated_descriptors_2` must be of type uint8. + + Args: + aggregated_descriptors_1: 1-D NumPy array. + aggregated_descriptors_2: 1-D NumPy array. + visual_words_1: 1-D sorted NumPy integer array denoting visual words + corresponding to `aggregated_descriptors_1`. + visual_words_2: 1-D sorted NumPy integer array denoting visual words + corresponding to `aggregated_descriptors_2`. + binarized: If True, compute ASMK* similarity. + + Returns: + similarity: Float. The larger, the more similar. + + Raises: + ValueError: If input descriptor dimensionality is inconsistent, or if + descriptor type is unsupported. + """ + num_visual_words_1 = len(visual_words_1) + num_visual_words_2 = len(visual_words_2) + + if not num_visual_words_1 or not num_visual_words_2: + return -1.0 + + # Parse dimensionality used per visual word. They must be the same for both + # aggregated descriptors. If using ASMK, they also must be equal to + # self._feature_dimensionality. + if binarized: + if aggregated_descriptors_1.dtype != 'uint8': + raise ValueError('Incorrect input descriptor type: %s' % + aggregated_descriptors_1.dtype) + if aggregated_descriptors_2.dtype != 'uint8': + raise ValueError('Incorrect input descriptor type: %s' % + aggregated_descriptors_2.dtype) + + per_visual_word_dimensionality = int( + len(aggregated_descriptors_1) / num_visual_words_1) + if len(aggregated_descriptors_2 + ) / num_visual_words_2 != per_visual_word_dimensionality: + raise ValueError('ASMK* dimensionality is inconsistent.') + else: + per_visual_word_dimensionality = self._feature_dimensionality + self._CheckAsmkDimensionality(aggregated_descriptors_1, + num_visual_words_1, '1') + self._CheckAsmkDimensionality(aggregated_descriptors_2, + num_visual_words_2, '2') + + aggregated_descriptors_1_reshape = np.reshape( + aggregated_descriptors_1, + [num_visual_words_1, per_visual_word_dimensionality]) + aggregated_descriptors_2_reshape = np.reshape( + aggregated_descriptors_2, + [num_visual_words_2, per_visual_word_dimensionality]) + + # Loop over visual words, compute similarity. 
+ unnormalized_similarity = 0.0 + ind_1 = 0 + ind_2 = 0 + while ind_1 < num_visual_words_1 and ind_2 < num_visual_words_2: + if visual_words_1[ind_1] == visual_words_2[ind_2]: + if binarized: + inner_product = self._BinaryNormalizedInnerProduct( + aggregated_descriptors_1_reshape[ind_1], + aggregated_descriptors_2_reshape[ind_2]) + else: + inner_product = np.dot(aggregated_descriptors_1_reshape[ind_1], + aggregated_descriptors_2_reshape[ind_2]) + unnormalized_similarity += self._SigmaFn(inner_product) + ind_1 += 1 + ind_2 += 1 + elif visual_words_1[ind_1] > visual_words_2[ind_2]: + ind_2 += 1 + else: + ind_1 += 1 + + final_similarity = unnormalized_similarity + if self._use_l2_normalization: + final_similarity /= np.sqrt(num_visual_words_1 * num_visual_words_2) + + return final_similarity diff --git a/models/research/delf/delf/python/feature_aggregation_similarity_test.py b/models/research/delf/delf/python/feature_aggregation_similarity_test.py new file mode 100644 index 0000000000000000000000000000000000000000..e2f01b1d2a7b36b87773714f0cc027a98a36324f --- /dev/null +++ b/models/research/delf/delf/python/feature_aggregation_similarity_test.py @@ -0,0 +1,137 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for DELF feature aggregation similarity.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf + +from delf import aggregation_config_pb2 +from delf import feature_aggregation_similarity + + +class FeatureAggregationSimilarityTest(tf.test.TestCase): + + def testComputeVladSimilarityWorks(self): + # Construct inputs. + vlad_1 = np.array([0, 1, 2, 3, 4]) + vlad_2 = np.array([5, 6, 7, 8, 9]) + config = aggregation_config_pb2.AggregationConfig() + config.aggregation_type = aggregation_config_pb2.AggregationConfig.VLAD + + # Run tested function. + similarity_computer = ( + feature_aggregation_similarity.SimilarityAggregatedRepresentation( + config)) + similarity = similarity_computer.ComputeSimilarity(vlad_1, vlad_2) + + # Define expected results. + exp_similarity = 80 + + # Compare actual and expected results. + self.assertAllEqual(similarity, exp_similarity) + + def testComputeAsmkSimilarityWorks(self): + # Construct inputs. + aggregated_descriptors_1 = np.array([ + 0.0, 0.0, -0.707107, -0.707107, 0.5, 0.866025, 0.816497, 0.577350, 1.0, + 0.0 + ]) + visual_words_1 = np.array([0, 1, 2, 3, 4]) + aggregated_descriptors_2 = np.array( + [0.0, 1.0, 1.0, 0.0, 0.707107, 0.707107]) + visual_words_2 = np.array([1, 2, 4]) + config = aggregation_config_pb2.AggregationConfig() + config.codebook_size = 5 + config.feature_dimensionality = 2 + config.aggregation_type = aggregation_config_pb2.AggregationConfig.ASMK + config.use_l2_normalization = True + + # Run tested function. 
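+    # For VLAD the similarity is the plain dot product of the two vectors:
+    # 0*5 + 1*6 + 2*7 + 3*8 + 4*9 = 80.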
+ similarity_computer = ( + feature_aggregation_similarity.SimilarityAggregatedRepresentation( + config)) + similarity = similarity_computer.ComputeSimilarity( + aggregated_descriptors_1, aggregated_descriptors_2, visual_words_1, + visual_words_2) + + # Define expected results. + exp_similarity = 0.123562 + + # Compare actual and expected results. + self.assertAllClose(similarity, exp_similarity) + + def testComputeAsmkSimilarityNoNormalizationWorks(self): + # Construct inputs. + aggregated_descriptors_1 = np.array([ + 0.0, 0.0, -0.707107, -0.707107, 0.5, 0.866025, 0.816497, 0.577350, 1.0, + 0.0 + ]) + visual_words_1 = np.array([0, 1, 2, 3, 4]) + aggregated_descriptors_2 = np.array( + [0.0, 1.0, 1.0, 0.0, 0.707107, 0.707107]) + visual_words_2 = np.array([1, 2, 4]) + config = aggregation_config_pb2.AggregationConfig() + config.codebook_size = 5 + config.feature_dimensionality = 2 + config.aggregation_type = aggregation_config_pb2.AggregationConfig.ASMK + config.use_l2_normalization = False + + # Run tested function. + similarity_computer = ( + feature_aggregation_similarity.SimilarityAggregatedRepresentation( + config)) + similarity = similarity_computer.ComputeSimilarity( + aggregated_descriptors_1, aggregated_descriptors_2, visual_words_1, + visual_words_2) + + # Define expected results. + exp_similarity = 0.478554 + + # Compare actual and expected results. + self.assertAllClose(similarity, exp_similarity) + + def testComputeAsmkStarSimilarityWorks(self): + # Construct inputs. + aggregated_descriptors_1 = np.array([0, 0, 3, 3, 3], dtype='uint8') + visual_words_1 = np.array([0, 1, 2, 3, 4]) + aggregated_descriptors_2 = np.array([1, 2, 3], dtype='uint8') + visual_words_2 = np.array([1, 2, 4]) + config = aggregation_config_pb2.AggregationConfig() + config.codebook_size = 5 + config.feature_dimensionality = 2 + config.aggregation_type = aggregation_config_pb2.AggregationConfig.ASMK_STAR + config.use_l2_normalization = True + + # Run tested function. + similarity_computer = ( + feature_aggregation_similarity.SimilarityAggregatedRepresentation( + config)) + similarity = similarity_computer.ComputeSimilarity( + aggregated_descriptors_1, aggregated_descriptors_2, visual_words_1, + visual_words_2) + + # Define expected results. + exp_similarity = 0.258199 + + # Compare actual and expected results. + self.assertAllClose(similarity, exp_similarity) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/delf/delf/python/feature_extractor.py b/models/research/delf/delf/python/feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..9545337f18724520e260af4e36ffa6ee35bce4c6 --- /dev/null +++ b/models/research/delf/delf/python/feature_extractor.py @@ -0,0 +1,175 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""DELF feature extractor.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + + +def NormalizePixelValues(image, + pixel_value_offset=128.0, + pixel_value_scale=128.0): + """Normalize image pixel values. + + Args: + image: a uint8 tensor. + pixel_value_offset: a Python float, offset for normalizing pixel values. + pixel_value_scale: a Python float, scale for normalizing pixel values. + + Returns: + image: a float32 tensor of the same shape as the input image. + """ + image = tf.cast(image, dtype=tf.float32) + image = tf.truediv(tf.subtract(image, pixel_value_offset), pixel_value_scale) + return image + + +def CalculateReceptiveBoxes(height, width, rf, stride, padding): + """Calculate receptive boxes for each feature point. + + Args: + height: The height of feature map. + width: The width of feature map. + rf: The receptive field size. + stride: The effective stride between two adjacent feature points. + padding: The effective padding size. + + Returns: + rf_boxes: [N, 4] receptive boxes tensor. Here N equals to height x width. + Each box is represented by [ymin, xmin, ymax, xmax]. + """ + x, y = tf.meshgrid(tf.range(width), tf.range(height)) + coordinates = tf.reshape(tf.stack([y, x], axis=2), [-1, 2]) + # [y,x,y,x] + point_boxes = tf.cast( + tf.concat([coordinates, coordinates], 1), dtype=tf.float32) + bias = [-padding, -padding, -padding + rf - 1, -padding + rf - 1] + rf_boxes = stride * point_boxes + bias + return rf_boxes + + +def CalculateKeypointCenters(boxes): + """Helper function to compute feature centers, from RF boxes. + + Args: + boxes: [N, 4] float tensor. + + Returns: + centers: [N, 2] float tensor. + """ + return tf.divide( + tf.add( + tf.gather(boxes, [0, 1], axis=1), tf.gather(boxes, [2, 3], axis=1)), + 2.0) + + +def ApplyPcaAndWhitening(data, + pca_matrix, + pca_mean, + output_dim, + use_whitening=False, + pca_variances=None): + """Applies PCA/whitening to data. + + Args: + data: [N, dim] float tensor containing data which undergoes PCA/whitening. + pca_matrix: [dim, dim] float tensor PCA matrix, row-major. + pca_mean: [dim] float tensor, mean to subtract before projection. + output_dim: Number of dimensions to use in output data, of type int. + use_whitening: Whether whitening is to be used. + pca_variances: [dim] float tensor containing PCA variances. Only used if + use_whitening is True. + + Returns: + output: [N, output_dim] float tensor with output of PCA/whitening operation. + """ + output = tf.matmul( + tf.subtract(data, pca_mean), + tf.slice(pca_matrix, [0, 0], [output_dim, -1]), + transpose_b=True, + name='pca_matmul') + + # Apply whitening if desired. + if use_whitening: + output = tf.divide( + output, + tf.sqrt(tf.slice(pca_variances, [0], [output_dim])), + name='whitening') + + return output + + +def PostProcessDescriptors(descriptors, use_pca, pca_parameters=None): + """Post-process descriptors. + + Args: + descriptors: [N, input_dim] float tensor. + use_pca: Whether to use PCA. + pca_parameters: Only used if `use_pca` is True. Dict containing PCA + parameter tensors, with keys 'mean', 'matrix', 'dim', 'use_whitening', + 'variances'. + + Returns: + final_descriptors: [N, output_dim] float tensor with descriptors after + normalization and (possibly) PCA/whitening. + """ + # L2-normalize, and if desired apply PCA (followed by L2-normalization). 
+ final_descriptors = tf.nn.l2_normalize( + descriptors, axis=1, name='l2_normalization') + + if use_pca: + # Apply PCA, and whitening if desired. + final_descriptors = ApplyPcaAndWhitening(final_descriptors, + pca_parameters['matrix'], + pca_parameters['mean'], + pca_parameters['dim'], + pca_parameters['use_whitening'], + pca_parameters['variances']) + + # Re-normalize. + final_descriptors = tf.nn.l2_normalize( + final_descriptors, axis=1, name='pca_l2_normalization') + + return final_descriptors + + +def DelfFeaturePostProcessing(boxes, descriptors, use_pca, pca_parameters=None): + """Extract DELF features from input image. + + Args: + boxes: [N, 4] float tensor which denotes the selected receptive box. N is + the number of final feature points which pass through keypoint selection + and NMS steps. + descriptors: [N, input_dim] float tensor. + use_pca: Whether to use PCA. + pca_parameters: Only used if `use_pca` is True. Dict containing PCA + parameter tensors, with keys 'mean', 'matrix', 'dim', 'use_whitening', + 'variances'. + + Returns: + locations: [N, 2] float tensor which denotes the selected keypoint + locations. + final_descriptors: [N, output_dim] float tensor with DELF descriptors after + normalization and (possibly) PCA/whitening. + """ + + # Get center of descriptor boxes, corresponding to feature locations. + locations = CalculateKeypointCenters(boxes) + final_descriptors = PostProcessDescriptors(descriptors, use_pca, + pca_parameters) + + return locations, final_descriptors diff --git a/models/research/delf/delf/python/feature_extractor_test.py b/models/research/delf/delf/python/feature_extractor_test.py new file mode 100644 index 0000000000000000000000000000000000000000..0caa51c4321ae30866d2c1247626843a907c5a2d --- /dev/null +++ b/models/research/delf/delf/python/feature_extractor_test.py @@ -0,0 +1,75 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for DELF feature extractor.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from delf import feature_extractor + + +class FeatureExtractorTest(tf.test.TestCase): + + def testNormalizePixelValues(self): + image = tf.constant( + [[[3, 255, 0], [34, 12, 5]], [[45, 5, 65], [56, 77, 89]]], + dtype=tf.uint8) + normalized_image = feature_extractor.NormalizePixelValues( + image, pixel_value_offset=5.0, pixel_value_scale=2.0) + exp_normalized_image = [[[-1.0, 125.0, -2.5], [14.5, 3.5, 0.0]], + [[20.0, 0.0, 30.0], [25.5, 36.0, 42.0]]] + + self.assertAllEqual(normalized_image, exp_normalized_image) + + def testCalculateReceptiveBoxes(self): + boxes = feature_extractor.CalculateReceptiveBoxes( + height=1, width=2, rf=291, stride=32, padding=145) + exp_boxes = [[-145., -145., 145., 145.], [-145., -113., 145., 177.]] + + self.assertAllEqual(exp_boxes, boxes) + + def testCalculateKeypointCenters(self): + boxes = [[-10.0, 0.0, 11.0, 21.0], [-2.5, 5.0, 18.5, 26.0], + [45.0, -2.5, 66.0, 18.5]] + centers = feature_extractor.CalculateKeypointCenters(boxes) + + exp_centers = [[0.5, 10.5], [8.0, 15.5], [55.5, 8.0]] + + self.assertAllEqual(exp_centers, centers) + + def testPcaWhitening(self): + data = tf.constant([[1.0, 2.0, -2.0], [-5.0, 0.0, 3.0], [-1.0, 2.0, 0.0], + [0.0, 4.0, -1.0]]) + pca_matrix = tf.constant([[2.0, 0.0, -1.0], [0.0, 1.0, 1.0], + [-1.0, 1.0, 3.0]]) + pca_mean = tf.constant([1.0, 2.0, 3.0]) + output_dim = 2 + use_whitening = True + pca_variances = tf.constant([4.0, 1.0]) + + output = feature_extractor.ApplyPcaAndWhitening(data, pca_matrix, pca_mean, + output_dim, use_whitening, + pca_variances) + + exp_output = [[2.5, -5.0], [-6.0, -2.0], [-0.5, -3.0], [1.0, -2.0]] + + self.assertAllEqual(exp_output, output) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/delf/delf/python/feature_io.py b/models/research/delf/delf/python/feature_io.py new file mode 100644 index 0000000000000000000000000000000000000000..9b68586b8543b08bf16d345a65345be7cb6d8a67 --- /dev/null +++ b/models/research/delf/delf/python/feature_io.py @@ -0,0 +1,196 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Python interface for DelfFeatures proto. + +Support read and write of DelfFeatures from/to numpy arrays and file. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf + +from delf import feature_pb2 +from delf import datum_io + + +def ArraysToDelfFeatures(locations, + scales, + descriptors, + attention, + orientations=None): + """Converts DELF features to DelfFeatures proto. 
+ + Args: + locations: [N, 2] float array which denotes the selected keypoint locations. + N is the number of features. + scales: [N] float array with feature scales. + descriptors: [N, depth] float array with DELF descriptors. + attention: [N] float array with attention scores. + orientations: [N] float array with orientations. If None, all orientations + are set to zero. + + Returns: + delf_features: DelfFeatures object. + """ + num_features = len(attention) + assert num_features == locations.shape[0] + assert num_features == len(scales) + assert num_features == descriptors.shape[0] + + if orientations is None: + orientations = np.zeros([num_features], dtype=np.float32) + else: + assert num_features == len(orientations) + + delf_features = feature_pb2.DelfFeatures() + for i in range(num_features): + delf_feature = delf_features.feature.add() + delf_feature.y = locations[i, 0] + delf_feature.x = locations[i, 1] + delf_feature.scale = scales[i] + delf_feature.orientation = orientations[i] + delf_feature.strength = attention[i] + delf_feature.descriptor.CopyFrom(datum_io.ArrayToDatum(descriptors[i,])) + + return delf_features + + +def DelfFeaturesToArrays(delf_features): + """Converts data saved in DelfFeatures to numpy arrays. + + If there are no features, the function returns four empty arrays. + + Args: + delf_features: DelfFeatures object. + + Returns: + locations: [N, 2] float array which denotes the selected keypoint + locations. N is the number of features. + scales: [N] float array with feature scales. + descriptors: [N, depth] float array with DELF descriptors. + attention: [N] float array with attention scores. + orientations: [N] float array with orientations. + """ + num_features = len(delf_features.feature) + if num_features == 0: + return np.array([]), np.array([]), np.array([]), np.array([]), np.array([]) + + # Figure out descriptor dimensionality by parsing first one. + descriptor_dim = len( + datum_io.DatumToArray(delf_features.feature[0].descriptor)) + locations = np.zeros([num_features, 2]) + scales = np.zeros([num_features]) + descriptors = np.zeros([num_features, descriptor_dim]) + attention = np.zeros([num_features]) + orientations = np.zeros([num_features]) + + for i in range(num_features): + delf_feature = delf_features.feature[i] + locations[i, 0] = delf_feature.y + locations[i, 1] = delf_feature.x + scales[i] = delf_feature.scale + descriptors[i,] = datum_io.DatumToArray(delf_feature.descriptor) + attention[i] = delf_feature.strength + orientations[i] = delf_feature.orientation + + return locations, scales, descriptors, attention, orientations + + +def SerializeToString(locations, + scales, + descriptors, + attention, + orientations=None): + """Converts numpy arrays to serialized DelfFeatures. + + Args: + locations: [N, 2] float array which denotes the selected keypoint locations. + N is the number of features. + scales: [N] float array with feature scales. + descriptors: [N, depth] float array with DELF descriptors. + attention: [N] float array with attention scores. + orientations: [N] float array with orientations. If None, all orientations + are set to zero. + + Returns: + Serialized DelfFeatures string. + """ + delf_features = ArraysToDelfFeatures(locations, scales, descriptors, + attention, orientations) + return delf_features.SerializeToString() + + +def ParseFromString(string): + """Converts serialized DelfFeatures string to numpy arrays. + + Args: + string: Serialized DelfFeatures string. 
+ + Returns: + locations: [N, 2] float array which denotes the selected keypoint + locations. N is the number of features. + scales: [N] float array with feature scales. + descriptors: [N, depth] float array with DELF descriptors. + attention: [N] float array with attention scores. + orientations: [N] float array with orientations. + """ + delf_features = feature_pb2.DelfFeatures() + delf_features.ParseFromString(string) + return DelfFeaturesToArrays(delf_features) + + +def ReadFromFile(file_path): + """Helper function to load data from a DelfFeatures format in a file. + + Args: + file_path: Path to file containing data. + + Returns: + locations: [N, 2] float array which denotes the selected keypoint + locations. N is the number of features. + scales: [N] float array with feature scales. + descriptors: [N, depth] float array with DELF descriptors. + attention: [N] float array with attention scores. + orientations: [N] float array with orientations. + """ + with tf.io.gfile.GFile(file_path, 'rb') as f: + return ParseFromString(f.read()) + + +def WriteToFile(file_path, + locations, + scales, + descriptors, + attention, + orientations=None): + """Helper function to write data to a file in DelfFeatures format. + + Args: + file_path: Path to file that will be written. + locations: [N, 2] float array which denotes the selected keypoint locations. + N is the number of features. + scales: [N] float array with feature scales. + descriptors: [N, depth] float array with DELF descriptors. + attention: [N] float array with attention scores. + orientations: [N] float array with orientations. If None, all orientations + are set to zero. + """ + serialized_data = SerializeToString(locations, scales, descriptors, attention, + orientations) + with tf.io.gfile.GFile(file_path, 'w') as f: + f.write(serialized_data) diff --git a/models/research/delf/delf/python/feature_io_test.py b/models/research/delf/delf/python/feature_io_test.py new file mode 100644 index 0000000000000000000000000000000000000000..8b68d3b241cf561de9362c84ec05e148d22ee0f2 --- /dev/null +++ b/models/research/delf/delf/python/feature_io_test.py @@ -0,0 +1,112 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for feature_io, the python interface of DelfFeatures.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from absl import flags +import numpy as np +import tensorflow as tf + +from delf import feature_io + +FLAGS = flags.FLAGS + + +def create_data(): + """Creates data to be used in tests. + + Returns: + locations: [N, 2] float array which denotes the selected keypoint + locations. N is the number of features. + scales: [N] float array with feature scales. + descriptors: [N, depth] float array with DELF descriptors. + attention: [N] float array with attention scores. 
+ orientations: [N] float array with orientations. + """ + locations = np.arange(8, dtype=np.float32).reshape(4, 2) + scales = np.arange(4, dtype=np.float32) + attention = np.arange(4, dtype=np.float32) + orientations = np.arange(4, dtype=np.float32) + descriptors = np.zeros([4, 1024]) + descriptors[0,] = np.arange(1024) + descriptors[1,] = np.zeros([1024]) + descriptors[2,] = np.ones([1024]) + descriptors[3,] = -np.ones([1024]) + + return locations, scales, descriptors, attention, orientations + + +class DelfFeaturesIoTest(tf.test.TestCase): + + def testConversionAndBack(self): + locations, scales, descriptors, attention, orientations = create_data() + + serialized = feature_io.SerializeToString(locations, scales, descriptors, + attention, orientations) + parsed_data = feature_io.ParseFromString(serialized) + + self.assertAllEqual(locations, parsed_data[0]) + self.assertAllEqual(scales, parsed_data[1]) + self.assertAllEqual(descriptors, parsed_data[2]) + self.assertAllEqual(attention, parsed_data[3]) + self.assertAllEqual(orientations, parsed_data[4]) + + def testConversionAndBackNoOrientations(self): + locations, scales, descriptors, attention, _ = create_data() + + serialized = feature_io.SerializeToString(locations, scales, descriptors, + attention) + parsed_data = feature_io.ParseFromString(serialized) + + self.assertAllEqual(locations, parsed_data[0]) + self.assertAllEqual(scales, parsed_data[1]) + self.assertAllEqual(descriptors, parsed_data[2]) + self.assertAllEqual(attention, parsed_data[3]) + self.assertAllEqual(np.zeros([4]), parsed_data[4]) + + def testWriteAndReadToFile(self): + locations, scales, descriptors, attention, orientations = create_data() + + filename = os.path.join(FLAGS.test_tmpdir, 'test.delf') + feature_io.WriteToFile(filename, locations, scales, descriptors, attention, + orientations) + data_read = feature_io.ReadFromFile(filename) + + self.assertAllEqual(locations, data_read[0]) + self.assertAllEqual(scales, data_read[1]) + self.assertAllEqual(descriptors, data_read[2]) + self.assertAllEqual(attention, data_read[3]) + self.assertAllEqual(orientations, data_read[4]) + + def testWriteAndReadToFileEmptyFile(self): + filename = os.path.join(FLAGS.test_tmpdir, 'test.delf') + feature_io.WriteToFile(filename, np.array([]), np.array([]), np.array([]), + np.array([]), np.array([])) + data_read = feature_io.ReadFromFile(filename) + + self.assertAllEqual(np.array([]), data_read[0]) + self.assertAllEqual(np.array([]), data_read[1]) + self.assertAllEqual(np.array([]), data_read[2]) + self.assertAllEqual(np.array([]), data_read[3]) + self.assertAllEqual(np.array([]), data_read[4]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/delf/delf/python/google_landmarks_dataset/README.md b/models/research/delf/delf/python/google_landmarks_dataset/README.md new file mode 100644 index 0000000000000000000000000000000000000000..485c1a946b5b21ddb369cc1bc8645534abbfad1e --- /dev/null +++ b/models/research/delf/delf/python/google_landmarks_dataset/README.md @@ -0,0 +1,123 @@ +## GLDv2 code/models + +[![Paper](http://img.shields.io/badge/paper-arXiv.2004.01804-B3181B.svg)](https://arxiv.org/abs/2004.01804) + +These instructions can be used to reproduce results from the +[GLDv2 paper](https://arxiv.org/abs/2004.01804). We present here results on the +Revisited Oxford/Paris datasets since they are smaller and quicker to +reproduce -- but note that a very similar procedure can be used to obtain +results on the GLDv2 retrieval or recognition datasets. 
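+
+This directory also contains code to compute GLDv2 metrics: see
+`compute_retrieval_metrics.py`, `compute_recognition_metrics.py` and associated
+file reading / metric computation modules. As a minimal, hypothetical example of
+scoring a retrieval submission after installing the DELF library as described
+below (the CSV paths are placeholders for files you provide, formatted as
+described by each script's flags; metrics are printed to stdout):
+
+```bash
+# From models/research/delf/delf/python/google_landmarks_dataset
+python3 compute_retrieval_metrics.py \
+  --predictions_path /path/to/retrieval_predictions.csv \
+  --solution_path /path/to/retrieval_solution.csv
+```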
+
+For more details on the dataset, please refer to its
+[website](https://github.com/cvdfoundation/google-landmark).
+
+### Install DELF library
+
+To be able to use this code, please follow
+[these instructions](../../../INSTALL_INSTRUCTIONS.md) to properly install the
+DELF library.
+
+### Download Revisited Oxford/Paris datasets
+
+```bash
+mkdir -p ~/revisitop/data && cd ~/revisitop/data
+
+# Oxford dataset.
+wget http://www.robots.ox.ac.uk/~vgg/data/oxbuildings/oxbuild_images.tgz
+mkdir oxford5k_images
+tar -xvzf oxbuild_images.tgz -C oxford5k_images/
+
+# Paris dataset. Download and move all images to same directory.
+wget http://www.robots.ox.ac.uk/~vgg/data/parisbuildings/paris_1.tgz
+wget http://www.robots.ox.ac.uk/~vgg/data/parisbuildings/paris_2.tgz
+mkdir paris6k_images_tmp
+tar -xvzf paris_1.tgz -C paris6k_images_tmp/
+tar -xvzf paris_2.tgz -C paris6k_images_tmp/
+mkdir paris6k_images
+mv paris6k_images_tmp/paris/*/*.jpg paris6k_images/
+
+# Revisited annotations.
+wget http://cmp.felk.cvut.cz/revisitop/data/datasets/roxford5k/gnd_roxford5k.mat
+wget http://cmp.felk.cvut.cz/revisitop/data/datasets/rparis6k/gnd_rparis6k.mat
+```
+
+### Download model
+
+```bash
+# From models/research/delf/delf/python/google_landmarks_dataset
+mkdir parameters && cd parameters
+
+# RN101-ArcFace model trained on GLDv2-clean.
+wget https://storage.googleapis.com/delf/rn101_af_gldv2clean_20200521.tar.gz
+tar -xvzf rn101_af_gldv2clean_20200521.tar.gz
+```
+
+### Feature extraction
+
+We present here commands for extraction on `roxford5k`. To extract on `rparis6k`
+instead, please edit the arguments accordingly (especially the
+`dataset_file_path` argument).
+
+#### Query feature extraction
+
+In the Revisited Oxford/Paris experimental protocol, query images must be
+cropped before feature extraction (this is done in the `extract_features`
+script, when setting `image_set=query`). Note that this is specific to these
+datasets, and not required for the GLDv2 retrieval/recognition datasets.
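+
+As a purely illustrative sketch of what this cropping amounts to (the file name
+and bounding-box values below are made up; in practice the box comes from the
+ground-truth `.mat` file and the crop is applied inside the extraction script):
+
+```python
+from PIL import Image  # Any image library works; PIL is assumed here.
+
+# Hypothetical (left, top, right, bottom) query bounding box.
+box = (37.0, 20.0, 512.0, 400.0)
+image = Image.open('oxford5k_images/some_query_image.jpg')
+query_crop = image.crop(box)  # Features are then extracted from this crop only.
+```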
+
+Run query feature extraction as follows:
+
+```bash
+# From models/research/delf/delf/python/google_landmarks_dataset
+python3 ../delg/extract_features.py \
+  --delf_config_path rn101_af_gldv2clean_config.pbtxt \
+  --dataset_file_path ~/revisitop/data/gnd_roxford5k.mat \
+  --images_dir ~/revisitop/data/oxford5k_images \
+  --image_set query \
+  --output_features_dir ~/revisitop/data/oxford5k_features/query
+```
+
+#### Index feature extraction
+
+Run index feature extraction as follows:
+
+```bash
+# From models/research/delf/delf/python/google_landmarks_dataset
+python3 ../delg/extract_features.py \
+  --delf_config_path rn101_af_gldv2clean_config.pbtxt \
+  --dataset_file_path ~/revisitop/data/gnd_roxford5k.mat \
+  --images_dir ~/revisitop/data/oxford5k_images \
+  --image_set index \
+  --output_features_dir ~/revisitop/data/oxford5k_features/index
+```
+
+### Perform retrieval
+
+To run retrieval on `roxford5k`, the following command can be used:
+
+```bash
+# From models/research/delf/delf/python/google_landmarks_dataset
+python3 ../delg/perform_retrieval.py \
+  --dataset_file_path ~/revisitop/data/gnd_roxford5k.mat \
+  --query_features_dir ~/revisitop/data/oxford5k_features/query \
+  --index_features_dir ~/revisitop/data/oxford5k_features/index \
+  --output_dir ~/revisitop/results/oxford5k
+```
+
+A file named `metrics.txt` will be written to the path given in
+`output_dir`. The contents should look approximately like:
+
+```
+hard
+  mAP=55.54
+  mP@k[ 1  5 10] [88.57 80.86 70.14]
+  mR@k[ 1  5 10] [19.46 33.65 42.44]
+medium
+  mAP=76.23
+  mP@k[ 1  5 10] [95.71 92.86 90.43]
+  mR@k[ 1  5 10] [10.17 25.96 35.29]
+```
diff --git a/models/research/delf/delf/python/google_landmarks_dataset/compute_recognition_metrics.py b/models/research/delf/delf/python/google_landmarks_dataset/compute_recognition_metrics.py
new file mode 100644
index 0000000000000000000000000000000000000000..f80cf47de7487d4cd584c969d994f7d3f1135cae
--- /dev/null
+++ b/models/research/delf/delf/python/google_landmarks_dataset/compute_recognition_metrics.py
@@ -0,0 +1,99 @@
+# Copyright 2019 The TensorFlow Authors All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Computes metrics for Google Landmarks Recognition dataset predictions.
+
+Metrics are written to stdout.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import argparse
+import sys
+
+from tensorflow.python.platform import app
+from delf.python.google_landmarks_dataset import dataset_file_io
+from delf.python.google_landmarks_dataset import metrics
+
+cmd_args = None
+
+
+def main(argv):
+  if len(argv) > 1:
+    raise RuntimeError('Too many command-line arguments.')
+
+  # Read solution.
+ print('Reading solution...') + public_solution, private_solution, ignored_ids = dataset_file_io.ReadSolution( + cmd_args.solution_path, dataset_file_io.RECOGNITION_TASK_ID) + print('done!') + + # Read predictions. + print('Reading predictions...') + public_predictions, private_predictions = dataset_file_io.ReadPredictions( + cmd_args.predictions_path, set(public_solution.keys()), + set(private_solution.keys()), set(ignored_ids), + dataset_file_io.RECOGNITION_TASK_ID) + print('done!') + + # Global Average Precision. + print('**********************************************') + print('(Public) Global Average Precision: %f' % + metrics.GlobalAveragePrecision(public_predictions, public_solution)) + print('(Private) Global Average Precision: %f' % + metrics.GlobalAveragePrecision(private_predictions, private_solution)) + + # Global Average Precision ignoring non-landmark queries. + print('**********************************************') + print( + '(Public) Global Average Precision ignoring non-landmark queries: %f' % + metrics.GlobalAveragePrecision( + public_predictions, public_solution, ignore_non_gt_test_images=True)) + print( + '(Private) Global Average Precision ignoring non-landmark queries: %f' % + metrics.GlobalAveragePrecision( + private_predictions, private_solution, + ignore_non_gt_test_images=True)) + + # Top-1 accuracy. + print('**********************************************') + print('(Public) Top-1 accuracy: %.2f' % + (100.0 * metrics.Top1Accuracy(public_predictions, public_solution))) + print('(Private) Top-1 accuracy: %.2f' % + (100.0 * metrics.Top1Accuracy(private_predictions, private_solution))) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.register('type', 'bool', lambda v: v.lower() == 'true') + parser.add_argument( + '--predictions_path', + type=str, + default='/tmp/predictions.csv', + help=""" + Path to CSV predictions file, formatted with columns 'id,landmarks' (the + file should include a header). + """) + parser.add_argument( + '--solution_path', + type=str, + default='/tmp/solution.csv', + help=""" + Path to CSV solution file, formatted with columns 'id,landmarks,Usage' + (the file should include a header). + """) + cmd_args, unparsed = parser.parse_known_args() + app.run(main=main, argv=[sys.argv[0]] + unparsed) diff --git a/models/research/delf/delf/python/google_landmarks_dataset/compute_retrieval_metrics.py b/models/research/delf/delf/python/google_landmarks_dataset/compute_retrieval_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..adcee356e5d64d094236cda9656c86164c24faf8 --- /dev/null +++ b/models/research/delf/delf/python/google_landmarks_dataset/compute_retrieval_metrics.py @@ -0,0 +1,106 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Computes metrics for Google Landmarks Retrieval dataset predictions. + +Metrics are written to stdout. 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import sys + +from tensorflow.python.platform import app +from delf.python.google_landmarks_dataset import dataset_file_io +from delf.python.google_landmarks_dataset import metrics + +cmd_args = None + + +def main(argv): + if len(argv) > 1: + raise RuntimeError('Too many command-line arguments.') + + # Read solution. + print('Reading solution...') + public_solution, private_solution, ignored_ids = dataset_file_io.ReadSolution( + cmd_args.solution_path, dataset_file_io.RETRIEVAL_TASK_ID) + print('done!') + + # Read predictions. + print('Reading predictions...') + public_predictions, private_predictions = dataset_file_io.ReadPredictions( + cmd_args.predictions_path, set(public_solution.keys()), + set(private_solution.keys()), set(ignored_ids), + dataset_file_io.RETRIEVAL_TASK_ID) + print('done!') + + # Mean average precision. + print('**********************************************') + print('(Public) Mean Average Precision: %f' % + metrics.MeanAveragePrecision(public_predictions, public_solution)) + print('(Private) Mean Average Precision: %f' % + metrics.MeanAveragePrecision(private_predictions, private_solution)) + + # Mean precision@k. + print('**********************************************') + public_precisions = 100.0 * metrics.MeanPrecisions(public_predictions, + public_solution) + private_precisions = 100.0 * metrics.MeanPrecisions(private_predictions, + private_solution) + print('(Public) Mean precisions: P@1: %.2f, P@5: %.2f, P@10: %.2f, ' + 'P@50: %.2f, P@100: %.2f' % + (public_precisions[0], public_precisions[4], public_precisions[9], + public_precisions[49], public_precisions[99])) + print('(Private) Mean precisions: P@1: %.2f, P@5: %.2f, P@10: %.2f, ' + 'P@50: %.2f, P@100: %.2f' % + (private_precisions[0], private_precisions[4], private_precisions[9], + private_precisions[49], private_precisions[99])) + + # Mean/median position of first correct. + print('**********************************************') + public_mean_position, public_median_position = metrics.MeanMedianPosition( + public_predictions, public_solution) + private_mean_position, private_median_position = metrics.MeanMedianPosition( + private_predictions, private_solution) + print('(Public) Mean position: %.2f, median position: %.2f' % + (public_mean_position, public_median_position)) + print('(Private) Mean position: %.2f, median position: %.2f' % + (private_mean_position, private_median_position)) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.register('type', 'bool', lambda v: v.lower() == 'true') + parser.add_argument( + '--predictions_path', + type=str, + default='/tmp/predictions.csv', + help=""" + Path to CSV predictions file, formatted with columns 'id,images' (the + file should include a header). + """) + parser.add_argument( + '--solution_path', + type=str, + default='/tmp/solution.csv', + help=""" + Path to CSV solution file, formatted with columns 'id,images,Usage' + (the file should include a header). 
+ """) + cmd_args, unparsed = parser.parse_known_args() + app.run(main=main, argv=[sys.argv[0]] + unparsed) diff --git a/models/research/delf/delf/python/google_landmarks_dataset/dataset_file_io.py b/models/research/delf/delf/python/google_landmarks_dataset/dataset_file_io.py new file mode 100644 index 0000000000000000000000000000000000000000..93f2785d78f03b5b112bbba635b4778f2e9b9a08 --- /dev/null +++ b/models/research/delf/delf/python/google_landmarks_dataset/dataset_file_io.py @@ -0,0 +1,159 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""IO module for files from Landmark recognition/retrieval challenges.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import csv + +import tensorflow as tf + +RECOGNITION_TASK_ID = 'recognition' +RETRIEVAL_TASK_ID = 'retrieval' + + +def ReadSolution(file_path, task): + """Reads solution from file, for a given task. + + Args: + file_path: Path to CSV file with solution. File contains a header. + task: Type of challenge task. Supported values: 'recognition', 'retrieval'. + + Returns: + public_solution: Dict mapping test image ID to list of ground-truth IDs, for + the Public subset of test images. If `task` == 'recognition', the IDs are + integers corresponding to landmark IDs. If `task` == 'retrieval', the IDs + are strings corresponding to index image IDs. + private_solution: Same as `public_solution`, but for the private subset of + test images. + ignored_ids: List of test images that are ignored in scoring. + + Raises: + ValueError: If Usage field is not Public, Private or Ignored; or if `task` + is not supported. + """ + public_solution = {} + private_solution = {} + ignored_ids = [] + with tf.io.gfile.GFile(file_path, 'r') as csv_file: + reader = csv.reader(csv_file) + next(reader, None) # Skip header. + for row in reader: + test_id = row[0] + if row[2] == 'Ignored': + ignored_ids.append(test_id) + else: + ground_truth_ids = [] + if task == RECOGNITION_TASK_ID: + if row[1]: + for landmark_id in row[1].split(' '): + ground_truth_ids.append(int(landmark_id)) + elif task == RETRIEVAL_TASK_ID: + for image_id in row[1].split(' '): + ground_truth_ids.append(image_id) + else: + raise ValueError('Unrecognized task: %s' % task) + + if row[2] == 'Public': + public_solution[test_id] = ground_truth_ids + elif row[2] == 'Private': + private_solution[test_id] = ground_truth_ids + else: + raise ValueError('Test image %s has unrecognized Usage tag %s' % + (row[0], row[2])) + + return public_solution, private_solution, ignored_ids + + +def ReadPredictions(file_path, public_ids, private_ids, ignored_ids, task): + """Reads predictions from file, for a given task. + + Args: + file_path: Path to CSV file with predictions. File contains a header. + public_ids: Set (or list) of test image IDs in Public subset of test images. 
+ private_ids: Same as `public_ids`, but for the private subset of test + images. + ignored_ids: Set (or list) of test image IDs that are ignored in scoring and + are associated to no ground-truth. + task: Type of challenge task. Supported values: 'recognition', 'retrieval'. + + Returns: + public_predictions: Dict mapping test image ID to prediction, for the Public + subset of test images. If `task` == 'recognition', the prediction is a + dict with keys 'class' (integer) and 'score' (float). If `task` == + 'retrieval', the prediction is a list of strings corresponding to index + image IDs. + private_predictions: Same as `public_predictions`, but for the private + subset of test images. + + Raises: + ValueError: + - If test image ID is unrecognized/repeated; + - If `task` is not supported; + - If prediction is malformed. + """ + public_predictions = {} + private_predictions = {} + with tf.io.gfile.GFile(file_path, 'r') as csv_file: + reader = csv.reader(csv_file) + next(reader, None) # Skip header. + for row in reader: + # Skip row if empty. + if not row: + continue + + test_id = row[0] + + # Makes sure this query has not yet been seen. + if test_id in public_predictions: + raise ValueError('Test image %s is repeated.' % test_id) + if test_id in private_predictions: + raise ValueError('Test image %s is repeated' % test_id) + + # If ignored, skip it. + if test_id in ignored_ids: + continue + + # Only parse result if there is a prediction. + if row[1]: + prediction_split = row[1].split(' ') + # Remove empty spaces at end (if any). + if not prediction_split[-1]: + prediction_split = prediction_split[:-1] + + if task == RECOGNITION_TASK_ID: + if len(prediction_split) != 2: + raise ValueError('Prediction is malformed: there should only be 2 ' + 'elements in second column, but found %d for test ' + 'image %s' % (len(prediction_split), test_id)) + + landmark_id = int(prediction_split[0]) + score = float(prediction_split[1]) + prediction_entry = {'class': landmark_id, 'score': score} + elif task == RETRIEVAL_TASK_ID: + prediction_entry = prediction_split + else: + raise ValueError('Unrecognized task: %s' % task) + + if test_id in public_ids: + public_predictions[test_id] = prediction_entry + elif test_id in private_ids: + private_predictions[test_id] = prediction_entry + else: + raise ValueError('test_id %s is unrecognized' % test_id) + + return public_predictions, private_predictions diff --git a/models/research/delf/delf/python/google_landmarks_dataset/dataset_file_io_test.py b/models/research/delf/delf/python/google_landmarks_dataset/dataset_file_io_test.py new file mode 100644 index 0000000000000000000000000000000000000000..0101d989fba85e487842e65b3b1aec4e728c101c --- /dev/null +++ b/models/research/delf/delf/python/google_landmarks_dataset/dataset_file_io_test.py @@ -0,0 +1,170 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for dataset file IO module.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from absl import flags +import tensorflow as tf + +from delf.python.google_landmarks_dataset import dataset_file_io + +FLAGS = flags.FLAGS + + +class DatasetFileIoTest(tf.test.TestCase): + + def testReadRecognitionSolutionWorks(self): + # Define inputs. + file_path = os.path.join(FLAGS.test_tmpdir, + 'recognition_solution.csv') + with tf.io.gfile.GFile(file_path, 'w') as f: + f.write('id,landmarks,Usage\n') + f.write('0123456789abcdef,0 12,Public\n') + f.write('0223456789abcdef,,Public\n') + f.write('0323456789abcdef,100,Ignored\n') + f.write('0423456789abcdef,1,Private\n') + f.write('0523456789abcdef,,Ignored\n') + + # Run tested function. + (public_solution, private_solution, + ignored_ids) = dataset_file_io.ReadSolution( + file_path, dataset_file_io.RECOGNITION_TASK_ID) + + # Define expected results. + expected_public_solution = { + '0123456789abcdef': [0, 12], + '0223456789abcdef': [] + } + expected_private_solution = { + '0423456789abcdef': [1], + } + expected_ignored_ids = ['0323456789abcdef', '0523456789abcdef'] + + # Compare actual and expected results. + self.assertEqual(public_solution, expected_public_solution) + self.assertEqual(private_solution, expected_private_solution) + self.assertEqual(ignored_ids, expected_ignored_ids) + + def testReadRetrievalSolutionWorks(self): + # Define inputs. + file_path = os.path.join(FLAGS.test_tmpdir, + 'retrieval_solution.csv') + with tf.io.gfile.GFile(file_path, 'w') as f: + f.write('id,images,Usage\n') + f.write('0123456789abcdef,None,Ignored\n') + f.write('0223456789abcdef,fedcba9876543210 fedcba9876543200,Public\n') + f.write('0323456789abcdef,fedcba9876543200,Private\n') + f.write('0423456789abcdef,fedcba9876543220,Private\n') + f.write('0523456789abcdef,None,Ignored\n') + + # Run tested function. + (public_solution, private_solution, + ignored_ids) = dataset_file_io.ReadSolution( + file_path, dataset_file_io.RETRIEVAL_TASK_ID) + + # Define expected results. + expected_public_solution = { + '0223456789abcdef': ['fedcba9876543210', 'fedcba9876543200'], + } + expected_private_solution = { + '0323456789abcdef': ['fedcba9876543200'], + '0423456789abcdef': ['fedcba9876543220'], + } + expected_ignored_ids = ['0123456789abcdef', '0523456789abcdef'] + + # Compare actual and expected results. + self.assertEqual(public_solution, expected_public_solution) + self.assertEqual(private_solution, expected_private_solution) + self.assertEqual(ignored_ids, expected_ignored_ids) + + def testReadRecognitionPredictionsWorks(self): + # Define inputs. + file_path = os.path.join(FLAGS.test_tmpdir, + 'recognition_predictions.csv') + with tf.io.gfile.GFile(file_path, 'w') as f: + f.write('id,landmarks\n') + f.write('0123456789abcdef,12 0.1 \n') + f.write('0423456789abcdef,0 19.0\n') + f.write('0223456789abcdef,\n') + f.write('\n') + f.write('0523456789abcdef,14 0.01\n') + public_ids = ['0123456789abcdef', '0223456789abcdef'] + private_ids = ['0423456789abcdef'] + ignored_ids = ['0323456789abcdef', '0523456789abcdef'] + + # Run tested function. + public_predictions, private_predictions = dataset_file_io.ReadPredictions( + file_path, public_ids, private_ids, ignored_ids, + dataset_file_io.RECOGNITION_TASK_ID) + + # Define expected results. 
+ expected_public_predictions = { + '0123456789abcdef': { + 'class': 12, + 'score': 0.1 + } + } + expected_private_predictions = { + '0423456789abcdef': { + 'class': 0, + 'score': 19.0 + } + } + + # Compare actual and expected results. + self.assertEqual(public_predictions, expected_public_predictions) + self.assertEqual(private_predictions, expected_private_predictions) + + def testReadRetrievalPredictionsWorks(self): + # Define inputs. + file_path = os.path.join(FLAGS.test_tmpdir, + 'retrieval_predictions.csv') + with tf.io.gfile.GFile(file_path, 'w') as f: + f.write('id,images\n') + f.write('0123456789abcdef,fedcba9876543250 \n') + f.write('0423456789abcdef,fedcba9876543260\n') + f.write('0223456789abcdef,fedcba9876543210 fedcba9876543200 ' + 'fedcba9876543220\n') + f.write('\n') + f.write('0523456789abcdef,\n') + public_ids = ['0223456789abcdef'] + private_ids = ['0323456789abcdef', '0423456789abcdef'] + ignored_ids = ['0123456789abcdef', '0523456789abcdef'] + + # Run tested function. + public_predictions, private_predictions = dataset_file_io.ReadPredictions( + file_path, public_ids, private_ids, ignored_ids, + dataset_file_io.RETRIEVAL_TASK_ID) + + # Define expected results. + expected_public_predictions = { + '0223456789abcdef': [ + 'fedcba9876543210', 'fedcba9876543200', 'fedcba9876543220' + ] + } + expected_private_predictions = {'0423456789abcdef': ['fedcba9876543260']} + + # Compare actual and expected results. + self.assertEqual(public_predictions, expected_public_predictions) + self.assertEqual(private_predictions, expected_private_predictions) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/delf/delf/python/google_landmarks_dataset/metrics.py b/models/research/delf/delf/python/google_landmarks_dataset/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..1516be9d8569cecfe470d9ec98ce273a72cce84f --- /dev/null +++ b/models/research/delf/delf/python/google_landmarks_dataset/metrics.py @@ -0,0 +1,254 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Python module to compute metrics for Google Landmarks dataset.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + + +def _CountPositives(solution): + """Counts number of test images with non-empty ground-truth in `solution`. + + Args: + solution: Dict mapping test image ID to list of ground-truth IDs. + + Returns: + count: Number of test images with non-empty ground-truth. + """ + count = 0 + for v in solution.values(): + if v: + count += 1 + + return count + + +def GlobalAveragePrecision(predictions, + recognition_solution, + ignore_non_gt_test_images=False): + """Computes global average precision for recognition prediction. + + Args: + predictions: Dict mapping test image ID to a dict with keys 'class' + (integer) and 'score' (float). 
+ recognition_solution: Dict mapping test image ID to list of ground-truth + landmark IDs. + ignore_non_gt_test_images: If True, ignore test images which do not have + associated ground-truth landmark IDs. For the Google Landmark Recognition + challenge, this should be set to False. + + Returns: + gap: Global average precision score (float). + """ + # Compute number of expected results. + num_positives = _CountPositives(recognition_solution) + + gap = 0.0 + total_predictions = 0 + correct_predictions = 0 + + # Sort predictions according to Kaggle's convention: + # - first by score (descending); + # - then by key (ascending); + # - then by class (ascending). + sorted_predictions_by_key_class = sorted( + predictions.items(), key=lambda item: (item[0], item[1]['class'])) + sorted_predictions = sorted( + sorted_predictions_by_key_class, + key=lambda item: item[1]['score'], + reverse=True) + + # Loop over sorted predictions (descending order) and compute GAPs. + for key, prediction in sorted_predictions: + if ignore_non_gt_test_images and not recognition_solution[key]: + continue + + total_predictions += 1 + if prediction['class'] in recognition_solution[key]: + correct_predictions += 1 + gap += correct_predictions / total_predictions + + gap /= num_positives + + return gap + + +def Top1Accuracy(predictions, recognition_solution): + """Computes top-1 accuracy for recognition prediction. + + Note that test images without ground-truth are ignored. + + Args: + predictions: Dict mapping test image ID to a dict with keys 'class' + (integer) and 'score' (float). + recognition_solution: Dict mapping test image ID to list of ground-truth + landmark IDs. + + Returns: + accuracy: Top-1 accuracy (float). + """ + # Loop over test images in solution. If it has at least one class label, we + # check if the predicion is correct. + num_correct_predictions = 0 + num_test_images_with_ground_truth = 0 + for key, ground_truth in recognition_solution.items(): + if ground_truth: + num_test_images_with_ground_truth += 1 + if key in predictions: + if predictions[key]['class'] in ground_truth: + num_correct_predictions += 1 + + return num_correct_predictions / num_test_images_with_ground_truth + + +def MeanAveragePrecision(predictions, retrieval_solution, max_predictions=100): + """Computes mean average precision for retrieval prediction. + + Args: + predictions: Dict mapping test image ID to a list of strings corresponding + to index image IDs. + retrieval_solution: Dict mapping test image ID to list of ground-truth image + IDs. + max_predictions: Maximum number of predictions per query to take into + account. For the Google Landmark Retrieval challenge, this should be set + to 100. + + Returns: + mean_ap: Mean average precision score (float). + + Raises: + ValueError: If a test image in `predictions` is not included in + `retrieval_solutions`. + """ + # Compute number of test images. + num_test_images = len(retrieval_solution.keys()) + + # Loop over predictions for each query and compute mAP. + mean_ap = 0.0 + for key, prediction in predictions.items(): + if key not in retrieval_solution: + raise ValueError('Test image %s is not part of retrieval_solution' % key) + + # Loop over predicted images, keeping track of those which were already + # used (duplicates are skipped). 
+ ap = 0.0 + already_predicted = set() + num_expected_retrieved = min(len(retrieval_solution[key]), max_predictions) + num_correct = 0 + for i in range(min(len(prediction), max_predictions)): + if prediction[i] not in already_predicted: + if prediction[i] in retrieval_solution[key]: + num_correct += 1 + ap += num_correct / (i + 1) + already_predicted.add(prediction[i]) + + ap /= num_expected_retrieved + mean_ap += ap + + mean_ap /= num_test_images + + return mean_ap + + +def MeanPrecisions(predictions, retrieval_solution, max_predictions=100): + """Computes mean precisions for retrieval prediction. + + Args: + predictions: Dict mapping test image ID to a list of strings corresponding + to index image IDs. + retrieval_solution: Dict mapping test image ID to list of ground-truth image + IDs. + max_predictions: Maximum number of predictions per query to take into + account. + + Returns: + mean_precisions: NumPy array with mean precisions at ranks 1 through + `max_predictions`. + + Raises: + ValueError: If a test image in `predictions` is not included in + `retrieval_solutions`. + """ + # Compute number of test images. + num_test_images = len(retrieval_solution.keys()) + + # Loop over predictions for each query and compute precisions@k. + precisions = np.zeros((num_test_images, max_predictions)) + count_test_images = 0 + for key, prediction in predictions.items(): + if key not in retrieval_solution: + raise ValueError('Test image %s is not part of retrieval_solution' % key) + + # Loop over predicted images, keeping track of those which were already + # used (duplicates are skipped). + already_predicted = set() + num_correct = 0 + for i in range(max_predictions): + if i < len(prediction): + if prediction[i] not in already_predicted: + if prediction[i] in retrieval_solution[key]: + num_correct += 1 + already_predicted.add(prediction[i]) + precisions[count_test_images, i] = num_correct / (i + 1) + count_test_images += 1 + + mean_precisions = np.mean(precisions, axis=0) + + return mean_precisions + + +def MeanMedianPosition(predictions, retrieval_solution, max_predictions=100): + """Computes mean and median positions of first correct image. + + Args: + predictions: Dict mapping test image ID to a list of strings corresponding + to index image IDs. + retrieval_solution: Dict mapping test image ID to list of ground-truth image + IDs. + max_predictions: Maximum number of predictions per query to take into + account. + + Returns: + mean_position: Float. + median_position: Float. + + Raises: + ValueError: If a test image in `predictions` is not included in + `retrieval_solutions`. + """ + # Compute number of test images. + num_test_images = len(retrieval_solution.keys()) + + # Loop over predictions for each query to find first correct ranked image. 
+ positions = (max_predictions + 1) * np.ones((num_test_images)) + count_test_images = 0 + for key, prediction in predictions.items(): + if key not in retrieval_solution: + raise ValueError('Test image %s is not part of retrieval_solution' % key) + + for i in range(min(len(prediction), max_predictions)): + if prediction[i] in retrieval_solution[key]: + positions[count_test_images] = i + 1 + break + + count_test_images += 1 + + mean_position = np.mean(positions) + median_position = np.median(positions) + + return mean_position, median_position diff --git a/models/research/delf/delf/python/google_landmarks_dataset/metrics_test.py b/models/research/delf/delf/python/google_landmarks_dataset/metrics_test.py new file mode 100644 index 0000000000000000000000000000000000000000..50838cae2b5bfaa8f6f0c5cbfab2a07aa20b7c52 --- /dev/null +++ b/models/research/delf/delf/python/google_landmarks_dataset/metrics_test.py @@ -0,0 +1,219 @@ +# Copyright 2019 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for Google Landmarks dataset metric computation.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from delf.python.google_landmarks_dataset import metrics + + +def _CreateRecognitionSolution(): + """Creates recognition solution to be used in tests. + + Returns: + solution: Dict mapping test image ID to list of ground-truth landmark IDs. + """ + return { + '0123456789abcdef': [0, 12], + '0223456789abcdef': [100, 200, 300], + '0323456789abcdef': [1], + '0423456789abcdef': [], + '0523456789abcdef': [], + } + + +def _CreateRecognitionPredictions(): + """Creates recognition predictions to be used in tests. + + Returns: + predictions: Dict mapping test image ID to a dict with keys 'class' + (integer) and 'score' (float). + """ + return { + '0223456789abcdef': { + 'class': 0, + 'score': 0.01 + }, + '0323456789abcdef': { + 'class': 1, + 'score': 10.0 + }, + '0423456789abcdef': { + 'class': 150, + 'score': 15.0 + }, + } + + +def _CreateRetrievalSolution(): + """Creates retrieval solution to be used in tests. + + Returns: + solution: Dict mapping test image ID to list of ground-truth image IDs. + """ + return { + '0123456789abcdef': ['fedcba9876543210', 'fedcba9876543220'], + '0223456789abcdef': ['fedcba9876543210'], + '0323456789abcdef': [ + 'fedcba9876543230', 'fedcba9876543240', 'fedcba9876543250' + ], + '0423456789abcdef': ['fedcba9876543230'], + } + + +def _CreateRetrievalPredictions(): + """Creates retrieval predictions to be used in tests. + + Returns: + predictions: Dict mapping test image ID to a list with predicted index image + ids. 
+ """ + return { + '0223456789abcdef': ['fedcba9876543200', 'fedcba9876543210'], + '0323456789abcdef': ['fedcba9876543240'], + '0423456789abcdef': ['fedcba9876543230', 'fedcba9876543240'], + } + + +class MetricsTest(tf.test.TestCase): + + def testGlobalAveragePrecisionWorks(self): + # Define input. + predictions = _CreateRecognitionPredictions() + solution = _CreateRecognitionSolution() + + # Run tested function. + gap = metrics.GlobalAveragePrecision(predictions, solution) + + # Define expected results. + expected_gap = 0.166667 + + # Compare actual and expected results. + self.assertAllClose(gap, expected_gap) + + def testGlobalAveragePrecisionIgnoreNonGroundTruthWorks(self): + # Define input. + predictions = _CreateRecognitionPredictions() + solution = _CreateRecognitionSolution() + + # Run tested function. + gap = metrics.GlobalAveragePrecision( + predictions, solution, ignore_non_gt_test_images=True) + + # Define expected results. + expected_gap = 0.333333 + + # Compare actual and expected results. + self.assertAllClose(gap, expected_gap) + + def testTop1AccuracyWorks(self): + # Define input. + predictions = _CreateRecognitionPredictions() + solution = _CreateRecognitionSolution() + + # Run tested function. + accuracy = metrics.Top1Accuracy(predictions, solution) + + # Define expected results. + expected_accuracy = 0.333333 + + # Compare actual and expected results. + self.assertAllClose(accuracy, expected_accuracy) + + def testMeanAveragePrecisionWorks(self): + # Define input. + predictions = _CreateRetrievalPredictions() + solution = _CreateRetrievalSolution() + + # Run tested function. + mean_ap = metrics.MeanAveragePrecision(predictions, solution) + + # Define expected results. + expected_mean_ap = 0.458333 + + # Compare actual and expected results. + self.assertAllClose(mean_ap, expected_mean_ap) + + def testMeanAveragePrecisionMaxPredictionsWorks(self): + # Define input. + predictions = _CreateRetrievalPredictions() + solution = _CreateRetrievalSolution() + + # Run tested function. + mean_ap = metrics.MeanAveragePrecision( + predictions, solution, max_predictions=1) + + # Define expected results. + expected_mean_ap = 0.5 + + # Compare actual and expected results. + self.assertAllClose(mean_ap, expected_mean_ap) + + def testMeanPrecisionsWorks(self): + # Define input. + predictions = _CreateRetrievalPredictions() + solution = _CreateRetrievalSolution() + + # Run tested function. + mean_precisions = metrics.MeanPrecisions( + predictions, solution, max_predictions=2) + + # Define expected results. + expected_mean_precisions = [0.5, 0.375] + + # Compare actual and expected results. + self.assertAllClose(mean_precisions, expected_mean_precisions) + + def testMeanMedianPositionWorks(self): + # Define input. + predictions = _CreateRetrievalPredictions() + solution = _CreateRetrievalSolution() + + # Run tested function. + mean_position, median_position = metrics.MeanMedianPosition( + predictions, solution) + + # Define expected results. + expected_mean_position = 26.25 + expected_median_position = 1.5 + + # Compare actual and expected results. + self.assertAllClose(mean_position, expected_mean_position) + self.assertAllClose(median_position, expected_median_position) + + def testMeanMedianPositionMaxPredictionsWorks(self): + # Define input. + predictions = _CreateRetrievalPredictions() + solution = _CreateRetrievalSolution() + + # Run tested function. + mean_position, median_position = metrics.MeanMedianPosition( + predictions, solution, max_predictions=1) + + # Define expected results. 
+    expected_mean_position = 1.5
+    expected_median_position = 1.5
+
+    # Compare actual and expected results.
+    self.assertAllClose(mean_position, expected_mean_position)
+    self.assertAllClose(median_position, expected_median_position)
+
+
+if __name__ == '__main__':
+  tf.test.main()
diff --git a/models/research/delf/delf/python/google_landmarks_dataset/rn101_af_gldv2clean_config.pbtxt b/models/research/delf/delf/python/google_landmarks_dataset/rn101_af_gldv2clean_config.pbtxt
new file mode 100644
index 0000000000000000000000000000000000000000..992cb0fd142ba8c6d89c763d5e323a0d33e7a3a5
--- /dev/null
+++ b/models/research/delf/delf/python/google_landmarks_dataset/rn101_af_gldv2clean_config.pbtxt
@@ -0,0 +1,10 @@
+use_local_features: false
+use_global_features: true
+model_path: "parameters/rn101_af_gldv2clean_20200521"
+image_scales: 0.70710677
+image_scales: 1.0
+image_scales: 1.4142135
+delf_global_config {
+  use_pca: false
+}
+max_image_size: 1024
diff --git a/models/research/delf/delf/python/training/README.md b/models/research/delf/delf/python/training/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..a836370fb7830392715c45298987c40e24859032
--- /dev/null
+++ b/models/research/delf/delf/python/training/README.md
@@ -0,0 +1,128 @@
+# DELF Training Instructions
+
+This README documents the end-to-end process for training a landmark detection and retrieval
+model using the DELF library on the [Google Landmarks Dataset v2](https://github.com/cvdfoundation/google-landmark) (GLDv2). This can be achieved by following these steps:
+1. Install the DELF Python library.
+2. Download the raw images of the GLDv2 dataset.
+3. Prepare the training data.
+4. Run the training.
+
+The next sections will cover each of these steps in greater detail.
+
+## Prerequisites
+
+Clone the [TensorFlow Model Garden](https://github.com/tensorflow/models) repository and move
+into the `models/research/delf/delf/python/training` folder.
+```
+git clone https://github.com/tensorflow/models.git
+cd models/research/delf/delf/python/training
+```
+
+## Install the DELF Library
+
+The DELF Python library can be installed by running the [`install_delf.sh`](./install_delf.sh)
+script using the command:
+```
+bash install_delf.sh
+```
+The script installs both the DELF library and its dependencies in the following sequence:
+* Install TensorFlow 2.2 and TensorFlow 2.2 for GPU.
+* Install the [TF-Slim](https://github.com/google-research/tf-slim) library from source.
+* Download [protoc](https://github.com/protocolbuffers/protobuf) and compile the DELF Protocol
+Buffers.
+* Install the matplotlib, numpy, scikit-image, scipy and python3-tk Python libraries.
+* Install the [TensorFlow Object Detection API](https://github.com/tensorflow/models/tree/master/research/object_detection) from the cloned TensorFlow Model Garden repository.
+* Install the DELF package.
+
+*Please note that the current installation only works on 64-bit Linux architectures due to the
+`protoc` binary downloaded by the installation script. If you wish to install the DELF library on
+other architectures, please update the [`install_delf.sh`](./install_delf.sh) script by referencing
+the desired `protoc` [binary release](https://github.com/protocolbuffers/protobuf/releases).*
+
+## Download the GLDv2 Training Data
+
+The [GLDv2](https://github.com/cvdfoundation/google-landmark) images are grouped in 3 datasets: TRAIN, INDEX, TEST.
+Images in each dataset are grouped into `*.tar` files and individually
+referenced in `*.csv` files containing training metadata and licensing information. The number of
+`*.tar` files per dataset is as follows:
+* TRAIN: 500 files.
+* INDEX: 100 files.
+* TEST: 20 files.
+
+To download the GLDv2 images, run the [`download_dataset.sh`](./download_dataset.sh) script as in
+the following example:
+```
+bash download_dataset.sh 500 100 20
+```
+The script takes the following parameters, in order:
+* The number of image files from the TRAIN dataset to download (maximum 500).
+* The number of image files from the INDEX dataset to download (maximum 100).
+* The number of image files from the TEST dataset to download (maximum 20).
+
+The script downloads the GLDv2 images under the following directory structure:
+* gldv2_dataset/
+    * train/ - Contains raw images from the TRAIN dataset.
+    * index/ - Contains raw images from the INDEX dataset.
+    * test/ - Contains raw images from the TEST dataset.
+
+Each of the three folders `gldv2_dataset/train/`, `gldv2_dataset/index/` and `gldv2_dataset/test/`
+contains the following:
+* The downloaded `*.tar` files.
+* The corresponding MD5 checksum files, `*.txt`.
+* The unpacked content of the downloaded files. (*Images are organized in folders and subfolders
+based on the first, second and third character in their file name.*)
+* The CSV files containing training and licensing metadata of the downloaded images.
+
+*Please note that due to the large size of the GLDv2 dataset, the download can take up to 12
+hours and up to 1 TB of disk space. In order to save bandwidth and disk space, you may want to
+start by downloading only the TRAIN dataset, the only one required for training, thus saving
+approximately ~95 GB, the equivalent of the INDEX and TEST datasets. To further save disk space,
+the `*.tar` files can be deleted after downloading and unpacking them.*
+
+## Prepare the Data for Training
+
+Preparing the data for training consists of creating [TFRecord](https://www.tensorflow.org/tutorials/load_data/tfrecord)
+files from the raw GLDv2 images, grouped into TRAIN and VALIDATION splits. The training set
+produced contains only the *clean* subset of the GLDv2 dataset. The [CVPR'20 paper](https://arxiv.org/abs/2004.01804)
+introducing the GLDv2 dataset contains a detailed description of the *clean* subset.
+
+Generating the TFRecord files containing the TRAIN and VALIDATION splits of the *clean* GLDv2
+subset can be achieved by running the [`build_image_dataset.py`](./build_image_dataset.py)
+script. Assuming that the GLDv2 images have been downloaded to the `gldv2_dataset` folder, the
+script can be run as follows:
+```
+python3 build_image_dataset.py \
+  --train_csv_path=gldv2_dataset/train/train.csv \
+  --train_clean_csv_path=gldv2_dataset/train/train_clean.csv \
+  --train_directory=gldv2_dataset/train/*/*/*/ \
+  --output_directory=gldv2_dataset/tfrecord/ \
+  --num_shards=128 \
+  --generate_train_validation_splits \
+  --validation_split_size=0.2
+```
+*Please refer to the source code of the [`build_image_dataset.py`](./build_image_dataset.py) script for a detailed description of its parameters.*
+
+The TFRecord files written in the `OUTPUT_DIRECTORY` will be prefixed as follows:
+* TRAIN split: `train-*`
+* VALIDATION split: `validation-*`
+
+The same script can be used to generate TFRecord files for the TEST split for post-training
+evaluation purposes.
+This can be achieved by adding the parameters:
+```
+  --test_csv_path=gldv2_dataset/train/test.csv \
+  --test_directory=gldv2_dataset/test/*/*/*/ \
+```
+In this scenario, the TFRecord files of the TEST split written in the `OUTPUT_DIRECTORY` will be
+named according to the pattern `test-*`.
+
+*Please note that due to the large size of the GLDv2 dataset, the generation of the TFRecord
+files can take up to 12 hours and up to 500 GB of disk space.*
+
+## Running the Training
+
+Assuming the TFRecord files were generated in the `gldv2_dataset/tfrecord/` directory, running
+the following command should start training a model:
+
+```
+python3 train.py \
+  --train_file_pattern=gldv2_dataset/tfrecord/train* \
+  --validation_file_pattern=gldv2_dataset/tfrecord/validation*
+```
diff --git a/models/research/delf/delf/python/training/__init__.py b/models/research/delf/delf/python/training/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c87f3d895c72593403f71e2768b31307f3db5ea6
--- /dev/null
+++ b/models/research/delf/delf/python/training/__init__.py
@@ -0,0 +1,22 @@
+# Copyright 2020 The TensorFlow Authors All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Module for DELF training."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+# pylint: disable=unused-import
+from delf.python.training import build_image_dataset
+# pylint: enable=unused-import
diff --git a/models/research/delf/delf/python/training/build_image_dataset.py b/models/research/delf/delf/python/training/build_image_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..5df58df0b80d506330e3560b46b8835c283a2bf8
--- /dev/null
+++ b/models/research/delf/delf/python/training/build_image_dataset.py
@@ -0,0 +1,473 @@
+#!/usr/bin/python
+# Copyright 2020 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Converts landmark image data to TFRecords file format with Example protos.
+
+The image data set is expected to reside in JPEG files ending with '.jpg'.
+
+This script converts the training and testing data into
+a sharded data set consisting of TFRecord files
+  train_directory/train-00000-of-00128
+  train_directory/train-00001-of-00128
+  ...
+  train_directory/train-00127-of-00128
+and
+  test_directory/test-00000-of-00128
+  test_directory/test-00001-of-00128
+  ...
+  test_directory/test-00127-of-00128
+where we have selected 128 shards for both data sets. Each record
+within the TFRecord file is a serialized Example proto. The Example proto
+contains the following fields:
+  image/encoded: string containing JPEG encoded image in RGB colorspace
+  image/height: integer, image height in pixels
+  image/width: integer, image width in pixels
+  image/colorspace: string, specifying the colorspace, always 'RGB'
+  image/channels: integer, specifying the number of channels, always 3
+  image/format: string, specifying the format, always 'JPEG'
+  image/filename: string, the unique id of the image file
+    e.g. '97c0a12e07ae8dd5' or '650c989dd3493748'
+Furthermore, if the data set type is training, it would contain one more field:
+  image/class/label: integer, the landmark_id from the input training csv file.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import csv
+import os
+
+from absl import app
+from absl import flags
+
+import numpy as np
+import pandas as pd
+import tensorflow as tf
+
+FLAGS = flags.FLAGS
+
+flags.DEFINE_string('train_directory', '/tmp/', 'Training data directory.')
+flags.DEFINE_string('test_directory', None,
+                    '(Optional) Testing data directory. Required only if '
+                    'test_csv_path is not None.')
+flags.DEFINE_string('output_directory', '/tmp/', 'Output data directory.')
+flags.DEFINE_string('train_csv_path', '/tmp/train.csv',
+                    'Training data csv file path.')
+flags.DEFINE_string('train_clean_csv_path', None,
+                    ('(Optional) Clean training data csv file path. '
+                     'If provided, filters images keeping the ones listed in '
+                     'this file. In this case, also outputs a CSV file '
+                     'relabeling.csv mapping new labels to old ones.'))
+flags.DEFINE_string('test_csv_path', None,
+                    '(Optional) Testing data csv file path. If None or absent, '
+                    'TFRecords for the images in the test dataset are not '
+                    'generated.')
+flags.DEFINE_integer('num_shards', 128, 'Number of shards in output data.')
+flags.DEFINE_boolean('generate_train_validation_splits', False,
+                     '(Optional) Whether to split the train dataset into '
+                     'TRAIN and VALIDATION splits.')
+flags.DEFINE_float('validation_split_size', 0.2,
+                   '(Optional) The size of the VALIDATION split as a fraction '
+                   'of the train dataset.')
+flags.DEFINE_integer('seed', 0,
+                     '(Optional) The seed to be used while shuffling the train '
+                     'dataset when generating the TRAIN and VALIDATION splits. '
+                     'Recommended for splits reproducibility purposes.')
+
+_FILE_IDS_KEY = 'file_ids'
+_IMAGE_PATHS_KEY = 'image_paths'
+_LABELS_KEY = 'labels'
+_TEST_SPLIT = 'test'
+_TRAIN_SPLIT = 'train'
+_VALIDATION_SPLIT = 'validation'
+
+
+def _get_all_image_files_and_labels(name, csv_path, image_dir):
+  """Process input and get the image file paths, image ids and the labels.
+
+  Args:
+    name: 'train' or 'test'.
+    csv_path: path to the Google-landmark Dataset csv Data Sources files.
+    image_dir: directory that stores downloaded images.
+  Returns:
+    image_paths: the paths to all images in the image_dir.
+    file_ids: the unique ids of images.
+    labels: the landmark id of all images. When name='test', the returned labels
+      will be an empty list.
+  Raises:
+    ValueError: if input name is not supported.
+ """ + image_paths = tf.io.gfile.glob(os.path.join(image_dir, '*.jpg')) + file_ids = [os.path.basename(os.path.normpath(f))[:-4] for f in image_paths] + if name == _TRAIN_SPLIT: + with tf.io.gfile.GFile(csv_path, 'rb') as csv_file: + df = pd.read_csv(csv_file) + df = df.set_index('id') + labels = [int(df.loc[fid]['landmark_id']) for fid in file_ids] + elif name == _TEST_SPLIT: + labels = [] + else: + raise ValueError('Unsupported dataset split name: %s' % name) + return image_paths, file_ids, labels + + +def _get_clean_train_image_files_and_labels(csv_path, image_dir): + """Get image file paths, image ids and labels for the clean training split. + + Args: + csv_path: path to the Google-landmark Dataset v2 CSV Data Sources files + of the clean train dataset. Assumes CSV header landmark_id;images. + image_dir: directory that stores downloaded images. + + Returns: + image_paths: the paths to all images in the image_dir. + file_ids: the unique ids of images. + labels: the landmark id of all images. + relabeling: relabeling rules created to replace actual labels with + a continuous set of labels. + """ + # Load the content of the CSV file (landmark_id/label -> images). + with tf.io.gfile.GFile(csv_path, 'rb') as csv_file: + df = pd.read_csv(csv_file) + + # Create the dictionary (key = image_id, value = {label, file_id}). + images = {} + for _, row in df.iterrows(): + label = row['landmark_id'] + for file_id in row['images'].split(' '): + images[file_id] = {} + images[file_id]['label'] = label + images[file_id]['file_id'] = file_id + + # Add the full image path to the dictionary of images. + image_paths = tf.io.gfile.glob(os.path.join(image_dir, '*.jpg')) + for image_path in image_paths: + file_id = os.path.basename(os.path.normpath(image_path))[:-4] + if file_id in images: + images[file_id]['image_path'] = image_path + + # Explode the dictionary into lists (1 per image attribute). + image_paths = [] + file_ids = [] + labels = [] + for _, value in images.items(): + image_paths.append(value['image_path']) + file_ids.append(value['file_id']) + labels.append(value['label']) + + # Relabel image labels to contiguous values. + unique_labels = sorted(set(labels)) + relabeling = {label: index for index, label in enumerate(unique_labels)} + new_labels = [relabeling[label] for label in labels] + return image_paths, file_ids, new_labels, relabeling + + +def _process_image(filename): + """Process a single image file. + + Args: + filename: string, path to an image file e.g., '/path/to/example.jpg'. + + Returns: + image_buffer: string, JPEG encoding of RGB image. + height: integer, image height in pixels. + width: integer, image width in pixels. + Raises: + ValueError: if parsed image has wrong number of dimensions or channels. + """ + # Read the image file. + with tf.io.gfile.GFile(filename, 'rb') as f: + image_data = f.read() + + # Decode the RGB JPEG. 
+ image = tf.io.decode_jpeg(image_data, channels=3) + + # Check that image converted to RGB + if len(image.shape) != 3: + raise ValueError('The parsed image number of dimensions is not 3 but %d' % + (image.shape)) + height = image.shape[0] + width = image.shape[1] + if image.shape[2] != 3: + raise ValueError('The parsed image channels is not 3 but %d' % + (image.shape[2])) + + return image_data, height, width + + +def _int64_feature(value): + """Returns an int64_list from a bool / enum / int / uint.""" + return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) + + +def _bytes_feature(value): + """Returns a bytes_list from a string / byte.""" + return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) + + +def _convert_to_example(file_id, image_buffer, height, width, label=None): + """Build an Example proto for the given inputs. + + Args: + file_id: string, unique id of an image file, e.g., '97c0a12e07ae8dd5'. + image_buffer: string, JPEG encoding of RGB image. + height: integer, image height in pixels. + width: integer, image width in pixels. + label: integer, the landmark id and prediction label. + + Returns: + Example proto. + """ + colorspace = 'RGB' + channels = 3 + image_format = 'JPEG' + features = { + 'image/height': _int64_feature(height), + 'image/width': _int64_feature(width), + 'image/colorspace': _bytes_feature(colorspace.encode('utf-8')), + 'image/channels': _int64_feature(channels), + 'image/format': _bytes_feature(image_format.encode('utf-8')), + 'image/id': _bytes_feature(file_id.encode('utf-8')), + 'image/encoded': _bytes_feature(image_buffer) + } + if label is not None: + features['image/class/label'] = _int64_feature(label) + example = tf.train.Example(features=tf.train.Features(feature=features)) + + return example + + +def _write_tfrecord(output_prefix, image_paths, file_ids, labels): + """Read image files and write image and label data into TFRecord files. + + Args: + output_prefix: string, the prefix of output files, e.g. 'train'. + image_paths: list of strings, the paths to images to be converted. + file_ids: list of strings, the image unique ids. + labels: list of integers, the landmark ids of images. It is an empty list + when output_prefix='test'. + + Raises: + ValueError: if the length of input images, ids and labels don't match + """ + if output_prefix == _TEST_SPLIT: + labels = [None] * len(image_paths) + if not len(image_paths) == len(file_ids) == len(labels): + raise ValueError('length of image_paths, file_ids, labels shoud be the' + + ' same. But they are %d, %d, %d, respectively' % + (len(image_paths), len(file_ids), len(labels))) + + spacing = np.linspace(0, len(image_paths), FLAGS.num_shards + 1, dtype=np.int) + + for shard in range(FLAGS.num_shards): + output_file = os.path.join( + FLAGS.output_directory, + '%s-%.5d-of-%.5d' % (output_prefix, shard, FLAGS.num_shards)) + writer = tf.io.TFRecordWriter(output_file) + print('Processing shard ', shard, ' and writing file ', output_file) + for i in range(spacing[shard], spacing[shard + 1]): + image_buffer, height, width = _process_image(image_paths[i]) + example = _convert_to_example(file_ids[i], image_buffer, height, width, + labels[i]) + writer.write(example.SerializeToString()) + writer.close() + + +def _write_relabeling_rules(relabeling_rules): + """Write to a file the relabeling rules when the clean train dataset is used. + + Args: + relabeling_rules: dictionary of relabeling rules applied when the clean + train dataset is used (key = old_label, value = new_label). 
+ """ + relabeling_file_name = os.path.join(FLAGS.output_directory, + 'relabeling.csv') + with tf.io.gfile.GFile(relabeling_file_name, 'w') as relabeling_file: + csv_writer = csv.writer(relabeling_file, delimiter=',') + csv_writer.writerow(['new_label', 'old_label']) + for old_label, new_label in relabeling_rules.items(): + csv_writer.writerow([new_label, old_label]) + + +def _build_train_and_validation_splits(image_paths, file_ids, labels, + validation_split_size, seed): + """Create TRAIN and VALIDATION splits containg all labels in equal proportion. + + Args: + image_paths: list of paths to the image files in the train dataset. + file_ids: list of image file ids in the train dataset. + labels: list of image labels in the train dataset. + validation_split_size: size of the VALIDATION split as a ratio of the train + dataset. + seed: seed to use for shuffling the dataset for reproducibility purposes. + + Returns: + splits : tuple containing the TRAIN and VALIDATION splits. + Raises: + ValueError: if the image attributes arrays don't all have the same length, + which makes the shuffling impossible. + """ + # Ensure all image attribute arrays have the same length. + total_images = len(file_ids) + if not (len(image_paths) == total_images and len(labels) == total_images): + raise ValueError('Inconsistencies between number of file_ids (%d), number ' + 'of image_paths (%d) and number of labels (%d). Cannot' + 'shuffle the train dataset.'% (total_images, + len(image_paths), + len(labels))) + + # Stack all image attributes arrays in a single 2D array of dimensions + # (3, number of images) and group by label the indices of datapoins in the + # image attributes arrays. Explicitly convert label types from 'int' to 'str' + # to avoid implicit conversion during stacking with image_paths and file_ids + # which are 'str'. + labels_str = [str(label) for label in labels] + image_attrs = np.stack((image_paths, file_ids, labels_str)) + image_attrs_idx_by_label = {} + for index, label in enumerate(labels): + if label not in image_attrs_idx_by_label: + image_attrs_idx_by_label[label] = [] + image_attrs_idx_by_label[label].append(index) + + # Create subsets of image attributes by label, shuffle them separately and + # split each subset into TRAIN and VALIDATION splits based on the size of the + # validation split. + splits = { + _VALIDATION_SPLIT: [], + _TRAIN_SPLIT: [] + } + rs = np.random.RandomState(np.random.MT19937(np.random.SeedSequence(seed))) + for label, indexes in image_attrs_idx_by_label.items(): + # Create the subset for the current label. + image_attrs_label = image_attrs[:, indexes] + images_per_label = image_attrs_label.shape[1] + # Shuffle the current label subset. + columns_indices = np.arange(images_per_label) + rs.shuffle(columns_indices) + image_attrs_label = image_attrs_label[:, columns_indices] + # Split the current label subset into TRAIN and VALIDATION splits and add + # each split to the list of all splits. + cutoff_idx = max(1, int(validation_split_size * images_per_label)) + splits[_VALIDATION_SPLIT].append(image_attrs_label[:, 0 : cutoff_idx]) + splits[_TRAIN_SPLIT].append(image_attrs_label[:, cutoff_idx : ]) + + validation_split = np.concatenate(splits[_VALIDATION_SPLIT], axis=1) + train_split = np.concatenate(splits[_TRAIN_SPLIT], axis=1) + + # Unstack the image attribute arrays in the TRAIN and VALIDATION splits and + # convert them back to lists. Convert labels back to 'int' from 'str' + # following the explicit type change from 'str' to 'int' for stacking. 
+ return ( + { + _IMAGE_PATHS_KEY: validation_split[0, :].tolist(), + _FILE_IDS_KEY: validation_split[1, :].tolist(), + _LABELS_KEY: [int(label) for label in validation_split[2, :].tolist()] + }, { + _IMAGE_PATHS_KEY: train_split[0, :].tolist(), + _FILE_IDS_KEY: train_split[1, :].tolist(), + _LABELS_KEY: [int(label) for label in train_split[2, :].tolist()] + }) + + +def _build_train_tfrecord_dataset(csv_path, + clean_csv_path, + image_dir, + generate_train_validation_splits, + validation_split_size, + seed): + """Build a TFRecord dataset for the train split. + + Args: + csv_path: path to the train Google-landmark Dataset csv Data Sources files. + clean_csv_path: path to the Google-landmark Dataset v2 CSV Data Sources + files of the clean train dataset. + image_dir: directory that stores downloaded images. + generate_train_validation_splits: whether to split the test dataset into + TRAIN and VALIDATION splits. + validation_split_size: size of the VALIDATION split as a ratio of the train + dataset. Only used if 'generate_train_validation_splits' is True. + seed: seed to use for shuffling the dataset for reproducibility purposes. + Only used if 'generate_train_validation_splits' is True. + + Returns: + Nothing. After the function call, sharded TFRecord files are materialized. + Raises: + ValueError: if the size of the VALIDATION split is outside (0,1) when TRAIN + and VALIDATION splits need to be generated. + """ + # Make sure the size of the VALIDATION split is inside (0, 1) if we need to + # generate the TRAIN and VALIDATION splits. + if generate_train_validation_splits: + if validation_split_size <= 0 or validation_split_size >= 1: + raise ValueError('Invalid VALIDATION split size. Expected inside (0,1)' + 'but received %f.' % validation_split_size) + + if clean_csv_path: + # Load clean train images and labels and write the relabeling rules. + (image_paths, file_ids, labels, + relabeling_rules) = _get_clean_train_image_files_and_labels(clean_csv_path, + image_dir) + _write_relabeling_rules(relabeling_rules) + else: + # Load all train images. + image_paths, file_ids, labels = _get_all_image_files_and_labels( + _TRAIN_SPLIT, csv_path, image_dir) + + if generate_train_validation_splits: + # Generate the TRAIN and VALIDATION splits and write them to TFRecord. + validation_split, train_split = _build_train_and_validation_splits( + image_paths, file_ids, labels, validation_split_size, seed) + _write_tfrecord(_VALIDATION_SPLIT, + validation_split[_IMAGE_PATHS_KEY], + validation_split[_FILE_IDS_KEY], + validation_split[_LABELS_KEY]) + _write_tfrecord(_TRAIN_SPLIT, + train_split[_IMAGE_PATHS_KEY], + train_split[_FILE_IDS_KEY], + train_split[_LABELS_KEY]) + else: + # Write to TFRecord a single split, TRAIN. + _write_tfrecord(_TRAIN_SPLIT, image_paths, file_ids, labels) + + +def _build_test_tfrecord_dataset(csv_path, image_dir): + """Build a TFRecord dataset for the 'test' split. + + Args: + csv_path: path to the 'test' Google-landmark Dataset csv Data Sources files. + image_dir: directory that stores downloaded images. + + Returns: + Nothing. After the function call, sharded TFRecord files are materialized. 
+ """ + image_paths, file_ids, labels = _get_all_image_files_and_labels( + _TEST_SPLIT, csv_path, image_dir) + _write_tfrecord(_TEST_SPLIT, image_paths, file_ids, labels) + + +def main(unused_argv): + _build_train_tfrecord_dataset(FLAGS.train_csv_path, + FLAGS.train_clean_csv_path, + FLAGS.train_directory, + FLAGS.generate_train_validation_splits, + FLAGS.validation_split_size, + FLAGS.seed) + if FLAGS.test_csv_path is not None: + _build_test_tfrecord_dataset(FLAGS.test_csv_path, FLAGS.test_directory) + + +if __name__ == '__main__': + app.run(main) diff --git a/models/research/delf/delf/python/training/datasets/__init__.py b/models/research/delf/delf/python/training/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7e0a672716945394cce4b2c69ee3d086192da87c --- /dev/null +++ b/models/research/delf/delf/python/training/datasets/__init__.py @@ -0,0 +1,22 @@ +# Copyright 2020 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Module exposing datasets for training.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# pylint: disable=unused-import +from delf.python.training.datasets import googlelandmarks +# pylint: enable=unused-import diff --git a/models/research/delf/delf/python/training/datasets/googlelandmarks.py b/models/research/delf/delf/python/training/datasets/googlelandmarks.py new file mode 100644 index 0000000000000000000000000000000000000000..f289cc166460f3a2fd9f157bc672ea0a464a2995 --- /dev/null +++ b/models/research/delf/delf/python/training/datasets/googlelandmarks.py @@ -0,0 +1,187 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Google Landmarks Dataset(GLD). + +Placeholder for Google Landmarks dataset. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools + +import tensorflow as tf + + +class _GoogleLandmarksInfo(object): + """Metadata about the Google Landmarks dataset.""" + num_classes = { + 'gld_v1': 14951, + 'gld_v2': 203094, + 'gld_v2_clean': 81313 + } + + +class _DataAugmentationParams(object): + """Default parameters for augmentation.""" + # The following are used for training. 
+ min_object_covered = 0.1 + aspect_ratio_range_min = 3. / 4 + aspect_ratio_range_max = 4. / 3 + area_range_min = 0.08 + area_range_max = 1.0 + max_attempts = 100 + update_labels = False + # 'central_fraction' is used for central crop in inference. + central_fraction = 0.875 + + random_reflection = False + input_rows = 321 + input_cols = 321 + + +def NormalizeImages(images, pixel_value_scale=0.5, pixel_value_offset=0.5): + """Normalize pixel values in image. + + Output is computed as + normalized_images = (images - pixel_value_offset) / pixel_value_scale. + + Args: + images: `Tensor`, images to normalize. + pixel_value_scale: float, scale. + pixel_value_offset: float, offset. + + Returns: + normalized_images: `Tensor`, normalized images. + """ + images = tf.cast(images, tf.float32) + normalized_images = tf.math.divide( + tf.subtract(images, pixel_value_offset), pixel_value_scale) + return normalized_images + + +def _ImageNetCrop(image): + """Imagenet-style crop with random bbox and aspect ratio. + + Args: + image: a `Tensor`, image to crop. + + Returns: + cropped_image: `Tensor`, cropped image. + """ + + params = _DataAugmentationParams() + bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) + (bbox_begin, bbox_size, _) = tf.image.sample_distorted_bounding_box( + tf.shape(image), + bounding_boxes=bbox, + min_object_covered=params.min_object_covered, + aspect_ratio_range=(params.aspect_ratio_range_min, + params.aspect_ratio_range_max), + area_range=(params.area_range_min, params.area_range_max), + max_attempts=params.max_attempts, + use_image_if_no_bounding_boxes=True) + cropped_image = tf.slice(image, bbox_begin, bbox_size) + cropped_image.set_shape([None, None, 3]) + + cropped_image = tf.image.resize( + cropped_image, [params.input_rows, params.input_cols], method='area') + if params.random_reflection: + cropped_image = tf.image.random_flip_left_right(cropped_image) + + return cropped_image + + +def _ParseFunction(example, name_to_features, image_size, augmentation): + """Parse a single TFExample to get the image and label and process the image. + + Args: + example: a `TFExample`. + name_to_features: a `dict`. The mapping from feature names to its type. + image_size: an `int`. The image size for the decoded image, on each side. + augmentation: a `boolean`. True if the image will be augmented. + + Returns: + image: a `Tensor`. The processed image. + label: a `Tensor`. The ground-truth label. + """ + parsed_example = tf.io.parse_single_example(example, name_to_features) + # Parse to get image. + image = parsed_example['image/encoded'] + image = tf.io.decode_jpeg(image) + if augmentation: + image = _ImageNetCrop(image) + else: + image = tf.image.resize(image, [image_size, image_size]) + image.set_shape([image_size, image_size, 3]) + # Parse to get label. + label = parsed_example['image/class/label'] + return image, label + + +def CreateDataset(file_pattern, + image_size=321, + batch_size=32, + augmentation=False, + seed=0): + """Creates a dataset. + + Args: + file_pattern: str, file pattern of the dataset files. + image_size: int, image size. + batch_size: int, batch size. + augmentation: bool, whether to apply augmentation. + seed: int, seed for shuffling the dataset. + + Returns: + tf.data.TFRecordDataset. + """ + + filenames = tf.io.gfile.glob(file_pattern) + + dataset = tf.data.TFRecordDataset(filenames) + dataset = dataset.repeat().shuffle(buffer_size=100, seed=seed) + + # Create a description of the features. 
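+  # Only 'image/encoded' and 'image/class/label' are consumed by
+  # _ParseFunction; the remaining entries carry default values and are unused
+  # downstream. The TFRecords written by build_image_dataset.py store the file
+  # id under 'image/id' rather than 'image/filename', so that entry resolves
+  # to its default empty string.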
+ feature_description = { + 'image/height': tf.io.FixedLenFeature([], tf.int64, default_value=0), + 'image/width': tf.io.FixedLenFeature([], tf.int64, default_value=0), + 'image/channels': tf.io.FixedLenFeature([], tf.int64, default_value=0), + 'image/format': tf.io.FixedLenFeature([], tf.string, default_value=''), + 'image/filename': tf.io.FixedLenFeature([], tf.string, default_value=''), + 'image/encoded': tf.io.FixedLenFeature([], tf.string, default_value=''), + 'image/class/label': tf.io.FixedLenFeature([], tf.int64, default_value=0), + } + + customized_parse_func = functools.partial( + _ParseFunction, + name_to_features=feature_description, + image_size=image_size, + augmentation=augmentation) + dataset = dataset.map(customized_parse_func) + dataset = dataset.batch(batch_size) + + return dataset + + +def GoogleLandmarksInfo(): + """Returns metadata information on the Google Landmarks dataset. + + Returns: + object _GoogleLandmarksInfo containing metadata about the GLD dataset. + """ + return _GoogleLandmarksInfo() diff --git a/models/research/delf/delf/python/training/download_dataset.sh b/models/research/delf/delf/python/training/download_dataset.sh new file mode 100644 index 0000000000000000000000000000000000000000..ecbd905eccde6b4056f4b3cc0a011695debb3390 --- /dev/null +++ b/models/research/delf/delf/python/training/download_dataset.sh @@ -0,0 +1,161 @@ +#!/bin/bash + +# Copyright 2020 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# This script downloads the Google Landmarks v2 dataset. 
To download the dataset +# run the script like in the following example: +# bash download_dataset.sh 500 100 20 +# +# The script takes the following parameters, in order: +# - number of image files from the TRAIN split to download (maximum 500) +# - number of image files from the INDEX split to download (maximum 100) +# - number of image files from the TEST split to download (maximum 20) + +image_files_train=$1 # Number of image files to download from the TRAIN split +image_files_index=$2 # Number of image files to download from the INDEX split +image_files_test=$3 # Number of image files to download from the TEST split + +splits=("train" "test" "index") +dataset_root_folder=gldv2_dataset + +metadata_url="https://s3.amazonaws.com/google-landmark/metadata" +ground_truth_url="https://s3.amazonaws.com/google-landmark/ground_truth" +csv_train=(${metadata_url}/train.csv ${metadata_url}/train_clean.csv ${metadata_url}/train_attribution.csv ${metadata_url}/train_label_to_category.csv) +csv_index=(${metadata_url}/index.csv ${metadata_url}/index_image_to_landmark.csv ${metadata_url}/index_label_to_category.csv) +csv_test=(${metadata_url}/test.csv ${ground_truth_url}/recognition_solution_v2.1.csv ${ground_truth_url}/retrieval_solution_v2.1.csv) + +images_tar_file_base_url="https://s3.amazonaws.com/google-landmark" +images_md5_file_base_url="https://s3.amazonaws.com/google-landmark/md5sum" +num_processes=6 + +make_folder() { + # Creates a folder and checks if it exists. Exits if folder creation fails. + local folder=$1 + if [ -d "${folder}" ]; then + echo "Folder ${folder} already exists. Skipping folder creation." + else + echo "Creating folder ${folder}." + if mkdir ${folder}; then + echo "Successfully created folder ${folder}." + else + echo "Failed to create folder ${folder}. Exiting." + exit 1 + fi + fi +} + +download_file() { + # Downloads a file from an URL into a specified folder. + local file_url=$1 + local folder=$2 + local file_path="${folder}/`basename ${file_url}`" + echo "Downloading file ${file_url} to folder ${folder}." + pushd . > /dev/null + cd ${folder} + curl -Os ${file_url} + popd > /dev/null +} + +validate_md5_checksum() { + # Validate the MD5 checksum of a downloaded file. + local content_file=$1 + local md5_file=$2 + echo "Checking MD5 checksum of file ${content_file} against ${md5_file}" + if [[ "${OSTYPE}" == "linux-gnu" ]]; then + content_md5=`md5sum ${content_file}` + elif [[ "${OSTYPE}" == "darwin"* ]]; then + content_md5=`md5 -r "${content_file}"` + fi + content_md5=`cut -d' ' -f1<<<"${content_md5}"` + expected_md5=`cut -d' ' -f1<<${max_idx}?${max_idx}:${curr_max_idx})) + for j in $(seq ${i} 1 ${last_idx}); do download_image_file "${split}" "${j}" "${split_folder}" & done + wait + done +} + +download_csv_files() { + # Downloads all medatada CSV files of a split. + local split=$1 + local split_folder=$2 + local csv_list="csv_${split}[*]" + for csv_file in ${!csv_list}; do + download_file "${csv_file}" "${split_folder}" + done +} + +download_split() { + # Downloads all artifacts, metadata CSV files and image files of a single split. + local split=$1 + local split_folder=${dataset_root_folder}/${split} + make_folder "${split_folder}" + download_csv_files "${split}" "${split_folder}" + download_image_files "${split}" "${split_folder}" +} + +download_all_splits() { + # Downloads all artifacts, metadata CSV files and image files of all splits. 
+ make_folder "${dataset_root_folder}" + for split in "${splits[@]}"; do + download_split "$split" + done +} + +download_all_splits + +exit 0 diff --git a/models/research/delf/delf/python/training/install_delf.sh b/models/research/delf/delf/python/training/install_delf.sh new file mode 100644 index 0000000000000000000000000000000000000000..4feb464aa7def067028e65281d906e006f4533a2 --- /dev/null +++ b/models/research/delf/delf/python/training/install_delf.sh @@ -0,0 +1,153 @@ +#!/bin/bash + +# Copyright 2020 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# This script installs the DELF package along with its dependencies. To install +# the DELF package run the script like in the following example: +# bash install_delf.sh + +protoc_folder="protoc" +protoc_url="https://github.com/google/protobuf/releases/download/v3.3.0/protoc-3.3.0-linux-x86_64.zip" +tf_slim_git_repo="https://github.com/google-research/tf-slim.git" + +handle_exit_code() { + # Fail gracefully in case of an exit code different than 0. + exit_code=$1 + error_message=$2 + if [ ${exit_code} -ne 0 ]; then + echo "${error_message} Exiting." + exit 1 + fi +} + +install_tensorflow() { + # Install TensorFlow 2.2. + echo "Installing TensorFlow 2.2" + pip3 install --upgrade tensorflow==2.2.0 + local exit_code=$? + handle_exit_code ${exit_code} "Unable to install Tensorflow 2.2." + echo "Installing TensorFlow 2.2 for GPU" + pip3 install --upgrade tensorflow-gpu==2.2.0 + local exit_code=$? + handle_exit_code ${exit_code} "Unable to install Tensorflow for GPU 2.2.0." +} + +install_tf_slim() { + # Install TF-Slim from source. + echo "Installing TF-Slim from source: ${git_repo}" + git clone ${tf_slim_git_repo} + local exit_code=$? + handle_exit_code ${exit_code} "Unable to clone TF-Slim repository ${tf_slim_git_repo}." + pushd . > /dev/null + cd tf-slim + pip3 install . + popd > /dev/null + rm -rf tf-slim +} + +download_protoc() { + # Installs the Protobuf compiler protoc. + echo "Downloading Protobuf compiler from ${protoc_url}" + curl -L -Os ${protoc_url} + local exit_code=$? + handle_exit_code ${exit_code} "Unable to download Protobuf compiler from ${tf_slim_git_repo}." + + mkdir ${protoc_folder} + local protoc_archive=`basename ${protoc_url}` + unzip ${protoc_archive} -d ${protoc_folder} + local exit_code=$? + handle_exit_code ${exit_code} "Unable to unzip Protobuf compiler from ${protoc_archive}." + + rm ${protoc_archive} +} + +compile_delf_protos() { + # Compiles DELF protobufs from tensorflow/models/research/delf using the potoc compiler. + echo "Compiling DELF Protobufs" + PATH_TO_PROTOC="`pwd`/${protoc_folder}" + pushd . > /dev/null + cd ../../.. + ${PATH_TO_PROTOC}/bin/protoc delf/protos/*.proto --python_out=. + local exit_code=$? + handle_exit_code ${exit_code} "Unable to compile DELF Protobufs." 
+ popd > /dev/null +} + +cleanup_protoc() { + # Removes the downloaded Protobuf compiler protoc after the installation of the DELF package. + echo "Cleaning up Protobuf compiler download" + rm -rf ${protoc_folder} +} + +install_python_libraries() { + # Installs Python libraries upon which the DELF package has dependencies. + echo "Installing matplotlib, numpy, scikit-image, scipy and python3-tk" + pip3 install matplotlib numpy scikit-image scipy + local exit_code=$? + handle_exit_code ${exit_code} "Unable to install at least one of: matplotlib numpy scikit-image scipy." + sudo apt-get -y install python3-tk + local exit_code=$? + handle_exit_code ${exit_code} "Unable to install python3-tk." +} + +install_object_detection() { + # Installs the object detection package from tensorflow/models/research. + echo "Installing object detection" + pushd . > /dev/null + cd ../../../.. + export PYTHONPATH=$PYTHONPATH:`pwd` + pip3 install . + local exit_code=$? + handle_exit_code ${exit_code} "Unable to install the object_detection package." + popd > /dev/null +} + +install_delf_package() { + # Installs the DELF package from tensorflow/models/research/delf/delf. + echo "Installing DELF package" + pushd . > /dev/null + cd ../../.. + pip3 install -e . + local exit_code=$? + handle_exit_code ${exit_code} "Unable to install the DELF package." + popd > /dev/null +} + +post_install_check() { + # Checks the DELF package has been successfully installed. + echo "Checking DELF package installation" + python3 -c 'import delf' + local exit_code=$? + handle_exit_code ${exit_code} "DELF package installation check failed." + echo "Installation successful." +} + +install_delf() { + # Orchestrates DELF package installation. + install_tensorflow + install_tf_slim + download_protoc + compile_delf_protos + cleanup_protoc + install_python_libraries + install_object_detection + install_delf_package + post_install_check +} + +install_delf + +exit 0 diff --git a/models/research/delf/delf/python/training/model/__init__.py b/models/research/delf/delf/python/training/model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dcc888bd8a65e9ba48f15e4082064e7285ac2591 --- /dev/null +++ b/models/research/delf/delf/python/training/model/__init__.py @@ -0,0 +1,24 @@ +# Copyright 2020 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""DELF model module, used for training and exporting.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# pylint: disable=unused-import +from delf.python.training.model import delf_model +from delf.python.training.model import export_model_utils +from delf.python.training.model import resnet50 +# pylint: enable=unused-import diff --git a/models/research/delf/delf/python/training/model/delf_model.py b/models/research/delf/delf/python/training/model/delf_model.py new file mode 100644 index 0000000000000000000000000000000000000000..27409de99c52dcb0f0eb00ca9ae0602a2be0d30b --- /dev/null +++ b/models/research/delf/delf/python/training/model/delf_model.py @@ -0,0 +1,141 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""DELF model implementation based on the following paper. + + Large-Scale Image Retrieval with Attentive Deep Local Features + https://arxiv.org/abs/1612.06321 +""" + +import tensorflow as tf + +from delf.python.training.model import resnet50 as resnet + +layers = tf.keras.layers +reg = tf.keras.regularizers + +_DECAY = 0.0001 + + +class AttentionModel(tf.keras.Model): + """Instantiates attention model. + + Uses two [kernel_size x kernel_size] convolutions and softplus as activation + to compute an attention map with the same resolution as the featuremap. + Features l2-normalized and aggregated using attention probabilites as weights. + """ + + def __init__(self, kernel_size=1, decay=_DECAY, name='attention'): + """Initialization of attention model. + + Args: + kernel_size: int, kernel size of convolutions. + decay: float, decay for l2 regularization of kernel weights. + name: str, name to identify model. + """ + super(AttentionModel, self).__init__(name=name) + + # First convolutional layer (called with relu activation). + self.conv1 = layers.Conv2D( + 512, + kernel_size, + kernel_regularizer=reg.l2(decay), + padding='same', + name='attn_conv1') + self.bn_conv1 = layers.BatchNormalization(axis=3, name='bn_conv1') + + # Second convolutional layer, with softplus activation. + self.conv2 = layers.Conv2D( + 1, + kernel_size, + kernel_regularizer=reg.l2(decay), + padding='same', + name='attn_conv2') + self.activation_layer = layers.Activation('softplus') + + def call(self, inputs, training=True): + x = self.conv1(inputs) + x = self.bn_conv1(x, training=training) + x = tf.nn.relu(x) + + score = self.conv2(x) + prob = self.activation_layer(score) + + # L2-normalize the featuremap before pooling. + inputs = tf.nn.l2_normalize(inputs, axis=-1) + feat = tf.reduce_mean(tf.multiply(inputs, prob), [1, 2], keepdims=False) + + return feat, prob, score + + +class Delf(tf.keras.Model): + """Instantiates Keras DELF model using ResNet50 as backbone. 
+ + This class implements the [DELF](https://arxiv.org/abs/1612.06321) model for + extracting local features from images. The backbone is a ResNet50 network + that extracts featuremaps from both conv_4 and conv_5 layers. Activations + from conv_4 are used to compute an attention map of the same resolution. + """ + + def __init__(self, block3_strides=True, name='DELF'): + """Initialization of DELF model. + + Args: + block3_strides: bool, whether to add strides to the output of block3. + name: str, name to identify model. + """ + super(Delf, self).__init__(name=name) + + # Backbone using Keras ResNet50. + self.backbone = resnet.ResNet50( + 'channels_last', + name='backbone', + include_top=False, + pooling='avg', + block3_strides=block3_strides, + average_pooling=False) + + # Attention model. + self.attention = AttentionModel(name='attention') + + # Define classifiers for training backbone and attention models. + def init_classifiers(self, num_classes): + self.num_classes = num_classes + self.desc_classification = layers.Dense( + num_classes, activation=None, kernel_regularizer=None, name='desc_fc') + + self.attn_classification = layers.Dense( + num_classes, activation=None, kernel_regularizer=None, name='att_fc') + + # Weights to optimize for descriptor fine tuning. + @property + def desc_trainable_weights(self): + return (self.backbone.trainable_weights + + self.desc_classification.trainable_weights) + + # Weights to optimize for attention model training. + @property + def attn_trainable_weights(self): + return (self.attention.trainable_weights + + self.attn_classification.trainable_weights) + + def call(self, input_image, training=True): + blocks = {'block3': None} + self.backbone(input_image, intermediates_dict=blocks, training=training) + + features = blocks['block3'] + _, probs, _ = self.attention(features, training=training) + + return probs, features diff --git a/models/research/delf/delf/python/training/model/delf_model_test.py b/models/research/delf/delf/python/training/model/delf_model_test.py new file mode 100644 index 0000000000000000000000000000000000000000..c4cbcef555db3cd6e6395aee69f1479863916bd4 --- /dev/null +++ b/models/research/delf/delf/python/training/model/delf_model_test.py @@ -0,0 +1,115 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for the DELF model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +import tensorflow as tf + +from delf.python.training.model import delf_model + + +class DelfTest(tf.test.TestCase, parameterized.TestCase): + + @parameterized.named_parameters( + ('block3_stridesTrue', True), + ('block3_stridesFalse', False), + ) + def test_build_model(self, block3_strides): + image_size = 321 + num_classes = 1000 + batch_size = 2 + input_shape = (batch_size, image_size, image_size, 3) + + model = delf_model.Delf(block3_strides=block3_strides, name='DELF') + model.init_classifiers(num_classes) + + images = tf.random.uniform(input_shape, minval=-1.0, maxval=1.0, seed=0) + blocks = {} + + # Get global feature by pooling block4 features. + desc_prelogits = model.backbone( + images, intermediates_dict=blocks, training=False) + desc_logits = model.desc_classification(desc_prelogits) + self.assertAllEqual(desc_prelogits.shape, (batch_size, 2048)) + self.assertAllEqual(desc_logits.shape, (batch_size, num_classes)) + + features = blocks['block3'] + attn_prelogits, _, _ = model.attention(features) + attn_logits = model.attn_classification(attn_prelogits) + self.assertAllEqual(attn_prelogits.shape, (batch_size, 1024)) + self.assertAllEqual(attn_logits.shape, (batch_size, num_classes)) + + @parameterized.named_parameters( + ('block3_stridesTrue', True), + ('block3_stridesFalse', False), + ) + def test_train_step(self, block3_strides): + + image_size = 321 + num_classes = 1000 + batch_size = 2 + clip_val = 10.0 + input_shape = (batch_size, image_size, image_size, 3) + + model = delf_model.Delf(block3_strides=block3_strides, name='DELF') + model.init_classifiers(num_classes) + + optimizer = tf.keras.optimizers.SGD(learning_rate=0.001, momentum=0.9) + + images = tf.random.uniform(input_shape, minval=0.0, maxval=1.0, seed=0) + labels = tf.random.uniform((batch_size,), + minval=0, + maxval=model.num_classes - 1, + dtype=tf.int64) + + loss_object = tf.keras.losses.SparseCategoricalCrossentropy( + from_logits=True, reduction=tf.keras.losses.Reduction.NONE) + + def compute_loss(labels, predictions): + per_example_loss = loss_object(labels, predictions) + return tf.nn.compute_average_loss( + per_example_loss, global_batch_size=batch_size) + + with tf.GradientTape() as desc_tape: + blocks = {} + desc_prelogits = model.backbone( + images, intermediates_dict=blocks, training=False) + desc_logits = model.desc_classification(desc_prelogits) + desc_logits = model.desc_classification(desc_prelogits) + desc_loss = compute_loss(labels, desc_logits) + + gradients = desc_tape.gradient(desc_loss, model.desc_trainable_weights) + clipped, _ = tf.clip_by_global_norm(gradients, clip_norm=clip_val) + optimizer.apply_gradients(zip(clipped, model.desc_trainable_weights)) + + with tf.GradientTape() as attn_tape: + block3 = blocks['block3'] + block3 = tf.stop_gradient(block3) + attn_prelogits, _, _ = model.attention(block3, training=True) + attn_logits = model.attn_classification(attn_prelogits) + attn_loss = compute_loss(labels, attn_logits) + + gradients = attn_tape.gradient(attn_loss, model.attn_trainable_weights) + clipped, _ = tf.clip_by_global_norm(gradients, clip_norm=clip_val) + optimizer.apply_gradients(zip(clipped, model.attn_trainable_weights)) + + +if __name__ == '__main__': + tf.test.main() diff --git 
a/models/research/delf/delf/python/training/model/export_model.py b/models/research/delf/delf/python/training/model/export_model.py new file mode 100644 index 0000000000000000000000000000000000000000..4af69a231641ef2cc69a08fb9a5ba5c31655c26c --- /dev/null +++ b/models/research/delf/delf/python/training/model/export_model.py @@ -0,0 +1,137 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Export DELF tensorflow inference model. + +This model includes feature extraction, receptive field calculation and +key-point selection and outputs the selected feature descriptors. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from absl import app +from absl import flags +import tensorflow as tf + +from delf.python.training.model import delf_model +from delf.python.training.model import export_model_utils + +FLAGS = flags.FLAGS + +flags.DEFINE_string('ckpt_path', '/tmp/delf-logdir/delf-weights', + 'Path to saved checkpoint.') +flags.DEFINE_string('export_path', None, 'Path where model will be exported.') +flags.DEFINE_boolean('block3_strides', False, + 'Whether to apply strides after block3.') +flags.DEFINE_float('iou', 1.0, 'IOU for non-max suppression.') + + +def _build_tensor_info(tensor_dict): + """Replace the dict's value by the tensor info. + + Args: + tensor_dict: A dictionary contains . + + Returns: + dict: New dictionary contains . + """ + return { + k: tf.compat.v1.saved_model.utils.build_tensor_info(t) + for k, t in tensor_dict.items() + } + + +def main(argv): + if len(argv) > 1: + raise app.UsageError('Too many command-line arguments.') + + export_path = FLAGS.export_path + if os.path.exists(export_path): + raise ValueError('Export_path already exists.') + + with tf.Graph().as_default() as g, tf.compat.v1.Session(graph=g) as sess: + + # Setup the DELF model for extraction. + model = delf_model.Delf(block3_strides=FLAGS.block3_strides, name='DELF') + + # Initial forward pass to build model. + images = tf.zeros((1, 321, 321, 3), dtype=tf.float32) + model(images) + + stride_factor = 2.0 if FLAGS.block3_strides else 1.0 + + # Setup the multiscale keypoint extraction. + input_image = tf.compat.v1.placeholder( + tf.uint8, shape=(None, None, 3), name='input_image') + input_abs_thres = tf.compat.v1.placeholder( + tf.float32, shape=(), name='input_abs_thres') + input_scales = tf.compat.v1.placeholder( + tf.float32, shape=[None], name='input_scales') + input_max_feature_num = tf.compat.v1.placeholder( + tf.int32, shape=(), name='input_max_feature_num') + + extracted_features = export_model_utils.ExtractLocalFeatures( + input_image, input_scales, input_max_feature_num, input_abs_thres, + FLAGS.iou, lambda x: model(x, training=False), stride_factor) + + # Load the weights. 
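+    # model.load_weights expects a TF2 checkpoint prefix (e.g. the default
+    # --ckpt_path 'delf-weights' prefix written during training), not a
+    # SavedModel directory; the initial forward pass above is what created the
+    # variables that the checkpoint restores into.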
+ checkpoint_path = FLAGS.ckpt_path + model.load_weights(checkpoint_path) + print('Checkpoint loaded from ', checkpoint_path) + + named_input_tensors = { + 'input_image': input_image, + 'input_scales': input_scales, + 'input_abs_thres': input_abs_thres, + 'input_max_feature_num': input_max_feature_num, + } + + # Outputs to the exported model. + named_output_tensors = {} + named_output_tensors['boxes'] = tf.identity( + extracted_features[0], name='boxes') + named_output_tensors['features'] = tf.identity( + extracted_features[1], name='features') + named_output_tensors['scales'] = tf.identity( + extracted_features[2], name='scales') + named_output_tensors['scores'] = tf.identity( + extracted_features[3], name='scores') + + # Export the model. + signature_def = tf.compat.v1.saved_model.signature_def_utils.build_signature_def( + inputs=_build_tensor_info(named_input_tensors), + outputs=_build_tensor_info(named_output_tensors)) + + print('Exporting trained model to:', export_path) + builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(export_path) + + init_op = None + builder.add_meta_graph_and_variables( + sess, [tf.compat.v1.saved_model.tag_constants.SERVING], + signature_def_map={ + tf.compat.v1.saved_model.signature_constants + .DEFAULT_SERVING_SIGNATURE_DEF_KEY: + signature_def + }, + main_op=init_op) + builder.save() + + +if __name__ == '__main__': + app.run(main) diff --git a/models/research/delf/delf/python/training/model/export_model_utils.py b/models/research/delf/delf/python/training/model/export_model_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f4302aca139802e99d80bfd4e1fc27e353abdfbb --- /dev/null +++ b/models/research/delf/delf/python/training/model/export_model_utils.py @@ -0,0 +1,171 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Helper functions for DELF model exporting.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from delf import feature_extractor +from delf.python.training.datasets import googlelandmarks as gld +from object_detection.core import box_list +from object_detection.core import box_list_ops + + +def ExtractLocalFeatures(image, image_scales, max_feature_num, abs_thres, iou, + attention_model_fn, stride_factor): + """Extract local features for input image. + + Args: + image: image tensor of type tf.uint8 with shape [h, w, channels]. + image_scales: 1D float tensor which contains float scales used for image + pyramid construction. + max_feature_num: int tensor denotes the maximum selected feature points. + abs_thres: float tensor denotes the score threshold for feature selection. + iou: float scalar denotes the iou threshold for NMS. + attention_model_fn: model function. Follows the signature: + * Args: + * `images`: Image tensor which is re-scaled. 
+ * Returns: + * `attention_prob`: attention map after the non-linearity. + * `feature_map`: feature map after ResNet convolution. + stride_factor: integer accounting for striding after block3. + + Returns: + boxes: [N, 4] float tensor which denotes the selected receptive box. N is + the number of final feature points which pass through keypoint selection + and NMS steps. + features: [N, depth] float tensor. + feature_scales: [N] float tensor. It is the inverse of the input image + scales such that larger image scales correspond to larger image regions, + which is compatible with keypoints detected with other techniques, for + example Congas. + scores: [N, 1] float tensor denotes the attention score. + + """ + original_image_shape_float = tf.gather( + tf.dtypes.cast(tf.shape(image), tf.float32), [0, 1]) + + image_tensor = gld.NormalizeImages( + image, pixel_value_offset=128.0, pixel_value_scale=128.0) + image_tensor = tf.expand_dims(image_tensor, 0, name='image/expand_dims') + + # Hard code the feature depth and receptive field parameters for now. + rf, stride, padding = [291.0, 16.0 * stride_factor, 145.0] + feature_depth = 1024 + + def _ProcessSingleScale(scale_index, boxes, features, scales, scores): + """Resizes the image and run feature extraction and keypoint selection. + + This function will be passed into tf.while_loop() and be called + repeatedly. The input boxes are collected from the previous iteration + [0: scale_index -1]. We get the current scale by + image_scales[scale_index], and run resize image, feature extraction and + keypoint selection. Then we will get a new set of selected_boxes for + current scale. In the end, we concat the previous boxes with current + selected_boxes as the output. + Args: + scale_index: A valid index in the image_scales. + boxes: Box tensor with the shape of [N, 4]. + features: Feature tensor with the shape of [N, depth]. + scales: Scale tensor with the shape of [N]. + scores: Attention score tensor with the shape of [N]. + + Returns: + scale_index: The next scale index for processing. + boxes: Concatenated box tensor with the shape of [K, 4]. K >= N. + features: Concatenated feature tensor with the shape of [K, depth]. + scales: Concatenated scale tensor with the shape of [K]. + scores: Concatenated score tensor with the shape of [K]. + """ + scale = tf.gather(image_scales, scale_index) + new_image_size = tf.dtypes.cast( + tf.round(original_image_shape_float * scale), tf.int32) + resized_image = tf.image.resize(image_tensor, new_image_size) + + attention_prob, feature_map = attention_model_fn(resized_image) + attention_prob = tf.squeeze(attention_prob, axis=[0]) + feature_map = tf.squeeze(feature_map, axis=[0]) + + rf_boxes = feature_extractor.CalculateReceptiveBoxes( + tf.shape(feature_map)[0], + tf.shape(feature_map)[1], rf, stride, padding) + + # Re-project back to the original image space. + rf_boxes = tf.divide(rf_boxes, scale) + attention_prob = tf.reshape(attention_prob, [-1]) + feature_map = tf.reshape(feature_map, [-1, feature_depth]) + + # Use attention score to select feature vectors. + indices = tf.reshape(tf.where(attention_prob >= abs_thres), [-1]) + selected_boxes = tf.gather(rf_boxes, indices) + selected_features = tf.gather(feature_map, indices) + selected_scores = tf.gather(attention_prob, indices) + selected_scales = tf.ones_like(selected_scores, tf.float32) / scale + + # Concat with the previous result from different scales. 
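+    # Because the number of selected keypoints grows at every scale, these
+    # accumulated tensors have a data-dependent leading dimension; the
+    # tf.while_loop below therefore declares shape_invariants with a None
+    # first dimension for them.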
+ boxes = tf.concat([boxes, selected_boxes], 0) + features = tf.concat([features, selected_features], 0) + scales = tf.concat([scales, selected_scales], 0) + scores = tf.concat([scores, selected_scores], 0) + + return scale_index + 1, boxes, features, scales, scores + + output_boxes = tf.zeros([0, 4], dtype=tf.float32) + output_features = tf.zeros([0, feature_depth], dtype=tf.float32) + output_scales = tf.zeros([0], dtype=tf.float32) + output_scores = tf.zeros([0], dtype=tf.float32) + + # Process the first scale separately, the following scales will reuse the + # graph variables. + (_, output_boxes, output_features, output_scales, + output_scores) = _ProcessSingleScale(0, output_boxes, output_features, + output_scales, output_scores) + + i = tf.constant(1, dtype=tf.int32) + num_scales = tf.shape(image_scales)[0] + keep_going = lambda j, b, f, scales, scores: tf.less(j, num_scales) + + (_, output_boxes, output_features, output_scales, + output_scores) = tf.while_loop( + cond=keep_going, + body=_ProcessSingleScale, + loop_vars=[ + i, output_boxes, output_features, output_scales, output_scores + ], + shape_invariants=[ + i.get_shape(), + tf.TensorShape([None, 4]), + tf.TensorShape([None, feature_depth]), + tf.TensorShape([None]), + tf.TensorShape([None]) + ], + back_prop=False) + + feature_boxes = box_list.BoxList(output_boxes) + feature_boxes.add_field('features', output_features) + feature_boxes.add_field('scales', output_scales) + feature_boxes.add_field('scores', output_scores) + + nms_max_boxes = tf.minimum(max_feature_num, feature_boxes.num_boxes()) + final_boxes = box_list_ops.non_max_suppression(feature_boxes, iou, + nms_max_boxes) + + return final_boxes.get(), final_boxes.get_field( + 'features'), final_boxes.get_field('scales'), tf.expand_dims( + final_boxes.get_field('scores'), 1) diff --git a/models/research/delf/delf/python/training/model/resnet50.py b/models/research/delf/delf/python/training/model/resnet50.py new file mode 100644 index 0000000000000000000000000000000000000000..1c4d7c2f68dea12d74fcd32a8b52fd1285e92b59 --- /dev/null +++ b/models/research/delf/delf/python/training/model/resnet50.py @@ -0,0 +1,358 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""ResNet50 backbone used in DELF model. + +Copied over from tensorflow/python/eager/benchmarks/resnet50/resnet50.py, +because that code does not support dependencies. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools + +import tensorflow as tf + +layers = tf.keras.layers + + +class _IdentityBlock(tf.keras.Model): + """_IdentityBlock is the block that has no conv layer at shortcut. 
+ + Args: + kernel_size: the kernel size of middle conv layer at main path + filters: list of integers, the filters of 3 conv layer at main path + stage: integer, current stage label, used for generating layer names + block: 'a','b'..., current block label, used for generating layer names + data_format: data_format for the input ('channels_first' or + 'channels_last'). + """ + + def __init__(self, kernel_size, filters, stage, block, data_format): + super(_IdentityBlock, self).__init__(name='') + filters1, filters2, filters3 = filters + + conv_name_base = 'res' + str(stage) + block + '_branch' + bn_name_base = 'bn' + str(stage) + block + '_branch' + bn_axis = 1 if data_format == 'channels_first' else 3 + + self.conv2a = layers.Conv2D( + filters1, (1, 1), name=conv_name_base + '2a', data_format=data_format) + self.bn2a = layers.BatchNormalization( + axis=bn_axis, name=bn_name_base + '2a') + + self.conv2b = layers.Conv2D( + filters2, + kernel_size, + padding='same', + data_format=data_format, + name=conv_name_base + '2b') + self.bn2b = layers.BatchNormalization( + axis=bn_axis, name=bn_name_base + '2b') + + self.conv2c = layers.Conv2D( + filters3, (1, 1), name=conv_name_base + '2c', data_format=data_format) + self.bn2c = layers.BatchNormalization( + axis=bn_axis, name=bn_name_base + '2c') + + def call(self, input_tensor, training=False): + x = self.conv2a(input_tensor) + x = self.bn2a(x, training=training) + x = tf.nn.relu(x) + + x = self.conv2b(x) + x = self.bn2b(x, training=training) + x = tf.nn.relu(x) + + x = self.conv2c(x) + x = self.bn2c(x, training=training) + + x += input_tensor + return tf.nn.relu(x) + + +class _ConvBlock(tf.keras.Model): + """_ConvBlock is the block that has a conv layer at shortcut. + + Args: + kernel_size: the kernel size of middle conv layer at main path + filters: list of integers, the filters of 3 conv layer at main path + stage: integer, current stage label, used for generating layer names + block: 'a','b'..., current block label, used for generating layer names + data_format: data_format for the input ('channels_first' or + 'channels_last'). + strides: strides for the convolution. Note that from stage 3, the first + conv layer at main path is with strides=(2,2), and the shortcut should + have strides=(2,2) as well. 
+ """ + + def __init__(self, + kernel_size, + filters, + stage, + block, + data_format, + strides=(2, 2)): + super(_ConvBlock, self).__init__(name='') + filters1, filters2, filters3 = filters + + conv_name_base = 'res' + str(stage) + block + '_branch' + bn_name_base = 'bn' + str(stage) + block + '_branch' + bn_axis = 1 if data_format == 'channels_first' else 3 + + self.conv2a = layers.Conv2D( + filters1, (1, 1), + strides=strides, + name=conv_name_base + '2a', + data_format=data_format) + self.bn2a = layers.BatchNormalization( + axis=bn_axis, name=bn_name_base + '2a') + + self.conv2b = layers.Conv2D( + filters2, + kernel_size, + padding='same', + name=conv_name_base + '2b', + data_format=data_format) + self.bn2b = layers.BatchNormalization( + axis=bn_axis, name=bn_name_base + '2b') + + self.conv2c = layers.Conv2D( + filters3, (1, 1), name=conv_name_base + '2c', data_format=data_format) + self.bn2c = layers.BatchNormalization( + axis=bn_axis, name=bn_name_base + '2c') + + self.conv_shortcut = layers.Conv2D( + filters3, (1, 1), + strides=strides, + name=conv_name_base + '1', + data_format=data_format) + self.bn_shortcut = layers.BatchNormalization( + axis=bn_axis, name=bn_name_base + '1') + + def call(self, input_tensor, training=False): + x = self.conv2a(input_tensor) + x = self.bn2a(x, training=training) + x = tf.nn.relu(x) + + x = self.conv2b(x) + x = self.bn2b(x, training=training) + x = tf.nn.relu(x) + + x = self.conv2c(x) + x = self.bn2c(x, training=training) + + shortcut = self.conv_shortcut(input_tensor) + shortcut = self.bn_shortcut(shortcut, training=training) + + x += shortcut + return tf.nn.relu(x) + + +# pylint: disable=not-callable +class ResNet50(tf.keras.Model): + """Instantiates the ResNet50 architecture. + + Args: + data_format: format for the image. Either 'channels_first' or + 'channels_last'. 'channels_first' is typically faster on GPUs while + 'channels_last' is typically faster on CPUs. See + https://www.tensorflow.org/performance/performance_guide#data_formats + name: Prefix applied to names of variables created in the model. + include_top: whether to include the fully-connected layer at the top of the + network. + pooling: Optional pooling mode for feature extraction when `include_top` is + False. 'None' means that the output of the model will be the 4D tensor + output of the last convolutional layer. 'avg' means that global average + pooling will be applied to the output of the last convolutional layer, and + thus the output of the model will be a 2D tensor. 'max' means that global + max pooling will be applied. + block3_strides: whether to add a stride of 2 to block3 to make it compatible + with tf.slim ResNet implementation. + average_pooling: whether to do average pooling of block4 features before + global pooling. + classes: optional number of classes to classify images into, only to be + specified if `include_top` is True. + + Raises: + ValueError: in case of invalid argument for data_format. + """ + + def __init__(self, + data_format, + name='', + include_top=True, + pooling=None, + block3_strides=False, + average_pooling=True, + classes=1000): + super(ResNet50, self).__init__(name=name) + + valid_channel_values = ('channels_first', 'channels_last') + if data_format not in valid_channel_values: + raise ValueError('Unknown data_format: %s. 
Valid values: %s' % + (data_format, valid_channel_values)) + self.include_top = include_top + self.block3_strides = block3_strides + self.average_pooling = average_pooling + self.pooling = pooling + + def conv_block(filters, stage, block, strides=(2, 2)): + return _ConvBlock( + 3, + filters, + stage=stage, + block=block, + data_format=data_format, + strides=strides) + + def id_block(filters, stage, block): + return _IdentityBlock( + 3, filters, stage=stage, block=block, data_format=data_format) + + self.conv1 = layers.Conv2D( + 64, (7, 7), + strides=(2, 2), + data_format=data_format, + padding='same', + name='conv1') + bn_axis = 1 if data_format == 'channels_first' else 3 + self.bn_conv1 = layers.BatchNormalization(axis=bn_axis, name='bn_conv1') + self.max_pool = layers.MaxPooling2D((3, 3), + strides=(2, 2), + data_format=data_format) + + self.l2a = conv_block([64, 64, 256], stage=2, block='a', strides=(1, 1)) + self.l2b = id_block([64, 64, 256], stage=2, block='b') + self.l2c = id_block([64, 64, 256], stage=2, block='c') + + self.l3a = conv_block([128, 128, 512], stage=3, block='a') + self.l3b = id_block([128, 128, 512], stage=3, block='b') + self.l3c = id_block([128, 128, 512], stage=3, block='c') + self.l3d = id_block([128, 128, 512], stage=3, block='d') + + self.l4a = conv_block([256, 256, 1024], stage=4, block='a') + self.l4b = id_block([256, 256, 1024], stage=4, block='b') + self.l4c = id_block([256, 256, 1024], stage=4, block='c') + self.l4d = id_block([256, 256, 1024], stage=4, block='d') + self.l4e = id_block([256, 256, 1024], stage=4, block='e') + self.l4f = id_block([256, 256, 1024], stage=4, block='f') + + # Striding layer that can be used on top of block3 to produce feature maps + # with the same resolution as the TF-Slim implementation. + if self.block3_strides: + self.subsampling_layer = layers.MaxPooling2D((1, 1), + strides=(2, 2), + data_format=data_format) + self.l5a = conv_block([512, 512, 2048], + stage=5, + block='a', + strides=(1, 1)) + else: + self.l5a = conv_block([512, 512, 2048], stage=5, block='a') + self.l5b = id_block([512, 512, 2048], stage=5, block='b') + self.l5c = id_block([512, 512, 2048], stage=5, block='c') + + self.avg_pool = layers.AveragePooling2D((7, 7), + strides=(7, 7), + data_format=data_format) + + if self.include_top: + self.flatten = layers.Flatten() + self.fc1000 = layers.Dense(classes, name='fc1000') + else: + reduction_indices = [1, 2] if data_format == 'channels_last' else [2, 3] + reduction_indices = tf.constant(reduction_indices) + if pooling == 'avg': + self.global_pooling = functools.partial( + tf.reduce_mean, axis=reduction_indices, keepdims=False) + elif pooling == 'max': + self.global_pooling = functools.partial( + tf.reduce_max, axis=reduction_indices, keepdims=False) + else: + self.global_pooling = None + + def call(self, inputs, training=True, intermediates_dict=None): + """Call the ResNet50 model. + + Args: + inputs: Images to compute features for. + training: Whether model is in training phase. + intermediates_dict: `None` or dictionary. If not None, accumulate feature + maps from intermediate blocks into the dictionary. "" + + Returns: + Tensor with featuremap. + """ + + x = self.conv1(inputs) + x = self.bn_conv1(x, training=training) + x = tf.nn.relu(x) + if intermediates_dict is not None: + intermediates_dict['block0'] = x + + x = self.max_pool(x) + if intermediates_dict is not None: + intermediates_dict['block0mp'] = x + + # Block 1 (equivalent to "conv2" in Resnet paper). 
+ x = self.l2a(x, training=training) + x = self.l2b(x, training=training) + x = self.l2c(x, training=training) + if intermediates_dict is not None: + intermediates_dict['block1'] = x + + # Block 2 (equivalent to "conv3" in Resnet paper). + x = self.l3a(x, training=training) + x = self.l3b(x, training=training) + x = self.l3c(x, training=training) + x = self.l3d(x, training=training) + if intermediates_dict is not None: + intermediates_dict['block2'] = x + + # Block 3 (equivalent to "conv4" in Resnet paper). + x = self.l4a(x, training=training) + x = self.l4b(x, training=training) + x = self.l4c(x, training=training) + x = self.l4d(x, training=training) + x = self.l4e(x, training=training) + x = self.l4f(x, training=training) + + if self.block3_strides: + x = self.subsampling_layer(x) + if intermediates_dict is not None: + intermediates_dict['block3'] = x + else: + if intermediates_dict is not None: + intermediates_dict['block3'] = x + + x = self.l5a(x, training=training) + x = self.l5b(x, training=training) + x = self.l5c(x, training=training) + + if self.average_pooling: + x = self.avg_pool(x) + if intermediates_dict is not None: + intermediates_dict['block4'] = x + else: + if intermediates_dict is not None: + intermediates_dict['block4'] = x + + if self.include_top: + return self.fc1000(self.flatten(x)) + elif self.global_pooling: + return self.global_pooling(x) + else: + return x diff --git a/models/research/delf/delf/python/training/train.py b/models/research/delf/delf/python/training/train.py new file mode 100644 index 0000000000000000000000000000000000000000..9b0d0a6cdaea696398ae50fcdadbead91899539f --- /dev/null +++ b/models/research/delf/delf/python/training/train.py @@ -0,0 +1,442 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Training script for DELF on Google Landmarks Dataset. + +Script to train DELF using classification loss on Google Landmarks Dataset +using MirroredStrategy to so it can run on multiple GPUs. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from absl import app +from absl import flags +from absl import logging +import tensorflow as tf +import tensorflow_probability as tfp + +# Placeholder for internal import. Do not remove this line. 
+from delf.python.training.datasets import googlelandmarks as gld +from delf.python.training.model import delf_model + +FLAGS = flags.FLAGS + +flags.DEFINE_boolean('debug', False, 'Debug mode.') +flags.DEFINE_string('logdir', '/tmp/delf', 'WithTensorBoard logdir.') +flags.DEFINE_string('train_file_pattern', '/tmp/data/train*', + 'File pattern of training dataset files.') +flags.DEFINE_string('validation_file_pattern', '/tmp/data/validation*', + 'File pattern of validation dataset files.') +flags.DEFINE_enum('dataset_version', 'gld_v1', + ['gld_v1', 'gld_v2', 'gld_v2_clean'], + 'Google Landmarks dataset version, used to determine the' + 'number of classes.') +flags.DEFINE_integer('seed', 0, 'Seed to training dataset.') +flags.DEFINE_float('initial_lr', 0.001, 'Initial learning rate.') +flags.DEFINE_integer('batch_size', 32, 'Global batch size.') +flags.DEFINE_integer('max_iters', 500000, 'Maximum iterations.') +flags.DEFINE_boolean('block3_strides', False, 'Whether to use block3_strides.') +flags.DEFINE_boolean('use_augmentation', True, + 'Whether to use ImageNet style augmentation.') + + +def _record_accuracy(metric, logits, labels): + """Record accuracy given predicted logits and ground-truth labels.""" + softmax_probabilities = tf.keras.layers.Softmax()(logits) + metric.update_state(labels, softmax_probabilities) + + +def _attention_summaries(scores, global_step): + """Record statistics of the attention score.""" + tf.summary.scalar('attention/max', tf.reduce_max(scores), step=global_step) + tf.summary.scalar('attention/min', tf.reduce_min(scores), step=global_step) + tf.summary.scalar('attention/mean', tf.reduce_mean(scores), step=global_step) + tf.summary.scalar( + 'attention/percent_25', + tfp.stats.percentile(scores, 25.0), + step=global_step) + tf.summary.scalar( + 'attention/percent_50', + tfp.stats.percentile(scores, 50.0), + step=global_step) + tf.summary.scalar( + 'attention/percent_75', + tfp.stats.percentile(scores, 75.0), + step=global_step) + + +def create_model(num_classes): + """Define DELF model, and initialize classifiers.""" + model = delf_model.Delf(block3_strides=FLAGS.block3_strides, name='DELF') + model.init_classifiers(num_classes) + return model + + +def _learning_rate_schedule(global_step_value, max_iters, initial_lr): + """Calculates learning_rate with linear decay. + + Args: + global_step_value: int, global step. + max_iters: int, maximum iterations. + initial_lr: float, initial learning rate. + + Returns: + lr: float, learning rate. + """ + lr = initial_lr * (1.0 - global_step_value / max_iters) + return lr + + +def main(argv): + if len(argv) > 1: + raise app.UsageError('Too many command-line arguments.') + + #------------------------------------------------------------- + # Log flags used. + logging.info('Running training script with\n') + logging.info('logdir= %s', FLAGS.logdir) + logging.info('initial_lr= %f', FLAGS.initial_lr) + logging.info('block3_strides= %s', str(FLAGS.block3_strides)) + + # ------------------------------------------------------------ + # Create the strategy. 
+ strategy = tf.distribute.MirroredStrategy() + logging.info('Number of devices: %d', strategy.num_replicas_in_sync) + if FLAGS.debug: + print('Number of devices:', strategy.num_replicas_in_sync) + + max_iters = FLAGS.max_iters + global_batch_size = FLAGS.batch_size + image_size = 321 + num_eval = 1000 + report_interval = 100 + eval_interval = 1000 + save_interval = 20000 + + initial_lr = FLAGS.initial_lr + + clip_val = tf.constant(10.0) + + if FLAGS.debug: + global_batch_size = 4 + max_iters = 4 + num_eval = 1 + save_interval = 1 + report_interval = 1 + + # Determine the number of classes based on the version of the dataset. + gld_info = gld.GoogleLandmarksInfo() + num_classes = gld_info.num_classes[FLAGS.dataset_version] + + # ------------------------------------------------------------ + # Create the distributed train/validation sets. + train_dataset = gld.CreateDataset( + file_pattern=FLAGS.train_file_pattern, + batch_size=global_batch_size, + image_size=image_size, + augmentation=FLAGS.use_augmentation, + seed=FLAGS.seed) + validation_dataset = gld.CreateDataset( + file_pattern=FLAGS.validation_file_pattern, + batch_size=global_batch_size, + image_size=image_size, + augmentation=False, + seed=FLAGS.seed) + + train_iterator = strategy.make_dataset_iterator(train_dataset) + validation_iterator = strategy.make_dataset_iterator(validation_dataset) + + train_iterator.initialize() + validation_iterator.initialize() + + # Create a checkpoint directory to store the checkpoints. + checkpoint_prefix = os.path.join(FLAGS.logdir, 'delf_tf2-ckpt') + + # ------------------------------------------------------------ + # Finally, we do everything in distributed scope. + with strategy.scope(): + # Compute loss. + # Set reduction to `none` so we can do the reduction afterwards and divide + # by global batch size. + loss_object = tf.keras.losses.SparseCategoricalCrossentropy( + from_logits=True, reduction=tf.keras.losses.Reduction.NONE) + + def compute_loss(labels, predictions): + per_example_loss = loss_object(labels, predictions) + return tf.nn.compute_average_loss( + per_example_loss, global_batch_size=global_batch_size) + + # Set up metrics. + desc_validation_loss = tf.keras.metrics.Mean(name='desc_validation_loss') + attn_validation_loss = tf.keras.metrics.Mean(name='attn_validation_loss') + desc_train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy( + name='desc_train_accuracy') + attn_train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy( + name='attn_train_accuracy') + desc_validation_accuracy = tf.keras.metrics.SparseCategoricalAccuracy( + name='desc_validation_accuracy') + attn_validation_accuracy = tf.keras.metrics.SparseCategoricalAccuracy( + name='attn_validation_accuracy') + + # ------------------------------------------------------------ + # Setup DELF model and optimizer. + model = create_model(num_classes) + logging.info('Model, datasets loaded.\nnum_classes= %d', num_classes) + + optimizer = tf.keras.optimizers.SGD(learning_rate=initial_lr, momentum=0.9) + + # Setup summary writer. + summary_writer = tf.summary.create_file_writer( + os.path.join(FLAGS.logdir, 'train_logs'), flush_millis=10000) + + # Setup checkpoint directory. + checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model) + manager = tf.train.CheckpointManager( + checkpoint, checkpoint_prefix, max_to_keep=3) + + # ------------------------------------------------------------ + # Train step to run on one GPU. 
+ def train_step(inputs): + """Train one batch.""" + images, labels = inputs + # Temporary workaround to avoid some corrupted labels. + labels = tf.clip_by_value(labels, 0, model.num_classes) + + global_step = optimizer.iterations + tf.summary.scalar( + 'image_range/max', tf.reduce_max(images), step=global_step) + tf.summary.scalar( + 'image_range/min', tf.reduce_min(images), step=global_step) + + def _backprop_loss(tape, loss, weights): + """Backpropogate losses using clipped gradients. + + Args: + tape: gradient tape. + loss: scalar Tensor, loss value. + weights: keras model weights. + """ + gradients = tape.gradient(loss, weights) + clipped, _ = tf.clip_by_global_norm(gradients, clip_norm=clip_val) + optimizer.apply_gradients(zip(clipped, weights)) + + # Record gradients and loss through backbone. + with tf.GradientTape() as desc_tape: + + blocks = {} + prelogits = model.backbone( + images, intermediates_dict=blocks, training=True) + + # Report sparsity. + activations_zero_fractions = { + 'sparsity/%s' % k: tf.nn.zero_fraction(v) + for k, v in blocks.items() + } + for k, v in activations_zero_fractions.items(): + tf.summary.scalar(k, v, step=global_step) + + # Apply descriptor classifier. + logits = model.desc_classification(prelogits) + + desc_loss = compute_loss(labels, logits) + + # Backprop only through backbone weights. + _backprop_loss(desc_tape, desc_loss, model.desc_trainable_weights) + + # Record descriptor train accuracy. + _record_accuracy(desc_train_accuracy, logits, labels) + + # Record gradients and loss through attention block. + with tf.GradientTape() as attn_tape: + block3 = blocks['block3'] # pytype: disable=key-error + + # Stopping gradients according to DELG paper: + # (https://arxiv.org/abs/2001.05027). + block3 = tf.stop_gradient(block3) + + prelogits, scores, _ = model.attention(block3, training=True) + _attention_summaries(scores, global_step) + + # Apply attention block classifier. + logits = model.attn_classification(prelogits) + + attn_loss = compute_loss(labels, logits) + + # Backprop only through attention weights. + _backprop_loss(attn_tape, attn_loss, model.attn_trainable_weights) + + # Record attention train accuracy. + _record_accuracy(attn_train_accuracy, logits, labels) + + return desc_loss, attn_loss + + # ------------------------------------------------------------ + def validation_step(inputs): + """Validate one batch.""" + images, labels = inputs + labels = tf.clip_by_value(labels, 0, model.num_classes) + + # Get descriptor predictions. + blocks = {} + prelogits = model.backbone( + images, intermediates_dict=blocks, training=False) + logits = model.desc_classification(prelogits, training=False) + softmax_probabilities = tf.keras.layers.Softmax()(logits) + + validation_loss = loss_object(labels, logits) + desc_validation_loss.update_state(validation_loss) + desc_validation_accuracy.update_state(labels, softmax_probabilities) + + # Get attention predictions. + block3 = blocks['block3'] # pytype: disable=key-error + prelogits, _, _ = model.attention(block3, training=False) + + logits = model.attn_classification(prelogits, training=False) + softmax_probabilities = tf.keras.layers.Softmax()(logits) + + validation_loss = loss_object(labels, logits) + attn_validation_loss.update_state(validation_loss) + attn_validation_accuracy.update_state(labels, softmax_probabilities) + + return desc_validation_accuracy.result(), attn_validation_accuracy.result( + ) + + # `run` replicates the provided computation and runs it + # with the distributed input. 
+ @tf.function + def distributed_train_step(dataset_inputs): + """Get the actual losses.""" + # Each (desc, attn) is a list of 3 losses - crossentropy, reg, total. + desc_per_replica_loss, attn_per_replica_loss = ( + strategy.run(train_step, args=(dataset_inputs,))) + + # Reduce over the replicas. + desc_global_loss = strategy.reduce( + tf.distribute.ReduceOp.SUM, desc_per_replica_loss, axis=None) + attn_global_loss = strategy.reduce( + tf.distribute.ReduceOp.SUM, attn_per_replica_loss, axis=None) + + return desc_global_loss, attn_global_loss + + @tf.function + def distributed_validation_step(dataset_inputs): + return strategy.run(validation_step, args=(dataset_inputs,)) + + # ------------------------------------------------------------ + # *** TRAIN LOOP *** + with summary_writer.as_default(): + with tf.summary.record_if( + tf.math.equal(0, optimizer.iterations % report_interval)): + + global_step_value = optimizer.iterations.numpy() + while global_step_value < max_iters: + + # input_batch : images(b, h, w, c), labels(b,). + try: + input_batch = train_iterator.get_next() + except tf.errors.OutOfRangeError: + # Break if we run out of data in the dataset. + logging.info('Stopping training at global step %d, no more data', + global_step_value) + break + + # Set learning rate for optimizer to use. + global_step = optimizer.iterations + global_step_value = global_step.numpy() + + learning_rate = _learning_rate_schedule(global_step_value, max_iters, + initial_lr) + optimizer.learning_rate = learning_rate + tf.summary.scalar( + 'learning_rate', optimizer.learning_rate, step=global_step) + + # Run the training step over num_gpu gpus. + desc_dist_loss, attn_dist_loss = distributed_train_step(input_batch) + + # Log losses and accuracies to tensorboard. + tf.summary.scalar( + 'loss/desc/crossentropy', desc_dist_loss, step=global_step) + tf.summary.scalar( + 'loss/attn/crossentropy', attn_dist_loss, step=global_step) + tf.summary.scalar( + 'train_accuracy/desc', + desc_train_accuracy.result(), + step=global_step) + tf.summary.scalar( + 'train_accuracy/attn', + attn_train_accuracy.result(), + step=global_step) + + # Print to console if running locally. + if FLAGS.debug: + if global_step_value % report_interval == 0: + print(global_step.numpy()) + print('desc:', desc_dist_loss.numpy()) + print('attn:', attn_dist_loss.numpy()) + + # Validate once in {eval_interval*n, n \in N} steps. + if global_step_value % eval_interval == 0: + for i in range(num_eval): + try: + validation_batch = validation_iterator.get_next() + desc_validation_result, attn_validation_result = ( + distributed_validation_step(validation_batch)) + except tf.errors.OutOfRangeError: + logging.info('Stopping eval at batch %d, no more data', i) + break + + # Log validation results to tensorboard. + tf.summary.scalar( + 'validation/desc', desc_validation_result, step=global_step) + tf.summary.scalar( + 'validation/attn', attn_validation_result, step=global_step) + + logging.info('\nValidation(%f)\n', global_step_value) + logging.info(': desc: %f\n', desc_validation_result.numpy()) + logging.info(': attn: %f\n', attn_validation_result.numpy()) + # Print to console. + if FLAGS.debug: + print('Validation: desc:', desc_validation_result.numpy()) + print(' : attn:', attn_validation_result.numpy()) + + # Save checkpoint once (each save_interval*n, n \in N) steps. 
+ if global_step_value % save_interval == 0: + save_path = manager.save() + logging.info('Saved({global_step_value}) at %s', save_path) + + file_path = '%s/delf_weights' % FLAGS.logdir + model.save_weights(file_path, save_format='tf') + logging.info('Saved weights({global_step_value}) at %s', file_path) + + # Reset metrics for next step. + desc_train_accuracy.reset_states() + attn_train_accuracy.reset_states() + desc_validation_loss.reset_states() + attn_validation_loss.reset_states() + desc_validation_accuracy.reset_states() + attn_validation_accuracy.reset_states() + + if global_step.numpy() > max_iters: + break + + logging.info('Finished training for %d steps.', max_iters) + + +if __name__ == '__main__': + app.run(main) diff --git a/models/research/delf/delf/python/utils.py b/models/research/delf/delf/python/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..dbab2d8c7f1f423991c98851ad509e4684b738b7 --- /dev/null +++ b/models/research/delf/delf/python/utils.py @@ -0,0 +1,41 @@ +# Copyright 2020 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Helper functions for DELF.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from PIL import Image +from PIL import ImageFile +import tensorflow as tf + +# To avoid PIL crashing for truncated (corrupted) images. +ImageFile.LOAD_TRUNCATED_IMAGES = True + + +def RgbLoader(path): + """Helper function to read image with PIL. + + Args: + path: Path to image to be loaded. + + Returns: + PIL image in RGB format. + """ + with tf.io.gfile.GFile(path, 'rb') as f: + img = Image.open(f) + return img.convert('RGB') + diff --git a/models/research/delf/setup.py b/models/research/delf/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..7aec6f0065a476dbc83b28145916f2981df4bd82 --- /dev/null +++ b/models/research/delf/setup.py @@ -0,0 +1,37 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Setup script for delf.""" + +from setuptools import setup, find_packages + +install_requires = [ + 'absl-py >= 0.7.1', + 'protobuf >= 3.8.0', + 'pandas >= 0.24.2', + 'numpy >= 1.16.1', + 'scipy >= 1.2.2', + 'tensorflow >= 2.0.0b1', + 'tf_slim >= 1.1', + 'tensorflow_probability >= 0.9.0', +] + +setup( + name='delf', + version='2.0', + include_package_data=True, + packages=find_packages(), + install_requires=install_requires, + description='DELF (DEep Local Features)', +) diff --git a/models/research/domain_adaptation/README.md b/models/research/domain_adaptation/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e8a2b83794f11ed3711e6bc26254a90cb5469440 --- /dev/null +++ b/models/research/domain_adaptation/README.md @@ -0,0 +1,124 @@ +![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +## Introduction +This is the code used for two domain adaptation papers. + +The `domain_separation` directory contains code for the "Domain Separation +Networks" paper by Bousmalis K., Trigeorgis G., et al. which was presented at +NIPS 2016. The paper can be found here: https://arxiv.org/abs/1608.06019. + +The `pixel_domain_adaptation` directory contains the code used for the +"Unsupervised Pixel-Level Domain Adaptation with Generative Adversarial +Networks" paper by Bousmalis K., et al. (presented at CVPR 2017). The paper can +be found here: https://arxiv.org/abs/1612.05424. PixelDA aims to perform domain +adaptation by transfering the visual style of the target domain (which has few +or no labels) to a source domain (which has many labels). This is accomplished +using a Generative Adversarial Network (GAN). + +### Other implementations +* [Simplified-DSN](https://github.com/AmirHussein96/Simplified-DSN): + An unofficial implementation of the [Domain Separation Networks paper](https://arxiv.org/abs/1608.06019). + +## Contact +The domain separation code was open-sourced +by [Konstantinos Bousmalis](https://github.com/bousmalis) +(konstantinos@google.com), while the pixel level domain adaptation code was +open-sourced by [David Dohan](https://github.com/dmrd) (ddohan@google.com). + +## Installation +You will need to have the following installed on your machine before trying out the DSN code. + +* TensorFlow 1.x: https://www.tensorflow.org/install/ +* Bazel: https://bazel.build/ + +## Initial setup +In order to run the MNIST to MNIST-M experiments, you will need to set the +data directory: + +``` +$ export DSN_DATA_DIR=/your/dir +``` + +Add models and models/slim to your `$PYTHONPATH` (assumes $PWD is /models): + +``` +$ export PYTHONPATH=$PYTHONPATH:$PWD:$PWD/slim +``` + +## Getting the datasets + +You can fetch the MNIST data by running + +``` + $ bazel run slim:download_and_convert_data -- --dataset_dir $DSN_DATA_DIR --dataset_name=mnist +``` + +The MNIST-M dataset is available online [here](http://bit.ly/2nrlUAJ). 
Once it is downloaded and extracted into your data directory, create TFRecord files by running: +``` +$ bazel run domain_adaptation/datasets:download_and_convert_mnist_m -- --dataset_dir $DSN_DATA_DIR +``` + +# Running PixelDA from MNIST to MNIST-M +You can run PixelDA as follows (using Tensorboard to examine the results): + +``` +$ bazel run domain_adaptation/pixel_domain_adaptation:pixelda_train -- --dataset_dir $DSN_DATA_DIR --source_dataset mnist --target_dataset mnist_m +``` + +And evaluation as: +``` +$ bazel run domain_adaptation/pixel_domain_adaptation:pixelda_eval -- --dataset_dir $DSN_DATA_DIR --source_dataset mnist --target_dataset mnist_m --target_split_name test +``` + +The MNIST-M results in the paper were run with the following hparams flag: +``` +--hparams arch=resnet,domain_loss_weight=0.135603587834,num_training_examples=16000000,style_transfer_loss_weight=0.0113173311334,task_loss_in_g_weight=0.0100959947002,task_tower=mnist,task_tower_in_g_step=true +``` + +### A note on terminology/language of the code: + +The components of the network can be grouped into two parts +which correspond to elements which are jointly optimized: The generator +component and the discriminator component. + +The generator component takes either an image or noise vector and produces an +output image. + +The discriminator component takes the generated images and the target images +and attempts to discriminate between them. + +## Running DSN code for adapting MNIST to MNIST-M + +Then you need to build the binaries with Bazel: + +``` +$ bazel build -c opt domain_adaptation/domain_separation/... +``` + +You can then train with the following command: + +``` +$ ./bazel-bin/domain_adaptation/domain_separation/dsn_train \ + --similarity_loss=dann_loss \ + --basic_tower=dann_mnist \ + --source_dataset=mnist \ + --target_dataset=mnist_m \ + --learning_rate=0.0117249 \ + --gamma_weight=0.251175 \ + --weight_decay=1e-6 \ + --layers_to_regularize=fc3 \ + --nouse_separation \ + --master="" \ + --dataset_dir=${DSN_DATA_DIR} \ + -v --use_logging +``` + +Evaluation can be invoked with the following command: + +``` +$ ./bazel-bin/domain_adaptation/domain_separation/dsn_eval \ + -v --dataset mnist_m --split test --num_examples=9001 \ + --dataset_dir=${DSN_DATA_DIR} +``` diff --git a/models/research/domain_adaptation/WORKSPACE b/models/research/domain_adaptation/WORKSPACE new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/domain_adaptation/__init__.py b/models/research/domain_adaptation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/domain_adaptation/datasets/BUILD b/models/research/domain_adaptation/datasets/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..067a79374fbcedaa6fcd90293e5365aaad4c18c6 --- /dev/null +++ b/models/research/domain_adaptation/datasets/BUILD @@ -0,0 +1,45 @@ +# Domain Adaptation Scenarios Datasets + +package( + default_visibility = [ + ":internal", + ], +) + +licenses(["notice"]) # Apache 2.0 + +exports_files(["LICENSE"]) + +package_group( + name = "internal", + packages = [ + "//domain_adaptation/...", + ], +) + +py_library( + name = "dataset_factory", + srcs = ["dataset_factory.py"], + deps = [ + ":mnist_m", + "//slim:mnist", + ], +) + +py_binary( + name = "download_and_convert_mnist_m", + srcs = ["download_and_convert_mnist_m.py"], + deps = [ + + "//slim:dataset_utils", + ], 
+) + +py_binary( + name = "mnist_m", + srcs = ["mnist_m.py"], + deps = [ + + "//slim:dataset_utils", + ], +) diff --git a/models/research/domain_adaptation/datasets/__init__.py b/models/research/domain_adaptation/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/domain_adaptation/datasets/dataset_factory.py b/models/research/domain_adaptation/datasets/dataset_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..4ca1b41c412a78d25053fc786c8f81072fe90adb --- /dev/null +++ b/models/research/domain_adaptation/datasets/dataset_factory.py @@ -0,0 +1,107 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""A factory-pattern class which returns image/label pairs.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# Dependency imports +import tensorflow as tf + +from slim.datasets import mnist +from domain_adaptation.datasets import mnist_m + +slim = tf.contrib.slim + + +def get_dataset(dataset_name, + split_name, + dataset_dir, + file_pattern=None, + reader=None): + """Given a dataset name and a split_name returns a Dataset. + + Args: + dataset_name: String, the name of the dataset. + split_name: A train/test split name. + dataset_dir: The directory where the dataset files are stored. + file_pattern: The file pattern to use for matching the dataset source files. + reader: The subclass of tf.ReaderBase. If left as `None`, then the default + reader defined by each dataset is used. + + Returns: + A tf-slim `Dataset` class. + + Raises: + ValueError: if `dataset_name` isn't recognized. + """ + dataset_name_to_module = {'mnist': mnist, 'mnist_m': mnist_m} + if dataset_name not in dataset_name_to_module: + raise ValueError('Name of dataset unknown %s.' % dataset_name) + + return dataset_name_to_module[dataset_name].get_split(split_name, dataset_dir, + file_pattern, reader) + + +def provide_batch(dataset_name, split_name, dataset_dir, num_readers, + batch_size, num_preprocessing_threads): + """Provides a batch of images and corresponding labels. + + Args: + dataset_name: String, the name of the dataset. + split_name: A train/test split name. + dataset_dir: The directory where the dataset files are stored. + num_readers: The number of readers used by DatasetDataProvider. + batch_size: The size of the batch requested. + num_preprocessing_threads: The number of preprocessing threads for + tf.train.batch. + file_pattern: The file pattern to use for matching the dataset source files. + reader: The subclass of tf.ReaderBase. If left as `None`, then the default + reader defined by each dataset is used. + + Returns: + A batch of + images: tensor of [batch_size, height, width, channels]. + labels: dictionary of labels. 
+ """ + dataset = get_dataset(dataset_name, split_name, dataset_dir) + provider = slim.dataset_data_provider.DatasetDataProvider( + dataset, + num_readers=num_readers, + common_queue_capacity=20 * batch_size, + common_queue_min=10 * batch_size) + [image, label] = provider.get(['image', 'label']) + + # Convert images to float32 + image = tf.image.convert_image_dtype(image, tf.float32) + image -= 0.5 + image *= 2 + + # Load the data. + labels = {} + images, labels['classes'] = tf.train.batch( + [image, label], + batch_size=batch_size, + num_threads=num_preprocessing_threads, + capacity=5 * batch_size) + labels['classes'] = slim.one_hot_encoding(labels['classes'], + dataset.num_classes) + + # Convert mnist to RGB and 32x32 so that it can match mnist_m. + if dataset_name == 'mnist': + images = tf.image.grayscale_to_rgb(images) + images = tf.image.resize_images(images, [32, 32]) + return images, labels diff --git a/models/research/domain_adaptation/datasets/download_and_convert_mnist_m.py b/models/research/domain_adaptation/datasets/download_and_convert_mnist_m.py new file mode 100644 index 0000000000000000000000000000000000000000..3b5004d3d8aaf54656389e517c50f38299714bc7 --- /dev/null +++ b/models/research/domain_adaptation/datasets/download_and_convert_mnist_m.py @@ -0,0 +1,237 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Downloads and converts MNIST-M data to TFRecords of TF-Example protos. + +This module downloads the MNIST-M data, uncompresses it, reads the files +that make up the MNIST-M data and creates two TFRecord datasets: one for train +and one for test. Each TFRecord dataset is comprised of a set of TF-Example +protocol buffers, each of which contain a single image and label. + +The script should take about a minute to run. + +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import random +import sys + +# Dependency imports +import numpy as np +from six.moves import urllib +import tensorflow as tf + +from slim.datasets import dataset_utils + +tf.app.flags.DEFINE_string( + 'dataset_dir', None, + 'The directory where the output TFRecords and temporary files are saved.') + +FLAGS = tf.app.flags.FLAGS + +_IMAGE_SIZE = 32 +_NUM_CHANNELS = 3 + +# The number of images in the training set. +_NUM_TRAIN_SAMPLES = 59001 + +# The number of images to be kept from the training set for the validation set. +_NUM_VALIDATION = 1000 + +# The number of images in the test set. +_NUM_TEST_SAMPLES = 9001 + +# Seed for repeatability. +_RANDOM_SEED = 0 + +# The names of the classes. +_CLASS_NAMES = [ + 'zero', + 'one', + 'two', + 'three', + 'four', + 'five', + 'size', + 'seven', + 'eight', + 'nine', +] + + +class ImageReader(object): + """Helper class that provides TensorFlow image coding utilities.""" + + def __init__(self): + # Initializes function that decodes RGB PNG data. 
+ self._decode_png_data = tf.placeholder(dtype=tf.string) + self._decode_png = tf.image.decode_png(self._decode_png_data, channels=3) + + def read_image_dims(self, sess, image_data): + image = self.decode_png(sess, image_data) + return image.shape[0], image.shape[1] + + def decode_png(self, sess, image_data): + image = sess.run( + self._decode_png, feed_dict={self._decode_png_data: image_data}) + assert len(image.shape) == 3 + assert image.shape[2] == 3 + return image + + +def _convert_dataset(split_name, filenames, filename_to_class_id, dataset_dir): + """Converts the given filenames to a TFRecord dataset. + + Args: + split_name: The name of the dataset, either 'train' or 'valid'. + filenames: A list of absolute paths to png images. + filename_to_class_id: A dictionary from filenames (strings) to class ids + (integers). + dataset_dir: The directory where the converted datasets are stored. + """ + print('Converting the {} split.'.format(split_name)) + # Train and validation splits are both in the train directory. + if split_name in ['train', 'valid']: + png_directory = os.path.join(dataset_dir, 'mnist_m', 'mnist_m_train') + elif split_name == 'test': + png_directory = os.path.join(dataset_dir, 'mnist_m', 'mnist_m_test') + + with tf.Graph().as_default(): + image_reader = ImageReader() + + with tf.Session('') as sess: + output_filename = _get_output_filename(dataset_dir, split_name) + + with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer: + for filename in filenames: + # Read the filename: + image_data = tf.gfile.FastGFile( + os.path.join(png_directory, filename), 'r').read() + height, width = image_reader.read_image_dims(sess, image_data) + + class_id = filename_to_class_id[filename] + example = dataset_utils.image_to_tfexample(image_data, 'png', height, + width, class_id) + tfrecord_writer.write(example.SerializeToString()) + + sys.stdout.write('\n') + sys.stdout.flush() + + +def _extract_labels(label_filename): + """Extract the labels into a dict of filenames to int labels. + + Args: + labels_filename: The filename of the MNIST-M labels. + + Returns: + A dictionary of filenames to int labels. + """ + print('Extracting labels from: ', label_filename) + label_file = tf.gfile.FastGFile(label_filename, 'r').readlines() + label_lines = [line.rstrip('\n').split() for line in label_file] + labels = {} + for line in label_lines: + assert len(line) == 2 + labels[line[0]] = int(line[1]) + return labels + + +def _get_output_filename(dataset_dir, split_name): + """Creates the output filename. + + Args: + dataset_dir: The directory where the temporary files are stored. + split_name: The name of the train/test split. + + Returns: + An absolute file path. + """ + return '%s/mnist_m_%s.tfrecord' % (dataset_dir, split_name) + + +def _get_filenames(dataset_dir): + """Returns a list of filenames and inferred class names. + + Args: + dataset_dir: A directory containing a set PNG encoded MNIST-M images. + + Returns: + A list of image file paths, relative to `dataset_dir`. + """ + photo_filenames = [] + for filename in os.listdir(dataset_dir): + photo_filenames.append(filename) + return photo_filenames + + +def run(dataset_dir): + """Runs the download and conversion operation. + + Args: + dataset_dir: The dataset directory where the dataset is stored. 
+ """ + if not tf.gfile.Exists(dataset_dir): + tf.gfile.MakeDirs(dataset_dir) + + train_filename = _get_output_filename(dataset_dir, 'train') + testing_filename = _get_output_filename(dataset_dir, 'test') + + if tf.gfile.Exists(train_filename) and tf.gfile.Exists(testing_filename): + print('Dataset files already exist. Exiting without re-creating them.') + return + + # TODO(konstantinos): Add download and cleanup functionality + + train_validation_filenames = _get_filenames( + os.path.join(dataset_dir, 'mnist_m', 'mnist_m_train')) + test_filenames = _get_filenames( + os.path.join(dataset_dir, 'mnist_m', 'mnist_m_test')) + + # Divide into train and validation: + random.seed(_RANDOM_SEED) + random.shuffle(train_validation_filenames) + train_filenames = train_validation_filenames[_NUM_VALIDATION:] + validation_filenames = train_validation_filenames[:_NUM_VALIDATION] + + train_validation_filenames_to_class_ids = _extract_labels( + os.path.join(dataset_dir, 'mnist_m', 'mnist_m_train_labels.txt')) + test_filenames_to_class_ids = _extract_labels( + os.path.join(dataset_dir, 'mnist_m', 'mnist_m_test_labels.txt')) + + # Convert the train, validation, and test sets. + _convert_dataset('train', train_filenames, + train_validation_filenames_to_class_ids, dataset_dir) + _convert_dataset('valid', validation_filenames, + train_validation_filenames_to_class_ids, dataset_dir) + _convert_dataset('test', test_filenames, test_filenames_to_class_ids, + dataset_dir) + + # Finally, write the labels file: + labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES)) + dataset_utils.write_label_file(labels_to_class_names, dataset_dir) + + print('\nFinished converting the MNIST-M dataset!') + + +def main(_): + assert FLAGS.dataset_dir + run(FLAGS.dataset_dir) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/domain_adaptation/datasets/mnist_m.py b/models/research/domain_adaptation/datasets/mnist_m.py new file mode 100644 index 0000000000000000000000000000000000000000..fab6c443cf3d2e9783d19bf52c81b7aa62d56a38 --- /dev/null +++ b/models/research/domain_adaptation/datasets/mnist_m.py @@ -0,0 +1,98 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Provides data for the MNIST-M dataset. 
+ +The dataset scripts used to create the dataset can be found at: +tensorflow_models/domain_adaptation_/datasets/download_and_convert_mnist_m_dataset.py +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +# Dependency imports +import tensorflow as tf + +from slim.datasets import dataset_utils + +slim = tf.contrib.slim + +_FILE_PATTERN = 'mnist_m_%s.tfrecord' + +_SPLITS_TO_SIZES = {'train': 58001, 'valid': 1000, 'test': 9001} + +_NUM_CLASSES = 10 + +_ITEMS_TO_DESCRIPTIONS = { + 'image': 'A [32 x 32 x 1] RGB image.', + 'label': 'A single integer between 0 and 9', +} + + +def get_split(split_name, dataset_dir, file_pattern=None, reader=None): + """Gets a dataset tuple with instructions for reading MNIST. + + Args: + split_name: A train/test split name. + dataset_dir: The base directory of the dataset sources. + + Returns: + A `Dataset` namedtuple. + + Raises: + ValueError: if `split_name` is not a valid train/test split. + """ + if split_name not in _SPLITS_TO_SIZES: + raise ValueError('split name %s was not recognized.' % split_name) + + if not file_pattern: + file_pattern = _FILE_PATTERN + file_pattern = os.path.join(dataset_dir, file_pattern % split_name) + + # Allowing None in the signature so that dataset_factory can use the default. + if reader is None: + reader = tf.TFRecordReader + + keys_to_features = { + 'image/encoded': + tf.FixedLenFeature((), tf.string, default_value=''), + 'image/format': + tf.FixedLenFeature((), tf.string, default_value='png'), + 'image/class/label': + tf.FixedLenFeature( + [1], tf.int64, default_value=tf.zeros([1], dtype=tf.int64)), + } + + items_to_handlers = { + 'image': slim.tfexample_decoder.Image(shape=[32, 32, 3], channels=3), + 'label': slim.tfexample_decoder.Tensor('image/class/label', shape=[]), + } + + decoder = slim.tfexample_decoder.TFExampleDecoder( + keys_to_features, items_to_handlers) + + labels_to_names = None + if dataset_utils.has_labels(dataset_dir): + labels_to_names = dataset_utils.read_label_file(dataset_dir) + + return slim.dataset.Dataset( + data_sources=file_pattern, + reader=reader, + decoder=decoder, + num_samples=_SPLITS_TO_SIZES[split_name], + num_classes=_NUM_CLASSES, + items_to_descriptions=_ITEMS_TO_DESCRIPTIONS, + labels_to_names=labels_to_names) diff --git a/models/research/domain_adaptation/domain_separation/BUILD b/models/research/domain_adaptation/domain_separation/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..14dceda27e49d74eaaaeae21676183b78c72b9c2 --- /dev/null +++ b/models/research/domain_adaptation/domain_separation/BUILD @@ -0,0 +1,157 @@ +# Domain Separation Networks + +package( + default_visibility = [ + ":internal", + ], +) + +licenses(["notice"]) # Apache 2.0 + +exports_files(["LICENSE"]) + +package_group( + name = "internal", + packages = [ + "//domain_adaptation/...", + ], +) + +py_library( + name = "models", + srcs = [ + "models.py", + ], + deps = [ + ":utils", + ], +) + +py_library( + name = "losses", + srcs = [ + "losses.py", + ], + deps = [ + ":grl_op_grads_py", + ":grl_op_shapes_py", + ":grl_ops", + ":utils", + ], +) + +py_test( + name = "losses_test", + srcs = [ + "losses_test.py", + ], + deps = [ + ":losses", + ":utils", + ], +) + +py_library( + name = "dsn", + srcs = [ + "dsn.py", + ], + deps = [ + ":grl_op_grads_py", + ":grl_op_shapes_py", + ":grl_ops", + ":losses", + ":models", + ":utils", + ], +) + +py_test( + name = "dsn_test", + srcs = [ + "dsn_test.py", + ], + deps = [ + ":dsn", + ], +) + 
+py_binary( + name = "dsn_train", + srcs = [ + "dsn_train.py", + ], + deps = [ + ":dsn", + ":models", + "//domain_adaptation/datasets:dataset_factory", + ], +) + +py_binary( + name = "dsn_eval", + srcs = [ + "dsn_eval.py", + ], + deps = [ + ":dsn", + ":models", + "//domain_adaptation/datasets:dataset_factory", + ], +) + +py_test( + name = "models_test", + srcs = [ + "models_test.py", + ], + deps = [ + ":models", + "//domain_adaptation/datasets:dataset_factory", + ], +) + +py_library( + name = "utils", + srcs = [ + "utils.py", + ], + deps = [ + ], +) + +py_library( + name = "grl_op_grads_py", + srcs = [ + "grl_op_grads.py", + ], + deps = [ + ":grl_ops", + ], +) + +py_library( + name = "grl_op_shapes_py", + srcs = [ + "grl_op_shapes.py", + ], + deps = [ + ], +) + +py_library( + name = "grl_ops", + srcs = ["grl_ops.py"], + data = ["_grl_ops.so"], +) + +py_test( + name = "grl_ops_test", + size = "small", + srcs = ["grl_ops_test.py"], + deps = [ + ":grl_op_grads_py", + ":grl_op_shapes_py", + ":grl_ops", + ], +) diff --git a/models/research/domain_adaptation/domain_separation/__init__.py b/models/research/domain_adaptation/domain_separation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/domain_adaptation/domain_separation/_grl_ops.so b/models/research/domain_adaptation/domain_separation/_grl_ops.so new file mode 100644 index 0000000000000000000000000000000000000000..4c35473760a76dcb743d58f45eddccecb5f5161e Binary files /dev/null and b/models/research/domain_adaptation/domain_separation/_grl_ops.so differ diff --git a/models/research/domain_adaptation/domain_separation/dsn.py b/models/research/domain_adaptation/domain_separation/dsn.py new file mode 100644 index 0000000000000000000000000000000000000000..3018e8a791840ae465bad493913235cc04c31cff --- /dev/null +++ b/models/research/domain_adaptation/domain_separation/dsn.py @@ -0,0 +1,355 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Functions to create a DSN model and add the different losses to it. + +Specifically, in this file we define the: + - Shared Encoding Similarity Loss Module, with: + - The MMD Similarity method + - The Correlation Similarity method + - The Gradient Reversal (Domain-Adversarial) method + - Difference Loss Module + - Reconstruction Loss Module + - Task Loss Module +""" +from functools import partial + +import tensorflow as tf + +import losses +import models +import utils + +slim = tf.contrib.slim + + +################################################################################ +# HELPER FUNCTIONS +################################################################################ +def dsn_loss_coefficient(params): + """The global_step-dependent weight that specifies when to kick in DSN losses. + + Args: + params: A dictionary of parameters. 
Expecting 'domain_separation_startpoint' + + Returns: + A weight to that effectively enables or disables the DSN-related losses, + i.e. similarity, difference, and reconstruction losses. + """ + return tf.where( + tf.less(slim.get_or_create_global_step(), + params['domain_separation_startpoint']), 1e-10, 1.0) + + +################################################################################ +# MODEL CREATION +################################################################################ +def create_model(source_images, source_labels, domain_selection_mask, + target_images, target_labels, similarity_loss, params, + basic_tower_name): + """Creates a DSN model. + + Args: + source_images: images from the source domain, a tensor of size + [batch_size, height, width, channels] + source_labels: a dictionary with the name, tensor pairs. 'classes' is one- + hot for the number of classes. + domain_selection_mask: a boolean tensor of size [batch_size, ] which denotes + the labeled images that belong to the source domain. + target_images: images from the target domain, a tensor of size + [batch_size, height width, channels]. + target_labels: a dictionary with the name, tensor pairs. + similarity_loss: The type of method to use for encouraging + the codes from the shared encoder to be similar. + params: A dictionary of parameters. Expecting 'weight_decay', + 'layers_to_regularize', 'use_separation', 'domain_separation_startpoint', + 'alpha_weight', 'beta_weight', 'gamma_weight', 'recon_loss_name', + 'decoder_name', 'encoder_name' + basic_tower_name: the name of the tower to use for the shared encoder. + + Raises: + ValueError: if the arch is not one of the available architectures. + """ + network = getattr(models, basic_tower_name) + num_classes = source_labels['classes'].get_shape().as_list()[1] + + # Make sure we are using the appropriate number of classes. + network = partial(network, num_classes=num_classes) + + # Add the classification/pose estimation loss to the source domain. + source_endpoints = add_task_loss(source_images, source_labels, network, + params) + + if similarity_loss == 'none': + # No domain adaptation, we can stop here. + return + + with tf.variable_scope('towers', reuse=True): + target_logits, target_endpoints = network( + target_images, weight_decay=params['weight_decay'], prefix='target') + + # Plot target accuracy of the train set. + target_accuracy = utils.accuracy( + tf.argmax(target_logits, 1), tf.argmax(target_labels['classes'], 1)) + + if 'quaternions' in target_labels: + target_quaternion_loss = losses.log_quaternion_loss( + target_labels['quaternions'], target_endpoints['quaternion_pred'], + params) + tf.summary.scalar('eval/Target quaternions', target_quaternion_loss) + + tf.summary.scalar('eval/Target accuracy', target_accuracy) + + source_shared = source_endpoints[params['layers_to_regularize']] + target_shared = target_endpoints[params['layers_to_regularize']] + + # When using the semisupervised model we include labeled target data in the + # source classifier. We do not want to include these target domain when + # we use the similarity loss. 
+ indices = tf.range(0, source_shared.get_shape().as_list()[0]) + indices = tf.boolean_mask(indices, domain_selection_mask) + add_similarity_loss(similarity_loss, + tf.gather(source_shared, indices), + tf.gather(target_shared, indices), params) + + if params['use_separation']: + add_autoencoders( + source_images, + source_shared, + target_images, + target_shared, + params=params,) + + +def add_similarity_loss(method_name, + source_samples, + target_samples, + params, + scope=None): + """Adds a loss encouraging the shared encoding from each domain to be similar. + + Args: + method_name: the name of the encoding similarity method to use. Valid + options include `dann_loss', `mmd_loss' or `correlation_loss'. + source_samples: a tensor of shape [num_samples, num_features]. + target_samples: a tensor of shape [num_samples, num_features]. + params: a dictionary of parameters. Expecting 'gamma_weight'. + scope: optional name scope for summary tags. + Raises: + ValueError: if `method_name` is not recognized. + """ + weight = dsn_loss_coefficient(params) * params['gamma_weight'] + method = getattr(losses, method_name) + method(source_samples, target_samples, weight, scope) + + +def add_reconstruction_loss(recon_loss_name, images, recons, weight, domain): + """Adds a reconstruction loss. + + Args: + recon_loss_name: The name of the reconstruction loss. + images: A `Tensor` of size [batch_size, height, width, 3]. + recons: A `Tensor` whose size matches `images`. + weight: A scalar coefficient for the loss. + domain: The name of the domain being reconstructed. + + Raises: + ValueError: If `recon_loss_name` is not recognized. + """ + if recon_loss_name == 'sum_of_pairwise_squares': + loss_fn = tf.contrib.losses.mean_pairwise_squared_error + elif recon_loss_name == 'sum_of_squares': + loss_fn = tf.contrib.losses.mean_squared_error + else: + raise ValueError('recon_loss_name value [%s] not recognized.' % + recon_loss_name) + + loss = loss_fn(recons, images, weight) + assert_op = tf.Assert(tf.is_finite(loss), [loss]) + with tf.control_dependencies([assert_op]): + tf.summary.scalar('losses/%s Recon Loss' % domain, loss) + + +def add_autoencoders(source_data, source_shared, target_data, target_shared, + params): + """Adds the encoders/decoders for our domain separation model w/ incoherence. + + Args: + source_data: images from the source domain, a tensor of size + [batch_size, height, width, channels] + source_shared: a tensor with first dimension batch_size + target_data: images from the target domain, a tensor of size + [batch_size, height, width, channels] + target_shared: a tensor with first dimension batch_size + params: A dictionary of parameters. Expecting 'layers_to_regularize', + 'beta_weight', 'alpha_weight', 'recon_loss_name', 'decoder_name', + 'encoder_name', 'weight_decay' + """ + + def normalize_images(images): + images -= tf.reduce_min(images) + return images / tf.reduce_max(images) + + def concat_operation(shared_repr, private_repr): + return shared_repr + private_repr + + mu = dsn_loss_coefficient(params) + + # The layer to concatenate the networks at. + concat_layer = params['layers_to_regularize'] + + # The coefficient for modulating the private/shared difference loss. + difference_loss_weight = params['beta_weight'] * mu + + # The reconstruction weight. + recon_loss_weight = params['alpha_weight'] * mu + + # The reconstruction loss to use. + recon_loss_name = params['recon_loss_name'] + + # The decoder/encoder to use. 
+ decoder_name = params['decoder_name'] + encoder_name = params['encoder_name'] + + _, height, width, _ = source_data.get_shape().as_list() + code_size = source_shared.get_shape().as_list()[-1] + weight_decay = params['weight_decay'] + + encoder_fn = getattr(models, encoder_name) + # Target Auto-encoding. + with tf.variable_scope('source_encoder'): + source_endpoints = encoder_fn( + source_data, code_size, weight_decay=weight_decay) + + with tf.variable_scope('target_encoder'): + target_endpoints = encoder_fn( + target_data, code_size, weight_decay=weight_decay) + + decoder_fn = getattr(models, decoder_name) + + decoder = partial( + decoder_fn, + height=height, + width=width, + channels=source_data.get_shape().as_list()[-1], + weight_decay=weight_decay) + + # Source Auto-encoding. + source_private = source_endpoints[concat_layer] + target_private = target_endpoints[concat_layer] + with tf.variable_scope('decoder'): + source_recons = decoder(concat_operation(source_shared, source_private)) + + with tf.variable_scope('decoder', reuse=True): + source_private_recons = decoder( + concat_operation(tf.zeros_like(source_private), source_private)) + source_shared_recons = decoder( + concat_operation(source_shared, tf.zeros_like(source_shared))) + + with tf.variable_scope('decoder', reuse=True): + target_recons = decoder(concat_operation(target_shared, target_private)) + target_shared_recons = decoder( + concat_operation(target_shared, tf.zeros_like(target_shared))) + target_private_recons = decoder( + concat_operation(tf.zeros_like(target_private), target_private)) + + losses.difference_loss( + source_private, + source_shared, + weight=difference_loss_weight, + name='Source') + losses.difference_loss( + target_private, + target_shared, + weight=difference_loss_weight, + name='Target') + + add_reconstruction_loss(recon_loss_name, source_data, source_recons, + recon_loss_weight, 'source') + add_reconstruction_loss(recon_loss_name, target_data, target_recons, + recon_loss_weight, 'target') + + # Add summaries + source_reconstructions = tf.concat( + axis=2, + values=map(normalize_images, [ + source_data, source_recons, source_shared_recons, + source_private_recons + ])) + target_reconstructions = tf.concat( + axis=2, + values=map(normalize_images, [ + target_data, target_recons, target_shared_recons, + target_private_recons + ])) + tf.summary.image( + 'Source Images:Recons:RGB', + source_reconstructions[:, :, :, :3], + max_outputs=10) + tf.summary.image( + 'Target Images:Recons:RGB', + target_reconstructions[:, :, :, :3], + max_outputs=10) + + if source_reconstructions.get_shape().as_list()[3] == 4: + tf.summary.image( + 'Source Images:Recons:Depth', + source_reconstructions[:, :, :, 3:4], + max_outputs=10) + tf.summary.image( + 'Target Images:Recons:Depth', + target_reconstructions[:, :, :, 3:4], + max_outputs=10) + + +def add_task_loss(source_images, source_labels, basic_tower, params): + """Adds a classification and/or pose estimation loss to the model. + + Args: + source_images: images from the source domain, a tensor of size + [batch_size, height, width, channels] + source_labels: labels from the source domain, a tensor of size [batch_size]. + or a tuple of (quaternions, class_labels) + basic_tower: a function that creates the single tower of the model. + params: A dictionary of parameters. Expecting 'weight_decay', 'pose_weight'. + Returns: + The source endpoints. + + Raises: + RuntimeError: if basic tower does not support pose estimation. 
+ """ + with tf.variable_scope('towers'): + source_logits, source_endpoints = basic_tower( + source_images, weight_decay=params['weight_decay'], prefix='Source') + + if 'quaternions' in source_labels: # We have pose estimation as well + if 'quaternion_pred' not in source_endpoints: + raise RuntimeError('Please use a model for estimation e.g. pose_mini') + + loss = losses.log_quaternion_loss(source_labels['quaternions'], + source_endpoints['quaternion_pred'], + params) + + assert_op = tf.Assert(tf.is_finite(loss), [loss]) + with tf.control_dependencies([assert_op]): + quaternion_loss = loss + tf.summary.histogram('log_quaternion_loss_hist', quaternion_loss) + slim.losses.add_loss(quaternion_loss * params['pose_weight']) + tf.summary.scalar('losses/quaternion_loss', quaternion_loss) + + classification_loss = tf.losses.softmax_cross_entropy( + source_labels['classes'], source_logits) + + tf.summary.scalar('losses/classification_loss', classification_loss) + return source_endpoints diff --git a/models/research/domain_adaptation/domain_separation/dsn_eval.py b/models/research/domain_adaptation/domain_separation/dsn_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..b6cccdfcc17e8f18e8381530b5c8f41501bda29b --- /dev/null +++ b/models/research/domain_adaptation/domain_separation/dsn_eval.py @@ -0,0 +1,161 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +# pylint: disable=line-too-long +"""Evaluation for Domain Separation Networks (DSNs).""" +# pylint: enable=line-too-long +import math + +import numpy as np +from six.moves import xrange +import tensorflow as tf + +from domain_adaptation.datasets import dataset_factory +from domain_adaptation.domain_separation import losses +from domain_adaptation.domain_separation import models + +slim = tf.contrib.slim + +FLAGS = tf.app.flags.FLAGS + +tf.app.flags.DEFINE_integer('batch_size', 32, + 'The number of images in each batch.') + +tf.app.flags.DEFINE_string('master', '', + 'BNS name of the TensorFlow master to use.') + +tf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/da/', + 'Directory where the model was written to.') + +tf.app.flags.DEFINE_string( + 'eval_dir', '/tmp/da/', + 'Directory where we should write the tf summaries to.') + +tf.app.flags.DEFINE_string('dataset_dir', None, + 'The directory where the dataset files are stored.') + +tf.app.flags.DEFINE_string('dataset', 'mnist_m', + 'Which dataset to test on: "mnist", "mnist_m".') + +tf.app.flags.DEFINE_string('split', 'valid', + 'Which portion to test on: "valid", "test".') + +tf.app.flags.DEFINE_integer('num_examples', 1000, 'Number of test examples.') + +tf.app.flags.DEFINE_string('basic_tower', 'dann_mnist', + 'The basic tower building block.') + +tf.app.flags.DEFINE_bool('enable_precision_recall', False, + 'If True, precision and recall for each class will ' + 'be added to the metrics.') + +tf.app.flags.DEFINE_bool('use_logging', False, 'Debugging messages.') + + +def quaternion_metric(predictions, labels): + params = {'batch_size': FLAGS.batch_size, 'use_logging': False} + logcost = losses.log_quaternion_loss_batch(predictions, labels, params) + return slim.metrics.streaming_mean(logcost) + + +def angle_diff(true_q, pred_q): + angles = 2 * ( + 180.0 / + np.pi) * np.arccos(np.abs(np.sum(np.multiply(pred_q, true_q), axis=1))) + return angles + + +def provide_batch_fn(): + """ The provide_batch function to use. """ + return dataset_factory.provide_batch + + +def main(_): + g = tf.Graph() + with g.as_default(): + # Load the data. + images, labels = provide_batch_fn()( + FLAGS.dataset, FLAGS.split, FLAGS.dataset_dir, 4, FLAGS.batch_size, 4) + + num_classes = labels['classes'].get_shape().as_list()[1] + + tf.summary.image('eval_images', images, max_outputs=3) + + # Define the model: + with tf.variable_scope('towers'): + basic_tower = getattr(models, FLAGS.basic_tower) + predictions, endpoints = basic_tower( + images, + num_classes=num_classes, + is_training=False, + batch_norm_params=None) + metric_names_to_values = {} + + # Define the metrics: + if 'quaternions' in labels: # Also have to evaluate pose estimation! 
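+ # angle_diff (run as a py_func below) returns the angular error in degrees between
+ # predicted and ground-truth quaternions, which is then aggregated with a streaming mean.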
+ quaternion_loss = quaternion_metric(labels['quaternions'], + endpoints['quaternion_pred']) + + angle_errors, = tf.py_func( + angle_diff, [labels['quaternions'], endpoints['quaternion_pred']], + [tf.float32]) + + metric_names_to_values[ + 'Angular mean error'] = slim.metrics.streaming_mean(angle_errors) + metric_names_to_values['Quaternion Loss'] = quaternion_loss + + accuracy = tf.contrib.metrics.streaming_accuracy( + tf.argmax(predictions, 1), tf.argmax(labels['classes'], 1)) + + predictions = tf.argmax(predictions, 1) + labels = tf.argmax(labels['classes'], 1) + metric_names_to_values['Accuracy'] = accuracy + + if FLAGS.enable_precision_recall: + for i in xrange(num_classes): + index_map = tf.one_hot(i, depth=num_classes) + name = 'PR/Precision_{}'.format(i) + metric_names_to_values[name] = slim.metrics.streaming_precision( + tf.gather(index_map, predictions), tf.gather(index_map, labels)) + name = 'PR/Recall_{}'.format(i) + metric_names_to_values[name] = slim.metrics.streaming_recall( + tf.gather(index_map, predictions), tf.gather(index_map, labels)) + + names_to_values, names_to_updates = slim.metrics.aggregate_metric_map( + metric_names_to_values) + + # Create the summary ops such that they also print out to std output: + summary_ops = [] + for metric_name, metric_value in names_to_values.iteritems(): + op = tf.summary.scalar(metric_name, metric_value) + op = tf.Print(op, [metric_value], metric_name) + summary_ops.append(op) + + # This ensures that we make a single pass over all of the data. + num_batches = math.ceil(FLAGS.num_examples / float(FLAGS.batch_size)) + + # Setup the global step. + slim.get_or_create_global_step() + slim.evaluation.evaluation_loop( + FLAGS.master, + checkpoint_dir=FLAGS.checkpoint_dir, + logdir=FLAGS.eval_dir, + num_evals=num_batches, + eval_op=names_to_updates.values(), + summary_op=tf.summary.merge(summary_ops)) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/domain_adaptation/domain_separation/dsn_test.py b/models/research/domain_adaptation/domain_separation/dsn_test.py new file mode 100644 index 0000000000000000000000000000000000000000..3d687398a9b9356455f739417bc96ddb2ca5ad40 --- /dev/null +++ b/models/research/domain_adaptation/domain_separation/dsn_test.py @@ -0,0 +1,157 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for DSN model assembly functions.""" + +import numpy as np +import tensorflow as tf + +import dsn + + +class HelperFunctionsTest(tf.test.TestCase): + + def testBasicDomainSeparationStartPoint(self): + with self.test_session() as sess: + # Test for when global_step < domain_separation_startpoint + step = tf.contrib.slim.get_or_create_global_step() + sess.run(tf.global_variables_initializer()) # global_step = 0 + params = {'domain_separation_startpoint': 2} + weight = dsn.dsn_loss_coefficient(params) + weight_np = sess.run(weight) + self.assertAlmostEqual(weight_np, 1e-10) + + step_op = tf.assign_add(step, 1) + step_np = sess.run(step_op) # global_step = 1 + weight = dsn.dsn_loss_coefficient(params) + weight_np = sess.run(weight) + self.assertAlmostEqual(weight_np, 1e-10) + + # Test for when global_step >= domain_separation_startpoint + step_np = sess.run(step_op) # global_step = 2 + tf.logging.info(step_np) + weight = dsn.dsn_loss_coefficient(params) + weight_np = sess.run(weight) + self.assertAlmostEqual(weight_np, 1.0) + + +class DsnModelAssemblyTest(tf.test.TestCase): + + def _testBuildDefaultModel(self): + images = tf.to_float(np.random.rand(32, 28, 28, 1)) + labels = {} + labels['classes'] = tf.one_hot( + tf.to_int32(np.random.randint(0, 9, (32))), 10) + + params = { + 'use_separation': True, + 'layers_to_regularize': 'fc3', + 'weight_decay': 0.0, + 'ps_tasks': 1, + 'domain_separation_startpoint': 1, + 'alpha_weight': 1, + 'beta_weight': 1, + 'gamma_weight': 1, + 'recon_loss_name': 'sum_of_squares', + 'decoder_name': 'small_decoder', + 'encoder_name': 'default_encoder', + } + return images, labels, params + + def testBuildModelDann(self): + images, labels, params = self._testBuildDefaultModel() + + with self.test_session(): + dsn.create_model(images, labels, + tf.cast(tf.ones([32,]), tf.bool), images, labels, + 'dann_loss', params, 'dann_mnist') + loss_tensors = tf.contrib.losses.get_losses() + self.assertEqual(len(loss_tensors), 6) + + def testBuildModelDannSumOfPairwiseSquares(self): + images, labels, params = self._testBuildDefaultModel() + + with self.test_session(): + dsn.create_model(images, labels, + tf.cast(tf.ones([32,]), tf.bool), images, labels, + 'dann_loss', params, 'dann_mnist') + loss_tensors = tf.contrib.losses.get_losses() + self.assertEqual(len(loss_tensors), 6) + + def testBuildModelDannMultiPSTasks(self): + images, labels, params = self._testBuildDefaultModel() + params['ps_tasks'] = 10 + with self.test_session(): + dsn.create_model(images, labels, + tf.cast(tf.ones([32,]), tf.bool), images, labels, + 'dann_loss', params, 'dann_mnist') + loss_tensors = tf.contrib.losses.get_losses() + self.assertEqual(len(loss_tensors), 6) + + def testBuildModelMmd(self): + images, labels, params = self._testBuildDefaultModel() + + with self.test_session(): + dsn.create_model(images, labels, + tf.cast(tf.ones([32,]), tf.bool), images, labels, + 'mmd_loss', params, 'dann_mnist') + loss_tensors = tf.contrib.losses.get_losses() + self.assertEqual(len(loss_tensors), 6) + + def testBuildModelCorr(self): + images, labels, params = self._testBuildDefaultModel() + + with self.test_session(): + dsn.create_model(images, labels, + tf.cast(tf.ones([32,]), tf.bool), images, labels, + 'correlation_loss', params, 'dann_mnist') + loss_tensors = tf.contrib.losses.get_losses() + self.assertEqual(len(loss_tensors), 6) + + def testBuildModelNoDomainAdaptation(self): + images, labels, params = 
self._testBuildDefaultModel() + params['use_separation'] = False + with self.test_session(): + dsn.create_model(images, labels, + tf.cast(tf.ones([32,]), tf.bool), images, labels, 'none', + params, 'dann_mnist') + loss_tensors = tf.contrib.losses.get_losses() + self.assertEqual(len(loss_tensors), 1) + self.assertEqual(len(tf.contrib.losses.get_regularization_losses()), 0) + + def testBuildModelNoAdaptationWeightDecay(self): + images, labels, params = self._testBuildDefaultModel() + params['use_separation'] = False + params['weight_decay'] = 1e-5 + with self.test_session(): + dsn.create_model(images, labels, + tf.cast(tf.ones([32,]), tf.bool), images, labels, 'none', + params, 'dann_mnist') + loss_tensors = tf.contrib.losses.get_losses() + self.assertEqual(len(loss_tensors), 1) + self.assertTrue(len(tf.contrib.losses.get_regularization_losses()) >= 1) + + def testBuildModelNoSeparation(self): + images, labels, params = self._testBuildDefaultModel() + params['use_separation'] = False + with self.test_session(): + dsn.create_model(images, labels, + tf.cast(tf.ones([32,]), tf.bool), images, labels, + 'dann_loss', params, 'dann_mnist') + loss_tensors = tf.contrib.losses.get_losses() + self.assertEqual(len(loss_tensors), 2) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/domain_adaptation/domain_separation/dsn_train.py b/models/research/domain_adaptation/domain_separation/dsn_train.py new file mode 100644 index 0000000000000000000000000000000000000000..5e364ad3037b041125a3523370b3b040478f0d8e --- /dev/null +++ b/models/research/domain_adaptation/domain_separation/dsn_train.py @@ -0,0 +1,278 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Training for Domain Separation Networks (DSNs).""" +from __future__ import division + +import tensorflow as tf + +from domain_adaptation.datasets import dataset_factory +import dsn + +slim = tf.contrib.slim +FLAGS = tf.app.flags.FLAGS + +tf.app.flags.DEFINE_integer('batch_size', 32, + 'The number of images in each batch.') + +tf.app.flags.DEFINE_string('source_dataset', 'pose_synthetic', + 'Source dataset to train on.') + +tf.app.flags.DEFINE_string('target_dataset', 'pose_real', + 'Target dataset to train on.') + +tf.app.flags.DEFINE_string('target_labeled_dataset', 'none', + 'Target dataset to train on.') + +tf.app.flags.DEFINE_string('dataset_dir', None, + 'The directory where the dataset files are stored.') + +tf.app.flags.DEFINE_string('master', '', + 'BNS name of the TensorFlow master to use.') + +tf.app.flags.DEFINE_string('train_log_dir', '/tmp/da/', + 'Directory where to write event logs.') + +tf.app.flags.DEFINE_string( + 'layers_to_regularize', 'fc3', + 'Comma-separated list of layer names to use MMD regularization on.') + +tf.app.flags.DEFINE_float('learning_rate', .01, 'The learning rate') + +tf.app.flags.DEFINE_float('alpha_weight', 1e-6, + 'The coefficient for scaling the reconstruction ' + 'loss.') + +tf.app.flags.DEFINE_float( + 'beta_weight', 1e-6, + 'The coefficient for scaling the private/shared difference loss.') + +tf.app.flags.DEFINE_float( + 'gamma_weight', 1e-6, + 'The coefficient for scaling the shared encoding similarity loss.') + +tf.app.flags.DEFINE_float('pose_weight', 0.125, + 'The coefficient for scaling the pose loss.') + +tf.app.flags.DEFINE_float( + 'weight_decay', 1e-6, + 'The coefficient for the L2 regularization applied for all weights.') + +tf.app.flags.DEFINE_integer( + 'save_summaries_secs', 60, + 'The frequency with which summaries are saved, in seconds.') + +tf.app.flags.DEFINE_integer( + 'save_interval_secs', 60, + 'The frequency with which the model is saved, in seconds.') + +tf.app.flags.DEFINE_integer( + 'max_number_of_steps', None, + 'The maximum number of gradient steps. Use None to train indefinitely.') + +tf.app.flags.DEFINE_integer( + 'domain_separation_startpoint', 1, + 'The global step to add the domain separation losses.') + +tf.app.flags.DEFINE_integer( + 'bipartite_assignment_top_k', 3, + 'The number of top-k matches to use in bipartite matching adaptation.') + +tf.app.flags.DEFINE_float('decay_rate', 0.95, 'Learning rate decay factor.') + +tf.app.flags.DEFINE_integer('decay_steps', 20000, 'Learning rate decay steps.') + +tf.app.flags.DEFINE_float('momentum', 0.9, 'The momentum value.') + +tf.app.flags.DEFINE_bool('use_separation', False, + 'Use our domain separation model.') + +tf.app.flags.DEFINE_bool('use_logging', False, 'Debugging messages.') + +tf.app.flags.DEFINE_integer( + 'ps_tasks', 0, + 'The number of parameter servers. If the value is 0, then the parameters ' + 'are handled locally by the worker.') + +tf.app.flags.DEFINE_integer( + 'num_readers', 4, + 'The number of parallel readers that read data from the dataset.') + +tf.app.flags.DEFINE_integer('num_preprocessing_threads', 4, + 'The number of threads used to create the batches.') + +tf.app.flags.DEFINE_integer( + 'task', 0, + 'The Task ID. 
This value is used when training with multiple workers to ' + 'identify each worker.') + +tf.app.flags.DEFINE_string('decoder_name', 'small_decoder', + 'The decoder to use.') +tf.app.flags.DEFINE_string('encoder_name', 'default_encoder', + 'The encoder to use.') + +################################################################################ +# Flags that control the architecture and losses +################################################################################ +tf.app.flags.DEFINE_string( + 'similarity_loss', 'grl', + 'The method to use for encouraging the common encoder codes to be ' + 'similar, one of "grl", "mmd", "corr".') + +tf.app.flags.DEFINE_string('recon_loss_name', 'sum_of_pairwise_squares', + 'The name of the reconstruction loss.') + +tf.app.flags.DEFINE_string('basic_tower', 'pose_mini', + 'The basic tower building block.') + +def provide_batch_fn(): + """ The provide_batch function to use. """ + return dataset_factory.provide_batch + +def main(_): + model_params = { + 'use_separation': FLAGS.use_separation, + 'domain_separation_startpoint': FLAGS.domain_separation_startpoint, + 'layers_to_regularize': FLAGS.layers_to_regularize, + 'alpha_weight': FLAGS.alpha_weight, + 'beta_weight': FLAGS.beta_weight, + 'gamma_weight': FLAGS.gamma_weight, + 'pose_weight': FLAGS.pose_weight, + 'recon_loss_name': FLAGS.recon_loss_name, + 'decoder_name': FLAGS.decoder_name, + 'encoder_name': FLAGS.encoder_name, + 'weight_decay': FLAGS.weight_decay, + 'batch_size': FLAGS.batch_size, + 'use_logging': FLAGS.use_logging, + 'ps_tasks': FLAGS.ps_tasks, + 'task': FLAGS.task, + } + g = tf.Graph() + with g.as_default(): + with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)): + # Load the data. + source_images, source_labels = provide_batch_fn()( + FLAGS.source_dataset, 'train', FLAGS.dataset_dir, FLAGS.num_readers, + FLAGS.batch_size, FLAGS.num_preprocessing_threads) + target_images, target_labels = provide_batch_fn()( + FLAGS.target_dataset, 'train', FLAGS.dataset_dir, FLAGS.num_readers, + FLAGS.batch_size, FLAGS.num_preprocessing_threads) + + # In the unsupervised case all the samples in the labeled + # domain are from the source domain. + domain_selection_mask = tf.fill((source_images.get_shape().as_list()[0],), + True) + + # When using the semisupervised model we include labeled target data in + # the source labelled data. + if FLAGS.target_labeled_dataset != 'none': + # 1000 is the maximum number of labelled target samples that exists in + # the datasets. + target_semi_images, target_semi_labels = provide_batch_fn()( + FLAGS.target_labeled_dataset, 'train', FLAGS.batch_size) + + # Calculate the proportion of source domain samples in the semi- + # supervised setting, so that the proportion is set accordingly in the + # batches. 
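+ # Each example is kept from the source domain with probability `proportion` by
+ # thresholding a uniform random tensor; tf.where then mixes source and
+ # labelled-target examples element-wise.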
+ proportion = float(source_labels['num_train_samples']) / ( + source_labels['num_train_samples'] + + target_semi_labels['num_train_samples']) + + rnd_tensor = tf.random_uniform( + (target_semi_images.get_shape().as_list()[0],)) + + domain_selection_mask = rnd_tensor < proportion + source_images = tf.where(domain_selection_mask, source_images, + target_semi_images) + source_class_labels = tf.where(domain_selection_mask, + source_labels['classes'], + target_semi_labels['classes']) + + if 'quaternions' in source_labels: + source_pose_labels = tf.where(domain_selection_mask, + source_labels['quaternions'], + target_semi_labels['quaternions']) + (source_images, source_class_labels, source_pose_labels, + domain_selection_mask) = tf.train.shuffle_batch( + [ + source_images, source_class_labels, source_pose_labels, + domain_selection_mask + ], + FLAGS.batch_size, + 50000, + 5000, + num_threads=1, + enqueue_many=True) + + else: + (source_images, source_class_labels, + domain_selection_mask) = tf.train.shuffle_batch( + [source_images, source_class_labels, domain_selection_mask], + FLAGS.batch_size, + 50000, + 5000, + num_threads=1, + enqueue_many=True) + source_labels = {} + source_labels['classes'] = source_class_labels + if 'quaternions' in source_labels: + source_labels['quaternions'] = source_pose_labels + + slim.get_or_create_global_step() + tf.summary.image('source_images', source_images, max_outputs=3) + tf.summary.image('target_images', target_images, max_outputs=3) + + dsn.create_model( + source_images, + source_labels, + domain_selection_mask, + target_images, + target_labels, + FLAGS.similarity_loss, + model_params, + basic_tower_name=FLAGS.basic_tower) + + # Configure the optimization scheme: + learning_rate = tf.train.exponential_decay( + FLAGS.learning_rate, + slim.get_or_create_global_step(), + FLAGS.decay_steps, + FLAGS.decay_rate, + staircase=True, + name='learning_rate') + + tf.summary.scalar('learning_rate', learning_rate) + tf.summary.scalar('total_loss', tf.losses.get_total_loss()) + + opt = tf.train.MomentumOptimizer(learning_rate, FLAGS.momentum) + tf.logging.set_verbosity(tf.logging.INFO) + # Run training. + loss_tensor = slim.learning.create_train_op( + slim.losses.get_total_loss(), + opt, + summarize_gradients=True, + colocate_gradients_with_ops=True) + slim.learning.train( + train_op=loss_tensor, + logdir=FLAGS.train_log_dir, + master=FLAGS.master, + is_chief=FLAGS.task == 0, + number_of_steps=FLAGS.max_number_of_steps, + save_summaries_secs=FLAGS.save_summaries_secs, + save_interval_secs=FLAGS.save_interval_secs) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/domain_adaptation/domain_separation/grl_op_grads.py b/models/research/domain_adaptation/domain_separation/grl_op_grads.py new file mode 100644 index 0000000000000000000000000000000000000000..fcd85ba2b5e7912bffe646a73558af8184812ea6 --- /dev/null +++ b/models/research/domain_adaptation/domain_separation/grl_op_grads.py @@ -0,0 +1,34 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Gradients for operators defined in grl_ops.py.""" +import tensorflow as tf + + +@tf.RegisterGradient("GradientReversal") +def _GradientReversalGrad(_, grad): + """The gradients for `gradient_reversal`. + + Args: + _: The `gradient_reversal` `Operation` that we are differentiating, + which we can use to find the inputs and outputs of the original op. + grad: Gradient with respect to the output of the `gradient_reversal` op. + + Returns: + Gradient with respect to the input of `gradient_reversal`, which is simply + the negative of the input gradient. + + """ + return tf.negative(grad) diff --git a/models/research/domain_adaptation/domain_separation/grl_op_kernels.cc b/models/research/domain_adaptation/domain_separation/grl_op_kernels.cc new file mode 100644 index 0000000000000000000000000000000000000000..ba30128f11e9e88c702d3a80593d930519f346fe --- /dev/null +++ b/models/research/domain_adaptation/domain_separation/grl_op_kernels.cc @@ -0,0 +1,47 @@ +/* Copyright 2016 The TensorFlow Authors All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// This file contains the implementations of the ops registered in +// grl_ops.cc. + +#include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/types.pb.h" + +namespace tensorflow { + +// The gradient reversal op is used in domain adversarial training. It behaves +// as the identity op during forward propagation, and multiplies its input by -1 +// during backward propagation. +class GradientReversalOp : public OpKernel { + public: + explicit GradientReversalOp(OpKernelConstruction* context) + : OpKernel(context) {} + + // Gradient reversal op behaves as the identity op during forward + // propagation. Compute() function copied from the IdentityOp::Compute() + // function here: third_party/tensorflow/core/kernels/identity_op.h. + void Compute(OpKernelContext* context) override { + if (IsRefType(context->input_dtype(0))) { + context->forward_ref_input_to_ref_output(0, 0); + } else { + context->set_output(0, context->input(0)); + } + } +}; + +REGISTER_KERNEL_BUILDER(Name("GradientReversal").Device(DEVICE_CPU), + GradientReversalOp); + +} // namespace tensorflow diff --git a/models/research/domain_adaptation/domain_separation/grl_op_shapes.py b/models/research/domain_adaptation/domain_separation/grl_op_shapes.py new file mode 100644 index 0000000000000000000000000000000000000000..52773c680af265beca9125e48bf68152b8a34e56 --- /dev/null +++ b/models/research/domain_adaptation/domain_separation/grl_op_shapes.py @@ -0,0 +1,16 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Shape inference for operators defined in grl_ops.cc.""" diff --git a/models/research/domain_adaptation/domain_separation/grl_ops.cc b/models/research/domain_adaptation/domain_separation/grl_ops.cc new file mode 100644 index 0000000000000000000000000000000000000000..d441c2b484215605db65a043be6cfa0ab90da2c3 --- /dev/null +++ b/models/research/domain_adaptation/domain_separation/grl_ops.cc @@ -0,0 +1,36 @@ +/* Copyright 2016 The TensorFlow Authors All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// Contains custom ops. + +#include "tensorflow/core/framework/common_shape_fns.h" +#include "tensorflow/core/framework/op.h" + +namespace tensorflow { + +// This custom op is used by adversarial training. +REGISTER_OP("GradientReversal") + .Input("input: float") + .Output("output: float") + .SetShapeFn(shape_inference::UnchangedShape) + .Doc(R"doc( +This op copies the input to the output during forward propagation, and +negates the input during backward propagation. + +input: Tensor. +output: Tensor, copied from input. +)doc"); + +} // namespace tensorflow diff --git a/models/research/domain_adaptation/domain_separation/grl_ops.py b/models/research/domain_adaptation/domain_separation/grl_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..50447247b10caf3e41f3c0fb1c6f943dd3d9de6e --- /dev/null +++ b/models/research/domain_adaptation/domain_separation/grl_ops.py @@ -0,0 +1,28 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""GradientReversal op Python library.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os.path + +import tensorflow as tf + +tf.logging.info(tf.resource_loader.get_data_files_path()) +_grl_ops_module = tf.load_op_library( + os.path.join(tf.resource_loader.get_data_files_path(), + '_grl_ops.so')) +gradient_reversal = _grl_ops_module.gradient_reversal diff --git a/models/research/domain_adaptation/domain_separation/grl_ops_test.py b/models/research/domain_adaptation/domain_separation/grl_ops_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b431a6c02b60ade92a653d2ee8108c0586c70fbb --- /dev/null +++ b/models/research/domain_adaptation/domain_separation/grl_ops_test.py @@ -0,0 +1,73 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for grl_ops.""" + +#from models.domain_adaptation.domain_separation import grl_op_grads # pylint: disable=unused-import +#from models.domain_adaptation.domain_separation import grl_op_shapes # pylint: disable=unused-import +import tensorflow as tf + +import grl_op_grads +import grl_ops + +FLAGS = tf.app.flags.FLAGS + + +class GRLOpsTest(tf.test.TestCase): + + def testGradientReversalOp(self): + with tf.Graph().as_default(): + with self.test_session(): + # Test that in forward prop, gradient reversal op acts as the + # identity operation. + examples = tf.constant([5.0, 4.0, 3.0, 2.0, 1.0]) + output = grl_ops.gradient_reversal(examples) + expected_output = examples + self.assertAllEqual(output.eval(), expected_output.eval()) + + # Test that shape inference works as expected. + self.assertAllEqual(output.get_shape(), expected_output.get_shape()) + + # Test that in backward prop, gradient reversal op multiplies + # gradients by -1. + examples = tf.constant([[1.0]]) + w = tf.get_variable(name='w', shape=[1, 1]) + b = tf.get_variable(name='b', shape=[1]) + init_op = tf.global_variables_initializer() + init_op.run() + features = tf.nn.xw_plus_b(examples, w, b) + # Construct two outputs: features layer passes directly to output1, but + # features layer passes through a gradient reversal layer before + # reaching output2. + output1 = features + output2 = grl_ops.gradient_reversal(features) + gold = tf.constant([1.0]) + loss1 = gold - output1 + loss2 = gold - output2 + opt = tf.train.GradientDescentOptimizer(learning_rate=0.01) + grads_and_vars_1 = opt.compute_gradients(loss1, + tf.trainable_variables()) + grads_and_vars_2 = opt.compute_gradients(loss2, + tf.trainable_variables()) + self.assertAllEqual(len(grads_and_vars_1), len(grads_and_vars_2)) + for i in range(len(grads_and_vars_1)): + g1 = grads_and_vars_1[i][0] + g2 = grads_and_vars_2[i][0] + # Verify that gradients of loss1 are the negative of gradients of + # loss2. 
+ self.assertAllEqual(tf.negative(g1).eval(), g2.eval()) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/domain_adaptation/domain_separation/losses.py b/models/research/domain_adaptation/domain_separation/losses.py new file mode 100644 index 0000000000000000000000000000000000000000..0d882340de10e4dd64d44f9357e8bfc5b1dd4712 --- /dev/null +++ b/models/research/domain_adaptation/domain_separation/losses.py @@ -0,0 +1,290 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Domain Adaptation Loss Functions. + +The following domain adaptation loss functions are defined: + +- Maximum Mean Discrepancy (MMD). + Relevant paper: + Gretton, Arthur, et al., + "A kernel two-sample test." + The Journal of Machine Learning Research, 2012 + +- Correlation Loss on a batch. +""" +from functools import partial +import tensorflow as tf + +import grl_op_grads # pylint: disable=unused-import +import grl_op_shapes # pylint: disable=unused-import +import grl_ops +import utils +slim = tf.contrib.slim + + +################################################################################ +# SIMILARITY LOSS +################################################################################ +def maximum_mean_discrepancy(x, y, kernel=utils.gaussian_kernel_matrix): + r"""Computes the Maximum Mean Discrepancy (MMD) of two samples: x and y. + + Maximum Mean Discrepancy (MMD) is a distance-measure between the samples of + the distributions of x and y. Here we use the kernel two sample estimate + using the empirical mean of the two distributions. + + MMD^2(P, Q) = || \E{\phi(x)} - \E{\phi(y)} ||^2 + = \E{ K(x, x) } + \E{ K(y, y) } - 2 \E{ K(x, y) }, + + where K = <\phi(x), \phi(y)>, + is the desired kernel function, in this case a radial basis kernel. + + Args: + x: a tensor of shape [num_samples, num_features] + y: a tensor of shape [num_samples, num_features] + kernel: a function which computes the kernel in MMD. Defaults to the + GaussianKernelMatrix. + + Returns: + a scalar denoting the squared maximum mean discrepancy loss. + """ + with tf.name_scope('MaximumMeanDiscrepancy'): + # \E{ K(x, x) } + \E{ K(y, y) } - 2 \E{ K(x, y) } + cost = tf.reduce_mean(kernel(x, x)) + cost += tf.reduce_mean(kernel(y, y)) + cost -= 2 * tf.reduce_mean(kernel(x, y)) + + # We do not allow the loss to become negative. + cost = tf.where(cost > 0, cost, 0, name='value') + return cost + + +def mmd_loss(source_samples, target_samples, weight, scope=None): + """Adds a similarity loss term, the MMD between two representations. + + This Maximum Mean Discrepancy (MMD) loss is calculated with a number of + different Gaussian kernels. + + Args: + source_samples: a tensor of shape [num_samples, num_features]. + target_samples: a tensor of shape [num_samples, num_features]. + weight: the weight of the MMD loss. + scope: optional name scope for summary tags. 
+ + Returns: + a scalar tensor representing the MMD loss value. + """ + sigmas = [ + 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100, + 1e3, 1e4, 1e5, 1e6 + ] + gaussian_kernel = partial( + utils.gaussian_kernel_matrix, sigmas=tf.constant(sigmas)) + + loss_value = maximum_mean_discrepancy( + source_samples, target_samples, kernel=gaussian_kernel) + loss_value = tf.maximum(1e-4, loss_value) * weight + assert_op = tf.Assert(tf.is_finite(loss_value), [loss_value]) + with tf.control_dependencies([assert_op]): + tag = 'MMD Loss' + if scope: + tag = scope + tag + tf.summary.scalar(tag, loss_value) + tf.losses.add_loss(loss_value) + + return loss_value + + +def correlation_loss(source_samples, target_samples, weight, scope=None): + """Adds a similarity loss term, the correlation between two representations. + + Args: + source_samples: a tensor of shape [num_samples, num_features] + target_samples: a tensor of shape [num_samples, num_features] + weight: a scalar weight for the loss. + scope: optional name scope for summary tags. + + Returns: + a scalar tensor representing the correlation loss value. + """ + with tf.name_scope('corr_loss'): + source_samples -= tf.reduce_mean(source_samples, 0) + target_samples -= tf.reduce_mean(target_samples, 0) + + source_samples = tf.nn.l2_normalize(source_samples, 1) + target_samples = tf.nn.l2_normalize(target_samples, 1) + + source_cov = tf.matmul(tf.transpose(source_samples), source_samples) + target_cov = tf.matmul(tf.transpose(target_samples), target_samples) + + corr_loss = tf.reduce_mean(tf.square(source_cov - target_cov)) * weight + + assert_op = tf.Assert(tf.is_finite(corr_loss), [corr_loss]) + with tf.control_dependencies([assert_op]): + tag = 'Correlation Loss' + if scope: + tag = scope + tag + tf.summary.scalar(tag, corr_loss) + tf.losses.add_loss(corr_loss) + + return corr_loss + + +def dann_loss(source_samples, target_samples, weight, scope=None): + """Adds the domain adversarial (DANN) loss. + + Args: + source_samples: a tensor of shape [num_samples, num_features]. + target_samples: a tensor of shape [num_samples, num_features]. + weight: the weight of the loss. + scope: optional name scope for summary tags. + + Returns: + a scalar tensor representing the correlation loss value. + """ + with tf.variable_scope('dann'): + batch_size = tf.shape(source_samples)[0] + samples = tf.concat(axis=0, values=[source_samples, target_samples]) + samples = slim.flatten(samples) + + domain_selection_mask = tf.concat( + axis=0, values=[tf.zeros((batch_size, 1)), tf.ones((batch_size, 1))]) + + # Perform the gradient reversal and be careful with the shape. 
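+ # gradient_reversal is the identity in the forward pass and negates gradients in the
+ # backward pass (see grl_op_grads.py), so minimizing the domain-classification loss
+ # below trains the shared encoder adversarially.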
+ grl = grl_ops.gradient_reversal(samples) + grl = tf.reshape(grl, (-1, samples.get_shape().as_list()[1])) + + grl = slim.fully_connected(grl, 100, scope='fc1') + logits = slim.fully_connected(grl, 1, activation_fn=None, scope='fc2') + + domain_predictions = tf.sigmoid(logits) + + domain_loss = tf.losses.log_loss( + domain_selection_mask, domain_predictions, weights=weight) + + domain_accuracy = utils.accuracy( + tf.round(domain_predictions), domain_selection_mask) + + assert_op = tf.Assert(tf.is_finite(domain_loss), [domain_loss]) + with tf.control_dependencies([assert_op]): + tag_loss = 'losses/domain_loss' + tag_accuracy = 'losses/domain_accuracy' + if scope: + tag_loss = scope + tag_loss + tag_accuracy = scope + tag_accuracy + + tf.summary.scalar(tag_loss, domain_loss) + tf.summary.scalar(tag_accuracy, domain_accuracy) + + return domain_loss + + +################################################################################ +# DIFFERENCE LOSS +################################################################################ +def difference_loss(private_samples, shared_samples, weight=1.0, name=''): + """Adds the difference loss between the private and shared representations. + + Args: + private_samples: a tensor of shape [num_samples, num_features]. + shared_samples: a tensor of shape [num_samples, num_features]. + weight: the weight of the incoherence loss. + name: the name of the tf summary. + """ + private_samples -= tf.reduce_mean(private_samples, 0) + shared_samples -= tf.reduce_mean(shared_samples, 0) + + private_samples = tf.nn.l2_normalize(private_samples, 1) + shared_samples = tf.nn.l2_normalize(shared_samples, 1) + + correlation_matrix = tf.matmul( + private_samples, shared_samples, transpose_a=True) + + cost = tf.reduce_mean(tf.square(correlation_matrix)) * weight + cost = tf.where(cost > 0, cost, 0, name='value') + + tf.summary.scalar('losses/Difference Loss {}'.format(name), + cost) + assert_op = tf.Assert(tf.is_finite(cost), [cost]) + with tf.control_dependencies([assert_op]): + tf.losses.add_loss(cost) + + +################################################################################ +# TASK LOSS +################################################################################ +def log_quaternion_loss_batch(predictions, labels, params): + """A helper function to compute the error between quaternions. + + Args: + predictions: A Tensor of size [batch_size, 4]. + labels: A Tensor of size [batch_size, 4]. + params: A dictionary of parameters. Expecting 'use_logging', 'batch_size'. + + Returns: + A Tensor of size [batch_size], denoting the error between the quaternions. 
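+ The per-example cost is log(1e-4 + 1 - |<predictions, labels>|), which is smallest
+ when the two quaternions agree up to sign.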
+ """ + use_logging = params['use_logging'] + assertions = [] + if use_logging: + assertions.append( + tf.Assert( + tf.reduce_all( + tf.less( + tf.abs(tf.reduce_sum(tf.square(predictions), [1]) - 1), + 1e-4)), + ['The l2 norm of each prediction quaternion vector should be 1.'])) + assertions.append( + tf.Assert( + tf.reduce_all( + tf.less( + tf.abs(tf.reduce_sum(tf.square(labels), [1]) - 1), 1e-4)), + ['The l2 norm of each label quaternion vector should be 1.'])) + + with tf.control_dependencies(assertions): + product = tf.multiply(predictions, labels) + internal_dot_products = tf.reduce_sum(product, [1]) + + if use_logging: + internal_dot_products = tf.Print( + internal_dot_products, + [internal_dot_products, tf.shape(internal_dot_products)], + 'internal_dot_products:') + + logcost = tf.log(1e-4 + 1 - tf.abs(internal_dot_products)) + return logcost + + +def log_quaternion_loss(predictions, labels, params): + """A helper function to compute the mean error between batches of quaternions. + + The caller is expected to add the loss to the graph. + + Args: + predictions: A Tensor of size [batch_size, 4]. + labels: A Tensor of size [batch_size, 4]. + params: A dictionary of parameters. Expecting 'use_logging', 'batch_size'. + + Returns: + A Tensor of size 1, denoting the mean error between batches of quaternions. + """ + use_logging = params['use_logging'] + logcost = log_quaternion_loss_batch(predictions, labels, params) + logcost = tf.reduce_sum(logcost, [0]) + batch_size = params['batch_size'] + logcost = tf.multiply(logcost, 1.0 / batch_size, name='log_quaternion_loss') + if use_logging: + logcost = tf.Print( + logcost, [logcost], '[logcost]', name='log_quaternion_loss_print') + return logcost diff --git a/models/research/domain_adaptation/domain_separation/losses_test.py b/models/research/domain_adaptation/domain_separation/losses_test.py new file mode 100644 index 0000000000000000000000000000000000000000..46e50301be56f5977adcb3fb00587f076934b785 --- /dev/null +++ b/models/research/domain_adaptation/domain_separation/losses_test.py @@ -0,0 +1,110 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for DSN losses.""" +from functools import partial + +import numpy as np +import tensorflow as tf + +import losses +import utils + + +def MaximumMeanDiscrepancySlow(x, y, sigmas): + num_samples = x.get_shape().as_list()[0] + + def AverageGaussianKernel(x, y, sigmas): + result = 0 + for sigma in sigmas: + dist = tf.reduce_sum(tf.square(x - y)) + result += tf.exp((-1.0 / (2.0 * sigma)) * dist) + return result / num_samples**2 + + total = 0 + + for i in range(num_samples): + for j in range(num_samples): + total += AverageGaussianKernel(x[i, :], x[j, :], sigmas) + total += AverageGaussianKernel(y[i, :], y[j, :], sigmas) + total += -2 * AverageGaussianKernel(x[i, :], y[j, :], sigmas) + + return total + + +class LogQuaternionLossTest(tf.test.TestCase): + + def test_log_quaternion_loss_batch(self): + with self.test_session(): + predictions = tf.random_uniform((10, 4), seed=1) + predictions = tf.nn.l2_normalize(predictions, 1) + labels = tf.random_uniform((10, 4), seed=1) + labels = tf.nn.l2_normalize(labels, 1) + params = {'batch_size': 10, 'use_logging': False} + x = losses.log_quaternion_loss_batch(predictions, labels, params) + self.assertTrue(((10,) == tf.shape(x).eval()).all()) + + +class MaximumMeanDiscrepancyTest(tf.test.TestCase): + + def test_mmd_name(self): + with self.test_session(): + x = tf.random_uniform((2, 3), seed=1) + kernel = partial(utils.gaussian_kernel_matrix, sigmas=tf.constant([1.])) + loss = losses.maximum_mean_discrepancy(x, x, kernel) + + self.assertEquals(loss.op.name, 'MaximumMeanDiscrepancy/value') + + def test_mmd_is_zero_when_inputs_are_same(self): + with self.test_session(): + x = tf.random_uniform((2, 3), seed=1) + kernel = partial(utils.gaussian_kernel_matrix, sigmas=tf.constant([1.])) + self.assertEquals(0, losses.maximum_mean_discrepancy(x, x, kernel).eval()) + + def test_fast_mmd_is_similar_to_slow_mmd(self): + with self.test_session(): + x = tf.constant(np.random.normal(size=(2, 3)), tf.float32) + y = tf.constant(np.random.rand(2, 3), tf.float32) + + cost_old = MaximumMeanDiscrepancySlow(x, y, [1.]).eval() + kernel = partial(utils.gaussian_kernel_matrix, sigmas=tf.constant([1.])) + cost_new = losses.maximum_mean_discrepancy(x, y, kernel).eval() + + self.assertAlmostEqual(cost_old, cost_new, delta=1e-5) + + def test_multiple_sigmas(self): + with self.test_session(): + x = tf.constant(np.random.normal(size=(2, 3)), tf.float32) + y = tf.constant(np.random.rand(2, 3), tf.float32) + + sigmas = tf.constant([2., 5., 10, 20, 30]) + kernel = partial(utils.gaussian_kernel_matrix, sigmas=sigmas) + cost_old = MaximumMeanDiscrepancySlow(x, y, [2., 5., 10, 20, 30]).eval() + cost_new = losses.maximum_mean_discrepancy(x, y, kernel=kernel).eval() + + self.assertAlmostEqual(cost_old, cost_new, delta=1e-5) + + def test_mmd_is_zero_when_distributions_are_same(self): + + with self.test_session(): + x = tf.random_uniform((1000, 10), seed=1) + y = tf.random_uniform((1000, 10), seed=3) + + kernel = partial(utils.gaussian_kernel_matrix, sigmas=tf.constant([100.])) + loss = losses.maximum_mean_discrepancy(x, y, kernel=kernel).eval() + + self.assertAlmostEqual(0, loss, delta=1e-4) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/domain_adaptation/domain_separation/models.py b/models/research/domain_adaptation/domain_separation/models.py new file mode 100644 index 0000000000000000000000000000000000000000..04ccaf82eb9b31a6ea78871204c7df70eca3fbfd --- /dev/null +++ 
b/models/research/domain_adaptation/domain_separation/models.py @@ -0,0 +1,443 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Contains different architectures for the different DSN parts. + +We define here the modules that can be used in the different parts of the DSN +model. +- shared encoder (dsn_cropped_linemod, dann_xxxx) +- private encoder (default_encoder) +- decoder (large_decoder, gtsrb_decoder, small_decoder) +""" +import tensorflow as tf + +#from models.domain_adaptation.domain_separation +import utils + +slim = tf.contrib.slim + + +def default_batch_norm_params(is_training=False): + """Returns default batch normalization parameters for DSNs. + + Args: + is_training: whether or not the model is training. + + Returns: + a dictionary that maps batch norm parameter names (strings) to values. + """ + return { + # Decay for the moving averages. + 'decay': 0.5, + # epsilon to prevent 0s in variance. + 'epsilon': 0.001, + 'is_training': is_training + } + + +################################################################################ +# PRIVATE ENCODERS +################################################################################ +def default_encoder(images, code_size, batch_norm_params=None, + weight_decay=0.0): + """Encodes the given images to codes of the given size. + + Args: + images: a tensor of size [batch_size, height, width, 1]. + code_size: the number of hidden units in the code layer of the classifier. + batch_norm_params: a dictionary that maps batch norm parameter names to + values. + weight_decay: the value for the weight decay coefficient. + + Returns: + end_points: the code of the input. + """ + end_points = {} + with slim.arg_scope( + [slim.conv2d, slim.fully_connected], + weights_regularizer=slim.l2_regularizer(weight_decay), + activation_fn=tf.nn.relu, + normalizer_fn=slim.batch_norm, + normalizer_params=batch_norm_params): + with slim.arg_scope([slim.conv2d], kernel_size=[5, 5], padding='SAME'): + net = slim.conv2d(images, 32, scope='conv1') + net = slim.max_pool2d(net, [2, 2], 2, scope='pool1') + net = slim.conv2d(net, 64, scope='conv2') + net = slim.max_pool2d(net, [2, 2], 2, scope='pool2') + + net = slim.flatten(net) + end_points['flatten'] = net + net = slim.fully_connected(net, code_size, scope='fc1') + end_points['fc3'] = net + return end_points + + +################################################################################ +# DECODERS +################################################################################ +def large_decoder(codes, + height, + width, + channels, + batch_norm_params=None, + weight_decay=0.0): + """Decodes the codes to a fixed output size. + + Args: + codes: a tensor of size [batch_size, code_size]. + height: the height of the output images. + width: the width of the output images. + channels: the number of the output channels. 
+ batch_norm_params: a dictionary that maps batch norm parameter names to + values. + weight_decay: the value for the weight decay coefficient. + + Returns: + recons: the reconstruction tensor of shape [batch_size, height, width, 3]. + """ + with slim.arg_scope( + [slim.conv2d, slim.fully_connected], + weights_regularizer=slim.l2_regularizer(weight_decay), + activation_fn=tf.nn.relu, + normalizer_fn=slim.batch_norm, + normalizer_params=batch_norm_params): + net = slim.fully_connected(codes, 600, scope='fc1') + batch_size = net.get_shape().as_list()[0] + net = tf.reshape(net, [batch_size, 10, 10, 6]) + + net = slim.conv2d(net, 32, [5, 5], scope='conv1_1') + + net = tf.image.resize_nearest_neighbor(net, (16, 16)) + + net = slim.conv2d(net, 32, [5, 5], scope='conv2_1') + + net = tf.image.resize_nearest_neighbor(net, (32, 32)) + + net = slim.conv2d(net, 32, [5, 5], scope='conv3_2') + + output_size = [height, width] + net = tf.image.resize_nearest_neighbor(net, output_size) + + with slim.arg_scope([slim.conv2d], kernel_size=[3, 3]): + net = slim.conv2d(net, channels, activation_fn=None, scope='conv4_1') + + return net + + +def gtsrb_decoder(codes, + height, + width, + channels, + batch_norm_params=None, + weight_decay=0.0): + """Decodes the codes to a fixed output size. This decoder is specific to GTSRB + + Args: + codes: a tensor of size [batch_size, 100]. + height: the height of the output images. + width: the width of the output images. + channels: the number of the output channels. + batch_norm_params: a dictionary that maps batch norm parameter names to + values. + weight_decay: the value for the weight decay coefficient. + + Returns: + recons: the reconstruction tensor of shape [batch_size, height, width, 3]. + + Raises: + ValueError: When the input code size is not 100. + """ + batch_size, code_size = codes.get_shape().as_list() + if code_size != 100: + raise ValueError('The code size used as an input to the GTSRB decoder is ' + 'expected to be 100.') + + with slim.arg_scope( + [slim.conv2d, slim.fully_connected], + weights_regularizer=slim.l2_regularizer(weight_decay), + activation_fn=tf.nn.relu, + normalizer_fn=slim.batch_norm, + normalizer_params=batch_norm_params): + net = codes + net = tf.reshape(net, [batch_size, 10, 10, 1]) + net = slim.conv2d(net, 32, [3, 3], scope='conv1_1') + + # First upsampling 20x20 + net = tf.image.resize_nearest_neighbor(net, [20, 20]) + + net = slim.conv2d(net, 32, [3, 3], scope='conv2_1') + + output_size = [height, width] + # Final upsampling 40 x 40 + net = tf.image.resize_nearest_neighbor(net, output_size) + + with slim.arg_scope([slim.conv2d], kernel_size=[3, 3]): + net = slim.conv2d(net, 16, scope='conv3_1') + net = slim.conv2d(net, channels, activation_fn=None, scope='conv3_2') + + return net + + +def small_decoder(codes, + height, + width, + channels, + batch_norm_params=None, + weight_decay=0.0): + """Decodes the codes to a fixed output size. + + Args: + codes: a tensor of size [batch_size, code_size]. + height: the height of the output images. + width: the width of the output images. + channels: the number of the output channels. + batch_norm_params: a dictionary that maps batch norm parameter names to + values. + weight_decay: the value for the weight decay coefficient. + + Returns: + recons: the reconstruction tensor of shape [batch_size, height, width, 3]. 
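+ The code is projected to 300 units, reshaped to [batch_size, 10, 10, 3], and upsampled
+ to the requested output size with nearest-neighbor resizing.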
+ """ + with slim.arg_scope( + [slim.conv2d, slim.fully_connected], + weights_regularizer=slim.l2_regularizer(weight_decay), + activation_fn=tf.nn.relu, + normalizer_fn=slim.batch_norm, + normalizer_params=batch_norm_params): + net = slim.fully_connected(codes, 300, scope='fc1') + batch_size = net.get_shape().as_list()[0] + net = tf.reshape(net, [batch_size, 10, 10, 3]) + + net = slim.conv2d(net, 16, [3, 3], scope='conv1_1') + net = slim.conv2d(net, 16, [3, 3], scope='conv1_2') + + output_size = [height, width] + net = tf.image.resize_nearest_neighbor(net, output_size) + + with slim.arg_scope([slim.conv2d], kernel_size=[3, 3]): + net = slim.conv2d(net, 16, scope='conv2_1') + net = slim.conv2d(net, channels, activation_fn=None, scope='conv2_2') + + return net + + +################################################################################ +# SHARED ENCODERS +################################################################################ +def dann_mnist(images, + weight_decay=0.0, + prefix='model', + num_classes=10, + **kwargs): + """Creates a convolution MNIST model. + + Note that this model implements the architecture for MNIST proposed in: + Y. Ganin et al., Domain-Adversarial Training of Neural Networks (DANN), + JMLR 2015 + + Args: + images: the MNIST digits, a tensor of size [batch_size, 28, 28, 1]. + weight_decay: the value for the weight decay coefficient. + prefix: name of the model to use when prefixing tags. + num_classes: the number of output classes to use. + **kwargs: Placeholder for keyword arguments used by other shared encoders. + + Returns: + the output logits, a tensor of size [batch_size, num_classes]. + a dictionary with key/values the layer names and tensors. + """ + end_points = {} + + with slim.arg_scope( + [slim.conv2d, slim.fully_connected], + weights_regularizer=slim.l2_regularizer(weight_decay), + activation_fn=tf.nn.relu,): + with slim.arg_scope([slim.conv2d], padding='SAME'): + end_points['conv1'] = slim.conv2d(images, 32, [5, 5], scope='conv1') + end_points['pool1'] = slim.max_pool2d( + end_points['conv1'], [2, 2], 2, scope='pool1') + end_points['conv2'] = slim.conv2d( + end_points['pool1'], 48, [5, 5], scope='conv2') + end_points['pool2'] = slim.max_pool2d( + end_points['conv2'], [2, 2], 2, scope='pool2') + end_points['fc3'] = slim.fully_connected( + slim.flatten(end_points['pool2']), 100, scope='fc3') + end_points['fc4'] = slim.fully_connected( + slim.flatten(end_points['fc3']), 100, scope='fc4') + + logits = slim.fully_connected( + end_points['fc4'], num_classes, activation_fn=None, scope='fc5') + + return logits, end_points + + +def dann_svhn(images, + weight_decay=0.0, + prefix='model', + num_classes=10, + **kwargs): + """Creates the convolutional SVHN model. + + Note that this model implements the architecture for MNIST proposed in: + Y. Ganin et al., Domain-Adversarial Training of Neural Networks (DANN), + JMLR 2015 + + Args: + images: the SVHN digits, a tensor of size [batch_size, 32, 32, 3]. + weight_decay: the value for the weight decay coefficient. + prefix: name of the model to use when prefixing tags. + num_classes: the number of output classes to use. + **kwargs: Placeholder for keyword arguments used by other shared encoders. + + Returns: + the output logits, a tensor of size [batch_size, num_classes]. + a dictionary with key/values the layer names and tensors. 
+ """ + + end_points = {} + + with slim.arg_scope( + [slim.conv2d, slim.fully_connected], + weights_regularizer=slim.l2_regularizer(weight_decay), + activation_fn=tf.nn.relu,): + with slim.arg_scope([slim.conv2d], padding='SAME'): + + end_points['conv1'] = slim.conv2d(images, 64, [5, 5], scope='conv1') + end_points['pool1'] = slim.max_pool2d( + end_points['conv1'], [3, 3], 2, scope='pool1') + end_points['conv2'] = slim.conv2d( + end_points['pool1'], 64, [5, 5], scope='conv2') + end_points['pool2'] = slim.max_pool2d( + end_points['conv2'], [3, 3], 2, scope='pool2') + end_points['conv3'] = slim.conv2d( + end_points['pool2'], 128, [5, 5], scope='conv3') + + end_points['fc3'] = slim.fully_connected( + slim.flatten(end_points['conv3']), 3072, scope='fc3') + end_points['fc4'] = slim.fully_connected( + slim.flatten(end_points['fc3']), 2048, scope='fc4') + + logits = slim.fully_connected( + end_points['fc4'], num_classes, activation_fn=None, scope='fc5') + + return logits, end_points + + +def dann_gtsrb(images, + weight_decay=0.0, + prefix='model', + num_classes=43, + **kwargs): + """Creates the convolutional GTSRB model. + + Note that this model implements the architecture for MNIST proposed in: + Y. Ganin et al., Domain-Adversarial Training of Neural Networks (DANN), + JMLR 2015 + + Args: + images: the GTSRB images, a tensor of size [batch_size, 40, 40, 3]. + weight_decay: the value for the weight decay coefficient. + prefix: name of the model to use when prefixing tags. + num_classes: the number of output classes to use. + **kwargs: Placeholder for keyword arguments used by other shared encoders. + + Returns: + the output logits, a tensor of size [batch_size, num_classes]. + a dictionary with key/values the layer names and tensors. + """ + + end_points = {} + + with slim.arg_scope( + [slim.conv2d, slim.fully_connected], + weights_regularizer=slim.l2_regularizer(weight_decay), + activation_fn=tf.nn.relu,): + with slim.arg_scope([slim.conv2d], padding='SAME'): + + end_points['conv1'] = slim.conv2d(images, 96, [5, 5], scope='conv1') + end_points['pool1'] = slim.max_pool2d( + end_points['conv1'], [2, 2], 2, scope='pool1') + end_points['conv2'] = slim.conv2d( + end_points['pool1'], 144, [3, 3], scope='conv2') + end_points['pool2'] = slim.max_pool2d( + end_points['conv2'], [2, 2], 2, scope='pool2') + end_points['conv3'] = slim.conv2d( + end_points['pool2'], 256, [5, 5], scope='conv3') + end_points['pool3'] = slim.max_pool2d( + end_points['conv3'], [2, 2], 2, scope='pool3') + + end_points['fc3'] = slim.fully_connected( + slim.flatten(end_points['pool3']), 512, scope='fc3') + + logits = slim.fully_connected( + end_points['fc3'], num_classes, activation_fn=None, scope='fc4') + + return logits, end_points + + +def dsn_cropped_linemod(images, + weight_decay=0.0, + prefix='model', + num_classes=11, + batch_norm_params=None, + is_training=False): + """Creates the convolutional pose estimation model for Cropped Linemod. + + Args: + images: the Cropped Linemod samples, a tensor of size + [batch_size, 64, 64, 4]. + weight_decay: the value for the weight decay coefficient. + prefix: name of the model to use when prefixing tags. + num_classes: the number of output classes to use. + batch_norm_params: a dictionary that maps batch norm parameter names to + values. + is_training: specifies whether or not we're currently training the model. + This variable will determine the behaviour of the dropout layer. + + Returns: + the output logits, a tensor of size [batch_size, num_classes]. 
+ a dictionary with key/values the layer names and tensors. + """ + + end_points = {} + + tf.summary.image('{}/input_images'.format(prefix), images) + with slim.arg_scope( + [slim.conv2d, slim.fully_connected], + weights_regularizer=slim.l2_regularizer(weight_decay), + activation_fn=tf.nn.relu, + normalizer_fn=slim.batch_norm if batch_norm_params else None, + normalizer_params=batch_norm_params): + with slim.arg_scope([slim.conv2d], padding='SAME'): + end_points['conv1'] = slim.conv2d(images, 32, [5, 5], scope='conv1') + end_points['pool1'] = slim.max_pool2d( + end_points['conv1'], [2, 2], 2, scope='pool1') + end_points['conv2'] = slim.conv2d( + end_points['pool1'], 64, [5, 5], scope='conv2') + end_points['pool2'] = slim.max_pool2d( + end_points['conv2'], [2, 2], 2, scope='pool2') + net = slim.flatten(end_points['pool2']) + end_points['fc3'] = slim.fully_connected(net, 128, scope='fc3') + net = slim.dropout( + end_points['fc3'], 0.5, is_training=is_training, scope='dropout') + + with tf.variable_scope('quaternion_prediction'): + predicted_quaternion = slim.fully_connected( + net, 4, activation_fn=tf.nn.tanh) + predicted_quaternion = tf.nn.l2_normalize(predicted_quaternion, 1) + logits = slim.fully_connected( + net, num_classes, activation_fn=None, scope='fc4') + end_points['quaternion_pred'] = predicted_quaternion + + return logits, end_points diff --git a/models/research/domain_adaptation/domain_separation/models_test.py b/models/research/domain_adaptation/domain_separation/models_test.py new file mode 100644 index 0000000000000000000000000000000000000000..69d1a27259022569cc5865e49dd6bba5675d834f --- /dev/null +++ b/models/research/domain_adaptation/domain_separation/models_test.py @@ -0,0 +1,167 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for DSN components.""" + +import numpy as np +import tensorflow as tf + +#from models.domain_adaptation.domain_separation +import models + + +class SharedEncodersTest(tf.test.TestCase): + + def _testSharedEncoder(self, + input_shape=[5, 28, 28, 1], + model=models.dann_mnist, + is_training=True): + images = tf.to_float(np.random.rand(*input_shape)) + + with self.test_session() as sess: + logits, _ = model(images) + sess.run(tf.global_variables_initializer()) + logits_np = sess.run(logits) + return logits_np + + def testBuildGRLMnistModel(self): + logits = self._testSharedEncoder(model=getattr(models, + 'dann_mnist')) + self.assertEqual(logits.shape, (5, 10)) + self.assertTrue(np.any(logits)) + + def testBuildGRLSvhnModel(self): + logits = self._testSharedEncoder(model=getattr(models, + 'dann_svhn')) + self.assertEqual(logits.shape, (5, 10)) + self.assertTrue(np.any(logits)) + + def testBuildGRLGtsrbModel(self): + logits = self._testSharedEncoder([5, 40, 40, 3], + getattr(models, 'dann_gtsrb')) + self.assertEqual(logits.shape, (5, 43)) + self.assertTrue(np.any(logits)) + + def testBuildPoseModel(self): + logits = self._testSharedEncoder([5, 64, 64, 4], + getattr(models, 'dsn_cropped_linemod')) + self.assertEqual(logits.shape, (5, 11)) + self.assertTrue(np.any(logits)) + + def testBuildPoseModelWithBatchNorm(self): + images = tf.to_float(np.random.rand(10, 64, 64, 4)) + + with self.test_session() as sess: + logits, _ = getattr(models, 'dsn_cropped_linemod')( + images, batch_norm_params=models.default_batch_norm_params(True)) + sess.run(tf.global_variables_initializer()) + logits_np = sess.run(logits) + self.assertEqual(logits_np.shape, (10, 11)) + self.assertTrue(np.any(logits_np)) + + +class EncoderTest(tf.test.TestCase): + + def _testEncoder(self, batch_norm_params=None, channels=1): + images = tf.to_float(np.random.rand(10, 28, 28, channels)) + + with self.test_session() as sess: + end_points = models.default_encoder( + images, 128, batch_norm_params=batch_norm_params) + sess.run(tf.global_variables_initializer()) + private_code = sess.run(end_points['fc3']) + self.assertEqual(private_code.shape, (10, 128)) + self.assertTrue(np.any(private_code)) + self.assertTrue(np.all(np.isfinite(private_code))) + + def testEncoder(self): + self._testEncoder() + + def testEncoderMultiChannel(self): + self._testEncoder(None, 4) + + def testEncoderIsTrainingBatchNorm(self): + self._testEncoder(models.default_batch_norm_params(True)) + + def testEncoderBatchNorm(self): + self._testEncoder(models.default_batch_norm_params(False)) + + +class DecoderTest(tf.test.TestCase): + + def _testDecoder(self, + height=64, + width=64, + channels=4, + batch_norm_params=None, + decoder=models.small_decoder): + codes = tf.to_float(np.random.rand(32, 100)) + + with self.test_session() as sess: + output = decoder( + codes, + height=height, + width=width, + channels=channels, + batch_norm_params=batch_norm_params) + sess.run(tf.global_variables_initializer()) + output_np = sess.run(output) + self.assertEqual(output_np.shape, (32, height, width, channels)) + self.assertTrue(np.any(output_np)) + self.assertTrue(np.all(np.isfinite(output_np))) + + def testSmallDecoder(self): + self._testDecoder(28, 28, 4, None, getattr(models, 'small_decoder')) + + def testSmallDecoderThreeChannels(self): + self._testDecoder(28, 28, 3) + + def testSmallDecoderBatchNorm(self): + self._testDecoder(28, 28, 4, models.default_batch_norm_params(False)) + + def 
testSmallDecoderIsTrainingBatchNorm(self): + self._testDecoder(28, 28, 4, models.default_batch_norm_params(True)) + + def testLargeDecoder(self): + self._testDecoder(32, 32, 4, None, getattr(models, 'large_decoder')) + + def testLargeDecoderThreeChannels(self): + self._testDecoder(32, 32, 3, None, getattr(models, 'large_decoder')) + + def testLargeDecoderBatchNorm(self): + self._testDecoder(32, 32, 4, + models.default_batch_norm_params(False), + getattr(models, 'large_decoder')) + + def testLargeDecoderIsTrainingBatchNorm(self): + self._testDecoder(32, 32, 4, + models.default_batch_norm_params(True), + getattr(models, 'large_decoder')) + + def testGtsrbDecoder(self): + self._testDecoder(40, 40, 3, None, getattr(models, 'large_decoder')) + + def testGtsrbDecoderBatchNorm(self): + self._testDecoder(40, 40, 4, + models.default_batch_norm_params(False), + getattr(models, 'gtsrb_decoder')) + + def testGtsrbDecoderIsTrainingBatchNorm(self): + self._testDecoder(40, 40, 4, + models.default_batch_norm_params(True), + getattr(models, 'gtsrb_decoder')) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/domain_adaptation/domain_separation/utils.py b/models/research/domain_adaptation/domain_separation/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e144ee86120bd58eb06b710fb35f3f58b5a05343 --- /dev/null +++ b/models/research/domain_adaptation/domain_separation/utils.py @@ -0,0 +1,183 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Auxiliary functions for domain adaptation related losses. +""" +import math +import tensorflow as tf + + +def create_summaries(end_points, prefix='', max_images=3, use_op_name=False): + """Creates a tf summary per endpoint. + + If the endpoint is a 4 dimensional tensor it displays it as an image + otherwise if it is a two dimensional one it creates a histogram summary. + + Args: + end_points: a dictionary of name, tf tensor pairs. + prefix: an optional string to prefix the summary with. + max_images: the maximum number of images to display per summary. + use_op_name: Use the op name as opposed to the shorter end_points key. 
+ """ + for layer_name in end_points: + if use_op_name: + name = end_points[layer_name].op.name + else: + name = layer_name + if len(end_points[layer_name].get_shape().as_list()) == 4: + # if it's an actual image do not attempt to reshape it + if end_points[layer_name].get_shape().as_list()[-1] == 1 or end_points[ + layer_name].get_shape().as_list()[-1] == 3: + visualization_image = end_points[layer_name] + else: + visualization_image = reshape_feature_maps(end_points[layer_name]) + tf.summary.image( + '{}/{}'.format(prefix, name), + visualization_image, + max_outputs=max_images) + elif len(end_points[layer_name].get_shape().as_list()) == 3: + images = tf.expand_dims(end_points[layer_name], 3) + tf.summary.image( + '{}/{}'.format(prefix, name), + images, + max_outputs=max_images) + elif len(end_points[layer_name].get_shape().as_list()) == 2: + tf.summary.histogram('{}/{}'.format(prefix, name), end_points[layer_name]) + + +def reshape_feature_maps(features_tensor): + """Reshape activations for tf.summary.image visualization. + + Arguments: + features_tensor: a tensor of activations with a square number of feature + maps, eg 4, 9, 16, etc. + Returns: + A composite image with all the feature maps that can be passed as an + argument to tf.summary.image. + """ + assert len(features_tensor.get_shape().as_list()) == 4 + num_filters = features_tensor.get_shape().as_list()[-1] + assert num_filters > 0 + num_filters_sqrt = math.sqrt(num_filters) + assert num_filters_sqrt.is_integer( + ), 'Number of filters should be a square number but got {}'.format( + num_filters) + num_filters_sqrt = int(num_filters_sqrt) + conv_summary = tf.unstack(features_tensor, axis=3) + conv_one_row = tf.concat(axis=2, values=conv_summary[0:num_filters_sqrt]) + ind = 1 + conv_final = conv_one_row + for ind in range(1, num_filters_sqrt): + conv_one_row = tf.concat(axis=2, + values=conv_summary[ + ind * num_filters_sqrt + 0:ind * num_filters_sqrt + num_filters_sqrt]) + conv_final = tf.concat( + axis=1, values=[tf.squeeze(conv_final), tf.squeeze(conv_one_row)]) + conv_final = tf.expand_dims(conv_final, -1) + return conv_final + + +def accuracy(predictions, labels): + """Calculates the classificaton accuracy. + + Args: + predictions: the predicted values, a tensor whose size matches 'labels'. + labels: the ground truth values, a tensor of any size. + + Returns: + a tensor whose value on evaluation returns the total accuracy. + """ + return tf.reduce_mean(tf.cast(tf.equal(predictions, labels), tf.float32)) + + +def compute_upsample_values(input_tensor, upsample_height, upsample_width): + """Compute values for an upsampling op (ops.BatchCropAndResize). + + Args: + input_tensor: image tensor with shape [batch, height, width, in_channels] + upsample_height: integer + upsample_width: integer + + Returns: + grid_centers: tensor with shape [batch, 1] + crop_sizes: tensor with shape [batch, 1] + output_height: integer + output_width: integer + """ + batch, input_height, input_width, _ = input_tensor.shape + + height_half = input_height / 2. + width_half = input_width / 2. + grid_centers = tf.constant(batch * [[height_half, width_half]]) + crop_sizes = tf.constant(batch * [[input_height, input_width]]) + output_height = input_height * upsample_height + output_width = input_width * upsample_width + + return grid_centers, tf.to_float(crop_sizes), output_height, output_width + + +def compute_pairwise_distances(x, y): + """Computes the squared pairwise Euclidean distances between x and y. 
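# A small sketch of what reshape_feature_maps produces (hypothetical shapes;
# assumes TF 1.x as above): a tensor with a square number of feature maps is
# tiled into one single-channel grid image per example, which can then be
# passed to tf.summary.image.
features = tf.zeros([2, 8, 8, 16])     # 16 maps -> a 4x4 grid of 8x8 tiles
grid = reshape_feature_maps(features)  # shape [2, 32, 32, 1]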
+ + Args: + x: a tensor of shape [num_x_samples, num_features] + y: a tensor of shape [num_y_samples, num_features] + + Returns: + a distance matrix of dimensions [num_x_samples, num_y_samples]. + + Raises: + ValueError: if the inputs do no matched the specified dimensions. + """ + + if not len(x.get_shape()) == len(y.get_shape()) == 2: + raise ValueError('Both inputs should be matrices.') + + if x.get_shape().as_list()[1] != y.get_shape().as_list()[1]: + raise ValueError('The number of features should be the same.') + + norm = lambda x: tf.reduce_sum(tf.square(x), 1) + + # By making the `inner' dimensions of the two matrices equal to 1 using + # broadcasting then we are essentially substracting every pair of rows + # of x and y. + # x will be num_samples x num_features x 1, + # and y will be 1 x num_features x num_samples (after broadcasting). + # After the substraction we will get a + # num_x_samples x num_features x num_y_samples matrix. + # The resulting dist will be of shape num_y_samples x num_x_samples. + # and thus we need to transpose it again. + return tf.transpose(norm(tf.expand_dims(x, 2) - tf.transpose(y))) + + +def gaussian_kernel_matrix(x, y, sigmas): + r"""Computes a Guassian Radial Basis Kernel between the samples of x and y. + + We create a sum of multiple gaussian kernels each having a width sigma_i. + + Args: + x: a tensor of shape [num_samples, num_features] + y: a tensor of shape [num_samples, num_features] + sigmas: a tensor of floats which denote the widths of each of the + gaussians in the kernel. + Returns: + A tensor of shape [num_samples{x}, num_samples{y}] with the RBF kernel. + """ + beta = 1. / (2. * (tf.expand_dims(sigmas, 1))) + + dist = compute_pairwise_distances(x, y) + + s = tf.matmul(beta, tf.reshape(dist, (1, -1))) + + return tf.reshape(tf.reduce_sum(tf.exp(-s), 0), tf.shape(dist)) diff --git a/models/research/domain_adaptation/pixel_domain_adaptation/BUILD b/models/research/domain_adaptation/pixel_domain_adaptation/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..2bc8d4a49a828f97b8f45166aa2bbc552d4a3b92 --- /dev/null +++ b/models/research/domain_adaptation/pixel_domain_adaptation/BUILD @@ -0,0 +1,90 @@ +# Description: +# Contains code for domain-adaptation style transfer. 
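# A small numeric sketch for the kernel utilities defined in utils.py above
# (hypothetical values; assumes TF 1.x). With x compared against itself, the
# pairwise distances on the diagonal are 0, so each of the len(sigmas)
# Gaussian kernels contributes exp(0) = 1 there.
x = tf.constant([[0., 0.], [1., 1.]])
sigmas = tf.constant([1., 10.])
kernel = gaussian_kernel_matrix(x, x, sigmas)
# kernel has shape [2, 2]; its diagonal entries evaluate to 2.0 (one per sigma).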
+ +package( + default_visibility = [ + ":internal", + ], +) + +licenses(["notice"]) # Apache 2.0 + +exports_files(["LICENSE"]) + +package_group( + name = "internal", + packages = [ + "//domain_adaptation/...", + ], +) + +py_library( + name = "pixelda_preprocess", + srcs = ["pixelda_preprocess.py"], + deps = [ + + ], +) + +py_test( + name = "pixelda_preprocess_test", + srcs = ["pixelda_preprocess_test.py"], + deps = [ + ":pixelda_preprocess", + + ], +) + +py_library( + name = "pixelda_model", + srcs = [ + "pixelda_model.py", + "pixelda_task_towers.py", + "hparams.py", + ], + deps = [ + + ], +) + +py_library( + name = "pixelda_utils", + srcs = ["pixelda_utils.py"], + deps = [ + + ], +) + +py_library( + name = "pixelda_losses", + srcs = ["pixelda_losses.py"], + deps = [ + + ], +) + +py_binary( + name = "pixelda_train", + srcs = ["pixelda_train.py"], + deps = [ + ":pixelda_losses", + ":pixelda_model", + ":pixelda_preprocess", + ":pixelda_utils", + + "//domain_adaptation/datasets:dataset_factory", + ], +) + +py_binary( + name = "pixelda_eval", + srcs = ["pixelda_eval.py"], + deps = [ + ":pixelda_losses", + ":pixelda_model", + ":pixelda_preprocess", + ":pixelda_utils", + + "//domain_adaptation/datasets:dataset_factory", + ], +) diff --git a/models/research/domain_adaptation/pixel_domain_adaptation/README.md b/models/research/domain_adaptation/pixel_domain_adaptation/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/domain_adaptation/pixel_domain_adaptation/baselines/BUILD b/models/research/domain_adaptation/pixel_domain_adaptation/baselines/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..c41a4ffeee80114145c4c3fc32a2191879b1b08a --- /dev/null +++ b/models/research/domain_adaptation/pixel_domain_adaptation/baselines/BUILD @@ -0,0 +1,23 @@ +licenses(["notice"]) # Apache 2.0 + +py_binary( + name = "baseline_train", + srcs = ["baseline_train.py"], + deps = [ + + "//domain_adaptation/datasets:dataset_factory", + "//domain_adaptation/pixel_domain_adaptation:pixelda_model", + "//domain_adaptation/pixel_domain_adaptation:pixelda_preprocess", + ], +) + +py_binary( + name = "baseline_eval", + srcs = ["baseline_eval.py"], + deps = [ + + "//domain_adaptation/datasets:dataset_factory", + "//domain_adaptation/pixel_domain_adaptation:pixelda_model", + "//domain_adaptation/pixel_domain_adaptation:pixelda_preprocess", + ], +) diff --git a/models/research/domain_adaptation/pixel_domain_adaptation/baselines/README.md b/models/research/domain_adaptation/pixel_domain_adaptation/baselines/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d61195ad2de6867801143aeda906cb5efe30a5e3 --- /dev/null +++ b/models/research/domain_adaptation/pixel_domain_adaptation/baselines/README.md @@ -0,0 +1,60 @@ +The best baselines are obtainable via the following configuration: + + +## MNIST => MNIST_M + +Accuracy: +MNIST-Train: 99.9 +MNIST_M-Train: 63.9 +MNIST_M-Valid: 63.9 +MNIST_M-Test: 63.6 + +Learning Rate = 0.0001 +Weight Decay = 0.0 +Number of Steps: 105,000 + +## MNIST => USPS + +Accuracy: +MNIST-Train: 100.0 +USPS-Train: 82.8 +USPS-Valid: 82.8 +USPS-Test: 78.9 + +Learning Rate = 0.0001 +Weight Decay = 0.0 +Number of Steps: 22,000 + +## MNIST_M => MNIST + +Accuracy: +MNIST_M-Train: 100 +MNIST-Train: 98.5 +MNIST-Valid: 98.5 +MNIST-Test: 98.1 + +Learning Rate = 0.001 +Weight Decay = 0.0 +Number of Steps: 604,400 + +## MNIST_M => MNIST_M + +Accuracy: +MNIST_M-Train: 100.0 
+MNIST_M-Valid: 96.6 +MNIST_M-Test: 96.4 + +Learning Rate = 0.001 +Weight Decay = 0.0 +Number of Steps: 139,400 + +## USPS => USPS + +Accuracy: +USPS-Train: 100.0 +USPS-Valid: 100.0 +USPS-Test: 96.5 + +Learning Rate = 0.001 +Weight Decay = 0.0 +Number of Steps: 67,000 diff --git a/models/research/domain_adaptation/pixel_domain_adaptation/baselines/baseline_eval.py b/models/research/domain_adaptation/pixel_domain_adaptation/baselines/baseline_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..6b7ef6452b4897b00dc8c977bf40526ad5052ede --- /dev/null +++ b/models/research/domain_adaptation/pixel_domain_adaptation/baselines/baseline_eval.py @@ -0,0 +1,141 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Evals the classification/pose baselines.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from functools import partial + +import math + +# Dependency imports + +import tensorflow as tf + +from domain_adaptation.datasets import dataset_factory +from domain_adaptation.pixel_domain_adaptation import pixelda_preprocess +from domain_adaptation.pixel_domain_adaptation import pixelda_task_towers + +flags = tf.app.flags +FLAGS = flags.FLAGS + +slim = tf.contrib.slim + +flags.DEFINE_string('master', '', 'BNS name of the tensorflow server') + +flags.DEFINE_string( + 'checkpoint_dir', None, 'The location of the checkpoint files.') + +flags.DEFINE_string( + 'eval_dir', None, 'The directory where evaluation logs are written.') + +flags.DEFINE_integer('batch_size', 32, 'The number of samples per batch.') + +flags.DEFINE_string('dataset_name', None, 'The name of the dataset.') + +flags.DEFINE_string('dataset_dir', None, + 'The directory where the data is stored.') + +flags.DEFINE_string('split_name', None, 'The name of the train/test split.') + +flags.DEFINE_integer('eval_interval_secs', 60 * 5, + 'How often (in seconds) to run evaluation.') + +flags.DEFINE_integer( + 'num_readers', 4, + 'The number of parallel readers that read data from the dataset.') + +def main(unused_argv): + tf.logging.set_verbosity(tf.logging.INFO) + hparams = tf.contrib.training.HParams() + hparams.weight_decay_task_classifier = 0.0 + + if FLAGS.dataset_name in ['mnist', 'mnist_m', 'usps']: + hparams.task_tower = 'mnist' + else: + raise ValueError('Unknown dataset %s' % FLAGS.dataset_name) + + if not tf.gfile.Exists(FLAGS.eval_dir): + tf.gfile.MakeDirs(FLAGS.eval_dir) + + with tf.Graph().as_default(): + dataset = dataset_factory.get_dataset(FLAGS.dataset_name, FLAGS.split_name, + FLAGS.dataset_dir) + num_classes = dataset.num_classes + num_samples = dataset.num_samples + + preprocess_fn = partial(pixelda_preprocess.preprocess_classification, + is_training=False) + + images, labels = dataset_factory.provide_batch( + FLAGS.dataset_name, + FLAGS.split_name, + dataset_dir=FLAGS.dataset_dir, + num_readers=FLAGS.num_readers, + batch_size=FLAGS.batch_size, + num_preprocessing_threads=FLAGS.num_readers) + + # Define 
the model + logits, _ = pixelda_task_towers.add_task_specific_model( + images, hparams, num_classes=num_classes, is_training=True) + + ##################### + # Define the losses # + ##################### + if 'classes' in labels: + one_hot_labels = labels['classes'] + loss = tf.losses.softmax_cross_entropy( + onehot_labels=one_hot_labels, logits=logits) + tf.summary.scalar('losses/Classification_Loss', loss) + else: + raise ValueError('Only support classification for now.') + + total_loss = tf.losses.get_total_loss() + + predictions = tf.reshape(tf.argmax(logits, 1), shape=[-1]) + class_labels = tf.argmax(labels['classes'], 1) + + metrics_to_values, metrics_to_updates = slim.metrics.aggregate_metric_map({ + 'Mean_Loss': + tf.contrib.metrics.streaming_mean(total_loss), + 'Accuracy': + tf.contrib.metrics.streaming_accuracy(predictions, + tf.reshape( + class_labels, + shape=[-1])), + 'Recall_at_5': + tf.contrib.metrics.streaming_recall_at_k(logits, class_labels, 5), + }) + + tf.summary.histogram('outputs/Predictions', predictions) + tf.summary.histogram('outputs/Ground_Truth', class_labels) + + for name, value in metrics_to_values.iteritems(): + tf.summary.scalar(name, value) + + num_batches = int(math.ceil(num_samples / float(FLAGS.batch_size))) + + slim.evaluation.evaluation_loop( + master=FLAGS.master, + checkpoint_dir=FLAGS.checkpoint_dir, + logdir=FLAGS.eval_dir, + num_evals=num_batches, + eval_op=metrics_to_updates.values(), + eval_interval_secs=FLAGS.eval_interval_secs) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/domain_adaptation/pixel_domain_adaptation/baselines/baseline_train.py b/models/research/domain_adaptation/pixel_domain_adaptation/baselines/baseline_train.py new file mode 100644 index 0000000000000000000000000000000000000000..8c92bd81a7b68879000dd793ba2fd013f395f408 --- /dev/null +++ b/models/research/domain_adaptation/pixel_domain_adaptation/baselines/baseline_train.py @@ -0,0 +1,161 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Trains the classification/pose baselines.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from functools import partial + +# Dependency imports + +import tensorflow as tf + +from domain_adaptation.datasets import dataset_factory +from domain_adaptation.pixel_domain_adaptation import pixelda_preprocess +from domain_adaptation.pixel_domain_adaptation import pixelda_task_towers + +flags = tf.app.flags +FLAGS = flags.FLAGS + +slim = tf.contrib.slim + +flags.DEFINE_string('master', '', 'BNS name of the tensorflow server') + +flags.DEFINE_integer('task', 0, 'The task ID.') + +flags.DEFINE_integer('num_ps_tasks', 0, + 'The number of parameter servers. 
If the value is 0, then ' + 'the parameters are handled locally by the worker.') + +flags.DEFINE_integer('batch_size', 32, 'The number of samples per batch.') + +flags.DEFINE_string('dataset_name', None, 'The name of the dataset.') + +flags.DEFINE_string('dataset_dir', None, + 'The directory where the data is stored.') + +flags.DEFINE_string('split_name', None, 'The name of the train/test split.') + +flags.DEFINE_float('learning_rate', 0.001, 'The initial learning rate.') + +flags.DEFINE_integer( + 'learning_rate_decay_steps', 20000, + 'The frequency, in steps, at which the learning rate is decayed.') + +flags.DEFINE_float('learning_rate_decay_factor', + 0.95, + 'The factor with which the learning rate is decayed.') + +flags.DEFINE_float('adam_beta1', 0.5, 'The beta1 value for the AdamOptimizer') + +flags.DEFINE_float('weight_decay', 1e-5, + 'The L2 coefficient on the model weights.') + +flags.DEFINE_string( + 'logdir', None, 'The location of the logs and checkpoints.') + +flags.DEFINE_integer('save_interval_secs', 600, + 'How often, in seconds, we save the model to disk.') + +flags.DEFINE_integer('save_summaries_secs', 600, + 'How often, in seconds, we compute the summaries.') + +flags.DEFINE_integer( + 'num_readers', 4, + 'The number of parallel readers that read data from the dataset.') + +flags.DEFINE_float( + 'moving_average_decay', 0.9999, + 'The amount of decay to use for moving averages.') + + +def main(unused_argv): + tf.logging.set_verbosity(tf.logging.INFO) + hparams = tf.contrib.training.HParams() + hparams.weight_decay_task_classifier = FLAGS.weight_decay + + if FLAGS.dataset_name in ['mnist', 'mnist_m', 'usps']: + hparams.task_tower = 'mnist' + else: + raise ValueError('Unknown dataset %s' % FLAGS.dataset_name) + + with tf.Graph().as_default(): + with tf.device( + tf.train.replica_device_setter(FLAGS.num_ps_tasks, merge_devices=True)): + dataset = dataset_factory.get_dataset(FLAGS.dataset_name, + FLAGS.split_name, FLAGS.dataset_dir) + num_classes = dataset.num_classes + + preprocess_fn = partial(pixelda_preprocess.preprocess_classification, + is_training=True) + + images, labels = dataset_factory.provide_batch( + FLAGS.dataset_name, + FLAGS.split_name, + dataset_dir=FLAGS.dataset_dir, + num_readers=FLAGS.num_readers, + batch_size=FLAGS.batch_size, + num_preprocessing_threads=FLAGS.num_readers) + # preprocess_fn=preprocess_fn) + + # Define the model + logits, _ = pixelda_task_towers.add_task_specific_model( + images, hparams, num_classes=num_classes, is_training=True) + + # Define the losses + if 'classes' in labels: + one_hot_labels = labels['classes'] + loss = tf.losses.softmax_cross_entropy( + onehot_labels=one_hot_labels, logits=logits) + tf.summary.scalar('losses/Classification_Loss', loss) + else: + raise ValueError('Only support classification for now.') + + total_loss = tf.losses.get_total_loss() + tf.summary.scalar('losses/Total_Loss', total_loss) + + # Setup the moving averages + moving_average_variables = slim.get_model_variables() + variable_averages = tf.train.ExponentialMovingAverage( + FLAGS.moving_average_decay, slim.get_or_create_global_step()) + tf.add_to_collection( + tf.GraphKeys.UPDATE_OPS, + variable_averages.apply(moving_average_variables)) + + # Specify the optimization scheme: + learning_rate = tf.train.exponential_decay( + FLAGS.learning_rate, + slim.get_or_create_global_step(), + FLAGS.learning_rate_decay_steps, + FLAGS.learning_rate_decay_factor, + staircase=True) + + optimizer = tf.train.AdamOptimizer(learning_rate, beta1=FLAGS.adam_beta1) + + 
train_op = slim.learning.create_train_op(total_loss, optimizer) + + slim.learning.train( + train_op, + FLAGS.logdir, + master=FLAGS.master, + is_chief=(FLAGS.task == 0), + save_summaries_secs=FLAGS.save_summaries_secs, + save_interval_secs=FLAGS.save_interval_secs) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/domain_adaptation/pixel_domain_adaptation/hparams.py b/models/research/domain_adaptation/pixel_domain_adaptation/hparams.py new file mode 100644 index 0000000000000000000000000000000000000000..ba9539f7d435c86f9fc92ed3406835bdaf2b50f3 --- /dev/null +++ b/models/research/domain_adaptation/pixel_domain_adaptation/hparams.py @@ -0,0 +1,201 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Define model HParams.""" +import tensorflow as tf + + +def create_hparams(hparam_string=None): + """Create model hyperparameters. Parse nondefault from given string.""" + hparams = tf.contrib.training.HParams( + # The name of the architecture to use. + arch='resnet', + lrelu_leakiness=0.2, + batch_norm_decay=0.9, + weight_decay=1e-5, + normal_init_std=0.02, + generator_kernel_size=3, + discriminator_kernel_size=3, + + # Stop training after this many examples are processed + # If none, train indefinitely + num_training_examples=0, + + # Apply data augmentation to datasets + # Applies only in training job + augment_source_images=False, + augment_target_images=False, + + # Discriminator + # Number of filters in first layer of discriminator + num_discriminator_filters=64, + discriminator_conv_block_size=1, # How many convs to have at each size + discriminator_filter_factor=2.0, # Multiply # filters by this each layer + # Add gaussian noise with this stddev to every hidden layer of D + discriminator_noise_stddev=0.2, # lmetz: Start seeing results at >= 0.1 + # If true, add this gaussian noise to input images to D as well + discriminator_image_noise=False, + discriminator_first_stride=1, # Stride in first conv of discriminator + discriminator_do_pooling=False, # If true, replace stride 2 with avg pool + discriminator_dropout_keep_prob=0.9, # keep probability for dropout + + # DCGAN Generator + # Number of filters in generator decoder last layer (repeatedly halved + # from 1st layer) + num_decoder_filters=64, + # Number of filters in generator encoder 1st layer (repeatedly doubled + # after 1st layer) + num_encoder_filters=64, + + # This is the shape to which the noise vector is projected (if we're + # transferring from noise). + # Write this way instead of [4, 4, 64] for hparam search flexibility + projection_shape_size=4, + projection_shape_channels=64, + + # Indicates the method by which we enlarge the spatial representation + # of an image. Possible values include: + # - resize_conv: Performs a nearest neighbor resize followed by a conv. + # - conv2d_transpose: Performs a conv2d_transpose. 
+ upsample_method='resize_conv', + + # Visualization + summary_steps=500, # Output image summary every N steps + + ################################### + # Task Classifier Hyperparameters # + ################################### + + # Which task-specific prediction tower to use. Possible choices are: + # none: No task tower. + # doubling_pose_estimator: classifier + quaternion regressor. + # [conv + pool]* + FC + # Classifiers used in DSN paper: + # gtsrb: Classifier used for GTSRB + # svhn: Classifier used for SVHN + # mnist: Classifier used for MNIST + # pose_mini: Classifier + regressor used for pose_mini + task_tower='doubling_pose_estimator', + weight_decay_task_classifier=1e-5, + source_task_loss_weight=1.0, + transferred_task_loss_weight=1.0, + + # Number of private layers in doubling_pose_estimator task tower + num_private_layers=2, + + # The weight for the log quaternion loss we use for source and transferred + # samples of the cropped_linemod dataset. + # In the DSN work, 1/8 of the classifier weight worked well for our log + # quaternion loss + source_pose_weight=0.125 * 2.0, + transferred_pose_weight=0.125 * 1.0, + + # If set to True, the style transfer network also attempts to change its + # weights to maximize the performance of the task tower. If set to False, + # then the style transfer network only attempts to change its weights to + # make the transferred images more likely according to the domain + # classifier. + task_tower_in_g_step=True, + task_loss_in_g_weight=1.0, # Weight of task loss in G + + ######################################### + # 'simple` generator arch model hparams # + ######################################### + simple_num_conv_layers=1, + simple_conv_filters=8, + + ######################### + # Resnet Hyperparameters# + ######################### + resnet_blocks=6, # Number of resnet blocks + resnet_filters=64, # Number of filters per conv in resnet blocks + # If true, add original input back to result of convolutions inside the + # resnet arch. If false, it turns into a simple stack of conv/relu/BN + # layers. + resnet_residuals=True, + + ####################################### + # The residual / interpretable model. # + ####################################### + res_int_blocks=2, # The number of residual blocks. + res_int_convs=2, # The number of conv calls inside each block. + res_int_filters=64, # The number of filters used by each convolution. + + #################### + # Latent variables # + #################### + # if true, then generate random noise and project to input for generator + noise_channel=True, + # The number of dimensions in the input noise vector. + noise_dims=10, + + # If true, then one hot encode source image class and project as an + # additional channel for the input to generator. This gives the generator + # access to the class, which may help generation performance. + condition_on_source_class=False, + + ######################## + # Loss Hyperparameters # + ######################## + domain_loss_weight=1.0, + style_transfer_loss_weight=1.0, + + ######################################################################## + # Encourages the transferred images to be similar to the source images # + # using a configurable metric. # + ######################################################################## + + # The weight of the loss function encouraging the source and transferred + # images to be similar. If set to 0, then the loss function is not used. 
+ transferred_similarity_loss_weight=0.0, + + # The type of loss used to encourage transferred and source image + # similarity. Valid values include: + # mpse: Mean Pairwise Squared Error + # mse: Mean Squared Error + # hinged_mse: Computes the mean squared error using squared differences + # greater than hparams.transferred_similarity_max_diff + # hinged_mae: Computes the mean absolute error using absolute + # differences greater than hparams.transferred_similarity_max_diff. + transferred_similarity_loss='mpse', + + # The maximum allowable difference between the source and target images. + # This value is used, in effect, to produce a hinge loss. Note that the + # range of values should be between 0 and 1. + transferred_similarity_max_diff=0.4, + + ################################ + # Optimization Hyperparameters # + ################################ + learning_rate=0.001, + batch_size=32, + lr_decay_steps=20000, + lr_decay_rate=0.95, + + # Recomendation from the DCGAN paper: + adam_beta1=0.5, + clip_gradient_norm=5.0, + + # The number of times we run the discriminator train_op in a row. + discriminator_steps=1, + + # The number of times we run the generator train_op in a row. + generator_steps=1) + + if hparam_string: + tf.logging.info('Parsing command line hparams: %s', hparam_string) + hparams.parse(hparam_string) + + tf.logging.info('Final parsed hparams: %s', hparams.values()) + return hparams diff --git a/models/research/domain_adaptation/pixel_domain_adaptation/pixelda_eval.py b/models/research/domain_adaptation/pixel_domain_adaptation/pixelda_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..23824249a9e95586ed85e40cd89c5f6814977969 --- /dev/null +++ b/models/research/domain_adaptation/pixel_domain_adaptation/pixelda_eval.py @@ -0,0 +1,298 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Evaluates the PIXELDA model. + +-- Compiles the model for CPU. +$ bazel build -c opt third_party/tensorflow_models/domain_adaptation/pixel_domain_adaptation:pixelda_eval + +-- Compile the model for GPU. +$ bazel build -c opt --copt=-mavx --config=cuda \ + third_party/tensorflow_models/domain_adaptation/pixel_domain_adaptation:pixelda_eval + +-- Runs the training. +$ ./bazel-bin/third_party/tensorflow_models/domain_adaptation/pixel_domain_adaptation/pixelda_eval \ + --source_dataset=mnist \ + --target_dataset=mnist_m \ + --dataset_dir=/tmp/datasets/ \ + --alsologtostderr + +-- Visualize the results. 
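# A minimal sketch of overriding the defaults from create_hparams() in
# hparams.py above (hypothetical values). The comma-separated string is parsed
# by tf.contrib.training.HParams.parse, so any field defined above can be
# overridden, e.g. from a --hparams flag.
hparams = create_hparams('arch=dcgan,learning_rate=0.0002,batch_size=64')
# hparams.batch_size == 64 (overridden); hparams.adam_beta1 == 0.5 (default).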
+$ bash learning/brain/tensorboard/tensorboard.sh \ + --port 2222 --logdir=/tmp/pixelda/ +""" +from functools import partial +import math + +# Dependency imports + +import tensorflow as tf + +from domain_adaptation.datasets import dataset_factory +from domain_adaptation.pixel_domain_adaptation import pixelda_model +from domain_adaptation.pixel_domain_adaptation import pixelda_preprocess +from domain_adaptation.pixel_domain_adaptation import pixelda_utils +from domain_adaptation.pixel_domain_adaptation import pixelda_losses +from domain_adaptation.pixel_domain_adaptation.hparams import create_hparams + +slim = tf.contrib.slim + +flags = tf.app.flags +FLAGS = flags.FLAGS + +flags.DEFINE_string('master', '', 'BNS name of the TensorFlow master to use.') + +flags.DEFINE_string('checkpoint_dir', '/tmp/pixelda/', + 'Directory where the model was written to.') + +flags.DEFINE_string('eval_dir', '/tmp/pixelda/', + 'Directory where the results are saved to.') + +flags.DEFINE_integer('eval_interval_secs', 60, + 'The frequency, in seconds, with which evaluation is run.') + +flags.DEFINE_string('target_split_name', 'test', + 'The name of the train/test split.') +flags.DEFINE_string('source_split_name', 'train', 'Split for source dataset.' + ' Defaults to train.') + +flags.DEFINE_string('source_dataset', 'mnist', + 'The name of the source dataset.') + +flags.DEFINE_string('target_dataset', 'mnist_m', + 'The name of the target dataset.') + +flags.DEFINE_string( + 'dataset_dir', + '', # None, + 'The directory where the datasets can be found.') + +flags.DEFINE_integer( + 'num_readers', 4, + 'The number of parallel readers that read data from the dataset.') + +flags.DEFINE_integer('num_preprocessing_threads', 4, + 'The number of threads used to create the batches.') + +# HParams + +flags.DEFINE_string('hparams', '', 'Comma separated hyperparameter values') + + +def run_eval(run_dir, checkpoint_dir, hparams): + """Runs the eval loop. + + Args: + run_dir: The directory where eval specific logs are placed + checkpoint_dir: The directory where the checkpoints are stored + hparams: The hyperparameters struct. + + Raises: + ValueError: if hparams.arch is not recognized. 
+ """ + for checkpoint_path in slim.evaluation.checkpoints_iterator( + checkpoint_dir, FLAGS.eval_interval_secs): + with tf.Graph().as_default(): + ######################### + # Preprocess the inputs # + ######################### + target_dataset = dataset_factory.get_dataset( + FLAGS.target_dataset, + split_name=FLAGS.target_split_name, + dataset_dir=FLAGS.dataset_dir) + target_images, target_labels = dataset_factory.provide_batch( + FLAGS.target_dataset, FLAGS.target_split_name, FLAGS.dataset_dir, + FLAGS.num_readers, hparams.batch_size, + FLAGS.num_preprocessing_threads) + num_target_classes = target_dataset.num_classes + target_labels['class'] = tf.argmax(target_labels['classes'], 1) + del target_labels['classes'] + + if hparams.arch not in ['dcgan']: + source_dataset = dataset_factory.get_dataset( + FLAGS.source_dataset, + split_name=FLAGS.source_split_name, + dataset_dir=FLAGS.dataset_dir) + num_source_classes = source_dataset.num_classes + source_images, source_labels = dataset_factory.provide_batch( + FLAGS.source_dataset, FLAGS.source_split_name, FLAGS.dataset_dir, + FLAGS.num_readers, hparams.batch_size, + FLAGS.num_preprocessing_threads) + source_labels['class'] = tf.argmax(source_labels['classes'], 1) + del source_labels['classes'] + if num_source_classes != num_target_classes: + raise ValueError( + 'Input and output datasets must have same number of classes') + else: + source_images = None + source_labels = None + + #################### + # Define the model # + #################### + end_points = pixelda_model.create_model( + hparams, + target_images, + source_images=source_images, + source_labels=source_labels, + is_training=False, + num_classes=num_target_classes) + + ####################### + # Metrics & Summaries # + ####################### + names_to_values, names_to_updates = create_metrics(end_points, + source_labels, + target_labels, hparams) + pixelda_utils.summarize_model(end_points) + pixelda_utils.summarize_transferred_grid( + end_points['transferred_images'], source_images, name='Transferred') + if 'source_images_recon' in end_points: + pixelda_utils.summarize_transferred_grid( + end_points['source_images_recon'], + source_images, + name='Source Reconstruction') + pixelda_utils.summarize_images(target_images, 'Target') + + for name, value in names_to_values.iteritems(): + tf.summary.scalar(name, value) + + # Use the entire split by default + num_examples = target_dataset.num_samples + + num_batches = math.ceil(num_examples / float(hparams.batch_size)) + global_step = slim.get_or_create_global_step() + + result = slim.evaluation.evaluate_once( + master=FLAGS.master, + checkpoint_path=checkpoint_path, + logdir=run_dir, + num_evals=num_batches, + eval_op=names_to_updates.values(), + final_op=names_to_values) + + +def to_degrees(log_quaternion_loss): + """Converts a log quaternion distance to an angle. + + Args: + log_quaternion_loss: The log quaternion distance between two + unit quaternions (or a batch of pairs of quaternions). + + Returns: + The angle in degrees of the implied angle-axis representation. + """ + return tf.acos(-(tf.exp(log_quaternion_loss) - 1)) * 2 * 180 / math.pi + + +def create_metrics(end_points, source_labels, target_labels, hparams): + """Create metrics for the model. + + Args: + end_points: A dictionary of end point name to tensor + source_labels: Labels for source images. batch_size x 1 + target_labels: Labels for target images. batch_size x 1 + hparams: The hyperparameters struct. 
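# A worked value for to_degrees() above, assuming the 1e-4 epsilon used by
# pixelda_losses.log_quaternion_loss_batch: a perfect prediction yields a log
# loss of log(1e-4), which maps back to roughly 1.6 degrees rather than 0
# because of that epsilon.
print(math.degrees(math.acos(1.0 - 1e-4)) * 2)  # ~1.62 degrees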
+ + Returns: + Tuple of (names_to_values, names_to_updates), dictionaries that map a metric + name to its value and update op, respectively + + """ + ########################################### + # Evaluate the Domain Prediction Accuracy # + ########################################### + batch_size = hparams.batch_size + names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({ + ('eval/Domain_Accuracy-Transferred'): + tf.contrib.metrics.streaming_accuracy( + tf.to_int32( + tf.round(tf.sigmoid(end_points[ + 'transferred_domain_logits']))), + tf.zeros(batch_size, dtype=tf.int32)), + ('eval/Domain_Accuracy-Target'): + tf.contrib.metrics.streaming_accuracy( + tf.to_int32( + tf.round(tf.sigmoid(end_points['target_domain_logits']))), + tf.ones(batch_size, dtype=tf.int32)) + }) + + ################################ + # Evaluate the task classifier # + ################################ + if 'source_task_logits' in end_points: + metric_name = 'eval/Task_Accuracy-Source' + names_to_values[metric_name], names_to_updates[ + metric_name] = tf.contrib.metrics.streaming_accuracy( + tf.argmax(end_points['source_task_logits'], 1), + source_labels['class']) + + if 'transferred_task_logits' in end_points: + metric_name = 'eval/Task_Accuracy-Transferred' + names_to_values[metric_name], names_to_updates[ + metric_name] = tf.contrib.metrics.streaming_accuracy( + tf.argmax(end_points['transferred_task_logits'], 1), + source_labels['class']) + + if 'target_task_logits' in end_points: + metric_name = 'eval/Task_Accuracy-Target' + names_to_values[metric_name], names_to_updates[ + metric_name] = tf.contrib.metrics.streaming_accuracy( + tf.argmax(end_points['target_task_logits'], 1), + target_labels['class']) + + ########################################################################## + # Pose data-specific losses. 
+ ########################################################################## + if 'quaternion' in source_labels.keys(): + params = {} + params['use_logging'] = False + params['batch_size'] = batch_size + + angle_loss_source = to_degrees( + pixelda_losses.log_quaternion_loss_batch(end_points[ + 'source_quaternion'], source_labels['quaternion'], params)) + angle_loss_transferred = to_degrees( + pixelda_losses.log_quaternion_loss_batch(end_points[ + 'transferred_quaternion'], source_labels['quaternion'], params)) + angle_loss_target = to_degrees( + pixelda_losses.log_quaternion_loss_batch(end_points[ + 'target_quaternion'], target_labels['quaternion'], params)) + + metric_name = 'eval/Angle_Loss-Source' + names_to_values[metric_name], names_to_updates[ + metric_name] = slim.metrics.mean(angle_loss_source) + + metric_name = 'eval/Angle_Loss-Transferred' + names_to_values[metric_name], names_to_updates[ + metric_name] = slim.metrics.mean(angle_loss_transferred) + + metric_name = 'eval/Angle_Loss-Target' + names_to_values[metric_name], names_to_updates[ + metric_name] = slim.metrics.mean(angle_loss_target) + + return names_to_values, names_to_updates + + +def main(_): + tf.logging.set_verbosity(tf.logging.INFO) + hparams = create_hparams(FLAGS.hparams) + run_eval( + run_dir=FLAGS.eval_dir, + checkpoint_dir=FLAGS.checkpoint_dir, + hparams=hparams) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/domain_adaptation/pixel_domain_adaptation/pixelda_losses.py b/models/research/domain_adaptation/pixel_domain_adaptation/pixelda_losses.py new file mode 100644 index 0000000000000000000000000000000000000000..cf39765d4d28c5a04cb8868cdc465cdd0129b0df --- /dev/null +++ b/models/research/domain_adaptation/pixel_domain_adaptation/pixelda_losses.py @@ -0,0 +1,385 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Defines the various loss functions in use by the PIXELDA model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# Dependency imports + +import tensorflow as tf + +slim = tf.contrib.slim + + +def add_domain_classifier_losses(end_points, hparams): + """Adds losses related to the domain-classifier. + + Args: + end_points: A map of network end point names to `Tensors`. + hparams: The hyperparameters struct. + + Returns: + loss: A `Tensor` representing the total task-classifier loss. + """ + if hparams.domain_loss_weight == 0: + tf.logging.info( + 'Domain classifier loss weight is 0, so not creating losses.') + return 0 + + # The domain prediction loss is minimized with respect to the domain + # classifier features only. Its aim is to predict the domain of the images. 
+ # Note: 1 = 'real image' label, 0 = 'fake image' label + transferred_domain_loss = tf.losses.sigmoid_cross_entropy( + multi_class_labels=tf.zeros_like(end_points['transferred_domain_logits']), + logits=end_points['transferred_domain_logits']) + tf.summary.scalar('Domain_loss_transferred', transferred_domain_loss) + + target_domain_loss = tf.losses.sigmoid_cross_entropy( + multi_class_labels=tf.ones_like(end_points['target_domain_logits']), + logits=end_points['target_domain_logits']) + tf.summary.scalar('Domain_loss_target', target_domain_loss) + + # Compute the total domain loss: + total_domain_loss = transferred_domain_loss + target_domain_loss + total_domain_loss *= hparams.domain_loss_weight + tf.summary.scalar('Domain_loss_total', total_domain_loss) + + return total_domain_loss + +def log_quaternion_loss_batch(predictions, labels, params): + """A helper function to compute the error between quaternions. + + Args: + predictions: A Tensor of size [batch_size, 4]. + labels: A Tensor of size [batch_size, 4]. + params: A dictionary of parameters. Expecting 'use_logging', 'batch_size'. + + Returns: + A Tensor of size [batch_size], denoting the error between the quaternions. + """ + use_logging = params['use_logging'] + assertions = [] + if use_logging: + assertions.append( + tf.Assert( + tf.reduce_all( + tf.less( + tf.abs(tf.reduce_sum(tf.square(predictions), [1]) - 1), + 1e-4)), + ['The l2 norm of each prediction quaternion vector should be 1.'])) + assertions.append( + tf.Assert( + tf.reduce_all( + tf.less( + tf.abs(tf.reduce_sum(tf.square(labels), [1]) - 1), 1e-4)), + ['The l2 norm of each label quaternion vector should be 1.'])) + + with tf.control_dependencies(assertions): + product = tf.multiply(predictions, labels) + internal_dot_products = tf.reduce_sum(product, [1]) + + if use_logging: + internal_dot_products = tf.Print(internal_dot_products, [ + internal_dot_products, + tf.shape(internal_dot_products) + ], 'internal_dot_products:') + + logcost = tf.log(1e-4 + 1 - tf.abs(internal_dot_products)) + return logcost + + +def log_quaternion_loss(predictions, labels, params): + """A helper function to compute the mean error between batches of quaternions. + + The caller is expected to add the loss to the graph. + + Args: + predictions: A Tensor of size [batch_size, 4]. + labels: A Tensor of size [batch_size, 4]. + params: A dictionary of parameters. Expecting 'use_logging', 'batch_size'. + + Returns: + A Tensor of size 1, denoting the mean error between batches of quaternions. + """ + use_logging = params['use_logging'] + logcost = log_quaternion_loss_batch(predictions, labels, params) + logcost = tf.reduce_sum(logcost, [0]) + batch_size = params['batch_size'] + logcost = tf.multiply(logcost, 1.0 / batch_size, name='log_quaternion_loss') + if use_logging: + logcost = tf.Print( + logcost, [logcost], '[logcost]', name='log_quaternion_loss_print') + return logcost + +def _quaternion_loss(labels, predictions, weight, batch_size, domain, + add_summaries): + """Creates a Quaternion Loss. + + Args: + labels: The true quaternions. + predictions: The predicted quaternions. + weight: A scalar weight. + batch_size: The size of the batches. + domain: The name of the domain from which the labels were taken. + add_summaries: Whether or not to add summaries for the losses. + + Returns: + A `Tensor` representing the loss. 
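# Worked values for log_quaternion_loss_batch above, assuming unit
# quaternions: identical quaternions give log(1e-4 + 1 - 1) = log(1e-4), the
# minimum, while orthogonal quaternions give log(1e-4 + 1 - 0), which is
# approximately 0. Lower is therefore better, and the loss is bounded below
# by log(1e-4).
import math
print(math.log(1e-4))      # -9.21...
print(math.log(1 + 1e-4))  # ~0.0001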
+ """ + assert domain in ['Source', 'Transferred'] + + params = {'use_logging': False, 'batch_size': batch_size} + loss = weight * log_quaternion_loss(labels, predictions, params) + + if add_summaries: + assert_op = tf.Assert(tf.is_finite(loss), [loss]) + with tf.control_dependencies([assert_op]): + tf.summary.histogram( + 'Log_Quaternion_Loss_%s' % domain, loss, collections='losses') + tf.summary.scalar( + 'Task_Quaternion_Loss_%s' % domain, loss, collections='losses') + + return loss + + +def _add_task_specific_losses(end_points, source_labels, num_classes, hparams, + add_summaries=False): + """Adds losses related to the task-classifier. + + Args: + end_points: A map of network end point names to `Tensors`. + source_labels: A dictionary of output labels to `Tensors`. + num_classes: The number of classes used by the classifier. + hparams: The hyperparameters struct. + add_summaries: Whether or not to add the summaries. + + Returns: + loss: A `Tensor` representing the total task-classifier loss. + """ + # TODO(ddohan): Make sure the l2 regularization is added to the loss + + one_hot_labels = slim.one_hot_encoding(source_labels['class'], num_classes) + total_loss = 0 + + if 'source_task_logits' in end_points: + loss = tf.losses.softmax_cross_entropy( + onehot_labels=one_hot_labels, + logits=end_points['source_task_logits'], + weights=hparams.source_task_loss_weight) + if add_summaries: + tf.summary.scalar('Task_Classifier_Loss_Source', loss) + total_loss += loss + + if 'transferred_task_logits' in end_points: + loss = tf.losses.softmax_cross_entropy( + onehot_labels=one_hot_labels, + logits=end_points['transferred_task_logits'], + weights=hparams.transferred_task_loss_weight) + if add_summaries: + tf.summary.scalar('Task_Classifier_Loss_Transferred', loss) + total_loss += loss + + ######################### + # Pose specific losses. # + ######################### + if 'quaternion' in source_labels: + total_loss += _quaternion_loss( + source_labels['quaternion'], + end_points['source_quaternion'], + hparams.source_pose_weight, + hparams.batch_size, + 'Source', + add_summaries) + + total_loss += _quaternion_loss( + source_labels['quaternion'], + end_points['transferred_quaternion'], + hparams.transferred_pose_weight, + hparams.batch_size, + 'Transferred', + add_summaries) + + if add_summaries: + tf.summary.scalar('Task_Loss_Total', total_loss) + + return total_loss + + +def _transferred_similarity_loss(reconstructions, + source_images, + weight=1.0, + method='mse', + max_diff=0.4, + name='similarity'): + """Computes a loss encouraging similarity between source and transferred. + + Args: + reconstructions: A `Tensor` of shape [batch_size, height, width, channels] + source_images: A `Tensor` of shape [batch_size, height, width, channels]. + weight: Multiple similarity loss by this weight before returning + method: One of: + mpse = Mean Pairwise Squared Error + mse = Mean Squared Error + hinged_mse = Computes the mean squared error using squared differences + greater than hparams.transferred_similarity_max_diff + hinged_mae = Computes the mean absolute error using absolute + differences greater than hparams.transferred_similarity_max_diff. + max_diff: Maximum unpenalized difference for hinged losses + name: Identifying name to use for creating summaries + + + Returns: + A `Tensor` representing the transferred similarity loss. + + Raises: + ValueError: if `method` is not recognized. 
+ """ + if weight == 0: + return 0 + + source_channels = source_images.shape.as_list()[-1] + reconstruction_channels = reconstructions.shape.as_list()[-1] + + # Convert grayscale source to RGB if target is RGB + if source_channels == 1 and reconstruction_channels != 1: + source_images = tf.tile(source_images, [1, 1, 1, reconstruction_channels]) + if reconstruction_channels == 1 and source_channels != 1: + reconstructions = tf.tile(reconstructions, [1, 1, 1, source_channels]) + + if method == 'mpse': + reconstruction_similarity_loss_fn = ( + tf.contrib.losses.mean_pairwise_squared_error) + elif method == 'masked_mpse': + + def masked_mpse(predictions, labels, weight): + """Masked mpse assuming we have a depth to create a mask from.""" + assert labels.shape.as_list()[-1] == 4 + mask = tf.to_float(tf.less(labels[:, :, :, 3:4], 0.99)) + mask = tf.tile(mask, [1, 1, 1, 4]) + predictions *= mask + labels *= mask + tf.image_summary('masked_pred', predictions) + tf.image_summary('masked_label', labels) + return tf.contrib.losses.mean_pairwise_squared_error( + predictions, labels, weight) + + reconstruction_similarity_loss_fn = masked_mpse + elif method == 'mse': + reconstruction_similarity_loss_fn = tf.contrib.losses.mean_squared_error + elif method == 'hinged_mse': + + def hinged_mse(predictions, labels, weight): + diffs = tf.square(predictions - labels) + diffs = tf.maximum(0.0, diffs - max_diff) + return tf.reduce_mean(diffs) * weight + + reconstruction_similarity_loss_fn = hinged_mse + elif method == 'hinged_mae': + + def hinged_mae(predictions, labels, weight): + diffs = tf.abs(predictions - labels) + diffs = tf.maximum(0.0, diffs - max_diff) + return tf.reduce_mean(diffs) * weight + + reconstruction_similarity_loss_fn = hinged_mae + else: + raise ValueError('Unknown reconstruction loss %s' % method) + + reconstruction_similarity_loss = reconstruction_similarity_loss_fn( + reconstructions, source_images, weight) + + name = '%s_Similarity_(%s)' % (name, method) + tf.summary.scalar(name, reconstruction_similarity_loss) + return reconstruction_similarity_loss + + +def g_step_loss(source_images, source_labels, end_points, hparams, num_classes): + """Configures the loss function which runs during the g-step. + + Args: + source_images: A `Tensor` of shape [batch_size, height, width, channels]. + source_labels: A dictionary of `Tensors` of shape [batch_size]. Valid keys + are 'class' and 'quaternion'. + end_points: A map of the network end points. + hparams: The hyperparameters struct. + num_classes: Number of classes for classifier loss + + Returns: + A `Tensor` representing a loss function. + + Raises: + ValueError: if hparams.transferred_similarity_loss_weight is non-zero but + hparams.transferred_similarity_loss is invalid. + """ + generator_loss = 0 + + ################################################################ + # Adds a loss which encourages the discriminator probabilities # + # to be high (near one). + ################################################################ + + # As per the GAN paper, maximize the log probs, instead of minimizing + # log(1-probs). Since we're minimizing, we'll minimize -log(probs) which is + # the same thing. 
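+  # Concretely, sigmoid_cross_entropy with all-ones labels reduces to
+  # softplus(-logit) = -log(sigmoid(logit)) per example, i.e. the
+  # non-saturating generator objective: the generator is rewarded when the
+  # discriminator assigns the transferred images a high probability of
+  # belonging to the real target domain. (Illustration only, not part of the
+  # model: tf.losses.sigmoid_cross_entropy(tf.ones_like(z), z) equals
+  # tf.reduce_mean(tf.nn.softplus(-z)) under the default weights.)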
+ style_transfer_loss = tf.losses.sigmoid_cross_entropy( + logits=end_points['transferred_domain_logits'], + multi_class_labels=tf.ones_like(end_points['transferred_domain_logits']), + weights=hparams.style_transfer_loss_weight) + tf.summary.scalar('Style_transfer_loss', style_transfer_loss) + generator_loss += style_transfer_loss + + # Optimizes the style transfer network to produce transferred images similar + # to the source images. + generator_loss += _transferred_similarity_loss( + end_points['transferred_images'], + source_images, + weight=hparams.transferred_similarity_loss_weight, + method=hparams.transferred_similarity_loss, + name='transferred_similarity') + + # Optimizes the style transfer network to maximize classification accuracy. + if source_labels is not None and hparams.task_tower_in_g_step: + generator_loss += _add_task_specific_losses( + end_points, source_labels, num_classes, + hparams) * hparams.task_loss_in_g_weight + + return generator_loss + + +def d_step_loss(end_points, source_labels, num_classes, hparams): + """Configures the losses during the D-Step. + + Note that during the D-step, the model optimizes both the domain (binary) + classifier and the task classifier. + + Args: + end_points: A map of the network end points. + source_labels: A dictionary of output labels to `Tensors`. + num_classes: The number of classes used by the classifier. + hparams: The hyperparameters struct. + + Returns: + A `Tensor` representing the value of the D-step loss. + """ + domain_classifier_loss = add_domain_classifier_losses(end_points, hparams) + + task_classifier_loss = 0 + if source_labels is not None: + task_classifier_loss = _add_task_specific_losses( + end_points, source_labels, num_classes, hparams, add_summaries=True) + + return domain_classifier_loss + task_classifier_loss diff --git a/models/research/domain_adaptation/pixel_domain_adaptation/pixelda_model.py b/models/research/domain_adaptation/pixel_domain_adaptation/pixelda_model.py new file mode 100644 index 0000000000000000000000000000000000000000..16b550a62d88ec2724c91f9dab9e3b34c736ec4f --- /dev/null +++ b/models/research/domain_adaptation/pixel_domain_adaptation/pixelda_model.py @@ -0,0 +1,713 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains the Domain Adaptation via Style Transfer (PixelDA) model components. + +A number of details in the implementation make reference to one of the following +works: + +- "Unsupervised Representation Learning with Deep Convolutional + Generative Adversarial Networks"" + https://arxiv.org/abs/1511.06434 + +This paper makes several architecture recommendations: +1. Use strided convs in discriminator, fractional-strided convs in generator +2. batchnorm everywhere +3. remove fully connected layers for deep models +4. ReLu for all layers in generator, except tanh on output +5. 
LeakyReLu for everything in discriminator +""" +import functools +import math + +# Dependency imports +import numpy as np + +import tensorflow as tf + +slim = tf.contrib.slim + +from domain_adaptation.pixel_domain_adaptation import pixelda_task_towers + + +def create_model(hparams, + target_images, + source_images=None, + source_labels=None, + is_training=False, + noise=None, + num_classes=None): + """Create a GAN model. + + Arguments: + hparams: HParam object specifying model params + target_images: A `Tensor` of size [batch_size, height, width, channels]. It + is assumed that the images are [-1, 1] normalized. + source_images: A `Tensor` of size [batch_size, height, width, channels]. It + is assumed that the images are [-1, 1] normalized. + source_labels: A `Tensor` of size [batch_size] of categorical labels between + [0, num_classes] + is_training: whether model is currently training + noise: If None, model generates its own noise. Otherwise use provided. + num_classes: Number of classes for classification + + Returns: + end_points dict with model outputs + + Raises: + ValueError: unknown hparams.arch setting + """ + if num_classes is None and hparams.arch in ['resnet', 'simple']: + raise ValueError('Num classes must be provided to create task classifier') + + if target_images.dtype != tf.float32: + raise ValueError('target_images must be tf.float32 and [-1, 1] normalized.') + if source_images is not None and source_images.dtype != tf.float32: + raise ValueError('source_images must be tf.float32 and [-1, 1] normalized.') + + ########################### + # Create latent variables # + ########################### + latent_vars = dict() + + if hparams.noise_channel: + noise_shape = [hparams.batch_size, hparams.noise_dims] + if noise is not None: + assert noise.shape.as_list() == noise_shape + tf.logging.info('Using provided noise') + else: + tf.logging.info('Using random noise') + noise = tf.random_uniform( + shape=noise_shape, + minval=-1, + maxval=1, + dtype=tf.float32, + name='random_noise') + latent_vars['noise'] = noise + + #################### + # Create generator # + #################### + + with slim.arg_scope( + [slim.conv2d, slim.conv2d_transpose, slim.fully_connected], + normalizer_params=batch_norm_params(is_training, + hparams.batch_norm_decay), + weights_initializer=tf.random_normal_initializer( + stddev=hparams.normal_init_std), + weights_regularizer=tf.contrib.layers.l2_regularizer( + hparams.weight_decay)): + with slim.arg_scope([slim.conv2d], padding='SAME'): + if hparams.arch == 'dcgan': + end_points = dcgan( + target_images, latent_vars, hparams, scope='generator') + elif hparams.arch == 'resnet': + end_points = resnet_generator( + source_images, + target_images.shape.as_list()[1:4], + hparams=hparams, + latent_vars=latent_vars) + elif hparams.arch == 'residual_interpretation': + end_points = residual_interpretation_generator( + source_images, is_training=is_training, hparams=hparams) + elif hparams.arch == 'simple': + end_points = simple_generator( + source_images, + target_images, + is_training=is_training, + hparams=hparams, + latent_vars=latent_vars) + elif hparams.arch == 'identity': + # Pass through unmodified, besides changing # channels + # Used to calculate baseline numbers + # Also set `generator_steps=0` for baseline + if hparams.generator_steps: + raise ValueError('Must set generator_steps=0 for identity arch. 
Is %s' + % hparams.generator_steps) + transferred_images = source_images + source_channels = source_images.shape.as_list()[-1] + target_channels = target_images.shape.as_list()[-1] + if source_channels == 1 and target_channels == 3: + transferred_images = tf.tile(source_images, [1, 1, 1, 3]) + if source_channels == 3 and target_channels == 1: + transferred_images = tf.image.rgb_to_grayscale(source_images) + end_points = {'transferred_images': transferred_images} + else: + raise ValueError('Unknown architecture: %s' % hparams.arch) + + ##################### + # Domain Classifier # + ##################### + if hparams.arch in [ + 'dcgan', 'resnet', 'residual_interpretation', 'simple', 'identity', + ]: + + # Add a discriminator for these architectures + end_points['transferred_domain_logits'] = predict_domain( + end_points['transferred_images'], + hparams, + is_training=is_training, + reuse=False) + end_points['target_domain_logits'] = predict_domain( + target_images, + hparams, + is_training=is_training, + reuse=True) + + ################### + # Task Classifier # + ################### + if hparams.task_tower != 'none' and hparams.arch in [ + 'resnet', 'residual_interpretation', 'simple', 'identity', + ]: + with tf.variable_scope('discriminator'): + with tf.variable_scope('task_tower'): + end_points['source_task_logits'], end_points[ + 'source_quaternion'] = pixelda_task_towers.add_task_specific_model( + source_images, + hparams, + num_classes=num_classes, + is_training=is_training, + reuse_private=False, + private_scope='source_task_classifier', + reuse_shared=False) + end_points['transferred_task_logits'], end_points[ + 'transferred_quaternion'] = ( + pixelda_task_towers.add_task_specific_model( + end_points['transferred_images'], + hparams, + num_classes=num_classes, + is_training=is_training, + reuse_private=False, + private_scope='transferred_task_classifier', + reuse_shared=True)) + end_points['target_task_logits'], end_points[ + 'target_quaternion'] = pixelda_task_towers.add_task_specific_model( + target_images, + hparams, + num_classes=num_classes, + is_training=is_training, + reuse_private=True, + private_scope='transferred_task_classifier', + reuse_shared=True) + # Remove any endpoints with None values + return dict((k, v) for k, v in end_points.iteritems() if v is not None) + + +def batch_norm_params(is_training, batch_norm_decay): + return { + 'is_training': is_training, + # Decay for the moving averages. + 'decay': batch_norm_decay, + # epsilon to prevent 0s in variance. + 'epsilon': 0.001, + } + + +def lrelu(x, leakiness=0.2): + """Relu, with optional leaky support.""" + return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu') + + +def upsample(net, num_filters, scale=2, method='resize_conv', scope=None): + """Performs spatial upsampling of the given features. + + Args: + net: A `Tensor` of shape [batch_size, height, width, filters]. + num_filters: The number of output filters. + scale: The scale of the upsampling. Must be a positive integer greater or + equal to two. + method: The method by which the features are upsampled. Valid options + include 'resize_conv' and 'conv2d_transpose'. + scope: An optional variable scope. + + Returns: + A new set of features of shape + [batch_size, height*scale, width*scale, num_filters]. 
+ + Raises: + ValueError: if `method` is not valid or + """ + if scale < 2: + raise ValueError('scale must be greater or equal to two.') + + with tf.variable_scope(scope, 'upsample', [net]): + if method == 'resize_conv': + net = tf.image.resize_nearest_neighbor( + net, [net.shape.as_list()[1] * scale, + net.shape.as_list()[2] * scale], + align_corners=True, + name='resize') + return slim.conv2d(net, num_filters, stride=1, scope='conv') + elif method == 'conv2d_transpose': + return slim.conv2d_transpose(net, num_filters, scope='deconv') + else: + raise ValueError('Upsample method [%s] was not recognized.' % method) + + +def project_latent_vars(hparams, proj_shape, latent_vars, combine_method='sum'): + """Generate noise and project to input volume size. + + Args: + hparams: The hyperparameter HParams struct. + proj_shape: Shape to project noise (not including batch size). + latent_vars: dictionary of `'key': Tensor of shape [batch_size, N]` + combine_method: How to combine the projected values. + sum = project to volume then sum + concat = concatenate along last dimension (i.e. channel) + + Returns: + If combine_method=sum, a `Tensor` of size `hparams.projection_shape` + If combine_method=concat and there are N latent vars, a `Tensor` of size + `hparams.projection_shape`, with the last channel multiplied by N + + + Raises: + ValueError: combine_method is not one of sum/concat + """ + values = [] + for var in latent_vars: + with tf.variable_scope(var): + # Project & reshape noise to a HxWxC input + projected = slim.fully_connected( + latent_vars[var], + np.prod(proj_shape), + activation_fn=tf.nn.relu, + normalizer_fn=slim.batch_norm) + values.append(tf.reshape(projected, [hparams.batch_size] + proj_shape)) + + if combine_method == 'sum': + result = values[0] + for value in values[1:]: + result += value + elif combine_method == 'concat': + # Concatenate along last axis + result = tf.concat(values, len(proj_shape)) + else: + raise ValueError('Unknown combine_method %s' % combine_method) + + tf.logging.info('Latent variables projected to size %s volume', result.shape) + + return result + + +def resnet_block(net, hparams): + """Create a resnet block.""" + net_in = net + net = slim.conv2d( + net, + hparams.resnet_filters, + stride=1, + normalizer_fn=slim.batch_norm, + activation_fn=tf.nn.relu) + net = slim.conv2d( + net, + hparams.resnet_filters, + stride=1, + normalizer_fn=slim.batch_norm, + activation_fn=None) + if hparams.resnet_residuals: + net += net_in + return net + + +def resnet_stack(images, output_shape, hparams, scope=None): + """Create a resnet style transfer block. + + Args: + images: [batch-size, height, width, channels] image tensor to feed as input + output_shape: output image shape in form [height, width, channels] + hparams: hparams objects + scope: Variable scope + + Returns: + Images after processing with resnet blocks. 
+ """ + end_points = {} + if hparams.noise_channel: + # separate the noise for visualization + end_points['noise'] = images[:, :, :, -1] + assert images.shape.as_list()[1:3] == output_shape[0:2] + + with tf.variable_scope(scope, 'resnet_style_transfer', [images]): + with slim.arg_scope( + [slim.conv2d], + normalizer_fn=slim.batch_norm, + kernel_size=[hparams.generator_kernel_size] * 2, + stride=1): + net = slim.conv2d( + images, + hparams.resnet_filters, + normalizer_fn=None, + activation_fn=tf.nn.relu) + for block in range(hparams.resnet_blocks): + net = resnet_block(net, hparams) + end_points['resnet_block_{}'.format(block)] = net + + net = slim.conv2d( + net, + output_shape[-1], + kernel_size=[1, 1], + normalizer_fn=None, + activation_fn=tf.nn.tanh, + scope='conv_out') + end_points['transferred_images'] = net + return net, end_points + + +def predict_domain(images, + hparams, + is_training=False, + reuse=False, + scope='discriminator'): + """Creates a discriminator for a GAN. + + Args: + images: A `Tensor` of size [batch_size, height, width, channels]. It is + assumed that the images are centered between -1 and 1. + hparams: hparam object with params for discriminator + is_training: Specifies whether or not we're training or testing. + reuse: Whether to reuse variable scope + scope: An optional variable_scope. + + Returns: + [batch size, 1] - logit output of discriminator. + """ + with tf.variable_scope(scope, 'discriminator', [images], reuse=reuse): + lrelu_partial = functools.partial(lrelu, leakiness=hparams.lrelu_leakiness) + with slim.arg_scope( + [slim.conv2d], + kernel_size=[hparams.discriminator_kernel_size] * 2, + activation_fn=lrelu_partial, + stride=2, + normalizer_fn=slim.batch_norm): + + def add_noise(hidden, scope_num=None): + if scope_num: + hidden = slim.dropout( + hidden, + hparams.discriminator_dropout_keep_prob, + is_training=is_training, + scope='dropout_%s' % scope_num) + if hparams.discriminator_noise_stddev == 0: + return hidden + return hidden + tf.random_normal( + hidden.shape.as_list(), + mean=0.0, + stddev=hparams.discriminator_noise_stddev) + + # As per the recommendation of the DCGAN paper, we don't use batch norm + # on the discriminator input (https://arxiv.org/pdf/1511.06434v2.pdf). 
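+      # If enabled, the input images are perturbed with additive Gaussian
+      # noise (add_noise() above skips dropout here since no scope_num is
+      # passed). Injecting noise at the discriminator input is a commonly
+      # used trick to keep the discriminator from overpowering the generator.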
+ if hparams.discriminator_image_noise: + images = add_noise(images) + net = slim.conv2d( + images, + hparams.num_discriminator_filters, + normalizer_fn=None, + stride=hparams.discriminator_first_stride, + scope='conv1_stride%s' % hparams.discriminator_first_stride) + net = add_noise(net, 1) + + block_id = 2 + # Repeatedly stack + # discriminator_conv_block_size-1 conv layers with stride 1 + # followed by a stride 2 layer + # Add (optional) noise at every point + while net.shape.as_list()[1] > hparams.projection_shape_size: + num_filters = int(hparams.num_discriminator_filters * + (hparams.discriminator_filter_factor**(block_id - 1))) + for conv_id in range(1, hparams.discriminator_conv_block_size): + net = slim.conv2d( + net, + num_filters, + stride=1, + scope='conv_%s_%s' % (block_id, conv_id)) + if hparams.discriminator_do_pooling: + net = slim.conv2d( + net, num_filters, scope='conv_%s_prepool' % block_id) + net = slim.avg_pool2d( + net, kernel_size=[2, 2], stride=2, scope='pool_%s' % block_id) + else: + net = slim.conv2d( + net, num_filters, scope='conv_%s_stride2' % block_id) + net = add_noise(net, block_id) + block_id += 1 + net = slim.flatten(net) + net = slim.fully_connected( + net, + 1, + # Models with BN here generally produce noise + normalizer_fn=None, + activation_fn=None, + scope='fc_logit_out') # Returns logits! + return net + + +def dcgan_generator(images, output_shape, hparams, scope=None): + """Transforms the visual style of the input images. + + Args: + images: A `Tensor` of shape [batch_size, height, width, channels]. + output_shape: A list or tuple of 3 elements: the output height, width and + number of channels. + hparams: hparams object with generator parameters + scope: Scope to place generator inside + + Returns: + A `Tensor` of shape [batch_size, height, width, output_channels] which + represents the result of style transfer. + + Raises: + ValueError: If `output_shape` is not a list or tuple or if it doesn't have + three elements or if `output_shape` or `images` arent square. + """ + if not isinstance(output_shape, (tuple, list)): + raise ValueError('output_shape must be a tuple or list.') + elif len(output_shape) != 3: + raise ValueError('output_shape must have three elements.') + + if output_shape[0] != output_shape[1]: + raise ValueError('output_shape must be square') + if images.shape.as_list()[1] != images.shape.as_list()[2]: + raise ValueError('images height and width must match.') + + outdim = output_shape[0] + indim = images.shape.as_list()[1] + num_iterations = int(math.ceil(math.log(float(outdim) / float(indim), 2.0))) + + with slim.arg_scope( + [slim.conv2d, slim.conv2d_transpose], + kernel_size=[hparams.generator_kernel_size] * 2, + stride=2): + with tf.variable_scope(scope or 'generator'): + + net = images + + # Repeatedly halve # filters until = hparams.decode_filters in last layer + for i in range(num_iterations): + num_filters = hparams.num_decoder_filters * 2**(num_iterations - i - 1) + net = slim.conv2d_transpose(net, num_filters, scope='deconv_%s' % i) + + # Crop down to desired size (e.g. 32x32 -> 28x28) + dif = net.shape.as_list()[1] - outdim + low = dif / 2 + high = net.shape.as_list()[1] - low + net = net[:, low:high, low:high, :] + + # No batch norm on generator output + net = slim.conv2d( + net, + output_shape[2], + kernel_size=[1, 1], + stride=1, + normalizer_fn=None, + activation_fn=tf.tanh, + scope='conv_out') + return net + + +def dcgan(target_images, latent_vars, hparams, scope='dcgan'): + """Creates the PixelDA model. 
+ + Args: + target_images: A `Tensor` of shape [batch_size, height, width, 3] + sampled from the image domain to which we want to transfer. + latent_vars: dictionary of 'key': Tensor of shape [batch_size, N] + hparams: The hyperparameter map. + scope: Surround generator component with this scope + + Returns: + A dictionary of model outputs. + """ + proj_shape = [ + hparams.projection_shape_size, hparams.projection_shape_size, + hparams.projection_shape_channels + ] + source_volume = project_latent_vars( + hparams, proj_shape, latent_vars, combine_method='concat') + + ################################################### + # Transfer the source images to the target style. # + ################################################### + with tf.variable_scope(scope, 'generator', [target_images]): + transferred_images = dcgan_generator( + source_volume, + output_shape=target_images.shape.as_list()[1:4], + hparams=hparams) + assert transferred_images.shape.as_list() == target_images.shape.as_list() + + return {'transferred_images': transferred_images} + + +def resnet_generator(images, output_shape, hparams, latent_vars=None): + """Creates a ResNet-based generator. + + Args: + images: A `Tensor` of shape [batch_size, height, width, num_channels] + sampled from the image domain from which we want to transfer + output_shape: A length-3 array indicating the height, width and channels of + the output. + hparams: The hyperparameter map. + latent_vars: dictionary of 'key': Tensor of shape [batch_size, N] + + Returns: + A dictionary of model outputs. + """ + with tf.variable_scope('generator'): + if latent_vars: + noise_channel = project_latent_vars( + hparams, + proj_shape=images.shape.as_list()[1:3] + [1], + latent_vars=latent_vars, + combine_method='concat') + images = tf.concat([images, noise_channel], 3) + + transferred_images, end_points = resnet_stack( + images, + output_shape=output_shape, + hparams=hparams, + scope='resnet_stack') + end_points['transferred_images'] = transferred_images + + return end_points + + +def residual_interpretation_block(images, hparams, scope): + """Learns a residual image which is added to the incoming image. + + Args: + images: A `Tensor` of size [batch_size, height, width, 3] + hparams: The hyperparameters struct. + scope: The name of the variable op scope. + + Returns: + The updated images. + """ + with tf.variable_scope(scope): + with slim.arg_scope( + [slim.conv2d], + normalizer_fn=None, + kernel_size=[hparams.generator_kernel_size] * 2): + + net = images + for _ in range(hparams.res_int_convs): + net = slim.conv2d( + net, hparams.res_int_filters, activation_fn=tf.nn.relu) + net = slim.conv2d(net, 3, activation_fn=tf.nn.tanh) + + # Add the residual + images += net + + # Clip the output + images = tf.maximum(images, -1.0) + images = tf.minimum(images, 1.0) + return images + + +def residual_interpretation_generator(images, + is_training, + hparams, + latent_vars=None): + """Creates a generator producing purely residual transformations. + + A residual generator differs from the resnet generator in that each 'block' of + the residual generator produces a residual image. Consequently, the 'progress' + of the model generation process can be directly observed at inference time, + making it easier to diagnose and understand. + + Args: + images: A `Tensor` of shape [batch_size, height, width, num_channels] + sampled from the image domain from which we want to transfer. It is + assumed that the images are centered between -1 and 1. 
+ is_training: whether or not the model is training. + hparams: The hyperparameter map. + latent_vars: dictionary of 'key': Tensor of shape [batch_size, N] + + Returns: + A dictionary of model outputs. + """ + end_points = {} + + with tf.variable_scope('generator'): + if latent_vars: + projected_latent = project_latent_vars( + hparams, + proj_shape=images.shape.as_list()[1:3] + [images.shape.as_list()[-1]], + latent_vars=latent_vars, + combine_method='sum') + images += projected_latent + with tf.variable_scope(None, 'residual_style_transfer', [images]): + for i in range(hparams.res_int_blocks): + images = residual_interpretation_block(images, hparams, + 'residual_%d' % i) + end_points['transferred_images_%d' % i] = images + + end_points['transferred_images'] = images + + return end_points + + +def simple_generator(source_images, target_images, is_training, hparams, + latent_vars): + """Simple generator architecture (stack of convs) for trying small models.""" + end_points = {} + with tf.variable_scope('generator'): + feed_source_images = source_images + + if latent_vars: + projected_latent = project_latent_vars( + hparams, + proj_shape=source_images.shape.as_list()[1:3] + [1], + latent_vars=latent_vars, + combine_method='concat') + feed_source_images = tf.concat([source_images, projected_latent], 3) + + end_points = {} + + ################################################### + # Transfer the source images to the target style. # + ################################################### + with slim.arg_scope( + [slim.conv2d], + normalizer_fn=slim.batch_norm, + stride=1, + kernel_size=[hparams.generator_kernel_size] * 2): + net = feed_source_images + + # N convolutions + for i in range(1, hparams.simple_num_conv_layers): + normalizer_fn = None + if i != 0: + normalizer_fn = slim.batch_norm + net = slim.conv2d( + net, + hparams.simple_conv_filters, + normalizer_fn=normalizer_fn, + activation_fn=tf.nn.relu) + + # Project back to right # image channels + net = slim.conv2d( + net, + target_images.shape.as_list()[-1], + kernel_size=[1, 1], + stride=1, + normalizer_fn=None, + activation_fn=tf.tanh, + scope='conv_out') + + transferred_images = net + assert transferred_images.shape.as_list() == target_images.shape.as_list() + end_points['transferred_images'] = transferred_images + + return end_points diff --git a/models/research/domain_adaptation/pixel_domain_adaptation/pixelda_preprocess.py b/models/research/domain_adaptation/pixel_domain_adaptation/pixelda_preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..747c17b18bf007d85e606015da6687a343bf74d2 --- /dev/null +++ b/models/research/domain_adaptation/pixel_domain_adaptation/pixelda_preprocess.py @@ -0,0 +1,129 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Contains functions for preprocessing the inputs.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# Dependency imports + +import tensorflow as tf + + +def preprocess_classification(image, labels, is_training=False): + """Preprocesses the image and labels for classification purposes. + + Preprocessing includes shifting the images to be 0-centered between -1 and 1. + This is not only a popular method of preprocessing (inception) but is also + the mechanism used by DSNs. + + Args: + image: A `Tensor` of size [height, width, 3]. + labels: A dictionary of labels. + is_training: Whether or not we're training the model. + + Returns: + The preprocessed image and labels. + """ + # If the image is uint8, this will scale it to 0-1. + image = tf.image.convert_image_dtype(image, tf.float32) + image -= 0.5 + image *= 2 + + return image, labels + + +def preprocess_style_transfer(image, + labels, + augment=False, + size=None, + is_training=False): + """Preprocesses the image and labels for style transfer purposes. + + Args: + image: A `Tensor` of size [height, width, 3]. + labels: A dictionary of labels. + augment: Whether to apply data augmentation to inputs + size: The height and width to which images should be resized. If left as + `None`, then no resizing is performed + is_training: Whether or not we're training the model + + Returns: + The preprocessed image and labels. Scaled to [-1, 1] + """ + # If the image is uint8, this will scale it to 0-1. + image = tf.image.convert_image_dtype(image, tf.float32) + if augment and is_training: + image = image_augmentation(image) + + if size: + image = resize_image(image, size) + + image -= 0.5 + image *= 2 + + return image, labels + + +def image_augmentation(image): + """Performs data augmentation by randomly permuting the inputs. + + Args: + image: A float `Tensor` of size [height, width, channels] with values + in range[0,1]. + + Returns: + The mutated batch of images + """ + # Apply photometric data augmentation (contrast etc.) + num_channels = image.shape_as_list()[-1] + if num_channels == 4: + # Only augment image part + image, depth = image[:, :, 0:3], image[:, :, 3:4] + elif num_channels == 1: + image = tf.image.grayscale_to_rgb(image) + image = tf.image.random_brightness(image, max_delta=0.1) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.032) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + image = tf.clip_by_value(image, 0, 1.0) + if num_channels == 4: + image = tf.concat(2, [image, depth]) + elif num_channels == 1: + image = tf.image.rgb_to_grayscale(image) + return image + + +def resize_image(image, size=None): + """Resize image to target size. + + Args: + image: A `Tensor` of size [height, width, 3]. + size: (height, width) to resize image to. 
+ + Returns: + resized image + """ + if size is None: + raise ValueError('Must specify size') + + if image.shape_as_list()[:2] == size: + # Don't resize if not necessary + return image + image = tf.expand_dims(image, 0) + image = tf.image.resize_images(image, size) + image = tf.squeeze(image, 0) + return image diff --git a/models/research/domain_adaptation/pixel_domain_adaptation/pixelda_preprocess_test.py b/models/research/domain_adaptation/pixel_domain_adaptation/pixelda_preprocess_test.py new file mode 100644 index 0000000000000000000000000000000000000000..73f8c7ff05fc7d2614c419759a02f78ffbcdfec0 --- /dev/null +++ b/models/research/domain_adaptation/pixel_domain_adaptation/pixelda_preprocess_test.py @@ -0,0 +1,69 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for domain_adaptation.pixel_domain_adaptation.pixelda_preprocess.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# Dependency imports + +import tensorflow as tf + +from domain_adaptation.pixel_domain_adaptation import pixelda_preprocess + + +class PixelDAPreprocessTest(tf.test.TestCase): + + def assert_preprocess_classification_is_centered(self, dtype, is_training): + tf.set_random_seed(0) + + if dtype == tf.uint8: + image = tf.random_uniform((100, 200, 3), maxval=255, dtype=tf.int64) + image = tf.cast(image, tf.uint8) + else: + image = tf.random_uniform((100, 200, 3), maxval=1.0, dtype=dtype) + + labels = {} + image, labels = pixelda_preprocess.preprocess_classification( + image, labels, is_training=is_training) + + with self.test_session() as sess: + np_image = sess.run(image) + + self.assertTrue(np_image.min() <= -0.95) + self.assertTrue(np_image.min() >= -1.0) + self.assertTrue(np_image.max() >= 0.95) + self.assertTrue(np_image.max() <= 1.0) + + def testPreprocessClassificationZeroCentersUint8DuringTrain(self): + self.assert_preprocess_classification_is_centered( + tf.uint8, is_training=True) + + def testPreprocessClassificationZeroCentersUint8DuringTest(self): + self.assert_preprocess_classification_is_centered( + tf.uint8, is_training=False) + + def testPreprocessClassificationZeroCentersFloatDuringTrain(self): + self.assert_preprocess_classification_is_centered( + tf.float32, is_training=True) + + def testPreprocessClassificationZeroCentersFloatDuringTest(self): + self.assert_preprocess_classification_is_centered( + tf.float32, is_training=False) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/domain_adaptation/pixel_domain_adaptation/pixelda_task_towers.py b/models/research/domain_adaptation/pixel_domain_adaptation/pixelda_task_towers.py new file mode 100644 index 0000000000000000000000000000000000000000..1cb42e2d890a7759318cf0981640c0dd1645461e --- /dev/null +++ b/models/research/domain_adaptation/pixel_domain_adaptation/pixelda_task_towers.py @@ -0,0 +1,317 @@ +# Copyright 2017 Google Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Task towers for PixelDA model.""" +import tensorflow as tf + +slim = tf.contrib.slim + + +def add_task_specific_model(images, + hparams, + num_classes=10, + is_training=False, + reuse_private=False, + private_scope=None, + reuse_shared=False, + shared_scope=None): + """Create a classifier for the given images. + + The classifier is composed of a few 'private' layers followed by a few + 'shared' layers. This lets us account for different image 'style', while + sharing the last few layers as 'content' layers. + + Args: + images: A `Tensor` of size [batch_size, height, width, 3]. + hparams: model hparams + num_classes: The number of output classes. + is_training: whether model is training + reuse_private: Whether or not to reuse the private weights, which are the + first few layers in the classifier + private_scope: The name of the variable_scope for the private (unshared) + components of the classifier. + reuse_shared: Whether or not to reuse the shared weights, which are the last + few layers in the classifier + shared_scope: The name of the variable_scope for the shared components of + the classifier. + + Returns: + The logits, a `Tensor` of shape [batch_size, num_classes]. + + Raises: + ValueError: If hparams.task_classifier is an unknown value + """ + + model = hparams.task_tower + # Make sure the classifier name shows up in graph + shared_scope = shared_scope or (model + '_shared') + kwargs = { + 'num_classes': num_classes, + 'is_training': is_training, + 'reuse_private': reuse_private, + 'reuse_shared': reuse_shared, + } + + if private_scope: + kwargs['private_scope'] = private_scope + if shared_scope: + kwargs['shared_scope'] = shared_scope + + quaternion_pred = None + with slim.arg_scope( + [slim.conv2d, slim.fully_connected], + activation_fn=tf.nn.relu, + weights_regularizer=tf.contrib.layers.l2_regularizer( + hparams.weight_decay_task_classifier)): + with slim.arg_scope([slim.conv2d], padding='SAME'): + if model == 'doubling_pose_estimator': + logits, quaternion_pred = doubling_cnn_class_and_quaternion( + images, num_private_layers=hparams.num_private_layers, **kwargs) + elif model == 'mnist': + logits, _ = mnist_classifier(images, **kwargs) + elif model == 'svhn': + logits, _ = svhn_classifier(images, **kwargs) + elif model == 'gtsrb': + logits, _ = gtsrb_classifier(images, **kwargs) + elif model == 'pose_mini': + logits, quaternion_pred = pose_mini_tower(images, **kwargs) + else: + raise ValueError('Unknown task classifier %s' % model) + + return logits, quaternion_pred + + +##################################### +# Classifiers used in the DSN paper # +##################################### + + +def mnist_classifier(images, + is_training=False, + num_classes=10, + reuse_private=False, + private_scope='mnist', + reuse_shared=False, + shared_scope='task_model'): + """Creates the convolutional MNIST model from the gradient reversal paper. 
+ + Note that since the output is a set of 'logits', the values fall in the + interval of (-infinity, infinity). Consequently, to convert the outputs to a + probability distribution over the characters, one will need to convert them + using the softmax function: + logits, endpoints = conv_mnist(images, is_training=False) + predictions = tf.nn.softmax(logits) + + Args: + images: the MNIST digits, a tensor of size [batch_size, 28, 28, 1]. + is_training: specifies whether or not we're currently training the model. + This variable will determine the behaviour of the dropout layer. + num_classes: the number of output classes to use. + + Returns: + the output logits, a tensor of size [batch_size, num_classes]. + a dictionary with key/values the layer names and tensors. + """ + + net = {} + + with tf.variable_scope(private_scope, reuse=reuse_private): + net['conv1'] = slim.conv2d(images, 32, [5, 5], scope='conv1') + net['pool1'] = slim.max_pool2d(net['conv1'], [2, 2], 2, scope='pool1') + + with tf.variable_scope(shared_scope, reuse=reuse_shared): + net['conv2'] = slim.conv2d(net['pool1'], 48, [5, 5], scope='conv2') + net['pool2'] = slim.max_pool2d(net['conv2'], [2, 2], 2, scope='pool2') + net['fc3'] = slim.fully_connected( + slim.flatten(net['pool2']), 100, scope='fc3') + net['fc4'] = slim.fully_connected( + slim.flatten(net['fc3']), 100, scope='fc4') + logits = slim.fully_connected( + net['fc4'], num_classes, activation_fn=None, scope='fc5') + return logits, net + + +def svhn_classifier(images, + is_training=False, + num_classes=10, + reuse_private=False, + private_scope=None, + reuse_shared=False, + shared_scope='task_model'): + """Creates the convolutional SVHN model from the gradient reversal paper. + + Note that since the output is a set of 'logits', the values fall in the + interval of (-infinity, infinity). Consequently, to convert the outputs to a + probability distribution over the characters, one will need to convert them + using the softmax function: + logits = mnist.Mnist(images, is_training=False) + predictions = tf.nn.softmax(logits) + + Args: + images: the SVHN digits, a tensor of size [batch_size, 40, 40, 3]. + is_training: specifies whether or not we're currently training the model. + This variable will determine the behaviour of the dropout layer. + num_classes: the number of output classes to use. + + Returns: + the output logits, a tensor of size [batch_size, num_classes]. + a dictionary with key/values the layer names and tensors. + """ + + net = {} + + with tf.variable_scope(private_scope, reuse=reuse_private): + net['conv1'] = slim.conv2d(images, 64, [5, 5], scope='conv1') + net['pool1'] = slim.max_pool2d(net['conv1'], [3, 3], 2, scope='pool1') + + with tf.variable_scope(shared_scope, reuse=reuse_shared): + net['conv2'] = slim.conv2d(net['pool1'], 64, [5, 5], scope='conv2') + net['pool2'] = slim.max_pool2d(net['conv2'], [3, 3], 2, scope='pool2') + net['conv3'] = slim.conv2d(net['pool2'], 128, [5, 5], scope='conv3') + + net['fc3'] = slim.fully_connected( + slim.flatten(net['conv3']), 3072, scope='fc3') + net['fc4'] = slim.fully_connected( + slim.flatten(net['fc3']), 2048, scope='fc4') + + logits = slim.fully_connected( + net['fc4'], num_classes, activation_fn=None, scope='fc5') + + return logits, net + + +def gtsrb_classifier(images, + is_training=False, + num_classes=43, + reuse_private=False, + private_scope='gtsrb', + reuse_shared=False, + shared_scope='task_model'): + """Creates the convolutional GTSRB model from the gradient reversal paper. 
+ + Note that since the output is a set of 'logits', the values fall in the + interval of (-infinity, infinity). Consequently, to convert the outputs to a + probability distribution over the characters, one will need to convert them + using the softmax function: + logits = mnist.Mnist(images, is_training=False) + predictions = tf.nn.softmax(logits) + + Args: + images: the SVHN digits, a tensor of size [batch_size, 40, 40, 3]. + is_training: specifies whether or not we're currently training the model. + This variable will determine the behaviour of the dropout layer. + num_classes: the number of output classes to use. + reuse_private: Whether or not to reuse the private components of the model. + private_scope: The name of the private scope. + reuse_shared: Whether or not to reuse the shared components of the model. + shared_scope: The name of the shared scope. + + Returns: + the output logits, a tensor of size [batch_size, num_classes]. + a dictionary with key/values the layer names and tensors. + """ + + net = {} + + with tf.variable_scope(private_scope, reuse=reuse_private): + net['conv1'] = slim.conv2d(images, 96, [5, 5], scope='conv1') + net['pool1'] = slim.max_pool2d(net['conv1'], [2, 2], 2, scope='pool1') + with tf.variable_scope(shared_scope, reuse=reuse_shared): + net['conv2'] = slim.conv2d(net['pool1'], 144, [3, 3], scope='conv2') + net['pool2'] = slim.max_pool2d(net['conv2'], [2, 2], 2, scope='pool2') + net['conv3'] = slim.conv2d(net['pool2'], 256, [5, 5], scope='conv3') + net['pool3'] = slim.max_pool2d(net['conv3'], [2, 2], 2, scope='pool3') + + net['fc3'] = slim.fully_connected( + slim.flatten(net['pool3']), 512, scope='fc3') + logits = slim.fully_connected( + net['fc3'], num_classes, activation_fn=None, scope='fc4') + + return logits, net + + +######################### +# pose_mini task towers # +######################### + + +def pose_mini_tower(images, + num_classes=11, + is_training=False, + reuse_private=False, + private_scope='pose_mini', + reuse_shared=False, + shared_scope='task_model'): + """Task tower for the pose_mini dataset.""" + + with tf.variable_scope(private_scope, reuse=reuse_private): + net = slim.conv2d(images, 32, [5, 5], scope='conv1') + net = slim.max_pool2d(net, [2, 2], stride=2, scope='pool1') + with tf.variable_scope(shared_scope, reuse=reuse_shared): + net = slim.conv2d(net, 64, [5, 5], scope='conv2') + net = slim.max_pool2d(net, [2, 2], stride=2, scope='pool2') + net = slim.flatten(net) + + net = slim.fully_connected(net, 128, scope='fc3') + net = slim.dropout(net, 0.5, is_training=is_training, scope='dropout') + with tf.variable_scope('quaternion_prediction'): + quaternion_pred = slim.fully_connected( + net, 4, activation_fn=tf.tanh, scope='fc_q') + quaternion_pred = tf.nn.l2_normalize(quaternion_pred, 1) + + logits = slim.fully_connected( + net, num_classes, activation_fn=None, scope='fc4') + + return logits, quaternion_pred + + +def doubling_cnn_class_and_quaternion(images, + num_private_layers=1, + num_classes=10, + is_training=False, + reuse_private=False, + private_scope='doubling_cnn', + reuse_shared=False, + shared_scope='task_model'): + """Alternate conv, pool while doubling filter count.""" + net = images + depth = 32 + layer_id = 1 + + with tf.variable_scope(private_scope, reuse=reuse_private): + while num_private_layers > 0 and net.shape.as_list()[1] > 5: + net = slim.conv2d(net, depth, [3, 3], scope='conv%s' % layer_id) + net = slim.max_pool2d(net, [2, 2], stride=2, scope='pool%s' % layer_id) + depth *= 2 + layer_id += 1 + 
num_private_layers -= 1 + + with tf.variable_scope(shared_scope, reuse=reuse_shared): + while net.shape.as_list()[1] > 5: + net = slim.conv2d(net, depth, [3, 3], scope='conv%s' % layer_id) + net = slim.max_pool2d(net, [2, 2], stride=2, scope='pool%s' % layer_id) + depth *= 2 + layer_id += 1 + + net = slim.flatten(net) + net = slim.fully_connected(net, 100, scope='fc1') + net = slim.dropout(net, 0.5, is_training=is_training, scope='dropout') + quaternion_pred = slim.fully_connected( + net, 4, activation_fn=tf.tanh, scope='fc_q') + quaternion_pred = tf.nn.l2_normalize(quaternion_pred, 1) + + logits = slim.fully_connected( + net, num_classes, activation_fn=None, scope='fc_logits') + + return logits, quaternion_pred diff --git a/models/research/domain_adaptation/pixel_domain_adaptation/pixelda_train.py b/models/research/domain_adaptation/pixel_domain_adaptation/pixelda_train.py new file mode 100644 index 0000000000000000000000000000000000000000..4ca072cceafa48769623381b8e564fe650f2a514 --- /dev/null +++ b/models/research/domain_adaptation/pixel_domain_adaptation/pixelda_train.py @@ -0,0 +1,409 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Trains the PixelDA model.""" + +from functools import partial +import os + +# Dependency imports + +import tensorflow as tf + +from domain_adaptation.datasets import dataset_factory +from domain_adaptation.pixel_domain_adaptation import pixelda_losses +from domain_adaptation.pixel_domain_adaptation import pixelda_model +from domain_adaptation.pixel_domain_adaptation import pixelda_preprocess +from domain_adaptation.pixel_domain_adaptation import pixelda_utils +from domain_adaptation.pixel_domain_adaptation.hparams import create_hparams + +slim = tf.contrib.slim + +flags = tf.app.flags +FLAGS = flags.FLAGS + +flags.DEFINE_string('master', '', 'BNS name of the TensorFlow master to use.') + +flags.DEFINE_integer( + 'ps_tasks', 0, + 'The number of parameter servers. If the value is 0, then the parameters ' + 'are handled locally by the worker.') + +flags.DEFINE_integer( + 'task', 0, + 'The Task ID. This value is used when training with multiple workers to ' + 'identify each worker.') + +flags.DEFINE_string('train_log_dir', '/tmp/pixelda/', + 'Directory where to write event logs.') + +flags.DEFINE_integer( + 'save_summaries_steps', 500, + 'The frequency with which summaries are saved, in seconds.') + +flags.DEFINE_integer('save_interval_secs', 300, + 'The frequency with which the model is saved, in seconds.') + +flags.DEFINE_boolean('summarize_gradients', False, + 'Whether to summarize model gradients') + +flags.DEFINE_integer( + 'print_loss_steps', 100, + 'The frequency with which the losses are printed, in steps.') + +flags.DEFINE_string('source_dataset', 'mnist', 'The name of the source dataset.' 
+ ' If hparams="arch=dcgan", this flag is ignored.') + +flags.DEFINE_string('target_dataset', 'mnist_m', + 'The name of the target dataset.') + +flags.DEFINE_string('source_split_name', 'train', + 'Name of the train split for the source.') + +flags.DEFINE_string('target_split_name', 'train', + 'Name of the train split for the target.') + +flags.DEFINE_string('dataset_dir', '', + 'The directory where the datasets can be found.') + +flags.DEFINE_integer( + 'num_readers', 4, + 'The number of parallel readers that read data from the dataset.') + +flags.DEFINE_integer('num_preprocessing_threads', 4, + 'The number of threads used to create the batches.') + +# HParams + +flags.DEFINE_string('hparams', '', 'Comma separated hyperparameter values') + + +def _get_vars_and_update_ops(hparams, scope): + """Returns the variables and update ops for a particular variable scope. + + Args: + hparams: The hyperparameters struct. + scope: The variable scope. + + Returns: + A tuple consisting of trainable variables and update ops. + """ + is_trainable = lambda x: x in tf.trainable_variables() + var_list = filter(is_trainable, slim.get_model_variables(scope)) + global_step = slim.get_or_create_global_step() + + update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope) + + tf.logging.info('All variables for scope: %s', + slim.get_model_variables(scope)) + tf.logging.info('Trainable variables for scope: %s', var_list) + + return var_list, update_ops + + +def _train(discriminator_train_op, + generator_train_op, + logdir, + master='', + is_chief=True, + scaffold=None, + hooks=None, + chief_only_hooks=None, + save_checkpoint_secs=600, + save_summaries_steps=100, + hparams=None): + """Runs the training loop. + + Args: + discriminator_train_op: A `Tensor` that, when executed, will apply the + gradients and return the loss value for the discriminator. + generator_train_op: A `Tensor` that, when executed, will apply the + gradients and return the loss value for the generator. + logdir: The directory where the graph and checkpoints are saved. + master: The URL of the master. + is_chief: Specifies whether or not the training is being run by the primary + replica during replica training. + scaffold: An tf.train.Scaffold instance. + hooks: List of `tf.train.SessionRunHook` callbacks which are run inside the + training loop. + chief_only_hooks: List of `tf.train.SessionRunHook` instances which are run + inside the training loop for the chief trainer only. + save_checkpoint_secs: The frequency, in seconds, that a checkpoint is saved + using a default checkpoint saver. If `save_checkpoint_secs` is set to + `None`, then the default checkpoint saver isn't used. + save_summaries_steps: The frequency, in number of global steps, that the + summaries are written to disk using a default summary saver. If + `save_summaries_steps` is set to `None`, then the default summary saver + isn't used. + hparams: The hparams struct. + + Returns: + the value of the loss function after training. + + Raises: + ValueError: if `logdir` is `None` and either `save_checkpoint_secs` or + `save_summaries_steps` are `None. 
+ """ + global_step = slim.get_or_create_global_step() + + scaffold = scaffold or tf.train.Scaffold() + + hooks = hooks or [] + + if is_chief: + session_creator = tf.train.ChiefSessionCreator( + scaffold=scaffold, checkpoint_dir=logdir, master=master) + + if chief_only_hooks: + hooks.extend(chief_only_hooks) + hooks.append(tf.train.StepCounterHook(output_dir=logdir)) + + if save_summaries_steps: + if logdir is None: + raise ValueError( + 'logdir cannot be None when save_summaries_steps is None') + hooks.append( + tf.train.SummarySaverHook( + scaffold=scaffold, + save_steps=save_summaries_steps, + output_dir=logdir)) + + if save_checkpoint_secs: + if logdir is None: + raise ValueError( + 'logdir cannot be None when save_checkpoint_secs is None') + hooks.append( + tf.train.CheckpointSaverHook( + logdir, save_secs=save_checkpoint_secs, scaffold=scaffold)) + else: + session_creator = tf.train.WorkerSessionCreator( + scaffold=scaffold, master=master) + + with tf.train.MonitoredSession( + session_creator=session_creator, hooks=hooks) as session: + loss = None + while not session.should_stop(): + # Run the domain classifier op X times. + for _ in range(hparams.discriminator_steps): + if session.should_stop(): + return loss + loss, np_global_step = session.run( + [discriminator_train_op, global_step]) + if np_global_step % FLAGS.print_loss_steps == 0: + tf.logging.info('Step %d: Discriminator Loss = %.2f', np_global_step, + loss) + + # Run the generator op X times. + for _ in range(hparams.generator_steps): + if session.should_stop(): + return loss + loss, np_global_step = session.run([generator_train_op, global_step]) + if np_global_step % FLAGS.print_loss_steps == 0: + tf.logging.info('Step %d: Generator Loss = %.2f', np_global_step, + loss) + return loss + + +def run_training(run_dir, checkpoint_dir, hparams): + """Runs the training loop. + + Args: + run_dir: The directory where training specific logs are placed + checkpoint_dir: The directory where the checkpoints and log files are + stored. + hparams: The hyperparameters struct. + + Raises: + ValueError: if hparams.arch is not recognized. + """ + for path in [run_dir, checkpoint_dir]: + if not tf.gfile.Exists(path): + tf.gfile.MakeDirs(path) + + # Serialize hparams to log dir + hparams_filename = os.path.join(checkpoint_dir, 'hparams.json') + with tf.gfile.FastGFile(hparams_filename, 'w') as f: + f.write(hparams.to_json()) + + with tf.Graph().as_default(): + with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)): + global_step = slim.get_or_create_global_step() + + ######################### + # Preprocess the inputs # + ######################### + target_dataset = dataset_factory.get_dataset( + FLAGS.target_dataset, + split_name='train', + dataset_dir=FLAGS.dataset_dir) + target_images, _ = dataset_factory.provide_batch( + FLAGS.target_dataset, 'train', FLAGS.dataset_dir, FLAGS.num_readers, + hparams.batch_size, FLAGS.num_preprocessing_threads) + num_target_classes = target_dataset.num_classes + + if hparams.arch not in ['dcgan']: + source_dataset = dataset_factory.get_dataset( + FLAGS.source_dataset, + split_name='train', + dataset_dir=FLAGS.dataset_dir) + num_source_classes = source_dataset.num_classes + source_images, source_labels = dataset_factory.provide_batch( + FLAGS.source_dataset, 'train', FLAGS.dataset_dir, FLAGS.num_readers, + hparams.batch_size, FLAGS.num_preprocessing_threads) + # Data provider provides 1 hot labels, but we expect categorical. 
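+        # For example, a one-hot row [0., 0., 1., 0., ...] from the provider
+        # becomes the integer class id 2 after the argmax below.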
+ source_labels['class'] = tf.argmax(source_labels['classes'], 1) + del source_labels['classes'] + if num_source_classes != num_target_classes: + raise ValueError( + 'Source and Target datasets must have same number of classes. ' + 'Are %d and %d' % (num_source_classes, num_target_classes)) + else: + source_images = None + source_labels = None + + #################### + # Define the model # + #################### + end_points = pixelda_model.create_model( + hparams, + target_images, + source_images=source_images, + source_labels=source_labels, + is_training=True, + num_classes=num_target_classes) + + ################################# + # Get the variables to optimize # + ################################# + generator_vars, generator_update_ops = _get_vars_and_update_ops( + hparams, 'generator') + discriminator_vars, discriminator_update_ops = _get_vars_and_update_ops( + hparams, 'discriminator') + + ######################## + # Configure the losses # + ######################## + generator_loss = pixelda_losses.g_step_loss( + source_images, + source_labels, + end_points, + hparams, + num_classes=num_target_classes) + discriminator_loss = pixelda_losses.d_step_loss( + end_points, source_labels, num_target_classes, hparams) + + ########################### + # Create the training ops # + ########################### + learning_rate = hparams.learning_rate + if hparams.lr_decay_steps: + learning_rate = tf.train.exponential_decay( + learning_rate, + slim.get_or_create_global_step(), + decay_steps=hparams.lr_decay_steps, + decay_rate=hparams.lr_decay_rate, + staircase=True) + tf.summary.scalar('Learning_rate', learning_rate) + + + if hparams.discriminator_steps == 0: + discriminator_train_op = tf.no_op() + else: + discriminator_optimizer = tf.train.AdamOptimizer( + learning_rate, beta1=hparams.adam_beta1) + + discriminator_train_op = slim.learning.create_train_op( + discriminator_loss, + discriminator_optimizer, + update_ops=discriminator_update_ops, + variables_to_train=discriminator_vars, + clip_gradient_norm=hparams.clip_gradient_norm, + summarize_gradients=FLAGS.summarize_gradients) + + if hparams.generator_steps == 0: + generator_train_op = tf.no_op() + else: + generator_optimizer = tf.train.AdamOptimizer( + learning_rate, beta1=hparams.adam_beta1) + generator_train_op = slim.learning.create_train_op( + generator_loss, + generator_optimizer, + update_ops=generator_update_ops, + variables_to_train=generator_vars, + clip_gradient_norm=hparams.clip_gradient_norm, + summarize_gradients=FLAGS.summarize_gradients) + + ############# + # Summaries # + ############# + pixelda_utils.summarize_model(end_points) + pixelda_utils.summarize_transferred_grid( + end_points['transferred_images'], source_images, name='Transferred') + if 'source_images_recon' in end_points: + pixelda_utils.summarize_transferred_grid( + end_points['source_images_recon'], + source_images, + name='Source Reconstruction') + pixelda_utils.summaries_color_distributions(end_points['transferred_images'], + 'Transferred') + pixelda_utils.summaries_color_distributions(target_images, 'Target') + + if source_images is not None: + pixelda_utils.summarize_transferred(source_images, + end_points['transferred_images']) + pixelda_utils.summaries_color_distributions(source_images, 'Source') + pixelda_utils.summaries_color_distributions( + tf.abs(source_images - end_points['transferred_images']), + 'Abs(Source_minus_Transferred)') + + number_of_steps = None + if hparams.num_training_examples: + # Want to control by amount of data seen, not # steps 
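+        # e.g. num_training_examples=1000000 with batch_size=32 stops
+        # training after 31250 steps. Note this relies on Python 2 integer
+        # division; under Python 3, '//' would be needed to keep the step
+        # count an integer.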
+ number_of_steps = hparams.num_training_examples / hparams.batch_size + + hooks = [tf.train.StepCounterHook(),] + + chief_only_hooks = [ + tf.train.CheckpointSaverHook( + saver=tf.train.Saver(), + checkpoint_dir=run_dir, + save_secs=FLAGS.save_interval_secs) + ] + + if number_of_steps: + hooks.append(tf.train.StopAtStepHook(last_step=number_of_steps)) + + _train( + discriminator_train_op, + generator_train_op, + logdir=run_dir, + master=FLAGS.master, + is_chief=FLAGS.task == 0, + hooks=hooks, + chief_only_hooks=chief_only_hooks, + save_checkpoint_secs=None, + save_summaries_steps=FLAGS.save_summaries_steps, + hparams=hparams) + +def main(_): + tf.logging.set_verbosity(tf.logging.INFO) + hparams = create_hparams(FLAGS.hparams) + run_training( + run_dir=FLAGS.train_log_dir, + checkpoint_dir=FLAGS.train_log_dir, + hparams=hparams) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/domain_adaptation/pixel_domain_adaptation/pixelda_utils.py b/models/research/domain_adaptation/pixel_domain_adaptation/pixelda_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..28e8006f267f9bf7f13c3dff78625cc4cbd00185 --- /dev/null +++ b/models/research/domain_adaptation/pixel_domain_adaptation/pixelda_utils.py @@ -0,0 +1,195 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for PixelDA model.""" +import math + +# Dependency imports + +import tensorflow as tf + +slim = tf.contrib.slim + +flags = tf.app.flags +FLAGS = flags.FLAGS + + +def remove_depth(images): + """Takes a batch of images and remove depth channel if present.""" + if images.shape.as_list()[-1] == 4: + return images[:, :, :, 0:3] + return images + + +def image_grid(images, max_grid_size=4): + """Given images and N, return first N^2 images as an NxN image grid. 
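+
+  For example, a [16, 28, 28, 3] batch with max_grid_size=4 yields a single
+  [1, 112, 112, 3] grid (illustrative shapes).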
+ + Args: + images: a `Tensor` of size [batch_size, height, width, channels] + max_grid_size: Maximum image grid height/width + + Returns: + Single image batch, of dim [1, h*n, w*n, c] + """ + images = remove_depth(images) + batch_size = images.shape.as_list()[0] + grid_size = min(int(math.sqrt(batch_size)), max_grid_size) + assert images.shape.as_list()[0] >= grid_size * grid_size + + # If we have a depth channel + if images.shape.as_list()[-1] == 4: + images = images[:grid_size * grid_size, :, :, 0:3] + depth = tf.image.grayscale_to_rgb(images[:grid_size * grid_size, :, :, 3:4]) + + images = tf.reshape(images, [-1, images.shape.as_list()[2], 3]) + split = tf.split(0, grid_size, images) + depth = tf.reshape(depth, [-1, images.shape.as_list()[2], 3]) + depth_split = tf.split(0, grid_size, depth) + grid = tf.concat(split + depth_split, 1) + return tf.expand_dims(grid, 0) + else: + images = images[:grid_size * grid_size, :, :, :] + images = tf.reshape( + images, [-1, images.shape.as_list()[2], + images.shape.as_list()[3]]) + split = tf.split(images, grid_size, 0) + grid = tf.concat(split, 1) + return tf.expand_dims(grid, 0) + + +def source_and_output_image_grid(output_images, + source_images=None, + max_grid_size=4): + """Create NxN image grid for output, concatenate source grid if given. + + Makes grid out of output_images and, if provided, source_images, and + concatenates them. + + Args: + output_images: [batch_size, h, w, c] tensor of images + source_images: optional[batch_size, h, w, c] tensor of images + max_grid_size: Image grid height/width + + Returns: + Single image batch, of dim [1, h*n, w*n, c] + + + """ + output_grid = image_grid(output_images, max_grid_size=max_grid_size) + if source_images is not None: + source_grid = image_grid(source_images, max_grid_size=max_grid_size) + # Make sure they have the same # of channels before concat + # Assumes either 1 or 3 channels + if output_grid.shape.as_list()[-1] != source_grid.shape.as_list()[-1]: + if output_grid.shape.as_list()[-1] == 1: + output_grid = tf.tile(output_grid, [1, 1, 1, 3]) + if source_grid.shape.as_list()[-1] == 1: + source_grid = tf.tile(source_grid, [1, 1, 1, 3]) + output_grid = tf.concat([output_grid, source_grid], 1) + return output_grid + + +def summarize_model(end_points): + """Summarizes the given model via its end_points. + + Args: + end_points: A dictionary of end_point names to `Tensor`. + """ + tf.summary.histogram('domain_logits_transferred', + tf.sigmoid(end_points['transferred_domain_logits'])) + + tf.summary.histogram('domain_logits_target', + tf.sigmoid(end_points['target_domain_logits'])) + + +def summarize_transferred_grid(transferred_images, + source_images=None, + name='Transferred'): + """Produces a visual grid summarization of the image transferrence. + + Args: + transferred_images: A `Tensor` of size [batch_size, height, width, c]. + source_images: A `Tensor` of size [batch_size, height, width, c]. + name: Name to use in summary name + """ + if source_images is not None: + grid = source_and_output_image_grid(transferred_images, source_images) + else: + grid = image_grid(transferred_images) + tf.summary.image('%s_Images_Grid' % name, grid, max_outputs=1) + + +def summarize_transferred(source_images, + transferred_images, + max_images=20, + name='Transferred'): + """Produces a visual summary of the image transferrence. + + This summary displays the source image, transferred image, and a grayscale + difference image which highlights the differences between input and output. 
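+
+  The three panels are concatenated along the width axis, so each summary row
+  reads [source | transferred | difference] from left to right.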
+ + Args: + source_images: A `Tensor` of size [batch_size, height, width, channels]. + transferred_images: A `Tensor` of size [batch_size, height, width, channels] + max_images: The number of images to show. + name: Name to use in summary name + + Raises: + ValueError: If number of channels in source and target are incompatible + """ + source_channels = source_images.shape.as_list()[-1] + transferred_channels = transferred_images.shape.as_list()[-1] + if source_channels < transferred_channels: + if source_channels != 1: + raise ValueError( + 'Source must be 1 channel or same # of channels as target') + source_images = tf.tile(source_images, [1, 1, 1, transferred_channels]) + if transferred_channels < source_channels: + if transferred_channels != 1: + raise ValueError( + 'Target must be 1 channel or same # of channels as source') + transferred_images = tf.tile(transferred_images, [1, 1, 1, source_channels]) + diffs = tf.abs(source_images - transferred_images) + diffs = tf.reduce_max(diffs, reduction_indices=[3], keep_dims=True) + diffs = tf.tile(diffs, [1, 1, 1, max(source_channels, transferred_channels)]) + + transition_images = tf.concat([ + source_images, + transferred_images, + diffs, + ], 2) + + tf.summary.image( + '%s_difference' % name, transition_images, max_outputs=max_images) + + +def summaries_color_distributions(images, name): + """Produces a histogram of the color distributions of the images. + + Args: + images: A `Tensor` of size [batch_size, height, width, 3]. + name: The name of the images being summarized. + """ + tf.summary.histogram('color_values/%s' % name, images) + + +def summarize_images(images, name): + """Produces a visual summary of the given images. + + Args: + images: A `Tensor` of size [batch_size, height, width, 3]. + name: The name of the images being summarized. + """ + grid = image_grid(images) + tf.summary.image('%s_Images' % name, grid, max_outputs=1) diff --git a/models/research/efficient-hrl/README.md b/models/research/efficient-hrl/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6c454c687a3b75e9cf68d1f3737d74b464167e14 --- /dev/null +++ b/models/research/efficient-hrl/README.md @@ -0,0 +1,65 @@ +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +Code for performing Hierarchical RL based on the following publications: + +"Data-Efficient Hierarchical Reinforcement Learning" by +Ofir Nachum, Shixiang (Shane) Gu, Honglak Lee, and Sergey Levine +(https://arxiv.org/abs/1805.08296). + +"Near-Optimal Representation Learning for Hierarchical Reinforcement Learning" +by Ofir Nachum, Shixiang (Shane) Gu, Honglak Lee, and Sergey Levine +(https://arxiv.org/abs/1810.01257). 
+ + +Requirements: +* TensorFlow (see http://www.tensorflow.org for how to install/upgrade) +* Gin Config (see https://github.com/google/gin-config) +* Tensorflow Agents (see https://github.com/tensorflow/agents) +* OpenAI Gym (see http://gym.openai.com/docs, be sure to install MuJoCo as well) +* NumPy (see http://www.numpy.org/) + + +Quick Start: + +Run a training job based on the original HIRO paper on Ant Maze: + +``` +python scripts/local_train.py test1 hiro_orig ant_maze base_uvf suite +``` + +Run a continuous evaluation job for that experiment: + +``` +python scripts/local_eval.py test1 hiro_orig ant_maze base_uvf suite +``` + +To run the same experiment with online representation learning (the +"Near-Optimal" paper), change `hiro_orig` to `hiro_repr`. +You can also run with `hiro_xy` to run the same experiment with HIRO on only the +xy coordinates of the agent. + +To run on other environments, change `ant_maze` to something else; e.g., +`ant_push_multi`, `ant_fall_multi`, etc. See `context/configs/*` for other options. + + +Basic Code Guide: + +The code for training resides in train.py. The code trains a lower-level policy +(a UVF agent in the code) and a higher-level policy (a MetaAgent in the code) +concurrently. The higher-level policy communicates goals to the lower-level +policy. In the code, this is called a context. Not only does the lower-level +policy act with respect to a context (a higher-level specified goal), but the +higher-level policy also acts with respect to an environment-specified context +(corresponding to the navigation target location associated with the task). +Therefore, in `context/configs/*` you will find both specifications for task setup +as well as goal configurations. Most remaining hyperparameters used for +training/evaluation may be found in `configs/*`. + +NOTE: Not all the code corresponding to the "Near-Optimal" paper is included. +Namely, changes to low-level policy training proposed in the paper (discounting +and auxiliary rewards) are not implemented here. Performance should not change +significantly. + + +Maintained by Ofir Nachum (ofirnachum). diff --git a/models/research/efficient-hrl/agent.py b/models/research/efficient-hrl/agent.py new file mode 100644 index 0000000000000000000000000000000000000000..0028ddffa0d37a0e80d2c990e6263a3d9b4ab948 --- /dev/null +++ b/models/research/efficient-hrl/agent.py @@ -0,0 +1,774 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A UVF agent. +""" + +import tensorflow as tf +import gin.tf +from agents import ddpg_agent +# pylint: disable=unused-import +import cond_fn +from utils import utils as uvf_utils +from context import gin_imports +# pylint: enable=unused-import +slim = tf.contrib.slim + + +@gin.configurable +class UvfAgentCore(object): + """Defines basic functions for UVF agent. Must be inherited with an RL agent. + + Used as lower-level agent. 
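+
+  Example usage (a sketch; `tf_env` and the gin-configured context class are
+  assumed to come from the training setup):
+
+    agent = UvfAgent(observation_spec=tf_env.observation_spec(),
+                     action_spec=tf_env.action_spec(),
+                     tf_env=tf_env,
+                     tf_context=some_context_class)
+    action = agent.action(state, context)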
+ """ + + def __init__(self, + observation_spec, + action_spec, + tf_env, + tf_context, + step_cond_fn=cond_fn.env_transition, + reset_episode_cond_fn=cond_fn.env_restart, + reset_env_cond_fn=cond_fn.false_fn, + metrics=None, + **base_agent_kwargs): + """Constructs a UVF agent. + + Args: + observation_spec: A TensorSpec defining the observations. + action_spec: A BoundedTensorSpec defining the actions. + tf_env: A Tensorflow environment object. + tf_context: A Context class. + step_cond_fn: A function indicating whether to increment the num of steps. + reset_episode_cond_fn: A function indicating whether to restart the + episode, resampling the context. + reset_env_cond_fn: A function indicating whether to perform a manual reset + of the environment. + metrics: A list of functions that evaluate metrics of the agent. + **base_agent_kwargs: A dictionary of parameters for base RL Agent. + Raises: + ValueError: If 'dqda_clipping' is < 0. + """ + self._step_cond_fn = step_cond_fn + self._reset_episode_cond_fn = reset_episode_cond_fn + self._reset_env_cond_fn = reset_env_cond_fn + self.metrics = metrics + + # expose tf_context methods + self.tf_context = tf_context(tf_env=tf_env) + self.set_replay = self.tf_context.set_replay + self.sample_contexts = self.tf_context.sample_contexts + self.compute_rewards = self.tf_context.compute_rewards + self.gamma_index = self.tf_context.gamma_index + self.context_specs = self.tf_context.context_specs + self.context_as_action_specs = self.tf_context.context_as_action_specs + self.init_context_vars = self.tf_context.create_vars + + self.env_observation_spec = observation_spec[0] + merged_observation_spec = (uvf_utils.merge_specs( + (self.env_observation_spec,) + self.context_specs),) + self._context_vars = dict() + self._action_vars = dict() + + self.BASE_AGENT_CLASS.__init__( + self, + observation_spec=merged_observation_spec, + action_spec=action_spec, + **base_agent_kwargs + ) + + def set_meta_agent(self, agent=None): + self._meta_agent = agent + + @property + def meta_agent(self): + return self._meta_agent + + def actor_loss(self, states, actions, rewards, discounts, + next_states): + """Returns the next action for the state. + + Args: + state: A [num_state_dims] tensor representing a state. + context: A list of [num_context_dims] tensor representing a context. + Returns: + A [num_action_dims] tensor representing the action. + """ + return self.BASE_AGENT_CLASS.actor_loss(self, states) + + def action(self, state, context=None): + """Returns the next action for the state. + + Args: + state: A [num_state_dims] tensor representing a state. + context: A list of [num_context_dims] tensor representing a context. + Returns: + A [num_action_dims] tensor representing the action. + """ + merged_state = self.merged_state(state, context) + return self.BASE_AGENT_CLASS.action(self, merged_state) + + def actions(self, state, context=None): + """Returns the next action for the state. + + Args: + state: A [-1, num_state_dims] tensor representing a state. + context: A list of [-1, num_context_dims] tensor representing a context. + Returns: + A [-1, num_action_dims] tensor representing the action. 
+ """ + merged_states = self.merged_states(state, context) + return self.BASE_AGENT_CLASS.actor_net(self, merged_states) + + def log_probs(self, states, actions, state_reprs, contexts=None): + assert contexts is not None + batch_dims = [tf.shape(states)[0], tf.shape(states)[1]] + contexts = self.tf_context.context_multi_transition_fn( + contexts, states=tf.to_float(state_reprs)) + + flat_states = tf.reshape(states, + [batch_dims[0] * batch_dims[1], states.shape[-1]]) + flat_contexts = [tf.reshape(tf.cast(context, states.dtype), + [batch_dims[0] * batch_dims[1], context.shape[-1]]) + for context in contexts] + flat_pred_actions = self.actions(flat_states, flat_contexts) + pred_actions = tf.reshape(flat_pred_actions, + batch_dims + [flat_pred_actions.shape[-1]]) + + error = tf.square(actions - pred_actions) + spec_range = (self._action_spec.maximum - self._action_spec.minimum) / 2 + normalized_error = tf.cast(error, tf.float64) / tf.constant(spec_range) ** 2 + return -normalized_error + + @gin.configurable('uvf_add_noise_fn') + def add_noise_fn(self, action_fn, stddev=1.0, debug=False, + clip=True, global_step=None): + """Returns the action_fn with additive Gaussian noise. + + Args: + action_fn: A callable(`state`, `context`) which returns a + [num_action_dims] tensor representing a action. + stddev: stddev for the Ornstein-Uhlenbeck noise. + debug: Print debug messages. + Returns: + A [num_action_dims] action tensor. + """ + if global_step is not None: + stddev *= tf.maximum( # Decay exploration during training. + tf.train.exponential_decay(1.0, global_step, 1e6, 0.8), 0.5) + def noisy_action_fn(state, context=None): + """Noisy action fn.""" + action = action_fn(state, context) + if debug: + action = uvf_utils.tf_print( + action, [action], + message='[add_noise_fn] pre-noise action', + first_n=100) + noise_dist = tf.distributions.Normal(tf.zeros_like(action), + tf.ones_like(action) * stddev) + noise = noise_dist.sample() + action += noise + if debug: + action = uvf_utils.tf_print( + action, [action], + message='[add_noise_fn] post-noise action', + first_n=100) + if clip: + action = uvf_utils.clip_to_spec(action, self._action_spec) + return action + return noisy_action_fn + + def merged_state(self, state, context=None): + """Returns the merged state from the environment state and contexts. + + Args: + state: A [num_state_dims] tensor representing a state. + context: A list of [num_context_dims] tensor representing a context. + If None, use the internal context. + Returns: + A [num_merged_state_dims] tensor representing the merged state. + """ + if context is None: + context = list(self.context_vars) + state = tf.concat([state,] + context, axis=-1) + self._validate_states(self._batch_state(state)) + return state + + def merged_states(self, states, contexts=None): + """Returns the batch merged state from the batch env state and contexts. + + Args: + states: A [batch_size, num_state_dims] tensor representing a batch + of states. + contexts: A list of [batch_size, num_context_dims] tensor + representing a batch of contexts. If None, + use the internal context. + Returns: + A [batch_size, num_merged_state_dims] tensor representing the batch + of merged states. 
+ """ + if contexts is None: + contexts = [tf.tile(tf.expand_dims(context, axis=0), + (tf.shape(states)[0], 1)) for + context in self.context_vars] + states = tf.concat([states,] + contexts, axis=-1) + self._validate_states(states) + return states + + def unmerged_states(self, merged_states): + """Returns the batch state and contexts from the batch merged state. + + Args: + merged_states: A [batch_size, num_merged_state_dims] tensor + representing a batch of merged states. + Returns: + A [batch_size, num_state_dims] tensor and a list of + [batch_size, num_context_dims] tensors representing the batch state + and contexts respectively. + """ + self._validate_states(merged_states) + num_state_dims = self.env_observation_spec.shape.as_list()[0] + num_context_dims_list = [c.shape.as_list()[0] for c in self.context_specs] + states = merged_states[:, :num_state_dims] + contexts = [] + i = num_state_dims + for num_context_dims in num_context_dims_list: + contexts.append(merged_states[:, i: i+num_context_dims]) + i += num_context_dims + return states, contexts + + def sample_random_actions(self, batch_size=1): + """Return random actions. + + Args: + batch_size: Batch size. + Returns: + A [batch_size, num_action_dims] tensor representing the batch of actions. + """ + actions = tf.concat( + [ + tf.random_uniform( + shape=(batch_size, 1), + minval=self._action_spec.minimum[i], + maxval=self._action_spec.maximum[i]) + for i in range(self._action_spec.shape[0].value) + ], + axis=1) + return actions + + def clip_actions(self, actions): + """Clip actions to spec. + + Args: + actions: A [batch_size, num_action_dims] tensor representing + the batch of actions. + Returns: + A [batch_size, num_action_dims] tensor representing the batch + of clipped actions. + """ + actions = tf.concat( + [ + tf.clip_by_value( + actions[:, i:i+1], + self._action_spec.minimum[i], + self._action_spec.maximum[i]) + for i in range(self._action_spec.shape[0].value) + ], + axis=1) + return actions + + def mix_contexts(self, contexts, insert_contexts, indices): + """Mix two contexts based on indices. + + Args: + contexts: A list of [batch_size, num_context_dims] tensor representing + the batch of contexts. + insert_contexts: A list of [batch_size, num_context_dims] tensor + representing the batch of contexts to be inserted. + indices: A list of a list of integers denoting indices to replace. + Returns: + A list of resulting contexts. + """ + if indices is None: indices = [[]] * len(contexts) + assert len(contexts) == len(indices) + assert all([spec.shape.ndims == 1 for spec in self.context_specs]) + mix_contexts = [] + for contexts_, insert_contexts_, indices_, spec in zip( + contexts, insert_contexts, indices, self.context_specs): + mix_contexts.append( + tf.concat( + [ + insert_contexts_[:, i:i + 1] if i in indices_ else + contexts_[:, i:i + 1] for i in range(spec.shape.as_list()[0]) + ], + axis=1)) + return mix_contexts + + def begin_episode_ops(self, mode, action_fn=None, state=None): + """Returns ops that reset agent at beginning of episodes. + + Args: + mode: a string representing the mode=[train, explore, eval]. + Returns: + A list of ops. 
+ """ + all_ops = [] + for _, action_var in sorted(self._action_vars.items()): + sample_action = self.sample_random_actions(1)[0] + all_ops.append(tf.assign(action_var, sample_action)) + all_ops += self.tf_context.reset(mode=mode, agent=self._meta_agent, + action_fn=action_fn, state=state) + return all_ops + + def cond_begin_episode_op(self, cond, input_vars, mode, meta_action_fn): + """Returns op that resets agent at beginning of episodes. + + A new episode is begun if the cond op evalues to `False`. + + Args: + cond: a Boolean tensor variable. + input_vars: A list of tensor variables. + mode: a string representing the mode=[train, explore, eval]. + Returns: + Conditional begin op. + """ + (state, action, reward, next_state, + state_repr, next_state_repr) = input_vars + def continue_fn(): + """Continue op fn.""" + items = [state, action, reward, next_state, + state_repr, next_state_repr] + list(self.context_vars) + batch_items = [tf.expand_dims(item, 0) for item in items] + (states, actions, rewards, next_states, + state_reprs, next_state_reprs) = batch_items[:6] + context_reward = self.compute_rewards( + mode, state_reprs, actions, rewards, next_state_reprs, + batch_items[6:])[0][0] + context_reward = tf.cast(context_reward, dtype=reward.dtype) + if self.meta_agent is not None: + meta_action = tf.concat(self.context_vars, -1) + items = [state, meta_action, reward, next_state, + state_repr, next_state_repr] + list(self.meta_agent.context_vars) + batch_items = [tf.expand_dims(item, 0) for item in items] + (states, meta_actions, rewards, next_states, + state_reprs, next_state_reprs) = batch_items[:6] + meta_reward = self.meta_agent.compute_rewards( + mode, states, meta_actions, rewards, + next_states, batch_items[6:])[0][0] + meta_reward = tf.cast(meta_reward, dtype=reward.dtype) + else: + meta_reward = tf.constant(0, dtype=reward.dtype) + + with tf.control_dependencies([context_reward, meta_reward]): + step_ops = self.tf_context.step(mode=mode, agent=self._meta_agent, + state=state, + next_state=next_state, + state_repr=state_repr, + next_state_repr=next_state_repr, + action_fn=meta_action_fn) + with tf.control_dependencies(step_ops): + context_reward, meta_reward = map(tf.identity, [context_reward, meta_reward]) + return context_reward, meta_reward + def begin_episode_fn(): + """Begin op fn.""" + begin_ops = self.begin_episode_ops(mode=mode, action_fn=meta_action_fn, state=state) + with tf.control_dependencies(begin_ops): + return tf.zeros_like(reward), tf.zeros_like(reward) + with tf.control_dependencies(input_vars): + cond_begin_episode_op = tf.cond(cond, continue_fn, begin_episode_fn) + return cond_begin_episode_op + + def get_env_base_wrapper(self, env_base, **begin_kwargs): + """Create a wrapper around env_base, with agent-specific begin/end_episode. + + Args: + env_base: A python environment base. + **begin_kwargs: Keyword args for begin_episode_ops. + Returns: + An object with begin_episode() and end_episode(). + """ + begin_ops = self.begin_episode_ops(**begin_kwargs) + return uvf_utils.get_contextual_env_base(env_base, begin_ops) + + def init_action_vars(self, name, i=None): + """Create and return a tensorflow Variable holding an action. + + Args: + name: Name of the variables. + i: Integer id. + Returns: + A [num_action_dims] tensor. + """ + if i is not None: + name += '_%d' % i + assert name not in self._action_vars, ('Conflict! 
%s is already ' + 'initialized.') % name + self._action_vars[name] = tf.Variable( + self.sample_random_actions(1)[0], name='%s_action' % (name)) + self._validate_actions(tf.expand_dims(self._action_vars[name], 0)) + return self._action_vars[name] + + @gin.configurable('uvf_critic_function') + def critic_function(self, critic_vals, states, critic_fn=None): + """Computes q values based on outputs from the critic net. + + Args: + critic_vals: A tf.float32 [batch_size, ...] tensor representing outputs + from the critic net. + states: A [batch_size, num_state_dims] tensor representing a batch + of states. + critic_fn: A callable that process outputs from critic_net and + outputs a [batch_size] tensor representing q values. + Returns: + A tf.float32 [batch_size] tensor representing q values. + """ + if critic_fn is not None: + env_states, contexts = self.unmerged_states(states) + critic_vals = critic_fn(critic_vals, env_states, contexts) + critic_vals.shape.assert_has_rank(1) + return critic_vals + + def get_action_vars(self, key): + return self._action_vars[key] + + def get_context_vars(self, key): + return self.tf_context.context_vars[key] + + def step_cond_fn(self, *args): + return self._step_cond_fn(self, *args) + + def reset_episode_cond_fn(self, *args): + return self._reset_episode_cond_fn(self, *args) + + def reset_env_cond_fn(self, *args): + return self._reset_env_cond_fn(self, *args) + + @property + def context_vars(self): + return self.tf_context.vars + + +@gin.configurable +class MetaAgentCore(UvfAgentCore): + """Defines basic functions for UVF Meta-agent. Must be inherited with an RL agent. + + Used as higher-level agent. + """ + + def __init__(self, + observation_spec, + action_spec, + tf_env, + tf_context, + sub_context, + step_cond_fn=cond_fn.env_transition, + reset_episode_cond_fn=cond_fn.env_restart, + reset_env_cond_fn=cond_fn.false_fn, + metrics=None, + actions_reg=0., + k=2, + **base_agent_kwargs): + """Constructs a Meta agent. + + Args: + observation_spec: A TensorSpec defining the observations. + action_spec: A BoundedTensorSpec defining the actions. + tf_env: A Tensorflow environment object. + tf_context: A Context class. + step_cond_fn: A function indicating whether to increment the num of steps. + reset_episode_cond_fn: A function indicating whether to restart the + episode, resampling the context. + reset_env_cond_fn: A function indicating whether to perform a manual reset + of the environment. + metrics: A list of functions that evaluate metrics of the agent. + **base_agent_kwargs: A dictionary of parameters for base RL Agent. + Raises: + ValueError: If 'dqda_clipping' is < 0. 
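+
+    Note: `actions_reg` and `k` are not listed under Args above; `actions_reg`
+    scales an L1 penalty that actor_loss applies to the meta-action dimensions
+    from index `k` onwards.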
+ """ + self._step_cond_fn = step_cond_fn + self._reset_episode_cond_fn = reset_episode_cond_fn + self._reset_env_cond_fn = reset_env_cond_fn + self.metrics = metrics + self._actions_reg = actions_reg + self._k = k + + # expose tf_context methods + self.tf_context = tf_context(tf_env=tf_env) + self.sub_context = sub_context(tf_env=tf_env) + self.set_replay = self.tf_context.set_replay + self.sample_contexts = self.tf_context.sample_contexts + self.compute_rewards = self.tf_context.compute_rewards + self.gamma_index = self.tf_context.gamma_index + self.context_specs = self.tf_context.context_specs + self.context_as_action_specs = self.tf_context.context_as_action_specs + self.sub_context_as_action_specs = self.sub_context.context_as_action_specs + self.init_context_vars = self.tf_context.create_vars + + self.env_observation_spec = observation_spec[0] + merged_observation_spec = (uvf_utils.merge_specs( + (self.env_observation_spec,) + self.context_specs),) + self._context_vars = dict() + self._action_vars = dict() + + assert len(self.context_as_action_specs) == 1 + self.BASE_AGENT_CLASS.__init__( + self, + observation_spec=merged_observation_spec, + action_spec=self.sub_context_as_action_specs, + **base_agent_kwargs + ) + + @gin.configurable('meta_add_noise_fn') + def add_noise_fn(self, action_fn, stddev=1.0, debug=False, + global_step=None): + noisy_action_fn = super(MetaAgentCore, self).add_noise_fn( + action_fn, stddev, + clip=True, global_step=global_step) + return noisy_action_fn + + def actor_loss(self, states, actions, rewards, discounts, + next_states): + """Returns the next action for the state. + + Args: + state: A [num_state_dims] tensor representing a state. + context: A list of [num_context_dims] tensor representing a context. + Returns: + A [num_action_dims] tensor representing the action. + """ + actions = self.actor_net(states, stop_gradients=False) + regularizer = self._actions_reg * tf.reduce_mean( + tf.reduce_sum(tf.abs(actions[:, self._k:]), -1), 0) + loss = self.BASE_AGENT_CLASS.actor_loss(self, states) + return regularizer + loss + + +@gin.configurable +class UvfAgent(UvfAgentCore, ddpg_agent.TD3Agent): + """A DDPG agent with UVF. + """ + BASE_AGENT_CLASS = ddpg_agent.TD3Agent + ACTION_TYPE = 'continuous' + + def __init__(self, *args, **kwargs): + UvfAgentCore.__init__(self, *args, **kwargs) + + +@gin.configurable +class MetaAgent(MetaAgentCore, ddpg_agent.TD3Agent): + """A DDPG meta-agent. + """ + BASE_AGENT_CLASS = ddpg_agent.TD3Agent + ACTION_TYPE = 'continuous' + + def __init__(self, *args, **kwargs): + MetaAgentCore.__init__(self, *args, **kwargs) + + +@gin.configurable() +def state_preprocess_net( + states, + num_output_dims=2, + states_hidden_layers=(100,), + normalizer_fn=None, + activation_fn=tf.nn.relu, + zero_time=True, + images=False): + """Creates a simple feed forward net for embedding states. + """ + with slim.arg_scope( + [slim.fully_connected], + activation_fn=activation_fn, + normalizer_fn=normalizer_fn, + weights_initializer=slim.variance_scaling_initializer( + factor=1.0/3.0, mode='FAN_IN', uniform=True)): + + states_shape = tf.shape(states) + states_dtype = states.dtype + states = tf.to_float(states) + if images: # Zero-out x-y + states *= tf.constant([0.] * 2 + [1.] * (states.shape[-1] - 2), dtype=states.dtype) + if zero_time: + states *= tf.constant([1.] 
* (states.shape[-1] - 1) + [0.], dtype=states.dtype) + orig_states = states + embed = states + if states_hidden_layers: + embed = slim.stack(embed, slim.fully_connected, states_hidden_layers, + scope='states') + + with slim.arg_scope([slim.fully_connected], + weights_regularizer=None, + weights_initializer=tf.random_uniform_initializer( + minval=-0.003, maxval=0.003)): + embed = slim.fully_connected(embed, num_output_dims, + activation_fn=None, + normalizer_fn=None, + scope='value') + + output = embed + output = tf.cast(output, states_dtype) + return output + + +@gin.configurable() +def action_embed_net( + actions, + states=None, + num_output_dims=2, + hidden_layers=(400, 300), + normalizer_fn=None, + activation_fn=tf.nn.relu, + zero_time=True, + images=False): + """Creates a simple feed forward net for embedding actions. + """ + with slim.arg_scope( + [slim.fully_connected], + activation_fn=activation_fn, + normalizer_fn=normalizer_fn, + weights_initializer=slim.variance_scaling_initializer( + factor=1.0/3.0, mode='FAN_IN', uniform=True)): + + actions = tf.to_float(actions) + if states is not None: + if images: # Zero-out x-y + states *= tf.constant([0.] * 2 + [1.] * (states.shape[-1] - 2), dtype=states.dtype) + if zero_time: + states *= tf.constant([1.] * (states.shape[-1] - 1) + [0.], dtype=states.dtype) + actions = tf.concat([actions, tf.to_float(states)], -1) + + embed = actions + if hidden_layers: + embed = slim.stack(embed, slim.fully_connected, hidden_layers, + scope='hidden') + + with slim.arg_scope([slim.fully_connected], + weights_regularizer=None, + weights_initializer=tf.random_uniform_initializer( + minval=-0.003, maxval=0.003)): + embed = slim.fully_connected(embed, num_output_dims, + activation_fn=None, + normalizer_fn=None, + scope='value') + if num_output_dims == 1: + return embed[:, 0, ...] + else: + return embed + + +def huber(x, kappa=0.1): + return (0.5 * tf.square(x) * tf.to_float(tf.abs(x) <= kappa) + + kappa * (tf.abs(x) - 0.5 * kappa) * tf.to_float(tf.abs(x) > kappa) + ) / kappa + + +@gin.configurable() +class StatePreprocess(object): + STATE_PREPROCESS_NET_SCOPE = 'state_process_net' + ACTION_EMBED_NET_SCOPE = 'action_embed_net' + + def __init__(self, trainable=False, + state_preprocess_net=lambda states: states, + action_embed_net=lambda actions, *args, **kwargs: actions, + ndims=None): + self.trainable = trainable + self._scope = tf.get_variable_scope().name + self._ndims = ndims + self._state_preprocess_net = tf.make_template( + self.STATE_PREPROCESS_NET_SCOPE, state_preprocess_net, + create_scope_now_=True) + self._action_embed_net = tf.make_template( + self.ACTION_EMBED_NET_SCOPE, action_embed_net, + create_scope_now_=True) + + def __call__(self, states): + batched = states.get_shape().ndims != 1 + if not batched: + states = tf.expand_dims(states, 0) + embedded = self._state_preprocess_net(states) + if self._ndims is not None: + embedded = embedded[..., :self._ndims] + if not batched: + return embedded[0] + return embedded + + def loss(self, states, next_states, low_actions, low_states): + batch_size = tf.shape(states)[0] + d = int(low_states.shape[1]) + # Sample indices into meta-transition to train on. 
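+    # The index distribution below decays geometrically (0.99 ** i) over the d
+    # low-level steps, and the last index absorbs the remaining tail mass via
+    # the 1 / (1 - 0.99) factor, so later steps are not under-sampled.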
+ probs = 0.99 ** tf.range(d, dtype=tf.float32) + probs *= tf.constant([1.0] * (d - 1) + [1.0 / (1 - 0.99)], + dtype=tf.float32) + probs /= tf.reduce_sum(probs) + index_dist = tf.distributions.Categorical(probs=probs, dtype=tf.int64) + indices = index_dist.sample(batch_size) + batch_size = tf.cast(batch_size, tf.int64) + next_indices = tf.concat( + [tf.range(batch_size, dtype=tf.int64)[:, None], + (1 + indices[:, None]) % d], -1) + new_next_states = tf.where(indices < d - 1, + tf.gather_nd(low_states, next_indices), + next_states) + next_states = new_next_states + + embed1 = tf.to_float(self._state_preprocess_net(states)) + embed2 = tf.to_float(self._state_preprocess_net(next_states)) + action_embed = self._action_embed_net( + tf.layers.flatten(low_actions), states=states) + + tau = 2.0 + fn = lambda z: tau * tf.reduce_sum(huber(z), -1) + all_embed = tf.get_variable('all_embed', [1024, int(embed1.shape[-1])], + initializer=tf.zeros_initializer()) + upd = all_embed.assign(tf.concat([all_embed[batch_size:], embed2], 0)) + with tf.control_dependencies([upd]): + close = 1 * tf.reduce_mean(fn(embed1 + action_embed - embed2)) + prior_log_probs = tf.reduce_logsumexp( + -fn((embed1 + action_embed)[:, None, :] - all_embed[None, :, :]), + axis=-1) - tf.log(tf.to_float(all_embed.shape[0])) + far = tf.reduce_mean(tf.exp(-fn((embed1 + action_embed)[1:] - embed2[:-1]) + - tf.stop_gradient(prior_log_probs[1:]))) + repr_log_probs = tf.stop_gradient( + -fn(embed1 + action_embed - embed2) - prior_log_probs) / tau + return close + far, repr_log_probs, indices + + def get_trainable_vars(self): + return ( + slim.get_trainable_variables( + uvf_utils.join_scope(self._scope, self.STATE_PREPROCESS_NET_SCOPE)) + + slim.get_trainable_variables( + uvf_utils.join_scope(self._scope, self.ACTION_EMBED_NET_SCOPE))) + + +@gin.configurable() +class InverseDynamics(object): + INVERSE_DYNAMICS_NET_SCOPE = 'inverse_dynamics' + + def __init__(self, spec): + self._spec = spec + + def sample(self, states, next_states, num_samples, orig_goals, sc=0.5): + goal_dim = orig_goals.shape[-1] + spec_range = (self._spec.maximum - self._spec.minimum) / 2 * tf.ones([goal_dim]) + loc = tf.cast(next_states - states, tf.float32)[:, :goal_dim] + scale = sc * tf.tile(tf.reshape(spec_range, [1, goal_dim]), + [tf.shape(states)[0], 1]) + dist = tf.distributions.Normal(loc, scale) + if num_samples == 1: + return dist.sample() + samples = tf.concat([dist.sample(num_samples - 2), + tf.expand_dims(loc, 0), + tf.expand_dims(orig_goals, 0)], 0) + return uvf_utils.clip_to_spec(samples, self._spec) diff --git a/models/research/efficient-hrl/agents/__init__.py b/models/research/efficient-hrl/agents/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/models/research/efficient-hrl/agents/__init__.py @@ -0,0 +1 @@ + diff --git a/models/research/efficient-hrl/agents/circular_buffer.py b/models/research/efficient-hrl/agents/circular_buffer.py new file mode 100644 index 0000000000000000000000000000000000000000..72f90f0de89bf99956436e54a84bbcba903df6e7 --- /dev/null +++ b/models/research/efficient-hrl/agents/circular_buffer.py @@ -0,0 +1,289 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A circular buffer where each element is a list of tensors. + +Each element of the buffer is a list of tensors. An example use case is a replay +buffer in reinforcement learning, where each element is a list of tensors +representing the state, action, reward etc. + +New elements are added sequentially, and once the buffer is full, we +start overwriting them in a circular fashion. Reading does not remove any +elements, only adding new elements does. +""" + +import collections +import numpy as np +import tensorflow as tf + +import gin.tf + + +@gin.configurable +class CircularBuffer(object): + """A circular buffer where each element is a list of tensors.""" + + def __init__(self, buffer_size=1000, scope='replay_buffer'): + """Circular buffer of list of tensors. + + Args: + buffer_size: (integer) maximum number of tensor lists the buffer can hold. + scope: (string) variable scope for creating the variables. + """ + self._buffer_size = np.int64(buffer_size) + self._scope = scope + self._tensors = collections.OrderedDict() + with tf.variable_scope(self._scope): + self._num_adds = tf.Variable(0, dtype=tf.int64, name='num_adds') + self._num_adds_cs = tf.CriticalSection(name='num_adds') + + @property + def buffer_size(self): + return self._buffer_size + + @property + def scope(self): + return self._scope + + @property + def num_adds(self): + return self._num_adds + + def _create_variables(self, tensors): + with tf.variable_scope(self._scope): + for name in tensors.keys(): + tensor = tensors[name] + self._tensors[name] = tf.get_variable( + name='BufferVariable_' + name, + shape=[self._buffer_size] + tensor.get_shape().as_list(), + dtype=tensor.dtype, + trainable=False) + + def _validate(self, tensors): + """Validate shapes of tensors.""" + if len(tensors) != len(self._tensors): + raise ValueError('Expected tensors to have %d elements. Received %d ' + 'instead.' % (len(self._tensors), len(tensors))) + if self._tensors.keys() != tensors.keys(): + raise ValueError('The keys of tensors should be the always the same.' + 'Received %s instead %s.' % + (tensors.keys(), self._tensors.keys())) + for name, tensor in tensors.items(): + if tensor.get_shape().as_list() != self._tensors[ + name].get_shape().as_list()[1:]: + raise ValueError('Tensor %s has incorrect shape.' % name) + if not tensor.dtype.is_compatible_with(self._tensors[name].dtype): + raise ValueError( + 'Tensor %s has incorrect data type. Expected %s, received %s' % + (name, self._tensors[name].read_value().dtype, tensor.dtype)) + + def add(self, tensors): + """Adds an element (list/tuple/dict of tensors) to the buffer. + + Args: + tensors: (list/tuple/dict of tensors) to be added to the buffer. + Returns: + An add operation that adds the input `tensors` to the buffer. Similar to + an enqueue_op. + Raises: + ValueError: If the shapes and data types of input `tensors' are not the + same across calls to the add function. 
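+
+    Example (a sketch; the tensor names are illustrative):
+
+      buffer = CircularBuffer(buffer_size=10000)
+      add_op = buffer.add({'state': state, 'action': action, 'reward': reward})
+      # Batches can later be sampled with buffer.get_random_batch(batch_size).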
+ """ + return self.maybe_add(tensors, True) + + def maybe_add(self, tensors, condition): + """Adds an element (tensors) to the buffer based on the condition.. + + Args: + tensors: (list/tuple of tensors) to be added to the buffer. + condition: A boolean Tensor controlling whether the tensors would be added + to the buffer or not. + Returns: + An add operation that adds the input `tensors` to the buffer. Similar to + an maybe_enqueue_op. + Raises: + ValueError: If the shapes and data types of input `tensors' are not the + same across calls to the add function. + """ + if not isinstance(tensors, dict): + names = [str(i) for i in range(len(tensors))] + tensors = collections.OrderedDict(zip(names, tensors)) + if not isinstance(tensors, collections.OrderedDict): + tensors = collections.OrderedDict( + sorted(tensors.items(), key=lambda t: t[0])) + if not self._tensors: + self._create_variables(tensors) + else: + self._validate(tensors) + + #@tf.critical_section(self._position_mutex) + def _increment_num_adds(): + # Adding 0 to the num_adds variable is a trick to read the value of the + # variable and return a read-only tensor. Doing this in a critical + # section allows us to capture a snapshot of the variable that will + # not be affected by other threads updating num_adds. + return self._num_adds.assign_add(1) + 0 + def _add(): + num_adds_inc = self._num_adds_cs.execute(_increment_num_adds) + current_pos = tf.mod(num_adds_inc - 1, self._buffer_size) + update_ops = [] + for name in self._tensors.keys(): + update_ops.append( + tf.scatter_update(self._tensors[name], current_pos, tensors[name])) + return tf.group(*update_ops) + + return tf.contrib.framework.smart_cond(condition, _add, tf.no_op) + + def get_random_batch(self, batch_size, keys=None, num_steps=1): + """Samples a batch of tensors from the buffer with replacement. + + Args: + batch_size: (integer) number of elements to sample. + keys: List of keys of tensors to retrieve. If None retrieve all. + num_steps: (integer) length of trajectories to return. If > 1 will return + a list of lists, where each internal list represents a trajectory of + length num_steps. + Returns: + A list of tensors, where each element in the list is a batch sampled from + one of the tensors in the buffer. + Raises: + ValueError: If get_random_batch is called before calling the add function. + tf.errors.InvalidArgumentError: If this operation is executed before any + items are added to the buffer. + """ + if not self._tensors: + raise ValueError('The add function must be called before get_random_batch.') + if keys is None: + keys = self._tensors.keys() + + latest_start_index = self.get_num_adds() - num_steps + 1 + empty_buffer_assert = tf.Assert( + tf.greater(latest_start_index, 0), + ['Not enough elements have been added to the buffer.']) + with tf.control_dependencies([empty_buffer_assert]): + max_index = tf.minimum(self._buffer_size, latest_start_index) + indices = tf.random_uniform( + [batch_size], + minval=0, + maxval=max_index, + dtype=tf.int64) + if num_steps == 1: + return self.gather(indices, keys) + else: + return self.gather_nstep(num_steps, indices, keys) + + def gather(self, indices, keys=None): + """Returns elements at the specified indices from the buffer. + + Args: + indices: (list of integers or rank 1 int Tensor) indices in the buffer to + retrieve elements from. + keys: List of keys of tensors to retrieve. If None retrieve all. 
+ Returns: + A list of tensors, where each element in the list is obtained by indexing + one of the tensors in the buffer. + Raises: + ValueError: If gather is called before calling the add function. + tf.errors.InvalidArgumentError: If indices are bigger than the number of + items in the buffer. + """ + if not self._tensors: + raise ValueError('The add function must be called before calling gather.') + if keys is None: + keys = self._tensors.keys() + with tf.name_scope('Gather'): + index_bound_assert = tf.Assert( + tf.less( + tf.to_int64(tf.reduce_max(indices)), + tf.minimum(self.get_num_adds(), self._buffer_size)), + ['Index out of bounds.']) + with tf.control_dependencies([index_bound_assert]): + indices = tf.convert_to_tensor(indices) + + batch = [] + for key in keys: + batch.append(tf.gather(self._tensors[key], indices, name=key)) + return batch + + def gather_nstep(self, num_steps, indices, keys=None): + """Returns elements at the specified indices from the buffer. + + Args: + num_steps: (integer) length of trajectories to return. + indices: (list of rank num_steps int Tensor) indices in the buffer to + retrieve elements from for multiple trajectories. Each Tensor in the + list represents the indices for a trajectory. + keys: List of keys of tensors to retrieve. If None retrieve all. + Returns: + A list of list-of-tensors, where each element in the list is obtained by + indexing one of the tensors in the buffer. + Raises: + ValueError: If gather is called before calling the add function. + tf.errors.InvalidArgumentError: If indices are bigger than the number of + items in the buffer. + """ + if not self._tensors: + raise ValueError('The add function must be called before calling gather.') + if keys is None: + keys = self._tensors.keys() + with tf.name_scope('Gather'): + index_bound_assert = tf.Assert( + tf.less_equal( + tf.to_int64(tf.reduce_max(indices) + num_steps), + self.get_num_adds()), + ['Trajectory indices go out of bounds.']) + with tf.control_dependencies([index_bound_assert]): + indices = tf.map_fn( + lambda x: tf.mod(tf.range(x, x + num_steps), self._buffer_size), + indices, + dtype=tf.int64) + + batch = [] + for key in keys: + + def SampleTrajectories(trajectory_indices, key=key, + num_steps=num_steps): + trajectory_indices.set_shape([num_steps]) + return tf.gather(self._tensors[key], trajectory_indices, name=key) + + batch.append(tf.map_fn(SampleTrajectories, indices, + dtype=self._tensors[key].dtype)) + return batch + + def get_position(self): + """Returns the position at which the last element was added. + + Returns: + An int tensor representing the index at which the last element was added + to the buffer or -1 if no elements were added. + """ + return tf.cond(self.get_num_adds() < 1, + lambda: self.get_num_adds() - 1, + lambda: tf.mod(self.get_num_adds() - 1, self._buffer_size)) + + def get_num_adds(self): + """Returns the number of additions to the buffer. + + Returns: + An int tensor representing the number of elements that were added. 
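+
+    Note that the count is cumulative over the lifetime of the buffer, so it
+    can exceed buffer_size once older elements start being overwritten.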
+ """ + def num_adds(): + return self._num_adds.value() + + return self._num_adds_cs.execute(num_adds) + + def get_num_tensors(self): + """Returns the number of tensors (slots) in the buffer.""" + return len(self._tensors) diff --git a/models/research/efficient-hrl/agents/ddpg_agent.py b/models/research/efficient-hrl/agents/ddpg_agent.py new file mode 100644 index 0000000000000000000000000000000000000000..904eb6502717e30681d5a3d50437c31b2aa580b2 --- /dev/null +++ b/models/research/efficient-hrl/agents/ddpg_agent.py @@ -0,0 +1,739 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A DDPG/NAF agent. + +Implements the Deep Deterministic Policy Gradient (DDPG) algorithm from +"Continuous control with deep reinforcement learning" - Lilicrap et al. +https://arxiv.org/abs/1509.02971, and the Normalized Advantage Functions (NAF) +algorithm "Continuous Deep Q-Learning with Model-based Acceleration" - Gu et al. +https://arxiv.org/pdf/1603.00748. +""" + +import tensorflow as tf +slim = tf.contrib.slim +import gin.tf +from utils import utils +from agents import ddpg_networks as networks + + +@gin.configurable +class DdpgAgent(object): + """An RL agent that learns using the DDPG algorithm. + + Example usage: + + def critic_net(states, actions): + ... + def actor_net(states, num_action_dims): + ... + + Given a tensorflow environment tf_env, + (of type learning.deepmind.rl.environments.tensorflow.python.tfpyenvironment) + + obs_spec = tf_env.observation_spec() + action_spec = tf_env.action_spec() + + ddpg_agent = agent.DdpgAgent(obs_spec, + action_spec, + actor_net=actor_net, + critic_net=critic_net) + + we can perform actions on the environment as follows: + + state = tf_env.observations()[0] + action = ddpg_agent.actor_net(tf.expand_dims(state, 0))[0, :] + transition_type, reward, discount = tf_env.step([action]) + + Train: + + critic_loss = ddpg_agent.critic_loss(states, actions, rewards, discounts, + next_states) + actor_loss = ddpg_agent.actor_loss(states) + + critic_train_op = slim.learning.create_train_op( + critic_loss, + critic_optimizer, + variables_to_train=ddpg_agent.get_trainable_critic_vars(), + ) + + actor_train_op = slim.learning.create_train_op( + actor_loss, + actor_optimizer, + variables_to_train=ddpg_agent.get_trainable_actor_vars(), + ) + """ + + ACTOR_NET_SCOPE = 'actor_net' + CRITIC_NET_SCOPE = 'critic_net' + TARGET_ACTOR_NET_SCOPE = 'target_actor_net' + TARGET_CRITIC_NET_SCOPE = 'target_critic_net' + + def __init__(self, + observation_spec, + action_spec, + actor_net=networks.actor_net, + critic_net=networks.critic_net, + td_errors_loss=tf.losses.huber_loss, + dqda_clipping=0., + actions_regularizer=0., + target_q_clipping=None, + residual_phi=0.0, + debug_summaries=False): + """Constructs a DDPG agent. + + Args: + observation_spec: A TensorSpec defining the observations. + action_spec: A BoundedTensorSpec defining the actions. 
+ actor_net: A callable that creates the actor network. Must take the + following arguments: states, num_actions. Please see networks.actor_net + for an example. + critic_net: A callable that creates the critic network. Must take the + following arguments: states, actions. Please see networks.critic_net + for an example. + td_errors_loss: A callable defining the loss function for the critic + td error. + dqda_clipping: (float) clips the gradient dqda element-wise between + [-dqda_clipping, dqda_clipping]. Does not perform clipping if + dqda_clipping == 0. + actions_regularizer: A scalar, when positive penalizes the norm of the + actions. This can prevent saturation of actions for the actor_loss. + target_q_clipping: (tuple of floats) clips target q values within + (low, high) values when computing the critic loss. + residual_phi: (float) [0.0, 1.0] Residual algorithm parameter that + interpolates between Q-learning and residual gradient algorithm. + http://www.leemon.com/papers/1995b.pdf + debug_summaries: If True, add summaries to help debug behavior. + Raises: + ValueError: If 'dqda_clipping' is < 0. + """ + self._observation_spec = observation_spec[0] + self._action_spec = action_spec[0] + self._state_shape = tf.TensorShape([None]).concatenate( + self._observation_spec.shape) + self._action_shape = tf.TensorShape([None]).concatenate( + self._action_spec.shape) + self._num_action_dims = self._action_spec.shape.num_elements() + + self._scope = tf.get_variable_scope().name + self._actor_net = tf.make_template( + self.ACTOR_NET_SCOPE, actor_net, create_scope_now_=True) + self._critic_net = tf.make_template( + self.CRITIC_NET_SCOPE, critic_net, create_scope_now_=True) + self._target_actor_net = tf.make_template( + self.TARGET_ACTOR_NET_SCOPE, actor_net, create_scope_now_=True) + self._target_critic_net = tf.make_template( + self.TARGET_CRITIC_NET_SCOPE, critic_net, create_scope_now_=True) + self._td_errors_loss = td_errors_loss + if dqda_clipping < 0: + raise ValueError('dqda_clipping must be >= 0.') + self._dqda_clipping = dqda_clipping + self._actions_regularizer = actions_regularizer + self._target_q_clipping = target_q_clipping + self._residual_phi = residual_phi + self._debug_summaries = debug_summaries + + def _batch_state(self, state): + """Convert state to a batched state. + + Args: + state: Either a list/tuple with an state tensor [num_state_dims]. + Returns: + A tensor [1, num_state_dims] + """ + if isinstance(state, (tuple, list)): + state = state[0] + if state.get_shape().ndims == 1: + state = tf.expand_dims(state, 0) + return state + + def action(self, state): + """Returns the next action for the state. + + Args: + state: A [num_state_dims] tensor representing a state. + Returns: + A [num_action_dims] tensor representing the action. + """ + return self.actor_net(self._batch_state(state), stop_gradients=True)[0, :] + + @gin.configurable('ddpg_sample_action') + def sample_action(self, state, stddev=1.0): + """Returns the action for the state with additive noise. + + Args: + state: A [num_state_dims] tensor representing a state. + stddev: stddev for the Ornstein-Uhlenbeck noise. + Returns: + A [num_action_dims] action tensor. + """ + agent_action = self.action(state) + agent_action += tf.random_normal(tf.shape(agent_action)) * stddev + return utils.clip_to_spec(agent_action, self._action_spec) + + def actor_net(self, states, stop_gradients=False): + """Returns the output of the actor network. 
+ + Args: + states: A [batch_size, num_state_dims] tensor representing a batch + of states. + stop_gradients: (boolean) if true, gradients cannot be propogated through + this operation. + Returns: + A [batch_size, num_action_dims] tensor of actions. + Raises: + ValueError: If `states` does not have the expected dimensions. + """ + self._validate_states(states) + actions = self._actor_net(states, self._action_spec) + if stop_gradients: + actions = tf.stop_gradient(actions) + return actions + + def critic_net(self, states, actions, for_critic_loss=False): + """Returns the output of the critic network. + + Args: + states: A [batch_size, num_state_dims] tensor representing a batch + of states. + actions: A [batch_size, num_action_dims] tensor representing a batch + of actions. + Returns: + q values: A [batch_size] tensor of q values. + Raises: + ValueError: If `states` or `actions' do not have the expected dimensions. + """ + self._validate_states(states) + self._validate_actions(actions) + return self._critic_net(states, actions, + for_critic_loss=for_critic_loss) + + def target_actor_net(self, states): + """Returns the output of the target actor network. + + The target network is used to compute stable targets for training. + + Args: + states: A [batch_size, num_state_dims] tensor representing a batch + of states. + Returns: + A [batch_size, num_action_dims] tensor of actions. + Raises: + ValueError: If `states` does not have the expected dimensions. + """ + self._validate_states(states) + actions = self._target_actor_net(states, self._action_spec) + return tf.stop_gradient(actions) + + def target_critic_net(self, states, actions, for_critic_loss=False): + """Returns the output of the target critic network. + + The target network is used to compute stable targets for training. + + Args: + states: A [batch_size, num_state_dims] tensor representing a batch + of states. + actions: A [batch_size, num_action_dims] tensor representing a batch + of actions. + Returns: + q values: A [batch_size] tensor of q values. + Raises: + ValueError: If `states` or `actions' do not have the expected dimensions. + """ + self._validate_states(states) + self._validate_actions(actions) + return tf.stop_gradient( + self._target_critic_net(states, actions, + for_critic_loss=for_critic_loss)) + + def value_net(self, states, for_critic_loss=False): + """Returns the output of the critic evaluated with the actor. + + Args: + states: A [batch_size, num_state_dims] tensor representing a batch + of states. + Returns: + q values: A [batch_size] tensor of q values. + """ + actions = self.actor_net(states) + return self.critic_net(states, actions, + for_critic_loss=for_critic_loss) + + def target_value_net(self, states, for_critic_loss=False): + """Returns the output of the target critic evaluated with the target actor. + + Args: + states: A [batch_size, num_state_dims] tensor representing a batch + of states. + Returns: + q values: A [batch_size] tensor of q values. + """ + target_actions = self.target_actor_net(states) + return self.target_critic_net(states, target_actions, + for_critic_loss=for_critic_loss) + + def critic_loss(self, states, actions, rewards, discounts, + next_states): + """Computes a loss for training the critic network. + + The loss is the mean squared error between the Q value predictions of the + critic and Q values estimated using TD-lambda. + + Args: + states: A [batch_size, num_state_dims] tensor representing a batch + of states. 
+ actions: A [batch_size, num_action_dims] tensor representing a batch
+ of actions.
+ rewards: A [batch_size, ...] tensor representing a batch of rewards,
+ broadcastable to the critic net output.
+ discounts: A [batch_size, ...] tensor representing a batch of discounts,
+ broadcastable to the critic net output.
+ next_states: A [batch_size, num_state_dims] tensor representing a batch
+ of next states.
+ Returns:
+ A rank-0 tensor representing the critic loss.
+ Raises:
+ ValueError: If any of the inputs do not have the expected dimensions, or
+ if their batch_sizes do not match.
+ """
+ self._validate_states(states)
+ self._validate_actions(actions)
+ self._validate_states(next_states)
+
+ target_q_values = self.target_value_net(next_states, for_critic_loss=True)
+ td_targets = target_q_values * discounts + rewards
+ if self._target_q_clipping is not None:
+ td_targets = tf.clip_by_value(td_targets, self._target_q_clipping[0],
+ self._target_q_clipping[1])
+ q_values = self.critic_net(states, actions, for_critic_loss=True)
+ td_errors = td_targets - q_values
+ if self._debug_summaries:
+ gen_debug_td_error_summaries(
+ target_q_values, q_values, td_targets, td_errors)
+
+ loss = self._td_errors_loss(td_targets, q_values)
+
+ if self._residual_phi > 0.0: # compute residual gradient loss
+ residual_q_values = self.value_net(next_states, for_critic_loss=True)
+ residual_td_targets = residual_q_values * discounts + rewards
+ if self._target_q_clipping is not None:
+ residual_td_targets = tf.clip_by_value(residual_td_targets,
+ self._target_q_clipping[0],
+ self._target_q_clipping[1])
+ residual_td_errors = residual_td_targets - q_values
+ residual_loss = self._td_errors_loss(
+ residual_td_targets, residual_q_values)
+ loss = (loss * (1.0 - self._residual_phi) +
+ residual_loss * self._residual_phi)
+ return loss
+
+ def actor_loss(self, states):
+ """Computes a loss for training the actor network.
+
+ Note that the output does not represent an actual loss. It is called a loss
+ only in the sense that its gradient w.r.t. the actor network weights is the
+ correct gradient for training the actor network,
+ i.e. dloss/dweights = (dq/da)*(da/dweights)
+ which is the gradient used in Algorithm 1 of Lillicrap et al.
+
+ Args:
+ states: A [batch_size, num_state_dims] tensor representing a batch
+ of states.
+ Returns:
+ A rank-0 tensor representing the actor loss.
+ Raises:
+ ValueError: If `states` does not have the expected dimensions.
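+
+ For intuition (an illustrative sketch, not extra computation): with
+ q = critic(s, a) and dqda = dq/da, the surrogate
+ mean_squared_error(stop_gradient(dqda + a), a)
+ has gradient w.r.t. a proportional to a - (dqda + a) = -dqda, so minimizing
+ it moves the actions (and hence the actor weights) in the direction that
+ increases q.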
+ """ + self._validate_states(states) + actions = self.actor_net(states, stop_gradients=False) + critic_values = self.critic_net(states, actions) + q_values = self.critic_function(critic_values, states) + dqda = tf.gradients([q_values], [actions])[0] + dqda_unclipped = dqda + if self._dqda_clipping > 0: + dqda = tf.clip_by_value(dqda, -self._dqda_clipping, self._dqda_clipping) + + actions_norm = tf.norm(actions) + if self._debug_summaries: + with tf.name_scope('dqda'): + tf.summary.scalar('actions_norm', actions_norm) + tf.summary.histogram('dqda', dqda) + tf.summary.histogram('dqda_unclipped', dqda_unclipped) + tf.summary.histogram('actions', actions) + for a in range(self._num_action_dims): + tf.summary.histogram('dqda_unclipped_%d' % a, dqda_unclipped[:, a]) + tf.summary.histogram('dqda_%d' % a, dqda[:, a]) + + actions_norm *= self._actions_regularizer + return slim.losses.mean_squared_error(tf.stop_gradient(dqda + actions), + actions, + scope='actor_loss') + actions_norm + + @gin.configurable('ddpg_critic_function') + def critic_function(self, critic_values, states, weights=None): + """Computes q values based on critic_net outputs, states, and weights. + + Args: + critic_values: A tf.float32 [batch_size, ...] tensor representing outputs + from the critic net. + states: A [batch_size, num_state_dims] tensor representing a batch + of states. + weights: A list or Numpy array or tensor with a shape broadcastable to + `critic_values`. + Returns: + A tf.float32 [batch_size] tensor representing q values. + """ + del states # unused args + if weights is not None: + weights = tf.convert_to_tensor(weights, dtype=critic_values.dtype) + critic_values *= weights + if critic_values.shape.ndims > 1: + critic_values = tf.reduce_sum(critic_values, + range(1, critic_values.shape.ndims)) + critic_values.shape.assert_has_rank(1) + return critic_values + + @gin.configurable('ddpg_update_targets') + def update_targets(self, tau=1.0): + """Performs a soft update of the target network parameters. + + For each weight w_s in the actor/critic networks, and its corresponding + weight w_t in the target actor/critic networks, a soft update is: + w_t = (1- tau) x w_t + tau x ws + + Args: + tau: A float scalar in [0, 1] + Returns: + An operation that performs a soft update of the target network parameters. + Raises: + ValueError: If `tau` is not in [0, 1]. + """ + if tau < 0 or tau > 1: + raise ValueError('Input `tau` should be in [0, 1].') + update_actor = utils.soft_variables_update( + slim.get_trainable_variables( + utils.join_scope(self._scope, self.ACTOR_NET_SCOPE)), + slim.get_trainable_variables( + utils.join_scope(self._scope, self.TARGET_ACTOR_NET_SCOPE)), + tau) + update_critic = utils.soft_variables_update( + slim.get_trainable_variables( + utils.join_scope(self._scope, self.CRITIC_NET_SCOPE)), + slim.get_trainable_variables( + utils.join_scope(self._scope, self.TARGET_CRITIC_NET_SCOPE)), + tau) + return tf.group(update_actor, update_critic, name='update_targets') + + def get_trainable_critic_vars(self): + """Returns a list of trainable variables in the critic network. + + Returns: + A list of trainable variables in the critic network. + """ + return slim.get_trainable_variables( + utils.join_scope(self._scope, self.CRITIC_NET_SCOPE)) + + def get_trainable_actor_vars(self): + """Returns a list of trainable variables in the actor network. + + Returns: + A list of trainable variables in the actor network. 
+ """ + return slim.get_trainable_variables( + utils.join_scope(self._scope, self.ACTOR_NET_SCOPE)) + + def get_critic_vars(self): + """Returns a list of all variables in the critic network. + + Returns: + A list of trainable variables in the critic network. + """ + return slim.get_model_variables( + utils.join_scope(self._scope, self.CRITIC_NET_SCOPE)) + + def get_actor_vars(self): + """Returns a list of all variables in the actor network. + + Returns: + A list of trainable variables in the actor network. + """ + return slim.get_model_variables( + utils.join_scope(self._scope, self.ACTOR_NET_SCOPE)) + + def _validate_states(self, states): + """Raises a value error if `states` does not have the expected shape. + + Args: + states: A tensor. + Raises: + ValueError: If states.shape or states.dtype are not compatible with + observation_spec. + """ + states.shape.assert_is_compatible_with(self._state_shape) + if not states.dtype.is_compatible_with(self._observation_spec.dtype): + raise ValueError('states.dtype={} is not compatible with' + ' observation_spec.dtype={}'.format( + states.dtype, self._observation_spec.dtype)) + + def _validate_actions(self, actions): + """Raises a value error if `actions` does not have the expected shape. + + Args: + actions: A tensor. + Raises: + ValueError: If actions.shape or actions.dtype are not compatible with + action_spec. + """ + actions.shape.assert_is_compatible_with(self._action_shape) + if not actions.dtype.is_compatible_with(self._action_spec.dtype): + raise ValueError('actions.dtype={} is not compatible with' + ' action_spec.dtype={}'.format( + actions.dtype, self._action_spec.dtype)) + + +@gin.configurable +class TD3Agent(DdpgAgent): + """An RL agent that learns using the TD3 algorithm.""" + + ACTOR_NET_SCOPE = 'actor_net' + CRITIC_NET_SCOPE = 'critic_net' + CRITIC_NET2_SCOPE = 'critic_net2' + TARGET_ACTOR_NET_SCOPE = 'target_actor_net' + TARGET_CRITIC_NET_SCOPE = 'target_critic_net' + TARGET_CRITIC_NET2_SCOPE = 'target_critic_net2' + + def __init__(self, + observation_spec, + action_spec, + actor_net=networks.actor_net, + critic_net=networks.critic_net, + td_errors_loss=tf.losses.huber_loss, + dqda_clipping=0., + actions_regularizer=0., + target_q_clipping=None, + residual_phi=0.0, + debug_summaries=False): + """Constructs a TD3 agent. + + Args: + observation_spec: A TensorSpec defining the observations. + action_spec: A BoundedTensorSpec defining the actions. + actor_net: A callable that creates the actor network. Must take the + following arguments: states, num_actions. Please see networks.actor_net + for an example. + critic_net: A callable that creates the critic network. Must take the + following arguments: states, actions. Please see networks.critic_net + for an example. + td_errors_loss: A callable defining the loss function for the critic + td error. + dqda_clipping: (float) clips the gradient dqda element-wise between + [-dqda_clipping, dqda_clipping]. Does not perform clipping if + dqda_clipping == 0. + actions_regularizer: A scalar, when positive penalizes the norm of the + actions. This can prevent saturation of actions for the actor_loss. + target_q_clipping: (tuple of floats) clips target q values within + (low, high) values when computing the critic loss. + residual_phi: (float) [0.0, 1.0] Residual algorithm parameter that + interpolates between Q-learning and residual gradient algorithm. + http://www.leemon.com/papers/1995b.pdf + debug_summaries: If True, add summaries to help debug behavior. 
+ Raises: + ValueError: If 'dqda_clipping' is < 0. + """ + self._observation_spec = observation_spec[0] + self._action_spec = action_spec[0] + self._state_shape = tf.TensorShape([None]).concatenate( + self._observation_spec.shape) + self._action_shape = tf.TensorShape([None]).concatenate( + self._action_spec.shape) + self._num_action_dims = self._action_spec.shape.num_elements() + + self._scope = tf.get_variable_scope().name + self._actor_net = tf.make_template( + self.ACTOR_NET_SCOPE, actor_net, create_scope_now_=True) + self._critic_net = tf.make_template( + self.CRITIC_NET_SCOPE, critic_net, create_scope_now_=True) + self._critic_net2 = tf.make_template( + self.CRITIC_NET2_SCOPE, critic_net, create_scope_now_=True) + self._target_actor_net = tf.make_template( + self.TARGET_ACTOR_NET_SCOPE, actor_net, create_scope_now_=True) + self._target_critic_net = tf.make_template( + self.TARGET_CRITIC_NET_SCOPE, critic_net, create_scope_now_=True) + self._target_critic_net2 = tf.make_template( + self.TARGET_CRITIC_NET2_SCOPE, critic_net, create_scope_now_=True) + self._td_errors_loss = td_errors_loss + if dqda_clipping < 0: + raise ValueError('dqda_clipping must be >= 0.') + self._dqda_clipping = dqda_clipping + self._actions_regularizer = actions_regularizer + self._target_q_clipping = target_q_clipping + self._residual_phi = residual_phi + self._debug_summaries = debug_summaries + + def get_trainable_critic_vars(self): + """Returns a list of trainable variables in the critic network. + NOTE: This gets the vars of both critic networks. + + Returns: + A list of trainable variables in the critic network. + """ + return ( + slim.get_trainable_variables( + utils.join_scope(self._scope, self.CRITIC_NET_SCOPE))) + + def critic_net(self, states, actions, for_critic_loss=False): + """Returns the output of the critic network. + + Args: + states: A [batch_size, num_state_dims] tensor representing a batch + of states. + actions: A [batch_size, num_action_dims] tensor representing a batch + of actions. + Returns: + q values: A [batch_size] tensor of q values. + Raises: + ValueError: If `states` or `actions' do not have the expected dimensions. + """ + values1 = self._critic_net(states, actions, + for_critic_loss=for_critic_loss) + values2 = self._critic_net2(states, actions, + for_critic_loss=for_critic_loss) + if for_critic_loss: + return values1, values2 + return values1 + + def target_critic_net(self, states, actions, for_critic_loss=False): + """Returns the output of the target critic network. + + The target network is used to compute stable targets for training. + + Args: + states: A [batch_size, num_state_dims] tensor representing a batch + of states. + actions: A [batch_size, num_action_dims] tensor representing a batch + of actions. + Returns: + q values: A [batch_size] tensor of q values. + Raises: + ValueError: If `states` or `actions' do not have the expected dimensions. + """ + self._validate_states(states) + self._validate_actions(actions) + values1 = tf.stop_gradient( + self._target_critic_net(states, actions, + for_critic_loss=for_critic_loss)) + values2 = tf.stop_gradient( + self._target_critic_net2(states, actions, + for_critic_loss=for_critic_loss)) + if for_critic_loss: + return values1, values2 + return values1 + + def value_net(self, states, for_critic_loss=False): + """Returns the output of the critic evaluated with the actor. + + Args: + states: A [batch_size, num_state_dims] tensor representing a batch + of states. + Returns: + q values: A [batch_size] tensor of q values. 
+ """ + actions = self.actor_net(states) + return self.critic_net(states, actions, + for_critic_loss=for_critic_loss) + + def target_value_net(self, states, for_critic_loss=False): + """Returns the output of the target critic evaluated with the target actor. + + Args: + states: A [batch_size, num_state_dims] tensor representing a batch + of states. + Returns: + q values: A [batch_size] tensor of q values. + """ + target_actions = self.target_actor_net(states) + noise = tf.clip_by_value( + tf.random_normal(tf.shape(target_actions), stddev=0.2), -0.5, 0.5) + values1, values2 = self.target_critic_net( + states, target_actions + noise, + for_critic_loss=for_critic_loss) + values = tf.minimum(values1, values2) + return values, values + + @gin.configurable('td3_update_targets') + def update_targets(self, tau=1.0): + """Performs a soft update of the target network parameters. + + For each weight w_s in the actor/critic networks, and its corresponding + weight w_t in the target actor/critic networks, a soft update is: + w_t = (1- tau) x w_t + tau x ws + + Args: + tau: A float scalar in [0, 1] + Returns: + An operation that performs a soft update of the target network parameters. + Raises: + ValueError: If `tau` is not in [0, 1]. + """ + if tau < 0 or tau > 1: + raise ValueError('Input `tau` should be in [0, 1].') + update_actor = utils.soft_variables_update( + slim.get_trainable_variables( + utils.join_scope(self._scope, self.ACTOR_NET_SCOPE)), + slim.get_trainable_variables( + utils.join_scope(self._scope, self.TARGET_ACTOR_NET_SCOPE)), + tau) + # NOTE: This updates both critic networks. + update_critic = utils.soft_variables_update( + slim.get_trainable_variables( + utils.join_scope(self._scope, self.CRITIC_NET_SCOPE)), + slim.get_trainable_variables( + utils.join_scope(self._scope, self.TARGET_CRITIC_NET_SCOPE)), + tau) + return tf.group(update_actor, update_critic, name='update_targets') + + +def gen_debug_td_error_summaries( + target_q_values, q_values, td_targets, td_errors): + """Generates debug summaries for critic given a set of batch samples. + + Args: + target_q_values: set of predicted next stage values. + q_values: current predicted value for the critic network. + td_targets: discounted target_q_values with added next stage reward. + td_errors: the different between td_targets and q_values. 
+ """ + with tf.name_scope('td_errors'): + tf.summary.histogram('td_targets', td_targets) + tf.summary.histogram('q_values', q_values) + tf.summary.histogram('target_q_values', target_q_values) + tf.summary.histogram('td_errors', td_errors) + with tf.name_scope('td_targets'): + tf.summary.scalar('mean', tf.reduce_mean(td_targets)) + tf.summary.scalar('max', tf.reduce_max(td_targets)) + tf.summary.scalar('min', tf.reduce_min(td_targets)) + with tf.name_scope('q_values'): + tf.summary.scalar('mean', tf.reduce_mean(q_values)) + tf.summary.scalar('max', tf.reduce_max(q_values)) + tf.summary.scalar('min', tf.reduce_min(q_values)) + with tf.name_scope('target_q_values'): + tf.summary.scalar('mean', tf.reduce_mean(target_q_values)) + tf.summary.scalar('max', tf.reduce_max(target_q_values)) + tf.summary.scalar('min', tf.reduce_min(target_q_values)) + with tf.name_scope('td_errors'): + tf.summary.scalar('mean', tf.reduce_mean(td_errors)) + tf.summary.scalar('max', tf.reduce_max(td_errors)) + tf.summary.scalar('min', tf.reduce_min(td_errors)) + tf.summary.scalar('mean_abs', tf.reduce_mean(tf.abs(td_errors))) diff --git a/models/research/efficient-hrl/agents/ddpg_networks.py b/models/research/efficient-hrl/agents/ddpg_networks.py new file mode 100644 index 0000000000000000000000000000000000000000..63074dfb91cf950b602212936ab2560db818c3a4 --- /dev/null +++ b/models/research/efficient-hrl/agents/ddpg_networks.py @@ -0,0 +1,150 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Sample actor(policy) and critic(q) networks to use with DDPG/NAF agents. + +The DDPG networks are defined in "Section 7: Experiment Details" of +"Continuous control with deep reinforcement learning" - Lilicrap et al. +https://arxiv.org/abs/1509.02971 + +The NAF critic network is based on "Section 4" of "Continuous deep Q-learning +with model-based acceleration" - Gu et al. https://arxiv.org/pdf/1603.00748. +""" + +import tensorflow as tf +slim = tf.contrib.slim +import gin.tf + + +@gin.configurable('ddpg_critic_net') +def critic_net(states, actions, + for_critic_loss=False, + num_reward_dims=1, + states_hidden_layers=(400,), + actions_hidden_layers=None, + joint_hidden_layers=(300,), + weight_decay=0.0001, + normalizer_fn=None, + activation_fn=tf.nn.relu, + zero_obs=False, + images=False): + """Creates a critic that returns q values for the given states and actions. + + Args: + states: (castable to tf.float32) a [batch_size, num_state_dims] tensor + representing a batch of states. + actions: (castable to tf.float32) a [batch_size, num_action_dims] tensor + representing a batch of actions. + num_reward_dims: Number of reward dimensions. + states_hidden_layers: tuple of hidden layers units for states. + actions_hidden_layers: tuple of hidden layers units for actions. + joint_hidden_layers: tuple of hidden layers units after joining states + and actions using tf.concat(). 
+ weight_decay: Weight decay for l2 weights regularizer. + normalizer_fn: Normalizer function, i.e. slim.layer_norm, + activation_fn: Activation function, i.e. tf.nn.relu, slim.leaky_relu, ... + Returns: + A tf.float32 [batch_size] tensor of q values, or a tf.float32 + [batch_size, num_reward_dims] tensor of vector q values if + num_reward_dims > 1. + """ + with slim.arg_scope( + [slim.fully_connected], + activation_fn=activation_fn, + normalizer_fn=normalizer_fn, + weights_regularizer=slim.l2_regularizer(weight_decay), + weights_initializer=slim.variance_scaling_initializer( + factor=1.0/3.0, mode='FAN_IN', uniform=True)): + + orig_states = tf.to_float(states) + #states = tf.to_float(states) + states = tf.concat([tf.to_float(states), tf.to_float(actions)], -1) #TD3 + if images or zero_obs: + states *= tf.constant([0.0] * 2 + [1.0] * (states.shape[1] - 2)) #LALA + actions = tf.to_float(actions) + if states_hidden_layers: + states = slim.stack(states, slim.fully_connected, states_hidden_layers, + scope='states') + if actions_hidden_layers: + actions = slim.stack(actions, slim.fully_connected, actions_hidden_layers, + scope='actions') + joint = tf.concat([states, actions], 1) + if joint_hidden_layers: + joint = slim.stack(joint, slim.fully_connected, joint_hidden_layers, + scope='joint') + with slim.arg_scope([slim.fully_connected], + weights_regularizer=None, + weights_initializer=tf.random_uniform_initializer( + minval=-0.003, maxval=0.003)): + value = slim.fully_connected(joint, num_reward_dims, + activation_fn=None, + normalizer_fn=None, + scope='q_value') + if num_reward_dims == 1: + value = tf.reshape(value, [-1]) + if not for_critic_loss and num_reward_dims > 1: + value = tf.reduce_sum( + value * tf.abs(orig_states[:, -num_reward_dims:]), -1) + return value + + +@gin.configurable('ddpg_actor_net') +def actor_net(states, action_spec, + hidden_layers=(400, 300), + normalizer_fn=None, + activation_fn=tf.nn.relu, + zero_obs=False, + images=False): + """Creates an actor that returns actions for the given states. + + Args: + states: (castable to tf.float32) a [batch_size, num_state_dims] tensor + representing a batch of states. + action_spec: (BoundedTensorSpec) A tensor spec indicating the shape + and range of actions. + hidden_layers: tuple of hidden layers units. + normalizer_fn: Normalizer function, i.e. slim.layer_norm, + activation_fn: Activation function, i.e. tf.nn.relu, slim.leaky_relu, ... + Returns: + A tf.float32 [batch_size, num_action_dims] tensor of actions. + """ + + with slim.arg_scope( + [slim.fully_connected], + activation_fn=activation_fn, + normalizer_fn=normalizer_fn, + weights_initializer=slim.variance_scaling_initializer( + factor=1.0/3.0, mode='FAN_IN', uniform=True)): + + states = tf.to_float(states) + orig_states = states + if images or zero_obs: # Zero-out x, y position. Hacky. 
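+ # Masks the first two entries (the x, y position noted above), presumably
+ # so the policy cannot condition on the agent's absolute location.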
+ states *= tf.constant([0.0] * 2 + [1.0] * (states.shape[1] - 2)) + if hidden_layers: + states = slim.stack(states, slim.fully_connected, hidden_layers, + scope='states') + with slim.arg_scope([slim.fully_connected], + weights_initializer=tf.random_uniform_initializer( + minval=-0.003, maxval=0.003)): + actions = slim.fully_connected(states, + action_spec.shape.num_elements(), + scope='actions', + normalizer_fn=None, + activation_fn=tf.nn.tanh) + action_means = (action_spec.maximum + action_spec.minimum) / 2.0 + action_magnitudes = (action_spec.maximum - action_spec.minimum) / 2.0 + actions = action_means + action_magnitudes * actions + + return actions diff --git a/models/research/efficient-hrl/cond_fn.py b/models/research/efficient-hrl/cond_fn.py new file mode 100644 index 0000000000000000000000000000000000000000..cd1a276e136bf69fa453c3b90c5907eecb1bda1e --- /dev/null +++ b/models/research/efficient-hrl/cond_fn.py @@ -0,0 +1,244 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Defines many boolean functions indicating when to step and reset. +""" + +import tensorflow as tf +import gin.tf + + +@gin.configurable +def env_transition(agent, state, action, transition_type, environment_steps, + num_episodes): + """True if the transition_type is TRANSITION or FINAL_TRANSITION. + + Args: + agent: RL agent. + state: A [num_state_dims] tensor representing a state. + action: Action performed. + transition_type: Type of transition after action + environment_steps: Number of steps performed by environment. + num_episodes: Number of episodes. + Returns: + cond: Returns an op that evaluates to true if the transition type is + not RESTARTING + """ + del agent, state, action, num_episodes, environment_steps + cond = tf.logical_not(transition_type) + return cond + + +@gin.configurable +def env_restart(agent, state, action, transition_type, environment_steps, + num_episodes): + """True if the transition_type is RESTARTING. + + Args: + agent: RL agent. + state: A [num_state_dims] tensor representing a state. + action: Action performed. + transition_type: Type of transition after action + environment_steps: Number of steps performed by environment. + num_episodes: Number of episodes. + Returns: + cond: Returns an op that evaluates to true if the transition type equals + RESTARTING. + """ + del agent, state, action, num_episodes, environment_steps + cond = tf.identity(transition_type) + return cond + + +@gin.configurable +def every_n_steps(agent, + state, + action, + transition_type, + environment_steps, + num_episodes, + n=150): + """True once every n steps. + + Args: + agent: RL agent. + state: A [num_state_dims] tensor representing a state. + action: Action performed. + transition_type: Type of transition after action + environment_steps: Number of steps performed by environment. + num_episodes: Number of episodes. + n: Return true once every n steps. 
+ Returns: + cond: Returns an op that evaluates to true if environment_steps + equals 0 mod n. We increment the step before checking this condition, so + we do not need to add one to environment_steps. + """ + del agent, state, action, transition_type, num_episodes + cond = tf.equal(tf.mod(environment_steps, n), 0) + return cond + + +@gin.configurable +def every_n_episodes(agent, + state, + action, + transition_type, + environment_steps, + num_episodes, + n=2, + steps_per_episode=None): + """True once every n episodes. + + Specifically, evaluates to True on the 0th step of every nth episode. + Unlike environment_steps, num_episodes starts at 0, so we do want to add + one to ensure it does not reset on the first call. + + Args: + agent: RL agent. + state: A [num_state_dims] tensor representing a state. + action: Action performed. + transition_type: Type of transition after action + environment_steps: Number of steps performed by environment. + num_episodes: Number of episodes. + n: Return true once every n episodes. + steps_per_episode: How many steps per episode. Needed to determine when a + new episode starts. + Returns: + cond: Returns an op that evaluates to true on the last step of the episode + (i.e. if num_episodes equals 0 mod n). + """ + assert steps_per_episode is not None + del agent, action, transition_type + ant_fell = tf.logical_or(state[2] < 0.2, state[2] > 1.0) + cond = tf.logical_and( + tf.logical_or( + ant_fell, + tf.equal(tf.mod(num_episodes + 1, n), 0)), + tf.equal(tf.mod(environment_steps, steps_per_episode), 0)) + return cond + + +@gin.configurable +def failed_reset_after_n_episodes(agent, + state, + action, + transition_type, + environment_steps, + num_episodes, + steps_per_episode=None, + reset_state=None, + max_dist=1.0, + epsilon=1e-10): + """Every n episodes, returns True if the reset agent fails to return. + + Specifically, evaluates to True if the distance between the state and the + reset state is greater than max_dist at the end of the episode. + + Args: + agent: RL agent. + state: A [num_state_dims] tensor representing a state. + action: Action performed. + transition_type: Type of transition after action + environment_steps: Number of steps performed by environment. + num_episodes: Number of episodes. + steps_per_episode: How many steps per episode. Needed to determine when a + new episode starts. + reset_state: State to which the reset controller should return. + max_dist: Agent is considered to have successfully reset if its distance + from the reset_state is less than max_dist. + epsilon: small offset to ensure non-negative/zero distance. + Returns: + cond: Returns an op that evaluates to true if num_episodes+1 equals 0 + mod n. We add one to the num_episodes so the environment is not reset after + the 0th step. + """ + assert steps_per_episode is not None + assert reset_state is not None + del agent, state, action, transition_type, num_episodes + dist = tf.sqrt( + tf.reduce_sum(tf.squared_difference(state, reset_state)) + epsilon) + cond = tf.logical_and( + tf.greater(dist, tf.constant(max_dist)), + tf.equal(tf.mod(environment_steps, steps_per_episode), 0)) + return cond + + +@gin.configurable +def q_too_small(agent, + state, + action, + transition_type, + environment_steps, + num_episodes, + q_min=0.5): + """True of q is too small. + + Args: + agent: RL agent. + state: A [num_state_dims] tensor representing a state. + action: Action performed. 
+ transition_type: Type of transition after action
+ environment_steps: Number of steps performed by environment.
+ num_episodes: Number of episodes.
+ q_min: Returns true if the qval is less than q_min
+ Returns:
+ cond: Returns an op that evaluates to true if qval is less than q_min.
+ """
+ del transition_type, environment_steps, num_episodes
+ # Replace the last state entry with zero before querying the reset agent's
+ # critic (assumes that entry is not meaningful to the reset agent).
+ state_for_reset_agent = tf.concat(
+ [state[:-1], tf.constant([0.0], dtype=tf.float32)], 0)
+ qval = agent.BASE_AGENT_CLASS.critic_net(
+ tf.expand_dims(state_for_reset_agent, 0), tf.expand_dims(action, 0))[0, :]
+ cond = tf.greater(tf.constant(q_min), qval)
+ return cond
+
+
+@gin.configurable
+def true_fn(agent, state, action, transition_type, environment_steps,
+ num_episodes):
+ """Returns an op that evaluates to true.
+
+ Args:
+ agent: RL agent.
+ state: A [num_state_dims] tensor representing a state.
+ action: Action performed.
+ transition_type: Type of transition after action
+ environment_steps: Number of steps performed by environment.
+ num_episodes: Number of episodes.
+ Returns:
+ cond: op that always evaluates to True.
+ """
+ del agent, state, action, transition_type, environment_steps, num_episodes
+ cond = tf.constant(True, dtype=tf.bool)
+ return cond
+
+
+@gin.configurable
+def false_fn(agent, state, action, transition_type, environment_steps,
+ num_episodes):
+ """Returns an op that evaluates to false.
+
+ Args:
+ agent: RL agent.
+ state: A [num_state_dims] tensor representing a state.
+ action: Action performed.
+ transition_type: Type of transition after action
+ environment_steps: Number of steps performed by environment.
+ num_episodes: Number of episodes.
+ Returns:
+ cond: op that always evaluates to False.
+ """
+ del agent, state, action, transition_type, environment_steps, num_episodes
+ cond = tf.constant(False, dtype=tf.bool)
+ return cond
diff --git a/models/research/efficient-hrl/configs/base_uvf.gin b/models/research/efficient-hrl/configs/base_uvf.gin
new file mode 100644
index 0000000000000000000000000000000000000000..2f3f47b67a3fb0a38ee35b7a4deaf54a2700a19a
--- /dev/null
+++ b/models/research/efficient-hrl/configs/base_uvf.gin
@@ -0,0 +1,68 @@
+#-*-Python-*-
+import gin.tf.external_configurables
+
+create_maze_env.top_down_view = %IMAGES
+## Create the agent
+AGENT_CLASS = @UvfAgent
+UvfAgent.tf_context = %CONTEXT
+UvfAgent.actor_net = @agent/ddpg_actor_net
+UvfAgent.critic_net = @agent/ddpg_critic_net
+UvfAgent.dqda_clipping = 0.0
+UvfAgent.td_errors_loss = @tf.losses.huber_loss
+UvfAgent.target_q_clipping = %TARGET_Q_CLIPPING
+
+# Create meta agent
+META_CLASS = @MetaAgent
+MetaAgent.tf_context = %META_CONTEXT
+MetaAgent.sub_context = %CONTEXT
+MetaAgent.actor_net = @meta/ddpg_actor_net
+MetaAgent.critic_net = @meta/ddpg_critic_net
+MetaAgent.dqda_clipping = 0.0
+MetaAgent.td_errors_loss = @tf.losses.huber_loss
+MetaAgent.target_q_clipping = %TARGET_Q_CLIPPING
+
+# Create state preprocess
+STATE_PREPROCESS_CLASS = @StatePreprocess
+StatePreprocess.ndims = %SUBGOAL_DIM
+state_preprocess_net.states_hidden_layers = (100, 100)
+state_preprocess_net.num_output_dims = %SUBGOAL_DIM
+state_preprocess_net.images = %IMAGES
+action_embed_net.num_output_dims = %SUBGOAL_DIM
+INVERSE_DYNAMICS_CLASS = @InverseDynamics
+
+# actor_net
+ACTOR_HIDDEN_SIZE_1 = 300
+ACTOR_HIDDEN_SIZE_2 = 300
+agent/ddpg_actor_net.hidden_layers = (%ACTOR_HIDDEN_SIZE_1, %ACTOR_HIDDEN_SIZE_2)
+agent/ddpg_actor_net.activation_fn = @tf.nn.relu
+agent/ddpg_actor_net.zero_obs = %ZERO_OBS
+agent/ddpg_actor_net.images = %IMAGES
+meta/ddpg_actor_net.hidden_layers =
(%ACTOR_HIDDEN_SIZE_1, %ACTOR_HIDDEN_SIZE_2) +meta/ddpg_actor_net.activation_fn = @tf.nn.relu +meta/ddpg_actor_net.zero_obs = False +meta/ddpg_actor_net.images = %IMAGES +# critic_net +CRITIC_HIDDEN_SIZE_1 = 300 +CRITIC_HIDDEN_SIZE_2 = 300 +agent/ddpg_critic_net.states_hidden_layers = (%CRITIC_HIDDEN_SIZE_1,) +agent/ddpg_critic_net.actions_hidden_layers = None +agent/ddpg_critic_net.joint_hidden_layers = (%CRITIC_HIDDEN_SIZE_2,) +agent/ddpg_critic_net.weight_decay = 0.0 +agent/ddpg_critic_net.activation_fn = @tf.nn.relu +agent/ddpg_critic_net.zero_obs = %ZERO_OBS +agent/ddpg_critic_net.images = %IMAGES +meta/ddpg_critic_net.states_hidden_layers = (%CRITIC_HIDDEN_SIZE_1,) +meta/ddpg_critic_net.actions_hidden_layers = None +meta/ddpg_critic_net.joint_hidden_layers = (%CRITIC_HIDDEN_SIZE_2,) +meta/ddpg_critic_net.weight_decay = 0.0 +meta/ddpg_critic_net.activation_fn = @tf.nn.relu +meta/ddpg_critic_net.zero_obs = False +meta/ddpg_critic_net.images = %IMAGES + +tf.losses.huber_loss.delta = 1.0 +# Sample action +uvf_add_noise_fn.stddev = 1.0 +meta_add_noise_fn.stddev = %META_EXPLORE_NOISE +# Update targets +ddpg_update_targets.tau = 0.001 +td3_update_targets.tau = 0.005 diff --git a/models/research/efficient-hrl/configs/eval_uvf.gin b/models/research/efficient-hrl/configs/eval_uvf.gin new file mode 100644 index 0000000000000000000000000000000000000000..7a58241e06aa4a6140faa8a74b262729f1f5e4c1 --- /dev/null +++ b/models/research/efficient-hrl/configs/eval_uvf.gin @@ -0,0 +1,14 @@ +#-*-Python-*- +# Config eval +evaluate.environment = @create_maze_env() +evaluate.agent_class = %AGENT_CLASS +evaluate.meta_agent_class = %META_CLASS +evaluate.state_preprocess_class = %STATE_PREPROCESS_CLASS +evaluate.num_episodes_eval = 50 +evaluate.num_episodes_videos = 1 +evaluate.gamma = 1.0 +evaluate.eval_interval_secs = 1 +evaluate.generate_videos = False +evaluate.generate_summaries = True +evaluate.eval_modes = %EVAL_MODES +evaluate.max_steps_per_episode = %RESET_EPISODE_PERIOD diff --git a/models/research/efficient-hrl/configs/train_uvf.gin b/models/research/efficient-hrl/configs/train_uvf.gin new file mode 100644 index 0000000000000000000000000000000000000000..8b02d7a6cb468f913ebaefe3fa8f519c3ad8fe4c --- /dev/null +++ b/models/research/efficient-hrl/configs/train_uvf.gin @@ -0,0 +1,52 @@ +#-*-Python-*- +# Create replay_buffer +agent/CircularBuffer.buffer_size = 200000 +meta/CircularBuffer.buffer_size = 200000 +agent/CircularBuffer.scope = "agent" +meta/CircularBuffer.scope = "meta" + +# Config train +train_uvf.environment = @create_maze_env() +train_uvf.agent_class = %AGENT_CLASS +train_uvf.meta_agent_class = %META_CLASS +train_uvf.state_preprocess_class = %STATE_PREPROCESS_CLASS +train_uvf.inverse_dynamics_class = %INVERSE_DYNAMICS_CLASS +train_uvf.replay_buffer = @agent/CircularBuffer() +train_uvf.meta_replay_buffer = @meta/CircularBuffer() +train_uvf.critic_optimizer = @critic/AdamOptimizer() +train_uvf.actor_optimizer = @actor/AdamOptimizer() +train_uvf.meta_critic_optimizer = @meta_critic/AdamOptimizer() +train_uvf.meta_actor_optimizer = @meta_actor/AdamOptimizer() +train_uvf.repr_optimizer = @repr/AdamOptimizer() +train_uvf.num_episodes_train = 25000 +train_uvf.batch_size = 100 +train_uvf.initial_episodes = 5 +train_uvf.gamma = 0.99 +train_uvf.meta_gamma = 0.99 +train_uvf.reward_scale_factor = 1.0 +train_uvf.target_update_period = 2 +train_uvf.num_updates_per_observation = 1 +train_uvf.num_collect_per_update = 1 +train_uvf.num_collect_per_meta_update = 10 +train_uvf.debug_summaries = False 
+train_uvf.log_every_n_steps = 1000 +train_uvf.save_policy_every_n_steps =100000 + +# Config Optimizers +critic/AdamOptimizer.learning_rate = 0.001 +critic/AdamOptimizer.beta1 = 0.9 +critic/AdamOptimizer.beta2 = 0.999 +actor/AdamOptimizer.learning_rate = 0.0001 +actor/AdamOptimizer.beta1 = 0.9 +actor/AdamOptimizer.beta2 = 0.999 + +meta_critic/AdamOptimizer.learning_rate = 0.001 +meta_critic/AdamOptimizer.beta1 = 0.9 +meta_critic/AdamOptimizer.beta2 = 0.999 +meta_actor/AdamOptimizer.learning_rate = 0.0001 +meta_actor/AdamOptimizer.beta1 = 0.9 +meta_actor/AdamOptimizer.beta2 = 0.999 + +repr/AdamOptimizer.learning_rate = 0.0001 +repr/AdamOptimizer.beta1 = 0.9 +repr/AdamOptimizer.beta2 = 0.999 diff --git a/models/research/efficient-hrl/context/__init__.py b/models/research/efficient-hrl/context/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/models/research/efficient-hrl/context/__init__.py @@ -0,0 +1 @@ + diff --git a/models/research/efficient-hrl/context/configs/ant_block.gin b/models/research/efficient-hrl/context/configs/ant_block.gin new file mode 100644 index 0000000000000000000000000000000000000000..d5bd4f01e015611127bbc188b3ac9af3df6a288a --- /dev/null +++ b/models/research/efficient-hrl/context/configs/ant_block.gin @@ -0,0 +1,67 @@ +#-*-Python-*- +create_maze_env.env_name = "AntBlock" +ZERO_OBS = False +context_range = (%CONTEXT_RANGE_MIN, %CONTEXT_RANGE_MAX) +meta_context_range = ((-4, -4), (20, 20)) + +RESET_EPISODE_PERIOD = 500 +RESET_ENV_PERIOD = 1 +# End episode every N steps +UvfAgent.reset_episode_cond_fn = @every_n_steps +every_n_steps.n = %RESET_EPISODE_PERIOD +train_uvf.max_steps_per_episode = %RESET_EPISODE_PERIOD +# Do a manual reset every N episodes +UvfAgent.reset_env_cond_fn = @every_n_episodes +every_n_episodes.n = %RESET_ENV_PERIOD +every_n_episodes.steps_per_episode = %RESET_EPISODE_PERIOD + +## Config defaults +EVAL_MODES = ["eval1", "eval2", "eval3"] + +## Config agent +CONTEXT = @agent/Context +META_CONTEXT = @meta/Context + +## Config agent context +agent/Context.context_ranges = [%context_range] +agent/Context.context_shapes = [%SUBGOAL_DIM] +agent/Context.meta_action_every_n = 10 +agent/Context.samplers = { + "train": [@train/DirectionSampler], + "explore": [@train/DirectionSampler], +} + +agent/Context.context_transition_fn = @relative_context_transition_fn +agent/Context.context_multi_transition_fn = @relative_context_multi_transition_fn + +agent/Context.reward_fn = @uvf/negative_distance + +## Config meta context +meta/Context.context_ranges = [%meta_context_range] +meta/Context.context_shapes = [2] +meta/Context.samplers = { + "train": [@train/RandomSampler], + "explore": [@train/RandomSampler], + "eval1": [@eval1/ConstantSampler], + "eval2": [@eval2/ConstantSampler], + "eval3": [@eval3/ConstantSampler], +} +meta/Context.reward_fn = @task/negative_distance + +## Config rewards +task/negative_distance.state_indices = [3, 4] +task/negative_distance.relative_context = False +task/negative_distance.diff = False +task/negative_distance.offset = 0.0 + +## Config samplers +train/RandomSampler.context_range = %meta_context_range +train/DirectionSampler.context_range = %context_range +train/DirectionSampler.k = %SUBGOAL_DIM +relative_context_transition_fn.k = %SUBGOAL_DIM +relative_context_multi_transition_fn.k = %SUBGOAL_DIM +MetaAgent.k = %SUBGOAL_DIM + +eval1/ConstantSampler.value = [16, 0] +eval2/ConstantSampler.value = [16, 16] +eval3/ConstantSampler.value = [0, 16] 
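+
+# Illustrative sketch only (not part of this config): an additional eval goal
+# would be registered the same way, e.g.
+#   EVAL_MODES = ["eval1", "eval2", "eval3", "eval4"]
+#   meta/Context.samplers with an extra entry "eval4": [@eval4/ConstantSampler]
+#   eval4/ConstantSampler.value = [8, 8]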
diff --git a/models/research/efficient-hrl/context/configs/ant_block_maze.gin b/models/research/efficient-hrl/context/configs/ant_block_maze.gin new file mode 100644 index 0000000000000000000000000000000000000000..cebf775be129b51588092699fbeb314fb4f985d0 --- /dev/null +++ b/models/research/efficient-hrl/context/configs/ant_block_maze.gin @@ -0,0 +1,67 @@ +#-*-Python-*- +create_maze_env.env_name = "AntBlockMaze" +ZERO_OBS = False +context_range = (%CONTEXT_RANGE_MIN, %CONTEXT_RANGE_MAX) +meta_context_range = ((-4, -4), (12, 20)) + +RESET_EPISODE_PERIOD = 500 +RESET_ENV_PERIOD = 1 +# End episode every N steps +UvfAgent.reset_episode_cond_fn = @every_n_steps +every_n_steps.n = %RESET_EPISODE_PERIOD +train_uvf.max_steps_per_episode = %RESET_EPISODE_PERIOD +# Do a manual reset every N episodes +UvfAgent.reset_env_cond_fn = @every_n_episodes +every_n_episodes.n = %RESET_ENV_PERIOD +every_n_episodes.steps_per_episode = %RESET_EPISODE_PERIOD + +## Config defaults +EVAL_MODES = ["eval1", "eval2", "eval3"] + +## Config agent +CONTEXT = @agent/Context +META_CONTEXT = @meta/Context + +## Config agent context +agent/Context.context_ranges = [%context_range] +agent/Context.context_shapes = [%SUBGOAL_DIM] +agent/Context.meta_action_every_n = 10 +agent/Context.samplers = { + "train": [@train/DirectionSampler], + "explore": [@train/DirectionSampler], +} + +agent/Context.context_transition_fn = @relative_context_transition_fn +agent/Context.context_multi_transition_fn = @relative_context_multi_transition_fn + +agent/Context.reward_fn = @uvf/negative_distance + +## Config meta context +meta/Context.context_ranges = [%meta_context_range] +meta/Context.context_shapes = [2] +meta/Context.samplers = { + "train": [@train/RandomSampler], + "explore": [@train/RandomSampler], + "eval1": [@eval1/ConstantSampler], + "eval2": [@eval2/ConstantSampler], + "eval3": [@eval3/ConstantSampler], +} +meta/Context.reward_fn = @task/negative_distance + +## Config rewards +task/negative_distance.state_indices = [3, 4] +task/negative_distance.relative_context = False +task/negative_distance.diff = False +task/negative_distance.offset = 0.0 + +## Config samplers +train/RandomSampler.context_range = %meta_context_range +train/DirectionSampler.context_range = %context_range +train/DirectionSampler.k = %SUBGOAL_DIM +relative_context_transition_fn.k = %SUBGOAL_DIM +relative_context_multi_transition_fn.k = %SUBGOAL_DIM +MetaAgent.k = %SUBGOAL_DIM + +eval1/ConstantSampler.value = [8, 0] +eval2/ConstantSampler.value = [8, 16] +eval3/ConstantSampler.value = [0, 16] diff --git a/models/research/efficient-hrl/context/configs/ant_fall_multi.gin b/models/research/efficient-hrl/context/configs/ant_fall_multi.gin new file mode 100644 index 0000000000000000000000000000000000000000..eb89ad0cb164ddb4c0c08ba9649d7f2e5d7a9944 --- /dev/null +++ b/models/research/efficient-hrl/context/configs/ant_fall_multi.gin @@ -0,0 +1,62 @@ +#-*-Python-*- +create_maze_env.env_name = "AntFall" +context_range = (%CONTEXT_RANGE_MIN, %CONTEXT_RANGE_MAX) +meta_context_range = ((-4, -4, 0), (12, 28, 5)) + +RESET_EPISODE_PERIOD = 500 +RESET_ENV_PERIOD = 1 +# End episode every N steps +UvfAgent.reset_episode_cond_fn = @every_n_steps +every_n_steps.n = %RESET_EPISODE_PERIOD +train_uvf.max_steps_per_episode = %RESET_EPISODE_PERIOD +# Do a manual reset every N episodes +UvfAgent.reset_env_cond_fn = @every_n_episodes +every_n_episodes.n = %RESET_ENV_PERIOD +every_n_episodes.steps_per_episode = %RESET_EPISODE_PERIOD + +## Config defaults +EVAL_MODES = ["eval1"] + +## Config 
agent +CONTEXT = @agent/Context +META_CONTEXT = @meta/Context + +## Config agent context +agent/Context.context_ranges = [%context_range] +agent/Context.context_shapes = [%SUBGOAL_DIM] +agent/Context.meta_action_every_n = 10 +agent/Context.samplers = { + "train": [@train/DirectionSampler], + "explore": [@train/DirectionSampler], +} + +agent/Context.context_transition_fn = @relative_context_transition_fn +agent/Context.context_multi_transition_fn = @relative_context_multi_transition_fn + +agent/Context.reward_fn = @uvf/negative_distance + +## Config meta context +meta/Context.context_ranges = [%meta_context_range] +meta/Context.context_shapes = [3] +meta/Context.samplers = { + "train": [@train/RandomSampler], + "explore": [@train/RandomSampler], + "eval1": [@eval1/ConstantSampler], +} +meta/Context.reward_fn = @task/negative_distance + +## Config rewards +task/negative_distance.state_indices = [0, 1, 2] +task/negative_distance.relative_context = False +task/negative_distance.diff = False +task/negative_distance.offset = 0.0 + +## Config samplers +train/RandomSampler.context_range = %meta_context_range +train/DirectionSampler.context_range = %context_range +train/DirectionSampler.k = %SUBGOAL_DIM +relative_context_transition_fn.k = %SUBGOAL_DIM +relative_context_multi_transition_fn.k = %SUBGOAL_DIM +MetaAgent.k = %SUBGOAL_DIM + +eval1/ConstantSampler.value = [0, 27, 4.5] diff --git a/models/research/efficient-hrl/context/configs/ant_fall_multi_img.gin b/models/research/efficient-hrl/context/configs/ant_fall_multi_img.gin new file mode 100644 index 0000000000000000000000000000000000000000..b54fb7c91961ab38febe597325c0d816a872be20 --- /dev/null +++ b/models/research/efficient-hrl/context/configs/ant_fall_multi_img.gin @@ -0,0 +1,68 @@ +#-*-Python-*- +create_maze_env.env_name = "AntFall" +IMAGES = True + +context_range = (%CONTEXT_RANGE_MIN, %CONTEXT_RANGE_MAX) +meta_context_range = ((-4, -4, 0), (12, 28, 5)) + +RESET_EPISODE_PERIOD = 500 +RESET_ENV_PERIOD = 1 +# End episode every N steps +UvfAgent.reset_episode_cond_fn = @every_n_steps +every_n_steps.n = %RESET_EPISODE_PERIOD +train_uvf.max_steps_per_episode = %RESET_EPISODE_PERIOD +# Do a manual reset every N episodes +UvfAgent.reset_env_cond_fn = @every_n_episodes +every_n_episodes.n = %RESET_ENV_PERIOD +every_n_episodes.steps_per_episode = %RESET_EPISODE_PERIOD + +## Config defaults +EVAL_MODES = ["eval1"] + +## Config agent +CONTEXT = @agent/Context +META_CONTEXT = @meta/Context + +## Config agent context +agent/Context.context_ranges = [%context_range] +agent/Context.context_shapes = [%SUBGOAL_DIM] +agent/Context.meta_action_every_n = 10 +agent/Context.samplers = { + "train": [@train/DirectionSampler], + "explore": [@train/DirectionSampler], +} + +agent/Context.context_transition_fn = @relative_context_transition_fn +agent/Context.context_multi_transition_fn = @relative_context_multi_transition_fn + +agent/Context.reward_fn = @uvf/negative_distance + +## Config meta context +meta/Context.context_ranges = [%meta_context_range] +meta/Context.context_shapes = [3] +meta/Context.samplers = { + "train": [@train/RandomSampler], + "explore": [@train/RandomSampler], + "eval1": [@eval1/ConstantSampler], +} +meta/Context.context_transition_fn = @task/relative_context_transition_fn +meta/Context.context_multi_transition_fn = @task/relative_context_multi_transition_fn +meta/Context.reward_fn = @task/negative_distance + +## Config rewards +task/negative_distance.state_indices = [0, 1, 2] +task/negative_distance.relative_context = True 
+task/negative_distance.diff = False +task/negative_distance.offset = 0.0 + +## Config samplers +train/RandomSampler.context_range = %meta_context_range +train/DirectionSampler.context_range = %context_range +train/DirectionSampler.k = %SUBGOAL_DIM +relative_context_transition_fn.k = %SUBGOAL_DIM +relative_context_multi_transition_fn.k = %SUBGOAL_DIM +task/relative_context_transition_fn.k = 3 +task/relative_context_multi_transition_fn.k = 3 +MetaAgent.k = %SUBGOAL_DIM + +eval1/ConstantSampler.value = [0, 27, 0] diff --git a/models/research/efficient-hrl/context/configs/ant_fall_single.gin b/models/research/efficient-hrl/context/configs/ant_fall_single.gin new file mode 100644 index 0000000000000000000000000000000000000000..56bbc070072182cbcda7580c87cf65a593e8a743 --- /dev/null +++ b/models/research/efficient-hrl/context/configs/ant_fall_single.gin @@ -0,0 +1,62 @@ +#-*-Python-*- +create_maze_env.env_name = "AntFall" +context_range = (%CONTEXT_RANGE_MIN, %CONTEXT_RANGE_MAX) +meta_context_range = ((-4, -4, 0), (12, 28, 5)) + +RESET_EPISODE_PERIOD = 500 +RESET_ENV_PERIOD = 1 +# End episode every N steps +UvfAgent.reset_episode_cond_fn = @every_n_steps +every_n_steps.n = %RESET_EPISODE_PERIOD +train_uvf.max_steps_per_episode = %RESET_EPISODE_PERIOD +# Do a manual reset every N episodes +UvfAgent.reset_env_cond_fn = @every_n_episodes +every_n_episodes.n = %RESET_ENV_PERIOD +every_n_episodes.steps_per_episode = %RESET_EPISODE_PERIOD + +## Config defaults +EVAL_MODES = ["eval1"] + +## Config agent +CONTEXT = @agent/Context +META_CONTEXT = @meta/Context + +## Config agent context +agent/Context.context_ranges = [%context_range] +agent/Context.context_shapes = [%SUBGOAL_DIM] +agent/Context.meta_action_every_n = 10 +agent/Context.samplers = { + "train": [@train/DirectionSampler], + "explore": [@train/DirectionSampler], +} + +agent/Context.context_transition_fn = @relative_context_transition_fn +agent/Context.context_multi_transition_fn = @relative_context_multi_transition_fn + +agent/Context.reward_fn = @uvf/negative_distance + +## Config meta context +meta/Context.context_ranges = [%meta_context_range] +meta/Context.context_shapes = [3] +meta/Context.samplers = { + "train": [@eval1/ConstantSampler], + "explore": [@eval1/ConstantSampler], + "eval1": [@eval1/ConstantSampler], +} +meta/Context.reward_fn = @task/negative_distance + +## Config rewards +task/negative_distance.state_indices = [0, 1, 2] +task/negative_distance.relative_context = False +task/negative_distance.diff = False +task/negative_distance.offset = 0.0 + +## Config samplers +train/RandomSampler.context_range = %meta_context_range +train/DirectionSampler.context_range = %context_range +train/DirectionSampler.k = %SUBGOAL_DIM +relative_context_transition_fn.k = %SUBGOAL_DIM +relative_context_multi_transition_fn.k = %SUBGOAL_DIM +MetaAgent.k = %SUBGOAL_DIM + +eval1/ConstantSampler.value = [0, 27, 4.5] diff --git a/models/research/efficient-hrl/context/configs/ant_maze.gin b/models/research/efficient-hrl/context/configs/ant_maze.gin new file mode 100644 index 0000000000000000000000000000000000000000..3a0b73e30d7054dc6d573669b7df728cff93226a --- /dev/null +++ b/models/research/efficient-hrl/context/configs/ant_maze.gin @@ -0,0 +1,66 @@ +#-*-Python-*- +create_maze_env.env_name = "AntMaze" +context_range = (%CONTEXT_RANGE_MIN, %CONTEXT_RANGE_MAX) +meta_context_range = ((-4, -4), (20, 20)) + +RESET_EPISODE_PERIOD = 500 +RESET_ENV_PERIOD = 1 +# End episode every N steps +UvfAgent.reset_episode_cond_fn = @every_n_steps +every_n_steps.n = 
%RESET_EPISODE_PERIOD +train_uvf.max_steps_per_episode = %RESET_EPISODE_PERIOD +# Do a manual reset every N episodes +UvfAgent.reset_env_cond_fn = @every_n_episodes +every_n_episodes.n = %RESET_ENV_PERIOD +every_n_episodes.steps_per_episode = %RESET_EPISODE_PERIOD + +## Config defaults +EVAL_MODES = ["eval1", "eval2", "eval3"] + +## Config agent +CONTEXT = @agent/Context +META_CONTEXT = @meta/Context + +## Config agent context +agent/Context.context_ranges = [%context_range] +agent/Context.context_shapes = [%SUBGOAL_DIM] +agent/Context.meta_action_every_n = 10 +agent/Context.samplers = { + "train": [@train/DirectionSampler], + "explore": [@train/DirectionSampler], +} + +agent/Context.context_transition_fn = @relative_context_transition_fn +agent/Context.context_multi_transition_fn = @relative_context_multi_transition_fn + +agent/Context.reward_fn = @uvf/negative_distance + +## Config meta context +meta/Context.context_ranges = [%meta_context_range] +meta/Context.context_shapes = [2] +meta/Context.samplers = { + "train": [@train/RandomSampler], + "explore": [@train/RandomSampler], + "eval1": [@eval1/ConstantSampler], + "eval2": [@eval2/ConstantSampler], + "eval3": [@eval3/ConstantSampler], +} +meta/Context.reward_fn = @task/negative_distance + +## Config rewards +task/negative_distance.state_indices = [0, 1] +task/negative_distance.relative_context = False +task/negative_distance.diff = False +task/negative_distance.offset = 0.0 + +## Config samplers +train/RandomSampler.context_range = %meta_context_range +train/DirectionSampler.context_range = %context_range +train/DirectionSampler.k = %SUBGOAL_DIM +relative_context_transition_fn.k = %SUBGOAL_DIM +relative_context_multi_transition_fn.k = %SUBGOAL_DIM +MetaAgent.k = %SUBGOAL_DIM + +eval1/ConstantSampler.value = [16, 0] +eval2/ConstantSampler.value = [16, 16] +eval3/ConstantSampler.value = [0, 16] diff --git a/models/research/efficient-hrl/context/configs/ant_maze_img.gin b/models/research/efficient-hrl/context/configs/ant_maze_img.gin new file mode 100644 index 0000000000000000000000000000000000000000..ceed65a0884587d9cd64cdf162bf1b7e3495469d --- /dev/null +++ b/models/research/efficient-hrl/context/configs/ant_maze_img.gin @@ -0,0 +1,72 @@ +#-*-Python-*- +create_maze_env.env_name = "AntMaze" +IMAGES = True + +context_range = (%CONTEXT_RANGE_MIN, %CONTEXT_RANGE_MAX) +meta_context_range = ((-4, -4), (20, 20)) + +RESET_EPISODE_PERIOD = 500 +RESET_ENV_PERIOD = 1 +# End episode every N steps +UvfAgent.reset_episode_cond_fn = @every_n_steps +every_n_steps.n = %RESET_EPISODE_PERIOD +train_uvf.max_steps_per_episode = %RESET_EPISODE_PERIOD +# Do a manual reset every N episodes +UvfAgent.reset_env_cond_fn = @every_n_episodes +every_n_episodes.n = %RESET_ENV_PERIOD +every_n_episodes.steps_per_episode = %RESET_EPISODE_PERIOD + +## Config defaults +EVAL_MODES = ["eval1", "eval2", "eval3"] + +## Config agent +CONTEXT = @agent/Context +META_CONTEXT = @meta/Context + +## Config agent context +agent/Context.context_ranges = [%context_range] +agent/Context.context_shapes = [%SUBGOAL_DIM] +agent/Context.meta_action_every_n = 10 +agent/Context.samplers = { + "train": [@train/DirectionSampler], + "explore": [@train/DirectionSampler], +} + +agent/Context.context_transition_fn = @relative_context_transition_fn +agent/Context.context_multi_transition_fn = @relative_context_multi_transition_fn + +agent/Context.reward_fn = @uvf/negative_distance + +## Config meta context +meta/Context.context_ranges = [%meta_context_range] +meta/Context.context_shapes = [2] 
+meta/Context.samplers = { + "train": [@train/RandomSampler], + "explore": [@train/RandomSampler], + "eval1": [@eval1/ConstantSampler], + "eval2": [@eval2/ConstantSampler], + "eval3": [@eval3/ConstantSampler], +} +meta/Context.context_transition_fn = @task/relative_context_transition_fn +meta/Context.context_multi_transition_fn = @task/relative_context_multi_transition_fn +meta/Context.reward_fn = @task/negative_distance + +## Config rewards +task/negative_distance.state_indices = [0, 1] +task/negative_distance.relative_context = True +task/negative_distance.diff = False +task/negative_distance.offset = 0.0 + +## Config samplers +train/RandomSampler.context_range = %meta_context_range +train/DirectionSampler.context_range = %context_range +train/DirectionSampler.k = %SUBGOAL_DIM +relative_context_transition_fn.k = %SUBGOAL_DIM +relative_context_multi_transition_fn.k = %SUBGOAL_DIM +task/relative_context_transition_fn.k = 2 +task/relative_context_multi_transition_fn.k = 2 +MetaAgent.k = %SUBGOAL_DIM + +eval1/ConstantSampler.value = [16, 0] +eval2/ConstantSampler.value = [16, 16] +eval3/ConstantSampler.value = [0, 16] diff --git a/models/research/efficient-hrl/context/configs/ant_push_multi.gin b/models/research/efficient-hrl/context/configs/ant_push_multi.gin new file mode 100644 index 0000000000000000000000000000000000000000..db9b4ed7bbe81fe38c9fbad10a43dde485a06802 --- /dev/null +++ b/models/research/efficient-hrl/context/configs/ant_push_multi.gin @@ -0,0 +1,62 @@ +#-*-Python-*- +create_maze_env.env_name = "AntPush" +context_range = (%CONTEXT_RANGE_MIN, %CONTEXT_RANGE_MAX) +meta_context_range = ((-16, -4), (16, 20)) + +RESET_EPISODE_PERIOD = 500 +RESET_ENV_PERIOD = 1 +# End episode every N steps +UvfAgent.reset_episode_cond_fn = @every_n_steps +every_n_steps.n = %RESET_EPISODE_PERIOD +train_uvf.max_steps_per_episode = %RESET_EPISODE_PERIOD +# Do a manual reset every N episodes +UvfAgent.reset_env_cond_fn = @every_n_episodes +every_n_episodes.n = %RESET_ENV_PERIOD +every_n_episodes.steps_per_episode = %RESET_EPISODE_PERIOD + +## Config defaults +EVAL_MODES = ["eval2"] + +## Config agent +CONTEXT = @agent/Context +META_CONTEXT = @meta/Context + +## Config agent context +agent/Context.context_ranges = [%context_range] +agent/Context.context_shapes = [%SUBGOAL_DIM] +agent/Context.meta_action_every_n = 10 +agent/Context.samplers = { + "train": [@train/DirectionSampler], + "explore": [@train/DirectionSampler], +} + +agent/Context.context_transition_fn = @relative_context_transition_fn +agent/Context.context_multi_transition_fn = @relative_context_multi_transition_fn + +agent/Context.reward_fn = @uvf/negative_distance + +## Config meta context +meta/Context.context_ranges = [%meta_context_range] +meta/Context.context_shapes = [2] +meta/Context.samplers = { + "train": [@train/RandomSampler], + "explore": [@train/RandomSampler], + "eval2": [@eval2/ConstantSampler], +} +meta/Context.reward_fn = @task/negative_distance + +## Config rewards +task/negative_distance.state_indices = [0, 1] +task/negative_distance.relative_context = False +task/negative_distance.diff = False +task/negative_distance.offset = 0.0 + +## Config samplers +train/RandomSampler.context_range = %meta_context_range +train/DirectionSampler.context_range = %context_range +train/DirectionSampler.k = %SUBGOAL_DIM +relative_context_transition_fn.k = %SUBGOAL_DIM +relative_context_multi_transition_fn.k = %SUBGOAL_DIM +MetaAgent.k = %SUBGOAL_DIM + +eval2/ConstantSampler.value = [0, 19] diff --git 
a/models/research/efficient-hrl/context/configs/ant_push_multi_img.gin b/models/research/efficient-hrl/context/configs/ant_push_multi_img.gin new file mode 100644 index 0000000000000000000000000000000000000000..abdc43402fca8a3e83438655bec26c06b8dfccbe --- /dev/null +++ b/models/research/efficient-hrl/context/configs/ant_push_multi_img.gin @@ -0,0 +1,68 @@ +#-*-Python-*- +create_maze_env.env_name = "AntPush" +IMAGES = True + +context_range = (%CONTEXT_RANGE_MIN, %CONTEXT_RANGE_MAX) +meta_context_range = ((-16, -4), (16, 20)) + +RESET_EPISODE_PERIOD = 500 +RESET_ENV_PERIOD = 1 +# End episode every N steps +UvfAgent.reset_episode_cond_fn = @every_n_steps +every_n_steps.n = %RESET_EPISODE_PERIOD +train_uvf.max_steps_per_episode = %RESET_EPISODE_PERIOD +# Do a manual reset every N episodes +UvfAgent.reset_env_cond_fn = @every_n_episodes +every_n_episodes.n = %RESET_ENV_PERIOD +every_n_episodes.steps_per_episode = %RESET_EPISODE_PERIOD + +## Config defaults +EVAL_MODES = ["eval2"] + +## Config agent +CONTEXT = @agent/Context +META_CONTEXT = @meta/Context + +## Config agent context +agent/Context.context_ranges = [%context_range] +agent/Context.context_shapes = [%SUBGOAL_DIM] +agent/Context.meta_action_every_n = 10 +agent/Context.samplers = { + "train": [@train/DirectionSampler], + "explore": [@train/DirectionSampler], +} + +agent/Context.context_transition_fn = @relative_context_transition_fn +agent/Context.context_multi_transition_fn = @relative_context_multi_transition_fn + +agent/Context.reward_fn = @uvf/negative_distance + +## Config meta context +meta/Context.context_ranges = [%meta_context_range] +meta/Context.context_shapes = [2] +meta/Context.samplers = { + "train": [@train/RandomSampler], + "explore": [@train/RandomSampler], + "eval2": [@eval2/ConstantSampler], +} +meta/Context.context_transition_fn = @task/relative_context_transition_fn +meta/Context.context_multi_transition_fn = @task/relative_context_multi_transition_fn +meta/Context.reward_fn = @task/negative_distance + +## Config rewards +task/negative_distance.state_indices = [0, 1] +task/negative_distance.relative_context = True +task/negative_distance.diff = False +task/negative_distance.offset = 0.0 + +## Config samplers +train/RandomSampler.context_range = %meta_context_range +train/DirectionSampler.context_range = %context_range +train/DirectionSampler.k = %SUBGOAL_DIM +relative_context_transition_fn.k = %SUBGOAL_DIM +relative_context_multi_transition_fn.k = %SUBGOAL_DIM +task/relative_context_transition_fn.k = 2 +task/relative_context_multi_transition_fn.k = 2 +MetaAgent.k = %SUBGOAL_DIM + +eval2/ConstantSampler.value = [0, 19] diff --git a/models/research/efficient-hrl/context/configs/ant_push_single.gin b/models/research/efficient-hrl/context/configs/ant_push_single.gin new file mode 100644 index 0000000000000000000000000000000000000000..e85c5dfba4d04668cc5407c89aa42ca2044e12fd --- /dev/null +++ b/models/research/efficient-hrl/context/configs/ant_push_single.gin @@ -0,0 +1,62 @@ +#-*-Python-*- +create_maze_env.env_name = "AntPush" +context_range = (%CONTEXT_RANGE_MIN, %CONTEXT_RANGE_MAX) +meta_context_range = ((-16, -4), (16, 20)) + +RESET_EPISODE_PERIOD = 500 +RESET_ENV_PERIOD = 1 +# End episode every N steps +UvfAgent.reset_episode_cond_fn = @every_n_steps +every_n_steps.n = %RESET_EPISODE_PERIOD +train_uvf.max_steps_per_episode = %RESET_EPISODE_PERIOD +# Do a manual reset every N episodes +UvfAgent.reset_env_cond_fn = @every_n_episodes +every_n_episodes.n = %RESET_ENV_PERIOD +every_n_episodes.steps_per_episode = 
%RESET_EPISODE_PERIOD + +## Config defaults +EVAL_MODES = ["eval2"] + +## Config agent +CONTEXT = @agent/Context +META_CONTEXT = @meta/Context + +## Config agent context +agent/Context.context_ranges = [%context_range] +agent/Context.context_shapes = [%SUBGOAL_DIM] +agent/Context.meta_action_every_n = 10 +agent/Context.samplers = { + "train": [@train/DirectionSampler], + "explore": [@train/DirectionSampler], +} + +agent/Context.context_transition_fn = @relative_context_transition_fn +agent/Context.context_multi_transition_fn = @relative_context_multi_transition_fn + +agent/Context.reward_fn = @uvf/negative_distance + +## Config meta context +meta/Context.context_ranges = [%meta_context_range] +meta/Context.context_shapes = [2] +meta/Context.samplers = { + "train": [@eval2/ConstantSampler], + "explore": [@eval2/ConstantSampler], + "eval2": [@eval2/ConstantSampler], +} +meta/Context.reward_fn = @task/negative_distance + +## Config rewards +task/negative_distance.state_indices = [0, 1] +task/negative_distance.relative_context = False +task/negative_distance.diff = False +task/negative_distance.offset = 0.0 + +## Config samplers +train/RandomSampler.context_range = %meta_context_range +train/DirectionSampler.context_range = %context_range +train/DirectionSampler.k = %SUBGOAL_DIM +relative_context_transition_fn.k = %SUBGOAL_DIM +relative_context_multi_transition_fn.k = %SUBGOAL_DIM +MetaAgent.k = %SUBGOAL_DIM + +eval2/ConstantSampler.value = [0, 19] diff --git a/models/research/efficient-hrl/context/configs/default.gin b/models/research/efficient-hrl/context/configs/default.gin new file mode 100644 index 0000000000000000000000000000000000000000..65f91e5292db86b62a9275fcc8929d46d779a677 --- /dev/null +++ b/models/research/efficient-hrl/context/configs/default.gin @@ -0,0 +1,12 @@ +#-*-Python-*- +ENV_CONTEXT = None +EVAL_MODES = ["eval"] +TARGET_Q_CLIPPING = None +RESET_EPISODE_PERIOD = None +ZERO_OBS = False +CONTEXT_RANGE_MIN = -10 +CONTEXT_RANGE_MAX = 10 +SUBGOAL_DIM = 2 + +uvf/negative_distance.summarize = False +uvf/negative_distance.relative_context = True diff --git a/models/research/efficient-hrl/context/configs/hiro_orig.gin b/models/research/efficient-hrl/context/configs/hiro_orig.gin new file mode 100644 index 0000000000000000000000000000000000000000..e39ba96be7b7d323ecf1a849dcea89d1468e87c3 --- /dev/null +++ b/models/research/efficient-hrl/context/configs/hiro_orig.gin @@ -0,0 +1,14 @@ +#-*-Python-*- +ENV_CONTEXT = None +EVAL_MODES = ["eval"] +TARGET_Q_CLIPPING = None +RESET_EPISODE_PERIOD = None +ZERO_OBS = True +IMAGES = False +CONTEXT_RANGE_MIN = (-10, -10, -0.5, -1, -1, -1, -1, -0.5, -0.3, -0.5, -0.3, -0.5, -0.3, -0.5, -0.3) +CONTEXT_RANGE_MAX = ( 10, 10, 0.5, 1, 1, 1, 1, 0.5, 0.3, 0.5, 0.3, 0.5, 0.3, 0.5, 0.3) +SUBGOAL_DIM = 15 +META_EXPLORE_NOISE = 1.0 + +uvf/negative_distance.summarize = False +uvf/negative_distance.relative_context = True diff --git a/models/research/efficient-hrl/context/configs/hiro_repr.gin b/models/research/efficient-hrl/context/configs/hiro_repr.gin new file mode 100644 index 0000000000000000000000000000000000000000..a0a8057bd3cc834e5c1be33e73a9b7c6ae370a99 --- /dev/null +++ b/models/research/efficient-hrl/context/configs/hiro_repr.gin @@ -0,0 +1,18 @@ +#-*-Python-*- +ENV_CONTEXT = None +EVAL_MODES = ["eval"] +TARGET_Q_CLIPPING = None +RESET_EPISODE_PERIOD = None +ZERO_OBS = False +IMAGES = False +CONTEXT_RANGE_MIN = -10 +CONTEXT_RANGE_MAX = 10 +SUBGOAL_DIM = 2 +META_EXPLORE_NOISE = 5.0 + +StatePreprocess.trainable = True 
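The variant files above (default.gin, hiro_orig.gin, hiro_repr.gin, ...) only define macros such as SUBGOAL_DIM and CONTEXT_RANGE_MIN/MAX, while the task files (ant_maze*, ant_push_*, point_maze) bind the environment, samplers and eval goals; they are intended to be parsed together. A minimal sketch of combining them with gin, assuming the project's gin-configurable modules have already been imported and using illustrative paths and overrides:

    import gin

    # Parse a variant file (macros) plus a task file (env, samplers, eval goals).
    gin.parse_config_files_and_bindings(
        config_files=[
            "context/configs/hiro_orig.gin",        # macros: SUBGOAL_DIM=15, context ranges, noise
            "context/configs/ant_push_single.gin",  # env name, samplers, eval goal
        ],
        bindings=["RESET_ENV_PERIOD = 1"],          # example command-line style override
        skip_unknown=True)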
+StatePreprocess.state_preprocess_net = @state_preprocess_net +StatePreprocess.action_embed_net = @action_embed_net + +uvf/negative_distance.summarize = False +uvf/negative_distance.relative_context = True diff --git a/models/research/efficient-hrl/context/configs/hiro_xy.gin b/models/research/efficient-hrl/context/configs/hiro_xy.gin new file mode 100644 index 0000000000000000000000000000000000000000..f35026c9e24246a87e69e16031981a13e22c283d --- /dev/null +++ b/models/research/efficient-hrl/context/configs/hiro_xy.gin @@ -0,0 +1,14 @@ +#-*-Python-*- +ENV_CONTEXT = None +EVAL_MODES = ["eval"] +TARGET_Q_CLIPPING = None +RESET_EPISODE_PERIOD = None +ZERO_OBS = False +IMAGES = False +CONTEXT_RANGE_MIN = -10 +CONTEXT_RANGE_MAX = 10 +SUBGOAL_DIM = 2 +META_EXPLORE_NOISE = 1.0 + +uvf/negative_distance.summarize = False +uvf/negative_distance.relative_context = True diff --git a/models/research/efficient-hrl/context/configs/point_maze.gin b/models/research/efficient-hrl/context/configs/point_maze.gin new file mode 100644 index 0000000000000000000000000000000000000000..0ea67d2d5fffedfdbc7b9df443acc3adaf98ec99 --- /dev/null +++ b/models/research/efficient-hrl/context/configs/point_maze.gin @@ -0,0 +1,73 @@ +#-*-Python-*- +# NOTE: For best training, low-level exploration (uvf_add_noise_fn.stddev) +# should be reduced to around 0.1. +create_maze_env.env_name = "PointMaze" +context_range_min = -10 +context_range_max = 10 +context_range = (%context_range_min, %context_range_max) +meta_context_range = ((-2, -2), (10, 10)) + +RESET_EPISODE_PERIOD = 500 +RESET_ENV_PERIOD = 1 +# End episode every N steps +UvfAgent.reset_episode_cond_fn = @every_n_steps +every_n_steps.n = %RESET_EPISODE_PERIOD +train_uvf.max_steps_per_episode = %RESET_EPISODE_PERIOD +# Do a manual reset every N episodes +UvfAgent.reset_env_cond_fn = @every_n_episodes +every_n_episodes.n = %RESET_ENV_PERIOD +every_n_episodes.steps_per_episode = %RESET_EPISODE_PERIOD + +## Config defaults +EVAL_MODES = ["eval1", "eval2", "eval3"] + +## Config agent +CONTEXT = @agent/Context +META_CONTEXT = @meta/Context + +## Config agent context +agent/Context.context_ranges = [%context_range] +agent/Context.context_shapes = [%SUBGOAL_DIM] +agent/Context.meta_action_every_n = 10 +agent/Context.samplers = { + "train": [@train/DirectionSampler], + "explore": [@train/DirectionSampler], + "eval1": [@uvf_eval1/ConstantSampler], + "eval2": [@uvf_eval2/ConstantSampler], + "eval3": [@uvf_eval3/ConstantSampler], +} + +agent/Context.context_transition_fn = @relative_context_transition_fn +agent/Context.context_multi_transition_fn = @relative_context_multi_transition_fn + +agent/Context.reward_fn = @uvf/negative_distance + +## Config meta context +meta/Context.context_ranges = [%meta_context_range] +meta/Context.context_shapes = [2] +meta/Context.samplers = { + "train": [@train/RandomSampler], + "explore": [@train/RandomSampler], + "eval1": [@eval1/ConstantSampler], + "eval2": [@eval2/ConstantSampler], + "eval3": [@eval3/ConstantSampler], +} +meta/Context.reward_fn = @task/negative_distance + +## Config rewards +task/negative_distance.state_indices = [0, 1] +task/negative_distance.relative_context = False +task/negative_distance.diff = False +task/negative_distance.offset = 0.0 + +## Config samplers +train/RandomSampler.context_range = %meta_context_range +train/DirectionSampler.context_range = %context_range +train/DirectionSampler.k = %SUBGOAL_DIM +relative_context_transition_fn.k = %SUBGOAL_DIM +relative_context_multi_transition_fn.k = %SUBGOAL_DIM +MetaAgent.k = 
%SUBGOAL_DIM + +eval1/ConstantSampler.value = [8, 0] +eval2/ConstantSampler.value = [8, 8] +eval3/ConstantSampler.value = [0, 8] diff --git a/models/research/efficient-hrl/context/context.py b/models/research/efficient-hrl/context/context.py new file mode 100644 index 0000000000000000000000000000000000000000..76be00b4966539b869e714225fbba124b9602c3a --- /dev/null +++ b/models/research/efficient-hrl/context/context.py @@ -0,0 +1,467 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Context for Universal Value Function agents. + +A context specifies a list of contextual variables, each with + own sampling and reward computation methods. + +Examples of contextual variables include + goal states, reward combination vectors, etc. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np +import tensorflow as tf +from tf_agents import specs +import gin.tf +from utils import utils as uvf_utils + + +@gin.configurable +class Context(object): + """Base context.""" + VAR_NAME = 'action' + + def __init__(self, + tf_env, + context_ranges=None, + context_shapes=None, + state_indices=None, + variable_indices=None, + gamma_index=None, + settable_context=False, + timers=None, + samplers=None, + reward_weights=None, + reward_fn=None, + random_sampler_mode='random', + normalizers=None, + context_transition_fn=None, + context_multi_transition_fn=None, + meta_action_every_n=None): + self._tf_env = tf_env + self.variable_indices = variable_indices + self.gamma_index = gamma_index + self._settable_context = settable_context + self.timers = timers + self._context_transition_fn = context_transition_fn + self._context_multi_transition_fn = context_multi_transition_fn + self._random_sampler_mode = random_sampler_mode + + # assign specs + self._obs_spec = self._tf_env.observation_spec() + self._context_shapes = tuple([ + shape if shape is not None else self._obs_spec.shape + for shape in context_shapes + ]) + self.context_specs = tuple([ + specs.TensorSpec(dtype=self._obs_spec.dtype, shape=shape) + for shape in self._context_shapes + ]) + if context_ranges is not None: + self.context_ranges = context_ranges + else: + self.context_ranges = [None] * len(self._context_shapes) + + self.context_as_action_specs = tuple([ + specs.BoundedTensorSpec( + shape=shape, + dtype=(tf.float32 if self._obs_spec.dtype in + [tf.float32, tf.float64] else self._obs_spec.dtype), + minimum=context_range[0], + maximum=context_range[-1]) + for shape, context_range in zip(self._context_shapes, self.context_ranges) + ]) + + if state_indices is not None: + self.state_indices = state_indices + else: + self.state_indices = [None] * len(self._context_shapes) + if self.variable_indices is not None and self.n != len( + self.variable_indices): + raise ValueError( + 'variable_indices (%s) must have the same length as contexts 
(%s).' % + (self.variable_indices, self.context_specs)) + assert self.n == len(self.context_ranges) + assert self.n == len(self.state_indices) + + # assign reward/sampler fns + self._sampler_fns = dict() + self._samplers = dict() + self._reward_fns = dict() + + # assign reward fns + self._add_custom_reward_fns() + reward_weights = reward_weights or None + self._reward_fn = self._make_reward_fn(reward_fn, reward_weights) + + # assign samplers + self._add_custom_sampler_fns() + for mode, sampler_fns in samplers.items(): + self._make_sampler_fn(sampler_fns, mode) + + # create normalizers + if normalizers is None: + self._normalizers = [None] * len(self.context_specs) + else: + self._normalizers = [ + normalizer(tf.zeros(shape=spec.shape, dtype=spec.dtype)) + if normalizer is not None else None + for normalizer, spec in zip(normalizers, self.context_specs) + ] + assert self.n == len(self._normalizers) + + self.meta_action_every_n = meta_action_every_n + + # create vars + self.context_vars = {} + self.timer_vars = {} + self.create_vars(self.VAR_NAME) + self.t = tf.Variable( + tf.zeros(shape=(), dtype=tf.int32), name='num_timer_steps') + + def _add_custom_reward_fns(self): + pass + + def _add_custom_sampler_fns(self): + pass + + def sample_random_contexts(self, batch_size): + """Sample random batch contexts.""" + assert self._random_sampler_mode is not None + return self.sample_contexts(self._random_sampler_mode, batch_size)[0] + + def sample_contexts(self, mode, batch_size, state=None, next_state=None, + **kwargs): + """Sample a batch of contexts. + + Args: + mode: A string representing the mode [`train`, `explore`, `eval`]. + batch_size: Batch size. + Returns: + Two lists of [batch_size, num_context_dims] contexts. + """ + contexts, next_contexts = self._sampler_fns[mode]( + batch_size, state=state, next_state=next_state, + **kwargs) + self._validate_contexts(contexts) + self._validate_contexts(next_contexts) + return contexts, next_contexts + + def compute_rewards(self, mode, states, actions, rewards, next_states, + contexts): + """Compute context-based rewards. + + Args: + mode: A string representing the mode ['uvf', 'task']. + states: A [batch_size, num_state_dims] tensor. + actions: A [batch_size, num_action_dims] tensor. + rewards: A [batch_size] tensor representing unmodified rewards. + next_states: A [batch_size, num_state_dims] tensor. + contexts: A list of [batch_size, num_context_dims] tensors. + Returns: + A [batch_size] tensor representing rewards. + """ + return self._reward_fn(states, actions, rewards, next_states, + contexts) + + def _make_reward_fn(self, reward_fns_list, reward_weights): + """Returns a fn that computes rewards. + + Args: + reward_fns_list: A fn or a list of reward fns. + mode: A string representing the operating mode. + reward_weights: A list of reward weights. 
+ """ + if not isinstance(reward_fns_list, (list, tuple)): + reward_fns_list = [reward_fns_list] + if reward_weights is None: + reward_weights = [1.0] * len(reward_fns_list) + assert len(reward_fns_list) == len(reward_weights) + + reward_fns_list = [ + self._custom_reward_fns[fn] if isinstance(fn, (str,)) else fn + for fn in reward_fns_list + ] + + def reward_fn(*args, **kwargs): + """Returns rewards, discounts.""" + reward_tuples = [ + reward_fn(*args, **kwargs) for reward_fn in reward_fns_list + ] + rewards_list = [reward_tuple[0] for reward_tuple in reward_tuples] + discounts_list = [reward_tuple[1] for reward_tuple in reward_tuples] + ndims = max([r.shape.ndims for r in rewards_list]) + if ndims > 1: # expand reward shapes to allow broadcasting + for i in range(len(rewards_list)): + for _ in range(rewards_list[i].shape.ndims - ndims): + rewards_list[i] = tf.expand_dims(rewards_list[i], axis=-1) + for _ in range(discounts_list[i].shape.ndims - ndims): + discounts_list[i] = tf.expand_dims(discounts_list[i], axis=-1) + rewards = tf.add_n( + [r * tf.to_float(w) for r, w in zip(rewards_list, reward_weights)]) + discounts = discounts_list[0] + for d in discounts_list[1:]: + discounts *= d + + return rewards, discounts + + return reward_fn + + def _make_sampler_fn(self, sampler_cls_list, mode): + """Returns a fn that samples a list of context vars. + + Args: + sampler_cls_list: A list of sampler classes. + mode: A string representing the operating mode. + """ + if not isinstance(sampler_cls_list, (list, tuple)): + sampler_cls_list = [sampler_cls_list] + + self._samplers[mode] = [] + sampler_fns = [] + for spec, sampler in zip(self.context_specs, sampler_cls_list): + if isinstance(sampler, (str,)): + sampler_fn = self._custom_sampler_fns[sampler] + else: + sampler_fn = sampler(context_spec=spec) + self._samplers[mode].append(sampler_fn) + sampler_fns.append(sampler_fn) + + def batch_sampler_fn(batch_size, state=None, next_state=None, **kwargs): + """Sampler fn.""" + contexts_tuples = [ + sampler(batch_size, state=state, next_state=next_state, **kwargs) + for sampler in sampler_fns] + contexts = [c[0] for c in contexts_tuples] + next_contexts = [c[1] for c in contexts_tuples] + contexts = [ + normalizer.update_apply(c) if normalizer is not None else c + for normalizer, c in zip(self._normalizers, contexts) + ] + next_contexts = [ + normalizer.apply(c) if normalizer is not None else c + for normalizer, c in zip(self._normalizers, next_contexts) + ] + return contexts, next_contexts + + self._sampler_fns[mode] = batch_sampler_fn + + def set_env_context_op(self, context, disable_unnormalizer=False): + """Returns a TensorFlow op that sets the environment context. + + Args: + context: A list of context Tensor variables. + disable_unnormalizer: Disable unnormalization. + Returns: + A TensorFlow op that sets the environment context. 
+ """ + ret_val = np.array(1.0, dtype=np.float32) + if not self._settable_context: + return tf.identity(ret_val) + + if not disable_unnormalizer: + context = [ + normalizer.unapply(tf.expand_dims(c, 0))[0] + if normalizer is not None else c + for normalizer, c in zip(self._normalizers, context) + ] + + def set_context_func(*env_context_values): + tf.logging.info('[set_env_context_op] Setting gym environment context.') + # pylint: disable=protected-access + self.gym_env.set_context(*env_context_values) + return ret_val + # pylint: enable=protected-access + + with tf.name_scope('set_env_context'): + set_op = tf.py_func(set_context_func, context, tf.float32, + name='set_env_context_py_func') + set_op.set_shape([]) + return set_op + + def set_replay(self, replay): + """Set replay buffer for samplers. + + Args: + replay: A replay buffer. + """ + for _, samplers in self._samplers.items(): + for sampler in samplers: + sampler.set_replay(replay) + + def get_clip_fns(self): + """Returns a list of clip fns for contexts. + + Returns: + A list of fns that clip context tensors. + """ + clip_fns = [] + for context_range in self.context_ranges: + def clip_fn(var_, range_=context_range): + """Clip a tensor.""" + if range_ is None: + clipped_var = tf.identity(var_) + elif isinstance(range_[0], (int, long, float, list, np.ndarray)): + clipped_var = tf.clip_by_value( + var_, + range_[0], + range_[1],) + else: raise NotImplementedError(range_) + return clipped_var + clip_fns.append(clip_fn) + return clip_fns + + def _validate_contexts(self, contexts): + """Validate if contexts have right specs. + + Args: + contexts: A list of [batch_size, num_context_dim] tensors. + Raises: + ValueError: If shape or dtype mismatches that of spec. + """ + for i, (context, spec) in enumerate(zip(contexts, self.context_specs)): + if context[0].shape != spec.shape: + raise ValueError('contexts[%d] has invalid shape %s wrt spec shape %s' % + (i, context[0].shape, spec.shape)) + if context.dtype != spec.dtype: + raise ValueError('contexts[%d] has invalid dtype %s wrt spec dtype %s' % + (i, context.dtype, spec.dtype)) + + def context_multi_transition_fn(self, contexts, **kwargs): + """Returns multiple future contexts starting from a batch.""" + assert self._context_multi_transition_fn + return self._context_multi_transition_fn(contexts, None, None, **kwargs) + + def step(self, mode, agent=None, action_fn=None, **kwargs): + """Returns [next_contexts..., next_timer] list of ops. + + Args: + mode: a string representing the mode=[train, explore, eval]. + **kwargs: kwargs for context_transition_fn. + Returns: + a list of ops that set the context. + """ + if agent is None: + ops = [] + if self._context_transition_fn is not None: + def sampler_fn(): + samples = self.sample_contexts(mode, 1)[0] + return [s[0] for s in samples] + values = self._context_transition_fn(self.vars, self.t, sampler_fn, **kwargs) + ops += [tf.assign(var, value) for var, value in zip(self.vars, values)] + ops.append(tf.assign_add(self.t, 1)) # increment timer + return ops + else: + ops = agent.tf_context.step(mode, **kwargs) + state = kwargs['state'] + next_state = kwargs['next_state'] + state_repr = kwargs['state_repr'] + next_state_repr = kwargs['next_state_repr'] + with tf.control_dependencies(ops): # Step high level context before computing low level one. + # Get the context transition function output. 
+ values = self._context_transition_fn(self.vars, self.t, None, + state=state_repr, + next_state=next_state_repr) + # Select a new goal every C steps, otherwise use context transition. + low_level_context = [ + tf.cond(tf.equal(self.t % self.meta_action_every_n, 0), + lambda: tf.cast(action_fn(next_state, context=None), tf.float32), + lambda: values)] + ops = [tf.assign(var, value) + for var, value in zip(self.vars, low_level_context)] + with tf.control_dependencies(ops): + return [tf.assign_add(self.t, 1)] # increment timer + return ops + + def reset(self, mode, agent=None, action_fn=None, state=None): + """Returns ops that reset the context. + + Args: + mode: a string representing the mode=[train, explore, eval]. + Returns: + a list of ops that reset the context. + """ + if agent is None: + values = self.sample_contexts(mode=mode, batch_size=1)[0] + if values is None: + return [] + values = [value[0] for value in values] + values[0] = uvf_utils.tf_print( + values[0], + values, + message='context:reset, mode=%s' % mode, + first_n=10, + name='context:reset:%s' % mode) + all_ops = [] + for _, context_vars in sorted(self.context_vars.items()): + ops = [tf.assign(var, value) for var, value in zip(context_vars, values)] + all_ops += ops + all_ops.append(self.set_env_context_op(values)) + all_ops.append(tf.assign(self.t, 0)) # reset timer + return all_ops + else: + ops = agent.tf_context.reset(mode) + # NOTE: The code is currently written in such a way that the higher level + # policy does not provide a low-level context until the second + # observation. Insead, we just zero-out low-level contexts. + for key, context_vars in sorted(self.context_vars.items()): + ops += [tf.assign(var, tf.zeros_like(var)) for var, meta_var in + zip(context_vars, agent.tf_context.context_vars[key])] + + ops.append(tf.assign(self.t, 0)) # reset timer + return ops + + def create_vars(self, name, agent=None): + """Create tf variables for contexts. + + Args: + name: Name of the variables. + Returns: + A list of [num_context_dims] tensors. + """ + if agent is not None: + meta_vars = agent.create_vars(name) + else: + meta_vars = {} + assert name not in self.context_vars, ('Conflict! %s is already ' + 'initialized.') % name + self.context_vars[name] = tuple([ + tf.Variable( + tf.zeros(shape=spec.shape, dtype=spec.dtype), + name='%s_context_%d' % (name, i)) + for i, spec in enumerate(self.context_specs) + ]) + return self.context_vars[name], meta_vars + + @property + def n(self): + return len(self.context_specs) + + @property + def vars(self): + return self.context_vars[self.VAR_NAME] + + # pylint: disable=protected-access + @property + def gym_env(self): + return self._tf_env.pyenv._gym_env + + @property + def tf_env(self): + return self._tf_env + # pylint: enable=protected-access diff --git a/models/research/efficient-hrl/context/context_transition_functions.py b/models/research/efficient-hrl/context/context_transition_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..70326debde4185ec9ee5300ebf06b94e8eb1f7ad --- /dev/null +++ b/models/research/efficient-hrl/context/context_transition_functions.py @@ -0,0 +1,123 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Context functions. + +Given the current contexts, timer and context sampler, returns new contexts + after an environment step. This can be used to define a high-level policy + that controls contexts as its actions. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf +import gin.tf +import utils as uvf_utils + + +@gin.configurable +def periodic_context_fn(contexts, timer, sampler_fn, period=1): + """Periodically samples contexts. + + Args: + contexts: a list of [num_context_dims] tensor variables representing + current contexts. + timer: a scalar integer tensor variable holding the current time step. + sampler_fn: a sampler function that samples a list of [num_context_dims] + tensors. + period: (integer) period of update. + Returns: + a list of [num_context_dims] tensors. + """ + contexts = list(contexts[:]) # create copy + return tf.cond(tf.mod(timer, period) == 0, sampler_fn, lambda: contexts) + + +@gin.configurable +def timer_context_fn(contexts, + timer, + sampler_fn, + period=1, + timer_index=-1, + debug=False): + """Samples contexts based on timer in contexts. + + Args: + contexts: a list of [num_context_dims] tensor variables representing + current contexts. + timer: a scalar integer tensor variable holding the current time step. + sampler_fn: a sampler function that samples a list of [num_context_dims] + tensors. + period: (integer) period of update; actual period = `period` + 1. + timer_index: (integer) Index of context list that present timer. + debug: (boolean) Print debug messages. + Returns: + a list of [num_context_dims] tensors. + """ + contexts = list(contexts[:]) # create copy + cond = tf.equal(contexts[timer_index][0], 0) + def reset(): + """Sample context and reset the timer.""" + new_contexts = sampler_fn() + new_contexts[timer_index] = tf.zeros_like( + contexts[timer_index]) + period + return new_contexts + def update(): + """Decrement the timer.""" + contexts[timer_index] -= 1 + return contexts + values = tf.cond(cond, reset, update) + if debug: + values[0] = uvf_utils.tf_print( + values[0], + values + [timer], + 'timer_context_fn', + first_n=200, + name='timer_context_fn:contexts') + return values + + +@gin.configurable +def relative_context_transition_fn( + contexts, timer, sampler_fn, + k=2, state=None, next_state=None, + **kwargs): + """Contexts updated to be relative to next state. + """ + contexts = list(contexts[:]) # create copy + assert len(contexts) == 1 + new_contexts = [ + tf.concat( + [contexts[0][:k] + state[:k] - next_state[:k], + contexts[0][k:]], -1)] + return new_contexts + + +@gin.configurable +def relative_context_multi_transition_fn( + contexts, timer, sampler_fn, + k=2, states=None, + **kwargs): + """Given contexts at first state and sequence of states, derives sequence of all contexts. 
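relative_context_transition_fn keeps the absolute target s_t + g_t fixed and re-expresses the goal relative to the new state, i.e. g_{t+1} = s_t + g_t - s_{t+1} on the first k dimensions. A minimal NumPy sketch of that arithmetic (names are illustrative):

    import numpy as np

    def relative_goal_transition(goal, state, next_state, k=2):
        # The absolute target state stays fixed; only its relative coordinates change.
        return np.concatenate([goal[:k] + state[:k] - next_state[:k], goal[k:]])

    g  = np.array([3.0, 4.0])        # desired offset from the current position
    s  = np.array([1.0, 1.0])
    s2 = np.array([1.5, 2.0])        # position after one low-level step
    print(relative_goal_transition(g, s, s2))   # [2.5, 3.0]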
+ """ + contexts = list(contexts[:]) # create copy + assert len(contexts) == 1 + contexts = [ + tf.concat( + [tf.expand_dims(contexts[0][:, :k] + states[:, 0, :k], 1) - states[:, :, :k], + contexts[0][:, None, k:] * tf.ones_like(states[:, :, :1])], -1)] + return contexts diff --git a/models/research/efficient-hrl/context/gin_imports.py b/models/research/efficient-hrl/context/gin_imports.py new file mode 100644 index 0000000000000000000000000000000000000000..94512cef8479ac2e9c36a941f4b197b6939d0814 --- /dev/null +++ b/models/research/efficient-hrl/context/gin_imports.py @@ -0,0 +1,25 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Import gin configurable modules. +""" + +# pylint: disable=unused-import +from context import context +from context import context_transition_functions +from context import gin_utils +from context import rewards_functions +from context import samplers +# pylint: disable=unused-import diff --git a/models/research/efficient-hrl/context/gin_utils.py b/models/research/efficient-hrl/context/gin_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ab7c1b2d1dd1d7317071ad6e2c08d057d42ec2e1 --- /dev/null +++ b/models/research/efficient-hrl/context/gin_utils.py @@ -0,0 +1,45 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Gin configurable utility functions. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import gin.tf + + +@gin.configurable +def gin_sparse_array(size, values, indices, fill_value=0): + arr = np.zeros(size) + arr.fill(fill_value) + arr[indices] = values + return arr + + +@gin.configurable +def gin_sum(values): + result = values[0] + for value in values[1:]: + result += value + return result + + +@gin.configurable +def gin_range(n): + return range(n) diff --git a/models/research/efficient-hrl/context/rewards_functions.py b/models/research/efficient-hrl/context/rewards_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..ab560a7f4290dfa1269e001339e0e2cdb116e761 --- /dev/null +++ b/models/research/efficient-hrl/context/rewards_functions.py @@ -0,0 +1,741 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Reward shaping functions used by Contexts. + + Each reward function should take the following inputs and return new rewards, + and discounts. + + new_rewards, discounts = reward_fn(states, actions, rewards, + next_states, contexts) +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf +import gin.tf + + +def summarize_stats(stats): + """Summarize a dictionary of variables. + + Args: + stats: a dictionary of {name: tensor} to compute stats over. + """ + for name, stat in stats.items(): + mean = tf.reduce_mean(stat) + tf.summary.scalar('mean_%s' % name, mean) + tf.summary.scalar('max_%s' % name, tf.reduce_max(stat)) + tf.summary.scalar('min_%s' % name, tf.reduce_min(stat)) + std = tf.sqrt(tf.reduce_mean(tf.square(stat)) - tf.square(mean) + 1e-10) + tf.summary.scalar('std_%s' % name, std) + tf.summary.histogram(name, stat) + + +def index_states(states, indices): + """Return indexed states. + + Args: + states: A [batch_size, num_state_dims] Tensor representing a batch + of states. + indices: (a list of Numpy integer array) Indices of states dimensions + to be mapped. + Returns: + A [batch_size, num_indices] Tensor representing the batch of indexed states. + """ + if indices is None: + return states + indices = tf.constant(indices, dtype=tf.int32) + return tf.gather(states, indices=indices, axis=1) + + +def record_tensor(tensor, indices, stats, name='states'): + """Record specified tensor dimensions into stats. + + Args: + tensor: A [batch_size, num_dims] Tensor. + indices: (a list of integers) Indices of dimensions to record. + stats: A dictionary holding stats. + name: (string) Name of tensor. + """ + if indices is None: + indices = range(tensor.shape.as_list()[1]) + for index in indices: + stats['%s_%02d' % (name, index)] = tensor[:, index] + + +@gin.configurable +def potential_rewards(states, + actions, + rewards, + next_states, + contexts, + gamma=1.0, + reward_fn=None): + """Return the potential-based rewards. + + Args: + states: A [batch_size, num_state_dims] Tensor representing a batch + of states. + actions: A [batch_size, num_action_dims] Tensor representing a batch + of actions. + rewards: A [batch_size] Tensor representing a batch of rewards. + next_states: A [batch_size, num_state_dims] Tensor representing a batch + of next states. + contexts: A list of [batch_size, num_context_dims] Tensor representing + a batch of contexts. + gamma: Reward discount. + reward_fn: A reward function. + Returns: + A new tf.float32 [batch_size] rewards Tensor, and + tf.float32 [batch_size] discounts tensor. 
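potential_rewards applies standard potential-based shaping with the wrapped reward_fn acting as the potential: the shaped reward is gamma * phi(next_state) - phi(state). A tiny sketch with scalar potentials (illustrative values):

    def potential_shaping(phi_s, phi_next_s, gamma=0.99):
        # shaped reward = gamma * phi(s') - phi(s)
        return gamma * phi_next_s - phi_s

    print(potential_shaping(phi_s=-4.0, phi_next_s=-3.0))  # 1.03: moving closer is rewarded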
+ """ + del actions # unused args + gamma = tf.to_float(gamma) + rewards_tp1, discounts = reward_fn(None, None, rewards, next_states, contexts) + rewards, _ = reward_fn(None, None, rewards, states, contexts) + return -rewards + gamma * rewards_tp1, discounts + + +@gin.configurable +def timed_rewards(states, + actions, + rewards, + next_states, + contexts, + reward_fn=None, + dense=False, + timer_index=-1): + """Return the timed rewards. + + Args: + states: A [batch_size, num_state_dims] Tensor representing a batch + of states. + actions: A [batch_size, num_action_dims] Tensor representing a batch + of actions. + rewards: A [batch_size] Tensor representing a batch of rewards. + next_states: A [batch_size, num_state_dims] Tensor representing a batch + of next states. + contexts: A list of [batch_size, num_context_dims] Tensor representing + a batch of contexts. + reward_fn: A reward function. + dense: (boolean) Provide dense rewards or sparse rewards at time = 0. + timer_index: (integer) The context list index that specifies timer. + Returns: + A new tf.float32 [batch_size] rewards Tensor, and + tf.float32 [batch_size] discounts tensor. + """ + assert contexts[timer_index].get_shape().as_list()[1] == 1 + timers = contexts[timer_index][:, 0] + rewards, discounts = reward_fn(states, actions, rewards, next_states, + contexts) + terminates = tf.to_float(timers <= 0) # if terminate set 1, else set 0 + for _ in range(rewards.shape.ndims - 1): + terminates = tf.expand_dims(terminates, axis=-1) + if not dense: + rewards *= terminates # if terminate, return rewards, else return 0 + discounts *= (tf.to_float(1.0) - terminates) + return rewards, discounts + + +@gin.configurable +def reset_rewards(states, + actions, + rewards, + next_states, + contexts, + reset_index=0, + reset_state=None, + reset_reward_function=None, + include_forward_rewards=True, + include_reset_rewards=True): + """Returns the rewards for a forward/reset agent. + + Args: + states: A [batch_size, num_state_dims] Tensor representing a batch + of states. + actions: A [batch_size, num_action_dims] Tensor representing a batch + of actions. + rewards: A [batch_size] Tensor representing a batch of rewards. + next_states: A [batch_size, num_state_dims] Tensor representing a batch + of next states. + contexts: A list of [batch_size, num_context_dims] Tensor representing + a batch of contexts. + reset_index: (integer) The context list index that specifies reset. + reset_state: Reset state. + reset_reward_function: Reward function for reset step. + include_forward_rewards: Include the rewards from the forward pass. + include_reset_rewards: Include the rewards from the reset pass. + + Returns: + A new tf.float32 [batch_size] rewards Tensor, and + tf.float32 [batch_size] discounts tensor. 
+ """ + reset_state = tf.constant( + reset_state, dtype=next_states.dtype, shape=next_states.shape) + reset_states = tf.expand_dims(reset_state, 0) + + def true_fn(): + if include_reset_rewards: + return reset_reward_function(states, actions, rewards, next_states, + [reset_states] + contexts[1:]) + else: + return tf.zeros_like(rewards), tf.ones_like(rewards) + + def false_fn(): + if include_forward_rewards: + return plain_rewards(states, actions, rewards, next_states, contexts) + else: + return tf.zeros_like(rewards), tf.ones_like(rewards) + + rewards, discounts = tf.cond( + tf.cast(contexts[reset_index][0, 0], dtype=tf.bool), true_fn, false_fn) + return rewards, discounts + + +@gin.configurable +def tanh_similarity(states, + actions, + rewards, + next_states, + contexts, + mse_scale=1.0, + state_scales=1.0, + goal_scales=1.0, + summarize=False): + """Returns the similarity between next_states and contexts using tanh and mse. + + Args: + states: A [batch_size, num_state_dims] Tensor representing a batch + of states. + actions: A [batch_size, num_action_dims] Tensor representing a batch + of actions. + rewards: A [batch_size] Tensor representing a batch of rewards. + next_states: A [batch_size, num_state_dims] Tensor representing a batch + of next states. + contexts: A list of [batch_size, num_context_dims] Tensor representing + a batch of contexts. + mse_scale: A float, to scale mse before tanh. + state_scales: multiplicative scale for (next) states. A scalar or 1D tensor, + must be broadcastable to number of state dimensions. + goal_scales: multiplicative scale for contexts. A scalar or 1D tensor, + must be broadcastable to number of goal dimensions. + summarize: (boolean) enable summary ops. + + + Returns: + A new tf.float32 [batch_size] rewards Tensor, and + tf.float32 [batch_size] discounts tensor. + """ + del states, actions, rewards # Unused + mse = tf.reduce_mean(tf.squared_difference(next_states * state_scales, + contexts[0] * goal_scales), -1) + tanh = tf.tanh(mse_scale * mse) + if summarize: + with tf.name_scope('RewardFn/'): + tf.summary.scalar('mean_mse', tf.reduce_mean(mse)) + tf.summary.histogram('mse', mse) + tf.summary.scalar('mean_tanh', tf.reduce_mean(tanh)) + tf.summary.histogram('tanh', tanh) + rewards = tf.to_float(1 - tanh) + return rewards, tf.ones_like(rewards) + + +@gin.configurable +def negative_mse(states, + actions, + rewards, + next_states, + contexts, + state_scales=1.0, + goal_scales=1.0, + summarize=False): + """Returns the negative mean square error between next_states and contexts. + + Args: + states: A [batch_size, num_state_dims] Tensor representing a batch + of states. + actions: A [batch_size, num_action_dims] Tensor representing a batch + of actions. + rewards: A [batch_size] Tensor representing a batch of rewards. + next_states: A [batch_size, num_state_dims] Tensor representing a batch + of next states. + contexts: A list of [batch_size, num_context_dims] Tensor representing + a batch of contexts. + state_scales: multiplicative scale for (next) states. A scalar or 1D tensor, + must be broadcastable to number of state dimensions. + goal_scales: multiplicative scale for contexts. A scalar or 1D tensor, + must be broadcastable to number of goal dimensions. + summarize: (boolean) enable summary ops. + + Returns: + A new tf.float32 [batch_size] rewards Tensor, and + tf.float32 [batch_size] discounts tensor. 
+ """ + del states, actions, rewards # Unused + mse = tf.reduce_mean(tf.squared_difference(next_states * state_scales, + contexts[0] * goal_scales), -1) + if summarize: + with tf.name_scope('RewardFn/'): + tf.summary.scalar('mean_mse', tf.reduce_mean(mse)) + tf.summary.histogram('mse', mse) + rewards = tf.to_float(-mse) + return rewards, tf.ones_like(rewards) + + +@gin.configurable +def negative_distance(states, + actions, + rewards, + next_states, + contexts, + state_scales=1.0, + goal_scales=1.0, + reward_scales=1.0, + weight_index=None, + weight_vector=None, + summarize=False, + termination_epsilon=1e-4, + state_indices=None, + goal_indices=None, + vectorize=False, + relative_context=False, + diff=False, + norm='L2', + epsilon=1e-10, + bonus_epsilon=0., #5., + offset=0.0): + """Returns the negative euclidean distance between next_states and contexts. + + Args: + states: A [batch_size, num_state_dims] Tensor representing a batch + of states. + actions: A [batch_size, num_action_dims] Tensor representing a batch + of actions. + rewards: A [batch_size] Tensor representing a batch of rewards. + next_states: A [batch_size, num_state_dims] Tensor representing a batch + of next states. + contexts: A list of [batch_size, num_context_dims] Tensor representing + a batch of contexts. + state_scales: multiplicative scale for (next) states. A scalar or 1D tensor, + must be broadcastable to number of state dimensions. + goal_scales: multiplicative scale for goals. A scalar or 1D tensor, + must be broadcastable to number of goal dimensions. + reward_scales: multiplicative scale for rewards. A scalar or 1D tensor, + must be broadcastable to number of reward dimensions. + weight_index: (integer) The context list index that specifies weight. + weight_vector: (a number or a list or Numpy array) The weighting vector, + broadcastable to `next_states`. + summarize: (boolean) enable summary ops. + termination_epsilon: terminate if dist is less than this quantity. + state_indices: (a list of integers) list of state indices to select. + goal_indices: (a list of integers) list of goal indices to select. + vectorize: Return a vectorized form. + norm: L1 or L2. + epsilon: small offset to ensure non-negative/zero distance. + + Returns: + A new tf.float32 [batch_size] rewards Tensor, and + tf.float32 [batch_size] discounts tensor. 
+ """ + del actions, rewards # Unused + stats = {} + record_tensor(next_states, state_indices, stats, 'next_states') + states = index_states(states, state_indices) + next_states = index_states(next_states, state_indices) + goals = index_states(contexts[0], goal_indices) + if relative_context: + goals = states + goals + sq_dists = tf.squared_difference(next_states * state_scales, + goals * goal_scales) + old_sq_dists = tf.squared_difference(states * state_scales, + goals * goal_scales) + record_tensor(sq_dists, None, stats, 'sq_dists') + if weight_vector is not None: + sq_dists *= tf.convert_to_tensor(weight_vector, dtype=next_states.dtype) + old_sq_dists *= tf.convert_to_tensor(weight_vector, dtype=next_states.dtype) + if weight_index is not None: + #sq_dists *= contexts[weight_index] + weights = tf.abs(index_states(contexts[0], weight_index)) + #weights /= tf.reduce_sum(weights, -1, keepdims=True) + sq_dists *= weights + old_sq_dists *= weights + if norm == 'L1': + dist = tf.sqrt(sq_dists + epsilon) + old_dist = tf.sqrt(old_sq_dists + epsilon) + if not vectorize: + dist = tf.reduce_sum(dist, -1) + old_dist = tf.reduce_sum(old_dist, -1) + elif norm == 'L2': + if vectorize: + dist = sq_dists + old_dist = old_sq_dists + else: + dist = tf.reduce_sum(sq_dists, -1) + old_dist = tf.reduce_sum(old_sq_dists, -1) + dist = tf.sqrt(dist + epsilon) # tf.gradients fails when tf.sqrt(-0.0) + old_dist = tf.sqrt(old_dist + epsilon) # tf.gradients fails when tf.sqrt(-0.0) + else: + raise NotImplementedError(norm) + discounts = dist > termination_epsilon + if summarize: + with tf.name_scope('RewardFn/'): + tf.summary.scalar('mean_dist', tf.reduce_mean(dist)) + tf.summary.histogram('dist', dist) + summarize_stats(stats) + bonus = tf.to_float(dist < bonus_epsilon) + dist *= reward_scales + old_dist *= reward_scales + if diff: + return bonus + offset + tf.to_float(old_dist - dist), tf.to_float(discounts) + return bonus + offset + tf.to_float(-dist), tf.to_float(discounts) + + +@gin.configurable +def cosine_similarity(states, + actions, + rewards, + next_states, + contexts, + state_scales=1.0, + goal_scales=1.0, + reward_scales=1.0, + normalize_states=True, + normalize_goals=True, + weight_index=None, + weight_vector=None, + summarize=False, + state_indices=None, + goal_indices=None, + offset=0.0): + """Returns the cosine similarity between next_states - states and contexts. + + Args: + states: A [batch_size, num_state_dims] Tensor representing a batch + of states. + actions: A [batch_size, num_action_dims] Tensor representing a batch + of actions. + rewards: A [batch_size] Tensor representing a batch of rewards. + next_states: A [batch_size, num_state_dims] Tensor representing a batch + of next states. + contexts: A list of [batch_size, num_context_dims] Tensor representing + a batch of contexts. + state_scales: multiplicative scale for (next) states. A scalar or 1D tensor, + must be broadcastable to number of state dimensions. + goal_scales: multiplicative scale for goals. A scalar or 1D tensor, + must be broadcastable to number of goal dimensions. + reward_scales: multiplicative scale for rewards. A scalar or 1D tensor, + must be broadcastable to number of reward dimensions. + weight_index: (integer) The context list index that specifies weight. + weight_vector: (a number or a list or Numpy array) The weighting vector, + broadcastable to `next_states`. + summarize: (boolean) enable summary ops. + termination_epsilon: terminate if dist is less than this quantity. 
+ state_indices: (a list of integers) list of state indices to select. + goal_indices: (a list of integers) list of goal indices to select. + vectorize: Return a vectorized form. + norm: L1 or L2. + epsilon: small offset to ensure non-negative/zero distance. + + Returns: + A new tf.float32 [batch_size] rewards Tensor, and + tf.float32 [batch_size] discounts tensor. + """ + del actions, rewards # Unused + stats = {} + record_tensor(next_states, state_indices, stats, 'next_states') + states = index_states(states, state_indices) + next_states = index_states(next_states, state_indices) + goals = index_states(contexts[0], goal_indices) + + if weight_vector is not None: + goals *= tf.convert_to_tensor(weight_vector, dtype=next_states.dtype) + if weight_index is not None: + weights = tf.abs(index_states(contexts[0], weight_index)) + goals *= weights + + direction_vec = next_states - states + if normalize_states: + direction_vec = tf.nn.l2_normalize(direction_vec, -1) + goal_vec = goals + if normalize_goals: + goal_vec = tf.nn.l2_normalize(goal_vec, -1) + + similarity = tf.reduce_sum(goal_vec * direction_vec, -1) + discounts = tf.ones_like(similarity) + return offset + tf.to_float(similarity), tf.to_float(discounts) + + +@gin.configurable +def diff_distance(states, + actions, + rewards, + next_states, + contexts, + state_scales=1.0, + goal_scales=1.0, + reward_scales=1.0, + weight_index=None, + weight_vector=None, + summarize=False, + termination_epsilon=1e-4, + state_indices=None, + goal_indices=None, + norm='L2', + epsilon=1e-10): + """Returns the difference in euclidean distance between states/next_states and contexts. + + Args: + states: A [batch_size, num_state_dims] Tensor representing a batch + of states. + actions: A [batch_size, num_action_dims] Tensor representing a batch + of actions. + rewards: A [batch_size] Tensor representing a batch of rewards. + next_states: A [batch_size, num_state_dims] Tensor representing a batch + of next states. + contexts: A list of [batch_size, num_context_dims] Tensor representing + a batch of contexts. + state_scales: multiplicative scale for (next) states. A scalar or 1D tensor, + must be broadcastable to number of state dimensions. + goal_scales: multiplicative scale for goals. A scalar or 1D tensor, + must be broadcastable to number of goal dimensions. + reward_scales: multiplicative scale for rewards. A scalar or 1D tensor, + must be broadcastable to number of reward dimensions. + weight_index: (integer) The context list index that specifies weight. + weight_vector: (a number or a list or Numpy array) The weighting vector, + broadcastable to `next_states`. + summarize: (boolean) enable summary ops. + termination_epsilon: terminate if dist is less than this quantity. + state_indices: (a list of integers) list of state indices to select. + goal_indices: (a list of integers) list of goal indices to select. + vectorize: Return a vectorized form. + norm: L1 or L2. + epsilon: small offset to ensure non-negative/zero distance. + + Returns: + A new tf.float32 [batch_size] rewards Tensor, and + tf.float32 [batch_size] discounts tensor. 
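cosine_similarity above rewards how well the realized state change aligns with the commanded direction: the cosine between next_state - state and the goal vector, both optionally L2-normalized. A short NumPy sketch, ignoring the scaling and weighting arguments:

    import numpy as np

    def direction_reward(state, next_state, goal_direction):
        delta = next_state - state
        delta = delta / (np.linalg.norm(delta) + 1e-12)      # normalize realized displacement
        goal = goal_direction / (np.linalg.norm(goal_direction) + 1e-12)
        return float(np.dot(goal, delta))

    print(direction_reward(np.zeros(2), np.array([1., 1.]), np.array([0., 1.])))  # ~0.707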
+ """ + del actions, rewards # Unused + stats = {} + record_tensor(next_states, state_indices, stats, 'next_states') + next_states = index_states(next_states, state_indices) + states = index_states(states, state_indices) + goals = index_states(contexts[0], goal_indices) + next_sq_dists = tf.squared_difference(next_states * state_scales, + goals * goal_scales) + sq_dists = tf.squared_difference(states * state_scales, + goals * goal_scales) + record_tensor(sq_dists, None, stats, 'sq_dists') + if weight_vector is not None: + next_sq_dists *= tf.convert_to_tensor(weight_vector, dtype=next_states.dtype) + sq_dists *= tf.convert_to_tensor(weight_vector, dtype=next_states.dtype) + if weight_index is not None: + next_sq_dists *= contexts[weight_index] + sq_dists *= contexts[weight_index] + if norm == 'L1': + next_dist = tf.sqrt(next_sq_dists + epsilon) + dist = tf.sqrt(sq_dists + epsilon) + next_dist = tf.reduce_sum(next_dist, -1) + dist = tf.reduce_sum(dist, -1) + elif norm == 'L2': + next_dist = tf.reduce_sum(next_sq_dists, -1) + next_dist = tf.sqrt(next_dist + epsilon) # tf.gradients fails when tf.sqrt(-0.0) + dist = tf.reduce_sum(sq_dists, -1) + dist = tf.sqrt(dist + epsilon) # tf.gradients fails when tf.sqrt(-0.0) + else: + raise NotImplementedError(norm) + discounts = next_dist > termination_epsilon + if summarize: + with tf.name_scope('RewardFn/'): + tf.summary.scalar('mean_dist', tf.reduce_mean(dist)) + tf.summary.histogram('dist', dist) + summarize_stats(stats) + diff = dist - next_dist + diff *= reward_scales + return tf.to_float(diff), tf.to_float(discounts) + + +@gin.configurable +def binary_indicator(states, + actions, + rewards, + next_states, + contexts, + termination_epsilon=1e-4, + offset=0, + epsilon=1e-10, + state_indices=None, + summarize=False): + """Returns 0/1 by checking if next_states and contexts overlap. + + Args: + states: A [batch_size, num_state_dims] Tensor representing a batch + of states. + actions: A [batch_size, num_action_dims] Tensor representing a batch + of actions. + rewards: A [batch_size] Tensor representing a batch of rewards. + next_states: A [batch_size, num_state_dims] Tensor representing a batch + of next states. + contexts: A list of [batch_size, num_context_dims] Tensor representing + a batch of contexts. + termination_epsilon: terminate if dist is less than this quantity. + offset: Offset the rewards. + epsilon: small offset to ensure non-negative/zero distance. + + Returns: + A new tf.float32 [batch_size] rewards Tensor, and + tf.float32 [batch_size] discounts tensor. + """ + del states, actions # unused args + next_states = index_states(next_states, state_indices) + dist = tf.reduce_sum(tf.squared_difference(next_states, contexts[0]), -1) + dist = tf.sqrt(dist + epsilon) + discounts = dist > termination_epsilon + rewards = tf.logical_not(discounts) + rewards = tf.to_float(rewards) + offset + return tf.to_float(rewards), tf.ones_like(tf.to_float(discounts)) #tf.to_float(discounts) + + +@gin.configurable +def plain_rewards(states, actions, rewards, next_states, contexts): + """Returns the given rewards. + + Args: + states: A [batch_size, num_state_dims] Tensor representing a batch + of states. + actions: A [batch_size, num_action_dims] Tensor representing a batch + of actions. + rewards: A [batch_size] Tensor representing a batch of rewards. + next_states: A [batch_size, num_state_dims] Tensor representing a batch + of next states. + contexts: A list of [batch_size, num_context_dims] Tensor representing + a batch of contexts. 
+ + Returns: + A new tf.float32 [batch_size] rewards Tensor, and + tf.float32 [batch_size] discounts tensor. + """ + del states, actions, next_states, contexts # Unused + return rewards, tf.ones_like(rewards) + + +@gin.configurable +def ctrl_rewards(states, + actions, + rewards, + next_states, + contexts, + reward_scales=1.0): + """Returns the negative control cost. + + Args: + states: A [batch_size, num_state_dims] Tensor representing a batch + of states. + actions: A [batch_size, num_action_dims] Tensor representing a batch + of actions. + rewards: A [batch_size] Tensor representing a batch of rewards. + next_states: A [batch_size, num_state_dims] Tensor representing a batch + of next states. + contexts: A list of [batch_size, num_context_dims] Tensor representing + a batch of contexts. + reward_scales: multiplicative scale for rewards. A scalar or 1D tensor, + must be broadcastable to number of reward dimensions. + + Returns: + A new tf.float32 [batch_size] rewards Tensor, and + tf.float32 [batch_size] discounts tensor. + """ + del states, rewards, contexts # Unused + if actions is None: + rewards = tf.to_float(tf.zeros(shape=next_states.shape[:1])) + else: + rewards = -tf.reduce_sum(tf.square(actions), axis=1) + rewards *= reward_scales + rewards = tf.to_float(rewards) + return rewards, tf.ones_like(rewards) + + +@gin.configurable +def diff_rewards( + states, + actions, + rewards, + next_states, + contexts, + state_indices=None, + goal_index=0,): + """Returns (next_states - goals) as a batched vector reward.""" + del states, rewards, actions # Unused + if state_indices is not None: + next_states = index_states(next_states, state_indices) + rewards = tf.to_float(next_states - contexts[goal_index]) + return rewards, tf.ones_like(rewards) + + +@gin.configurable +def state_rewards(states, + actions, + rewards, + next_states, + contexts, + weight_index=None, + state_indices=None, + weight_vector=1.0, + offset_vector=0.0, + summarize=False): + """Returns the rewards that are linear mapping of next_states. + + Args: + states: A [batch_size, num_state_dims] Tensor representing a batch + of states. + actions: A [batch_size, num_action_dims] Tensor representing a batch + of actions. + rewards: A [batch_size] Tensor representing a batch of rewards. + next_states: A [batch_size, num_state_dims] Tensor representing a batch + of next states. + contexts: A list of [batch_size, num_context_dims] Tensor representing + a batch of contexts. + weight_index: (integer) Index of contexts lists that specify weighting. + state_indices: (a list of Numpy integer array) Indices of states dimensions + to be mapped. + weight_vector: (a number or a list or Numpy array) The weighting vector, + broadcastable to `next_states`. + offset_vector: (a number or a list of Numpy array) The off vector. + summarize: (boolean) enable summary ops. + + Returns: + A new tf.float32 [batch_size] rewards Tensor, and + tf.float32 [batch_size] discounts tensor. 
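state_rewards is a linear function of the selected next-state dimensions: reward = sum_j w_j * (next_state_j + offset_j), with the weights optionally multiplied by a context entry. A minimal NumPy sketch (illustrative values):

    import numpy as np

    def linear_state_reward(next_state, weight_vector, offset_vector=0.0):
        # Weighted sum of (offset) next-state dimensions.
        return float(np.sum(weight_vector * (next_state + offset_vector)))

    print(linear_state_reward(np.array([0.5, 2.0]), np.array([1.0, 0.25])))  # 1.0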
+ """ + del states, actions, rewards # unused args + stats = {} + record_tensor(next_states, state_indices, stats) + next_states = index_states(next_states, state_indices) + weight = tf.constant( + weight_vector, dtype=next_states.dtype, shape=next_states[0].shape) + weights = tf.expand_dims(weight, 0) + offset = tf.constant( + offset_vector, dtype=next_states.dtype, shape=next_states[0].shape) + offsets = tf.expand_dims(offset, 0) + if weight_index is not None: + weights *= contexts[weight_index] + rewards = tf.to_float(tf.reduce_sum(weights * (next_states+offsets), axis=1)) + if summarize: + with tf.name_scope('RewardFn/'): + summarize_stats(stats) + return rewards, tf.ones_like(rewards) diff --git a/models/research/efficient-hrl/context/samplers.py b/models/research/efficient-hrl/context/samplers.py new file mode 100644 index 0000000000000000000000000000000000000000..15a22df5eb3bcbd419b5a01bc299b5d5ac71ad91 --- /dev/null +++ b/models/research/efficient-hrl/context/samplers.py @@ -0,0 +1,445 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Samplers for Contexts. + + Each sampler class should define __call__(batch_size). +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf +slim = tf.contrib.slim +import gin.tf + + +@gin.configurable +class BaseSampler(object): + """Base sampler.""" + + def __init__(self, context_spec, context_range=None, k=2, scope='sampler'): + """Construct a base sampler. + + Args: + context_spec: A context spec. + context_range: A tuple of (minval, max), where minval, maxval are floats + or Numpy arrays with the same shape as the context. + scope: A string denoting scope. + """ + self._context_spec = context_spec + self._context_range = context_range + self._k = k + self._scope = scope + + def __call__(self, batch_size, **kwargs): + raise NotImplementedError + + def set_replay(self, replay=None): + pass + + def _validate_contexts(self, contexts): + """Validate if contexts have right spec. + + Args: + contexts: A [batch_size, num_contexts_dim] tensor. + Raises: + ValueError: If shape or dtype mismatches that of spec. + """ + if contexts[0].shape != self._context_spec.shape: + raise ValueError('contexts has invalid shape %s wrt spec shape %s' % + (contexts[0].shape, self._context_spec.shape)) + if contexts.dtype != self._context_spec.dtype: + raise ValueError('contexts has invalid dtype %s wrt spec dtype %s' % + (contexts.dtype, self._context_spec.dtype)) + + +@gin.configurable +class ZeroSampler(BaseSampler): + """Zero sampler.""" + + def __call__(self, batch_size, **kwargs): + """Sample a batch of context. + + Args: + batch_size: Batch size. + Returns: + Two [batch_size, num_context_dims] tensors. 
+ """ + contexts = tf.zeros( + dtype=self._context_spec.dtype, + shape=[ + batch_size, + ] + self._context_spec.shape.as_list()) + return contexts, contexts + + +@gin.configurable +class BinarySampler(BaseSampler): + """Binary sampler.""" + + def __init__(self, probs=0.5, *args, **kwargs): + """Constructor.""" + super(BinarySampler, self).__init__(*args, **kwargs) + self._probs = probs + + def __call__(self, batch_size, **kwargs): + """Sample a batch of context.""" + spec = self._context_spec + contexts = tf.random_uniform( + shape=[ + batch_size, + ] + spec.shape.as_list(), dtype=tf.float32) + contexts = tf.cast(tf.greater(contexts, self._probs), dtype=spec.dtype) + return contexts, contexts + + +@gin.configurable +class RandomSampler(BaseSampler): + """Random sampler.""" + + def __call__(self, batch_size, **kwargs): + """Sample a batch of context. + + Args: + batch_size: Batch size. + Returns: + Two [batch_size, num_context_dims] tensors. + """ + spec = self._context_spec + context_range = self._context_range + if isinstance(context_range[0], (int, float)): + contexts = tf.random_uniform( + shape=[ + batch_size, + ] + spec.shape.as_list(), + minval=context_range[0], + maxval=context_range[1], + dtype=spec.dtype) + elif isinstance(context_range[0], (list, tuple, np.ndarray)): + assert len(spec.shape.as_list()) == 1 + assert spec.shape.as_list()[0] == len(context_range[0]) + assert spec.shape.as_list()[0] == len(context_range[1]) + contexts = tf.concat( + [ + tf.random_uniform( + shape=[ + batch_size, 1, + ] + spec.shape.as_list()[1:], + minval=context_range[0][i], + maxval=context_range[1][i], + dtype=spec.dtype) for i in range(spec.shape.as_list()[0]) + ], + axis=1) + else: raise NotImplementedError(context_range) + self._validate_contexts(contexts) + state, next_state = kwargs['state'], kwargs['next_state'] + if state is not None and next_state is not None: + pass + #contexts = tf.concat( + # [tf.random_normal(tf.shape(state[:, :self._k]), dtype=tf.float64) + + # tf.random_shuffle(state[:, :self._k]), + # contexts[:, self._k:]], 1) + + return contexts, contexts + + +@gin.configurable +class ScheduledSampler(BaseSampler): + """Scheduled sampler.""" + + def __init__(self, + scope='default', + values=None, + scheduler='cycle', + scheduler_params=None, + *args, **kwargs): + """Construct sampler. + + Args: + scope: Scope name. + values: A list of numbers or [num_context_dim] Numpy arrays + representing the values to cycle. + scheduler: scheduler type. + scheduler_params: scheduler parameters. + *args: arguments. + **kwargs: keyword arguments. + """ + super(ScheduledSampler, self).__init__(*args, **kwargs) + self._scope = scope + self._values = values + self._scheduler = scheduler + self._scheduler_params = scheduler_params or {} + assert self._values is not None and len( + self._values), 'must provide non-empty values.' + self._n = len(self._values) + # TODO(shanegu): move variable creation outside. resolve tf.cond problem. + self._count = 0 + self._i = tf.Variable( + tf.zeros(shape=(), dtype=tf.int32), + name='%s-scheduled_sampler_%d' % (self._scope, self._count)) + self._values = tf.constant(self._values, dtype=self._context_spec.dtype) + + def __call__(self, batch_size, **kwargs): + """Sample a batch of context. + + Args: + batch_size: Batch size. + Returns: + Two [batch_size, num_context_dims] tensors. 
+ """ + spec = self._context_spec + next_op = self._next(self._i) + with tf.control_dependencies([next_op]): + value = self._values[self._i] + if value.get_shape().as_list(): + values = tf.tile( + tf.expand_dims(value, 0), (batch_size,) + (1,) * spec.shape.ndims) + else: + values = value + tf.zeros( + shape=[ + batch_size, + ] + spec.shape.as_list(), dtype=spec.dtype) + self._validate_contexts(values) + self._count += 1 + return values, values + + def _next(self, i): + """Return op that increments pointer to next value. + + Args: + i: A tensorflow integer variable. + Returns: + Op that increments pointer. + """ + if self._scheduler == 'cycle': + inc = ('inc' in self._scheduler_params and + self._scheduler_params['inc']) or 1 + return tf.assign(i, tf.mod(i+inc, self._n)) + else: + raise NotImplementedError(self._scheduler) + + +@gin.configurable +class ReplaySampler(BaseSampler): + """Replay sampler.""" + + def __init__(self, + prefetch_queue_capacity=2, + override_indices=None, + state_indices=None, + *args, + **kwargs): + """Construct sampler. + + Args: + prefetch_queue_capacity: Capacity for prefetch queue. + override_indices: Override indices. + state_indices: Select certain indices from state dimension. + *args: arguments. + **kwargs: keyword arguments. + """ + super(ReplaySampler, self).__init__(*args, **kwargs) + self._prefetch_queue_capacity = prefetch_queue_capacity + self._override_indices = override_indices + self._state_indices = state_indices + + def set_replay(self, replay): + """Set replay. + + Args: + replay: A replay buffer. + """ + self._replay = replay + + def __call__(self, batch_size, **kwargs): + """Sample a batch of context. + + Args: + batch_size: Batch size. + Returns: + Two [batch_size, num_context_dims] tensors. + """ + batch = self._replay.GetRandomBatch(batch_size) + next_states = batch[4] + if self._prefetch_queue_capacity > 0: + batch_queue = slim.prefetch_queue.prefetch_queue( + [next_states], + capacity=self._prefetch_queue_capacity, + name='%s/batch_context_queue' % self._scope) + next_states = batch_queue.dequeue() + if self._override_indices is not None: + assert self._context_range is not None and isinstance( + self._context_range[0], (int, long, float)) + next_states = tf.concat( + [ + tf.random_uniform( + shape=next_states[:, :1].shape, + minval=self._context_range[0], + maxval=self._context_range[1], + dtype=next_states.dtype) + if i in self._override_indices else next_states[:, i:i + 1] + for i in range(self._context_spec.shape.as_list()[0]) + ], + axis=1) + if self._state_indices is not None: + next_states = tf.concat( + [ + next_states[:, i:i + 1] + for i in range(self._context_spec.shape.as_list()[0]) + ], + axis=1) + self._validate_contexts(next_states) + return next_states, next_states + + +@gin.configurable +class TimeSampler(BaseSampler): + """Time Sampler.""" + + def __init__(self, minval=0, maxval=1, timestep=-1, *args, **kwargs): + """Construct sampler. + + Args: + minval: Min value integer. + maxval: Max value integer. + timestep: Time step between states and next_states. + *args: arguments. + **kwargs: keyword arguments. + """ + super(TimeSampler, self).__init__(*args, **kwargs) + assert self._context_spec.shape.as_list() == [1] + self._minval = minval + self._maxval = maxval + self._timestep = timestep + + def __call__(self, batch_size, **kwargs): + """Sample a batch of context. + + Args: + batch_size: Batch size. + Returns: + Two [batch_size, num_context_dims] tensors. 
+ """ + if self._maxval == self._minval: + contexts = tf.constant( + self._maxval, shape=[batch_size, 1], dtype=tf.int32) + else: + contexts = tf.random_uniform( + shape=[batch_size, 1], + dtype=tf.int32, + maxval=self._maxval, + minval=self._minval) + next_contexts = tf.maximum(contexts + self._timestep, 0) + + return tf.cast( + contexts, dtype=self._context_spec.dtype), tf.cast( + next_contexts, dtype=self._context_spec.dtype) + + +@gin.configurable +class ConstantSampler(BaseSampler): + """Constant sampler.""" + + def __init__(self, value=None, *args, **kwargs): + """Construct sampler. + + Args: + value: A list or Numpy array for values of the constant. + *args: arguments. + **kwargs: keyword arguments. + """ + super(ConstantSampler, self).__init__(*args, **kwargs) + self._value = value + + def __call__(self, batch_size, **kwargs): + """Sample a batch of context. + + Args: + batch_size: Batch size. + Returns: + Two [batch_size, num_context_dims] tensors. + """ + spec = self._context_spec + value_ = tf.constant(self._value, shape=spec.shape, dtype=spec.dtype) + values = tf.tile( + tf.expand_dims(value_, 0), (batch_size,) + (1,) * spec.shape.ndims) + self._validate_contexts(values) + return values, values + + +@gin.configurable +class DirectionSampler(RandomSampler): + """Direction sampler.""" + + def __call__(self, batch_size, **kwargs): + """Sample a batch of context. + + Args: + batch_size: Batch size. + Returns: + Two [batch_size, num_context_dims] tensors. + """ + spec = self._context_spec + context_range = self._context_range + if isinstance(context_range[0], (int, float)): + contexts = tf.random_uniform( + shape=[ + batch_size, + ] + spec.shape.as_list(), + minval=context_range[0], + maxval=context_range[1], + dtype=spec.dtype) + elif isinstance(context_range[0], (list, tuple, np.ndarray)): + assert len(spec.shape.as_list()) == 1 + assert spec.shape.as_list()[0] == len(context_range[0]) + assert spec.shape.as_list()[0] == len(context_range[1]) + contexts = tf.concat( + [ + tf.random_uniform( + shape=[ + batch_size, 1, + ] + spec.shape.as_list()[1:], + minval=context_range[0][i], + maxval=context_range[1][i], + dtype=spec.dtype) for i in range(spec.shape.as_list()[0]) + ], + axis=1) + else: raise NotImplementedError(context_range) + self._validate_contexts(contexts) + if 'sampler_fn' in kwargs: + other_contexts = kwargs['sampler_fn']() + else: + other_contexts = contexts + state, next_state = kwargs['state'], kwargs['next_state'] + if state is not None and next_state is not None: + my_context_range = (np.array(context_range[1]) - np.array(context_range[0])) / 2 * np.ones(spec.shape.as_list()) + contexts = tf.concat( + [0.1 * my_context_range[:self._k] * + tf.random_normal(tf.shape(state[:, :self._k]), dtype=state.dtype) + + tf.random_shuffle(state[:, :self._k]) - state[:, :self._k], + other_contexts[:, self._k:]], 1) + #contexts = tf.Print(contexts, + # [contexts, tf.reduce_max(contexts, 0), + # tf.reduce_min(state, 0), tf.reduce_max(state, 0)], 'contexts', summarize=15) + next_contexts = tf.concat( #LALA + [state[:, :self._k] + contexts[:, :self._k] - next_state[:, :self._k], + other_contexts[:, self._k:]], 1) + next_contexts = contexts #LALA cosine + else: + next_contexts = contexts + return tf.stop_gradient(contexts), tf.stop_gradient(next_contexts) diff --git a/models/research/efficient-hrl/environments/__init__.py b/models/research/efficient-hrl/environments/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc 
--- /dev/null +++ b/models/research/efficient-hrl/environments/__init__.py @@ -0,0 +1 @@ + diff --git a/models/research/efficient-hrl/environments/ant.py b/models/research/efficient-hrl/environments/ant.py new file mode 100644 index 0000000000000000000000000000000000000000..feab1eef4c5fac51a2e0f683de00731b893751c4 --- /dev/null +++ b/models/research/efficient-hrl/environments/ant.py @@ -0,0 +1,141 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Wrapper for creating the ant environment in gym_mujoco.""" + +import math +import numpy as np +import mujoco_py +from gym import utils +from gym.envs.mujoco import mujoco_env + + +def q_inv(a): + return [a[0], -a[1], -a[2], -a[3]] + + +def q_mult(a, b): # multiply two quaternion + w = a[0] * b[0] - a[1] * b[1] - a[2] * b[2] - a[3] * b[3] + i = a[0] * b[1] + a[1] * b[0] + a[2] * b[3] - a[3] * b[2] + j = a[0] * b[2] - a[1] * b[3] + a[2] * b[0] + a[3] * b[1] + k = a[0] * b[3] + a[1] * b[2] - a[2] * b[1] + a[3] * b[0] + return [w, i, j, k] + + +class AntEnv(mujoco_env.MujocoEnv, utils.EzPickle): + FILE = "ant.xml" + ORI_IND = 3 + + def __init__(self, file_path=None, expose_all_qpos=True, + expose_body_coms=None, expose_body_comvels=None): + self._expose_all_qpos = expose_all_qpos + self._expose_body_coms = expose_body_coms + self._expose_body_comvels = expose_body_comvels + self._body_com_indices = {} + self._body_comvel_indices = {} + + mujoco_env.MujocoEnv.__init__(self, file_path, 5) + utils.EzPickle.__init__(self) + + @property + def physics(self): + # check mujoco version is greater than version 1.50 to call correct physics + # model containing PyMjData object for getting and setting position/velocity + # check https://github.com/openai/mujoco-py/issues/80 for updates to api + if mujoco_py.get_version() >= '1.50': + return self.sim + else: + return self.model + + def _step(self, a): + return self.step(a) + + def step(self, a): + xposbefore = self.get_body_com("torso")[0] + self.do_simulation(a, self.frame_skip) + xposafter = self.get_body_com("torso")[0] + forward_reward = (xposafter - xposbefore) / self.dt + ctrl_cost = .5 * np.square(a).sum() + survive_reward = 1.0 + reward = forward_reward - ctrl_cost + survive_reward + state = self.state_vector() + done = False + ob = self._get_obs() + return ob, reward, done, dict( + reward_forward=forward_reward, + reward_ctrl=-ctrl_cost, + reward_survive=survive_reward) + + def _get_obs(self): + # No cfrc observation + if self._expose_all_qpos: + obs = np.concatenate([ + self.physics.data.qpos.flat[:15], # Ensures only ant obs. 
+ self.physics.data.qvel.flat[:14], + ]) + else: + obs = np.concatenate([ + self.physics.data.qpos.flat[2:15], + self.physics.data.qvel.flat[:14], + ]) + + if self._expose_body_coms is not None: + for name in self._expose_body_coms: + com = self.get_body_com(name) + if name not in self._body_com_indices: + indices = range(len(obs), len(obs) + len(com)) + self._body_com_indices[name] = indices + obs = np.concatenate([obs, com]) + + if self._expose_body_comvels is not None: + for name in self._expose_body_comvels: + comvel = self.get_body_comvel(name) + if name not in self._body_comvel_indices: + indices = range(len(obs), len(obs) + len(comvel)) + self._body_comvel_indices[name] = indices + obs = np.concatenate([obs, comvel]) + return obs + + def reset_model(self): + qpos = self.init_qpos + self.np_random.uniform( + size=self.model.nq, low=-.1, high=.1) + qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1 + + # Set everything other than ant to original position and 0 velocity. + qpos[15:] = self.init_qpos[15:] + qvel[14:] = 0. + self.set_state(qpos, qvel) + return self._get_obs() + + def viewer_setup(self): + self.viewer.cam.distance = self.model.stat.extent * 0.5 + + def get_ori(self): + ori = [0, 1, 0, 0] + rot = self.physics.data.qpos[self.__class__.ORI_IND:self.__class__.ORI_IND + 4] # take the quaternion + ori = q_mult(q_mult(rot, ori), q_inv(rot))[1:3] # project onto x-y plane + ori = math.atan2(ori[1], ori[0]) + return ori + + def set_xy(self, xy): + qpos = np.copy(self.physics.data.qpos) + qpos[0] = xy[0] + qpos[1] = xy[1] + + qvel = self.physics.data.qvel + self.set_state(qpos, qvel) + + def get_xy(self): + return self.physics.data.qpos[:2] diff --git a/models/research/efficient-hrl/environments/ant_maze_env.py b/models/research/efficient-hrl/environments/ant_maze_env.py new file mode 100644 index 0000000000000000000000000000000000000000..69a10663f4d02901d295f2781eca2dd3e601e292 --- /dev/null +++ b/models/research/efficient-hrl/environments/ant_maze_env.py @@ -0,0 +1,21 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
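# A minimal sketch, for illustration only: get_ori() above rotates the x-axis by the torso
# quaternion and projects onto the x-y plane to obtain the heading. Assuming a unit
# quaternion in (w, x, y, z) order (MuJoCo's convention), the same yaw angle has a closed form:
import math

def yaw_from_quaternion(w, x, y, z):
    # atan2 of the y and x components of the rotated x-axis.
    return math.atan2(2.0 * (w * z + x * y), 1.0 - 2.0 * (y * y + z * z))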
+# ============================================================================== + +from environments.maze_env import MazeEnv +from environments.ant import AntEnv + + +class AntMazeEnv(MazeEnv): + MODEL_CLASS = AntEnv diff --git a/models/research/efficient-hrl/environments/assets/ant.xml b/models/research/efficient-hrl/environments/assets/ant.xml new file mode 100644 index 0000000000000000000000000000000000000000..5a49d7f52a0e577d64c47205ae32c00a9d23a2d9 --- /dev/null +++ b/models/research/efficient-hrl/environments/assets/ant.xml @@ -0,0 +1,81 @@ + + + diff --git a/models/research/efficient-hrl/environments/create_maze_env.py b/models/research/efficient-hrl/environments/create_maze_env.py new file mode 100644 index 0000000000000000000000000000000000000000..f6dc4f42190b137364700d3dcd970c3ad8b1b9ad --- /dev/null +++ b/models/research/efficient-hrl/environments/create_maze_env.py @@ -0,0 +1,97 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +from environments.ant_maze_env import AntMazeEnv +from environments.point_maze_env import PointMazeEnv + +import tensorflow as tf +import gin.tf +from tf_agents.environments import gym_wrapper +from tf_agents.environments import tf_py_environment + + +@gin.configurable +def create_maze_env(env_name=None, top_down_view=False): + n_bins = 0 + manual_collision = False + if env_name.startswith('Ego'): + n_bins = 8 + env_name = env_name[3:] + if env_name.startswith('Ant'): + cls = AntMazeEnv + env_name = env_name[3:] + maze_size_scaling = 8 + elif env_name.startswith('Point'): + cls = PointMazeEnv + manual_collision = True + env_name = env_name[5:] + maze_size_scaling = 4 + else: + assert False, 'unknown env %s' % env_name + + maze_id = None + observe_blocks = False + put_spin_near_agent = False + if env_name == 'Maze': + maze_id = 'Maze' + elif env_name == 'Push': + maze_id = 'Push' + elif env_name == 'Fall': + maze_id = 'Fall' + elif env_name == 'Block': + maze_id = 'Block' + put_spin_near_agent = True + observe_blocks = True + elif env_name == 'BlockMaze': + maze_id = 'BlockMaze' + put_spin_near_agent = True + observe_blocks = True + else: + raise ValueError('Unknown maze environment %s' % env_name) + + gym_mujoco_kwargs = { + 'maze_id': maze_id, + 'n_bins': n_bins, + 'observe_blocks': observe_blocks, + 'put_spin_near_agent': put_spin_near_agent, + 'top_down_view': top_down_view, + 'manual_collision': manual_collision, + 'maze_size_scaling': maze_size_scaling + } + gym_env = cls(**gym_mujoco_kwargs) + gym_env.reset() + wrapped_env = gym_wrapper.GymWrapper(gym_env) + return wrapped_env + + +class TFPyEnvironment(tf_py_environment.TFPyEnvironment): + + def __init__(self, *args, **kwargs): + super(TFPyEnvironment, self).__init__(*args, **kwargs) + + def start_collect(self): + pass + + def current_obs(self): + time_step = self.current_time_step() + return time_step.observation[0] # For some reason, there is an extra 
dim. + + def step(self, actions): + actions = tf.expand_dims(actions, 0) + next_step = super(TFPyEnvironment, self).step(actions) + return next_step.is_last()[0], next_step.reward[0], next_step.discount[0] + + def reset(self): + return super(TFPyEnvironment, self).reset() diff --git a/models/research/efficient-hrl/environments/maze_env.py b/models/research/efficient-hrl/environments/maze_env.py new file mode 100644 index 0000000000000000000000000000000000000000..cf7d1f2dc0a0d5883a7953c623b3419a02282206 --- /dev/null +++ b/models/research/efficient-hrl/environments/maze_env.py @@ -0,0 +1,499 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Adapted from rllab maze_env.py.""" + +import os +import tempfile +import xml.etree.ElementTree as ET +import math +import numpy as np +import gym + +from environments import maze_env_utils + +# Directory that contains mujoco xml files. +MODEL_DIR = 'environments/assets' + + +class MazeEnv(gym.Env): + MODEL_CLASS = None + + MAZE_HEIGHT = None + MAZE_SIZE_SCALING = None + + def __init__( + self, + maze_id=None, + maze_height=0.5, + maze_size_scaling=8, + n_bins=0, + sensor_range=3., + sensor_span=2 * math.pi, + observe_blocks=False, + put_spin_near_agent=False, + top_down_view=False, + manual_collision=False, + *args, + **kwargs): + self._maze_id = maze_id + + model_cls = self.__class__.MODEL_CLASS + if model_cls is None: + raise "MODEL_CLASS unspecified!" + xml_path = os.path.join(MODEL_DIR, model_cls.FILE) + tree = ET.parse(xml_path) + worldbody = tree.find(".//worldbody") + + self.MAZE_HEIGHT = height = maze_height + self.MAZE_SIZE_SCALING = size_scaling = maze_size_scaling + self._n_bins = n_bins + self._sensor_range = sensor_range * size_scaling + self._sensor_span = sensor_span + self._observe_blocks = observe_blocks + self._put_spin_near_agent = put_spin_near_agent + self._top_down_view = top_down_view + self._manual_collision = manual_collision + + self.MAZE_STRUCTURE = structure = maze_env_utils.construct_maze(maze_id=self._maze_id) + self.elevated = any(-1 in row for row in structure) # Elevate the maze to allow for falling. + self.blocks = any( + any(maze_env_utils.can_move(r) for r in row) + for row in structure) # Are there any movable blocks? + + torso_x, torso_y = self._find_robot() + self._init_torso_x = torso_x + self._init_torso_y = torso_y + self._init_positions = [ + (x - torso_x, y - torso_y) + for x, y in self._find_all_robots()] + + self._xy_to_rowcol = lambda x, y: (2 + (y + size_scaling / 2) / size_scaling, + 2 + (x + size_scaling / 2) / size_scaling) + self._view = np.zeros([5, 5, 3]) # walls (immovable), chasms (fall), movable blocks + + height_offset = 0. + if self.elevated: + # Increase initial z-pos of ant. 
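# A minimal usage sketch, for illustration only (assumes MuJoCo, gym and tf_agents are
# installed, and that the env name follows the parsing in create_maze_env above, where an
# 'Ego' prefix enables the 8-bin range sensor and 'Ant'/'Point' picks the robot):
from environments.create_maze_env import create_maze_env, TFPyEnvironment

gym_env = create_maze_env(env_name='AntMaze')  # tf_agents GymWrapper around the maze env
tf_env = TFPyEnvironment(gym_env)              # TF wrapper used by the train/eval scripts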
+ height_offset = height * size_scaling + torso = tree.find(".//body[@name='torso']") + torso.set('pos', '0 0 %.2f' % (0.75 + height_offset)) + if self.blocks: + # If there are movable blocks, change simulation settings to perform + # better contact detection. + default = tree.find(".//default") + default.find('.//geom').set('solimp', '.995 .995 .01') + + self.movable_blocks = [] + for i in range(len(structure)): + for j in range(len(structure[0])): + struct = structure[i][j] + if struct == 'r' and self._put_spin_near_agent: + struct = maze_env_utils.Move.SpinXY + if self.elevated and struct not in [-1]: + # Create elevated platform. + ET.SubElement( + worldbody, "geom", + name="elevated_%d_%d" % (i, j), + pos="%f %f %f" % (j * size_scaling - torso_x, + i * size_scaling - torso_y, + height / 2 * size_scaling), + size="%f %f %f" % (0.5 * size_scaling, + 0.5 * size_scaling, + height / 2 * size_scaling), + type="box", + material="", + contype="1", + conaffinity="1", + rgba="0.9 0.9 0.9 1", + ) + if struct == 1: # Unmovable block. + # Offset all coordinates so that robot starts at the origin. + ET.SubElement( + worldbody, "geom", + name="block_%d_%d" % (i, j), + pos="%f %f %f" % (j * size_scaling - torso_x, + i * size_scaling - torso_y, + height_offset + + height / 2 * size_scaling), + size="%f %f %f" % (0.5 * size_scaling, + 0.5 * size_scaling, + height / 2 * size_scaling), + type="box", + material="", + contype="1", + conaffinity="1", + rgba="0.4 0.4 0.4 1", + ) + elif maze_env_utils.can_move(struct): # Movable block. + # The "falling" blocks are shrunk slightly and increased in mass to + # ensure that it can fall easily through a gap in the platform blocks. + name = "movable_%d_%d" % (i, j) + self.movable_blocks.append((name, struct)) + falling = maze_env_utils.can_move_z(struct) + spinning = maze_env_utils.can_spin(struct) + x_offset = 0.25 * size_scaling if spinning else 0.0 + y_offset = 0.0 + shrink = 0.1 if spinning else 0.99 if falling else 1.0 + height_shrink = 0.1 if spinning else 1.0 + movable_body = ET.SubElement( + worldbody, "body", + name=name, + pos="%f %f %f" % (j * size_scaling - torso_x + x_offset, + i * size_scaling - torso_y + y_offset, + height_offset + + height / 2 * size_scaling * height_shrink), + ) + ET.SubElement( + movable_body, "geom", + name="block_%d_%d" % (i, j), + pos="0 0 0", + size="%f %f %f" % (0.5 * size_scaling * shrink, + 0.5 * size_scaling * shrink, + height / 2 * size_scaling * height_shrink), + type="box", + material="", + mass="0.001" if falling else "0.0002", + contype="1", + conaffinity="1", + rgba="0.9 0.1 0.1 1" + ) + if maze_env_utils.can_move_x(struct): + ET.SubElement( + movable_body, "joint", + armature="0", + axis="1 0 0", + damping="0.0", + limited="true" if falling else "false", + range="%f %f" % (-size_scaling, size_scaling), + margin="0.01", + name="movable_x_%d_%d" % (i, j), + pos="0 0 0", + type="slide" + ) + if maze_env_utils.can_move_y(struct): + ET.SubElement( + movable_body, "joint", + armature="0", + axis="0 1 0", + damping="0.0", + limited="true" if falling else "false", + range="%f %f" % (-size_scaling, size_scaling), + margin="0.01", + name="movable_y_%d_%d" % (i, j), + pos="0 0 0", + type="slide" + ) + if maze_env_utils.can_move_z(struct): + ET.SubElement( + movable_body, "joint", + armature="0", + axis="0 0 1", + damping="0.0", + limited="true", + range="%f 0" % (-height_offset), + margin="0.01", + name="movable_z_%d_%d" % (i, j), + pos="0 0 0", + type="slide" + ) + if maze_env_utils.can_spin(struct): + ET.SubElement( + 
movable_body, "joint", + armature="0", + axis="0 0 1", + damping="0.0", + limited="false", + name="spinable_%d_%d" % (i, j), + pos="0 0 0", + type="ball" + ) + + torso = tree.find(".//body[@name='torso']") + geoms = torso.findall(".//geom") + for geom in geoms: + if 'name' not in geom.attrib: + raise Exception("Every geom of the torso must have a name " + "defined") + + _, file_path = tempfile.mkstemp(text=True, suffix='.xml') + tree.write(file_path) + + self.wrapped_env = model_cls(*args, file_path=file_path, **kwargs) + + def get_ori(self): + return self.wrapped_env.get_ori() + + def get_top_down_view(self): + self._view = np.zeros_like(self._view) + + def valid(row, col): + return self._view.shape[0] > row >= 0 and self._view.shape[1] > col >= 0 + + def update_view(x, y, d, row=None, col=None): + if row is None or col is None: + x = x - self._robot_x + y = y - self._robot_y + th = self._robot_ori + + row, col = self._xy_to_rowcol(x, y) + update_view(x, y, d, row=row, col=col) + return + + row, row_frac, col, col_frac = int(row), row % 1, int(col), col % 1 + if row_frac < 0: + row_frac += 1 + if col_frac < 0: + col_frac += 1 + + if valid(row, col): + self._view[row, col, d] += ( + (min(1., row_frac + 0.5) - max(0., row_frac - 0.5)) * + (min(1., col_frac + 0.5) - max(0., col_frac - 0.5))) + if valid(row - 1, col): + self._view[row - 1, col, d] += ( + (max(0., 0.5 - row_frac)) * + (min(1., col_frac + 0.5) - max(0., col_frac - 0.5))) + if valid(row + 1, col): + self._view[row + 1, col, d] += ( + (max(0., row_frac - 0.5)) * + (min(1., col_frac + 0.5) - max(0., col_frac - 0.5))) + if valid(row, col - 1): + self._view[row, col - 1, d] += ( + (min(1., row_frac + 0.5) - max(0., row_frac - 0.5)) * + (max(0., 0.5 - col_frac))) + if valid(row, col + 1): + self._view[row, col + 1, d] += ( + (min(1., row_frac + 0.5) - max(0., row_frac - 0.5)) * + (max(0., col_frac - 0.5))) + if valid(row - 1, col - 1): + self._view[row - 1, col - 1, d] += ( + (max(0., 0.5 - row_frac)) * max(0., 0.5 - col_frac)) + if valid(row - 1, col + 1): + self._view[row - 1, col + 1, d] += ( + (max(0., 0.5 - row_frac)) * max(0., col_frac - 0.5)) + if valid(row + 1, col + 1): + self._view[row + 1, col + 1, d] += ( + (max(0., row_frac - 0.5)) * max(0., col_frac - 0.5)) + if valid(row + 1, col - 1): + self._view[row + 1, col - 1, d] += ( + (max(0., row_frac - 0.5)) * max(0., 0.5 - col_frac)) + + # Draw ant. + robot_x, robot_y = self.wrapped_env.get_body_com("torso")[:2] + self._robot_x = robot_x + self._robot_y = robot_y + self._robot_ori = self.get_ori() + + structure = self.MAZE_STRUCTURE + size_scaling = self.MAZE_SIZE_SCALING + height = self.MAZE_HEIGHT + + # Draw immovable blocks and chasms. + for i in range(len(structure)): + for j in range(len(structure[0])): + if structure[i][j] == 1: # Wall. + update_view(j * size_scaling - self._init_torso_x, + i * size_scaling - self._init_torso_y, + 0) + if structure[i][j] == -1: # Chasm. + update_view(j * size_scaling - self._init_torso_x, + i * size_scaling - self._init_torso_y, + 1) + + # Draw movable blocks. 
+ for block_name, block_type in self.movable_blocks: + block_x, block_y = self.wrapped_env.get_body_com(block_name)[:2] + update_view(block_x, block_y, 2) + + return self._view + + def get_range_sensor_obs(self): + """Returns egocentric range sensor observations of maze.""" + robot_x, robot_y, robot_z = self.wrapped_env.get_body_com("torso")[:3] + ori = self.get_ori() + + structure = self.MAZE_STRUCTURE + size_scaling = self.MAZE_SIZE_SCALING + height = self.MAZE_HEIGHT + + segments = [] + # Get line segments (corresponding to outer boundary) of each immovable + # block or drop-off. + for i in range(len(structure)): + for j in range(len(structure[0])): + if structure[i][j] in [1, -1]: # There's a wall or drop-off. + cx = j * size_scaling - self._init_torso_x + cy = i * size_scaling - self._init_torso_y + x1 = cx - 0.5 * size_scaling + x2 = cx + 0.5 * size_scaling + y1 = cy - 0.5 * size_scaling + y2 = cy + 0.5 * size_scaling + struct_segments = [ + ((x1, y1), (x2, y1)), + ((x2, y1), (x2, y2)), + ((x2, y2), (x1, y2)), + ((x1, y2), (x1, y1)), + ] + for seg in struct_segments: + segments.append(dict( + segment=seg, + type=structure[i][j], + )) + # Get line segments (corresponding to outer boundary) of each movable + # block within the agent's z-view. + for block_name, block_type in self.movable_blocks: + block_x, block_y, block_z = self.wrapped_env.get_body_com(block_name)[:3] + if (block_z + height * size_scaling / 2 >= robot_z and + robot_z >= block_z - height * size_scaling / 2): # Block in view. + x1 = block_x - 0.5 * size_scaling + x2 = block_x + 0.5 * size_scaling + y1 = block_y - 0.5 * size_scaling + y2 = block_y + 0.5 * size_scaling + struct_segments = [ + ((x1, y1), (x2, y1)), + ((x2, y1), (x2, y2)), + ((x2, y2), (x1, y2)), + ((x1, y2), (x1, y1)), + ] + for seg in struct_segments: + segments.append(dict( + segment=seg, + type=block_type, + )) + + sensor_readings = np.zeros((self._n_bins, 3)) # 3 for wall, drop-off, block + for ray_idx in range(self._n_bins): + ray_ori = (ori - self._sensor_span * 0.5 + + (2 * ray_idx + 1.0) / (2 * self._n_bins) * self._sensor_span) + ray_segments = [] + # Get all segments that intersect with ray. + for seg in segments: + p = maze_env_utils.ray_segment_intersect( + ray=((robot_x, robot_y), ray_ori), + segment=seg["segment"]) + if p is not None: + ray_segments.append(dict( + segment=seg["segment"], + type=seg["type"], + ray_ori=ray_ori, + distance=maze_env_utils.point_distance(p, (robot_x, robot_y)), + )) + if len(ray_segments) > 0: + # Find out which segment is intersected first. + first_seg = sorted(ray_segments, key=lambda x: x["distance"])[0] + seg_type = first_seg["type"] + idx = (0 if seg_type == 1 else # Wall. + 1 if seg_type == -1 else # Drop-off. + 2 if maze_env_utils.can_move(seg_type) else # Block. 
+ None) + if first_seg["distance"] <= self._sensor_range: + sensor_readings[ray_idx][idx] = (self._sensor_range - first_seg["distance"]) / self._sensor_range + + return sensor_readings + + def _get_obs(self): + wrapped_obs = self.wrapped_env._get_obs() + if self._top_down_view: + view = [self.get_top_down_view().flat] + else: + view = [] + + if self._observe_blocks: + additional_obs = [] + for block_name, block_type in self.movable_blocks: + additional_obs.append(self.wrapped_env.get_body_com(block_name)) + wrapped_obs = np.concatenate([wrapped_obs[:3]] + additional_obs + + [wrapped_obs[3:]]) + + range_sensor_obs = self.get_range_sensor_obs() + return np.concatenate([wrapped_obs, + range_sensor_obs.flat] + + view + [[self.t * 0.001]]) + + def reset(self): + self.t = 0 + self.trajectory = [] + self.wrapped_env.reset() + if len(self._init_positions) > 1: + xy = random.choice(self._init_positions) + self.wrapped_env.set_xy(xy) + return self._get_obs() + + @property + def viewer(self): + return self.wrapped_env.viewer + + def render(self, *args, **kwargs): + return self.wrapped_env.render(*args, **kwargs) + + @property + def observation_space(self): + shape = self._get_obs().shape + high = np.inf * np.ones(shape) + low = -high + return gym.spaces.Box(low, high) + + @property + def action_space(self): + return self.wrapped_env.action_space + + def _find_robot(self): + structure = self.MAZE_STRUCTURE + size_scaling = self.MAZE_SIZE_SCALING + for i in range(len(structure)): + for j in range(len(structure[0])): + if structure[i][j] == 'r': + return j * size_scaling, i * size_scaling + assert False, 'No robot in maze specification.' + + def _find_all_robots(self): + structure = self.MAZE_STRUCTURE + size_scaling = self.MAZE_SIZE_SCALING + coords = [] + for i in range(len(structure)): + for j in range(len(structure[0])): + if structure[i][j] == 'r': + coords.append((j * size_scaling, i * size_scaling)) + return coords + + def _is_in_collision(self, pos): + x, y = pos + structure = self.MAZE_STRUCTURE + size_scaling = self.MAZE_SIZE_SCALING + for i in range(len(structure)): + for j in range(len(structure[0])): + if structure[i][j] == 1: + minx = j * size_scaling - size_scaling * 0.5 - self._init_torso_x + maxx = j * size_scaling + size_scaling * 0.5 - self._init_torso_x + miny = i * size_scaling - size_scaling * 0.5 - self._init_torso_y + maxy = i * size_scaling + size_scaling * 0.5 - self._init_torso_y + if minx <= x <= maxx and miny <= y <= maxy: + return True + return False + + def step(self, action): + self.t += 1 + if self._manual_collision: + old_pos = self.wrapped_env.get_xy() + inner_next_obs, inner_reward, done, info = self.wrapped_env.step(action) + new_pos = self.wrapped_env.get_xy() + if self._is_in_collision(new_pos): + self.wrapped_env.set_xy(old_pos) + else: + inner_next_obs, inner_reward, done, info = self.wrapped_env.step(action) + next_obs = self._get_obs() + done = False + return next_obs, inner_reward, done, info diff --git a/models/research/efficient-hrl/environments/maze_env_utils.py b/models/research/efficient-hrl/environments/maze_env_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4f52509b65a89b43c1baa8e7448e0692ceefaaab --- /dev/null +++ b/models/research/efficient-hrl/environments/maze_env_utils.py @@ -0,0 +1,164 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
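# A minimal sketch, for illustration only: the range sensor above casts n_bins rays spread
# evenly across sensor_span around the robot's heading, and a hit at distance d within
# sensor_range is reported as (sensor_range - d) / sensor_range, i.e. 1.0 for a touching
# obstacle and 0.0 at or beyond maximum range. The angles and normalisation in NumPy:
import numpy as np

def ray_angles(heading, sensor_span, n_bins):
    # Centre angle of each of the n_bins equal slices of sensor_span.
    return heading - sensor_span * 0.5 + (2 * np.arange(n_bins) + 1.0) / (2 * n_bins) * sensor_span

def normalized_reading(distance, sensor_range):
    return max(0.0, (sensor_range - distance) / sensor_range)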
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Adapted from rllab maze_env_utils.py.""" +import numpy as np +import math + + +class Move(object): + X = 11 + Y = 12 + Z = 13 + XY = 14 + XZ = 15 + YZ = 16 + XYZ = 17 + SpinXY = 18 + + +def can_move_x(movable): + return movable in [Move.X, Move.XY, Move.XZ, Move.XYZ, + Move.SpinXY] + + +def can_move_y(movable): + return movable in [Move.Y, Move.XY, Move.YZ, Move.XYZ, + Move.SpinXY] + + +def can_move_z(movable): + return movable in [Move.Z, Move.XZ, Move.YZ, Move.XYZ] + + +def can_spin(movable): + return movable in [Move.SpinXY] + + +def can_move(movable): + return can_move_x(movable) or can_move_y(movable) or can_move_z(movable) + + +def construct_maze(maze_id='Maze'): + if maze_id == 'Maze': + structure = [ + [1, 1, 1, 1, 1], + [1, 'r', 0, 0, 1], + [1, 1, 1, 0, 1], + [1, 0, 0, 0, 1], + [1, 1, 1, 1, 1], + ] + elif maze_id == 'Push': + structure = [ + [1, 1, 1, 1, 1], + [1, 0, 'r', 1, 1], + [1, 0, Move.XY, 0, 1], + [1, 1, 0, 1, 1], + [1, 1, 1, 1, 1], + ] + elif maze_id == 'Fall': + structure = [ + [1, 1, 1, 1], + [1, 'r', 0, 1], + [1, 0, Move.YZ, 1], + [1, -1, -1, 1], + [1, 0, 0, 1], + [1, 1, 1, 1], + ] + elif maze_id == 'Block': + O = 'r' + structure = [ + [1, 1, 1, 1, 1], + [1, O, 0, 0, 1], + [1, 0, 0, 0, 1], + [1, 0, 0, 0, 1], + [1, 1, 1, 1, 1], + ] + elif maze_id == 'BlockMaze': + O = 'r' + structure = [ + [1, 1, 1, 1], + [1, O, 0, 1], + [1, 1, 0, 1], + [1, 0, 0, 1], + [1, 1, 1, 1], + ] + else: + raise NotImplementedError('The provided MazeId %s is not recognized' % maze_id) + + return structure + + +def line_intersect(pt1, pt2, ptA, ptB): + """ + Taken from https://www.cs.hmc.edu/ACM/lectures/intersections.html + + this returns the intersection of Line(pt1,pt2) and Line(ptA,ptB) + """ + + DET_TOLERANCE = 0.00000001 + + # the first line is pt1 + r*(pt2-pt1) + # in component form: + x1, y1 = pt1 + x2, y2 = pt2 + dx1 = x2 - x1 + dy1 = y2 - y1 + + # the second line is ptA + s*(ptB-ptA) + x, y = ptA + xB, yB = ptB + dx = xB - x + dy = yB - y + + DET = (-dx1 * dy + dy1 * dx) + + if math.fabs(DET) < DET_TOLERANCE: return (0, 0, 0, 0, 0) + + # now, the determinant should be OK + DETinv = 1.0 / DET + + # find the scalar amount along the "self" segment + r = DETinv * (-dy * (x - x1) + dx * (y - y1)) + + # find the scalar amount along the input line + s = DETinv * (-dy1 * (x - x1) + dx1 * (y - y1)) + + # return the average of the two descriptions + xi = (x1 + r * dx1 + x + s * dx) / 2.0 + yi = (y1 + r * dy1 + y + s * dy) / 2.0 + return (xi, yi, 1, r, s) + + +def ray_segment_intersect(ray, segment): + """ + Check if the ray originated from (x, y) with direction theta intersects the line segment (x1, y1) -- (x2, y2), + and return the intersection point if there is one + """ + (x, y), theta = ray + # (x1, y1), (x2, y2) = segment + pt1 = (x, y) + len = 1 + pt2 = (x + len * math.cos(theta), y + len * math.sin(theta)) + xo, yo, valid, r, s = line_intersect(pt1, pt2, *segment) + if valid and r >= 0 and 0 <= s <= 1: + return (xo, yo) + return None + + +def point_distance(p1, p2): + x1, y1 = p1 + x2, y2 = p2 + 
return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5 diff --git a/models/research/efficient-hrl/environments/point.py b/models/research/efficient-hrl/environments/point.py new file mode 100644 index 0000000000000000000000000000000000000000..9c2fc80bc824dbc81e228a122b9aaea054f73b74 --- /dev/null +++ b/models/research/efficient-hrl/environments/point.py @@ -0,0 +1,97 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Wrapper for creating the ant environment in gym_mujoco.""" + +import math +import numpy as np +import mujoco_py +from gym import utils +from gym.envs.mujoco import mujoco_env + + +class PointEnv(mujoco_env.MujocoEnv, utils.EzPickle): + FILE = "point.xml" + ORI_IND = 2 + + def __init__(self, file_path=None, expose_all_qpos=True): + self._expose_all_qpos = expose_all_qpos + + mujoco_env.MujocoEnv.__init__(self, file_path, 1) + utils.EzPickle.__init__(self) + + @property + def physics(self): + # check mujoco version is greater than version 1.50 to call correct physics + # model containing PyMjData object for getting and setting position/velocity + # check https://github.com/openai/mujoco-py/issues/80 for updates to api + if mujoco_py.get_version() >= '1.50': + return self.sim + else: + return self.model + + def _step(self, a): + return self.step(a) + + def step(self, action): + action[0] = 0.2 * action[0] + qpos = np.copy(self.physics.data.qpos) + qpos[2] += action[1] + ori = qpos[2] + # compute increment in each direction + dx = math.cos(ori) * action[0] + dy = math.sin(ori) * action[0] + # ensure that the robot is within reasonable range + qpos[0] = np.clip(qpos[0] + dx, -100, 100) + qpos[1] = np.clip(qpos[1] + dy, -100, 100) + qvel = self.physics.data.qvel + self.set_state(qpos, qvel) + for _ in range(0, self.frame_skip): + self.physics.step() + next_obs = self._get_obs() + reward = 0 + done = False + info = {} + return next_obs, reward, done, info + + def _get_obs(self): + if self._expose_all_qpos: + return np.concatenate([ + self.physics.data.qpos.flat[:3], # Only point-relevant coords. + self.physics.data.qvel.flat[:3]]) + return np.concatenate([ + self.physics.data.qpos.flat[2:3], + self.physics.data.qvel.flat[:3]]) + + def reset_model(self): + qpos = self.init_qpos + self.np_random.uniform( + size=self.physics.model.nq, low=-.1, high=.1) + qvel = self.init_qvel + self.np_random.randn(self.physics.model.nv) * .1 + + # Set everything other than point to original position and 0 velocity. + qpos[3:] = self.init_qpos[3:] + qvel[3:] = 0. 
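# A minimal sketch, for illustration only: PointEnv.step() above integrates a simple
# unicycle model -- action[1] turns the point, the 0.2-scaled action[0] moves it along its
# current heading, and x/y are clipped to [-100, 100]. The same update in plain NumPy,
# with hypothetical names:
import numpy as np

def point_step(x, y, ori, action):
    speed = 0.2 * action[0]
    ori = ori + action[1]
    x = np.clip(x + np.cos(ori) * speed, -100, 100)
    y = np.clip(y + np.sin(ori) * speed, -100, 100)
    return x, y, ori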
+ self.set_state(qpos, qvel) + return self._get_obs() + + def get_ori(self): + return self.physics.data.qpos[self.__class__.ORI_IND] + + def set_xy(self, xy): + qpos = np.copy(self.physics.data.qpos) + qpos[0] = xy[0] + qpos[1] = xy[1] + + qvel = self.physics.data.qvel diff --git a/models/research/efficient-hrl/environments/point_maze_env.py b/models/research/efficient-hrl/environments/point_maze_env.py new file mode 100644 index 0000000000000000000000000000000000000000..8d6b819486370d609b87c232d92c4093aa906863 --- /dev/null +++ b/models/research/efficient-hrl/environments/point_maze_env.py @@ -0,0 +1,21 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +from environments.maze_env import MazeEnv +from environments.point import PointEnv + + +class PointMazeEnv(MazeEnv): + MODEL_CLASS = PointEnv diff --git a/models/research/efficient-hrl/eval.py b/models/research/efficient-hrl/eval.py new file mode 100644 index 0000000000000000000000000000000000000000..4f5a4b20a53d920b4a9095c30de2c03698cd1b78 --- /dev/null +++ b/models/research/efficient-hrl/eval.py @@ -0,0 +1,460 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""Script for evaluating a UVF agent. + +To run locally: See run_eval.py + +To run on borg: See train_eval.borg +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import tensorflow as tf +slim = tf.contrib.slim +import gin.tf +# pylint: disable=unused-import +import agent +import train +from utils import utils as uvf_utils +from utils import eval_utils +from environments import create_maze_env +# pylint: enable=unused-import + +flags = tf.app.flags + +flags.DEFINE_string('eval_dir', None, + 'Directory for writing logs/summaries during eval.') +flags.DEFINE_string('checkpoint_dir', None, + 'Directory containing checkpoints to eval.') +FLAGS = flags.FLAGS + + +def get_evaluate_checkpoint_fn(master, output_dir, eval_step_fns, + model_rollout_fn, gamma, max_steps_per_episode, + num_episodes_eval, num_episodes_videos, + tuner_hook, generate_videos, + generate_summaries, video_settings): + """Returns a function that evaluates a given checkpoint. 
+ + Args: + master: BNS name of the TensorFlow master + output_dir: The output directory to which the metric summaries are written. + eval_step_fns: A dictionary of a functions that return a list of + [state, action, reward, discount, transition_type] tensors, + indexed by summary tag name. + model_rollout_fn: Model rollout fn. + gamma: Discount factor for the reward. + max_steps_per_episode: Maximum steps to run each episode for. + num_episodes_eval: Number of episodes to evaluate and average reward over. + num_episodes_videos: Number of episodes to record for video. + tuner_hook: A callable(average reward, global step) that updates a Vizier + tuner trial. + generate_videos: Whether to generate videos of the agent in action. + generate_summaries: Whether to generate summaries. + video_settings: Settings for generating videos of the agent. + + Returns: + A function that evaluates a checkpoint. + """ + sess = tf.Session(master, graph=tf.get_default_graph()) + sess.run(tf.global_variables_initializer()) + sess.run(tf.local_variables_initializer()) + summary_writer = tf.summary.FileWriter(output_dir) + + def evaluate_checkpoint(checkpoint_path): + """Performs a one-time evaluation of the given checkpoint. + + Args: + checkpoint_path: Checkpoint to evaluate. + Returns: + True if the evaluation process should stop + """ + restore_fn = tf.contrib.framework.assign_from_checkpoint_fn( + checkpoint_path, + uvf_utils.get_all_vars(), + ignore_missing_vars=True, + reshape_variables=False) + assert restore_fn is not None, 'cannot restore %s' % checkpoint_path + restore_fn(sess) + global_step = sess.run(slim.get_global_step()) + should_stop = False + max_reward = -1e10 + max_meta_reward = -1e10 + + for eval_tag, (eval_step, env_base,) in sorted(eval_step_fns.items()): + if hasattr(env_base, 'set_sess'): + env_base.set_sess(sess) # set session + + if generate_summaries: + tf.logging.info( + '[%s] Computing average reward over %d episodes at global step %d.', + eval_tag, num_episodes_eval, global_step) + (average_reward, last_reward, + average_meta_reward, last_meta_reward, average_success, + states, actions) = eval_utils.compute_average_reward( + sess, env_base, eval_step, gamma, max_steps_per_episode, + num_episodes_eval) + tf.logging.info('[%s] Average reward = %f', eval_tag, average_reward) + tf.logging.info('[%s] Last reward = %f', eval_tag, last_reward) + tf.logging.info('[%s] Average meta reward = %f', eval_tag, average_meta_reward) + tf.logging.info('[%s] Last meta reward = %f', eval_tag, last_meta_reward) + tf.logging.info('[%s] Average success = %f', eval_tag, average_success) + if model_rollout_fn is not None: + preds, model_losses = eval_utils.compute_model_loss( + sess, model_rollout_fn, states, actions) + for i, (pred, state, model_loss) in enumerate( + zip(preds, states, model_losses)): + tf.logging.info('[%s] Model rollout step %d: loss=%f', eval_tag, i, + model_loss) + tf.logging.info('[%s] Model rollout step %d: pred=%s', eval_tag, i, + str(pred.tolist())) + tf.logging.info('[%s] Model rollout step %d: state=%s', eval_tag, i, + str(state.tolist())) + + # Report the eval stats to the tuner. 
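# A minimal usage sketch, for illustration only: get_evaluate_checkpoint_fn() returns a
# closure that restores one checkpoint into the already-built agent graph and runs the
# eval episodes. Assuming the arguments are constructed as in evaluate() further below
# (the checkpoint path shown is hypothetical), a one-off evaluation reduces to:
evaluate_checkpoint = get_evaluate_checkpoint_fn(
    '', eval_dir, eval_step_fns, model_rollout_fn, gamma,
    max_steps_per_episode, num_episodes_eval, num_episodes_videos,
    tuner_hook, generate_videos, generate_summaries, video_settings)
should_stop = evaluate_checkpoint('/tmp/uvf/policy/model.ckpt-100000')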
+ if average_reward > max_reward: + max_reward = average_reward + if average_meta_reward > max_meta_reward: + max_meta_reward = average_meta_reward + + for (tag, value) in [('Reward/average_%s_reward', average_reward), + ('Reward/last_%s_reward', last_reward), + ('Reward/average_%s_meta_reward', average_meta_reward), + ('Reward/last_%s_meta_reward', last_meta_reward), + ('Reward/average_%s_success', average_success)]: + summary_str = tf.Summary(value=[ + tf.Summary.Value( + tag=tag % eval_tag, + simple_value=value) + ]) + summary_writer.add_summary(summary_str, global_step) + summary_writer.flush() + + if generate_videos or should_stop: + # Do a manual reset before generating the video to see the initial + # pose of the robot, towards which the reset controller is moving. + if hasattr(env_base, '_gym_env'): + tf.logging.info('Resetting before recording video') + if hasattr(env_base._gym_env, 'reset_model'): + env_base._gym_env.reset_model() # pylint: disable=protected-access + else: + env_base._gym_env.wrapped_env.reset_model() + video_filename = os.path.join(output_dir, 'videos', + '%s_step_%d.mp4' % (eval_tag, + global_step)) + eval_utils.capture_video(sess, eval_step, env_base, + max_steps_per_episode * num_episodes_videos, + video_filename, video_settings, + reset_every=max_steps_per_episode) + + should_stop = should_stop or (generate_summaries and tuner_hook and + tuner_hook(max_reward, global_step)) + return bool(should_stop) + + return evaluate_checkpoint + + +def get_model_rollout(uvf_agent, tf_env): + """Model rollout function.""" + state_spec = tf_env.observation_spec()[0] + action_spec = tf_env.action_spec()[0] + state_ph = tf.placeholder(dtype=state_spec.dtype, shape=state_spec.shape) + action_ph = tf.placeholder(dtype=action_spec.dtype, shape=action_spec.shape) + + merged_state = uvf_agent.merged_state(state_ph) + diff_value = uvf_agent.critic_net(tf.expand_dims(merged_state, 0), + tf.expand_dims(action_ph, 0))[0] + diff_value = tf.cast(diff_value, dtype=state_ph.dtype) + state_ph.shape.assert_is_compatible_with(diff_value.shape) + next_state = state_ph + diff_value + + def model_rollout_fn(sess, state, action): + return sess.run(next_state, feed_dict={state_ph: state, action_ph: action}) + + return model_rollout_fn + + +def get_eval_step(uvf_agent, + state_preprocess, + tf_env, + action_fn, + meta_action_fn, + environment_steps, + num_episodes, + mode='eval'): + """Get one-step policy/env stepping ops. + + Args: + uvf_agent: A UVF agent. + tf_env: A TFEnvironment. + action_fn: A function to produce actions given current state. + meta_action_fn: A function to produce meta actions given current state. + environment_steps: A variable to count the number of steps in the tf_env. + num_episodes: A variable to count the number of episodes. + mode: a string representing the mode=[train, explore, eval]. 
+ + Returns: + A collect_experience_op that excute an action and store into the + replay_buffer + """ + + tf_env.start_collect() + state = tf_env.current_obs() + action = action_fn(state, context=None) + state_repr = state_preprocess(state) + + action_spec = tf_env.action_spec() + action_ph = tf.placeholder(dtype=action_spec.dtype, shape=action_spec.shape) + with tf.control_dependencies([state]): + transition_type, reward, discount = tf_env.step(action_ph) + + def increment_step(): + return environment_steps.assign_add(1) + + def increment_episode(): + return num_episodes.assign_add(1) + + def no_op_int(): + return tf.constant(0, dtype=tf.int64) + + step_cond = uvf_agent.step_cond_fn(state, action, + transition_type, + environment_steps, num_episodes) + reset_episode_cond = uvf_agent.reset_episode_cond_fn( + state, action, + transition_type, environment_steps, num_episodes) + reset_env_cond = uvf_agent.reset_env_cond_fn(state, action, + transition_type, + environment_steps, num_episodes) + + increment_step_op = tf.cond(step_cond, increment_step, no_op_int) + with tf.control_dependencies([increment_step_op]): + increment_episode_op = tf.cond(reset_episode_cond, increment_episode, + no_op_int) + + with tf.control_dependencies([reward, discount]): + next_state = tf_env.current_obs() + next_state_repr = state_preprocess(next_state) + + with tf.control_dependencies([increment_episode_op]): + post_reward, post_meta_reward = uvf_agent.cond_begin_episode_op( + tf.logical_not(reset_episode_cond), + [state, action_ph, reward, next_state, + state_repr, next_state_repr], + mode=mode, meta_action_fn=meta_action_fn) + + # Important: do manual reset after getting the final reward from the + # unreset environment. + with tf.control_dependencies([post_reward, post_meta_reward]): + cond_reset_op = tf.cond(reset_env_cond, + tf_env.reset, + tf_env.current_time_step) + + # Add a dummy control dependency to force the reset_op to run + with tf.control_dependencies(cond_reset_op): + post_reward, post_meta_reward = map(tf.identity, [post_reward, post_meta_reward]) + + eval_step = [next_state, action_ph, transition_type, post_reward, post_meta_reward, discount, uvf_agent.context_vars, state_repr] + + if callable(action): + def step_fn(sess): + action_value = action(sess) + return sess.run(eval_step, feed_dict={action_ph: action_value}) + else: + action = uvf_utils.clip_to_spec(action, action_spec) + def step_fn(sess): + action_value = sess.run(action) + return sess.run(eval_step, feed_dict={action_ph: action_value}) + + return step_fn + + +@gin.configurable +def evaluate(checkpoint_dir, + eval_dir, + environment=None, + num_bin_actions=3, + agent_class=None, + meta_agent_class=None, + state_preprocess_class=None, + gamma=1.0, + num_episodes_eval=10, + eval_interval_secs=60, + max_number_of_evaluations=None, + checkpoint_timeout=None, + timeout_fn=None, + tuner_hook=None, + generate_videos=False, + generate_summaries=True, + num_episodes_videos=5, + video_settings=None, + eval_modes=('eval',), + eval_model_rollout=False, + policy_save_dir='policy', + checkpoint_range=None, + checkpoint_path=None, + max_steps_per_episode=None, + evaluate_nohrl=False): + """Loads and repeatedly evaluates a checkpointed model at a set interval. + + Args: + checkpoint_dir: The directory where the checkpoints reside. + eval_dir: Directory to save the evaluation summary results. + environment: A BaseEnvironment to evaluate. + num_bin_actions: Number of bins for discretizing continuous actions. + agent_class: An RL agent class. 
+ meta_agent_class: A Meta agent class. + gamma: Discount factor for the reward. + num_episodes_eval: Number of episodes to evaluate and average reward over. + eval_interval_secs: The number of seconds between each evaluation run. + max_number_of_evaluations: The max number of evaluations. If None the + evaluation continues indefinitely. + checkpoint_timeout: The maximum amount of time to wait between checkpoints. + If left as `None`, then the process will wait indefinitely. + timeout_fn: Optional function to call after a timeout. + tuner_hook: A callable that takes the average reward and global step and + updates a Vizier tuner trial. + generate_videos: Whether to generate videos of the agent in action. + generate_summaries: Whether to generate summaries. + num_episodes_videos: Number of episodes to evaluate for generating videos. + video_settings: Settings for generating videos of the agent. + optimal action based on the critic. + eval_modes: A tuple of eval modes. + eval_model_rollout: Evaluate model rollout. + policy_save_dir: Optional sub-directory where the policies are + saved. + checkpoint_range: Optional. If provided, evaluate all checkpoints in + the range. + checkpoint_path: Optional sub-directory specifying which checkpoint to + evaluate. If None, will evaluate the most recent checkpoint. + """ + tf_env = create_maze_env.TFPyEnvironment(environment) + observation_spec = [tf_env.observation_spec()] + action_spec = [tf_env.action_spec()] + + assert max_steps_per_episode, 'max_steps_per_episode need to be set' + + if agent_class.ACTION_TYPE == 'discrete': + assert False + else: + assert agent_class.ACTION_TYPE == 'continuous' + + if meta_agent_class is not None: + assert agent_class.ACTION_TYPE == meta_agent_class.ACTION_TYPE + with tf.variable_scope('meta_agent'): + meta_agent = meta_agent_class( + observation_spec, + action_spec, + tf_env, + ) + else: + meta_agent = None + + with tf.variable_scope('uvf_agent'): + uvf_agent = agent_class( + observation_spec, + action_spec, + tf_env, + ) + uvf_agent.set_meta_agent(agent=meta_agent) + + with tf.variable_scope('state_preprocess'): + state_preprocess = state_preprocess_class() + + # run both actor and critic once to ensure networks are initialized + # and gin configs will be saved + # pylint: disable=protected-access + temp_states = tf.expand_dims( + tf.zeros( + dtype=uvf_agent._observation_spec.dtype, + shape=uvf_agent._observation_spec.shape), 0) + # pylint: enable=protected-access + temp_actions = uvf_agent.actor_net(temp_states) + uvf_agent.critic_net(temp_states, temp_actions) + + # create eval_step_fns for each action function + eval_step_fns = dict() + meta_agent = uvf_agent.meta_agent + for meta in [True] + [False] * evaluate_nohrl: + meta_tag = 'hrl' if meta else 'nohrl' + uvf_agent.set_meta_agent(meta_agent if meta else None) + for mode in eval_modes: + # wrap environment + wrapped_environment = uvf_agent.get_env_base_wrapper( + environment, mode=mode) + action_wrapper = lambda agent_: agent_.action + action_fn = action_wrapper(uvf_agent) + meta_action_fn = action_wrapper(meta_agent) + eval_step_fns['%s_%s' % (mode, meta_tag)] = (get_eval_step( + uvf_agent=uvf_agent, + state_preprocess=state_preprocess, + tf_env=tf_env, + action_fn=action_fn, + meta_action_fn=meta_action_fn, + environment_steps=tf.Variable( + 0, dtype=tf.int64, name='environment_steps'), + num_episodes=tf.Variable(0, dtype=tf.int64, name='num_episodes'), + mode=mode), wrapped_environment,) + + model_rollout_fn = None + if eval_model_rollout: + 
model_rollout_fn = get_model_rollout(uvf_agent, tf_env) + + tf.train.get_or_create_global_step() + + if policy_save_dir: + checkpoint_dir = os.path.join(checkpoint_dir, policy_save_dir) + + tf.logging.info('Evaluating policies at %s', checkpoint_dir) + tf.logging.info('Running episodes for max %d steps', max_steps_per_episode) + + evaluate_checkpoint_fn = get_evaluate_checkpoint_fn( + '', eval_dir, eval_step_fns, model_rollout_fn, gamma, + max_steps_per_episode, num_episodes_eval, num_episodes_videos, tuner_hook, + generate_videos, generate_summaries, video_settings) + + if checkpoint_path is not None: + checkpoint_path = os.path.join(checkpoint_dir, checkpoint_path) + evaluate_checkpoint_fn(checkpoint_path) + elif checkpoint_range is not None: + model_files = tf.gfile.Glob( + os.path.join(checkpoint_dir, 'model.ckpt-*.index')) + tf.logging.info('Found %s policies at %s', len(model_files), checkpoint_dir) + model_files = { + int(f.split('model.ckpt-', 1)[1].split('.', 1)[0]): + os.path.splitext(f)[0] + for f in model_files + } + model_files = { + k: v + for k, v in model_files.items() + if k >= checkpoint_range[0] and k <= checkpoint_range[1] + } + tf.logging.info('Evaluating %d policies at %s', + len(model_files), checkpoint_dir) + for _, checkpoint_path in sorted(model_files.items()): + evaluate_checkpoint_fn(checkpoint_path) + else: + eval_utils.evaluate_checkpoint_repeatedly( + checkpoint_dir, + evaluate_checkpoint_fn, + eval_interval_secs=eval_interval_secs, + max_number_of_evaluations=max_number_of_evaluations, + checkpoint_timeout=checkpoint_timeout, + timeout_fn=timeout_fn) diff --git a/models/research/efficient-hrl/run_env.py b/models/research/efficient-hrl/run_env.py new file mode 100644 index 0000000000000000000000000000000000000000..87fad542aea1dc0f9a39553b53d3da8978ca089f --- /dev/null +++ b/models/research/efficient-hrl/run_env.py @@ -0,0 +1,129 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Random policy on an environment.""" + +import tensorflow as tf +import numpy as np +import random + +from environments import create_maze_env + +app = tf.app +flags = tf.flags +logging = tf.logging + +FLAGS = flags.FLAGS + +flags.DEFINE_string('env', 'AntMaze', 'environment name: AntMaze, AntPush, or AntFall') +flags.DEFINE_integer('episode_length', 500, 'episode length') +flags.DEFINE_integer('num_episodes', 50, 'number of episodes') + + +def get_goal_sample_fn(env_name): + if env_name == 'AntMaze': + # NOTE: When evaluating (i.e. the metrics shown in the paper, + # we use the commented out goal sampling function. The uncommented + # one is only used for training. 
+ #return lambda: np.array([0., 16.]) + return lambda: np.random.uniform((-4, -4), (20, 20)) + elif env_name == 'AntPush': + return lambda: np.array([0., 19.]) + elif env_name == 'AntFall': + return lambda: np.array([0., 27., 4.5]) + else: + assert False, 'Unknown env' + + +def get_reward_fn(env_name): + if env_name == 'AntMaze': + return lambda obs, goal: -np.sum(np.square(obs[:2] - goal)) ** 0.5 + elif env_name == 'AntPush': + return lambda obs, goal: -np.sum(np.square(obs[:2] - goal)) ** 0.5 + elif env_name == 'AntFall': + return lambda obs, goal: -np.sum(np.square(obs[:3] - goal)) ** 0.5 + else: + assert False, 'Unknown env' + + +def success_fn(last_reward): + return last_reward > -5.0 + + +class EnvWithGoal(object): + + def __init__(self, base_env, env_name): + self.base_env = base_env + self.goal_sample_fn = get_goal_sample_fn(env_name) + self.reward_fn = get_reward_fn(env_name) + self.goal = None + + def reset(self): + obs = self.base_env.reset() + self.goal = self.goal_sample_fn() + return np.concatenate([obs, self.goal]) + + def step(self, a): + obs, _, done, info = self.base_env.step(a) + reward = self.reward_fn(obs, self.goal) + return np.concatenate([obs, self.goal]), reward, done, info + + @property + def action_space(self): + return self.base_env.action_space + + +def run_environment(env_name, episode_length, num_episodes): + env = EnvWithGoal( + create_maze_env.create_maze_env(env_name).gym, + env_name) + + def action_fn(obs): + action_space = env.action_space + action_space_mean = (action_space.low + action_space.high) / 2.0 + action_space_magn = (action_space.high - action_space.low) / 2.0 + random_action = (action_space_mean + + action_space_magn * + np.random.uniform(low=-1.0, high=1.0, + size=action_space.shape)) + return random_action + + rewards = [] + successes = [] + for ep in range(num_episodes): + rewards.append(0.0) + successes.append(False) + obs = env.reset() + for _ in range(episode_length): + obs, reward, done, _ = env.step(action_fn(obs)) + rewards[-1] += reward + successes[-1] = success_fn(reward) + if done: + break + logging.info('Episode %d reward: %.2f, Success: %d', ep + 1, rewards[-1], successes[-1]) + + logging.info('Average Reward over %d episodes: %.2f', + num_episodes, np.mean(rewards)) + logging.info('Average Success over %d episodes: %.2f', + num_episodes, np.mean(successes)) + + +def main(unused_argv): + logging.set_verbosity(logging.INFO) + run_environment(FLAGS.env, FLAGS.episode_length, FLAGS.num_episodes) + + +if __name__ == '__main__': + app.run() diff --git a/models/research/efficient-hrl/run_eval.py b/models/research/efficient-hrl/run_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..12f12369c4c90762bdf3d7506b957588856bdd3f --- /dev/null +++ b/models/research/efficient-hrl/run_eval.py @@ -0,0 +1,51 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +r"""Script for evaluating a UVF agent. + +To run locally: See scripts/local_eval.py +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +import gin.tf +# pylint: disable=unused-import +import eval as eval_ +# pylint: enable=unused-import + +flags = tf.app.flags +FLAGS = flags.FLAGS + + +def main(_): + tf.logging.set_verbosity(tf.logging.INFO) + assert FLAGS.checkpoint_dir, "Flag 'checkpoint_dir' must be set." + assert FLAGS.eval_dir, "Flag 'eval_dir' must be set." + + if FLAGS.config_file: + for config_file in FLAGS.config_file: + gin.parse_config_file(config_file) + if FLAGS.params: + gin.parse_config(FLAGS.params) + + eval_.evaluate(FLAGS.checkpoint_dir, FLAGS.eval_dir) + + +if __name__ == "__main__": + tf.app.run() diff --git a/models/research/efficient-hrl/run_train.py b/models/research/efficient-hrl/run_train.py new file mode 100644 index 0000000000000000000000000000000000000000..1d459d60b7f870bdcd81a48edc896158a9c6e4eb --- /dev/null +++ b/models/research/efficient-hrl/run_train.py @@ -0,0 +1,49 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""Script for training an RL agent using the UVF algorithm. + +To run locally: See scripts/local_train.py +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +import gin.tf +# pylint: enable=unused-import +import train +# pylint: disable=unused-import + +flags = tf.app.flags +FLAGS = flags.FLAGS + + +def main(_): + tf.logging.set_verbosity(tf.logging.INFO) + if FLAGS.config_file: + for config_file in FLAGS.config_file: + gin.parse_config_file(config_file) + if FLAGS.params: + gin.parse_config(FLAGS.params) + + assert FLAGS.train_dir, "Flag 'train_dir' must be set." + return train.train_uvf(FLAGS.train_dir) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/efficient-hrl/scripts/local_eval.py b/models/research/efficient-hrl/scripts/local_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..89ef745a4086197b07cee5f98fabfcc29af6d145 --- /dev/null +++ b/models/research/efficient-hrl/scripts/local_eval.py @@ -0,0 +1,76 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Script to run run_eval.py locally. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import os +from subprocess import call +import sys + +CONFIGS_PATH = 'configs' +CONTEXT_CONFIGS_PATH = 'context/configs' + +def main(): + bb = './' + base_num_args = 6 + if len(sys.argv) < base_num_args: + print( + "usage: python %s " + " [params...]" + % sys.argv[0]) + sys.exit(0) + exp = sys.argv[1] + context_setting = sys.argv[2] + context = sys.argv[3] + agent = sys.argv[4] + assert sys.argv[5] in ["suite"], "args[5] must be `suite'" + suite = "" + binary = "python {bb}/run_eval{suite}.py ".format(bb=bb, suite=suite) + + h = os.environ["HOME"] + ucp = CONFIGS_PATH + ccp = CONTEXT_CONFIGS_PATH + extra = '' + command_str = ("{binary} " + "--logtostderr " + "--checkpoint_dir={h}/tmp/{context_setting}/{context}/{agent}/{exp}/train " + "--eval_dir={h}/tmp/{context_setting}/{context}/{agent}/{exp}/eval " + "--config_file={ucp}/{agent}.gin " + "--config_file={ucp}/eval_{extra}uvf.gin " + "--config_file={ccp}/{context_setting}.gin " + "--config_file={ccp}/{context}.gin ").format( + h=h, + ucp=ucp, + ccp=ccp, + context_setting=context_setting, + context=context, + agent=agent, + extra=extra, + suite=suite, + exp=exp, + binary=binary) + for extra_arg in sys.argv[base_num_args:]: + command_str += "--params='%s' " % extra_arg + + print(command_str) + call(command_str, shell=True) + + +if __name__ == "__main__": + main() diff --git a/models/research/efficient-hrl/scripts/local_train.py b/models/research/efficient-hrl/scripts/local_train.py new file mode 100644 index 0000000000000000000000000000000000000000..718c88e8fedd381707ac944f5ee9243b636ac915 --- /dev/null +++ b/models/research/efficient-hrl/scripts/local_train.py @@ -0,0 +1,76 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Script to run run_train.py locally. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import os +import random +from subprocess import call +import sys + +CONFIGS_PATH = './configs' +CONTEXT_CONFIGS_PATH = './context/configs' + +def main(): + bb = '.' + base_num_args = 6 + if len(sys.argv) < base_num_args: + print( + "usage: python %s " + " [params...]" + % sys.argv[0]) + sys.exit(0) + exp = sys.argv[1] # Name for experiment, e.g. 'test001' + context_setting = sys.argv[2] # Context setting, e.g. 'hiro_orig' + context = sys.argv[3] # Environment-specific context, e.g. 'ant_maze' + agent = sys.argv[4] # Agent settings, e.g. 
'base_uvf' + assert sys.argv[5] in ["suite"], "args[5] must be `suite'" + suite = "" + binary = "python {bb}/run_train{suite}.py ".format(bb=bb, suite=suite) + + h = os.environ["HOME"] + ucp = CONFIGS_PATH + ccp = CONTEXT_CONFIGS_PATH + extra = '' + port = random.randint(2000, 8000) + command_str = ("{binary} " + "--train_dir={h}/tmp/{context_setting}/{context}/{agent}/{exp}/train " + "--config_file={ucp}/{agent}.gin " + "--config_file={ucp}/train_{extra}uvf.gin " + "--config_file={ccp}/{context_setting}.gin " + "--config_file={ccp}/{context}.gin " + "--summarize_gradients=False " + "--save_interval_secs=60 " + "--save_summaries_secs=1 " + "--master=local " + "--alsologtostderr ").format(h=h, ucp=ucp, + context_setting=context_setting, + context=context, ccp=ccp, + suite=suite, agent=agent, extra=extra, + exp=exp, binary=binary, + port=port) + for extra_arg in sys.argv[base_num_args:]: + command_str += "--params='%s' " % extra_arg + + print(command_str) + call(command_str, shell=True) + + +if __name__ == "__main__": + main() diff --git a/models/research/efficient-hrl/train.py b/models/research/efficient-hrl/train.py new file mode 100644 index 0000000000000000000000000000000000000000..a40e81dbec6c103563192a373661cda8b5ae5fbb --- /dev/null +++ b/models/research/efficient-hrl/train.py @@ -0,0 +1,670 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""Script for training an RL agent using the UVF algorithm. + +To run locally: See run_train.py +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import time +import tensorflow as tf +slim = tf.contrib.slim + +import gin.tf +# pylint: disable=unused-import +import train_utils +import agent as agent_ +from agents import circular_buffer +from utils import utils as uvf_utils +from environments import create_maze_env +# pylint: enable=unused-import + + +flags = tf.app.flags + +FLAGS = flags.FLAGS +flags.DEFINE_string('goal_sample_strategy', 'sample', + 'None, sample, FuN') + +LOAD_PATH = None + + +def collect_experience(tf_env, agent, meta_agent, state_preprocess, + replay_buffer, meta_replay_buffer, + action_fn, meta_action_fn, + environment_steps, num_episodes, num_resets, + episode_rewards, episode_meta_rewards, + store_context, + disable_agent_reset): + """Collect experience in a tf_env into a replay_buffer using action_fn. + + Args: + tf_env: A TFEnvironment. + agent: A UVF agent. + meta_agent: A Meta Agent. + replay_buffer: A Replay buffer to collect experience in. + meta_replay_buffer: A Replay buffer to collect meta agent experience in. + action_fn: A function to produce actions given current state. + meta_action_fn: A function to produce meta actions given current state. + environment_steps: A variable to count the number of steps in the tf_env. + num_episodes: A variable to count the number of episodes. 
+ num_resets: A variable to count the number of resets. + store_context: A boolean to check if store context in replay. + disable_agent_reset: A boolean that disables agent from resetting. + + Returns: + A collect_experience_op that excute an action and store into the + replay_buffers + """ + tf_env.start_collect() + state = tf_env.current_obs() + state_repr = state_preprocess(state) + action = action_fn(state, context=None) + + with tf.control_dependencies([state]): + transition_type, reward, discount = tf_env.step(action) + + def increment_step(): + return environment_steps.assign_add(1) + + def increment_episode(): + return num_episodes.assign_add(1) + + def increment_reset(): + return num_resets.assign_add(1) + + def update_episode_rewards(context_reward, meta_reward, reset): + new_episode_rewards = tf.concat( + [episode_rewards[:1] + context_reward, episode_rewards[1:]], 0) + new_episode_meta_rewards = tf.concat( + [episode_meta_rewards[:1] + meta_reward, + episode_meta_rewards[1:]], 0) + return tf.group( + episode_rewards.assign( + tf.cond(reset, + lambda: tf.concat([[0.], episode_rewards[:-1]], 0), + lambda: new_episode_rewards)), + episode_meta_rewards.assign( + tf.cond(reset, + lambda: tf.concat([[0.], episode_meta_rewards[:-1]], 0), + lambda: new_episode_meta_rewards))) + + def no_op_int(): + return tf.constant(0, dtype=tf.int64) + + step_cond = agent.step_cond_fn(state, action, + transition_type, + environment_steps, num_episodes) + reset_episode_cond = agent.reset_episode_cond_fn( + state, action, + transition_type, environment_steps, num_episodes) + reset_env_cond = agent.reset_env_cond_fn(state, action, + transition_type, + environment_steps, num_episodes) + + increment_step_op = tf.cond(step_cond, increment_step, no_op_int) + increment_episode_op = tf.cond(reset_episode_cond, increment_episode, + no_op_int) + increment_reset_op = tf.cond(reset_env_cond, increment_reset, no_op_int) + increment_op = tf.group(increment_step_op, increment_episode_op, + increment_reset_op) + + with tf.control_dependencies([increment_op, reward, discount]): + next_state = tf_env.current_obs() + next_state_repr = state_preprocess(next_state) + next_reset_episode_cond = tf.logical_or( + agent.reset_episode_cond_fn( + state, action, + transition_type, environment_steps, num_episodes), + tf.equal(discount, 0.0)) + + if store_context: + context = [tf.identity(var) + tf.zeros_like(var) for var in agent.context_vars] + meta_context = [tf.identity(var) + tf.zeros_like(var) for var in meta_agent.context_vars] + else: + context = [] + meta_context = [] + with tf.control_dependencies([next_state] + context + meta_context): + if disable_agent_reset: + collect_experience_ops = [tf.no_op()] # don't reset agent + else: + collect_experience_ops = agent.cond_begin_episode_op( + tf.logical_not(reset_episode_cond), + [state, action, reward, next_state, + state_repr, next_state_repr], + mode='explore', meta_action_fn=meta_action_fn) + context_reward, meta_reward = collect_experience_ops + collect_experience_ops = list(collect_experience_ops) + collect_experience_ops.append( + update_episode_rewards(tf.reduce_sum(context_reward), meta_reward, + reset_episode_cond)) + + meta_action_every_n = agent.tf_context.meta_action_every_n + with tf.control_dependencies(collect_experience_ops): + transition = [state, action, reward, discount, next_state] + + meta_action = tf.to_float( + tf.concat(context, -1)) # Meta agent action is low-level context + + meta_end = tf.logical_and( # End of meta-transition. 
+ tf.equal(agent.tf_context.t % meta_action_every_n, 1), + agent.tf_context.t > 1) + with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE): + states_var = tf.get_variable('states_var', + [meta_action_every_n, state.shape[-1]], + state.dtype) + actions_var = tf.get_variable('actions_var', + [meta_action_every_n, action.shape[-1]], + action.dtype) + state_var = tf.get_variable('state_var', state.shape, state.dtype) + reward_var = tf.get_variable('reward_var', reward.shape, reward.dtype) + meta_action_var = tf.get_variable('meta_action_var', + meta_action.shape, meta_action.dtype) + meta_context_var = [ + tf.get_variable('meta_context_var%d' % idx, + meta_context[idx].shape, meta_context[idx].dtype) + for idx in range(len(meta_context))] + + actions_var_upd = tf.scatter_update( + actions_var, (agent.tf_context.t - 2) % meta_action_every_n, action) + with tf.control_dependencies([actions_var_upd]): + actions = tf.identity(actions_var) + tf.zeros_like(actions_var) + meta_reward = tf.identity(meta_reward) + tf.zeros_like(meta_reward) + meta_reward = tf.reshape(meta_reward, reward.shape) + + reward = 0.1 * meta_reward + meta_transition = [state_var, meta_action_var, + reward_var + reward, + discount * (1 - tf.to_float(next_reset_episode_cond)), + next_state] + meta_transition.extend([states_var, actions]) + if store_context: # store current and next context into replay + transition += context + list(agent.context_vars) + meta_transition += meta_context_var + list(meta_agent.context_vars) + + meta_step_cond = tf.squeeze(tf.logical_and(step_cond, tf.logical_or(next_reset_episode_cond, meta_end))) + + collect_experience_op = tf.group( + replay_buffer.maybe_add(transition, step_cond), + meta_replay_buffer.maybe_add(meta_transition, meta_step_cond), + ) + + with tf.control_dependencies([collect_experience_op]): + collect_experience_op = tf.cond(reset_env_cond, + tf_env.reset, + tf_env.current_time_step) + + meta_period = tf.equal(agent.tf_context.t % meta_action_every_n, 1) + states_var_upd = tf.scatter_update( + states_var, (agent.tf_context.t - 1) % meta_action_every_n, + next_state) + state_var_upd = tf.assign( + state_var, + tf.cond(meta_period, lambda: next_state, lambda: state_var)) + reward_var_upd = tf.assign( + reward_var, + tf.cond(meta_period, + lambda: tf.zeros_like(reward_var), + lambda: reward_var + reward)) + meta_action = tf.to_float(tf.concat(agent.context_vars, -1)) + meta_action_var_upd = tf.assign( + meta_action_var, + tf.cond(meta_period, lambda: meta_action, lambda: meta_action_var)) + meta_context_var_upd = [ + tf.assign( + meta_context_var[idx], + tf.cond(meta_period, + lambda: meta_agent.context_vars[idx], + lambda: meta_context_var[idx])) + for idx in range(len(meta_context))] + + return tf.group( + collect_experience_op, + states_var_upd, + state_var_upd, + reward_var_upd, + meta_action_var_upd, + *meta_context_var_upd) + + +def sample_best_meta_actions(state_reprs, next_state_reprs, prev_meta_actions, + low_states, low_actions, low_state_reprs, + inverse_dynamics, uvf_agent, k=10): + """Return meta-actions which approximately maximize low-level log-probs.""" + sampled_actions = inverse_dynamics.sample(state_reprs, next_state_reprs, k, prev_meta_actions) + sampled_actions = tf.stop_gradient(sampled_actions) + sampled_log_probs = tf.reshape(uvf_agent.log_probs( + tf.tile(low_states, [k, 1, 1]), + tf.tile(low_actions, [k, 1, 1]), + tf.tile(low_state_reprs, [k, 1, 1]), + [tf.reshape(sampled_actions, [-1, sampled_actions.shape[-1]])]), + [k, low_states.shape[0], + 
low_states.shape[1], -1]) + fitness = tf.reduce_sum(sampled_log_probs, [2, 3]) + best_actions = tf.argmax(fitness, 0) + actions = tf.gather_nd( + sampled_actions, + tf.stack([best_actions, + tf.range(prev_meta_actions.shape[0], dtype=tf.int64)], -1)) + return actions + + +@gin.configurable +def train_uvf(train_dir, + environment=None, + num_bin_actions=3, + agent_class=None, + meta_agent_class=None, + state_preprocess_class=None, + inverse_dynamics_class=None, + exp_action_wrapper=None, + replay_buffer=None, + meta_replay_buffer=None, + replay_num_steps=1, + meta_replay_num_steps=1, + critic_optimizer=None, + actor_optimizer=None, + meta_critic_optimizer=None, + meta_actor_optimizer=None, + repr_optimizer=None, + relabel_contexts=False, + meta_relabel_contexts=False, + batch_size=64, + repeat_size=0, + num_episodes_train=2000, + initial_episodes=2, + initial_steps=None, + num_updates_per_observation=1, + num_collect_per_update=1, + num_collect_per_meta_update=1, + gamma=1.0, + meta_gamma=1.0, + reward_scale_factor=1.0, + target_update_period=1, + should_stop_early=None, + clip_gradient_norm=0.0, + summarize_gradients=False, + debug_summaries=False, + log_every_n_steps=100, + prefetch_queue_capacity=2, + policy_save_dir='policy', + save_policy_every_n_steps=1000, + save_policy_interval_secs=0, + replay_context_ratio=0.0, + next_state_as_context_ratio=0.0, + state_index=0, + zero_timer_ratio=0.0, + timer_index=-1, + debug=False, + max_policies_to_save=None, + max_steps_per_episode=None, + load_path=LOAD_PATH): + """Train an agent.""" + tf_env = create_maze_env.TFPyEnvironment(environment) + observation_spec = [tf_env.observation_spec()] + action_spec = [tf_env.action_spec()] + + max_steps_per_episode = max_steps_per_episode or tf_env.pyenv.max_episode_steps + + assert max_steps_per_episode, 'max_steps_per_episode need to be set' + + if initial_steps is None: + initial_steps = initial_episodes * max_steps_per_episode + + if agent_class.ACTION_TYPE == 'discrete': + assert False + else: + assert agent_class.ACTION_TYPE == 'continuous' + + assert agent_class.ACTION_TYPE == meta_agent_class.ACTION_TYPE + with tf.variable_scope('meta_agent'): + meta_agent = meta_agent_class( + observation_spec, + action_spec, + tf_env, + debug_summaries=debug_summaries) + meta_agent.set_replay(replay=meta_replay_buffer) + + with tf.variable_scope('uvf_agent'): + uvf_agent = agent_class( + observation_spec, + action_spec, + tf_env, + debug_summaries=debug_summaries) + uvf_agent.set_meta_agent(agent=meta_agent) + uvf_agent.set_replay(replay=replay_buffer) + + with tf.variable_scope('state_preprocess'): + state_preprocess = state_preprocess_class() + + with tf.variable_scope('inverse_dynamics'): + inverse_dynamics = inverse_dynamics_class( + meta_agent.sub_context_as_action_specs[0]) + + # Create counter variables + global_step = tf.contrib.framework.get_or_create_global_step() + num_episodes = tf.Variable(0, dtype=tf.int64, name='num_episodes') + num_resets = tf.Variable(0, dtype=tf.int64, name='num_resets') + num_updates = tf.Variable(0, dtype=tf.int64, name='num_updates') + num_meta_updates = tf.Variable(0, dtype=tf.int64, name='num_meta_updates') + episode_rewards = tf.Variable([0.] * 100, name='episode_rewards') + episode_meta_rewards = tf.Variable([0.] 
* 100, name='episode_meta_rewards') + + # Create counter variables summaries + train_utils.create_counter_summaries([ + ('environment_steps', global_step), + ('num_episodes', num_episodes), + ('num_resets', num_resets), + ('num_updates', num_updates), + ('num_meta_updates', num_meta_updates), + ('replay_buffer_adds', replay_buffer.get_num_adds()), + ('meta_replay_buffer_adds', meta_replay_buffer.get_num_adds()), + ]) + + tf.summary.scalar('avg_episode_rewards', + tf.reduce_mean(episode_rewards[1:])) + tf.summary.scalar('avg_episode_meta_rewards', + tf.reduce_mean(episode_meta_rewards[1:])) + tf.summary.histogram('episode_rewards', episode_rewards[1:]) + tf.summary.histogram('episode_meta_rewards', episode_meta_rewards[1:]) + + # Create init ops + action_fn = uvf_agent.action + action_fn = uvf_agent.add_noise_fn(action_fn, global_step=None) + meta_action_fn = meta_agent.action + meta_action_fn = meta_agent.add_noise_fn(meta_action_fn, global_step=None) + meta_actions_fn = meta_agent.actions + meta_actions_fn = meta_agent.add_noise_fn(meta_actions_fn, global_step=None) + init_collect_experience_op = collect_experience( + tf_env, + uvf_agent, + meta_agent, + state_preprocess, + replay_buffer, + meta_replay_buffer, + action_fn, + meta_action_fn, + environment_steps=global_step, + num_episodes=num_episodes, + num_resets=num_resets, + episode_rewards=episode_rewards, + episode_meta_rewards=episode_meta_rewards, + store_context=True, + disable_agent_reset=False, + ) + + # Create train ops + collect_experience_op = collect_experience( + tf_env, + uvf_agent, + meta_agent, + state_preprocess, + replay_buffer, + meta_replay_buffer, + action_fn, + meta_action_fn, + environment_steps=global_step, + num_episodes=num_episodes, + num_resets=num_resets, + episode_rewards=episode_rewards, + episode_meta_rewards=episode_meta_rewards, + store_context=True, + disable_agent_reset=False, + ) + + train_op_list = [] + repr_train_op = tf.constant(0.0) + for mode in ['meta', 'nometa']: + if mode == 'meta': + agent = meta_agent + buff = meta_replay_buffer + critic_opt = meta_critic_optimizer + actor_opt = meta_actor_optimizer + relabel = meta_relabel_contexts + num_steps = meta_replay_num_steps + my_gamma = meta_gamma, + n_updates = num_meta_updates + else: + agent = uvf_agent + buff = replay_buffer + critic_opt = critic_optimizer + actor_opt = actor_optimizer + relabel = relabel_contexts + num_steps = replay_num_steps + my_gamma = gamma + n_updates = num_updates + + with tf.name_scope(mode): + batch = buff.get_random_batch(batch_size, num_steps=num_steps) + states, actions, rewards, discounts, next_states = batch[:5] + with tf.name_scope('Reward'): + tf.summary.scalar('average_step_reward', tf.reduce_mean(rewards)) + rewards *= reward_scale_factor + batch_queue = slim.prefetch_queue.prefetch_queue( + [states, actions, rewards, discounts, next_states] + batch[5:], + capacity=prefetch_queue_capacity, + name='batch_queue') + + batch_dequeue = batch_queue.dequeue() + if repeat_size > 0: + batch_dequeue = [ + tf.tile(batch, (repeat_size+1,) + (1,) * (batch.shape.ndims - 1)) + for batch in batch_dequeue + ] + batch_size *= (repeat_size + 1) + states, actions, rewards, discounts, next_states = batch_dequeue[:5] + if mode == 'meta': + low_states = batch_dequeue[5] + low_actions = batch_dequeue[6] + low_state_reprs = state_preprocess(low_states) + state_reprs = state_preprocess(states) + next_state_reprs = state_preprocess(next_states) + + if mode == 'meta': # Re-label meta-action + prev_actions = actions + if 
FLAGS.goal_sample_strategy == 'None': + pass + elif FLAGS.goal_sample_strategy == 'FuN': + actions = inverse_dynamics.sample(state_reprs, next_state_reprs, 1, prev_actions, sc=0.1) + actions = tf.stop_gradient(actions) + elif FLAGS.goal_sample_strategy == 'sample': + actions = sample_best_meta_actions(state_reprs, next_state_reprs, prev_actions, + low_states, low_actions, low_state_reprs, + inverse_dynamics, uvf_agent, k=10) + else: + assert False + + if state_preprocess.trainable and mode == 'meta': + # Representation learning is based on meta-transitions, but is trained + # along with low-level policy updates. + repr_loss, _, _ = state_preprocess.loss(states, next_states, low_actions, low_states) + repr_train_op = slim.learning.create_train_op( + repr_loss, + repr_optimizer, + global_step=None, + update_ops=None, + summarize_gradients=summarize_gradients, + clip_gradient_norm=clip_gradient_norm, + variables_to_train=state_preprocess.get_trainable_vars(),) + + # Get contexts for training + contexts, next_contexts = agent.sample_contexts( + mode='train', batch_size=batch_size, + state=states, next_state=next_states, + ) + if not relabel: # Re-label context (in the style of TDM or HER). + contexts, next_contexts = ( + batch_dequeue[-2*len(contexts):-1*len(contexts)], + batch_dequeue[-1*len(contexts):]) + + merged_states = agent.merged_states(states, contexts) + merged_next_states = agent.merged_states(next_states, next_contexts) + if mode == 'nometa': + context_rewards, context_discounts = agent.compute_rewards( + 'train', state_reprs, actions, rewards, next_state_reprs, contexts) + elif mode == 'meta': # Meta-agent uses sum of rewards, not context-specific rewards. + _, context_discounts = agent.compute_rewards( + 'train', states, actions, rewards, next_states, contexts) + context_rewards = rewards + + if agent.gamma_index is not None: + context_discounts *= tf.cast( + tf.reshape(contexts[agent.gamma_index], (-1,)), + dtype=context_discounts.dtype) + else: context_discounts *= my_gamma + + critic_loss = agent.critic_loss(merged_states, actions, + context_rewards, context_discounts, + merged_next_states) + + critic_loss = tf.reduce_mean(critic_loss) + + actor_loss = agent.actor_loss(merged_states, actions, + context_rewards, context_discounts, + merged_next_states) + actor_loss *= tf.to_float( # Only update actor every N steps. + tf.equal(n_updates % target_update_period, 0)) + + critic_train_op = slim.learning.create_train_op( + critic_loss, + critic_opt, + global_step=n_updates, + update_ops=None, + summarize_gradients=summarize_gradients, + clip_gradient_norm=clip_gradient_norm, + variables_to_train=agent.get_trainable_critic_vars(),) + critic_train_op = uvf_utils.tf_print( + critic_train_op, [critic_train_op], + message='critic_loss', + print_freq=1000, + name='critic_loss') + train_op_list.append(critic_train_op) + if actor_loss is not None: + actor_train_op = slim.learning.create_train_op( + actor_loss, + actor_opt, + global_step=None, + update_ops=None, + summarize_gradients=summarize_gradients, + clip_gradient_norm=clip_gradient_norm, + variables_to_train=agent.get_trainable_actor_vars(),) + actor_train_op = uvf_utils.tf_print( + actor_train_op, [actor_train_op], + message='actor_loss', + print_freq=1000, + name='actor_loss') + train_op_list.append(actor_train_op) + + assert len(train_op_list) == 4 + # Update targets should happen after the networks have been updated. 
+ with tf.control_dependencies(train_op_list[2:]): + update_targets_op = uvf_utils.periodically( + uvf_agent.update_targets, target_update_period, 'update_targets') + if meta_agent is not None: + with tf.control_dependencies(train_op_list[:2]): + update_meta_targets_op = uvf_utils.periodically( + meta_agent.update_targets, target_update_period, 'update_targets') + + assert_op = tf.Assert( # Hack to get training to stop. + tf.less_equal(global_step, 200 + num_episodes_train * max_steps_per_episode), + [global_step]) + with tf.control_dependencies([update_targets_op, assert_op]): + train_op = tf.add_n(train_op_list[2:], name='post_update_targets') + # Representation training steps on every low-level policy training step. + train_op += repr_train_op + with tf.control_dependencies([update_meta_targets_op, assert_op]): + meta_train_op = tf.add_n(train_op_list[:2], + name='post_update_meta_targets') + + if debug_summaries: + train_.gen_debug_batch_summaries(batch) + slim.summaries.add_histogram_summaries( + uvf_agent.get_trainable_critic_vars(), 'critic_vars') + slim.summaries.add_histogram_summaries( + uvf_agent.get_trainable_actor_vars(), 'actor_vars') + + train_ops = train_utils.TrainOps(train_op, meta_train_op, + collect_experience_op) + + policy_save_path = os.path.join(train_dir, policy_save_dir, 'model.ckpt') + policy_vars = uvf_agent.get_actor_vars() + meta_agent.get_actor_vars() + [ + global_step, num_episodes, num_resets + ] + list(uvf_agent.context_vars) + list(meta_agent.context_vars) + state_preprocess.get_trainable_vars() + # add critic vars, since some test evaluation depends on them + policy_vars += uvf_agent.get_trainable_critic_vars() + meta_agent.get_trainable_critic_vars() + policy_saver = tf.train.Saver( + policy_vars, max_to_keep=max_policies_to_save, sharded=False) + + lowlevel_vars = (uvf_agent.get_actor_vars() + + uvf_agent.get_trainable_critic_vars() + + state_preprocess.get_trainable_vars()) + lowlevel_saver = tf.train.Saver(lowlevel_vars) + + def policy_save_fn(sess): + policy_saver.save( + sess, policy_save_path, global_step=global_step, write_meta_graph=False) + if save_policy_interval_secs > 0: + tf.logging.info( + 'Wait %d secs after save policy.' % save_policy_interval_secs) + time.sleep(save_policy_interval_secs) + + train_step_fn = train_utils.TrainStep( + max_number_of_steps=num_episodes_train * max_steps_per_episode + 100, + num_updates_per_observation=num_updates_per_observation, + num_collect_per_update=num_collect_per_update, + num_collect_per_meta_update=num_collect_per_meta_update, + log_every_n_steps=log_every_n_steps, + policy_save_fn=policy_save_fn, + save_policy_every_n_steps=save_policy_every_n_steps, + should_stop_early=should_stop_early).train_step + + local_init_op = tf.local_variables_initializer() + init_targets_op = tf.group(uvf_agent.update_targets(1.0), + meta_agent.update_targets(1.0)) + + def initialize_training_fn(sess): + """Initialize training function.""" + sess.run(local_init_op) + sess.run(init_targets_op) + if load_path: + tf.logging.info('Restoring low-level from %s' % load_path) + lowlevel_saver.restore(sess, load_path) + global_step_value = sess.run(global_step) + assert global_step_value == 0, 'Global step should be zero.' 
+ collect_experience_call = sess.make_callable( + init_collect_experience_op) + + for _ in range(initial_steps): + collect_experience_call() + + train_saver = tf.train.Saver(max_to_keep=2, sharded=True) + tf.logging.info('train dir: %s', train_dir) + return slim.learning.train( + train_ops, + train_dir, + train_step_fn=train_step_fn, + save_interval_secs=FLAGS.save_interval_secs, + saver=train_saver, + log_every_n_steps=0, + global_step=global_step, + master="", + is_chief=(FLAGS.task == 0), + save_summaries_secs=FLAGS.save_summaries_secs, + init_fn=initialize_training_fn) diff --git a/models/research/efficient-hrl/train_utils.py b/models/research/efficient-hrl/train_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ae23ef9f095ed1755c74579223b017239ccf1009 --- /dev/null +++ b/models/research/efficient-hrl/train_utils.py @@ -0,0 +1,175 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from collections import namedtuple +import os +import time + +import tensorflow as tf + +import gin.tf + +flags = tf.app.flags + + +flags.DEFINE_multi_string('config_file', None, + 'List of paths to the config files.') +flags.DEFINE_multi_string('params', None, + 'Newline separated list of Gin parameter bindings.') + +flags.DEFINE_string('train_dir', None, + 'Directory for writing logs/summaries during training.') +flags.DEFINE_string('master', 'local', + 'BNS name of the TensorFlow master to use.') +flags.DEFINE_integer('task', 0, 'task id') +flags.DEFINE_integer('save_interval_secs', 300, 'The frequency at which ' + 'checkpoints are saved, in seconds.') +flags.DEFINE_integer('save_summaries_secs', 30, 'The frequency at which ' + 'summaries are saved, in seconds.') +flags.DEFINE_boolean('summarize_gradients', False, + 'Whether to generate gradient summaries.') + +FLAGS = flags.FLAGS + +TrainOps = namedtuple('TrainOps', + ['train_op', 'meta_train_op', 'collect_experience_op']) + + +class TrainStep(object): + """Handles training step.""" + + def __init__(self, + max_number_of_steps=0, + num_updates_per_observation=1, + num_collect_per_update=1, + num_collect_per_meta_update=1, + log_every_n_steps=1, + policy_save_fn=None, + save_policy_every_n_steps=0, + should_stop_early=None): + """Returns a function that is executed at each step of slim training. + + Args: + max_number_of_steps: Optional maximum number of train steps to take. + num_updates_per_observation: Number of updates per observation. + log_every_n_steps: The frequency, in terms of global steps, that the loss + and global step and logged. + policy_save_fn: A tf.Saver().save function to save the policy. + save_policy_every_n_steps: How frequently to save the policy. + should_stop_early: Optional hook to report whether training should stop. 
+ Raises: + ValueError: If policy_save_fn is not provided when + save_policy_every_n_steps > 0. + """ + if save_policy_every_n_steps and policy_save_fn is None: + raise ValueError( + 'policy_save_fn is required when save_policy_every_n_steps > 0') + self.max_number_of_steps = max_number_of_steps + self.num_updates_per_observation = num_updates_per_observation + self.num_collect_per_update = num_collect_per_update + self.num_collect_per_meta_update = num_collect_per_meta_update + self.log_every_n_steps = log_every_n_steps + self.policy_save_fn = policy_save_fn + self.save_policy_every_n_steps = save_policy_every_n_steps + self.should_stop_early = should_stop_early + self.last_global_step_val = 0 + self.train_op_fn = None + self.collect_and_train_fn = None + tf.logging.info('Training for %d max_number_of_steps', + self.max_number_of_steps) + + def train_step(self, sess, train_ops, global_step, _): + """This function will be called at each step of training. + + This represents one step of the DDPG algorithm and can include: + 1. collect a transition + 2. update the target network + 3. train the actor + 4. train the critic + + Args: + sess: A Tensorflow session. + train_ops: A DdpgTrainOps tuple of train ops to run. + global_step: The global step. + + Returns: + A scalar total loss. + A boolean should stop. + """ + start_time = time.time() + if self.train_op_fn is None: + self.train_op_fn = sess.make_callable([train_ops.train_op, global_step]) + self.meta_train_op_fn = sess.make_callable([train_ops.meta_train_op, global_step]) + self.collect_fn = sess.make_callable([train_ops.collect_experience_op, global_step]) + self.collect_and_train_fn = sess.make_callable( + [train_ops.train_op, global_step, train_ops.collect_experience_op]) + self.collect_and_meta_train_fn = sess.make_callable( + [train_ops.meta_train_op, global_step, train_ops.collect_experience_op]) + for _ in range(self.num_collect_per_update - 1): + self.collect_fn() + for _ in range(self.num_updates_per_observation - 1): + self.train_op_fn() + + total_loss, global_step_val, _ = self.collect_and_train_fn() + if (global_step_val // self.num_collect_per_meta_update != + self.last_global_step_val // self.num_collect_per_meta_update): + self.meta_train_op_fn() + + time_elapsed = time.time() - start_time + should_stop = False + if self.max_number_of_steps: + should_stop = global_step_val >= self.max_number_of_steps + if global_step_val != self.last_global_step_val: + if (self.save_policy_every_n_steps and + global_step_val // self.save_policy_every_n_steps != + self.last_global_step_val // self.save_policy_every_n_steps): + self.policy_save_fn(sess) + + if (self.log_every_n_steps and + global_step_val % self.log_every_n_steps == 0): + tf.logging.info( + 'global step %d: loss = %.4f (%.3f sec/step) (%d steps/sec)', + global_step_val, total_loss, time_elapsed, 1 / time_elapsed) + + self.last_global_step_val = global_step_val + stop_early = bool(self.should_stop_early and self.should_stop_early()) + return total_loss, should_stop or stop_early + + +def create_counter_summaries(counters): + """Add named summaries to counters, a list of tuples (name, counter).""" + if counters: + with tf.name_scope('Counters/'): + for name, counter in counters: + tf.summary.scalar(name, counter) + + +def gen_debug_batch_summaries(batch): + """Generates summaries for the sampled replay batch.""" + states, actions, rewards, _, next_states = batch + with tf.name_scope('batch'): + for s in range(states.get_shape()[-1]): + tf.summary.histogram('states_%d' % s, 
states[:, s]) + for s in range(states.get_shape()[-1]): + tf.summary.histogram('next_states_%d' % s, next_states[:, s]) + for a in range(actions.get_shape()[-1]): + tf.summary.histogram('actions_%d' % a, actions[:, a]) + tf.summary.histogram('rewards', rewards) diff --git a/models/research/efficient-hrl/utils/__init__.py b/models/research/efficient-hrl/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/models/research/efficient-hrl/utils/__init__.py @@ -0,0 +1 @@ + diff --git a/models/research/efficient-hrl/utils/eval_utils.py b/models/research/efficient-hrl/utils/eval_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c88efc80fe1cc3399027cf71e310db85e3653df9 --- /dev/null +++ b/models/research/efficient-hrl/utils/eval_utils.py @@ -0,0 +1,151 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Evaluation utility functions. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np +import tensorflow as tf +from collections import namedtuple +logging = tf.logging +import gin.tf + + +@gin.configurable +def evaluate_checkpoint_repeatedly(checkpoint_dir, + evaluate_checkpoint_fn, + eval_interval_secs=600, + max_number_of_evaluations=None, + checkpoint_timeout=None, + timeout_fn=None): + """Evaluates a checkpointed model at a set interval.""" + if max_number_of_evaluations is not None and max_number_of_evaluations <= 0: + raise ValueError( + '`max_number_of_evaluations` must be either None or a positive number.') + + number_of_evaluations = 0 + for checkpoint_path in tf.contrib.training.checkpoints_iterator( + checkpoint_dir, + min_interval_secs=eval_interval_secs, + timeout=checkpoint_timeout, + timeout_fn=timeout_fn): + retries = 3 + for _ in range(retries): + try: + should_stop = evaluate_checkpoint_fn(checkpoint_path) + break + except tf.errors.DataLossError as e: + logging.warn( + 'Encountered a DataLossError while evaluating a checkpoint. This ' + 'can happen when reading a checkpoint before it is fully written. ' + 'Retrying...' + ) + time.sleep(2.0) + + +def compute_model_loss(sess, model_rollout_fn, states, actions): + """Computes model loss.""" + preds, losses = [], [] + preds.append(states[0]) + losses.append(0) + for state, action in zip(states[1:], actions[1:]): + pred = model_rollout_fn(sess, preds[-1], action) + loss = np.sqrt(np.sum((state - pred) ** 2)) + preds.append(pred) + losses.append(loss) + return preds, losses + + +def compute_average_reward(sess, env_base, step_fn, gamma, num_steps, + num_episodes): + """Computes the discounted reward for a given number of steps. + + Args: + sess: The tensorflow session. + env_base: A python environment. 
+ step_fn: A function that takes in `sess` and returns a list of + [state, action, reward, discount, transition_type] values. + gamma: discounting factor to apply to the reward. + num_steps: number of steps to compute the reward over. + num_episodes: number of episodes to average the reward over. + Returns: + average_reward: a scalar of discounted reward. + last_reward: last reward received. + """ + average_reward = 0 + average_last_reward = 0 + average_meta_reward = 0 + average_last_meta_reward = 0 + average_success = 0. + states, actions = None, None + for i in range(num_episodes): + env_base.end_episode() + env_base.begin_episode() + (reward, last_reward, meta_reward, last_meta_reward, + states, actions) = compute_reward( + sess, step_fn, gamma, num_steps) + s_reward = last_meta_reward # Navigation + success = (s_reward > -5.0) # When using diff=False + logging.info('Episode = %d, reward = %s, meta_reward = %f, ' + 'last_reward = %s, last meta_reward = %f, success = %s', + i, reward, meta_reward, last_reward, last_meta_reward, + success) + average_reward += reward + average_last_reward += last_reward + average_meta_reward += meta_reward + average_last_meta_reward += last_meta_reward + average_success += success + average_reward /= num_episodes + average_last_reward /= num_episodes + average_meta_reward /= num_episodes + average_last_meta_reward /= num_episodes + average_success /= num_episodes + return (average_reward, average_last_reward, + average_meta_reward, average_last_meta_reward, + average_success, + states, actions) + + +def compute_reward(sess, step_fn, gamma, num_steps): + """Computes the discounted reward for a given number of steps. + + Args: + sess: The tensorflow session. + step_fn: A function that takes in `sess` and returns a list of + [state, action, reward, discount, transition_type] values. + gamma: discounting factor to apply to the reward. + num_steps: number of steps to compute the reward over. + Returns: + reward: cumulative discounted reward. + last_reward: reward received at final step. + """ + + total_reward = 0 + total_meta_reward = 0 + gamma_step = 1 + states = [] + actions = [] + for _ in range(num_steps): + state, action, transition_type, reward, meta_reward, discount, _, _ = step_fn(sess) + total_reward += reward * gamma_step * discount + total_meta_reward += meta_reward * gamma_step * discount + gamma_step *= gamma + states.append(state) + actions.append(action) + return (total_reward, reward, total_meta_reward, meta_reward, + states, actions) diff --git a/models/research/efficient-hrl/utils/utils.py b/models/research/efficient-hrl/utils/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e188316c33b006a34baaaf729a02cca2e13d92e8 --- /dev/null +++ b/models/research/efficient-hrl/utils/utils.py @@ -0,0 +1,318 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""TensorFlow utility functions. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from copy import deepcopy +import tensorflow as tf +from tf_agents import specs +from tf_agents.utils import common + +_tf_print_counts = dict() +_tf_print_running_sums = dict() +_tf_print_running_counts = dict() +_tf_print_ids = 0 + + +def get_contextual_env_base(env_base, begin_ops=None, end_ops=None): + """Wrap env_base with additional tf ops.""" + # pylint: disable=protected-access + def init(self_, env_base): + self_._env_base = env_base + attribute_list = ["_render_mode", "_gym_env"] + for attribute in attribute_list: + if hasattr(env_base, attribute): + setattr(self_, attribute, getattr(env_base, attribute)) + if hasattr(env_base, "physics"): + self_._physics = env_base.physics + elif hasattr(env_base, "gym"): + class Physics(object): + def render(self, *args, **kwargs): + return env_base.gym.render("rgb_array") + physics = Physics() + self_._physics = physics + self_.physics = physics + def set_sess(self_, sess): + self_._sess = sess + if hasattr(self_._env_base, "set_sess"): + self_._env_base.set_sess(sess) + def begin_episode(self_): + self_._env_base.reset() + if begin_ops is not None: + self_._sess.run(begin_ops) + def end_episode(self_): + self_._env_base.reset() + if end_ops is not None: + self_._sess.run(end_ops) + return type("ContextualEnvBase", (env_base.__class__,), dict( + __init__=init, + set_sess=set_sess, + begin_episode=begin_episode, + end_episode=end_episode, + ))(env_base) + # pylint: enable=protected-access + + +def merge_specs(specs_): + """Merge TensorSpecs. + + Args: + specs_: List of TensorSpecs to be merged. + Returns: + a TensorSpec: a merged TensorSpec. + """ + shape = specs_[0].shape + dtype = specs_[0].dtype + name = specs_[0].name + for spec in specs_[1:]: + assert shape[1:] == spec.shape[1:], "incompatible shapes: %s, %s" % ( + shape, spec.shape) + assert dtype == spec.dtype, "incompatible dtypes: %s, %s" % ( + dtype, spec.dtype) + shape = merge_shapes((shape, spec.shape), axis=0) + return specs.TensorSpec( + shape=shape, + dtype=dtype, + name=name, + ) + + +def merge_shapes(shapes, axis=0): + """Merge TensorShapes. + + Args: + shapes: List of TensorShapes to be merged. + axis: optional, the axis to merge shaped. + Returns: + a TensorShape: a merged TensorShape. + """ + assert len(shapes) > 1 + dims = deepcopy(shapes[0].dims) + for shape in shapes[1:]: + assert shapes[0].ndims == shape.ndims + dims[axis] += shape.dims[axis] + return tf.TensorShape(dims=dims) + + +def get_all_vars(ignore_scopes=None): + """Get all tf variables in scope. + + Args: + ignore_scopes: A list of scope names to ignore. + Returns: + A list of all tf variables in scope. + """ + all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) + all_vars = [var for var in all_vars if ignore_scopes is None or not + any(var.name.startswith(scope) for scope in ignore_scopes)] + return all_vars + + +def clip(tensor, range_=None): + """Return a tf op which clips tensor according to range_. + + Args: + tensor: A Tensor to be clipped. + range_: None, or a tuple representing (minval, maxval) + Returns: + A clipped Tensor. 
+ """ + if range_ is None: + return tf.identity(tensor) + elif isinstance(range_, (tuple, list)): + assert len(range_) == 2 + return tf.clip_by_value(tensor, range_[0], range_[1]) + else: raise NotImplementedError("Unacceptable range input: %r" % range_) + + +def clip_to_bounds(value, minimum, maximum): + """Clips value to be between minimum and maximum. + + Args: + value: (tensor) value to be clipped. + minimum: (numpy float array) minimum value to clip to. + maximum: (numpy float array) maximum value to clip to. + Returns: + clipped_value: (tensor) `value` clipped to between `minimum` and `maximum`. + """ + value = tf.minimum(value, maximum) + return tf.maximum(value, minimum) + + +clip_to_spec = common.clip_to_spec +def _clip_to_spec(value, spec): + """Clips value to a given bounded tensor spec. + + Args: + value: (tensor) value to be clipped. + spec: (BoundedTensorSpec) spec containing min. and max. values for clipping. + Returns: + clipped_value: (tensor) `value` clipped to be compatible with `spec`. + """ + return clip_to_bounds(value, spec.minimum, spec.maximum) + + +join_scope = common.join_scope +def _join_scope(parent_scope, child_scope): + """Joins a parent and child scope using `/`, checking for empty/none. + + Args: + parent_scope: (string) parent/prefix scope. + child_scope: (string) child/suffix scope. + Returns: + joined scope: (string) parent and child scopes joined by /. + """ + if not parent_scope: + return child_scope + if not child_scope: + return parent_scope + return '/'.join([parent_scope, child_scope]) + + +def assign_vars(vars_, values): + """Returns the update ops for assigning a list of vars. + + Args: + vars_: A list of variables. + values: A list of tensors representing new values. + Returns: + A list of update ops for the variables. + """ + return [var.assign(value) for var, value in zip(vars_, values)] + + +def identity_vars(vars_): + """Return the identity ops for a list of tensors. + + Args: + vars_: A list of tensors. + Returns: + A list of identity ops. + """ + return [tf.identity(var) for var in vars_] + + +def tile(var, batch_size=1): + """Return tiled tensor. + + Args: + var: A tensor representing the state. + batch_size: Batch size. + Returns: + A tensor with shape [batch_size,] + var.shape. + """ + batch_var = tf.tile( + tf.expand_dims(var, 0), + (batch_size,) + (1,) * var.get_shape().ndims) + return batch_var + + +def batch_list(vars_list): + """Batch a list of variables. + + Args: + vars_list: A list of tensor variables. + Returns: + A list of tensor variables with additional first dimension. + """ + return [tf.expand_dims(var, 0) for var in vars_list] + + +def tf_print(op, + tensors, + message="", + first_n=-1, + name=None, + sub_messages=None, + print_freq=-1, + include_count=True): + """tf.Print, but to stdout.""" + # TODO(shanegu): `name` is deprecated. Remove from the rest of codes. 
+ global _tf_print_ids + _tf_print_ids += 1 + name = _tf_print_ids + _tf_print_counts[name] = 0 + if print_freq > 0: + _tf_print_running_sums[name] = [0 for _ in tensors] + _tf_print_running_counts[name] = 0 + def print_message(*xs): + """print message fn.""" + _tf_print_counts[name] += 1 + if print_freq > 0: + for i, x in enumerate(xs): + _tf_print_running_sums[name][i] += x + _tf_print_running_counts[name] += 1 + if (print_freq <= 0 or _tf_print_running_counts[name] >= print_freq) and ( + first_n < 0 or _tf_print_counts[name] <= first_n): + for i, x in enumerate(xs): + if print_freq > 0: + del x + x = _tf_print_running_sums[name][i]/_tf_print_running_counts[name] + if sub_messages is None: + sub_message = str(i) + else: + sub_message = sub_messages[i] + log_message = "%s, %s" % (message, sub_message) + if include_count: + log_message += ", count=%d" % _tf_print_counts[name] + tf.logging.info("[%s]: %s" % (log_message, x)) + if print_freq > 0: + for i, x in enumerate(xs): + _tf_print_running_sums[name][i] = 0 + _tf_print_running_counts[name] = 0 + return xs[0] + + print_op = tf.py_func(print_message, tensors, tensors[0].dtype) + with tf.control_dependencies([print_op]): + op = tf.identity(op) + return op + + +periodically = common.periodically +def _periodically(body, period, name='periodically'): + """Periodically performs a tensorflow op.""" + if period is None or period == 0: + return tf.no_op() + + if period < 0: + raise ValueError("period cannot be less than 0.") + + if period == 1: + return body() + + with tf.variable_scope(None, default_name=name): + counter = tf.get_variable( + "counter", + shape=[], + dtype=tf.int64, + trainable=False, + initializer=tf.constant_initializer(period, dtype=tf.int64)) + + def _wrapped_body(): + with tf.control_dependencies([body()]): + return counter.assign(1) + + update = tf.cond( + tf.equal(counter, period), _wrapped_body, + lambda: counter.assign_add(1)) + + return update + +soft_variables_update = common.soft_variables_update diff --git a/models/research/feelvos/CONTRIBUTING.md b/models/research/feelvos/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..939e5341e74dc2371c8b47f0e27b50581bed5f63 --- /dev/null +++ b/models/research/feelvos/CONTRIBUTING.md @@ -0,0 +1,28 @@ +# How to Contribute + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement. You (or your employer) retain the copyright to your contribution; +this simply gives us permission to use and redistribute your contributions as +part of the project. Head over to to see +your current agreements on file or to sign a new one. + +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Code reviews + +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. Consult +[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more +information on using pull requests. + +## Community Guidelines + +This project follows [Google's Open Source Community +Guidelines](https://opensource.google.com/conduct/). 
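The `tf_print` helper in `models/research/efficient-hrl/utils/utils.py` above wraps an op with a `tf.py_func` logging side effect. Below is a minimal usage sketch, assuming TF 1.x graph mode and that the `efficient-hrl` directory is on `PYTHONPATH` so the module imports as `utils.utils`; the toy variable, loss, and optimizer are hypothetical and only illustrate the `print_freq` averaging behaviour.

```python
# Hypothetical usage sketch for utils.tf_print (assumes TF 1.x graph mode and
# that efficient-hrl/ is on PYTHONPATH so `utils.utils` is importable).
import tensorflow as tf
from utils import utils  # assumed import path

tf.logging.set_verbosity(tf.logging.INFO)

x = tf.Variable(0.0)
loss = tf.square(x - 1.0)
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

# Piggyback logging on train_op: log the running mean of `loss`
# every 100 executions instead of on every step.
train_op = utils.tf_print(train_op, [loss], message='train loss',
                          print_freq=100)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  for _ in range(500):
    sess.run(train_op)
```

Because the logging op is attached via a control dependency, the averaged value is printed only when the wrapped `train_op` is actually executed.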
diff --git a/models/research/feelvos/LICENSE b/models/research/feelvos/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7 --- /dev/null +++ b/models/research/feelvos/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/models/research/feelvos/README.md b/models/research/feelvos/README.md new file mode 100644 index 0000000000000000000000000000000000000000..69017c8b19fc1427c47cbdfbdce408ffa92ec32c --- /dev/null +++ b/models/research/feelvos/README.md @@ -0,0 +1,102 @@ +![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +# FEELVOS: Fast End-to-End Embedding Learning for Video Object Segmentation + +FEELVOS is a fast model for video object segmentation which does not rely on fine-tuning on the +first frame. + +For details, please refer to our paper. If you find the code useful, please +also consider citing it. + +* FEELVOS: + +``` +@inproceedings{feelvos2019, + title={FEELVOS: Fast End-to-End Embedding Learning for Video Object Segmentation}, + author={Paul Voigtlaender and Yuning Chai and Florian Schroff and Hartwig Adam and Bastian Leibe and Liang-Chieh Chen}, + booktitle={CVPR}, + year={2019} +} +``` + +## Dependencies + +FEELVOS requires a good GPU with around 12 GB of memory and depends on the following libraries + +* TensorFlow +* Pillow +* Numpy +* Scipy +* Scikit Learn Image +* tf Slim (which is included in the "tensorflow/models/research/" checkout) +* DeepLab (which is included in the "tensorflow/models/research/" checkout) +* correlation_cost (optional, see below) + +For detailed steps to install Tensorflow, follow the [Tensorflow installation +instructions](https://www.tensorflow.org/install/). A typical user can install +Tensorflow using the following command: + +```bash +pip install tensorflow-gpu +``` + +The remaining libraries can also be installed with pip using: + +```bash +pip install pillow scipy scikit-image +``` + +## Dependency on correlation_cost + +For fast cross-correlation, we use correlation cost as an external dependency. By default FEELVOS +will use a slow and memory hungry fallback implementation without correlation_cost. If you care for +performance, you should set up correlation_cost by following the instructions in +correlation_cost/README and afterwards setting ```USE_CORRELATION_COST = True``` in +utils/embedding_utils.py. + +## Pre-trained Models + +We provide 2 pre-trained FEELVOS models, both are based on Xception-65: + +* [Trained on DAVIS 2017](http://download.tensorflow.org/models/feelvos_davis17_trained.tar.gz) +* [Trained on DAVIS 2017 and YouTube-VOS](http://download.tensorflow.org/models/feelvos_davis17_and_youtubevos_trained.tar.gz) + +Additionally, we provide a [DeepLab checkpoint for Xception-65 pre-trained on ImageNet and COCO](http://download.tensorflow.org/models/xception_65_coco_pretrained_2018_10_02.tar.gz), +which can be used as an initialization for training FEELVOS. 
+ +## Pre-computed Segmentation Masks + +We provide [pre-computed segmentation masks](http://download.tensorflow.org/models/feelvos_precomputed_masks.zip) +for FEELVOS both for training with and without YouTube-VOS data for the following datasets: + +* DAVIS 2017 validation set +* DAVIS 2017 test-dev set +* YouTube-Objects dataset + +## Local Inference +For a demo of local inference on DAVIS 2017 run + +```bash +# From tensorflow/models/research/feelvos +sh eval.sh +``` + +## Local Training +For a demo of local training on DAVIS 2017 run + +```bash +# From tensorflow/models/research/feelvos +sh train.sh +``` + +## Contacts (Maintainers) +* Paul Voigtlaender, github: [pvoigtlaender](https://github.com/pvoigtlaender) +* Yuning Chai, github: [yuningchai](https://github.com/yuningchai) +* Liang-Chieh Chen, github: [aquariusjay](https://github.com/aquariusjay) + +## License + +All the codes in feelvos folder is covered by the [LICENSE](https://github.com/tensorflow/models/blob/master/LICENSE) +under tensorflow/models. Please refer to the LICENSE for details. diff --git a/models/research/feelvos/__init__.py b/models/research/feelvos/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6f1373443d0ff84fd90714e41dade400ab41a22c --- /dev/null +++ b/models/research/feelvos/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== diff --git a/models/research/feelvos/common.py b/models/research/feelvos/common.py new file mode 100644 index 0000000000000000000000000000000000000000..98f5a9ce348aea36efa4b3cc57048d3659f18895 --- /dev/null +++ b/models/research/feelvos/common.py @@ -0,0 +1,163 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Provides flags that are common to scripts. + +Common flags from train/vis_video.py are collected in this script. 
+""" +import tensorflow as tf + +from deeplab import common + +flags = tf.app.flags + +flags.DEFINE_enum( + 'classification_loss', 'softmax_with_attention', + ['softmax', 'triplet', 'softmax_with_attention'], + 'Type of loss function used for classifying pixels, can be either softmax, ' + 'softmax_with_attention, or triplet.') + +flags.DEFINE_integer('k_nearest_neighbors', 1, + 'The number of nearest neighbors to use.') + +flags.DEFINE_integer('embedding_dimension', 100, 'The dimension used for the ' + 'learned embedding') + +flags.DEFINE_boolean('use_softmax_feedback', True, + 'Whether to give the softmax predictions of the last ' + 'frame as additional input to the segmentation head.') + +flags.DEFINE_boolean('sample_adjacent_and_consistent_query_frames', True, + 'If true, the query frames (all but the first frame ' + 'which is the reference frame) will be sampled such ' + 'that they are adjacent video frames and have the same ' + 'crop coordinates and flip augmentation. Note that if ' + 'use_softmax_feedback is True, this option will ' + 'automatically be activated.') + +flags.DEFINE_integer('embedding_seg_feature_dimension', 256, + 'The dimensionality used in the segmentation head layers.') + +flags.DEFINE_integer('embedding_seg_n_layers', 4, 'The number of layers in the ' + 'segmentation head.') + +flags.DEFINE_integer('embedding_seg_kernel_size', 7, 'The kernel size used in ' + 'the segmentation head.') + +flags.DEFINE_multi_integer('embedding_seg_atrous_rates', [], + 'The atrous rates to use for the segmentation head.') + +flags.DEFINE_boolean('normalize_nearest_neighbor_distances', True, + 'Whether to normalize the nearest neighbor distances ' + 'to [0,1] using sigmoid, scale and shift.') + +flags.DEFINE_boolean('also_attend_to_previous_frame', True, 'Whether to also ' + 'use nearest neighbor attention with respect to the ' + 'previous frame.') + +flags.DEFINE_bool('use_local_previous_frame_attention', True, + 'Whether to restrict the previous frame attention to a local ' + 'search window. Only has an effect, if ' + 'also_attend_to_previous_frame is True.') + +flags.DEFINE_integer('previous_frame_attention_window_size', 15, + 'The window size used for local previous frame attention,' + ' if use_local_previous_frame_attention is True.') + +flags.DEFINE_boolean('use_first_frame_matching', True, 'Whether to extract ' + 'features by matching to the reference frame. This should ' + 'always be true except for ablation experiments.') + +FLAGS = flags.FLAGS + +# Constants + +# Perform semantic segmentation predictions. +OUTPUT_TYPE = common.OUTPUT_TYPE + +# Semantic segmentation item names. +LABELS_CLASS = common.LABELS_CLASS +IMAGE = common.IMAGE +HEIGHT = common.HEIGHT +WIDTH = common.WIDTH +IMAGE_NAME = common.IMAGE_NAME +SOURCE_ID = 'source_id' +VIDEO_ID = 'video_id' +LABEL = common.LABEL +ORIGINAL_IMAGE = common.ORIGINAL_IMAGE +PRECEDING_FRAME_LABEL = 'preceding_frame_label' + +# Test set name. +TEST_SET = common.TEST_SET + +# Internal constants. +OBJECT_LABEL = 'object_label' + + +class VideoModelOptions(common.ModelOptions): + """Internal version of immutable class to hold model options.""" + + def __new__(cls, + outputs_to_num_classes, + crop_size=None, + atrous_rates=None, + output_stride=8): + """Constructor to set default values. + + Args: + outputs_to_num_classes: A dictionary from output type to the number of + classes. For example, for the task of semantic segmentation with 21 + semantic classes, we would have outputs_to_num_classes['semantic'] = 21. 
+ crop_size: A tuple [crop_height, crop_width]. + atrous_rates: A list of atrous convolution rates for ASPP. + output_stride: The ratio of input to output spatial resolution. + + Returns: + A new VideoModelOptions instance. + """ + self = super(VideoModelOptions, cls).__new__( + cls, + outputs_to_num_classes, + crop_size, + atrous_rates, + output_stride) + # Add internal flags. + self.classification_loss = FLAGS.classification_loss + + return self + + +def parse_decoder_output_stride(): + """Parses decoder output stride. + + FEELVOS assumes decoder_output_stride = 4. Thus, this function is created for + this particular purpose. + + Returns: + An integer specifying the decoder_output_stride. + + Raises: + ValueError: If decoder_output_stride is None or contains more than one + element. + """ + if FLAGS.decoder_output_stride: + decoder_output_stride = [ + int(x) for x in FLAGS.decoder_output_stride] + if len(decoder_output_stride) != 1: + raise ValueError('Expect decoder output stride has only one element.') + decoder_output_stride = decoder_output_stride[0] + else: + raise ValueError('Expect flag decoder output stride not to be None.') + return decoder_output_stride diff --git a/models/research/feelvos/correlation_cost/README.md b/models/research/feelvos/correlation_cost/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6cdbe550c7fcf63191f6967dd99c72cf341302bc --- /dev/null +++ b/models/research/feelvos/correlation_cost/README.md @@ -0,0 +1,36 @@ +# correlation_cost + +FEELVOS uses correlation_cost as an optional dependency to improve the speed and memory consumption +of cross-correlation. + +## Installation + +Unfortunately we cannot provide the code for correlation_cost directly, so you +will have to copy some files from this pull request +https://github.com/tensorflow/tensorflow/pull/21392/. For your convenience we +prepared scripts to download and adjust the code automatically. + +In the best case, all you need to do is run compile.sh with the path to your +CUDA installation (tested only with CUDA 9). +Note that the path should be to a folder containing the cuda folder, not to the +cuda folder itself, e.g. if your cuda is in /usr/local/cuda-9.0, you can create +a symlink /usr/local/cuda pointing to /usr/local/cuda-9.0 and then run + +```bash +sh build.sh /usr/local/ +``` + +This will + +* Download the code via ```sh get_code.sh ``` +* Apply minor adjustments to the code via ```sh fix_code.sh``` +* Clone the dependencies cub and thrust from github via ```sh clone_dependencies.sh``` +* Compile a shared library correlation_cost.so for correlation_cost via +```sh compile.sh "${CUDA_DIR}"``` + +Please review the licenses of correlation_cost, cub, and thrust. + +## Enabling correlation_cost +If you managed to create the correlation_cost.so file, then set +```USE_CORRELATION_COST = True``` in feelvos/utils/embedding_utils.py and try to run +```sh eval.sh```. diff --git a/models/research/feelvos/correlation_cost/build.sh b/models/research/feelvos/correlation_cost/build.sh new file mode 100644 index 0000000000000000000000000000000000000000..37d9adb3147df07646a462fd170772393abf5642 --- /dev/null +++ b/models/research/feelvos/correlation_cost/build.sh @@ -0,0 +1,37 @@ +#!/bin/bash +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# +# This script is used to download and build the code for correlation_cost. +# +# Usage: +# sh ./build.sh cuda_dir +# Where cuda_dir points to a directory containing the cuda folder (not the cuda folder itself). +# +# + +if [ "$#" -ne 1 ]; then + echo "Illegal number of parameters, usage: ./build.sh cuda_dir" + echo "Where cuda_dir points to a directory containing the cuda folder (not the cuda folder itself)" + exit 1 +fi + +set -e +set -x + +sh ./get_code.sh +sh ./fix_code.sh +sh ./clone_dependencies.sh +sh ./compile.sh $1 diff --git a/models/research/feelvos/correlation_cost/clone_dependencies.sh b/models/research/feelvos/correlation_cost/clone_dependencies.sh new file mode 100644 index 0000000000000000000000000000000000000000..9174313f58a833a5ab547e21c63cdc87681cbc5d --- /dev/null +++ b/models/research/feelvos/correlation_cost/clone_dependencies.sh @@ -0,0 +1,31 @@ +#!/bin/bash +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# +# This script is used to clone the dependencies, i.e. cub and thrust, of correlation_cost from github. +# +# Usage: +# sh ./clone_dependencies.sh +# +# + +# Clone cub. +if [ ! -d cub ] ; then + git clone https://github.com/dmlc/cub.git +fi +# Clone thrust. +if [ ! -d thrust ] ; then + git clone https://github.com/thrust/thrust.git +fi diff --git a/models/research/feelvos/correlation_cost/compile.sh b/models/research/feelvos/correlation_cost/compile.sh new file mode 100644 index 0000000000000000000000000000000000000000..6025292dfa78b44dd6fcf2f1b349af936a43fcc7 --- /dev/null +++ b/models/research/feelvos/correlation_cost/compile.sh @@ -0,0 +1,46 @@ +#!/bin/bash +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +# +# This script is used to compile the code for correlation_cost and create correlation_cost.so. +# +# Usage: +# sh ./compile.sh cuda_dir +# Where cuda_dir points to a directory containing the cuda folder (not the cuda folder itself). +# +# + +if [ "$#" -ne 1 ]; then + echo "Illegal number of parameters, usage: ./compile.sh cuda_dir" + exit 1 +fi +CUDA_DIR=$1 + +if [ ! -d "${CUDA_DIR}/cuda" ]; then + echo "cuda_dir must point to a directory containing the cuda folder, not to the cuda folder itself" + exit 1 +fi + +TF_CFLAGS=( $(python -c 'import tensorflow as tf; print(" ".join(tf.sysconfig.get_compile_flags()))') ) +TF_LFLAGS=( $(python -c 'import tensorflow as tf; print(" ".join(tf.sysconfig.get_link_flags()))') ) +CUB_DIR=cub +THRUST_DIR=thrust + +# Depending on the versions of your nvcc and gcc, the flag --expt-relaxed-constexpr might be required or should be removed. +# If nvcc complains about a too new gcc version, you can point it to another gcc +# version by using something like nvcc -ccbin /path/to/your/gcc6 +nvcc -std=c++11 --expt-relaxed-constexpr -I ./ -I ${CUB_DIR}/../ -I ${THRUST_DIR} -I ${CUDA_DIR}/ -c -o correlation_cost_op_gpu.o kernels/correlation_cost_op_gpu.cu.cc ${TF_CFLAGS[@]} -D GOOGLE_CUDA=1 -x cu -Xcompiler -fPIC + +g++ -std=c++11 -I ./ -L ${CUDA_DIR}/cuda/lib64 -shared -o correlation_cost.so ops/correlation_cost_op.cc kernels/correlation_cost_op.cc correlation_cost_op_gpu.o ${TF_CFLAGS[@]} -fPIC -lcudart ${TF_LFLAGS[@]} -D GOOGLE_CUDA=1 diff --git a/models/research/feelvos/correlation_cost/fix_code.sh b/models/research/feelvos/correlation_cost/fix_code.sh new file mode 100644 index 0000000000000000000000000000000000000000..d4f285db3d745fc55a20bac57f97c6ca2fd8a5c4 --- /dev/null +++ b/models/research/feelvos/correlation_cost/fix_code.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# +# This script is used to modify the downloaded code. 
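+# The sed commands below rewrite the tensorflow/contrib include paths and
+# replace the contrib op-loading code with tf.load_op_library, so the files
+# build and load outside the TensorFlow source tree.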
+# +# Usage: +# sh ./fix_code.sh +# +# + +sed -i "s/tensorflow\/contrib\/correlation_cost\///g" kernels/correlation_cost_op_gpu.cu.cc +sed -i "s/tensorflow\/contrib\/correlation_cost\///g" kernels/correlation_cost_op.cc +sed -i "s/external\/cub_archive\//cub\//g" kernels/correlation_cost_op_gpu.cu.cc + +sed -i "s/from tensorflow.contrib.util import loader/import tensorflow as tf/g" python/ops/correlation_cost_op.py +grep -v "from tensorflow" python/ops/correlation_cost_op.py | grep -v resource_loader.get_path_to_datafile > correlation_cost_op.py.tmp && mv correlation_cost_op.py.tmp python/ops/correlation_cost_op.py +sed -i "s/array_ops/tf/g" python/ops/correlation_cost_op.py +sed -i "s/ops/tf/g" python/ops/correlation_cost_op.py +sed -i "s/loader.load_op_library(/tf.load_op_library('feelvos\/correlation_cost\/correlation_cost.so')/g" python/ops/correlation_cost_op.py +sed -i "s/gen_correlation_cost_op/_correlation_cost_op_so/g" python/ops/correlation_cost_op.py diff --git a/models/research/feelvos/correlation_cost/get_code.sh b/models/research/feelvos/correlation_cost/get_code.sh new file mode 100644 index 0000000000000000000000000000000000000000..337142166ac4b61835417e807ef0a495532d749c --- /dev/null +++ b/models/research/feelvos/correlation_cost/get_code.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# +# This script is used to download the code for correlation_cost. 
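+# The files are fetched with wget from a pinned commit of the
+# tensorflow/tensorflow repository (see the URLs below).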
+# +# Usage: +# sh ./get_code.sh +# +# + +mkdir -p kernels ops python/ops +touch __init__.py +touch python/__init__.py +touch python/ops/__init__.py +wget https://raw.githubusercontent.com/tensorflow/tensorflow/91b163b9bd8dd0f8c2631b4245a67dfd387536a6/tensorflow/contrib/correlation_cost/ops/correlation_cost_op.cc -O ops/correlation_cost_op.cc +wget https://raw.githubusercontent.com/tensorflow/tensorflow/91b163b9bd8dd0f8c2631b4245a67dfd387536a6/tensorflow/contrib/correlation_cost/python/ops/correlation_cost_op.py -O python/ops/correlation_cost_op.py +wget https://raw.githubusercontent.com/tensorflow/tensorflow/91b163b9bd8dd0f8c2631b4245a67dfd387536a6/tensorflow/contrib/correlation_cost/kernels/correlation_cost_op.cc -O kernels/correlation_cost_op.cc +wget https://raw.githubusercontent.com/tensorflow/tensorflow/91b163b9bd8dd0f8c2631b4245a67dfd387536a6/tensorflow/contrib/correlation_cost/kernels/correlation_cost_op.h -O kernels/correlation_cost_op.h +wget https://raw.githubusercontent.com/tensorflow/tensorflow/91b163b9bd8dd0f8c2631b4245a67dfd387536a6/tensorflow/contrib/correlation_cost/kernels/correlation_cost_op_gpu.cu.cc -O kernels/correlation_cost_op_gpu.cu.cc diff --git a/models/research/feelvos/datasets/__init__.py b/models/research/feelvos/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6f1373443d0ff84fd90714e41dade400ab41a22c --- /dev/null +++ b/models/research/feelvos/datasets/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== diff --git a/models/research/feelvos/datasets/build_davis2017_data.py b/models/research/feelvos/datasets/build_davis2017_data.py new file mode 100644 index 0000000000000000000000000000000000000000..5e093fc3b4531f5439957ea3608770441bd5ce4a --- /dev/null +++ b/models/research/feelvos/datasets/build_davis2017_data.py @@ -0,0 +1,163 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Converts DAVIS 2017 data to TFRecord file format with SequenceExample protos. 
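+
+Example usage (a hypothetical invocation; flag defaults are defined below):
+  python build_davis2017_data.py --data_folder=DAVIS2017/ --imageset=val \
+    --output_dir=./tfrecord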
+""" + +import io +import math +import os +from StringIO import StringIO +import numpy as np +import PIL +import tensorflow as tf + +FLAGS = tf.app.flags.FLAGS + +tf.app.flags.DEFINE_string('data_folder', 'DAVIS2017/', + 'Folder containing the DAVIS 2017 data') + +tf.app.flags.DEFINE_string('imageset', 'val', + 'Which subset to use, either train or val') + +tf.app.flags.DEFINE_string( + 'output_dir', './tfrecord', + 'Path to save converted TFRecords of TensorFlow examples.') + +_NUM_SHARDS_TRAIN = 10 +_NUM_SHARDS_VAL = 1 + + +def read_image(path): + with open(path) as fid: + image_str = fid.read() + image = PIL.Image.open(io.BytesIO(image_str)) + w, h = image.size + return image_str, (h, w) + + +def read_annotation(path): + """Reads a single image annotation from a png image. + + Args: + path: Path to the png image. + + Returns: + png_string: The png encoded as string. + size: Tuple of (height, width). + """ + with open(path) as fid: + x = np.array(PIL.Image.open(fid)) + h, w = x.shape + im = PIL.Image.fromarray(x) + + output = StringIO() + im.save(output, format='png') + png_string = output.getvalue() + output.close() + + return png_string, (h, w) + + +def process_video(key, input_dir, anno_dir): + """Creates a SequenceExample for the video. + + Args: + key: Name of the video. + input_dir: Directory which contains the image files. + anno_dir: Directory which contains the annotation files. + + Returns: + The created SequenceExample. + """ + frame_names = sorted(tf.gfile.ListDirectory(input_dir)) + anno_files = sorted(tf.gfile.ListDirectory(anno_dir)) + assert len(frame_names) == len(anno_files) + + sequence = tf.train.SequenceExample() + context = sequence.context.feature + features = sequence.feature_lists.feature_list + + for i, name in enumerate(frame_names): + image_str, image_shape = read_image( + os.path.join(input_dir, name)) + anno_str, anno_shape = read_annotation( + os.path.join(anno_dir, name[:-4] + '.png')) + image_encoded = features['image/encoded'].feature.add() + image_encoded.bytes_list.value.append(image_str) + segmentation_encoded = features['segmentation/object/encoded'].feature.add() + segmentation_encoded.bytes_list.value.append(anno_str) + + np.testing.assert_array_equal(np.array(image_shape), np.array(anno_shape)) + + if i == 0: + first_shape = np.array(image_shape) + else: + np.testing.assert_array_equal(np.array(image_shape), first_shape) + + context['video_id'].bytes_list.value.append(key.encode('ascii')) + context['clip/frames'].int64_list.value.append(len(frame_names)) + context['image/format'].bytes_list.value.append('JPEG') + context['image/channels'].int64_list.value.append(3) + context['image/height'].int64_list.value.append(first_shape[0]) + context['image/width'].int64_list.value.append(first_shape[1]) + context['segmentation/object/format'].bytes_list.value.append('PNG') + context['segmentation/object/height'].int64_list.value.append(first_shape[0]) + context['segmentation/object/width'].int64_list.value.append(first_shape[1]) + + return sequence + + +def convert(data_folder, imageset, output_dir, num_shards): + """Converts the specified subset of DAVIS 2017 to TFRecord format. + + Args: + data_folder: The path to the DAVIS 2017 data. + imageset: The subset to use, either train or val. + output_dir: Where to store the TFRecords. + num_shards: The number of shards used for storing the data. 
+ """ + sets_file = os.path.join(data_folder, 'ImageSets', '2017', imageset + '.txt') + vids = [x.strip() for x in open(sets_file).readlines()] + num_vids = len(vids) + num_vids_per_shard = int(math.ceil(num_vids) / float(num_shards)) + for shard_id in range(num_shards): + output_filename = os.path.join( + output_dir, + '%s-%05d-of-%05d.tfrecord' % (imageset, shard_id, num_shards)) + with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer: + start_idx = shard_id * num_vids_per_shard + end_idx = min((shard_id + 1) * num_vids_per_shard, num_vids) + for i in range(start_idx, end_idx): + print('Converting video %d/%d shard %d video %s' % ( + i + 1, num_vids, shard_id, vids[i])) + img_dir = os.path.join(data_folder, 'JPEGImages', '480p', vids[i]) + anno_dir = os.path.join(data_folder, 'Annotations', '480p', vids[i]) + example = process_video(vids[i], img_dir, anno_dir) + tfrecord_writer.write(example.SerializeToString()) + + +def main(unused_argv): + imageset = FLAGS.imageset + assert imageset in ('train', 'val') + if imageset == 'train': + num_shards = _NUM_SHARDS_TRAIN + else: + num_shards = _NUM_SHARDS_VAL + convert(FLAGS.data_folder, FLAGS.imageset, FLAGS.output_dir, num_shards) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/feelvos/datasets/download_and_convert_davis17.sh b/models/research/feelvos/datasets/download_and_convert_davis17.sh new file mode 100644 index 0000000000000000000000000000000000000000..011be61ba7586c8f3d141ccc00194d1c7ae56c3a --- /dev/null +++ b/models/research/feelvos/datasets/download_and_convert_davis17.sh @@ -0,0 +1,77 @@ +#!/bin/bash +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# +# Script to download and preprocess the DAVIS 2017 dataset. +# +# Usage: +# bash ./download_and_convert_davis17.sh + +# Exit immediately if a command exits with a non-zero status. +set -e + +CURRENT_DIR=$(pwd) +WORK_DIR="./davis17" +mkdir -p "${WORK_DIR}" +cd "${WORK_DIR}" + +# Helper function to download and unpack the DAVIS 2017 dataset. +download_and_uncompress() { + local BASE_URL=${1} + local FILENAME=${2} + + if [ ! -f "${FILENAME}" ]; then + echo "Downloading ${FILENAME} to ${WORK_DIR}" + wget -nd -c "${BASE_URL}/${FILENAME}" + echo "Uncompressing ${FILENAME}" + unzip "${FILENAME}" + fi +} + +BASE_URL="https://data.vision.ee.ethz.ch/csergi/share/davis/" +FILENAME="DAVIS-2017-trainval-480p.zip" + +download_and_uncompress "${BASE_URL}" "${FILENAME}" + +cd "${CURRENT_DIR}" + +# Root path for DAVIS 2017 dataset. +DAVIS_ROOT="${WORK_DIR}/DAVIS" + +# Build TFRecords of the dataset. +# First, create output directory for storing TFRecords. +OUTPUT_DIR="${WORK_DIR}/tfrecord" +mkdir -p "${OUTPUT_DIR}" + +IMAGE_FOLDER="${DAVIS_ROOT}/JPEGImages" +LIST_FOLDER="${DAVIS_ROOT}/ImageSets/Segmentation" + +# Convert validation set. +if [ ! 
-f "${OUTPUT_DIR}/val-00000-of-00001.tfrecord" ]; then + echo "Converting DAVIS 2017 dataset (val)..." + python ./build_davis2017_data.py \ + --data_folder="${DAVIS_ROOT}" \ + --imageset=val \ + --output_dir="${OUTPUT_DIR}" +fi + +# Convert training set. +if [ ! -f "${OUTPUT_DIR}/train-00009-of-00010.tfrecord" ]; then + echo "Converting DAVIS 2017 dataset (train)..." + python ./build_davis2017_data.py \ + --data_folder="${DAVIS_ROOT}" \ + --imageset=train \ + --output_dir="${OUTPUT_DIR}" +fi diff --git a/models/research/feelvos/datasets/tfsequence_example_decoder.py b/models/research/feelvos/datasets/tfsequence_example_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..2fa3e95d5b98eb00aa485371037b4ad6b0e7ece3 --- /dev/null +++ b/models/research/feelvos/datasets/tfsequence_example_decoder.py @@ -0,0 +1,118 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Contains the TFExampleDecoder. + +The TFExampleDecode is a DataDecoder used to decode TensorFlow Example protos. +In order to do so each requested item must be paired with one or more Example +features that are parsed to produce the Tensor-based manifestation of the item. +""" + +import tensorflow as tf +slim = tf.contrib.slim +data_decoder = slim.data_decoder + + +class TFSequenceExampleDecoder(data_decoder.DataDecoder): + """A decoder for TensorFlow SequenceExamples. + + Decoding SequenceExample proto buffers is comprised of two stages: + (1) Example parsing and (2) tensor manipulation. + + In the first stage, the tf.parse_single_sequence_example function is called + with a list of FixedLenFeatures and SparseLenFeatures. These instances tell TF + how to parse the example. The output of this stage is a set of tensors. + + In the second stage, the resulting tensors are manipulated to provide the + requested 'item' tensors. + + To perform this decoding operation, a SequenceExampleDecoder is given a list + of ItemHandlers. Each ItemHandler indicates the set of features for stage 1 + and contains the instructions for post_processing its tensors for stage 2. + """ + + def __init__(self, keys_to_context_features, keys_to_sequence_features, + items_to_handlers): + """Constructs the decoder. + + Args: + keys_to_context_features: a dictionary from TF-SequenceExample context + keys to either tf.VarLenFeature or tf.FixedLenFeature instances. + See tensorflow's parsing_ops.py. + keys_to_sequence_features: a dictionary from TF-SequenceExample sequence + keys to either tf.VarLenFeature or tf.FixedLenSequenceFeature instances. + See tensorflow's parsing_ops.py. + items_to_handlers: a dictionary from items (strings) to ItemHandler + instances. Note that the ItemHandler's are provided the keys that they + use to return the final item Tensors. + + Raises: + ValueError: if the same key is present for context features and sequence + features. 
+ """ + unique_keys = set() + unique_keys.update(keys_to_context_features) + unique_keys.update(keys_to_sequence_features) + if len(unique_keys) != ( + len(keys_to_context_features) + len(keys_to_sequence_features)): + # This situation is ambiguous in the decoder's keys_to_tensors variable. + raise ValueError('Context and sequence keys are not unique. \n' + ' Context keys: %s \n Sequence keys: %s' % + (list(keys_to_context_features.keys()), + list(keys_to_sequence_features.keys()))) + + self._keys_to_context_features = keys_to_context_features + self._keys_to_sequence_features = keys_to_sequence_features + self._items_to_handlers = items_to_handlers + + def list_items(self): + """See base class.""" + return self._items_to_handlers.keys() + + def decode(self, serialized_example, items=None): + """Decodes the given serialized TF-SequenceExample. + + Args: + serialized_example: a serialized TF-SequenceExample tensor. + items: the list of items to decode. These must be a subset of the item + keys in self._items_to_handlers. If `items` is left as None, then all + of the items in self._items_to_handlers are decoded. + + Returns: + the decoded items, a list of tensor. + """ + + context, feature_list = tf.parse_single_sequence_example( + serialized_example, self._keys_to_context_features, + self._keys_to_sequence_features) + + # Reshape non-sparse elements just once: + for k in self._keys_to_context_features: + v = self._keys_to_context_features[k] + if isinstance(v, tf.FixedLenFeature): + context[k] = tf.reshape(context[k], v.shape) + + if not items: + items = self._items_to_handlers.keys() + + outputs = [] + for item in items: + handler = self._items_to_handlers[item] + keys_to_tensors = { + key: context[key] if key in context else feature_list[key] + for key in handler.keys + } + outputs.append(handler.tensors_to_item(keys_to_tensors)) + return outputs diff --git a/models/research/feelvos/datasets/video_dataset.py b/models/research/feelvos/datasets/video_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..17b62e989af866df0232a0e6d921faee84fe1fa7 --- /dev/null +++ b/models/research/feelvos/datasets/video_dataset.py @@ -0,0 +1,196 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Provides data from video object segmentation datasets. + +This file provides both images and annotations (instance segmentations) for +TensorFlow. Currently, we support the following datasets: + +1. DAVIS 2017 (https://davischallenge.org/davis2017/code.html). + +2. DAVIS 2016 (https://davischallenge.org/davis2016/code.html). + +3. YouTube-VOS (https://youtube-vos.org/dataset/download). 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import os.path +import tensorflow as tf +from feelvos.datasets import tfsequence_example_decoder + +slim = tf.contrib.slim +dataset = slim.dataset +tfexample_decoder = slim.tfexample_decoder + + +_ITEMS_TO_DESCRIPTIONS = { + 'image': 'A color image of varying height and width.', + 'labels_class': ('A semantic segmentation label whose size matches image.' + 'Its values range from 0 (background) to num_classes.'), +} + +# Named tuple to describe the dataset properties. +DatasetDescriptor = collections.namedtuple( + 'DatasetDescriptor', + ['splits_to_sizes', # Splits of the dataset into training, val, and test. + 'num_classes', # Number of semantic classes. + 'ignore_label', # Ignore label value. + ] +) + +_DAVIS_2016_INFORMATION = DatasetDescriptor( + splits_to_sizes={'train': [30, 1830], + 'val': [20, 1376]}, + num_classes=2, + ignore_label=255, +) + +_DAVIS_2017_INFORMATION = DatasetDescriptor( + splits_to_sizes={'train': [60, 4219], + 'val': [30, 2023], + 'test-dev': [30, 2037]}, + num_classes=None, # Number of instances per videos differ. + ignore_label=255, +) + +_YOUTUBE_VOS_2018_INFORMATION = DatasetDescriptor( + # Leave these sizes as None to allow for different splits into + # training and validation sets. + splits_to_sizes={'train': [None, None], + 'val': [None, None]}, + num_classes=None, # Number of instances per video differs. + ignore_label=255, +) + +_DATASETS_INFORMATION = { + 'davis_2016': _DAVIS_2016_INFORMATION, + 'davis_2017': _DAVIS_2017_INFORMATION, + 'youtube_vos_2018': _YOUTUBE_VOS_2018_INFORMATION, +} + +# Default file pattern of SSTable. Note we include '-' to avoid the confusion +# between `train-` and `trainval-` sets. +_FILE_PATTERN = '%s-*' + + +def get_dataset(dataset_name, + split_name, + dataset_dir, + file_pattern=None, + data_type='tf_sequence_example', + decode_video_frames=False): + """Gets an instance of slim Dataset. + + Args: + dataset_name: String, dataset name. + split_name: String, the train/val Split name. + dataset_dir: String, the directory of the dataset sources. + file_pattern: String, file pattern of SSTable. + data_type: String, data type. Currently supports 'tf_example' and + 'annotated_image'. + decode_video_frames: Boolean, decode the images or not. Not decoding it here + is useful if we subsample later + + Returns: + An instance of slim Dataset. + + Raises: + ValueError: If the dataset_name or split_name is not recognized, or if + the dataset_type is not supported. + """ + if dataset_name not in _DATASETS_INFORMATION: + raise ValueError('The specified dataset is not supported yet.') + + splits_to_sizes = _DATASETS_INFORMATION[dataset_name].splits_to_sizes + + if split_name not in splits_to_sizes: + raise ValueError('data split name %s not recognized' % split_name) + + # Prepare the variables for different datasets. 
+ num_classes = _DATASETS_INFORMATION[dataset_name].num_classes + ignore_label = _DATASETS_INFORMATION[dataset_name].ignore_label + + if file_pattern is None: + file_pattern = _FILE_PATTERN + file_pattern = os.path.join(dataset_dir, file_pattern % split_name) + if data_type == 'tf_sequence_example': + keys_to_context_features = { + 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'), + 'image/height': tf.FixedLenFeature((), tf.int64, default_value=0), + 'image/width': tf.FixedLenFeature((), tf.int64, default_value=0), + 'segmentation/object/format': tf.FixedLenFeature( + (), tf.string, default_value='png'), + 'video_id': tf.FixedLenFeature((), tf.string, default_value='unknown') + } + label_name = 'class' if dataset_name == 'davis_2016' else 'object' + keys_to_sequence_features = { + 'image/encoded': tf.FixedLenSequenceFeature((), dtype=tf.string), + 'segmentation/{}/encoded'.format(label_name): + tf.FixedLenSequenceFeature((), tf.string), + } + items_to_handlers = { + 'height': tfexample_decoder.Tensor('image/height'), + 'width': tfexample_decoder.Tensor('image/width'), + 'video_id': tfexample_decoder.Tensor('video_id') + } + if decode_video_frames: + decode_image_handler = tfexample_decoder.Image( + image_key='image/encoded', + format_key='image/format', + channels=3, + repeated=True) + items_to_handlers['image'] = decode_image_handler + decode_label_handler = tfexample_decoder.Image( + image_key='segmentation/{}/encoded'.format(label_name), + format_key='segmentation/{}/format'.format(label_name), + channels=1, + repeated=True) + items_to_handlers['labels_class'] = decode_label_handler + else: + items_to_handlers['image/encoded'] = tfexample_decoder.Tensor( + 'image/encoded') + items_to_handlers[ + 'segmentation/object/encoded'] = tfexample_decoder.Tensor( + 'segmentation/{}/encoded'.format(label_name)) + decoder = tfsequence_example_decoder.TFSequenceExampleDecoder( + keys_to_context_features, keys_to_sequence_features, items_to_handlers) + else: + raise ValueError('Unknown data type.') + + size = splits_to_sizes[split_name] + if isinstance(size, collections.Sequence): + num_videos = size[0] + num_samples = size[1] + else: + num_videos = 0 + num_samples = size + + return dataset.Dataset( + data_sources=file_pattern, + reader=tf.TFRecordReader, + decoder=decoder, + num_samples=num_samples, + num_videos=num_videos, + items_to_descriptions=_ITEMS_TO_DESCRIPTIONS, + ignore_label=ignore_label, + num_classes=num_classes, + name=dataset_name, + multi_label=True) diff --git a/models/research/feelvos/eval.sh b/models/research/feelvos/eval.sh new file mode 100644 index 0000000000000000000000000000000000000000..96cb7f409a1e652ba8263f35c3786cb0cb77f5d1 --- /dev/null +++ b/models/research/feelvos/eval.sh @@ -0,0 +1,86 @@ +#!/bin/bash +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
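A hypothetical call to get_dataset as defined above; the tfrecord directory is an assumed location (the download_and_convert_davis17.sh script referenced by eval.sh and train.sh produces such a directory), not a path prescribed by this file.

from feelvos.datasets import video_dataset

davis_val = video_dataset.get_dataset(
    'davis_2017',
    'val',
    dataset_dir='/path/to/feelvos/datasets/davis17/tfrecord',  # assumed location
    data_type='tf_sequence_example',
    decode_video_frames=True)
# Per _DAVIS_2017_INFORMATION above: 30 videos, 2023 frames, ignore label 255.
print(davis_val.num_videos, davis_val.num_samples, davis_val.ignore_label)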
+# ============================================================================== +# +# This script is used to locally run inference on DAVIS 2017. Users could also +# modify from this script for their use case. See train.sh for an example of +# local training. +# +# Usage: +# # From the tensorflow/models/research/feelvos directory. +# sh ./eval.sh +# +# + +# Exit immediately if a command exits with a non-zero status. +set -e + +# Move one-level up to tensorflow/models/research directory. +cd .. + +# Update PYTHONPATH. +export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim:`pwd`/feelvos + +# Set up the working environment. +CURRENT_DIR=$(pwd) +WORK_DIR="${CURRENT_DIR}/feelvos" + +# Run embedding_utils_test first to make sure the PYTHONPATH is correctly set. +python "${WORK_DIR}"/utils/embedding_utils_test.py -v + +# Go to datasets folder and download and convert the DAVIS 2017 dataset. +DATASET_DIR="datasets" +cd "${WORK_DIR}/${DATASET_DIR}" +sh download_and_convert_davis17.sh + +# Go to models folder and download and unpack the DAVIS 2017 trained model. +MODELS_DIR="models" +mkdir -p "${WORK_DIR}/${MODELS_DIR}" +cd "${WORK_DIR}/${MODELS_DIR}" +if [ ! -d "feelvos_davis17_trained" ]; then + wget http://download.tensorflow.org/models/feelvos_davis17_trained.tar.gz + tar -xvf feelvos_davis17_trained.tar.gz + echo "model_checkpoint_path: \"model.ckpt-200004\"" > feelvos_davis17_trained/checkpoint + rm feelvos_davis17_trained.tar.gz +fi +CHECKPOINT_DIR="${WORK_DIR}/${MODELS_DIR}/feelvos_davis17_trained/" + +# Go back to orignal directory. +cd "${CURRENT_DIR}" + +# Set up the working directories. +DAVIS_FOLDER="davis17" +EXP_FOLDER="exp/eval_on_val_set" +VIS_LOGDIR="${WORK_DIR}/${DATASET_DIR}/${DAVIS_FOLDER}/${EXP_FOLDER}/eval" +mkdir -p ${VIS_LOGDIR} + +DAVIS_DATASET="${WORK_DIR}/${DATASET_DIR}/${DAVIS_FOLDER}/tfrecord" + +python "${WORK_DIR}"/vis_video.py \ + --dataset=davis_2017 \ + --dataset_dir="${DAVIS_DATASET}" \ + --vis_logdir="${VIS_LOGDIR}" \ + --checkpoint_dir="${CHECKPOINT_DIR}" \ + --logtostderr \ + --atrous_rates=12 \ + --atrous_rates=24 \ + --atrous_rates=36 \ + --decoder_output_stride=4 \ + --model_variant=xception_65 \ + --multi_grid=1 \ + --multi_grid=1 \ + --multi_grid=1 \ + --output_stride=8 \ + --save_segmentations diff --git a/models/research/feelvos/input_preprocess.py b/models/research/feelvos/input_preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..954c0b42ef2650b1c25ec8071933beee57e9bd69 --- /dev/null +++ b/models/research/feelvos/input_preprocess.py @@ -0,0 +1,280 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Prepare the data used for FEELVOS training/evaluation.""" +import tensorflow as tf + +from deeplab.core import feature_extractor +from deeplab.core import preprocess_utils + +# The probability of flipping the images and labels +# left-right during training +_PROB_OF_FLIP = 0.5 + +get_random_scale = preprocess_utils.get_random_scale +randomly_scale_image_and_label = ( + preprocess_utils.randomly_scale_image_and_label) + + +def preprocess_image_and_label(image, + label, + crop_height, + crop_width, + min_resize_value=None, + max_resize_value=None, + resize_factor=None, + min_scale_factor=1., + max_scale_factor=1., + scale_factor_step_size=0, + ignore_label=255, + is_training=True, + model_variant=None): + """Preprocesses the image and label. + + Args: + image: Input image. + label: Ground truth annotation label. + crop_height: The height value used to crop the image and label. + crop_width: The width value used to crop the image and label. + min_resize_value: Desired size of the smaller image side. + max_resize_value: Maximum allowed size of the larger image side. + resize_factor: Resized dimensions are multiple of factor plus one. + min_scale_factor: Minimum scale factor value. + max_scale_factor: Maximum scale factor value. + scale_factor_step_size: The step size from min scale factor to max scale + factor. The input is randomly scaled based on the value of + (min_scale_factor, max_scale_factor, scale_factor_step_size). + ignore_label: The label value which will be ignored for training and + evaluation. + is_training: If the preprocessing is used for training or not. + model_variant: Model variant (string) for choosing how to mean-subtract the + images. See feature_extractor.network_map for supported model variants. + + Returns: + original_image: Original image (could be resized). + processed_image: Preprocessed image. + label: Preprocessed ground truth segmentation label. + + Raises: + ValueError: Ground truth label not provided during training. + """ + if is_training and label is None: + raise ValueError('During training, label must be provided.') + if model_variant is None: + tf.logging.warning('Default mean-subtraction is performed. Please specify ' + 'a model_variant. See feature_extractor.network_map for ' + 'supported model variants.') + + # Keep reference to original image. + original_image = image + + processed_image = tf.cast(image, tf.float32) + + if label is not None: + label = tf.cast(label, tf.int32) + + # Resize image and label to the desired range. + if min_resize_value is not None or max_resize_value is not None: + [processed_image, label] = ( + preprocess_utils.resize_to_range( + image=processed_image, + label=label, + min_size=min_resize_value, + max_size=max_resize_value, + factor=resize_factor, + align_corners=True)) + # The `original_image` becomes the resized image. + original_image = tf.identity(processed_image) + + # Data augmentation by randomly scaling the inputs. + scale = get_random_scale( + min_scale_factor, max_scale_factor, scale_factor_step_size) + processed_image, label = randomly_scale_image_and_label( + processed_image, label, scale) + + processed_image.set_shape([None, None, 3]) + + if crop_height is not None and crop_width is not None: + # Pad image and label to have dimensions >= [crop_height, crop_width]. 
+ image_shape = tf.shape(processed_image) + image_height = image_shape[0] + image_width = image_shape[1] + + target_height = image_height + tf.maximum(crop_height - image_height, 0) + target_width = image_width + tf.maximum(crop_width - image_width, 0) + + # Pad image with mean pixel value. + mean_pixel = tf.reshape( + feature_extractor.mean_pixel(model_variant), [1, 1, 3]) + processed_image = preprocess_utils.pad_to_bounding_box( + processed_image, 0, 0, target_height, target_width, mean_pixel) + + if label is not None: + label = preprocess_utils.pad_to_bounding_box( + label, 0, 0, target_height, target_width, ignore_label) + + # Randomly crop the image and label. + if is_training and label is not None: + processed_image, label = preprocess_utils.random_crop( + [processed_image, label], crop_height, crop_width) + + processed_image.set_shape([crop_height, crop_width, 3]) + + if label is not None: + label.set_shape([crop_height, crop_width, 1]) + + if is_training: + # Randomly left-right flip the image and label. + processed_image, label, _ = preprocess_utils.flip_dim( + [processed_image, label], _PROB_OF_FLIP, dim=1) + + return original_image, processed_image, label + + +def preprocess_images_and_labels_consistently(images, + labels, + crop_height, + crop_width, + min_resize_value=None, + max_resize_value=None, + resize_factor=None, + min_scale_factor=1., + max_scale_factor=1., + scale_factor_step_size=0, + ignore_label=255, + is_training=True, + model_variant=None): + """Preprocesses images and labels in a consistent way. + + Similar to preprocess_image_and_label, but works on a list of images + and a list of labels and uses the same crop coordinates and either flips + all images and labels or none of them. + + Args: + images: List of input images. + labels: List of ground truth annotation labels. + crop_height: The height value used to crop the image and label. + crop_width: The width value used to crop the image and label. + min_resize_value: Desired size of the smaller image side. + max_resize_value: Maximum allowed size of the larger image side. + resize_factor: Resized dimensions are multiple of factor plus one. + min_scale_factor: Minimum scale factor value. + max_scale_factor: Maximum scale factor value. + scale_factor_step_size: The step size from min scale factor to max scale + factor. The input is randomly scaled based on the value of + (min_scale_factor, max_scale_factor, scale_factor_step_size). + ignore_label: The label value which will be ignored for training and + evaluation. + is_training: If the preprocessing is used for training or not. + model_variant: Model variant (string) for choosing how to mean-subtract the + images. See feature_extractor.network_map for supported model variants. + + Returns: + original_images: Original images (could be resized). + processed_images: Preprocessed images. + labels: Preprocessed ground truth segmentation labels. + + Raises: + ValueError: Ground truth label not provided during training. + """ + if is_training and labels is None: + raise ValueError('During training, labels must be provided.') + if model_variant is None: + tf.logging.warning('Default mean-subtraction is performed. Please specify ' + 'a model_variant. See feature_extractor.network_map for ' + 'supported model variants.') + if labels is not None: + assert len(images) == len(labels) + num_imgs = len(images) + + # Keep reference to original images. 
+ original_images = images + + processed_images = [tf.cast(image, tf.float32) for image in images] + + if labels is not None: + labels = [tf.cast(label, tf.int32) for label in labels] + + # Resize images and labels to the desired range. + if min_resize_value is not None or max_resize_value is not None: + processed_images, labels = zip(*[ + preprocess_utils.resize_to_range( + image=processed_image, + label=label, + min_size=min_resize_value, + max_size=max_resize_value, + factor=resize_factor, + align_corners=True) for processed_image, label + in zip(processed_images, labels)]) + # The `original_images` becomes the resized images. + original_images = [tf.identity(processed_image) + for processed_image in processed_images] + + # Data augmentation by randomly scaling the inputs. + scale = get_random_scale( + min_scale_factor, max_scale_factor, scale_factor_step_size) + processed_images, labels = zip( + *[randomly_scale_image_and_label(processed_image, label, scale) + for processed_image, label in zip(processed_images, labels)]) + + for processed_image in processed_images: + processed_image.set_shape([None, None, 3]) + + if crop_height is not None and crop_width is not None: + # Pad image and label to have dimensions >= [crop_height, crop_width]. + image_shape = tf.shape(processed_images[0]) + image_height = image_shape[0] + image_width = image_shape[1] + + target_height = image_height + tf.maximum(crop_height - image_height, 0) + target_width = image_width + tf.maximum(crop_width - image_width, 0) + + # Pad image with mean pixel value. + mean_pixel = tf.reshape( + feature_extractor.mean_pixel(model_variant), [1, 1, 3]) + processed_images = [preprocess_utils.pad_to_bounding_box( + processed_image, 0, 0, target_height, target_width, mean_pixel) + for processed_image in processed_images] + + if labels is not None: + labels = [preprocess_utils.pad_to_bounding_box( + label, 0, 0, target_height, target_width, ignore_label) + for label in labels] + + # Randomly crop the images and labels. + if is_training and labels is not None: + cropped = preprocess_utils.random_crop( + processed_images + labels, crop_height, crop_width) + assert len(cropped) == 2 * num_imgs + processed_images = cropped[:num_imgs] + labels = cropped[num_imgs:] + + for processed_image in processed_images: + processed_image.set_shape([crop_height, crop_width, 3]) + + if labels is not None: + for label in labels: + label.set_shape([crop_height, crop_width, 1]) + + if is_training: + # Randomly left-right flip the image and label. + res = preprocess_utils.flip_dim( + list(processed_images + labels), _PROB_OF_FLIP, dim=1) + maybe_flipped = res[:-1] + assert len(maybe_flipped) == 2 * num_imgs + processed_images = maybe_flipped[:num_imgs] + labels = maybe_flipped[num_imgs:] + + return original_images, processed_images, labels diff --git a/models/research/feelvos/model.py b/models/research/feelvos/model.py new file mode 100644 index 0000000000000000000000000000000000000000..f145f91616958b7327d99bb55efb1b7b5016a223 --- /dev/null +++ b/models/research/feelvos/model.py @@ -0,0 +1,480 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
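A rough usage sketch of preprocess_image_and_label from input_preprocess.py above; the crop size, scale range and model variant are illustrative values taken from the training scripts, not requirements of the function.

import tensorflow as tf
from feelvos import input_preprocess

image = tf.placeholder(tf.uint8, shape=[None, None, 3])
label = tf.placeholder(tf.uint8, shape=[None, None, 1])
_, processed_image, processed_label = input_preprocess.preprocess_image_and_label(
    image,
    label,
    crop_height=465,
    crop_width=465,
    min_scale_factor=1.0,
    max_scale_factor=1.3,
    scale_factor_step_size=0,
    ignore_label=255,
    is_training=True,
    model_variant='xception_65')
# processed_image: float32 of shape [465, 465, 3];
# processed_label: int32 of shape [465, 465, 1].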
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""Provides DeepLab model definition and helper functions. + +DeepLab is a deep learning system for semantic image segmentation with +the following features: + +(1) Atrous convolution to explicitly control the resolution at which +feature responses are computed within Deep Convolutional Neural Networks. + +(2) Atrous spatial pyramid pooling (ASPP) to robustly segment objects at +multiple scales with filters at multiple sampling rates and effective +fields-of-views. + +(3) ASPP module augmented with image-level feature and batch normalization. + +(4) A simple yet effective decoder module to recover the object boundaries. + +See the following papers for more details: + +"Encoder-Decoder with Atrous Separable Convolution for Semantic Image +Segmentation" +Liang-Chieh Chen, Yukun Zhu, George Papandreou, Florian Schroff, Hartwig Adam. +(https://arxiv.org/abs1802.02611) + +"Rethinking Atrous Convolution for Semantic Image Segmentation," +Liang-Chieh Chen, George Papandreou, Florian Schroff, Hartwig Adam +(https://arxiv.org/abs/1706.05587) + +"DeepLab: Semantic Image Segmentation with Deep Convolutional Nets, +Atrous Convolution, and Fully Connected CRFs", +Liang-Chieh Chen*, George Papandreou*, Iasonas Kokkinos, Kevin Murphy, +Alan L Yuille (* equal contribution) +(https://arxiv.org/abs/1606.00915) + +"Semantic Image Segmentation with Deep Convolutional Nets and Fully Connected +CRFs" +Liang-Chieh Chen*, George Papandreou*, Iasonas Kokkinos, Kevin Murphy, +Alan L. Yuille (* equal contribution) +(https://arxiv.org/abs/1412.7062) +""" +import collections +import tensorflow as tf + +from deeplab import model +from feelvos import common +from feelvos.utils import embedding_utils +from feelvos.utils import train_utils + +slim = tf.contrib.slim + + +get_branch_logits = model.get_branch_logits +get_extra_layer_scopes = model.get_extra_layer_scopes +multi_scale_logits_v2 = model.multi_scale_logits +refine_by_decoder = model.refine_by_decoder +scale_dimension = model.scale_dimension +split_separable_conv2d = model.split_separable_conv2d + +MERGED_LOGITS_SCOPE = model.MERGED_LOGITS_SCOPE +IMAGE_POOLING_SCOPE = model.IMAGE_POOLING_SCOPE +ASPP_SCOPE = model.ASPP_SCOPE +CONCAT_PROJECTION_SCOPE = model.CONCAT_PROJECTION_SCOPE + + +def predict_labels(images, + model_options, + image_pyramid=None, + reference_labels=None, + k_nearest_neighbors=1, + embedding_dimension=None, + use_softmax_feedback=False, + initial_softmax_feedback=None, + embedding_seg_feature_dimension=256, + embedding_seg_n_layers=4, + embedding_seg_kernel_size=7, + embedding_seg_atrous_rates=None, + also_return_softmax_probabilities=False, + num_frames_per_video=None, + normalize_nearest_neighbor_distances=False, + also_attend_to_previous_frame=False, + use_local_previous_frame_attention=False, + previous_frame_attention_window_size=9, + use_first_frame_matching=True, + also_return_embeddings=False, + ref_embeddings=None): + """Predicts segmentation labels. + + Args: + images: A tensor of size [batch, height, width, channels]. 
+ model_options: An InternalModelOptions instance to configure models. + image_pyramid: Input image scales for multi-scale feature extraction. + reference_labels: A tensor of size [batch, height, width, 1]. + ground truth labels used to perform a nearest neighbor query + k_nearest_neighbors: Integer, the number of neighbors to use for nearest + neighbor queries. + embedding_dimension: Integer, the dimension used for the learned embedding. + use_softmax_feedback: Boolean, whether to give the softmax predictions of + the last frame as additional input to the segmentation head. + initial_softmax_feedback: Float32 tensor, or None. Can be used to + initialize the softmax predictions used for the feedback loop. + Typically only useful for inference. Only has an effect if + use_softmax_feedback is True. + embedding_seg_feature_dimension: Integer, the dimensionality used in the + segmentation head layers. + embedding_seg_n_layers: Integer, the number of layers in the segmentation + head. + embedding_seg_kernel_size: Integer, the kernel size used in the + segmentation head. + embedding_seg_atrous_rates: List of integers of length + embedding_seg_n_layers, the atrous rates to use for the segmentation head. + also_return_softmax_probabilities: Boolean, if true, additionally return + the softmax probabilities as second return value. + num_frames_per_video: Integer, the number of frames per video. + normalize_nearest_neighbor_distances: Boolean, whether to normalize the + nearest neighbor distances to [0,1] using sigmoid, scale and shift. + also_attend_to_previous_frame: Boolean, whether to also use nearest + neighbor attention with respect to the previous frame. + use_local_previous_frame_attention: Boolean, whether to restrict the + previous frame attention to a local search window. + Only has an effect, if also_attend_to_previous_frame is True. + previous_frame_attention_window_size: Integer, the window size used for + local previous frame attention, if use_local_previous_frame_attention + is True. + use_first_frame_matching: Boolean, whether to extract features by matching + to the reference frame. This should always be true except for ablation + experiments. + also_return_embeddings: Boolean, whether to return the embeddings as well. + ref_embeddings: Tuple of + (first_frame_embeddings, previous_frame_embeddings), + each of shape [batch, height, width, embedding_dimension], or None. + + Returns: + A dictionary with keys specifying the output_type (e.g., semantic + prediction) and values storing Tensors representing predictions (argmax + over channels). Each prediction has size [batch, height, width]. + If also_return_softmax_probabilities is True, the second return value are + the softmax probabilities. + If also_return_embeddings is True, it will also return an embeddings + tensor of shape [batch, height, width, embedding_dimension]. + + Raises: + ValueError: If classification_loss is not softmax, softmax_with_attention, + nor triplet. 
+ """ + if (model_options.classification_loss == 'triplet' and + reference_labels is None): + raise ValueError('Need reference_labels for triplet loss') + + if model_options.classification_loss == 'softmax_with_attention': + if embedding_dimension is None: + raise ValueError('Need embedding_dimension for softmax_with_attention ' + 'loss') + if reference_labels is None: + raise ValueError('Need reference_labels for softmax_with_attention loss') + res = ( + multi_scale_logits_with_nearest_neighbor_matching( + images, + model_options=model_options, + image_pyramid=image_pyramid, + is_training=False, + reference_labels=reference_labels, + clone_batch_size=1, + num_frames_per_video=num_frames_per_video, + embedding_dimension=embedding_dimension, + max_neighbors_per_object=0, + k_nearest_neighbors=k_nearest_neighbors, + use_softmax_feedback=use_softmax_feedback, + initial_softmax_feedback=initial_softmax_feedback, + embedding_seg_feature_dimension=embedding_seg_feature_dimension, + embedding_seg_n_layers=embedding_seg_n_layers, + embedding_seg_kernel_size=embedding_seg_kernel_size, + embedding_seg_atrous_rates=embedding_seg_atrous_rates, + normalize_nearest_neighbor_distances= + normalize_nearest_neighbor_distances, + also_attend_to_previous_frame=also_attend_to_previous_frame, + use_local_previous_frame_attention= + use_local_previous_frame_attention, + previous_frame_attention_window_size= + previous_frame_attention_window_size, + use_first_frame_matching=use_first_frame_matching, + also_return_embeddings=also_return_embeddings, + ref_embeddings=ref_embeddings + )) + if also_return_embeddings: + outputs_to_scales_to_logits, embeddings = res + else: + outputs_to_scales_to_logits = res + embeddings = None + else: + outputs_to_scales_to_logits = multi_scale_logits_v2( + images, + model_options=model_options, + image_pyramid=image_pyramid, + is_training=False, + fine_tune_batch_norm=False) + + predictions = {} + for output in sorted(outputs_to_scales_to_logits): + scales_to_logits = outputs_to_scales_to_logits[output] + original_logits = scales_to_logits[MERGED_LOGITS_SCOPE] + if isinstance(original_logits, list): + assert len(original_logits) == 1 + original_logits = original_logits[0] + logits = tf.image.resize_bilinear(original_logits, tf.shape(images)[1:3], + align_corners=True) + if model_options.classification_loss in ('softmax', + 'softmax_with_attention'): + predictions[output] = tf.argmax(logits, 3) + elif model_options.classification_loss == 'triplet': + # to keep this fast, we do the nearest neighbor assignment on the + # resolution at which the embedding is extracted and scale the result up + # afterwards + embeddings = original_logits + reference_labels_logits_size = tf.squeeze( + tf.image.resize_nearest_neighbor( + reference_labels[tf.newaxis], + train_utils.resolve_shape(embeddings)[1:3], + align_corners=True), axis=0) + nn_labels = embedding_utils.assign_labels_by_nearest_neighbors( + embeddings[0], embeddings[1:], reference_labels_logits_size, + k_nearest_neighbors) + predictions[common.OUTPUT_TYPE] = tf.image.resize_nearest_neighbor( + nn_labels, tf.shape(images)[1:3], align_corners=True) + else: + raise ValueError( + 'Only support softmax, triplet, or softmax_with_attention for ' + 'classification_loss.') + + if also_return_embeddings: + assert also_return_softmax_probabilities + return predictions, tf.nn.softmax(original_logits, axis=-1), embeddings + elif also_return_softmax_probabilities: + return predictions, tf.nn.softmax(original_logits, axis=-1) + else: + return 
predictions + + +def multi_scale_logits_with_nearest_neighbor_matching( + images, + model_options, + image_pyramid, + clone_batch_size, + reference_labels, + num_frames_per_video, + embedding_dimension, + max_neighbors_per_object, + weight_decay=0.0001, + is_training=False, + fine_tune_batch_norm=False, + k_nearest_neighbors=1, + use_softmax_feedback=False, + initial_softmax_feedback=None, + embedding_seg_feature_dimension=256, + embedding_seg_n_layers=4, + embedding_seg_kernel_size=7, + embedding_seg_atrous_rates=None, + normalize_nearest_neighbor_distances=False, + also_attend_to_previous_frame=False, + damage_initial_previous_frame_mask=False, + use_local_previous_frame_attention=False, + previous_frame_attention_window_size=9, + use_first_frame_matching=True, + also_return_embeddings=False, + ref_embeddings=None): + """Gets the logits for multi-scale inputs using nearest neighbor attention. + + Adjusted version of multi_scale_logits_v2 to support nearest neighbor + attention and a variable number of classes for each element of the batch. + The returned logits are all downsampled (due to max-pooling layers) + for both training and evaluation. + + Args: + images: A tensor of size [batch, height, width, channels]. + model_options: A ModelOptions instance to configure models. + image_pyramid: Input image scales for multi-scale feature extraction. + clone_batch_size: Integer, the number of videos on a batch. + reference_labels: The segmentation labels of the reference frame on which + attention is applied. + num_frames_per_video: Integer, the number of frames per video. + embedding_dimension: Integer, the dimension of the embedding. + max_neighbors_per_object: Integer, the maximum number of candidates + for the nearest neighbor query per object after subsampling. + Can be 0 for no subsampling. + weight_decay: The weight decay for model variables. + is_training: Is training or not. + fine_tune_batch_norm: Fine-tune the batch norm parameters or not. + k_nearest_neighbors: Integer, the number of nearest neighbors to use. + use_softmax_feedback: Boolean, whether to give the softmax predictions of + the last frame as additional input to the segmentation head. + initial_softmax_feedback: List of Float32 tensors, or None. + Can be used to initialize the softmax predictions used for the feedback + loop. Only has an effect if use_softmax_feedback is True. + embedding_seg_feature_dimension: Integer, the dimensionality used in the + segmentation head layers. + embedding_seg_n_layers: Integer, the number of layers in the segmentation + head. + embedding_seg_kernel_size: Integer, the kernel size used in the + segmentation head. + embedding_seg_atrous_rates: List of integers of length + embedding_seg_n_layers, the atrous rates to use for the segmentation head. + normalize_nearest_neighbor_distances: Boolean, whether to normalize the + nearest neighbor distances to [0,1] using sigmoid, scale and shift. + also_attend_to_previous_frame: Boolean, whether to also use nearest + neighbor attention with respect to the previous frame. + damage_initial_previous_frame_mask: Boolean, whether to artificially damage + the initial previous frame mask. Only has an effect if + also_attend_to_previous_frame is True. + use_local_previous_frame_attention: Boolean, whether to restrict the + previous frame attention to a local search window. + Only has an effect, if also_attend_to_previous_frame is True. 
+ previous_frame_attention_window_size: Integer, the window size used for + local previous frame attention, if use_local_previous_frame_attention + is True. + use_first_frame_matching: Boolean, whether to extract features by matching + to the reference frame. This should always be true except for ablation + experiments. + also_return_embeddings: Boolean, whether to return the embeddings as well. + ref_embeddings: Tuple of + (first_frame_embeddings, previous_frame_embeddings), + each of shape [batch, height, width, embedding_dimension], or None. + + Returns: + outputs_to_scales_to_logits: A map of maps from output_type (e.g., + semantic prediction) to a dictionary of multi-scale logits names to + logits. For each output_type, the dictionary has keys which + correspond to the scales and values which correspond to the logits. + For example, if `scales` equals [1.0, 1.5], then the keys would + include 'merged_logits', 'logits_1.00' and 'logits_1.50'. + If also_return_embeddings is True, it will also return an embeddings + tensor of shape [batch, height, width, embedding_dimension]. + + Raises: + ValueError: If model_options doesn't specify crop_size and its + add_image_level_feature = True, since add_image_level_feature requires + crop_size information. + """ + # Setup default values. + if not image_pyramid: + image_pyramid = [1.0] + crop_height = ( + model_options.crop_size[0] + if model_options.crop_size else tf.shape(images)[1]) + crop_width = ( + model_options.crop_size[1] + if model_options.crop_size else tf.shape(images)[2]) + + # Compute the height, width for the output logits. + if model_options.decoder_output_stride: + logits_output_stride = min(model_options.decoder_output_stride) + else: + logits_output_stride = model_options.output_stride + logits_height = scale_dimension( + crop_height, + max(1.0, max(image_pyramid)) / logits_output_stride) + logits_width = scale_dimension( + crop_width, + max(1.0, max(image_pyramid)) / logits_output_stride) + + # Compute the logits for each scale in the image pyramid. 
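To make the logits sizing above concrete, here is a tiny pure-Python mirror of scale_dimension. It assumes DeepLab's usual convention dim -> int((dim - 1) * scale + 1); the helper below is hypothetical and only for illustration.

def scale_dimension_py(dim, scale):
  # Assumed to match model.scale_dimension for static integer dims.
  return int((float(dim) - 1.0) * scale + 1.0)

# E.g. a 465x465 crop, image_pyramid [1.0] and decoder output stride 4:
assert scale_dimension_py(465, 1.0 / 4) == 117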
+ outputs_to_scales_to_logits = { + k: {} + for k in model_options.outputs_to_num_classes + } + + for image_scale in image_pyramid: + if image_scale != 1.0: + scaled_height = scale_dimension(crop_height, image_scale) + scaled_width = scale_dimension(crop_width, image_scale) + scaled_crop_size = [scaled_height, scaled_width] + scaled_images = tf.image.resize_bilinear( + images, scaled_crop_size, align_corners=True) + scaled_reference_labels = tf.image.resize_nearest_neighbor( + reference_labels, scaled_crop_size, align_corners=True + ) + if model_options.crop_size is None: + scaled_crop_size = None + if model_options.crop_size: + scaled_images.set_shape([None, scaled_height, scaled_width, 3]) + else: + scaled_crop_size = model_options.crop_size + scaled_images = images + scaled_reference_labels = reference_labels + + updated_options = model_options._replace(crop_size=scaled_crop_size) + res = embedding_utils.get_logits_with_matching( + scaled_images, + updated_options, + weight_decay=weight_decay, + reuse=tf.AUTO_REUSE, + is_training=is_training, + fine_tune_batch_norm=fine_tune_batch_norm, + reference_labels=scaled_reference_labels, + batch_size=clone_batch_size, + num_frames_per_video=num_frames_per_video, + embedding_dimension=embedding_dimension, + max_neighbors_per_object=max_neighbors_per_object, + k_nearest_neighbors=k_nearest_neighbors, + use_softmax_feedback=use_softmax_feedback, + initial_softmax_feedback=initial_softmax_feedback, + embedding_seg_feature_dimension=embedding_seg_feature_dimension, + embedding_seg_n_layers=embedding_seg_n_layers, + embedding_seg_kernel_size=embedding_seg_kernel_size, + embedding_seg_atrous_rates=embedding_seg_atrous_rates, + normalize_nearest_neighbor_distances= + normalize_nearest_neighbor_distances, + also_attend_to_previous_frame=also_attend_to_previous_frame, + damage_initial_previous_frame_mask=damage_initial_previous_frame_mask, + use_local_previous_frame_attention=use_local_previous_frame_attention, + previous_frame_attention_window_size= + previous_frame_attention_window_size, + use_first_frame_matching=use_first_frame_matching, + also_return_embeddings=also_return_embeddings, + ref_embeddings=ref_embeddings + ) + if also_return_embeddings: + outputs_to_logits, embeddings = res + else: + outputs_to_logits = res + embeddings = None + + # Resize the logits to have the same dimension before merging. + for output in sorted(outputs_to_logits): + if isinstance(outputs_to_logits[output], collections.Sequence): + outputs_to_logits[output] = [tf.image.resize_bilinear( + x, [logits_height, logits_width], align_corners=True) + for x in outputs_to_logits[output]] + else: + outputs_to_logits[output] = tf.image.resize_bilinear( + outputs_to_logits[output], [logits_height, logits_width], + align_corners=True) + + # Return when only one input scale. + if len(image_pyramid) == 1: + for output in sorted(model_options.outputs_to_num_classes): + outputs_to_scales_to_logits[output][ + MERGED_LOGITS_SCOPE] = outputs_to_logits[output] + if also_return_embeddings: + return outputs_to_scales_to_logits, embeddings + else: + return outputs_to_scales_to_logits + + # Save logits to the output map. + for output in sorted(model_options.outputs_to_num_classes): + outputs_to_scales_to_logits[output][ + 'logits_%.2f' % image_scale] = outputs_to_logits[output] + + # Merge the logits from all the multi-scale inputs. + for output in sorted(model_options.outputs_to_num_classes): + # Concatenate the multi-scale logits for each output type. 
+ all_logits = [ + [tf.expand_dims(l, axis=4)] + for logits in outputs_to_scales_to_logits[output].values() + for l in logits + ] + transposed = map(list, zip(*all_logits)) + all_logits = [tf.concat(t, 4) for t in transposed] + merge_fn = ( + tf.reduce_max + if model_options.merge_method == 'max' else tf.reduce_mean) + outputs_to_scales_to_logits[output][MERGED_LOGITS_SCOPE] = [merge_fn( + l, axis=4) for l in all_logits] + + if also_return_embeddings: + return outputs_to_scales_to_logits, embeddings + else: + return outputs_to_scales_to_logits diff --git a/models/research/feelvos/train.py b/models/research/feelvos/train.py new file mode 100644 index 0000000000000000000000000000000000000000..16c085722749bcfde5aeff15cdbec336e5efe451 --- /dev/null +++ b/models/research/feelvos/train.py @@ -0,0 +1,630 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Training script for the FEELVOS model. + +See model.py for more details and usage. +""" +import six +import tensorflow as tf + +from feelvos import common +from feelvos import model +from feelvos.datasets import video_dataset +from feelvos.utils import embedding_utils +from feelvos.utils import train_utils +from feelvos.utils import video_input_generator +from deployment import model_deploy + +slim = tf.contrib.slim +prefetch_queue = slim.prefetch_queue +flags = tf.app.flags +FLAGS = flags.FLAGS + +# Settings for multi-GPUs/multi-replicas training. + +flags.DEFINE_integer('num_clones', 1, 'Number of clones to deploy.') + +flags.DEFINE_boolean('clone_on_cpu', False, 'Use CPUs to deploy clones.') + +flags.DEFINE_integer('num_replicas', 1, 'Number of worker replicas.') + +flags.DEFINE_integer('startup_delay_steps', 15, + 'Number of training steps between replicas startup.') + +flags.DEFINE_integer('num_ps_tasks', 0, + 'The number of parameter servers. If the value is 0, then ' + 'the parameters are handled locally by the worker.') + +flags.DEFINE_string('master', '', 'BNS name of the tensorflow server') + +flags.DEFINE_integer('task', 0, 'The task ID.') + +# Settings for logging. + +flags.DEFINE_string('train_logdir', None, + 'Where the checkpoint and logs are stored.') + +flags.DEFINE_integer('log_steps', 10, + 'Display logging information at every log_steps.') + +flags.DEFINE_integer('save_interval_secs', 1200, + 'How often, in seconds, we save the model to disk.') + +flags.DEFINE_integer('save_summaries_secs', 600, + 'How often, in seconds, we compute the summaries.') + +# Settings for training strategy. 
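For reference, a small sketch of how the 'poly' learning-rate policy selected by the flags below typically evolves, assuming the standard polynomial decay to zero used by DeepLab; the helper name is hypothetical and the slow-start phase is ignored.

def poly_learning_rate(step, base_lr=0.0007, total_steps=200000, power=0.9):
  # Assumed form: base_lr * (1 - step / total_steps) ** power.
  return base_lr * (1.0 - float(step) / total_steps) ** power

print(poly_learning_rate(0))       # 0.0007
print(poly_learning_rate(100000))  # ~0.00038
print(poly_learning_rate(200000))  # 0.0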
+ +flags.DEFINE_enum('learning_policy', 'poly', ['poly', 'step'], + 'Learning rate policy for training.') + +flags.DEFINE_float('base_learning_rate', 0.0007, + 'The base learning rate for model training.') + +flags.DEFINE_float('learning_rate_decay_factor', 0.1, + 'The rate to decay the base learning rate.') + +flags.DEFINE_integer('learning_rate_decay_step', 2000, + 'Decay the base learning rate at a fixed step.') + +flags.DEFINE_float('learning_power', 0.9, + 'The power value used in the poly learning policy.') + +flags.DEFINE_integer('training_number_of_steps', 200000, + 'The number of steps used for training') + +flags.DEFINE_float('momentum', 0.9, 'The momentum value to use') + +flags.DEFINE_integer('train_batch_size', 6, + 'The number of images in each batch during training.') + +flags.DEFINE_integer('train_num_frames_per_video', 3, + 'The number of frames used per video during training') + +flags.DEFINE_float('weight_decay', 0.00004, + 'The value of the weight decay for training.') + +flags.DEFINE_multi_integer('train_crop_size', [465, 465], + 'Image crop size [height, width] during training.') + +flags.DEFINE_float('last_layer_gradient_multiplier', 1.0, + 'The gradient multiplier for last layers, which is used to ' + 'boost the gradient of last layers if the value > 1.') + +flags.DEFINE_boolean('upsample_logits', True, + 'Upsample logits during training.') + +flags.DEFINE_integer('batch_capacity_factor', 16, 'Batch capacity factor.') + +flags.DEFINE_integer('num_readers', 1, 'Number of readers for data provider.') + +flags.DEFINE_integer('batch_num_threads', 1, 'Batch number of threads.') + +flags.DEFINE_integer('prefetch_queue_capacity_factor', 32, + 'Prefetch queue capacity factor.') + +flags.DEFINE_integer('prefetch_queue_num_threads', 1, + 'Prefetch queue number of threads.') + +flags.DEFINE_integer('train_max_neighbors_per_object', 1024, + 'The maximum number of candidates for the nearest ' + 'neighbor query per object after subsampling') + +# Settings for fine-tuning the network. + +flags.DEFINE_string('tf_initial_checkpoint', None, + 'The initial checkpoint in tensorflow format.') + +flags.DEFINE_boolean('initialize_last_layer', False, + 'Initialize the last layer.') + +flags.DEFINE_boolean('last_layers_contain_logits_only', False, + 'Only consider logits as last layers or not.') + +flags.DEFINE_integer('slow_start_step', 0, + 'Training model with small learning rate for few steps.') + +flags.DEFINE_float('slow_start_learning_rate', 1e-4, + 'Learning rate employed during slow start.') + +flags.DEFINE_boolean('fine_tune_batch_norm', False, + 'Fine tune the batch norm parameters or not.') + +flags.DEFINE_float('min_scale_factor', 1., + 'Mininum scale factor for data augmentation.') + +flags.DEFINE_float('max_scale_factor', 1.3, + 'Maximum scale factor for data augmentation.') + +flags.DEFINE_float('scale_factor_step_size', 0, + 'Scale factor step size for data augmentation.') + +flags.DEFINE_multi_integer('atrous_rates', None, + 'Atrous rates for atrous spatial pyramid pooling.') + +flags.DEFINE_integer('output_stride', 8, + 'The ratio of input to output spatial resolution.') + +flags.DEFINE_boolean('sample_only_first_frame_for_finetuning', False, + 'Whether to only sample the first frame during ' + 'fine-tuning. This should be False when using lucid data, ' + 'but True when fine-tuning on the first frame only. 
Only ' + 'has an effect if first_frame_finetuning is True.') + +flags.DEFINE_multi_integer('first_frame_finetuning', [0], + 'Whether to only sample the first frame for ' + 'fine-tuning.') + +# Dataset settings. + +flags.DEFINE_multi_string('dataset', [], 'Name of the segmentation datasets.') + +flags.DEFINE_multi_float('dataset_sampling_probabilities', [], + 'A list of probabilities to sample each of the ' + 'datasets.') + +flags.DEFINE_string('train_split', 'train', + 'Which split of the dataset to be used for training') + +flags.DEFINE_multi_string('dataset_dir', [], 'Where the datasets reside.') + +flags.DEFINE_multi_integer('three_frame_dataset', [0], + 'Whether the dataset has exactly three frames per ' + 'video of which the first is to be used as reference' + ' and the two others are consecutive frames to be ' + 'used as query frames.' + 'Set true for pascal lucid data.') + +flags.DEFINE_boolean('damage_initial_previous_frame_mask', False, + 'Whether to artificially damage the initial previous ' + 'frame mask. Only has an effect if ' + 'also_attend_to_previous_frame is True.') + +flags.DEFINE_float('top_k_percent_pixels', 0.15, 'Float in [0.0, 1.0].' + 'When its value < 1.0, only compute the loss for the top k' + 'percent pixels (e.g., the top 20% pixels). This is useful' + 'for hard pixel mining.') + +flags.DEFINE_integer('hard_example_mining_step', 100000, + 'The training step in which the hard exampling mining ' + 'kicks off. Note that we gradually reduce the mining ' + 'percent to the top_k_percent_pixels. For example, if ' + 'hard_example_mining_step=100K and ' + 'top_k_percent_pixels=0.25, then mining percent will ' + 'gradually reduce from 100% to 25% until 100K steps ' + 'after which we only mine top 25% pixels. Only has an ' + 'effect if top_k_percent_pixels < 1.0') + + +def _build_deeplab(inputs_queue_or_samples, outputs_to_num_classes, + ignore_label): + """Builds a clone of DeepLab. + + Args: + inputs_queue_or_samples: A prefetch queue for images and labels, or + directly a dict of the samples. + outputs_to_num_classes: A map from output type to the number of classes. + For example, for the task of semantic segmentation with 21 semantic + classes, we would have outputs_to_num_classes['semantic'] = 21. + ignore_label: Ignore label. + + Returns: + A map of maps from output_type (e.g., semantic prediction) to a + dictionary of multi-scale logits names to logits. For each output_type, + the dictionary has keys which correspond to the scales and values which + correspond to the logits. For example, if `scales` equals [1.0, 1.5], + then the keys would include 'merged_logits', 'logits_1.00' and + 'logits_1.50'. + + Raises: + ValueError: If classification_loss is not softmax, softmax_with_attention, + or triplet. + """ + if hasattr(inputs_queue_or_samples, 'dequeue'): + samples = inputs_queue_or_samples.dequeue() + else: + samples = inputs_queue_or_samples + train_crop_size = (None if 0 in FLAGS.train_crop_size else + FLAGS.train_crop_size) + + model_options = common.VideoModelOptions( + outputs_to_num_classes=outputs_to_num_classes, + crop_size=train_crop_size, + atrous_rates=FLAGS.atrous_rates, + output_stride=FLAGS.output_stride) + + if model_options.classification_loss == 'softmax_with_attention': + clone_batch_size = FLAGS.train_batch_size // FLAGS.num_clones + + # Create summaries of ground truth labels. 
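Stepping back to the hard-pixel-mining flags defined above: the gradual reduction described in the top_k_percent_pixels help text can be pictured with a small sketch, assuming a linear ramp from 100% down to top_k_percent_pixels over hard_example_mining_step steps. The helper is hypothetical; the values follow the example given in the flag description.

def mining_fraction(step, hard_example_mining_step=100000, top_k_percent_pixels=0.25):
  ratio = min(1.0, float(step) / hard_example_mining_step)
  return (1.0 - ratio) * 1.0 + ratio * top_k_percent_pixels

print(mining_fraction(0))       # 1.0   -> the loss uses all pixels
print(mining_fraction(50000))   # 0.625
print(mining_fraction(100000))  # 0.25  -> only the hardest 25% of pixels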
+ for n in range(clone_batch_size): + tf.summary.image( + 'gt_label_%d' % n, + tf.cast(samples[common.LABEL][ + n * FLAGS.train_num_frames_per_video: + (n + 1) * FLAGS.train_num_frames_per_video], + tf.uint8) * 32, max_outputs=FLAGS.train_num_frames_per_video) + + if common.PRECEDING_FRAME_LABEL in samples: + preceding_frame_label = samples[common.PRECEDING_FRAME_LABEL] + init_softmax = [] + for n in range(clone_batch_size): + init_softmax_n = embedding_utils.create_initial_softmax_from_labels( + preceding_frame_label[n, tf.newaxis], + samples[common.LABEL][n * FLAGS.train_num_frames_per_video, + tf.newaxis], + common.parse_decoder_output_stride(), + reduce_labels=True) + init_softmax_n = tf.squeeze(init_softmax_n, axis=0) + init_softmax.append(init_softmax_n) + tf.summary.image('preceding_frame_label', + tf.cast(preceding_frame_label[n, tf.newaxis], + tf.uint8) * 32) + else: + init_softmax = None + + outputs_to_scales_to_logits = ( + model.multi_scale_logits_with_nearest_neighbor_matching( + samples[common.IMAGE], + model_options=model_options, + image_pyramid=FLAGS.image_pyramid, + weight_decay=FLAGS.weight_decay, + is_training=True, + fine_tune_batch_norm=FLAGS.fine_tune_batch_norm, + reference_labels=samples[common.LABEL], + clone_batch_size=FLAGS.train_batch_size // FLAGS.num_clones, + num_frames_per_video=FLAGS.train_num_frames_per_video, + embedding_dimension=FLAGS.embedding_dimension, + max_neighbors_per_object=FLAGS.train_max_neighbors_per_object, + k_nearest_neighbors=FLAGS.k_nearest_neighbors, + use_softmax_feedback=FLAGS.use_softmax_feedback, + initial_softmax_feedback=init_softmax, + embedding_seg_feature_dimension= + FLAGS.embedding_seg_feature_dimension, + embedding_seg_n_layers=FLAGS.embedding_seg_n_layers, + embedding_seg_kernel_size=FLAGS.embedding_seg_kernel_size, + embedding_seg_atrous_rates=FLAGS.embedding_seg_atrous_rates, + normalize_nearest_neighbor_distances= + FLAGS.normalize_nearest_neighbor_distances, + also_attend_to_previous_frame=FLAGS.also_attend_to_previous_frame, + damage_initial_previous_frame_mask= + FLAGS.damage_initial_previous_frame_mask, + use_local_previous_frame_attention= + FLAGS.use_local_previous_frame_attention, + previous_frame_attention_window_size= + FLAGS.previous_frame_attention_window_size, + use_first_frame_matching=FLAGS.use_first_frame_matching + )) + else: + outputs_to_scales_to_logits = model.multi_scale_logits_v2( + samples[common.IMAGE], + model_options=model_options, + image_pyramid=FLAGS.image_pyramid, + weight_decay=FLAGS.weight_decay, + is_training=True, + fine_tune_batch_norm=FLAGS.fine_tune_batch_norm) + + if model_options.classification_loss == 'softmax': + for output, num_classes in six.iteritems(outputs_to_num_classes): + train_utils.add_softmax_cross_entropy_loss_for_each_scale( + outputs_to_scales_to_logits[output], + samples[common.LABEL], + num_classes, + ignore_label, + loss_weight=1.0, + upsample_logits=FLAGS.upsample_logits, + scope=output) + elif model_options.classification_loss == 'triplet': + for output, _ in six.iteritems(outputs_to_num_classes): + train_utils.add_triplet_loss_for_each_scale( + FLAGS.train_batch_size // FLAGS.num_clones, + FLAGS.train_num_frames_per_video, + FLAGS.embedding_dimension, outputs_to_scales_to_logits[output], + samples[common.LABEL], scope=output) + elif model_options.classification_loss == 'softmax_with_attention': + labels = samples[common.LABEL] + batch_size = FLAGS.train_batch_size // FLAGS.num_clones + num_frames_per_video = FLAGS.train_num_frames_per_video + h, w = 
train_utils.resolve_shape(labels)[1:3] + labels = tf.reshape(labels, tf.stack( + [batch_size, num_frames_per_video, h, w, 1])) + # Strip the reference labels off. + if FLAGS.also_attend_to_previous_frame or FLAGS.use_softmax_feedback: + n_ref_frames = 2 + else: + n_ref_frames = 1 + labels = labels[:, n_ref_frames:] + # Merge batch and time dimensions. + labels = tf.reshape(labels, tf.stack( + [batch_size * (num_frames_per_video - n_ref_frames), h, w, 1])) + + for output, num_classes in six.iteritems(outputs_to_num_classes): + train_utils.add_dynamic_softmax_cross_entropy_loss_for_each_scale( + outputs_to_scales_to_logits[output], + labels, + ignore_label, + loss_weight=1.0, + upsample_logits=FLAGS.upsample_logits, + scope=output, + top_k_percent_pixels=FLAGS.top_k_percent_pixels, + hard_example_mining_step=FLAGS.hard_example_mining_step) + else: + raise ValueError('Only support softmax, softmax_with_attention' + ' or triplet for classification_loss.') + + return outputs_to_scales_to_logits + + +def main(unused_argv): + # Set up deployment (i.e., multi-GPUs and/or multi-replicas). + config = model_deploy.DeploymentConfig( + num_clones=FLAGS.num_clones, + clone_on_cpu=FLAGS.clone_on_cpu, + replica_id=FLAGS.task, + num_replicas=FLAGS.num_replicas, + num_ps_tasks=FLAGS.num_ps_tasks) + + with tf.Graph().as_default(): + with tf.device(config.inputs_device()): + train_crop_size = (None if 0 in FLAGS.train_crop_size else + FLAGS.train_crop_size) + assert FLAGS.dataset + assert len(FLAGS.dataset) == len(FLAGS.dataset_dir) + if len(FLAGS.first_frame_finetuning) == 1: + first_frame_finetuning = (list(FLAGS.first_frame_finetuning) + * len(FLAGS.dataset)) + else: + first_frame_finetuning = FLAGS.first_frame_finetuning + if len(FLAGS.three_frame_dataset) == 1: + three_frame_dataset = (list(FLAGS.three_frame_dataset) + * len(FLAGS.dataset)) + else: + three_frame_dataset = FLAGS.three_frame_dataset + assert len(FLAGS.dataset) == len(first_frame_finetuning) + assert len(FLAGS.dataset) == len(three_frame_dataset) + datasets, samples_list = zip( + *[_get_dataset_and_samples(config, train_crop_size, dataset, + dataset_dir, bool(first_frame_finetuning_), + bool(three_frame_dataset_)) + for dataset, dataset_dir, first_frame_finetuning_, + three_frame_dataset_ in zip(FLAGS.dataset, FLAGS.dataset_dir, + first_frame_finetuning, + three_frame_dataset)]) + # Note that this way of doing things is wasteful since it will evaluate + # all branches but just use one of them. But let's do it anyway for now, + # since it's easy and will probably be fast enough. + dataset = datasets[0] + if len(samples_list) == 1: + samples = samples_list[0] + else: + probabilities = FLAGS.dataset_sampling_probabilities + if probabilities: + assert len(probabilities) == len(samples_list) + else: + # Default to uniform probabilities. + probabilities = [1.0 / len(samples_list) for _ in samples_list] + probabilities = tf.constant(probabilities) + logits = tf.log(probabilities[tf.newaxis]) + rand_idx = tf.squeeze(tf.multinomial(logits, 1, output_dtype=tf.int32), + axis=[0, 1]) + + def wrap(x): + def f(): + return x + return f + + samples = tf.case({tf.equal(rand_idx, idx): wrap(s) + for idx, s in enumerate(samples_list)}, + exclusive=True) + + # Prefetch_queue requires the shape to be known at graph creation time. + # So we only use it if we crop to a fixed size. 
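The dataset-mixing logic above can be exercised in isolation with this small sketch: one index is drawn from a categorical distribution over the sampling probabilities and tf.case picks the matching branch. The probabilities and branch values here are illustrative.

import tensorflow as tf

probabilities = tf.constant([0.7, 0.3])
logits = tf.log(probabilities[tf.newaxis])
rand_idx = tf.squeeze(
    tf.multinomial(logits, 1, output_dtype=tf.int32), axis=[0, 1])
chosen = tf.case({tf.equal(rand_idx, 0): lambda: tf.constant('davis_2017'),
                  tf.equal(rand_idx, 1): lambda: tf.constant('youtube_vos_2018')},
                 exclusive=True)
with tf.Session() as sess:
  print(sess.run(chosen))  # b'davis_2017' roughly 70% of the time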
+ if train_crop_size is None: + inputs_queue = samples + else: + inputs_queue = prefetch_queue.prefetch_queue( + samples, + capacity=FLAGS.prefetch_queue_capacity_factor*config.num_clones, + num_threads=FLAGS.prefetch_queue_num_threads) + + # Create the global step on the device storing the variables. + with tf.device(config.variables_device()): + global_step = tf.train.get_or_create_global_step() + + # Define the model and create clones. + model_fn = _build_deeplab + if FLAGS.classification_loss == 'triplet': + embedding_dim = FLAGS.embedding_dimension + output_type_to_dim = {'embedding': embedding_dim} + else: + output_type_to_dim = {common.OUTPUT_TYPE: dataset.num_classes} + model_args = (inputs_queue, output_type_to_dim, dataset.ignore_label) + clones = model_deploy.create_clones(config, model_fn, args=model_args) + + # Gather update_ops from the first clone. These contain, for example, + # the updates for the batch_norm variables created by model_fn. + first_clone_scope = config.clone_scope(0) + update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope) + + # Gather initial summaries. + summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES)) + + # Add summaries for model variables. + for model_var in tf.contrib.framework.get_model_variables(): + summaries.add(tf.summary.histogram(model_var.op.name, model_var)) + + # Add summaries for losses. + for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope): + summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss)) + + # Build the optimizer based on the device specification. + with tf.device(config.optimizer_device()): + learning_rate = train_utils.get_model_learning_rate( + FLAGS.learning_policy, + FLAGS.base_learning_rate, + FLAGS.learning_rate_decay_step, + FLAGS.learning_rate_decay_factor, + FLAGS.training_number_of_steps, + FLAGS.learning_power, + FLAGS.slow_start_step, + FLAGS.slow_start_learning_rate) + optimizer = tf.train.MomentumOptimizer(learning_rate, FLAGS.momentum) + summaries.add(tf.summary.scalar('learning_rate', learning_rate)) + + startup_delay_steps = FLAGS.task * FLAGS.startup_delay_steps + + with tf.device(config.variables_device()): + total_loss, grads_and_vars = model_deploy.optimize_clones( + clones, optimizer) + total_loss = tf.check_numerics(total_loss, 'Loss is inf or nan.') + summaries.add(tf.summary.scalar('total_loss', total_loss)) + + # Modify the gradients for biases and last layer variables. + last_layers = model.get_extra_layer_scopes( + FLAGS.last_layers_contain_logits_only) + grad_mult = train_utils.get_model_gradient_multipliers( + last_layers, FLAGS.last_layer_gradient_multiplier) + if grad_mult: + grads_and_vars = slim.learning.multiply_gradients(grads_and_vars, + grad_mult) + + with tf.name_scope('grad_clipping'): + grads_and_vars = slim.learning.clip_gradient_norms(grads_and_vars, 5.0) + + # Create histogram summaries for the gradients. + # We have too many summaries for mldash, so disable this one for now. + # for grad, var in grads_and_vars: + # summaries.add(tf.summary.histogram( + # var.name.replace(':0', '_0') + '/gradient', grad)) + + # Create gradient update op. + grad_updates = optimizer.apply_gradients(grads_and_vars, + global_step=global_step) + update_ops.append(grad_updates) + update_op = tf.group(*update_ops) + with tf.control_dependencies([update_op]): + train_tensor = tf.identity(total_loss, name='train_op') + + # Add the summaries from the first clone. 
These contain the summaries + # created by model_fn and either optimize_clones() or _gather_clone_loss(). + summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES, + first_clone_scope)) + + # Merge all summaries together. + summary_op = tf.summary.merge(list(summaries)) + + # Soft placement allows placing on CPU ops without GPU implementation. + session_config = tf.ConfigProto(allow_soft_placement=True, + log_device_placement=False) + + # Start the training. + slim.learning.train( + train_tensor, + logdir=FLAGS.train_logdir, + log_every_n_steps=FLAGS.log_steps, + master=FLAGS.master, + number_of_steps=FLAGS.training_number_of_steps, + is_chief=(FLAGS.task == 0), + session_config=session_config, + startup_delay_steps=startup_delay_steps, + init_fn=train_utils.get_model_init_fn(FLAGS.train_logdir, + FLAGS.tf_initial_checkpoint, + FLAGS.initialize_last_layer, + last_layers, + ignore_missing_vars=True), + summary_op=summary_op, + save_summaries_secs=FLAGS.save_summaries_secs, + save_interval_secs=FLAGS.save_interval_secs) + + +def _get_dataset_and_samples(config, train_crop_size, dataset_name, + dataset_dir, first_frame_finetuning, + three_frame_dataset): + """Creates dataset object and samples dict of tensor. + + Args: + config: A DeploymentConfig. + train_crop_size: Integer, the crop size used for training. + dataset_name: String, the name of the dataset. + dataset_dir: String, the directory of the dataset. + first_frame_finetuning: Boolean, whether the used dataset is a dataset + for first frame fine-tuning. + three_frame_dataset: Boolean, whether the dataset has exactly three frames + per video of which the first is to be used as reference and the two + others are consecutive frames to be used as query frames. + + Returns: + dataset: An instance of slim Dataset. + samples: A dictionary of tensors for semantic segmentation. + """ + + # Split the batch across GPUs. + assert FLAGS.train_batch_size % config.num_clones == 0, ( + 'Training batch size not divisble by number of clones (GPUs).') + + clone_batch_size = FLAGS.train_batch_size / config.num_clones + + if first_frame_finetuning: + train_split = 'val' + else: + train_split = FLAGS.train_split + + data_type = 'tf_sequence_example' + # Get dataset-dependent information. 
+ dataset = video_dataset.get_dataset( + dataset_name, + train_split, + dataset_dir=dataset_dir, + data_type=data_type) + + tf.gfile.MakeDirs(FLAGS.train_logdir) + tf.logging.info('Training on %s set', train_split) + + samples = video_input_generator.get( + dataset, + FLAGS.train_num_frames_per_video, + train_crop_size, + clone_batch_size, + num_readers=FLAGS.num_readers, + num_threads=FLAGS.batch_num_threads, + min_resize_value=FLAGS.min_resize_value, + max_resize_value=FLAGS.max_resize_value, + resize_factor=FLAGS.resize_factor, + min_scale_factor=FLAGS.min_scale_factor, + max_scale_factor=FLAGS.max_scale_factor, + scale_factor_step_size=FLAGS.scale_factor_step_size, + dataset_split=FLAGS.train_split, + is_training=True, + model_variant=FLAGS.model_variant, + batch_capacity_factor=FLAGS.batch_capacity_factor, + decoder_output_stride=common.parse_decoder_output_stride(), + first_frame_finetuning=first_frame_finetuning, + sample_only_first_frame_for_finetuning= + FLAGS.sample_only_first_frame_for_finetuning, + sample_adjacent_and_consistent_query_frames= + FLAGS.sample_adjacent_and_consistent_query_frames or + FLAGS.use_softmax_feedback, + remap_labels_to_reference_frame=True, + three_frame_dataset=three_frame_dataset, + add_prev_frame_label=not FLAGS.also_attend_to_previous_frame + ) + return dataset, samples + + +if __name__ == '__main__': + flags.mark_flag_as_required('train_logdir') + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/models/research/feelvos/train.sh b/models/research/feelvos/train.sh new file mode 100644 index 0000000000000000000000000000000000000000..63b7ea19d4c53dea932322c3885abb9a95237e0c --- /dev/null +++ b/models/research/feelvos/train.sh @@ -0,0 +1,92 @@ +#!/bin/bash +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# +# This script is used to run local training on DAVIS 2017. Users could also +# modify from this script for their use case. See eval.sh for an example of +# local inference with a pre-trained model. +# +# Note that this script runs local training with a single GPU and a smaller crop +# and batch size, while in the paper, we trained our models with 16 GPUS with +# --num_clones=2, --train_batch_size=6, --num_replicas=8, +# --training_number_of_steps=200000, --train_crop_size=465, +# --train_crop_size=465. +# +# Usage: +# # From the tensorflow/models/research/feelvos directory. +# sh ./train.sh +# +# + +# Exit immediately if a command exits with a non-zero status. +set -e + +# Move one-level up to tensorflow/models/research directory. +cd .. + +# Update PYTHONPATH. +export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim:`pwd`/feelvos + +# Set up the working environment. +CURRENT_DIR=$(pwd) +WORK_DIR="${CURRENT_DIR}/feelvos" + +# Set up the working directories. 
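+# (For reference: with the default values below, checkpoints and training
+# summaries are written to feelvos/datasets/davis17/exp/train/train.)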
+DATASET_DIR="datasets" +DAVIS_FOLDER="davis17" +DAVIS_DATASET="${WORK_DIR}/${DATASET_DIR}/${DAVIS_FOLDER}/tfrecord" +EXP_FOLDER="exp/train" +TRAIN_LOGDIR="${WORK_DIR}/${DATASET_DIR}/${DAVIS_FOLDER}/${EXP_FOLDER}/train" +mkdir -p ${TRAIN_LOGDIR} + +# Go to datasets folder and download and convert the DAVIS 2017 dataset. +DATASET_DIR="datasets" +cd "${WORK_DIR}/${DATASET_DIR}" +sh download_and_convert_davis17.sh + +# Go to models folder and download and unpack the COCO pre-trained model. +MODELS_DIR="models" +mkdir -p "${WORK_DIR}/${MODELS_DIR}" +cd "${WORK_DIR}/${MODELS_DIR}" +if [ ! -d "xception_65_coco_pretrained" ]; then + wget http://download.tensorflow.org/models/xception_65_coco_pretrained_2018_10_02.tar.gz + tar -xvf xception_65_coco_pretrained_2018_10_02.tar.gz + rm xception_65_coco_pretrained_2018_10_02.tar.gz +fi +INIT_CKPT="${WORK_DIR}/${MODELS_DIR}/xception_65_coco_pretrained/x65-b2u1s2p-d48-2-3x256-sc-cr300k_init.ckpt" + +# Go back to orignal directory. +cd "${CURRENT_DIR}" + +python "${WORK_DIR}"/train.py \ + --dataset=davis_2017 \ + --dataset_dir="${DAVIS_DATASET}" \ + --train_logdir="${TRAIN_LOGDIR}" \ + --tf_initial_checkpoint="${INIT_CKPT}" \ + --logtostderr \ + --atrous_rates=6 \ + --atrous_rates=12 \ + --atrous_rates=18 \ + --decoder_output_stride=4 \ + --model_variant=xception_65 \ + --multi_grid=1 \ + --multi_grid=1 \ + --multi_grid=1 \ + --output_stride=16 \ + --weight_decay=0.00004 \ + --num_clones=1 \ + --train_batch_size=1 \ + --train_crop_size=300 \ + --train_crop_size=300 diff --git a/models/research/feelvos/utils/__init__.py b/models/research/feelvos/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6f1373443d0ff84fd90714e41dade400ab41a22c --- /dev/null +++ b/models/research/feelvos/utils/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== diff --git a/models/research/feelvos/utils/embedding_utils.py b/models/research/feelvos/utils/embedding_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..233c70d9327d08251537c58821dd8405b42f0fe7 --- /dev/null +++ b/models/research/feelvos/utils/embedding_utils.py @@ -0,0 +1,1082 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Utilities for the instance embedding for segmentation.""" + +import numpy as np +import tensorflow as tf +from deeplab import model +from deeplab.core import preprocess_utils +from feelvos.utils import mask_damaging + +slim = tf.contrib.slim +resolve_shape = preprocess_utils.resolve_shape +WRONG_LABEL_PADDING_DISTANCE = 1e20 + +# With correlation_cost local matching will be much faster. But we provide a +# slow fallback for convenience. +USE_CORRELATION_COST = False +if USE_CORRELATION_COST: + # pylint: disable=g-import-not-at-top + from correlation_cost.python.ops import correlation_cost_op + + +def pairwise_distances(x, y): + """Computes pairwise squared l2 distances between tensors x and y. + + Args: + x: Tensor of shape [n, feature_dim]. + y: Tensor of shape [m, feature_dim]. + + Returns: + Float32 distances tensor of shape [n, m]. + """ + # d[i,j] = (x[i] - y[j]) * (x[i] - y[j])' + # = sum(x[i]^2, 1) + sum(y[j]^2, 1) - 2 * x[i] * y[j]' + xs = tf.reduce_sum(x * x, axis=1)[:, tf.newaxis] + ys = tf.reduce_sum(y * y, axis=1)[tf.newaxis, :] + d = xs + ys - 2 * tf.matmul(x, y, transpose_b=True) + return d + + +def pairwise_distances2(x, y): + """Computes pairwise squared l2 distances between tensors x and y. + + Naive implementation, high memory use. Could be useful to test the more + efficient implementation. + + Args: + x: Tensor of shape [n, feature_dim]. + y: Tensor of shape [m, feature_dim]. + + Returns: + distances of shape [n, m]. + """ + return tf.reduce_sum(tf.squared_difference( + x[:, tf.newaxis], y[tf.newaxis, :]), axis=-1) + + +def cross_correlate(x, y, max_distance=9): + """Efficiently computes the cross correlation of x and y. + + Optimized implementation using correlation_cost. + Note that we do not normalize by the feature dimension. + + Args: + x: Float32 tensor of shape [height, width, feature_dim]. + y: Float32 tensor of shape [height, width, feature_dim]. + max_distance: Integer, the maximum distance in pixel coordinates + per dimension which is considered to be in the search window. + + Returns: + Float32 tensor of shape [height, width, (2 * max_distance + 1) ** 2]. + """ + with tf.name_scope('cross_correlation'): + corr = correlation_cost_op.correlation_cost( + x[tf.newaxis], y[tf.newaxis], kernel_size=1, + max_displacement=max_distance, stride_1=1, stride_2=1, + pad=max_distance) + corr = tf.squeeze(corr, axis=0) + # This correlation implementation takes the mean over the feature_dim, + # but we want sum here, so multiply by feature_dim. + feature_dim = resolve_shape(x)[-1] + corr *= feature_dim + return corr + + +def local_pairwise_distances(x, y, max_distance=9): + """Computes pairwise squared l2 distances using a local search window. + + Optimized implementation using correlation_cost. + + Args: + x: Float32 tensor of shape [height, width, feature_dim]. + y: Float32 tensor of shape [height, width, feature_dim]. + max_distance: Integer, the maximum distance in pixel coordinates + per dimension which is considered to be in the search window. + + Returns: + Float32 distances tensor of shape + [height, width, (2 * max_distance + 1) ** 2]. 
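+
+  A minimal usage sketch (shapes are illustrative assumptions and
+  correlation_cost must be available, i.e. USE_CORRELATION_COST is True):
+
+    x = tf.random_normal([65, 65, 32])
+    y = tf.random_normal([65, 65, 32])
+    # d has shape [65, 65, (2 * 9 + 1) ** 2] = [65, 65, 361].
+    d = local_pairwise_distances(x, y, max_distance=9)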
+ """ + with tf.name_scope('local_pairwise_distances'): + # d[i,j] = (x[i] - y[j]) * (x[i] - y[j])' + # = sum(x[i]^2, -1) + sum(y[j]^2, -1) - 2 * x[i] * y[j]' + corr = cross_correlate(x, y, max_distance=max_distance) + xs = tf.reduce_sum(x * x, axis=2)[..., tf.newaxis] + ys = tf.reduce_sum(y * y, axis=2)[..., tf.newaxis] + ones_ys = tf.ones_like(ys) + ys = cross_correlate(ones_ys, ys, max_distance=max_distance) + d = xs + ys - 2 * corr + # Boundary should be set to Inf. + boundary = tf.equal( + cross_correlate(ones_ys, ones_ys, max_distance=max_distance), 0) + d = tf.where(boundary, tf.fill(tf.shape(d), tf.constant(np.float('inf'))), + d) + return d + + +def local_pairwise_distances2(x, y, max_distance=9): + """Computes pairwise squared l2 distances using a local search window. + + Naive implementation using map_fn. + Used as a slow fallback for when correlation_cost is not available. + + Args: + x: Float32 tensor of shape [height, width, feature_dim]. + y: Float32 tensor of shape [height, width, feature_dim]. + max_distance: Integer, the maximum distance in pixel coordinates + per dimension which is considered to be in the search window. + + Returns: + Float32 distances tensor of shape + [height, width, (2 * max_distance + 1) ** 2]. + """ + with tf.name_scope('local_pairwise_distances2'): + padding_val = 1e20 + padded_y = tf.pad(y, [[max_distance, max_distance], + [max_distance, max_distance], [0, 0]], + constant_values=padding_val) + height, width, _ = resolve_shape(x) + dists = [] + for y_start in range(2 * max_distance + 1): + y_end = y_start + height + y_slice = padded_y[y_start:y_end] + for x_start in range(2 * max_distance + 1): + x_end = x_start + width + offset_y = y_slice[:, x_start:x_end] + dist = tf.reduce_sum(tf.squared_difference(x, offset_y), axis=2) + dists.append(dist) + dists = tf.stack(dists, axis=2) + return dists + + +def majority_vote(labels): + """Performs a label majority vote along axis 1. + + Second try, hopefully this time more efficient. + We assume that the labels are contiguous starting from 0. + It will also work for non-contiguous labels, but be inefficient. + + Args: + labels: Int tensor of shape [n, k] + + Returns: + The majority of labels along axis 1 + """ + max_label = tf.reduce_max(labels) + one_hot = tf.one_hot(labels, depth=max_label + 1) + summed = tf.reduce_sum(one_hot, axis=1) + majority = tf.argmax(summed, axis=1) + return majority + + +def assign_labels_by_nearest_neighbors(reference_embeddings, query_embeddings, + reference_labels, k=1): + """Segments by nearest neighbor query wrt the reference frame. + + Args: + reference_embeddings: Tensor of shape [height, width, embedding_dim], + the embedding vectors for the reference frame + query_embeddings: Tensor of shape [n_query_images, height, width, + embedding_dim], the embedding vectors for the query frames + reference_labels: Tensor of shape [height, width, 1], the class labels of + the reference frame + k: Integer, the number of nearest neighbors to use + + Returns: + The labels of the nearest neighbors as [n_query_frames, height, width, 1] + tensor + + Raises: + ValueError: If k < 1. 
+ """ + if k < 1: + raise ValueError('k must be at least 1') + dists = flattened_pairwise_distances(reference_embeddings, query_embeddings) + if k == 1: + nn_indices = tf.argmin(dists, axis=1)[..., tf.newaxis] + else: + _, nn_indices = tf.nn.top_k(-dists, k, sorted=False) + reference_labels = tf.reshape(reference_labels, [-1]) + nn_labels = tf.gather(reference_labels, nn_indices) + if k == 1: + nn_labels = tf.squeeze(nn_labels, axis=1) + else: + nn_labels = majority_vote(nn_labels) + height = tf.shape(reference_embeddings)[0] + width = tf.shape(reference_embeddings)[1] + n_query_frames = query_embeddings.shape[0] + nn_labels = tf.reshape(nn_labels, [n_query_frames, height, width, 1]) + return nn_labels + + +def flattened_pairwise_distances(reference_embeddings, query_embeddings): + """Calculates flattened tensor of pairwise distances between ref and query. + + Args: + reference_embeddings: Tensor of shape [..., embedding_dim], + the embedding vectors for the reference frame + query_embeddings: Tensor of shape [n_query_images, height, width, + embedding_dim], the embedding vectors for the query frames. + + Returns: + A distance tensor of shape [reference_embeddings.size / embedding_dim, + query_embeddings.size / embedding_dim] + """ + embedding_dim = resolve_shape(query_embeddings)[-1] + reference_embeddings = tf.reshape(reference_embeddings, [-1, embedding_dim]) + first_dim = -1 + query_embeddings = tf.reshape(query_embeddings, [first_dim, embedding_dim]) + dists = pairwise_distances(query_embeddings, reference_embeddings) + return dists + + +def nearest_neighbor_features_per_object( + reference_embeddings, query_embeddings, reference_labels, + max_neighbors_per_object, k_nearest_neighbors, gt_ids=None, n_chunks=100): + """Calculates the distance to the nearest neighbor per object. + + For every pixel of query_embeddings calculate the distance to the + nearest neighbor in the (possibly subsampled) reference_embeddings per object. + + Args: + reference_embeddings: Tensor of shape [height, width, embedding_dim], + the embedding vectors for the reference frame. + query_embeddings: Tensor of shape [n_query_images, height, width, + embedding_dim], the embedding vectors for the query frames. + reference_labels: Tensor of shape [height, width, 1], the class labels of + the reference frame. + max_neighbors_per_object: Integer, the maximum number of candidates + for the nearest neighbor query per object after subsampling, + or 0 for no subsampling. + k_nearest_neighbors: Integer, the number of nearest neighbors to use. + gt_ids: Int tensor of shape [n_objs] of the sorted unique ground truth + ids in the first frame. If None, it will be derived from + reference_labels. + n_chunks: Integer, the number of chunks to use to save memory + (set to 1 for no chunking). + + Returns: + nn_features: A float32 tensor of nearest neighbor features of shape + [n_query_images, height, width, n_objects, feature_dim]. + gt_ids: An int32 tensor of the unique sorted object ids present + in the reference labels. 
+ """ + with tf.name_scope('nn_features_per_object'): + reference_labels_flat = tf.reshape(reference_labels, [-1]) + if gt_ids is None: + ref_obj_ids, _ = tf.unique(reference_labels_flat) + ref_obj_ids = tf.contrib.framework.sort(ref_obj_ids) + gt_ids = ref_obj_ids + embedding_dim = resolve_shape(reference_embeddings)[-1] + reference_embeddings_flat = tf.reshape(reference_embeddings, + [-1, embedding_dim]) + + reference_embeddings_flat, reference_labels_flat = ( + subsample_reference_embeddings_and_labels(reference_embeddings_flat, + reference_labels_flat, + gt_ids, + max_neighbors_per_object)) + shape = resolve_shape(query_embeddings) + query_embeddings_flat = tf.reshape(query_embeddings, [-1, embedding_dim]) + nn_features = _nearest_neighbor_features_per_object_in_chunks( + reference_embeddings_flat, query_embeddings_flat, reference_labels_flat, + gt_ids, k_nearest_neighbors, n_chunks) + nn_features_dim = resolve_shape(nn_features)[-1] + nn_features_reshaped = tf.reshape(nn_features, + tf.stack(shape[:3] + [tf.size(gt_ids), + nn_features_dim])) + return nn_features_reshaped, gt_ids + + +def _nearest_neighbor_features_per_object_in_chunks( + reference_embeddings_flat, query_embeddings_flat, reference_labels_flat, + ref_obj_ids, k_nearest_neighbors, n_chunks): + """Calculates the nearest neighbor features per object in chunks to save mem. + + Uses chunking to bound the memory use. + + Args: + reference_embeddings_flat: Tensor of shape [n, embedding_dim], + the embedding vectors for the reference frame. + query_embeddings_flat: Tensor of shape [m, embedding_dim], the embedding + vectors for the query frames. + reference_labels_flat: Tensor of shape [n], the class labels of the + reference frame. + ref_obj_ids: int tensor of unique object ids in the reference labels. + k_nearest_neighbors: Integer, the number of nearest neighbors to use. + n_chunks: Integer, the number of chunks to use to save memory + (set to 1 for no chunking). + + Returns: + nn_features: A float32 tensor of nearest neighbor features of shape + [m, n_objects, feature_dim]. + """ + chunk_size = tf.cast(tf.ceil(tf.cast(tf.shape(query_embeddings_flat)[0], + tf.float32) / n_chunks), tf.int32) + wrong_label_mask = tf.not_equal(reference_labels_flat, + ref_obj_ids[:, tf.newaxis]) + all_features = [] + for n in range(n_chunks): + if n_chunks == 1: + query_embeddings_flat_chunk = query_embeddings_flat + else: + chunk_start = n * chunk_size + chunk_end = (n + 1) * chunk_size + query_embeddings_flat_chunk = query_embeddings_flat[chunk_start:chunk_end] + # Use control dependencies to make sure that the chunks are not processed + # in parallel which would prevent any peak memory savings. + with tf.control_dependencies(all_features): + features = _nn_features_per_object_for_chunk( + reference_embeddings_flat, query_embeddings_flat_chunk, + wrong_label_mask, k_nearest_neighbors + ) + all_features.append(features) + if n_chunks == 1: + nn_features = all_features[0] + else: + nn_features = tf.concat(all_features, axis=0) + return nn_features + + +def _nn_features_per_object_for_chunk( + reference_embeddings, query_embeddings, wrong_label_mask, + k_nearest_neighbors): + """Extracts features for each object using nearest neighbor attention. + + Args: + reference_embeddings: Tensor of shape [n_chunk, embedding_dim], + the embedding vectors for the reference frame. + query_embeddings: Tensor of shape [m_chunk, embedding_dim], the embedding + vectors for the query frames. 
+ wrong_label_mask: + k_nearest_neighbors: Integer, the number of nearest neighbors to use. + + Returns: + nn_features: A float32 tensor of nearest neighbor features of shape + [m_chunk, n_objects, feature_dim]. + """ + reference_embeddings_key = reference_embeddings + query_embeddings_key = query_embeddings + dists = flattened_pairwise_distances(reference_embeddings_key, + query_embeddings_key) + dists = (dists[:, tf.newaxis, :] + + tf.cast(wrong_label_mask[tf.newaxis, :, :], tf.float32) * + WRONG_LABEL_PADDING_DISTANCE) + if k_nearest_neighbors == 1: + features = tf.reduce_min(dists, axis=2, keepdims=True) + else: + # Find the closest k and combine them according to attention_feature_type + dists, _ = tf.nn.top_k(-dists, k=k_nearest_neighbors) + dists = -dists + # If not enough real neighbors were found, pad with the farthest real + # neighbor. + valid_mask = tf.less(dists, WRONG_LABEL_PADDING_DISTANCE) + masked_dists = dists * tf.cast(valid_mask, tf.float32) + pad_dist = tf.tile(tf.reduce_max(masked_dists, axis=2)[..., tf.newaxis], + multiples=[1, 1, k_nearest_neighbors]) + dists = tf.where(valid_mask, dists, pad_dist) + # take mean of distances + features = tf.reduce_mean(dists, axis=2, keepdims=True) + return features + + +def create_embedding_segmentation_features(features, feature_dimension, + n_layers, kernel_size, reuse, + atrous_rates=None): + """Extracts features which can be used to estimate the final segmentation. + + Args: + features: input features of shape [batch, height, width, features] + feature_dimension: Integer, the dimensionality used in the segmentation + head layers. + n_layers: Integer, the number of layers in the segmentation head. + kernel_size: Integer, the kernel size used in the segmentation head. + reuse: reuse mode for the variable_scope. + atrous_rates: List of integers of length n_layers, the atrous rates to use. + + Returns: + Features to be used to estimate the segmentation labels of shape + [batch, height, width, embedding_seg_feat_dim]. + """ + if atrous_rates is None or not atrous_rates: + atrous_rates = [1 for _ in range(n_layers)] + assert len(atrous_rates) == n_layers + with tf.variable_scope('embedding_seg', reuse=reuse): + for n in range(n_layers): + features = model.split_separable_conv2d( + features, feature_dimension, kernel_size=kernel_size, + rate=atrous_rates[n], scope='split_separable_conv2d_{}'.format(n)) + return features + + +def add_image_summaries(images, nn_features, logits, batch_size, + prev_frame_nn_features=None): + """Adds image summaries of input images, attention features and logits. + + Args: + images: Image tensor of shape [batch, height, width, channels]. + nn_features: Nearest neighbor attention features of shape + [batch_size, height, width, n_objects, 1]. + logits: Float32 tensor of logits. + batch_size: Integer, the number of videos per clone per mini-batch. + prev_frame_nn_features: Nearest neighbor attention features wrt. the + last frame of shape [batch_size, height, width, n_objects, 1]. + Can be None. + """ + # Separate reference and query images. 
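+  # The images tensor packs the frames of all videos along the batch axis as
+  # [batch_size * num_frames, height, width, channels]; frame 0 of every
+  # video is the reference frame, the remaining frames are query frames.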
+ reshaped_images = tf.reshape(images, tf.stack( + [batch_size, -1] + resolve_shape(images)[1:])) + reference_images = reshaped_images[:, 0] + query_images = reshaped_images[:, 1:] + query_images_reshaped = tf.reshape(query_images, tf.stack( + [-1] + resolve_shape(images)[1:])) + tf.summary.image('ref_images', reference_images, max_outputs=batch_size) + tf.summary.image('query_images', query_images_reshaped, max_outputs=10) + predictions = tf.cast( + tf.argmax(logits, axis=-1), tf.uint8)[..., tf.newaxis] + # Scale up so that we can actually see something. + tf.summary.image('predictions', predictions * 32, max_outputs=10) + # We currently only show the first dimension of the features for background + # and the first foreground object. + tf.summary.image('nn_fg_features', nn_features[..., 0:1, 0], + max_outputs=batch_size) + if prev_frame_nn_features is not None: + tf.summary.image('nn_fg_features_prev', prev_frame_nn_features[..., 0:1, 0], + max_outputs=batch_size) + tf.summary.image('nn_bg_features', nn_features[..., 1:2, 0], + max_outputs=batch_size) + if prev_frame_nn_features is not None: + tf.summary.image('nn_bg_features_prev', + prev_frame_nn_features[..., 1:2, 0], + max_outputs=batch_size) + + +def get_embeddings(images, model_options, embedding_dimension): + """Extracts embedding vectors for images. Should only be used for inference. + + Args: + images: A tensor of shape [batch, height, width, channels]. + model_options: A ModelOptions instance to configure models. + embedding_dimension: Integer, the dimension of the embedding. + + Returns: + embeddings: A tensor of shape [batch, height, width, embedding_dimension]. + """ + features, end_points = model.extract_features( + images, + model_options, + is_training=False) + + if model_options.decoder_output_stride is not None: + decoder_output_stride = min(model_options.decoder_output_stride) + if model_options.crop_size is None: + height = tf.shape(images)[1] + width = tf.shape(images)[2] + else: + height, width = model_options.crop_size + features = model.refine_by_decoder( + features, + end_points, + crop_size=[height, width], + decoder_output_stride=[decoder_output_stride], + decoder_use_separable_conv=model_options.decoder_use_separable_conv, + model_variant=model_options.model_variant, + is_training=False) + + with tf.variable_scope('embedding'): + embeddings = split_separable_conv2d_with_identity_initializer( + features, embedding_dimension, scope='split_separable_conv2d') + return embeddings + + +def get_logits_with_matching(images, + model_options, + weight_decay=0.0001, + reuse=None, + is_training=False, + fine_tune_batch_norm=False, + reference_labels=None, + batch_size=None, + num_frames_per_video=None, + embedding_dimension=None, + max_neighbors_per_object=0, + k_nearest_neighbors=1, + use_softmax_feedback=True, + initial_softmax_feedback=None, + embedding_seg_feature_dimension=256, + embedding_seg_n_layers=4, + embedding_seg_kernel_size=7, + embedding_seg_atrous_rates=None, + normalize_nearest_neighbor_distances=True, + also_attend_to_previous_frame=True, + damage_initial_previous_frame_mask=False, + use_local_previous_frame_attention=True, + previous_frame_attention_window_size=15, + use_first_frame_matching=True, + also_return_embeddings=False, + ref_embeddings=None): + """Gets the logits by atrous/image spatial pyramid pooling using attention. + + Args: + images: A tensor of size [batch, height, width, channels]. + model_options: A ModelOptions instance to configure models. 
+ weight_decay: The weight decay for model variables. + reuse: Reuse the model variables or not. + is_training: Is training or not. + fine_tune_batch_norm: Fine-tune the batch norm parameters or not. + reference_labels: The segmentation labels of the reference frame on which + attention is applied. + batch_size: Integer, the number of videos on a batch + num_frames_per_video: Integer, the number of frames per video + embedding_dimension: Integer, the dimension of the embedding + max_neighbors_per_object: Integer, the maximum number of candidates + for the nearest neighbor query per object after subsampling. + Can be 0 for no subsampling. + k_nearest_neighbors: Integer, the number of nearest neighbors to use. + use_softmax_feedback: Boolean, whether to give the softmax predictions of + the last frame as additional input to the segmentation head. + initial_softmax_feedback: List of Float32 tensors, or None. Can be used to + initialize the softmax predictions used for the feedback loop. + Only has an effect if use_softmax_feedback is True. + embedding_seg_feature_dimension: Integer, the dimensionality used in the + segmentation head layers. + embedding_seg_n_layers: Integer, the number of layers in the segmentation + head. + embedding_seg_kernel_size: Integer, the kernel size used in the + segmentation head. + embedding_seg_atrous_rates: List of integers of length + embedding_seg_n_layers, the atrous rates to use for the segmentation head. + normalize_nearest_neighbor_distances: Boolean, whether to normalize the + nearest neighbor distances to [0,1] using sigmoid, scale and shift. + also_attend_to_previous_frame: Boolean, whether to also use nearest + neighbor attention with respect to the previous frame. + damage_initial_previous_frame_mask: Boolean, whether to artificially damage + the initial previous frame mask. Only has an effect if + also_attend_to_previous_frame is True. + use_local_previous_frame_attention: Boolean, whether to restrict the + previous frame attention to a local search window. + Only has an effect, if also_attend_to_previous_frame is True. + previous_frame_attention_window_size: Integer, the window size used for + local previous frame attention, if use_local_previous_frame_attention + is True. + use_first_frame_matching: Boolean, whether to extract features by matching + to the reference frame. This should always be true except for ablation + experiments. + also_return_embeddings: Boolean, whether to return the embeddings as well. + ref_embeddings: Tuple of + (first_frame_embeddings, previous_frame_embeddings), + each of shape [batch, height, width, embedding_dimension], or None. + Returns: + outputs_to_logits: A map from output_type to logits. + If also_return_embeddings is True, it will also return an embeddings + tensor of shape [batch, height, width, embedding_dimension]. 
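+
+  A rough call sketch (all values are illustrative assumptions, not settings
+  from the paper). The frames of each video are packed along the batch axis
+  as [reference, previous, query, ...], and with the default settings
+  reference_labels is expected to hold the labels of both the reference and
+  the previous frame:
+
+    outputs_to_logits = get_logits_with_matching(
+        images, model_options, is_training=True,
+        reference_labels=first_and_previous_frame_labels, batch_size=1,
+        num_frames_per_video=3, embedding_dimension=100)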
+ """ + features, end_points = model.extract_features( + images, + model_options, + weight_decay=weight_decay, + reuse=reuse, + is_training=is_training, + fine_tune_batch_norm=fine_tune_batch_norm) + + if model_options.decoder_output_stride: + decoder_output_stride = min(model_options.decoder_output_stride) + if model_options.crop_size is None: + height = tf.shape(images)[1] + width = tf.shape(images)[2] + else: + height, width = model_options.crop_size + decoder_height = model.scale_dimension(height, 1.0 / decoder_output_stride) + decoder_width = model.scale_dimension(width, 1.0 / decoder_output_stride) + features = model.refine_by_decoder( + features, + end_points, + crop_size=[height, width], + decoder_output_stride=[decoder_output_stride], + decoder_use_separable_conv=model_options.decoder_use_separable_conv, + model_variant=model_options.model_variant, + weight_decay=weight_decay, + reuse=reuse, + is_training=is_training, + fine_tune_batch_norm=fine_tune_batch_norm) + + with tf.variable_scope('embedding', reuse=reuse): + embeddings = split_separable_conv2d_with_identity_initializer( + features, embedding_dimension, scope='split_separable_conv2d') + embeddings = tf.identity(embeddings, name='embeddings') + scaled_reference_labels = tf.image.resize_nearest_neighbor( + reference_labels, + resolve_shape(embeddings, 4)[1:3], + align_corners=True) + h, w = decoder_height, decoder_width + if num_frames_per_video is None: + num_frames_per_video = tf.size(embeddings) // ( + batch_size * h * w * embedding_dimension) + new_labels_shape = tf.stack([batch_size, -1, h, w, 1]) + reshaped_reference_labels = tf.reshape(scaled_reference_labels, + new_labels_shape) + new_embeddings_shape = tf.stack([batch_size, + num_frames_per_video, h, w, + embedding_dimension]) + reshaped_embeddings = tf.reshape(embeddings, new_embeddings_shape) + all_nn_features = [] + all_ref_obj_ids = [] + # To keep things simple, we do all this separate for each sequence for now. + for n in range(batch_size): + embedding = reshaped_embeddings[n] + if ref_embeddings is None: + n_chunks = 100 + reference_embedding = embedding[0] + if also_attend_to_previous_frame or use_softmax_feedback: + queries_embedding = embedding[2:] + else: + queries_embedding = embedding[1:] + else: + if USE_CORRELATION_COST: + n_chunks = 20 + else: + n_chunks = 500 + reference_embedding = ref_embeddings[0][n] + queries_embedding = embedding + reference_labels = reshaped_reference_labels[n][0] + nn_features_n, ref_obj_ids = nearest_neighbor_features_per_object( + reference_embedding, queries_embedding, reference_labels, + max_neighbors_per_object, k_nearest_neighbors, n_chunks=n_chunks) + if normalize_nearest_neighbor_distances: + nn_features_n = (tf.nn.sigmoid(nn_features_n) - 0.5) * 2 + all_nn_features.append(nn_features_n) + all_ref_obj_ids.append(ref_obj_ids) + + feat_dim = resolve_shape(features)[-1] + features = tf.reshape(features, tf.stack( + [batch_size, num_frames_per_video, h, w, feat_dim])) + if ref_embeddings is None: + # Strip the features for the reference frame. + if also_attend_to_previous_frame or use_softmax_feedback: + features = features[:, 2:] + else: + features = features[:, 1:] + + # To keep things simple, we do all this separate for each sequence for now. 
+ outputs_to_logits = {output: [] for + output in model_options.outputs_to_num_classes} + for n in range(batch_size): + features_n = features[n] + nn_features_n = all_nn_features[n] + nn_features_n_tr = tf.transpose(nn_features_n, [3, 0, 1, 2, 4]) + n_objs = tf.shape(nn_features_n_tr)[0] + # Repeat features for every object. + features_n_tiled = tf.tile(features_n[tf.newaxis], + multiples=[n_objs, 1, 1, 1, 1]) + prev_frame_labels = None + if also_attend_to_previous_frame: + prev_frame_labels = reshaped_reference_labels[n, 1] + if is_training and damage_initial_previous_frame_mask: + # Damage the previous frame masks. + prev_frame_labels = mask_damaging.damage_masks(prev_frame_labels, + dilate=False) + tf.summary.image('prev_frame_labels', + tf.cast(prev_frame_labels[tf.newaxis], + tf.uint8) * 32) + initial_softmax_feedback_n = create_initial_softmax_from_labels( + prev_frame_labels, reshaped_reference_labels[n][0], + decoder_output_stride=None, reduce_labels=True) + elif initial_softmax_feedback is not None: + initial_softmax_feedback_n = initial_softmax_feedback[n] + else: + initial_softmax_feedback_n = None + if initial_softmax_feedback_n is None: + last_softmax = tf.zeros((n_objs, h, w, 1), dtype=tf.float32) + else: + last_softmax = tf.transpose(initial_softmax_feedback_n, [2, 0, 1])[ + ..., tf.newaxis] + assert len(model_options.outputs_to_num_classes) == 1 + output = model_options.outputs_to_num_classes.keys()[0] + logits = [] + n_ref_frames = 1 + prev_frame_nn_features_n = None + if also_attend_to_previous_frame or use_softmax_feedback: + n_ref_frames += 1 + if ref_embeddings is not None: + n_ref_frames = 0 + for t in range(num_frames_per_video - n_ref_frames): + to_concat = [features_n_tiled[:, t]] + if use_first_frame_matching: + to_concat.append(nn_features_n_tr[:, t]) + if use_softmax_feedback: + to_concat.append(last_softmax) + if also_attend_to_previous_frame: + assert normalize_nearest_neighbor_distances, ( + 'previous frame attention currently only works when normalized ' + 'distances are used') + embedding = reshaped_embeddings[n] + if ref_embeddings is None: + last_frame_embedding = embedding[t + 1] + query_embeddings = embedding[t + 2, tf.newaxis] + else: + last_frame_embedding = ref_embeddings[1][0] + query_embeddings = embedding + if use_local_previous_frame_attention: + assert query_embeddings.shape[0] == 1 + prev_frame_nn_features_n = ( + local_previous_frame_nearest_neighbor_features_per_object( + last_frame_embedding, + query_embeddings[0], + prev_frame_labels, + all_ref_obj_ids[n], + max_distance=previous_frame_attention_window_size) + ) + else: + prev_frame_nn_features_n, _ = ( + nearest_neighbor_features_per_object( + last_frame_embedding, query_embeddings, prev_frame_labels, + max_neighbors_per_object, k_nearest_neighbors, + gt_ids=all_ref_obj_ids[n])) + prev_frame_nn_features_n = (tf.nn.sigmoid( + prev_frame_nn_features_n) - 0.5) * 2 + prev_frame_nn_features_n_sq = tf.squeeze(prev_frame_nn_features_n, + axis=0) + prev_frame_nn_features_n_tr = tf.transpose( + prev_frame_nn_features_n_sq, [2, 0, 1, 3]) + to_concat.append(prev_frame_nn_features_n_tr) + features_n_concat_t = tf.concat(to_concat, axis=-1) + embedding_seg_features_n_t = ( + create_embedding_segmentation_features( + features_n_concat_t, embedding_seg_feature_dimension, + embedding_seg_n_layers, embedding_seg_kernel_size, + reuse or n > 0, atrous_rates=embedding_seg_atrous_rates)) + logits_t = model.get_branch_logits( + embedding_seg_features_n_t, + 1, + model_options.atrous_rates, + 
aspp_with_batch_norm=model_options.aspp_with_batch_norm, + kernel_size=model_options.logits_kernel_size, + weight_decay=weight_decay, + reuse=reuse or n > 0 or t > 0, + scope_suffix=output + ) + logits.append(logits_t) + prev_frame_labels = tf.transpose(tf.argmax(logits_t, axis=0), + [2, 0, 1]) + last_softmax = tf.nn.softmax(logits_t, axis=0) + logits = tf.stack(logits, axis=1) + logits_shape = tf.stack( + [n_objs, num_frames_per_video - n_ref_frames] + + resolve_shape(logits)[2:-1]) + logits_reshaped = tf.reshape(logits, logits_shape) + logits_transposed = tf.transpose(logits_reshaped, [1, 2, 3, 0]) + outputs_to_logits[output].append(logits_transposed) + + add_image_summaries( + images[n * num_frames_per_video: (n+1) * num_frames_per_video], + nn_features_n, + logits_transposed, + batch_size=1, + prev_frame_nn_features=prev_frame_nn_features_n) + if also_return_embeddings: + return outputs_to_logits, embeddings + else: + return outputs_to_logits + + +def subsample_reference_embeddings_and_labels( + reference_embeddings_flat, reference_labels_flat, ref_obj_ids, + max_neighbors_per_object): + """Subsamples the reference embedding vectors and labels. + + After subsampling, at most max_neighbors_per_object items will remain per + class. + + Args: + reference_embeddings_flat: Tensor of shape [n, embedding_dim], + the embedding vectors for the reference frame. + reference_labels_flat: Tensor of shape [n, 1], + the class labels of the reference frame. + ref_obj_ids: An int32 tensor of the unique object ids present + in the reference labels. + max_neighbors_per_object: Integer, the maximum number of candidates + for the nearest neighbor query per object after subsampling, + or 0 for no subsampling. + + Returns: + reference_embeddings_flat: Tensor of shape [n_sub, embedding_dim], + the subsampled embedding vectors for the reference frame. + reference_labels_flat: Tensor of shape [n_sub, 1], + the class labels of the reference frame. + """ + if max_neighbors_per_object == 0: + return reference_embeddings_flat, reference_labels_flat + same_label_mask = tf.equal(reference_labels_flat[tf.newaxis, :], + ref_obj_ids[:, tf.newaxis]) + max_neighbors_per_object_repeated = tf.tile( + tf.constant(max_neighbors_per_object)[tf.newaxis], + multiples=[tf.size(ref_obj_ids)]) + # Somehow map_fn on GPU caused trouble sometimes, so let's put it on CPU + # for now. + with tf.device('cpu:0'): + subsampled_indices = tf.map_fn(_create_subsampling_mask, + (same_label_mask, + max_neighbors_per_object_repeated), + dtype=tf.int64, + name='subsample_labels_map_fn', + parallel_iterations=1) + mask = tf.not_equal(subsampled_indices, tf.constant(-1, dtype=tf.int64)) + masked_indices = tf.boolean_mask(subsampled_indices, mask) + reference_embeddings_flat = tf.gather(reference_embeddings_flat, + masked_indices) + reference_labels_flat = tf.gather(reference_labels_flat, masked_indices) + return reference_embeddings_flat, reference_labels_flat + + +def _create_subsampling_mask(args): + """Creates boolean mask which can be used to subsample the labels. + + Args: + args: tuple of (label_mask, max_neighbors_per_object), where label_mask + is the mask to be subsampled and max_neighbors_per_object is a int scalar, + the maximum number of neighbors to be retained after subsampling. + + Returns: + The boolean mask for subsampling the labels. 
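+    In the current implementation this is not a boolean mask but a 1-D int64
+    tensor of the selected flat indices, shuffled and padded with -1 entries
+    up to length max_neighbors_per_object; callers filter out the -1 padding.
+
+    For illustration (values are assumptions): with
+    label_mask = [True, False, True, True] and max_neighbors_per_object = 5,
+    a possible result is [3, 0, 2, -1, -1].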
+ """ + label_mask, max_neighbors_per_object = args + indices = tf.squeeze(tf.where(label_mask), axis=1) + shuffled_indices = tf.random_shuffle(indices) + subsampled_indices = shuffled_indices[:max_neighbors_per_object] + n_pad = max_neighbors_per_object - tf.size(subsampled_indices) + padded_label = -1 + padding = tf.fill((n_pad,), tf.constant(padded_label, dtype=tf.int64)) + padded = tf.concat([subsampled_indices, padding], axis=0) + return padded + + +def conv2d_identity_initializer(scale=1.0, mean=0, stddev=3e-2): + """Creates an identity initializer for TensorFlow conv2d. + + We add a small amount of normal noise to the initialization matrix. + Code copied from lcchen@. + + Args: + scale: The scale coefficient for the identity weight matrix. + mean: A 0-D Tensor or Python value of type `dtype`. The mean of the + truncated normal distribution. + stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation + of the truncated normal distribution. + + Returns: + An identity initializer function for TensorFlow conv2d. + """ + def _initializer(shape, dtype=tf.float32, partition_info=None): + """Returns the identity matrix scaled by `scale`. + + Args: + shape: A tuple of int32 numbers indicating the shape of the initializing + matrix. + dtype: The data type of the initializing matrix. + partition_info: (Optional) variable_scope._PartitionInfo object holding + additional information about how the variable is partitioned. This input + is not used in our case, but is required by TensorFlow. + + Returns: + A identity matrix. + + Raises: + ValueError: If len(shape) != 4, or shape[0] != shape[1], or shape[0] is + not odd, or shape[1] is not odd.. + """ + del partition_info + if len(shape) != 4: + raise ValueError('Expect shape length to be 4.') + if shape[0] != shape[1]: + raise ValueError('Expect shape[0] = shape[1].') + if shape[0] % 2 != 1: + raise ValueError('Expect shape[0] to be odd value.') + if shape[1] % 2 != 1: + raise ValueError('Expect shape[1] to be odd value.') + weights = np.zeros(shape, dtype=np.float32) + center_y = shape[0] / 2 + center_x = shape[1] / 2 + min_channel = min(shape[2], shape[3]) + for i in range(min_channel): + weights[center_y, center_x, i, i] = scale + return tf.constant(weights, dtype=dtype) + tf.truncated_normal( + shape, mean=mean, stddev=stddev, dtype=dtype) + + return _initializer + + +def split_separable_conv2d_with_identity_initializer( + inputs, + filters, + kernel_size=3, + rate=1, + weight_decay=0.00004, + scope=None): + """Splits a separable conv2d into depthwise and pointwise conv2d. + + This operation differs from `tf.layers.separable_conv2d` as this operation + applies activation function between depthwise and pointwise conv2d. + + Args: + inputs: Input tensor with shape [batch, height, width, channels]. + filters: Number of filters in the 1x1 pointwise convolution. + kernel_size: A list of length 2: [kernel_height, kernel_width] of + of the filters. Can be an int if both values are the same. + rate: Atrous convolution rate for the depthwise convolution. + weight_decay: The weight decay to use for regularizing the model. + scope: Optional scope for the operation. + + Returns: + Computed features after split separable conv2d. 
+ """ + initializer = conv2d_identity_initializer() + outputs = slim.separable_conv2d( + inputs, + None, + kernel_size=kernel_size, + depth_multiplier=1, + rate=rate, + weights_initializer=initializer, + weights_regularizer=None, + scope=scope + '_depthwise') + return slim.conv2d( + outputs, + filters, + 1, + weights_initializer=initializer, + weights_regularizer=slim.l2_regularizer(weight_decay), + scope=scope + '_pointwise') + + +def create_initial_softmax_from_labels(last_frame_labels, reference_labels, + decoder_output_stride, reduce_labels): + """Creates initial softmax predictions from last frame labels. + + Args: + last_frame_labels: last frame labels of shape [1, height, width, 1]. + reference_labels: reference frame labels of shape [1, height, width, 1]. + decoder_output_stride: Integer, the stride of the decoder. Can be None, in + this case it's assumed that the last_frame_labels and reference_labels + are already scaled to the decoder output resolution. + reduce_labels: Boolean, whether to reduce the depth of the softmax one_hot + encoding to the actual number of labels present in the reference frame + (otherwise the depth will be the highest label index + 1). + + Returns: + init_softmax: the initial softmax predictions. + """ + if decoder_output_stride is None: + labels_output_size = last_frame_labels + reference_labels_output_size = reference_labels + else: + h = tf.shape(last_frame_labels)[1] + w = tf.shape(last_frame_labels)[2] + h_sub = model.scale_dimension(h, 1.0 / decoder_output_stride) + w_sub = model.scale_dimension(w, 1.0 / decoder_output_stride) + labels_output_size = tf.image.resize_nearest_neighbor( + last_frame_labels, [h_sub, w_sub], align_corners=True) + reference_labels_output_size = tf.image.resize_nearest_neighbor( + reference_labels, [h_sub, w_sub], align_corners=True) + if reduce_labels: + unique_labels, _ = tf.unique(tf.reshape(reference_labels_output_size, [-1])) + depth = tf.size(unique_labels) + else: + depth = tf.reduce_max(reference_labels_output_size) + 1 + one_hot_assertion = tf.assert_less(tf.reduce_max(labels_output_size), depth) + with tf.control_dependencies([one_hot_assertion]): + init_softmax = tf.one_hot(tf.squeeze(labels_output_size, + axis=-1), + depth=depth, + dtype=tf.float32) + return init_softmax + + +def local_previous_frame_nearest_neighbor_features_per_object( + prev_frame_embedding, query_embedding, prev_frame_labels, + gt_ids, max_distance=9): + """Computes nearest neighbor features while only allowing local matches. + + Args: + prev_frame_embedding: Tensor of shape [height, width, embedding_dim], + the embedding vectors for the last frame. + query_embedding: Tensor of shape [height, width, embedding_dim], + the embedding vectors for the query frames. + prev_frame_labels: Tensor of shape [height, width, 1], the class labels of + the previous frame. + gt_ids: Int Tensor of shape [n_objs] of the sorted unique ground truth + ids in the first frame. + max_distance: Integer, the maximum distance allowed for local matching. + + Returns: + nn_features: A float32 np.array of nearest neighbor features of shape + [1, height, width, n_objects, 1]. + """ + with tf.name_scope( + 'local_previous_frame_nearest_neighbor_features_per_object'): + if USE_CORRELATION_COST: + tf.logging.info('Using correlation_cost.') + d = local_pairwise_distances(query_embedding, prev_frame_embedding, + max_distance=max_distance) + else: + # Slow fallback in case correlation_cost is not available. 
+ tf.logging.warn('correlation cost is not available, using slow fallback ' + 'implementation.') + d = local_pairwise_distances2(query_embedding, prev_frame_embedding, + max_distance=max_distance) + d = (tf.nn.sigmoid(d) - 0.5) * 2 + height = tf.shape(prev_frame_embedding)[0] + width = tf.shape(prev_frame_embedding)[1] + + # Create offset versions of the mask. + if USE_CORRELATION_COST: + # New, faster code with cross-correlation via correlation_cost. + # Due to padding we have to add 1 to the labels. + offset_labels = correlation_cost_op.correlation_cost( + tf.ones((1, height, width, 1)), + tf.cast(prev_frame_labels + 1, tf.float32)[tf.newaxis], + kernel_size=1, + max_displacement=max_distance, stride_1=1, stride_2=1, + pad=max_distance) + offset_labels = tf.squeeze(offset_labels, axis=0)[..., tf.newaxis] + # Subtract the 1 again and round. + offset_labels = tf.round(offset_labels - 1) + offset_masks = tf.equal( + offset_labels, + tf.cast(gt_ids, tf.float32)[tf.newaxis, tf.newaxis, tf.newaxis, :]) + else: + # Slower code, without dependency to correlation_cost + masks = tf.equal(prev_frame_labels, gt_ids[tf.newaxis, tf.newaxis]) + padded_masks = tf.pad(masks, + [[max_distance, max_distance], + [max_distance, max_distance], + [0, 0]]) + offset_masks = [] + for y_start in range(2 * max_distance + 1): + y_end = y_start + height + masks_slice = padded_masks[y_start:y_end] + for x_start in range(2 * max_distance + 1): + x_end = x_start + width + offset_mask = masks_slice[:, x_start:x_end] + offset_masks.append(offset_mask) + offset_masks = tf.stack(offset_masks, axis=2) + + pad = tf.ones((height, width, (2 * max_distance + 1) ** 2, tf.size(gt_ids))) + d_tiled = tf.tile(d[..., tf.newaxis], multiples=(1, 1, 1, tf.size(gt_ids))) + d_masked = tf.where(offset_masks, d_tiled, pad) + dists = tf.reduce_min(d_masked, axis=2) + dists = tf.reshape(dists, (1, height, width, tf.size(gt_ids), 1)) + return dists diff --git a/models/research/feelvos/utils/embedding_utils_test.py b/models/research/feelvos/utils/embedding_utils_test.py new file mode 100644 index 0000000000000000000000000000000000000000..ddebd7b4e7fcc9402887ebf59d247fea815d6cda --- /dev/null +++ b/models/research/feelvos/utils/embedding_utils_test.py @@ -0,0 +1,213 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for embedding utils.""" + +import unittest +import numpy as np +import tensorflow as tf +from feelvos.utils import embedding_utils + +if embedding_utils.USE_CORRELATION_COST: + # pylint: disable=g-import-not-at-top + from correlation_cost.python.ops import correlation_cost_op + + +class EmbeddingUtilsTest(tf.test.TestCase): + + def test_pairwise_distances(self): + x = np.arange(100, dtype=np.float32).reshape(20, 5) + y = np.arange(100, 200, dtype=np.float32).reshape(20, 5) + g = tf.Graph() + with g.as_default(): + with self.test_session(graph=g) as sess: + x = tf.constant(x) + y = tf.constant(y) + d1 = embedding_utils.pairwise_distances(x, y) + d2 = embedding_utils.pairwise_distances2(x, y) + d1_val, d2_val = sess.run([d1, d2]) + self.assertAllClose(d1_val, d2_val) + + @unittest.skipIf(not embedding_utils.USE_CORRELATION_COST, + 'depends on correlation_cost') + def test_correlation_cost_one_dimensional(self): + a = np.array([[[[1.0], [2.0]], [[3.0], [4.0]]]]) + b = np.array([[[[2.0], [1.0]], [[4.0], [3.0]]]]) + g = tf.Graph() + with g.as_default(): + with self.test_session(graph=g) as sess: + c = correlation_cost_op.correlation_cost( + a, b, kernel_size=1, max_displacement=1, stride_1=1, stride_2=1, + pad=1) + c = tf.squeeze(c, axis=0) + c_val = sess.run(c) + self.assertAllEqual(c_val.shape, (2, 2, 9)) + for y in range(2): + for x in range(2): + for dy in range(-1, 2): + for dx in range(-1, 2): + a_slice = a[0, y, x, 0] + if y + dy < 0 or y + dy > 1 or x + dx < 0 or x + dx > 1: + b_slice = 0 + else: + b_slice = b[0, y + dy, x + dx, 0] + expected = a_slice * b_slice + dy0 = dy + 1 + dx0 = dx + 1 + self.assertAlmostEqual(c_val[y, x, 3 * dy0 + dx0], expected) + + @unittest.skipIf(not embedding_utils.USE_CORRELATION_COST, + 'depends on correlation_cost') + def test_correlation_cost_two_dimensional(self): + a = np.array([[[[1.0, -5.0], [7.0, 2.0]], [[1.0, 3.0], [3.0, 4.0]]]]) + b = np.array([[[[2.0, 1.0], [0.0, -9.0]], [[4.0, 3.0], [3.0, 1.0]]]]) + g = tf.Graph() + with g.as_default(): + with self.test_session(graph=g) as sess: + c = correlation_cost_op.correlation_cost( + a, b, kernel_size=1, max_displacement=1, stride_1=1, stride_2=1, + pad=1) + c = tf.squeeze(c, axis=0) + c_val = sess.run(c) + self.assertAllEqual(c_val.shape, (2, 2, 9)) + for y in range(2): + for x in range(2): + for dy in range(-1, 2): + for dx in range(-1, 2): + a_slice = a[0, y, x, :] + if y + dy < 0 or y + dy > 1 or x + dx < 0 or x + dx > 1: + b_slice = 0 + else: + b_slice = b[0, y + dy, x + dx, :] + expected = (a_slice * b_slice).mean() + dy0 = dy + 1 + dx0 = dx + 1 + self.assertAlmostEqual(c_val[y, x, 3 * dy0 + dx0], expected) + + @unittest.skipIf(not embedding_utils.USE_CORRELATION_COST, + 'depends on correlation_cost') + def test_local_pairwise_distances_one_dimensional(self): + a = np.array([[[1.0], [2.0]], [[3.0], [4.0]]]) + b = np.array([[[2.0], [1.0]], [[4.0], [3.0]]]) + g = tf.Graph() + with g.as_default(): + with self.test_session(graph=g) as sess: + a_tf = tf.constant(a, dtype=tf.float32) + b_tf = tf.constant(b, dtype=tf.float32) + d = embedding_utils.local_pairwise_distances(a_tf, b_tf, + max_distance=1) + d_val = sess.run(d) + for y in range(2): + for x in range(2): + for dy in range(-1, 2): + for dx in range(-1, 2): + a_slice = a[y, x, 0] + if y + dy < 0 or y + dy > 1 or x + dx < 0 or x + dx > 1: + expected = np.float('inf') + else: + b_slice = b[y + dy, x + dx, 0] + expected = (a_slice - b_slice) ** 2 + dy0 = dy + 
1 + dx0 = dx + 1 + self.assertAlmostEqual(d_val[y, x, 3 * dy0 + dx0], expected) + + @unittest.skipIf(not embedding_utils.USE_CORRELATION_COST, + 'depends on correlation_cost') + def test_local_pairwise_distances_shape(self): + a = np.zeros((4, 5, 2)) + b = np.zeros((4, 5, 2)) + g = tf.Graph() + with g.as_default(): + with self.test_session(graph=g) as sess: + a_tf = tf.constant(a, dtype=tf.float32) + b_tf = tf.constant(b, dtype=tf.float32) + d = embedding_utils.local_pairwise_distances(a_tf, b_tf, max_distance=4) + d_val = sess.run(d) + self.assertAllEqual(d_val.shape, (4, 5, 81)) + + @unittest.skipIf(not embedding_utils.USE_CORRELATION_COST, + 'depends on correlation_cost') + def test_local_pairwise_distances_two_dimensional(self): + a = np.array([[[1.0, -5.0], [7.0, 2.0]], [[1.0, 3.0], [3.0, 4.0]]]) + b = np.array([[[2.0, 1.0], [0.0, -9.0]], [[4.0, 3.0], [3.0, 1.0]]]) + g = tf.Graph() + with g.as_default(): + with self.test_session(graph=g) as sess: + a_tf = tf.constant(a, dtype=tf.float32) + b_tf = tf.constant(b, dtype=tf.float32) + d = embedding_utils.local_pairwise_distances(a_tf, b_tf, + max_distance=1) + d_val = sess.run(d) + for y in range(2): + for x in range(2): + for dy in range(-1, 2): + for dx in range(-1, 2): + a_slice = a[y, x, :] + if y + dy < 0 or y + dy > 1 or x + dx < 0 or x + dx > 1: + expected = np.float('inf') + else: + b_slice = b[y + dy, x + dx, :] + expected = ((a_slice - b_slice) ** 2).sum() + dy0 = dy + 1 + dx0 = dx + 1 + self.assertAlmostEqual(d_val[y, x, 3 * dy0 + dx0], expected) + + @unittest.skipIf(not embedding_utils.USE_CORRELATION_COST, + 'depends on correlation_cost') + def test_local_previous_frame_nearest_neighbor_features_per_object(self): + prev_frame_embedding = np.array([[[1.0, -5.0], [7.0, 2.0]], + [[1.0, 3.0], [3.0, 4.0]]]) / 10 + query_embedding = np.array([[[2.0, 1.0], [0.0, -9.0]], + [[4.0, 3.0], [3.0, 1.0]]]) / 10 + prev_frame_labels = np.array([[[0], [1]], [[1], [0]]]) + gt_ids = np.array([0, 1]) + g = tf.Graph() + with g.as_default(): + with self.test_session(graph=g) as sess: + prev_frame_embedding_tf = tf.constant(prev_frame_embedding, + dtype=tf.float32) + query_embedding_tf = tf.constant(query_embedding, dtype=tf.float32) + embu = embedding_utils + dists = ( + embu.local_previous_frame_nearest_neighbor_features_per_object( + prev_frame_embedding_tf, query_embedding_tf, + prev_frame_labels, gt_ids, max_distance=1)) + dists = tf.squeeze(dists, axis=4) + dists = tf.squeeze(dists, axis=0) + dists_val = sess.run(dists) + for obj_id in gt_ids: + for y in range(2): + for x in range(2): + curr_min = 1.0 + for dy in range(-1, 2): + for dx in range(-1, 2): + # Attention: here we shift the prev frame embedding, + # not the query. 
+ if y + dy < 0 or y + dy > 1 or x + dx < 0 or x + dx > 1: + continue + if prev_frame_labels[y + dy, x + dx, 0] != obj_id: + continue + prev_frame_slice = prev_frame_embedding[y + dy, x + dx, :] + query_frame_slice = query_embedding[y, x, :] + v_unnorm = ((prev_frame_slice - query_frame_slice) ** 2).sum() + v = ((1.0 / (1.0 + np.exp(-v_unnorm))) - 0.5) * 2 + curr_min = min(curr_min, v) + expected = curr_min + self.assertAlmostEqual(dists_val[y, x, obj_id], expected, + places=5) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/feelvos/utils/eval_utils.py b/models/research/feelvos/utils/eval_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..517ec0d788eb3a6ec48246e10920dd4b55332bf5 --- /dev/null +++ b/models/research/feelvos/utils/eval_utils.py @@ -0,0 +1,153 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Utility functions for evaluations.""" + +import numpy as np +import PIL +import tensorflow as tf + +pascal_colormap = [ + 0, 0, 0, + 0.5020, 0, 0, + 0, 0.5020, 0, + 0.5020, 0.5020, 0, + 0, 0, 0.5020, + 0.5020, 0, 0.5020, + 0, 0.5020, 0.5020, + 0.5020, 0.5020, 0.5020, + 0.2510, 0, 0, + 0.7529, 0, 0, + 0.2510, 0.5020, 0, + 0.7529, 0.5020, 0, + 0.2510, 0, 0.5020, + 0.7529, 0, 0.5020, + 0.2510, 0.5020, 0.5020, + 0.7529, 0.5020, 0.5020, + 0, 0.2510, 0, + 0.5020, 0.2510, 0, + 0, 0.7529, 0, + 0.5020, 0.7529, 0, + 0, 0.2510, 0.5020, + 0.5020, 0.2510, 0.5020, + 0, 0.7529, 0.5020, + 0.5020, 0.7529, 0.5020, + 0.2510, 0.2510, 0] + + +def save_segmentation_with_colormap(filename, img): + """Saves a segmentation with the pascal colormap as expected for DAVIS eval. + + Args: + filename: Where to store the segmentation. + img: A numpy array of the segmentation to be saved. + """ + if img.shape[-1] == 1: + img = img[..., 0] + + # Save with colormap. + colormap = (np.array(pascal_colormap) * 255).round().astype('uint8') + colormap_image = PIL.Image.new('P', (16, 16)) + colormap_image.putpalette(colormap) + pil_image = PIL.Image.fromarray(img.astype('uint8')) + pil_image_with_colormap = pil_image.quantize(palette=colormap_image) + with tf.gfile.GFile(filename, 'w') as f: + pil_image_with_colormap.save(f) + + +def save_embeddings(filename, embeddings): + with tf.gfile.GFile(filename, 'w') as f: + np.save(f, embeddings) + + +def calculate_iou(pred_labels, ref_labels): + """Calculates the intersection over union for binary segmentation. + + Args: + pred_labels: predicted segmentation labels. + ref_labels: reference segmentation labels. 
+ + Returns: + The IoU between pred_labels and ref_labels + """ + if ref_labels.any(): + i = np.logical_and(pred_labels, ref_labels).sum() + u = np.logical_or(pred_labels, ref_labels).sum() + return i.astype('float') / u + else: + if pred_labels.any(): + return 0.0 + else: + return 1.0 + + +def calculate_multi_object_miou_tf(pred_labels, ref_labels): + """Calculates the mIoU for a batch of predicted and reference labels. + + Args: + pred_labels: Int32 tensor of shape [batch, height, width, 1]. + ref_labels: Int32 tensor of shape [batch, height, width, 1]. + + Returns: + The mIoU between pred_labels and ref_labels as float32 scalar tensor. + """ + + def calculate_multi_object_miou(pred_labels_, ref_labels_): + """Calculates the mIoU for predicted and reference labels in numpy. + + Args: + pred_labels_: int32 np.array of shape [batch, height, width, 1]. + ref_labels_: int32 np.array of shape [batch, height, width, 1]. + + Returns: + The mIoU between pred_labels_ and ref_labels_. + """ + assert len(pred_labels_.shape) == 4 + assert pred_labels_.shape[3] == 1 + assert pred_labels_.shape == ref_labels_.shape + ious = [] + for pred_label, ref_label in zip(pred_labels_, ref_labels_): + ids = np.setdiff1d(np.unique(ref_label), [0]) + if ids.size == 0: + continue + for id_ in ids: + iou = calculate_iou(pred_label == id_, ref_label == id_) + ious.append(iou) + if ious: + return np.cast['float32'](np.mean(ious)) + else: + return np.cast['float32'](1.0) + + miou = tf.py_func(calculate_multi_object_miou, [pred_labels, ref_labels], + tf.float32, name='calculate_multi_object_miou') + miou.set_shape(()) + return miou + + +def calculate_multi_object_ious(pred_labels, ref_labels, label_set): + """Calculates the intersection over union for binary segmentation. + + Args: + pred_labels: predicted segmentation labels. + ref_labels: reference segmentation labels. + label_set: int np.array of object ids. + + Returns: + float np.array of IoUs between pred_labels and ref_labels + for each object in label_set. + """ + # Background should not be included as object label. + return np.array([calculate_iou(pred_labels == label, ref_labels == label) + for label in label_set if label != 0]) diff --git a/models/research/feelvos/utils/mask_damaging.py b/models/research/feelvos/utils/mask_damaging.py new file mode 100644 index 0000000000000000000000000000000000000000..74f3cdab5a0e4374f0cd238544a9a582fd0eef92 --- /dev/null +++ b/models/research/feelvos/utils/mask_damaging.py @@ -0,0 +1,176 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Utilities for artificially damaging segmentation masks.""" + +import numpy as np +from scipy.ndimage import interpolation +from skimage import morphology +from skimage import transform +import tensorflow as tf + + +def damage_masks(labels, shift=True, scale=True, rotate=True, dilate=True): + """Damages segmentation masks by random transformations. 
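+
+  Each object mask is damaged independently by a random combination of
+  shifting, scaling, rotation and dilation (whichever of them are enabled),
+  and the damaged masks are then combined again in a random depth order.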
+ + Args: + labels: Int32 labels tensor of shape (height, width, 1). + shift: Boolean, whether to damage the masks by shifting. + scale: Boolean, whether to damage the masks by scaling. + rotate: Boolean, whether to damage the masks by rotation. + dilate: Boolean, whether to damage the masks by dilation. + + Returns: + The damaged version of labels. + """ + def _damage_masks_np(labels_): + return damage_masks_np(labels_, shift, scale, rotate, dilate) + damaged_masks = tf.py_func(_damage_masks_np, [labels], tf.int32, + name='damage_masks') + damaged_masks.set_shape(labels.get_shape()) + return damaged_masks + + +def damage_masks_np(labels, shift=True, scale=True, rotate=True, dilate=True): + """Performs the actual mask damaging in numpy. + + Args: + labels: Int32 numpy array of shape (height, width, 1). + shift: Boolean, whether to damage the masks by shifting. + scale: Boolean, whether to damage the masks by scaling. + rotate: Boolean, whether to damage the masks by rotation. + dilate: Boolean, whether to damage the masks by dilation. + + Returns: + The damaged version of labels. + """ + unique_labels = np.unique(labels) + unique_labels = np.setdiff1d(unique_labels, [0]) + # Shuffle to get random depth ordering when combining together. + np.random.shuffle(unique_labels) + damaged_labels = np.zeros_like(labels) + for l in unique_labels: + obj_mask = (labels == l) + damaged_obj_mask = _damage_single_object_mask(obj_mask, shift, scale, + rotate, dilate) + damaged_labels[damaged_obj_mask] = l + return damaged_labels + + +def _damage_single_object_mask(mask, shift, scale, rotate, dilate): + """Performs mask damaging in numpy for a single object. + + Args: + mask: Boolean numpy array of shape(height, width, 1). + shift: Boolean, whether to damage the masks by shifting. + scale: Boolean, whether to damage the masks by scaling. + rotate: Boolean, whether to damage the masks by rotation. + dilate: Boolean, whether to damage the masks by dilation. + + Returns: + The damaged version of mask. + """ + # For now we just do shifting and scaling. Better would be Affine or thin + # spline plate transformations. + if shift: + mask = _shift_mask(mask) + if scale: + mask = _scale_mask(mask) + if rotate: + mask = _rotate_mask(mask) + if dilate: + mask = _dilate_mask(mask) + return mask + + +def _shift_mask(mask, max_shift_factor=0.05): + """Damages a mask for a single object by randomly shifting it in numpy. + + Args: + mask: Boolean numpy array of shape(height, width, 1). + max_shift_factor: Float scalar, the maximum factor for random shifting. + + Returns: + The shifted version of mask. + """ + nzy, nzx, _ = mask.nonzero() + h = nzy.max() - nzy.min() + w = nzx.max() - nzx.min() + size = np.sqrt(h * w) + offset = np.random.uniform(-size * max_shift_factor, size * max_shift_factor, + 2) + shifted_mask = interpolation.shift(np.squeeze(mask, axis=2), + offset, order=0).astype('bool')[..., + np.newaxis] + return shifted_mask + + +def _scale_mask(mask, scale_amount=0.025): + """Damages a mask for a single object by randomly scaling it in numpy. + + Args: + mask: Boolean numpy array of shape(height, width, 1). + scale_amount: Float scalar, the maximum factor for random scaling. + + Returns: + The scaled version of mask. 
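+    The mask is warped with a scale factor drawn uniformly from
+    [1 - scale_amount, 1 + scale_amount].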
+ """ + nzy, nzx, _ = mask.nonzero() + cy = 0.5 * (nzy.max() - nzy.min()) + cx = 0.5 * (nzx.max() - nzx.min()) + scale_factor = np.random.uniform(1.0 - scale_amount, 1.0 + scale_amount) + shift = transform.SimilarityTransform(translation=[-cx, -cy]) + inv_shift = transform.SimilarityTransform(translation=[cx, cy]) + s = transform.SimilarityTransform(scale=[scale_factor, scale_factor]) + m = (shift + (s + inv_shift)).inverse + scaled_mask = transform.warp(mask, m) > 0.5 + return scaled_mask + + +def _rotate_mask(mask, max_rot_degrees=3.0): + """Damages a mask for a single object by randomly rotating it in numpy. + + Args: + mask: Boolean numpy array of shape(height, width, 1). + max_rot_degrees: Float scalar, the maximum number of degrees to rotate. + + Returns: + The scaled version of mask. + """ + cy = 0.5 * mask.shape[0] + cx = 0.5 * mask.shape[1] + rot_degrees = np.random.uniform(-max_rot_degrees, max_rot_degrees) + shift = transform.SimilarityTransform(translation=[-cx, -cy]) + inv_shift = transform.SimilarityTransform(translation=[cx, cy]) + r = transform.SimilarityTransform(rotation=np.deg2rad(rot_degrees)) + m = (shift + (r + inv_shift)).inverse + scaled_mask = transform.warp(mask, m) > 0.5 + return scaled_mask + + +def _dilate_mask(mask, dilation_radius=5): + """Damages a mask for a single object by dilating it in numpy. + + Args: + mask: Boolean numpy array of shape(height, width, 1). + dilation_radius: Integer, the radius of the used disk structure element. + + Returns: + The dilated version of mask. + """ + disk = morphology.disk(dilation_radius, dtype=np.bool) + dilated_mask = morphology.binary_dilation( + np.squeeze(mask, axis=2), selem=disk)[..., np.newaxis] + return dilated_mask diff --git a/models/research/feelvos/utils/train_utils.py b/models/research/feelvos/utils/train_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..02a04cd33645931c5c795bef8559c0d3f5c4c23c --- /dev/null +++ b/models/research/feelvos/utils/train_utils.py @@ -0,0 +1,269 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Utility functions for training.""" +import collections +import six +import tensorflow as tf + +from deeplab.core import preprocess_utils +from deeplab.utils import train_utils +from feelvos.utils import embedding_utils +from feelvos.utils import eval_utils + +slim = tf.contrib.slim +add_softmax_cross_entropy_loss_for_each_scale = ( + train_utils.add_softmax_cross_entropy_loss_for_each_scale) +get_model_gradient_multipliers = train_utils.get_model_gradient_multipliers +get_model_learning_rate = train_utils.get_model_learning_rate +resolve_shape = preprocess_utils.resolve_shape + + +def add_triplet_loss_for_each_scale(batch_size, num_frames_per_video, + embedding_dim, scales_to_embeddings, + labels, scope): + """Adds triplet loss for logits of each scale. 
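+
+  For each video chunk in the batch, a fixed number of anchor pixels is
+  sampled from the embedding of the first (reference) frame. Positives and
+  negatives are drawn from the remaining frames, and a hinge loss with margin
+  1.0 on the minimum positive and minimum negative distance is averaged over
+  the anchors and the batch and (after scaling) added to tf.GraphKeys.LOSSES.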
+ + Args: + batch_size: Int, the number of video chunks sampled per batch + num_frames_per_video: Int, the number of frames per video. + embedding_dim: Int, the dimension of the learned embedding + scales_to_embeddings: A map from embedding names for different scales to + embeddings. The embeddings have shape [batch, embeddings_height, + embeddings_width, embedding_dim]. + labels: Groundtruth labels with shape [batch, image_height, image_width, 1]. + scope: String, the scope for the loss. + + Raises: + ValueError: labels is None. + """ + if labels is None: + raise ValueError('No label for triplet loss.') + for scale, embeddings in scales_to_embeddings.iteritems(): + loss_scope = None + if scope: + loss_scope = '%s_%s' % (scope, scale) + # Label is downsampled to the same size as logits. + scaled_labels = tf.image.resize_nearest_neighbor( + labels, + resolve_shape(embeddings, 4)[1:3], + align_corners=True) + # Reshape from [batch * num_frames, ...] to [batch, num_frames, ...]. + h = tf.shape(embeddings)[1] + w = tf.shape(embeddings)[2] + new_labels_shape = tf.stack([batch_size, num_frames_per_video, h, w, 1]) + reshaped_labels = tf.reshape(scaled_labels, new_labels_shape) + new_embeddings_shape = tf.stack([batch_size, num_frames_per_video, h, w, + -1]) + reshaped_embeddings = tf.reshape(embeddings, new_embeddings_shape) + + with tf.name_scope(loss_scope): + total_loss = tf.constant(0, dtype=tf.float32) + for n in range(batch_size): + embedding = reshaped_embeddings[n] + label = reshaped_labels[n] + n_pixels = h * w + n_anchors_used = 256 + sampled_anchor_indices = tf.random_shuffle(tf.range(n_pixels))[ + :n_anchors_used] + anchors_pool = tf.reshape(embedding[0], [-1, embedding_dim]) + anchors_pool_classes = tf.reshape(label[0], [-1]) + anchors = tf.gather(anchors_pool, sampled_anchor_indices) + anchor_classes = tf.gather(anchors_pool_classes, sampled_anchor_indices) + + pos_neg_pool = tf.reshape(embedding[1:], [-1, embedding_dim]) + pos_neg_pool_classes = tf.reshape(label[1:], [-1]) + dists = embedding_utils.pairwise_distances(anchors, pos_neg_pool) + pos_mask = tf.equal(anchor_classes[:, tf.newaxis], + pos_neg_pool_classes[tf.newaxis, :]) + neg_mask = tf.logical_not(pos_mask) + pos_mask_f = tf.cast(pos_mask, tf.float32) + neg_mask_f = tf.cast(neg_mask, tf.float32) + pos_dists = pos_mask_f * dists + 1e20 * neg_mask_f + neg_dists = neg_mask_f * dists + 1e20 * pos_mask_f + pos_dists_min = tf.reduce_min(pos_dists, axis=1) + neg_dists_min = tf.reduce_min(neg_dists, axis=1) + margin = 1.0 + loss = tf.nn.relu(pos_dists_min - neg_dists_min + margin) + # Handle case that no positive is present (per anchor). + any_pos = tf.reduce_any(pos_mask, axis=1) + loss *= tf.cast(any_pos, tf.float32) + # Average over anchors + loss = tf.reduce_mean(loss, axis=0) + total_loss += loss + total_loss /= batch_size + # Scale the loss up a bit. + total_loss *= 3.0 + tf.add_to_collection(tf.GraphKeys.LOSSES, total_loss) + + +def add_dynamic_softmax_cross_entropy_loss_for_each_scale( + scales_to_logits, labels, ignore_label, loss_weight=1.0, + upsample_logits=True, scope=None, top_k_percent_pixels=1.0, + hard_example_mining_step=100000): + """Adds softmax cross entropy loss per scale for logits with varying classes. + + Also adds summaries for mIoU. + + Args: + scales_to_logits: A map from logits names for different scales to logits. + The logits are a list of length batch_size of tensors of shape + [time, logits_height, logits_width, num_classes]. 
+ labels: Groundtruth labels with shape [batch_size * time, image_height, + image_width, 1]. + ignore_label: Integer, label to ignore. + loss_weight: Float, loss weight. + upsample_logits: Boolean, upsample logits or not. + scope: String, the scope for the loss. + top_k_percent_pixels: A float, the value lies in [0.0, 1.0]. When its + value < 1.0, only compute the loss for the top k percent pixels (e.g., + the top 20% pixels). This is useful for hard pixel mining. + hard_example_mining_step: An integer, the training step in which the + hard exampling mining kicks off. Note that we gradually reduce the + mining percent to the top_k_percent_pixels. For example, if + hard_example_mining_step=100K and top_k_percent_pixels=0.25, then + mining percent will gradually reduce from 100% to 25% until 100K steps + after which we only mine top 25% pixels. + + Raises: + ValueError: Label or logits is None. + """ + if labels is None: + raise ValueError('No label for softmax cross entropy loss.') + + if top_k_percent_pixels < 0 or top_k_percent_pixels > 1: + raise ValueError('Unexpected value of top_k_percent_pixels.') + + for scale, logits in six.iteritems(scales_to_logits): + loss_scope = None + if scope: + loss_scope = '%s_%s' % (scope, scale) + + if upsample_logits: + # Label is not downsampled, and instead we upsample logits. + assert isinstance(logits, collections.Sequence) + logits = [tf.image.resize_bilinear( + x, + preprocess_utils.resolve_shape(labels, 4)[1:3], + align_corners=True) for x in logits] + scaled_labels = labels + else: + # Label is downsampled to the same size as logits. + assert isinstance(logits, collections.Sequence) + scaled_labels = tf.image.resize_nearest_neighbor( + labels, + preprocess_utils.resolve_shape(logits[0], 4)[1:3], + align_corners=True) + + batch_size = len(logits) + num_time = preprocess_utils.resolve_shape(logits[0])[0] + reshaped_labels = tf.reshape( + scaled_labels, ([batch_size, num_time] + + preprocess_utils.resolve_shape(scaled_labels)[1:])) + for n, logits_n in enumerate(logits): + labels_n = reshaped_labels[n] + labels_n = tf.reshape(labels_n, shape=[-1]) + not_ignore_mask = tf.to_float(tf.not_equal(labels_n, + ignore_label)) * loss_weight + num_classes_n = tf.shape(logits_n)[-1] + one_hot_labels = slim.one_hot_encoding( + labels_n, num_classes_n, on_value=1.0, off_value=0.0) + logits_n_flat = tf.reshape(logits_n, shape=[-1, num_classes_n]) + if top_k_percent_pixels == 1.0: + tf.losses.softmax_cross_entropy( + one_hot_labels, + logits_n_flat, + weights=not_ignore_mask, + scope=loss_scope) + else: + # Only compute the loss for top k percent pixels. + # First, compute the loss for all pixels. Note we do not put the loss + # to loss_collection and set reduction = None to keep the shape. + num_pixels = tf.to_float(tf.shape(logits_n_flat)[0]) + pixel_losses = tf.losses.softmax_cross_entropy( + one_hot_labels, + logits_n_flat, + weights=not_ignore_mask, + scope='pixel_losses', + loss_collection=None, + reduction=tf.losses.Reduction.NONE) + # Compute the top_k_percent pixels based on current training step. + if hard_example_mining_step == 0: + # Directly focus on the top_k pixels. + top_k_pixels = tf.to_int32(top_k_percent_pixels * num_pixels) + else: + # Gradually reduce the mining percent to top_k_percent_pixels. 
+ global_step = tf.to_float(tf.train.get_or_create_global_step()) + ratio = tf.minimum(1.0, global_step / hard_example_mining_step) + top_k_pixels = tf.to_int32( + (ratio * top_k_percent_pixels + (1.0 - ratio)) * num_pixels) + _, top_k_indices = tf.nn.top_k(pixel_losses, + k=top_k_pixels, + sorted=True, + name='top_k_percent_pixels') + # Compute the loss for the top k percent pixels. + tf.losses.softmax_cross_entropy( + tf.gather(one_hot_labels, top_k_indices), + tf.gather(logits_n_flat, top_k_indices), + weights=tf.gather(not_ignore_mask, top_k_indices), + scope=loss_scope) + + pred_n = tf.argmax(logits_n, axis=-1, output_type=tf.int32)[ + ..., tf.newaxis] + labels_n = labels[n * num_time: (n + 1) * num_time] + miou = eval_utils.calculate_multi_object_miou_tf(pred_n, labels_n) + tf.summary.scalar('miou', miou) + + +def get_model_init_fn(train_logdir, + tf_initial_checkpoint, + initialize_last_layer, + last_layers, + ignore_missing_vars=False): + """Gets the function initializing model variables from a checkpoint. + + Args: + train_logdir: Log directory for training. + tf_initial_checkpoint: TensorFlow checkpoint for initialization. + initialize_last_layer: Initialize last layer or not. + last_layers: Last layers of the model. + ignore_missing_vars: Ignore missing variables in the checkpoint. + + Returns: + Initialization function. + """ + if tf_initial_checkpoint is None: + tf.logging.info('Not initializing the model from a checkpoint.') + return None + + if tf.train.latest_checkpoint(train_logdir): + tf.logging.info('Ignoring initialization; other checkpoint exists') + return None + + tf.logging.info('Initializing model from path: %s', tf_initial_checkpoint) + + # Variables that will not be restored. + exclude_list = ['global_step'] + if not initialize_last_layer: + exclude_list.extend(last_layers) + + variables_to_restore = slim.get_variables_to_restore(exclude=exclude_list) + + if variables_to_restore: + return slim.assign_from_checkpoint_fn( + tf_initial_checkpoint, + variables_to_restore, + ignore_missing_vars=ignore_missing_vars) + return None diff --git a/models/research/feelvos/utils/video_input_generator.py b/models/research/feelvos/utils/video_input_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..c0135e50110c677865217c8a3f13d1d1d891f0b2 --- /dev/null +++ b/models/research/feelvos/utils/video_input_generator.py @@ -0,0 +1,558 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Wrapper for providing semantic segmentation video data.""" + +import tensorflow as tf +from feelvos import input_preprocess +from feelvos import model +from feelvos.utils import mask_damaging +from feelvos.utils import train_utils + +slim = tf.contrib.slim +dataset_data_provider = slim.dataset_data_provider + + +MIN_LABEL_COUNT = 10 + + +def decode_image_sequence(tensor, image_format='jpeg', shape=None, + channels=3, raw_dtype=tf.uint8): + """Decodes a sequence of images. + + Args: + tensor: the tensor of strings to decode, shape: [num_images] + image_format: a string (possibly tensor) with the format of the image. + Options include 'jpeg', 'png', and 'raw'. + shape: a list or tensor of the decoded image shape for a single image. + channels: if 'shape' is None, the third dimension of the image is set to + this value. + raw_dtype: if the image is encoded as raw bytes, this is the method of + decoding the bytes into values. + Returns: + The decoded images with shape [time, height, width, channels]. + """ + handler = slim.tfexample_decoder.Image( + shape=shape, channels=channels, dtype=raw_dtype, repeated=True) + return handler.tensors_to_item({'image/encoded': tensor, + 'image/format': image_format}) + + +def _get_data(data_provider, dataset_split, video_frames_are_decoded): + """Gets data from data provider. + + Args: + data_provider: An object of slim.data_provider. + dataset_split: Dataset split. + video_frames_are_decoded: Boolean, whether the video frames are already + decoded + + Returns: + image: Image Tensor. + label: Label Tensor storing segmentation annotations. + object_label: An integer refers to object_label according to labelmap. If + the example has more than one object_label, take the first one. + image_name: Image name. + height: Image height. + width: Image width. + video_id: String tensor representing the name of the video. + + Raises: + ValueError: Failed to find label. + """ + + if video_frames_are_decoded: + image, = data_provider.get(['image']) + else: + image, = data_provider.get(['image/encoded']) + + # Some datasets do not contain image_name. + if 'image_name' in data_provider.list_items(): + image_name, = data_provider.get(['image_name']) + else: + image_name = tf.constant('') + + height, width = data_provider.get(['height', 'width']) + + label = None + if dataset_split != 'test': + if video_frames_are_decoded: + if 'labels_class' not in data_provider.list_items(): + raise ValueError('Failed to find labels.') + label, = data_provider.get(['labels_class']) + else: + key = 'segmentation/object/encoded' + if key not in data_provider.list_items(): + raise ValueError('Failed to find labels.') + label, = data_provider.get([key]) + + object_label = None + video_id, = data_provider.get(['video_id']) + + return image, label, object_label, image_name, height, width, video_id + + +def _has_foreground_and_background_in_first_frame(label, subsampling_factor): + """Checks if the labels have foreground and background in the first frame. + + Args: + label: Label tensor of shape [num_frames, height, width, 1]. + subsampling_factor: Integer, the subsampling factor. + + Returns: + Boolean, whether the labels have foreground and background in the first + frame. 
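+    Foreground and background each have to cover at least MIN_LABEL_COUNT
+    pixels after the first-frame label is downscaled by subsampling_factor.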
+ """ + h, w = train_utils.resolve_shape(label)[1:3] + label_downscaled = tf.squeeze( + tf.image.resize_nearest_neighbor(label[0, tf.newaxis], + [h // subsampling_factor, + w // subsampling_factor], + align_corners=True), + axis=0) + is_bg = tf.equal(label_downscaled, 0) + is_fg = tf.logical_not(is_bg) + # Just using reduce_any was not robust enough, so lets make sure the count + # is above MIN_LABEL_COUNT. + fg_count = tf.reduce_sum(tf.cast(is_fg, tf.int32)) + bg_count = tf.reduce_sum(tf.cast(is_bg, tf.int32)) + has_bg = tf.greater_equal(fg_count, MIN_LABEL_COUNT) + has_fg = tf.greater_equal(bg_count, MIN_LABEL_COUNT) + return tf.logical_and(has_bg, has_fg) + + +def _has_foreground_and_background_in_first_frame_2(label, + decoder_output_stride): + """Checks if the labels have foreground and background in the first frame. + + Second attempt, this time we use the actual output dimension for resizing. + + Args: + label: Label tensor of shape [num_frames, height, width, 1]. + decoder_output_stride: Integer, the stride of the decoder output. + + Returns: + Boolean, whether the labels have foreground and background in the first + frame. + """ + h, w = train_utils.resolve_shape(label)[1:3] + h_sub = model.scale_dimension(h, 1.0 / decoder_output_stride) + w_sub = model.scale_dimension(w, 1.0 / decoder_output_stride) + label_downscaled = tf.squeeze( + tf.image.resize_nearest_neighbor(label[0, tf.newaxis], [h_sub, w_sub], + align_corners=True), axis=0) + is_bg = tf.equal(label_downscaled, 0) + is_fg = tf.logical_not(is_bg) + # Just using reduce_any was not robust enough, so lets make sure the count + # is above MIN_LABEL_COUNT. + fg_count = tf.reduce_sum(tf.cast(is_fg, tf.int32)) + bg_count = tf.reduce_sum(tf.cast(is_bg, tf.int32)) + has_bg = tf.greater_equal(fg_count, MIN_LABEL_COUNT) + has_fg = tf.greater_equal(bg_count, MIN_LABEL_COUNT) + return tf.logical_and(has_bg, has_fg) + + +def _has_enough_pixels_of_each_object_in_first_frame( + label, decoder_output_stride): + """Checks if for each object (incl. background) enough pixels are visible. + + During test time, we will usually not see a reference frame in which only + very few pixels of one object are visible. These cases can be problematic + during training, especially if more than the 1-nearest neighbor is used. + That's why this function can be used to detect and filter these cases. + + Args: + label: Label tensor of shape [num_frames, height, width, 1]. + decoder_output_stride: Integer, the stride of the decoder output. + + Returns: + Boolean, whether the labels have enough pixels of each object in the first + frame. 
+ """ + h, w = train_utils.resolve_shape(label)[1:3] + h_sub = model.scale_dimension(h, 1.0 / decoder_output_stride) + w_sub = model.scale_dimension(w, 1.0 / decoder_output_stride) + label_downscaled = tf.squeeze( + tf.image.resize_nearest_neighbor(label[0, tf.newaxis], [h_sub, w_sub], + align_corners=True), axis=0) + _, _, counts = tf.unique_with_counts( + tf.reshape(label_downscaled, [-1])) + has_enough_pixels_per_object = tf.reduce_all( + tf.greater_equal(counts, MIN_LABEL_COUNT)) + return has_enough_pixels_per_object + + +def get(dataset, + num_frames_per_video, + crop_size, + batch_size, + min_resize_value=None, + max_resize_value=None, + resize_factor=None, + min_scale_factor=1., + max_scale_factor=1., + scale_factor_step_size=0, + preprocess_image_and_label=True, + num_readers=1, + num_threads=1, + dataset_split=None, + is_training=True, + model_variant=None, + batch_capacity_factor=32, + video_frames_are_decoded=False, + decoder_output_stride=None, + first_frame_finetuning=False, + sample_only_first_frame_for_finetuning=False, + sample_adjacent_and_consistent_query_frames=False, + remap_labels_to_reference_frame=True, + generate_prev_frame_mask_by_mask_damaging=False, + three_frame_dataset=False, + add_prev_frame_label=True): + """Gets the dataset split for semantic segmentation. + + This functions gets the dataset split for semantic segmentation. In + particular, it is a wrapper of (1) dataset_data_provider which returns the raw + dataset split, (2) input_preprcess which preprocess the raw data, and (3) the + Tensorflow operation of batching the preprocessed data. Then, the output could + be directly used by training, evaluation or visualization. + + Args: + dataset: An instance of slim Dataset. + num_frames_per_video: The number of frames used per video + crop_size: Image crop size [height, width]. + batch_size: Batch size. + min_resize_value: Desired size of the smaller image side. + max_resize_value: Maximum allowed size of the larger image side. + resize_factor: Resized dimensions are multiple of factor plus one. + min_scale_factor: Minimum scale factor value. + max_scale_factor: Maximum scale factor value. + scale_factor_step_size: The step size from min scale factor to max scale + factor. The input is randomly scaled based on the value of + (min_scale_factor, max_scale_factor, scale_factor_step_size). + preprocess_image_and_label: Boolean variable specifies if preprocessing of + image and label will be performed or not. + num_readers: Number of readers for data provider. + num_threads: Number of threads for batching data. + dataset_split: Dataset split. + is_training: Is training or not. + model_variant: Model variant (string) for choosing how to mean-subtract the + images. See feature_extractor.network_map for supported model variants. + batch_capacity_factor: Batch capacity factor affecting the training queue + batch capacity. + video_frames_are_decoded: Boolean, whether the video frames are already + decoded + decoder_output_stride: Integer, the stride of the decoder output. + first_frame_finetuning: Boolean, whether to only sample the first frame + for fine-tuning. + sample_only_first_frame_for_finetuning: Boolean, whether to only sample the + first frame during fine-tuning. This should be False when using lucid or + wonderland data, but true when fine-tuning on the first frame only. + Only has an effect if first_frame_finetuning is True. 
+ sample_adjacent_and_consistent_query_frames: Boolean, if true, the query + frames (all but the first frame which is the reference frame) will be + sampled such that they are adjacent video frames and have the same + crop coordinates and flip augmentation. + remap_labels_to_reference_frame: Boolean, whether to remap the labels of + the query frames to match the labels of the (downscaled) reference frame. + If a query frame contains a label which is not present in the reference, + it will be mapped to background. + generate_prev_frame_mask_by_mask_damaging: Boolean, whether to generate + the masks used as guidance from the previous frame by damaging the + ground truth mask. + three_frame_dataset: Boolean, whether the dataset has exactly three frames + per video of which the first is to be used as reference and the two + others are consecutive frames to be used as query frames. + add_prev_frame_label: Boolean, whether to sample one more frame before the + first query frame to obtain a previous frame label. Only has an effect, + if sample_adjacent_and_consistent_query_frames is True and + generate_prev_frame_mask_by_mask_damaging is False. + + Returns: + A dictionary of batched Tensors for semantic segmentation. + + Raises: + ValueError: dataset_split is None, or Failed to find labels. + """ + if dataset_split is None: + raise ValueError('Unknown dataset split.') + if model_variant is None: + tf.logging.warning('Please specify a model_variant. See ' + 'feature_extractor.network_map for supported model ' + 'variants.') + + data_provider = dataset_data_provider.DatasetDataProvider( + dataset, + num_readers=num_readers, + num_epochs=None if is_training else 1, + shuffle=is_training) + image, label, object_label, image_name, height, width, video_id = _get_data( + data_provider, dataset_split, video_frames_are_decoded) + + sampling_is_valid = tf.constant(True) + if num_frames_per_video is not None: + total_num_frames = tf.shape(image)[0] + if first_frame_finetuning or three_frame_dataset: + if sample_only_first_frame_for_finetuning: + assert not sample_adjacent_and_consistent_query_frames, ( + 'this option does not make sense for sampling only first frame.') + # Sample the first frame num_frames_per_video times. + sel_indices = tf.tile(tf.constant(0, dtype=tf.int32)[tf.newaxis], + multiples=[num_frames_per_video]) + else: + if sample_adjacent_and_consistent_query_frames: + if add_prev_frame_label: + num_frames_per_video += 1 + # Since this is first frame fine-tuning, we'll for now assume that + # each sequence has exactly 3 images: the ref frame and 2 adjacent + # query frames. + assert num_frames_per_video == 3 + with tf.control_dependencies([tf.assert_equal(total_num_frames, 3)]): + sel_indices = tf.constant([1, 2], dtype=tf.int32) + else: + # Sample num_frames_per_video - 1 query frames which are not the + # first frame. + sel_indices = tf.random_shuffle( + tf.range(1, total_num_frames))[:(num_frames_per_video - 1)] + # Concat first frame as reference frame to the front. + sel_indices = tf.concat([tf.constant(0, dtype=tf.int32)[tf.newaxis], + sel_indices], axis=0) + else: + if sample_adjacent_and_consistent_query_frames: + if add_prev_frame_label: + # Sample one more frame which we can use to provide initial softmax + # feedback. 
+ num_frames_per_video += 1 + ref_idx = tf.random_shuffle(tf.range(total_num_frames))[0] + sampling_is_valid = tf.greater_equal(total_num_frames, + num_frames_per_video) + def sample_query_start_idx(): + return tf.random_shuffle( + tf.range(total_num_frames - num_frames_per_video + 1))[0] + query_start_idx = tf.cond(sampling_is_valid, sample_query_start_idx, + lambda: tf.constant(0, dtype=tf.int32)) + def sample_sel_indices(): + return tf.concat( + [ref_idx[tf.newaxis], + tf.range( + query_start_idx, + query_start_idx + (num_frames_per_video - 1))], axis=0) + sel_indices = tf.cond( + sampling_is_valid, sample_sel_indices, + lambda: tf.zeros((num_frames_per_video,), dtype=tf.int32)) + else: + # Randomly sample some frames from the video. + sel_indices = tf.random_shuffle( + tf.range(total_num_frames))[:num_frames_per_video] + image = tf.gather(image, sel_indices, axis=0) + if not video_frames_are_decoded: + image = decode_image_sequence(image) + + if label is not None: + if num_frames_per_video is not None: + label = tf.gather(label, sel_indices, axis=0) + if not video_frames_are_decoded: + label = decode_image_sequence(label, image_format='png', channels=1) + + # Sometimes, label is saved as [num_frames_per_video, height, width] or + # [num_frames_per_video, height, width, 1]. We change it to be + # [num_frames_per_video, height, width, 1]. + if label.shape.ndims == 3: + label = tf.expand_dims(label, 3) + elif label.shape.ndims == 4 and label.shape.dims[3] == 1: + pass + else: + raise ValueError('Input label shape must be ' + '[num_frames_per_video, height, width],' + ' or [num_frames, height, width, 1]. ' + 'Got {}'.format(label.shape.ndims)) + label.set_shape([None, None, None, 1]) + + # Add size of first dimension since tf can't figure it out automatically. 
+ image.set_shape((num_frames_per_video, None, None, None)) + if label is not None: + label.set_shape((num_frames_per_video, None, None, None)) + + preceding_frame_label = None + if preprocess_image_and_label: + if num_frames_per_video is None: + raise ValueError('num_frame_per_video must be specified for preproc.') + original_images = [] + images = [] + labels = [] + if sample_adjacent_and_consistent_query_frames: + num_frames_individual_preproc = 1 + else: + num_frames_individual_preproc = num_frames_per_video + for frame_idx in range(num_frames_individual_preproc): + original_image_t, image_t, label_t = ( + input_preprocess.preprocess_image_and_label( + image[frame_idx], + label[frame_idx], + crop_height=crop_size[0] if crop_size is not None else None, + crop_width=crop_size[1] if crop_size is not None else None, + min_resize_value=min_resize_value, + max_resize_value=max_resize_value, + resize_factor=resize_factor, + min_scale_factor=min_scale_factor, + max_scale_factor=max_scale_factor, + scale_factor_step_size=scale_factor_step_size, + ignore_label=dataset.ignore_label, + is_training=is_training, + model_variant=model_variant)) + original_images.append(original_image_t) + images.append(image_t) + labels.append(label_t) + if sample_adjacent_and_consistent_query_frames: + imgs_for_preproc = [image[frame_idx] for frame_idx in + range(1, num_frames_per_video)] + labels_for_preproc = [label[frame_idx] for frame_idx in + range(1, num_frames_per_video)] + original_image_rest, image_rest, label_rest = ( + input_preprocess.preprocess_images_and_labels_consistently( + imgs_for_preproc, + labels_for_preproc, + crop_height=crop_size[0] if crop_size is not None else None, + crop_width=crop_size[1] if crop_size is not None else None, + min_resize_value=min_resize_value, + max_resize_value=max_resize_value, + resize_factor=resize_factor, + min_scale_factor=min_scale_factor, + max_scale_factor=max_scale_factor, + scale_factor_step_size=scale_factor_step_size, + ignore_label=dataset.ignore_label, + is_training=is_training, + model_variant=model_variant)) + original_images.extend(original_image_rest) + images.extend(image_rest) + labels.extend(label_rest) + assert len(original_images) == num_frames_per_video + assert len(images) == num_frames_per_video + assert len(labels) == num_frames_per_video + + if remap_labels_to_reference_frame: + # Remap labels to indices into the labels of the (downscaled) reference + # frame, or 0, i.e. background, for labels which are not present + # in the reference. + reference_labels = labels[0][tf.newaxis] + h, w = train_utils.resolve_shape(reference_labels)[1:3] + embedding_height = model.scale_dimension( + h, 1.0 / decoder_output_stride) + embedding_width = model.scale_dimension( + w, 1.0 / decoder_output_stride) + reference_labels_embedding_size = tf.squeeze( + tf.image.resize_nearest_neighbor( + reference_labels, tf.stack([embedding_height, embedding_width]), + align_corners=True), + axis=0) + # Get sorted unique labels in the reference frame. 
+ labels_in_ref_frame, _ = tf.unique( + tf.reshape(reference_labels_embedding_size, [-1])) + labels_in_ref_frame = tf.contrib.framework.sort(labels_in_ref_frame) + for idx in range(1, len(labels)): + ref_label_mask = tf.equal( + labels[idx], + labels_in_ref_frame[tf.newaxis, tf.newaxis, :]) + remapped = tf.argmax(tf.cast(ref_label_mask, tf.uint8), axis=-1, + output_type=tf.int32) + # Set to 0 if label is not present + is_in_ref = tf.reduce_any(ref_label_mask, axis=-1) + remapped *= tf.cast(is_in_ref, tf.int32) + labels[idx] = remapped[..., tf.newaxis] + + if sample_adjacent_and_consistent_query_frames: + if first_frame_finetuning and generate_prev_frame_mask_by_mask_damaging: + preceding_frame_label = mask_damaging.damage_masks(labels[1]) + elif add_prev_frame_label: + # Discard the image of the additional frame and take the label as + # initialization for softmax feedback. + original_images = [original_images[0]] + original_images[2:] + preceding_frame_label = labels[1] + images = [images[0]] + images[2:] + labels = [labels[0]] + labels[2:] + num_frames_per_video -= 1 + + original_image = tf.stack(original_images, axis=0) + image = tf.stack(images, axis=0) + label = tf.stack(labels, axis=0) + else: + if label is not None: + # Need to set label shape due to batching. + label.set_shape([num_frames_per_video, + None if crop_size is None else crop_size[0], + None if crop_size is None else crop_size[1], + 1]) + original_image = tf.to_float(tf.zeros_like(label)) + if crop_size is None: + height = tf.shape(image)[1] + width = tf.shape(image)[2] + else: + height = crop_size[0] + width = crop_size[1] + + sample = {'image': image, + 'image_name': image_name, + 'height': height, + 'width': width, + 'video_id': video_id} + if label is not None: + sample['label'] = label + + if object_label is not None: + sample['object_label'] = object_label + + if preceding_frame_label is not None: + sample['preceding_frame_label'] = preceding_frame_label + + if not is_training: + # Original image is only used during visualization. + sample['original_image'] = original_image + + if is_training: + if first_frame_finetuning: + keep_input = tf.constant(True) + else: + keep_input = tf.logical_and(sampling_is_valid, tf.logical_and( + _has_enough_pixels_of_each_object_in_first_frame( + label, decoder_output_stride), + _has_foreground_and_background_in_first_frame_2( + label, decoder_output_stride))) + + batched = tf.train.maybe_batch(sample, + keep_input=keep_input, + batch_size=batch_size, + num_threads=num_threads, + capacity=batch_capacity_factor * batch_size, + dynamic_pad=True) + else: + batched = tf.train.batch(sample, + batch_size=batch_size, + num_threads=num_threads, + capacity=batch_capacity_factor * batch_size, + dynamic_pad=True) + + # Flatten from [batch, num_frames_per_video, ...] to + # batch * num_frames_per_video, ...]. 
+ cropped_height = train_utils.resolve_shape(batched['image'])[2] + cropped_width = train_utils.resolve_shape(batched['image'])[3] + if num_frames_per_video is None: + first_dim = -1 + else: + first_dim = batch_size * num_frames_per_video + batched['image'] = tf.reshape(batched['image'], + [first_dim, cropped_height, cropped_width, 3]) + if label is not None: + batched['label'] = tf.reshape(batched['label'], + [first_dim, cropped_height, cropped_width, 1]) + return batched diff --git a/models/research/feelvos/vis_video.py b/models/research/feelvos/vis_video.py new file mode 100644 index 0000000000000000000000000000000000000000..211bccf52acdef83aca298285fc473748126de02 --- /dev/null +++ b/models/research/feelvos/vis_video.py @@ -0,0 +1,500 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Segmentation results evaluation and visualization for videos using attention. +""" + +import math +import os +import time +import numpy as np + +import tensorflow as tf + +from feelvos import common +from feelvos import model +from feelvos.datasets import video_dataset +from feelvos.utils import embedding_utils +from feelvos.utils import eval_utils +from feelvos.utils import video_input_generator + + +slim = tf.contrib.slim +flags = tf.app.flags +FLAGS = flags.FLAGS + +flags.DEFINE_integer('eval_interval_secs', 60 * 5, + 'How often (in seconds) to run evaluation.') + +flags.DEFINE_string('master', '', 'BNS name of the tensorflow server') + +flags.DEFINE_integer('vis_batch_size', 1, + 'The number of images in each batch during evaluation.') + +flags.DEFINE_string('vis_logdir', None, 'Where to write the event logs.') + +flags.DEFINE_string('checkpoint_dir', None, 'Directory of model checkpoints.') + +flags.DEFINE_integer('output_stride', 8, + 'The ratio of input to output spatial resolution.') + +flags.DEFINE_string('dataset', 'davis_2016', + 'Name of the segmentation dataset.') + +flags.DEFINE_string('vis_split', 'val', + 'Which split of the dataset used for visualizing results') + +flags.DEFINE_string( + 'dataset_dir', + '/cns/is-d/home/lcchen/data/pascal_voc_seg/example_sstables', + 'Where the dataset resides.') + +flags.DEFINE_integer('num_vis_examples', -1, + 'Number of examples for visualization. If -1, use all ' + 'samples in the vis data.') + +flags.DEFINE_multi_integer('atrous_rates', None, + 'Atrous rates for atrous spatial pyramid pooling.') + +flags.DEFINE_bool('save_segmentations', False, 'Whether to save the ' + 'segmentation masks as ' + 'png images. Might be slow ' + 'on cns.') + +flags.DEFINE_bool('save_embeddings', False, 'Whether to save the embeddings as' + 'pickle. Might be slow on cns.') + +flags.DEFINE_bool('eval_once_and_quit', False, + 'Whether to just run the eval a single time and quit ' + 'afterwards. 
Otherwise, the eval is run in a loop with ' + 'new checkpoints.') + +flags.DEFINE_boolean('first_frame_finetuning', False, + 'Whether to only sample the first frame for fine-tuning.') + +# the folder where segmentations are saved. +_SEGMENTATION_SAVE_FOLDER = 'segmentation' +_EMBEDDINGS_SAVE_FOLDER = 'embeddings' + + +def _process_seq_data(segmentation_dir, embeddings_dir, seq_name, + predicted_labels, gt_labels, embeddings): + """Calculates the sequence IoU and optionally save the segmentation masks. + + Args: + segmentation_dir: Directory in which the segmentation results are stored. + embeddings_dir: Directory in which the embeddings are stored. + seq_name: String, the name of the sequence. + predicted_labels: Int64 np.array of shape [n_frames, height, width]. + gt_labels: Ground truth labels, Int64 np.array of shape + [n_frames, height, width]. + embeddings: Float32 np.array of embeddings of shape + [n_frames, decoder_height, decoder_width, embedding_dim], or None. + + Returns: + The IoU for the sequence (float). + """ + sequence_dir = os.path.join(segmentation_dir, seq_name) + tf.gfile.MakeDirs(sequence_dir) + embeddings_seq_dir = os.path.join(embeddings_dir, seq_name) + tf.gfile.MakeDirs(embeddings_seq_dir) + label_set = np.unique(gt_labels[0]) + ious = [] + assert len(predicted_labels) == len(gt_labels) + if embeddings is not None: + assert len(predicted_labels) == len(embeddings) + for t, (predicted_label, gt_label) in enumerate( + zip(predicted_labels, gt_labels)): + if FLAGS.save_segmentations: + seg_filename = os.path.join(segmentation_dir, seq_name, '%05d.png' % t) + eval_utils.save_segmentation_with_colormap(seg_filename, predicted_label) + if FLAGS.save_embeddings: + embedding_filename = os.path.join(embeddings_dir, seq_name, + '%05d.npy' % t) + assert embeddings is not None + eval_utils.save_embeddings(embedding_filename, embeddings[t]) + object_ious_t = eval_utils.calculate_multi_object_ious( + predicted_label, gt_label, label_set) + ious.append(object_ious_t) + # First and last frame are excluded in DAVIS eval. + seq_ious = np.mean(ious[1:-1], axis=0) + tf.logging.info('seq ious: %s %s', seq_name, seq_ious) + return seq_ious + + +def create_predictions(samples, reference_labels, first_frame_img, + model_options): + """Predicts segmentation labels for each frame of the video. + + Slower version than create_predictions_fast, but does support more options. + + Args: + samples: Dictionary of input samples. + reference_labels: Int tensor of shape [1, height, width, 1]. + first_frame_img: Float32 tensor of shape [height, width, 3]. + model_options: An InternalModelOptions instance to configure models. + + Returns: + predicted_labels: Int tensor of shape [time, height, width] of + predicted labels for each frame. + all_embeddings: Float32 tensor of shape + [time, height, width, embedding_dim], or None. + """ + + def predict(args, imgs): + """Predicts segmentation labels and softmax probabilities for each image. + + Args: + args: A tuple of (predictions, softmax_probabilities), where predictions + is an int tensor of shape [1, h, w] and softmax_probabilities is a + float32 tensor of shape [1, h_decoder, w_decoder, n_objects]. + imgs: Either a one-tuple of the image to predict labels for of shape + [h, w, 3], or pair of previous frame and current frame image. + + Returns: + predictions: The predicted labels as int tensor of shape [1, h, w]. + softmax_probabilities: The softmax probabilities of shape + [1, h_decoder, w_decoder, n_objects]. 
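+      embeddings: Only returned if FLAGS.save_embeddings is True: the
+        embeddings tensor fetched from the graph, as an additional third
+        element.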
+ """ + if FLAGS.save_embeddings: + last_frame_predictions, last_softmax_probabilities, _ = args + else: + last_frame_predictions, last_softmax_probabilities = args + + if FLAGS.also_attend_to_previous_frame or FLAGS.use_softmax_feedback: + ref_labels_to_use = tf.concat( + [reference_labels, last_frame_predictions[..., tf.newaxis]], + axis=0) + else: + ref_labels_to_use = reference_labels + + predictions, softmax_probabilities = model.predict_labels( + tf.stack((first_frame_img,) + imgs), + model_options=model_options, + image_pyramid=FLAGS.image_pyramid, + embedding_dimension=FLAGS.embedding_dimension, + reference_labels=ref_labels_to_use, + k_nearest_neighbors=FLAGS.k_nearest_neighbors, + use_softmax_feedback=FLAGS.use_softmax_feedback, + initial_softmax_feedback=last_softmax_probabilities, + embedding_seg_feature_dimension= + FLAGS.embedding_seg_feature_dimension, + embedding_seg_n_layers=FLAGS.embedding_seg_n_layers, + embedding_seg_kernel_size=FLAGS.embedding_seg_kernel_size, + embedding_seg_atrous_rates=FLAGS.embedding_seg_atrous_rates, + also_return_softmax_probabilities=True, + num_frames_per_video= + (3 if FLAGS.also_attend_to_previous_frame or + FLAGS.use_softmax_feedback else 2), + normalize_nearest_neighbor_distances= + FLAGS.normalize_nearest_neighbor_distances, + also_attend_to_previous_frame=FLAGS.also_attend_to_previous_frame, + use_local_previous_frame_attention= + FLAGS.use_local_previous_frame_attention, + previous_frame_attention_window_size= + FLAGS.previous_frame_attention_window_size, + use_first_frame_matching=FLAGS.use_first_frame_matching + ) + predictions = tf.cast(predictions[common.OUTPUT_TYPE], tf.int32) + + if FLAGS.save_embeddings: + names = [n.name for n in tf.get_default_graph().as_graph_def().node] + embedding_names = [x for x in names if 'embeddings' in x] + # This will crash when multi-scale inference is used. + assert len(embedding_names) == 1, len(embedding_names) + embedding_name = embedding_names[0] + ':0' + embeddings = tf.get_default_graph().get_tensor_by_name(embedding_name) + return predictions, softmax_probabilities, embeddings + else: + return predictions, softmax_probabilities + + init_labels = tf.squeeze(reference_labels, axis=-1) + init_softmax = embedding_utils.create_initial_softmax_from_labels( + reference_labels, reference_labels, common.parse_decoder_output_stride(), + reduce_labels=False) + if FLAGS.save_embeddings: + decoder_height = tf.shape(init_softmax)[1] + decoder_width = tf.shape(init_softmax)[2] + n_frames = (3 if FLAGS.also_attend_to_previous_frame + or FLAGS.use_softmax_feedback else 2) + embeddings_init = tf.zeros((n_frames, decoder_height, decoder_width, + FLAGS.embedding_dimension)) + init = (init_labels, init_softmax, embeddings_init) + else: + init = (init_labels, init_softmax) + # Do not eval the first frame again but concat the first frame ground + # truth instead. 
+ if FLAGS.also_attend_to_previous_frame or FLAGS.use_softmax_feedback: + elems = (samples[common.IMAGE][:-1], samples[common.IMAGE][1:]) + else: + elems = (samples[common.IMAGE][1:],) + res = tf.scan(predict, elems, + initializer=init, + parallel_iterations=1, + swap_memory=True) + if FLAGS.save_embeddings: + predicted_labels, _, all_embeddings = res + first_frame_embeddings = all_embeddings[0, 0, tf.newaxis] + other_frame_embeddings = all_embeddings[:, -1] + all_embeddings = tf.concat( + [first_frame_embeddings, other_frame_embeddings], axis=0) + else: + predicted_labels, _ = res + all_embeddings = None + predicted_labels = tf.concat([reference_labels[..., 0], + tf.squeeze(predicted_labels, axis=1)], + axis=0) + return predicted_labels, all_embeddings + + +def create_predictions_fast(samples, reference_labels, first_frame_img, + model_options): + """Predicts segmentation labels for each frame of the video. + + Faster version than create_predictions, but does not support all options. + + Args: + samples: Dictionary of input samples. + reference_labels: Int tensor of shape [1, height, width, 1]. + first_frame_img: Float32 tensor of shape [height, width, 3]. + model_options: An InternalModelOptions instance to configure models. + + Returns: + predicted_labels: Int tensor of shape [time, height, width] of + predicted labels for each frame. + all_embeddings: Float32 tensor of shape + [time, height, width, embedding_dim], or None. + + Raises: + ValueError: If FLAGS.save_embeddings is True, FLAGS.use_softmax_feedback is + False, or FLAGS.also_attend_to_previous_frame is False. + """ + if FLAGS.save_embeddings: + raise ValueError('save_embeddings does not work with ' + 'create_predictions_fast. Use the slower ' + 'create_predictions instead.') + if not FLAGS.use_softmax_feedback: + raise ValueError('use_softmax_feedback must be True for ' + 'create_predictions_fast. Use the slower ' + 'create_predictions instead.') + if not FLAGS.also_attend_to_previous_frame: + raise ValueError('also_attend_to_previous_frame must be True for ' + 'create_predictions_fast. Use the slower ' + 'create_predictions instead.') + # Extract embeddings for first frame and prepare initial predictions. + first_frame_embeddings = embedding_utils.get_embeddings( + first_frame_img[tf.newaxis], model_options, FLAGS.embedding_dimension) + init_labels = tf.squeeze(reference_labels, axis=-1) + init_softmax = embedding_utils.create_initial_softmax_from_labels( + reference_labels, reference_labels, common.parse_decoder_output_stride(), + reduce_labels=False) + init = (init_labels, init_softmax, first_frame_embeddings) + + def predict(args, img): + """Predicts segmentation labels and softmax probabilities for each image. + + Args: + args: tuple of + (predictions, softmax_probabilities, last_frame_embeddings), where + predictions is an int tensor of shape [1, h, w], + softmax_probabilities is a float32 tensor of shape + [1, h_decoder, w_decoder, n_objects], + and last_frame_embeddings is a float32 tensor of shape + [h_decoder, w_decoder, embedding_dimension]. + img: Image to predict labels for of shape [h, w, 3]. + + Returns: + predictions: The predicted labels as int tensor of shape [1, h, w]. + softmax_probabilities: The softmax probabilities of shape + [1, h_decoder, w_decoder, n_objects]. 
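+      embeddings: The embeddings computed for the current frame, which are
+        threaded through tf.scan and serve as the previous-frame embeddings
+        for the next step.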
+ """ + (last_frame_predictions, last_softmax_probabilities, + prev_frame_embeddings) = args + ref_labels_to_use = tf.concat( + [reference_labels, last_frame_predictions[..., tf.newaxis]], + axis=0) + + predictions, softmax_probabilities, embeddings = model.predict_labels( + img[tf.newaxis], + model_options=model_options, + image_pyramid=FLAGS.image_pyramid, + embedding_dimension=FLAGS.embedding_dimension, + reference_labels=ref_labels_to_use, + k_nearest_neighbors=FLAGS.k_nearest_neighbors, + use_softmax_feedback=FLAGS.use_softmax_feedback, + initial_softmax_feedback=last_softmax_probabilities, + embedding_seg_feature_dimension= + FLAGS.embedding_seg_feature_dimension, + embedding_seg_n_layers=FLAGS.embedding_seg_n_layers, + embedding_seg_kernel_size=FLAGS.embedding_seg_kernel_size, + embedding_seg_atrous_rates=FLAGS.embedding_seg_atrous_rates, + also_return_softmax_probabilities=True, + num_frames_per_video=1, + normalize_nearest_neighbor_distances= + FLAGS.normalize_nearest_neighbor_distances, + also_attend_to_previous_frame=FLAGS.also_attend_to_previous_frame, + use_local_previous_frame_attention= + FLAGS.use_local_previous_frame_attention, + previous_frame_attention_window_size= + FLAGS.previous_frame_attention_window_size, + use_first_frame_matching=FLAGS.use_first_frame_matching, + also_return_embeddings=True, + ref_embeddings=(first_frame_embeddings, prev_frame_embeddings) + ) + predictions = tf.cast(predictions[common.OUTPUT_TYPE], tf.int32) + return predictions, softmax_probabilities, embeddings + + # Do not eval the first frame again but concat the first frame ground + # truth instead. + # If you have a lot of GPU memory, you can try to set swap_memory=False, + # and/or parallel_iterations=2. + elems = samples[common.IMAGE][1:] + res = tf.scan(predict, elems, + initializer=init, + parallel_iterations=1, + swap_memory=True) + predicted_labels, _, _ = res + predicted_labels = tf.concat([reference_labels[..., 0], + tf.squeeze(predicted_labels, axis=1)], + axis=0) + return predicted_labels + + +def main(unused_argv): + if FLAGS.vis_batch_size != 1: + raise ValueError('Only batch size 1 is supported for now') + + data_type = 'tf_sequence_example' + # Get dataset-dependent information. + dataset = video_dataset.get_dataset( + FLAGS.dataset, + FLAGS.vis_split, + dataset_dir=FLAGS.dataset_dir, + data_type=data_type) + + # Prepare for visualization. + tf.gfile.MakeDirs(FLAGS.vis_logdir) + segmentation_dir = os.path.join(FLAGS.vis_logdir, _SEGMENTATION_SAVE_FOLDER) + tf.gfile.MakeDirs(segmentation_dir) + embeddings_dir = os.path.join(FLAGS.vis_logdir, _EMBEDDINGS_SAVE_FOLDER) + tf.gfile.MakeDirs(embeddings_dir) + num_vis_examples = (dataset.num_videos if (FLAGS.num_vis_examples < 0) + else FLAGS.num_vis_examples) + if FLAGS.first_frame_finetuning: + num_vis_examples = 1 + + tf.logging.info('Visualizing on %s set', FLAGS.vis_split) + g = tf.Graph() + with g.as_default(): + # Without setting device to CPU we run out of memory. 
+ with tf.device('cpu:0'): + samples = video_input_generator.get( + dataset, + None, + None, + FLAGS.vis_batch_size, + min_resize_value=FLAGS.min_resize_value, + max_resize_value=FLAGS.max_resize_value, + resize_factor=FLAGS.resize_factor, + dataset_split=FLAGS.vis_split, + is_training=False, + model_variant=FLAGS.model_variant, + preprocess_image_and_label=False, + remap_labels_to_reference_frame=False) + samples[common.IMAGE] = tf.cast(samples[common.IMAGE], tf.float32) + samples[common.LABEL] = tf.cast(samples[common.LABEL], tf.int32) + first_frame_img = samples[common.IMAGE][0] + reference_labels = samples[common.LABEL][0, tf.newaxis] + gt_labels = tf.squeeze(samples[common.LABEL], axis=-1) + seq_name = samples[common.VIDEO_ID][0] + + model_options = common.VideoModelOptions( + outputs_to_num_classes={common.OUTPUT_TYPE: dataset.num_classes}, + crop_size=None, + atrous_rates=FLAGS.atrous_rates, + output_stride=FLAGS.output_stride) + + all_embeddings = None + predicted_labels = create_predictions_fast( + samples, reference_labels, first_frame_img, model_options) + # If you need more options like saving embeddings, replace the call to + # create_predictions_fast with create_predictions. + + tf.train.get_or_create_global_step() + saver = tf.train.Saver(slim.get_variables_to_restore()) + sv = tf.train.Supervisor(graph=g, + logdir=FLAGS.vis_logdir, + init_op=tf.global_variables_initializer(), + summary_op=None, + summary_writer=None, + global_step=None, + saver=saver) + num_batches = int( + math.ceil(num_vis_examples / float(FLAGS.vis_batch_size))) + last_checkpoint = None + + # Infinite loop to visualize the results when new checkpoint is created. + while True: + last_checkpoint = slim.evaluation.wait_for_new_checkpoint( + FLAGS.checkpoint_dir, last_checkpoint) + start = time.time() + tf.logging.info( + 'Starting visualization at ' + time.strftime('%Y-%m-%d-%H:%M:%S', + time.gmtime())) + tf.logging.info('Visualizing with model %s', last_checkpoint) + + all_ious = [] + with sv.managed_session(FLAGS.master, + start_standard_services=False) as sess: + sv.start_queue_runners(sess) + sv.saver.restore(sess, last_checkpoint) + + for batch in range(num_batches): + ops = [predicted_labels, gt_labels, seq_name] + if FLAGS.save_embeddings: + ops.append(all_embeddings) + tf.logging.info('Visualizing batch %d / %d', batch + 1, num_batches) + res = sess.run(ops) + tf.logging.info('Forwarding done') + pred_labels_val, gt_labels_val, seq_name_val = res[:3] + if FLAGS.save_embeddings: + all_embeddings_val = res[3] + else: + all_embeddings_val = None + seq_ious = _process_seq_data(segmentation_dir, embeddings_dir, + seq_name_val, pred_labels_val, + gt_labels_val, all_embeddings_val) + all_ious.append(seq_ious) + all_ious = np.concatenate(all_ious, axis=0) + tf.logging.info('n_seqs %s, mIoU %f', all_ious.shape, all_ious.mean()) + tf.logging.info( + 'Finished visualization at ' + time.strftime('%Y-%m-%d-%H:%M:%S', + time.gmtime())) + result_dir = FLAGS.vis_logdir + '/results/' + tf.gfile.MakeDirs(result_dir) + with tf.gfile.GFile(result_dir + seq_name_val + '.txt', 'w') as f: + f.write(str(all_ious)) + if FLAGS.first_frame_finetuning or FLAGS.eval_once_and_quit: + break + time_to_next_eval = start + FLAGS.eval_interval_secs - time.time() + if time_to_next_eval > 0: + time.sleep(time_to_next_eval) + + +if __name__ == '__main__': + flags.mark_flag_as_required('checkpoint_dir') + flags.mark_flag_as_required('vis_logdir') + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git 
a/models/research/fivo/.gitattributes b/models/research/fivo/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..f706c0421d718f8af8e62d96d69101fe383d2b4f --- /dev/null +++ b/models/research/fivo/.gitattributes @@ -0,0 +1,2 @@ +*.pkl binary +*.tfrecord binary diff --git a/models/research/fivo/.gitignore b/models/research/fivo/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..af2f537516daf33fdaf579436dfa33fdd9044f49 --- /dev/null +++ b/models/research/fivo/.gitignore @@ -0,0 +1,104 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +.static_storage/ +.media/ +local_settings.py + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ diff --git a/models/research/fivo/README.md b/models/research/fivo/README.md new file mode 100644 index 0000000000000000000000000000000000000000..36d355b1b2961f2c8c8b721b5ce13c0c3eab1e8b --- /dev/null +++ b/models/research/fivo/README.md @@ -0,0 +1,215 @@ +![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +# Filtering Variational Objectives + +This folder contains a TensorFlow implementation of the algorithms from + +Chris J. Maddison\*, Dieterich Lawson\*, George Tucker\*, Nicolas Heess, Mohammad Norouzi, Andriy Mnih, Arnaud Doucet, and Yee Whye Teh. "Filtering Variational Objectives." NIPS 2017. + +[https://arxiv.org/abs/1705.09279](https://arxiv.org/abs/1705.09279) + +This code implements 3 different bounds for training sequential latent variable models: the evidence lower bound (ELBO), the importance weighted auto-encoder bound (IWAE), and our bound, the filtering variational objective (FIVO). + +Additionally it contains several sequential latent variable model implementations: + +* Variational recurrent neural network (VRNN) +* Stochastic recurrent neural network (SRNN) +* Gaussian hidden Markov model with linear conditionals (GHMM) + +The VRNN and SRNN can be trained for sequence modeling of pianoroll and speech data. The GHMM is trainable on a synthetic dataset, useful as a simple example of an analytically tractable model. 
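+
+Roughly speaking, all three bounds are built from the same per-sample log importance weights, log w = log p(x, z) - log q(z | x); they differ in how the weights of multiple samples are combined. As a quick illustration (plain NumPy, not part of this codebase), the ELBO averages the log weights while IWAE takes the log of the average weight:
+
+```
+import numpy as np
+
+def elbo_and_iwae_estimates(log_w):
+  # log_w: [num_samples, batch_size] array of log importance weights.
+  elbo = log_w.mean(axis=0)  # mean of the log weights
+  iwae = np.logaddexp.reduce(log_w, axis=0) - np.log(len(log_w))  # log of the mean weight
+  return elbo, iwae
+```
+
+FIVO applies the same log-mean-exp update sequentially along the sequence, interleaved with resampling steps; see `fivo/smc.py` and `fivo/bounds.py` for the actual implementation.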
+ +#### Directory Structure +The important parts of the code are organized as follows. + +``` +run_fivo.py # main script, contains flag definitions +fivo +├─smc.py # a sequential Monte Carlo implementation +├─bounds.py # code for computing each bound, uses smc.py +├─runners.py # code for VRNN and SRNN training and evaluation +├─ghmm_runners.py # code for GHMM training and evaluation +├─data +| ├─datasets.py # readers for pianoroll and speech datasets +| ├─calculate_pianoroll_mean.py # preprocesses the pianoroll datasets +| └─create_timit_dataset.py # preprocesses the TIMIT dataset +└─models + ├─base.py # base classes used in other models + ├─vrnn.py # VRNN implementation + ├─srnn.py # SRNN implementation + └─ghmm.py # Gaussian hidden Markov model (GHMM) implementation +bin +├─run_train.sh # an example script that runs training +├─run_eval.sh # an example script that runs evaluation +├─run_sample.sh # an example script that runs sampling +├─run_tests.sh # a script that runs all tests +└─download_pianorolls.sh # a script that downloads pianoroll files +``` + +### Pianorolls + +Requirements before we start: + +* TensorFlow (see [tensorflow.org](http://tensorflow.org) for how to install) +* [scipy](https://www.scipy.org/) +* [sonnet](https://github.com/deepmind/sonnet) + + +#### Download the Data + +The pianoroll datasets are encoded as pickled sparse arrays and are available at [http://www-etud.iro.umontreal.ca/~boulanni/icml2012](http://www-etud.iro.umontreal.ca/~boulanni/icml2012). You can use the script `bin/download_pianorolls.sh` to download the files into a directory of your choosing. +``` +export PIANOROLL_DIR=~/pianorolls +mkdir $PIANOROLL_DIR +sh bin/download_pianorolls.sh $PIANOROLL_DIR +``` + +#### Preprocess the Data + +The script `calculate_pianoroll_mean.py` loads a pianoroll pickle file, calculates the mean, updates the pickle file to include the mean under the key `train_mean`, and writes the file back to disk in-place. You should do this for all pianoroll datasets you wish to train on. + +``` +python data/calculate_pianoroll_mean.py --in_file=$PIANOROLL_DIR/piano-midi.de.pkl +python data/calculate_pianoroll_mean.py --in_file=$PIANOROLL_DIR/nottingham.de.pkl +python data/calculate_pianoroll_mean.py --in_file=$PIANOROLL_DIR/musedata.pkl +python data/calculate_pianoroll_mean.py --in_file=$PIANOROLL_DIR/jsb.pkl +``` + +#### Training + +Now we can train a model. Here is the command for a standard training run, taken from `bin/run_train.sh`: +``` +python run_fivo.py \ + --mode=train \ + --logdir=/tmp/fivo \ + --model=vrnn \ + --bound=fivo \ + --summarize_every=100 \ + --batch_size=4 \ + --num_samples=4 \ + --learning_rate=0.0001 \ + --dataset_path="$PIANOROLL_DIR/jsb.pkl" \ + --dataset_type="pianoroll" +``` + +You should see output that looks something like this (with extra logging cruft): + +``` +Saving checkpoints for 0 into /tmp/fivo/model.ckpt. +Step 1, fivo bound per timestep: -11.322491 +global_step/sec: 7.49971 +Step 101, fivo bound per timestep: -11.399275 +global_step/sec: 8.04498 +Step 201, fivo bound per timestep: -11.174991 +global_step/sec: 8.03989 +Step 301, fivo bound per timestep: -11.073008 +``` +#### Evaluation + +You can also evaluate saved checkpoints. The `eval` mode loads a model checkpoint, tests its performance on all items in a dataset, and reports the log-likelihood averaged over the dataset. 
For example here is a command, taken from `bin/run_eval.sh`, that will evaluate a JSB model on the test set: + +``` +python run_fivo.py \ + --mode=eval \ + --split=test \ + --alsologtostderr \ + --logdir=/tmp/fivo \ + --model=vrnn \ + --batch_size=4 \ + --num_samples=4 \ + --dataset_path="$PIANOROLL_DIR/jsb.pkl" \ + --dataset_type="pianoroll" +``` + +You should see output like this: +``` +Restoring parameters from /tmp/fivo/model.ckpt-0 +Model restored from step 0, evaluating. +test elbo ll/t: -12.198834, iwae ll/t: -11.981187 fivo ll/t: -11.579776 +test elbo ll/seq: -748.564789, iwae ll/seq: -735.209206 fivo ll/seq: -710.577141 +``` +The evaluation script prints log-likelihood in both nats per timestep (ll/t) and nats per sequence (ll/seq) for all three bounds. + +#### Sampling + +You can also sample from trained models. The `sample` mode loads a model checkpoint, conditions the model on a prefix of a randomly chosen datapoint, samples a sequence of outputs from the conditioned model, and writes out the samples and prefix to a `.npz` file in `logdir`. For example here is a command that samples from a model trained on JSB, taken from `bin/run_sample.sh`: +``` +python run_fivo.py \ + --mode=sample \ + --alsologtostderr \ + --logdir="/tmp/fivo" \ + --model=vrnn \ + --bound=fivo \ + --batch_size=4 \ + --num_samples=4 \ + --split=test \ + --dataset_path="$PIANOROLL_DIR/jsb.pkl" \ + --dataset_type="pianoroll" \ + --prefix_length=25 \ + --sample_length=50 +``` + +Here `num_samples` denotes the number of samples used when conditioning the model as well as the number of trajectories to sample for each prefix. + +You should see very little output. +``` +Restoring parameters from /tmp/fivo/model.ckpt-0 +Running local_init_op. +Done running local_init_op. +``` + +Loading the samples with `np.load` confirms that we conditioned the model on 4 +prefixes of length 25 and sampled 4 sequences of length 50 for each prefix. +``` +>>> import numpy as np +>>> x = np.load("/tmp/fivo/samples.npz") +>>> x[()]['prefixes'].shape +(25, 4, 88) +>>> x[()]['samples'].shape +(50, 4, 4, 88) +``` + +### Training on TIMIT + +The TIMIT speech dataset is available at the [Linguistic Data Consortium website](https://catalog.ldc.upenn.edu/LDC93S1), but is unfortunately not free. These instructions will proceed assuming you have downloaded the TIMIT archive and extracted it into the directory `$RAW_TIMIT_DIR`. + +#### Preprocess TIMIT + +We preprocess TIMIT (as described in our paper) and write it out to a series of TFRecord files. To prepare the TIMIT dataset use the script `create_timit_dataset.py` +``` +export $TIMIT_DIR=~/timit_dataset +mkdir $TIMIT_DIR +python data/create_timit_dataset.py \ + --raw_timit_dir=$RAW_TIMIT_DIR \ + --out_dir=$TIMIT_DIR +``` +You should see this exact output: +``` +4389 train / 231 valid / 1680 test +train mean: 0.006060 train std: 548.136169 +``` + +#### Training on TIMIT +This is very similar to training on pianoroll datasets, with just a few flags switched. +``` +python run_fivo.py \ + --mode=train \ + --logdir=/tmp/fivo \ + --model=vrnn \ + --bound=fivo \ + --summarize_every=100 \ + --batch_size=4 \ + --num_samples=4 \ + --learning_rate=0.0001 \ + --dataset_path="$TIMIT_DIR/train" \ + --dataset_type="speech" +``` +Evaluation and sampling are similar. + +### Tests +This codebase comes with a number of tests to verify correctness, runnable via `bin/run_tests.sh`. The tests are also useful to look at for examples of how to use the code. 
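+
+To run a single test module instead of the whole suite, the module paths used in `bin/run_tests.sh` should also work directly, for example:
+```
+python -m fivo.bounds_test
+```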
+ +### Contact + +This codebase is maintained by Dieterich Lawson. For questions and issues please open an issue on the tensorflow/models issues tracker and assign it to @dieterichlawson. diff --git a/models/research/fivo/bin/download_pianorolls.sh b/models/research/fivo/bin/download_pianorolls.sh new file mode 100644 index 0000000000000000000000000000000000000000..ef7050b4df5fb9815be04d133e659fa31d8d055e --- /dev/null +++ b/models/research/fivo/bin/download_pianorolls.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# A script to download the pianoroll datasets. +# Accepts one argument, the directory to put the files in + +if [ -z "$1" ] + then + echo "Error, must provide a directory to download the files to." + exit +fi + +echo "Downloading datasets into $1" +curl -s "http://www-etud.iro.umontreal.ca/~boulanni/Piano-midi.de.pickle" > $1/piano-midi.de.pkl +curl -s "http://www-etud.iro.umontreal.ca/~boulanni/Nottingham.pickle" > $1/nottingham.pkl +curl -s "http://www-etud.iro.umontreal.ca/~boulanni/MuseData.pickle" > $1/musedata.pkl +curl -s "http://www-etud.iro.umontreal.ca/~boulanni/JSB%20Chorales.pickle" > $1/jsb.pkl diff --git a/models/research/fivo/bin/run_eval.sh b/models/research/fivo/bin/run_eval.sh new file mode 100644 index 0000000000000000000000000000000000000000..b30bcedc2d16e5bdd681386100ecca23612a139a --- /dev/null +++ b/models/research/fivo/bin/run_eval.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# An example of running evaluation. + +PIANOROLL_DIR=$HOME/pianorolls + +python run_fivo.py \ + --mode=eval \ + --logdir=/tmp/fivo \ + --model=vrnn \ + --batch_size=4 \ + --num_samples=4 \ + --split=test \ + --dataset_path="$PIANOROLL_DIR/jsb.pkl" \ + --dataset_type="pianoroll" diff --git a/models/research/fivo/bin/run_sample.sh b/models/research/fivo/bin/run_sample.sh new file mode 100644 index 0000000000000000000000000000000000000000..e0c82a0cb137822e85035a23081ecf6408b7cca1 --- /dev/null +++ b/models/research/fivo/bin/run_sample.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# Copyright 2018 The TensorFlow Authors All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# An example of sampling from the model. + +PIANOROLL_DIR=$HOME/pianorolls + +python run_fivo.py \ + --mode=sample \ + --alsologtostderr \ + --logdir="/tmp/fivo" \ + --model=vrnn \ + --bound=fivo \ + --batch_size=4 \ + --num_samples=4 \ + --split=test \ + --dataset_path="$PIANOROLL_DIR/jsb.pkl" \ + --dataset_type="pianoroll" \ + --prefix_length=25 \ + --sample_length=50 diff --git a/models/research/fivo/bin/run_tests.sh b/models/research/fivo/bin/run_tests.sh new file mode 100644 index 0000000000000000000000000000000000000000..2ea58f016620db98e258494919c6d339b5fd996e --- /dev/null +++ b/models/research/fivo/bin/run_tests.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +python -m fivo.smc_test && \ +python -m fivo.bounds_test && \ +python -m fivo.nested_utils_test && \ +python -m fivo.data.datasets_test && \ +python -m fivo.models.ghmm_test && \ +python -m fivo.models.vrnn_test && \ +python -m fivo.models.srnn_test && \ +python -m fivo.ghmm_runners_test && \ +python -m fivo.runners_test diff --git a/models/research/fivo/bin/run_train.sh b/models/research/fivo/bin/run_train.sh new file mode 100644 index 0000000000000000000000000000000000000000..a845959770c77cd99528005e1ee69e4593fcae0c --- /dev/null +++ b/models/research/fivo/bin/run_train.sh @@ -0,0 +1,31 @@ +#!/bin/bash +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# An example of running training. 
+ +PIANOROLL_DIR=$HOME/pianorolls + +python run_fivo.py \ + --mode=train \ + --logdir=/tmp/fivo \ + --model=vrnn \ + --bound=fivo \ + --summarize_every=100 \ + --batch_size=4 \ + --num_samples=4 \ + --learning_rate=0.0001 \ + --dataset_path="$PIANOROLL_DIR/jsb.pkl" \ + --dataset_type="pianoroll" diff --git a/models/research/fivo/experimental/README.md b/models/research/fivo/experimental/README.md new file mode 100644 index 0000000000000000000000000000000000000000..649de0ba95cdee2fa1b101a588dc48903b2ca13b --- /dev/null +++ b/models/research/fivo/experimental/README.md @@ -0,0 +1 @@ +An experimental codebase for running simple examples. diff --git a/models/research/fivo/experimental/bounds.py b/models/research/fivo/experimental/bounds.py new file mode 100644 index 0000000000000000000000000000000000000000..afc970c59a1a86dbe8438b4e8bba791d3c95aa63 --- /dev/null +++ b/models/research/fivo/experimental/bounds.py @@ -0,0 +1,673 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from collections import namedtuple + +import tensorflow as tf +import summary_utils as summ + +Loss = namedtuple("Loss", "name loss vars") +Loss.__new__.__defaults__ = (tf.GraphKeys.TRAINABLE_VARIABLES,) + + +def iwae(model, observation, num_timesteps, num_samples=1, + summarize=False): + """Compute the IWAE evidence lower bound. + + Args: + model: A callable that computes one timestep of the model. + observation: A shape [batch_size*num_samples, state_size] Tensor + containing z_n, the observation for each sequence in the batch. + num_timesteps: The number of timesteps in each sequence, an integer. + num_samples: The number of samples to use to compute the IWAE bound. + Returns: + log_p_hat: The IWAE estimator of the lower bound on the log marginal. + loss: A tensor that you can perform gradient descent on to optimize the + bound. + maintain_ema_op: A no-op included for compatibility with FIVO. + states: The sequence of states sampled. 
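+    log_weights: A list of the accumulated log weights at each timestep, each of shape [num_samples, batch_size].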
+ """ + # Initialization + num_instances = tf.shape(observation)[0] + batch_size = tf.cast(num_instances / num_samples, tf.int32) + states = [model.zero_state(num_instances)] + log_weights = [] + log_weight_acc = tf.zeros([num_samples, batch_size], dtype=observation.dtype) + + for t in xrange(num_timesteps): + # run the model for one timestep + (zt, log_q_zt, log_p_zt, log_p_x_given_z, _) = model( + states[-1], observation, t) + # update accumulators + states.append(zt) + log_weight = log_p_zt + log_p_x_given_z - log_q_zt + log_weight_acc += tf.reshape(log_weight, [num_samples, batch_size]) + if summarize: + weight_dist = tf.contrib.distributions.Categorical( + logits=tf.transpose(log_weight_acc, perm=[1, 0]), + allow_nan_stats=False) + weight_entropy = weight_dist.entropy() + weight_entropy = tf.reduce_mean(weight_entropy) + tf.summary.scalar("weight_entropy/%d" % t, weight_entropy) + log_weights.append(log_weight_acc) + # Compute the lower bound on the log evidence. + log_p_hat = (tf.reduce_logsumexp(log_weight_acc, axis=0) - + tf.log(tf.cast(num_samples, observation.dtype))) / num_timesteps + loss = -tf.reduce_mean(log_p_hat) + losses = [Loss("log_p_hat", loss)] + + # we clip off the initial state before returning. + # there are no emas for iwae, so we return a noop for that + return log_p_hat, losses, tf.no_op(), states[1:], log_weights + + +def multinomial_resampling(log_weights, states, n, b): + """Resample states with multinomial resampling. + + Args: + log_weights: A (n x b) Tensor representing a batch of b logits for n-ary + Categorical distribution. + states: A list of (b*n x d) Tensors that will be resample in from the groups + of every n-th row. + + Returns: + resampled_states: A list of (b*n x d) Tensors resampled via stratified sampling. + log_probs: A (n x b) Tensor of the log probabilities of the ancestry decisions. + resampling_parameters: The Tensor of parameters of the resampling distribution. + ancestors: An (n x b) Tensor of integral indices representing the ancestry decisions. + resampling_dist: The distribution object for resampling. + """ + log_weights = tf.convert_to_tensor(log_weights) + states = [tf.convert_to_tensor(state) for state in states] + + resampling_parameters = tf.transpose(log_weights, perm=[1,0]) + resampling_dist = tf.contrib.distributions.Categorical(logits=resampling_parameters) + ancestors = tf.stop_gradient( + resampling_dist.sample(sample_shape=n)) + log_probs = resampling_dist.log_prob(ancestors) + + offset = tf.expand_dims(tf.range(b), 0) + ancestor_inds = tf.reshape(ancestors * b + offset, [-1]) + + resampled_states = [] + for state in states: + resampled_states.append(tf.gather(state, ancestor_inds)) + return resampled_states, log_probs, resampling_parameters, ancestors, resampling_dist + +def stratified_resampling(log_weights, states, n, b): + """Resample states with straitified resampling. + + Args: + log_weights: A (n x b) Tensor representing a batch of b logits for n-ary + Categorical distribution. + states: A list of (b*n x d) Tensors that will be resample in from the groups + of every n-th row. + + Returns: + resampled_states: A list of (b*n x d) Tensors resampled via stratified sampling. + log_probs: A (n x b) Tensor of the log probabilities of the ancestry decisions. + resampling_parameters: The Tensor of parameters of the resampling distribution. + ancestors: An (n x b) Tensor of integral indices representing the ancestry decisions. + resampling_dist: The distribution object for resampling. 
+ """ + log_weights = tf.convert_to_tensor(log_weights) + states = [tf.convert_to_tensor(state) for state in states] + + log_weights = tf.transpose(log_weights, perm=[1,0]) + + probs = tf.nn.softmax( + tf.tile(tf.expand_dims(log_weights, axis=1), + [1, n, 1]) + ) + + cdfs = tf.concat([tf.zeros((b,n,1), dtype=probs.dtype), tf.cumsum(probs, axis=2)], 2) + + bins = tf.range(n, dtype=probs.dtype) / n + bins = tf.tile(tf.reshape(bins, [1,-1,1]), [b,1,n+1]) + + strat_cdfs = tf.minimum(tf.maximum((cdfs - bins) * n, 0.0), 1.0) + resampling_parameters = strat_cdfs[:,:,1:] - strat_cdfs[:,:,:-1] + + resampling_dist = tf.contrib.distributions.Categorical( + probs = resampling_parameters, + allow_nan_stats=False) + + ancestors = tf.stop_gradient( + resampling_dist.sample()) + log_probs = resampling_dist.log_prob(ancestors) + + ancestors = tf.transpose(ancestors, perm=[1,0]) + log_probs = tf.transpose(log_probs, perm=[1,0]) + + offset = tf.expand_dims(tf.range(b), 0) + ancestor_inds = tf.reshape(ancestors * b + offset, [-1]) + + resampled_states = [] + for state in states: + resampled_states.append(tf.gather(state, ancestor_inds)) + + return resampled_states, log_probs, resampling_parameters, ancestors, resampling_dist + +def systematic_resampling(log_weights, states, n, b): + """Resample states with systematic resampling. + + Args: + log_weights: A (n x b) Tensor representing a batch of b logits for n-ary + Categorical distribution. + states: A list of (b*n x d) Tensors that will be resample in from the groups + of every n-th row. + + Returns: + resampled_states: A list of (b*n x d) Tensors resampled via stratified sampling. + log_probs: A (n x b) Tensor of the log probabilities of the ancestry decisions. + resampling_parameters: The Tensor of parameters of the resampling distribution. + ancestors: An (n x b) Tensor of integral indices representing the ancestry decisions. + resampling_dist: The distribution object for resampling. + """ + + log_weights = tf.convert_to_tensor(log_weights) + states = [tf.convert_to_tensor(state) for state in states] + + log_weights = tf.transpose(log_weights, perm=[1,0]) + + probs = tf.nn.softmax( + tf.tile(tf.expand_dims(log_weights, axis=1), + [1, n, 1]) + ) + + cdfs = tf.concat([tf.zeros((b,n,1), dtype=probs.dtype), tf.cumsum(probs, axis=2)], 2) + + bins = tf.range(n, dtype=probs.dtype) / n + bins = tf.tile(tf.reshape(bins, [1,-1,1]), [b,1,n+1]) + + strat_cdfs = tf.minimum(tf.maximum((cdfs - bins) * n, 0.0), 1.0) + resampling_parameters = strat_cdfs[:,:,1:] - strat_cdfs[:,:,:-1] + + resampling_dist = tf.contrib.distributions.Categorical( + probs=resampling_parameters, + allow_nan_stats=True) + + U = tf.random_uniform((b, 1, 1), dtype=probs.dtype) + + ancestors = tf.stop_gradient(tf.reduce_sum(tf.to_float(U > strat_cdfs[:,:,1:]), axis=-1)) + log_probs = resampling_dist.log_prob(ancestors) + + ancestors = tf.transpose(ancestors, perm=[1,0]) + log_probs = tf.transpose(log_probs, perm=[1,0]) + + offset = tf.expand_dims(tf.range(b, dtype=probs.dtype), 0) + ancestor_inds = tf.reshape(ancestors * b + offset, [-1]) + + resampled_states = [] + for state in states: + resampled_states.append(tf.gather(state, ancestor_inds)) + + return resampled_states, log_probs, resampling_parameters, ancestors, resampling_dist + + +def log_blend(inputs, weights): + """Blends state in the log space. + + Args: + inputs: A set of scalar states, one for each particle in each particle filter. + Should be [num_samples, batch_size]. + weights: A set of weights used to blend the state. 
Each set of weights + should be of dimension [num_samples] (one weight for each previous particle). + There should be one set of weights for each new particle in each particle filter. + Thus the shape should be [num_samples, batch_size, num_samples] where + the first axis indexes new particle and the last axis indexes old particles. + Returns: + blended: The blended states, a tensor of shape [num_samples, batch_size]. + """ + raw_max = tf.reduce_max(inputs, axis=0, keepdims=True) + my_max = tf.stop_gradient( + tf.where(tf.is_finite(raw_max), raw_max, tf.zeros_like(raw_max)) + ) + # Don't ask. + blended = tf.log(tf.einsum("ijk,kj->ij", weights, tf.exp(inputs - raw_max))) + my_max + return blended + + +def relaxed_resampling(log_weights, states, num_samples, batch_size, + log_r_x=None, blend_type="log", temperature=0.5, + straight_through=False): + """Resample states with relaxed resampling. + + Args: + log_weights: A (n x b) Tensor representing a batch of b logits for n-ary + Categorical distribution. + states: A list of (b*n x d) Tensors that will be resample in from the groups + of every n-th row. + + Returns: + resampled_states: A list of (b*n x d) Tensors resampled via stratified sampling. + log_probs: A (n x b) Tensor of the log probabilities of the ancestry decisions. + resampling_parameters: The Tensor of parameters of the resampling distribution. + ancestors: An (n x b x n) Tensor of relaxed one hot representations of the ancestry decisions. + resampling_dist: The distribution object for resampling. + """ + assert blend_type in ["log", "linear"], "Blend type must be 'log' or 'linear'." + log_weights = tf.convert_to_tensor(log_weights) + states = [tf.convert_to_tensor(state) for state in states] + state_dim = states[0].get_shape().as_list()[-1] + # weights are num_samples by batch_size, so we transpose to get a + # set of batch_size distributions over [0,num_samples). + resampling_parameters = tf.transpose(log_weights, perm=[1, 0]) + resampling_dist = tf.contrib.distributions.RelaxedOneHotCategorical( + temperature, + logits=resampling_parameters) + + # sample num_samples samples from the distribution, resulting in a + # [num_samples, batch_size, num_samples] Tensor that represents a set of + # [num_samples, batch_size] blending weights. The dimensions represent + # [sample index, batch index, blending weight index] + ancestors = resampling_dist.sample(sample_shape=num_samples) + if straight_through: + # Forward pass discrete choices, backwards pass soft choices + hard_ancestor_indices = tf.argmax(ancestors, axis=-1) + hard_ancestors = tf.one_hot(hard_ancestor_indices, num_samples, + dtype=ancestors.dtype) + ancestors = tf.stop_gradient(hard_ancestors - ancestors) + ancestors + log_probs = resampling_dist.log_prob(ancestors) + if log_r_x is not None and blend_type == "log": + log_r_x = tf.reshape(log_r_x, [num_samples, batch_size]) + log_r_x = log_blend(log_r_x, ancestors) + log_r_x = tf.reshape(log_r_x, [num_samples*batch_size]) + elif log_r_x is not None and blend_type == "linear": + # If blend type is linear just add log_r to the states that will be blended + # linearly. 
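+    # log_r_x rides along with the states here so that it is blended by the same matmul below; it is popped back off after the blend.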
+ states.append(log_r_x) + + # transpose the 'indices' to be [batch_index, blending weight index, sample index] + ancestor_inds = tf.transpose(ancestors, perm=[1, 2, 0]) + resampled_states = [] + for state in states: + # state is currently [num_samples * batch_size, state_dim] so we reshape + # to [num_samples, batch_size, state_dim] and then transpose to + # [batch_size, state_size, num_samples] + state = tf.transpose(tf.reshape(state, [num_samples, batch_size, -1]), perm=[1, 2, 0]) + # state is now (batch_size, state_size, num_samples) + # and ancestor is (batch index, blending weight index, sample index) + # multiplying these gives a matrix of size [batch_size, state_size, num_samples] + next_state = tf.matmul(state, ancestor_inds) + # transpose the state to be [num_samples, batch_size, state_size] + # and then reshape it to match the state format. + next_state = tf.reshape(tf.transpose(next_state, perm=[2,0,1]), [num_samples*batch_size, state_dim]) + resampled_states.append(next_state) + + new_dist = tf.contrib.distributions.Categorical( + logits=resampling_parameters) + + if log_r_x is not None and blend_type == "linear": + # If blend type is linear pop off log_r that we added to the states. + log_r_x = tf.squeeze(resampled_states[-1]) + resampled_states = resampled_states[:-1] + return resampled_states, log_probs, log_r_x, resampling_parameters, ancestors, new_dist + + +def fivo(model, + observation, + num_timesteps, + resampling_schedule, + num_samples=1, + use_resampling_grads=True, + resampling_type="multinomial", + resampling_temperature=0.5, + aux=True, + summarize=False): + """Compute the FIVO evidence lower bound. + + Args: + model: A callable that computes one timestep of the model. + observation: A shape [batch_size*num_samples, state_size] Tensor + containing z_n, the observation for each sequence in the batch. + num_timesteps: The number of timesteps in each sequence, an integer. + resampling_schedule: A list of booleans of length num_timesteps, contains + True if a resampling should occur on a specific timestep. + num_samples: The number of samples to use to compute the IWAE bound. + use_resampling_grads: Whether or not to include the resampling gradients + in loss. + resampling type: The type of resampling, one of "multinomial", "stratified", + "relaxed-logblend", "relaxed-linearblend", "relaxed-stateblend", or + "systematic". + resampling_temperature: A positive temperature only used for relaxed + resampling. + aux: If true, compute the FIVO-AUX bound. + Returns: + log_p_hat: The IWAE estimator of the lower bound on the log marginal. + loss: A tensor that you can perform gradient descent on to optimize the + bound. + maintain_ema_op: An op to update the baseline ema used for the resampling + gradients. + states: The sequence of states sampled. 
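+    log_weights_all: A list of the accumulated log weights at each timestep (taken before any resampling at that step), each of shape [num_samples, batch_size].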
+ """ + # Initialization + num_instances = tf.cast(tf.shape(observation)[0], tf.int32) + batch_size = tf.cast(num_instances / num_samples, tf.int32) + states = [model.zero_state(num_instances)] + prev_state = states[0] + log_weight_acc = tf.zeros(shape=[num_samples, batch_size], dtype=observation.dtype) + prev_log_r_zt = tf.zeros([num_instances], dtype=observation.dtype) + log_weights = [] + log_weights_all = [] + log_p_hats = [] + resampling_log_probs = [] + for t in xrange(num_timesteps): + # run the model for one timestep + (zt, log_q_zt, log_p_zt, log_p_x_given_z, log_r_zt) = model( + prev_state, observation, t) + # update accumulators + states.append(zt) + log_weight = log_p_zt + log_p_x_given_z - log_q_zt + if aux: + if t == num_timesteps - 1: + log_weight -= prev_log_r_zt + else: + log_weight += log_r_zt - prev_log_r_zt + prev_log_r_zt = log_r_zt + log_weight_acc += tf.reshape(log_weight, [num_samples, batch_size]) + log_weights_all.append(log_weight_acc) + if resampling_schedule[t]: + + # These objects will be resampled + to_resample = [states[-1]] + if aux and "relaxed" not in resampling_type: + to_resample.append(prev_log_r_zt) + + # do the resampling + if resampling_type == "multinomial": + (resampled, + resampling_log_prob, + _, _, _) = multinomial_resampling(log_weight_acc, + to_resample, + num_samples, + batch_size) + elif resampling_type == "stratified": + (resampled, + resampling_log_prob, + _, _, _) = stratified_resampling(log_weight_acc, + to_resample, + num_samples, + batch_size) + elif resampling_type == "systematic": + (resampled, + resampling_log_prob, + _, _, _) = systematic_resampling(log_weight_acc, + to_resample, + num_samples, + batch_size) + elif "relaxed" in resampling_type: + if aux: + if resampling_type == "relaxed-logblend": + (resampled, + resampling_log_prob, + prev_log_r_zt, + _, _, _) = relaxed_resampling(log_weight_acc, + to_resample, + num_samples, + batch_size, + temperature=resampling_temperature, + log_r_x=prev_log_r_zt, + blend_type="log") + elif resampling_type == "relaxed-linearblend": + (resampled, + resampling_log_prob, + prev_log_r_zt, + _, _, _) = relaxed_resampling(log_weight_acc, + to_resample, + num_samples, + batch_size, + temperature=resampling_temperature, + log_r_x=prev_log_r_zt, + blend_type="linear") + elif resampling_type == "relaxed-stateblend": + (resampled, + resampling_log_prob, + _, _, _, _) = relaxed_resampling(log_weight_acc, + to_resample, + num_samples, + batch_size, + temperature=resampling_temperature) + # Calculate prev_log_r_zt from the post-resampling state + prev_r_zt = model.r.r_xn(resampled[0], t) + prev_log_r_zt = tf.reduce_sum( + prev_r_zt.log_prob(observation), axis=[1]) + elif resampling_type == "relaxed-stateblend-st": + (resampled, + resampling_log_prob, + _, _, _, _) = relaxed_resampling(log_weight_acc, + to_resample, + num_samples, + batch_size, + temperature=resampling_temperature, + straight_through=True) + # Calculate prev_log_r_zt from the post-resampling state + prev_r_zt = model.r.r_xn(resampled[0], t) + prev_log_r_zt = tf.reduce_sum( + prev_r_zt.log_prob(observation), axis=[1]) + else: + (resampled, + resampling_log_prob, + _, _, _, _) = relaxed_resampling(log_weight_acc, + to_resample, + num_samples, + batch_size, + temperature=resampling_temperature) + #if summarize: + # resampling_entropy = resampling_dist.entropy() + # resampling_entropy = tf.reduce_mean(resampling_entropy) + # tf.summary.scalar("weight_entropy/%d" % t, resampling_entropy) + + 
resampling_log_probs.append(tf.reduce_sum(resampling_log_prob, axis=0)) + prev_state = resampled[0] + if aux and "relaxed" not in resampling_type: + # Squeeze out the extra dim potentially added by resampling. + # prev_log_r_zt should always be [num_instances] + prev_log_r_zt = tf.squeeze(resampled[1]) + # Update the log p hat estimate, taking a log sum exp over the sample + # dimension. The appended tensor is [batch_size]. + log_p_hats.append( + tf.reduce_logsumexp(log_weight_acc, axis=0) - tf.log( + tf.cast(num_samples, dtype=observation.dtype))) + # reset the weights + log_weights.append(log_weight_acc) + log_weight_acc = tf.zeros_like(log_weight_acc) + else: + prev_state = states[-1] + # Compute the final weight update. If we just resampled this will be zero. + final_update = (tf.reduce_logsumexp(log_weight_acc, axis=0) - + tf.log(tf.cast(num_samples, dtype=observation.dtype))) + # If we ever resampled, then sum up the previous log p hat terms + if len(log_p_hats) > 0: + log_p_hat = tf.reduce_sum(log_p_hats, axis=0) + final_update + else: # otherwise, log_p_hat only comes from the final update + log_p_hat = final_update + + if use_resampling_grads and any(resampling_schedule): + # compute the rewards + # cumsum([a, b, c]) => [a, a+b, a+b+c] + # learning signal at timestep t is + # [sum from i=t+1 to T of log_p_hat_i for t=1:T] + # so we will compute (sum from i=1 to T of log_p_hat_i) + # and at timestep t will subtract off (sum from i=1 to t of log_p_hat_i) + # rewards is a [num_resampling_events, batch_size] Tensor + rewards = tf.stop_gradient( + tf.expand_dims(log_p_hat, 0) - tf.cumsum(log_p_hats, axis=0)) + batch_avg_rewards = tf.reduce_mean(rewards, axis=1) + # compute ema baseline. + # centered_rewards is [num_resampling_events, batch_size] + baseline_ema = tf.train.ExponentialMovingAverage(decay=0.94) + maintain_baseline_op = baseline_ema.apply([batch_avg_rewards]) + baseline = tf.expand_dims(baseline_ema.average(batch_avg_rewards), 1) + centered_rewards = rewards - baseline + if summarize: + summ.summarize_learning_signal(rewards, "rewards") + summ.summarize_learning_signal(centered_rewards, "centered_rewards") + # compute the loss tensor. + resampling_grads = tf.reduce_sum( + tf.stop_gradient(centered_rewards) * resampling_log_probs, axis=0) + losses = [Loss("log_p_hat", -tf.reduce_mean(log_p_hat)/num_timesteps), + Loss("resampling_grads", -tf.reduce_mean(resampling_grads)/num_timesteps)] + else: + losses = [Loss("log_p_hat", -tf.reduce_mean(log_p_hat)/num_timesteps)] + maintain_baseline_op = tf.no_op() + + log_p_hat /= num_timesteps + # we clip off the initial state before returning. 
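+  # Note: log_weights_all has one entry per timestep, while log_weights only has entries for timesteps where resampling occurred.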
+ return log_p_hat, losses, maintain_baseline_op, states[1:], log_weights_all + + +def fivo_aux_td( + model, + observation, + num_timesteps, + resampling_schedule, + num_samples=1, + summarize=False): + """Compute the FIVO_AUX evidence lower bound.""" + # Initialization + num_instances = tf.cast(tf.shape(observation)[0], tf.int32) + batch_size = tf.cast(num_instances / num_samples, tf.int32) + states = [model.zero_state(num_instances)] + prev_state = states[0] + log_weight_acc = tf.zeros(shape=[num_samples, batch_size], dtype=observation.dtype) + prev_log_r = tf.zeros([num_instances], dtype=observation.dtype) + # must be pre-resampling + log_rs = [] + # must be post-resampling + r_tilde_params = [model.r_tilde.r_zt(states[0], observation, 0)] + log_r_tildes = [] + log_p_xs = [] + # contains the weight at each timestep before resampling only on resampling timesteps + log_weights = [] + # contains weight at each timestep before resampling + log_weights_all = [] + log_p_hats = [] + for t in xrange(num_timesteps): + # run the model for one timestep + # zt is state, [num_instances, state_dim] + # log_q_zt, log_p_x_given_z is [num_instances] + # r_tilde_mu, r_tilde_sigma is [num_instances, state_dim] + # p_ztplus1 is a normal distribution on [num_instances, state_dim] + (zt, log_q_zt, log_p_zt, log_p_x_given_z, + r_tilde_mu, r_tilde_sigma_sq, p_ztplus1) = model(prev_state, observation, t) + + # Compute the log weight without log r. + log_weight = log_p_zt + log_p_x_given_z - log_q_zt + + # Compute log r. + if t == num_timesteps - 1: + log_r = tf.zeros_like(prev_log_r) + else: + p_mu = p_ztplus1.mean() + p_sigma_sq = p_ztplus1.variance() + log_r = (tf.log(r_tilde_sigma_sq) - + tf.log(r_tilde_sigma_sq + p_sigma_sq) - + tf.square(r_tilde_mu - p_mu)/(r_tilde_sigma_sq + p_sigma_sq)) + log_r = 0.5*tf.reduce_sum(log_r, axis=-1) + + #log_weight += tf.stop_gradient(log_r - prev_log_r) + log_weight += log_r - prev_log_r + log_weight_acc += tf.reshape(log_weight, [num_samples, batch_size]) + + # Update accumulators + states.append(zt) + log_weights_all.append(log_weight_acc) + log_p_xs.append(log_p_x_given_z) + log_rs.append(log_r) + + # Compute log_r_tilde as [num_instances] Tensor. + prev_r_tilde_mu, prev_r_tilde_sigma_sq = r_tilde_params[-1] + prev_log_r_tilde = -0.5*tf.reduce_sum( + tf.square(zt - prev_r_tilde_mu)/prev_r_tilde_sigma_sq, axis=-1) + #tf.square(tf.stop_gradient(zt) - r_tilde_mu)/r_tilde_sigma_sq, axis=-1) + #tf.square(zt - r_tilde_mu)/r_tilde_sigma_sq, axis=-1) + log_r_tildes.append(prev_log_r_tilde) + + # optionally resample + if resampling_schedule[t]: + # These objects will be resampled + if t < num_timesteps - 1: + to_resample = [zt, log_r, r_tilde_mu, r_tilde_sigma_sq] + else: + to_resample = [zt, log_r] + (resampled, + _, _, _, _) = multinomial_resampling(log_weight_acc, + to_resample, + num_samples, + batch_size) + prev_state = resampled[0] + # Squeeze out the extra dim potentially added by resampling. + # prev_log_r_zt and log_r_tilde should always be [num_instances] + prev_log_r = tf.squeeze(resampled[1]) + if t < num_timesteps -1: + r_tilde_params.append((resampled[2], resampled[3])) + # Update the log p hat estimate, taking a log sum exp over the sample + # dimension. The appended tensor is [batch_size]. 
+ log_p_hats.append( + tf.reduce_logsumexp(log_weight_acc, axis=0) - tf.log( + tf.cast(num_samples, dtype=observation.dtype))) + # reset the weights + log_weights.append(log_weight_acc) + log_weight_acc = tf.zeros_like(log_weight_acc) + else: + prev_state = zt + prev_log_r = log_r + if t < num_timesteps - 1: + r_tilde_params.append((r_tilde_mu, r_tilde_sigma_sq)) + + # Compute the final weight update. If we just resampled this will be zero. + final_update = (tf.reduce_logsumexp(log_weight_acc, axis=0) - + tf.log(tf.cast(num_samples, dtype=observation.dtype))) + # If we ever resampled, then sum up the previous log p hat terms + if len(log_p_hats) > 0: + log_p_hat = tf.reduce_sum(log_p_hats, axis=0) + final_update + else: # otherwise, log_p_hat only comes from the final update + log_p_hat = final_update + + # Compute the bellman loss. + # Will remove the first timestep as it is not used. + # log p(x_t|z_t) is in row t-1. + log_p_x = tf.reshape(tf.stack(log_p_xs), + [num_timesteps, num_samples, batch_size]) + # log r_t is contained in row t-1. + # last column is zeros (because at timestep T (num_timesteps) r is 1. + log_r = tf.reshape(tf.stack(log_rs), + [num_timesteps, num_samples, batch_size]) + # [num_timesteps, num_instances]. log r_tilde_t is in row t-1. + log_r_tilde = tf.reshape(tf.stack(log_r_tildes), + [num_timesteps, num_samples, batch_size]) + log_lambda = tf.reduce_mean(log_r_tilde - log_p_x - log_r, axis=1, + keepdims=True) + bellman_sos = tf.reduce_mean(tf.square( + log_r_tilde - tf.stop_gradient(log_lambda + log_p_x + log_r)), axis=[0, 1]) + bellman_loss = tf.reduce_mean(bellman_sos)/num_timesteps + tf.summary.scalar("bellman_loss", bellman_loss) + + if len(tf.get_collection("LOG_P_HAT_VARS")) == 0: + log_p_hat_collection = list(set(tf.trainable_variables()) - + set(tf.get_collection("R_TILDE_VARS"))) + for v in log_p_hat_collection: + tf.add_to_collection("LOG_P_HAT_VARS", v) + + log_p_hat /= num_timesteps + losses = [Loss("log_p_hat", -tf.reduce_mean(log_p_hat), "LOG_P_HAT_VARS"), + Loss("bellman_loss", bellman_loss, "R_TILDE_VARS")] + + return log_p_hat, losses, tf.no_op(), states[1:], log_weights_all diff --git a/models/research/fivo/experimental/data.py b/models/research/fivo/experimental/data.py new file mode 100644 index 0000000000000000000000000000000000000000..0842f212991e1651a12cca239c5b8380fea9d0f8 --- /dev/null +++ b/models/research/fivo/experimental/data.py @@ -0,0 +1,192 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Datasets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf + +import models + + +def make_long_chain_dataset( + state_size=1, + num_obs=5, + steps_per_obs=3, + variance=1., + observation_variance=1., + batch_size=4, + num_samples=1, + observation_type=models.STANDARD_OBSERVATION, + transition_type=models.STANDARD_TRANSITION, + fixed_observation=None, + dtype="float32"): + """Creates a long chain data generating process. + + Creates a tf.data.Dataset that provides batches of data from a long + chain. + + Args: + state_size: The dimension of the state space of the process. + num_obs: The number of observations in the chain. + steps_per_obs: The number of steps between each observation. + variance: The variance of the normal distributions used at each timestep. + batch_size: The number of trajectories to include in each batch. + num_samples: The number of replicas of each trajectory to include in each + batch. + dtype: The datatype of the states and observations. + Returns: + dataset: A tf.data.Dataset that can be iterated over. + """ + num_timesteps = num_obs * steps_per_obs + def data_generator(): + """An infinite generator of latents and observations from the model.""" + while True: + states = [] + observations = [] + # z0 ~ Normal(0, sqrt(variance)). + states.append( + np.random.normal(size=[state_size], + scale=np.sqrt(variance)).astype(dtype)) + # start at 1 because we've already generated z0 + # go to num_timesteps+1 because we want to include the num_timesteps-th step + for t in xrange(1, num_timesteps+1): + if transition_type == models.ROUND_TRANSITION: + loc = np.round(states[-1]) + elif transition_type == models.STANDARD_TRANSITION: + loc = states[-1] + new_state = np.random.normal(size=[state_size], + loc=loc, + scale=np.sqrt(variance)) + states.append(new_state.astype(dtype)) + if t % steps_per_obs == 0: + if fixed_observation is None: + if observation_type == models.SQUARED_OBSERVATION: + loc = np.square(states[-1]) + elif observation_type == models.ABS_OBSERVATION: + loc = np.abs(states[-1]) + elif observation_type == models.STANDARD_OBSERVATION: + loc = states[-1] + new_obs = np.random.normal(size=[state_size], + loc=loc, + scale=np.sqrt(observation_variance)).astype(dtype) + else: + new_obs = np.ones([state_size])* fixed_observation + + observations.append(new_obs) + yield states, observations + + dataset = tf.data.Dataset.from_generator( + data_generator, + output_types=(tf.as_dtype(dtype), tf.as_dtype(dtype)), + output_shapes=([num_timesteps+1, state_size], [num_obs, state_size])) + dataset = dataset.repeat().batch(batch_size) + + def tile_batch(state, observation): + state = tf.tile(state, [num_samples, 1, 1]) + observation = tf.tile(observation, [num_samples, 1, 1]) + return state, observation + + dataset = dataset.map(tile_batch, num_parallel_calls=12).prefetch(1024) + return dataset + + +def make_dataset(bs=None, + state_size=1, + num_timesteps=10, + variance=1., + prior_type="unimodal", + bimodal_prior_weight=0.5, + bimodal_prior_mean=1, + transition_type=models.STANDARD_TRANSITION, + fixed_observation=None, + batch_size=4, + num_samples=1, + dtype='float32'): + """Creates a data generating process. + + Creates a tf.data.Dataset that provides batches of data. + + Args: + bs: The parameters of the data generating process. If None, new bs are + randomly generated. 
+ state_size: The dimension of the state space of the process. + num_timesteps: The length of the state sequences in the process. + variance: The variance of the normal distributions used at each timestep. + batch_size: The number of trajectories to include in each batch. + num_samples: The number of replicas of each trajectory to include in each + batch. + Returns: + bs: The true bs used to generate the data + dataset: A tf.data.Dataset that can be iterated over. + """ + + if bs is None: + bs = [np.random.uniform(size=[state_size]).astype(dtype) for _ in xrange(num_timesteps)] + tf.logging.info("data generating processs bs: %s", + np.array(bs).reshape(num_timesteps)) + + + def data_generator(): + """An infinite generator of latents and observations from the model.""" + while True: + states = [] + if prior_type == "unimodal" or prior_type == "nonlinear": + # Prior is Normal(0, sqrt(variance)). + states.append(np.random.normal(size=[state_size], scale=np.sqrt(variance)).astype(dtype)) + elif prior_type == "bimodal": + if np.random.uniform() > bimodal_prior_weight: + loc = bimodal_prior_mean + else: + loc = - bimodal_prior_mean + states.append(np.random.normal(size=[state_size], + loc=loc, + scale=np.sqrt(variance) + ).astype(dtype)) + + for t in xrange(num_timesteps): + if transition_type == models.ROUND_TRANSITION: + loc = np.round(states[-1]) + elif transition_type == models.STANDARD_TRANSITION: + loc = states[-1] + loc += bs[t] + new_state = np.random.normal(size=[state_size], + loc=loc, + scale=np.sqrt(variance)).astype(dtype) + states.append(new_state) + + if fixed_observation is None: + observation = states[-1] + else: + observation = np.ones_like(states[-1]) * fixed_observation + yield np.array(states[:-1]), observation + + dataset = tf.data.Dataset.from_generator( + data_generator, + output_types=(tf.as_dtype(dtype), tf.as_dtype(dtype)), + output_shapes=([num_timesteps, state_size], [state_size])) + dataset = dataset.repeat().batch(batch_size) + + def tile_batch(state, observation): + state = tf.tile(state, [num_samples, 1, 1]) + observation = tf.tile(observation, [num_samples, 1]) + return state, observation + + dataset = dataset.map(tile_batch, num_parallel_calls=12).prefetch(1024) + return np.array(bs), dataset diff --git a/models/research/fivo/experimental/models.py b/models/research/fivo/experimental/models.py new file mode 100644 index 0000000000000000000000000000000000000000..62801ca1ee145e64c80b66e0c83dd7d834ac0847 --- /dev/null +++ b/models/research/fivo/experimental/models.py @@ -0,0 +1,1227 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import sonnet as snt +import tensorflow as tf +import numpy as np +import math + +SQUARED_OBSERVATION = "squared" +ABS_OBSERVATION = "abs" +STANDARD_OBSERVATION = "standard" +OBSERVATION_TYPES = [SQUARED_OBSERVATION, ABS_OBSERVATION, STANDARD_OBSERVATION] + +ROUND_TRANSITION = "round" +STANDARD_TRANSITION = "standard" +TRANSITION_TYPES = [ROUND_TRANSITION, STANDARD_TRANSITION] + + +class Q(object): + + def __init__(self, + state_size, + num_timesteps, + sigma_min=1e-5, + dtype=tf.float32, + random_seed=None, + init_mu0_to_zero=False, + graph_collection_name="Q_VARS"): + self.sigma_min = sigma_min + self.dtype = dtype + self.graph_collection_name = graph_collection_name + initializers = [] + for t in xrange(num_timesteps): + if t == 0 and init_mu0_to_zero: + initializers.append( + {"w": tf.zeros_initializer, "b": tf.zeros_initializer}) + else: + initializers.append( + {"w": tf.random_uniform_initializer(seed=random_seed), + "b": tf.zeros_initializer}) + + def custom_getter(getter, *args, **kwargs): + out = getter(*args, **kwargs) + ref = tf.get_collection_ref(self.graph_collection_name) + if out not in ref: + ref.append(out) + return out + + self.mus = [ + snt.Linear(output_size=state_size, + initializers=initializers[t], + name="q_mu_%d" % t, + custom_getter=custom_getter + ) + for t in xrange(num_timesteps) + ] + self.sigmas = [ + tf.get_variable( + shape=[state_size], + dtype=self.dtype, + name="q_sigma_%d" % (t + 1), + collections=[tf.GraphKeys.GLOBAL_VARIABLES, graph_collection_name], + initializer=tf.random_uniform_initializer(seed=random_seed)) + for t in xrange(num_timesteps) + ] + + def q_zt(self, observation, prev_state, t): + batch_size = tf.shape(prev_state)[0] + q_mu = self.mus[t](tf.concat([observation, prev_state], axis=1)) + q_sigma = tf.maximum(tf.nn.softplus(self.sigmas[t]), self.sigma_min) + q_sigma = tf.tile(q_sigma[tf.newaxis, :], [batch_size, 1]) + q_zt = tf.contrib.distributions.Normal(loc=q_mu, scale=tf.sqrt(q_sigma)) + return q_zt + + def summarize_weights(self): + for t, sigma in enumerate(self.sigmas): + tf.summary.scalar("q_sigma/%d" % t, sigma[0]) + for t, f in enumerate(self.mus): + tf.summary.scalar("q_mu/b_%d" % t, f.b[0]) + tf.summary.scalar("q_mu/w_obs_%d" % t, f.w[0,0]) + if t != 0: + tf.summary.scalar("q_mu/w_prev_state_%d" % t, f.w[1,0]) + + +class PreviousStateQ(Q): + + def q_zt(self, unused_observation, prev_state, t): + batch_size = tf.shape(prev_state)[0] + q_mu = self.mus[t](prev_state) + q_sigma = tf.maximum(tf.nn.softplus(self.sigmas[t]), self.sigma_min) + q_sigma = tf.tile(q_sigma[tf.newaxis, :], [batch_size, 1]) + q_zt = tf.contrib.distributions.Normal(loc=q_mu, scale=tf.sqrt(q_sigma)) + return q_zt + + def summarize_weights(self): + for t, sigma in enumerate(self.sigmas): + tf.summary.scalar("q_sigma/%d" % t, sigma[0]) + for t, f in enumerate(self.mus): + tf.summary.scalar("q_mu/b_%d" % t, f.b[0]) + tf.summary.scalar("q_mu/w_prev_state_%d" % t, f.w[0,0]) + + +class ObservationQ(Q): + + def q_zt(self, observation, prev_state, t): + batch_size = tf.shape(prev_state)[0] + q_mu = self.mus[t](observation) + q_sigma = tf.maximum(tf.nn.softplus(self.sigmas[t]), self.sigma_min) + q_sigma = tf.tile(q_sigma[tf.newaxis, :], [batch_size, 1]) + q_zt = tf.contrib.distributions.Normal(loc=q_mu, scale=tf.sqrt(q_sigma)) + return 
q_zt + + def summarize_weights(self): + for t, sigma in enumerate(self.sigmas): + tf.summary.scalar("q_sigma/%d" % t, sigma[0]) + for t, f in enumerate(self.mus): + tf.summary.scalar("q_mu/b_%d" % t, f.b[0]) + tf.summary.scalar("q_mu/w_obs_%d" % t, f.w[0,0]) + + +class SimpleMeanQ(object): + + def __init__(self, + state_size, + num_timesteps, + sigma_min=1e-5, + dtype=tf.float32, + random_seed=None, + init_mu0_to_zero=False, + graph_collection_name="Q_VARS"): + self.sigma_min = sigma_min + self.dtype = dtype + self.graph_collection_name = graph_collection_name + initializers = [] + for t in xrange(num_timesteps): + if t == 0 and init_mu0_to_zero: + initializers.append(tf.zeros_initializer) + else: + initializers.append(tf.random_uniform_initializer(seed=random_seed)) + + self.mus = [ + tf.get_variable( + shape=[state_size], + dtype=self.dtype, + name="q_mu_%d" % (t + 1), + collections=[tf.GraphKeys.GLOBAL_VARIABLES, graph_collection_name], + initializer=initializers[t]) + for t in xrange(num_timesteps) + ] + self.sigmas = [ + tf.get_variable( + shape=[state_size], + dtype=self.dtype, + name="q_sigma_%d" % (t + 1), + collections=[tf.GraphKeys.GLOBAL_VARIABLES, graph_collection_name], + initializer=tf.random_uniform_initializer(seed=random_seed)) + for t in xrange(num_timesteps) + ] + + def q_zt(self, unused_observation, prev_state, t): + batch_size = tf.shape(prev_state)[0] + q_mu = tf.tile(self.mus[t][tf.newaxis, :], [batch_size, 1]) + q_sigma = tf.maximum(tf.nn.softplus(self.sigmas[t]), self.sigma_min) + q_sigma = tf.tile(q_sigma[tf.newaxis, :], [batch_size, 1]) + q_zt = tf.contrib.distributions.Normal(loc=q_mu, scale=tf.sqrt(q_sigma)) + return q_zt + + def summarize_weights(self): + for t, sigma in enumerate(self.sigmas): + tf.summary.scalar("q_sigma/%d" % t, sigma[0]) + for t, f in enumerate(self.mus): + tf.summary.scalar("q_mu/%d" % t, f[0]) + + +class R(object): + + def __init__(self, + state_size, + num_timesteps, + sigma_min=1e-5, + dtype=tf.float32, + sigma_init=1., + random_seed=None, + graph_collection_name="R_VARS"): + self.dtype = dtype + self.sigma_min = sigma_min + initializers = {"w": tf.truncated_normal_initializer(seed=random_seed), + "b": tf.zeros_initializer} + self.graph_collection_name=graph_collection_name + + def custom_getter(getter, *args, **kwargs): + out = getter(*args, **kwargs) + ref = tf.get_collection_ref(self.graph_collection_name) + if out not in ref: + ref.append(out) + return out + + self.mus= [ + snt.Linear(output_size=state_size, + initializers=initializers, + name="r_mu_%d" % t, + custom_getter=custom_getter) + for t in xrange(num_timesteps) + ] + + self.sigmas = [ + tf.get_variable( + shape=[state_size], + dtype=self.dtype, + name="r_sigma_%d" % (t + 1), + collections=[tf.GraphKeys.GLOBAL_VARIABLES, graph_collection_name], + #initializer=tf.random_uniform_initializer(seed=random_seed, maxval=100)) + initializer=tf.constant_initializer(sigma_init)) + for t in xrange(num_timesteps) + ] + + def r_xn(self, z_t, t): + batch_size = tf.shape(z_t)[0] + r_mu = self.mus[t](z_t) + r_sigma = tf.maximum(tf.nn.softplus(self.sigmas[t]), self.sigma_min) + r_sigma = tf.tile(r_sigma[tf.newaxis, :], [batch_size, 1]) + return tf.contrib.distributions.Normal( + loc=r_mu, scale=tf.sqrt(r_sigma)) + + def summarize_weights(self): + for t in range(len(self.mus) - 1): + tf.summary.scalar("r_mu/%d" % t, self.mus[t][0]) + tf.summary.scalar("r_sigma/%d" % t, self.sigmas[t][0]) + + +class P(object): + + def __init__(self, + state_size, + num_timesteps, + sigma_min=1e-5, + 
variance=1.0, + dtype=tf.float32, + random_seed=None, + trainable=True, + init_bs_to_zero=False, + graph_collection_name="P_VARS"): + self.state_size = state_size + self.num_timesteps = num_timesteps + self.sigma_min = sigma_min + self.dtype = dtype + self.variance = variance + self.graph_collection_name = graph_collection_name + if init_bs_to_zero: + initializers = [tf.zeros_initializer for _ in xrange(num_timesteps)] + else: + initializers = [tf.random_uniform_initializer(seed=random_seed) for _ in xrange(num_timesteps)] + + self.bs = [ + tf.get_variable( + shape=[state_size], + dtype=self.dtype, + name="p_b_%d" % (t + 1), + initializer=initializers[t], + collections=[tf.GraphKeys.GLOBAL_VARIABLES, graph_collection_name], + trainable=trainable) for t in xrange(num_timesteps) + ] + self.Bs = tf.cumsum(self.bs, reverse=True, axis=0) + + def posterior(self, observation, prev_state, t): + """Computes the true posterior p(z_t|z_{t-1}, z_n).""" + # bs[0] is really b_1 + # Bs[i] is sum from k=i+1^n b_k + mu = observation - self.Bs[t] + if t > 0: + mu += (prev_state + self.bs[t - 1]) * float(self.num_timesteps - t) + mu /= float(self.num_timesteps - t + 1) + sigma = tf.ones_like(mu) * self.variance * ( + float(self.num_timesteps - t) / float(self.num_timesteps - t + 1)) + return tf.contrib.distributions.Normal(loc=mu, scale=tf.sqrt(sigma)) + + def lookahead(self, state, t): + """Computes the true lookahead distribution p(z_n|z_t).""" + mu = state + self.Bs[t] + sigma = tf.ones_like(state) * self.variance * float(self.num_timesteps - t) + return tf.contrib.distributions.Normal(loc=mu, scale=tf.sqrt(sigma)) + + def likelihood(self, observation): + batch_size = tf.shape(observation)[0] + mu = tf.tile(tf.reduce_sum(self.bs, axis=0)[tf.newaxis, :], [batch_size, 1]) + sigma = tf.ones_like(mu) * self.variance * (self.num_timesteps + 1) + dist = tf.contrib.distributions.Normal(loc=mu, scale=tf.sqrt(sigma)) + # Average over the batch and take the sum over the state size + return tf.reduce_mean(tf.reduce_sum(dist.log_prob(observation), axis=1)) + + def p_zt(self, prev_state, t): + """Computes the model p(z_t| z_{t-1}).""" + batch_size = tf.shape(prev_state)[0] + if t > 0: + z_mu_p = prev_state + self.bs[t - 1] + else: # p(z_0) is Normal(0,1) + z_mu_p = tf.zeros([batch_size, self.state_size], dtype=self.dtype) + p_zt = tf.contrib.distributions.Normal( + loc=z_mu_p, scale=tf.sqrt(tf.ones_like(z_mu_p) * self.variance)) + return p_zt + + def generative(self, unused_observation, z_nm1): + """Computes the model's generative distribution p(z_n| z_{n-1}).""" + generative_p_mu = z_nm1 + self.bs[-1] + return tf.contrib.distributions.Normal( + loc=generative_p_mu, scale=tf.sqrt(tf.ones_like(generative_p_mu) * self.variance)) + + +class ShortChainNonlinearP(object): + + def __init__(self, + state_size, + num_timesteps, + sigma_min=1e-5, + variance=1.0, + observation_variance=1.0, + transition_type=STANDARD_TRANSITION, + transition_dist=tf.contrib.distributions.Normal, + dtype=tf.float32, + random_seed=None): + self.state_size = state_size + self.num_timesteps = num_timesteps + self.sigma_min = sigma_min + self.dtype = dtype + self.variance = variance + self.observation_variance = observation_variance + self.transition_type = transition_type + self.transition_dist = transition_dist + + def p_zt(self, prev_state, t): + """Computes the model p(z_t| z_{t-1}).""" + batch_size = tf.shape(prev_state)[0] + if t > 0: + if self.transition_type == ROUND_TRANSITION: + loc = tf.round(prev_state) + tf.logging.info("p(z_%d | z_%d) 
~ N(round(z_%d), %0.1f)" % (t, t-1, t-1, self.variance)) + elif self.transition_type == STANDARD_TRANSITION: + loc = prev_state + tf.logging.info("p(z_%d | z_%d) ~ N(z_%d, %0.1f)" % (t, t-1, t-1, self.variance)) + else: # p(z_0) is Normal(0,1) + loc = tf.zeros([batch_size, self.state_size], dtype=self.dtype) + tf.logging.info("p(z_0) ~ N(0,%0.1f)" % self.variance) + + p_zt = self.transition_dist( + loc=loc, + scale=tf.sqrt(tf.ones_like(loc) * self.variance)) + return p_zt + + def generative(self, unused_obs, z_ni): + """Computes the model's generative distribution p(x_i| z_{ni}).""" + if self.transition_type == ROUND_TRANSITION: + loc = tf.round(z_ni) + elif self.transition_type == STANDARD_TRANSITION: + loc = z_ni + generative_sigma_sq = tf.ones_like(loc) * self.observation_variance + return self.transition_dist( + loc=loc, scale=tf.sqrt(generative_sigma_sq)) + + +class BimodalPriorP(object): + + def __init__(self, + state_size, + num_timesteps, + mixing_coeff=0.5, + prior_mode_mean=1, + sigma_min=1e-5, + variance=1.0, + dtype=tf.float32, + random_seed=None, + trainable=True, + init_bs_to_zero=False, + graph_collection_name="P_VARS"): + self.state_size = state_size + self.num_timesteps = num_timesteps + self.sigma_min = sigma_min + self.dtype = dtype + self.variance = variance + self.mixing_coeff = mixing_coeff + self.prior_mode_mean = prior_mode_mean + + if init_bs_to_zero: + initializers = [tf.zeros_initializer for _ in xrange(num_timesteps)] + else: + initializers = [tf.random_uniform_initializer(seed=random_seed) for _ in xrange(num_timesteps)] + + self.bs = [ + tf.get_variable( + shape=[state_size], + dtype=self.dtype, + name="b_%d" % (t + 1), + initializer=initializers[t], + collections=[tf.GraphKeys.GLOBAL_VARIABLES, graph_collection_name], + trainable=trainable) for t in xrange(num_timesteps) + ] + self.Bs = tf.cumsum(self.bs, reverse=True, axis=0) + + def posterior(self, observation, prev_state, t): + # NOTE: This is currently wrong, but would require a refactoring of + # summarize_q to fix as kl is not defined for a mixture + """Computes the true posterior p(z_t|z_{t-1}, z_n).""" + # bs[0] is really b_1 + # Bs[i] is sum from k=i+1^n b_k + mu = observation - self.Bs[t] + if t > 0: + mu += (prev_state + self.bs[t - 1]) * float(self.num_timesteps - t) + mu /= float(self.num_timesteps - t + 1) + sigma = tf.ones_like(mu) * self.variance * ( + float(self.num_timesteps - t) / float(self.num_timesteps - t + 1)) + return tf.contrib.distributions.Normal(loc=mu, scale=tf.sqrt(sigma)) + + def lookahead(self, state, t): + """Computes the true lookahead distribution p(z_n|z_t).""" + mu = state + self.Bs[t] + sigma = tf.ones_like(state) * self.variance * float(self.num_timesteps - t) + return tf.contrib.distributions.Normal(loc=mu, scale=tf.sqrt(sigma)) + + def likelihood(self, observation): + batch_size = tf.shape(observation)[0] + sum_of_bs = tf.tile(tf.reduce_sum(self.bs, axis=0)[tf.newaxis, :], [batch_size, 1]) + sigma = tf.ones_like(sum_of_bs) * self.variance * (self.num_timesteps + 1) + mu_pos = (tf.ones([batch_size, self.state_size], dtype=self.dtype) * self.prior_mode_mean) + sum_of_bs + mu_neg = (tf.ones([batch_size, self.state_size], dtype=self.dtype) * -self.prior_mode_mean) + sum_of_bs + zn_pos = tf.contrib.distributions.Normal( + loc=mu_pos, + scale=tf.sqrt(sigma)) + zn_neg = tf.contrib.distributions.Normal( + loc=mu_neg, + scale=tf.sqrt(sigma)) + mode_probs = tf.convert_to_tensor([self.mixing_coeff, 1-self.mixing_coeff], dtype=tf.float64) + mode_probs = 
tf.tile(mode_probs[tf.newaxis, tf.newaxis, :], [batch_size, 1, 1]) + mode_selection_dist = tf.contrib.distributions.Categorical(probs=mode_probs) + zn_dist = tf.contrib.distributions.Mixture( + cat=mode_selection_dist, + components=[zn_pos, zn_neg], + validate_args=True) + # Average over the batch and take the sum over the state size + return tf.reduce_mean(tf.reduce_sum(zn_dist.log_prob(observation), axis=1)) + + def p_zt(self, prev_state, t): + """Computes the model p(z_t| z_{t-1}).""" + batch_size = tf.shape(prev_state)[0] + if t > 0: + z_mu_p = prev_state + self.bs[t - 1] + p_zt = tf.contrib.distributions.Normal( + loc=z_mu_p, scale=tf.sqrt(tf.ones_like(z_mu_p) * self.variance)) + return p_zt + else: # p(z_0) is mixture of two Normals + mu_pos = tf.ones([batch_size, self.state_size], dtype=self.dtype) * self.prior_mode_mean + mu_neg = tf.ones([batch_size, self.state_size], dtype=self.dtype) * -self.prior_mode_mean + z0_pos = tf.contrib.distributions.Normal( + loc=mu_pos, + scale=tf.sqrt(tf.ones_like(mu_pos) * self.variance)) + z0_neg = tf.contrib.distributions.Normal( + loc=mu_neg, + scale=tf.sqrt(tf.ones_like(mu_neg) * self.variance)) + mode_probs = tf.convert_to_tensor([self.mixing_coeff, 1-self.mixing_coeff], dtype=tf.float64) + mode_probs = tf.tile(mode_probs[tf.newaxis, tf.newaxis, :], [batch_size, 1, 1]) + mode_selection_dist = tf.contrib.distributions.Categorical(probs=mode_probs) + z0_dist = tf.contrib.distributions.Mixture( + cat=mode_selection_dist, + components=[z0_pos, z0_neg], + validate_args=False) + return z0_dist + + def generative(self, unused_observation, z_nm1): + """Computes the model's generative distribution p(z_n| z_{n-1}).""" + generative_p_mu = z_nm1 + self.bs[-1] + return tf.contrib.distributions.Normal( + loc=generative_p_mu, scale=tf.sqrt(tf.ones_like(generative_p_mu) * self.variance)) + +class Model(object): + + def __init__(self, + p, + q, + r, + state_size, + num_timesteps, + dtype=tf.float32): + self.p = p + self.q = q + self.r = r + self.state_size = state_size + self.num_timesteps = num_timesteps + self.dtype = dtype + + def zero_state(self, batch_size): + return tf.zeros([batch_size, self.state_size], dtype=self.dtype) + + def __call__(self, prev_state, observation, t): + # Compute the q distribution over z, q(z_t|z_n, z_{t-1}). + q_zt = self.q.q_zt(observation, prev_state, t) + # Compute the p distribution over z, p(z_t|z_{t-1}). + p_zt = self.p.p_zt(prev_state, t) + # sample from q + zt = q_zt.sample() + r_xn = self.r.r_xn(zt, t) + # Calculate the logprobs and sum over the state size. + log_q_zt = tf.reduce_sum(q_zt.log_prob(zt), axis=1) + log_p_zt = tf.reduce_sum(p_zt.log_prob(zt), axis=1) + log_r_xn = tf.reduce_sum(r_xn.log_prob(observation), axis=1) + # If we're at the last timestep, also calc the logprob of the observation. 
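+    # At earlier timesteps log_p_x_given_z is filled with zeros so that the
+    # returned tuple has the same structure at every step.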
+ if t == self.num_timesteps - 1: + generative_dist = self.p.generative(observation, zt) + log_p_x_given_z = tf.reduce_sum(generative_dist.log_prob(observation), axis=1) + else: + log_p_x_given_z = tf.zeros_like(log_q_zt) + return (zt, log_q_zt, log_p_zt, log_p_x_given_z, log_r_xn) + + @staticmethod + def create(state_size, + num_timesteps, + sigma_min=1e-5, + r_sigma_init=1, + variance=1.0, + mixing_coeff=0.5, + prior_mode_mean=1.0, + dtype=tf.float32, + random_seed=None, + train_p=True, + p_type="unimodal", + q_type="normal", + observation_variance=1.0, + transition_type=STANDARD_TRANSITION, + use_bs=True): + if p_type == "unimodal": + p = P(state_size, + num_timesteps, + sigma_min=sigma_min, + variance=variance, + dtype=dtype, + random_seed=random_seed, + trainable=train_p, + init_bs_to_zero=not use_bs) + elif p_type == "bimodal": + p = BimodalPriorP( + state_size, + num_timesteps, + mixing_coeff=mixing_coeff, + prior_mode_mean=prior_mode_mean, + sigma_min=sigma_min, + variance=variance, + dtype=dtype, + random_seed=random_seed, + trainable=train_p, + init_bs_to_zero=not use_bs) + elif "nonlinear" in p_type: + if "cauchy" in p_type: + trans_dist = tf.contrib.distributions.Cauchy + else: + trans_dist = tf.contrib.distributions.Normal + p = ShortChainNonlinearP( + state_size, + num_timesteps, + sigma_min=sigma_min, + variance=variance, + observation_variance=observation_variance, + transition_type=transition_type, + transition_dist=trans_dist, + dtype=dtype, + random_seed=random_seed + ) + + if q_type == "normal": + q_class = Q + elif q_type == "simple_mean": + q_class = SimpleMeanQ + elif q_type == "prev_state": + q_class = PreviousStateQ + elif q_type == "observation": + q_class = ObservationQ + + q = q_class(state_size, + num_timesteps, + sigma_min=sigma_min, + dtype=dtype, + random_seed=random_seed, + init_mu0_to_zero=not use_bs) + r = R(state_size, + num_timesteps, + sigma_min=sigma_min, + sigma_init=r_sigma_init, + dtype=dtype, + random_seed=random_seed) + model = Model(p, q, r, state_size, num_timesteps, dtype=dtype) + return model + + +class BackwardsModel(object): + + def __init__(self, + state_size, + num_timesteps, + sigma_min=1e-5, + dtype=tf.float32): + self.state_size = state_size + self.num_timesteps = num_timesteps + self.sigma_min = sigma_min + self.dtype = dtype + self.bs = [ + tf.get_variable( + shape=[state_size], + dtype=self.dtype, + name="b_%d" % (t + 1), + initializer=tf.zeros_initializer) for t in xrange(num_timesteps) + ] + self.Bs = tf.cumsum(self.bs, reverse=True, axis=0) + self.q_mus = [ + snt.Linear(output_size=state_size) for _ in xrange(num_timesteps) + ] + self.q_sigmas = [ + tf.get_variable( + shape=[state_size], + dtype=self.dtype, + name="q_sigma_%d" % (t + 1), + initializer=tf.zeros_initializer) for t in xrange(num_timesteps) + ] + self.r_mus = [ + tf.get_variable( + shape=[state_size], + dtype=self.dtype, + name="r_mu_%d" % (t + 1), + initializer=tf.zeros_initializer) for t in xrange(num_timesteps) + ] + self.r_sigmas = [ + tf.get_variable( + shape=[state_size], + dtype=self.dtype, + name="r_sigma_%d" % (t + 1), + initializer=tf.zeros_initializer) for t in xrange(num_timesteps) + ] + + def zero_state(self, batch_size): + return tf.zeros([batch_size, self.state_size], dtype=self.dtype) + + def posterior(self, unused_observation, prev_state, unused_t): + # TODO(dieterichl): Correct this. 
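+    # Placeholder only: this returns a degenerate Normal (zero mean, zero
+    # scale) rather than the true posterior.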
+ return tf.contrib.distributions.Normal( + loc=tf.zeros_like(prev_state), scale=tf.zeros_like(prev_state)) + + def lookahead(self, state, unused_t): + # TODO(dieterichl): Correct this. + return tf.contrib.distributions.Normal( + loc=tf.zeros_like(state), scale=tf.zeros_like(state)) + + def q_zt(self, observation, next_state, t): + """Computes the variational posterior q(z_{t}|z_{t+1}, z_n).""" + t_backwards = self.num_timesteps - t - 1 + batch_size = tf.shape(next_state)[0] + q_mu = self.q_mus[t_backwards](tf.concat([observation, next_state], axis=1)) + q_sigma = tf.maximum( + tf.nn.softplus(self.q_sigmas[t_backwards]), self.sigma_min) + q_sigma = tf.tile(q_sigma[tf.newaxis, :], [batch_size, 1]) + q_zt = tf.contrib.distributions.Normal(loc=q_mu, scale=tf.sqrt(q_sigma)) + return q_zt + + def p_zt(self, prev_state, t): + """Computes the model p(z_{t+1}| z_{t}).""" + t_backwards = self.num_timesteps - t - 1 + z_mu_p = prev_state + self.bs[t_backwards] + p_zt = tf.contrib.distributions.Normal( + loc=z_mu_p, scale=tf.ones_like(z_mu_p)) + return p_zt + + def generative(self, unused_observation, z_nm1): + """Computes the model's generative distribution p(z_n| z_{n-1}).""" + generative_p_mu = z_nm1 + self.bs[-1] + return tf.contrib.distributions.Normal( + loc=generative_p_mu, scale=tf.ones_like(generative_p_mu)) + + def r(self, z_t, t): + t_backwards = self.num_timesteps - t - 1 + batch_size = tf.shape(z_t)[0] + r_mu = tf.tile(self.r_mus[t_backwards][tf.newaxis, :], [batch_size, 1]) + r_sigma = tf.maximum( + tf.nn.softplus(self.r_sigmas[t_backwards]), self.sigma_min) + r_sigma = tf.tile(r_sigma[tf.newaxis, :], [batch_size, 1]) + return tf.contrib.distributions.Normal(loc=r_mu, scale=tf.sqrt(r_sigma)) + + def likelihood(self, observation): + batch_size = tf.shape(observation)[0] + mu = tf.tile(tf.reduce_sum(self.bs, axis=0)[tf.newaxis, :], [batch_size, 1]) + sigma = tf.ones_like(mu) * (self.num_timesteps + 1) + dist = tf.contrib.distributions.Normal(loc=mu, scale=tf.sqrt(sigma)) + # Average over the batch and take the sum over the state size + return tf.reduce_mean(tf.reduce_sum(dist.log_prob(observation), axis=1)) + + def __call__(self, next_state, observation, t): + # next state = z_{t+1} + # Compute the q distribution over z, q(z_{t}|z_n, z_{t+1}). + q_zt = self.q_zt(observation, next_state, t) + # sample from q + zt = q_zt.sample() + # Compute the p distribution over z, p(z_{t+1}|z_{t}). + p_zt = self.p_zt(zt, t) + # Compute log p(z_{t+1} | z_t) + if t == 0: + log_p_zt = p_zt.log_prob(observation) + else: + log_p_zt = p_zt.log_prob(next_state) + + # Compute r prior over zt + r_zt = self.r(zt, t) + log_r_zt = r_zt.log_prob(zt) + # Compute proposal density at zt + log_q_zt = q_zt.log_prob(zt) + # If we're at the last timestep, also calc the logprob of the observation. 
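+    # In the backwards chain the final step corresponds to the start of the
+    # forward chain, so the extra term here is the N(0, 1) prior evaluated at
+    # the sampled state.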
+ + if t == self.num_timesteps - 1: + p_z0_dist = tf.contrib.distributions.Normal( + loc=tf.zeros_like(zt), scale=tf.ones_like(zt)) + z0_log_prob = p_z0_dist.log_prob(zt) + else: + z0_log_prob = tf.zeros_like(log_q_zt) + return (zt, log_q_zt, log_p_zt, z0_log_prob, log_r_zt) + + +class LongChainP(object): + + def __init__(self, + state_size, + num_obs, + steps_per_obs, + sigma_min=1e-5, + variance=1.0, + observation_variance=1.0, + observation_type=STANDARD_OBSERVATION, + transition_type=STANDARD_TRANSITION, + dtype=tf.float32, + random_seed=None): + self.state_size = state_size + self.steps_per_obs = steps_per_obs + self.num_obs = num_obs + self.num_timesteps = steps_per_obs*num_obs + 1 + self.sigma_min = sigma_min + self.dtype = dtype + self.variance = variance + self.observation_variance = observation_variance + self.observation_type = observation_type + self.transition_type = transition_type + + def likelihood(self, observations): + """Computes the model's true likelihood of the observations. + + Args: + observations: A [batch_size, m, state_size] Tensor representing each of + the m observations. + Returns: + logprob: The true likelihood of the observations given the model. + """ + raise ValueError("Likelihood is not defined for long-chain models") + # batch_size = tf.shape(observations)[0] + # mu = tf.zeros([batch_size, self.state_size, self.num_obs], dtype=self.dtype) + # sigma = np.fromfunction( + # lambda i, j: 1 + self.steps_per_obs*np.minimum(i+1, j+1), + # [self.num_obs, self.num_obs]) + # sigma += np.eye(self.num_obs) + # sigma = tf.convert_to_tensor(sigma * self.variance, dtype=self.dtype) + # sigma = tf.tile(sigma[tf.newaxis, tf.newaxis, ...], + # [batch_size, self.state_size, 1, 1]) + # dist = tf.contrib.distributions.MultivariateNormalFullCovariance( + # loc=mu, + # covariance_matrix=sigma) + # Average over the batch and take the sum over the state size + #return tf.reduce_mean(tf.reduce_sum(dist.log_prob(observations), axis=1)) + + def p_zt(self, prev_state, t): + """Computes the model p(z_t| z_{t-1}).""" + batch_size = tf.shape(prev_state)[0] + if t > 0: + if self.transition_type == ROUND_TRANSITION: + loc = tf.round(prev_state) + tf.logging.info("p(z_%d | z_%d) ~ N(round(z_%d), %0.1f)" % (t, t-1, t-1, self.variance)) + elif self.transition_type == STANDARD_TRANSITION: + loc = prev_state + tf.logging.info("p(z_%d | z_%d) ~ N(z_%d, %0.1f)" % (t, t-1, t-1, self.variance)) + else: # p(z_0) is Normal(0,1) + loc = tf.zeros([batch_size, self.state_size], dtype=self.dtype) + tf.logging.info("p(z_0) ~ N(0,%0.1f)" % self.variance) + + p_zt = tf.contrib.distributions.Normal( + loc=loc, + scale=tf.sqrt(tf.ones_like(loc) * self.variance)) + return p_zt + + def generative(self, z_ni, t): + """Computes the model's generative distribution p(x_i| z_{ni}).""" + if self.observation_type == SQUARED_OBSERVATION: + generative_mu = tf.square(z_ni) + tf.logging.info("p(x_%d | z_%d) ~ N(z_%d^2, %0.1f)" % (t, t, t, self.variance)) + elif self.observation_type == ABS_OBSERVATION: + generative_mu = tf.abs(z_ni) + tf.logging.info("p(x_%d | z_%d) ~ N(|z_%d|, %0.1f)" % (t, t, t, self.variance)) + elif self.observation_type == STANDARD_OBSERVATION: + generative_mu = z_ni + tf.logging.info("p(x_%d | z_%d) ~ N(z_%d, %0.1f)" % (t, t, t, self.variance)) + generative_sigma_sq = tf.ones_like(generative_mu) * self.observation_variance + return tf.contrib.distributions.Normal( + loc=generative_mu, scale=tf.sqrt(generative_sigma_sq)) + + +class LongChainQ(object): + + def __init__(self, + state_size, + num_obs, 
+ steps_per_obs, + sigma_min=1e-5, + dtype=tf.float32, + random_seed=None): + self.state_size = state_size + self.sigma_min = sigma_min + self.dtype = dtype + self.steps_per_obs = steps_per_obs + self.num_obs = num_obs + self.num_timesteps = num_obs*steps_per_obs +1 + + initializers = { + "w": tf.random_uniform_initializer(seed=random_seed), + "b": tf.zeros_initializer + } + self.mus = [ + snt.Linear(output_size=state_size, initializers=initializers) + for t in xrange(self.num_timesteps) + ] + self.sigmas = [ + tf.get_variable( + shape=[state_size], + dtype=self.dtype, + name="q_sigma_%d" % (t + 1), + initializer=tf.random_uniform_initializer(seed=random_seed)) + for t in xrange(self.num_timesteps) + ] + + def first_relevant_obs_index(self, t): + return int(max((t-1)/self.steps_per_obs, 0)) + + def q_zt(self, observations, prev_state, t): + """Computes a distribution over z_t. + + Args: + observations: a [batch_size, num_observations, state_size] Tensor. + prev_state: a [batch_size, state_size] Tensor. + t: The current timestep, an int Tensor. + """ + # filter out unneeded past obs + first_relevant_obs_index = int(math.floor(max(t-1, 0) / self.steps_per_obs)) + num_relevant_observations = self.num_obs - first_relevant_obs_index + observations = observations[:,first_relevant_obs_index:,:] + batch_size = tf.shape(prev_state)[0] + # concatenate the prev state and observations along the second axis (that is + # not the batch or state size axis, and then flatten it to + # [batch_size, (num_relevant_observations + 1) * state_size] to feed it into + # the linear layer. + q_input = tf.concat([observations, prev_state[:,tf.newaxis, :]], axis=1) + q_input = tf.reshape(q_input, + [batch_size, (num_relevant_observations + 1) * self.state_size]) + q_mu = self.mus[t](q_input) + q_sigma = tf.maximum(tf.nn.softplus(self.sigmas[t]), self.sigma_min) + q_sigma = tf.tile(q_sigma[tf.newaxis, :], [batch_size, 1]) + q_zt = tf.contrib.distributions.Normal(loc=q_mu, scale=tf.sqrt(q_sigma)) + tf.logging.info( + "q(z_{t} | z_{tm1}, x_{obsf}:{obst}) ~ N(Linear([z_{tm1},x_{obsf}:{obst}]), sigma_{t})".format( + **{"t": t, + "tm1": t-1, + "obsf": (first_relevant_obs_index+1)*self.steps_per_obs, + "obst":self.steps_per_obs*self.num_obs})) + return q_zt + + def summarize_weights(self): + pass + +class LongChainR(object): + + def __init__(self, + state_size, + num_obs, + steps_per_obs, + sigma_min=1e-5, + dtype=tf.float32, + random_seed=None): + self.state_size = state_size + self.dtype = dtype + self.sigma_min = sigma_min + self.steps_per_obs = steps_per_obs + self.num_obs = num_obs + self.num_timesteps = num_obs*steps_per_obs + 1 + self.sigmas = [ + tf.get_variable( + shape=[self.num_future_obs(t)], + dtype=self.dtype, + name="r_sigma_%d" % (t + 1), + #initializer=tf.random_uniform_initializer(seed=random_seed, maxval=100)) + initializer=tf.constant_initializer(1.0)) + for t in range(self.num_timesteps) + ] + + def first_future_obs_index(self, t): + return int(math.floor(t / self.steps_per_obs)) + + def num_future_obs(self, t): + return int(self.num_obs - self.first_future_obs_index(t)) + + def r_xn(self, z_t, t): + """Computes a distribution over the future observations given current latent + state. + + The indexing in these messages is 1 indexed and inclusive. This is + consistent with the latex documents. 
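+
+    Each remaining future observation is modeled as a Normal centered at the
+    current latent state, with a learned variance shared across state
+    dimensions.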
+ + Args: + z_t: [batch_size, state_size] Tensor + t: Current timestep + """ + tf.logging.info( + "r(x_{start}:{end} | z_{t}) ~ N(z_{t}, sigma_{t})".format( + **{"t": t, + "start": (self.first_future_obs_index(t)+1)*self.steps_per_obs, + "end": self.num_timesteps-1})) + batch_size = tf.shape(z_t)[0] + # the mean for all future observations is the same. + # this tiling results in a [batch_size, num_future_obs, state_size] Tensor + r_mu = tf.tile(z_t[:,tf.newaxis,:], [1, self.num_future_obs(t), 1]) + # compute the variance + r_sigma = tf.maximum(tf.nn.softplus(self.sigmas[t]), self.sigma_min) + # the variance is the same across all state dimensions, so we only have to + # time sigma to be [batch_size, num_future_obs]. + r_sigma = tf.tile(r_sigma[tf.newaxis,:, tf.newaxis], [batch_size, 1, self.state_size]) + return tf.contrib.distributions.Normal( + loc=r_mu, scale=tf.sqrt(r_sigma)) + + def summarize_weights(self): + pass + + +class LongChainModel(object): + + def __init__(self, + p, + q, + r, + state_size, + num_obs, + steps_per_obs, + dtype=tf.float32, + disable_r=False): + self.p = p + self.q = q + self.r = r + self.disable_r = disable_r + self.state_size = state_size + self.num_obs = num_obs + self.steps_per_obs = steps_per_obs + self.num_timesteps = steps_per_obs*num_obs + 1 + self.dtype = dtype + + def zero_state(self, batch_size): + return tf.zeros([batch_size, self.state_size], dtype=self.dtype) + + def next_obs_ind(self, t): + return int(math.floor(max(t-1,0)/self.steps_per_obs)) + + def __call__(self, prev_state, observations, t): + """Computes the importance weight for the model system. + + Args: + prev_state: [batch_size, state_size] Tensor + observations: [batch_size, num_observations, state_size] Tensor + """ + # Compute the q distribution over z, q(z_t|z_n, z_{t-1}). + q_zt = self.q.q_zt(observations, prev_state, t) + # Compute the p distribution over z, p(z_t|z_{t-1}). 
+ p_zt = self.p.p_zt(prev_state, t) + # sample from q and evaluate the logprobs, summing over the state size + zt = q_zt.sample() + log_q_zt = tf.reduce_sum(q_zt.log_prob(zt), axis=1) + log_p_zt = tf.reduce_sum(p_zt.log_prob(zt), axis=1) + if not self.disable_r and t < self.num_timesteps-1: + # score the remaining observations using r + r_xn = self.r.r_xn(zt, t) + log_r_xn = r_xn.log_prob(observations[:, self.next_obs_ind(t+1):, :]) + # sum over state size and observation, leaving the batch index + log_r_xn = tf.reduce_sum(log_r_xn, axis=[1,2]) + else: + log_r_xn = tf.zeros_like(log_p_zt) + if t != 0 and t % self.steps_per_obs == 0: + generative_dist = self.p.generative(zt, t) + log_p_x_given_z = generative_dist.log_prob(observations[:,self.next_obs_ind(t),:]) + log_p_x_given_z = tf.reduce_sum(log_p_x_given_z, axis=1) + else: + log_p_x_given_z = tf.zeros_like(log_q_zt) + return (zt, log_q_zt, log_p_zt, log_p_x_given_z, log_r_xn) + + @staticmethod + def create(state_size, + num_obs, + steps_per_obs, + sigma_min=1e-5, + variance=1.0, + observation_variance=1.0, + observation_type=STANDARD_OBSERVATION, + transition_type=STANDARD_TRANSITION, + dtype=tf.float32, + random_seed=None, + disable_r=False): + p = LongChainP( + state_size, + num_obs, + steps_per_obs, + sigma_min=sigma_min, + variance=variance, + observation_variance=observation_variance, + observation_type=observation_type, + transition_type=transition_type, + dtype=dtype, + random_seed=random_seed) + q = LongChainQ( + state_size, + num_obs, + steps_per_obs, + sigma_min=sigma_min, + dtype=dtype, + random_seed=random_seed) + r = LongChainR( + state_size, + num_obs, + steps_per_obs, + sigma_min=sigma_min, + dtype=dtype, + random_seed=random_seed) + model = LongChainModel( + p, q, r, state_size, num_obs, steps_per_obs, + dtype=dtype, + disable_r=disable_r) + return model + + +class RTilde(object): + + def __init__(self, + state_size, + num_timesteps, + sigma_min=1e-5, + dtype=tf.float32, + random_seed=None, + graph_collection_name="R_TILDE_VARS"): + self.dtype = dtype + self.sigma_min = sigma_min + initializers = {"w": tf.truncated_normal_initializer(seed=random_seed), + "b": tf.zeros_initializer} + self.graph_collection_name=graph_collection_name + + def custom_getter(getter, *args, **kwargs): + out = getter(*args, **kwargs) + ref = tf.get_collection_ref(self.graph_collection_name) + if out not in ref: + ref.append(out) + return out + + self.fns = [ + snt.Linear(output_size=2*state_size, + initializers=initializers, + name="r_tilde_%d" % t, + custom_getter=custom_getter) + for t in xrange(num_timesteps) + ] + + def r_zt(self, z_t, observation, t): + #out = self.fns[t](tf.stop_gradient(tf.concat([z_t, observation], axis=1))) + out = self.fns[t](tf.concat([z_t, observation], axis=1)) + mu, raw_sigma_sq = tf.split(out, 2, axis=1) + sigma_sq = tf.maximum(tf.nn.softplus(raw_sigma_sq), self.sigma_min) + return mu, sigma_sq + +class TDModel(object): + + def __init__(self, + p, + q, + r_tilde, + state_size, + num_timesteps, + dtype=tf.float32, + disable_r=False): + self.p = p + self.q = q + self.r_tilde = r_tilde + self.disable_r = disable_r + self.state_size = state_size + self.num_timesteps = num_timesteps + self.dtype = dtype + + def zero_state(self, batch_size): + return tf.zeros([batch_size, self.state_size], dtype=self.dtype) + + def __call__(self, prev_state, observation, t): + """Computes the importance weight for the model system. 
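+
+    In addition to the usual log-probabilities, this returns the r_tilde mean
+    and variance and the next-step prior p(z_{t+1} | z_t) for use by the
+    fivo-aux-td bound.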
+ + Args: + prev_state: [batch_size, state_size] Tensor + observations: [batch_size, num_observations, state_size] Tensor + """ + # Compute the q distribution over z, q(z_t|z_n, z_{t-1}). + q_zt = self.q.q_zt(observation, prev_state, t) + # Compute the p distribution over z, p(z_t|z_{t-1}). + p_zt = self.p.p_zt(prev_state, t) + # sample from q and evaluate the logprobs, summing over the state size + zt = q_zt.sample() + # If it isn't the last timestep, compute the distribution over the next z. + if t < self.num_timesteps - 1: + p_ztplus1 = self.p.p_zt(zt, t+1) + else: + p_ztplus1 = None + log_q_zt = tf.reduce_sum(q_zt.log_prob(zt), axis=1) + log_p_zt = tf.reduce_sum(p_zt.log_prob(zt), axis=1) + + if not self.disable_r and t < self.num_timesteps-1: + # score the remaining observations using r + r_tilde_mu, r_tilde_sigma_sq = self.r_tilde.r_zt(zt, observation, t+1) + else: + r_tilde_mu = None + r_tilde_sigma_sq = None + if t == self.num_timesteps - 1: + generative_dist = self.p.generative(observation, zt) + log_p_x_given_z = tf.reduce_sum(generative_dist.log_prob(observation), axis=1) + else: + log_p_x_given_z = tf.zeros_like(log_q_zt) + return (zt, log_q_zt, log_p_zt, log_p_x_given_z, + r_tilde_mu, r_tilde_sigma_sq, p_ztplus1) + + @staticmethod + def create(state_size, + num_timesteps, + sigma_min=1e-5, + variance=1.0, + dtype=tf.float32, + random_seed=None, + train_p=True, + p_type="unimodal", + q_type="normal", + mixing_coeff=0.5, + prior_mode_mean=1.0, + observation_variance=1.0, + transition_type=STANDARD_TRANSITION, + use_bs=True): + if p_type == "unimodal": + p = P(state_size, + num_timesteps, + sigma_min=sigma_min, + variance=variance, + dtype=dtype, + random_seed=random_seed, + trainable=train_p, + init_bs_to_zero=not use_bs) + elif p_type == "bimodal": + p = BimodalPriorP( + state_size, + num_timesteps, + mixing_coeff=mixing_coeff, + prior_mode_mean=prior_mode_mean, + sigma_min=sigma_min, + variance=variance, + dtype=dtype, + random_seed=random_seed, + trainable=train_p, + init_bs_to_zero=not use_bs) + elif "nonlinear" in p_type: + if "cauchy" in p_type: + trans_dist = tf.contrib.distributions.Cauchy + else: + trans_dist = tf.contrib.distributions.Normal + + p = ShortChainNonlinearP( + state_size, + num_timesteps, + sigma_min=sigma_min, + variance=variance, + observation_variance=observation_variance, + transition_type=transition_type, + transition_dist=trans_dist, + dtype=dtype, + random_seed=random_seed + ) + + if q_type == "normal": + q_class = Q + elif q_type == "simple_mean": + q_class = SimpleMeanQ + elif q_type == "prev_state": + q_class = PreviousStateQ + elif q_type == "observation": + q_class = ObservationQ + + q = q_class(state_size, + num_timesteps, + sigma_min=sigma_min, + dtype=dtype, + random_seed=random_seed, + init_mu0_to_zero=not use_bs) + r_tilde = RTilde( + state_size, + num_timesteps, + sigma_min=sigma_min, + dtype=dtype, + random_seed=random_seed) + model = TDModel(p, q, r_tilde, state_size, num_timesteps, dtype=dtype) + return model diff --git a/models/research/fivo/experimental/run.sh b/models/research/fivo/experimental/run.sh new file mode 100644 index 0000000000000000000000000000000000000000..c650f636d5313a196960a92b509202b47e7da518 --- /dev/null +++ b/models/research/fivo/experimental/run.sh @@ -0,0 +1,54 @@ +#!/bin/bash +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + + +model="forward" +T=5 +num_obs=1 +var=0.1 +n=4 +lr=0.0001 +bound="fivo-aux" +q_type="normal" +resampling_method="multinomial" +rgrad="true" +p_type="unimodal" +use_bs=false + +LOGDIR=/tmp/fivo/model-$model-$bound-$resampling_method-resampling-rgrad-$rgrad-T-$T-var-$var-n-$n-lr-$lr-q-$q_type-p-$p_type + +python train.py \ + --logdir=$LOGDIR \ + --model=$model \ + --bound=$bound \ + --q_type=$q_type \ + --p_type=$p_type \ + --variance=$var \ + --use_resampling_grads=$rgrad \ + --resampling=always \ + --resampling_method=$resampling_method \ + --batch_size=4 \ + --num_samples=$n \ + --num_timesteps=$T \ + --num_eval_samples=256 \ + --summarize_every=100 \ + --learning_rate=$lr \ + --decay_steps=1000000 \ + --max_steps=1000000000 \ + --random_seed=1234 \ + --train_p=false \ + --use_bs=$use_bs \ + --alsologtostderr diff --git a/models/research/fivo/experimental/summary_utils.py b/models/research/fivo/experimental/summary_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..04e4aeea257577e60d3651656d0c62355d501ea8 --- /dev/null +++ b/models/research/fivo/experimental/summary_utils.py @@ -0,0 +1,332 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Utils for plotting and summarizing. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import matplotlib.gridspec as gridspec +import matplotlib.pyplot as plt +import numpy as np +import scipy + +import tensorflow as tf + +import models + + +def summarize_ess(weights, only_last_timestep=False): + """Plots the effective sample size. + + Args: + weights: List of length num_timesteps Tensors of shape + [num_samples, batch_size] + """ + num_timesteps = len(weights) + batch_size = tf.cast(tf.shape(weights[0])[1], dtype=tf.float64) + for i in range(num_timesteps): + if only_last_timestep and i < num_timesteps-1: continue + + w = tf.nn.softmax(weights[i], dim=0) + centered_weights = w - tf.reduce_mean(w, axis=0, keepdims=True) + variance = tf.reduce_sum(tf.square(centered_weights))/(batch_size-1) + ess = 1./tf.reduce_mean(tf.reduce_sum(tf.square(w), axis=0)) + tf.summary.scalar("ess/%d" % i, ess) + tf.summary.scalar("ese/%d" % i, ess / batch_size) + tf.summary.scalar("weight_variance/%d" % i, variance) + + +def summarize_particles(states, weights, observation, model): + """Plots particle locations and weights. 
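+
+  For each batch element the figure shows the prior, q, and analytic posterior
+  over z_0 in a top panel, followed by a bar plot of particle weights and a
+  histogram of particle locations for every timestep.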
+ + Args: + states: List of length num_timesteps Tensors of shape + [batch_size*num_particles, state_size]. + weights: List of length num_timesteps Tensors of shape [num_samples, + batch_size] + observation: Tensor of shape [batch_size*num_samples, state_size] + """ + num_timesteps = len(weights) + num_samples, batch_size = weights[0].get_shape().as_list() + # get q0 information for plotting + q0_dist = model.q.q_zt(observation, tf.zeros_like(states[0]), 0) + q0_loc = q0_dist.loc[0:batch_size, 0] + q0_scale = q0_dist.scale[0:batch_size, 0] + # get posterior information for plotting + post = (model.p.mixing_coeff, model.p.prior_mode_mean, model.p.variance, + tf.reduce_sum(model.p.bs), model.p.num_timesteps) + + # Reshape states and weights to be [time, num_samples, batch_size] + states = tf.stack(states) + weights = tf.stack(weights) + # normalize the weights over the sample dimension + weights = tf.nn.softmax(weights, dim=1) + states = tf.reshape(states, tf.shape(weights)) + + ess = 1./tf.reduce_sum(tf.square(weights), axis=1) + + def _plot_states(states_batch, weights_batch, observation_batch, ess_batch, q0, post): + """ + states: [time, num_samples, batch_size] + weights [time, num_samples, batch_size] + observation: [batch_size, 1] + q0: ([batch_size], [batch_size]) + post: ... + """ + num_timesteps, _, batch_size = states_batch.shape + plots = [] + for i in range(batch_size): + states = states_batch[:,:,i] + weights = weights_batch[:,:,i] + observation = observation_batch[i] + ess = ess_batch[:,i] + q0_loc = q0[0][i] + q0_scale = q0[1][i] + + fig = plt.figure(figsize=(7, (num_timesteps + 1) * 2)) + # Each timestep gets two plots -- a bar plot and a histogram of state locs. + # The bar plot will be bar_rows rows tall. + # The histogram will be 1 row tall. + # There is also 1 extra plot at the top showing the posterior and q. 
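+      # The analytic curves below assume the bimodal prior parameterization:
+      # `post` is (mixing_coeff, prior_mode_mean, variance, sum of bs,
+      # num_timesteps).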
+ bar_rows = 8 + num_rows = (num_timesteps + 1) * (bar_rows + 1) + gs = gridspec.GridSpec(num_rows, 1) + + # Figure out how wide to make the plot + prior_lims = (post[1] * -2, post[1] * 2) + q_lims = (scipy.stats.norm.ppf(0.01, loc=q0_loc, scale=q0_scale), + scipy.stats.norm.ppf(0.99, loc=q0_loc, scale=q0_scale)) + state_width = states.max() - states.min() + state_lims = (states.min() - state_width * 0.15, + states.max() + state_width * 0.15) + + lims = (min(prior_lims[0], q_lims[0], state_lims[0]), + max(prior_lims[1], q_lims[1], state_lims[1])) + # plot the posterior + z0 = np.arange(lims[0], lims[1], 0.1) + alpha, pos_mu, sigma_sq, B, T = post + neg_mu = -pos_mu + scale = np.sqrt((T + 1) * sigma_sq) + p_zn = ( + alpha * scipy.stats.norm.pdf( + observation, loc=pos_mu + B, scale=scale) + (1 - alpha) * + scipy.stats.norm.pdf(observation, loc=neg_mu + B, scale=scale)) + p_z0 = ( + alpha * scipy.stats.norm.pdf(z0, loc=pos_mu, scale=np.sqrt(sigma_sq)) + + (1 - alpha) * scipy.stats.norm.pdf( + z0, loc=neg_mu, scale=np.sqrt(sigma_sq))) + p_zn_given_z0 = scipy.stats.norm.pdf( + observation, loc=z0 + B, scale=np.sqrt(T * sigma_sq)) + post_z0 = (p_z0 * p_zn_given_z0) / p_zn + # plot q + q_z0 = scipy.stats.norm.pdf(z0, loc=q0_loc, scale=q0_scale) + ax = plt.subplot(gs[0:bar_rows, :]) + ax.plot(z0, q_z0, color="blue") + ax.plot(z0, post_z0, color="green") + ax.plot(z0, p_z0, color="red") + ax.legend(("q", "posterior", "prior"), loc="best", prop={"size": 10}) + + ax.set_xticks([]) + ax.set_xlim(*lims) + + # plot the states + for t in range(num_timesteps): + start = (t + 1) * (bar_rows + 1) + ax1 = plt.subplot(gs[start:start + bar_rows, :]) + ax2 = plt.subplot(gs[start + bar_rows:start + bar_rows + 1, :]) + # plot the states barplot + # ax1.hist( + # states[t, :], + # weights=weights[t, :], + # bins=50, + # edgecolor="none", + # alpha=0.2) + ax1.bar(states[t,:], weights[t,:], width=0.02, alpha=0.2, edgecolor = "none") + ax1.set_ylabel("t=%d" % t) + ax1.set_xticks([]) + ax1.grid(True, which="both") + ax1.set_xlim(*lims) + # plot the observation + ax1.axvline(x=observation, color="red", linestyle="dashed") + # add the ESS + ax1.text(0.1, 0.9, "ESS: %0.2f" % ess[t], + ha='center', va='center', transform=ax1.transAxes) + + # plot the state location histogram + ax2.hist2d( + states[t, :], np.zeros_like(states[t, :]), bins=[50, 1], cmap="Greys") + ax2.grid(False) + ax2.set_yticks([]) + ax2.set_xlim(*lims) + if t != num_timesteps - 1: + ax2.set_xticks([]) + + fig.canvas.draw() + p = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="") + plots.append(p.reshape(fig.canvas.get_width_height()[::-1] + (3,))) + plt.close(fig) + return np.stack(plots) + + plots = tf.py_func(_plot_states, + [states, weights, observation, ess, (q0_loc, q0_scale), post], + [tf.uint8])[0] + tf.summary.image("states", plots, 5, collections=["infrequent_summaries"]) + + +def plot_weights(weights, resampled=None): + """Plots the weights and effective sample size from an SMC rollout. 
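+
+  Each batch element gets a stacked area plot of the normalized weights over
+  time, with dashed vertical lines marking the steps where resampling occurred.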
+ + Args: + weights: [num_timesteps, num_samples, batch_size] importance weights + resampled: [num_timesteps] 0/1 indicating if resampling ocurred + """ + weights = tf.convert_to_tensor(weights) + + def _make_plots(weights, resampled): + num_timesteps, num_samples, batch_size = weights.shape + plots = [] + for i in range(batch_size): + fig, axes = plt.subplots(nrows=1, sharex=True, figsize=(8, 4)) + axes.stackplot(np.arange(num_timesteps), np.transpose(weights[:, :, i])) + axes.set_title("Weights") + axes.set_xlabel("Steps") + axes.set_ylim([0, 1]) + axes.set_xlim([0, num_timesteps - 1]) + for j in np.where(resampled > 0)[0]: + axes.axvline(x=j, color="red", linestyle="dashed", ymin=0.0, ymax=1.0) + fig.canvas.draw() + data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="") + data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) + plots.append(data) + plt.close(fig) + return np.stack(plots, axis=0) + + if resampled is None: + num_timesteps, _, batch_size = weights.get_shape().as_list() + resampled = tf.zeros([num_timesteps], dtype=tf.float32) + plots = tf.py_func(_make_plots, + [tf.nn.softmax(weights, dim=1), + tf.to_float(resampled)], [tf.uint8])[0] + batch_size = weights.get_shape().as_list()[-1] + tf.summary.image( + "weights", plots, batch_size, collections=["infrequent_summaries"]) + + +def summarize_weights(weights, num_timesteps, num_samples): + # weights is [num_timesteps, num_samples, batch_size] + weights = tf.convert_to_tensor(weights) + mean = tf.reduce_mean(weights, axis=1, keepdims=True) + squared_diff = tf.square(weights - mean) + variances = tf.reduce_sum(squared_diff, axis=1) / (num_samples - 1) + # average the variance over the batch + variances = tf.reduce_mean(variances, axis=1) + avg_magnitude = tf.reduce_mean(tf.abs(weights), axis=[1, 2]) + for t in xrange(num_timesteps): + tf.summary.scalar("weights/variance_%d" % t, variances[t]) + tf.summary.scalar("weights/magnitude_%d" % t, avg_magnitude[t]) + tf.summary.histogram("weights/step_%d" % t, weights[t]) + + +def summarize_learning_signal(rewards, tag): + num_resampling_events, _ = rewards.get_shape().as_list() + mean = tf.reduce_mean(rewards, axis=1) + avg_magnitude = tf.reduce_mean(tf.abs(rewards), axis=1) + reward_square = tf.reduce_mean(tf.square(rewards), axis=1) + for t in xrange(num_resampling_events): + tf.summary.scalar("%s/mean_%d" % (tag, t), mean[t]) + tf.summary.scalar("%s/magnitude_%d" % (tag, t), avg_magnitude[t]) + tf.summary.scalar("%s/squared_%d" % (tag, t), reward_square[t]) + tf.summary.histogram("%s/step_%d" % (tag, t), rewards[t]) + + +def summarize_qs(model, observation, states): + model.q.summarize_weights() + if hasattr(model.p, "posterior") and callable(getattr(model.p, "posterior")): + states = [tf.zeros_like(states[0])] + states[:-1] + for t, prev_state in enumerate(states): + p = model.p.posterior(observation, prev_state, t) + q = model.q.q_zt(observation, prev_state, t) + kl = tf.reduce_mean(tf.contrib.distributions.kl_divergence(p, q)) + tf.summary.scalar("kl_q/%d" % t, tf.reduce_mean(kl)) + mean_diff = q.loc - p.loc + mean_abs_err = tf.abs(mean_diff) + mean_rel_err = tf.abs(mean_diff / p.loc) + tf.summary.scalar("q_mean_convergence/absolute_error_%d" % t, + tf.reduce_mean(mean_abs_err)) + tf.summary.scalar("q_mean_convergence/relative_error_%d" % t, + tf.reduce_mean(mean_rel_err)) + sigma_diff = tf.square(q.scale) - tf.square(p.scale) + sigma_abs_err = tf.abs(sigma_diff) + sigma_rel_err = tf.abs(sigma_diff / tf.square(p.scale)) + 
tf.summary.scalar("q_variance_convergence/absolute_error_%d" % t, + tf.reduce_mean(sigma_abs_err)) + tf.summary.scalar("q_variance_convergence/relative_error_%d" % t, + tf.reduce_mean(sigma_rel_err)) + + +def summarize_rs(model, states): + model.r.summarize_weights() + for t, state in enumerate(states): + true_r = model.p.lookahead(state, t) + r = model.r.r_xn(state, t) + kl = tf.reduce_mean(tf.contrib.distributions.kl_divergence(true_r, r)) + tf.summary.scalar("kl_r/%d" % t, tf.reduce_mean(kl)) + mean_diff = true_r.loc - r.loc + mean_abs_err = tf.abs(mean_diff) + mean_rel_err = tf.abs(mean_diff / true_r.loc) + tf.summary.scalar("r_mean_convergence/absolute_error_%d" % t, + tf.reduce_mean(mean_abs_err)) + tf.summary.scalar("r_mean_convergence/relative_error_%d" % t, + tf.reduce_mean(mean_rel_err)) + sigma_diff = tf.square(r.scale) - tf.square(true_r.scale) + sigma_abs_err = tf.abs(sigma_diff) + sigma_rel_err = tf.abs(sigma_diff / tf.square(true_r.scale)) + tf.summary.scalar("r_variance_convergence/absolute_error_%d" % t, + tf.reduce_mean(sigma_abs_err)) + tf.summary.scalar("r_variance_convergence/relative_error_%d" % t, + tf.reduce_mean(sigma_rel_err)) + + +def summarize_model(model, true_bs, observation, states, bound, summarize_r=True): + if hasattr(model.p, "bs"): + model_b = tf.reduce_sum(model.p.bs, axis=0) + true_b = tf.reduce_sum(true_bs, axis=0) + abs_err = tf.abs(model_b - true_b) + rel_err = abs_err / true_b + tf.summary.scalar("sum_of_bs/data_generating_process", tf.reduce_mean(true_b)) + tf.summary.scalar("sum_of_bs/model", tf.reduce_mean(model_b)) + tf.summary.scalar("sum_of_bs/absolute_error", tf.reduce_mean(abs_err)) + tf.summary.scalar("sum_of_bs/relative_error", tf.reduce_mean(rel_err)) + #summarize_qs(model, observation, states) + #if bound == "fivo-aux" and summarize_r: + # summarize_rs(model, states) + + +def summarize_grads(grads, loss_name): + grad_ema = tf.train.ExponentialMovingAverage(decay=0.99) + vectorized_grads = tf.concat( + [tf.reshape(g, [-1]) for g, _ in grads if g is not None], axis=0) + new_second_moments = tf.square(vectorized_grads) + new_first_moments = vectorized_grads + maintain_grad_ema_op = grad_ema.apply([new_first_moments, new_second_moments]) + first_moments = grad_ema.average(new_first_moments) + second_moments = grad_ema.average(new_second_moments) + variances = second_moments - tf.square(first_moments) + tf.summary.scalar("grad_variance/%s" % loss_name, tf.reduce_mean(variances)) + tf.summary.histogram("grad_variance/%s" % loss_name, variances) + tf.summary.histogram("grad_mean/%s" % loss_name, first_moments) + return maintain_grad_ema_op diff --git a/models/research/fivo/experimental/train.py b/models/research/fivo/experimental/train.py new file mode 100644 index 0000000000000000000000000000000000000000..8abc9909b115298a30151a332d340f7b25e3cf90 --- /dev/null +++ b/models/research/fivo/experimental/train.py @@ -0,0 +1,637 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Main script for running fivo""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from collections import defaultdict + +import numpy as np +import tensorflow as tf + +import bounds +import data +import models +import summary_utils as summ + +tf.logging.set_verbosity(tf.logging.INFO) + +tf.app.flags.DEFINE_integer("random_seed", None, + "A random seed for the data generating process. Same seed " + "-> same data generating process and initialization.") +tf.app.flags.DEFINE_enum("bound", "fivo", ["iwae", "fivo", "fivo-aux", "fivo-aux-td"], + "The bound to optimize.") +tf.app.flags.DEFINE_enum("model", "forward", ["forward", "long_chain"], + "The model to use.") +tf.app.flags.DEFINE_enum("q_type", "normal", + ["normal", "simple_mean", "prev_state", "observation"], + "The parameterization to use for q") +tf.app.flags.DEFINE_enum("p_type", "unimodal", ["unimodal", "bimodal", "nonlinear"], + "The type of prior.") +tf.app.flags.DEFINE_boolean("train_p", True, + "If false, do not train the model p.") + +tf.app.flags.DEFINE_integer("state_size", 1, + "The dimensionality of the state space.") +tf.app.flags.DEFINE_float("variance", 1.0, + "The variance of the data generating process.") + +tf.app.flags.DEFINE_boolean("use_bs", True, + "If False, initialize all bs to 0.") +tf.app.flags.DEFINE_float("bimodal_prior_weight", 0.5, + "The weight assigned to the positive mode of the prior in " + "both the data generating process and p.") +tf.app.flags.DEFINE_float("bimodal_prior_mean", None, + "If supplied, sets the mean of the 2 modes of the prior to " + "be 1 and -1 times the supplied value. This is for both the " + "data generating process and p.") +tf.app.flags.DEFINE_float("fixed_observation", None, + "If supplied, fix the observation to a constant value in the" + " data generating process only.") +tf.app.flags.DEFINE_float("r_sigma_init", 1., + "Value to initialize variance of r to.") +tf.app.flags.DEFINE_enum("observation_type", + models.STANDARD_OBSERVATION, models.OBSERVATION_TYPES, + "The type of observation for the long chain model.") +tf.app.flags.DEFINE_enum("transition_type", + models.STANDARD_TRANSITION, models.TRANSITION_TYPES, + "The type of transition for the long chain model.") +tf.app.flags.DEFINE_float("observation_variance", None, + "The variance of the observation. Defaults to 'variance'") + +tf.app.flags.DEFINE_integer("num_timesteps", 5, + "Number of timesteps in the sequence.") +tf.app.flags.DEFINE_integer("num_observations", 1, + "The number of observations.") +tf.app.flags.DEFINE_integer("steps_per_observation", 5, + "The number of timesteps between each observation.") + +tf.app.flags.DEFINE_integer("batch_size", 4, + "The number of examples per batch.") +tf.app.flags.DEFINE_integer("num_samples", 4, + "The number particles to use.") +tf.app.flags.DEFINE_integer("num_eval_samples", 512, + "The batch size and # of particles to use for eval.") + +tf.app.flags.DEFINE_string("resampling", "always", + "How to resample. 
Accepts 'always','never', or a " + "comma-separated list of booleans like 'true,true,false'.") +tf.app.flags.DEFINE_enum("resampling_method", "multinomial", ["multinomial", + "stratified", + "systematic", + "relaxed-logblend", + "relaxed-stateblend", + "relaxed-linearblend", + "relaxed-stateblend-st",], + "Type of resampling method to use.") +tf.app.flags.DEFINE_boolean("use_resampling_grads", True, + "Whether or not to use resampling grads to optimize FIVO." + "Disabled automatically if resampling_method=relaxed.") +tf.app.flags.DEFINE_boolean("disable_r", False, + "If false, r is not used for fivo-aux and is set to zeros.") + +tf.app.flags.DEFINE_float("learning_rate", 1e-4, + "The learning rate to use for ADAM or SGD.") +tf.app.flags.DEFINE_integer("decay_steps", 25000, + "The number of steps before the learning rate is halved.") +tf.app.flags.DEFINE_integer("max_steps", int(1e6), + "The number of steps to run training for.") + +tf.app.flags.DEFINE_string("logdir", "/tmp/fivo-aux", + "Directory for summaries and checkpoints.") + +tf.app.flags.DEFINE_integer("summarize_every", int(1e3), + "The number of steps between each evaluation.") +FLAGS = tf.app.flags.FLAGS + + +def combine_grad_lists(grad_lists): + # grads is num_losses by num_variables. + # each list could have different variables. + # for each variable, sum the grads across all losses. + grads_dict = defaultdict(list) + var_dict = {} + for grad_list in grad_lists: + for grad, var in grad_list: + if grad is not None: + grads_dict[var.name].append(grad) + var_dict[var.name] = var + + final_grads = [] + for var_name, var in var_dict.iteritems(): + grads = grads_dict[var_name] + if len(grads) > 0: + tf.logging.info("Var %s has combined grads from %s." % + (var_name, [g.name for g in grads])) + grad = tf.reduce_sum(grads, axis=0) + else: + tf.logging.info("Var %s has no grads" % var_name) + grad = None + final_grads.append((grad, var)) + return final_grads + + +def make_apply_grads_op(losses, global_step, learning_rate, lr_decay_steps): + for l in losses: + assert isinstance(l, bounds.Loss) + + lr = tf.train.exponential_decay( + learning_rate, global_step, lr_decay_steps, 0.5, staircase=False) + tf.summary.scalar("learning_rate", lr) + opt = tf.train.AdamOptimizer(lr) + + ema_ops = [] + grads = [] + for loss_name, loss, loss_var_collection in losses: + tf.logging.info("Computing grads of %s w.r.t. vars in collection %s" % + (loss_name, loss_var_collection)) + g = opt.compute_gradients(loss, + var_list=tf.get_collection(loss_var_collection)) + ema_ops.append(summ.summarize_grads(g, loss_name)) + grads.append(g) + + all_grads = combine_grad_lists(grads) + apply_grads_op = opt.apply_gradients(all_grads, global_step=global_step) + + # Update the emas after applying the grads. 
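+  # The control dependency below ensures that the gradient-statistic EMAs
+  # (built in summarize_grads) are updated only after the parameter update has
+  # run.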
+ with tf.control_dependencies([apply_grads_op]): + train_op = tf.group(*ema_ops) + return train_op + + +def add_check_numerics_ops(): + check_op = [] + for op in tf.get_default_graph().get_operations(): + bad = ["logits/Log", "sample/Reshape", "log_prob/mul", + "log_prob/SparseSoftmaxCrossEntropyWithLogits/Reshape", + "entropy/Reshape", "entropy/LogSoftmax", "Categorical", "Mean"] + if all([x not in op.name for x in bad]): + for output in op.outputs: + if output.dtype in [tf.float16, tf.float32, tf.float64]: + if op._get_control_flow_context() is not None: # pylint: disable=protected-access + raise ValueError("`tf.add_check_numerics_ops() is not compatible " + "with TensorFlow control flow operations such as " + "`tf.cond()` or `tf.while_loop()`.") + + message = op.name + ":" + str(output.value_index) + with tf.control_dependencies(check_op): + check_op = [tf.check_numerics(output, message=message)] + return tf.group(*check_op) + + +def create_long_chain_graph(bound, state_size, num_obs, steps_per_obs, + batch_size, num_samples, num_eval_samples, + resampling_schedule, use_resampling_grads, + learning_rate, lr_decay_steps, dtype="float64"): + num_timesteps = num_obs * steps_per_obs + 1 + # Make the dataset. + dataset = data.make_long_chain_dataset( + state_size=state_size, + num_obs=num_obs, + steps_per_obs=steps_per_obs, + batch_size=batch_size, + num_samples=num_samples, + variance=FLAGS.variance, + observation_variance=FLAGS.observation_variance, + dtype=dtype, + observation_type=FLAGS.observation_type, + transition_type=FLAGS.transition_type, + fixed_observation=FLAGS.fixed_observation) + itr = dataset.make_one_shot_iterator() + _, observations = itr.get_next() + # Make the dataset for eval + eval_dataset = data.make_long_chain_dataset( + state_size=state_size, + num_obs=num_obs, + steps_per_obs=steps_per_obs, + batch_size=batch_size, + num_samples=num_eval_samples, + variance=FLAGS.variance, + observation_variance=FLAGS.observation_variance, + dtype=dtype, + observation_type=FLAGS.observation_type, + transition_type=FLAGS.transition_type, + fixed_observation=FLAGS.fixed_observation) + eval_itr = eval_dataset.make_one_shot_iterator() + _, eval_observations = eval_itr.get_next() + + # Make the model. 
+ model = models.LongChainModel.create( + state_size, + num_obs, + steps_per_obs, + observation_type=FLAGS.observation_type, + transition_type=FLAGS.transition_type, + variance=FLAGS.variance, + observation_variance=FLAGS.observation_variance, + dtype=tf.as_dtype(dtype), + disable_r=FLAGS.disable_r) + + # Compute the bound and loss + if bound == "iwae": + (_, losses, ema_op, _, _) = bounds.iwae( + model, + observations, + num_timesteps, + num_samples=num_samples) + (eval_log_p_hat, _, _, _, eval_log_weights) = bounds.iwae( + model, + eval_observations, + num_timesteps, + num_samples=num_eval_samples, + summarize=False) + eval_log_p_hat = tf.reduce_mean(eval_log_p_hat) + elif bound == "fivo" or "fivo-aux": + (_, losses, ema_op, _, _) = bounds.fivo( + model, + observations, + num_timesteps, + resampling_schedule=resampling_schedule, + use_resampling_grads=use_resampling_grads, + resampling_type=FLAGS.resampling_method, + aux=("aux" in bound), + num_samples=num_samples) + (eval_log_p_hat, _, _, _, eval_log_weights) = bounds.fivo( + model, + eval_observations, + num_timesteps, + resampling_schedule=resampling_schedule, + use_resampling_grads=False, + resampling_type="multinomial", + aux=("aux" in bound), + num_samples=num_eval_samples, + summarize=False) + eval_log_p_hat = tf.reduce_mean(eval_log_p_hat) + + summ.summarize_ess(eval_log_weights, only_last_timestep=True) + + tf.summary.scalar("log_p_hat", eval_log_p_hat) + + # Compute and apply grads. + global_step = tf.train.get_or_create_global_step() + + apply_grads = make_apply_grads_op(losses, + global_step, + learning_rate, + lr_decay_steps) + + # Update the emas after applying the grads. + with tf.control_dependencies([apply_grads]): + train_op = tf.group(ema_op) + + # We can't calculate the likelihood for most of these models + # so we just return zeros. + eval_likelihood = tf.zeros([], dtype=dtype) + return global_step, train_op, eval_log_p_hat, eval_likelihood + + +def create_graph(bound, state_size, num_timesteps, batch_size, + num_samples, num_eval_samples, resampling_schedule, + use_resampling_grads, learning_rate, lr_decay_steps, + train_p, dtype='float64'): + if FLAGS.use_bs: + true_bs = None + else: + true_bs = [np.zeros([state_size]).astype(dtype) for _ in xrange(num_timesteps)] + + # Make the dataset. + true_bs, dataset = data.make_dataset( + bs=true_bs, + state_size=state_size, + num_timesteps=num_timesteps, + batch_size=batch_size, + num_samples=num_samples, + variance=FLAGS.variance, + prior_type=FLAGS.p_type, + bimodal_prior_weight=FLAGS.bimodal_prior_weight, + bimodal_prior_mean=FLAGS.bimodal_prior_mean, + transition_type=FLAGS.transition_type, + fixed_observation=FLAGS.fixed_observation, + dtype=dtype) + itr = dataset.make_one_shot_iterator() + _, observations = itr.get_next() + # Make the dataset for eval + _, eval_dataset = data.make_dataset( + bs=true_bs, + state_size=state_size, + num_timesteps=num_timesteps, + batch_size=num_eval_samples, + num_samples=num_eval_samples, + variance=FLAGS.variance, + prior_type=FLAGS.p_type, + bimodal_prior_weight=FLAGS.bimodal_prior_weight, + bimodal_prior_mean=FLAGS.bimodal_prior_mean, + transition_type=FLAGS.transition_type, + fixed_observation=FLAGS.fixed_observation, + dtype=dtype) + eval_itr = eval_dataset.make_one_shot_iterator() + _, eval_observations = eval_itr.get_next() + + # Make the model. 
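+  # The fivo-aux-td bound uses TDModel, whose per-step call also returns the
+  # learned r_tilde parameters and p(z_{t+1} | z_t); all other bounds use the
+  # standard Model.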
+ if bound == "fivo-aux-td": + model = models.TDModel.create( + state_size, + num_timesteps, + variance=FLAGS.variance, + train_p=train_p, + p_type=FLAGS.p_type, + q_type=FLAGS.q_type, + mixing_coeff=FLAGS.bimodal_prior_weight, + prior_mode_mean=FLAGS.bimodal_prior_mean, + observation_variance=FLAGS.observation_variance, + transition_type=FLAGS.transition_type, + use_bs=FLAGS.use_bs, + dtype=tf.as_dtype(dtype), + random_seed=FLAGS.random_seed) + else: + model = models.Model.create( + state_size, + num_timesteps, + variance=FLAGS.variance, + train_p=train_p, + p_type=FLAGS.p_type, + q_type=FLAGS.q_type, + mixing_coeff=FLAGS.bimodal_prior_weight, + prior_mode_mean=FLAGS.bimodal_prior_mean, + observation_variance=FLAGS.observation_variance, + transition_type=FLAGS.transition_type, + use_bs=FLAGS.use_bs, + r_sigma_init=FLAGS.r_sigma_init, + dtype=tf.as_dtype(dtype), + random_seed=FLAGS.random_seed) + + # Compute the bound and loss + if bound == "iwae": + (_, losses, ema_op, _, _) = bounds.iwae( + model, + observations, + num_timesteps, + num_samples=num_samples) + (eval_log_p_hat, _, _, eval_states, eval_log_weights) = bounds.iwae( + model, + eval_observations, + num_timesteps, + num_samples=num_eval_samples, + summarize=True) + + eval_log_p_hat = tf.reduce_mean(eval_log_p_hat) + + elif "fivo" in bound: + if bound == "fivo-aux-td": + (_, losses, ema_op, _, _) = bounds.fivo_aux_td( + model, + observations, + num_timesteps, + resampling_schedule=resampling_schedule, + num_samples=num_samples) + (eval_log_p_hat, _, _, eval_states, eval_log_weights) = bounds.fivo_aux_td( + model, + eval_observations, + num_timesteps, + resampling_schedule=resampling_schedule, + num_samples=num_eval_samples, + summarize=True) + else: + (_, losses, ema_op, _, _) = bounds.fivo( + model, + observations, + num_timesteps, + resampling_schedule=resampling_schedule, + use_resampling_grads=use_resampling_grads, + resampling_type=FLAGS.resampling_method, + aux=("aux" in bound), + num_samples=num_samples) + (eval_log_p_hat, _, _, eval_states, eval_log_weights) = bounds.fivo( + model, + eval_observations, + num_timesteps, + resampling_schedule=resampling_schedule, + use_resampling_grads=False, + resampling_type="multinomial", + aux=("aux" in bound), + num_samples=num_eval_samples, + summarize=True) + eval_log_p_hat = tf.reduce_mean(eval_log_p_hat) + + summ.summarize_ess(eval_log_weights, only_last_timestep=True) + + # if FLAGS.p_type == "bimodal": + # # create the observations that showcase the model. 
+ # mode_odds_ratio = tf.convert_to_tensor([1., 3., 1./3., 512., 1./512.], + # dtype=tf.float64) + # mode_odds_ratio = tf.expand_dims(mode_odds_ratio, 1) + # k = ((num_timesteps+1) * FLAGS.variance) / (2*FLAGS.bimodal_prior_mean) + # explain_obs = tf.reduce_sum(model.p.bs) + tf.log(mode_odds_ratio) * k + # explain_obs = tf.tile(explain_obs, [num_eval_samples, 1]) + # # run the model on the explainable observations + # if bound == "iwae": + # (_, _, _, explain_states, explain_log_weights) = bounds.iwae( + # model, + # explain_obs, + # num_timesteps, + # num_samples=num_eval_samples) + # elif bound == "fivo" or "fivo-aux": + # (_, _, _, explain_states, explain_log_weights) = bounds.fivo( + # model, + # explain_obs, + # num_timesteps, + # resampling_schedule=resampling_schedule, + # use_resampling_grads=False, + # resampling_type="multinomial", + # aux=("aux" in bound), + # num_samples=num_eval_samples) + # summ.summarize_particles(explain_states, + # explain_log_weights, + # explain_obs, + # model) + + # Calculate the true likelihood. + if hasattr(model.p, 'likelihood') and callable(getattr(model.p, 'likelihood')): + eval_likelihood = model.p.likelihood(eval_observations)/ FLAGS.num_timesteps + else: + eval_likelihood = tf.zeros_like(eval_log_p_hat) + + tf.summary.scalar("log_p_hat", eval_log_p_hat) + tf.summary.scalar("likelihood", eval_likelihood) + tf.summary.scalar("bound_gap", eval_likelihood - eval_log_p_hat) + summ.summarize_model(model, true_bs, eval_observations, eval_states, bound, + summarize_r=not bound == "fivo-aux-td") + + # Compute and apply grads. + global_step = tf.train.get_or_create_global_step() + + apply_grads = make_apply_grads_op(losses, + global_step, + learning_rate, + lr_decay_steps) + + # Update the emas after applying the grads. + with tf.control_dependencies([apply_grads]): + train_op = tf.group(ema_op) + #train_op = tf.group(ema_op, add_check_numerics_ops()) + + return global_step, train_op, eval_log_p_hat, eval_likelihood + + +def parse_resampling_schedule(schedule, num_timesteps): + schedule = schedule.strip().lower() + if schedule == "always": + return [True] * (num_timesteps - 1) + [False] + elif schedule == "never": + return [False] * num_timesteps + elif "every" in schedule: + n = int(schedule.split("_")[1]) + return [(i+1) % n == 0 for i in xrange(num_timesteps)] + else: + sched = [x.strip() == "true" for x in schedule.split(",")] + assert len( + sched + ) == num_timesteps, "Wrong number of timesteps in resampling schedule." 
+ return sched + + +def create_log_hook(step, eval_log_p_hat, eval_likelihood): + def summ_formatter(d): + return ("Step {step}, log p_hat: {log_p_hat:.5f} likelihood: {likelihood:.5f}".format(**d)) + hook = tf.train.LoggingTensorHook( + { + "step": step, + "log_p_hat": eval_log_p_hat, + "likelihood": eval_likelihood, + }, + every_n_iter=FLAGS.summarize_every, + formatter=summ_formatter) + return hook + + +def create_infrequent_summary_hook(): + infrequent_summary_hook = tf.train.SummarySaverHook( + save_steps=10000, + output_dir=FLAGS.logdir, + summary_op=tf.summary.merge_all(key="infrequent_summaries") + ) + return infrequent_summary_hook + + +def main(unused_argv): + if FLAGS.model == "long_chain": + resampling_schedule = parse_resampling_schedule(FLAGS.resampling, + FLAGS.num_timesteps + 1) + else: + resampling_schedule = parse_resampling_schedule(FLAGS.resampling, + FLAGS.num_timesteps) + if FLAGS.random_seed is None: + seed = np.random.randint(0, high=10000) + else: + seed = FLAGS.random_seed + tf.logging.info("Using random seed %d", seed) + + if FLAGS.model == "long_chain": + assert FLAGS.q_type == "normal", "Q type %s not supported for long chain models" % FLAGS.q_type + assert FLAGS.p_type == "unimodal", "Bimodal priors are not supported for long chain models" + assert not FLAGS.use_bs, "Bs are not supported with long chain models" + assert FLAGS.num_timesteps == FLAGS.num_observations * FLAGS.steps_per_observation, "Num timesteps does not match." + assert FLAGS.bound != "fivo-aux-td", "TD Training is not compatible with long chain models." + + if FLAGS.model == "forward": + if "nonlinear" not in FLAGS.p_type: + assert FLAGS.transition_type == models.STANDARD_TRANSITION, "Non-standard transitions not supported by the forward model." + assert FLAGS.observation_type == models.STANDARD_OBSERVATION, "Non-standard observations not supported by the forward model." + assert FLAGS.observation_variance is None, "Forward model does not support observation variance." + assert FLAGS.num_observations == 1, "Forward model only supports 1 observation." + + if "relaxed" in FLAGS.resampling_method: + FLAGS.use_resampling_grads = False + assert FLAGS.bound != "fivo-aux-td", "TD Training is not compatible with relaxed resampling." + + if FLAGS.observation_variance is None: + FLAGS.observation_variance = FLAGS.variance + + if FLAGS.p_type == "bimodal": + assert FLAGS.bimodal_prior_mean is not None, "Must specify prior mean if using bimodal p." + + if FLAGS.p_type == "nonlinear" or FLAGS.p_type == "nonlinear-cauchy": + assert not FLAGS.use_bs, "Using bs is not compatible with the nonlinear model." + + g = tf.Graph() + with g.as_default(): + # Set the seeds. 
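As a quick reference for the schedule strings handled by parse_resampling_schedule above, with num_timesteps = 4 the function returns the following boolean lists (illustrative calls only):

parse_resampling_schedule("always", 4)                 # [True, True, True, False]
parse_resampling_schedule("never", 4)                  # [False, False, False, False]
parse_resampling_schedule("every_2", 4)                # [False, True, False, True]
parse_resampling_schedule("true,false,true,false", 4)  # [True, False, True, False]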
+ tf.set_random_seed(seed) + np.random.seed(seed) + if FLAGS.model == "long_chain": + (global_step, train_op, eval_log_p_hat, + eval_likelihood) = create_long_chain_graph( + FLAGS.bound, + FLAGS.state_size, + FLAGS.num_observations, + FLAGS.steps_per_observation, + FLAGS.batch_size, + FLAGS.num_samples, + FLAGS.num_eval_samples, + resampling_schedule, + FLAGS.use_resampling_grads, + FLAGS.learning_rate, + FLAGS.decay_steps) + else: + (global_step, train_op, + eval_log_p_hat, eval_likelihood) = create_graph( + FLAGS.bound, + FLAGS.state_size, + FLAGS.num_timesteps, + FLAGS.batch_size, + FLAGS.num_samples, + FLAGS.num_eval_samples, + resampling_schedule, + FLAGS.use_resampling_grads, + FLAGS.learning_rate, + FLAGS.decay_steps, + FLAGS.train_p) + + log_hooks = [create_log_hook(global_step, eval_log_p_hat, eval_likelihood)] + if len(tf.get_collection("infrequent_summaries")) > 0: + log_hooks.append(create_infrequent_summary_hook()) + + tf.logging.info("trainable variables:") + tf.logging.info([v.name for v in tf.trainable_variables()]) + tf.logging.info("p vars:") + tf.logging.info([v.name for v in tf.get_collection("P_VARS")]) + tf.logging.info("q vars:") + tf.logging.info([v.name for v in tf.get_collection("Q_VARS")]) + tf.logging.info("r vars:") + tf.logging.info([v.name for v in tf.get_collection("R_VARS")]) + tf.logging.info("r tilde vars:") + tf.logging.info([v.name for v in tf.get_collection("R_TILDE_VARS")]) + + with tf.train.MonitoredTrainingSession( + master="", + is_chief=True, + hooks=log_hooks, + checkpoint_dir=FLAGS.logdir, + save_checkpoint_secs=120, + save_summaries_steps=FLAGS.summarize_every, + log_step_count_steps=FLAGS.summarize_every) as sess: + cur_step = -1 + while True: + if sess.should_stop() or cur_step > FLAGS.max_steps: + break + # run a step + _, cur_step = sess.run([train_op, global_step]) + + +if __name__ == "__main__": + tf.app.run(main) diff --git a/models/research/fivo/fivo/__init__.py b/models/research/fivo/fivo/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/fivo/fivo/bounds.py b/models/research/fivo/fivo/bounds.py new file mode 100644 index 0000000000000000000000000000000000000000..088519033dd80669e99015b8e465888bd94a4cb1 --- /dev/null +++ b/models/research/fivo/fivo/bounds.py @@ -0,0 +1,317 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Implementation of objectives for training stochastic latent variable models. + +Contains implementations of the Importance Weighted Autoencoder objective (IWAE) +and the Filtering Variational objective (FIVO). 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import tensorflow as tf + +from fivo import nested_utils as nested +from fivo import smc + + +def iwae(model, + observations, + seq_lengths, + num_samples=1, + parallel_iterations=30, + swap_memory=True): + """Computes the IWAE lower bound on the log marginal probability. + + This method accepts a stochastic latent variable model and some observations + and computes a stochastic lower bound on the log marginal probability of the + observations. The IWAE estimator is defined by averaging multiple importance + weights. For more details see "Importance Weighted Autoencoders" by Burda + et al. https://arxiv.org/abs/1509.00519. + + When num_samples = 1, this bound becomes the evidence lower bound (ELBO). + + Args: + model: A subclass of ELBOTrainableSequenceModel that implements one + timestep of the model. See models/vrnn.py for an example. + observations: The inputs to the model. A potentially nested list or tuple of + Tensors each of shape [max_seq_len, batch_size, ...]. The Tensors must + have a rank at least two and have matching shapes in the first two + dimensions, which represent time and the batch respectively. The model + will be provided with the observations before computing the bound. + seq_lengths: A [batch_size] Tensor of ints encoding the length of each + sequence in the batch (sequences can be padded to a common length). + num_samples: The number of samples to use. + parallel_iterations: The number of parallel iterations to use for the + internal while loop. + swap_memory: Whether GPU-CPU memory swapping should be enabled for the + internal while loop. + + Returns: + log_p_hat: A Tensor of shape [batch_size] containing IWAE's estimate of the + log marginal probability of the observations. + log_weights: A Tensor of shape [max_seq_len, batch_size, num_samples] + containing the log weights at each timestep. Will not be valid for + timesteps past the end of a sequence. + """ + log_p_hat, log_weights, _, final_state = fivo( + model, + observations, + seq_lengths, + num_samples=num_samples, + resampling_criterion=smc.never_resample_criterion, + parallel_iterations=parallel_iterations, + swap_memory=swap_memory) + return log_p_hat, log_weights, final_state + + +def fivo(model, + observations, + seq_lengths, + num_samples=1, + resampling_criterion=smc.ess_criterion, + resampling_type='multinomial', + relaxed_resampling_temperature=0.5, + parallel_iterations=30, + swap_memory=True, + random_seed=None): + """Computes the FIVO lower bound on the log marginal probability. + + This method accepts a stochastic latent variable model and some observations + and computes a stochastic lower bound on the log marginal probability of the + observations. The lower bound is defined by a particle filter's unbiased + estimate of the marginal probability of the observations. For more details see + "Filtering Variational Objectives" by Maddison et al. + https://arxiv.org/abs/1705.09279. + + When the resampling criterion is "never resample", this bound becomes IWAE. + + Args: + model: A subclass of ELBOTrainableSequenceModel that implements one + timestep of the model. See models/vrnn.py for an example. + observations: The inputs to the model. A potentially nested list or tuple of + Tensors each of shape [max_seq_len, batch_size, ...]. 
The Tensors must + have a rank at least two and have matching shapes in the first two + dimensions, which represent time and the batch respectively. The model + will be provided with the observations before computing the bound. + seq_lengths: A [batch_size] Tensor of ints encoding the length of each + sequence in the batch (sequences can be padded to a common length). + num_samples: The number of particles to use in each particle filter. + resampling_criterion: The resampling criterion to use for this particle + filter. Must accept the number of samples, the current log weights, + and the current timestep and return a boolean Tensor of shape [batch_size] + indicating whether each particle filter should resample. See + ess_criterion and related functions for examples. When + resampling_criterion is never_resample_criterion, resampling_fn is ignored + and never called. + resampling_type: The type of resampling, one of "multinomial" or "relaxed". + relaxed_resampling_temperature: A positive temperature only used for relaxed + resampling. + parallel_iterations: The number of parallel iterations to use for the + internal while loop. Note that values greater than 1 can introduce + non-determinism even when random_seed is provided. + swap_memory: Whether GPU-CPU memory swapping should be enabled for the + internal while loop. + random_seed: The random seed to pass to the resampling operations in + the particle filter. Mainly useful for testing. + + Returns: + log_p_hat: A Tensor of shape [batch_size] containing FIVO's estimate of the + log marginal probability of the observations. + log_weights: A Tensor of shape [max_seq_len, batch_size, num_samples] + containing the log weights at each timestep of the particle filter. Note + that on timesteps when a resampling operation is performed the log weights + are reset to 0. Will not be valid for timesteps past the end of a + sequence. + resampled: A Tensor of shape [max_seq_len, batch_size] indicating when the + particle filters resampled. Will be 1.0 on timesteps when resampling + occurred and 0.0 on timesteps when it did not. + """ + # batch_size is the number of particle filters running in parallel. + batch_size = tf.shape(seq_lengths)[0] + + # Each sequence in the batch will be the input data for a different + # particle filter. The batch will be laid out as: + # particle 1 of particle filter 1 + # particle 1 of particle filter 2 + # ... + # particle 1 of particle filter batch_size + # particle 2 of particle filter 1 + # ... 
+ # particle num_samples of particle filter batch_size + observations = nested.tile_tensors(observations, [1, num_samples]) + tiled_seq_lengths = tf.tile(seq_lengths, [num_samples]) + model.set_observations(observations, tiled_seq_lengths) + + if resampling_type == 'multinomial': + resampling_fn = smc.multinomial_resampling + elif resampling_type == 'relaxed': + resampling_fn = functools.partial( + smc.relaxed_resampling, temperature=relaxed_resampling_temperature) + resampling_fn = functools.partial(resampling_fn, random_seed=random_seed) + + def transition_fn(prev_state, t): + if prev_state is None: + return model.zero_state(batch_size * num_samples, tf.float32) + return model.propose_and_weight(prev_state, t) + + log_p_hat, log_weights, resampled, final_state, _ = smc.smc( + transition_fn, + seq_lengths, + num_particles=num_samples, + resampling_criterion=resampling_criterion, + resampling_fn=resampling_fn, + parallel_iterations=parallel_iterations, + swap_memory=swap_memory) + + return log_p_hat, log_weights, resampled, final_state + +def fivo_aux_td( + model, + observations, + seq_lengths, + num_samples=1, + resampling_criterion=smc.ess_criterion, + resampling_type='multinomial', + relaxed_resampling_temperature=0.5, + parallel_iterations=30, + swap_memory=True, + random_seed=None): + """Experimental.""" + # batch_size is the number of particle filters running in parallel. + batch_size = tf.shape(seq_lengths)[0] + max_seq_len = tf.reduce_max(seq_lengths) + + # Each sequence in the batch will be the input data for a different + # particle filter. The batch will be laid out as: + # particle 1 of particle filter 1 + # particle 1 of particle filter 2 + # ... + # particle 1 of particle filter batch_size + # particle 2 of particle filter 1 + # ... + # particle num_samples of particle filter batch_size + observations = nested.tile_tensors(observations, [1, num_samples]) + tiled_seq_lengths = tf.tile(seq_lengths, [num_samples]) + model.set_observations(observations, tiled_seq_lengths) + + if resampling_type == 'multinomial': + resampling_fn = smc.multinomial_resampling + elif resampling_type == 'relaxed': + resampling_fn = functools.partial( + smc.relaxed_resampling, temperature=relaxed_resampling_temperature) + resampling_fn = functools.partial(resampling_fn, random_seed=random_seed) + + def transition_fn(prev_state, t): + if prev_state is None: + model_init_state = model.zero_state(batch_size * num_samples, tf.float32) + return (tf.zeros([num_samples*batch_size], dtype=tf.float32), + (tf.zeros([num_samples*batch_size, model.latent_size], dtype=tf.float32), + tf.zeros([num_samples*batch_size, model.latent_size], dtype=tf.float32)), + model_init_state) + + prev_log_r, prev_log_r_tilde, prev_model_state = prev_state + (new_model_state, zt, log_q_zt, log_p_zt, + log_p_x_given_z, log_r_tilde, p_ztplus1) = model(prev_model_state, t) + r_tilde_mu, r_tilde_sigma_sq = log_r_tilde + # Compute the weight without r. + log_weight = log_p_zt + log_p_x_given_z - log_q_zt + # Compute log_r and log_r_tilde. + p_mu = tf.stop_gradient(p_ztplus1.mean()) + p_sigma_sq = tf.stop_gradient(p_ztplus1.variance()) + log_r = (tf.log(r_tilde_sigma_sq) - + tf.log(r_tilde_sigma_sq + p_sigma_sq) - + tf.square(r_tilde_mu - p_mu)/(r_tilde_sigma_sq + p_sigma_sq)) + # log_r is [num_samples*batch_size, latent_size]. We sum it along the last + # dimension to compute log r. 
+ log_r = 0.5*tf.reduce_sum(log_r, axis=-1) + # Compute prev log r tilde + prev_r_tilde_mu, prev_r_tilde_sigma_sq = prev_log_r_tilde + prev_log_r_tilde = -0.5*tf.reduce_sum( + tf.square(tf.stop_gradient(zt) - prev_r_tilde_mu)/prev_r_tilde_sigma_sq, axis=-1) + # If the sequence is on the last timestep, log_r and log_r_tilde are just zeros. + last_timestep = t >= (tiled_seq_lengths - 1) + log_r = tf.where(last_timestep, + tf.zeros_like(log_r), + log_r) + prev_log_r_tilde = tf.where(last_timestep, + tf.zeros_like(prev_log_r_tilde), + prev_log_r_tilde) + log_weight += tf.stop_gradient(log_r - prev_log_r) + new_state = (log_r, log_r_tilde, new_model_state) + loop_fn_args = (log_r, prev_log_r_tilde, log_p_x_given_z, log_r - prev_log_r) + return log_weight, new_state, loop_fn_args + + def loop_fn(loop_state, loop_args, unused_model_state, log_weights, resampled, mask, t): + if loop_state is None: + return (tf.zeros([batch_size], dtype=tf.float32), + tf.zeros([batch_size], dtype=tf.float32), + tf.zeros([num_samples, batch_size], dtype=tf.float32)) + log_p_hat_acc, bellman_loss_acc, log_r_diff_acc = loop_state + log_r, prev_log_r_tilde, log_p_x_given_z, log_r_diff = loop_args + # Compute the log_p_hat update + log_p_hat_update = tf.reduce_logsumexp( + log_weights, axis=0) - tf.log(tf.to_float(num_samples)) + # If it is the last timestep, we always add the update. + log_p_hat_acc += tf.cond(t >= max_seq_len-1, + lambda: log_p_hat_update, + lambda: log_p_hat_update * resampled) + # Compute the Bellman update. + log_r = tf.reshape(log_r, [num_samples, batch_size]) + prev_log_r_tilde = tf.reshape(prev_log_r_tilde, [num_samples, batch_size]) + log_p_x_given_z = tf.reshape(log_p_x_given_z, [num_samples, batch_size]) + mask = tf.reshape(mask, [num_samples, batch_size]) + # On the first timestep there is no bellman error because there is no + # prev_log_r_tilde. + mask = tf.cond(tf.equal(t, 0), + lambda: tf.zeros_like(mask), + lambda: mask) + # On the first timestep also fix up prev_log_r_tilde, which will be -inf. 
+ prev_log_r_tilde = tf.where( + tf.is_inf(prev_log_r_tilde), + tf.zeros_like(prev_log_r_tilde), + prev_log_r_tilde) + # log_lambda is [num_samples, batch_size] + log_lambda = tf.reduce_mean(prev_log_r_tilde - log_p_x_given_z - log_r, + axis=0, keepdims=True) + bellman_error = mask * tf.square( + prev_log_r_tilde - + tf.stop_gradient(log_lambda + log_p_x_given_z + log_r) + ) + bellman_loss_acc += tf.reduce_mean(bellman_error, axis=0) + # Compute the log_r_diff update + log_r_diff_acc += mask * tf.reshape(log_r_diff, [num_samples, batch_size]) + return (log_p_hat_acc, bellman_loss_acc, log_r_diff_acc) + + log_weights, resampled, accs = smc.smc( + transition_fn, + seq_lengths, + num_particles=num_samples, + resampling_criterion=resampling_criterion, + resampling_fn=resampling_fn, + loop_fn=loop_fn, + parallel_iterations=parallel_iterations, + swap_memory=swap_memory) + + log_p_hat, bellman_loss, log_r_diff = accs + loss_per_seq = [- log_p_hat, bellman_loss] + tf.summary.scalar("bellman_loss", + tf.reduce_mean(bellman_loss / tf.to_float(seq_lengths))) + tf.summary.scalar("log_r_diff", + tf.reduce_mean(tf.reduce_mean(log_r_diff, axis=0) / tf.to_float(seq_lengths))) + return loss_per_seq, log_p_hat, log_weights, resampled diff --git a/models/research/fivo/fivo/bounds_test.py b/models/research/fivo/fivo/bounds_test.py new file mode 100644 index 0000000000000000000000000000000000000000..c970f74f4cec36a855c54bbe6cdf8d76c3f86599 --- /dev/null +++ b/models/research/fivo/fivo/bounds_test.py @@ -0,0 +1,183 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for fivo.bounds""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf + +from fivo.test_utils import create_vrnn +from fivo import bounds + + +class BoundsTest(tf.test.TestCase): + + def test_elbo(self): + """A golden-value test for the ELBO (the IWAE bound with num_samples=1).""" + tf.set_random_seed(1234) + with self.test_session() as sess: + model, inputs, targets, lengths = create_vrnn(random_seed=1234) + outs = bounds.iwae(model, (inputs, targets), lengths, num_samples=1, + parallel_iterations=1) + sess.run(tf.global_variables_initializer()) + log_p_hat, _, _ = sess.run(outs) + self.assertAllClose([-21.615765, -13.614225], log_p_hat) + + def test_iwae(self): + """A golden-value test for the IWAE bound.""" + tf.set_random_seed(1234) + with self.test_session() as sess: + model, inputs, targets, lengths = create_vrnn(random_seed=1234) + outs = bounds.iwae(model, (inputs, targets), lengths, num_samples=4, + parallel_iterations=1) + sess.run(tf.global_variables_initializer()) + log_p_hat, weights, _ = sess.run(outs) + self.assertAllClose([-23.301426, -13.64028], log_p_hat) + weights_gt = np.array( + [[[-3.66708851, -2.07074022, -4.91751671, -5.03293562], + [-2.99690723, -3.17782736, -4.50084877, -3.48536515]], + [[-6.2539978, -4.37615728, -7.43738699, -7.85044909], + [-8.27518654, -6.71545124, -8.96198845, -7.05567837]], + [[-9.19093227, -8.01637268, -11.64603615, -10.51128292], + [-12.34527206, -11.54284477, -11.8667469, -9.69417381]], + [[-12.20609856, -10.47217369, -13.66270638, -13.46115875], + [-17.17656708, -16.25190353, -15.28658581, -12.33067703]], + [[-16.14766312, -15.57472229, -17.47755432, -17.98189926], + [-17.17656708, -16.25190353, -15.28658581, -12.33067703]], + [[-20.07182884, -18.43191147, -20.1606636, -21.45263863], + [-17.17656708, -16.25190353, -15.28658581, -12.33067703]], + [[-24.10270691, -22.20865822, -24.14675522, -25.27248383], + [-17.17656708, -16.25190353, -15.28658581, -12.33067703]]]) + self.assertAllClose(weights_gt, weights) + + def test_fivo(self): + """A golden-value test for the FIVO bound.""" + tf.set_random_seed(1234) + with self.test_session() as sess: + model, inputs, targets, lengths = create_vrnn(random_seed=1234) + outs = bounds.fivo(model, (inputs, targets), lengths, num_samples=4, + random_seed=1234, parallel_iterations=1) + sess.run(tf.global_variables_initializer()) + log_p_hat, weights, resampled, _ = sess.run(outs) + self.assertAllClose([-22.98902512, -14.21689224], log_p_hat) + weights_gt = np.array( + [[[-3.66708851, -2.07074022, -4.91751671, -5.03293562], + [-2.99690723, -3.17782736, -4.50084877, -3.48536515]], + [[-2.67100811, -2.30541706, -2.34178066, -2.81751347], + [-8.27518654, -6.71545124, -8.96198845, -7.05567837]], + [[-5.65190411, -5.94563246, -6.55041981, -5.4783473], + [-12.34527206, -11.54284477, -11.8667469, -9.69417381]], + [[-8.71947861, -8.40143299, -8.54593086, -8.42822266], + [-4.28782988, -4.50591278, -3.40847206, -2.63650274]], + [[-12.7003831, -13.5039815, -12.3569726, -12.9489622], + [-4.28782988, -4.50591278, -3.40847206, -2.63650274]], + [[-16.4520301, -16.3611698, -15.0314846, -16.4197006], + [-4.28782988, -4.50591278, -3.40847206, -2.63650274]], + [[-20.7010765, -20.1379165, -19.0020351, -20.2395458], + [-4.28782988, -4.50591278, -3.40847206, -2.63650274]]]) + self.assertAllClose(weights_gt, weights) + 
resampled_gt = np.array( + [[1., 0.], + [0., 0.], + [0., 1.], + [0., 0.], + [0., 0.], + [0., 0.], + [0., 0.]]) + self.assertAllClose(resampled_gt, resampled) + + def test_fivo_relaxed(self): + """A golden-value test for the FIVO bound with relaxed sampling.""" + tf.set_random_seed(1234) + with self.test_session() as sess: + model, inputs, targets, lengths = create_vrnn(random_seed=1234) + outs = bounds.fivo(model, (inputs, targets), lengths, num_samples=4, + random_seed=1234, parallel_iterations=1, + resampling_type="relaxed") + sess.run(tf.global_variables_initializer()) + log_p_hat, weights, resampled, _ = sess.run(outs) + self.assertAllClose([-22.942394, -14.273882], log_p_hat) + weights_gt = np.array( + [[[-3.66708851, -2.07074118, -4.91751575, -5.03293514], + [-2.99690628, -3.17782831, -4.50084877, -3.48536515]], + [[-2.84939098, -2.30087185, -2.35649204, -2.48417377], + [-8.27518654, -6.71545172, -8.96199131, -7.05567837]], + [[-5.92327023, -5.9433074, -6.5826683, -5.04259014], + [-12.34527206, -11.54284668, -11.86675072, -9.69417477]], + [[-8.95323944, -8.40061855, -8.52760506, -7.99130583], + [-4.58102798, -4.56017351, -3.46283388, -2.65550804]], + [[-12.87836456, -13.49628639, -12.31680107, -12.74228859], + [-4.58102798, -4.56017351, -3.46283388, -2.65550804]], + [[-16.78347397, -16.35150909, -14.98797417, -16.35162735], + [-4.58102798, -4.56017351, -3.46283388, -2.65550804]], + [[-20.81165886, -20.1307621, -18.92229652, -20.17458153], + [-4.58102798, -4.56017351, -3.46283388, -2.65550804]]]) + self.assertAllClose(weights_gt, weights) + resampled_gt = np.array( + [[1., 0.], + [0., 0.], + [0., 1.], + [0., 0.], + [0., 0.], + [0., 0.], + [0., 0.]]) + self.assertAllClose(resampled_gt, resampled) + + def test_fivo_aux_relaxed(self): + """A golden-value test for the FIVO-AUX bound with relaxed sampling.""" + tf.set_random_seed(1234) + with self.test_session() as sess: + model, inputs, targets, lengths = create_vrnn(random_seed=1234, + use_tilt=True) + outs = bounds.fivo(model, (inputs, targets), lengths, num_samples=4, + random_seed=1234, parallel_iterations=1, + resampling_type="relaxed") + sess.run(tf.global_variables_initializer()) + log_p_hat, weights, resampled, _ = sess.run(outs) + self.assertAllClose([-23.1395, -14.271059], log_p_hat) + weights_gt = np.array( + [[[-5.19826221, -3.55476403, -5.98663855, -6.08058834], + [-6.31685925, -5.70243931, -7.07638931, -6.18138981]], + [[-3.97986865, -3.58831525, -3.85753584, -3.5010016], + [-11.38203049, -8.66213989, -11.23646641, -10.02024746]], + [[-6.62269831, -6.36680222, -6.78096485, -5.80072498], + [-3.55419445, -8.11326408, -3.48766923, -3.08593249]], + [[-10.56472301, -10.16084099, -9.96741676, -8.5270071], + [-6.04880285, -7.80853653, -4.72652149, -3.49711013]], + [[-13.36585426, -16.08720398, -13.33416367, -13.1017189], + [-0., -0., -0., -0.]], + [[-17.54233551, -17.35167503, -16.79163361, -16.51471138], + [0., -0., -0., -0.]], + [[-19.74024963, -18.69452858, -17.76246452, -18.76182365], + [0., -0., -0., -0.]]]) + self.assertAllClose(weights_gt, weights) + resampled_gt = np.array([[1., 0.], + [0., 1.], + [0., 0.], + [0., 1.], + [0., 0.], + [0., 0.], + [0., 0.]]) + self.assertAllClose(resampled_gt, resampled) + + +if __name__ == "__main__": + np.set_printoptions(threshold=np.nan) # Used to easily see the gold values. + # Use print(repr(numpy_array)) to print the values. 
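For intuition about the golden values in these tests: under the IWAE bound the per-sequence log_p_hat is the log-mean-exp of the final accumulated log weights over the sample axis. A small NumPy check against the first batch element of test_iwae above (illustrative only, not part of the test suite):

import numpy as np

w = np.array([-24.10270691, -22.20865822, -24.14675522, -25.27248383])
m = w.max()
log_p_hat = m + np.log(np.mean(np.exp(w - m)))  # numerically stable log-mean-exp
# log_p_hat is approximately -23.3014, matching the golden value -23.301426.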
+ tf.test.main() diff --git a/models/research/fivo/fivo/data/__init__.py b/models/research/fivo/fivo/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/fivo/fivo/data/calculate_pianoroll_mean.py b/models/research/fivo/fivo/data/calculate_pianoroll_mean.py new file mode 100644 index 0000000000000000000000000000000000000000..93f712bd328f61a83faffc55ad2cf6ca33b47fb7 --- /dev/null +++ b/models/research/fivo/fivo/data/calculate_pianoroll_mean.py @@ -0,0 +1,65 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Script to calculate the mean of a pianoroll dataset. + +Given a pianoroll pickle file, this script loads the dataset and +calculates the mean of the training set. Then it updates the pickle file +so that the key "train_mean" points to the mean vector. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import pickle +import numpy as np + +import tensorflow as tf + + +from datasets import sparse_pianoroll_to_dense + +tf.app.flags.DEFINE_string('in_file', None, + 'Filename of the pickled pianoroll dataset to load.') +tf.app.flags.DEFINE_string('out_file', None, + 'Name of the output pickle file. Defaults to in_file, ' + 'updating the input pickle file.') +tf.app.flags.mark_flag_as_required('in_file') + +FLAGS = tf.app.flags.FLAGS + +MIN_NOTE = 21 +MAX_NOTE = 108 +NUM_NOTES = MAX_NOTE - MIN_NOTE + 1 + + +def main(unused_argv): + if FLAGS.out_file is None: + FLAGS.out_file = FLAGS.in_file + with tf.gfile.Open(FLAGS.in_file, 'r') as f: + pianorolls = pickle.load(f) + dense_pianorolls = [sparse_pianoroll_to_dense(p, MIN_NOTE, NUM_NOTES)[0] + for p in pianorolls['train']] + # Concatenate all elements along the time axis. + concatenated = np.concatenate(dense_pianorolls, axis=0) + mean = np.mean(concatenated, axis=0) + pianorolls['train_mean'] = mean + # Write out the whole pickle file, including the train mean. + pickle.dump(pianorolls, open(FLAGS.out_file, 'wb')) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/fivo/fivo/data/create_timit_dataset.py b/models/research/fivo/fivo/data/create_timit_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..ea1cd3b10cb0812c2d6aad51491924ecfe8eec37 --- /dev/null +++ b/models/research/fivo/fivo/data/create_timit_dataset.py @@ -0,0 +1,180 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Preprocesses TIMIT from raw wavfiles to create a set of TFRecords. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import glob +import os +import random +import re + +import numpy as np +import tensorflow as tf + +tf.app.flags.DEFINE_string("raw_timit_dir", None, + "Directory containing TIMIT files.") +tf.app.flags.DEFINE_string("out_dir", None, + "Output directory for TFRecord files.") +tf.app.flags.DEFINE_float("valid_frac", 0.05, + "Fraction of train set to use as valid set. " + "Must be between 0.0 and 1.0.") + +tf.app.flags.mark_flag_as_required("raw_timit_dir") +tf.app.flags.mark_flag_as_required("out_dir") + +FLAGS = tf.app.flags.FLAGS + +NUM_TRAIN_FILES = 4620 +NUM_TEST_FILES = 1680 +SAMPLES_PER_TIMESTEP = 200 + +# Regexes for reading SPHERE header files. +SAMPLE_COUNT_REGEX = re.compile(r"sample_count -i (\d+)") +SAMPLE_MIN_REGEX = re.compile(r"sample_min -i (-?\d+)") +SAMPLE_MAX_REGEX = re.compile(r"sample_max -i (-?\d+)") + + +def get_filenames(split): + """Get all wav filenames from the TIMIT archive.""" + path = os.path.join(FLAGS.raw_timit_dir, "TIMIT", split, "*", "*", "*.WAV") + # Sort the output by name so the order is deterministic. + files = sorted(glob.glob(path)) + return files + + +def load_timit_wav(filename): + """Loads a TIMIT wavfile into a numpy array. + + TIMIT wavfiles include a SPHERE header, detailed in the TIMIT docs. The first + line is the header type and the second is the length of the header in bytes. + After the header, the remaining bytes are actual WAV data. + + The header includes information about the WAV data such as the number of + samples and minimum and maximum amplitude. This function asserts that the + loaded wav data matches the header. + + Args: + filename: The name of the TIMIT wavfile to load. + Returns: + wav: A numpy array containing the loaded wav data. + """ + wav_file = open(filename, "rb") + header_type = wav_file.readline() + header_length_str = wav_file.readline() + # The header length includes the length of the first two lines. + header_remaining_bytes = (int(header_length_str) - len(header_type) - + len(header_length_str)) + header = wav_file.read(header_remaining_bytes) + # Read the relevant header fields. + sample_count = int(SAMPLE_COUNT_REGEX.search(header).group(1)) + sample_min = int(SAMPLE_MIN_REGEX.search(header).group(1)) + sample_max = int(SAMPLE_MAX_REGEX.search(header).group(1)) + wav = np.fromstring(wav_file.read(), dtype="int16").astype("float32") + # Check that the loaded data conforms to the header description. 
+ assert len(wav) == sample_count + assert wav.min() == sample_min + assert wav.max() == sample_max + return wav + + +def preprocess(wavs, block_size, mean, std): + """Normalize the wav data and reshape it into chunks.""" + processed_wavs = [] + for wav in wavs: + wav = (wav - mean) / std + wav_length = wav.shape[0] + if wav_length % block_size != 0: + pad_width = block_size - (wav_length % block_size) + wav = np.pad(wav, (0, pad_width), "constant") + assert wav.shape[0] % block_size == 0 + wav = wav.reshape((-1, block_size)) + processed_wavs.append(wav) + return processed_wavs + + +def create_tfrecord_from_wavs(wavs, output_file): + """Writes processed wav files to disk as sharded TFRecord files.""" + with tf.python_io.TFRecordWriter(output_file) as builder: + for wav in wavs: + builder.write(wav.astype(np.float32).tobytes()) + + +def main(unused_argv): + train_filenames = get_filenames("TRAIN") + test_filenames = get_filenames("TEST") + + num_train_files = len(train_filenames) + num_test_files = len(test_filenames) + num_valid_files = int(num_train_files * FLAGS.valid_frac) + num_train_files -= num_valid_files + + print("%d train / %d valid / %d test" % ( + num_train_files, num_valid_files, num_test_files)) + + random.seed(1234) + random.shuffle(train_filenames) + + valid_filenames = train_filenames[:num_valid_files] + train_filenames = train_filenames[num_valid_files:] + + # Make sure there is no overlap in the train, test, and valid sets. + train_s = set(train_filenames) + test_s = set(test_filenames) + valid_s = set(valid_filenames) + # Disable explicit length testing to make the assertions more readable. + # pylint: disable=g-explicit-length-test + assert len(train_s & test_s) == 0 + assert len(train_s & valid_s) == 0 + assert len(valid_s & test_s) == 0 + # pylint: enable=g-explicit-length-test + + train_wavs = [load_timit_wav(f) for f in train_filenames] + valid_wavs = [load_timit_wav(f) for f in valid_filenames] + test_wavs = [load_timit_wav(f) for f in test_filenames] + assert len(train_wavs) + len(valid_wavs) == NUM_TRAIN_FILES + assert len(test_wavs) == NUM_TEST_FILES + + # Calculate the mean and standard deviation of the train set. + train_stacked = np.hstack(train_wavs) + train_mean = np.mean(train_stacked) + train_std = np.std(train_stacked) + print("train mean: %f train std: %f" % (train_mean, train_std)) + + # Process all data, normalizing with the train set statistics. + processed_train_wavs = preprocess(train_wavs, SAMPLES_PER_TIMESTEP, + train_mean, train_std) + processed_valid_wavs = preprocess(valid_wavs, SAMPLES_PER_TIMESTEP, + train_mean, train_std) + processed_test_wavs = preprocess(test_wavs, SAMPLES_PER_TIMESTEP, train_mean, + train_std) + + # Write the datasets to disk. + create_tfrecord_from_wavs( + processed_train_wavs, + os.path.join(FLAGS.out_dir, "train")) + create_tfrecord_from_wavs( + processed_valid_wavs, + os.path.join(FLAGS.out_dir, "valid")) + create_tfrecord_from_wavs( + processed_test_wavs, + os.path.join(FLAGS.out_dir, "test")) + + +if __name__ == "__main__": + tf.app.run() diff --git a/models/research/fivo/fivo/data/datasets.py b/models/research/fivo/fivo/data/datasets.py new file mode 100644 index 0000000000000000000000000000000000000000..6d5324623250e31d65b23c97e7e684de59da1ba6 --- /dev/null +++ b/models/research/fivo/fivo/data/datasets.py @@ -0,0 +1,453 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Code for creating sequence datasets. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import pickle + +import numpy as np +from scipy.sparse import coo_matrix +import tensorflow as tf + +# The default number of threads used to process data in parallel. +DEFAULT_PARALLELISM = 12 + + +def sparse_pianoroll_to_dense(pianoroll, min_note, num_notes): + """Converts a sparse pianoroll to a dense numpy array. + + Given a sparse pianoroll, converts it to a dense numpy array of shape + [num_timesteps, num_notes] where entry i,j is 1.0 if note j is active on + timestep i and 0.0 otherwise. + + Args: + pianoroll: A sparse pianoroll object, a list of tuples where the i'th tuple + contains the indices of the notes active at timestep i. + min_note: The minimum note in the pianoroll, subtracted from all notes so + that the minimum note becomes 0. + num_notes: The number of possible different note indices, determines the + second dimension of the resulting dense array. + Returns: + dense_pianoroll: A [num_timesteps, num_notes] numpy array of floats. + num_timesteps: A python int, the number of timesteps in the pianoroll. + """ + num_timesteps = len(pianoroll) + inds = [] + for time, chord in enumerate(pianoroll): + # Re-index the notes to start from min_note. + inds.extend((time, note-min_note) for note in chord) + shape = [num_timesteps, num_notes] + values = [1.] * len(inds) + sparse_pianoroll = coo_matrix( + (values, ([x[0] for x in inds], [x[1] for x in inds])), + shape=shape) + return sparse_pianoroll.toarray(), num_timesteps + + +def create_pianoroll_dataset(path, + split, + batch_size, + num_parallel_calls=DEFAULT_PARALLELISM, + shuffle=False, + repeat=False, + min_note=21, + max_note=108): + """Creates a pianoroll dataset. + + Args: + path: The path of a pickle file containing the dataset to load. + split: The split to use, can be train, test, or valid. + batch_size: The batch size. If repeat is False then it is not guaranteed + that the true batch size will match for all batches since batch_size + may not necessarily evenly divide the number of elements. + num_parallel_calls: The number of threads to use for parallel processing of + the data. + shuffle: If true, shuffles the order of the dataset. + repeat: If true, repeats the dataset endlessly. + min_note: The minimum note number of the dataset. For all pianoroll datasets + the minimum note is number 21, and changing this affects the dimension of + the data. This is useful mostly for testing. + max_note: The maximum note number of the dataset. For all pianoroll datasets + the maximum note is number 108, and changing this affects the dimension of + the data. This is useful mostly for testing. + Returns: + inputs: A batch of input sequences represented as a dense Tensor of shape + [time, batch_size, data_dimension]. 
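A quick illustration of sparse_pianoroll_to_dense defined above, using a hypothetical three-timestep pianoroll (the authoritative cases are in datasets_test.py later in this diff):

dense, num_timesteps = sparse_pianoroll_to_dense(
    [(60,), (), (60, 62)], min_note=60, num_notes=3)
# num_timesteps == 3 and dense is
# [[1., 0., 0.],
#  [0., 0., 0.],
#  [1., 0., 1.]]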
The sequences in inputs are the + sequences in targets shifted one timestep into the future, padded with + zeros. This tensor is mean-centered, with the mean taken from the pickle + file key 'train_mean'. + targets: A batch of target sequences represented as a dense Tensor of + shape [time, batch_size, data_dimension]. + lens: An int Tensor of shape [batch_size] representing the lengths of each + sequence in the batch. + mean: A float Tensor of shape [data_dimension] containing the mean loaded + from the pickle file. + """ + # Load the data from disk. + num_notes = max_note - min_note + 1 + with tf.gfile.Open(path, "r") as f: + raw_data = pickle.load(f) + pianorolls = raw_data[split] + mean = raw_data["train_mean"] + num_examples = len(pianorolls) + + def pianoroll_generator(): + for sparse_pianoroll in pianorolls: + yield sparse_pianoroll_to_dense(sparse_pianoroll, min_note, num_notes) + + dataset = tf.data.Dataset.from_generator( + pianoroll_generator, + output_types=(tf.float64, tf.int64), + output_shapes=([None, num_notes], [])) + + if repeat: dataset = dataset.repeat() + if shuffle: dataset = dataset.shuffle(num_examples) + + # Batch sequences togther, padding them to a common length in time. + dataset = dataset.padded_batch(batch_size, + padded_shapes=([None, num_notes], [])) + + def process_pianoroll_batch(data, lengths): + """Create mean-centered and time-major next-step prediction Tensors.""" + data = tf.to_float(tf.transpose(data, perm=[1, 0, 2])) + lengths = tf.to_int32(lengths) + targets = data + # Mean center the inputs. + inputs = data - tf.constant(mean, dtype=tf.float32, + shape=[1, 1, mean.shape[0]]) + # Shift the inputs one step forward in time. Also remove the last timestep + # so that targets and inputs are the same length. + inputs = tf.pad(inputs, [[1, 0], [0, 0], [0, 0]], mode="CONSTANT")[:-1] + # Mask out unused timesteps. + inputs *= tf.expand_dims(tf.transpose( + tf.sequence_mask(lengths, dtype=inputs.dtype)), 2) + return inputs, targets, lengths + + dataset = dataset.map(process_pianoroll_batch, + num_parallel_calls=num_parallel_calls) + dataset = dataset.prefetch(num_examples) + + itr = dataset.make_one_shot_iterator() + inputs, targets, lengths = itr.get_next() + return inputs, targets, lengths, tf.constant(mean, dtype=tf.float32) + + +def create_human_pose_dataset( + path, + split, + batch_size, + num_parallel_calls=DEFAULT_PARALLELISM, + shuffle=False, + repeat=False,): + """Creates a human pose dataset. + + Args: + path: The path of a pickle file containing the dataset to load. + split: The split to use, can be train, test, or valid. + batch_size: The batch size. If repeat is False then it is not guaranteed + that the true batch size will match for all batches since batch_size + may not necessarily evenly divide the number of elements. + num_parallel_calls: The number of threads to use for parallel processing of + the data. + shuffle: If true, shuffles the order of the dataset. + repeat: If true, repeats the dataset endlessly. + Returns: + inputs: A batch of input sequences represented as a dense Tensor of shape + [time, batch_size, data_dimension]. The sequences in inputs are the + sequences in targets shifted one timestep into the future, padded with + zeros. This tensor is mean-centered, with the mean taken from the pickle + file key 'train_mean'. + targets: A batch of target sequences represented as a dense Tensor of + shape [time, batch_size, data_dimension]. 
+ lens: An int Tensor of shape [batch_size] representing the lengths of each + sequence in the batch. + mean: A float Tensor of shape [data_dimension] containing the mean loaded + from the pickle file. + """ + # Load the data from disk. + with tf.gfile.Open(path, "r") as f: + raw_data = pickle.load(f) + + mean = raw_data["train_mean"] + pose_sequences = raw_data[split] + num_examples = len(pose_sequences) + num_features = pose_sequences[0].shape[1] + + def pose_generator(): + """A generator that yields pose data sequences.""" + # Each timestep has 32 x values followed by 32 y values so is 64 + # dimensional. + for pose_sequence in pose_sequences: + yield pose_sequence, pose_sequence.shape[0] + + dataset = tf.data.Dataset.from_generator( + pose_generator, + output_types=(tf.float64, tf.int64), + output_shapes=([None, num_features], [])) + + if repeat: + dataset = dataset.repeat() + if shuffle: + dataset = dataset.shuffle(num_examples) + + # Batch sequences togther, padding them to a common length in time. + dataset = dataset.padded_batch( + batch_size, padded_shapes=([None, num_features], [])) + + # Post-process each batch, ensuring that it is mean-centered and time-major. + def process_pose_data(data, lengths): + """Creates Tensors for next step prediction and mean-centers the input.""" + data = tf.to_float(tf.transpose(data, perm=[1, 0, 2])) + lengths = tf.to_int32(lengths) + targets = data + # Mean center the inputs. + inputs = data - tf.constant( + mean, dtype=tf.float32, shape=[1, 1, mean.shape[0]]) + # Shift the inputs one step forward in time. Also remove the last timestep + # so that targets and inputs are the same length. + inputs = tf.pad(inputs, [[1, 0], [0, 0], [0, 0]], mode="CONSTANT")[:-1] + # Mask out unused timesteps. + inputs *= tf.expand_dims( + tf.transpose(tf.sequence_mask(lengths, dtype=inputs.dtype)), 2) + return inputs, targets, lengths + + dataset = dataset.map( + process_pose_data, + num_parallel_calls=num_parallel_calls) + dataset = dataset.prefetch(num_examples) + + itr = dataset.make_one_shot_iterator() + inputs, targets, lengths = itr.get_next() + return inputs, targets, lengths, tf.constant(mean, dtype=tf.float32) + + +def create_speech_dataset(path, + batch_size, + samples_per_timestep=200, + num_parallel_calls=DEFAULT_PARALLELISM, + prefetch_buffer_size=2048, + shuffle=False, + repeat=False): + """Creates a speech dataset. + + Args: + path: The path of a possibly sharded TFRecord file containing the data. + batch_size: The batch size. If repeat is False then it is not guaranteed + that the true batch size will match for all batches since batch_size + may not necessarily evenly divide the number of elements. + samples_per_timestep: The number of audio samples per timestep. Used to + reshape the data into sequences of shape [time, samples_per_timestep]. + Should not change except for testing -- in all speech datasets 200 is the + number of samples per timestep. + num_parallel_calls: The number of threads to use for parallel processing of + the data. + prefetch_buffer_size: The size of the prefetch queues to use after reading + and processing the raw data. + shuffle: If true, shuffles the order of the dataset. + repeat: If true, repeats the dataset endlessly. + Returns: + inputs: A batch of input sequences represented as a dense Tensor of shape + [time, batch_size, samples_per_timestep]. The sequences in inputs are the + sequences in targets shifted one timestep into the future, padded with + zeros. 
+ targets: A batch of target sequences represented as a dense Tensor of + shape [time, batch_size, samples_per_timestep]. + lens: An int Tensor of shape [batch_size] representing the lengths of each + sequence in the batch. + """ + filenames = [path] + + def read_speech_example(value): + """Parses a single tf.Example from the TFRecord file.""" + decoded = tf.decode_raw(value, out_type=tf.float32) + example = tf.reshape(decoded, [-1, samples_per_timestep]) + length = tf.shape(example)[0] + return example, length + + # Create the dataset from the TFRecord files + dataset = tf.data.TFRecordDataset(filenames).map( + read_speech_example, num_parallel_calls=num_parallel_calls) + dataset = dataset.prefetch(prefetch_buffer_size) + + if repeat: dataset = dataset.repeat() + if shuffle: dataset = dataset.shuffle(prefetch_buffer_size) + + dataset = dataset.padded_batch( + batch_size, padded_shapes=([None, samples_per_timestep], [])) + + def process_speech_batch(data, lengths): + """Creates Tensors for next step prediction.""" + data = tf.transpose(data, perm=[1, 0, 2]) + lengths = tf.to_int32(lengths) + targets = data + # Shift the inputs one step forward in time. Also remove the last timestep + # so that targets and inputs are the same length. + inputs = tf.pad(data, [[1, 0], [0, 0], [0, 0]], mode="CONSTANT")[:-1] + # Mask out unused timesteps. + inputs *= tf.expand_dims( + tf.transpose(tf.sequence_mask(lengths, dtype=inputs.dtype)), 2) + return inputs, targets, lengths + + dataset = dataset.map(process_speech_batch, + num_parallel_calls=num_parallel_calls) + dataset = dataset.prefetch(prefetch_buffer_size) + + itr = dataset.make_one_shot_iterator() + inputs, targets, lengths = itr.get_next() + return inputs, targets, lengths + + +SQUARED_OBSERVATION = "squared" +ABS_OBSERVATION = "abs" +STANDARD_OBSERVATION = "standard" +OBSERVATION_TYPES = [SQUARED_OBSERVATION, ABS_OBSERVATION, STANDARD_OBSERVATION] + +ROUND_TRANSITION = "round" +STANDARD_TRANSITION = "standard" +TRANSITION_TYPES = [ROUND_TRANSITION, STANDARD_TRANSITION] + + +def create_chain_graph_dataset( + batch_size, + num_timesteps, + steps_per_observation=None, + state_size=1, + transition_variance=1., + observation_variance=1., + transition_type=STANDARD_TRANSITION, + observation_type=STANDARD_OBSERVATION, + fixed_observation=None, + prefetch_buffer_size=2048, + dtype="float32"): + """Creates a toy chain graph dataset. + + Creates a dataset where the data are sampled from a diffusion process. The + 'latent' states of the process are sampled as a chain of Normals: + + z0 ~ N(0, transition_variance) + z1 ~ N(transition_fn(z0), transition_variance) + ... + + where transition_fn could be round z0 or pass it through unchanged. + + The observations are produced every steps_per_observation timesteps as a + function of the latent zs. For example if steps_per_observation is 3 then the + first observation will be produced as a function of z3: + + x1 ~ N(observation_fn(z3), observation_variance) + + where observation_fn could square z3, take the absolute value, or pass + it through unchanged. + + Only the observations are returned. + + Args: + batch_size: The batch size. The number of trajectories to run in parallel. + num_timesteps: The length of the chain of latent states (i.e. the + number of z's excluding z0. + steps_per_observation: The number of latent states between each observation, + must evenly divide num_timesteps. + state_size: The size of the latent state and observation, must be a + python int. 
+ transition_variance: The variance of the transition density. + observation_variance: The variance of the observation density. + transition_type: Must be one of "round" or "standard". "round" means that + the transition density is centered at the rounded previous latent state. + "standard" centers the transition density at the previous latent state, + unchanged. + observation_type: Must be one of "squared", "abs" or "standard". "squared" + centers the observation density at the squared latent state. "abs" + centers the observaiton density at the absolute value of the current + latent state. "standard" centers the observation density at the current + latent state. + fixed_observation: If not None, fixes all observations to be a constant. + Must be a scalar. + prefetch_buffer_size: The size of the prefetch queues to use after reading + and processing the raw data. + dtype: A string convertible to a tensorflow datatype. The datatype used + to represent the states and observations. + Returns: + observations: A batch of observations represented as a dense Tensor of + shape [num_observations, batch_size, state_size]. num_observations is + num_timesteps/steps_per_observation. + lens: An int Tensor of shape [batch_size] representing the lengths of each + sequence in the batch. Will contain num_observations as each entry. + Raises: + ValueError: Raised if steps_per_observation does not evenly divide + num_timesteps. + """ + if steps_per_observation is None: + steps_per_observation = num_timesteps + if num_timesteps % steps_per_observation != 0: + raise ValueError("steps_per_observation must evenly divide num_timesteps.") + num_observations = int(num_timesteps / steps_per_observation) + def data_generator(): + """An infinite generator of latents and observations from the model.""" + transition_std = np.sqrt(transition_variance) + observation_std = np.sqrt(observation_variance) + while True: + states = [] + observations = [] + # Sample z0 ~ Normal(0, sqrt(variance)). + states.append( + np.random.normal(size=[state_size], + scale=observation_std).astype(dtype)) + # Start the range at 1 because we've already generated z0. + # The range ends at num_timesteps+1 because we want to include the + # num_timesteps-th step. + for t in xrange(1, num_timesteps+1): + if transition_type == ROUND_TRANSITION: + loc = np.round(states[-1]) + elif transition_type == STANDARD_TRANSITION: + loc = states[-1] + z_t = np.random.normal(size=[state_size], loc=loc, scale=transition_std) + states.append(z_t.astype(dtype)) + if t % steps_per_observation == 0: + if fixed_observation is None: + if observation_type == SQUARED_OBSERVATION: + loc = np.square(states[-1]) + elif observation_type == ABS_OBSERVATION: + loc = np.abs(states[-1]) + elif observation_type == STANDARD_OBSERVATION: + loc = states[-1] + x_t = np.random.normal(size=[state_size], + loc=loc, + scale=observation_std).astype(dtype) + else: + x_t = np.ones([state_size]) * fixed_observation + + observations.append(x_t) + yield states, observations + + dataset = tf.data.Dataset.from_generator( + data_generator, + output_types=(tf.as_dtype(dtype), tf.as_dtype(dtype)), + output_shapes=([num_timesteps+1, state_size], + [num_observations, state_size]) + ) + dataset = dataset.repeat().batch(batch_size) + dataset = dataset.prefetch(prefetch_buffer_size) + itr = dataset.make_one_shot_iterator() + _, observations = itr.get_next() + # Transpose observations from [batch, time, state_size] to + # [time, batch, state_size]. 
+ observations = tf.transpose(observations, perm=[1, 0, 2]) + lengths = tf.ones([batch_size], dtype=tf.int32) * num_observations + return observations, lengths diff --git a/models/research/fivo/fivo/data/datasets_test.py b/models/research/fivo/fivo/data/datasets_test.py new file mode 100644 index 0000000000000000000000000000000000000000..e6bbfda67aa44efc0bc4b1a34eb0cb9f09d53de5 --- /dev/null +++ b/models/research/fivo/fivo/data/datasets_test.py @@ -0,0 +1,303 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for fivo.data.datasets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import pickle +import os + +import numpy as np +import tensorflow as tf + +from fivo.data import datasets + +FLAGS = tf.app.flags.FLAGS + + +class DatasetsTest(tf.test.TestCase): + + def test_sparse_pianoroll_to_dense_empty_at_end(self): + sparse_pianoroll = [(0, 1), (1, 0), (), (1,), (), ()] + dense_pianoroll, num_timesteps = datasets.sparse_pianoroll_to_dense( + sparse_pianoroll, min_note=0, num_notes=2) + self.assertEqual(num_timesteps, 6) + self.assertAllEqual([[1, 1], + [1, 1], + [0, 0], + [0, 1], + [0, 0], + [0, 0]], dense_pianoroll) + + def test_sparse_pianoroll_to_dense_with_chord(self): + sparse_pianoroll = [(0, 1), (1, 0), (), (1,)] + dense_pianoroll, num_timesteps = datasets.sparse_pianoroll_to_dense( + sparse_pianoroll, min_note=0, num_notes=2) + self.assertEqual(num_timesteps, 4) + self.assertAllEqual([[1, 1], + [1, 1], + [0, 0], + [0, 1]], dense_pianoroll) + + def test_sparse_pianoroll_to_dense_simple(self): + sparse_pianoroll = [(0,), (), (1,)] + dense_pianoroll, num_timesteps = datasets.sparse_pianoroll_to_dense( + sparse_pianoroll, min_note=0, num_notes=2) + self.assertEqual(num_timesteps, 3) + self.assertAllEqual([[1, 0], + [0, 0], + [0, 1]], dense_pianoroll) + + def test_sparse_pianoroll_to_dense_subtracts_min_note(self): + sparse_pianoroll = [(4, 5), (5, 4), (), (5,), (), ()] + dense_pianoroll, num_timesteps = datasets.sparse_pianoroll_to_dense( + sparse_pianoroll, min_note=4, num_notes=2) + self.assertEqual(num_timesteps, 6) + self.assertAllEqual([[1, 1], + [1, 1], + [0, 0], + [0, 1], + [0, 0], + [0, 0]], dense_pianoroll) + + def test_sparse_pianoroll_to_dense_uses_num_notes(self): + sparse_pianoroll = [(4, 5), (5, 4), (), (5,), (), ()] + dense_pianoroll, num_timesteps = datasets.sparse_pianoroll_to_dense( + sparse_pianoroll, min_note=4, num_notes=3) + self.assertEqual(num_timesteps, 6) + self.assertAllEqual([[1, 1, 0], + [1, 1, 0], + [0, 0, 0], + [0, 1, 0], + [0, 0, 0], + [0, 0, 0]], dense_pianoroll) + + def test_pianoroll_dataset(self): + pianoroll_data = [[(0,), (), (1,)], + [(0, 1), (1,)], + [(1,), (0,), (), (0, 1), (), ()]] + pianoroll_mean = np.zeros([3]) + pianoroll_mean[-1] = 1 + data = {"train": pianoroll_data, "train_mean": pianoroll_mean} + path = 
os.path.join(tf.test.get_temp_dir(), "test.pkl") + pickle.dump(data, open(path, "wb")) + with self.test_session() as sess: + inputs, targets, lens, mean = datasets.create_pianoroll_dataset( + path, "train", 2, num_parallel_calls=1, + shuffle=False, repeat=False, + min_note=0, max_note=2) + i1, t1, l1 = sess.run([inputs, targets, lens]) + i2, t2, l2 = sess.run([inputs, targets, lens]) + m = sess.run(mean) + # Check the lengths. + self.assertAllEqual([3, 2], l1) + self.assertAllEqual([6], l2) + # Check the mean. + self.assertAllEqual(pianoroll_mean, m) + # Check the targets. The targets should not be mean-centered and should + # be padded with zeros to a common length within a batch. + self.assertAllEqual([[1, 0, 0], + [0, 0, 0], + [0, 1, 0]], t1[:, 0, :]) + self.assertAllEqual([[1, 1, 0], + [0, 1, 0], + [0, 0, 0]], t1[:, 1, :]) + self.assertAllEqual([[0, 1, 0], + [1, 0, 0], + [0, 0, 0], + [1, 1, 0], + [0, 0, 0], + [0, 0, 0]], t2[:, 0, :]) + # Check the inputs. Each sequence should start with zeros on the first + # timestep. Each sequence should be padded with zeros to a common length + # within a batch. The mean should be subtracted from all timesteps except + # the first and the padding. + self.assertAllEqual([[0, 0, 0], + [1, 0, -1], + [0, 0, -1]], i1[:, 0, :]) + self.assertAllEqual([[0, 0, 0], + [1, 1, -1], + [0, 0, 0]], i1[:, 1, :]) + self.assertAllEqual([[0, 0, 0], + [0, 1, -1], + [1, 0, -1], + [0, 0, -1], + [1, 1, -1], + [0, 0, -1]], i2[:, 0, :]) + + def test_human_pose_dataset(self): + pose_data = [ + [[0, 0], [2, 2]], + [[2, 2]], + [[0, 0], [0, 0], [2, 2], [2, 2], [0, 0]], + ] + pose_data = [np.array(x, dtype=np.float64) for x in pose_data] + pose_data_mean = np.array([1, 1], dtype=np.float64) + data = { + "train": pose_data, + "train_mean": pose_data_mean, + } + path = os.path.join(tf.test.get_temp_dir(), "test_human_pose_dataset.pkl") + with open(path, "wb") as out: + pickle.dump(data, out) + with self.test_session() as sess: + inputs, targets, lens, mean = datasets.create_human_pose_dataset( + path, "train", 2, num_parallel_calls=1, shuffle=False, repeat=False) + i1, t1, l1 = sess.run([inputs, targets, lens]) + i2, t2, l2 = sess.run([inputs, targets, lens]) + m = sess.run(mean) + # Check the lengths. + self.assertAllEqual([2, 1], l1) + self.assertAllEqual([5], l2) + # Check the mean. + self.assertAllEqual(pose_data_mean, m) + # Check the targets. The targets should not be mean-centered and should + # be padded with zeros to a common length within a batch. + self.assertAllEqual([[0, 0], [2, 2]], t1[:, 0, :]) + self.assertAllEqual([[2, 2], [0, 0]], t1[:, 1, :]) + self.assertAllEqual([[0, 0], [0, 0], [2, 2], [2, 2], [0, 0]], t2[:, 0, :]) + # Check the inputs. Each sequence should start with zeros on the first + # timestep. Each sequence should be padded with zeros to a common length + # within a batch. The mean should be subtracted from all timesteps except + # the first and the padding. 
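+      # For example, the first sequence [[0, 0], [2, 2]] produces inputs
+      # [[0, 0], [0, 0] - [1, 1]] = [[0, 0], [-1, -1]]: zeros at the first
+      # timestep, then the shifted observation minus the mean.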
+ self.assertAllEqual([[0, 0], [-1, -1]], i1[:, 0, :]) + self.assertAllEqual([[0, 0], [0, 0]], i1[:, 1, :]) + self.assertAllEqual([[0, 0], [-1, -1], [-1, -1], [1, 1], [1, 1]], + i2[:, 0, :]) + + def test_speech_dataset(self): + with self.test_session() as sess: + path = os.path.join( + os.path.dirname(os.path.dirname(os.path.realpath(__file__))), + "test_data", + "tiny_speech_dataset.tfrecord") + inputs, targets, lens = datasets.create_speech_dataset( + path, 3, samples_per_timestep=2, num_parallel_calls=1, + prefetch_buffer_size=3, shuffle=False, repeat=False) + inputs1, targets1, lengths1 = sess.run([inputs, targets, lens]) + inputs2, targets2, lengths2 = sess.run([inputs, targets, lens]) + # Check the lengths. + self.assertAllEqual([1, 2, 3], lengths1) + self.assertAllEqual([4], lengths2) + # Check the targets. The targets should be padded with zeros to a common + # length within a batch. + self.assertAllEqual([[[0., 1.], [0., 1.], [0., 1.]], + [[0., 0.], [2., 3.], [2., 3.]], + [[0., 0.], [0., 0.], [4., 5.]]], + targets1) + self.assertAllEqual([[[0., 1.]], + [[2., 3.]], + [[4., 5.]], + [[6., 7.]]], + targets2) + # Check the inputs. Each sequence should start with zeros on the first + # timestep. Each sequence should be padded with zeros to a common length + # within a batch. + self.assertAllEqual([[[0., 0.], [0., 0.], [0., 0.]], + [[0., 0.], [0., 1.], [0., 1.]], + [[0., 0.], [0., 0.], [2., 3.]]], + inputs1) + self.assertAllEqual([[[0., 0.]], + [[0., 1.]], + [[2., 3.]], + [[4., 5.]]], + inputs2) + + def test_chain_graph_raises_error_on_wrong_steps_per_observation(self): + with self.assertRaises(ValueError): + datasets.create_chain_graph_dataset( + batch_size=4, + num_timesteps=10, + steps_per_observation=9) + + def test_chain_graph_single_obs(self): + with self.test_session() as sess: + np.random.seed(1234) + num_observations = 1 + num_timesteps = 5 + batch_size = 2 + state_size = 1 + observations, lengths = datasets.create_chain_graph_dataset( + batch_size=batch_size, + num_timesteps=num_timesteps, + state_size=state_size) + out_observations, out_lengths = sess.run([observations, lengths]) + self.assertAllEqual([num_observations, num_observations], out_lengths) + self.assertAllClose( + [[[1.426677], [-1.789461]]], + out_observations) + + def test_chain_graph_multiple_obs(self): + with self.test_session() as sess: + np.random.seed(1234) + num_observations = 3 + num_timesteps = 6 + batch_size = 2 + state_size = 1 + observations, lengths = datasets.create_chain_graph_dataset( + batch_size=batch_size, + num_timesteps=num_timesteps, + steps_per_observation=num_timesteps/num_observations, + state_size=state_size) + out_observations, out_lengths = sess.run([observations, lengths]) + self.assertAllEqual([num_observations, num_observations], out_lengths) + self.assertAllClose( + [[[0.40051451], [1.07405114]], + [[1.73932898], [3.16880035]], + [[-1.98377144], [2.82669163]]], + out_observations) + + def test_chain_graph_state_dims(self): + with self.test_session() as sess: + np.random.seed(1234) + num_observations = 1 + num_timesteps = 5 + batch_size = 2 + state_size = 3 + observations, lengths = datasets.create_chain_graph_dataset( + batch_size=batch_size, + num_timesteps=num_timesteps, + state_size=state_size) + out_observations, out_lengths = sess.run([observations, lengths]) + self.assertAllEqual([num_observations, num_observations], out_lengths) + self.assertAllClose( + [[[1.052287, -4.560759, 3.07988], + [2.008926, 0.495567, 3.488678]]], + out_observations) + + def 
test_chain_graph_fixed_obs(self): + with self.test_session() as sess: + np.random.seed(1234) + num_observations = 3 + num_timesteps = 6 + batch_size = 2 + state_size = 1 + observations, lengths = datasets.create_chain_graph_dataset( + batch_size=batch_size, + num_timesteps=num_timesteps, + steps_per_observation=num_timesteps/num_observations, + state_size=state_size, + fixed_observation=4.) + out_observations, out_lengths = sess.run([observations, lengths]) + self.assertAllEqual([num_observations, num_observations], out_lengths) + self.assertAllClose( + np.ones([num_observations, batch_size, state_size]) * 4., + out_observations) + +if __name__ == "__main__": + tf.test.main() diff --git a/models/research/fivo/fivo/ghmm_runners.py b/models/research/fivo/fivo/ghmm_runners.py new file mode 100644 index 0000000000000000000000000000000000000000..1f1ba6d4f9ea9ed9dee7d95449ba73285c77f24d --- /dev/null +++ b/models/research/fivo/fivo/ghmm_runners.py @@ -0,0 +1,235 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Creates and runs Gaussian HMM-related graphs.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +import numpy as np +import tensorflow as tf + +from fivo import smc +from fivo import bounds +from fivo.data import datasets +from fivo.models import ghmm + + +def run_train(config): + """Runs training for a Gaussian HMM setup.""" + + def create_logging_hook(step, bound_value, likelihood, bound_gap): + """Creates a logging hook that prints the bound value periodically.""" + bound_label = config.bound + "/t" + def summary_formatter(log_dict): + string = ("Step {step}, %s: {value:.3f}, " + "likelihood: {ll:.3f}, gap: {gap:.3e}") % bound_label + return string.format(**log_dict) + logging_hook = tf.train.LoggingTensorHook( + {"step": step, "value": bound_value, + "ll": likelihood, "gap": bound_gap}, + every_n_iter=config.summarize_every, + formatter=summary_formatter) + return logging_hook + + def create_losses(model, observations, lengths): + """Creates the loss to be optimized. + + Args: + model: A Trainable GHMM model. + observations: A set of observations. + lengths: The lengths of each sequence in the observations. + Returns: + loss: A float Tensor that when differentiated yields the gradients + to apply to the model. Should be optimized via gradient descent. + bound: A float Tensor containing the value of the bound that is + being optimized. + true_ll: The true log-likelihood of the data under the model. + bound_gap: The gap between the bound and the true log-likelihood. + """ + # Compute lower bounds on the log likelihood. 
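+    # The ELBO is the IWAE bound with a single sample, so all three bounds
+    # are computed via bounds.iwae or bounds.fivo below.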
+ if config.bound == "elbo": + ll_per_seq, _, _ = bounds.iwae( + model, observations, lengths, num_samples=1, + parallel_iterations=config.parallel_iterations + ) + elif config.bound == "iwae": + ll_per_seq, _, _ = bounds.iwae( + model, observations, lengths, num_samples=config.num_samples, + parallel_iterations=config.parallel_iterations + ) + elif config.bound == "fivo": + if config.resampling_type == "relaxed": + ll_per_seq, _, _, _ = bounds.fivo( + model, + observations, + lengths, + num_samples=config.num_samples, + resampling_criterion=smc.ess_criterion, + resampling_type=config.resampling_type, + relaxed_resampling_temperature=config. + relaxed_resampling_temperature, + random_seed=config.random_seed, + parallel_iterations=config.parallel_iterations) + else: + ll_per_seq, _, _, _ = bounds.fivo( + model, observations, lengths, + num_samples=config.num_samples, + resampling_criterion=smc.ess_criterion, + resampling_type=config.resampling_type, + random_seed=config.random_seed, + parallel_iterations=config.parallel_iterations + ) + ll_per_t = tf.reduce_mean(ll_per_seq / tf.to_float(lengths)) + # Compute the data's true likelihood under the model and the bound gap. + true_ll_per_seq = model.likelihood(tf.squeeze(observations)) + true_ll_per_t = tf.reduce_mean(true_ll_per_seq / tf.to_float(lengths)) + bound_gap = true_ll_per_seq - ll_per_seq + bound_gap = tf.reduce_mean(bound_gap/ tf.to_float(lengths)) + tf.summary.scalar("train_ll_bound", ll_per_t) + tf.summary.scalar("train_true_ll", true_ll_per_t) + tf.summary.scalar("bound_gap", bound_gap) + return -ll_per_t, ll_per_t, true_ll_per_t, bound_gap + + def create_graph(): + """Creates the training graph.""" + global_step = tf.train.get_or_create_global_step() + xs, lengths = datasets.create_chain_graph_dataset( + config.batch_size, + config.num_timesteps, + steps_per_observation=1, + state_size=1, + transition_variance=config.variance, + observation_variance=config.variance) + model = ghmm.TrainableGaussianHMM( + config.num_timesteps, + config.proposal_type, + transition_variances=config.variance, + emission_variances=config.variance, + random_seed=config.random_seed) + loss, bound, true_ll, gap = create_losses(model, xs, lengths) + opt = tf.train.AdamOptimizer(config.learning_rate) + grads = opt.compute_gradients(loss, var_list=tf.trainable_variables()) + train_op = opt.apply_gradients(grads, global_step=global_step) + return bound, true_ll, gap, train_op, global_step + + with tf.Graph().as_default(): + if config.random_seed: + tf.set_random_seed(config.random_seed) + np.random.seed(config.random_seed) + bound, true_ll, gap, train_op, global_step = create_graph() + log_hook = create_logging_hook(global_step, bound, true_ll, gap) + with tf.train.MonitoredTrainingSession( + master="", + hooks=[log_hook], + checkpoint_dir=config.logdir, + save_checkpoint_secs=120, + save_summaries_steps=config.summarize_every, + log_step_count_steps=config.summarize_every*20) as sess: + cur_step = -1 + while cur_step <= config.max_steps and not sess.should_stop(): + cur_step = sess.run(global_step) + _, cur_step = sess.run([train_op, global_step]) + + +def run_eval(config): + """Evaluates a Gaussian HMM using the given config.""" + + def create_bound(model, xs, lengths): + """Creates the bound to be evaluated.""" + if config.bound == "elbo": + ll_per_seq, log_weights, _ = bounds.iwae( + model, xs, lengths, num_samples=1, + parallel_iterations=config.parallel_iterations + ) + elif config.bound == "iwae": + ll_per_seq, log_weights, _ = bounds.iwae( + model, 
xs, lengths, num_samples=config.num_samples, + parallel_iterations=config.parallel_iterations + ) + elif config.bound == "fivo": + ll_per_seq, log_weights, resampled, _ = bounds.fivo( + model, xs, lengths, + num_samples=config.num_samples, + resampling_criterion=smc.ess_criterion, + resampling_type=config.resampling_type, + random_seed=config.random_seed, + parallel_iterations=config.parallel_iterations + ) + # Compute bound scaled by number of timesteps. + bound_per_t = ll_per_seq / tf.to_float(lengths) + if config.bound == "fivo": + return bound_per_t, log_weights, resampled + else: + return bound_per_t, log_weights + + def create_graph(): + """Creates the dataset, model, and bound.""" + xs, lengths = datasets.create_chain_graph_dataset( + config.batch_size, + config.num_timesteps, + steps_per_observation=1, + state_size=1, + transition_variance=config.variance, + observation_variance=config.variance) + model = ghmm.TrainableGaussianHMM( + config.num_timesteps, + config.proposal_type, + transition_variances=config.variance, + emission_variances=config.variance, + random_seed=config.random_seed) + true_likelihood = tf.reduce_mean( + model.likelihood(tf.squeeze(xs)) / tf.to_float(lengths)) + outs = [true_likelihood] + outs.extend(list(create_bound(model, xs, lengths))) + return outs + + with tf.Graph().as_default(): + if config.random_seed: + tf.set_random_seed(config.random_seed) + np.random.seed(config.random_seed) + graph_outs = create_graph() + with tf.train.SingularMonitoredSession( + checkpoint_dir=config.logdir) as sess: + outs = sess.run(graph_outs) + likelihood = outs[0] + avg_bound = np.mean(outs[1]) + std = np.std(outs[1]) + log_weights = outs[2] + log_weight_variances = np.var(log_weights, axis=2) + avg_log_weight_variance = np.var(log_weight_variances, axis=1) + avg_log_weight = np.mean(log_weights, axis=(1, 2)) + data = {"mean": avg_bound, "std": std, "log_weights": log_weights, + "log_weight_means": avg_log_weight, + "log_weight_variances": avg_log_weight_variance} + if len(outs) == 4: + data["resampled"] = outs[3] + data["avg_resampled"] = np.mean(outs[3], axis=1) + # Log some useful statistics. + tf.logging.info("Evaled bound %s with batch_size: %d, num_samples: %d." + % (config.bound, config.batch_size, config.num_samples)) + tf.logging.info("mean: %f, std: %f" % (avg_bound, std)) + tf.logging.info("true likelihood: %s" % likelihood) + tf.logging.info("avg log weight: %s" % avg_log_weight) + tf.logging.info("log weight variance: %s" % avg_log_weight_variance) + if len(outs) == 4: + tf.logging.info("avg resamples per t: %s" % data["avg_resampled"]) + if not tf.gfile.Exists(config.logdir): + tf.gfile.MakeDirs(config.logdir) + with tf.gfile.Open(os.path.join(config.logdir, "out.npz"), "w") as fout: + np.save(fout, data) diff --git a/models/research/fivo/fivo/ghmm_runners_test.py b/models/research/fivo/fivo/ghmm_runners_test.py new file mode 100644 index 0000000000000000000000000000000000000000..50044ad475b3458858b580a6ff7664267485757b --- /dev/null +++ b/models/research/fivo/fivo/ghmm_runners_test.py @@ -0,0 +1,106 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for fivo.ghmm_runners.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import numpy as np +import tensorflow as tf + +from fivo import ghmm_runners + + +class GHMMRunnersTest(tf.test.TestCase): + + def default_config(self): + class Config(object): + pass + config = Config() + config.model = "ghmm" + config.bound = "fivo" + config.proposal_type = "prior" + config.batch_size = 4 + config.num_samples = 4 + config.num_timesteps = 10 + config.variance = 0.1 + config.resampling_type = "multinomial" + config.random_seed = 1234 + config.parallel_iterations = 1 + config.learning_rate = 1e-4 + config.summarize_every = 1 + config.max_steps = 1 + return config + + def test_eval_ghmm_notraining_fivo_prior(self): + self.eval_ghmm_notraining("fivo", "prior", -3.063864) + + def test_eval_ghmm_notraining_fivo_true_filtering(self): + self.eval_ghmm_notraining("fivo", "true-filtering", -1.1409812) + + def test_eval_ghmm_notraining_fivo_true_smoothing(self): + self.eval_ghmm_notraining("fivo", "true-smoothing", -0.85592091) + + def test_eval_ghmm_notraining_iwae_prior(self): + self.eval_ghmm_notraining("iwae", "prior", -5.9730167) + + def test_eval_ghmm_notraining_iwae_true_filtering(self): + self.eval_ghmm_notraining("iwae", "true-filtering", -1.1485999) + + def test_eval_ghmm_notraining_iwae_true_smoothing(self): + self.eval_ghmm_notraining("iwae", "true-smoothing", -0.85592091) + + def eval_ghmm_notraining(self, bound, proposal_type, expected_bound_avg): + config = self.default_config() + config.proposal_type = proposal_type + config.bound = bound + config.logdir = os.path.join( + tf.test.get_temp_dir(), "test-ghmm-%s-%s" % (proposal_type, bound)) + + ghmm_runners.run_eval(config) + + data = np.load(os.path.join(config.logdir, "out.npz")).item() + self.assertAlmostEqual(expected_bound_avg, data["mean"], places=3) + + def test_train_ghmm_for_one_step_and_eval_fivo_filtering(self): + self.train_ghmm_for_one_step_and_eval("fivo", "filtering", -16.727108) + + def test_train_ghmm_for_one_step_and_eval_fivo_smoothing(self): + self.train_ghmm_for_one_step_and_eval("fivo", "smoothing", -19.381277) + + def test_train_ghmm_for_one_step_and_eval_iwae_filtering(self): + self.train_ghmm_for_one_step_and_eval("iwae", "filtering", -33.31966) + + def test_train_ghmm_for_one_step_and_eval_iwae_smoothing(self): + self.train_ghmm_for_one_step_and_eval("iwae", "smoothing", -46.388447) + + def train_ghmm_for_one_step_and_eval(self, bound, proposal_type, expected_bound_avg): + config = self.default_config() + config.proposal_type = proposal_type + config.bound = bound + config.max_steps = 1 + config.logdir = os.path.join( + tf.test.get_temp_dir(), "test-ghmm-training-%s-%s" % (proposal_type, bound)) + ghmm_runners.run_train(config) + ghmm_runners.run_eval(config) + data = np.load(os.path.join(config.logdir, "out.npz")).item() + self.assertAlmostEqual(expected_bound_avg, data["mean"], places=2) + + +if __name__ == "__main__": + tf.test.main() diff 
--git a/models/research/fivo/fivo/models/__init__.py b/models/research/fivo/fivo/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/fivo/fivo/models/base.py b/models/research/fivo/fivo/models/base.py new file mode 100644 index 0000000000000000000000000000000000000000..5ffcb7af216f5659e71d7425eeb4e2c3158b3d47 --- /dev/null +++ b/models/research/fivo/fivo/models/base.py @@ -0,0 +1,342 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Reusable model classes for FIVO.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sonnet as snt +import tensorflow as tf + +from fivo import nested_utils as nested + +tfd = tf.contrib.distributions + + +class ELBOTrainableSequenceModel(object): + """An abstract class for ELBO-trainable sequence models to extend. + + Because the ELBO, IWAE, and FIVO bounds all accept the same arguments, + any model that is ELBO-trainable is also IWAE- and FIVO-trainable. + """ + + def zero_state(self, batch_size, dtype): + """Returns the initial state of the model as a Tensor or tuple of Tensors. + + Args: + batch_size: The batch size. + dtype: The datatype to use for the state. + """ + raise NotImplementedError("zero_state not yet implemented.") + + def set_observations(self, observations, seq_lengths): + """Sets the observations for the model. + + This method provides the model with all observed variables including both + inputs and targets. It will be called before running any computations with + the model that require the observations, e.g. training the model or + computing bounds, and should be used to run any necessary preprocessing + steps. + + Args: + observations: A potentially nested set of Tensors containing + all observations for the model, both inputs and targets. Typically + a set of Tensors with shape [max_seq_len, batch_size, data_size]. + seq_lengths: A [batch_size] Tensor of ints encoding the length of each + sequence in the batch (sequences can be padded to a common length). + """ + self.observations = observations + self.max_seq_len = tf.reduce_max(seq_lengths) + self.observations_ta = nested.tas_for_tensors( + observations, self.max_seq_len, clear_after_read=False) + self.seq_lengths = seq_lengths + + def propose_and_weight(self, state, t): + """Propogates model state one timestep and computes log weights. + + This method accepts the current state of the model and computes the state + for the next timestep as well as the incremental log weight of each + element in the batch. + + Args: + state: The current state of the model. + t: A scalar integer Tensor representing the current timestep. + Returns: + next_state: The state of the model after one timestep. + log_weights: A [batch_size] Tensor containing the incremental log weights. 
+ """ + raise NotImplementedError("propose_and_weight not yet implemented.") + +DEFAULT_INITIALIZERS = {"w": tf.contrib.layers.xavier_initializer(), + "b": tf.zeros_initializer()} + + +class ConditionalNormalDistribution(object): + """A Normal distribution conditioned on Tensor inputs via a fc network.""" + + def __init__(self, size, hidden_layer_sizes, sigma_min=0.0, + raw_sigma_bias=0.25, hidden_activation_fn=tf.nn.relu, + initializers=None, name="conditional_normal_distribution"): + """Creates a conditional Normal distribution. + + Args: + size: The dimension of the random variable. + hidden_layer_sizes: The sizes of the hidden layers of the fully connected + network used to condition the distribution on the inputs. + sigma_min: The minimum standard deviation allowed, a scalar. + raw_sigma_bias: A scalar that is added to the raw standard deviation + output from the fully connected network. Set to 0.25 by default to + prevent standard deviations close to 0. + hidden_activation_fn: The activation function to use on the hidden layers + of the fully connected network. + initializers: The variable intitializers to use for the fully connected + network. The network is implemented using snt.nets.MLP so it must + be a dictionary mapping the keys 'w' and 'b' to the initializers for + the weights and biases. Defaults to xavier for the weights and zeros + for the biases when initializers is None. + name: The name of this distribution, used for sonnet scoping. + """ + self.sigma_min = sigma_min + self.raw_sigma_bias = raw_sigma_bias + self.name = name + self.size = size + if initializers is None: + initializers = DEFAULT_INITIALIZERS + self.fcnet = snt.nets.MLP( + output_sizes=hidden_layer_sizes + [2*size], + activation=hidden_activation_fn, + initializers=initializers, + activate_final=False, + use_bias=True, + name=name + "_fcnet") + + def condition(self, tensor_list, **unused_kwargs): + """Computes the parameters of a normal distribution based on the inputs.""" + inputs = tf.concat(tensor_list, axis=1) + outs = self.fcnet(inputs) + mu, sigma = tf.split(outs, 2, axis=1) + sigma = tf.maximum(tf.nn.softplus(sigma + self.raw_sigma_bias), + self.sigma_min) + return mu, sigma + + def __call__(self, *args, **kwargs): + """Creates a normal distribution conditioned on the inputs.""" + mu, sigma = self.condition(args, **kwargs) + return tf.contrib.distributions.Normal(loc=mu, scale=sigma) + + +class ConditionalBernoulliDistribution(object): + """A Bernoulli distribution conditioned on Tensor inputs via a fc net.""" + + def __init__(self, size, hidden_layer_sizes, hidden_activation_fn=tf.nn.relu, + initializers=None, bias_init=0.0, + name="conditional_bernoulli_distribution"): + """Creates a conditional Bernoulli distribution. + + Args: + size: The dimension of the random variable. + hidden_layer_sizes: The sizes of the hidden layers of the fully connected + network used to condition the distribution on the inputs. + hidden_activation_fn: The activation function to use on the hidden layers + of the fully connected network. + initializers: The variable intiializers to use for the fully connected + network. The network is implemented using snt.nets.MLP so it must + be a dictionary mapping the keys 'w' and 'b' to the initializers for + the weights and biases. Defaults to xavier for the weights and zeros + for the biases when initializers is None. + bias_init: A scalar or vector Tensor that is added to the output of the + fully-connected network that parameterizes the mean of this + distribution. 
+ name: The name of this distribution, used for sonnet scoping. + """ + self.bias_init = bias_init + self.size = size + if initializers is None: + initializers = DEFAULT_INITIALIZERS + self.fcnet = snt.nets.MLP( + output_sizes=hidden_layer_sizes + [size], + activation=hidden_activation_fn, + initializers=initializers, + activate_final=False, + use_bias=True, + name=name + "_fcnet") + + def condition(self, tensor_list): + """Computes the p parameter of the Bernoulli distribution.""" + inputs = tf.concat(tensor_list, axis=1) + return self.fcnet(inputs) + self.bias_init + + def __call__(self, *args): + p = self.condition(args) + return tf.contrib.distributions.Bernoulli(logits=p) + + +class NormalApproximatePosterior(ConditionalNormalDistribution): + """A Normally-distributed approx. posterior with res_q parameterization.""" + + def __init__(self, size, hidden_layer_sizes, sigma_min=0.0, + raw_sigma_bias=0.25, hidden_activation_fn=tf.nn.relu, + initializers=None, smoothing=False, + name="conditional_normal_distribution"): + super(NormalApproximatePosterior, self).__init__( + size, hidden_layer_sizes, sigma_min=sigma_min, + raw_sigma_bias=raw_sigma_bias, + hidden_activation_fn=hidden_activation_fn, initializers=initializers, + name=name) + self.smoothing = smoothing + + def condition(self, tensor_list, prior_mu, smoothing_tensors=None): + """Generates the mean and variance of the normal distribution. + + Args: + tensor_list: The list of Tensors to condition on. Will be concatenated and + fed through a fully connected network. + prior_mu: The mean of the prior distribution associated with this + approximate posterior. Will be added to the mean produced by + this approximate posterior, in res_q fashion. + smoothing_tensors: A list of Tensors. If smoothing is True, these Tensors + will be concatenated with the tensors in tensor_list. + Returns: + mu: The mean of the approximate posterior. + sigma: The standard deviation of the approximate posterior. + """ + if self.smoothing: + tensor_list.extend(smoothing_tensors) + mu, sigma = super(NormalApproximatePosterior, self).condition(tensor_list) + return mu + prior_mu, sigma + + +class NonstationaryLinearDistribution(object): + """A set of loc-scale distributions that are linear functions of inputs. + + This class defines a series of location-scale distributions such that + the means are learnable linear functions of the inputs and the log variances + are learnable constants. The functions and log variances are different across + timesteps, allowing the distributions to be nonstationary. + """ + + def __init__(self, + num_timesteps, + inputs_per_timestep=None, + outputs_per_timestep=None, + initializers=None, + variance_min=0.0, + output_distribution=tfd.Normal, + dtype=tf.float32): + """Creates a NonstationaryLinearDistribution. + + Args: + num_timesteps: The number of timesteps, i.e. the number of distributions. + inputs_per_timestep: A list of python ints, the dimension of inputs to the + linear function at each timestep. If not provided, the dimension at each + timestep is assumed to be 1. + outputs_per_timestep: A list of python ints, the dimension of the output + distribution at each timestep. If not provided, the dimension at each + timestep is assumed to be 1. + initializers: A dictionary containing intializers for the variables. The + initializer under the key 'w' is used for the weights in the linear + function and the initializer under the key 'b' is used for the biases. 
+ Defaults to xavier initialization for the weights and zeros for the + biases. + variance_min: Python float, the minimum variance of each distribution. + output_distribution: A locatin-scale subclass of tfd.Distribution that + defines the output distribution, e.g. Normal. + dtype: The dtype of the weights and biases. + """ + if not initializers: + initializers = DEFAULT_INITIALIZERS + if not inputs_per_timestep: + inputs_per_timestep = [1] * num_timesteps + if not outputs_per_timestep: + outputs_per_timestep = [1] * num_timesteps + self.num_timesteps = num_timesteps + self.variance_min = variance_min + self.initializers = initializers + self.dtype = dtype + self.output_distribution = output_distribution + + def _get_variables_ta(shapes, name, initializer, trainable=True): + """Creates a sequence of variables and stores them in a TensorArray.""" + # Infer shape if all shapes are equal. + first_shape = shapes[0] + infer_shape = all(shape == first_shape for shape in shapes) + ta = tf.TensorArray( + dtype=dtype, size=len(shapes), dynamic_size=False, + clear_after_read=False, infer_shape=infer_shape) + for t, shape in enumerate(shapes): + var = tf.get_variable( + name % t, shape=shape, initializer=initializer, trainable=trainable) + ta = ta.write(t, var) + return ta + + bias_shapes = [[num_outputs] for num_outputs in outputs_per_timestep] + self.log_variances = _get_variables_ta( + bias_shapes, "proposal_log_variance_%d", initializers["b"]) + self.mean_biases = _get_variables_ta( + bias_shapes, "proposal_b_%d", initializers["b"]) + weight_shapes = zip(inputs_per_timestep, outputs_per_timestep) + self.mean_weights = _get_variables_ta( + weight_shapes, "proposal_w_%d", initializers["w"]) + self.shapes = tf.TensorArray( + dtype=tf.int32, size=num_timesteps, + dynamic_size=False, clear_after_read=False).unstack(weight_shapes) + + def __call__(self, t, inputs): + """Computes the distribution at timestep t. + + Args: + t: Scalar integer Tensor, the current timestep. Must be in + [0, num_timesteps). + inputs: The inputs to the linear function parameterizing the mean of + the current distribution. A Tensor of shape [batch_size, num_inputs_t]. + Returns: + A tfd.Distribution subclass representing the distribution at timestep t. + """ + b = self.mean_biases.read(t) + w = self.mean_weights.read(t) + shape = self.shapes.read(t) + w = tf.reshape(w, shape) + b = tf.reshape(b, [shape[1], 1]) + log_variance = self.log_variances.read(t) + scale = tf.sqrt(tf.maximum(tf.exp(log_variance), self.variance_min)) + loc = tf.matmul(w, inputs, transpose_a=True) + b + return self.output_distribution(loc=loc, scale=scale) + + +def encode_all(inputs, encoder): + """Encodes a timeseries of inputs with a time independent encoder. + + Args: + inputs: A [time, batch, feature_dimensions] tensor. + encoder: A network that takes a [batch, features_dimensions] input and + encodes the input. + Returns: + A [time, batch, encoded_feature_dimensions] output tensor. 
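+
+  The encoder is expected to expose an output_size attribute, which is used
+  to reshape the encoded features back to [time, batch, encoder.output_size].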
+ """ + input_shape = tf.shape(inputs) + num_timesteps, batch_size = input_shape[0], input_shape[1] + reshaped_inputs = tf.reshape(inputs, [-1, inputs.shape[-1]]) + inputs_encoded = encoder(reshaped_inputs) + inputs_encoded = tf.reshape(inputs_encoded, + [num_timesteps, batch_size, encoder.output_size]) + return inputs_encoded + + +def ta_for_tensor(x, **kwargs): + """Creates a TensorArray for the input tensor.""" + return tf.TensorArray( + x.dtype, tf.shape(x)[0], dynamic_size=False, **kwargs).unstack(x) diff --git a/models/research/fivo/fivo/models/ghmm.py b/models/research/fivo/fivo/models/ghmm.py new file mode 100644 index 0000000000000000000000000000000000000000..07cf6c50e803383ef5690e8d24010e4706286eb7 --- /dev/null +++ b/models/research/fivo/fivo/models/ghmm.py @@ -0,0 +1,483 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A Gaussian hidden markov model. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from fivo.models import base + +tfd = tf.contrib.distributions + + +class GaussianHMM(object): + """A hidden markov model with 1-D Gaussian latent space and observations. + + This is a hidden markov model where the state and observations are + one-dimensional Gaussians. The mean of each latent state is a linear + function of the previous latent state, and the mean of each observation + is a linear function of the current latent state. + + The description that follows is 0-indexed instead of 1-indexed to make + it easier to reason about the parameters passed to the model. + + The parameters of the model are: + T: The number timesteps, latent states, and observations. + vz_t, t=0 to T-1: The variance of the latent state at timestep t. + vx_t, t=0 to T-1: The variance of the observation at timestep t. + wz_t, t=1 to T-1: The weight that defines the latent transition at t. + wx_t, t=0 to T-1: The weight that defines the observation function at t. + + There are T vz_t, vx_t, and wx_t but only T-1 wz_t because there are only + T-1 transitions in the model. + + Given these parameters, sampling from the model is defined as + + z_0 ~ N(0, vz_0) + x_0 | z_0 ~ N(wx_0 * z_0, vx_0) + z_1 | z_0 ~ N(wz_1 * z_0, vz_1) + x_1 | z_1 ~ N(wx_1 * z_1, vx_1) + ... + z_{T-1} | z_{T-2} ~ N(wz_{T-1} * z_{T-2}, vz_{T-1}) + x_{T-1} | z_{T-1} ~ N(wx_{T-1} * z_{T-1}, vx_{T-1}). + """ + + def __init__(self, + num_timesteps, + transition_variances=1., + emission_variances=1., + transition_weights=1., + emission_weights=1., + dtype=tf.float32): + """Creates a gaussian hidden markov model. + + Args: + num_timesteps: A python int, the number of timesteps in the model. + transition_variances: The variance of p(z_t | z_t-1). Can be a scalar, + setting all variances to be the same, or a Tensor of shape + [num_timesteps]. + emission_variances: The variance of p(x_t | z_t). 
Can be a scalar, + setting all variances to be the same, or a Tensor of shape + [num_timesteps]. + transition_weights: The weight that defines the linear function that + produces the mean of z_t given z_{t-1}. Can be a scalar, setting + all weights to be the same, or a Tensor of shape [num_timesteps-1]. + emission_weights: The weight that defines the linear function that + produces the mean of x_t given z_t. Can be a scalar, setting + all weights to be the same, or a Tensor of shape [num_timesteps]. + dtype: The datatype of the state. + """ + self.num_timesteps = num_timesteps + self.dtype = dtype + + def _expand_param(param, size): + param = tf.convert_to_tensor(param, dtype=self.dtype) + if not param.get_shape().as_list(): + param = tf.tile(param[tf.newaxis], [size]) + + return param + + def _ta_for_param(param): + size = tf.shape(param)[0] + ta = tf.TensorArray(dtype=param.dtype, + size=size, + dynamic_size=False, + clear_after_read=False).unstack(param) + return ta + + self.transition_variances = _ta_for_param( + _expand_param(transition_variances, num_timesteps)) + self.transition_weights = _ta_for_param( + _expand_param(transition_weights, num_timesteps-1)) + em_var = _expand_param(emission_variances, num_timesteps) + self.emission_variances = _ta_for_param(em_var) + em_w = _expand_param(emission_weights, num_timesteps) + self.emission_weights = _ta_for_param(em_w) + self._compute_covariances(em_w, em_var) + + def _compute_covariances(self, emission_weights, emission_variances): + """Compute all covariance matrices. + + Computes the covaraince matrix for the latent variables, the observations, + and the covariance between the latents and observations. + + Args: + emission_weights: A Tensor of shape [num_timesteps] containing + the emission distribution weights at each timestep. + emission_variances: A Tensor of shape [num_timesteps] containing + the emiision distribution variances at each timestep. + """ + # Compute the marginal variance of each latent. + z_variances = [self.transition_variances.read(0)] + for i in range(1, self.num_timesteps): + z_variances.append( + z_variances[i-1] * tf.square(self.transition_weights.read(i-1)) + + self.transition_variances.read(i)) + # Compute the latent covariance matrix. + sigma_z = [] + for i in range(self.num_timesteps): + sigma_z_row = [] + for j in range(self.num_timesteps): + if i == j: + sigma_z_row.append(z_variances[i]) + continue + min_ind = min(i, j) + max_ind = max(i, j) + weight = tf.reduce_prod( + self.transition_weights.gather(tf.range(min_ind, max_ind))) + sigma_z_row.append(z_variances[min_ind] * weight) + sigma_z.append(tf.stack(sigma_z_row)) + self.sigma_z = tf.stack(sigma_z) + # Compute the observation covariance matrix. + x_weights_outer = tf.einsum("i,j->ij", emission_weights, emission_weights) + self.sigma_x = x_weights_outer * self.sigma_z + tf.diag(emission_variances) + # Compute the latent - observation covariance matrix. + # The first axis will index latents, the second axis will index observtions. + self.sigma_zx = emission_weights[tf.newaxis, :] * self.sigma_z + self.obs_dist = tfd.MultivariateNormalFullCovariance( + loc=tf.zeros([self.num_timesteps], dtype=tf.float32), + covariance_matrix=self.sigma_x) + + def transition(self, t, z_prev): + """Compute the transition distribution p(z_t | z_t-1). + + Args: + t: The current timestep, a scalar integer Tensor. When t=0 z_prev is + mostly ignored and the distribution p(z_0) is returned. z_prev is + 'mostly' ignored because it is still used to derive batch_size. 
+ z_prev: A [batch_size] set of states. + Returns: + p(z_t | z_t-1) as a univariate normal distribution. + """ + batch_size = tf.shape(z_prev)[0] + scale = tf.sqrt(self.transition_variances.read(t)) + scale = tf.tile(scale[tf.newaxis], [batch_size]) + loc = tf.cond(tf.greater(t, 0), + lambda: self.transition_weights.read(t-1)*z_prev, + lambda: tf.zeros_like(scale)) + return tfd.Normal(loc=loc, scale=scale) + + def emission(self, t, z): + """Compute the emission distribution p(x_t | z_t). + + Args: + t: The current timestep, a scalar integer Tensor. + z: A [batch_size] set of the current states. + Returns: + p(x_t | z_t) as a univariate normal distribution. + """ + batch_size = tf.shape(z)[0] + scale = tf.sqrt(self.emission_variances.read(t)) + scale = tf.tile(scale[tf.newaxis], [batch_size]) + loc = self.emission_weights.read(t)*z + return tfd.Normal(loc=loc, scale=scale) + + def filtering(self, t, z_prev, x_cur): + """Computes the filtering distribution p(z_t | z_{t-1}, x_t). + + Args: + t: A python int, the index for z_t. When t is 0, z_prev is ignored, + giving p(z_0 | x_0). + z_prev: z_{t-1}, the previous z to condition on. A Tensor of shape + [batch_size]. + x_cur: x_t, the current x to condition on. A Tensor of shape [batch_size]. + Returns: + p(z_t | z_{t-1}, x_t) as a univariate normal distribution. + """ + z_prev = tf.convert_to_tensor(z_prev) + x_cur = tf.convert_to_tensor(x_cur) + batch_size = tf.shape(z_prev)[0] + z_var = self.transition_variances.read(t) + x_var = self.emission_variances.read(t) + x_weight = self.emission_weights.read(t) + prev_state_weight = x_var/(tf.square(x_weight)*z_var + x_var) + prev_state_weight *= tf.cond(tf.greater(t, 0), + lambda: self.transition_weights.read(t-1), + lambda: tf.zeros_like(prev_state_weight)) + cur_obs_weight = (x_weight*z_var)/(tf.square(x_weight)*z_var + x_var) + loc = prev_state_weight*z_prev + cur_obs_weight*x_cur + scale = tf.sqrt((z_var*x_var)/(tf.square(x_weight)*z_var + x_var)) + scale = tf.tile(scale[tf.newaxis], [batch_size]) + return tfd.Normal(loc=loc, scale=scale) + + def smoothing(self, t, z_prev, xs): + """Computes the smoothing distribution p(z_t | z_{t-1}, x_{t:num_timesteps). + + Args: + t: A python int, the index for z_t. When t is 0, z_prev is ignored, + giving p(z_0 | x_{0:num_timesteps-1}). + z_prev: z_{t-1}, the previous z to condition on. A Tensor of shape + [batch_size]. + xs: x_{t:num_timesteps}, the future xs to condition on. A Tensor of shape + [num_timesteps - t, batch_size]. + Returns: + p(z_t | z_{t-1}, x_{t:num_timesteps}) as a univariate normal distribution. + """ + xs = tf.convert_to_tensor(xs) + z_prev = tf.convert_to_tensor(z_prev) + batch_size = tf.shape(xs)[1] + mess_mean, mess_prec = tf.cond( + tf.less(t, self.num_timesteps-1), + lambda: tf.unstack(self._compute_backwards_messages(xs[1:]).read(0)), + lambda: [tf.zeros([batch_size]), tf.zeros([batch_size])]) + return self._smoothing_from_message(t, z_prev, xs[0], mess_mean, mess_prec) + + def _smoothing_from_message(self, t, z_prev, x_t, mess_mean, mess_prec): + """Computes the smoothing distribution given message incoming to z_t. + + Computes p(z_t | z_{t-1}, x_{t:num_timesteps}) given the message incoming + to the node for z_t. + + Args: + t: A python int, the index for z_t. When t is 0, z_prev is ignored. + z_prev: z_{t-1}, the previous z to condition on. A Tensor of shape + [batch_size]. + x_t: The observation x at timestep t. + mess_mean: The mean of the message incoming to z_t, in information form. 
+ mess_prec: The precision of the message incoming to z_t. + Returns: + p(z_t | z_{t-1}, x_{t:num_timesteps}) as a univariate normal distribution. + """ + + batch_size = tf.shape(x_t)[0] + z_var = self.transition_variances.read(t) + x_var = self.emission_variances.read(t) + w_x = self.emission_weights.read(t) + + def transition_term(): + return (tf.square(self.transition_weights.read(t))/ + self.transition_variances.read(t+1)) + + prec = 1./z_var + tf.square(w_x)/x_var + mess_prec + prec += tf.cond(tf.less(t, self.num_timesteps-1), + transition_term, lambda: 0.) + mean = x_t*(w_x/x_var) + mess_mean + mean += tf.cond(tf.greater(t, 0), + lambda: z_prev*(self.transition_weights.read(t-1)/z_var), + lambda: 0.) + mean = tf.reshape(mean / prec, [batch_size]) + scale = tf.reshape(tf.sqrt(1./prec), [batch_size]) + return tfd.Normal(loc=mean, scale=scale) + + def _compute_backwards_messages(self, xs): + """Computes the backwards messages used in smoothing.""" + batch_size = tf.shape(xs)[1] + num_xs = tf.shape(xs)[0] + until_t = self.num_timesteps - num_xs + xs = tf.TensorArray(dtype=xs.dtype, + size=num_xs, + dynamic_size=False, + clear_after_read=True).unstack(xs) + messages_ta = tf.TensorArray(dtype=xs.dtype, + size=num_xs, + dynamic_size=False, + clear_after_read=False) + + def compute_message(t, prev_mean, prev_prec, messages_ta): + """Computes one step of the backwards messages.""" + z_var = self.transition_variances.read(t) + w_z = self.transition_weights.read(t-1) + x_var = self.emission_variances.read(t) + w_x = self.emission_weights.read(t) + cur_x = xs.read(t - until_t) + + # If it isn't the first message, add the terms from the transition. + def transition_term(): + return (tf.square(self.transition_weights.read(t))/ + self.transition_variances.read(t+1)) + + unary_prec = 1/z_var + tf.square(w_x)/x_var + unary_prec += tf.cond(tf.less(t, self.num_timesteps-1), + transition_term, lambda: 0.) + + unary_mean = (w_x / x_var) * cur_x + pairwise_prec = w_z / z_var + + next_prec = -tf.square(pairwise_prec)/(unary_prec + prev_prec) + next_mean = (pairwise_prec * (unary_mean + prev_mean) / + (unary_prec + prev_prec)) + next_prec = tf.reshape(next_prec, [batch_size]) + next_mean = tf.reshape(next_mean, [batch_size]) + messages_ta = messages_ta.write(t - until_t, + tf.stack([next_mean, next_prec])) + return t-1, next_mean, next_prec, messages_ta + + def pred(t, *unused_args): + return tf.greater_equal(t, until_t) + + init_prec = tf.zeros([batch_size], dtype=xs.dtype) + init_mean = tf.zeros([batch_size], dtype=xs.dtype) + t0 = tf.constant(self.num_timesteps - 1, dtype=tf.int32) + + outs = tf.while_loop(pred, compute_message, + (t0, init_mean, init_prec, messages_ta)) + messages = outs[-1] + return messages + + def lookahead(self, t, z_prev): + """Compute the 'lookahead' distribution, p(x_{t:T} | z_{t-1}). + + Args: + t: A scalar Tensor int, the current timestep. Must be at least 1. + z_prev: The latent state at time t-1. A Tensor of shape [batch_size]. + Returns: + p(x_{t:T} | z_{t-1}) as a multivariate normal distribution. + """ + z_prev = tf.convert_to_tensor(z_prev) + sigma_zx = self.sigma_zx[t-1, t:] + z_var = self.sigma_z[t-1, t-1] + mean = tf.einsum("i,j->ij", z_prev, sigma_zx) / z_var + variance = (self.sigma_x[t:, t:] - + tf.einsum("i,j->ij", sigma_zx, sigma_zx) / z_var) + return tfd.MultivariateNormalFullCovariance( + loc=mean, covariance_matrix=variance) + + def likelihood(self, xs): + """Compute the true marginal likelihood of the data. 
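+
+    The observations are jointly Gaussian with the covariance matrix computed
+    in _compute_covariances, so the marginal likelihood is evaluated in
+    closed form.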
+ + Args: + xs: The observations, a [num_timesteps, batch_size] float Tensor. + Returns: + likelihoods: A [batch_size] float Tensor representing the likelihood of + each sequence of observations in the batch. + """ + return self.obs_dist.log_prob(tf.transpose(xs)) + + +class TrainableGaussianHMM(GaussianHMM, base.ELBOTrainableSequenceModel): + """An interface between importance-sampling training methods and the GHMM.""" + + def __init__(self, + num_timesteps, + proposal_type, + transition_variances=1., + emission_variances=1., + transition_weights=1., + emission_weights=1., + random_seed=None, + dtype=tf.float32): + """Constructs a trainable Gaussian HMM. + + Args: + num_timesteps: A python int, the number of timesteps in the model. + proposal_type: The type of proposal to use in the importance sampling + setup. Could be "filtering", "smoothing", "prior", "true-filtering", + or "true-smoothing". If "true-filtering" or "true-smoothing" are + selected, then the true filtering or smoothing distributions are used to + propose new states. If "learned-filtering" is selected then a + distribution with learnable parameters is used. Specifically at each + timestep the proposal is Gaussian with mean that is a learnable linear + function of the previous state and current observation. The log variance + is a per-timestep learnable constant. "learned-smoothing" is similar, + but the mean is a learnable linear function of the previous state and + all future observations. Note that this proposal class includes the true + posterior. If "prior" is selected then states are proposed from the + model's prior. + transition_variances: The variance of p(z_t | z_t-1). Can be a scalar, + setting all variances to be the same, or a Tensor of shape + [num_timesteps]. + emission_variances: The variance of p(x_t | z_t). Can be a scalar, + setting all variances to be the same, or a Tensor of shape + [num_timesteps]. + transition_weights: The weight that defines the linear function that + produces the mean of z_t given z_{t-1}. Can be a scalar, setting + all weights to be the same, or a Tensor of shape [num_timesteps-1]. + emission_weights: The weight that defines the linear function that + produces the mean of x_t given z_t. Can be a scalar, setting + all weights to be the same, or a Tensor of shape [num_timesteps]. + random_seed: A seed for the proposal sampling, mainly useful for testing. + dtype: The datatype of the state. 
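+
+    Note that the "filtering" and "smoothing" options select the learned
+    proposals described above, while "true-filtering" and "true-smoothing"
+    propose from the model's exact filtering and smoothing distributions.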
+ """ + super(TrainableGaussianHMM, self).__init__( + num_timesteps, transition_variances, emission_variances, + transition_weights, emission_weights, dtype=dtype) + self.random_seed = random_seed + assert proposal_type in ["filtering", "smoothing", "prior", + "true-filtering", "true-smoothing"] + if proposal_type == "true-filtering": + self.proposal = self._filtering_proposal + elif proposal_type == "true-smoothing": + self.proposal = self._smoothing_proposal + elif proposal_type == "prior": + self.proposal = self.transition + elif proposal_type == "filtering": + self._learned_proposal_fn = base.NonstationaryLinearDistribution( + num_timesteps, inputs_per_timestep=[1] + [2] * (num_timesteps-1)) + self.proposal = self._learned_filtering_proposal + elif proposal_type == "smoothing": + inputs_per_timestep = [num_timesteps] + [num_timesteps - t + for t in range(num_timesteps-1)] + self._learned_proposal_fn = base.NonstationaryLinearDistribution( + num_timesteps, inputs_per_timestep=inputs_per_timestep) + self.proposal = self._learned_smoothing_proposal + + def set_observations(self, xs, seq_lengths): + """Sets the observations and stores the backwards messages.""" + # Squeeze out data dimension since everything is 1-d. + xs = tf.squeeze(xs) + self.batch_size = tf.shape(xs)[1] + super(TrainableGaussianHMM, self).set_observations(xs, seq_lengths) + self.messages = self._compute_backwards_messages(xs[1:]) + + def zero_state(self, batch_size, dtype): + return tf.zeros([batch_size], dtype=dtype) + + def propose_and_weight(self, state, t): + """Computes the next state and log weights for the GHMM.""" + state_shape = tf.shape(state) + xt = self.observations[t] + p_zt = self.transition(t, state) + q_zt = self.proposal(t, state) + zt = q_zt.sample(seed=self.random_seed) + zt = tf.reshape(zt, state_shape) + p_xt_given_zt = self.emission(t, zt) + log_p_zt = p_zt.log_prob(zt) + log_q_zt = q_zt.log_prob(zt) + log_p_xt_given_zt = p_xt_given_zt.log_prob(xt) + weight = log_p_zt + log_p_xt_given_zt - log_q_zt + return weight, zt + + def _filtering_proposal(self, t, state): + """Uses the stored observations to compute the filtering distribution.""" + cur_x = self.observations[t] + return self.filtering(t, state, cur_x) + + def _smoothing_proposal(self, t, state): + """Uses the stored messages to compute the smoothing distribution.""" + mess_mean, mess_prec = tf.cond( + tf.less(t, self.num_timesteps-1), + lambda: tf.unstack(self.messages.read(t)), + lambda: [tf.zeros([self.batch_size]), tf.zeros([self.batch_size])]) + return self._smoothing_from_message(t, state, self.observations[t], + mess_mean, mess_prec) + + def _learned_filtering_proposal(self, t, state): + cur_x = self.observations[t] + inputs = tf.cond(tf.greater(t, 0), + lambda: tf.stack([state, cur_x], axis=0), + lambda: cur_x[tf.newaxis, :]) + return self._learned_proposal_fn(t, inputs) + + def _learned_smoothing_proposal(self, t, state): + xs = self.observations_ta.gather(tf.range(t, self.num_timesteps)) + inputs = tf.cond(tf.greater(t, 0), + lambda: tf.concat([state[tf.newaxis, :], xs], axis=0), + lambda: xs) + return self._learned_proposal_fn(t, inputs) diff --git a/models/research/fivo/fivo/models/ghmm_test.py b/models/research/fivo/fivo/models/ghmm_test.py new file mode 100644 index 0000000000000000000000000000000000000000..15a03c0c7abeae09bd1cfc87f917ef53ecac205f --- /dev/null +++ b/models/research/fivo/fivo/models/ghmm_test.py @@ -0,0 +1,313 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for fivo.models.ghmm""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf + +from fivo.models.ghmm import GaussianHMM +from fivo.models.ghmm import TrainableGaussianHMM + + +class GHMMTest(tf.test.TestCase): + + def test_transition_no_weights(self): + with self.test_session() as sess: + ghmm = GaussianHMM(3, + transition_variances=[1., 2., 3.]) + prev_z = tf.constant([1., 2.], dtype=tf.float32) + z0 = ghmm.transition(0, prev_z) + z1 = ghmm.transition(1, prev_z) + z2 = ghmm.transition(2, prev_z) + outs = sess.run([z0.mean(), z0.variance(), + z1.mean(), z1.variance(), + z2.mean(), z2.variance()]) + self.assertAllClose(outs, [[0., 0.], [1., 1.], + [1., 2.], [2., 2.], + [1., 2.], [3., 3.]]) + + def test_transition_with_weights(self): + with self.test_session() as sess: + ghmm = GaussianHMM(3, + transition_variances=[1., 2., 3.], + transition_weights=[2., 3.]) + prev_z = tf.constant([1., 2.], dtype=tf.float32) + z0 = ghmm.transition(0, prev_z) + z1 = ghmm.transition(1, prev_z) + z2 = ghmm.transition(2, prev_z) + outs = sess.run([z0.mean(), z0.variance(), + z1.mean(), z1.variance(), + z2.mean(), z2.variance()]) + self.assertAllClose(outs, [[0., 0.], [1., 1.], + [2., 4.], [2., 2.], + [3., 6.], [3., 3.]]) + + def test_emission_no_weights(self): + with self.test_session() as sess: + ghmm = GaussianHMM(3, emission_variances=[1., 2., 3.]) + z = tf.constant([1., 2.], dtype=tf.float32) + x0 = ghmm.emission(0, z) + x1 = ghmm.emission(1, z) + x2 = ghmm.emission(2, z) + outs = sess.run([x0.mean(), x0.variance(), + x1.mean(), x1.variance(), + x2.mean(), x2.variance()]) + self.assertAllClose(outs, [[1., 2.], [1., 1.], + [1., 2.], [2., 2.], + [1., 2.], [3., 3.]]) + + def test_emission_with_weights(self): + with self.test_session() as sess: + ghmm = GaussianHMM(3, + emission_variances=[1., 2., 3.], + emission_weights=[1., 2., 3.]) + z = tf.constant([1., 2.], dtype=tf.float32) + x0 = ghmm.emission(0, z) + x1 = ghmm.emission(1, z) + x2 = ghmm.emission(2, z) + outs = sess.run([x0.mean(), x0.variance(), + x1.mean(), x1.variance(), + x2.mean(), x2.variance()]) + self.assertAllClose(outs, [[1., 2.], [1., 1.], + [2., 4.], [2., 2.], + [3., 6.], [3., 3.]]) + + def test_filtering_no_weights(self): + with self.test_session() as sess: + ghmm = GaussianHMM(3, + transition_variances=[1., 2., 3.], + emission_variances=[4., 5., 6.]) + z_prev = tf.constant([1., 2.], dtype=tf.float32) + x_cur = tf.constant([3., 4.], dtype=tf.float32) + expected_outs = [[[3./5., 4./5.], [4./5., 4./5.]], + [[11./7., 18./7.], [10./7., 10./7.]], + [[5./3., 8./3.], [2., 2.]]] + f_post_0 = ghmm.filtering(0, z_prev, x_cur) + f_post_1 = ghmm.filtering(1, z_prev, x_cur) + f_post_2 = ghmm.filtering(2, z_prev, x_cur) + outs = sess.run([[f_post_0.mean(), f_post_0.variance()], + [f_post_1.mean(), 
f_post_1.variance()], + [f_post_2.mean(), f_post_2.variance()]]) + self.assertAllClose(expected_outs, outs) + + def test_filtering_with_weights(self): + with self.test_session() as sess: + ghmm = GaussianHMM(3, + transition_variances=[1., 2., 3.], + emission_variances=[4., 5., 6.], + transition_weights=[7., 8.], + emission_weights=[9., 10., 11]) + z_prev = tf.constant([1., 2.], dtype=tf.float32) + x_cur = tf.constant([3., 4.], dtype=tf.float32) + expected_outs = [[[27./85., 36./85.], [4./85., 4./85.]], + [[95./205., 150./205.], [10./205., 10./205.]], + [[147./369., 228./369.], [18./369., 18./369.]]] + f_post_0 = ghmm.filtering(0, z_prev, x_cur) + f_post_1 = ghmm.filtering(1, z_prev, x_cur) + f_post_2 = ghmm.filtering(2, z_prev, x_cur) + outs = sess.run([[f_post_0.mean(), f_post_0.variance()], + [f_post_1.mean(), f_post_1.variance()], + [f_post_2.mean(), f_post_2.variance()]]) + self.assertAllClose(expected_outs, outs) + + def test_smoothing(self): + with self.test_session() as sess: + ghmm = GaussianHMM(3, + transition_variances=[1., 2., 3.], + emission_variances=[4., 5., 6.]) + z_prev = tf.constant([1., 2.], dtype=tf.float32) + xs = tf.constant([[1., 2.], + [3., 4.], + [5., 6.]], dtype=tf.float32) + s_post1 = ghmm.smoothing(0, z_prev, xs) + outs = sess.run([s_post1.mean(), s_post1.variance()]) + expected_outs = [[281./421., 410./421.], [292./421., 292./421.]] + self.assertAllClose(expected_outs, outs) + + expected_outs = [[149./73., 222./73.], [90./73., 90./73.]] + s_post2 = ghmm.smoothing(1, z_prev, xs[1:]) + outs = sess.run([s_post2.mean(), s_post2.variance()]) + self.assertAllClose(expected_outs, outs) + + s_post3 = ghmm.smoothing(2, z_prev, xs[2:]) + outs = sess.run([s_post3.mean(), s_post3.variance()]) + expected_outs = [[7./3., 10./3.], [2., 2.]] + self.assertAllClose(expected_outs, outs) + + def test_smoothing_with_weights(self): + with self.test_session() as sess: + x_weight = np.array([4, 5, 6, 7], dtype=np.float32) + sigma_x = np.array([5, 6, 7, 8], dtype=np.float32) + z_weight = np.array([1, 2, 3], dtype=np.float32) + sigma_z = np.array([1, 2, 3, 4], dtype=np.float32) + z_prev = np.array([1, 2], dtype=np.float32) + batch_size = 2 + xs = np.array([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=np.float32) + + z_cov, x_cov, z_x_cov = self._compute_covariance_matrices( + x_weight, z_weight, sigma_x, sigma_z) + + expected_outs = [] + # Compute mean and variance for z_0 when we don't condition + # on previous zs. + sigma_12 = z_x_cov[0, :] + sigma_12_22 = np.dot(sigma_12, np.linalg.inv(x_cov)) + mean = np.dot(sigma_12_22, xs) + variance = np.squeeze(z_cov[0, 0] - np.dot(sigma_12_22, sigma_12)) + expected_outs.append([mean, np.tile(variance, [batch_size])]) + + # Compute mean and variance for remaining z_ts. 
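+      # Each case below is the standard Gaussian conditioning identity:
+      #   mean = Sigma_12 Sigma_22^{-1} [z_{t-1}; x_{t:T}],
+      #   var = Sigma_11 - Sigma_12 Sigma_22^{-1} Sigma_12^T,
+      # where block 1 is z_t and block 2 is (z_{t-1}, x_{t:T}).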
+      for t in range(1, 4):
+        sigma_12 = np.concatenate([[z_cov[t, t - 1]], z_x_cov[t, t:]])
+        sigma_22 = np.vstack((
+            np.hstack((z_cov[t-1, t-1], z_x_cov[t-1, t:])),
+            np.hstack((np.transpose([z_x_cov[t-1, t:]]), x_cov[t:, t:]))
+        ))
+        sigma_12_22 = np.dot(sigma_12, np.linalg.inv(sigma_22))
+        mean = np.dot(sigma_12_22, np.vstack((z_prev, xs[t:])))
+        variance = np.squeeze(z_cov[t, t] - np.dot(sigma_12_22, sigma_12))
+        expected_outs.append([mean, np.tile(variance, [batch_size])])
+
+      ghmm = GaussianHMM(4,
+                         transition_variances=sigma_z,
+                         emission_variances=sigma_x,
+                         transition_weights=z_weight,
+                         emission_weights=x_weight)
+      out_dists = [ghmm.smoothing(t, z_prev, xs[t:]) for t in range(0, 4)]
+      outs = [[d.mean(), d.variance()] for d in out_dists]
+      run_outs = sess.run(outs)
+      self.assertAllClose(expected_outs, run_outs)
+
+  def test_covariance_matrices(self):
+    with self.test_session() as sess:
+      x_weight = np.array([4, 5, 6, 7], dtype=np.float32)
+      sigma_x = np.array([5, 6, 7, 8], dtype=np.float32)
+      z_weight = np.array([1, 2, 3], dtype=np.float32)
+      sigma_z = np.array([1, 2, 3, 4], dtype=np.float32)
+
+      z_cov, x_cov, z_x_cov = self._compute_covariance_matrices(
+          x_weight, z_weight, sigma_x, sigma_z)
+
+      ghmm = GaussianHMM(4,
+                         transition_variances=sigma_z,
+                         emission_variances=sigma_x,
+                         transition_weights=z_weight,
+                         emission_weights=x_weight)
+      self.assertAllClose(z_cov, sess.run(ghmm.sigma_z))
+      self.assertAllClose(x_cov, sess.run(ghmm.sigma_x))
+      self.assertAllClose(z_x_cov, sess.run(ghmm.sigma_zx))
+
+  def _compute_covariance_matrices(self, x_weight, z_weight, sigma_x, sigma_z):
+    # Create z covariance matrix from the definitions.
+    z_cov = np.zeros([4, 4])
+    z_cov[0, 0] = sigma_z[0]
+    for i in range(1, 4):
+      z_cov[i, i] = (z_cov[i - 1, i - 1] * np.square(z_weight[i - 1]) +
+                     sigma_z[i])
+    for i in range(4):
+      for j in range(4):
+        if i == j: continue
+        min_ind = min(i, j)
+        max_ind = max(i, j)
+        weights = np.prod(z_weight[min_ind:max_ind])
+        z_cov[i, j] = z_cov[min_ind, min_ind] * weights
+    # Compute the x covariance matrix and the z-x covariance matrix.
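+    # Because x_t = x_weight[t] * z_t + noise with variance sigma_x[t], these
+    # satisfy Cov(x_i, x_j) = x_weight[i] * x_weight[j] * Cov(z_i, z_j)
+    # (plus sigma_x[i] on the diagonal) and Cov(z_i, x_j) = x_weight[j] *
+    # Cov(z_i, z_j).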
+ x_weights_outer = np.outer(x_weight, x_weight) + x_cov = x_weights_outer * z_cov + np.diag(sigma_x) + z_x_cov = x_weight * z_cov + return z_cov, x_cov, z_x_cov + + def test_lookahead(self): + x_weight = np.array([4, 5, 6, 7], dtype=np.float32) + sigma_x = np.array([5, 6, 7, 8], dtype=np.float32) + z_weight = np.array([1, 2, 3], dtype=np.float32) + sigma_z = np.array([1, 2, 3, 4], dtype=np.float32) + z_prev = np.array([1, 2], dtype=np.float32) + + with self.test_session() as sess: + z_cov, x_cov, z_x_cov = self._compute_covariance_matrices( + x_weight, z_weight, sigma_x, sigma_z) + + expected_outs = [] + for t in range(1, 4): + sigma_12 = z_x_cov[t-1, t:] + z_var = z_cov[t-1, t-1] + mean = np.outer(z_prev, sigma_12/z_var) + variance = x_cov[t:, t:] - np.outer(sigma_12, sigma_12)/ z_var + expected_outs.append([mean, variance]) + + ghmm = GaussianHMM(4, + transition_variances=sigma_z, + emission_variances=sigma_x, + transition_weights=z_weight, + emission_weights=x_weight) + out_dists = [ghmm.lookahead(t, z_prev) for t in range(1, 4)] + outs = [[d.mean(), d.covariance()] for d in out_dists] + run_outs = sess.run(outs) + self.assertAllClose(expected_outs, run_outs) + + +class TrainableGHMMTest(tf.test.TestCase): + + def test_filtering_proposal(self): + """Check that stashing the xs doesn't change the filtering distributions.""" + with self.test_session() as sess: + ghmm = TrainableGaussianHMM( + 3, "filtering", + transition_variances=[1., 2., 3.], + emission_variances=[4., 5., 6.], + transition_weights=[7., 8.], + emission_weights=[9., 10., 11]) + observations = tf.constant([[3., 4.], + [3., 4.], + [3., 4.]], dtype=tf.float32) + ghmm.set_observations(observations, [3, 3]) + z_prev = tf.constant([1., 2.], dtype=tf.float32) + + proposals = [ghmm._filtering_proposal(t, z_prev) for t in range(3)] + dist_params = [[p.mean(), p.variance()] for p in proposals] + + expected_outs = [[[27./85., 36./85.], [4./85., 4./85.]], + [[95./205., 150./205.], [10./205., 10./205.]], + [[147./369., 228./369.], [18./369., 18./369.]]] + self.assertAllClose(expected_outs, sess.run(dist_params)) + + def test_smoothing_proposal(self): + with self.test_session() as sess: + ghmm = TrainableGaussianHMM( + 3, "smoothing", + transition_variances=[1., 2., 3.], + emission_variances=[4., 5., 6.]) + xs = tf.constant([[1., 2.], + [3., 4.], + [5., 6.]], dtype=tf.float32) + ghmm.set_observations(xs, [3, 3]) + z_prev = tf.constant([1., 2.], dtype=tf.float32) + + proposals = [ghmm._smoothing_proposal(t, z_prev) for t in range(3)] + dist_params = [[p.mean(), p.variance()] for p in proposals] + + expected_outs = [[[281./421., 410./421.], [292./421., 292./421.]], + [[149./73., 222./73.], [90./73., 90./73.]], + [[7./3., 10./3.], [2., 2.]]] + self.assertAllClose(expected_outs, sess.run(dist_params)) + +if __name__ == "__main__": + tf.test.main() diff --git a/models/research/fivo/fivo/models/srnn.py b/models/research/fivo/fivo/models/srnn.py new file mode 100644 index 0000000000000000000000000000000000000000..cdfb560eedffccf8edf41dbab4e85bbd8bbfab46 --- /dev/null +++ b/models/research/fivo/fivo/models/srnn.py @@ -0,0 +1,587 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""SRNN classes.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from collections import namedtuple +import functools + +import sonnet as snt +import tensorflow as tf + +from fivo.models import base + + +SRNNState = namedtuple("SRNNState", "rnn_state latent_encoded") + + +class SRNN(object): + """Implementation of a Stochastic Recurrent Neural Network (SRNN). + + Introduced in "Sequential Neural Models with Stochastic Layers" + by Fraccaro et al. https://arxiv.org/pdf/1605.07571.pdf. + + The SRNN is a sequence model similar to an RNN that uses stochastic latent + variables to improve its representational power. It can be thought of as a + sequential analogue to the variational auto-encoder (VAE). + + The SRNN has a deterministic RNN as its backbone, represented by the + sequence of RNN hidden states h_t. The latent state is conditioned on + the deterministic RNN states and previous latent state. Unlike the VRNN, the + the RNN state is not conditioned on the previous latent state. The latent + states have a Markov structure and it is assumed that + p(z_t | z_{1:t-1}) = p(z_t | z_{t-1}). + + In this implementation of the SRNN the latent state z_t is Gaussian. The + model's prior over z_t (also called the transition distribution) is + distributed as Normal(mu_t, diag(sigma_t^2)) where mu_t and sigma_t are the + mean and standard deviation output from a fully connected network that accepts + the rnn hidden state h_t and previous latent state z_{t-1} as input. + + The emission distribution p(x_t|z_t, h_t) is conditioned on the latent state + z_t as well as the current RNN hidden state h_t via a fully connected network. + + To increase the modeling power of the SRNN, two additional networks are + used to extract features from the data and the latent state. Those networks + are called data_encoder and latent_encoder respectively. + + For an example of how to call the SRNN's methods see sample_step. + + There are a few differences between this exposition and the paper. The main + goal was to be consistent with the VRNN code. A few components are renamed. + The backward RNN for approximating the posterior, g_phi_a in the paper, is the + rev_rnn_cell. The forward RNN that conditions the latent distribution, d in + the paper, is the rnn_cell. The paper doesn't name the NN's that serve as + feature extractors, and we name them here as the data_encoder and + latent_encoder. + """ + + def __init__(self, + rnn_cell, + data_encoder, + latent_encoder, + transition, + emission, + random_seed=None): + """Create a SRNN. + + Args: + rnn_cell: A subclass of tf.nn.rnn_cell.RNNCell that will form the + deterministic backbone of the SRNN. The inputs to the RNN will be the + the encoded input of the current timestep, a Tensor of shape + [batch_size, encoded_data_size]. + data_encoder: A callable that accepts a batch of data x_t and + 'encodes' it, e.g. runs it through a fully connected network. 
Must + accept as argument the inputs x_t, a Tensor of the shape + [batch_size, data_size] and return a Tensor of shape + [batch_size, encoded_data_size]. This callable will be called multiple + times in the SRNN cell so if scoping is not handled correctly then + multiple copies of the variables in this network could be made. It is + recommended to use a snt.nets.MLP module, which takes care of this for + you. + latent_encoder: A callable that accepts a latent state z_t and + 'encodes' it, e.g. runs it through a fully connected network. Must + accept as argument a Tensor of shape [batch_size, latent_size] and + return a Tensor of shape [batch_size, encoded_latent_size]. + This callable must also have the property 'output_size' defined, + returning encoded_latent_size. + transition: A callable that implements the transition distribution + p(z_t|h_t, z_t-1). Must accept as argument the previous RNN hidden state + and previous encoded latent state then return a tf.distributions.Normal + distribution conditioned on the input. + emission: A callable that implements the emission distribution + p(x_t|z_t, h_t). Must accept as arguments the encoded latent state + and the RNN hidden state and return a subclass of + tf.distributions.Distribution that can be used to evaluate the logprob + of the targets. + random_seed: The seed for the random ops. Sets the seed for sample_step. + """ + self.random_seed = random_seed + self.rnn_cell = rnn_cell + self.data_encoder = data_encoder + self.latent_encoder = latent_encoder + self.encoded_z_size = latent_encoder.output_size + self.state_size = (self.rnn_cell.state_size) + self._transition = transition + self._emission = emission + + def zero_state(self, batch_size, dtype): + """The initial state of the SRNN. + + Contains the initial state of the RNN and the inital encoded latent. + + Args: + batch_size: The batch size. + dtype: The data type of the SRNN. + Returns: + zero_state: The initial state of the SRNN. + """ + return SRNNState( + rnn_state=self.rnn_cell.zero_state(batch_size, dtype), + latent_encoded=tf.zeros( + [batch_size, self.latent_encoder.output_size], dtype=dtype)) + + def run_rnn(self, prev_rnn_state, inputs): + """Runs the deterministic RNN for one step. + + Args: + prev_rnn_state: The state of the RNN from the previous timestep. + inputs: A Tensor of shape [batch_size, data_size], the current inputs to + the model. Most often this is x_{t-1}, the previous token in the + observation sequence. + Returns: + rnn_out: The output of the RNN. + rnn_state: The new state of the RNN. + """ + rnn_inputs = self.data_encoder(tf.to_float(inputs)) + rnn_out, rnn_state = self.rnn_cell(rnn_inputs, prev_rnn_state) + return rnn_out, rnn_state + + def transition(self, rnn_out, prev_latent_encoded): + """Computes the transition distribution p(z_t|h_t, z_{t-1}). + + Note that p(z_t | h_t, z_{t-1}) = p(z_t| z_{t-1}, x_{1:t-1}) + + Args: + rnn_out: The output of the rnn for the current timestep. + prev_latent_encoded: Float Tensor of shape + [batch_size, encoded_latent_size], the previous latent state z_{t-1} + run through latent_encoder. + Returns: + p(z_t | h_t): A normal distribution with event shape + [batch_size, latent_size]. + """ + return self._transition(rnn_out, prev_latent_encoded) + + def emission(self, latent, rnn_out): + """Computes the emission distribution p(x_t | z_t, h_t). + + Note that p(x_t | z_t, h_t) = p(x_t | z_t, x_{1:t-1}) + + Args: + latent: The stochastic latent state z_t. + rnn_out: The output of the rnn for the current timestep. 
+ Returns: + p(x_t | z_t, h_t): A distribution with event shape + [batch_size, data_size]. + latent_encoded: The latent state encoded with latent_encoder. Should be + passed to transition() on the next timestep. + """ + latent_encoded = self.latent_encoder(latent) + return self._emission(latent_encoded, rnn_out), latent_encoded + + def sample_step(self, prev_state, inputs, unused_t): + """Samples one output from the model. + + Args: + prev_state: The previous state of the model, a SRNNState containing the + previous rnn state and the previous encoded latent. + inputs: A Tensor of shape [batch_size, data_size], the current inputs to + the model. Most often this is x_{t-1}, the previous token in the + observation sequence. + unused_t: The current timestep. Not used currently. + Returns: + new_state: The next state of the model, a SRNNState. + xt: A float Tensor of shape [batch_size, data_size], an output sampled + from the emission distribution. + """ + rnn_out, rnn_state = self.run_rnn(prev_state.rnn_state, + inputs) + p_zt = self.transition(rnn_out, prev_state.latent_encoded) + zt = p_zt.sample(seed=self.random_seed) + p_xt_given_zt, latent_encoded = self.emission(zt, rnn_out) + xt = p_xt_given_zt.sample(seed=self.random_seed) + new_state = SRNNState(rnn_state=rnn_state, latent_encoded=latent_encoded) + return new_state, tf.to_float(xt) + +# pylint: disable=invalid-name +# pylint thinks this is a top-level constant. +TrainableSRNNState = namedtuple("TrainableSRNNState", + SRNNState._fields + ("rnn_out",)) +# pylint: enable=g-invalid-name + + +class TrainableSRNN(SRNN, base.ELBOTrainableSequenceModel): + """A SRNN subclass with proposals and methods for training and evaluation. + + This class adds proposals used for training with importance-sampling based + methods such as the ELBO. The model can be configured to propose from one + of three proposals: a learned filtering proposal, a learned smoothing + proposal, or the prior (i.e. the transition distribution). + + As described in the SRNN paper, the learned filtering proposal is + parameterized by a fully connected neural network that accepts as input the + current target x_t and the current rnn output h_t. The learned smoothing + proposal is also given the hidden state of an RNN run in reverse over the + inputs, so as to incorporate information about future observations. + + All learned proposals use the 'res_q' parameterization, meaning that instead + of directly producing the mean of z_t, the proposal network predicts the + 'residual' from the prior's mean. This is explored more in section 3.3 of + https://arxiv.org/pdf/1605.07571.pdf. + + During training, the latent state z_t is sampled from the proposal and the + reparameterization trick is used to provide low-variance gradients. + + Note that the SRNN paper refers to the proposals as the approximate posterior, + but we match the VRNN convention of referring to it as the encoder. + """ + + def __init__(self, + rnn_cell, + data_encoder, + latent_encoder, + transition, + emission, + proposal_type, + proposal=None, + rev_rnn_cell=None, + tilt=None, + random_seed=None): + """Create a trainable RNN. + + Args: + rnn_cell: A subclass of tf.nn.rnn_cell.RNNCell that will form the + deterministic backbone of the SRNN. The inputs to the RNN will be the + the encoded input of the current timestep, a Tensor of shape + [batch_size, encoded_data_size]. + data_encoder: A callable that accepts a batch of data x_t and + 'encodes' it, e.g. runs it through a fully connected network. 
Must + accept as argument the inputs x_t, a Tensor of the shape + [batch_size, data_size] and return a Tensor of shape + [batch_size, encoded_data_size]. This callable will be called multiple + times in the SRNN cell so if scoping is not handled correctly then + multiple copies of the variables in this network could be made. It is + recommended to use a snt.nets.MLP module, which takes care of this for + you. + latent_encoder: A callable that accepts a latent state z_t and + 'encodes' it, e.g. runs it through a fully connected network. Must + accept as argument a Tensor of shape [batch_size, latent_size] and + return a Tensor of shape [batch_size, encoded_latent_size]. + This callable must also have the property 'output_size' defined, + returning encoded_latent_size. + transition: A callable that implements the transition distribution + p(z_t|h_t, z_t-1). Must accept as argument the previous RNN hidden state + and previous encoded latent state then return a tf.distributions.Normal + distribution conditioned on the input. + emission: A callable that implements the emission distribution + p(x_t|z_t, h_t). Must accept as arguments the encoded latent state + and the RNN hidden state and return a subclass of + tf.distributions.Distribution that can be used to evaluate the logprob + of the targets. + proposal_type: A string indicating the type of proposal to use. Can + be either "filtering", "smoothing", or "prior". When proposal_type is + "filtering" or "smoothing", proposal must be provided. When + proposal_type is "smoothing", rev_rnn_cell must also be provided. + proposal: A callable that implements the proposal q(z_t| h_t, x_{1:T}). + If proposal_type is "filtering" then proposal must accept as arguments + the current rnn output, the encoded target of the current timestep, + and the mean of the prior. If proposal_type is "smoothing" then + in addition to the current rnn output and the mean of the prior + proposal must accept as arguments the output of the reverse rnn. + proposal should return a tf.distributions.Normal distribution + conditioned on its inputs. If proposal_type is "prior" this argument is + ignored. + rev_rnn_cell: A subclass of tf.nn.rnn_cell.RNNCell that will aggregate + forward rnn outputs in the reverse direction. The inputs to the RNN + will be the encoded reverse input of the current timestep, a Tensor of + shape [batch_size, encoded_data_size]. + tilt: A callable that implements the log of a positive tilting function + (ideally approximating log p(x_{t+1}|z_t, h_t). Must accept as arguments + the encoded latent state and the RNN hidden state and return a subclass + of tf.distributions.Distribution that can be used to evaluate the + logprob of x_{t+1}. Optionally, None and then no tilt is used. + random_seed: The seed for the random ops. Sets the seed for sample_step + and __call__. + """ + super(TrainableSRNN, self).__init__( + rnn_cell, data_encoder, latent_encoder, + transition, emission, random_seed=random_seed) + self.rev_rnn_cell = rev_rnn_cell + self._tilt = tilt + assert proposal_type in ["filtering", "smoothing", "prior"] + self._proposal = proposal + self.proposal_type = proposal_type + if proposal_type != "prior": + assert proposal, "If not proposing from the prior, must provide proposal." + if proposal_type == "smoothing": + assert rev_rnn_cell, "Must provide rev_rnn_cell for smoothing proposal." 
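+
+  # For reference: propose_and_weight (below) computes the per-timestep
+  # importance weight
+  #   log w_t = log p(z_t | h_t, z_{t-1}) + log p(x_t | z_t, h_t) - log q(z_t),
+  # where q is the proposal selected by proposal_type. When a tilt r is
+  # provided, log r(x_{t+1} | z_t, h_t) - log r(x_t | z_{t-1}, h_{t-1}) is
+  # added, so the r terms telescope across timesteps.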
+ + def zero_state(self, batch_size, dtype): + super_state = super(TrainableSRNN, self).zero_state(batch_size, dtype) + return TrainableSRNNState( + rnn_out=tf.zeros([batch_size, self.rnn_cell.output_size], dtype=dtype), + **super_state._asdict()) + + def set_observations(self, observations, seq_lengths): + """Stores the model's observations. + + Stores the observations (inputs and targets) in TensorArrays and precomputes + things for later like the reverse RNN output and encoded targets. + + Args: + observations: The observations of the model, a tuple containing two + Tensors of shape [max_seq_len, batch_size, data_size]. The Tensors + should be the inputs and targets, respectively. + seq_lengths: An int Tensor of shape [batch_size] containing the length + of each sequence in observations. + """ + inputs, targets = observations + self.seq_lengths = seq_lengths + self.max_seq_len = tf.reduce_max(seq_lengths) + self.targets_ta = base.ta_for_tensor(targets, clear_after_read=False) + targets_encoded = base.encode_all(targets, self.data_encoder) + self.targets_encoded_ta = base.ta_for_tensor(targets_encoded, + clear_after_read=False) + inputs_encoded = base.encode_all(inputs, self.data_encoder) + rnn_out, _ = tf.nn.dynamic_rnn(self.rnn_cell, + inputs_encoded, + time_major=True, + dtype=tf.float32, + scope="forward_rnn") + self.rnn_ta = base.ta_for_tensor(rnn_out, + clear_after_read=False) + if self.rev_rnn_cell: + targets_and_rnn_out = tf.concat([rnn_out, targets_encoded], 2) + reversed_targets_and_rnn_out = tf.reverse_sequence( + targets_and_rnn_out, seq_lengths, seq_axis=0, batch_axis=1) + # Compute the reverse rnn over the targets. + reverse_rnn_out, _ = tf.nn.dynamic_rnn(self.rev_rnn_cell, + reversed_targets_and_rnn_out, + time_major=True, + dtype=tf.float32, + scope="reverse_rnn") + reverse_rnn_out = tf.reverse_sequence(reverse_rnn_out, seq_lengths, + seq_axis=0, batch_axis=1) + self.reverse_rnn_ta = base.ta_for_tensor(reverse_rnn_out, + clear_after_read=False) + + def _filtering_proposal(self, rnn_out, prev_latent_encoded, prior, t): + """Computes the filtering proposal distribution.""" + return self._proposal(rnn_out, + prev_latent_encoded, + self.targets_encoded_ta.read(t), + prior_mu=prior.mean()) + + def _smoothing_proposal(self, rnn_out, prev_latent_encoded, prior, t): + """Computes the smoothing proposal distribution.""" + return self._proposal(rnn_out, + prev_latent_encoded, + smoothing_tensors=[self.reverse_rnn_ta.read(t)], + prior_mu=prior.mean()) + + def proposal(self, rnn_out, prev_latent_encoded, prior, t): + """Computes the proposal distribution specified by proposal_type. + + Args: + rnn_out: The output of the rnn for the current timestep. + prev_latent_encoded: Float Tensor of shape + [batch_size, encoded_latent_size], the previous latent state z_{t-1} + run through latent_encoder. + prior: A tf.distributions.Normal distribution representing the prior + over z_t, p(z_t | z_{1:t-1}, x_{1:t-1}). Used for 'res_q'. + t: A scalar int Tensor, the current timestep. 
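+    Returns:
+      A tf.distributions.Normal distribution over z_t, as determined by
+      proposal_type.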
+ """ + if self.proposal_type == "filtering": + return self._filtering_proposal(rnn_out, prev_latent_encoded, prior, t) + elif self.proposal_type == "smoothing": + return self._smoothing_proposal(rnn_out, prev_latent_encoded, prior, t) + elif self.proposal_type == "prior": + return self.transition(rnn_out, prev_latent_encoded) + + def tilt(self, rnn_out, latent_encoded, targets): + r_func = self._tilt(rnn_out, latent_encoded) + return tf.reduce_sum(r_func.log_prob(targets), axis=-1) + + def propose_and_weight(self, state, t): + """Runs the model and computes importance weights for one timestep. + + Runs the model and computes importance weights, sampling from the proposal + instead of the transition/prior. + + Args: + state: The previous state of the model, a TrainableSRNNState containing + the previous rnn state, the previous rnn outs, and the previous encoded + latent. + t: A scalar integer Tensor, the current timestep. + Returns: + weights: A float Tensor of shape [batch_size]. + new_state: The new state of the model. + """ + targets = self.targets_ta.read(t) + rnn_out = self.rnn_ta.read(t) + p_zt = self.transition(rnn_out, state.latent_encoded) + q_zt = self.proposal(rnn_out, state.latent_encoded, p_zt, t) + zt = q_zt.sample(seed=self.random_seed) + p_xt_given_zt, latent_encoded = self.emission(zt, rnn_out) + log_p_xt_given_zt = tf.reduce_sum(p_xt_given_zt.log_prob(targets), axis=-1) + log_p_zt = tf.reduce_sum(p_zt.log_prob(zt), axis=-1) + log_q_zt = tf.reduce_sum(q_zt.log_prob(zt), axis=-1) + weights = log_p_zt + log_p_xt_given_zt - log_q_zt + if self._tilt: + prev_log_r = tf.cond( + tf.greater(t, 0), + lambda: self.tilt(state.rnn_out, state.latent_encoded, targets), + lambda: 0.) # On the first step, prev_log_r = 0. + log_r = tf.cond( + tf.less(t + 1, self.max_seq_len), + lambda: self.tilt(rnn_out, latent_encoded, self.targets_ta.read(t+1)), + lambda: 0.) + # On the last step, log_r = 0. + log_r *= tf.to_float(t < self.seq_lengths - 1) + weights += log_r - prev_log_r + + # This reshape is required because the TensorArray reports different shapes + # than the initial state provides (where the first dimension is unknown). + # The difference breaks the while_loop. Reshape prevents the error. + rnn_out = tf.reshape(rnn_out, tf.shape(state.rnn_out)) + + new_state = TrainableSRNNState(rnn_out=rnn_out, + rnn_state=state.rnn_state, # unmodified + latent_encoded=latent_encoded) + return weights, new_state + + +_DEFAULT_INITIALIZERS = {"w": tf.contrib.layers.xavier_initializer(), + "b": tf.zeros_initializer()} + + +def create_srnn( + data_size, + latent_size, + emission_class, + rnn_hidden_size=None, + fcnet_hidden_sizes=None, + encoded_data_size=None, + encoded_latent_size=None, + sigma_min=0.0, + raw_sigma_bias=0.25, + emission_bias_init=0.0, + use_tilt=False, + proposal_type="filtering", + initializers=None, + random_seed=None): + """A factory method for creating SRNN cells. + + Args: + data_size: The dimension of the vectors that make up the data sequences. + latent_size: The size of the stochastic latent state of the SRNN. + emission_class: The class of the emission distribution. Can be either + ConditionalNormalDistribution or ConditionalBernoulliDistribution. + rnn_hidden_size: The hidden state dimension of the RNN that forms the + deterministic part of this SRNN. If None, then it defaults + to latent_size. + fcnet_hidden_sizes: A list of python integers, the size of the hidden + layers of the fully connected networks that parameterize the conditional + distributions of the SRNN. 
If None, then it defaults to one hidden + layer of size latent_size. + encoded_data_size: The size of the output of the data encoding network. If + None, defaults to latent_size. + encoded_latent_size: The size of the output of the latent state encoding + network. If None, defaults to latent_size. + sigma_min: The minimum value that the standard deviation of the + distribution over the latent state can take. + raw_sigma_bias: A scalar that is added to the raw standard deviation + output from the neural networks that parameterize the prior and + approximate posterior. Useful for preventing standard deviations close + to zero. + emission_bias_init: A bias to added to the raw output of the fully + connected network that parameterizes the emission distribution. Useful + for initalizing the mean of the distribution to a sensible starting point + such as the mean of the training data. Only used with Bernoulli generative + distributions. + use_tilt: If true, create a SRNN with a tilting function. + proposal_type: The type of proposal to use. Can be "filtering", "smoothing", + or "prior". + initializers: The variable intitializers to use for the fully connected + networks and RNN cell. Must be a dictionary mapping the keys 'w' and 'b' + to the initializers for the weights and biases. Defaults to xavier for + the weights and zeros for the biases when initializers is None. + random_seed: A random seed for the SRNN resampling operations. + Returns: + model: A TrainableSRNN object. + """ + if rnn_hidden_size is None: + rnn_hidden_size = latent_size + if fcnet_hidden_sizes is None: + fcnet_hidden_sizes = [latent_size] + if encoded_data_size is None: + encoded_data_size = latent_size + if encoded_latent_size is None: + encoded_latent_size = latent_size + if initializers is None: + initializers = _DEFAULT_INITIALIZERS + data_encoder = snt.nets.MLP( + output_sizes=fcnet_hidden_sizes + [encoded_data_size], + initializers=initializers, + name="data_encoder") + latent_encoder = snt.nets.MLP( + output_sizes=fcnet_hidden_sizes + [encoded_latent_size], + initializers=initializers, + name="latent_encoder") + transition = base.ConditionalNormalDistribution( + size=latent_size, + hidden_layer_sizes=fcnet_hidden_sizes, + sigma_min=sigma_min, + raw_sigma_bias=raw_sigma_bias, + initializers=initializers, + name="prior") + # Construct the emission distribution. + if emission_class == base.ConditionalBernoulliDistribution: + # For Bernoulli distributed outputs, we initialize the bias so that the + # network generates on average the mean from the training set. + emission_dist = functools.partial(base.ConditionalBernoulliDistribution, + bias_init=emission_bias_init) + else: + emission_dist = base.ConditionalNormalDistribution + emission = emission_dist( + size=data_size, + hidden_layer_sizes=fcnet_hidden_sizes, + initializers=initializers, + name="generative") + # Construct the proposal distribution. 
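+  # Both the filtering and smoothing proposals are NormalApproximatePosterior
+  # instances; the `smoothing` flag below distinguishes the two. No proposal
+  # network is constructed when proposing from the prior.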
+ if proposal_type in ["filtering", "smoothing"]: + proposal = base.NormalApproximatePosterior( + size=latent_size, + hidden_layer_sizes=fcnet_hidden_sizes, + sigma_min=sigma_min, + raw_sigma_bias=raw_sigma_bias, + initializers=initializers, + smoothing=(proposal_type == "smoothing"), + name="approximate_posterior") + else: + proposal = None + + if use_tilt: + tilt = emission_dist( + size=data_size, + hidden_layer_sizes=fcnet_hidden_sizes, + initializers=initializers, + name="tilt") + else: + tilt = None + + rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_hidden_size, + initializer=initializers["w"]) + rev_rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_hidden_size, + initializer=initializers["w"]) + return TrainableSRNN( + rnn_cell, data_encoder, latent_encoder, transition, + emission, proposal_type, proposal=proposal, rev_rnn_cell=rev_rnn_cell, + tilt=tilt, random_seed=random_seed) diff --git a/models/research/fivo/fivo/models/srnn_test.py b/models/research/fivo/fivo/models/srnn_test.py new file mode 100644 index 0000000000000000000000000000000000000000..39e10da134d3834babcf2eef1bb3e97fce12a07a --- /dev/null +++ b/models/research/fivo/fivo/models/srnn_test.py @@ -0,0 +1,105 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for fivo.models.srnn.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from fivo.models import base +from fivo.test_utils import create_srnn + + +class SrnnTest(tf.test.TestCase): + + def test_srnn_normal_emission(self): + self.run_srnn(base.ConditionalNormalDistribution, [-5.947752, -1.182961]) + + def test_srnn_bernoulli_emission(self): + self.run_srnn(base.ConditionalBernoulliDistribution, [-2.566631, -2.479234]) + + def run_srnn(self, generative_class, gt_log_alpha): + """Tests the SRNN. + + All test values are 'golden values' derived by running the code and copying + the output. + + Args: + generative_class: The class of the generative distribution to use. + gt_log_alpha: The ground-truth value of log alpha. 
+ """ + tf.set_random_seed(1234) + with self.test_session() as sess: + batch_size = 2 + model, inputs, targets, _ = create_srnn(generative_class=generative_class, + batch_size=batch_size, + data_lengths=(1, 1), + random_seed=1234) + zero_state = model.zero_state(batch_size=batch_size, dtype=tf.float32) + model.set_observations([inputs, targets], tf.convert_to_tensor([1, 1])) + model_out = model.propose_and_weight(zero_state, 0) + sess.run(tf.global_variables_initializer()) + log_alpha, state = sess.run(model_out) + self.assertAllClose( + state.latent_encoded, + [[0.591787, 1.310583], [-1.523136, 0.953918]]) + self.assertAllClose(state.rnn_out, + [[0.041675, -0.056038, -0.001823, 0.005224], + [0.042925, -0.044619, 0.021401, 0.016998]]) + self.assertAllClose(log_alpha, gt_log_alpha) + + def test_srnn_with_tilt_normal_emission(self): + self.run_srnn_with_tilt(base.ConditionalNormalDistribution, [-9.13577, -4.56725]) + + + def test_srnn_with_tilt_bernoulli_emission(self): + self.run_srnn_with_tilt(base.ConditionalBernoulliDistribution, [-4.617461, -5.079248]) + + def run_srnn_with_tilt(self, generative_class, gt_log_alpha): + """Tests the SRNN with a tilting function. + + All test values are 'golden values' derived by running the code and copying + the output. + + Args: + generative_class: The class of the generative distribution to use. + gt_log_alpha: The ground-truth value of log alpha. + """ + tf.set_random_seed(1234) + with self.test_session() as sess: + batch_size = 2 + model, inputs, targets, _ = create_srnn(generative_class=generative_class, + batch_size=batch_size, + data_lengths=(3, 2), + random_seed=1234, + use_tilt=True) + zero_state = model.zero_state(batch_size=batch_size, dtype=tf.float32) + model.set_observations([inputs, targets], tf.convert_to_tensor([3, 2])) + model_out = model.propose_and_weight(zero_state, 0) + sess.run(tf.global_variables_initializer()) + log_alpha, state = sess.run(model_out) + self.assertAllClose( + state.latent_encoded, + [[0.591787, 1.310583], [-1.523136, 0.953918]]) + self.assertAllClose(state.rnn_out, + [[0.041675, -0.056038, -0.001823, 0.005224], + [0.042925, -0.044619, 0.021401, 0.016998]]) + self.assertAllClose(log_alpha, gt_log_alpha) + +if __name__ == "__main__": + tf.test.main() diff --git a/models/research/fivo/fivo/models/vrnn.py b/models/research/fivo/fivo/models/vrnn.py new file mode 100644 index 0000000000000000000000000000000000000000..4e2552088c19f141a75d791d2be0d0a5238ed87c --- /dev/null +++ b/models/research/fivo/fivo/models/vrnn.py @@ -0,0 +1,572 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""VRNN classes.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from collections import namedtuple +import functools + +import sonnet as snt +import tensorflow as tf + +from fivo.models import base + + +VRNNState = namedtuple("VRNNState", "rnn_state latent_encoded") + + +class VRNN(object): + """Implementation of a Variational Recurrent Neural Network (VRNN). + + Introduced in "A Recurrent Latent Variable Model for Sequential data" + by Chung et al. https://arxiv.org/pdf/1506.02216.pdf. + + The VRNN is a sequence model similar to an RNN that uses stochastic latent + variables to improve its representational power. It can be thought of as a + sequential analogue to the variational auto-encoder (VAE). + + The VRNN has a deterministic RNN as its backbone, represented by the + sequence of RNN hidden states h_t. At each timestep, the RNN hidden state h_t + is conditioned on the previous sequence element, x_{t-1}, as well as the + latent state from the previous timestep, z_{t-1}. + + In this implementation of the VRNN the latent state z_t is Gaussian. The + model's prior over z_t (also called the transition distribution) is + distributed as Normal(mu_t, diag(sigma_t^2)) where mu_t and sigma_t are the + mean and standard deviation output from a fully connected network that accepts + the rnn hidden state h_t as input. + + The emission distribution p(x_t|z_t, h_t) is conditioned on the latent state + z_t as well as the current RNN hidden state h_t via a fully connected network. + + To increase the modeling power of the VRNN, two additional networks are + used to extract features from the data and the latent state. Those networks + are called data_encoder and latent_encoder respectively. + + For an example of how to call the VRNN's methods see sample_step. + + There are a few differences between this exposition and the paper. + First, the indexing scheme for h_t is different than the paper's -- what the + paper calls h_t we call h_{t+1}. This is the same notation used by Fraccaro + et al. to describe the VRNN in the paper linked above. Also, the VRNN paper + uses VAE terminology to refer to the different internal networks, so it + refers to the emission distribution as the decoder. This implementation also + renames the functions phi_x and phi_z in the paper to data_encoder and + latent_encoder. + """ + + def __init__(self, + rnn_cell, + data_encoder, + latent_encoder, + transition, + emission, + random_seed=None): + """Create a VRNN. + + Args: + rnn_cell: A subclass of tf.nn.rnn_cell.RNNCell that will form the + deterministic backbone of the VRNN. The inputs to the RNN will be the + encoded latent state of the previous timestep with shape + [batch_size, encoded_latent_size] as well as the encoded input of the + current timestep, a Tensor of shape [batch_size, encoded_data_size]. + data_encoder: A callable that accepts a batch of data x_t and + 'encodes' it, e.g. runs it through a fully connected network. Must + accept as argument the inputs x_t, a Tensor of the shape + [batch_size, data_size] and return a Tensor of shape + [batch_size, encoded_data_size]. This callable will be called multiple + times in the VRNN cell so if scoping is not handled correctly then + multiple copies of the variables in this network could be made. It is + recommended to use a snt.nets.MLP module, which takes care of this for + you. 
+ latent_encoder: A callable that accepts a latent state z_t and + 'encodes' it, e.g. runs it through a fully connected network. Must + accept as argument a Tensor of shape [batch_size, latent_size] and + return a Tensor of shape [batch_size, encoded_latent_size]. + This callable must also have the property 'output_size' defined, + returning encoded_latent_size. + transition: A callable that implements the transition distribution + p(z_t|h_t). Must accept as argument the previous RNN hidden state and + return a tf.distributions.Normal distribution conditioned on the input. + emission: A callable that implements the emission distribution + p(x_t|z_t, h_t). Must accept as arguments the encoded latent state + and the RNN hidden state and return a subclass of + tf.distributions.Distribution that can be used to evaluate the logprob + of the targets. + random_seed: The seed for the random ops. Sets the seed for sample_step. + """ + self.random_seed = random_seed + self.rnn_cell = rnn_cell + self.data_encoder = data_encoder + self.latent_encoder = latent_encoder + self.encoded_z_size = latent_encoder.output_size + self.state_size = (self.rnn_cell.state_size) + self._transition = transition + self._emission = emission + + def zero_state(self, batch_size, dtype): + """The initial state of the VRNN. + + Contains the initial state of the RNN and the inital encoded latent. + + Args: + batch_size: The batch size. + dtype: The data type of the VRNN. + Returns: + zero_state: The initial state of the VRNN. + """ + return VRNNState( + rnn_state=self.rnn_cell.zero_state(batch_size, dtype), + latent_encoded=tf.zeros( + [batch_size, self.latent_encoder.output_size], dtype=dtype)) + + def run_rnn(self, prev_rnn_state, prev_latent_encoded, inputs): + """Runs the deterministic RNN for one step. + + Args: + prev_rnn_state: The state of the RNN from the previous timestep. + prev_latent_encoded: Float Tensor of shape + [batch_size, encoded_latent_size], the previous latent state z_{t-1} + run through latent_encoder. + inputs: A Tensor of shape [batch_size, data_size], the current inputs to + the model. Most often this is x_{t-1}, the previous token in the + observation sequence. + Returns: + rnn_out: The output of the RNN. + rnn_state: The new state of the RNN. + """ + inputs_encoded = self.data_encoder(tf.to_float(inputs)) + rnn_inputs = tf.concat([inputs_encoded, prev_latent_encoded], axis=1) + rnn_out, rnn_state = self.rnn_cell(rnn_inputs, prev_rnn_state) + return rnn_out, rnn_state + + def transition(self, rnn_out): + """Computes the transition distribution p(z_t|h_t). + + Note that p(z_t | h_t) = p(z_t| z_{1:t-1}, x_{1:t-1}) + + Args: + rnn_out: The output of the rnn for the current timestep. + Returns: + p(z_t | h_t): A normal distribution with event shape + [batch_size, latent_size]. + """ + return self._transition(rnn_out) + + def emission(self, latent, rnn_out): + """Computes the emission distribution p(x_t | z_t, h_t). + + Note that p(x_t | z_t, h_t) = p(x_t | z_{1:t}, x_{1:t-1}). + + Args: + latent: The stochastic latent state z_t. + rnn_out: The output of the rnn for the current timestep. + Returns: + p(x_t | z_t, h_t): A distribution with event shape + [batch_size, data_size]. + latent_encoded: The latent state encoded with latent_encoder. Should be + passed to run_rnn on the next timestep. + """ + latent_encoded = self.latent_encoder(latent) + return self._emission(latent_encoded, rnn_out), latent_encoded + + def sample_step(self, prev_state, inputs, unused_t): + """Samples one output from the model. 
+ + Args: + prev_state: The previous state of the model, a VRNNState containing the + previous rnn state and the previous encoded latent. + inputs: A Tensor of shape [batch_size, data_size], the current inputs to + the model. Most often this is x_{t-1}, the previous token in the + observation sequence. + unused_t: The current timestep. Not used currently. + Returns: + new_state: The next state of the model, a VRNNState. + xt: A float Tensor of shape [batch_size, data_size], an output sampled + from the emission distribution. + """ + rnn_out, rnn_state = self.run_rnn(prev_state.rnn_state, + prev_state.latent_encoded, + inputs) + p_zt = self.transition(rnn_out) + zt = p_zt.sample(seed=self.random_seed) + p_xt_given_zt, latent_encoded = self.emission(zt, rnn_out) + xt = p_xt_given_zt.sample(seed=self.random_seed) + new_state = VRNNState(rnn_state=rnn_state, latent_encoded=latent_encoded) + return new_state, tf.to_float(xt) + +# pylint: disable=invalid-name +# pylint thinks this is a top-level constant. +TrainableVRNNState = namedtuple("TrainableVRNNState", + VRNNState._fields + ("rnn_out",)) +# pylint: enable=g-invalid-name + + +class TrainableVRNN(VRNN, base.ELBOTrainableSequenceModel): + """A VRNN subclass with proposals and methods for training and evaluation. + + This class adds proposals used for training with importance-sampling based + methods such as the ELBO. The model can be configured to propose from one + of three proposals: a learned filtering proposal, a learned smoothing + proposal, or the prior (i.e. the transition distribution). + + As described in the VRNN paper, the learned filtering proposal is + parameterized by a fully connected neural network that accepts as input the + current target x_t and the current rnn output h_t. The learned smoothing + proposal is also given the hidden state of an RNN run in reverse over the + inputs, so as to incorporate information about future observations. This + smoothing proposal is not described in the VRNN paper. + + All learned proposals use the 'res_q' parameterization, meaning that instead + of directly producing the mean of z_t, the proposal network predicts the + 'residual' from the prior's mean. This is explored more in section 3.3 of + https://arxiv.org/pdf/1605.07571.pdf. + + During training, the latent state z_t is sampled from the proposal and the + reparameterization trick is used to provide low-variance gradients. + + Note that the VRNN paper uses VAE terminology to refer to the different + internal networks, so the proposal is referred to as the encoder. + """ + + def __init__(self, + rnn_cell, + data_encoder, + latent_encoder, + transition, + emission, + proposal_type, + proposal=None, + rev_rnn_cell=None, + tilt=None, + random_seed=None): + """Create a trainable RNN. + + Args: + rnn_cell: A subclass of tf.nn.rnn_cell.RNNCell that will form the + deterministic backbone of the VRNN. The inputs to the RNN will be the + encoded latent state of the previous timestep with shape + [batch_size, encoded_latent_size] as well as the encoded input of the + current timestep, a Tensor of shape [batch_size, encoded_data_size]. + data_encoder: A callable that accepts a batch of data x_t and + 'encodes' it, e.g. runs it through a fully connected network. Must + accept as argument the inputs x_t, a Tensor of the shape + [batch_size, data_size] and return a Tensor of shape + [batch_size, encoded_data_size]. 
This callable will be called multiple + times in the VRNN cell so if scoping is not handled correctly then + multiple copies of the variables in this network could be made. It is + recommended to use a snt.nets.MLP module, which takes care of this for + you. + latent_encoder: A callable that accepts a latent state z_t and + 'encodes' it, e.g. runs it through a fully connected network. Must + accept as argument a Tensor of shape [batch_size, latent_size] and + return a Tensor of shape [batch_size, encoded_latent_size]. + This callable must also have the property 'output_size' defined, + returning encoded_latent_size. + transition: A callable that implements the transition distribution + p(z_t|h_t). Must accept as argument the previous RNN hidden state and + return a tf.distributions.Normal distribution conditioned on the input. + emission: A callable that implements the emission distribution + p(x_t|z_t, h_t). Must accept as arguments the encoded latent state + and the RNN hidden state and return a subclass of + tf.distributions.Distribution that can be used to evaluate the logprob + of the targets. + proposal_type: A string indicating the type of proposal to use. Can + be either "filtering", "smoothing", or "prior". When proposal_type is + "filtering" or "smoothing", proposal must be provided. When + proposal_type is "smoothing", rev_rnn_cell must also be provided. + proposal: A callable that implements the proposal q(z_t| h_t, x_{1:T}). + If proposal_type is "filtering" then proposal must accept as arguments + the current rnn output, the encoded target of the current timestep, + and the mean of the prior. If proposal_type is "smoothing" then + in addition to the current rnn output and the mean of the prior + proposal must accept as arguments the output of the reverse rnn. + proposal should return a tf.distributions.Normal distribution + conditioned on its inputs. If proposal_type is "prior" this argument is + ignored. + rev_rnn_cell: A subclass of tf.nn.rnn_cell.RNNCell that will aggregate + observation statistics in the reverse direction. The inputs to the RNN + will be the encoded reverse input of the current timestep, a Tensor of + shape [batch_size, encoded_data_size]. + tilt: A callable that implements the log of a positive tilting function + (ideally approximating log p(x_{t+1}|z_t, h_t). Must accept as arguments + the encoded latent state and the RNN hidden state and return a subclass + of tf.distributions.Distribution that can be used to evaluate the + logprob of x_{t+1}. Optionally, None and then no tilt is used. + random_seed: The seed for the random ops. Sets the seed for sample_step + and __call__. + """ + super(TrainableVRNN, self).__init__( + rnn_cell, data_encoder, latent_encoder, + transition, emission, random_seed=random_seed) + self.rev_rnn_cell = rev_rnn_cell + self._tilt = tilt + assert proposal_type in ["filtering", "smoothing", "prior"] + self._proposal = proposal + self.proposal_type = proposal_type + if proposal_type != "prior": + assert proposal, "If not proposing from the prior, must provide proposal." + if proposal_type == "smoothing": + assert rev_rnn_cell, "Must provide rev_rnn_cell for smoothing proposal." + + def zero_state(self, batch_size, dtype): + super_state = super(TrainableVRNN, self).zero_state(batch_size, dtype) + return TrainableVRNNState( + rnn_out=tf.zeros([batch_size, self.rnn_cell.output_size], dtype=dtype), + **super_state._asdict()) + + def set_observations(self, observations, seq_lengths): + """Stores the model's observations. 
+ + Stores the observations (inputs and targets) in TensorArrays and precomputes + things for later like the reverse RNN output and encoded targets. + + Args: + observations: The observations of the model, a tuple containing two + Tensors of shape [max_seq_len, batch_size, data_size]. The Tensors + should be the inputs and targets, respectively. + seq_lengths: An int Tensor of shape [batch_size] containing the length + of each sequence in observations. + """ + inputs, targets = observations + self.seq_lengths = seq_lengths + self.max_seq_len = tf.reduce_max(seq_lengths) + self.inputs_ta = base.ta_for_tensor(inputs, clear_after_read=False) + self.targets_ta = base.ta_for_tensor(targets, clear_after_read=False) + targets_encoded = base.encode_all(targets, self.data_encoder) + self.targets_encoded_ta = base.ta_for_tensor(targets_encoded, + clear_after_read=False) + if self.rev_rnn_cell: + reverse_targets_encoded = tf.reverse_sequence( + targets_encoded, seq_lengths, seq_axis=0, batch_axis=1) + # Compute the reverse rnn over the targets. + reverse_rnn_out, _ = tf.nn.dynamic_rnn(self.rev_rnn_cell, + reverse_targets_encoded, + time_major=True, + dtype=tf.float32) + reverse_rnn_out = tf.reverse_sequence(reverse_rnn_out, seq_lengths, + seq_axis=0, batch_axis=1) + self.reverse_rnn_ta = base.ta_for_tensor(reverse_rnn_out, + clear_after_read=False) + + def _filtering_proposal(self, rnn_out, prior, t): + """Computes the filtering proposal distribution.""" + return self._proposal(rnn_out, + self.targets_encoded_ta.read(t), + prior_mu=prior.mean()) + + def _smoothing_proposal(self, rnn_out, prior, t): + """Computes the smoothing proposal distribution.""" + return self._proposal(rnn_out, + smoothing_tensors=[self.reverse_rnn_ta.read(t)], + prior_mu=prior.mean()) + + def proposal(self, rnn_out, prior, t): + """Computes the proposal distribution specified by proposal_type. + + Args: + rnn_out: The output of the rnn for the current timestep. + prior: A tf.distributions.Normal distribution representing the prior + over z_t, p(z_t | z_{1:t-1}, x_{1:t-1}). Used for 'res_q'. + t: A scalar int Tensor, the current timestep. + """ + if self.proposal_type == "filtering": + return self._filtering_proposal(rnn_out, prior, t) + elif self.proposal_type == "smoothing": + return self._smoothing_proposal(rnn_out, prior, t) + elif self.proposal_type == "prior": + return self.transition(rnn_out) + + def tilt(self, rnn_out, latent_encoded, targets): + r_func = self._tilt(rnn_out, latent_encoded) + return tf.reduce_sum(r_func.log_prob(targets), axis=-1) + + def propose_and_weight(self, state, t): + """Runs the model and computes importance weights for one timestep. + + Runs the model and computes importance weights, sampling from the proposal + instead of the transition/prior. + + Args: + state: The previous state of the model, a TrainableVRNNState containing + the previous rnn state, the previous rnn outs, and the previous encoded + latent. + t: A scalar integer Tensor, the current timestep. + Returns: + weights: A float Tensor of shape [batch_size]. + new_state: The new state of the model. 
+ """ + inputs = self.inputs_ta.read(t) + targets = self.targets_ta.read(t) + rnn_out, next_rnn_state = self.run_rnn(state.rnn_state, + state.latent_encoded, + inputs) + p_zt = self.transition(rnn_out) + q_zt = self.proposal(rnn_out, p_zt, t) + zt = q_zt.sample(seed=self.random_seed) + p_xt_given_zt, latent_encoded = self.emission(zt, rnn_out) + log_p_xt_given_zt = tf.reduce_sum(p_xt_given_zt.log_prob(targets), axis=-1) + log_p_zt = tf.reduce_sum(p_zt.log_prob(zt), axis=-1) + log_q_zt = tf.reduce_sum(q_zt.log_prob(zt), axis=-1) + weights = log_p_zt + log_p_xt_given_zt - log_q_zt + if self._tilt: + prev_log_r = tf.cond( + tf.greater(t, 0), + lambda: self.tilt(state.rnn_out, state.latent_encoded, targets), + lambda: 0.) # On the first step, prev_log_r = 0. + log_r = tf.cond( + tf.less(t + 1, self.max_seq_len), + lambda: self.tilt(rnn_out, latent_encoded, self.targets_ta.read(t+1)), + lambda: 0.) + # On the last step, log_r = 0. + log_r *= tf.to_float(t < self.seq_lengths - 1) + weights += log_r - prev_log_r + new_state = TrainableVRNNState(rnn_state=next_rnn_state, + rnn_out=rnn_out, + latent_encoded=latent_encoded) + return weights, new_state + + +_DEFAULT_INITIALIZERS = {"w": tf.contrib.layers.xavier_initializer(), + "b": tf.zeros_initializer()} + + +def create_vrnn( + data_size, + latent_size, + emission_class, + rnn_hidden_size=None, + fcnet_hidden_sizes=None, + encoded_data_size=None, + encoded_latent_size=None, + sigma_min=0.0, + raw_sigma_bias=0.25, + emission_bias_init=0.0, + use_tilt=False, + proposal_type="filtering", + initializers=None, + random_seed=None): + """A factory method for creating VRNN cells. + + Args: + data_size: The dimension of the vectors that make up the data sequences. + latent_size: The size of the stochastic latent state of the VRNN. + emission_class: The class of the emission distribution. Can be either + ConditionalNormalDistribution or ConditionalBernoulliDistribution. + rnn_hidden_size: The hidden state dimension of the RNN that forms the + deterministic part of this VRNN. If None, then it defaults + to latent_size. + fcnet_hidden_sizes: A list of python integers, the size of the hidden + layers of the fully connected networks that parameterize the conditional + distributions of the VRNN. If None, then it defaults to one hidden + layer of size latent_size. + encoded_data_size: The size of the output of the data encoding network. If + None, defaults to latent_size. + encoded_latent_size: The size of the output of the latent state encoding + network. If None, defaults to latent_size. + sigma_min: The minimum value that the standard deviation of the + distribution over the latent state can take. + raw_sigma_bias: A scalar that is added to the raw standard deviation + output from the neural networks that parameterize the prior and + approximate posterior. Useful for preventing standard deviations close + to zero. + emission_bias_init: A bias to added to the raw output of the fully + connected network that parameterizes the emission distribution. Useful + for initalizing the mean of the distribution to a sensible starting point + such as the mean of the training data. Only used with Bernoulli generative + distributions. + use_tilt: If true, create a VRNN with a tilting function. + proposal_type: The type of proposal to use. Can be "filtering", "smoothing", + or "prior". + initializers: The variable intitializers to use for the fully connected + networks and RNN cell. 
Must be a dictionary mapping the keys 'w' and 'b' + to the initializers for the weights and biases. Defaults to xavier for + the weights and zeros for the biases when initializers is None. + random_seed: A random seed for the VRNN resampling operations. + Returns: + model: A TrainableVRNN object. + """ + if rnn_hidden_size is None: + rnn_hidden_size = latent_size + if fcnet_hidden_sizes is None: + fcnet_hidden_sizes = [latent_size] + if encoded_data_size is None: + encoded_data_size = latent_size + if encoded_latent_size is None: + encoded_latent_size = latent_size + if initializers is None: + initializers = _DEFAULT_INITIALIZERS + data_encoder = snt.nets.MLP( + output_sizes=fcnet_hidden_sizes + [encoded_data_size], + initializers=initializers, + name="data_encoder") + latent_encoder = snt.nets.MLP( + output_sizes=fcnet_hidden_sizes + [encoded_latent_size], + initializers=initializers, + name="latent_encoder") + transition = base.ConditionalNormalDistribution( + size=latent_size, + hidden_layer_sizes=fcnet_hidden_sizes, + sigma_min=sigma_min, + raw_sigma_bias=raw_sigma_bias, + initializers=initializers, + name="prior") + # Construct the emission distribution. + if emission_class == base.ConditionalBernoulliDistribution: + # For Bernoulli distributed outputs, we initialize the bias so that the + # network generates on average the mean from the training set. + emission_dist = functools.partial(base.ConditionalBernoulliDistribution, + bias_init=emission_bias_init) + else: + emission_dist = base.ConditionalNormalDistribution + emission = emission_dist( + size=data_size, + hidden_layer_sizes=fcnet_hidden_sizes, + initializers=initializers, + name="generative") + # Construct the proposal distribution. + if proposal_type in ["filtering", "smoothing"]: + proposal = base.NormalApproximatePosterior( + size=latent_size, + hidden_layer_sizes=fcnet_hidden_sizes, + sigma_min=sigma_min, + raw_sigma_bias=raw_sigma_bias, + initializers=initializers, + smoothing=(proposal_type == "smoothing"), + name="approximate_posterior") + else: + proposal = None + + if use_tilt: + tilt = emission_dist( + size=data_size, + hidden_layer_sizes=fcnet_hidden_sizes, + initializers=initializers, + name="tilt") + else: + tilt = None + + rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_hidden_size, + initializer=initializers["w"]) + rev_rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_hidden_size, + initializer=initializers["w"]) + return TrainableVRNN( + rnn_cell, data_encoder, latent_encoder, transition, + emission, proposal_type, proposal=proposal, rev_rnn_cell=rev_rnn_cell, + tilt=tilt, random_seed=random_seed) diff --git a/models/research/fivo/fivo/models/vrnn_test.py b/models/research/fivo/fivo/models/vrnn_test.py new file mode 100644 index 0000000000000000000000000000000000000000..2d9bde3d5b6c6f66a82bd331cf50a87737864239 --- /dev/null +++ b/models/research/fivo/fivo/models/vrnn_test.py @@ -0,0 +1,137 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
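For orientation, here is a hedged sketch of calling the `create_vrnn` factory defined above from user code. The sizes are illustrative only; the runners module later in this change drives the factory from a config object instead.

```python
from fivo.models import base
from fivo.models import vrnn

# Illustrative sizes. proposal_type="smoothing" makes the factory build the
# reverse RNN cell it needs, and use_tilt=True adds the tilting function
# (the "fivo-aux" bound in runners.py).
model = vrnn.create_vrnn(
    data_size=88,
    latent_size=32,
    emission_class=base.ConditionalBernoulliDistribution,
    proposal_type="smoothing",
    use_tilt=True)
```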
+# ============================================================================== + +"""Tests for fivo.models.vrnn.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + +import tensorflow as tf + +from fivo.models import base +from fivo.test_utils import create_vrnn + + +class VrnnTest(tf.test.TestCase): + + def test_vrnn_normal_emission(self): + self.run_vrnn(base.ConditionalNormalDistribution, [-4.509767, -3.242221]) + + def test_vrnn_bernoulli_emission(self): + self.run_vrnn(base.ConditionalBernoulliDistribution, [-2.63812733, -2.02216434]), + + def run_vrnn(self, generative_class, gt_log_p_x_given_z): + """Tests the VRNN. + + All test values are 'golden values' derived by running the code and copying + the output. + + Args: + generative_class: The class of the generative distribution to use. + gt_log_p_x_given_z: The ground-truth value of log p(x|z). + """ + tf.set_random_seed(1234) + with self.test_session() as sess: + batch_size = 2 + model, inputs, targets, _ = create_vrnn(generative_class=generative_class, + batch_size=batch_size, + data_lengths=(1, 1), + random_seed=1234) + zero_state = model.zero_state(batch_size=batch_size, dtype=tf.float32) + model.set_observations([inputs, targets], tf.convert_to_tensor([1, 1])) + model_out = model.propose_and_weight(zero_state, 0) + sess.run(tf.global_variables_initializer()) + log_alpha, state = sess.run(model_out) + rnn_state, latent_state, rnn_out = state + self.assertAllClose( + rnn_state.c, + [[-0.15014534, 0.0143046, 0.00160489, -0.12899463], + [-0.25015137, 0.09377634, -0.05000039, -0.17123522]]) + self.assertAllClose( + rnn_state.h, + [[-0.06842659, 0.00760155, 0.00096106, -0.05434214], + [-0.1109542, 0.0441804, -0.03121299, -0.07882939]] + ) + self.assertAllClose( + latent_state, + [[0.025241, 0.122011, 1.066661, 0.316209, -0.25369, 0.108215, + -1.501128, -0.440111, -0.40447, -0.156649, 1.206028], + [0.066824, 0.519937, 0.610973, 0.977739, -0.121889, -0.223429, + -0.32687, -0.578763, -0.56965, 0.751886, 0.681606]] + ) + self.assertAllClose(rnn_out, [[-0.068427, 0.007602, 0.000961, -0.054342], + [-0.110954, 0.04418, -0.031213, -0.078829]]) + gt_log_q_z = [-8.0895052, -6.75819111] + gt_log_p_z = [-7.246827, -6.512877] + gt_log_alpha = (np.array(gt_log_p_z) + + np.array(gt_log_p_x_given_z) - + np.array(gt_log_q_z)) + self.assertAllClose(log_alpha, gt_log_alpha) + + def test_vrnn_with_tilt_normal_emission(self): + self.run_vrnn_with_tilt(base.ConditionalNormalDistribution, [-5.198263, -6.31686]) + + def test_vrnn_with_tilt_bernoulli_emission(self): + self.run_vrnn_with_tilt(base.ConditionalBernoulliDistribution, [-4.66985, -3.802245]) + + def run_vrnn_with_tilt(self, generative_class, gt_log_alpha): + """Tests the VRNN with a tilting function. + + All test values are 'golden values' derived by running the code and copying + the output. + + Args: + generative_class: The class of the generative distribution to use. + gt_log_alpha: The ground-truth value of log alpha. 
+ """ + tf.set_random_seed(1234) + with self.test_session() as sess: + batch_size = 2 + model, inputs, targets, _ = create_vrnn(generative_class=generative_class, + batch_size=batch_size, + data_lengths=(3, 2), + random_seed=1234, + use_tilt=True) + zero_state = model.zero_state(batch_size=batch_size, dtype=tf.float32) + model.set_observations([inputs, targets], tf.convert_to_tensor([3, 2])) + model_out = model.propose_and_weight(zero_state, 0) + sess.run(tf.global_variables_initializer()) + log_alpha, state = sess.run(model_out) + rnn_state, latent_state, rnn_out = state + self.assertAllClose( + rnn_state.c, + [[-0.15014534, 0.0143046, 0.00160489, -0.12899463], + [-0.25015137, 0.09377634, -0.05000039, -0.17123522]]) + self.assertAllClose( + rnn_state.h, + [[-0.06842659, 0.00760155, 0.00096106, -0.05434214], + [-0.1109542, 0.0441804, -0.03121299, -0.07882939]] + ) + self.assertAllClose( + latent_state, + [[0.025241, 0.122011, 1.066661, 0.316209, -0.25369, 0.108215, + -1.501128, -0.440111, -0.40447, -0.156649, 1.206028], + [0.066824, 0.519937, 0.610973, 0.977739, -0.121889, -0.223429, + -0.32687, -0.578763, -0.56965, 0.751886, 0.681606]] + ) + self.assertAllClose(rnn_out, [[-0.068427, 0.007602, 0.000961, -0.054342], + [-0.110954, 0.04418, -0.031213, -0.078829]]) + self.assertAllClose(log_alpha, gt_log_alpha) + +if __name__ == "__main__": + tf.test.main() diff --git a/models/research/fivo/fivo/nested_utils.py b/models/research/fivo/fivo/nested_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ef956a80c40d55331a3acbfe78111e099559ddea --- /dev/null +++ b/models/research/fivo/fivo/nested_utils.py @@ -0,0 +1,139 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A set of utils for dealing with nested lists and tuples of Tensors.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import itertools +import tensorflow as tf + +from tensorflow.python.util import nest + + +def map_nested(map_fn, nested): + """Executes map_fn on every element in a (potentially) nested structure. + + Args: + map_fn: A callable to execute on each element in 'nested'. + nested: A potentially nested combination of sequence objects. Sequence + objects include tuples, lists, namedtuples, and all subclasses of + collections.Sequence except strings. See nest.is_sequence for details. + For example [1, ('hello', 4.3)] is a nested structure containing elements + 1, 'hello', and 4.3. + Returns: + out_structure: A potentially nested combination of sequence objects with the + same structure as the 'nested' input argument. out_structure + contains the result of applying map_fn to each element in 'nested'. For + example map_nested(lambda x: x+1, [1, (3, 4.3)]) returns [2, (4, 5.3)]. 
+ """ + out = map(map_fn, nest.flatten(nested)) + return nest.pack_sequence_as(nested, out) + + +def tile_tensors(tensors, multiples): + """Tiles a set of Tensors. + + Args: + tensors: A potentially nested tuple or list of Tensors with rank + greater than or equal to the length of 'multiples'. The Tensors do not + need to have the same rank, but their rank must not be dynamic. + multiples: A python list of ints indicating how to tile each Tensor + in 'tensors'. Similar to the 'multiples' argument to tf.tile. + Returns: + tiled_tensors: A potentially nested tuple or list of Tensors with the same + structure as the 'tensors' input argument. Contains the result of + applying tf.tile to each Tensor in 'tensors'. When the rank of a Tensor + in 'tensors' is greater than the length of multiples, multiples is padded + at the end with 1s. For example when tiling a 4-dimensional Tensor with + multiples [3, 4], multiples would be padded to [3, 4, 1, 1] before tiling. + """ + def tile_fn(x): + return tf.tile(x, multiples + [1] * (x.shape.ndims - len(multiples))) + + return map_nested(tile_fn, tensors) + + +def where_tensors(condition, x_tensors, y_tensors): + """Performs a tf.where operation on a two sets of Tensors. + + Args: + condition: The condition tensor to use for the where operation. + x_tensors: A potentially nested tuple or list of Tensors. + y_tensors: A potentially nested tuple or list of Tensors. Must have the + same structure as x_tensors. + Returns: + whered_tensors: A potentially nested tuple or list of Tensors with the + same structure as the 'tensors' input argument. Contains the result of + applying tf.where(condition, x, y) on each pair of elements in x_tensors + and y_tensors. + """ + flat_x = nest.flatten(x_tensors) + flat_y = nest.flatten(y_tensors) + result = [tf.where(condition, x, y) for x, y in + itertools.izip(flat_x, flat_y)] + + return nest.pack_sequence_as(x_tensors, result) + + +def gather_tensors(tensors, indices): + """Performs a tf.gather operation on a set of Tensors. + + Args: + tensors: A potentially nested tuple or list of Tensors. + indices: The indices to use for the gather operation. + Returns: + gathered_tensors: A potentially nested tuple or list of Tensors with the + same structure as the 'tensors' input argument. Contains the result of + applying tf.gather(x, indices) on each element x in 'tensors'. + """ + return map_nested(lambda x: tf.gather(x, indices), tensors) + + +def tas_for_tensors(tensors, length, **kwargs): + """Unstacks a set of Tensors into TensorArrays. + + Args: + tensors: A potentially nested tuple or list of Tensors with length in the + first dimension greater than or equal to the 'length' input argument. + length: The desired length of the TensorArrays. + **kwargs: Keyword args for TensorArray constructor. + Returns: + tensorarrays: A potentially nested tuple or list of TensorArrays with the + same structure as 'tensors'. Contains the result of unstacking each Tensor + in 'tensors'. + """ + def map_fn(x): + ta = tf.TensorArray(x.dtype, length, + name=x.name.split(':')[0] + '_ta', **kwargs) + return ta.unstack(x[:length, :]) + return map_nested(map_fn, tensors) + + +def read_tas(tas, index): + """Performs a read operation on a set of TensorArrays. + + Args: + tas: A potentially nested tuple or list of TensorArrays with length greater + than 'index'. + index: The location to read from. + Returns: + read_tensors: A potentially nested tuple or list of Tensors with the same + structure as the 'tas' input argument. 
Contains the result of + performing a read operation at 'index' on each TensorArray in 'tas'. + """ + return map_nested(lambda ta: ta.read(index), tas) diff --git a/models/research/fivo/fivo/nested_utils_test.py b/models/research/fivo/fivo/nested_utils_test.py new file mode 100644 index 0000000000000000000000000000000000000000..87991dd79cdb29d12944f9afa3fd0c5178dc4eb5 --- /dev/null +++ b/models/research/fivo/fivo/nested_utils_test.py @@ -0,0 +1,125 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for fivo.nested_utils.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import tensorflow as tf +nest = tf.contrib.framework.nest + +from fivo import nested_utils + +# An example namedtuple for use in the following tests. +ExampleTuple = collections.namedtuple('ExampleTuple', ['a', 'b']) + + +class NestedUtilsTest(tf.test.TestCase): + + def test_map_nested_works_on_nested_structures(self): + """Check that map_nested works with nested structures.""" + original = [1, (2, 3.2, (4., ExampleTuple(5, 6)))] + expected = [2, (3, 4.2, (5., ExampleTuple(6, 7)))] + out = nested_utils.map_nested(lambda x: x+1, original) + self.assertEqual(expected, out) + + def test_map_nested_works_on_single_objects(self): + """Check that map_nested works with raw objects.""" + original = 1 + expected = 2 + out = nested_utils.map_nested(lambda x: x+1, original) + self.assertEqual(expected, out) + + def test_map_nested_works_on_flat_lists(self): + """Check that map_nested works with a flat list.""" + original = [1, 2, 3] + expected = [2, 3, 4] + out = nested_utils.map_nested(lambda x: x+1, original) + self.assertEqual(expected, out) + + def test_tile_tensors(self): + """Checks that tile_tensors correctly tiles tensors of different ranks.""" + a = tf.range(20) + b = tf.reshape(a, [2, 10]) + c = tf.reshape(a, [2, 2, 5]) + a_tiled = tf.tile(a, [3]) + b_tiled = tf.tile(b, [3, 1]) + c_tiled = tf.tile(c, [3, 1, 1]) + tensors = [a, (b, ExampleTuple(c, c))] + expected_tensors = [a_tiled, (b_tiled, ExampleTuple(c_tiled, c_tiled))] + tiled = nested_utils.tile_tensors(tensors, [3]) + nest.assert_same_structure(expected_tensors, tiled) + with self.test_session() as sess: + expected, out = sess.run([expected_tensors, tiled]) + expected = nest.flatten(expected) + out = nest.flatten(out) + # Check that the tiling is correct. 
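These tests exercise the helpers one at a time; as a sketch of how they compose downstream (smc.py later in this change tiles state across particles and gathers resampled ancestors), here is a hedged example with toy shapes and a made-up namedtuple state.

```python
import collections
import tensorflow as tf
from fivo import nested_utils

State = collections.namedtuple('State', ['h', 'c'])
state = State(h=tf.reshape(tf.range(8, dtype=tf.float32), [4, 2]),
              c=tf.zeros([4, 2]))

tiled = nested_utils.tile_tensors(state, [3])        # each field becomes [12, 2]
resampled = nested_utils.gather_tensors(state, tf.constant([0, 0, 2, 1]))

with tf.Session() as sess:
  print(sess.run(resampled.h))   # rows 0, 0, 2, 1 of state.h
```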
+ for x, y in zip(expected, out): + self.assertAllClose(x, y) + + def test_gather_tensors(self): + a = tf.reshape(tf.range(20), [5, 4]) + inds = [0, 0, 1, 4] + a_gathered = tf.gather(a, inds) + tensors = [a, (a, ExampleTuple(a, a))] + gt_gathered = [a_gathered, (a_gathered, + ExampleTuple(a_gathered, a_gathered))] + gathered = nested_utils.gather_tensors(tensors, inds) + nest.assert_same_structure(gt_gathered, gathered) + with self.test_session() as sess: + gt, out = sess.run([gt_gathered, gathered]) + gt = nest.flatten(gt) + out = nest.flatten(out) + # Check that the gathering is correct. + for x, y in zip(gt, out): + self.assertAllClose(x, y) + + def test_tas_for_tensors(self): + a = tf.reshape(tf.range(20), [5, 4]) + tensors = [a, (a, ExampleTuple(a, a))] + tas = nested_utils.tas_for_tensors(tensors, 5) + nest.assert_same_structure(tensors, tas) + # We can't pass TensorArrays to sess.run so instead we turn then back into + # tensors to check that they were created correctly. + stacked = nested_utils.map_nested(lambda x: x.stack(), tas) + with self.test_session() as sess: + gt, out = sess.run([tensors, stacked]) + gt = nest.flatten(gt) + out = nest.flatten(out) + # Check that the tas were created correctly. + for x, y in zip(gt, out): + self.assertAllClose(x, y) + + def test_read_tas(self): + a = tf.reshape(tf.range(20), [5, 4]) + a_read = a[3, :] + tensors = [a, (a, ExampleTuple(a, a))] + gt_read = [a_read, (a_read, ExampleTuple(a_read, a_read))] + tas = nested_utils.tas_for_tensors(tensors, 5) + tas_read = nested_utils.read_tas(tas, 3) + nest.assert_same_structure(tas, tas_read) + with self.test_session() as sess: + gt, out = sess.run([gt_read, tas_read]) + gt = nest.flatten(gt) + out = nest.flatten(out) + # Check that the tas were read correctly. + for x, y in zip(gt, out): + self.assertAllClose(x, y) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/fivo/fivo/runners.py b/models/research/fivo/fivo/runners.py new file mode 100644 index 0000000000000000000000000000000000000000..ec6fb91bf51fa2c7c44d7402e635d257f80c3f7a --- /dev/null +++ b/models/research/fivo/fivo/runners.py @@ -0,0 +1,489 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""High-level code for creating and running FIVO-related Tensorflow graphs. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import os +import time + +import numpy as np +import tensorflow as tf + +from fivo import bounds +from fivo import smc + +from fivo.data import datasets +from fivo.models import base +from fivo.models import srnn +from fivo.models import vrnn + + +def create_dataset_and_model(config, split, shuffle, repeat): + """Creates the dataset and model for a given config. + + Args: + config: A configuration object with config values accessible as properties. + Most likely a FLAGS object. 
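Because the config is only duck-typed, any attribute container works; this is a hedged sketch (attribute names taken from this function and the tests below, values and the dataset path are placeholders) of calling it without FLAGS.

```python
from argparse import Namespace
from fivo import runners

# Placeholder values; dataset_path must point at a real pianoroll pickle.
config = Namespace(
    model="vrnn", bound="fivo", proposal_type="filtering",
    dataset_type="pianoroll", dataset_path="/tmp/jsb_pianoroll.pkl",
    batch_size=4, latent_size=32)
inputs, targets, lengths, model, mean = runners.create_dataset_and_model(
    config, split="train", shuffle=True, repeat=True)
```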
This function expects the properties + batch_size, dataset_path, dataset_type, and latent_size to be defined. + split: The dataset split to load. + shuffle: If true, shuffle the dataset randomly. + repeat: If true, repeat the dataset endlessly. + Returns: + inputs: A batch of input sequences represented as a dense Tensor of shape + [time, batch_size, data_dimension]. + targets: A batch of target sequences represented as a dense Tensor of + shape [time, batch_size, data_dimension]. + lens: An int Tensor of shape [batch_size] representing the lengths of each + sequence in the batch. + model: A vrnn.VRNNCell model object. + Raises: + ValueError: if the config is invalid. + """ + sigma_min = 0.0 + if config.dataset_type == "pianoroll": + inputs, targets, lengths, mean = datasets.create_pianoroll_dataset( + config.dataset_path, split, config.batch_size, shuffle=shuffle, + repeat=repeat) + # Convert the mean of the training set to logit space so it can be used to + # initialize the bias of the generative distribution. + emission_bias_init = -tf.log( + 1. / tf.clip_by_value(mean, 0.0001, 0.9999) - 1) + emission_distribution_class = base.ConditionalBernoulliDistribution + elif config.dataset_type == "speech": + inputs, targets, lengths = datasets.create_speech_dataset( + config.dataset_path, config.batch_size, + samples_per_timestep=config.data_dimension, prefetch_buffer_size=1, + shuffle=False, repeat=False) + # There is no bias for the generative distribution because the test set + # is assumed to be already standardized with the training set statistics. + mean = None + emission_bias_init = None + emission_distribution_class = base.ConditionalNormalDistribution + if config.model == "vrnn": + model = vrnn.create_vrnn(inputs.get_shape().as_list()[2], + config.latent_size, + emission_distribution_class, + emission_bias_init=emission_bias_init, + proposal_type=config.proposal_type, + sigma_min=sigma_min, + raw_sigma_bias=0.5, + use_tilt=(config.bound == "fivo-aux")) + elif config.model == "srnn": + model = srnn.create_srnn(inputs.get_shape().as_list()[2], + config.latent_size, + emission_distribution_class, + emission_bias_init=emission_bias_init, + proposal_type=config.proposal_type, + sigma_min=sigma_min, + raw_sigma_bias=0.5, + use_tilt=(config.bound == "fivo-aux")) + else: + raise ValueError("model flag: %s is unrecognized" % config.model) + return inputs, targets, lengths, model, mean + + +def restore_checkpoint_if_exists(saver, sess, logdir): + """Looks for a checkpoint and restores the session from it if found. + + Args: + saver: A tf.train.Saver for restoring the session. + sess: A TensorFlow session. + logdir: The directory to look for checkpoints in. + Returns: + True if a checkpoint was found and restored, False otherwise. + """ + checkpoint = tf.train.get_checkpoint_state(logdir) + if checkpoint: + checkpoint_name = os.path.basename(checkpoint.model_checkpoint_path) + full_checkpoint_path = os.path.join(logdir, checkpoint_name) + saver.restore(sess, full_checkpoint_path) + return True + return False + + +def wait_for_checkpoint(saver, sess, logdir): + """Loops until the session is restored from a checkpoint in logdir. + + Args: + saver: A tf.train.Saver for restoring the session. + sess: A TensorFlow session. + logdir: The directory to look for checkpoints in. + """ + while not restore_checkpoint_if_exists(saver, sess, logdir): + tf.logging.info("Checkpoint not found in %s, sleeping for 60 seconds." 
+ % logdir) + time.sleep(60) + + +def run_train(config, create_dataset_and_model_fn=create_dataset_and_model): + """Runs training for a sequential latent variable model. + + Args: + config: A configuration object with config values accessible as properties. + Most likely a FLAGS object. For a list of expected properties and their + meaning see the flags defined in fivo.py. + create_dataset_and_model_fn: If present, calls this function to create a + dataset and model instead of create_dataset_and_model() above. The + signature must be the same. + """ + + def create_logging_hook(step, bound_value): + """Creates a logging hook that prints the bound value periodically.""" + bound_label = config.bound + " bound" + if config.normalize_by_seq_len: + bound_label += " per timestep" + else: + bound_label += " per sequence" + def summary_formatter(log_dict): + return "Step %d, %s: %f" % ( + log_dict["step"], bound_label, log_dict["bound_value"]) + logging_hook = tf.train.LoggingTensorHook( + {"step": step, "bound_value": bound_value}, + every_n_iter=config.summarize_every, + formatter=summary_formatter) + return logging_hook + + def create_loss(): + """Creates the loss to be optimized. + + Returns: + bound: A float Tensor containing the value of the bound that is + being optimized. + loss: A float Tensor that when differentiated yields the gradients + to apply to the model. Should be optimized via gradient descent. + """ + inputs, targets, lengths, model, _ = create_dataset_and_model_fn( + config, split="train", shuffle=True, repeat=True) + # Compute lower bounds on the log likelihood. + if config.bound == "elbo": + ll_per_seq, _, _ = bounds.iwae( + model, (inputs, targets), lengths, num_samples=1, + parallel_iterations=config.parallel_iterations + ) + elif config.bound == "iwae": + ll_per_seq, _, _ = bounds.iwae( + model, (inputs, targets), lengths, num_samples=config.num_samples, + parallel_iterations=config.parallel_iterations + ) + elif config.bound in ("fivo", "fivo-aux"): + if config.resampling_type == "relaxed": + ll_per_seq, _, _, _ = bounds.fivo( + model, (inputs, targets), + lengths, + num_samples=config.num_samples, + resampling_criterion=smc.ess_criterion, + resampling_type=config.resampling_type, + random_seed=config.random_seed, + relaxed_resampling_temperature=config. + relaxed_resampling_temperature, + parallel_iterations=config.parallel_iterations + ) + else: + ll_per_seq, _, _, _ = bounds.fivo( + model, (inputs, targets), lengths, num_samples=config.num_samples, + resampling_criterion=smc.ess_criterion, + resampling_type=config.resampling_type, + random_seed=config.random_seed, + parallel_iterations=config.parallel_iterations + ) + # Compute loss scaled by number of timesteps. 
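The bound selection above differs mainly in how many samples are drawn and whether resampling happens; as a generic reminder (not this file's exact computation) of why the multi-sample log-mean-exp bound is never below an average of log weights, here is a tiny numpy sketch with placeholder values.

```python
import numpy as np
from scipy.special import logsumexp

log_w = np.array([-3.1, -2.4, -4.0, -2.9])             # K=4 per-sample log weights
multi_sample = logsumexp(log_w) - np.log(len(log_w))   # log-mean-exp (IWAE/FIVO style)
single_sample_avg = log_w.mean()                       # averaging the log weights
assert multi_sample >= single_sample_avg               # Jensen's inequality
```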
+ ll_per_t = tf.reduce_mean(ll_per_seq / tf.to_float(lengths)) + ll_per_seq = tf.reduce_mean(ll_per_seq) + + tf.summary.scalar("train_ll_per_seq", ll_per_seq) + tf.summary.scalar("train_ll_per_t", ll_per_t) + + if config.normalize_by_seq_len: + return ll_per_t, -ll_per_t + else: + return ll_per_seq, -ll_per_seq + + def create_graph(): + """Creates the training graph.""" + global_step = tf.train.get_or_create_global_step() + bound, loss = create_loss() + opt = tf.train.AdamOptimizer(config.learning_rate) + grads = opt.compute_gradients(loss, var_list=tf.trainable_variables()) + train_op = opt.apply_gradients(grads, global_step=global_step) + return bound, train_op, global_step + + device = tf.train.replica_device_setter(ps_tasks=config.ps_tasks) + with tf.Graph().as_default(): + if config.random_seed: tf.set_random_seed(config.random_seed) + with tf.device(device): + bound, train_op, global_step = create_graph() + log_hook = create_logging_hook(global_step, bound) + start_training = not config.stagger_workers + with tf.train.MonitoredTrainingSession( + master=config.master, + is_chief=config.task == 0, + hooks=[log_hook], + checkpoint_dir=config.logdir, + save_checkpoint_secs=120, + save_summaries_steps=config.summarize_every, + log_step_count_steps=config.summarize_every) as sess: + cur_step = -1 + while not sess.should_stop() and cur_step <= config.max_steps: + if config.task > 0 and not start_training: + cur_step = sess.run(global_step) + tf.logging.info("task %d not active yet, sleeping at step %d" % + (config.task, cur_step)) + time.sleep(30) + if cur_step >= config.task * 1000: + start_training = True + else: + _, cur_step = sess.run([train_op, global_step]) + + +def run_eval(config, create_dataset_and_model_fn=create_dataset_and_model): + """Runs evaluation for a sequential latent variable model. + + This method runs only one evaluation over the dataset, writes summaries to + disk, and then terminates. It does not loop indefinitely. + + Args: + config: A configuration object with config values accessible as properties. + Most likely a FLAGS object. For a list of expected properties and their + meaning see the flags defined in fivo.py. + create_dataset_and_model_fn: If present, calls this function to create a + dataset and model instead of create_dataset_and_model() above. The + signature must be the same. + """ + + def create_graph(): + """Creates the evaluation graph. + + Returns: + lower_bounds: A tuple of float Tensors containing the values of the 3 + evidence lower bounds, summed across the batch. + total_batch_length: The total number of timesteps in the batch, summed + across batch examples. + batch_size: The batch size. + global_step: The global step the checkpoint was loaded from. + """ + global_step = tf.train.get_or_create_global_step() + inputs, targets, lengths, model, _ = create_dataset_and_model_fn( + config, split=config.split, shuffle=False, repeat=False) + # Compute lower bounds on the log likelihood. + elbo_ll_per_seq, _, _ = bounds.iwae( + model, (inputs, targets), lengths, num_samples=1, + parallel_iterations=config.parallel_iterations + ) + iwae_ll_per_seq, _, _ = bounds.iwae( + model, (inputs, targets), lengths, num_samples=config.num_samples, + parallel_iterations=config.parallel_iterations + ) + # The resampling type should only be used for training, so we ignore it. 
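The MonitoredTrainingSession loop above completes run_train. A hedged sketch of a driver that invokes it: the real flag definitions live in fivo.py (referenced in the docstring but not part of this excerpt), so the flag shown here is an assumption.

```python
import tensorflow as tf
from fivo import runners

# Assumed flag for illustration; fivo.py defines the full set.
tf.app.flags.DEFINE_string("logdir", "/tmp/fivo-vrnn", "Checkpoint directory.")
# ... remaining flags (bound, model, dataset_path, ...) follow the same pattern.
FLAGS = tf.app.flags.FLAGS


def main(unused_argv):
  tf.logging.set_verbosity(tf.logging.INFO)
  runners.run_train(FLAGS)

if __name__ == "__main__":
  tf.app.run(main)
```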
+ fivo_ll_per_seq, _, _, _ = bounds.fivo( + model, (inputs, targets), lengths, num_samples=config.num_samples, + resampling_criterion=smc.ess_criterion, random_seed=config.random_seed, + parallel_iterations=config.parallel_iterations + ) + elbo_ll = tf.reduce_sum(elbo_ll_per_seq) + iwae_ll = tf.reduce_sum(iwae_ll_per_seq) + fivo_ll = tf.reduce_sum(fivo_ll_per_seq) + batch_size = tf.shape(lengths)[0] + total_batch_length = tf.reduce_sum(lengths) + return ((elbo_ll, iwae_ll, fivo_ll), total_batch_length, batch_size, + global_step) + + def average_bounds_over_dataset(lower_bounds, total_batch_length, batch_size, + sess): + """Computes the values of the bounds, averaged over the datset. + + Args: + lower_bounds: Tuple of float Tensors containing the values of the bounds + evaluated on a single batch. + total_batch_length: Integer Tensor that represents the total number of + timesteps in the current batch. + batch_size: Integer Tensor containing the batch size. This can vary if the + requested batch_size does not evenly divide the size of the dataset. + sess: A TensorFlow Session object. + Returns: + ll_per_t: A length 3 numpy array of floats containing each bound's average + value, normalized by the total number of timesteps in the datset. Can + be interpreted as a lower bound on the average log likelihood per + timestep in the dataset. + ll_per_seq: A length 3 numpy array of floats containing each bound's + average value, normalized by the number of sequences in the dataset. + Can be interpreted as a lower bound on the average log likelihood per + sequence in the datset. + """ + total_ll = np.zeros(3, dtype=np.float64) + total_n_elems = 0.0 + total_length = 0.0 + while True: + try: + outs = sess.run([lower_bounds, batch_size, total_batch_length]) + except tf.errors.OutOfRangeError: + break + total_ll += outs[0] + total_n_elems += outs[1] + total_length += outs[2] + ll_per_t = total_ll / total_length + ll_per_seq = total_ll / total_n_elems + return ll_per_t, ll_per_seq + + def summarize_lls(lls_per_t, lls_per_seq, summary_writer, step): + """Creates log-likelihood lower bound summaries and writes them to disk. + + Args: + lls_per_t: An array of 3 python floats, contains the values of the + evaluated bounds normalized by the number of timesteps. + lls_per_seq: An array of 3 python floats, contains the values of the + evaluated bounds normalized by the number of sequences. + summary_writer: A tf.SummaryWriter. + step: The current global step. + """ + def scalar_summary(name, value): + value = tf.Summary.Value(tag=name, simple_value=value) + return tf.Summary(value=[value]) + + for i, bound in enumerate(["elbo", "iwae", "fivo"]): + per_t_summary = scalar_summary("%s/%s_ll_per_t" % (config.split, bound), + lls_per_t[i]) + per_seq_summary = scalar_summary("%s/%s_ll_per_seq" % + (config.split, bound), + lls_per_seq[i]) + summary_writer.add_summary(per_t_summary, global_step=step) + summary_writer.add_summary(per_seq_summary, global_step=step) + summary_writer.flush() + + with tf.Graph().as_default(): + if config.random_seed: tf.set_random_seed(config.random_seed) + lower_bounds, total_batch_length, batch_size, global_step = create_graph() + summary_dir = config.logdir + "/" + config.split + summary_writer = tf.summary.FileWriter( + summary_dir, flush_secs=15, max_queue=100) + saver = tf.train.Saver() + with tf.train.SingularMonitoredSession() as sess: + wait_for_checkpoint(saver, sess, config.logdir) + step = sess.run(global_step) + tf.logging.info("Model restored from step %d, evaluating." 
% step) + ll_per_t, ll_per_seq = average_bounds_over_dataset( + lower_bounds, total_batch_length, batch_size, sess) + summarize_lls(ll_per_t, ll_per_seq, summary_writer, step) + tf.logging.info("%s elbo ll/t: %f, iwae ll/t: %f fivo ll/t: %f", + config.split, ll_per_t[0], ll_per_t[1], ll_per_t[2]) + tf.logging.info("%s elbo ll/seq: %f, iwae ll/seq: %f fivo ll/seq: %f", + config.split, ll_per_seq[0], ll_per_seq[1], ll_per_seq[2]) + + +def run_sample(config, create_dataset_and_model_fn=create_dataset_and_model): + """Sample from the model. Only pianorolls and pose datasets are supported.""" + + def sample_from_model(model, initial_state, initial_inputs, mean): + """Samples a sequence of outputs from the model. + + The mean must be supplied -- if it isn't the results will be incorrect. + + Args: + model: A model with sample_step implemented. See models/vrnn.py for an + example. + initial_state: The initial state of the model. + initial_inputs: The initial inputs to feed into the model. + mean: The mean of the training set, a Tensor of shape [data_dimension]. + Returns: + samples: A Tensor of shape [sample_length, batch_size, num_timesteps, + data_dimension] containing the samples from the model. + """ + initial_state, initial_output = model.sample_step(initial_state, + initial_inputs, 0) + output_ta = tf.TensorArray(size=config.sample_length, + dtype=tf.float32, + dynamic_size=False, + clear_after_read=True) + output_ta = output_ta.write(0, initial_output) + t0 = tf.constant(1, dtype=tf.int32) + + def sample_step(t, state, prev_outputs, output_ta): + state, output = model.sample_step(state, prev_outputs, t) + output_ta = output_ta.write(t, output) + centered_output = output - mean[tf.newaxis, :] + return t+1, state, centered_output, output_ta + + def sample_predicate(t, *unused_args): + return t < config.sample_length + + _, _, _, output_ta = tf.while_loop( + sample_predicate, + sample_step, + loop_vars=(t0, initial_state, initial_output, output_ta), + parallel_iterations=config.parallel_iterations + ) + samples = output_ta.stack() + samples = tf.reshape(samples, [config.sample_length, config.batch_size, + config.num_samples, config.data_dimension]) + return samples + + def create_graph(): + """Creates the graph to sample from the model. + + First, the model is conditioned on a prefix by sampling a batch of data + and trimming it to prefix_length. The configured bound is used to do the + conditioning. Then the final state from the conditioning is used to sample + from the model. + + Returns: + samples: A Tensor of shape [sample_length, batch_size, + num_samples, data_dimension] representing samples from the model. + prefixes: A Tensor of shape [prefix_length, batch_size, data_dimension] + representing the prefixes the model was conditioned on. 
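Relative to training, run_sample needs a few extra config fields; this hedged sketch mirrors what runners_test.py sets (values are placeholders) and shows how the written archive is read back, continuing the config object sketched earlier.

```python
import os
import numpy as np

# `config` and `runners` as in the earlier sketch; values are placeholders.
config.prefix_length = 16     # timesteps used to condition the model
config.sample_length = 64     # timesteps sampled after the prefix
config.sample_out_dir = None  # None falls back to config.logdir
runners.run_sample(config)

archive = np.load(os.path.join(config.logdir, "samples.npz"))
```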
+ """ + inputs, targets, lengths, model, mean = create_dataset_and_model_fn( + config, split=config.split, shuffle=True, repeat=True) + input_prefixes = inputs[:config.prefix_length] + target_prefixes = targets[:config.prefix_length] + prefix_lengths = tf.ones_like(lengths) * config.prefix_length + if config.bound == "elbo": + _, _, state = bounds.iwae( + model, (input_prefixes, target_prefixes), + prefix_lengths, num_samples=1) + elif config.bound == "iwae": + _, _, state = bounds.iwae( + model, (input_prefixes, target_prefixes), + prefix_lengths, num_samples=config.num_samples) + elif config.bound == "fivo": + _, _, _, state = bounds.fivo( + model, (input_prefixes, target_prefixes), prefix_lengths, + num_samples=config.num_samples, + resampling_criterion=smc.ess_criterion, + random_seed=config.random_seed) + sample_inputs = tf.tile(inputs[config.prefix_length], + [config.num_samples, 1]) + samples = sample_from_model(model, state, sample_inputs, mean) + return samples, target_prefixes + + with tf.Graph().as_default(): + if config.random_seed: + tf.set_random_seed(config.random_seed) + samples, prefixes = create_graph() + if config.sample_out_dir: + out_dir = config.sample_our_dir + else: + out_dir = config.logdir + if not tf.gfile.Exists(out_dir): + tf.gfile.MakeDirs(out_dir) + with tf.train.SingularMonitoredSession( + checkpoint_dir=config.logdir) as sess: + samples_out, prefixes_out = sess.run([samples, prefixes]) + with tf.gfile.Open(os.path.join(out_dir, "samples.npz"), "w") as fout: + np.save(fout, {"prefixes": prefixes_out, "samples": samples_out}) diff --git a/models/research/fivo/fivo/runners_test.py b/models/research/fivo/fivo/runners_test.py new file mode 100644 index 0000000000000000000000000000000000000000..eb050c0a0b38b2511f3d2fb9ec846e63ead3b5ac --- /dev/null +++ b/models/research/fivo/fivo/runners_test.py @@ -0,0 +1,242 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for fivo.runners""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import numpy as np +import tensorflow as tf + +from fivo import runners +from fivo.models import base +from fivo.models import vrnn + +FLAGS = tf.app.flags.FLAGS + + +class RunnersTest(tf.test.TestCase): + + def default_config(self): + class Config(object): + pass + config = Config() + config.model = "vrnn" + config.latent_size = 64 + config.batch_size = 4 + config.num_samples = 4 + config.resampling_type = "multinomial" + config.normalize_by_seq_len = True + config.learning_rate = 0.0001 + config.max_steps = int(1e6) + config.summarize_every = 50 + # Master must be "" to prevent state from persisting between sessions. 
+ config.master = "" + config.task = 0 + config.ps_tasks = 0 + config.stagger_workers = True + config.random_seed = 1234 + config.parallel_iterations = 1 + config.dataset_type = "pianoroll" + config.data_dimension = None + config.dataset_path = os.path.join( + os.path.dirname(os.path.realpath(__file__)), + "test_data", "tiny_pianoroll.pkl") + config.proposal_type = "filtering" + return config + + def run_training_one_step(self, bound, dataset_type, data_dimension, + dataset_filename, dir_prefix, resampling_type, + model, batch_size=2, num_samples=3, + create_dataset_and_model_fn=(runners.create_dataset_and_model)): + config = self.default_config() + config.model = model + config.resampling_type = resampling_type + config.relaxed_resampling_temperature = 0.5 + config.bound = bound + config.split = "train" + config.dataset_type = dataset_type + config.dataset_path = os.path.join( + os.path.dirname(os.path.realpath(__file__)), + "test_data", + dataset_filename) + config.max_steps = 1 + config.batch_size = batch_size + config.num_samples = num_samples + config.latent_size = 4 + config.data_dimension = data_dimension + config.logdir = os.path.join(tf.test.get_temp_dir(), "%s-%s-%s-%s" % + (dir_prefix, bound, dataset_type, model)) + runners.run_train(config, + create_dataset_and_model_fn=create_dataset_and_model_fn) + return config + + def dummmy_dataset_and_model_fn(self, *unused_args, **unused_kwargs): + # We ignore the arguments in the dummy but need to preserve prototype. + batch_elements = 5 + sequence_length = 4 + data_dimensions = 3 + dataset = tf.data.Dataset.from_tensors( + tf.zeros((sequence_length, batch_elements, data_dimensions), + dtype=tf.float32)) + inputs = dataset.make_one_shot_iterator().get_next() + targets = tf.zeros_like(inputs) + lengths = tf.constant([sequence_length] * batch_elements) + mean = tf.constant((0.0, 0.0, 0.0)) + model = vrnn.create_vrnn(data_dimensions, 1, + base.ConditionalNormalDistribution) + return inputs, targets, lengths, model, mean + + def test_training_one_step_fivo_pianoroll_vrnn(self): + self.run_training_one_step("fivo", "pianoroll", 88, "tiny_pianoroll.pkl", + "test-training", "multinomial", "vrnn") + + def test_training_one_step_iwae_pianoroll_vrnn(self): + self.run_training_one_step("iwae", "pianoroll", 88, "tiny_pianoroll.pkl", + "test-training", "multinomial", "vrnn") + + def test_training_one_step_elbo_pianoroll_vrnn(self): + self.run_training_one_step("elbo", "pianoroll", 88, "tiny_pianoroll.pkl", + "test-training", "multinomial", "vrnn") + + def test_training_one_step_fivo_speech_vrnn(self): + self.run_training_one_step("fivo", "speech", 2, "tiny_speech_dataset.tfrecord", + "test-training", "multinomial", "vrnn") + + def test_training_one_step_iwae_speech_vrnn(self): + self.run_training_one_step("iwae", "speech", 2, "tiny_speech_dataset.tfrecord", + "test-training", "multinomial", "vrnn") + + def test_training_one_step_elbo_speech_vrnn(self): + self.run_training_one_step("elbo", "speech", 2, "tiny_speech_dataset.tfrecord", + "test-training", "multinomial", "vrnn") + + def test_training_one_step_fivo_pianoroll_srnn(self): + self.run_training_one_step("fivo", "pianoroll", 88, "tiny_pianoroll.pkl", + "test-training", "multinomial", "srnn") + + def test_training_one_step_iwae_pianoroll_srnn(self): + self.run_training_one_step("iwae", "pianoroll", 88, "tiny_pianoroll.pkl", + "test-training", "multinomial", "srnn") + + def test_training_one_step_elbo_pianoroll_srnn(self): + self.run_training_one_step("elbo", "pianoroll", 88, 
"tiny_pianoroll.pkl", + "test-training", "multinomial", "srnn") + + def test_training_one_step_fivo_speech_srnn(self): + self.run_training_one_step("fivo", "speech", 2, "tiny_speech_dataset.tfrecord", + "test-training", "multinomial", "srnn") + + def test_training_one_step_iwae_speech_srnn(self): + self.run_training_one_step("iwae", "speech", 2, "tiny_speech_dataset.tfrecord", + "test-training", "multinomial", "srnn") + + def test_training_one_step_elbo_speech_srnn(self): + self.run_training_one_step("elbo", "speech", 2, "tiny_speech_dataset.tfrecord", + "test-training", "multinomial", "srnn") + + def test_training_one_step_fivo_pianoroll_vrnn_relaxed(self): + self.run_training_one_step("fivo", "pianoroll", 88, "tiny_pianoroll.pkl", + "test-training", "relaxed", "vrnn") + + def test_training_one_step_iwae_pianoroll_vrnn_relaxed(self): + self.run_training_one_step("iwae", "pianoroll", 88, "tiny_pianoroll.pkl", + "test-training", "relaxed", "vrnn") + + def test_training_one_step_elbo_pianoroll_vrnn_relaxed(self): + self.run_training_one_step("elbo", "pianoroll", 88, "tiny_pianoroll.pkl", + "test-training", "relaxed", "vrnn") + + def test_training_one_step_fivo_pianoroll_srnn_relaxed(self): + self.run_training_one_step("fivo", "pianoroll", 88, "tiny_pianoroll.pkl", + "test-training", "relaxed", "srnn") + + def test_training_one_step_iwae_pianoroll_srnn_relaxed(self): + self.run_training_one_step("iwae", "pianoroll", 88, "tiny_pianoroll.pkl", + "test-training", "relaxed", "srnn") + + def test_training_one_step_elbo_pianoroll_srnn_relaxed(self): + self.run_training_one_step("elbo", "pianoroll", 88, "tiny_pianoroll.pkl", + "test-training", "relaxed", "srnn") + + def test_eval_vrnn(self): + self.run_eval("vrnn") + + def test_eval_srnn(self): + self.run_eval("srnn") + + def run_eval(self, model): + config = self.run_training_one_step( + "fivo", "pianoroll", 88, "tiny_pianoroll.pkl", "test-eval-" + model, + "multinomial", model) + config.split = "train" + runners.run_eval(config) + + def test_sampling_vrnn(self): + self.run_sampling("vrnn") + + def test_sampling_srnn(self): + self.run_sampling("srnn") + + def run_sampling(self, model): + """Test sampling from the model.""" + config = self.run_training_one_step( + "fivo", "pianoroll", 88, "tiny_pianoroll.pkl", "test-sampling", "multinomial", + model) + config.prefix_length = 3 + config.sample_length = 6 + config.split = "train" + config.sample_out_dir = None + + runners.run_sample(config) + unused_samples = np.load(os.path.join(config.logdir, "samples.npz")) + + def test_training_with_custom_fn(self): + self.run_training_one_step( + "fivo", "pianoroll", 3, "tiny_pianoroll.pkl", + "test-training-custom-fn", "multinomial", "vrnn", batch_size=5, + create_dataset_and_model_fn=self.dummmy_dataset_and_model_fn) + + def test_eval_with_custom_fn(self): + config = self.run_training_one_step( + "fivo", "pianoroll", 1, "tiny_pianoroll.pkl", + "test-eval-custom-fn", "multinomial", "vrnn", batch_size=1, + create_dataset_and_model_fn=self.dummmy_dataset_and_model_fn) + config.split = "train" + runners.run_eval( + config, + create_dataset_and_model_fn=self.dummmy_dataset_and_model_fn) + + def test_sampling_with_custom_fn(self): + config = self.run_training_one_step( + "fivo", "pianoroll", 3, "tiny_pianoroll.pkl", + "test-sample-custom-fn", "multinomial", "vrnn", batch_size=5, + create_dataset_and_model_fn=self.dummmy_dataset_and_model_fn) + config.prefix_length = 2 + config.sample_length = 3 + config.split = "train" + config.sample_out_dir = None + + 
runners.run_sample( + config, + create_dataset_and_model_fn=self.dummmy_dataset_and_model_fn) + unused_samples = np.load(os.path.join(config.logdir, "samples.npz")) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/research/fivo/fivo/smc.py b/models/research/fivo/fivo/smc.py new file mode 100644 index 0000000000000000000000000000000000000000..25d4969043e2cb8bc2c2c7a3770d3d2dfcca0bef --- /dev/null +++ b/models/research/fivo/fivo/smc.py @@ -0,0 +1,338 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Implementation of sequential Monte Carlo algorithms. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +import fivo.nested_utils as nested + + +def ess_criterion(log_weights, unused_t): + """A criterion that resamples based on effective sample size.""" + num_particles = tf.shape(log_weights)[0] + # Calculate the effective sample size. + ess_num = 2 * tf.reduce_logsumexp(log_weights, axis=0) + ess_denom = tf.reduce_logsumexp(2 * log_weights, axis=0) + log_ess = ess_num - ess_denom + return log_ess <= tf.log(tf.to_float(num_particles) / 2.0) + + +def never_resample_criterion(log_weights, unused_t): + """A criterion that never resamples.""" + batch_size = tf.shape(log_weights)[1] + return tf.cast(tf.zeros([batch_size]), tf.bool) + + +def always_resample_criterion(log_weights, unused_t): + """A criterion resamples at every timestep.""" + batch_size = tf.shape(log_weights)[1] + return tf.cast(tf.ones([batch_size]), tf.bool) + + +def multinomial_resampling(log_weights, states, num_particles, batch_size, + random_seed=None): + """Resample states with multinomial resampling. + + Args: + log_weights: A [num_particles, batch_size] Tensor representing a batch + of batch_size logits for num_particles-ary Categorical distribution. + states: A nested list of [batch_size*num_particles, data_size] Tensors that + will be resampled from the groups of every num_particles-th row. + num_particles: The number of particles/samples. + batch_size: The batch size. + random_seed: The random seed to pass to the resampling operations in + the particle filter. Mainly useful for testing. + + Returns: + resampled_states: A nested list of [batch_size*num_particles, data_size] + Tensors resampled via multinomial sampling. + """ + # Calculate the ancestor indices via resampling. Because we maintain the + # log unnormalized weights, we pass the weights in as logits, allowing + # the distribution object to apply a softmax and normalize them. 
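The ess_criterion above works entirely in log space; an equivalent numpy check with placeholder weights (ESS = (sum w)^2 / sum w^2, resampling when ESS <= N/2) looks like this.

```python
import numpy as np
from scipy.special import logsumexp

log_w = np.log([[0.7], [0.2], [0.05], [0.05]])        # [num_particles, batch_size]
log_ess = 2 * logsumexp(log_w, axis=0) - logsumexp(2 * log_w, axis=0)
resample = log_ess <= np.log(log_w.shape[0] / 2.0)    # ESS ~ 1.87 <= 2 -> True
```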
+ resampling_parameters = tf.transpose(log_weights, perm=[1, 0]) + resampling_dist = tf.contrib.distributions.Categorical( + logits=resampling_parameters) + ancestors = tf.stop_gradient( + resampling_dist.sample(sample_shape=num_particles, seed=random_seed)) + + # Because the batch is flattened, we must modify ancestor_inds to index the + # proper samples. The particles in the ith filter are distributed every + # batch_size rows in the batch, and offset i rows from the top. So, to + # correct the indices we multiply by the batch_size and add the proper offset. + # Crucially, when ancestor_inds is flattened the layout of the batch is + # maintained. + offset = tf.expand_dims(tf.range(batch_size), 0) + ancestor_inds = tf.reshape(ancestors * batch_size + offset, [-1]) + + resampled_states = nested.gather_tensors(states, ancestor_inds) + return resampled_states + + +def _blend_tensor(blending_weights, tensor, num_particles, batch_size): + """Blend tensor according to the weights. + + The first dimension of tensor is actually a 2d index compacted to a 1d + index and similarly for blended_tensor. So if we index these Tensors + by [(i, j), k], then + + blended_tensor[(i, j), k] = + sum_l tensor[(l, j), :] * blending_weights[i, j, l]. + + Args: + blending_weights: [num_particles, batch_size, num_particles] weights where + the indices represent [sample index, batch index, blending weight index]. + tensor: [num_particles * batch_size, state_dim] Tensor to be blended. + num_particles: The number of particles/samples. + batch_size: The batch size. + + Returns: + blended_tensor: [num_particles*batch_size, state_dim] blended Tensor. + """ + # tensor is currently [num_particles * batch_size, state_dim], so we reshape + # it to [num_particles, batch_size, state_dim]. Then, transpose it to + # [batch_size, state_size, num_particles]. + tensor = tf.transpose( + tf.reshape(tensor, [num_particles, batch_size, -1]), perm=[1, 2, 0]) + blending_weights = tf.transpose(blending_weights, perm=[1, 2, 0]) + # blendeding_weights is [batch index, blending weight index, sample index]. + # Multiplying these gives a matrix of size [batch_size, state_size, + # num_particles]. + tensor = tf.matmul(tensor, blending_weights) + # transpose the tensor to be [num_particles, batch_size, state_size] + # and then reshape it to match the original format. + tensor = tf.reshape(tf.transpose(tensor, perm=[2, 0, 1]), + [num_particles*batch_size, -1]) + return tensor + + +def relaxed_resampling(log_weights, states, num_particles, batch_size, + temperature=0.5, random_seed=None): + """Resample states with relaxed resampling. + + Draw soft "ancestors" using the Gumbel-Softmax distribution. + + Args: + log_weights: A [num_particles, batch_size] Tensor representing a batch + of batch_size logits for num_particles-ary Categorical distribution. + states: A nested list of [batch_size * num_particles, d] Tensors that will + be resampled from the groups of every num_particles-th row. + num_particles: The number of particles/samples. + batch_size: The batch size. + temperature: The temperature used for the relaxed one hot distribution. + random_seed: The random seed to pass to the resampling operations in + the particle filter. Mainly useful for testing. + + Returns: + resampled_states: A nested list of [batch_size * num_particles, d] + Tensors resampled via multinomial sampling. + """ + # log_weights are [num_particles, batch_size], so we transpose to get a + # set of batch_size distributions over [0, num_particles). 
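The index arithmetic in multinomial_resampling above (ancestors * batch_size + offset) is easy to get wrong; a numpy sketch with toy sizes shows where particle k of filter j lands in the flattened batch.

```python
import numpy as np

num_particles, batch_size = 3, 2
ancestors = np.array([[2, 0],          # [num_particles, batch_size] sampled indices
                      [2, 1],
                      [0, 0]])
offset = np.arange(batch_size)[None, :]
flat_inds = (ancestors * batch_size + offset).reshape(-1)
# flat_inds == [4, 1, 4, 3, 0, 1]; row k*batch_size + j holds particle k of filter j.
```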
+ resampling_parameters = tf.transpose(log_weights, perm=[1, 0]) + resampling_dist = tf.contrib.distributions.RelaxedOneHotCategorical( + temperature, + logits=resampling_parameters) + + # Sample num_particles samples from the distribution, resulting in a + # [num_particles, batch_size, num_particles] Tensor that represents a set of + # [num_particles, batch_size] blending weights. The dimensions represent + # [particle index, batch index, blending weight index]. + ancestors = resampling_dist.sample(sample_shape=num_particles, + seed=random_seed) + def map_fn(tensor): + return _blend_tensor(ancestors, tensor, num_particles, batch_size) + + resampled_states = nested.map_nested(map_fn, states) + return resampled_states + + +def smc( + transition_fn, + num_steps, + num_particles=1, + resampling_criterion=ess_criterion, + resampling_fn=multinomial_resampling, + loop_fn=None, + parallel_iterations=30, + swap_memory=True): + """Run a sequential Monte Carlo (SMC) algorithm. + + This method runs an SMC algorithm that evolves systems of particles + using the supplied transition function for the specified number of steps. The + particles are optionally resampled using resampling_fn when indicated by + resampling_criterion. + + Args: + transition_fn: A callable that propogates a batch of particles one step. + Must accept as arguments a batch of particle states and the current + timestep. Must return the particle states one timestep in the future, the + incremental weights of each particle as a [num_samples*batch_size] float + Tensor, and optionally a set of arguments to pass to the loop_fn. If + the loop args are not provided, they will be set to None. Before the + first timestep transition_fn will be called with the arguments None, -1 + and should return the initial particle states. + num_steps: A [batch_size] Tensor of ints representing the number of steps + to run each filter for. + num_particles: A scalar int, the number of particles to use in each filter. + resampling_criterion: The resampling criterion to use for this particle + filter. Must accept the current log weights and timestep and + return a boolean Tensor of shape [batch_size] indicating whether each + particle filter should resample. See ess_criterion and related functions + for examples. When resampling_criterion is never_resample_criterion, + resampling_fn is ignored and never called. + resampling_fn: A callable that performs the resampling operation. Must + accept as arguments the log weights, particle states, num_particles, + and batch_size and return the resampled particle states. See + multinomial_resampling and relaxed_resampling for examples. + loop_fn: A callable that performs operations on the weights and + particle states, useful for accumulating and processing state that + shouldn't be resampled. At each timestep after (possibly) resampling + loop_fn will be called with the previous loop_state, a set of arguments + produced by transition_fn called loop_args, the resampled particle states, + the current log weights as [num_particles, batch_size] float Tensor, a + [batch_size] float Tensor representing whether or not each filter + resampled, the current mask indicating which filters are active, and the + current timestep. It must return the next loop state. Before the first + timestep loop_fn will be called with the arguments None, None, None, None, + -1 and must return the initial loop state. The loop state can be a + possibly nested structure of Tensors and TensorArrays. 
+ parallel_iterations: The number of parallel iterations to use for the + internal while loop. Note that values greater than 1 can introduce + non-determinism even when resampling is deterministic. + swap_memory: Whether GPU-CPU memory swapping should be enabled for the + internal while loop. + + Returns: + log_z_hat: A Tensor of shape [batch_size] containing an estimate of the log + normalizing constant that converts between the unormalized target + distribution (as defined by the weights) and the true target distribution. + log_weights: A Tensor of shape [max_num_steps, batch_size, num_particles] + containing the log weights at each timestep of the particle filter. + Will not be valid for timesteps past the supplied num_steps. + resampled: A float Tensor of shape [max_num_steps, batch_size] indicating + when the particle filters resampled. Will be 1.0 on timesteps when + resampling occurred and 0.0 on timesteps when it did not. + final_loop_state: The final state returned by loop_fn. If loop_fn is None + then 0 will be returned. + """ + # batch_size represents the number of particle filters running in parallel. + batch_size = tf.shape(num_steps)[0] + # Create a TensorArray where element t is the [num_particles*batch_size] + # sequence mask for timestep t. + max_num_steps = tf.reduce_max(num_steps) + seq_mask = tf.transpose( + tf.sequence_mask(num_steps, maxlen=max_num_steps, dtype=tf.float32), + perm=[1, 0]) + seq_mask = tf.tile(seq_mask, [1, num_particles]) + mask_ta = tf.TensorArray(seq_mask.dtype, + max_num_steps, + name='mask_ta') + mask_ta = mask_ta.unstack(seq_mask) + # Initialize the state. + t0 = tf.constant(0, tf.int32) + init_particle_state = transition_fn(None, -1) + + def transition(*args): + transition_outs = transition_fn(*args) + if len(transition_outs) == 2: + return transition_outs + (None,) + else: + return transition_outs + + if loop_fn is None: + loop_fn = lambda *args: 0 + + init_loop_state = loop_fn(None, None, None, None, None, None, -1) + init_states = (init_particle_state, init_loop_state) + ta_names = ['log_weights', 'resampled'] + tas = [tf.TensorArray(tf.float32, max_num_steps, name='%s_ta' % n) + for n in ta_names] + log_weights_acc = tf.zeros([num_particles, batch_size], dtype=tf.float32) + log_z_hat_acc = tf.zeros([batch_size], dtype=tf.float32) + + def while_predicate(t, *unused_args): + return t < max_num_steps + + def while_step(t, state, tas, log_weights_acc, log_z_hat_acc): + """Implements one timestep of the particle filter.""" + particle_state, loop_state = state + cur_mask = nested.read_tas(mask_ta, t) + # Propagate the particles one step. + log_alpha, new_particle_state, loop_args = transition(particle_state, t) + # Update the current weights with the incremental weights. + log_alpha *= cur_mask + log_alpha = tf.reshape(log_alpha, [num_particles, batch_size]) + log_weights_acc += log_alpha + + should_resample = resampling_criterion(log_weights_acc, t) + + if resampling_criterion == never_resample_criterion: + resampled = tf.to_float(should_resample) + else: + # Compute the states as if we did resample. + resampled_states = resampling_fn( + log_weights_acc, + new_particle_state, + num_particles, + batch_size) + # Decide whether or not we should resample; don't resample if we are past + # the end of a sequence. + should_resample = tf.logical_and(should_resample, + cur_mask[:batch_size] > 0.) 
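For a concrete picture of the `transition_fn` contract described in the docstring, here is a minimal, invented transition callable (a sketch, not taken from the FIVO models): on the initialization call `(None, -1)` it returns the initial particle states, and on every later call it returns the incremental log-weights followed by the new states, which is the order `while_step` above unpacks and the order used by `_simple_transition_fn` in the tests.

```python
import tensorflow as tf
from fivo import smc

num_particles, batch_size = 4, 2

def toy_transition_fn(state, t):
  """Toy Gaussian random walk scored against a unit Gaussian (illustration only)."""
  if state is None:
    # Initialization call: one row per (particle, batch element) pair.
    return tf.zeros([num_particles * batch_size])
  new_state = state + tf.random_normal([num_particles * batch_size])
  # Invented incremental log-weight for each particle.
  incremental_log_weights = -0.5 * tf.square(new_state)
  return incremental_log_weights, new_state

log_z_hat, log_weights, resampled, _, _ = smc.smc(
    toy_transition_fn,
    num_steps=tf.constant([5, 3]),
    num_particles=num_particles,
    resampling_criterion=smc.ess_criterion)
```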
+ float_should_resample = tf.to_float(should_resample) + new_particle_state = nested.where_tensors( + tf.tile(should_resample, [num_particles]), + resampled_states, + new_particle_state) + resampled = float_should_resample + + new_loop_state = loop_fn(loop_state, loop_args, new_particle_state, + log_weights_acc, resampled, cur_mask, t) + # Update log Z hat. + log_z_hat_update = tf.reduce_logsumexp( + log_weights_acc, axis=0) - tf.log(tf.to_float(num_particles)) + # If it is the last timestep, always add the update. + log_z_hat_acc += tf.cond(t < max_num_steps - 1, + lambda: log_z_hat_update * resampled, + lambda: log_z_hat_update) + # Update the TensorArrays before we reset the weights so that we capture + # the incremental weights and not zeros. + ta_updates = [log_weights_acc, resampled] + new_tas = [ta.write(t, x) for ta, x in zip(tas, ta_updates)] + # For the particle filters that resampled, reset weights to zero. + log_weights_acc *= (1. - tf.tile(resampled[tf.newaxis, :], + [num_particles, 1])) + new_state = (new_particle_state, new_loop_state) + return t + 1, new_state, new_tas, log_weights_acc, log_z_hat_acc + + _, final_state, tas, _, log_z_hat = tf.while_loop( + while_predicate, + while_step, + loop_vars=(t0, init_states, tas, log_weights_acc, log_z_hat_acc), + parallel_iterations=parallel_iterations, + swap_memory=swap_memory) + + log_weights, resampled = [x.stack() for x in tas] + log_weights = tf.transpose(log_weights, perm=[0, 2, 1]) + final_particle_state, final_loop_state = final_state + return (log_z_hat, log_weights, resampled, + final_particle_state, final_loop_state) diff --git a/models/research/fivo/fivo/smc_test.py b/models/research/fivo/fivo/smc_test.py new file mode 100644 index 0000000000000000000000000000000000000000..ae32a62f21e037252bda44e3e1f47e007c9b7b9b --- /dev/null +++ b/models/research/fivo/fivo/smc_test.py @@ -0,0 +1,241 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for fivo.smc.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import scipy +import tensorflow as tf + +from fivo import smc + +lse = scipy.special.logsumexp + + +def _simple_transition_fn(state, unused_t): + if state is None: + return tf.zeros([4], dtype=tf.float32) + return tf.constant([5., 4., 1., 0.5]), tf.zeros([4], dtype=tf.float32) + + +def _resample_at_step_criterion(step): + """A criterion that resamples once at a specific timestep.""" + def criterion(log_weights, t): + batch_size = tf.shape(log_weights)[1] + return tf.fill([batch_size], tf.equal(t, step)) + return criterion + + +class SMCTest(tf.test.TestCase): + + def test_never_resampling(self): + """Test that never_resample_criterion makes smc not resample. + + Also test that the weights and log_z_hat are computed correctly when never + resampling. 
+ """ + tf.set_random_seed(1234) + with self.test_session() as sess: + outs = smc.smc( + _simple_transition_fn, + num_steps=tf.convert_to_tensor([5, 3]), + num_particles=2, + resampling_criterion=smc.never_resample_criterion) + log_z_hat, weights, resampled = sess.run(outs[0:3]) + gt_weights = np.array( + [[[5, 1], [4, .5]], + [[10, 2], [8, 1]], + [[15, 3], [12, 1.5]], + [[20, 4], [12, 1.5]], + [[25, 5], [12, 1.5]]], + dtype=np.float32) + gt_log_z_hat = np.array( + [lse([25, 5]) - np.log(2), + lse([12, 1.5]) - np.log(2)], + dtype=np.float32) + self.assertAllClose(gt_log_z_hat, log_z_hat) + self.assertAllClose(gt_weights, weights) + self.assertAllEqual(np.zeros_like(resampled), resampled) + + def test_always_resampling(self): + """Test always_resample_criterion makes smc always resample. + + Past a sequence end the filter should not resample, however. + Also check that weights and log_z_hat estimate are correct. + """ + tf.set_random_seed(1234) + with self.test_session() as sess: + outs = smc.smc( + _simple_transition_fn, + num_steps=tf.convert_to_tensor([5, 3]), + num_particles=2, + resampling_criterion=smc.always_resample_criterion) + log_z_hat, weights, resampled = sess.run(outs[0:3]) + gt_weights = np.array( + [[[5, 1], [4, .5]], + [[5, 1], [4, .5]], + [[5, 1], [4, .5]], + [[5, 1], [0., 0.]], + [[5, 1], [0., 0.]]], + dtype=np.float32) + gt_log_z_hat = np.array( + [5*lse([5, 1]) - 5*np.log(2), + 3*lse([4, .5]) - 3*np.log(2)], + dtype=np.float32) + gt_resampled = np.array( + [[1, 1], [1, 1], [1, 1], [1, 0], [1, 0]], + dtype=np.float32) + self.assertAllClose(gt_log_z_hat, log_z_hat) + self.assertAllClose(gt_weights, weights) + self.assertAllEqual(gt_resampled, resampled) + + def test_weights_reset_when_resampling_at_sequence_end(self): + """Test that the weights are reset when resampling at the sequence end. + + When resampling happens on the last timestep of a sequence the weights + should be set to zero on the next timestep and remain zero afterwards. + """ + tf.set_random_seed(1234) + with self.test_session() as sess: + outs = smc.smc( + _simple_transition_fn, + num_steps=tf.convert_to_tensor([5, 3]), + num_particles=2, + resampling_criterion=_resample_at_step_criterion(2)) + log_z_hat, weights, resampled = sess.run(outs[0:3]) + gt_log_z = np.array( + [lse([15, 3]) + lse([10, 2]) - 2*np.log(2), + lse([12, 1.5]) - np.log(2)], + dtype=np.float32) + gt_resampled = np.array( + [[0, 0], [0, 0], [1, 1], [0, 0], [0, 0]], + dtype=np.float32) + gt_weights = np.array( + [[[5, 1], [4, .5]], + [[10, 2], [8, 1]], + [[15, 3], [12, 1.5]], + [[5, 1], [0, 0]], + [[10, 2], [0, 0]]], + dtype=np.float32) + self.assertAllClose(gt_log_z, log_z_hat) + self.assertAllEqual(gt_resampled, resampled) + self.assertAllEqual(gt_weights, weights) + + def test_weights_not_updated_past_sequence_end(self): + """Test that non-zero weights are not updated past the end of a sequence.""" + tf.set_random_seed(1234) + with self.test_session() as sess: + outs = smc.smc( + _simple_transition_fn, + num_steps=tf.convert_to_tensor([6, 4]), + num_particles=2, + resampling_criterion=_resample_at_step_criterion(1)) + log_z_hat, weights, resampled = sess.run(outs[0:3]) + gt_log_z_hat = np.array( + [lse([10, 2]) + lse([20, 4]) - 2*np.log(2), + lse([8, 1]) + lse([8, 1]) - 2*np.log(2)], + dtype=np.float32) + # Ensure that we only resample on the 2nd timestep. + gt_resampled = np.array( + [[0, 0], [1, 1], [0, 0], [0, 0], [0, 0], [0, 0]], + dtype=np.float32) + # Ensure that the weights after the end of the sequence don't change. 
+ # Ensure that the weights after resampling before the end of the sequence + # do change. + gt_weights = np.array( + [[[5, 1], [4, .5]], + [[10, 2], [8, 1]], + [[5, 1], [4, .5]], + [[10, 2], [8, 1]], + [[15, 3], [8, 1]], + [[20, 4], [8, 1]]], + dtype=np.float32) + self.assertAllClose(gt_log_z_hat, log_z_hat) + self.assertAllEqual(gt_resampled, resampled) + self.assertAllEqual(gt_weights, weights) + + def test_resampling_on_max_num_steps(self): + """Test that everything is correct when resampling on step max_num_steps. + + When resampling on step max_num_steps (i.e. the last step of the longest + sequence), ensure that there are no off-by-one errors preventing resampling + and also that the weights are not updated. + """ + tf.set_random_seed(1234) + with self.test_session() as sess: + outs = smc.smc( + _simple_transition_fn, + num_steps=tf.convert_to_tensor([4, 2]), + num_particles=2, + resampling_criterion=_resample_at_step_criterion(3)) + log_z_hat, weights, resampled = sess.run(outs[0:3]) + gt_log_z_hat = np.array( + [lse([20, 4]) - np.log(2), + lse([8, 1]) - np.log(2)], + dtype=np.float32) + # Ensure that we only resample on the 3rd timestep and that the second + # filter doesn't resample at all because it is only run for 2 steps. + gt_resampled = np.array( + [[0, 0], [0, 0], [0, 0], [1, 0]], + dtype=np.float32) + gt_weights = np.array( + [[[5, 1], [4, .5]], + [[10, 2], [8, 1]], + [[15, 3], [8, 1]], + [[20, 4], [8, 1]]], + dtype=np.float32) + self.assertAllClose(gt_log_z_hat, log_z_hat) + self.assertAllEqual(gt_resampled, resampled) + self.assertAllEqual(gt_weights, weights) + + def test_multinomial_resampling(self): + """Test that mulitnomial resampling selects the correct states.""" + tf.set_random_seed(1234) + with self.test_session() as sess: + # Setup input. + inf = 1000.0 # Very large value in log space. + num_samples = 2 + batch_size = 2 + log_weights = tf.convert_to_tensor([[inf, 0], [0, inf]]) + states = tf.convert_to_tensor([1, 2, 3, 4]) + # Run test. + resampled_states = smc.multinomial_resampling( + log_weights, states, num_samples, batch_size, random_seed=0) + resampled_states_values = sess.run(resampled_states) + self.assertAllEqual(resampled_states_values, [1, 4, 1, 4]) + + def test_blend_tensor(self): + """Test that relaxed resampling blends the correct states.""" + tf.set_random_seed(1234) + with self.test_session() as sess: + # Setup input. + num_samples = 2 + batch_size = 2 + blending_weights = tf.convert_to_tensor( + [[[0.5, 0.5], [0.25, 0.75]], [[0.75, 0.25], [0.5, 0.5]]]) + states = tf.convert_to_tensor([4., 8., 12., 16.]) + # Run test. 
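# For reference, with the blending weights above and states reshaped to
# [particle, batch] = [[4., 8.], [12., 16.]], the formula from the
# _blend_tensor docstring gives:
#   blended[(0, 0)] = 0.5 * 4 + 0.5 * 12 = 8
#   blended[(0, 1)] = 0.25 * 8 + 0.75 * 16 = 14
#   blended[(1, 0)] = 0.75 * 4 + 0.25 * 12 = 6
#   blended[(1, 1)] = 0.5 * 8 + 0.5 * 16 = 12
# which are the expected values asserted below.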
+ blended_states = smc._blend_tensor(blending_weights, states, + num_samples, batch_size) + blended_states_values = sess.run(blended_states) + self.assertAllClose(blended_states_values[:, 0], [8., 14., 6., 12.]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/fivo/fivo/test_data/tiny_pianoroll.pkl b/models/research/fivo/fivo/test_data/tiny_pianoroll.pkl new file mode 100644 index 0000000000000000000000000000000000000000..c2e4639da96ff5735576cd45dcccb5e0cd1cabec Binary files /dev/null and b/models/research/fivo/fivo/test_data/tiny_pianoroll.pkl differ diff --git a/models/research/fivo/fivo/test_data/tiny_speech_dataset.tfrecord b/models/research/fivo/fivo/test_data/tiny_speech_dataset.tfrecord new file mode 100644 index 0000000000000000000000000000000000000000..93fe8791b631da35b9d03d37e6494cc7c50cb55d Binary files /dev/null and b/models/research/fivo/fivo/test_data/tiny_speech_dataset.tfrecord differ diff --git a/models/research/fivo/fivo/test_utils.py b/models/research/fivo/fivo/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..48bbd3d483c45457b82b12ac1587d4c314b79f49 --- /dev/null +++ b/models/research/fivo/fivo/test_utils.py @@ -0,0 +1,144 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Utilities for testing FIVO. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf +from fivo.models import base +from fivo.models import srnn +from fivo.models import vrnn + + +def create_vrnn(generative_class=base.ConditionalNormalDistribution, + batch_size=2, data_size=3, rnn_hidden_size=4, + latent_size=5, fcnet_hidden_size=7, encoded_data_size=9, + encoded_latent_size=11, num_timesteps=7, data_lengths=(7, 4), + use_tilt=False, random_seed=None): + """Creates a VRNN and some dummy data to feed it for testing purposes. + + Args: + generative_class: The class of the generative distribution. + batch_size: The number of elements per batch. + data_size: The dimension of the vectors that make up the data sequences. + rnn_hidden_size: The hidden state dimension of the RNN that forms the + deterministic part of this VRNN. + latent_size: The size of the stochastic latent state of the VRNN. + fcnet_hidden_size: The size of the hidden layer of the fully connected + networks that parameterize the conditional probability distributions + of the VRNN. + encoded_data_size: The size of the output of the data encoding network. + encoded_latent_size: The size of the output of the latent state encoding + network. + num_timesteps: The maximum number of timesteps in the data. + data_lengths: A tuple of size batch_size that contains the desired lengths + of each sequence in the dummy data. + use_tilt: Use a tilting function. + random_seed: A random seed to feed the VRNN, mainly useful for testing + purposes. 
+ + Returns: + model: A VRNN object. + inputs: A Tensor of shape [num_timesteps, batch_size, data_size], the inputs + to the model, also known as the observations. + targets: A Tensor of shape [num_timesteps, batch_size, data_size], the + desired outputs of the model. + lengths: A Tensor of shape [batch_size], the lengths of the sequences in the + batch. + """ + + fcnet_hidden_sizes = [fcnet_hidden_size] + initializers = {"w": tf.contrib.layers.xavier_initializer(seed=random_seed), + "b": tf.zeros_initializer()} + model = vrnn.create_vrnn( + data_size, + latent_size, + generative_class, + rnn_hidden_size=rnn_hidden_size, + fcnet_hidden_sizes=fcnet_hidden_sizes, + encoded_data_size=encoded_data_size, + encoded_latent_size=encoded_latent_size, + use_tilt=use_tilt, + initializers=initializers, + random_seed=random_seed) + inputs = tf.random_uniform([num_timesteps, batch_size, data_size], + seed=random_seed, dtype=tf.float32) + targets = tf.random_uniform([num_timesteps, batch_size, data_size], + seed=random_seed, dtype=tf.float32) + lengths = tf.constant(data_lengths, dtype=tf.int32) + return model, inputs, targets, lengths + + +def create_srnn(generative_class=base.ConditionalNormalDistribution, + batch_size=2, data_size=3, rnn_hidden_size=4, + latent_size=5, fcnet_hidden_size=7, encoded_data_size=3, + encoded_latent_size=2, num_timesteps=7, data_lengths=(7, 4), + use_tilt=False, random_seed=None): + """Creates a SRNN and some dummy data to feed it for testing purposes. + + Args: + generative_class: The class of the generative distribution. + batch_size: The number of elements per batch. + data_size: The dimension of the vectors that make up the data sequences. + rnn_hidden_size: The hidden state dimension of the RNN that forms the + deterministic part of this SRNN. + latent_size: The size of the stochastic latent state of the SRNN. + fcnet_hidden_size: The size of the hidden layer of the fully connected + networks that parameterize the conditional probability distributions + of the SRNN. + encoded_data_size: The size of the output of the data encoding network. + encoded_latent_size: The size of the output of the latent state encoding + network. + num_timesteps: The maximum number of timesteps in the data. + data_lengths: A tuple of size batch_size that contains the desired lengths + of each sequence in the dummy data. + use_tilt: Use a tilting function. + random_seed: A random seed to feed the SRNN, mainly useful for testing + purposes. + + Returns: + model: A SRNN object. + inputs: A Tensor of shape [num_timesteps, batch_size, data_size], the inputs + to the model, also known as the observations. + targets: A Tensor of shape [num_timesteps, batch_size, data_size], the + desired outputs of the model. + lengths: A Tensor of shape [batch_size], the lengths of the sequences in the + batch. 
+ """ + + fcnet_hidden_sizes = [fcnet_hidden_size] + initializers = {"w": tf.contrib.layers.xavier_initializer(seed=random_seed), + "b": tf.zeros_initializer()} + model = srnn.create_srnn( + data_size, + latent_size, + generative_class, + rnn_hidden_size=rnn_hidden_size, + fcnet_hidden_sizes=fcnet_hidden_sizes, + encoded_data_size=encoded_data_size, + encoded_latent_size=encoded_latent_size, + use_tilt=use_tilt, + initializers=initializers, + random_seed=random_seed) + inputs = tf.random_uniform([num_timesteps, batch_size, data_size], + seed=random_seed, dtype=tf.float32) + targets = tf.random_uniform([num_timesteps, batch_size, data_size], + seed=random_seed, dtype=tf.float32) + lengths = tf.constant(data_lengths, dtype=tf.int32) + return model, inputs, targets, lengths diff --git a/models/research/fivo/run_fivo.py b/models/research/fivo/run_fivo.py new file mode 100644 index 0000000000000000000000000000000000000000..1ca079421f09fb65439dae210b1c3760240b51ad --- /dev/null +++ b/models/research/fivo/run_fivo.py @@ -0,0 +1,142 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A script to run training for sequential latent variable models. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from fivo import ghmm_runners +from fivo import runners + +# Shared flags. +tf.app.flags.DEFINE_enum("mode", "train", + ["train", "eval", "sample"], + "The mode of the binary.") +tf.app.flags.DEFINE_enum("model", "vrnn", + ["vrnn", "ghmm", "srnn"], + "Model choice.") +tf.app.flags.DEFINE_integer("latent_size", 64, + "The size of the latent state of the model.") +tf.app.flags.DEFINE_enum("dataset_type", "pianoroll", + ["pianoroll", "speech", "pose"], + "The type of dataset.") +tf.app.flags.DEFINE_string("dataset_path", "", + "Path to load the dataset from.") +tf.app.flags.DEFINE_integer("data_dimension", None, + "The dimension of each vector in the data sequence. " + "Defaults to 88 for pianoroll datasets and 200 for speech " + "datasets. Should not need to be changed except for " + "testing.") +tf.app.flags.DEFINE_integer("batch_size", 4, + "Batch size.") +tf.app.flags.DEFINE_integer("num_samples", 4, + "The number of samples (or particles) for multisample " + "algorithms.") +tf.app.flags.DEFINE_string("logdir", "/tmp/smc_vi", + "The directory to keep checkpoints and summaries in.") +tf.app.flags.DEFINE_integer("random_seed", None, + "A random seed for seeding the TensorFlow graph.") +tf.app.flags.DEFINE_integer("parallel_iterations", 30, + "The number of parallel iterations to use for the while " + "loop that computes the bounds.") + +# Training flags. 
+tf.app.flags.DEFINE_enum("bound", "fivo", + ["elbo", "iwae", "fivo", "fivo-aux"], + "The bound to optimize.") +tf.app.flags.DEFINE_boolean("normalize_by_seq_len", True, + "If true, normalize the loss by the number of timesteps " + "per sequence.") +tf.app.flags.DEFINE_float("learning_rate", 0.0002, + "The learning rate for ADAM.") +tf.app.flags.DEFINE_integer("max_steps", int(1e9), + "The number of gradient update steps to train for.") +tf.app.flags.DEFINE_integer("summarize_every", 50, + "The number of steps between summaries.") +tf.app.flags.DEFINE_enum("resampling_type", "multinomial", + ["multinomial", "relaxed"], + "The resampling strategy to use for training.") +tf.app.flags.DEFINE_float("relaxed_resampling_temperature", 0.5, + "The relaxation temperature for relaxed resampling.") +tf.app.flags.DEFINE_enum("proposal_type", "filtering", + ["prior", "filtering", "smoothing", + "true-filtering", "true-smoothing"], + "The type of proposal to use. true-filtering and true-smoothing " + "are only available for the GHMM. The specific implementation " + "of each proposal type is left to model-writers.") + +# Distributed training flags. +tf.app.flags.DEFINE_string("master", "", + "The BNS name of the TensorFlow master to use.") +tf.app.flags.DEFINE_integer("task", 0, + "Task id of the replica running the training.") +tf.app.flags.DEFINE_integer("ps_tasks", 0, + "Number of tasks in the ps job. If 0 no ps job is used.") +tf.app.flags.DEFINE_boolean("stagger_workers", True, + "If true, bring one worker online every 1000 steps.") + +# Evaluation flags. +tf.app.flags.DEFINE_enum("split", "train", + ["train", "test", "valid"], + "Split to evaluate the model on.") + +# Sampling flags. +tf.app.flags.DEFINE_integer("sample_length", 50, + "The number of timesteps to sample for.") +tf.app.flags.DEFINE_integer("prefix_length", 25, + "The number of timesteps to condition the model on " + "before sampling.") +tf.app.flags.DEFINE_string("sample_out_dir", None, + "The directory to write the samples to. " + "Defaults to logdir.") + +# GHMM flags. 
+tf.app.flags.DEFINE_float("variance", 0.1, + "The variance of the ghmm.") +tf.app.flags.DEFINE_integer("num_timesteps", 5, + "The number of timesteps to run the gmp for.") +FLAGS = tf.app.flags.FLAGS + +PIANOROLL_DEFAULT_DATA_DIMENSION = 88 +SPEECH_DEFAULT_DATA_DIMENSION = 200 + + +def main(unused_argv): + tf.logging.set_verbosity(tf.logging.INFO) + if FLAGS.model in ["vrnn", "srnn"]: + if FLAGS.data_dimension is None: + if FLAGS.dataset_type == "pianoroll": + FLAGS.data_dimension = PIANOROLL_DEFAULT_DATA_DIMENSION + elif FLAGS.dataset_type == "speech": + FLAGS.data_dimension = SPEECH_DEFAULT_DATA_DIMENSION + if FLAGS.mode == "train": + runners.run_train(FLAGS) + elif FLAGS.mode == "eval": + runners.run_eval(FLAGS) + elif FLAGS.mode == "sample": + runners.run_sample(FLAGS) + elif FLAGS.model == "ghmm": + if FLAGS.mode == "train": + ghmm_runners.run_train(FLAGS) + elif FLAGS.mode == "eval": + ghmm_runners.run_eval(FLAGS) + +if __name__ == "__main__": + tf.app.run(main) diff --git a/models/research/global_objectives/README.md b/models/research/global_objectives/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f9a778c59d420f9bf5deccf4b2b45147636de582 --- /dev/null +++ b/models/research/global_objectives/README.md @@ -0,0 +1,152 @@ +![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +# Global Objectives +The Global Objectives library provides TensorFlow loss functions that optimize +directly for a variety of objectives including AUC, recall at precision, and +more. The global objectives losses can be used as drop-in replacements for +TensorFlow's standard multilabel loss functions: +`tf.nn.sigmoid_cross_entropy_with_logits` and `tf.losses.sigmoid_cross_entropy`. + +Many machine learning classification models are optimized for classification +accuracy, when the real objective the user cares about is different and can be +precision at a fixed recall, precision-recall AUC, ROC AUC or similar metrics. +These are referred to as "global objectives" because they depend on how the +model classifies the dataset as a whole and do not decouple across data points +as accuracy does. + +Because these objectives are combinatorial, discontinuous, and essentially +intractable to optimize directly, the functions in this library approximate +their corresponding objectives. This approximation approach follows the same +pattern as optimizing for accuracy, where a surrogate objective such as +cross-entropy or the hinge loss is used as an upper bound on the error rate. + +## Getting Started +For a full example of how to use the loss functions in practice, see +loss_layers_example.py. + +Briefly, global objective losses can be used to replace +`tf.nn.sigmoid_cross_entropy_with_logits` by providing the relevant +additional arguments. For example, + +``` python +tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits) +``` + +could be replaced with + +``` python +global_objectives.recall_at_precision_loss( + labels=labels, + logits=logits, + target_precision=0.95)[0] +``` + +Just as minimizing the cross-entropy loss will maximize accuracy, the loss +functions in loss_layers.py were written so that minimizing the loss will +maximize the corresponding objective. 
+ +The global objective losses have two return values -- the loss tensor and +additional quantities for debugging and customization -- which is why the first +value is used above. For more information, see +[Visualization & Debugging](#visualization-debugging). + +## Binary Label Format +Binary classification problems can be represented as a multi-class problem with +two classes, or as a multi-label problem with one label. (Recall that multiclass +problems have mutually exclusive classes, e.g. 'cat xor dog', and multilabel +problems have classes which are not mutually exclusive, e.g. an image can contain a cat, +a dog, both, or neither.) The softmax loss +(`tf.nn.softmax_cross_entropy_with_logits`) is used for multi-class problems, +while the sigmoid loss (`tf.nn.sigmoid_cross_entropy_with_logits`) is used for +multi-label problems. + +A multiclass label format for binary classification might represent positives +with the label [1, 0] and negatives with the label [0, 1], while the multilabel +format for the same problem would use [1] and [0], respectively. + +All global objectives loss functions assume that the multilabel format is used. +Accordingly, if your current loss function is softmax, the labels will have to +be reformatted for the loss to work properly. + +## Dual Variables +Global objectives losses (except for `roc_auc_loss`) use internal variables +called dual variables or Lagrange multipliers to enforce the desired constraint +(e.g. if optimizing for recall at precision, the constraint is on precision). + +These dual variables are created and initialized internally by the loss +functions, and are updated during training by the same optimizer used for the +model's other variables. To initialize the dual variables to a particular value, +use the `lambdas_initializer` argument. The dual variables can be found under +the key `lambdas` in the `other_outputs` dictionary returned by the losses. + +## Loss Function Arguments +The following arguments are common to all loss functions in the library, and are +either required or very important. + +* `labels`: Corresponds directly to the `labels` argument of + `tf.nn.sigmoid_cross_entropy_with_logits`. +* `logits`: Corresponds directly to the `logits` argument of + `tf.nn.sigmoid_cross_entropy_with_logits`. +* `dual_rate_factor`: A floating point value which controls the step size for + the Lagrange multipliers. Setting this value less than 1.0 will cause the + constraint to be enforced more gradually and will result in more stable + training. + +In addition, the objectives with a single constraint (e.g. +`recall_at_precision_loss`) have an argument (e.g. `target_precision`) used to +specify the value of the constraint. The optional `precision_range` argument to +`precision_recall_auc_loss` is used to specify the range of precision values +over which to optimize the AUC, and defaults to the interval [0, 1]. + +Optional arguments: + +* `weights`: A tensor which acts as coefficients for the loss. If a weight of x + is provided for a datapoint and that datapoint is a true (false) positive + (negative), it will be counted as x true (false) positives (negatives). + Defaults to 1.0. +* `label_priors`: A tensor specifying the fraction of positive datapoints for + each label. If not provided, it will be computed inside the loss function. +* `surrogate_type`: Either 'xent' or 'hinge', specifying which upper bound + should be used for indicator functions. +* `lambdas_initializer`: An initializer for the dual variables (Lagrange + multipliers).
See also the Dual Variables section. +* `num_anchors` (precision_recall_auc_loss only): The number of grid points used + when approximating the AUC as a Riemann sum. + +## Hyperparameters +While the functional form of the global objectives losses allow them to be +easily substituted in place of `sigmoid_cross_entropy_with_logits`, model +hyperparameters such as learning rate, weight decay, etc. may need to be +fine-tuned to the new loss. Fortunately, the amount of hyperparameter re-tuning +is usually minor. + +The most important hyperparameters to modify are the learning rate and +dual_rate_factor (see the section on Loss Function Arguments, above). + +## Visualization & Debugging +The global objectives losses return two values. The first is a tensor +representing the numerical value of the loss, which can be passed to an +optimizer. The second is a dictionary of tensors created by the loss function +which are not necessary for optimization but useful in debugging. These vary +depending on the loss function, but usually include `lambdas` (the Lagrange +multipliers) as well as the lower bound on true positives and upper bound on +false positives. + +When visualizing the loss during training, note that the global objectives +losses differ from standard losses in some important ways: + +* The global losses may be negative. This is because the value returned by the + loss includes terms involving the Lagrange multipliers, which may be negative. +* The global losses may not decrease over the course of training. To enforce the + constraints in the objective, the loss changes over time and may increase. + +## More Info +For more details, see the [Global Objectives paper](https://arxiv.org/abs/1608.04802). + +## Maintainers + +* Mariano Schain +* Elad Eban +* [Alan Mackey](https://github.com/mackeya-google) diff --git a/models/research/global_objectives/loss_layers.py b/models/research/global_objectives/loss_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..eaea05398ef3771247060afda63be184ea76cdf0 --- /dev/null +++ b/models/research/global_objectives/loss_layers.py @@ -0,0 +1,930 @@ +# Copyright 2018 The TensorFlow Global Objectives Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Loss functions for learning global objectives. + +These functions have two return values: a Tensor with the value of +the loss, and a dictionary of internal quantities for customizability. +""" + +# Dependency imports +import numpy +import tensorflow as tf + +from global_objectives import util + + +def precision_recall_auc_loss( + labels, + logits, + precision_range=(0.0, 1.0), + num_anchors=20, + weights=1.0, + dual_rate_factor=0.1, + label_priors=None, + surrogate_type='xent', + lambdas_initializer=tf.constant_initializer(1.0), + reuse=None, + variables_collections=None, + trainable=True, + scope=None): + """Computes precision-recall AUC loss. 
+ + The loss is based on a sum of losses for recall at a range of + precision values (anchor points). This sum is a Riemann sum that + approximates the area under the precision-recall curve. + + The per-example `weights` argument changes not only the coefficients of + individual training examples, but how the examples are counted toward the + constraint. If `label_priors` is given, it MUST take `weights` into account. + That is, + label_priors = P / (P + N) + where + P = sum_i (wt_i on positives) + N = sum_i (wt_i on negatives). + + Args: + labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels]. + logits: A `Tensor` with the same shape as `labels`. + precision_range: A length-two tuple, the range of precision values over + which to compute AUC. The entries must be nonnegative, increasing, and + less than or equal to 1.0. + num_anchors: The number of grid points used to approximate the Riemann sum. + weights: Coefficients for the loss. Must be a scalar or `Tensor` of shape + [batch_size] or [batch_size, num_labels]. + dual_rate_factor: A floating point value which controls the step size for + the Lagrange multipliers. + label_priors: None, or a floating point `Tensor` of shape [num_labels] + containing the prior probability of each label (i.e. the fraction of the + training data consisting of positive examples). If None, the label + priors are computed from `labels` with a moving average. See the notes + above regarding the interaction with `weights` and do not set this unless + you have a good reason to do so. + surrogate_type: Either 'xent' or 'hinge', specifying which upper bound + should be used for indicator functions. + lambdas_initializer: An initializer for the Lagrange multipliers. + reuse: Whether or not the layer and its variables should be reused. To be + able to reuse the layer scope must be given. + variables_collections: Optional list of collections for the variables. + trainable: If `True` also add variables to the graph collection + `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). + scope: Optional scope for `variable_scope`. + + Returns: + loss: A `Tensor` of the same shape as `logits` with the component-wise + loss. + other_outputs: A dictionary of useful internal quantities for debugging. For + more details, see http://arxiv.org/pdf/1608.04802.pdf. + lambdas: A Tensor of shape [1, num_labels, num_anchors] consisting of the + Lagrange multipliers. + biases: A Tensor of shape [1, num_labels, num_anchors] consisting of the + learned bias term for each. + label_priors: A Tensor of shape [1, num_labels, 1] consisting of the prior + probability of each label learned by the loss, if not provided. + true_positives_lower_bound: Lower bound on the number of true positives + given `labels` and `logits`. This is the same lower bound which is used + in the loss expression to be optimized. + false_positives_upper_bound: Upper bound on the number of false positives + given `labels` and `logits`. This is the same upper bound which is used + in the loss expression to be optimized. + + Raises: + ValueError: If `surrogate_type` is not `xent` or `hinge`. + """ + with tf.variable_scope(scope, + 'precision_recall_auc', + [labels, logits, label_priors], + reuse=reuse): + labels, logits, weights, original_shape = _prepare_labels_logits_weights( + labels, logits, weights) + num_labels = util.get_num_labels(logits) + + # Convert other inputs to tensors and standardize dtypes. 
+ dual_rate_factor = util.convert_and_cast( + dual_rate_factor, 'dual_rate_factor', logits.dtype) + + # Create Tensor of anchor points and distance between anchors. + precision_values, delta = _range_to_anchors_and_delta( + precision_range, num_anchors, logits.dtype) + # Create lambdas with shape [1, num_labels, num_anchors]. + lambdas, lambdas_variable = _create_dual_variable( + 'lambdas', + shape=[1, num_labels, num_anchors], + dtype=logits.dtype, + initializer=lambdas_initializer, + collections=variables_collections, + trainable=trainable, + dual_rate_factor=dual_rate_factor) + # Create biases with shape [1, num_labels, num_anchors]. + biases = tf.contrib.framework.model_variable( + name='biases', + shape=[1, num_labels, num_anchors], + dtype=logits.dtype, + initializer=tf.zeros_initializer(), + collections=variables_collections, + trainable=trainable) + # Maybe create label_priors. + label_priors = maybe_create_label_priors( + label_priors, labels, weights, variables_collections) + label_priors = tf.reshape(label_priors, [1, num_labels, 1]) + + # Expand logits, labels, and weights to shape [batch_size, num_labels, 1]. + logits = tf.expand_dims(logits, 2) + labels = tf.expand_dims(labels, 2) + weights = tf.expand_dims(weights, 2) + + # Calculate weighted loss and other outputs. The log(2.0) term corrects for + # logloss not being an upper bound on the indicator function. + loss = weights * util.weighted_surrogate_loss( + labels, + logits + biases, + surrogate_type=surrogate_type, + positive_weights=1.0 + lambdas * (1.0 - precision_values), + negative_weights=lambdas * precision_values) + maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0 + maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype) + lambda_term = lambdas * (1.0 - precision_values) * label_priors * maybe_log2 + per_anchor_loss = loss - lambda_term + per_label_loss = delta * tf.reduce_sum(per_anchor_loss, 2) + # Normalize the AUC such that a perfect score function will have AUC 1.0. + # Because precision_range is discretized into num_anchors + 1 intervals + # but only num_anchors terms are included in the Riemann sum, the + # effective length of the integration interval is `delta` less than the + # length of precision_range. + scaled_loss = tf.div(per_label_loss, + precision_range[1] - precision_range[0] - delta, + name='AUC_Normalize') + scaled_loss = tf.reshape(scaled_loss, original_shape) + + other_outputs = { + 'lambdas': lambdas_variable, + 'biases': biases, + 'label_priors': label_priors, + 'true_positives_lower_bound': true_positives_lower_bound( + labels, logits, weights, surrogate_type), + 'false_positives_upper_bound': false_positives_upper_bound( + labels, logits, weights, surrogate_type)} + + return scaled_loss, other_outputs + + +def roc_auc_loss( + labels, + logits, + weights=1.0, + surrogate_type='xent', + scope=None): + """Computes ROC AUC loss. + + The area under the ROC curve is the probability p that a randomly chosen + positive example will be scored higher than a randomly chosen negative + example. This loss approximates 1-p by using a surrogate (either hinge loss or + cross entropy) for the indicator function. Specifically, the loss is: + + sum_i sum_j w_i*w_j*loss(logit_i - logit_j) + + where i ranges over the positive datapoints, j ranges over the negative + datapoints, logit_k denotes the logit (or score) of the k-th datapoint, and + loss is either the hinge or log loss given a positive label. + + Args: + labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels]. 
+ logits: A `Tensor` with the same shape and dtype as `labels`. + weights: Coefficients for the loss. Must be a scalar or `Tensor` of shape + [batch_size] or [batch_size, num_labels]. + surrogate_type: Either 'xent' or 'hinge', specifying which upper bound + should be used for the indicator function. + scope: Optional scope for `name_scope`. + + Returns: + loss: A `Tensor` of the same shape as `logits` with the component-wise loss. + other_outputs: An empty dictionary, for consistency. + + Raises: + ValueError: If `surrogate_type` is not `xent` or `hinge`. + """ + with tf.name_scope(scope, 'roc_auc', [labels, logits, weights]): + # Convert inputs to tensors and standardize dtypes. + labels, logits, weights, original_shape = _prepare_labels_logits_weights( + labels, logits, weights) + + # Create tensors of pairwise differences for logits and labels, and + # pairwise products of weights. These have shape + # [batch_size, batch_size, num_labels]. + logits_difference = tf.expand_dims(logits, 0) - tf.expand_dims(logits, 1) + labels_difference = tf.expand_dims(labels, 0) - tf.expand_dims(labels, 1) + weights_product = tf.expand_dims(weights, 0) * tf.expand_dims(weights, 1) + + signed_logits_difference = labels_difference * logits_difference + raw_loss = util.weighted_surrogate_loss( + labels=tf.ones_like(signed_logits_difference), + logits=signed_logits_difference, + surrogate_type=surrogate_type) + weighted_loss = weights_product * raw_loss + + # Zero out entries of the loss where labels_difference zero (so loss is only + # computed on pairs with different labels). + loss = tf.reduce_mean(tf.abs(labels_difference) * weighted_loss, 0) * 0.5 + loss = tf.reshape(loss, original_shape) + return loss, {} + + +def recall_at_precision_loss( + labels, + logits, + target_precision, + weights=1.0, + dual_rate_factor=0.1, + label_priors=None, + surrogate_type='xent', + lambdas_initializer=tf.constant_initializer(1.0), + reuse=None, + variables_collections=None, + trainable=True, + scope=None): + """Computes recall at precision loss. + + The loss is based on a surrogate of the form + wt * w(+) * loss(+) + wt * w(-) * loss(-) - c * pi, + where: + - w(+) = 1 + lambdas * (1 - target_precision) + - loss(+) is the cross-entropy loss on the positive examples + - w(-) = lambdas * target_precision + - loss(-) is the cross-entropy loss on the negative examples + - wt is a scalar or tensor of per-example weights + - c = lambdas * (1 - target_precision) + - pi is the label_priors. + + The per-example weights change not only the coefficients of individual + training examples, but how the examples are counted toward the constraint. + If `label_priors` is given, it MUST take `weights` into account. That is, + label_priors = P / (P + N) + where + P = sum_i (wt_i on positives) + N = sum_i (wt_i on negatives). + + Args: + labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels]. + logits: A `Tensor` with the same shape as `labels`. + target_precision: The precision at which to compute the loss. Can be a + floating point value between 0 and 1 for a single precision value, or a + `Tensor` of shape [num_labels], holding each label's target precision + value. + weights: Coefficients for the loss. Must be a scalar or `Tensor` of shape + [batch_size] or [batch_size, num_labels]. + dual_rate_factor: A floating point value which controls the step size for + the Lagrange multipliers. + label_priors: None, or a floating point `Tensor` of shape [num_labels] + containing the prior probability of each label (i.e. 
the fraction of the + training data consisting of positive examples). If None, the label + priors are computed from `labels` with a moving average. See the notes + above regarding the interaction with `weights` and do not set this unless + you have a good reason to do so. + surrogate_type: Either 'xent' or 'hinge', specifying which upper bound + should be used for indicator functions. + lambdas_initializer: An initializer for the Lagrange multipliers. + reuse: Whether or not the layer and its variables should be reused. To be + able to reuse the layer scope must be given. + variables_collections: Optional list of collections for the variables. + trainable: If `True` also add variables to the graph collection + `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). + scope: Optional scope for `variable_scope`. + + Returns: + loss: A `Tensor` of the same shape as `logits` with the component-wise + loss. + other_outputs: A dictionary of useful internal quantities for debugging. For + more details, see http://arxiv.org/pdf/1608.04802.pdf. + lambdas: A Tensor of shape [num_labels] consisting of the Lagrange + multipliers. + label_priors: A Tensor of shape [num_labels] consisting of the prior + probability of each label learned by the loss, if not provided. + true_positives_lower_bound: Lower bound on the number of true positives + given `labels` and `logits`. This is the same lower bound which is used + in the loss expression to be optimized. + false_positives_upper_bound: Upper bound on the number of false positives + given `labels` and `logits`. This is the same upper bound which is used + in the loss expression to be optimized. + + Raises: + ValueError: If `logits` and `labels` do not have the same shape. + """ + with tf.variable_scope(scope, + 'recall_at_precision', + [logits, labels, label_priors], + reuse=reuse): + labels, logits, weights, original_shape = _prepare_labels_logits_weights( + labels, logits, weights) + num_labels = util.get_num_labels(logits) + + # Convert other inputs to tensors and standardize dtypes. + target_precision = util.convert_and_cast( + target_precision, 'target_precision', logits.dtype) + dual_rate_factor = util.convert_and_cast( + dual_rate_factor, 'dual_rate_factor', logits.dtype) + + # Create lambdas. + lambdas, lambdas_variable = _create_dual_variable( + 'lambdas', + shape=[num_labels], + dtype=logits.dtype, + initializer=lambdas_initializer, + collections=variables_collections, + trainable=trainable, + dual_rate_factor=dual_rate_factor) + # Maybe create label_priors. + label_priors = maybe_create_label_priors( + label_priors, labels, weights, variables_collections) + + # Calculate weighted loss and other outputs. The log(2.0) term corrects for + # logloss not being an upper bound on the indicator function. 
+ weighted_loss = weights * util.weighted_surrogate_loss( + labels, + logits, + surrogate_type=surrogate_type, + positive_weights=1.0 + lambdas * (1.0 - target_precision), + negative_weights=lambdas * target_precision) + maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0 + maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype) + lambda_term = lambdas * (1.0 - target_precision) * label_priors * maybe_log2 + loss = tf.reshape(weighted_loss - lambda_term, original_shape) + other_outputs = { + 'lambdas': lambdas_variable, + 'label_priors': label_priors, + 'true_positives_lower_bound': true_positives_lower_bound( + labels, logits, weights, surrogate_type), + 'false_positives_upper_bound': false_positives_upper_bound( + labels, logits, weights, surrogate_type)} + + return loss, other_outputs + + +def precision_at_recall_loss( + labels, + logits, + target_recall, + weights=1.0, + dual_rate_factor=0.1, + label_priors=None, + surrogate_type='xent', + lambdas_initializer=tf.constant_initializer(1.0), + reuse=None, + variables_collections=None, + trainable=True, + scope=None): + """Computes precision at recall loss. + + The loss is based on a surrogate of the form + wt * loss(-) + lambdas * (pi * (b - 1) + wt * loss(+)) + where: + - loss(-) is the cross-entropy loss on the negative examples + - loss(+) is the cross-entropy loss on the positive examples + - wt is a scalar or tensor of per-example weights + - b is the target recall + - pi is the label_priors. + + The per-example weights change not only the coefficients of individual + training examples, but how the examples are counted toward the constraint. + If `label_priors` is given, it MUST take `weights` into account. That is, + label_priors = P / (P + N) + where + P = sum_i (wt_i on positives) + N = sum_i (wt_i on negatives). + + Args: + labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels]. + logits: A `Tensor` with the same shape as `labels`. + target_recall: The recall at which to compute the loss. Can be a floating + point value between 0 and 1 for a single target recall value, or a + `Tensor` of shape [num_labels] holding each label's target recall value. + weights: Coefficients for the loss. Must be a scalar or `Tensor` of shape + [batch_size] or [batch_size, num_labels]. + dual_rate_factor: A floating point value which controls the step size for + the Lagrange multipliers. + label_priors: None, or a floating point `Tensor` of shape [num_labels] + containing the prior probability of each label (i.e. the fraction of the + training data consisting of positive examples). If None, the label + priors are computed from `labels` with a moving average. See the notes + above regarding the interaction with `weights` and do not set this unless + you have a good reason to do so. + surrogate_type: Either 'xent' or 'hinge', specifying which upper bound + should be used for indicator functions. + lambdas_initializer: An initializer for the Lagrange multipliers. + reuse: Whether or not the layer and its variables should be reused. To be + able to reuse the layer scope must be given. + variables_collections: Optional list of collections for the variables. + trainable: If `True` also add variables to the graph collection + `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). + scope: Optional scope for `variable_scope`. + + Returns: + loss: A `Tensor` of the same shape as `logits` with the component-wise + loss. + other_outputs: A dictionary of useful internal quantities for debugging. 
For + more details, see http://arxiv.org/pdf/1608.04802.pdf. + lambdas: A Tensor of shape [num_labels] consisting of the Lagrange + multipliers. + label_priors: A Tensor of shape [num_labels] consisting of the prior + probability of each label learned by the loss, if not provided. + true_positives_lower_bound: Lower bound on the number of true positives + given `labels` and `logits`. This is the same lower bound which is used + in the loss expression to be optimized. + false_positives_upper_bound: Upper bound on the number of false positives + given `labels` and `logits`. This is the same upper bound which is used + in the loss expression to be optimized. + """ + with tf.variable_scope(scope, + 'precision_at_recall', + [logits, labels, label_priors], + reuse=reuse): + labels, logits, weights, original_shape = _prepare_labels_logits_weights( + labels, logits, weights) + num_labels = util.get_num_labels(logits) + + # Convert other inputs to tensors and standardize dtypes. + target_recall = util.convert_and_cast( + target_recall, 'target_recall', logits.dtype) + dual_rate_factor = util.convert_and_cast( + dual_rate_factor, 'dual_rate_factor', logits.dtype) + + # Create lambdas. + lambdas, lambdas_variable = _create_dual_variable( + 'lambdas', + shape=[num_labels], + dtype=logits.dtype, + initializer=lambdas_initializer, + collections=variables_collections, + trainable=trainable, + dual_rate_factor=dual_rate_factor) + # Maybe create label_priors. + label_priors = maybe_create_label_priors( + label_priors, labels, weights, variables_collections) + + # Calculate weighted loss and other outputs. The log(2.0) term corrects for + # logloss not being an upper bound on the indicator function. + weighted_loss = weights * util.weighted_surrogate_loss( + labels, + logits, + surrogate_type, + positive_weights=lambdas, + negative_weights=1.0) + maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0 + maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype) + lambda_term = lambdas * label_priors * (target_recall - 1.0) * maybe_log2 + loss = tf.reshape(weighted_loss + lambda_term, original_shape) + other_outputs = { + 'lambdas': lambdas_variable, + 'label_priors': label_priors, + 'true_positives_lower_bound': true_positives_lower_bound( + labels, logits, weights, surrogate_type), + 'false_positives_upper_bound': false_positives_upper_bound( + labels, logits, weights, surrogate_type)} + + return loss, other_outputs + + +def false_positive_rate_at_true_positive_rate_loss( + labels, + logits, + target_rate, + weights=1.0, + dual_rate_factor=0.1, + label_priors=None, + surrogate_type='xent', + lambdas_initializer=tf.constant_initializer(1.0), + reuse=None, + variables_collections=None, + trainable=True, + scope=None): + """Computes false positive rate at true positive rate loss. + + Note that `true positive rate` is a synonym for Recall, and that minimizing + the false positive rate and maximizing precision are equivalent for a fixed + Recall. Therefore, this function is identical to precision_at_recall_loss. + + The per-example weights change not only the coefficients of individual + training examples, but how the examples are counted toward the constraint. + If `label_priors` is given, it MUST take `weights` into account. That is, + label_priors = P / (P + N) + where + P = sum_i (wt_i on positives) + N = sum_i (wt_i on negatives). + + Args: + labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels]. + logits: A `Tensor` with the same shape as `labels`. 
+ target_rate: The true positive rate at which to compute the loss. Can be a + floating point value between 0 and 1 for a single true positive rate, or + a `Tensor` of shape [num_labels] holding each label's true positive rate. + weights: Coefficients for the loss. Must be a scalar or `Tensor` of shape + [batch_size] or [batch_size, num_labels]. + dual_rate_factor: A floating point value which controls the step size for + the Lagrange multipliers. + label_priors: None, or a floating point `Tensor` of shape [num_labels] + containing the prior probability of each label (i.e. the fraction of the + training data consisting of positive examples). If None, the label + priors are computed from `labels` with a moving average. See the notes + above regarding the interaction with `weights` and do not set this unless + you have a good reason to do so. + surrogate_type: Either 'xent' or 'hinge', specifying which upper bound + should be used for indicator functions. 'xent' will use the cross-entropy + loss surrogate, and 'hinge' will use the hinge loss. + lambdas_initializer: An initializer op for the Lagrange multipliers. + reuse: Whether or not the layer and its variables should be reused. To be + able to reuse the layer scope must be given. + variables_collections: Optional list of collections for the variables. + trainable: If `True` also add variables to the graph collection + `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). + scope: Optional scope for `variable_scope`. + + Returns: + loss: A `Tensor` of the same shape as `logits` with the component-wise + loss. + other_outputs: A dictionary of useful internal quantities for debugging. For + more details, see http://arxiv.org/pdf/1608.04802.pdf. + lambdas: A Tensor of shape [num_labels] consisting of the Lagrange + multipliers. + label_priors: A Tensor of shape [num_labels] consisting of the prior + probability of each label learned by the loss, if not provided. + true_positives_lower_bound: Lower bound on the number of true positives + given `labels` and `logits`. This is the same lower bound which is used + in the loss expression to be optimized. + false_positives_upper_bound: Upper bound on the number of false positives + given `labels` and `logits`. This is the same upper bound which is used + in the loss expression to be optimized. + + Raises: + ValueError: If `surrogate_type` is not `xent` or `hinge`. + """ + return precision_at_recall_loss(labels=labels, + logits=logits, + target_recall=target_rate, + weights=weights, + dual_rate_factor=dual_rate_factor, + label_priors=label_priors, + surrogate_type=surrogate_type, + lambdas_initializer=lambdas_initializer, + reuse=reuse, + variables_collections=variables_collections, + trainable=trainable, + scope=scope) + + +def true_positive_rate_at_false_positive_rate_loss( + labels, + logits, + target_rate, + weights=1.0, + dual_rate_factor=0.1, + label_priors=None, + surrogate_type='xent', + lambdas_initializer=tf.constant_initializer(1.0), + reuse=None, + variables_collections=None, + trainable=True, + scope=None): + """Computes true positive rate at false positive rate loss. + + The loss is based on a surrogate of the form + wt * loss(+) + lambdas * (wt * loss(-) - r * (1 - pi)) + where: + - loss(-) is the loss on the negative examples + - loss(+) is the loss on the positive examples + - wt is a scalar or tensor of per-example weights + - r is the target rate + - pi is the label_priors. 
+ + The per-example weights change not only the coefficients of individual + training examples, but how the examples are counted toward the constraint. + If `label_priors` is given, it MUST take `weights` into account. That is, + label_priors = P / (P + N) + where + P = sum_i (wt_i on positives) + N = sum_i (wt_i on negatives). + + Args: + labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels]. + logits: A `Tensor` with the same shape as `labels`. + target_rate: The false positive rate at which to compute the loss. Can be a + floating point value between 0 and 1 for a single false positive rate, or + a `Tensor` of shape [num_labels] holding each label's false positive rate. + weights: Coefficients for the loss. Must be a scalar or `Tensor` of shape + [batch_size] or [batch_size, num_labels]. + dual_rate_factor: A floating point value which controls the step size for + the Lagrange multipliers. + label_priors: None, or a floating point `Tensor` of shape [num_labels] + containing the prior probability of each label (i.e. the fraction of the + training data consisting of positive examples). If None, the label + priors are computed from `labels` with a moving average. See the notes + above regarding the interaction with `weights` and do not set this unless + you have a good reason to do so. + surrogate_type: Either 'xent' or 'hinge', specifying which upper bound + should be used for indicator functions. 'xent' will use the cross-entropy + loss surrogate, and 'hinge' will use the hinge loss. + lambdas_initializer: An initializer op for the Lagrange multipliers. + reuse: Whether or not the layer and its variables should be reused. To be + able to reuse the layer scope must be given. + variables_collections: Optional list of collections for the variables. + trainable: If `True` also add variables to the graph collection + `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). + scope: Optional scope for `variable_scope`. + + Returns: + loss: A `Tensor` of the same shape as `logits` with the component-wise + loss. + other_outputs: A dictionary of useful internal quantities for debugging. For + more details, see http://arxiv.org/pdf/1608.04802.pdf. + lambdas: A Tensor of shape [num_labels] consisting of the Lagrange + multipliers. + label_priors: A Tensor of shape [num_labels] consisting of the prior + probability of each label learned by the loss, if not provided. + true_positives_lower_bound: Lower bound on the number of true positives + given `labels` and `logits`. This is the same lower bound which is used + in the loss expression to be optimized. + false_positives_upper_bound: Upper bound on the number of false positives + given `labels` and `logits`. This is the same upper bound which is used + in the loss expression to be optimized. + + Raises: + ValueError: If `surrogate_type` is not `xent` or `hinge`. + """ + with tf.variable_scope(scope, + 'tpr_at_fpr', + [labels, logits, label_priors], + reuse=reuse): + labels, logits, weights, original_shape = _prepare_labels_logits_weights( + labels, logits, weights) + num_labels = util.get_num_labels(logits) + + # Convert other inputs to tensors and standardize dtypes. + target_rate = util.convert_and_cast( + target_rate, 'target_rate', logits.dtype) + dual_rate_factor = util.convert_and_cast( + dual_rate_factor, 'dual_rate_factor', logits.dtype) + + # Create lambdas. 
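+    # (_create_dual_variable, defined below, returns tf.abs(variable) with its
+    # gradient reversed and scaled by dual_rate_factor, so minimizing the loss
+    # performs gradient ascent on the Lagrange multipliers.)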
+ lambdas, lambdas_variable = _create_dual_variable( + 'lambdas', + shape=[num_labels], + dtype=logits.dtype, + initializer=lambdas_initializer, + collections=variables_collections, + trainable=trainable, + dual_rate_factor=dual_rate_factor) + # Maybe create label_priors. + label_priors = maybe_create_label_priors( + label_priors, labels, weights, variables_collections) + + # Loss op and other outputs. The log(2.0) term corrects for + # logloss not being an upper bound on the indicator function. + weighted_loss = weights * util.weighted_surrogate_loss( + labels, + logits, + surrogate_type=surrogate_type, + positive_weights=1.0, + negative_weights=lambdas) + maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0 + maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype) + lambda_term = lambdas * target_rate * (1.0 - label_priors) * maybe_log2 + loss = tf.reshape(weighted_loss - lambda_term, original_shape) + other_outputs = { + 'lambdas': lambdas_variable, + 'label_priors': label_priors, + 'true_positives_lower_bound': true_positives_lower_bound( + labels, logits, weights, surrogate_type), + 'false_positives_upper_bound': false_positives_upper_bound( + labels, logits, weights, surrogate_type)} + + return loss, other_outputs + + +def _prepare_labels_logits_weights(labels, logits, weights): + """Validates labels, logits, and weights. + + Converts inputs to tensors, checks shape compatibility, and casts dtype if + necessary. + + Args: + labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels]. + logits: A `Tensor` with the same shape as `labels`. + weights: Either `None` or a `Tensor` with shape broadcastable to `logits`. + + Returns: + labels: Same as `labels` arg after possible conversion to tensor, cast, and + reshape. + logits: Same as `logits` arg after possible conversion to tensor and + reshape. + weights: Same as `weights` arg after possible conversion, cast, and reshape. + original_shape: Shape of `labels` and `logits` before reshape. + + Raises: + ValueError: If `labels` and `logits` do not have the same shape. + """ + # Convert `labels` and `logits` to Tensors and standardize dtypes. + logits = tf.convert_to_tensor(logits, name='logits') + labels = util.convert_and_cast(labels, 'labels', logits.dtype.base_dtype) + weights = util.convert_and_cast(weights, 'weights', logits.dtype.base_dtype) + + try: + labels.get_shape().merge_with(logits.get_shape()) + except ValueError: + raise ValueError('logits and labels must have the same shape (%s vs %s)' % + (logits.get_shape(), labels.get_shape())) + + original_shape = labels.get_shape().as_list() + if labels.get_shape().ndims > 0: + original_shape[0] = -1 + if labels.get_shape().ndims <= 1: + labels = tf.reshape(labels, [-1, 1]) + logits = tf.reshape(logits, [-1, 1]) + + if weights.get_shape().ndims == 1: + # Weights has shape [batch_size]. Reshape to [batch_size, 1]. + weights = tf.reshape(weights, [-1, 1]) + if weights.get_shape().ndims == 0: + # Weights is a scalar. Change shape of weights to match logits. + weights *= tf.ones_like(logits) + + return labels, logits, weights, original_shape + + +def _range_to_anchors_and_delta(precision_range, num_anchors, dtype): + """Calculates anchor points from precision range. + + Args: + precision_range: As required in precision_recall_auc_loss. + num_anchors: int, number of equally spaced anchor points. + dtype: Data type of returned tensors. + + Returns: + precision_values: A `Tensor` of data type dtype with equally spaced values + in the interval precision_range. 
+ delta: The spacing between the values in precision_values. + + Raises: + ValueError: If precision_range is invalid. + """ + # Validate precision_range. + if not 0 <= precision_range[0] <= precision_range[-1] <= 1: + raise ValueError('precision values must obey 0 <= %f <= %f <= 1' % + (precision_range[0], precision_range[-1])) + if not 0 < len(precision_range) < 3: + raise ValueError('length of precision_range (%d) must be 1 or 2' % + len(precision_range)) + + # Sets precision_values uniformly between min_precision and max_precision. + values = numpy.linspace(start=precision_range[0], + stop=precision_range[1], + num=num_anchors+2)[1:-1] + precision_values = util.convert_and_cast( + values, 'precision_values', dtype) + delta = util.convert_and_cast( + values[0] - precision_range[0], 'delta', dtype) + # Makes precision_values [1, 1, num_anchors]. + precision_values = util.expand_outer(precision_values, 3) + return precision_values, delta + + +def _create_dual_variable(name, shape, dtype, initializer, collections, + trainable, dual_rate_factor): + """Creates a new dual variable. + + Dual variables are required to be nonnegative. If trainable, their gradient + is reversed so that they are maximized (rather than minimized) by the + optimizer. + + Args: + name: A string, the name for the new variable. + shape: Shape of the new variable. + dtype: Data type for the new variable. + initializer: Initializer for the new variable. + collections: List of graph collections keys. The new variable is added to + these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`. + trainable: If `True`, the default, also adds the variable to the graph + collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as + the default list of variables to use by the `Optimizer` classes. + dual_rate_factor: A floating point value or `Tensor`. The learning rate for + the dual variable is scaled by this factor. + + Returns: + dual_value: An op that computes the absolute value of the dual variable + and reverses its gradient. + dual_variable: The underlying variable itself. + """ + # We disable partitioning while constructing dual variables because they will + # be updated with assign, which is not available for partitioned variables. + partitioner = tf.get_variable_scope().partitioner + try: + tf.get_variable_scope().set_partitioner(None) + dual_variable = tf.contrib.framework.model_variable( + name=name, + shape=shape, + dtype=dtype, + initializer=initializer, + collections=collections, + trainable=trainable) + finally: + tf.get_variable_scope().set_partitioner(partitioner) + # Using the absolute value enforces nonnegativity. + dual_value = tf.abs(dual_variable) + + if trainable: + # To reverse the gradient on the dual variable, multiply the gradient by + # -dual_rate_factor + dual_value = (tf.stop_gradient((1.0 + dual_rate_factor) * dual_value) + - dual_rate_factor * dual_value) + return dual_value, dual_variable + + +def maybe_create_label_priors(label_priors, + labels, + weights, + variables_collections): + """Creates moving average ops to track label priors, if necessary. + + Args: + label_priors: As required in e.g. precision_recall_auc_loss. + labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels]. + weights: As required in e.g. precision_recall_auc_loss. + variables_collections: Optional list of collections for the variables, if + any must be created. 
+ + Returns: + label_priors: A Tensor of shape [num_labels] consisting of the + weighted label priors, after updating with moving average ops if created. + """ + if label_priors is not None: + label_priors = util.convert_and_cast( + label_priors, name='label_priors', dtype=labels.dtype.base_dtype) + return tf.squeeze(label_priors) + + label_priors = util.build_label_priors( + labels, + weights, + variables_collections=variables_collections) + return label_priors + + +def true_positives_lower_bound(labels, logits, weights, surrogate_type): + """Calculate a lower bound on the number of true positives. + + This lower bound on the number of true positives given `logits` and `labels` + is the same one used in the global objectives loss functions. + + Args: + labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels]. + logits: A `Tensor` of shape [batch_size, num_labels] or + [batch_size, num_labels, num_anchors]. If the third dimension is present, + the lower bound is computed on each slice [:, :, k] independently. + weights: Per-example loss coefficients, with shape broadcast-compatible with + that of `labels`. + surrogate_type: Either 'xent' or 'hinge', specifying which upper bound + should be used for indicator functions. + + Returns: + A `Tensor` of shape [num_labels] or [num_labels, num_anchors]. + """ + maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0 + maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype) + if logits.get_shape().ndims == 3 and labels.get_shape().ndims < 3: + labels = tf.expand_dims(labels, 2) + loss_on_positives = util.weighted_surrogate_loss( + labels, logits, surrogate_type, negative_weights=0.0) / maybe_log2 + return tf.reduce_sum(weights * (labels - loss_on_positives), 0) + + +def false_positives_upper_bound(labels, logits, weights, surrogate_type): + """Calculate an upper bound on the number of false positives. + + This upper bound on the number of false positives given `logits` and `labels` + is the same one used in the global objectives loss functions. + + Args: + labels: A `Tensor` of shape [batch_size, num_labels] + logits: A `Tensor` of shape [batch_size, num_labels] or + [batch_size, num_labels, num_anchors]. If the third dimension is present, + the lower bound is computed on each slice [:, :, k] independently. + weights: Per-example loss coefficients, with shape broadcast-compatible with + that of `labels`. + surrogate_type: Either 'xent' or 'hinge', specifying which upper bound + should be used for indicator functions. + + Returns: + A `Tensor` of shape [num_labels] or [num_labels, num_anchors]. + """ + maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0 + maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype) + loss_on_negatives = util.weighted_surrogate_loss( + labels, logits, surrogate_type, positive_weights=0.0) / maybe_log2 + return tf.reduce_sum(weights * loss_on_negatives, 0) diff --git a/models/research/global_objectives/loss_layers_example.py b/models/research/global_objectives/loss_layers_example.py new file mode 100644 index 0000000000000000000000000000000000000000..2323cb0762e7f4eade8f283162be61cc45513d49 --- /dev/null +++ b/models/research/global_objectives/loss_layers_example.py @@ -0,0 +1,211 @@ +# Copyright 2018 The TensorFlow Global Objectives Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Example for using global objectives.
+
+Illustrates, using synthetic data, how using the precision_at_recall loss
+significantly improves the performance of a linear classifier.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+# Dependency imports
+import numpy as np
+from sklearn.metrics import precision_score
+import tensorflow as tf
+from global_objectives import loss_layers
+
+# When optimizing using global_objectives, if set to True then the saddle point
+# optimization steps are performed internally by the TensorFlow optimizer,
+# otherwise by dedicated saddle-point steps as part of the optimization loop.
+USE_GO_SADDLE_POINT_OPT = False
+
+TARGET_RECALL = 0.98
+TRAIN_ITERATIONS = 150
+LEARNING_RATE = 1.0
+GO_DUAL_RATE_FACTOR = 15.0
+NUM_CHECKPOINTS = 6
+
+EXPERIMENT_DATA_CONFIG = {
+    'positives_centers': [[0, 1.0], [1, -0.5]],
+    'negatives_centers': [[0, -0.5], [1, 1.0]],
+    'positives_variances': [0.15, 0.1],
+    'negatives_variances': [0.15, 0.1],
+    'positives_counts': [500, 50],
+    'negatives_counts': [3000, 100]
+}
+
+
+def create_training_and_eval_data_for_experiment(**data_config):
+  """Creates train and eval data sets.
+
+  Note: The synthesized binary-labeled data is a mixture of four Gaussians - two
+  positives and two negatives. The centers, variances, and sizes for each of
+  the two positives and negatives mixtures are passed in the respective keys
+  of data_config:
+
+  Args:
+    **data_config: Dictionary with Array entries as follows:
+      positives_centers - float [2,2] two centers of positives data sets.
+      negatives_centers - float [2,2] two centers of negatives data sets.
+      positives_variances - float [2] Variances for the positives sets.
+      negatives_variances - float [2] Variances for the negatives sets.
+      positives_counts - int [2] Counts for each of the two positives sets.
+      negatives_counts - int [2] Counts for each of the two negatives sets.
+
+  Returns:
+    A dictionary with two shuffled data sets created - one for training and one
+    for eval. The dictionary keys are 'train_data', 'train_labels', 'eval_data',
+    and 'eval_labels'. The data points are two-dimensional floats, and the
+    labels are in {0,1}.
+ """ + def data_points(is_positives, index): + variance = data_config['positives_variances' + if is_positives else 'negatives_variances'][index] + center = data_config['positives_centers' + if is_positives else 'negatives_centers'][index] + count = data_config['positives_counts' + if is_positives else 'negatives_counts'][index] + return variance*np.random.randn(count, 2) + np.array([center]) + + def create_data(): + return np.concatenate([data_points(False, 0), + data_points(True, 0), + data_points(True, 1), + data_points(False, 1)], axis=0) + + def create_labels(): + """Creates an array of 0.0 or 1.0 labels for the data_config batches.""" + return np.array([0.0]*data_config['negatives_counts'][0] + + [1.0]*data_config['positives_counts'][0] + + [1.0]*data_config['positives_counts'][1] + + [0.0]*data_config['negatives_counts'][1]) + + permutation = np.random.permutation( + sum(data_config['positives_counts'] + data_config['negatives_counts'])) + + train_data = create_data()[permutation, :] + eval_data = create_data()[permutation, :] + train_labels = create_labels()[permutation] + eval_labels = create_labels()[permutation] + + return { + 'train_data': train_data, + 'train_labels': train_labels, + 'eval_data': eval_data, + 'eval_labels': eval_labels + } + + +def train_model(data, use_global_objectives): + """Trains a linear model for maximal accuracy or precision at given recall.""" + + def precision_at_recall(scores, labels, target_recall): + """Computes precision - at target recall - over data.""" + positive_scores = scores[labels == 1.0] + threshold = np.percentile(positive_scores, 100 - target_recall*100) + predicted = scores >= threshold + return precision_score(labels, predicted) + + w = tf.Variable(tf.constant([-1.0, -1.0], shape=[2, 1]), trainable=True, + name='weights', dtype=tf.float32) + b = tf.Variable(tf.zeros([1]), trainable=True, name='biases', + dtype=tf.float32) + + logits = tf.matmul(tf.cast(data['train_data'], tf.float32), w) + b + + labels = tf.constant( + data['train_labels'], + shape=[len(data['train_labels']), 1], + dtype=tf.float32) + + if use_global_objectives: + loss, other_outputs = loss_layers.precision_at_recall_loss( + labels, logits, + TARGET_RECALL, + dual_rate_factor=GO_DUAL_RATE_FACTOR) + loss = tf.reduce_mean(loss) + else: + loss = tf.reduce_mean( + tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)) + + global_step = tf.Variable(0, trainable=False) + + learning_rate = tf.train.polynomial_decay( + LEARNING_RATE, + global_step, + TRAIN_ITERATIONS, (LEARNING_RATE / TRAIN_ITERATIONS), + power=1.0, + cycle=False, + name='learning_rate') + + optimizer = tf.train.GradientDescentOptimizer(learning_rate) + + if (not use_global_objectives) or USE_GO_SADDLE_POINT_OPT: + training_op = optimizer.minimize(loss, global_step=global_step) + else: + lambdas = other_outputs['lambdas'] + primal_update_op = optimizer.minimize(loss, var_list=[w, b]) + dual_update_op = optimizer.minimize( + loss, global_step=global_step, var_list=[lambdas]) + + # Training loop: + with tf.Session() as sess: + checkpoint_step = TRAIN_ITERATIONS // NUM_CHECKPOINTS + sess.run(tf.global_variables_initializer()) + step = sess.run(global_step) + + while step <= TRAIN_ITERATIONS: + if (not use_global_objectives) or USE_GO_SADDLE_POINT_OPT: + _, step, loss_value, w_value, b_value = sess.run( + [training_op, global_step, loss, w, b]) + else: + _, w_value, b_value = sess.run([primal_update_op, w, b]) + _, loss_value, step = sess.run([dual_update_op, loss, global_step]) + + if 
use_global_objectives: + go_outputs = sess.run(other_outputs.values()) + + if step % checkpoint_step == 0: + precision = precision_at_recall( + np.dot(data['train_data'], w_value) + b_value, + data['train_labels'], TARGET_RECALL) + + tf.logging.info('Loss = %f Precision = %f', loss_value, precision) + if use_global_objectives: + for i, output_name in enumerate(other_outputs.keys()): + tf.logging.info('\t%s = %f', output_name, go_outputs[i]) + + w_value, b_value = sess.run([w, b]) + return precision_at_recall(np.dot(data['eval_data'], w_value) + b_value, + data['eval_labels'], + TARGET_RECALL) + + +def main(unused_argv): + del unused_argv + experiment_data = create_training_and_eval_data_for_experiment( + **EXPERIMENT_DATA_CONFIG) + global_objectives_loss_precision = train_model(experiment_data, True) + tf.logging.info('global_objectives precision at requested recall is %f', + global_objectives_loss_precision) + cross_entropy_loss_precision = train_model(experiment_data, False) + tf.logging.info('cross_entropy precision at requested recall is %f', + cross_entropy_loss_precision) + + +if __name__ == '__main__': + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/models/research/global_objectives/loss_layers_test.py b/models/research/global_objectives/loss_layers_test.py new file mode 100644 index 0000000000000000000000000000000000000000..3f91c80deec16a34f5271cdfadbd0d364c3a8cea --- /dev/null +++ b/models/research/global_objectives/loss_layers_test.py @@ -0,0 +1,1379 @@ +# Copyright 2018 The TensorFlow Global Objectives Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for global objectives loss layers.""" + +# Dependency imports +from absl.testing import parameterized +import numpy +import tensorflow as tf + +from global_objectives import loss_layers +from global_objectives import util + + +# TODO: Include weights in the lagrange multiplier update tests. 
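For readers skimming the example above, here is a condensed sketch of the saddle-point pattern it uses when USE_GO_SADDLE_POINT_OPT is False (an editorial sketch with toy tensors, assuming TF 1.x graph mode; it is not part of either file):

import tensorflow as tf
from global_objectives import loss_layers

# Toy stand-ins for the example's synthetic data and linear model.
features = tf.constant([[0.2], [-0.4], [1.3], [-0.9]])
labels = tf.constant([[1.0], [0.0], [1.0], [0.0]])
w = tf.Variable([[1.0]])
logits = tf.matmul(features, w)

loss, outputs = loss_layers.precision_at_recall_loss(
    labels, logits, target_recall=0.9, dual_rate_factor=1.0)
loss = tf.reduce_mean(loss)

opt = tf.train.GradientDescentOptimizer(learning_rate=0.1)
primal_op = opt.minimize(loss, var_list=[w])                 # descend on the model weights
dual_op = opt.minimize(loss, var_list=[outputs['lambdas']])  # lambdas' gradient is reversed, so this step ascends

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  for _ in range(10):
    sess.run(primal_op)
    sess.run(dual_op)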
+class PrecisionRecallAUCLossTest(parameterized.TestCase, tf.test.TestCase):
+
+  @parameterized.named_parameters(
+      ('_xent', 'xent', 0.7),
+      ('_hinge', 'hinge', 0.7),
+      ('_hinge_2', 'hinge', 0.5)
+  )
+  def testSinglePointAUC(self, surrogate_type, target_precision):
+    # Tests a case with only one anchor point, where the loss should equal
+    # recall_at_precision_loss.
+    batch_shape = [10, 2]
+    logits = tf.Variable(tf.random_normal(batch_shape))
+    labels = tf.Variable(
+        tf.to_float(tf.greater(tf.random_uniform(batch_shape), 0.4)))
+
+    auc_loss, _ = loss_layers.precision_recall_auc_loss(
+        labels,
+        logits,
+        precision_range=(target_precision - 0.01, target_precision + 0.01),
+        num_anchors=1,
+        surrogate_type=surrogate_type)
+    point_loss, _ = loss_layers.recall_at_precision_loss(
+        labels, logits, target_precision=target_precision,
+        surrogate_type=surrogate_type)
+
+    with self.test_session():
+      tf.global_variables_initializer().run()
+      self.assertAllClose(auc_loss.eval(), point_loss.eval())
+
+  def testThreePointAUC(self):
+    # Tests a case with three anchor points against a weighted sum of recall
+    # at precision losses.
+    batch_shape = [11, 3]
+    logits = tf.Variable(tf.random_normal(batch_shape))
+    labels = tf.Variable(
+        tf.to_float(tf.greater(tf.random_uniform(batch_shape), 0.4)))
+
+    # TODO: Place the hinge/xent loss in a for loop.
+    auc_loss, _ = loss_layers.precision_recall_auc_loss(
+        labels, logits, num_anchors=3)
+    first_point_loss, _ = loss_layers.recall_at_precision_loss(
+        labels, logits, target_precision=0.25)
+    second_point_loss, _ = loss_layers.recall_at_precision_loss(
+        labels, logits, target_precision=0.5)
+    third_point_loss, _ = loss_layers.recall_at_precision_loss(
+        labels, logits, target_precision=0.75)
+    expected_loss = (first_point_loss + second_point_loss +
+                     third_point_loss) / 3
+
+    auc_loss_hinge, _ = loss_layers.precision_recall_auc_loss(
+        labels, logits, num_anchors=3, surrogate_type='hinge')
+    first_point_hinge, _ = loss_layers.recall_at_precision_loss(
+        labels, logits, target_precision=0.25, surrogate_type='hinge')
+    second_point_hinge, _ = loss_layers.recall_at_precision_loss(
+        labels, logits, target_precision=0.5, surrogate_type='hinge')
+    third_point_hinge, _ = loss_layers.recall_at_precision_loss(
+        labels, logits, target_precision=0.75, surrogate_type='hinge')
+    expected_hinge = (first_point_hinge + second_point_hinge +
+                      third_point_hinge) / 3
+
+    with self.test_session():
+      tf.global_variables_initializer().run()
+      self.assertAllClose(auc_loss.eval(), expected_loss.eval())
+      self.assertAllClose(auc_loss_hinge.eval(), expected_hinge.eval())
+
+  def testLagrangeMultiplierUpdateDirection(self):
+    for target_precision in [0.35, 0.65]:
+      precision_range = (target_precision - 0.01, target_precision + 0.01)
+
+      for surrogate_type in ['xent', 'hinge']:
+        kwargs = {'precision_range': precision_range,
+                  'num_anchors': 1,
+                  'surrogate_type': surrogate_type,
+                  'scope': 'pr-auc_{}_{}'.format(target_precision,
+                                                 surrogate_type)}
+        run_lagrange_multiplier_test(
+            global_objective=loss_layers.precision_recall_auc_loss,
+            objective_kwargs=kwargs,
+            data_builder=_multilabel_data,
+            test_object=self)
+        kwargs['scope'] = 'other-' + kwargs['scope']
+        run_lagrange_multiplier_test(
+            global_objective=loss_layers.precision_recall_auc_loss,
+            objective_kwargs=kwargs,
+            data_builder=_other_multilabel_data(surrogate_type),
+            test_object=self)
+
+
+class ROCAUCLossTest(parameterized.TestCase, tf.test.TestCase):
+
+  def testSimpleScores(self):
+    # Tests the loss on data with only one negative example with score zero.
+    # In this case, the loss should equal the surrogate loss on the scores with
+    # positive labels.
+    num_positives = 10
+    scores_positives = tf.constant(3.0 * numpy.random.randn(num_positives),
+                                   shape=[num_positives, 1])
+    labels = tf.constant([0.0] + [1.0] * num_positives,
+                         shape=[num_positives + 1, 1])
+    scores = tf.concat([[[0.0]], scores_positives], 0)
+
+    loss = tf.reduce_sum(
+        loss_layers.roc_auc_loss(labels, scores, surrogate_type='hinge')[0])
+    expected_loss = tf.reduce_sum(
+        tf.maximum(1.0 - scores_positives, 0)) / (num_positives + 1)
+    with self.test_session():
+      self.assertAllClose(expected_loss.eval(), loss.eval())
+
+  def testRandomROCLoss(self):
+    # Checks that random Bernoulli scores and labels have ~25% swaps.
+    shape = [1000, 30]
+    scores = tf.constant(
+        numpy.random.randint(0, 2, size=shape), shape=shape, dtype=tf.float32)
+    labels = tf.constant(
+        numpy.random.randint(0, 2, size=shape), shape=shape, dtype=tf.float32)
+    loss = tf.reduce_mean(loss_layers.roc_auc_loss(
+        labels, scores, surrogate_type='hinge')[0])
+    with self.test_session():
+      self.assertAllClose(0.25, loss.eval(), 1e-2)
+
+  @parameterized.named_parameters(
+      ('_zero_xent', 'xent',
+       [0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
+       [-5.0, -7.0, -9.0, 8.0, 10.0, 14.0],
+       0.0),
+      ('_zero_hinge', 'hinge',
+       [0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
+       [-0.2, 0, -0.1, 1.0, 1.1, 1.0],
+       0.0),
+      ('_xent', 'xent',
+       [0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
+       [0.0, -17.0, -19.0, 1.0, 14.0, 14.0],
+       numpy.log(1.0 + numpy.exp(-1.0)) / 6),
+      ('_hinge', 'hinge',
+       [0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
+       [-0.2, -0.05, 0.0, 0.95, 0.8, 1.0],
+       0.4 / 6)
+  )
+  def testManualROCLoss(self, surrogate_type, labels, logits, expected_value):
+    labels = tf.constant(labels)
+    logits = tf.constant(logits)
+    loss, _ = loss_layers.roc_auc_loss(
+        labels=labels, logits=logits, surrogate_type=surrogate_type)
+
+    with self.test_session():
+      self.assertAllClose(expected_value, tf.reduce_sum(loss).eval())
+
+  def testMultiLabelROCLoss(self):
+    # Tests the loss on multi-label data against manually computed loss.
+    targets = numpy.array([[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]])
+    scores = numpy.array([[0.1, 1.0, 1.1, 1.0], [1.0, 0.0, 1.3, 1.1]])
+    class_1_auc = tf.reduce_sum(
+        loss_layers.roc_auc_loss(targets[0], scores[0])[0])
+    class_2_auc = tf.reduce_sum(
+        loss_layers.roc_auc_loss(targets[1], scores[1])[0])
+    total_auc = tf.reduce_sum(loss_layers.roc_auc_loss(
+        targets.transpose(), scores.transpose())[0])
+
+    with self.test_session():
+      self.assertAllClose(total_auc.eval(),
+                          class_1_auc.eval() + class_2_auc.eval())
+
+  def testWeights(self):
+    # Tests the loss with per-example weights.
+    # The logits_negatives below are repeated, so that setting half their
+    # weights to 2 and the other half to 0 should leave the loss unchanged.
+ logits_positives = tf.constant([2.54321, -0.26, 3.334334], shape=[3, 1]) + logits_negatives = tf.constant([-0.6, 1, -1.3, -1.3, -0.6, 1], shape=[6, 1]) + logits = tf.concat([logits_positives, logits_negatives], 0) + targets = tf.constant([1, 1, 1, 0, 0, 0, 0, 0, 0], + shape=[9, 1], dtype=tf.float32) + weights = tf.constant([1, 1, 1, 0, 0, 0, 2, 2, 2], + shape=[9, 1], dtype=tf.float32) + + loss = tf.reduce_sum(loss_layers.roc_auc_loss(targets, logits)[0]) + weighted_loss = tf.reduce_sum( + loss_layers.roc_auc_loss(targets, logits, weights)[0]) + + with self.test_session(): + self.assertAllClose(loss.eval(), weighted_loss.eval()) + + +class RecallAtPrecisionTest(tf.test.TestCase): + + def testEqualWeightLoss(self): + # Tests a special case where the loss should equal cross entropy loss. + target_precision = 1.0 + num_labels = 5 + batch_shape = [20, num_labels] + logits = tf.Variable(tf.random_normal(batch_shape)) + targets = tf.Variable( + tf.to_float(tf.greater(tf.random_uniform(batch_shape), 0.7))) + label_priors = tf.constant(0.34, shape=[num_labels]) + + loss, _ = loss_layers.recall_at_precision_loss( + targets, logits, target_precision, label_priors=label_priors) + expected_loss = ( + tf.contrib.nn.deprecated_flipped_sigmoid_cross_entropy_with_logits( + logits, targets)) + + with self.test_session() as session: + tf.global_variables_initializer().run() + loss_val, expected_val = session.run([loss, expected_loss]) + self.assertAllClose(loss_val, expected_val) + + def testEqualWeightLossWithMultiplePrecisions(self): + """Tests a case where the loss equals xent loss with multiple precisions.""" + target_precision = [1.0, 1.0] + num_labels = 2 + batch_size = 20 + target_shape = [batch_size, num_labels] + logits = tf.Variable(tf.random_normal(target_shape)) + targets = tf.Variable( + tf.to_float(tf.greater(tf.random_uniform(target_shape), 0.7))) + label_priors = tf.constant([0.34], shape=[num_labels]) + + loss, _ = loss_layers.recall_at_precision_loss( + targets, + logits, + target_precision, + label_priors=label_priors, + surrogate_type='xent', + ) + + expected_loss = ( + tf.contrib.nn.deprecated_flipped_sigmoid_cross_entropy_with_logits( + logits, targets)) + + with self.test_session() as session: + tf.global_variables_initializer().run() + loss_val, expected_val = session.run([loss, expected_loss]) + self.assertAllClose(loss_val, expected_val) + + def testPositivesOnlyLoss(self): + # Tests a special case where the loss should equal cross entropy loss + # on the negatives only. + target_precision = 1.0 + num_labels = 3 + batch_shape = [30, num_labels] + logits = tf.Variable(tf.random_normal(batch_shape)) + targets = tf.Variable( + tf.to_float(tf.greater(tf.random_uniform(batch_shape), 0.4))) + label_priors = tf.constant(0.45, shape=[num_labels]) + + loss, _ = loss_layers.recall_at_precision_loss( + targets, logits, target_precision, label_priors=label_priors, + lambdas_initializer=tf.zeros_initializer()) + expected_loss = util.weighted_sigmoid_cross_entropy_with_logits( + targets, + logits, + positive_weights=1.0, + negative_weights=0.0) + + with self.test_session() as session: + tf.global_variables_initializer().run() + loss_val, expected_val = session.run([loss, expected_loss]) + self.assertAllClose(loss_val, expected_val) + + def testEquivalenceBetweenSingleAndMultiplePrecisions(self): + """Checks recall at precision with different precision values. + + Runs recall at precision with multiple precision values, and runs each label + seperately with its own precision value as a scalar. 
Validates that the + returned loss values are the same. + """ + target_precision = [0.2, 0.9, 0.4] + num_labels = 3 + batch_shape = [30, num_labels] + logits = tf.Variable(tf.random_normal(batch_shape)) + targets = tf.Variable( + tf.to_float(tf.greater(tf.random_uniform(batch_shape), 0.4))) + label_priors = tf.constant([0.45, 0.8, 0.3], shape=[num_labels]) + + multi_label_loss, _ = loss_layers.recall_at_precision_loss( + targets, logits, target_precision, label_priors=label_priors, + ) + + single_label_losses = [ + loss_layers.recall_at_precision_loss( + tf.expand_dims(targets[:, i], -1), + tf.expand_dims(logits[:, i], -1), + target_precision[i], + label_priors=label_priors[i])[0] + for i in range(num_labels) + ] + + single_label_losses = tf.concat(single_label_losses, 1) + + with self.test_session() as session: + tf.global_variables_initializer().run() + multi_label_loss_val, single_label_loss_val = session.run( + [multi_label_loss, single_label_losses]) + self.assertAllClose(multi_label_loss_val, single_label_loss_val) + + def testEquivalenceBetweenSingleAndEqualMultiplePrecisions(self): + """Compares single and multiple target precisions with the same value. + + Checks that using a single target precision and multiple target precisions + with the same value would result in the same loss value. + """ + num_labels = 2 + target_shape = [20, num_labels] + logits = tf.Variable(tf.random_normal(target_shape)) + targets = tf.Variable( + tf.to_float(tf.greater(tf.random_uniform(target_shape), 0.7))) + label_priors = tf.constant([0.34], shape=[num_labels]) + + multi_precision_loss, _ = loss_layers.recall_at_precision_loss( + targets, + logits, + [0.75, 0.75], + label_priors=label_priors, + surrogate_type='xent', + ) + + single_precision_loss, _ = loss_layers.recall_at_precision_loss( + targets, + logits, + 0.75, + label_priors=label_priors, + surrogate_type='xent', + ) + + with self.test_session() as session: + tf.global_variables_initializer().run() + multi_precision_loss_val, single_precision_loss_val = session.run( + [multi_precision_loss, single_precision_loss]) + self.assertAllClose(multi_precision_loss_val, single_precision_loss_val) + + def testLagrangeMultiplierUpdateDirection(self): + for target_precision in [0.35, 0.65]: + for surrogate_type in ['xent', 'hinge']: + kwargs = {'target_precision': target_precision, + 'surrogate_type': surrogate_type, + 'scope': 'r-at-p_{}_{}'.format(target_precision, + surrogate_type)} + run_lagrange_multiplier_test( + global_objective=loss_layers.recall_at_precision_loss, + objective_kwargs=kwargs, + data_builder=_multilabel_data, + test_object=self) + kwargs['scope'] = 'other-' + kwargs['scope'] + run_lagrange_multiplier_test( + global_objective=loss_layers.recall_at_precision_loss, + objective_kwargs=kwargs, + data_builder=_other_multilabel_data(surrogate_type), + test_object=self) + + def testLagrangeMultiplierUpdateDirectionWithMultiplePrecisions(self): + """Runs Lagrange multiplier test with multiple precision values.""" + target_precision = [0.65, 0.35] + + for surrogate_type in ['xent', 'hinge']: + scope_str = 'r-at-p_{}_{}'.format( + '_'.join([str(precision) for precision in target_precision]), + surrogate_type) + kwargs = { + 'target_precision': target_precision, + 'surrogate_type': surrogate_type, + 'scope': scope_str, + } + run_lagrange_multiplier_test( + global_objective=loss_layers.recall_at_precision_loss, + objective_kwargs=kwargs, + data_builder=_multilabel_data, + test_object=self) + kwargs['scope'] = 'other-' + kwargs['scope'] + 
run_lagrange_multiplier_test( + global_objective=loss_layers.recall_at_precision_loss, + objective_kwargs=kwargs, + data_builder=_other_multilabel_data(surrogate_type), + test_object=self) + + +class PrecisionAtRecallTest(tf.test.TestCase): + + def testCrossEntropyEquivalence(self): + # Checks a special case where the loss should equal cross-entropy loss. + target_recall = 1.0 + num_labels = 3 + batch_shape = [10, num_labels] + logits = tf.Variable(tf.random_normal(batch_shape)) + targets = tf.Variable( + tf.to_float(tf.greater(tf.random_uniform(batch_shape), 0.4))) + + loss, _ = loss_layers.precision_at_recall_loss( + targets, logits, target_recall, + lambdas_initializer=tf.constant_initializer(1.0)) + expected_loss = util.weighted_sigmoid_cross_entropy_with_logits( + targets, logits) + + with self.test_session(): + tf.global_variables_initializer().run() + self.assertAllClose(loss.eval(), expected_loss.eval()) + + def testNegativesOnlyLoss(self): + # Checks a special case where the loss should equal the loss on + # the negative examples only. + target_recall = 0.61828 + num_labels = 4 + batch_shape = [8, num_labels] + logits = tf.Variable(tf.random_normal(batch_shape)) + targets = tf.Variable( + tf.to_float(tf.greater(tf.random_uniform(batch_shape), 0.6))) + + loss, _ = loss_layers.precision_at_recall_loss( + targets, + logits, + target_recall, + surrogate_type='hinge', + lambdas_initializer=tf.constant_initializer(0.0), + scope='negatives_only_test') + expected_loss = util.weighted_hinge_loss( + targets, logits, positive_weights=0.0, negative_weights=1.0) + + with self.test_session(): + tf.global_variables_initializer().run() + self.assertAllClose(expected_loss.eval(), loss.eval()) + + def testLagrangeMultiplierUpdateDirection(self): + for target_recall in [0.34, 0.66]: + for surrogate_type in ['xent', 'hinge']: + kwargs = {'target_recall': target_recall, + 'dual_rate_factor': 1.0, + 'surrogate_type': surrogate_type, + 'scope': 'p-at-r_{}_{}'.format(target_recall, surrogate_type)} + + run_lagrange_multiplier_test( + global_objective=loss_layers.precision_at_recall_loss, + objective_kwargs=kwargs, + data_builder=_multilabel_data, + test_object=self) + kwargs['scope'] = 'other-' + kwargs['scope'] + run_lagrange_multiplier_test( + global_objective=loss_layers.precision_at_recall_loss, + objective_kwargs=kwargs, + data_builder=_other_multilabel_data(surrogate_type), + test_object=self) + + def testCrossEntropyEquivalenceWithMultipleRecalls(self): + """Checks a case where the loss equals xent loss with multiple recalls.""" + num_labels = 3 + target_recall = [1.0] * num_labels + batch_shape = [10, num_labels] + logits = tf.Variable(tf.random_normal(batch_shape)) + targets = tf.Variable( + tf.to_float(tf.greater(tf.random_uniform(batch_shape), 0.4))) + + loss, _ = loss_layers.precision_at_recall_loss( + targets, logits, target_recall, + lambdas_initializer=tf.constant_initializer(1.0)) + expected_loss = util.weighted_sigmoid_cross_entropy_with_logits( + targets, logits) + + with self.test_session(): + tf.global_variables_initializer().run() + self.assertAllClose(loss.eval(), expected_loss.eval()) + + def testNegativesOnlyLossWithMultipleRecalls(self): + """Tests a case where the loss equals the loss on the negative examples. + + Checks this special case using multiple target recall values. 
+ """ + num_labels = 4 + target_recall = [0.61828] * num_labels + batch_shape = [8, num_labels] + logits = tf.Variable(tf.random_normal(batch_shape)) + targets = tf.Variable( + tf.to_float(tf.greater(tf.random_uniform(batch_shape), 0.6))) + + loss, _ = loss_layers.precision_at_recall_loss( + targets, + logits, + target_recall, + surrogate_type='hinge', + lambdas_initializer=tf.constant_initializer(0.0), + scope='negatives_only_test') + expected_loss = util.weighted_hinge_loss( + targets, logits, positive_weights=0.0, negative_weights=1.0) + + with self.test_session(): + tf.global_variables_initializer().run() + self.assertAllClose(expected_loss.eval(), loss.eval()) + + def testLagrangeMultiplierUpdateDirectionWithMultipleRecalls(self): + """Runs Lagrange multiplier test with multiple recall values.""" + target_recall = [0.34, 0.66] + for surrogate_type in ['xent', 'hinge']: + scope_str = 'p-at-r_{}_{}'.format( + '_'.join([str(recall) for recall in target_recall]), + surrogate_type) + kwargs = {'target_recall': target_recall, + 'dual_rate_factor': 1.0, + 'surrogate_type': surrogate_type, + 'scope': scope_str} + + run_lagrange_multiplier_test( + global_objective=loss_layers.precision_at_recall_loss, + objective_kwargs=kwargs, + data_builder=_multilabel_data, + test_object=self) + kwargs['scope'] = 'other-' + kwargs['scope'] + run_lagrange_multiplier_test( + global_objective=loss_layers.precision_at_recall_loss, + objective_kwargs=kwargs, + data_builder=_other_multilabel_data(surrogate_type), + test_object=self) + + def testEquivalenceBetweenSingleAndMultipleRecalls(self): + """Checks precision at recall with multiple different recall values. + + Runs precision at recall with multiple recall values, and runs each label + seperately with its own recall value as a scalar. Validates that the + returned loss values are the same. + """ + target_precision = [0.7, 0.9, 0.4] + num_labels = 3 + batch_shape = [30, num_labels] + logits = tf.Variable(tf.random_normal(batch_shape)) + targets = tf.Variable( + tf.to_float(tf.greater(tf.random_uniform(batch_shape), 0.4))) + label_priors = tf.constant(0.45, shape=[num_labels]) + + multi_label_loss, _ = loss_layers.precision_at_recall_loss( + targets, logits, target_precision, label_priors=label_priors + ) + + single_label_losses = [ + loss_layers.precision_at_recall_loss( + tf.expand_dims(targets[:, i], -1), + tf.expand_dims(logits[:, i], -1), + target_precision[i], + label_priors=label_priors[i])[0] + for i in range(num_labels) + ] + + single_label_losses = tf.concat(single_label_losses, 1) + + with self.test_session() as session: + tf.global_variables_initializer().run() + multi_label_loss_val, single_label_loss_val = session.run( + [multi_label_loss, single_label_losses]) + self.assertAllClose(multi_label_loss_val, single_label_loss_val) + + def testEquivalenceBetweenSingleAndEqualMultipleRecalls(self): + """Compares single and multiple target recalls of the same value. + + Checks that using a single target recall and multiple recalls with the + same value would result in the same loss value. 
+ """ + num_labels = 2 + target_shape = [20, num_labels] + logits = tf.Variable(tf.random_normal(target_shape)) + targets = tf.Variable( + tf.to_float(tf.greater(tf.random_uniform(target_shape), 0.7))) + label_priors = tf.constant([0.34], shape=[num_labels]) + + multi_precision_loss, _ = loss_layers.precision_at_recall_loss( + targets, + logits, + [0.75, 0.75], + label_priors=label_priors, + surrogate_type='xent', + ) + + single_precision_loss, _ = loss_layers.precision_at_recall_loss( + targets, + logits, + 0.75, + label_priors=label_priors, + surrogate_type='xent', + ) + + with self.test_session() as session: + tf.global_variables_initializer().run() + multi_precision_loss_val, single_precision_loss_val = session.run( + [multi_precision_loss, single_precision_loss]) + self.assertAllClose(multi_precision_loss_val, single_precision_loss_val) + + +class FalsePositiveRateAtTruePositiveRateTest(tf.test.TestCase): + + def testNegativesOnlyLoss(self): + # Checks a special case where the loss returned should be the loss on the + # negative examples. + target_recall = 0.6 + num_labels = 3 + batch_shape = [3, num_labels] + logits = tf.Variable(tf.random_normal(batch_shape)) + targets = tf.Variable( + tf.to_float(tf.greater(tf.random_uniform(batch_shape), 0.4))) + label_priors = tf.constant(numpy.random.uniform(size=[num_labels]), + dtype=tf.float32) + + xent_loss, _ = loss_layers.false_positive_rate_at_true_positive_rate_loss( + targets, logits, target_recall, label_priors=label_priors, + lambdas_initializer=tf.constant_initializer(0.0)) + xent_expected = util.weighted_sigmoid_cross_entropy_with_logits( + targets, + logits, + positive_weights=0.0, + negative_weights=1.0) + hinge_loss, _ = loss_layers.false_positive_rate_at_true_positive_rate_loss( + targets, logits, target_recall, label_priors=label_priors, + lambdas_initializer=tf.constant_initializer(0.0), + surrogate_type='hinge') + hinge_expected = util.weighted_hinge_loss( + targets, + logits, + positive_weights=0.0, + negative_weights=1.0) + + with self.test_session() as session: + tf.global_variables_initializer().run() + xent_val, xent_expected = session.run([xent_loss, xent_expected]) + self.assertAllClose(xent_val, xent_expected) + hinge_val, hinge_expected = session.run([hinge_loss, hinge_expected]) + self.assertAllClose(hinge_val, hinge_expected) + + def testPositivesOnlyLoss(self): + # Checks a special case where the loss returned should be the loss on the + # positive examples only. + target_recall = 1.0 + num_labels = 5 + batch_shape = [5, num_labels] + logits = tf.Variable(tf.random_normal(batch_shape)) + targets = tf.ones_like(logits) + label_priors = tf.constant(numpy.random.uniform(size=[num_labels]), + dtype=tf.float32) + + loss, _ = loss_layers.false_positive_rate_at_true_positive_rate_loss( + targets, logits, target_recall, label_priors=label_priors) + expected_loss = tf.nn.sigmoid_cross_entropy_with_logits( + labels=targets, logits=logits) + hinge_loss, _ = loss_layers.false_positive_rate_at_true_positive_rate_loss( + targets, logits, target_recall, label_priors=label_priors, + surrogate_type='hinge') + expected_hinge = util.weighted_hinge_loss( + targets, logits) + + with self.test_session(): + tf.global_variables_initializer().run() + self.assertAllClose(loss.eval(), expected_loss.eval()) + self.assertAllClose(hinge_loss.eval(), expected_hinge.eval()) + + def testEqualWeightLoss(self): + # Checks a special case where the loss returned should be proportional to + # the ordinary loss. 
+ target_recall = 1.0 + num_labels = 4 + batch_shape = [40, num_labels] + logits = tf.Variable(tf.random_normal(batch_shape)) + targets = tf.Variable( + tf.to_float(tf.greater(tf.random_uniform(batch_shape), 0.6))) + label_priors = tf.constant(0.5, shape=[num_labels]) + + loss, _ = loss_layers.false_positive_rate_at_true_positive_rate_loss( + targets, logits, target_recall, label_priors=label_priors) + expected_loss = tf.nn.sigmoid_cross_entropy_with_logits( + labels=targets, logits=logits) + + with self.test_session(): + tf.global_variables_initializer().run() + self.assertAllClose(loss.eval(), expected_loss.eval()) + + def testLagrangeMultiplierUpdateDirection(self): + for target_rate in [0.35, 0.65]: + for surrogate_type in ['xent', 'hinge']: + kwargs = {'target_rate': target_rate, + 'surrogate_type': surrogate_type, + 'scope': 'fpr-at-tpr_{}_{}'.format(target_rate, + surrogate_type)} + # True positive rate is a synonym for recall, so we use the + # recall constraint data. + run_lagrange_multiplier_test( + global_objective=( + loss_layers.false_positive_rate_at_true_positive_rate_loss), + objective_kwargs=kwargs, + data_builder=_multilabel_data, + test_object=self) + kwargs['scope'] = 'other-' + kwargs['scope'] + run_lagrange_multiplier_test( + global_objective=( + loss_layers.false_positive_rate_at_true_positive_rate_loss), + objective_kwargs=kwargs, + data_builder=_other_multilabel_data(surrogate_type), + test_object=self) + + def testLagrangeMultiplierUpdateDirectionWithMultipleRates(self): + """Runs Lagrange multiplier test with multiple target rates.""" + target_rate = [0.35, 0.65] + for surrogate_type in ['xent', 'hinge']: + kwargs = {'target_rate': target_rate, + 'surrogate_type': surrogate_type, + 'scope': 'fpr-at-tpr_{}_{}'.format( + '_'.join([str(target) for target in target_rate]), + surrogate_type)} + # True positive rate is a synonym for recall, so we use the + # recall constraint data. + run_lagrange_multiplier_test( + global_objective=( + loss_layers.false_positive_rate_at_true_positive_rate_loss), + objective_kwargs=kwargs, + data_builder=_multilabel_data, + test_object=self) + kwargs['scope'] = 'other-' + kwargs['scope'] + run_lagrange_multiplier_test( + global_objective=( + loss_layers.false_positive_rate_at_true_positive_rate_loss), + objective_kwargs=kwargs, + data_builder=_other_multilabel_data(surrogate_type), + test_object=self) + + def testEquivalenceBetweenSingleAndEqualMultipleRates(self): + """Compares single and multiple target rates of the same value. + + Checks that using a single target rate and multiple rates with the + same value would result in the same loss value. 
+ """ + num_labels = 2 + target_shape = [20, num_labels] + logits = tf.Variable(tf.random_normal(target_shape)) + targets = tf.Variable( + tf.to_float(tf.greater(tf.random_uniform(target_shape), 0.7))) + label_priors = tf.constant([0.34], shape=[num_labels]) + + multi_label_loss, _ = ( + loss_layers.false_positive_rate_at_true_positive_rate_loss( + targets, logits, [0.75, 0.75], label_priors=label_priors)) + + single_label_loss, _ = ( + loss_layers.false_positive_rate_at_true_positive_rate_loss( + targets, logits, 0.75, label_priors=label_priors)) + + with self.test_session() as session: + tf.global_variables_initializer().run() + multi_label_loss_val, single_label_loss_val = session.run( + [multi_label_loss, single_label_loss]) + self.assertAllClose(multi_label_loss_val, single_label_loss_val) + + def testEquivalenceBetweenSingleAndMultipleRates(self): + """Compares single and multiple target rates of different values. + + Runs false_positive_rate_at_true_positive_rate_loss with multiple target + rates, and runs each label seperately with its own target rate as a + scalar. Validates that the returned loss values are the same. + """ + target_precision = [0.7, 0.9, 0.4] + num_labels = 3 + batch_shape = [30, num_labels] + logits = tf.Variable(tf.random_normal(batch_shape)) + targets = tf.Variable( + tf.to_float(tf.greater(tf.random_uniform(batch_shape), 0.4))) + label_priors = tf.constant(0.45, shape=[num_labels]) + + multi_label_loss, _ = ( + loss_layers.false_positive_rate_at_true_positive_rate_loss( + targets, logits, target_precision, label_priors=label_priors)) + + single_label_losses = [ + loss_layers.false_positive_rate_at_true_positive_rate_loss( + tf.expand_dims(targets[:, i], -1), + tf.expand_dims(logits[:, i], -1), + target_precision[i], + label_priors=label_priors[i])[0] + for i in range(num_labels) + ] + + single_label_losses = tf.concat(single_label_losses, 1) + + with self.test_session() as session: + tf.global_variables_initializer().run() + multi_label_loss_val, single_label_loss_val = session.run( + [multi_label_loss, single_label_losses]) + self.assertAllClose(multi_label_loss_val, single_label_loss_val) + + +class TruePositiveRateAtFalsePositiveRateTest(tf.test.TestCase): + + def testPositivesOnlyLoss(self): + # A special case where the loss should equal the loss on the positive + # examples. 
+ target_rate = numpy.random.uniform() + num_labels = 3 + batch_shape = [20, num_labels] + logits = tf.Variable(tf.random_normal(batch_shape)) + targets = tf.Variable( + tf.to_float(tf.greater(tf.random_uniform(batch_shape), 0.6))) + label_priors = tf.constant(numpy.random.uniform(size=[num_labels]), + dtype=tf.float32) + + xent_loss, _ = loss_layers.true_positive_rate_at_false_positive_rate_loss( + targets, logits, target_rate, label_priors=label_priors, + lambdas_initializer=tf.constant_initializer(0.0)) + xent_expected = util.weighted_sigmoid_cross_entropy_with_logits( + targets, + logits, + positive_weights=1.0, + negative_weights=0.0) + hinge_loss, _ = loss_layers.true_positive_rate_at_false_positive_rate_loss( + targets, logits, target_rate, label_priors=label_priors, + lambdas_initializer=tf.constant_initializer(0.0), + surrogate_type='hinge') + hinge_expected = util.weighted_hinge_loss( + targets, + logits, + positive_weights=1.0, + negative_weights=0.0) + + with self.test_session(): + tf.global_variables_initializer().run() + self.assertAllClose(xent_expected.eval(), xent_loss.eval()) + self.assertAllClose(hinge_expected.eval(), hinge_loss.eval()) + + def testNegativesOnlyLoss(self): + # A special case where the loss should equal the loss on the negative + # examples, minus target_rate * (1 - label_priors) * maybe_log2. + target_rate = numpy.random.uniform() + num_labels = 3 + batch_shape = [25, num_labels] + logits = tf.Variable(tf.random_normal(batch_shape)) + targets = tf.zeros_like(logits) + label_priors = tf.constant(numpy.random.uniform(size=[num_labels]), + dtype=tf.float32) + + xent_loss, _ = loss_layers.true_positive_rate_at_false_positive_rate_loss( + targets, logits, target_rate, label_priors=label_priors) + xent_expected = tf.subtract( + util.weighted_sigmoid_cross_entropy_with_logits(targets, + logits, + positive_weights=0.0, + negative_weights=1.0), + target_rate * (1.0 - label_priors) * numpy.log(2)) + hinge_loss, _ = loss_layers.true_positive_rate_at_false_positive_rate_loss( + targets, logits, target_rate, label_priors=label_priors, + surrogate_type='hinge') + hinge_expected = util.weighted_hinge_loss( + targets, logits) - target_rate * (1.0 - label_priors) + + with self.test_session(): + tf.global_variables_initializer().run() + self.assertAllClose(xent_expected.eval(), xent_loss.eval()) + self.assertAllClose(hinge_expected.eval(), hinge_loss.eval()) + + def testLagrangeMultiplierUpdateDirection(self): + for target_rate in [0.35, 0.65]: + for surrogate_type in ['xent', 'hinge']: + kwargs = {'target_rate': target_rate, + 'surrogate_type': surrogate_type, + 'scope': 'tpr-at-fpr_{}_{}'.format(target_rate, + surrogate_type)} + run_lagrange_multiplier_test( + global_objective=( + loss_layers.true_positive_rate_at_false_positive_rate_loss), + objective_kwargs=kwargs, + data_builder=_multilabel_data, + test_object=self) + kwargs['scope'] = 'other-' + kwargs['scope'] + run_lagrange_multiplier_test( + global_objective=( + loss_layers.true_positive_rate_at_false_positive_rate_loss), + objective_kwargs=kwargs, + data_builder=_other_multilabel_data(surrogate_type), + test_object=self) + + def testLagrangeMultiplierUpdateDirectionWithMultipleRates(self): + """Runs Lagrange multiplier test with multiple target rates.""" + target_rate = [0.35, 0.65] + for surrogate_type in ['xent', 'hinge']: + kwargs = {'target_rate': target_rate, + 'surrogate_type': surrogate_type, + 'scope': 'tpr-at-fpr_{}_{}'.format( + '_'.join([str(target) for target in target_rate]), + surrogate_type)} + 
run_lagrange_multiplier_test( + global_objective=( + loss_layers.true_positive_rate_at_false_positive_rate_loss), + objective_kwargs=kwargs, + data_builder=_multilabel_data, + test_object=self) + kwargs['scope'] = 'other-' + kwargs['scope'] + run_lagrange_multiplier_test( + global_objective=( + loss_layers.true_positive_rate_at_false_positive_rate_loss), + objective_kwargs=kwargs, + data_builder=_other_multilabel_data(surrogate_type), + test_object=self) + + def testEquivalenceBetweenSingleAndEqualMultipleRates(self): + """Compares single and multiple target rates of the same value. + + Checks that using a single target rate and multiple rates with the + same value would result in the same loss value. + """ + num_labels = 2 + target_shape = [20, num_labels] + logits = tf.Variable(tf.random_normal(target_shape)) + targets = tf.Variable( + tf.to_float(tf.greater(tf.random_uniform(target_shape), 0.7))) + label_priors = tf.constant([0.34], shape=[num_labels]) + + multi_label_loss, _ = ( + loss_layers.true_positive_rate_at_false_positive_rate_loss( + targets, logits, [0.75, 0.75], label_priors=label_priors)) + + single_label_loss, _ = ( + loss_layers.true_positive_rate_at_false_positive_rate_loss( + targets, logits, 0.75, label_priors=label_priors)) + + with self.test_session() as session: + tf.global_variables_initializer().run() + multi_label_loss_val, single_label_loss_val = session.run( + [multi_label_loss, single_label_loss]) + self.assertAllClose(multi_label_loss_val, single_label_loss_val) + + def testEquivalenceBetweenSingleAndMultipleRates(self): + """Compares single and multiple target rates of different values. + + Runs true_positive_rate_at_false_positive_rate_loss with multiple target + rates, and runs each label seperately with its own target rate as a + scalar. Validates that the returned loss values are the same. + """ + target_precision = [0.7, 0.9, 0.4] + num_labels = 3 + batch_shape = [30, num_labels] + logits = tf.Variable(tf.random_normal(batch_shape)) + targets = tf.Variable( + tf.to_float(tf.greater(tf.random_uniform(batch_shape), 0.4))) + label_priors = tf.constant(0.45, shape=[num_labels]) + + multi_label_loss, _ = ( + loss_layers.true_positive_rate_at_false_positive_rate_loss( + targets, logits, target_precision, label_priors=label_priors)) + + single_label_losses = [ + loss_layers.true_positive_rate_at_false_positive_rate_loss( + tf.expand_dims(targets[:, i], -1), + tf.expand_dims(logits[:, i], -1), + target_precision[i], + label_priors=label_priors[i])[0] + for i in range(num_labels) + ] + + single_label_losses = tf.concat(single_label_losses, 1) + + with self.test_session() as session: + tf.global_variables_initializer().run() + multi_label_loss_val, single_label_loss_val = session.run( + [multi_label_loss, single_label_losses]) + self.assertAllClose(multi_label_loss_val, single_label_loss_val) + + +class UtilityFunctionsTest(tf.test.TestCase): + + def testTrainableDualVariable(self): + # Confirm correct behavior of a trainable dual variable. 
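+    # With x = 2.0 and the dual initialized to 1.0, d(loss)/d(dual_value) = -(x - 1) = -1;
+    # the gradient reversal in _create_dual_variable multiplies this by -dual_rate_factor = -0.3,
+    # giving a gradient of 0.3 on the underlying variable, so one descent step with
+    # learning rate 1.0 lands at 1.0 - 0.3 = 0.7.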
+ x = tf.get_variable('primal', dtype=tf.float32, initializer=2.0) + y_value, y = loss_layers._create_dual_variable( + 'dual', shape=None, dtype=tf.float32, initializer=1.0, collections=None, + trainable=True, dual_rate_factor=0.3) + optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0) + update = optimizer.minimize(0.5 * tf.square(x - y_value)) + + with self.test_session(): + tf.global_variables_initializer().run() + update.run() + self.assertAllClose(0.7, y.eval()) + + def testUntrainableDualVariable(self): + # Confirm correct behavior of dual variable which is not trainable. + x = tf.get_variable('primal', dtype=tf.float32, initializer=-2.0) + y_value, y = loss_layers._create_dual_variable( + 'dual', shape=None, dtype=tf.float32, initializer=1.0, collections=None, + trainable=False, dual_rate_factor=0.8) + optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0) + update = optimizer.minimize(tf.square(x) * y_value + tf.exp(y_value)) + + with self.test_session(): + tf.global_variables_initializer().run() + update.run() + self.assertAllClose(1.0, y.eval()) + + +class BoundTest(parameterized.TestCase, tf.test.TestCase): + + @parameterized.named_parameters( + ('_xent', 'xent', 1.0, [2.0, 1.0]), + ('_xent_weighted', 'xent', + numpy.array([0, 2, 0.5, 1, 2, 3]).reshape(6, 1), [2.5, 0]), + ('_hinge', 'hinge', 1.0, [2.0, 1.0]), + ('_hinge_weighted', 'hinge', + numpy.array([1.0, 2, 3, 4, 5, 6]).reshape(6, 1), [5.0, 1])) + def testLowerBoundMultilabel(self, surrogate_type, weights, expected): + labels, logits, _ = _multilabel_data() + lower_bound = loss_layers.true_positives_lower_bound( + labels, logits, weights, surrogate_type) + + with self.test_session(): + self.assertAllClose(lower_bound.eval(), expected) + + @parameterized.named_parameters( + ('_xent', 'xent'), ('_hinge', 'hinge')) + def testLowerBoundOtherMultilabel(self, surrogate_type): + labels, logits, _ = _other_multilabel_data(surrogate_type)() + lower_bound = loss_layers.true_positives_lower_bound( + labels, logits, 1.0, surrogate_type) + + with self.test_session(): + self.assertAllClose(lower_bound.eval(), [4.0, 2.0], atol=1e-5) + + @parameterized.named_parameters( + ('_xent', 'xent', 1.0, [1.0, 2.0]), + ('_xent_weighted', 'xent', + numpy.array([3.0, 2, 1, 0, 1, 2]).reshape(6, 1), [2.0, 1.0]), + ('_hinge', 'hinge', 1.0, [1.0, 2.0]), + ('_hinge_weighted', 'hinge', + numpy.array([13, 12, 11, 0.5, 0, 0.5]).reshape(6, 1), [0.5, 0.5])) + def testUpperBoundMultilabel(self, surrogate_type, weights, expected): + labels, logits, _ = _multilabel_data() + upper_bound = loss_layers.false_positives_upper_bound( + labels, logits, weights, surrogate_type) + + with self.test_session(): + self.assertAllClose(upper_bound.eval(), expected) + + @parameterized.named_parameters( + ('_xent', 'xent'), ('_hinge', 'hinge')) + def testUpperBoundOtherMultilabel(self, surrogate_type): + labels, logits, _ = _other_multilabel_data(surrogate_type)() + upper_bound = loss_layers.false_positives_upper_bound( + labels, logits, 1.0, surrogate_type) + + with self.test_session(): + self.assertAllClose(upper_bound.eval(), [2.0, 4.0], atol=1e-5) + + @parameterized.named_parameters( + ('_lower', 'lower'), ('_upper', 'upper')) + def testThreeDimensionalLogits(self, bound): + bound_function = loss_layers.false_positives_upper_bound + if bound == 'lower': + bound_function = loss_layers.true_positives_lower_bound + random_labels = numpy.float32(numpy.random.uniform(size=[2, 3]) > 0.5) + random_logits = numpy.float32(numpy.random.randn(2, 3, 2)) + 
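+    # The trailing dimension of `random_logits` indexes independent 'attempts';
+    # the bound computed on the full 3D tensor should match the per-slice
+    # bounds stacked along axis 1, which is what the assertion below checks.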
first_slice_logits = random_logits[:, :, 0].reshape(2, 3) + second_slice_logits = random_logits[:, :, 1].reshape(2, 3) + + full_bound = bound_function( + tf.constant(random_labels), tf.constant(random_logits), 1.0, 'xent') + first_slice_bound = bound_function(tf.constant(random_labels), + tf.constant(first_slice_logits), + 1.0, + 'xent') + second_slice_bound = bound_function(tf.constant(random_labels), + tf.constant(second_slice_logits), + 1.0, + 'xent') + stacked_bound = tf.stack([first_slice_bound, second_slice_bound], axis=1) + + with self.test_session(): + self.assertAllClose(full_bound.eval(), stacked_bound.eval()) + + +def run_lagrange_multiplier_test(global_objective, + objective_kwargs, + data_builder, + test_object): + """Runs a test for the Lagrange multiplier update of `global_objective`. + + The test checks that the constraint for `global_objective` is satisfied on + the first label of the data produced by `data_builder` but not the second. + + Args: + global_objective: One of the global objectives. + objective_kwargs: A dictionary of keyword arguments to pass to + `global_objective`. Must contain an entry for the constraint argument + of `global_objective`, e.g. 'target_rate' or 'target_precision'. + data_builder: A function which returns tensors corresponding to labels, + logits, and label priors. + test_object: An instance of tf.test.TestCase. + """ + # Construct global objective kwargs from a copy of `objective_kwargs`. + kwargs = dict(objective_kwargs) + targets, logits, priors = data_builder() + kwargs['labels'] = targets + kwargs['logits'] = logits + kwargs['label_priors'] = priors + + loss, output_dict = global_objective(**kwargs) + lambdas = tf.squeeze(output_dict['lambdas']) + opt = tf.train.GradientDescentOptimizer(learning_rate=0.1) + update_op = opt.minimize(loss, var_list=[output_dict['lambdas']]) + + with test_object.test_session() as session: + tf.global_variables_initializer().run() + lambdas_before = session.run(lambdas) + session.run(update_op) + lambdas_after = session.run(lambdas) + test_object.assertLess(lambdas_after[0], lambdas_before[0]) + test_object.assertGreater(lambdas_after[1], lambdas_before[1]) + + +class CrossFunctionTest(parameterized.TestCase, tf.test.TestCase): + + @parameterized.named_parameters( + ('_auc01xent', loss_layers.precision_recall_auc_loss, { + 'precision_range': (0.0, 1.0), 'surrogate_type': 'xent' + }), + ('_auc051xent', loss_layers.precision_recall_auc_loss, { + 'precision_range': (0.5, 1.0), 'surrogate_type': 'xent' + }), + ('_auc01)hinge', loss_layers.precision_recall_auc_loss, { + 'precision_range': (0.0, 1.0), 'surrogate_type': 'hinge' + }), + ('_ratp04', loss_layers.recall_at_precision_loss, { + 'target_precision': 0.4, 'surrogate_type': 'xent' + }), + ('_ratp066', loss_layers.recall_at_precision_loss, { + 'target_precision': 0.66, 'surrogate_type': 'xent' + }), + ('_ratp07_hinge', loss_layers.recall_at_precision_loss, { + 'target_precision': 0.7, 'surrogate_type': 'hinge' + }), + ('_fpattp066', loss_layers.false_positive_rate_at_true_positive_rate_loss, + {'target_rate': 0.66, 'surrogate_type': 'xent'}), + ('_fpattp046', loss_layers.false_positive_rate_at_true_positive_rate_loss, + { + 'target_rate': 0.46, 'surrogate_type': 'xent' + }), + ('_fpattp076_hinge', + loss_layers.false_positive_rate_at_true_positive_rate_loss, { + 'target_rate': 0.76, 'surrogate_type': 'hinge' + }), + ('_fpattp036_hinge', + loss_layers.false_positive_rate_at_true_positive_rate_loss, { + 'target_rate': 0.36, 'surrogate_type': 'hinge' + }), + ) + 
def testWeigtedGlobalObjective(self, + global_objective, + objective_kwargs): + """Runs a test of `global_objective` with per-example weights. + + Args: + global_objective: One of the global objectives. + objective_kwargs: A dictionary of keyword arguments to pass to + `global_objective`. Must contain keys 'surrogate_type', and the keyword + for the constraint argument of `global_objective`, e.g. 'target_rate' or + 'target_precision'. + """ + logits_positives = tf.constant([1, -0.5, 3], shape=[3, 1]) + logits_negatives = tf.constant([-0.5, 1, -1, -1, -0.5, 1], shape=[6, 1]) + + # Dummy tensor is used to compute the gradients. + dummy = tf.constant(1.0) + logits = tf.concat([logits_positives, logits_negatives], 0) + logits = tf.multiply(logits, dummy) + targets = tf.constant([1, 1, 1, 0, 0, 0, 0, 0, 0], + shape=[9, 1], dtype=tf.float32) + priors = tf.constant(1.0/3.0, shape=[1]) + weights = tf.constant([1, 1, 1, 0, 0, 0, 2, 2, 2], + shape=[9, 1], dtype=tf.float32) + + # Construct global objective kwargs. + objective_kwargs['labels'] = targets + objective_kwargs['logits'] = logits + objective_kwargs['label_priors'] = priors + + scope = 'weighted_test' + # Unweighted loss. + objective_kwargs['scope'] = scope + '_plain' + raw_loss, update = global_objective(**objective_kwargs) + loss = tf.reduce_sum(raw_loss) + + # Weighted loss. + objective_kwargs['weights'] = weights + objective_kwargs['scope'] = scope + '_weighted' + raw_weighted_loss, weighted_update = global_objective(**objective_kwargs) + weighted_loss = tf.reduce_sum(raw_weighted_loss) + + lambdas = tf.contrib.framework.get_unique_variable(scope + '_plain/lambdas') + weighted_lambdas = tf.contrib.framework.get_unique_variable( + scope + '_weighted/lambdas') + logits_gradient = tf.gradients(loss, dummy) + weighted_logits_gradient = tf.gradients(weighted_loss, dummy) + + with self.test_session() as session: + tf.global_variables_initializer().run() + self.assertAllClose(loss.eval(), weighted_loss.eval()) + + logits_grad, weighted_logits_grad = session.run( + [logits_gradient, weighted_logits_gradient]) + self.assertAllClose(logits_grad, weighted_logits_grad) + + session.run([update, weighted_update]) + lambdas_value, weighted_lambdas_value = session.run( + [lambdas, weighted_lambdas]) + self.assertAllClose(lambdas_value, weighted_lambdas_value) + + @parameterized.named_parameters( + ('_prauc051xent', loss_layers.precision_recall_auc_loss, { + 'precision_range': (0.5, 1.0), 'surrogate_type': 'xent' + }), + ('_prauc01hinge', loss_layers.precision_recall_auc_loss, { + 'precision_range': (0.0, 1.0), 'surrogate_type': 'hinge' + }), + ('_rocxent', loss_layers.roc_auc_loss, {'surrogate_type': 'xent'}), + ('_rochinge', loss_layers.roc_auc_loss, {'surrogate_type': 'xent'}), + ('_ratp04', loss_layers.recall_at_precision_loss, { + 'target_precision': 0.4, 'surrogate_type': 'xent' + }), + ('_ratp07_hinge', loss_layers.recall_at_precision_loss, { + 'target_precision': 0.7, 'surrogate_type': 'hinge' + }), + ('_patr05', loss_layers.precision_at_recall_loss, { + 'target_recall': 0.4, 'surrogate_type': 'xent' + }), + ('_patr08_hinge', loss_layers.precision_at_recall_loss, { + 'target_recall': 0.7, 'surrogate_type': 'hinge' + }), + ('_fpattp046', loss_layers.false_positive_rate_at_true_positive_rate_loss, + { + 'target_rate': 0.46, 'surrogate_type': 'xent' + }), + ('_fpattp036_hinge', + loss_layers.false_positive_rate_at_true_positive_rate_loss, { + 'target_rate': 0.36, 'surrogate_type': 'hinge' + }), + ('_tpatfp076', 
loss_layers.true_positive_rate_at_false_positive_rate_loss, + { + 'target_rate': 0.76, 'surrogate_type': 'xent' + }), + ('_tpatfp036_hinge', + loss_layers.true_positive_rate_at_false_positive_rate_loss, { + 'target_rate': 0.36, 'surrogate_type': 'hinge' + }), + ) + def testVectorAndMatrixLabelEquivalence(self, + global_objective, + objective_kwargs): + """Tests equivalence between label shape [batch_size] or [batch_size, 1].""" + vector_labels = tf.constant([1.0, 1.0, 0.0, 0.0], shape=[4]) + vector_logits = tf.constant([1.0, 0.1, 0.1, -1.0], shape=[4]) + + # Construct vector global objective kwargs and loss. + vector_kwargs = objective_kwargs.copy() + vector_kwargs['labels'] = vector_labels + vector_kwargs['logits'] = vector_logits + vector_loss, _ = global_objective(**vector_kwargs) + vector_loss_sum = tf.reduce_sum(vector_loss) + + # Construct matrix global objective kwargs and loss. + matrix_kwargs = objective_kwargs.copy() + matrix_kwargs['labels'] = tf.expand_dims(vector_labels, 1) + matrix_kwargs['logits'] = tf.expand_dims(vector_logits, 1) + matrix_loss, _ = global_objective(**matrix_kwargs) + matrix_loss_sum = tf.reduce_sum(matrix_loss) + + self.assertEqual(1, vector_loss.get_shape().ndims) + self.assertEqual(2, matrix_loss.get_shape().ndims) + + with self.test_session(): + tf.global_variables_initializer().run() + self.assertAllClose(vector_loss_sum.eval(), matrix_loss_sum.eval()) + + @parameterized.named_parameters( + ('_prauc', loss_layers.precision_recall_auc_loss, None), + ('_roc', loss_layers.roc_auc_loss, None), + ('_rap', loss_layers.recall_at_precision_loss, {'target_precision': 0.8}), + ('_patr', loss_layers.precision_at_recall_loss, {'target_recall': 0.7}), + ('_fpattp', loss_layers.false_positive_rate_at_true_positive_rate_loss, + {'target_rate': 0.9}), + ('_tpatfp', loss_layers.true_positive_rate_at_false_positive_rate_loss, + {'target_rate': 0.1}) + ) + def testUnknownBatchSize(self, global_objective, objective_kwargs): + # Tests that there are no errors when the batch size is not known. + batch_shape = [5, 2] + logits = tf.placeholder(tf.float32) + logits_feed = numpy.random.randn(*batch_shape) + labels = tf.placeholder(tf.float32) + labels_feed = logits_feed > 0.1 + logits.set_shape([None, 2]) + labels.set_shape([None, 2]) + + if objective_kwargs is None: + objective_kwargs = {} + + placeholder_kwargs = objective_kwargs.copy() + placeholder_kwargs['labels'] = labels + placeholder_kwargs['logits'] = logits + placeholder_loss, _ = global_objective(**placeholder_kwargs) + + kwargs = objective_kwargs.copy() + kwargs['labels'] = labels_feed + kwargs['logits'] = logits_feed + loss, _ = global_objective(**kwargs) + + with self.test_session() as session: + tf.global_variables_initializer().run() + feed_loss_val = session.run(placeholder_loss, + feed_dict={logits: logits_feed, + labels: labels_feed}) + loss_val = session.run(loss) + self.assertAllClose(feed_loss_val, loss_val) + + +# Both sets of logits below are designed so that the surrogate precision and +# recall (true positive rate) of class 1 is ~ 2/3, and the same surrogates for +# class 2 are ~ 1/3. The false positive rate surrogates are ~ 1/3 and 2/3. 
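+# In particular, examples whose logits have magnitude >= 14 saturate both the
+# sigmoid and hinge surrogates (each counts as ~1), while examples with a logit
+# of exactly 0.0 contribute ~0 to the surrogate counts; the ~1/3 and ~2/3
+# figures above follow from these per-example contributions.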
+def _multilabel_data(): + targets = tf.constant([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], shape=[6, 1]) + targets = tf.concat([targets, targets], 1) + logits_positives = tf.constant([[0.0, 15], + [16, 0.0], + [14, 0.0]], shape=[3, 2]) + logits_negatives = tf.constant([[-17, 0.0], + [-15, 0.0], + [0.0, -101]], shape=[3, 2]) + logits = tf.concat([logits_positives, logits_negatives], 0) + priors = tf.constant(0.5, shape=[2]) + + return targets, logits, priors + + +def _other_multilabel_data(surrogate_type): + targets = tf.constant( + [1.0] * 6 + [0.0] * 6, shape=[12, 1]) + targets = tf.concat([targets, targets], 1) + logits_positives = tf.constant([[0.0, 13], + [12, 0.0], + [15, 0.0], + [0.0, 30], + [13, 0.0], + [18, 0.0]], shape=[6, 2]) + # A score of cost_2 incurs a loss of ~2.0. + cost_2 = 1.0 if surrogate_type == 'hinge' else 1.09861229 + logits_negatives = tf.constant([[-16, cost_2], + [-15, cost_2], + [cost_2, -111], + [-133, -14,], + [-14.0100101, -16,], + [-19.888828882, -101]], shape=[6, 2]) + logits = tf.concat([logits_positives, logits_negatives], 0) + priors = tf.constant(0.5, shape=[2]) + + def builder(): + return targets, logits, priors + + return builder + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/global_objectives/test_all.py b/models/research/global_objectives/test_all.py new file mode 100644 index 0000000000000000000000000000000000000000..d7e439e219840a9ec5c65382c6bc392b1d68b447 --- /dev/null +++ b/models/research/global_objectives/test_all.py @@ -0,0 +1,37 @@ +# Copyright 2018 The TensorFlow Global Objectives Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Runs all unit tests in the Global Objectives package. + +Requires that TensorFlow and abseil (https://github.com/abseil/abseil-py) be +installed on your machine. Command to run the tests: +python test_all.py + +""" + +import os +import sys +import unittest + +this_file = os.path.realpath(__file__) +start_dir = os.path.dirname(this_file) +parent_dir = os.path.dirname(start_dir) + +sys.path.append(parent_dir) +loader = unittest.TestLoader() +suite = loader.discover(start_dir, pattern='*_test.py') + +runner = unittest.TextTestRunner(verbosity=2) +runner.run(suite) diff --git a/models/research/global_objectives/util.py b/models/research/global_objectives/util.py new file mode 100644 index 0000000000000000000000000000000000000000..e2b287a90bd743e5466b875c933c3872868f4a5f --- /dev/null +++ b/models/research/global_objectives/util.py @@ -0,0 +1,348 @@ +# Copyright 2018 The TensorFlow Global Objectives Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Contains utility functions for the global objectives library.""" + +# Dependency imports +import tensorflow as tf + + +def weighted_sigmoid_cross_entropy_with_logits(labels, + logits, + positive_weights=1.0, + negative_weights=1.0, + name=None): + """Computes a weighting of sigmoid cross entropy given `logits`. + + Measures the weighted probability error in discrete classification tasks in + which classes are independent and not mutually exclusive. For instance, one + could perform multilabel classification where a picture can contain both an + elephant and a dog at the same time. The class weight multiplies the + different types of errors. + For brevity, let `x = logits`, `z = labels`, `c = positive_weights`, + `d = negative_weights` The + weighed logistic loss is + + ``` + c * z * -log(sigmoid(x)) + d * (1 - z) * -log(1 - sigmoid(x)) + = c * z * -log(1 / (1 + exp(-x))) - d * (1 - z) * log(exp(-x) / (1 + exp(-x))) + = c * z * log(1 + exp(-x)) + d * (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) + = c * z * log(1 + exp(-x)) + d * (1 - z) * (x + log(1 + exp(-x))) + = (1 - z) * x * d + (1 - z + c * z ) * log(1 + exp(-x)) + = - d * x * z + d * x + (d - d * z + c * z ) * log(1 + exp(-x)) + ``` + + To ensure stability and avoid overflow, the implementation uses the identity + log(1 + exp(-x)) = max(0,-x) + log(1 + exp(-abs(x))) + and the result is computed as + + ``` + = -d * x * z + d * x + + (d - d * z + c * z ) * (max(0,-x) + log(1 + exp(-abs(x)))) + ``` + + Note that the loss is NOT an upper bound on the 0-1 loss, unless it is divided + by log(2). + + Args: + labels: A `Tensor` of type `float32` or `float64`. `labels` can be a 2D + tensor with shape [batch_size, num_labels] or a 3D tensor with shape + [batch_size, num_labels, K]. + logits: A `Tensor` of the same type and shape as `labels`. If `logits` has + shape [batch_size, num_labels, K], the loss is computed separately on each + slice [:, :, k] of `logits`. + positive_weights: A `Tensor` that holds positive weights and has the + following semantics according to its shape: + scalar - A global positive weight. + 1D tensor - must be of size K, a weight for each 'attempt' + 2D tensor - of size [num_labels, K'] where K' is either K or 1. + The `positive_weights` will be expanded to the left to match the + dimensions of logits and labels. + negative_weights: A `Tensor` that holds positive weight and has the + semantics identical to positive_weights. + name: A name for the operation (optional). + + Returns: + A `Tensor` of the same shape as `logits` with the componentwise + weighted logistic losses. 
+ """ + with tf.name_scope( + name, + 'weighted_logistic_loss', + [logits, labels, positive_weights, negative_weights]) as name: + labels, logits, positive_weights, negative_weights = prepare_loss_args( + labels, logits, positive_weights, negative_weights) + + softplus_term = tf.add(tf.maximum(-logits, 0.0), + tf.log(1.0 + tf.exp(-tf.abs(logits)))) + weight_dependent_factor = ( + negative_weights + (positive_weights - negative_weights) * labels) + return (negative_weights * (logits - labels * logits) + + weight_dependent_factor * softplus_term) + + +def weighted_hinge_loss(labels, + logits, + positive_weights=1.0, + negative_weights=1.0, + name=None): + """Computes weighted hinge loss given logits `logits`. + + The loss applies to multi-label classification tasks where labels are + independent and not mutually exclusive. See also + `weighted_sigmoid_cross_entropy_with_logits`. + + Args: + labels: A `Tensor` of type `float32` or `float64`. Each entry must be + either 0 or 1. `labels` can be a 2D tensor with shape + [batch_size, num_labels] or a 3D tensor with shape + [batch_size, num_labels, K]. + logits: A `Tensor` of the same type and shape as `labels`. If `logits` has + shape [batch_size, num_labels, K], the loss is computed separately on each + slice [:, :, k] of `logits`. + positive_weights: A `Tensor` that holds positive weights and has the + following semantics according to its shape: + scalar - A global positive weight. + 1D tensor - must be of size K, a weight for each 'attempt' + 2D tensor - of size [num_labels, K'] where K' is either K or 1. + The `positive_weights` will be expanded to the left to match the + dimensions of logits and labels. + negative_weights: A `Tensor` that holds positive weight and has the + semantics identical to positive_weights. + name: A name for the operation (optional). + + Returns: + A `Tensor` of the same shape as `logits` with the componentwise + weighted hinge loss. + """ + with tf.name_scope( + name, 'weighted_hinge_loss', + [logits, labels, positive_weights, negative_weights]) as name: + labels, logits, positive_weights, negative_weights = prepare_loss_args( + labels, logits, positive_weights, negative_weights) + + positives_term = positive_weights * labels * tf.maximum(1.0 - logits, 0) + negatives_term = (negative_weights * (1.0 - labels) + * tf.maximum(1.0 + logits, 0)) + return positives_term + negatives_term + + +def weighted_surrogate_loss(labels, + logits, + surrogate_type='xent', + positive_weights=1.0, + negative_weights=1.0, + name=None): + """Returns either weighted cross-entropy or hinge loss. + + For example `surrogate_type` is 'xent' returns the weighted cross + entropy loss. + + Args: + labels: A `Tensor` of type `float32` or `float64`. Each entry must be + between 0 and 1. `labels` can be a 2D tensor with shape + [batch_size, num_labels] or a 3D tensor with shape + [batch_size, num_labels, K]. + logits: A `Tensor` of the same type and shape as `labels`. If `logits` has + shape [batch_size, num_labels, K], each slice [:, :, k] represents an + 'attempt' to predict `labels` and the loss is computed per slice. + surrogate_type: A string that determines which loss to return, supports + 'xent' for cross-entropy and 'hinge' for hinge loss. + positive_weights: A `Tensor` that holds positive weights and has the + following semantics according to its shape: + scalar - A global positive weight. + 1D tensor - must be of size K, a weight for each 'attempt' + 2D tensor - of size [num_labels, K'] where K' is either K or 1. 
+ The `positive_weights` will be expanded to the left to match the + dimensions of logits and labels. + negative_weights: A `Tensor` that holds positive weight and has the + semantics identical to positive_weights. + name: A name for the operation (optional). + + Returns: + The weigthed loss. + + Raises: + ValueError: If value of `surrogate_type` is not supported. + """ + with tf.name_scope( + name, 'weighted_loss', + [logits, labels, surrogate_type, positive_weights, + negative_weights]) as name: + if surrogate_type == 'xent': + return weighted_sigmoid_cross_entropy_with_logits( + logits=logits, + labels=labels, + positive_weights=positive_weights, + negative_weights=negative_weights, + name=name) + elif surrogate_type == 'hinge': + return weighted_hinge_loss( + logits=logits, + labels=labels, + positive_weights=positive_weights, + negative_weights=negative_weights, + name=name) + raise ValueError('surrogate_type %s not supported.' % surrogate_type) + + +def expand_outer(tensor, rank): + """Expands the given `Tensor` outwards to a target rank. + + For example if rank = 3 and tensor.shape is [3, 4], this function will expand + to such that the resulting shape will be [1, 3, 4]. + + Args: + tensor: The tensor to expand. + rank: The target dimension. + + Returns: + The expanded tensor. + + Raises: + ValueError: If rank of `tensor` is unknown, or if `rank` is smaller than + the rank of `tensor`. + """ + if tensor.get_shape().ndims is None: + raise ValueError('tensor dimension must be known.') + if len(tensor.get_shape()) > rank: + raise ValueError( + '`rank` must be at least the current tensor dimension: (%s vs %s).' % + (rank, len(tensor.get_shape()))) + while len(tensor.get_shape()) < rank: + tensor = tf.expand_dims(tensor, 0) + return tensor + + +def build_label_priors(labels, + weights=None, + positive_pseudocount=1.0, + negative_pseudocount=1.0, + variables_collections=None): + """Creates an op to maintain and update label prior probabilities. + + For each label, the label priors are estimated as + (P + sum_i w_i y_i) / (P + N + sum_i w_i), + where y_i is the ith label, w_i is the ith weight, P is a pseudo-count of + positive labels, and N is a pseudo-count of negative labels. The index i + ranges over all labels observed during all evaluations of the returned op. + + Args: + labels: A `Tensor` with shape [batch_size, num_labels]. Entries should be + in [0, 1]. + weights: Coefficients representing the weight of each label. Must be either + a Tensor of shape [batch_size, num_labels] or `None`, in which case each + weight is treated as 1.0. + positive_pseudocount: Number of positive labels used to initialize the label + priors. + negative_pseudocount: Number of negative labels used to initialize the label + priors. + variables_collections: Optional list of collections for created variables. + + Returns: + label_priors: An op to update the weighted label_priors. Gives the + current value of the label priors when evaluated. + """ + dtype = labels.dtype.base_dtype + num_labels = get_num_labels(labels) + + if weights is None: + weights = tf.ones_like(labels) + + # We disable partitioning while constructing dual variables because they will + # be updated with assign, which is not available for partitioned variables. + partitioner = tf.get_variable_scope().partitioner + try: + tf.get_variable_scope().set_partitioner(None) + # Create variable and update op for weighted label counts. 
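+    # The two variables below are streaming counters: every evaluation of the
+    # returned op adds the current batch's weighted label counts and weight
+    # sums, so the priors reflect all batches seen so far, not just the latest.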
+ weighted_label_counts = tf.contrib.framework.model_variable( + name='weighted_label_counts', + shape=[num_labels], + dtype=dtype, + initializer=tf.constant_initializer( + [positive_pseudocount] * num_labels, dtype=dtype), + collections=variables_collections, + trainable=False) + weighted_label_counts_update = weighted_label_counts.assign_add( + tf.reduce_sum(weights * labels, 0)) + + # Create variable and update op for the sum of the weights. + weight_sum = tf.contrib.framework.model_variable( + name='weight_sum', + shape=[num_labels], + dtype=dtype, + initializer=tf.constant_initializer( + [positive_pseudocount + negative_pseudocount] * num_labels, + dtype=dtype), + collections=variables_collections, + trainable=False) + weight_sum_update = weight_sum.assign_add(tf.reduce_sum(weights, 0)) + + finally: + tf.get_variable_scope().set_partitioner(partitioner) + + label_priors = tf.div( + weighted_label_counts_update, + weight_sum_update) + return label_priors + + +def convert_and_cast(value, name, dtype): + """Convert input to tensor and cast to dtype. + + Args: + value: An object whose type has a registered Tensor conversion function, + e.g. python numerical type or numpy array. + name: Name to use for the new Tensor, if one is created. + dtype: Optional element type for the returned tensor. + + Returns: + A tensor. + """ + return tf.cast(tf.convert_to_tensor(value, name=name), dtype=dtype) + + +def prepare_loss_args(labels, logits, positive_weights, negative_weights): + """Prepare arguments for weighted loss functions. + + If needed, will convert given arguments to appropriate type and shape. + + Args: + labels: labels or labels of the loss function. + logits: Logits of the loss function. + positive_weights: Weight on the positive examples. + negative_weights: Weight on the negative examples. + + Returns: + Converted labels, logits, positive_weights, negative_weights. + """ + logits = tf.convert_to_tensor(logits, name='logits') + labels = convert_and_cast(labels, 'labels', logits.dtype) + if len(labels.get_shape()) == 2 and len(logits.get_shape()) == 3: + labels = tf.expand_dims(labels, [2]) + + positive_weights = convert_and_cast(positive_weights, 'positive_weights', + logits.dtype) + positive_weights = expand_outer(positive_weights, logits.get_shape().ndims) + negative_weights = convert_and_cast(negative_weights, 'negative_weights', + logits.dtype) + negative_weights = expand_outer(negative_weights, logits.get_shape().ndims) + return labels, logits, positive_weights, negative_weights + + +def get_num_labels(labels_or_logits): + """Returns the number of labels inferred from labels_or_logits.""" + if labels_or_logits.get_shape().ndims <= 1: + return 1 + return labels_or_logits.get_shape()[1].value diff --git a/models/research/global_objectives/util_test.py b/models/research/global_objectives/util_test.py new file mode 100644 index 0000000000000000000000000000000000000000..195252a53eb1d0a50735d2f987b0882681b0544a --- /dev/null +++ b/models/research/global_objectives/util_test.py @@ -0,0 +1,333 @@ +# Copyright 2018 The TensorFlow Global Objectives Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for global objectives util functions.""" + +# Dependency imports +from absl.testing import parameterized +import numpy as np +import tensorflow as tf + +from global_objectives import util + + +def weighted_sigmoid_cross_entropy(targets, logits, weight): + return (weight * targets * np.log(1.0 + np.exp(-logits)) + ( + (1.0 - targets) * np.log(1.0 + 1.0 / np.exp(-logits)))) + + +def hinge_loss(labels, logits): + # Mostly copied from tensorflow.python.ops.losses but with loss per datapoint. + labels = tf.to_float(labels) + all_ones = tf.ones_like(labels) + labels = tf.subtract(2 * labels, all_ones) + return tf.nn.relu(tf.subtract(all_ones, tf.multiply(labels, logits))) + + +class WeightedSigmoidCrossEntropyTest(parameterized.TestCase, tf.test.TestCase): + + def testTrivialCompatibilityWithSigmoidCrossEntropy(self): + """Tests compatibility with unweighted function with weight 1.0.""" + x_shape = [300, 10] + targets = np.random.random_sample(x_shape).astype(np.float32) + logits = np.random.randn(*x_shape).astype(np.float32) + weighted_loss = util.weighted_sigmoid_cross_entropy_with_logits( + targets, + logits) + expected_loss = ( + tf.contrib.nn.deprecated_flipped_sigmoid_cross_entropy_with_logits( + logits, targets)) + with self.test_session(): + self.assertAllClose(expected_loss.eval(), + weighted_loss.eval(), + atol=0.000001) + + def testNonTrivialCompatibilityWithSigmoidCrossEntropy(self): + """Tests use of an arbitrary weight (4.12).""" + x_shape = [300, 10] + targets = np.random.random_sample(x_shape).astype(np.float32) + logits = np.random.randn(*x_shape).astype(np.float32) + weight = 4.12 + weighted_loss = util.weighted_sigmoid_cross_entropy_with_logits( + targets, + logits, + weight, + weight) + expected_loss = ( + weight * + tf.contrib.nn.deprecated_flipped_sigmoid_cross_entropy_with_logits( + logits, targets)) + with self.test_session(): + self.assertAllClose(expected_loss.eval(), + weighted_loss.eval(), + atol=0.000001) + + def testDifferentSizeWeightedSigmoidCrossEntropy(self): + """Tests correctness on 3D tensors. + + Tests that the function works as expected when logits is a 3D tensor and + targets is a 2D tensor. 
+ """ + targets_shape = [30, 4] + logits_shape = [targets_shape[0], targets_shape[1], 3] + targets = np.random.random_sample(targets_shape).astype(np.float32) + logits = np.random.randn(*logits_shape).astype(np.float32) + + weight_vector = [2.0, 3.0, 13.0] + loss = util.weighted_sigmoid_cross_entropy_with_logits(targets, + logits, + weight_vector) + + with self.test_session(): + loss = loss.eval() + for i in range(0, len(weight_vector)): + expected = weighted_sigmoid_cross_entropy(targets, logits[:, :, i], + weight_vector[i]) + self.assertAllClose(loss[:, :, i], expected, atol=0.000001) + + @parameterized.parameters((300, 10, 0.3), (20, 4, 2.0), (30, 4, 3.9)) + def testWeightedSigmoidCrossEntropy(self, batch_size, num_labels, weight): + """Tests thats the tf and numpy functions agree on many instances.""" + x_shape = [batch_size, num_labels] + targets = np.random.random_sample(x_shape).astype(np.float32) + logits = np.random.randn(*x_shape).astype(np.float32) + + with self.test_session(): + loss = util.weighted_sigmoid_cross_entropy_with_logits( + targets, + logits, + weight, + 1.0, + name='weighted-loss') + expected = weighted_sigmoid_cross_entropy(targets, logits, weight) + self.assertAllClose(expected, loss.eval(), atol=0.000001) + + def testGradients(self): + """Tests that weighted loss gradients behave as expected.""" + dummy_tensor = tf.constant(1.0) + + positives_shape = [10, 1] + positives_logits = dummy_tensor * tf.Variable( + tf.random_normal(positives_shape) + 1.0) + positives_targets = tf.ones(positives_shape) + positives_weight = 4.6 + positives_loss = ( + tf.contrib.nn.deprecated_flipped_sigmoid_cross_entropy_with_logits( + positives_logits, positives_targets) * positives_weight) + + negatives_shape = [190, 1] + negatives_logits = dummy_tensor * tf.Variable( + tf.random_normal(negatives_shape)) + negatives_targets = tf.zeros(negatives_shape) + negatives_weight = 0.9 + negatives_loss = ( + tf.contrib.nn.deprecated_flipped_sigmoid_cross_entropy_with_logits( + negatives_logits, negatives_targets) * negatives_weight) + + all_logits = tf.concat([positives_logits, negatives_logits], 0) + all_targets = tf.concat([positives_targets, negatives_targets], 0) + weighted_loss = tf.reduce_sum( + util.weighted_sigmoid_cross_entropy_with_logits( + all_targets, all_logits, positives_weight, negatives_weight)) + weighted_gradients = tf.gradients(weighted_loss, dummy_tensor) + + expected_loss = tf.add( + tf.reduce_sum(positives_loss), + tf.reduce_sum(negatives_loss)) + expected_gradients = tf.gradients(expected_loss, dummy_tensor) + + with tf.Session() as session: + tf.global_variables_initializer().run() + grad, expected_grad = session.run( + [weighted_gradients, expected_gradients]) + self.assertAllClose(grad, expected_grad) + + def testDtypeFlexibility(self): + """Tests the loss on inputs of varying data types.""" + shape = [20, 3] + logits = np.random.randn(*shape) + targets = tf.truncated_normal(shape) + positive_weights = tf.constant(3, dtype=tf.int64) + negative_weights = 1 + + loss = util.weighted_sigmoid_cross_entropy_with_logits( + targets, logits, positive_weights, negative_weights) + + with self.test_session(): + self.assertEqual(loss.eval().dtype, np.float) + + +class WeightedHingeLossTest(tf.test.TestCase): + + def testTrivialCompatibilityWithHinge(self): + # Tests compatibility with unweighted hinge loss. 
+ x_shape = [55, 10] + logits = tf.constant(np.random.randn(*x_shape).astype(np.float32)) + targets = tf.to_float(tf.constant(np.random.random_sample(x_shape) > 0.3)) + weighted_loss = util.weighted_hinge_loss(targets, logits) + expected_loss = hinge_loss(targets, logits) + with self.test_session(): + self.assertAllClose(expected_loss.eval(), weighted_loss.eval()) + + def testLessTrivialCompatibilityWithHinge(self): + # Tests compatibility with a constant weight for positives and negatives. + x_shape = [56, 11] + logits = tf.constant(np.random.randn(*x_shape).astype(np.float32)) + targets = tf.to_float(tf.constant(np.random.random_sample(x_shape) > 0.7)) + weight = 1.0 + 1.0/2 + 1.0/3 + 1.0/4 + 1.0/5 + 1.0/6 + 1.0/7 + weighted_loss = util.weighted_hinge_loss(targets, logits, weight, weight) + expected_loss = hinge_loss(targets, logits) * weight + with self.test_session(): + self.assertAllClose(expected_loss.eval(), weighted_loss.eval()) + + def testNontrivialCompatibilityWithHinge(self): + # Tests compatibility with different positive and negative weights. + x_shape = [23, 8] + logits_positives = tf.constant(np.random.randn(*x_shape).astype(np.float32)) + logits_negatives = tf.constant(np.random.randn(*x_shape).astype(np.float32)) + targets_positives = tf.ones(x_shape) + targets_negatives = tf.zeros(x_shape) + logits = tf.concat([logits_positives, logits_negatives], 0) + targets = tf.concat([targets_positives, targets_negatives], 0) + + raw_loss = util.weighted_hinge_loss(targets, + logits, + positive_weights=3.4, + negative_weights=1.2) + loss = tf.reduce_sum(raw_loss, 0) + positives_hinge = hinge_loss(targets_positives, logits_positives) + negatives_hinge = hinge_loss(targets_negatives, logits_negatives) + expected = tf.add(tf.reduce_sum(3.4 * positives_hinge, 0), + tf.reduce_sum(1.2 * negatives_hinge, 0)) + + with self.test_session(): + self.assertAllClose(loss.eval(), expected.eval()) + + def test3DLogitsAndTargets(self): + # Tests correctness when logits is 3D and targets is 2D. + targets_shape = [30, 4] + logits_shape = [targets_shape[0], targets_shape[1], 3] + targets = tf.to_float( + tf.constant(np.random.random_sample(targets_shape) > 0.7)) + logits = tf.constant(np.random.randn(*logits_shape).astype(np.float32)) + weight_vector = [1.0, 1.0, 1.0] + loss = util.weighted_hinge_loss(targets, logits, weight_vector) + + with self.test_session(): + loss_value = loss.eval() + for i in range(len(weight_vector)): + expected = hinge_loss(targets, logits[:, :, i]).eval() + self.assertAllClose(loss_value[:, :, i], expected) + + +class BuildLabelPriorsTest(tf.test.TestCase): + + def testLabelPriorConsistency(self): + # Checks that, with zero pseudocounts, the returned label priors reproduce + # label frequencies in the batch. + batch_shape = [4, 10] + labels = tf.Variable( + tf.to_float(tf.greater(tf.random_uniform(batch_shape), 0.678))) + + label_priors_update = util.build_label_priors( + labels=labels, positive_pseudocount=0, negative_pseudocount=0) + expected_priors = tf.reduce_mean(labels, 0) + + with self.test_session(): + tf.global_variables_initializer().run() + self.assertAllClose(label_priors_update.eval(), expected_priors.eval()) + + def testLabelPriorsUpdate(self): + # Checks that the update of label priors behaves as expected. 
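+    # With the default pseudocounts (one positive and one negative per label),
+    # the running numerator starts at 1 and the running denominator at 2, which
+    # is why label_sum and weight_sum below are initialized to 1.0 and 2.0.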
+ batch_shape = [1, 5] + labels = tf.Variable( + tf.to_float(tf.greater(tf.random_uniform(batch_shape), 0.4))) + label_priors_update = util.build_label_priors(labels) + + label_sum = np.ones(shape=batch_shape) + weight_sum = 2.0 * np.ones(shape=batch_shape) + + with self.test_session() as session: + tf.global_variables_initializer().run() + + for _ in range(3): + label_sum += labels.eval() + weight_sum += np.ones(shape=batch_shape) + expected_posteriors = label_sum / weight_sum + label_priors = label_priors_update.eval().reshape(batch_shape) + self.assertAllClose(label_priors, expected_posteriors) + + # Re-initialize labels to get a new random sample. + session.run(labels.initializer) + + def testLabelPriorsUpdateWithWeights(self): + # Checks the update of label priors with per-example weights. + batch_size = 6 + num_labels = 5 + batch_shape = [batch_size, num_labels] + labels = tf.Variable( + tf.to_float(tf.greater(tf.random_uniform(batch_shape), 0.6))) + weights = tf.Variable(tf.random_uniform(batch_shape) * 6.2) + + update_op = util.build_label_priors(labels, weights=weights) + + expected_weighted_label_counts = 1.0 + tf.reduce_sum(weights * labels, 0) + expected_weight_sum = 2.0 + tf.reduce_sum(weights, 0) + expected_label_posteriors = tf.divide(expected_weighted_label_counts, + expected_weight_sum) + + with self.test_session() as session: + tf.global_variables_initializer().run() + + updated_priors, expected_posteriors = session.run( + [update_op, expected_label_posteriors]) + self.assertAllClose(updated_priors, expected_posteriors) + + +class WeightedSurrogateLossTest(parameterized.TestCase, tf.test.TestCase): + + @parameterized.parameters( + ('hinge', util.weighted_hinge_loss), + ('xent', util.weighted_sigmoid_cross_entropy_with_logits)) + def testCompatibilityLoss(self, loss_name, loss_fn): + x_shape = [28, 4] + logits = tf.constant(np.random.randn(*x_shape).astype(np.float32)) + targets = tf.to_float(tf.constant(np.random.random_sample(x_shape) > 0.5)) + positive_weights = 0.66 + negative_weights = 11.1 + expected_loss = loss_fn( + targets, + logits, + positive_weights=positive_weights, + negative_weights=negative_weights) + computed_loss = util.weighted_surrogate_loss( + targets, + logits, + loss_name, + positive_weights=positive_weights, + negative_weights=negative_weights) + with self.test_session(): + self.assertAllClose(expected_loss.eval(), computed_loss.eval()) + + def testSurrogatgeError(self): + x_shape = [7, 3] + logits = tf.constant(np.random.randn(*x_shape).astype(np.float32)) + targets = tf.to_float(tf.constant(np.random.random_sample(x_shape) > 0.5)) + + with self.assertRaises(ValueError): + util.weighted_surrogate_loss(logits, targets, 'bug') + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/im2txt/.gitignore b/models/research/im2txt/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..fb46913cc7a5994c4324de50829c95d7858c30f4 --- /dev/null +++ b/models/research/im2txt/.gitignore @@ -0,0 +1,7 @@ +/bazel-bin +/bazel-ci_build-cache +/bazel-genfiles +/bazel-out +/bazel-im2txt +/bazel-testlogs +/bazel-tf diff --git a/models/research/im2txt/README.md b/models/research/im2txt/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2eb72822a39e3959a5a9370f26a9cc5c12be0fda --- /dev/null +++ b/models/research/im2txt/README.md @@ -0,0 +1,342 @@ +![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) +![TensorFlow Requirement: 
1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +# Show and Tell: A Neural Image Caption Generator + +A TensorFlow implementation of the image-to-text model described in the paper: + +"Show and Tell: Lessons learned from the 2015 MSCOCO Image Captioning +Challenge." + +Oriol Vinyals, Alexander Toshev, Samy Bengio, Dumitru Erhan. + +*IEEE transactions on pattern analysis and machine intelligence (2016).* + +Full text available at: http://arxiv.org/abs/1609.06647 + +## Contact +***Author:*** Chris Shallue + +***Pull requests and issues:*** @cshallue + +## Contents +* [Model Overview](#model-overview) + * [Introduction](#introduction) + * [Architecture](#architecture) +* [Getting Started](#getting-started) + * [A Note on Hardware and Training Time](#a-note-on-hardware-and-training-time) + * [Install Required Packages](#install-required-packages) + * [Prepare the Training Data](#prepare-the-training-data) + * [Download the Inception v3 Checkpoint](#download-the-inception-v3-checkpoint) +* [Training a Model](#training-a-model) + * [Initial Training](#initial-training) + * [Fine Tune the Inception v3 Model](#fine-tune-the-inception-v3-model) +* [Generating Captions](#generating-captions) + +## Model Overview + +### Introduction + +The *Show and Tell* model is a deep neural network that learns how to describe +the content of images. For example: + +![Example captions](g3doc/example_captions.jpg) + +### Architecture + +The *Show and Tell* model is an example of an *encoder-decoder* neural network. +It works by first "encoding" an image into a fixed-length vector representation, +and then "decoding" the representation into a natural language description. + +The image encoder is a deep convolutional neural network. This type of +network is widely used for image tasks and is currently state-of-the-art for +object recognition and detection. Our particular choice of network is the +[*Inception v3*](http://arxiv.org/abs/1512.00567) image recognition model +pretrained on the +[ILSVRC-2012-CLS](http://www.image-net.org/challenges/LSVRC/2012/) image +classification dataset. + +The decoder is a long short-term memory (LSTM) network. This type of network is +commonly used for sequence modeling tasks such as language modeling and machine +translation. In the *Show and Tell* model, the LSTM network is trained as a +language model conditioned on the image encoding. + +Words in the captions are represented with an embedding model. Each word in the +vocabulary is associated with a fixed-length vector representation that is +learned during training. + +The following diagram illustrates the model architecture. + +![Show and Tell Architecture](g3doc/show_and_tell_architecture.png) + +In this diagram, \{*s*0, *s*1, ..., *s**N*-1\} +are the words of the caption and \{*w**e**s*0, +*w**e**s*1, ..., *w**e**s**N*-1\} +are their corresponding word embedding vectors. The outputs \{*p*1, +*p*2, ..., *p**N*\} of the LSTM are probability +distributions generated by the model for the next word in the sentence. The +terms \{log *p*1(*s*1), +log *p*2(*s*2), ..., +log *p**N*(*s**N*)\} are the log-likelihoods of the +correct word at each step; the negated sum of these terms is the minimization +objective of the model. + +During the first phase of training the parameters of the *Inception v3* model +are kept fixed: it is simply a static image encoder function. 
A single trainable +layer is added on top of the *Inception v3* model to transform the image +embedding into the word embedding vector space. The model is trained with +respect to the parameters of the word embeddings, the parameters of the layer on +top of *Inception v3* and the parameters of the LSTM. In the second phase of +training, all parameters - including the parameters of *Inception v3* - are +trained to jointly fine-tune the image encoder and the LSTM. + +Given a trained model and an image we use *beam search* to generate captions for +that image. Captions are generated word-by-word, where at each step *t* we use +the set of sentences already generated with length *t* - 1 to generate a new set +of sentences with length *t*. We keep only the top *k* candidates at each step, +where the hyperparameter *k* is called the *beam size*. We have found the best +performance with *k* = 3. + +## Getting Started + +### A Note on Hardware and Training Time + +The time required to train the *Show and Tell* model depends on your specific +hardware and computational capacity. In this guide we assume you will be running +training on a single machine with a GPU. In our experience on an NVIDIA Tesla +K20m GPU the initial training phase takes 1-2 weeks. The second training phase +may take several additional weeks to achieve peak performance (but you can stop +this phase early and still get reasonable results). + +It is possible to achieve a speed-up by implementing distributed training across +a cluster of machines with GPUs, but that is not covered in this guide. + +Whilst it is possible to run this code on a CPU, beware that this may be +approximately 10 times slower. + +### Install Required Packages +First ensure that you have installed the following required packages: + +* **Bazel** ([instructions](http://bazel.io/docs/install.html)) +* **Python 2.7** +* **TensorFlow** 1.0 or greater ([instructions](https://www.tensorflow.org/install/)) +* **NumPy** ([instructions](http://www.scipy.org/install.html)) +* **Natural Language Toolkit (NLTK)**: + * First install NLTK ([instructions](http://www.nltk.org/install.html)) + * Then install the NLTK data package "punkt" ([instructions](http://www.nltk.org/data.html)) +* **Unzip** +### Prepare the Training Data + +To train the model you will need to provide training data in native TFRecord +format. The TFRecord format consists of a set of sharded files containing +serialized `tf.SequenceExample` protocol buffers. Each `tf.SequenceExample` +proto contains an image (JPEG format), a caption and metadata such as the image +id. + +Each caption is a list of words. During preprocessing, a dictionary is created +that assigns each word in the vocabulary to an integer-valued id. Each caption +is encoded as a list of integer word ids in the `tf.SequenceExample` protos. + +We have provided a script to download and preprocess the [MSCOCO](http://mscoco.org/) image captioning data set into this format. Downloading +and preprocessing the data may take several hours depending on your network and +computer speed. Please be patient. + +Before running the script, ensure that your hard disk has at least 150GB of +available space for storing the downloaded and processed data. + +```shell +# Location to save the MSCOCO data. +MSCOCO_DIR="${HOME}/im2txt/data/mscoco" + +# Build the preprocessing script. +cd research/im2txt +bazel build //im2txt:download_and_preprocess_mscoco + +# Run the preprocessing script. 
+bazel-bin/im2txt/download_and_preprocess_mscoco "${MSCOCO_DIR}" +``` + +The final line of the output should read: + +``` +2016-09-01 16:47:47.296630: Finished processing all 20267 image-caption pairs in data set 'test'. +``` + +When the script finishes you will find 256 training, 4 validation and 8 testing +files in `DATA_DIR`. The files will match the patterns `train-?????-of-00256`, +`val-?????-of-00004` and `test-?????-of-00008`, respectively. + +### Download the Inception v3 Checkpoint + +The *Show and Tell* model requires a pretrained *Inception v3* checkpoint file +to initialize the parameters of its image encoder submodel. + +This checkpoint file is provided by the +[TensorFlow-Slim image classification library](https://github.com/tensorflow/models/tree/master/research/slim#tensorflow-slim-image-classification-library) +which provides a suite of pre-trained image classification models. You can read +more about the models provided by the library +[here](https://github.com/tensorflow/models/tree/master/research/slim#pre-trained-models). + + +Run the following commands to download the *Inception v3* checkpoint. + +```shell +# Location to save the Inception v3 checkpoint. +INCEPTION_DIR="${HOME}/im2txt/data" +mkdir -p ${INCEPTION_DIR} + +wget "http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz" +tar -xvf "inception_v3_2016_08_28.tar.gz" -C ${INCEPTION_DIR} +rm "inception_v3_2016_08_28.tar.gz" +``` + +Note that the *Inception v3* checkpoint will only be used for initializing the +parameters of the *Show and Tell* model. Once the *Show and Tell* model starts +training it will save its own checkpoint files containing the values of all its +parameters (including copies of the *Inception v3* parameters). If training is +stopped and restarted, the parameter values will be restored from the latest +*Show and Tell* checkpoint and the *Inception v3* checkpoint will be ignored. In +other words, the *Inception v3* checkpoint is only used in the 0-th global step +(initialization) of training the *Show and Tell* model. + +## Training a Model + +### Initial Training + +Run the training script. + +```shell +# Directory containing preprocessed MSCOCO data. +MSCOCO_DIR="${HOME}/im2txt/data/mscoco" + +# Inception v3 checkpoint file. +INCEPTION_CHECKPOINT="${HOME}/im2txt/data/inception_v3.ckpt" + +# Directory to save the model. +MODEL_DIR="${HOME}/im2txt/model" + +# Build the model. +cd research/im2txt +bazel build -c opt //im2txt/... + +# Run the training script. +bazel-bin/im2txt/train \ + --input_file_pattern="${MSCOCO_DIR}/train-?????-of-00256" \ + --inception_checkpoint_file="${INCEPTION_CHECKPOINT}" \ + --train_dir="${MODEL_DIR}/train" \ + --train_inception=false \ + --number_of_steps=1000000 +``` + +Run the evaluation script in a separate process. This will log evaluation +metrics to TensorBoard which allows training progress to be monitored in +real-time. + +Note that you may run out of memory if you run the evaluation script on the same +GPU as the training script. You can run the command +`export CUDA_VISIBLE_DEVICES=""` to force the evaluation script to run on CPU. +If evaluation runs too slowly on CPU, you can decrease the value of +`--num_eval_examples`. + +```shell +MSCOCO_DIR="${HOME}/im2txt/data/mscoco" +MODEL_DIR="${HOME}/im2txt/model" + +# Ignore GPU devices (only necessary if your GPU is currently memory +# constrained, for example, by running the training script). +export CUDA_VISIBLE_DEVICES="" + +# Run the evaluation script. 
This will run in a loop, periodically loading the +# latest model checkpoint file and computing evaluation metrics. +bazel-bin/im2txt/evaluate \ + --input_file_pattern="${MSCOCO_DIR}/val-?????-of-00004" \ + --checkpoint_dir="${MODEL_DIR}/train" \ + --eval_dir="${MODEL_DIR}/eval" +``` + +Run a TensorBoard server in a separate process for real-time monitoring of +training progress and evaluation metrics. + +```shell +MODEL_DIR="${HOME}/im2txt/model" + +# Run a TensorBoard server. +tensorboard --logdir="${MODEL_DIR}" +``` + +### Fine Tune the Inception v3 Model + +Your model will already be able to generate reasonable captions after the first +phase of training. Try it out! (See [Generating Captions](#generating-captions)). + +You can further improve the performance of the model by running a +second training phase to jointly fine-tune the parameters of the *Inception v3* +image submodel and the LSTM. + +```shell +# Restart the training script with --train_inception=true. +bazel-bin/im2txt/train \ + --input_file_pattern="${MSCOCO_DIR}/train-?????-of-00256" \ + --train_dir="${MODEL_DIR}/train" \ + --train_inception=true \ + --number_of_steps=3000000 # Additional 2M steps (assuming 1M in initial training). +``` + +Note that training will proceed much slower now, and the model will continue to +improve by a small amount for a long time. We have found that it will improve +slowly for an additional 2-2.5 million steps before it begins to overfit. This +may take several weeks on a single GPU. If you don't care about absolutely +optimal performance then feel free to halt training sooner by stopping the +training script or passing a smaller value to the flag `--number_of_steps`. Your +model will still work reasonably well. + +## Generating Captions + +Your trained *Show and Tell* model can generate captions for any JPEG image! The +following command line will generate captions for an image from the test set. + +```shell +# Path to checkpoint file or a directory containing checkpoint files. Passing +# a directory will only work if there is also a file named 'checkpoint' which +# lists the available checkpoints in the directory. It will not work if you +# point to a directory with just a copy of a model checkpoint: in that case, +# you will need to pass the checkpoint path explicitly. +CHECKPOINT_PATH="${HOME}/im2txt/model/train" + +# Vocabulary file generated by the preprocessing script. +VOCAB_FILE="${HOME}/im2txt/data/mscoco/word_counts.txt" + +# JPEG image file to caption. +IMAGE_FILE="${HOME}/im2txt/data/mscoco/raw-data/val2014/COCO_val2014_000000224477.jpg" + +# Build the inference binary. +cd research/im2txt +bazel build -c opt //im2txt:run_inference + +# Ignore GPU devices (only necessary if your GPU is currently memory +# constrained, for example, by running the training script). +export CUDA_VISIBLE_DEVICES="" + +# Run inference to generate captions. +bazel-bin/im2txt/run_inference \ + --checkpoint_path=${CHECKPOINT_PATH} \ + --vocab_file=${VOCAB_FILE} \ + --input_files=${IMAGE_FILE} +``` + +Example output: + +``` +Captions for image COCO_val2014_000000224477.jpg: + 0) a man riding a wave on top of a surfboard . (p=0.040413) + 1) a person riding a surf board on a wave (p=0.017452) + 2) a man riding a wave on a surfboard in the ocean . (p=0.005743) +``` + +Note: you may get different results. Some variation between different models is +expected. 
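+
+If you prefer to call the trained model from Python instead of through Bazel,
+the sketch below shows the rough shape of such a script. It is a minimal,
+untested outline that assumes the helper modules `run_inference` itself is
+built from (`im2txt.inference_wrapper` and the `im2txt.inference_utils`
+package); adjust the imports and paths to your setup.
+
+```python
+import math
+
+import tensorflow as tf
+
+from im2txt import configuration
+from im2txt import inference_wrapper
+from im2txt.inference_utils import caption_generator
+from im2txt.inference_utils import vocabulary
+
+checkpoint_path = "/path/to/model/train"  # e.g. ${HOME}/im2txt/model/train
+vocab_file = "/path/to/word_counts.txt"   # generated by the preprocessing script
+image_file = "/path/to/image.jpg"
+
+# Build the inference graph once and freeze it.
+g = tf.Graph()
+with g.as_default():
+  model = inference_wrapper.InferenceWrapper()
+  restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
+                                             checkpoint_path)
+g.finalize()
+
+vocab = vocabulary.Vocabulary(vocab_file)
+
+with tf.Session(graph=g) as sess:
+  restore_fn(sess)  # Load the trained parameters.
+  generator = caption_generator.CaptionGenerator(model, vocab)
+  with tf.gfile.GFile(image_file, "rb") as f:
+    image = f.read()
+  # Beam search returns the top-k candidate captions (k = the beam size).
+  for i, caption in enumerate(generator.beam_search(sess, image)):
+    words = [vocab.id_to_word(w) for w in caption.sentence[1:-1]]
+    print("  %d) %s (p=%f)" % (i, " ".join(words), math.exp(caption.logprob)))
+```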
+ +Here is the image: + +![Surfer](g3doc/COCO_val2014_000000224477.jpg) diff --git a/models/research/im2txt/WORKSPACE b/models/research/im2txt/WORKSPACE new file mode 100644 index 0000000000000000000000000000000000000000..22da718b06f9c61be4ffdf45e48919ed4a5f17ae --- /dev/null +++ b/models/research/im2txt/WORKSPACE @@ -0,0 +1 @@ +workspace(name = "im2txt") diff --git a/models/research/im2txt/conda-env/ubuntu-18-04-environment.yaml b/models/research/im2txt/conda-env/ubuntu-18-04-environment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..332ff2a47f8f49fcdde7b769c29ff84cf5a5ff9d --- /dev/null +++ b/models/research/im2txt/conda-env/ubuntu-18-04-environment.yaml @@ -0,0 +1,142 @@ +name: im2txt +channels: + - defaults +dependencies: + - _tflow_select=2.3.0=mkl + - absl-py=0.5.0=py27_0 + - astor=0.7.1=py27_0 + - backports=1.0=py27_1 + - backports.functools_lru_cache=1.5=py27_1 + - backports.shutil_get_terminal_size=1.0.0=py27_2 + - backports.weakref=1.0.post1=py27_0 + - backports_abc=0.5=py27_0 + - blas=1.0=mkl + - bleach=3.0.2=py27_0 + - ca-certificates=2018.03.07=0 + - certifi=2018.10.15=py27_0 + - configparser=3.5.0=py27_0 + - cycler=0.10.0=py27_0 + - dbus=1.13.2=h714fa37_1 + - decorator=4.3.0=py27_0 + - entrypoints=0.2.3=py27_2 + - enum34=1.1.6=py27_1 + - expat=2.2.6=he6710b0_0 + - fastcache=1.0.2=py27h14c3975_2 + - fontconfig=2.13.0=h9420a91_0 + - freetype=2.9.1=h8a8886c_1 + - funcsigs=1.0.2=py27_0 + - functools32=3.2.3.2=py27_1 + - futures=3.2.0=py27_0 + - gast=0.2.0=py27_0 + - glib=2.56.2=hd408876_0 + - gmp=6.1.2=h6c8ec71_1 + - gmpy2=2.0.8=py27h10f8cd9_2 + - grpcio=1.12.1=py27hdbcaa40_0 + - gst-plugins-base=1.14.0=hbbd80ab_1 + - gstreamer=1.14.0=hb453b48_1 + - h5py=2.8.0=py27h989c5e5_3 + - hdf5=1.10.2=hba1933b_1 + - icu=58.2=h9c2bf20_1 + - intel-openmp=2019.0=118 + - ipaddress=1.0.22=py27_0 + - ipykernel=4.10.0=py27_0 + - ipython=5.8.0=py27_0 + - ipython_genutils=0.2.0=py27_0 + - ipywidgets=7.4.2=py27_0 + - jinja2=2.10=py27_0 + - jpeg=9b=h024ee3a_2 + - jsonschema=2.6.0=py27_0 + - jupyter=1.0.0=py27_7 + - jupyter_client=5.2.3=py27_0 + - jupyter_console=5.2.0=py27_1 + - jupyter_core=4.4.0=py27_0 + - keras-applications=1.0.6=py27_0 + - keras-preprocessing=1.0.5=py27_0 + - kiwisolver=1.0.1=py27hf484d3e_0 + - libedit=3.1.20170329=h6b74fdf_2 + - libffi=3.2.1=hd88cf55_4 + - libgcc-ng=8.2.0=hdf63c60_1 + - libgfortran-ng=7.3.0=hdf63c60_0 + - libpng=1.6.35=hbc83047_0 + - libprotobuf=3.6.0=hdbcaa40_0 + - libsodium=1.0.16=h1bed415_0 + - libstdcxx-ng=8.2.0=hdf63c60_1 + - libuuid=1.0.3=h1bed415_2 + - libxcb=1.13=h1bed415_1 + - libxml2=2.9.8=h26e45fe_1 + - linecache2=1.0.0=py27_0 + - markdown=3.0.1=py27_0 + - markupsafe=1.0=py27h14c3975_1 + - matplotlib=2.2.3=py27hb69df0a_0 + - mistune=0.8.4=py27h7b6447c_0 + - mkl=2019.0=118 + - mkl_fft=1.0.6=py27h7dd41cf_0 + - mkl_random=1.0.1=py27h4414c95_1 + - mock=2.0.0=py27_0 + - mpc=1.1.0=h10f8cd9_1 + - mpfr=4.0.1=hdf1c602_3 + - mpmath=1.0.0=py27_2 + - nbconvert=5.3.1=py27_0 + - nbformat=4.4.0=py27_0 + - ncurses=6.1=hf484d3e_0 + - nltk=3.3.0=py27_0 + - nose=1.3.7=py27_2 + - notebook=5.7.0=py27_0 + - numpy=1.15.3=py27h1d66e8a_0 + - numpy-base=1.15.3=py27h81de0dd_0 + - openssl=1.0.2p=h14c3975_0 + - pandas=0.23.4=py27h04863e7_0 + - pandoc=2.2.3.2=0 + - pandocfilters=1.4.2=py27_1 + - pathlib2=2.3.2=py27_0 + - pbr=4.3.0=py27_0 + - pcre=8.42=h439df22_0 + - pexpect=4.6.0=py27_0 + - pickleshare=0.7.5=py27_0 + - pip=10.0.1=py27_0 + - prometheus_client=0.4.2=py27_0 + - prompt_toolkit=1.0.15=py27_0 + - protobuf=3.6.0=py27hf484d3e_0 + - 
ptyprocess=0.6.0=py27_0 + - pygments=2.2.0=py27_0 + - pyparsing=2.2.2=py27_0 + - pyqt=5.9.2=py27h05f1152_2 + - python=2.7.15=h77bded6_2 + - python-dateutil=2.7.3=py27_0 + - pytz=2018.5=py27_0 + - pyzmq=17.1.2=py27h14c3975_0 + - qt=5.9.6=h8703b6f_2 + - qtconsole=4.4.2=py27_0 + - readline=7.0=h7b6447c_5 + - scandir=1.9.0=py27h14c3975_0 + - scipy=1.1.0=py27hfa4b5c9_1 + - send2trash=1.5.0=py27_0 + - setuptools=40.4.3=py27_0 + - simplegeneric=0.8.1=py27_2 + - singledispatch=3.4.0.3=py27_0 + - sip=4.19.8=py27hf484d3e_0 + - six=1.11.0=py27_1 + - sqlite=3.25.2=h7b6447c_0 + - subprocess32=3.5.3=py27h7b6447c_0 + - sympy=1.3=py27_0 + - tensorboard=1.11.0=py27hf484d3e_0 + - tensorflow=1.11.0=mkl_py27h25e0b76_0 + - tensorflow-base=1.11.0=mkl_py27h3c3e929_0 + - termcolor=1.1.0=py27_1 + - terminado=0.8.1=py27_1 + - testpath=0.4.2=py27_0 + - tk=8.6.8=hbc83047_0 + - tornado=5.1.1=py27h7b6447c_0 + - traceback2=1.4.0=py27_0 + - traitlets=4.3.2=py27_0 + - unittest2=1.1.0=py27_0 + - wcwidth=0.1.7=py27_0 + - webencodings=0.5.1=py27_1 + - werkzeug=0.14.1=py27_0 + - wheel=0.32.2=py27_0 + - widgetsnbextension=3.4.2=py27_0 + - xz=5.2.4=h14c3975_4 + - zeromq=4.2.5=hf484d3e_1 + - zlib=1.2.11=ha838bed_2 +prefix: /home/arinto_murdopo/anaconda3/envs/im2txt + diff --git a/models/research/im2txt/g3doc/COCO_val2014_000000224477.jpg b/models/research/im2txt/g3doc/COCO_val2014_000000224477.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8976fa84b40b04c5bf1205a49c8d236b747f8f9b Binary files /dev/null and b/models/research/im2txt/g3doc/COCO_val2014_000000224477.jpg differ diff --git a/models/research/im2txt/g3doc/example_captions.jpg b/models/research/im2txt/g3doc/example_captions.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b3a8f43247e5c9c39a3f93daaf1ad34837959ec5 Binary files /dev/null and b/models/research/im2txt/g3doc/example_captions.jpg differ diff --git a/models/research/im2txt/g3doc/show_and_tell_architecture.png b/models/research/im2txt/g3doc/show_and_tell_architecture.png new file mode 100644 index 0000000000000000000000000000000000000000..984590d54ba4aa089b5740fd69f6dc6216b9047f Binary files /dev/null and b/models/research/im2txt/g3doc/show_and_tell_architecture.png differ diff --git a/models/research/im2txt/im2txt/BUILD b/models/research/im2txt/im2txt/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..8c403171153c36ee43cde2788dbfcaf9c7bf4293 --- /dev/null +++ b/models/research/im2txt/im2txt/BUILD @@ -0,0 +1,96 @@ +package(default_visibility = [":internal"]) + +licenses(["notice"]) # Apache 2.0 + +exports_files(["LICENSE"]) + +package_group( + name = "internal", + packages = [ + "//im2txt/...", + ], +) + +py_binary( + name = "build_mscoco_data", + srcs = [ + "data/build_mscoco_data.py", + ], +) + +sh_binary( + name = "download_and_preprocess_mscoco", + srcs = ["data/download_and_preprocess_mscoco.sh"], + data = [ + ":build_mscoco_data", + ], +) + +py_library( + name = "configuration", + srcs = ["configuration.py"], + srcs_version = "PY2AND3", +) + +py_library( + name = "show_and_tell_model", + srcs = ["show_and_tell_model.py"], + srcs_version = "PY2AND3", + deps = [ + "//im2txt/ops:image_embedding", + "//im2txt/ops:image_processing", + "//im2txt/ops:inputs", + ], +) + +py_test( + name = "show_and_tell_model_test", + size = "large", + srcs = ["show_and_tell_model_test.py"], + deps = [ + ":configuration", + ":show_and_tell_model", + ], +) + +py_library( + name = "inference_wrapper", + srcs = ["inference_wrapper.py"], + srcs_version = 
"PY2AND3", + deps = [ + ":show_and_tell_model", + "//im2txt/inference_utils:inference_wrapper_base", + ], +) + +py_binary( + name = "train", + srcs = ["train.py"], + srcs_version = "PY2AND3", + deps = [ + ":configuration", + ":show_and_tell_model", + ], +) + +py_binary( + name = "evaluate", + srcs = ["evaluate.py"], + srcs_version = "PY2AND3", + deps = [ + ":configuration", + ":show_and_tell_model", + ], +) + +py_binary( + name = "run_inference", + srcs = ["run_inference.py"], + srcs_version = "PY2AND3", + deps = [ + ":configuration", + ":inference_wrapper", + "//im2txt/inference_utils:caption_generator", + "//im2txt/inference_utils:vocabulary", + ], +) diff --git a/models/research/im2txt/im2txt/configuration.py b/models/research/im2txt/im2txt/configuration.py new file mode 100644 index 0000000000000000000000000000000000000000..3b664eb9f0cd963fb26929d019ec9cdb3282d0a8 --- /dev/null +++ b/models/research/im2txt/im2txt/configuration.py @@ -0,0 +1,104 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Image-to-text model and training configurations.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +class ModelConfig(object): + """Wrapper class for model hyperparameters.""" + + def __init__(self): + """Sets the default model hyperparameters.""" + # File pattern of sharded TFRecord file containing SequenceExample protos. + # Must be provided in training and evaluation modes. + self.input_file_pattern = None + + # Image format ("jpeg" or "png"). + self.image_format = "jpeg" + + # Approximate number of values per input shard. Used to ensure sufficient + # mixing between shards in training. + self.values_per_input_shard = 2300 + # Minimum number of shards to keep in the input queue. + self.input_queue_capacity_factor = 2 + # Number of threads for prefetching SequenceExample protos. + self.num_input_reader_threads = 1 + + # Name of the SequenceExample context feature containing image data. + self.image_feature_name = "image/data" + # Name of the SequenceExample feature list containing integer captions. + self.caption_feature_name = "image/caption_ids" + + # Number of unique words in the vocab (plus 1, for ). + # The default value is larger than the expected actual vocab size to allow + # for differences between tokenizer versions used in preprocessing. There is + # no harm in using a value greater than the actual vocab size, but using a + # value less than the actual vocab size will result in an error. + self.vocab_size = 12000 + + # Number of threads for image preprocessing. Should be a multiple of 2. + self.num_preprocess_threads = 4 + + # Batch size. + self.batch_size = 32 + + # File containing an Inception v3 checkpoint to initialize the variables + # of the Inception model. Must be provided when starting training for the + # first time. 
+ self.inception_checkpoint_file = None + + # Dimensions of Inception v3 input images. + self.image_height = 299 + self.image_width = 299 + + # Scale used to initialize model variables. + self.initializer_scale = 0.08 + + # LSTM input and output dimensionality, respectively. + self.embedding_size = 512 + self.num_lstm_units = 512 + + # If < 1.0, the dropout keep probability applied to LSTM variables. + self.lstm_dropout_keep_prob = 0.7 + + +class TrainingConfig(object): + """Wrapper class for training hyperparameters.""" + + def __init__(self): + """Sets the default training hyperparameters.""" + # Number of examples per epoch of training data. + self.num_examples_per_epoch = 586363 + + # Optimizer for training the model. + self.optimizer = "SGD" + + # Learning rate for the initial phase of training. + self.initial_learning_rate = 2.0 + self.learning_rate_decay_factor = 0.5 + self.num_epochs_per_decay = 8.0 + + # Learning rate when fine tuning the Inception v3 parameters. + self.train_inception_learning_rate = 0.0005 + + # If not None, clip gradients to this value. + self.clip_gradients = 5.0 + + # How many model checkpoints to keep. + self.max_checkpoints_to_keep = 5 diff --git a/models/research/im2txt/im2txt/data/build_mscoco_data.py b/models/research/im2txt/im2txt/data/build_mscoco_data.py new file mode 100644 index 0000000000000000000000000000000000000000..2c3e9d977669bf63d8e39128336319b48c0432dd --- /dev/null +++ b/models/research/im2txt/im2txt/data/build_mscoco_data.py @@ -0,0 +1,483 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Converts MSCOCO data to TFRecord file format with SequenceExample protos. + +The MSCOCO images are expected to reside in JPEG files located in the following +directory structure: + + train_image_dir/COCO_train2014_000000000151.jpg + train_image_dir/COCO_train2014_000000000260.jpg + ... + +and + + val_image_dir/COCO_val2014_000000000042.jpg + val_image_dir/COCO_val2014_000000000073.jpg + ... + +The MSCOCO annotations JSON files are expected to reside in train_captions_file +and val_captions_file respectively. + +This script converts the combined MSCOCO data into sharded data files consisting +of 256, 4 and 8 TFRecord files, respectively: + + output_dir/train-00000-of-00256 + output_dir/train-00001-of-00256 + ... + output_dir/train-00255-of-00256 + +and + + output_dir/val-00000-of-00004 + ... + output_dir/val-00003-of-00004 + +and + + output_dir/test-00000-of-00008 + ... + output_dir/test-00007-of-00008 + +Each TFRecord file contains ~2300 records. Each record within the TFRecord file +is a serialized SequenceExample proto consisting of precisely one image-caption +pair. Note that each image has multiple captions (usually 5) and therefore each +image is replicated multiple times in the TFRecord files. 
+
+The SequenceExample proto contains the following fields:
+
+  context:
+    image/image_id: integer MSCOCO image identifier
+    image/data: string containing JPEG encoded image in RGB colorspace
+
+  feature_lists:
+    image/caption: list of strings containing the (tokenized) caption words
+    image/caption_ids: list of integer ids corresponding to the caption words
+
+The captions are tokenized using the NLTK (http://www.nltk.org/) word tokenizer.
+The vocabulary of word identifiers is constructed from the sorted list (by
+descending frequency) of word tokens in the training set. Only tokens appearing
+at least 4 times are considered; all other words get the "unknown" word id.
+
+NOTE: This script will consume around 100GB of disk space because each image
+in the MSCOCO dataset is replicated ~5 times (once per caption) in the output.
+This is done for two reasons:
+  1. In order to better shuffle the training data.
+  2. It makes it easier to perform asynchronous preprocessing of each image in
+     TensorFlow.
+
+Running this script using 16 threads may take around 1 hour on an HP Z420.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from collections import Counter
+from collections import namedtuple
+from datetime import datetime
+import json
+import os.path
+import random
+import sys
+import threading
+
+
+
+import nltk.tokenize
+import numpy as np
+from six.moves import xrange
+import tensorflow as tf
+
+tf.flags.DEFINE_string("train_image_dir", "/tmp/train2014/",
+                       "Training image directory.")
+tf.flags.DEFINE_string("val_image_dir", "/tmp/val2014",
+                       "Validation image directory.")
+
+tf.flags.DEFINE_string("train_captions_file", "/tmp/captions_train2014.json",
+                       "Training captions JSON file.")
+tf.flags.DEFINE_string("val_captions_file", "/tmp/captions_val2014.json",
+                       "Validation captions JSON file.")
+
+tf.flags.DEFINE_string("output_dir", "/tmp/", "Output data directory.")
+
+tf.flags.DEFINE_integer("train_shards", 256,
+                        "Number of shards in training TFRecord files.")
+tf.flags.DEFINE_integer("val_shards", 4,
+                        "Number of shards in validation TFRecord files.")
+tf.flags.DEFINE_integer("test_shards", 8,
+                        "Number of shards in testing TFRecord files.")
+
+tf.flags.DEFINE_string("start_word", "<S>",
+                       "Special word added to the beginning of each sentence.")
+tf.flags.DEFINE_string("end_word", "</S>",
+                       "Special word added to the end of each sentence.")
+tf.flags.DEFINE_string("unknown_word", "<UNK>",
+                       "Special word meaning 'unknown'.")
+tf.flags.DEFINE_integer("min_word_count", 4,
+                        "The minimum number of occurrences of each word in the "
+                        "training set for inclusion in the vocabulary.")
+tf.flags.DEFINE_string("word_counts_output_file", "/tmp/word_counts.txt",
+                       "Output vocabulary file of word counts.")
+
+tf.flags.DEFINE_integer("num_threads", 8,
+                        "Number of threads to preprocess the images.")
+
+FLAGS = tf.flags.FLAGS
+
+ImageMetadata = namedtuple("ImageMetadata",
+                           ["image_id", "filename", "captions"])
+
+
+class Vocabulary(object):
+  """Simple vocabulary wrapper."""
+
+  def __init__(self, vocab, unk_id):
+    """Initializes the vocabulary.
+
+    Args:
+      vocab: A dictionary of word to word_id.
+      unk_id: Id of the special 'unknown' word.
+ """ + self._vocab = vocab + self._unk_id = unk_id + + def word_to_id(self, word): + """Returns the integer id of a word string.""" + if word in self._vocab: + return self._vocab[word] + else: + return self._unk_id + + +class ImageDecoder(object): + """Helper class for decoding images in TensorFlow.""" + + def __init__(self): + # Create a single TensorFlow Session for all image decoding calls. + self._sess = tf.Session() + + # TensorFlow ops for JPEG decoding. + self._encoded_jpeg = tf.placeholder(dtype=tf.string) + self._decode_jpeg = tf.image.decode_jpeg(self._encoded_jpeg, channels=3) + + def decode_jpeg(self, encoded_jpeg): + image = self._sess.run(self._decode_jpeg, + feed_dict={self._encoded_jpeg: encoded_jpeg}) + assert len(image.shape) == 3 + assert image.shape[2] == 3 + return image + + +def _int64_feature(value): + """Wrapper for inserting an int64 Feature into a SequenceExample proto.""" + return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) + + +def _bytes_feature(value): + """Wrapper for inserting a bytes Feature into a SequenceExample proto.""" + return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)])) + + +def _int64_feature_list(values): + """Wrapper for inserting an int64 FeatureList into a SequenceExample proto.""" + return tf.train.FeatureList(feature=[_int64_feature(v) for v in values]) + + +def _bytes_feature_list(values): + """Wrapper for inserting a bytes FeatureList into a SequenceExample proto.""" + return tf.train.FeatureList(feature=[_bytes_feature(v) for v in values]) + + +def _to_sequence_example(image, decoder, vocab): + """Builds a SequenceExample proto for an image-caption pair. + + Args: + image: An ImageMetadata object. + decoder: An ImageDecoder object. + vocab: A Vocabulary object. + + Returns: + A SequenceExample proto. + """ + with tf.gfile.FastGFile(image.filename, "r") as f: + encoded_image = f.read() + + try: + decoder.decode_jpeg(encoded_image) + except (tf.errors.InvalidArgumentError, AssertionError): + print("Skipping file with invalid JPEG data: %s" % image.filename) + return + + context = tf.train.Features(feature={ + "image/image_id": _int64_feature(image.image_id), + "image/data": _bytes_feature(encoded_image), + }) + + assert len(image.captions) == 1 + caption = image.captions[0] + caption_ids = [vocab.word_to_id(word) for word in caption] + feature_lists = tf.train.FeatureLists(feature_list={ + "image/caption": _bytes_feature_list(caption), + "image/caption_ids": _int64_feature_list(caption_ids) + }) + sequence_example = tf.train.SequenceExample( + context=context, feature_lists=feature_lists) + + return sequence_example + + +def _process_image_files(thread_index, ranges, name, images, decoder, vocab, + num_shards): + """Processes and saves a subset of images as TFRecord files in one thread. + + Args: + thread_index: Integer thread identifier within [0, len(ranges)]. + ranges: A list of pairs of integers specifying the ranges of the dataset to + process in parallel. + name: Unique identifier specifying the dataset. + images: List of ImageMetadata. + decoder: An ImageDecoder object. + vocab: A Vocabulary object. + num_shards: Integer number of shards for the output files. + """ + # Each thread produces N shards where N = num_shards / num_threads. For + # instance, if num_shards = 128, and num_threads = 2, then the first thread + # would produce shards [0, 64). 
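+  # For example, with the defaults used by this script (256 training shards
+  # and 8 preprocessing threads), each thread writes 32 of the training shards.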
+ num_threads = len(ranges) + assert not num_shards % num_threads + num_shards_per_batch = int(num_shards / num_threads) + + shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1], + num_shards_per_batch + 1).astype(int) + num_images_in_thread = ranges[thread_index][1] - ranges[thread_index][0] + + counter = 0 + for s in xrange(num_shards_per_batch): + # Generate a sharded version of the file name, e.g. 'train-00002-of-00010' + shard = thread_index * num_shards_per_batch + s + output_filename = "%s-%.5d-of-%.5d" % (name, shard, num_shards) + output_file = os.path.join(FLAGS.output_dir, output_filename) + writer = tf.python_io.TFRecordWriter(output_file) + + shard_counter = 0 + images_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int) + for i in images_in_shard: + image = images[i] + + sequence_example = _to_sequence_example(image, decoder, vocab) + if sequence_example is not None: + writer.write(sequence_example.SerializeToString()) + shard_counter += 1 + counter += 1 + + if not counter % 1000: + print("%s [thread %d]: Processed %d of %d items in thread batch." % + (datetime.now(), thread_index, counter, num_images_in_thread)) + sys.stdout.flush() + + writer.close() + print("%s [thread %d]: Wrote %d image-caption pairs to %s" % + (datetime.now(), thread_index, shard_counter, output_file)) + sys.stdout.flush() + shard_counter = 0 + print("%s [thread %d]: Wrote %d image-caption pairs to %d shards." % + (datetime.now(), thread_index, counter, num_shards_per_batch)) + sys.stdout.flush() + + +def _process_dataset(name, images, vocab, num_shards): + """Processes a complete data set and saves it as a TFRecord. + + Args: + name: Unique identifier specifying the dataset. + images: List of ImageMetadata. + vocab: A Vocabulary object. + num_shards: Integer number of shards for the output files. + """ + # Break up each image into a separate entity for each caption. + images = [ImageMetadata(image.image_id, image.filename, [caption]) + for image in images for caption in image.captions] + + # Shuffle the ordering of images. Make the randomization repeatable. + random.seed(12345) + random.shuffle(images) + + # Break the images into num_threads batches. Batch i is defined as + # images[ranges[i][0]:ranges[i][1]]. + num_threads = min(num_shards, FLAGS.num_threads) + spacing = np.linspace(0, len(images), num_threads + 1).astype(np.int) + ranges = [] + threads = [] + for i in xrange(len(spacing) - 1): + ranges.append([spacing[i], spacing[i + 1]]) + + # Create a mechanism for monitoring when all threads are finished. + coord = tf.train.Coordinator() + + # Create a utility for decoding JPEG images to run sanity checks. + decoder = ImageDecoder() + + # Launch a thread for each batch. + print("Launching %d threads for spacings: %s" % (num_threads, ranges)) + for thread_index in xrange(len(ranges)): + args = (thread_index, ranges, name, images, decoder, vocab, num_shards) + t = threading.Thread(target=_process_image_files, args=args) + t.start() + threads.append(t) + + # Wait for all the threads to terminate. + coord.join(threads) + print("%s: Finished processing all %d image-caption pairs in data set '%s'." % + (datetime.now(), len(images), name)) + + +def _create_vocab(captions): + """Creates the vocabulary of word to word_id. + + The vocabulary is saved to disk in a text file of word counts. The id of each + word in the file is its corresponding 0-based line number. + + Args: + captions: A list of lists of strings. + + Returns: + A Vocabulary object. 
+ """ + print("Creating vocabulary.") + counter = Counter() + for c in captions: + counter.update(c) + print("Total words:", len(counter)) + + # Filter uncommon words and sort by descending count. + word_counts = [x for x in counter.items() if x[1] >= FLAGS.min_word_count] + word_counts.sort(key=lambda x: x[1], reverse=True) + print("Words in vocabulary:", len(word_counts)) + + # Write out the word counts file. + with tf.gfile.FastGFile(FLAGS.word_counts_output_file, "w") as f: + f.write("\n".join(["%s %d" % (w, c) for w, c in word_counts])) + print("Wrote vocabulary file:", FLAGS.word_counts_output_file) + + # Create the vocabulary dictionary. + reverse_vocab = [x[0] for x in word_counts] + unk_id = len(reverse_vocab) + vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)]) + vocab = Vocabulary(vocab_dict, unk_id) + + return vocab + + +def _process_caption(caption): + """Processes a caption string into a list of tonenized words. + + Args: + caption: A string caption. + + Returns: + A list of strings; the tokenized caption. + """ + tokenized_caption = [FLAGS.start_word] + tokenized_caption.extend(nltk.tokenize.word_tokenize(caption.lower())) + tokenized_caption.append(FLAGS.end_word) + return tokenized_caption + + +def _load_and_process_metadata(captions_file, image_dir): + """Loads image metadata from a JSON file and processes the captions. + + Args: + captions_file: JSON file containing caption annotations. + image_dir: Directory containing the image files. + + Returns: + A list of ImageMetadata. + """ + with tf.gfile.FastGFile(captions_file, "r") as f: + caption_data = json.load(f) + + # Extract the filenames. + id_to_filename = [(x["id"], x["file_name"]) for x in caption_data["images"]] + + # Extract the captions. Each image_id is associated with multiple captions. + id_to_captions = {} + for annotation in caption_data["annotations"]: + image_id = annotation["image_id"] + caption = annotation["caption"] + id_to_captions.setdefault(image_id, []) + id_to_captions[image_id].append(caption) + + assert len(id_to_filename) == len(id_to_captions) + assert set([x[0] for x in id_to_filename]) == set(id_to_captions.keys()) + print("Loaded caption metadata for %d images from %s" % + (len(id_to_filename), captions_file)) + + # Process the captions and combine the data into a list of ImageMetadata. 
+ print("Processing captions.") + image_metadata = [] + num_captions = 0 + for image_id, base_filename in id_to_filename: + filename = os.path.join(image_dir, base_filename) + captions = [_process_caption(c) for c in id_to_captions[image_id]] + image_metadata.append(ImageMetadata(image_id, filename, captions)) + num_captions += len(captions) + print("Finished processing %d captions for %d images in %s" % + (num_captions, len(id_to_filename), captions_file)) + + return image_metadata + + +def main(unused_argv): + def _is_valid_num_shards(num_shards): + """Returns True if num_shards is compatible with FLAGS.num_threads.""" + return num_shards < FLAGS.num_threads or not num_shards % FLAGS.num_threads + + assert _is_valid_num_shards(FLAGS.train_shards), ( + "Please make the FLAGS.num_threads commensurate with FLAGS.train_shards") + assert _is_valid_num_shards(FLAGS.val_shards), ( + "Please make the FLAGS.num_threads commensurate with FLAGS.val_shards") + assert _is_valid_num_shards(FLAGS.test_shards), ( + "Please make the FLAGS.num_threads commensurate with FLAGS.test_shards") + + if not tf.gfile.IsDirectory(FLAGS.output_dir): + tf.gfile.MakeDirs(FLAGS.output_dir) + + # Load image metadata from caption files. + mscoco_train_dataset = _load_and_process_metadata(FLAGS.train_captions_file, + FLAGS.train_image_dir) + mscoco_val_dataset = _load_and_process_metadata(FLAGS.val_captions_file, + FLAGS.val_image_dir) + + # Redistribute the MSCOCO data as follows: + # train_dataset = 100% of mscoco_train_dataset + 85% of mscoco_val_dataset. + # val_dataset = 5% of mscoco_val_dataset (for validation during training). + # test_dataset = 10% of mscoco_val_dataset (for final evaluation). + train_cutoff = int(0.85 * len(mscoco_val_dataset)) + val_cutoff = int(0.90 * len(mscoco_val_dataset)) + train_dataset = mscoco_train_dataset + mscoco_val_dataset[0:train_cutoff] + val_dataset = mscoco_val_dataset[train_cutoff:val_cutoff] + test_dataset = mscoco_val_dataset[val_cutoff:] + + # Create vocabulary from the training captions. + train_captions = [c for image in train_dataset for c in image.captions] + vocab = _create_vocab(train_captions) + + _process_dataset("train", train_dataset, vocab, FLAGS.train_shards) + _process_dataset("val", val_dataset, vocab, FLAGS.val_shards) + _process_dataset("test", test_dataset, vocab, FLAGS.test_shards) + + +if __name__ == "__main__": + tf.app.run() diff --git a/models/research/im2txt/im2txt/data/download_and_preprocess_mscoco.sh b/models/research/im2txt/im2txt/data/download_and_preprocess_mscoco.sh new file mode 100644 index 0000000000000000000000000000000000000000..ab3ff28d576adcbf1992de4c00dfa350dd93b1c3 --- /dev/null +++ b/models/research/im2txt/im2txt/data/download_and_preprocess_mscoco.sh @@ -0,0 +1,90 @@ +#!/bin/bash +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# Script to download and preprocess the MSCOCO data set. 
+# +# The outputs of this script are sharded TFRecord files containing serialized +# SequenceExample protocol buffers. See build_mscoco_data.py for details of how +# the SequenceExample protocol buffers are constructed. +# +# usage: +# ./download_and_preprocess_mscoco.sh +set -e + +if [ -z "$1" ]; then + echo "usage download_and_preproces_mscoco.sh [data dir]" + exit +fi + +if [ "$(uname)" == "Darwin" ]; then + UNZIP="tar -xf" +else + UNZIP="unzip -nq" +fi + +# Create the output directories. +OUTPUT_DIR="${1%/}" +SCRATCH_DIR="${OUTPUT_DIR}/raw-data" +mkdir -p "${OUTPUT_DIR}" +mkdir -p "${SCRATCH_DIR}" +CURRENT_DIR=$(pwd) +WORK_DIR="$0.runfiles/im2txt/im2txt" + +# Helper function to download and unpack a .zip file. +function download_and_unzip() { + local BASE_URL=${1} + local FILENAME=${2} + + if [ ! -f ${FILENAME} ]; then + echo "Downloading ${FILENAME} to $(pwd)" + wget -nd -c "${BASE_URL}/${FILENAME}" + else + echo "Skipping download of ${FILENAME}" + fi + echo "Unzipping ${FILENAME}" + ${UNZIP} ${FILENAME} +} + +cd ${SCRATCH_DIR} + +# Download the images. +BASE_IMAGE_URL="http://msvocds.blob.core.windows.net/coco2014" + +TRAIN_IMAGE_FILE="train2014.zip" +download_and_unzip ${BASE_IMAGE_URL} ${TRAIN_IMAGE_FILE} +TRAIN_IMAGE_DIR="${SCRATCH_DIR}/train2014" + +VAL_IMAGE_FILE="val2014.zip" +download_and_unzip ${BASE_IMAGE_URL} ${VAL_IMAGE_FILE} +VAL_IMAGE_DIR="${SCRATCH_DIR}/val2014" + +# Download the captions. +BASE_CAPTIONS_URL="http://msvocds.blob.core.windows.net/annotations-1-0-3" +CAPTIONS_FILE="captions_train-val2014.zip" +download_and_unzip ${BASE_CAPTIONS_URL} ${CAPTIONS_FILE} +TRAIN_CAPTIONS_FILE="${SCRATCH_DIR}/annotations/captions_train2014.json" +VAL_CAPTIONS_FILE="${SCRATCH_DIR}/annotations/captions_val2014.json" + +# Build TFRecords of the image data. +cd "${CURRENT_DIR}" +BUILD_SCRIPT="${WORK_DIR}/build_mscoco_data" +"${BUILD_SCRIPT}" \ + --train_image_dir="${TRAIN_IMAGE_DIR}" \ + --val_image_dir="${VAL_IMAGE_DIR}" \ + --train_captions_file="${TRAIN_CAPTIONS_FILE}" \ + --val_captions_file="${VAL_CAPTIONS_FILE}" \ + --output_dir="${OUTPUT_DIR}" \ + --word_counts_output_file="${OUTPUT_DIR}/word_counts.txt" \ diff --git a/models/research/im2txt/im2txt/evaluate.py b/models/research/im2txt/im2txt/evaluate.py new file mode 100644 index 0000000000000000000000000000000000000000..0c81a59dab56626cb2c6a19433544f4d239cbd9d --- /dev/null +++ b/models/research/im2txt/im2txt/evaluate.py @@ -0,0 +1,198 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Evaluate the model. + +This script should be run concurrently with training so that summaries show up +in TensorBoard. 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import os.path +import time + + +import numpy as np +import tensorflow as tf + +from im2txt import configuration +from im2txt import show_and_tell_model + +FLAGS = tf.flags.FLAGS + +tf.flags.DEFINE_string("input_file_pattern", "", + "File pattern of sharded TFRecord input files.") +tf.flags.DEFINE_string("checkpoint_dir", "", + "Directory containing model checkpoints.") +tf.flags.DEFINE_string("eval_dir", "", "Directory to write event logs.") + +tf.flags.DEFINE_integer("eval_interval_secs", 600, + "Interval between evaluation runs.") +tf.flags.DEFINE_integer("num_eval_examples", 10132, + "Number of examples for evaluation.") + +tf.flags.DEFINE_integer("min_global_step", 5000, + "Minimum global step to run evaluation.") + +tf.logging.set_verbosity(tf.logging.INFO) + + +def evaluate_model(sess, model, global_step, summary_writer, summary_op): + """Computes perplexity-per-word over the evaluation dataset. + + Summaries and perplexity-per-word are written out to the eval directory. + + Args: + sess: Session object. + model: Instance of ShowAndTellModel; the model to evaluate. + global_step: Integer; global step of the model checkpoint. + summary_writer: Instance of FileWriter. + summary_op: Op for generating model summaries. + """ + # Log model summaries on a single batch. + summary_str = sess.run(summary_op) + summary_writer.add_summary(summary_str, global_step) + + # Compute perplexity over the entire dataset. + num_eval_batches = int( + math.ceil(FLAGS.num_eval_examples / model.config.batch_size)) + + start_time = time.time() + sum_losses = 0. + sum_weights = 0. + for i in range(num_eval_batches): + cross_entropy_losses, weights = sess.run([ + model.target_cross_entropy_losses, + model.target_cross_entropy_loss_weights + ]) + sum_losses += np.sum(cross_entropy_losses * weights) + sum_weights += np.sum(weights) + if not i % 100: + tf.logging.info("Computed losses for %d of %d batches.", i + 1, + num_eval_batches) + eval_time = time.time() - start_time + + perplexity = math.exp(sum_losses / sum_weights) + tf.logging.info("Perplexity = %f (%.2g sec)", perplexity, eval_time) + + # Log perplexity to the FileWriter. + summary = tf.Summary() + value = summary.value.add() + value.simple_value = perplexity + value.tag = "Perplexity" + summary_writer.add_summary(summary, global_step) + + # Write the Events file to the eval directory. + summary_writer.flush() + tf.logging.info("Finished processing evaluation at global step %d.", + global_step) + + +def run_once(model, saver, summary_writer, summary_op): + """Evaluates the latest model checkpoint. + + Args: + model: Instance of ShowAndTellModel; the model to evaluate. + saver: Instance of tf.train.Saver for restoring model Variables. + summary_writer: Instance of FileWriter. + summary_op: Op for generating model summaries. + """ + model_path = tf.train.latest_checkpoint(FLAGS.checkpoint_dir) + if not model_path: + tf.logging.info("Skipping evaluation. No checkpoint found in: %s", + FLAGS.checkpoint_dir) + return + + with tf.Session() as sess: + # Load model from checkpoint. 
+ tf.logging.info("Loading model from checkpoint: %s", model_path) + saver.restore(sess, model_path) + global_step = tf.train.global_step(sess, model.global_step.name) + tf.logging.info("Successfully loaded %s at global step = %d.", + os.path.basename(model_path), global_step) + if global_step < FLAGS.min_global_step: + tf.logging.info("Skipping evaluation. Global step = %d < %d", global_step, + FLAGS.min_global_step) + return + + # Start the queue runners. + coord = tf.train.Coordinator() + threads = tf.train.start_queue_runners(coord=coord) + + # Run evaluation on the latest checkpoint. + try: + evaluate_model( + sess=sess, + model=model, + global_step=global_step, + summary_writer=summary_writer, + summary_op=summary_op) + except Exception as e: # pylint: disable=broad-except + tf.logging.error("Evaluation failed.") + coord.request_stop(e) + + coord.request_stop() + coord.join(threads, stop_grace_period_secs=10) + + +def run(): + """Runs evaluation in a loop, and logs summaries to TensorBoard.""" + # Create the evaluation directory if it doesn't exist. + eval_dir = FLAGS.eval_dir + if not tf.gfile.IsDirectory(eval_dir): + tf.logging.info("Creating eval directory: %s", eval_dir) + tf.gfile.MakeDirs(eval_dir) + + g = tf.Graph() + with g.as_default(): + # Build the model for evaluation. + model_config = configuration.ModelConfig() + model_config.input_file_pattern = FLAGS.input_file_pattern + model = show_and_tell_model.ShowAndTellModel(model_config, mode="eval") + model.build() + + # Create the Saver to restore model Variables. + saver = tf.train.Saver() + + # Create the summary operation and the summary writer. + summary_op = tf.summary.merge_all() + summary_writer = tf.summary.FileWriter(eval_dir) + + g.finalize() + + # Run a new evaluation run every eval_interval_secs. 
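+  # If an evaluation pass finishes early, sleep for the remainder of the
+  # interval so that successive passes start roughly eval_interval_secs apart.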
+ while True: + start = time.time() + tf.logging.info("Starting evaluation at " + time.strftime( + "%Y-%m-%d-%H:%M:%S", time.localtime())) + run_once(model, saver, summary_writer, summary_op) + time_to_next_eval = start + FLAGS.eval_interval_secs - time.time() + if time_to_next_eval > 0: + time.sleep(time_to_next_eval) + + +def main(unused_argv): + assert FLAGS.input_file_pattern, "--input_file_pattern is required" + assert FLAGS.checkpoint_dir, "--checkpoint_dir is required" + assert FLAGS.eval_dir, "--eval_dir is required" + run() + + +if __name__ == "__main__": + tf.app.run() diff --git a/models/research/im2txt/im2txt/inference_utils/BUILD b/models/research/im2txt/im2txt/inference_utils/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..82a15fd3ca487e542c41ab337404f8caa63b8c63 --- /dev/null +++ b/models/research/im2txt/im2txt/inference_utils/BUILD @@ -0,0 +1,31 @@ +package(default_visibility = ["//im2txt:internal"]) + +licenses(["notice"]) # Apache 2.0 + +exports_files(["LICENSE"]) + +py_library( + name = "inference_wrapper_base", + srcs = ["inference_wrapper_base.py"], + srcs_version = "PY2AND3", +) + +py_library( + name = "vocabulary", + srcs = ["vocabulary.py"], + srcs_version = "PY2AND3", +) + +py_library( + name = "caption_generator", + srcs = ["caption_generator.py"], + srcs_version = "PY2AND3", +) + +py_test( + name = "caption_generator_test", + srcs = ["caption_generator_test.py"], + deps = [ + ":caption_generator", + ], +) diff --git a/models/research/im2txt/im2txt/inference_utils/caption_generator.py b/models/research/im2txt/im2txt/inference_utils/caption_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..f158d3d2330e8f839efdad4cbc4d38811b58d826 --- /dev/null +++ b/models/research/im2txt/im2txt/inference_utils/caption_generator.py @@ -0,0 +1,213 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Class for generating captions from an image-to-text model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import heapq +import math + + +import numpy as np + + +class Caption(object): + """Represents a complete or partial caption.""" + + def __init__(self, sentence, state, logprob, score, metadata=None): + """Initializes the Caption. + + Args: + sentence: List of word ids in the caption. + state: Model state after generating the previous word. + logprob: Log-probability of the caption. + score: Score of the caption. + metadata: Optional metadata associated with the partial sentence. If not + None, a list of strings with the same length as 'sentence'. 
+ """ + self.sentence = sentence + self.state = state + self.logprob = logprob + self.score = score + self.metadata = metadata + + def __cmp__(self, other): + """Compares Captions by score.""" + assert isinstance(other, Caption) + if self.score == other.score: + return 0 + elif self.score < other.score: + return -1 + else: + return 1 + + # For Python 3 compatibility (__cmp__ is deprecated). + def __lt__(self, other): + assert isinstance(other, Caption) + return self.score < other.score + + # Also for Python 3 compatibility. + def __eq__(self, other): + assert isinstance(other, Caption) + return self.score == other.score + + +class TopN(object): + """Maintains the top n elements of an incrementally provided set.""" + + def __init__(self, n): + self._n = n + self._data = [] + + def size(self): + assert self._data is not None + return len(self._data) + + def push(self, x): + """Pushes a new element.""" + assert self._data is not None + if len(self._data) < self._n: + heapq.heappush(self._data, x) + else: + heapq.heappushpop(self._data, x) + + def extract(self, sort=False): + """Extracts all elements from the TopN. This is a destructive operation. + + The only method that can be called immediately after extract() is reset(). + + Args: + sort: Whether to return the elements in descending sorted order. + + Returns: + A list of data; the top n elements provided to the set. + """ + assert self._data is not None + data = self._data + self._data = None + if sort: + data.sort(reverse=True) + return data + + def reset(self): + """Returns the TopN to an empty state.""" + self._data = [] + + +class CaptionGenerator(object): + """Class to generate captions from an image-to-text model.""" + + def __init__(self, + model, + vocab, + beam_size=3, + max_caption_length=20, + length_normalization_factor=0.0): + """Initializes the generator. + + Args: + model: Object encapsulating a trained image-to-text model. Must have + methods feed_image() and inference_step(). For example, an instance of + InferenceWrapperBase. + vocab: A Vocabulary object. + beam_size: Beam size to use when generating captions. + max_caption_length: The maximum caption length before stopping the search. + length_normalization_factor: If != 0, a number x such that captions are + scored by logprob/length^x, rather than logprob. This changes the + relative scores of captions depending on their lengths. For example, if + x > 0 then longer captions will be favored. + """ + self.vocab = vocab + self.model = model + + self.beam_size = beam_size + self.max_caption_length = max_caption_length + self.length_normalization_factor = length_normalization_factor + + def beam_search(self, sess, encoded_image): + """Runs beam search caption generation on a single image. + + Args: + sess: TensorFlow Session object. + encoded_image: An encoded image string. + + Returns: + A list of Caption sorted by descending score. + """ + # Feed in the image to get the initial state. + initial_state = self.model.feed_image(sess, encoded_image) + + initial_beam = Caption( + sentence=[self.vocab.start_id], + state=initial_state[0], + logprob=0.0, + score=0.0, + metadata=[""]) + partial_captions = TopN(self.beam_size) + partial_captions.push(initial_beam) + complete_captions = TopN(self.beam_size) + + # Run beam search. 
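+    # Each iteration expands every partial caption by its most likely next
+    # words, keeps only the best beam_size partial captions, and moves any
+    # caption that emits the end-of-sentence id into complete_captions.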
+    for _ in range(self.max_caption_length - 1):
+      partial_captions_list = partial_captions.extract()
+      partial_captions.reset()
+      input_feed = np.array([c.sentence[-1] for c in partial_captions_list])
+      state_feed = np.array([c.state for c in partial_captions_list])
+
+      softmax, new_states, metadata = self.model.inference_step(sess,
+                                                                input_feed,
+                                                                state_feed)
+
+      for i, partial_caption in enumerate(partial_captions_list):
+        word_probabilities = softmax[i]
+        state = new_states[i]
+        # For this partial caption, get the beam_size most probable next words.
+        # Sort the indexes with numpy, select the last self.beam_size
+        # (3 by default) (ie, the most likely) and then reverse the sorted
+        # indexes with [::-1] to sort them from higher to lower.
+        most_likely_words = np.argsort(word_probabilities)[-self.beam_size:][::-1]
+
+        for w in most_likely_words:
+          p = word_probabilities[w]
+          if p < 1e-12:
+            continue  # Avoid log(0).
+          sentence = partial_caption.sentence + [w]
+          logprob = partial_caption.logprob + math.log(p)
+          score = logprob
+          if metadata:
+            metadata_list = partial_caption.metadata + [metadata[i]]
+          else:
+            metadata_list = None
+          if w == self.vocab.end_id:
+            if self.length_normalization_factor > 0:
+              score /= len(sentence)**self.length_normalization_factor
+            beam = Caption(sentence, state, logprob, score, metadata_list)
+            complete_captions.push(beam)
+          else:
+            beam = Caption(sentence, state, logprob, score, metadata_list)
+            partial_captions.push(beam)
+      if partial_captions.size() == 0:
+        # We have run out of partial candidates; happens when beam_size = 1.
+        break
+
+    # If we have no complete captions then fall back to the partial captions.
+    # But never output a mixture of complete and partial captions because a
+    # partial caption could have a higher score than all the complete captions.
+    if not complete_captions.size():
+      complete_captions = partial_captions
+
+    return complete_captions.extract(sort=True)
diff --git a/models/research/im2txt/im2txt/inference_utils/caption_generator_test.py b/models/research/im2txt/im2txt/inference_utils/caption_generator_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbd069313ac4ddb10a8463d166ab282b68b2e24d
--- /dev/null
+++ b/models/research/im2txt/im2txt/inference_utils/caption_generator_test.py
@@ -0,0 +1,178 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Unit tests for CaptionGenerator."""
+
+import math
+
+
+
+import numpy as np
+import tensorflow as tf
+
+from im2txt.inference_utils import caption_generator
+
+
+class FakeVocab(object):
+  """Fake Vocabulary for testing purposes."""
+
+  def __init__(self):
+    self.start_id = 0  # Word id denoting sentence start.
+    self.end_id = 1  # Word id denoting sentence end.
+
+
+class FakeModel(object):
+  """Fake model for testing purposes."""
+
+  def __init__(self):
+    # Number of words in the vocab.
+ self._vocab_size = 12 + + # Dimensionality of the nominal model state. + self._state_size = 1 + + # Map of previous word to the probability distribution of the next word. + self._probabilities = { + 0: {1: 0.1, + 2: 0.2, + 3: 0.3, + 4: 0.4}, + 2: {5: 0.1, + 6: 0.9}, + 3: {1: 0.1, + 7: 0.4, + 8: 0.5}, + 4: {1: 0.3, + 9: 0.3, + 10: 0.4}, + 5: {1: 1.0}, + 6: {1: 1.0}, + 7: {1: 1.0}, + 8: {1: 1.0}, + 9: {1: 0.5, + 11: 0.5}, + 10: {1: 1.0}, + 11: {1: 1.0}, + } + + # pylint: disable=unused-argument + + def feed_image(self, sess, encoded_image): + # Return a nominal model state. + return np.zeros([1, self._state_size]) + + def inference_step(self, sess, input_feed, state_feed): + # Compute the matrix of softmax distributions for the next batch of words. + batch_size = input_feed.shape[0] + softmax_output = np.zeros([batch_size, self._vocab_size]) + for batch_index, word_id in enumerate(input_feed): + for next_word, probability in self._probabilities[word_id].items(): + softmax_output[batch_index, next_word] = probability + + # Nominal state and metadata. + new_state = np.zeros([batch_size, self._state_size]) + metadata = None + + return softmax_output, new_state, metadata + + # pylint: enable=unused-argument + + +class CaptionGeneratorTest(tf.test.TestCase): + + def _assertExpectedCaptions(self, + expected_captions, + beam_size=3, + max_caption_length=20, + length_normalization_factor=0): + """Tests that beam search generates the expected captions. + + Args: + expected_captions: A sequence of pairs (sentence, probability), where + sentence is a list of integer ids and probability is a float in [0, 1]. + beam_size: Parameter passed to beam_search(). + max_caption_length: Parameter passed to beam_search(). + length_normalization_factor: Parameter passed to beam_search(). + """ + expected_sentences = [c[0] for c in expected_captions] + expected_probabilities = [c[1] for c in expected_captions] + + # Generate captions. + generator = caption_generator.CaptionGenerator( + model=FakeModel(), + vocab=FakeVocab(), + beam_size=beam_size, + max_caption_length=max_caption_length, + length_normalization_factor=length_normalization_factor) + actual_captions = generator.beam_search(sess=None, encoded_image=None) + + actual_sentences = [c.sentence for c in actual_captions] + actual_probabilities = [math.exp(c.logprob) for c in actual_captions] + + self.assertEqual(expected_sentences, actual_sentences) + self.assertAllClose(expected_probabilities, actual_probabilities) + + def testBeamSize(self): + # Beam size = 1. + expected = [([0, 4, 10, 1], 0.16)] + self._assertExpectedCaptions(expected, beam_size=1) + + # Beam size = 2. + expected = [([0, 4, 10, 1], 0.16), ([0, 3, 8, 1], 0.15)] + self._assertExpectedCaptions(expected, beam_size=2) + + # Beam size = 3. + expected = [ + ([0, 2, 6, 1], 0.18), ([0, 4, 10, 1], 0.16), ([0, 3, 8, 1], 0.15) + ] + self._assertExpectedCaptions(expected, beam_size=3) + + def testMaxLength(self): + # Max length = 1. + expected = [([0], 1.0)] + self._assertExpectedCaptions(expected, max_caption_length=1) + + # Max length = 2. + # There are no complete sentences, so partial sentences are returned. + expected = [([0, 4], 0.4), ([0, 3], 0.3), ([0, 2], 0.2)] + self._assertExpectedCaptions(expected, max_caption_length=2) + + # Max length = 3. + # There is at least one complete sentence, so only complete sentences are + # returned. + expected = [([0, 4, 1], 0.12), ([0, 3, 1], 0.03)] + self._assertExpectedCaptions(expected, max_caption_length=3) + + # Max length = 4. 
+ expected = [ + ([0, 2, 6, 1], 0.18), ([0, 4, 10, 1], 0.16), ([0, 3, 8, 1], 0.15) + ] + self._assertExpectedCaptions(expected, max_caption_length=4) + + def testLengthNormalization(self): + # Length normalization factor = 3. + # The longest caption is returned first, despite having low probability, + # because it has the highest log(probability)/length**3. + expected = [ + ([0, 4, 9, 11, 1], 0.06), + ([0, 2, 6, 1], 0.18), + ([0, 4, 10, 1], 0.16), + ([0, 3, 8, 1], 0.15), + ] + self._assertExpectedCaptions( + expected, beam_size=4, length_normalization_factor=3) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/im2txt/im2txt/inference_utils/inference_wrapper_base.py b/models/research/im2txt/im2txt/inference_utils/inference_wrapper_base.py new file mode 100644 index 0000000000000000000000000000000000000000..e94cd6af474488e4b8175fc959e1dbe33cca18c9 --- /dev/null +++ b/models/research/im2txt/im2txt/inference_utils/inference_wrapper_base.py @@ -0,0 +1,181 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Base wrapper class for performing inference with an image-to-text model. + +Subclasses must implement the following methods: + + build_model(): + Builds the model for inference and returns the model object. + + feed_image(): + Takes an encoded image and returns the initial model state, where "state" + is a numpy array whose specifics are defined by the subclass, e.g. + concatenated LSTM state. It's assumed that feed_image() will be called + precisely once at the start of inference for each image. Subclasses may + compute and/or save per-image internal context in this method. + + inference_step(): + Takes a batch of inputs and states at a single time-step. Returns the + softmax output corresponding to the inputs, and the new states of the batch. + Optionally also returns metadata about the current inference step, e.g. a + serialized numpy array containing activations from a particular model layer. + +Client usage: + 1. Build the model inference graph via build_graph_from_config() or + build_graph_from_proto(). + 2. Call the resulting restore_fn to load the model checkpoint. + 3. For each image in a batch of images: + a) Call feed_image() once to get the initial state. + b) For each step of caption generation, call inference_step(). +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os.path + + +import tensorflow as tf + +# pylint: disable=unused-argument + + +class InferenceWrapperBase(object): + """Base wrapper class for performing inference with an image-to-text model.""" + + def __init__(self): + pass + + def build_model(self, model_config): + """Builds the model for inference. + + Args: + model_config: Object containing configuration for building the model. + + Returns: + model: The model object. 
+ """ + tf.logging.fatal("Please implement build_model in subclass") + + def _create_restore_fn(self, checkpoint_path, saver): + """Creates a function that restores a model from checkpoint. + + Args: + checkpoint_path: Checkpoint file or a directory containing a checkpoint + file. + saver: Saver for restoring variables from the checkpoint file. + + Returns: + restore_fn: A function such that restore_fn(sess) loads model variables + from the checkpoint file. + + Raises: + ValueError: If checkpoint_path does not refer to a checkpoint file or a + directory containing a checkpoint file. + """ + if tf.gfile.IsDirectory(checkpoint_path): + checkpoint_path = tf.train.latest_checkpoint(checkpoint_path) + if not checkpoint_path: + raise ValueError("No checkpoint file found in: %s" % checkpoint_path) + + def _restore_fn(sess): + tf.logging.info("Loading model from checkpoint: %s", checkpoint_path) + saver.restore(sess, checkpoint_path) + tf.logging.info("Successfully loaded checkpoint: %s", + os.path.basename(checkpoint_path)) + + return _restore_fn + + def build_graph_from_config(self, model_config, checkpoint_path): + """Builds the inference graph from a configuration object. + + Args: + model_config: Object containing configuration for building the model. + checkpoint_path: Checkpoint file or a directory containing a checkpoint + file. + + Returns: + restore_fn: A function such that restore_fn(sess) loads model variables + from the checkpoint file. + """ + tf.logging.info("Building model.") + self.build_model(model_config) + saver = tf.train.Saver() + + return self._create_restore_fn(checkpoint_path, saver) + + def build_graph_from_proto(self, graph_def_file, saver_def_file, + checkpoint_path): + """Builds the inference graph from serialized GraphDef and SaverDef protos. + + Args: + graph_def_file: File containing a serialized GraphDef proto. + saver_def_file: File containing a serialized SaverDef proto. + checkpoint_path: Checkpoint file or a directory containing a checkpoint + file. + + Returns: + restore_fn: A function such that restore_fn(sess) loads model variables + from the checkpoint file. + """ + # Load the Graph. + tf.logging.info("Loading GraphDef from file: %s", graph_def_file) + graph_def = tf.GraphDef() + with tf.gfile.FastGFile(graph_def_file, "rb") as f: + graph_def.ParseFromString(f.read()) + tf.import_graph_def(graph_def, name="") + + # Load the Saver. + tf.logging.info("Loading SaverDef from file: %s", saver_def_file) + saver_def = tf.train.SaverDef() + with tf.gfile.FastGFile(saver_def_file, "rb") as f: + saver_def.ParseFromString(f.read()) + saver = tf.train.Saver(saver_def=saver_def) + + return self._create_restore_fn(checkpoint_path, saver) + + def feed_image(self, sess, encoded_image): + """Feeds an image and returns the initial model state. + + See comments at the top of file. + + Args: + sess: TensorFlow Session object. + encoded_image: An encoded image string. + + Returns: + state: A numpy array of shape [1, state_size]. + """ + tf.logging.fatal("Please implement feed_image in subclass") + + def inference_step(self, sess, input_feed, state_feed): + """Runs one step of inference. + + Args: + sess: TensorFlow Session object. + input_feed: A numpy array of shape [batch_size]. + state_feed: A numpy array of shape [batch_size, state_size]. + + Returns: + softmax_output: A numpy array of shape [batch_size, vocab_size]. + new_state: A numpy array of shape [batch_size, state_size]. + metadata: Optional. 
If not None, a string containing metadata about the + current inference step (e.g. serialized numpy array containing + activations from a particular model layer.). + """ + tf.logging.fatal("Please implement inference_step in subclass") + +# pylint: enable=unused-argument diff --git a/models/research/im2txt/im2txt/inference_utils/vocabulary.py b/models/research/im2txt/im2txt/inference_utils/vocabulary.py new file mode 100644 index 0000000000000000000000000000000000000000..ecf0ada9c2242cb32c2ea9a300d16411f5e83fab --- /dev/null +++ b/models/research/im2txt/im2txt/inference_utils/vocabulary.py @@ -0,0 +1,78 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Vocabulary class for an image-to-text model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import tensorflow as tf + + +class Vocabulary(object): + """Vocabulary class for an image-to-text model.""" + + def __init__(self, + vocab_file, + start_word="", + end_word="", + unk_word=""): + """Initializes the vocabulary. + + Args: + vocab_file: File containing the vocabulary, where the words are the first + whitespace-separated token on each line (other tokens are ignored) and + the word ids are the corresponding line numbers. + start_word: Special word denoting sentence start. + end_word: Special word denoting sentence end. + unk_word: Special word denoting unknown words. + """ + if not tf.gfile.Exists(vocab_file): + tf.logging.fatal("Vocab file %s not found.", vocab_file) + tf.logging.info("Initializing vocabulary from file: %s", vocab_file) + + with tf.gfile.GFile(vocab_file, mode="r") as f: + reverse_vocab = list(f.readlines()) + reverse_vocab = [line.split()[0] for line in reverse_vocab] + assert start_word in reverse_vocab + assert end_word in reverse_vocab + if unk_word not in reverse_vocab: + reverse_vocab.append(unk_word) + vocab = dict([(x, y) for (y, x) in enumerate(reverse_vocab)]) + + tf.logging.info("Created vocabulary with %d words" % len(vocab)) + + self.vocab = vocab # vocab[word] = id + self.reverse_vocab = reverse_vocab # reverse_vocab[id] = word + + # Save special word ids. 
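+ # The ids are resolved once here so that callers (for example the beam
+ # search in inference_utils/caption_generator.py) can compare integer ids
+ # rather than word strings when deciding where a caption starts and ends.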
+ self.start_id = vocab[start_word] + self.end_id = vocab[end_word] + self.unk_id = vocab[unk_word] + + def word_to_id(self, word): + """Returns the integer word id of a word string.""" + if word in self.vocab: + return self.vocab[word] + else: + return self.unk_id + + def id_to_word(self, word_id): + """Returns the word string of an integer word id.""" + if word_id >= len(self.reverse_vocab): + return self.reverse_vocab[self.unk_id] + else: + return self.reverse_vocab[word_id] diff --git a/models/research/im2txt/im2txt/inference_wrapper.py b/models/research/im2txt/im2txt/inference_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..a047a9c8d084fd9e69c937915cea8553c2d51817 --- /dev/null +++ b/models/research/im2txt/im2txt/inference_wrapper.py @@ -0,0 +1,51 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Model wrapper class for performing inference with a ShowAndTellModel.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + + +from im2txt import show_and_tell_model +from im2txt.inference_utils import inference_wrapper_base + + +class InferenceWrapper(inference_wrapper_base.InferenceWrapperBase): + """Model wrapper class for performing inference with a ShowAndTellModel.""" + + def __init__(self): + super(InferenceWrapper, self).__init__() + + def build_model(self, model_config): + model = show_and_tell_model.ShowAndTellModel(model_config, mode="inference") + model.build() + return model + + def feed_image(self, sess, encoded_image): + initial_state = sess.run(fetches="lstm/initial_state:0", + feed_dict={"image_feed:0": encoded_image}) + return initial_state + + def inference_step(self, sess, input_feed, state_feed): + softmax_output, state_output = sess.run( + fetches=["softmax:0", "lstm/state:0"], + feed_dict={ + "input_feed:0": input_feed, + "lstm/state_feed:0": state_feed, + }) + return softmax_output, state_output, None diff --git a/models/research/im2txt/im2txt/ops/BUILD b/models/research/im2txt/im2txt/ops/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..7d48bf3938c7ecfc94ac6498386e7ce214b8be92 --- /dev/null +++ b/models/research/im2txt/im2txt/ops/BUILD @@ -0,0 +1,32 @@ +package(default_visibility = ["//im2txt:internal"]) + +licenses(["notice"]) # Apache 2.0 + +exports_files(["LICENSE"]) + +py_library( + name = "image_processing", + srcs = ["image_processing.py"], + srcs_version = "PY2AND3", +) + +py_library( + name = "image_embedding", + srcs = ["image_embedding.py"], + srcs_version = "PY2AND3", +) + +py_test( + name = "image_embedding_test", + size = "small", + srcs = ["image_embedding_test.py"], + deps = [ + ":image_embedding", + ], +) + +py_library( + name = "inputs", + srcs = ["inputs.py"], + srcs_version = "PY2AND3", +) diff --git a/models/research/im2txt/im2txt/ops/image_embedding.py 
b/models/research/im2txt/im2txt/ops/image_embedding.py new file mode 100644 index 0000000000000000000000000000000000000000..58e3ddaa95fa799f245fe2a46f2e948be7d9ebf2 --- /dev/null +++ b/models/research/im2txt/im2txt/ops/image_embedding.py @@ -0,0 +1,114 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Image embedding ops.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import tensorflow as tf + +from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3_base + +slim = tf.contrib.slim + + +def inception_v3(images, + trainable=True, + is_training=True, + weight_decay=0.00004, + stddev=0.1, + dropout_keep_prob=0.8, + use_batch_norm=True, + batch_norm_params=None, + add_summaries=True, + scope="InceptionV3"): + """Builds an Inception V3 subgraph for image embeddings. + + Args: + images: A float32 Tensor of shape [batch, height, width, channels]. + trainable: Whether the inception submodel should be trainable or not. + is_training: Boolean indicating training mode or not. + weight_decay: Coefficient for weight regularization. + stddev: The standard deviation of the trunctated normal weight initializer. + dropout_keep_prob: Dropout keep probability. + use_batch_norm: Whether to use batch normalization. + batch_norm_params: Parameters for batch normalization. See + tf.contrib.layers.batch_norm for details. + add_summaries: Whether to add activation summaries. + scope: Optional Variable scope. + + Returns: + end_points: A dictionary of activations from inception_v3 layers. + """ + # Only consider the inception model to be in training mode if it's trainable. + is_inception_model_training = trainable and is_training + + if use_batch_norm: + # Default parameters for batch normalization. + if not batch_norm_params: + batch_norm_params = { + "is_training": is_inception_model_training, + "trainable": trainable, + # Decay for the moving averages. + "decay": 0.9997, + # Epsilon to prevent 0s in variance. + "epsilon": 0.001, + # Collection containing the moving mean and moving variance. 
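+ # Keeping the moving statistics in a dedicated "moving_vars" collection
+ # makes them easy to fetch separately from the trainable weights.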
+ "variables_collections": { + "beta": None, + "gamma": None, + "moving_mean": ["moving_vars"], + "moving_variance": ["moving_vars"], + } + } + else: + batch_norm_params = None + + if trainable: + weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay) + else: + weights_regularizer = None + + with tf.variable_scope(scope, "InceptionV3", [images]) as scope: + with slim.arg_scope( + [slim.conv2d, slim.fully_connected], + weights_regularizer=weights_regularizer, + trainable=trainable): + with slim.arg_scope( + [slim.conv2d], + weights_initializer=tf.truncated_normal_initializer(stddev=stddev), + activation_fn=tf.nn.relu, + normalizer_fn=slim.batch_norm, + normalizer_params=batch_norm_params): + net, end_points = inception_v3_base(images, scope=scope) + with tf.variable_scope("logits"): + shape = net.get_shape() + net = slim.avg_pool2d(net, shape[1:3], padding="VALID", scope="pool") + net = slim.dropout( + net, + keep_prob=dropout_keep_prob, + is_training=is_inception_model_training, + scope="dropout") + net = slim.flatten(net, scope="flatten") + + # Add summaries. + if add_summaries: + for v in end_points.values(): + tf.contrib.layers.summaries.summarize_activation(v) + + return net diff --git a/models/research/im2txt/im2txt/ops/image_embedding_test.py b/models/research/im2txt/im2txt/ops/image_embedding_test.py new file mode 100644 index 0000000000000000000000000000000000000000..66324d68eee0ec9c450375c25229d80283fc909f --- /dev/null +++ b/models/research/im2txt/im2txt/ops/image_embedding_test.py @@ -0,0 +1,136 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for tensorflow_models.im2txt.ops.image_embedding.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import tensorflow as tf + +from im2txt.ops import image_embedding + + +class InceptionV3Test(tf.test.TestCase): + + def setUp(self): + super(InceptionV3Test, self).setUp() + + batch_size = 4 + height = 299 + width = 299 + num_channels = 3 + self._images = tf.placeholder(tf.float32, + [batch_size, height, width, num_channels]) + self._batch_size = batch_size + + def _countInceptionParameters(self): + """Counts the number of parameters in the inception model at top scope.""" + counter = {} + for v in tf.global_variables(): + name_tokens = v.op.name.split("/") + if name_tokens[0] == "InceptionV3": + name = "InceptionV3/" + name_tokens[1] + num_params = v.get_shape().num_elements() + assert num_params + counter[name] = counter.get(name, 0) + num_params + return counter + + def _verifyParameterCounts(self): + """Verifies the number of parameters in the inception model.""" + param_counts = self._countInceptionParameters() + expected_param_counts = { + "InceptionV3/Conv2d_1a_3x3": 960, + "InceptionV3/Conv2d_2a_3x3": 9312, + "InceptionV3/Conv2d_2b_3x3": 18624, + "InceptionV3/Conv2d_3b_1x1": 5360, + "InceptionV3/Conv2d_4a_3x3": 138816, + "InceptionV3/Mixed_5b": 256368, + "InceptionV3/Mixed_5c": 277968, + "InceptionV3/Mixed_5d": 285648, + "InceptionV3/Mixed_6a": 1153920, + "InceptionV3/Mixed_6b": 1298944, + "InceptionV3/Mixed_6c": 1692736, + "InceptionV3/Mixed_6d": 1692736, + "InceptionV3/Mixed_6e": 2143872, + "InceptionV3/Mixed_7a": 1699584, + "InceptionV3/Mixed_7b": 5047872, + "InceptionV3/Mixed_7c": 6080064, + } + self.assertDictEqual(expected_param_counts, param_counts) + + def _assertCollectionSize(self, expected_size, collection): + actual_size = len(tf.get_collection(collection)) + if expected_size != actual_size: + self.fail("Found %d items in collection %s (expected %d)." 
% + (actual_size, collection, expected_size)) + + def testTrainableTrueIsTrainingTrue(self): + embeddings = image_embedding.inception_v3( + self._images, trainable=True, is_training=True) + self.assertEqual([self._batch_size, 2048], embeddings.get_shape().as_list()) + + self._verifyParameterCounts() + self._assertCollectionSize(376, tf.GraphKeys.GLOBAL_VARIABLES) + self._assertCollectionSize(188, tf.GraphKeys.TRAINABLE_VARIABLES) + self._assertCollectionSize(188, tf.GraphKeys.UPDATE_OPS) + self._assertCollectionSize(94, tf.GraphKeys.REGULARIZATION_LOSSES) + self._assertCollectionSize(0, tf.GraphKeys.LOSSES) + self._assertCollectionSize(23, tf.GraphKeys.SUMMARIES) + + def testTrainableTrueIsTrainingFalse(self): + embeddings = image_embedding.inception_v3( + self._images, trainable=True, is_training=False) + self.assertEqual([self._batch_size, 2048], embeddings.get_shape().as_list()) + + self._verifyParameterCounts() + self._assertCollectionSize(376, tf.GraphKeys.GLOBAL_VARIABLES) + self._assertCollectionSize(188, tf.GraphKeys.TRAINABLE_VARIABLES) + self._assertCollectionSize(0, tf.GraphKeys.UPDATE_OPS) + self._assertCollectionSize(94, tf.GraphKeys.REGULARIZATION_LOSSES) + self._assertCollectionSize(0, tf.GraphKeys.LOSSES) + self._assertCollectionSize(23, tf.GraphKeys.SUMMARIES) + + def testTrainableFalseIsTrainingTrue(self): + embeddings = image_embedding.inception_v3( + self._images, trainable=False, is_training=True) + self.assertEqual([self._batch_size, 2048], embeddings.get_shape().as_list()) + + self._verifyParameterCounts() + self._assertCollectionSize(376, tf.GraphKeys.GLOBAL_VARIABLES) + self._assertCollectionSize(0, tf.GraphKeys.TRAINABLE_VARIABLES) + self._assertCollectionSize(0, tf.GraphKeys.UPDATE_OPS) + self._assertCollectionSize(0, tf.GraphKeys.REGULARIZATION_LOSSES) + self._assertCollectionSize(0, tf.GraphKeys.LOSSES) + self._assertCollectionSize(23, tf.GraphKeys.SUMMARIES) + + def testTrainableFalseIsTrainingFalse(self): + embeddings = image_embedding.inception_v3( + self._images, trainable=False, is_training=False) + self.assertEqual([self._batch_size, 2048], embeddings.get_shape().as_list()) + + self._verifyParameterCounts() + self._assertCollectionSize(376, tf.GraphKeys.GLOBAL_VARIABLES) + self._assertCollectionSize(0, tf.GraphKeys.TRAINABLE_VARIABLES) + self._assertCollectionSize(0, tf.GraphKeys.UPDATE_OPS) + self._assertCollectionSize(0, tf.GraphKeys.REGULARIZATION_LOSSES) + self._assertCollectionSize(0, tf.GraphKeys.LOSSES) + self._assertCollectionSize(23, tf.GraphKeys.SUMMARIES) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/research/im2txt/im2txt/ops/image_processing.py b/models/research/im2txt/im2txt/ops/image_processing.py new file mode 100644 index 0000000000000000000000000000000000000000..6a7545547d5507febaabebf642ee81b6f94319f6 --- /dev/null +++ b/models/research/im2txt/im2txt/ops/image_processing.py @@ -0,0 +1,133 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Helper functions for image preprocessing.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import tensorflow as tf + + +def distort_image(image, thread_id): + """Perform random distortions on an image. + + Args: + image: A float32 Tensor of shape [height, width, 3] with values in [0, 1). + thread_id: Preprocessing thread id used to select the ordering of color + distortions. There should be a multiple of 2 preprocessing threads. + + Returns: + distorted_image: A float32 Tensor of shape [height, width, 3] with values in + [0, 1]. + """ + # Randomly flip horizontally. + with tf.name_scope("flip_horizontal", values=[image]): + image = tf.image.random_flip_left_right(image) + + # Randomly distort the colors based on thread id. + color_ordering = thread_id % 2 + with tf.name_scope("distort_color", values=[image]): + if color_ordering == 0: + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.032) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + elif color_ordering == 1: + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.032) + + # The random_* ops do not necessarily clamp. + image = tf.clip_by_value(image, 0.0, 1.0) + + return image + + +def process_image(encoded_image, + is_training, + height, + width, + resize_height=346, + resize_width=346, + thread_id=0, + image_format="jpeg"): + """Decode an image, resize and apply random distortions. + + In training, images are distorted slightly differently depending on thread_id. + + Args: + encoded_image: String Tensor containing the image. + is_training: Boolean; whether preprocessing for training or eval. + height: Height of the output image. + width: Width of the output image. + resize_height: If > 0, resize height before crop to final dimensions. + resize_width: If > 0, resize width before crop to final dimensions. + thread_id: Preprocessing thread id used to select the ordering of color + distortions. There should be a multiple of 2 preprocessing threads. + image_format: "jpeg" or "png". + + Returns: + A float32 Tensor of shape [height, width, 3] with values in [-1, 1]. + + Raises: + ValueError: If image_format is invalid. + """ + # Helper function to log an image summary to the visualizer. Summaries are + # only logged in thread 0. + def image_summary(name, image): + if not thread_id: + tf.summary.image(name, tf.expand_dims(image, 0)) + + # Decode image into a float32 Tensor of shape [?, ?, 3] with values in [0, 1). + with tf.name_scope("decode", values=[encoded_image]): + if image_format == "jpeg": + image = tf.image.decode_jpeg(encoded_image, channels=3) + elif image_format == "png": + image = tf.image.decode_png(encoded_image, channels=3) + else: + raise ValueError("Invalid image format: %s" % image_format) + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + image_summary("original_image", image) + + # Resize image. + assert (resize_height > 0) == (resize_width > 0) + if resize_height: + image = tf.image.resize_images(image, + size=[resize_height, resize_width], + method=tf.image.ResizeMethod.BILINEAR) + + # Crop to final dimensions. 
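+ # With the default arguments this reduces the 346x346 resized image to the
+ # requested height x width (299x299 for Inception v3): a random crop during
+ # training, which acts as cheap data augmentation, and a central crop during
+ # evaluation so results are deterministic.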
+ if is_training: + image = tf.random_crop(image, [height, width, 3]) + else: + # Central crop, assuming resize_height > height, resize_width > width. + image = tf.image.resize_image_with_crop_or_pad(image, height, width) + + image_summary("resized_image", image) + + # Randomly distort the image. + if is_training: + image = distort_image(image, thread_id) + + image_summary("final_image", image) + + # Rescale to [-1,1] instead of [0, 1] + image = tf.subtract(image, 0.5) + image = tf.multiply(image, 2.0) + return image diff --git a/models/research/im2txt/im2txt/ops/inputs.py b/models/research/im2txt/im2txt/ops/inputs.py new file mode 100644 index 0000000000000000000000000000000000000000..5dc90c0ce5dfd5c30fe0e0e543999bb15cc13a8c --- /dev/null +++ b/models/research/im2txt/im2txt/ops/inputs.py @@ -0,0 +1,204 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Input ops.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import tensorflow as tf + + +def parse_sequence_example(serialized, image_feature, caption_feature): + """Parses a tensorflow.SequenceExample into an image and caption. + + Args: + serialized: A scalar string Tensor; a single serialized SequenceExample. + image_feature: Name of SequenceExample context feature containing image + data. + caption_feature: Name of SequenceExample feature list containing integer + captions. + + Returns: + encoded_image: A scalar string Tensor containing a JPEG encoded image. + caption: A 1-D uint64 Tensor with dynamically specified length. + """ + context, sequence = tf.parse_single_sequence_example( + serialized, + context_features={ + image_feature: tf.FixedLenFeature([], dtype=tf.string) + }, + sequence_features={ + caption_feature: tf.FixedLenSequenceFeature([], dtype=tf.int64), + }) + + encoded_image = context[image_feature] + caption = sequence[caption_feature] + return encoded_image, caption + + +def prefetch_input_data(reader, + file_pattern, + is_training, + batch_size, + values_per_shard, + input_queue_capacity_factor=16, + num_reader_threads=1, + shard_queue_name="filename_queue", + value_queue_name="input_queue"): + """Prefetches string values from disk into an input queue. + + In training the capacity of the queue is important because a larger queue + means better mixing of training examples between shards. The minimum number of + values kept in the queue is values_per_shard * input_queue_capacity_factor, + where input_queue_memory factor should be chosen to trade-off better mixing + with memory usage. + + Args: + reader: Instance of tf.ReaderBase. + file_pattern: Comma-separated list of file patterns (e.g. + /tmp/train_data-?????-of-00100). + is_training: Boolean; whether prefetching for training or eval. + batch_size: Model batch size used to determine queue capacity. 
+ values_per_shard: Approximate number of values per shard. + input_queue_capacity_factor: Minimum number of values to keep in the queue + in multiples of values_per_shard. See comments above. + num_reader_threads: Number of reader threads to fill the queue. + shard_queue_name: Name for the shards filename queue. + value_queue_name: Name for the values input queue. + + Returns: + A Queue containing prefetched string values. + """ + data_files = [] + for pattern in file_pattern.split(","): + data_files.extend(tf.gfile.Glob(pattern)) + if not data_files: + tf.logging.fatal("Found no input files matching %s", file_pattern) + else: + tf.logging.info("Prefetching values from %d files matching %s", + len(data_files), file_pattern) + + if is_training: + filename_queue = tf.train.string_input_producer( + data_files, shuffle=True, capacity=16, name=shard_queue_name) + min_queue_examples = values_per_shard * input_queue_capacity_factor + capacity = min_queue_examples + 100 * batch_size + values_queue = tf.RandomShuffleQueue( + capacity=capacity, + min_after_dequeue=min_queue_examples, + dtypes=[tf.string], + name="random_" + value_queue_name) + else: + filename_queue = tf.train.string_input_producer( + data_files, shuffle=False, capacity=1, name=shard_queue_name) + capacity = values_per_shard + 3 * batch_size + values_queue = tf.FIFOQueue( + capacity=capacity, dtypes=[tf.string], name="fifo_" + value_queue_name) + + enqueue_ops = [] + for _ in range(num_reader_threads): + _, value = reader.read(filename_queue) + enqueue_ops.append(values_queue.enqueue([value])) + tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner( + values_queue, enqueue_ops)) + tf.summary.scalar( + "queue/%s/fraction_of_%d_full" % (values_queue.name, capacity), + tf.cast(values_queue.size(), tf.float32) * (1. / capacity)) + + return values_queue + + +def batch_with_dynamic_pad(images_and_captions, + batch_size, + queue_capacity, + add_summaries=True): + """Batches input images and captions. + + This function splits the caption into an input sequence and a target sequence, + where the target sequence is the input sequence right-shifted by 1. Input and + target sequences are batched and padded up to the maximum length of sequences + in the batch. A mask is created to distinguish real words from padding words. + + Example: + Actual captions in the batch ('-' denotes padded character): + [ + [ 1 2 3 4 5 ], + [ 1 2 3 4 - ], + [ 1 2 3 - - ], + ] + + input_seqs: + [ + [ 1 2 3 4 ], + [ 1 2 3 - ], + [ 1 2 - - ], + ] + + target_seqs: + [ + [ 2 3 4 5 ], + [ 2 3 4 - ], + [ 2 3 - - ], + ] + + mask: + [ + [ 1 1 1 1 ], + [ 1 1 1 0 ], + [ 1 1 0 0 ], + ] + + Args: + images_and_captions: A list of pairs [image, caption], where image is a + Tensor of shape [height, width, channels] and caption is a 1-D Tensor of + any length. Each pair will be processed and added to the queue in a + separate thread. + batch_size: Batch size. + queue_capacity: Queue capacity. + add_summaries: If true, add caption length summaries. + + Returns: + images: A Tensor of shape [batch_size, height, width, channels]. + input_seqs: An int32 Tensor of shape [batch_size, padded_length]. + target_seqs: An int32 Tensor of shape [batch_size, padded_length]. + mask: An int32 0/1 Tensor of shape [batch_size, padded_length]. 
+ """ + enqueue_list = [] + for image, caption in images_and_captions: + caption_length = tf.shape(caption)[0] + input_length = tf.expand_dims(tf.subtract(caption_length, 1), 0) + + input_seq = tf.slice(caption, [0], input_length) + target_seq = tf.slice(caption, [1], input_length) + indicator = tf.ones(input_length, dtype=tf.int32) + enqueue_list.append([image, input_seq, target_seq, indicator]) + + images, input_seqs, target_seqs, mask = tf.train.batch_join( + enqueue_list, + batch_size=batch_size, + capacity=queue_capacity, + dynamic_pad=True, + name="batch_and_pad") + + if add_summaries: + lengths = tf.add(tf.reduce_sum(mask, 1), 1) + tf.summary.scalar("caption_length/batch_min", tf.reduce_min(lengths)) + tf.summary.scalar("caption_length/batch_max", tf.reduce_max(lengths)) + tf.summary.scalar("caption_length/batch_mean", tf.reduce_mean(lengths)) + + return images, input_seqs, target_seqs, mask diff --git a/models/research/im2txt/im2txt/run_inference.py b/models/research/im2txt/im2txt/run_inference.py new file mode 100644 index 0000000000000000000000000000000000000000..9848522df162e52394ee8349dab1f5220aeb88f6 --- /dev/null +++ b/models/research/im2txt/im2txt/run_inference.py @@ -0,0 +1,85 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Generate captions for images using default beam search parameters.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import os + + +import tensorflow as tf + +from im2txt import configuration +from im2txt import inference_wrapper +from im2txt.inference_utils import caption_generator +from im2txt.inference_utils import vocabulary + +FLAGS = tf.flags.FLAGS + +tf.flags.DEFINE_string("checkpoint_path", "", + "Model checkpoint file or directory containing a " + "model checkpoint file.") +tf.flags.DEFINE_string("vocab_file", "", "Text file containing the vocabulary.") +tf.flags.DEFINE_string("input_files", "", + "File pattern or comma-separated list of file patterns " + "of image files.") + +tf.logging.set_verbosity(tf.logging.INFO) + + +def main(_): + # Build the inference graph. + g = tf.Graph() + with g.as_default(): + model = inference_wrapper.InferenceWrapper() + restore_fn = model.build_graph_from_config(configuration.ModelConfig(), + FLAGS.checkpoint_path) + g.finalize() + + # Create the vocabulary. + vocab = vocabulary.Vocabulary(FLAGS.vocab_file) + + filenames = [] + for file_pattern in FLAGS.input_files.split(","): + filenames.extend(tf.gfile.Glob(file_pattern)) + tf.logging.info("Running caption generation on %d files matching %s", + len(filenames), FLAGS.input_files) + + with tf.Session(graph=g) as sess: + # Load the model from checkpoint. + restore_fn(sess) + + # Prepare the caption generator. Here we are implicitly using the default + # beam search parameters. 
See caption_generator.py for a description of the + # available beam search parameters. + generator = caption_generator.CaptionGenerator(model, vocab) + + for filename in filenames: + with tf.gfile.GFile(filename, "rb") as f: + image = f.read() + captions = generator.beam_search(sess, image) + print("Captions for image %s:" % os.path.basename(filename)) + for i, caption in enumerate(captions): + # Ignore begin and end words. + sentence = [vocab.id_to_word(w) for w in caption.sentence[1:-1]] + sentence = " ".join(sentence) + print(" %d) %s (p=%f)" % (i, sentence, math.exp(caption.logprob))) + + +if __name__ == "__main__": + tf.app.run() diff --git a/models/research/im2txt/im2txt/show_and_tell_model.py b/models/research/im2txt/im2txt/show_and_tell_model.py new file mode 100644 index 0000000000000000000000000000000000000000..0ac29e7fdb80fbefe3594eabc972648a3fb32312 --- /dev/null +++ b/models/research/im2txt/im2txt/show_and_tell_model.py @@ -0,0 +1,358 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Image-to-text implementation based on http://arxiv.org/abs/1411.4555. + +"Show and Tell: A Neural Image Caption Generator" +Oriol Vinyals, Alexander Toshev, Samy Bengio, Dumitru Erhan +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import tensorflow as tf + +from im2txt.ops import image_embedding +from im2txt.ops import image_processing +from im2txt.ops import inputs as input_ops + + +class ShowAndTellModel(object): + """Image-to-text implementation based on http://arxiv.org/abs/1411.4555. + + "Show and Tell: A Neural Image Caption Generator" + Oriol Vinyals, Alexander Toshev, Samy Bengio, Dumitru Erhan + """ + + def __init__(self, config, mode, train_inception=False): + """Basic setup. + + Args: + config: Object containing configuration parameters. + mode: "train", "eval" or "inference". + train_inception: Whether the inception submodel variables are trainable. + """ + assert mode in ["train", "eval", "inference"] + self.config = config + self.mode = mode + self.train_inception = train_inception + + # Reader for the input data. + self.reader = tf.TFRecordReader() + + # To match the "Show and Tell" paper we initialize all variables with a + # random uniform initializer. + self.initializer = tf.random_uniform_initializer( + minval=-self.config.initializer_scale, + maxval=self.config.initializer_scale) + + # A float32 Tensor with shape [batch_size, height, width, channels]. + self.images = None + + # An int32 Tensor with shape [batch_size, padded_length]. + self.input_seqs = None + + # An int32 Tensor with shape [batch_size, padded_length]. + self.target_seqs = None + + # An int32 0/1 Tensor with shape [batch_size, padded_length]. + self.input_mask = None + + # A float32 Tensor with shape [batch_size, embedding_size]. 
+ self.image_embeddings = None + + # A float32 Tensor with shape [batch_size, padded_length, embedding_size]. + self.seq_embeddings = None + + # A float32 scalar Tensor; the total loss for the trainer to optimize. + self.total_loss = None + + # A float32 Tensor with shape [batch_size * padded_length]. + self.target_cross_entropy_losses = None + + # A float32 Tensor with shape [batch_size * padded_length]. + self.target_cross_entropy_loss_weights = None + + # Collection of variables from the inception submodel. + self.inception_variables = [] + + # Function to restore the inception submodel from checkpoint. + self.init_fn = None + + # Global step Tensor. + self.global_step = None + + def is_training(self): + """Returns true if the model is built for training mode.""" + return self.mode == "train" + + def process_image(self, encoded_image, thread_id=0): + """Decodes and processes an image string. + + Args: + encoded_image: A scalar string Tensor; the encoded image. + thread_id: Preprocessing thread id used to select the ordering of color + distortions. + + Returns: + A float32 Tensor of shape [height, width, 3]; the processed image. + """ + return image_processing.process_image(encoded_image, + is_training=self.is_training(), + height=self.config.image_height, + width=self.config.image_width, + thread_id=thread_id, + image_format=self.config.image_format) + + def build_inputs(self): + """Input prefetching, preprocessing and batching. + + Outputs: + self.images + self.input_seqs + self.target_seqs (training and eval only) + self.input_mask (training and eval only) + """ + if self.mode == "inference": + # In inference mode, images and inputs are fed via placeholders. + image_feed = tf.placeholder(dtype=tf.string, shape=[], name="image_feed") + input_feed = tf.placeholder(dtype=tf.int64, + shape=[None], # batch_size + name="input_feed") + + # Process image and insert batch dimensions. + images = tf.expand_dims(self.process_image(image_feed), 0) + input_seqs = tf.expand_dims(input_feed, 1) + + # No target sequences or input mask in inference mode. + target_seqs = None + input_mask = None + else: + # Prefetch serialized SequenceExample protos. + input_queue = input_ops.prefetch_input_data( + self.reader, + self.config.input_file_pattern, + is_training=self.is_training(), + batch_size=self.config.batch_size, + values_per_shard=self.config.values_per_input_shard, + input_queue_capacity_factor=self.config.input_queue_capacity_factor, + num_reader_threads=self.config.num_input_reader_threads) + + # Image processing and random distortion. Split across multiple threads + # with each thread applying a slightly different distortion. + assert self.config.num_preprocess_threads % 2 == 0 + images_and_captions = [] + for thread_id in range(self.config.num_preprocess_threads): + serialized_sequence_example = input_queue.dequeue() + encoded_image, caption = input_ops.parse_sequence_example( + serialized_sequence_example, + image_feature=self.config.image_feature_name, + caption_feature=self.config.caption_feature_name) + image = self.process_image(encoded_image, thread_id=thread_id) + images_and_captions.append([image, caption]) + + # Batch inputs. 
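+ # batch_with_dynamic_pad (see ops/inputs.py) splits each caption into
+ # input/target sequences, pads them to the longest caption in the batch,
+ # and returns a 0/1 mask marking the real (non-padding) words.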
+ queue_capacity = (2 * self.config.num_preprocess_threads * + self.config.batch_size) + images, input_seqs, target_seqs, input_mask = ( + input_ops.batch_with_dynamic_pad(images_and_captions, + batch_size=self.config.batch_size, + queue_capacity=queue_capacity)) + + self.images = images + self.input_seqs = input_seqs + self.target_seqs = target_seqs + self.input_mask = input_mask + + def build_image_embeddings(self): + """Builds the image model subgraph and generates image embeddings. + + Inputs: + self.images + + Outputs: + self.image_embeddings + """ + inception_output = image_embedding.inception_v3( + self.images, + trainable=self.train_inception, + is_training=self.is_training()) + self.inception_variables = tf.get_collection( + tf.GraphKeys.GLOBAL_VARIABLES, scope="InceptionV3") + + # Map inception output into embedding space. + with tf.variable_scope("image_embedding") as scope: + image_embeddings = tf.contrib.layers.fully_connected( + inputs=inception_output, + num_outputs=self.config.embedding_size, + activation_fn=None, + weights_initializer=self.initializer, + biases_initializer=None, + scope=scope) + + # Save the embedding size in the graph. + tf.constant(self.config.embedding_size, name="embedding_size") + + self.image_embeddings = image_embeddings + + def build_seq_embeddings(self): + """Builds the input sequence embeddings. + + Inputs: + self.input_seqs + + Outputs: + self.seq_embeddings + """ + with tf.variable_scope("seq_embedding"), tf.device("/cpu:0"): + embedding_map = tf.get_variable( + name="map", + shape=[self.config.vocab_size, self.config.embedding_size], + initializer=self.initializer) + seq_embeddings = tf.nn.embedding_lookup(embedding_map, self.input_seqs) + + self.seq_embeddings = seq_embeddings + + def build_model(self): + """Builds the model. + + Inputs: + self.image_embeddings + self.seq_embeddings + self.target_seqs (training and eval only) + self.input_mask (training and eval only) + + Outputs: + self.total_loss (training and eval only) + self.target_cross_entropy_losses (training and eval only) + self.target_cross_entropy_loss_weights (training and eval only) + """ + # This LSTM cell has biases and outputs tanh(new_c) * sigmoid(o), but the + # modified LSTM in the "Show and Tell" paper has no biases and outputs + # new_c * sigmoid(o). + lstm_cell = tf.contrib.rnn.BasicLSTMCell( + num_units=self.config.num_lstm_units, state_is_tuple=True) + if self.mode == "train": + lstm_cell = tf.contrib.rnn.DropoutWrapper( + lstm_cell, + input_keep_prob=self.config.lstm_dropout_keep_prob, + output_keep_prob=self.config.lstm_dropout_keep_prob) + + with tf.variable_scope("lstm", initializer=self.initializer) as lstm_scope: + # Feed the image embeddings to set the initial LSTM state. + zero_state = lstm_cell.zero_state( + batch_size=self.image_embeddings.get_shape()[0], dtype=tf.float32) + _, initial_state = lstm_cell(self.image_embeddings, zero_state) + + # Allow the LSTM variables to be reused. + lstm_scope.reuse_variables() + + if self.mode == "inference": + # In inference mode, use concatenated states for convenient feeding and + # fetching. + tf.concat(axis=1, values=initial_state, name="initial_state") + + # Placeholder for feeding a batch of concatenated states. + state_feed = tf.placeholder(dtype=tf.float32, + shape=[None, sum(lstm_cell.state_size)], + name="state_feed") + state_tuple = tf.split(value=state_feed, num_or_size_splits=2, axis=1) + + # Run a single LSTM step. 
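+ # At inference time the caller feeds exactly one word per image at each
+ # step, so seq_embeddings has shape [batch_size, 1, embedding_size] and
+ # the squeeze below produces the [batch_size, embedding_size] LSTM input.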
+ lstm_outputs, state_tuple = lstm_cell( + inputs=tf.squeeze(self.seq_embeddings, axis=[1]), + state=state_tuple) + + # Concatentate the resulting state. + tf.concat(axis=1, values=state_tuple, name="state") + else: + # Run the batch of sequence embeddings through the LSTM. + sequence_length = tf.reduce_sum(self.input_mask, 1) + lstm_outputs, _ = tf.nn.dynamic_rnn(cell=lstm_cell, + inputs=self.seq_embeddings, + sequence_length=sequence_length, + initial_state=initial_state, + dtype=tf.float32, + scope=lstm_scope) + + # Stack batches vertically. + lstm_outputs = tf.reshape(lstm_outputs, [-1, lstm_cell.output_size]) + + with tf.variable_scope("logits") as logits_scope: + logits = tf.contrib.layers.fully_connected( + inputs=lstm_outputs, + num_outputs=self.config.vocab_size, + activation_fn=None, + weights_initializer=self.initializer, + scope=logits_scope) + + if self.mode == "inference": + tf.nn.softmax(logits, name="softmax") + else: + targets = tf.reshape(self.target_seqs, [-1]) + weights = tf.to_float(tf.reshape(self.input_mask, [-1])) + + # Compute losses. + losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets, + logits=logits) + batch_loss = tf.div(tf.reduce_sum(tf.multiply(losses, weights)), + tf.reduce_sum(weights), + name="batch_loss") + tf.losses.add_loss(batch_loss) + total_loss = tf.losses.get_total_loss() + + # Add summaries. + tf.summary.scalar("losses/batch_loss", batch_loss) + tf.summary.scalar("losses/total_loss", total_loss) + for var in tf.trainable_variables(): + tf.summary.histogram("parameters/" + var.op.name, var) + + self.total_loss = total_loss + self.target_cross_entropy_losses = losses # Used in evaluation. + self.target_cross_entropy_loss_weights = weights # Used in evaluation. + + def setup_inception_initializer(self): + """Sets up the function to restore inception variables from checkpoint.""" + if self.mode != "inference": + # Restore inception variables only. + saver = tf.train.Saver(self.inception_variables) + + def restore_fn(sess): + tf.logging.info("Restoring Inception variables from checkpoint file %s", + self.config.inception_checkpoint_file) + saver.restore(sess, self.config.inception_checkpoint_file) + + self.init_fn = restore_fn + + def setup_global_step(self): + """Sets up the global step Tensor.""" + global_step = tf.Variable( + initial_value=0, + name="global_step", + trainable=False, + collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES]) + + self.global_step = global_step + + def build(self): + """Creates all ops for training and evaluation.""" + self.build_inputs() + self.build_image_embeddings() + self.build_seq_embeddings() + self.build_model() + self.setup_inception_initializer() + self.setup_global_step() diff --git a/models/research/im2txt/im2txt/show_and_tell_model_test.py b/models/research/im2txt/im2txt/show_and_tell_model_test.py new file mode 100644 index 0000000000000000000000000000000000000000..0bdfb6e1a3ae3c15bd1c8daf005fe2542436ca8e --- /dev/null +++ b/models/research/im2txt/im2txt/show_and_tell_model_test.py @@ -0,0 +1,200 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for tensorflow_models.im2txt.show_and_tell_model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import numpy as np +import tensorflow as tf + +from im2txt import configuration +from im2txt import show_and_tell_model + + +class ShowAndTellModel(show_and_tell_model.ShowAndTellModel): + """Subclass of ShowAndTellModel without the disk I/O.""" + + def build_inputs(self): + if self.mode == "inference": + # Inference mode doesn't read from disk, so defer to parent. + return super(ShowAndTellModel, self).build_inputs() + else: + # Replace disk I/O with random Tensors. + self.images = tf.random_uniform( + shape=[self.config.batch_size, self.config.image_height, + self.config.image_width, 3], + minval=-1, + maxval=1) + self.input_seqs = tf.random_uniform( + [self.config.batch_size, 15], + minval=0, + maxval=self.config.vocab_size, + dtype=tf.int64) + self.target_seqs = tf.random_uniform( + [self.config.batch_size, 15], + minval=0, + maxval=self.config.vocab_size, + dtype=tf.int64) + self.input_mask = tf.ones_like(self.input_seqs) + + +class ShowAndTellModelTest(tf.test.TestCase): + + def setUp(self): + super(ShowAndTellModelTest, self).setUp() + self._model_config = configuration.ModelConfig() + + def _countModelParameters(self): + """Counts the number of parameters in the model at top level scope.""" + counter = {} + for v in tf.global_variables(): + name = v.op.name.split("/")[0] + num_params = v.get_shape().num_elements() + assert num_params + counter[name] = counter.get(name, 0) + num_params + return counter + + def _checkModelParameters(self): + """Verifies the number of parameters in the model.""" + param_counts = self._countModelParameters() + expected_param_counts = { + "InceptionV3": 21802784, + # inception_output_size * embedding_size + "image_embedding": 1048576, + # vocab_size * embedding_size + "seq_embedding": 6144000, + # (embedding_size + num_lstm_units + 1) * 4 * num_lstm_units + "lstm": 2099200, + # (num_lstm_units + 1) * vocab_size + "logits": 6156000, + "global_step": 1, + } + self.assertDictEqual(expected_param_counts, param_counts) + + def _checkOutputs(self, expected_shapes, feed_dict=None): + """Verifies that the model produces expected outputs. + + Args: + expected_shapes: A dict mapping Tensor or Tensor name to expected output + shape. + feed_dict: Values of Tensors to feed into Session.run(). + """ + fetches = expected_shapes.keys() + + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + outputs = sess.run(fetches, feed_dict) + + for index, output in enumerate(outputs): + tensor = fetches[index] + expected = expected_shapes[tensor] + actual = output.shape + if expected != actual: + self.fail("Tensor %s has shape %s (expected %s)." 
% + (tensor, actual, expected)) + + def testBuildForTraining(self): + model = ShowAndTellModel(self._model_config, mode="train") + model.build() + + self._checkModelParameters() + + expected_shapes = { + # [batch_size, image_height, image_width, 3] + model.images: (32, 299, 299, 3), + # [batch_size, sequence_length] + model.input_seqs: (32, 15), + # [batch_size, sequence_length] + model.target_seqs: (32, 15), + # [batch_size, sequence_length] + model.input_mask: (32, 15), + # [batch_size, embedding_size] + model.image_embeddings: (32, 512), + # [batch_size, sequence_length, embedding_size] + model.seq_embeddings: (32, 15, 512), + # Scalar + model.total_loss: (), + # [batch_size * sequence_length] + model.target_cross_entropy_losses: (480,), + # [batch_size * sequence_length] + model.target_cross_entropy_loss_weights: (480,), + } + self._checkOutputs(expected_shapes) + + def testBuildForEval(self): + model = ShowAndTellModel(self._model_config, mode="eval") + model.build() + + self._checkModelParameters() + + expected_shapes = { + # [batch_size, image_height, image_width, 3] + model.images: (32, 299, 299, 3), + # [batch_size, sequence_length] + model.input_seqs: (32, 15), + # [batch_size, sequence_length] + model.target_seqs: (32, 15), + # [batch_size, sequence_length] + model.input_mask: (32, 15), + # [batch_size, embedding_size] + model.image_embeddings: (32, 512), + # [batch_size, sequence_length, embedding_size] + model.seq_embeddings: (32, 15, 512), + # Scalar + model.total_loss: (), + # [batch_size * sequence_length] + model.target_cross_entropy_losses: (480,), + # [batch_size * sequence_length] + model.target_cross_entropy_loss_weights: (480,), + } + self._checkOutputs(expected_shapes) + + def testBuildForInference(self): + model = ShowAndTellModel(self._model_config, mode="inference") + model.build() + + self._checkModelParameters() + + # Test feeding an image to get the initial LSTM state. + images_feed = np.random.rand(1, 299, 299, 3) + feed_dict = {model.images: images_feed} + expected_shapes = { + # [batch_size, embedding_size] + model.image_embeddings: (1, 512), + # [batch_size, 2 * num_lstm_units] + "lstm/initial_state:0": (1, 1024), + } + self._checkOutputs(expected_shapes, feed_dict) + + # Test feeding a batch of inputs and LSTM states to get softmax output and + # LSTM states. + input_feed = np.random.randint(0, 10, size=3) + state_feed = np.random.rand(3, 1024) + feed_dict = {"input_feed:0": input_feed, "lstm/state_feed:0": state_feed} + expected_shapes = { + # [batch_size, 2 * num_lstm_units] + "lstm/state:0": (3, 1024), + # [batch_size, vocab_size] + "softmax:0": (3, 12000), + } + self._checkOutputs(expected_shapes, feed_dict) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/research/im2txt/im2txt/train.py b/models/research/im2txt/im2txt/train.py new file mode 100644 index 0000000000000000000000000000000000000000..db602735ba11e7f540a4e985333d8a457512c977 --- /dev/null +++ b/models/research/im2txt/im2txt/train.py @@ -0,0 +1,114 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Train the model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import tensorflow as tf + +from im2txt import configuration +from im2txt import show_and_tell_model + +FLAGS = tf.app.flags.FLAGS + +tf.flags.DEFINE_string("input_file_pattern", "", + "File pattern of sharded TFRecord input files.") +tf.flags.DEFINE_string("inception_checkpoint_file", "", + "Path to a pretrained inception_v3 model.") +tf.flags.DEFINE_string("train_dir", "", + "Directory for saving and loading model checkpoints.") +tf.flags.DEFINE_boolean("train_inception", False, + "Whether to train inception submodel variables.") +tf.flags.DEFINE_integer("number_of_steps", 1000000, "Number of training steps.") +tf.flags.DEFINE_integer("log_every_n_steps", 1, + "Frequency at which loss and global step are logged.") + +tf.logging.set_verbosity(tf.logging.INFO) + + +def main(unused_argv): + assert FLAGS.input_file_pattern, "--input_file_pattern is required" + assert FLAGS.train_dir, "--train_dir is required" + + model_config = configuration.ModelConfig() + model_config.input_file_pattern = FLAGS.input_file_pattern + model_config.inception_checkpoint_file = FLAGS.inception_checkpoint_file + training_config = configuration.TrainingConfig() + + # Create training directory. + train_dir = FLAGS.train_dir + if not tf.gfile.IsDirectory(train_dir): + tf.logging.info("Creating training directory: %s", train_dir) + tf.gfile.MakeDirs(train_dir) + + # Build the TensorFlow graph. + g = tf.Graph() + with g.as_default(): + # Build the model. + model = show_and_tell_model.ShowAndTellModel( + model_config, mode="train", train_inception=FLAGS.train_inception) + model.build() + + # Set up the learning rate. + learning_rate_decay_fn = None + if FLAGS.train_inception: + learning_rate = tf.constant(training_config.train_inception_learning_rate) + else: + learning_rate = tf.constant(training_config.initial_learning_rate) + if training_config.learning_rate_decay_factor > 0: + num_batches_per_epoch = (training_config.num_examples_per_epoch / + model_config.batch_size) + decay_steps = int(num_batches_per_epoch * + training_config.num_epochs_per_decay) + + def _learning_rate_decay_fn(learning_rate, global_step): + return tf.train.exponential_decay( + learning_rate, + global_step, + decay_steps=decay_steps, + decay_rate=training_config.learning_rate_decay_factor, + staircase=True) + + learning_rate_decay_fn = _learning_rate_decay_fn + + # Set up the training ops. + train_op = tf.contrib.layers.optimize_loss( + loss=model.total_loss, + global_step=model.global_step, + learning_rate=learning_rate, + optimizer=training_config.optimizer, + clip_gradients=training_config.clip_gradients, + learning_rate_decay_fn=learning_rate_decay_fn) + + # Set up the Saver for saving and restoring model checkpoints. + saver = tf.train.Saver(max_to_keep=training_config.max_checkpoints_to_keep) + + # Run training. 
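+ # slim.learning.train manages the session, writes checkpoints and summaries
+ # to train_dir, and runs model.init_fn when a fresh run starts so the
+ # pretrained Inception weights are restored before training.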
+ tf.contrib.slim.learning.train( + train_op, + train_dir, + log_every_n_steps=FLAGS.log_every_n_steps, + graph=g, + global_step=model.global_step, + number_of_steps=FLAGS.number_of_steps, + init_fn=model.init_fn, + saver=saver) + + +if __name__ == "__main__": + tf.app.run() diff --git a/models/research/inception/.gitignore b/models/research/inception/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..58cbf2f4e0d5d39a0e3910d6993508546dad429f --- /dev/null +++ b/models/research/inception/.gitignore @@ -0,0 +1,7 @@ +/bazel-bin +/bazel-ci_build-cache +/bazel-genfiles +/bazel-out +/bazel-inception +/bazel-testlogs +/bazel-tf diff --git a/models/research/inception/README.md b/models/research/inception/README.md new file mode 100644 index 0000000000000000000000000000000000000000..beed66cf5cd83a6843ec39b28b5dbd88f1c0d3d0 --- /dev/null +++ b/models/research/inception/README.md @@ -0,0 +1,858 @@ +![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +**NOTE: For the most part, you will find a newer version of this code at [models/research/slim](https://github.com/tensorflow/models/tree/master/research/slim).** In particular: + +* `inception_train.py` and `imagenet_train.py` should no longer be used. The slim editions for running on multiple GPUs are the current best examples. +* `inception_distributed_train.py` and `imagenet_distributed_train.py` are still valid examples of distributed training. + +For performance benchmarking, please see https://www.tensorflow.org/performance/benchmarks. + +--- + +# Inception in TensorFlow + +[ImageNet](http://www.image-net.org/) is a common academic data set in machine +learning for training an image recognition system. Code in this directory +demonstrates how to use TensorFlow to train and evaluate a type of convolutional +neural network (CNN) on this academic data set. In particular, we demonstrate +how to train the Inception v3 architecture as specified in: + +_Rethinking the Inception Architecture for Computer Vision_ + +Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, Zbigniew +Wojna + +http://arxiv.org/abs/1512.00567 + +This network achieves 21.2% top-1 and 5.6% top-5 error for single frame +evaluation with a computational cost of 5 billion multiply-adds per inference +and with using less than 25 million parameters. Below is a visualization of the +model architecture. + +![Inception-v3 Architecture](g3doc/inception_v3_architecture.png) + +## Description of Code + +The code base provides three core binaries for: + +* Training an Inception v3 network from scratch across multiple GPUs and/or + multiple machines using the ImageNet 2012 Challenge training data set. +* Evaluating an Inception v3 network using the ImageNet 2012 Challenge + validation data set. +* Retraining an Inception v3 network on a novel task and back-propagating the + errors to fine tune the network weights. + +The training procedure employs synchronous stochastic gradient descent across +multiple GPUs. The user may specify the number of GPUs they wish to harness. The +synchronous training performs *batch-splitting* by dividing a given batch across +multiple GPUs. 
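+
+Conceptually, one synchronous step of this batch-splitting looks like the
+sketch below. It is an illustrative outline only (the real logic lives in
+`inception_train.py` and additionally handles variable placement, batch-norm
+statistics, learning-rate schedules and summaries); the `build_tower_loss`
+helper and the variable names are hypothetical:
+
+```python
+import tensorflow as tf
+
+def synchronous_train_op(images, labels, optimizer, num_gpus, build_tower_loss):
+  """Splits a batch across GPUs, averages gradients, applies them once."""
+  image_splits = tf.split(images, num_gpus, axis=0)
+  label_splits = tf.split(labels, num_gpus, axis=0)
+
+  tower_grads = []
+  for i in range(num_gpus):
+    # Each GPU gets one slice of the batch; variables are shared across towers.
+    with tf.device("/gpu:%d" % i), tf.variable_scope("tower", reuse=(i > 0)):
+      loss = build_tower_loss(image_splits[i], label_splits[i])
+      tower_grads.append(optimizer.compute_gradients(loss))
+
+  # Average each variable's gradient over the towers so every replica applies
+  # the same synchronous update.
+  averaged_grads = []
+  for grads_and_vars in zip(*tower_grads):
+    grads = [g for g, _ in grads_and_vars if g is not None]
+    var = grads_and_vars[0][1]
+    averaged_grads.append((tf.add_n(grads) / len(grads), var))
+
+  return optimizer.apply_gradients(averaged_grads)
+```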
+ +The training set up is nearly identical to the section [Training a Model Using +Multiple GPU Cards](https://www.tensorflow.org/tutorials/deep_cnn/index.html#launching_and_training_the_model_on_multiple_gpu_cards) +where we have substituted the CIFAR-10 model architecture with Inception v3. The +primary differences with that setup are: + +* Calculate and update the batch-norm statistics during training so that they + may be substituted in during evaluation. +* Specify the model architecture using a (still experimental) higher level + language called TensorFlow-Slim. + +For more details about TensorFlow-Slim, please see the [Slim README](inception/slim/README.md). Please note that this higher-level language is still +*experimental* and the API may change over time depending on usage and +subsequent research. + +## Getting Started + +Before you run the training script for the first time, you will need to download +and convert the ImageNet data to native TFRecord format. The TFRecord format +consists of a set of sharded files where each entry is a serialized `tf.Example` +proto. Each `tf.Example` proto contains the ImageNet image (JPEG encoded) as +well as metadata such as label and bounding box information. See +[`parse_example_proto`](inception/image_processing.py) for details. + +We provide a single [script](inception/data/download_and_preprocess_imagenet.sh) for +downloading and converting ImageNet data to TFRecord format. Downloading and +preprocessing the data may take several hours (up to half a day) depending on +your network and computer speed. Please be patient. + +To begin, you will need to sign up for an account with [ImageNet](http://image-net.org) to gain access to the data. Look for the sign up page, +create an account and request an access key to download the data. + +After you have `USERNAME` and `PASSWORD`, you are ready to run our script. Make +sure that your hard disk has at least 500 GB of free space for downloading and +storing the data. Here we select `DATA_DIR=$HOME/imagenet-data` as such a +location but feel free to edit accordingly. + +When you run the below script, please enter *USERNAME* and *PASSWORD* when +prompted. This will occur at the very beginning. Once these values are entered, +you will not need to interact with the script again. + +```shell +# location of where to place the ImageNet data +DATA_DIR=$HOME/imagenet-data + +# build the preprocessing script. +cd tensorflow-models/inception +bazel build //inception:download_and_preprocess_imagenet + +# run it +bazel-bin/inception/download_and_preprocess_imagenet "${DATA_DIR}" +``` + +The final line of the output script should read: + +```shell +2016-02-17 14:30:17.287989: Finished writing all 1281167 images in data set. +``` + +When the script finishes, you will find 1024 training files and 128 validation +files in the `DATA_DIR`. The files will match the patterns +`train-?????-of-01024` and `validation-?????-of-00128`, respectively. + +[Congratulations!](https://www.youtube.com/watch?v=9bZkp7q19f0) You are now +ready to train or evaluate with the ImageNet data set. + +## How to Train from Scratch + +**WARNING** Training an Inception v3 network from scratch is a computationally +intensive task and depending on your compute setup may take several days or even +weeks. 
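+
+Before committing to such a long run, it can be worth sanity-checking the
+converted data. A small sketch (TF 1.x; the path assumes the `DATA_DIR` chosen
+above) that decodes the metadata of the first record in one training shard:
+
+```python
+import os
+import tensorflow as tf
+
+shard = os.path.expanduser("~/imagenet-data/train-00000-of-01024")
+record = next(tf.python_io.tf_record_iterator(shard))
+example = tf.train.Example()
+example.ParseFromString(record)
+features = example.features.feature
+print(features['image/class/label'].int64_list.value)  # integer label in [1, 1000]
+print(features['image/class/text'].bytes_list.value)   # human-readable label
+print(len(features['image/encoded'].bytes_list.value[0]), "bytes of JPEG data")
+```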
+
+*Before proceeding* please read the [Convolutional Neural Networks](https://www.tensorflow.org/tutorials/deep_cnn/index.html) tutorial; in
+particular, focus on [Training a Model Using Multiple GPU Cards](https://www.tensorflow.org/tutorials/deep_cnn/index.html#launching_and_training_the_model_on_multiple_gpu_cards). The model training method is nearly identical to that described in the
+CIFAR-10 multi-GPU model training. Briefly, the model training:
+
+* Places an individual model replica on each GPU.
+* Splits the batch across the GPUs.
+* Updates model parameters synchronously by waiting for all GPUs to finish
+  processing a batch of data.
+
+The training procedure is encapsulated by this diagram of how operations and
+variables are placed on the CPU and GPUs, respectively.
+
+*(Figure: placement of operations and variables on the CPU and GPUs during multi-GPU training.)*
+ +Each tower computes the gradients for a portion of the batch and the gradients +are combined and averaged across the multiple towers in order to provide a +single update of the Variables stored on the CPU. + +A crucial aspect of training a network of this size is *training speed* in terms +of wall-clock time. The training speed is dictated by many factors -- most +importantly the batch size and the learning rate schedule. Both of these +parameters are heavily coupled to the hardware set up. + +Generally speaking, a batch size is a difficult parameter to tune as it requires +balancing memory demands of the model, memory available on the GPU and speed of +computation. Generally speaking, employing larger batch sizes leads to more +efficient computation and potentially more efficient training steps. + +We have tested several hardware setups for training this model from scratch but +we emphasize that depending your hardware set up, you may need to adapt the +batch size and learning rate schedule. + +Please see the comments in `inception_train.py` for a few selected learning rate +plans based on some selected hardware setups. + +To train this model, you simply need to specify the following: + +```shell +# Build the model. Note that we need to make sure the TensorFlow is ready to +# use before this as this command will not build TensorFlow. +cd tensorflow-models/inception +bazel build //inception:imagenet_train + +# run it +bazel-bin/inception/imagenet_train --num_gpus=1 --batch_size=32 --train_dir=/tmp/imagenet_train --data_dir=/tmp/imagenet_data +``` + +The model reads in the ImageNet training data from `--data_dir`. If you followed +the instructions in [Getting Started](#getting-started), then set +`--data_dir="${DATA_DIR}"`. The script assumes that there exists a set of +sharded TFRecord files containing the ImageNet data. If you have not created +TFRecord files, please refer to [Getting Started](#getting-started) + +Here is the output of the above command line when running on a Tesla K40c: + +```shell +2016-03-07 12:24:59.922898: step 0, loss = 13.11 (5.3 examples/sec; 6.064 sec/batch) +2016-03-07 12:25:55.206783: step 10, loss = 13.71 (9.4 examples/sec; 3.394 sec/batch) +2016-03-07 12:26:28.905231: step 20, loss = 14.81 (9.5 examples/sec; 3.380 sec/batch) +2016-03-07 12:27:02.699719: step 30, loss = 14.45 (9.5 examples/sec; 3.378 sec/batch) +2016-03-07 12:27:36.515699: step 40, loss = 13.98 (9.5 examples/sec; 3.376 sec/batch) +2016-03-07 12:28:10.220956: step 50, loss = 13.92 (9.6 examples/sec; 3.327 sec/batch) +2016-03-07 12:28:43.658223: step 60, loss = 13.28 (9.6 examples/sec; 3.350 sec/batch) +... +``` + +In this example, a log entry is printed every 10 step and the line includes the +total loss (starts around 13.0-14.0) and the speed of processing in terms of +throughput (examples / sec) and batch speed (sec/batch). + +The number of GPU devices is specified by `--num_gpus` (which defaults to 1). +Specifying `--num_gpus` greater then 1 splits the batch evenly split across the +GPU cards. + +```shell +# Build the model. Note that we need to make sure the TensorFlow is ready to +# use before this as this command will not build TensorFlow. 
+cd tensorflow-models/inception +bazel build //inception:imagenet_train + +# run it +bazel-bin/inception/imagenet_train --num_gpus=2 --batch_size=64 --train_dir=/tmp/imagenet_train +``` + +This model splits the batch of 64 images across 2 GPUs and calculates the +average gradient by waiting for both GPUs to finish calculating the gradients +from their respective data (See diagram above). Generally speaking, using larger +numbers of GPUs leads to higher throughput as well as the opportunity to use +larger batch sizes. In turn, larger batch sizes imply better estimates of the +gradient enabling the usage of higher learning rates. In summary, using more +GPUs results in simply faster training speed. + +Note that selecting a batch size is a difficult parameter to tune as it requires +balancing memory demands of the model, memory available on the GPU and speed of +computation. Generally speaking, employing larger batch sizes leads to more +efficient computation and potentially more efficient training steps. + +Note that there is considerable noise in the loss function on individual steps +in the previous log. Because of this noise, it is difficult to discern how well +a model is learning. The solution to the last problem is to launch TensorBoard +pointing to the directory containing the events log. + +```shell +tensorboard --logdir=/tmp/imagenet_train +``` + +TensorBoard has access to the many Summaries produced by the model that describe +multitudes of statistics tracking the model behavior and the quality of the +learned model. In particular, TensorBoard tracks a exponentially smoothed +version of the loss. In practice, it is far easier to judge how well a model +learns by monitoring the smoothed version of the loss. + +## How to Train from Scratch in a Distributed Setting + +**NOTE** Distributed TensorFlow requires version 0.8 or later. + +Distributed TensorFlow lets us use multiple machines to train a model faster. +This is quite different from the training with multiple GPU towers on a single +machine where all parameters and gradients computation are in the same place. We +coordinate the computation across multiple machines by employing a centralized +repository for parameters that maintains a unified, single copy of model +parameters. Each individual machine sends gradient updates to the centralized +parameter repository which coordinates these updates and sends back updated +parameters to the individual machines running the model training. + +We term each machine that runs a copy of the training a `worker` or `replica`. +We term each machine that maintains model parameters a `ps`, short for +`parameter server`. Note that we might have more than one machine acting as a +`ps` as the model parameters may be sharded across multiple machines. + +Variables may be updated with synchronous or asynchronous gradient updates. One +may construct a an [`Optimizer`](https://www.tensorflow.org/api_docs/python/train.html#optimizers) in TensorFlow +that constructs the necessary graph for either case diagrammed below from the +TensorFlow [Whitepaper](http://download.tensorflow.org/paper/whitepaper2015.pdf): + +
+
+*(Figure: synchronous and asynchronous gradient update schemes, from the TensorFlow whitepaper.)*
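+
+In TF 1.x, the synchronous variant is typically obtained by wrapping the
+optimizer in `tf.train.SyncReplicasOptimizer`, which aggregates the gradients
+from all replicas before a single update is applied to the parameters on the
+`ps` tasks. A minimal sketch (not the exact `inception_distributed_train.py`
+code; the RMSProp values mirror the constants listed later in this README):
+
+```python
+import tensorflow as tf
+
+num_workers = 2  # i.e. len(worker_hosts)
+opt = tf.train.RMSPropOptimizer(learning_rate=0.1, decay=0.9,
+                                momentum=0.9, epsilon=1.0)
+opt = tf.train.SyncReplicasOptimizer(opt,
+                                     replicas_to_aggregate=num_workers,
+                                     total_num_replicas=num_workers)
+```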
+ +In [a recent paper](https://arxiv.org/abs/1604.00981), synchronous gradient +updates have demonstrated to reach higher accuracy in a shorter amount of time. +In this distributed Inception example we employ synchronous gradient updates. + +Note that in this example each replica has a single tower that uses one GPU. + +The command-line flags `worker_hosts` and `ps_hosts` specify available servers. +The same binary will be used for both the `worker` jobs and the `ps` jobs. +Command line flag `job_name` will be used to specify what role a task will be +playing and `task_id` will be used to identify which one of the jobs it is +running. Several things to note here: + +* The numbers of `ps` and `worker` tasks are inferred from the lists of hosts + specified in the flags. The `task_id` should be within the range `[0, + num_ps_tasks)` for `ps` tasks and `[0, num_worker_tasks)` for `worker` + tasks. +* `ps` and `worker` tasks can run on the same machine, as long as that machine + has sufficient resources to handle both tasks. Note that the `ps` task does + not benefit from a GPU, so it should not attempt to use one (see below). +* Multiple `worker` tasks can run on the same machine with multiple GPUs so + machine_A with 2 GPUs may have 2 workers while machine_B with 1 GPU just has + 1 worker. +* The default learning rate schedule works well for a wide range of number of + replicas [25, 50, 100] but feel free to tune it for even better results. +* The command line of both `ps` and `worker` tasks should include the complete + list of `ps_hosts` and `worker_hosts`. +* There is a chief `worker` among all workers which defaults to `worker` 0. + The chief will be in charge of initializing all the parameters, writing out + the summaries and the checkpoint. The checkpoint and summary will be in the + `train_dir` of the host for `worker` 0. +* Each worker processes a batch_size number of examples but each gradient + update is computed from all replicas. Hence, the effective batch size of + this model is batch_size * num_workers. + +```shell +# Build the model. Note that we need to make sure the TensorFlow is ready to +# use before this as this command will not build TensorFlow. +cd tensorflow-models/inception +bazel build //inception:imagenet_distributed_train + +# To start worker 0, go to the worker0 host and run the following (Note that +# task_id should be in the range [0, num_worker_tasks): +bazel-bin/inception/imagenet_distributed_train \ +--batch_size=32 \ +--data_dir=$HOME/imagenet-data \ +--job_name='worker' \ +--task_id=0 \ +--ps_hosts='ps0.example.com:2222' \ +--worker_hosts='worker0.example.com:2222,worker1.example.com:2222' + +# To start worker 1, go to the worker1 host and run the following (Note that +# task_id should be in the range [0, num_worker_tasks): +bazel-bin/inception/imagenet_distributed_train \ +--batch_size=32 \ +--data_dir=$HOME/imagenet-data \ +--job_name='worker' \ +--task_id=1 \ +--ps_hosts='ps0.example.com:2222' \ +--worker_hosts='worker0.example.com:2222,worker1.example.com:2222' + +# To start the parameter server (ps), go to the ps host and run the following (Note +# that task_id should be in the range [0, num_ps_tasks): +bazel-bin/inception/imagenet_distributed_train \ +--job_name='ps' \ +--task_id=0 \ +--ps_hosts='ps0.example.com:2222' \ +--worker_hosts='worker0.example.com:2222,worker1.example.com:2222' +``` + +If you have installed a GPU-compatible version of TensorFlow, the `ps` will also +try to allocate GPU memory although it is not helpful. 
This could potentially +crash the worker on the same machine as it has little to no GPU memory to +allocate. To avoid this, you can prepend the previous command to start `ps` +with: `CUDA_VISIBLE_DEVICES=''` + +```shell +CUDA_VISIBLE_DEVICES='' bazel-bin/inception/imagenet_distributed_train \ +--job_name='ps' \ +--task_id=0 \ +--ps_hosts='ps0.example.com:2222' \ +--worker_hosts='worker0.example.com:2222,worker1.example.com:2222' +``` + +If you have run everything correctly, you should see a log in each `worker` job +that looks like the following. Note the training speed varies depending on your +hardware and the first several steps could take much longer. + +```shell +INFO:tensorflow:PS hosts are: ['ps0.example.com:2222', 'ps1.example.com:2222'] +INFO:tensorflow:Worker hosts are: ['worker0.example.com:2222', 'worker1.example.com:2222'] +I tensorflow/core/distributed_runtime/rpc/grpc_channel.cc:206] Initialize HostPortsGrpcChannelCache for job ps -> {ps0.example.com:2222, ps1.example.com:2222} +I tensorflow/core/distributed_runtime/rpc/grpc_channel.cc:206] Initialize HostPortsGrpcChannelCache for job worker -> {localhost:2222, worker1.example.com:2222} +I tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc:202] Started server with target: grpc://localhost:2222 +INFO:tensorflow:Created variable global_step:0 with shape () and init + +... + +INFO:tensorflow:Created variable logits/logits/biases:0 with shape (1001,) and init +INFO:tensorflow:SyncReplicas enabled: replicas_to_aggregate=2; total_num_replicas=2 +INFO:tensorflow:2016-04-13 01:56:26.405639 Supervisor +INFO:tensorflow:Started 2 queues for processing input data. +INFO:tensorflow:global_step/sec: 0 +INFO:tensorflow:Worker 0: 2016-04-13 01:58:40.342404: step 0, loss = 12.97(0.0 examples/sec; 65.428  sec/batch) +INFO:tensorflow:global_step/sec: 0.0172907 +... +``` + +and a log in each `ps` job that looks like the following: + +```shell +INFO:tensorflow:PS hosts are: ['ps0.example.com:2222', 'ps1.example.com:2222'] +INFO:tensorflow:Worker hosts are: ['worker0.example.com:2222', 'worker1.example.com:2222'] +I tensorflow/core/distributed_runtime/rpc/grpc_channel.cc:206] Initialize HostPortsGrpcChannelCache for job ps -> {localhost:2222, ps1.example.com:2222} +I tensorflow/core/distributed_runtime/rpc/grpc_channel.cc:206] Initialize HostPortsGrpcChannelCache for job worker -> {worker0.example.com:2222, worker1.example.com:2222} +I tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc:202] Started server with target: grpc://localhost:2222 +``` + +If you compiled TensorFlow (from v1.1-rc3) with VERBS support and you have the +required device and IB verbs SW stack, you can specify --protocol='grpc+verbs' +In order to use Verbs RDMA for Tensor passing between workers and ps. +Need to add the the --protocol flag in all tasks (ps and workers). +The default protocol is the TensorFlow default protocol of grpc. + + +[Congratulations!](https://www.youtube.com/watch?v=9bZkp7q19f0) You are now +training Inception in a distributed manner. + +## How to Evaluate + +Evaluating an Inception v3 model on the ImageNet 2012 validation data set +requires running a separate binary. + +The evaluation procedure is nearly identical to [Evaluating a Model](https://www.tensorflow.org/tutorials/deep_cnn/index.html#evaluating_a_model) +described in the [Convolutional Neural Network](https://www.tensorflow.org/tutorials/deep_cnn/index.html) tutorial. 
+ +**WARNING** Be careful not to run the evaluation and training binary on the same +GPU or else you might run out of memory. Consider running the evaluation on a +separate GPU if available or suspending the training binary while running the +evaluation on the same GPU. + +Briefly, one can evaluate the model by running: + +```shell +# Build the model. Note that we need to make sure the TensorFlow is ready to +# use before this as this command will not build TensorFlow. +cd tensorflow-models/inception +bazel build //inception:imagenet_eval + +# run it +bazel-bin/inception/imagenet_eval --checkpoint_dir=/tmp/imagenet_train --eval_dir=/tmp/imagenet_eval +``` + +Note that we point `--checkpoint_dir` to the location of the checkpoints saved +by `inception_train.py` above. Running the above command results in the +following output: + +```shell +2016-02-17 22:32:50.391206: precision @ 1 = 0.735 +... +``` + +The script calculates the precision @ 1 over the entire validation data +periodically. The precision @ 1 measures the how often the highest scoring +prediction from the model matched the ImageNet label -- in this case, 73.5%. If +you wish to run the eval just once and not periodically, append the `--run_once` +option. + +Much like the training script, `imagenet_eval.py` also exports summaries that +may be visualized in TensorBoard. These summaries calculate additional +statistics on the predictions (e.g. recall @ 5) as well as monitor the +statistics of the model activations and weights during evaluation. + +## How to Fine-Tune a Pre-Trained Model on a New Task + +### Getting Started + +Much like training the ImageNet model we must first convert a new data set to +the sharded TFRecord format which each entry is a serialized `tf.Example` proto. + +We have provided a script demonstrating how to do this for small data set of of +a few thousand flower images spread across 5 labels: + +```shell +daisy, dandelion, roses, sunflowers, tulips +``` + +There is a single automated script that downloads the data set and converts it +to the TFRecord format. Much like the ImageNet data set, each record in the +TFRecord format is a serialized `tf.Example` proto whose entries include a +JPEG-encoded string and an integer label. Please see [`parse_example_proto`](inception/image_processing.py) for details. + +The script just takes a few minutes to run depending your network connection +speed for downloading and processing the images. Your hard disk requires 200MB +of free storage. Here we select `DATA_DIR=/tmp/flowers-data/` as such a location +but feel free to edit accordingly. + +```shell +# location of where to place the flowers data +FLOWERS_DATA_DIR=/tmp/flowers-data/ + +# build the preprocessing script. +cd tensorflow-models/inception +bazel build //inception:download_and_preprocess_flowers + +# run it +bazel-bin/inception/download_and_preprocess_flowers "${FLOWERS_DATA_DIR}" +``` + +If the script runs successfully, the final line of the terminal output should +look like: + +```shell +2016-02-24 20:42:25.067551: Finished writing all 3170 images in data set. +``` + +When the script finishes you will find 2 shards for the training and validation +files in the `DATA_DIR`. The files will match the patterns `train-?????-of-00002` +and `validation-?????-of-00002`, respectively. + +**NOTE** If you wish to prepare a custom image data set for transfer learning, +you will need to invoke [`build_image_data.py`](inception/data/build_image_data.py) on +your custom data set. 
Please see the associated options and assumptions behind +this script by reading the comments section of [`build_image_data.py`](inception/data/build_image_data.py). Also, if your custom data has a different +number of examples or classes, you need to change the appropriate values in +[`imagenet_data.py`](inception/imagenet_data.py). + +The second piece you will need is a trained Inception v3 image model. You have +the option of either training one yourself (See [How to Train from Scratch](#how-to-train-from-scratch) for details) or you can download a pre-trained +model like so: + +```shell +# location of where to place the Inception v3 model +INCEPTION_MODEL_DIR=$HOME/inception-v3-model +mkdir -p ${INCEPTION_MODEL_DIR} +cd ${INCEPTION_MODEL_DIR} + +# download the Inception v3 model +curl -O http://download.tensorflow.org/models/image/imagenet/inception-v3-2016-03-01.tar.gz +tar xzf inception-v3-2016-03-01.tar.gz + +# this will create a directory called inception-v3 which contains the following files. +> ls inception-v3 +README.txt +checkpoint +model.ckpt-157585 +``` + +[Congratulations!](https://www.youtube.com/watch?v=9bZkp7q19f0) You are now +ready to fine-tune your pre-trained Inception v3 model with the flower data set. + +### How to Retrain a Trained Model on the Flowers Data + +We are now ready to fine-tune a pre-trained Inception-v3 model on the flowers +data set. This requires two distinct changes to our training procedure: + +1. Build the exact same model as previously except we change the number of + labels in the final classification layer. + +2. Restore all weights from the pre-trained Inception-v3 except for the final + classification layer; this will get randomly initialized instead. + +We can perform these two operations by specifying two flags: +`--pretrained_model_checkpoint_path` and `--fine_tune`. The first flag is a +string that points to the path of a pre-trained Inception-v3 model. If this flag +is specified, it will load the entire model from the checkpoint before the +script begins training. + +The second flag `--fine_tune` is a boolean that indicates whether the last +classification layer should be randomly initialized or restored. You may set +this flag to false if you wish to continue training a pre-trained model from a +checkpoint. If you set this flag to true, you can train a new classification +layer from scratch. + +In order to understand how `--fine_tune` works, please see the discussion on +`Variables` in the TensorFlow-Slim [`README.md`](inception/slim/README.md). + +Putting this all together you can retrain a pre-trained Inception-v3 model on +the flowers data set with the following command. + +```shell +# Build the model. Note that we need to make sure the TensorFlow is ready to +# use before this as this command will not build TensorFlow. +cd tensorflow-models/inception +bazel build //inception:flowers_train + +# Path to the downloaded Inception-v3 model. +MODEL_PATH="${INCEPTION_MODEL_DIR}/inception-v3/model.ckpt-157585" + +# Directory where the flowers data resides. +FLOWERS_DATA_DIR=/tmp/flowers-data/ + +# Directory where to save the checkpoint and events files. +TRAIN_DIR=/tmp/flowers_train/ + +# Run the fine-tuning on the flowers data set starting from the pre-trained +# Imagenet-v3 model. 
+bazel-bin/inception/flowers_train \ + --train_dir="${TRAIN_DIR}" \ + --data_dir="${FLOWERS_DATA_DIR}" \ + --pretrained_model_checkpoint_path="${MODEL_PATH}" \ + --fine_tune=True \ + --initial_learning_rate=0.001 \ + --input_queue_memory_factor=1 +``` + +We have added a few extra options to the training procedure. + +* Fine-tuning a model a separate data set requires significantly lowering the + initial learning rate. We set the initial learning rate to 0.001. +* The flowers data set is quite small so we shrink the size of the shuffling + queue of examples. See [Adjusting Memory Demands](#adjusting-memory-demands) + for more details. + +The training script will only reports the loss. To evaluate the quality of the +fine-tuned model, you will need to run `flowers_eval`: + +```shell +# Build the model. Note that we need to make sure the TensorFlow is ready to +# use before this as this command will not build TensorFlow. +cd tensorflow-models/inception +bazel build //inception:flowers_eval + +# Directory where we saved the fine-tuned checkpoint and events files. +TRAIN_DIR=/tmp/flowers_train/ + +# Directory where the flowers data resides. +FLOWERS_DATA_DIR=/tmp/flowers-data/ + +# Directory where to save the evaluation events files. +EVAL_DIR=/tmp/flowers_eval/ + +# Evaluate the fine-tuned model on a hold-out of the flower data set. +bazel-bin/inception/flowers_eval \ + --eval_dir="${EVAL_DIR}" \ + --data_dir="${FLOWERS_DATA_DIR}" \ + --subset=validation \ + --num_examples=500 \ + --checkpoint_dir="${TRAIN_DIR}" \ + --input_queue_memory_factor=1 \ + --run_once +``` + +We find that the evaluation arrives at roughly 93.4% precision@1 after the model +has been running for 2000 steps. + +```shell +Successfully loaded model from /tmp/flowers/model.ckpt-1999 at step=1999. +2016-03-01 16:52:51.761219: starting evaluation on (validation). +2016-03-01 16:53:05.450419: [20 batches out of 20] (36.5 examples/sec; 0.684sec/batch) +2016-03-01 16:53:05.450471: precision @ 1 = 0.9340 recall @ 5 = 0.9960 [500 examples] +``` + +## How to Construct a New Dataset for Retraining + +One can use the existing scripts supplied with this model to build a new dataset +for training or fine-tuning. The main script to employ is +[`build_image_data.py`](inception/data/build_image_data.py). Briefly, this script takes a +structured directory of images and converts it to a sharded `TFRecord` that can +be read by the Inception model. + +In particular, you will need to create a directory of training images that +reside within `$TRAIN_DIR` and `$VALIDATION_DIR` arranged as such: + +```shell + $TRAIN_DIR/dog/image0.jpeg + $TRAIN_DIR/dog/image1.jpg + $TRAIN_DIR/dog/image2.png + ... + $TRAIN_DIR/cat/weird-image.jpeg + $TRAIN_DIR/cat/my-image.jpeg + $TRAIN_DIR/cat/my-image.JPG + ... + $VALIDATION_DIR/dog/imageA.jpeg + $VALIDATION_DIR/dog/imageB.jpg + $VALIDATION_DIR/dog/imageC.png + ... + $VALIDATION_DIR/cat/weird-image.PNG + $VALIDATION_DIR/cat/that-image.jpg + $VALIDATION_DIR/cat/cat.JPG + ... +``` +**NOTE**: This script will append an extra background class indexed at 0, so +your class labels will range from 0 to num_labels. Using the example above, the +corresponding class labels generated from `build_image_data.py` will be as +follows: +```shell +0 +1 dog +2 cat +``` + +Each sub-directory in `$TRAIN_DIR` and `$VALIDATION_DIR` corresponds to a unique +label for the images that reside within that sub-directory. The images may be +JPEG or PNG images. We do not support other images types currently. 
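+
+Before running the conversion described next, a small hypothetical helper can
+check the layout and write the labels file (one label per line, in the order
+you want the classes numbered; the paths are only examples):
+
+```python
+import os
+
+train_dir = os.path.expanduser("~/my-train-data")  # example path
+labels = sorted(d for d in os.listdir(train_dir)
+                if os.path.isdir(os.path.join(train_dir, d)))
+for label in labels:
+  count = len(os.listdir(os.path.join(train_dir, label)))
+  print("%-15s %d images" % (label, count))
+
+# build_image_data.py assigns the first line of the labels file to class 1,
+# the second to class 2, and so on; class 0 is reserved for the background.
+with open("labels.txt", "w") as f:
+  f.write("\n".join(labels) + "\n")
+```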
+ +Once the data is arranged in this directory structure, we can run +`build_image_data.py` on the data to generate the sharded `TFRecord` dataset. +Each entry of the `TFRecord` is a serialized `tf.Example` protocol buffer. A +complete list of information contained in the `tf.Example` is described in the +comments of `build_image_data.py`. + +To run `build_image_data.py`, you can run the following command line: + +```shell +# location to where to save the TFRecord data. +OUTPUT_DIRECTORY=$HOME/my-custom-data/ + +# build the preprocessing script. +cd tensorflow-models/inception +bazel build //inception:build_image_data + +# convert the data. +bazel-bin/inception/build_image_data \ + --train_directory="${TRAIN_DIR}" \ + --validation_directory="${VALIDATION_DIR}" \ + --output_directory="${OUTPUT_DIRECTORY}" \ + --labels_file="${LABELS_FILE}" \ + --train_shards=128 \ + --validation_shards=24 \ + --num_threads=8 +``` + +where the `$OUTPUT_DIRECTORY` is the location of the sharded `TFRecords`. The +`$LABELS_FILE` will be a text file that is read by the script that provides +a list of all of the labels. For instance, in the case flowers data set, the +`$LABELS_FILE` contained the following data: + +```shell +daisy +dandelion +roses +sunflowers +tulips +``` + +Note that each row of each label corresponds with the entry in the final +classifier in the model. That is, the `daisy` corresponds to the classifier for +entry `1`; `dandelion` is entry `2`, etc. We skip label `0` as a background +class. + +After running this script produces files that look like the following: + +```shell + $TRAIN_DIR/train-00000-of-00128 + $TRAIN_DIR/train-00001-of-00128 + ... + $TRAIN_DIR/train-00127-of-00128 + +and + + $VALIDATION_DIR/validation-00000-of-00024 + $VALIDATION_DIR/validation-00001-of-00024 + ... + $VALIDATION_DIR/validation-00023-of-00024 +``` + +where 128 and 24 are the number of shards specified for each dataset, +respectively. Generally speaking, we aim for selecting the number of shards such +that roughly 1024 images reside in each shard. Once this data set is built, you +are ready to train or fine-tune an Inception model on this data set. + +Note, if you are piggy backing on the flowers retraining scripts, be sure to +update `num_classes()` and `num_examples_per_epoch()` in `flowers_data.py` +to correspond with your data. + +## Practical Considerations for Training a Model + +The model architecture and training procedure is heavily dependent on the +hardware used to train the model. If you wish to train or fine-tune this model +on your machine **you will need to adjust and empirically determine a good set +of training hyper-parameters for your setup**. What follows are some general +considerations for novices. + +### Finding Good Hyperparameters + +Roughly 5-10 hyper-parameters govern the speed at which a network is trained. In +addition to `--batch_size` and `--num_gpus`, there are several constants defined +in [inception_train.py](inception/inception_train.py) which dictate the learning +schedule. + +```shell +RMSPROP_DECAY = 0.9 # Decay term for RMSProp. +MOMENTUM = 0.9 # Momentum in RMSProp. +RMSPROP_EPSILON = 1.0 # Epsilon term for RMSProp. +INITIAL_LEARNING_RATE = 0.1 # Initial learning rate. +NUM_EPOCHS_PER_DECAY = 30.0 # Epochs after which learning rate decays. +LEARNING_RATE_DECAY_FACTOR = 0.16 # Learning rate decay factor. +``` + +There are many papers that discuss the various tricks and trade-offs associated +with training a model with stochastic gradient descent. 
For those new to the +field, some great references are: + +* Y Bengio, [Practical recommendations for gradient-based training of deep + architectures](http://arxiv.org/abs/1206.5533) +* I Goodfellow, Y Bengio and A Courville, [Deep Learning] + (http://www.deeplearningbook.org/) + +What follows is a summary of some general advice for identifying appropriate +model hyper-parameters in the context of this particular model training setup. +Namely, this library provides *synchronous* updates to model parameters based on +batch-splitting the model across multiple GPUs. + +* Higher learning rates leads to faster training. Too high of learning rate + leads to instability and will cause model parameters to diverge to infinity + or NaN. + +* Larger batch sizes lead to higher quality estimates of the gradient and + permit training the model with higher learning rates. + +* Often the GPU memory is a bottleneck that prevents employing larger batch + sizes. Employing more GPUs allows one to use larger batch sizes because + this model splits the batch across the GPUs. + +**NOTE** If one wishes to train this model with *asynchronous* gradient updates, +one will need to substantially alter this model and new considerations need to +be factored into hyperparameter tuning. See [Large Scale Distributed Deep +Networks](http://research.google.com/archive/large_deep_networks_nips2012.html) +for a discussion in this domain. + +### Adjusting Memory Demands + +Training this model has large memory demands in terms of the CPU and GPU. Let's +discuss each item in turn. + +GPU memory is relatively small compared to CPU memory. Two items dictate the +amount of GPU memory employed -- model architecture and batch size. Assuming +that you keep the model architecture fixed, the sole parameter governing the GPU +demand is the batch size. A good rule of thumb is to try employ as large of +batch size as will fit on the GPU. + +If you run out of GPU memory, either lower the `--batch_size` or employ more +GPUs on your desktop. The model performs batch-splitting across GPUs, thus N +GPUs can handle N times the batch size of 1 GPU. + +The model requires a large amount of CPU memory as well. We have tuned the model +to employ about ~20GB of CPU memory. Thus, having access to about 40 GB of CPU +memory would be ideal. + +If that is not possible, you can tune down the memory demands of the model via +lowering `--input_queue_memory_factor`. Images are preprocessed asynchronously +with respect to the main training across `--num_preprocess_threads` threads. The +preprocessed images are stored in shuffling queue in which each GPU performs a +dequeue operation in order to receive a `batch_size` worth of images. + +In order to guarantee good shuffling across the data, we maintain a large +shuffling queue of 1024 x `input_queue_memory_factor` images. For the current +model architecture, this corresponds to about 4GB of CPU memory. You may lower +`input_queue_memory_factor` in order to decrease the memory footprint. Keep in +mind though that lowering this value drastically may result in a model with +slightly lower predictive accuracy when training from scratch. Please see +comments in [`image_processing.py`](inception/image_processing.py) for more details. + +## Troubleshooting + +#### The model runs out of CPU memory. + +In lieu of buying more CPU memory, an easy fix is to decrease +`--input_queue_memory_factor`. See [Adjusting Memory Demands](#adjusting-memory-demands). + +#### The model runs out of GPU memory. 
+ +The data is not able to fit on the GPU card. The simplest solution is to +decrease the batch size of the model. Otherwise, you will need to think about a +more sophisticated method for specifying the training which cuts up the model +across multiple `session.run()` calls or partitions the model across multiple +GPUs. See [Using GPUs](https://www.tensorflow.org/how_tos/using_gpu/index.html) +and [Adjusting Memory Demands](#adjusting-memory-demands) for more information. + +#### The model training results in NaN's. + +The learning rate of the model is too high. Turn down your learning rate. + +#### I wish to train a model with a different image size. + +The simplest solution is to artificially resize your images to `299x299` pixels. +See [Images](https://www.tensorflow.org/api_docs/python/image.html) section for +many resizing, cropping and padding methods. Note that the entire model +architecture is predicated on a `299x299` image, thus if you wish to change the +input image size, then you may need to redesign the entire model architecture. + +#### What hardware specification are these hyper-parameters targeted for? + +We targeted a desktop with 128GB of CPU ram connected to 8 NVIDIA Tesla K40 GPU +cards but we have run this on desktops with 32GB of CPU ram and 1 NVIDIA Tesla +K40. You can get a sense of the various training configurations we tested by +reading the comments in [`inception_train.py`](inception/inception_train.py). + +#### How do I continue training from a checkpoint in distributed setting? + +You only need to make sure that the checkpoint is in a location that can be +reached by all of the `ps` tasks. By specifying the checkpoint location with +`--train_dir` , the `ps` servers will load the checkpoint before commencing +training. diff --git a/models/research/inception/WORKSPACE b/models/research/inception/WORKSPACE new file mode 100644 index 0000000000000000000000000000000000000000..2d7b4fb254a0fcebe695cb3fd3685af29a02e0b0 --- /dev/null +++ b/models/research/inception/WORKSPACE @@ -0,0 +1 @@ +workspace(name = "inception") diff --git a/models/research/inception/g3doc/inception_v3_architecture.png b/models/research/inception/g3doc/inception_v3_architecture.png new file mode 100644 index 0000000000000000000000000000000000000000..91fb734a104b2f63114ade7c8f9b2f95ce6334a6 Binary files /dev/null and b/models/research/inception/g3doc/inception_v3_architecture.png differ diff --git a/models/research/inception/inception/BUILD b/models/research/inception/inception/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..21fc27aa57c14f6a72359cf15d446787c8ea6c2e --- /dev/null +++ b/models/research/inception/inception/BUILD @@ -0,0 +1,198 @@ +# Description: +# Example TensorFlow models for ImageNet. 
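+#
+# The targets below back the `bazel build //inception:...` commands used in the
+# README, e.g. imagenet_train, imagenet_distributed_train, imagenet_eval,
+# flowers_train, flowers_eval and build_image_data.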
+ +package(default_visibility = [":internal"]) + +licenses(["notice"]) # Apache 2.0 + +exports_files(["LICENSE"]) + +package_group( + name = "internal", + packages = ["//inception/..."], +) + +py_library( + name = "dataset", + srcs = [ + "dataset.py", + ], +) + +py_library( + name = "imagenet_data", + srcs = [ + "imagenet_data.py", + ], + deps = [ + ":dataset", + ], +) + +py_library( + name = "flowers_data", + srcs = [ + "flowers_data.py", + ], + deps = [ + ":dataset", + ], +) + +py_library( + name = "image_processing", + srcs = [ + "image_processing.py", + ], +) + +py_library( + name = "inception", + srcs = [ + "inception_model.py", + ], + visibility = ["//visibility:public"], + deps = [ + ":dataset", + "//inception/slim", + ], +) + +py_binary( + name = "imagenet_eval", + srcs = [ + "imagenet_eval.py", + ], + deps = [ + ":imagenet_data", + ":inception_eval", + ], +) + +py_binary( + name = "flowers_eval", + srcs = [ + "flowers_eval.py", + ], + deps = [ + ":flowers_data", + ":inception_eval", + ], +) + +py_library( + name = "inception_eval", + srcs = [ + "inception_eval.py", + ], + deps = [ + ":image_processing", + ":inception", + ], +) + +py_binary( + name = "imagenet_train", + srcs = [ + "imagenet_train.py", + ], + deps = [ + ":imagenet_data", + ":inception_train", + ], +) + +py_binary( + name = "imagenet_distributed_train", + srcs = [ + "imagenet_distributed_train.py", + ], + deps = [ + ":imagenet_data", + ":inception_distributed_train", + ], +) + +py_binary( + name = "flowers_train", + srcs = [ + "flowers_train.py", + ], + deps = [ + ":flowers_data", + ":inception_train", + ], +) + +py_library( + name = "inception_train", + srcs = [ + "inception_train.py", + ], + deps = [ + ":image_processing", + ":inception", + ], +) + +py_library( + name = "inception_distributed_train", + srcs = [ + "inception_distributed_train.py", + ], + deps = [ + ":image_processing", + ":inception", + ], +) + +py_binary( + name = "build_image_data", + srcs = ["data/build_image_data.py"], +) + +sh_binary( + name = "download_and_preprocess_flowers", + srcs = ["data/download_and_preprocess_flowers.sh"], + data = [ + ":build_image_data", + ], +) + +sh_binary( + name = "download_and_preprocess_imagenet", + srcs = ["data/download_and_preprocess_imagenet.sh"], + data = [ + "data/download_imagenet.sh", + "data/imagenet_2012_validation_synset_labels.txt", + "data/imagenet_lsvrc_2015_synsets.txt", + "data/imagenet_metadata.txt", + "data/preprocess_imagenet_validation_data.py", + "data/process_bounding_boxes.py", + ":build_imagenet_data", + ], +) + +py_binary( + name = "build_imagenet_data", + srcs = ["data/build_imagenet_data.py"], +) + +filegroup( + name = "srcs", + srcs = glob( + [ + "**/*.py", + "BUILD", + ], + ), +) + +filegroup( + name = "imagenet_metadata", + srcs = [ + "data/imagenet_lsvrc_2015_synsets.txt", + "data/imagenet_metadata.txt", + ], + visibility = ["//visibility:public"], +) diff --git a/models/research/inception/inception/data/build_image_data.py b/models/research/inception/inception/data/build_image_data.py new file mode 100644 index 0000000000000000000000000000000000000000..894388b7f758a46746870f2f0d55d1df7d3fe29b --- /dev/null +++ b/models/research/inception/inception/data/build_image_data.py @@ -0,0 +1,436 @@ +#!/usr/bin/python +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Converts image data to TFRecords file format with Example protos. + +The image data set is expected to reside in JPEG files located in the +following directory structure. + + data_dir/label_0/image0.jpeg + data_dir/label_0/image1.jpg + ... + data_dir/label_1/weird-image.jpeg + data_dir/label_1/my-image.jpeg + ... + +where the sub-directory is the unique label associated with these images. + +This TensorFlow script converts the training and evaluation data into +a sharded data set consisting of TFRecord files + + train_directory/train-00000-of-01024 + train_directory/train-00001-of-01024 + ... + train_directory/train-01023-of-01024 + +and + + validation_directory/validation-00000-of-00128 + validation_directory/validation-00001-of-00128 + ... + validation_directory/validation-00127-of-00128 + +where we have selected 1024 and 128 shards for each data set. Each record +within the TFRecord file is a serialized Example proto. The Example proto +contains the following fields: + + image/encoded: string containing JPEG encoded image in RGB colorspace + image/height: integer, image height in pixels + image/width: integer, image width in pixels + image/colorspace: string, specifying the colorspace, always 'RGB' + image/channels: integer, specifying the number of channels, always 3 + image/format: string, specifying the format, always 'JPEG' + + image/filename: string containing the basename of the image file + e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG' + image/class/label: integer specifying the index in a classification layer. + The label ranges from [0, num_labels] where 0 is unused and left as + the background class. + image/class/text: string specifying the human-readable version of the label + e.g. 'dog' + +If your data set involves bounding boxes, please look at build_imagenet_data.py. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from datetime import datetime +import os +import random +import sys +import threading + +import numpy as np +import tensorflow as tf + +tf.app.flags.DEFINE_string('train_directory', '/tmp/', + 'Training data directory') +tf.app.flags.DEFINE_string('validation_directory', '/tmp/', + 'Validation data directory') +tf.app.flags.DEFINE_string('output_directory', '/tmp/', + 'Output data directory') + +tf.app.flags.DEFINE_integer('train_shards', 2, + 'Number of shards in training TFRecord files.') +tf.app.flags.DEFINE_integer('validation_shards', 2, + 'Number of shards in validation TFRecord files.') + +tf.app.flags.DEFINE_integer('num_threads', 2, + 'Number of threads to preprocess the images.') + +# The labels file contains a list of valid labels are held in this file. +# Assumes that the file contains entries as such: +# dog +# cat +# flower +# where each line corresponds to a label. We map each label contained in +# the file to an integer corresponding to the line number starting from 0. 
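+# (In practice, label index 0 is reserved for the background class, so the
+# label on the first line is assigned index 1; see _find_image_files below.)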
+tf.app.flags.DEFINE_string('labels_file', '', 'Labels file') + + +FLAGS = tf.app.flags.FLAGS + + +def _int64_feature(value): + """Wrapper for inserting int64 features into Example proto.""" + if not isinstance(value, list): + value = [value] + return tf.train.Feature(int64_list=tf.train.Int64List(value=value)) + + +def _bytes_feature(value): + """Wrapper for inserting bytes features into Example proto.""" + return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) + + +def _convert_to_example(filename, image_buffer, label, text, height, width): + """Build an Example proto for an example. + + Args: + filename: string, path to an image file, e.g., '/path/to/example.JPG' + image_buffer: string, JPEG encoding of RGB image + label: integer, identifier for the ground truth for the network + text: string, unique human-readable, e.g. 'dog' + height: integer, image height in pixels + width: integer, image width in pixels + Returns: + Example proto + """ + + colorspace = 'RGB' + channels = 3 + image_format = 'JPEG' + + example = tf.train.Example(features=tf.train.Features(feature={ + 'image/height': _int64_feature(height), + 'image/width': _int64_feature(width), + 'image/colorspace': _bytes_feature(tf.compat.as_bytes(colorspace)), + 'image/channels': _int64_feature(channels), + 'image/class/label': _int64_feature(label), + 'image/class/text': _bytes_feature(tf.compat.as_bytes(text)), + 'image/format': _bytes_feature(tf.compat.as_bytes(image_format)), + 'image/filename': _bytes_feature(tf.compat.as_bytes(os.path.basename(filename))), + 'image/encoded': _bytes_feature(tf.compat.as_bytes(image_buffer))})) + return example + + +class ImageCoder(object): + """Helper class that provides TensorFlow image coding utilities.""" + + def __init__(self): + # Create a single Session to run all image coding calls. + self._sess = tf.Session() + + # Initializes function that converts PNG to JPEG data. + self._png_data = tf.placeholder(dtype=tf.string) + image = tf.image.decode_png(self._png_data, channels=3) + self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100) + + # Initializes function that decodes RGB JPEG data. + self._decode_jpeg_data = tf.placeholder(dtype=tf.string) + self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3) + + def png_to_jpeg(self, image_data): + return self._sess.run(self._png_to_jpeg, + feed_dict={self._png_data: image_data}) + + def decode_jpeg(self, image_data): + image = self._sess.run(self._decode_jpeg, + feed_dict={self._decode_jpeg_data: image_data}) + assert len(image.shape) == 3 + assert image.shape[2] == 3 + return image + + +def _is_png(filename): + """Determine if a file contains a PNG format image. + + Args: + filename: string, path of the image file. + + Returns: + boolean indicating if the image is a PNG. + """ + return filename.endswith('.png') + + +def _process_image(filename, coder): + """Process a single image file. + + Args: + filename: string, path to an image file e.g., '/path/to/example.JPG'. + coder: instance of ImageCoder to provide TensorFlow image coding utils. + Returns: + image_buffer: string, JPEG encoding of RGB image. + height: integer, image height in pixels. + width: integer, image width in pixels. + """ + # Read the image file. + with tf.gfile.FastGFile(filename, 'rb') as f: + image_data = f.read() + + # Convert any PNG to JPEG's for consistency. + if _is_png(filename): + print('Converting PNG to JPEG for %s' % filename) + image_data = coder.png_to_jpeg(image_data) + + # Decode the RGB JPEG. 
+ image = coder.decode_jpeg(image_data) + + # Check that image converted to RGB + assert len(image.shape) == 3 + height = image.shape[0] + width = image.shape[1] + assert image.shape[2] == 3 + + return image_data, height, width + + +def _process_image_files_batch(coder, thread_index, ranges, name, filenames, + texts, labels, num_shards): + """Processes and saves list of images as TFRecord in 1 thread. + + Args: + coder: instance of ImageCoder to provide TensorFlow image coding utils. + thread_index: integer, unique batch to run index is within [0, len(ranges)). + ranges: list of pairs of integers specifying ranges of each batches to + analyze in parallel. + name: string, unique identifier specifying the data set + filenames: list of strings; each string is a path to an image file + texts: list of strings; each string is human readable, e.g. 'dog' + labels: list of integer; each integer identifies the ground truth + num_shards: integer number of shards for this data set. + """ + # Each thread produces N shards where N = int(num_shards / num_threads). + # For instance, if num_shards = 128, and the num_threads = 2, then the first + # thread would produce shards [0, 64). + num_threads = len(ranges) + assert not num_shards % num_threads + num_shards_per_batch = int(num_shards / num_threads) + + shard_ranges = np.linspace(ranges[thread_index][0], + ranges[thread_index][1], + num_shards_per_batch + 1).astype(int) + num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0] + + counter = 0 + for s in range(num_shards_per_batch): + # Generate a sharded version of the file name, e.g. 'train-00002-of-00010' + shard = thread_index * num_shards_per_batch + s + output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards) + output_file = os.path.join(FLAGS.output_directory, output_filename) + writer = tf.python_io.TFRecordWriter(output_file) + + shard_counter = 0 + files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int) + for i in files_in_shard: + filename = filenames[i] + label = labels[i] + text = texts[i] + + try: + image_buffer, height, width = _process_image(filename, coder) + except Exception as e: + print(e) + print('SKIPPED: Unexpected error while decoding %s.' % filename) + continue + + example = _convert_to_example(filename, image_buffer, label, + text, height, width) + writer.write(example.SerializeToString()) + shard_counter += 1 + counter += 1 + + if not counter % 1000: + print('%s [thread %d]: Processed %d of %d images in thread batch.' % + (datetime.now(), thread_index, counter, num_files_in_thread)) + sys.stdout.flush() + + writer.close() + print('%s [thread %d]: Wrote %d images to %s' % + (datetime.now(), thread_index, shard_counter, output_file)) + sys.stdout.flush() + shard_counter = 0 + print('%s [thread %d]: Wrote %d images to %d shards.' % + (datetime.now(), thread_index, counter, num_files_in_thread)) + sys.stdout.flush() + + +def _process_image_files(name, filenames, texts, labels, num_shards): + """Process and save list of images as TFRecord of Example protos. + + Args: + name: string, unique identifier specifying the data set + filenames: list of strings; each string is a path to an image file + texts: list of strings; each string is human readable, e.g. 'dog' + labels: list of integer; each integer identifies the ground truth + num_shards: integer number of shards for this data set. + """ + assert len(filenames) == len(texts) + assert len(filenames) == len(labels) + + # Break all images into batches with a [ranges[i][0], ranges[i][1]]. 
+ spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int) + ranges = [] + for i in range(len(spacing) - 1): + ranges.append([spacing[i], spacing[i + 1]]) + + # Launch a thread for each batch. + print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges)) + sys.stdout.flush() + + # Create a mechanism for monitoring when all threads are finished. + coord = tf.train.Coordinator() + + # Create a generic TensorFlow-based utility for converting all image codings. + coder = ImageCoder() + + threads = [] + for thread_index in range(len(ranges)): + args = (coder, thread_index, ranges, name, filenames, + texts, labels, num_shards) + t = threading.Thread(target=_process_image_files_batch, args=args) + t.start() + threads.append(t) + + # Wait for all the threads to terminate. + coord.join(threads) + print('%s: Finished writing all %d images in data set.' % + (datetime.now(), len(filenames))) + sys.stdout.flush() + + +def _find_image_files(data_dir, labels_file): + """Build a list of all images files and labels in the data set. + + Args: + data_dir: string, path to the root directory of images. + + Assumes that the image data set resides in JPEG files located in + the following directory structure. + + data_dir/dog/another-image.JPEG + data_dir/dog/my-image.jpg + + where 'dog' is the label associated with these images. + + labels_file: string, path to the labels file. + + The list of valid labels are held in this file. Assumes that the file + contains entries as such: + dog + cat + flower + where each line corresponds to a label. We map each label contained in + the file to an integer starting with the integer 0 corresponding to the + label contained in the first line. + + Returns: + filenames: list of strings; each string is a path to an image file. + texts: list of strings; each string is the class, e.g. 'dog' + labels: list of integer; each integer identifies the ground truth. + """ + print('Determining list of input files and labels from %s.' % data_dir) + unique_labels = [l.strip() for l in tf.gfile.FastGFile( + labels_file, 'r').readlines()] + + labels = [] + filenames = [] + texts = [] + + # Leave label index 0 empty as a background class. + label_index = 1 + + # Construct the list of JPEG files and labels. + for text in unique_labels: + jpeg_file_path = '%s/%s/*' % (data_dir, text) + matching_files = tf.gfile.Glob(jpeg_file_path) + + labels.extend([label_index] * len(matching_files)) + texts.extend([text] * len(matching_files)) + filenames.extend(matching_files) + + if not label_index % 100: + print('Finished finding files in %d of %d classes.' % ( + label_index, len(labels))) + label_index += 1 + + # Shuffle the ordering of all image files in order to guarantee + # random ordering of the images with respect to label in the + # saved TFRecord files. Make the randomization repeatable. + shuffled_index = list(range(len(filenames))) + random.seed(12345) + random.shuffle(shuffled_index) + + filenames = [filenames[i] for i in shuffled_index] + texts = [texts[i] for i in shuffled_index] + labels = [labels[i] for i in shuffled_index] + + print('Found %d JPEG files across %d labels inside %s.' % + (len(filenames), len(unique_labels), data_dir)) + return filenames, texts, labels + + +def _process_dataset(name, directory, num_shards, labels_file): + """Process a complete data set and save it as a TFRecord. + + Args: + name: string, unique identifier specifying the data set. + directory: string, root path to the data set. 
+ num_shards: integer number of shards for this data set. + labels_file: string, path to the labels file. + """ + filenames, texts, labels = _find_image_files(directory, labels_file) + _process_image_files(name, filenames, texts, labels, num_shards) + + +def main(unused_argv): + assert not FLAGS.train_shards % FLAGS.num_threads, ( + 'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards') + assert not FLAGS.validation_shards % FLAGS.num_threads, ( + 'Please make the FLAGS.num_threads commensurate with ' + 'FLAGS.validation_shards') + print('Saving results to %s' % FLAGS.output_directory) + + # Run it! + _process_dataset('validation', FLAGS.validation_directory, + FLAGS.validation_shards, FLAGS.labels_file) + _process_dataset('train', FLAGS.train_directory, + FLAGS.train_shards, FLAGS.labels_file) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/inception/inception/data/build_imagenet_data.py b/models/research/inception/inception/data/build_imagenet_data.py new file mode 100644 index 0000000000000000000000000000000000000000..c054735e782297f990451e29ff4383af24bbe802 --- /dev/null +++ b/models/research/inception/inception/data/build_imagenet_data.py @@ -0,0 +1,707 @@ +#!/usr/bin/python +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Converts ImageNet data to TFRecords file format with Example protos. + +The raw ImageNet data set is expected to reside in JPEG files located in the +following directory structure. + + data_dir/n01440764/ILSVRC2012_val_00000293.JPEG + data_dir/n01440764/ILSVRC2012_val_00000543.JPEG + ... + +where 'n01440764' is the unique synset label associated with +these images. + +The training data set consists of 1000 sub-directories (i.e. labels) +each containing 1200 JPEG images for a total of 1.2M JPEG images. + +The evaluation data set consists of 1000 sub-directories (i.e. labels) +each containing 50 JPEG images for a total of 50K JPEG images. + +This TensorFlow script converts the training and evaluation data into +a sharded data set consisting of 1024 and 128 TFRecord files, respectively. + + train_directory/train-00000-of-01024 + train_directory/train-00001-of-01024 + ... + train_directory/train-01023-of-01024 + +and + + validation_directory/validation-00000-of-00128 + validation_directory/validation-00001-of-00128 + ... + validation_directory/validation-00127-of-00128 + +Each validation TFRecord file contains ~390 records. Each training TFREcord +file contains ~1250 records. Each record within the TFRecord file is a +serialized Example proto. 
The Example proto contains the following fields: + + image/encoded: string containing JPEG encoded image in RGB colorspace + image/height: integer, image height in pixels + image/width: integer, image width in pixels + image/colorspace: string, specifying the colorspace, always 'RGB' + image/channels: integer, specifying the number of channels, always 3 + image/format: string, specifying the format, always 'JPEG' + + image/filename: string containing the basename of the image file + e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG' + image/class/label: integer specifying the index in a classification layer. + The label ranges from [1, 1000] where 0 is not used. + image/class/synset: string specifying the unique ID of the label, + e.g. 'n01440764' + image/class/text: string specifying the human-readable version of the label + e.g. 'red fox, Vulpes vulpes' + + image/object/bbox/xmin: list of integers specifying the 0+ human annotated + bounding boxes + image/object/bbox/xmax: list of integers specifying the 0+ human annotated + bounding boxes + image/object/bbox/ymin: list of integers specifying the 0+ human annotated + bounding boxes + image/object/bbox/ymax: list of integers specifying the 0+ human annotated + bounding boxes + image/object/bbox/label: integer specifying the index in a classification + layer. The label ranges from [1, 1000] where 0 is not used. Note this is + always identical to the image label. + +Note that the length of xmin is identical to the length of xmax, ymin and ymax +for each example. + +Running this script using 16 threads may take around ~2.5 hours on an HP Z420. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from datetime import datetime +import os +import random +import sys +import threading + +import numpy as np +import six +import tensorflow as tf + +tf.app.flags.DEFINE_string('train_directory', '/tmp/', + 'Training data directory') +tf.app.flags.DEFINE_string('validation_directory', '/tmp/', + 'Validation data directory') +tf.app.flags.DEFINE_string('output_directory', '/tmp/', + 'Output data directory') + +tf.app.flags.DEFINE_integer('train_shards', 1024, + 'Number of shards in training TFRecord files.') +tf.app.flags.DEFINE_integer('validation_shards', 128, + 'Number of shards in validation TFRecord files.') + +tf.app.flags.DEFINE_integer('num_threads', 8, + 'Number of threads to preprocess the images.') + +# The labels file contains a list of valid labels are held in this file. +# Assumes that the file contains entries as such: +# n01440764 +# n01443537 +# n01484850 +# where each line corresponds to a label expressed as a synset. We map +# each synset contained in the file to an integer (based on the alphabetical +# ordering). See below for details. +tf.app.flags.DEFINE_string('labels_file', + 'imagenet_lsvrc_2015_synsets.txt', + 'Labels file') + +# This file containing mapping from synset to human-readable label. +# Assumes each line of the file looks like: +# +# n02119247 black fox +# n02119359 silver fox +# n02119477 red fox, Vulpes fulva +# +# where each line corresponds to a unique mapping. Note that each line is +# formatted as \t. 
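+# (i.e. each line holds the synset ID, a single tab character, and then the
+# human-readable label).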
+tf.app.flags.DEFINE_string('imagenet_metadata_file', + 'imagenet_metadata.txt', + 'ImageNet metadata file') + +# This file is the output of process_bounding_box.py +# Assumes each line of the file looks like: +# +# n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940 +# +# where each line corresponds to one bounding box annotation associated +# with an image. Each line can be parsed as: +# +# , , , , +# +# Note that there might exist mulitple bounding box annotations associated +# with an image file. +tf.app.flags.DEFINE_string('bounding_box_file', + './imagenet_2012_bounding_boxes.csv', + 'Bounding box file') + +FLAGS = tf.app.flags.FLAGS + + +def _int64_feature(value): + """Wrapper for inserting int64 features into Example proto.""" + if not isinstance(value, list): + value = [value] + return tf.train.Feature(int64_list=tf.train.Int64List(value=value)) + + +def _float_feature(value): + """Wrapper for inserting float features into Example proto.""" + if not isinstance(value, list): + value = [value] + return tf.train.Feature(float_list=tf.train.FloatList(value=value)) + + +def _bytes_feature(value): + """Wrapper for inserting bytes features into Example proto.""" + if six.PY3 and isinstance(value, six.text_type): + value = six.binary_type(value, encoding='utf-8') + return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) + + +def _convert_to_example(filename, image_buffer, label, synset, human, bbox, + height, width): + """Build an Example proto for an example. + + Args: + filename: string, path to an image file, e.g., '/path/to/example.JPG' + image_buffer: string, JPEG encoding of RGB image + label: integer, identifier for the ground truth for the network + synset: string, unique WordNet ID specifying the label, e.g., 'n02323233' + human: string, human-readable label, e.g., 'red fox, Vulpes vulpes' + bbox: list of bounding boxes; each box is a list of integers + specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to + the same label as the image label. + height: integer, image height in pixels + width: integer, image width in pixels + Returns: + Example proto + """ + xmin = [] + ymin = [] + xmax = [] + ymax = [] + for b in bbox: + assert len(b) == 4 + # pylint: disable=expression-not-assigned + [l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)] + # pylint: enable=expression-not-assigned + + colorspace = 'RGB' + channels = 3 + image_format = 'JPEG' + + example = tf.train.Example(features=tf.train.Features(feature={ + 'image/height': _int64_feature(height), + 'image/width': _int64_feature(width), + 'image/colorspace': _bytes_feature(colorspace), + 'image/channels': _int64_feature(channels), + 'image/class/label': _int64_feature(label), + 'image/class/synset': _bytes_feature(synset), + 'image/class/text': _bytes_feature(human), + 'image/object/bbox/xmin': _float_feature(xmin), + 'image/object/bbox/xmax': _float_feature(xmax), + 'image/object/bbox/ymin': _float_feature(ymin), + 'image/object/bbox/ymax': _float_feature(ymax), + 'image/object/bbox/label': _int64_feature([label] * len(xmin)), + 'image/format': _bytes_feature(image_format), + 'image/filename': _bytes_feature(os.path.basename(filename)), + 'image/encoded': _bytes_feature(image_buffer)})) + return example + + +class ImageCoder(object): + """Helper class that provides TensorFlow image coding utilities.""" + + def __init__(self): + # Create a single Session to run all image coding calls. + self._sess = tf.Session() + + # Initializes function that converts PNG to JPEG data. 
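+    # Each conversion below is built once as a small placeholder-fed graph and
+    # evaluated with self._sess.run() per image, which avoids adding new ops
+    # to the graph for every file processed.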
+ self._png_data = tf.placeholder(dtype=tf.string) + image = tf.image.decode_png(self._png_data, channels=3) + self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100) + + # Initializes function that converts CMYK JPEG data to RGB JPEG data. + self._cmyk_data = tf.placeholder(dtype=tf.string) + image = tf.image.decode_jpeg(self._cmyk_data, channels=0) + self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100) + + # Initializes function that decodes RGB JPEG data. + self._decode_jpeg_data = tf.placeholder(dtype=tf.string) + self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3) + + def png_to_jpeg(self, image_data): + return self._sess.run(self._png_to_jpeg, + feed_dict={self._png_data: image_data}) + + def cmyk_to_rgb(self, image_data): + return self._sess.run(self._cmyk_to_rgb, + feed_dict={self._cmyk_data: image_data}) + + def decode_jpeg(self, image_data): + image = self._sess.run(self._decode_jpeg, + feed_dict={self._decode_jpeg_data: image_data}) + assert len(image.shape) == 3 + assert image.shape[2] == 3 + return image + + +def _is_png(filename): + """Determine if a file contains a PNG format image. + + Args: + filename: string, path of the image file. + + Returns: + boolean indicating if the image is a PNG. + """ + # File list from: + # https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU + return 'n02105855_2933.JPEG' in filename + + +def _is_cmyk(filename): + """Determine if file contains a CMYK JPEG format image. + + Args: + filename: string, path of the image file. + + Returns: + boolean indicating if the image is a JPEG encoded with CMYK color space. + """ + # File list from: + # https://github.com/cytsai/ilsvrc-cmyk-image-list + blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG', + 'n02447366_23489.JPEG', 'n02492035_15739.JPEG', + 'n02747177_10752.JPEG', 'n03018349_4028.JPEG', + 'n03062245_4620.JPEG', 'n03347037_9675.JPEG', + 'n03467068_12171.JPEG', 'n03529860_11437.JPEG', + 'n03544143_17228.JPEG', 'n03633091_5218.JPEG', + 'n03710637_5125.JPEG', 'n03961711_5286.JPEG', + 'n04033995_2932.JPEG', 'n04258138_17003.JPEG', + 'n04264628_27969.JPEG', 'n04336792_7448.JPEG', + 'n04371774_5854.JPEG', 'n04596742_4225.JPEG', + 'n07583066_647.JPEG', 'n13037406_4650.JPEG'] + return filename.split('/')[-1] in blacklist + + +def _process_image(filename, coder): + """Process a single image file. + + Args: + filename: string, path to an image file e.g., '/path/to/example.JPG'. + coder: instance of ImageCoder to provide TensorFlow image coding utils. + Returns: + image_buffer: string, JPEG encoding of RGB image. + height: integer, image height in pixels. + width: integer, image width in pixels. + """ + # Read the image file. + with tf.gfile.FastGFile(filename, 'rb') as f: + image_data = f.read() + + # Clean the dirty data. + if _is_png(filename): + # 1 image is a PNG. + print('Converting PNG to JPEG for %s' % filename) + image_data = coder.png_to_jpeg(image_data) + elif _is_cmyk(filename): + # 22 JPEG images are in CMYK colorspace. + print('Converting CMYK to RGB for %s' % filename) + image_data = coder.cmyk_to_rgb(image_data) + + # Decode the RGB JPEG. 
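+  # Decoding is only used to recover the image dimensions and to sanity-check
+  # the data; the (possibly converted) encoded JPEG bytes are what get written
+  # to the TFRecord.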
+ image = coder.decode_jpeg(image_data) + + # Check that image converted to RGB + assert len(image.shape) == 3 + height = image.shape[0] + width = image.shape[1] + assert image.shape[2] == 3 + + return image_data, height, width + + +def _process_image_files_batch(coder, thread_index, ranges, name, filenames, + synsets, labels, humans, bboxes, num_shards): + """Processes and saves list of images as TFRecord in 1 thread. + + Args: + coder: instance of ImageCoder to provide TensorFlow image coding utils. + thread_index: integer, unique batch to run index is within [0, len(ranges)). + ranges: list of pairs of integers specifying ranges of each batches to + analyze in parallel. + name: string, unique identifier specifying the data set + filenames: list of strings; each string is a path to an image file + synsets: list of strings; each string is a unique WordNet ID + labels: list of integer; each integer identifies the ground truth + humans: list of strings; each string is a human-readable label + bboxes: list of bounding boxes for each image. Note that each entry in this + list might contain from 0+ entries corresponding to the number of bounding + box annotations for the image. + num_shards: integer number of shards for this data set. + """ + # Each thread produces N shards where N = int(num_shards / num_threads). + # For instance, if num_shards = 128, and the num_threads = 2, then the first + # thread would produce shards [0, 64). + num_threads = len(ranges) + assert not num_shards % num_threads + num_shards_per_batch = int(num_shards / num_threads) + + shard_ranges = np.linspace(ranges[thread_index][0], + ranges[thread_index][1], + num_shards_per_batch + 1).astype(int) + num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0] + + counter = 0 + for s in range(num_shards_per_batch): + # Generate a sharded version of the file name, e.g. 'train-00002-of-00010' + shard = thread_index * num_shards_per_batch + s + output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards) + output_file = os.path.join(FLAGS.output_directory, output_filename) + writer = tf.python_io.TFRecordWriter(output_file) + + shard_counter = 0 + files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int) + for i in files_in_shard: + filename = filenames[i] + label = labels[i] + synset = synsets[i] + human = humans[i] + bbox = bboxes[i] + + image_buffer, height, width = _process_image(filename, coder) + + example = _convert_to_example(filename, image_buffer, label, + synset, human, bbox, + height, width) + writer.write(example.SerializeToString()) + shard_counter += 1 + counter += 1 + + if not counter % 1000: + print('%s [thread %d]: Processed %d of %d images in thread batch.' % + (datetime.now(), thread_index, counter, num_files_in_thread)) + sys.stdout.flush() + + writer.close() + print('%s [thread %d]: Wrote %d images to %s' % + (datetime.now(), thread_index, shard_counter, output_file)) + sys.stdout.flush() + shard_counter = 0 + print('%s [thread %d]: Wrote %d images to %d shards.' % + (datetime.now(), thread_index, counter, num_files_in_thread)) + sys.stdout.flush() + + +def _process_image_files(name, filenames, synsets, labels, humans, + bboxes, num_shards): + """Process and save list of images as TFRecord of Example protos. 
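+
+  The file list is split into FLAGS.num_threads contiguous ranges, and each
+  range is handled by its own worker thread (see _process_image_files_batch
+  above); num_shards must therefore be divisible by FLAGS.num_threads.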
+ + Args: + name: string, unique identifier specifying the data set + filenames: list of strings; each string is a path to an image file + synsets: list of strings; each string is a unique WordNet ID + labels: list of integer; each integer identifies the ground truth + humans: list of strings; each string is a human-readable label + bboxes: list of bounding boxes for each image. Note that each entry in this + list might contain from 0+ entries corresponding to the number of bounding + box annotations for the image. + num_shards: integer number of shards for this data set. + """ + assert len(filenames) == len(synsets) + assert len(filenames) == len(labels) + assert len(filenames) == len(humans) + assert len(filenames) == len(bboxes) + + # Break all images into batches with a [ranges[i][0], ranges[i][1]]. + spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int) + ranges = [] + threads = [] + for i in range(len(spacing) - 1): + ranges.append([spacing[i], spacing[i + 1]]) + + # Launch a thread for each batch. + print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges)) + sys.stdout.flush() + + # Create a mechanism for monitoring when all threads are finished. + coord = tf.train.Coordinator() + + # Create a generic TensorFlow-based utility for converting all image codings. + coder = ImageCoder() + + threads = [] + for thread_index in range(len(ranges)): + args = (coder, thread_index, ranges, name, filenames, + synsets, labels, humans, bboxes, num_shards) + t = threading.Thread(target=_process_image_files_batch, args=args) + t.start() + threads.append(t) + + # Wait for all the threads to terminate. + coord.join(threads) + print('%s: Finished writing all %d images in data set.' % + (datetime.now(), len(filenames))) + sys.stdout.flush() + + +def _find_image_files(data_dir, labels_file): + """Build a list of all images files and labels in the data set. + + Args: + data_dir: string, path to the root directory of images. + + Assumes that the ImageNet data set resides in JPEG files located in + the following directory structure. + + data_dir/n01440764/ILSVRC2012_val_00000293.JPEG + data_dir/n01440764/ILSVRC2012_val_00000543.JPEG + + where 'n01440764' is the unique synset label associated with these images. + + labels_file: string, path to the labels file. + + The list of valid labels are held in this file. Assumes that the file + contains entries as such: + n01440764 + n01443537 + n01484850 + where each line corresponds to a label expressed as a synset. We map + each synset contained in the file to an integer (based on the alphabetical + ordering) starting with the integer 1 corresponding to the synset + contained in the first line. + + The reason we start the integer labels at 1 is to reserve label 0 as an + unused background class. + + Returns: + filenames: list of strings; each string is a path to an image file. + synsets: list of strings; each string is a unique WordNet ID. + labels: list of integer; each integer identifies the ground truth. + """ + print('Determining list of input files and labels from %s.' % data_dir) + challenge_synsets = [l.strip() for l in + tf.gfile.FastGFile(labels_file, 'r').readlines()] + + labels = [] + filenames = [] + synsets = [] + + # Leave label index 0 empty as a background class. + label_index = 1 + + # Construct the list of JPEG files and labels. 
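+  # For each synset this globs data_dir/<synset>/*.JPEG,
+  # e.g. data_dir/n01440764/*.JPEG.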
+ for synset in challenge_synsets: + jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset) + matching_files = tf.gfile.Glob(jpeg_file_path) + + labels.extend([label_index] * len(matching_files)) + synsets.extend([synset] * len(matching_files)) + filenames.extend(matching_files) + + if not label_index % 100: + print('Finished finding files in %d of %d classes.' % ( + label_index, len(challenge_synsets))) + label_index += 1 + + # Shuffle the ordering of all image files in order to guarantee + # random ordering of the images with respect to label in the + # saved TFRecord files. Make the randomization repeatable. + shuffled_index = list(range(len(filenames))) + random.seed(12345) + random.shuffle(shuffled_index) + + filenames = [filenames[i] for i in shuffled_index] + synsets = [synsets[i] for i in shuffled_index] + labels = [labels[i] for i in shuffled_index] + + print('Found %d JPEG files across %d labels inside %s.' % + (len(filenames), len(challenge_synsets), data_dir)) + return filenames, synsets, labels + + +def _find_human_readable_labels(synsets, synset_to_human): + """Build a list of human-readable labels. + + Args: + synsets: list of strings; each string is a unique WordNet ID. + synset_to_human: dict of synset to human labels, e.g., + 'n02119022' --> 'red fox, Vulpes vulpes' + + Returns: + List of human-readable strings corresponding to each synset. + """ + humans = [] + for s in synsets: + assert s in synset_to_human, ('Failed to find: %s' % s) + humans.append(synset_to_human[s]) + return humans + + +def _find_image_bounding_boxes(filenames, image_to_bboxes): + """Find the bounding boxes for a given image file. + + Args: + filenames: list of strings; each string is a path to an image file. + image_to_bboxes: dictionary mapping image file names to a list of + bounding boxes. This list contains 0+ bounding boxes. + Returns: + List of bounding boxes for each image. Note that each entry in this + list might contain from 0+ entries corresponding to the number of bounding + box annotations for the image. + """ + num_image_bbox = 0 + bboxes = [] + for f in filenames: + basename = os.path.basename(f) + if basename in image_to_bboxes: + bboxes.append(image_to_bboxes[basename]) + num_image_bbox += 1 + else: + bboxes.append([]) + print('Found %d images with bboxes out of %d images' % ( + num_image_bbox, len(filenames))) + return bboxes + + +def _process_dataset(name, directory, num_shards, synset_to_human, + image_to_bboxes): + """Process a complete data set and save it as a TFRecord. + + Args: + name: string, unique identifier specifying the data set. + directory: string, root path to the data set. + num_shards: integer number of shards for this data set. + synset_to_human: dict of synset to human labels, e.g., + 'n02119022' --> 'red fox, Vulpes vulpes' + image_to_bboxes: dictionary mapping image file names to a list of + bounding boxes. This list contains 0+ bounding boxes. + """ + filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file) + humans = _find_human_readable_labels(synsets, synset_to_human) + bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes) + _process_image_files(name, filenames, synsets, labels, + humans, bboxes, num_shards) + + +def _build_synset_lookup(imagenet_metadata_file): + """Build lookup for synset to human-readable label. + + Args: + imagenet_metadata_file: string, path to file containing mapping from + synset to human-readable label. 
+ + Assumes each line of the file looks like: + + n02119247 black fox + n02119359 silver fox + n02119477 red fox, Vulpes fulva + + where each line corresponds to a unique mapping. Note that each line is + formatted as \t. + + Returns: + Dictionary of synset to human labels, such as: + 'n02119022' --> 'red fox, Vulpes vulpes' + """ + lines = tf.gfile.FastGFile(imagenet_metadata_file, 'r').readlines() + synset_to_human = {} + for l in lines: + if l: + parts = l.strip().split('\t') + assert len(parts) == 2 + synset = parts[0] + human = parts[1] + synset_to_human[synset] = human + return synset_to_human + + +def _build_bounding_box_lookup(bounding_box_file): + """Build a lookup from image file to bounding boxes. + + Args: + bounding_box_file: string, path to file with bounding boxes annotations. + + Assumes each line of the file looks like: + + n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940 + + where each line corresponds to one bounding box annotation associated + with an image. Each line can be parsed as: + + , , , , + + Note that there might exist mulitple bounding box annotations associated + with an image file. This file is the output of process_bounding_boxes.py. + + Returns: + Dictionary mapping image file names to a list of bounding boxes. This list + contains 0+ bounding boxes. + """ + lines = tf.gfile.FastGFile(bounding_box_file, 'r').readlines() + images_to_bboxes = {} + num_bbox = 0 + num_image = 0 + for l in lines: + if l: + parts = l.split(',') + assert len(parts) == 5, ('Failed to parse: %s' % l) + filename = parts[0] + xmin = float(parts[1]) + ymin = float(parts[2]) + xmax = float(parts[3]) + ymax = float(parts[4]) + box = [xmin, ymin, xmax, ymax] + + if filename not in images_to_bboxes: + images_to_bboxes[filename] = [] + num_image += 1 + images_to_bboxes[filename].append(box) + num_bbox += 1 + + print('Successfully read %d bounding boxes ' + 'across %d images.' % (num_bbox, num_image)) + return images_to_bboxes + + +def main(unused_argv): + assert not FLAGS.train_shards % FLAGS.num_threads, ( + 'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards') + assert not FLAGS.validation_shards % FLAGS.num_threads, ( + 'Please make the FLAGS.num_threads commensurate with ' + 'FLAGS.validation_shards') + print('Saving results to %s' % FLAGS.output_directory) + + # Build a map from synset to human-readable label. + synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file) + image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file) + + # Run it! + _process_dataset('validation', FLAGS.validation_directory, + FLAGS.validation_shards, synset_to_human, image_to_bboxes) + _process_dataset('train', FLAGS.train_directory, FLAGS.train_shards, + synset_to_human, image_to_bboxes) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/inception/inception/data/download_and_preprocess_flowers.sh b/models/research/inception/inception/data/download_and_preprocess_flowers.sh new file mode 100644 index 0000000000000000000000000000000000000000..ee045c164e803ab38be69fb1933134e7f37f1793 --- /dev/null +++ b/models/research/inception/inception/data/download_and_preprocess_flowers.sh @@ -0,0 +1,96 @@ +#!/bin/bash +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# Script to download and preprocess the flowers data set. This data set +# provides a demonstration for how to perform fine-tuning (i.e. tranfer +# learning) from one model to a new data set. +# +# This script provides a demonstration for how to prepare an arbitrary +# data set for training an Inception v3 model. +# +# We demonstrate this with the flowers data set which consists of images +# of labeled flower images from 5 classes: +# +# daisy, dandelion, roses, sunflowers, tulips +# +# The final output of this script are sharded TFRecord files containing +# serialized Example protocol buffers. See build_image_data.py for +# details of how the Example protocol buffer contains image data. +# +# usage: +# ./download_and_preprocess_flowers.sh [data-dir] +set -e + +if [ -z "$1" ]; then + echo "Usage: download_and_preprocess_flowers.sh [data dir]" + exit +fi + +# Create the output and temporary directories. +DATA_DIR="${1%/}" +SCRATCH_DIR="${DATA_DIR}/raw-data" +WORK_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +mkdir -p "${DATA_DIR}" +mkdir -p "${SCRATCH_DIR}" + +# Download the flowers data. +DATA_URL="http://download.tensorflow.org/example_images/flower_photos.tgz" +CURRENT_DIR=$(pwd) +cd "${DATA_DIR}" +TARBALL="flower_photos.tgz" +if [ ! -f ${TARBALL} ]; then + echo "Downloading flower data set." + curl -o ${TARBALL} "${DATA_URL}" +else + echo "Skipping download of flower data." +fi + +# Note the locations of the train and validation data. +TRAIN_DIRECTORY="${SCRATCH_DIR}/train" +VALIDATION_DIRECTORY="${SCRATCH_DIR}/validation" + +# Expands the data into the flower_photos/ directory and rename it as the +# train directory. +tar xf flower_photos.tgz +rm -rf "${TRAIN_DIRECTORY}" "${VALIDATION_DIRECTORY}" +mv flower_photos "${TRAIN_DIRECTORY}" + +# Generate a list of 5 labels: daisy, dandelion, roses, sunflowers, tulips +LABELS_FILE="${SCRATCH_DIR}/labels.txt" +ls -1 "${TRAIN_DIRECTORY}" | grep -v 'LICENSE' | sed 's/\///' | sort > "${LABELS_FILE}" + +# Generate the validation data set. +while read LABEL; do + VALIDATION_DIR_FOR_LABEL="${VALIDATION_DIRECTORY}/${LABEL}" + TRAIN_DIR_FOR_LABEL="${TRAIN_DIRECTORY}/${LABEL}" + + # Move the first randomly selected 100 images to the validation set. + mkdir -p "${VALIDATION_DIR_FOR_LABEL}" + VALIDATION_IMAGES=$(ls -1 "${TRAIN_DIR_FOR_LABEL}" | shuf | head -100) + for IMAGE in ${VALIDATION_IMAGES}; do + mv -f "${TRAIN_DIRECTORY}/${LABEL}/${IMAGE}" "${VALIDATION_DIR_FOR_LABEL}" + done +done < "${LABELS_FILE}" + +# Build the TFRecords version of the image data. 
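+# The output of build_image_data.py is a set of sharded TFRecord files written
+# to ${OUTPUT_DIRECTORY}, named like train-00000-of-00002 and
+# validation-00000-of-00002 when two shards per subset are requested.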
+cd "${CURRENT_DIR}" +BUILD_SCRIPT="${WORK_DIR}/build_image_data.py" +OUTPUT_DIRECTORY="${DATA_DIR}" +"${BUILD_SCRIPT}" \ + --train_directory="${TRAIN_DIRECTORY}" \ + --validation_directory="${VALIDATION_DIRECTORY}" \ + --output_directory="${OUTPUT_DIRECTORY}" \ + --labels_file="${LABELS_FILE}" diff --git a/models/research/inception/inception/data/download_and_preprocess_flowers_mac.sh b/models/research/inception/inception/data/download_and_preprocess_flowers_mac.sh new file mode 100644 index 0000000000000000000000000000000000000000..154905635b19aeaaea087a8e76afda9b8c624d59 --- /dev/null +++ b/models/research/inception/inception/data/download_and_preprocess_flowers_mac.sh @@ -0,0 +1,96 @@ +#!/bin/bash +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# Script to download and preprocess the flowers data set. This data set +# provides a demonstration for how to perform fine-tuning (i.e. tranfer +# learning) from one model to a new data set. +# +# This script provides a demonstration for how to prepare an arbitrary +# data set for training an Inception v3 model. +# +# We demonstrate this with the flowers data set which consists of images +# of labeled flower images from 5 classes: +# +# daisy, dandelion, roses, sunflowers, tulips +# +# The final output of this script are sharded TFRecord files containing +# serialized Example protocol buffers. See build_image_data.py for +# details of how the Example protocol buffer contains image data. +# +# usage: +# ./download_and_preprocess_flowers.sh [data-dir] +set -e + +if [ -z "$1" ]; then + echo "Usage: download_and_preprocess_flowers.sh [data dir]" + exit +fi + +# Create the output and temporary directories. +DATA_DIR="${1%/}" +SCRATCH_DIR="${DATA_DIR}/raw-data/" +mkdir -p "${DATA_DIR}" +mkdir -p "${SCRATCH_DIR}" +WORK_DIR="$0.runfiles/inception/inception" + +# Download the flowers data. +DATA_URL="http://download.tensorflow.org/example_images/flower_photos.tgz" +CURRENT_DIR=$(pwd) +cd "${DATA_DIR}" +TARBALL="flower_photos.tgz" +if [ ! -f ${TARBALL} ]; then + echo "Downloading flower data set." + curl -o ${TARBALL} "${DATA_URL}" +else + echo "Skipping download of flower data." +fi + +# Note the locations of the train and validation data. +TRAIN_DIRECTORY="${SCRATCH_DIR}train/" +VALIDATION_DIRECTORY="${SCRATCH_DIR}validation/" + +# Expands the data into the flower_photos/ directory and rename it as the +# train directory. +tar xf flower_photos.tgz +rm -rf "${TRAIN_DIRECTORY}" "${VALIDATION_DIRECTORY}" +mv flower_photos "${TRAIN_DIRECTORY}" + +# Generate a list of 5 labels: daisy, dandelion, roses, sunflowers, tulips +LABELS_FILE="${SCRATCH_DIR}/labels.txt" +ls -1 "${TRAIN_DIRECTORY}" | grep -v 'LICENSE' | sed 's/\///' | sort > "${LABELS_FILE}" + +# Generate the validation data set. 
+while read LABEL; do + VALIDATION_DIR_FOR_LABEL="${VALIDATION_DIRECTORY}${LABEL}" + TRAIN_DIR_FOR_LABEL="${TRAIN_DIRECTORY}${LABEL}" + + # Move the first randomly selected 100 images to the validation set. + mkdir -p "${VALIDATION_DIR_FOR_LABEL}" + VALIDATION_IMAGES=$(ls -1 "${TRAIN_DIR_FOR_LABEL}" | gshuf | head -100) + for IMAGE in ${VALIDATION_IMAGES}; do + mv -f "${TRAIN_DIRECTORY}${LABEL}/${IMAGE}" "${VALIDATION_DIR_FOR_LABEL}" + done +done < "${LABELS_FILE}" + +# Build the TFRecords version of the image data. +cd "${CURRENT_DIR}" +BUILD_SCRIPT="${WORK_DIR}/build_image_data" +OUTPUT_DIRECTORY="${DATA_DIR}" +"${BUILD_SCRIPT}" \ + --train_directory="${TRAIN_DIRECTORY}" \ + --validation_directory="${VALIDATION_DIRECTORY}" \ + --output_directory="${OUTPUT_DIRECTORY}" \ + --labels_file="${LABELS_FILE}" diff --git a/models/research/inception/inception/data/download_and_preprocess_imagenet.sh b/models/research/inception/inception/data/download_and_preprocess_imagenet.sh new file mode 100644 index 0000000000000000000000000000000000000000..6faae831075d4f6bfdc8bf8797219f7a0e4c1797 --- /dev/null +++ b/models/research/inception/inception/data/download_and_preprocess_imagenet.sh @@ -0,0 +1,101 @@ +#!/bin/bash +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# Script to download and preprocess ImageNet Challenge 2012 +# training and validation data set. +# +# The final output of this script are sharded TFRecord files containing +# serialized Example protocol buffers. See build_imagenet_data.py for +# details of how the Example protocol buffers contain the ImageNet data. +# +# The final output of this script appears as such: +# +# data_dir/train-00000-of-01024 +# data_dir/train-00001-of-01024 +# ... +# data_dir/train-01023-of-01024 +# +# and +# +# data_dir/validation-00000-of-00128 +# data_dir/validation-00001-of-00128 +# ... +# data_dir/validation-00127-of-00128 +# +# Note that this script may take several hours to run to completion. The +# conversion of the ImageNet data to TFRecords alone takes 2-3 hours depending +# on the speed of your machine. Please be patient. +# +# **IMPORTANT** +# To download the raw images, the user must create an account with image-net.org +# and generate a username and access_key. The latter two are required for +# downloading the raw images. +# +# usage: +# ./download_and_preprocess_imagenet.sh [data-dir] +set -e + +if [ -z "$1" ]; then + echo "Usage: download_and_preprocess_imagenet.sh [data dir]" + exit +fi + +# Create the output and temporary directories. +DATA_DIR="${1%/}" +SCRATCH_DIR="${DATA_DIR}/raw-data/" +mkdir -p "${DATA_DIR}" +mkdir -p "${SCRATCH_DIR}" +WORK_DIR="$0.runfiles/inception/inception" + +# Download the ImageNet data. 
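+# Note: download_imagenet.sh expects the IMAGENET_USERNAME and
+# IMAGENET_ACCESS_KEY environment variables to be set (see the **IMPORTANT**
+# note above).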
+LABELS_FILE="${WORK_DIR}/data/imagenet_lsvrc_2015_synsets.txt" +DOWNLOAD_SCRIPT="${WORK_DIR}/data/download_imagenet.sh" +"${DOWNLOAD_SCRIPT}" "${SCRATCH_DIR}" "${LABELS_FILE}" + +# Note the locations of the train and validation data. +TRAIN_DIRECTORY="${SCRATCH_DIR}train/" +VALIDATION_DIRECTORY="${SCRATCH_DIR}validation/" + +# Preprocess the validation data by moving the images into the appropriate +# sub-directory based on the label (synset) of the image. +echo "Organizing the validation data into sub-directories." +PREPROCESS_VAL_SCRIPT="${WORK_DIR}/data/preprocess_imagenet_validation_data.py" +VAL_LABELS_FILE="${WORK_DIR}/data/imagenet_2012_validation_synset_labels.txt" + +"${PREPROCESS_VAL_SCRIPT}" "${VALIDATION_DIRECTORY}" "${VAL_LABELS_FILE}" + +# Convert the XML files for bounding box annotations into a single CSV. +echo "Extracting bounding box information from XML." +BOUNDING_BOX_SCRIPT="${WORK_DIR}/data/process_bounding_boxes.py" +BOUNDING_BOX_FILE="${SCRATCH_DIR}/imagenet_2012_bounding_boxes.csv" +BOUNDING_BOX_DIR="${SCRATCH_DIR}bounding_boxes/" + +"${BOUNDING_BOX_SCRIPT}" "${BOUNDING_BOX_DIR}" "${LABELS_FILE}" \ + | sort > "${BOUNDING_BOX_FILE}" +echo "Finished downloading and preprocessing the ImageNet data." + +# Build the TFRecords version of the ImageNet data. +BUILD_SCRIPT="${WORK_DIR}/build_imagenet_data" +OUTPUT_DIRECTORY="${DATA_DIR}" +IMAGENET_METADATA_FILE="${WORK_DIR}/data/imagenet_metadata.txt" + +"${BUILD_SCRIPT}" \ + --train_directory="${TRAIN_DIRECTORY}" \ + --validation_directory="${VALIDATION_DIRECTORY}" \ + --output_directory="${OUTPUT_DIRECTORY}" \ + --imagenet_metadata_file="${IMAGENET_METADATA_FILE}" \ + --labels_file="${LABELS_FILE}" \ + --bounding_box_file="${BOUNDING_BOX_FILE}" diff --git a/models/research/inception/inception/data/download_imagenet.sh b/models/research/inception/inception/data/download_imagenet.sh new file mode 100644 index 0000000000000000000000000000000000000000..f6c77781c0bcaad642ec7a38a7ba00693ef8ef83 --- /dev/null +++ b/models/research/inception/inception/data/download_imagenet.sh @@ -0,0 +1,104 @@ +#!/bin/bash +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# Script to download ImageNet Challenge 2012 training and validation data set. +# +# Downloads and decompresses raw images and bounding boxes. +# +# **IMPORTANT** +# To download the raw images, the user must create an account with image-net.org +# and generate a username and access_key. The latter two are required for +# downloading the raw images. +# +# usage: +# ./download_imagenet.sh [dir name] [synsets file] +set -e + +if [ "x$IMAGENET_ACCESS_KEY" == x -o "x$IMAGENET_USERNAME" == x ]; then + cat < ') + sys.exit(-1) + data_dir = sys.argv[1] + validation_labels_file = sys.argv[2] + + # Read in the 50000 synsets associated with the validation data set. 
+ labels = [l.strip() for l in open(validation_labels_file).readlines()] + unique_labels = set(labels) + + # Make all sub-directories in the validation data dir. + for label in unique_labels: + labeled_data_dir = os.path.join(data_dir, label) + # Catch error if sub-directory exists + try: + os.makedirs(labeled_data_dir) + except OSError as e: + # Raise all errors but 'EEXIST' + if e.errno != errno.EEXIST: + raise + + # Move all of the image to the appropriate sub-directory. + for i in range(len(labels)): + basename = 'ILSVRC2012_val_000%.5d.JPEG' % (i + 1) + original_filename = os.path.join(data_dir, basename) + if not os.path.exists(original_filename): + print('Failed to find: %s' % original_filename) + sys.exit(-1) + new_filename = os.path.join(data_dir, labels[i], basename) + os.rename(original_filename, new_filename) diff --git a/models/research/inception/inception/data/process_bounding_boxes.py b/models/research/inception/inception/data/process_bounding_boxes.py new file mode 100644 index 0000000000000000000000000000000000000000..5e9fd786e40b6d95b89fcc9f9774aa7f132c1a6f --- /dev/null +++ b/models/research/inception/inception/data/process_bounding_boxes.py @@ -0,0 +1,254 @@ +#!/usr/bin/python +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Process the ImageNet Challenge bounding boxes for TensorFlow model training. + +This script is called as + +process_bounding_boxes.py [synsets-file] + +Where is a directory containing the downloaded and unpacked bounding box +data. If [synsets-file] is supplied, then only the bounding boxes whose +synstes are contained within this file are returned. Note that the +[synsets-file] file contains synset ids, one per line. + +The script dumps out a CSV text file in which each line contains an entry. + n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940 + +The entry can be read as: + , , , , + +The bounding box for contains two points (xmin, ymin) and +(xmax, ymax) specifying the lower-left corner and upper-right corner of a +bounding box in *relative* coordinates. + +The user supplies a directory where the XML files reside. The directory +structure in the directory is assumed to look like this: + +/nXXXXXXXX/nXXXXXXXX_YYYY.xml + +Each XML file contains a bounding box annotation. The script: + + (1) Parses the XML file and extracts the filename, label and bounding box info. + + (2) The bounding box is specified in the XML files as integer (xmin, ymin) and + (xmax, ymax) *relative* to image size displayed to the human annotator. The + size of the image displayed to the human annotator is stored in the XML file + as integer (height, width). + + Note that the displayed size will differ from the actual size of the image + downloaded from image-net.org. To make the bounding box annotation useable, + we convert bounding box to floating point numbers relative to displayed + height and width of the image. 
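+
+  For example, xmin = 30 with a displayed width of 500 becomes 0.0600 in the
+  output CSV.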
+ + Note that each XML file might contain N bounding box annotations. + + Note that the points are all clamped at a range of [0.0, 1.0] because some + human annotations extend outside the range of the supplied image. + + See details here: http://image-net.org/download-bboxes + +(3) By default, the script outputs all valid bounding boxes. If a + [synsets-file] is supplied, only the subset of bounding boxes associated + with those synsets are outputted. Importantly, one can supply a list of + synsets in the ImageNet Challenge and output the list of bounding boxes + associated with the training images of the ILSVRC. + + We use these bounding boxes to inform the random distortion of images + supplied to the network. + +If you run this script successfully, you will see the following output +to stderr: +> Finished processing 544546 XML files. +> Skipped 0 XML files not in ImageNet Challenge. +> Skipped 0 bounding boxes not in ImageNet Challenge. +> Wrote 615299 bounding boxes from 544546 annotated images. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import glob +import os.path +import sys +import xml.etree.ElementTree as ET + + +class BoundingBox(object): + pass + + +def GetItem(name, root, index=0): + count = 0 + for item in root.iter(name): + if count == index: + return item.text + count += 1 + # Failed to find "index" occurrence of item. + return -1 + + +def GetInt(name, root, index=0): + # In some XML annotation files, the point values are not integers, but floats. + # So we add a float function to avoid ValueError. + return int(float(GetItem(name, root, index))) + + +def FindNumberBoundingBoxes(root): + index = 0 + while True: + if GetInt('xmin', root, index) == -1: + break + index += 1 + return index + + +def ProcessXMLAnnotation(xml_file): + """Process a single XML file containing a bounding box.""" + # pylint: disable=broad-except + try: + tree = ET.parse(xml_file) + except Exception: + print('Failed to parse: ' + xml_file, file=sys.stderr) + return None + # pylint: enable=broad-except + root = tree.getroot() + + num_boxes = FindNumberBoundingBoxes(root) + boxes = [] + + for index in range(num_boxes): + box = BoundingBox() + # Grab the 'index' annotation. + box.xmin = GetInt('xmin', root, index) + box.ymin = GetInt('ymin', root, index) + box.xmax = GetInt('xmax', root, index) + box.ymax = GetInt('ymax', root, index) + + box.width = GetInt('width', root) + box.height = GetInt('height', root) + box.filename = GetItem('filename', root) + '.JPEG' + box.label = GetItem('name', root) + + xmin = float(box.xmin) / float(box.width) + xmax = float(box.xmax) / float(box.width) + ymin = float(box.ymin) / float(box.height) + ymax = float(box.ymax) / float(box.height) + + # Some images contain bounding box annotations that + # extend outside of the supplied image. See, e.g. + # n03127925/n03127925_147.xml + # Additionally, for some bounding boxes, the min > max + # or the box is entirely outside of the image. 
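+    # The min/max reordering and clamping below normalize such boxes so that
+    # xmin <= xmax, ymin <= ymax and all values lie in [0.0, 1.0].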
+ min_x = min(xmin, xmax) + max_x = max(xmin, xmax) + box.xmin_scaled = min(max(min_x, 0.0), 1.0) + box.xmax_scaled = min(max(max_x, 0.0), 1.0) + + min_y = min(ymin, ymax) + max_y = max(ymin, ymax) + box.ymin_scaled = min(max(min_y, 0.0), 1.0) + box.ymax_scaled = min(max(max_y, 0.0), 1.0) + + boxes.append(box) + + return boxes + +if __name__ == '__main__': + if len(sys.argv) < 2 or len(sys.argv) > 3: + print('Invalid usage\n' + 'usage: process_bounding_boxes.py [synsets-file]', + file=sys.stderr) + sys.exit(-1) + + xml_files = glob.glob(sys.argv[1] + '/*/*.xml') + print('Identified %d XML files in %s' % (len(xml_files), sys.argv[1]), + file=sys.stderr) + + if len(sys.argv) == 3: + labels = set([l.strip() for l in open(sys.argv[2]).readlines()]) + print('Identified %d synset IDs in %s' % (len(labels), sys.argv[2]), + file=sys.stderr) + else: + labels = None + + skipped_boxes = 0 + skipped_files = 0 + saved_boxes = 0 + saved_files = 0 + for file_index, one_file in enumerate(xml_files): + # Example: <...>/n06470073/n00141669_6790.xml + label = os.path.basename(os.path.dirname(one_file)) + + # Determine if the annotation is from an ImageNet Challenge label. + if labels is not None and label not in labels: + skipped_files += 1 + continue + + bboxes = ProcessXMLAnnotation(one_file) + assert bboxes is not None, 'No bounding boxes found in ' + one_file + + found_box = False + for bbox in bboxes: + if labels is not None: + if bbox.label != label: + # Note: There is a slight bug in the bounding box annotation data. + # Many of the dog labels have the human label 'Scottish_deerhound' + # instead of the synset ID 'n02092002' in the bbox.label field. As a + # simple hack to overcome this issue, we only exclude bbox labels + # *which are synset ID's* that do not match original synset label for + # the XML file. + if bbox.label in labels: + skipped_boxes += 1 + continue + + # Guard against improperly specified boxes. + if (bbox.xmin_scaled >= bbox.xmax_scaled or + bbox.ymin_scaled >= bbox.ymax_scaled): + skipped_boxes += 1 + continue + + # Note bbox.filename occasionally contains '%s' in the name. This is + # data set noise that is fixed by just using the basename of the XML file. + image_filename = os.path.splitext(os.path.basename(one_file))[0] + print('%s.JPEG,%.4f,%.4f,%.4f,%.4f' % + (image_filename, + bbox.xmin_scaled, bbox.ymin_scaled, + bbox.xmax_scaled, bbox.ymax_scaled)) + + saved_boxes += 1 + found_box = True + if found_box: + saved_files += 1 + else: + skipped_files += 1 + + if not file_index % 5000: + print('--> processed %d of %d XML files.' % + (file_index + 1, len(xml_files)), + file=sys.stderr) + print('--> skipped %d boxes and %d XML files.' % + (skipped_boxes, skipped_files), file=sys.stderr) + + print('Finished processing %d XML files.' % len(xml_files), file=sys.stderr) + print('Skipped %d XML files not in ImageNet Challenge.' % skipped_files, + file=sys.stderr) + print('Skipped %d bounding boxes not in ImageNet Challenge.' % skipped_boxes, + file=sys.stderr) + print('Wrote %d bounding boxes from %d annotated images.' % + (saved_boxes, saved_files), + file=sys.stderr) + print('Finished.', file=sys.stderr) diff --git a/models/research/inception/inception/dataset.py b/models/research/inception/inception/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..752c97e03b0361975d64b72892cc94333e353dfb --- /dev/null +++ b/models/research/inception/inception/dataset.py @@ -0,0 +1,103 @@ +# Copyright 2016 Google Inc. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Small library that points to a data set. + +Methods of Data class: + data_files: Returns a python list of all (sharded) data set files. + num_examples_per_epoch: Returns the number of examples in the data set. + num_classes: Returns the number of classes in the data set. + reader: Return a reader for a single entry from the data set. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from abc import ABCMeta +from abc import abstractmethod +import os + + +import tensorflow as tf + +FLAGS = tf.app.flags.FLAGS + +# Basic model parameters. +tf.app.flags.DEFINE_string('data_dir', '/tmp/mydata', + """Path to the processed data, i.e. """ + """TFRecord of Example protos.""") + + +class Dataset(object): + """A simple class for handling data sets.""" + __metaclass__ = ABCMeta + + def __init__(self, name, subset): + """Initialize dataset using a subset and the path to the data.""" + assert subset in self.available_subsets(), self.available_subsets() + self.name = name + self.subset = subset + + @abstractmethod + def num_classes(self): + """Returns the number of classes in the data set.""" + pass + # return 10 + + @abstractmethod + def num_examples_per_epoch(self): + """Returns the number of examples in the data subset.""" + pass + # if self.subset == 'train': + # return 10000 + # if self.subset == 'validation': + # return 1000 + + @abstractmethod + def download_message(self): + """Prints a download message for the Dataset.""" + pass + + def available_subsets(self): + """Returns the list of available subsets.""" + return ['train', 'validation'] + + def data_files(self): + """Returns a python list of all (sharded) data subset files. + + Returns: + python list of all (sharded) data set files. + Raises: + ValueError: if there are not data_files matching the subset. + """ + tf_record_pattern = os.path.join(FLAGS.data_dir, '%s-*' % self.subset) + data_files = tf.gfile.Glob(tf_record_pattern) + if not data_files: + print('No files found for dataset %s/%s at %s' % (self.name, + self.subset, + FLAGS.data_dir)) + + self.download_message() + exit(-1) + return data_files + + def reader(self): + """Return a reader for a single entry from the data set. + + See io_ops.py for details of Reader class. + + Returns: + Reader object that reads the data set. + """ + return tf.TFRecordReader() diff --git a/models/research/inception/inception/flowers_data.py b/models/research/inception/inception/flowers_data.py new file mode 100644 index 0000000000000000000000000000000000000000..022b5234deef035a6150a54ed74445b510f1b148 --- /dev/null +++ b/models/research/inception/inception/flowers_data.py @@ -0,0 +1,52 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Small library that points to the flowers data set. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + + +from inception.dataset import Dataset + + +class FlowersData(Dataset): + """Flowers data set.""" + + def __init__(self, subset): + super(FlowersData, self).__init__('Flowers', subset) + + def num_classes(self): + """Returns the number of classes in the data set.""" + return 5 + + def num_examples_per_epoch(self): + """Returns the number of examples in the data subset.""" + if self.subset == 'train': + return 3170 + if self.subset == 'validation': + return 500 + + def download_message(self): + """Instruction to download and extract the tarball from Flowers website.""" + + print('Failed to find any Flowers %s files'% self.subset) + print('') + print('If you have already downloaded and processed the data, then make ' + 'sure to set --data_dir to point to the directory containing the ' + 'location of the sharded TFRecords.\n') + print('Please see README.md for instructions on how to build ' + 'the flowers dataset using download_and_preprocess_flowers.\n') diff --git a/models/research/inception/inception/flowers_eval.py b/models/research/inception/inception/flowers_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..ae3e9dc14c8dc83368aa83f523ade92e12113554 --- /dev/null +++ b/models/research/inception/inception/flowers_eval.py @@ -0,0 +1,40 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""A binary to evaluate Inception on the flowers data set. 
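+
+Note that main() deletes and recreates FLAGS.eval_dir before calling
+inception_eval.evaluate() on the FlowersData subset selected by --subset.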
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import tensorflow as tf + +from inception import inception_eval +from inception.flowers_data import FlowersData + +FLAGS = tf.app.flags.FLAGS + + +def main(unused_argv=None): + dataset = FlowersData(subset=FLAGS.subset) + assert dataset.data_files() + if tf.gfile.Exists(FLAGS.eval_dir): + tf.gfile.DeleteRecursively(FLAGS.eval_dir) + tf.gfile.MakeDirs(FLAGS.eval_dir) + inception_eval.evaluate(dataset) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/inception/inception/flowers_train.py b/models/research/inception/inception/flowers_train.py new file mode 100644 index 0000000000000000000000000000000000000000..1f044a539d48ef6ce011831210b4bc31eba278f3 --- /dev/null +++ b/models/research/inception/inception/flowers_train.py @@ -0,0 +1,41 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""A binary to train Inception on the flowers data set. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + + +import tensorflow as tf + +from inception import inception_train +from inception.flowers_data import FlowersData + +FLAGS = tf.app.flags.FLAGS + + +def main(_): + dataset = FlowersData(subset=FLAGS.subset) + assert dataset.data_files() + if tf.gfile.Exists(FLAGS.train_dir): + tf.gfile.DeleteRecursively(FLAGS.train_dir) + tf.gfile.MakeDirs(FLAGS.train_dir) + inception_train.train(dataset) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/inception/inception/image_processing.py b/models/research/inception/inception/image_processing.py new file mode 100644 index 0000000000000000000000000000000000000000..fe74f1b3c9958060b15f52df80b11606c7ccf343 --- /dev/null +++ b/models/research/inception/inception/image_processing.py @@ -0,0 +1,513 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Read and preprocess image data. + + Image processing occurs on a single image at a time. Image are read and + preprocessed in parallel across multiple threads. The resulting images + are concatenated together to form a single batch for training or evaluation. 
+ + -- Provide processed image data for a network: + inputs: Construct batches of evaluation examples of images. + distorted_inputs: Construct batches of training examples of images. + batch_inputs: Construct batches of training or evaluation examples of images. + + -- Data processing: + parse_example_proto: Parses an Example proto containing a training example + of an image. + + -- Image decoding: + decode_jpeg: Decode a JPEG encoded string into a 3-D float32 Tensor. + + -- Image preprocessing: + image_preprocessing: Decode and preprocess one image for evaluation or training + distort_image: Distort one image for training a network. + eval_image: Prepare one image for evaluation. + distort_color: Distort the color in one image for training. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +FLAGS = tf.app.flags.FLAGS + +tf.app.flags.DEFINE_integer('batch_size', 32, + """Number of images to process in a batch.""") +tf.app.flags.DEFINE_integer('image_size', 299, + """Provide square images of this size.""") +tf.app.flags.DEFINE_integer('num_preprocess_threads', 4, + """Number of preprocessing threads per tower. """ + """Please make this a multiple of 4.""") +tf.app.flags.DEFINE_integer('num_readers', 4, + """Number of parallel readers during train.""") + +# Images are preprocessed asynchronously using multiple threads specified by +# --num_preprocss_threads and the resulting processed images are stored in a +# random shuffling queue. The shuffling queue dequeues --batch_size images +# for processing on a given Inception tower. A larger shuffling queue guarantees +# better mixing across examples within a batch and results in slightly higher +# predictive performance in a trained model. Empirically, +# --input_queue_memory_factor=16 works well. A value of 16 implies a queue size +# of 1024*16 images. Assuming RGB 299x299 images, this implies a queue size of +# 16GB. If the machine is memory limited, then decrease this factor to +# decrease the CPU memory footprint, accordingly. +tf.app.flags.DEFINE_integer('input_queue_memory_factor', 16, + """Size of the queue of preprocessed images. """ + """Default is ideal but try smaller values, e.g. """ + """4, 2 or 1, if host memory is constrained. See """ + """comments in code for more details.""") + + +def inputs(dataset, batch_size=None, num_preprocess_threads=None): + """Generate batches of ImageNet images for evaluation. + + Use this function as the inputs for evaluating a network. + + Note that some (minimal) image preprocessing occurs during evaluation + including central cropping and resizing of the image to fit the network. + + Args: + dataset: instance of Dataset class specifying the dataset. + batch_size: integer, number of examples in batch + num_preprocess_threads: integer, total number of preprocessing threads but + None defaults to FLAGS.num_preprocess_threads. + + Returns: + images: Images. 4D tensor of size [batch_size, FLAGS.image_size, + image_size, 3]. + labels: 1-D integer Tensor of [FLAGS.batch_size]. + """ + if not batch_size: + batch_size = FLAGS.batch_size + + # Force all input processing onto CPU in order to reserve the GPU for + # the forward inference and back-propagation. 
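+  # Evaluation uses a single reader (num_readers=1 below); training uses
+  # FLAGS.num_readers readers instead (see distorted_inputs).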
+ with tf.device('/cpu:0'): + images, labels = batch_inputs( + dataset, batch_size, train=False, + num_preprocess_threads=num_preprocess_threads, + num_readers=1) + + return images, labels + + +def distorted_inputs(dataset, batch_size=None, num_preprocess_threads=None): + """Generate batches of distorted versions of ImageNet images. + + Use this function as the inputs for training a network. + + Distorting images provides a useful technique for augmenting the data + set during training in order to make the network invariant to aspects + of the image that do not effect the label. + + Args: + dataset: instance of Dataset class specifying the dataset. + batch_size: integer, number of examples in batch + num_preprocess_threads: integer, total number of preprocessing threads but + None defaults to FLAGS.num_preprocess_threads. + + Returns: + images: Images. 4D tensor of size [batch_size, FLAGS.image_size, + FLAGS.image_size, 3]. + labels: 1-D integer Tensor of [batch_size]. + """ + if not batch_size: + batch_size = FLAGS.batch_size + + # Force all input processing onto CPU in order to reserve the GPU for + # the forward inference and back-propagation. + with tf.device('/cpu:0'): + images, labels = batch_inputs( + dataset, batch_size, train=True, + num_preprocess_threads=num_preprocess_threads, + num_readers=FLAGS.num_readers) + return images, labels + + +def decode_jpeg(image_buffer, scope=None): + """Decode a JPEG string into one 3-D float image Tensor. + + Args: + image_buffer: scalar string Tensor. + scope: Optional scope for name_scope. + Returns: + 3-D float Tensor with values ranging from [0, 1). + """ + with tf.name_scope(values=[image_buffer], name=scope, + default_name='decode_jpeg'): + # Decode the string as an RGB JPEG. + # Note that the resulting image contains an unknown height and width + # that is set dynamically by decode_jpeg. In other words, the height + # and width of image is unknown at compile-time. + image = tf.image.decode_jpeg(image_buffer, channels=3) + + # After this point, all image pixels reside in [0,1) + # until the very end, when they're rescaled to (-1, 1). The various + # adjust_* ops all require this range for dtype float. + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + return image + + +def distort_color(image, thread_id=0, scope=None): + """Distort the color of the image. + + Each color distortion is non-commutative and thus ordering of the color ops + matters. Ideally we would randomly permute the ordering of the color ops. + Rather than adding that level of complication, we select a distinct ordering + of color ops for each preprocessing thread. + + Args: + image: Tensor containing single image. + thread_id: preprocessing thread ID. + scope: Optional scope for name_scope. + Returns: + color-distorted image + """ + with tf.name_scope(values=[image], name=scope, default_name='distort_color'): + color_ordering = thread_id % 2 + + if color_ordering == 0: + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.2) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + elif color_ordering == 1: + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.2) + + # The random_* ops do not necessarily clamp. 
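+    # Clip back to [0, 1] so downstream ops (and the final rescale to
+    # [-1, 1] in image_preprocessing) see pixel values in the expected range.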
+ image = tf.clip_by_value(image, 0.0, 1.0) + return image + + +def distort_image(image, height, width, bbox, thread_id=0, scope=None): + """Distort one image for training a network. + + Distorting images provides a useful technique for augmenting the data + set during training in order to make the network invariant to aspects + of the image that do not effect the label. + + Args: + image: 3-D float Tensor of image + height: integer + width: integer + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged + as [ymin, xmin, ymax, xmax]. + thread_id: integer indicating the preprocessing thread. + scope: Optional scope for name_scope. + Returns: + 3-D float Tensor of distorted image used for training. + """ + with tf.name_scope(values=[image, height, width, bbox], name=scope, + default_name='distort_image'): + # Each bounding box has shape [1, num_boxes, box coords] and + # the coordinates are ordered [ymin, xmin, ymax, xmax]. + + # Display the bounding box in the first thread only. + if not thread_id: + image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), + bbox) + tf.summary.image('image_with_bounding_boxes', image_with_box) + + # A large fraction of image datasets contain a human-annotated bounding + # box delineating the region of the image containing the object of interest. + # We choose to create a new bounding box for the object which is a randomly + # distorted version of the human-annotated bounding box that obeys an allowed + # range of aspect ratios, sizes and overlap with the human-annotated + # bounding box. If no box is supplied, then we assume the bounding box is + # the entire image. + sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( + tf.shape(image), + bounding_boxes=bbox, + min_object_covered=0.1, + aspect_ratio_range=[0.75, 1.33], + area_range=[0.05, 1.0], + max_attempts=100, + use_image_if_no_bounding_boxes=True) + bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box + if not thread_id: + image_with_distorted_box = tf.image.draw_bounding_boxes( + tf.expand_dims(image, 0), distort_bbox) + tf.summary.image('images_with_distorted_bounding_box', + image_with_distorted_box) + + # Crop the image to the specified bounding box. + distorted_image = tf.slice(image, bbox_begin, bbox_size) + + # This resizing operation may distort the images because the aspect + # ratio is not respected. We select a resize method in a round robin + # fashion based on the thread number. + # Note that ResizeMethod contains 4 enumerated resizing methods. + resize_method = thread_id % 4 + distorted_image = tf.image.resize_images(distorted_image, [height, width], + method=resize_method) + # Restore the shape since the dynamic slice based upon the bbox_size loses + # the third dimension. + distorted_image.set_shape([height, width, 3]) + if not thread_id: + tf.summary.image('cropped_resized_image', + tf.expand_dims(distorted_image, 0)) + + # Randomly flip the image horizontally. + distorted_image = tf.image.random_flip_left_right(distorted_image) + + # Randomly distort the colors. + distorted_image = distort_color(distorted_image, thread_id) + + if not thread_id: + tf.summary.image('final_distorted_image', + tf.expand_dims(distorted_image, 0)) + return distorted_image + + +def eval_image(image, height, width, scope=None): + """Prepare one image for evaluation. 
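+
+  Unlike distort_image, no random distortion is applied: the image is
+  centrally cropped (keeping 87.5% of the area) and bilinearly resized
+  to height x width.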
+ + Args: + image: 3-D float Tensor + height: integer + width: integer + scope: Optional scope for name_scope. + Returns: + 3-D float Tensor of prepared image. + """ + with tf.name_scope(values=[image, height, width], name=scope, + default_name='eval_image'): + # Crop the central region of the image with an area containing 87.5% of + # the original image. + image = tf.image.central_crop(image, central_fraction=0.875) + + # Resize the image to the original height and width. + image = tf.expand_dims(image, 0) + image = tf.image.resize_bilinear(image, [height, width], + align_corners=False) + image = tf.squeeze(image, [0]) + return image + + +def image_preprocessing(image_buffer, bbox, train, thread_id=0): + """Decode and preprocess one image for evaluation or training. + + Args: + image_buffer: JPEG encoded string Tensor + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged as + [ymin, xmin, ymax, xmax]. + train: boolean + thread_id: integer indicating preprocessing thread + + Returns: + 3-D float Tensor containing an appropriately scaled image + + Raises: + ValueError: if user does not provide bounding box + """ + if bbox is None: + raise ValueError('Please supply a bounding box.') + + image = decode_jpeg(image_buffer) + height = FLAGS.image_size + width = FLAGS.image_size + + if train: + image = distort_image(image, height, width, bbox, thread_id) + else: + image = eval_image(image, height, width) + + # Finally, rescale to [-1,1] instead of [0, 1) + image = tf.subtract(image, 0.5) + image = tf.multiply(image, 2.0) + return image + + +def parse_example_proto(example_serialized): + """Parses an Example proto containing a training example of an image. + + The output of the build_image_data.py image preprocessing script is a dataset + containing serialized Example protocol buffers. Each Example proto contains + the following fields: + + image/height: 462 + image/width: 581 + image/colorspace: 'RGB' + image/channels: 3 + image/class/label: 615 + image/class/synset: 'n03623198' + image/class/text: 'knee pad' + image/object/bbox/xmin: 0.1 + image/object/bbox/xmax: 0.9 + image/object/bbox/ymin: 0.2 + image/object/bbox/ymax: 0.6 + image/object/bbox/label: 615 + image/format: 'JPEG' + image/filename: 'ILSVRC2012_val_00041207.JPEG' + image/encoded: + + Args: + example_serialized: scalar Tensor tf.string containing a serialized + Example protocol buffer. + + Returns: + image_buffer: Tensor tf.string containing the contents of a JPEG file. + label: Tensor tf.int32 containing the label. + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged as + [ymin, xmin, ymax, xmax]. + text: Tensor tf.string containing the human-readable label. + """ + # Dense features in Example proto. + feature_map = { + 'image/encoded': tf.FixedLenFeature([], dtype=tf.string, + default_value=''), + 'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64, + default_value=-1), + 'image/class/text': tf.FixedLenFeature([], dtype=tf.string, + default_value=''), + } + sparse_float32 = tf.VarLenFeature(dtype=tf.float32) + # Sparse features in Example proto. 
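+  # The number of bounding boxes varies per image, so the four coordinate
+  # lists are parsed as sparse VarLenFeature tensors and assembled into a
+  # dense [1, num_boxes, 4] tensor below.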
+ feature_map.update( + {k: sparse_float32 for k in ['image/object/bbox/xmin', + 'image/object/bbox/ymin', + 'image/object/bbox/xmax', + 'image/object/bbox/ymax']}) + + features = tf.parse_single_example(example_serialized, feature_map) + label = tf.cast(features['image/class/label'], dtype=tf.int32) + + xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0) + ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0) + xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0) + ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0) + + # Note that we impose an ordering of (y, x) just to make life difficult. + bbox = tf.concat(axis=0, values=[ymin, xmin, ymax, xmax]) + + # Force the variable number of bounding boxes into the shape + # [1, num_boxes, coords]. + bbox = tf.expand_dims(bbox, 0) + bbox = tf.transpose(bbox, [0, 2, 1]) + + return features['image/encoded'], label, bbox, features['image/class/text'] + + +def batch_inputs(dataset, batch_size, train, num_preprocess_threads=None, + num_readers=1): + """Contruct batches of training or evaluation examples from the image dataset. + + Args: + dataset: instance of Dataset class specifying the dataset. + See dataset.py for details. + batch_size: integer + train: boolean + num_preprocess_threads: integer, total number of preprocessing threads + num_readers: integer, number of parallel readers + + Returns: + images: 4-D float Tensor of a batch of images + labels: 1-D integer Tensor of [batch_size]. + + Raises: + ValueError: if data is not found + """ + with tf.name_scope('batch_processing'): + data_files = dataset.data_files() + if data_files is None: + raise ValueError('No data files found for this dataset') + + # Create filename_queue + if train: + filename_queue = tf.train.string_input_producer(data_files, + shuffle=True, + capacity=16) + else: + filename_queue = tf.train.string_input_producer(data_files, + shuffle=False, + capacity=1) + if num_preprocess_threads is None: + num_preprocess_threads = FLAGS.num_preprocess_threads + + if num_preprocess_threads % 4: + raise ValueError('Please make num_preprocess_threads a multiple ' + 'of 4 (%d % 4 != 0).', num_preprocess_threads) + + if num_readers is None: + num_readers = FLAGS.num_readers + + if num_readers < 1: + raise ValueError('Please make num_readers at least 1') + + # Approximate number of examples per shard. + examples_per_shard = 1024 + # Size the random shuffle queue to balance between good global + # mixing (more examples) and memory use (fewer examples). + # 1 image uses 299*299*3*4 bytes = 1MB + # The default input_queue_memory_factor is 16 implying a shuffling queue + # size: examples_per_shard * 16 * 1MB = 17.6GB + min_queue_examples = examples_per_shard * FLAGS.input_queue_memory_factor + if train: + examples_queue = tf.RandomShuffleQueue( + capacity=min_queue_examples + 3 * batch_size, + min_after_dequeue=min_queue_examples, + dtypes=[tf.string]) + else: + examples_queue = tf.FIFOQueue( + capacity=examples_per_shard + 3 * batch_size, + dtypes=[tf.string]) + + # Create multiple readers to populate the queue of examples. 
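+    # When num_readers > 1, each reader pulls serialized Examples from the
+    # shared filename queue and a QueueRunner keeps examples_queue filled;
+    # with a single reader the records are read inline without that queue.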
+ if num_readers > 1: + enqueue_ops = [] + for _ in range(num_readers): + reader = dataset.reader() + _, value = reader.read(filename_queue) + enqueue_ops.append(examples_queue.enqueue([value])) + + tf.train.queue_runner.add_queue_runner( + tf.train.queue_runner.QueueRunner(examples_queue, enqueue_ops)) + example_serialized = examples_queue.dequeue() + else: + reader = dataset.reader() + _, example_serialized = reader.read(filename_queue) + + images_and_labels = [] + for thread_id in range(num_preprocess_threads): + # Parse a serialized Example proto to extract the image and metadata. + image_buffer, label_index, bbox, _ = parse_example_proto( + example_serialized) + image = image_preprocessing(image_buffer, bbox, train, thread_id) + images_and_labels.append([image, label_index]) + + images, label_index_batch = tf.train.batch_join( + images_and_labels, + batch_size=batch_size, + capacity=2 * num_preprocess_threads * batch_size) + + # Reshape images into these desired dimensions. + height = FLAGS.image_size + width = FLAGS.image_size + depth = 3 + + images = tf.cast(images, tf.float32) + images = tf.reshape(images, shape=[batch_size, height, width, depth]) + + # Display the training images in the visualizer. + tf.summary.image('images', images) + + return images, tf.reshape(label_index_batch, [batch_size]) diff --git a/models/research/inception/inception/imagenet_data.py b/models/research/inception/inception/imagenet_data.py new file mode 100644 index 0000000000000000000000000000000000000000..0a6d22e1292632f0899355d5aa7183c3f5f33b2c --- /dev/null +++ b/models/research/inception/inception/imagenet_data.py @@ -0,0 +1,59 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Small library that points to the ImageNet data set. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + + +from inception.dataset import Dataset + + +class ImagenetData(Dataset): + """ImageNet data set.""" + + def __init__(self, subset): + super(ImagenetData, self).__init__('ImageNet', subset) + + def num_classes(self): + """Returns the number of classes in the data set.""" + return 1000 + + def num_examples_per_epoch(self): + """Returns the number of examples in the data set.""" + # Bounding box data consists of 615299 bounding boxes for 544546 images. 
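+    # Standard ILSVRC-2012 split; any other subset falls through and
+    # implicitly returns None.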
+ if self.subset == 'train': + return 1281167 + if self.subset == 'validation': + return 50000 + + def download_message(self): + """Instruction to download and extract the tarball from Flowers website.""" + + print('Failed to find any ImageNet %s files'% self.subset) + print('') + print('If you have already downloaded and processed the data, then make ' + 'sure to set --data_dir to point to the directory containing the ' + 'location of the sharded TFRecords.\n') + print('If you have not downloaded and prepared the ImageNet data in the ' + 'TFRecord format, you will need to do this at least once. This ' + 'process could take several hours depending on the speed of your ' + 'computer and network connection\n') + print('Please see README.md for instructions on how to build ' + 'the ImageNet dataset using download_and_preprocess_imagenet.\n') + print('Note that the raw data size is 300 GB and the processed data size ' + 'is 150 GB. Please ensure you have at least 500GB disk space.') diff --git a/models/research/inception/inception/imagenet_distributed_train.py b/models/research/inception/inception/imagenet_distributed_train.py new file mode 100644 index 0000000000000000000000000000000000000000..f3615e012f042649b52e37aeaeeb2c3efc07f92c --- /dev/null +++ b/models/research/inception/inception/imagenet_distributed_train.py @@ -0,0 +1,66 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# pylint: disable=line-too-long +"""A binary to train Inception in a distributed manner using multiple systems. + +Please see accompanying README.md for details and instructions. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from inception import inception_distributed_train +from inception.imagenet_data import ImagenetData + +FLAGS = tf.app.flags.FLAGS + + +def main(unused_args): + assert FLAGS.job_name in ['ps', 'worker'], 'job_name must be ps or worker' + + # Extract all the hostnames for the ps and worker jobs to construct the + # cluster spec. + ps_hosts = FLAGS.ps_hosts.split(',') + worker_hosts = FLAGS.worker_hosts.split(',') + tf.logging.info('PS hosts are: %s' % ps_hosts) + tf.logging.info('Worker hosts are: %s' % worker_hosts) + + cluster_spec = tf.train.ClusterSpec({'ps': ps_hosts, + 'worker': worker_hosts}) + server = tf.train.Server( + {'ps': ps_hosts, + 'worker': worker_hosts}, + job_name=FLAGS.job_name, + task_index=FLAGS.task_id, + protocol=FLAGS.protocol) + + if FLAGS.job_name == 'ps': + # `ps` jobs wait for incoming connections from the workers. + server.join() + else: + # `worker` jobs will actually do the work. + dataset = ImagenetData(subset=FLAGS.subset) + assert dataset.data_files() + # Only the chief checks for or creates train_dir. 
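+    # Non-chief workers simply assume the directory exists (or soon will),
+    # which avoids a race between replicas creating the same path.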
+ if FLAGS.task_id == 0: + if not tf.gfile.Exists(FLAGS.train_dir): + tf.gfile.MakeDirs(FLAGS.train_dir) + inception_distributed_train.train(server.target, dataset, cluster_spec) + +if __name__ == '__main__': + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/models/research/inception/inception/imagenet_eval.py b/models/research/inception/inception/imagenet_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..e6f8bac2ee71021914715172296d63dd56b5a6f9 --- /dev/null +++ b/models/research/inception/inception/imagenet_eval.py @@ -0,0 +1,46 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""A binary to evaluate Inception on the ImageNet data set. + +Note that using the supplied pre-trained inception checkpoint, the eval should +achieve: + precision @ 1 = 0.7874 recall @ 5 = 0.9436 [50000 examples] + +See the README.md for more details. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import tensorflow as tf + +from inception import inception_eval +from inception.imagenet_data import ImagenetData + +FLAGS = tf.app.flags.FLAGS + + +def main(unused_argv=None): + dataset = ImagenetData(subset=FLAGS.subset) + assert dataset.data_files() + if tf.gfile.Exists(FLAGS.eval_dir): + tf.gfile.DeleteRecursively(FLAGS.eval_dir) + tf.gfile.MakeDirs(FLAGS.eval_dir) + inception_eval.evaluate(dataset) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/inception/inception/imagenet_train.py b/models/research/inception/inception/imagenet_train.py new file mode 100644 index 0000000000000000000000000000000000000000..3ffb55ee963e5b9f8e31915a78eef518324642aa --- /dev/null +++ b/models/research/inception/inception/imagenet_train.py @@ -0,0 +1,41 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""A binary to train Inception on the ImageNet data set. 
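+
+A typical single-machine invocation (flags are defined in inception_train.py
+and image_processing.py) might look like:
+
+  imagenet_train --num_gpus=1 --batch_size=32 --train_dir=/tmp/imagenet_train
+
+See the accompanying README.md for the full build and data-preparation steps.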
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + + +import tensorflow as tf + +from inception import inception_train +from inception.imagenet_data import ImagenetData + +FLAGS = tf.app.flags.FLAGS + + +def main(_): + dataset = ImagenetData(subset=FLAGS.subset) + assert dataset.data_files() + if tf.gfile.Exists(FLAGS.train_dir): + tf.gfile.DeleteRecursively(FLAGS.train_dir) + tf.gfile.MakeDirs(FLAGS.train_dir) + inception_train.train(dataset) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/inception/inception/inception_distributed_train.py b/models/research/inception/inception/inception_distributed_train.py new file mode 100644 index 0000000000000000000000000000000000000000..c1a589acb5fe386fd648ae3fae926ee927c0ca79 --- /dev/null +++ b/models/research/inception/inception/inception_distributed_train.py @@ -0,0 +1,314 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""A library to train Inception using multiple replicas with synchronous update. + +Please see accompanying README.md for details and instructions. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from datetime import datetime +import os.path +import time + +import numpy as np +import tensorflow as tf + +from inception import image_processing +from inception import inception_model as inception +from inception.slim import slim + +FLAGS = tf.app.flags.FLAGS + +tf.app.flags.DEFINE_string('job_name', '', 'One of "ps", "worker"') +tf.app.flags.DEFINE_string('ps_hosts', '', + """Comma-separated list of hostname:port for the """ + """parameter server jobs. e.g. """ + """'machine1:2222,machine2:1111,machine2:2222'""") +tf.app.flags.DEFINE_string('worker_hosts', '', + """Comma-separated list of hostname:port for the """ + """worker jobs. e.g. """ + """'machine1:2222,machine2:1111,machine2:2222'""") +tf.app.flags.DEFINE_string('protocol', 'grpc', + """Communication protocol to use in distributed """ + """execution (default grpc) """) + +tf.app.flags.DEFINE_string('train_dir', '/tmp/imagenet_train', + """Directory where to write event logs """ + """and checkpoint.""") +tf.app.flags.DEFINE_integer('max_steps', 1000000, 'Number of batches to run.') +tf.app.flags.DEFINE_string('subset', 'train', 'Either "train" or "validation".') +tf.app.flags.DEFINE_boolean('log_device_placement', False, + 'Whether to log device placement.') + +# Task ID is used to select the chief and also to access the local_step for +# each replica to check staleness of the gradients in SyncReplicasOptimizer. 
+tf.app.flags.DEFINE_integer( + 'task_id', 0, 'Task ID of the worker/replica running the training.') + +# More details can be found in the SyncReplicasOptimizer class: +# tensorflow/python/training/sync_replicas_optimizer.py +tf.app.flags.DEFINE_integer('num_replicas_to_aggregate', -1, + """Number of gradients to collect before """ + """updating the parameters.""") +tf.app.flags.DEFINE_integer('save_interval_secs', 10 * 60, + 'Save interval seconds.') +tf.app.flags.DEFINE_integer('save_summaries_secs', 180, + 'Save summaries interval seconds.') + +# **IMPORTANT** +# Please note that this learning rate schedule is heavily dependent on the +# hardware architecture, batch size and any changes to the model architecture +# specification. Selecting a finely tuned learning rate schedule is an +# empirical process that requires some experimentation. Please see README.md +# more guidance and discussion. +# +# Learning rate decay factor selected from https://arxiv.org/abs/1604.00981 +tf.app.flags.DEFINE_float('initial_learning_rate', 0.045, + 'Initial learning rate.') +tf.app.flags.DEFINE_float('num_epochs_per_decay', 2.0, + 'Epochs after which learning rate decays.') +tf.app.flags.DEFINE_float('learning_rate_decay_factor', 0.94, + 'Learning rate decay factor.') + +# Constants dictating the learning rate schedule. +RMSPROP_DECAY = 0.9 # Decay term for RMSProp. +RMSPROP_MOMENTUM = 0.9 # Momentum in RMSProp. +RMSPROP_EPSILON = 1.0 # Epsilon term for RMSProp. + + +def train(target, dataset, cluster_spec): + """Train Inception on a dataset for a number of steps.""" + # Number of workers and parameter servers are inferred from the workers and ps + # hosts string. + num_workers = len(cluster_spec.as_dict()['worker']) + num_parameter_servers = len(cluster_spec.as_dict()['ps']) + # If no value is given, num_replicas_to_aggregate defaults to be the number of + # workers. + if FLAGS.num_replicas_to_aggregate == -1: + num_replicas_to_aggregate = num_workers + else: + num_replicas_to_aggregate = FLAGS.num_replicas_to_aggregate + + # Both should be greater than 0 in a distributed training. + assert num_workers > 0 and num_parameter_servers > 0, (' num_workers and ' + 'num_parameter_servers' + ' must be > 0.') + + # Choose worker 0 as the chief. Note that any worker could be the chief + # but there should be only one chief. + is_chief = (FLAGS.task_id == 0) + + # Ops are assigned to worker by default. + with tf.device('/job:worker/task:%d' % FLAGS.task_id): + # Variables and its related init/assign ops are assigned to ps. + with slim.scopes.arg_scope( + [slim.variables.variable, slim.variables.global_step], + device=slim.variables.VariableDeviceChooser(num_parameter_servers)): + # Create a variable to count the number of train() calls. This equals the + # number of updates applied to the variables. + global_step = slim.variables.global_step() + + # Calculate the learning rate schedule. + num_batches_per_epoch = (dataset.num_examples_per_epoch() / + FLAGS.batch_size) + # Decay steps need to be divided by the number of replicas to aggregate. + decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay / + num_replicas_to_aggregate) + + # Decay the learning rate exponentially based on the number of steps. + lr = tf.train.exponential_decay(FLAGS.initial_learning_rate, + global_step, + decay_steps, + FLAGS.learning_rate_decay_factor, + staircase=True) + # Add a summary to track the learning rate. + tf.summary.scalar('learning_rate', lr) + + # Create an optimizer that performs gradient descent. 
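+      # (RMSProp specifically, using the RMSPROP_* constants defined above.)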
+ opt = tf.train.RMSPropOptimizer(lr, + RMSPROP_DECAY, + momentum=RMSPROP_MOMENTUM, + epsilon=RMSPROP_EPSILON) + + images, labels = image_processing.distorted_inputs( + dataset, + batch_size=FLAGS.batch_size, + num_preprocess_threads=FLAGS.num_preprocess_threads) + + # Number of classes in the Dataset label set plus 1. + # Label 0 is reserved for an (unused) background class. + num_classes = dataset.num_classes() + 1 + logits = inception.inference(images, num_classes, for_training=True) + # Add classification loss. + inception.loss(logits, labels) + + # Gather all of the losses including regularization losses. + losses = tf.get_collection(slim.losses.LOSSES_COLLECTION) + losses += tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) + + total_loss = tf.add_n(losses, name='total_loss') + + if is_chief: + # Compute the moving average of all individual losses and the + # total loss. + loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg') + loss_averages_op = loss_averages.apply(losses + [total_loss]) + + # Attach a scalar summmary to all individual losses and the total loss; + # do the same for the averaged version of the losses. + for l in losses + [total_loss]: + loss_name = l.op.name + # Name each loss as '(raw)' and name the moving average version of the + # loss as the original loss name. + tf.summary.scalar(loss_name + ' (raw)', l) + tf.summary.scalar(loss_name, loss_averages.average(l)) + + # Add dependency to compute loss_averages. + with tf.control_dependencies([loss_averages_op]): + total_loss = tf.identity(total_loss) + + # Track the moving averages of all trainable variables. + # Note that we maintain a 'double-average' of the BatchNormalization + # global statistics. + # This is not needed when the number of replicas are small but important + # for synchronous distributed training with tens of workers/replicas. + exp_moving_averager = tf.train.ExponentialMovingAverage( + inception.MOVING_AVERAGE_DECAY, global_step) + + variables_to_average = ( + tf.trainable_variables() + tf.moving_average_variables()) + + # Add histograms for model variables. + for var in variables_to_average: + tf.summary.histogram(var.op.name, var) + + # Create synchronous replica optimizer. + opt = tf.train.SyncReplicasOptimizer( + opt, + replicas_to_aggregate=num_replicas_to_aggregate, + total_num_replicas=num_workers, + variable_averages=exp_moving_averager, + variables_to_average=variables_to_average) + + batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION) + assert batchnorm_updates, 'Batchnorm updates are missing' + batchnorm_updates_op = tf.group(*batchnorm_updates) + # Add dependency to compute batchnorm_updates. + with tf.control_dependencies([batchnorm_updates_op]): + total_loss = tf.identity(total_loss) + + # Compute gradients with respect to the loss. + grads = opt.compute_gradients(total_loss) + + # Add histograms for gradients. + for grad, var in grads: + if grad is not None: + tf.summary.histogram(var.op.name + '/gradients', grad) + + apply_gradients_op = opt.apply_gradients(grads, global_step=global_step) + + with tf.control_dependencies([apply_gradients_op]): + train_op = tf.identity(total_loss, name='train_op') + + # Get chief queue_runners and init_tokens, which is used to synchronize + # replicas. More details can be found in SyncReplicasOptimizer. + chief_queue_runners = [opt.get_chief_queue_runner()] + init_tokens_op = opt.get_init_tokens_op() + + # Create a saver. 
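+      # The Supervisor below uses this saver to checkpoint every
+      # save_interval_secs, and the chief writes a final checkpoint when
+      # training ends.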
+ saver = tf.train.Saver() + + # Build the summary operation based on the TF collection of Summaries. + summary_op = tf.summary.merge_all() + + # Build an initialization operation to run below. + init_op = tf.global_variables_initializer() + + # We run the summaries in the same thread as the training operations by + # passing in None for summary_op to avoid a summary_thread being started. + # Running summaries and training operations in parallel could run out of + # GPU memory. + sv = tf.train.Supervisor(is_chief=is_chief, + logdir=FLAGS.train_dir, + init_op=init_op, + summary_op=None, + global_step=global_step, + saver=saver, + save_model_secs=FLAGS.save_interval_secs) + + tf.logging.info('%s Supervisor' % datetime.now()) + + sess_config = tf.ConfigProto( + allow_soft_placement=True, + log_device_placement=FLAGS.log_device_placement) + + # Get a session. + sess = sv.prepare_or_wait_for_session(target, config=sess_config) + + # Start the queue runners. + queue_runners = tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS) + sv.start_queue_runners(sess, queue_runners) + tf.logging.info('Started %d queues for processing input data.', + len(queue_runners)) + + if is_chief: + sv.start_queue_runners(sess, chief_queue_runners) + sess.run(init_tokens_op) + + # Train, checking for Nans. Concurrently run the summary operation at a + # specified interval. Note that the summary_op and train_op never run + # simultaneously in order to prevent running out of GPU memory. + next_summary_time = time.time() + FLAGS.save_summaries_secs + while not sv.should_stop(): + try: + start_time = time.time() + loss_value, step = sess.run([train_op, global_step]) + assert not np.isnan(loss_value), 'Model diverged with loss = NaN' + if step > FLAGS.max_steps: + break + duration = time.time() - start_time + + if step % 30 == 0: + examples_per_sec = FLAGS.batch_size / float(duration) + format_str = ('Worker %d: %s: step %d, loss = %.2f' + '(%.1f examples/sec; %.3f sec/batch)') + tf.logging.info(format_str % + (FLAGS.task_id, datetime.now(), step, loss_value, + examples_per_sec, duration)) + + # Determine if the summary_op should be run on the chief worker. + if is_chief and next_summary_time < time.time(): + tf.logging.info('Running Summary operation on the chief.') + summary_str = sess.run(summary_op) + sv.summary_computed(sess, summary_str) + tf.logging.info('Finished running Summary operation.') + + # Determine the next time for running the summary. + next_summary_time += FLAGS.save_summaries_secs + except: + if is_chief: + tf.logging.info('Chief got exception while running!') + raise + + # Stop the supervisor. This also waits for service threads to finish. + sv.stop() + + # Save after the training ends. + if is_chief: + saver.save(sess, + os.path.join(FLAGS.train_dir, 'model.ckpt'), + global_step=global_step) diff --git a/models/research/inception/inception/inception_eval.py b/models/research/inception/inception/inception_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..e7cfc3c399dd82a915b3a49c7ddd4a8565292f69 --- /dev/null +++ b/models/research/inception/inception/inception_eval.py @@ -0,0 +1,171 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""A library to evaluate Inception on a single GPU. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from datetime import datetime +import math +import os.path +import time + + +import numpy as np +import tensorflow as tf + +from inception import image_processing +from inception import inception_model as inception + + +FLAGS = tf.app.flags.FLAGS + +tf.app.flags.DEFINE_string('eval_dir', '/tmp/imagenet_eval', + """Directory where to write event logs.""") +tf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/imagenet_train', + """Directory where to read model checkpoints.""") + +# Flags governing the frequency of the eval. +tf.app.flags.DEFINE_integer('eval_interval_secs', 60 * 5, + """How often to run the eval.""") +tf.app.flags.DEFINE_boolean('run_once', False, + """Whether to run eval only once.""") + +# Flags governing the data used for the eval. +tf.app.flags.DEFINE_integer('num_examples', 50000, + """Number of examples to run. Note that the eval """ + """ImageNet dataset contains 50000 examples.""") +tf.app.flags.DEFINE_string('subset', 'validation', + """Either 'validation' or 'train'.""") + + +def _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op): + """Runs Eval once. + + Args: + saver: Saver. + summary_writer: Summary writer. + top_1_op: Top 1 op. + top_5_op: Top 5 op. + summary_op: Summary op. + """ + with tf.Session() as sess: + ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir) + if ckpt and ckpt.model_checkpoint_path: + if os.path.isabs(ckpt.model_checkpoint_path): + # Restores from checkpoint with absolute path. + saver.restore(sess, ckpt.model_checkpoint_path) + else: + # Restores from checkpoint with relative path. + saver.restore(sess, os.path.join(FLAGS.checkpoint_dir, + ckpt.model_checkpoint_path)) + + # Assuming model_checkpoint_path looks something like: + # /my-favorite-path/imagenet_train/model.ckpt-0, + # extract global_step from it. + global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1] + print('Successfully loaded model from %s at step=%s.' % + (ckpt.model_checkpoint_path, global_step)) + else: + print('No checkpoint file found') + return + + # Start the queue runners. + coord = tf.train.Coordinator() + try: + threads = [] + for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS): + threads.extend(qr.create_threads(sess, coord=coord, daemon=True, + start=True)) + + num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size)) + # Counts the number of correct predictions. + count_top_1 = 0.0 + count_top_5 = 0.0 + total_sample_count = num_iter * FLAGS.batch_size + step = 0 + + print('%s: starting evaluation on (%s).' 
% (datetime.now(), FLAGS.subset)) + start_time = time.time() + while step < num_iter and not coord.should_stop(): + top_1, top_5 = sess.run([top_1_op, top_5_op]) + count_top_1 += np.sum(top_1) + count_top_5 += np.sum(top_5) + step += 1 + if step % 20 == 0: + duration = time.time() - start_time + sec_per_batch = duration / 20.0 + examples_per_sec = FLAGS.batch_size / sec_per_batch + print('%s: [%d batches out of %d] (%.1f examples/sec; %.3f' + 'sec/batch)' % (datetime.now(), step, num_iter, + examples_per_sec, sec_per_batch)) + start_time = time.time() + + # Compute precision @ 1. + precision_at_1 = count_top_1 / total_sample_count + recall_at_5 = count_top_5 / total_sample_count + print('%s: precision @ 1 = %.4f recall @ 5 = %.4f [%d examples]' % + (datetime.now(), precision_at_1, recall_at_5, total_sample_count)) + + summary = tf.Summary() + summary.ParseFromString(sess.run(summary_op)) + summary.value.add(tag='Precision @ 1', simple_value=precision_at_1) + summary.value.add(tag='Recall @ 5', simple_value=recall_at_5) + summary_writer.add_summary(summary, global_step) + + except Exception as e: # pylint: disable=broad-except + coord.request_stop(e) + + coord.request_stop() + coord.join(threads, stop_grace_period_secs=10) + + +def evaluate(dataset): + """Evaluate model on Dataset for a number of steps.""" + with tf.Graph().as_default(): + # Get images and labels from the dataset. + images, labels = image_processing.inputs(dataset) + + # Number of classes in the Dataset label set plus 1. + # Label 0 is reserved for an (unused) background class. + num_classes = dataset.num_classes() + 1 + + # Build a Graph that computes the logits predictions from the + # inference model. + logits, _ = inception.inference(images, num_classes) + + # Calculate predictions. + top_1_op = tf.nn.in_top_k(logits, labels, 1) + top_5_op = tf.nn.in_top_k(logits, labels, 5) + + # Restore the moving average version of the learned variables for eval. + variable_averages = tf.train.ExponentialMovingAverage( + inception.MOVING_AVERAGE_DECAY) + variables_to_restore = variable_averages.variables_to_restore() + saver = tf.train.Saver(variables_to_restore) + + # Build the summary operation based on the TF collection of Summaries. + summary_op = tf.summary.merge_all() + + graph_def = tf.get_default_graph().as_graph_def() + summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, + graph_def=graph_def) + + while True: + _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op) + if FLAGS.run_once: + break + time.sleep(FLAGS.eval_interval_secs) diff --git a/models/research/inception/inception/inception_model.py b/models/research/inception/inception/inception_model.py new file mode 100644 index 0000000000000000000000000000000000000000..fedae13ae712f09d23ff020b161d86e87ee46e95 --- /dev/null +++ b/models/research/inception/inception/inception_model.py @@ -0,0 +1,157 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Build the Inception v3 network on ImageNet data set. + +The Inception v3 architecture is described in http://arxiv.org/abs/1512.00567 + +Summary of available functions: + inference: Compute inference on the model inputs to make a prediction + loss: Compute the loss of the prediction with respect to the labels +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import re + +import tensorflow as tf + +from inception.slim import slim + +FLAGS = tf.app.flags.FLAGS + +# If a model is trained using multiple GPUs, prefix all Op names with tower_name +# to differentiate the operations. Note that this prefix is removed from the +# names of the summaries when visualizing a model. +TOWER_NAME = 'tower' + +# Batch normalization. Constant governing the exponential moving average of +# the 'global' mean and variance for all activations. +BATCHNORM_MOVING_AVERAGE_DECAY = 0.9997 + +# The decay to use for the moving average. +MOVING_AVERAGE_DECAY = 0.9999 + + +def inference(images, num_classes, for_training=False, restore_logits=True, + scope=None): + """Build Inception v3 model architecture. + + See here for reference: http://arxiv.org/abs/1512.00567 + + Args: + images: Images returned from inputs() or distorted_inputs(). + num_classes: number of classes + for_training: If set to `True`, build the inference model for training. + Kernels that operate differently for inference during training + e.g. dropout, are appropriately configured. + restore_logits: whether or not the logits layers should be restored. + Useful for fine-tuning a model with different num_classes. + scope: optional prefix string identifying the ImageNet tower. + + Returns: + Logits. 2-D float Tensor. + Auxiliary Logits. 2-D float Tensor of side-head. Used for training only. + """ + # Parameters for BatchNorm. + batch_norm_params = { + # Decay for the moving averages. + 'decay': BATCHNORM_MOVING_AVERAGE_DECAY, + # epsilon to prevent 0s in variance. + 'epsilon': 0.001, + } + # Set weight_decay for weights in Conv and FC layers. + with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004): + with slim.arg_scope([slim.ops.conv2d], + stddev=0.1, + activation=tf.nn.relu, + batch_norm_params=batch_norm_params): + logits, endpoints = slim.inception.inception_v3( + images, + dropout_keep_prob=0.8, + num_classes=num_classes, + is_training=for_training, + restore_logits=restore_logits, + scope=scope) + + # Add summaries for viewing model statistics on TensorBoard. + _activation_summaries(endpoints) + + # Grab the logits associated with the side head. Employed during training. + auxiliary_logits = endpoints['aux_logits'] + + return logits, auxiliary_logits + + +def loss(logits, labels, batch_size=None): + """Adds all losses for the model. + + Note the final loss is not returned. Instead, the list of losses are collected + by slim.losses. The losses are accumulated in tower_loss() and summed to + calculate the total loss. + + Args: + logits: List of logits from inference(). Each entry is a 2-D float Tensor. + labels: Labels from distorted_inputs or inputs(). 1-D tensor + of shape [batch_size] + batch_size: integer + """ + if not batch_size: + batch_size = FLAGS.batch_size + + # Reshape the labels into a dense Tensor of + # shape [FLAGS.batch_size, num_classes]. 
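+  # For example, with batch_size=4 and labels [2, 0, 1, 0] this yields a
+  # [4, num_classes] one-hot matrix, which the cross entropy losses below
+  # soften with label_smoothing=0.1.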
+ sparse_labels = tf.reshape(labels, [batch_size, 1]) + indices = tf.reshape(tf.range(batch_size), [batch_size, 1]) + concated = tf.concat(axis=1, values=[indices, sparse_labels]) + num_classes = logits[0].get_shape()[-1].value + dense_labels = tf.sparse_to_dense(concated, + [batch_size, num_classes], + 1.0, 0.0) + + # Cross entropy loss for the main softmax prediction. + slim.losses.cross_entropy_loss(logits[0], + dense_labels, + label_smoothing=0.1, + weight=1.0) + + # Cross entropy loss for the auxiliary softmax head. + slim.losses.cross_entropy_loss(logits[1], + dense_labels, + label_smoothing=0.1, + weight=0.4, + scope='aux_loss') + + +def _activation_summary(x): + """Helper to create summaries for activations. + + Creates a summary that provides a histogram of activations. + Creates a summary that measure the sparsity of activations. + + Args: + x: Tensor + """ + # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training + # session. This helps the clarity of presentation on tensorboard. + tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name) + tf.summary.histogram(tensor_name + '/activations', x) + tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x)) + + +def _activation_summaries(endpoints): + with tf.name_scope('summaries'): + for act in endpoints.values(): + _activation_summary(act) diff --git a/models/research/inception/inception/inception_train.py b/models/research/inception/inception/inception_train.py new file mode 100644 index 0000000000000000000000000000000000000000..e1c32713b2012aec8a18637ec5dd79a1cc84d90f --- /dev/null +++ b/models/research/inception/inception/inception_train.py @@ -0,0 +1,357 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""A library to train Inception using multiple GPUs with synchronous updates. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +from datetime import datetime +import os.path +import re +import time + +import numpy as np +import tensorflow as tf + +from inception import image_processing +from inception import inception_model as inception +from inception.slim import slim + +FLAGS = tf.app.flags.FLAGS + +tf.app.flags.DEFINE_string('train_dir', '/tmp/imagenet_train', + """Directory where to write event logs """ + """and checkpoint.""") +tf.app.flags.DEFINE_integer('max_steps', 10000000, + """Number of batches to run.""") +tf.app.flags.DEFINE_string('subset', 'train', + """Either 'train' or 'validation'.""") + +# Flags governing the hardware employed for running TensorFlow. +tf.app.flags.DEFINE_integer('num_gpus', 1, + """How many GPUs to use.""") +tf.app.flags.DEFINE_boolean('log_device_placement', False, + """Whether to log device placement.""") + +# Flags governing the type of training. 
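+# --fine_tune and --pretrained_model_checkpoint_path are typically used
+# together, e.g. when adapting a pretrained ImageNet checkpoint to a new
+# dataset such as flowers (see README.md).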
+tf.app.flags.DEFINE_boolean('fine_tune', False, + """If set, randomly initialize the final layer """ + """of weights in order to train the network on a """ + """new task.""") +tf.app.flags.DEFINE_string('pretrained_model_checkpoint_path', '', + """If specified, restore this pretrained model """ + """before beginning any training.""") + +# **IMPORTANT** +# Please note that this learning rate schedule is heavily dependent on the +# hardware architecture, batch size and any changes to the model architecture +# specification. Selecting a finely tuned learning rate schedule is an +# empirical process that requires some experimentation. Please see README.md +# more guidance and discussion. +# +# With 8 Tesla K40's and a batch size = 256, the following setup achieves +# precision@1 = 73.5% after 100 hours and 100K steps (20 epochs). +# Learning rate decay factor selected from http://arxiv.org/abs/1404.5997. +tf.app.flags.DEFINE_float('initial_learning_rate', 0.1, + """Initial learning rate.""") +tf.app.flags.DEFINE_float('num_epochs_per_decay', 30.0, + """Epochs after which learning rate decays.""") +tf.app.flags.DEFINE_float('learning_rate_decay_factor', 0.16, + """Learning rate decay factor.""") + +# Constants dictating the learning rate schedule. +RMSPROP_DECAY = 0.9 # Decay term for RMSProp. +RMSPROP_MOMENTUM = 0.9 # Momentum in RMSProp. +RMSPROP_EPSILON = 1.0 # Epsilon term for RMSProp. + + +def _tower_loss(images, labels, num_classes, scope, reuse_variables=None): + """Calculate the total loss on a single tower running the ImageNet model. + + We perform 'batch splitting'. This means that we cut up a batch across + multiple GPUs. For instance, if the batch size = 32 and num_gpus = 2, + then each tower will operate on an batch of 16 images. + + Args: + images: Images. 4D tensor of size [batch_size, FLAGS.image_size, + FLAGS.image_size, 3]. + labels: 1-D integer Tensor of [batch_size]. + num_classes: number of classes + scope: unique prefix string identifying the ImageNet tower, e.g. + 'tower_0'. + + Returns: + Tensor of shape [] containing the total loss for a batch of data + """ + # When fine-tuning a model, we do not restore the logits but instead we + # randomly initialize the logits. The number of classes in the output of the + # logit is the number of classes in specified Dataset. + restore_logits = not FLAGS.fine_tune + + # Build inference Graph. + with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables): + logits = inception.inference(images, num_classes, for_training=True, + restore_logits=restore_logits, + scope=scope) + + # Build the portion of the Graph calculating the losses. Note that we will + # assemble the total_loss using a custom function below. + split_batch_size = images.get_shape().as_list()[0] + inception.loss(logits, labels, batch_size=split_batch_size) + + # Assemble all of the losses for the current tower only. + losses = tf.get_collection(slim.losses.LOSSES_COLLECTION, scope) + + # Calculate the total loss for the current tower. + regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) + total_loss = tf.add_n(losses + regularization_losses, name='total_loss') + + # Compute the moving average of all individual losses and the total loss. + loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg') + loss_averages_op = loss_averages.apply(losses + [total_loss]) + + # Attach a scalar summmary to all individual losses and the total loss; do the + # same for the averaged version of the losses. 
+ for l in losses + [total_loss]: + # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training + # session. This helps the clarity of presentation on TensorBoard. + loss_name = re.sub('%s_[0-9]*/' % inception.TOWER_NAME, '', l.op.name) + # Name each loss as '(raw)' and name the moving average version of the loss + # as the original loss name. + tf.summary.scalar(loss_name +' (raw)', l) + tf.summary.scalar(loss_name, loss_averages.average(l)) + + with tf.control_dependencies([loss_averages_op]): + total_loss = tf.identity(total_loss) + return total_loss + + +def _average_gradients(tower_grads): + """Calculate the average gradient for each shared variable across all towers. + + Note that this function provides a synchronization point across all towers. + + Args: + tower_grads: List of lists of (gradient, variable) tuples. The outer list + is over individual gradients. The inner list is over the gradient + calculation for each tower. + Returns: + List of pairs of (gradient, variable) where the gradient has been averaged + across all towers. + """ + average_grads = [] + for grad_and_vars in zip(*tower_grads): + # Note that each grad_and_vars looks like the following: + # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN)) + grads = [] + for g, _ in grad_and_vars: + # Add 0 dimension to the gradients to represent the tower. + expanded_g = tf.expand_dims(g, 0) + + # Append on a 'tower' dimension which we will average over below. + grads.append(expanded_g) + + # Average over the 'tower' dimension. + grad = tf.concat(axis=0, values=grads) + grad = tf.reduce_mean(grad, 0) + + # Keep in mind that the Variables are redundant because they are shared + # across towers. So .. we will just return the first tower's pointer to + # the Variable. + v = grad_and_vars[0][1] + grad_and_var = (grad, v) + average_grads.append(grad_and_var) + return average_grads + + +def train(dataset): + """Train on dataset for a number of steps.""" + with tf.Graph().as_default(), tf.device('/cpu:0'): + # Create a variable to count the number of train() calls. This equals the + # number of batches processed * FLAGS.num_gpus. + global_step = tf.get_variable( + 'global_step', [], + initializer=tf.constant_initializer(0), trainable=False) + + # Calculate the learning rate schedule. + num_batches_per_epoch = (dataset.num_examples_per_epoch() / + FLAGS.batch_size) + decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay) + + # Decay the learning rate exponentially based on the number of steps. + lr = tf.train.exponential_decay(FLAGS.initial_learning_rate, + global_step, + decay_steps, + FLAGS.learning_rate_decay_factor, + staircase=True) + + # Create an optimizer that performs gradient descent. + opt = tf.train.RMSPropOptimizer(lr, RMSPROP_DECAY, + momentum=RMSPROP_MOMENTUM, + epsilon=RMSPROP_EPSILON) + + # Get images and labels for ImageNet and split the batch across GPUs. + assert FLAGS.batch_size % FLAGS.num_gpus == 0, ( + 'Batch size must be divisible by number of GPUs') + split_batch_size = int(FLAGS.batch_size / FLAGS.num_gpus) + + # Override the number of preprocessing threads to account for the increased + # number of GPU towers. + num_preprocess_threads = FLAGS.num_preprocess_threads * FLAGS.num_gpus + images, labels = image_processing.distorted_inputs( + dataset, + num_preprocess_threads=num_preprocess_threads) + + input_summaries = copy.copy(tf.get_collection(tf.GraphKeys.SUMMARIES)) + + # Number of classes in the Dataset label set plus 1. 
+ # Label 0 is reserved for an (unused) background class. + num_classes = dataset.num_classes() + 1 + + # Split the batch of images and labels for towers. + images_splits = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=images) + labels_splits = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=labels) + + # Calculate the gradients for each model tower. + tower_grads = [] + reuse_variables = None + for i in range(FLAGS.num_gpus): + with tf.device('/gpu:%d' % i): + with tf.name_scope('%s_%d' % (inception.TOWER_NAME, i)) as scope: + # Force all Variables to reside on the CPU. + with slim.arg_scope([slim.variables.variable], device='/cpu:0'): + # Calculate the loss for one tower of the ImageNet model. This + # function constructs the entire ImageNet model but shares the + # variables across all towers. + loss = _tower_loss(images_splits[i], labels_splits[i], num_classes, + scope, reuse_variables) + + # Reuse variables for the next tower. + reuse_variables = True + + # Retain the summaries from the final tower. + summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope) + + # Retain the Batch Normalization updates operations only from the + # final tower. Ideally, we should grab the updates from all towers + # but these stats accumulate extremely fast so we can ignore the + # other stats from the other towers without significant detriment. + batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION, + scope) + + # Calculate the gradients for the batch of data on this ImageNet + # tower. + grads = opt.compute_gradients(loss) + + # Keep track of the gradients across all towers. + tower_grads.append(grads) + + # We must calculate the mean of each gradient. Note that this is the + # synchronization point across all towers. + grads = _average_gradients(tower_grads) + + # Add a summaries for the input processing and global_step. + summaries.extend(input_summaries) + + # Add a summary to track the learning rate. + summaries.append(tf.summary.scalar('learning_rate', lr)) + + # Add histograms for gradients. + for grad, var in grads: + if grad is not None: + summaries.append( + tf.summary.histogram(var.op.name + '/gradients', grad)) + + # Apply the gradients to adjust the shared variables. + apply_gradient_op = opt.apply_gradients(grads, global_step=global_step) + + # Add histograms for trainable variables. + for var in tf.trainable_variables(): + summaries.append(tf.summary.histogram(var.op.name, var)) + + # Track the moving averages of all trainable variables. + # Note that we maintain a "double-average" of the BatchNormalization + # global statistics. This is more complicated then need be but we employ + # this for backward-compatibility with our previous models. + variable_averages = tf.train.ExponentialMovingAverage( + inception.MOVING_AVERAGE_DECAY, global_step) + + # Another possibility is to use tf.slim.get_variables(). + variables_to_average = (tf.trainable_variables() + + tf.moving_average_variables()) + variables_averages_op = variable_averages.apply(variables_to_average) + + # Group all updates to into a single train op. + batchnorm_updates_op = tf.group(*batchnorm_updates) + train_op = tf.group(apply_gradient_op, variables_averages_op, + batchnorm_updates_op) + + # Create a saver. + saver = tf.train.Saver(tf.global_variables()) + + # Build the summary operation from the last tower summaries. + summary_op = tf.summary.merge(summaries) + + # Build an initialization operation to run below. 
+ init = tf.global_variables_initializer() + + # Start running operations on the Graph. allow_soft_placement must be set to + # True to build towers on GPU, as some of the ops do not have GPU + # implementations. + sess = tf.Session(config=tf.ConfigProto( + allow_soft_placement=True, + log_device_placement=FLAGS.log_device_placement)) + sess.run(init) + + if FLAGS.pretrained_model_checkpoint_path: + assert tf.gfile.Exists(FLAGS.pretrained_model_checkpoint_path) + variables_to_restore = tf.get_collection( + slim.variables.VARIABLES_TO_RESTORE) + restorer = tf.train.Saver(variables_to_restore) + restorer.restore(sess, FLAGS.pretrained_model_checkpoint_path) + print('%s: Pre-trained model restored from %s' % + (datetime.now(), FLAGS.pretrained_model_checkpoint_path)) + + # Start the queue runners. + tf.train.start_queue_runners(sess=sess) + + summary_writer = tf.summary.FileWriter( + FLAGS.train_dir, + graph=sess.graph) + + for step in range(FLAGS.max_steps): + start_time = time.time() + _, loss_value = sess.run([train_op, loss]) + duration = time.time() - start_time + + assert not np.isnan(loss_value), 'Model diverged with loss = NaN' + + if step % 10 == 0: + examples_per_sec = FLAGS.batch_size / float(duration) + format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f ' + 'sec/batch)') + print(format_str % (datetime.now(), step, loss_value, + examples_per_sec, duration)) + + if step % 100 == 0: + summary_str = sess.run(summary_op) + summary_writer.add_summary(summary_str, step) + + # Save the model checkpoint periodically. + if step % 5000 == 0 or (step + 1) == FLAGS.max_steps: + checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt') + saver.save(sess, checkpoint_path, global_step=step) diff --git a/models/research/inception/inception/slim/BUILD b/models/research/inception/inception/slim/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..174e77d5c2654380232174a2bb8b29c6b9affc5d --- /dev/null +++ b/models/research/inception/inception/slim/BUILD @@ -0,0 +1,112 @@ +# Description: +# Contains the operations and nets for building TensorFlow-Slim models. 
+ +package(default_visibility = ["//inception:internal"]) + +licenses(["notice"]) # Apache 2.0 + +exports_files(["LICENSE"]) + +py_library( + name = "scopes", + srcs = ["scopes.py"], +) + +py_test( + name = "scopes_test", + size = "small", + srcs = ["scopes_test.py"], + deps = [ + ":scopes", + ], +) + +py_library( + name = "variables", + srcs = ["variables.py"], + deps = [ + ":scopes", + ], +) + +py_test( + name = "variables_test", + size = "small", + srcs = ["variables_test.py"], + deps = [ + ":variables", + ], +) + +py_library( + name = "losses", + srcs = ["losses.py"], +) + +py_test( + name = "losses_test", + size = "small", + srcs = ["losses_test.py"], + deps = [ + ":losses", + ], +) + +py_library( + name = "ops", + srcs = ["ops.py"], + deps = [ + ":losses", + ":scopes", + ":variables", + ], +) + +py_test( + name = "ops_test", + size = "small", + srcs = ["ops_test.py"], + deps = [ + ":ops", + ":variables", + ], +) + +py_library( + name = "inception", + srcs = ["inception_model.py"], + deps = [ + ":ops", + ":scopes", + ], +) + +py_test( + name = "inception_test", + size = "medium", + srcs = ["inception_test.py"], + deps = [ + ":inception", + ], +) + +py_library( + name = "slim", + srcs = ["slim.py"], + deps = [ + ":inception", + ":losses", + ":ops", + ":scopes", + ":variables", + ], +) + +py_test( + name = "collections_test", + size = "small", + srcs = ["collections_test.py"], + deps = [ + ":slim", + ], +) diff --git a/models/research/inception/inception/slim/README.md b/models/research/inception/inception/slim/README.md new file mode 100644 index 0000000000000000000000000000000000000000..36d8b7eb19ae47d8810ed97abe203aa34be50a75 --- /dev/null +++ b/models/research/inception/inception/slim/README.md @@ -0,0 +1,621 @@ +# TensorFlow-Slim + +TF-Slim is a lightweight library for defining, training and evaluating models in +TensorFlow. It enables defining complex networks quickly and concisely while +keeping a model's architecture transparent and its hyperparameters explicit. + +[TOC] + +## Teaser + +As a demonstration of the simplicity of using TF-Slim, compare the simplicity of +the code necessary for defining the entire [VGG](http://www.robots.ox.ac.uk/~vgg/research/very_deep/) network using TF-Slim to +the lengthy and verbose nature of defining just the first three layers (out of +16) using native tensorflow: + +```python{.good} +# VGG16 in TF-Slim. 
+def vgg16(inputs):
+  with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], stddev=0.01, weight_decay=0.0005):
+    net = slim.ops.repeat_op(2, inputs, slim.ops.conv2d, 64, [3, 3], scope='conv1')
+    net = slim.ops.max_pool(net, [2, 2], scope='pool1')
+    net = slim.ops.repeat_op(2, net, slim.ops.conv2d, 128, [3, 3], scope='conv2')
+    net = slim.ops.max_pool(net, [2, 2], scope='pool2')
+    net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 256, [3, 3], scope='conv3')
+    net = slim.ops.max_pool(net, [2, 2], scope='pool3')
+    net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 512, [3, 3], scope='conv4')
+    net = slim.ops.max_pool(net, [2, 2], scope='pool4')
+    net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 512, [3, 3], scope='conv5')
+    net = slim.ops.max_pool(net, [2, 2], scope='pool5')
+    net = slim.ops.flatten(net, scope='flatten5')
+    net = slim.ops.fc(net, 4096, scope='fc6')
+    net = slim.ops.dropout(net, 0.5, scope='dropout6')
+    net = slim.ops.fc(net, 4096, scope='fc7')
+    net = slim.ops.dropout(net, 0.5, scope='dropout7')
+    net = slim.ops.fc(net, 1000, activation=None, scope='fc8')
+  return net
+```
+
+```python{.bad}
+# Layers 1-3 (out of 16) of VGG16 in native tensorflow.
+def vgg16(inputs):
+  with tf.name_scope('conv1_1') as scope:
+    kernel = tf.Variable(tf.truncated_normal([3, 3, 3, 64], dtype=tf.float32, stddev=1e-1), name='weights')
+    conv = tf.nn.conv2d(inputs, kernel, [1, 1, 1, 1], padding='SAME')
+    biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32), trainable=True, name='biases')
+    bias = tf.nn.bias_add(conv, biases)
+    conv1 = tf.nn.relu(bias, name=scope)
+  with tf.name_scope('conv1_2') as scope:
+    kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 64], dtype=tf.float32, stddev=1e-1), name='weights')
+    conv = tf.nn.conv2d(conv1, kernel, [1, 1, 1, 1], padding='SAME')
+    biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32), trainable=True, name='biases')
+    bias = tf.nn.bias_add(conv, biases)
+    conv1 = tf.nn.relu(bias, name=scope)
+  with tf.name_scope('pool1'):
+    pool1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool1')
+```
+
+## Why TF-Slim?
+
+TF-Slim offers several advantages over just the built-in tensorflow libraries:
+
+* Allows one to define models much more compactly by eliminating boilerplate
+  code. This is accomplished through the use of [argument scoping](./scopes.py)
+  and numerous high level [operations](./ops.py). These tools increase
+  readability and maintainability, reduce the likelihood of an error from
+  copy-and-pasting hyperparameter values and simplify hyperparameter tuning.
+* Makes developing models simple by providing commonly used [loss functions](./losses.py).
+* Provides a concise [definition](./inception_model.py) of the [Inception v3](http://arxiv.org/abs/1512.00567)
+  network architecture, ready to be used out-of-the-box or subsumed into new models.
+
+Additionally, TF-Slim was designed with several principles in mind:
+
+* The various modules of TF-Slim (scopes, variables, ops, losses) are
+  independent. This flexibility allows users to pick and choose components of
+  TF-Slim completely à la carte.
+* TF-Slim is written in a functional programming style. That means it's
+  super-lightweight and can be used right alongside any of TensorFlow's native
+  operations.
+* Makes re-using network architectures easy. This allows users to build new
+  networks on top of existing ones, as well as fine-tune pre-trained models on
+  new tasks.
+
+## What are the various components of TF-Slim?
+ +TF-Slim is composed of several parts which were designed to exist independently. +These include: + +* [scopes.py](./scopes.py): provides a new scope named `arg_scope` that allows + a user to define default arguments for specific operations within that + scope. +* [variables.py](./variables.py): provides convenience wrappers for variable + creation and manipulation. +* [ops.py](./ops.py): provides high level operations for building models using + tensorflow. +* [losses.py](./losses.py): contains commonly used loss functions. + +## Defining Models + +Models can be succinctly defined using TF-Slim by combining its variables, +operations and scopes. Each of these elements are defined below. + +### Variables + +Creating [`Variables`](https://www.tensorflow.org/how_tos/variables/index.html) +in native tensorflow requires either a predefined value or an initialization +mechanism (random, normally distributed). Furthermore, if a variable needs to be +created on a specific device, such as a GPU, the specification must be [made +explicit](https://www.tensorflow.org/how_tos/using_gpu/index.html). To alleviate +the code required for variable creation, TF-Slim provides a set of thin wrapper +functions in [variables.py](./variables.py) which allow callers to easily define +variables. + +For example, to create a `weight` variable, initialize it using a truncated +normal distribution, regularize it with an `l2_loss` and place it on the `CPU`, +one need only declare the following: + +```python +weights = variables.variable('weights', + shape=[10, 10, 3 , 3], + initializer=tf.truncated_normal_initializer(stddev=0.1), + regularizer=lambda t: losses.l2_loss(t, weight=0.05), + device='/cpu:0') +``` + +In addition to the functionality provided by `tf.Variable`, `slim.variables` +keeps track of the variables created by `slim.ops` to define a model, which +allows one to distinguish variables that belong to the model versus other +variables. + +```python +# Get all the variables defined by the model. +model_variables = slim.variables.get_variables() + +# Get all the variables with the same given name, i.e. 'weights', 'biases'. +weights = slim.variables.get_variables_by_name('weights') +biases = slim.variables.get_variables_by_name('biases') + +# Get all the variables in VARIABLES_TO_RESTORE collection. +variables_to_restore = tf.get_collection(slim.variables.VARIABLES_TO_RESTORE) + + +weights = variables.variable('weights', + shape=[10, 10, 3 , 3], + initializer=tf.truncated_normal_initializer(stddev=0.1), + regularizer=lambda t: losses.l2_loss(t, weight=0.05), + device='/cpu:0') +``` + +### Operations (Layers) + +While the set of TensorFlow operations is quite extensive, builders of neural +networks typically think of models in terms of "layers". A layer, such as a +Convolutional Layer, a Fully Connected Layer or a BatchNorm Layer are more +abstract than a single TensorFlow operation and typically involve many such +operations. For example, a Convolutional Layer in a neural network is built +using several steps: + +1. Creating the weight variables +2. Creating the bias variables +3. Convolving the weights with the input from the previous layer +4. Adding the biases to the result of the convolution. + +In python code this can be rather laborious: + +```python +input = ... 
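+# Steps (1)-(4) above appear explicitly below: the 'weights' kernel, the
+# 'biases' variable, tf.nn.conv2d, and tf.nn.bias_add (with a ReLU applied on top).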
+with tf.name_scope('conv1_1') as scope:
+  kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 128], dtype=tf.float32,
+                                            stddev=1e-1), name='weights')
+  conv = tf.nn.conv2d(input, kernel, [1, 1, 1, 1], padding='SAME')
+  biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32),
+                       trainable=True, name='biases')
+  bias = tf.nn.bias_add(conv, biases)
+  conv1 = tf.nn.relu(bias, name=scope)
+```
+
+To alleviate the need to duplicate this code repeatedly, TF-Slim provides a
+number of convenient operations defined at the (more abstract) level of neural
+network layers. For example, compare the code above to an invocation of the
+TF-Slim code:
+
+```python
+input = ...
+net = slim.ops.conv2d(input, 128, [3, 3], scope='conv1_1')
+```
+
+TF-Slim provides numerous operations used in building neural networks which
+roughly correspond to such layers. These include:
+
+Layer | TF-Slim Op
+--------------------- | ------------------------
+Convolutional Layer | [ops.conv2d](./ops.py)
+Fully Connected Layer | [ops.fc](./ops.py)
+BatchNorm Layer | [ops.batch_norm](./ops.py)
+Max Pooling Layer | [ops.max_pool](./ops.py)
+Avg Pooling Layer | [ops.avg_pool](./ops.py)
+Dropout Layer | [ops.dropout](./ops.py)
+
+[ops.py](./ops.py) also includes operations that are not really "layers" per se,
+but are often used to manipulate hidden unit representations during inference:
+
+Operation | TF-Slim Op
+--------- | ---------------------
+Flatten | [ops.flatten](./ops.py)
+
+TF-Slim also provides a meta-operation called `repeat_op` that allows one to
+repeatedly perform the same operation. Consider the following snippet from the
+[VGG](https://www.robots.ox.ac.uk/~vgg/research/very_deep/) network whose layers
+perform several convolutions in a row between pooling layers:
+
+```python
+net = ...
+net = slim.ops.conv2d(net, 256, [3, 3], scope='conv3_1')
+net = slim.ops.conv2d(net, 256, [3, 3], scope='conv3_2')
+net = slim.ops.conv2d(net, 256, [3, 3], scope='conv3_3')
+net = slim.ops.max_pool(net, [2, 2], scope='pool3')
+```
+
+This clear duplication of code can be removed via a standard loop:
+
+```python
+net = ...
+for i in range(3):
+  net = slim.ops.conv2d(net, 256, [3, 3], scope='conv3_%d' % (i + 1))
+net = slim.ops.max_pool(net, [2, 2], scope='pool3')
+```
+
+While this does reduce the amount of duplication, it can be made even cleaner by
+using `repeat_op`:
+
+```python
+net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 256, [3, 3], scope='conv3')
+net = slim.ops.max_pool(net, [2, 2], scope='pool3')
+```
+
+Notice that `repeat_op` not only applies the same arguments in-line, it is also
+smart enough to unroll the scopes such that the scopes assigned to each
+subsequent call of `ops.conv2d` are appended with an underscore and iteration
+number. More concretely, the scopes in the example above would be 'conv3_1',
+'conv3_2' and 'conv3_3'.
+
+### Scopes
+
+In addition to the types of scope mechanisms in TensorFlow ([name_scope](https://www.tensorflow.org/api_docs/python/framework.html#name_scope),
+[variable_scope](https://www.tensorflow.org/api_docs/python/state_ops.html#variable_scope)),
+TF-Slim adds a new scoping mechanism called "argument scope" or [arg_scope](./scopes.py).
+This new scope allows a user to specify one or more operations and a set of
+arguments which will be passed to each of the operations defined in the
+`arg_scope`. This functionality is best illustrated by example. Consider the
+following code snippet:
+
+```python
+net = slim.ops.conv2d(inputs, 64, [11, 11], 4, padding='SAME', stddev=0.01, weight_decay=0.0005, scope='conv1')
+net = slim.ops.conv2d(net, 128, [11, 11], padding='VALID', stddev=0.01, weight_decay=0.0005, scope='conv2')
+net = slim.ops.conv2d(net, 256, [11, 11], padding='SAME', stddev=0.01, weight_decay=0.0005, scope='conv3')
+```
+
+It should be clear that these three convolution layers share many of the same
+hyperparameters. Two have the same padding; all three have the same weight_decay
+and the same standard deviation for their weights. Not only do the duplicated
+values make the code more difficult to read, they also place an additional
+burden on the writer, who needs to double-check that all of the values are
+identical in each step. One solution would be to specify default values using
+variables:
+
+```python
+padding='SAME'
+stddev=0.01
+weight_decay=0.0005
+net = slim.ops.conv2d(inputs, 64, [11, 11], 4, padding=padding, stddev=stddev, weight_decay=weight_decay, scope='conv1')
+net = slim.ops.conv2d(net, 128, [11, 11], padding='VALID', stddev=stddev, weight_decay=weight_decay, scope='conv2')
+net = slim.ops.conv2d(net, 256, [11, 11], padding=padding, stddev=stddev, weight_decay=weight_decay, scope='conv3')
+```
+
+This solution ensures that all three convolutions share the exact same
+hyperparameter values but doesn't reduce the code clutter. By using an
+`arg_scope`, we can both ensure that each layer uses the same values and
+simplify the code:
+
+```python
+with slim.arg_scope([slim.ops.conv2d], padding='SAME', stddev=0.01, weight_decay=0.0005):
+  net = slim.ops.conv2d(inputs, 64, [11, 11], scope='conv1')
+  net = slim.ops.conv2d(net, 128, [11, 11], padding='VALID', scope='conv2')
+  net = slim.ops.conv2d(net, 256, [11, 11], scope='conv3')
+```
+
+As the example illustrates, the use of arg_scope makes the code cleaner, simpler
+and easier to maintain. Notice that while argument values are specified in the
+arg_scope, they can be overridden locally. In particular, while the padding
+argument has been set to 'SAME', the second convolution overrides it with the
+value of 'VALID'.
+
+One can also nest `arg_scope`s and use multiple operations in the same scope.
+For example:
+
+```python
+with arg_scope([slim.ops.conv2d, slim.ops.fc], stddev=0.01, weight_decay=0.0005):
+  with arg_scope([slim.ops.conv2d], padding='SAME'), slim.arg_scope([slim.ops.fc], bias=1.0):
+    net = slim.ops.conv2d(inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
+    net = slim.ops.conv2d(net, 256, [5, 5], stddev=0.03, scope='conv2')
+    net = slim.ops.flatten(net)
+    net = slim.ops.fc(net, 1000, activation=None, scope='fc')
+```
+
+In this example, the first `arg_scope` applies the same `stddev` and
+`weight_decay` arguments to the `conv2d` and `fc` ops in its scope. In the
+second `arg_scope`, additional default arguments are specified for `conv2d`
+only.
+
+In addition to `arg_scope`, TF-Slim provides several decorators that wrap the
+use of tensorflow arg scopes. These include `@AddArgScope`, `@AddNameScope`,
+`@AddVariableScope`, `@AddOpScope` and `@AddVariableOpScope`. To illustrate
+their use, consider the following example.
+
+```python
+def MyNewOp(inputs):
+  varA = ...
+  varB = ...
+  outputs = tf.multiply(varA, inputs) + varB
+  return outputs
+```
+
+In this example, the user has created a new op which creates two variables.
+To ensure that these variables exist within a certain variable scope (to avoid
+collisions with variables with the same name), in standard TF, the op must be
+called within a variable scope:
+
+```python
+inputs = ...
+with tf.variable_scope('layer1'):
+  outputs = MyNewOp(inputs)
+```
+
+As an alternative, one can use TF-Slim's decorators to decorate the function and
+simplify the call:
+
+```python
+@AddVariableScope
+def MyNewOp(inputs):
+  ...
+  return outputs
+
+
+inputs = ...
+outputs = MyNewOp('layer1')
+```
+
+The `@AddVariableScope` decorator simply applies the `tf.variable_scope` scoping
+to the called function taking "layer1" as its argument. This allows the code to
+be written more concisely.
+
+### Losses
+
+The loss function defines a quantity that we want to minimize. For
+classification problems, this is typically the cross entropy between the true
+(one-hot) distribution and the predicted probability distribution across
+classes. For regression problems, this is often the sum of squared differences
+between the predicted and true values.
+
+Certain models, such as multi-task learning models, require the use of multiple
+loss functions simultaneously. In other words, the loss function ultimately
+being minimized is the sum of various other loss functions. For example,
+consider a model that predicts both the type of scene in an image as well as
+the depth from the camera of each pixel. This model's loss function would be
+the sum of the classification loss and the depth prediction loss.
+
+TF-Slim provides an easy-to-use mechanism for defining and keeping track of loss
+functions via the [losses.py](./losses.py) module. Consider the simple case
+where we want to train the VGG network:
+
+```python
+# Load the images and labels.
+images, labels = ...
+
+# Create the model.
+predictions = ...
+
+# Define the loss functions and get the total loss.
+loss = losses.cross_entropy_loss(predictions, labels)
+```
+
+In this example, we start by creating the model (using TF-Slim's VGG
+implementation) and add the standard classification loss. Now, let's turn to
+the case where we have a multi-task model that produces multiple outputs:
+
+```python
+# Load the images and labels.
+images, scene_labels, depth_labels = ...
+
+# Create the model.
+scene_predictions, depth_predictions = CreateMultiTaskModel(images)
+
+# Define the loss functions and get the total loss.
+classification_loss = slim.losses.cross_entropy_loss(scene_predictions, scene_labels)
+sum_of_squares_loss = slim.losses.l2loss(depth_predictions - depth_labels)
+
+# The following two lines have the same effect:
+total_loss1 = classification_loss + sum_of_squares_loss
+total_loss2 = tf.add_n(tf.get_collection(slim.losses.LOSSES_COLLECTION))
+```
+
+In this example, we have two losses, which we create by calling
+`losses.cross_entropy_loss` and `losses.l2loss`. We can obtain the total loss
+by adding them together (`total_loss1`) or by summing the losses that TF-Slim
+has collected for us (`total_loss2`). How did this work? When you create a loss
+function via TF-Slim, TF-Slim adds the loss to a special TensorFlow collection
+of loss functions. This enables you to either manage the total loss manually,
+or allow TF-Slim to manage it for you.
+
+What if you want to let TF-Slim manage the losses for you but have a custom loss
+function? [losses.py](./losses.py) also has a function that adds this loss to
+TF-Slim's collection. For example:
+
+```python
+# Load the images and labels.
+images, scene_labels, depth_labels, pose_labels = ...
+
+# Create the model.
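+# (CreateMultiTaskModel stands in for a user-defined model function; it is not
+# part of TF-Slim and simply returns one prediction tensor per task.)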
+scene_predictions, depth_predictions, pose_predictions = CreateMultiTaskModel(images) + +# Define the loss functions and get the total loss. +classification_loss = slim.losses.cross_entropy_loss(scene_predictions, scene_labels) +sum_of_squares_loss = slim.losses.l2loss(depth_predictions - depth_labels) +pose_loss = MyCustomLossFunction(pose_predictions, pose_labels) +tf.add_to_collection(slim.losses.LOSSES_COLLECTION, pose_loss) # Letting TF-Slim know about the additional loss. + +# The following two lines have the same effect: +total_loss1 = classification_loss + sum_of_squares_loss + pose_loss +total_loss2 = losses.GetTotalLoss() +``` + +In this example, we can again either produce the total loss function manually or +let TF-Slim know about the additional loss and let TF-Slim handle the losses. + +## Putting the Pieces Together + +By combining TF-Slim Variables, Operations and scopes, we can write a normally +very complex network with very few lines of code. For example, the entire [VGG](https://www.robots.ox.ac.uk/~vgg/research/very_deep/) architecture can be +defined with just the following snippet: + +```python +with arg_scope([slim.ops.conv2d, slim.ops.fc], stddev=0.01, weight_decay=0.0005): + net = slim.ops.repeat_op(2, inputs, slim.ops.conv2d, 64, [3, 3], scope='conv1') + net = slim.ops.max_pool(net, [2, 2], scope='pool1') + net = slim.ops.repeat_op(2, net, slim.ops.conv2d, 128, [3, 3], scope='conv2') + net = slim.ops.max_pool(net, [2, 2], scope='pool2') + net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 256, [3, 3], scope='conv3') + net = slim.ops.max_pool(net, [2, 2], scope='pool3') + net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 512, [3, 3], scope='conv4') + net = slim.ops.max_pool(net, [2, 2], scope='pool4') + net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 512, [3, 3], scope='conv5') + net = slim.ops.max_pool(net, [2, 2], scope='pool5') + net = slim.ops.flatten(net, scope='flatten5') + net = slim.ops.fc(net, 4096, scope='fc6') + net = slim.ops.dropout(net, 0.5, scope='dropout6') + net = slim.ops.fc(net, 4096, scope='fc7') + net = slim.ops.dropout(net, 0.5, scope='dropout7') + net = slim.ops.fc(net, 1000, activation=None, scope='fc8') +return net +``` + +## Re-using previously defined network architectures and pre-trained models. + +### Brief Recap on Restoring Variables from a Checkpoint + +After a model has been trained, it can be restored using `tf.train.Saver()` +which restores `Variables` from a given checkpoint. For many cases, +`tf.train.Saver()` provides a simple mechanism to restore all or just a few +variables. + +```python +# Create some variables. +v1 = tf.Variable(..., name="v1") +v2 = tf.Variable(..., name="v2") +... +# Add ops to restore all the variables. +restorer = tf.train.Saver() + +# Add ops to restore some variables. +restorer = tf.train.Saver([v1, v2]) + +# Later, launch the model, use the saver to restore variables from disk, and +# do some work with the model. +with tf.Session() as sess: + # Restore variables from disk. + restorer.restore(sess, "/tmp/model.ckpt") + print("Model restored.") + # Do some work with the model + ... +``` + +See [Restoring Variables](https://www.tensorflow.org/versions/r0.7/how_tos/variables/index.html#restoring-variables) +and [Choosing which Variables to Save and Restore](https://www.tensorflow.org/versions/r0.7/how_tos/variables/index.html#choosing-which-variables-to-save-and-restore) +sections of the [Variables](https://www.tensorflow.org/versions/r0.7/how_tos/variables/index.html) page for +more details. 
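+
+For completeness, the save side is symmetric: a `tf.train.Saver` writes the
+current variable values to a checkpoint that the snippets above can later
+restore. A minimal sketch (the checkpoint path is illustrative):
+
+```python
+# Create a saver for all variables and write a checkpoint.
+saver = tf.train.Saver()
+with tf.Session() as sess:
+  sess.run(tf.global_variables_initializer())
+  save_path = saver.save(sess, "/tmp/model.ckpt")
+  print("Model saved in file: %s" % save_path)
+```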
+ +### Using slim.variables to Track which Variables need to be Restored + +It is often desirable to fine-tune a pre-trained model on an entirely new +dataset or even a new task. In these situations, one must specify which layers +of the model should be reused (and consequently loaded from a checkpoint) and +which layers are new. Indicating which variables or layers should be restored is +a process that quickly becomes cumbersome when done manually. + +To help keep track of which variables to restore, `slim.variables` provides a +`restore` argument when creating each Variable. By default, all variables are +marked as `restore=True`, which results in all variables defined by the model +being restored. + +```python +# Create some variables. +v1 = slim.variables.variable(name="v1", ..., restore=False) +v2 = slim.variables.variable(name="v2", ...) # By default restore=True +... +# Get list of variables to restore (which contains only 'v2') +variables_to_restore = tf.get_collection(slim.variables.VARIABLES_TO_RESTORE) +restorer = tf.train.Saver(variables_to_restore) +with tf.Session() as sess: + # Restore variables from disk. + restorer.restore(sess, "/tmp/model.ckpt") + print("Model restored.") + # Do some work with the model + ... +``` + +Additionally, every layer in `slim.ops` that creates slim.variables (such as +`slim.ops.conv2d`, `slim.ops.fc`, `slim.ops.batch_norm`) also has a `restore` +argument which controls whether the variables created by that layer should be +restored or not. + +```python +# Create a small network. +net = slim.ops.conv2d(images, 32, [7, 7], stride=2, scope='conv1') +net = slim.ops.conv2d(net, 64, [3, 3], scope='conv2') +net = slim.ops.conv2d(net, 128, [3, 3], scope='conv3') +net = slim.ops.max_pool(net, [3, 3], stride=2, scope='pool3') +net = slim.ops.flatten(net) +net = slim.ops.fc(net, 10, scope='logits', restore=False) +... + +# VARIABLES_TO_RESTORE would contain the 'weights' and 'bias' defined by 'conv1' +# 'conv2' and 'conv3' but not the ones defined by 'logits' +variables_to_restore = tf.get_collection(slim.variables.VARIABLES_TO_RESTORE) + +# Create a restorer that would restore only the needed variables. +restorer = tf.train.Saver(variables_to_restore) + +# Create a saver that would save all the variables (including 'logits'). +saver = tf.train.Saver() +with tf.Session() as sess: + # Restore variables from disk. + restorer.restore(sess, "/tmp/model.ckpt") + print("Model restored.") + + # Do some work with the model + ... + saver.save(sess, "/tmp/new_model.ckpt") +``` + +Note: When restoring variables from a checkpoint, the `Saver` locates the +variable names in a checkpoint file and maps them to variables in the current +graph. Above, we created a saver by passing to it a list of variables. In this +case, the names of the variables to locate in the checkpoint file were +implicitly obtained from each provided variable's `var.op.name`. + +This works well when the variable names in the checkpoint file match those in +the graph. However, sometimes, we want to restore a model from a checkpoint +whose variables have different names those in the current graph. In this case, +we must provide the `Saver` a dictionary that maps from each checkpoint variable +name to each graph variable. 
Consider the following example where the checkpoint
+variable names are obtained via a simple function:
+
+```python
+# Assuming that 'conv1/weights' should be restored from 'vgg16/conv1/weights'
+def name_in_checkpoint(var):
+  return 'vgg16/' + var.op.name
+
+# Assuming that 'conv1/weights' and 'conv1/bias' should be restored from 'conv1/params1' and 'conv1/params2'
+def name_in_checkpoint(var):
+  if "weights" in var.op.name:
+    return var.op.name.replace("weights", "params1")
+  if "bias" in var.op.name:
+    return var.op.name.replace("bias", "params2")
+
+variables_to_restore = tf.get_collection(slim.variables.VARIABLES_TO_RESTORE)
+variables_to_restore = {name_in_checkpoint(var): var for var in variables_to_restore}
+restorer = tf.train.Saver(variables_to_restore)
+with tf.Session() as sess:
+  # Restore variables from disk.
+  restorer.restore(sess, "/tmp/model.ckpt")
+```
+
+### Reusing the VGG16 network defined in TF-Slim on a different task, e.g. PASCAL-VOC.
+
+Assuming one already has a pre-trained VGG16 model, one just needs to replace
+the last layer `fc8` with a new layer `fc8_pascal` and use `restore=False`.
+
+```python
+def vgg16_pascal(inputs):
+  with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], stddev=0.01, weight_decay=0.0005):
+    net = slim.ops.repeat_op(2, inputs, slim.ops.conv2d, 64, [3, 3], scope='conv1')
+    net = slim.ops.max_pool(net, [2, 2], scope='pool1')
+    net = slim.ops.repeat_op(2, net, slim.ops.conv2d, 128, [3, 3], scope='conv2')
+    net = slim.ops.max_pool(net, [2, 2], scope='pool2')
+    net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 256, [3, 3], scope='conv3')
+    net = slim.ops.max_pool(net, [2, 2], scope='pool3')
+    net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 512, [3, 3], scope='conv4')
+    net = slim.ops.max_pool(net, [2, 2], scope='pool4')
+    net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 512, [3, 3], scope='conv5')
+    net = slim.ops.max_pool(net, [2, 2], scope='pool5')
+    net = slim.ops.flatten(net, scope='flatten5')
+    net = slim.ops.fc(net, 4096, scope='fc6')
+    net = slim.ops.dropout(net, 0.5, scope='dropout6')
+    net = slim.ops.fc(net, 4096, scope='fc7')
+    net = slim.ops.dropout(net, 0.5, scope='dropout7')
+    # To reuse vgg16 on PASCAL-VOC, just change the last layer.
+    net = slim.ops.fc(net, 21, activation=None, scope='fc8_pascal', restore=False)
+  return net
+```
+
+## Authors
+
+Sergio Guadarrama and Nathan Silberman
diff --git a/models/research/inception/inception/slim/collections_test.py b/models/research/inception/inception/slim/collections_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a1f170edaaedae337df8e0b552a03dd82b263d4
--- /dev/null
+++ b/models/research/inception/inception/slim/collections_test.py
@@ -0,0 +1,181 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================== +"""Tests for inception.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from inception.slim import slim + + +def get_variables(scope=None): + return slim.variables.get_variables(scope) + + +def get_variables_by_name(name): + return slim.variables.get_variables_by_name(name) + + +class CollectionsTest(tf.test.TestCase): + + def testVariables(self): + batch_size = 5 + height, width = 299, 299 + with self.test_session(): + inputs = tf.random_uniform((batch_size, height, width, 3)) + with slim.arg_scope([slim.ops.conv2d], + batch_norm_params={'decay': 0.9997}): + slim.inception.inception_v3(inputs) + self.assertEqual(len(get_variables()), 388) + self.assertEqual(len(get_variables_by_name('weights')), 98) + self.assertEqual(len(get_variables_by_name('biases')), 2) + self.assertEqual(len(get_variables_by_name('beta')), 96) + self.assertEqual(len(get_variables_by_name('gamma')), 0) + self.assertEqual(len(get_variables_by_name('moving_mean')), 96) + self.assertEqual(len(get_variables_by_name('moving_variance')), 96) + + def testVariablesWithoutBatchNorm(self): + batch_size = 5 + height, width = 299, 299 + with self.test_session(): + inputs = tf.random_uniform((batch_size, height, width, 3)) + with slim.arg_scope([slim.ops.conv2d], + batch_norm_params=None): + slim.inception.inception_v3(inputs) + self.assertEqual(len(get_variables()), 196) + self.assertEqual(len(get_variables_by_name('weights')), 98) + self.assertEqual(len(get_variables_by_name('biases')), 98) + self.assertEqual(len(get_variables_by_name('beta')), 0) + self.assertEqual(len(get_variables_by_name('gamma')), 0) + self.assertEqual(len(get_variables_by_name('moving_mean')), 0) + self.assertEqual(len(get_variables_by_name('moving_variance')), 0) + + def testVariablesByLayer(self): + batch_size = 5 + height, width = 299, 299 + with self.test_session(): + inputs = tf.random_uniform((batch_size, height, width, 3)) + with slim.arg_scope([slim.ops.conv2d], + batch_norm_params={'decay': 0.9997}): + slim.inception.inception_v3(inputs) + self.assertEqual(len(get_variables()), 388) + self.assertEqual(len(get_variables('conv0')), 4) + self.assertEqual(len(get_variables('conv1')), 4) + self.assertEqual(len(get_variables('conv2')), 4) + self.assertEqual(len(get_variables('conv3')), 4) + self.assertEqual(len(get_variables('conv4')), 4) + self.assertEqual(len(get_variables('mixed_35x35x256a')), 28) + self.assertEqual(len(get_variables('mixed_35x35x288a')), 28) + self.assertEqual(len(get_variables('mixed_35x35x288b')), 28) + self.assertEqual(len(get_variables('mixed_17x17x768a')), 16) + self.assertEqual(len(get_variables('mixed_17x17x768b')), 40) + self.assertEqual(len(get_variables('mixed_17x17x768c')), 40) + self.assertEqual(len(get_variables('mixed_17x17x768d')), 40) + self.assertEqual(len(get_variables('mixed_17x17x768e')), 40) + self.assertEqual(len(get_variables('mixed_8x8x2048a')), 36) + self.assertEqual(len(get_variables('mixed_8x8x2048b')), 36) + self.assertEqual(len(get_variables('logits')), 2) + self.assertEqual(len(get_variables('aux_logits')), 10) + + def testVariablesToRestore(self): + batch_size = 5 + height, width = 299, 299 + with self.test_session(): + inputs = tf.random_uniform((batch_size, height, width, 3)) + with slim.arg_scope([slim.ops.conv2d], + batch_norm_params={'decay': 0.9997}): + slim.inception.inception_v3(inputs) + variables_to_restore = 
tf.get_collection( + slim.variables.VARIABLES_TO_RESTORE) + self.assertEqual(len(variables_to_restore), 388) + self.assertListEqual(variables_to_restore, get_variables()) + + def testVariablesToRestoreWithoutLogits(self): + batch_size = 5 + height, width = 299, 299 + with self.test_session(): + inputs = tf.random_uniform((batch_size, height, width, 3)) + with slim.arg_scope([slim.ops.conv2d], + batch_norm_params={'decay': 0.9997}): + slim.inception.inception_v3(inputs, restore_logits=False) + variables_to_restore = tf.get_collection( + slim.variables.VARIABLES_TO_RESTORE) + self.assertEqual(len(variables_to_restore), 384) + + def testRegularizationLosses(self): + batch_size = 5 + height, width = 299, 299 + with self.test_session(): + inputs = tf.random_uniform((batch_size, height, width, 3)) + with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004): + slim.inception.inception_v3(inputs) + losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) + self.assertEqual(len(losses), len(get_variables_by_name('weights'))) + + def testTotalLossWithoutRegularization(self): + batch_size = 5 + height, width = 299, 299 + num_classes = 1001 + with self.test_session(): + inputs = tf.random_uniform((batch_size, height, width, 3)) + dense_labels = tf.random_uniform((batch_size, num_classes)) + with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0): + logits, end_points = slim.inception.inception_v3( + inputs, + num_classes=num_classes) + # Cross entropy loss for the main softmax prediction. + slim.losses.cross_entropy_loss(logits, + dense_labels, + label_smoothing=0.1, + weight=1.0) + # Cross entropy loss for the auxiliary softmax head. + slim.losses.cross_entropy_loss(end_points['aux_logits'], + dense_labels, + label_smoothing=0.1, + weight=0.4, + scope='aux_loss') + losses = tf.get_collection(slim.losses.LOSSES_COLLECTION) + self.assertEqual(len(losses), 2) + + def testTotalLossWithRegularization(self): + batch_size = 5 + height, width = 299, 299 + num_classes = 1000 + with self.test_session(): + inputs = tf.random_uniform((batch_size, height, width, 3)) + dense_labels = tf.random_uniform((batch_size, num_classes)) + with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004): + logits, end_points = slim.inception.inception_v3(inputs, num_classes) + # Cross entropy loss for the main softmax prediction. + slim.losses.cross_entropy_loss(logits, + dense_labels, + label_smoothing=0.1, + weight=1.0) + # Cross entropy loss for the auxiliary softmax head. + slim.losses.cross_entropy_loss(end_points['aux_logits'], + dense_labels, + label_smoothing=0.1, + weight=0.4, + scope='aux_loss') + losses = tf.get_collection(slim.losses.LOSSES_COLLECTION) + self.assertEqual(len(losses), 2) + reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) + self.assertEqual(len(reg_losses), 98) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/inception/inception/slim/inception_model.py b/models/research/inception/inception/slim/inception_model.py new file mode 100644 index 0000000000000000000000000000000000000000..6136ab1ba68716f4f135110a4d5c518b732b23df --- /dev/null +++ b/models/research/inception/inception/slim/inception_model.py @@ -0,0 +1,356 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Inception-v3 expressed in TensorFlow-Slim. + + Usage: + + # Parameters for BatchNorm. + batch_norm_params = { + # Decay for the batch_norm moving averages. + 'decay': BATCHNORM_MOVING_AVERAGE_DECAY, + # epsilon to prevent 0s in variance. + 'epsilon': 0.001, + } + # Set weight_decay for weights in Conv and FC layers. + with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004): + with slim.arg_scope([slim.ops.conv2d], + stddev=0.1, + activation=tf.nn.relu, + batch_norm_params=batch_norm_params): + # Force all Variables to reside on the CPU. + with slim.arg_scope([slim.variables.variable], device='/cpu:0'): + logits, endpoints = slim.inception.inception_v3( + images, + dropout_keep_prob=0.8, + num_classes=num_classes, + is_training=for_training, + restore_logits=restore_logits, + scope=scope) +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from inception.slim import ops +from inception.slim import scopes + + +def inception_v3(inputs, + dropout_keep_prob=0.8, + num_classes=1000, + is_training=True, + restore_logits=True, + scope=''): + """Latest Inception from http://arxiv.org/abs/1512.00567. + + "Rethinking the Inception Architecture for Computer Vision" + + Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, + Zbigniew Wojna + + Args: + inputs: a tensor of size [batch_size, height, width, channels]. + dropout_keep_prob: dropout keep_prob. + num_classes: number of predicted classes. + is_training: whether is training or not. + restore_logits: whether or not the logits layers should be restored. + Useful for fine-tuning a model with different num_classes. + scope: Optional scope for name_scope. + + Returns: + a list containing 'logits', 'aux_logits' Tensors. + """ + # end_points will collect relevant activations for external use, for example + # summaries or losses. + end_points = {} + with tf.name_scope(scope, 'inception_v3', [inputs]): + with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout], + is_training=is_training): + with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool], + stride=1, padding='VALID'): + # 299 x 299 x 3 + end_points['conv0'] = ops.conv2d(inputs, 32, [3, 3], stride=2, + scope='conv0') + # 149 x 149 x 32 + end_points['conv1'] = ops.conv2d(end_points['conv0'], 32, [3, 3], + scope='conv1') + # 147 x 147 x 32 + end_points['conv2'] = ops.conv2d(end_points['conv1'], 64, [3, 3], + padding='SAME', scope='conv2') + # 147 x 147 x 64 + end_points['pool1'] = ops.max_pool(end_points['conv2'], [3, 3], + stride=2, scope='pool1') + # 73 x 73 x 64 + end_points['conv3'] = ops.conv2d(end_points['pool1'], 80, [1, 1], + scope='conv3') + # 73 x 73 x 80. + end_points['conv4'] = ops.conv2d(end_points['conv3'], 192, [3, 3], + scope='conv4') + # 71 x 71 x 192. + end_points['pool2'] = ops.max_pool(end_points['conv4'], [3, 3], + stride=2, scope='pool2') + # 35 x 35 x 192. 
+ net = end_points['pool2'] + # Inception blocks + with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool], + stride=1, padding='SAME'): + # mixed: 35 x 35 x 256. + with tf.variable_scope('mixed_35x35x256a'): + with tf.variable_scope('branch1x1'): + branch1x1 = ops.conv2d(net, 64, [1, 1]) + with tf.variable_scope('branch5x5'): + branch5x5 = ops.conv2d(net, 48, [1, 1]) + branch5x5 = ops.conv2d(branch5x5, 64, [5, 5]) + with tf.variable_scope('branch3x3dbl'): + branch3x3dbl = ops.conv2d(net, 64, [1, 1]) + branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3]) + branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3]) + with tf.variable_scope('branch_pool'): + branch_pool = ops.avg_pool(net, [3, 3]) + branch_pool = ops.conv2d(branch_pool, 32, [1, 1]) + net = tf.concat(axis=3, values=[branch1x1, branch5x5, branch3x3dbl, branch_pool]) + end_points['mixed_35x35x256a'] = net + # mixed_1: 35 x 35 x 288. + with tf.variable_scope('mixed_35x35x288a'): + with tf.variable_scope('branch1x1'): + branch1x1 = ops.conv2d(net, 64, [1, 1]) + with tf.variable_scope('branch5x5'): + branch5x5 = ops.conv2d(net, 48, [1, 1]) + branch5x5 = ops.conv2d(branch5x5, 64, [5, 5]) + with tf.variable_scope('branch3x3dbl'): + branch3x3dbl = ops.conv2d(net, 64, [1, 1]) + branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3]) + branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3]) + with tf.variable_scope('branch_pool'): + branch_pool = ops.avg_pool(net, [3, 3]) + branch_pool = ops.conv2d(branch_pool, 64, [1, 1]) + net = tf.concat(axis=3, values=[branch1x1, branch5x5, branch3x3dbl, branch_pool]) + end_points['mixed_35x35x288a'] = net + # mixed_2: 35 x 35 x 288. + with tf.variable_scope('mixed_35x35x288b'): + with tf.variable_scope('branch1x1'): + branch1x1 = ops.conv2d(net, 64, [1, 1]) + with tf.variable_scope('branch5x5'): + branch5x5 = ops.conv2d(net, 48, [1, 1]) + branch5x5 = ops.conv2d(branch5x5, 64, [5, 5]) + with tf.variable_scope('branch3x3dbl'): + branch3x3dbl = ops.conv2d(net, 64, [1, 1]) + branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3]) + branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3]) + with tf.variable_scope('branch_pool'): + branch_pool = ops.avg_pool(net, [3, 3]) + branch_pool = ops.conv2d(branch_pool, 64, [1, 1]) + net = tf.concat(axis=3, values=[branch1x1, branch5x5, branch3x3dbl, branch_pool]) + end_points['mixed_35x35x288b'] = net + # mixed_3: 17 x 17 x 768. + with tf.variable_scope('mixed_17x17x768a'): + with tf.variable_scope('branch3x3'): + branch3x3 = ops.conv2d(net, 384, [3, 3], stride=2, padding='VALID') + with tf.variable_scope('branch3x3dbl'): + branch3x3dbl = ops.conv2d(net, 64, [1, 1]) + branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3]) + branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3], + stride=2, padding='VALID') + with tf.variable_scope('branch_pool'): + branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID') + net = tf.concat(axis=3, values=[branch3x3, branch3x3dbl, branch_pool]) + end_points['mixed_17x17x768a'] = net + # mixed4: 17 x 17 x 768. 
+ with tf.variable_scope('mixed_17x17x768b'): + with tf.variable_scope('branch1x1'): + branch1x1 = ops.conv2d(net, 192, [1, 1]) + with tf.variable_scope('branch7x7'): + branch7x7 = ops.conv2d(net, 128, [1, 1]) + branch7x7 = ops.conv2d(branch7x7, 128, [1, 7]) + branch7x7 = ops.conv2d(branch7x7, 192, [7, 1]) + with tf.variable_scope('branch7x7dbl'): + branch7x7dbl = ops.conv2d(net, 128, [1, 1]) + branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1]) + branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [1, 7]) + branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1]) + branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7]) + with tf.variable_scope('branch_pool'): + branch_pool = ops.avg_pool(net, [3, 3]) + branch_pool = ops.conv2d(branch_pool, 192, [1, 1]) + net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool]) + end_points['mixed_17x17x768b'] = net + # mixed_5: 17 x 17 x 768. + with tf.variable_scope('mixed_17x17x768c'): + with tf.variable_scope('branch1x1'): + branch1x1 = ops.conv2d(net, 192, [1, 1]) + with tf.variable_scope('branch7x7'): + branch7x7 = ops.conv2d(net, 160, [1, 1]) + branch7x7 = ops.conv2d(branch7x7, 160, [1, 7]) + branch7x7 = ops.conv2d(branch7x7, 192, [7, 1]) + with tf.variable_scope('branch7x7dbl'): + branch7x7dbl = ops.conv2d(net, 160, [1, 1]) + branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1]) + branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7]) + branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1]) + branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7]) + with tf.variable_scope('branch_pool'): + branch_pool = ops.avg_pool(net, [3, 3]) + branch_pool = ops.conv2d(branch_pool, 192, [1, 1]) + net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool]) + end_points['mixed_17x17x768c'] = net + # mixed_6: 17 x 17 x 768. + with tf.variable_scope('mixed_17x17x768d'): + with tf.variable_scope('branch1x1'): + branch1x1 = ops.conv2d(net, 192, [1, 1]) + with tf.variable_scope('branch7x7'): + branch7x7 = ops.conv2d(net, 160, [1, 1]) + branch7x7 = ops.conv2d(branch7x7, 160, [1, 7]) + branch7x7 = ops.conv2d(branch7x7, 192, [7, 1]) + with tf.variable_scope('branch7x7dbl'): + branch7x7dbl = ops.conv2d(net, 160, [1, 1]) + branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1]) + branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7]) + branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1]) + branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7]) + with tf.variable_scope('branch_pool'): + branch_pool = ops.avg_pool(net, [3, 3]) + branch_pool = ops.conv2d(branch_pool, 192, [1, 1]) + net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool]) + end_points['mixed_17x17x768d'] = net + # mixed_7: 17 x 17 x 768. 
+ with tf.variable_scope('mixed_17x17x768e'): + with tf.variable_scope('branch1x1'): + branch1x1 = ops.conv2d(net, 192, [1, 1]) + with tf.variable_scope('branch7x7'): + branch7x7 = ops.conv2d(net, 192, [1, 1]) + branch7x7 = ops.conv2d(branch7x7, 192, [1, 7]) + branch7x7 = ops.conv2d(branch7x7, 192, [7, 1]) + with tf.variable_scope('branch7x7dbl'): + branch7x7dbl = ops.conv2d(net, 192, [1, 1]) + branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1]) + branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7]) + branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1]) + branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7]) + with tf.variable_scope('branch_pool'): + branch_pool = ops.avg_pool(net, [3, 3]) + branch_pool = ops.conv2d(branch_pool, 192, [1, 1]) + net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool]) + end_points['mixed_17x17x768e'] = net + # Auxiliary Head logits + aux_logits = tf.identity(end_points['mixed_17x17x768e']) + with tf.variable_scope('aux_logits'): + aux_logits = ops.avg_pool(aux_logits, [5, 5], stride=3, + padding='VALID') + aux_logits = ops.conv2d(aux_logits, 128, [1, 1], scope='proj') + # Shape of feature map before the final layer. + shape = aux_logits.get_shape() + aux_logits = ops.conv2d(aux_logits, 768, shape[1:3], stddev=0.01, + padding='VALID') + aux_logits = ops.flatten(aux_logits) + aux_logits = ops.fc(aux_logits, num_classes, activation=None, + stddev=0.001, restore=restore_logits) + end_points['aux_logits'] = aux_logits + # mixed_8: 8 x 8 x 1280. + # Note that the scope below is not changed to not void previous + # checkpoints. + # (TODO) Fix the scope when appropriate. + with tf.variable_scope('mixed_17x17x1280a'): + with tf.variable_scope('branch3x3'): + branch3x3 = ops.conv2d(net, 192, [1, 1]) + branch3x3 = ops.conv2d(branch3x3, 320, [3, 3], stride=2, + padding='VALID') + with tf.variable_scope('branch7x7x3'): + branch7x7x3 = ops.conv2d(net, 192, [1, 1]) + branch7x7x3 = ops.conv2d(branch7x7x3, 192, [1, 7]) + branch7x7x3 = ops.conv2d(branch7x7x3, 192, [7, 1]) + branch7x7x3 = ops.conv2d(branch7x7x3, 192, [3, 3], + stride=2, padding='VALID') + with tf.variable_scope('branch_pool'): + branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID') + net = tf.concat(axis=3, values=[branch3x3, branch7x7x3, branch_pool]) + end_points['mixed_17x17x1280a'] = net + # mixed_9: 8 x 8 x 2048. + with tf.variable_scope('mixed_8x8x2048a'): + with tf.variable_scope('branch1x1'): + branch1x1 = ops.conv2d(net, 320, [1, 1]) + with tf.variable_scope('branch3x3'): + branch3x3 = ops.conv2d(net, 384, [1, 1]) + branch3x3 = tf.concat(axis=3, values=[ops.conv2d(branch3x3, 384, [1, 3]), + ops.conv2d(branch3x3, 384, [3, 1])]) + with tf.variable_scope('branch3x3dbl'): + branch3x3dbl = ops.conv2d(net, 448, [1, 1]) + branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3]) + branch3x3dbl = tf.concat(axis=3, values=[ops.conv2d(branch3x3dbl, 384, [1, 3]), + ops.conv2d(branch3x3dbl, 384, [3, 1])]) + with tf.variable_scope('branch_pool'): + branch_pool = ops.avg_pool(net, [3, 3]) + branch_pool = ops.conv2d(branch_pool, 192, [1, 1]) + net = tf.concat(axis=3, values=[branch1x1, branch3x3, branch3x3dbl, branch_pool]) + end_points['mixed_8x8x2048a'] = net + # mixed_10: 8 x 8 x 2048. 
+ with tf.variable_scope('mixed_8x8x2048b'): + with tf.variable_scope('branch1x1'): + branch1x1 = ops.conv2d(net, 320, [1, 1]) + with tf.variable_scope('branch3x3'): + branch3x3 = ops.conv2d(net, 384, [1, 1]) + branch3x3 = tf.concat(axis=3, values=[ops.conv2d(branch3x3, 384, [1, 3]), + ops.conv2d(branch3x3, 384, [3, 1])]) + with tf.variable_scope('branch3x3dbl'): + branch3x3dbl = ops.conv2d(net, 448, [1, 1]) + branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3]) + branch3x3dbl = tf.concat(axis=3, values=[ops.conv2d(branch3x3dbl, 384, [1, 3]), + ops.conv2d(branch3x3dbl, 384, [3, 1])]) + with tf.variable_scope('branch_pool'): + branch_pool = ops.avg_pool(net, [3, 3]) + branch_pool = ops.conv2d(branch_pool, 192, [1, 1]) + net = tf.concat(axis=3, values=[branch1x1, branch3x3, branch3x3dbl, branch_pool]) + end_points['mixed_8x8x2048b'] = net + # Final pooling and prediction + with tf.variable_scope('logits'): + shape = net.get_shape() + net = ops.avg_pool(net, shape[1:3], padding='VALID', scope='pool') + # 1 x 1 x 2048 + net = ops.dropout(net, dropout_keep_prob, scope='dropout') + net = ops.flatten(net, scope='flatten') + # 2048 + logits = ops.fc(net, num_classes, activation=None, scope='logits', + restore=restore_logits) + # 1000 + end_points['logits'] = logits + end_points['predictions'] = tf.nn.softmax(logits, name='predictions') + return logits, end_points + + +def inception_v3_parameters(weight_decay=0.00004, stddev=0.1, + batch_norm_decay=0.9997, batch_norm_epsilon=0.001): + """Yields the scope with the default parameters for inception_v3. + + Args: + weight_decay: the weight decay for weights variables. + stddev: standard deviation of the truncated guassian weight distribution. + batch_norm_decay: decay for the moving average of batch_norm momentums. + batch_norm_epsilon: small float added to variance to avoid dividing by zero. + + Yields: + a arg_scope with the parameters needed for inception_v3. + """ + # Set weight_decay for weights in Conv and FC layers. + with scopes.arg_scope([ops.conv2d, ops.fc], + weight_decay=weight_decay): + # Set stddev, activation and parameters for batch_norm. + with scopes.arg_scope([ops.conv2d], + stddev=stddev, + activation=tf.nn.relu, + batch_norm_params={ + 'decay': batch_norm_decay, + 'epsilon': batch_norm_epsilon}) as arg_scope: + yield arg_scope diff --git a/models/research/inception/inception/slim/inception_test.py b/models/research/inception/inception/slim/inception_test.py new file mode 100644 index 0000000000000000000000000000000000000000..231dea298f4b761aa90224df1c263873bc890ac5 --- /dev/null +++ b/models/research/inception/inception/slim/inception_test.py @@ -0,0 +1,134 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for slim.inception.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from inception.slim import inception_model as inception + + +class InceptionTest(tf.test.TestCase): + + def testBuildLogits(self): + batch_size = 5 + height, width = 299, 299 + num_classes = 1000 + with self.test_session(): + inputs = tf.random_uniform((batch_size, height, width, 3)) + logits, _ = inception.inception_v3(inputs, num_classes) + self.assertTrue(logits.op.name.startswith('logits')) + self.assertListEqual(logits.get_shape().as_list(), + [batch_size, num_classes]) + + def testBuildEndPoints(self): + batch_size = 5 + height, width = 299, 299 + num_classes = 1000 + with self.test_session(): + inputs = tf.random_uniform((batch_size, height, width, 3)) + _, end_points = inception.inception_v3(inputs, num_classes) + self.assertTrue('logits' in end_points) + logits = end_points['logits'] + self.assertListEqual(logits.get_shape().as_list(), + [batch_size, num_classes]) + self.assertTrue('aux_logits' in end_points) + aux_logits = end_points['aux_logits'] + self.assertListEqual(aux_logits.get_shape().as_list(), + [batch_size, num_classes]) + pre_pool = end_points['mixed_8x8x2048b'] + self.assertListEqual(pre_pool.get_shape().as_list(), + [batch_size, 8, 8, 2048]) + + def testVariablesSetDevice(self): + batch_size = 5 + height, width = 299, 299 + num_classes = 1000 + with self.test_session(): + inputs = tf.random_uniform((batch_size, height, width, 3)) + # Force all Variables to reside on the device. + with tf.variable_scope('on_cpu'), tf.device('/cpu:0'): + inception.inception_v3(inputs, num_classes) + with tf.variable_scope('on_gpu'), tf.device('/gpu:0'): + inception.inception_v3(inputs, num_classes) + for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'): + self.assertDeviceEqual(v.device, '/cpu:0') + for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'): + self.assertDeviceEqual(v.device, '/gpu:0') + + def testHalfSizeImages(self): + batch_size = 5 + height, width = 150, 150 + num_classes = 1000 + with self.test_session(): + inputs = tf.random_uniform((batch_size, height, width, 3)) + logits, end_points = inception.inception_v3(inputs, num_classes) + self.assertTrue(logits.op.name.startswith('logits')) + self.assertListEqual(logits.get_shape().as_list(), + [batch_size, num_classes]) + pre_pool = end_points['mixed_8x8x2048b'] + self.assertListEqual(pre_pool.get_shape().as_list(), + [batch_size, 3, 3, 2048]) + + def testUnknowBatchSize(self): + batch_size = 1 + height, width = 299, 299 + num_classes = 1000 + with self.test_session() as sess: + inputs = tf.placeholder(tf.float32, (None, height, width, 3)) + logits, _ = inception.inception_v3(inputs, num_classes) + self.assertTrue(logits.op.name.startswith('logits')) + self.assertListEqual(logits.get_shape().as_list(), + [None, num_classes]) + images = tf.random_uniform((batch_size, height, width, 3)) + sess.run(tf.global_variables_initializer()) + output = sess.run(logits, {inputs: images.eval()}) + self.assertEquals(output.shape, (batch_size, num_classes)) + + def testEvaluation(self): + batch_size = 2 + height, width = 299, 299 + num_classes = 1000 + with self.test_session() as sess: + eval_inputs = tf.random_uniform((batch_size, height, width, 3)) + logits, _ = inception.inception_v3(eval_inputs, num_classes, + is_training=False) + predictions 
= tf.argmax(logits, 1) + sess.run(tf.global_variables_initializer()) + output = sess.run(predictions) + self.assertEquals(output.shape, (batch_size,)) + + def testTrainEvalWithReuse(self): + train_batch_size = 5 + eval_batch_size = 2 + height, width = 150, 150 + num_classes = 1000 + with self.test_session() as sess: + train_inputs = tf.random_uniform((train_batch_size, height, width, 3)) + inception.inception_v3(train_inputs, num_classes) + tf.get_variable_scope().reuse_variables() + eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3)) + logits, _ = inception.inception_v3(eval_inputs, num_classes, + is_training=False) + predictions = tf.argmax(logits, 1) + sess.run(tf.global_variables_initializer()) + output = sess.run(predictions) + self.assertEquals(output.shape, (eval_batch_size,)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/inception/inception/slim/losses.py b/models/research/inception/inception/slim/losses.py new file mode 100644 index 0000000000000000000000000000000000000000..78298d092fab3afc264e427fb060602c27ea97b0 --- /dev/null +++ b/models/research/inception/inception/slim/losses.py @@ -0,0 +1,174 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Contains convenience wrappers for various Neural Network TensorFlow losses. + + All the losses defined here add themselves to the LOSSES_COLLECTION + collection. + + l1_loss: Define a L1 Loss, useful for regularization, i.e. lasso. + l2_loss: Define a L2 Loss, useful for regularization, i.e. weight decay. + cross_entropy_loss: Define a cross entropy loss using + softmax_cross_entropy_with_logits. Useful for classification. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +# In order to gather all losses in a network, the user should use this +# key for get_collection, i.e: +# losses = tf.get_collection(slim.losses.LOSSES_COLLECTION) +LOSSES_COLLECTION = '_losses' + + +def l1_regularizer(weight=1.0, scope=None): + """Define a L1 regularizer. + + Args: + weight: scale the loss by this factor. + scope: Optional scope for name_scope. + + Returns: + a regularizer function. + """ + def regularizer(tensor): + with tf.name_scope(scope, 'L1Regularizer', [tensor]): + l1_weight = tf.convert_to_tensor(weight, + dtype=tensor.dtype.base_dtype, + name='weight') + return tf.multiply(l1_weight, tf.reduce_sum(tf.abs(tensor)), name='value') + return regularizer + + +def l2_regularizer(weight=1.0, scope=None): + """Define a L2 regularizer. + + Args: + weight: scale the loss by this factor. + scope: Optional scope for name_scope. + + Returns: + a regularizer function. 
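# Illustrative sketch (not part of the upstream file; assumes TensorFlow 1.x):
# the *_regularizer helpers are factories. Calling the returned function on a
# weights tensor yields the penalty op, which is how the ops module wires
# weight decay into conv2d/fc further below.
import tensorflow as tf
from inception.slim import losses

weights = tf.random_uniform((3, 3, 16, 32))
l1_penalty = losses.l1_regularizer(0.01)(weights)    # 0.01 * sum(|weights|)
l2_penalty = losses.l2_regularizer(0.0004)(weights)  # 0.0004 * sum(weights**2) / 2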
+ """ + def regularizer(tensor): + with tf.name_scope(scope, 'L2Regularizer', [tensor]): + l2_weight = tf.convert_to_tensor(weight, + dtype=tensor.dtype.base_dtype, + name='weight') + return tf.multiply(l2_weight, tf.nn.l2_loss(tensor), name='value') + return regularizer + + +def l1_l2_regularizer(weight_l1=1.0, weight_l2=1.0, scope=None): + """Define a L1L2 regularizer. + + Args: + weight_l1: scale the L1 loss by this factor. + weight_l2: scale the L2 loss by this factor. + scope: Optional scope for name_scope. + + Returns: + a regularizer function. + """ + def regularizer(tensor): + with tf.name_scope(scope, 'L1L2Regularizer', [tensor]): + weight_l1_t = tf.convert_to_tensor(weight_l1, + dtype=tensor.dtype.base_dtype, + name='weight_l1') + weight_l2_t = tf.convert_to_tensor(weight_l2, + dtype=tensor.dtype.base_dtype, + name='weight_l2') + reg_l1 = tf.multiply(weight_l1_t, tf.reduce_sum(tf.abs(tensor)), + name='value_l1') + reg_l2 = tf.multiply(weight_l2_t, tf.nn.l2_loss(tensor), + name='value_l2') + return tf.add(reg_l1, reg_l2, name='value') + return regularizer + + +def l1_loss(tensor, weight=1.0, scope=None): + """Define a L1Loss, useful for regularize, i.e. lasso. + + Args: + tensor: tensor to regularize. + weight: scale the loss by this factor. + scope: Optional scope for name_scope. + + Returns: + the L1 loss op. + """ + with tf.name_scope(scope, 'L1Loss', [tensor]): + weight = tf.convert_to_tensor(weight, + dtype=tensor.dtype.base_dtype, + name='loss_weight') + loss = tf.multiply(weight, tf.reduce_sum(tf.abs(tensor)), name='value') + tf.add_to_collection(LOSSES_COLLECTION, loss) + return loss + + +def l2_loss(tensor, weight=1.0, scope=None): + """Define a L2Loss, useful for regularize, i.e. weight decay. + + Args: + tensor: tensor to regularize. + weight: an optional weight to modulate the loss. + scope: Optional scope for name_scope. + + Returns: + the L2 loss op. + """ + with tf.name_scope(scope, 'L2Loss', [tensor]): + weight = tf.convert_to_tensor(weight, + dtype=tensor.dtype.base_dtype, + name='loss_weight') + loss = tf.multiply(weight, tf.nn.l2_loss(tensor), name='value') + tf.add_to_collection(LOSSES_COLLECTION, loss) + return loss + + +def cross_entropy_loss(logits, one_hot_labels, label_smoothing=0, + weight=1.0, scope=None): + """Define a Cross Entropy loss using softmax_cross_entropy_with_logits. + + It can scale the loss by weight factor, and smooth the labels. + + Args: + logits: [batch_size, num_classes] logits outputs of the network . + one_hot_labels: [batch_size, num_classes] target one_hot_encoded labels. + label_smoothing: if greater than 0 then smooth the labels. + weight: scale the loss by this factor. + scope: Optional scope for name_scope. + + Returns: + A tensor with the softmax_cross_entropy loss. 
+ """ + logits.get_shape().assert_is_compatible_with(one_hot_labels.get_shape()) + with tf.name_scope(scope, 'CrossEntropyLoss', [logits, one_hot_labels]): + num_classes = one_hot_labels.get_shape()[-1].value + one_hot_labels = tf.cast(one_hot_labels, logits.dtype) + if label_smoothing > 0: + smooth_positives = 1.0 - label_smoothing + smooth_negatives = label_smoothing / num_classes + one_hot_labels = one_hot_labels * smooth_positives + smooth_negatives + cross_entropy = tf.contrib.nn.deprecated_flipped_softmax_cross_entropy_with_logits( + logits, one_hot_labels, name='xentropy') + + weight = tf.convert_to_tensor(weight, + dtype=logits.dtype.base_dtype, + name='loss_weight') + loss = tf.multiply(weight, tf.reduce_mean(cross_entropy), name='value') + tf.add_to_collection(LOSSES_COLLECTION, loss) + return loss diff --git a/models/research/inception/inception/slim/losses_test.py b/models/research/inception/inception/slim/losses_test.py new file mode 100644 index 0000000000000000000000000000000000000000..e267f6520779f63be0becf41ceccc7de494e14f7 --- /dev/null +++ b/models/research/inception/inception/slim/losses_test.py @@ -0,0 +1,177 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for slim.losses.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import tensorflow as tf + +from inception.slim import losses + + +class LossesTest(tf.test.TestCase): + + def testL1Loss(self): + with self.test_session(): + shape = [5, 5, 5] + num_elem = 5 * 5 * 5 + weights = tf.constant(1.0, shape=shape) + wd = 0.01 + loss = losses.l1_loss(weights, wd) + self.assertEquals(loss.op.name, 'L1Loss/value') + self.assertAlmostEqual(loss.eval(), num_elem * wd, 5) + + def testL2Loss(self): + with self.test_session(): + shape = [5, 5, 5] + num_elem = 5 * 5 * 5 + weights = tf.constant(1.0, shape=shape) + wd = 0.01 + loss = losses.l2_loss(weights, wd) + self.assertEquals(loss.op.name, 'L2Loss/value') + self.assertAlmostEqual(loss.eval(), num_elem * wd / 2, 5) + + +class RegularizersTest(tf.test.TestCase): + + def testL1Regularizer(self): + with self.test_session(): + shape = [5, 5, 5] + num_elem = 5 * 5 * 5 + tensor = tf.constant(1.0, shape=shape) + loss = losses.l1_regularizer()(tensor) + self.assertEquals(loss.op.name, 'L1Regularizer/value') + self.assertAlmostEqual(loss.eval(), num_elem, 5) + + def testL1RegularizerWithScope(self): + with self.test_session(): + shape = [5, 5, 5] + num_elem = 5 * 5 * 5 + tensor = tf.constant(1.0, shape=shape) + loss = losses.l1_regularizer(scope='L1')(tensor) + self.assertEquals(loss.op.name, 'L1/value') + self.assertAlmostEqual(loss.eval(), num_elem, 5) + + def testL1RegularizerWithWeight(self): + with self.test_session(): + shape = [5, 5, 5] + num_elem = 5 * 5 * 5 + tensor = tf.constant(1.0, shape=shape) + weight = 0.01 + loss = losses.l1_regularizer(weight)(tensor) + 
self.assertEquals(loss.op.name, 'L1Regularizer/value') + self.assertAlmostEqual(loss.eval(), num_elem * weight, 5) + + def testL2Regularizer(self): + with self.test_session(): + shape = [5, 5, 5] + num_elem = 5 * 5 * 5 + tensor = tf.constant(1.0, shape=shape) + loss = losses.l2_regularizer()(tensor) + self.assertEquals(loss.op.name, 'L2Regularizer/value') + self.assertAlmostEqual(loss.eval(), num_elem / 2, 5) + + def testL2RegularizerWithScope(self): + with self.test_session(): + shape = [5, 5, 5] + num_elem = 5 * 5 * 5 + tensor = tf.constant(1.0, shape=shape) + loss = losses.l2_regularizer(scope='L2')(tensor) + self.assertEquals(loss.op.name, 'L2/value') + self.assertAlmostEqual(loss.eval(), num_elem / 2, 5) + + def testL2RegularizerWithWeight(self): + with self.test_session(): + shape = [5, 5, 5] + num_elem = 5 * 5 * 5 + tensor = tf.constant(1.0, shape=shape) + weight = 0.01 + loss = losses.l2_regularizer(weight)(tensor) + self.assertEquals(loss.op.name, 'L2Regularizer/value') + self.assertAlmostEqual(loss.eval(), num_elem * weight / 2, 5) + + def testL1L2Regularizer(self): + with self.test_session(): + shape = [5, 5, 5] + num_elem = 5 * 5 * 5 + tensor = tf.constant(1.0, shape=shape) + loss = losses.l1_l2_regularizer()(tensor) + self.assertEquals(loss.op.name, 'L1L2Regularizer/value') + self.assertAlmostEqual(loss.eval(), num_elem + num_elem / 2, 5) + + def testL1L2RegularizerWithScope(self): + with self.test_session(): + shape = [5, 5, 5] + num_elem = 5 * 5 * 5 + tensor = tf.constant(1.0, shape=shape) + loss = losses.l1_l2_regularizer(scope='L1L2')(tensor) + self.assertEquals(loss.op.name, 'L1L2/value') + self.assertAlmostEqual(loss.eval(), num_elem + num_elem / 2, 5) + + def testL1L2RegularizerWithWeights(self): + with self.test_session(): + shape = [5, 5, 5] + num_elem = 5 * 5 * 5 + tensor = tf.constant(1.0, shape=shape) + weight_l1 = 0.01 + weight_l2 = 0.05 + loss = losses.l1_l2_regularizer(weight_l1, weight_l2)(tensor) + self.assertEquals(loss.op.name, 'L1L2Regularizer/value') + self.assertAlmostEqual(loss.eval(), + num_elem * weight_l1 + num_elem * weight_l2 / 2, 5) + + +class CrossEntropyLossTest(tf.test.TestCase): + + def testCrossEntropyLossAllCorrect(self): + with self.test_session(): + logits = tf.constant([[10.0, 0.0, 0.0], + [0.0, 10.0, 0.0], + [0.0, 0.0, 10.0]]) + labels = tf.constant([[1, 0, 0], + [0, 1, 0], + [0, 0, 1]]) + loss = losses.cross_entropy_loss(logits, labels) + self.assertEquals(loss.op.name, 'CrossEntropyLoss/value') + self.assertAlmostEqual(loss.eval(), 0.0, 3) + + def testCrossEntropyLossAllWrong(self): + with self.test_session(): + logits = tf.constant([[10.0, 0.0, 0.0], + [0.0, 10.0, 0.0], + [0.0, 0.0, 10.0]]) + labels = tf.constant([[0, 0, 1], + [1, 0, 0], + [0, 1, 0]]) + loss = losses.cross_entropy_loss(logits, labels) + self.assertEquals(loss.op.name, 'CrossEntropyLoss/value') + self.assertAlmostEqual(loss.eval(), 10.0, 3) + + def testCrossEntropyLossAllWrongWithWeight(self): + with self.test_session(): + logits = tf.constant([[10.0, 0.0, 0.0], + [0.0, 10.0, 0.0], + [0.0, 0.0, 10.0]]) + labels = tf.constant([[0, 0, 1], + [1, 0, 0], + [0, 1, 0]]) + loss = losses.cross_entropy_loss(logits, labels, weight=0.5) + self.assertEquals(loss.op.name, 'CrossEntropyLoss/value') + self.assertAlmostEqual(loss.eval(), 5.0, 3) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/inception/inception/slim/ops.py b/models/research/inception/inception/slim/ops.py new file mode 100644 index 
0000000000000000000000000000000000000000..54fda4eb81f3a138d9bb2748c21164b88570ede9 --- /dev/null +++ b/models/research/inception/inception/slim/ops.py @@ -0,0 +1,473 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Contains convenience wrappers for typical Neural Network TensorFlow layers. + + Additionally it maintains a collection with update_ops that need to be + updated after the ops have been computed, for example to update moving means + and moving variances of batch_norm. + + Ops that have different behavior during training or eval have an is_training + parameter. Additionally Ops that contain variables.variable have a trainable + parameter, which control if the ops variables are trainable or not. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import tensorflow as tf + +from tensorflow.python.training import moving_averages + +from inception.slim import losses +from inception.slim import scopes +from inception.slim import variables + +# Used to keep the update ops done by batch_norm. +UPDATE_OPS_COLLECTION = '_update_ops_' + + +@scopes.add_arg_scope +def batch_norm(inputs, + decay=0.999, + center=True, + scale=False, + epsilon=0.001, + moving_vars='moving_vars', + activation=None, + is_training=True, + trainable=True, + restore=True, + scope=None, + reuse=None): + """Adds a Batch Normalization layer. + + Args: + inputs: a tensor of size [batch_size, height, width, channels] + or [batch_size, channels]. + decay: decay for the moving average. + center: If True, subtract beta. If False, beta is not created and ignored. + scale: If True, multiply by gamma. If False, gamma is + not used. When the next layer is linear (also e.g. ReLU), this can be + disabled since the scaling can be done by the next layer. + epsilon: small float added to variance to avoid dividing by zero. + moving_vars: collection to store the moving_mean and moving_variance. + activation: activation function. + is_training: whether or not the model is in training mode. + trainable: whether or not the variables should be trainable or not. + restore: whether or not the variables should be marked for restore. + scope: Optional scope for variable_scope. + reuse: whether or not the layer and its variables should be reused. To be + able to reuse the layer scope must be given. + + Returns: + a tensor representing the output of the operation. + + """ + inputs_shape = inputs.get_shape() + with tf.variable_scope(scope, 'BatchNorm', [inputs], reuse=reuse): + axis = list(range(len(inputs_shape) - 1)) + params_shape = inputs_shape[-1:] + # Allocate parameters for the beta and gamma of the normalization. 
+ beta, gamma = None, None + if center: + beta = variables.variable('beta', + params_shape, + initializer=tf.zeros_initializer(), + trainable=trainable, + restore=restore) + if scale: + gamma = variables.variable('gamma', + params_shape, + initializer=tf.ones_initializer(), + trainable=trainable, + restore=restore) + # Create moving_mean and moving_variance add them to + # GraphKeys.MOVING_AVERAGE_VARIABLES collections. + moving_collections = [moving_vars, tf.GraphKeys.MOVING_AVERAGE_VARIABLES] + moving_mean = variables.variable('moving_mean', + params_shape, + initializer=tf.zeros_initializer(), + trainable=False, + restore=restore, + collections=moving_collections) + moving_variance = variables.variable('moving_variance', + params_shape, + initializer=tf.ones_initializer(), + trainable=False, + restore=restore, + collections=moving_collections) + if is_training: + # Calculate the moments based on the individual batch. + mean, variance = tf.nn.moments(inputs, axis) + + update_moving_mean = moving_averages.assign_moving_average( + moving_mean, mean, decay) + tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean) + update_moving_variance = moving_averages.assign_moving_average( + moving_variance, variance, decay) + tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance) + else: + # Just use the moving_mean and moving_variance. + mean = moving_mean + variance = moving_variance + # Normalize the activations. + outputs = tf.nn.batch_normalization( + inputs, mean, variance, beta, gamma, epsilon) + outputs.set_shape(inputs.get_shape()) + if activation: + outputs = activation(outputs) + return outputs + + +def _two_element_tuple(int_or_tuple): + """Converts `int_or_tuple` to height, width. + + Several of the functions that follow accept arguments as either + a tuple of 2 integers or a single integer. A single integer + indicates that the 2 values of the tuple are the same. + + This functions normalizes the input value by always returning a tuple. + + Args: + int_or_tuple: A list of 2 ints, a single int or a tf.TensorShape. + + Returns: + A tuple with 2 values. + + Raises: + ValueError: If `int_or_tuple` it not well formed. + """ + if isinstance(int_or_tuple, (list, tuple)): + if len(int_or_tuple) != 2: + raise ValueError('Must be a list with 2 elements: %s' % int_or_tuple) + return int(int_or_tuple[0]), int(int_or_tuple[1]) + if isinstance(int_or_tuple, int): + return int(int_or_tuple), int(int_or_tuple) + if isinstance(int_or_tuple, tf.TensorShape): + if len(int_or_tuple) == 2: + return int_or_tuple[0], int_or_tuple[1] + raise ValueError('Must be an int, a list with 2 elements or a TensorShape of ' + 'length 2') + + +@scopes.add_arg_scope +def conv2d(inputs, + num_filters_out, + kernel_size, + stride=1, + padding='SAME', + activation=tf.nn.relu, + stddev=0.01, + bias=0.0, + weight_decay=0, + batch_norm_params=None, + is_training=True, + trainable=True, + restore=True, + scope=None, + reuse=None): + """Adds a 2D convolution followed by an optional batch_norm layer. + + conv2d creates a variable called 'weights', representing the convolutional + kernel, that is convolved with the input. If `batch_norm_params` is None, a + second variable called 'biases' is added to the result of the convolution + operation. + + Args: + inputs: a tensor of size [batch_size, height, width, channels]. + num_filters_out: the number of output filters. + kernel_size: a list of length 2: [kernel_height, kernel_width] of + of the filters. Can be an int if both values are the same. 
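# Illustrative sketch (not part of the upstream file; assumes TensorFlow 1.x):
# as handled by _two_element_tuple above, kernel_size and stride accept either
# a single int or a [height, width] pair, so asymmetric kernels like the 1x7
# and 7x1 convolutions in inception_model.py need no special casing.
import tensorflow as tf
from inception.slim import ops

images = tf.random_uniform((5, 32, 32, 3))
net = ops.conv2d(images, 16, 3)              # same as kernel_size=[3, 3]
net = ops.conv2d(net, 16, [1, 7])            # asymmetric 1x7 kernel
net = ops.conv2d(net, 32, [3, 3], stride=2)  # stride [2, 2]; SAME padding -> 16x16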
+ stride: a list of length 2: [stride_height, stride_width]. + Can be an int if both strides are the same. Note that presently + both strides must have the same value. + padding: one of 'VALID' or 'SAME'. + activation: activation function. + stddev: standard deviation of the truncated guassian weight distribution. + bias: the initial value of the biases. + weight_decay: the weight decay. + batch_norm_params: parameters for the batch_norm. If is None don't use it. + is_training: whether or not the model is in training mode. + trainable: whether or not the variables should be trainable or not. + restore: whether or not the variables should be marked for restore. + scope: Optional scope for variable_scope. + reuse: whether or not the layer and its variables should be reused. To be + able to reuse the layer scope must be given. + Returns: + a tensor representing the output of the operation. + + """ + with tf.variable_scope(scope, 'Conv', [inputs], reuse=reuse): + kernel_h, kernel_w = _two_element_tuple(kernel_size) + stride_h, stride_w = _two_element_tuple(stride) + num_filters_in = inputs.get_shape()[-1] + weights_shape = [kernel_h, kernel_w, + num_filters_in, num_filters_out] + weights_initializer = tf.truncated_normal_initializer(stddev=stddev) + l2_regularizer = None + if weight_decay and weight_decay > 0: + l2_regularizer = losses.l2_regularizer(weight_decay) + weights = variables.variable('weights', + shape=weights_shape, + initializer=weights_initializer, + regularizer=l2_regularizer, + trainable=trainable, + restore=restore) + conv = tf.nn.conv2d(inputs, weights, [1, stride_h, stride_w, 1], + padding=padding) + if batch_norm_params is not None: + with scopes.arg_scope([batch_norm], is_training=is_training, + trainable=trainable, restore=restore): + outputs = batch_norm(conv, **batch_norm_params) + else: + bias_shape = [num_filters_out,] + bias_initializer = tf.constant_initializer(bias) + biases = variables.variable('biases', + shape=bias_shape, + initializer=bias_initializer, + trainable=trainable, + restore=restore) + outputs = tf.nn.bias_add(conv, biases) + if activation: + outputs = activation(outputs) + return outputs + + +@scopes.add_arg_scope +def fc(inputs, + num_units_out, + activation=tf.nn.relu, + stddev=0.01, + bias=0.0, + weight_decay=0, + batch_norm_params=None, + is_training=True, + trainable=True, + restore=True, + scope=None, + reuse=None): + """Adds a fully connected layer followed by an optional batch_norm layer. + + FC creates a variable called 'weights', representing the fully connected + weight matrix, that is multiplied by the input. If `batch_norm` is None, a + second variable called 'biases' is added to the result of the initial + vector-matrix multiplication. + + Args: + inputs: a [B x N] tensor where B is the batch size and N is the number of + input units in the layer. + num_units_out: the number of output units in the layer. + activation: activation function. + stddev: the standard deviation for the weights. + bias: the initial value of the biases. + weight_decay: the weight decay. + batch_norm_params: parameters for the batch_norm. If is None don't use it. + is_training: whether or not the model is in training mode. + trainable: whether or not the variables should be trainable or not. + restore: whether or not the variables should be marked for restore. + scope: Optional scope for variable_scope. + reuse: whether or not the layer and its variables should be reused. To be + able to reuse the layer scope must be given. 
+ + Returns: + the tensor variable representing the result of the series of operations. + """ + with tf.variable_scope(scope, 'FC', [inputs], reuse=reuse): + num_units_in = inputs.get_shape()[1] + weights_shape = [num_units_in, num_units_out] + weights_initializer = tf.truncated_normal_initializer(stddev=stddev) + l2_regularizer = None + if weight_decay and weight_decay > 0: + l2_regularizer = losses.l2_regularizer(weight_decay) + weights = variables.variable('weights', + shape=weights_shape, + initializer=weights_initializer, + regularizer=l2_regularizer, + trainable=trainable, + restore=restore) + if batch_norm_params is not None: + outputs = tf.matmul(inputs, weights) + with scopes.arg_scope([batch_norm], is_training=is_training, + trainable=trainable, restore=restore): + outputs = batch_norm(outputs, **batch_norm_params) + else: + bias_shape = [num_units_out,] + bias_initializer = tf.constant_initializer(bias) + biases = variables.variable('biases', + shape=bias_shape, + initializer=bias_initializer, + trainable=trainable, + restore=restore) + outputs = tf.nn.xw_plus_b(inputs, weights, biases) + if activation: + outputs = activation(outputs) + return outputs + + +def one_hot_encoding(labels, num_classes, scope=None): + """Transform numeric labels into onehot_labels. + + Args: + labels: [batch_size] target labels. + num_classes: total number of classes. + scope: Optional scope for name_scope. + Returns: + one hot encoding of the labels. + """ + with tf.name_scope(scope, 'OneHotEncoding', [labels]): + batch_size = labels.get_shape()[0] + indices = tf.expand_dims(tf.range(0, batch_size), 1) + labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype) + concated = tf.concat(axis=1, values=[indices, labels]) + onehot_labels = tf.sparse_to_dense( + concated, tf.stack([batch_size, num_classes]), 1.0, 0.0) + onehot_labels.set_shape([batch_size, num_classes]) + return onehot_labels + + +@scopes.add_arg_scope +def max_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None): + """Adds a Max Pooling layer. + + It is assumed by the wrapper that the pooling is only done per image and not + in depth or batch. + + Args: + inputs: a tensor of size [batch_size, height, width, depth]. + kernel_size: a list of length 2: [kernel_height, kernel_width] of the + pooling kernel over which the op is computed. Can be an int if both + values are the same. + stride: a list of length 2: [stride_height, stride_width]. + Can be an int if both strides are the same. Note that presently + both strides must have the same value. + padding: the padding method, either 'VALID' or 'SAME'. + scope: Optional scope for name_scope. + + Returns: + a tensor representing the results of the pooling operation. + Raises: + ValueError: if 'kernel_size' is not a 2-D list + """ + with tf.name_scope(scope, 'MaxPool', [inputs]): + kernel_h, kernel_w = _two_element_tuple(kernel_size) + stride_h, stride_w = _two_element_tuple(stride) + return tf.nn.max_pool(inputs, + ksize=[1, kernel_h, kernel_w, 1], + strides=[1, stride_h, stride_w, 1], + padding=padding) + + +@scopes.add_arg_scope +def avg_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None): + """Adds a Avg Pooling layer. + + It is assumed by the wrapper that the pooling is only done per image and not + in depth or batch. + + Args: + inputs: a tensor of size [batch_size, height, width, depth]. + kernel_size: a list of length 2: [kernel_height, kernel_width] of the + pooling kernel over which the op is computed. Can be an int if both + values are the same. 
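# Illustrative sketch (not part of the upstream file; assumes TensorFlow 1.x):
# one_hot_encoding defined above turns integer class ids into the dense
# one-hot targets expected by losses.cross_entropy_loss.
import tensorflow as tf
from inception.slim import ops

labels = tf.constant([0, 2, 1])
one_hot_labels = ops.one_hot_encoding(labels, num_classes=3)  # shape [3, 3]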
+ stride: a list of length 2: [stride_height, stride_width]. + Can be an int if both strides are the same. Note that presently + both strides must have the same value. + padding: the padding method, either 'VALID' or 'SAME'. + scope: Optional scope for name_scope. + + Returns: + a tensor representing the results of the pooling operation. + """ + with tf.name_scope(scope, 'AvgPool', [inputs]): + kernel_h, kernel_w = _two_element_tuple(kernel_size) + stride_h, stride_w = _two_element_tuple(stride) + return tf.nn.avg_pool(inputs, + ksize=[1, kernel_h, kernel_w, 1], + strides=[1, stride_h, stride_w, 1], + padding=padding) + + +@scopes.add_arg_scope +def dropout(inputs, keep_prob=0.5, is_training=True, scope=None): + """Returns a dropout layer applied to the input. + + Args: + inputs: the tensor to pass to the Dropout layer. + keep_prob: the probability of keeping each input unit. + is_training: whether or not the model is in training mode. If so, dropout is + applied and values scaled. Otherwise, inputs is returned. + scope: Optional scope for name_scope. + + Returns: + a tensor representing the output of the operation. + """ + if is_training and keep_prob > 0: + with tf.name_scope(scope, 'Dropout', [inputs]): + return tf.nn.dropout(inputs, keep_prob) + else: + return inputs + + +def flatten(inputs, scope=None): + """Flattens the input while maintaining the batch_size. + + Assumes that the first dimension represents the batch. + + Args: + inputs: a tensor of size [batch_size, ...]. + scope: Optional scope for name_scope. + + Returns: + a flattened tensor with shape [batch_size, k]. + Raises: + ValueError: if inputs.shape is wrong. + """ + if len(inputs.get_shape()) < 2: + raise ValueError('Inputs must be have a least 2 dimensions') + dims = inputs.get_shape()[1:] + k = dims.num_elements() + with tf.name_scope(scope, 'Flatten', [inputs]): + return tf.reshape(inputs, [-1, k]) + + +def repeat_op(repetitions, inputs, op, *args, **kwargs): + """Build a sequential Tower starting from inputs by using an op repeatedly. + + It creates new scopes for each operation by increasing the counter. + Example: given repeat_op(3, _, ops.conv2d, 64, [3, 3], scope='conv1') + it will repeat the given op under the following variable_scopes: + conv1/Conv + conv1/Conv_1 + conv1/Conv_2 + + Args: + repetitions: number or repetitions. + inputs: a tensor of size [batch_size, height, width, channels]. + op: an operation. + *args: args for the op. + **kwargs: kwargs for the op. + + Returns: + a tensor result of applying the operation op, num times. + Raises: + ValueError: if the op is unknown or wrong. + """ + scope = kwargs.pop('scope', None) + with tf.variable_scope(scope, 'RepeatOp', [inputs]): + tower = inputs + for _ in range(repetitions): + tower = op(tower, *args, **kwargs) + return tower diff --git a/models/research/inception/inception/slim/ops_test.py b/models/research/inception/inception/slim/ops_test.py new file mode 100644 index 0000000000000000000000000000000000000000..13dc5d9aacf6e283540a406d419a67d2d7215161 --- /dev/null +++ b/models/research/inception/inception/slim/ops_test.py @@ -0,0 +1,687 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
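# Illustrative end-to-end sketch (not part of the upstream files; assumes
# TensorFlow 1.x and arbitrary layer sizes) composing the wrappers defined in
# ops.py into a small tower: each op opens its own scope, so layers chain
# directly.
import tensorflow as tf
from inception.slim import ops

images = tf.random_uniform((8, 32, 32, 3))
net = ops.repeat_op(2, images, ops.conv2d, 64, [3, 3], scope='conv1')
net = ops.max_pool(net, [2, 2], scope='pool1')                 # 32x32 -> 16x16
net = ops.conv2d(net, 128, [3, 3], scope='conv2')
net = ops.avg_pool(net, net.get_shape()[1:3], stride=1, scope='global_pool')
net = ops.flatten(net, scope='flatten')                        # [8, 128]
net = ops.dropout(net, 0.8, scope='dropout')
logits = ops.fc(net, 10, activation=None, scope='logits')      # [8, 10]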
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for slim.ops.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import numpy as np +import tensorflow as tf + +from inception.slim import ops +from inception.slim import scopes +from inception.slim import variables + + +class ConvTest(tf.test.TestCase): + + def testCreateConv(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.conv2d(images, 32, [3, 3]) + self.assertEquals(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32]) + + def testCreateSquareConv(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.conv2d(images, 32, 3) + self.assertEquals(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32]) + + def testCreateConvWithTensorShape(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.conv2d(images, 32, images.get_shape()[1:3]) + self.assertEquals(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32]) + + def testCreateFullyConv(self): + height, width = 6, 6 + with self.test_session(): + images = tf.random_uniform((5, height, width, 32), seed=1) + output = ops.conv2d(images, 64, images.get_shape()[1:3], padding='VALID') + self.assertEquals(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 64]) + + def testCreateVerticalConv(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.conv2d(images, 32, [3, 1]) + self.assertEquals(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), + [5, height, width, 32]) + + def testCreateHorizontalConv(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.conv2d(images, 32, [1, 3]) + self.assertEquals(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), + [5, height, width, 32]) + + def testCreateConvWithStride(self): + height, width = 6, 6 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.conv2d(images, 32, [3, 3], stride=2) + self.assertEquals(output.op.name, 'Conv/Relu') + self.assertListEqual(output.get_shape().as_list(), + [5, height/2, width/2, 32]) + + def testCreateConvCreatesWeightsAndBiasesVars(self): + height, width = 3, 3 + images = tf.random_uniform((5, height, width, 3), seed=1) + with self.test_session(): + self.assertFalse(variables.get_variables('conv1/weights')) + self.assertFalse(variables.get_variables('conv1/biases')) + ops.conv2d(images, 32, [3, 3], scope='conv1') + self.assertTrue(variables.get_variables('conv1/weights')) + 
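# Illustrative sketch (not part of the upstream test; assumes TensorFlow 1.x):
# passing weight_decay > 0 to conv2d or fc attaches an L2 penalty on the
# kernel to tf.GraphKeys.REGULARIZATION_LOSSES, which testCreateConvWithWD
# below verifies.
import tensorflow as tf
from inception.slim import ops

images = tf.random_uniform((5, 16, 16, 3))
net = ops.conv2d(images, 8, [3, 3], weight_decay=0.0004, scope='conv_wd')
reg_terms = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
weight_penalty = tf.add_n(reg_terms)  # add this to the training objective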
self.assertTrue(variables.get_variables('conv1/biases')) + + def testCreateConvWithScope(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.conv2d(images, 32, [3, 3], scope='conv1') + self.assertEquals(output.op.name, 'conv1/Relu') + + def testCreateConvWithoutActivation(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.conv2d(images, 32, [3, 3], activation=None) + self.assertEquals(output.op.name, 'Conv/BiasAdd') + + def testCreateConvValid(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.conv2d(images, 32, [3, 3], padding='VALID') + self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 32]) + + def testCreateConvWithWD(self): + height, width = 3, 3 + with self.test_session() as sess: + images = tf.random_uniform((5, height, width, 3), seed=1) + ops.conv2d(images, 32, [3, 3], weight_decay=0.01) + wd = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0] + self.assertEquals(wd.op.name, + 'Conv/weights/Regularizer/L2Regularizer/value') + sess.run(tf.global_variables_initializer()) + self.assertTrue(sess.run(wd) <= 0.01) + + def testCreateConvWithoutWD(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + ops.conv2d(images, 32, [3, 3], weight_decay=0) + self.assertEquals( + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), []) + + def testReuseVars(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + ops.conv2d(images, 32, [3, 3], scope='conv1') + self.assertEquals(len(variables.get_variables()), 2) + ops.conv2d(images, 32, [3, 3], scope='conv1', reuse=True) + self.assertEquals(len(variables.get_variables()), 2) + + def testNonReuseVars(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + ops.conv2d(images, 32, [3, 3]) + self.assertEquals(len(variables.get_variables()), 2) + ops.conv2d(images, 32, [3, 3]) + self.assertEquals(len(variables.get_variables()), 4) + + def testReuseConvWithWD(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + ops.conv2d(images, 32, [3, 3], weight_decay=0.01, scope='conv1') + self.assertEquals(len(variables.get_variables()), 2) + self.assertEquals( + len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1) + ops.conv2d(images, 32, [3, 3], weight_decay=0.01, scope='conv1', + reuse=True) + self.assertEquals(len(variables.get_variables()), 2) + self.assertEquals( + len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1) + + def testConvWithBatchNorm(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 32), seed=1) + with scopes.arg_scope([ops.conv2d], batch_norm_params={'decay': 0.9}): + net = ops.conv2d(images, 32, [3, 3]) + net = ops.conv2d(net, 32, [3, 3]) + self.assertEquals(len(variables.get_variables()), 8) + self.assertEquals(len(variables.get_variables('Conv/BatchNorm')), 3) + self.assertEquals(len(variables.get_variables('Conv_1/BatchNorm')), 3) + + def testReuseConvWithBatchNorm(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 32), seed=1) + with scopes.arg_scope([ops.conv2d], batch_norm_params={'decay': 0.9}): + 
net = ops.conv2d(images, 32, [3, 3], scope='Conv') + net = ops.conv2d(net, 32, [3, 3], scope='Conv', reuse=True) + self.assertEquals(len(variables.get_variables()), 4) + self.assertEquals(len(variables.get_variables('Conv/BatchNorm')), 3) + self.assertEquals(len(variables.get_variables('Conv_1/BatchNorm')), 0) + + +class FCTest(tf.test.TestCase): + + def testCreateFC(self): + height, width = 3, 3 + with self.test_session(): + inputs = tf.random_uniform((5, height * width * 3), seed=1) + output = ops.fc(inputs, 32) + self.assertEquals(output.op.name, 'FC/Relu') + self.assertListEqual(output.get_shape().as_list(), [5, 32]) + + def testCreateFCWithScope(self): + height, width = 3, 3 + with self.test_session(): + inputs = tf.random_uniform((5, height * width * 3), seed=1) + output = ops.fc(inputs, 32, scope='fc1') + self.assertEquals(output.op.name, 'fc1/Relu') + + def testCreateFcCreatesWeightsAndBiasesVars(self): + height, width = 3, 3 + inputs = tf.random_uniform((5, height * width * 3), seed=1) + with self.test_session(): + self.assertFalse(variables.get_variables('fc1/weights')) + self.assertFalse(variables.get_variables('fc1/biases')) + ops.fc(inputs, 32, scope='fc1') + self.assertTrue(variables.get_variables('fc1/weights')) + self.assertTrue(variables.get_variables('fc1/biases')) + + def testReuseVars(self): + height, width = 3, 3 + inputs = tf.random_uniform((5, height * width * 3), seed=1) + with self.test_session(): + ops.fc(inputs, 32, scope='fc1') + self.assertEquals(len(variables.get_variables('fc1')), 2) + ops.fc(inputs, 32, scope='fc1', reuse=True) + self.assertEquals(len(variables.get_variables('fc1')), 2) + + def testNonReuseVars(self): + height, width = 3, 3 + inputs = tf.random_uniform((5, height * width * 3), seed=1) + with self.test_session(): + ops.fc(inputs, 32) + self.assertEquals(len(variables.get_variables('FC')), 2) + ops.fc(inputs, 32) + self.assertEquals(len(variables.get_variables('FC')), 4) + + def testCreateFCWithoutActivation(self): + height, width = 3, 3 + with self.test_session(): + inputs = tf.random_uniform((5, height * width * 3), seed=1) + output = ops.fc(inputs, 32, activation=None) + self.assertEquals(output.op.name, 'FC/xw_plus_b') + + def testCreateFCWithWD(self): + height, width = 3, 3 + with self.test_session() as sess: + inputs = tf.random_uniform((5, height * width * 3), seed=1) + ops.fc(inputs, 32, weight_decay=0.01) + wd = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0] + self.assertEquals(wd.op.name, + 'FC/weights/Regularizer/L2Regularizer/value') + sess.run(tf.global_variables_initializer()) + self.assertTrue(sess.run(wd) <= 0.01) + + def testCreateFCWithoutWD(self): + height, width = 3, 3 + with self.test_session(): + inputs = tf.random_uniform((5, height * width * 3), seed=1) + ops.fc(inputs, 32, weight_decay=0) + self.assertEquals( + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), []) + + def testReuseFCWithWD(self): + height, width = 3, 3 + with self.test_session(): + inputs = tf.random_uniform((5, height * width * 3), seed=1) + ops.fc(inputs, 32, weight_decay=0.01, scope='fc') + self.assertEquals(len(variables.get_variables()), 2) + self.assertEquals( + len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1) + ops.fc(inputs, 32, weight_decay=0.01, scope='fc', reuse=True) + self.assertEquals(len(variables.get_variables()), 2) + self.assertEquals( + len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1) + + def testFCWithBatchNorm(self): + height, width = 3, 3 + with self.test_session(): + images = 
tf.random_uniform((5, height * width * 3), seed=1) + with scopes.arg_scope([ops.fc], batch_norm_params={}): + net = ops.fc(images, 27) + net = ops.fc(net, 27) + self.assertEquals(len(variables.get_variables()), 8) + self.assertEquals(len(variables.get_variables('FC/BatchNorm')), 3) + self.assertEquals(len(variables.get_variables('FC_1/BatchNorm')), 3) + + def testReuseFCWithBatchNorm(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height * width * 3), seed=1) + with scopes.arg_scope([ops.fc], batch_norm_params={'decay': 0.9}): + net = ops.fc(images, 27, scope='fc1') + net = ops.fc(net, 27, scope='fc1', reuse=True) + self.assertEquals(len(variables.get_variables()), 4) + self.assertEquals(len(variables.get_variables('fc1/BatchNorm')), 3) + + +class MaxPoolTest(tf.test.TestCase): + + def testCreateMaxPool(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.max_pool(images, [3, 3]) + self.assertEquals(output.op.name, 'MaxPool/MaxPool') + self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3]) + + def testCreateSquareMaxPool(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.max_pool(images, 3) + self.assertEquals(output.op.name, 'MaxPool/MaxPool') + self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3]) + + def testCreateMaxPoolWithScope(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.max_pool(images, [3, 3], scope='pool1') + self.assertEquals(output.op.name, 'pool1/MaxPool') + + def testCreateMaxPoolSAME(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.max_pool(images, [3, 3], padding='SAME') + self.assertListEqual(output.get_shape().as_list(), [5, 2, 2, 3]) + + def testCreateMaxPoolStrideSAME(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.max_pool(images, [3, 3], stride=1, padding='SAME') + self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3]) + + def testGlobalMaxPool(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.max_pool(images, images.get_shape()[1:3], stride=1) + self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3]) + + +class AvgPoolTest(tf.test.TestCase): + + def testCreateAvgPool(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.avg_pool(images, [3, 3]) + self.assertEquals(output.op.name, 'AvgPool/AvgPool') + self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3]) + + def testCreateSquareAvgPool(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.avg_pool(images, 3) + self.assertEquals(output.op.name, 'AvgPool/AvgPool') + self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3]) + + def testCreateAvgPoolWithScope(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.avg_pool(images, [3, 3], scope='pool1') + self.assertEquals(output.op.name, 'pool1/AvgPool') + + def testCreateAvgPoolSAME(self): + height, width = 3, 3 + with self.test_session(): 
+ images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.avg_pool(images, [3, 3], padding='SAME') + self.assertListEqual(output.get_shape().as_list(), [5, 2, 2, 3]) + + def testCreateAvgPoolStrideSAME(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.avg_pool(images, [3, 3], stride=1, padding='SAME') + self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3]) + + def testGlobalAvgPool(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.avg_pool(images, images.get_shape()[1:3], stride=1) + self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3]) + + +class OneHotEncodingTest(tf.test.TestCase): + + def testOneHotEncodingCreate(self): + with self.test_session(): + labels = tf.constant([0, 1, 2]) + output = ops.one_hot_encoding(labels, num_classes=3) + self.assertEquals(output.op.name, 'OneHotEncoding/SparseToDense') + self.assertListEqual(output.get_shape().as_list(), [3, 3]) + + def testOneHotEncoding(self): + with self.test_session(): + labels = tf.constant([0, 1, 2]) + one_hot_labels = tf.constant([[1, 0, 0], + [0, 1, 0], + [0, 0, 1]]) + output = ops.one_hot_encoding(labels, num_classes=3) + self.assertAllClose(output.eval(), one_hot_labels.eval()) + + +class DropoutTest(tf.test.TestCase): + + def testCreateDropout(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.dropout(images) + self.assertEquals(output.op.name, 'Dropout/dropout/mul') + output.get_shape().assert_is_compatible_with(images.get_shape()) + + def testCreateDropoutNoTraining(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1, name='images') + output = ops.dropout(images, is_training=False) + self.assertEquals(output, images) + + +class FlattenTest(tf.test.TestCase): + + def testFlatten4D(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1, name='images') + output = ops.flatten(images) + self.assertEquals(output.get_shape().num_elements(), + images.get_shape().num_elements()) + self.assertEqual(output.get_shape()[0], images.get_shape()[0]) + + def testFlatten3D(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width), seed=1, name='images') + output = ops.flatten(images) + self.assertEquals(output.get_shape().num_elements(), + images.get_shape().num_elements()) + self.assertEqual(output.get_shape()[0], images.get_shape()[0]) + + def testFlattenBatchSize(self): + height, width = 3, 3 + with self.test_session() as sess: + images = tf.random_uniform((5, height, width, 3), seed=1, name='images') + inputs = tf.placeholder(tf.int32, (None, height, width, 3)) + output = ops.flatten(inputs) + self.assertEquals(output.get_shape().as_list(), + [None, height * width * 3]) + output = sess.run(output, {inputs: images.eval()}) + self.assertEquals(output.size, + images.get_shape().num_elements()) + self.assertEqual(output.shape[0], images.get_shape()[0]) + + +class BatchNormTest(tf.test.TestCase): + + def testCreateOp(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + output = ops.batch_norm(images) + self.assertTrue(output.op.name.startswith('BatchNorm/batchnorm')) + 
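# Illustrative sketch (not part of the upstream test; assumes TensorFlow 1.x):
# by default batch_norm creates a trainable 'beta' plus non-trainable moving
# statistics, and adds 'gamma' only when scale=True, as the tests below verify.
import tensorflow as tf
from inception.slim import ops
from inception.slim import variables

images = tf.random_uniform((5, 8, 8, 3))
ops.batch_norm(images, scale=True, scope='bn_example')
bn_vars = variables.get_variables('bn_example')  # beta, gamma, moving_mean, moving_variance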
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3]) + + def testCreateVariables(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + ops.batch_norm(images) + beta = variables.get_variables_by_name('beta')[0] + self.assertEquals(beta.op.name, 'BatchNorm/beta') + gamma = variables.get_variables_by_name('gamma') + self.assertEquals(gamma, []) + moving_mean = tf.moving_average_variables()[0] + moving_variance = tf.moving_average_variables()[1] + self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean') + self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance') + + def testCreateVariablesWithScale(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + ops.batch_norm(images, scale=True) + beta = variables.get_variables_by_name('beta')[0] + gamma = variables.get_variables_by_name('gamma')[0] + self.assertEquals(beta.op.name, 'BatchNorm/beta') + self.assertEquals(gamma.op.name, 'BatchNorm/gamma') + moving_mean = tf.moving_average_variables()[0] + moving_variance = tf.moving_average_variables()[1] + self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean') + self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance') + + def testCreateVariablesWithoutCenterWithScale(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + ops.batch_norm(images, center=False, scale=True) + beta = variables.get_variables_by_name('beta') + self.assertEquals(beta, []) + gamma = variables.get_variables_by_name('gamma')[0] + self.assertEquals(gamma.op.name, 'BatchNorm/gamma') + moving_mean = tf.moving_average_variables()[0] + moving_variance = tf.moving_average_variables()[1] + self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean') + self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance') + + def testCreateVariablesWithoutCenterWithoutScale(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + ops.batch_norm(images, center=False, scale=False) + beta = variables.get_variables_by_name('beta') + self.assertEquals(beta, []) + gamma = variables.get_variables_by_name('gamma') + self.assertEquals(gamma, []) + moving_mean = tf.moving_average_variables()[0] + moving_variance = tf.moving_average_variables()[1] + self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean') + self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance') + + def testMovingAverageVariables(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + ops.batch_norm(images, scale=True) + moving_mean = tf.moving_average_variables()[0] + moving_variance = tf.moving_average_variables()[1] + self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean') + self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance') + + def testUpdateOps(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + ops.batch_norm(images) + update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION) + update_moving_mean = update_ops[0] + update_moving_variance = update_ops[1] + self.assertEquals(update_moving_mean.op.name, + 'BatchNorm/AssignMovingAvg') + self.assertEquals(update_moving_variance.op.name, + 'BatchNorm/AssignMovingAvg_1') + + def testReuseVariables(self): + height, width = 3, 3 + 
with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + ops.batch_norm(images, scale=True, scope='bn') + ops.batch_norm(images, scale=True, scope='bn', reuse=True) + beta = variables.get_variables_by_name('beta') + gamma = variables.get_variables_by_name('gamma') + self.assertEquals(len(beta), 1) + self.assertEquals(len(gamma), 1) + moving_vars = tf.get_collection('moving_vars') + self.assertEquals(len(moving_vars), 2) + + def testReuseUpdateOps(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + ops.batch_norm(images, scope='bn') + self.assertEquals(len(tf.get_collection(ops.UPDATE_OPS_COLLECTION)), 2) + ops.batch_norm(images, scope='bn', reuse=True) + self.assertEquals(len(tf.get_collection(ops.UPDATE_OPS_COLLECTION)), 4) + + def testCreateMovingVars(self): + height, width = 3, 3 + with self.test_session(): + images = tf.random_uniform((5, height, width, 3), seed=1) + _ = ops.batch_norm(images, moving_vars='moving_vars') + moving_mean = tf.get_collection('moving_vars', + 'BatchNorm/moving_mean') + self.assertEquals(len(moving_mean), 1) + self.assertEquals(moving_mean[0].op.name, 'BatchNorm/moving_mean') + moving_variance = tf.get_collection('moving_vars', + 'BatchNorm/moving_variance') + self.assertEquals(len(moving_variance), 1) + self.assertEquals(moving_variance[0].op.name, 'BatchNorm/moving_variance') + + def testComputeMovingVars(self): + height, width = 3, 3 + with self.test_session() as sess: + image_shape = (10, height, width, 3) + image_values = np.random.rand(*image_shape) + expected_mean = np.mean(image_values, axis=(0, 1, 2)) + expected_var = np.var(image_values, axis=(0, 1, 2)) + images = tf.constant(image_values, shape=image_shape, dtype=tf.float32) + output = ops.batch_norm(images, decay=0.1) + update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION) + with tf.control_dependencies(update_ops): + output = tf.identity(output) + # Initialize all variables + sess.run(tf.global_variables_initializer()) + moving_mean = variables.get_variables('BatchNorm/moving_mean')[0] + moving_variance = variables.get_variables('BatchNorm/moving_variance')[0] + mean, variance = sess.run([moving_mean, moving_variance]) + # After initialization moving_mean == 0 and moving_variance == 1. + self.assertAllClose(mean, [0] * 3) + self.assertAllClose(variance, [1] * 3) + for _ in range(10): + sess.run([output]) + mean = moving_mean.eval() + variance = moving_variance.eval() + # After 10 updates with decay 0.1 moving_mean == expected_mean and + # moving_variance == expected_var. 
+ self.assertAllClose(mean, expected_mean) + self.assertAllClose(variance, expected_var) + + def testEvalMovingVars(self): + height, width = 3, 3 + with self.test_session() as sess: + image_shape = (10, height, width, 3) + image_values = np.random.rand(*image_shape) + expected_mean = np.mean(image_values, axis=(0, 1, 2)) + expected_var = np.var(image_values, axis=(0, 1, 2)) + images = tf.constant(image_values, shape=image_shape, dtype=tf.float32) + output = ops.batch_norm(images, decay=0.1, is_training=False) + update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION) + with tf.control_dependencies(update_ops): + output = tf.identity(output) + # Initialize all variables + sess.run(tf.global_variables_initializer()) + moving_mean = variables.get_variables('BatchNorm/moving_mean')[0] + moving_variance = variables.get_variables('BatchNorm/moving_variance')[0] + mean, variance = sess.run([moving_mean, moving_variance]) + # After initialization moving_mean == 0 and moving_variance == 1. + self.assertAllClose(mean, [0] * 3) + self.assertAllClose(variance, [1] * 3) + # Simulate assigment from saver restore. + init_assigns = [tf.assign(moving_mean, expected_mean), + tf.assign(moving_variance, expected_var)] + sess.run(init_assigns) + for _ in range(10): + sess.run([output], {images: np.random.rand(*image_shape)}) + mean = moving_mean.eval() + variance = moving_variance.eval() + # Although we feed different images, the moving_mean and moving_variance + # shouldn't change. + self.assertAllClose(mean, expected_mean) + self.assertAllClose(variance, expected_var) + + def testReuseVars(self): + height, width = 3, 3 + with self.test_session() as sess: + image_shape = (10, height, width, 3) + image_values = np.random.rand(*image_shape) + expected_mean = np.mean(image_values, axis=(0, 1, 2)) + expected_var = np.var(image_values, axis=(0, 1, 2)) + images = tf.constant(image_values, shape=image_shape, dtype=tf.float32) + output = ops.batch_norm(images, decay=0.1, is_training=False) + update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION) + with tf.control_dependencies(update_ops): + output = tf.identity(output) + # Initialize all variables + sess.run(tf.global_variables_initializer()) + moving_mean = variables.get_variables('BatchNorm/moving_mean')[0] + moving_variance = variables.get_variables('BatchNorm/moving_variance')[0] + mean, variance = sess.run([moving_mean, moving_variance]) + # After initialization moving_mean == 0 and moving_variance == 1. + self.assertAllClose(mean, [0] * 3) + self.assertAllClose(variance, [1] * 3) + # Simulate assigment from saver restore. + init_assigns = [tf.assign(moving_mean, expected_mean), + tf.assign(moving_variance, expected_var)] + sess.run(init_assigns) + for _ in range(10): + sess.run([output], {images: np.random.rand(*image_shape)}) + mean = moving_mean.eval() + variance = moving_variance.eval() + # Although we feed different images, the moving_mean and moving_variance + # shouldn't change. + self.assertAllClose(mean, expected_mean) + self.assertAllClose(variance, expected_var) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/inception/inception/slim/scopes.py b/models/research/inception/inception/slim/scopes.py new file mode 100644 index 0000000000000000000000000000000000000000..2c2fb0a2efa7d30eaddb36fc30265f30cbaeb9ef --- /dev/null +++ b/models/research/inception/inception/slim/scopes.py @@ -0,0 +1,170 @@ +# Copyright 2016 Google Inc. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Contains the new arg_scope used for TF-Slim ops. + + Allows one to define models much more compactly by eliminating boilerplate + code. This is accomplished through the use of argument scoping (arg_scope). + + Example of how to use scopes.arg_scope: + + with scopes.arg_scope([ops.conv2d], padding='SAME', + stddev=0.01, weight_decay=0.0005): + net = ops.conv2d(inputs, 64, [11, 11], 4, padding='VALID', scope='conv1') + net = ops.conv2d(net, 256, [5, 5], scope='conv2') + + The first call to conv2d will overwrite padding: + ops.conv2d(inputs, 64, [11, 11], 4, padding='VALID', + stddev=0.01, weight_decay=0.0005, scope='conv1') + + The second call to conv2d will use the predefined args: + ops.conv2d(inputs, 256, [5, 5], padding='SAME', + stddev=0.01, weight_decay=0.0005, scope='conv2') + + Example of how to reuse an arg_scope: + with scopes.arg_scope([ops.conv2d], padding='SAME', + stddev=0.01, weight_decay=0.0005) as conv2d_arg_scope: + net = ops.conv2d(net, 256, [5, 5], scope='conv1') + .... + + with scopes.arg_scope(conv2d_arg_scope): + net = ops.conv2d(net, 256, [5, 5], scope='conv2') + + Example of how to use scopes.add_arg_scope: + + @scopes.add_arg_scope + def conv2d(*args, **kwargs): +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import contextlib +import functools + +from tensorflow.python.framework import ops + +_ARGSTACK_KEY = ("__arg_stack",) + +_DECORATED_OPS = set() + + +def _get_arg_stack(): + stack = ops.get_collection(_ARGSTACK_KEY) + if stack: + return stack[0] + else: + stack = [{}] + ops.add_to_collection(_ARGSTACK_KEY, stack) + return stack + + +def _current_arg_scope(): + stack = _get_arg_stack() + return stack[-1] + + +def _add_op(op): + key_op = (op.__module__, op.__name__) + if key_op not in _DECORATED_OPS: + _DECORATED_OPS.add(key_op) + + +@contextlib.contextmanager +def arg_scope(list_ops_or_scope, **kwargs): + """Stores the default arguments for the given set of list_ops. + + For usage, please see examples at top of the file. + + Args: + list_ops_or_scope: List or tuple of operations to set argument scope for or + a dictionary containing the current scope. When list_ops_or_scope is a dict, + kwargs must be empty. When list_ops_or_scope is a list or tuple, then + every op in it needs to be decorated with @add_arg_scope to work. + **kwargs: keyword=value that will define the defaults for each op in + list_ops. All the ops need to accept the given set of arguments. + + Yields: + the current_scope, which is a dictionary of {op: {arg: value}} + Raises: + TypeError: if list_ops is not a list or a tuple. + ValueError: if any op in list_ops has not been decorated with @add_arg_scope. + """ + if isinstance(list_ops_or_scope, dict): + # Assumes that list_ops_or_scope is a scope that is being reused.
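+    # Hypothetical illustration of what a reused scope looks like: the dict
+    # yielded by an earlier arg_scope, keyed by (op.__module__, op.__name__),
+    # e.g. {('inception.slim.ops', 'conv2d'): {'padding': 'SAME', 'stddev': 0.01}}.
+    # Re-entering it only pushes a copy onto the arg stack, so no extra kwargs
+    # are accepted here.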
+    if kwargs: + raise ValueError("When attempting to re-use a scope by supplying a " + "dictionary, kwargs must be empty.") + current_scope = list_ops_or_scope.copy() + try: + _get_arg_stack().append(current_scope) + yield current_scope + finally: + _get_arg_stack().pop() + else: + # Assumes that list_ops_or_scope is a list/tuple of ops with kwargs. + if not isinstance(list_ops_or_scope, (list, tuple)): + raise TypeError("list_ops_or_scope must either be a list/tuple or reused " + "scope (i.e. dict)") + try: + current_scope = _current_arg_scope().copy() + for op in list_ops_or_scope: + key_op = (op.__module__, op.__name__) + if not has_arg_scope(op): + raise ValueError("%s is not decorated with @add_arg_scope" % (key_op,)) + if key_op in current_scope: + current_kwargs = current_scope[key_op].copy() + current_kwargs.update(kwargs) + current_scope[key_op] = current_kwargs + else: + current_scope[key_op] = kwargs.copy() + _get_arg_stack().append(current_scope) + yield current_scope + finally: + _get_arg_stack().pop() + + +def add_arg_scope(func): + """Decorates a function with args so it can be used within an arg_scope. + + Args: + func: function to decorate. + + Returns: + The decorated function func_with_args(). + """ + @functools.wraps(func) + def func_with_args(*args, **kwargs): + current_scope = _current_arg_scope() + current_args = kwargs + key_func = (func.__module__, func.__name__) + if key_func in current_scope: + current_args = current_scope[key_func].copy() + current_args.update(kwargs) + return func(*args, **current_args) + _add_op(func) + return func_with_args + + +def has_arg_scope(func): + """Checks whether a func has been decorated with @add_arg_scope or not. + + Args: + func: function to check. + + Returns: + a boolean. + """ + key_op = (func.__module__, func.__name__) + return key_op in _DECORATED_OPS diff --git a/models/research/inception/inception/slim/scopes_test.py b/models/research/inception/inception/slim/scopes_test.py new file mode 100644 index 0000000000000000000000000000000000000000..cd349399ed7300dde38ac9bcb9818abc9d0680b4 --- /dev/null +++ b/models/research/inception/inception/slim/scopes_test.py @@ -0,0 +1,162 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+# ============================================================================== +"""Tests slim.scopes.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import tensorflow as tf +from inception.slim import scopes + + +@scopes.add_arg_scope +def func1(*args, **kwargs): + return (args, kwargs) + + +@scopes.add_arg_scope +def func2(*args, **kwargs): + return (args, kwargs) + + +class ArgScopeTest(tf.test.TestCase): + + def testEmptyArgScope(self): + with self.test_session(): + self.assertEqual(scopes._current_arg_scope(), {}) + + def testCurrentArgScope(self): + func1_kwargs = {'a': 1, 'b': None, 'c': [1]} + key_op = (func1.__module__, func1.__name__) + current_scope = {key_op: func1_kwargs.copy()} + with self.test_session(): + with scopes.arg_scope([func1], a=1, b=None, c=[1]) as scope: + self.assertDictEqual(scope, current_scope) + + def testCurrentArgScopeNested(self): + func1_kwargs = {'a': 1, 'b': None, 'c': [1]} + func2_kwargs = {'b': 2, 'd': [2]} + key = lambda f: (f.__module__, f.__name__) + current_scope = {key(func1): func1_kwargs.copy(), + key(func2): func2_kwargs.copy()} + with self.test_session(): + with scopes.arg_scope([func1], a=1, b=None, c=[1]): + with scopes.arg_scope([func2], b=2, d=[2]) as scope: + self.assertDictEqual(scope, current_scope) + + def testReuseArgScope(self): + func1_kwargs = {'a': 1, 'b': None, 'c': [1]} + key_op = (func1.__module__, func1.__name__) + current_scope = {key_op: func1_kwargs.copy()} + with self.test_session(): + with scopes.arg_scope([func1], a=1, b=None, c=[1]) as scope1: + pass + with scopes.arg_scope(scope1) as scope: + self.assertDictEqual(scope, current_scope) + + def testReuseArgScopeNested(self): + func1_kwargs = {'a': 1, 'b': None, 'c': [1]} + func2_kwargs = {'b': 2, 'd': [2]} + key = lambda f: (f.__module__, f.__name__) + current_scope1 = {key(func1): func1_kwargs.copy()} + current_scope2 = {key(func1): func1_kwargs.copy(), + key(func2): func2_kwargs.copy()} + with self.test_session(): + with scopes.arg_scope([func1], a=1, b=None, c=[1]) as scope1: + with scopes.arg_scope([func2], b=2, d=[2]) as scope2: + pass + with scopes.arg_scope(scope1): + self.assertDictEqual(scopes._current_arg_scope(), current_scope1) + with scopes.arg_scope(scope2): + self.assertDictEqual(scopes._current_arg_scope(), current_scope2) + + def testSimpleArgScope(self): + func1_args = (0,) + func1_kwargs = {'a': 1, 'b': None, 'c': [1]} + with self.test_session(): + with scopes.arg_scope([func1], a=1, b=None, c=[1]): + args, kwargs = func1(0) + self.assertTupleEqual(args, func1_args) + self.assertDictEqual(kwargs, func1_kwargs) + + def testSimpleArgScopeWithTuple(self): + func1_args = (0,) + func1_kwargs = {'a': 1, 'b': None, 'c': [1]} + with self.test_session(): + with scopes.arg_scope((func1,), a=1, b=None, c=[1]): + args, kwargs = func1(0) + self.assertTupleEqual(args, func1_args) + self.assertDictEqual(kwargs, func1_kwargs) + + def testOverwriteArgScope(self): + func1_args = (0,) + func1_kwargs = {'a': 1, 'b': 2, 'c': [1]} + with scopes.arg_scope([func1], a=1, b=None, c=[1]): + args, kwargs = func1(0, b=2) + self.assertTupleEqual(args, func1_args) + self.assertDictEqual(kwargs, func1_kwargs) + + def testNestedArgScope(self): + func1_args = (0,) + func1_kwargs = {'a': 1, 'b': None, 'c': [1]} + with scopes.arg_scope([func1], a=1, b=None, c=[1]): + args, kwargs = func1(0) + self.assertTupleEqual(args, func1_args) + self.assertDictEqual(kwargs, func1_kwargs) + func1_kwargs['b'] = 2 + with 
scopes.arg_scope([func1], b=2): + args, kwargs = func1(0) + self.assertTupleEqual(args, func1_args) + self.assertDictEqual(kwargs, func1_kwargs) + + def testSharedArgScope(self): + func1_args = (0,) + func1_kwargs = {'a': 1, 'b': None, 'c': [1]} + with scopes.arg_scope([func1, func2], a=1, b=None, c=[1]): + args, kwargs = func1(0) + self.assertTupleEqual(args, func1_args) + self.assertDictEqual(kwargs, func1_kwargs) + args, kwargs = func2(0) + self.assertTupleEqual(args, func1_args) + self.assertDictEqual(kwargs, func1_kwargs) + + def testSharedArgScopeTuple(self): + func1_args = (0,) + func1_kwargs = {'a': 1, 'b': None, 'c': [1]} + with scopes.arg_scope((func1, func2), a=1, b=None, c=[1]): + args, kwargs = func1(0) + self.assertTupleEqual(args, func1_args) + self.assertDictEqual(kwargs, func1_kwargs) + args, kwargs = func2(0) + self.assertTupleEqual(args, func1_args) + self.assertDictEqual(kwargs, func1_kwargs) + + def testPartiallySharedArgScope(self): + func1_args = (0,) + func1_kwargs = {'a': 1, 'b': None, 'c': [1]} + func2_args = (1,) + func2_kwargs = {'a': 1, 'b': None, 'd': [2]} + with scopes.arg_scope([func1, func2], a=1, b=None): + with scopes.arg_scope([func1], c=[1]), scopes.arg_scope([func2], d=[2]): + args, kwargs = func1(0) + self.assertTupleEqual(args, func1_args) + self.assertDictEqual(kwargs, func1_kwargs) + args, kwargs = func2(1) + self.assertTupleEqual(args, func2_args) + self.assertDictEqual(kwargs, func2_kwargs) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/inception/inception/slim/slim.py b/models/research/inception/inception/slim/slim.py new file mode 100644 index 0000000000000000000000000000000000000000..b7a5c0f8c52b66db899835480c331ffafdc386e2 --- /dev/null +++ b/models/research/inception/inception/slim/slim.py @@ -0,0 +1,24 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""TF-Slim grouped API. Please see README.md for details and usage.""" +# pylint: disable=unused-import + +# Collapse tf-slim into a single namespace. +from inception.slim import inception_model as inception +from inception.slim import losses +from inception.slim import ops +from inception.slim import scopes +from inception.slim import variables +from inception.slim.scopes import arg_scope diff --git a/models/research/inception/inception/slim/variables.py b/models/research/inception/inception/slim/variables.py new file mode 100644 index 0000000000000000000000000000000000000000..1d967b79e9563724b1114995a732cfd4dd486afd --- /dev/null +++ b/models/research/inception/inception/slim/variables.py @@ -0,0 +1,289 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Contains convenience wrappers for creating variables in TF-Slim. + +The variables module is typically used for defining model variables from the +ops routines (see slim.ops). Such variables are used for training, evaluation +and inference of models. + +All the variables created through this module would be added to the +MODEL_VARIABLES collection, if you create a model variable outside slim, it can +be added with slim.variables.add_variable(external_variable, reuse). + +Usage: + weights_initializer = tf.truncated_normal_initializer(stddev=0.01) + l2_regularizer = lambda t: losses.l2_loss(t, weight=0.0005) + weights = variables.variable('weights', + shape=[100, 100], + initializer=weights_initializer, + regularizer=l2_regularizer, + device='/cpu:0') + + biases = variables.variable('biases', + shape=[100], + initializer=tf.zeros_initializer(), + device='/cpu:0') + + # More complex example. + + net = slim.ops.conv2d(input, 32, [3, 3], scope='conv1') + net = slim.ops.conv2d(net, 64, [3, 3], scope='conv2') + with slim.arg_scope([variables.variable], restore=False): + net = slim.ops.conv2d(net, 64, [3, 3], scope='conv3') + + # Get all model variables from all the layers. + model_variables = slim.variables.get_variables() + + # Get all model variables from a specific the layer, i.e 'conv1'. + conv1_variables = slim.variables.get_variables('conv1') + + # Get all weights from all the layers. + weights = slim.variables.get_variables_by_name('weights') + + # Get all bias from all the layers. + biases = slim.variables.get_variables_by_name('biases') + + # Get all variables to restore. + # (i.e. only those created by 'conv1' and 'conv2') + variables_to_restore = slim.variables.get_variables_to_restore() + +************************************************ +* Initializing model variables from a checkpoint +************************************************ + +# Create some variables. +v1 = slim.variables.variable(name="v1", ..., restore=False) +v2 = slim.variables.variable(name="v2", ...) # By default restore=True +... +# The list of variables to restore should only contain 'v2'. +variables_to_restore = slim.variables.get_variables_to_restore() +restorer = tf.train.Saver(variables_to_restore) +with tf.Session() as sess: + # Restore variables from disk. + restorer.restore(sess, "/tmp/model.ckpt") + print("Model restored.") + # Do some work with the model + ... + +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from inception.slim import scopes + +# Collection containing all the variables created using slim.variables +MODEL_VARIABLES = '_model_variables_' + +# Collection containing the slim.variables that are created with restore=True. +VARIABLES_TO_RESTORE = '_variables_to_restore_' + + +def add_variable(var, restore=True): + """Adds a variable to the MODEL_VARIABLES collection. + + Optionally it will add the variable to the VARIABLES_TO_RESTORE collection. + Args: + var: a variable. 
+    restore: whether the variable should be added to the + VARIABLES_TO_RESTORE collection. + + """ + collections = [MODEL_VARIABLES] + if restore: + collections.append(VARIABLES_TO_RESTORE) + for collection in collections: + if var not in tf.get_collection(collection): + tf.add_to_collection(collection, var) + + +def get_variables(scope=None, suffix=None): + """Gets the list of variables, filtered by scope and/or suffix. + + Args: + scope: an optional scope for filtering the variables to return. + suffix: an optional suffix for filtering the variables to return. + + Returns: + a copied list of variables with scope and suffix. + """ + candidates = tf.get_collection(MODEL_VARIABLES, scope)[:] + if suffix is not None: + candidates = [var for var in candidates if var.op.name.endswith(suffix)] + return candidates + + +def get_variables_to_restore(): + """Gets the list of variables to restore. + + Returns: + a copied list of variables. + """ + return tf.get_collection(VARIABLES_TO_RESTORE)[:] + + +def get_variables_by_name(given_name, scope=None): + """Gets the list of variables that were given that name. + + Args: + given_name: name given to the variable without scope. + scope: an optional scope for filtering the variables to return. + + Returns: + a copied list of variables with the given name and prefix. + """ + return get_variables(scope=scope, suffix=given_name) + + +def get_unique_variable(name): + """Gets the variable uniquely identified by that name. + + Args: + name: a name that uniquely identifies the variable. + + Returns: + a tensorflow variable. + + Raises: + ValueError: if no variable uniquely identified by the name exists. + """ + candidates = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, name) + if not candidates: + raise ValueError('Could not find variable %s' % name) + + for candidate in candidates: + if candidate.op.name == name: + return candidate + raise ValueError('Variable %s does not uniquely identify a variable' % name) + + +class VariableDeviceChooser(object): + """Slim device chooser for variables. + + When using parameter servers, it assigns variables to the parameter-server + tasks in a round-robin fashion. When not using parameter servers, it places + variables on GPU:0 or CPU:0 as requested. + """ + + def __init__(self, + num_parameter_servers=0, + ps_device='/job:ps', + placement='CPU:0'): + """Initialize VariableDeviceChooser. + + Args: + num_parameter_servers: number of parameter servers. + ps_device: string representing the parameter server device. + placement: string representing the placement of the variable, either CPU:0 + or GPU:0. When using parameter servers it is forced to CPU:0. + """ + self._num_ps = num_parameter_servers + self._ps_device = ps_device + self._placement = placement if num_parameter_servers == 0 else 'CPU:0' + self._next_task_id = 0 + + def __call__(self, op): + device_string = '' + if self._num_ps > 0: + task_id = self._next_task_id + self._next_task_id = (self._next_task_id + 1) % self._num_ps + device_string = '%s/task:%d' % (self._ps_device, task_id) + device_string += '/%s' % self._placement + return device_string + + +# TODO(sguada) Remove once get_variable is able to colocate op.devices. +def variable_device(device, name): + """Fix the variable device to colocate its ops.""" + if callable(device): + var_name = tf.get_variable_scope().name + '/' + name + var_def = tf.NodeDef(name=var_name, op='Variable') + device = device(var_def) + if device is None: + device = '' + return device + + +@scopes.add_arg_scope +def global_step(device=''): + """Returns the global step variable.
+ + Args: + device: Optional device to place the variable. It can be an string or a + function that is called to get the device for the variable. + + Returns: + the tensor representing the global step variable. + """ + global_step_ref = tf.get_collection(tf.GraphKeys.GLOBAL_STEP) + if global_step_ref: + return global_step_ref[0] + else: + collections = [ + VARIABLES_TO_RESTORE, + tf.GraphKeys.GLOBAL_VARIABLES, + tf.GraphKeys.GLOBAL_STEP, + ] + # Get the device for the variable. + with tf.device(variable_device(device, 'global_step')): + return tf.get_variable('global_step', shape=[], dtype=tf.int64, + initializer=tf.zeros_initializer(), + trainable=False, collections=collections) + + +@scopes.add_arg_scope +def variable(name, shape=None, dtype=tf.float32, initializer=None, + regularizer=None, trainable=True, collections=None, device='', + restore=True): + """Gets an existing variable with these parameters or creates a new one. + + It also add itself to a group with its name. + + Args: + name: the name of the new or existing variable. + shape: shape of the new or existing variable. + dtype: type of the new or existing variable (defaults to `DT_FLOAT`). + initializer: initializer for the variable if one is created. + regularizer: a (Tensor -> Tensor or None) function; the result of + applying it on a newly created variable will be added to the collection + GraphKeys.REGULARIZATION_LOSSES and can be used for regularization. + trainable: If `True` also add the variable to the graph collection + `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable). + collections: A list of collection names to which the Variable will be added. + Note that the variable is always also added to the tf.GraphKeys.GLOBAL_VARIABLES + and MODEL_VARIABLES collections. + device: Optional device to place the variable. It can be an string or a + function that is called to get the device for the variable. + restore: whether the variable should be added to the + VARIABLES_TO_RESTORE collection. + + Returns: + The created or existing variable. + """ + collections = list(collections or []) + + # Make sure variables are added to tf.GraphKeys.GLOBAL_VARIABLES and MODEL_VARIABLES + collections += [tf.GraphKeys.GLOBAL_VARIABLES, MODEL_VARIABLES] + # Add to VARIABLES_TO_RESTORE if necessary + if restore: + collections.append(VARIABLES_TO_RESTORE) + # Remove duplicates + collections = set(collections) + # Get the device for the variable. + with tf.device(variable_device(device, name)): + return tf.get_variable(name, shape=shape, dtype=dtype, + initializer=initializer, regularizer=regularizer, + trainable=trainable, collections=collections) diff --git a/models/research/inception/inception/slim/variables_test.py b/models/research/inception/inception/slim/variables_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b8c1944dfeb0fba7ad99f104b0c366c41d737c63 --- /dev/null +++ b/models/research/inception/inception/slim/variables_test.py @@ -0,0 +1,392 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for slim.variables.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from inception.slim import scopes +from inception.slim import variables + + +class VariablesTest(tf.test.TestCase): + + def testCreateVariable(self): + with self.test_session(): + with tf.variable_scope('A'): + a = variables.variable('a', [5]) + self.assertEquals(a.op.name, 'A/a') + self.assertListEqual(a.get_shape().as_list(), [5]) + + def testGetVariables(self): + with self.test_session(): + with tf.variable_scope('A'): + a = variables.variable('a', [5]) + with tf.variable_scope('B'): + b = variables.variable('a', [5]) + self.assertEquals([a, b], variables.get_variables()) + self.assertEquals([a], variables.get_variables('A')) + self.assertEquals([b], variables.get_variables('B')) + + def testGetVariablesSuffix(self): + with self.test_session(): + with tf.variable_scope('A'): + a = variables.variable('a', [5]) + with tf.variable_scope('A'): + b = variables.variable('b', [5]) + self.assertEquals([a], variables.get_variables(suffix='a')) + self.assertEquals([b], variables.get_variables(suffix='b')) + + def testGetVariableWithSingleVar(self): + with self.test_session(): + with tf.variable_scope('parent'): + a = variables.variable('child', [5]) + self.assertEquals(a, variables.get_unique_variable('parent/child')) + + def testGetVariableWithDistractors(self): + with self.test_session(): + with tf.variable_scope('parent'): + a = variables.variable('child', [5]) + with tf.variable_scope('child'): + variables.variable('grandchild1', [7]) + variables.variable('grandchild2', [9]) + self.assertEquals(a, variables.get_unique_variable('parent/child')) + + def testGetVariableThrowsExceptionWithNoMatch(self): + var_name = 'cant_find_me' + with self.test_session(): + with self.assertRaises(ValueError): + variables.get_unique_variable(var_name) + + def testGetThrowsExceptionWithChildrenButNoMatch(self): + var_name = 'parent/child' + with self.test_session(): + with tf.variable_scope(var_name): + variables.variable('grandchild1', [7]) + variables.variable('grandchild2', [9]) + with self.assertRaises(ValueError): + variables.get_unique_variable(var_name) + + def testGetVariablesToRestore(self): + with self.test_session(): + with tf.variable_scope('A'): + a = variables.variable('a', [5]) + with tf.variable_scope('B'): + b = variables.variable('a', [5]) + self.assertEquals([a, b], variables.get_variables_to_restore()) + + def testNoneGetVariablesToRestore(self): + with self.test_session(): + with tf.variable_scope('A'): + a = variables.variable('a', [5], restore=False) + with tf.variable_scope('B'): + b = variables.variable('a', [5], restore=False) + self.assertEquals([], variables.get_variables_to_restore()) + self.assertEquals([a, b], variables.get_variables()) + + def testGetMixedVariablesToRestore(self): + with self.test_session(): + with tf.variable_scope('A'): + a = variables.variable('a', [5]) + b = variables.variable('b', [5], restore=False) + with tf.variable_scope('B'): + c = variables.variable('c', [5]) + d = variables.variable('d', [5], restore=False) + self.assertEquals([a, b, c, d], variables.get_variables()) + self.assertEquals([a, c], variables.get_variables_to_restore()) + + def testReuseVariable(self): + with self.test_session(): + 
with tf.variable_scope('A'): + a = variables.variable('a', []) + with tf.variable_scope('A', reuse=True): + b = variables.variable('a', []) + self.assertEquals(a, b) + self.assertListEqual([a], variables.get_variables()) + + def testVariableWithDevice(self): + with self.test_session(): + with tf.variable_scope('A'): + a = variables.variable('a', [], device='cpu:0') + b = variables.variable('b', [], device='cpu:1') + self.assertDeviceEqual(a.device, 'cpu:0') + self.assertDeviceEqual(b.device, 'cpu:1') + + def testVariableWithDeviceFromScope(self): + with self.test_session(): + with tf.device('/cpu:0'): + a = variables.variable('a', []) + b = variables.variable('b', [], device='cpu:1') + self.assertDeviceEqual(a.device, 'cpu:0') + self.assertDeviceEqual(b.device, 'cpu:1') + + def testVariableWithDeviceFunction(self): + class DevFn(object): + + def __init__(self): + self.counter = -1 + + def __call__(self, op): + self.counter += 1 + return 'cpu:%d' % self.counter + + with self.test_session(): + with scopes.arg_scope([variables.variable], device=DevFn()): + a = variables.variable('a', []) + b = variables.variable('b', []) + c = variables.variable('c', [], device='cpu:12') + d = variables.variable('d', []) + with tf.device('cpu:99'): + e_init = tf.constant(12) + e = variables.variable('e', initializer=e_init) + self.assertDeviceEqual(a.device, 'cpu:0') + self.assertDeviceEqual(a.initial_value.device, 'cpu:0') + self.assertDeviceEqual(b.device, 'cpu:1') + self.assertDeviceEqual(b.initial_value.device, 'cpu:1') + self.assertDeviceEqual(c.device, 'cpu:12') + self.assertDeviceEqual(c.initial_value.device, 'cpu:12') + self.assertDeviceEqual(d.device, 'cpu:2') + self.assertDeviceEqual(d.initial_value.device, 'cpu:2') + self.assertDeviceEqual(e.device, 'cpu:3') + self.assertDeviceEqual(e.initial_value.device, 'cpu:99') + + def testVariableWithReplicaDeviceSetter(self): + with self.test_session(): + with tf.device(tf.train.replica_device_setter(ps_tasks=2)): + a = variables.variable('a', []) + b = variables.variable('b', []) + c = variables.variable('c', [], device='cpu:12') + d = variables.variable('d', []) + with tf.device('cpu:99'): + e_init = tf.constant(12) + e = variables.variable('e', initializer=e_init) + # The values below highlight how the replica_device_setter puts initial + # values on the worker job, and how it merges explicit devices. 
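+      # Concretely (see the assertions below): with ps_tasks=2 the variables
+      # are placed round-robin, 'a' on /job:ps/task:0 and 'b' on /job:ps/task:1,
+      # an explicit device such as 'cpu:12' is merged into the chosen ps task as
+      # /job:ps/task:0/cpu:12, and the initial-value ops stay on /job:worker.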
+ self.assertDeviceEqual(a.device, '/job:ps/task:0/cpu:0') + self.assertDeviceEqual(a.initial_value.device, '/job:worker/cpu:0') + self.assertDeviceEqual(b.device, '/job:ps/task:1/cpu:0') + self.assertDeviceEqual(b.initial_value.device, '/job:worker/cpu:0') + self.assertDeviceEqual(c.device, '/job:ps/task:0/cpu:12') + self.assertDeviceEqual(c.initial_value.device, '/job:worker/cpu:12') + self.assertDeviceEqual(d.device, '/job:ps/task:1/cpu:0') + self.assertDeviceEqual(d.initial_value.device, '/job:worker/cpu:0') + self.assertDeviceEqual(e.device, '/job:ps/task:0/cpu:0') + self.assertDeviceEqual(e.initial_value.device, '/job:worker/cpu:99') + + def testVariableWithVariableDeviceChooser(self): + + with tf.Graph().as_default(): + device_fn = variables.VariableDeviceChooser(num_parameter_servers=2) + with scopes.arg_scope([variables.variable], device=device_fn): + a = variables.variable('a', []) + b = variables.variable('b', []) + c = variables.variable('c', [], device='cpu:12') + d = variables.variable('d', []) + with tf.device('cpu:99'): + e_init = tf.constant(12) + e = variables.variable('e', initializer=e_init) + # The values below highlight how the VariableDeviceChooser puts initial + # values on the same device as the variable job. + self.assertDeviceEqual(a.device, '/job:ps/task:0/cpu:0') + self.assertDeviceEqual(a.initial_value.device, a.device) + self.assertDeviceEqual(b.device, '/job:ps/task:1/cpu:0') + self.assertDeviceEqual(b.initial_value.device, b.device) + self.assertDeviceEqual(c.device, '/cpu:12') + self.assertDeviceEqual(c.initial_value.device, c.device) + self.assertDeviceEqual(d.device, '/job:ps/task:0/cpu:0') + self.assertDeviceEqual(d.initial_value.device, d.device) + self.assertDeviceEqual(e.device, '/job:ps/task:1/cpu:0') + self.assertDeviceEqual(e.initial_value.device, '/cpu:99') + + def testVariableGPUPlacement(self): + + with tf.Graph().as_default(): + device_fn = variables.VariableDeviceChooser(placement='gpu:0') + with scopes.arg_scope([variables.variable], device=device_fn): + a = variables.variable('a', []) + b = variables.variable('b', []) + c = variables.variable('c', [], device='cpu:12') + d = variables.variable('d', []) + with tf.device('cpu:99'): + e_init = tf.constant(12) + e = variables.variable('e', initializer=e_init) + # The values below highlight how the VariableDeviceChooser puts initial + # values on the same device as the variable job. 
+ self.assertDeviceEqual(a.device, '/gpu:0') + self.assertDeviceEqual(a.initial_value.device, a.device) + self.assertDeviceEqual(b.device, '/gpu:0') + self.assertDeviceEqual(b.initial_value.device, b.device) + self.assertDeviceEqual(c.device, '/cpu:12') + self.assertDeviceEqual(c.initial_value.device, c.device) + self.assertDeviceEqual(d.device, '/gpu:0') + self.assertDeviceEqual(d.initial_value.device, d.device) + self.assertDeviceEqual(e.device, '/gpu:0') + self.assertDeviceEqual(e.initial_value.device, '/cpu:99') + + def testVariableCollection(self): + with self.test_session(): + a = variables.variable('a', [], collections='A') + b = variables.variable('b', [], collections='B') + self.assertEquals(a, tf.get_collection('A')[0]) + self.assertEquals(b, tf.get_collection('B')[0]) + + def testVariableCollections(self): + with self.test_session(): + a = variables.variable('a', [], collections=['A', 'C']) + b = variables.variable('b', [], collections=['B', 'C']) + self.assertEquals(a, tf.get_collection('A')[0]) + self.assertEquals(b, tf.get_collection('B')[0]) + + def testVariableCollectionsWithArgScope(self): + with self.test_session(): + with scopes.arg_scope([variables.variable], collections='A'): + a = variables.variable('a', []) + b = variables.variable('b', []) + self.assertListEqual([a, b], tf.get_collection('A')) + + def testVariableCollectionsWithArgScopeNested(self): + with self.test_session(): + with scopes.arg_scope([variables.variable], collections='A'): + a = variables.variable('a', []) + with scopes.arg_scope([variables.variable], collections='B'): + b = variables.variable('b', []) + self.assertEquals(a, tf.get_collection('A')[0]) + self.assertEquals(b, tf.get_collection('B')[0]) + + def testVariableCollectionsWithArgScopeNonNested(self): + with self.test_session(): + with scopes.arg_scope([variables.variable], collections='A'): + a = variables.variable('a', []) + with scopes.arg_scope([variables.variable], collections='B'): + b = variables.variable('b', []) + variables.variable('c', []) + self.assertListEqual([a], tf.get_collection('A')) + self.assertListEqual([b], tf.get_collection('B')) + + def testVariableRestoreWithArgScopeNested(self): + with self.test_session(): + with scopes.arg_scope([variables.variable], restore=True): + a = variables.variable('a', []) + with scopes.arg_scope([variables.variable], + trainable=False, + collections=['A', 'B']): + b = variables.variable('b', []) + c = variables.variable('c', []) + self.assertListEqual([a, b, c], variables.get_variables_to_restore()) + self.assertListEqual([a, c], tf.trainable_variables()) + self.assertListEqual([b], tf.get_collection('A')) + self.assertListEqual([b], tf.get_collection('B')) + + +class GetVariablesByNameTest(tf.test.TestCase): + + def testGetVariableGivenNameScoped(self): + with self.test_session(): + with tf.variable_scope('A'): + a = variables.variable('a', [5]) + b = variables.variable('b', [5]) + self.assertEquals([a], variables.get_variables_by_name('a')) + self.assertEquals([b], variables.get_variables_by_name('b')) + + def testGetVariablesByNameReturnsByValueWithScope(self): + with self.test_session(): + with tf.variable_scope('A'): + a = variables.variable('a', [5]) + matched_variables = variables.get_variables_by_name('a') + + # If variables.get_variables_by_name returns the list by reference, the + # following append should persist, and be returned, in subsequent calls + # to variables.get_variables_by_name('a'). 
+ matched_variables.append(4) + + matched_variables = variables.get_variables_by_name('a') + self.assertEquals([a], matched_variables) + + def testGetVariablesByNameReturnsByValueWithoutScope(self): + with self.test_session(): + a = variables.variable('a', [5]) + matched_variables = variables.get_variables_by_name('a') + + # If variables.get_variables_by_name returns the list by reference, the + # following append should persist, and be returned, in subsequent calls + # to variables.get_variables_by_name('a'). + matched_variables.append(4) + + matched_variables = variables.get_variables_by_name('a') + self.assertEquals([a], matched_variables) + + +class GlobalStepTest(tf.test.TestCase): + + def testStable(self): + with tf.Graph().as_default(): + gs = variables.global_step() + gs2 = variables.global_step() + self.assertTrue(gs is gs2) + + def testDevice(self): + with tf.Graph().as_default(): + with scopes.arg_scope([variables.global_step], device='/gpu:0'): + gs = variables.global_step() + self.assertDeviceEqual(gs.device, '/gpu:0') + + def testDeviceFn(self): + class DevFn(object): + + def __init__(self): + self.counter = -1 + + def __call__(self, op): + self.counter += 1 + return '/cpu:%d' % self.counter + + with tf.Graph().as_default(): + with scopes.arg_scope([variables.global_step], device=DevFn()): + gs = variables.global_step() + gs2 = variables.global_step() + self.assertDeviceEqual(gs.device, '/cpu:0') + self.assertEquals(gs, gs2) + self.assertDeviceEqual(gs2.device, '/cpu:0') + + def testReplicaDeviceSetter(self): + device_fn = tf.train.replica_device_setter(2) + with tf.Graph().as_default(): + with scopes.arg_scope([variables.global_step], device=device_fn): + gs = variables.global_step() + gs2 = variables.global_step() + self.assertEquals(gs, gs2) + self.assertDeviceEqual(gs.device, '/job:ps/task:0') + self.assertDeviceEqual(gs.initial_value.device, '/job:ps/task:0') + self.assertDeviceEqual(gs2.device, '/job:ps/task:0') + self.assertDeviceEqual(gs2.initial_value.device, '/job:ps/task:0') + + def testVariableWithVariableDeviceChooser(self): + + with tf.Graph().as_default(): + device_fn = variables.VariableDeviceChooser() + with scopes.arg_scope([variables.global_step], device=device_fn): + gs = variables.global_step() + gs2 = variables.global_step() + self.assertEquals(gs, gs2) + self.assertDeviceEqual(gs.device, 'cpu:0') + self.assertDeviceEqual(gs.initial_value.device, gs.device) + self.assertDeviceEqual(gs2.device, 'cpu:0') + self.assertDeviceEqual(gs2.initial_value.device, gs2.device) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/keypointnet/CONTRIBUTING.md b/models/research/keypointnet/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..939e5341e74dc2371c8b47f0e27b50581bed5f63 --- /dev/null +++ b/models/research/keypointnet/CONTRIBUTING.md @@ -0,0 +1,28 @@ +# How to Contribute + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement. You (or your employer) retain the copyright to your contribution; +this simply gives us permission to use and redistribute your contributions as +part of the project. Head over to to see +your current agreements on file or to sign a new one. 
+ +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Code reviews + +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. Consult +[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more +information on using pull requests. + +## Community Guidelines + +This project follows [Google's Open Source Community +Guidelines](https://opensource.google.com/conduct/). diff --git a/models/research/keypointnet/LICENSE b/models/research/keypointnet/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7 --- /dev/null +++ b/models/research/keypointnet/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/models/research/keypointnet/README.md b/models/research/keypointnet/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8de88ca5a18816984302a9c20639364a7c8cde53 --- /dev/null +++ b/models/research/keypointnet/README.md @@ -0,0 +1,46 @@ +![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +# KeypointNet +This is an implementation of the keypoint network proposed in "Discovery of +Latent 3D Keypoints via End-to-end Geometric Reasoning +[[pdf](https://arxiv.org/pdf/1807.03146.pdf)]". Given a single 2D image of a +known class, this network can predict a set of 3D keypoints that are consistent +across viewing angles of the same object and across object instances. These +keypoints and their detectors are discovered and learned automatically without +keypoint location supervision [[demo](https://keypointnet.github.io)]. + +## Datasets: + ShapeNet's rendering for + [Cars](https://storage.googleapis.com/discovery-3dkeypoints-data/cars_with_keypoints.zip), + [Planes](https://storage.googleapis.com/discovery-3dkeypoints-data/planes_with_keypoints.zip), + [Chairs](https://storage.googleapis.com/discovery-3dkeypoints-data/chairs_with_keypoints.zip). + + Each set contains: +1. tfrecords +2. train.txt, a list of tfrecords used for training. +3. dev.txt, a list of tfrecords used for validation. +4. test.txt, a list of tfrecords used for testing. +5. projection.txt, storing the global 4x4 camera projection matrix. +6. job.txt, storing ShapeNet's object IDs in each tfrecord. + +## Training: + Run `main.py --model_dir=MODEL_DIR --dset=DSET` + + where MODEL_DIR is a folder for storing model checkpoints (see [tf.estimator](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator)), and DSET should point to the folder containing tfrecords (download above). + +## Inference: + Run `main.py --model_dir=MODEL_DIR --input=INPUT --predict` + + where MODEL_DIR is the model checkpoint folder, and INPUT is a folder containing png or jpeg test images. + We trained the network using a total batch size of 256 (8 x 32 replicas).
You may have to tune the learning rate if your batch size is different. + +## Code credit: + Supasorn Suwajanakorn + +## Contact: + supasorn@gmail.com, [snavely,tompson,mnorouzi]@google.com + + +(This is not an officially supported Google product) diff --git a/models/research/keypointnet/main.py b/models/research/keypointnet/main.py new file mode 100644 index 0000000000000000000000000000000000000000..04b30159404e01529c898ee75fb1ed78f705f539 --- /dev/null +++ b/models/research/keypointnet/main.py @@ -0,0 +1,697 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= +"""KeypointNet!! + +A reimplementation of 'Discovery of Latent 3D Keypoints via End-to-end +Geometric Reasoning' keypoint network. Given a single 2D image of a known class, +this network can predict a set of 3D keypoints that are consistent across +viewing angles of the same object and across object instances. These keypoints +and their detectors are discovered and learned automatically without +keypoint location supervision. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import matplotlib.pyplot as plt +import numpy as np +import os +from scipy import misc +import sys +import tensorflow as tf +import tensorflow.contrib.slim as slim +import utils + +FLAGS = tf.app.flags.FLAGS + +tf.app.flags.DEFINE_boolean("predict", False, "Running inference if true") +tf.app.flags.DEFINE_string( + "input", + "", + "Input folder containing images") +tf.app.flags.DEFINE_string("model_dir", None, "Estimator model_dir") +tf.app.flags.DEFINE_string( + "dset", + "", + "Path to the directory containing the dataset.") +tf.app.flags.DEFINE_integer("steps", 200000, "Training steps") +tf.app.flags.DEFINE_integer("batch_size", 8, "Size of mini-batch.") +tf.app.flags.DEFINE_string( + "hparams", "", + "A comma-separated list of `name=value` hyperparameter values. This flag " + "is used to override hyperparameter settings either when manually " + "selecting hyperparameters or when using Vizier.") +tf.app.flags.DEFINE_integer( + "sync_replicas", -1, + "If > 0, use SyncReplicasOptimizer and use this many replicas per sync.") + +# Fixed input size 128 x 128. +vw = vh = 128 + + +def create_input_fn(split, batch_size): + """Returns input_fn for tf.estimator.Estimator. + + Reads tfrecords and construts input_fn for either training or eval. All + tfrecords not in test.txt or dev.txt will be assigned to training set. + + Args: + split: A string indicating the split. Can be either 'train' or 'validation'. + batch_size: The batch size! + + Returns: + input_fn for tf.estimator.Estimator. + + Raises: + IOError: If test.txt or dev.txt are not found. 
+ """ + + if (not os.path.exists(os.path.join(FLAGS.dset, "test.txt")) or + not os.path.exists(os.path.join(FLAGS.dset, "dev.txt"))): + raise IOError("test.txt or dev.txt not found") + + with open(os.path.join(FLAGS.dset, "test.txt"), "r") as f: + testset = [x.strip() for x in f.readlines()] + + with open(os.path.join(FLAGS.dset, "dev.txt"), "r") as f: + validset = [x.strip() for x in f.readlines()] + + files = os.listdir(FLAGS.dset) + filenames = [] + for f in files: + sp = os.path.splitext(f) + if sp[1] != ".tfrecord" or sp[0] in testset: + continue + + if ((split == "validation" and sp[0] in validset) or + (split == "train" and sp[0] not in validset)): + filenames.append(os.path.join(FLAGS.dset, f)) + + def input_fn(): + """input_fn for tf.estimator.Estimator.""" + + def parser(serialized_example): + """Parses a single tf.Example into image and label tensors.""" + fs = tf.parse_single_example( + serialized_example, + features={ + "img0": tf.FixedLenFeature([], tf.string), + "img1": tf.FixedLenFeature([], tf.string), + "mv0": tf.FixedLenFeature([16], tf.float32), + "mvi0": tf.FixedLenFeature([16], tf.float32), + "mv1": tf.FixedLenFeature([16], tf.float32), + "mvi1": tf.FixedLenFeature([16], tf.float32), + }) + + fs["img0"] = tf.div(tf.to_float(tf.image.decode_png(fs["img0"], 4)), 255) + fs["img1"] = tf.div(tf.to_float(tf.image.decode_png(fs["img1"], 4)), 255) + + fs["img0"].set_shape([vh, vw, 4]) + fs["img1"].set_shape([vh, vw, 4]) + + # fs["lr0"] = [fs["mv0"][0]] + # fs["lr1"] = [fs["mv1"][0]] + + fs["lr0"] = tf.convert_to_tensor([fs["mv0"][0]]) + fs["lr1"] = tf.convert_to_tensor([fs["mv1"][0]]) + + return fs + + np.random.shuffle(filenames) + dataset = tf.data.TFRecordDataset(filenames) + dataset = dataset.map(parser, num_parallel_calls=4) + dataset = dataset.shuffle(400).repeat().batch(batch_size) + dataset = dataset.prefetch(buffer_size=256) + + return dataset.make_one_shot_iterator().get_next(), None + + return input_fn + + +class Transformer(object): + """A utility for projecting 3D points to 2D coordinates and vice versa. + + 3D points are represented in 4D-homogeneous world coordinates. The pixel + coordinates are represented in normalized device coordinates [-1, 1]. + See https://learnopengl.com/Getting-started/Coordinate-Systems. + """ + + def __get_matrix(self, lines): + return np.array([[float(y) for y in x.strip().split(" ")] for x in lines]) + + def __read_projection_matrix(self, filename): + if not os.path.exists(filename): + filename = "/cns/vz-d/home/supasorn/datasets/cars/projection.txt" + with open(filename, "r") as f: + lines = f.readlines() + return self.__get_matrix(lines) + + def __init__(self, w, h, dataset_dir): + self.w = w + self.h = h + p = self.__read_projection_matrix(dataset_dir + "projection.txt") + + # transposed of inversed projection matrix. 
+ self.pinv_t = tf.constant([[1.0 / p[0, 0], 0, 0, + 0], [0, 1.0 / p[1, 1], 0, 0], [0, 0, 1, 0], + [0, 0, 0, 1]]) + self.f = p[0, 0] + + def project(self, xyzw): + """Projects homogeneous 3D coordinates to normalized device coordinates.""" + + z = xyzw[:, :, 2:3] + 1e-8 + return tf.concat([-self.f * xyzw[:, :, :2] / z, z], axis=2) + + def unproject(self, xyz): + """Unprojects normalized device coordinates with depth to 3D coordinates.""" + + z = xyz[:, :, 2:] + xy = -xyz * z + + def batch_matmul(a, b): + return tf.reshape( + tf.matmul(tf.reshape(a, [-1, a.shape[2].value]), b), + [-1, a.shape[1].value, a.shape[2].value]) + + return batch_matmul( + tf.concat([xy[:, :, :2], z, tf.ones_like(z)], axis=2), self.pinv_t) + + +def meshgrid(h): + """Returns a meshgrid ranging from [-1, 1] in x, y axes.""" + + r = np.arange(0.5, h, 1) / (h / 2) - 1 + ranx, rany = tf.meshgrid(r, -r) + return tf.to_float(ranx), tf.to_float(rany) + + +def estimate_rotation(xyz0, xyz1, pconf, noise): + """Estimates the rotation between two sets of keypoints. + + The rotation is estimated by first subtracting mean from each set of keypoints + and computing SVD of the covariance matrix. + + Args: + xyz0: [batch, num_kp, 3] The first set of keypoints. + xyz1: [batch, num_kp, 3] The second set of keypoints. + pconf: [batch, num_kp] The weights used to compute the rotation estimate. + noise: A number indicating the noise added to the keypoints. + + Returns: + [batch, 3, 3] A batch of transposed 3 x 3 rotation matrices. + """ + + xyz0 += tf.random_normal(tf.shape(xyz0), mean=0, stddev=noise) + xyz1 += tf.random_normal(tf.shape(xyz1), mean=0, stddev=noise) + + pconf2 = tf.expand_dims(pconf, 2) + cen0 = tf.reduce_sum(xyz0 * pconf2, 1, keepdims=True) + cen1 = tf.reduce_sum(xyz1 * pconf2, 1, keepdims=True) + + x = xyz0 - cen0 + y = xyz1 - cen1 + + cov = tf.matmul(tf.matmul(x, tf.matrix_diag(pconf), transpose_a=True), y) + _, u, v = tf.svd(cov, full_matrices=True) + + d = tf.matrix_determinant(tf.matmul(v, u, transpose_b=True)) + ud = tf.concat( + [u[:, :, :-1], u[:, :, -1:] * tf.expand_dims(tf.expand_dims(d, 1), 1)], + axis=2) + return tf.matmul(ud, v, transpose_b=True) + + +def relative_pose_loss(xyz0, xyz1, rot, pconf, noise): + """Computes the relative pose loss (chordal, angular). + + Args: + xyz0: [batch, num_kp, 3] The first set of keypoints. + xyz1: [batch, num_kp, 3] The second set of keypoints. + rot: [batch, 4, 4] The ground-truth rotation matrices. + pconf: [batch, num_kp] The weights used to compute the rotation estimate. + noise: A number indicating the noise added to the keypoints. + + Returns: + A tuple (chordal loss, angular loss). + """ + + r_transposed = estimate_rotation(xyz0, xyz1, pconf, noise) + rotation = rot[:, :3, :3] + frob_sqr = tf.reduce_sum(tf.square(r_transposed - rotation), axis=[1, 2]) + frob = tf.sqrt(frob_sqr) + + return tf.reduce_mean(frob_sqr), \ + 2.0 * tf.reduce_mean(tf.asin(tf.minimum(1.0, frob / (2 * math.sqrt(2))))) + + +def separation_loss(xyz, delta): + """Computes the separation loss. + + Args: + xyz: [batch, num_kp, 3] Input keypoints. + delta: A separation threshold. Incur 0 cost if the distance >= delta. + + Returns: + The seperation loss. 
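+    As implemented below, every ordered keypoint pair (i, j) contributes
+    max(delta - ||x_i - x_j||^2, 0) (delta is compared against the squared
+    distance), and the sum is normalized by 2 * num_kp * batch_size.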
+ """ + + num_kp = tf.shape(xyz)[1] + t1 = tf.tile(xyz, [1, num_kp, 1]) + + t2 = tf.reshape(tf.tile(xyz, [1, 1, num_kp]), tf.shape(t1)) + diffsq = tf.square(t1 - t2) + + # -> [batch, num_kp ^ 2] + lensqr = tf.reduce_sum(diffsq, axis=2) + + return (tf.reduce_sum(tf.maximum(-lensqr + delta, 0.0)) / tf.to_float( + num_kp * FLAGS.batch_size * 2)) + + +def consistency_loss(uv0, uv1, pconf): + """Computes multi-view consistency loss between two sets of keypoints. + + Args: + uv0: [batch, num_kp, 2] The first set of keypoint 2D coordinates. + uv1: [batch, num_kp, 2] The second set of keypoint 2D coordinates. + pconf: [batch, num_kp] The weights used to compute the rotation estimate. + + Returns: + The consistency loss. + """ + + # [batch, num_kp, 2] + wd = tf.square(uv0 - uv1) * tf.expand_dims(pconf, 2) + wd = tf.reduce_sum(wd, axis=[1, 2]) + return tf.reduce_mean(wd) + + +def variance_loss(probmap, ranx, rany, uv): + """Computes the variance loss as part of Sillhouette consistency. + + Args: + probmap: [batch, num_kp, h, w] The distribution map of keypoint locations. + ranx: X-axis meshgrid. + rany: Y-axis meshgrid. + uv: [batch, num_kp, 2] Keypoint locations (in NDC). + + Returns: + The variance loss. + """ + + ran = tf.stack([ranx, rany], axis=2) + + sh = tf.shape(ran) + # [batch, num_kp, vh, vw, 2] + ran = tf.reshape(ran, [1, 1, sh[0], sh[1], 2]) + + sh = tf.shape(uv) + uv = tf.reshape(uv, [sh[0], sh[1], 1, 1, 2]) + + diff = tf.reduce_sum(tf.square(uv - ran), axis=4) + diff *= probmap + + return tf.reduce_mean(tf.reduce_sum(diff, axis=[2, 3])) + + +def dilated_cnn(images, num_filters, is_training): + """Constructs a base dilated convolutional network. + + Args: + images: [batch, h, w, 3] Input RGB images. + num_filters: The number of filters for all layers. + is_training: True if this function is called during training. + + Returns: + Output of this dilated CNN. + """ + + net = images + + with slim.arg_scope( + [slim.conv2d, slim.fully_connected], + normalizer_fn=slim.batch_norm, + activation_fn=lambda x: tf.nn.leaky_relu(x, alpha=0.1), + normalizer_params={"is_training": is_training}): + for i, r in enumerate([1, 1, 2, 4, 8, 16, 1, 2, 4, 8, 16, 1]): + net = slim.conv2d(net, num_filters, [3, 3], rate=r, scope="dconv%d" % i) + + return net + + +def orientation_network(images, num_filters, is_training): + """Constructs a network that infers the orientation of an object. + + Args: + images: [batch, h, w, 3] Input RGB images. + num_filters: The number of filters for all layers. + is_training: True if this function is called during training. + + Returns: + Output of the orientation network. + """ + + with tf.variable_scope("OrientationNetwork"): + net = dilated_cnn(images, num_filters, is_training) + + modules = 2 + prob = slim.conv2d(net, 2, [3, 3], rate=1, activation_fn=None) + prob = tf.transpose(prob, [0, 3, 1, 2]) + + prob = tf.reshape(prob, [-1, modules, vh * vw]) + prob = tf.nn.softmax(prob) + ranx, rany = meshgrid(vh) + + prob = tf.reshape(prob, [-1, 2, vh, vw]) + + sx = tf.reduce_sum(prob * ranx, axis=[2, 3]) + sy = tf.reduce_sum(prob * rany, axis=[2, 3]) # -> batch x modules + + out_xy = tf.reshape(tf.stack([sx, sy], -1), [-1, modules, 2]) + + return out_xy + + +def keypoint_network(rgba, + num_filters, + num_kp, + is_training, + lr_gt=None, + anneal=1): + """Constructs our main keypoint network that predicts 3D keypoints. + + Args: + rgba: [batch, h, w, 4] Input RGB images with alpha channel. + num_filters: The number of filters for all layers. + num_kp: The number of keypoints. 
+ is_training: True if this function is called during training. + lr_gt: The groundtruth orientation flag used at the beginning of training. + Then we linearly anneal in the prediction. + anneal: A number between [0, 1] where 1 means using the ground-truth + orientation and 0 means using our estimate. + + Returns: + uv: [batch, num_kp, 2] 2D locations of keypoints. + z: [batch, num_kp] The depth of keypoints. + orient: [batch, 2, 2] Two 2D coordinates that correspond to [1, 0, 0] and + [-1, 0, 0] in object space. + sill: The Sillhouette loss. + variance: The variance loss. + prob_viz: A visualization of all predicted keypoints. + prob_vizs: A list of visualizations of each keypoint. + + """ + + images = rgba[:, :, :, :3] + + # [batch, 1] + orient = orientation_network(images, num_filters * 0.5, is_training) + + # [batch, 1] + lr_estimated = tf.maximum(0.0, tf.sign(orient[:, 0, :1] - orient[:, 1, :1])) + + if lr_gt is None: + lr = lr_estimated + else: + lr_gt = tf.maximum(0.0, tf.sign(lr_gt[:, :1])) + lr = tf.round(lr_gt * anneal + lr_estimated * (1 - anneal)) + + lrtiled = tf.tile( + tf.expand_dims(tf.expand_dims(lr, 1), 1), + [1, images.shape[1], images.shape[2], 1]) + + images = tf.concat([images, lrtiled], axis=3) + + mask = rgba[:, :, :, 3] + mask = tf.cast(tf.greater(mask, tf.zeros_like(mask)), dtype=tf.float32) + + net = dilated_cnn(images, num_filters, is_training) + + # The probability distribution map. + prob = slim.conv2d( + net, num_kp, [3, 3], rate=1, scope="conv_xy", activation_fn=None) + + # We added the fixed camera distance as a bias. + z = -30 + slim.conv2d( + net, num_kp, [3, 3], rate=1, scope="conv_z", activation_fn=None) + + prob = tf.transpose(prob, [0, 3, 1, 2]) + z = tf.transpose(z, [0, 3, 1, 2]) + + prob = tf.reshape(prob, [-1, num_kp, vh * vw]) + prob = tf.nn.softmax(prob, name="softmax") + + ranx, rany = meshgrid(vh) + prob = tf.reshape(prob, [-1, num_kp, vh, vw]) + + # These are for visualizing the distribution maps. + prob_viz = tf.expand_dims(tf.reduce_sum(prob, 1), 3) + prob_vizs = [tf.expand_dims(prob[:, i, :, :], 3) for i in range(num_kp)] + + sx = tf.reduce_sum(prob * ranx, axis=[2, 3]) + sy = tf.reduce_sum(prob * rany, axis=[2, 3]) # -> batch x num_kp + + # [batch, num_kp] + sill = tf.reduce_sum(prob * tf.expand_dims(mask, 1), axis=[2, 3]) + sill = tf.reduce_mean(-tf.log(sill + 1e-12)) + + z = tf.reduce_sum(prob * z, axis=[2, 3]) + uv = tf.reshape(tf.stack([sx, sy], -1), [-1, num_kp, 2]) + + variance = variance_loss(prob, ranx, rany, uv) + + return uv, z, orient, sill, variance, prob_viz, prob_vizs + + +def model_fn(features, labels, mode, hparams): + """Returns model_fn for tf.estimator.Estimator.""" + + del labels + + is_training = (mode == tf.estimator.ModeKeys.TRAIN) + t = Transformer(vw, vh, FLAGS.dset) + + def func1(x): + return tf.transpose(tf.reshape(features[x], [-1, 4, 4]), [0, 2, 1]) + + mv = [func1("mv%d" % i) for i in range(2)] + mvi = [func1("mvi%d" % i) for i in range(2)] + + uvz = [None] * 2 + uvz_proj = [None] * 2 # uvz coordinates projected on to the other view. + viz = [None] * 2 + vizs = [None] * 2 + + loss_sill = 0 + loss_variance = 0 + loss_con = 0 + loss_sep = 0 + loss_lr = 0 + + for i in range(2): + with tf.variable_scope("KeypointNetwork", reuse=i > 0): + # anneal: 1 = using ground-truth, 0 = using our estimate orientation. 
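+      # Linear schedule: anneal stays at 1 until lr_anneal_start, then decays
+      # linearly to 0 by lr_anneal_end, i.e.
+      # anneal = clip((lr_anneal_end - step) / (lr_anneal_end - lr_anneal_start), 0, 1).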
+ anneal = tf.to_float(hparams.lr_anneal_end - tf.train.get_global_step()) + anneal = tf.clip_by_value( + anneal / (hparams.lr_anneal_end - hparams.lr_anneal_start), 0.0, 1.0) + + uv, z, orient, sill, variance, viz[i], vizs[i] = keypoint_network( + features["img%d" % i], + hparams.num_filters, + hparams.num_kp, + is_training, + lr_gt=features["lr%d" % i], + anneal=anneal) + + # x-positive/negative axes (dominant direction). + xp_axis = tf.tile( + tf.constant([[[1.0, 0, 0, 1], [-1.0, 0, 0, 1]]]), + [tf.shape(orient)[0], 1, 1]) + + # [batch, 2, 4] = [batch, 2, 4] x [batch, 4, 4] + xp = tf.matmul(xp_axis, mv[i]) + + # [batch, 2, 3] + xp = t.project(xp) + + loss_lr += tf.losses.mean_squared_error(orient[:, :, :2], xp[:, :, :2]) + loss_variance += variance + loss_sill += sill + + uv = tf.reshape(uv, [-1, hparams.num_kp, 2]) + z = tf.reshape(z, [-1, hparams.num_kp, 1]) + + # [batch, num_kp, 3] + uvz[i] = tf.concat([uv, z], axis=2) + + world_coords = tf.matmul(t.unproject(uvz[i]), mvi[i]) + + # [batch, num_kp, 3] + uvz_proj[i] = t.project(tf.matmul(world_coords, mv[1 - i])) + + pconf = tf.ones( + [tf.shape(uv)[0], tf.shape(uv)[1]], dtype=tf.float32) / hparams.num_kp + + for i in range(2): + loss_con += consistency_loss(uvz_proj[i][:, :, :2], uvz[1 - i][:, :, :2], + pconf) + loss_sep += separation_loss( + t.unproject(uvz[i])[:, :, :3], hparams.sep_delta) + + chordal, angular = relative_pose_loss( + t.unproject(uvz[0])[:, :, :3], + t.unproject(uvz[1])[:, :, :3], tf.matmul(mvi[0], mv[1]), pconf, + hparams.noise) + + loss = ( + hparams.loss_pose * angular + + hparams.loss_con * loss_con + + hparams.loss_sep * loss_sep + + hparams.loss_sill * loss_sill + + hparams.loss_lr * loss_lr + + hparams.loss_variance * loss_variance + ) + + def touint8(img): + return tf.cast(img * 255.0, tf.uint8) + + with tf.variable_scope("output"): + tf.summary.image("0_img0", touint8(features["img0"][:, :, :, :3])) + tf.summary.image("1_combined", viz[0]) + for i in range(hparams.num_kp): + tf.summary.image("2_f%02d" % i, vizs[0][i]) + + with tf.variable_scope("stats"): + tf.summary.scalar("anneal", anneal) + tf.summary.scalar("closs", loss_con) + tf.summary.scalar("seploss", loss_sep) + tf.summary.scalar("angular", angular) + tf.summary.scalar("chordal", chordal) + tf.summary.scalar("lrloss", loss_lr) + tf.summary.scalar("sill", loss_sill) + tf.summary.scalar("vloss", loss_variance) + + return { + "loss": loss, + "predictions": { + "img0": features["img0"], + "img1": features["img1"], + "uvz0": uvz[0], + "uvz1": uvz[1] + }, + "eval_metric_ops": { + "closs": tf.metrics.mean(loss_con), + "angular_loss": tf.metrics.mean(angular), + "chordal_loss": tf.metrics.mean(chordal), + } + } + + +def predict(input_folder, hparams): + """Predicts keypoints on all images in input_folder.""" + + cols = plt.cm.get_cmap("rainbow")( + np.linspace(0, 1.0, hparams.num_kp))[:, :4] + + img = tf.placeholder(tf.float32, shape=(1, 128, 128, 4)) + + with tf.variable_scope("KeypointNetwork"): + ret = keypoint_network( + img, hparams.num_filters, hparams.num_kp, False) + + uv = tf.reshape(ret[0], [-1, hparams.num_kp, 2]) + z = tf.reshape(ret[1], [-1, hparams.num_kp, 1]) + uvz = tf.concat([uv, z], axis=2) + + sess = tf.Session() + saver = tf.train.Saver() + ckpt = tf.train.get_checkpoint_state(FLAGS.model_dir) + + print("loading model: ", ckpt.model_checkpoint_path) + saver.restore(sess, ckpt.model_checkpoint_path) + + files = [x for x in os.listdir(input_folder) + if x[-3:] in ["jpg", "png"]] + + output_folder = os.path.join(input_folder, "output") + 
if not os.path.exists(output_folder): + os.mkdir(output_folder) + + for f in files: + orig = misc.imread(os.path.join(input_folder, f)).astype(float) / 255 + if orig.shape[2] == 3: + orig = np.concatenate((orig, np.ones_like(orig[:, :, :1])), axis=2) + + uv_ret = sess.run(uvz, feed_dict={img: np.expand_dims(orig, 0)}) + + utils.draw_ndc_points(orig, uv_ret.reshape(hparams.num_kp, 3), cols) + misc.imsave(os.path.join(output_folder, f), orig) + + +def _default_hparams(): + """Returns default or overridden user-specified hyperparameters.""" + + hparams = tf.contrib.training.HParams( + num_filters=64, # Number of filters. + num_kp=10, # Numer of keypoints. + + loss_pose=0.2, # Pose Loss. + loss_con=1.0, # Multiview consistency Loss. + loss_sep=1.0, # Seperation Loss. + loss_sill=1.0, # Sillhouette Loss. + loss_lr=1.0, # Orientation Loss. + loss_variance=0.5, # Variance Loss (part of Sillhouette loss). + + sep_delta=0.05, # Seperation threshold. + noise=0.1, # Noise added during estimating rotation. + + learning_rate=1.0e-3, + lr_anneal_start=30000, # When to anneal in the orientation prediction. + lr_anneal_end=60000, # When to use the prediction completely. + ) + if FLAGS.hparams: + hparams = hparams.parse(FLAGS.hparams) + return hparams + + +def main(argv): + del argv + + hparams = _default_hparams() + + if FLAGS.predict: + predict(FLAGS.input, hparams) + else: + utils.train_and_eval( + model_dir=FLAGS.model_dir, + model_fn=model_fn, + input_fn=create_input_fn, + hparams=hparams, + steps=FLAGS.steps, + batch_size=FLAGS.batch_size, + save_checkpoints_secs=600, + eval_throttle_secs=1800, + eval_steps=5, + sync_replicas=FLAGS.sync_replicas, + ) + + +if __name__ == "__main__": + sys.excepthook = utils.colored_hook( + os.path.dirname(os.path.realpath(__file__))) + tf.app.run() diff --git a/models/research/keypointnet/tools/gen_tfrecords.py b/models/research/keypointnet/tools/gen_tfrecords.py new file mode 100644 index 0000000000000000000000000000000000000000..2f973b7fe5f16951dbfa01edd2a759b96b4f79db --- /dev/null +++ b/models/research/keypointnet/tools/gen_tfrecords.py @@ -0,0 +1,99 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= +"""An example script to generate a tfrecord file from a folder containing the +renderings. 
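+
+The input folder is assumed to follow the layout produced by tools/render.py:
+for each index i there is a file %06d.txt holding two stacked 4x4 model-view
+matrices, and the two corresponding renderings are %06d.png for views 2*i and
+2*i + 1.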
+ +Example usage: + python gen_tfrecords.py --input=FOLDER --output=output.tfrecord + +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import os +from scipy import misc +import tensorflow as tf + +FLAGS = tf.app.flags.FLAGS +tf.app.flags.DEFINE_string("input", "", "Input folder containing images") +tf.app.flags.DEFINE_string("output", "", "Output tfrecord.") + + +def get_matrix(lines): + return np.array([[float(y) for y in x.strip().split(" ")] for x in lines]) + + +def read_model_view_matrices(filename): + with open(filename, "r") as f: + lines = f.readlines() + return get_matrix(lines[:4]), get_matrix(lines[4:]) + + +def bytes_feature(values): + return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values])) + + +def generate(): + with tf.python_io.TFRecordWriter(FLAGS.output) as tfrecord_writer: + with tf.Graph().as_default(): + im0 = tf.placeholder(dtype=tf.uint8) + im1 = tf.placeholder(dtype=tf.uint8) + encoded0 = tf.image.encode_png(im0) + encoded1 = tf.image.encode_png(im1) + + with tf.Session() as sess: + count = 0 + indir = FLAGS.input + "/" + while tf.gfile.Exists(indir + "%06d.txt" % count): + print("saving %06d" % count) + image0 = misc.imread(indir + "%06d.png" % (count * 2)) + image1 = misc.imread(indir + "%06d.png" % (count * 2 + 1)) + + mat0, mat1 = read_model_view_matrices(indir + "%06d.txt" % count) + + mati0 = np.linalg.inv(mat0).flatten() + mati1 = np.linalg.inv(mat1).flatten() + mat0 = mat0.flatten() + mat1 = mat1.flatten() + + st0, st1 = sess.run([encoded0, encoded1], + feed_dict={im0: image0, im1: image1}) + + example = tf.train.Example(features=tf.train.Features(feature={ + 'img0': bytes_feature(st0), + 'img1': bytes_feature(st1), + 'mv0': tf.train.Feature( + float_list=tf.train.FloatList(value=mat0)), + 'mvi0': tf.train.Feature( + float_list=tf.train.FloatList(value=mati0)), + 'mv1': tf.train.Feature( + float_list=tf.train.FloatList(value=mat1)), + 'mvi1': tf.train.Feature( + float_list=tf.train.FloatList(value=mati1)), + })) + + tfrecord_writer.write(example.SerializeToString()) + count += 1 + + +def main(argv): + del argv + generate() + + +if __name__ == "__main__": + tf.app.run() diff --git a/models/research/keypointnet/tools/render.py b/models/research/keypointnet/tools/render.py new file mode 100644 index 0000000000000000000000000000000000000000..3a8872675d83cc414d6348dbc7a56e924541b8d7 --- /dev/null +++ b/models/research/keypointnet/tools/render.py @@ -0,0 +1,310 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= +"""Script to render object views from ShapeNet obj models. 
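+
+For each of the n frames this writes a pair of renderings (frames 2*i and
+2*i + 1) plus a text file %06d.txt with the two matching 4x4 model-view
+matrices, which is the input layout expected by tools/gen_tfrecords.py.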
+ +Example usage: + blender -b --python render.py -- -m model.obj -o output/ -s 128 -n 120 -fov 5 + +""" +from __future__ import print_function + +import argparse +import itertools +import json +from math import pi +import os +import random +import sys +from mathutils import Vector +import math +import mathutils +import time +import copy + +import bpy + +sys.path.append(os.path.dirname(__file__)) + +BG_LUMINANCE = 0 + + +def look_at(obj_camera, point): + loc_camera = obj_camera.location + direction = point - loc_camera + # point the cameras '-Z' and use its 'Y' as up + rot_quat = direction.to_track_quat('-Z', 'Y') + + obj_camera.rotation_euler = rot_quat.to_euler() + + +def roll_camera(obj_camera): + roll_rotate = mathutils.Euler( + (0, 0, random.random() * math.pi - math.pi * 0.5), 'XYZ') + obj_camera.rotation_euler = (obj_camera.rotation_euler.to_matrix() * + roll_rotate.to_matrix()).to_euler() + + +def norm(x): + return math.sqrt(x[0] * x[0] + x[1] * x[1] + x[2] * x[2]) + + +def normalize(x): + n = norm(x) + x[0] /= n + x[1] /= n + x[2] /= n + + +def random_top_sphere(): + xyz = [random.normalvariate(0, 1) for x in range(3)] + normalize(xyz) + + if xyz[2] < 0: + xyz[2] *= -1 + return xyz + + +def perturb_sphere(loc, size): + while True: + xyz = [random.normalvariate(0, 1) for x in range(3)] + normalize(xyz) + + nloc = [loc[i] + xyz[i] * random.random() * size for i in range(3)] + normalize(nloc) + + if nloc[2] >= 0: + return nloc + + +def perturb(loc, size): + while True: + nloc = [loc[i] + random.random() * size * 2 - size for i in range(3)] + if nloc[2] >= 0: + return nloc + + bpy.ops.object.mode_set() + + +def delete_all_objects(): + bpy.ops.object.select_by_type(type="MESH") + bpy.ops.object.delete(use_global=False) + + +def set_scene(render_size, fov, alpha=False): + """Set up default scene properties.""" + delete_all_objects() + + cam = bpy.data.cameras["Camera"] + cam.angle = fov * pi / 180 + + light = bpy.data.objects["Lamp"] + light.location = (0, 0, 1) + look_at(light, Vector((0.0, 0, 0))) + bpy.data.lamps['Lamp'].type = "HEMI" + bpy.data.lamps['Lamp'].energy = 1 + bpy.data.lamps['Lamp'].use_specular = False + bpy.data.lamps['Lamp'].use_diffuse = True + + bpy.context.scene.world.horizon_color = ( + BG_LUMINANCE, BG_LUMINANCE, BG_LUMINANCE) + + bpy.context.scene.render.resolution_x = render_size + bpy.context.scene.render.resolution_y = render_size + bpy.context.scene.render.resolution_percentage = 100 + + bpy.context.scene.render.use_antialiasing = True + bpy.context.scene.render.antialiasing_samples = '5' + + +def get_modelview_matrix(): + cam = bpy.data.objects["Camera"] + bpy.context.scene.update() + + # when apply to object with CV coordinate i.e. 
to_blender * obj + # this gives object in blender coordinate + to_blender = mathutils.Matrix( + ((1., 0., 0., 0.), + (0., 0., -1., 0.), + (0., 1., 0., 0.), + (0., 0., 0., 1.))) + return cam.matrix_world.inverted() * to_blender + + +def print_matrix(f, mat): + for i in range(4): + for j in range(4): + f.write("%lf " % mat[i][j]) + f.write("\n") + + +def mul(loc, v): + return [loc[i] * v for i in range(3)] + + +def merge_all(): + bpy.ops.object.select_by_type(type="MESH") + bpy.context.scene.objects.active = bpy.context.selected_objects[0] + bpy.ops.object.join() + obj = bpy.context.scene.objects.active + bpy.ops.object.origin_set(type="ORIGIN_CENTER_OF_MASS") + return obj + + +def insert_frame(obj, frame_number): + obj.keyframe_insert(data_path="location", frame=frame_number) + obj.keyframe_insert(data_path="rotation_euler", frame=frame_number) + obj.keyframe_insert(data_path="scale", frame=frame_number) + + +def render(output_prefix): + bpy.context.scene.render.filepath = output_prefix + bpy.context.scene.render.image_settings.file_format = "PNG" + bpy.context.scene.render.alpha_mode = "TRANSPARENT" + bpy.context.scene.render.image_settings.color_mode = "RGBA" + bpy.ops.render.render(write_still=True, animation=True) + + +def render_obj( + obj_fn, save_dir, n, perturb_size, rotate=False, roll=False, scale=1.0): + + # Load object. + bpy.ops.import_scene.obj(filepath=obj_fn) + cur_obj = merge_all() + + scale = 2.0 / max(cur_obj.dimensions) * scale + cur_obj.scale = (scale, scale, scale) + # Using the center of mass as the origin doesn't really work, because Blender + # assumes the object is a solid shell. This seems to generate better-looking + # rotations. + + bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS') + + # bpy.ops.mesh.primitive_cube_add(location=(0, 0, 1)) + # cube = bpy.data.objects["Cube"] + # cube.scale = (0.2, 0.2, 0.2) + + for polygon in cur_obj.data.polygons: + polygon.use_smooth = True + + bpy.ops.object.select_all(action="DESELECT") + + camera = bpy.data.objects["Camera"] + + # os.system("mkdir " + save_dir) + for i in range(n): + fo = open(save_dir + "/%06d.txt" % i, "w") + d = 30 + shift = 0.2 + if rotate: + t = 1.0 * i / (n-1) * 2 * math.pi + loc = [math.sin(t), math.cos(t), 1] + + normalize(loc) + camera.location = mul(loc, d) + look_at(camera, Vector((0.0, 0, 0))) + + print_matrix(fo, get_modelview_matrix()) + print_matrix(fo, get_modelview_matrix()) + + insert_frame(camera, 2 * i) + insert_frame(camera, 2 * i + 1) + + else: + loc = random_top_sphere() + + camera.location = mul(loc, d) + look_at(camera, Vector((0.0, 0, 0))) + + if roll: + roll_camera(camera) + camera.location = perturb(mul(loc, d), shift) + + print_matrix(fo, get_modelview_matrix()) + insert_frame(camera, 2 * i) + + if perturb_size > 0: + loc = perturb_sphere(loc, perturb_size) + else: + loc = random_top_sphere() + + camera.location = mul(loc, d) + look_at(camera, Vector((0.0, 0, 0))) + if roll: + roll_camera(camera) + camera.location = perturb(mul(loc, d), shift) + + print_matrix(fo, get_modelview_matrix()) + insert_frame(camera, 2 * i + 1) + + fo.close() + + # Create a bunch of views of the object + bpy.context.scene.frame_start = 0 + bpy.context.scene.frame_end = 2 * n - 1 + + stem = os.path.join(save_dir, '######') + render(stem) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('-m', '--model', dest='model', + required=True, + help='Path to model obj file.') + parser.add_argument('-o', '--output_dir', dest='output_dir', + required=True, + help='Where to 
output files.') + parser.add_argument('-s', '--output_size', dest='output_size', + required=True, + help='Width and height of output in pixels, e.g. 32x32.') + parser.add_argument('-n', '--num_frames', dest='n', type=int, + required=True, + help='Number of frames to generate per clip.') + + parser.add_argument('-scale', '--scale', dest='scale', type=float, + help='object scaling', default=1) + + parser.add_argument('-perturb', '--perturb', dest='perturb', type=float, + help='sphere perturbation', default=0) + + parser.add_argument('-rotate', '--rotate', dest='rotate', action='store_true', + help='render rotating test set') + + parser.add_argument('-roll', '--roll', dest='roll', action='store_true', + help='add roll') + + parser.add_argument( + '-fov', '--fov', dest='fov', type=float, required=True, + help='field of view') + + if '--' not in sys.argv: + parser.print_help() + exit(1) + + argv = sys.argv[sys.argv.index('--') + 1:] + args, _ = parser.parse_known_args(argv) + + random.seed(args.model + str(time.time()) + str(os.getpid())) + # random.seed(0) + + set_scene(int(args.output_size), args.fov) + render_obj( + args.model, args.output_dir, args.n, args.perturb, args.rotate, + args.roll, args.scale) + exit() + + +if __name__ == '__main__': + main() diff --git a/models/research/keypointnet/utils.py b/models/research/keypointnet/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..148b7a3ed843638cff597be0c462b7e335df9857 --- /dev/null +++ b/models/research/keypointnet/utils.py @@ -0,0 +1,307 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= +"""Utility functions for KeypointNet. + +These are helper / tensorflow related functions. The actual implementation and +algorithm is in main.py. 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import numpy as np +import os +import re +import tensorflow as tf +import tensorflow.contrib.slim as slim +import time +import traceback + + +class TrainingHook(tf.train.SessionRunHook): + """A utility for displaying training information such as the loss, percent + completed, estimated finish date and time.""" + + def __init__(self, steps): + self.steps = steps + + self.last_time = time.time() + self.last_est = self.last_time + + self.eta_interval = int(math.ceil(0.1 * self.steps)) + self.current_interval = 0 + + def before_run(self, run_context): + graph = tf.get_default_graph() + return tf.train.SessionRunArgs( + {"loss": graph.get_collection("total_loss")[0]}) + + def after_run(self, run_context, run_values): + step = run_context.session.run(tf.train.get_global_step()) + now = time.time() + + if self.current_interval < self.eta_interval: + self.duration = now - self.last_est + self.current_interval += 1 + if step % self.eta_interval == 0: + self.duration = now - self.last_est + self.last_est = now + + eta_time = float(self.steps - step) / self.current_interval * \ + self.duration + m, s = divmod(eta_time, 60) + h, m = divmod(m, 60) + eta = "%d:%02d:%02d" % (h, m, s) + + print("%.2f%% (%d/%d): %.3e t %.3f @ %s (%s)" % ( + step * 100.0 / self.steps, + step, + self.steps, + run_values.results["loss"], + now - self.last_time, + time.strftime("%a %d %H:%M:%S", time.localtime(time.time() + eta_time)), + eta)) + + self.last_time = now + + +def standard_model_fn( + func, steps, run_config=None, sync_replicas=0, optimizer_fn=None): + """Creates model_fn for tf.Estimator. + + Args: + func: A model_fn with prototype model_fn(features, labels, mode, hparams). + steps: Training steps. + run_config: tf.estimatorRunConfig (usually passed in from TF_CONFIG). + sync_replicas: The number of replicas used to compute gradient for + synchronous training. + optimizer_fn: The type of the optimizer. Default to Adam. + + Returns: + model_fn for tf.estimator.Estimator. 
+ """ + + def fn(features, labels, mode, params): + """Returns model_fn for tf.estimator.Estimator.""" + + is_training = (mode == tf.estimator.ModeKeys.TRAIN) + ret = func(features, labels, mode, params) + + tf.add_to_collection("total_loss", ret["loss"]) + train_op = None + + training_hooks = [] + if is_training: + training_hooks.append(TrainingHook(steps)) + + if optimizer_fn is None: + optimizer = tf.train.AdamOptimizer(params.learning_rate) + else: + optimizer = optimizer_fn + + if run_config is not None and run_config.num_worker_replicas > 1: + sr = sync_replicas + if sr <= 0: + sr = run_config.num_worker_replicas + + optimizer = tf.train.SyncReplicasOptimizer( + optimizer, + replicas_to_aggregate=sr, + total_num_replicas=run_config.num_worker_replicas) + + training_hooks.append( + optimizer.make_session_run_hook( + run_config.is_chief, num_tokens=run_config.num_worker_replicas)) + + optimizer = tf.contrib.estimator.clip_gradients_by_norm(optimizer, 5) + train_op = slim.learning.create_train_op(ret["loss"], optimizer) + + if "eval_metric_ops" not in ret: + ret["eval_metric_ops"] = {} + + return tf.estimator.EstimatorSpec( + mode=mode, + predictions=ret["predictions"], + loss=ret["loss"], + train_op=train_op, + eval_metric_ops=ret["eval_metric_ops"], + training_hooks=training_hooks) + return fn + + +def train_and_eval( + model_dir, + steps, + batch_size, + model_fn, + input_fn, + hparams, + keep_checkpoint_every_n_hours=0.5, + save_checkpoints_secs=180, + save_summary_steps=50, + eval_steps=20, + eval_start_delay_secs=10, + eval_throttle_secs=300, + sync_replicas=0): + """Trains and evaluates our model. Supports local and distributed training. + + Args: + model_dir: The output directory for trained parameters, checkpoints, etc. + steps: Training steps. + batch_size: Batch size. + model_fn: A func with prototype model_fn(features, labels, mode, hparams). + input_fn: A input function for the tf.estimator.Estimator. + hparams: tf.HParams containing a set of hyperparameters. + keep_checkpoint_every_n_hours: Number of hours between each checkpoint + to be saved. + save_checkpoints_secs: Save checkpoints every this many seconds. + save_summary_steps: Save summaries every this many steps. + eval_steps: Number of steps to evaluate model. + eval_start_delay_secs: Start evaluating after waiting for this many seconds. + eval_throttle_secs: Do not re-evaluate unless the last evaluation was + started at least this many seconds ago + sync_replicas: Number of synchronous replicas for distributed training. + + Returns: + None + """ + + run_config = tf.estimator.RunConfig( + keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours, + save_checkpoints_secs=save_checkpoints_secs, + save_summary_steps=save_summary_steps) + + estimator = tf.estimator.Estimator( + model_dir=model_dir, + model_fn=standard_model_fn( + model_fn, + steps, + run_config, + sync_replicas=sync_replicas), + params=hparams, config=run_config) + + train_spec = tf.estimator.TrainSpec( + input_fn=input_fn(split="train", batch_size=batch_size), + max_steps=steps) + + eval_spec = tf.estimator.EvalSpec( + input_fn=input_fn(split="validation", batch_size=batch_size), + steps=eval_steps, + start_delay_secs=eval_start_delay_secs, + throttle_secs=eval_throttle_secs) + + tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) + + +def draw_circle(rgb, u, v, col, r): + """Draws a simple anti-aliasing circle in-place. + + Args: + rgb: Input image to be modified. + u: Horizontal coordinate. + v: Vertical coordinate. + col: Color. 
+ r: Radius. + """ + + ir = int(math.ceil(r)) + for i in range(-ir-1, ir+2): + for j in range(-ir-1, ir+2): + nu = int(round(u + i)) + nv = int(round(v + j)) + if nu < 0 or nu >= rgb.shape[1] or nv < 0 or nv >= rgb.shape[0]: + continue + + du = abs(nu - u) + dv = abs(nv - v) + + # need sqrt to keep scale + t = math.sqrt(du * du + dv * dv) - math.sqrt(r * r) + if t < 0: + rgb[nv, nu, :] = col + else: + t = 1 - t + if t > 0: + # t = t ** 0.3 + rgb[nv, nu, :] = col * t + rgb[nv, nu, :] * (1-t) + + +def draw_ndc_points(rgb, xy, cols): + """Draws keypoints onto an input image. + + Args: + rgb: Input image to be modified. + xy: [n x 2] matrix of 2D locations. + cols: A list of colors for the keypoints. + """ + + vh, vw = rgb.shape[0], rgb.shape[1] + + for j in range(len(cols)): + x, y = xy[j, :2] + x = (min(max(x, -1), 1) * vw / 2 + vw / 2) - 0.5 + y = vh - 0.5 - (min(max(y, -1), 1) * vh / 2 + vh / 2) + + x = int(round(x)) + y = int(round(y)) + if x < 0 or y < 0 or x >= vw or y >= vh: + continue + + rad = 1.5 + rad *= rgb.shape[0] / 128.0 + draw_circle(rgb, x, y, np.array([0.0, 0.0, 0.0, 1.0]), rad * 1.5) + draw_circle(rgb, x, y, cols[j], rad) + + +def colored_hook(home_dir): + """Colorizes python's error message. + + Args: + home_dir: directory where code resides (to highlight your own files). + Returns: + The traceback hook. + """ + + def hook(type_, value, tb): + def colorize(text, color, own=0): + """Returns colorized text.""" + endcolor = "\x1b[0m" + codes = { + "green": "\x1b[0;32m", + "green_own": "\x1b[1;32;40m", + "red": "\x1b[0;31m", + "red_own": "\x1b[1;31m", + "yellow": "\x1b[0;33m", + "yellow_own": "\x1b[1;33m", + "black": "\x1b[0;90m", + "black_own": "\x1b[1;90m", + "cyan": "\033[1;36m", + } + return codes[color + ("_own" if own else "")] + text + endcolor + + for filename, line_num, func, text in traceback.extract_tb(tb): + basename = os.path.basename(filename) + own = (home_dir in filename) or ("/" not in filename) + + print(colorize("\"" + basename + '"', "green", own) + " in " + func) + print("%s: %s" % ( + colorize("%5d" % line_num, "red", own), + colorize(text, "yellow", own))) + print(" %s" % colorize(filename, "black", own)) + + print(colorize("%s: %s" % (type_.__name__, value), "cyan")) + return hook diff --git a/models/research/learned_optimizer/.gitignore b/models/research/learned_optimizer/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/learned_optimizer/BUILD b/models/research/learned_optimizer/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..629c9a06b51d10eb7cab69ed0d9dd0bfa52fd2f0 --- /dev/null +++ b/models/research/learned_optimizer/BUILD @@ -0,0 +1,33 @@ +# Learning to Optimize Learning (LOL) + +package(default_visibility = ["//visibility:public"]) + +# Libraries +# ========= + +py_library( + name = "metaopt", + srcs = ["metaopt.py"], + deps = [ + "//learned_optimizer/problems:datasets", + "//learned_optimizer/problems:problem_generator", + ], +) + +# Binaries +# ======== +py_binary( + name = "metarun", + srcs = ["metarun.py"], + deps = [ + ":metaopt", + "//learned_optimizer/optimizer:coordinatewise_rnn", + "//learned_optimizer/optimizer:global_learning_rate", + "//learned_optimizer/optimizer:hierarchical_rnn", + "//learned_optimizer/optimizer:learning_rate_schedule", + "//learned_optimizer/optimizer:trainable_adam", + "//learned_optimizer/problems:problem_sets", + "//learned_optimizer/problems:problem_spec", + ], +) + diff --git 
a/models/research/learned_optimizer/README.md b/models/research/learned_optimizer/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6a32514f053f97bc64dc87c4ec972c8223a83fe2 --- /dev/null +++ b/models/research/learned_optimizer/README.md @@ -0,0 +1,47 @@ +![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +# Learned Optimizer + +Code for [Learned Optimizers that Scale and Generalize](https://arxiv.org/abs/1703.04813). + +## Requirements + +* Bazel ([install](https://bazel.build/versions/master/docs/install.html)) +* TensorFlow >= v1.3 +* Python 2.7.x + +## Training a Learned Optimizer + +## Code Overview +In the top-level directory, ```metaopt.py``` contains the code to train and test a learned optimizer. ```metarun.py``` packages the actual training procedure into a +single file, defining and exposing many flags to tune the procedure, from selecting the optimizer type and problem set to more fine-grained hyperparameter settings. +There is no testing binary; testing can be done ad-hoc via ```metaopt.test_optimizer``` by passing an optimizer object and a directory with a checkpoint. + +The ```optimizer``` directory contains a base ```trainable_optimizer.py``` class and a number of extensions, including the ```hierarchical_rnn``` optimizer used in +the paper, a ```coordinatewise_rnn``` optimizer that more closely matches previous work, and a number of simpler optimizers to demonstrate the basic mechanics of +a learnable optimizer. + +The ```problems``` directory contains the code to build the problems that were used in the meta-training set. + +### Binaries +```metarun.py```: meta-training of a learned optimizer + +### Command-Line Flags +The flags most relevant to meta-training are defined in ```metarun.py```. The default values will meta-train a HierarchicalRNN optimizer with the hyperparameter +settings used in the paper. + +### Using a Learned Optimizer as a Black Box +The ```trainable_optimizer``` inherits from ```tf.train.Optimizer```, so a properly instantiated version can be used to train any model in any APIs that accept +this class. There are just 2 caveats: + +1. If using the Hierarchical RNN optimizer, the apply_gradients return type must be changed (see comments inline for what exactly must be removed) + +2. Care must be taken to restore the variables from the optimizer without overriding them. Optimizer variables should be loaded manually using a pretrained checkpoint +and a ```tf.train.Saver``` with only the optimizer variables. Then, when constructing the session, ensure that any automatic variable initialization does not +re-initialize the loaded optimizer variables. + +## Contact for Issues + +* Olga Wichrowska (@olganw), Niru Maheswaranathan (@nirum) diff --git a/models/research/learned_optimizer/metaopt.py b/models/research/learned_optimizer/metaopt.py new file mode 100644 index 0000000000000000000000000000000000000000..62c06272d3096ed63296744792c8742826380536 --- /dev/null +++ b/models/research/learned_optimizer/metaopt.py @@ -0,0 +1,639 @@ +# Copyright 2017 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Helper utilities for training and testing optimizers.""" + +from collections import defaultdict +import random +import sys +import time + +import numpy as np +from six.moves import xrange +import tensorflow as tf + +from learned_optimizer.optimizer import trainable_optimizer +from learned_optimizer.optimizer import utils +from learned_optimizer.problems import datasets +from learned_optimizer.problems import problem_generator + +tf.app.flags.DEFINE_integer("ps_tasks", 0, + """Number of tasks in the ps job. + If 0 no ps job is used.""") +tf.app.flags.DEFINE_float("nan_l2_reg", 1e-2, + """Strength of l2-reg when NaNs are encountered.""") +tf.app.flags.DEFINE_float("l2_reg", 0., + """Lambda value for parameter regularization.""") +# Default is 0.9 +tf.app.flags.DEFINE_float("rms_decay", 0.9, + """Decay value for the RMSProp metaoptimizer.""") +# Default is 1e-10 +tf.app.flags.DEFINE_float("rms_epsilon", 1e-20, + """Epsilon value for the RMSProp metaoptimizer.""") +tf.app.flags.DEFINE_boolean("set_profiling", False, + """Enable memory usage and computation time """ + """tracing for tensorflow nodes (available in """ + """TensorBoard).""") +tf.app.flags.DEFINE_boolean("reset_rnn_params", True, + """Reset the parameters of the optimizer + from one meta-iteration to the next.""") + +FLAGS = tf.app.flags.FLAGS +OPTIMIZER_SCOPE = "LOL" +OPT_SUM_COLLECTION = "LOL_summaries" + + +def sigmoid_weights(n, slope=0.1, offset=5): + """Generates a sigmoid, scaled to sum to 1. + + This function is used to generate weights that serve to mask out + the early objective values of an optimization problem such that + initial variation in the objective is phased out (hence the sigmoid + starts at zero and ramps up to the maximum value, and the total + weight is normalized to sum to one) + + Args: + n: the number of samples + slope: slope of the sigmoid (Default: 0.1) + offset: threshold of the sigmoid (Default: 5) + + Returns: + No + """ + x = np.arange(n) + y = 1. / (1. + np.exp(-slope * (x-offset))) + y_normalized = y / np.sum(y) + return y_normalized + + +def sample_numiter(scale, min_steps=50): + """Samples a number of iterations from an exponential distribution. + + Args: + scale: parameter for the exponential distribution + min_steps: minimum number of steps to run (additive) + + Returns: + num_steps: An integer equal to a rounded sample from the exponential + distribution + the value of min_steps. + """ + return int(np.round(np.random.exponential(scale=scale)) + min_steps) + + +def train_optimizer(logdir, + optimizer_spec, + problems_and_data, + num_problems, + num_meta_iterations, + num_unroll_func, + num_partial_unroll_itrs_func, + learning_rate=1e-4, + gradient_clip=5., + is_chief=False, + select_random_problems=True, + callbacks=None, + obj_train_max_multiplier=-1, + out=sys.stdout): + """Trains the meta-parameters of this optimizer. 
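+
+  Each sampled problem is trained in its own graph: the problem and the
+  trainable optimizer are rebuilt, the optimizer's meta-parameters are updated
+  with RMSProp over a series of partial unrolls, and a checkpoint is saved
+  before moving on to the next problem.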
+ + Args: + logdir: a directory filepath for storing model checkpoints (must exist) + optimizer_spec: specification for an Optimizer (see utils.Spec) + problems_and_data: a list of tuples containing three elements: a problem + specification (see utils.Spec), a dataset (see datasets.Dataset), and + a batch_size (int) for generating a problem and corresponding dataset. If + the problem doesn't have data, set dataset to None. + num_problems: the number of problems to sample during meta-training + num_meta_iterations: the number of iterations (steps) to run the + meta-optimizer for on each subproblem. + num_unroll_func: called once per meta iteration and returns the number of + unrolls to do for that meta iteration. + num_partial_unroll_itrs_func: called once per unroll and returns the number + of iterations to do for that unroll. + learning_rate: learning rate of the RMSProp meta-optimizer (Default: 1e-4) + gradient_clip: value to clip gradients at (Default: 5.0) + is_chief: whether this is the chief task (Default: False) + select_random_problems: whether to select training problems randomly + (Default: True) + callbacks: a list of callback functions that is run after every random + problem draw + obj_train_max_multiplier: the maximum increase in the objective value over + a single training run. Ignored if < 0. + out: where to write output to, e.g. a file handle (Default: sys.stdout) + + Raises: + ValueError: If one of the subproblems has a negative objective value. + """ + + if select_random_problems: + # iterate over random draws of problem / dataset pairs + sampler = (random.choice(problems_and_data) for _ in range(num_problems)) + else: + # iterate over a random shuffle of problems, looping if necessary + num_repeats = (num_problems / len(problems_and_data)) + 1 + random.shuffle(problems_and_data) + sampler = (problems_and_data * num_repeats)[:num_problems] + + for problem_itr, (problem_spec, dataset, batch_size) in enumerate(sampler): + + # timer used to time how long it takes to initialize a problem + problem_start_time = time.time() + + # if dataset is None, use the EMPTY_DATASET + if dataset is None: + dataset = datasets.EMPTY_DATASET + batch_size = dataset.size + + # build a new graph for this problem + graph = tf.Graph() + real_device_setter = tf.train.replica_device_setter(FLAGS.ps_tasks) + + def custom_device_setter(op): + # Places the local variables onto the workers. 
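+      # Anything that is not local optimizer state falls through to the
+      # standard replica device setter.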
+ if trainable_optimizer.is_local_state_variable(op): + return "/job:worker" + else: + return real_device_setter(op) + + if real_device_setter: + device_setter = custom_device_setter + else: + device_setter = None + + with graph.as_default(), graph.device(device_setter): + + # initialize a problem + problem = problem_spec.build() + + # build the optimizer + opt = optimizer_spec.build() + + # get the meta-objective for training the optimizer + train_output = opt.train(problem, dataset) + + state_keys = opt.state_keys + for key, val in zip(state_keys, train_output.output_state[0]): + finite_val = utils.make_finite(val, replacement=tf.zeros_like(val)) + tf.summary.histogram("State/{}".format(key), finite_val, + collections=[OPT_SUM_COLLECTION]) + + tf.summary.scalar("MetaObjective", train_output.metaobj, + collections=[OPT_SUM_COLLECTION]) + + # Per-problem meta-objective + tf.summary.scalar(problem_spec.callable.__name__ + "_MetaObjective", + train_output.metaobj, + collections=[OPT_SUM_COLLECTION]) + + # create the meta-train_op + global_step = tf.Variable(0, name="global_step", trainable=False) + meta_parameters = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, + scope=OPTIMIZER_SCOPE) + # parameter regularization + reg_l2 = FLAGS.l2_reg * sum([tf.reduce_sum(param ** 2) + for param in meta_parameters]) + + # compute the meta-gradients + meta_opt = tf.train.RMSPropOptimizer(learning_rate, decay=FLAGS.rms_decay, + use_locking=True, + epsilon=FLAGS.rms_epsilon) + grads_and_vars = meta_opt.compute_gradients(train_output.metaobj + reg_l2, + meta_parameters) + + # clip the gradients + clipped_grads_and_vars = [] + for grad, var in grads_and_vars: + clipped_grad = tf.clip_by_value( + utils.make_finite(grad, replacement=tf.zeros_like(var)), + -gradient_clip, gradient_clip) + clipped_grads_and_vars.append((clipped_grad, var)) + + # histogram summary of grads and vars + for grad, var in grads_and_vars: + tf.summary.histogram( + var.name + "_rawgrad", + utils.make_finite( + grad, replacement=tf.zeros_like(grad)), + collections=[OPT_SUM_COLLECTION]) + for grad, var in clipped_grads_and_vars: + tf.summary.histogram(var.name + "_var", var, + collections=[OPT_SUM_COLLECTION]) + tf.summary.histogram(var.name + "_grad", grad, + collections=[OPT_SUM_COLLECTION]) + + # builds the train and summary operations + train_op = meta_opt.apply_gradients(clipped_grads_and_vars, + global_step=global_step) + + # only grab summaries defined for LOL, not inside the problem + summary_op = tf.summary.merge_all(key=OPT_SUM_COLLECTION) + + # make sure the state gets propagated after the gradients and summaries + # were computed. 
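+      # Copying output_loop_vars back into init_loop_vars lets the next
+      # partial unroll resume from the RNN state this unroll ended with.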
+ with tf.control_dependencies([train_op, summary_op]): + propagate_loop_state_ops = [] + for dest, src in zip( + train_output.init_loop_vars, train_output.output_loop_vars): + propagate_loop_state_ops.append(dest.assign(src)) + propagate_loop_state_op = tf.group(*propagate_loop_state_ops) + + # create the supervisor + sv = tf.train.Supervisor( + graph=graph, + is_chief=is_chief, + logdir=logdir, + summary_op=None, + save_model_secs=0, # we save checkpoints manually + global_step=global_step, + ) + + with sv.managed_session() as sess: + + init_time = time.time() - problem_start_time + out.write("--------- Problem #{} ---------\n".format(problem_itr)) + out.write("{callable.__name__}{args}{kwargs}\n".format( + **problem_spec.__dict__)) + out.write("Took {} seconds to initialize.\n".format(init_time)) + out.flush() + + # For profiling summaries + if FLAGS.set_profiling: + summary_writer = tf.summary.FileWriter(logdir, graph=sess.graph) + + # used to store information during training + metadata = defaultdict(list) + + for k in range(num_meta_iterations): + + if sv.should_stop(): + break + + problem.init_fn(sess) + + # set run options (for profiling) + full_trace_opt = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) + run_options = full_trace_opt if FLAGS.set_profiling else None + run_metadata = tf.RunMetadata() if FLAGS.set_profiling else None + + num_unrolls = num_unroll_func() + partial_unroll_iters = [ + num_partial_unroll_itrs_func() for _ in xrange(num_unrolls) + ] + total_num_iter = sum(partial_unroll_iters) + + objective_weights = [np.ones(num) / float(num) + for num in partial_unroll_iters] + db = dataset.batch_indices(total_num_iter, batch_size) + dataset_batches = [] + last_index = 0 + for num in partial_unroll_iters: + dataset_batches.append(db[last_index:last_index + num]) + last_index += num + + train_start_time = time.time() + + unroll_itr = 0 + additional_log_info = "" + + for unroll_itr in range(num_unrolls): + first_unroll = unroll_itr == 0 + if FLAGS.reset_rnn_params: + reset_state = first_unroll and k == 0 + else: + reset_state = first_unroll + + feed = { + train_output.obj_weights: objective_weights[unroll_itr], + train_output.batches: dataset_batches[unroll_itr], + train_output.first_unroll: first_unroll, + train_output.reset_state: reset_state, + } + + # run the train and summary ops + # when a "save_diagnostics" flag is turned on + fetches_list = [ + train_output.metaobj, + train_output.problem_objectives, + train_output.initial_obj, + summary_op, + clipped_grads_and_vars, + train_op + ] + if unroll_itr + 1 < num_unrolls: + fetches_list += [propagate_loop_state_op] + + fetched = sess.run(fetches_list, feed_dict=feed, + options=run_options, run_metadata=run_metadata) + meta_obj = fetched[0] + sub_obj = fetched[1] + init_obj = fetched[2] + summ = fetched[3] + meta_grads_and_params = fetched[4] + + # assert that the subproblem objectives are non-negative + # (this is so that we can rescale the objective by the initial value + # and not worry about rescaling by a negative value) + if np.any(sub_obj < 0): + raise ValueError( + "Training problem objectives must be nonnegative.") + # If the objective has increased more than we want, exit this + # training run and start over on another meta iteration. + if obj_train_max_multiplier > 0 and ( + sub_obj[-1] > (init_obj + + abs(init_obj) * (obj_train_max_multiplier - 1))): + msg = " Broke early at {} out of {} unrolls. 
".format( + unroll_itr + 1, num_unrolls) + additional_log_info += msg + break + + # only the chief task is allowed to write the summary + if is_chief: + sv.summary_computed(sess, summ) + + metadata["subproblem_objs"].append(sub_obj) + # store training metadata to pass to the callback + metadata["meta_objs"].append(meta_obj) + metadata["meta_grads_and_params"].append(meta_grads_and_params) + + optimization_time = time.time() - train_start_time + + if FLAGS.set_profiling: + summary_name = "%02d_iter%04d_%02d" % (FLAGS.task, problem_itr, k) + summary_writer.add_run_metadata(run_metadata, summary_name) + + metadata["global_step"].append(sess.run(global_step)) + metadata["runtimes"].append(optimization_time) + + # write a diagnostic message to the output + args = (k, meta_obj, optimization_time, + sum(partial_unroll_iters[:unroll_itr+1])) + out.write(" [{:02}] {}, {} seconds, {} iters ".format(*args)) + out.write("(unrolled {} steps)".format( + ", ".join([str(s) for s in partial_unroll_iters[:unroll_itr+1]]))) + out.write("{}\n".format(additional_log_info)) + out.flush() + + if FLAGS.set_profiling: + summary_writer.close() + + # force a checkpoint save before we load a new problem + # only the chief task has the save_path and can write the checkpoint + if is_chief: + sv.saver.save(sess, sv.save_path, global_step=global_step) + + # run the callbacks on the chief + if is_chief and callbacks is not None: + for callback in callbacks: + if hasattr(callback, "__call__"): + problem_name = problem_spec.callable.__name__ + callback(problem_name, problem_itr, logdir, metadata) + + +def test_optimizer(optimizer, + problem, + num_iter, + dataset=datasets.EMPTY_DATASET, + batch_size=None, + seed=None, + graph=None, + logdir=None, + record_every=None): + """Tests an optimization algorithm on a given problem. + + Args: + optimizer: Either a tf.train.Optimizer instance, or an Optimizer instance + inheriting from trainable_optimizer.py + problem: A Problem instance that defines an optimization problem to solve + num_iter: The number of iterations of the optimizer to run + dataset: The dataset to train the problem against + batch_size: The number of samples per batch. If None (default), the + batch size is set to the full batch (dataset.size) + seed: A random seed used for drawing the initial parameters, or a list of + numpy arrays used to explicitly initialize the parameters. + graph: The tensorflow graph to execute (if None, uses the default graph) + logdir: A directory containing model checkpoints. If given, then the + parameters of the optimizer are loaded from the latest checkpoint + in this folder. + record_every: if an integer, stores the parameters, objective, and gradient + every recored_every iterations. 
If None, nothing is stored + + Returns: + objective_values: A list of the objective values during optimization + parameters: The parameters obtained after training + records: A dictionary containing lists of the parameters and gradients + during optimization saved every record_every iterations (empty if + record_every is set to None) + """ + + if dataset is None: + dataset = datasets.EMPTY_DATASET + batch_size = dataset.size + else: + # default batch size is the entire dataset + batch_size = dataset.size if batch_size is None else batch_size + + graph = tf.get_default_graph() if graph is None else graph + with graph.as_default(): + + # define the parameters of the optimization problem + if isinstance(seed, (list, tuple)): + # seed is a list of arrays + params = problem_generator.init_fixed_variables(seed) + else: + # seed is an int or None + params = problem.init_variables(seed) + + data_placeholder = tf.placeholder(tf.float32) + labels_placeholder = tf.placeholder(tf.int32) + + # get the problem objective and gradient(s) + obj = problem.objective(params, data_placeholder, labels_placeholder) + gradients = problem.gradients(obj, params) + + vars_to_preinitialize = params + + with tf.Session(graph=graph) as sess: + # initialize the parameter scope variables; necessary for apply_gradients + sess.run(tf.variables_initializer(vars_to_preinitialize)) + coord = tf.train.Coordinator() + threads = tf.train.start_queue_runners(sess=sess, coord=coord) + + # create the train operation and training variables + try: + train_op, real_params = optimizer.apply_gradients(zip(gradients, params)) + obj = problem.objective(real_params, data_placeholder, labels_placeholder) + except TypeError: + # If all goes well, this exception should only be thrown when we are using + # a non-hrnn optimizer. 
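    # Annotation (rough sketch; "hrnn_opt" and "gvs" are illustrative names,
    # not identifiers from this file): the hierarchical RNN optimizer's
    # apply_gradients returns a pair,
    #     train_op, shadow_params = hrnn_opt.apply_gradients(gvs)
    # whereas a stock tf.train.Optimizer returns only the op,
    #     train_op = tf.train.GradientDescentOptimizer(0.1).apply_gradients(gvs)
    # so the tuple unpacking in the try block raises TypeError for stock
    # optimizers and this fallback branch runs instead.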
+ train_op = optimizer.apply_gradients(zip(gradients, params)) + + vars_to_restore = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, + scope=OPTIMIZER_SCOPE) + vars_to_initialize = list( + set(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)) - + set(vars_to_restore) - set(vars_to_preinitialize)) + # load or initialize optimizer variables + if logdir is not None: + restorer = tf.Saver(var_list=vars_to_restore) + ckpt = tf.train.latest_checkpoint(logdir) + restorer.restore(sess, ckpt) + else: + sess.run(tf.variables_initializer(vars_to_restore)) + # initialize all the other variables + sess.run(tf.variables_initializer(vars_to_initialize)) + + problem.init_fn(sess) + + # generate the minibatch indices + batch_inds = dataset.batch_indices(num_iter, batch_size) + + # run the train operation for n iterations and save the objectives + records = defaultdict(list) + objective_values = [] + for itr, batch in enumerate(batch_inds): + + # data to feed in + feed = {data_placeholder: dataset.data[batch], + labels_placeholder: dataset.labels[batch]} + full_feed = {data_placeholder: dataset.data, + labels_placeholder: dataset.labels} + + # record stuff + if record_every is not None and (itr % record_every) == 0: + def grad_value(g): + if isinstance(g, tf.IndexedSlices): + return g.values + else: + return g + + records_fetch = {} + for p in params: + for key in optimizer.get_slot_names(): + v = optimizer.get_slot(p, key) + records_fetch[p.name + "_" + key] = v + gav_fetch = [(grad_value(g), v) for g, v in zip(gradients, params)] + + _, gav_eval, records_eval = sess.run( + (obj, gav_fetch, records_fetch), feed_dict=feed) + full_obj_eval = sess.run([obj], feed_dict=full_feed) + + records["objective"].append(full_obj_eval) + records["grad_norm"].append([np.linalg.norm(g.ravel()) + for g, _ in gav_eval]) + records["param_norm"].append([np.linalg.norm(v.ravel()) + for _, v in gav_eval]) + records["grad"].append([g for g, _ in gav_eval]) + records["param"].append([v for _, v in gav_eval]) + records["iter"].append(itr) + + for k, v in records_eval.iteritems(): + records[k].append(v) + + # run the optimization train operation + objective_values.append(sess.run([train_op, obj], feed_dict=feed)[1]) + + # final parameters + parameters = [sess.run(p) for p in params] + coord.request_stop() + coord.join(threads) + + return objective_values, parameters, records + + +def run_wall_clock_test(optimizer, + problem, + num_steps, + dataset=datasets.EMPTY_DATASET, + seed=None, + logdir=None, + batch_size=None): + """Runs optimization with the given parameters and return average iter time. + + Args: + optimizer: The tf.train.Optimizer instance + problem: The problem to optimize (a problem_generator.Problem) + num_steps: The number of steps to run optimization for + dataset: The dataset to train the problem against + seed: The seed used for drawing the initial parameters, or a list of + numpy arrays used to explicitly initialize the parameters + logdir: A directory containing model checkpoints. If given, then the + parameters of the optimizer are loaded from the latest checkpoint + in this folder. + batch_size: The number of samples per batch. + + Returns: + The average time in seconds for a single optimization iteration. 
+ """ + if dataset is None: + dataset = datasets.EMPTY_DATASET + batch_size = dataset.size + else: + # default batch size is the entire dataset + batch_size = dataset.size if batch_size is None else batch_size + + # define the parameters of the optimization problem + if isinstance(seed, (list, tuple)): + # seed is a list of arrays + params = problem_generator.init_fixed_variables(seed) + else: + # seed is an int or None + params = problem.init_variables(seed) + + data_placeholder = tf.placeholder(tf.float32) + labels_placeholder = tf.placeholder(tf.int32) + + obj = problem.objective(params, data_placeholder, labels_placeholder) + gradients = problem.gradients(obj, params) + vars_to_preinitialize = params + + with tf.Session(graph=tf.get_default_graph()) as sess: + # initialize the parameter scope variables; necessary for apply_gradients + sess.run(tf.variables_initializer(vars_to_preinitialize)) + train_op = optimizer.apply_gradients(zip(gradients, params)) + if isinstance(train_op, tuple) or isinstance(train_op, list): + # LOL apply_gradients returns a tuple. Regular optimizers do not. + train_op = train_op[0] + vars_to_restore = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, + scope=OPTIMIZER_SCOPE) + vars_to_initialize = list( + set(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)) - + set(vars_to_restore) - set(vars_to_preinitialize)) + # load or initialize optimizer variables + if logdir is not None: + restorer = tf.Saver(var_list=vars_to_restore) + ckpt = tf.train.latest_checkpoint(logdir) + restorer.restore(sess, ckpt) + else: + sess.run(tf.variables_initializer(vars_to_restore)) + # initialize all the other variables + sess.run(tf.variables_initializer(vars_to_initialize)) + + problem.init_fn(sess) + + # generate the minibatch indices + batch_inds = dataset.batch_indices(num_steps, batch_size) + + avg_iter_time = [] + for batch in batch_inds: + # data to feed in + feed = {data_placeholder: dataset.data[batch], + labels_placeholder: dataset.labels[batch]} + + # run the optimization train operation + start = time.time() + sess.run([train_op], feed_dict=feed) + avg_iter_time.append(time.time() - start) + + return np.median(np.array(avg_iter_time)) diff --git a/models/research/learned_optimizer/metarun.py b/models/research/learned_optimizer/metarun.py new file mode 100644 index 0000000000000000000000000000000000000000..45a29623c7fd1381cef590c4e8440d8749585b72 --- /dev/null +++ b/models/research/learned_optimizer/metarun.py @@ -0,0 +1,394 @@ +# Copyright 2017 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Scripts for meta-optimization.""" + +from __future__ import print_function + +import os + +import tensorflow as tf + +import metaopt +from learned_optimizer.optimizer import coordinatewise_rnn +from learned_optimizer.optimizer import global_learning_rate +from learned_optimizer.optimizer import hierarchical_rnn +from learned_optimizer.optimizer import learning_rate_schedule +from learned_optimizer.optimizer import trainable_adam +from learned_optimizer.problems import problem_sets as ps +from learned_optimizer.problems import problem_spec + +tf.app.flags.DEFINE_string("train_dir", "/tmp/lol/", + """Directory to store parameters and results.""") + +tf.app.flags.DEFINE_integer("task", 0, + """Task id of the replica running the training.""") +tf.app.flags.DEFINE_integer("worker_tasks", 1, + """Number of tasks in the worker job.""") + +tf.app.flags.DEFINE_integer("num_problems", 1000, + """Number of sub-problems to run.""") +tf.app.flags.DEFINE_integer("num_meta_iterations", 5, + """Number of meta-iterations to optimize.""") +tf.app.flags.DEFINE_integer("num_unroll_scale", 40, + """The scale parameter of the exponential + distribution from which the number of partial + unrolls is drawn""") +tf.app.flags.DEFINE_integer("min_num_unrolls", 1, + """The minimum number of unrolls per problem.""") +tf.app.flags.DEFINE_integer("num_partial_unroll_itr_scale", 200, + """The scale parameter of the exponential + distribution from which the number of iterations + per unroll is drawn.""") +tf.app.flags.DEFINE_integer("min_num_itr_partial_unroll", 50, + """The minimum number of iterations for one + unroll.""") + +tf.app.flags.DEFINE_string("optimizer", "HierarchicalRNN", + """Which meta-optimizer to train.""") + +# CoordinatewiseRNN-specific flags +tf.app.flags.DEFINE_integer("cell_size", 20, + """Size of the RNN hidden state in each layer.""") +tf.app.flags.DEFINE_integer("num_cells", 2, + """Number of RNN layers.""") +tf.app.flags.DEFINE_string("cell_cls", "GRUCell", + """Type of RNN cell to use.""") + +# Metaoptimization parameters +tf.app.flags.DEFINE_float("meta_learning_rate", 1e-6, + """The learning rate for the meta-optimizer.""") +tf.app.flags.DEFINE_float("gradient_clip_level", 1e4, + """The level to clip gradients to.""") + +# Training set selection +tf.app.flags.DEFINE_boolean("include_quadratic_problems", False, + """Include non-noisy quadratic problems.""") +tf.app.flags.DEFINE_boolean("include_noisy_quadratic_problems", True, + """Include noisy quadratic problems.""") +tf.app.flags.DEFINE_boolean("include_large_quadratic_problems", True, + """Include very large quadratic problems.""") +tf.app.flags.DEFINE_boolean("include_bowl_problems", True, + """Include 2D bowl problems.""") +tf.app.flags.DEFINE_boolean("include_softmax_2_class_problems", True, + """Include 2-class logistic regression problems.""") +tf.app.flags.DEFINE_boolean("include_noisy_softmax_2_class_problems", True, + """Include noisy 2-class logistic regression + problems.""") +tf.app.flags.DEFINE_boolean("include_optimization_test_problems", True, + """Include non-noisy versions of classic + optimization test problems, e.g. Rosenbrock.""") +tf.app.flags.DEFINE_boolean("include_noisy_optimization_test_problems", True, + """Include gradient-noise versions of classic + optimization test problems, e.g. 
Rosenbrock""") +tf.app.flags.DEFINE_boolean("include_fully_connected_random_2_class_problems", + True, """Include MLP problems for 2 classes.""") +tf.app.flags.DEFINE_boolean("include_matmul_problems", True, + """Include matrix multiplication problems.""") +tf.app.flags.DEFINE_boolean("include_log_objective_problems", True, + """Include problems where the objective is the log + objective of another problem, e.g. Bowl.""") +tf.app.flags.DEFINE_boolean("include_rescale_problems", True, + """Include problems where the parameters are scaled + version of the original parameters.""") +tf.app.flags.DEFINE_boolean("include_norm_problems", True, + """Include problems where the objective is the + N-norm of another problem, e.g. Quadratic.""") +tf.app.flags.DEFINE_boolean("include_sum_problems", True, + """Include problems where the objective is the sum + of the objectives of the subproblems that make + up the problem parameters. Per-problem tensors + are still independent of each other.""") +tf.app.flags.DEFINE_boolean("include_sparse_gradient_problems", True, + """Include problems where the gradient is set to 0 + with some high probability.""") +tf.app.flags.DEFINE_boolean("include_sparse_softmax_problems", False, + """Include sparse softmax problems.""") +tf.app.flags.DEFINE_boolean("include_one_hot_sparse_softmax_problems", False, + """Include one-hot sparse softmax problems.""") +tf.app.flags.DEFINE_boolean("include_noisy_bowl_problems", True, + """Include noisy bowl problems.""") +tf.app.flags.DEFINE_boolean("include_noisy_norm_problems", True, + """Include noisy norm problems.""") +tf.app.flags.DEFINE_boolean("include_noisy_sum_problems", True, + """Include noisy sum problems.""") +tf.app.flags.DEFINE_boolean("include_sum_of_quadratics_problems", False, + """Include sum of quadratics problems.""") +tf.app.flags.DEFINE_boolean("include_projection_quadratic_problems", False, + """Include projection quadratic problems.""") +tf.app.flags.DEFINE_boolean("include_outward_snake_problems", False, + """Include outward snake problems.""") +tf.app.flags.DEFINE_boolean("include_dependency_chain_problems", False, + """Include dependency chain problems.""") +tf.app.flags.DEFINE_boolean("include_min_max_well_problems", False, + """Include min-max well problems.""") + +# Optimizer parameters: initialization and scale values +tf.app.flags.DEFINE_float("min_lr", 1e-6, + """The minimum initial learning rate.""") +tf.app.flags.DEFINE_float("max_lr", 1e-2, + """The maximum initial learning rate.""") + +# Optimizer parameters: small features. +tf.app.flags.DEFINE_boolean("zero_init_lr_weights", True, + """Whether to initialize the learning rate weights + to 0 rather than the scaled random initialization + used for other RNN variables.""") +tf.app.flags.DEFINE_boolean("use_relative_lr", True, + """Whether to use the relative learning rate as an + input during training. Can only be used if + learnable_decay is also True.""") +tf.app.flags.DEFINE_boolean("use_extreme_indicator", False, + """Whether to use the extreme indicator for learning + rates as an input during training. 
Can only be + used if learnable_decay is also True.""") +tf.app.flags.DEFINE_boolean("use_log_means_squared", True, + """Whether to track the log of the mean squared + grads instead of the means squared grads.""") +tf.app.flags.DEFINE_boolean("use_problem_lr_mean", True, + """Whether to use the mean over all learning rates + in the problem when calculating the relative + learning rate.""") + +# Optimizer parameters: major features +tf.app.flags.DEFINE_boolean("learnable_decay", True, + """Whether to learn weights that dynamically + modulate the input scale via RMS decay.""") +tf.app.flags.DEFINE_boolean("dynamic_output_scale", True, + """Whether to learn weights that dynamically + modulate the output scale.""") +tf.app.flags.DEFINE_boolean("use_log_objective", True, + """Whether to use the log of the scaled objective + rather than just the scaled obj for training.""") +tf.app.flags.DEFINE_boolean("use_attention", False, + """Whether to learn where to attend.""") +tf.app.flags.DEFINE_boolean("use_second_derivatives", True, + """Whether to use second derivatives.""") +tf.app.flags.DEFINE_integer("num_gradient_scales", 4, + """How many different timescales to keep for + gradient history. If > 1, also learns a scale + factor for gradient history.""") +tf.app.flags.DEFINE_float("max_log_lr", 33, + """The maximum log learning rate allowed.""") +tf.app.flags.DEFINE_float("objective_training_max_multiplier", -1, + """How much the objective can grow before training on + this problem / param pair is terminated. Sets a max + on the objective value when multiplied by the + initial objective. If <= 0, not used.""") +tf.app.flags.DEFINE_boolean("use_gradient_shortcut", True, + """Whether to add a learned affine projection of the + gradient to the update delta in addition to the + gradient function computed by the RNN.""") +tf.app.flags.DEFINE_boolean("use_lr_shortcut", False, + """Whether to add the difference between the current + learning rate and the desired learning rate to + the RNN input.""") +tf.app.flags.DEFINE_boolean("use_grad_products", True, + """Whether to use gradient products in the input to + the RNN. Only applicable when num_gradient_scales + > 1.""") +tf.app.flags.DEFINE_boolean("use_multiple_scale_decays", False, + """Whether to use many-timescale scale decays.""") +tf.app.flags.DEFINE_boolean("use_numerator_epsilon", False, + """Whether to use epsilon in the numerator of the + log objective.""") +tf.app.flags.DEFINE_boolean("learnable_inp_decay", True, + """Whether to learn input decay weight and bias.""") +tf.app.flags.DEFINE_boolean("learnable_rnn_init", True, + """Whether to learn RNN state initialization.""") + +FLAGS = tf.app.flags.FLAGS + +# The Size of the RNN hidden state in each layer: +# [PerParam, PerTensor, Global]. The length of this list must be 1, 2, or 3. +# If less than 3, the Global and/or PerTensor RNNs will not be created. + +HRNN_CELL_SIZES = [10, 20, 20] + + + +def register_optimizers(): + opts = {} + opts["CoordinatewiseRNN"] = coordinatewise_rnn.CoordinatewiseRNN + opts["GlobalLearningRate"] = global_learning_rate.GlobalLearningRate + opts["HierarchicalRNN"] = hierarchical_rnn.HierarchicalRNN + opts["LearningRateSchedule"] = learning_rate_schedule.LearningRateSchedule + opts["TrainableAdam"] = trainable_adam.TrainableAdam + return opts + + +def main(unused_argv): + """Runs the main script.""" + + opts = register_optimizers() + + # Choose a set of problems to optimize. 
By default this includes quadratics, + # 2-dimensional bowls, 2-class softmax problems, and non-noisy optimization + # test problems (e.g. Rosenbrock, Beale) + problems_and_data = [] + + if FLAGS.include_sparse_softmax_problems: + problems_and_data.extend(ps.sparse_softmax_2_class_sparse_problems()) + + if FLAGS.include_one_hot_sparse_softmax_problems: + problems_and_data.extend( + ps.one_hot_sparse_softmax_2_class_sparse_problems()) + + if FLAGS.include_quadratic_problems: + problems_and_data.extend(ps.quadratic_problems()) + + if FLAGS.include_noisy_quadratic_problems: + problems_and_data.extend(ps.quadratic_problems_noisy()) + + if FLAGS.include_large_quadratic_problems: + problems_and_data.extend(ps.quadratic_problems_large()) + + if FLAGS.include_bowl_problems: + problems_and_data.extend(ps.bowl_problems()) + + if FLAGS.include_noisy_bowl_problems: + problems_and_data.extend(ps.bowl_problems_noisy()) + + if FLAGS.include_softmax_2_class_problems: + problems_and_data.extend(ps.softmax_2_class_problems()) + + if FLAGS.include_noisy_softmax_2_class_problems: + problems_and_data.extend(ps.softmax_2_class_problems_noisy()) + + if FLAGS.include_optimization_test_problems: + problems_and_data.extend(ps.optimization_test_problems()) + + if FLAGS.include_noisy_optimization_test_problems: + problems_and_data.extend(ps.optimization_test_problems_noisy()) + + if FLAGS.include_fully_connected_random_2_class_problems: + problems_and_data.extend(ps.fully_connected_random_2_class_problems()) + + if FLAGS.include_matmul_problems: + problems_and_data.extend(ps.matmul_problems()) + + if FLAGS.include_log_objective_problems: + problems_and_data.extend(ps.log_objective_problems()) + + if FLAGS.include_rescale_problems: + problems_and_data.extend(ps.rescale_problems()) + + if FLAGS.include_norm_problems: + problems_and_data.extend(ps.norm_problems()) + + if FLAGS.include_noisy_norm_problems: + problems_and_data.extend(ps.norm_problems_noisy()) + + if FLAGS.include_sum_problems: + problems_and_data.extend(ps.sum_problems()) + + if FLAGS.include_noisy_sum_problems: + problems_and_data.extend(ps.sum_problems_noisy()) + + if FLAGS.include_sparse_gradient_problems: + problems_and_data.extend(ps.sparse_gradient_problems()) + if FLAGS.include_fully_connected_random_2_class_problems: + problems_and_data.extend(ps.sparse_gradient_problems_mlp()) + + if FLAGS.include_min_max_well_problems: + problems_and_data.extend(ps.min_max_well_problems()) + + if FLAGS.include_sum_of_quadratics_problems: + problems_and_data.extend(ps.sum_of_quadratics_problems()) + + if FLAGS.include_projection_quadratic_problems: + problems_and_data.extend(ps.projection_quadratic_problems()) + + if FLAGS.include_outward_snake_problems: + problems_and_data.extend(ps.outward_snake_problems()) + + if FLAGS.include_dependency_chain_problems: + problems_and_data.extend(ps.dependency_chain_problems()) + + # log directory + logdir = os.path.join(FLAGS.train_dir, + "{}_{}_{}_{}".format(FLAGS.optimizer, + FLAGS.cell_cls, + FLAGS.cell_size, + FLAGS.num_cells)) + + # get the optimizer class and arguments + optimizer_cls = opts[FLAGS.optimizer] + + assert len(HRNN_CELL_SIZES) in [1, 2, 3] + optimizer_args = (HRNN_CELL_SIZES,) + + optimizer_kwargs = { + "init_lr_range": (FLAGS.min_lr, FLAGS.max_lr), + "learnable_decay": FLAGS.learnable_decay, + "dynamic_output_scale": FLAGS.dynamic_output_scale, + "cell_cls": getattr(tf.contrib.rnn, FLAGS.cell_cls), + "use_attention": FLAGS.use_attention, + "use_log_objective": FLAGS.use_log_objective, + 
"num_gradient_scales": FLAGS.num_gradient_scales, + "zero_init_lr_weights": FLAGS.zero_init_lr_weights, + "use_log_means_squared": FLAGS.use_log_means_squared, + "use_relative_lr": FLAGS.use_relative_lr, + "use_extreme_indicator": FLAGS.use_extreme_indicator, + "max_log_lr": FLAGS.max_log_lr, + "obj_train_max_multiplier": FLAGS.objective_training_max_multiplier, + "use_problem_lr_mean": FLAGS.use_problem_lr_mean, + "use_gradient_shortcut": FLAGS.use_gradient_shortcut, + "use_second_derivatives": FLAGS.use_second_derivatives, + "use_lr_shortcut": FLAGS.use_lr_shortcut, + "use_grad_products": FLAGS.use_grad_products, + "use_multiple_scale_decays": FLAGS.use_multiple_scale_decays, + "use_numerator_epsilon": FLAGS.use_numerator_epsilon, + "learnable_inp_decay": FLAGS.learnable_inp_decay, + "learnable_rnn_init": FLAGS.learnable_rnn_init, + } + optimizer_spec = problem_spec.Spec( + optimizer_cls, optimizer_args, optimizer_kwargs) + + # make log directory + tf.gfile.MakeDirs(logdir) + + is_chief = FLAGS.task == 0 + # if this is a distributed run, make the chief run through problems in order + select_random_problems = FLAGS.worker_tasks == 1 or not is_chief + + def num_unrolls(): + return metaopt.sample_numiter(FLAGS.num_unroll_scale, FLAGS.min_num_unrolls) + + def num_partial_unroll_itrs(): + return metaopt.sample_numiter(FLAGS.num_partial_unroll_itr_scale, + FLAGS.min_num_itr_partial_unroll) + + # run it + metaopt.train_optimizer( + logdir, + optimizer_spec, + problems_and_data, + FLAGS.num_problems, + FLAGS.num_meta_iterations, + num_unrolls, + num_partial_unroll_itrs, + learning_rate=FLAGS.meta_learning_rate, + gradient_clip=FLAGS.gradient_clip_level, + is_chief=is_chief, + select_random_problems=select_random_problems, + obj_train_max_multiplier=FLAGS.objective_training_max_multiplier, + callbacks=[]) + + return 0 + + +if __name__ == "__main__": + tf.app.run() diff --git a/models/research/learned_optimizer/optimizer/BUILD b/models/research/learned_optimizer/optimizer/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..8953e7592ace416b786be2a6fa59f4c537c82644 --- /dev/null +++ b/models/research/learned_optimizer/optimizer/BUILD @@ -0,0 +1,69 @@ +package(default_visibility = ["//visibility:public"]) + +# Libraries +# ========= +py_library( + name = "coordinatewise_rnn", + srcs = ["coordinatewise_rnn.py"], + deps = [ + ":trainable_optimizer", + ":utils", + ], +) + +py_library( + name = "global_learning_rate", + srcs = ["global_learning_rate.py"], + deps = [ + ":trainable_optimizer", + ], +) + +py_library( + name = "hierarchical_rnn", + srcs = ["hierarchical_rnn.py"], + deps = [ + ":rnn_cells", + ":trainable_optimizer", + ":utils", + ], +) + +py_library( + name = "learning_rate_schedule", + srcs = ["learning_rate_schedule.py"], + deps = [ + ":trainable_optimizer", + ], +) + +py_library( + name = "rnn_cells", + srcs = ["rnn_cells.py"], + deps = [ + ":utils", + ], +) + +py_library( + name = "trainable_adam", + srcs = ["trainable_adam.py"], + deps = [ + ":trainable_optimizer", + ":utils", + ], +) + +py_library( + name = "trainable_optimizer", + srcs = ["trainable_optimizer.py"], + deps = [ + ], +) + +py_library( + name = "utils", + srcs = ["utils.py"], + deps = [ + ], +) diff --git a/models/research/learned_optimizer/optimizer/coordinatewise_rnn.py b/models/research/learned_optimizer/optimizer/coordinatewise_rnn.py new file mode 100644 index 0000000000000000000000000000000000000000..3d699504b7a3d86643bea6b295d20b2434131a99 --- /dev/null +++ 
b/models/research/learned_optimizer/optimizer/coordinatewise_rnn.py @@ -0,0 +1,316 @@ +# Copyright 2017 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Collection of trainable optimizers for meta-optimization.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math + +import numpy as np +import tensorflow as tf + +from learned_optimizer.optimizer import utils +from learned_optimizer.optimizer import trainable_optimizer as opt + + +# Default was 1e-3 +tf.app.flags.DEFINE_float("crnn_rnn_readout_scale", 0.5, + """The initialization scale for the RNN readouts.""") +tf.app.flags.DEFINE_float("crnn_default_decay_var_init", 2.2, + """The default initializer value for any decay/ + momentum style variables and constants. + sigmoid(2.2) ~ 0.9, sigmoid(-2.2) ~ 0.01.""") + +FLAGS = tf.flags.FLAGS + + +class CoordinatewiseRNN(opt.TrainableOptimizer): + """RNN that operates on each coordinate of the problem independently.""" + + def __init__(self, + cell_sizes, + cell_cls, + init_lr_range=(1., 1.), + dynamic_output_scale=True, + learnable_decay=True, + zero_init_lr_weights=False, + **kwargs): + """Initializes the RNN per-parameter optimizer. + + Args: + cell_sizes: List of hidden state sizes for each RNN cell in the network + cell_cls: tf.contrib.rnn class for specifying the RNN cell type + init_lr_range: the range in which to initialize the learning rates. + dynamic_output_scale: whether to learn weights that dynamically modulate + the output scale (default: True) + learnable_decay: whether to learn weights that dynamically modulate the + input scale via RMS style decay (default: True) + zero_init_lr_weights: whether to initialize the lr weights to zero + **kwargs: args passed to TrainableOptimizer's constructor + + Raises: + ValueError: If the init lr range is not of length 2. + ValueError: If the init lr range is not a valid range (min > max). 
+ """ + if len(init_lr_range) != 2: + raise ValueError( + "Initial LR range must be len 2, was {}".format(len(init_lr_range))) + if init_lr_range[0] > init_lr_range[1]: + raise ValueError("Initial LR range min is greater than max.") + self.init_lr_range = init_lr_range + + self.zero_init_lr_weights = zero_init_lr_weights + self.reuse_vars = False + + # create the RNN cell + with tf.variable_scope(opt.OPTIMIZER_SCOPE): + self.component_cells = [cell_cls(sz) for sz in cell_sizes] + self.cell = tf.contrib.rnn.MultiRNNCell(self.component_cells) + + # random normal initialization scaled by the output size + scale_factor = FLAGS.crnn_rnn_readout_scale / math.sqrt(cell_sizes[-1]) + scaled_init = tf.random_normal_initializer(0., scale_factor) + + # weights for projecting the hidden state to a parameter update + self.update_weights = tf.get_variable("update_weights", + shape=(cell_sizes[-1], 1), + initializer=scaled_init) + + self._initialize_decay(learnable_decay, (cell_sizes[-1], 1), scaled_init) + + self._initialize_lr(dynamic_output_scale, (cell_sizes[-1], 1), + scaled_init) + + state_size = sum([sum(state_size) for state_size in self.cell.state_size]) + self._init_vector = tf.get_variable( + "init_vector", shape=[1, state_size], + initializer=tf.random_uniform_initializer(-1., 1.)) + + state_keys = ["rms", "rnn", "learning_rate", "decay"] + super(CoordinatewiseRNN, self).__init__("cRNN", state_keys, **kwargs) + + def _initialize_decay( + self, learnable_decay, weights_tensor_shape, scaled_init): + """Initializes the decay weights and bias variables or tensors. + + Args: + learnable_decay: Whether to use learnable decay. + weights_tensor_shape: The shape the weight tensor should take. + scaled_init: The scaled initialization for the weights tensor. + """ + if learnable_decay: + + # weights for projecting the hidden state to the RMS decay term + self.decay_weights = tf.get_variable("decay_weights", + shape=weights_tensor_shape, + initializer=scaled_init) + self.decay_bias = tf.get_variable( + "decay_bias", shape=(1,), + initializer=tf.constant_initializer( + FLAGS.crnn_default_decay_var_init)) + else: + self.decay_weights = tf.zeros_like(self.update_weights) + self.decay_bias = tf.constant(FLAGS.crnn_default_decay_var_init) + + def _initialize_lr( + self, dynamic_output_scale, weights_tensor_shape, scaled_init): + """Initializes the learning rate weights and bias variables or tensors. + + Args: + dynamic_output_scale: Whether to use a dynamic output scale. + weights_tensor_shape: The shape the weight tensor should take. + scaled_init: The scaled initialization for the weights tensor. + """ + if dynamic_output_scale: + zero_init = tf.constant_initializer(0.) 
+ wt_init = zero_init if self.zero_init_lr_weights else scaled_init + self.lr_weights = tf.get_variable("learning_rate_weights", + shape=weights_tensor_shape, + initializer=wt_init) + self.lr_bias = tf.get_variable("learning_rate_bias", shape=(1,), + initializer=zero_init) + else: + self.lr_weights = tf.zeros_like(self.update_weights) + self.lr_bias = tf.zeros([1, 1]) + + def _initialize_state(self, var): + """Return a dictionary mapping names of state variables to their values.""" + vectorized_shape = [var.get_shape().num_elements(), 1] + + min_lr = self.init_lr_range[0] + max_lr = self.init_lr_range[1] + if min_lr == max_lr: + init_lr = tf.constant(min_lr, shape=vectorized_shape) + else: + actual_vals = tf.random_uniform(vectorized_shape, + np.log(min_lr), + np.log(max_lr)) + init_lr = tf.exp(actual_vals) + + ones = tf.ones(vectorized_shape) + rnn_init = ones * self._init_vector + + return { + "rms": tf.ones(vectorized_shape), + "learning_rate": init_lr, + "rnn": rnn_init, + "decay": tf.ones(vectorized_shape), + } + + def _compute_update(self, param, grad, state): + """Update parameters given the gradient and state. + + Args: + param: tensor of parameters + grad: tensor of gradients with the same shape as param + state: a dictionary containing any state for the optimizer + + Returns: + updated_param: updated parameters + updated_state: updated state variables in a dictionary + """ + + with tf.variable_scope(opt.OPTIMIZER_SCOPE) as scope: + + if self.reuse_vars: + scope.reuse_variables() + else: + self.reuse_vars = True + + param_shape = tf.shape(param) + + (grad_values, decay_state, rms_state, rnn_state, learning_rate_state, + grad_indices) = self._extract_gradients_and_internal_state( + grad, state, param_shape) + + # Vectorize and scale the gradients. + grad_scaled, rms = utils.rms_scaling(grad_values, decay_state, rms_state) + + # Apply the RNN update. + rnn_state_tuples = self._unpack_rnn_state_into_tuples(rnn_state) + rnn_output, rnn_state_tuples = self.cell(grad_scaled, rnn_state_tuples) + rnn_state = self._pack_tuples_into_rnn_state(rnn_state_tuples) + + # Compute the update direction (a linear projection of the RNN output). + delta = utils.project(rnn_output, self.update_weights) + + # The updated decay is an affine projection of the hidden state + decay = utils.project(rnn_output, self.decay_weights, + bias=self.decay_bias, activation=tf.nn.sigmoid) + + # Compute the change in learning rate (an affine projection of the RNN + # state, passed through a 2x sigmoid, so the change is bounded). + learning_rate_change = 2. * utils.project(rnn_output, self.lr_weights, + bias=self.lr_bias, + activation=tf.nn.sigmoid) + + # Update the learning rate. + new_learning_rate = learning_rate_change * learning_rate_state + + # Apply the update to the parameters. + update = tf.reshape(new_learning_rate * delta, tf.shape(grad_values)) + + if isinstance(grad, tf.IndexedSlices): + update = utils.stack_tensor(update, grad_indices, param, + param_shape[:1]) + rms = utils.update_slices(rms, grad_indices, state["rms"], param_shape) + new_learning_rate = utils.update_slices(new_learning_rate, grad_indices, + state["learning_rate"], + param_shape) + rnn_state = utils.update_slices(rnn_state, grad_indices, state["rnn"], + param_shape) + decay = utils.update_slices(decay, grad_indices, state["decay"], + param_shape) + + new_param = param - update + + # Collect the update and new state. 
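    # Descriptive note: the keys of the dictionary built below must match the
    # state_keys list ["rms", "rnn", "learning_rate", "decay"] passed to
    # TrainableOptimizer in __init__; these names are how the per-parameter
    # state is exposed as optimizer slots (see the get_slot usage in
    # metaopt.test_optimizer).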
+ new_state = { + "rms": rms, + "learning_rate": new_learning_rate, + "rnn": rnn_state, + "decay": decay, + } + + return new_param, new_state + + def _extract_gradients_and_internal_state(self, grad, state, param_shape): + """Extracts the gradients and relevant internal state. + + If the gradient is sparse, extracts the appropriate slices from the state. + + Args: + grad: The current gradient. + state: The current state. + param_shape: The shape of the parameter (used if gradient is sparse). + + Returns: + grad_values: The gradient value tensor. + decay_state: The current decay state. + rms_state: The current rms state. + rnn_state: The current state of the internal rnns. + learning_rate_state: The current learning rate state. + grad_indices: The indices for the gradient tensor, if sparse. + None otherwise. + """ + if isinstance(grad, tf.IndexedSlices): + grad_indices, grad_values = utils.accumulate_sparse_gradients(grad) + decay_state = utils.slice_tensor(state["decay"], grad_indices, + param_shape) + rms_state = utils.slice_tensor(state["rms"], grad_indices, param_shape) + rnn_state = utils.slice_tensor(state["rnn"], grad_indices, param_shape) + learning_rate_state = utils.slice_tensor(state["learning_rate"], + grad_indices, param_shape) + decay_state.set_shape([None, 1]) + rms_state.set_shape([None, 1]) + else: + grad_values = grad + grad_indices = None + + decay_state = state["decay"] + rms_state = state["rms"] + rnn_state = state["rnn"] + learning_rate_state = state["learning_rate"] + return (grad_values, decay_state, rms_state, rnn_state, learning_rate_state, + grad_indices) + + def _unpack_rnn_state_into_tuples(self, rnn_state): + """Creates state tuples from the rnn state vector.""" + rnn_state_tuples = [] + cur_state_pos = 0 + for cell in self.component_cells: + total_state_size = sum(cell.state_size) + cur_state = tf.slice(rnn_state, [0, cur_state_pos], + [-1, total_state_size]) + cur_state_tuple = tf.split(value=cur_state, num_or_size_splits=2, + axis=1) + rnn_state_tuples.append(cur_state_tuple) + cur_state_pos += total_state_size + return rnn_state_tuples + + def _pack_tuples_into_rnn_state(self, rnn_state_tuples): + """Creates a single state vector concatenated along column axis.""" + rnn_state = None + for new_state_tuple in rnn_state_tuples: + new_c, new_h = new_state_tuple + if rnn_state is None: + rnn_state = tf.concat([new_c, new_h], axis=1) + else: + rnn_state = tf.concat([rnn_state, tf.concat([new_c, new_h], 1)], axis=1) + return rnn_state + diff --git a/models/research/learned_optimizer/optimizer/global_learning_rate.py b/models/research/learned_optimizer/optimizer/global_learning_rate.py new file mode 100644 index 0000000000000000000000000000000000000000..bcf102fff054e9fe9e92d4379538f6394314fe1c --- /dev/null +++ b/models/research/learned_optimizer/optimizer/global_learning_rate.py @@ -0,0 +1,40 @@ +# Copyright 2017 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""A trainable optimizer that learns a single global learning rate.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from learned_optimizer.optimizer import trainable_optimizer + + +class GlobalLearningRate(trainable_optimizer.TrainableOptimizer): + """Optimizes for a single global learning rate.""" + + def __init__(self, initial_rate=1e-3, **kwargs): + """Initializes the global learning rate.""" + with tf.variable_scope(trainable_optimizer.OPTIMIZER_SCOPE): + initializer = tf.constant_initializer(initial_rate) + self.learning_rate = tf.get_variable("global_learning_rate", shape=(), + initializer=initializer) + super(GlobalLearningRate, self).__init__("GLR", [], **kwargs) + + def _compute_update(self, param, grad, state): + return param - tf.scalar_mul(self.learning_rate, grad), state + diff --git a/models/research/learned_optimizer/optimizer/hierarchical_rnn.py b/models/research/learned_optimizer/optimizer/hierarchical_rnn.py new file mode 100644 index 0000000000000000000000000000000000000000..953b72b5d04724a11a0e95385bbe0c6a0d91289d --- /dev/null +++ b/models/research/learned_optimizer/optimizer/hierarchical_rnn.py @@ -0,0 +1,792 @@ +# Copyright 2017 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Collection of trainable optimizers for meta-optimization.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math + +import numpy as np +import tensorflow as tf + +from tensorflow.python.ops import state_ops +from learned_optimizer.optimizer import rnn_cells +from learned_optimizer.optimizer import trainable_optimizer as opt +from learned_optimizer.optimizer import utils + +# Default was 0.1 +tf.app.flags.DEFINE_float("biasgrucell_scale", 0.5, + """The scale for the internal BiasGRUCell vars.""") +# Default was 0 +tf.app.flags.DEFINE_float("biasgrucell_gate_bias_init", 2.2, + """The bias for the internal BiasGRUCell reset and + update gate variables.""") +# Default was 1e-3 +tf.app.flags.DEFINE_float("hrnn_rnn_readout_scale", 0.5, + """The initialization scale for the RNN readouts.""") +tf.app.flags.DEFINE_float("hrnn_default_decay_var_init", 2.2, + """The default initializer value for any decay/ + momentum style variables and constants. + sigmoid(2.2) ~ 0.9, sigmoid(-2.2) ~ 0.01.""") +# Default was 2.2 +tf.app.flags.DEFINE_float("scale_decay_bias_init", 3.2, + """The initialization for the scale decay bias. 
This + is the initial bias for the timescale for the + exponential avg of the mean square gradients.""") +tf.app.flags.DEFINE_float("learning_rate_momentum_logit_init", 3.2, + """Initialization for the learning rate momentum.""") +# Default was 0.1 +tf.app.flags.DEFINE_float("hrnn_affine_scale", 0.5, + """The initialization scale for the weight matrix of + the bias variables in layer0 and 1 of the hrnn.""") + +FLAGS = tf.flags.FLAGS + + +class HierarchicalRNN(opt.TrainableOptimizer): + """3 level hierarchical RNN. + + Optionally uses second order gradient information and has decoupled evaluation + and update locations. + """ + + def __init__(self, level_sizes, init_lr_range=(1e-6, 1e-2), + learnable_decay=True, dynamic_output_scale=True, + use_attention=False, use_log_objective=True, + num_gradient_scales=4, zero_init_lr_weights=True, + use_log_means_squared=True, use_relative_lr=True, + use_extreme_indicator=False, max_log_lr=33, + obj_train_max_multiplier=-1, use_problem_lr_mean=False, + use_gradient_shortcut=False, use_lr_shortcut=False, + use_grad_products=False, use_multiple_scale_decays=False, + learnable_inp_decay=True, learnable_rnn_init=True, + random_seed=None, **kwargs): + """Initializes the RNN per-parameter optimizer. + + The hierarchy consists of up to three levels: + Level 0: per parameter RNN + Level 1: per tensor RNN + Level 2: global RNN + + Args: + level_sizes: list or tuple with 1, 2, or 3 integers, the number of units + in each RNN in the hierarchy (level0, level1, level2). + length 1: only coordinatewise rnn's will be used + length 2: coordinatewise and tensor-level rnn's will be used + length 3: a single global-level rnn will be used in addition to + coordinatewise and tensor-level + init_lr_range: the range in which to initialize the learning rates + learnable_decay: whether to learn weights that dynamically modulate the + input scale via RMS style decay + dynamic_output_scale: whether to learn weights that dynamically modulate + the output scale + use_attention: whether to use attention to train the optimizer + use_log_objective: whether to train on the log of the objective + num_gradient_scales: the number of scales to use for gradient history + zero_init_lr_weights: whether to initialize the lr weights to zero + use_log_means_squared: whether to track the log of the means_squared, + used as a measure of signal vs. noise in gradient. + use_relative_lr: whether to use the relative learning rate as an + input during training (requires learnable_decay=True) + use_extreme_indicator: whether to use the extreme indicator for learning + rates as an input during training (requires learnable_decay=True) + max_log_lr: the maximum log learning rate allowed during train or test + obj_train_max_multiplier: max objective increase during a training run + use_problem_lr_mean: whether to use the mean over all learning rates in + the problem when calculating the relative learning rate as opposed to + the per-tensor mean + use_gradient_shortcut: Whether to add a learned affine projection of the + gradient to the update delta in addition to the gradient function + computed by the RNN + use_lr_shortcut: Whether to add as input the difference between the log lr + and the desired log lr (1e-3) + use_grad_products: Whether to use gradient products in the rnn input. + Only applicable if num_gradient_scales > 1 + use_multiple_scale_decays: Whether to use multiple scales for the scale + decay, as with input decay + learnable_inp_decay: Whether to learn the input decay weights and bias. 
+ learnable_rnn_init: Whether to learn the RNN state initialization. + random_seed: Random seed for random variable initializers. (Default: None) + **kwargs: args passed to TrainableOptimizer's constructor + + Raises: + ValueError: If level_sizes is not a length 1, 2, or 3 list. + ValueError: If there are any non-integer sizes in level_sizes. + ValueError: If the init lr range is not of length 2. + ValueError: If the init lr range is not a valid range (min > max). + """ + if len(level_sizes) not in [1, 2, 3]: + raise ValueError("HierarchicalRNN only supports 1, 2, or 3 levels in the " + "hierarchy, but {} were requested.".format( + len(level_sizes))) + if any(not isinstance(level, int) for level in level_sizes): + raise ValueError("Level sizes must be integer values, were {}".format( + level_sizes)) + if len(init_lr_range) != 2: + raise ValueError( + "Initial LR range must be len 2, was {}".format(len(init_lr_range))) + if init_lr_range[0] > init_lr_range[1]: + raise ValueError("Initial LR range min is greater than max.") + + self.learnable_decay = learnable_decay + self.dynamic_output_scale = dynamic_output_scale + self.use_attention = use_attention + self.use_log_objective = use_log_objective + self.num_gradient_scales = num_gradient_scales + self.zero_init_lr_weights = zero_init_lr_weights + self.use_log_means_squared = use_log_means_squared + self.use_relative_lr = use_relative_lr + self.use_extreme_indicator = use_extreme_indicator + self.max_log_lr = max_log_lr + self.use_problem_lr_mean = use_problem_lr_mean + self.use_gradient_shortcut = use_gradient_shortcut + self.use_lr_shortcut = use_lr_shortcut + self.use_grad_products = use_grad_products + self.use_multiple_scale_decays = use_multiple_scale_decays + self.learnable_inp_decay = learnable_inp_decay + self.learnable_rnn_init = learnable_rnn_init + + self.random_seed = random_seed + + self.num_layers = len(level_sizes) + self.init_lr_range = init_lr_range + + self.reuse_vars = None + self.reuse_global_state = None + self.cells = [] + self.init_vectors = [] + + with tf.variable_scope(opt.OPTIMIZER_SCOPE): + + self._initialize_rnn_cells(level_sizes) + + # get the cell size for the per-parameter RNN (Level 0) + cell_size = level_sizes[0] + + # Random normal initialization scaled by the output size. This is the + # scale for the RNN *readouts*. RNN internal weight scale is set in the + # BiasGRUCell call. 
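    # Worked example (assuming the flag default of 0.5 and the default
    # per-parameter cell size of 10 from HRNN_CELL_SIZES in metarun.py): the
    # scale_factor computed below is 0.5 / sqrt(10) ~= 0.158, so readout
    # initializations shrink as the per-parameter hidden state gets larger.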
+ scale_factor = FLAGS.hrnn_rnn_readout_scale / math.sqrt(cell_size) + scaled_init = tf.random_normal_initializer(0., scale_factor, + seed=self.random_seed) + + # weights for projecting the hidden state to a parameter update + self.update_weights = tf.get_variable("update_weights", + shape=(cell_size, 1), + initializer=scaled_init) + + if self.use_attention: + # weights for projecting the hidden state to the location at which the + # gradient is attended + self.attention_weights = tf.get_variable( + "attention_weights", + initializer=self.update_weights.initialized_value()) + + # weights for projecting the hidden state to the RMS decay term + self._initialize_scale_decay((cell_size, 1), scaled_init) + self._initialize_input_decay((cell_size, 1), scaled_init) + + self._initialize_lr((cell_size, 1), scaled_init) + + state_keys = ["parameter", "layer", "scl_decay", "inp_decay", "true_param"] + + if self.dynamic_output_scale: + state_keys.append("log_learning_rate") + + for i in range(self.num_gradient_scales): + state_keys.append("grad_accum{}".format(i + 1)) + state_keys.append("ms{}".format(i + 1)) + + super(HierarchicalRNN, self).__init__( + "hRNN", state_keys, use_attention=use_attention, + use_log_objective=use_log_objective, + obj_train_max_multiplier=obj_train_max_multiplier, **kwargs) + + def _initialize_rnn_cells(self, level_sizes): + """Initializes the RNN cells to use in the hierarchical RNN.""" + + # RNN Cell layers (0 -> lowest, 1 -> middle, 2 -> global) + for level in range(self.num_layers): + scope = "Level{}_RNN".format(level) + with tf.variable_scope(scope): + hcell = rnn_cells.BiasGRUCell( + level_sizes[level], + scale=FLAGS.biasgrucell_scale, + gate_bias_init=FLAGS.biasgrucell_gate_bias_init, + random_seed=self.random_seed) + self.cells.append(hcell) + if self.learnable_rnn_init: + self.init_vectors.append(tf.Variable( + tf.random_uniform([1, hcell.state_size], -1., 1., + seed=self.random_seed), + name="init_vector")) + else: + self.init_vectors.append( + tf.random_uniform([1, hcell.state_size], -1., 1., + seed=self.random_seed)) + + def _initialize_scale_decay(self, weights_tensor_shape, scaled_init): + """Initializes the scale decay weights and bias variables or tensors. + + Args: + weights_tensor_shape: The shape the weight tensor should take. + scaled_init: The scaled initialization for the weights tensor. + """ + if self.learnable_decay: + self.scl_decay_weights = tf.get_variable("scl_decay_weights", + shape=weights_tensor_shape, + initializer=scaled_init) + scl_decay_bias_init = tf.constant_initializer( + FLAGS.scale_decay_bias_init) + self.scl_decay_bias = tf.get_variable("scl_decay_bias", + shape=(1,), + initializer=scl_decay_bias_init) + else: + self.scl_decay_weights = tf.zeros_like(self.update_weights) + self.scl_decay_bias = tf.log(0.93 / (1. - 0.93)) + + def _initialize_input_decay(self, weights_tensor_shape, scaled_init): + """Initializes the input scale decay weights and bias variables or tensors. + + Args: + weights_tensor_shape: The shape the weight tensor should take. + scaled_init: The scaled initialization for the weights tensor. 
+ """ + if (self.learnable_decay and self.num_gradient_scales > 1 and + self.learnable_inp_decay): + self.inp_decay_weights = tf.get_variable("inp_decay_weights", + shape=weights_tensor_shape, + initializer=scaled_init) + inp_decay_bias_init = tf.constant_initializer( + FLAGS.hrnn_default_decay_var_init) + self.inp_decay_bias = tf.get_variable("inp_decay_bias", + shape=(1,), + initializer=inp_decay_bias_init) + else: + self.inp_decay_weights = tf.zeros_like(self.update_weights) + self.inp_decay_bias = tf.log(0.89 / (1. - 0.89)) + + def _initialize_lr(self, weights_tensor_shape, scaled_init): + """Initializes the learning rate weights and bias variables or tensors. + + Args: + weights_tensor_shape: The shape the weight tensor should take. + scaled_init: The scaled initialization for the weights tensor. + """ + if self.dynamic_output_scale: + zero_init = tf.constant_initializer(0.) + wt_init = zero_init if self.zero_init_lr_weights else scaled_init + self.lr_weights = tf.get_variable("learning_rate_weights", + shape=weights_tensor_shape, + initializer=wt_init) + self.lr_bias = tf.get_variable("learning_rate_bias", shape=(1,), + initializer=zero_init) + else: + self.lr_weights = tf.zeros_like(self.update_weights) + self.lr_bias = tf.zeros([1, 1]) + + def _initialize_state(self, var): + """Return a dictionary mapping names of state variables to their values.""" + var_vectorized = tf.reshape(var, [-1, 1]) + ndim = var_vectorized.get_shape().as_list()[0] + + state = { + # parameter init tensor is [var_ndim x layer0_cell_size] + "parameter": tf.ones([ndim, 1]) * self.init_vectors[0], + "scl_decay": tf.zeros_like(var_vectorized), + "inp_decay": tf.zeros_like(var_vectorized), + "true_param": var, + } + + if self.num_layers > 1: + # layer init tensor is [1 x layer1_cell_size] + state["layer"] = tf.ones([1, 1]) * self.init_vectors[1] + + if self.dynamic_output_scale: + min_lr = self.init_lr_range[0] + max_lr = self.init_lr_range[1] + if min_lr == max_lr: + log_init_lr = tf.log(min_lr * tf.ones_like(var_vectorized)) + else: + # Use a random offset to increase the likelihood that the average of the + # LRs for this variable is different from the LRs for other variables. + actual_vals = tf.random_uniform(var_vectorized.get_shape().as_list(), + np.log(min_lr) / 2., + np.log(max_lr) / 2., + seed=self.random_seed) + offset = tf.random_uniform((), np.log(min_lr) / 2., np.log(max_lr) / 2., + seed=self.random_seed) + log_init_lr = actual_vals + offset + # Clip the log learning rate to the flag at the top end, and to + # (log(min int32) - 1) at the bottom + clipped = tf.clip_by_value(log_init_lr, -33, self.max_log_lr) + state["log_learning_rate"] = clipped + + for i in range(self.num_gradient_scales): + state["grad_accum{}".format(i + 1)] = tf.zeros_like(var_vectorized) + state["ms{}".format(i + 1)] = tf.zeros_like(var_vectorized) + + return state + + def _initialize_global_state(self): + if self.num_layers < 3: + return [] + rnn_global_init = tf.ones([1, 1]) * self.init_vectors[2] + return [rnn_global_init] + + def _compute_updates(self, params, grads, states, global_state): + # Store the updated parameters and states. + updated_params = [] + updated_attention = [] + updated_states = [] + + with tf.variable_scope(opt.OPTIMIZER_SCOPE): + + mean_log_lr = self._compute_mean_log_lr(states) + + # Iterate over the layers. 
+ for param, grad_unflat, state in zip(params, grads, states): + + with tf.variable_scope("PerTensor", reuse=self.reuse_vars): + self.reuse_vars = True + grad = tf.reshape(grad_unflat, [-1, 1]) + + # Create the RNN input. We will optionally extend it with additional + # features such as curvature and gradient signal vs. noise. + (grads_scaled, mean_squared_gradients, + grads_accum) = self._compute_scaled_and_ms_grads(grad, state) + rnn_input = [g for g in grads_scaled] + + self._extend_rnn_input(rnn_input, state, grads_scaled, + mean_squared_gradients, mean_log_lr) + + # Concatenate any features we've collected. + rnn_input_tensor = tf.concat(rnn_input, 1) + + layer_state, new_param_state = self._update_rnn_cells( + state, global_state, rnn_input_tensor, + len(rnn_input) != len(grads_scaled)) + + (scl_decay, inp_decay, new_log_lr, update_step, lr_attend, + attention_delta) = self._compute_rnn_state_projections( + state, new_param_state, grads_scaled) + + # Apply updates and store state variables. + if self.use_attention: + truth = state["true_param"] + updated_param = truth - update_step + attention_step = tf.reshape(lr_attend * attention_delta, + truth.get_shape()) + updated_attention.append(truth - attention_step) + else: + updated_param = param - update_step + updated_attention.append(updated_param) + updated_params.append(updated_param) + + # Collect the new state. + new_state = { + "parameter": new_param_state, + "scl_decay": scl_decay, + "inp_decay": inp_decay, + "true_param": updated_param, + } + if layer_state is not None: + new_state["layer"] = layer_state + + if self.dynamic_output_scale: + new_state["log_learning_rate"] = new_log_lr + + for i in range(self.num_gradient_scales): + new_state["grad_accum{}".format(i + 1)] = grads_accum[i] + new_state["ms{}".format(i + 1)] = mean_squared_gradients[i] + updated_states.append(new_state) + + updated_global_state = self._compute_updated_global_state([layer_state], + global_state) + + return (updated_params, updated_states, [updated_global_state], + updated_attention) + + def _compute_mean_log_lr(self, states): + """Computes the mean log learning rate across all variables.""" + if self.use_problem_lr_mean and self.use_relative_lr: + + sum_log_lr = 0. + count_log_lr = 0. + for state in states: + sum_log_lr += tf.reduce_sum(state["log_learning_rate"]) + # Note: get_shape().num_elements()=num elements in the original tensor. + count_log_lr += state["log_learning_rate"].get_shape().num_elements() + return sum_log_lr / count_log_lr + + def _compute_scaled_and_ms_grads(self, grad, state): + """Computes the scaled gradient and the mean squared gradients. + + Gradients are also accumulated across different timescales if appropriate. + + Args: + grad: The gradient tensor for this layer. + state: The optimizer state for this layer. + + Returns: + The scaled gradients, mean squared gradients, and accumulated gradients. + """ + input_decays = [state["inp_decay"]] + scale_decays = [state["scl_decay"]] + if self.use_multiple_scale_decays and self.num_gradient_scales > 1: + for i in range(self.num_gradient_scales - 1): + scale_decays.append(tf.sqrt(scale_decays[i])) + + for i in range(self.num_gradient_scales - 1): + # Each accumulator on twice the timescale of the one before. 
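    # Why sqrt doubles the timescale: a decay d averages over roughly
    # -1 / ln(d) steps (about 1 / (1 - d) for d near 1), and since
    # ln(sqrt(d)) = ln(d) / 2, sqrt(d) averages over twice as many steps,
    # e.g. d = 0.99 -> ~100 steps, sqrt(d) ~= 0.995 -> ~200 steps.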
+ input_decays.append(tf.sqrt(input_decays[i])) + grads_accum = [] + grads_scaled = [] + mean_squared_gradients = [] + + # populate the scaled gradients and associated mean_squared values + if self.num_gradient_scales > 0: + for i, decay in enumerate(input_decays): + if self.num_gradient_scales == 1: + # We don't accumulate if no scales, just take the current gradient. + grad_accum = grad + else: + # The state vars are 1-indexed. + old_accum = state["grad_accum{}".format(i + 1)] + grad_accum = grad * (1. - decay) + old_accum * decay + + grads_accum.append(grad_accum) + + sd = scale_decays[i if self.use_multiple_scale_decays else 0] + grad_scaled, ms = utils.rms_scaling(grad_accum, sd, + state["ms{}".format(i + 1)], + update_ms=True) + grads_scaled.append(grad_scaled) + mean_squared_gradients.append(ms) + + return grads_scaled, mean_squared_gradients, grads_accum + + def _extend_rnn_input(self, rnn_input, state, grads_scaled, + mean_squared_gradients, mean_log_lr): + """Computes additional rnn inputs and adds them to the rnn_input list.""" + if self.num_gradient_scales > 1 and self.use_grad_products: + # This gives a measure of curvature relative to input averaging + # lengthscale and to the learning rate + grad_products = [a * b for a, b in + zip(grads_scaled[:-1], grads_scaled[1:])] + rnn_input.extend([g for g in grad_products]) + + if self.use_log_means_squared: + log_means_squared = [tf.log(ms + 1e-16) + for ms in mean_squared_gradients] + + avg = tf.reduce_mean(log_means_squared, axis=0) + # This gives a measure of the signal vs. noise contribution to the + # gradient, at the current averaging lengthscale. If all the noise + # is averaged out, and if updates are small, these will be 0. + mean_log_means_squared = [m - avg for m in log_means_squared] + + rnn_input.extend([m for m in mean_log_means_squared]) + + if self.use_relative_lr or self.use_extreme_indicator: + if not self.dynamic_output_scale: + raise Exception("Relative LR and Extreme Indicator features " + "require dynamic_output_scale to be set to True.") + log_lr_vec = tf.reshape(state["log_learning_rate"], [-1, 1]) + if self.use_relative_lr: + if self.use_problem_lr_mean: + # Learning rate of this dimension vs. rest of target problem. + relative_lr = log_lr_vec - mean_log_lr + else: + # Learning rate of this dimension vs. rest of tensor. + relative_lr = log_lr_vec - tf.reduce_mean(log_lr_vec) + rnn_input.append(relative_lr) + if self.use_extreme_indicator: + # Indicator of extremely large or extremely small learning rate. + extreme_indicator = (tf.nn.relu(log_lr_vec - tf.log(1.)) - + tf.nn.relu(tf.log(1e-6) - log_lr_vec)) + rnn_input.append(extreme_indicator) + + if self.use_lr_shortcut: + log_lr_vec = tf.reshape(state["log_learning_rate"], [-1, 1]) + rnn_input.append(log_lr_vec - tf.log(1e-3)) + + def _update_rnn_cells(self, state, global_state, rnn_input_tensor, + use_additional_features): + """Updates the component RNN cells with the given state and tensor. + + Args: + state: The current state of the optimizer. + global_state: The current global RNN state. + rnn_input_tensor: The input tensor to the RNN. + use_additional_features: Whether the rnn input tensor contains additional + features beyond the scaled gradients (affects whether the rnn input + tensor is used as input to the RNN.) + + Returns: + layer_state: The new state of the per-tensor RNN. + new_param_state: The new state of the per-parameter RNN. 
+ """ + # lowest level (per parameter) + # input -> gradient for this parameter + # bias -> output from the layer RNN + with tf.variable_scope("Layer0_RNN"): + total_bias = None + if self.num_layers > 1: + sz = 3 * self.cells[0].state_size # size of the concatenated bias + param_bias = utils.affine([state["layer"]], sz, + scope="Param/Affine", + scale=FLAGS.hrnn_affine_scale, + random_seed=self.random_seed) + total_bias = param_bias + if self.num_layers == 3: + global_bias = utils.affine(global_state, sz, + scope="Global/Affine", + scale=FLAGS.hrnn_affine_scale, + random_seed=self.random_seed) + total_bias += global_bias + + new_param_state, _ = self.cells[0]( + rnn_input_tensor, state["parameter"], bias=total_bias) + + if self.num_layers > 1: + # middle level (per layer) + # input -> average hidden state from each parameter in this layer + # bias -> output from the RNN at the global level + with tf.variable_scope("Layer1_RNN"): + if not use_additional_features: + # Restore old behavior and only add the mean of the new params. + layer_input = tf.reduce_mean(new_param_state, 0, keep_dims=True) + else: + layer_input = tf.reduce_mean( + tf.concat((new_param_state, rnn_input_tensor), 1), 0, + keep_dims=True) + if self.num_layers == 3: + sz = 3 * self.cells[1].state_size + layer_bias = utils.affine(global_state, sz, + scale=FLAGS.hrnn_affine_scale, + random_seed=self.random_seed) + layer_state, _ = self.cells[1]( + layer_input, state["layer"], bias=layer_bias) + else: + layer_state, _ = self.cells[1](layer_input, state["layer"]) + else: + layer_state = None + + return layer_state, new_param_state + + def _compute_rnn_state_projections(self, state, new_param_state, + grads_scaled): + """Computes the RNN state-based updates to parameters and update steps.""" + # Compute the update direction (a linear projection of the RNN output). + update_weights = self.update_weights + + update_delta = utils.project(new_param_state, update_weights) + if self.use_gradient_shortcut: + # Include an affine projection of just the direction of the gradient + # so that RNN hidden states are freed up to store more complex + # functions of the gradient and other parameters. + grads_scaled_tensor = tf.concat([g for g in grads_scaled], 1) + update_delta += utils.affine(grads_scaled_tensor, 1, + scope="GradsToDelta", + include_bias=False, + vec_mean=1. / len(grads_scaled), + random_seed=self.random_seed) + if self.dynamic_output_scale: + denom = tf.sqrt(tf.reduce_mean(update_delta ** 2) + 1e-16) + + update_delta /= denom + + if self.use_attention: + attention_weights = self.attention_weights + attention_delta = utils.project(new_param_state, + attention_weights) + if self.use_gradient_shortcut: + attention_delta += utils.affine(grads_scaled_tensor, 1, + scope="GradsToAttnDelta", + include_bias=False, + vec_mean=1. / len(grads_scaled), + random_seed=self.random_seed) + if self.dynamic_output_scale: + attention_delta /= tf.sqrt( + tf.reduce_mean(attention_delta ** 2) + 1e-16) + else: + attention_delta = None + + # The updated decay is an affine projection of the hidden state. + scl_decay = utils.project(new_param_state, self.scl_decay_weights, + bias=self.scl_decay_bias, + activation=tf.nn.sigmoid) + # This is only used if learnable_decay and num_gradient_scales > 1 + inp_decay = utils.project(new_param_state, self.inp_decay_weights, + bias=self.inp_decay_bias, + activation=tf.nn.sigmoid) + + # Also update the learning rate. 
+ lr_param, lr_attend, new_log_lr = self._compute_new_learning_rate( + state, new_param_state) + + update_step = tf.reshape(lr_param * update_delta, + state["true_param"].get_shape()) + + return (scl_decay, inp_decay, new_log_lr, update_step, lr_attend, + attention_delta) + + def _compute_new_learning_rate(self, state, new_param_state): + if self.dynamic_output_scale: + # Compute the change in learning rate (an affine projection of the + # RNN state, passed through a sigmoid or log depending on flags). + # Update the learning rate, w/ momentum. + lr_change = utils.project(new_param_state, self.lr_weights, + bias=self.lr_bias) + step_log_lr = state["log_learning_rate"] + lr_change + + # Clip the log learning rate to the flag at the top end, and to + # (log(min int32) - 1) at the bottom + + # Check out this hack: we want to be able to compute the gradient + # of the downstream result w.r.t lr weights and bias, even if the + # value of step_log_lr is outside the clip range. So we clip, + # subtract off step_log_lr, and wrap all that in a stop_gradient so + # TF never tries to take the gradient of the clip... or the + # subtraction. Then we add BACK step_log_lr so that downstream still + # receives the clipped value. But the GRADIENT of step_log_lr will + # be the gradient of the unclipped value, which we added back in + # after stop_gradients. + step_log_lr += tf.stop_gradient( + tf.clip_by_value(step_log_lr, -33, self.max_log_lr) + - step_log_lr) + + lr_momentum_logit = tf.get_variable( + "learning_rate_momentum_logit", + initializer=FLAGS.learning_rate_momentum_logit_init) + lrm = tf.nn.sigmoid(lr_momentum_logit) + new_log_lr = (lrm * state["log_learning_rate"] + + (1. - lrm) * step_log_lr) + param_stepsize_offset = tf.get_variable("param_stepsize_offset", + initializer=-1.) + lr_param = tf.exp(step_log_lr + param_stepsize_offset) + lr_attend = tf.exp(step_log_lr) if self.use_attention else lr_param + else: + # Dynamic output scale is off, LR param is always 1. + lr_param = 2. * utils.project(new_param_state, self.lr_weights, + bias=self.lr_bias, + activation=tf.nn.sigmoid) + new_log_lr = None + lr_attend = lr_param + + return lr_param, lr_attend, new_log_lr + + def _compute_updated_global_state(self, layer_states, global_state): + """Computes the new global state gives the layers states and old state. + + Args: + layer_states: The current layer states. + global_state: The old global state. + + Returns: + The updated global state. + """ + updated_global_state = [] + if self.num_layers == 3: + # highest (global) layer + # input -> average hidden state from each layer-specific RNN + # bias -> None + with tf.variable_scope("Layer2_RNN", reuse=self.reuse_global_state): + self.reuse_global_state = True + global_input = tf.reduce_mean(tf.concat(layer_states, 0), 0, + keep_dims=True) + updated_global_state, _ = self.cells[2](global_input, global_state[0]) + return updated_global_state + + def apply_gradients(self, grads_and_vars, global_step=None, name=None): + """Overwrites the tf.train.Optimizer interface for applying gradients.""" + + # Pull out the variables. + grads_and_vars = tuple(grads_and_vars) # Make sure repeat iteration works. 
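The clip-but-keep-the-gradient idiom described in the long comment above is a general TensorFlow pattern, independent of this optimizer: the forward pass sees the clipped value, while the backward pass sees the gradient of the unclipped tensor. A self-contained TF 1.x sketch of the same trick, with toy values that are not from this patch:

```python
import tensorflow as tf  # TF 1.x, as used throughout this patch

x = tf.constant([-50.0, 0.0, 50.0])

# Forward pass sees the clipped value; backward pass sees the identity,
# because everything inside tf.stop_gradient contributes no gradient.
clipped = x + tf.stop_gradient(tf.clip_by_value(x, -33.0, 10.0) - x)

grad = tf.gradients(tf.reduce_sum(clipped), x)[0]

with tf.Session() as sess:
  print(sess.run(clipped))  # [-33.  0.  10.]
  print(sess.run(grad))     # [ 1.  1.  1.] -- gradient of the unclipped value
```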
+ for g, v in grads_and_vars: + if not isinstance(g, (tf.Tensor, tf.IndexedSlices, type(None))): + raise TypeError( + "Gradient must be a Tensor, IndexedSlices, or None: %s" % g) + if not isinstance(v, tf.Variable): + raise TypeError( + "Variable must be a tf.Variable: %s" % v) + if g is not None: + self._assert_valid_dtypes([g, v]) + var_list = [v for g, v in grads_and_vars if g is not None] + if not var_list: + raise ValueError("No gradients provided for any variable: %s" % + (grads_and_vars,)) + + # Create slots for the variables. + with tf.control_dependencies(None): + self._create_slots(var_list) + + # Store update ops in this list. + with tf.op_scope([], name, self._name) as name: + + # Prepare the global state. + with tf.variable_scope(self._name, reuse=self.reuse_global_state): + gs = self._initialize_global_state() + if gs: + global_state = [tf.get_variable("global_state", initializer=gs[0])] + else: + global_state = [] + + # Get the states for each variable in the list. + states = [{key: self.get_slot(var, key) for key in self.get_slot_names()} + for var in var_list] + + # Compute updated values. + grads, params = zip(*grads_and_vars) + args = (params, grads, states, global_state) + updates = self._compute_updates(*args) + new_params, new_states, new_global_state, new_attention = updates + # Assign op for new global state. + update_ops = [tf.assign(gs, ngs) + for gs, ngs in zip(global_state, new_global_state)] + + # Create the assign ops for the params and state variables. + args = (params, states, new_params, new_attention, new_states) + for var, state, new_var, new_var_attend, new_state in zip(*args): + # Assign updates to the state variables. + state_assign_ops = [tf.assign(state_var, new_state[key]) + for key, state_var in state.items()] + + # Update the parameter. + with tf.control_dependencies(state_assign_ops): + if self.use_attention: + # Assign to the attended location, rather than the actual location + # so that the gradients are computed where attention is. + param_update_op = var.assign(new_var_attend) + else: + param_update_op = var.assign(new_var) + + with tf.name_scope("update_" + var.op.name): #, tf.colocate_with(var): + update_ops.append(param_update_op) + + real_params = [self.get_slot(var, "true_param") for var in var_list] + + if global_step is None: + # NOTE: if using the optimizer in a non-test-optimizer setting (e.g. + # on Inception), remove the real_params return value. Otherwise + # the code will throw an error. + return self._finish(update_ops, name), real_params + else: + with tf.control_dependencies([self._finish(update_ops, "update")]): + return state_ops.assign_add(global_step, 1, name=name).op, real_params diff --git a/models/research/learned_optimizer/optimizer/learning_rate_schedule.py b/models/research/learned_optimizer/optimizer/learning_rate_schedule.py new file mode 100644 index 0000000000000000000000000000000000000000..53db8addd3d152bfa02630ec6e37f0cc1776abc8 --- /dev/null +++ b/models/research/learned_optimizer/optimizer/learning_rate_schedule.py @@ -0,0 +1,60 @@ +# Copyright 2017 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
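A simplified, standalone illustration of the state-then-parameter assignment pattern used in `apply_gradients` above: new state values are written to their slot variables first, and the parameter assignment is placed under a control dependency so it only runs afterwards. Everything here (the momentum slot, the decay constant) is hypothetical and only mirrors the structure of the real code:

```python
import tensorflow as tf  # TF 1.x

var = tf.Variable([1.0, 2.0])
momentum_slot = tf.Variable([0.0, 0.0])   # stands in for an optimizer state slot
grad = tf.constant([0.1, 0.1])

new_momentum = 0.9 * momentum_slot + grad
state_assign_ops = [tf.assign(momentum_slot, new_momentum)]

with tf.control_dependencies(state_assign_ops):
  # The parameter update only runs after the state slots are refreshed.
  update_op = var.assign(var - 0.01 * new_momentum)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(update_op)
  print(sess.run([var, momentum_slot]))
```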
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A trainable optimizer that learns a learning rate schedule.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from learned_optimizer.optimizer import trainable_optimizer + + +class LearningRateSchedule(trainable_optimizer.TrainableOptimizer): + """Learns a learning rate schedule over a fixed number of iterations.""" + + def __init__(self, initial_rate=0.0, n_steps=1000, **kwargs): + """Initializes the learning rates.""" + self.max_index = tf.constant(n_steps-1, dtype=tf.int32) + + with tf.variable_scope(trainable_optimizer.OPTIMIZER_SCOPE): + initializer = tf.constant_initializer(initial_rate) + self.learning_rates = tf.get_variable("learning_rates", + shape=([n_steps,]), + initializer=initializer) + + super(LearningRateSchedule, self).__init__("LRS", ["itr"], **kwargs) + + def _initialize_state(self, var): + """Return a dictionary mapping names of state variables to their values.""" + return { + "itr": tf.constant(0, dtype=tf.int32), + } + + def _compute_update(self, param, grad, state): + """Compute updates of parameters.""" + + # get the learning rate at the current index, if the index + # is greater than the number of available learning rates, + # use the last one + index = tf.minimum(state["itr"], self.max_index) + learning_rate = tf.gather(self.learning_rates, index) + + # update the parameters: parameter - learning_rate * gradient + updated_param = param - tf.scalar_mul(learning_rate, grad) + + return updated_param, {"itr": state["itr"] + 1} diff --git a/models/research/learned_optimizer/optimizer/rnn_cells.py b/models/research/learned_optimizer/optimizer/rnn_cells.py new file mode 100644 index 0000000000000000000000000000000000000000..3d68de04ca5318bb0f264d4f4647ddbc6fbe08e0 --- /dev/null +++ b/models/research/learned_optimizer/optimizer/rnn_cells.py @@ -0,0 +1,68 @@ +# Copyright 2017 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Custom RNN cells for hierarchical RNNs.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from learned_optimizer.optimizer import utils + + +class BiasGRUCell(tf.contrib.rnn.RNNCell): + """GRU cell (cf. 
http://arxiv.org/abs/1406.1078) with an additional bias.""" + + def __init__(self, num_units, activation=tf.tanh, scale=0.1, + gate_bias_init=0., random_seed=None): + self._num_units = num_units + self._activation = activation + self._scale = scale + self._gate_bias_init = gate_bias_init + self._random_seed = random_seed + + @property + def state_size(self): + return self._num_units + + @property + def output_size(self): + return self._num_units + + def __call__(self, inputs, state, bias=None): + # Split the injected bias vector into a bias for the r, u, and c updates. + if bias is None: + bias = tf.zeros((1, 3)) + + r_bias, u_bias, c_bias = tf.split(bias, 3, 1) + + with tf.variable_scope(type(self).__name__): # "BiasGRUCell" + with tf.variable_scope("gates"): # Reset gate and update gate. + proj = utils.affine([inputs, state], 2 * self._num_units, + scale=self._scale, bias_init=self._gate_bias_init, + random_seed=self._random_seed) + r_lin, u_lin = tf.split(proj, 2, 1) + r, u = tf.nn.sigmoid(r_lin + r_bias), tf.nn.sigmoid(u_lin + u_bias) + + with tf.variable_scope("candidate"): + proj = utils.affine([inputs, r * state], self._num_units, + scale=self._scale, random_seed=self._random_seed) + c = self._activation(proj + c_bias) + + new_h = u * state + (1 - u) * c + + return new_h, new_h diff --git a/models/research/learned_optimizer/optimizer/trainable_adam.py b/models/research/learned_optimizer/optimizer/trainable_adam.py new file mode 100644 index 0000000000000000000000000000000000000000..638217f1b723da8633dc7a82623392eaaf190829 --- /dev/null +++ b/models/research/learned_optimizer/optimizer/trainable_adam.py @@ -0,0 +1,210 @@ +# Copyright 2017 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A trainable ADAM optimizer that learns its internal variables.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf + +from learned_optimizer.optimizer import trainable_optimizer as opt +from learned_optimizer.optimizer import utils + + +class TrainableAdam(opt.TrainableOptimizer): + """Adam optimizer with learnable scalar parameters. + + See Kingma et. al., 2014 for algorithm (http://arxiv.org/abs/1412.6980). + """ + + def __init__(self, + learning_rate=1e-3, + beta1=0.9, + beta2=0.999, + epsilon=1e-8, + **kwargs): + """Initializes the TrainableAdam optimizer with the given initial values. + + Args: + learning_rate: The learning rate (default: 1e-3). + beta1: The exponential decay rate for the 1st moment estimates. + beta2: The exponential decay rate for the 2nd moment estimates. + epsilon: A small constant for numerical stability. + **kwargs: Any additional keyword arguments for TrainableOptimizer. + + Raises: + ValueError: if the learning rate or epsilon is not positive + ValueError: if beta1 or beta2 is not in (0, 1). 
+ """ + if learning_rate <= 0: + raise ValueError("Learning rate must be positive.") + if epsilon <= 0: + raise ValueError("Epsilon must be positive.") + if not 0 < beta1 < 1 or not 0 < beta2 < 1: + raise ValueError("Beta values must be between 0 and 1, exclusive.") + + self._reuse_vars = False + + with tf.variable_scope(opt.OPTIMIZER_SCOPE): + def inv_sigmoid(x): + return np.log(x / (1.0 - x)) + + self.log_learning_rate = tf.get_variable( + "log_learning_rate", + shape=[], + initializer=tf.constant_initializer(np.log(learning_rate))) + self.beta1_logit = tf.get_variable( + "beta1_logit", + shape=[], + initializer=tf.constant_initializer(inv_sigmoid(beta1))) + self.beta2_logit = tf.get_variable( + "beta2_logit", + shape=[], + initializer=tf.constant_initializer(inv_sigmoid(beta2))) + self.log_epsilon = tf.get_variable( + "log_epsilon", + shape=[], + initializer=tf.constant_initializer(np.log(epsilon))) + + # Key names are derived from Algorithm 1 described in + # https://arxiv.org/pdf/1412.6980.pdf + state_keys = ["m", "v", "t"] + super(TrainableAdam, self).__init__("Adam", state_keys, **kwargs) + + def _initialize_state(self, var): + """Returns a dictionary mapping names of state variables to their values.""" + vectorized_shape = var.get_shape().num_elements(), 1 + + return {key: tf.zeros(vectorized_shape) for key in self.state_keys} + + def _compute_update(self, param, grad, state): + """Calculates the new internal state and parameters. + + If the gradient is sparse, updates the appropriate slices in the internal + state and stacks the update tensor. + + Args: + param: A tensor of parameters. + grad: A tensor of gradients with the same shape as param. + state: A dictionary containing any state for the optimizer. + + Returns: + updated_param: The updated parameters. + updated_state: The updated state variables in a dictionary. + """ + + with tf.variable_scope(opt.OPTIMIZER_SCOPE) as scope: + + if self._reuse_vars: + scope.reuse_variables() + else: + self._reuse_vars = True + + (grad_values, first_moment, second_moment, timestep, grad_indices + ) = self._extract_gradients_and_internal_state( + grad, state, tf.shape(param)) + + beta1 = tf.nn.sigmoid(self.beta1_logit) + beta2 = tf.nn.sigmoid(self.beta2_logit) + epsilon = tf.exp(self.log_epsilon) + 1e-10 + learning_rate = tf.exp(self.log_learning_rate) + + old_grad_shape = tf.shape(grad_values) + grad_values = tf.reshape(grad_values, [-1, 1]) + + new_timestep = timestep + 1 + new_first_moment = self._update_adam_estimate( + first_moment, grad_values, beta1) + new_second_moment = self._debias_adam_estimate( + second_moment, tf.square(grad_values), beta2) + + debiased_first_moment = self._debias_adam_estimate( + new_first_moment, beta1, new_timestep) + debiased_second_moment = self._debias_adam_estimate( + new_second_moment, beta2, new_timestep) + + # Propagating through the square root of 0 is very bad for stability. 
+ update = (learning_rate * debiased_first_moment / + (tf.sqrt(debiased_second_moment + 1e-10) + epsilon)) + + update = tf.reshape(update, old_grad_shape) + + if grad_indices is not None: + param_shape = tf.shape(param) + update = utils.stack_tensor( + update, grad_indices, param, param_shape[:1]) + new_first_moment = utils.update_slices( + new_first_moment, grad_indices, state["m"], param_shape) + new_second_moment = utils.update_slices( + new_second_moment, grad_indices, state["v"], param_shape) + new_timestep = utils.update_slices( + new_timestep, grad_indices, state["t"], param_shape) + + new_param = param - update + + # collect the update and new state + new_state = { + "m": new_first_moment, + "v": new_second_moment, + "t": new_timestep + } + + return new_param, new_state + + def _update_adam_estimate(self, estimate, value, beta): + """Returns a beta-weighted average of estimate and value.""" + return (beta * estimate) + ((1 - beta) * value) + + def _debias_adam_estimate(self, estimate, beta, t_step): + """Returns a debiased estimate based on beta and the timestep.""" + return estimate / (1 - tf.pow(beta, t_step)) + + def _extract_gradients_and_internal_state(self, grad, state, param_shape): + """Extracts the gradients and relevant internal state. + + If the gradient is sparse, extracts the appropriate slices from the state. + + Args: + grad: The current gradient. + state: The current state. + param_shape: The shape of the parameter (used if gradient is sparse). + + Returns: + grad_values: The gradient value tensor. + first_moment: The first moment tensor (internal state). + second_moment: The second moment tensor (internal state). + timestep: The current timestep (internal state). + grad_indices: The indices for the gradient tensor, if sparse. + None otherwise. + """ + grad_values = grad + grad_indices = None + first_moment = state["m"] + second_moment = state["v"] + timestep = state["t"] + + if isinstance(grad, tf.IndexedSlices): + grad_indices, grad_values = utils.accumulate_sparse_gradients(grad) + first_moment = utils.slice_tensor( + first_moment, grad_indices, param_shape) + second_moment = utils.slice_tensor( + second_moment, grad_indices, param_shape) + timestep = utils.slice_tensor(timestep, grad_indices, param_shape) + + return grad_values, first_moment, second_moment, timestep, grad_indices + diff --git a/models/research/learned_optimizer/optimizer/trainable_optimizer.py b/models/research/learned_optimizer/optimizer/trainable_optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..955112a9dd1d3b0af5ae2f5f0fe8eff65d2dbfc7 --- /dev/null +++ b/models/research/learned_optimizer/optimizer/trainable_optimizer.py @@ -0,0 +1,574 @@ +# Copyright 2017 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
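Before the base class below, it may help to see the `TrainableAdam` parameterization in isolation: the learning rate and epsilon live in log-space and the betas as logits, so unconstrained meta-gradient updates always map back to valid Adam hyperparameters. A NumPy sketch of one resulting step, loosely mirroring `_compute_update` with made-up values:

```python
import numpy as np

def sigmoid(z):
  return 1.0 / (1.0 + np.exp(-z))

# Unconstrained (meta-learned) parameters.
log_lr, log_eps = np.log(1e-3), np.log(1e-8)
beta1_logit, beta2_logit = np.log(0.9 / 0.1), np.log(0.999 / 0.001)

# Mapped back to valid Adam hyperparameters.
lr, eps = np.exp(log_lr), np.exp(log_eps)
beta1, beta2 = sigmoid(beta1_logit), sigmoid(beta2_logit)

# One Adam step on a toy gradient.
g, m, v, t = 0.5, 0.0, 0.0, 0
t += 1
m = beta1 * m + (1 - beta1) * g            # cf. _update_adam_estimate
v = beta2 * v + (1 - beta2) * g ** 2
m_hat = m / (1 - beta1 ** t)               # cf. _debias_adam_estimate
v_hat = v / (1 - beta2 ** t)
update = lr * m_hat / (np.sqrt(v_hat + 1e-10) + eps)
print(update)   # roughly lr, since m_hat / sqrt(v_hat) ~ 1 on the first step
```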
+# ============================================================================== + +"""A base class definition for trainable optimizers.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import itertools + +import tensorflow as tf + +from tensorflow.python.framework import tensor_shape + +OPTIMIZER_SCOPE = "LOL" +_LOCAL_VARIABLE_PREFIX = "local_state_" +_LOCAL_STATE_VARIABLE_COLLECTION = "local_state_collection" +EPSILON = 1e-6 + + +class TrainableOptimizer(tf.train.Optimizer): + """Base class for trainable optimizers. + + A trainable optimizer is an optimizer that has parameters that can themselves + be learned (meta-optimized). + + Subclasses must implement: + _compute_update(self, param, grad, state) + """ + + def __init__(self, name, state_keys, use_attention=False, + use_log_objective=False, obj_train_max_multiplier=-1, + use_second_derivatives=True, use_numerator_epsilon=False, + **kwargs): + """Initializes the optimizer with the given name and settings. + + Args: + name: The name string for this optimizer. + state_keys: The names of any required state variables (list) + use_attention: Whether this optimizer uses attention (Default: True) + use_log_objective: Whether this optimizer uses the logarithm of the + objective when computing the loss (Default: False) + obj_train_max_multiplier: The maximum multiplier for the increase in the + objective before meta-training is stopped. If <= 0, meta-training is + not stopped early. (Default: -1) + use_second_derivatives: Whether this optimizer uses second derivatives in + meta-training. This should be set to False if some second derivatives + in the meta-training problem set are not defined in Tensorflow. + (Default: True) + use_numerator_epsilon: Whether to use epsilon in the numerator when + scaling the problem objective during meta-training. (Default: False) + **kwargs: Any additional keyword arguments. + """ + self.use_second_derivatives = use_second_derivatives + self.state_keys = sorted(state_keys) + self.use_attention = use_attention + self.use_log_objective = use_log_objective + self.obj_train_max_multiplier = obj_train_max_multiplier + self.use_numerator_epsilon = use_numerator_epsilon + + use_locking = False + super(TrainableOptimizer, self).__init__(use_locking, name) + + def _create_slots(self, var_list): + """Creates all slots needed by the variables. + + Args: + var_list: A list of `Variable` objects. + """ + for var in var_list: + init_states = self._initialize_state(var) + for slot_name in sorted(init_states): + slot_var_name = "{}_{}".format(self.get_name(), slot_name) + value = init_states[slot_name] + self._get_or_make_slot(var, value, slot_name, slot_var_name) + + def _initialize_state(self, var): + """Initializes any state required for this variable. + + Args: + var: a tensor containing parameters to be optimized + + Returns: + state: a dictionary mapping state keys to initial state values (tensors) + """ + return {} + + def _initialize_global_state(self): + """Initializes any global state values.""" + return [] + + def _apply_common(self, grad, var): + """Applies the optimizer updates to the variables. + + Note: this should only get called via _apply_dense or _apply_sparse when + using the optimizer via optimizer.minimize or optimizer.apply_gradients. + During meta-training, the optimizer.train function should be used to + construct an optimization path that is differentiable. + + Args: + grad: A tensor representing the gradient. 
+ var: A tf.Variable with the same shape as grad. + + Returns: + update_op: A tensorflow op that assigns new values to the variable, and + also defines dependencies that update the state variables for the + optimizer. + """ + state = {key: self.get_slot(var, key) for key in self.get_slot_names()} + new_var, new_state = self._compute_update(var, grad, state) + state_assign_ops = [tf.assign(state_var, new_state[key]) + for key, state_var in state.items()] + with tf.control_dependencies(state_assign_ops): + update_op = var.assign(new_var) + + return update_op + + def _apply_dense(self, grad, var): + """Adds ops to apply dense gradients to 'var'.""" + return self._apply_common(grad, var) + + def _apply_sparse(self, grad, var): + """Adds ops to apply sparse gradients to 'var'.""" + return self._apply_common(grad, var) + + def _compute_update(self, param, grad, state): + """Computes the update step for optimization. + + Args: + param: A tensor of parameters to optimize. + grad: The gradient tensor of the objective with respect to the parameters. + (It has the same shape as param.) + state: A dictionary containing any extra state required by the optimizer. + + Returns: + updated_params: The updated parameters. + updated_state: The dictionary of updated state variable(s). + """ + raise NotImplementedError + + def _compute_updates(self, params, grads, states, global_state): + """Maps the compute update functions for each parameter. + + This function can be overriden by a subclass if the subclass wants to + combine information across the different parameters in the list. + + Args: + params: A list of parameter tensors. + grads: A list of gradients corresponding to each parameter. + states: A list of state variables corresponding to each parameter. + global_state: A list of global state variables for the problem. + + Returns: + new_params: The updated parameters. + new_states: The updated states. + new_global_state: The updated global state. + attention_params: A list of attention parameters. This is the same as + new_params if the optimizer does not use attention. + """ + # Zip up the arguments to _compute_update. + args = zip(params, grads, states) + + # Call compute_update on each set of parameter/gradient/state args. + new_params, new_states = zip(*list( + itertools.starmap(self._compute_update, args))) + + # Global state is unused in the basic case, just pass it through. + return list(new_params), list(new_states), global_state, list(new_params) + + def train(self, problem, dataset): + """Creates graph operations to train the optimizer. + + Args: + problem: A problem_generator.Problem instance to train on. + dataset: A datasets.Dataset tuple to use when training. + + Returns: + meta_objective: A tensorflow operation for computing the meta-objective + obj_weights: A tensor placeholder for feeding in the objective weights + obj_values: The subproblem objective values during optimization + batches: The batch indexes tensor for overriding with feed_dict + first_unroll: A placeholder signifying if this is a first unroll + (this will propagate the gradients slightly differently). + reset_state: A placeholder signifying that the rnn state should be reset. + output_state: The final state of the optimizer + init_loop_vars_to_override: Local variables that can be assigned to + propagate the optimizer and problem state for unrolling + final_loop_vals: Final values of the loop variables that can be + assigned to init_loop_vars_to_override. 
+ """ + + # Placeholder for the objective weights + obj_weights = tf.placeholder(tf.float32) + num_iter = tf.shape(obj_weights)[0] + + # Unpack the dataset and generate the minibatches for training + data, labels = dataset + # Convert the ndarrays to tensors so we can pass them back in via feed_dict + data = tf.constant(data) + labels = tf.constant(labels) + batches = tf.placeholder(tf.int32) + first_unroll = tf.placeholder_with_default(False, []) + reset_state = tf.placeholder_with_default(False, []) + + training_output = collections.namedtuple("TrainingOutput", + ["metaobj", + "obj_weights", + "problem_objectives", + "initial_obj", + "batches", + "first_unroll", + "reset_state", + "output_state", + "init_loop_vars", + "output_loop_vars"]) + + def loop_body(itr, obj_accum, params, attend_params, flattened_states, + global_state, all_obj, unused_init_obj, data, + labels, batches): + """Body of the meta-training while loop for optimizing a sub-problem. + + Args: + itr: The current meta-training iteration. + obj_accum: The accumulated objective over all training steps so far. + params: The parameters of the sub-problem. + attend_params: The parameters of the sub-problems at the attended + location. + flattened_states: The states of the trainable optimizer, sorted and + flattened into a list (since a while loop can't handle nested lists + or dictionaries). + global_state: The global state of the optimizer. + all_obj: The list of all objective values in the training process. + unused_init_obj: The initial objective (unused here, but needed in the + variable list because it's used in a stopping condition in the + loop_cond.) + data: The data for this problem. + labels: The labels corresponding to the data. + batches: The batch indexes needed for shuffled minibatch creation. + + Returns: + itr: The updated meta-training iteration. + obj_accum: The updated accumulated objective. + params: The new parameters of the sub-problem. + attend_params: The new parameters of the sub-problems at the attended + location. + flattened_states: The new states of the trainable optimizer. + global_state: The updated global state. + all_obj: The updates list of all objective values. + unused_init_obj: The initial objective. + data: The data for this problem. + labels: The labels corresponding to the data. + batches: The batch indexes needed for shuffled minibatch creation. + """ + batch_indices = tf.gather(batches, itr) + batch_data = tf.gather(data, batch_indices) + batch_labels = tf.gather(labels, batch_indices) + + # Compute the objective over the entire dataset (full batch). 
+ obj = problem.objective(params, data, labels) + + # Compute the gradients on just the current batch + if self.use_attention: + current_obj = problem.objective(attend_params, batch_data, batch_labels) + grads = problem.gradients(current_obj, attend_params) + else: + current_obj = problem.objective(params, batch_data, batch_labels) + grads = problem.gradients(current_obj, params) + + if not self.use_second_derivatives: + new_grads = [] + for grad in grads: + if isinstance(grad, tf.IndexedSlices): + new_grads.append( + tf.IndexedSlices(tf.stop_gradient(grad.values), grad.indices)) + else: + new_grads.append(tf.stop_gradient(grad)) + grads = new_grads + + # store the objective value for the entire problem at each iteration + all_obj = tf.concat([all_obj, tf.reshape(obj, (1,))], 0) + + # accumulate the weighted objective for the entire dataset + acc = tf.gather(obj_weights, itr) * obj + + obj_accum = tf.add(obj_accum, acc) + # Set the shape to keep the shape invariant for obj_accum. Without this, + # the graph builder thinks the tensor shape is unknown on the 2nd iter. + obj_accum.set_shape([]) + + # convert flattened_states to dictionaries + dict_states = [dict(zip(self.state_keys, flat_state)) + for flat_state in flattened_states] + + # compute the new parameters and states + args = (params, grads, dict_states, global_state) + updates = self._compute_updates(*args) + new_params, new_states, new_global_state, new_attend_params = updates + + # flatten the states + new_flattened_states = map(flatten_and_sort, new_states) + + return [itr + 1, obj_accum, new_params, new_attend_params, + new_flattened_states, new_global_state, all_obj, unused_init_obj, + data, labels, batches] + + def loop_cond(itr, obj_accum, unused_params, unused_attend_params, + unused_flattened_states, unused_global_state, all_obj, + init_obj, *args): + """Termination conditions of the sub-problem optimization loop.""" + del args # unused + + cond1 = tf.less(itr, num_iter) # We've run < num_iter times + cond2 = tf.is_finite(obj_accum) # The objective is still finite + + if self.obj_train_max_multiplier > 0: + current_obj = tf.gather(all_obj, itr) + # Account for negative init_obj too + max_diff = (self.obj_train_max_multiplier - 1) * tf.abs(init_obj) + max_obj = init_obj + max_diff + # The objective is a reasonable multiplier of the original objective + cond3 = tf.less(current_obj, max_obj) + + return tf.logical_and(tf.logical_and(cond1, cond2), cond3, + name="training_loop_cond") + else: + return tf.logical_and(cond1, cond2, name="training_loop_cond") + + init = self._initialize_training_loop_parameters( + problem, data, labels, batches, first_unroll, reset_state) + loop_vars, invariants, initial_obj, init_loop_vars_to_override = init + + loop_output = tf.while_loop(loop_cond, loop_body, loop_vars, + swap_memory=True, shape_invariants=invariants) + meta_obj, problem_objectives = loop_output[1], loop_output[6] + + # The meta objective is normalized by the initial objective at the start of + # the series of partial unrolls. 
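To make the loop above concrete: `obj_accum` is a weighted sum of the full-batch objective at each inner step, and (with `use_log_objective`) the accumulated value is later normalized against the objective at the start of the unroll by `scale_objective`. A rough NumPy sketch of that scaling, using invented objective values rather than anything from this patch:

```python
import numpy as np

# Hypothetical full-batch objective values over a 5-step unroll.
all_objs = np.array([2.0, 1.5, 1.1, 0.9, 0.8])
obj_weights = np.ones_like(all_objs)        # fed via the obj_weights placeholder
initial_obj = all_objs[0]
eps = 1e-6

total_obj = np.sum(obj_weights * all_objs)  # what obj_accum accumulates

# use_log_objective=True (without the numerator epsilon): mean log improvement
# relative to the objective at the start of this series of unrolls.
scaled = np.mean(np.log(all_objs / (initial_obj + eps) + eps))
print(total_obj, scaled)                    # scaled < 0 when the problem improves
```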
+ scaled_meta_objective = self.scale_objective( + meta_obj, problem_objectives, initial_obj) + + final_loop_vals = ( + [initial_obj] + loop_output[2] + loop_output[3] + loop_output[5]) + final_loop_vals.extend(itertools.chain(*loop_output[4])) + + return training_output(scaled_meta_objective, + obj_weights, + problem_objectives, + initial_obj, + batches, + first_unroll, + reset_state, + loop_output[4], + init_loop_vars_to_override, + final_loop_vals) + + def _initialize_training_loop_parameters( + self, problem, data, labels, batches, first_unroll, reset_state): + """Initializes the vars and params needed for the training process. + + Args: + problem: The problem being optimized. + data: The data for the problem. + labels: The corresponding labels for the data. + batches: The indexes needed to create shuffled batches of the data. + first_unroll: Whether this is the first unroll in a partial unrolling. + reset_state: Whether RNN state variables should be reset. + + Returns: + loop_vars: The while loop variables for training. + invariants: The corresponding variable shapes (required by while loop). + initial_obj: The initial objective (used later for scaling). + init_loop_vars_to_override: The loop vars that can be overridden when + performing training via partial unrolls. + """ + # Extract these separately so we don't have to make inter-variable + # dependencies. + initial_tensors = problem.init_tensors() + + return_initial_tensor_values = first_unroll + initial_params_vars, initial_params = local_state_variables( + initial_tensors, return_initial_tensor_values) + initial_attend_params_vars, initial_attend_params = local_state_variables( + initial_tensors, return_initial_tensor_values) + # Recalculate the initial objective for the list on each partial unroll with + # the new initial_params. initial_obj holds the value from the very first + # unroll. + initial_obj_init = problem.objective(initial_params, data, labels) + return_initial_obj_init = first_unroll + [initial_obj_var], [initial_obj] = local_state_variables( + [initial_obj_init], return_initial_obj_init) + + # Initialize the loop variables. + initial_itr = tf.constant(0, dtype=tf.int32) + initial_meta_obj = tf.constant(0, dtype=tf.float32) + # N.B. the use of initial_obj_init here rather than initial_obj + initial_problem_objectives = tf.reshape(initial_obj_init, (1,)) + + # Initialize the extra state. + initial_state_vars = [] + initial_state = [] + state_shapes = [] + return_initial_state_values = reset_state + for param in initial_tensors: + param_state_vars, param_state = local_state_variables( + flatten_and_sort(self._initialize_state(param)), + return_initial_state_values) + + initial_state_vars.append(param_state_vars) + initial_state.append(param_state) + state_shapes.append([f.get_shape() for f in param_state]) + + # Initialize any global (problem-level) state. + initial_global_state_vars, initial_global_state = local_state_variables( + self._initialize_global_state(), return_initial_state_values) + + global_shapes = [] + for item in initial_global_state: + global_shapes.append(item.get_shape()) + + # build the list of loop variables: + loop_vars = [ + initial_itr, + initial_meta_obj, + initial_params, # Local variables. + initial_attend_params, # Local variables. + initial_state, # Local variables. + initial_global_state, # Local variables. + initial_problem_objectives, + initial_obj, # Local variable. 
+ data, + labels, + batches, + ] + + invariants = [ + initial_itr.get_shape(), + initial_meta_obj.get_shape(), + [t.get_shape() for t in initial_params], + [t.get_shape() for t in initial_attend_params], + state_shapes, + global_shapes, + tensor_shape.TensorShape([None]), # The problem objectives list grows + initial_obj.get_shape(), + tensor_shape.unknown_shape(), # Placeholder shapes are unknown + tensor_shape.unknown_shape(), + tensor_shape.unknown_shape(), + ] + + # Initialize local variables that we will override with final tensors at the + # next iter. + init_loop_vars_to_override = ( + [initial_obj_var] + initial_params_vars + initial_attend_params_vars + + initial_global_state_vars) + init_loop_vars_to_override.extend(itertools.chain(*initial_state_vars)) + + return loop_vars, invariants, initial_obj, init_loop_vars_to_override + + def scale_objective(self, total_obj, all_objs, initial_obj, + obj_scale_eps=1e-6): + """Normalizes the objective based on the initial objective value. + + Args: + total_obj: The total accumulated objective over the training run. + all_objs: A list of all the individual objectives over the training run. + initial_obj: The initial objective value. + obj_scale_eps: The epsilon value to use in computations for stability. + + Returns: + The scaled objective as a single value. + """ + if self.use_log_objective: + if self.use_numerator_epsilon: + scaled_problem_obj = ((all_objs + obj_scale_eps) / + (initial_obj + obj_scale_eps)) + log_scaled_problem_obj = tf.log(scaled_problem_obj) + else: + scaled_problem_obj = all_objs / (initial_obj + obj_scale_eps) + log_scaled_problem_obj = tf.log(scaled_problem_obj + obj_scale_eps) + return tf.reduce_mean(log_scaled_problem_obj) + else: + return total_obj / (initial_obj + obj_scale_eps) + + +def local_state_variables(init_values, return_init_values): + """Create local variables initialized from init_values. + + This will create local variables from a list of init_values. Each variable + will be named based on the value's shape and dtype. + + As a convenience, a boolean tensor allows you to return value from + the created local variable or from the original init value. + + Args: + init_values: iterable of tensors + return_init_values: boolean tensor + + Returns: + local_vars: list of the created local variables. + vals: if return_init_values is true, then this returns the values of + init_values. Otherwise it returns the values of the local_vars. + """ + if not init_values: + return [], [] + + # This generates a harmless warning when saving the metagraph. + variable_use_count = tf.get_collection_ref(_LOCAL_STATE_VARIABLE_COLLECTION) + if not variable_use_count: + variable_use_count.append(collections.defaultdict(int)) + variable_use_count = variable_use_count[0] + + local_vars = [] + with tf.variable_scope(OPTIMIZER_SCOPE): + # We can't use the init_value as an initializer as init_value may + # itself depend on some problem variables. This would produce + # inter-variable initialization order dependence which TensorFlow + # sucks at making easy. + for init_value in init_values: + name = create_local_state_variable_name(init_value) + unique_name = name + "_" + str(variable_use_count[name]) + variable_use_count[name] += 1 + # The overarching idea here is to be able to reuse variables between + # different sessions on the same TensorFlow master without errors. By + # uniquifying based on the type and name we mirror the checks made inside + # TensorFlow, while still allowing some memory reuse. 
Ultimately this is a + # hack due to the broken Session.reset(). + local_vars.append( + tf.get_local_variable( + unique_name, + initializer=tf.zeros( + init_value.get_shape(), dtype=init_value.dtype))) + + # It makes things a lot simpler if we use the init_value the first + # iteration, instead of the variable itself. It allows us to propagate + # gradients through it as well as simplifying initialization. The variable + # ends up assigned to after the first iteration. + vals = tf.cond(return_init_values, lambda: init_values, lambda: local_vars) + if len(init_values) == 1: + # tf.cond extracts elements from singleton lists. + vals = [vals] + return local_vars, vals + + +def create_local_state_variable_name(tensor): + """Create a name of the variable based on its type and shape.""" + if not tensor.get_shape().is_fully_defined(): + raise ValueError("Need a fully specified shape to create a local variable.") + + return (_LOCAL_VARIABLE_PREFIX + "_".join( + map(str, tensor.get_shape().as_list())) + "_" + tensor.dtype.name) + + +def is_local_state_variable(op): + """Returns if this op is a local state variable created for training.""" + return op.node_def.op in ["Variable", "VariableV2"] and op.name.startswith( + OPTIMIZER_SCOPE + "/" + _LOCAL_VARIABLE_PREFIX) + + +def flatten_and_sort(dictionary): + """Flattens a dictionary into a list of values sorted by the keys.""" + return [dictionary[k] for k in sorted(dictionary.keys())] diff --git a/models/research/learned_optimizer/optimizer/utils.py b/models/research/learned_optimizer/optimizer/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..58744f4cb7919a84ecc8702ff1236e4c0a03f218 --- /dev/null +++ b/models/research/learned_optimizer/optimizer/utils.py @@ -0,0 +1,278 @@ +# Copyright 2017 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Utilities and helper functions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf + + +def make_finite(t, replacement): + """Replaces non-finite tensor values with the replacement value.""" + return tf.where(tf.is_finite(t), t, replacement) + + +def asinh(x): + """Computes the inverse hyperbolic sine function (in tensorflow).""" + return tf.log(x + tf.sqrt(1. + x ** 2)) + + +def affine(inputs, output_size, scope="Affine", scale=0.1, vec_mean=0., + include_bias=True, bias_init=0., random_seed=None): + """Computes an affine function of the inputs. + + Creates or recalls tensorflow variables "Matrix" and "Bias" + to generate an affine operation on the input. + + If the inputs are a list of tensors, they are concatenated together. + + Initial weights for the matrix are drawn from a Gaussian with zero + mean and standard deviation that is the given scale divided by the + square root of the input dimension. Initial weights for the bias are + set to zero. 
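The `asinh` helper above is used later (in `rms_scaling`) to squash RMS-normalized gradients: it is approximately the identity near zero and approximately a signed logarithm for large magnitudes, so outliers are compressed without losing sign information. A quick NumPy check of that behavior:

```python
import numpy as np

def asinh(x):
  # Same formula as utils.asinh, in NumPy: log(x + sqrt(1 + x^2)).
  return np.log(x + np.sqrt(1.0 + x ** 2))

x = np.array([-1000.0, -1.0, 0.0, 1.0, 1000.0])
print(asinh(x))
# ~[-7.6, -0.88, 0.0, 0.88, 7.6]: roughly linear near zero, roughly
# sign(x) * log(2|x|) for large |x|, so outliers are compressed smoothly.
print(np.arcsinh(x))  # NumPy's built-in agrees.
```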
+ + Args: + inputs: List of tensors with shape (batch_size, input_size) + output_size: Size (dimension) of the output + scope: Variable scope for these parameters (default: "Affine") + scale: Initial weight scale for the matrix parameters (default: 0.1), + this constant is divided by the sqrt of the input size to get the + std. deviation of the initial weights + vec_mean: The mean for the random initializer + include_bias: Whether to include the bias term + bias_init: The initializer bias (default 0.) + random_seed: Random seed for random initializers. (Default: None) + + Returns: + output: Tensor with shape (batch_size, output_size) + """ + + # Concatenate the input arguments. + x = tf.concat(inputs, 1) + + with tf.variable_scope(scope): + input_size = x.get_shape().as_list()[1] + + sigma = scale / np.sqrt(input_size) + rand_init = tf.random_normal_initializer(mean=vec_mean, stddev=sigma, + seed=random_seed) + + matrix = tf.get_variable("Matrix", [input_size, output_size], + dtype=tf.float32, initializer=rand_init) + + if include_bias: + bias = tf.get_variable("Bias", [output_size], dtype=tf.float32, + initializer=tf.constant_initializer(bias_init, + tf.float32)) + else: + bias = 0. + output = tf.matmul(x, matrix) + bias + + return output + + +def project(inputs, weights, bias=0., activation=tf.identity): + """Computes an affine or linear projection of the inputs. + + Projects the inputs onto the given weight vector and (optionally) + adds a bias and passes the result through an activation function. + + Args: + inputs: matrix of inputs with shape [batch_size, dim] + weights: weight matrix with shape [dim, output_dim] + bias: bias vector with shape [output_dim] (default: 0) + activation: nonlinear activation function (default: tf.identity) + + Returns: + outputs: an op which computes activation(inputs @ weights + bias) + """ + return activation(tf.matmul(inputs, weights) + bias) + + +def new_mean_squared(grad_vec, decay, ms): + """Calculates the new accumulated mean squared of the gradient. + + Args: + grad_vec: the vector for the current gradient + decay: the decay term + ms: the previous mean_squared value + + Returns: + the new mean_squared value + """ + decay_size = decay.get_shape().num_elements() + decay_check_ops = [ + tf.assert_less_equal(decay, 1., summarize=decay_size), + tf.assert_greater_equal(decay, 0., summarize=decay_size)] + + with tf.control_dependencies(decay_check_ops): + grad_squared = tf.square(grad_vec) + + # If the previous mean_squared is the 0 vector, don't use the decay and just + # return the full grad_squared. This should only happen on the first timestep. + decay = tf.cond(tf.reduce_all(tf.equal(ms, 0.)), + lambda: tf.zeros_like(decay, dtype=tf.float32), lambda: decay) + + # Update the running average of squared gradients. + epsilon = 1e-12 + return (1. - decay) * (grad_squared + epsilon) + decay * ms + + +def rms_scaling(gradient, decay, ms, update_ms=True): + """Vectorizes and scales a tensor of gradients. + + Args: + gradient: the current gradient + decay: the current decay value. + ms: the previous mean squared value + update_ms: Whether to update the mean squared value (default: True) + + Returns: + The scaled gradient and the new ms value if update_ms is True, + the old ms value otherwise. + """ + + # Vectorize the gradients and compute the squared gradients. + grad_vec = tf.reshape(gradient, [-1, 1]) + + if update_ms: + ms = new_mean_squared(grad_vec, decay, ms) + + # Scale the current gradients by the RMS, squashed by the asinh function. 
+ scaled_gradient = asinh(grad_vec / tf.sqrt(ms + 1e-16)) + + return scaled_gradient, ms + + +def accumulate_sparse_gradients(grad): + """Accumulates repeated indices of a sparse gradient update. + + Args: + grad: a tf.IndexedSlices gradient + + Returns: + grad_indices: unique indices + grad_values: gradient values corresponding to the indices + """ + + grad_indices, grad_segments = tf.unique(grad.indices) + grad_values = tf.unsorted_segment_sum(grad.values, grad_segments, + tf.shape(grad_indices)[0]) + return grad_indices, grad_values + + +def slice_tensor(dense_tensor, indices, head_dims): + """Extracts slices from a partially flattened dense tensor. + + indices is assumed to index into the first dimension of head_dims. + dense_tensor is assumed to have a shape [D_0, D_1, ...] such that + prod(head_dims) == D_0. This function will extract slices along the + first_dimension of head_dims. + + Example: + + Consider a tensor with shape head_dims = [100, 2] and a dense_tensor with + shape [200, 3]. Note that the first dimension of dense_tensor equals the + product of head_dims. This function will reshape dense_tensor such that + its shape is now [100, 2, 3] (i.e. the first dimension became head-dims) + and then slice it along the first dimension. After slicing, the slices will + have their initial dimensions flattened just as they were in dense_tensor + (e.g. if there are 4 indices, the return value will have a shape of [4, 3]). + + Args: + dense_tensor: a N-D dense tensor. Shape: [D_0, D_1, ...] + indices: a 1-D integer tensor. Shape: [K] + head_dims: True dimensions of the dense_tensor's first dimension. + + Returns: + Extracted slices. Shape [K, D_1, ...] + """ + + tail_dims = tf.shape(dense_tensor)[1:] + dense_tensor = tf.reshape(dense_tensor, + tf.concat([head_dims, tail_dims], 0)) + + slices = tf.gather(dense_tensor, indices) + # NOTE(siege): This kills the shape annotation. + return tf.reshape(slices, tf.concat([[-1], tail_dims], 0)) + + +def stack_tensor(slices, indices, dense_tensor, head_dims): + """Reconsititutes a tensor from slices and corresponding indices. + + This is an inverse operation to slice_tensor. Missing slices are set to 0. + + Args: + slices: a tensor. Shape [K, D_1, ...] + indices: a 1-D integer tensor. Shape: [K] + dense_tensor: the original tensor the slices were taken + from. Shape: [D_0, D_1, ...] + head_dims: True dimensions of the dense_tensor's first dimension. + + Returns: + Reconsituted tensor. Shape: [D_0, D_1, ...] + """ + # NOTE(siege): This cast shouldn't be necessary. + indices = tf.cast(indices, tf.int32) + + tail_dims = tf.shape(dense_tensor)[1:] + dense_shape = tf.concat([head_dims, tail_dims], 0) + + slices = tf.reshape(slices, tf.concat([[-1], dense_shape[1:]], 0)) + indices = tf.expand_dims(indices, -1) + + return tf.reshape(tf.scatter_nd(indices, slices, dense_shape), + tf.shape(dense_tensor)) + + +def update_slices(slices, indices, dense_tensor, head_dims): + """Reconstitutes a tensor from slices and corresponding indices. + + Like _stack_tensor, but instead of setting missing slices to 0, sets them to + what they were in the original tensor. The return value is reshaped to be + the same as dense_tensor. + + Args: + slices: a tensor. Shape [K, D_1, ...] + indices: a 1-D integer tensor. Shape: [K] + dense_tensor: the original tensor the slices were taken + from. Shape: [D_0, D_1, ...] + head_dims: True dimensions of the dense_tensor's first dimension. + + Returns: + Reconsituted tensor. Shape: [D_0, D_1, ...] 
+ """ + # NOTE(siege): This cast shouldn't be necessary. + indices = tf.cast(indices, tf.int32) + + tail_dims = tf.shape(dense_tensor)[1:] + dense_shape = tf.concat([head_dims, tail_dims], 0) + + update_mask_vals = tf.fill(tf.shape(indices), 1) + reshaped_indices = tf.expand_dims(indices, -1) + update_mask = tf.equal( + tf.scatter_nd(reshaped_indices, update_mask_vals, head_dims[:1]), 1) + + reshaped_dense_slices = tf.reshape( + stack_tensor(slices, indices, dense_tensor, head_dims), dense_shape) + reshaped_dense_tensor = tf.reshape(dense_tensor, dense_shape) + + return tf.reshape( + tf.where(update_mask, reshaped_dense_slices, reshaped_dense_tensor), + tf.shape(dense_tensor)) diff --git a/models/research/learned_optimizer/problems/BUILD b/models/research/learned_optimizer/problems/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..c704618821b36ca23f221f724888cde4e5d5a5ad --- /dev/null +++ b/models/research/learned_optimizer/problems/BUILD @@ -0,0 +1,43 @@ +package(default_visibility = ["//visibility:public"]) + +# Libraries +# ===== + +py_library( + name = "datasets", + srcs = ["datasets.py"], + deps = [ + ], +) + +py_library( + name = "model_adapter", + srcs = ["model_adapter.py"], + deps = [ + ":problem_generator", + ], +) + +py_library( + name = "problem_generator", + srcs = ["problem_generator.py"], + deps = [ + ":problem_spec", + ], +) + +py_library( + name = "problem_sets", + srcs = ["problem_sets.py"], + deps = [ + ":datasets", + ":model_adapter", + ":problem_generator", + ], +) + +py_library( + name = "problem_spec", + srcs = ["problem_spec.py"], + deps = [], +) diff --git a/models/research/learned_optimizer/problems/datasets.py b/models/research/learned_optimizer/problems/datasets.py new file mode 100644 index 0000000000000000000000000000000000000000..edf3df6532178b0e60ab93c78611d2313798e639 --- /dev/null +++ b/models/research/learned_optimizer/problems/datasets.py @@ -0,0 +1,218 @@ +# Copyright 2017 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Functions to generate or load datasets for supervised learning.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from collections import namedtuple + +import numpy as np +from sklearn.datasets import make_classification + +MAX_SEED = 4294967295 + + +class Dataset(namedtuple("Dataset", "data labels")): + """Helper class for managing a supervised learning dataset. + + Args: + data: an array of type float32 with N samples, each of which is the set + of features for that sample. (Shape (N, D_i), where N is the number of + samples and D_i is the number of features for that sample.) + labels: an array of type int32 or int64 with N elements, indicating the + class label for the corresponding set of features in data. + """ + # Since this is an immutable object, we don't need to reserve slots. 
+ __slots__ = () + + @property + def size(self): + """Dataset size (number of samples).""" + return len(self.data) + + def batch_indices(self, num_batches, batch_size): + """Creates indices of shuffled minibatches. + + Args: + num_batches: the number of batches to generate + batch_size: the size of each batch + + Returns: + batch_indices: a list of minibatch indices, arranged so that the dataset + is randomly shuffled. + + Raises: + ValueError: if the data and labels have different lengths + """ + if len(self.data) != len(self.labels): + raise ValueError("Labels and data must have the same number of samples.") + + batch_indices = [] + + # Follows logic in mnist.py to ensure we cover the entire dataset. + index_in_epoch = 0 + dataset_size = len(self.data) + dataset_indices = np.arange(dataset_size) + np.random.shuffle(dataset_indices) + + for _ in range(num_batches): + start = index_in_epoch + index_in_epoch += batch_size + if index_in_epoch > dataset_size: + + # Finished epoch, reshuffle. + np.random.shuffle(dataset_indices) + + # Start next epoch. + start = 0 + index_in_epoch = batch_size + + end = index_in_epoch + batch_indices.append(dataset_indices[start:end].tolist()) + + return batch_indices + + +def noisy_parity_class(n_samples, + n_classes=2, + n_context_ids=5, + noise_prob=0.25, + random_seed=None): + """Returns a randomly generated sparse-to-sparse dataset. + + The label is a parity class of a set of context classes. + + Args: + n_samples: number of samples (data points) + n_classes: number of class labels (default: 2) + n_context_ids: how many classes to take the parity of (default: 5). + noise_prob: how often to corrupt the label (default: 0.25) + random_seed: seed used for drawing the random data (default: None) + Returns: + dataset: A Dataset namedtuple containing the generated data and labels + """ + np.random.seed(random_seed) + x = np.random.randint(0, n_classes, [n_samples, n_context_ids]) + noise = np.random.binomial(1, noise_prob, [n_samples]) + y = (np.sum(x, 1) + noise) % n_classes + return Dataset(x.astype("float32"), y.astype("int32")) + + +def random(n_features, n_samples, n_classes=2, sep=1.0, random_seed=None): + """Returns a randomly generated classification dataset. + + Args: + n_features: number of features (dependent variables) + n_samples: number of samples (data points) + n_classes: number of class labels (default: 2) + sep: separation of the two classes, a higher value corresponds to + an easier classification problem (default: 1.0) + random_seed: seed used for drawing the random data (default: None) + + Returns: + dataset: A Dataset namedtuple containing the generated data and labels + """ + # Generate the problem data. + x, y = make_classification(n_samples=n_samples, + n_features=n_features, + n_informative=n_features, + n_redundant=0, + n_classes=n_classes, + class_sep=sep, + random_state=random_seed) + + return Dataset(x.astype("float32"), y.astype("int32")) + + +def random_binary(n_features, n_samples, random_seed=None): + """Returns a randomly generated dataset of binary values. 
+ + Args: + n_features: number of features (dependent variables) + n_samples: number of samples (data points) + random_seed: seed used for drawing the random data (default: None) + + Returns: + dataset: A Dataset namedtuple containing the generated data and labels + """ + random_seed = (np.random.randint(MAX_SEED) if random_seed is None + else random_seed) + np.random.seed(random_seed) + + x = np.random.randint(2, size=(n_samples, n_features)) + y = np.zeros((n_samples, 1)) + + return Dataset(x.astype("float32"), y.astype("int32")) + + +def random_symmetric(n_features, n_samples, random_seed=None): + """Returns a randomly generated dataset of values and their negatives. + + Args: + n_features: number of features (dependent variables) + n_samples: number of samples (data points) + random_seed: seed used for drawing the random data (default: None) + + Returns: + dataset: A Dataset namedtuple containing the generated data and labels + """ + random_seed = (np.random.randint(MAX_SEED) if random_seed is None + else random_seed) + np.random.seed(random_seed) + + x1 = np.random.normal(size=(int(n_samples/2), n_features)) + x = np.concatenate((x1, -x1), axis=0) + y = np.zeros((n_samples, 1)) + + return Dataset(x.astype("float32"), y.astype("int32")) + + +def random_mlp(n_features, n_samples, random_seed=None, n_layers=6, width=20): + """Returns a generated output of an MLP with random weights. + + Args: + n_features: number of features (dependent variables) + n_samples: number of samples (data points) + random_seed: seed used for drawing the random data (default: None) + n_layers: number of layers in random MLP + width: width of the layers in random MLP + + Returns: + dataset: A Dataset namedtuple containing the generated data and labels + """ + random_seed = (np.random.randint(MAX_SEED) if random_seed is None + else random_seed) + np.random.seed(random_seed) + + x = np.random.normal(size=(n_samples, n_features)) + y = x + n_in = n_features + scale_factor = np.sqrt(2.) / np.sqrt(n_features) + for _ in range(n_layers): + weights = np.random.normal(size=(n_in, width)) * scale_factor + y = np.dot(y, weights).clip(min=0) + n_in = width + + y = y[:, 0] + y[y > 0] = 1 + + return Dataset(x.astype("float32"), y.astype("int32")) + + +EMPTY_DATASET = Dataset(np.array([], dtype="float32"), + np.array([], dtype="int32")) diff --git a/models/research/learned_optimizer/problems/model_adapter.py b/models/research/learned_optimizer/problems/model_adapter.py new file mode 100644 index 0000000000000000000000000000000000000000..8455992366dd46172e2a78471004779b1a4f091b --- /dev/null +++ b/models/research/learned_optimizer/problems/model_adapter.py @@ -0,0 +1,190 @@ +# Copyright 2017 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+
+"""Implementation of the ModelAdapter class."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import mock
+import tensorflow as tf
+
+from learned_optimizer.problems import problem_generator as pg
+
+
+class ModelAdapter(pg.Problem):
+  """Adapts TensorFlow models/graphs into a form suitable for meta-training.
+
+  This class adapts an existing TensorFlow graph into a form suitable for
+  meta-training a learned optimizer.
+  """
+
+  def __init__(self, make_loss_and_init_fn):
+    """Wraps a model in the Problem interface.
+
+    The make_loss_and_init_fn argument is a callable that returns a tuple of
+    two other callables as follows.
+
+    The first will construct most of the graph and return the problem loss. It
+    is essential that this graph contains the totality of the model's variables,
+    but none of its queues.
+
+    The second will construct the model initialization graph given a list
+    of parameters and return a callable that is passed an instance of
+    tf.Session, and should initialize the model's parameters.
+
+    An example value of this argument would look like this:
+
+    ```python
+    def make_loss_and_init_fn():
+      inputs = queued_reader()
+
+      def make_loss():
+        return create_model_with_variables(inputs)
+
+      def make_init_fn(parameters):
+        saver = tf.train.Saver(parameters)
+        def init_fn(sess):
+          saver.restore(sess, ...)
+        return init_fn
+
+      return make_loss, make_init_fn
+    ```
+
+    Args:
+      make_loss_and_init_fn: a callable, as described above
+    """
+    make_loss_fn, make_init_fn = make_loss_and_init_fn()
+
+    self.make_loss_fn = make_loss_fn
+    self.parameters, self.constants = _get_variables(make_loss_fn)
+
+    if make_init_fn is not None:
+      init_fn = make_init_fn(self.parameters + self.constants)
+    else:
+      init_op = tf.initialize_variables(self.parameters + self.constants)
+      init_fn = lambda sess: sess.run(init_op)
+
+    tf.logging.info("ModelAdapter parameters: %s",
+                    [op.name for op in self.parameters])
+    tf.logging.info("ModelAdapter constants: %s",
+                    [op.name for op in self.constants])
+
+    super(ModelAdapter, self).__init__(
+        [], random_seed=None, noise_stdev=0.0, init_fn=init_fn)
+
+  def init_tensors(self, seed=None):
+    """Returns a list of tensors with the given shape."""
+    return self.parameters
+
+  def init_variables(self, seed=None):
+    """Returns a list of variables with the given shape."""
+    # NOTE(siege): This is awkward, as these are not set as trainable.
+    return self.parameters
+
+  def objective(self, parameters, data=None, labels=None):
+    """Computes the objective given a list of parameters.
+
+    Args:
+      parameters: The parameters to optimize (as a list of tensors)
+      data: An optional batch of data for calculating objectives
+      labels: An optional batch of corresponding labels
+
+    Returns:
+      A scalar tensor representing the objective value
+    """
+    # We need to set up a mapping based on the original parameter names, because
+    # the parameters passed can be arbitrary tensors.
+    parameter_mapping = {
+        old_p.name: p
+        for old_p, p in zip(self.parameters, parameters)
+    }
+
+    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
+      return _make_with_custom_variables(self.make_loss_fn, parameter_mapping)
+
+
+def _get_variables(func):
+  """Calls func, returning any variables created.
+
+  The created variables are modified to not be trainable, and are placed into
+  the LOCAL_VARIABLES collection.
+
+  Args:
+    func: Function to be called.
+ + Returns: + A tuple (variables, constants) where the first element is a list of + trainable variables and the second is the non-trainable variables. + """ + variables = [] + constants = [] + + # We need to create these variables like normal, so grab the original + # constructor before we mock it. + original_init = tf.Variable.__init__ + + def custom_init(self, *args, **kwargs): + trainable = kwargs["trainable"] + kwargs["trainable"] = False + # Making these variables local keeps them out of the optimizer's checkpoints + # somehow. + kwargs["collections"] = [tf.GraphKeys.LOCAL_VARIABLES] + original_init(self, *args, **kwargs) + if trainable: + variables.append(self) + else: + constants.append(self) + + # This name-scope is just a nicety for TensorBoard. + with tf.name_scope("unused_graph"): + with mock.patch.object(tf.Variable, "__init__", custom_init): + func() + + return variables, constants + + +def _make_with_custom_variables(func, variable_mapping): + """Calls func and replaces the value of some variables created in it. + + Args: + func: Function to be called. + variable_mapping: A mapping of variable name to the replacement tensor or + tf.Variable. + + Returns: + The return value of func is returned. + """ + original_value = tf.Variable.value + + def custom_value(self): + if self.name in variable_mapping: + replacement = variable_mapping[self.name] + tf.logging.info("Replaced %s with %s" % (self.name, replacement)) + + # value() method needs to return a tensor, we need to call value on it. + # This has to be done manually like this otherwise we'll get an infinite + # loop. + if isinstance(replacement, tf.Variable): + replacement = original_value(replacement) + + return replacement + else: + return original_value(self) + + with mock.patch.object(tf.Variable, "value", custom_value): + with mock.patch.object(tf.Variable, "_AsTensor", custom_value): + return func() diff --git a/models/research/learned_optimizer/problems/problem_generator.py b/models/research/learned_optimizer/problems/problem_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..abe1008faadbb04163bc27e0b991e3ec4ba9e6bc --- /dev/null +++ b/models/research/learned_optimizer/problems/problem_generator.py @@ -0,0 +1,1016 @@ +# Copyright 2017 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Generates toy optimization problems. + +This module contains a base class, Problem, that defines a minimal interface +for optimization problems, and a few specific problem types that subclass it. 
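+
+A minimal usage sketch (illustrative only): build a problem, create its
+variables, evaluate the objective, then ask for its (optionally noisy)
+gradients:
+
+  problem = Quadratic(ndim=10)
+  params = problem.init_variables()
+  loss = problem.objective(params)
+  grads = problem.gradients(loss, params)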
+ +Test functions for optimization: http://www.sfu.ca/~ssurjano/optimization.html +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf + +from learned_optimizer.problems import problem_spec as prob_spec + +tf.app.flags.DEFINE_float("l2_reg_scale", 1e-3, + """Scaling factor for parameter value regularization + in softmax classifier problems.""") +FLAGS = tf.app.flags.FLAGS + +EPSILON = 1e-6 +MAX_SEED = 4294967295 +PARAMETER_SCOPE = "parameters" + +_Spec = prob_spec.Spec + + +class Problem(object): + """Base class for optimization problems. + + This defines an interface for optimization problems, including objective and + gradients functions and a feed_generator function that yields data to pass to + feed_dict in tensorflow. + + Subclasses of Problem must (at the minimum) override the objective method, + which computes the objective/loss/cost to minimize, and specify the desired + shape of the parameters in a list in the param_shapes attribute. + """ + + def __init__(self, param_shapes, random_seed, noise_stdev, init_fn=None): + """Initializes a global random seed for the problem. + + Args: + param_shapes: A list of tuples defining the expected shapes of the + parameters for this problem + random_seed: Either an integer (or None, in which case the seed is + randomly drawn) + noise_stdev: Strength (standard deviation) of added gradient noise + init_fn: A function taking a tf.Session object that is used to + initialize the problem's variables. + + Raises: + ValueError: If the random_seed is not an integer and not None + """ + if random_seed is not None and not isinstance(random_seed, int): + raise ValueError("random_seed must be an integer or None") + + # Pick a random seed. + self.random_seed = (np.random.randint(MAX_SEED) if random_seed is None + else random_seed) + + # Store the noise level. + self.noise_stdev = noise_stdev + + # Set the random seed to ensure any random data in the problem is the same. + np.random.seed(self.random_seed) + + # Store the parameter shapes. + self.param_shapes = param_shapes + + if init_fn is not None: + self.init_fn = init_fn + else: + self.init_fn = lambda _: None + + def init_tensors(self, seed=None): + """Returns a list of tensors with the given shape.""" + return [tf.random_normal(shape, seed=seed) for shape in self.param_shapes] + + def init_variables(self, seed=None): + """Returns a list of variables with the given shape.""" + with tf.variable_scope(PARAMETER_SCOPE): + params = [tf.Variable(param) for param in self.init_tensors(seed)] + return params + + def objective(self, parameters, data=None, labels=None): + """Computes the objective given a list of parameters. + + Args: + parameters: The parameters to optimize (as a list of tensors) + data: An optional batch of data for calculating objectives + labels: An optional batch of corresponding labels + + Returns: + A scalar tensor representing the objective value + """ + raise NotImplementedError + + def gradients(self, objective, parameters): + """Compute gradients of the objective with respect to the parameters. + + Args: + objective: The objective op (e.g. 
output of self.objective()) + parameters: A list of tensors (the parameters to optimize) + + Returns: + A list of tensors representing the gradient for each parameter, + returned in the same order as the given list + """ + grads = tf.gradients(objective, list(parameters)) + noisy_grads = [] + + for grad in grads: + if isinstance(grad, tf.IndexedSlices): + noise = self.noise_stdev * tf.random_normal(tf.shape(grad.values)) + new_grad = tf.IndexedSlices(grad.values + noise, grad.indices) + else: + new_grad = grad + self.noise_stdev * tf.random_normal(grad.get_shape()) + noisy_grads.append(new_grad) + + return noisy_grads + + +class Quadratic(Problem): + """Optimizes a random quadratic function. + + The objective is: f(x) = (1/2) ||Wx - y||_2^2 + where W is a random Gaussian matrix and y is a random Gaussian vector. + """ + + def __init__(self, ndim, random_seed=None, noise_stdev=0.0): + """Initializes a random quadratic problem.""" + param_shapes = [(ndim, 1)] + super(Quadratic, self).__init__(param_shapes, random_seed, noise_stdev) + + # Generate a random problem instance. + self.w = np.random.randn(ndim, ndim).astype("float32") + self.y = np.random.randn(ndim, 1).astype("float32") + + def objective(self, params, data=None, labels=None): + """Quadratic objective (see base class for details).""" + return tf.nn.l2_loss(tf.matmul(self.w, params[0]) - self.y) + + +class SoftmaxClassifier(Problem): + """Helper functions for supervised softmax classification problems.""" + + def init_tensors(self, seed=None): + """Returns a list of tensors with the given shape.""" + return [tf.random_normal(shape, seed=seed) * 1.2 / np.sqrt(shape[0]) + for shape in self.param_shapes] + + def inference(self, params, data): + """Computes logits given parameters and data. + + Args: + params: List of parameter tensors or variables + data: Batch of features with samples along the first dimension + + Returns: + logits: Un-normalized logits with shape (num_samples, num_classes) + """ + raise NotImplementedError + + def objective(self, params, data, labels): + """Computes the softmax cross entropy. + + Args: + params: List of parameter tensors or variables + data: Batch of features with samples along the first dimension + labels: Vector of labels with the same number of samples as the data + + Returns: + loss: Softmax cross entropy loss averaged over the samples in the batch + + Raises: + ValueError: If the objective is to be computed over >2 classes, because + this operation is broken in tensorflow at the moment. + """ + # Forward pass. + logits = self.inference(params, data) + + # Compute the loss. + l2reg = [tf.reduce_sum(param ** 2) for param in params] + if int(logits.get_shape()[1]) == 2: + labels = tf.cast(labels, tf.float32) + losses = tf.nn.sigmoid_cross_entropy_with_logits( + labels=labels, logits=logits[:, 0]) + else: + raise ValueError("Unable to compute softmax cross entropy for more than" + " 2 classes.") + + return tf.reduce_mean(losses) + tf.reduce_mean(l2reg) * FLAGS.l2_reg_scale + + def argmax(self, logits): + """Samples the most likely class label given the logits. + + Args: + logits: Un-normalized logits with shape (num_samples, num_classes) + + Returns: + predictions: Predicted class labels, has shape (num_samples,) + """ + return tf.cast(tf.argmax(tf.nn.softmax(logits), 1), tf.int32) + + def accuracy(self, params, data, labels): + """Computes the accuracy (fraction of correct classifications). 
+ + Args: + params: List of parameter tensors or variables + data: Batch of features with samples along the first dimension + labels: Vector of labels with the same number of samples as the data + + Returns: + accuracy: Fraction of correct classifications across the batch + """ + predictions = self.argmax(self.inference(params, data)) + return tf.contrib.metrics.accuracy(predictions, tf.cast(labels, tf.int32)) + + +class SoftmaxRegression(SoftmaxClassifier): + """Builds a softmax regression problem.""" + + def __init__(self, n_features, n_classes, activation=tf.identity, + random_seed=None, noise_stdev=0.0): + self.activation = activation + self.n_features = n_features + param_shapes = [(n_features, n_classes), (n_classes,)] + super(SoftmaxRegression, self).__init__(param_shapes, + random_seed, + noise_stdev) + + def inference(self, params, data): + features = tf.reshape(data, (-1, self.n_features)) + return tf.matmul(features, params[0]) + params[1] + + +class SparseSoftmaxRegression(SoftmaxClassifier): + """Builds a sparse input softmax regression problem.""" + + def __init__(self, + n_features, + n_classes, + activation=tf.identity, + random_seed=None, + noise_stdev=0.0): + self.activation = activation + self.n_features = n_features + param_shapes = [(n_classes, n_features), (n_features, n_classes), ( + n_classes,)] + super(SparseSoftmaxRegression, self).__init__(param_shapes, random_seed, + noise_stdev) + + def inference(self, params, data): + all_embeddings, softmax_weights, softmax_bias = params + embeddings = tf.nn.embedding_lookup(all_embeddings, tf.cast(data, tf.int32)) + embeddings = tf.reduce_sum(embeddings, 1) + return tf.matmul(embeddings, softmax_weights) + softmax_bias + + +class OneHotSparseSoftmaxRegression(SoftmaxClassifier): + """Builds a sparse input softmax regression problem. + + This is identical to SparseSoftmaxRegression, but without using embedding + ops. + """ + + def __init__(self, + n_features, + n_classes, + activation=tf.identity, + random_seed=None, + noise_stdev=0.0): + self.activation = activation + self.n_features = n_features + self.n_classes = n_classes + param_shapes = [(n_classes, n_features), (n_features, n_classes), ( + n_classes,)] + super(OneHotSparseSoftmaxRegression, self).__init__(param_shapes, + random_seed, + noise_stdev) + + def inference(self, params, data): + all_embeddings, softmax_weights, softmax_bias = params + num_ids = tf.shape(data)[1] + one_hot_embeddings = tf.one_hot(tf.cast(data, tf.int32), self.n_classes) + one_hot_embeddings = tf.reshape(one_hot_embeddings, [-1, self.n_classes]) + embeddings = tf.matmul(one_hot_embeddings, all_embeddings) + embeddings = tf.reshape(embeddings, [-1, num_ids, self.n_features]) + embeddings = tf.reduce_sum(embeddings, 1) + return tf.matmul(embeddings, softmax_weights) + softmax_bias + + +class FullyConnected(SoftmaxClassifier): + """Builds a multi-layer perceptron classifier.""" + + def __init__(self, n_features, n_classes, hidden_sizes=(32, 64), + activation=tf.nn.sigmoid, random_seed=None, noise_stdev=0.0): + """Initializes an multi-layer perceptron classification problem.""" + # Store the number of features and activation function. + self.n_features = n_features + self.activation = activation + + # Define the network as a list of weight + bias shapes for each layer. + param_shapes = [] + for ix, sz in enumerate(hidden_sizes + (n_classes,)): + + # The previous layer"s size (n_features if input). + prev_size = n_features if ix == 0 else hidden_sizes[ix - 1] + + # Weight shape for this layer. 
+ param_shapes.append((prev_size, sz)) + + # Bias shape for this layer. + param_shapes.append((sz,)) + + super(FullyConnected, self).__init__(param_shapes, random_seed, noise_stdev) + + def inference(self, params, data): + # Flatten the features into a vector. + features = tf.reshape(data, (-1, self.n_features)) + + # Pass the data through the network. + preactivations = tf.matmul(features, params[0]) + params[1] + + for layer in range(2, len(self.param_shapes), 2): + net = self.activation(preactivations) + preactivations = tf.matmul(net, params[layer]) + params[layer + 1] + + return preactivations + + def accuracy(self, params, data, labels): + """Computes the accuracy (fraction of correct classifications). + + Args: + params: List of parameter tensors or variables + data: Batch of features with samples along the first dimension + labels: Vector of labels with the same number of samples as the data + + Returns: + accuracy: Fraction of correct classifications across the batch + """ + predictions = self.argmax(self.activation(self.inference(params, data))) + return tf.contrib.metrics.accuracy(predictions, tf.cast(labels, tf.int32)) + + +class ConvNet(SoftmaxClassifier): + """Builds an N-layer convnet for image classification.""" + + def __init__(self, + image_shape, + n_classes, + filter_list, + activation=tf.nn.relu, + random_seed=None, + noise_stdev=0.0): + # Number of channels, number of pixels in x- and y- dimensions. + n_channels, px, py = image_shape + + # Store the activation. + self.activation = activation + + param_shapes = [] + input_size = n_channels + for fltr in filter_list: + # Add conv2d filters. + param_shapes.append((fltr[0], fltr[1], input_size, fltr[2])) + input_size = fltr[2] + + # Number of units in the final (dense) layer. + self.affine_size = input_size * px * py + + param_shapes.append((self.affine_size, n_classes)) # affine weights + param_shapes.append((n_classes,)) # affine bias + + super(ConvNet, self).__init__(param_shapes, random_seed, noise_stdev) + + def init_tensors(self, seed=None): + """Returns a list of tensors with the given shape.""" + return [tf.random_normal(shape, mean=0., stddev=0.01, seed=seed) + for shape in self.param_shapes] + + def inference(self, params, data): + + # Unpack. + w_conv_list = params[:-2] + output_w, output_b = params[-2:] + + conv_input = data + for w_conv in w_conv_list: + layer = tf.nn.conv2d(conv_input, w_conv, strides=[1] * 4, padding="SAME") + output = self.activation(layer) + conv_input = output + + # Flatten. + flattened = tf.reshape(conv_input, (-1, self.affine_size)) + + # Fully connected layer. + return tf.matmul(flattened, output_w) + output_b + + +class Bowl(Problem): + """A 2D quadratic bowl.""" + + def __init__(self, condition_number, angle=0.0, + random_seed=None, noise_stdev=0.0): + assert condition_number > 0, "Condition number must be positive." + + # Define parameter shapes. + param_shapes = [(2, 1)] + super(Bowl, self).__init__(param_shapes, random_seed, noise_stdev) + + self.condition_number = condition_number + self.angle = angle + self._build_matrix(condition_number, angle) + + def _build_matrix(self, condition_number, angle): + """Builds the Hessian matrix.""" + hessian = np.array([[condition_number, 0.], [0., 1.]], dtype="float32") + + # Build the rotation matrix. + rotation_matrix = np.array([ + [np.cos(angle), -np.sin(angle)], + [np.sin(angle), np.cos(angle)] + ]) + + # The objective is 0.5 * || Ax ||_2^2 + # where the data matrix (A) is: sqrt(Hessian).dot(rotation_matrix). 
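+    # For example (hypothetical values), condition_number=4 and angle=0 give
+    # A = [[2, 0], [0, 1]], so the objective is 2 * x0**2 + 0.5 * x1**2 and
+    # the curvature along the two axes differs by exactly the condition number.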
+ self.matrix = np.sqrt(hessian).dot(rotation_matrix) + + def objective(self, params, data=None, labels=None): + mtx = tf.constant(self.matrix, dtype=tf.float32) + return tf.nn.l2_loss(tf.matmul(mtx, params[0])) + + def surface(self, xlim=5, ylim=5, n=50): + xm, ym = _mesh(xlim, ylim, n) + pts = np.vstack([xm.ravel(), ym.ravel()]) + zm = 0.5 * np.linalg.norm(self.matrix.dot(pts), axis=0) ** 2 + return xm, ym, zm.reshape(n, n) + + +class Problem2D(Problem): + + def __init__(self, random_seed=None, noise_stdev=0.0): + param_shapes = [(2,)] + super(Problem2D, self).__init__(param_shapes, random_seed, noise_stdev) + + def surface(self, n=50, xlim=5, ylim=5): + """Computes the objective surface over a 2d mesh.""" + + # Create a mesh over the given coordinate ranges. + xm, ym = _mesh(xlim, ylim, n) + + with tf.Graph().as_default(), tf.Session() as sess: + + # Ops to compute the objective at every (x, y) point. + x = tf.placeholder(tf.float32, shape=xm.shape) + y = tf.placeholder(tf.float32, shape=ym.shape) + obj = self.objective([[x, y]]) + + # Run the computation. + zm = sess.run(obj, feed_dict={x: xm, y: ym}) + + return xm, ym, zm + + +class Rosenbrock(Problem2D): + """See https://en.wikipedia.org/wiki/Rosenbrock_function. + + This function has a single global minima at [1, 1] + The objective value at this point is zero. + """ + + def init_tensors(self, seed=None): + """Returns a list of tensors with the given shape.""" + return [tf.random_uniform(shape, minval=-5., maxval=10., seed=seed) + for shape in self.param_shapes] + + def objective(self, params, data=None, labels=None): + x, y = tf.split(params[0], 2, axis=0) + obj = (1 - x)**2 + 100 * (y - x**2)**2 + return tf.squeeze(obj) + + +def make_rosenbrock_loss_and_init(device=None): + """A variable-backed version of Rosenbrock problem. + + See the Rosenbrock class for details. + + Args: + device: Where to place the ops of this problem. + + Returns: + A tuple of two callables, first of which creates the loss and the second + creates the parameter initializer function. + """ + def make_rosenbrock_loss(): + with tf.name_scope("optimizee"): + with tf.device(device): + x = tf.get_variable("x", [1]) + y = tf.get_variable("y", [1]) + c = tf.get_variable( + "c", [1], + initializer=tf.constant_initializer(100.0), + trainable=False) + obj = (1 - x)**2 + c * (y - x**2)**2 + return tf.squeeze(obj) + + def make_init_fn(parameters): + with tf.device(device): + init_op = tf.variables_initializer(parameters) + def init_fn(sess): + tf.logging.info("Initializing model parameters.") + sess.run(init_op) + return init_fn + + return make_rosenbrock_loss, make_init_fn + + +class Saddle(Problem2D): + """Loss surface around a saddle point.""" + + def objective(self, params, data=None, labels=None): + x, y = tf.split(params[0], 2, axis=0) + obj = x ** 2 - y ** 2 + return tf.squeeze(obj) + + +class LogSumExp(Problem2D): + """2D function defined by the log of the sum of exponentials.""" + + def objective(self, params, data=None, labels=None): + x, y = tf.split(params[0], 2, axis=0) + obj = tf.log(tf.exp(x + 3. * y - 0.1) + + tf.exp(x - 3. 
* y - 0.1) + + tf.exp(-x - 0.1) + 1.0) + return tf.squeeze(obj) + + +class Ackley(Problem2D): + """Ackley's function (contains many local minima).""" + + def init_tensors(self, seed=None): + """Returns a list of tensors with the given shape.""" + return [tf.random_uniform(shape, minval=-32.768, maxval=32.768, seed=seed) + for shape in self.param_shapes] + + def objective(self, params, data=None, labels=None): + x, y = tf.split(params[0], 2, axis=0) + obj = (-20 * tf.exp(-0.2 * tf.sqrt(0.5 * (x ** 2 + y ** 2))) - + tf.exp(0.5 * (tf.cos(2 * np.pi * x) + tf.cos(2 * np.pi * y))) + + tf.exp(1.0) + 20.) + return tf.squeeze(obj) + + +class Beale(Problem2D): + """Beale function (a multimodal function with sharp peaks).""" + + def init_tensors(self, seed=None): + """Returns a list of tensors with the given shape.""" + return [tf.random_uniform(shape, minval=-4.5, maxval=4.5, seed=seed) + for shape in self.param_shapes] + + def objective(self, params, data=None, labels=None): + x, y = tf.split(params[0], 2, axis=0) + obj = ((1.5 - x + x * y) ** 2 + + (2.25 - x + x * y ** 2) ** 2 + + (2.625 - x + x * y ** 3) ** 2) + return tf.squeeze(obj) + + +class Booth(Problem2D): + """Booth's function (has a long valley along one dimension).""" + + def init_tensors(self, seed=None): + """Returns a list of tensors with the given shape.""" + return [tf.random_uniform(shape, minval=-10., maxval=10., seed=seed) + for shape in self.param_shapes] + + def objective(self, params, data=None, labels=None): + x, y = tf.split(params[0], 2, axis=0) + obj = (x + 2 * y - 7) ** 2 + (2 * x + y - 5) ** 2 + return tf.squeeze(obj) + + +class StyblinskiTang(Problem2D): + """Styblinski-Tang function (a bumpy function in two dimensions).""" + + def init_tensors(self, seed=None): + """Returns a list of tensors with the given shape.""" + return [tf.random_uniform(shape, minval=-5., maxval=5., seed=seed) + for shape in self.param_shapes] + + def objective(self, params, data=None, labels=None): + params = tf.split(params[0], 2, axis=0) + obj = 0.5 * tf.reduce_sum([x ** 4 - 16 * x ** 2 + 5 * x + for x in params], 0) + 80. + return tf.squeeze(obj) + + +class Matyas(Problem2D): + """Matyas function (a function with a single global minimum in a valley).""" + + def init_tensors(self, seed=None): + """Returns a list of tensors with the given shape.""" + return [tf.random_uniform(shape, minval=-10, maxval=10, seed=seed) + for shape in self.param_shapes] + + def objective(self, params, data=None, labels=None): + x, y = tf.split(params[0], 2, axis=0) + obj = 0.26 * (x ** 2 + y ** 2) - 0.48 * x * y + return tf.squeeze(obj) + + +class Branin(Problem2D): + """Branin function (a function with three global minima).""" + + def init_tensors(self, seed=None): + """Returns a list of tensors with the given shape.""" + x1 = tf.random_uniform((1,), minval=-5., maxval=10., + seed=seed) + x2 = tf.random_uniform((1,), minval=0., maxval=15., + seed=seed) + return [tf.concat([x1, x2], 0)] + + def objective(self, params, data=None, labels=None): + x, y = tf.split(params[0], 2, axis=0) + + # Define some constants. + a = 1. + b = 5.1 / (4. * np.pi ** 2) + c = 5 / np.pi + r = 6. + s = 10. + t = 1 / (8. * np.pi) + + # Evaluate the function. 
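+    # With these constants the function has three global minima, located
+    # roughly at (-pi, 12.275), (pi, 2.275) and (9.42478, 2.475), each with an
+    # objective value of about 0.398.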
+ obj = a * (y - b * x ** 2 + c * x - r) ** 2 + s * (1 - t) * tf.cos(x) + s + return tf.squeeze(obj) + + +class Michalewicz(Problem2D): + """Michalewicz function (has steep ridges and valleys).""" + + def init_tensors(self, seed=None): + """Returns a list of tensors with the given shape.""" + return [tf.random_uniform(shape, minval=0., maxval=np.pi, seed=seed) + for shape in self.param_shapes] + + def objective(self, params, data=None, labels=None): + x, y = tf.split(params[0], 2, axis=0) + m = 5 # Defines how steep the ridges are (larger m => steeper ridges). + obj = 2. - (tf.sin(x) * tf.sin(x ** 2 / np.pi) ** (2 * m) + + tf.sin(y) * tf.sin(2 * y ** 2 / np.pi) ** (2 * m)) + return tf.squeeze(obj) + + +class Rescale(Problem): + """Takes an existing problem, and rescales all the parameters.""" + + def __init__(self, problem_spec, scale=10., noise_stdev=0.0): + self.problem = problem_spec.build() + self.param_shapes = self.problem.param_shapes + self.scale = scale + + super(Rescale, self).__init__(self.param_shapes, random_seed=None, + noise_stdev=noise_stdev) + + def init_tensors(self, seed=None): + params_raw = self.problem.init_tensors(seed=seed) + params = [t * self.scale for t in params_raw] + return params + + def objective(self, params, data=None, labels=None): + params_raw = [t/self.scale for t in params] + + problem_obj = self.problem.objective(params_raw, data, labels) + return problem_obj + + +class SumTask(Problem): + """Takes a list of problems and modifies the objective to be their sum.""" + + def __init__(self, problem_specs, noise_stdev=0.0): + self.problems = [ps.build() for ps in problem_specs] + self.param_shapes = [] + for prob in self.problems: + self.param_shapes += prob.param_shapes + + super(SumTask, self).__init__(self.param_shapes, random_seed=None, + noise_stdev=noise_stdev) + + def init_tensors(self, seed=None): + tensors = [] + for prob in self.problems: + tensors += prob.init_tensors(seed=seed) + return tensors + + def objective(self, params, data=None, labels=None): + obj = 0. + index = 0 + for prob in self.problems: + num_params = len(prob.param_shapes) + obj += prob.objective(params[index:index + num_params]) + index += num_params + return obj + + +class IsotropicQuadratic(Problem): + """An isotropic quadratic problem.""" + + def objective(self, params, data=None, labels=None): + return sum([tf.reduce_sum(param ** 2) for param in params]) + + +class Norm(Problem): + """Takes an existing problem and modifies the objective to be its N-norm.""" + + def __init__(self, ndim, random_seed=None, noise_stdev=0.0, norm_power=2.): + param_shapes = [(ndim, 1)] + super(Norm, self).__init__(param_shapes, random_seed, noise_stdev) + + # Generate a random problem instance. + self.w = np.random.randn(ndim, ndim).astype("float32") + self.y = np.random.randn(ndim, 1).astype("float32") + self.norm_power = norm_power + + def objective(self, params, data=None, labels=None): + diff = tf.matmul(self.w, params[0]) - self.y + exp = 1. 
/ self.norm_power + loss = tf.reduce_sum((tf.abs(diff) + EPSILON) ** self.norm_power) ** exp + return loss + + +class LogObjective(Problem): + """Takes an existing problem and modifies the objective to be its log.""" + + def __init__(self, problem_spec): + self.problem = problem_spec.build() + self.param_shapes = self.problem.param_shapes + + super(LogObjective, self).__init__(self.param_shapes, + random_seed=None, + noise_stdev=0.0) + + def objective(self, params, data=None, labels=None): + problem_obj = self.problem.objective(params, data, labels) + return tf.log(problem_obj + EPSILON) - tf.log(EPSILON) + + +class SparseProblem(Problem): + """Takes a problem and sets gradients to 0 with the given probability.""" + + def __init__(self, + problem_spec, + zero_probability=0.99, + random_seed=None, + noise_stdev=0.0): + self.problem = problem_spec.build() + self.param_shapes = self.problem.param_shapes + self.zero_prob = zero_probability + + super(SparseProblem, self).__init__(self.param_shapes, + random_seed=random_seed, + noise_stdev=noise_stdev) + + def objective(self, parameters, data=None, labels=None): + return self.problem.objective(parameters, data, labels) + + def gradients(self, objective, parameters): + grads = tf.gradients(objective, list(parameters)) + + new_grads = [] + for grad in grads: + mask = tf.greater(self.zero_prob, tf.random_uniform(grad.get_shape())) + zero_grad = tf.zeros_like(grad, dtype=tf.float32) + noisy_grad = grad + self.noise_stdev * tf.random_normal(grad.get_shape()) + new_grads.append(tf.where(mask, zero_grad, noisy_grad)) + return new_grads + + +class DependencyChain(Problem): + """A problem in which parameters must be optimized in order. + + A sequence of parameters which all need to be brought to 0, but where each + parameter in the sequence can't be brought to 0 until the preceding one + has been. This should take a long time to optimize, with steady + (or accelerating) progress throughout the entire process. + """ + + def __init__(self, ndim, random_seed=None, noise_stdev=0.): + param_shapes = [(ndim + 1,)] + self.ndim = ndim + super(DependencyChain, self).__init__( + param_shapes, random_seed, noise_stdev) + + def objective(self, params, data=None, labels=None): + terms = params[0][0]**2 + params[0][1:]**2 / (params[0][:-1]**2 + EPSILON) + return tf.reduce_sum(terms) + + +class MinMaxWell(Problem): + """Problem with global min when both the min and max (absolute) params are 1. + + The gradient for all but two parameters (the min and max) is zero. This + should therefore encourage the optimizer to behave sensible even when + parameters have zero gradients, as is common eg for some deep neural nets. + """ + + def __init__(self, ndim, random_seed=None, noise_stdev=0.): + param_shapes = [(ndim,)] + self.ndim = ndim + super(MinMaxWell, self).__init__(param_shapes, random_seed, noise_stdev) + + def objective(self, params, data=None, labels=None): + params_sqr = params[0]**2 + min_sqr = tf.reduce_min(params_sqr) + max_sqr = tf.reduce_max(params_sqr) + epsilon = 1e-12 + + return max_sqr + 1./min_sqr - 2. + epsilon + + +class OutwardSnake(Problem): + """A winding path out to infinity. + + Ideal step length stays constant along the entire path. + """ + + def __init__(self, ndim, random_seed=None, noise_stdev=0.): + param_shapes = [(ndim,)] + self.ndim = ndim + super(OutwardSnake, self).__init__(param_shapes, random_seed, noise_stdev) + + def objective(self, params, data, labels=None): + radius = tf.sqrt(tf.reduce_sum(params[0]**2)) + rad_loss = tf.reduce_sum(1. 
/ (radius + 1e-6) * data[:, 0]) + + sin_dist = params[0][1:] - tf.cos(params[0][:-1]) * np.pi + sin_loss = tf.reduce_sum((sin_dist * data[:, 1:])**2) + + return rad_loss + sin_loss + + +class ProjectionQuadratic(Problem): + """Dataset consists of different directions to probe. Global min is at 0.""" + + def __init__(self, ndim, random_seed=None, noise_stdev=0.): + param_shapes = [(1, ndim)] + super(ProjectionQuadratic, self).__init__( + param_shapes, random_seed, noise_stdev) + + def objective(self, params, data, labels=None): + return tf.reduce_sum((params[0] * data)**2) + + +class SumOfQuadratics(Problem): + + def __init__(self, ndim, random_seed=None, noise_stdev=0.): + param_shapes = [(1, ndim)] + super(SumOfQuadratics, self).__init__( + param_shapes, random_seed, noise_stdev) + + def objective(self, params, data, labels=None): + epsilon = 1e-12 + # Assume dataset is designed so that the global minimum is at params=0. + # Subtract loss at params=0, so that global minimum has objective value + # epsilon (added to avoid floating point issues). + return (tf.reduce_sum((params[0] - data)**2) - tf.reduce_sum(data**2) + + epsilon) + + +class MatMulAlgorithm(Problem): + """A 6-th order polynomial optimization problem. + + This problem is parametrized by n and k. A solution to this problem with + objective value exactly zero defines a matrix multiplication algorithm of + n x n matrices using k multiplications between matrices. When applied + recursively, such an algorithm has complexity O(n^(log_n(k))). + + Given n, it is not known in general which values of k in [n^2, n^3] have a + solution. There is always a solution with k = n^3 (this is the naive + algorithm). + + In the special case n = 2, it is known that there are solutions for k = {7, 8} + but not for k <= 6. For n = 3, it is known that there are exact solutions for + 23 <= k <= 27, and there are asymptotic solutions for k = {21, 22}, but the + other cases are unknown. + + For a given n and k, if one solution exists then infinitely many solutions + exist due to permutation and scaling symmetries in the parameters. + + This is a very hard problem for some values of n and k (e.g. n = 3, k = 21), + but very easy for other values (e.g. n = 2, k = 7). + + For a given n and k, the specific formulation of this problem is as follows. + Let theta_a, theta_b, theta_c be parameter matrices with respective dimensions + [n**2, k], [n**2, k], [k, n**2]. Then for any matrices a, b with shape [n, n], + we can form the matrix c with shape [n, n] via the operation: + ((vec(a) * theta_a) .* (vec(b) * theta_b)) * theta_c = vec(c), (#) + where vec(x) is the operator that flattens a matrix with shape [n, n] into a + row vector with shape [1, n**2], * denotes matrix multiplication and .* + denotes elementwise multiplication. + + This operation, parameterized by theta_a, theta_b, theta_c, is a matrix + multiplication algorithm iff c = a*b for all [n, n] matrices a and b. But + actually it suffices to verify all combinations of one-hot matrices a and b, + of which there are n**4 such combinations. This gives a batch of n**4 matrix + triplets (a, b, c) such that equation (#) must hold for each triplet. We solve + for theta_a, theta_b, theta_c by minimizing the sum of squares of errors + across this batch. + + Finally, theta_c can be computed from theta_a and theta_b. Therefore it + suffices to learn theta_a and theta_b, from which theta_c and therefore the + objective value can be computed. 
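+
+  As a point of reference, a zero-loss solution for n = 2, k = 7 corresponds
+  to Strassen's algorithm, and k = 8 to the naive algorithm for 2 x 2
+  matrices.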
+ """ + + def __init__(self, n, k): + assert isinstance(n, int), "n must be an integer" + assert isinstance(k, int), "k must be an integer" + assert n >= 2, "Must have n >= 2" + assert k >= n**2 and k <= n**3, "Must have n**2 <= k <= n**3" + + param_shapes = [(n**2, k), (n**2, k)] # theta_a, theta_b + super(MatMulAlgorithm, self).__init__( + param_shapes, random_seed=None, noise_stdev=0.0) + + self.n = n + self.k = k + + # Build a batch of all combinations of one-hot matrices a, b, and their + # respective products c. Correctness on this batch is a necessary and + # sufficient condition for the algorithm to be valid. The number of matrices + # in {a, b, c}_3d is n**4 and each matrix is n x n. + onehots = np.identity(n**2).reshape(n**2, n, n) + a_3d = np.repeat(onehots, n**2, axis=0) + b_3d = np.tile(onehots, [n**2, 1, 1]) + c_3d = np.matmul(a_3d, b_3d) + + # Convert the batch to 2D Tensors. + self.a = tf.constant(a_3d.reshape(n**4, n**2), tf.float32, name="a") + self.b = tf.constant(b_3d.reshape(n**4, n**2), tf.float32, name="b") + self.c = tf.constant(c_3d.reshape(n**4, n**2), tf.float32, name="c") + + def init_tensors(self, seed=None): + # Initialize params such that the columns of theta_a and theta_b have L2 + # norm 1. + def _param_initializer(shape, seed=None): + x = tf.random_normal(shape, dtype=tf.float32, seed=seed) + return tf.transpose(tf.nn.l2_normalize(tf.transpose(x), 1)) + + return [_param_initializer(shape, seed) for shape in self.param_shapes] + + def objective(self, parameters, data=None, labels=None): + theta_a = parameters[0] + theta_b = parameters[1] + + # Compute theta_c from theta_a and theta_b. + p = tf.matmul(self.a, theta_a) * tf.matmul(self.b, theta_b) + p_trans = tf.transpose(p, name="p_trans") + p_inv = tf.matmul( + tf.matrix_inverse(tf.matmul(p_trans, p)), p_trans, name="p_inv") + theta_c = tf.matmul(p_inv, self.c, name="theta_c") + + # Compute the "predicted" value of c. + c_hat = tf.matmul(p, theta_c, name="c_hat") + + # Compute the loss (sum of squared errors). + loss = tf.reduce_sum((c_hat - self.c)**2, name="loss") + + return loss + + +def matmul_problem_sequence(n, k_min, k_max): + """Helper to generate a sequence of matrix multiplication problems.""" + return [(_Spec(MatMulAlgorithm, (n, k), {}), None, None) + for k in range(k_min, k_max + 1)] + + +def init_fixed_variables(arrays): + with tf.variable_scope(PARAMETER_SCOPE): + params = [tf.Variable(arr.astype("float32")) for arr in arrays] + return params + + +def _mesh(xlim, ylim, n): + """Creates a 2D meshgrid covering the given ranges. + + Args: + xlim: int that defines the desired x-range (-xlim, xlim) + ylim: int that defines the desired y-range (-ylim, ylim) + n: number of points in each dimension of the mesh + + Returns: + xm: 2D array of x-values in the mesh + ym: 2D array of y-values in the mesh + """ + return np.meshgrid(np.linspace(-xlim, xlim, n), + np.linspace(-ylim, ylim, n)) diff --git a/models/research/learned_optimizer/problems/problem_sets.py b/models/research/learned_optimizer/problems/problem_sets.py new file mode 100644 index 0000000000000000000000000000000000000000..eaf9273b87ef69c6b3087330bdf46c8de7107a15 --- /dev/null +++ b/models/research/learned_optimizer/problems/problem_sets.py @@ -0,0 +1,561 @@ +# Copyright 2017 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Groups of problems of different types for optimizer training.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf + +from learned_optimizer.problems import datasets +from learned_optimizer.problems import model_adapter +from learned_optimizer.problems import problem_generator as pg +from learned_optimizer.problems import problem_spec + +_Spec = problem_spec.Spec + + +def quadratic_problems(): + return [ + (_Spec(pg.Quadratic, (20,), {}), None, None), + (_Spec(pg.Quadratic, (25,), {}), None, None), + (_Spec(pg.Quadratic, (50,), {}), None, None), + (_Spec(pg.Quadratic, (100,), {}), None, None), + ] + + +# Note: this group contains one non-noisy problem for historical reasons. The +# original training set before the refactor included this set of quadratics. +def quadratic_problems_noisy(): + return [ + (_Spec(pg.Quadratic, (20,), {"noise_stdev": 0.5}), None, None), + (_Spec(pg.Quadratic, (25,), {"noise_stdev": 0.0}), None, None), + (_Spec(pg.Quadratic, (50,), {"noise_stdev": 1.0}), None, None), + (_Spec(pg.Quadratic, (100,), {"noise_stdev": 2.0}), None, None), + ] + + +def quadratic_problems_large(): + return [ + (_Spec(pg.Quadratic, (784,), {}), None, None), + (_Spec(pg.Quadratic, (1024,), {}), None, None), + (_Spec(pg.Quadratic, (2048,), {}), None, None), + ] + + +def bowl_problems(): + return [ + (_Spec(pg.Bowl, (0.1,), {"noise_stdev": 0.0}), None, None), + (_Spec(pg.Bowl, (1.0,), {"noise_stdev": 0.0}), None, None), + (_Spec(pg.Bowl, (5.0,), {"noise_stdev": 0.0}), None, None), + (_Spec(pg.Bowl, (5.0,), {"noise_stdev": 0.0, "angle": np.pi / 4.}), + None, None), + ] + + +def bowl_problems_noisy(): + return [ + (_Spec(pg.Bowl, (0.1,), {"noise_stdev": 0.1}), None, None), + (_Spec(pg.Bowl, (1.0,), {"noise_stdev": 0.1}), None, None), + (_Spec(pg.Bowl, (5.0,), {"noise_stdev": 0.1}), None, None), + (_Spec(pg.Bowl, (5.0,), {"noise_stdev": 0.1, "angle": np.pi / 4.}), + None, None), + ] + + +def sparse_softmax_2_class_sparse_problems(): + return [(_Spec(pg.SparseSoftmaxRegression, (5, 2), {"noise_stdev": 0.0}), + datasets.noisy_parity_class(5, random_seed=123), 23),] + + +def one_hot_sparse_softmax_2_class_sparse_problems(): + return [ + (_Spec(pg.OneHotSparseSoftmaxRegression, (5, 2), {"noise_stdev": 0.0}), + datasets.noisy_parity_class(5, random_seed=123), 23), + ] + + +def softmax_2_class_problems(): + return [ + (_Spec(pg.SoftmaxRegression, (10, 2), {}), datasets.random( + 10, 1000, random_seed=123, sep=2.0), 100), + (_Spec(pg.SoftmaxRegression, (100, 2), {}), datasets.random( + 100, 1000, random_seed=123), 50), + (_Spec(pg.SoftmaxRegression, (200, 2), {}), datasets.random( + 200, 1000, random_seed=123, sep=1.5), 20), + (_Spec(pg.SoftmaxRegression, (256, 2), {}), datasets.random( + 256, 1000, random_seed=123, sep=1.5), 100), + ] + + +def softmax_2_class_problems_noisy(): + return [ + (_Spec(pg.SoftmaxRegression, (10, 2), {"noise_stdev": 0.5}), + datasets.random(10, 1000, random_seed=123, 
sep=2.0), 100), + (_Spec(pg.SoftmaxRegression, (100, 2), {"noise_stdev": 0.1}), + datasets.random(100, 1000, random_seed=123), 50), + (_Spec(pg.SoftmaxRegression, (200, 2), {"noise_stdev": 0.1}), + datasets.random(200, 1000, random_seed=123, sep=1.5), 20), + (_Spec(pg.SoftmaxRegression, (256, 2), {"noise_stdev": 0.5}), + datasets.random(256, 1000, random_seed=123, sep=1.5), 100), + ] + + +def optimization_test_problems(): + return [ + (_Spec(pg.Ackley, (), {}), None, None), + (_Spec(pg.Beale, (), {}), None, None), + (_Spec(pg.Booth, (), {}), None, None), + (_Spec(pg.Branin, (), {}), None, None), + (_Spec(pg.LogSumExp, (), {}), None, None), + (_Spec(pg.Matyas, (), {}), None, None), + (_Spec(pg.Michalewicz, (), {}), None, None), + (_Spec(pg.Rosenbrock, (), {}), None, None), + (_Spec(pg.StyblinskiTang, (), {}), None, None), + ] + + +def optimization_test_problems_noisy(): + return [ + (_Spec(pg.Ackley, (), {"noise_stdev": 1.}), None, None), + (_Spec(pg.Beale, (), {"noise_stdev": 1.}), None, None), + (_Spec(pg.Booth, (), {"noise_stdev": 1.}), None, None), + (_Spec(pg.Branin, (), {"noise_stdev": 1.}), None, None), + (_Spec(pg.LogSumExp, (), {"noise_stdev": 1.}), None, None), + (_Spec(pg.Matyas, (), {"noise_stdev": 1.}), None, None), + (_Spec(pg.Michalewicz, (), {"noise_stdev": 1.}), None, None), + (_Spec(pg.Rosenbrock, (), {"noise_stdev": 1.}), None, None), + (_Spec(pg.StyblinskiTang, (), {"noise_stdev": 1.}), None, None), + ] + + +def fully_connected_random_2_class_problems(): + return [ + (_Spec(pg.FullyConnected, (8, 2), + {"hidden_sizes": (8, 5,), "activation": tf.nn.sigmoid}), + datasets.random_mlp(8, 1000), 10), + (_Spec(pg.FullyConnected, (12, 2), + {"hidden_sizes": (8, 5, 3), "activation": tf.nn.sigmoid}), + datasets.random_mlp(12, 1000), 200), + (_Spec(pg.FullyConnected, (5, 2), + {"hidden_sizes": (4, 4, 4, 4,), "activation": tf.nn.sigmoid}), + datasets.random_mlp(5, 1000), 100), + (_Spec(pg.FullyConnected, (11, 2), + {"hidden_sizes": (4, 5, 6,), "activation": tf.nn.sigmoid}), + datasets.random_mlp(11, 1000), 64), + (_Spec(pg.FullyConnected, (9, 2), + {"hidden_sizes": (8,), "activation": tf.nn.sigmoid}), + datasets.random_mlp(9, 1000), 128), + (_Spec(pg.FullyConnected, (7, 2), + {"hidden_sizes": (8, 5,), "activation": tf.nn.sigmoid}), + datasets.random_mlp(7, 1000), 16), + (_Spec(pg.FullyConnected, (8, 2), + {"hidden_sizes": (32, 64,), "activation": tf.nn.sigmoid}), + datasets.random_mlp(8, 1000), 10), + (_Spec(pg.FullyConnected, (12, 2), + {"hidden_sizes": (16, 8, 3), "activation": tf.nn.sigmoid}), + datasets.random_mlp(12, 1000), 200), + (_Spec(pg.FullyConnected, (5, 2), + {"hidden_sizes": (8, 8, 8, 8,), "activation": tf.nn.sigmoid}), + datasets.random_mlp(5, 1000), 100), + (_Spec(pg.FullyConnected, (11, 2), + {"hidden_sizes": (10, 12, 12,), "activation": tf.nn.sigmoid}), + datasets.random_mlp(11, 1000), 64), + (_Spec(pg.FullyConnected, (9, 2), + {"hidden_sizes": (32,), "activation": tf.nn.sigmoid}), + datasets.random_mlp(9, 1000), 128), + (_Spec(pg.FullyConnected, (7, 2), + {"hidden_sizes": (32, 64,), "activation": tf.nn.sigmoid}), + datasets.random_mlp(7, 1000), 16), + ] + + +def matmul_problems(): + return sum([ + pg.matmul_problem_sequence(2, 5, 8), + pg.matmul_problem_sequence(3, 19, 24)], []) + + +def log_objective_problems(): + return [ + (_Spec(pg.LogObjective, [_Spec(pg.Quadratic, (20,), {})], {}), + None, None), + (_Spec(pg.LogObjective, [_Spec(pg.Quadratic, (50,), {})], {}), + None, None), + (_Spec(pg.LogObjective, [_Spec(pg.Quadratic, (100,), {})], {}), + None, None), + 
(_Spec(pg.LogObjective, [_Spec(pg.Bowl, (0.1,), {})], {}), None, None), + (_Spec(pg.LogObjective, [_Spec(pg.Bowl, (1.0,), {})], {}), None, None), + (_Spec(pg.LogObjective, [_Spec(pg.Bowl, (5.0,), {})], {}), None, None), + ] + + +def sparse_gradient_problems(): + return [ + (_Spec(pg.SparseProblem, [_Spec(pg.Quadratic, (20,), {})], {}), + None, None), + (_Spec(pg.SparseProblem, [_Spec(pg.Quadratic, (50,), {})], {}), + None, None), + (_Spec(pg.SparseProblem, [_Spec(pg.Quadratic, (100,), {})], {}), + None, None), + (_Spec(pg.SparseProblem, [_Spec(pg.Bowl, (0.1,), {})], {}), None, None), + (_Spec(pg.SparseProblem, [_Spec(pg.Bowl, (1.0,), {})], {}), None, None), + (_Spec(pg.SparseProblem, [_Spec(pg.Bowl, (5.0,), {})], {}), None, None), + ] + + +def sparse_gradient_problems_mlp(): + return [ + (_Spec(pg.SparseProblem, [ + _Spec(pg.FullyConnected, (8, 2), { + "hidden_sizes": (8, 5,), + "activation": tf.nn.sigmoid + }) + ], {}), datasets.random_mlp(8, 1000), 10), + (_Spec(pg.SparseProblem, [ + _Spec(pg.FullyConnected, (12, 2), { + "hidden_sizes": (8, 5, 3), + "activation": tf.nn.sigmoid + }) + ], {}), datasets.random_mlp(12, 1000), 200), + (_Spec(pg.SparseProblem, [ + _Spec(pg.FullyConnected, (5, 2), { + "hidden_sizes": (4, 4, 4, 4,), + "activation": tf.nn.sigmoid + }) + ], {}), datasets.random_mlp(5, 1000), 100), + ] + + +def rescale_problems(): + return [ + (_Spec(pg.Rescale, [_Spec(pg.Norm, (18,), {"norm_power": 2.5})], + {"scale": 0.123}), None, None), + (_Spec(pg.Rescale, [_Spec(pg.Norm, (18,), {"norm_power": 1.5})], + {"scale": 8}), None, None), + (_Spec(pg.Rescale, [_Spec(pg.Norm, (18,), {"norm_power": 2.})], + {"scale": 50}), None, None), + (_Spec(pg.Rescale, [_Spec(pg.Norm, (18,), {"norm_power": 3.})], + {"scale": 200}), None, None), + (_Spec(pg.Rescale, [_Spec(pg.Norm, (18,), {"norm_power": 1.})], + {"scale": 1000}), None, None), + (_Spec(pg.Rescale, [_Spec(pg.Quadratic, (20,), {})], {"scale": 0.1}), + None, None), + (_Spec(pg.Rescale, [_Spec(pg.Quadratic, (25,), {})], {"scale": 10.}), + None, None), + (_Spec(pg.Rescale, [_Spec(pg.Quadratic, (50,), {})], {"scale": 350.}), + None, None), + (_Spec(pg.Rescale, [_Spec(pg.Quadratic, (100,), {})], {"scale": 132}), + None, None), + ] + + +def norm_problems(): + return [ + # < 1 Norm causes NaN gradients early in training. + (_Spec(pg.Norm, (27,), {"norm_power": 1.}), None, None), + (_Spec(pg.Norm, (25,), {"norm_power": 2.}), None, None), + (_Spec(pg.Norm, (22,), {"norm_power": 3.}), None, None), + ] + + +def norm_problems_noisy(): + return [ + # < 1 Norm causes NaN gradients early in training. 
+ (_Spec(pg.Norm, (19,), {"noise_stdev": .1, "norm_power": 1.}), + None, None), + (_Spec(pg.Norm, (26,), {"noise_stdev": .1, "norm_power": 2.}), + None, None), + (_Spec(pg.Norm, (23,), {"noise_stdev": .1, "norm_power": 3.}), + None, None), + ] + + +def sum_problems(): + return [ + (_Spec(pg.SumTask, [[ + _Spec(pg.Quadratic, (11,), {}), + _Spec(pg.Quadratic, (3,), {}), + _Spec(pg.Quadratic, (9,), {}), + _Spec(pg.Quadratic, (7,), {}), + _Spec(pg.Quadratic, (5,), {}), + _Spec(pg.Quadratic, (13,), {}), + _Spec(pg.Quadratic, (12,), {}) + ]], {}), None, None), + (_Spec(pg.SumTask, [[ + _Spec(pg.Norm, (18,), {"norm_power": 3}), + _Spec(pg.Quadratic, (25,), {}), + _Spec(pg.Rosenbrock, (), {}) + ]], {}), None, None), + (_Spec(pg.SumTask, [[ + _Spec(pg.Rosenbrock, (), {}), + _Spec(pg.LogSumExp, (), {}), + _Spec(pg.Ackley, (), {}), + _Spec(pg.Beale, (), {}), + _Spec(pg.Booth, (), {}), + _Spec(pg.StyblinskiTang, (), {}), + _Spec(pg.Matyas, (), {}), + _Spec(pg.Branin, (), {}), + _Spec(pg.Michalewicz, (), {}) + ]], {}), None, None), + (_Spec(pg.SumTask, [[ + _Spec(pg.Rosenbrock, (), {}), + _Spec(pg.LogSumExp, (), {}), + _Spec(pg.Ackley, (), {}), + _Spec(pg.Beale, (), {}), + _Spec(pg.Booth, (), {}), + _Spec(pg.StyblinskiTang, (), {}), + _Spec(pg.Matyas, (), {}), + _Spec(pg.Branin, (), {}), + _Spec(pg.Michalewicz, (), {}), + _Spec(pg.Quadratic, (5,), {}), + _Spec(pg.Quadratic, (13,), {}) + ]], {}), None, None), + (_Spec(pg.SumTask, [[ + _Spec(pg.Quadratic, (11,), {}), + _Spec(pg.Quadratic, (3,), {}) + ]], {}), None, None), + (_Spec(pg.SumTask, [[ + _Spec(pg.Rosenbrock, (), {}), + _Spec(pg.LogSumExp, (), {}), + _Spec(pg.Ackley, (), {}) + ]], {}), None, None), + ] + + +def sum_problems_noisy(): + return [ + (_Spec(pg.SumTask, [[ + _Spec(pg.Quadratic, (11,), {"noise_stdev": 0.1}), + _Spec(pg.Quadratic, (3,), {"noise_stdev": 0.1}), + _Spec(pg.Quadratic, (9,), {"noise_stdev": 0.1}), + _Spec(pg.Quadratic, (7,), {"noise_stdev": 0.1}), + _Spec(pg.Quadratic, (5,), {"noise_stdev": 0.1}), + _Spec(pg.Quadratic, (13,), {"noise_stdev": 0.1}), + _Spec(pg.Quadratic, (12,), {"noise_stdev": 0.1}) + ]], {}), None, None), + (_Spec(pg.SumTask, [[ + _Spec(pg.Rosenbrock, (), {}), + _Spec(pg.LogSumExp, (), {}), + _Spec(pg.Ackley, (), {}), + _Spec(pg.Beale, (), {}), + _Spec(pg.Booth, (), {}), + _Spec(pg.StyblinskiTang, (), {}), + _Spec(pg.Matyas, (), {}), + _Spec(pg.Branin, (), {}), + _Spec(pg.Michalewicz, (), {}), + _Spec(pg.Quadratic, (5,), {}), + _Spec(pg.Quadratic, (13,), {"noise_stdev": 0.5}) + ]], {}), None, None), + ] + + +def dependency_chain_problems(): + return [ + (_Spec(pg.DependencyChain, (20,), {}), datasets.random_binary( + 20, 1000), 100), + (_Spec(pg.DependencyChain, (12,), {}), datasets.random_binary( + 12, 200), 10), + (_Spec(pg.DependencyChain, (56,), {}), datasets.random_binary( + 56, 5000), 100), + (_Spec(pg.DependencyChain, (64,), {}), datasets.random_binary( + 64, 1000), 50), + (_Spec(pg.DependencyChain, (13,), {}), datasets.random_binary( + 13, 10000), 50), + (_Spec(pg.DependencyChain, (20,), {}), datasets.random_binary( + 20, 1000), 128), + (_Spec(pg.DependencyChain, (12,), {}), datasets.random_binary( + 12, 300), 16), + (_Spec(pg.DependencyChain, (56,), {}), datasets.random_binary( + 56, 5000), 128), + (_Spec(pg.DependencyChain, (64,), {}), datasets.random_binary( + 64, 1000), 64), + (_Spec(pg.DependencyChain, (13,), {}), datasets.random_binary( + 13, 10000), 32), + ] + + +def outward_snake_problems(): + return [ + (_Spec(pg.OutwardSnake, (20,), {}), datasets.random_binary( + 20, 1000), 100), + 
(_Spec(pg.OutwardSnake, (12,), {}), datasets.random_binary( + 12, 200), 10), + (_Spec(pg.OutwardSnake, (56,), {}), datasets.random_binary( + 56, 5000), 100), + (_Spec(pg.OutwardSnake, (64,), {}), datasets.random_binary( + 64, 1000), 50), + (_Spec(pg.OutwardSnake, (13,), {}), datasets.random_binary( + 13, 10000), 50), + (_Spec(pg.OutwardSnake, (20,), {}), datasets.random_binary( + 20, 1000), 128), + (_Spec(pg.OutwardSnake, (12,), {}), datasets.random_binary( + 12, 300), 16), + (_Spec(pg.OutwardSnake, (56,), {}), datasets.random_binary( + 56, 5000), 128), + (_Spec(pg.OutwardSnake, (64,), {}), datasets.random_binary( + 64, 1000), 64), + (_Spec(pg.OutwardSnake, (13,), {}), datasets.random_binary( + 13, 10000), 32), + ] + + +def min_max_well_problems(): + return [ + (_Spec(pg.MinMaxWell, (20,), {}), None, None), + (_Spec(pg.MinMaxWell, (12,), {}), None, None), + (_Spec(pg.MinMaxWell, (56,), {}), None, None), + (_Spec(pg.MinMaxWell, (64,), {}), None, None), + (_Spec(pg.MinMaxWell, (13,), {}), None, None), + ] + + +def sum_of_quadratics_problems(): + return [ + (_Spec(pg.SumOfQuadratics, (20,), {}), + datasets.random_symmetric(20, 1000), 100), + (_Spec(pg.SumOfQuadratics, (12,), {}), + datasets.random_symmetric(12, 100), 10), + (_Spec(pg.SumOfQuadratics, (56,), {}), + datasets.random_symmetric(56, 5000), 100), + (_Spec(pg.SumOfQuadratics, (64,), {}), + datasets.random_symmetric(64, 1000), 50), + (_Spec(pg.SumOfQuadratics, (13,), {}), + datasets.random_symmetric(13, 10000), 50), + (_Spec(pg.SumOfQuadratics, (20,), {}), + datasets.random_symmetric(20, 1000), 128), + (_Spec(pg.SumOfQuadratics, (12,), {}), + datasets.random_symmetric(12, 100), 16), + (_Spec(pg.SumOfQuadratics, (56,), {}), + datasets.random_symmetric(56, 5000), 128), + (_Spec(pg.SumOfQuadratics, (64,), {}), + datasets.random_symmetric(64, 1000), 64), + (_Spec(pg.SumOfQuadratics, (13,), {}), + datasets.random_symmetric(13, 10000), 32), + ] + + +def projection_quadratic_problems(): + return [ + (_Spec(pg.ProjectionQuadratic, (20,), {}), + datasets.random_symmetric(20, 1000), 100), + (_Spec(pg.ProjectionQuadratic, (12,), {}), + datasets.random_symmetric(12, 100), 10), + (_Spec(pg.ProjectionQuadratic, (56,), {}), + datasets.random_symmetric(56, 5000), 100), + (_Spec(pg.ProjectionQuadratic, (64,), {}), + datasets.random_symmetric(64, 1000), 50), + (_Spec(pg.ProjectionQuadratic, (13,), {}), + datasets.random_symmetric(13, 10000), 50), + (_Spec(pg.ProjectionQuadratic, (20,), {}), + datasets.random_symmetric(20, 1000), 128), + (_Spec(pg.ProjectionQuadratic, (12,), {}), + datasets.random_symmetric(12, 100), 16), + (_Spec(pg.ProjectionQuadratic, (56,), {}), + datasets.random_symmetric(56, 5000), 128), + (_Spec(pg.ProjectionQuadratic, (64,), {}), + datasets.random_symmetric(64, 1000), 64), + (_Spec(pg.ProjectionQuadratic, (13,), {}), + datasets.random_symmetric(13, 10000), 32), + ] + + +def adapter_rosenbrock_local(): + return [(_Spec(model_adapter.ModelAdapter, + (pg.make_rosenbrock_loss_and_init,), {}), None, None),] + + +def adapter_rosenbrock_worker(): + return [(_Spec(model_adapter.ModelAdapter, + (pg.make_rosenbrock_loss_and_init,), + {"device": "/job:worker"}), None, None),] + + +def _test_problem_mlp_scaled_init_small(): + return [ + np.random.randn(10, 32) * np.sqrt(2./10), + np.random.randn(32,) * 0.1, + np.random.randn(32, 64) * np.sqrt(2./32.), + np.random.randn(64,) * 0.1, + np.random.randn(64, 2) * np.sqrt(2./64.), + np.random.randn(2,) * 0.1 + ] + + +def _test_problem_mlp_scaled_init_large(): + return [ + np.random.randn(20, 32) * 
np.sqrt(2./20), + np.random.randn(32,) * 0.1, + np.random.randn(32, 64) * np.sqrt(2./32.), + np.random.randn(64,) * 0.1, + np.random.randn(64, 10) * np.sqrt(2./64.), + np.random.randn(10,) * 0.1 + ] + + +def _test_problem_mlp_scaled_init_mnist(): + return [ + np.random.randn(784, 64) * np.sqrt(2./784.), + np.random.randn(64,) * 0.1, + np.random.randn(64, 10) * np.sqrt(2./ 64.), + np.random.randn(10,) * 0.1, + ] + + +# Wrap this construction in a function to avoid UnparsedFlagAccessError +def test_problems(): + """Test problems for visualizations.""" + # Unlike the training problem sets, these test problems are made up of + # length-5 tuples. The final items in the tuple are the name of the problem + # and the initialization random_seed for testing consistency. + tp = [ + (_Spec(pg.Quadratic, (20,), {"random_seed": 1234}), None, None, + "quad_problem", 5678), + (_Spec(pg.Quadratic, (20,), {"noise_stdev": 1.0, "random_seed": 1234}), + None, None, "quad_problem_noise", 5678), + (_Spec(pg.Rosenbrock, (), {"random_seed": 1234}), None, None, + "rosenbrock", 5678), + (_Spec(pg.Rosenbrock, (), {"random_seed": 1234, "noise_stdev": 1.0}), + None, None, "rosenbrock_noise", 5678), + (_Spec(pg.SoftmaxRegression, (10, 2), {}), datasets.random( + 10, 10000, random_seed=1234), 100, "softmax", 5678), + (_Spec(pg.SoftmaxRegression, (10, 2), {"noise_stdev": 1.0}), + datasets.random(10, 10000, random_seed=1234), 100, "softmax_noise", + 5678), + (_Spec(pg.FullyConnected, (10, 2), {}), datasets.random( + 10, 10000, random_seed=1234), 100, "mlp_small", + _test_problem_mlp_scaled_init_small()), + (_Spec(pg.FullyConnected, (20, 10), {}), datasets.random( + 20, 10000, n_classes=10, random_seed=1234), 100, "mlp_large", + _test_problem_mlp_scaled_init_large()), + (_Spec(pg.FullyConnected, (784, 10), + {"hidden_sizes": (64,), "activation": tf.nn.sigmoid}), + datasets.mnist(), 64, "mlp_mnist_sigmoid", + _test_problem_mlp_scaled_init_mnist()), + (_Spec(pg.FullyConnected, (784, 10), + {"hidden_sizes": (64,), "activation": tf.nn.relu}), + datasets.mnist(), 64, "mlp_mnist_relu", + _test_problem_mlp_scaled_init_mnist()), + (_Spec(pg.ConvNet, ((1, 28, 28), 10, [(3, 3, 8), (5, 5, 8)]), + {"activation": tf.nn.sigmoid}), datasets.mnist(), 64, + "convnet_mnist_sigmoid", None), + (_Spec(pg.ConvNet, ((1, 28, 28), 10, [(3, 3, 8), (5, 5, 8)]), + {"activation": tf.nn.relu}), datasets.mnist(), 64, + "convnet_mnist_relu", None), + ] + return tp diff --git a/models/research/learned_optimizer/problems/problem_spec.py b/models/research/learned_optimizer/problems/problem_spec.py new file mode 100644 index 0000000000000000000000000000000000000000..e30c47b277e5c8b3b8aba3b8d691a2af3a595ef6 --- /dev/null +++ b/models/research/learned_optimizer/problems/problem_spec.py @@ -0,0 +1,33 @@ +# Copyright 2017 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Wrapper around a training problem.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from collections import namedtuple + + +class Spec(namedtuple("Spec", "callable args kwargs")): + """Syntactic sugar for keeping track of a function/class + args.""" + + # Since this is an immutable object, we don't need to reserve slots. + __slots__ = () + + def build(self): + """Returns the output of the callable.""" + return self.callable(*self.args, **self.kwargs) diff --git a/models/research/learning_to_remember_rare_events/README.md b/models/research/learning_to_remember_rare_events/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2eeadea784d4d22efc88c56e482c5d5374c90e24 --- /dev/null +++ b/models/research/learning_to_remember_rare_events/README.md @@ -0,0 +1,61 @@ +![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +--- + +Code for the Memory Module as described +in "Learning to Remember Rare Events" by +Lukasz Kaiser, Ofir Nachum, Aurko Roy, and Samy Bengio +published as a conference paper at ICLR 2017. + +Requirements: +* TensorFlow (see tensorflow.org for how to install) +* Some basic command-line utilities (git, unzip). + +Description: + +The general memory module is located in memory.py. +Some code is provided to see the memory module in +action on the standard Omniglot dataset. +Download and setup the dataset using data_utils.py +and then run the training script train.py +(see example commands below). + +Note that the structure and parameters of the model +are optimized for the data preparation as provided. + +Quick Start: + +First download and set-up Omniglot data by running + +``` +python data_utils.py +``` + +Then run the training script: + +``` +python train.py --memory_size=8192 \ + --batch_size=16 --validation_length=50 \ + --episode_width=5 --episode_length=30 +``` + +The first validation batch may look like this (although it is noisy): +``` +0-shot: 0.040, 1-shot: 0.404, 2-shot: 0.516, 3-shot: 0.604, + 4-shot: 0.656, 5-shot: 0.684 +``` +At step 500 you may see something like this: +``` +0-shot: 0.036, 1-shot: 0.836, 2-shot: 0.900, 3-shot: 0.940, + 4-shot: 0.944, 5-shot: 0.916 +``` +At step 4000 you may see something like this: +``` +0-shot: 0.044, 1-shot: 0.960, 2-shot: 1.000, 3-shot: 0.988, + 4-shot: 0.972, 5-shot: 0.992 +``` + +Maintained by Ofir Nachum (ofirnachum) and +Lukasz Kaiser (lukaszkaiser). diff --git a/models/research/learning_to_remember_rare_events/data_utils.py b/models/research/learning_to_remember_rare_events/data_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..03d5dafb251d4e058a6780b447aabdcd1a84a1d4 --- /dev/null +++ b/models/research/learning_to_remember_rare_events/data_utils.py @@ -0,0 +1,243 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ============================================================================== +"""Data loading and other utilities. + +Use this file to first copy over and pre-process the Omniglot dataset. +Simply call + python data_utils.py +""" + +import logging +import os +import subprocess +from six.moves import cPickle as pickle + +import numpy as np +from scipy.misc import imresize +from scipy.misc import imrotate +from scipy.ndimage import imread +from six.moves import xrange +import tensorflow as tf + + +MAIN_DIR = '' +REPO_LOCATION = 'https://github.com/brendenlake/omniglot.git' +REPO_DIR = os.path.join(MAIN_DIR, 'omniglot') +DATA_DIR = os.path.join(REPO_DIR, 'python') +TRAIN_DIR = os.path.join(DATA_DIR, 'images_background') +TEST_DIR = os.path.join(DATA_DIR, 'images_evaluation') +DATA_FILE_FORMAT = os.path.join(MAIN_DIR, '%s_omni.pkl') + +TRAIN_ROTATIONS = True # augment training data with rotations +TEST_ROTATIONS = False # augment testing data with rotations +IMAGE_ORIGINAL_SIZE = 105 +IMAGE_NEW_SIZE = 28 + + +def get_data(): + """Get data in form suitable for episodic training. + + Returns: + Train and test data as dictionaries mapping + label to list of examples. + """ + with tf.gfile.GFile(DATA_FILE_FORMAT % 'train', 'rb') as f: + processed_train_data = pickle.load(f) + with tf.gfile.GFile(DATA_FILE_FORMAT % 'test', 'rb') as f: + processed_test_data = pickle.load(f) + + train_data = {} + test_data = {} + + for data, processed_data in zip([train_data, test_data], + [processed_train_data, processed_test_data]): + for image, label in zip(processed_data['images'], + processed_data['labels']): + if label not in data: + data[label] = [] + data[label].append(image.reshape([-1]).astype('float32')) + + intersection = set(train_data.keys()) & set(test_data.keys()) + assert not intersection, 'Train and test data intersect.' + ok_num_examples = [len(ll) == 20 for _, ll in train_data.items()] + assert all(ok_num_examples), 'Bad number of examples in train data.' + ok_num_examples = [len(ll) == 20 for _, ll in test_data.items()] + assert all(ok_num_examples), 'Bad number of examples in test data.' 
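+  # Each Omniglot character class provides exactly 20 hand-drawn examples, which is what the checks above verify.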
+ + logging.info('Number of labels in train data: %d.', len(train_data)) + logging.info('Number of labels in test data: %d.', len(test_data)) + + return train_data, test_data + + +def crawl_directory(directory, augment_with_rotations=False, + first_label=0): + """Crawls data directory and returns stuff.""" + label_idx = first_label + images = [] + labels = [] + info = [] + + # traverse root directory + for root, _, files in os.walk(directory): + logging.info('Reading files from %s', root) + fileflag = 0 + for file_name in files: + full_file_name = os.path.join(root, file_name) + img = imread(full_file_name, flatten=True) + for i, angle in enumerate([0, 90, 180, 270]): + if not augment_with_rotations and i > 0: + break + + images.append(imrotate(img, angle)) + labels.append(label_idx + i) + info.append(full_file_name) + + fileflag = 1 + + if fileflag: + label_idx += 4 if augment_with_rotations else 1 + + return images, labels, info + + +def resize_images(images, new_width, new_height): + """Resize images to new dimensions.""" + resized_images = np.zeros([images.shape[0], new_width, new_height], + dtype=np.float32) + + for i in range(images.shape[0]): + resized_images[i, :, :] = imresize(images[i, :, :], + [new_width, new_height], + interp='bilinear', + mode=None) + return resized_images + + +def write_datafiles(directory, write_file, + resize=True, rotate=False, + new_width=IMAGE_NEW_SIZE, new_height=IMAGE_NEW_SIZE, + first_label=0): + """Load and preprocess images from a directory and write them to a file. + + Args: + directory: Directory of alphabet sub-directories. + write_file: Filename to write to. + resize: Whether to resize the images. + rotate: Whether to augment the dataset with rotations. + new_width: New resize width. + new_height: New resize height. + first_label: Label to start with. + + Returns: + Number of new labels created. 
+ """ + + # these are the default sizes for Omniglot: + imgwidth = IMAGE_ORIGINAL_SIZE + imgheight = IMAGE_ORIGINAL_SIZE + + logging.info('Reading the data.') + images, labels, info = crawl_directory(directory, + augment_with_rotations=rotate, + first_label=first_label) + + images_np = np.zeros([len(images), imgwidth, imgheight], dtype=np.bool) + labels_np = np.zeros([len(labels)], dtype=np.uint32) + for i in xrange(len(images)): + images_np[i, :, :] = images[i] + labels_np[i] = labels[i] + + if resize: + logging.info('Resizing images.') + resized_images = resize_images(images_np, new_width, new_height) + + logging.info('Writing resized data in float32 format.') + data = {'images': resized_images, + 'labels': labels_np, + 'info': info} + with tf.gfile.GFile(write_file, 'w') as f: + pickle.dump(data, f) + else: + logging.info('Writing original sized data in boolean format.') + data = {'images': images_np, + 'labels': labels_np, + 'info': info} + with tf.gfile.GFile(write_file, 'w') as f: + pickle.dump(data, f) + + return len(np.unique(labels_np)) + + +def maybe_download_data(): + """Download Omniglot repo if it does not exist.""" + if os.path.exists(REPO_DIR): + logging.info('It appears that Git repo already exists.') + else: + logging.info('It appears that Git repo does not exist.') + logging.info('Cloning now.') + + subprocess.check_output('git clone %s' % REPO_LOCATION, shell=True) + + if os.path.exists(TRAIN_DIR): + logging.info('It appears that train data has already been unzipped.') + else: + logging.info('It appears that train data has not been unzipped.') + logging.info('Unzipping now.') + + subprocess.check_output('unzip %s.zip -d %s' % (TRAIN_DIR, DATA_DIR), + shell=True) + + if os.path.exists(TEST_DIR): + logging.info('It appears that test data has already been unzipped.') + else: + logging.info('It appears that test data has not been unzipped.') + logging.info('Unzipping now.') + + subprocess.check_output('unzip %s.zip -d %s' % (TEST_DIR, DATA_DIR), + shell=True) + + +def preprocess_omniglot(): + """Download and prepare raw Omniglot data. + + Downloads the data from GitHub if it does not exist. + Then load the images, augment with rotations if desired. + Resize the images and write them to a pickle file. + """ + + maybe_download_data() + + directory = TRAIN_DIR + write_file = DATA_FILE_FORMAT % 'train' + num_labels = write_datafiles( + directory, write_file, resize=True, rotate=TRAIN_ROTATIONS, + new_width=IMAGE_NEW_SIZE, new_height=IMAGE_NEW_SIZE) + + directory = TEST_DIR + write_file = DATA_FILE_FORMAT % 'test' + write_datafiles(directory, write_file, resize=True, rotate=TEST_ROTATIONS, + new_width=IMAGE_NEW_SIZE, new_height=IMAGE_NEW_SIZE, + first_label=num_labels) + + +def main(unused_argv): + logging.basicConfig(level=logging.INFO) + preprocess_omniglot() + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/learning_to_remember_rare_events/memory.py b/models/research/learning_to_remember_rare_events/memory.py new file mode 100644 index 0000000000000000000000000000000000000000..2f40ff57f9434994f08b1ad97dc23142bb23daaa --- /dev/null +++ b/models/research/learning_to_remember_rare_events/memory.py @@ -0,0 +1,392 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ============================================================================== +"""Memory module for storing "nearest neighbors". + +Implements a key-value memory for generalized one-shot learning +as described in the paper +"Learning to Remember Rare Events" +by Lukasz Kaiser, Ofir Nachum, Aurko Roy, Samy Bengio, +published as a conference paper at ICLR 2017. +""" + +import numpy as np +from six.moves import xrange +import tensorflow as tf + + +class Memory(object): + """Memory module.""" + + def __init__(self, key_dim, memory_size, vocab_size, + choose_k=256, alpha=0.1, correct_in_top=1, age_noise=8.0, + var_cache_device='', nn_device=''): + self.key_dim = key_dim + self.memory_size = memory_size + self.vocab_size = vocab_size + self.choose_k = min(choose_k, memory_size) + self.alpha = alpha + self.correct_in_top = correct_in_top + self.age_noise = age_noise + self.var_cache_device = var_cache_device # Variables are cached here. + self.nn_device = nn_device # Device to perform nearest neighbour matmul. + + caching_device = var_cache_device if var_cache_device else None + self.update_memory = tf.constant(True) # Can be fed "false" if needed. + self.mem_keys = tf.get_variable( + 'memkeys', [self.memory_size, self.key_dim], trainable=False, + initializer=tf.random_uniform_initializer(-0.0, 0.0), + caching_device=caching_device) + self.mem_vals = tf.get_variable( + 'memvals', [self.memory_size], dtype=tf.int32, trainable=False, + initializer=tf.constant_initializer(0, tf.int32), + caching_device=caching_device) + self.mem_age = tf.get_variable( + 'memage', [self.memory_size], dtype=tf.float32, trainable=False, + initializer=tf.constant_initializer(0.0), caching_device=caching_device) + self.recent_idx = tf.get_variable( + 'recent_idx', [self.vocab_size], dtype=tf.int32, trainable=False, + initializer=tf.constant_initializer(0, tf.int32)) + + # variable for projecting query vector into memory key + self.query_proj = tf.get_variable( + 'memory_query_proj', [self.key_dim, self.key_dim], dtype=tf.float32, + initializer=tf.truncated_normal_initializer(0, 0.01), + caching_device=caching_device) + + def get(self): + return self.mem_keys, self.mem_vals, self.mem_age, self.recent_idx + + def set(self, k, v, a, r=None): + return tf.group( + self.mem_keys.assign(k), + self.mem_vals.assign(v), + self.mem_age.assign(a), + (self.recent_idx.assign(r) if r is not None else tf.group())) + + def clear(self): + return tf.variables_initializer([self.mem_keys, self.mem_vals, self.mem_age, + self.recent_idx]) + + def get_hint_pool_idxs(self, normalized_query): + """Get small set of idxs to compute nearest neighbor queries on. + + This is an expensive look-up on the whole memory that is used to + avoid more expensive operations later on. + + Args: + normalized_query: A Tensor of shape [None, key_dim]. + + Returns: + A Tensor of shape [None, choose_k] of indices in memory + that are closest to the queries. 
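+    The candidate indices returned here are re-scored inside query(), where gradients can still flow back through the query projection.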
+ + """ + # look up in large memory, no gradients + with tf.device(self.nn_device): + similarities = tf.matmul(tf.stop_gradient(normalized_query), + self.mem_keys, transpose_b=True, name='nn_mmul') + _, hint_pool_idxs = tf.nn.top_k( + tf.stop_gradient(similarities), k=self.choose_k, name='nn_topk') + return hint_pool_idxs + + def make_update_op(self, upd_idxs, upd_keys, upd_vals, + batch_size, use_recent_idx, intended_output): + """Function that creates all the update ops.""" + mem_age_incr = self.mem_age.assign_add(tf.ones([self.memory_size], + dtype=tf.float32)) + with tf.control_dependencies([mem_age_incr]): + mem_age_upd = tf.scatter_update( + self.mem_age, upd_idxs, tf.zeros([batch_size], dtype=tf.float32)) + + mem_key_upd = tf.scatter_update( + self.mem_keys, upd_idxs, upd_keys) + mem_val_upd = tf.scatter_update( + self.mem_vals, upd_idxs, upd_vals) + + if use_recent_idx: + recent_idx_upd = tf.scatter_update( + self.recent_idx, intended_output, upd_idxs) + else: + recent_idx_upd = tf.group() + + return tf.group(mem_age_upd, mem_key_upd, mem_val_upd, recent_idx_upd) + + def query(self, query_vec, intended_output, use_recent_idx=True): + """Queries memory for nearest neighbor. + + Args: + query_vec: A batch of vectors to query (embedding of input to model). + intended_output: The values that would be the correct output of the + memory. + use_recent_idx: Whether to always insert at least one instance of a + correct memory fetch. + + Returns: + A tuple (result, mask, teacher_loss). + result: The result of the memory look up. + mask: The affinity of the query to the result. + teacher_loss: The loss for training the memory module. + """ + + batch_size = tf.shape(query_vec)[0] + output_given = intended_output is not None + + # prepare query for memory lookup + query_vec = tf.matmul(query_vec, self.query_proj) + normalized_query = tf.nn.l2_normalize(query_vec, dim=1) + + hint_pool_idxs = self.get_hint_pool_idxs(normalized_query) + + if output_given and use_recent_idx: # add at least one correct memory + most_recent_hint_idx = tf.gather(self.recent_idx, intended_output) + hint_pool_idxs = tf.concat( + axis=1, + values=[hint_pool_idxs, tf.expand_dims(most_recent_hint_idx, 1)]) + choose_k = tf.shape(hint_pool_idxs)[1] + + with tf.device(self.var_cache_device): + # create small memory and look up with gradients + my_mem_keys = tf.stop_gradient(tf.gather(self.mem_keys, hint_pool_idxs, + name='my_mem_keys_gather')) + similarities = tf.matmul(tf.expand_dims(normalized_query, 1), + my_mem_keys, adjoint_b=True, name='batch_mmul') + hint_pool_sims = tf.squeeze(similarities, [1], name='hint_pool_sims') + hint_pool_mem_vals = tf.gather(self.mem_vals, hint_pool_idxs, + name='hint_pool_mem_vals') + # Calculate softmax mask on the top-k if requested. + # Softmax temperature. Say we have K elements at dist x and one at (x+a). + # Softmax of the last is e^tm(x+a)/Ke^tm*x + e^tm(x+a) = e^tm*a/K+e^tm*a. + # To make that 20% we'd need to have e^tm*a ~= 0.2K, so tm = log(0.2K)/a. 
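+      # With explicit grouping: the top item's weight is e^(tm*a) / (K + e^(tm*a)); here a is taken to be alpha, the margin used in the triplet loss below.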
+ softmax_temp = max(1.0, np.log(0.2 * self.choose_k) / self.alpha) + mask = tf.nn.softmax(hint_pool_sims[:, :choose_k - 1] * softmax_temp) + + # prepare returned values + nearest_neighbor = tf.to_int32( + tf.argmax(hint_pool_sims[:, :choose_k - 1], 1)) + + no_teacher_idxs = tf.gather( + tf.reshape(hint_pool_idxs, [-1]), + nearest_neighbor + choose_k * tf.range(batch_size)) + + with tf.device(self.var_cache_device): + result = tf.gather(self.mem_vals, tf.reshape(no_teacher_idxs, [-1])) + + if not output_given: + teacher_loss = None + return result, mask, teacher_loss + + # prepare hints from the teacher on hint pool + teacher_hints = tf.to_float( + tf.abs(tf.expand_dims(intended_output, 1) - hint_pool_mem_vals)) + teacher_hints = 1.0 - tf.minimum(1.0, teacher_hints) + + teacher_vals, teacher_hint_idxs = tf.nn.top_k( + hint_pool_sims * teacher_hints, k=1) + neg_teacher_vals, _ = tf.nn.top_k( + hint_pool_sims * (1 - teacher_hints), k=1) + + # bring back idxs to full memory + teacher_idxs = tf.gather( + tf.reshape(hint_pool_idxs, [-1]), + teacher_hint_idxs[:, 0] + choose_k * tf.range(batch_size)) + + # zero-out teacher_vals if there are no hints + teacher_vals *= ( + 1 - tf.to_float(tf.equal(0.0, tf.reduce_sum(teacher_hints, 1)))) + + # we'll determine whether to do an update to memory based on whether + # memory was queried correctly + sliced_hints = tf.slice(teacher_hints, [0, 0], [-1, self.correct_in_top]) + incorrect_memory_lookup = tf.equal(0.0, tf.reduce_sum(sliced_hints, 1)) + + # loss based on triplet loss + teacher_loss = (tf.nn.relu(neg_teacher_vals - teacher_vals + self.alpha) + - self.alpha) + + # prepare memory updates + update_keys = normalized_query + update_vals = intended_output + + fetched_idxs = teacher_idxs # correctly fetched from memory + with tf.device(self.var_cache_device): + fetched_keys = tf.gather(self.mem_keys, fetched_idxs, name='fetched_keys') + fetched_vals = tf.gather(self.mem_vals, fetched_idxs, name='fetched_vals') + + # do memory updates here + fetched_keys_upd = update_keys + fetched_keys # Momentum-like update + fetched_keys_upd = tf.nn.l2_normalize(fetched_keys_upd, dim=1) + # Randomize age a bit, e.g., to select different ones in parallel workers. + mem_age_with_noise = self.mem_age + tf.random_uniform( + [self.memory_size], - self.age_noise, self.age_noise) + + _, oldest_idxs = tf.nn.top_k(mem_age_with_noise, k=batch_size, sorted=False) + + with tf.control_dependencies([result]): + upd_idxs = tf.where(incorrect_memory_lookup, + oldest_idxs, + fetched_idxs) + # upd_idxs = tf.Print(upd_idxs, [upd_idxs], "UPD IDX", summarize=8) + upd_keys = tf.where(incorrect_memory_lookup, + update_keys, + fetched_keys_upd) + upd_vals = tf.where(incorrect_memory_lookup, + update_vals, + fetched_vals) + + def make_update_op(): + return self.make_update_op(upd_idxs, upd_keys, upd_vals, + batch_size, use_recent_idx, intended_output) + + update_op = tf.cond(self.update_memory, make_update_op, tf.no_op) + + with tf.control_dependencies([update_op]): + result = tf.identity(result) + mask = tf.identity(mask) + teacher_loss = tf.identity(teacher_loss) + + return result, mask, tf.reduce_mean(teacher_loss) + + +class LSHMemory(Memory): + """Memory employing locality sensitive hashing. + + Note: Not fully tested. 
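+  Keys are bucketed by the signs of random-hyperplane projections, and a query only scores candidates gathered from its own buckets, trading exact nearest-neighbor search for speed.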
+ """ + + def __init__(self, key_dim, memory_size, vocab_size, + choose_k=256, alpha=0.1, correct_in_top=1, age_noise=8.0, + var_cache_device='', nn_device='', + num_hashes=None, num_libraries=None): + super(LSHMemory, self).__init__( + key_dim, memory_size, vocab_size, + choose_k=choose_k, alpha=alpha, correct_in_top=1, age_noise=age_noise, + var_cache_device=var_cache_device, nn_device=nn_device) + + self.num_libraries = num_libraries or int(self.choose_k ** 0.5) + self.num_per_hash_slot = max(1, self.choose_k // self.num_libraries) + self.num_hashes = (num_hashes or + int(np.log2(self.memory_size / self.num_per_hash_slot))) + self.num_hashes = min(max(self.num_hashes, 1), 20) + self.num_hash_slots = 2 ** self.num_hashes + + # hashing vectors + self.hash_vecs = [ + tf.get_variable( + 'hash_vecs%d' % i, [self.num_hashes, self.key_dim], + dtype=tf.float32, trainable=False, + initializer=tf.truncated_normal_initializer(0, 1)) + for i in xrange(self.num_libraries)] + + # map representing which hash slots map to which mem keys + self.hash_slots = [ + tf.get_variable( + 'hash_slots%d' % i, [self.num_hash_slots, self.num_per_hash_slot], + dtype=tf.int32, trainable=False, + initializer=tf.random_uniform_initializer(maxval=self.memory_size, + dtype=tf.int32)) + for i in xrange(self.num_libraries)] + + def get(self): # not implemented + return self.mem_keys, self.mem_vals, self.mem_age, self.recent_idx + + def set(self, k, v, a, r=None): # not implemented + return tf.group( + self.mem_keys.assign(k), + self.mem_vals.assign(v), + self.mem_age.assign(a), + (self.recent_idx.assign(r) if r is not None else tf.group())) + + def clear(self): + return tf.variables_initializer([self.mem_keys, self.mem_vals, self.mem_age, + self.recent_idx] + self.hash_slots) + + def get_hash_slots(self, query): + """Gets hashed-to buckets for batch of queries. + + Args: + query: 2-d Tensor of query vectors. + + Returns: + A list of hashed-to buckets for each hash function. + """ + + binary_hash = [ + tf.less(tf.matmul(query, self.hash_vecs[i], transpose_b=True), 0) + for i in xrange(self.num_libraries)] + hash_slot_idxs = [ + tf.reduce_sum( + tf.to_int32(binary_hash[i]) * + tf.constant([[2 ** i for i in xrange(self.num_hashes)]], + dtype=tf.int32), 1) + for i in xrange(self.num_libraries)] + return hash_slot_idxs + + def get_hint_pool_idxs(self, normalized_query): + """Get small set of idxs to compute nearest neighbor queries on. + + This is an expensive look-up on the whole memory that is used to + avoid more expensive operations later on. + + Args: + normalized_query: A Tensor of shape [None, key_dim]. + + Returns: + A Tensor of shape [None, choose_k] of indices in memory + that are closest to the queries. 
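+    Here the candidate pool is built by concatenating, across hash libraries, the memory rows stored in the buckets that the query hashes to.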
+ + """ + # get hash of query vecs + hash_slot_idxs = self.get_hash_slots(normalized_query) + + # grab mem idxs in the hash slots + hint_pool_idxs = [ + tf.maximum(tf.minimum( + tf.gather(self.hash_slots[i], idxs), + self.memory_size - 1), 0) + for i, idxs in enumerate(hash_slot_idxs)] + + return tf.concat(axis=1, values=hint_pool_idxs) + + def make_update_op(self, upd_idxs, upd_keys, upd_vals, + batch_size, use_recent_idx, intended_output): + """Function that creates all the update ops.""" + base_update_op = super(LSHMemory, self).make_update_op( + upd_idxs, upd_keys, upd_vals, + batch_size, use_recent_idx, intended_output) + + # compute hash slots to be updated + hash_slot_idxs = self.get_hash_slots(upd_keys) + + # make updates + update_ops = [] + with tf.control_dependencies([base_update_op]): + for i, slot_idxs in enumerate(hash_slot_idxs): + # for each slot, choose which entry to replace + entry_idx = tf.random_uniform([batch_size], + maxval=self.num_per_hash_slot, + dtype=tf.int32) + entry_mul = 1 - tf.one_hot(entry_idx, self.num_per_hash_slot, + dtype=tf.int32) + entry_add = (tf.expand_dims(upd_idxs, 1) * + tf.one_hot(entry_idx, self.num_per_hash_slot, + dtype=tf.int32)) + + mul_op = tf.scatter_mul(self.hash_slots[i], slot_idxs, entry_mul) + with tf.control_dependencies([mul_op]): + add_op = tf.scatter_add(self.hash_slots[i], slot_idxs, entry_add) + update_ops.append(add_op) + + return tf.group(*update_ops) diff --git a/models/research/learning_to_remember_rare_events/model.py b/models/research/learning_to_remember_rare_events/model.py new file mode 100644 index 0000000000000000000000000000000000000000..7a6b460047fda3349c04d0e024c035f69a300461 --- /dev/null +++ b/models/research/learning_to_remember_rare_events/model.py @@ -0,0 +1,302 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ============================================================================== +"""Model using memory component. + +The model embeds images using a standard CNN architecture. +These embeddings are used as keys to the memory component, +which returns nearest neighbors. +""" + +import tensorflow as tf + +import memory + +FLAGS = tf.flags.FLAGS + + +class BasicClassifier(object): + + def __init__(self, output_dim): + self.output_dim = output_dim + + def core_builder(self, memory_val, x, y): + del x, y + y_pred = memory_val + loss = 0.0 + + return loss, y_pred + + +class LeNet(object): + """Standard CNN architecture.""" + + def __init__(self, image_size, num_channels, hidden_dim): + self.image_size = image_size + self.num_channels = num_channels + self.hidden_dim = hidden_dim + self.matrix_init = tf.truncated_normal_initializer(stddev=0.1) + self.vector_init = tf.constant_initializer(0.0) + + def core_builder(self, x): + """Embeds x using standard CNN architecture. + + Args: + x: Batch of images as a 2-d Tensor [batch_size, -1]. + + Returns: + A 2-d Tensor [batch_size, hidden_dim] of embedded images. 
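+    The embedding is produced by two 3x3 conv blocks (two convolutions plus a 2x2 max-pool each), so the spatial resolution drops by a factor of 4 before the final fully connected layer (hence the image_size // 4 terms below).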
+ """ + + ch1 = 32 * 2 # number of channels in 1st layer + ch2 = 64 * 2 # number of channels in 2nd layer + conv1_weights = tf.get_variable('conv1_w', + [3, 3, self.num_channels, ch1], + initializer=self.matrix_init) + conv1_biases = tf.get_variable('conv1_b', [ch1], + initializer=self.vector_init) + conv1a_weights = tf.get_variable('conv1a_w', + [3, 3, ch1, ch1], + initializer=self.matrix_init) + conv1a_biases = tf.get_variable('conv1a_b', [ch1], + initializer=self.vector_init) + + conv2_weights = tf.get_variable('conv2_w', [3, 3, ch1, ch2], + initializer=self.matrix_init) + conv2_biases = tf.get_variable('conv2_b', [ch2], + initializer=self.vector_init) + conv2a_weights = tf.get_variable('conv2a_w', [3, 3, ch2, ch2], + initializer=self.matrix_init) + conv2a_biases = tf.get_variable('conv2a_b', [ch2], + initializer=self.vector_init) + + # fully connected + fc1_weights = tf.get_variable( + 'fc1_w', [self.image_size // 4 * self.image_size // 4 * ch2, + self.hidden_dim], initializer=self.matrix_init) + fc1_biases = tf.get_variable('fc1_b', [self.hidden_dim], + initializer=self.vector_init) + + # define model + x = tf.reshape(x, + [-1, self.image_size, self.image_size, self.num_channels]) + batch_size = tf.shape(x)[0] + + conv1 = tf.nn.conv2d(x, conv1_weights, + strides=[1, 1, 1, 1], padding='SAME') + relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases)) + conv1 = tf.nn.conv2d(relu1, conv1a_weights, + strides=[1, 1, 1, 1], padding='SAME') + relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1a_biases)) + + pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], + strides=[1, 2, 2, 1], padding='SAME') + + conv2 = tf.nn.conv2d(pool1, conv2_weights, + strides=[1, 1, 1, 1], padding='SAME') + relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases)) + conv2 = tf.nn.conv2d(relu2, conv2a_weights, + strides=[1, 1, 1, 1], padding='SAME') + relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2a_biases)) + + pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], + strides=[1, 2, 2, 1], padding='SAME') + + reshape = tf.reshape(pool2, [batch_size, -1]) + hidden = tf.matmul(reshape, fc1_weights) + fc1_biases + + return hidden + + +class Model(object): + """Model for coordinating between CNN embedder and Memory module.""" + + def __init__(self, input_dim, output_dim, rep_dim, memory_size, vocab_size, + learning_rate=0.0001, use_lsh=False): + self.input_dim = input_dim + self.output_dim = output_dim + self.rep_dim = rep_dim + self.memory_size = memory_size + self.vocab_size = vocab_size + self.learning_rate = learning_rate + self.use_lsh = use_lsh + + self.embedder = self.get_embedder() + self.memory = self.get_memory() + self.classifier = self.get_classifier() + + self.global_step = tf.train.get_or_create_global_step() + + def get_embedder(self): + return LeNet(int(self.input_dim ** 0.5), 1, self.rep_dim) + + def get_memory(self): + cls = memory.LSHMemory if self.use_lsh else memory.Memory + return cls(self.rep_dim, self.memory_size, self.vocab_size) + + def get_classifier(self): + return BasicClassifier(self.output_dim) + + def core_builder(self, x, y, keep_prob, use_recent_idx=True): + embeddings = self.embedder.core_builder(x) + if keep_prob < 1.0: + embeddings = tf.nn.dropout(embeddings, keep_prob) + memory_val, _, teacher_loss = self.memory.query( + embeddings, y, use_recent_idx=use_recent_idx) + loss, y_pred = self.classifier.core_builder(memory_val, x, y) + + return loss + teacher_loss, y_pred + + def train(self, x, y): + loss, _ = self.core_builder(x, y, keep_prob=0.3) + gradient_ops = self.training_ops(loss) + return 
loss, gradient_ops + + def eval(self, x, y): + _, y_preds = self.core_builder(x, y, keep_prob=1.0, + use_recent_idx=False) + return y_preds + + def get_xy_placeholders(self): + return (tf.placeholder(tf.float32, [None, self.input_dim]), + tf.placeholder(tf.int32, [None])) + + def setup(self): + """Sets up all components of the computation graph.""" + + self.x, self.y = self.get_xy_placeholders() + + # This context creates variables + with tf.variable_scope('core', reuse=None): + self.loss, self.gradient_ops = self.train(self.x, self.y) + # And this one re-uses them (thus the `reuse=True`) + with tf.variable_scope('core', reuse=True): + self.y_preds = self.eval(self.x, self.y) + + def training_ops(self, loss): + opt = self.get_optimizer() + params = tf.trainable_variables() + gradients = tf.gradients(loss, params) + clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0) + return opt.apply_gradients(zip(clipped_gradients, params), + global_step=self.global_step) + + def get_optimizer(self): + return tf.train.AdamOptimizer(learning_rate=self.learning_rate, + epsilon=1e-4) + + def one_step(self, sess, x, y): + outputs = [self.loss, self.gradient_ops] + return sess.run(outputs, feed_dict={self.x: x, self.y: y}) + + def episode_step(self, sess, x, y, clear_memory=False): + """Performs training steps on episodic input. + + Args: + sess: A Tensorflow Session. + x: A list of batches of images defining the episode. + y: A list of batches of labels corresponding to x. + clear_memory: Whether to clear the memory before the episode. + + Returns: + List of losses the same length as the episode. + """ + + outputs = [self.loss, self.gradient_ops] + + if clear_memory: + self.clear_memory(sess) + + losses = [] + for xx, yy in zip(x, y): + out = sess.run(outputs, feed_dict={self.x: xx, self.y: yy}) + loss = out[0] + losses.append(loss) + + return losses + + def predict(self, sess, x, y=None): + """Predict the labels on a single batch of examples. + + Args: + sess: A Tensorflow Session. + x: A batch of images. + y: The labels for the images in x. + This allows for updating the memory. + + Returns: + Predicted y. + """ + + # Storing current memory state to restore it after prediction + mem_keys, mem_vals, mem_age, _ = self.memory.get() + cur_memory = ( + tf.identity(mem_keys), + tf.identity(mem_vals), + tf.identity(mem_age), + None, + ) + + outputs = [self.y_preds] + if y is None: + ret = sess.run(outputs, feed_dict={self.x: x}) + else: + ret = sess.run(outputs, feed_dict={self.x: x, self.y: y}) + + # Restoring memory state + self.memory.set(*cur_memory) + + return ret + + def episode_predict(self, sess, x, y, clear_memory=False): + """Predict the labels on an episode of examples. + + Args: + sess: A Tensorflow Session. + x: A list of batches of images. + y: A list of labels for the images in x. + This allows for updating the memory. + clear_memory: Whether to clear the memory before the episode. + + Returns: + List of predicted y. 
+ """ + + # Storing current memory state to restore it after prediction + mem_keys, mem_vals, mem_age, _ = self.memory.get() + cur_memory = ( + tf.identity(mem_keys), + tf.identity(mem_vals), + tf.identity(mem_age), + None, + ) + + if clear_memory: + self.clear_memory(sess) + + outputs = [self.y_preds] + y_preds = [] + for xx, yy in zip(x, y): + out = sess.run(outputs, feed_dict={self.x: xx, self.y: yy}) + y_pred = out[0] + y_preds.append(y_pred) + + # Restoring memory state + self.memory.set(*cur_memory) + + return y_preds + + def clear_memory(self, sess): + sess.run([self.memory.clear()]) diff --git a/models/research/learning_to_remember_rare_events/train.py b/models/research/learning_to_remember_rare_events/train.py new file mode 100644 index 0000000000000000000000000000000000000000..c5c6d06b5ee02e73128ee2b23f3b399d29b1e212 --- /dev/null +++ b/models/research/learning_to_remember_rare_events/train.py @@ -0,0 +1,242 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ============================================================================== +r"""Script for training model. + +Simple command to get up and running: + python train.py --memory_size=8192 \ + --batch_size=16 --validation_length=50 \ + --episode_width=5 --episode_length=30 +""" + +import logging +import os +import random + +import numpy as np +from six.moves import xrange +import tensorflow as tf + +import data_utils +import model + +FLAGS = tf.flags.FLAGS + +tf.flags.DEFINE_integer('rep_dim', 128, + 'dimension of keys to use in memory') +tf.flags.DEFINE_integer('episode_length', 100, 'length of episode') +tf.flags.DEFINE_integer('episode_width', 5, + 'number of distinct labels in a single episode') +tf.flags.DEFINE_integer('memory_size', None, 'number of slots in memory. 
' + 'Leave as None to default to episode length') +tf.flags.DEFINE_integer('batch_size', 16, 'batch size') +tf.flags.DEFINE_integer('num_episodes', 100000, 'number of training episodes') +tf.flags.DEFINE_integer('validation_frequency', 20, + 'every so many training episodes, ' + 'assess validation accuracy') +tf.flags.DEFINE_integer('validation_length', 10, + 'number of episodes to use to compute ' + 'validation accuracy') +tf.flags.DEFINE_integer('seed', 888, 'random seed for training sampling') +tf.flags.DEFINE_string('save_dir', '', 'directory to save model to') +tf.flags.DEFINE_bool('use_lsh', False, + 'use locality-sensitive hashing ' + '(NOTE: not fully tested)') + + +class Trainer(object): + """Class that takes care of training, validating, and checkpointing model.""" + + def __init__(self, train_data, valid_data, input_dim, output_dim=None): + self.train_data = train_data + self.valid_data = valid_data + self.input_dim = input_dim + + self.rep_dim = FLAGS.rep_dim + self.episode_length = FLAGS.episode_length + self.episode_width = FLAGS.episode_width + self.batch_size = FLAGS.batch_size + self.memory_size = (self.episode_length * self.batch_size + if FLAGS.memory_size is None else FLAGS.memory_size) + self.use_lsh = FLAGS.use_lsh + + self.output_dim = (output_dim if output_dim is not None + else self.episode_width) + + def get_model(self): + # vocab size is the number of distinct values that + # could go into the memory key-value storage + vocab_size = self.episode_width * self.batch_size + return model.Model( + self.input_dim, self.output_dim, self.rep_dim, self.memory_size, + vocab_size, use_lsh=self.use_lsh) + + def sample_episode_batch(self, data, + episode_length, episode_width, batch_size): + """Generates a random batch for training or validation. + + Structures each element of the batch as an 'episode'. + Each episode contains episode_length examples and + episode_width distinct labels. + + Args: + data: A dictionary mapping label to list of examples. + episode_length: Number of examples in each episode. + episode_width: Distinct number of labels in each episode. + batch_size: Batch size (number of episodes). + + Returns: + A tuple (x, y) where x is a list of batches of examples + with size episode_length and y is a list of batches of labels. 
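+      For example, with the settings from the README (episode_length=30, episode_width=5, batch_size=16) on 28x28 Omniglot images, x is a list of 30 float32 arrays of shape [16, 784] and y a list of 30 int32 arrays of shape [16].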
+ """ + + episodes_x = [[] for _ in xrange(episode_length)] + episodes_y = [[] for _ in xrange(episode_length)] + assert len(data) >= episode_width + keys = data.keys() + for b in xrange(batch_size): + episode_labels = random.sample(keys, episode_width) + remainder = episode_length % episode_width + remainders = [0] * (episode_width - remainder) + [1] * remainder + episode_x = [ + random.sample(data[lab], + r + (episode_length - remainder) // episode_width) + for lab, r in zip(episode_labels, remainders)] + episode = sum([[(x, i, ii) for ii, x in enumerate(xx)] + for i, xx in enumerate(episode_x)], []) + random.shuffle(episode) + # Arrange episode so that each distinct label is seen before moving to + # 2nd showing + episode.sort(key=lambda elem: elem[2]) + assert len(episode) == episode_length + for i in xrange(episode_length): + episodes_x[i].append(episode[i][0]) + episodes_y[i].append(episode[i][1] + b * episode_width) + + return ([np.array(xx).astype('float32') for xx in episodes_x], + [np.array(yy).astype('int32') for yy in episodes_y]) + + def compute_correct(self, ys, y_preds): + return np.mean(np.equal(y_preds, np.array(ys))) + + def individual_compute_correct(self, y, y_pred): + return y_pred == y + + def run(self): + """Performs training. + + Trains a model using episodic training. + Every so often, runs some evaluations on validation data. + """ + + train_data, valid_data = self.train_data, self.valid_data + input_dim, output_dim = self.input_dim, self.output_dim + rep_dim, episode_length = self.rep_dim, self.episode_length + episode_width, memory_size = self.episode_width, self.memory_size + batch_size = self.batch_size + + train_size = len(train_data) + valid_size = len(valid_data) + logging.info('train_size (number of labels) %d', train_size) + logging.info('valid_size (number of labels) %d', valid_size) + logging.info('input_dim %d', input_dim) + logging.info('output_dim %d', output_dim) + logging.info('rep_dim %d', rep_dim) + logging.info('episode_length %d', episode_length) + logging.info('episode_width %d', episode_width) + logging.info('memory_size %d', memory_size) + logging.info('batch_size %d', batch_size) + + assert all(len(v) >= float(episode_length) / episode_width + for v in train_data.values()) + assert all(len(v) >= float(episode_length) / episode_width + for v in valid_data.values()) + + output_dim = episode_width + self.model = self.get_model() + self.model.setup() + + sess = tf.Session() + sess.run(tf.global_variables_initializer()) + + saver = tf.train.Saver(max_to_keep=10) + ckpt = None + if FLAGS.save_dir: + ckpt = tf.train.get_checkpoint_state(FLAGS.save_dir) + if ckpt and ckpt.model_checkpoint_path: + logging.info('restoring from %s', ckpt.model_checkpoint_path) + saver.restore(sess, ckpt.model_checkpoint_path) + + logging.info('starting now') + losses = [] + random.seed(FLAGS.seed) + np.random.seed(FLAGS.seed) + for i in xrange(FLAGS.num_episodes): + x, y = self.sample_episode_batch( + train_data, episode_length, episode_width, batch_size) + outputs = self.model.episode_step(sess, x, y, clear_memory=True) + loss = outputs + losses.append(loss) + + if i % FLAGS.validation_frequency == 0: + logging.info('episode batch %d, avg train loss %f', + i, np.mean(losses)) + losses = [] + + # validation + correct = [] + num_shots = episode_length // episode_width + correct_by_shot = dict((k, []) for k in xrange(num_shots)) + for _ in xrange(FLAGS.validation_length): + x, y = self.sample_episode_batch( + valid_data, episode_length, episode_width, 1) + outputs = 
self.model.episode_predict( + sess, x, y, clear_memory=True) + y_preds = outputs + correct.append(self.compute_correct(np.array(y), y_preds)) + + # compute per-shot accuracies + seen_counts = [0] * episode_width + # loop over episode steps + for yy, yy_preds in zip(y, y_preds): + # loop over batch examples + yyy, yyy_preds = int(yy[0]), int(yy_preds[0]) + count = seen_counts[yyy % episode_width] + if count in correct_by_shot: + correct_by_shot[count].append( + self.individual_compute_correct(yyy, yyy_preds)) + seen_counts[yyy % episode_width] = count + 1 + + logging.info('validation overall accuracy %f', np.mean(correct)) + logging.info('%d-shot: %.3f, ' * num_shots, + *sum([[k, np.mean(correct_by_shot[k])] + for k in xrange(num_shots)], [])) + + if saver and FLAGS.save_dir: + saved_file = saver.save(sess, + os.path.join(FLAGS.save_dir, 'model.ckpt'), + global_step=self.model.global_step) + logging.info('saved model to %s', saved_file) + + +def main(unused_argv): + train_data, valid_data = data_utils.get_data() + trainer = Trainer(train_data, valid_data, data_utils.IMAGE_NEW_SIZE ** 2) + trainer.run() + + +if __name__ == '__main__': + logging.basicConfig(level=logging.INFO) + tf.app.run() diff --git a/models/research/learning_unsupervised_learning/.gitignore b/models/research/learning_unsupervised_learning/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..0d20b6487c61e7d1bde93acf4a14b7a89083a16d --- /dev/null +++ b/models/research/learning_unsupervised_learning/.gitignore @@ -0,0 +1 @@ +*.pyc diff --git a/models/research/learning_unsupervised_learning/README.md b/models/research/learning_unsupervised_learning/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0e38717f5de29df28959062889abeb1ce578feea --- /dev/null +++ b/models/research/learning_unsupervised_learning/README.md @@ -0,0 +1,40 @@ +![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +# Learning Unsupervised Learning Rules +This repository contains code and weights for the learned update rule +presented in "Learning Unsupervised Learning Rules." At this time, this +code can not meta-train the update rule. + +### Structure +`run_eval.py` contains the main training loop. This constructs an op +that runs one iteration of the learned update rule and assigns the +results to variables. Additionally, it loads the weights from our +pre-trained model. + +The base model and the update rule architecture definition can be found in +`architectures/more_local_weight_update.py`. For a complete description +of the model, see our [paper](https://arxiv.org/abs/1804.00222). + +### Dependencies +[absl]([https://github.com/abseil/abseil-py), [tensorflow](https://tensorflow.org), [sonnet](https://github.com/deepmind/sonnet) + +### Usage + +First, download the [pre-trained optimizer model weights](https://storage.googleapis.com/learning_unsupervised_learning/200_tf_graph.zip) and extract it. 
+ +```bash +# move to the folder above this folder +cd path_to/research/learning_unsupervised_learning/../ + +# launch the eval script +python -m learning_unsupervised_learning.run_eval \ +--train_log_dir="/tmp/learning_unsupervised_learning" \ +--checkpoint_dir="/path/to/downloaded/model/tf_graph_data.ckpt" +``` + +### Contact +Luke Metz, Niru Maheswaranathan, Github: @lukemetz, @nirum. Email: {lmetz, nirum}@google.com + + diff --git a/models/research/learning_unsupervised_learning/__init__.py b/models/research/learning_unsupervised_learning/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/learning_unsupervised_learning/architectures/__init__.py b/models/research/learning_unsupervised_learning/architectures/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..af9545f26da538aa986b19a96b6cfa2bc7459227 --- /dev/null +++ b/models/research/learning_unsupervised_learning/architectures/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2018 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + + +import more_local_weight_update diff --git a/models/research/learning_unsupervised_learning/architectures/common.py b/models/research/learning_unsupervised_learning/architectures/common.py new file mode 100644 index 0000000000000000000000000000000000000000..43a2d4f8965ecd337abd3a072a7ecb789df21910 --- /dev/null +++ b/models/research/learning_unsupervised_learning/architectures/common.py @@ -0,0 +1,153 @@ +# Copyright 2018 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sonnet as snt +import tensorflow as tf +import numpy as np +import collections +from learning_unsupervised_learning import utils + +from tensorflow.python.util import nest + +from learning_unsupervised_learning import variable_replace + + +class LinearBatchNorm(snt.AbstractModule): + """Module that does a Linear layer then a BatchNorm followed by an activation fn""" + def __init__(self, size, activation_fn=tf.nn.relu, name="LinearBatchNorm"): + self.size = size + self.activation_fn = activation_fn + super(LinearBatchNorm, self).__init__(name=name) + + def _build(self, x): + x = tf.to_float(x) + initializers={"w": tf.truncated_normal_initializer(stddev=0.01)} + lin = snt.Linear(self.size, use_bias=False, initializers=initializers) + z = lin(x) + + scale = tf.constant(1., dtype=tf.float32) + offset = tf.get_variable( + "b", + shape=[1, z.shape.as_list()[1]], + initializer=tf.truncated_normal_initializer(stddev=0.1), + dtype=tf.float32 + ) + + mean, var = tf.nn.moments(z, [0], keep_dims=True) + z = ((z - mean) * tf.rsqrt(var + 1e-6)) * scale + offset + + x_p = self.activation_fn(z) + + return z, x_p + + # This needs to work by string name sadly due to how the variable replace + # works and would also work even if the custom getter approuch was used. + # This is verbose, but it should atleast be clear as to what is going on. + # TODO(lmetz) a better way to do this (the next 3 functions: + # _raw_name, w(), b() ) + def _raw_name(self, var_name): + """Return just the name of the variable, not the scopes.""" + return var_name.split("/")[-1].split(":")[0] + + + @property + def w(self): + var_list = snt.get_variables_in_module(self) + w = [x for x in var_list if self._raw_name(x.name) == "w"] + assert len(w) == 1 + return w[0] + + @property + def b(self): + var_list = snt.get_variables_in_module(self) + b = [x for x in var_list if self._raw_name(x.name) == "b"] + assert len(b) == 1 + return b[0] + + + +class Linear(snt.AbstractModule): + def __init__(self, size, use_bias=True, init_const_mag=True): + self.size = size + self.use_bias = use_bias + self.init_const_mag = init_const_mag + super(Linear, self).__init__(name="commonLinear") + + def _build(self, x): + if self.init_const_mag: + initializers={"w": tf.truncated_normal_initializer(stddev=0.01)} + else: + initializers={} + lin = snt.Linear(self.size, use_bias=self.use_bias, initializers=initializers) + z = lin(x) + return z + + # This needs to work by string name sadly due to how the variable replace + # works and would also work even if the custom getter approuch was used. + # This is verbose, but it should atleast be clear as to what is going on. 
+ # TODO(lmetz) a better way to do this (the next 3 functions: + # _raw_name, w(), b() ) + def _raw_name(self, var_name): + """Return just the name of the variable, not the scopes.""" + return var_name.split("/")[-1].split(":")[0] + + @property + def w(self): + var_list = snt.get_variables_in_module(self) + if self.use_bias: + assert len(var_list) == 2, "Found not 2 but %d" % len(var_list) + else: + assert len(var_list) == 1, "Found not 1 but %d" % len(var_list) + w = [x for x in var_list if self._raw_name(x.name) == "w"] + assert len(w) == 1 + return w[0] + + @property + def b(self): + var_list = snt.get_variables_in_module(self) + assert len(var_list) == 2, "Found not 2 but %d" % len(var_list) + b = [x for x in var_list if self._raw_name(x.name) == "b"] + assert len(b) == 1 + return b[0] + + +def transformer_at_state(base_model, new_variables): + """Get the base_model that has been transformed to use the variables + in final_state. + Args: + base_model: snt.Module + Goes from batch to features + new_variables: list + New list of variables to use + Returns: + func: callable of same api as base_model. + """ + assert not variable_replace.in_variable_replace_scope() + + def _feature_transformer(input_data): + """Feature transformer at the end of training.""" + initial_variables = base_model.get_variables() + replacement = collections.OrderedDict( + utils.eqzip(initial_variables, new_variables)) + with variable_replace.variable_replace(replacement): + features = base_model(input_data) + return features + + return _feature_transformer diff --git a/models/research/learning_unsupervised_learning/architectures/more_local_weight_update.py b/models/research/learning_unsupervised_learning/architectures/more_local_weight_update.py new file mode 100644 index 0000000000000000000000000000000000000000..117549af0f21f9e5148435b73f664a08013f8786 --- /dev/null +++ b/models/research/learning_unsupervised_learning/architectures/more_local_weight_update.py @@ -0,0 +1,861 @@ +# Copyright 2018 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import numpy as np +import sonnet as snt +import tensorflow as tf + +from learning_unsupervised_learning.architectures import common +from learning_unsupervised_learning import optimizers +from learning_unsupervised_learning import utils +from learning_unsupervised_learning import summary_utils + +OptState = collections.namedtuple('OptState', + ['variables', 'opt_state', 'index']) + +BaseModelOutputs = collections.namedtuple( + 'BaseModelOutputs', ['xs', 'zs', 'mods', 'batch', 'backward_mods']) + + +class GradChannelReadout(snt.AbstractModule): + """Perform a linear readout and reshape from input 3 tensor.""" + + def __init__(self, + num_grad_channels, + device, + perm=(2, 0, 1), + name='GradChannelReadout'): + """Args: + + num_grad_channels: int + number of channels to readout to. + device: str or callable + devicwe to place weights. + perm: list or tuple + transpose applied. + """ + + self.num_grad_channels = num_grad_channels + self.device = device + self.perm = perm + super(GradChannelReadout, self).__init__(name=name) + + def _build(self, h): + with tf.device(self.device): + mod = snt.Linear(self.num_grad_channels) + ret = snt.BatchApply(mod)(h) + # return as [num_grad_channels] x [bs] x [num units] + return tf.transpose(ret, perm=self.perm) + + +def get_weight_stats(x, axis): + """ Compute weight statistics over the given axis. + + Args: + x: tf.Tensor + a batch of activations. + axis: int + axis to perform statistics over. + Returns: + tf.Tensor + a 3-D tensor with statistics. + """ + if x is None: + return [] + + stats = [] + l1 = tf.reduce_mean(tf.abs(x), axis=axis) + l2 = tf.sqrt(tf.reduce_mean(x**2, axis=axis) + 1e-6) + + mean, var = tf.nn.moments(x, [axis]) + stats.extend([l1, l2, mean, tf.sqrt(var + 1e-8)]) + + stats = [tf.reshape(s, [-1, 1, 1]) for s in stats] + + return stats + + +class AddUnitBatchStatistics(snt.AbstractModule): + """Compute some number of statistics over units and concat them on.""" + + def __init__(self, name='AddUnitBatchStatistics'): + super(AddUnitBatchStatistics, self).__init__(name=name) + + def _build(self, x): + # [channel, bs, 1] + output = x + for d in [0, 1]: + stats = [] + l1 = tf.reduce_mean(tf.abs(x), axis=d, keepdims=True) + l2 = tf.sqrt(tf.reduce_mean(x**2, axis=d, keepdims=True) + 1e-6) + + mean, var = tf.nn.moments(x, [d], keepdims=True) + stats.extend([l1, l2, mean, tf.sqrt(var + 1e-8)]) + + to_add = tf.concat(stats, axis=2) # [channels/1, units/1, stats] + output += snt.BatchApply(snt.Linear(x.shape.as_list()[2]))(to_add) + return output + + +class ConcatUnitConv(snt.AbstractModule): + """Do a small number of convolutions over units and concat / add them on.""" + + def __init__(self, add=True): + self.add = add + super(ConcatUnitConv, self).__init__(name='ConcatUnitConv') + + def _build(self, x): + # x is [units, bs, 1] + net = tf.transpose(x, [1, 0, 2]) # now [bs x units x 1] + channels = x.shape.as_list()[2] + mod = snt.Conv1D(output_channels=channels, kernel_shape=[3]) + net = mod(net) + net = snt.BatchNorm(axis=[0, 1])(net, is_training=False) + net = tf.nn.relu(net) + mod = snt.Conv1D(output_channels=channels, kernel_shape=[3]) + net = mod(net) + net = snt.BatchNorm(axis=[0, 1])(net, is_training=False) + net = tf.nn.relu(net) + to_concat = tf.transpose(net, [1, 0, 2]) + if self.add: + return x + to_concat + else: + 
return tf.concat([x, to_concat], 2) + + +class MoreLocalWeightUpdateProcess(snt.AbstractModule): + + def __init__( + self, + remote_device, + local_device, + top_delta_size=64, + top_delta_layers=2, + compute_h_size=64, + compute_h_layers=1, + delta_dim=32, + num_grad_channels=4, + normalize_epsilon=1., + ): + self.local_device = local_device + self.remote_device = remote_device + self.top_delta_size = top_delta_size + self.top_delta_layers = top_delta_layers + self.compute_h_size = compute_h_size + self.compute_h_layers = compute_h_layers + self.delta_dim = delta_dim + self.num_grad_channels = num_grad_channels + self.normalize_epsilon = normalize_epsilon, + + with tf.device(local_device): + self.opt = optimizers.UnrollableGradientDescentRollingOptimizer( + learning_rate=1e-4) + + # lazily initialized for readouts + self.readout_mods = {} + + super(MoreLocalWeightUpdateProcess, + self).__init__(name='MoreLocalWeightUpdateProcess') + + with tf.device(remote_device): + self() + + def normalize(self, change_w, normalize_epsilon=None): + if normalize_epsilon is None: + normalize_epsilon = self.normalize_epsilon + + # normalize the weights per receptive-field, rather than per-matrix + var = tf.reduce_mean(tf.square(change_w), axis=0, keepdims=True) + change_w = (change_w) / tf.sqrt(normalize_epsilon + var) + return change_w + + def _build(self): + pass + + @snt.reuse_variables + def compute_top_delta(self, z): + """ parameterization of topD. This converts the top level activation + to an error signal. + Args: + z: tf.Tensor + batch of final layer post activations + Returns + delta: tf.Tensor + the error signal + """ + s_idx = 0 + with tf.variable_scope('compute_top_delta'), tf.device(self.remote_device): + # typically this takes [BS, length, input_channels], + # We are applying this such that we convolve over the batch dimension. 
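Note: normalize above rescales a candidate weight update so that each column (receptive field) has roughly unit RMS, rather than normalizing the matrix as a whole. A NumPy sketch of that step, assuming the default epsilon of 1.0:

import numpy as np

def normalize_change(change_w, eps=1.0):
    var = np.mean(np.square(change_w), axis=0, keepdims=True)  # per receptive field
    return change_w / np.sqrt(eps + var)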
+ act = tf.expand_dims(tf.transpose(z, [1, 0]), 2) # [channels, BS, 1] + + mod = snt.Conv1D(output_channels=self.top_delta_size, kernel_shape=[5]) + act = mod(act) + + act = snt.BatchNorm(axis=[0, 1])(act, is_training=False) + act = tf.nn.relu(act) + + bs = act.shape.as_list()[0] + act = tf.transpose(act, [2, 1, 0]) + act = snt.Conv1D(output_channels=bs, kernel_shape=[3])(act) + act = snt.BatchNorm(axis=[0, 1])(act, is_training=False) + act = tf.nn.relu(act) + act = snt.Conv1D(output_channels=bs, kernel_shape=[3])(act) + act = snt.BatchNorm(axis=[0, 1])(act, is_training=False) + act = tf.nn.relu(act) + act = tf.transpose(act, [2, 1, 0]) + + prev_act = act + for i in range(self.top_delta_layers): + mod = snt.Conv1D(output_channels=self.top_delta_size, kernel_shape=[3]) + act = mod(act) + + act = snt.BatchNorm(axis=[0, 1])(act, is_training=False) + act = tf.nn.relu(act) + + prev_act = act + + mod = snt.Conv1D(output_channels=self.delta_dim, kernel_shape=[3]) + act = mod(act) + + # [bs, feature_channels, delta_channels] + act = tf.transpose(act, [1, 0, 2]) + return act + + @snt.reuse_variables + def compute_h(self, + x, + z, + d, + bias, + W_bot, + W_top, + compute_perc=1.0, + compute_units=None): + """z = [BS, n_units] a = [BS, n_units] b = [BS, n_units] d = [BS, n_units, delta_channels] + + """ + + s_idx = 0 + if compute_perc != 1.0: + assert compute_units is None + + with tf.device(self.remote_device): + inp_feat = [x, z] + inp_feat = [tf.transpose(f, [1, 0]) for f in inp_feat] + + units = x.shape.as_list()[1] + bs = x.shape.as_list()[0] + + # add unit ID, to help the network differentiate units + id_theta = tf.linspace(0., (4) * np.pi, units) + assert bs is not None + id_theta_bs = tf.reshape(id_theta, [-1, 1]) * tf.ones([1, bs]) + inp_feat += [tf.sin(id_theta_bs), tf.cos(id_theta_bs)] + + # list of [units, BS, 1] + inp_feat = [tf.expand_dims(f, 2) for f in inp_feat] + + d_trans = tf.transpose(d, [1, 0, 2]) + + if compute_perc != 1.0: + compute_units = int(compute_perc * inp_feat.shape.as_list()[0]) + + # add weight matrix statistics, both from above and below + w_stats_bot = get_weight_stats(W_bot, 0) + w_stats_top = get_weight_stats(W_top, 1) + w_stats = w_stats_bot + w_stats_top + if W_bot is None or W_top is None: + # if it's an edge layer (top or bottom), just duplicate the stats for + # the weight matrix that does exist + w_stats = w_stats + w_stats + w_stats = [tf.ones([1, x.shape[0], 1]) * ww for ww in w_stats] + # w_stats is a list, with entries with shape UNITS x 1 x channels + + if compute_units is None: + inp_feat_in = inp_feat + d_trans_in = d_trans + w_stats_in = w_stats + bias_in = tf.transpose(bias) + else: + # only run on a subset of the activations. 
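Note: compute_h above tags every hidden unit with a fixed sin/cos identity feature so the per-unit convolutions can tell otherwise identical units apart. A NumPy sketch of how those two feature maps are formed (shapes illustrative):

import numpy as np

units, bs = 8, 4
id_theta = np.linspace(0.0, 4 * np.pi, units)
id_theta_bs = id_theta.reshape(-1, 1) * np.ones((1, bs))    # [units, bs]
unit_id_feats = [np.sin(id_theta_bs), np.cos(id_theta_bs)]  # two [units, bs] maps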
+ mask = tf.random_uniform( + minval=0, + maxval=1, + dtype=tf.float32, + shape=inp_feat[0].shape.as_list()[0:1]) + _, ind = tf.nn.top_k(mask, k=compute_units) + ind = tf.reshape(ind, [-1, 1]) + + inp_feat_in = [tf.gather_nd(xx, ind) for xx in inp_feat] + w_stats_in = [tf.gather_nd(xx, ind) for xx in w_stats] + d_trans_in = tf.gather_nd(d_trans, ind) + bias_in = tf.gather_nd(tf.transpose(bias), ind) + + w_stats_in = tf.concat(w_stats_in, 2) + w_stats_in_norm = w_stats_in * tf.rsqrt( + tf.reduce_mean(w_stats_in**2) + 1e-6) + + act = tf.concat(inp_feat_in + [d_trans_in], 2) + act = snt.BatchNorm(axis=[0, 1])(act, is_training=True) + + bias_dense = tf.reshape(bias_in, [-1, 1, 1]) * tf.ones([1, bs, 1]) + act = tf.concat([w_stats_in_norm, bias_dense, act], 2) + + mod = snt.Conv1D(output_channels=self.compute_h_size, kernel_shape=[3]) + act = mod(act) + + act = snt.BatchNorm(axis=[0, 1])(act, is_training=True) + act = tf.nn.relu(act) + + act2 = ConcatUnitConv()(act) + act = act2 + + prev_act = act + for i in range(self.compute_h_layers): + mod = snt.Conv1D(output_channels=self.compute_h_size, kernel_shape=[3]) + act = mod(act) + + act = snt.BatchNorm(axis=[0, 1])(act, is_training=True) + act = tf.nn.relu(act) + + act = ConcatUnitConv()(act) + + prev_act = act + + h = act + if compute_units is not None: + shape = inp_feat[0].shape.as_list()[:1] + h.shape.as_list()[1:] + h = tf.scatter_nd(ind, h, shape=shape) + + h = tf.transpose(h, [1, 0, 2]) # [bs, units, channels] + + return h + + ## wrappers to allow forward and backward to have different variables + @snt.reuse_variables + def merge_change_w_forward(self, change_w_terms, global_prefix='', prefix=''): + return self.merge_change_w( + change_w_terms, global_prefix=global_prefix, prefix=prefix) + + @snt.reuse_variables + def merge_change_w_backward(self, change_w_terms, global_prefix='', + prefix=''): + return self.merge_change_w( + change_w_terms, global_prefix=global_prefix, prefix=prefix) + + def merge_change_w(self, change_w_terms, global_prefix='', prefix=''): + with tf.device( + self.remote_device), tf.name_scope(global_prefix + '_merge_change_w'): + w_base = change_w_terms['w_base'] + + for kk in sorted(change_w_terms.keys()): + name = global_prefix + 'change_w_plane_%s' % kk + delta_w = change_w_terms[kk] + mean, var = tf.nn.moments(delta_w, [0, 1]) + root_mean_square = tf.sqrt(tf.reduce_mean(delta_w**2) + 1e-6) + + for kk in sorted(change_w_terms.keys()): + change_w_terms[kk] = self.normalize(change_w_terms[kk]) + + initializers = { + 'w': tf.constant_initializer(0.1), + 'b': tf.zeros_initializer() + } + mod = snt.Linear( + 1, + name=global_prefix + '_weight_readout_coeffs', + initializers=initializers) + + change_w_terms_list = [ + change_w_terms[kk] for kk in sorted(change_w_terms.keys()) + ] + stack_terms = tf.stack(change_w_terms_list, axis=-1) + change_w = tf.squeeze( + snt.BatchApply(mod)(stack_terms), axis=-1) / len(change_w_terms) + + # only allow perpendicular updates, or updates which grow length. don't + # allow length to decay towards zero. + ip = tf.reduce_mean(change_w * w_base) + # zero out any updates that shrink length + ip = tf.nn.relu(ip) + change_w -= w_base * ip + change_w /= tf.sqrt(len(change_w_terms) * 1.) + + change_w = self.normalize(change_w) + + # encourage the receptive field to not collapse to 0 + change_w -= w_base / 7. 
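Note: merge_change_w above mixes the candidate terms with a learned linear readout and then post-processes the result: the component aligned with the current weights is removed when their mean product is positive, and the update is rescaled by the number of terms before renormalization. A NumPy sketch of that projection step (the learned readout itself is omitted):

import numpy as np

def project_update(change_w, w_base, n_terms):
    ip = np.mean(change_w * w_base)      # alignment with the current weights
    ip = max(ip, 0.0)                    # tf.nn.relu(ip) in the module above
    change_w = change_w - w_base * ip    # strip the aligned component
    return change_w / np.sqrt(float(n_terms))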
# This is an arbitrary scale choice + + return tf.identity(change_w) + + @snt.reuse_variables + def bias_readout(self, h): + with tf.device(self.remote_device): + mod = snt.Linear(1, name='bias_readout') + ret = snt.BatchApply(mod)(h) + return tf.squeeze(ret, 2) + + @snt.reuse_variables + def next_delta(self, z, h, d): + with tf.device(self.remote_device): + return d * tf.expand_dims(tf.nn.sigmoid(z), 2) + self.to_delta_size(h) + + @utils.create_variables_in_class_scope + def get_readout_mod(self, name): + if name not in self.readout_mods: + self.readout_mods[name] = GradChannelReadout( + self.num_grad_channels, device=self.remote_device, name=name) + + return self.readout_mods[name] + + @utils.create_variables_in_class_scope + def low_rank_readout(self, name, h1, h2, psd=False): + BS = h1.shape.as_list()[0] + r_t = self.get_readout_mod(name + '_top')(h1) + if psd: + r_b = r_t + else: + r_b = self.get_readout_mod(name + '_bottom')(h2) + return tf.reduce_mean(tf.matmul(r_b, r_t, transpose_a=True), axis=0) / BS + + @snt.reuse_variables + def to_delta_size(self, h): + with tf.device(self.remote_device): + mod = snt.Linear(self.delta_dim) + return snt.BatchApply(mod)(h) + + @snt.reuse_variables + def initial_state(self, variables): + """The inner optimization state. + + Args: + variables: list of tf.Variable + list of variables to get the initial state of. + Returns: + opt_state: OptState + """ + + with tf.device(self.local_device): + initial_opt_state = self.opt.get_state(variables) + + return OptState( + variables=variables, opt_state=initial_opt_state, index=tf.constant(0)) + + @snt.reuse_variables + def compute_next_state(self, grads, learning_rate, cur_state, + cur_transformer): + + summaries = [] + with tf.device(self.local_device): + with tf.control_dependencies(summaries): + new_vars, new_state = self.opt.compute_updates( + cur_state.variables, grads, learning_rate, cur_state.opt_state) + pass + + return OptState( + variables=tuple(new_vars), + opt_state=new_state, + index=cur_state.index + 1) + + def assign_state(self, base_model, next_state): + var_ups = [ + v.assign(nv) for v, nv in utils.eqzip(base_model.get_variables(), + next_state.variables) + ] + + opt_ups = self.opt.assign_state(next_state.opt_state) + + return tf.group(opt_ups, *var_ups) + + def local_variables(self): + return list(self.opt.get_variables()) + + def remote_variables(self): + train = list( + snt.get_variables_in_module(self, tf.GraphKeys.TRAINABLE_VARIABLES)) + train += list( + snt.get_variables_in_module(self, + tf.GraphKeys.MOVING_AVERAGE_VARIABLES)) + return train + + +class MoreLocalWeightUpdateWLearner(snt.AbstractModule): + """The BaseModel that the UnsupervisedUpdateRule acts on. + """ + + def __init__(self, + remote_device, + local_device, + inner_size=128, + output_size=32, + n_layers=4, + shuffle_input=True, + activation_fn=tf.nn.relu, + identical_updates=True, + **kwargs): + self.local_device = local_device + self.remote_device = remote_device + self.inner_size = inner_size + self.n_layers = n_layers + self.shuffle_input = shuffle_input + self.activation_fn = activation_fn + self.identical_updates = identical_updates + + self.output_size = output_size + if output_size == None: + self.output_size = inner_size + + self.shuffle_ind = None + + super(MoreLocalWeightUpdateWLearner, self).__init__( + name='LocalWeightUpdateWLearner', **kwargs) + + @snt.reuse_variables + def get_shuffle_ind(self, size): + if self.shuffle_ind is None: + # put the shuffle in tf memory to make the eval jobs + # re-entrant. 
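Note: low_rank_readout above turns two per-unit hidden states into a weight-shaped matrix: each state is read out to num_grad_channels channels and their batched outer product is averaged over channels and batch. An einsum rendering in NumPy with illustrative shapes:

import numpy as np

c, bs, n_low, n_up = 4, 16, 32, 64
r_b = np.random.randn(c, bs, n_low)   # readout of the lower layer's h
r_t = np.random.randn(c, bs, n_up)    # readout of the upper layer's h
change_U = np.einsum('cbi,cbj->ij', r_b, r_t) / (c * bs)
print(change_U.shape)                 # (32, 64), the shape of the layer's weights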
+ shuffle_ind_val = np.random.permutation(size) + shuffle_ind = tf.get_variable( + name='shuffle_ind', dtype=tf.int64, initializer=shuffle_ind_val) + unshuffle_ind = tf.scatter_nd( + tf.reshape(shuffle_ind, [-1, 1]), tf.range(size), [size]) + + return shuffle_ind, unshuffle_ind + + def _build(self, batch): + image = batch.image + x0 = snt.BatchFlatten()(image) + if self.shuffle_input: + size = x0.shape.as_list()[1] + shuffle_ind, unshuffle_ind = self.get_shuffle_ind(size) + x0 = tf.gather(x0, shuffle_ind, axis=1) + + xs = [x0] + mods = [] + zs = [] + init = {} + + for i in range(self.n_layers): + mod = common.LinearBatchNorm( + self.inner_size, activation_fn=self.activation_fn) + z, x = mod(xs[i]) + xs.append(x) + zs.append(z) + mods.append(mod) + + mod = common.LinearBatchNorm( + self.output_size, activation_fn=self.activation_fn) + z, x = mod(xs[-1]) + mods.append(mod) + + xs.append(x) + zs.append(z) + + embedding_x = xs[-1] + + # make a random set of backward mods + backward_mods = [] + for i, (x, x_p1) in enumerate(zip(xs[0:-1], xs[1:])): + m = common.LinearBatchNorm( + x_p1.shape.as_list()[1], activation_fn=tf.identity) + _ = m(x) + backward_mods.append(m) + + shape = image.shape.as_list()[1:4] + + for mods_p, prefix in [(mods, 'forward'), (backward_mods, 'backward')]: + if self.shuffle_input: + unshuf_w = tf.gather(mods_p[0].w, unshuffle_ind, axis=0) + else: + unshuf_w = mods_p[0].w + img = summary_utils.first_layer_weight_image(unshuf_w, shape) + tf.summary.image(prefix + '_w0_receptive_field', img) + + for i, m in enumerate(mods_p[0:]): + img = summary_utils.inner_layer_weight_image(m.w) + tf.summary.image(prefix + '_w%d' % (i + 1), img) + + img = summary_utils.sorted_images(image, batch.label_onehot) + tf.summary.image('inputs', img) + + # log out pre-activations and activations + for all_vis, base_name in [(xs, 'x'), (zs, 'z')]: + for i, x_vis in enumerate(all_vis): + img = summary_utils.activation_image(x_vis, batch.label_onehot) + tf.summary.image('%s%d' % (base_name, i), img) + + embedding_x = tf.identity(embedding_x) + + outputs = BaseModelOutputs( + xs=xs, zs=zs, mods=mods, batch=batch, backward_mods=backward_mods) + + return embedding_x, outputs + + def compute_next_h_d(self, meta_opt, w_bot, w_top, bias, x, z, d, backward_w): + """ Propogate error back down the network while computing hidden state. + """ + if z is None: + z = x + + h = meta_opt.compute_h(x, z, d, bias, w_bot, + w_top) # [bs x 60 x h_channels] + + # compute the next d + delta = meta_opt.next_delta(z, h, d) + + if backward_w is not None: + + def delta_matmul(w, delta): + d = tf.transpose(delta, [0, 2, 1]) # [bs x delta_channels x n_units) + d = snt.BatchApply(lambda x: tf.matmul(x, w, transpose_b=True))(d) + d = tf.transpose(d, [0, 2, 1]) + return d + + # replace the "backward pass" with a random matrix. + d = delta_matmul(backward_w, delta) # [bs x 60 x delta_channels] + var = tf.reduce_mean(tf.square(d), [2], keepdims=True) + d = d * tf.rsqrt(1e-6 + var) + + return h, d + + def weight_change_for_layer(self, meta_opt, l_idx, w_base, b_base, upper_h, + lower_h, upper_x, lower_x, prefix, include_bias): + """Compute the change in weights for each layer. + This computes something roughly analagous to a gradient. 
+ """ + reduce_upper_h = upper_h + reduce_lower_h = lower_h + + BS = lower_x.shape.as_list()[0] + + change_w_terms = dict() + + # initial weight value normalized + # normalize the weights per receptive-field, rather than per-matrix + weight_scale = tf.rsqrt( + tf.reduce_mean(w_base**2, axis=0, keepdims=True) + 1e-6) + w_base *= weight_scale + + change_w_terms['w_base'] = w_base + + # this will act to decay larger weights towards zero + change_w_terms['large_decay'] = w_base**2 * tf.sign(w_base) + + # term based on activations + ux0 = upper_x - tf.reduce_mean(upper_x, axis=0, keepdims=True) + uxs0 = ux0 * tf.rsqrt(tf.reduce_mean(ux0**2, axis=0, keepdims=True) + 1e-6) + change_U = tf.matmul(uxs0, uxs0, transpose_a=True) / BS + change_U /= tf.sqrt(float(change_U.shape.as_list()[0])) + + cw = tf.matmul(w_base, change_U) + cw_scale = tf.rsqrt(tf.reduce_mean(cw**2 + 1e-8)) + cw *= cw_scale + change_w_terms['decorr_x'] = cw + + # hebbian term + lx0 = lower_x - tf.reduce_mean(lower_x, axis=0, keepdims=True) + lxs0 = lx0 * tf.rsqrt(tf.reduce_mean(lx0**2, axis=0, keepdims=True) + 1e-6) + cw = tf.matmul(lxs0, uxs0, transpose_a=True) / BS + change_w_terms['hebb'] = -cw + + # 0th order term + w_term = meta_opt.low_rank_readout(prefix + 'weight_readout_0', upper_h, + lower_h) + change_w_terms['0_order'] = w_term + + # # rbf term (weight update scaled by distance from 0) + w_term = meta_opt.low_rank_readout(prefix + 'weight_readout_rbf', + reduce_upper_h, reduce_lower_h) + change_w_terms['rbf'] = tf.exp(-w_base**2) * w_term + + # 1st order term (weight dependent update to weights) + w_term = meta_opt.low_rank_readout(prefix + 'weight_readout_1', + reduce_upper_h, reduce_lower_h) + change_w_terms['1_order'] = w_base * w_term + + # more terms based on single layer readouts. + for update_type in ['lin', 'sqr']: + for h_source, h_source_name in [(reduce_upper_h, 'upper'), + (reduce_lower_h, 'lower')]: + structures = ['symm'] + if update_type == 'lin' and h_source_name == 'upper': + structures += ['psd'] + for structure in structures: + name = update_type + '_' + h_source_name + '_' + structure + if structure == 'symm': + change_U = meta_opt.low_rank_readout(prefix + name, h_source, + h_source) + change_U = (change_U + tf.transpose(change_U)) / tf.sqrt(2.) + change_U = tf.matrix_set_diag(change_U, + tf.zeros( + [change_U.shape.as_list()[0]])) + elif structure == 'psd': + change_U = meta_opt.low_rank_readout( + prefix + name, h_source, None, psd=True) + else: + assert False + change_U /= tf.sqrt(float(change_U.shape.as_list()[0])) + + if update_type == 'lin': + sign_multiplier = tf.ones_like(w_base) + w_base_l = w_base + elif update_type == 'sqr': + sign_multiplier = tf.sign(w_base) + w_base_l = tf.sqrt(1. + w_base**2) - 1. 
+ + if h_source_name == 'upper': + cw = tf.matmul(w_base_l, change_U) # [N^l-1 x N^l] + elif h_source_name == 'lower': + cw = tf.matmul(change_U, w_base_l) + change_w_terms[name] = cw * sign_multiplier + + + if prefix == 'forward': + change_w = meta_opt.merge_change_w_forward( + change_w_terms, global_prefix=prefix, prefix='l%d' % l_idx) + elif prefix == 'backward': + change_w = meta_opt.merge_change_w_backward( + change_w_terms, global_prefix=prefix, prefix='l%d' % l_idx) + else: + assert (False) + + if not include_bias: + return change_w + + change_b = tf.reduce_mean(meta_opt.bias_readout(upper_h), [0]) + + # force nonlinearities to be exercised -- biases can't all be increased without bound + change_b_mean = tf.reduce_mean(change_b) + offset = -tf.nn.relu(-change_b_mean) + change_b -= offset + + var = tf.reduce_mean(tf.square(change_b), [0], keepdims=True) + change_b = (change_b) / tf.sqrt(0.5 + var) + return change_w, change_b + + def compute_next_state(self, outputs, meta_opt, previous_state): + zs = outputs.zs + xs = outputs.xs + batch = outputs.batch + mods = outputs.mods + backward_mods = outputs.backward_mods + variables = self.get_variables() + + rev_mods = mods[::-1] + rev_backward_mods = backward_mods[::-1] + rev_xs = xs[::-1] + rev_zs = zs[::-1] + [None] + + to_top = xs[-1] + + # variables that change in the loop + hs = [] + d = meta_opt.compute_top_delta(to_top) # [bs x 32 x delta_channels] + + iterator = utils.eqzip(rev_backward_mods + [None], rev_mods + [None], + [None] + rev_mods, rev_xs, rev_zs) + for (backward_mod, lower_mod, upper_mod, x, z) in iterator: + w_bot = None + if not lower_mod is None: + w_bot = previous_state.variables[variables.index(lower_mod.w)] + w_top = None + if not upper_mod is None: + w_top = previous_state.variables[variables.index(upper_mod.w)] + backward_w = None + if backward_mod is not None: + backward_w = previous_state.variables[variables.index(backward_mod.w)] + if lower_mod is not None: + bias = previous_state.variables[variables.index(lower_mod.b)] + else: + bias = tf.zeros([x.shape[1]]) + + h, d = self.compute_next_h_d( + meta_opt=meta_opt, + w_bot=w_bot, + w_top=w_top, + bias=bias, + backward_w=backward_w, + x=x, + z=z, + d=d) + hs.append(h) + + w_forward_var_idx = [variables.index(mod.w) for mod in rev_mods] + w_backward_var_idx = [variables.index(mod.w) for mod in rev_backward_mods] + b_var_idx = [variables.index(mod.b) for mod in rev_mods] + + # storage location for outputs of below loop + grads = [None for _ in previous_state.variables] + + # over-ride learning rate for perturbation variables + learning_rate = [None for _ in previous_state.variables] + + # This is a map -- no state is shared cross loop + for l_idx, w_forward_idx, w_backward_idx, b_idx, upper_h, lower_h, lower_x, upper_x in utils.eqzip( + range(len(w_forward_var_idx)), w_forward_var_idx, w_backward_var_idx, + b_var_idx, hs[:-1], hs[1:], xs[::-1][1:], xs[::-1][:-1]): + + b_base = previous_state.variables[b_idx] + change_w_forward, change_b = self.weight_change_for_layer( + meta_opt=meta_opt, + l_idx=l_idx, + w_base=previous_state.variables[w_forward_idx], + b_base=b_base, + upper_h=upper_h, + lower_h=lower_h, + upper_x=upper_x, + lower_x=lower_x, + prefix='forward', + include_bias=True) + + if self.identical_updates: + change_w_backward = change_w_forward + else: + change_w_backward = self.weight_change_for_layer( + meta_opt=meta_opt, + l_idx=l_idx, + w_base=previous_state.variables[w_backward_idx], + b_base=b_base, + upper_h=upper_h, + lower_h=lower_h, + 
upper_x=upper_x, + lower_x=lower_x, + prefix='backward', + include_bias=False) + + grads[w_forward_idx] = change_w_forward + + grads[w_backward_idx] = change_w_backward + + grads[b_idx] = change_b + + cur_transformer = common.transformer_at_state(self, + previous_state.variables) + next_state = meta_opt.compute_next_state( + grads, + learning_rate=learning_rate, + cur_state=previous_state, + cur_transformer=lambda x: cur_transformer(x)[0]) + return next_state + + def initial_state(self, meta_opt): + return meta_opt.initial_state(self.get_variables()) diff --git a/models/research/learning_unsupervised_learning/datasets/__init__.py b/models/research/learning_unsupervised_learning/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9949cd96ca8f2fe1c39705a5ca8570de9cad5a66 --- /dev/null +++ b/models/research/learning_unsupervised_learning/datasets/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2018 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +import mnist diff --git a/models/research/learning_unsupervised_learning/datasets/common.py b/models/research/learning_unsupervised_learning/datasets/common.py new file mode 100644 index 0000000000000000000000000000000000000000..11f65ceab57a4114ca3876b3cb6eed86e2263745 --- /dev/null +++ b/models/research/learning_unsupervised_learning/datasets/common.py @@ -0,0 +1,29 @@ +# Copyright 2018 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +import tensorflow as tf +import numpy as np + +ImageLabelOnehot = collections.namedtuple('ImageLabelOnehot', + ['image', 'label', 'label_onehot']) +ImageLabelOnehotRegression = collections.namedtuple( + "ImageLabelOnehotRegression", + ["image", "label", "label_onehot", "regression_target"]) diff --git a/models/research/learning_unsupervised_learning/datasets/mnist.py b/models/research/learning_unsupervised_learning/datasets/mnist.py new file mode 100644 index 0000000000000000000000000000000000000000..6ee595d99ad2523042454f038b4665095f501caf --- /dev/null +++ b/models/research/learning_unsupervised_learning/datasets/mnist.py @@ -0,0 +1,74 @@ +# Copyright 2018 Google, Inc. All Rights Reserved. 
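Note: taken together, the learner and the update process above implement one unrolled inner-loop step: the learner produces activations, the process turns them into weight changes, and assign_state writes the result back. A hypothetical sketch of that wiring (the concrete version appears in evaluation.py later in this patch):

# Hypothetical single inner-loop step (pseudocode, names from the modules above):
#   state = base_model.initial_state(meta_opt)             # variables + optimizer state
#   _, outputs = base_model(batch)                          # forward pass, collects xs/zs/mods
#   next_state = base_model.compute_next_state(outputs, meta_opt, state)
#   train_step_op = meta_opt.assign_state(base_model, next_state)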
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + + +import sonnet as snt +import tensorflow as tf +from tensorflow.python.keras.datasets import mnist +from learning_unsupervised_learning.datasets import common + +class Mnist(snt.AbstractModule): + def __init__(self, device, batch_size=128, name="Mnist"): + self.device = device + self.batch_size = batch_size + + self._make_dataset() + self.iterator = None + + super(Mnist, self).__init__(name=name) + + def _make_dataset(self): + (x_train, y_train), (x_test, y_test) = mnist.load_data() + + x_train = x_train.reshape(60000, 784) + x_test = x_test.reshape(10000, 784) + + dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)) + dataset = dataset.repeat() + dataset = dataset.shuffle(self.batch_size * 3) + dataset = dataset.batch(self.batch_size) + def _map_fn(image, label): + image = tf.to_float(image) / 255. + label.set_shape([self.batch_size]) + label = tf.cast(label, dtype=tf.int32) + label_onehot = tf.one_hot(label, 10) + image = tf.reshape(image, [self.batch_size, 28, 28, 1]) + return common.ImageLabelOnehot( + image=image, label=label, label_onehot=label_onehot) + + self.dataset = dataset.map(_map_fn) + + def _build(self): + if self.iterator is None: + self.iterator = self.dataset.make_one_shot_iterator() + batch = self.iterator.get_next() + [b.set_shape([self.batch_size] + b.shape.as_list()[1:]) for b in batch] + return batch + + +class TinyMnist(Mnist): + def __init__(self, *args, **kwargs): + kwargs.setdefault("name", "TinyMnist") + super(TinyMnist, self).__init__(*args, **kwargs) + + def _make_dataset(self): + super(TinyMnist, self)._make_dataset() + + def _map_fn(batch): + new_img = tf.image.resize_images(batch.image, [14, 14]) + return common.ImageLabelOnehot( + image=new_img, label=batch.label, label_onehot=batch.label_onehot) + + self.dataset = self.dataset.map(_map_fn) diff --git a/models/research/learning_unsupervised_learning/evaluation.py b/models/research/learning_unsupervised_learning/evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..2ec40e99a672f9420200653b92818374e0e84d78 --- /dev/null +++ b/models/research/learning_unsupervised_learning/evaluation.py @@ -0,0 +1,76 @@ +# Copyright 2018 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + + +"""Evaluation job. 
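Note: the dataset modules above yield ImageLabelOnehot batches directly; a hypothetical construction with placeholder device and batch size:

# Hypothetical usage (values illustrative):
#   data_fn = TinyMnist(device="/cpu:0", batch_size=128)
#   batch = data_fn()
#   # batch.image: [128, 14, 14, 1], batch.label: [128], batch.label_onehot: [128, 10]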
+ +This sits on the side and performs evaluation on a saved model. +This is a separate process for ease of use and stability of numbers. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from learning_unsupervised_learning import utils + + +def construct_evaluation_graph(theta_process_fn=None, + w_learner_fn=None, + dataset_fn=None, + meta_objectives=None, + ): + """Construct the evaluation graph. + """ + if meta_objectives is None: + meta_objectives = [] + + tf.train.create_global_step() + + local_device = "" + remote_device = "" + + meta_opt = theta_process_fn( + remote_device=remote_device, local_device=local_device) + + base_model = w_learner_fn( + remote_device=remote_device, local_device=local_device) + + train_dataset = dataset_fn(device=local_device) + + # construct variables + x, outputs = base_model(train_dataset()) + initial_state = base_model.initial_state(meta_opt, max_steps=10) + next_state = base_model.compute_next_state(outputs, meta_opt, initial_state) + with utils.state_barrier_context(next_state): + train_one_step_op = meta_opt.assign_state(base_model, next_state) + + meta_objs = [] + for meta_obj_fn in meta_objectives: + meta_obj = meta_obj_fn(local_device="", remote_device="") + meta_objs.append(meta_obj) + J = meta_obj(train_dataset, lambda x: base_model(x)[0]) + tf.summary.scalar(str(meta_obj.__class__.__name__)+"_J", tf.reduce_mean(J)) + + # TODO(lmetz) this is kinda error prone. + # We should share the construction of the global variables across train and + # make sure both sets of savable variables are the same + checkpoint_vars = meta_opt.remote_variables() + [tf.train.get_global_step()] + for meta_obj in meta_objs: + checkpoint_vars.extend(meta_obj.remote_variables()) + + return checkpoint_vars, train_one_step_op, (base_model, train_dataset) diff --git a/models/research/learning_unsupervised_learning/meta_objective/__init__.py b/models/research/learning_unsupervised_learning/meta_objective/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..54c46145e3c3a9f19110f92197f1d3cb2afe31fb --- /dev/null +++ b/models/research/learning_unsupervised_learning/meta_objective/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2018 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + + +import sklearn +import linear_regression diff --git a/models/research/learning_unsupervised_learning/meta_objective/linear_regression.py b/models/research/learning_unsupervised_learning/meta_objective/linear_regression.py new file mode 100644 index 0000000000000000000000000000000000000000..b49fc2529ccba08a6b47019cd7546f8fb409b28b --- /dev/null +++ b/models/research/learning_unsupervised_learning/meta_objective/linear_regression.py @@ -0,0 +1,258 @@ +# Copyright 2018 Google, Inc. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + + + +"""Closed form linear regression. + +Can be differentiated through. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import numpy as np +import sonnet as snt +import tensorflow as tf + +from learning_unsupervised_learning import utils +from learning_unsupervised_learning import variable_replace + + +def solve_ridge(x, y, ridge_factor): + with tf.name_scope("solve_ridge"): + # Added a column of ones to the end of the feature matrix for bias + A = tf.concat([x, tf.ones((x.shape.as_list()[0], 1))], axis=1) + + # Analytic solution for the ridge regression loss + inv_target = tf.matmul(A, A, transpose_a=True) + np_diag_penalty = ridge_factor * np.ones( + A.shape.as_list()[1], dtype="float32") + # Remove penalty on bias component of weights + np_diag_penalty[-1] = 0. + diag_penalty = tf.constant(np_diag_penalty) + inv_target += tf.diag(diag_penalty) + + inv = tf.matrix_inverse(inv_target) + w = tf.matmul(inv, tf.matmul(A, y, transpose_a=True)) + return w + + +class LinearRegressionMetaObjective(snt.AbstractModule): + """A meta objective based on training Ridge Regression with analytic solution. + + This is used to evaluate the performance of a given feature set trained in + some other manner. + """ + + def __init__(self, + local_device=None, + remote_device=None, + zero_one_labels=True, + normalize_y_hat=True, + normalize_act=False, + averages=1, + ridge_factor=0.1, + center_y=True, + hinge_loss=False, + samples_per_class=10, + test_train_scalar=1.0, + ): + self._local_device = local_device + self._remote_device = remote_device + self.zero_one_labels = zero_one_labels + self.normalize_y_hat = normalize_y_hat + self.normalize_act = normalize_act + self.ridge_factor = ridge_factor + self.averages = averages + self.samples_per_class = samples_per_class + self.center_y=center_y + self.test_train_scalar=test_train_scalar + self.hinge_loss = hinge_loss + + self.dataset_map = {} + + super(LinearRegressionMetaObjective, + self).__init__(name="LinearRegressionMetaObjective") + + def _build(self, dataset, feature_transformer): + if self.samples_per_class is not None: + if dataset not in self.dataset_map: + # datasets are outside of frames from while loops + with tf.control_dependencies(None): + self.dataset_map[dataset] = utils.sample_n_per_class( + dataset, self.samples_per_class) + + dataset = self.dataset_map[dataset] + + stats = collections.defaultdict(list) + losses = [] + # TODO(lmetz) move this to ingraph control flow? 
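Note: solve_ridge above appends a column of ones for the bias, excludes that column from the penalty, and solves the normal equations in closed form. A NumPy rendering with illustrative shapes, using a linear solve in place of the explicit inverse:

import numpy as np

def solve_ridge_np(x, y, ridge_factor):
    a = np.concatenate([x, np.ones((x.shape[0], 1), dtype=x.dtype)], axis=1)
    penalty = ridge_factor * np.eye(a.shape[1], dtype=x.dtype)
    penalty[-1, -1] = 0.0                        # no penalty on the bias column
    w = np.linalg.solve(a.T @ a + penalty, a.T @ y)
    return w                                     # last row is the bias

x = np.random.randn(40, 8).astype(np.float32)
y = np.random.randn(40, 3).astype(np.float32)
print(solve_ridge_np(x, y, ridge_factor=0.1).shape)  # (9, 3)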
+ for _ in xrange(self.averages): + loss, stat = self._build_once(dataset, feature_transformer) + losses.append(loss) + for k, v in stat.items(): + stats[k].append(v) + stats = {k: tf.add_n(v) / float(len(v)) for k, v in stats.items()} + + summary_updates = [] + for k, v in stats.items(): + tf.summary.scalar(k, v) + + with tf.control_dependencies(summary_updates): + return tf.add_n(losses) / float(len(losses)) + + def _build_once(self, dataset, feature_transformer): + with tf.device(self._local_device): + batch = dataset() + num_classes = batch.label_onehot.shape.as_list()[1] + + regression_mod = snt.Linear(num_classes) + + if self.normalize_act: + + def normalize_transformer(x): + unnorm_x = feature_transformer(x) + return tf.nn.l2_normalize(unnorm_x, 0) + + feature_transformer_wrap = normalize_transformer + else: + feature_transformer_wrap = feature_transformer + + # construct the variables of the right shape in the sonnet module by + # calling a forward pass through the regressor. + with utils.assert_no_new_variables(): + dummy_features = feature_transformer_wrap(batch) + regression_mod(dummy_features) + reg_w = regression_mod.w + reg_b = regression_mod.b + + batch_test = dataset() + all_batch = utils.structure_map_multi(lambda x: tf.concat(x, 0), [batch, batch_test]) + #all_batch = tf.concat([batch, batch_test], 0) + # Grab a new batch of data from the dataset. + features = feature_transformer_wrap(all_batch) + features, features_test = utils.structure_map_split(lambda x: tf.split(x, 2, axis=0), features) + + def center_y(y): + y -= tf.reduce_mean(y) + y *= tf.rsqrt(tf.reduce_mean(tf.reduce_sum(y**2, axis=[1], keep_dims=True))) + return y + def get_y_vec(batch): + y_pieces = [] + if hasattr(batch, "label_onehot"): + if self.zero_one_labels: + y_pieces += [batch.label_onehot] + else: + y_pieces += [2. * batch.label_onehot - 1.] + if hasattr(batch, "regression_target"): + y_pieces += [batch.regression_target] + y = tf.concat(y_pieces, 1) + if self.center_y: + y = center_y(y) + return y + + y_train = get_y_vec(batch) + + w = solve_ridge(features, y_train, self.ridge_factor) + + # Generate features from another batch to evaluate loss on the validation + # set. This provide a less overfit signal to the learned optimizer. + y_test = get_y_vec(batch_test) + + def compute_logit(features): + # We have updated the classifier mod in previous steps, we need to + # substitute out those variables to get new values. 
+ replacement = collections.OrderedDict([(reg_w, w[:-1]), (reg_b, w[-1])]) + with variable_replace.variable_replace(replacement): + logits = regression_mod(features) + + return logits + + batch_size = y_train.shape.as_list()[0] + + logit_train = compute_logit(features) + logit_test_unnorm = compute_logit(features_test) + if self.normalize_y_hat: + logit_test = logit_test_unnorm / tf.sqrt( + tf.reduce_sum(logit_test_unnorm**2, axis=[1], keep_dims=True)) + else: + logit_test = logit_test_unnorm + + stats = {} + + if self.hinge_loss: + # slightly closer to the true classification loss + # any distance smaller than 1 is guaranteed to map to the correct class + mse_test = tf.reduce_sum(tf.nn.relu(tf.reduce_sum(tf.square(logit_test - y_test), axis=1)-1.)) / batch_size + else: + mse_test = tf.reduce_sum(tf.square(logit_test - y_test)) / batch_size + + stats["mse_test"] = mse_test + + mse_train = tf.reduce_sum(tf.square(logit_train - y_train)) / batch_size + stats["mse_train"] = mse_train + + is_correct_test = tf.equal(tf.argmax(logit_test, 1), tf.argmax(y_test, 1)) + accuracy_test = tf.reduce_mean(tf.cast(is_correct_test, tf.float32)) + stats["accuracy_test"] = accuracy_test + + def test_confusion_fn(): + test_confusion = tf.confusion_matrix(tf.argmax(y_test, 1), tf.argmax(logit_test, 1)) + test_confusion = tf.to_float(test_confusion) / tf.constant((logit_test.shape.as_list()[0] / float(logit_test.shape.as_list()[1])), dtype=tf.float32) + test_confusion = tf.expand_dims(tf.expand_dims(test_confusion, 0), 3) + return test_confusion + tf.summary.image("test_confusion", test_confusion_fn()) + + def train_confusion_fn(): + train_confusion = tf.confusion_matrix(tf.argmax(y_train, 1), tf.argmax(logit_train, 1)) + train_confusion = tf.to_float(train_confusion) / tf.constant((logit_train.shape.as_list()[0] / float(logit_train.shape.as_list()[1])), dtype=tf.float32) + train_confusion = tf.expand_dims(tf.expand_dims(train_confusion, 0), 3) + return train_confusion + tf.summary.image("train_confusion", train_confusion_fn()) + + is_correct = tf.equal(tf.argmax(logit_train, 1), tf.argmax(y_train, 1)) + accuracy_train = tf.reduce_mean(tf.cast(is_correct, tf.float32)) + stats["accuracy_train"] = accuracy_train + + reg = self.ridge_factor * tf.reduce_sum(tf.square(w[:-1])) / batch_size + stats["ridge_component"] = reg + + stats["total_loss"] = mse_test + reg + + loss_to_train_at = (reg+ mse_test) * self.test_train_scalar + (mse_train + reg)*(1 - self.test_train_scalar) + + loss_to_train_at = tf.identity(loss_to_train_at) + + # Minimizing the test loss should not require regurization because the + # metaobjective is solved for the training loss + return loss_to_train_at, stats + + def local_variables(self): + """List of variables that need to be updated for each evaluation. + + These variables should not be stored on a parameter server and + should be reset every computation of a meta_objective loss. + + Returns: + vars: list of tf.Variable + """ + return list( + snt.get_variables_in_module(self, tf.GraphKeys.TRAINABLE_VARIABLES)) + + def remote_variables(self): + return [] diff --git a/models/research/learning_unsupervised_learning/meta_objective/sklearn.py b/models/research/learning_unsupervised_learning/meta_objective/sklearn.py new file mode 100644 index 0000000000000000000000000000000000000000..4f1f2d59102c511fd42ad323c32ab1709bd60c90 --- /dev/null +++ b/models/research/learning_unsupervised_learning/meta_objective/sklearn.py @@ -0,0 +1,167 @@ +# Copyright 2018 Google, Inc. All Rights Reserved. 
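Note: ignoring the hinge and normalization options, the loss the meta-objective above returns is a plain linear readout of the features with the solved ridge weights, blended between held-out and training error by test_train_scalar. A NumPy sketch with illustrative shapes (w stands in for the solve_ridge output, whose last row is the bias):

import numpy as np

def ridge_meta_loss(f_tr, y_tr, f_te, y_te, w, ridge_factor=0.1, test_train_scalar=1.0):
    logits_tr = f_tr @ w[:-1] + w[-1]
    logits_te = f_te @ w[:-1] + w[-1]
    bs = y_tr.shape[0]
    mse_tr = np.sum((logits_tr - y_tr) ** 2) / bs
    mse_te = np.sum((logits_te - y_te) ** 2) / bs
    reg = ridge_factor * np.sum(w[:-1] ** 2) / bs
    # blend of held-out and training loss, as in loss_to_train_at above
    return test_train_scalar * (mse_te + reg) + (1.0 - test_train_scalar) * (mse_tr + reg)

f_tr, f_te = np.random.randn(20, 8), np.random.randn(20, 8)
y_tr = np.eye(4)[np.random.randint(0, 4, 20)]
y_te = np.eye(4)[np.random.randint(0, 4, 20)]
w = np.random.randn(9, 4)            # stand-in for the solve_ridge output
print(ridge_meta_loss(f_tr, y_tr, f_te, y_te, w))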
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + + +""" + +Can NOT be differentiated through. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import numpy as np +import sonnet as snt +import tensorflow as tf +from tensorflow.python.framework import function + +from learning_unsupervised_learning import utils + +from learning_unsupervised_learning.meta_objective import utils as meta_obj_utils + +from sklearn import svm +from sklearn import linear_model + + +def build_fit(device, model_fn, num_classes, probs=True): + + def _py_fit_predict(trX, trY, teX): + assert len(np.unique(trY)) == num_classes + model = model_fn() + model.fit(trX, trY) + trP = model.predict(trX) + teP = model.predict(teX) + if probs: + teP_probs = model.predict_log_proba(teX) + return trP.astype(np.int64), teP.astype(np.int64), teP_probs.astype( + np.float32) + else: + teP = model.predict(teX) + return trP.astype(np.int64), teP.astype(np.int64) + + def return_fn(trX, trY, teX): + with tf.device(device): + with tf.device("/cpu:0"): + if probs: + return tf.py_func( + _py_fit_predict, + [tf.identity(trX), + tf.identity(trY), + tf.identity(teX)], [tf.int64, tf.int64, tf.float32]) + else: + return tf.py_func( + _py_fit_predict, + [tf.identity(trX), + tf.identity(trY), + tf.identity(teX)], [tf.int64, tf.int64]) + + return return_fn + + +class SKLearn(meta_obj_utils.MultiTrialMetaObjective): + + def __init__( + self, + local_device=None, + remote_device=None, + averages=1, + samples_per_class=10, + probs=False, + stddev=0.01, + n_samples=10, + name="SKLearn", + ): + self._local_device = local_device + self._remote_device = remote_device + self.name = name + self.probs = probs + self.n_samples = n_samples + self.stddev = stddev + + super(SKLearn, self).__init__( + name=name, samples_per_class=samples_per_class, averages=averages) + + def _get_model(self): + raise NotImplemented() + + def _build_once(self, dataset, feature_transformer): + with tf.device(self._local_device): + tr_batch = dataset() + te_batch = dataset() + num_classes = tr_batch.label_onehot.shape.as_list()[1] + all_batch = utils.structure_map_multi(lambda x: tf.concat(x, 0), + [tr_batch, te_batch]) + features = feature_transformer(all_batch) + trX, teX = utils.structure_map_split(lambda x: tf.split(x, 2, axis=0), + features) + trY = tf.to_int64(tr_batch.label) + trY_onehot = tf.to_int32(tr_batch.label_onehot) + teY = tf.to_int64(te_batch.label) + teY_shape = teY.shape.as_list() + + def blackbox((trX, trY, teX, teY)): + trY = tf.to_int32(tf.rint(trY)) + teY = tf.to_int32(tf.rint(teY)) + tf_fn = build_fit( + self._local_device, + self._get_model, + num_classes=num_classes, + probs=self.probs) + if self.probs: + trP, teP, teP_probs = tf_fn(trX, trY, teX) + else: + trP, teP = tf_fn(trX, trY, teX) + + teY.set_shape(teY_shape) + if self.probs: + onehot = tf.one_hot(teY, num_classes) + crossent = 
-tf.reduce_sum(onehot * teP_probs, [1]) + return tf.reduce_mean(crossent) + else: + # use error rate as the loss if no surrogate is avalible. + return 1 - tf.reduce_mean( + tf.to_float(tf.equal(teY, tf.to_int32(teP)))) + + test_loss = blackbox((trX, tf.to_float(trY), teX, tf.to_float(teY))) + + stats = {} + + tf_fn = build_fit( + self._local_device, + self._get_model, + num_classes=num_classes, + probs=self.probs) + if self.probs: + trP, teP, teP_probs = tf_fn(trX, trY, teX) + else: + trP, teP = tf_fn(trX, trY, teX) + stats["%s/accuracy_train" % self.name] = tf.reduce_mean( + tf.to_float(tf.equal(tf.to_int32(trY), tf.to_int32(trP)))) + stats["%s/accuracy_test" % self.name] = tf.reduce_mean( + tf.to_float(tf.equal(tf.to_int32(teY), tf.to_int32(teP)))) + stats["%s/test_loss" % self.name] = test_loss + return test_loss, stats + + +class LogisticRegression(SKLearn): + + def __init__(self, C=1.0, name="LogisticRegression", probs=True, **kwargs): + self.C = C + super(LogisticRegression, self).__init__(name=name, probs=probs, **kwargs) + + def _get_model(self): + return linear_model.LogisticRegression(C=self.C) diff --git a/models/research/learning_unsupervised_learning/meta_objective/utils.py b/models/research/learning_unsupervised_learning/meta_objective/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a29197d1d0cb7f0fdcebac3980027640651f185b --- /dev/null +++ b/models/research/learning_unsupervised_learning/meta_objective/utils.py @@ -0,0 +1,78 @@ +# Copyright 2018 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import numpy as np +import sonnet as snt +import tensorflow as tf + +from learning_unsupervised_learning import optimizers +from learning_unsupervised_learning import utils +from learning_unsupervised_learning import summary_utils +from learning_unsupervised_learning import variable_replace + +class MultiTrialMetaObjective(snt.AbstractModule): + def __init__(self, samples_per_class, averages, **kwargs): + self.samples_per_class = samples_per_class + self.averages = averages + self.dataset_map = {} + + super(MultiTrialMetaObjective, + self).__init__(**kwargs) + + def _build(self, dataset, feature_transformer): + if self.samples_per_class is not None: + if dataset not in self.dataset_map: + # datasets are outside of frames from while loops + with tf.control_dependencies(None): + self.dataset_map[dataset] = utils.sample_n_per_class( + dataset, self.samples_per_class) + + dataset = self.dataset_map[dataset] + + stats = collections.defaultdict(list) + losses = [] + # TODO(lmetz) move this to ingraph control flow? 
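Note: underneath the tf.py_func wrapper above, the work is ordinary scikit-learn; a standalone sketch of the equivalent fit-and-score step for the LogisticRegression meta-objective (shapes and data are illustrative):

import numpy as np
from sklearn import linear_model

def fit_predict_np(tr_x, tr_y, te_x, te_y, C=1.0):
    model = linear_model.LogisticRegression(C=C)
    model.fit(tr_x, tr_y)
    log_probs = model.predict_log_proba(te_x)          # [n_test, n_classes]
    onehot = np.eye(log_probs.shape[1])[te_y]
    crossent = -np.sum(onehot * log_probs, axis=1)     # per-example NLL
    accuracy = np.mean(model.predict(te_x) == te_y)
    return crossent.mean(), accuracy

tr_x, tr_y = np.random.randn(200, 16), np.random.randint(0, 5, 200)
te_x, te_y = np.random.randn(200, 16), np.random.randint(0, 5, 200)
print(fit_predict_np(tr_x, tr_y, te_x, te_y))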
+ for _ in xrange(self.averages): + loss, stat = self._build_once(dataset, feature_transformer) + losses.append(loss) + for k, v in stat.items(): + stats[k].append(v) + stats = {k: tf.add_n(v) / float(len(v)) for k, v in stats.items()} + + for k, v in stats.items(): + tf.summary.scalar(k, v) + + return tf.add_n(losses) / float(len(losses)) + + def local_variables(self): + """List of variables that need to be updated for each evaluation. + + These variables should not be stored on a parameter server and + should be reset every computation of a meta_objective loss. + + Returns: + vars: list of tf.Variable + """ + return list( + snt.get_variables_in_module(self, tf.GraphKeys.TRAINABLE_VARIABLES)) + + def remote_variables(self): + return [] diff --git a/models/research/learning_unsupervised_learning/optimizers.py b/models/research/learning_unsupervised_learning/optimizers.py new file mode 100644 index 0000000000000000000000000000000000000000..02c6106b19d1255907beb0ade07c46c5b065f701 --- /dev/null +++ b/models/research/learning_unsupervised_learning/optimizers.py @@ -0,0 +1,133 @@ +# Copyright 2018 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + + + +"""Optimizers for use in unrolled optimization. + +These optimizers contain a compute_updates function and its own ability to keep +track of internal state. +These functions can be used with a tf.while_loop to perform multiple training +steps per sess.run. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import abc +import collections +import tensorflow as tf +import sonnet as snt + +from learning_unsupervised_learning import utils + +from tensorflow.python.framework import ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import resource_variable_ops +from tensorflow.python.training import optimizer +from tensorflow.python.training import training_ops + + +class UnrollableOptimizer(snt.AbstractModule): + """Interface for optimizers that can be used in unrolled computation. + apply_gradients is derrived from compute_update and assign_state. + """ + + def __init__(self, *args, **kwargs): + super(UnrollableOptimizer, self).__init__(*args, **kwargs) + self() + + @abc.abstractmethod + def compute_updates(self, xs, gs, state=None): + """Compute next step updates for a given variable list and state. + + Args: + xs: list of tensors + The "variables" to perform an update on. + Note these must match the same order for which get_state was originally + called. + gs: list of tensors + Gradients of `xs` with respect to some loss. + state: Any + Optimizer specific state to keep track of accumulators such as momentum + terms + """ + raise NotImplementedError() + + def _build(self): + pass + + @abc.abstractmethod + def get_state(self, var_list): + """Get the state value associated with a list of tf.Variables. 
+ + This state is commonly going to be a NamedTuple that contains some + mapping between variables and the state associated with those variables. + This state could be a moving momentum variable tracked by the optimizer. + + Args: + var_list: list of tf.Variable + Returns: + state: Any + Optimizer specific state + """ + raise NotImplementedError() + + def assign_state(self, state): + """Assigns the state to the optimizers internal variables. + + Args: + state: Any + Returns: + op: tf.Operation + The operation that performs the assignment. + """ + raise NotImplementedError() + + def apply_gradients(self, grad_vars): + gradients, variables = zip(*grad_vars) + state = self.get_state(variables) + new_vars, new_state = self.compute_updates(variables, gradients, state) + assign_op = self.assign_state(new_state) + op = utils.assign_variables(variables, new_vars) + return tf.group(assign_op, op, name="apply_gradients") + + +class UnrollableGradientDescentRollingOptimizer(UnrollableOptimizer): + + def __init__(self, + learning_rate, + name="UnrollableGradientDescentRollingOptimizer"): + self.learning_rate = learning_rate + super(UnrollableGradientDescentRollingOptimizer, self).__init__(name=name) + + + def compute_updates(self, xs, gs, learning_rates, state): + new_vars = [] + for x, g, lr in utils.eqzip(xs, gs, learning_rates): + if lr is None: + lr = self.learning_rate + if g is not None: + new_vars.append((x * (1 - lr) - g * lr)) + else: + new_vars.append(x) + return new_vars, state + + def get_state(self, var_list): + return tf.constant(0.0) + + def assign_state(self, state, var_list=None): + return tf.no_op() diff --git a/models/research/learning_unsupervised_learning/run_eval.py b/models/research/learning_unsupervised_learning/run_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..dcb2529dd4cc5354012befd5790c8d402f4caafd --- /dev/null +++ b/models/research/learning_unsupervised_learning/run_eval.py @@ -0,0 +1,122 @@ +# Copyright 2018 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +""" Script that iteratively applies the unsupervised update rule and evaluates the + +meta-objective performance. 
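Note: the rolling optimizer above applies plain gradient descent with a multiplicative decay of the current value, x_new = x * (1 - lr) - lr * g. A one-line NumPy check of that rule with illustrative numbers:

import numpy as np

x = np.array([1.0, -2.0, 0.5])
g = np.array([0.2, 0.0, -0.1])
lr = 1e-4
x_next = x * (1 - lr) - lr * g   # mirrors compute_updates above
print(x_next)                    # approximately [0.99988, -1.9998, 0.49996]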
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import flags +from absl import app + +from learning_unsupervised_learning import evaluation +from learning_unsupervised_learning import datasets +from learning_unsupervised_learning import architectures +from learning_unsupervised_learning import summary_utils +from learning_unsupervised_learning import meta_objective + +import tensorflow as tf +import sonnet as snt + +from tensorflow.contrib.framework.python.framework import checkpoint_utils + +flags.DEFINE_string("checkpoint_dir", None, "Dir to load pretrained update rule from") +flags.DEFINE_string("train_log_dir", None, "Training log directory") + +FLAGS = flags.FLAGS + + +def train(train_log_dir, checkpoint_dir, eval_every_n_steps=10, num_steps=3000): + dataset_fn = datasets.mnist.TinyMnist + w_learner_fn = architectures.more_local_weight_update.MoreLocalWeightUpdateWLearner + theta_process_fn = architectures.more_local_weight_update.MoreLocalWeightUpdateProcess + + meta_objectives = [] + meta_objectives.append( + meta_objective.linear_regression.LinearRegressionMetaObjective) + meta_objectives.append(meta_objective.sklearn.LogisticRegression) + + checkpoint_vars, train_one_step_op, ( + base_model, dataset) = evaluation.construct_evaluation_graph( + theta_process_fn=theta_process_fn, + w_learner_fn=w_learner_fn, + dataset_fn=dataset_fn, + meta_objectives=meta_objectives) + batch = dataset() + pre_logit, outputs = base_model(batch) + + global_step = tf.train.get_or_create_global_step() + var_list = list( + snt.get_variables_in_module(base_model, tf.GraphKeys.TRAINABLE_VARIABLES)) + + tf.logging.info("all vars") + for v in tf.all_variables(): + tf.logging.info(" %s" % str(v)) + global_step = tf.train.get_global_step() + accumulate_global_step = global_step.assign_add(1) + reset_global_step = global_step.assign(0) + + train_op = tf.group( + train_one_step_op, accumulate_global_step, name="train_op") + + summary_op = tf.summary.merge_all() + + file_writer = summary_utils.LoggingFileWriter(train_log_dir, regexes=[".*"]) + if checkpoint_dir: + str_var_list = checkpoint_utils.list_variables(checkpoint_dir) + name_to_v_map = {v.op.name: v for v in tf.all_variables()} + var_list = [ + name_to_v_map[vn] for vn, _ in str_var_list if vn in name_to_v_map + ] + saver = tf.train.Saver(var_list) + missed_variables = [ + v.op.name for v in set( + snt.get_variables_in_scope("LocalWeightUpdateProcess", + tf.GraphKeys.GLOBAL_VARIABLES)) - + set(var_list) + ] + assert len(missed_variables) == 0, "Missed a theta variable." + + hooks = [] + + with tf.train.SingularMonitoredSession(master="", hooks=hooks) as sess: + + # global step should be restored from the evals job checkpoint or zero for fresh. 
+ step = sess.run(global_step) + + if step == 0 and checkpoint_dir: + tf.logging.info("force restore") + saver.restore(sess, checkpoint_dir) + tf.logging.info("force restore done") + sess.run(reset_global_step) + step = sess.run(global_step) + + while step < num_steps: + if step % eval_every_n_steps == 0: + s, _, step = sess.run([summary_op, train_op, global_step]) + file_writer.add_summary(s, step) + else: + _, step = sess.run([train_op, global_step]) + + +def main(argv): + train(FLAGS.train_log_dir, FLAGS.checkpoint_dir) + + +if __name__ == "__main__": + app.run(main) diff --git a/models/research/learning_unsupervised_learning/summary_utils.py b/models/research/learning_unsupervised_learning/summary_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d5c0fdd9186bdef0b4e25ca10978e22ab910d276 --- /dev/null +++ b/models/research/learning_unsupervised_learning/summary_utils.py @@ -0,0 +1,181 @@ +# Copyright 2018 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + + + +import collections +import functools +import threading +import tensorflow as tf +import matplotlib +import numpy as np +import time +import re +import math +matplotlib.use("Agg") + +import matplotlib.pyplot as plt +import scipy.signal + +from tensorflow.python.util import tf_should_use +from tensorflow.contrib.summary import summary_ops +from tensorflow.python.ops import summary_op_util +from tensorflow.contrib.summary import gen_summary_ops + +_DEBUG_DISABLE_SUMMARIES=False + +class LoggingFileWriter(tf.summary.FileWriter): + """A FileWriter that also logs things out. + + This is entirely for ease of debugging / not having to open up Tensorboard + a lot. + """ + + def __init__(self, logdir, regexes=[], **kwargs): + self.regexes = regexes + super(LoggingFileWriter, self).__init__(logdir, **kwargs) + + def add_summary(self, summary, global_step): + if type(summary) != tf.Summary: + summary_p = tf.Summary() + summary_p.ParseFromString(summary) + summary = summary_p + for s in summary.value: + for exists in [re.match(p, s.tag) for p in self.regexes]: + if exists is not None: + tf.logging.info("%d ] %s : %f", global_step, s.tag, s.simple_value) + break + super(LoggingFileWriter, self).add_summary(summary, global_step) + + +def image_grid(images, max_grid_size=4, border=1): + """Given images and N, return first N^2 images as an NxN image grid. 
+ + Args: + images: a `Tensor` of size [batch_size, height, width, channels] + max_grid_size: Maximum image grid height/width + + Returns: + Single image batch, of dim [1, h*n, w*n, c] + """ + batch_size = images.shape.as_list()[0] + to_pad = int((np.ceil(np.sqrt(batch_size)))**2 - batch_size) + images = tf.pad(images, [[0, to_pad], [0, border], [0, border], [0, 0]]) + + batch_size = images.shape.as_list()[0] + grid_size = min(int(np.sqrt(batch_size)), max_grid_size) + assert images.shape.as_list()[0] >= grid_size * grid_size + + # If we have a depth channel + if images.shape.as_list()[-1] == 4: + images = images[:grid_size * grid_size, :, :, 0:3] + depth = tf.image.grayscale_to_rgb(images[:grid_size * grid_size, :, :, 3:4]) + + images = tf.reshape(images, [-1, images.shape.as_list()[2], 3]) + split = tf.split(images, grid_size, axis=0) + depth = tf.reshape(depth, [-1, images.shape.as_list()[2], 3]) + depth_split = tf.split(depth, grid_size, axis=0) + grid = tf.concat(split + depth_split, 1) + return tf.expand_dims(grid, 0) + else: + images = images[:grid_size * grid_size, :, :, :] + images = tf.reshape( + images, [-1, images.shape.as_list()[2], + images.shape.as_list()[3]]) + split = tf.split(value=images, num_or_size_splits=grid_size, axis=0) + grid = tf.concat(split, 1) + return tf.expand_dims(grid, 0) + + +def first_layer_weight_image(weight, shape): + weight_image = tf.reshape(weight, + shape + [tf.identity(weight).shape.as_list()[1]]) + # [winx, winy, wout] + mean, var = tf.nn.moments(weight_image, [0,1,2], keep_dims=True) + #mean, var = tf.nn.moments(weight_image, [0,1], keep_dims=True) + weight_image = (weight_image - mean) / tf.sqrt(var + 1e-5) + weight_image = (weight_image + 1.0) / 2.0 + weight_image = tf.clip_by_value(weight_image, 0, 1) + weight_image = tf.transpose(weight_image, (3, 0, 1, 2)) + grid = image_grid(weight_image, max_grid_size=10) + return grid + +def inner_layer_weight_image(weight): + """Visualize a weight matrix of an inner layer. + Add padding to make it square, then visualize as a gray scale image + """ + weight = tf.identity(weight) # turn into a tensor + weight = weight / (tf.reduce_max(tf.abs(weight), [0], keep_dims=True)) + weight = tf.reshape(weight, [1]+weight.shape.as_list() + [1]) + return weight + + +def activation_image(activations, label_onehot): + """Make a row sorted by class for each activation. 
Put a black line around the activations.""" + labels = tf.argmax(label_onehot, axis=1) + _, n_classes = label_onehot.shape.as_list() + mean, var = tf.nn.moments(activations, [0, 1]) + activations = (activations - mean)/tf.sqrt(var+1e-5) + + activations = tf.clip_by_value(activations, -1, 1) + activations = (activations + 1.0) / 2.0 # shift to [0, 1] + + canvas = [] + for i in xrange(n_classes): + inds = tf.where(tf.equal(labels, i)) + + def _gather(): + return tf.squeeze(tf.gather(activations, inds), 1) + + def _empty(): + return tf.zeros([0, activations.shape.as_list()[1]], dtype=tf.float32) + + assert inds.shape.as_list()[0] is None + x = tf.cond(tf.equal(tf.shape(inds)[0], 0), _empty, _gather) + canvas.append(x) + canvas.append(tf.zeros([1, activations.shape.as_list()[1]])) + canvas = tf.concat(canvas, 0) + canvas = tf.reshape(canvas, [1, activations.shape.as_list()[0]+n_classes, canvas.shape.as_list()[1], 1]) + return canvas + + +def sorted_images(images, label_onehot): + # images is [bs, x, y, c] + labels = tf.argmax(label_onehot, axis=1) + _, n_classes = label_onehot.shape.as_list() + to_stack = [] + for i in xrange(n_classes): + inds = tf.where(tf.equal(labels, i)) + + def _gather(): + return tf.squeeze(tf.gather(images, inds), 1) + + def _empty(): + return tf.zeros([0] + images.shape.as_list()[1:], dtype=tf.float32) + + assert inds.shape.as_list()[0] is None + x = tf.cond(tf.equal(tf.shape(inds)[0], 0), _empty, _gather) + to_stack.append(x) + # pad / trim all up to 10. + padded = [] + for t in to_stack: + n_found = tf.shape(t)[0] + pad = tf.pad(t[0:10], tf.stack([tf.stack([0,tf.maximum(0, 10-n_found)]), [0,0], [0,0], [0,0]])) + padded.append(pad) + + xs = [tf.concat(tf.split(p, 10), axis=1) for p in padded] + ys = tf.concat(xs, axis=2) + ys = tf.cast(tf.clip_by_value(ys, 0., 1.) * 255., tf.uint8) + return ys diff --git a/models/research/learning_unsupervised_learning/utils.py b/models/research/learning_unsupervised_learning/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ca56ca93181df1ed9c403fef79e8154c3c9515b4 --- /dev/null +++ b/models/research/learning_unsupervised_learning/utils.py @@ -0,0 +1,287 @@ +# Copyright 2018 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Utilities. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import contextlib +import tensorflow as tf +import sonnet as snt +import itertools +import functools + +from tensorflow.core.framework import node_def_pb2 +from tensorflow.python.framework import device as pydev +from tensorflow.python.framework import errors +from tensorflow.python.ops import variable_scope as variable_scope_ops +from sonnet.python.modules import util as snt_util + +from tensorflow.python.util import nest + + +def eqzip(*args): + """Zip but raises error if lengths don't match. 
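+
+  For example, eqzip([1, 2], ["a", "b"]) returns [(1, "a"), (2, "b")], while
+  eqzip([1, 2], ["a"]) raises a ValueError.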
+ + Args: + *args: list of lists or tuples + Returns: + list: the result of zip + Raises: + ValueError: when the lengths don't match + """ + + sizes = [len(x) for x in args] + if not all([sizes[0] == x for x in sizes]): + raise ValueError("Lists are of different sizes. \n %s"%str(sizes)) + return zip(*args) + + +@contextlib.contextmanager +def assert_no_new_variables(): + """Ensure that no tf.Variables are constructed inside the context. + + Yields: + None + Raises: + ValueError: if there is a variable created. + """ + num_vars = len(tf.global_variables()) + old_variables = tf.global_variables() + yield + if len(tf.global_variables()) != num_vars: + new_vars = set(tf.global_variables()) - set(old_variables) + tf.logging.error("NEW VARIABLES CREATED") + tf.logging.error(10*"=") + for v in new_vars: + tf.logging.error(v) + + raise ValueError("Variables created inside an " + "assert_no_new_variables context") + if old_variables != tf.global_variables(): + raise ValueError("Variables somehow changed inside an " + "assert_no_new_variables context." + "This means something modified the tf.global_variables()") + + +def get_variables_in_modules(module_list): + var_list = [] + for m in module_list: + var_list.extend(snt.get_variables_in_module(m)) + return var_list + + +def state_barrier_context(state): + """Return a context manager that prevents interior ops from running + unless the whole state has been computed. + + This is to prevent assign race conditions. + """ + tensors = [x for x in nest.flatten(state) if type(x) == tf.Tensor] + tarray = [x.flow for x in nest.flatten(state) if hasattr(x, "flow")] + return tf.control_dependencies(tensors + tarray) + + +def _identity_fn(tf_entity): + if hasattr(tf_entity, "identity"): + return tf_entity.identity() + else: + return tf.identity(tf_entity) + + +def state_barrier_result(state): + """Return the same state, but with a control dependency to prevent it from + being partially computed + """ + with state_barrier_context(state): + return nest.map_structure(_identity_fn, state) + + +def train_iterator(num_iterations): + """Iterator that returns an index of the current step. + This iterator runs forever if num_iterations is None + otherwise it runs for some fixed amount of steps. + """ + if num_iterations is None: + return itertools.count() + else: + return xrange(num_iterations) + + +def print_op(op, msg): + """Print a string and return an op wrapped in a control dependency to make + sure it ran.""" + print_op = tf.Print(tf.constant(0), [tf.constant(0)], msg) + return tf.group(op, print_op) + + +class MultiQueueRunner(tf.train.QueueRunner): + """A QueueRunner with multiple queues """ + def __init__(self, queues, enqueue_ops): + close_op = tf.group(* [q.close() for q in queues]) + cancel_op = tf.group( + * [q.close(cancel_pending_enqueues=True) for q in queues]) + queue_closed_exception_types = (errors.OutOfRangeError,) + + enqueue_op = tf.group(*enqueue_ops, name="multi_enqueue") + + super(MultiQueueRunner, self).__init__( + queues[0], + enqueue_ops=[enqueue_op], + close_op=close_op, + cancel_op=cancel_op, + queue_closed_exception_types=queue_closed_exception_types) + + +# This function is not elegant, but I tried so many other ways to get this to +# work and this is the only one that ended up not incuring significant overhead +# or obscure tensorflow bugs. +def sample_n_per_class(dataset, samples_per_class): + """Create a new callable / dataset object that returns batches of each with + samples_per_class per label. 
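+
+  For example (a sketch): with a 10-class dataset and samples_per_class=2, the
+  returned callable produces batches of 20 examples, two per class, shuffled
+  within the batch.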
+ + Args: + dataset: fn + samples_per_class: int + Returns: + function, [] -> batch where batch is the same type as the return of + dataset(). + """ + + with tf.control_dependencies(None), tf.name_scope(None): + with tf.name_scope("queue_runner/sample_n_per_class"): + batch = dataset() + num_classes = batch.label_onehot.shape.as_list()[1] + batch_size = num_classes * samples_per_class + + flatten = nest.flatten(batch) + queues = [] + enqueue_ops = [] + capacity = samples_per_class * 20 + for i in xrange(num_classes): + queue = tf.FIFOQueue( + capacity=capacity, + shapes=[f.shape.as_list()[1:] for f in flatten], + dtypes=[f.dtype for f in flatten]) + queues.append(queue) + + idx = tf.where(tf.equal(batch.label, i)) + sub_batch = [] + to_enqueue = [] + for elem in batch: + new_e = tf.gather(elem, idx) + new_e = tf.squeeze(new_e, 1) + to_enqueue.append(new_e) + + remaining = (capacity - queue.size()) + to_add = tf.minimum(tf.shape(idx)[0], remaining) + + def _enqueue(): + return queue.enqueue_many([t[:to_add] for t in to_enqueue]) + + enqueue_op = tf.cond( + tf.equal(to_add, 0), tf.no_op, _enqueue) + enqueue_ops.append(enqueue_op) + + # This has caused many deadlocks / issues. This is some logging to at least + # shed light to what is going on. + print_lam = lambda: tf.Print(tf.constant(0.0), [q.size() for q in queues], "MultiQueueRunner queues status. Has capacity %d"%capacity) + some_percent_of_time = tf.less(tf.random_uniform([]), 0.0005) + maybe_print = tf.cond(some_percent_of_time, print_lam, lambda: tf.constant(0.0)) + with tf.control_dependencies([maybe_print]): + enqueue_ops = [tf.group(e) for e in enqueue_ops] + qr = MultiQueueRunner(queues=queues, enqueue_ops=enqueue_ops) + tf.train.add_queue_runner(qr) + + def dequeue_batch(): + with tf.name_scope("sample_n_per_batch/dequeue/"): + entries = [] + for q in queues: + entries.append(q.dequeue_many(samples_per_class)) + + flat_batch = [tf.concat(x, 0) for x in zip(*entries)] + idx = tf.random_shuffle(tf.range(batch_size)) + flat_batch = [tf.gather(f, idx, axis=0) for f in flat_batch] + return nest.pack_sequence_as(batch, flat_batch) + + return dequeue_batch + +def structure_map_multi(func, values): + all_values = [nest.flatten(v) for v in values] + rets = [] + for pair in zip(*all_values): + rets.append(func(pair)) + return nest.pack_sequence_as(values[0], rets) + +def structure_map_split(func, value): + vv = nest.flatten(value) + rets = [] + for v in vv: + rets.append(func(v)) + return [nest.pack_sequence_as(value, r) for r in zip(*rets)] + +def assign_variables(targets, values): + return tf.group(*[t.assign(v) for t,v in eqzip(targets, values)], + name="assign_variables") + + +def create_variables_in_class_scope(method): + """Force the variables constructed in this class to live in the sonnet module. + Wraps a method on a sonnet module. + + For example the following will create two different variables. 
+ ``` + class Mod(snt.AbstractModule): + @create_variables_in_class_scope + def dynamic_thing(self, input, name): + return snt.Linear(name)(input) + mod.dynamic_thing(x, name="module_nameA") + mod.dynamic_thing(x, name="module_nameB") + # reuse + mod.dynamic_thing(y, name="module_nameA") + ``` + """ + @functools.wraps(method) + def wrapper(obj, *args, **kwargs): + def default_context_manager(reuse=None): + variable_scope = obj.variable_scope + return tf.variable_scope(variable_scope, reuse=reuse) + + variable_scope_context_manager = getattr(obj, "_enter_variable_scope", + default_context_manager) + graph = tf.get_default_graph() + + # Temporarily enter the variable scope to capture it + with variable_scope_context_manager() as tmp_variable_scope: + variable_scope = tmp_variable_scope + + with variable_scope_ops._pure_variable_scope( + variable_scope, reuse=tf.AUTO_REUSE) as pure_variable_scope: + + name_scope = variable_scope.original_name_scope + if name_scope[-1] != "/": + name_scope += "/" + + with tf.name_scope(name_scope): + sub_scope = snt_util.to_snake_case(method.__name__) + with tf.name_scope(sub_scope) as scope: + out_ops = method(obj, *args, **kwargs) + return out_ops + + return wrapper + diff --git a/models/research/learning_unsupervised_learning/variable_replace.py b/models/research/learning_unsupervised_learning/variable_replace.py new file mode 100644 index 0000000000000000000000000000000000000000..ebfbeadc8aba7f8a09e1392f1de8d7b33f10d43c --- /dev/null +++ b/models/research/learning_unsupervised_learning/variable_replace.py @@ -0,0 +1,112 @@ +# Copyright 2018 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + + +from __future__ import absolute_import +from __future__ import division + +import tensorflow as tf +from contextlib import contextmanager + +from tensorflow.python.ops import variable_scope + +# sanity global state to ensure non recursive. +_is_variable_replacing = [False] + +def in_variable_replace_scope(): + return _is_variable_replacing[0] + +@contextmanager +def variable_replace(replacements, no_new=True): + """ A context manager that replaces variables. + + This is a context manager that replaces all calls to + get_variable with the variable in replacements. + This function does not support recursive application. + + Args: + replacements: dict + dictionary mapping a variable to replace (the key), with + the variable one wants to replace this variable with (the value). + no_new: bool + raise an error if variables were created. + This is for sanity checking. + Raises: + ValueError: if a new variable or not all the replacements are used. + """ + # TODO(lmetz) This function is a bit scary, as it relies on monkey patching + # the call to get_variable. Ideally this can be done with variable_scope's + # custom_getter attribute, but when initially writing this that was not + # avalible. 
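+  #
+  # Typical use (a sketch; `w` and `w_new` are illustrative names, not part of
+  # this module):
+  #
+  #   with variable_replace({w: w_new}):
+  #     y = module(x)  # get_variable calls that would return `w` now return
+  #                    # `w_new` instead.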
+
+  replacements = {k: v for k, v in replacements.items() if not k == v}
+
+  init_vars = tf.trainable_variables()
+  old_get_variable = variable_scope.get_variable
+  old_tf_get_variable = tf.get_variable
+
+  names_replace = {}
+  has_replaced_names = []
+  tf.logging.vlog(2, "Trying to replace")
+  for k, v in replacements.items():
+    tf.logging.vlog(2, k.name + " >> " + v.name)
+  tf.logging.vlog(2, "===")
+
+  for k, v in replacements.items():
+    strip_name = k.name.replace("/read:0", "")
+    strip_name = strip_name.replace(":0", "")
+    names_replace[strip_name] = v
+  # TODO(lmetz) is there a cleaner way to do this?
+  def new_get_variable(name, *args, **kwargs):
+    #print("Monkeypatch get variable run with name:", name)
+    n = tf.get_variable_scope().name + "/" + name
+    #print("Monkeypatch get variable run with name:", n)
+    if n in names_replace:
+      has_replaced_names.append(n)
+      return names_replace[n]
+    else:
+      return old_get_variable(name, *args, **kwargs)
+
+  # perform the monkey patch
+  if _is_variable_replacing[0] == True:
+    raise ValueError("No recursive calling to variable replace allowed.")
+
+  variable_scope.get_variable = new_get_variable
+  tf.get_variable = new_get_variable
+
+  _is_variable_replacing[0] = True
+
+  yield
+
+  if set(has_replaced_names) != set(names_replace.keys()):
+    tf.logging.error("Didn't use all replacements")
+    tf.logging.error("replaced variables that were not requested??")
+    tf.logging.error("===")
+    for n in list(set(has_replaced_names) - set(names_replace.keys())):
+      tf.logging.error(n)
+    tf.logging.error("Missed replacing variables")
+    tf.logging.error("===")
+    for n in list(set(names_replace.keys()) - set(has_replaced_names)):
+      tf.logging.error("%s ==> %s", n, names_replace[n].name)
+    raise ValueError("Fix this -- see stderr")
+
+  # undo the monkey patch
+  tf.get_variable = old_tf_get_variable
+  variable_scope.get_variable = old_get_variable
+
+  _is_variable_replacing[0] = False
+
+  final_vars = tf.trainable_variables()
+  assert set(init_vars) == set(final_vars), "trainable variables changed"
diff --git a/models/research/lexnet_nc/README.md b/models/research/lexnet_nc/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..4ecb5d39867c2ebf7280b9d19bbabb41957b9465
--- /dev/null
+++ b/models/research/lexnet_nc/README.md
@@ -0,0 +1,215 @@
+![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg)
+![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen)
+![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg)
+
+# LexNET for Noun Compound Relation Classification
+
+This is a [Tensorflow](http://www.tensorflow.org/) implementation of the LexNET
+algorithm for classifying relationships, applied here specifically to the
+relationships that hold between noun compounds:
+
+* *olive oil* is oil that is *made from* olives
+* *cooking oil* is oil that is *used for* cooking
+* *motor oil* is oil that is *contained in* a motor
+
+The model is a supervised classifier that predicts the relationship that holds
+between the constituents of a two-word noun compound using:
+
+1. A neural "paraphrase" of each syntactic dependency path that connects the
+   constituents in a large corpus. For example, given a sentence like *This fine
+   oil is made from first-press olives*, the dependency path is something like
+   `oil from POBJ> olive`.
+2. The distributional information provided by the individual words; i.e., the
+   word embeddings of the two constituents.
+3. The distributional signal provided by the compound itself; i.e., the
+   embedding of the noun compound in context.
+
+The model includes several variants: the *path-based model* uses (1) alone, the
+*distributional model* uses (2) alone, and the *integrated model* uses (1) and
+(2). The *distributional-nc model* and the *integrated-nc* model each add (3).
+
+Training a model requires the following:
+
+1. A collection of noun compounds that have been labeled using a *relation
+   inventory*. The inventory describes the specific relationships that you'd
+   like the model to differentiate (e.g. *part of* versus *composed of* versus
+   *purpose*), and generally may consist of tens of classes. You can download
+   the dataset used in the paper from
+   [here](https://vered1986.github.io/papers/Tratz2011_Dataset.tar.gz).
+2. A collection of word embeddings: the path-based model uses the word
+   embeddings as part of the path representation, and the distributional models
+   use the word embeddings directly as prediction features.
+3. The path-based model requires a collection of syntactic dependency parses
+   that connect the constituents for each noun compound. To generate these,
+   you'll need a corpus from which to train this data; we used Wikipedia and the
+   [LDC GigaWord5](https://catalog.ldc.upenn.edu/LDC2011T07) corpora.
+
+# Contents
+
+The following source code is included here:
+
+* `learn_path_embeddings.py` is a script that trains and evaluates a path-based
+  model to predict a noun-compound relationship given labeled noun-compounds and
+  dependency parse paths.
+* `learn_classifier.py` is a script that trains and evaluates a classifier based
+  on any combination of paths, word embeddings, and noun-compound embeddings.
+* `get_indicative_paths.py` is a script that generates the most indicative
+  syntactic dependency paths for a particular relationship.
+
+Also included are utilities for preparing data for training:
+
+* `text_embeddings_to_binary.py` converts a text file containing word embeddings
+  into a binary file that is quicker to load.
+* `extract_paths.py` finds all the dependency paths that connect words in a
+  corpus.
+* `sorted_paths_to_examples.py` processes the output of `extract_paths.py` to
+  produce summarized training data.
+
+This code (in particular, the utilities used to prepare the data) differs from
+the code that was used to prepare data for the paper. Notably, the paper's data
+was prepared with a proprietary dependency parser, whereas the code here uses
+spaCy.
+
+# Dependencies
+
+* [TensorFlow](http://www.tensorflow.org/): see detailed installation
+  instructions at that site.
+* [SciKit Learn](http://scikit-learn.org/): you can probably just install this
+  with `pip install sklearn`.
+* [SpaCy](https://spacy.io/): `pip install spacy` ought to do the trick, along
+  with the English model.
+
+# Creating the Model
+
+This section describes the steps necessary to create and evaluate the model
+described in the paper.
+
+## Generate Path Data
+
+To begin, you need three text files:
+
+1. **Corpus**. This file should contain natural language sentences, written with
+   one sentence per line. For purposes of exposition, we'll assume that you
+   have English Wikipedia serialized this way in `${HOME}/data/wiki.txt`.
+2. **Labeled Noun Compound Pairs**. This file contains (modifier, head, label)
+   tuples, tab-separated, with one per line. The *label* represents the
+   relationship between the head and the modifier; e.g., if `purpose` is one of
+   your labels, you could possibly include the line `tooth paste purpose`
+   (tab-separated).
+3. **Word Embeddings**. We used the
+   [GloVe](https://nlp.stanford.edu/projects/glove/) word embeddings; in
+   particular the 6B token, 300d variant. We'll assume you have this file as
+   `${HOME}/data/glove.6B.300d.txt`.
+
+We first processed the embeddings from their text format into something that we
+can load a little bit more quickly:
+
+    ./text_embeddings_to_binary.py \
+      --input ${HOME}/data/glove.6B.300d.txt \
+      --output_vocab ${HOME}/data/vocab.txt \
+      --output_npy ${HOME}/data/glove.6B.300d.npy
+
+Next, we'll extract all the dependency parse paths connecting our labeled pairs
+from the corpus. This process takes a *looooong* time, but is trivially
+parallelized using map-reduce if you have access to that technology.
+
+    ./extract_paths.py \
+      --corpus ${HOME}/data/wiki.txt \
+      --labeled_pairs ${HOME}/data/labeled-pairs.tsv \
+      --output ${HOME}/data/paths.tsv
+
+The file it produces (`paths.tsv`) is a tab-separated file that contains the
+modifier, the head, the label, the encoded path, and the sentence from which the
+path was drawn. (This last is mostly for sanity checking.) A sample row might
+look something like this (where newlines would actually be tab characters):
+
+    navy
+    captain
+    owner_emp_use
+    /PROPN/dobj/>::enter/VERB/ROOT/^::follow/VERB/advcl/<::in/ADP/prep/<::footstep/NOUN/pobj/<::of/ADP/prep/<::father/NOUN/pobj/<::bover/PROPN/appos/<::/PROPN/compound/<
+    He entered the Royal Navy following in the footsteps of his father Captain John Bover and two of his elder brothers as volunteer aboard HMS Perseus
+
+This file must be sorted as follows:
+
+    sort -k1,3 -t$'\t' paths.tsv > sorted.paths.tsv
+
+In particular, rows with the same modifier, head, and label must appear
+contiguously.
+
+We next create a file that contains all the relation labels from our original
+labeled pairs:
+
+    awk 'BEGIN {FS="\t"} {print $3}' < ${HOME}/data/labeled-pairs.tsv \
+      | sort -u > ${HOME}/data/relations.txt
+
+With these in hand, we're ready to produce the train, validation, and test data:
+
+    ./sorted_paths_to_examples.py \
+      --input ${HOME}/data/sorted.paths.tsv \
+      --vocab ${HOME}/data/vocab.txt \
+      --relations ${HOME}/data/relations.txt \
+      --splits ${HOME}/data/splits.txt \
+      --output_dir ${HOME}/data
+
+Here, `splits.txt` is a file that indicates which "split" (train, test, or
+validation) you want the pair to appear in. It should be a tab-separated file
+which contains the modifier, head, and the dataset (`train`, `test`, or `val`)
+into which the pair should be placed; e.g.:
+
+    tooth paste train
+    banana seat test
+
+The program will produce a separate file for each dataset split in the directory
+specified by `--output_dir`. Each file contains `tf.train.Example` protocol
+buffers encoded using the `TFRecord` file format.
+
+## Create Path Embeddings
+
+Now we're ready to train the path embeddings using `learn_path_embeddings.py`:
+
+    ./learn_path_embeddings.py \
+      --train ${HOME}/data/train.tfrecs.gz \
+      --val ${HOME}/data/val.tfrecs.gz \
+      --test ${HOME}/data/test.tfrecs.gz \
+      --embeddings ${HOME}/data/glove.6B.300d.npy \
+      --relations ${HOME}/data/relations.txt \
+      --output_dir ${HOME}/data/path-embeddings \
+      --logdir /tmp/learn_path_embeddings
+
+The path embeddings will be placed at the location specified by `--output_dir`.
+
+## Train classifiers
+
+Train classifiers and evaluate on the validation and test data using the
+`learn_classifier.py` script. This shell script fragment will iterate through
+each dataset, split, corpus, and model type to train and evaluate classifiers.
+ + LOGDIR=/tmp/learn_classifier + for DATASET in tratz/fine_grained tratz/coarse_grained ; do + for SPLIT in random lexical_head lexical_mod lexical_full ; do + for CORPUS in wiki_gigiawords ; do + for MODEL in dist dist-nc path integrated integrated-nc ; do + # Filename for the log that will contain the classifier results. + LOGFILE=$(echo "${DATASET}.${SPLIT}.${CORPUS}.${MODEL}.log" | sed -e "s,/,.,g") + python learn_classifier.py \ + --dataset_dir ~/lexnet/datasets \ + --dataset "${DATASET}" \ + --corpus "${SPLIT}/${CORPUS}" \ + --embeddings_base_path ~/lexnet/embeddings \ + --logdir ${LOGDIR} \ + --input "${MODEL}" > "${LOGDIR}/${LOGFILE}" + done + done + done + done + +The log file will contain the final performance (precision, recall, F1) on the +train, dev, and test sets, and will include a confusion matrix for each. + +# Contact + +If you have any questions, issues, or suggestions, feel free to contact either +@vered1986 or @waterson. + +If you use this code for any published research, please include the following citation: + +Olive Oil Is Made of Olives, Baby Oil Is Made for Babies: Interpreting Noun Compounds Using Paraphrases in a Neural Model. +Vered Shwartz and Chris Waterson. NAACL 2018. [link](https://arxiv.org/pdf/1803.08073.pdf). diff --git a/models/research/lexnet_nc/extract_paths.py b/models/research/lexnet_nc/extract_paths.py new file mode 100644 index 0000000000000000000000000000000000000000..833eec2c1b8a176b487d4e663a737b9502b49eda --- /dev/null +++ b/models/research/lexnet_nc/extract_paths.py @@ -0,0 +1,119 @@ +#!/usr/bin/env python +# Copyright 2017, 2018 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import itertools +import sys + +import spacy +import tensorflow as tf + +tf.flags.DEFINE_string('corpus', '', 'Filename of corpus') +tf.flags.DEFINE_string('labeled_pairs', '', 'Filename of labeled pairs') +tf.flags.DEFINE_string('output', '', 'Filename of output file') +FLAGS = tf.flags.FLAGS + + +def get_path(mod_token, head_token): + """Returns the path between a modifier token and a head token.""" + # Compute the path from the root to each token. + mod_ancestors = list(reversed(list(mod_token.ancestors))) + head_ancestors = list(reversed(list(head_token.ancestors))) + + # If the paths don't start at the same place (odd!) then there is no path at + # all. + if (not mod_ancestors or not head_ancestors + or mod_ancestors[0] != head_ancestors[0]): + return None + + # Eject elements from the common path until we reach the first differing + # ancestor. + ix = 1 + while (ix < len(mod_ancestors) and ix < len(head_ancestors) + and mod_ancestors[ix] == head_ancestors[ix]): + ix += 1 + + # Construct the path. 
TODO: add "satellites", possibly honor sentence + # ordering between modifier and head rather than just always traversing from + # the modifier to the head? + path = ['/'.join(('', mod_token.pos_, mod_token.dep_, '>'))] + + path += ['/'.join((tok.lemma_, tok.pos_, tok.dep_, '>')) + for tok in reversed(mod_ancestors[ix:])] + + root_token = mod_ancestors[ix - 1] + path += ['/'.join((root_token.lemma_, root_token.pos_, root_token.dep_, '^'))] + + path += ['/'.join((tok.lemma_, tok.pos_, tok.dep_, '<')) + for tok in head_ancestors[ix:]] + + path += ['/'.join(('', head_token.pos_, head_token.dep_, '<'))] + + return '::'.join(path) + + +def main(_): + nlp = spacy.load('en_core_web_sm') + + # Grab the set of labeled pairs for which we wish to collect paths. + with tf.gfile.GFile(FLAGS.labeled_pairs) as fh: + parts = (l.decode('utf-8').split('\t') for l in fh.read().splitlines()) + labeled_pairs = {(mod, head): rel for mod, head, rel in parts} + + # Create a mapping from each head to the modifiers that are used with it. + mods_for_head = { + head: set(hm[1] for hm in head_mods) + for head, head_mods in itertools.groupby( + sorted((head, mod) for (mod, head) in labeled_pairs.iterkeys()), + lambda (head, mod): head)} + + # Collect all the heads that we know about. + heads = set(mods_for_head.keys()) + + # For each sentence that contains a (head, modifier) pair that's in our set, + # emit the dependency path that connects the pair. + out_fh = sys.stdout if not FLAGS.output else tf.gfile.GFile(FLAGS.output, 'w') + in_fh = sys.stdin if not FLAGS.corpus else tf.gfile.GFile(FLAGS.corpus) + + num_paths = 0 + for line, sen in enumerate(in_fh, start=1): + if line % 100 == 0: + print('\rProcessing line %d: %d paths' % (line, num_paths), + end='', file=sys.stderr) + + sen = sen.decode('utf-8').strip() + doc = nlp(sen) + + for head_token in doc: + head_text = head_token.text.lower() + if head_text in heads: + mods = mods_for_head[head_text] + for mod_token in doc: + mod_text = mod_token.text.lower() + if mod_text in mods: + path = get_path(mod_token, head_token) + if path: + label = labeled_pairs[(mod_text, head_text)] + line = '\t'.join((mod_text, head_text, label, path, sen)) + print(line.encode('utf-8'), file=out_fh) + num_paths += 1 + + out_fh.close() + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/lexnet_nc/get_indicative_paths.py b/models/research/lexnet_nc/get_indicative_paths.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b34cca221a07c0b633024b71f082b8f61b3a45 --- /dev/null +++ b/models/research/lexnet_nc/get_indicative_paths.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python +# Copyright 2017, 2018 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Extracts paths that are indicative of each relation.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +import tensorflow as tf + +from . import path_model +from . import lexnet_common + +tf.flags.DEFINE_string( + 'dataset_dir', 'datasets', + 'Dataset base directory') + +tf.flags.DEFINE_string( + 'dataset', + 'tratz/fine_grained', + 'Subdirectory containing the corpus directories: ' + 'subdirectory of dataset_dir') + +tf.flags.DEFINE_string( + 'corpus', 'random/wiki', + 'Subdirectory containing the corpus and split: ' + 'subdirectory of dataset_dir/dataset') + +tf.flags.DEFINE_string( + 'embeddings_base_path', 'embeddings', + 'Embeddings base directory') + +tf.flags.DEFINE_string( + 'logdir', 'logdir', + 'Directory of model output files') + +tf.flags.DEFINE_integer( + 'top_k', 20, 'Number of top paths to extract') + +tf.flags.DEFINE_float( + 'threshold', 0.8, 'Threshold above which to consider paths as indicative') + +FLAGS = tf.flags.FLAGS + + +def main(_): + hparams = path_model.PathBasedModel.default_hparams() + + # First things first. Load the path data. + path_embeddings_file = 'path_embeddings/{dataset}/{corpus}'.format( + dataset=FLAGS.dataset, + corpus=FLAGS.corpus) + + path_dim = (hparams.lemma_dim + hparams.pos_dim + + hparams.dep_dim + hparams.dir_dim) + + path_embeddings, path_to_index = path_model.load_path_embeddings( + os.path.join(FLAGS.embeddings_base_path, path_embeddings_file), + path_dim) + + # Load and count the classes so we can correctly instantiate the model. + classes_filename = os.path.join( + FLAGS.dataset_dir, FLAGS.dataset, 'classes.txt') + + with open(classes_filename) as f_in: + classes = f_in.read().splitlines() + + hparams.num_classes = len(classes) + + # We need the word embeddings to instantiate the model, too. + print('Loading word embeddings...') + lemma_embeddings = lexnet_common.load_word_embeddings( + FLAGS.embeddings_base_path, hparams.lemma_embeddings_file) + + # Instantiate the model. + with tf.Graph().as_default(): + with tf.variable_scope('lexnet'): + instance = tf.placeholder(dtype=tf.string) + model = path_model.PathBasedModel( + hparams, lemma_embeddings, instance) + + with tf.Session() as session: + model_dir = '{logdir}/results/{dataset}/path/{corpus}'.format( + logdir=FLAGS.logdir, + dataset=FLAGS.dataset, + corpus=FLAGS.corpus) + + saver = tf.train.Saver() + saver.restore(session, os.path.join(model_dir, 'best.ckpt')) + + path_model.get_indicative_paths( + model, session, path_to_index, path_embeddings, classes, + model_dir, FLAGS.top_k, FLAGS.threshold) + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/lexnet_nc/learn_classifier.py b/models/research/lexnet_nc/learn_classifier.py new file mode 100644 index 0000000000000000000000000000000000000000..ec284029535609ffd2cc0f2f5cddb9b87954aa81 --- /dev/null +++ b/models/research/lexnet_nc/learn_classifier.py @@ -0,0 +1,223 @@ +#!/usr/bin/env python +# Copyright 2017, 2018 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Trains the integrated LexNET classifier.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +import lexnet_common +import lexnet_model +import path_model +from sklearn import metrics +import tensorflow as tf + +tf.flags.DEFINE_string( + 'dataset_dir', 'datasets', + 'Dataset base directory') + +tf.flags.DEFINE_string( + 'dataset', 'tratz/fine_grained', + 'Subdirectory containing the corpus directories: ' + 'subdirectory of dataset_dir') + +tf.flags.DEFINE_string( + 'corpus', 'wiki/random', + 'Subdirectory containing the corpus and split: ' + 'subdirectory of dataset_dir/dataset') + +tf.flags.DEFINE_string( + 'embeddings_base_path', 'embeddings', + 'Embeddings base directory') + +tf.flags.DEFINE_string( + 'logdir', 'logdir', + 'Directory of model output files') + +tf.flags.DEFINE_string('hparams', '', 'Hyper-parameters') + +tf.flags.DEFINE_string( + 'input', 'integrated', + 'The model(dist/dist-nc/path/integrated/integrated-nc') + +FLAGS = tf.flags.FLAGS + + +def main(_): + # Pick up any one-off hyper-parameters. + hparams = lexnet_model.LexNETModel.default_hparams() + hparams.corpus = FLAGS.corpus + hparams.input = FLAGS.input + hparams.path_embeddings_file = 'path_embeddings/%s/%s' % ( + FLAGS.dataset, FLAGS.corpus) + + input_dir = hparams.input if hparams.input != 'path' else 'path_classifier' + + # Set the number of classes + classes_filename = os.path.join( + FLAGS.dataset_dir, FLAGS.dataset, 'classes.txt') + with open(classes_filename) as f_in: + classes = f_in.read().splitlines() + + hparams.num_classes = len(classes) + print('Model will predict into %d classes' % hparams.num_classes) + + # Get the datasets + train_set, val_set, test_set = ( + os.path.join( + FLAGS.dataset_dir, FLAGS.dataset, FLAGS.corpus, + filename + '.tfrecs.gz') + for filename in ['train', 'val', 'test']) + + print('Running with hyper-parameters: {}'.format(hparams)) + + # Load the instances + print('Loading instances...') + opts = tf.python_io.TFRecordOptions( + compression_type=tf.python_io.TFRecordCompressionType.GZIP) + train_instances = list(tf.python_io.tf_record_iterator(train_set, opts)) + val_instances = list(tf.python_io.tf_record_iterator(val_set, opts)) + test_instances = list(tf.python_io.tf_record_iterator(test_set, opts)) + + # Load the word embeddings + print('Loading word embeddings...') + relata_embeddings, path_embeddings, nc_embeddings, path_to_index = ( + None, None, None, None) + if hparams.input in ['dist', 'dist-nc', 'integrated', 'integrated-nc']: + relata_embeddings = lexnet_common.load_word_embeddings( + FLAGS.embeddings_base_path, hparams.relata_embeddings_file) + + if hparams.input in ['path', 'integrated', 'integrated-nc']: + path_embeddings, path_to_index = path_model.load_path_embeddings( + os.path.join(FLAGS.embeddings_base_path, hparams.path_embeddings_file), + hparams.path_dim) + + if hparams.input in ['dist-nc', 'integrated-nc']: + nc_embeddings = lexnet_common.load_word_embeddings( + 
FLAGS.embeddings_base_path, hparams.nc_embeddings_file) + + # Define the graph and the model + with tf.Graph().as_default(): + model = lexnet_model.LexNETModel( + hparams, relata_embeddings, path_embeddings, + nc_embeddings, path_to_index) + + # Initialize a session and start training + session = tf.Session() + session.run(tf.global_variables_initializer()) + + # Initalize the path mapping + if hparams.input in ['path', 'integrated', 'integrated-nc']: + session.run(tf.tables_initializer()) + session.run(model.initialize_path_op, { + model.path_initial_value_t: path_embeddings + }) + + # Initialize the NC embeddings + if hparams.input in ['dist-nc', 'integrated-nc']: + session.run(model.initialize_nc_op, { + model.nc_initial_value_t: nc_embeddings + }) + + # Load the labels + print('Loading labels...') + train_labels = model.load_labels(session, train_instances) + val_labels = model.load_labels(session, val_instances) + test_labels = model.load_labels(session, test_instances) + + save_path = '{logdir}/results/{dataset}/{input}/{corpus}'.format( + logdir=FLAGS.logdir, dataset=FLAGS.dataset, + corpus=model.hparams.corpus, input=input_dir) + + if not os.path.exists(save_path): + os.makedirs(save_path) + + # Train the model + print('Training the model...') + model.fit(session, train_instances, epoch_completed, + val_instances, val_labels, save_path) + + # Print the best performance on the validation set + print('Best performance on the validation set: F1=%.3f' % + epoch_completed.best_f1) + + # Evaluate on the train and validation sets + lexnet_common.full_evaluation(model, session, train_instances, train_labels, + 'Train', classes) + lexnet_common.full_evaluation(model, session, val_instances, val_labels, + 'Validation', classes) + test_predictions = lexnet_common.full_evaluation( + model, session, test_instances, test_labels, 'Test', classes) + + # Write the test predictions to a file + predictions_file = os.path.join(save_path, 'test_predictions.tsv') + print('Saving test predictions to %s' % save_path) + test_pairs = model.load_pairs(session, test_instances) + lexnet_common.write_predictions(test_pairs, test_labels, test_predictions, + classes, predictions_file) + + +def epoch_completed(model, session, epoch, epoch_loss, + val_instances, val_labels, save_path): + """Runs every time an epoch completes. + + Print the performance on the validation set, and update the saved model if + its performance is better on the previous ones. If the performance dropped, + tell the training to stop. + + Args: + model: The currently trained path-based model. + session: The current TensorFlow session. + epoch: The epoch number. + epoch_loss: The current epoch loss. + val_instances: The validation set instances (evaluation between epochs). + val_labels: The validation set labels (for evaluation between epochs). + save_path: Where to save the model. + + Returns: + whether the training should stop. + """ + stop_training = False + + # Evaluate on the validation set + val_pred = model.predict(session, val_instances) + precision, recall, f1, _ = metrics.precision_recall_fscore_support( + val_labels, val_pred, average='weighted') + print( + 'Epoch: %d/%d, Loss: %f, validation set: P: %.3f, R: %.3f, F1: %.3f\n' % ( + epoch + 1, model.hparams.num_epochs, epoch_loss, + precision, recall, f1)) + + # If the F1 is much smaller than the previous one, stop training. Else, if + # it's bigger, save the model. 
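+  # (0.08 here is the early-stopping tolerance: training halts once the
+  # validation F1 drops more than 0.08 below the best F1 seen so far.)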
+ if f1 < epoch_completed.best_f1 - 0.08: + stop_training = True + + if f1 > epoch_completed.best_f1: + saver = tf.train.Saver() + checkpoint_filename = os.path.join(save_path, 'best.ckpt') + print('Saving model in: %s' % checkpoint_filename) + saver.save(session, checkpoint_filename) + print('Model saved in file: %s' % checkpoint_filename) + epoch_completed.best_f1 = f1 + + return stop_training + +epoch_completed.best_f1 = 0 + +if __name__ == '__main__': + tf.app.run(main) diff --git a/models/research/lexnet_nc/learn_path_embeddings.py b/models/research/lexnet_nc/learn_path_embeddings.py new file mode 100644 index 0000000000000000000000000000000000000000..480378f4aa010ee27f0387685bac488cedbb2ab9 --- /dev/null +++ b/models/research/lexnet_nc/learn_path_embeddings.py @@ -0,0 +1,186 @@ +#!/usr/bin/env python +# Copyright 2017, 2018 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Trains the LexNET path-based model.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +import lexnet_common +import path_model +from sklearn import metrics +import tensorflow as tf + +tf.flags.DEFINE_string('train', '', 'training dataset, tfrecs') +tf.flags.DEFINE_string('val', '', 'validation dataset, tfrecs') +tf.flags.DEFINE_string('test', '', 'test dataset, tfrecs') +tf.flags.DEFINE_string('embeddings', '', 'embeddings, npy') +tf.flags.DEFINE_string('relations', '', 'file containing relation labels') +tf.flags.DEFINE_string('output_dir', '', 'output directory for path embeddings') +tf.flags.DEFINE_string('logdir', '', 'directory for model training') +FLAGS = tf.flags.FLAGS + + +def main(_): + # Pick up any one-off hyper-parameters. 
+ hparams = path_model.PathBasedModel.default_hparams() + + with open(FLAGS.relations) as fh: + relations = fh.read().splitlines() + + hparams.num_classes = len(relations) + print('Model will predict into %d classes' % hparams.num_classes) + + print('Running with hyper-parameters: {}'.format(hparams)) + + # Load the instances + print('Loading instances...') + opts = tf.python_io.TFRecordOptions( + compression_type=tf.python_io.TFRecordCompressionType.GZIP) + + train_instances = list(tf.python_io.tf_record_iterator(FLAGS.train, opts)) + val_instances = list(tf.python_io.tf_record_iterator(FLAGS.val, opts)) + test_instances = list(tf.python_io.tf_record_iterator(FLAGS.test, opts)) + + # Load the word embeddings + print('Loading word embeddings...') + lemma_embeddings = lexnet_common.load_word_embeddings(FLAGS.embeddings) + + # Define the graph and the model + with tf.Graph().as_default(): + with tf.variable_scope('lexnet'): + options = tf.python_io.TFRecordOptions( + compression_type=tf.python_io.TFRecordCompressionType.GZIP) + reader = tf.TFRecordReader(options=options) + _, train_instance = reader.read( + tf.train.string_input_producer([FLAGS.train])) + shuffled_train_instance = tf.train.shuffle_batch( + [train_instance], + batch_size=1, + num_threads=1, + capacity=len(train_instances), + min_after_dequeue=100, + )[0] + + train_model = path_model.PathBasedModel( + hparams, lemma_embeddings, shuffled_train_instance) + + with tf.variable_scope('lexnet', reuse=True): + val_instance = tf.placeholder(dtype=tf.string) + val_model = path_model.PathBasedModel( + hparams, lemma_embeddings, val_instance) + + # Initialize a session and start training + best_model_saver = tf.train.Saver() + f1_t = tf.placeholder(tf.float32) + best_f1_t = tf.Variable(0.0, trainable=False, name='best_f1') + assign_best_f1_op = tf.assign(best_f1_t, f1_t) + + supervisor = tf.train.Supervisor( + logdir=FLAGS.logdir, + global_step=train_model.global_step) + + with supervisor.managed_session() as session: + # Load the labels + print('Loading labels...') + val_labels = train_model.load_labels(session, val_instances) + + # Train the model + print('Training the model...') + + while True: + step = session.run(train_model.global_step) + epoch = (step + len(train_instances) - 1) // len(train_instances) + if epoch > hparams.num_epochs: + break + + print('Starting epoch %d (step %d)...' % (1 + epoch, step)) + + epoch_loss = train_model.run_one_epoch(session, len(train_instances)) + + best_f1 = session.run(best_f1_t) + f1 = epoch_completed(val_model, session, epoch, epoch_loss, + val_instances, val_labels, best_model_saver, + FLAGS.logdir, best_f1) + + if f1 > best_f1: + session.run(assign_best_f1_op, {f1_t: f1}) + + if f1 < best_f1 - 0.08: + tf.logging.info('Stopping training after %d epochs.\n' % epoch) + break + + # Print the best performance on the validation set + best_f1 = session.run(best_f1_t) + print('Best performance on the validation set: F1=%.3f' % best_f1) + + # Save the path embeddings + print('Computing the path embeddings...') + instances = train_instances + val_instances + test_instances + path_index, path_vectors = path_model.compute_path_embeddings( + val_model, session, instances) + + if not os.path.exists(path_emb_dir): + os.makedirs(path_emb_dir) + + path_model.save_path_embeddings( + val_model, path_vectors, path_index, FLAGS.output_dir) + + +def epoch_completed(model, session, epoch, epoch_loss, + val_instances, val_labels, saver, save_path, best_f1): + """Runs every time an epoch completes. 
+ + Print the performance on the validation set, and update the saved model if + its performance is better on the previous ones. If the performance dropped, + tell the training to stop. + + Args: + model: The currently trained path-based model. + session: The current TensorFlow session. + epoch: The epoch number. + epoch_loss: The current epoch loss. + val_instances: The validation set instances (evaluation between epochs). + val_labels: The validation set labels (for evaluation between epochs). + saver: tf.Saver object + save_path: Where to save the model. + best_f1: the best F1 achieved so far. + + Returns: + The F1 achieved on the training set. + """ + # Evaluate on the validation set + val_pred = model.predict(session, val_instances) + precision, recall, f1, _ = metrics.precision_recall_fscore_support( + val_labels, val_pred, average='weighted') + print( + 'Epoch: %d/%d, Loss: %f, validation set: P: %.3f, R: %.3f, F1: %.3f\n' % ( + epoch + 1, model.hparams.num_epochs, epoch_loss, + precision, recall, f1)) + + if f1 > best_f1: + save_filename = os.path.join(save_path, 'best.ckpt') + print('Saving model in: %s' % save_filename) + saver.save(session, save_filename) + print('Model saved in file: %s' % save_filename) + + return f1 + + +if __name__ == '__main__': + tf.app.run(main) diff --git a/models/research/lexnet_nc/lexnet_common.py b/models/research/lexnet_nc/lexnet_common.py new file mode 100644 index 0000000000000000000000000000000000000000..a2e8a104d00c1c2f90731f4045c3c8e69e370dbf --- /dev/null +++ b/models/research/lexnet_nc/lexnet_common.py @@ -0,0 +1,197 @@ +# Copyright 2017, 2018 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Common stuff used with LexNET.""" +# pylint: disable=bad-whitespace + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import numpy as np +from sklearn import metrics +import tensorflow as tf + +# Part of speech tags used in the paths. +POSTAGS = [ + 'PAD', 'VERB', 'CONJ', 'NOUN', 'PUNCT', + 'ADP', 'ADJ', 'DET', 'ADV', 'PART', + 'NUM', 'X', 'INTJ', 'SYM', +] + +POSTAG_TO_ID = {tag: tid for tid, tag in enumerate(POSTAGS)} + +# Dependency labels used in the paths. +DEPLABELS = [ + 'PAD', 'UNK', 'ROOT', 'abbrev', 'acomp', 'advcl', + 'advmod', 'agent', 'amod', 'appos', 'attr', 'aux', + 'auxpass', 'cc', 'ccomp', 'complm', 'conj', 'cop', + 'csubj', 'csubjpass', 'dep', 'det', 'dobj', 'expl', + 'infmod', 'iobj', 'mark', 'mwe', 'nc', 'neg', + 'nn', 'npadvmod', 'nsubj', 'nsubjpass', 'num', 'number', + 'p', 'parataxis', 'partmod', 'pcomp', 'pobj', 'poss', + 'preconj', 'predet', 'prep', 'prepc', 'prt', 'ps', + 'purpcl', 'quantmod', 'rcmod', 'ref', 'rel', 'suffix', + 'title', 'tmod', 'xcomp', 'xsubj', +] + +DEPLABEL_TO_ID = {label: lid for lid, label in enumerate(DEPLABELS)} + +# Direction codes used in the paths. 
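+# (In the paths emitted by extract_paths.py, '>' marks an edge on the modifier
+# side of the path, '^' the shared root token, and '<' an edge on the head
+# side.)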
+DIRS = '_^V<>' +DIR_TO_ID = {dir: did for did, dir in enumerate(DIRS)} + + +def load_word_embeddings(embedding_filename): + """Loads pretrained word embeddings from a binary file and returns the matrix. + + Adds the , , , and tokens to the beginning of the vocab. + + Args: + embedding_filename: filename of the binary NPY data + + Returns: + The word embeddings matrix + """ + embeddings = np.load(embedding_filename) + dim = embeddings.shape[1] + + # Four initially random vectors for the special tokens: , , , + special_embeddings = np.random.normal(0, 0.1, (4, dim)) + embeddings = np.vstack((special_embeddings, embeddings)) + embeddings = embeddings.astype(np.float32) + + return embeddings + + +def full_evaluation(model, session, instances, labels, set_name, classes): + """Prints a full evaluation on the current set. + + Performance (recall, precision and F1), classification report (per + class performance), and confusion matrix). + + Args: + model: The currently trained path-based model. + session: The current TensorFlow session. + instances: The current set instances. + labels: The current set labels. + set_name: The current set name (train/validation/test). + classes: The class label names. + + Returns: + The model's prediction for the given instances. + """ + + # Predict the labels + pred = model.predict(session, instances) + + # Print the performance + precision, recall, f1, _ = metrics.precision_recall_fscore_support( + labels, pred, average='weighted') + + print('%s set: Precision: %.3f, Recall: %.3f, F1: %.3f' % ( + set_name, precision, recall, f1)) + + # Print a classification report + print('%s classification report:' % set_name) + print(metrics.classification_report(labels, pred, target_names=classes)) + + # Print the confusion matrix + print('%s confusion matrix:' % set_name) + cm = metrics.confusion_matrix(labels, pred, labels=range(len(classes))) + cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] * 100 + print_cm(cm, labels=classes) + return pred + + +def print_cm(cm, labels): + """Pretty print for confusion matrices. + + From: https://gist.github.com/zachguo/10296432. + + Args: + cm: The confusion matrix. + labels: The class names. + """ + columnwidth = 10 + empty_cell = ' ' * columnwidth + short_labels = [label[:12].rjust(10, ' ') for label in labels] + + # Print header + header = empty_cell + ' ' + header += ''.join([' %{0}s '.format(columnwidth) % label + for label in short_labels]) + + print(header) + + # Print rows + for i, label1 in enumerate(short_labels): + row = '%{0}s '.format(columnwidth) % label1[:10] + for j in range(len(short_labels)): + value = int(cm[i, j]) if not np.isnan(cm[i, j]) else 0 + cell = ' %{0}d '.format(10) % value + row += cell + ' ' + print(row) + + +def load_all_labels(records): + """Reads TensorFlow examples from a RecordReader and returns only the labels. + + Args: + records: a record list with TensorFlow examples. + + Returns: + The labels + """ + curr_features = tf.parse_example(records, { + 'rel_id': tf.FixedLenFeature([1], dtype=tf.int64), + }) + + labels = tf.squeeze(curr_features['rel_id'], [-1]) + return labels + + +def load_all_pairs(records): + """Reads TensorFlow examples from a RecordReader and returns the word pairs. + + Args: + records: a record list with TensorFlow examples. 
+ + Returns: + The word pairs + """ + curr_features = tf.parse_example(records, { + 'pair': tf.FixedLenFeature([1], dtype=tf.string) + }) + + word_pairs = curr_features['pair'] + return word_pairs + + +def write_predictions(pairs, labels, predictions, classes, predictions_file): + """Write the predictions to a file. + + Args: + pairs: the word pairs (list of tuple of two strings). + labels: the gold-standard labels for these pairs (array of rel ID). + predictions: the predicted labels for these pairs (array of rel ID). + classes: a list of relation names. + predictions_file: where to save the predictions. + """ + with open(predictions_file, 'w') as f_out: + for pair, label, pred in zip(pairs, labels, predictions): + w1, w2 = pair + f_out.write('\t'.join([w1, w2, classes[label], classes[pred]]) + '\n') diff --git a/models/research/lexnet_nc/lexnet_model.py b/models/research/lexnet_nc/lexnet_model.py new file mode 100644 index 0000000000000000000000000000000000000000..b0f16b030b3bb3fee68b91122bcd03226ffcfa4a --- /dev/null +++ b/models/research/lexnet_nc/lexnet_model.py @@ -0,0 +1,438 @@ +# Copyright 2017, 2018 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""The integrated LexNET model.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import lexnet_common +import numpy as np +import tensorflow as tf +from six.moves import xrange + + +class LexNETModel(object): + """The LexNET model for classifying relationships between noun compounds.""" + + @classmethod + def default_hparams(cls): + """Returns the default hyper-parameters.""" + return tf.contrib.training.HParams( + batch_size=10, + num_classes=37, + num_epochs=30, + input_keep_prob=0.9, + input='integrated', # dist/ dist-nc/ path/ integrated/ integrated-nc + learn_relata=False, + corpus='wiki_gigawords', + random_seed=133, # zero means no random seed + relata_embeddings_file='glove/glove.6B.300d.bin', + nc_embeddings_file='nc_glove/vecs.6B.300d.bin', + path_embeddings_file='path_embeddings/tratz/fine_grained/wiki', + hidden_layers=1, + path_dim=60) + + def __init__(self, hparams, relata_embeddings, path_embeddings, nc_embeddings, + path_to_index): + """Initialize the LexNET classifier. + + Args: + hparams: the hyper-parameters. + relata_embeddings: word embeddings for the distributional component. + path_embeddings: embeddings for the paths. + nc_embeddings: noun compound embeddings. + path_to_index: a mapping from string path to an index in the path + embeddings matrix. 
+ """ + self.hparams = hparams + + self.path_embeddings = path_embeddings + self.relata_embeddings = relata_embeddings + self.nc_embeddings = nc_embeddings + + self.vocab_size, self.relata_dim = 0, 0 + self.path_to_index = None + self.path_dim = 0 + + # Set the random seed + if hparams.random_seed > 0: + tf.set_random_seed(hparams.random_seed) + + # Get the vocabulary size and relata dim + if self.hparams.input in ['dist', 'dist-nc', 'integrated', 'integrated-nc']: + self.vocab_size, self.relata_dim = self.relata_embeddings.shape + + # Create the mapping from string path to an index in the embeddings matrix + if self.hparams.input in ['path', 'integrated', 'integrated-nc']: + self.path_to_index = tf.contrib.lookup.HashTable( + tf.contrib.lookup.KeyValueTensorInitializer( + tf.constant(path_to_index.keys()), + tf.constant(path_to_index.values()), + key_dtype=tf.string, value_dtype=tf.int32), 0) + + self.path_dim = self.path_embeddings.shape[1] + + # Create the network + self.__create_computation_graph__() + + def __create_computation_graph__(self): + """Initialize the model and define the graph.""" + network_input = 0 + + # Define the network inputs + # Distributional x and y + if self.hparams.input in ['dist', 'dist-nc', 'integrated', 'integrated-nc']: + network_input += 2 * self.relata_dim + self.relata_lookup = tf.get_variable( + 'relata_lookup', + initializer=self.relata_embeddings, + dtype=tf.float32, + trainable=self.hparams.learn_relata) + + # Path-based + if self.hparams.input in ['path', 'integrated', 'integrated-nc']: + network_input += self.path_dim + + self.path_initial_value_t = tf.placeholder(tf.float32, None) + + self.path_lookup = tf.get_variable( + name='path_lookup', + dtype=tf.float32, + trainable=False, + shape=self.path_embeddings.shape) + + self.initialize_path_op = tf.assign( + self.path_lookup, self.path_initial_value_t, validate_shape=False) + + # Distributional noun compound + if self.hparams.input in ['dist-nc', 'integrated-nc']: + network_input += self.relata_dim + + self.nc_initial_value_t = tf.placeholder(tf.float32, None) + + self.nc_lookup = tf.get_variable( + name='nc_lookup', + dtype=tf.float32, + trainable=False, + shape=self.nc_embeddings.shape) + + self.initialize_nc_op = tf.assign( + self.nc_lookup, self.nc_initial_value_t, validate_shape=False) + + hidden_dim = network_input // 2 + + # Define the MLP + if self.hparams.hidden_layers == 0: + self.weights1 = tf.get_variable( + 'W1', + shape=[network_input, self.hparams.num_classes], + dtype=tf.float32) + self.bias1 = tf.get_variable( + 'b1', + shape=[self.hparams.num_classes], + dtype=tf.float32) + + elif self.hparams.hidden_layers == 1: + + self.weights1 = tf.get_variable( + 'W1', + shape=[network_input, hidden_dim], + dtype=tf.float32) + self.bias1 = tf.get_variable( + 'b1', + shape=[hidden_dim], + dtype=tf.float32) + + self.weights2 = tf.get_variable( + 'W2', + shape=[hidden_dim, self.hparams.num_classes], + dtype=tf.float32) + self.bias2 = tf.get_variable( + 'b2', + shape=[self.hparams.num_classes], + dtype=tf.float32) + + else: + raise ValueError('Only 0 or 1 hidden layers are supported') + + # Define the variables + self.instances = tf.placeholder(dtype=tf.string, + shape=[self.hparams.batch_size]) + + (self.x_embedding_id, + self.y_embedding_id, + self.nc_embedding_id, + self.path_embedding_id, + self.path_counts, + self.labels) = parse_tensorflow_examples( + self.instances, self.hparams.batch_size, self.path_to_index) + + # Create the MLP + self.__mlp__() + + self.instances_to_load = 
tf.placeholder(dtype=tf.string, shape=[None]) + self.labels_to_load = lexnet_common.load_all_labels(self.instances_to_load) + self.pairs_to_load = lexnet_common.load_all_pairs(self.instances_to_load) + + def load_labels(self, session, instances): + """Loads the labels for these instances. + + Args: + session: The current TensorFlow session, + instances: The instances for which to load the labels. + + Returns: + the labels of these instances. + """ + return session.run(self.labels_to_load, + feed_dict={self.instances_to_load: instances}) + + def load_pairs(self, session, instances): + """Loads the word pairs for these instances. + + Args: + session: The current TensorFlow session, + instances: The instances for which to load the labels. + + Returns: + the word pairs of these instances. + """ + word_pairs = session.run(self.pairs_to_load, + feed_dict={self.instances_to_load: instances}) + return [pair[0].split('::') for pair in word_pairs] + + def __train_single_batch__(self, session, batch_instances): + """Train a single batch. + + Args: + session: The current TensorFlow session. + batch_instances: TensorFlow examples containing the training intances + + Returns: + The cost for the current batch. + """ + cost, _ = session.run([self.cost, self.train_op], + feed_dict={self.instances: batch_instances}) + + return cost + + def fit(self, session, inputs, on_epoch_completed, val_instances, val_labels, + save_path): + """Train the model. + + Args: + session: The current TensorFlow session. + inputs: + on_epoch_completed: A method to call after each epoch. + val_instances: The validation set instances (evaluation between epochs). + val_labels: The validation set labels (for evaluation between epochs). + save_path: Where to save the model. + """ + for epoch in range(self.hparams.num_epochs): + + losses = [] + epoch_indices = list(np.random.permutation(len(inputs))) + + # If the number of instances doesn't divide by batch_size, enlarge it + # by duplicating training examples + mod = len(epoch_indices) % self.hparams.batch_size + if mod > 0: + epoch_indices.extend([np.random.randint(0, high=len(inputs))] * mod) + + # Define the batches + n_batches = len(epoch_indices) // self.hparams.batch_size + + for minibatch in range(n_batches): + + batch_indices = epoch_indices[minibatch * self.hparams.batch_size:( + minibatch + 1) * self.hparams.batch_size] + batch_instances = [inputs[i] for i in batch_indices] + + loss = self.__train_single_batch__(session, batch_instances) + losses.append(loss) + + epoch_loss = np.nanmean(losses) + + if on_epoch_completed: + should_stop = on_epoch_completed(self, session, epoch, epoch_loss, + val_instances, val_labels, save_path) + if should_stop: + print('Stopping training after %d epochs.' % epoch) + return + + def predict(self, session, inputs): + """Predict the classification of the test set. + + Args: + session: The current TensorFlow session. + inputs: the train paths, x, y and/or nc vectors + + Returns: + The test predictions. + """ + predictions, _ = zip(*self.predict_with_score(session, inputs)) + return np.array(predictions) + + def predict_with_score(self, session, inputs): + """Predict the classification of the test set. + + Args: + session: The current TensorFlow session. + inputs: the test paths, x, y and/or nc vectors + + Returns: + The test predictions along with their scores. 
+ """ + test_pred = [0] * len(inputs) + + for chunk in xrange(0, len(test_pred), self.hparams.batch_size): + + # Initialize the variables with the current batch data + batch_indices = list( + range(chunk, min(chunk + self.hparams.batch_size, len(test_pred)))) + + # If the batch is too small, add a few other examples + if len(batch_indices) < self.hparams.batch_size: + batch_indices += [0] * (self.hparams.batch_size-len(batch_indices)) + + batch_instances = [inputs[i] for i in batch_indices] + + predictions, scores = session.run( + [self.predictions, self.scores], + feed_dict={self.instances: batch_instances}) + + for index_in_batch, index_in_dataset in enumerate(batch_indices): + prediction = predictions[index_in_batch] + score = scores[index_in_batch][prediction] + test_pred[index_in_dataset] = (prediction, score) + + return test_pred + + def __mlp__(self): + """Performs the MLP operations. + + Returns: the prediction object to be computed in a Session + """ + # Define the operations + + # Network input + vec_inputs = [] + + # Distributional component + if self.hparams.input in ['dist', 'dist-nc', 'integrated', 'integrated-nc']: + for emb_id in [self.x_embedding_id, self.y_embedding_id]: + vec_inputs.append(tf.nn.embedding_lookup(self.relata_lookup, emb_id)) + + # Noun compound component + if self.hparams.input in ['dist-nc', 'integrated-nc']: + vec = tf.nn.embedding_lookup(self.nc_lookup, self.nc_embedding_id) + vec_inputs.append(vec) + + # Path-based component + if self.hparams.input in ['path', 'integrated', 'integrated-nc']: + + # Get the current paths for each batch instance + self.path_embeddings = tf.nn.embedding_lookup(self.path_lookup, + self.path_embedding_id) + + # self.path_embeddings is of shape + # [batch_size, max_path_per_instance, output_dim] + # We need to multiply it by path counts + # ([batch_size, max_path_per_instance]). + # Start by duplicating path_counts along the output_dim axis. + self.path_freq = tf.tile(tf.expand_dims(self.path_counts, -1), + [1, 1, self.path_dim]) + + # Compute the averaged path vector for each instance. + # First, multiply the path embeddings and frequencies element-wise. + self.weighted = tf.multiply(self.path_freq, self.path_embeddings) + + # Second, take the sum to get a tensor of shape [batch_size, output_dim]. + self.pair_path_embeddings = tf.reduce_sum(self.weighted, 1) + + # Finally, divide by the total number of paths. + # The number of paths for each pair has a shape [batch_size, 1], + # We duplicate it output_dim times along the second axis. + self.num_paths = tf.clip_by_value( + tf.reduce_sum(self.path_counts, 1), 1, np.inf) + self.num_paths = tf.tile(tf.expand_dims(self.num_paths, -1), + [1, self.path_dim]) + + # And finally, divide pair_path_embeddings by num_paths element-wise. 
+ self.pair_path_embeddings = tf.div( + self.pair_path_embeddings, self.num_paths) + vec_inputs.append(self.pair_path_embeddings) + + # Concatenate the inputs and feed to the MLP + self.input_vec = tf.nn.dropout( + tf.concat(vec_inputs, 1), + keep_prob=self.hparams.input_keep_prob) + + h = tf.matmul(self.input_vec, self.weights1) + self.output = h + + if self.hparams.hidden_layers == 1: + self.output = tf.matmul(tf.nn.tanh(h), self.weights2) + + self.scores = self.output + self.predictions = tf.argmax(self.scores, axis=1) + + # Define the loss function and the optimization algorithm + self.cross_entropies = tf.nn.sparse_softmax_cross_entropy_with_logits( + logits=self.scores, labels=self.labels) + self.cost = tf.reduce_sum(self.cross_entropies, name='cost') + self.global_step = tf.Variable(0, name='global_step', trainable=False) + self.optimizer = tf.train.AdamOptimizer() + self.train_op = self.optimizer.minimize( + self.cost, global_step=self.global_step) + + +def parse_tensorflow_examples(record, batch_size, path_to_index): + """Reads TensorFlow examples from a RecordReader. + + Args: + record: a record with TensorFlow examples. + batch_size: the number of instances in a minibatch + path_to_index: mapping from string path to index in the embeddings matrix. + + Returns: + The word embeddings IDs, paths and counts + """ + features = tf.parse_example( + record, { + 'x_embedding_id': tf.FixedLenFeature([1], dtype=tf.int64), + 'y_embedding_id': tf.FixedLenFeature([1], dtype=tf.int64), + 'nc_embedding_id': tf.FixedLenFeature([1], dtype=tf.int64), + 'reprs': tf.FixedLenSequenceFeature( + shape=(), dtype=tf.string, allow_missing=True), + 'counts': tf.FixedLenSequenceFeature( + shape=(), dtype=tf.int64, allow_missing=True), + 'rel_id': tf.FixedLenFeature([1], dtype=tf.int64) + }) + + x_embedding_id = tf.squeeze(features['x_embedding_id'], [-1]) + y_embedding_id = tf.squeeze(features['y_embedding_id'], [-1]) + nc_embedding_id = tf.squeeze(features['nc_embedding_id'], [-1]) + labels = tf.squeeze(features['rel_id'], [-1]) + path_counts = tf.to_float(tf.reshape(features['counts'], [batch_size, -1])) + + path_embedding_id = None + if path_to_index: + path_embedding_id = path_to_index.lookup(features['reprs']) + + return ( + x_embedding_id, y_embedding_id, nc_embedding_id, + path_embedding_id, path_counts, labels) diff --git a/models/research/lexnet_nc/path_model.py b/models/research/lexnet_nc/path_model.py new file mode 100644 index 0000000000000000000000000000000000000000..c283841775d673baa8a4bc8c438d65f288a2c555 --- /dev/null +++ b/models/research/lexnet_nc/path_model.py @@ -0,0 +1,547 @@ +# Copyright 2017, 2018 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""LexNET Path-based Model.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import itertools +import os + +import lexnet_common +import numpy as np +import tensorflow as tf + + +class PathBasedModel(object): + """The LexNET path-based model for classifying semantic relations.""" + + @classmethod + def default_hparams(cls): + """Returns the default hyper-parameters.""" + return tf.contrib.training.HParams( + max_path_len=8, + num_classes=37, + num_epochs=30, + input_keep_prob=0.9, + learning_rate=0.001, + learn_lemmas=False, + random_seed=133, # zero means no random seed + lemma_embeddings_file='glove/glove.6B.50d.bin', + num_pos=len(lexnet_common.POSTAGS), + num_dep=len(lexnet_common.DEPLABELS), + num_directions=len(lexnet_common.DIRS), + lemma_dim=50, + pos_dim=4, + dep_dim=5, + dir_dim=1) + + def __init__(self, hparams, lemma_embeddings, instance): + """Initialize the LexNET classifier. + + Args: + hparams: the hyper-parameters. + lemma_embeddings: word embeddings for the path-based component. + instance: string tensor containing the input instance + """ + self.hparams = hparams + self.lemma_embeddings = lemma_embeddings + self.instance = instance + self.vocab_size, self.lemma_dim = self.lemma_embeddings.shape + + # Set the random seed + if hparams.random_seed > 0: + tf.set_random_seed(hparams.random_seed) + + # Create the network + self.__create_computation_graph__() + + def __create_computation_graph__(self): + """Initialize the model and define the graph.""" + self.lstm_input_dim = sum([self.hparams.lemma_dim, self.hparams.pos_dim, + self.hparams.dep_dim, self.hparams.dir_dim]) + self.lstm_output_dim = self.lstm_input_dim + + network_input = self.lstm_output_dim + self.lemma_lookup = tf.get_variable( + 'lemma_lookup', + initializer=self.lemma_embeddings, + dtype=tf.float32, + trainable=self.hparams.learn_lemmas) + self.pos_lookup = tf.get_variable( + 'pos_lookup', + shape=[self.hparams.num_pos, self.hparams.pos_dim], + dtype=tf.float32) + self.dep_lookup = tf.get_variable( + 'dep_lookup', + shape=[self.hparams.num_dep, self.hparams.dep_dim], + dtype=tf.float32) + self.dir_lookup = tf.get_variable( + 'dir_lookup', + shape=[self.hparams.num_directions, self.hparams.dir_dim], + dtype=tf.float32) + + self.weights1 = tf.get_variable( + 'W1', + shape=[network_input, self.hparams.num_classes], + dtype=tf.float32) + self.bias1 = tf.get_variable( + 'b1', + shape=[self.hparams.num_classes], + dtype=tf.float32) + + # Define the variables + (self.batch_paths, + self.path_counts, + self.seq_lengths, + self.path_strings, + self.batch_labels) = _parse_tensorflow_example( + self.instance, self.hparams.max_path_len, self.hparams.input_keep_prob) + + # Create the LSTM + self.__lstm__() + + # Create the MLP + self.__mlp__() + + self.instances_to_load = tf.placeholder(dtype=tf.string, shape=[None]) + self.labels_to_load = lexnet_common.load_all_labels(self.instances_to_load) + + def load_labels(self, session, batch_instances): + """Loads the labels of the current instances. + + Args: + session: the current TensorFlow session. + batch_instances: the dataset instances. + + Returns: + the labels. + """ + return session.run(self.labels_to_load, + feed_dict={self.instances_to_load: batch_instances}) + + def run_one_epoch(self, session, num_steps): + """Train the model. + + Args: + session: The current TensorFlow session. 
+ num_steps: The number of steps in each epoch. + + Returns: + The mean loss for the epoch. + + Raises: + ArithmeticError: if the loss becomes non-finite. + """ + losses = [] + + for step in range(num_steps): + curr_loss, _ = session.run([self.cost, self.train_op]) + if not np.isfinite(curr_loss): + raise ArithmeticError('nan loss at step %d' % step) + + losses.append(curr_loss) + + return np.mean(losses) + + def predict(self, session, inputs): + """Predict the classification of the test set. + + Args: + session: The current TensorFlow session. + inputs: the train paths, x, y and/or nc vectors + + Returns: + The test predictions. + """ + predictions, _ = zip(*self.predict_with_score(session, inputs)) + return np.array(predictions) + + def predict_with_score(self, session, inputs): + """Predict the classification of the test set. + + Args: + session: The current TensorFlow session. + inputs: the test paths, x, y and/or nc vectors + + Returns: + The test predictions along with their scores. + """ + test_pred = [0] * len(inputs) + + for index, instance in enumerate(inputs): + + prediction, scores = session.run( + [self.predictions, self.scores], + feed_dict={self.instance: instance}) + + test_pred[index] = (prediction, scores[prediction]) + + return test_pred + + def __mlp__(self): + """Performs the MLP operations. + + Returns: the prediction object to be computed in a Session + """ + # Feed the paths to the MLP: path_embeddings is + # [num_batch_paths, output_dim], and when we multiply it by W + # ([output_dim, num_classes]), we get a matrix of class distributions: + # [num_batch_paths, num_classes]. + self.distributions = tf.matmul(self.path_embeddings, self.weights1) + + # Now, compute weighted average on the class distributions, using the path + # frequency as weights. + + # First, reshape path_freq to the same shape of distributions + self.path_freq = tf.tile(tf.expand_dims(self.path_counts, -1), + [1, self.hparams.num_classes]) + + # Second, multiply the distributions and frequencies element-wise. + self.weighted = tf.multiply(self.path_freq, self.distributions) + + # Finally, take the average to get a tensor of shape [1, num_classes]. + self.weighted_sum = tf.reduce_sum(self.weighted, 0) + self.num_paths = tf.clip_by_value(tf.reduce_sum(self.path_counts), + 1, np.inf) + self.num_paths = tf.tile(tf.expand_dims(self.num_paths, -1), + [self.hparams.num_classes]) + self.scores = tf.div(self.weighted_sum, self.num_paths) + self.predictions = tf.argmax(self.scores) + + # Define the loss function and the optimization algorithm + self.cross_entropies = tf.nn.sparse_softmax_cross_entropy_with_logits( + logits=self.scores, labels=tf.reduce_mean(self.batch_labels)) + self.cost = tf.reduce_sum(self.cross_entropies, name='cost') + self.global_step = tf.Variable(0, name='global_step', trainable=False) + self.optimizer = tf.train.AdamOptimizer() + self.train_op = self.optimizer.minimize(self.cost, + global_step=self.global_step) + + def __lstm__(self): + """Defines the LSTM operations. + + Returns: + A matrix of path embeddings. 
+ """ + lookup_tables = [self.lemma_lookup, self.pos_lookup, + self.dep_lookup, self.dir_lookup] + + # Split the edges to components: list of 4 tensors + # [num_batch_paths, max_path_len, 1] + self.edge_components = tf.split(self.batch_paths, 4, axis=2) + + # Look up the components embeddings and concatenate them back together + self.path_matrix = tf.concat([ + tf.squeeze(tf.nn.embedding_lookup(lookup_table, component), 2) + for lookup_table, component in + zip(lookup_tables, self.edge_components) + ], axis=2) + + self.sequence_lengths = tf.reshape(self.seq_lengths, [-1]) + + # Define the LSTM. + # The input is [num_batch_paths, max_path_len, input_dim]. + lstm_cell = tf.contrib.rnn.BasicLSTMCell(self.lstm_output_dim) + + # The output is [num_batch_paths, max_path_len, output_dim]. + self.lstm_outputs, _ = tf.nn.dynamic_rnn( + lstm_cell, self.path_matrix, dtype=tf.float32, + sequence_length=self.sequence_lengths) + + # Slice the last *relevant* output for each instance -> + # [num_batch_paths, output_dim] + self.path_embeddings = _extract_last_relevant(self.lstm_outputs, + self.sequence_lengths) + + +def _parse_tensorflow_example(record, max_path_len, input_keep_prob): + """Reads TensorFlow examples from a RecordReader. + + Args: + record: a record with TensorFlow example. + max_path_len: the maximum path length. + input_keep_prob: 1 - the word dropout probability + + Returns: + The paths and counts + """ + features = tf.parse_single_example(record, { + 'lemmas': + tf.FixedLenSequenceFeature( + shape=(), dtype=tf.int64, allow_missing=True), + 'postags': + tf.FixedLenSequenceFeature( + shape=(), dtype=tf.int64, allow_missing=True), + 'deplabels': + tf.FixedLenSequenceFeature( + shape=(), dtype=tf.int64, allow_missing=True), + 'dirs': + tf.FixedLenSequenceFeature( + shape=(), dtype=tf.int64, allow_missing=True), + 'counts': + tf.FixedLenSequenceFeature( + shape=(), dtype=tf.int64, allow_missing=True), + 'pathlens': + tf.FixedLenSequenceFeature( + shape=(), dtype=tf.int64, allow_missing=True), + 'reprs': + tf.FixedLenSequenceFeature( + shape=(), dtype=tf.string, allow_missing=True), + 'rel_id': + tf.FixedLenFeature([], dtype=tf.int64) + }) + + path_counts = tf.to_float(features['counts']) + seq_lengths = features['pathlens'] + + # Concatenate the edge components to create a path tensor: + # [max_paths_per_ins, max_path_length, 4] + lemmas = _word_dropout( + tf.reshape(features['lemmas'], [-1, max_path_len]), input_keep_prob) + + paths = tf.stack( + [lemmas] + [ + tf.reshape(features[f], [-1, max_path_len]) + for f in ('postags', 'deplabels', 'dirs') + ], + axis=-1) + + path_strings = features['reprs'] + + # Add an empty path to pairs with no paths + paths = tf.cond( + tf.shape(paths)[0] > 0, + lambda: paths, + lambda: tf.zeros([1, max_path_len, 4], dtype=tf.int64)) + + # Paths are left-padded. We reverse them to make them right-padded. + #paths = tf.reverse(paths, axis=[1]) + + path_counts = tf.cond( + tf.shape(path_counts)[0] > 0, + lambda: path_counts, + lambda: tf.constant([1.0], dtype=tf.float32)) + + seq_lengths = tf.cond( + tf.shape(seq_lengths)[0] > 0, + lambda: seq_lengths, + lambda: tf.constant([1], dtype=tf.int64)) + + # Duplicate the label for each path + labels = tf.ones_like(path_counts, dtype=tf.int64) * features['rel_id'] + + return paths, path_counts, seq_lengths, path_strings, labels + + +def _extract_last_relevant(output, seq_lengths): + """Get the last relevant LSTM output cell for each batch instance. 
+ + Args: + output: the LSTM outputs - a tensor with shape + [num_paths, output_dim, max_path_len] + seq_lengths: the sequences length per instance + + Returns: + The last relevant LSTM output cell for each batch instance. + """ + max_length = int(output.get_shape()[1]) + path_lengths = tf.clip_by_value(seq_lengths - 1, 0, max_length) + relevant = tf.reduce_sum(tf.multiply(output, tf.expand_dims( + tf.one_hot(path_lengths, max_length), -1)), 1) + return relevant + + +def _word_dropout(words, input_keep_prob): + """Drops words with probability 1 - input_keep_prob. + + Args: + words: a list of lemmas from the paths. + input_keep_prob: the probability to keep the word. + + Returns: + The revised list where some of the words are ed. + """ + # Create the mask: (-1) to drop, 1 to keep + prob = tf.random_uniform(tf.shape(words), 0, 1) + condition = tf.less(prob, (1 - input_keep_prob)) + mask = tf.where(condition, + tf.negative(tf.ones_like(words)), tf.ones_like(words)) + + # We need to keep zeros (), and change other numbers to 1 () + # if their mask is -1. First, we multiply the mask and the words. + # Zeros will stay zeros, and words to drop will become negative. + # Then, we change negative values to 1. + masked_words = tf.multiply(mask, words) + condition = tf.less(masked_words, 0) + dropped_words = tf.where(condition, tf.ones_like(words), words) + return dropped_words + + +def compute_path_embeddings(model, session, instances): + """Compute the path embeddings for all the distinct paths. + + Args: + model: The trained path-based model. + session: The current TensorFlow session. + instances: All the train, test and validation instances. + + Returns: + The path to ID index and the path embeddings. + """ + # Get an index for each distinct path + path_index = collections.defaultdict(itertools.count(0).next) + path_vectors = {} + + for instance in instances: + curr_path_embeddings, curr_path_strings = session.run( + [model.path_embeddings, model.path_strings], + feed_dict={model.instance: instance}) + + for i, path in enumerate(curr_path_strings): + if not path: + continue + + # Set a new/existing index for the path + index = path_index[path] + + # Save its vector + path_vectors[index] = curr_path_embeddings[i, :] + + print('Number of distinct paths: %d' % len(path_index)) + return path_index, path_vectors + + +def save_path_embeddings(model, path_vectors, path_index, embeddings_base_path): + """Saves the path embeddings. + + Args: + model: The trained path-based model. + path_vectors: The path embeddings. + path_index: A map from path to ID. + embeddings_base_path: The base directory where the embeddings are. + """ + index_range = range(max(path_index.values()) + 1) + path_matrix = [path_vectors[i] for i in index_range] + path_matrix = np.vstack(path_matrix) + + # Save the path embeddings + path_vector_filename = os.path.join( + embeddings_base_path, '%d_path_vectors' % model.lstm_output_dim) + with open(path_vector_filename, 'w') as f_out: + np.save(f_out, path_matrix) + + index_to_path = {i: p for p, i in path_index.iteritems()} + path_vocab = [index_to_path[i] for i in index_range] + + # Save the path vocabulary + path_vocab_filename = os.path.join( + embeddings_base_path, '%d_path_vocab' % model.lstm_output_dim) + with open(path_vocab_filename, 'w') as f_out: + f_out.write('\n'.join(path_vocab)) + f_out.write('\n') + + print('Saved path embeddings.') + + +def load_path_embeddings(path_embeddings_dir, path_dim): + """Loads pretrained path embeddings from a binary file and returns the matrix. 
+ + Args: + path_embeddings_dir: The directory for the path embeddings. + path_dim: The dimension of the path embeddings, used as prefix to the + path_vocab and path_vectors files. + + Returns: + The path embeddings matrix and the path_to_index dictionary. + """ + prefix = path_embeddings_dir + '/%d' % path_dim + '_' + with open(prefix + 'path_vocab') as f_in: + vocab = f_in.read().splitlines() + + vocab_size = len(vocab) + embedding_file = prefix + 'path_vectors' + + print('Embedding file "%s" has %d paths' % (embedding_file, vocab_size)) + + with open(embedding_file) as f_in: + embeddings = np.load(f_in) + + path_to_index = {p: i for i, p in enumerate(vocab)} + return embeddings, path_to_index + + +def get_indicative_paths(model, session, path_index, path_vectors, classes, + save_dir, k=20, threshold=0.8): + """Gets the most indicative paths for each class. + + Args: + model: The trained path-based model. + session: The current TensorFlow session. + path_index: A map from path to ID. + path_vectors: The path embeddings. + classes: The class label names. + save_dir: Where to save the paths. + k: The k for top-k paths. + threshold: The threshold above which to consider paths as indicative. + """ + # Define graph variables for this operation + p_path_embedding = tf.placeholder(dtype=tf.float32, + shape=[1, model.lstm_output_dim]) + p_distributions = tf.nn.softmax(tf.matmul(p_path_embedding, model.weights1)) + + # Treat each path as a pair instance with a single path, and get the + # relation distribution for it. Then, take the top paths for each relation. + + # This dictionary contains a relation as a key, and the value is a list of + # tuples of path index and score. A relation r will contain (p, s) if the + # path p is classified to r with a confidence of s. + prediction_per_relation = collections.defaultdict(list) + + index_to_path = {i: p for p, i in path_index.iteritems()} + + # Predict all the paths + for index in range(len(path_index)): + curr_path_vector = path_vectors[index] + + distribution = session.run(p_distributions, + feed_dict={ + p_path_embedding: np.reshape( + curr_path_vector, + [1, model.lstm_output_dim])}) + + distribution = distribution[0, :] + prediction = np.argmax(distribution) + prediction_per_relation[prediction].append( + (index, distribution[prediction])) + + if index % 10000 == 0: + print('Classified %d/%d (%3.2f%%) of the paths' % ( + index, len(path_index), 100 * index / len(path_index))) + + # Retrieve k-best scoring paths for each relation + for relation_index, relation in enumerate(classes): + curr_paths = sorted(prediction_per_relation[relation_index], + key=lambda item: item[1], reverse=True) + above_t = [(p, s) for (p, s) in curr_paths if s >= threshold] + top_k = curr_paths[k+1] + relation_paths = above_t if len(above_t) > len(top_k) else top_k + + paths_filename = os.path.join(save_dir, '%s.paths' % relation) + with open(paths_filename, 'w') as f_out: + for index, score in relation_paths: + print('\t'.join([index_to_path[index], str(score)]), file=f_out) diff --git a/models/research/lexnet_nc/sorted_paths_to_examples.py b/models/research/lexnet_nc/sorted_paths_to_examples.py new file mode 100644 index 0000000000000000000000000000000000000000..c21d25d710ae793f6eefd889b98414c923e4fbe6 --- /dev/null +++ b/models/research/lexnet_nc/sorted_paths_to_examples.py @@ -0,0 +1,202 @@ +#!/usr/bin/env python +# Copyright 2017, 2018 Google, Inc. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Takes as input a sorted, tab-separated of paths to produce tf.Examples.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import itertools +import os +import sys +import tensorflow as tf + +import lexnet_common + +tf.flags.DEFINE_string('input', '', 'tab-separated input data') +tf.flags.DEFINE_string('vocab', '', 'a text file containing lemma vocabulary') +tf.flags.DEFINE_string('relations', '', 'a text file containing the relations') +tf.flags.DEFINE_string('output_dir', '', 'output directory') +tf.flags.DEFINE_string('splits', '', 'text file enumerating splits') +tf.flags.DEFINE_string('default_split', '', 'default split for unlabeled pairs') +tf.flags.DEFINE_string('compression', 'GZIP', 'compression for output records') +tf.flags.DEFINE_integer('max_paths', 100, 'maximum number of paths per record') +tf.flags.DEFINE_integer('max_pathlen', 8, 'maximum path length') +FLAGS = tf.flags.FLAGS + + +def _int64_features(value): + return tf.train.Feature(int64_list=tf.train.Int64List(value=value)) + + +def _bytes_features(value): + value = [v.encode('utf-8') if isinstance(v, unicode) else v for v in value] + return tf.train.Feature(bytes_list=tf.train.BytesList(value=value)) + + +class CreateExampleFn(object): + + def __init__(self): + # Read the vocabulary. N.B. that 0 = PAD, 1 = UNK, 2 = , 3 = , hence + # the enumeration starting at 4. + with tf.gfile.GFile(FLAGS.vocab) as fh: + self.vocab = {w: ix for ix, w in enumerate(fh.read().splitlines(), start=4)} + + self.vocab.update({'': 0, '': 1, '': 2, '': 3}) + + # Read the relations. + with tf.gfile.GFile(FLAGS.relations) as fh: + self.relations = {r: ix for ix, r in enumerate(fh.read().splitlines())} + + # Some hackery to map from SpaCy postags to Google's. + lexnet_common.POSTAG_TO_ID['PROPN'] = lexnet_common.POSTAG_TO_ID['NOUN'] + lexnet_common.POSTAG_TO_ID['PRON'] = lexnet_common.POSTAG_TO_ID['NOUN'] + lexnet_common.POSTAG_TO_ID['CCONJ'] = lexnet_common.POSTAG_TO_ID['CONJ'] + #lexnet_common.DEPLABEL_TO_ID['relcl'] = lexnet_common.DEPLABEL_TO_ID['rel'] + #lexnet_common.DEPLABEL_TO_ID['compound'] = lexnet_common.DEPLABEL_TO_ID['xcomp'] + #lexnet_common.DEPLABEL_TO_ID['oprd'] = lexnet_common.DEPLABEL_TO_ID['UNK'] + + def __call__(self, mod, head, rel, raw_paths): + # Drop any really long paths. + paths = [] + counts = [] + for raw, count in raw_paths.most_common(FLAGS.max_paths): + path = raw.split('::') + if len(path) <= FLAGS.max_pathlen: + paths.append(path) + counts.append(count) + + if not paths: + return None + + # Compute the true length. + pathlens = [len(path) for path in paths] + + # Pad each path out to max_pathlen so the LSTM can eat it. 
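+    # (Added hypothetical example: with FLAGS.max_pathlen=8, a three-edge path
+    # such as ['X/NOUN/nsubj/>', 'be/VERB/ROOT/^', 'Y/NOUN/attr/<'] would be
+    # extended with five '/PAD/PAD/_' edges, so every path contributes exactly
+    # max_pathlen lemma/POS/dependency/direction quadruples to the split below.)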
+ paths = ( + itertools.islice( + itertools.chain(path, itertools.repeat('/PAD/PAD/_')), + FLAGS.max_pathlen) + for path in paths) + + # Split the lemma, POS, dependency label, and direction each into a + # separate feature. + lemmas, postags, deplabels, dirs = zip( + *(part.split('/') for part in itertools.chain(*paths))) + + lemmas = [self.vocab.get(lemma, 1) for lemma in lemmas] + postags = [lexnet_common.POSTAG_TO_ID[pos] for pos in postags] + deplabels = [lexnet_common.DEPLABEL_TO_ID.get(dep, 1) for dep in deplabels] + dirs = [lexnet_common.DIR_TO_ID.get(d, 0) for d in dirs] + + return tf.train.Example(features=tf.train.Features(feature={ + 'pair': _bytes_features(['::'.join((mod, head))]), + 'rel': _bytes_features([rel]), + 'rel_id': _int64_features([self.relations[rel]]), + 'reprs': _bytes_features(raw_paths), + 'pathlens': _int64_features(pathlens), + 'counts': _int64_features(counts), + 'lemmas': _int64_features(lemmas), + 'dirs': _int64_features(dirs), + 'deplabels': _int64_features(deplabels), + 'postags': _int64_features(postags), + 'x_embedding_id': _int64_features([self.vocab[mod]]), + 'y_embedding_id': _int64_features([self.vocab[head]]), + })) + + +def main(_): + # Read the splits file, if there is one. + assignments = {} + if FLAGS.splits: + with tf.gfile.GFile(FLAGS.splits) as fh: + parts = (line.split('\t') for line in fh.read().splitlines()) + assignments = {(mod, head): split for mod, head, split in parts} + + splits = set(assignments.itervalues()) + if FLAGS.default_split: + default_split = FLAGS.default_split + splits.add(FLAGS.default_split) + elif splits: + default_split = iter(splits).next() + else: + print('Please specify --splits, --default_split, or both', file=sys.stderr) + return 1 + + last_mod, last_head, last_label = None, None, None + raw_paths = collections.Counter() + + # Keep track of pairs we've seen to ensure that we don't get unsorted data. + seen_labeled_pairs = set() + + # Set up output compression + compression_type = getattr( + tf.python_io.TFRecordCompressionType, FLAGS.compression) + options = tf.python_io.TFRecordOptions(compression_type=compression_type) + + writers = { + split: tf.python_io.TFRecordWriter( + os.path.join(FLAGS.output_dir, '%s.tfrecs.gz' % split), + options=options) + for split in splits} + + create_example = CreateExampleFn() + + in_fh = sys.stdin if not FLAGS.input else tf.gfile.GFile(FLAGS.input) + for lineno, line in enumerate(in_fh, start=1): + if lineno % 100 == 0: + print('\rProcessed %d lines...' 
% lineno, end='', file=sys.stderr) + + parts = line.decode('utf-8').strip().split('\t') + if len(parts) != 5: + print('Skipping line %d: %d columns (expected 5)' % ( + lineno, len(parts)), file=sys.stderr) + + continue + + mod, head, label, raw_path, source = parts + if mod == last_mod and head == last_head and label == last_label: + raw_paths.update([raw_path]) + continue + + if last_mod and last_head and last_label and raw_paths: + if (last_mod, last_head, last_label) in seen_labeled_pairs: + print('It looks like the input data is not sorted; ignoring extra ' + 'record for (%s::%s, %s) at line %d' % ( + last_mod, last_head, last_label, lineno)) + else: + ex = create_example(last_mod, last_head, last_label, raw_paths) + if ex: + split = assignments.get((last_mod, last_head), default_split) + writers[split].write(ex.SerializeToString()) + + seen_labeled_pairs.add((last_mod, last_head, last_label)) + + last_mod, last_head, last_label = mod, head, label + raw_paths = collections.Counter() + + if last_mod and last_head and last_label and raw_paths: + ex = create_example(last_mod, last_head, last_label, raw_paths) + if ex: + split = assignments.get((last_mod, last_head), default_split) + writers[split].write(ex.SerializeToString()) + + for writer in writers.itervalues(): + writer.close() + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/lexnet_nc/text_embeddings_to_binary.py b/models/research/lexnet_nc/text_embeddings_to_binary.py new file mode 100644 index 0000000000000000000000000000000000000000..8226a7654e6da733ba1e8c46810a8ec8afd7a2c0 --- /dev/null +++ b/models/research/lexnet_nc/text_embeddings_to_binary.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python +# Copyright 2017, 2018 Google, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Converts a text embedding file into a binary format for quicker loading.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow as tf + +tf.flags.DEFINE_string('input', '', 'text file containing embeddings') +tf.flags.DEFINE_string('output_vocab', '', 'output file for vocabulary') +tf.flags.DEFINE_string('output_npy', '', 'output file for binary') +FLAGS = tf.flags.FLAGS + +def main(_): + vecs = [] + vocab = [] + with tf.gfile.GFile(FLAGS.input) as fh: + for line in fh: + parts = line.strip().split() + vocab.append(parts[0]) + vecs.append([float(x) for x in parts[1:]]) + + with tf.gfile.GFile(FLAGS.output_vocab, 'w') as fh: + fh.write('\n'.join(vocab)) + fh.write('\n') + + vecs = np.array(vecs, dtype=np.float32) + np.save(FLAGS.output_npy, vecs, allow_pickle=False) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/lfads/README.md b/models/research/lfads/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c75b656e4746894c42251e29a530271bb6484e4f --- /dev/null +++ b/models/research/lfads/README.md @@ -0,0 +1,224 @@ +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +# LFADS - Latent Factor Analysis via Dynamical Systems + +This code implements the model from the paper "[LFADS - Latent Factor Analysis via Dynamical Systems](http://biorxiv.org/content/early/2017/06/20/152884)". It is a sequential variational auto-encoder designed specifically for investigating neuroscience data, but can be applied widely to any time series data. In an unsupervised setting, LFADS is able to decompose time series data into various factors, such as an initial condition, a generative dynamical system, control inputs to that generator, and a low dimensional description of the observed data, called the factors. Additionally, the observation model is a loss on a probability distribution, so when LFADS processes a dataset, a denoised version of the dataset is also created. For example, if the dataset is raw spike counts, then under the negative log-likelihood loss under a Poisson distribution, the denoised data would be the inferred Poisson rates. + + +## Prerequisites + +The code is written in Python 2.7.6. You will also need: + +* **TensorFlow** version 1.5 ([install](https://www.tensorflow.org/install/)) - +* **NumPy, SciPy, Matplotlib** ([install SciPy stack](https://www.scipy.org/install.html), contains all of them) +* **h5py** ([install](https://pypi.python.org/pypi/h5py)) + + +## Getting started + +Before starting, run the following: + +
+```sh
+$ export PYTHONPATH=$PYTHONPATH:/path/to/your/directory/lfads/
+```
+ +where "path/to/your/directory" is replaced with the path to the LFADS repository (you can get this path by using the `pwd` command). This allows the nested directories to access modules from their parent directory. + +## Generate synthetic data + +In order to generate the synthetic datasets first, from the top-level lfads directory, run: + +```sh +$ cd synth_data +$ ./run_generate_synth_data.sh +$ cd .. +``` + +These synthetic datasets are provided 1. to gain insight into how the LFADS algorithm operates, and 2. to give reasonable starting points for analyses you might be interested for your own data. + +## Train an LFADS model + +Now that we have our example datasets, we can train some models! To spin up an LFADS model on the synthetic data, run any of the following commands. For the examples that are in the paper, the important hyperparameters are roughly replicated. Most hyperparameters are insensitive to small changes or won't ever be changed unless you want a very fine level of control. In the first example, all hyperparameter flags are enumerated for easy copy-pasting, but for the rest of the examples only the most important flags (~the first 9) are specified for brevity. For a full list of flags, their descriptions, and their default values, refer to the top of `run_lfads.py`. Please see Table 1 in the Online Methods of the associated paper for definitions of the most important hyperparameters. + +```sh +# Run LFADS on chaotic rnn data with no input pulses (g = 1.5) with spiking noise +$ python run_lfads.py --kind=train \ +--data_dir=/tmp/rnn_synth_data_v1.0/ \ +--data_filename_stem=chaotic_rnn_no_inputs \ +--lfads_save_dir=/tmp/lfads_chaotic_rnn_no_inputs \ +--co_dim=0 \ +--factors_dim=20 \ +--ext_input_dim=0 \ +--controller_input_lag=1 \ +--output_dist=poisson \ +--do_causal_controller=false \ +--batch_size=128 \ +--learning_rate_init=0.01 \ +--learning_rate_stop=1e-05 \ +--learning_rate_decay_factor=0.95 \ +--learning_rate_n_to_compare=6 \ +--do_reset_learning_rate=false \ +--keep_prob=0.95 \ +--con_dim=128 \ +--gen_dim=200 \ +--ci_enc_dim=128 \ +--ic_dim=64 \ +--ic_enc_dim=128 \ +--ic_prior_var_min=0.1 \ +--gen_cell_input_weight_scale=1.0 \ +--cell_weight_scale=1.0 \ +--do_feed_factors_to_controller=true \ +--kl_start_step=0 \ +--kl_increase_steps=2000 \ +--kl_ic_weight=1.0 \ +--l2_con_scale=0.0 \ +--l2_gen_scale=2000.0 \ +--l2_start_step=0 \ +--l2_increase_steps=2000 \ +--ic_prior_var_scale=0.1 \ +--ic_post_var_min=0.0001 \ +--kl_co_weight=1.0 \ +--prior_ar_nvar=0.1 \ +--cell_clip_value=5.0 \ +--max_ckpt_to_keep_lve=5 \ +--do_train_prior_ar_atau=true \ +--co_prior_var_scale=0.1 \ +--csv_log=fitlog \ +--feedback_factors_or_rates=factors \ +--do_train_prior_ar_nvar=true \ +--max_grad_norm=200.0 \ +--device=gpu:0 \ +--num_steps_for_gen_ic=100000000 \ +--ps_nexamples_to_process=100000000 \ +--checkpoint_name=lfads_vae \ +--temporal_spike_jitter_width=0 \ +--checkpoint_pb_load_name=checkpoint \ +--inject_ext_input_to_gen=false \ +--co_mean_corr_scale=0.0 \ +--gen_cell_rec_weight_scale=1.0 \ +--max_ckpt_to_keep=5 \ +--output_filename_stem="" \ +--ic_prior_var_max=0.1 \ +--prior_ar_atau=10.0 \ +--do_train_io_only=false \ +--do_train_encoder_only=false + +# Run LFADS on chaotic rnn data with no input pulses (g = 1.5) with Gaussian noise +$ python run_lfads.py --kind=train \ +--data_dir=/tmp/rnn_synth_data_v1.0/ \ +--data_filename_stem=gaussian_chaotic_rnn_no_inputs \ +--lfads_save_dir=/tmp/lfads_chaotic_rnn_inputs_g2p5 \ +--co_dim=1 \ +--factors_dim=20 \ +--output_dist=gaussian + + 
+# Run LFADS on chaotic rnn data with input pulses (g = 2.5) +$ python run_lfads.py --kind=train \ +--data_dir=/tmp/rnn_synth_data_v1.0/ \ +--data_filename_stem=chaotic_rnn_inputs_g2p5 \ +--lfads_save_dir=/tmp/lfads_chaotic_rnn_inputs_g2p5 \ +--co_dim=1 \ +--factors_dim=20 \ +--output_dist=poisson + +# Run LFADS on multi-session RNN data +$ python run_lfads.py --kind=train \ +--data_dir=/tmp/rnn_synth_data_v1.0/ \ +--data_filename_stem=chaotic_rnn_multisession \ +--lfads_save_dir=/tmp/lfads_chaotic_rnn_multisession \ +--factors_dim=10 \ +--output_dist=poisson + +# Run LFADS on integration to bound model data +$ python run_lfads.py --kind=train \ +--data_dir=/tmp/rnn_synth_data_v1.0/ \ +--data_filename_stem=itb_rnn \ +--lfads_save_dir=/tmp/lfads_itb_rnn \ +--co_dim=1 \ +--factors_dim=20 \ +--controller_input_lag=0 \ +--output_dist=poisson + +# Run LFADS on chaotic RNN data with labels +$ python run_lfads.py --kind=train \ +--data_dir=/tmp/rnn_synth_data_v1.0/ \ +--data_filename_stem=chaotic_rnns_labeled \ +--lfads_save_dir=/tmp/lfads_chaotic_rnns_labeled \ +--co_dim=0 \ +--factors_dim=20 \ +--controller_input_lag=0 \ +--ext_input_dim=1 \ +--output_dist=poisson + +# Run LFADS on chaotic rnn data with no input pulses (g = 1.5) with Gaussian noise +$ python run_lfads.py --kind=train \ +--data_dir=/tmp/rnn_synth_data_v1.0/ \ +--data_filename_stem=chaotic_rnn_no_inputs \ +--lfads_save_dir=/tmp/lfads_chaotic_rnn_no_inputs \ +--co_dim=0 \ +--factors_dim=20 \ +--ext_input_dim=0 \ +--controller_input_lag=1 \ +--output_dist=gaussian \ + + +``` + +**Tip**: If you are running LFADS on GPU and would like to run more than one model concurrently, set the `--allow_gpu_growth=True` flag on each job, otherwise one model will take up the entire GPU for performance purposes. Also, one needs to install the TensorFlow libraries with GPU support. + + +## Visualize a training model + +To visualize training curves and various other metrics while training and LFADS model, run the following command on your model directory. To launch a tensorboard on the chaotic RNN data with input pulses, for example: + +```sh +tensorboard --logdir=/tmp/lfads_chaotic_rnn_inputs_g2p5 +``` + +## Evaluate a trained model + +Once your model is finished training, there are multiple ways you can evaluate +it. Below are some sample commands to evaluate an LFADS model trained on the +chaotic rnn data with input pulses (g = 2.5). The key differences here are +setting the `--kind` flag to the appropriate mode, as well as the +`--checkpoint_pb_load_name` flag to `checkpoint_lve` and the `--batch_size` flag +(if you'd like to make it larger or smaller). All other flags should be the +same as used in training, so that the same model architecture is built. 
+ +```sh +# Take samples from posterior then average (denoising operation) +$ python run_lfads.py --kind=posterior_sample_and_average \ +--data_dir=/tmp/rnn_synth_data_v1.0/ \ +--data_filename_stem=chaotic_rnn_inputs_g2p5 \ +--lfads_save_dir=/tmp/lfads_chaotic_rnn_inputs_g2p5 \ +--co_dim=1 \ +--factors_dim=20 \ +--batch_size=1024 \ +--checkpoint_pb_load_name=checkpoint_lve + +# Sample from prior (generation of completely new samples) +$ python run_lfads.py --kind=prior_sample \ +--data_dir=/tmp/rnn_synth_data_v1.0/ \ +--data_filename_stem=chaotic_rnn_inputs_g2p5 \ +--lfads_save_dir=/tmp/lfads_chaotic_rnn_inputs_g2p5 \ +--co_dim=1 \ +--factors_dim=20 \ +--batch_size=50 \ +--checkpoint_pb_load_name=checkpoint_lve + +# Write down model parameters +$ python run_lfads.py --kind=write_model_params \ +--data_dir=/tmp/rnn_synth_data_v1.0/ \ +--data_filename_stem=chaotic_rnn_inputs_g2p5 \ +--lfads_save_dir=/tmp/lfads_chaotic_rnn_inputs_g2p5 \ +--co_dim=1 \ +--factors_dim=20 \ +--checkpoint_pb_load_name=checkpoint_lve +``` + +## Contact + +File any issues with the [issue tracker](https://github.com/tensorflow/models/issues). For any questions or problems, this code is maintained by [@sussillo](https://github.com/sussillo) and [@jazcollins](https://github.com/jazcollins). + diff --git a/models/research/lfads/distributions.py b/models/research/lfads/distributions.py new file mode 100644 index 0000000000000000000000000000000000000000..351d019af2b16117eb329b6ef1812aa006834b62 --- /dev/null +++ b/models/research/lfads/distributions.py @@ -0,0 +1,493 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ============================================================================== +import numpy as np +import tensorflow as tf +from utils import linear, log_sum_exp + +class Poisson(object): + """Poisson distributon + + Computes the log probability under the model. + + """ + def __init__(self, log_rates): + """ Create Poisson distributions with log_rates parameters. + + Args: + log_rates: a tensor-like list of log rates underlying the Poisson dist. + """ + self.logr = log_rates + + def logp(self, bin_counts): + """Compute the log probability for the counts in the bin, under the model. + + Args: + bin_counts: array-like integer counts + + Returns: + The log-probability under the Poisson models for each element of + bin_counts. + """ + k = tf.to_float(bin_counts) + # log poisson(k, r) = log(r^k * e^(-r) / k!) = k log(r) - r - log k! + # log poisson(k, r=exp(x)) = k * x - exp(x) - lgamma(k + 1) + return k * self.logr - tf.exp(self.logr) - tf.lgamma(k + 1) + + +def diag_gaussian_log_likelihood(z, mu=0.0, logvar=0.0): + """Log-likelihood under a Gaussian distribution with diagonal covariance. + Returns the log-likelihood for each dimension. One should sum the + results for the log-likelihood under the full multidimensional model. + + Args: + z: The value to compute the log-likelihood. + mu: The mean of the Gaussian + logvar: The log variance of the Gaussian. 
+ + Returns: + The log-likelihood under the Gaussian model. + """ + + return -0.5 * (logvar + np.log(2*np.pi) + \ + tf.square((z-mu)/tf.exp(0.5*logvar))) + + +def gaussian_pos_log_likelihood(unused_mean, logvar, noise): + """Gaussian log-likelihood function for a posterior in VAE + + Note: This function is specialized for a posterior distribution, that has the + form of z = mean + sigma * noise. + + Args: + unused_mean: ignore + logvar: The log variance of the distribution + noise: The noise used in the sampling of the posterior. + + Returns: + The log-likelihood under the Gaussian model. + """ + # ln N(z; mean, sigma) = - ln(sigma) - 0.5 ln 2pi - noise^2 / 2 + return - 0.5 * (logvar + np.log(2 * np.pi) + tf.square(noise)) + + +class Gaussian(object): + """Base class for Gaussian distribution classes.""" + pass + + +class DiagonalGaussian(Gaussian): + """Diagonal Gaussian with different constant mean and variances in each + dimension. + """ + + def __init__(self, batch_size, z_size, mean, logvar): + """Create a diagonal gaussian distribution. + + Args: + batch_size: The size of the batch, i.e. 0th dim in 2D tensor of samples. + z_size: The dimension of the distribution, i.e. 1st dim in 2D tensor. + mean: The N-D mean of the distribution. + logvar: The N-D log variance of the diagonal distribution. + """ + size__xz = [None, z_size] + self.mean = mean # bxn already + self.logvar = logvar # bxn already + self.noise = noise = tf.random_normal(tf.shape(logvar)) + self.sample = mean + tf.exp(0.5 * logvar) * noise + mean.set_shape(size__xz) + logvar.set_shape(size__xz) + self.sample.set_shape(size__xz) + + def logp(self, z=None): + """Compute the log-likelihood under the distribution. + + Args: + z (optional): value to compute likelihood for, if None, use sample. + + Returns: + The likelihood of z under the model. + """ + if z is None: + z = self.sample + + # This is needed to make sure that the gradients are simple. + # The value of the function shouldn't change. + if z == self.sample: + return gaussian_pos_log_likelihood(self.mean, self.logvar, self.noise) + + return diag_gaussian_log_likelihood(z, self.mean, self.logvar) + + +class LearnableDiagonalGaussian(Gaussian): + """Diagonal Gaussian whose mean and variance are learned parameters.""" + + def __init__(self, batch_size, z_size, name, mean_init=0.0, + var_init=1.0, var_min=0.0, var_max=1000000.0): + """Create a learnable diagonal gaussian distribution. + + Args: + batch_size: The size of the batch, i.e. 0th dim in 2D tensor of samples. + z_size: The dimension of the distribution, i.e. 1st dim in 2D tensor. + name: prefix name for the mean and log TF variables. + mean_init (optional): The N-D mean initialization of the distribution. + var_init (optional): The N-D variance initialization of the diagonal + distribution. + var_min (optional): The minimum value the learned variance can take in any + dimension. + var_max (optional): The maximum value the learned variance can take in any + dimension. 
+ """ + + size_1xn = [1, z_size] + size__xn = [None, z_size] + size_bx1 = tf.stack([batch_size, 1]) + assert var_init > 0.0, "Problems" + assert var_max >= var_min, "Problems" + assert var_init >= var_min, "Problems" + assert var_max >= var_init, "Problems" + + + z_mean_1xn = tf.get_variable(name=name+"/mean", shape=size_1xn, + initializer=tf.constant_initializer(mean_init)) + self.mean_bxn = mean_bxn = tf.tile(z_mean_1xn, size_bx1) + mean_bxn.set_shape(size__xn) # tile loses shape + + log_var_init = np.log(var_init) + if var_max > var_min: + var_is_trainable = True + else: + var_is_trainable = False + + z_logvar_1xn = \ + tf.get_variable(name=(name+"/logvar"), shape=size_1xn, + initializer=tf.constant_initializer(log_var_init), + trainable=var_is_trainable) + + if var_is_trainable: + z_logit_var_1xn = tf.exp(z_logvar_1xn) + z_var_1xn = tf.nn.sigmoid(z_logit_var_1xn)*(var_max-var_min) + var_min + z_logvar_1xn = tf.log(z_var_1xn) + + logvar_bxn = tf.tile(z_logvar_1xn, size_bx1) + self.logvar_bxn = logvar_bxn + self.noise_bxn = noise_bxn = tf.random_normal(tf.shape(logvar_bxn)) + self.sample_bxn = mean_bxn + tf.exp(0.5 * logvar_bxn) * noise_bxn + + def logp(self, z=None): + """Compute the log-likelihood under the distribution. + + Args: + z (optional): value to compute likelihood for, if None, use sample. + + Returns: + The likelihood of z under the model. + """ + if z is None: + z = self.sample + + # This is needed to make sure that the gradients are simple. + # The value of the function shouldn't change. + if z == self.sample_bxn: + return gaussian_pos_log_likelihood(self.mean_bxn, self.logvar_bxn, + self.noise_bxn) + + return diag_gaussian_log_likelihood(z, self.mean_bxn, self.logvar_bxn) + + @property + def mean(self): + return self.mean_bxn + + @property + def logvar(self): + return self.logvar_bxn + + @property + def sample(self): + return self.sample_bxn + + +class DiagonalGaussianFromInput(Gaussian): + """Diagonal Gaussian whose mean and variance are conditioned on other + variables. + + Note: the parameters to convert from input to the learned mean and log + variance are held in this class. + """ + + def __init__(self, x_bxu, z_size, name, var_min=0.0): + """Create an input dependent diagonal Gaussian distribution. + + Args: + x: The input tensor from which the mean and variance are computed, + via a linear transformation of x. I.e. + mu = Wx + b, log(var) = Mx + c + z_size: The size of the distribution. + name: The name to prefix to learned variables. + var_min (optional): Minimal variance allowed. This is an additional + way to control the amount of information getting through the stochastic + layer. + """ + size_bxn = tf.stack([tf.shape(x_bxu)[0], z_size]) + self.mean_bxn = mean_bxn = linear(x_bxu, z_size, name=(name+"/mean")) + logvar_bxn = linear(x_bxu, z_size, name=(name+"/logvar")) + if var_min > 0.0: + logvar_bxn = tf.log(tf.exp(logvar_bxn) + var_min) + self.logvar_bxn = logvar_bxn + + self.noise_bxn = noise_bxn = tf.random_normal(size_bxn) + self.noise_bxn.set_shape([None, z_size]) + self.sample_bxn = mean_bxn + tf.exp(0.5 * logvar_bxn) * noise_bxn + + def logp(self, z=None): + """Compute the log-likelihood under the distribution. + + Args: + z (optional): value to compute likelihood for, if None, use sample. + + Returns: + The likelihood of z under the model. + """ + + if z is None: + z = self.sample + + # This is needed to make sure that the gradients are simple. + # The value of the function shouldn't change. 
+ if z == self.sample_bxn: + return gaussian_pos_log_likelihood(self.mean_bxn, + self.logvar_bxn, self.noise_bxn) + + return diag_gaussian_log_likelihood(z, self.mean_bxn, self.logvar_bxn) + + @property + def mean(self): + return self.mean_bxn + + @property + def logvar(self): + return self.logvar_bxn + + @property + def sample(self): + return self.sample_bxn + + +class GaussianProcess: + """Base class for Gaussian processes.""" + pass + + +class LearnableAutoRegressive1Prior(GaussianProcess): + """AR(1) model where autocorrelation and process variance are learned + parameters. Assumed zero mean. + + """ + + def __init__(self, batch_size, z_size, + autocorrelation_taus, noise_variances, + do_train_prior_ar_atau, do_train_prior_ar_nvar, + num_steps, name): + """Create a learnable autoregressive (1) process. + + Args: + batch_size: The size of the batch, i.e. 0th dim in 2D tensor of samples. + z_size: The dimension of the distribution, i.e. 1st dim in 2D tensor. + autocorrelation_taus: The auto correlation time constant of the AR(1) + process. + A value of 0 is uncorrelated gaussian noise. + noise_variances: The variance of the additive noise, *not* the process + variance. + do_train_prior_ar_atau: Train or leave as constant, the autocorrelation? + do_train_prior_ar_nvar: Train or leave as constant, the noise variance? + num_steps: Number of steps to run the process. + name: The name to prefix to learned TF variables. + """ + + # Note the use of the plural in all of these quantities. This is intended + # to mark that even though a sample z_t from the posterior is thought of a + # single sample of a multidimensional gaussian, the prior is actually + # thought of as U AR(1) processes, where U is the dimension of the inferred + # input. + size_bx1 = tf.stack([batch_size, 1]) + size__xu = [None, z_size] + # process variance, the variance at time t over all instantiations of AR(1) + # with these parameters. + log_evar_inits_1xu = tf.expand_dims(tf.log(noise_variances), 0) + self.logevars_1xu = logevars_1xu = \ + tf.Variable(log_evar_inits_1xu, name=name+"/logevars", dtype=tf.float32, + trainable=do_train_prior_ar_nvar) + self.logevars_bxu = logevars_bxu = tf.tile(logevars_1xu, size_bx1) + logevars_bxu.set_shape(size__xu) # tile loses shape + + # \tau, which is the autocorrelation time constant of the AR(1) process + log_atau_inits_1xu = tf.expand_dims(tf.log(autocorrelation_taus), 0) + self.logataus_1xu = logataus_1xu = \ + tf.Variable(log_atau_inits_1xu, name=name+"/logatau", dtype=tf.float32, + trainable=do_train_prior_ar_atau) + + # phi in x_t = \mu + phi x_tm1 + \eps + # phi = exp(-1/tau) + # phi = exp(-1/exp(logtau)) + # phi = exp(-exp(-logtau)) + phis_1xu = tf.exp(-tf.exp(-logataus_1xu)) + self.phis_bxu = phis_bxu = tf.tile(phis_1xu, size_bx1) + phis_bxu.set_shape(size__xu) + + # process noise + # pvar = evar / (1- phi^2) + # logpvar = log ( exp(logevar) / (1 - phi^2) ) + # logpvar = logevar - log(1-phi^2) + # logpvar = logevar - (log(1-phi) + log(1+phi)) + self.logpvars_1xu = \ + logevars_1xu - tf.log(1.0-phis_1xu) - tf.log(1.0+phis_1xu) + self.logpvars_bxu = logpvars_bxu = tf.tile(self.logpvars_1xu, size_bx1) + logpvars_bxu.set_shape(size__xu) + + # process mean (zero but included in for completeness) + self.pmeans_bxu = pmeans_bxu = tf.zeros_like(phis_bxu) + + # For sampling from the prior during de-novo generation. 
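+    # The sampling below starts the chain in its stationary distribution: at
+    # t=0 the variance is the process variance pvar = evar / (1 - phi^2), and
+    # for t > 0 each step is z_t ~ N(phi * z_{t-1}, evar) with phi = exp(-1/tau).
+    # For instance, tau = 10 gives phi ~= 0.905 and pvar ~= 5.5 * evar.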
+ self.means_t = means_t = [None] * num_steps + self.logvars_t = logvars_t = [None] * num_steps + self.samples_t = samples_t = [None] * num_steps + self.gaussians_t = gaussians_t = [None] * num_steps + sample_bxu = tf.zeros_like(phis_bxu) + for t in range(num_steps): + # process variance used here to make process completely stationary + if t == 0: + logvar_pt_bxu = self.logpvars_bxu + else: + logvar_pt_bxu = self.logevars_bxu + + z_mean_pt_bxu = pmeans_bxu + phis_bxu * sample_bxu + gaussians_t[t] = DiagonalGaussian(batch_size, z_size, + mean=z_mean_pt_bxu, + logvar=logvar_pt_bxu) + sample_bxu = gaussians_t[t].sample + samples_t[t] = sample_bxu + logvars_t[t] = logvar_pt_bxu + means_t[t] = z_mean_pt_bxu + + def logp_t(self, z_t_bxu, z_tm1_bxu=None): + """Compute the log-likelihood under the distribution for a given time t, + not the whole sequence. + + Args: + z_t_bxu: sample to compute likelihood for at time t. + z_tm1_bxu (optional): sample condition probability of z_t upon. + + Returns: + The likelihood of p_t under the model at time t. i.e. + p(z_t|z_tm1_bxu) = N(z_tm1_bxu * phis, eps^2) + + """ + if z_tm1_bxu is None: + return diag_gaussian_log_likelihood(z_t_bxu, self.pmeans_bxu, + self.logpvars_bxu) + else: + means_t_bxu = self.pmeans_bxu + self.phis_bxu * z_tm1_bxu + logp_tgtm1_bxu = diag_gaussian_log_likelihood(z_t_bxu, + means_t_bxu, + self.logevars_bxu) + return logp_tgtm1_bxu + + +class KLCost_GaussianGaussian(object): + """log p(x|z) + KL(q||p) terms for Gaussian posterior and Gaussian prior. See + eqn 10 and Appendix B in VAE for latter term, + http://arxiv.org/abs/1312.6114 + + The log p(x|z) term is the reconstruction error under the model. + The KL term represents the penalty for passing information from the encoder + to the decoder. + To sample KL(q||p), we simply sample + ln q - ln p + by drawing samples from q and averaging. + """ + + def __init__(self, zs, prior_zs): + """Create a lower bound in three parts, normalized reconstruction + cost, normalized KL divergence cost, and their sum. + + E_q[ln p(z_i | z_{i+1}) / q(z_i | x) + \int q(z) ln p(z) dz = - 0.5 ln(2pi) - 0.5 \sum (ln(sigma_p^2) + \ + sigma_q^2 / sigma_p^2 + (mean_p - mean_q)^2 / sigma_p^2) + + \int q(z) ln q(z) dz = - 0.5 ln(2pi) - 0.5 \sum (ln(sigma_q^2) + 1) + + Args: + zs: posterior z ~ q(z|x) + prior_zs: prior zs + """ + # L = -KL + log p(x|z), to maximize bound on likelihood + # -L = KL - log p(x|z), to minimize bound on NLL + # so 'KL cost' is postive KL divergence + kl_b = 0.0 + for z, prior_z in zip(zs, prior_zs): + assert isinstance(z, Gaussian) + assert isinstance(prior_z, Gaussian) + # ln(2pi) terms cancel + kl_b += 0.5 * tf.reduce_sum( + prior_z.logvar - z.logvar + + tf.exp(z.logvar - prior_z.logvar) + + tf.square((z.mean - prior_z.mean) / tf.exp(0.5 * prior_z.logvar)) + - 1.0, [1]) + + self.kl_cost_b = kl_b + self.kl_cost = tf.reduce_mean(kl_b) + + +class KLCost_GaussianGaussianProcessSampled(object): + """ log p(x|z) + KL(q||p) terms for Gaussian posterior and Gaussian process + prior via sampling. + + The log p(x|z) term is the reconstruction error under the model. + The KL term represents the penalty for passing information from the encoder + to the decoder. + To sample KL(q||p), we simply sample + ln q - ln p + by drawing samples from q and averaging. + """ + + def __init__(self, post_zs, prior_z_process): + """Create a lower bound in three parts, normalized reconstruction + cost, normalized KL divergence cost, and their sum. 
+ + Args: + post_zs: posterior z ~ q(z|x) + prior_z_process: prior AR(1) process + """ + assert len(post_zs) > 1, "GP is for time, need more than 1 time step." + assert isinstance(prior_z_process, GaussianProcess), "Must use GP." + + # L = -KL + log p(x|z), to maximize bound on likelihood + # -L = KL - log p(x|z), to minimize bound on NLL + # so 'KL cost' is postive KL divergence + z0_bxu = post_zs[0].sample + logq_bxu = post_zs[0].logp(z0_bxu) + logp_bxu = prior_z_process.logp_t(z0_bxu) + z_tm1_bxu = z0_bxu + for z_t in post_zs[1:]: + # posterior is independent in time, prior is not + z_t_bxu = z_t.sample + logq_bxu += z_t.logp(z_t_bxu) + logp_bxu += prior_z_process.logp_t(z_t_bxu, z_tm1_bxu) + z_tm1_bxu = z_t_bxu + + kl_bxu = logq_bxu - logp_bxu + kl_b = tf.reduce_sum(kl_bxu, [1]) + self.kl_cost_b = kl_b + self.kl_cost = tf.reduce_mean(kl_b) diff --git a/models/research/lfads/lfads.py b/models/research/lfads/lfads.py new file mode 100644 index 0000000000000000000000000000000000000000..308ebabe90fbbb90701ac0585e7c1eaeaf6e3649 --- /dev/null +++ b/models/research/lfads/lfads.py @@ -0,0 +1,2170 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ============================================================================== +""" +LFADS - Latent Factor Analysis via Dynamical Systems. + +LFADS is an unsupervised method to decompose time series data into +various factors, such as an initial condition, a generative +dynamical system, control inputs to that generator, and a low +dimensional description of the observed data, called the factors. +Additionally, the observations have a noise model (in this case +Poisson), so a denoised version of the observations is also created +(e.g. underlying rates of a Poisson distribution given the observed +event counts). + +The main data structure being passed around is a dataset. This is a dictionary +of data dictionaries. + +DATASET: The top level dictionary is simply name (string -> dictionary). +The nested dictionary is the DATA DICTIONARY, which has the following keys: + 'train_data' and 'valid_data', whose values are the corresponding training + and validation data with shape + ExTxD, E - # examples, T - # time steps, D - # dimensions in data. + The data dictionary also has a few more keys: + 'train_ext_input' and 'valid_ext_input', if there are know external inputs + to the system being modeled, these take on dimensions: + ExTxI, E - # examples, T - # time steps, I = # dimensions in input. + 'alignment_matrix_cxf' - If you are using multiple days data, it's possible + that one can align the channels (see manuscript). If so each dataset will + contain this matrix, which will be used for both the input adapter and the + output adapter for each dataset. These matrices, if provided, must be of + size [data_dim x factors] where data_dim is the number of neurons recorded + on that day, and factors is chosen and set through the '--factors' flag. + 'alignment_bias_c' - See alignment_matrix_cxf. 
This bias will used to + the offset for the alignment transformation. It will *subtract* off the + bias from the data, so pca style inits can align factors across sessions. + + + If one runs LFADS on data where the true rates are known for some trials, + (say simulated, testing data, as in the example shipped with the paper), then + one can add three more fields for plotting purposes. These are 'train_truth' + and 'valid_truth', and 'conversion_factor'. These have the same dimensions as + 'train_data', and 'valid_data' but represent the underlying rates of the + observations. Finally, if one needs to convert scale for plotting the true + underlying firing rates, there is the 'conversion_factor' key. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import numpy as np +import os +import tensorflow as tf +from distributions import LearnableDiagonalGaussian, DiagonalGaussianFromInput +from distributions import diag_gaussian_log_likelihood +from distributions import KLCost_GaussianGaussian, Poisson +from distributions import LearnableAutoRegressive1Prior +from distributions import KLCost_GaussianGaussianProcessSampled + +from utils import init_linear, linear, list_t_bxn_to_tensor_bxtxn, write_data +from utils import log_sum_exp, flatten +from plot_lfads import plot_lfads + + +class GRU(object): + """Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078). + + """ + def __init__(self, num_units, forget_bias=1.0, weight_scale=1.0, + clip_value=np.inf, collections=None): + """Create a GRU object. + + Args: + num_units: Number of units in the GRU + forget_bias (optional): Hack to help learning. + weight_scale (optional): weights are scaled by ws/sqrt(#inputs), with + ws being the weight scale. + clip_value (optional): if the recurrent values grow above this value, + clip them. + collections (optional): List of additonal collections variables should + belong to. + """ + self._num_units = num_units + self._forget_bias = forget_bias + self._weight_scale = weight_scale + self._clip_value = clip_value + self._collections = collections + + @property + def state_size(self): + return self._num_units + + @property + def output_size(self): + return self._num_units + + @property + def state_multiplier(self): + return 1 + + def output_from_state(self, state): + """Return the output portion of the state.""" + return state + + def __call__(self, inputs, state, scope=None): + """Gated recurrent unit (GRU) function. + + Args: + inputs: A 2D batch x input_dim tensor of inputs. + state: The previous state from the last time step. + scope (optional): TF variable scope for defined GRU variables. + + Returns: + A tuple (state, state), where state is the newly computed state at time t. + It is returned twice to respect an interface that works for LSTMs. + """ + + x = inputs + h = state + if inputs is not None: + xh = tf.concat(axis=1, values=[x, h]) + else: + xh = h + + with tf.variable_scope(scope or type(self).__name__): # "GRU" + with tf.variable_scope("Gates"): # Reset gate and update gate. + # We start with bias of 1.0 to not reset and not update. 
+ r, u = tf.split(axis=1, num_or_size_splits=2, value=linear(xh, + 2 * self._num_units, + alpha=self._weight_scale, + name="xh_2_ru", + collections=self._collections)) + r, u = tf.sigmoid(r), tf.sigmoid(u + self._forget_bias) + with tf.variable_scope("Candidate"): + xrh = tf.concat(axis=1, values=[x, r * h]) + c = tf.tanh(linear(xrh, self._num_units, name="xrh_2_c", + collections=self._collections)) + new_h = u * h + (1 - u) * c + new_h = tf.clip_by_value(new_h, -self._clip_value, self._clip_value) + + return new_h, new_h + + +class GenGRU(object): + """Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078). + + This version is specialized for the generator, but isn't as fast, so + we have two. Note this allows for l2 regularization on the recurrent + weights, but also implicitly rescales the inputs via the 1/sqrt(input) + scaling in the linear helper routine to be large magnitude, if there are + fewer inputs than recurrent state. + + """ + def __init__(self, num_units, forget_bias=1.0, + input_weight_scale=1.0, rec_weight_scale=1.0, clip_value=np.inf, + input_collections=None, recurrent_collections=None): + """Create a GRU object. + + Args: + num_units: Number of units in the GRU + forget_bias (optional): Hack to help learning. + input_weight_scale (optional): weights are scaled ws/sqrt(#inputs), with + ws being the weight scale. + rec_weight_scale (optional): weights are scaled ws/sqrt(#inputs), + with ws being the weight scale. + clip_value (optional): if the recurrent values grow above this value, + clip them. + input_collections (optional): List of additonal collections variables + that input->rec weights should belong to. + recurrent_collections (optional): List of additonal collections variables + that rec->rec weights should belong to. + """ + self._num_units = num_units + self._forget_bias = forget_bias + self._input_weight_scale = input_weight_scale + self._rec_weight_scale = rec_weight_scale + self._clip_value = clip_value + self._input_collections = input_collections + self._rec_collections = recurrent_collections + + @property + def state_size(self): + return self._num_units + + @property + def output_size(self): + return self._num_units + + @property + def state_multiplier(self): + return 1 + + def output_from_state(self, state): + """Return the output portion of the state.""" + return state + + def __call__(self, inputs, state, scope=None): + """Gated recurrent unit (GRU) function. + + Args: + inputs: A 2D batch x input_dim tensor of inputs. + state: The previous state from the last time step. + scope (optional): TF variable scope for defined GRU variables. + + Returns: + A tuple (state, state), where state is the newly computed state at time t. + It is returned twice to respect an interface that works for LSTMs. + """ + + x = inputs + h = state + with tf.variable_scope(scope or type(self).__name__): # "GRU" + with tf.variable_scope("Gates"): # Reset gate and update gate. + # We start with bias of 1.0 to not reset and not update. 
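+        # Unlike the GRU above, the input and recurrent contributions are
+        # computed by separate linear maps (x_2_ru and h_2_ru here, x_2_c and
+        # rh_2_c in the candidate), so the recurrent weights carry their own
+        # weight scale and live in recurrent_collections for L2 regularization;
+        # when inputs is None only the recurrent path is used.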
+ r_x = u_x = 0.0 + if x is not None: + r_x, u_x = tf.split(axis=1, num_or_size_splits=2, value=linear(x, + 2 * self._num_units, + alpha=self._input_weight_scale, + do_bias=False, + name="x_2_ru", + normalized=False, + collections=self._input_collections)) + + r_h, u_h = tf.split(axis=1, num_or_size_splits=2, value=linear(h, + 2 * self._num_units, + do_bias=True, + alpha=self._rec_weight_scale, + name="h_2_ru", + collections=self._rec_collections)) + r = r_x + r_h + u = u_x + u_h + r, u = tf.sigmoid(r), tf.sigmoid(u + self._forget_bias) + + with tf.variable_scope("Candidate"): + c_x = 0.0 + if x is not None: + c_x = linear(x, self._num_units, name="x_2_c", do_bias=False, + alpha=self._input_weight_scale, + normalized=False, + collections=self._input_collections) + c_rh = linear(r*h, self._num_units, name="rh_2_c", do_bias=True, + alpha=self._rec_weight_scale, + collections=self._rec_collections) + c = tf.tanh(c_x + c_rh) + + new_h = u * h + (1 - u) * c + new_h = tf.clip_by_value(new_h, -self._clip_value, self._clip_value) + + return new_h, new_h + + +class LFADS(object): + """LFADS - Latent Factor Analysis via Dynamical Systems. + + LFADS is an unsupervised method to decompose time series data into + various factors, such as an initial condition, a generative + dynamical system, inferred inputs to that generator, and a low + dimensional description of the observed data, called the factors. + Additoinally, the observations have a noise model (in this case + Poisson), so a denoised version of the observations is also created + (e.g. underlying rates of a Poisson distribution given the observed + event counts). + """ + + def __init__(self, hps, kind="train", datasets=None): + """Create an LFADS model. + + train - a model for training, sampling of posteriors is used + posterior_sample_and_average - sample from the posterior, this is used + for evaluating the expected value of the outputs of LFADS, given a + specific input, by averaging over multiple samples from the approx + posterior. Also used for the lower bound on the negative + log-likelihood using IWAE error (Importance Weighed Auto-encoder). + This is the denoising operation. + prior_sample - a model for generation - sampling from priors is used + + Args: + hps: The dictionary of hyper parameters. + kind: the type of model to build (see above). + datasets: a dictionary of named data_dictionaries, see top of lfads.py + """ + print("Building graph...") + all_kinds = ['train', 'posterior_sample_and_average', 'posterior_push_mean', + 'prior_sample'] + assert kind in all_kinds, 'Wrong kind' + if hps.feedback_factors_or_rates == "rates": + assert len(hps.dataset_names) == 1, \ + "Multiple datasets not supported for rate feedback." + num_steps = hps.num_steps + ic_dim = hps.ic_dim + co_dim = hps.co_dim + ext_input_dim = hps.ext_input_dim + cell_class = GRU + gen_cell_class = GenGRU + + def makelambda(v): # Used with tf.case + return lambda: v + + # Define the data placeholder, and deal with all parts of the graph + # that are dataset dependent. + self.dataName = tf.placeholder(tf.string, shape=()) + # The batch_size to be inferred from data, as normal. + # Additionally, the data_dim will be inferred as well, allowing for a + # single placeholder for all datasets, regardless of data dimension. 
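+    # dataset_ph below is shaped [None, num_steps, None]: batch size and data
+    # dimension are both inferred at feed time, so one placeholder serves every
+    # dataset, and dataName selects the matching readin/readout matrices via
+    # tf.case.  The dtype is int32 for Poisson (spike counts) and float32 for
+    # Gaussian observations.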
+ if hps.output_dist == 'poisson': + # Enforce correct dtype + assert np.issubdtype( + datasets[hps.dataset_names[0]]['train_data'].dtype, int), \ + "Data dtype must be int for poisson output distribution" + data_dtype = tf.int32 + elif hps.output_dist == 'gaussian': + assert np.issubdtype( + datasets[hps.dataset_names[0]]['train_data'].dtype, float), \ + "Data dtype must be float for gaussian output dsitribution" + data_dtype = tf.float32 + else: + assert False, "NIY" + self.dataset_ph = dataset_ph = tf.placeholder(data_dtype, + [None, num_steps, None], + name="data") + self.train_step = tf.get_variable("global_step", [], tf.int64, + tf.zeros_initializer(), + trainable=False) + self.hps = hps + ndatasets = hps.ndatasets + factors_dim = hps.factors_dim + self.preds = preds = [None] * ndatasets + self.fns_in_fac_Ws = fns_in_fac_Ws = [None] * ndatasets + self.fns_in_fatcor_bs = fns_in_fac_bs = [None] * ndatasets + self.fns_out_fac_Ws = fns_out_fac_Ws = [None] * ndatasets + self.fns_out_fac_bs = fns_out_fac_bs = [None] * ndatasets + self.datasetNames = dataset_names = hps.dataset_names + self.ext_inputs = ext_inputs = None + + if len(dataset_names) == 1: # single session + if 'alignment_matrix_cxf' in datasets[dataset_names[0]].keys(): + used_in_factors_dim = factors_dim + in_identity_if_poss = False + else: + used_in_factors_dim = hps.dataset_dims[dataset_names[0]] + in_identity_if_poss = True + else: # multisession + used_in_factors_dim = factors_dim + in_identity_if_poss = False + + for d, name in enumerate(dataset_names): + data_dim = hps.dataset_dims[name] + in_mat_cxf = None + in_bias_1xf = None + align_bias_1xc = None + + if datasets and 'alignment_matrix_cxf' in datasets[name].keys(): + dataset = datasets[name] + if hps.do_train_readin: + print("Initializing trainable readin matrix with alignment matrix" \ + " provided for dataset:", name) + else: + print("Setting non-trainable readin matrix to alignment matrix" \ + " provided for dataset:", name) + in_mat_cxf = dataset['alignment_matrix_cxf'].astype(np.float32) + if in_mat_cxf.shape != (data_dim, factors_dim): + raise ValueError("""Alignment matrix must have dimensions %d x %d + (data_dim x factors_dim), but currently has %d x %d."""% + (data_dim, factors_dim, in_mat_cxf.shape[0], + in_mat_cxf.shape[1])) + if datasets and 'alignment_bias_c' in datasets[name].keys(): + dataset = datasets[name] + if hps.do_train_readin: + print("Initializing trainable readin bias with alignment bias " \ + "provided for dataset:", name) + else: + print("Setting non-trainable readin bias to alignment bias " \ + "provided for dataset:", name) + align_bias_c = dataset['alignment_bias_c'].astype(np.float32) + align_bias_1xc = np.expand_dims(align_bias_c, axis=0) + if align_bias_1xc.shape[1] != data_dim: + raise ValueError("""Alignment bias must have dimensions %d + (data_dim), but currently has %d."""% + (data_dim, in_mat_cxf.shape[0])) + if in_mat_cxf is not None and align_bias_1xc is not None: + # (data - alignment_bias) * W_in + # data * W_in - alignment_bias * W_in + # So b = -alignment_bias * W_in to accommodate PCA style offset. 
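+        # Shape check: align_bias_1xc is 1 x data_dim and in_mat_cxf is
+        # data_dim x factors_dim, so the readin bias computed below is
+        # 1 x factors_dim.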
+ in_bias_1xf = -np.dot(align_bias_1xc, in_mat_cxf) + + if hps.do_train_readin: + # only add to IO transformations collection only if we want it to be + # learnable, because IO_transformations collection will be trained + # when do_train_io_only + collections_readin=['IO_transformations'] + else: + collections_readin=None + + in_fac_lin = init_linear(data_dim, used_in_factors_dim, + do_bias=True, + mat_init_value=in_mat_cxf, + bias_init_value=in_bias_1xf, + identity_if_possible=in_identity_if_poss, + normalized=False, name="x_2_infac_"+name, + collections=collections_readin, + trainable=hps.do_train_readin) + in_fac_W, in_fac_b = in_fac_lin + fns_in_fac_Ws[d] = makelambda(in_fac_W) + fns_in_fac_bs[d] = makelambda(in_fac_b) + + with tf.variable_scope("glm"): + out_identity_if_poss = False + if len(dataset_names) == 1 and \ + factors_dim == hps.dataset_dims[dataset_names[0]]: + out_identity_if_poss = True + for d, name in enumerate(dataset_names): + data_dim = hps.dataset_dims[name] + in_mat_cxf = None + if datasets and 'alignment_matrix_cxf' in datasets[name].keys(): + dataset = datasets[name] + in_mat_cxf = dataset['alignment_matrix_cxf'].astype(np.float32) + + if datasets and 'alignment_bias_c' in datasets[name].keys(): + dataset = datasets[name] + align_bias_c = dataset['alignment_bias_c'].astype(np.float32) + align_bias_1xc = np.expand_dims(align_bias_c, axis=0) + + out_mat_fxc = None + out_bias_1xc = None + if in_mat_cxf is not None: + out_mat_fxc = in_mat_cxf.T + if align_bias_1xc is not None: + out_bias_1xc = align_bias_1xc + + if hps.output_dist == 'poisson': + out_fac_lin = init_linear(factors_dim, data_dim, do_bias=True, + mat_init_value=out_mat_fxc, + bias_init_value=out_bias_1xc, + identity_if_possible=out_identity_if_poss, + normalized=False, + name="fac_2_logrates_"+name, + collections=['IO_transformations']) + out_fac_W, out_fac_b = out_fac_lin + + elif hps.output_dist == 'gaussian': + out_fac_lin_mean = \ + init_linear(factors_dim, data_dim, do_bias=True, + mat_init_value=out_mat_fxc, + bias_init_value=out_bias_1xc, + normalized=False, + name="fac_2_means_"+name, + collections=['IO_transformations']) + out_fac_W_mean, out_fac_b_mean = out_fac_lin_mean + + mat_init_value = np.zeros([factors_dim, data_dim]).astype(np.float32) + bias_init_value = np.ones([1, data_dim]).astype(np.float32) + out_fac_lin_logvar = \ + init_linear(factors_dim, data_dim, do_bias=True, + mat_init_value=mat_init_value, + bias_init_value=bias_init_value, + normalized=False, + name="fac_2_logvars_"+name, + collections=['IO_transformations']) + out_fac_W_mean, out_fac_b_mean = out_fac_lin_mean + out_fac_W_logvar, out_fac_b_logvar = out_fac_lin_logvar + out_fac_W = tf.concat( + axis=1, values=[out_fac_W_mean, out_fac_W_logvar]) + out_fac_b = tf.concat( + axis=1, values=[out_fac_b_mean, out_fac_b_logvar]) + else: + assert False, "NIY" + + preds[d] = tf.equal(tf.constant(name), self.dataName) + data_dim = hps.dataset_dims[name] + fns_out_fac_Ws[d] = makelambda(out_fac_W) + fns_out_fac_bs[d] = makelambda(out_fac_b) + + pf_pairs_in_fac_Ws = zip(preds, fns_in_fac_Ws) + pf_pairs_in_fac_bs = zip(preds, fns_in_fac_bs) + pf_pairs_out_fac_Ws = zip(preds, fns_out_fac_Ws) + pf_pairs_out_fac_bs = zip(preds, fns_out_fac_bs) + + this_in_fac_W = tf.case(pf_pairs_in_fac_Ws, exclusive=True) + this_in_fac_b = tf.case(pf_pairs_in_fac_bs, exclusive=True) + this_out_fac_W = tf.case(pf_pairs_out_fac_Ws, exclusive=True) + this_out_fac_b = tf.case(pf_pairs_out_fac_bs, exclusive=True) + + # External inputs (not changing by dataset, 
by definition). + if hps.ext_input_dim > 0: + self.ext_input = tf.placeholder(tf.float32, + [None, num_steps, ext_input_dim], + name="ext_input") + else: + self.ext_input = None + ext_input_bxtxi = self.ext_input + + self.keep_prob = keep_prob = tf.placeholder(tf.float32, [], "keep_prob") + self.batch_size = batch_size = int(hps.batch_size) + self.learning_rate = tf.Variable(float(hps.learning_rate_init), + trainable=False, name="learning_rate") + self.learning_rate_decay_op = self.learning_rate.assign( + self.learning_rate * hps.learning_rate_decay_factor) + + # Dropout the data. + dataset_do_bxtxd = tf.nn.dropout(tf.to_float(dataset_ph), keep_prob) + if hps.ext_input_dim > 0: + ext_input_do_bxtxi = tf.nn.dropout(ext_input_bxtxi, keep_prob) + else: + ext_input_do_bxtxi = None + + # ENCODERS + def encode_data(dataset_bxtxd, enc_cell, name, forward_or_reverse, + num_steps_to_encode): + """Encode data for LFADS + Args: + dataset_bxtxd - the data to encode, as a 3 tensor, with dims + time x batch x data dims. + enc_cell: encoder cell + name: name of encoder + forward_or_reverse: string, encode in forward or reverse direction + num_steps_to_encode: number of steps to encode, 0:num_steps_to_encode + Returns: + encoded data as a list with num_steps_to_encode items, in order + """ + if forward_or_reverse == "forward": + dstr = "_fwd" + time_fwd_or_rev = range(num_steps_to_encode) + else: + dstr = "_rev" + time_fwd_or_rev = reversed(range(num_steps_to_encode)) + + with tf.variable_scope(name+"_enc"+dstr, reuse=False): + enc_state = tf.tile( + tf.Variable(tf.zeros([1, enc_cell.state_size]), + name=name+"_enc_t0"+dstr), tf.stack([batch_size, 1])) + enc_state.set_shape([None, enc_cell.state_size]) # tile loses shape + + enc_outs = [None] * num_steps_to_encode + for i, t in enumerate(time_fwd_or_rev): + with tf.variable_scope(name+"_enc"+dstr, reuse=True if i > 0 else None): + dataset_t_bxd = dataset_bxtxd[:,t,:] + in_fac_t_bxf = tf.matmul(dataset_t_bxd, this_in_fac_W) + this_in_fac_b + in_fac_t_bxf.set_shape([None, used_in_factors_dim]) + if ext_input_dim > 0 and not hps.inject_ext_input_to_gen: + ext_input_t_bxi = ext_input_do_bxtxi[:,t,:] + enc_input_t_bxfpe = tf.concat( + axis=1, values=[in_fac_t_bxf, ext_input_t_bxi]) + else: + enc_input_t_bxfpe = in_fac_t_bxf + enc_out, enc_state = enc_cell(enc_input_t_bxfpe, enc_state) + enc_outs[t] = enc_out + + return enc_outs + + # Encode initial condition means and variances + # ([x_T, x_T-1, ... x_0] and [x_0, x_1, ... x_T] -> g0/c0) + self.ic_enc_fwd = [None] * num_steps + self.ic_enc_rev = [None] * num_steps + if ic_dim > 0: + enc_ic_cell = cell_class(hps.ic_enc_dim, + weight_scale=hps.cell_weight_scale, + clip_value=hps.cell_clip_value) + ic_enc_fwd = encode_data(dataset_do_bxtxd, enc_ic_cell, + "ic", "forward", + hps.num_steps_for_gen_ic) + ic_enc_rev = encode_data(dataset_do_bxtxd, enc_ic_cell, + "ic", "reverse", + hps.num_steps_for_gen_ic) + self.ic_enc_fwd = ic_enc_fwd + self.ic_enc_rev = ic_enc_rev + + # Encoder control input means and variances, bi-directional encoding so: + # ([x_T, x_T-1, ..., x_0] and [x_0, x_1 ... 
x_T] -> u_t) + self.ci_enc_fwd = [None] * num_steps + self.ci_enc_rev = [None] * num_steps + if co_dim > 0: + enc_ci_cell = cell_class(hps.ci_enc_dim, + weight_scale=hps.cell_weight_scale, + clip_value=hps.cell_clip_value) + ci_enc_fwd = encode_data(dataset_do_bxtxd, enc_ci_cell, + "ci", "forward", + hps.num_steps) + if hps.do_causal_controller: + ci_enc_rev = None + else: + ci_enc_rev = encode_data(dataset_do_bxtxd, enc_ci_cell, + "ci", "reverse", + hps.num_steps) + self.ci_enc_fwd = ci_enc_fwd + self.ci_enc_rev = ci_enc_rev + + # STOCHASTIC LATENT VARIABLES, priors and posteriors + # (initial conditions g0, and control inputs, u_t) + # Note that zs represent all the stochastic latent variables. + with tf.variable_scope("z", reuse=False): + self.prior_zs_g0 = None + self.posterior_zs_g0 = None + self.g0s_val = None + if ic_dim > 0: + self.prior_zs_g0 = \ + LearnableDiagonalGaussian(batch_size, ic_dim, name="prior_g0", + mean_init=0.0, + var_min=hps.ic_prior_var_min, + var_init=hps.ic_prior_var_scale, + var_max=hps.ic_prior_var_max) + ic_enc = tf.concat(axis=1, values=[ic_enc_fwd[-1], ic_enc_rev[0]]) + ic_enc = tf.nn.dropout(ic_enc, keep_prob) + self.posterior_zs_g0 = \ + DiagonalGaussianFromInput(ic_enc, ic_dim, "ic_enc_2_post_g0", + var_min=hps.ic_post_var_min) + if kind in ["train", "posterior_sample_and_average", + "posterior_push_mean"]: + zs_g0 = self.posterior_zs_g0 + else: + zs_g0 = self.prior_zs_g0 + if kind in ["train", "posterior_sample_and_average", "prior_sample"]: + self.g0s_val = zs_g0.sample + else: + self.g0s_val = zs_g0.mean + + # Priors for controller, 'co' for controller output + self.prior_zs_co = prior_zs_co = [None] * num_steps + self.posterior_zs_co = posterior_zs_co = [None] * num_steps + self.zs_co = zs_co = [None] * num_steps + self.prior_zs_ar_con = None + if co_dim > 0: + # Controller outputs + autocorrelation_taus = [hps.prior_ar_atau for x in range(hps.co_dim)] + noise_variances = [hps.prior_ar_nvar for x in range(hps.co_dim)] + self.prior_zs_ar_con = prior_zs_ar_con = \ + LearnableAutoRegressive1Prior(batch_size, hps.co_dim, + autocorrelation_taus, + noise_variances, + hps.do_train_prior_ar_atau, + hps.do_train_prior_ar_nvar, + num_steps, "u_prior_ar1") + + # CONTROLLER -> GENERATOR -> RATES + # (u(t) -> gen(t) -> factors(t) -> rates(t) -> p(x_t|z_t) ) + self.controller_outputs = u_t = [None] * num_steps + self.con_ics = con_state = None + self.con_states = con_states = [None] * num_steps + self.con_outs = con_outs = [None] * num_steps + self.gen_inputs = gen_inputs = [None] * num_steps + if co_dim > 0: + # gen_cell_class here for l2 penalty recurrent weights + # didn't split the cell_weight scale here, because I doubt it matters + con_cell = gen_cell_class(hps.con_dim, + input_weight_scale=hps.cell_weight_scale, + rec_weight_scale=hps.cell_weight_scale, + clip_value=hps.cell_clip_value, + recurrent_collections=['l2_con_reg']) + with tf.variable_scope("con", reuse=False): + self.con_ics = tf.tile( + tf.Variable(tf.zeros([1, hps.con_dim*con_cell.state_multiplier]), + name="c0"), + tf.stack([batch_size, 1])) + self.con_ics.set_shape([None, con_cell.state_size]) # tile loses shape + con_states[-1] = self.con_ics + + gen_cell = gen_cell_class(hps.gen_dim, + input_weight_scale=hps.gen_cell_input_weight_scale, + rec_weight_scale=hps.gen_cell_rec_weight_scale, + clip_value=hps.cell_clip_value, + recurrent_collections=['l2_gen_reg']) + with tf.variable_scope("gen", reuse=False): + if ic_dim == 0: + self.gen_ics = tf.tile( + tf.Variable(tf.zeros([1, 
gen_cell.state_size]), name="g0"), + tf.stack([batch_size, 1])) + else: + self.gen_ics = linear(self.g0s_val, gen_cell.state_size, + identity_if_possible=True, + name="g0_2_gen_ic") + + self.gen_states = gen_states = [None] * num_steps + self.gen_outs = gen_outs = [None] * num_steps + gen_states[-1] = self.gen_ics + gen_outs[-1] = gen_cell.output_from_state(gen_states[-1]) + self.factors = factors = [None] * num_steps + factors[-1] = linear(gen_outs[-1], factors_dim, do_bias=False, + normalized=True, name="gen_2_fac") + + self.rates = rates = [None] * num_steps + # rates[-1] is collected to potentially feed back to controller + with tf.variable_scope("glm", reuse=False): + if hps.output_dist == 'poisson': + log_rates_t0 = tf.matmul(factors[-1], this_out_fac_W) + this_out_fac_b + log_rates_t0.set_shape([None, None]) + rates[-1] = tf.exp(log_rates_t0) # rate + rates[-1].set_shape([None, hps.dataset_dims[hps.dataset_names[0]]]) + elif hps.output_dist == 'gaussian': + mean_n_logvars = tf.matmul(factors[-1],this_out_fac_W) + this_out_fac_b + mean_n_logvars.set_shape([None, None]) + means_t_bxd, logvars_t_bxd = tf.split(axis=1, num_or_size_splits=2, + value=mean_n_logvars) + rates[-1] = means_t_bxd + else: + assert False, "NIY" + + # We support multiple output distributions, for example Poisson, and also + # Gaussian. In these two cases respectively, there are one and two + # parameters (rates vs. mean and variance). So the output_dist_params + # tensor will variable sizes via tf.concat and tf.split, along the 1st + # dimension. So in the case of gaussian, for example, it'll be + # batch x (D+D), where each D dims is the mean, and then variances, + # respectively. For a distribution with 3 parameters, it would be + # batch x (D+D+D). + self.output_dist_params = dist_params = [None] * num_steps + self.log_p_xgz_b = log_p_xgz_b = 0.0 # log P(x|z) + for t in range(num_steps): + # Controller + if co_dim > 0: + # Build inputs for controller + tlag = t - hps.controller_input_lag + if tlag < 0: + con_in_f_t = tf.zeros_like(ci_enc_fwd[0]) + else: + con_in_f_t = ci_enc_fwd[tlag] + if hps.do_causal_controller: + # If controller is causal (wrt to data generation process), then it + # cannot see future data. Thus, excluding ci_enc_rev[t] is obvious. + # Less obvious is the need to exclude factors[t-1]. This arises + # because information flows from g0 through factors to the controller + # input. The g0 encoding is backwards, so we must necessarily exclude + # the factors in order to keep the controller input purely from a + # forward encoding (however unlikely it is that + # g0->factors->controller channel might actually be used in this way). 
+ con_in_list_t = [con_in_f_t] + else: + tlag_rev = t + hps.controller_input_lag + if tlag_rev >= num_steps: + # better than zeros + con_in_r_t = tf.zeros_like(ci_enc_rev[0]) + else: + con_in_r_t = ci_enc_rev[tlag_rev] + con_in_list_t = [con_in_f_t, con_in_r_t] + + if hps.do_feed_factors_to_controller: + if hps.feedback_factors_or_rates == "factors": + con_in_list_t.append(factors[t-1]) + elif hps.feedback_factors_or_rates == "rates": + con_in_list_t.append(rates[t-1]) + else: + assert False, "NIY" + + con_in_t = tf.concat(axis=1, values=con_in_list_t) + con_in_t = tf.nn.dropout(con_in_t, keep_prob) + with tf.variable_scope("con", reuse=True if t > 0 else None): + con_outs[t], con_states[t] = con_cell(con_in_t, con_states[t-1]) + posterior_zs_co[t] = \ + DiagonalGaussianFromInput(con_outs[t], co_dim, + name="con_to_post_co") + if kind == "train": + u_t[t] = posterior_zs_co[t].sample + elif kind == "posterior_sample_and_average": + u_t[t] = posterior_zs_co[t].sample + elif kind == "posterior_push_mean": + u_t[t] = posterior_zs_co[t].mean + else: + u_t[t] = prior_zs_ar_con.samples_t[t] + + # Inputs to the generator (controller output + external input) + if ext_input_dim > 0 and hps.inject_ext_input_to_gen: + ext_input_t_bxi = ext_input_do_bxtxi[:,t,:] + if co_dim > 0: + gen_inputs[t] = tf.concat(axis=1, values=[u_t[t], ext_input_t_bxi]) + else: + gen_inputs[t] = ext_input_t_bxi + else: + gen_inputs[t] = u_t[t] + + # Generator + data_t_bxd = dataset_ph[:,t,:] + with tf.variable_scope("gen", reuse=True if t > 0 else None): + gen_outs[t], gen_states[t] = gen_cell(gen_inputs[t], gen_states[t-1]) + gen_outs[t] = tf.nn.dropout(gen_outs[t], keep_prob) + with tf.variable_scope("gen", reuse=True): # ic defined it above + factors[t] = linear(gen_outs[t], factors_dim, do_bias=False, + normalized=True, name="gen_2_fac") + with tf.variable_scope("glm", reuse=True if t > 0 else None): + if hps.output_dist == 'poisson': + log_rates_t = tf.matmul(factors[t], this_out_fac_W) + this_out_fac_b + log_rates_t.set_shape([None, None]) + rates[t] = dist_params[t] = tf.exp(tf.clip_by_value(log_rates_t, -hps._clip_value, hps._clip_value)) # rates feed back + rates[t].set_shape([None, hps.dataset_dims[hps.dataset_names[0]]]) + loglikelihood_t = Poisson(log_rates_t).logp(data_t_bxd) + + elif hps.output_dist == 'gaussian': + mean_n_logvars = tf.matmul(factors[t],this_out_fac_W) + this_out_fac_b + mean_n_logvars.set_shape([None, None]) + means_t_bxd, logvars_t_bxd = tf.split(axis=1, num_or_size_splits=2, + value=mean_n_logvars) + rates[t] = means_t_bxd # rates feed back to controller + dist_params[t] = tf.concat( + axis=1, values=[means_t_bxd, tf.exp(tf.clip_by_value(logvars_t_bxd, -hps._clip_value, hps._clip_value))]) + loglikelihood_t = \ + diag_gaussian_log_likelihood(data_t_bxd, + means_t_bxd, logvars_t_bxd) + else: + assert False, "NIY" + + log_p_xgz_b += tf.reduce_sum(loglikelihood_t, [1]) + + # Correlation of inferred inputs cost. + self.corr_cost = tf.constant(0.0) + if hps.co_mean_corr_scale > 0.0: + all_sum_corr = [] + for i in range(hps.co_dim): + for j in range(i+1, hps.co_dim): + sum_corr_ij = tf.constant(0.0) + for t in range(num_steps): + u_mean_t = posterior_zs_co[t].mean + sum_corr_ij += u_mean_t[:,i]*u_mean_t[:,j] + all_sum_corr.append(0.5 * tf.square(sum_corr_ij)) + self.corr_cost = tf.reduce_mean(all_sum_corr) # div by batch and by n*(n-1)/2 pairs + + # Variational Lower Bound on posterior, p(z|x), plus reconstruction cost. 
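+    # The bound assembled below is the usual ELBO,
+    #   L(x) = E_q[log p(x|z)] - KL(q(z|x) || p(z)),
+    # with the KL split into an initial-condition (g0) term and a controller-
+    # output term, weighted by kl_ic_weight and kl_co_weight respectively.
+    # The IWAE bound instead averages inside the log:
+    #   log (1/k) sum_k exp(lb_k) = -log k + log_sum_exp(lb).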
+ # KL and reconstruction costs are normalized only by batch size, not by + # dimension, or by time steps. + kl_cost_g0_b = tf.zeros_like(batch_size, dtype=tf.float32) + kl_cost_co_b = tf.zeros_like(batch_size, dtype=tf.float32) + self.kl_cost = tf.constant(0.0) # VAE KL cost + self.recon_cost = tf.constant(0.0) # VAE reconstruction cost + self.nll_bound_vae = tf.constant(0.0) + self.nll_bound_iwae = tf.constant(0.0) # for eval with IWAE cost. + if kind in ["train", "posterior_sample_and_average", "posterior_push_mean"]: + kl_cost_g0_b = 0.0 + kl_cost_co_b = 0.0 + if ic_dim > 0: + g0_priors = [self.prior_zs_g0] + g0_posts = [self.posterior_zs_g0] + kl_cost_g0_b = KLCost_GaussianGaussian(g0_posts, g0_priors).kl_cost_b + kl_cost_g0_b = hps.kl_ic_weight * kl_cost_g0_b + if co_dim > 0: + kl_cost_co_b = \ + KLCost_GaussianGaussianProcessSampled( + posterior_zs_co, prior_zs_ar_con).kl_cost_b + kl_cost_co_b = hps.kl_co_weight * kl_cost_co_b + + # L = -KL + log p(x|z), to maximize bound on likelihood + # -L = KL - log p(x|z), to minimize bound on NLL + # so 'reconstruction cost' is negative log likelihood + self.recon_cost = - tf.reduce_mean(log_p_xgz_b) + self.kl_cost = tf.reduce_mean(kl_cost_g0_b + kl_cost_co_b) + + lb_on_ll_b = log_p_xgz_b - kl_cost_g0_b - kl_cost_co_b + + # VAE error averages outside the log + self.nll_bound_vae = -tf.reduce_mean(lb_on_ll_b) + + # IWAE error averages inside the log + k = tf.cast(tf.shape(log_p_xgz_b)[0], tf.float32) + iwae_lb_on_ll = -tf.log(k) + log_sum_exp(lb_on_ll_b) + self.nll_bound_iwae = -iwae_lb_on_ll + + # L2 regularization on the generator, normalized by number of parameters. + self.l2_cost = tf.constant(0.0) + if self.hps.l2_gen_scale > 0.0 or self.hps.l2_con_scale > 0.0: + l2_costs = [] + l2_numels = [] + l2_reg_var_lists = [tf.get_collection('l2_gen_reg'), + tf.get_collection('l2_con_reg')] + l2_reg_scales = [self.hps.l2_gen_scale, self.hps.l2_con_scale] + for l2_reg_vars, l2_scale in zip(l2_reg_var_lists, l2_reg_scales): + for v in l2_reg_vars: + numel = tf.reduce_prod(tf.concat(axis=0, values=tf.shape(v))) + numel_f = tf.cast(numel, tf.float32) + l2_numels.append(numel_f) + v_l2 = tf.reduce_sum(v*v) + l2_costs.append(0.5 * l2_scale * v_l2) + self.l2_cost = tf.add_n(l2_costs) / tf.add_n(l2_numels) + + # Compute the cost for training, part of the graph regardless. + # The KL cost can be problematic at the beginning of optimization, + # so we allow an exponential increase in weighting the KL from 0 + # to 1. 
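+    # The warm-up computed below is a ramp that is 0 before kl_start_step and
+    # then grows as (step - kl_start_step) / kl_increase_steps, capped at 1;
+    # the same schedule (with l2_start_step / l2_increase_steps) is applied to
+    # the L2 penalty.  E.g. kl_start_step=0 and kl_increase_steps=2000 reach
+    # full KL weight at step 2000.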
+ self.kl_decay_step = tf.maximum(self.train_step - hps.kl_start_step, 0) + self.l2_decay_step = tf.maximum(self.train_step - hps.l2_start_step, 0) + kl_decay_step_f = tf.cast(self.kl_decay_step, tf.float32) + l2_decay_step_f = tf.cast(self.l2_decay_step, tf.float32) + kl_increase_steps_f = tf.cast(hps.kl_increase_steps, tf.float32) + l2_increase_steps_f = tf.cast(hps.l2_increase_steps, tf.float32) + self.kl_weight = kl_weight = \ + tf.minimum(kl_decay_step_f / kl_increase_steps_f, 1.0) + self.l2_weight = l2_weight = \ + tf.minimum(l2_decay_step_f / l2_increase_steps_f, 1.0) + + self.timed_kl_cost = kl_weight * self.kl_cost + self.timed_l2_cost = l2_weight * self.l2_cost + self.weight_corr_cost = hps.co_mean_corr_scale * self.corr_cost + self.cost = self.recon_cost + self.timed_kl_cost + \ + self.timed_l2_cost + self.weight_corr_cost + + if kind != "train": + # save every so often + self.seso_saver = tf.train.Saver(tf.global_variables(), + max_to_keep=hps.max_ckpt_to_keep) + # lowest validation error + self.lve_saver = tf.train.Saver(tf.global_variables(), + max_to_keep=hps.max_ckpt_to_keep_lve) + + return + + # OPTIMIZATION + # train the io matrices only + if self.hps.do_train_io_only: + self.train_vars = tvars = \ + tf.get_collection('IO_transformations', + scope=tf.get_variable_scope().name) + # train the encoder only + elif self.hps.do_train_encoder_only: + tvars1 = \ + tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, + scope='LFADS/ic_enc_*') + tvars2 = \ + tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, + scope='LFADS/z/ic_enc_*') + + self.train_vars = tvars = tvars1 + tvars2 + # train all variables + else: + self.train_vars = tvars = \ + tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, + scope=tf.get_variable_scope().name) + print("done.") + print("Model Variables (to be optimized): ") + total_params = 0 + for i in range(len(tvars)): + shape = tvars[i].get_shape().as_list() + print(" ", i, tvars[i].name, shape) + total_params += np.prod(shape) + print("Total model parameters: ", total_params) + + grads = tf.gradients(self.cost, tvars) + grads, grad_global_norm = tf.clip_by_global_norm(grads, hps.max_grad_norm) + opt = tf.train.AdamOptimizer(self.learning_rate, beta1=0.9, beta2=0.999, + epsilon=1e-01) + self.grads = grads + self.grad_global_norm = grad_global_norm + self.train_op = opt.apply_gradients( + zip(grads, tvars), global_step=self.train_step) + + self.seso_saver = tf.train.Saver(tf.global_variables(), + max_to_keep=hps.max_ckpt_to_keep) + + # lowest validation error + self.lve_saver = tf.train.Saver(tf.global_variables(), + max_to_keep=hps.max_ckpt_to_keep) + + # SUMMARIES, used only during training. 
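+    # The example summary below logs a single rendered figure:
+    # plot_single_example() fills a whole batch with one trial, runs the
+    # posterior, renders the result with plot_lfads, and feeds the
+    # 1 x H x W x 3 image through example_image into "example_summaries".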
+ # example summary + self.example_image = tf.placeholder(tf.float32, shape=[1,None,None,3], + name='image_tensor') + self.example_summ = tf.summary.image("LFADS example", self.example_image, + collections=["example_summaries"]) + + # general training summaries + self.lr_summ = tf.summary.scalar("Learning rate", self.learning_rate) + self.kl_weight_summ = tf.summary.scalar("KL weight", self.kl_weight) + self.l2_weight_summ = tf.summary.scalar("L2 weight", self.l2_weight) + self.corr_cost_summ = tf.summary.scalar("Corr cost", self.weight_corr_cost) + self.grad_global_norm_summ = tf.summary.scalar("Gradient global norm", + self.grad_global_norm) + if hps.co_dim > 0: + self.atau_summ = [None] * hps.co_dim + self.pvar_summ = [None] * hps.co_dim + for c in range(hps.co_dim): + self.atau_summ[c] = \ + tf.summary.scalar("AR Autocorrelation taus " + str(c), + tf.exp(self.prior_zs_ar_con.logataus_1xu[0,c])) + self.pvar_summ[c] = \ + tf.summary.scalar("AR Variances " + str(c), + tf.exp(self.prior_zs_ar_con.logpvars_1xu[0,c])) + + # cost summaries, separated into different collections for + # training vs validation. We make placeholders for these, because + # even though the graph computes these costs on a per-batch basis, + # we want to report the more reliable metric of per-epoch cost. + kl_cost_ph = tf.placeholder(tf.float32, shape=[], name='kl_cost_ph') + self.kl_t_cost_summ = tf.summary.scalar("KL cost (train)", kl_cost_ph, + collections=["train_summaries"]) + self.kl_v_cost_summ = tf.summary.scalar("KL cost (valid)", kl_cost_ph, + collections=["valid_summaries"]) + l2_cost_ph = tf.placeholder(tf.float32, shape=[], name='l2_cost_ph') + self.l2_cost_summ = tf.summary.scalar("L2 cost", l2_cost_ph, + collections=["train_summaries"]) + + recon_cost_ph = tf.placeholder(tf.float32, shape=[], name='recon_cost_ph') + self.recon_t_cost_summ = tf.summary.scalar("Reconstruction cost (train)", + recon_cost_ph, + collections=["train_summaries"]) + self.recon_v_cost_summ = tf.summary.scalar("Reconstruction cost (valid)", + recon_cost_ph, + collections=["valid_summaries"]) + + total_cost_ph = tf.placeholder(tf.float32, shape=[], name='total_cost_ph') + self.cost_t_summ = tf.summary.scalar("Total cost (train)", total_cost_ph, + collections=["train_summaries"]) + self.cost_v_summ = tf.summary.scalar("Total cost (valid)", total_cost_ph, + collections=["valid_summaries"]) + + self.kl_cost_ph = kl_cost_ph + self.l2_cost_ph = l2_cost_ph + self.recon_cost_ph = recon_cost_ph + self.total_cost_ph = total_cost_ph + + # Merged summaries, for easy coding later. + self.merged_examples = tf.summary.merge_all(key="example_summaries") + self.merged_generic = tf.summary.merge_all() # default key is 'summaries' + self.merged_train = tf.summary.merge_all(key="train_summaries") + self.merged_valid = tf.summary.merge_all(key="valid_summaries") + + session = tf.get_default_session() + self.logfile = os.path.join(hps.lfads_save_dir, "lfads_log") + self.writer = tf.summary.FileWriter(self.logfile) + + def build_feed_dict(self, train_name, data_bxtxd, ext_input_bxtxi=None, + keep_prob=None): + """Build the feed dictionary, handles cases where there is no value defined. + + Args: + train_name: The key into the datasets, to set the tf.case statement for + the proper readin / readout matrices. + data_bxtxd: The data tensor + ext_input_bxtxi (optional): The external input tensor + keep_prob: The drop out keep probability. 
+ + Returns: + The feed dictionary with TF tensors as keys and data as values, for use + with tf.Session.run() + + """ + feed_dict = {} + B, T, _ = data_bxtxd.shape + feed_dict[self.dataName] = train_name + feed_dict[self.dataset_ph] = data_bxtxd + + if self.ext_input is not None and ext_input_bxtxi is not None: + feed_dict[self.ext_input] = ext_input_bxtxi + + if keep_prob is None: + feed_dict[self.keep_prob] = self.hps.keep_prob + else: + feed_dict[self.keep_prob] = keep_prob + + return feed_dict + + @staticmethod + def get_batch(data_extxd, ext_input_extxi=None, batch_size=None, + example_idxs=None): + """Get a batch of data, either randomly chosen, or specified directly. + + Args: + data_extxd: The data to model, numpy tensors with shape: + # examples x # time steps x # dimensions + ext_input_extxi (optional): The external inputs, numpy tensor with shape: + # examples x # time steps x # external input dimensions + batch_size: The size of the batch to return + example_idxs (optional): The example indices used to select examples. + + Returns: + A tuple with two parts: + 1. Batched data numpy tensor with shape: + batch_size x # time steps x # dimensions + 2. Batched external input numpy tensor with shape: + batch_size x # time steps x # external input dims + """ + assert batch_size is not None or example_idxs is not None, "Problems" + E, T, D = data_extxd.shape + if example_idxs is None: + example_idxs = np.random.choice(E, batch_size) + + ext_input_bxtxi = None + if ext_input_extxi is not None: + ext_input_bxtxi = ext_input_extxi[example_idxs,:,:] + + return data_extxd[example_idxs,:,:], ext_input_bxtxi + + @staticmethod + def example_idxs_mod_batch_size(nexamples, batch_size): + """Given a number of examples, E, and a batch_size, B, generate indices + [0, 1, 2, ... B-1; + [B, B+1, ... 2*B-1; + ... + ] + returning those indices as a 2-dim tensor shaped like E/B x B. Note that + shape is only correct if E % B == 0. If not, then an extra row is generated + so that the remainder of examples is included. The extra examples are + explicitly to to the zero index (see randomize_example_idxs_mod_batch_size) + for randomized behavior. + + Args: + nexamples: The number of examples to batch up. + batch_size: The size of the batch. + Returns: + 2-dim tensor as described above. + """ + bmrem = batch_size - (nexamples % batch_size) + bmrem_examples = [] + if bmrem < batch_size: + #bmrem_examples = np.zeros(bmrem, dtype=np.int32) + ridxs = np.random.permutation(nexamples)[0:bmrem].astype(np.int32) + bmrem_examples = np.sort(ridxs) + example_idxs = range(nexamples) + list(bmrem_examples) + example_idxs_e_x_edivb = np.reshape(example_idxs, [-1, batch_size]) + return example_idxs_e_x_edivb, bmrem + + @staticmethod + def randomize_example_idxs_mod_batch_size(nexamples, batch_size): + """Indices 1:nexamples, randomized, in 2D form of + shape = (nexamples / batch_size) x batch_size. The remainder + is managed by drawing randomly from 1:nexamples. + + Args: + nexamples: number of examples to randomize + batch_size: number of elements in batch + + Returns: + The randomized, properly shaped indicies. 
+ """ + assert nexamples > batch_size, "Problems" + bmrem = batch_size - nexamples % batch_size + bmrem_examples = [] + if bmrem < batch_size: + bmrem_examples = np.random.choice(range(nexamples), + size=bmrem, replace=False) + example_idxs = range(nexamples) + list(bmrem_examples) + mixed_example_idxs = np.random.permutation(example_idxs) + example_idxs_e_x_edivb = np.reshape(mixed_example_idxs, [-1, batch_size]) + return example_idxs_e_x_edivb, bmrem + + def shuffle_spikes_in_time(self, data_bxtxd): + """Shuffle the spikes in the temporal dimension. This is useful to + help the LFADS system avoid overfitting to individual spikes or fast + oscillations found in the data that are irrelevant to behavior. A + pure 'tabula rasa' approach would avoid this, but LFADS is sensitive + enough to pick up dynamics that you may not want. + + Args: + data_bxtxd: numpy array of spike count data to be shuffled. + Returns: + S_bxtxd, a numpy array with the same dimensions and contents as + data_bxtxd, but shuffled appropriately. + + """ + + B, T, N = data_bxtxd.shape + w = self.hps.temporal_spike_jitter_width + + if w == 0: + return data_bxtxd + + max_counts = np.max(data_bxtxd) + S_bxtxd = np.zeros([B,T,N]) + + # Intuitively, shuffle spike occurances, 0 or 1, but since we have counts, + # Do it over and over again up to the max count. + for mc in range(1,max_counts+1): + idxs = np.nonzero(data_bxtxd >= mc) + + data_ones = np.zeros_like(data_bxtxd) + data_ones[data_bxtxd >= mc] = 1 + + nfound = len(idxs[0]) + shuffles_incrs_in_time = np.random.randint(-w, w, size=nfound) + + shuffle_tidxs = idxs[1].copy() + shuffle_tidxs += shuffles_incrs_in_time + + # Reflect on the boundaries to not lose mass. + shuffle_tidxs[shuffle_tidxs < 0] = -shuffle_tidxs[shuffle_tidxs < 0] + shuffle_tidxs[shuffle_tidxs > T-1] = \ + (T-1)-(shuffle_tidxs[shuffle_tidxs > T-1] -(T-1)) + + for iii in zip(idxs[0], shuffle_tidxs, idxs[2]): + S_bxtxd[iii] += 1 + + return S_bxtxd + + def shuffle_and_flatten_datasets(self, datasets, kind='train'): + """Since LFADS supports multiple datasets in the same dynamical model, + we have to be careful to use all the data in a single training epoch. But + since the datasets my have different data dimensionality, we cannot batch + examples from data dictionaries together. Instead, we generate random + batches within each data dictionary, and then randomize these batches + while holding onto the dataname, so that when it's time to feed + the graph, the correct in/out matrices can be selected, per batch. + + Args: + datasets: A dict of data dicts. The dataset dict is simply a + name(string)-> data dictionary mapping (See top of lfads.py). + kind: 'train' or 'valid' + + Returns: + A flat list, in which each element is a pair ('name', indices). + """ + batch_size = self.hps.batch_size + ndatasets = len(datasets) + random_example_idxs = {} + epoch_idxs = {} + all_name_example_idx_pairs = [] + kind_data = kind + '_data' + for name, data_dict in datasets.items(): + nexamples, ntime, data_dim = data_dict[kind_data].shape + epoch_idxs[name] = 0 + random_example_idxs, _ = \ + self.randomize_example_idxs_mod_batch_size(nexamples, batch_size) + + epoch_size = random_example_idxs.shape[0] + names = [name] * epoch_size + all_name_example_idx_pairs += zip(names, random_example_idxs) + + np.random.shuffle(all_name_example_idx_pairs) # shuffle in place + + return all_name_example_idx_pairs + + def train_epoch(self, datasets, batch_size=None, do_save_ckpt=True): + """Train the model through the entire dataset once. 
+ + Args: + datasets: A dict of data dicts. The dataset dict is simply a + name(string)-> data dictionary mapping (See top of lfads.py). + batch_size (optional): The batch_size to use + do_save_ckpt (optional): Should the routine save a checkpoint on this + training epoch? + + Returns: + A tuple with 6 float values: + (total cost of the epoch, epoch reconstruction cost, + epoch kl cost, KL weight used this training epoch, + total l2 cost on generator, and the corresponding weight). + """ + ops_to_eval = [self.cost, self.recon_cost, + self.kl_cost, self.kl_weight, + self.l2_cost, self.l2_weight, + self.train_op] + collected_op_values = self.run_epoch(datasets, ops_to_eval, kind="train") + + total_cost = total_recon_cost = total_kl_cost = 0.0 + # normalizing by batch done in distributions.py + epoch_size = len(collected_op_values) + for op_values in collected_op_values: + total_cost += op_values[0] + total_recon_cost += op_values[1] + total_kl_cost += op_values[2] + + kl_weight = collected_op_values[-1][3] + l2_cost = collected_op_values[-1][4] + l2_weight = collected_op_values[-1][5] + + epoch_total_cost = total_cost / epoch_size + epoch_recon_cost = total_recon_cost / epoch_size + epoch_kl_cost = total_kl_cost / epoch_size + + if do_save_ckpt: + session = tf.get_default_session() + checkpoint_path = os.path.join(self.hps.lfads_save_dir, + self.hps.checkpoint_name + '.ckpt') + self.seso_saver.save(session, checkpoint_path, + global_step=self.train_step) + + return epoch_total_cost, epoch_recon_cost, epoch_kl_cost, \ + kl_weight, l2_cost, l2_weight + + + def run_epoch(self, datasets, ops_to_eval, kind="train", batch_size=None, + do_collect=True, keep_prob=None): + """Run the model through the entire dataset once. + + Args: + datasets: A dict of data dicts. The dataset dict is simply a + name(string)-> data dictionary mapping (See top of lfads.py). + ops_to_eval: A list of tensorflow operations that will be evaluated in + the tf.session.run() call. + batch_size (optional): The batch_size to use + do_collect (optional): Should the routine collect all session.run + output as a list, and return it? + keep_prob (optional): The dropout keep probability. + + Returns: + A list of lists, the internal list is the return for the ops for each + session.run() call. The outer list collects over the epoch. + """ + hps = self.hps + all_name_example_idx_pairs = \ + self.shuffle_and_flatten_datasets(datasets, kind) + + kind_data = kind + '_data' + kind_ext_input = kind + '_ext_input' + + total_cost = total_recon_cost = total_kl_cost = 0.0 + session = tf.get_default_session() + epoch_size = len(all_name_example_idx_pairs) + evaled_ops_list = [] + for name, example_idxs in all_name_example_idx_pairs: + data_dict = datasets[name] + data_extxd = data_dict[kind_data] + if hps.output_dist == 'poisson' and hps.temporal_spike_jitter_width > 0: + data_extxd = self.shuffle_spikes_in_time(data_extxd) + + ext_input_extxi = data_dict[kind_ext_input] + data_bxtxd, ext_input_bxtxi = self.get_batch(data_extxd, ext_input_extxi, + example_idxs=example_idxs) + + feed_dict = self.build_feed_dict(name, data_bxtxd, ext_input_bxtxi, + keep_prob=keep_prob) + evaled_ops_np = session.run(ops_to_eval, feed_dict=feed_dict) + if do_collect: + evaled_ops_list.append(evaled_ops_np) + + return evaled_ops_list + + def summarize_all(self, datasets, summary_values): + """Plot and summarize stuff in tensorboard. 
+ + Note that everything done in the current function is otherwise done on + a single, randomly selected dataset (except for summary_values, which are + passed in.) + + Args: + datasets, the dictionary of datasets used in the study. + summary_values: These summary values are created from the training loop, + and so summarize the entire set of datasets. + """ + hps = self.hps + tr_kl_cost = summary_values['tr_kl_cost'] + tr_recon_cost = summary_values['tr_recon_cost'] + tr_total_cost = summary_values['tr_total_cost'] + kl_weight = summary_values['kl_weight'] + l2_weight = summary_values['l2_weight'] + l2_cost = summary_values['l2_cost'] + has_any_valid_set = summary_values['has_any_valid_set'] + i = summary_values['nepochs'] + + session = tf.get_default_session() + train_summ, train_step = session.run([self.merged_train, + self.train_step], + feed_dict={self.l2_cost_ph:l2_cost, + self.kl_cost_ph:tr_kl_cost, + self.recon_cost_ph:tr_recon_cost, + self.total_cost_ph:tr_total_cost}) + self.writer.add_summary(train_summ, train_step) + if has_any_valid_set: + ev_kl_cost = summary_values['ev_kl_cost'] + ev_recon_cost = summary_values['ev_recon_cost'] + ev_total_cost = summary_values['ev_total_cost'] + eval_summ = session.run(self.merged_valid, + feed_dict={self.kl_cost_ph:ev_kl_cost, + self.recon_cost_ph:ev_recon_cost, + self.total_cost_ph:ev_total_cost}) + self.writer.add_summary(eval_summ, train_step) + print("Epoch:%d, step:%d (TRAIN, VALID): total: %.2f, %.2f\ + recon: %.2f, %.2f, kl: %.2f, %.2f, l2: %.5f,\ + kl weight: %.2f, l2 weight: %.2f" % \ + (i, train_step, tr_total_cost, ev_total_cost, + tr_recon_cost, ev_recon_cost, tr_kl_cost, ev_kl_cost, + l2_cost, kl_weight, l2_weight)) + + csv_outstr = "epoch,%d, step,%d, total,%.2f,%.2f, \ + recon,%.2f,%.2f, kl,%.2f,%.2f, l2,%.5f, \ + klweight,%.2f, l2weight,%.2f\n"% \ + (i, train_step, tr_total_cost, ev_total_cost, + tr_recon_cost, ev_recon_cost, tr_kl_cost, ev_kl_cost, + l2_cost, kl_weight, l2_weight) + + else: + print("Epoch:%d, step:%d TRAIN: total: %.2f recon: %.2f, kl: %.2f,\ + l2: %.5f, kl weight: %.2f, l2 weight: %.2f" % \ + (i, train_step, tr_total_cost, tr_recon_cost, tr_kl_cost, + l2_cost, kl_weight, l2_weight)) + csv_outstr = "epoch,%d, step,%d, total,%.2f, recon,%.2f, kl,%.2f, \ + l2,%.5f, klweight,%.2f, l2weight,%.2f\n"% \ + (i, train_step, tr_total_cost, tr_recon_cost, + tr_kl_cost, l2_cost, kl_weight, l2_weight) + + if self.hps.csv_log: + csv_file = os.path.join(self.hps.lfads_save_dir, self.hps.csv_log+'.csv') + with open(csv_file, "a") as myfile: + myfile.write(csv_outstr) + + + def plot_single_example(self, datasets): + """Plot an image relating to a randomly chosen, specific example. We use + posterior sample and average by taking one example, and filling a whole + batch with that example, sample from the posterior, and then average the + quantities. 
+ + """ + hps = self.hps + all_data_names = datasets.keys() + data_name = np.random.permutation(all_data_names)[0] + data_dict = datasets[data_name] + has_valid_set = True if data_dict['valid_data'] is not None else False + cf = 1.0 # plotting concern + + # posterior sample and average here + E, _, _ = data_dict['train_data'].shape + eidx = np.random.choice(E) + example_idxs = eidx * np.ones(hps.batch_size, dtype=np.int32) + + train_data_bxtxd, train_ext_input_bxtxi = \ + self.get_batch(data_dict['train_data'], data_dict['train_ext_input'], + example_idxs=example_idxs) + + truth_train_data_bxtxd = None + if 'train_truth' in data_dict and data_dict['train_truth'] is not None: + truth_train_data_bxtxd, _ = self.get_batch(data_dict['train_truth'], + example_idxs=example_idxs) + cf = data_dict['conversion_factor'] + + # plotter does averaging + train_model_values = self.eval_model_runs_batch(data_name, + train_data_bxtxd, + train_ext_input_bxtxi, + do_average_batch=False) + + train_step = train_model_values['train_steps'] + feed_dict = self.build_feed_dict(data_name, train_data_bxtxd, + train_ext_input_bxtxi, keep_prob=1.0) + + session = tf.get_default_session() + generic_summ = session.run(self.merged_generic, feed_dict=feed_dict) + self.writer.add_summary(generic_summ, train_step) + + valid_data_bxtxd = valid_model_values = valid_ext_input_bxtxi = None + truth_valid_data_bxtxd = None + if has_valid_set: + E, _, _ = data_dict['valid_data'].shape + eidx = np.random.choice(E) + example_idxs = eidx * np.ones(hps.batch_size, dtype=np.int32) + valid_data_bxtxd, valid_ext_input_bxtxi = \ + self.get_batch(data_dict['valid_data'], + data_dict['valid_ext_input'], + example_idxs=example_idxs) + if 'valid_truth' in data_dict and data_dict['valid_truth'] is not None: + truth_valid_data_bxtxd, _ = self.get_batch(data_dict['valid_truth'], + example_idxs=example_idxs) + else: + truth_valid_data_bxtxd = None + + # plotter does averaging + valid_model_values = self.eval_model_runs_batch(data_name, + valid_data_bxtxd, + valid_ext_input_bxtxi, + do_average_batch=False) + + example_image = plot_lfads(train_bxtxd=train_data_bxtxd, + train_model_vals=train_model_values, + train_ext_input_bxtxi=train_ext_input_bxtxi, + train_truth_bxtxd=truth_train_data_bxtxd, + valid_bxtxd=valid_data_bxtxd, + valid_model_vals=valid_model_values, + valid_ext_input_bxtxi=valid_ext_input_bxtxi, + valid_truth_bxtxd=truth_valid_data_bxtxd, + bidx=None, cf=cf, output_dist=hps.output_dist) + example_image = np.expand_dims(example_image, axis=0) + example_summ = session.run(self.merged_examples, + feed_dict={self.example_image : example_image}) + self.writer.add_summary(example_summ) + + def train_model(self, datasets): + """Train the model, print per-epoch information, and save checkpoints. + + Loop over training epochs. The function that actually does the + training is train_epoch. This function iterates over the training + data, one epoch at a time. The learning rate schedule is such + that it will stay the same until the cost goes up in comparison to + the last few values, then it will drop. + + Args: + datasets: A dict of data dicts. The dataset dict is simply a + name(string)-> data dictionary mapping (See top of lfads.py). 
+ """ + hps = self.hps + has_any_valid_set = False + for data_dict in datasets.values(): + if data_dict['valid_data'] is not None: + has_any_valid_set = True + break + + session = tf.get_default_session() + lr = session.run(self.learning_rate) + lr_stop = hps.learning_rate_stop + i = -1 + train_costs = [] + valid_costs = [] + ev_total_cost = ev_recon_cost = ev_kl_cost = 0.0 + lowest_ev_cost = np.Inf + while True: + i += 1 + do_save_ckpt = True if i % 10 ==0 else False + tr_total_cost, tr_recon_cost, tr_kl_cost, kl_weight, l2_cost, l2_weight = \ + self.train_epoch(datasets, do_save_ckpt=do_save_ckpt) + + # Evaluate the validation cost, and potentially save. Note that this + # routine will not save a validation checkpoint until the kl weight and + # l2 weights are equal to 1.0. + if has_any_valid_set: + ev_total_cost, ev_recon_cost, ev_kl_cost = \ + self.eval_cost_epoch(datasets, kind='valid') + valid_costs.append(ev_total_cost) + + # > 1 may give more consistent results, but not the actual lowest vae. + # == 1 gives the lowest vae seen so far. + n_lve = 1 + run_avg_lve = np.mean(valid_costs[-n_lve:]) + + # conditions for saving checkpoints: + # KL weight must have finished stepping (>=1.0), AND + # L2 weight must have finished stepping OR L2 is not being used, AND + # the current run has a lower LVE than previous runs AND + # len(valid_costs > n_lve) (not sure what that does) + if kl_weight >= 1.0 and \ + (l2_weight >= 1.0 or \ + (self.hps.l2_gen_scale == 0.0 and self.hps.l2_con_scale == 0.0)) \ + and (len(valid_costs) > n_lve and run_avg_lve < lowest_ev_cost): + + lowest_ev_cost = run_avg_lve + checkpoint_path = os.path.join(self.hps.lfads_save_dir, + self.hps.checkpoint_name + '_lve.ckpt') + self.lve_saver.save(session, checkpoint_path, + global_step=self.train_step, + latest_filename='checkpoint_lve') + + # Plot and summarize. + values = {'nepochs':i, 'has_any_valid_set': has_any_valid_set, + 'tr_total_cost':tr_total_cost, 'ev_total_cost':ev_total_cost, + 'tr_recon_cost':tr_recon_cost, 'ev_recon_cost':ev_recon_cost, + 'tr_kl_cost':tr_kl_cost, 'ev_kl_cost':ev_kl_cost, + 'l2_weight':l2_weight, 'kl_weight':kl_weight, + 'l2_cost':l2_cost} + self.summarize_all(datasets, values) + self.plot_single_example(datasets) + + # Manage learning rate. + train_res = tr_total_cost + n_lr = hps.learning_rate_n_to_compare + if len(train_costs) > n_lr and train_res > np.max(train_costs[-n_lr:]): + _ = session.run(self.learning_rate_decay_op) + lr = session.run(self.learning_rate) + print(" Decreasing learning rate to %f." % lr) + # Force the system to run n_lr times while at this lr. + train_costs.append(np.inf) + else: + train_costs.append(train_res) + + if lr < lr_stop: + print("Stopping optimization based on learning rate criteria.") + break + + def eval_cost_epoch(self, datasets, kind='train', ext_input_extxi=None, + batch_size=None): + """Evaluate the cost of the epoch. + + Args: + data_dict: The dictionary of data (training and validation) used for + training and evaluation of the model, respectively. 
+ + Returns: + a 3 tuple of costs: + (epoch total cost, epoch reconstruction cost, epoch KL cost) + """ + ops_to_eval = [self.cost, self.recon_cost, self.kl_cost] + collected_op_values = self.run_epoch(datasets, ops_to_eval, kind=kind, + keep_prob=1.0) + + total_cost = total_recon_cost = total_kl_cost = 0.0 + # normalizing by batch done in distributions.py + epoch_size = len(collected_op_values) + for op_values in collected_op_values: + total_cost += op_values[0] + total_recon_cost += op_values[1] + total_kl_cost += op_values[2] + + epoch_total_cost = total_cost / epoch_size + epoch_recon_cost = total_recon_cost / epoch_size + epoch_kl_cost = total_kl_cost / epoch_size + + return epoch_total_cost, epoch_recon_cost, epoch_kl_cost + + def eval_model_runs_batch(self, data_name, data_bxtxd, ext_input_bxtxi=None, + do_eval_cost=False, do_average_batch=False): + """Returns all the goodies for the entire model, per batch. + + If data_bxtxd and ext_input_bxtxi can have fewer than batch_size along dim 1 + in which case this handles the padding and truncating automatically + + Args: + data_name: The name of the data dict, to select which in/out matrices + to use. + data_bxtxd: Numpy array training data with shape: + batch_size x # time steps x # dimensions + ext_input_bxtxi: Numpy array training external input with shape: + batch_size x # time steps x # external input dims + do_eval_cost (optional): If true, the IWAE (Importance Weighted + Autoencoder) log likeihood bound, instead of the VAE version. + do_average_batch (optional): average over the batch, useful for getting + good IWAE costs, and model outputs for a single data point. + + Returns: + A dictionary with the outputs of the model decoder, namely: + prior g0 mean, prior g0 variance, approx. posterior mean, approx + posterior mean, the generator initial conditions, the control inputs (if + enabled), the state of the generator, the factors, and the rates. + """ + session = tf.get_default_session() + + # if fewer than batch_size provided, pad to batch_size + hps = self.hps + batch_size = hps.batch_size + E, _, _ = data_bxtxd.shape + if E < hps.batch_size: + data_bxtxd = np.pad(data_bxtxd, ((0, hps.batch_size-E), (0, 0), (0, 0)), + mode='constant', constant_values=0) + if ext_input_bxtxi is not None: + ext_input_bxtxi = np.pad(ext_input_bxtxi, + ((0, hps.batch_size-E), (0, 0), (0, 0)), + mode='constant', constant_values=0) + + feed_dict = self.build_feed_dict(data_name, data_bxtxd, + ext_input_bxtxi, keep_prob=1.0) + + # Non-temporal signals will be batch x dim. + # Temporal signals are list length T with elements batch x dim. + tf_vals = [self.gen_ics, self.gen_states, self.factors, + self.output_dist_params] + tf_vals.append(self.cost) + tf_vals.append(self.nll_bound_vae) + tf_vals.append(self.nll_bound_iwae) + tf_vals.append(self.train_step) # not train_op! 
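+    # Note on the evaluation pattern below: tf_vals mixes single tensors
+    # (e.g. the cost scalars and train_step) with Python lists of
+    # per-timestep tensors (e.g. gen_states and factors). flatten() collapses
+    # that nesting into one flat list so everything can be fetched in a single
+    # session.run() call, and returns fidxs, the index groups used to regroup
+    # the evaluated values; the ff counter below walks those groups in the
+    # same order the values were appended here.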
+ if self.hps.ic_dim > 0: + tf_vals += [self.prior_zs_g0.mean, self.prior_zs_g0.logvar, + self.posterior_zs_g0.mean, self.posterior_zs_g0.logvar] + if self.hps.co_dim > 0: + tf_vals.append(self.controller_outputs) + tf_vals_flat, fidxs = flatten(tf_vals) + + np_vals_flat = session.run(tf_vals_flat, feed_dict=feed_dict) + + ff = 0 + gen_ics = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1 + gen_states = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1 + factors = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1 + out_dist_params = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1 + costs = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1 + nll_bound_vaes = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1 + nll_bound_iwaes = [np_vals_flat[f] for f in fidxs[ff]]; ff +=1 + train_steps = [np_vals_flat[f] for f in fidxs[ff]]; ff +=1 + if self.hps.ic_dim > 0: + prior_g0_mean = [np_vals_flat[f] for f in fidxs[ff]]; ff +=1 + prior_g0_logvar = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1 + post_g0_mean = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1 + post_g0_logvar = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1 + if self.hps.co_dim > 0: + controller_outputs = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1 + + # [0] are to take out the non-temporal items from lists + gen_ics = gen_ics[0] + costs = costs[0] + nll_bound_vaes = nll_bound_vaes[0] + nll_bound_iwaes = nll_bound_iwaes[0] + train_steps = train_steps[0] + + # Convert to full tensors, not lists of tensors in time dim. + gen_states = list_t_bxn_to_tensor_bxtxn(gen_states) + factors = list_t_bxn_to_tensor_bxtxn(factors) + out_dist_params = list_t_bxn_to_tensor_bxtxn(out_dist_params) + if self.hps.ic_dim > 0: + # select first time point + prior_g0_mean = prior_g0_mean[0] + prior_g0_logvar = prior_g0_logvar[0] + post_g0_mean = post_g0_mean[0] + post_g0_logvar = post_g0_logvar[0] + if self.hps.co_dim > 0: + controller_outputs = list_t_bxn_to_tensor_bxtxn(controller_outputs) + + # slice out the trials in case < batch_size provided + if E < hps.batch_size: + idx = np.arange(E) + gen_ics = gen_ics[idx, :] + gen_states = gen_states[idx, :] + factors = factors[idx, :, :] + out_dist_params = out_dist_params[idx, :, :] + if self.hps.ic_dim > 0: + prior_g0_mean = prior_g0_mean[idx, :] + prior_g0_logvar = prior_g0_logvar[idx, :] + post_g0_mean = post_g0_mean[idx, :] + post_g0_logvar = post_g0_logvar[idx, :] + if self.hps.co_dim > 0: + controller_outputs = controller_outputs[idx, :, :] + + if do_average_batch: + gen_ics = np.mean(gen_ics, axis=0) + gen_states = np.mean(gen_states, axis=0) + factors = np.mean(factors, axis=0) + out_dist_params = np.mean(out_dist_params, axis=0) + if self.hps.ic_dim > 0: + prior_g0_mean = np.mean(prior_g0_mean, axis=0) + prior_g0_logvar = np.mean(prior_g0_logvar, axis=0) + post_g0_mean = np.mean(post_g0_mean, axis=0) + post_g0_logvar = np.mean(post_g0_logvar, axis=0) + if self.hps.co_dim > 0: + controller_outputs = np.mean(controller_outputs, axis=0) + + model_vals = {} + model_vals['gen_ics'] = gen_ics + model_vals['gen_states'] = gen_states + model_vals['factors'] = factors + model_vals['output_dist_params'] = out_dist_params + model_vals['costs'] = costs + model_vals['nll_bound_vaes'] = nll_bound_vaes + model_vals['nll_bound_iwaes'] = nll_bound_iwaes + model_vals['train_steps'] = train_steps + if self.hps.ic_dim > 0: + model_vals['prior_g0_mean'] = prior_g0_mean + model_vals['prior_g0_logvar'] = prior_g0_logvar + model_vals['post_g0_mean'] = post_g0_mean + model_vals['post_g0_logvar'] = post_g0_logvar + if self.hps.co_dim > 0: + 
model_vals['controller_outputs'] = controller_outputs + + return model_vals + + def eval_model_runs_avg_epoch(self, data_name, data_extxd, + ext_input_extxi=None): + """Returns all the expected value for goodies for the entire model. + + The expected value is taken over hidden (z) variables, namely the initial + conditions and the control inputs. The expected value is approximate, and + accomplished via sampling (batch_size) samples for every examples. + + Args: + data_name: The name of the data dict, to select which in/out matrices + to use. + data_extxd: Numpy array training data with shape: + # examples x # time steps x # dimensions + ext_input_extxi (optional): Numpy array training external input with + shape: # examples x # time steps x # external input dims + + Returns: + A dictionary with the averaged outputs of the model decoder, namely: + prior g0 mean, prior g0 variance, approx. posterior mean, approx + posterior mean, the generator initial conditions, the control inputs (if + enabled), the state of the generator, the factors, and the output + distribution parameters, e.g. (rates or mean and variances). + """ + hps = self.hps + batch_size = hps.batch_size + E, T, D = data_extxd.shape + E_to_process = hps.ps_nexamples_to_process + if E_to_process > E: + E_to_process = E + + if hps.ic_dim > 0: + prior_g0_mean = np.zeros([E_to_process, hps.ic_dim]) + prior_g0_logvar = np.zeros([E_to_process, hps.ic_dim]) + post_g0_mean = np.zeros([E_to_process, hps.ic_dim]) + post_g0_logvar = np.zeros([E_to_process, hps.ic_dim]) + + if hps.co_dim > 0: + controller_outputs = np.zeros([E_to_process, T, hps.co_dim]) + gen_ics = np.zeros([E_to_process, hps.gen_dim]) + gen_states = np.zeros([E_to_process, T, hps.gen_dim]) + factors = np.zeros([E_to_process, T, hps.factors_dim]) + + if hps.output_dist == 'poisson': + out_dist_params = np.zeros([E_to_process, T, D]) + elif hps.output_dist == 'gaussian': + out_dist_params = np.zeros([E_to_process, T, D+D]) + else: + assert False, "NIY" + + costs = np.zeros(E_to_process) + nll_bound_vaes = np.zeros(E_to_process) + nll_bound_iwaes = np.zeros(E_to_process) + train_steps = np.zeros(E_to_process) + for es_idx in range(E_to_process): + print("Running %d of %d." 
% (es_idx+1, E_to_process)) + example_idxs = es_idx * np.ones(batch_size, dtype=np.int32) + data_bxtxd, ext_input_bxtxi = self.get_batch(data_extxd, + ext_input_extxi, + batch_size=batch_size, + example_idxs=example_idxs) + model_values = self.eval_model_runs_batch(data_name, data_bxtxd, + ext_input_bxtxi, + do_eval_cost=True, + do_average_batch=True) + + if self.hps.ic_dim > 0: + prior_g0_mean[es_idx,:] = model_values['prior_g0_mean'] + prior_g0_logvar[es_idx,:] = model_values['prior_g0_logvar'] + post_g0_mean[es_idx,:] = model_values['post_g0_mean'] + post_g0_logvar[es_idx,:] = model_values['post_g0_logvar'] + gen_ics[es_idx,:] = model_values['gen_ics'] + + if self.hps.co_dim > 0: + controller_outputs[es_idx,:,:] = model_values['controller_outputs'] + gen_states[es_idx,:,:] = model_values['gen_states'] + factors[es_idx,:,:] = model_values['factors'] + out_dist_params[es_idx,:,:] = model_values['output_dist_params'] + costs[es_idx] = model_values['costs'] + nll_bound_vaes[es_idx] = model_values['nll_bound_vaes'] + nll_bound_iwaes[es_idx] = model_values['nll_bound_iwaes'] + train_steps[es_idx] = model_values['train_steps'] + print('bound nll(vae): %.3f, bound nll(iwae): %.3f' \ + % (nll_bound_vaes[es_idx], nll_bound_iwaes[es_idx])) + + model_runs = {} + if self.hps.ic_dim > 0: + model_runs['prior_g0_mean'] = prior_g0_mean + model_runs['prior_g0_logvar'] = prior_g0_logvar + model_runs['post_g0_mean'] = post_g0_mean + model_runs['post_g0_logvar'] = post_g0_logvar + model_runs['gen_ics'] = gen_ics + + if self.hps.co_dim > 0: + model_runs['controller_outputs'] = controller_outputs + model_runs['gen_states'] = gen_states + model_runs['factors'] = factors + model_runs['output_dist_params'] = out_dist_params + model_runs['costs'] = costs + model_runs['nll_bound_vaes'] = nll_bound_vaes + model_runs['nll_bound_iwaes'] = nll_bound_iwaes + model_runs['train_steps'] = train_steps + return model_runs + + def eval_model_runs_push_mean(self, data_name, data_extxd, + ext_input_extxi=None): + """Returns values of interest for the model by pushing the means through + + The mean values for both initial conditions and the control inputs are + pushed through the model instead of sampling (as is done in + eval_model_runs_avg_epoch). + This is a quick and approximate version of estimating these values instead + of sampling from the posterior many times and then averaging those values of + interest. + + Internally, a total of batch_size trials are run through the model at once. + + Args: + data_name: The name of the data dict, to select which in/out matrices + to use. + data_extxd: Numpy array training data with shape: + # examples x # time steps x # dimensions + ext_input_extxi (optional): Numpy array training external input with + shape: # examples x # time steps x # external input dims + + Returns: + A dictionary with the estimated outputs of the model decoder, namely: + prior g0 mean, prior g0 variance, approx. posterior mean, approx + posterior mean, the generator initial conditions, the control inputs (if + enabled), the state of the generator, the factors, and the output + distribution parameters, e.g. (rates or mean and variances). 
+ """ + hps = self.hps + batch_size = hps.batch_size + E, T, D = data_extxd.shape + E_to_process = hps.ps_nexamples_to_process + if E_to_process > E: + print("Setting number of posterior samples to process to : ", E) + E_to_process = E + + if hps.ic_dim > 0: + prior_g0_mean = np.zeros([E_to_process, hps.ic_dim]) + prior_g0_logvar = np.zeros([E_to_process, hps.ic_dim]) + post_g0_mean = np.zeros([E_to_process, hps.ic_dim]) + post_g0_logvar = np.zeros([E_to_process, hps.ic_dim]) + + if hps.co_dim > 0: + controller_outputs = np.zeros([E_to_process, T, hps.co_dim]) + gen_ics = np.zeros([E_to_process, hps.gen_dim]) + gen_states = np.zeros([E_to_process, T, hps.gen_dim]) + factors = np.zeros([E_to_process, T, hps.factors_dim]) + + if hps.output_dist == 'poisson': + out_dist_params = np.zeros([E_to_process, T, D]) + elif hps.output_dist == 'gaussian': + out_dist_params = np.zeros([E_to_process, T, D+D]) + else: + assert False, "NIY" + + costs = np.zeros(E_to_process) + nll_bound_vaes = np.zeros(E_to_process) + nll_bound_iwaes = np.zeros(E_to_process) + train_steps = np.zeros(E_to_process) + + # generator that will yield 0:N in groups of per items, e.g. + # (0:per-1), (per:2*per-1), ..., with the last group containing <= per items + # this will be used to feed per=batch_size trials into the model at a time + def trial_batches(N, per): + for i in range(0, N, per): + yield np.arange(i, min(i+per, N), dtype=np.int32) + + for batch_idx, es_idx in enumerate(trial_batches(E_to_process, + hps.batch_size)): + print("Running trial batch %d with %d trials" % (batch_idx+1, + len(es_idx))) + data_bxtxd, ext_input_bxtxi = self.get_batch(data_extxd, + ext_input_extxi, + batch_size=batch_size, + example_idxs=es_idx) + model_values = self.eval_model_runs_batch(data_name, data_bxtxd, + ext_input_bxtxi, + do_eval_cost=True, + do_average_batch=False) + + if self.hps.ic_dim > 0: + prior_g0_mean[es_idx,:] = model_values['prior_g0_mean'] + prior_g0_logvar[es_idx,:] = model_values['prior_g0_logvar'] + post_g0_mean[es_idx,:] = model_values['post_g0_mean'] + post_g0_logvar[es_idx,:] = model_values['post_g0_logvar'] + gen_ics[es_idx,:] = model_values['gen_ics'] + + if self.hps.co_dim > 0: + controller_outputs[es_idx,:,:] = model_values['controller_outputs'] + gen_states[es_idx,:,:] = model_values['gen_states'] + factors[es_idx,:,:] = model_values['factors'] + out_dist_params[es_idx,:,:] = model_values['output_dist_params'] + + # TODO + # model_values['costs'] and other costs come out as scalars, summed over + # all the trials in the batch. what we want is the per-trial costs + costs[es_idx] = model_values['costs'] + nll_bound_vaes[es_idx] = model_values['nll_bound_vaes'] + nll_bound_iwaes[es_idx] = model_values['nll_bound_iwaes'] + + train_steps[es_idx] = model_values['train_steps'] + + model_runs = {} + if self.hps.ic_dim > 0: + model_runs['prior_g0_mean'] = prior_g0_mean + model_runs['prior_g0_logvar'] = prior_g0_logvar + model_runs['post_g0_mean'] = post_g0_mean + model_runs['post_g0_logvar'] = post_g0_logvar + model_runs['gen_ics'] = gen_ics + + if self.hps.co_dim > 0: + model_runs['controller_outputs'] = controller_outputs + model_runs['gen_states'] = gen_states + model_runs['factors'] = factors + model_runs['output_dist_params'] = out_dist_params + + # You probably do not want the LL associated values when pushing the mean + # instead of sampling. 
+ model_runs['costs'] = costs + model_runs['nll_bound_vaes'] = nll_bound_vaes + model_runs['nll_bound_iwaes'] = nll_bound_iwaes + model_runs['train_steps'] = train_steps + return model_runs + + def write_model_runs(self, datasets, output_fname=None, push_mean=False): + """Run the model on the data in data_dict, and save the computed values. + + LFADS generates a number of outputs for each examples, and these are all + saved. They are: + The mean and variance of the prior of g0. + The mean and variance of approximate posterior of g0. + The control inputs (if enabled) + The initial conditions, g0, for all examples. + The generator states for all time. + The factors for all time. + The output distribution parameters (e.g. rates) for all time. + + Args: + datasets: a dictionary of named data_dictionaries, see top of lfads.py + output_fname: a file name stem for the output files. + push_mean: if False (default), generates batch_size samples for each trial + and averages the results. if True, runs each trial once without noise, + pushing the posterior mean initial conditions and control inputs through + the trained model. False is used for posterior_sample_and_average, True + is used for posterior_push_mean. + """ + hps = self.hps + kind = hps.kind + + for data_name, data_dict in datasets.items(): + data_tuple = [('train', data_dict['train_data'], + data_dict['train_ext_input']), + ('valid', data_dict['valid_data'], + data_dict['valid_ext_input'])] + for data_kind, data_extxd, ext_input_extxi in data_tuple: + if not output_fname: + fname = "model_runs_" + data_name + '_' + data_kind + '_' + kind + else: + fname = output_fname + data_name + '_' + data_kind + '_' + kind + + print("Writing data for %s data and kind %s." % (data_name, data_kind)) + if push_mean: + model_runs = self.eval_model_runs_push_mean(data_name, data_extxd, + ext_input_extxi) + else: + model_runs = self.eval_model_runs_avg_epoch(data_name, data_extxd, + ext_input_extxi) + full_fname = os.path.join(hps.lfads_save_dir, fname) + write_data(full_fname, model_runs, compression='gzip') + print("Done.") + + def write_model_samples(self, dataset_name, output_fname=None): + """Use the prior distribution to generate batch_size number of samples + from the model. + + LFADS generates a number of outputs for each sample, and these are all + saved. They are: + The mean and variance of the prior of g0. + The control inputs (if enabled) + The initial conditions, g0, for all examples. + The generator states for all time. + The factors for all time. + The output distribution parameters (e.g. rates) for all time. + + Args: + dataset_name: The name of the dataset to grab the factors -> rates + alignment matrices from. + output_fname: The name of the file in which to save the generated + samples. 
+ """ + hps = self.hps + batch_size = hps.batch_size + + print("Generating %d samples" % (batch_size)) + tf_vals = [self.factors, self.gen_states, self.gen_ics, + self.cost, self.output_dist_params] + if hps.ic_dim > 0: + tf_vals += [self.prior_zs_g0.mean, self.prior_zs_g0.logvar] + if hps.co_dim > 0: + tf_vals += [self.prior_zs_ar_con.samples_t] + tf_vals_flat, fidxs = flatten(tf_vals) + + session = tf.get_default_session() + feed_dict = {} + feed_dict[self.dataName] = dataset_name + feed_dict[self.keep_prob] = 1.0 + + np_vals_flat = session.run(tf_vals_flat, feed_dict=feed_dict) + + ff = 0 + factors = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1 + gen_states = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1 + gen_ics = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1 + costs = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1 + output_dist_params = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1 + if hps.ic_dim > 0: + prior_g0_mean = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1 + prior_g0_logvar = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1 + if hps.co_dim > 0: + prior_zs_ar_con = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1 + + # [0] are to take out the non-temporal items from lists + gen_ics = gen_ics[0] + costs = costs[0] + + # Convert to full tensors, not lists of tensors in time dim. + gen_states = list_t_bxn_to_tensor_bxtxn(gen_states) + factors = list_t_bxn_to_tensor_bxtxn(factors) + output_dist_params = list_t_bxn_to_tensor_bxtxn(output_dist_params) + if hps.ic_dim > 0: + prior_g0_mean = prior_g0_mean[0] + prior_g0_logvar = prior_g0_logvar[0] + if hps.co_dim > 0: + prior_zs_ar_con = list_t_bxn_to_tensor_bxtxn(prior_zs_ar_con) + + model_vals = {} + model_vals['gen_ics'] = gen_ics + model_vals['gen_states'] = gen_states + model_vals['factors'] = factors + model_vals['output_dist_params'] = output_dist_params + model_vals['costs'] = costs.reshape(1) + if hps.ic_dim > 0: + model_vals['prior_g0_mean'] = prior_g0_mean + model_vals['prior_g0_logvar'] = prior_g0_logvar + if hps.co_dim > 0: + model_vals['prior_zs_ar_con'] = prior_zs_ar_con + + full_fname = os.path.join(hps.lfads_save_dir, output_fname) + write_data(full_fname, model_vals, compression='gzip') + print("Done.") + + @staticmethod + def eval_model_parameters(use_nested=True, include_strs=None): + """Evaluate and return all of the TF variables in the model. + + Args: + use_nested (optional): For returning values, use a nested dictoinary, based + on variable scoping, or return all variables in a flat dictionary. + include_strs (optional): A list of strings to use as a filter, to reduce the + number of variables returned. A variable name must contain at least one + string in include_strs as a sub-string in order to be returned. + + Returns: + The parameters of the model. This can be in a flat + dictionary, or a nested dictionary, where the nesting is by variable + scope. 
+ """ + all_tf_vars = tf.global_variables() + session = tf.get_default_session() + all_tf_vars_eval = session.run(all_tf_vars) + vars_dict = {} + strs = ["LFADS"] + if include_strs: + strs += include_strs + + for i, (var, var_eval) in enumerate(zip(all_tf_vars, all_tf_vars_eval)): + if any(s in include_strs for s in var.name): + if not isinstance(var_eval, np.ndarray): # for H5PY + print(var.name, """ is not numpy array, saving as numpy array + with value: """, var_eval, type(var_eval)) + e = np.array(var_eval) + print(e, type(e)) + else: + e = var_eval + vars_dict[var.name] = e + + if not use_nested: + return vars_dict + + var_names = vars_dict.keys() + nested_vars_dict = {} + current_dict = nested_vars_dict + for v, var_name in enumerate(var_names): + var_split_name_list = var_name.split('/') + split_name_list_len = len(var_split_name_list) + current_dict = nested_vars_dict + for p, part in enumerate(var_split_name_list): + if p < split_name_list_len - 1: + if part in current_dict: + current_dict = current_dict[part] + else: + current_dict[part] = {} + current_dict = current_dict[part] + else: + current_dict[part] = vars_dict[var_name] + + return nested_vars_dict + + @staticmethod + def spikify_rates(rates_bxtxd): + """Randomly spikify underlying rates according a Poisson distribution + + Args: + rates_bxtxd: a numpy tensor with shape: + + Returns: + A numpy array with the same shape as rates_bxtxd, but with the event + counts. + """ + + B,T,N = rates_bxtxd.shape + assert all([B > 0, N > 0]), "problems" + + # Because the rates are changing, there is nesting + spikes_bxtxd = np.zeros([B,T,N], dtype=np.int32) + for b in range(B): + for t in range(T): + for n in range(N): + rate = rates_bxtxd[b,t,n] + count = np.random.poisson(rate) + spikes_bxtxd[b,t,n] = count + + return spikes_bxtxd diff --git a/models/research/lfads/plot_lfads.py b/models/research/lfads/plot_lfads.py new file mode 100644 index 0000000000000000000000000000000000000000..c4e1a0332ef2affeae147edda4779cc4a7e9a0ef --- /dev/null +++ b/models/research/lfads/plot_lfads.py @@ -0,0 +1,181 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ============================================================================== +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import matplotlib +matplotlib.use('Agg') +from matplotlib import pyplot as plt +import numpy as np +import tensorflow as tf + +def _plot_item(W, name, full_name, nspaces): + plt.figure() + if W.shape == (): + print(name, ": ", W) + elif W.shape[0] == 1: + plt.stem(W.T) + plt.title(full_name) + elif W.shape[1] == 1: + plt.stem(W) + plt.title(full_name) + else: + plt.imshow(np.abs(W), interpolation='nearest', cmap='jet'); + plt.colorbar() + plt.title(full_name) + + +def all_plot(d, full_name="", exclude="", nspaces=0): + """Recursively plot all the LFADS model parameters in the nested + dictionary.""" + for k, v in d.iteritems(): + this_name = full_name+"/"+k + if isinstance(v, dict): + all_plot(v, full_name=this_name, exclude=exclude, nspaces=nspaces+4) + else: + if exclude == "" or exclude not in this_name: + _plot_item(v, name=k, full_name=full_name+"/"+k, nspaces=nspaces+4) + + + +def plot_time_series(vals_bxtxn, bidx=None, n_to_plot=np.inf, scale=1.0, + color='r', title=None): + + if bidx is None: + vals_txn = np.mean(vals_bxtxn, axis=0) + else: + vals_txn = vals_bxtxn[bidx,:,:] + + T, N = vals_txn.shape + if n_to_plot > N: + n_to_plot = N + + plt.plot(vals_txn[:,0:n_to_plot] + scale*np.array(range(n_to_plot)), + color=color, lw=1.0) + plt.axis('tight') + if title: + plt.title(title) + + +def plot_lfads_timeseries(data_bxtxn, model_vals, ext_input_bxtxi=None, + truth_bxtxn=None, bidx=None, output_dist="poisson", + conversion_factor=1.0, subplot_cidx=0, + col_title=None): + + n_to_plot = 10 + scale = 1.0 + nrows = 7 + plt.subplot(nrows,2,1+subplot_cidx) + + if output_dist == 'poisson': + rates = means = conversion_factor * model_vals['output_dist_params'] + plot_time_series(rates, bidx, n_to_plot=n_to_plot, scale=scale, + title=col_title + " rates (LFADS - red, Truth - black)") + elif output_dist == 'gaussian': + means_vars = model_vals['output_dist_params'] + means, vars = np.split(means_vars,2, axis=2) # bxtxn + stds = np.sqrt(vars) + plot_time_series(means, bidx, n_to_plot=n_to_plot, scale=scale, + title=col_title + " means (LFADS - red, Truth - black)") + plot_time_series(means+stds, bidx, n_to_plot=n_to_plot, scale=scale, + color='c') + plot_time_series(means-stds, bidx, n_to_plot=n_to_plot, scale=scale, + color='c') + else: + assert 'NIY' + + + if truth_bxtxn is not None: + plot_time_series(truth_bxtxn, bidx, n_to_plot=n_to_plot, color='k', + scale=scale) + + input_title = "" + if "controller_outputs" in model_vals.keys(): + input_title += " Controller Output" + plt.subplot(nrows,2,3+subplot_cidx) + u_t = model_vals['controller_outputs'][0:-1] + plot_time_series(u_t, bidx, n_to_plot=n_to_plot, color='c', scale=1.0, + title=col_title + input_title) + + if ext_input_bxtxi is not None: + input_title += " External Input" + plot_time_series(ext_input_bxtxi, n_to_plot=n_to_plot, color='b', + scale=scale, title=col_title + input_title) + + plt.subplot(nrows,2,5+subplot_cidx) + plot_time_series(means, bidx, + n_to_plot=n_to_plot, scale=1.0, + title=col_title + " Spikes (LFADS - red, Spikes - black)") + plot_time_series(data_bxtxn, bidx, n_to_plot=n_to_plot, color='k', scale=1.0) + + plt.subplot(nrows,2,7+subplot_cidx) + plot_time_series(model_vals['factors'], bidx, n_to_plot=n_to_plot, color='b', + scale=2.0, title=col_title + " Factors") + + plt.subplot(nrows,2,9+subplot_cidx) + 
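+  # Rows of the nrows x 2 grid so far: rates/means, controller and external
+  # input, model means vs. observed data, factors; this fifth row shows the
+  # generator hidden-state trajectories. subplot_cidx selects the column
+  # (0 is used for the training example, 1 for the validation example in
+  # plot_lfads below).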
plot_time_series(model_vals['gen_states'], bidx, n_to_plot=n_to_plot, + color='g', scale=1.0, title=col_title + " Generator State") + + if bidx is not None: + data_nxt = data_bxtxn[bidx,:,:].T + params_nxt = model_vals['output_dist_params'][bidx,:,:].T + else: + data_nxt = np.mean(data_bxtxn, axis=0).T + params_nxt = np.mean(model_vals['output_dist_params'], axis=0).T + if output_dist == 'poisson': + means_nxt = params_nxt + elif output_dist == 'gaussian': # (means+vars) x time + means_nxt = np.vsplit(params_nxt,2)[0] # get means + else: + assert "NIY" + + plt.subplot(nrows,2,11+subplot_cidx) + plt.imshow(data_nxt, aspect='auto', interpolation='nearest') + plt.title(col_title + ' Data') + + plt.subplot(nrows,2,13+subplot_cidx) + plt.imshow(means_nxt, aspect='auto', interpolation='nearest') + plt.title(col_title + ' Means') + + +def plot_lfads(train_bxtxd, train_model_vals, + train_ext_input_bxtxi=None, train_truth_bxtxd=None, + valid_bxtxd=None, valid_model_vals=None, + valid_ext_input_bxtxi=None, valid_truth_bxtxd=None, + bidx=None, cf=1.0, output_dist='poisson'): + + # Plotting + f = plt.figure(figsize=(18,20), tight_layout=True) + plot_lfads_timeseries(train_bxtxd, train_model_vals, + train_ext_input_bxtxi, + truth_bxtxn=train_truth_bxtxd, + conversion_factor=cf, bidx=bidx, + output_dist=output_dist, col_title='Train') + plot_lfads_timeseries(valid_bxtxd, valid_model_vals, + valid_ext_input_bxtxi, + truth_bxtxn=valid_truth_bxtxd, + conversion_factor=cf, bidx=bidx, + output_dist=output_dist, + subplot_cidx=1, col_title='Valid') + + # Convert from figure to an numpy array width x height x 3 (last for RGB) + f.canvas.draw() + data = np.fromstring(f.canvas.tostring_rgb(), dtype=np.uint8, sep='') + data_wxhx3 = data.reshape(f.canvas.get_width_height()[::-1] + (3,)) + plt.close() + + return data_wxhx3 diff --git a/models/research/lfads/run_lfads.py b/models/research/lfads/run_lfads.py new file mode 100644 index 0000000000000000000000000000000000000000..bd1c0d5e4deab50481cd32efdd044c61707204cc --- /dev/null +++ b/models/research/lfads/run_lfads.py @@ -0,0 +1,815 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ============================================================================== +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from lfads import LFADS +import numpy as np +import os +import tensorflow as tf +import re +import utils +import sys +MAX_INT = sys.maxsize + +# Lots of hyperparameters, but most are pretty insensitive. The +# explanation of these hyperparameters is found below, in the flags +# session. + +CHECKPOINT_PB_LOAD_NAME = "checkpoint" +CHECKPOINT_NAME = "lfads_vae" +CSV_LOG = "fitlog" +OUTPUT_FILENAME_STEM = "" +DEVICE = "gpu:0" # "cpu:0", or other gpus, e.g. 
"gpu:1" +MAX_CKPT_TO_KEEP = 5 +MAX_CKPT_TO_KEEP_LVE = 5 +PS_NEXAMPLES_TO_PROCESS = MAX_INT # if larger than number of examples, process all +EXT_INPUT_DIM = 0 +IC_DIM = 64 +FACTORS_DIM = 50 +IC_ENC_DIM = 128 +GEN_DIM = 200 +GEN_CELL_INPUT_WEIGHT_SCALE = 1.0 +GEN_CELL_REC_WEIGHT_SCALE = 1.0 +CELL_WEIGHT_SCALE = 1.0 +BATCH_SIZE = 128 +LEARNING_RATE_INIT = 0.01 +LEARNING_RATE_DECAY_FACTOR = 0.95 +LEARNING_RATE_STOP = 0.00001 +LEARNING_RATE_N_TO_COMPARE = 6 +INJECT_EXT_INPUT_TO_GEN = False +DO_TRAIN_IO_ONLY = False +DO_TRAIN_ENCODER_ONLY = False +DO_RESET_LEARNING_RATE = False +FEEDBACK_FACTORS_OR_RATES = "factors" +DO_TRAIN_READIN = True + +# Calibrated just above the average value for the rnn synthetic data. +MAX_GRAD_NORM = 200.0 +CELL_CLIP_VALUE = 5.0 +KEEP_PROB = 0.95 +TEMPORAL_SPIKE_JITTER_WIDTH = 0 +OUTPUT_DISTRIBUTION = 'poisson' # 'poisson' or 'gaussian' +NUM_STEPS_FOR_GEN_IC = MAX_INT # set to num_steps if greater than num_steps + +DATA_DIR = "/tmp/rnn_synth_data_v1.0/" +DATA_FILENAME_STEM = "chaotic_rnn_inputs_g1p5" +LFADS_SAVE_DIR = "/tmp/lfads_chaotic_rnn_inputs_g1p5/" +CO_DIM = 1 +DO_CAUSAL_CONTROLLER = False +DO_FEED_FACTORS_TO_CONTROLLER = True +CONTROLLER_INPUT_LAG = 1 +PRIOR_AR_AUTOCORRELATION = 10.0 +PRIOR_AR_PROCESS_VAR = 0.1 +DO_TRAIN_PRIOR_AR_ATAU = True +DO_TRAIN_PRIOR_AR_NVAR = True +CI_ENC_DIM = 128 +CON_DIM = 128 +CO_PRIOR_VAR_SCALE = 0.1 +KL_INCREASE_STEPS = 2000 +L2_INCREASE_STEPS = 2000 +L2_GEN_SCALE = 2000.0 +L2_CON_SCALE = 0.0 +# scale of regularizer on time correlation of inferred inputs +CO_MEAN_CORR_SCALE = 0.0 +KL_IC_WEIGHT = 1.0 +KL_CO_WEIGHT = 1.0 +KL_START_STEP = 0 +L2_START_STEP = 0 +IC_PRIOR_VAR_MIN = 0.1 +IC_PRIOR_VAR_SCALE = 0.1 +IC_PRIOR_VAR_MAX = 0.1 +IC_POST_VAR_MIN = 0.0001 # protection from KL blowing up + +flags = tf.app.flags +flags.DEFINE_string("kind", "train", + "Type of model to build {train, \ + posterior_sample_and_average, \ + posterior_push_mean, \ + prior_sample, write_model_params") +flags.DEFINE_string("output_dist", OUTPUT_DISTRIBUTION, + "Type of output distribution, 'poisson' or 'gaussian'") +flags.DEFINE_boolean("allow_gpu_growth", False, + "If true, only allocate amount of memory needed for \ + Session. 
Otherwise, use full GPU memory.") + +# DATA +flags.DEFINE_string("data_dir", DATA_DIR, "Data for training") +flags.DEFINE_string("data_filename_stem", DATA_FILENAME_STEM, + "Filename stem for data dictionaries.") +flags.DEFINE_string("lfads_save_dir", LFADS_SAVE_DIR, "model save dir") +flags.DEFINE_string("checkpoint_pb_load_name", CHECKPOINT_PB_LOAD_NAME, + "Name of checkpoint files, use 'checkpoint_lve' for best \ + error") +flags.DEFINE_string("checkpoint_name", CHECKPOINT_NAME, + "Name of checkpoint files (.ckpt appended)") +flags.DEFINE_string("output_filename_stem", OUTPUT_FILENAME_STEM, + "Name of output file (postfix will be added)") +flags.DEFINE_string("device", DEVICE, + "Which device to use (default: \"gpu:0\", can also be \ + \"cpu:0\", \"gpu:1\", etc)") +flags.DEFINE_string("csv_log", CSV_LOG, + "Name of file to keep running log of fit likelihoods, \ + etc (.csv appended)") +flags.DEFINE_integer("max_ckpt_to_keep", MAX_CKPT_TO_KEEP, + "Max # of checkpoints to keep (rolling)") +flags.DEFINE_integer("ps_nexamples_to_process", PS_NEXAMPLES_TO_PROCESS, + "Number of examples to process for posterior sample and \ + average (not number of samples to average over).") +flags.DEFINE_integer("max_ckpt_to_keep_lve", MAX_CKPT_TO_KEEP_LVE, + "Max # of checkpoints to keep for lowest validation error \ + models (rolling)") +flags.DEFINE_integer("ext_input_dim", EXT_INPUT_DIM, "Dimension of external \ +inputs") +flags.DEFINE_integer("num_steps_for_gen_ic", NUM_STEPS_FOR_GEN_IC, + "Number of steps to train the generator initial conditon.") + + +# If there are observed inputs, there are two ways to add that observed +# input to the model. The first is by treating as something to be +# inferred, and thus encoding the observed input via the encoders, and then +# input to the generator via the "inferred inputs" channel. Second, one +# can input the input directly into the generator. This has the downside +# of making the generation process strictly dependent on knowing the +# observed input for any generated trial. +flags.DEFINE_boolean("inject_ext_input_to_gen", + INJECT_EXT_INPUT_TO_GEN, + "Should observed inputs be input to model via encoders, \ + or injected directly into generator?") + +# CELL + +# The combined recurrent and input weights of the encoder and +# controller cells are by default set to scale at ws/sqrt(#inputs), +# with ws=1.0. You can change this scaling with this parameter. +flags.DEFINE_float("cell_weight_scale", CELL_WEIGHT_SCALE, + "Input scaling for input weights in generator.") + + +# GENERATION + +# Note that the dimension of the initial conditions is separated from the +# dimensions of the generator initial conditions (and a linear matrix will +# adapt the shapes if necessary). This is just another way to control +# complexity. In all likelihood, setting the ic dims to the size of the +# generator hidden state is just fine. +flags.DEFINE_integer("ic_dim", IC_DIM, "Dimension of h0") +# Setting the dimensions of the factors to something smaller than the data +# dimension is a way to get a reduced dimensionality representation of your +# data. +flags.DEFINE_integer("factors_dim", FACTORS_DIM, + "Number of factors from generator") +flags.DEFINE_integer("ic_enc_dim", IC_ENC_DIM, + "Cell hidden size, encoder of h0") + +# Controlling the size of the generator is one way to control complexity of +# the dynamics (there is also l2, which will squeeze out unnecessary +# dynamics also). 
The modern deep learning approach is to make these cells +# as large as tolerable (from a waiting perspective), and then regularize +# them to death with drop out or whatever. I don't know if this is correct +# for the LFADS application or not. +flags.DEFINE_integer("gen_dim", GEN_DIM, + "Cell hidden size, generator.") +# The weights of the generator cell by default set to scale at +# ws/sqrt(#inputs), with ws=1.0. You can change ws for +# the input weights or the recurrent weights with these hyperparameters. +flags.DEFINE_float("gen_cell_input_weight_scale", GEN_CELL_INPUT_WEIGHT_SCALE, + "Input scaling for input weights in generator.") +flags.DEFINE_float("gen_cell_rec_weight_scale", GEN_CELL_REC_WEIGHT_SCALE, + "Input scaling for rec weights in generator.") + +# KL DISTRIBUTIONS +# If you don't know what you are donig here, please leave alone, the +# defaults should be fine for most cases, irregardless of other parameters. +# +# If you don't want the prior variance to be learned, set the +# following values to the same thing: ic_prior_var_min, +# ic_prior_var_scale, ic_prior_var_max. The prior mean will be +# learned regardless. +flags.DEFINE_float("ic_prior_var_min", IC_PRIOR_VAR_MIN, + "Minimum variance in posterior h0 codes.") +flags.DEFINE_float("ic_prior_var_scale", IC_PRIOR_VAR_SCALE, + "Variance of ic prior distribution") +flags.DEFINE_float("ic_prior_var_max", IC_PRIOR_VAR_MAX, + "Maximum variance of IC prior distribution.") +# If you really want to limit the information from encoder to decoder, +# Increase ic_post_var_min above 0.0. +flags.DEFINE_float("ic_post_var_min", IC_POST_VAR_MIN, + "Minimum variance of IC posterior distribution.") +flags.DEFINE_float("co_prior_var_scale", CO_PRIOR_VAR_SCALE, + "Variance of control input prior distribution.") + + +flags.DEFINE_float("prior_ar_atau", PRIOR_AR_AUTOCORRELATION, + "Initial autocorrelation of AR(1) priors.") +flags.DEFINE_float("prior_ar_nvar", PRIOR_AR_PROCESS_VAR, + "Initial noise variance for AR(1) priors.") +flags.DEFINE_boolean("do_train_prior_ar_atau", DO_TRAIN_PRIOR_AR_ATAU, + "Is the value for atau an init, or the constant value?") +flags.DEFINE_boolean("do_train_prior_ar_nvar", DO_TRAIN_PRIOR_AR_NVAR, + "Is the value for noise variance an init, or the constant \ + value?") + +# CONTROLLER +# This parameter critically controls whether or not there is a controller +# (along with controller encoders placed into the LFADS graph. If CO_DIM > +# 1, that means there is a 1 dimensional controller outputs, if equal to 0, +# then no controller. +flags.DEFINE_integer("co_dim", CO_DIM, + "Number of control net outputs (>0 builds that graph).") + +# The controller will be more powerful if it can see the encoding of the entire +# trial. However, this allows the controller to create inferred inputs that are +# acausal with respect to the actual data generation process. E.g. the data +# generator could have an input at time t, but the controller, after seeing the +# entirety of the trial could infer that the input is coming a little before +# time t, because there are no restrictions on the data the controller sees. +# One can force the controller to be causal (with respect to perturbations in +# the data generator) so that it only sees forward encodings of the data at time +# t that originate at times before or at time t. One can also control the data +# the controller sees by using an input lag (forward encoding at time [t-tlag] +# for controller input at time t. 
The same can be done in the reverse direction +# (controller input at time t from reverse encoding at time [t+tlag], in the +# case of an acausal controller). Setting this lag > 0 (even lag=1) can be a +# powerful way of avoiding very spiky decodes. Finally, one can manually control +# whether the factors at time t-1 are fed to the controller at time t. +# +# If you don't care about any of this, and just want to smooth your data, set +# do_causal_controller = False +# do_feed_factors_to_controller = True +# causal_input_lag = 0 +flags.DEFINE_boolean("do_causal_controller", + DO_CAUSAL_CONTROLLER, + "Restrict the controller create only causal inferred \ + inputs?") +# Strictly speaking, feeding either the factors or the rates to the controller +# violates causality, since the g0 gets to see all the data. This may or may not +# be only a theoretical concern. +flags.DEFINE_boolean("do_feed_factors_to_controller", + DO_FEED_FACTORS_TO_CONTROLLER, + "Should factors[t-1] be input to controller at time t?") +flags.DEFINE_string("feedback_factors_or_rates", FEEDBACK_FACTORS_OR_RATES, + "Feedback the factors or the rates to the controller? \ + Acceptable values: 'factors' or 'rates'.") +flags.DEFINE_integer("controller_input_lag", CONTROLLER_INPUT_LAG, + "Time lag on the encoding to controller t-lag for \ + forward, t+lag for reverse.") + +flags.DEFINE_integer("ci_enc_dim", CI_ENC_DIM, + "Cell hidden size, encoder of control inputs") +flags.DEFINE_integer("con_dim", CON_DIM, + "Cell hidden size, controller") + + +# OPTIMIZATION +flags.DEFINE_integer("batch_size", BATCH_SIZE, + "Batch size to use during training.") +flags.DEFINE_float("learning_rate_init", LEARNING_RATE_INIT, + "Learning rate initial value") +flags.DEFINE_float("learning_rate_decay_factor", LEARNING_RATE_DECAY_FACTOR, + "Learning rate decay, decay by this fraction every so \ + often.") +flags.DEFINE_float("learning_rate_stop", LEARNING_RATE_STOP, + "The lr is adaptively reduced, stop training at this value.") +# Rather put the learning rate on an exponentially decreasiong schedule, +# the current algorithm pays attention to the learning rate, and if it +# isn't regularly decreasing, it will decrease the learning rate. So far, +# it works fine, though it is not perfect. +flags.DEFINE_integer("learning_rate_n_to_compare", LEARNING_RATE_N_TO_COMPARE, + "Number of previous costs current cost has to be worse \ + than, to lower learning rate.") + +# This sets a value, above which, the gradients will be clipped. This hp +# is extremely useful to avoid an infrequent, but highly pathological +# problem whereby the gradient is so large that it destroys the +# optimziation by setting parameters too large, leading to a vicious cycle +# that ends in NaNs. If it's too large, it's useless, if it's too small, +# it essentially becomes the learning rate. It's pretty insensitive, though. +flags.DEFINE_float("max_grad_norm", MAX_GRAD_NORM, + "Max norm of gradient before clipping.") + +# If your optimizations start "NaN-ing out", reduce this value so that +# the values of the network don't grow out of control. Typically, once +# this parameter is set to a reasonable value, one stops having numerical +# problems. +flags.DEFINE_float("cell_clip_value", CELL_CLIP_VALUE, + "Max value recurrent cell can take before being clipped.") + +# This flag is used for an experiment where one sees if training a model with +# many days data can be used to learn the dynamics from a held-out days data. 
+# If you don't care about that particular experiment, this flag should always be +# false. +flags.DEFINE_boolean("do_train_io_only", DO_TRAIN_IO_ONLY, + "Train only the input (readin) and output (readout) \ + affine functions.") + +# This flag is used for an experiment where one wants to know if the dynamics +# learned by the generator generalize across conditions. In that case, you might +# train up a model on one set of data, and then only further train the encoder +# on another set of data (the conditions to be tested) so that the model is +# forced to use the same dynamics to describe that data. If you don't care about +# that particular experiment, this flag should always be false. +flags.DEFINE_boolean("do_train_encoder_only", DO_TRAIN_ENCODER_ONLY, + "Train only the encoder weights.") + +flags.DEFINE_boolean("do_reset_learning_rate", DO_RESET_LEARNING_RATE, + "Reset the learning rate to initial value.") + + +# for multi-session "stitching" models, the per-session readin matrices map from +# neurons to input factors which are fed into the shared encoder. These are +# initialized by alignment_matrix_cxf and alignment_bias_c in the input .h5 +# files. They can be fixed or made trainable. +flags.DEFINE_boolean("do_train_readin", DO_TRAIN_READIN, "Whether to train the \ + readin matrices and bias vectors. False leaves them fixed \ + at their initial values specified by the alignment \ + matrices and vectors.") + + +# OVERFITTING +# Dropout is done on the input data, on controller inputs (from +# encoder), on outputs from generator to factors. +flags.DEFINE_float("keep_prob", KEEP_PROB, "Dropout keep probability.") +# It appears that the system will happily fit spikes (blessing or +# curse, depending). You may not want this. Jittering the spikes a +# bit will help (-/+ bin size, as specified here). +flags.DEFINE_integer("temporal_spike_jitter_width", + TEMPORAL_SPIKE_JITTER_WIDTH, + "Shuffle spikes around this window.") + +# General note about helping ascribe controller inputs vs dynamics: +# +# If controller is heavily penalized, then it won't have any output. +# If dynamics are heavily penalized, then generator won't make +# dynamics. Note this l2 penalty is only on the recurrent portion of +# the RNNs, as dropout is also available, penalizing the feed-forward +# connections. +flags.DEFINE_float("l2_gen_scale", L2_GEN_SCALE, + "L2 regularization cost for the generator only.") +flags.DEFINE_float("l2_con_scale", L2_CON_SCALE, + "L2 regularization cost for the controller only.") +flags.DEFINE_float("co_mean_corr_scale", CO_MEAN_CORR_SCALE, + "Cost of correlation (thru time)in the means of \ + controller output.") + +# UNDERFITTING +# If the primary task of LFADS is "filtering" of data and not +# generation, then it is possible that the KL penalty is too strong. +# Empirically, we have found this to be the case. So we add a +# hyperparameter in front of the the two KL terms (one for the initial +# conditions to the generator, the other for the controller outputs). +# You should always think of the the default values as 1.0, and that +# leads to a standard VAE formulation whereby the numbers that are +# optimized are a lower-bound on the log-likelihood of the data. When +# these 2 HPs deviate from 1.0, one cannot make any statement about +# what those LL lower bounds mean anymore, and they cannot be compared +# (AFAIK). 
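+# Illustrative sketch only -- the helper below is hypothetical and is not
+# called anywhere in LFADS; it just makes the role of these KL weights and of
+# the warm-up schedule (kl_start_step / kl_increase_steps, defined further
+# down) concrete. A common choice is a weight that ramps from 0 to 1 starting
+# at kl_start_step, after which the ramped weight multiplies kl_ic_weight and
+# kl_co_weight on their respective KL terms, roughly:
+#   total = recon + w(step) * (kl_ic_weight * kl_ic + kl_co_weight * kl_co)
+# The exact schedule used by lfads.py may differ in detail.
+def _illustrative_kl_weight_schedule(step, start_step, increase_steps):
+  """Linear warm-up clipped to [0, 1]; a sketch, not the LFADS implementation."""
+  ramp = (step - start_step) / float(max(increase_steps, 1))
+  return float(np.clip(ramp, 0.0, 1.0))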
+flags.DEFINE_float("kl_ic_weight", KL_IC_WEIGHT, + "Strength of KL weight on initial conditions KL penatly.") +flags.DEFINE_float("kl_co_weight", KL_CO_WEIGHT, + "Strength of KL weight on controller output KL penalty.") + +# Sometimes the task can be sufficiently hard to learn that the +# optimizer takes the 'easy route', and simply minimizes the KL +# divergence, setting it to near zero, and the optimization gets +# stuck. These two parameters will help avoid that by by getting the +# optimization to 'latch' on to the main optimization, and only +# turning in the regularizers later. +flags.DEFINE_integer("kl_start_step", KL_START_STEP, + "Start increasing weight after this many steps.") +# training passes, not epochs, increase by 0.5 every kl_increase_steps +flags.DEFINE_integer("kl_increase_steps", KL_INCREASE_STEPS, + "Increase weight of kl cost to avoid local minimum.") +# Same story for l2 regularizer. One wants a simple generator, for scientific +# reasons, but not at the expense of hosing the optimization. +flags.DEFINE_integer("l2_start_step", L2_START_STEP, + "Start increasing l2 weight after this many steps.") +flags.DEFINE_integer("l2_increase_steps", L2_INCREASE_STEPS, + "Increase weight of l2 cost to avoid local minimum.") + +FLAGS = flags.FLAGS + + +def build_model(hps, kind="train", datasets=None): + """Builds a model from either random initialization, or saved parameters. + + Args: + hps: The hyper parameters for the model. + kind: (optional) The kind of model to build. Training vs inference require + different graphs. + datasets: The datasets structure (see top of lfads.py). + + Returns: + an LFADS model. + """ + + build_kind = kind + if build_kind == "write_model_params": + build_kind = "train" + with tf.variable_scope("LFADS", reuse=None): + model = LFADS(hps, kind=build_kind, datasets=datasets) + + if not os.path.exists(hps.lfads_save_dir): + print("Save directory %s does not exist, creating it." % hps.lfads_save_dir) + os.makedirs(hps.lfads_save_dir) + + cp_pb_ln = hps.checkpoint_pb_load_name + cp_pb_ln = 'checkpoint' if cp_pb_ln == "" else cp_pb_ln + if cp_pb_ln == 'checkpoint': + print("Loading latest training checkpoint in: ", hps.lfads_save_dir) + saver = model.seso_saver + elif cp_pb_ln == 'checkpoint_lve': + print("Loading lowest validation checkpoint in: ", hps.lfads_save_dir) + saver = model.lve_saver + else: + print("Loading checkpoint: ", cp_pb_ln, ", in: ", hps.lfads_save_dir) + saver = model.seso_saver + + ckpt = tf.train.get_checkpoint_state(hps.lfads_save_dir, + latest_filename=cp_pb_ln) + + session = tf.get_default_session() + print("ckpt: ", ckpt) + if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path): + print("Reading model parameters from %s" % ckpt.model_checkpoint_path) + saver.restore(session, ckpt.model_checkpoint_path) + else: + print("Created model with fresh parameters.") + if kind in ["posterior_sample_and_average", "posterior_push_mean", + "prior_sample", "write_model_params"]: + print("Possible error!!! 
You are running ", kind, " on a newly \ + initialized model!") + # cannot print ckpt.model_check_point path if no ckpt + print("Are you sure you sure a checkpoint in ", hps.lfads_save_dir, + " exists?") + + tf.global_variables_initializer().run() + + if ckpt: + train_step_str = re.search('-[0-9]+$', ckpt.model_checkpoint_path).group() + else: + train_step_str = '-0' + + fname = 'hyperparameters' + train_step_str + '.txt' + hp_fname = os.path.join(hps.lfads_save_dir, fname) + hps_for_saving = jsonify_dict(hps) + utils.write_data(hp_fname, hps_for_saving, use_json=True) + + return model + + +def jsonify_dict(d): + """Turns python booleans into strings so hps dict can be written in json. + Creates a shallow-copied dictionary first, then accomplishes string + conversion. + + Args: + d: hyperparameter dictionary + + Returns: hyperparameter dictionary with bool's as strings + """ + + d2 = d.copy() # shallow copy is fine by assumption of d being shallow + def jsonify_bool(boolean_value): + if boolean_value: + return "true" + else: + return "false" + + for key in d2.keys(): + if isinstance(d2[key], bool): + d2[key] = jsonify_bool(d2[key]) + return d2 + + +def build_hyperparameter_dict(flags): + """Simple script for saving hyper parameters. Under the hood the + flags structure isn't a dictionary, so it has to be simplified since we + want to be able to view file as text. + + Args: + flags: From tf.app.flags + + Returns: + dictionary of hyper parameters (ignoring other flag types). + """ + d = {} + # Data + d['output_dist'] = flags.output_dist + d['data_dir'] = flags.data_dir + d['lfads_save_dir'] = flags.lfads_save_dir + d['checkpoint_pb_load_name'] = flags.checkpoint_pb_load_name + d['checkpoint_name'] = flags.checkpoint_name + d['output_filename_stem'] = flags.output_filename_stem + d['max_ckpt_to_keep'] = flags.max_ckpt_to_keep + d['max_ckpt_to_keep_lve'] = flags.max_ckpt_to_keep_lve + d['ps_nexamples_to_process'] = flags.ps_nexamples_to_process + d['ext_input_dim'] = flags.ext_input_dim + d['data_filename_stem'] = flags.data_filename_stem + d['device'] = flags.device + d['csv_log'] = flags.csv_log + d['num_steps_for_gen_ic'] = flags.num_steps_for_gen_ic + d['inject_ext_input_to_gen'] = flags.inject_ext_input_to_gen + # Cell + d['cell_weight_scale'] = flags.cell_weight_scale + # Generation + d['ic_dim'] = flags.ic_dim + d['factors_dim'] = flags.factors_dim + d['ic_enc_dim'] = flags.ic_enc_dim + d['gen_dim'] = flags.gen_dim + d['gen_cell_input_weight_scale'] = flags.gen_cell_input_weight_scale + d['gen_cell_rec_weight_scale'] = flags.gen_cell_rec_weight_scale + # KL distributions + d['ic_prior_var_min'] = flags.ic_prior_var_min + d['ic_prior_var_scale'] = flags.ic_prior_var_scale + d['ic_prior_var_max'] = flags.ic_prior_var_max + d['ic_post_var_min'] = flags.ic_post_var_min + d['co_prior_var_scale'] = flags.co_prior_var_scale + d['prior_ar_atau'] = flags.prior_ar_atau + d['prior_ar_nvar'] = flags.prior_ar_nvar + d['do_train_prior_ar_atau'] = flags.do_train_prior_ar_atau + d['do_train_prior_ar_nvar'] = flags.do_train_prior_ar_nvar + # Controller + d['do_causal_controller'] = flags.do_causal_controller + d['controller_input_lag'] = flags.controller_input_lag + d['do_feed_factors_to_controller'] = flags.do_feed_factors_to_controller + d['feedback_factors_or_rates'] = flags.feedback_factors_or_rates + d['co_dim'] = flags.co_dim + d['ci_enc_dim'] = flags.ci_enc_dim + d['con_dim'] = flags.con_dim + d['co_mean_corr_scale'] = flags.co_mean_corr_scale + # Optimization + d['batch_size'] = flags.batch_size 
+ d['learning_rate_init'] = flags.learning_rate_init + d['learning_rate_decay_factor'] = flags.learning_rate_decay_factor + d['learning_rate_stop'] = flags.learning_rate_stop + d['learning_rate_n_to_compare'] = flags.learning_rate_n_to_compare + d['max_grad_norm'] = flags.max_grad_norm + d['cell_clip_value'] = flags.cell_clip_value + d['do_train_io_only'] = flags.do_train_io_only + d['do_train_encoder_only'] = flags.do_train_encoder_only + d['do_reset_learning_rate'] = flags.do_reset_learning_rate + d['do_train_readin'] = flags.do_train_readin + + # Overfitting + d['keep_prob'] = flags.keep_prob + d['temporal_spike_jitter_width'] = flags.temporal_spike_jitter_width + d['l2_gen_scale'] = flags.l2_gen_scale + d['l2_con_scale'] = flags.l2_con_scale + # Underfitting + d['kl_ic_weight'] = flags.kl_ic_weight + d['kl_co_weight'] = flags.kl_co_weight + d['kl_start_step'] = flags.kl_start_step + d['kl_increase_steps'] = flags.kl_increase_steps + d['l2_start_step'] = flags.l2_start_step + d['l2_increase_steps'] = flags.l2_increase_steps + d['_clip_value'] = 80 # bounds the tf.exp to avoid INF + + return d + + +class hps_dict_to_obj(dict): + """Helper class allowing us to access hps dictionary more easily.""" + + def __getattr__(self, key): + if key in self: + return self[key] + else: + assert False, ("%s does not exist." % key) + def __setattr__(self, key, value): + self[key] = value + + +def train(hps, datasets): + """Train the LFADS model. + + Args: + hps: The dictionary of hyperparameters. + datasets: A dictionary of data dictionaries. The dataset dict is simply a + name(string)-> data dictionary mapping (See top of lfads.py). + """ + model = build_model(hps, kind="train", datasets=datasets) + if hps.do_reset_learning_rate: + sess = tf.get_default_session() + sess.run(model.learning_rate.initializer) + + model.train_model(datasets) + + +def write_model_runs(hps, datasets, output_fname=None, push_mean=False): + """Run the model on the data in data_dict, and save the computed values. + + LFADS generates a number of outputs for each examples, and these are all + saved. They are: + The mean and variance of the prior of g0. + The mean and variance of approximate posterior of g0. + The control inputs (if enabled) + The initial conditions, g0, for all examples. + The generator states for all time. + The factors for all time. + The rates for all time. + + Args: + hps: The dictionary of hyperparameters. + datasets: A dictionary of data dictionaries. The dataset dict is simply a + name(string)-> data dictionary mapping (See top of lfads.py). + output_fname (optional): output filename stem to write the model runs. + push_mean: if False (default), generates batch_size samples for each trial + and averages the results. if True, runs each trial once without noise, + pushing the posterior mean initial conditions and control inputs through + the trained model. False is used for posterior_sample_and_average, True + is used for posterior_push_mean. + """ + model = build_model(hps, kind=hps.kind, datasets=datasets) + model.write_model_runs(datasets, output_fname, push_mean) + + +def write_model_samples(hps, datasets, dataset_name=None, output_fname=None): + """Use the prior distribution to generate samples from the model. + Generates batch_size number of samples (set through FLAGS). + + LFADS generates a number of outputs for each examples, and these are all + saved. They are: + The mean and variance of the prior of g0. + The control inputs (if enabled) + The initial conditions, g0, for all examples. 
+ The generator states for all time. + The factors for all time. + The output distribution parameters (e.g. rates) for all time. + + Args: + hps: The dictionary of hyperparameters. + datasets: A dictionary of data dictionaries. The dataset dict is simply a + name(string)-> data dictionary mapping (See top of lfads.py). + dataset_name: The name of the dataset to grab the factors -> rates + alignment matrices from. Only a concern with models trained on + multi-session data. By default, uses the first dataset in the data dict. + output_fname: The name prefix of the file in which to save the generated + samples. + """ + if not output_fname: + output_fname = "model_runs_" + hps.kind + else: + output_fname = output_fname + "model_runs_" + hps.kind + if not dataset_name: + dataset_name = datasets.keys()[0] + else: + if dataset_name not in datasets.keys(): + raise ValueError("Invalid dataset name '%s'."%(dataset_name)) + model = build_model(hps, kind=hps.kind, datasets=datasets) + model.write_model_samples(dataset_name, output_fname) + + +def write_model_parameters(hps, output_fname=None, datasets=None): + """Save all the model parameters + + Save all the parameters to hps.lfads_save_dir. + + Args: + hps: The dictionary of hyperparameters. + output_fname: The prefix of the file in which to save the generated + samples. + datasets: A dictionary of data dictionaries. The dataset dict is simply a + name(string)-> data dictionary mapping (See top of lfads.py). + """ + if not output_fname: + output_fname = "model_params" + else: + output_fname = output_fname + "_model_params" + fname = os.path.join(hps.lfads_save_dir, output_fname) + print("Writing model parameters to: ", fname) + # save the optimizer params as well + model = build_model(hps, kind="write_model_params", datasets=datasets) + model_params = model.eval_model_parameters(use_nested=False, + include_strs="LFADS") + utils.write_data(fname, model_params, compression=None) + print("Done.") + + +def clean_data_dict(data_dict): + """Add some key/value pairs to the data dict, if they are missing. + Args: + data_dict - dictionary containing data for LFADS + Returns: + data_dict with some keys filled in, if they are absent. + """ + + keys = ['train_truth', 'train_ext_input', 'valid_data', + 'valid_truth', 'valid_ext_input', 'valid_train'] + for k in keys: + if k not in data_dict: + data_dict[k] = None + + return data_dict + + +def load_datasets(data_dir, data_filename_stem): + """Load the datasets from a specified directory. + + Example files look like + >data_dir/my_dataset_first_day + >data_dir/my_dataset_second_day + + If my_dataset (filename) stem is in the directory, the read routine will try + and load it. The datasets dictionary will then look like + dataset['first_day'] -> (first day data dictionary) + dataset['second_day'] -> (first day data dictionary) + + Args: + data_dir: The directory from which to load the datasets. + data_filename_stem: The stem of the filename for the datasets. + + Returns: + datasets: a dataset dictionary, with one name->data dictionary pair for + each dataset file. 
+ """ + print("Reading data from ", data_dir) + datasets = utils.read_datasets(data_dir, data_filename_stem) + for k, data_dict in datasets.items(): + datasets[k] = clean_data_dict(data_dict) + + train_total_size = len(data_dict['train_data']) + if train_total_size == 0: + print("Did not load training set.") + else: + print("Found training set with number examples: ", train_total_size) + + valid_total_size = len(data_dict['valid_data']) + if valid_total_size == 0: + print("Did not load validation set.") + else: + print("Found validation set with number examples: ", valid_total_size) + + return datasets + + +def main(_): + """Get this whole shindig off the ground.""" + d = build_hyperparameter_dict(FLAGS) + hps = hps_dict_to_obj(d) # hyper parameters + kind = FLAGS.kind + + # Read the data, if necessary. + train_set = valid_set = None + if kind in ["train", "posterior_sample_and_average", "posterior_push_mean", + "prior_sample", "write_model_params"]: + datasets = load_datasets(hps.data_dir, hps.data_filename_stem) + else: + raise ValueError('Kind {} is not supported.'.format(kind)) + + # infer the dataset names and dataset dimensions from the loaded files + hps.kind = kind # needs to be added here, cuz not saved as hyperparam + hps.dataset_names = [] + hps.dataset_dims = {} + for key in datasets: + hps.dataset_names.append(key) + hps.dataset_dims[key] = datasets[key]['data_dim'] + + # also store down the dimensionality of the data + # - just pull from one set, required to be same for all sets + hps.num_steps = datasets.values()[0]['num_steps'] + hps.ndatasets = len(hps.dataset_names) + + if hps.num_steps_for_gen_ic > hps.num_steps: + hps.num_steps_for_gen_ic = hps.num_steps + + # Build and run the model, for varying purposes. + config = tf.ConfigProto(allow_soft_placement=True, + log_device_placement=False) + if FLAGS.allow_gpu_growth: + config.gpu_options.allow_growth = True + sess = tf.Session(config=config) + with sess.as_default(): + with tf.device(hps.device): + if kind == "train": + train(hps, datasets) + elif kind == "posterior_sample_and_average": + write_model_runs(hps, datasets, hps.output_filename_stem, + push_mean=False) + elif kind == "posterior_push_mean": + write_model_runs(hps, datasets, hps.output_filename_stem, + push_mean=True) + elif kind == "prior_sample": + write_model_samples(hps, datasets, hps.output_filename_stem) + elif kind == "write_model_params": + write_model_parameters(hps, hps.output_filename_stem, datasets) + else: + assert False, ("Kind %s is not implemented. " % kind) + + +if __name__ == "__main__": + tf.app.run() diff --git a/models/research/lfads/synth_data/generate_chaotic_rnn_data.py b/models/research/lfads/synth_data/generate_chaotic_rnn_data.py new file mode 100644 index 0000000000000000000000000000000000000000..3de72e58b2208eacf508e6048d3fb6d66bf2e167 --- /dev/null +++ b/models/research/lfads/synth_data/generate_chaotic_rnn_data.py @@ -0,0 +1,200 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ============================================================================== +from __future__ import print_function + +import h5py +import numpy as np +import os +import tensorflow as tf # used for flags here + +from utils import write_datasets +from synthetic_data_utils import add_alignment_projections, generate_data +from synthetic_data_utils import generate_rnn, get_train_n_valid_inds +from synthetic_data_utils import nparray_and_transpose +from synthetic_data_utils import spikify_data, gaussify_data, split_list_by_inds +import matplotlib +import matplotlib.pyplot as plt +import scipy.signal + +matplotlib.rcParams['image.interpolation'] = 'nearest' +DATA_DIR = "rnn_synth_data_v1.0" + +flags = tf.app.flags +flags.DEFINE_string("save_dir", "/tmp/" + DATA_DIR + "/", + "Directory for saving data.") +flags.DEFINE_string("datafile_name", "thits_data", + "Name of data file for input case.") +flags.DEFINE_string("noise_type", "poisson", "Noise type for data.") +flags.DEFINE_integer("synth_data_seed", 5, "Random seed for RNN generation.") +flags.DEFINE_float("T", 1.0, "Time in seconds to generate.") +flags.DEFINE_integer("C", 100, "Number of conditions") +flags.DEFINE_integer("N", 50, "Number of units for the RNN") +flags.DEFINE_integer("S", 50, "Number of sampled units from RNN") +flags.DEFINE_integer("npcs", 10, "Number of PCS for multi-session case.") +flags.DEFINE_float("train_percentage", 4.0/5.0, + "Percentage of train vs validation trials") +flags.DEFINE_integer("nreplications", 40, + "Number of noise replications of the same underlying rates.") +flags.DEFINE_float("g", 1.5, "Complexity of dynamics") +flags.DEFINE_float("x0_std", 1.0, + "Volume from which to pull initial conditions (affects diversity of dynamics.") +flags.DEFINE_float("tau", 0.025, "Time constant of RNN") +flags.DEFINE_float("dt", 0.010, "Time bin") +flags.DEFINE_float("input_magnitude", 20.0, + "For the input case, what is the value of the input?") +flags.DEFINE_float("max_firing_rate", 30.0, "Map 1.0 of RNN to a spikes per second") +FLAGS = flags.FLAGS + + +# Note that with N small, (as it is 25 above), the finite size effects +# will have pretty dramatic effects on the dynamics of the random RNN. +# If you want more complex dynamics, you'll have to run the script a +# lot, or increase N (or g). + +# Getting hard vs. easy data can be a little stochastic, so we set the seed. + +# Pull out some commonly used parameters. +# These are user parameters (configuration) +rng = np.random.RandomState(seed=FLAGS.synth_data_seed) +T = FLAGS.T +C = FLAGS.C +N = FLAGS.N +S = FLAGS.S +input_magnitude = FLAGS.input_magnitude +nreplications = FLAGS.nreplications +E = nreplications * C # total number of trials +# S is the number of measurements in each datasets, w/ each +# dataset having a different set of observations. +ndatasets = N/S # ok if rounded down +train_percentage = FLAGS.train_percentage +ntime_steps = int(T / FLAGS.dt) +# End of user parameters + +rnn = generate_rnn(rng, N, FLAGS.g, FLAGS.tau, FLAGS.dt, FLAGS.max_firing_rate) + +# Check to make sure the RNN is the one we used in the paper. +if N == 50: + assert abs(rnn['W'][0,0] - 0.06239899) < 1e-8, 'Error in random seed?' + rem_check = nreplications * train_percentage + assert abs(rem_check - int(rem_check)) < 1e-8, \ + 'Train percentage * nreplications should be integral number.' + + +# Initial condition generation, and condition label generation. 
This +# happens outside of the dataset loop, so that all datasets have the +# same conditions, which is similar to a neurophys setup. +condition_number = 0 +x0s = [] +condition_labels = [] +for c in range(C): + x0 = FLAGS.x0_std * rng.randn(N, 1) + x0s.append(np.tile(x0, nreplications)) # replicate x0 nreplications times + # replicate the condition label nreplications times + for ns in range(nreplications): + condition_labels.append(condition_number) + condition_number += 1 +x0s = np.concatenate(x0s, axis=1) + +# Containers for storing data across data. +datasets = {} +for n in range(ndatasets): + print(n+1, " of ", ndatasets) + + # First generate all firing rates. in the next loop, generate all + # replications this allows the random state for rate generation to be + # independent of n_replications. + dataset_name = 'dataset_N' + str(N) + '_S' + str(S) + if S < N: + dataset_name += '_n' + str(n+1) + + # Sample neuron subsets. The assumption is the PC axes of the RNN + # are not unit aligned, so sampling units is adequate to sample all + # the high-variance PCs. + P_sxn = np.eye(S,N) + for m in range(n): + P_sxn = np.roll(P_sxn, S, axis=1) + + if input_magnitude > 0.0: + # time of "hits" randomly chosen between [1/4 and 3/4] of total time + input_times = rng.choice(int(ntime_steps/2), size=[E]) + int(ntime_steps/4) + else: + input_times = None + + rates, x0s, inputs = \ + generate_data(rnn, T=T, E=E, x0s=x0s, P_sxn=P_sxn, + input_magnitude=input_magnitude, + input_times=input_times) + + if FLAGS.noise_type == "poisson": + noisy_data = spikify_data(rates, rng, rnn['dt'], rnn['max_firing_rate']) + elif FLAGS.noise_type == "gaussian": + noisy_data = gaussify_data(rates, rng, rnn['dt'], rnn['max_firing_rate']) + else: + raise ValueError("Only noise types supported are poisson or gaussian") + + # split into train and validation sets + train_inds, valid_inds = get_train_n_valid_inds(E, train_percentage, + nreplications) + + # Split the data, inputs, labels and times into train vs. validation. + rates_train, rates_valid = \ + split_list_by_inds(rates, train_inds, valid_inds) + noisy_data_train, noisy_data_valid = \ + split_list_by_inds(noisy_data, train_inds, valid_inds) + input_train, inputs_valid = \ + split_list_by_inds(inputs, train_inds, valid_inds) + condition_labels_train, condition_labels_valid = \ + split_list_by_inds(condition_labels, train_inds, valid_inds) + input_times_train, input_times_valid = \ + split_list_by_inds(input_times, train_inds, valid_inds) + + # Turn rates, noisy_data, and input into numpy arrays. + rates_train = nparray_and_transpose(rates_train) + rates_valid = nparray_and_transpose(rates_valid) + noisy_data_train = nparray_and_transpose(noisy_data_train) + noisy_data_valid = nparray_and_transpose(noisy_data_valid) + input_train = nparray_and_transpose(input_train) + inputs_valid = nparray_and_transpose(inputs_valid) + + # Note that we put these 'truth' rates and input into this + # structure, the only data that is used in LFADS are the noisy + # data e.g. spike trains. The rest is either for printing or posterity. 
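+  # (LFADS itself only fits to the noisy 'train_data' / 'valid_data' arrays;
+  # the '*_truth', input, and label entries are optional extras, and
+  # clean_data_dict in run_lfads.py fills a handful of missing optional keys
+  # with None.)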
+ data = {'train_truth': rates_train, + 'valid_truth': rates_valid, + 'input_train_truth' : input_train, + 'input_valid_truth' : inputs_valid, + 'train_data' : noisy_data_train, + 'valid_data' : noisy_data_valid, + 'train_percentage' : train_percentage, + 'nreplications' : nreplications, + 'dt' : rnn['dt'], + 'input_magnitude' : input_magnitude, + 'input_times_train' : input_times_train, + 'input_times_valid' : input_times_valid, + 'P_sxn' : P_sxn, + 'condition_labels_train' : condition_labels_train, + 'condition_labels_valid' : condition_labels_valid, + 'conversion_factor': 1.0 / rnn['conversion_factor']} + datasets[dataset_name] = data + +if S < N: + # Note that this isn't necessary for this synthetic example, but + # it's useful to see how the input factor matrices were initialized + # for actual neurophysiology data. + datasets = add_alignment_projections(datasets, npcs=FLAGS.npcs) + +# Write out the datasets. +write_datasets(FLAGS.save_dir, FLAGS.datafile_name, datasets) diff --git a/models/research/lfads/synth_data/generate_itb_data.py b/models/research/lfads/synth_data/generate_itb_data.py new file mode 100644 index 0000000000000000000000000000000000000000..66bc45d02e962915eb4be09d41da3162763ad40c --- /dev/null +++ b/models/research/lfads/synth_data/generate_itb_data.py @@ -0,0 +1,209 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ============================================================================== +from __future__ import print_function + +import h5py +import numpy as np +import os +from six.moves import xrange +import tensorflow as tf + +from utils import write_datasets +from synthetic_data_utils import normalize_rates +from synthetic_data_utils import get_train_n_valid_inds, nparray_and_transpose +from synthetic_data_utils import spikify_data, split_list_by_inds + +DATA_DIR = "rnn_synth_data_v1.0" + +flags = tf.app.flags +flags.DEFINE_string("save_dir", "/tmp/" + DATA_DIR + "/", + "Directory for saving data.") +flags.DEFINE_string("datafile_name", "itb_rnn", + "Name of data file for input case.") +flags.DEFINE_integer("synth_data_seed", 5, "Random seed for RNN generation.") +flags.DEFINE_float("T", 1.0, "Time in seconds to generate.") +flags.DEFINE_integer("C", 800, "Number of conditions") +flags.DEFINE_integer("N", 50, "Number of units for the RNN") +flags.DEFINE_float("train_percentage", 4.0/5.0, + "Percentage of train vs validation trials") +flags.DEFINE_integer("nreplications", 5, + "Number of spikifications of the same underlying rates.") +flags.DEFINE_float("tau", 0.025, "Time constant of RNN") +flags.DEFINE_float("dt", 0.010, "Time bin") +flags.DEFINE_float("max_firing_rate", 30.0, + "Map 1.0 of RNN to a spikes per second") +flags.DEFINE_float("u_std", 0.25, + "Std dev of input to integration to bound model") +flags.DEFINE_string("checkpoint_path", "SAMPLE_CHECKPOINT", + """Path to directory with checkpoints of model + trained on integration to bound task. 
Currently this + is a placeholder which tells the code to grab the + checkpoint that is provided with the code + (in /trained_itb/..). If you have your own checkpoint + you would like to restore, you would point it to + that path.""") +FLAGS = flags.FLAGS + + +class IntegrationToBoundModel: + def __init__(self, N): + scale = 0.8 / float(N**0.5) + self.N = N + self.Wh_nxn = tf.Variable(tf.random_normal([N, N], stddev=scale)) + self.b_1xn = tf.Variable(tf.zeros([1, N])) + self.Bu_1xn = tf.Variable(tf.zeros([1, N])) + self.Wro_nxo = tf.Variable(tf.random_normal([N, 1], stddev=scale)) + self.bro_o = tf.Variable(tf.zeros([1])) + + def call(self, h_tm1_bxn, u_bx1): + act_t_bxn = tf.matmul(h_tm1_bxn, self.Wh_nxn) + self.b_1xn + u_bx1 * self.Bu_1xn + h_t_bxn = tf.nn.tanh(act_t_bxn) + z_t = tf.nn.xw_plus_b(h_t_bxn, self.Wro_nxo, self.bro_o) + return z_t, h_t_bxn + +def get_data_batch(batch_size, T, rng, u_std): + u_bxt = rng.randn(batch_size, T) * u_std + running_sum_b = np.zeros([batch_size]) + labels_bxt = np.zeros([batch_size, T]) + for t in xrange(T): + running_sum_b += u_bxt[:, t] + labels_bxt[:, t] += running_sum_b + labels_bxt = np.clip(labels_bxt, -1, 1) + return u_bxt, labels_bxt + + +rng = np.random.RandomState(seed=FLAGS.synth_data_seed) +u_rng = np.random.RandomState(seed=FLAGS.synth_data_seed+1) +T = FLAGS.T +C = FLAGS.C +N = FLAGS.N # must be same N as in trained model (provided example is N = 50) +nreplications = FLAGS.nreplications +E = nreplications * C # total number of trials +train_percentage = FLAGS.train_percentage +ntimesteps = int(T / FLAGS.dt) +batch_size = 1 # gives one example per ntrial + +model = IntegrationToBoundModel(N) +inputs_ph_t = [tf.placeholder(tf.float32, + shape=[None, 1]) for _ in range(ntimesteps)] +state = tf.zeros([batch_size, N]) +saver = tf.train.Saver() + +P_nxn = rng.randn(N,N) / np.sqrt(N) # random projections + +# unroll RNN for T timesteps +outputs_t = [] +states_t = [] + +for inp in inputs_ph_t: + output, state = model.call(state, inp) + outputs_t.append(output) + states_t.append(state) + +with tf.Session() as sess: + # restore the latest model ckpt + if FLAGS.checkpoint_path == "SAMPLE_CHECKPOINT": + dir_path = os.path.dirname(os.path.realpath(__file__)) + model_checkpoint_path = os.path.join(dir_path, "trained_itb/model-65000") + else: + model_checkpoint_path = FLAGS.checkpoint_path + try: + saver.restore(sess, model_checkpoint_path) + print ('Model restored from', model_checkpoint_path) + except: + assert False, ("No checkpoints to restore from, is the path %s correct?" 
+ %model_checkpoint_path) + + # generate data for trials + data_e = [] + u_e = [] + outs_e = [] + for c in range(C): + u_1xt, outs_1xt = get_data_batch(batch_size, ntimesteps, u_rng, FLAGS.u_std) + + feed_dict = {} + for t in xrange(ntimesteps): + feed_dict[inputs_ph_t[t]] = np.reshape(u_1xt[:,t], (batch_size,-1)) + + states_t_bxn, outputs_t_bxn = sess.run([states_t, outputs_t], + feed_dict=feed_dict) + states_nxt = np.transpose(np.squeeze(np.asarray(states_t_bxn))) + outputs_t_bxn = np.squeeze(np.asarray(outputs_t_bxn)) + r_sxt = np.dot(P_nxn, states_nxt) + + for s in xrange(nreplications): + data_e.append(r_sxt) + u_e.append(u_1xt) + outs_e.append(outputs_t_bxn) + + truth_data_e = normalize_rates(data_e, E, N) + +spiking_data_e = spikify_data(truth_data_e, rng, dt=FLAGS.dt, + max_firing_rate=FLAGS.max_firing_rate) +train_inds, valid_inds = get_train_n_valid_inds(E, train_percentage, + nreplications) + +data_train_truth, data_valid_truth = split_list_by_inds(truth_data_e, + train_inds, + valid_inds) +data_train_spiking, data_valid_spiking = split_list_by_inds(spiking_data_e, + train_inds, + valid_inds) + +data_train_truth = nparray_and_transpose(data_train_truth) +data_valid_truth = nparray_and_transpose(data_valid_truth) +data_train_spiking = nparray_and_transpose(data_train_spiking) +data_valid_spiking = nparray_and_transpose(data_valid_spiking) + +# save down the inputs used to generate this data +train_inputs_u, valid_inputs_u = split_list_by_inds(u_e, + train_inds, + valid_inds) +train_inputs_u = nparray_and_transpose(train_inputs_u) +valid_inputs_u = nparray_and_transpose(valid_inputs_u) + +# save down the network outputs (may be useful later) +train_outputs_u, valid_outputs_u = split_list_by_inds(outs_e, + train_inds, + valid_inds) +train_outputs_u = np.array(train_outputs_u) +valid_outputs_u = np.array(valid_outputs_u) + + +data = { 'train_truth': data_train_truth, + 'valid_truth': data_valid_truth, + 'train_data' : data_train_spiking, + 'valid_data' : data_valid_spiking, + 'train_percentage' : train_percentage, + 'nreplications' : nreplications, + 'dt' : FLAGS.dt, + 'u_std' : FLAGS.u_std, + 'max_firing_rate': FLAGS.max_firing_rate, + 'train_inputs_u': train_inputs_u, + 'valid_inputs_u': valid_inputs_u, + 'train_outputs_u': train_outputs_u, + 'valid_outputs_u': valid_outputs_u, + 'conversion_factor' : FLAGS.max_firing_rate/(1.0/FLAGS.dt) } + +# just one dataset here +datasets = {} +dataset_name = 'dataset_N' + str(N) +datasets[dataset_name] = data + +# write out the dataset +write_datasets(FLAGS.save_dir, FLAGS.datafile_name, datasets) +print ('Saved to ', os.path.join(FLAGS.save_dir, + FLAGS.datafile_name + '_' + dataset_name)) diff --git a/models/research/lfads/synth_data/generate_labeled_rnn_data.py b/models/research/lfads/synth_data/generate_labeled_rnn_data.py new file mode 100644 index 0000000000000000000000000000000000000000..0695585486534428c77e328e7ee1de755292d6c0 --- /dev/null +++ b/models/research/lfads/synth_data/generate_labeled_rnn_data.py @@ -0,0 +1,147 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# ============================================================================== +from __future__ import print_function + +import os +import h5py +import numpy as np +from six.moves import xrange + +from synthetic_data_utils import generate_data, generate_rnn +from synthetic_data_utils import get_train_n_valid_inds +from synthetic_data_utils import nparray_and_transpose +from synthetic_data_utils import spikify_data, split_list_by_inds +import tensorflow as tf +from utils import write_datasets + +DATA_DIR = "rnn_synth_data_v1.0" + +flags = tf.app.flags +flags.DEFINE_string("save_dir", "/tmp/" + DATA_DIR + "/", + "Directory for saving data.") +flags.DEFINE_string("datafile_name", "conditioned_rnn_data", + "Name of data file for input case.") +flags.DEFINE_integer("synth_data_seed", 5, "Random seed for RNN generation.") +flags.DEFINE_float("T", 1.0, "Time in seconds to generate.") +flags.DEFINE_integer("C", 400, "Number of conditions") +flags.DEFINE_integer("N", 50, "Number of units for the RNN") +flags.DEFINE_float("train_percentage", 4.0/5.0, + "Percentage of train vs validation trials") +flags.DEFINE_integer("nreplications", 10, + "Number of spikifications of the same underlying rates.") +flags.DEFINE_float("g", 1.5, "Complexity of dynamics") +flags.DEFINE_float("x0_std", 1.0, + "Volume from which to pull initial conditions (affects diversity of dynamics.") +flags.DEFINE_float("tau", 0.025, "Time constant of RNN") +flags.DEFINE_float("dt", 0.010, "Time bin") +flags.DEFINE_float("max_firing_rate", 30.0, "Map 1.0 of RNN to a spikes per second") +FLAGS = flags.FLAGS + +rng = np.random.RandomState(seed=FLAGS.synth_data_seed) +rnn_rngs = [np.random.RandomState(seed=FLAGS.synth_data_seed+1), + np.random.RandomState(seed=FLAGS.synth_data_seed+2)] +T = FLAGS.T +C = FLAGS.C +N = FLAGS.N +nreplications = FLAGS.nreplications +E = nreplications * C +train_percentage = FLAGS.train_percentage +ntimesteps = int(T / FLAGS.dt) + +rnn_a = generate_rnn(rnn_rngs[0], N, FLAGS.g, FLAGS.tau, FLAGS.dt, + FLAGS.max_firing_rate) +rnn_b = generate_rnn(rnn_rngs[1], N, FLAGS.g, FLAGS.tau, FLAGS.dt, + FLAGS.max_firing_rate) +rnns = [rnn_a, rnn_b] + +# pick which RNN is used on each trial +rnn_to_use = rng.randint(2, size=E) +ext_input = np.repeat(np.expand_dims(rnn_to_use, axis=1), ntimesteps, axis=1) +ext_input = np.expand_dims(ext_input, axis=2) # these are "a's" in the paper + +x0s = [] +condition_labels = [] +condition_number = 0 +for c in range(C): + x0 = FLAGS.x0_std * rng.randn(N, 1) + x0s.append(np.tile(x0, nreplications)) + for ns in range(nreplications): + condition_labels.append(condition_number) + condition_number += 1 +x0s = np.concatenate(x0s, axis=1) + +P_nxn = rng.randn(N, N) / np.sqrt(N) + +# generate trials for both RNNs +rates_a, x0s_a, _ = generate_data(rnn_a, T=T, E=E, x0s=x0s, P_sxn=P_nxn, + input_magnitude=0.0, input_times=None) +spikes_a = spikify_data(rates_a, rng, rnn_a['dt'], rnn_a['max_firing_rate']) + +rates_b, x0s_b, _ = generate_data(rnn_b, T=T, E=E, x0s=x0s, P_sxn=P_nxn, + input_magnitude=0.0, input_times=None) +spikes_b = spikify_data(rates_b, rng, rnn_b['dt'], rnn_b['max_firing_rate']) + +# not the best way to do this but E is small enough +rates = [] +spikes = [] +for trial in xrange(E): + if rnn_to_use[trial] == 0: + rates.append(rates_a[trial]) + spikes.append(spikes_a[trial]) + else: + rates.append(rates_b[trial]) + spikes.append(spikes_b[trial]) + +# split into train and 
validation sets +train_inds, valid_inds = get_train_n_valid_inds(E, train_percentage, + nreplications) + +rates_train, rates_valid = split_list_by_inds(rates, train_inds, valid_inds) +spikes_train, spikes_valid = split_list_by_inds(spikes, train_inds, valid_inds) +condition_labels_train, condition_labels_valid = split_list_by_inds( + condition_labels, train_inds, valid_inds) +ext_input_train, ext_input_valid = split_list_by_inds( + ext_input, train_inds, valid_inds) + +rates_train = nparray_and_transpose(rates_train) +rates_valid = nparray_and_transpose(rates_valid) +spikes_train = nparray_and_transpose(spikes_train) +spikes_valid = nparray_and_transpose(spikes_valid) + +# add train_ext_input and valid_ext input +data = {'train_truth': rates_train, + 'valid_truth': rates_valid, + 'train_data' : spikes_train, + 'valid_data' : spikes_valid, + 'train_ext_input' : np.array(ext_input_train), + 'valid_ext_input': np.array(ext_input_valid), + 'train_percentage' : train_percentage, + 'nreplications' : nreplications, + 'dt' : FLAGS.dt, + 'P_sxn' : P_nxn, + 'condition_labels_train' : condition_labels_train, + 'condition_labels_valid' : condition_labels_valid, + 'conversion_factor': 1.0 / rnn_a['conversion_factor']} + +# just one dataset here +datasets = {} +dataset_name = 'dataset_N' + str(N) +datasets[dataset_name] = data + +# write out the dataset +write_datasets(FLAGS.save_dir, FLAGS.datafile_name, datasets) +print ('Saved to ', os.path.join(FLAGS.save_dir, + FLAGS.datafile_name + '_' + dataset_name)) diff --git a/models/research/lfads/synth_data/run_generate_synth_data.sh b/models/research/lfads/synth_data/run_generate_synth_data.sh new file mode 100644 index 0000000000000000000000000000000000000000..9ebc8ce2e5eec1e21fd839db18f247b38ebfde38 --- /dev/null +++ b/models/research/lfads/synth_data/run_generate_synth_data.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ============================================================================== + +SYNTH_PATH=/tmp/rnn_synth_data_v1.0/ + + echo "Generating chaotic rnn data with no input pulses (g=1.5) with spiking noise" + python generate_chaotic_rnn_data.py --save_dir=$SYNTH_PATH --datafile_name=chaotic_rnn_no_inputs --synth_data_seed=5 --T=1.0 --C=400 --N=50 --S=50 --train_percentage=0.8 --nreplications=10 --g=1.5 --x0_std=1.0 --tau=0.025 --dt=0.01 --input_magnitude=0.0 --max_firing_rate=30.0 --noise_type='poisson' + +echo "Generating chaotic rnn data with no input pulses (g=1.5) with Gaussian noise" +python generate_chaotic_rnn_data.py --save_dir=$SYNTH_PATH --datafile_name=gaussian_chaotic_rnn_no_inputs --synth_data_seed=5 --T=1.0 --C=400 --N=50 --S=50 --train_percentage=0.8 --nreplications=10 --g=1.5 --x0_std=1.0 --tau=0.025 --dt=0.01 --input_magnitude=0.0 --max_firing_rate=30.0 --noise_type='gaussian' + + echo "Generating chaotic rnn data with input pulses (g=1.5)" + python generate_chaotic_rnn_data.py --save_dir=$SYNTH_PATH --datafile_name=chaotic_rnn_inputs_g1p5 --synth_data_seed=5 --T=1.0 --C=400 --N=50 --S=50 --train_percentage=0.8 --nreplications=10 --g=1.5 --x0_std=1.0 --tau=0.025 --dt=0.01 --input_magnitude=20.0 --max_firing_rate=30.0 --noise_type='poisson' + + echo "Generating chaotic rnn data with input pulses (g=2.5)" + python generate_chaotic_rnn_data.py --save_dir=$SYNTH_PATH --datafile_name=chaotic_rnn_inputs_g2p5 --synth_data_seed=5 --T=1.0 --C=400 --N=50 --S=50 --train_percentage=0.8 --nreplications=10 --g=2.5 --x0_std=1.0 --tau=0.025 --dt=0.01 --input_magnitude=20.0 --max_firing_rate=30.0 --noise_type='poisson' + + echo "Generate the multi-session RNN data (no multi-session synth example in paper)" + python generate_chaotic_rnn_data.py --save_dir=$SYNTH_PATH --datafile_name=chaotic_rnn_multisession --synth_data_seed=5 --T=1.0 --C=150 --N=100 --S=20 --npcs=10 --train_percentage=0.8 --nreplications=40 --g=1.5 --x0_std=1.0 --tau=0.025 --dt=0.01 --input_magnitude=0.0 --max_firing_rate=30.0 --noise_type='poisson' + + echo "Generating Integration-to-bound RNN data" + python generate_itb_data.py --save_dir=$SYNTH_PATH --datafile_name=itb_rnn --u_std=0.25 --checkpoint_path=SAMPLE_CHECKPOINT --synth_data_seed=5 --T=1.0 --C=800 --N=50 --train_percentage=0.8 --nreplications=5 --tau=0.025 --dt=0.01 --max_firing_rate=30.0 + + echo "Generating chaotic rnn data with external input labels (no external input labels example in paper)" + python generate_labeled_rnn_data.py --save_dir=$SYNTH_PATH --datafile_name=chaotic_rnns_labeled --synth_data_seed=5 --T=1.0 --C=400 --N=50 --train_percentage=0.8 --nreplications=10 --g=1.5 --x0_std=1.0 --tau=0.025 --dt=0.01 --max_firing_rate=30.0 diff --git a/models/research/lfads/synth_data/synthetic_data_utils.py b/models/research/lfads/synth_data/synthetic_data_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..cc264ee49fdc7fbb53f17d52ca4ced64addefb27 --- /dev/null +++ b/models/research/lfads/synth_data/synthetic_data_utils.py @@ -0,0 +1,348 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ============================================================================== +from __future__ import print_function + +import h5py +import numpy as np +import os + +from utils import write_datasets +import matplotlib +import matplotlib.pyplot as plt +import scipy.signal + + +def generate_rnn(rng, N, g, tau, dt, max_firing_rate): + """Create a (vanilla) RNN with a bunch of hyper parameters for generating +chaotic data. + Args: + rng: numpy random number generator + N: number of hidden units + g: scaling of recurrent weight matrix in g W, with W ~ N(0,1/N) + tau: time scale of individual unit dynamics + dt: time step for equation updates + max_firing_rate: how to resecale the -1,1 firing rates + Returns: + the dictionary of these parameters, plus some others. +""" + rnn = {} + rnn['N'] = N + rnn['W'] = rng.randn(N,N)/np.sqrt(N) + rnn['Bin'] = rng.randn(N)/np.sqrt(1.0) + rnn['Bin2'] = rng.randn(N)/np.sqrt(1.0) + rnn['b'] = np.zeros(N) + rnn['g'] = g + rnn['tau'] = tau + rnn['dt'] = dt + rnn['max_firing_rate'] = max_firing_rate + mfr = rnn['max_firing_rate'] # spikes / sec + nbins_per_sec = 1.0/rnn['dt'] # bins / sec + # Used for plotting in LFADS + rnn['conversion_factor'] = mfr / nbins_per_sec # spikes / bin + return rnn + + +def generate_data(rnn, T, E, x0s=None, P_sxn=None, input_magnitude=0.0, + input_times=None): + """ Generates data from an randomly initialized RNN. + Args: + rnn: the rnn + T: Time in seconds to run (divided by rnn['dt'] to get steps, rounded down. + E: total number of examples + S: number of samples (subsampling N) + Returns: + A list of length E of NxT tensors of the network being run. + """ + N = rnn['N'] + def run_rnn(rnn, x0, ntime_steps, input_time=None): + rs = np.zeros([N,ntime_steps]) + x_tm1 = x0 + r_tm1 = np.tanh(x0) + tau = rnn['tau'] + dt = rnn['dt'] + alpha = (1.0-dt/tau) + W = dt/tau*rnn['W']*rnn['g'] + Bin = dt/tau*rnn['Bin'] + Bin2 = dt/tau*rnn['Bin2'] + b = dt/tau*rnn['b'] + + us = np.zeros([1, ntime_steps]) + for t in range(ntime_steps): + x_t = alpha*x_tm1 + np.dot(W,r_tm1) + b + if input_time is not None and t == input_time: + us[0,t] = input_magnitude + x_t += Bin * us[0,t] # DCS is this what was used? + r_t = np.tanh(x_t) + x_tm1 = x_t + r_tm1 = r_t + rs[:,t] = r_t + return rs, us + + if P_sxn is None: + P_sxn = np.eye(N) + ntime_steps = int(T / rnn['dt']) + data_e = [] + inputs_e = [] + for e in range(E): + input_time = input_times[e] if input_times is not None else None + r_nxt, u_uxt = run_rnn(rnn, x0s[:,e], ntime_steps, input_time) + r_sxt = np.dot(P_sxn, r_nxt) + inputs_e.append(u_uxt) + data_e.append(r_sxt) + + S = P_sxn.shape[0] + data_e = normalize_rates(data_e, E, S) + + return data_e, x0s, inputs_e + + +def normalize_rates(data_e, E, S): + # Normalization, made more complex because of the P matrices. + # Normalize by min and max in each channel. This normalization will + # cause offset differences between identical rnn runs, but different + # t hits. 
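+  # Concretely, each channel i is min-max scaled over time to lie in [0, 1]:
+  #   r_i(t) <- (r_i(t) - min_t r_i) / (max_t r_i - min_t r_i)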
+ for e in range(E): + r_sxt = data_e[e] + for i in range(S): + rmin = np.min(r_sxt[i,:]) + rmax = np.max(r_sxt[i,:]) + assert rmax - rmin != 0, 'Something wrong' + r_sxt[i,:] = (r_sxt[i,:] - rmin)/(rmax-rmin) + data_e[e] = r_sxt + return data_e + + +def spikify_data(data_e, rng, dt=1.0, max_firing_rate=100): + """ Apply spikes to a continuous dataset whose values are between 0.0 and 1.0 + Args: + data_e: nexamples length list of NxT trials + dt: how often the data are sampled + max_firing_rate: the firing rate that is associated with a value of 1.0 + Returns: + spikified_e: a list of length b of the data represented as spikes, + sampled from the underlying poisson process. + """ + + E = len(data_e) + spikes_e = [] + for e in range(E): + data = data_e[e] + N,T = data.shape + data_s = np.zeros([N,T]).astype(np.int) + for n in range(N): + f = data[n,:] + s = rng.poisson(f*max_firing_rate*dt, size=T) + data_s[n,:] = s + spikes_e.append(data_s) + + return spikes_e + + +def gaussify_data(data_e, rng, dt=1.0, max_firing_rate=100): + """ Apply gaussian noise to a continuous dataset whose values are between + 0.0 and 1.0 + + Args: + data_e: nexamples length list of NxT trials + dt: how often the data are sampled + max_firing_rate: the firing rate that is associated with a value of 1.0 + Returns: + gauss_e: a list of length b of the data with noise. + """ + + E = len(data_e) + mfr = max_firing_rate + gauss_e = [] + for e in range(E): + data = data_e[e] + N,T = data.shape + noisy_data = data * mfr + np.random.randn(N,T) * (5.0*mfr) * np.sqrt(dt) + gauss_e.append(noisy_data) + + return gauss_e + + + +def get_train_n_valid_inds(num_trials, train_fraction, nreplications): + """Split the numbers between 0 and num_trials-1 into two portions for + training and validation, based on the train fraction. + Args: + num_trials: the number of trials + train_fraction: (e.g. .80) + nreplications: the number of spiking trials per initial condition + Returns: + a 2-tuple of two lists: the training indices and validation indices + """ + train_inds = [] + valid_inds = [] + for i in range(num_trials): + # This line divides up the trials so that within one initial condition, + # the randomness of spikifying the condition is shared among both + # training and validation data splits. + if (i % nreplications)+1 > train_fraction * nreplications: + valid_inds.append(i) + else: + train_inds.append(i) + + return train_inds, valid_inds + + +def split_list_by_inds(data, inds1, inds2): + """Take the data, a list, and split it up based on the indices in inds1 and + inds2. + Args: + data: the list of data to split + inds1, the first list of indices + inds2, the second list of indices + Returns: a 2-tuple of two lists. + """ + if data is None or len(data) == 0: + return [], [] + else: + dout1 = [data[i] for i in inds1] + dout2 = [data[i] for i in inds2] + return dout1, dout2 + + +def nparray_and_transpose(data_a_b_c): + """Convert the list of items in data to a numpy array, and transpose it + Args: + data: data_asbsc: a nested, nested list of length a, with sublist length + b, with sublist length c. + Returns: + a numpy 3-tensor with dimensions a x c x b +""" + data_axbxc = np.array([datum_b_c for datum_b_c in data_a_b_c]) + data_axcxb = np.transpose(data_axbxc, axes=[0,2,1]) + return data_axcxb + + +def add_alignment_projections(datasets, npcs, ntime=None, nsamples=None): + """Create a matrix that aligns the datasets a bit, under + the assumption that each dataset is observing the same underlying dynamical + system. 
+ + Args: + datasets: The dictionary of dataset structures. + npcs: The number of pcs for each, basically like lfads factors. + nsamples (optional): Number of samples to take for each dataset. + ntime (optional): Number of time steps to take in each sample. + + Returns: + The dataset structures, with the field alignment_matrix_cxf added. + This is # channels x npcs dimension +""" + nchannels_all = 0 + channel_idxs = {} + conditions_all = {} + nconditions_all = 0 + for name, dataset in datasets.items(): + cidxs = np.where(dataset['P_sxn'])[1] # non-zero entries in columns + channel_idxs[name] = [cidxs[0], cidxs[-1]+1] + nchannels_all += cidxs[-1]+1 - cidxs[0] + conditions_all[name] = np.unique(dataset['condition_labels_train']) + + all_conditions_list = \ + np.unique(np.ndarray.flatten(np.array(conditions_all.values()))) + nconditions_all = all_conditions_list.shape[0] + + if ntime is None: + ntime = dataset['train_data'].shape[1] + if nsamples is None: + nsamples = dataset['train_data'].shape[0] + + # In the data workup in the paper, Chethan did intra condition + # averaging, so let's do that here. + avg_data_all = {} + for name, conditions in conditions_all.items(): + dataset = datasets[name] + avg_data_all[name] = {} + for cname in conditions: + td_idxs = np.argwhere(np.array(dataset['condition_labels_train'])==cname) + data = np.squeeze(dataset['train_data'][td_idxs,:,:], axis=1) + avg_data = np.mean(data, axis=0) + avg_data_all[name][cname] = avg_data + + # Visualize this in the morning. + all_data_nxtc = np.zeros([nchannels_all, ntime * nconditions_all]) + for name, dataset in datasets.items(): + cidx_s = channel_idxs[name][0] + cidx_f = channel_idxs[name][1] + for cname in conditions_all[name]: + cidxs = np.argwhere(all_conditions_list == cname) + if cidxs.shape[0] > 0: + cidx = cidxs[0][0] + all_tidxs = np.arange(0, ntime+1) + cidx*ntime + all_data_nxtc[cidx_s:cidx_f, all_tidxs[0]:all_tidxs[-1]] = \ + avg_data_all[name][cname].T + + # A bit of filtering. We don't care about spectral properties, or + # filtering artifacts, simply correlate time steps a bit. + filt_len = 6 + bc_filt = np.ones([filt_len])/float(filt_len) + for c in range(nchannels_all): + all_data_nxtc[c,:] = scipy.signal.filtfilt(bc_filt, [1.0], all_data_nxtc[c,:]) + + # Compute the PCs. + all_data_mean_nx1 = np.mean(all_data_nxtc, axis=1, keepdims=True) + all_data_zm_nxtc = all_data_nxtc - all_data_mean_nx1 + corr_mat_nxn = np.dot(all_data_zm_nxtc, all_data_zm_nxtc.T) + evals_n, evecs_nxn = np.linalg.eigh(corr_mat_nxn) + sidxs = np.flipud(np.argsort(evals_n)) # sort such that 0th is highest + evals_n = evals_n[sidxs] + evecs_nxn = evecs_nxn[:,sidxs] + + # Project all the channels data onto the low-D PCA basis, where + # low-d is the npcs parameter. + all_data_pca_pxtc = np.dot(evecs_nxn[:, 0:npcs].T, all_data_zm_nxtc) + + # Now for each dataset, we regress the channel data onto the top + # pcs, and this will be our alignment matrix for that dataset. 
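+  # In the least-squares solve below, A is this dataset's zero-meaned channel
+  # data (shape: time*conditions x channels), B is the PCA-projected data
+  # (time*conditions x npcs), and the solution W (channels x npcs) is stored
+  # as alignment_matrix_cxf.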
+ # |B - A*W|^2 + for name, dataset in datasets.items(): + cidx_s = channel_idxs[name][0] + cidx_f = channel_idxs[name][1] + all_data_zm_chxtc = all_data_zm_nxtc[cidx_s:cidx_f,:] # ch for channel + W_chxp, _, _, _ = \ + np.linalg.lstsq(all_data_zm_chxtc.T, all_data_pca_pxtc.T) + dataset['alignment_matrix_cxf'] = W_chxp + alignment_bias_cx1 = all_data_mean_nx1[cidx_s:cidx_f] + dataset['alignment_bias_c'] = np.squeeze(alignment_bias_cx1, axis=1) + + do_debug_plot = False + if do_debug_plot: + pc_vecs = evecs_nxn[:,0:npcs] + ntoplot = 400 + + plt.figure() + plt.plot(np.log10(evals_n), '-x') + plt.figure() + plt.subplot(311) + plt.imshow(all_data_pca_pxtc) + plt.colorbar() + + plt.subplot(312) + plt.imshow(np.dot(W_chxp.T, all_data_zm_chxtc)) + plt.colorbar() + + plt.subplot(313) + plt.imshow(np.dot(all_data_zm_chxtc.T, W_chxp).T - all_data_pca_pxtc) + plt.colorbar() + + import pdb + pdb.set_trace() + + return datasets diff --git a/models/research/lfads/synth_data/trained_itb/model-65000.data-00000-of-00001 b/models/research/lfads/synth_data/trained_itb/model-65000.data-00000-of-00001 new file mode 100644 index 0000000000000000000000000000000000000000..9459a2a1b72f56dc16b3eca210911f14081e7fd5 Binary files /dev/null and b/models/research/lfads/synth_data/trained_itb/model-65000.data-00000-of-00001 differ diff --git a/models/research/lfads/synth_data/trained_itb/model-65000.index b/models/research/lfads/synth_data/trained_itb/model-65000.index new file mode 100644 index 0000000000000000000000000000000000000000..dd9c793acf8dc79e07833d1c0edc8a2fa86d806a Binary files /dev/null and b/models/research/lfads/synth_data/trained_itb/model-65000.index differ diff --git a/models/research/lfads/synth_data/trained_itb/model-65000.meta b/models/research/lfads/synth_data/trained_itb/model-65000.meta new file mode 100644 index 0000000000000000000000000000000000000000..2b97380f5373b6ffc29629a6653cb229df629e27 --- /dev/null +++ b/models/research/lfads/synth_data/trained_itb/model-65000.meta @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc43cc94549c645387862920487167dfa19e8ea26e0978ce03f286fa96f7a462 +size 1053549 diff --git a/models/research/lfads/utils.py b/models/research/lfads/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e64825ffc1d423de1d9fe85bc1c00a19e5f4ad7e --- /dev/null +++ b/models/research/lfads/utils.py @@ -0,0 +1,367 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ============================================================================== +from __future__ import print_function + +import os +import h5py +import json + +import numpy as np +import tensorflow as tf + + +def log_sum_exp(x_k): + """Computes log \sum exp in a numerically stable way. + log ( sum_i exp(x_i) ) + log ( sum_i exp(x_i - m + m) ), with m = max(x_i) + log ( sum_i exp(x_i - m)*exp(m) ) + log ( sum_i exp(x_i - m) + m + + Args: + x_k - k -dimensional list of arguments to log_sum_exp. 
+ + Returns: + log_sum_exp of the arguments. + """ + m = tf.reduce_max(x_k) + x1_k = x_k - m + u_k = tf.exp(x1_k) + z = tf.reduce_sum(u_k) + return tf.log(z) + m + + +def linear(x, out_size, do_bias=True, alpha=1.0, identity_if_possible=False, + normalized=False, name=None, collections=None): + """Linear (affine) transformation, y = x W + b, for a variety of + configurations. + + Args: + x: input The tensor to tranformation. + out_size: The integer size of non-batch output dimension. + do_bias (optional): Add a learnable bias vector to the operation. + alpha (optional): A multiplicative scaling for the weight initialization + of the matrix, in the form \alpha * 1/\sqrt{x.shape[1]}. + identity_if_possible (optional): just return identity, + if x.shape[1] == out_size. + normalized (optional): Option to divide out by the norms of the rows of W. + name (optional): The name prefix to add to variables. + collections (optional): List of additional collections. (Placed in + tf.GraphKeys.GLOBAL_VARIABLES already, so no need for that.) + + Returns: + In the equation, y = x W + b, returns the tensorflow op that yields y. + """ + in_size = int(x.get_shape()[1]) # from Dimension(10) -> 10 + stddev = alpha/np.sqrt(float(in_size)) + mat_init = tf.random_normal_initializer(0.0, stddev) + wname = (name + "/W") if name else "/W" + + if identity_if_possible and in_size == out_size: + # Sometimes linear layers are nothing more than size adapters. + return tf.identity(x, name=(wname+'_ident')) + + W,b = init_linear(in_size, out_size, do_bias=do_bias, alpha=alpha, + normalized=normalized, name=name, collections=collections) + + if do_bias: + return tf.matmul(x, W) + b + else: + return tf.matmul(x, W) + + +def init_linear(in_size, out_size, do_bias=True, mat_init_value=None, + bias_init_value=None, alpha=1.0, identity_if_possible=False, + normalized=False, name=None, collections=None, trainable=True): + """Linear (affine) transformation, y = x W + b, for a variety of + configurations. + + Args: + in_size: The integer size of the non-batc input dimension. [(x),y] + out_size: The integer size of non-batch output dimension. [x,(y)] + do_bias (optional): Add a (learnable) bias vector to the operation, + if false, b will be None + mat_init_value (optional): numpy constant for matrix initialization, if None + , do random, with additional parameters. + alpha (optional): A multiplicative scaling for the weight initialization + of the matrix, in the form \alpha * 1/\sqrt{x.shape[1]}. + identity_if_possible (optional): just return identity, + if x.shape[1] == out_size. + normalized (optional): Option to divide out by the norms of the rows of W. + name (optional): The name prefix to add to variables. + collections (optional): List of additional collections. (Placed in + tf.GraphKeys.GLOBAL_VARIABLES already, so no need for that.) + + Returns: + In the equation, y = x W + b, returns the pair (W, b). 
+ """ + + if mat_init_value is not None and mat_init_value.shape != (in_size, out_size): + raise ValueError( + 'Provided mat_init_value must have shape [%d, %d].'%(in_size, out_size)) + if bias_init_value is not None and bias_init_value.shape != (1,out_size): + raise ValueError( + 'Provided bias_init_value must have shape [1,%d].'%(out_size,)) + + if mat_init_value is None: + stddev = alpha/np.sqrt(float(in_size)) + mat_init = tf.random_normal_initializer(0.0, stddev) + + wname = (name + "/W") if name else "/W" + + if identity_if_possible and in_size == out_size: + return (tf.constant(np.eye(in_size).astype(np.float32)), + tf.zeros(in_size)) + + # Note the use of get_variable vs. tf.Variable. this is because get_variable + # does not allow the initialization of the variable with a value. + if normalized: + w_collections = [tf.GraphKeys.GLOBAL_VARIABLES, "norm-variables"] + if collections: + w_collections += collections + if mat_init_value is not None: + w = tf.Variable(mat_init_value, name=wname, collections=w_collections, + trainable=trainable) + else: + w = tf.get_variable(wname, [in_size, out_size], initializer=mat_init, + collections=w_collections, trainable=trainable) + w = tf.nn.l2_normalize(w, dim=0) # x W, so xW_j = \sum_i x_bi W_ij + else: + w_collections = [tf.GraphKeys.GLOBAL_VARIABLES] + if collections: + w_collections += collections + if mat_init_value is not None: + w = tf.Variable(mat_init_value, name=wname, collections=w_collections, + trainable=trainable) + else: + w = tf.get_variable(wname, [in_size, out_size], initializer=mat_init, + collections=w_collections, trainable=trainable) + b = None + if do_bias: + b_collections = [tf.GraphKeys.GLOBAL_VARIABLES] + if collections: + b_collections += collections + bname = (name + "/b") if name else "/b" + if bias_init_value is None: + b = tf.get_variable(bname, [1, out_size], + initializer=tf.zeros_initializer(), + collections=b_collections, + trainable=trainable) + else: + b = tf.Variable(bias_init_value, name=bname, + collections=b_collections, + trainable=trainable) + + return (w, b) + + +def write_data(data_fname, data_dict, use_json=False, compression=None): + """Write data in HD5F format. + + Args: + data_fname: The filename of teh file in which to write the data. + data_dict: The dictionary of data to write. The keys are strings + and the values are numpy arrays. + use_json (optional): human readable format for simple items + compression (optional): The compression to use for h5py (disabled by + default because the library borks on scalars, otherwise try 'gzip'). + """ + + dir_name = os.path.dirname(data_fname) + if not os.path.exists(dir_name): + os.makedirs(dir_name) + + if use_json: + the_file = open(data_fname,'wb') + json.dump(data_dict, the_file) + the_file.close() + else: + try: + with h5py.File(data_fname, 'w') as hf: + for k, v in data_dict.items(): + clean_k = k.replace('/', '_') + if clean_k is not k: + print('Warning: saving variable with name: ', k, ' as ', clean_k) + else: + print('Saving variable with name: ', clean_k) + hf.create_dataset(clean_k, data=v, compression=compression) + except IOError: + print("Cannot open %s for writing.", data_fname) + raise + + +def read_data(data_fname): + """ Read saved data in HDF5 format. + + Args: + data_fname: The filename of the file from which to read the data. + Returns: + A dictionary whose keys will vary depending on dataset (but should + always contain the keys 'train_data' and 'valid_data') and whose + values are numpy arrays. 
+  """
+
+  try:
+    with h5py.File(data_fname, 'r') as hf:
+      data_dict = {k: np.array(v) for k, v in hf.items()}
+      return data_dict
+  except IOError:
+    print("Cannot open %s for reading." % data_fname)
+    raise
+
+
+def write_datasets(data_path, data_fname_stem, dataset_dict, compression=None):
+  """Write datasets in HDF5 format.
+
+  This function assumes the dataset_dict is a mapping (string -> data_dict).
+  It calls write_data for each data dictionary, post-fixing the data filename
+  with the key of the dataset.
+
+  Args:
+    data_path: The path to the save directory.
+    data_fname_stem: The filename stem of the file in which to write the data.
+    dataset_dict: The dictionary of datasets. The keys are strings
+      and the values are data dictionaries (str -> numpy arrays) associations.
+    compression (optional): The compression to use for h5py (disabled by
+      default because the library borks on scalars, otherwise try 'gzip').
+  """
+
+  full_name_stem = os.path.join(data_path, data_fname_stem)
+  for s, data_dict in dataset_dict.items():
+    write_data(full_name_stem + "_" + s, data_dict, compression=compression)
+
+
+def read_datasets(data_path, data_fname_stem):
+  """Read datasets in HDF5 format.
+
+  This function assumes the datasets were written by write_datasets, i.e. that
+  each filename is the stem post-fixed with the key of the dataset. It calls
+  read_data on every matching file and returns a mapping (key -> data_dict).
+
+  Args:
+    data_path: The path to the save directory.
+    data_fname_stem: The filename stem of the files from which to read the data.
+  """
+
+  dataset_dict = {}
+  fnames = os.listdir(data_path)
+
+  print('loading data from ' + data_path + ' with stem ' + data_fname_stem)
+  for fname in fnames:
+    if fname.startswith(data_fname_stem):
+      data_dict = read_data(os.path.join(data_path, fname))
+      idx = len(data_fname_stem) + 1
+      key = fname[idx:]
+      data_dict['data_dim'] = data_dict['train_data'].shape[2]
+      data_dict['num_steps'] = data_dict['train_data'].shape[1]
+      dataset_dict[key] = data_dict
+
+  if len(dataset_dict) == 0:
+    raise ValueError("Failed to load any datasets, are you sure that the "
+                     "'--data_dir' and '--data_filename_stem' flag values "
+                     "are correct?")
+
+  print(str(len(dataset_dict)) + ' datasets loaded')
+  return dataset_dict
+
+
+# NUMPY utility functions
+def list_t_bxn_to_list_b_txn(values_t_bxn):
+  """Convert a length T list of BxN numpy tensors to a length B list of TxN
+  numpy tensors.
+
+  Args:
+    values_t_bxn: The length T list of BxN numpy tensors.
+
+  Returns:
+    The length B list of TxN numpy tensors.
+  """
+  T = len(values_t_bxn)
+  B, N = values_t_bxn[0].shape
+  values_b_txn = []
+  for b in range(B):
+    values_pb_txn = np.zeros([T, N])
+    for t in range(T):
+      values_pb_txn[t, :] = values_t_bxn[t][b, :]
+    values_b_txn.append(values_pb_txn)
+
+  return values_b_txn
+
+
+def list_t_bxn_to_tensor_bxtxn(values_t_bxn):
+  """Convert a length T list of BxN numpy tensors to a single numpy tensor with
+  shape BxTxN.
+
+  Args:
+    values_t_bxn: The length T list of BxN numpy tensors.
+
+  Returns:
+    values_bxtxn: The BxTxN numpy tensor.
+  """
+
+  T = len(values_t_bxn)
+  B, N = values_t_bxn[0].shape
+  values_bxtxn = np.zeros([B, T, N])
+  for t in range(T):
+    values_bxtxn[:, t, :] = values_t_bxn[t]
+
+  return values_bxtxn
+
+
+def tensor_bxtxn_to_list_t_bxn(tensor_bxtxn):
+  """Convert a numpy tensor with shape BxTxN to a length T list of numpy
+  tensors with shape BxN.
+
+  Args:
+    tensor_bxtxn: The BxTxN numpy tensor.
+
+  Returns:
+    A length T list of numpy tensors with shape BxN.
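+
+    Example (illustrative only): a tensor of shape (2, 3, 4), i.e. B=2, T=3,
+    N=4, becomes a list of 3 arrays, each of shape (2, 4).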
+ """ + + values_t_bxn = [] + B, T, N = tensor_bxtxn.shape + for t in range(T): + values_t_bxn.append(np.squeeze(tensor_bxtxn[:,t,:])) + + return values_t_bxn + + +def flatten(list_of_lists): + """Takes a list of lists and returns a list of the elements. + + Args: + list_of_lists: List of lists. + + Returns: + flat_list: Flattened list. + flat_list_idxs: Flattened list indices. + """ + flat_list = [] + flat_list_idxs = [] + start_idx = 0 + for item in list_of_lists: + if isinstance(item, list): + flat_list += item + l = len(item) + idxs = range(start_idx, start_idx+l) + start_idx = start_idx+l + else: # a value + flat_list.append(item) + idxs = [start_idx] + start_idx += 1 + flat_list_idxs.append(idxs) + + return flat_list, flat_list_idxs diff --git a/models/research/lm_1b/BUILD b/models/research/lm_1b/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..ca5bc1f6ce4347a3b5f18d1bb59284aa9d07a567 --- /dev/null +++ b/models/research/lm_1b/BUILD @@ -0,0 +1,27 @@ +package(default_visibility = [":internal"]) + +licenses(["notice"]) # Apache 2.0 + +exports_files(["LICENSE"]) + +package_group( + name = "internal", + packages = [ + "//lm_1b/...", + ], +) + +py_library( + name = "data_utils", + srcs = ["data_utils.py"], +) + +py_binary( + name = "lm_1b_eval", + srcs = [ + "lm_1b_eval.py", + ], + deps = [ + ":data_utils", + ], +) diff --git a/models/research/lm_1b/README.md b/models/research/lm_1b/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f48afbfe23aff6681e641296e73b2c6b0e5a9b48 --- /dev/null +++ b/models/research/lm_1b/README.md @@ -0,0 +1,198 @@ +![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +Language Model on One Billion Word Benchmark + +Authors: + +Oriol Vinyals (vinyals@google.com, github: OriolVinyals), +Xin Pan + +Paper Authors: + +Rafal Jozefowicz, Oriol Vinyals, Mike Schuster, Noam Shazeer, Yonghui Wu + +TL;DR + +This is a pretrained model on One Billion Word Benchmark. +If you use this model in your publication, please cite the original paper: + +@article{jozefowicz2016exploring, + title={Exploring the Limits of Language Modeling}, + author={Jozefowicz, Rafal and Vinyals, Oriol and Schuster, Mike + and Shazeer, Noam and Wu, Yonghui}, + journal={arXiv preprint arXiv:1602.02410}, + year={2016} +} + +Introduction + +In this release, we open source a model trained on the One Billion Word +Benchmark (http://arxiv.org/abs/1312.3005), a large language corpus in English +which was released in 2013. This dataset contains about one billion words, and +has a vocabulary size of about 800K words. It contains mostly news data. Since +sentences in the training set are shuffled, models can ignore the context and +focus on sentence level language modeling. + +In the original release and subsequent work, people have used the same test set +to train models on this dataset as a standard benchmark for language modeling. +Recently, we wrote an article (http://arxiv.org/abs/1602.02410) describing a +model hybrid between character CNN, a large and deep LSTM, and a specific +Softmax architecture which allowed us to train the best model on this dataset +thus far, almost halving the best perplexity previously obtained by others. 
+ +Code Release + +The open-sourced components include: + +* TensorFlow GraphDef proto buffer text file. +* TensorFlow pre-trained checkpoint shards. +* Code used to evaluate the pre-trained model. +* Vocabulary file. +* Test set from LM-1B evaluation. + +The code supports 4 evaluation modes: + +* Given provided dataset, calculate the model's perplexity. +* Given a prefix sentence, predict the next words. +* Dump the softmax embedding, character-level CNN word embeddings. +* Give a sentence, dump the embedding from the LSTM state. + +Results + +Model | Test Perplexity | Number of Params [billions] +------|-----------------|---------------------------- +Sigmoid-RNN-2048 [Blackout] | 68.3 | 4.1 +Interpolated KN 5-gram, 1.1B n-grams [chelba2013one] | 67.6 | 1.76 +Sparse Non-Negative Matrix LM [shazeer2015sparse] | 52.9 | 33 +RNN-1024 + MaxEnt 9-gram features [chelba2013one] | 51.3 | 20 +LSTM-512-512 | 54.1 | 0.82 +LSTM-1024-512 | 48.2 | 0.82 +LSTM-2048-512 | 43.7 | 0.83 +LSTM-8192-2048 (No Dropout) | 37.9 | 3.3 +LSTM-8192-2048 (50\% Dropout) | 32.2 | 3.3 +2-Layer LSTM-8192-1024 (BIG LSTM) | 30.6 | 1.8 +(THIS RELEASE) BIG LSTM+CNN Inputs | 30.0 | 1.04 + +How To Run + +Prerequisites: + +* Install TensorFlow. +* Install Bazel. +* Download the data files: + * Model GraphDef file: + [link](http://download.tensorflow.org/models/LM_LSTM_CNN/graph-2016-09-10.pbtxt) + * Model Checkpoint sharded file: + [1](http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-base) + [2](http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-char-embedding) + [3](http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-lstm) + [4](http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax0) + [5](http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax1) + [6](http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax2) + [7](http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax3) + [8](http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax4) + [9](http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax5) + [10](http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax6) + [11](http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax7) + [12](http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax8) + * Vocabulary file: + [link](http://download.tensorflow.org/models/LM_LSTM_CNN/vocab-2016-09-10.txt) + * test dataset: link + [link](http://download.tensorflow.org/models/LM_LSTM_CNN/test/news.en.heldout-00000-of-00050) +* It is recommended to run on a modern desktop instead of a laptop. + +```shell +# 1. Clone the code to your workspace. +# 2. Download the data to your workspace. +# 3. Create an empty WORKSPACE file in your workspace. +# 4. Create an empty output directory in your workspace. +# Example directory structure below: +$ ls -R +.: +data lm_1b output WORKSPACE + +./data: +ckpt-base ckpt-lstm ckpt-softmax1 ckpt-softmax3 ckpt-softmax5 +ckpt-softmax7 graph-2016-09-10.pbtxt vocab-2016-09-10.txt +ckpt-char-embedding ckpt-softmax0 ckpt-softmax2 ckpt-softmax4 ckpt-softmax6 +ckpt-softmax8 news.en.heldout-00000-of-00050 + +./lm_1b: +BUILD data_utils.py lm_1b_eval.py README.md + +./output: + +# Build the codes. +$ bazel build -c opt lm_1b/... 
+# Run sample mode: +$ bazel-bin/lm_1b/lm_1b_eval --mode sample \ + --prefix "I love that I" \ + --pbtxt data/graph-2016-09-10.pbtxt \ + --vocab_file data/vocab-2016-09-10.txt \ + --ckpt 'data/ckpt-*' +...(omitted some TensorFlow output) +I love +I love that +I love that I +I love that I find +I love that I find that +I love that I find that amazing +...(omitted) + +# Run eval mode: +$ bazel-bin/lm_1b/lm_1b_eval --mode eval \ + --pbtxt data/graph-2016-09-10.pbtxt \ + --vocab_file data/vocab-2016-09-10.txt \ + --input_data data/news.en.heldout-00000-of-00050 \ + --ckpt 'data/ckpt-*' +...(omitted some TensorFlow output) +Loaded step 14108582. +# perplexity is high initially because words without context are harder to +# predict. +Eval Step: 0, Average Perplexity: 2045.512297. +Eval Step: 1, Average Perplexity: 229.478699. +Eval Step: 2, Average Perplexity: 208.116787. +Eval Step: 3, Average Perplexity: 338.870601. +Eval Step: 4, Average Perplexity: 228.950107. +Eval Step: 5, Average Perplexity: 197.685857. +Eval Step: 6, Average Perplexity: 156.287063. +Eval Step: 7, Average Perplexity: 124.866189. +Eval Step: 8, Average Perplexity: 147.204975. +Eval Step: 9, Average Perplexity: 90.124864. +Eval Step: 10, Average Perplexity: 59.897914. +Eval Step: 11, Average Perplexity: 42.591137. +...(omitted) +Eval Step: 4529, Average Perplexity: 29.243668. +Eval Step: 4530, Average Perplexity: 29.302362. +Eval Step: 4531, Average Perplexity: 29.285674. +...(omitted. At convergence, it should be around 30.) + +# Run dump_emb mode: +$ bazel-bin/lm_1b/lm_1b_eval --mode dump_emb \ + --pbtxt data/graph-2016-09-10.pbtxt \ + --vocab_file data/vocab-2016-09-10.txt \ + --ckpt 'data/ckpt-*' \ + --save_dir output +...(omitted some TensorFlow output) +Finished softmax weights +Finished word embedding 0/793471 +Finished word embedding 1/793471 +Finished word embedding 2/793471 +...(omitted) +$ ls output/ +embeddings_softmax.npy ... + +# Run dump_lstm_emb mode: +$ bazel-bin/lm_1b/lm_1b_eval --mode dump_lstm_emb \ + --pbtxt data/graph-2016-09-10.pbtxt \ + --vocab_file data/vocab-2016-09-10.txt \ + --ckpt 'data/ckpt-*' \ + --sentence "I love who I am ." \ + --save_dir output +$ ls output/ +lstm_emb_step_0.npy lstm_emb_step_2.npy lstm_emb_step_4.npy +lstm_emb_step_6.npy lstm_emb_step_1.npy lstm_emb_step_3.npy +lstm_emb_step_5.npy +``` diff --git a/models/research/lm_1b/data_utils.py b/models/research/lm_1b/data_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ad8d3391ef6db07c1d6c234450a6d23a8e19a178 --- /dev/null +++ b/models/research/lm_1b/data_utils.py @@ -0,0 +1,279 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+
+"""A library for loading 1B word benchmark dataset."""
+
+import random
+
+import numpy as np
+import tensorflow as tf
+
+
+class Vocabulary(object):
+  """Class that holds a vocabulary for the dataset."""
+
+  def __init__(self, filename):
+    """Initialize vocabulary.
+
+    Args:
+      filename: Vocabulary file name.
+    """
+
+    self._id_to_word = []
+    self._word_to_id = {}
+    self._unk = -1
+    self._bos = -1
+    self._eos = -1
+
+    with tf.gfile.Open(filename) as f:
+      idx = 0
+      for line in f:
+        word_name = line.strip()
+        if word_name == '<S>':
+          self._bos = idx
+        elif word_name == '</S>':
+          self._eos = idx
+        elif word_name == '<UNK>':
+          self._unk = idx
+        if word_name == '!!!MAXTERMID':
+          continue
+
+        self._id_to_word.append(word_name)
+        self._word_to_id[word_name] = idx
+        idx += 1
+
+  @property
+  def bos(self):
+    return self._bos
+
+  @property
+  def eos(self):
+    return self._eos
+
+  @property
+  def unk(self):
+    return self._unk
+
+  @property
+  def size(self):
+    return len(self._id_to_word)
+
+  def word_to_id(self, word):
+    if word in self._word_to_id:
+      return self._word_to_id[word]
+    return self.unk
+
+  def id_to_word(self, cur_id):
+    if cur_id < self.size:
+      return self._id_to_word[cur_id]
+    return 'ERROR'
+
+  def decode(self, cur_ids):
+    """Convert a list of ids to a sentence, with space inserted."""
+    return ' '.join([self.id_to_word(cur_id) for cur_id in cur_ids])
+
+  def encode(self, sentence):
+    """Convert a sentence to a list of ids, with special tokens added."""
+    word_ids = [self.word_to_id(cur_word) for cur_word in sentence.split()]
+    return np.array([self.bos] + word_ids + [self.eos], dtype=np.int32)
+
+
+class CharsVocabulary(Vocabulary):
+  """Vocabulary containing character-level information."""
+
+  def __init__(self, filename, max_word_length):
+    super(CharsVocabulary, self).__init__(filename)
+    self._max_word_length = max_word_length
+    chars_set = set()
+
+    for word in self._id_to_word:
+      chars_set |= set(word)
+
+    free_ids = []
+    for i in range(256):
+      if chr(i) in chars_set:
+        continue
+      free_ids.append(chr(i))
+
+    if len(free_ids) < 5:
+      raise ValueError('Not enough free char ids: %d' % len(free_ids))
+
+    self.bos_char = free_ids[0]  # begin-of-sentence character
+    self.eos_char = free_ids[1]  # end-of-sentence character
+    self.bow_char = free_ids[2]  # begin-of-word character
+    self.eow_char = free_ids[3]  # end-of-word character
+    self.pad_char = free_ids[4]  # padding character
+
+    chars_set |= {self.bos_char, self.eos_char, self.bow_char, self.eow_char,
+                  self.pad_char}
+
+    self._char_set = chars_set
+    num_words = len(self._id_to_word)
+
+    self._word_char_ids = np.zeros([num_words, max_word_length], dtype=np.int32)
+
+    self.bos_chars = self._convert_word_to_char_ids(self.bos_char)
+    self.eos_chars = self._convert_word_to_char_ids(self.eos_char)
+
+    for i, word in enumerate(self._id_to_word):
+      self._word_char_ids[i] = self._convert_word_to_char_ids(word)
+
+  @property
+  def word_char_ids(self):
+    return self._word_char_ids
+
+  @property
+  def max_word_length(self):
+    return self._max_word_length
+
+  def _convert_word_to_char_ids(self, word):
+    code = np.zeros([self.max_word_length], dtype=np.int32)
+    code[:] = ord(self.pad_char)
+
+    if len(word) > self.max_word_length - 2:
+      word = word[:self.max_word_length-2]
+    cur_word = self.bow_char + word + self.eow_char
+    for j in range(len(cur_word)):
+      code[j] = ord(cur_word[j])
+    return code
+
+  def word_to_char_ids(self, word):
+    if word in self._word_to_id:
+      return self._word_char_ids[self._word_to_id[word]]
+    else:
+      return self._convert_word_to_char_ids(word)
+
+  def 
encode_chars(self, sentence): + chars_ids = [self.word_to_char_ids(cur_word) + for cur_word in sentence.split()] + return np.vstack([self.bos_chars] + chars_ids + [self.eos_chars]) + + +def get_batch(generator, batch_size, num_steps, max_word_length, pad=False): + """Read batches of input.""" + cur_stream = [None] * batch_size + + inputs = np.zeros([batch_size, num_steps], np.int32) + char_inputs = np.zeros([batch_size, num_steps, max_word_length], np.int32) + global_word_ids = np.zeros([batch_size, num_steps], np.int32) + targets = np.zeros([batch_size, num_steps], np.int32) + weights = np.ones([batch_size, num_steps], np.float32) + + no_more_data = False + while True: + inputs[:] = 0 + char_inputs[:] = 0 + global_word_ids[:] = 0 + targets[:] = 0 + weights[:] = 0.0 + + for i in range(batch_size): + cur_pos = 0 + + while cur_pos < num_steps: + if cur_stream[i] is None or len(cur_stream[i][0]) <= 1: + try: + cur_stream[i] = list(generator.next()) + except StopIteration: + # No more data, exhaust current streams and quit + no_more_data = True + break + + how_many = min(len(cur_stream[i][0]) - 1, num_steps - cur_pos) + next_pos = cur_pos + how_many + + inputs[i, cur_pos:next_pos] = cur_stream[i][0][:how_many] + char_inputs[i, cur_pos:next_pos] = cur_stream[i][1][:how_many] + global_word_ids[i, cur_pos:next_pos] = cur_stream[i][2][:how_many] + targets[i, cur_pos:next_pos] = cur_stream[i][0][1:how_many+1] + weights[i, cur_pos:next_pos] = 1.0 + + cur_pos = next_pos + cur_stream[i][0] = cur_stream[i][0][how_many:] + cur_stream[i][1] = cur_stream[i][1][how_many:] + cur_stream[i][2] = cur_stream[i][2][how_many:] + + if pad: + break + + if no_more_data and np.sum(weights) == 0: + # There is no more data and this is an empty batch. Done! + break + yield inputs, char_inputs, global_word_ids, targets, weights + + +class LM1BDataset(object): + """Utility class for 1B word benchmark dataset. + + The current implementation reads the data from the tokenized text files. + """ + + def __init__(self, filepattern, vocab): + """Initialize LM1BDataset reader. + + Args: + filepattern: Dataset file pattern. + vocab: Vocabulary. + """ + self._vocab = vocab + self._all_shards = tf.gfile.Glob(filepattern) + tf.logging.info('Found %d shards at %s', len(self._all_shards), filepattern) + + def _load_random_shard(self): + """Randomly select a file and read it.""" + return self._load_shard(random.choice(self._all_shards)) + + def _load_shard(self, shard_name): + """Read one file and convert to ids. + + Args: + shard_name: file path. + + Returns: + list of (id, char_id, global_word_id) tuples. 
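+
+    Each `id` entry holds the word ids for one sentence (including the
+    sentence-boundary ids added by Vocabulary.encode), `char_id` holds the
+    matching character-id rows, and `global_word_id` is a running word index
+    over the whole shard.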
+ """ + tf.logging.info('Loading data from: %s', shard_name) + with tf.gfile.Open(shard_name) as f: + sentences = f.readlines() + chars_ids = [self.vocab.encode_chars(sentence) for sentence in sentences] + ids = [self.vocab.encode(sentence) for sentence in sentences] + + global_word_ids = [] + current_idx = 0 + for word_ids in ids: + current_size = len(word_ids) - 1 # without symbol + cur_ids = np.arange(current_idx, current_idx + current_size) + global_word_ids.append(cur_ids) + current_idx += current_size + + tf.logging.info('Loaded %d words.', current_idx) + tf.logging.info('Finished loading') + return zip(ids, chars_ids, global_word_ids) + + def _get_sentence(self, forever=True): + while True: + ids = self._load_random_shard() + for current_ids in ids: + yield current_ids + if not forever: + break + + def get_batch(self, batch_size, num_steps, pad=False, forever=True): + return get_batch(self._get_sentence(forever), batch_size, num_steps, + self.vocab.max_word_length, pad=pad) + + @property + def vocab(self): + return self._vocab diff --git a/models/research/lm_1b/lm_1b_eval.py b/models/research/lm_1b/lm_1b_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..ce8634757558c135ba137a9b9e09a733977adc3a --- /dev/null +++ b/models/research/lm_1b/lm_1b_eval.py @@ -0,0 +1,308 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Eval pre-trained 1 billion word language model. +""" +import os +import sys + +import numpy as np +from six.moves import xrange +import tensorflow as tf + +from google.protobuf import text_format +import data_utils + +FLAGS = tf.flags.FLAGS +# General flags. +tf.flags.DEFINE_string('mode', 'eval', + 'One of [sample, eval, dump_emb, dump_lstm_emb]. ' + '"sample" mode samples future word predictions, using ' + 'FLAGS.prefix as prefix (prefix could be left empty). ' + '"eval" mode calculates perplexity of the ' + 'FLAGS.input_data. ' + '"dump_emb" mode dumps word and softmax embeddings to ' + 'FLAGS.save_dir. embeddings are dumped in the same ' + 'order as words in vocabulary. All words in vocabulary ' + 'are dumped.' + 'dump_lstm_emb dumps lstm embeddings of FLAGS.sentence ' + 'to FLAGS.save_dir.') +tf.flags.DEFINE_string('pbtxt', '', + 'GraphDef proto text file used to construct model ' + 'structure.') +tf.flags.DEFINE_string('ckpt', '', + 'Checkpoint directory used to fill model values.') +tf.flags.DEFINE_string('vocab_file', '', 'Vocabulary file.') +tf.flags.DEFINE_string('save_dir', '', + 'Used for "dump_emb" mode to save word embeddings.') +# sample mode flags. +tf.flags.DEFINE_string('prefix', '', + 'Used for "sample" mode to predict next words.') +tf.flags.DEFINE_integer('max_sample_words', 100, + 'Sampling stops either when is met or this number ' + 'of steps has passed.') +tf.flags.DEFINE_integer('num_samples', 3, + 'Number of samples to generate for the prefix.') +# dump_lstm_emb mode flags. 
+tf.flags.DEFINE_string('sentence', '', + 'Used as input for "dump_lstm_emb" mode.') +# eval mode flags. +tf.flags.DEFINE_string('input_data', '', + 'Input data files for eval model.') +tf.flags.DEFINE_integer('max_eval_steps', 1000000, + 'Maximum mumber of steps to run "eval" mode.') + + +# For saving demo resources, use batch size 1 and step 1. +BATCH_SIZE = 1 +NUM_TIMESTEPS = 1 +MAX_WORD_LEN = 50 + + +def _LoadModel(gd_file, ckpt_file): + """Load the model from GraphDef and Checkpoint. + + Args: + gd_file: GraphDef proto text file. + ckpt_file: TensorFlow Checkpoint file. + + Returns: + TensorFlow session and tensors dict. + """ + with tf.Graph().as_default(): + sys.stderr.write('Recovering graph.\n') + with tf.gfile.FastGFile(gd_file, 'r') as f: + s = f.read().decode() + gd = tf.GraphDef() + text_format.Merge(s, gd) + + tf.logging.info('Recovering Graph %s', gd_file) + t = {} + [t['states_init'], t['lstm/lstm_0/control_dependency'], + t['lstm/lstm_1/control_dependency'], t['softmax_out'], t['class_ids_out'], + t['class_weights_out'], t['log_perplexity_out'], t['inputs_in'], + t['targets_in'], t['target_weights_in'], t['char_inputs_in'], + t['all_embs'], t['softmax_weights'], t['global_step'] + ] = tf.import_graph_def(gd, {}, ['states_init', + 'lstm/lstm_0/control_dependency:0', + 'lstm/lstm_1/control_dependency:0', + 'softmax_out:0', + 'class_ids_out:0', + 'class_weights_out:0', + 'log_perplexity_out:0', + 'inputs_in:0', + 'targets_in:0', + 'target_weights_in:0', + 'char_inputs_in:0', + 'all_embs_out:0', + 'Reshape_3:0', + 'global_step:0'], name='') + + sys.stderr.write('Recovering checkpoint %s\n' % ckpt_file) + sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) + sess.run('save/restore_all', {'save/Const:0': ckpt_file}) + sess.run(t['states_init']) + + return sess, t + + +def _EvalModel(dataset): + """Evaluate model perplexity using provided dataset. + + Args: + dataset: LM1BDataset object. + """ + sess, t = _LoadModel(FLAGS.pbtxt, FLAGS.ckpt) + + current_step = t['global_step'].eval(session=sess) + sys.stderr.write('Loaded step %d.\n' % current_step) + + data_gen = dataset.get_batch(BATCH_SIZE, NUM_TIMESTEPS, forever=False) + sum_num = 0.0 + sum_den = 0.0 + perplexity = 0.0 + for i, (inputs, char_inputs, _, targets, weights) in enumerate(data_gen): + input_dict = {t['inputs_in']: inputs, + t['targets_in']: targets, + t['target_weights_in']: weights} + if 'char_inputs_in' in t: + input_dict[t['char_inputs_in']] = char_inputs + log_perp = sess.run(t['log_perplexity_out'], feed_dict=input_dict) + + if np.isnan(log_perp): + sys.stderr.error('log_perplexity is Nan.\n') + else: + sum_num += log_perp * weights.mean() + sum_den += weights.mean() + if sum_den > 0: + perplexity = np.exp(sum_num / sum_den) + + sys.stderr.write('Eval Step: %d, Average Perplexity: %f.\n' % + (i, perplexity)) + + if i > FLAGS.max_eval_steps: + break + + +def _SampleSoftmax(softmax): + return min(np.sum(np.cumsum(softmax) < np.random.rand()), len(softmax) - 1) + + +def _SampleModel(prefix_words, vocab): + """Predict next words using the given prefix words. + + Args: + prefix_words: Prefix words. + vocab: Vocabulary. Contains max word chard id length and converts between + words and ids. 
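+
+  Sampling note (summarizing the loop below): the previously emitted word
+  (its id and character ids) is fed back in at every step, the next word is
+  drawn from the softmax via _SampleSoftmax, and generation stops at the
+  end-of-sentence token or once FLAGS.max_sample_words is exceeded.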
+  """
+  targets = np.zeros([BATCH_SIZE, NUM_TIMESTEPS], np.int32)
+  weights = np.ones([BATCH_SIZE, NUM_TIMESTEPS], np.float32)
+
+  sess, t = _LoadModel(FLAGS.pbtxt, FLAGS.ckpt)
+
+  if prefix_words.find('<S>') != 0:
+    prefix_words = '<S> ' + prefix_words
+
+  prefix = [vocab.word_to_id(w) for w in prefix_words.split()]
+  prefix_char_ids = [vocab.word_to_char_ids(w) for w in prefix_words.split()]
+  for _ in xrange(FLAGS.num_samples):
+    inputs = np.zeros([BATCH_SIZE, NUM_TIMESTEPS], np.int32)
+    char_ids_inputs = np.zeros(
+        [BATCH_SIZE, NUM_TIMESTEPS, vocab.max_word_length], np.int32)
+    samples = prefix[:]
+    char_ids_samples = prefix_char_ids[:]
+    sent = ''
+    while True:
+      inputs[0, 0] = samples[0]
+      char_ids_inputs[0, 0, :] = char_ids_samples[0]
+      samples = samples[1:]
+      char_ids_samples = char_ids_samples[1:]
+
+      softmax = sess.run(t['softmax_out'],
+                         feed_dict={t['char_inputs_in']: char_ids_inputs,
+                                    t['inputs_in']: inputs,
+                                    t['targets_in']: targets,
+                                    t['target_weights_in']: weights})
+
+      sample = _SampleSoftmax(softmax[0])
+      sample_char_ids = vocab.word_to_char_ids(vocab.id_to_word(sample))
+
+      if not samples:
+        samples = [sample]
+        char_ids_samples = [sample_char_ids]
+      sent += vocab.id_to_word(samples[0]) + ' '
+      sys.stderr.write('%s\n' % sent)
+
+      if (vocab.id_to_word(samples[0]) == '</S>' or
+          len(sent) > FLAGS.max_sample_words):
+        break
+
+
+def _DumpEmb(vocab):
+  """Dump the softmax weights and word embeddings to files.
+
+  Args:
+    vocab: Vocabulary. Contains vocabulary size and converts word to ids.
+  """
+  assert FLAGS.save_dir, 'Must specify FLAGS.save_dir for dump_emb.'
+  inputs = np.zeros([BATCH_SIZE, NUM_TIMESTEPS], np.int32)
+  targets = np.zeros([BATCH_SIZE, NUM_TIMESTEPS], np.int32)
+  weights = np.ones([BATCH_SIZE, NUM_TIMESTEPS], np.float32)
+
+  sess, t = _LoadModel(FLAGS.pbtxt, FLAGS.ckpt)
+
+  softmax_weights = sess.run(t['softmax_weights'])
+  fname = FLAGS.save_dir + '/embeddings_softmax.npy'
+  with tf.gfile.Open(fname, mode='w') as f:
+    np.save(f, softmax_weights)
+  sys.stderr.write('Finished softmax weights\n')
+
+  all_embs = np.zeros([vocab.size, 1024])
+  for i in xrange(vocab.size):
+    input_dict = {t['inputs_in']: inputs,
+                  t['targets_in']: targets,
+                  t['target_weights_in']: weights}
+    if 'char_inputs_in' in t:
+      input_dict[t['char_inputs_in']] = (
+          vocab.word_char_ids[i].reshape([-1, 1, MAX_WORD_LEN]))
+    embs = sess.run(t['all_embs'], input_dict)
+    all_embs[i, :] = embs
+    sys.stderr.write('Finished word embedding %d/%d\n' % (i, vocab.size))
+
+  fname = FLAGS.save_dir + '/embeddings_char_cnn.npy'
+  with tf.gfile.Open(fname, mode='w') as f:
+    np.save(f, all_embs)
+  sys.stderr.write('Embedding file saved\n')
+
+
+def _DumpSentenceEmbedding(sentence, vocab):
+  """Dump the LSTM state embedding for each word of the given sentence.
+
+  Args:
+    sentence: Sentence words.
+    vocab: Vocabulary. Contains max word char id length and converts between
+      words and ids.
+  """
+  targets = np.zeros([BATCH_SIZE, NUM_TIMESTEPS], np.int32)
+  weights = np.ones([BATCH_SIZE, NUM_TIMESTEPS], np.float32)
+
+  sess, t = _LoadModel(FLAGS.pbtxt, FLAGS.ckpt)
+
+  if sentence.find('<S>') != 0:
+    sentence = '<S> ' + sentence
+
+  word_ids = [vocab.word_to_id(w) for w in sentence.split()]
+  char_ids = [vocab.word_to_char_ids(w) for w in sentence.split()]
+
+  inputs = np.zeros([BATCH_SIZE, NUM_TIMESTEPS], np.int32)
+  char_ids_inputs = np.zeros(
+      [BATCH_SIZE, NUM_TIMESTEPS, vocab.max_word_length], np.int32)
+  for i in xrange(len(word_ids)):
+    inputs[0, 0] = word_ids[i]
+    char_ids_inputs[0, 0, :] = char_ids[i]
+
+    # Add 'lstm/lstm_0/control_dependency' if you want to dump previous layer
+    # LSTM.
+    lstm_emb = sess.run(t['lstm/lstm_1/control_dependency'],
+                        feed_dict={t['char_inputs_in']: char_ids_inputs,
+                                   t['inputs_in']: inputs,
+                                   t['targets_in']: targets,
+                                   t['target_weights_in']: weights})
+
+    fname = os.path.join(FLAGS.save_dir, 'lstm_emb_step_%d.npy' % i)
+    with tf.gfile.Open(fname, mode='w') as f:
+      np.save(f, lstm_emb)
+    sys.stderr.write('LSTM embedding step %d file saved\n' % i)
+
+
+def main(unused_argv):
+  vocab = data_utils.CharsVocabulary(FLAGS.vocab_file, MAX_WORD_LEN)
+
+  if FLAGS.mode == 'eval':
+    dataset = data_utils.LM1BDataset(FLAGS.input_data, vocab)
+    _EvalModel(dataset)
+  elif FLAGS.mode == 'sample':
+    _SampleModel(FLAGS.prefix, vocab)
+  elif FLAGS.mode == 'dump_emb':
+    _DumpEmb(vocab)
+  elif FLAGS.mode == 'dump_lstm_emb':
+    _DumpSentenceEmbedding(FLAGS.sentence, vocab)
+  else:
+    raise Exception('Mode not supported.')
+
+
+if __name__ == '__main__':
+  tf.app.run()
diff --git a/models/research/lm_commonsense/README.md b/models/research/lm_commonsense/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..78c8f53ca226f09c4b185490d6966f98bf584889
--- /dev/null
+++ b/models/research/lm_commonsense/README.md
@@ -0,0 +1,170 @@
+![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg)
+![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen)
+![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg)
+
+# A Simple Method for Commonsense Reasoning
+
+This repository contains code to reproduce results from [*A Simple Method for Commonsense Reasoning*](https://arxiv.org/abs/1806.02847).
+
+Authors and contact:
+
+* Trieu H. Trinh (thtrieu@google.com, github: thtrieu)
+* Quoc V. Le (qvl@google.com)
+
+## TL;DR
+
+Commonsense reasoning is a long-standing challenge for deep learning. For example,
+it is difficult to use neural networks to tackle the Winograd Schema dataset - a difficult subset of Pronoun Disambiguation problems. In this work, we use language models to score substituted sentences to decide the correct reference of the ambiguous pronoun (see Figure below for an example).
+
+![Figure 1. Overview of our method.](method.jpg)
+
+This simple unsupervised method achieves new state-of-the-art (*as of June 1st, 2018*) results on both benchmark PDP-60 and WSC-273 (See Table below), without using rule-based reasoning or expensive annotated knowledge bases.
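+The scoring rule itself is small. The sketch below is only an illustration of
+the idea, not code from this release; `sentence_log_prob` is a hypothetical
+helper standing in for the language-model scoring implemented in `eval.py` and
+`utils.py`. The results table follows the sketch.
+
+```python
+# Illustrative sketch of substitution scoring (assumes a hypothetical
+# `sentence_log_prob(sentence)` returning the LM log-probability of a
+# complete sentence).
+def resolve_pronoun(sentence_template, candidates, sentence_log_prob):
+  """Pick the candidate whose substituted sentence the LM scores highest."""
+  # e.g. sentence_template = "The trophy does not fit in the suitcase because _ is too big."
+  #      candidates = ["the trophy", "the suitcase"]
+  scored = []
+  for candidate in candidates:
+    substituted = sentence_template.replace('_', candidate)
+    scored.append((sentence_log_prob(substituted), candidate))
+  return max(scored)[1]
+```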
+ +| Commonsense-reasoning test | Previous best result | Ours | +| ----------------------------|:----------------------:|:-----:| +| Pronoun Disambiguation | 66.7% | 70% | +| Winograd Schema Challenge | 52.8% | 63.7% | + + + +## Citation + +If you use our released models below in your publication, please cite the original paper: + +@article{TBD} + + +## Requirements +* Python >=2.6 +* Tensorflow >= v1.4 +* Numpy >= 1.12.1 + +## Details of this release + +The open-sourced components include: + +* Test sets from Pronoun Disambiguation Problem (PDP-60) and Winograd Schema Challenges (WSC-273). +* Tensorflow metagraph and checkpoints of 14 language models (See Appendix A in the paper). +* A vocabulary file. +* Code to reproduce results from the original paper. + +## How to run + +### 1. Download data files + +Download all files from the [Google Cloud Storage of this project](https://console.cloud.google.com/storage/browser/commonsense-reasoning/). The easiest way is to install and use `gsutil cp` command-line tool (See [install gsutil](https://cloud.google.com/storage/docs/gsutil_install)). + + +```shell +# Download everything from the project gs://commonsense-reasoning +$ gsutil cp -R gs://commonsense-reasoning/* . +Copying gs://commonsense-reasoning/reproduce/vocab.txt... +Copying gs://commonsense-reasoning/reproduce/commonsense_test/pdp60.json... +Copying gs://commonsense-reasoning/reproduce/commonsense_test/wsc273.json... + +...(omitted) +``` + +All downloaded content should be in `./reproduce/`. This includes two tests `pdp60.json` and `wsc273.json`, a vocabulary file `vocab.txt` and checkpoints for all 14 language models, each includes three files (`.data`, `.index` and `.meta`). All checkpoint names start with `ckpt-best` since they are saved at the best perplexity on a hold-out text corpus. + +```shell +# Check for the content +$ ls reproduce/* +reproduce/vocab.txt + +reproduce/commonsense_test: +pdp60.json wsc273.json + +reproduce/lm01: +ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta + +reproduce/lm02: +ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta + +reproduce/lm03: +ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta + +reproduce/lm04: +ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta + +reproduce/lm05: +ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta + +reproduce/lm06: +ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta + +reproduce/lm07: +ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta + +reproduce/lm08: +ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta + +reproduce/lm09: +ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta + +reproduce/lm10: +ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta + +reproduce/lm11: +ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta + +reproduce/lm12: +ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta + +reproduce/lm13: +ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta + +reproduce/lm14: +ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta +``` + +### 2. Run evaluation code + +To reproduce results from the paper, simply run `eval.py` script. + +```shell +$ python eval.py --data_dir=reproduce + +Restored from ./reproduce/lm01 +Reset RNN states. 
+Processing patch (1, 1) / (2, 4) +Probs for +[['Then' 'Dad' 'figured' ..., 'man' "'s" 'board-bill'] + ['Then' 'Dad' 'figured' ..., 'man' "'s" 'board-bill'] + ['Always' 'before' ',' ..., 'now' ',' 'for'] + ..., + ['Mark' 'was' 'close' ..., 'promising' 'him' ','] + ['Mark' 'was' 'close' ..., 'promising' 'him' ','] + ['Mark' 'was' 'close' ..., 'promising' 'him' ',']] += +[[ 1.64250596e-05 1.77780055e-06 4.14267970e-06 ..., 1.87315454e-03 + 1.57723188e-01 6.31845817e-02] + [ 1.64250596e-05 1.77780055e-06 4.14267970e-06 ..., 1.87315454e-03 + 1.57723188e-01 6.31845817e-02] + [ 1.28243030e-07 3.80435935e-03 1.12383246e-01 ..., 9.67682712e-03 + 2.17407525e-01 1.08243264e-01] + ..., + [ 1.15557734e-04 2.92792241e-03 3.46455898e-04 ..., 2.72328052e-05 + 3.37066874e-02 7.89367408e-02] + [ 1.15557734e-04 2.92792241e-03 3.46455898e-04 ..., 2.72328052e-05 + 3.37066874e-02 7.89367408e-02] + [ 1.15557734e-04 2.92792241e-03 3.46455898e-04 ..., 2.72328052e-05 + 3.37066874e-02 7.89367408e-02]] +Processing patch (1, 2) / (2, 4) + +...(omitted) + +Accuracy of 1 LM(s) on pdp60 = 0.6 + +...(omitted) + +Accuracy of 5 LM(s) on pdp60 = 0.7 + +...(omitted) + +Accuracy of 10 LM(s) on wsc273 = 0.615 + +...(omitted) + +Accuracy of 14 LM(s) on wsc273 = 0.637 +``` diff --git a/models/research/lm_commonsense/eval.py b/models/research/lm_commonsense/eval.py new file mode 100644 index 0000000000000000000000000000000000000000..e5b7ff98b50a5af4e066d3d9f82c1acae81c3e93 --- /dev/null +++ b/models/research/lm_commonsense/eval.py @@ -0,0 +1,190 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import pickle as pkl +import numpy as np +import tensorflow as tf +import utils + +tf.app.flags.DEFINE_string( + 'data_dir', 'reproduce', + 'Path to directory containing data and model checkpoints.') + + +FLAGS = tf.app.flags.FLAGS + + +class EnsembleLM(object): + """Ensemble of language models.""" + + def __init__(self, test_data_name='wsc273'): + vocab_file = os.path.join(FLAGS.data_dir, 'vocab.txt') + self.vocab = utils.CharsVocabulary(vocab_file, 50) + assert test_data_name in ['pdp60', 'wsc273'], ( + 'Test data must be pdp60 or wsc273, got {}'.format(test_data_name)) + self.test_data_name = test_data_name + + test_data = utils.parse_commonsense_reasoning_test(test_data_name) + self.question_ids, self.sentences, self.labels = test_data + self.all_probs = [] # aggregate single-model prediction here. + + def add_single_model(self, model_name='lm1'): + """Add a single model into the current ensemble.""" + # Create single LM + single_lm = SingleRecurrentLanguageModel(self.vocab, model_name) + + # Add the single LM prediction. 
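+    # The returned `probs` holds one row of per-word probabilities for each
+    # substituted sentence; rows contributed by different models are averaged
+    # in evaluate() below, and each model's result is cached on disk by
+    # assign_probs.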
+ probs = single_lm.assign_probs(self.sentences, self.test_data_name) + self.all_probs.append(probs) + print('Done adding {}'.format(model_name)) + + def evaluate(self): + """Evaluate the current ensemble.""" + # Attach word probabilities and correctness label to each substitution + ensembled_probs = sum(self.all_probs) / len(self.all_probs) + scorings = [] + for i, sentence in enumerate(self.sentences): + correctness = self.labels[i] + word_probs = ensembled_probs[i, :len(sentence)] + joint_prob = np.prod(word_probs, dtype=np.float64) + + scorings.append(dict( + correctness=correctness, + sentence=sentence, + joint_prob=joint_prob, + word_probs=word_probs)) + scoring_mode = 'full' if self.test_data_name == 'pdp60' else 'partial' + return utils.compare_substitutions( + self.question_ids, scorings, scoring_mode) + + +class SingleRecurrentLanguageModel(object): + """Single Recurrent Language Model.""" + + def __init__(self, vocab, model_name='lm01'): + self.vocab = vocab + self.log_dir = os.path.join(FLAGS.data_dir, model_name) + + def reset(self): + self.sess.run(self.tensors['states_init']) + + def _score(self, word_patch): + """Score a matrix of shape (batch_size, num_timesteps+1) str tokens.""" + word_ids = np.array( + [[self.vocab.word_to_id(word) for word in row] + for row in word_patch]) + char_ids = np.array( + [[self.vocab.word_to_char_ids(word) for word in row] + for row in word_patch]) + print('Probs for \n{}\n='.format(np.array(word_patch)[:, 1:])) + + input_ids, target_ids = word_ids[:, :-1], word_ids[:, 1:] + input_char_ids = char_ids[:, :-1, :] + + softmax = self.sess.run(self.tensors['softmax_out'], feed_dict={ + self.tensors['inputs_in']: input_ids, + self.tensors['char_inputs_in']: input_char_ids + }) + + batch_size, num_timesteps = self.shape + softmax = softmax.reshape((num_timesteps, batch_size, -1)) + softmax = np.transpose(softmax, [1, 0, 2]) + probs = np.array([[softmax[row, col, target_ids[row, col]] + for col in range(num_timesteps)] + for row in range(batch_size)]) + print(probs) + return probs + + def _score_patches(self, word_patches): + """Score a 2D matrix of word_patches and stitch results together.""" + batch_size, num_timesteps = self.shape + nrow, ncol = len(word_patches), len(word_patches[0]) + max_len = num_timesteps * ncol + probs = np.zeros([0, max_len]) # accumulate results into this. + + # Loop through the 2D matrix of word_patches and score each. + for i, row in enumerate(word_patches): + print('Reset RNN states.') + self.reset() # reset states before processing each row. + row_probs = np.zeros([batch_size, 0]) + for j, word_patch in enumerate(row): + print('Processing patch ' + '({}, {}) / ({}, {})'.format(i+1, j+1, nrow, ncol)) + patch_probs = (self._score(word_patch) if word_patch else + np.zeros([batch_size, num_timesteps])) + row_probs = np.concatenate([row_probs, patch_probs], 1) + probs = np.concatenate([probs, row_probs], 0) + return probs + + def assign_probs(self, sentences, test_data_name='wsc273'): + """Return prediction accuracy using this LM for a test.""" + + probs_cache = os.path.join(self.log_dir, '{}.probs'.format(test_data_name)) + if os.path.exists(probs_cache): + print('Reading cached result from {}'.format(probs_cache)) + with tf.gfile.Open(probs_cache, 'r') as f: + probs = pkl.load(f) + else: + tf.reset_default_graph() + self.sess = tf.Session() + # Build the graph. 
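+        # The checkpoint metagraph already defines the RNN, so the graph is
+        # rebuilt from ckpt-best.meta and only the named input/output tensors
+        # are looked up afterwards.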
+ saver = tf.train.import_meta_graph( + os.path.join(self.log_dir, 'ckpt-best.meta')) + saver.restore(self.sess, os.path.join(self.log_dir, 'ckpt-best')) + print('Restored from {}'.format(self.log_dir)) + graph = tf.get_default_graph() + self.tensors = dict( + inputs_in=graph.get_tensor_by_name('test_inputs_in:0'), + char_inputs_in=graph.get_tensor_by_name('test_char_inputs_in:0'), + softmax_out=graph.get_tensor_by_name('SotaRNN_1/softmax_out:0'), + states_init=graph.get_operation_by_name('SotaRNN_1/states_init')) + self.shape = self.tensors['inputs_in'].shape.as_list() + + # Cut sentences into patches of shape processable by the LM. + batch_size, num_timesteps = self.shape + word_patches = utils.cut_to_patches(sentences, batch_size, num_timesteps) + probs = self._score_patches(word_patches) + + # Cache the probs since they are expensive to evaluate + with tf.gfile.Open(probs_cache, 'w') as f: + pkl.dump(probs, f) + return probs + + +def evaluate_ensemble(test_data_name, number_of_lms): + ensemble = EnsembleLM(test_data_name) + model_list = ['lm{:02d}'.format(i+1) for i in range(number_of_lms)] + for model_name in model_list: + ensemble.add_single_model(model_name) + accuracy = ensemble.evaluate() + print('Accuracy of {} LM(s) on {} = {}'.format( + number_of_lms, test_data_name, accuracy)) + + +def main(_): + evaluate_ensemble('pdp60', 1) # 60% + evaluate_ensemble('pdp60', 5) # 70% + evaluate_ensemble('wsc273', 10) # 61.5% + evaluate_ensemble('wsc273', 14) # 63.7% + + +if __name__ == '__main__': + tf.app.run(main) diff --git a/models/research/lm_commonsense/method.jpg b/models/research/lm_commonsense/method.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ee8a5506fccca3cbb67f7bda0ccef78303cb228b Binary files /dev/null and b/models/research/lm_commonsense/method.jpg differ diff --git a/models/research/lm_commonsense/utils.py b/models/research/lm_commonsense/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d75f2b0fb72716860ea6d438e6b8ca2732d13c84 --- /dev/null +++ b/models/research/lm_commonsense/utils.py @@ -0,0 +1,368 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import json
+import os
+import numpy as np
+import tensorflow as tf
+
+FLAGS = tf.flags.FLAGS
+
+
+class Vocabulary(object):
+  """Class that holds a vocabulary for the dataset."""
+
+  def __init__(self, filename):
+
+    self._id_to_word = []
+    self._word_to_id = {}
+    self._unk = -1
+    self._bos = -1
+    self._eos = -1
+
+    with tf.gfile.Open(filename) as f:
+      idx = 0
+      for line in f:
+        word_name = line.strip()
+        if word_name == '<S>':
+          self._bos = idx
+        elif word_name == '</S>':
+          self._eos = idx
+        elif word_name == '<UNK>':
+          self._unk = idx
+        if word_name == '!!!MAXTERMID':
+          continue
+
+        self._id_to_word.append(word_name)
+        self._word_to_id[word_name] = idx
+        idx += 1
+
+  @property
+  def bos(self):
+    return self._bos
+
+  @property
+  def eos(self):
+    return self._eos
+
+  @property
+  def unk(self):
+    return self._unk
+
+  @property
+  def size(self):
+    return len(self._id_to_word)
+
+  def word_to_id(self, word):
+    if word in self._word_to_id:
+      return self._word_to_id[word]
+    else:
+      if word.lower() in self._word_to_id:
+        return self._word_to_id[word.lower()]
+    return self.unk
+
+  def id_to_word(self, cur_id):
+    if cur_id < self.size:
+      return self._id_to_word[int(cur_id)]
+    return ''
+
+  def decode(self, cur_ids):
+    return ' '.join([self.id_to_word(cur_id) for cur_id in cur_ids])
+
+  def encode(self, sentence):
+    word_ids = [self.word_to_id(cur_word) for cur_word in sentence.split()]
+    return np.array([self.bos] + word_ids + [self.eos], dtype=np.int32)
+
+
+class CharsVocabulary(Vocabulary):
+  """Vocabulary containing character-level information."""
+
+  def __init__(self, filename, max_word_length):
+    super(CharsVocabulary, self).__init__(filename)
+
+    self._max_word_length = max_word_length
+    chars_set = set()
+
+    for word in self._id_to_word:
+      chars_set |= set(word)
+
+    free_ids = []
+    for i in range(256):
+      if chr(i) in chars_set:
+        continue
+      free_ids.append(chr(i))
+
+    if len(free_ids) < 5:
+      raise ValueError('Not enough free char ids: %d' % len(free_ids))
+
+    self.bos_char = free_ids[0]  # begin-of-sentence character
+    self.eos_char = free_ids[1]  # end-of-sentence character
+    self.bow_char = free_ids[2]  # begin-of-word character
+    self.eow_char = free_ids[3]  # end-of-word character
+    self.pad_char = free_ids[4]  # padding character
+
+    chars_set |= {self.bos_char, self.eos_char, self.bow_char, self.eow_char,
+                  self.pad_char}
+
+    self._char_set = chars_set
+    num_words = len(self._id_to_word)
+
+    self._word_char_ids = np.zeros([num_words, max_word_length], dtype=np.int32)
+
+    self.bos_chars = self._convert_word_to_char_ids(self.bos_char)
+    self.eos_chars = self._convert_word_to_char_ids(self.eos_char)
+
+    for i, word in enumerate(self._id_to_word):
+      if i == self.bos:
+        self._word_char_ids[i] = self.bos_chars
+      elif i == self.eos:
+        self._word_char_ids[i] = self.eos_chars
+      else:
+        self._word_char_ids[i] = self._convert_word_to_char_ids(word)
+
+  @property
+  def max_word_length(self):
+    return self._max_word_length
+
+  def _convert_word_to_char_ids(self, word):
+    code = np.zeros([self.max_word_length], dtype=np.int32)
+    code[:] = ord(self.pad_char)
+
+    if len(word) > self.max_word_length - 2:
+      word = word[:self.max_word_length-2]
+    cur_word = self.bow_char + word + self.eow_char
+    for j in range(len(cur_word)):
+      code[j] = ord(cur_word[j])
+    return code
+
+  def word_to_char_ids(self, word):
+    if word in self._word_to_id:
+      return self._word_char_ids[self._word_to_id[word]]
+    else:
+      return 
self._convert_word_to_char_ids(word) + + def encode_chars(self, sentence): + chars_ids = [self.word_to_char_ids(cur_word) + for cur_word in sentence.split()] + return np.vstack([self.bos_chars] + chars_ids + [self.eos_chars]) + + +_SPECIAL_CHAR_MAP = { + '\xe2\x80\x98': '\'', + '\xe2\x80\x99': '\'', + '\xe2\x80\x9c': '"', + '\xe2\x80\x9d': '"', + '\xe2\x80\x93': '-', + '\xe2\x80\x94': '-', + '\xe2\x88\x92': '-', + '\xce\x84': '\'', + '\xc2\xb4': '\'', + '`': '\'' +} + +_START_SPECIAL_CHARS = ['.', ',', '?', '!', ';', ':', '[', ']', '\'', '+', '/', + '\xc2\xa3', '$', '~', '*', '%', '{', '}', '#', '&', '-', + '"', '(', ')', '='] + list(_SPECIAL_CHAR_MAP.keys()) +_SPECIAL_CHARS = _START_SPECIAL_CHARS + [ + '\'s', '\'m', '\'t', '\'re', '\'d', '\'ve', '\'ll'] + + +def tokenize(sentence): + """Tokenize a sentence.""" + sentence = str(sentence) + words = sentence.strip().split() + tokenized = [] # return this + + for word in words: + if word.lower() in ['mr.', 'ms.']: + tokenized.append(word) + continue + + # Split special chars at the start of word + will_split = True + while will_split: + will_split = False + for char in _START_SPECIAL_CHARS: + if word.startswith(char): + tokenized.append(char) + word = word[len(char):] + will_split = True + + # Split special chars at the end of word + special_end_tokens = [] + will_split = True + while will_split: + will_split = False + for char in _SPECIAL_CHARS: + if word.endswith(char): + special_end_tokens = [char] + special_end_tokens + word = word[:-len(char)] + will_split = True + + if word: + tokenized.append(word) + tokenized += special_end_tokens + + # Add necessary end of sentence token. + if tokenized[-1] not in ['.', '!', '?']: + tokenized += ['.'] + return tokenized + + +def parse_commonsense_reasoning_test(test_data_name): + """Read JSON test data.""" + with tf.gfile.Open(os.path.join( + FLAGS.data_dir, 'commonsense_test', + '{}.json'.format(test_data_name)), 'r') as f: + data = json.load(f) + + question_ids = [d['question_id'] for d in data] + sentences = [tokenize(d['substitution']) for d in data] + labels = [d['correctness'] for d in data] + + return question_ids, sentences, labels + + +PAD = '' + + +def cut_to_patches(sentences, batch_size, num_timesteps): + """Cut sentences into patches of shape (batch_size, num_timesteps). + + Args: + sentences: a list of sentences, each sentence is a list of str token. + batch_size: batch size + num_timesteps: number of backprop step + + Returns: + patches: A 2D matrix, + each entry is a matrix of shape (batch_size, num_timesteps). + """ + preprocessed = [['']+sentence+[''] for sentence in sentences] + max_len = max([len(sent) for sent in preprocessed]) + + # Pad to shape [height, width] + # where height is a multiple of batch_size + # and width is a multiple of num_timesteps + nrow = int(np.ceil(len(preprocessed) * 1.0 / batch_size)) + ncol = int(np.ceil(max_len * 1.0 / num_timesteps)) + height, width = nrow * batch_size, ncol * num_timesteps + 1 + preprocessed = [sent + [PAD] * (width - len(sent)) for sent in preprocessed] + preprocessed += [[PAD] * width] * (height - len(preprocessed)) + + # Cut preprocessed into patches of shape [batch_size, num_timesteps] + patches = [] + for row in range(nrow): + patches.append([]) + for col in range(ncol): + patch = [sent[col * num_timesteps: + (col+1) * num_timesteps + 1] + for sent in preprocessed[row * batch_size: + (row+1) * batch_size]] + if np.all(np.array(patch)[:, 1:] == PAD): + patch = None # no need to process this patch. 
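+        # Keeping the None placeholder (rather than dropping the patch)
+        # preserves the grid layout, so rows and columns still line up when
+        # the per-patch scores are stitched back together in eval.py.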
+ patches[-1].append(patch) + return patches + + +def _substitution_mask(sent1, sent2): + """Binary mask identifying substituted part in two sentences. + + Example sentence and their mask: + First sentence = "I like the cat 's color" + 0 0 0 1 0 0 + Second sentence = "I like the yellow dog 's color" + 0 0 0 1 1 0 0 + + Args: + sent1: first sentence + sent2: second sentence + + Returns: + mask1: mask for first sentence + mask2: mask for second sentence + """ + mask1_start, mask2_start = [], [] + while sent1[0] == sent2[0]: + sent1 = sent1[1:] + sent2 = sent2[1:] + mask1_start.append(0.) + mask2_start.append(0.) + + mask1_end, mask2_end = [], [] + while sent1[-1] == sent2[-1]: + if (len(sent1) == 1) or (len(sent2) == 1): + break + sent1 = sent1[:-1] + sent2 = sent2[:-1] + mask1_end = [0.] + mask1_end + mask2_end = [0.] + mask2_end + + assert sent1 or sent2, 'Two sentences are identical.' + return (mask1_start + [1.] * len(sent1) + mask1_end, + mask2_start + [1.] * len(sent2) + mask2_end) + + +def _convert_to_partial(scoring1, scoring2): + """Convert full scoring into partial scoring.""" + mask1, mask2 = _substitution_mask( + scoring1['sentence'], scoring2['sentence']) + + def _partial_score(scoring, mask): + word_probs = [max(_) for _ in zip(scoring['word_probs'], mask)] + scoring.update(word_probs=word_probs, + joint_prob=np.prod(word_probs)) + + _partial_score(scoring1, mask1) + _partial_score(scoring2, mask2) + + +def compare_substitutions(question_ids, scorings, mode='full'): + """Return accuracy by comparing two consecutive scorings.""" + prediction_correctness = [] + # Compare two consecutive substitutions + for i in range(len(scorings) // 2): + scoring1, scoring2 = scorings[2*i: 2*i+2] + if mode == 'partial': # fix joint prob into partial prob + _convert_to_partial(scoring1, scoring2) + + prediction_correctness.append( + (scoring2['joint_prob'] > scoring1['joint_prob']) == + scoring2['correctness']) + + # Two consecutive substitutions always belong to the same question + question_ids = [qid for i, qid in enumerate(question_ids) if i % 2 == 0] + assert len(question_ids) == len(prediction_correctness) + num_questions = len(set(question_ids)) + + # Question is correctly answered only if + # all predictions of the same question_id is correct + num_correct_answer = 0 + previous_qid = None + correctly_answered = False + for predict, qid in zip(prediction_correctness, question_ids): + if qid != previous_qid: + previous_qid = qid + num_correct_answer += int(correctly_answered) + correctly_answered = True + correctly_answered = correctly_answered and predict + num_correct_answer += int(correctly_answered) + + return num_correct_answer / num_questions diff --git a/models/research/lstm_object_detection/README.md b/models/research/lstm_object_detection/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a696ba3df306768cfa28223ad957ef564667c7dd --- /dev/null +++ b/models/research/lstm_object_detection/README.md @@ -0,0 +1,40 @@ +# Tensorflow Mobile Video Object Detection + +Tensorflow mobile video object detection implementation proposed in the +following papers: + +

+ +

+ +``` +"Mobile Video Object Detection with Temporally-Aware Feature Maps", +Liu, Mason and Zhu, Menglong, CVPR 2018. +``` +\[[link](http://openaccess.thecvf.com/content_cvpr_2018/papers/Liu_Mobile_Video_Object_CVPR_2018_paper.pdf)\]\[[bibtex]( +https://scholar.googleusercontent.com/scholar.bib?q=info:hq5rcMUUXysJ:scholar.google.com/&output=citation&scisig=AAGBfm0AAAAAXLdwXcU5g_wiMQ40EvbHQ9kTyvfUxffh&scisf=4&ct=citation&cd=-1&hl=en)\] + + +

+ +

+ +``` +"Looking Fast and Slow: Memory-Guided Mobile Video Object Detection", +Liu, Mason and Zhu, Menglong and White, Marie and Li, Yinxiao and Kalenichenko, Dmitry +``` +\[[link](https://arxiv.org/abs/1903.10172)\]\[[bibtex]( +https://scholar.googleusercontent.com/scholar.bib?q=info:rLqvkztmWYgJ:scholar.google.com/&output=citation&scisig=AAGBfm0AAAAAXLdwNf-LJlm2M1ymQHbq2wYA995MHpJu&scisf=4&ct=citation&cd=-1&hl=en)\] + + +## Maintainers +* masonliuw@gmail.com +* yinxiao@google.com +* menglong@google.com +* yongzhe@google.com +* lzyuan@google.com + + +## Table of Contents + + * Exporting a trained model diff --git a/models/research/lstm_object_detection/__init__.py b/models/research/lstm_object_detection/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/lstm_object_detection/builders/__init__.py b/models/research/lstm_object_detection/builders/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/lstm_object_detection/builders/graph_rewriter_builder.py b/models/research/lstm_object_detection/builders/graph_rewriter_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..accced2f0fccec190894348d5518bd991332fc71 --- /dev/null +++ b/models/research/lstm_object_detection/builders/graph_rewriter_builder.py @@ -0,0 +1,147 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Custom version for quantized training and evaluation functions. + +The main difference between this and the third_party graph_rewriter_builder.py +is that this version uses experimental_create_training_graph which allows the +customization of freeze_bn_delay. +""" + +import re +import tensorflow.compat.v1 as tf +from tensorflow.contrib import layers as contrib_layers +from tensorflow.contrib import quantize as contrib_quantize +from tensorflow.contrib.quantize.python import common +from tensorflow.contrib.quantize.python import input_to_ops +from tensorflow.contrib.quantize.python import quant_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import math_ops + + +def build(graph_rewriter_config, + quant_overrides_config=None, + is_training=True, + is_export=False): + """Returns a function that modifies default graph based on options. + + Args: + graph_rewriter_config: graph_rewriter_pb2.GraphRewriter proto. + quant_overrides_config: quant_overrides_pb2.QuantOverrides proto. + is_training: whether in training or eval mode. + is_export: whether exporting the graph. 
+ """ + def graph_rewrite_fn(): + """Function to quantize weights and activation of the default graph.""" + if (graph_rewriter_config.quantization.weight_bits != 8 or + graph_rewriter_config.quantization.activation_bits != 8): + raise ValueError('Only 8bit quantization is supported') + + graph = tf.get_default_graph() + + # Insert custom quant ops. + if quant_overrides_config is not None: + input_to_ops_map = input_to_ops.InputToOps(graph) + for q in quant_overrides_config.quant_configs: + producer = graph.get_operation_by_name(q.op_name) + if producer is None: + raise ValueError('Op name does not exist in graph.') + context = _get_context_from_op(producer) + consumers = input_to_ops_map.ConsumerOperations(producer) + if q.fixed_range: + _insert_fixed_quant_op( + context, + q.quant_op_name, + producer, + consumers, + init_min=q.min, + init_max=q.max, + quant_delay=q.delay if is_training else 0) + else: + raise ValueError('Learned ranges are not yet supported.') + + # Quantize the graph by inserting quantize ops for weights and activations + if is_training: + contrib_quantize.experimental_create_training_graph( + input_graph=graph, + quant_delay=graph_rewriter_config.quantization.delay, + freeze_bn_delay=graph_rewriter_config.quantization.delay) + else: + contrib_quantize.experimental_create_eval_graph( + input_graph=graph, + quant_delay=graph_rewriter_config.quantization.delay + if not is_export else 0) + + contrib_layers.summarize_collection('quant_vars') + + return graph_rewrite_fn + + +def _get_context_from_op(op): + """Gets the root context name from the op name.""" + context_re = re.search(r'^(.*)/([^/]+)', op.name) + if context_re: + return context_re.group(1) + return '' + + +def _insert_fixed_quant_op(context, + name, + producer, + consumers, + init_min=-6.0, + init_max=6.0, + quant_delay=None): + """Adds a fake quant op with fixed ranges. + + Args: + context: The parent scope of the op to be quantized. + name: The name of the fake quant op. + producer: The producer op to be quantized. + consumers: The consumer ops to the producer op. + init_min: The minimum range for the fake quant op. + init_max: The maximum range for the fake quant op. + quant_delay: Number of steps to wait before activating the fake quant op. + + Raises: + ValueError: When producer operation is not directly connected to the + consumer operation. + """ + name_prefix = name if not context else context + '/' + name + inputs = producer.outputs[0] + quant = quant_ops.FixedQuantize( + inputs, init_min=init_min, init_max=init_max, scope=name_prefix) + + if quant_delay and quant_delay > 0: + activate_quant = math_ops.greater_equal( + common.CreateOrGetQuantizationStep(), + quant_delay, + name=name_prefix + '/activate_quant') + quant = control_flow_ops.cond( + activate_quant, + lambda: quant, + lambda: inputs, + name=name_prefix + '/delayed_quant') + + if consumers: + tensors_modified_count = common.RerouteTensor( + quant, inputs, can_modify=consumers) + # Some operations can have multiple output tensors going to the same + # consumer. Since consumers is a set, we need to ensure that + # tensors_modified_count is greater than or equal to the length of the set + # of consumers. 
+ if tensors_modified_count < len(consumers): + raise ValueError('No inputs quantized for ops: [%s]' % ', '.join( + [consumer.name for consumer in consumers])) diff --git a/models/research/lstm_object_detection/builders/graph_rewriter_builder_test.py b/models/research/lstm_object_detection/builders/graph_rewriter_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..e06a9f5a3d729fe122bc00e74e2d158b3d06482e --- /dev/null +++ b/models/research/lstm_object_detection/builders/graph_rewriter_builder_test.py @@ -0,0 +1,117 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for graph_rewriter_builder.""" +import mock +import tensorflow.compat.v1 as tf +from tensorflow.contrib import layers as contrib_layers +from tensorflow.contrib import quantize as contrib_quantize +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from lstm_object_detection.builders import graph_rewriter_builder +from lstm_object_detection.protos import quant_overrides_pb2 +from object_detection.protos import graph_rewriter_pb2 + + +class QuantizationBuilderTest(tf.test.TestCase): + + def testQuantizationBuilderSetsUpCorrectTrainArguments(self): + with mock.patch.object( + contrib_quantize, + 'experimental_create_training_graph') as mock_quant_fn: + with mock.patch.object(contrib_layers, + 'summarize_collection') as mock_summarize_col: + graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter() + graph_rewriter_proto.quantization.delay = 10 + graph_rewriter_proto.quantization.weight_bits = 8 + graph_rewriter_proto.quantization.activation_bits = 8 + graph_rewrite_fn = graph_rewriter_builder.build( + graph_rewriter_proto, is_training=True) + graph_rewrite_fn() + _, kwargs = mock_quant_fn.call_args + self.assertEqual(kwargs['input_graph'], tf.get_default_graph()) + self.assertEqual(kwargs['quant_delay'], 10) + mock_summarize_col.assert_called_with('quant_vars') + + def testQuantizationBuilderSetsUpCorrectEvalArguments(self): + with mock.patch.object(contrib_quantize, + 'experimental_create_eval_graph') as mock_quant_fn: + with mock.patch.object(contrib_layers, + 'summarize_collection') as mock_summarize_col: + graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter() + graph_rewriter_proto.quantization.delay = 10 + graph_rewrite_fn = graph_rewriter_builder.build( + graph_rewriter_proto, is_training=False) + graph_rewrite_fn() + _, kwargs = mock_quant_fn.call_args + self.assertEqual(kwargs['input_graph'], tf.get_default_graph()) + mock_summarize_col.assert_called_with('quant_vars') + + def testQuantizationBuilderAddsQuantOverride(self): + graph = ops.Graph() + with graph.as_default(): + self._buildGraph() + + quant_overrides_proto = quant_overrides_pb2.QuantOverrides() + quant_config = quant_overrides_proto.quant_configs.add() + quant_config.op_name = 'test_graph/add_ab' + quant_config.quant_op_name = 
'act_quant' + quant_config.fixed_range = True + quant_config.min = 0 + quant_config.max = 6 + quant_config.delay = 100 + + graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter() + graph_rewriter_proto.quantization.delay = 10 + graph_rewriter_proto.quantization.weight_bits = 8 + graph_rewriter_proto.quantization.activation_bits = 8 + + graph_rewrite_fn = graph_rewriter_builder.build( + graph_rewriter_proto, + quant_overrides_config=quant_overrides_proto, + is_training=True) + graph_rewrite_fn() + + act_quant_found = False + quant_delay_found = False + for op in graph.get_operations(): + if (quant_config.quant_op_name in op.name and + op.type == 'FakeQuantWithMinMaxArgs'): + act_quant_found = True + min_val = op.get_attr('min') + max_val = op.get_attr('max') + self.assertEqual(min_val, quant_config.min) + self.assertEqual(max_val, quant_config.max) + if ('activate_quant' in op.name and + quant_config.quant_op_name in op.name and op.type == 'Const'): + tensor = op.get_attr('value') + if tensor.int64_val[0] == quant_config.delay: + quant_delay_found = True + + self.assertTrue(act_quant_found) + self.assertTrue(quant_delay_found) + + def _buildGraph(self, scope='test_graph'): + with ops.name_scope(scope): + a = tf.constant(10, dtype=dtypes.float32, name='input_a') + b = tf.constant(20, dtype=dtypes.float32, name='input_b') + ab = tf.add(a, b, name='add_ab') + c = tf.constant(30, dtype=dtypes.float32, name='input_c') + abc = tf.multiply(ab, c, name='mul_ab_c') + return abc + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/lstm_object_detection/configs/lstm_ssd_interleaved_mobilenet_v2_imagenet.config b/models/research/lstm_object_detection/configs/lstm_ssd_interleaved_mobilenet_v2_imagenet.config new file mode 100644 index 0000000000000000000000000000000000000000..536d7d5327114efa159475433f051c627043e64f --- /dev/null +++ b/models/research/lstm_object_detection/configs/lstm_ssd_interleaved_mobilenet_v2_imagenet.config @@ -0,0 +1,239 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# For training on Imagenet Video with LSTM Interleaved Mobilenet V2 + +[lstm_object_detection.protos.lstm_model] { + train_unroll_length: 4 + eval_unroll_length: 4 + lstm_state_depth: 320 + depth_multipliers: 1.4 + depth_multipliers: 0.35 + pre_bottleneck: true + low_res: true + train_interleave_method: 'RANDOM_SKIP_SMALL' + eval_interleave_method: 'SKIP3' +} +model { + ssd { + num_classes: 30 # Num of class for imagenet vid dataset. 
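+    # The remaining ssd fields below (box coder, matcher, anchor generator, box predictor, loss, post processing) follow the standard SSD configuration conventions of the TF Object Detection API.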
+ box_coder { + faster_rcnn_box_coder { + y_scale: 10.0 + x_scale: 10.0 + height_scale: 5.0 + width_scale: 5.0 + } + } + matcher { + argmax_matcher { + matched_threshold: 0.5 + unmatched_threshold: 0.5 + ignore_thresholds: false + negatives_lower_than_unmatched: true + force_match_for_each_row: true + } + } + similarity_calculator { + iou_similarity { + } + } + anchor_generator { + ssd_anchor_generator { + num_layers: 5 + min_scale: 0.2 + max_scale: 0.95 + aspect_ratios: 1.0 + aspect_ratios: 2.0 + aspect_ratios: 0.5 + aspect_ratios: 3.0 + aspect_ratios: 0.3333 + } + } + image_resizer { + fixed_shape_resizer { + height: 320 + width: 320 + } + } + box_predictor { + convolutional_box_predictor { + min_depth: 0 + max_depth: 0 + num_layers_before_predictor: 3 + use_dropout: false + dropout_keep_probability: 0.8 + kernel_size: 3 + box_code_size: 4 + apply_sigmoid_to_scores: false + use_depthwise: true + conv_hyperparams { + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + mean: 0.0 + } + } + batch_norm { + train: true, + scale: true, + center: true, + decay: 0.9997, + epsilon: 0.001, + } + } + } + } + feature_extractor { + type: 'lstm_ssd_interleaved_mobilenet_v2' + conv_hyperparams { + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + mean: 0.0 + } + } + batch_norm { + train: true, + scale: true, + center: true, + decay: 0.9997, + epsilon: 0.001, + } + } + } + loss { + classification_loss { + weighted_sigmoid { + } + } + localization_loss { + weighted_smooth_l1 { + } + } + hard_example_miner { + num_hard_examples: 3000 + iou_threshold: 0.99 + loss_type: CLASSIFICATION + max_negatives_per_positive: 3 + min_negatives_per_image: 0 + } + classification_weight: 1.0 + localization_weight: 4.0 + } + normalize_loss_by_num_matches: true + post_processing { + batch_non_max_suppression { + score_threshold: -20.0 + iou_threshold: 0.5 + max_detections_per_class: 100 + max_total_detections: 100 + } + score_converter: SIGMOID + } + } +} + +train_config: { + batch_size: 8 + optimizer { + use_moving_average: false + rms_prop_optimizer: { + learning_rate: { + exponential_decay_learning_rate { + initial_learning_rate: 0.002 + decay_steps: 200000 + decay_factor: 0.95 + } + } + momentum_optimizer_value: 0.9 + decay: 0.9 + epsilon: 1.0 + } + } + gradient_clipping_by_norm: 10.0 + batch_queue_capacity: 12 + prefetch_queue_capacity: 4 +} + +train_input_reader: { + shuffle_buffer_size: 32 + queue_capacity: 12 + prefetch_size: 12 + min_after_dequeue: 4 + label_map_path: "path/to/label_map" + external_input_reader { + [lstm_object_detection.protos.GoogleInputReader.google_input_reader] { + tf_record_video_input_reader: { + input_path: '/data/lstm_detection/tfrecords/test.tfrecord' + data_type: TF_SEQUENCE_EXAMPLE + video_length: 4 + } + } + } +} + +eval_config: { + metrics_set: "coco_evaluation_all_frames" + use_moving_averages: true + min_score_threshold: 0.5 + max_num_boxes_to_visualize: 300 + visualize_groundtruth_boxes: true + groundtruth_box_visualization_color: "red" +} + +eval_input_reader { + label_map_path: "path/to/label_map" + shuffle: true + num_epochs: 1 + num_parallel_batches: 1 + num_readers: 1 + external_input_reader { + [lstm_object_detection.protos.GoogleInputReader.google_input_reader] { + tf_record_video_input_reader: { + input_path: "path/to/sequence_example/data" + data_type: TF_SEQUENCE_EXAMPLE + 
video_length: 10 + } + } + } +} + +eval_input_reader: { + label_map_path: "path/to/label_map" + external_input_reader { + [lstm_object_detection.protos.GoogleInputReader.google_input_reader] { + tf_record_video_input_reader: { + input_path: "path/to/sequence_example/data" + data_type: TF_SEQUENCE_EXAMPLE + video_length: 4 + } + } + } + shuffle: true + num_readers: 1 +} diff --git a/models/research/lstm_object_detection/configs/lstm_ssd_mobilenet_v1_imagenet.config b/models/research/lstm_object_detection/configs/lstm_ssd_mobilenet_v1_imagenet.config new file mode 100644 index 0000000000000000000000000000000000000000..cb357ec17eeb80795d48a5aea50f98f3934ff1ad --- /dev/null +++ b/models/research/lstm_object_detection/configs/lstm_ssd_mobilenet_v1_imagenet.config @@ -0,0 +1,232 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# For training on Imagenet Video with LSTM Mobilenet V1 + +[lstm_object_detection.protos.lstm_model] { + train_unroll_length: 4 + eval_unroll_length: 4 +} + +model { + ssd { + num_classes: 30 # Num of class for imagenet vid dataset. + box_coder { + faster_rcnn_box_coder { + y_scale: 10.0 + x_scale: 10.0 + height_scale: 5.0 + width_scale: 5.0 + } + } + matcher { + argmax_matcher { + matched_threshold: 0.5 + unmatched_threshold: 0.5 + ignore_thresholds: false + negatives_lower_than_unmatched: true + force_match_for_each_row: true + } + } + similarity_calculator { + iou_similarity { + } + } + anchor_generator { + ssd_anchor_generator { + num_layers: 5 + min_scale: 0.2 + max_scale: 0.95 + aspect_ratios: 1.0 + aspect_ratios: 2.0 + aspect_ratios: 0.5 + aspect_ratios: 3.0 + aspect_ratios: 0.3333 + } + } + image_resizer { + fixed_shape_resizer { + height: 256 + width: 256 + } + } + box_predictor { + convolutional_box_predictor { + min_depth: 0 + max_depth: 0 + num_layers_before_predictor: 3 + use_dropout: false + dropout_keep_probability: 0.8 + kernel_size: 3 + box_code_size: 4 + apply_sigmoid_to_scores: false + use_depthwise: true + conv_hyperparams { + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + mean: 0.0 + } + } + batch_norm { + train: true, + scale: true, + center: true, + decay: 0.9997, + epsilon: 0.001, + } + } + } + } + feature_extractor { + type: 'lstm_mobilenet_v1' + min_depth: 16 + depth_multiplier: 1.0 + use_depthwise: true + conv_hyperparams { + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + mean: 0.0 + } + } + batch_norm { + train: true, + scale: true, + center: true, + decay: 0.9997, + epsilon: 0.001, + } + } + } + loss { + classification_loss { + weighted_sigmoid { + } + } + localization_loss { + weighted_smooth_l1 { + } + } + hard_example_miner { + num_hard_examples: 3000 + iou_threshold: 0.99 + loss_type: CLASSIFICATION + 
max_negatives_per_positive: 3 + min_negatives_per_image: 0 + } + classification_weight: 1.0 + localization_weight: 4.0 + } + normalize_loss_by_num_matches: true + post_processing { + batch_non_max_suppression { + score_threshold: -20.0 + iou_threshold: 0.5 + max_detections_per_class: 100 + max_total_detections: 100 + } + score_converter: SIGMOID + } + } +} + +train_config: { + batch_size: 8 + data_augmentation_options { + random_horizontal_flip { + } + } + data_augmentation_options { + ssd_random_crop { + } + } + optimizer { + use_moving_average: false + rms_prop_optimizer: { + learning_rate: { + exponential_decay_learning_rate { + initial_learning_rate: 0.002 + decay_steps: 200000 + decay_factor: 0.95 + } + } + momentum_optimizer_value: 0.9 + decay: 0.9 + epsilon: 1.0 + } + } + + from_detection_checkpoint: true + gradient_clipping_by_norm: 10.0 + batch_queue_capacity: 12 + prefetch_queue_capacity: 4 + fine_tune_checkpoint: "/path/to/checkpoint/" + fine_tune_checkpoint_type: "detection" +} + + +train_input_reader: { + shuffle_buffer_size: 32 + queue_capacity: 12 + prefetch_size: 12 + min_after_dequeue: 4 + label_map_path: "path/to/label_map" + external_input_reader { + [lstm_object_detection.protos.GoogleInputReader.google_input_reader] { + tf_record_video_input_reader: { + input_path: "path/to/sequence_example/data" + data_type: TF_SEQUENCE_EXAMPLE + video_length: 4 + } + } + } +} + +eval_config: { + metrics_set: "coco_evaluation_all_frames" + use_moving_averages: true + min_score_threshold: 0.5 + max_num_boxes_to_visualize: 300 + visualize_groundtruth_boxes: true + groundtruth_box_visualization_color: "red" +} + +eval_input_reader: { + label_map_path: "path/to/label_map" + external_input_reader { + [lstm_object_detection.protos.GoogleInputReader.google_input_reader] { + tf_record_video_input_reader: { + input_path: "path/to/sequence_example/data" + data_type: TF_SEQUENCE_EXAMPLE + video_length: 4 + } + } + } + shuffle: true + num_readers: 1 +} diff --git a/models/research/lstm_object_detection/eval.py b/models/research/lstm_object_detection/eval.py new file mode 100644 index 0000000000000000000000000000000000000000..aac25c1182bd354b710a7bb83c7bd68365f14fed --- /dev/null +++ b/models/research/lstm_object_detection/eval.py @@ -0,0 +1,108 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""Evaluation executable for detection models. + +This executable is used to evaluate DetectionModels. 
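+It builds the detection model from the LSTM-specific configs (the lstm_model block) and reads sequence-example video inputs through seq_dataset_builder.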
Example usage: + ./eval \ + --logtostderr \ + --checkpoint_dir=path/to/checkpoint_dir \ + --eval_dir=path/to/eval_dir \ + --pipeline_config_path=pipeline_config.pbtxt +""" + +import functools +import os +import tensorflow.compat.v1 as tf +from google.protobuf import text_format +from lstm_object_detection import evaluator +from lstm_object_detection import model_builder +from lstm_object_detection.inputs import seq_dataset_builder +from lstm_object_detection.utils import config_util +from object_detection.utils import label_map_util + +tf.logging.set_verbosity(tf.logging.INFO) +flags = tf.app.flags +flags.DEFINE_boolean('eval_training_data', False, + 'If training data should be evaluated for this job.') +flags.DEFINE_string('checkpoint_dir', '', + 'Directory containing checkpoints to evaluate, typically ' + 'set to `train_dir` used in the training job.') +flags.DEFINE_string('eval_dir', '', 'Directory to write eval summaries to.') +flags.DEFINE_string('pipeline_config_path', '', + 'Path to a pipeline_pb2.TrainEvalPipelineConfig config ' + 'file. If provided, other configs are ignored') +flags.DEFINE_boolean('run_once', False, 'Option to only run a single pass of ' + 'evaluation. Overrides the `max_evals` parameter in the ' + 'provided config.') +FLAGS = flags.FLAGS + + +def main(unused_argv): + assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.' + assert FLAGS.eval_dir, '`eval_dir` is missing.' + if FLAGS.pipeline_config_path: + configs = config_util.get_configs_from_pipeline_file( + FLAGS.pipeline_config_path) + else: + configs = config_util.get_configs_from_multiple_files( + model_config_path=FLAGS.model_config_path, + eval_config_path=FLAGS.eval_config_path, + eval_input_config_path=FLAGS.input_config_path) + + pipeline_proto = config_util.create_pipeline_proto_from_configs(configs) + config_text = text_format.MessageToString(pipeline_proto) + tf.gfile.MakeDirs(FLAGS.eval_dir) + with tf.gfile.Open(os.path.join(FLAGS.eval_dir, 'pipeline.config'), + 'wb') as f: + f.write(config_text) + + model_config = configs['model'] + lstm_config = configs['lstm_model'] + eval_config = configs['eval_config'] + input_config = configs['eval_input_config'] + + if FLAGS.eval_training_data: + input_config.external_input_reader.CopyFrom( + configs['train_input_config'].external_input_reader) + lstm_config.eval_unroll_length = lstm_config.train_unroll_length + + model_fn = functools.partial( + model_builder.build, + model_config=model_config, + lstm_config=lstm_config, + is_training=False) + + def get_next(config, model_config, lstm_config, unroll_length): + return seq_dataset_builder.build(config, model_config, lstm_config, + unroll_length) + + create_input_dict_fn = functools.partial(get_next, input_config, model_config, + lstm_config, + lstm_config.eval_unroll_length) + + label_map = label_map_util.load_labelmap(input_config.label_map_path) + max_num_classes = max([item.id for item in label_map.item]) + categories = label_map_util.convert_label_map_to_categories( + label_map, max_num_classes) + + if FLAGS.run_once: + eval_config.max_evals = 1 + + evaluator.evaluate(create_input_dict_fn, model_fn, eval_config, categories, + FLAGS.checkpoint_dir, FLAGS.eval_dir) + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/lstm_object_detection/evaluator.py b/models/research/lstm_object_detection/evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..6ed3e476e8e9bfd9c0d4cfe71925ccb7ff5f6b07 --- /dev/null +++ b/models/research/lstm_object_detection/evaluator.py 
@@ -0,0 +1,337 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Detection model evaluator. + +This file provides a generic evaluation method that can be used to evaluate a +DetectionModel. + +""" + +import tensorflow.compat.v1 as tf +from tensorflow.contrib import tfprof as contrib_tfprof +from lstm_object_detection.metrics import coco_evaluation_all_frames +from object_detection import eval_util +from object_detection.core import prefetcher +from object_detection.core import standard_fields as fields +from object_detection.metrics import coco_evaluation +from object_detection.utils import object_detection_evaluation + + +# A dictionary of metric names to classes that implement the metric. The classes +# in the dictionary must implement +# utils.object_detection_evaluation.DetectionEvaluator interface. +EVAL_METRICS_CLASS_DICT = { + 'pascal_voc_detection_metrics': + object_detection_evaluation.PascalDetectionEvaluator, + 'weighted_pascal_voc_detection_metrics': + object_detection_evaluation.WeightedPascalDetectionEvaluator, + 'pascal_voc_instance_segmentation_metrics': + object_detection_evaluation.PascalInstanceSegmentationEvaluator, + 'weighted_pascal_voc_instance_segmentation_metrics': + object_detection_evaluation.WeightedPascalInstanceSegmentationEvaluator, + 'open_images_detection_metrics': + object_detection_evaluation.OpenImagesDetectionEvaluator, + 'coco_detection_metrics': + coco_evaluation.CocoDetectionEvaluator, + 'coco_mask_metrics': + coco_evaluation.CocoMaskEvaluator, + 'coco_evaluation_all_frames': + coco_evaluation_all_frames.CocoEvaluationAllFrames, +} + +EVAL_DEFAULT_METRIC = 'pascal_voc_detection_metrics' + + +def _create_detection_op(model, input_dict, batch): + """Create detection ops. + + Args: + model: model to perform predictions with. + input_dict: A dict holds input data. + batch: batch size for evaluation. + + Returns: + Detection tensor ops. + """ + video_tensor = tf.stack(list(input_dict[fields.InputDataFields.image])) + preprocessed_video, true_image_shapes = model.preprocess( + tf.to_float(video_tensor)) + if batch is not None: + prediction_dict = model.predict(preprocessed_video, true_image_shapes, + batch) + else: + prediction_dict = model.predict(preprocessed_video, true_image_shapes) + + return model.postprocess(prediction_dict, true_image_shapes) + + +def _extract_prediction_tensors(model, + create_input_dict_fn, + ignore_groundtruth=False): + """Restores the model in a tensorflow session. + + Args: + model: model to perform predictions with. + create_input_dict_fn: function to create input tensor dictionaries. + ignore_groundtruth: whether groundtruth should be ignored. + + + Returns: + tensor_dict: A tensor dictionary with evaluations. 
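+    Note that one such dictionary is produced for every frame of the unrolled video sequence, so the returned value is a list of these dictionaries.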
+ """ + input_dict = create_input_dict_fn() + batch = None + if 'batch' in input_dict: + batch = input_dict.pop('batch') + else: + prefetch_queue = prefetcher.prefetch(input_dict, capacity=500) + input_dict = prefetch_queue.dequeue() + # consistent format for images and videos + for key, value in input_dict.iteritems(): + input_dict[key] = (value,) + + detections = _create_detection_op(model, input_dict, batch) + + # Print out anaylsis of the model. + contrib_tfprof.model_analyzer.print_model_analysis( + tf.get_default_graph(), + tfprof_options=contrib_tfprof.model_analyzer + .TRAINABLE_VARS_PARAMS_STAT_OPTIONS) + contrib_tfprof.model_analyzer.print_model_analysis( + tf.get_default_graph(), + tfprof_options=contrib_tfprof.model_analyzer.FLOAT_OPS_OPTIONS) + + num_frames = len(input_dict[fields.InputDataFields.image]) + ret = [] + for i in range(num_frames): + original_image = tf.expand_dims(input_dict[fields.InputDataFields.image][i], + 0) + groundtruth = None + if not ignore_groundtruth: + groundtruth = { + fields.InputDataFields.groundtruth_boxes: + input_dict[fields.InputDataFields.groundtruth_boxes][i], + fields.InputDataFields.groundtruth_classes: + input_dict[fields.InputDataFields.groundtruth_classes][i], + } + optional_keys = ( + fields.InputDataFields.groundtruth_area, + fields.InputDataFields.groundtruth_is_crowd, + fields.InputDataFields.groundtruth_difficult, + fields.InputDataFields.groundtruth_group_of, + ) + for opt_key in optional_keys: + if opt_key in input_dict: + groundtruth[opt_key] = input_dict[opt_key][i] + if fields.DetectionResultFields.detection_masks in detections: + groundtruth[fields.InputDataFields.groundtruth_instance_masks] = ( + input_dict[fields.InputDataFields.groundtruth_instance_masks][i]) + + detections_frame = { + key: tf.expand_dims(value[i], 0) + for key, value in detections.iteritems() + } + + source_id = ( + batch.key[0] if batch is not None else + input_dict[fields.InputDataFields.source_id][i]) + ret.append( + eval_util.result_dict_for_single_example( + original_image, + source_id, + detections_frame, + groundtruth, + class_agnostic=(fields.DetectionResultFields.detection_classes + not in detections), + scale_to_absolute=True)) + return ret + + +def get_evaluators(eval_config, categories): + """Returns the evaluator class according to eval_config, valid for categories. + + Args: + eval_config: evaluation configurations. + categories: a list of categories to evaluate. + Returns: + An list of instances of DetectionEvaluator. + + Raises: + ValueError: if metric is not in the metric class dictionary. + """ + eval_metric_fn_keys = eval_config.metrics_set + if not eval_metric_fn_keys: + eval_metric_fn_keys = [EVAL_DEFAULT_METRIC] + evaluators_list = [] + for eval_metric_fn_key in eval_metric_fn_keys: + if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT: + raise ValueError('Metric not found: {}'.format(eval_metric_fn_key)) + else: + evaluators_list.append( + EVAL_METRICS_CLASS_DICT[eval_metric_fn_key](categories=categories)) + return evaluators_list + + +def evaluate(create_input_dict_fn, + create_model_fn, + eval_config, + categories, + checkpoint_dir, + eval_dir, + graph_hook_fn=None): + """Evaluation function for detection models. + + Args: + create_input_dict_fn: a function to create a tensor input dictionary. + create_model_fn: a function that creates a DetectionModel. + eval_config: a eval_pb2.EvalConfig protobuf. + categories: a list of category dictionaries. Each dict in the list should + have an integer 'id' field and string 'name' field. 
+ checkpoint_dir: directory to load the checkpoints to evaluate from. + eval_dir: directory to write evaluation metrics summary to. + graph_hook_fn: Optional function that is called after the training graph is + completely built. This is helpful to perform additional changes to the + training graph such as optimizing batchnorm. The function should modify + the default graph. + + Returns: + metrics: A dictionary containing metric names and values from the latest + run. + """ + + model = create_model_fn() + + if eval_config.ignore_groundtruth and not eval_config.export_path: + tf.logging.fatal('If ignore_groundtruth=True then an export_path is ' + 'required. Aborting!!!') + + tensor_dicts = _extract_prediction_tensors( + model=model, + create_input_dict_fn=create_input_dict_fn, + ignore_groundtruth=eval_config.ignore_groundtruth) + + def _process_batch(tensor_dicts, + sess, + batch_index, + counters, + losses_dict=None): + """Evaluates tensors in tensor_dicts, visualizing the first K examples. + + This function calls sess.run on tensor_dicts, evaluating the original_image + tensor only on the first K examples and visualizing detections overlaid + on this original_image. + + Args: + tensor_dicts: a dictionary of tensors + sess: tensorflow session + batch_index: the index of the batch amongst all batches in the run. + counters: a dictionary holding 'success' and 'skipped' fields which can + be updated to keep track of number of successful and failed runs, + respectively. If these fields are not updated, then the success/skipped + counter values shown at the end of evaluation will be incorrect. + losses_dict: Optional dictonary of scalar loss tensors. Necessary only + for matching function signiture in third_party eval_util.py. + + Returns: + result_dict: a dictionary of numpy arrays + result_losses_dict: a dictionary of scalar losses. This is empty if input + losses_dict is None. Necessary only for matching function signiture in + third_party eval_util.py. + """ + if batch_index % 10 == 0: + tf.logging.info('Running eval ops batch %d', batch_index) + if not losses_dict: + losses_dict = {} + try: + result_dicts, result_losses_dict = sess.run([tensor_dicts, losses_dict]) + counters['success'] += 1 + except tf.errors.InvalidArgumentError: + tf.logging.info('Skipping image') + counters['skipped'] += 1 + return {} + num_images = len(tensor_dicts) + for i in range(num_images): + result_dict = result_dicts[i] + global_step = tf.train.global_step(sess, tf.train.get_global_step()) + tag = 'image-%d' % (batch_index * num_images + i) + if batch_index < eval_config.num_visualizations / num_images: + eval_util.visualize_detection_results( + result_dict, + tag, + global_step, + categories=categories, + summary_dir=eval_dir, + export_dir=eval_config.visualization_export_dir, + show_groundtruth=eval_config.visualize_groundtruth_boxes, + groundtruth_box_visualization_color=eval_config. + groundtruth_box_visualization_color, + min_score_thresh=eval_config.min_score_threshold, + max_num_predictions=eval_config.max_num_boxes_to_visualize, + skip_scores=eval_config.skip_scores, + skip_labels=eval_config.skip_labels, + keep_image_id_for_visualization_export=eval_config. 
+ keep_image_id_for_visualization_export) + if num_images > 1: + return result_dicts, result_losses_dict + else: + return result_dicts[0], result_losses_dict + + variables_to_restore = tf.global_variables() + global_step = tf.train.get_or_create_global_step() + variables_to_restore.append(global_step) + + if graph_hook_fn: + graph_hook_fn() + + if eval_config.use_moving_averages: + variable_averages = tf.train.ExponentialMovingAverage(0.0) + variables_to_restore = variable_averages.variables_to_restore() + for key in variables_to_restore.keys(): + if 'moving_mean' in key: + variables_to_restore[key.replace( + 'moving_mean', 'moving_mean/ExponentialMovingAverage')] = ( + variables_to_restore[key]) + del variables_to_restore[key] + if 'moving_variance' in key: + variables_to_restore[key.replace( + 'moving_variance', 'moving_variance/ExponentialMovingAverage')] = ( + variables_to_restore[key]) + del variables_to_restore[key] + + saver = tf.train.Saver(variables_to_restore) + + def _restore_latest_checkpoint(sess): + latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir) + saver.restore(sess, latest_checkpoint) + + metrics = eval_util.repeated_checkpoint_run( + tensor_dict=tensor_dicts, + summary_dir=eval_dir, + evaluators=get_evaluators(eval_config, categories), + batch_processor=_process_batch, + checkpoint_dirs=[checkpoint_dir], + variables_to_restore=None, + restore_fn=_restore_latest_checkpoint, + num_batches=eval_config.num_examples, + eval_interval_secs=eval_config.eval_interval_secs, + max_number_of_evaluations=(1 if eval_config.ignore_groundtruth else + eval_config.max_evals + if eval_config.max_evals else None), + master=eval_config.eval_master, + save_graph=eval_config.save_graph, + save_graph_dir=(eval_dir if eval_config.save_graph else '')) + + return metrics diff --git a/models/research/lstm_object_detection/export_tflite_lstd_graph.py b/models/research/lstm_object_detection/export_tflite_lstd_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..7e933fb480d04aefa66ec6c4c8ec38f91dee9cb6 --- /dev/null +++ b/models/research/lstm_object_detection/export_tflite_lstd_graph.py @@ -0,0 +1,138 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Exports an LSTM detection model to use with tf-lite. + +Outputs file: +* A tflite compatible frozen graph - $output_directory/tflite_graph.pb + +The exported graph has the following input and output nodes. + +Inputs: +'input_video_tensor': a float32 tensor of shape +[unroll_length, height, width, 3] containing the normalized input image. +Note that the height and width must be compatible with the height and +width configured in the fixed_shape_image resizer options in the pipeline +config proto. 
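+For example, with the interleaved config above (eval_unroll_length: 4 and a 320x320 fixed_shape_resizer), the expected input is a float32 tensor of shape [4, 320, 320, 3].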
+ +Outputs: +If add_postprocessing_op is true: frozen graph adds a + TFLite_Detection_PostProcess custom op node has four outputs: + detection_boxes: a float32 tensor of shape [1, num_boxes, 4] with box + locations + detection_classes: a float32 tensor of shape [1, num_boxes] + with class indices + detection_scores: a float32 tensor of shape [1, num_boxes] + with class scores + num_boxes: a float32 tensor of size 1 containing the number of detected boxes +else: + the graph has three outputs: + 'raw_outputs/box_encodings': a float32 tensor of shape [1, num_anchors, 4] + containing the encoded box predictions. + 'raw_outputs/class_predictions': a float32 tensor of shape + [1, num_anchors, num_classes] containing the class scores for each anchor + after applying score conversion. + 'anchors': a float32 constant tensor of shape [num_anchors, 4] + containing the anchor boxes. + +Example Usage: +-------------- +python lstm_object_detection/export_tflite_lstd_graph.py \ + --pipeline_config_path path/to/lstm_pipeline.config \ + --trained_checkpoint_prefix path/to/model.ckpt \ + --output_directory path/to/exported_model_directory + +The expected output would be in the directory +path/to/exported_model_directory (which is created if it does not exist) +with contents: + - tflite_graph.pbtxt + - tflite_graph.pb +Config overrides (see the `config_override` flag) are text protobufs +(also of type pipeline_pb2.TrainEvalPipelineConfig) which are used to override +certain fields in the provided pipeline_config_path. These are useful for +making small changes to the inference graph that differ from the training or +eval config. + +Example Usage (in which we change the NMS iou_threshold to be 0.5 and +NMS score_threshold to be 0.0): +python lstm_object_detection/export_tflite_lstd_graph.py \ + --pipeline_config_path path/to/lstm_pipeline.config \ + --trained_checkpoint_prefix path/to/model.ckpt \ + --output_directory path/to/exported_model_directory + --config_override " \ + model{ \ + ssd{ \ + post_processing { \ + batch_non_max_suppression { \ + score_threshold: 0.0 \ + iou_threshold: 0.5 \ + } \ + } \ + } \ + } \ + " +""" + +import tensorflow.compat.v1 as tf + +from lstm_object_detection import export_tflite_lstd_graph_lib +from lstm_object_detection.utils import config_util + +flags = tf.app.flags +flags.DEFINE_string('output_directory', None, 'Path to write outputs.') +flags.DEFINE_string( + 'pipeline_config_path', None, + 'Path to a pipeline_pb2.TrainEvalPipelineConfig config ' + 'file.') +flags.DEFINE_string('trained_checkpoint_prefix', None, 'Checkpoint prefix.') +flags.DEFINE_integer('max_detections', 10, + 'Maximum number of detections (boxes) to show.') +flags.DEFINE_integer('max_classes_per_detection', 1, + 'Maximum number of classes to output per detection box.') +flags.DEFINE_integer( + 'detections_per_class', 100, + 'Number of anchors used per class in Regular Non-Max-Suppression.') +flags.DEFINE_bool('add_postprocessing_op', True, + 'Add TFLite custom op for postprocessing to the graph.') +flags.DEFINE_bool( + 'use_regular_nms', False, + 'Flag to set postprocessing op to use Regular NMS instead of Fast NMS.') +flags.DEFINE_string( + 'config_override', '', 'pipeline_pb2.TrainEvalPipelineConfig ' + 'text proto to override pipeline_config_path.') + +FLAGS = flags.FLAGS + + +def main(argv): + del argv # Unused. 
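+  # These three flags are required; mark_flag_as_required raises an error if any of them is missing.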
+ flags.mark_flag_as_required('output_directory') + flags.mark_flag_as_required('pipeline_config_path') + flags.mark_flag_as_required('trained_checkpoint_prefix') + + pipeline_config = config_util.get_configs_from_pipeline_file( + FLAGS.pipeline_config_path) + + export_tflite_lstd_graph_lib.export_tflite_graph( + pipeline_config, + FLAGS.trained_checkpoint_prefix, + FLAGS.output_directory, + FLAGS.add_postprocessing_op, + FLAGS.max_detections, + FLAGS.max_classes_per_detection, + use_regular_nms=FLAGS.use_regular_nms) + + +if __name__ == '__main__': + tf.app.run(main) diff --git a/models/research/lstm_object_detection/export_tflite_lstd_graph_lib.py b/models/research/lstm_object_detection/export_tflite_lstd_graph_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..e066f11b45f2bd4608b08656040abba2632b4aa2 --- /dev/null +++ b/models/research/lstm_object_detection/export_tflite_lstd_graph_lib.py @@ -0,0 +1,327 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Exports detection models to use with tf-lite. + +See export_tflite_lstd_graph.py for usage. +""" +import os +import tempfile + +import numpy as np +import tensorflow.compat.v1 as tf + +from tensorflow.core.framework import attr_value_pb2 +from tensorflow.core.framework import types_pb2 +from tensorflow.core.protobuf import saver_pb2 +from tensorflow.tools.graph_transforms import TransformGraph +from lstm_object_detection import model_builder +from object_detection import exporter +from object_detection.builders import graph_rewriter_builder +from object_detection.builders import post_processing_builder +from object_detection.core import box_list + +_DEFAULT_NUM_CHANNELS = 3 +_DEFAULT_NUM_COORD_BOX = 4 + + +def get_const_center_size_encoded_anchors(anchors): + """Exports center-size encoded anchors as a constant tensor. + + Args: + anchors: a float32 tensor of shape [num_anchors, 4] containing the anchor + boxes + + Returns: + encoded_anchors: a float32 constant tensor of shape [num_anchors, 4] + containing the anchor boxes. + """ + anchor_boxlist = box_list.BoxList(anchors) + y, x, h, w = anchor_boxlist.get_center_coordinates_and_sizes() + num_anchors = y.get_shape().as_list() + + with tf.Session() as sess: + y_out, x_out, h_out, w_out = sess.run([y, x, h, w]) + encoded_anchors = tf.constant( + np.transpose(np.stack((y_out, x_out, h_out, w_out))), + dtype=tf.float32, + shape=[num_anchors[0], _DEFAULT_NUM_COORD_BOX], + name='anchors') + return encoded_anchors + + +def append_postprocessing_op(frozen_graph_def, + max_detections, + max_classes_per_detection, + nms_score_threshold, + nms_iou_threshold, + num_classes, + scale_values, + detections_per_class=100, + use_regular_nms=False): + """Appends postprocessing custom op. 
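+  In the exported graph, this custom op consumes the raw box encodings, class predictions and anchors and performs box decoding and non-max suppression inside TFLite.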
+ + Args: + frozen_graph_def: Frozen GraphDef for SSD model after freezing the + checkpoint + max_detections: Maximum number of detections (boxes) to show + max_classes_per_detection: Number of classes to display per detection + nms_score_threshold: Score threshold used in Non-maximal suppression in + post-processing + nms_iou_threshold: Intersection-over-union threshold used in Non-maximal + suppression in post-processing + num_classes: number of classes in SSD detector + scale_values: scale values is a dict with following key-value pairs + {y_scale: 10, x_scale: 10, h_scale: 5, w_scale: 5} that are used in decode + centersize boxes + detections_per_class: In regular NonMaxSuppression, number of anchors used + for NonMaxSuppression per class + use_regular_nms: Flag to set postprocessing op to use Regular NMS instead of + Fast NMS. + + Returns: + transformed_graph_def: Frozen GraphDef with postprocessing custom op + appended + TFLite_Detection_PostProcess custom op node has four outputs: + detection_boxes: a float32 tensor of shape [1, num_boxes, 4] with box + locations + detection_classes: a float32 tensor of shape [1, num_boxes] + with class indices + detection_scores: a float32 tensor of shape [1, num_boxes] + with class scores + num_boxes: a float32 tensor of size 1 containing the number of detected + boxes + """ + new_output = frozen_graph_def.node.add() + new_output.op = 'TFLite_Detection_PostProcess' + new_output.name = 'TFLite_Detection_PostProcess' + new_output.attr['_output_quantized'].CopyFrom( + attr_value_pb2.AttrValue(b=True)) + new_output.attr['_output_types'].list.type.extend([ + types_pb2.DT_FLOAT, types_pb2.DT_FLOAT, types_pb2.DT_FLOAT, + types_pb2.DT_FLOAT + ]) + new_output.attr['_support_output_type_float_in_quantized_op'].CopyFrom( + attr_value_pb2.AttrValue(b=True)) + new_output.attr['max_detections'].CopyFrom( + attr_value_pb2.AttrValue(i=max_detections)) + new_output.attr['max_classes_per_detection'].CopyFrom( + attr_value_pb2.AttrValue(i=max_classes_per_detection)) + new_output.attr['nms_score_threshold'].CopyFrom( + attr_value_pb2.AttrValue(f=nms_score_threshold.pop())) + new_output.attr['nms_iou_threshold'].CopyFrom( + attr_value_pb2.AttrValue(f=nms_iou_threshold.pop())) + new_output.attr['num_classes'].CopyFrom( + attr_value_pb2.AttrValue(i=num_classes)) + + new_output.attr['y_scale'].CopyFrom( + attr_value_pb2.AttrValue(f=scale_values['y_scale'].pop())) + new_output.attr['x_scale'].CopyFrom( + attr_value_pb2.AttrValue(f=scale_values['x_scale'].pop())) + new_output.attr['h_scale'].CopyFrom( + attr_value_pb2.AttrValue(f=scale_values['h_scale'].pop())) + new_output.attr['w_scale'].CopyFrom( + attr_value_pb2.AttrValue(f=scale_values['w_scale'].pop())) + new_output.attr['detections_per_class'].CopyFrom( + attr_value_pb2.AttrValue(i=detections_per_class)) + new_output.attr['use_regular_nms'].CopyFrom( + attr_value_pb2.AttrValue(b=use_regular_nms)) + + new_output.input.extend( + ['raw_outputs/box_encodings', 'raw_outputs/class_predictions', 'anchors']) + # Transform the graph to append new postprocessing op + input_names = [] + output_names = ['TFLite_Detection_PostProcess'] + transforms = ['strip_unused_nodes'] + transformed_graph_def = TransformGraph(frozen_graph_def, input_names, + output_names, transforms) + return transformed_graph_def + + +def export_tflite_graph(pipeline_config, + trained_checkpoint_prefix, + output_dir, + add_postprocessing_op, + max_detections, + max_classes_per_detection, + detections_per_class=100, + use_regular_nms=False, + 
binary_graph_name='tflite_graph.pb', + txt_graph_name='tflite_graph.pbtxt'): + """Exports a tflite compatible graph and anchors for ssd detection model. + + Anchors are written to a tensor and tflite compatible graph + is written to output_dir/tflite_graph.pb. + + Args: + pipeline_config: Dictionary of configuration objects. Keys are `model`, + `train_config`, `train_input_config`, `eval_config`, `eval_input_config`, + `lstm_model`. Value are the corresponding config objects. + trained_checkpoint_prefix: a file prefix for the checkpoint containing the + trained parameters of the SSD model. + output_dir: A directory to write the tflite graph and anchor file to. + add_postprocessing_op: If add_postprocessing_op is true: frozen graph adds a + TFLite_Detection_PostProcess custom op + max_detections: Maximum number of detections (boxes) to show + max_classes_per_detection: Number of classes to display per detection + detections_per_class: In regular NonMaxSuppression, number of anchors used + for NonMaxSuppression per class + use_regular_nms: Flag to set postprocessing op to use Regular NMS instead of + Fast NMS. + binary_graph_name: Name of the exported graph file in binary format. + txt_graph_name: Name of the exported graph file in text format. + + Raises: + ValueError: if the pipeline config contains models other than ssd or uses an + fixed_shape_resizer and provides a shape as well. + """ + model_config = pipeline_config['model'] + lstm_config = pipeline_config['lstm_model'] + eval_config = pipeline_config['eval_config'] + tf.gfile.MakeDirs(output_dir) + if model_config.WhichOneof('model') != 'ssd': + raise ValueError('Only ssd models are supported in tflite. ' + 'Found {} in config'.format( + model_config.WhichOneof('model'))) + + num_classes = model_config.ssd.num_classes + nms_score_threshold = { + model_config.ssd.post_processing.batch_non_max_suppression.score_threshold + } + nms_iou_threshold = { + model_config.ssd.post_processing.batch_non_max_suppression.iou_threshold + } + scale_values = {} + scale_values['y_scale'] = { + model_config.ssd.box_coder.faster_rcnn_box_coder.y_scale + } + scale_values['x_scale'] = { + model_config.ssd.box_coder.faster_rcnn_box_coder.x_scale + } + scale_values['h_scale'] = { + model_config.ssd.box_coder.faster_rcnn_box_coder.height_scale + } + scale_values['w_scale'] = { + model_config.ssd.box_coder.faster_rcnn_box_coder.width_scale + } + + image_resizer_config = model_config.ssd.image_resizer + image_resizer = image_resizer_config.WhichOneof('image_resizer_oneof') + num_channels = _DEFAULT_NUM_CHANNELS + if image_resizer == 'fixed_shape_resizer': + height = image_resizer_config.fixed_shape_resizer.height + width = image_resizer_config.fixed_shape_resizer.width + if image_resizer_config.fixed_shape_resizer.convert_to_grayscale: + num_channels = 1 + + shape = [lstm_config.eval_unroll_length, height, width, num_channels] + else: + raise ValueError( + 'Only fixed_shape_resizer' + 'is supported with tflite. 
Found {}'.format( + image_resizer_config.WhichOneof('image_resizer_oneof'))) + + video_tensor = tf.placeholder( + tf.float32, shape=shape, name='input_video_tensor') + + detection_model = model_builder.build( + model_config, lstm_config, is_training=False) + preprocessed_video, true_image_shapes = detection_model.preprocess( + tf.to_float(video_tensor)) + predicted_tensors = detection_model.predict(preprocessed_video, + true_image_shapes) + # predicted_tensors = detection_model.postprocess(predicted_tensors, + # true_image_shapes) + # The score conversion occurs before the post-processing custom op + _, score_conversion_fn = post_processing_builder.build( + model_config.ssd.post_processing) + class_predictions = score_conversion_fn( + predicted_tensors['class_predictions_with_background']) + + with tf.name_scope('raw_outputs'): + # 'raw_outputs/box_encodings': a float32 tensor of shape [1, num_anchors, 4] + # containing the encoded box predictions. Note that these are raw + # predictions and no Non-Max suppression is applied on them and + # no decode center size boxes is applied to them. + tf.identity(predicted_tensors['box_encodings'], name='box_encodings') + # 'raw_outputs/class_predictions': a float32 tensor of shape + # [1, num_anchors, num_classes] containing the class scores for each anchor + # after applying score conversion. + tf.identity(class_predictions, name='class_predictions') + # 'anchors': a float32 tensor of shape + # [4, num_anchors] containing the anchors as a constant node. + tf.identity( + get_const_center_size_encoded_anchors(predicted_tensors['anchors']), + name='anchors') + + # Add global step to the graph, so we know the training step number when we + # evaluate the model. + tf.train.get_or_create_global_step() + + # graph rewriter + is_quantized = ('graph_rewriter' in pipeline_config) + if is_quantized: + graph_rewriter_config = pipeline_config['graph_rewriter'] + graph_rewriter_fn = graph_rewriter_builder.build( + graph_rewriter_config, is_training=False, is_export=True) + graph_rewriter_fn() + + if model_config.ssd.feature_extractor.HasField('fpn'): + exporter.rewrite_nn_resize_op(is_quantized) + + # freeze the graph + saver_kwargs = {} + if eval_config.use_moving_averages: + saver_kwargs['write_version'] = saver_pb2.SaverDef.V1 + moving_average_checkpoint = tempfile.NamedTemporaryFile() + exporter.replace_variable_values_with_moving_averages( + tf.get_default_graph(), trained_checkpoint_prefix, + moving_average_checkpoint.name) + checkpoint_to_use = moving_average_checkpoint.name + else: + checkpoint_to_use = trained_checkpoint_prefix + + saver = tf.train.Saver(**saver_kwargs) + input_saver_def = saver.as_saver_def() + frozen_graph_def = exporter.freeze_graph_with_def_protos( + input_graph_def=tf.get_default_graph().as_graph_def(), + input_saver_def=input_saver_def, + input_checkpoint=checkpoint_to_use, + output_node_names=','.join([ + 'raw_outputs/box_encodings', 'raw_outputs/class_predictions', + 'anchors' + ]), + restore_op_name='save/restore_all', + filename_tensor_name='save/Const:0', + clear_devices=True, + output_graph='', + initializer_nodes='') + + # Add new operation to do post processing in a custom op (TF Lite only) + + if add_postprocessing_op: + transformed_graph_def = append_postprocessing_op( + frozen_graph_def, max_detections, max_classes_per_detection, + nms_score_threshold, nms_iou_threshold, num_classes, scale_values, + detections_per_class, use_regular_nms) + else: + # Return frozen without adding post-processing custom op + 
transformed_graph_def = frozen_graph_def + + binary_graph = os.path.join(output_dir, binary_graph_name) + with tf.gfile.GFile(binary_graph, 'wb') as f: + f.write(transformed_graph_def.SerializeToString()) + txt_graph = os.path.join(output_dir, txt_graph_name) + with tf.gfile.GFile(txt_graph, 'w') as f: + f.write(str(transformed_graph_def)) diff --git a/models/research/lstm_object_detection/export_tflite_lstd_model.py b/models/research/lstm_object_detection/export_tflite_lstd_model.py new file mode 100644 index 0000000000000000000000000000000000000000..58c674728b5b0e274ae112d66abe3ff72f63b86e --- /dev/null +++ b/models/research/lstm_object_detection/export_tflite_lstd_model.py @@ -0,0 +1,65 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Export a LSTD model in tflite format.""" + +import os +from absl import flags +import tensorflow.compat.v1 as tf + +from lstm_object_detection.utils import config_util + +flags.DEFINE_string('export_path', None, 'Path to export model.') +flags.DEFINE_string('frozen_graph_path', None, 'Path to frozen graph.') +flags.DEFINE_string( + 'pipeline_config_path', '', + 'Path to a pipeline_pb2.TrainEvalPipelineConfig config file.') + +FLAGS = flags.FLAGS + + +def main(_): + flags.mark_flag_as_required('export_path') + flags.mark_flag_as_required('frozen_graph_path') + flags.mark_flag_as_required('pipeline_config_path') + + configs = config_util.get_configs_from_pipeline_file( + FLAGS.pipeline_config_path) + lstm_config = configs['lstm_model'] + + input_arrays = ['input_video_tensor'] + output_arrays = [ + 'TFLite_Detection_PostProcess', + 'TFLite_Detection_PostProcess:1', + 'TFLite_Detection_PostProcess:2', + 'TFLite_Detection_PostProcess:3', + ] + input_shapes = { + 'input_video_tensor': [lstm_config.eval_unroll_length, 320, 320, 3], + } + + converter = tf.lite.TFLiteConverter.from_frozen_graph( + FLAGS.frozen_graph_path, + input_arrays, + output_arrays, + input_shapes=input_shapes) + converter.allow_custom_ops = True + tflite_model = converter.convert() + ofilename = os.path.join(FLAGS.export_path) + open(ofilename, 'wb').write(tflite_model) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/lstm_object_detection/g3doc/Interleaved_Intro.png b/models/research/lstm_object_detection/g3doc/Interleaved_Intro.png new file mode 100644 index 0000000000000000000000000000000000000000..2b829c997bc75e807c0982b1d71334966452b122 Binary files /dev/null and b/models/research/lstm_object_detection/g3doc/Interleaved_Intro.png differ diff --git a/models/research/lstm_object_detection/g3doc/exporting_models.md b/models/research/lstm_object_detection/g3doc/exporting_models.md new file mode 100644 index 0000000000000000000000000000000000000000..7d501d97efdfb8d259e867164aa04f275b56a036 --- /dev/null +++ b/models/research/lstm_object_detection/g3doc/exporting_models.md @@ -0,0 +1,49 @@ +# Exporting a tflite model 
from a checkpoint + +Starting from a trained model checkpoint, creating a tflite model requires 2 +steps: + +* exporting a tflite frozen graph from a checkpoint +* exporting a tflite model from a frozen graph + +## Exporting a tflite frozen graph from a checkpoint + +With a candidate checkpoint to export, run the following command from +tensorflow/models/research: + +```bash +# from tensorflow/models/research +PIPELINE_CONFIG_PATH={path to pipeline config} +TRAINED_CKPT_PREFIX=/{path to model.ckpt} +EXPORT_DIR={path to folder that will be used for export} +python lstm_object_detection/export_tflite_lstd_graph.py \ + --pipeline_config_path ${PIPELINE_CONFIG_PATH} \ + --trained_checkpoint_prefix ${TRAINED_CKPT_PREFIX} \ + --output_directory ${EXPORT_DIR} \ + --add_preprocessing_op +``` + +After export, you should see the directory ${EXPORT_DIR} containing the +following files: + +* `tflite_graph.pb` +* `tflite_graph.pbtxt` + +## Exporting a tflite model from a frozen graph + +We then take the exported tflite-compatable tflite model, and convert it to a +TFLite FlatBuffer file by running the following: + +```bash +# from tensorflow/models/research +FROZEN_GRAPH_PATH={path to exported tflite_graph.pb} +EXPORT_PATH={path to filename that will be used for export} +PIPELINE_CONFIG_PATH={path to pipeline config} +python lstm_object_detection/export_tflite_lstd_model.py \ + --export_path ${EXPORT_PATH} \ + --frozen_graph_path ${FROZEN_GRAPH_PATH} \ + --pipeline_config_path ${PIPELINE_CONFIG_PATH} +``` + +After export, you should see the file ${EXPORT_PATH} containing the FlatBuffer +model to be used by an application. diff --git a/models/research/lstm_object_detection/g3doc/lstm_ssd_intro.png b/models/research/lstm_object_detection/g3doc/lstm_ssd_intro.png new file mode 100644 index 0000000000000000000000000000000000000000..fa62eb533b9190bcf05094d12781808dc85f1107 Binary files /dev/null and b/models/research/lstm_object_detection/g3doc/lstm_ssd_intro.png differ diff --git a/models/research/lstm_object_detection/inputs/__init__.py b/models/research/lstm_object_detection/inputs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/lstm_object_detection/inputs/seq_dataset_builder.py b/models/research/lstm_object_detection/inputs/seq_dataset_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..55e24820f60d24d14db64f2aea21e462ee278ff2 --- /dev/null +++ b/models/research/lstm_object_detection/inputs/seq_dataset_builder.py @@ -0,0 +1,242 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""tf.data.Dataset builder. + +Creates data sources for DetectionModels from an InputReader config. See +input_reader.proto for options. 
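
A typical external input reader config consumed by this builder looks like the
following (mirroring seq_dataset_builder_test.py; the input path is
illustrative):

  external_input_reader {
    [lstm_object_detection.protos.GoogleInputReader.google_input_reader] {
      tf_record_video_input_reader: {
        input_path: '/path/to/sequence_examples.tfrecord'
        data_type: TF_SEQUENCE_EXAMPLE
        video_length: 4
      }
    }
  }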
+ +Note: If users wishes to also use their own InputReaders with the Object +Detection configuration framework, they should define their own builder function +that wraps the build function. +""" +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from tensorflow.contrib.training.python.training import sequence_queueing_state_saver as sqss +from lstm_object_detection.inputs import tf_sequence_example_decoder +from lstm_object_detection.protos import input_reader_google_pb2 +from object_detection.core import preprocessor +from object_detection.core import preprocessor_cache +from object_detection.core import standard_fields as fields +from object_detection.protos import input_reader_pb2 +from object_detection.utils import ops as util_ops + +parallel_reader = slim.parallel_reader +# TODO(yinxiao): Make the following variable into configurable proto. +# Padding size for the labeled objects in each frame. Here we assume each +# frame has a total number of objects less than _PADDING_SIZE. +_PADDING_SIZE = 30 + + +def _build_training_batch_dict(batch_sequences_with_states, unroll_length, + batch_size): + """Builds training batch samples. + + Args: + batch_sequences_with_states: A batch_sequences_with_states object. + unroll_length: Unrolled length for LSTM training. + batch_size: Batch size for queue outputs. + + Returns: + A dictionary of tensors based on items in input_reader_config. + """ + seq_tensors_dict = { + fields.InputDataFields.image: [], + fields.InputDataFields.groundtruth_boxes: [], + fields.InputDataFields.groundtruth_classes: [], + 'batch': batch_sequences_with_states, + } + for i in range(unroll_length): + for j in range(batch_size): + filtered_dict = util_ops.filter_groundtruth_with_nan_box_coordinates({ + fields.InputDataFields.groundtruth_boxes: ( + batch_sequences_with_states.sequences['groundtruth_boxes'][j][i]), + fields.InputDataFields.groundtruth_classes: ( + batch_sequences_with_states.sequences['groundtruth_classes'][j][i] + ), + }) + filtered_dict = util_ops.retain_groundtruth_with_positive_classes( + filtered_dict) + seq_tensors_dict[fields.InputDataFields.image].append( + batch_sequences_with_states.sequences['image'][j][i]) + seq_tensors_dict[fields.InputDataFields.groundtruth_boxes].append( + filtered_dict[fields.InputDataFields.groundtruth_boxes]) + seq_tensors_dict[fields.InputDataFields.groundtruth_classes].append( + filtered_dict[fields.InputDataFields.groundtruth_classes]) + seq_tensors_dict[fields.InputDataFields.image] = tuple( + seq_tensors_dict[fields.InputDataFields.image]) + seq_tensors_dict[fields.InputDataFields.groundtruth_boxes] = tuple( + seq_tensors_dict[fields.InputDataFields.groundtruth_boxes]) + seq_tensors_dict[fields.InputDataFields.groundtruth_classes] = tuple( + seq_tensors_dict[fields.InputDataFields.groundtruth_classes]) + + return seq_tensors_dict + + +def build(input_reader_config, + model_config, + lstm_config, + unroll_length, + data_augmentation_options=None, + batch_size=1): + """Builds a tensor dictionary based on the InputReader config. + + Args: + input_reader_config: An input_reader_builder.InputReader object. + model_config: A model.proto object containing the config for the desired + DetectionModel. + lstm_config: LSTM specific configs. + unroll_length: Unrolled length for LSTM training. + data_augmentation_options: A list of tuples, where each tuple contains a + data augmentation function and a dictionary containing arguments and their + values (see preprocessor.py). + batch_size: Batch size for queue outputs. 
+ + Returns: + A dictionary of tensors based on items in the input_reader_config. + + Raises: + ValueError: On invalid input reader proto. + ValueError: If no input paths are specified. + """ + if not isinstance(input_reader_config, input_reader_pb2.InputReader): + raise ValueError('input_reader_config not of type ' + 'input_reader_pb2.InputReader.') + + external_reader_config = input_reader_config.external_input_reader + external_input_reader_config = external_reader_config.Extensions[ + input_reader_google_pb2.GoogleInputReader.google_input_reader] + input_reader_type = external_input_reader_config.WhichOneof('input_reader') + + if input_reader_type == 'tf_record_video_input_reader': + config = external_input_reader_config.tf_record_video_input_reader + reader_type_class = tf.TFRecordReader + else: + raise ValueError( + 'Unsupported reader in input_reader_config: %s' % input_reader_type) + + if not config.input_path: + raise ValueError('At least one input path must be specified in ' + '`input_reader_config`.') + key, value = parallel_reader.parallel_read( + config.input_path[:], # Convert `RepeatedScalarContainer` to list. + reader_class=reader_type_class, + num_epochs=(input_reader_config.num_epochs + if input_reader_config.num_epochs else None), + num_readers=input_reader_config.num_readers, + shuffle=input_reader_config.shuffle, + dtypes=[tf.string, tf.string], + capacity=input_reader_config.queue_capacity, + min_after_dequeue=input_reader_config.min_after_dequeue) + + # TODO(yinxiao): Add loading instance mask option. + decoder = tf_sequence_example_decoder.TFSequenceExampleDecoder() + + keys_to_decode = [ + fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes + ] + tensor_dict = decoder.decode(value, items=keys_to_decode) + + tensor_dict['image'].set_shape([None, None, None, 3]) + tensor_dict['groundtruth_boxes'].set_shape([None, None, 4]) + + height = model_config.ssd.image_resizer.fixed_shape_resizer.height + width = model_config.ssd.image_resizer.fixed_shape_resizer.width + + # If data augmentation is specified in the config file, the preprocessor + # will be called here to augment the data as specified. Most common + # augmentations include horizontal flip and cropping. + if data_augmentation_options: + images_pre = tf.split(tensor_dict['image'], config.video_length, axis=0) + bboxes_pre = tf.split( + tensor_dict['groundtruth_boxes'], config.video_length, axis=0) + labels_pre = tf.split( + tensor_dict['groundtruth_classes'], config.video_length, axis=0) + images_proc, bboxes_proc, labels_proc = [], [], [] + cache = preprocessor_cache.PreprocessorCache() + + for i, _ in enumerate(images_pre): + image_dict = { + fields.InputDataFields.image: + images_pre[i], + fields.InputDataFields.groundtruth_boxes: + tf.squeeze(bboxes_pre[i], axis=0), + fields.InputDataFields.groundtruth_classes: + tf.squeeze(labels_pre[i], axis=0), + } + image_dict = preprocessor.preprocess( + image_dict, + data_augmentation_options, + func_arg_map=preprocessor.get_default_func_arg_map(), + preprocess_vars_cache=cache) + # Pads detection count to _PADDING_SIZE. 
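      # tf.pad appends _PADDING_SIZE all-zero rows after the real boxes and
      # tf.slice then keeps exactly the first _PADDING_SIZE rows, so every
      # frame ends up with a fixed-size box tensor (e.g. with 3 real boxes and
      # _PADDING_SIZE=30, rows 0-2 are real and rows 3-29 are zeros). The same
      # pad-then-slice is applied to the class labels below.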
+ image_dict[fields.InputDataFields.groundtruth_boxes] = tf.pad( + image_dict[fields.InputDataFields.groundtruth_boxes], + [[0, _PADDING_SIZE], [0, 0]]) + image_dict[fields.InputDataFields.groundtruth_boxes] = tf.slice( + image_dict[fields.InputDataFields.groundtruth_boxes], [0, 0], + [_PADDING_SIZE, -1]) + image_dict[fields.InputDataFields.groundtruth_classes] = tf.pad( + image_dict[fields.InputDataFields.groundtruth_classes], + [[0, _PADDING_SIZE]]) + image_dict[fields.InputDataFields.groundtruth_classes] = tf.slice( + image_dict[fields.InputDataFields.groundtruth_classes], [0], + [_PADDING_SIZE]) + images_proc.append(image_dict[fields.InputDataFields.image]) + bboxes_proc.append(image_dict[fields.InputDataFields.groundtruth_boxes]) + labels_proc.append(image_dict[fields.InputDataFields.groundtruth_classes]) + tensor_dict['image'] = tf.concat(images_proc, axis=0) + tensor_dict['groundtruth_boxes'] = tf.stack(bboxes_proc, axis=0) + tensor_dict['groundtruth_classes'] = tf.stack(labels_proc, axis=0) + else: + # Pads detection count to _PADDING_SIZE per frame. + tensor_dict['groundtruth_boxes'] = tf.pad( + tensor_dict['groundtruth_boxes'], [[0, 0], [0, _PADDING_SIZE], [0, 0]]) + tensor_dict['groundtruth_boxes'] = tf.slice( + tensor_dict['groundtruth_boxes'], [0, 0, 0], [-1, _PADDING_SIZE, -1]) + tensor_dict['groundtruth_classes'] = tf.pad( + tensor_dict['groundtruth_classes'], [[0, 0], [0, _PADDING_SIZE]]) + tensor_dict['groundtruth_classes'] = tf.slice( + tensor_dict['groundtruth_classes'], [0, 0], [-1, _PADDING_SIZE]) + + tensor_dict['image'], _ = preprocessor.resize_image( + tensor_dict['image'], new_height=height, new_width=width) + + num_steps = config.video_length / unroll_length + + init_states = { + 'lstm_state_c': + tf.zeros([height / 32, width / 32, lstm_config.lstm_state_depth]), + 'lstm_state_h': + tf.zeros([height / 32, width / 32, lstm_config.lstm_state_depth]), + 'lstm_state_step': + tf.constant(num_steps, shape=[]), + } + + batch = sqss.batch_sequences_with_states( + input_key=key, + input_sequences=tensor_dict, + input_context={}, + input_length=None, + initial_states=init_states, + num_unroll=unroll_length, + batch_size=batch_size, + num_threads=batch_size, + make_keys_unique=True, + capacity=batch_size * batch_size) + + return _build_training_batch_dict(batch, unroll_length, batch_size) diff --git a/models/research/lstm_object_detection/inputs/seq_dataset_builder_test.py b/models/research/lstm_object_detection/inputs/seq_dataset_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..4b894d24f71fea1c5c372ec0ead9141af6d5ef6f --- /dev/null +++ b/models/research/lstm_object_detection/inputs/seq_dataset_builder_test.py @@ -0,0 +1,282 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for dataset_builder.""" + +import os +import numpy as np +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from tensorflow.core.example import example_pb2 +from tensorflow.core.example import feature_pb2 +from lstm_object_detection.inputs import seq_dataset_builder +from lstm_object_detection.protos import pipeline_pb2 as internal_pipeline_pb2 +from object_detection.builders import preprocessor_builder +from object_detection.core import standard_fields as fields +from object_detection.protos import input_reader_pb2 +from object_detection.protos import pipeline_pb2 +from object_detection.protos import preprocessor_pb2 + + +class DatasetBuilderTest(tf.test.TestCase): + + def _create_tf_record(self): + path = os.path.join(self.get_temp_dir(), 'tfrecord') + writer = tf.python_io.TFRecordWriter(path) + + image_tensor = np.random.randint(255, size=(16, 16, 3)).astype(np.uint8) + with self.test_session(): + encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval() + + sequence_example = example_pb2.SequenceExample( + context=feature_pb2.Features( + feature={ + 'image/format': + feature_pb2.Feature( + bytes_list=feature_pb2.BytesList( + value=['jpeg'.encode('utf-8')])), + 'image/height': + feature_pb2.Feature( + int64_list=feature_pb2.Int64List(value=[16])), + 'image/width': + feature_pb2.Feature( + int64_list=feature_pb2.Int64List(value=[16])), + }), + feature_lists=feature_pb2.FeatureLists( + feature_list={ + 'image/encoded': + feature_pb2.FeatureList(feature=[ + feature_pb2.Feature( + bytes_list=feature_pb2.BytesList( + value=[encoded_jpeg])), + ]), + 'image/object/bbox/xmin': + feature_pb2.FeatureList(feature=[ + feature_pb2.Feature( + float_list=feature_pb2.FloatList(value=[0.0])), + ]), + 'image/object/bbox/xmax': + feature_pb2.FeatureList(feature=[ + feature_pb2.Feature( + float_list=feature_pb2.FloatList(value=[1.0])) + ]), + 'image/object/bbox/ymin': + feature_pb2.FeatureList(feature=[ + feature_pb2.Feature( + float_list=feature_pb2.FloatList(value=[0.0])), + ]), + 'image/object/bbox/ymax': + feature_pb2.FeatureList(feature=[ + feature_pb2.Feature( + float_list=feature_pb2.FloatList(value=[1.0])) + ]), + 'image/object/class/label': + feature_pb2.FeatureList(feature=[ + feature_pb2.Feature( + int64_list=feature_pb2.Int64List(value=[2])) + ]), + })) + + writer.write(sequence_example.SerializeToString()) + writer.close() + + return path + + def _get_model_configs_from_proto(self): + """Creates a model text proto for testing. + + Returns: + A dictionary of model configs. 
+ """ + + model_text_proto = """ + [lstm_object_detection.protos.lstm_model] { + train_unroll_length: 4 + eval_unroll_length: 4 + } + model { + ssd { + feature_extractor { + type: 'lstm_mobilenet_v1_fpn' + conv_hyperparams { + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + } + } + negative_class_weight: 2.0 + box_coder { + faster_rcnn_box_coder { + } + } + matcher { + argmax_matcher { + } + } + similarity_calculator { + iou_similarity { + } + } + anchor_generator { + ssd_anchor_generator { + aspect_ratios: 1.0 + } + } + image_resizer { + fixed_shape_resizer { + height: 32 + width: 32 + } + } + box_predictor { + convolutional_box_predictor { + conv_hyperparams { + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + } + } + } + normalize_loc_loss_by_codesize: true + loss { + classification_loss { + weighted_softmax { + } + } + localization_loss { + weighted_smooth_l1 { + } + } + } + } + }""" + + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + text_format.Merge(model_text_proto, pipeline_config) + configs = {} + configs['model'] = pipeline_config.model + configs['lstm_model'] = pipeline_config.Extensions[ + internal_pipeline_pb2.lstm_model] + + return configs + + def _get_data_augmentation_preprocessor_proto(self): + preprocessor_text_proto = """ + random_horizontal_flip { + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + return preprocessor_proto + + def _create_training_dict(self, tensor_dict): + image_dict = {} + all_dict = {} + all_dict['batch'] = tensor_dict.pop('batch') + for i, _ in enumerate(tensor_dict[fields.InputDataFields.image]): + for key, val in tensor_dict.items(): + image_dict[key] = val[i] + + image_dict[fields.InputDataFields.image] = tf.to_float( + tf.expand_dims(image_dict[fields.InputDataFields.image], 0)) + suffix = str(i) + for key, val in image_dict.items(): + all_dict[key + suffix] = val + return all_dict + + def _get_input_proto(self, input_reader): + return """ + external_input_reader { + [lstm_object_detection.protos.GoogleInputReader.google_input_reader] { + %s: { + input_path: '{0}' + data_type: TF_SEQUENCE_EXAMPLE + video_length: 4 + } + } + } + """ % input_reader + + def test_video_input_reader(self): + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge( + self._get_input_proto('tf_record_video_input_reader'), + input_reader_proto) + + configs = self._get_model_configs_from_proto() + tensor_dict = seq_dataset_builder.build( + input_reader_proto, + configs['model'], + configs['lstm_model'], + unroll_length=1) + + all_dict = self._create_training_dict(tensor_dict) + + self.assertEqual((1, 32, 32, 3), all_dict['image0'].shape) + self.assertEqual(4, all_dict['groundtruth_boxes0'].shape[1]) + + def test_build_with_data_augmentation(self): + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge( + self._get_input_proto('tf_record_video_input_reader'), + input_reader_proto) + + configs = self._get_model_configs_from_proto() + data_augmentation_options = [ + preprocessor_builder.build( + self._get_data_augmentation_preprocessor_proto()) + ] + tensor_dict = seq_dataset_builder.build( + input_reader_proto, + configs['model'], + configs['lstm_model'], + unroll_length=1, + data_augmentation_options=data_augmentation_options) + + all_dict = self._create_training_dict(tensor_dict) + self.assertEqual((1, 32, 32, 3), all_dict['image0'].shape) 
+ self.assertEqual(4, all_dict['groundtruth_boxes0'].shape[1]) + + def test_raises_error_without_input_paths(self): + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + load_instance_masks: true + """ + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + + configs = self._get_model_configs_from_proto() + with self.assertRaises(ValueError): + _ = seq_dataset_builder.build( + input_reader_proto, + configs['model'], + configs['lstm_model'], + unroll_length=1) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/lstm_object_detection/inputs/tf_sequence_example_decoder.py b/models/research/lstm_object_detection/inputs/tf_sequence_example_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..def945b3f07d5c0ef35c454c495405971e04574a --- /dev/null +++ b/models/research/lstm_object_detection/inputs/tf_sequence_example_decoder.py @@ -0,0 +1,263 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tensorflow Sequence Example proto decoder. + +A decoder to decode string tensors containing serialized +tensorflow.SequenceExample protos. +""" +import tensorflow.compat.v1 as tf +import tf_slim as slim +from object_detection.core import data_decoder +from object_detection.core import standard_fields as fields + +tfexample_decoder = slim.tfexample_decoder + + +class BoundingBoxSequence(tfexample_decoder.ItemHandler): + """An ItemHandler that concatenates SparseTensors to Bounding Boxes. + """ + + def __init__(self, keys=None, prefix=None, return_dense=True, + default_value=-1.0): + """Initialize the bounding box handler. + + Args: + keys: A list of four key names representing the ymin, xmin, ymax, xmax + in the Example or SequenceExample. + prefix: An optional prefix for each of the bounding box keys in the + Example or SequenceExample. If provided, `prefix` is prepended to each + key in `keys`. + return_dense: if True, returns a dense tensor; if False, returns as + sparse tensor. + default_value: The value used when the `tensor_key` is not found in a + particular `TFExample`. + + Raises: + ValueError: if keys is not `None` and also not a list of exactly 4 keys + """ + if keys is None: + keys = ['ymin', 'xmin', 'ymax', 'xmax'] + elif len(keys) != 4: + raise ValueError('BoundingBoxSequence expects 4 keys but got {}'.format( + len(keys))) + self._prefix = prefix + self._keys = keys + self._full_keys = [prefix + k for k in keys] + self._return_dense = return_dense + self._default_value = default_value + super(BoundingBoxSequence, self).__init__(self._full_keys) + + def tensors_to_item(self, keys_to_tensors): + """Maps the given dictionary of tensors to a concatenated list of bboxes. + + Args: + keys_to_tensors: a mapping of TF-Example keys to parsed tensors. 
+ + Returns: + [time, num_boxes, 4] tensor of bounding box coordinates, in order + [y_min, x_min, y_max, x_max]. Whether the tensor is a SparseTensor + or a dense Tensor is determined by the return_dense parameter. Empty + positions in the sparse tensor are filled with -1.0 values. + """ + sides = [] + for key in self._full_keys: + value = keys_to_tensors[key] + expanded_dims = tf.concat( + [tf.to_int64(tf.shape(value)), + tf.constant([1], dtype=tf.int64)], 0) + side = tf.sparse_reshape(value, expanded_dims) + sides.append(side) + bounding_boxes = tf.sparse_concat(2, sides) + if self._return_dense: + bounding_boxes = tf.sparse_tensor_to_dense( + bounding_boxes, default_value=self._default_value) + return bounding_boxes + + +class TFSequenceExampleDecoder(data_decoder.DataDecoder): + """Tensorflow Sequence Example proto decoder.""" + + def __init__(self): + """Constructor sets keys_to_features and items_to_handlers.""" + self.keys_to_context_features = { + 'image/format': + tf.FixedLenFeature((), tf.string, default_value='jpeg'), + 'image/filename': + tf.FixedLenFeature((), tf.string, default_value=''), + 'image/key/sha256': + tf.FixedLenFeature((), tf.string, default_value=''), + 'image/source_id': + tf.FixedLenFeature((), tf.string, default_value=''), + 'image/height': + tf.FixedLenFeature((), tf.int64, 1), + 'image/width': + tf.FixedLenFeature((), tf.int64, 1), + } + self.keys_to_features = { + 'image/encoded': tf.FixedLenSequenceFeature((), tf.string), + 'bbox/xmin': tf.VarLenFeature(dtype=tf.float32), + 'bbox/xmax': tf.VarLenFeature(dtype=tf.float32), + 'bbox/ymin': tf.VarLenFeature(dtype=tf.float32), + 'bbox/ymax': tf.VarLenFeature(dtype=tf.float32), + 'bbox/label/index': tf.VarLenFeature(dtype=tf.int64), + 'bbox/label/string': tf.VarLenFeature(tf.string), + 'area': tf.VarLenFeature(tf.float32), + 'is_crowd': tf.VarLenFeature(tf.int64), + 'difficult': tf.VarLenFeature(tf.int64), + 'group_of': tf.VarLenFeature(tf.int64), + } + self.items_to_handlers = { + fields.InputDataFields.image: + tfexample_decoder.Image( + image_key='image/encoded', + format_key='image/format', + channels=3, + repeated=True), + fields.InputDataFields.source_id: ( + tfexample_decoder.Tensor('image/source_id')), + fields.InputDataFields.key: ( + tfexample_decoder.Tensor('image/key/sha256')), + fields.InputDataFields.filename: ( + tfexample_decoder.Tensor('image/filename')), + # Object boxes and classes. + fields.InputDataFields.groundtruth_boxes: + BoundingBoxSequence(prefix='bbox/'), + fields.InputDataFields.groundtruth_classes: ( + tfexample_decoder.Tensor('bbox/label/index')), + fields.InputDataFields.groundtruth_area: + tfexample_decoder.Tensor('area'), + fields.InputDataFields.groundtruth_is_crowd: ( + tfexample_decoder.Tensor('is_crowd')), + fields.InputDataFields.groundtruth_difficult: ( + tfexample_decoder.Tensor('difficult')), + fields.InputDataFields.groundtruth_group_of: ( + tfexample_decoder.Tensor('group_of')) + } + + def decode(self, tf_seq_example_string_tensor, items=None): + """Decodes serialized tf.SequenceExample and returns a tensor dictionary. + + Args: + tf_seq_example_string_tensor: A string tensor holding a serialized + tensorflow example proto. + items: The list of items to decode. These must be a subset of the item + keys in self._items_to_handlers. If `items` is left as None, then all + of the items in self._items_to_handlers are decoded. + + Returns: + A dictionary of the following tensors. 
+ fields.InputDataFields.image - 3D uint8 tensor of shape [None, None, seq] + containing image(s). + fields.InputDataFields.source_id - string tensor containing original + image id. + fields.InputDataFields.key - string tensor with unique sha256 hash key. + fields.InputDataFields.filename - string tensor with original dataset + filename. + fields.InputDataFields.groundtruth_boxes - 2D float32 tensor of shape + [None, 4] containing box corners. + fields.InputDataFields.groundtruth_classes - 1D int64 tensor of shape + [None] containing classes for the boxes. + fields.InputDataFields.groundtruth_area - 1D float32 tensor of shape + [None] containing object mask area in pixel squared. + fields.InputDataFields.groundtruth_is_crowd - 1D bool tensor of shape + [None] indicating if the boxes enclose a crowd. + fields.InputDataFields.groundtruth_difficult - 1D bool tensor of shape + [None] indicating if the boxes represent `difficult` instances. + """ + serialized_example = tf.reshape(tf_seq_example_string_tensor, shape=[]) + decoder = TFSequenceExampleDecoderHelper(self.keys_to_context_features, + self.keys_to_features, + self.items_to_handlers) + if not items: + items = decoder.list_items() + tensors = decoder.decode(serialized_example, items=items) + tensor_dict = dict(zip(items, tensors)) + + return tensor_dict + + +class TFSequenceExampleDecoderHelper(data_decoder.DataDecoder): + """A decoder helper class for TensorFlow SequenceExamples. + + To perform this decoding operation, a SequenceExampleDecoder is given a list + of ItemHandlers. Each ItemHandler indicates the set of features. + """ + + def __init__(self, keys_to_context_features, keys_to_sequence_features, + items_to_handlers): + """Constructs the decoder. + + Args: + keys_to_context_features: A dictionary from TF-SequenceExample context + keys to either tf.VarLenFeature or tf.FixedLenFeature instances. + See tensorflow's parsing_ops.py. + keys_to_sequence_features: A dictionary from TF-SequenceExample sequence + keys to either tf.VarLenFeature or tf.FixedLenSequenceFeature instances. + items_to_handlers: A dictionary from items (strings) to ItemHandler + instances. Note that the ItemHandler's are provided the keys that they + use to return the final item Tensors. + Raises: + ValueError: If the same key is present for context features and sequence + features. + """ + unique_keys = set() + unique_keys.update(keys_to_context_features) + unique_keys.update(keys_to_sequence_features) + if len(unique_keys) != ( + len(keys_to_context_features) + len(keys_to_sequence_features)): + # This situation is ambiguous in the decoder's keys_to_tensors variable. + raise ValueError('Context and sequence keys are not unique. \n' + ' Context keys: %s \n Sequence keys: %s' % + (list(keys_to_context_features.keys()), + list(keys_to_sequence_features.keys()))) + self._keys_to_context_features = keys_to_context_features + self._keys_to_sequence_features = keys_to_sequence_features + self._items_to_handlers = items_to_handlers + + def list_items(self): + """Returns keys of items.""" + return self._items_to_handlers.keys() + + def decode(self, serialized_example, items=None): + """Decodes the given serialized TF-SequenceExample. + + Args: + serialized_example: A serialized TF-SequenceExample tensor. + items: The list of items to decode. These must be a subset of the item + keys in self._items_to_handlers. If `items` is left as None, then all + of the items in self._items_to_handlers are decoded. + Returns: + The decoded items, a list of tensor. 
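
    For example, an ItemHandler whose keys are ('image/encoded',
    'image/format') receives 'image/encoded' from the parsed sequence
    features and 'image/format' from the parsed context features, since
    keys are looked up in the context first and fall back to the feature
    lists (see the decode body below).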
+ """ + context, feature_list = tf.parse_single_sequence_example( + serialized_example, self._keys_to_context_features, + self._keys_to_sequence_features) + # Reshape non-sparse elements just once: + for k in self._keys_to_context_features: + v = self._keys_to_context_features[k] + if isinstance(v, tf.FixedLenFeature): + context[k] = tf.reshape(context[k], v.shape) + if not items: + items = self._items_to_handlers.keys() + outputs = [] + for item in items: + handler = self._items_to_handlers[item] + keys_to_tensors = { + key: context[key] if key in context else feature_list[key] + for key in handler.keys + } + outputs.append(handler.tensors_to_item(keys_to_tensors)) + return outputs diff --git a/models/research/lstm_object_detection/inputs/tf_sequence_example_decoder_test.py b/models/research/lstm_object_detection/inputs/tf_sequence_example_decoder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..dbbb8d3c7443dabcfc0df08638e2a381eca2cc31 --- /dev/null +++ b/models/research/lstm_object_detection/inputs/tf_sequence_example_decoder_test.py @@ -0,0 +1,113 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for lstm_object_detection.tf_sequence_example_decoder.""" + +import numpy as np +import tensorflow.compat.v1 as tf +from tensorflow.core.example import example_pb2 +from tensorflow.core.example import feature_pb2 +from tensorflow.python.framework import dtypes +from tensorflow.python.ops import parsing_ops +from lstm_object_detection.inputs import tf_sequence_example_decoder +from object_detection.core import standard_fields as fields + + +class TFSequenceExampleDecoderTest(tf.test.TestCase): + """Tests for sequence example decoder.""" + + def _EncodeImage(self, image_tensor, encoding_type='jpeg'): + with self.test_session(): + if encoding_type == 'jpeg': + image_encoded = tf.image.encode_jpeg(tf.constant(image_tensor)).eval() + else: + raise ValueError('Invalid encoding type.') + return image_encoded + + def _DecodeImage(self, image_encoded, encoding_type='jpeg'): + with self.test_session(): + if encoding_type == 'jpeg': + image_decoded = tf.image.decode_jpeg(tf.constant(image_encoded)).eval() + else: + raise ValueError('Invalid encoding type.') + return image_decoded + + def testDecodeJpegImageAndBoundingBox(self): + """Test if the decoder can correctly decode the image and bounding box. + + A set of random images (represented as an image tensor) is first decoded as + the groundtrue image. Meanwhile, the image tensor will be encoded and pass + through the sequence example, and then decoded as images. The groundtruth + image and the decoded image are expected to be equal. Similar tests are + also applied to labels such as bounding box. 
+ """ + image_tensor = np.random.randint(256, size=(256, 256, 3)).astype(np.uint8) + encoded_jpeg = self._EncodeImage(image_tensor) + decoded_jpeg = self._DecodeImage(encoded_jpeg) + + sequence_example = example_pb2.SequenceExample( + feature_lists=feature_pb2.FeatureLists( + feature_list={ + 'image/encoded': + feature_pb2.FeatureList(feature=[ + feature_pb2.Feature( + bytes_list=feature_pb2.BytesList( + value=[encoded_jpeg])), + ]), + 'bbox/xmin': + feature_pb2.FeatureList(feature=[ + feature_pb2.Feature( + float_list=feature_pb2.FloatList(value=[0.0])), + ]), + 'bbox/xmax': + feature_pb2.FeatureList(feature=[ + feature_pb2.Feature( + float_list=feature_pb2.FloatList(value=[1.0])) + ]), + 'bbox/ymin': + feature_pb2.FeatureList(feature=[ + feature_pb2.Feature( + float_list=feature_pb2.FloatList(value=[0.0])), + ]), + 'bbox/ymax': + feature_pb2.FeatureList(feature=[ + feature_pb2.Feature( + float_list=feature_pb2.FloatList(value=[1.0])) + ]), + })).SerializeToString() + + example_decoder = tf_sequence_example_decoder.TFSequenceExampleDecoder() + tensor_dict = example_decoder.decode(tf.convert_to_tensor(sequence_example)) + + # Test tensor dict image dimension. + self.assertAllEqual( + (tensor_dict[fields.InputDataFields.image].get_shape().as_list()), + [None, None, None, 3]) + with self.test_session() as sess: + tensor_dict[fields.InputDataFields.image] = tf.squeeze( + tensor_dict[fields.InputDataFields.image]) + tensor_dict[fields.InputDataFields.groundtruth_boxes] = tf.squeeze( + tensor_dict[fields.InputDataFields.groundtruth_boxes]) + tensor_dict = sess.run(tensor_dict) + + # Test decoded image. + self.assertAllEqual(decoded_jpeg, tensor_dict[fields.InputDataFields.image]) + # Test decoded bounding box. + self.assertAllEqual([0.0, 0.0, 1.0, 1.0], + tensor_dict[fields.InputDataFields.groundtruth_boxes]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/lstm_object_detection/lstm/__init__.py b/models/research/lstm_object_detection/lstm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/lstm_object_detection/lstm/lstm_cells.py b/models/research/lstm_object_detection/lstm/lstm_cells.py new file mode 100644 index 0000000000000000000000000000000000000000..a553073d978b4b61e6f550fa65e2a2ccc7bfe92d --- /dev/null +++ b/models/research/lstm_object_detection/lstm/lstm_cells.py @@ -0,0 +1,734 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""BottleneckConvLSTMCell implementation.""" +import functools + +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from tensorflow.contrib import rnn as contrib_rnn +from tensorflow.contrib.framework.python.ops import variables as contrib_variables +import lstm_object_detection.lstm.utils as lstm_utils + + +class BottleneckConvLSTMCell(contrib_rnn.RNNCell): + """Basic LSTM recurrent network cell using separable convolutions. + + The implementation is based on: + Mobile Video Object Detection with Temporally-Aware Feature Maps + https://arxiv.org/abs/1711.06368. + + We add forget_bias (default: 1) to the biases of the forget gate in order to + reduce the scale of forgetting in the beginning of the training. + + This LSTM first projects inputs to the size of the output before doing gate + computations. This saves params unless the input is less than a third of the + state size channel-wise. + """ + + def __init__(self, + filter_size, + output_size, + num_units, + forget_bias=1.0, + activation=tf.tanh, + flatten_state=False, + clip_state=False, + output_bottleneck=False, + pre_bottleneck=False, + visualize_gates=False): + """Initializes the basic LSTM cell. + + Args: + filter_size: collection, conv filter size. + output_size: collection, the width/height dimensions of the cell/output. + num_units: int, The number of channels in the LSTM cell. + forget_bias: float, The bias added to forget gates (see above). + activation: Activation function of the inner states. + flatten_state: if True, state tensor will be flattened and stored as a 2-d + tensor. Use for exporting the model to tfmini. + clip_state: if True, clip state between [-6, 6]. + output_bottleneck: if True, the cell bottleneck will be concatenated to + the cell output. + pre_bottleneck: if True, cell assumes that bottlenecking was performing + before the function was called. + visualize_gates: if True, add histogram summaries of all gates and outputs + to tensorboard. + """ + self._filter_size = list(filter_size) + self._output_size = list(output_size) + self._num_units = num_units + self._forget_bias = forget_bias + self._activation = activation + self._viz_gates = visualize_gates + self._flatten_state = flatten_state + self._clip_state = clip_state + self._output_bottleneck = output_bottleneck + self._pre_bottleneck = pre_bottleneck + self._param_count = self._num_units + for dim in self._output_size: + self._param_count *= dim + + @property + def state_size(self): + return contrib_rnn.LSTMStateTuple(self._output_size + [self._num_units], + self._output_size + [self._num_units]) + + @property + def state_size_flat(self): + return contrib_rnn.LSTMStateTuple([self._param_count], [self._param_count]) + + @property + def output_size(self): + return self._output_size + [self._num_units] + + def __call__(self, inputs, state, scope=None): + """Long short-term memory cell (LSTM) with bottlenecking. + + Args: + inputs: Input tensor at the current timestep. + state: Tuple of tensors, the state and output at the previous timestep. + scope: Optional scope. + + Returns: + A tuple where the first element is the LSTM output and the second is + a LSTMStateTuple of the state at the current timestep. 
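
    Example (mirrors lstm_cells_test.py; shapes are illustrative):

      inputs = tf.zeros([4, 10, 10, 3], dtype=tf.float32)
      cell = BottleneckConvLSTMCell(
          filter_size=[3, 3], output_size=[10, 10], num_units=15)
      init_state = cell.init_state('lstm_state', batch_size=4,
                                   dtype=tf.float32, learned_state=False)
      output, state_tuple = cell(inputs, init_state)
      # output and both state tensors have shape [4, 10, 10, 15].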
+ """ + scope = scope or 'conv_lstm_cell' + with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): + c, h = state + + # unflatten state if necessary + if self._flatten_state: + c = tf.reshape(c, [-1] + self.output_size) + h = tf.reshape(h, [-1] + self.output_size) + + # summary of input passed into cell + if self._viz_gates: + slim.summaries.add_histogram_summary(inputs, 'cell_input') + if self._pre_bottleneck: + bottleneck = inputs + else: + bottleneck = slim.separable_conv2d( + tf.concat([inputs, h], 3), + self._num_units, + self._filter_size, + depth_multiplier=1, + activation_fn=self._activation, + normalizer_fn=None, + scope='bottleneck') + + if self._viz_gates: + slim.summaries.add_histogram_summary(bottleneck, 'bottleneck') + + concat = slim.separable_conv2d( + bottleneck, + 4 * self._num_units, + self._filter_size, + depth_multiplier=1, + activation_fn=None, + normalizer_fn=None, + scope='gates') + + i, j, f, o = tf.split(concat, 4, 3) + + new_c = ( + c * tf.sigmoid(f + self._forget_bias) + + tf.sigmoid(i) * self._activation(j)) + if self._clip_state: + new_c = tf.clip_by_value(new_c, -6, 6) + new_h = self._activation(new_c) * tf.sigmoid(o) + # summary of cell output and new state + if self._viz_gates: + slim.summaries.add_histogram_summary(new_h, 'cell_output') + slim.summaries.add_histogram_summary(new_c, 'cell_state') + + output = new_h + if self._output_bottleneck: + output = tf.concat([new_h, bottleneck], axis=3) + + # reflatten state to store it + if self._flatten_state: + new_c = tf.reshape(new_c, [-1, self._param_count]) + new_h = tf.reshape(new_h, [-1, self._param_count]) + + return output, contrib_rnn.LSTMStateTuple(new_c, new_h) + + def init_state(self, state_name, batch_size, dtype, learned_state=False): + """Creates an initial state compatible with this cell. + + Args: + state_name: name of the state tensor + batch_size: model batch size + dtype: dtype for the tensor values i.e. tf.float32 + learned_state: whether the initial state should be learnable. If false, + the initial state is set to all 0's + + Returns: + The created initial state. + """ + state_size = ( + self.state_size_flat if self._flatten_state else self.state_size) + # list of 2 zero tensors or variables tensors, depending on if + # learned_state is true + # pylint: disable=g-long-ternary,g-complex-comprehension + ret_flat = [(contrib_variables.model_variable( + state_name + str(i), + shape=s, + dtype=dtype, + initializer=tf.truncated_normal_initializer(stddev=0.03)) + if learned_state else tf.zeros( + [batch_size] + s, dtype=dtype, name=state_name)) + for i, s in enumerate(state_size)] + + # duplicates initial state across the batch axis if it's learned + if learned_state: + ret_flat = [ + tf.stack([tensor + for i in range(int(batch_size))]) + for tensor in ret_flat + ] + for s, r in zip(state_size, ret_flat): + r.set_shape([None] + s) + return tf.nest.pack_sequence_as(structure=[1, 1], flat_sequence=ret_flat) + + def pre_bottleneck(self, inputs, state, input_index): + """Apply pre-bottleneck projection to inputs. + + Pre-bottleneck operation maps features of different channels into the same + dimension. The purpose of this op is to share the features from both large + and small models in the same LSTM cell. + + Args: + inputs: 4D Tensor with shape [batch_size x width x height x input_size]. + state: 4D Tensor with shape [batch_size x width x height x state_size]. + input_index: integer index indicating which base features the inputs + correspoding to. + + Returns: + inputs: pre-bottlenecked inputs. 
+ Raises: + ValueError: If pre_bottleneck is not set or inputs is not rank 4. + """ + # Sometimes state is a tuple, in which case it cannot be modified, e.g. + # during training, tf.contrib.training.SequenceQueueingStateSaver + # returns the state as a tuple. This should not be an issue since we + # only need to modify state[1] during export, when state should be a + # list. + if len(inputs.shape) != 4: + raise ValueError('Expect rank 4 feature tensor.') + if not self._flatten_state and len(state.shape) != 4: + raise ValueError('Expect rank 4 state tensor.') + if self._flatten_state and len(state.shape) != 2: + raise ValueError('Expect rank 2 state tensor when flatten_state is set.') + + with tf.name_scope(None): + state = tf.identity(state, name='raw_inputs/init_lstm_h') + if self._flatten_state: + batch_size = inputs.shape[0] + height = inputs.shape[1] + width = inputs.shape[2] + state = tf.reshape(state, [batch_size, height, width, -1]) + with tf.variable_scope('conv_lstm_cell', reuse=tf.AUTO_REUSE): + scope_name = 'bottleneck_%d' % input_index + inputs = slim.separable_conv2d( + tf.concat([inputs, state], 3), + self.output_size[-1], + self._filter_size, + depth_multiplier=1, + activation_fn=tf.nn.relu6, + normalizer_fn=None, + scope=scope_name) + # For exporting inference graph, we only mark the first timestep. + with tf.name_scope(None): + inputs = tf.identity( + inputs, name='raw_outputs/base_endpoint_%d' % (input_index + 1)) + return inputs + + +class GroupedConvLSTMCell(contrib_rnn.RNNCell): + """Basic LSTM recurrent network cell using separable convolutions. + + The implementation is based on: https://arxiv.org/abs/1903.10172. + + We add forget_bias (default: 1) to the biases of the forget gate in order to + reduce the scale of forgetting in the beginning of the training. + + This LSTM first projects inputs to the size of the output before doing gate + computations. This saves params unless the input is less than a third of the + state size channel-wise. Computation of bottlenecks and gates is divided + into independent groups for further savings. + """ + + def __init__(self, + filter_size, + output_size, + num_units, + is_training, + forget_bias=1.0, + activation=tf.tanh, + use_batch_norm=False, + flatten_state=False, + groups=4, + clip_state=False, + scale_state=False, + output_bottleneck=False, + pre_bottleneck=False, + is_quantized=False, + visualize_gates=False, + conv_op_overrides=None): + """Initialize the basic LSTM cell. + + Args: + filter_size: collection, conv filter size + output_size: collection, the width/height dimensions of the cell/output + num_units: int, The number of channels in the LSTM cell. + is_training: Whether the LSTM is in training mode. + forget_bias: float, The bias added to forget gates (see above). + activation: Activation function of the inner states. + use_batch_norm: if True, use batch norm after convolution + flatten_state: if True, state tensor will be flattened and stored as a 2-d + tensor. Use for exporting the model to tfmini + groups: Number of groups to split the state into. Must evenly divide + num_units. + clip_state: if True, clips state between [-6, 6]. + scale_state: if True, scales state so that all values are under 6 at all + times. + output_bottleneck: if True, the cell bottleneck will be concatenated to + the cell output. + pre_bottleneck: if True, cell assumes that bottlenecking was performing + before the function was called. 
+ is_quantized: if True, the model is in quantize mode, which requires + quantization friendly concat and separable_conv2d ops. + visualize_gates: if True, add histogram summaries of all gates and outputs + to tensorboard + conv_op_overrides: A list of convolutional operations that override the + 'bottleneck' and 'convolution' layers before lstm gates. If None, the + original implementation of seperable_conv will be used. The length of + the list should be two. + + Raises: + ValueError: when both clip_state and scale_state are enabled. + """ + if clip_state and scale_state: + raise ValueError('clip_state and scale_state cannot both be enabled.') + + self._filter_size = list(filter_size) + self._output_size = list(output_size) + self._num_units = num_units + self._is_training = is_training + self._forget_bias = forget_bias + self._activation = activation + self._use_batch_norm = use_batch_norm + self._viz_gates = visualize_gates + self._flatten_state = flatten_state + self._param_count = self._num_units + self._groups = groups + self._scale_state = scale_state + self._clip_state = clip_state + self._output_bottleneck = output_bottleneck + self._pre_bottleneck = pre_bottleneck + self._is_quantized = is_quantized + for dim in self._output_size: + self._param_count *= dim + self._conv_op_overrides = conv_op_overrides + if self._conv_op_overrides and len(self._conv_op_overrides) != 2: + raise ValueError('Bottleneck and Convolutional layer should be overriden' + 'together') + + @property + def state_size(self): + return contrib_rnn.LSTMStateTuple(self._output_size + [self._num_units], + self._output_size + [self._num_units]) + + @property + def state_size_flat(self): + return contrib_rnn.LSTMStateTuple([self._param_count], [self._param_count]) + + @property + def output_size(self): + return self._output_size + [self._num_units] + + @property + def filter_size(self): + return self._filter_size + + @property + def num_groups(self): + return self._groups + + def __call__(self, inputs, state, scope=None): + """Long short-term memory cell (LSTM) with bottlenecking. + + Includes logic for quantization-aware training. Note that all concats and + activations use fixed ranges unless stated otherwise. + + Args: + inputs: Input tensor at the current timestep. + state: Tuple of tensors, the state at the previous timestep. + scope: Optional scope. + + Returns: + A tuple where the first element is the LSTM output and the second is + a LSTMStateTuple of the state at the current timestep. + """ + scope = scope or 'conv_lstm_cell' + with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): + c, h = state + + # Set nodes to be under raw_inputs/ name scope for tfmini export. 
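      # tf.name_scope(None) temporarily escapes the surrounding variable/name
      # scopes so the identity ops below get the exact top-level names
      # 'raw_inputs/init_lstm_c' and 'raw_inputs/init_lstm_h', which the
      # export tooling looks up by name.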
+ with tf.name_scope(None): + c = tf.identity(c, name='raw_inputs/init_lstm_c') + # When pre_bottleneck is enabled, input h handle is in rnn_decoder.py + if not self._pre_bottleneck: + h = tf.identity(h, name='raw_inputs/init_lstm_h') + + # unflatten state if necessary + if self._flatten_state: + c = tf.reshape(c, [-1] + self.output_size) + h = tf.reshape(h, [-1] + self.output_size) + + c_list = tf.split(c, self._groups, axis=3) + if self._pre_bottleneck: + inputs_list = tf.split(inputs, self._groups, axis=3) + else: + h_list = tf.split(h, self._groups, axis=3) + out_bottleneck = [] + out_c = [] + out_h = [] + # summary of input passed into cell + if self._viz_gates: + slim.summaries.add_histogram_summary(inputs, 'cell_input') + + for k in range(self._groups): + if self._pre_bottleneck: + bottleneck = inputs_list[k] + else: + if self._conv_op_overrides: + bottleneck_fn = self._conv_op_overrides[0] + else: + bottleneck_fn = functools.partial( + lstm_utils.quantizable_separable_conv2d, + kernel_size=self._filter_size, + activation_fn=self._activation) + if self._use_batch_norm: + b_x = bottleneck_fn( + inputs=inputs, + num_outputs=self._num_units // self._groups, + is_quantized=self._is_quantized, + depth_multiplier=1, + normalizer_fn=None, + scope='bottleneck_%d_x' % k) + b_h = bottleneck_fn( + inputs=h_list[k], + num_outputs=self._num_units // self._groups, + is_quantized=self._is_quantized, + depth_multiplier=1, + normalizer_fn=None, + scope='bottleneck_%d_h' % k) + b_x = slim.batch_norm( + b_x, + scale=True, + is_training=self._is_training, + scope='BatchNorm_%d_X' % k) + b_h = slim.batch_norm( + b_h, + scale=True, + is_training=self._is_training, + scope='BatchNorm_%d_H' % k) + bottleneck = b_x + b_h + else: + # All concats use fixed quantization ranges to prevent rescaling + # at inference. Both |inputs| and |h_list| are tensors resulting + # from Relu6 operations so we fix the ranges to [0, 6]. + bottleneck_concat = lstm_utils.quantizable_concat( + [inputs, h_list[k]], + axis=3, + is_training=False, + is_quantized=self._is_quantized, + scope='bottleneck_%d/quantized_concat' % k) + bottleneck = bottleneck_fn( + inputs=bottleneck_concat, + num_outputs=self._num_units // self._groups, + is_quantized=self._is_quantized, + depth_multiplier=1, + normalizer_fn=None, + scope='bottleneck_%d' % k) + + if self._conv_op_overrides: + conv_fn = self._conv_op_overrides[1] + else: + conv_fn = functools.partial( + lstm_utils.quantizable_separable_conv2d, + kernel_size=self._filter_size, + activation_fn=None) + concat = conv_fn( + inputs=bottleneck, + num_outputs=4 * self._num_units // self._groups, + is_quantized=self._is_quantized, + depth_multiplier=1, + normalizer_fn=None, + scope='concat_conv_%d' % k) + + # Since there is no activation in the previous separable conv, we + # quantize here. A starting range of [-6, 6] is used because the + # tensors are input to a Sigmoid function that saturates at these + # ranges. 
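        # Assumption (not stated in this file): quantize_op inserts a
        # fake-quantization node whose recorded min/max range, seeded here
        # with [-6, 6], is what the TFLite converter later uses to derive the
        # fixed-point scale for this tensor.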
+ concat = lstm_utils.quantize_op( + concat, + is_training=self._is_training, + default_min=-6, + default_max=6, + is_quantized=self._is_quantized, + scope='gates_%d/act_quant' % k) + + # i = input_gate, j = new_input, f = forget_gate, o = output_gate + i, j, f, o = tf.split(concat, 4, 3) + + f_add = f + self._forget_bias + f_add = lstm_utils.quantize_op( + f_add, + is_training=self._is_training, + default_min=-6, + default_max=6, + is_quantized=self._is_quantized, + scope='forget_gate_%d/add_quant' % k) + f_act = tf.sigmoid(f_add) + + a = c_list[k] * f_act + a = lstm_utils.quantize_op( + a, + is_training=self._is_training, + is_quantized=self._is_quantized, + scope='forget_gate_%d/mul_quant' % k) + + i_act = tf.sigmoid(i) + + j_act = self._activation(j) + # The quantization range is fixed for the relu6 to ensure that zero + # is exactly representable. + j_act = lstm_utils.fixed_quantize_op( + j_act, + fixed_min=0.0, + fixed_max=6.0, + is_quantized=self._is_quantized, + scope='new_input_%d/act_quant' % k) + + b = i_act * j_act + b = lstm_utils.quantize_op( + b, + is_training=self._is_training, + is_quantized=self._is_quantized, + scope='input_gate_%d/mul_quant' % k) + + new_c = a + b + # The quantization range is fixed to [0, 6] due to an optimization in + # TFLite. The order of operations is as fllows: + # Add -> FakeQuant -> Relu6 -> FakeQuant -> Concat. + # The fakequant ranges to the concat must be fixed to ensure all inputs + # to the concat have the same range, removing the need for rescaling. + # The quantization ranges input to the relu6 are propagated to its + # output. Any mismatch between these two ranges will cause an error. + new_c = lstm_utils.fixed_quantize_op( + new_c, + fixed_min=0.0, + fixed_max=6.0, + is_quantized=self._is_quantized, + scope='new_c_%d/add_quant' % k) + + if not self._is_quantized: + if self._scale_state: + normalizer = tf.maximum(1.0, + tf.reduce_max(new_c, axis=(1, 2, 3)) / 6) + new_c /= tf.reshape(normalizer, [tf.shape(new_c)[0], 1, 1, 1]) + elif self._clip_state: + new_c = tf.clip_by_value(new_c, -6, 6) + + new_c_act = self._activation(new_c) + # The quantization range is fixed for the relu6 to ensure that zero + # is exactly representable. + new_c_act = lstm_utils.fixed_quantize_op( + new_c_act, + fixed_min=0.0, + fixed_max=6.0, + is_quantized=self._is_quantized, + scope='new_c_%d/act_quant' % k) + + o_act = tf.sigmoid(o) + + new_h = new_c_act * o_act + # The quantization range is fixed since it is input to a concat. + # A range of [0, 6] is used since |new_h| is a product of ranges [0, 6] + # and [0, 1]. + new_h_act = lstm_utils.fixed_quantize_op( + new_h, + fixed_min=0.0, + fixed_max=6.0, + is_quantized=self._is_quantized, + scope='new_h_%d/act_quant' % k) + + out_bottleneck.append(bottleneck) + out_c.append(new_c_act) + out_h.append(new_h_act) + + # Since all inputs to the below concats are already quantized, we can use + # a regular concat operation. + new_c = tf.concat(out_c, axis=3) + new_h = tf.concat(out_h, axis=3) + + # |bottleneck| is input to a concat with |new_h|. We must use + # quantizable_concat() with a fixed range that matches |new_h|. 
+ bottleneck = lstm_utils.quantizable_concat( + out_bottleneck, + axis=3, + is_training=False, + is_quantized=self._is_quantized, + scope='out_bottleneck/quantized_concat') + + # summary of cell output and new state + if self._viz_gates: + slim.summaries.add_histogram_summary(new_h, 'cell_output') + slim.summaries.add_histogram_summary(new_c, 'cell_state') + + output = new_h + if self._output_bottleneck: + output = lstm_utils.quantizable_concat( + [new_h, bottleneck], + axis=3, + is_training=False, + is_quantized=self._is_quantized, + scope='new_output/quantized_concat') + + # reflatten state to store it + if self._flatten_state: + new_c = tf.reshape(new_c, [-1, self._param_count], name='lstm_c') + new_h = tf.reshape(new_h, [-1, self._param_count], name='lstm_h') + + # Set nodes to be under raw_outputs/ name scope for tfmini export. + with tf.name_scope(None): + new_c = tf.identity(new_c, name='raw_outputs/lstm_c') + new_h = tf.identity(new_h, name='raw_outputs/lstm_h') + states_and_output = contrib_rnn.LSTMStateTuple(new_c, new_h) + + return output, states_and_output + + def init_state(self, state_name, batch_size, dtype, learned_state=False): + """Creates an initial state compatible with this cell. + + Args: + state_name: name of the state tensor + batch_size: model batch size + dtype: dtype for the tensor values i.e. tf.float32 + learned_state: whether the initial state should be learnable. If false, + the initial state is set to all 0's + + Returns: + ret: the created initial state + """ + state_size = ( + self.state_size_flat if self._flatten_state else self.state_size) + # list of 2 zero tensors or variables tensors, + # depending on if learned_state is true + # pylint: disable=g-long-ternary,g-complex-comprehension + ret_flat = [(contrib_variables.model_variable( + state_name + str(i), + shape=s, + dtype=dtype, + initializer=tf.truncated_normal_initializer(stddev=0.03)) + if learned_state else tf.zeros( + [batch_size] + s, dtype=dtype, name=state_name)) + for i, s in enumerate(state_size)] + + # duplicates initial state across the batch axis if it's learned + if learned_state: + ret_flat = [tf.stack([tensor for i in range(int(batch_size))]) + for tensor in ret_flat] + for s, r in zip(state_size, ret_flat): + r = tf.reshape(r, [-1] + s) + ret = tf.nest.pack_sequence_as(structure=[1, 1], flat_sequence=ret_flat) + return ret + + def pre_bottleneck(self, inputs, state, input_index): + """Apply pre-bottleneck projection to inputs. + + Pre-bottleneck operation maps features of different channels into the same + dimension. The purpose of this op is to share the features from both large + and small models in the same LSTM cell. + + Args: + inputs: 4D Tensor with shape [batch_size x width x height x input_size]. + state: 4D Tensor with shape [batch_size x width x height x state_size]. + input_index: integer index indicating which base features the inputs + correspoding to. + + Returns: + inputs: pre-bottlenecked inputs. + Raises: + ValueError: If pre_bottleneck is not set or inputs is not rank 4. + """ + # Sometimes state is a tuple, in which case it cannot be modified, e.g. + # during training, tf.contrib.training.SequenceQueueingStateSaver + # returns the state as a tuple. This should not be an issue since we + # only need to modify state[1] during export, when state should be a + # list. 
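    # In the grouped cell the projection below is also grouped: the state is
    # split into `self._groups` slices and each [inputs, state_k] pair is
    # projected by its own separable conv to output_size[-1] / groups
    # channels before the slices are concatenated back together.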
+ if not self._pre_bottleneck: + raise ValueError('Only applied when pre_bottleneck is set to true.') + if len(inputs.shape) != 4: + raise ValueError('Expect a rank 4 feature tensor.') + if not self._flatten_state and len(state.shape) != 4: + raise ValueError('Expect rank 4 state tensor.') + if self._flatten_state and len(state.shape) != 2: + raise ValueError('Expect rank 2 state tensor when flatten_state is set.') + + with tf.name_scope(None): + state = tf.identity( + state, name='raw_inputs/init_lstm_h_%d' % (input_index + 1)) + if self._flatten_state: + batch_size = inputs.shape[0] + height = inputs.shape[1] + width = inputs.shape[2] + state = tf.reshape(state, [batch_size, height, width, -1]) + with tf.variable_scope('conv_lstm_cell', reuse=tf.AUTO_REUSE): + state_split = tf.split(state, self._groups, axis=3) + with tf.variable_scope('bottleneck_%d' % input_index): + bottleneck_out = [] + for k in range(self._groups): + with tf.variable_scope('group_%d' % k): + bottleneck_out.append( + lstm_utils.quantizable_separable_conv2d( + lstm_utils.quantizable_concat( + [inputs, state_split[k]], + axis=3, + is_training=self._is_training, + is_quantized=self._is_quantized, + scope='quantized_concat'), + self.output_size[-1] / self._groups, + self._filter_size, + is_quantized=self._is_quantized, + depth_multiplier=1, + activation_fn=tf.nn.relu6, + normalizer_fn=None, + scope='project')) + inputs = lstm_utils.quantizable_concat( + bottleneck_out, + axis=3, + is_training=self._is_training, + is_quantized=self._is_quantized, + scope='bottleneck_out/quantized_concat') + # For exporting inference graph, we only mark the first timestep. + with tf.name_scope(None): + inputs = tf.identity( + inputs, name='raw_outputs/base_endpoint_%d' % (input_index + 1)) + return inputs diff --git a/models/research/lstm_object_detection/lstm/lstm_cells_test.py b/models/research/lstm_object_detection/lstm/lstm_cells_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b296310194dde2a10249c0af266d50ff762ec745 --- /dev/null +++ b/models/research/lstm_object_detection/lstm/lstm_cells_test.py @@ -0,0 +1,412 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
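For reference, the pre_bottleneck() path above is meant to be interleaved with the regular cell call: feature maps with different channel counts are first projected to the shared bottleneck width, then fed to the cell. A rough usage sketch, with shapes and input_index values borrowed from the unit tests that follow (the specific constructor arguments are assumptions taken from those tests):

import tensorflow.compat.v1 as tf
from lstm_object_detection.lstm import lstm_cells

cell = lstm_cells.GroupedConvLSTMCell(
    filter_size=[3, 3], output_size=[10, 10], num_units=16,
    is_training=True, pre_bottleneck=True)
state = cell.init_state('lstm_state', batch_size=4, dtype=tf.float32)

features_large = tf.zeros([4, 10, 10, 5])  # e.g. from the large extractor
features_small = tf.zeros([4, 10, 10, 3])  # e.g. from the small extractor

# Project each feature map to the shared bottleneck width before the cell.
x = cell.pre_bottleneck(features_large, state[1], input_index=0)
output, state = cell(x, state)
x = cell.pre_bottleneck(features_small, state[1], input_index=1)
output, state = cell(x, state)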
+# ============================================================================== +"""Tests for lstm_object_detection.lstm.lstm_cells.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow.compat.v1 as tf + +from lstm_object_detection.lstm import lstm_cells + + +class BottleneckConvLstmCellsTest(tf.test.TestCase): + + def test_run_lstm_cell(self): + filter_size = [3, 3] + output_size = [10, 10] + num_units = 15 + state_name = 'lstm_state' + batch_size = 4 + dtype = tf.float32 + learned_state = False + + inputs = tf.zeros([4, 10, 10, 3], dtype=tf.float32) + cell = lstm_cells.BottleneckConvLSTMCell( + filter_size=filter_size, + output_size=output_size, + num_units=num_units) + init_state = cell.init_state( + state_name, batch_size, dtype, learned_state) + output, state_tuple = cell(inputs, init_state) + self.assertAllEqual([4, 10, 10, 15], output.shape.as_list()) + self.assertAllEqual([4, 10, 10, 15], state_tuple[0].shape.as_list()) + self.assertAllEqual([4, 10, 10, 15], state_tuple[1].shape.as_list()) + + def test_run_lstm_cell_with_flattened_state(self): + filter_size = [3, 3] + output_dim = 10 + output_size = [output_dim] * 2 + num_units = 15 + state_name = 'lstm_state' + batch_size = 4 + dtype = tf.float32 + learned_state = False + + inputs = tf.zeros([batch_size, output_dim, output_dim, 3], dtype=tf.float32) + cell = lstm_cells.BottleneckConvLSTMCell( + filter_size=filter_size, + output_size=output_size, + num_units=num_units, + flatten_state=True) + init_state = cell.init_state( + state_name, batch_size, dtype, learned_state) + output, state_tuple = cell(inputs, init_state) + self.assertAllEqual([4, 10, 10, 15], output.shape.as_list()) + self.assertAllEqual([4, 1500], state_tuple[0].shape.as_list()) + self.assertAllEqual([4, 1500], state_tuple[1].shape.as_list()) + + def test_run_lstm_cell_with_output_bottleneck(self): + filter_size = [3, 3] + output_dim = 10 + output_size = [output_dim] * 2 + num_units = 15 + state_name = 'lstm_state' + batch_size = 4 + dtype = tf.float32 + learned_state = False + + inputs = tf.zeros([batch_size, output_dim, output_dim, 3], dtype=tf.float32) + cell = lstm_cells.BottleneckConvLSTMCell( + filter_size=filter_size, + output_size=output_size, + num_units=num_units, + output_bottleneck=True) + init_state = cell.init_state( + state_name, batch_size, dtype, learned_state) + output, state_tuple = cell(inputs, init_state) + self.assertAllEqual([4, 10, 10, 30], output.shape.as_list()) + self.assertAllEqual([4, 10, 10, 15], state_tuple[0].shape.as_list()) + self.assertAllEqual([4, 10, 10, 15], state_tuple[1].shape.as_list()) + + def test_get_init_state(self): + filter_size = [3, 3] + output_dim = 10 + output_size = [output_dim] * 2 + num_units = 15 + state_name = 'lstm_state' + batch_size = 4 + dtype = tf.float32 + learned_state = False + + cell = lstm_cells.BottleneckConvLSTMCell( + filter_size=filter_size, + output_size=output_size, + num_units=num_units) + init_c, init_h = cell.init_state( + state_name, batch_size, dtype, learned_state) + + self.assertEqual(tf.float32, init_c.dtype) + self.assertEqual(tf.float32, init_h.dtype) + with self.test_session() as sess: + init_c_res, init_h_res = sess.run([init_c, init_h]) + self.assertAllClose(np.zeros((4, 10, 10, 15)), init_c_res) + self.assertAllClose(np.zeros((4, 10, 10, 15)), init_h_res) + + def test_get_init_learned_state(self): + filter_size = [3, 3] + output_size = [10, 10] + num_units = 15 + state_name = 
'lstm_state' + batch_size = 4 + dtype = tf.float32 + learned_state = True + + cell = lstm_cells.BottleneckConvLSTMCell( + filter_size=filter_size, + output_size=output_size, + num_units=num_units) + init_c, init_h = cell.init_state( + state_name, batch_size, dtype, learned_state) + + self.assertEqual(tf.float32, init_c.dtype) + self.assertEqual(tf.float32, init_h.dtype) + self.assertAllEqual([4, 10, 10, 15], init_c.shape.as_list()) + self.assertAllEqual([4, 10, 10, 15], init_h.shape.as_list()) + + def test_unroll(self): + filter_size = [3, 3] + output_size = [10, 10] + num_units = 15 + state_name = 'lstm_state' + batch_size = 4 + dtype = tf.float32 + unroll = 10 + learned_state = False + + inputs = tf.zeros([4, 10, 10, 3], dtype=tf.float32) + cell = lstm_cells.BottleneckConvLSTMCell( + filter_size=filter_size, + output_size=output_size, + num_units=num_units) + state = cell.init_state( + state_name, batch_size, dtype, learned_state) + for step in range(unroll): + output, state = cell(inputs, state) + self.assertAllEqual([4, 10, 10, 15], output.shape.as_list()) + self.assertAllEqual([4, 10, 10, 15], state[0].shape.as_list()) + self.assertAllEqual([4, 10, 10, 15], state[1].shape.as_list()) + + def test_prebottleneck(self): + filter_size = [3, 3] + output_size = [10, 10] + num_units = 15 + state_name = 'lstm_state' + batch_size = 4 + dtype = tf.float32 + unroll = 10 + learned_state = False + + inputs_large = tf.zeros([4, 10, 10, 5], dtype=tf.float32) + inputs_small = tf.zeros([4, 10, 10, 3], dtype=tf.float32) + cell = lstm_cells.BottleneckConvLSTMCell( + filter_size=filter_size, + output_size=output_size, + num_units=num_units, + pre_bottleneck=True) + state = cell.init_state( + state_name, batch_size, dtype, learned_state) + for step in range(unroll): + if step % 2 == 0: + inputs = cell.pre_bottleneck(inputs_large, state[1], 0) + else: + inputs = cell.pre_bottleneck(inputs_small, state[1], 1) + output, state = cell(inputs, state) + self.assertAllEqual([4, 10, 10, 15], output.shape.as_list()) + self.assertAllEqual([4, 10, 10, 15], state[0].shape.as_list()) + self.assertAllEqual([4, 10, 10, 15], state[1].shape.as_list()) + + def test_flatten_state(self): + filter_size = [3, 3] + output_size = [10, 10] + num_units = 15 + state_name = 'lstm_state' + batch_size = 4 + dtype = tf.float32 + unroll = 10 + learned_state = False + + inputs_large = tf.zeros([4, 10, 10, 5], dtype=tf.float32) + inputs_small = tf.zeros([4, 10, 10, 3], dtype=tf.float32) + cell = lstm_cells.BottleneckConvLSTMCell( + filter_size=filter_size, + output_size=output_size, + num_units=num_units, + pre_bottleneck=True, + flatten_state=True) + state = cell.init_state( + state_name, batch_size, dtype, learned_state) + for step in range(unroll): + if step % 2 == 0: + inputs = cell.pre_bottleneck(inputs_large, state[1], 0) + else: + inputs = cell.pre_bottleneck(inputs_small, state[1], 1) + output, state = cell(inputs, state) + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + output_result, state_result = sess.run([output, state]) + self.assertAllEqual((4, 10, 10, 15), output_result.shape) + self.assertAllEqual((4, 10*10*15), state_result[0].shape) + self.assertAllEqual((4, 10*10*15), state_result[1].shape) + + +class GroupedConvLstmCellsTest(tf.test.TestCase): + + def test_run_lstm_cell(self): + filter_size = [3, 3] + output_size = [10, 10] + num_units = 16 + state_name = 'lstm_state' + batch_size = 4 + dtype = tf.float32 + learned_state = False + + inputs = tf.zeros([4, 10, 10, 3], dtype=tf.float32) + 
cell = lstm_cells.GroupedConvLSTMCell( + filter_size=filter_size, + output_size=output_size, + num_units=num_units, + is_training=True) + init_state = cell.init_state( + state_name, batch_size, dtype, learned_state) + output, state_tuple = cell(inputs, init_state) + self.assertAllEqual([4, 10, 10, 16], output.shape.as_list()) + self.assertAllEqual([4, 10, 10, 16], state_tuple[0].shape.as_list()) + self.assertAllEqual([4, 10, 10, 16], state_tuple[1].shape.as_list()) + + def test_run_lstm_cell_with_output_bottleneck(self): + filter_size = [3, 3] + output_dim = 10 + output_size = [output_dim] * 2 + num_units = 16 + state_name = 'lstm_state' + batch_size = 4 + dtype = tf.float32 + learned_state = False + + inputs = tf.zeros([batch_size, output_dim, output_dim, 3], dtype=tf.float32) + cell = lstm_cells.GroupedConvLSTMCell( + filter_size=filter_size, + output_size=output_size, + num_units=num_units, + is_training=True, + output_bottleneck=True) + init_state = cell.init_state( + state_name, batch_size, dtype, learned_state) + output, state_tuple = cell(inputs, init_state) + self.assertAllEqual([4, 10, 10, 32], output.shape.as_list()) + self.assertAllEqual([4, 10, 10, 16], state_tuple[0].shape.as_list()) + self.assertAllEqual([4, 10, 10, 16], state_tuple[1].shape.as_list()) + + def test_get_init_state(self): + filter_size = [3, 3] + output_dim = 10 + output_size = [output_dim] * 2 + num_units = 16 + state_name = 'lstm_state' + batch_size = 4 + dtype = tf.float32 + learned_state = False + + cell = lstm_cells.GroupedConvLSTMCell( + filter_size=filter_size, + output_size=output_size, + num_units=num_units, + is_training=True) + init_c, init_h = cell.init_state( + state_name, batch_size, dtype, learned_state) + + self.assertEqual(tf.float32, init_c.dtype) + self.assertEqual(tf.float32, init_h.dtype) + with self.test_session() as sess: + init_c_res, init_h_res = sess.run([init_c, init_h]) + self.assertAllClose(np.zeros((4, 10, 10, 16)), init_c_res) + self.assertAllClose(np.zeros((4, 10, 10, 16)), init_h_res) + + def test_get_init_learned_state(self): + filter_size = [3, 3] + output_size = [10, 10] + num_units = 16 + state_name = 'lstm_state' + batch_size = 4 + dtype = tf.float32 + learned_state = True + + cell = lstm_cells.GroupedConvLSTMCell( + filter_size=filter_size, + output_size=output_size, + num_units=num_units, + is_training=True) + init_c, init_h = cell.init_state( + state_name, batch_size, dtype, learned_state) + + self.assertEqual(tf.float32, init_c.dtype) + self.assertEqual(tf.float32, init_h.dtype) + self.assertAllEqual([4, 10, 10, 16], init_c.shape.as_list()) + self.assertAllEqual([4, 10, 10, 16], init_h.shape.as_list()) + + def test_unroll(self): + filter_size = [3, 3] + output_size = [10, 10] + num_units = 16 + state_name = 'lstm_state' + batch_size = 4 + dtype = tf.float32 + unroll = 10 + learned_state = False + + inputs = tf.zeros([4, 10, 10, 3], dtype=tf.float32) + cell = lstm_cells.GroupedConvLSTMCell( + filter_size=filter_size, + output_size=output_size, + num_units=num_units, + is_training=True) + state = cell.init_state( + state_name, batch_size, dtype, learned_state) + for step in range(unroll): + output, state = cell(inputs, state) + self.assertAllEqual([4, 10, 10, 16], output.shape.as_list()) + self.assertAllEqual([4, 10, 10, 16], state[0].shape.as_list()) + self.assertAllEqual([4, 10, 10, 16], state[1].shape.as_list()) + + def test_prebottleneck(self): + filter_size = [3, 3] + output_size = [10, 10] + num_units = 16 + state_name = 'lstm_state' + batch_size = 4 + dtype = 
tf.float32 + unroll = 10 + learned_state = False + + inputs_large = tf.zeros([4, 10, 10, 5], dtype=tf.float32) + inputs_small = tf.zeros([4, 10, 10, 3], dtype=tf.float32) + cell = lstm_cells.GroupedConvLSTMCell( + filter_size=filter_size, + output_size=output_size, + num_units=num_units, + is_training=True, + pre_bottleneck=True) + state = cell.init_state( + state_name, batch_size, dtype, learned_state) + for step in range(unroll): + if step % 2 == 0: + inputs = cell.pre_bottleneck(inputs_large, state[1], 0) + else: + inputs = cell.pre_bottleneck(inputs_small, state[1], 1) + output, state = cell(inputs, state) + self.assertAllEqual([4, 10, 10, 16], output.shape.as_list()) + self.assertAllEqual([4, 10, 10, 16], state[0].shape.as_list()) + self.assertAllEqual([4, 10, 10, 16], state[1].shape.as_list()) + + def test_flatten_state(self): + filter_size = [3, 3] + output_size = [10, 10] + num_units = 16 + state_name = 'lstm_state' + batch_size = 4 + dtype = tf.float32 + unroll = 10 + learned_state = False + + inputs_large = tf.zeros([4, 10, 10, 5], dtype=tf.float32) + inputs_small = tf.zeros([4, 10, 10, 3], dtype=tf.float32) + cell = lstm_cells.GroupedConvLSTMCell( + filter_size=filter_size, + output_size=output_size, + num_units=num_units, + is_training=True, + pre_bottleneck=True, + flatten_state=True) + state = cell.init_state( + state_name, batch_size, dtype, learned_state) + for step in range(unroll): + if step % 2 == 0: + inputs = cell.pre_bottleneck(inputs_large, state[1], 0) + else: + inputs = cell.pre_bottleneck(inputs_small, state[1], 1) + output, state = cell(inputs, state) + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + output_result, state_result = sess.run([output, state]) + self.assertAllEqual((4, 10, 10, 16), output_result.shape) + self.assertAllEqual((4, 10*10*16), state_result[0].shape) + self.assertAllEqual((4, 10*10*16), state_result[1].shape) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/lstm_object_detection/lstm/rnn_decoder.py b/models/research/lstm_object_detection/lstm/rnn_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..185ca130396fa8687ba9359f91366b64d16d0255 --- /dev/null +++ b/models/research/lstm_object_detection/lstm/rnn_decoder.py @@ -0,0 +1,269 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Custom RNN decoder.""" + +import tensorflow.compat.v1 as tf +import lstm_object_detection.lstm.utils as lstm_utils + + +class _NoVariableScope(object): + + def __enter__(self): + return + + def __exit__(self, exc_type, exc_value, traceback): + return False + + +def rnn_decoder(decoder_inputs, + initial_state, + cell, + loop_function=None, + scope=None): + """RNN decoder for the LSTM-SSD model. + + This decoder returns a list of all states, rather than only the final state. 
+ Args: + decoder_inputs: A list of 4D Tensors with shape [batch_size x input_size]. + initial_state: 2D Tensor with shape [batch_size x cell.state_size]. + cell: rnn_cell.RNNCell defining the cell function and size. + loop_function: If not None, this function will be applied to the i-th output + in order to generate the i+1-st input, and decoder_inputs will be ignored, + except for the first element ("GO" symbol). This can be used for decoding, + but also for training to emulate http://arxiv.org/abs/1506.03099. + Signature -- loop_function(prev, i) = next + * prev is a 2D Tensor of shape [batch_size x output_size], + * i is an integer, the step number (when advanced control is needed), + * next is a 2D Tensor of shape [batch_size x input_size]. + scope: optional VariableScope for the created subgraph. + Returns: + A tuple of the form (outputs, state), where: + outputs: A list of the same length as decoder_inputs of 4D Tensors with + shape [batch_size x output_size] containing generated outputs. + states: A list of the same length as decoder_inputs of the state of each + cell at each time-step. It is a 2D Tensor of shape + [batch_size x cell.state_size]. + """ + with tf.variable_scope(scope) if scope else _NoVariableScope(): + state_tuple = initial_state + outputs = [] + states = [] + prev = None + for local_step, decoder_input in enumerate(decoder_inputs): + if loop_function is not None and prev is not None: + with tf.variable_scope('loop_function', reuse=True): + decoder_input = loop_function(prev, local_step) + output, state_tuple = cell(decoder_input, state_tuple) + outputs.append(output) + states.append(state_tuple) + if loop_function is not None: + prev = output + return outputs, states + +def multi_input_rnn_decoder(decoder_inputs, + initial_state, + cell, + sequence_step, + selection_strategy='RANDOM', + is_training=None, + is_quantized=False, + preprocess_fn_list=None, + pre_bottleneck=False, + flatten_state=False, + scope=None): + """RNN decoder for the Interleaved LSTM-SSD model. + + This decoder takes multiple sequences of inputs and selects the input to feed + to the rnn at each timestep using its selection_strategy, which can be random, + learned, or deterministic. + This decoder returns a list of all states, rather than only the final state. + Args: + decoder_inputs: A list of lists of 2D Tensors [batch_size x input_size]. + initial_state: 2D Tensor with shape [batch_size x cell.state_size]. + cell: rnn_cell.RNNCell defining the cell function and size. + sequence_step: Tensor [batch_size] of the step number of the first elements + in the sequence. + selection_strategy: Method for picking the decoder_input to use at each + timestep. Must be 'RANDOM', 'SKIPX' for integer X, where X is the number + of times to use the second input before using the first. + is_training: boolean, whether the network is training. When using learned + selection, attempts exploration if training. + is_quantized: flag to enable/disable quantization mode. + preprocess_fn_list: List of functions accepting two tensor arguments: one + timestep of decoder_inputs and the lstm state. If not None, + decoder_inputs[i] will be updated with preprocess_fn[i] at the start of + each timestep. + pre_bottleneck: if True, use separate bottleneck weights for each sequence. + Useful when input sequences have differing numbers of channels. Final + bottlenecks will have the same dimension. + flatten_state: Whether the LSTM state is flattened. + scope: optional VariableScope for the created subgraph. 
+ Returns: + A tuple of the form (outputs, state), where: + outputs: A list of the same length as decoder_inputs of 2D Tensors with + shape [batch_size x output_size] containing generated outputs. + states: A list of the same length as decoder_inputs of the state of each + cell at each time-step. It is a 2D Tensor of shape + [batch_size x cell.state_size]. + Raises: + ValueError: If selection_strategy is not recognized or unexpected unroll + length. + """ + if flatten_state and len(decoder_inputs[0]) > 1: + raise ValueError('In export mode, unroll length should not be more than 1') + with tf.variable_scope(scope) if scope else _NoVariableScope(): + state_tuple = initial_state + outputs = [] + states = [] + batch_size = decoder_inputs[0][0].shape[0].value + num_sequences = len(decoder_inputs) + sequence_length = len(decoder_inputs[0]) + + for local_step in range(sequence_length): + for sequence_index in range(num_sequences): + if preprocess_fn_list is not None: + decoder_inputs[sequence_index][local_step] = ( + preprocess_fn_list[sequence_index]( + decoder_inputs[sequence_index][local_step], state_tuple[0])) + if pre_bottleneck: + decoder_inputs[sequence_index][local_step] = cell.pre_bottleneck( + inputs=decoder_inputs[sequence_index][local_step], + state=state_tuple[1], + input_index=sequence_index) + + action = generate_action(selection_strategy, local_step, sequence_step, + [batch_size, 1, 1, 1]) + inputs, _ = ( + select_inputs(decoder_inputs, action, local_step, is_training, + is_quantized)) + # Mark base network endpoints under raw_inputs/ + with tf.name_scope(None): + inputs = tf.identity(inputs, 'raw_inputs/base_endpoint') + output, state_tuple_out = cell(inputs, state_tuple) + state_tuple = select_state(state_tuple, state_tuple_out, action) + + outputs.append(output) + states.append(state_tuple) + return outputs, states + + +def generate_action(selection_strategy, local_step, sequence_step, + action_shape): + """Generate current (binary) action based on selection strategy. + + Args: + selection_strategy: Method for picking the decoder_input to use at each + timestep. Must be 'RANDOM', 'SKIPX' for integer X, where X is the number + of times to use the second input before using the first. + local_step: Tensor [batch_size] of the step number within the current + unrolled batch. + sequence_step: Tensor [batch_size] of the step number of the first elements + in the sequence. + action_shape: The shape of action tensor to be generated. + + Returns: + A tensor of shape action_shape, each element is an individual action. + + Raises: + ValueError: if selection_strategy is not supported or if 'SKIP' is not + followed by numerics. + """ + if selection_strategy.startswith('RANDOM'): + action = tf.random.uniform(action_shape, maxval=2, dtype=tf.int32) + action = tf.minimum(action, 1) + + # First step always runs large network. + if local_step == 0 and sequence_step is not None: + action *= tf.minimum( + tf.reshape(tf.cast(sequence_step, tf.int32), action_shape), 1) + elif selection_strategy.startswith('SKIP'): + inter_count = int(selection_strategy[4:]) + if local_step % (inter_count + 1) == 0: + action = tf.zeros(action_shape) + else: + action = tf.ones(action_shape) + else: + raise ValueError('Selection strategy %s not recognized' % + selection_strategy) + return tf.cast(action, tf.int32) + + +def select_inputs(decoder_inputs, action, local_step, is_training, is_quantized, + get_alt_inputs=False): + """Selects sequence from decoder_inputs based on 1D actions. 
+ + Given multiple input batches, creates a single output batch by + selecting the action[i]-th input for the i-th batch element. + + Args: + decoder_inputs: A 2-D list of tensor inputs. + action: A tensor of shape [batch_size]. Each element corresponds to an index + of decoder_inputs to choose. + local_step: The current timestep. + is_training: boolean, whether the network is training. When using learned + selection, attempts exploration if training. + is_quantized: flag to enable/disable quantization mode. + get_alt_inputs: Whether the non-chosen inputs should also be returned. + + Returns: + The constructed output. Also outputs the elements that were not chosen + if get_alt_inputs is True, otherwise None. + + Raises: + ValueError: if the decoder inputs contain other than two sequences. + """ + num_seqs = len(decoder_inputs) + if not num_seqs == 2: + raise ValueError('Currently only supports two sets of inputs.') + stacked_inputs = tf.stack( + [decoder_inputs[seq_index][local_step] for seq_index in range(num_seqs)], + axis=-1) + action_index = tf.one_hot(action, num_seqs) + selected_inputs = ( + lstm_utils.quantize_op(stacked_inputs * action_index, is_training, + is_quantized, scope='quant_selected_inputs')) + inputs = tf.reduce_sum(selected_inputs, axis=-1) + inputs_alt = None + # Only works for 2 models. + if get_alt_inputs: + # Reverse of action_index. + action_index_alt = tf.one_hot(action, num_seqs, on_value=0.0, off_value=1.0) + selected_inputs = ( + lstm_utils.quantize_op(stacked_inputs * action_index_alt, is_training, + is_quantized, scope='quant_selected_inputs_alt')) + inputs_alt = tf.reduce_sum(selected_inputs, axis=-1) + return inputs, inputs_alt + +def select_state(previous_state, new_state, action): + """Select state given action. + + Currently only supports binary action. If action is 0, it means the state is + generated from the large model, and thus we will update the state. Otherwise, + if the action is 1, it means the state is generated from the small model, and + in the interleaved model, we skip this state update. + + Args: + previous_state: A state tuple representing state from previous step. + new_state: A state tuple representing newly computed state. + action: A tensor of the same shape as the state. + + Returns: + A state tuple selected based on the given action. + """ + action = tf.cast(action, tf.float32) + state_c = previous_state[0] * action + new_state[0] * (1 - action) + state_h = previous_state[1] * action + new_state[1] * (1 - action) + return (state_c, state_h) diff --git a/models/research/lstm_object_detection/lstm/rnn_decoder_test.py b/models/research/lstm_object_detection/lstm/rnn_decoder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..480694f6fde57332b2f72357d5d6903ec7a12f87 --- /dev/null +++ b/models/research/lstm_object_detection/lstm/rnn_decoder_test.py @@ -0,0 +1,306 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
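The state-gating arithmetic in select_state() above is simple enough to check by hand. A toy sketch of the same formula, with assumed scalar "states" and a hand-picked action so the result is easy to verify:

import tensorflow.compat.v1 as tf

prev_c = tf.constant([[1.0], [1.0]])
new_c = tf.constant([[5.0], [5.0]])
# action 0: the large model ran, so take the freshly computed state;
# action 1: the small model ran, so keep the previous state.
action = tf.constant([[0.0], [1.0]])

# Same formula as select_state().
mixed_c = prev_c * action + new_c * (1.0 - action)
with tf.Session() as sess:
  print(sess.run(mixed_c))  # [[5.], [1.]]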
+# ============================================================================== + +"""Tests for lstm_object_detection.lstm.rnn_decoder.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow.compat.v1 as tf + +from tensorflow.contrib import layers as contrib_layers +from tensorflow.contrib import rnn as contrib_rnn +from lstm_object_detection.lstm import rnn_decoder + + +class MockRnnCell(contrib_rnn.RNNCell): + + def __init__(self, input_size, num_units): + self._input_size = input_size + self._num_units = num_units + self._filter_size = [3, 3] + + def __call__(self, inputs, state_tuple): + outputs = tf.concat([inputs, state_tuple[0]], axis=3) + new_state_tuple = (tf.multiply(state_tuple[0], 2), state_tuple[1]) + return outputs, new_state_tuple + + def state_size(self): + return self._num_units + + def output_size(self): + return self._input_size + self._num_units + + def pre_bottleneck(self, inputs, state, input_index): + with tf.variable_scope('bottleneck_%d' % input_index, reuse=tf.AUTO_REUSE): + inputs = contrib_layers.separable_conv2d( + tf.concat([inputs, state], 3), + self._input_size, + self._filter_size, + depth_multiplier=1, + activation_fn=tf.nn.relu6, + normalizer_fn=None) + return inputs + + +class RnnDecoderTest(tf.test.TestCase): + + def test_rnn_decoder_single_unroll(self): + batch_size = 2 + num_unroll = 1 + num_units = 64 + width = 8 + height = 10 + input_channels = 128 + + initial_state = tf.random_normal((batch_size, width, height, num_units)) + inputs = tf.random_normal([batch_size, width, height, input_channels]) + + rnn_cell = MockRnnCell(input_channels, num_units) + outputs, states = rnn_decoder.rnn_decoder( + decoder_inputs=[inputs] * num_unroll, + initial_state=(initial_state, initial_state), + cell=rnn_cell) + + self.assertEqual(len(outputs), num_unroll) + self.assertEqual(len(states), num_unroll) + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + results = sess.run((outputs, states, inputs, initial_state)) + outputs_results = results[0] + states_results = results[1] + inputs_results = results[2] + initial_states_results = results[3] + self.assertEqual(outputs_results[0].shape, + (batch_size, width, height, input_channels + num_units)) + self.assertAllEqual( + outputs_results[0], + np.concatenate((inputs_results, initial_states_results), axis=3)) + self.assertEqual(states_results[0][0].shape, + (batch_size, width, height, num_units)) + self.assertEqual(states_results[0][1].shape, + (batch_size, width, height, num_units)) + self.assertAllEqual(states_results[0][0], + np.multiply(initial_states_results, 2.0)) + self.assertAllEqual(states_results[0][1], initial_states_results) + + def test_rnn_decoder_multiple_unroll(self): + batch_size = 2 + num_unroll = 3 + num_units = 64 + width = 8 + height = 10 + input_channels = 128 + + initial_state = tf.random_normal((batch_size, width, height, num_units)) + inputs = tf.random_normal([batch_size, width, height, input_channels]) + + rnn_cell = MockRnnCell(input_channels, num_units) + outputs, states = rnn_decoder.rnn_decoder( + decoder_inputs=[inputs] * num_unroll, + initial_state=(initial_state, initial_state), + cell=rnn_cell) + + self.assertEqual(len(outputs), num_unroll) + self.assertEqual(len(states), num_unroll) + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + results = sess.run((outputs, states, inputs, initial_state)) + outputs_results = results[0] + 
states_results = results[1] + inputs_results = results[2] + initial_states_results = results[3] + for i in range(num_unroll): + previous_state = ([initial_states_results, initial_states_results] + if i == 0 else states_results[i - 1]) + self.assertEqual( + outputs_results[i].shape, + (batch_size, width, height, input_channels + num_units)) + self.assertAllEqual( + outputs_results[i], + np.concatenate((inputs_results, previous_state[0]), axis=3)) + self.assertEqual(states_results[i][0].shape, + (batch_size, width, height, num_units)) + self.assertEqual(states_results[i][1].shape, + (batch_size, width, height, num_units)) + self.assertAllEqual(states_results[i][0], + np.multiply(previous_state[0], 2.0)) + self.assertAllEqual(states_results[i][1], previous_state[1]) + + +class MultiInputRnnDecoderTest(tf.test.TestCase): + + def test_rnn_decoder_single_unroll(self): + batch_size = 2 + num_unroll = 1 + num_units = 12 + width = 8 + height = 10 + input_channels_large = 24 + input_channels_small = 12 + bottleneck_channels = 20 + + initial_state_c = tf.random_normal((batch_size, width, height, num_units)) + initial_state_h = tf.random_normal((batch_size, width, height, num_units)) + initial_state = (initial_state_c, initial_state_h) + inputs_large = tf.random_normal( + [batch_size, width, height, input_channels_large]) + inputs_small = tf.random_normal( + [batch_size, width, height, input_channels_small]) + + rnn_cell = MockRnnCell(bottleneck_channels, num_units) + outputs, states = rnn_decoder.multi_input_rnn_decoder( + decoder_inputs=[[inputs_large] * num_unroll, + [inputs_small] * num_unroll], + initial_state=initial_state, + cell=rnn_cell, + sequence_step=tf.zeros([batch_size]), + pre_bottleneck=True) + + self.assertEqual(len(outputs), num_unroll) + self.assertEqual(len(states), num_unroll) + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + results = sess.run( + (outputs, states, inputs_large, inputs_small, initial_state)) + outputs_results = results[0] + states_results = results[1] + initial_states_results = results[4] + self.assertEqual( + outputs_results[0].shape, + (batch_size, width, height, bottleneck_channels + num_units)) + self.assertEqual(states_results[0][0].shape, + (batch_size, width, height, num_units)) + self.assertEqual(states_results[0][1].shape, + (batch_size, width, height, num_units)) + # The first step should always update state. 
+ self.assertAllEqual(states_results[0][0], + np.multiply(initial_states_results[0], 2)) + self.assertAllEqual(states_results[0][1], initial_states_results[1]) + + def test_rnn_decoder_multiple_unroll(self): + batch_size = 2 + num_unroll = 3 + num_units = 12 + width = 8 + height = 10 + input_channels_large = 24 + input_channels_small = 12 + bottleneck_channels = 20 + + initial_state_c = tf.random_normal((batch_size, width, height, num_units)) + initial_state_h = tf.random_normal((batch_size, width, height, num_units)) + initial_state = (initial_state_c, initial_state_h) + inputs_large = tf.random_normal( + [batch_size, width, height, input_channels_large]) + inputs_small = tf.random_normal( + [batch_size, width, height, input_channels_small]) + + rnn_cell = MockRnnCell(bottleneck_channels, num_units) + outputs, states = rnn_decoder.multi_input_rnn_decoder( + decoder_inputs=[[inputs_large] * num_unroll, + [inputs_small] * num_unroll], + initial_state=initial_state, + cell=rnn_cell, + sequence_step=tf.zeros([batch_size]), + pre_bottleneck=True) + + self.assertEqual(len(outputs), num_unroll) + self.assertEqual(len(states), num_unroll) + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + results = sess.run( + (outputs, states, inputs_large, inputs_small, initial_state)) + outputs_results = results[0] + states_results = results[1] + initial_states_results = results[4] + + # The first step should always update state. + self.assertAllEqual(states_results[0][0], + np.multiply(initial_states_results[0], 2)) + self.assertAllEqual(states_results[0][1], initial_states_results[1]) + for i in range(num_unroll): + self.assertEqual( + outputs_results[i].shape, + (batch_size, width, height, bottleneck_channels + num_units)) + self.assertEqual(states_results[i][0].shape, + (batch_size, width, height, num_units)) + self.assertEqual(states_results[i][1].shape, + (batch_size, width, height, num_units)) + + def test_rnn_decoder_multiple_unroll_with_skip(self): + batch_size = 2 + num_unroll = 5 + num_units = 12 + width = 8 + height = 10 + input_channels_large = 24 + input_channels_small = 12 + bottleneck_channels = 20 + skip = 2 + + initial_state_c = tf.random_normal((batch_size, width, height, num_units)) + initial_state_h = tf.random_normal((batch_size, width, height, num_units)) + initial_state = (initial_state_c, initial_state_h) + inputs_large = tf.random_normal( + [batch_size, width, height, input_channels_large]) + inputs_small = tf.random_normal( + [batch_size, width, height, input_channels_small]) + + rnn_cell = MockRnnCell(bottleneck_channels, num_units) + outputs, states = rnn_decoder.multi_input_rnn_decoder( + decoder_inputs=[[inputs_large] * num_unroll, + [inputs_small] * num_unroll], + initial_state=initial_state, + cell=rnn_cell, + sequence_step=tf.zeros([batch_size]), + pre_bottleneck=True, + selection_strategy='SKIP%d' % skip) + + self.assertEqual(len(outputs), num_unroll) + self.assertEqual(len(states), num_unroll) + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + results = sess.run( + (outputs, states, inputs_large, inputs_small, initial_state)) + outputs_results = results[0] + states_results = results[1] + initial_states_results = results[4] + + for i in range(num_unroll): + self.assertEqual( + outputs_results[i].shape, + (batch_size, width, height, bottleneck_channels + num_units)) + self.assertEqual(states_results[i][0].shape, + (batch_size, width, height, num_units)) + self.assertEqual(states_results[i][1].shape, + (batch_size, width, 
height, num_units)) + + previous_state = ( + initial_states_results if i == 0 else states_results[i - 1]) + # State only updates during key frames + if i % (skip + 1) == 0: + self.assertAllEqual(states_results[i][0], + np.multiply(previous_state[0], 2)) + self.assertAllEqual(states_results[i][1], previous_state[1]) + else: + self.assertAllEqual(states_results[i][0], previous_state[0]) + self.assertAllEqual(states_results[i][1], previous_state[1]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/lstm_object_detection/lstm/utils.py b/models/research/lstm_object_detection/lstm/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0c87db4bb208ece5102df327e5487fbffb2fe2ce --- /dev/null +++ b/models/research/lstm_object_detection/lstm/utils.py @@ -0,0 +1,257 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Quantization related ops for LSTM.""" + +from __future__ import absolute_import +from __future__ import division + +import tensorflow.compat.v1 as tf +from tensorflow.contrib import framework as contrib_framework +from tensorflow.contrib import layers as contrib_layers +from tensorflow.python.training import moving_averages + + +def _quant_var( + name, + initializer_val, + vars_collection=tf.GraphKeys.MOVING_AVERAGE_VARIABLES, +): + """Create an var for storing the min/max quantization range.""" + return contrib_framework.model_variable( + name, + shape=[], + initializer=tf.constant_initializer(initializer_val), + collections=[vars_collection], + trainable=False) + + +def quantizable_concat(inputs, + axis, + is_training, + is_quantized=True, + default_min=0, + default_max=6, + ema_decay=0.999, + scope='quantized_concat'): + """Concat replacement with quantization option. + + Allows concat inputs to share the same min max ranges, + from experimental/gazelle/synthetic/model/tpu/utils.py. + + Args: + inputs: list of tensors to concatenate. + axis: dimension along which to concatenate. + is_training: true if the graph is a training graph. + is_quantized: flag to enable/disable quantization. + default_min: default min value for fake quant op. + default_max: default max value for fake quant op. + ema_decay: the moving average decay for the quantization variables. + scope: Optional scope for variable_scope. + + Returns: + Tensor resulting from concatenation of input tensors + """ + if is_quantized: + with tf.variable_scope(scope): + tf.logging.info('inputs: {}'.format(inputs)) + for t in inputs: + tf.logging.info(t) + + min_var = _quant_var('min', default_min) + max_var = _quant_var('max', default_max) + if not is_training: + # If we are building an eval graph just use the values in the variables. 
+ quant_inputs = [ + tf.fake_quant_with_min_max_vars(t, min_var, max_var) for t in inputs + ] + tf.logging.info('min_val: {}'.format(min_var)) + tf.logging.info('max_val: {}'.format(max_var)) + else: + concat_tensors = tf.concat(inputs, axis=axis) + tf.logging.info('concat_tensors: {}'.format(concat_tensors)) + # TFLite requires that 0.0 is always in the [min; max] range. + range_min = tf.minimum( + tf.reduce_min(concat_tensors), 0.0, name='SafeQuantRangeMin') + range_max = tf.maximum( + tf.reduce_max(concat_tensors), 0.0, name='SafeQuantRangeMax') + # Otherwise we need to keep track of the moving averages of the min and + # max of the elements of the input tensor. + min_val = moving_averages.assign_moving_average( + min_var, + range_min, + ema_decay, + name='AssignMinEma') + max_val = moving_averages.assign_moving_average( + max_var, + range_max, + ema_decay, + name='AssignMaxEma') + tf.logging.info('min_val: {}'.format(min_val)) + tf.logging.info('max_val: {}'.format(max_val)) + quant_inputs = [ + tf.fake_quant_with_min_max_vars(t, min_val, max_val) for t in inputs + ] + tf.logging.info('quant_inputs: {}'.format(quant_inputs)) + outputs = tf.concat(quant_inputs, axis=axis) + tf.logging.info('outputs: {}'.format(outputs)) + else: + outputs = tf.concat(inputs, axis=axis) + return outputs + + +def quantizable_separable_conv2d(inputs, + num_outputs, + kernel_size, + is_quantized=True, + depth_multiplier=1, + stride=1, + activation_fn=tf.nn.relu6, + normalizer_fn=None, + weights_initializer=None, + pointwise_initializer=None, + scope=None): + """Quantization friendly backward compatible separable conv2d. + + This op has the same API as separable_conv2d. The main difference is that an + additional BiasAdd is manually inserted after the depthwise conv, such that + the depthwise bias will not have a name conflict with the pointwise bias. The + motivation of this op is that the quantization script needs a BiasAdd in order + to recognize the op, which a native call to separable_conv2d does not create + for the depthwise conv. + + Args: + inputs: A tensor of size [batch_size, height, width, channels]. + num_outputs: The number of pointwise convolution output filters. If it is + None, then we skip the pointwise convolution stage. + kernel_size: A list of length 2: [kernel_height, kernel_width] of the + filters. Can be an int if both values are the same. + is_quantized: flag to enable/disable quantization. + depth_multiplier: The number of depthwise convolution output channels for + each input channel. The total number of depthwise convolution output + channels will be equal to num_filters_in * depth_multiplier. + stride: A list of length 2: [stride_height, stride_width], specifying the + depthwise convolution stride. Can be an int if both strides are the same. + activation_fn: Activation function. The default value is a ReLU function. + Explicitly set it to None to skip it and maintain a linear activation. + normalizer_fn: Normalization function to use instead of biases. + weights_initializer: An initializer for the depthwise weights. + pointwise_initializer: An initializer for the pointwise weights. + scope: Optional scope for variable_scope. 
+ + Returns: + Tensor resulting from the separable convolution of the input tensors + """ + if is_quantized: + outputs = contrib_layers.separable_conv2d( + inputs, + None, + kernel_size, + depth_multiplier=depth_multiplier, + stride=1, + activation_fn=None, + normalizer_fn=None, + biases_initializer=None, + weights_initializer=weights_initializer, + pointwise_initializer=None, + scope=scope) + outputs = contrib_layers.bias_add( + outputs, trainable=True, scope='%s_bias' % scope) + outputs = contrib_layers.conv2d( + outputs, + num_outputs, [1, 1], + activation_fn=activation_fn, + stride=stride, + normalizer_fn=normalizer_fn, + weights_initializer=pointwise_initializer, + scope=scope) + else: + outputs = contrib_layers.separable_conv2d( + inputs, + num_outputs, + kernel_size, + depth_multiplier=depth_multiplier, + stride=stride, + activation_fn=activation_fn, + normalizer_fn=normalizer_fn, + weights_initializer=weights_initializer, + pointwise_initializer=pointwise_initializer, + scope=scope) + return outputs + + +def quantize_op(inputs, + is_training=True, + is_quantized=True, + default_min=0, + default_max=6, + ema_decay=0.999, + scope='quant'): + """Inserts a fake quantization op after inputs. + + Args: + inputs: A tensor of size [batch_size, height, width, channels]. + is_training: true if the graph is a training graph. + is_quantized: flag to enable/disable quantization. + default_min: default min value for fake quant op. + default_max: default max value for fake quant op. + ema_decay: the moving average decay for the quantization variables. + scope: Optional scope for variable_scope. + + Returns: + Tensor resulting from quantizing the input tensors. + """ + if not is_quantized: + return inputs + + with tf.variable_scope(scope): + min_var = _quant_var('min', default_min) + max_var = _quant_var('max', default_max) + if not is_training: + # Just use variables in the checkpoint. + return tf.fake_quant_with_min_max_vars(inputs, min_var, max_var) + + # While training, collect EMAs of ranges seen, store in min_var, max_var. + # TFLite requires that 0.0 is always in the [min; max] range. + range_min = tf.minimum(tf.reduce_min(inputs), 0.0, 'SafeQuantRangeMin') + # We set the lower_bound of max_range to prevent range collapse. + range_max = tf.maximum(tf.reduce_max(inputs), 1e-5, 'SafeQuantRangeMax') + min_val = moving_averages.assign_moving_average( + min_var, range_min, ema_decay, name='AssignMinEma') + max_val = moving_averages.assign_moving_average( + max_var, range_max, ema_decay, name='AssignMaxEma') + return tf.fake_quant_with_min_max_vars(inputs, min_val, max_val) + + +def fixed_quantize_op(inputs, is_quantized=True, + fixed_min=0.0, fixed_max=6.0, scope='quant'): + """Inserts a fake quantization op with fixed range after inputs. + + Args: + inputs: A tensor of size [batch_size, height, width, channels]. + is_quantized: flag to enable/disable quantization. + fixed_min: fixed min value for fake quant op. + fixed_max: fixed max value for fake quant op. + scope: Optional scope for variable_scope. + + Returns: + Tensor resulting from quantizing the input tensors. + """ + if not is_quantized: + return inputs + + with tf.variable_scope(scope): + # Just use fixed quantization range. 
+ return tf.fake_quant_with_min_max_args(inputs, fixed_min, fixed_max) diff --git a/models/research/lstm_object_detection/lstm/utils_test.py b/models/research/lstm_object_detection/lstm/utils_test.py new file mode 100644 index 0000000000000000000000000000000000000000..f5f5bc75db8f7e7be44fc15898598e5179e51236 --- /dev/null +++ b/models/research/lstm_object_detection/lstm/utils_test.py @@ -0,0 +1,149 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for lstm_object_detection.lstm.utils.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf +from lstm_object_detection.lstm import utils + + +class QuantizableUtilsTest(tf.test.TestCase): + + def test_quantizable_concat_is_training(self): + inputs_1 = tf.zeros([4, 10, 10, 1], dtype=tf.float32) + inputs_2 = tf.ones([4, 10, 10, 2], dtype=tf.float32) + concat_in_train = utils.quantizable_concat([inputs_1, inputs_2], + axis=3, + is_training=True) + self.assertAllEqual([4, 10, 10, 3], concat_in_train.shape.as_list()) + self._check_min_max_ema(tf.get_default_graph()) + self._check_min_max_vars(tf.get_default_graph()) + + def test_quantizable_concat_inference(self): + inputs_1 = tf.zeros([4, 10, 10, 1], dtype=tf.float32) + inputs_2 = tf.ones([4, 10, 10, 2], dtype=tf.float32) + concat_in_train = utils.quantizable_concat([inputs_1, inputs_2], + axis=3, + is_training=False) + self.assertAllEqual([4, 10, 10, 3], concat_in_train.shape.as_list()) + self._check_no_min_max_ema(tf.get_default_graph()) + self._check_min_max_vars(tf.get_default_graph()) + + def test_quantizable_concat_not_quantized_is_training(self): + inputs_1 = tf.zeros([4, 10, 10, 1], dtype=tf.float32) + inputs_2 = tf.ones([4, 10, 10, 2], dtype=tf.float32) + concat_in_train = utils.quantizable_concat([inputs_1, inputs_2], + axis=3, + is_training=True, + is_quantized=False) + self.assertAllEqual([4, 10, 10, 3], concat_in_train.shape.as_list()) + self._check_no_min_max_ema(tf.get_default_graph()) + self._check_no_min_max_vars(tf.get_default_graph()) + + def test_quantizable_concat_not_quantized_inference(self): + inputs_1 = tf.zeros([4, 10, 10, 1], dtype=tf.float32) + inputs_2 = tf.ones([4, 10, 10, 2], dtype=tf.float32) + concat_in_train = utils.quantizable_concat([inputs_1, inputs_2], + axis=3, + is_training=False, + is_quantized=False) + self.assertAllEqual([4, 10, 10, 3], concat_in_train.shape.as_list()) + self._check_no_min_max_ema(tf.get_default_graph()) + self._check_no_min_max_vars(tf.get_default_graph()) + + def test_quantize_op_is_training(self): + inputs = tf.zeros([4, 10, 10, 128], dtype=tf.float32) + outputs = utils.quantize_op(inputs) + self.assertAllEqual(inputs.shape.as_list(), outputs.shape.as_list()) + self._check_min_max_ema(tf.get_default_graph()) + self._check_min_max_vars(tf.get_default_graph()) + + def 
test_quantize_op_inference(self): + inputs = tf.zeros([4, 10, 10, 128], dtype=tf.float32) + outputs = utils.quantize_op(inputs, is_training=False) + self.assertAllEqual(inputs.shape.as_list(), outputs.shape.as_list()) + self._check_no_min_max_ema(tf.get_default_graph()) + self._check_min_max_vars(tf.get_default_graph()) + + def test_fixed_quantize_op(self): + inputs = tf.zeros([4, 10, 10, 128], dtype=tf.float32) + outputs = utils.fixed_quantize_op(inputs) + self.assertAllEqual(inputs.shape.as_list(), outputs.shape.as_list()) + self._check_no_min_max_ema(tf.get_default_graph()) + self._check_no_min_max_vars(tf.get_default_graph()) + + def _check_min_max_vars(self, graph): + op_types = [op.type for op in graph.get_operations()] + self.assertTrue( + any('FakeQuantWithMinMaxVars' in op_type for op_type in op_types)) + + def _check_min_max_ema(self, graph): + op_names = [op.name for op in graph.get_operations()] + self.assertTrue(any('AssignMinEma' in name for name in op_names)) + self.assertTrue(any('AssignMaxEma' in name for name in op_names)) + self.assertTrue(any('SafeQuantRangeMin' in name for name in op_names)) + self.assertTrue(any('SafeQuantRangeMax' in name for name in op_names)) + + def _check_no_min_max_vars(self, graph): + op_types = [op.type for op in graph.get_operations()] + self.assertFalse( + any('FakeQuantWithMinMaxVars' in op_type for op_type in op_types)) + + def _check_no_min_max_ema(self, graph): + op_names = [op.name for op in graph.get_operations()] + self.assertFalse(any('AssignMinEma' in name for name in op_names)) + self.assertFalse(any('AssignMaxEma' in name for name in op_names)) + self.assertFalse(any('SafeQuantRangeMin' in name for name in op_names)) + self.assertFalse(any('SafeQuantRangeMax' in name for name in op_names)) + + +class QuantizableSeparableConv2dTest(tf.test.TestCase): + + def test_quantizable_separable_conv2d(self): + inputs = tf.zeros([4, 10, 10, 128], dtype=tf.float32) + num_outputs = 64 + kernel_size = [3, 3] + scope = 'QuantSeparable' + outputs = utils.quantizable_separable_conv2d( + inputs, num_outputs, kernel_size, scope=scope) + self.assertAllEqual([4, 10, 10, num_outputs], outputs.shape.as_list()) + self._check_depthwise_bias_add(tf.get_default_graph(), scope) + + def test_quantizable_separable_conv2d_not_quantized(self): + inputs = tf.zeros([4, 10, 10, 128], dtype=tf.float32) + num_outputs = 64 + kernel_size = [3, 3] + scope = 'QuantSeparable' + outputs = utils.quantizable_separable_conv2d( + inputs, num_outputs, kernel_size, is_quantized=False, scope=scope) + self.assertAllEqual([4, 10, 10, num_outputs], outputs.shape.as_list()) + self._check_no_depthwise_bias_add(tf.get_default_graph(), scope) + + def _check_depthwise_bias_add(self, graph, scope): + op_names = [op.name for op in graph.get_operations()] + self.assertTrue( + any('%s_bias/BiasAdd' % scope in name for name in op_names)) + + def _check_no_depthwise_bias_add(self, graph, scope): + op_names = [op.name for op in graph.get_operations()] + self.assertFalse( + any('%s_bias/BiasAdd' % scope in name for name in op_names)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/lstm_object_detection/meta_architectures/__init__.py b/models/research/lstm_object_detection/meta_architectures/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/lstm_object_detection/meta_architectures/lstm_ssd_meta_arch.py 
b/models/research/lstm_object_detection/meta_architectures/lstm_ssd_meta_arch.py new file mode 100644 index 0000000000000000000000000000000000000000..22edc97ee348df8a4a4ce8b885a4df6a6b891072 --- /dev/null +++ b/models/research/lstm_object_detection/meta_architectures/lstm_ssd_meta_arch.py @@ -0,0 +1,463 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""LSTM SSD Meta-architecture definition. + +General tensorflow implementation of convolutional Multibox/SSD detection +models with LSTM states, for use on video data. This implementation supports +both regular LSTM-SSD and interleaved LSTM-SSD framework. + +See https://arxiv.org/abs/1711.06368 and https://arxiv.org/abs/1903.10172 +for details. +""" +import abc +import re +import tensorflow.compat.v1 as tf + +from object_detection.core import box_list_ops +from object_detection.core import matcher +from object_detection.core import standard_fields as fields +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.utils import ops +from object_detection.utils import shape_utils + + +class LSTMSSDMetaArch(ssd_meta_arch.SSDMetaArch): + """LSTM Meta-architecture definition.""" + + def __init__(self, + is_training, + anchor_generator, + box_predictor, + box_coder, + feature_extractor, + encode_background_as_zeros, + image_resizer_fn, + non_max_suppression_fn, + score_conversion_fn, + classification_loss, + localization_loss, + classification_loss_weight, + localization_loss_weight, + normalize_loss_by_num_matches, + hard_example_miner, + unroll_length, + target_assigner_instance, + add_summaries=True): + super(LSTMSSDMetaArch, self).__init__( + is_training=is_training, + anchor_generator=anchor_generator, + box_predictor=box_predictor, + box_coder=box_coder, + feature_extractor=feature_extractor, + encode_background_as_zeros=encode_background_as_zeros, + image_resizer_fn=image_resizer_fn, + non_max_suppression_fn=non_max_suppression_fn, + score_conversion_fn=score_conversion_fn, + classification_loss=classification_loss, + localization_loss=localization_loss, + classification_loss_weight=classification_loss_weight, + localization_loss_weight=localization_loss_weight, + normalize_loss_by_num_matches=normalize_loss_by_num_matches, + hard_example_miner=hard_example_miner, + target_assigner_instance=target_assigner_instance, + add_summaries=add_summaries) + self._unroll_length = unroll_length + + @property + def unroll_length(self): + return self._unroll_length + + @unroll_length.setter + def unroll_length(self, unroll_length): + self._unroll_length = unroll_length + + def predict(self, preprocessed_inputs, true_image_shapes, states=None, + state_name='lstm_state', feature_scope=None): + with tf.variable_scope(self._extract_features_scope, + values=[preprocessed_inputs], reuse=tf.AUTO_REUSE): + feature_maps = self._feature_extractor.extract_features( + preprocessed_inputs, 
states, state_name, + unroll_length=self._unroll_length, scope=feature_scope) + feature_map_spatial_dims = self._get_feature_map_spatial_dims(feature_maps) + image_shape = shape_utils.combined_static_and_dynamic_shape( + preprocessed_inputs) + self._batch_size = preprocessed_inputs.shape[0].value / self._unroll_length + self._states = states + anchors = self._anchor_generator.generate(feature_map_spatial_dims, + im_height=image_shape[1], + im_width=image_shape[2]) + with tf.variable_scope('MultipleGridAnchorGenerator', reuse=tf.AUTO_REUSE): + self._anchors = box_list_ops.concatenate(anchors) + prediction_dict = self._box_predictor.predict( + feature_maps, self._anchor_generator.num_anchors_per_location()) + with tf.variable_scope('Loss', reuse=tf.AUTO_REUSE): + box_encodings = tf.concat(prediction_dict['box_encodings'], axis=1) + if box_encodings.shape.ndims == 4 and box_encodings.shape[2] == 1: + box_encodings = tf.squeeze(box_encodings, axis=2) + class_predictions_with_background = tf.concat( + prediction_dict['class_predictions_with_background'], axis=1) + predictions_dict = { + 'preprocessed_inputs': preprocessed_inputs, + 'box_encodings': box_encodings, + 'class_predictions_with_background': class_predictions_with_background, + 'feature_maps': feature_maps, + 'anchors': self._anchors.get(), + 'states_and_outputs': self._feature_extractor.states_and_outputs, + } + # In cases such as exporting the model, the states is always zero. Thus the + # step should be ignored. + if states is not None: + predictions_dict['step'] = self._feature_extractor.step + return predictions_dict + + def loss(self, prediction_dict, true_image_shapes, scope=None): + """Computes scalar loss tensors with respect to provided groundtruth. + + Calling this function requires that groundtruth tensors have been + provided via the provide_groundtruth function. + + Args: + prediction_dict: a dictionary holding prediction tensors with + 1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors, + box_code_dimension] containing predicted boxes. + 2) class_predictions_with_background: 3-D float tensor of shape + [batch_size, num_anchors, num_classes+1] containing class predictions + (logits) for each of the anchors. Note that this tensor *includes* + background class predictions. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + scope: Optional scope name. + + Returns: + a dictionary mapping loss keys (`localization_loss` and + `classification_loss`) to scalar tensors representing corresponding loss + values. 
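+      Both entries are already scaled by their respective loss weights and,
+      when normalize_loss_by_num_matches is enabled, divided by the number of
+      matched anchors (the localization term optionally also by the box
+      coder's code size).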
+ """ + with tf.name_scope(scope, 'Loss', prediction_dict.values()): + keypoints = None + if self.groundtruth_has_field(fields.BoxListFields.keypoints): + keypoints = self.groundtruth_lists(fields.BoxListFields.keypoints) + weights = None + if self.groundtruth_has_field(fields.BoxListFields.weights): + weights = self.groundtruth_lists(fields.BoxListFields.weights) + (batch_cls_targets, batch_cls_weights, batch_reg_targets, + batch_reg_weights, batch_match) = self._assign_targets( + self.groundtruth_lists(fields.BoxListFields.boxes), + self.groundtruth_lists(fields.BoxListFields.classes), + keypoints, weights) + match_list = [matcher.Match(match) for match in tf.unstack(batch_match)] + if self._add_summaries: + self._summarize_target_assignment( + self.groundtruth_lists(fields.BoxListFields.boxes), match_list) + location_losses = self._localization_loss( + prediction_dict['box_encodings'], + batch_reg_targets, + ignore_nan_targets=True, + weights=batch_reg_weights) + cls_losses = ops.reduce_sum_trailing_dimensions( + self._classification_loss( + prediction_dict['class_predictions_with_background'], + batch_cls_targets, + weights=batch_cls_weights), + ndims=2) + + if self._hard_example_miner: + (loc_loss_list, cls_loss_list) = self._apply_hard_mining( + location_losses, cls_losses, prediction_dict, match_list) + localization_loss = tf.reduce_sum(tf.stack(loc_loss_list)) + classification_loss = tf.reduce_sum(tf.stack(cls_loss_list)) + + if self._add_summaries: + self._hard_example_miner.summarize() + else: + if self._add_summaries: + class_ids = tf.argmax(batch_cls_targets, axis=2) + flattened_class_ids = tf.reshape(class_ids, [-1]) + flattened_classification_losses = tf.reshape(cls_losses, [-1]) + self._summarize_anchor_classification_loss( + flattened_class_ids, flattened_classification_losses) + localization_loss = tf.reduce_sum(location_losses) + classification_loss = tf.reduce_sum(cls_losses) + + # Optionally normalize by number of positive matches + normalizer = tf.constant(1.0, dtype=tf.float32) + if self._normalize_loss_by_num_matches: + normalizer = tf.maximum(tf.to_float(tf.reduce_sum(batch_reg_weights)), + 1.0) + + with tf.name_scope('localization_loss'): + localization_loss_normalizer = normalizer + if self._normalize_loc_loss_by_codesize: + localization_loss_normalizer *= self._box_coder.code_size + localization_loss = ((self._localization_loss_weight / ( + localization_loss_normalizer)) * localization_loss) + with tf.name_scope('classification_loss'): + classification_loss = ((self._classification_loss_weight / normalizer) * + classification_loss) + + loss_dict = { + 'localization_loss': localization_loss, + 'classification_loss': classification_loss + } + return loss_dict + + def restore_map(self, fine_tune_checkpoint_type='lstm'): + """Returns a map of variables to load from a foreign checkpoint. + + See parent class for details. + + Args: + fine_tune_checkpoint_type: the type of checkpoint to restore from, either + SSD/LSTM detection checkpoint (with compatible variable names) + classification checkpoint for initialization prior to training. + Available options: `classification`, `detection`, `interleaved`, + and `lstm`. + + Returns: + A dict mapping variable names (to load from a checkpoint) to variables in + the model graph. + Raises: + ValueError: if fine_tune_checkpoint_type is not among + `classification`/`detection`/`interleaved`/`lstm`. 
+ """ + if fine_tune_checkpoint_type not in [ + 'classification', 'detection', 'interleaved', 'lstm', + 'interleaved_pretrain' + ]: + raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format( + fine_tune_checkpoint_type)) + + self._restored_networks += 1 + base_network_scope = self.get_base_network_scope() + if base_network_scope: + scope_to_replace = '{0}_{1}'.format(base_network_scope, + self._restored_networks) + + interleaved_model = False + for variable in tf.global_variables(): + if scope_to_replace in variable.op.name: + interleaved_model = True + break + + variables_to_restore = {} + for variable in tf.global_variables(): + var_name = variable.op.name + if 'global_step' in var_name: + continue + + # Remove FeatureExtractor prefix for classification checkpoints. + if (fine_tune_checkpoint_type == 'classification' or + fine_tune_checkpoint_type == 'interleaved_pretrain'): + var_name = ( + re.split('^' + self._extract_features_scope + '/', var_name)[-1]) + + # When loading from single frame detection checkpoints, we need to + # remap FeatureMaps variable names. + if ('FeatureMaps' in var_name and + fine_tune_checkpoint_type == 'detection'): + var_name = var_name.replace('FeatureMaps', + self.get_base_network_scope()) + + # Load interleaved checkpoint specifically. + if interleaved_model: # Interleaved LSTD. + if 'interleaved' in fine_tune_checkpoint_type: + variables_to_restore[var_name] = variable + else: + # Restore non-base layers from the first checkpoint only. + if self._restored_networks == 1: + if base_network_scope + '_' not in var_name: # LSTM and FeatureMap + variables_to_restore[var_name] = variable + if scope_to_replace in var_name: + var_name = var_name.replace(scope_to_replace, base_network_scope) + variables_to_restore[var_name] = variable + else: + # Restore from the first model of interleaved checkpoints + if 'interleaved' in fine_tune_checkpoint_type: + var_name = var_name.replace(self.get_base_network_scope(), + self.get_base_network_scope() + '_1', 1) + + variables_to_restore[var_name] = variable + + return variables_to_restore + + def get_base_network_scope(self): + """Returns the variable scope of the base network. + + Returns: + The variable scope of the feature extractor base network, e.g. MobilenetV1 + """ + return self._feature_extractor.get_base_network_scope() + + +class LSTMSSDFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): + """LSTM SSD Meta-architecture Feature Extractor definition.""" + + __metaclass__ = abc.ABCMeta + + @property + def clip_state(self): + return self._clip_state + + @clip_state.setter + def clip_state(self, clip_state): + self._clip_state = clip_state + + @property + def depth_multipliers(self): + return self._depth_multipliers + + @depth_multipliers.setter + def depth_multipliers(self, depth_multipliers): + self._depth_multipliers = depth_multipliers + + @property + def lstm_state_depth(self): + return self._lstm_state_depth + + @lstm_state_depth.setter + def lstm_state_depth(self, lstm_state_depth): + self._lstm_state_depth = lstm_state_depth + + @property + def is_quantized(self): + return self._is_quantized + + @is_quantized.setter + def is_quantized(self, is_quantized): + self._is_quantized = is_quantized + + @property + def interleaved(self): + return False + + @property + def states_and_outputs(self): + """LSTM states and outputs. + + This variable includes both LSTM states {C_t} and outputs {h_t}. 
+ + Returns: + states_and_outputs: A list of 4-D float tensors, including the lstm state + and output at each timestep. + """ + return self._states_out + + @property + def step(self): + return self._step + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def get_base_network_scope(self): + """Returns the variable scope of the base network. + + Returns: + The variable scope of the base network, e.g. MobilenetV1 + """ + return self._base_network_scope + + @abc.abstractmethod + def create_lstm_cell(self, batch_size, output_size, state_saver, state_name): + """Create the LSTM cell, and initialize state if necessary. + + Args: + batch_size: input batch size. + output_size: output size of the lstm cell, [width, height]. + state_saver: a state saver object with methods `state` and `save_state`. + state_name: string, the name to use with the state_saver. + Returns: + lstm_cell: the lstm cell unit. + init_state: initial state representations. + step: the step + """ + pass + + +class LSTMSSDInterleavedFeatureExtractor(LSTMSSDFeatureExtractor): + """LSTM SSD Meta-architecture Interleaved Feature Extractor definition.""" + + __metaclass__ = abc.ABCMeta + + @property + def pre_bottleneck(self): + return self._pre_bottleneck + + @pre_bottleneck.setter + def pre_bottleneck(self, pre_bottleneck): + self._pre_bottleneck = pre_bottleneck + + @property + def low_res(self): + return self._low_res + + @low_res.setter + def low_res(self, low_res): + self._low_res = low_res + + @property + def interleaved(self): + return True + + @property + def interleave_method(self): + return self._interleave_method + + @interleave_method.setter + def interleave_method(self, interleave_method): + self._interleave_method = interleave_method + + @abc.abstractmethod + def extract_base_features_large(self, preprocessed_inputs): + """Extract the large base model features. + + Args: + preprocessed_inputs: preprocessed input images of shape: + [batch, width, height, depth]. + + Returns: + net: the last feature map created from the base feature extractor. + end_points: a dictionary of feature maps created. + """ + pass + + @abc.abstractmethod + def extract_base_features_small(self, preprocessed_inputs): + """Extract the small base model features. + + Args: + preprocessed_inputs: preprocessed input images of shape: + [batch, width, height, depth]. + + Returns: + net: the last feature map created from the base feature extractor. + end_points: a dictionary of feature maps created. + """ + pass diff --git a/models/research/lstm_object_detection/meta_architectures/lstm_ssd_meta_arch_test.py b/models/research/lstm_object_detection/meta_architectures/lstm_ssd_meta_arch_test.py new file mode 100644 index 0000000000000000000000000000000000000000..03e8a1274603806c19bc36ad09022c9b4d6ca91b --- /dev/null +++ b/models/research/lstm_object_detection/meta_architectures/lstm_ssd_meta_arch_test.py @@ -0,0 +1,320 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for meta_architectures.lstm_ssd_meta_arch.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools + +import numpy as np +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from lstm_object_detection.lstm import lstm_cells +from lstm_object_detection.meta_architectures import lstm_ssd_meta_arch +from object_detection.core import anchor_generator +from object_detection.core import box_list +from object_detection.core import losses +from object_detection.core import post_processing +from object_detection.core import region_similarity_calculator as sim_calc +from object_detection.core import standard_fields as fields +from object_detection.core import target_assigner +from object_detection.models import feature_map_generators +from object_detection.utils import test_case +from object_detection.utils import test_utils + + +MAX_TOTAL_NUM_BOXES = 5 +NUM_CLASSES = 1 + + +class FakeLSTMFeatureExtractor( + lstm_ssd_meta_arch.LSTMSSDFeatureExtractor): + + def __init__(self): + super(FakeLSTMFeatureExtractor, self).__init__( + is_training=True, + depth_multiplier=1.0, + min_depth=0, + pad_to_multiple=1, + conv_hyperparams_fn=self.scope_fn) + self._lstm_state_depth = 256 + + def scope_fn(self): + with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu6) as sc: + return sc + + def create_lstm_cell(self): + pass + + def extract_features(self, preprocessed_inputs, state_saver=None, + state_name='lstm_state', unroll_length=5, scope=None): + with tf.variable_scope('mock_model'): + net = slim.conv2d(inputs=preprocessed_inputs, num_outputs=32, + kernel_size=1, scope='layer1') + image_features = {'last_layer': net} + + self._states_out = {} + feature_map_layout = { + 'from_layer': ['last_layer'], + 'layer_depth': [-1], + 'use_explicit_padding': self._use_explicit_padding, + 'use_depthwise': self._use_depthwise, + } + feature_maps = feature_map_generators.multi_resolution_feature_maps( + feature_map_layout=feature_map_layout, + depth_multiplier=(self._depth_multiplier), + min_depth=self._min_depth, + insert_1x1_conv=True, + image_features=image_features) + return list(feature_maps.values()) + + +class FakeLSTMInterleavedFeatureExtractor( + lstm_ssd_meta_arch.LSTMSSDInterleavedFeatureExtractor): + + def __init__(self): + super(FakeLSTMInterleavedFeatureExtractor, self).__init__( + is_training=True, + depth_multiplier=1.0, + min_depth=0, + pad_to_multiple=1, + conv_hyperparams_fn=self.scope_fn) + self._lstm_state_depth = 256 + + def scope_fn(self): + with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu6) as sc: + return sc + + def create_lstm_cell(self): + pass + + def extract_base_features_large(self, preprocessed_inputs): + with tf.variable_scope('base_large'): + net = slim.conv2d(inputs=preprocessed_inputs, num_outputs=32, + kernel_size=1, scope='layer1') + return net + + def extract_base_features_small(self, preprocessed_inputs): + with tf.variable_scope('base_small'): + net = 
slim.conv2d(inputs=preprocessed_inputs, num_outputs=32, + kernel_size=1, scope='layer1') + return net + + def extract_features(self, preprocessed_inputs, state_saver=None, + state_name='lstm_state', unroll_length=5, scope=None): + with tf.variable_scope('mock_model'): + net_large = self.extract_base_features_large(preprocessed_inputs) + net_small = self.extract_base_features_small(preprocessed_inputs) + net = slim.conv2d( + inputs=tf.concat([net_large, net_small], axis=3), + num_outputs=32, + kernel_size=1, + scope='layer1') + image_features = {'last_layer': net} + + self._states_out = {} + feature_map_layout = { + 'from_layer': ['last_layer'], + 'layer_depth': [-1], + 'use_explicit_padding': self._use_explicit_padding, + 'use_depthwise': self._use_depthwise, + } + feature_maps = feature_map_generators.multi_resolution_feature_maps( + feature_map_layout=feature_map_layout, + depth_multiplier=(self._depth_multiplier), + min_depth=self._min_depth, + insert_1x1_conv=True, + image_features=image_features) + return list(feature_maps.values()) + + +class MockAnchorGenerator2x2(anchor_generator.AnchorGenerator): + """Sets up a simple 2x2 anchor grid on the unit square.""" + + def name_scope(self): + return 'MockAnchorGenerator' + + def num_anchors_per_location(self): + return [1] + + def _generate(self, feature_map_shape_list, im_height, im_width): + return [box_list.BoxList( + tf.constant([[0, 0, .5, .5], + [0, .5, .5, 1], + [.5, 0, 1, .5], + [1., 1., 1.5, 1.5] # Anchor that is outside clip_window. + ], tf.float32))] + + def num_anchors(self): + return 4 + + +class LSTMSSDMetaArchTest(test_case.TestCase): + + def _create_model(self, + interleaved=False, + apply_hard_mining=True, + normalize_loc_loss_by_codesize=False, + add_background_class=True, + random_example_sampling=False, + use_expected_classification_loss_under_sampling=False, + min_num_negative_samples=1, + desired_negative_sampling_ratio=3, + unroll_length=1): + num_classes = NUM_CLASSES + is_training = False + mock_anchor_generator = MockAnchorGenerator2x2() + mock_box_predictor = test_utils.MockBoxPredictor(is_training, num_classes) + mock_box_coder = test_utils.MockBoxCoder() + if interleaved: + fake_feature_extractor = FakeLSTMInterleavedFeatureExtractor() + else: + fake_feature_extractor = FakeLSTMFeatureExtractor() + mock_matcher = test_utils.MockMatcher() + region_similarity_calculator = sim_calc.IouSimilarity() + encode_background_as_zeros = False + def image_resizer_fn(image): + return [tf.identity(image), tf.shape(image)] + + classification_loss = losses.WeightedSigmoidClassificationLoss() + localization_loss = losses.WeightedSmoothL1LocalizationLoss() + non_max_suppression_fn = functools.partial( + post_processing.batch_multiclass_non_max_suppression, + score_thresh=-20.0, + iou_thresh=1.0, + max_size_per_class=5, + max_total_size=MAX_TOTAL_NUM_BOXES) + classification_loss_weight = 1.0 + localization_loss_weight = 1.0 + negative_class_weight = 1.0 + normalize_loss_by_num_matches = False + + hard_example_miner = None + if apply_hard_mining: + # This hard example miner is expected to be a no-op. 
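+      # With num_hard_examples=None all examples are kept and with
+      # iou_threshold=1.0 essentially nothing is suppressed, so mining
+      # leaves the per-anchor losses unchanged.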
+ hard_example_miner = losses.HardExampleMiner( + num_hard_examples=None, + iou_threshold=1.0) + + target_assigner_instance = target_assigner.TargetAssigner( + region_similarity_calculator, + mock_matcher, + mock_box_coder, + negative_class_weight=negative_class_weight) + + code_size = 4 + model = lstm_ssd_meta_arch.LSTMSSDMetaArch( + is_training=is_training, + anchor_generator=mock_anchor_generator, + box_predictor=mock_box_predictor, + box_coder=mock_box_coder, + feature_extractor=fake_feature_extractor, + encode_background_as_zeros=encode_background_as_zeros, + image_resizer_fn=image_resizer_fn, + non_max_suppression_fn=non_max_suppression_fn, + score_conversion_fn=tf.identity, + classification_loss=classification_loss, + localization_loss=localization_loss, + classification_loss_weight=classification_loss_weight, + localization_loss_weight=localization_loss_weight, + normalize_loss_by_num_matches=normalize_loss_by_num_matches, + hard_example_miner=hard_example_miner, + unroll_length=unroll_length, + target_assigner_instance=target_assigner_instance, + add_summaries=False) + return model, num_classes, mock_anchor_generator.num_anchors(), code_size + + def _get_value_for_matching_key(self, dictionary, suffix): + for key in dictionary.keys(): + if key.endswith(suffix): + return dictionary[key] + raise ValueError('key not found {}'.format(suffix)) + + def test_predict_returns_correct_items_and_sizes(self): + batch_size = 3 + height = width = 2 + num_unroll = 1 + + graph = tf.Graph() + with graph.as_default(): + model, num_classes, num_anchors, code_size = self._create_model() + preprocessed_images = tf.random_uniform( + [batch_size * num_unroll, height, width, 3], + minval=-1., + maxval=1.) + true_image_shapes = tf.tile( + [[height, width, 3]], [batch_size, 1]) + prediction_dict = model.predict(preprocessed_images, true_image_shapes) + + + self.assertIn('preprocessed_inputs', prediction_dict) + self.assertIn('box_encodings', prediction_dict) + self.assertIn('class_predictions_with_background', prediction_dict) + self.assertIn('feature_maps', prediction_dict) + self.assertIn('anchors', prediction_dict) + self.assertAllEqual( + [batch_size * num_unroll, height, width, 3], + prediction_dict['preprocessed_inputs'].shape.as_list()) + self.assertAllEqual( + [batch_size * num_unroll, num_anchors, code_size], + prediction_dict['box_encodings'].shape.as_list()) + self.assertAllEqual( + [batch_size * num_unroll, num_anchors, num_classes + 1], + prediction_dict['class_predictions_with_background'].shape.as_list()) + self.assertAllEqual( + [num_anchors, code_size], + prediction_dict['anchors'].shape.as_list()) + + def test_interleaved_predict_returns_correct_items_and_sizes(self): + batch_size = 3 + height = width = 2 + num_unroll = 1 + + graph = tf.Graph() + with graph.as_default(): + model, num_classes, num_anchors, code_size = self._create_model( + interleaved=True) + preprocessed_images = tf.random_uniform( + [batch_size * num_unroll, height, width, 3], + minval=-1., + maxval=1.) 
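+      # Frames are flattened over time: predict() expects a tensor of shape
+      # [batch_size * unroll_length, height, width, 3] and recovers
+      # batch_size by dividing the leading dimension by unroll_length
+      # (see LSTMSSDMetaArch.predict above).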
+ true_image_shapes = tf.tile( + [[height, width, 3]], [batch_size, 1]) + prediction_dict = model.predict(preprocessed_images, true_image_shapes) + + self.assertIn('preprocessed_inputs', prediction_dict) + self.assertIn('box_encodings', prediction_dict) + self.assertIn('class_predictions_with_background', prediction_dict) + self.assertIn('feature_maps', prediction_dict) + self.assertIn('anchors', prediction_dict) + self.assertAllEqual( + [batch_size * num_unroll, height, width, 3], + prediction_dict['preprocessed_inputs'].shape.as_list()) + self.assertAllEqual( + [batch_size * num_unroll, num_anchors, code_size], + prediction_dict['box_encodings'].shape.as_list()) + self.assertAllEqual( + [batch_size * num_unroll, num_anchors, num_classes + 1], + prediction_dict['class_predictions_with_background'].shape.as_list()) + self.assertAllEqual( + [num_anchors, code_size], + prediction_dict['anchors'].shape.as_list()) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/lstm_object_detection/metrics/__init__.py b/models/research/lstm_object_detection/metrics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/lstm_object_detection/metrics/coco_evaluation_all_frames.py b/models/research/lstm_object_detection/metrics/coco_evaluation_all_frames.py new file mode 100644 index 0000000000000000000000000000000000000000..8e6d336cbf71ecfdf5f438b6f74e078db1a6fb17 --- /dev/null +++ b/models/research/lstm_object_detection/metrics/coco_evaluation_all_frames.py @@ -0,0 +1,124 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Class for evaluating video object detections with COCO metrics.""" + +import tensorflow.compat.v1 as tf + +from object_detection.core import standard_fields +from object_detection.metrics import coco_evaluation +from object_detection.metrics import coco_tools + + +class CocoEvaluationAllFrames(coco_evaluation.CocoDetectionEvaluator): + """Class to evaluate COCO detection metrics for frame sequences. + + The class overrides two functions: add_single_ground_truth_image_info and + add_single_detected_image_info. + + For the evaluation of sequence video detection, by iterating through the + entire groundtruth_dict, all the frames in the unrolled frames in one LSTM + training sample are considered. Therefore, both groundtruth and detection + results of all frames are added for the evaluation. This is used when all the + frames are labeled in the video object detection training job. + """ + + def add_single_ground_truth_image_info(self, image_id, groundtruth_dict): + """Add groundtruth results of all frames to the eval pipeline. + + This method overrides the function defined in the base class. + + Args: + image_id: A unique string/integer identifier for the image. 
+ groundtruth_dict: A list of dictionary containing - + InputDataFields.groundtruth_boxes: float32 numpy array of shape + [num_boxes, 4] containing `num_boxes` groundtruth boxes of the format + [ymin, xmin, ymax, xmax] in absolute image coordinates. + InputDataFields.groundtruth_classes: integer numpy array of shape + [num_boxes] containing 1-indexed groundtruth classes for the boxes. + InputDataFields.groundtruth_is_crowd (optional): integer numpy array of + shape [num_boxes] containing iscrowd flag for groundtruth boxes. + """ + for idx, gt in enumerate(groundtruth_dict): + if not gt: + continue + + image_frame_id = '{}_{}'.format(image_id, idx) + if image_frame_id in self._image_ids: + tf.logging.warning( + 'Ignoring ground truth with image id %s since it was ' + 'previously added', image_frame_id) + continue + + self._groundtruth_list.extend( + coco_tools.ExportSingleImageGroundtruthToCoco( + image_id=image_frame_id, + next_annotation_id=self._annotation_id, + category_id_set=self._category_id_set, + groundtruth_boxes=gt[ + standard_fields.InputDataFields.groundtruth_boxes], + groundtruth_classes=gt[ + standard_fields.InputDataFields.groundtruth_classes])) + self._annotation_id += ( + gt[standard_fields.InputDataFields.groundtruth_boxes].shape[0]) + + # Boolean to indicate whether a detection has been added for this image. + self._image_ids[image_frame_id] = False + + def add_single_detected_image_info(self, image_id, detections_dict): + """Add detection results of all frames to the eval pipeline. + + This method overrides the function defined in the base class. + + Args: + image_id: A unique string/integer identifier for the image. + detections_dict: A list of dictionary containing - + DetectionResultFields.detection_boxes: float32 numpy array of shape + [num_boxes, 4] containing `num_boxes` detection boxes of the format + [ymin, xmin, ymax, xmax] in absolute image coordinates. + DetectionResultFields.detection_scores: float32 numpy array of shape + [num_boxes] containing detection scores for the boxes. + DetectionResultFields.detection_classes: integer numpy array of shape + [num_boxes] containing 1-indexed detection classes for the boxes. + + Raises: + ValueError: If groundtruth for the image_id is not available. 
+ """ + for idx, det in enumerate(detections_dict): + if not det: + continue + + image_frame_id = '{}_{}'.format(image_id, idx) + if image_frame_id not in self._image_ids: + raise ValueError( + 'Missing groundtruth for image-frame id: {}'.format(image_frame_id)) + + if self._image_ids[image_frame_id]: + tf.logging.warning( + 'Ignoring detection with image id %s since it was ' + 'previously added', image_frame_id) + continue + + self._detection_boxes_list.extend( + coco_tools.ExportSingleImageDetectionBoxesToCoco( + image_id=image_frame_id, + category_id_set=self._category_id_set, + detection_boxes=det[ + standard_fields.DetectionResultFields.detection_boxes], + detection_scores=det[ + standard_fields.DetectionResultFields.detection_scores], + detection_classes=det[ + standard_fields.DetectionResultFields.detection_classes])) + self._image_ids[image_frame_id] = True diff --git a/models/research/lstm_object_detection/metrics/coco_evaluation_all_frames_test.py b/models/research/lstm_object_detection/metrics/coco_evaluation_all_frames_test.py new file mode 100644 index 0000000000000000000000000000000000000000..9c1e7b7546b037d974bde9e3dadef94d7535235b --- /dev/null +++ b/models/research/lstm_object_detection/metrics/coco_evaluation_all_frames_test.py @@ -0,0 +1,156 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for video_object_detection.metrics.coco_video_evaluation.""" + +import numpy as np +import tensorflow.compat.v1 as tf +from lstm_object_detection.metrics import coco_evaluation_all_frames +from object_detection.core import standard_fields + + +class CocoEvaluationAllFramesTest(tf.test.TestCase): + + def testGroundtruthAndDetectionsDisagreeOnAllFrames(self): + """Tests that mAP is calculated on several different frame results.""" + category_list = [{'id': 0, 'name': 'dog'}, {'id': 1, 'name': 'cat'}] + video_evaluator = coco_evaluation_all_frames.CocoEvaluationAllFrames( + category_list) + video_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict=[{ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[50., 50., 200., 200.]]), + standard_fields.InputDataFields.groundtruth_classes: + np.array([1]) + }, { + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[50., 50., 100., 100.]]), + standard_fields.InputDataFields.groundtruth_classes: + np.array([1]) + }]) + video_evaluator.add_single_detected_image_info( + image_id='image1', + # A different groundtruth box on the frame other than the last one. 
+ detections_dict=[{ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]) + }, { + standard_fields.DetectionResultFields.detection_boxes: + np.array([[50., 50., 100., 100.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]) + }]) + + metrics = video_evaluator.evaluate() + self.assertNotEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0) + + def testGroundtruthAndDetections(self): + """Tests that mAP is calculated correctly on GT and Detections.""" + category_list = [{'id': 0, 'name': 'dog'}, {'id': 1, 'name': 'cat'}] + video_evaluator = coco_evaluation_all_frames.CocoEvaluationAllFrames( + category_list) + video_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict=[{ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.InputDataFields.groundtruth_classes: + np.array([1]) + }]) + video_evaluator.add_single_ground_truth_image_info( + image_id='image2', + groundtruth_dict=[{ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[50., 50., 100., 100.]]), + standard_fields.InputDataFields.groundtruth_classes: + np.array([1]) + }]) + video_evaluator.add_single_ground_truth_image_info( + image_id='image3', + groundtruth_dict=[{ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[50., 100., 100., 120.]]), + standard_fields.InputDataFields.groundtruth_classes: + np.array([1]) + }]) + video_evaluator.add_single_detected_image_info( + image_id='image1', + detections_dict=[{ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]) + }]) + video_evaluator.add_single_detected_image_info( + image_id='image2', + detections_dict=[{ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[50., 50., 100., 100.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]) + }]) + video_evaluator.add_single_detected_image_info( + image_id='image3', + detections_dict=[{ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[50., 100., 100., 120.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]) + }]) + metrics = video_evaluator.evaluate() + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0) + + def testMissingDetectionResults(self): + """Tests if groundtrue is missing, raises ValueError.""" + category_list = [{'id': 0, 'name': 'dog'}] + video_evaluator = coco_evaluation_all_frames.CocoEvaluationAllFrames( + category_list) + video_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict=[{ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.InputDataFields.groundtruth_classes: + np.array([1]) + }]) + with self.assertRaisesRegexp(ValueError, + r'Missing groundtruth for image-frame id:.*'): + video_evaluator.add_single_detected_image_info( + image_id='image3', + detections_dict=[{ + 
standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]) + }]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/lstm_object_detection/model_builder.py b/models/research/lstm_object_detection/model_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..d622558cf75f6664f9a1b075e3ed690caf457f68 --- /dev/null +++ b/models/research/lstm_object_detection/model_builder.py @@ -0,0 +1,192 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A function to build a DetectionModel from configuration.""" +from lstm_object_detection.meta_architectures import lstm_ssd_meta_arch +from lstm_object_detection.models import lstm_ssd_interleaved_mobilenet_v2_feature_extractor +from lstm_object_detection.models import lstm_ssd_mobilenet_v1_feature_extractor +from object_detection.builders import anchor_generator_builder +from object_detection.builders import box_coder_builder +from object_detection.builders import box_predictor_builder +from object_detection.builders import hyperparams_builder +from object_detection.builders import image_resizer_builder +from object_detection.builders import losses_builder +from object_detection.builders import matcher_builder +from object_detection.builders import model_builder +from object_detection.builders import post_processing_builder +from object_detection.builders import region_similarity_calculator_builder as sim_calc +from object_detection.core import target_assigner + +model_builder.SSD_FEATURE_EXTRACTOR_CLASS_MAP.update({ + 'lstm_ssd_mobilenet_v1': + lstm_ssd_mobilenet_v1_feature_extractor + .LSTMSSDMobileNetV1FeatureExtractor, + 'lstm_ssd_interleaved_mobilenet_v2': + lstm_ssd_interleaved_mobilenet_v2_feature_extractor + .LSTMSSDInterleavedMobilenetV2FeatureExtractor, +}) +SSD_FEATURE_EXTRACTOR_CLASS_MAP = model_builder.SSD_FEATURE_EXTRACTOR_CLASS_MAP + + +def build(model_config, lstm_config, is_training): + """Builds a DetectionModel based on the model config. + + Args: + model_config: A model.proto object containing the config for the desired + DetectionModel. + lstm_config: LstmModel config proto that specifies LSTM train/eval configs. + is_training: True if this model is being built for training purposes. + + Returns: + DetectionModel based on the config. + + Raises: + ValueError: On invalid meta architecture or model. + """ + return _build_lstm_model(model_config.ssd, lstm_config, is_training) + + +def _build_lstm_feature_extractor(feature_extractor_config, + is_training, + lstm_config, + reuse_weights=None): + """Builds a ssd_meta_arch.SSDFeatureExtractor based on config. + + Args: + feature_extractor_config: A SSDFeatureExtractor proto config from ssd.proto. 
+ is_training: True if this feature extractor is being built for training. + lstm_config: LSTM-SSD specific configs. + reuse_weights: If the feature extractor should reuse weights. + + Returns: + ssd_meta_arch.SSDFeatureExtractor based on config. + + Raises: + ValueError: On invalid feature extractor type. + """ + + feature_type = feature_extractor_config.type + depth_multiplier = feature_extractor_config.depth_multiplier + min_depth = feature_extractor_config.min_depth + pad_to_multiple = feature_extractor_config.pad_to_multiple + use_explicit_padding = feature_extractor_config.use_explicit_padding + use_depthwise = feature_extractor_config.use_depthwise + conv_hyperparams = hyperparams_builder.build( + feature_extractor_config.conv_hyperparams, is_training) + override_base_feature_extractor_hyperparams = ( + feature_extractor_config.override_base_feature_extractor_hyperparams) + + if feature_type not in SSD_FEATURE_EXTRACTOR_CLASS_MAP: + raise ValueError('Unknown ssd feature_extractor: {}'.format(feature_type)) + + feature_extractor_class = SSD_FEATURE_EXTRACTOR_CLASS_MAP[feature_type] + feature_extractor = feature_extractor_class( + is_training, depth_multiplier, min_depth, pad_to_multiple, + conv_hyperparams, reuse_weights, use_explicit_padding, use_depthwise, + override_base_feature_extractor_hyperparams) + + # Extra configs for LSTM-SSD. + feature_extractor.lstm_state_depth = lstm_config.lstm_state_depth + feature_extractor.flatten_state = lstm_config.flatten_state + feature_extractor.clip_state = lstm_config.clip_state + feature_extractor.scale_state = lstm_config.scale_state + feature_extractor.is_quantized = lstm_config.is_quantized + feature_extractor.low_res = lstm_config.low_res + # Extra configs for interleaved LSTM-SSD. + if 'interleaved' in feature_extractor_config.type: + feature_extractor.pre_bottleneck = lstm_config.pre_bottleneck + feature_extractor.depth_multipliers = lstm_config.depth_multipliers + if is_training: + feature_extractor.interleave_method = lstm_config.train_interleave_method + else: + feature_extractor.interleave_method = lstm_config.eval_interleave_method + return feature_extractor + + +def _build_lstm_model(ssd_config, lstm_config, is_training): + """Builds an LSTM detection model based on the model config. + + Args: + ssd_config: A ssd.proto object containing the config for the desired + LSTMSSDMetaArch. + lstm_config: LstmModel config proto that specifies LSTM train/eval configs. + is_training: True if this model is being built for training purposes. + + Returns: + LSTMSSDMetaArch based on the config. + Raises: + ValueError: If ssd_config.type is not recognized (i.e. not registered in + model_class_map), or if lstm_config.interleave_strategy is not recognized. + ValueError: If unroll_length is not specified in the config file. 
+ """ + feature_extractor = _build_lstm_feature_extractor( + ssd_config.feature_extractor, is_training, lstm_config) + + box_coder = box_coder_builder.build(ssd_config.box_coder) + matcher = matcher_builder.build(ssd_config.matcher) + region_similarity_calculator = sim_calc.build( + ssd_config.similarity_calculator) + + num_classes = ssd_config.num_classes + ssd_box_predictor = box_predictor_builder.build(hyperparams_builder.build, + ssd_config.box_predictor, + is_training, num_classes) + anchor_generator = anchor_generator_builder.build(ssd_config.anchor_generator) + image_resizer_fn = image_resizer_builder.build(ssd_config.image_resizer) + non_max_suppression_fn, score_conversion_fn = post_processing_builder.build( + ssd_config.post_processing) + (classification_loss, localization_loss, classification_weight, + localization_weight, miner, _, _) = losses_builder.build(ssd_config.loss) + + normalize_loss_by_num_matches = ssd_config.normalize_loss_by_num_matches + encode_background_as_zeros = ssd_config.encode_background_as_zeros + negative_class_weight = ssd_config.negative_class_weight + + # Extra configs for lstm unroll length. + unroll_length = None + if 'lstm' in ssd_config.feature_extractor.type: + if is_training: + unroll_length = lstm_config.train_unroll_length + else: + unroll_length = lstm_config.eval_unroll_length + if unroll_length is None: + raise ValueError('No unroll length found in the config file') + + target_assigner_instance = target_assigner.TargetAssigner( + region_similarity_calculator, + matcher, + box_coder, + negative_class_weight=negative_class_weight) + + lstm_model = lstm_ssd_meta_arch.LSTMSSDMetaArch( + is_training=is_training, + anchor_generator=anchor_generator, + box_predictor=ssd_box_predictor, + box_coder=box_coder, + feature_extractor=feature_extractor, + encode_background_as_zeros=encode_background_as_zeros, + image_resizer_fn=image_resizer_fn, + non_max_suppression_fn=non_max_suppression_fn, + score_conversion_fn=score_conversion_fn, + classification_loss=classification_loss, + localization_loss=localization_loss, + classification_loss_weight=classification_weight, + localization_loss_weight=localization_weight, + normalize_loss_by_num_matches=normalize_loss_by_num_matches, + hard_example_miner=miner, + unroll_length=unroll_length, + target_assigner_instance=target_assigner_instance) + + return lstm_model diff --git a/models/research/lstm_object_detection/model_builder_test.py b/models/research/lstm_object_detection/model_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..9d64b537cdc4044d5302845c53a1a3e4ac700f39 --- /dev/null +++ b/models/research/lstm_object_detection/model_builder_test.py @@ -0,0 +1,302 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for lstm_object_detection.tensorflow.model_builder.""" + +import tensorflow.compat.v1 as tf +from google.protobuf import text_format +from lstm_object_detection import model_builder +from lstm_object_detection.meta_architectures import lstm_ssd_meta_arch +from lstm_object_detection.protos import pipeline_pb2 as internal_pipeline_pb2 +from object_detection.protos import pipeline_pb2 + + +class ModelBuilderTest(tf.test.TestCase): + + def create_train_model(self, model_config, lstm_config): + """Builds a DetectionModel based on the model config. + + Args: + model_config: A model.proto object containing the config for the desired + DetectionModel. + lstm_config: LstmModel config proto that specifies LSTM train/eval + configs. + + Returns: + DetectionModel based on the config. + """ + return model_builder.build(model_config, lstm_config, is_training=True) + + def create_eval_model(self, model_config, lstm_config): + """Builds a DetectionModel based on the model config. + + Args: + model_config: A model.proto object containing the config for the desired + DetectionModel. + lstm_config: LstmModel config proto that specifies LSTM train/eval + configs. + + Returns: + DetectionModel based on the config. + """ + return model_builder.build(model_config, lstm_config, is_training=False) + + def get_model_configs_from_proto(self): + """Creates a model text proto for testing. + + Returns: + A dictionary of model configs. + """ + + model_text_proto = """ + [lstm_object_detection.protos.lstm_model] { + train_unroll_length: 4 + eval_unroll_length: 4 + } + model { + ssd { + feature_extractor { + type: 'lstm_ssd_mobilenet_v1' + conv_hyperparams { + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + } + } + negative_class_weight: 2.0 + box_coder { + faster_rcnn_box_coder { + } + } + matcher { + argmax_matcher { + } + } + similarity_calculator { + iou_similarity { + } + } + anchor_generator { + ssd_anchor_generator { + aspect_ratios: 1.0 + } + } + image_resizer { + fixed_shape_resizer { + height: 320 + width: 320 + } + } + box_predictor { + convolutional_box_predictor { + conv_hyperparams { + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + } + } + } + normalize_loc_loss_by_codesize: true + loss { + classification_loss { + weighted_softmax { + } + } + localization_loss { + weighted_smooth_l1 { + } + } + } + } + }""" + + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + text_format.Merge(model_text_proto, pipeline_config) + + configs = {} + configs['model'] = pipeline_config.model + configs['lstm_model'] = pipeline_config.Extensions[ + internal_pipeline_pb2.lstm_model] + + return configs + + def get_interleaved_model_configs_from_proto(self): + """Creates an interleaved model text proto for testing. + + Returns: + A dictionary of model configs. 
+ """ + + model_text_proto = """ + [lstm_object_detection.protos.lstm_model] { + train_unroll_length: 4 + eval_unroll_length: 10 + lstm_state_depth: 320 + depth_multipliers: 1.4 + depth_multipliers: 0.35 + pre_bottleneck: true + low_res: true + train_interleave_method: 'RANDOM_SKIP_SMALL' + eval_interleave_method: 'SKIP3' + } + model { + ssd { + feature_extractor { + type: 'lstm_ssd_interleaved_mobilenet_v2' + conv_hyperparams { + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + } + } + negative_class_weight: 2.0 + box_coder { + faster_rcnn_box_coder { + } + } + matcher { + argmax_matcher { + } + } + similarity_calculator { + iou_similarity { + } + } + anchor_generator { + ssd_anchor_generator { + aspect_ratios: 1.0 + } + } + image_resizer { + fixed_shape_resizer { + height: 320 + width: 320 + } + } + box_predictor { + convolutional_box_predictor { + conv_hyperparams { + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + } + } + } + normalize_loc_loss_by_codesize: true + loss { + classification_loss { + weighted_softmax { + } + } + localization_loss { + weighted_smooth_l1 { + } + } + } + } + }""" + + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + text_format.Merge(model_text_proto, pipeline_config) + + configs = {} + configs['model'] = pipeline_config.model + configs['lstm_model'] = pipeline_config.Extensions[ + internal_pipeline_pb2.lstm_model] + + return configs + + def test_model_creation_from_valid_configs(self): + configs = self.get_model_configs_from_proto() + # Test model properties. + self.assertEqual(configs['model'].ssd.negative_class_weight, 2.0) + self.assertTrue(configs['model'].ssd.normalize_loc_loss_by_codesize) + self.assertEqual(configs['model'].ssd.feature_extractor.type, + 'lstm_ssd_mobilenet_v1') + + model = self.create_train_model(configs['model'], configs['lstm_model']) + # Test architechture type. + self.assertIsInstance(model, lstm_ssd_meta_arch.LSTMSSDMetaArch) + # Test LSTM unroll length. + self.assertEqual(model.unroll_length, 4) + + model = self.create_eval_model(configs['model'], configs['lstm_model']) + # Test architechture type. + self.assertIsInstance(model, lstm_ssd_meta_arch.LSTMSSDMetaArch) + # Test LSTM configs. + self.assertEqual(model.unroll_length, 4) + + def test_interleaved_model_creation_from_valid_configs(self): + configs = self.get_interleaved_model_configs_from_proto() + # Test model properties. + self.assertEqual(configs['model'].ssd.negative_class_weight, 2.0) + self.assertTrue(configs['model'].ssd.normalize_loc_loss_by_codesize) + self.assertEqual(configs['model'].ssd.feature_extractor.type, + 'lstm_ssd_interleaved_mobilenet_v2') + + model = self.create_train_model(configs['model'], configs['lstm_model']) + # Test architechture type. + self.assertIsInstance(model, lstm_ssd_meta_arch.LSTMSSDMetaArch) + # Test LSTM configs. + self.assertEqual(model.unroll_length, 4) + self.assertEqual(model._feature_extractor.lstm_state_depth, 320) + self.assertAllClose(model._feature_extractor.depth_multipliers, (1.4, 0.35)) + self.assertTrue(model._feature_extractor.pre_bottleneck) + self.assertTrue(model._feature_extractor.low_res) + self.assertEqual(model._feature_extractor.interleave_method, + 'RANDOM_SKIP_SMALL') + + model = self.create_eval_model(configs['model'], configs['lstm_model']) + # Test architechture type. + self.assertIsInstance(model, lstm_ssd_meta_arch.LSTMSSDMetaArch) + # Test LSTM configs. 
+ self.assertEqual(model.unroll_length, 10) + self.assertEqual(model._feature_extractor.lstm_state_depth, 320) + self.assertAllClose(model._feature_extractor.depth_multipliers, (1.4, 0.35)) + self.assertTrue(model._feature_extractor.pre_bottleneck) + self.assertTrue(model._feature_extractor.low_res) + self.assertEqual(model._feature_extractor.interleave_method, 'SKIP3') + + def test_model_creation_from_invalid_configs(self): + configs = self.get_model_configs_from_proto() + # Test model build failure with wrong input configs. + with self.assertRaises(AttributeError): + _ = self.create_train_model(configs['model'], configs['model']) + with self.assertRaises(AttributeError): + _ = self.create_eval_model(configs['model'], configs['model']) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/lstm_object_detection/models/__init__.py b/models/research/lstm_object_detection/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/lstm_object_detection/models/lstm_ssd_interleaved_mobilenet_v2_feature_extractor.py b/models/research/lstm_object_detection/models/lstm_ssd_interleaved_mobilenet_v2_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..5a2d4bd0bdceb39801b46b864f512273ae10f8bc --- /dev/null +++ b/models/research/lstm_object_detection/models/lstm_ssd_interleaved_mobilenet_v2_feature_extractor.py @@ -0,0 +1,298 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""LSTDInterleavedFeatureExtractor which interleaves multiple MobileNet V2.""" + +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from tensorflow.python.framework import ops as tf_ops +from lstm_object_detection.lstm import lstm_cells +from lstm_object_detection.lstm import rnn_decoder +from lstm_object_detection.meta_architectures import lstm_ssd_meta_arch +from lstm_object_detection.models import mobilenet_defs +from object_detection.models import feature_map_generators +from object_detection.utils import ops +from object_detection.utils import shape_utils +from nets.mobilenet import mobilenet +from nets.mobilenet import mobilenet_v2 + + +class LSTMSSDInterleavedMobilenetV2FeatureExtractor( + lstm_ssd_meta_arch.LSTMSSDInterleavedFeatureExtractor): + """LSTM-SSD Interleaved Feature Extractor using MobilenetV2 features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=True, + override_base_feature_extractor_hyperparams=False): + """Interleaved Feature Extractor for LSTD Models with MobileNet v2. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. 
+ pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is True. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + """ + super(LSTMSSDInterleavedMobilenetV2FeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams) + # RANDOM_SKIP_SMALL means the training policy is random and the small model + # does not update state during training. + if self._is_training: + self._interleave_method = 'RANDOM_SKIP_SMALL' + else: + self._interleave_method = 'SKIP9' + + self._flatten_state = False + self._scale_state = False + self._clip_state = True + self._pre_bottleneck = True + self._feature_map_layout = { + 'from_layer': ['layer_19', '', '', '', ''], + 'layer_depth': [-1, 256, 256, 256, 256], + 'use_depthwise': self._use_depthwise, + 'use_explicit_padding': self._use_explicit_padding, + } + self._low_res = True + self._base_network_scope = 'MobilenetV2' + + def extract_base_features_large(self, preprocessed_inputs): + """Extract the large base model features. + + Variables are created under the scope of /MobilenetV2_1/ + + Args: + preprocessed_inputs: preprocessed input images of shape: + [batch, width, height, depth]. + + Returns: + net: the last feature map created from the base feature extractor. + end_points: a dictionary of feature maps created. + """ + scope_name = self._base_network_scope + '_1' + with tf.variable_scope(scope_name, reuse=self._reuse_weights) as base_scope: + net, end_points = mobilenet_v2.mobilenet_base( + preprocessed_inputs, + depth_multiplier=self._depth_multipliers[0], + conv_defs=mobilenet_defs.mobilenet_v2_lite_def( + is_quantized=self._is_quantized), + use_explicit_padding=self._use_explicit_padding, + scope=base_scope) + return net, end_points + + def extract_base_features_small(self, preprocessed_inputs): + """Extract the small base model features. + + Variables are created under the scope of /MobilenetV2_2/ + + Args: + preprocessed_inputs: preprocessed input images of shape: + [batch, width, height, depth]. + + Returns: + net: the last feature map created from the base feature extractor. + end_points: a dictionary of feature maps created. + """ + scope_name = self._base_network_scope + '_2' + with tf.variable_scope(scope_name, reuse=self._reuse_weights) as base_scope: + if self._low_res: + height_small = preprocessed_inputs.get_shape().as_list()[1] // 2 + width_small = preprocessed_inputs.get_shape().as_list()[2] // 2 + inputs_small = tf.image.resize_images(preprocessed_inputs, + [height_small, width_small]) + # Create end point handle for tflite deployment. 
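+        # tf.name_scope(None) drops back to the graph root, so the identity
+        # op below is named exactly 'normalized_input_image_tensor_small'
+        # regardless of the surrounding MobilenetV2_2 scope, making the
+        # tensor easy to reference as a TFLite input.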
+ with tf.name_scope(None): + inputs_small = tf.identity( + inputs_small, name='normalized_input_image_tensor_small') + else: + inputs_small = preprocessed_inputs + net, end_points = mobilenet_v2.mobilenet_base( + inputs_small, + depth_multiplier=self._depth_multipliers[1], + conv_defs=mobilenet_defs.mobilenet_v2_lite_def( + is_quantized=self._is_quantized, low_res=self._low_res), + use_explicit_padding=self._use_explicit_padding, + scope=base_scope) + return net, end_points + + def create_lstm_cell(self, batch_size, output_size, state_saver, state_name, + dtype=tf.float32): + """Create the LSTM cell, and initialize state if necessary. + + Args: + batch_size: input batch size. + output_size: output size of the lstm cell, [width, height]. + state_saver: a state saver object with methods `state` and `save_state`. + state_name: string, the name to use with the state_saver. + dtype: dtype to initialize lstm state. + + Returns: + lstm_cell: the lstm cell unit. + init_state: initial state representations. + step: the step + """ + lstm_cell = lstm_cells.GroupedConvLSTMCell( + filter_size=(3, 3), + output_size=output_size, + num_units=max(self._min_depth, self._lstm_state_depth), + is_training=self._is_training, + activation=tf.nn.relu6, + flatten_state=self._flatten_state, + scale_state=self._scale_state, + clip_state=self._clip_state, + output_bottleneck=True, + pre_bottleneck=self._pre_bottleneck, + is_quantized=self._is_quantized, + visualize_gates=False) + + if state_saver is None: + init_state = lstm_cell.init_state('lstm_state', batch_size, dtype) + step = None + else: + step = state_saver.state(state_name + '_step') + c = state_saver.state(state_name + '_c') + h = state_saver.state(state_name + '_h') + c.set_shape([batch_size] + c.get_shape().as_list()[1:]) + h.set_shape([batch_size] + h.get_shape().as_list()[1:]) + init_state = (c, h) + return lstm_cell, init_state, step + + def extract_features(self, preprocessed_inputs, state_saver=None, + state_name='lstm_state', unroll_length=10, scope=None): + """Extract features from preprocessed inputs. + + The features include the base network features, lstm features and SSD + features, organized in the following name scope: + + /MobilenetV2_1/... + /MobilenetV2_2/... + /LSTM/... + /FeatureMap/... + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of consecutive frames from video clips. + state_saver: A state saver object with methods `state` and `save_state`. + state_name: Python string, the name to use with the state_saver. + unroll_length: number of steps to unroll the lstm. + scope: Scope for the base network of the feature extractor. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + Raises: + ValueError: if interleave_method not recognized or large and small base + network output feature maps of different sizes. + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + preprocessed_inputs = ops.pad_to_multiple( + preprocessed_inputs, self._pad_to_multiple) + batch_size = preprocessed_inputs.shape[0].value // unroll_length + batch_axis = 0 + nets = [] + + # Batch processing of mobilenet features. + with slim.arg_scope(mobilenet_v2.training_scope( + is_training=self._is_training, + bn_decay=0.9997)), \ + slim.arg_scope([mobilenet.depth_multiplier], + min_depth=self._min_depth, divisible_by=8): + # Big model. 
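+ # Both branches see the full (batch * unroll_length) stack of frames; the
+ # per-step choice between them happens later in the rnn decoder. The small
+ # branch halves its input resolution, but its low_res conv defs drop one
+ # stride-2 layer, so both branches produce feature maps of the same spatial
+ # size (e.g. 8x8 for 256x256 frames), which the ValueError check below
+ # enforces.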
+ net, _ = self.extract_base_features_large(preprocessed_inputs) + nets.append(net) + large_base_feature_shape = net.shape + + # Small models + net, _ = self.extract_base_features_small(preprocessed_inputs) + nets.append(net) + small_base_feature_shape = net.shape + if not (large_base_feature_shape[1] == small_base_feature_shape[1] and + large_base_feature_shape[2] == small_base_feature_shape[2]): + raise ValueError('Large and Small base network feature map dimension ' + 'not equal!') + + with slim.arg_scope(self._conv_hyperparams_fn()): + with tf.variable_scope('LSTM', reuse=self._reuse_weights): + output_size = (large_base_feature_shape[1], large_base_feature_shape[2]) + lstm_cell, init_state, step = self.create_lstm_cell( + batch_size, output_size, state_saver, state_name, + dtype=preprocessed_inputs.dtype) + + nets_seq = [ + tf.split(net, unroll_length, axis=batch_axis) for net in nets + ] + + net_seq, states_out = rnn_decoder.multi_input_rnn_decoder( + nets_seq, + init_state, + lstm_cell, + step, + selection_strategy=self._interleave_method, + is_training=self._is_training, + is_quantized=self._is_quantized, + pre_bottleneck=self._pre_bottleneck, + flatten_state=self._flatten_state, + scope=None) + self._states_out = states_out + + image_features = {} + if state_saver is not None: + self._step = state_saver.state(state_name + '_step') + batcher_ops = [ + state_saver.save_state(state_name + '_c', states_out[-1][0]), + state_saver.save_state(state_name + '_h', states_out[-1][1]), + state_saver.save_state(state_name + '_step', self._step + 1)] + with tf_ops.control_dependencies(batcher_ops): + image_features['layer_19'] = tf.concat(net_seq, 0) + else: + image_features['layer_19'] = tf.concat(net_seq, 0) + + # SSD layers. + with tf.variable_scope('FeatureMap'): + feature_maps = feature_map_generators.multi_resolution_feature_maps( + feature_map_layout=self._feature_map_layout, + depth_multiplier=self._depth_multiplier, + min_depth=self._min_depth, + insert_1x1_conv=True, + image_features=image_features, + pool_residual=True) + return list(feature_maps.values()) diff --git a/models/research/lstm_object_detection/models/lstm_ssd_interleaved_mobilenet_v2_feature_extractor_test.py b/models/research/lstm_object_detection/models/lstm_ssd_interleaved_mobilenet_v2_feature_extractor_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b285f0e44417a309f54973327c16b55c1169260f --- /dev/null +++ b/models/research/lstm_object_detection/models/lstm_ssd_interleaved_mobilenet_v2_feature_extractor_test.py @@ -0,0 +1,352 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for lstm_ssd_interleaved_mobilenet_v2_feature_extractor.""" + +import numpy as np +import tensorflow.compat.v1 as tf +import tf_slim as slim +from tensorflow.contrib import training as contrib_training + +from lstm_object_detection.models import lstm_ssd_interleaved_mobilenet_v2_feature_extractor +from object_detection.models import ssd_feature_extractor_test + + +class LSTMSSDInterleavedMobilenetV2FeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + is_quantized=False): + """Constructs a new feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + is_quantized: whether to quantize the graph. + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. + """ + min_depth = 32 + def conv_hyperparams_fn(): + with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm), \ + slim.arg_scope([slim.batch_norm], is_training=False) as sc: + return sc + feature_extractor = ( + lstm_ssd_interleaved_mobilenet_v2_feature_extractor + .LSTMSSDInterleavedMobilenetV2FeatureExtractor(False, depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn)) + feature_extractor.lstm_state_depth = int(320 * depth_multiplier) + feature_extractor.depth_multipliers = [ + depth_multiplier, depth_multiplier / 4.0 + ] + feature_extractor.is_quantized = is_quantized + return feature_extractor + + def test_feature_extractor_construct_with_expected_params(self): + def conv_hyperparams_fn(): + with (slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm) and + slim.arg_scope([slim.batch_norm], decay=0.97, epsilon=1e-3)) as sc: + return sc + + params = { + 'is_training': True, + 'depth_multiplier': .55, + 'min_depth': 9, + 'pad_to_multiple': 3, + 'conv_hyperparams_fn': conv_hyperparams_fn, + 'reuse_weights': False, + 'use_explicit_padding': True, + 'use_depthwise': False, + 'override_base_feature_extractor_hyperparams': True} + + feature_extractor = ( + lstm_ssd_interleaved_mobilenet_v2_feature_extractor + .LSTMSSDInterleavedMobilenetV2FeatureExtractor(**params)) + + self.assertEqual(params['is_training'], + feature_extractor._is_training) + self.assertEqual(params['depth_multiplier'], + feature_extractor._depth_multiplier) + self.assertEqual(params['min_depth'], + feature_extractor._min_depth) + self.assertEqual(params['pad_to_multiple'], + feature_extractor._pad_to_multiple) + self.assertEqual(params['conv_hyperparams_fn'], + feature_extractor._conv_hyperparams_fn) + self.assertEqual(params['reuse_weights'], + feature_extractor._reuse_weights) + self.assertEqual(params['use_explicit_padding'], + feature_extractor._use_explicit_padding) + self.assertEqual(params['use_depthwise'], + feature_extractor._use_depthwise) + self.assertEqual(params['override_base_feature_extractor_hyperparams'], + (feature_extractor. 
+ _override_base_feature_extractor_hyperparams)) + + def test_extract_features_returns_correct_shapes_128(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 4, 4, 640), + (2, 2, 2, 256), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_unroll10(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(10, 4, 4, 640), + (10, 2, 2, 256), (10, 1, 1, 256), + (10, 1, 1, 256), (10, 1, 1, 256)] + self.check_extract_features_returns_correct_shape( + 10, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, unroll_length=10) + + def test_extract_features_returns_correct_shapes_320(self): + image_height = 320 + image_width = 320 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 10, 10, 640), + (2, 5, 5, 256), (2, 3, 3, 256), + (2, 2, 2, 256), (2, 1, 1, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_enforcing_min_depth(self): + image_height = 320 + image_width = 320 + depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 10, 10, 64), + (2, 5, 5, 32), (2, 3, 3, 32), + (2, 2, 2, 32), (2, 1, 1, 32)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + expected_feature_map_shape = [(2, 10, 10, 640), + (2, 5, 5, 256), (2, 3, 3, 256), + (2, 2, 2, 256), (2, 1, 1, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_preprocess_returns_correct_value_range(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(4, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_variables_only_created_in_scope(self): + depth_multiplier = 1 + pad_to_multiple = 1 + scope_names = ['MobilenetV2', 'LSTM', 'FeatureMap'] + self.check_feature_extractor_variables_under_scopes( + depth_multiplier, pad_to_multiple, scope_names) + + def test_has_fused_batchnorm(self): + image_height = 40 + image_width = 40 + depth_multiplier = 1 + pad_to_multiple = 32 + image_placeholder = tf.placeholder(tf.float32, + [1, image_height, image_width, 3]) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + preprocessed_image = feature_extractor.preprocess(image_placeholder) + _ = feature_extractor.extract_features(preprocessed_image, unroll_length=1) + self.assertTrue(any(op.type.startswith('FusedBatchNorm') + for op in tf.get_default_graph().get_operations())) + + def test_variables_for_tflite(self): + image_height = 40 + image_width = 40 + 
depth_multiplier = 1 + pad_to_multiple = 32 + image_placeholder = tf.placeholder(tf.float32, + [1, image_height, image_width, 3]) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + preprocessed_image = feature_extractor.preprocess(image_placeholder) + tflite_unsupported = ['SquaredDifference'] + _ = feature_extractor.extract_features(preprocessed_image, unroll_length=1) + self.assertFalse(any(op.type in tflite_unsupported + for op in tf.get_default_graph().get_operations())) + + def test_output_nodes_for_tflite(self): + image_height = 64 + image_width = 64 + depth_multiplier = 1.0 + pad_to_multiple = 1 + image_placeholder = tf.placeholder(tf.float32, + [1, image_height, image_width, 3]) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + preprocessed_image = feature_extractor.preprocess(image_placeholder) + _ = feature_extractor.extract_features(preprocessed_image, unroll_length=1) + + tflite_nodes = [ + 'raw_inputs/init_lstm_c', + 'raw_inputs/init_lstm_h', + 'raw_inputs/base_endpoint', + 'raw_outputs/lstm_c', + 'raw_outputs/lstm_h', + 'raw_outputs/base_endpoint_1', + 'raw_outputs/base_endpoint_2' + ] + ops_names = [op.name for op in tf.get_default_graph().get_operations()] + for node in tflite_nodes: + self.assertTrue(any(node in s for s in ops_names)) + + def test_fixed_concat_nodes(self): + image_height = 64 + image_width = 64 + depth_multiplier = 1.0 + pad_to_multiple = 1 + image_placeholder = tf.placeholder(tf.float32, + [1, image_height, image_width, 3]) + feature_extractor = self._create_feature_extractor( + depth_multiplier, pad_to_multiple, is_quantized=True) + preprocessed_image = feature_extractor.preprocess(image_placeholder) + _ = feature_extractor.extract_features(preprocessed_image, unroll_length=1) + + concat_nodes = [ + 'MobilenetV2_1/expanded_conv_16/project/Relu6', + 'MobilenetV2_2/expanded_conv_16/project/Relu6' + ] + ops_names = [op.name for op in tf.get_default_graph().get_operations()] + for node in concat_nodes: + self.assertTrue(any(node in s for s in ops_names)) + + def test_lstm_states(self): + image_height = 256 + image_width = 256 + depth_multiplier = 1 + pad_to_multiple = 1 + state_channel = 320 + init_state1 = { + 'lstm_state_c': tf.zeros( + [image_height // 32, image_width // 32, state_channel]), + 'lstm_state_h': tf.zeros( + [image_height // 32, image_width // 32, state_channel]), + 'lstm_state_step': tf.zeros([1]) + } + init_state2 = { + 'lstm_state_c': tf.random_uniform( + [image_height // 32, image_width // 32, state_channel]), + 'lstm_state_h': tf.random_uniform( + [image_height // 32, image_width // 32, state_channel]), + 'lstm_state_step': tf.zeros([1]) + } + seq = {'dummy': tf.random_uniform([2, 1, 1, 1])} + stateful_reader1 = contrib_training.SequenceQueueingStateSaver( + batch_size=1, + num_unroll=1, + input_length=2, + input_key='', + input_sequences=seq, + input_context={}, + initial_states=init_state1, + capacity=1) + stateful_reader2 = contrib_training.SequenceQueueingStateSaver( + batch_size=1, + num_unroll=1, + input_length=2, + input_key='', + input_sequences=seq, + input_context={}, + initial_states=init_state2, + capacity=1) + image = tf.random_uniform([1, image_height, image_width, 3]) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + with tf.variable_scope('zero_state'): + feature_maps1 = feature_extractor.extract_features( + image, stateful_reader1.next_batch, unroll_length=1) + with 
tf.variable_scope('random_state'): + feature_maps2 = feature_extractor.extract_features( + image, stateful_reader2.next_batch, unroll_length=1) + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + sess.run(tf.local_variables_initializer()) + sess.run(tf.get_collection(tf.GraphKeys.TABLE_INITIALIZERS)) + sess.run([stateful_reader1.prefetch_op, stateful_reader2.prefetch_op]) + maps1, maps2 = sess.run([feature_maps1, feature_maps2]) + state = sess.run(stateful_reader1.next_batch.state('lstm_state_c')) + # feature maps should be different because states are different + self.assertFalse(np.all(np.equal(maps1[0], maps2[0]))) + # state should no longer be zero after update + self.assertTrue(state.any()) + + def check_extract_features_returns_correct_shape( + self, batch_size, image_height, image_width, depth_multiplier, + pad_to_multiple, expected_feature_map_shapes, unroll_length=1): + def graph_fn(image_tensor): + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + feature_maps = feature_extractor.extract_features( + image_tensor, unroll_length=unroll_length) + return feature_maps + + image_tensor = np.random.rand(batch_size, image_height, image_width, + 3).astype(np.float32) + feature_maps = self.execute(graph_fn, [image_tensor]) + for feature_map, expected_shape in zip( + feature_maps, expected_feature_map_shapes): + self.assertAllEqual(feature_map.shape, expected_shape) + + def check_feature_extractor_variables_under_scopes( + self, depth_multiplier, pad_to_multiple, scope_names): + g = tf.Graph() + with g.as_default(): + feature_extractor = self._create_feature_extractor( + depth_multiplier, pad_to_multiple) + preprocessed_inputs = tf.placeholder(tf.float32, (4, 320, 320, 3)) + feature_extractor.extract_features( + preprocessed_inputs, unroll_length=1) + variables = g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) + for variable in variables: + self.assertTrue( + any([ + variable.name.startswith(scope_name) + for scope_name in scope_names + ]), 'Variable name: ' + variable.name + + ' is not under any provided scopes: ' + ','.join(scope_names)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/lstm_object_detection/models/lstm_ssd_mobilenet_v1_feature_extractor.py b/models/research/lstm_object_detection/models/lstm_ssd_mobilenet_v1_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..cccf740aadd337d29bec56a7fed93fc6937fc123 --- /dev/null +++ b/models/research/lstm_object_detection/models/lstm_ssd_mobilenet_v1_feature_extractor.py @@ -0,0 +1,211 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""LSTMSSDFeatureExtractor for MobilenetV1 features.""" + +import tensorflow.compat.v1 as tf +import tf_slim as slim +from tensorflow.python.framework import ops as tf_ops +from lstm_object_detection.lstm import lstm_cells +from lstm_object_detection.lstm import rnn_decoder +from lstm_object_detection.meta_architectures import lstm_ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import context_manager +from object_detection.utils import ops +from object_detection.utils import shape_utils +from nets import mobilenet_v1 + + +class LSTMSSDMobileNetV1FeatureExtractor( + lstm_ssd_meta_arch.LSTMSSDFeatureExtractor): + """LSTM Feature Extractor using MobilenetV1 features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=True, + override_base_feature_extractor_hyperparams=False, + lstm_state_depth=256): + """Initializes instance of MobileNetV1 Feature Extractor for LSTMSSD Models. + + Args: + is_training: A boolean whether the network is in training mode. + depth_multiplier: A float depth multiplier for feature extractor. + min_depth: A number representing minimum feature extractor depth. + pad_to_multiple: The nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is True. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + lstm_state_depth: An integter of the depth of the lstm state. + """ + super(LSTMSSDMobileNetV1FeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams) + self._feature_map_layout = { + 'from_layer': ['Conv2d_13_pointwise_lstm', '', '', '', ''], + 'layer_depth': [-1, 512, 256, 256, 128], + 'use_explicit_padding': self._use_explicit_padding, + 'use_depthwise': self._use_depthwise, + } + self._base_network_scope = 'MobilenetV1' + self._lstm_state_depth = lstm_state_depth + + def create_lstm_cell(self, batch_size, output_size, state_saver, state_name, + dtype=tf.float32): + """Create the LSTM cell, and initialize state if necessary. + + Args: + batch_size: input batch size. + output_size: output size of the lstm cell, [width, height]. + state_saver: a state saver object with methods `state` and `save_state`. + state_name: string, the name to use with the state_saver. + dtype: dtype to initialize lstm state. + + Returns: + lstm_cell: the lstm cell unit. + init_state: initial state representations. 
+ step: the step + """ + lstm_cell = lstm_cells.BottleneckConvLSTMCell( + filter_size=(3, 3), + output_size=output_size, + num_units=max(self._min_depth, self._lstm_state_depth), + activation=tf.nn.relu6, + visualize_gates=False) + + if state_saver is None: + init_state = lstm_cell.init_state(state_name, batch_size, dtype) + step = None + else: + step = state_saver.state(state_name + '_step') + c = state_saver.state(state_name + '_c') + h = state_saver.state(state_name + '_h') + init_state = (c, h) + return lstm_cell, init_state, step + + def extract_features(self, + preprocessed_inputs, + state_saver=None, + state_name='lstm_state', + unroll_length=5, + scope=None): + """Extracts features from preprocessed inputs. + + The features include the base network features, lstm features and SSD + features, organized in the following name scope: + + /MobilenetV1/... + /LSTM/... + /FeatureMaps/... + + Args: + preprocessed_inputs: A [batch, height, width, channels] float tensor + representing a batch of consecutive frames from video clips. + state_saver: A state saver object with methods `state` and `save_state`. + state_name: A python string for the name to use with the state_saver. + unroll_length: The number of steps to unroll the lstm. + scope: The scope for the base network of the feature extractor. + + Returns: + A list of tensors where the ith tensor has shape [batch, height_i, + width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + with slim.arg_scope( + mobilenet_v1.mobilenet_v1_arg_scope(is_training=self._is_training)): + with (slim.arg_scope(self._conv_hyperparams_fn()) + if self._override_base_feature_extractor_hyperparams else + context_manager.IdentityContextManager()): + with slim.arg_scope([slim.batch_norm], fused=False): + # Base network. + with tf.variable_scope( + scope, self._base_network_scope, + reuse=self._reuse_weights) as scope: + net, image_features = mobilenet_v1.mobilenet_v1_base( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), + final_endpoint='Conv2d_13_pointwise', + min_depth=self._min_depth, + depth_multiplier=self._depth_multiplier, + scope=scope) + + with slim.arg_scope(self._conv_hyperparams_fn()): + with slim.arg_scope( + [slim.batch_norm], fused=False, is_training=self._is_training): + # ConvLSTM layers. + batch_size = net.shape[0].value // unroll_length + with tf.variable_scope('LSTM', reuse=self._reuse_weights) as lstm_scope: + lstm_cell, init_state, _ = self.create_lstm_cell( + batch_size, + (net.shape[1].value, net.shape[2].value), + state_saver, + state_name, + dtype=preprocessed_inputs.dtype) + net_seq = list(tf.split(net, unroll_length)) + + # Identities added for inputing state tensors externally. 
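+ # 'lstm_state_in_c'/'lstm_state_in_h' here, together with the
+ # 'lstm_state_out_c'/'lstm_state_out_h' identities added after the decoder
+ # below, give the LSTM state tensors fixed names so an exported inference
+ # graph can feed the previous frame's state back in without a state saver.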
+ c_ident = tf.identity(init_state[0], name='lstm_state_in_c') + h_ident = tf.identity(init_state[1], name='lstm_state_in_h') + init_state = (c_ident, h_ident) + + net_seq, states_out = rnn_decoder.rnn_decoder( + net_seq, init_state, lstm_cell, scope=lstm_scope) + batcher_ops = None + self._states_out = states_out + if state_saver is not None: + self._step = state_saver.state('%s_step' % state_name) + batcher_ops = [ + state_saver.save_state('%s_c' % state_name, states_out[-1][0]), + state_saver.save_state('%s_h' % state_name, states_out[-1][1]), + state_saver.save_state('%s_step' % state_name, self._step + 1) + ] + with tf_ops.control_dependencies(batcher_ops): + image_features['Conv2d_13_pointwise_lstm'] = tf.concat(net_seq, 0) + + # Identities added for reading output states, to be reused externally. + tf.identity(states_out[-1][0], name='lstm_state_out_c') + tf.identity(states_out[-1][1], name='lstm_state_out_h') + + # SSD layers. + with tf.variable_scope('FeatureMaps', reuse=self._reuse_weights): + feature_maps = feature_map_generators.multi_resolution_feature_maps( + feature_map_layout=self._feature_map_layout, + depth_multiplier=(self._depth_multiplier), + min_depth=self._min_depth, + insert_1x1_conv=True, + image_features=image_features) + + return list(feature_maps.values()) diff --git a/models/research/lstm_object_detection/models/lstm_ssd_mobilenet_v1_feature_extractor_test.py b/models/research/lstm_object_detection/models/lstm_ssd_mobilenet_v1_feature_extractor_test.py new file mode 100644 index 0000000000000000000000000000000000000000..56ad2745dae558acdb806c8f236d25754799cf49 --- /dev/null +++ b/models/research/lstm_object_detection/models/lstm_ssd_mobilenet_v1_feature_extractor_test.py @@ -0,0 +1,179 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for models.lstm_ssd_mobilenet_v1_feature_extractor.""" + +import numpy as np +import tensorflow.compat.v1 as tf +import tf_slim as slim +from tensorflow.contrib import training as contrib_training + +from lstm_object_detection.models import lstm_ssd_mobilenet_v1_feature_extractor as feature_extractor +from object_detection.models import ssd_feature_extractor_test + + +class LstmSsdMobilenetV1FeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, + depth_multiplier=1.0, + pad_to_multiple=1, + is_training=True, + use_explicit_padding=False): + """Constructs a new feature extractor. + + Args: + depth_multiplier: A float depth multiplier for feature extractor. + pad_to_multiple: The nearest multiple to zero pad the input height and + width dimensions to. + is_training: A boolean whether the network is in training mode. + use_explicit_padding: A boolean whether to use explicit padding. + + Returns: + An lstm_ssd_meta_arch.LSTMSSDMobileNetV1FeatureExtractor object. 
+ """ + min_depth = 32 + extractor = ( + feature_extractor.LSTMSSDMobileNetV1FeatureExtractor( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding)) + extractor.lstm_state_depth = int(256 * depth_multiplier) + return extractor + + def test_feature_extractor_construct_with_expected_params(self): + def conv_hyperparams_fn(): + with (slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm) and + slim.arg_scope([slim.batch_norm], decay=0.97, epsilon=1e-3)) as sc: + return sc + + params = { + 'is_training': True, + 'depth_multiplier': .55, + 'min_depth': 9, + 'pad_to_multiple': 3, + 'conv_hyperparams_fn': conv_hyperparams_fn, + 'reuse_weights': False, + 'use_explicit_padding': True, + 'use_depthwise': False, + 'override_base_feature_extractor_hyperparams': True} + + extractor = ( + feature_extractor.LSTMSSDMobileNetV1FeatureExtractor(**params)) + + self.assertEqual(params['is_training'], + extractor._is_training) + self.assertEqual(params['depth_multiplier'], + extractor._depth_multiplier) + self.assertEqual(params['min_depth'], + extractor._min_depth) + self.assertEqual(params['pad_to_multiple'], + extractor._pad_to_multiple) + self.assertEqual(params['conv_hyperparams_fn'], + extractor._conv_hyperparams_fn) + self.assertEqual(params['reuse_weights'], + extractor._reuse_weights) + self.assertEqual(params['use_explicit_padding'], + extractor._use_explicit_padding) + self.assertEqual(params['use_depthwise'], + extractor._use_depthwise) + self.assertEqual(params['override_base_feature_extractor_hyperparams'], + (extractor. + _override_base_feature_extractor_hyperparams)) + + def test_extract_features_returns_correct_shapes_256(self): + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + batch_size = 5 + expected_feature_map_shape = [(batch_size, 8, 8, 256), (batch_size, 4, 4, + 512), + (batch_size, 2, 2, 256), (batch_size, 1, 1, + 256)] + self.check_extract_features_returns_correct_shape( + batch_size, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False) + self.check_extract_features_returns_correct_shape( + batch_size, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True) + + def test_preprocess_returns_correct_value_range(self): + test_image = np.random.rand(5, 128, 128, 3) + extractor = self._create_feature_extractor() + preprocessed_image = extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_variables_only_created_in_scope(self): + scope_name = 'MobilenetV1' + g = tf.Graph() + with g.as_default(): + preprocessed_inputs = tf.placeholder(tf.float32, (5, 256, 256, 3)) + extractor = self._create_feature_extractor() + extractor.extract_features(preprocessed_inputs) + variables = g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) + find_scope = False + for variable in variables: + if scope_name in variable.name: + find_scope = True + break + self.assertTrue(find_scope) + + def test_lstm_non_zero_state(self): + init_state = { + 'lstm_state_c': tf.zeros([8, 8, 256]), + 'lstm_state_h': tf.zeros([8, 8, 256]), + 'lstm_state_step': tf.zeros([1]) + } + seq = {'test': tf.random_uniform([3, 1, 1, 1])} + stateful_reader = contrib_training.SequenceQueueingStateSaver( + batch_size=1, + num_unroll=1, + input_length=2, + input_key='', + input_sequences=seq, + 
input_context={}, + initial_states=init_state, + capacity=1) + extractor = self._create_feature_extractor() + image = tf.random_uniform([5, 256, 256, 3]) + with tf.variable_scope('zero_state'): + feature_map = extractor.extract_features( + image, stateful_reader.next_batch) + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + sess.run([stateful_reader.prefetch_op]) + _ = sess.run([feature_map]) + # Update states with the next batch. + state = sess.run(stateful_reader.next_batch.state('lstm_state_c')) + # State should no longer be zero after update. + self.assertTrue(state.any()) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/lstm_object_detection/models/mobilenet_defs.py b/models/research/lstm_object_detection/models/mobilenet_defs.py new file mode 100644 index 0000000000000000000000000000000000000000..4f984240215b818c3e8c9b5481db3319b54ef8fd --- /dev/null +++ b/models/research/lstm_object_detection/models/mobilenet_defs.py @@ -0,0 +1,142 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Definitions for modified MobileNet models used in LSTD.""" + +import tensorflow.compat.v1 as tf +import tf_slim as slim +from nets import mobilenet_v1 +from nets.mobilenet import conv_blocks as mobilenet_convs +from nets.mobilenet import mobilenet + + +def mobilenet_v1_lite_def(depth_multiplier, low_res=False): + """Conv definitions for a lite MobileNet v1 model. + + Args: + depth_multiplier: float depth multiplier for MobileNet. + low_res: An option of low-res conv input for interleave model. + + Returns: + Array of convolutions. + + Raises: + ValueError: On invalid channels with provided depth multiplier. 
+ """ + conv = mobilenet_v1.Conv + sep_conv = mobilenet_v1.DepthSepConv + + def _find_target_depth(original, depth_multiplier): + # Find the target depth such that: + # int(target * depth_multiplier) == original + pseudo_target = int(original / depth_multiplier) + for target in range(pseudo_target - 1, pseudo_target + 2): + if int(target * depth_multiplier) == original: + return target + raise ValueError('Cannot have %d channels with depth multiplier %0.2f' % + (original, depth_multiplier)) + + return [ + conv(kernel=[3, 3], stride=2, depth=32), + sep_conv(kernel=[3, 3], stride=1, depth=64), + sep_conv(kernel=[3, 3], stride=2, depth=128), + sep_conv(kernel=[3, 3], stride=1, depth=128), + sep_conv(kernel=[3, 3], stride=2, depth=256), + sep_conv(kernel=[3, 3], stride=1, depth=256), + sep_conv(kernel=[3, 3], stride=2, depth=512), + sep_conv(kernel=[3, 3], stride=1, depth=512), + sep_conv(kernel=[3, 3], stride=1, depth=512), + sep_conv(kernel=[3, 3], stride=1, depth=512), + sep_conv(kernel=[3, 3], stride=1, depth=512), + sep_conv(kernel=[3, 3], stride=1, depth=512), + sep_conv(kernel=[3, 3], stride=1 if low_res else 2, depth=1024), + sep_conv( + kernel=[3, 3], + stride=1, + depth=int(_find_target_depth(1024, depth_multiplier))) + ] + + +def mobilenet_v2_lite_def(reduced=False, is_quantized=False, low_res=False): + """Conv definitions for a lite MobileNet v2 model. + + Args: + reduced: Determines the scaling factor for expanded conv. If True, a factor + of 6 is used. If False, a factor of 3 is used. + is_quantized: Whether the model is trained in quantized mode. + low_res: Whether the input to the model is of half resolution. + + Returns: + Array of convolutions. + """ + expanded_conv = mobilenet_convs.expanded_conv + expand_input = mobilenet_convs.expand_input_by_factor + op = mobilenet.op + return dict( + defaults={ + # Note: these parameters of batch norm affect the architecture + # that's why they are here and not in training_scope. 
+ (slim.batch_norm,): { + 'center': True, + 'scale': True + }, + (slim.conv2d, slim.fully_connected, slim.separable_conv2d): { + 'normalizer_fn': slim.batch_norm, + 'activation_fn': tf.nn.relu6 + }, + (expanded_conv,): { + 'expansion_size': expand_input(6), + 'split_expansion': 1, + 'normalizer_fn': slim.batch_norm, + 'residual': True + }, + (slim.conv2d, slim.separable_conv2d): { + 'padding': 'SAME' + } + }, + spec=[ + op(slim.conv2d, stride=2, num_outputs=32, kernel_size=[3, 3]), + op(expanded_conv, + expansion_size=expand_input(1, divisible_by=1), + num_outputs=16), + op(expanded_conv, + expansion_size=(expand_input(3, divisible_by=1) + if reduced else expand_input(6)), + stride=2, + num_outputs=24), + op(expanded_conv, + expansion_size=(expand_input(3, divisible_by=1) + if reduced else expand_input(6)), + stride=1, + num_outputs=24), + op(expanded_conv, stride=2, num_outputs=32), + op(expanded_conv, stride=1, num_outputs=32), + op(expanded_conv, stride=1, num_outputs=32), + op(expanded_conv, stride=2, num_outputs=64), + op(expanded_conv, stride=1, num_outputs=64), + op(expanded_conv, stride=1, num_outputs=64), + op(expanded_conv, stride=1, num_outputs=64), + op(expanded_conv, stride=1, num_outputs=96), + op(expanded_conv, stride=1, num_outputs=96), + op(expanded_conv, stride=1, num_outputs=96), + op(expanded_conv, stride=1 if low_res else 2, num_outputs=160), + op(expanded_conv, stride=1, num_outputs=160), + op(expanded_conv, stride=1, num_outputs=160), + op(expanded_conv, + stride=1, + num_outputs=320, + project_activation_fn=(tf.nn.relu6 + if is_quantized else tf.identity)) + ], + ) diff --git a/models/research/lstm_object_detection/models/mobilenet_defs_test.py b/models/research/lstm_object_detection/models/mobilenet_defs_test.py new file mode 100644 index 0000000000000000000000000000000000000000..f1b5bda504bb02ac89f55e3acd370862f513a3a3 --- /dev/null +++ b/models/research/lstm_object_detection/models/mobilenet_defs_test.py @@ -0,0 +1,136 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for lstm_object_detection.models.mobilenet_defs.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf +from lstm_object_detection.models import mobilenet_defs +from nets import mobilenet_v1 +from nets.mobilenet import mobilenet_v2 + + +class MobilenetV1DefsTest(tf.test.TestCase): + + def test_mobilenet_v1_lite_def(self): + net, _ = mobilenet_v1.mobilenet_v1_base( + tf.placeholder(tf.float32, (10, 320, 320, 3)), + final_endpoint='Conv2d_13_pointwise', + min_depth=8, + depth_multiplier=1.0, + conv_defs=mobilenet_defs.mobilenet_v1_lite_def(1.0), + use_explicit_padding=True, + scope='MobilenetV1') + self.assertEqual(net.get_shape().as_list(), [10, 10, 10, 1024]) + + def test_mobilenet_v1_lite_def_depthmultiplier_half(self): + net, _ = mobilenet_v1.mobilenet_v1_base( + tf.placeholder(tf.float32, (10, 320, 320, 3)), + final_endpoint='Conv2d_13_pointwise', + min_depth=8, + depth_multiplier=0.5, + conv_defs=mobilenet_defs.mobilenet_v1_lite_def(0.5), + use_explicit_padding=True, + scope='MobilenetV1') + self.assertEqual(net.get_shape().as_list(), [10, 10, 10, 1024]) + + def test_mobilenet_v1_lite_def_depthmultiplier_2x(self): + net, _ = mobilenet_v1.mobilenet_v1_base( + tf.placeholder(tf.float32, (10, 320, 320, 3)), + final_endpoint='Conv2d_13_pointwise', + min_depth=8, + depth_multiplier=2.0, + conv_defs=mobilenet_defs.mobilenet_v1_lite_def(2.0), + use_explicit_padding=True, + scope='MobilenetV1') + self.assertEqual(net.get_shape().as_list(), [10, 10, 10, 1024]) + + def test_mobilenet_v1_lite_def_low_res(self): + net, _ = mobilenet_v1.mobilenet_v1_base( + tf.placeholder(tf.float32, (10, 320, 320, 3)), + final_endpoint='Conv2d_13_pointwise', + min_depth=8, + depth_multiplier=1.0, + conv_defs=mobilenet_defs.mobilenet_v1_lite_def(1.0, low_res=True), + use_explicit_padding=True, + scope='MobilenetV1') + self.assertEqual(net.get_shape().as_list(), [10, 20, 20, 1024]) + + +class MobilenetV2DefsTest(tf.test.TestCase): + + def test_mobilenet_v2_lite_def(self): + net, features = mobilenet_v2.mobilenet_base( + tf.placeholder(tf.float32, (10, 320, 320, 3)), + min_depth=8, + depth_multiplier=1.0, + conv_defs=mobilenet_defs.mobilenet_v2_lite_def(), + use_explicit_padding=True, + scope='MobilenetV2') + self.assertEqual(net.get_shape().as_list(), [10, 10, 10, 320]) + self._assert_contains_op('MobilenetV2/expanded_conv_16/project/Identity') + self.assertEqual( + features['layer_3/expansion_output'].get_shape().as_list(), + [10, 160, 160, 96]) + self.assertEqual( + features['layer_4/expansion_output'].get_shape().as_list(), + [10, 80, 80, 144]) + + def test_mobilenet_v2_lite_def_is_quantized(self): + net, _ = mobilenet_v2.mobilenet_base( + tf.placeholder(tf.float32, (10, 320, 320, 3)), + min_depth=8, + depth_multiplier=1.0, + conv_defs=mobilenet_defs.mobilenet_v2_lite_def(is_quantized=True), + use_explicit_padding=True, + scope='MobilenetV2') + self.assertEqual(net.get_shape().as_list(), [10, 10, 10, 320]) + self._assert_contains_op('MobilenetV2/expanded_conv_16/project/Relu6') + + def test_mobilenet_v2_lite_def_low_res(self): + net, _ = mobilenet_v2.mobilenet_base( + tf.placeholder(tf.float32, (10, 320, 320, 3)), + min_depth=8, + depth_multiplier=1.0, + conv_defs=mobilenet_defs.mobilenet_v2_lite_def(low_res=True), + use_explicit_padding=True, + scope='MobilenetV2') + self.assertEqual(net.get_shape().as_list(), [10, 20, 20, 
320]) + + def test_mobilenet_v2_lite_def_reduced(self): + net, features = mobilenet_v2.mobilenet_base( + tf.placeholder(tf.float32, (10, 320, 320, 3)), + min_depth=8, + depth_multiplier=1.0, + conv_defs=mobilenet_defs.mobilenet_v2_lite_def(reduced=True), + use_explicit_padding=True, + scope='MobilenetV2') + self.assertEqual(net.get_shape().as_list(), [10, 10, 10, 320]) + self.assertEqual( + features['layer_3/expansion_output'].get_shape().as_list(), + [10, 160, 160, 48]) + self.assertEqual( + features['layer_4/expansion_output'].get_shape().as_list(), + [10, 80, 80, 72]) + + def _assert_contains_op(self, op_name): + op_names = [op.name for op in tf.get_default_graph().get_operations()] + self.assertIn(op_name, op_names) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/lstm_object_detection/protos/__init__.py b/models/research/lstm_object_detection/protos/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/lstm_object_detection/protos/input_reader_google.proto b/models/research/lstm_object_detection/protos/input_reader_google.proto new file mode 100644 index 0000000000000000000000000000000000000000..2c494a62e97321ee9206cebe28cd6601049f3293 --- /dev/null +++ b/models/research/lstm_object_detection/protos/input_reader_google.proto @@ -0,0 +1,32 @@ +syntax = "proto2"; + +package lstm_object_detection.protos; + +import "object_detection/protos/input_reader.proto"; + +message GoogleInputReader { + extend object_detection.protos.ExternalInputReader { + optional GoogleInputReader google_input_reader = 444; + } + + oneof input_reader { + TFRecordVideoInputReader tf_record_video_input_reader = 1; + } +} + +message TFRecordVideoInputReader { + // Path(s) to tfrecords of input data. + repeated string input_path = 1; + + enum DataType { + UNSPECIFIED = 0; + TF_EXAMPLE = 1; + TF_SEQUENCE_EXAMPLE = 2; + } + optional DataType data_type = 2 [default=TF_SEQUENCE_EXAMPLE]; + + // Length of the video sequence. All the input video sequence should have the + // same length in frames, e.g. 5 frames. + optional int32 video_length = 3; +} + diff --git a/models/research/lstm_object_detection/protos/pipeline.proto b/models/research/lstm_object_detection/protos/pipeline.proto new file mode 100644 index 0000000000000000000000000000000000000000..10dd652554ad38e933acdedf8ce1479f15eed9d7 --- /dev/null +++ b/models/research/lstm_object_detection/protos/pipeline.proto @@ -0,0 +1,69 @@ +syntax = "proto2"; + +package lstm_object_detection.protos; + +import "object_detection/protos/pipeline.proto"; +import "lstm_object_detection/protos/quant_overrides.proto"; + +extend object_detection.protos.TrainEvalPipelineConfig { + optional LstmModel lstm_model = 205743444; + optional QuantOverrides quant_overrides = 246059837; +} + +// Message for extra fields needed for configuring LSTM model. +message LstmModel { + // Unroll length for training LSTMs. + optional int32 train_unroll_length = 1; + + // Unroll length for evaluating LSTMs. + optional int32 eval_unroll_length = 2; + + // Depth of the lstm feature map. + optional int32 lstm_state_depth = 3 [default = 256]; + + // Depth multipliers for multiple feature extractors. Used for interleaved + // or ensemble model. + repeated float depth_multipliers = 4; + + // Specifies how models are interleaved when multiple feature extractors are + // used during training. Must be in ['RANDOM', 'RANDOM_SKIP_SMALL']. 
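+ // With RANDOM_SKIP_SMALL the choice of extractor is still random, but the
+ // small extractor does not update the LSTM state during training (see
+ // lstm_ssd_interleaved_mobilenet_v2_feature_extractor.py).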
+ optional string train_interleave_method = 5 [default = 'RANDOM']; + + // Specifies how models are interleaved when multiple feature extractors are + // used during training. Must be in ['RANDOM', 'RANDOM_SKIP', 'SKIPK']. + optional string eval_interleave_method = 6 [default = 'SKIP9']; + + // The stride of the lstm state. + optional int32 lstm_state_stride = 7 [default = 32]; + + // Whether to flattern LSTM state and output. Note that this is typically + // intended only to be modified internally by export_tfmini_lstd_graph_lib + // to support flatten state for tfmini/tflite. Do not set this field in + // the pipeline config file unless necessary. + optional bool flatten_state = 8 [default = false]; + + // Whether to apply bottleneck layer before going into LSTM gates. This + // allows multiple feature extractors to use separate bottleneck layers + // instead of sharing the same one so that different base model output + // feature dimensions are not forced to be the same. + // For example: + // Model 1 outputs feature map f_1 of depth d_1. + // Model 2 outputs feature map f_2 of depth d_2. + // Pre-bottlenecking allows lstm input to be either: + // conv(concat([f_1, h])) or conv(concat([f_2, h])). + optional bool pre_bottleneck = 9 [default = false]; + + // Normalize LSTM state, default false. + optional bool scale_state = 10 [default = false]; + + // Clip LSTM state at [0, 6], default true. + optional bool clip_state = 11 [default = true]; + + // If the model is in quantized training. This field does NOT need to be set + // manually. Instead, it will be overridden by configs in graph_rewriter. + optional bool is_quantized = 12 [default = false]; + + // Downsample input image when using the smaller network in interleaved + // models, default false. + optional bool low_res = 13 [default = false]; +} diff --git a/models/research/lstm_object_detection/protos/quant_overrides.proto b/models/research/lstm_object_detection/protos/quant_overrides.proto new file mode 100644 index 0000000000000000000000000000000000000000..9dc0eaf86e5f507f87b87fe1571b4e3d82991df1 --- /dev/null +++ b/models/research/lstm_object_detection/protos/quant_overrides.proto @@ -0,0 +1,40 @@ +syntax = "proto2"; + +package lstm_object_detection.protos; + +// Message to override default quantization behavior. +message QuantOverrides { + repeated QuantConfig quant_configs = 1; +} + +// Parameters to manually create fake quant ops outside of the generic +// tensorflow/contrib/quantize/python/quantize.py script. This may be +// used to override default behaviour or quantize ops not already supported. +message QuantConfig { + // The name of the op to add a fake quant op to. + required string op_name = 1; + + // The name of the fake quant op. + required string quant_op_name = 2; + + // Whether the fake quant op uses fixed ranges. Otherwise, learned moving + // average ranges are used. + required bool fixed_range = 3 [default = false]; + + // The intitial minimum value of the range. + optional float min = 4 [default = -6]; + + // The initial maximum value of the range. + optional float max = 5 [default = 6]; + + // Number of steps to delay before quantization takes effect during training. + optional int32 delay = 6 [default = 500000]; + + // Number of bits to use for quantizing weights. + // Only 8 bit is supported for now. + optional int32 weight_bits = 7 [default = 8]; + + // Number of bits to use for quantizing activations. + // Only 8 bit is supported for now. 
+ optional int32 activation_bits = 8 [default = 8]; +} diff --git a/models/research/lstm_object_detection/test_tflite_model.py b/models/research/lstm_object_detection/test_tflite_model.py new file mode 100644 index 0000000000000000000000000000000000000000..a8b5e15e210ab6c191911d3c440cef33d936274c --- /dev/null +++ b/models/research/lstm_object_detection/test_tflite_model.py @@ -0,0 +1,53 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Test a tflite model using random input data.""" + +from __future__ import print_function +from absl import flags +import numpy as np +import tensorflow.compat.v1 as tf + +flags.DEFINE_string('model_path', None, 'Path to model.') +FLAGS = flags.FLAGS + + +def main(_): + + flags.mark_flag_as_required('model_path') + + # Load TFLite model and allocate tensors. + interpreter = tf.lite.Interpreter(model_path=FLAGS.model_path) + interpreter.allocate_tensors() + + # Get input and output tensors. + input_details = interpreter.get_input_details() + print('input_details:', input_details) + output_details = interpreter.get_output_details() + print('output_details:', output_details) + + # Test model on random input data. + input_shape = input_details[0]['shape'] + # change the following line to feed into your own data. 
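+ # Only the first input tensor is fed below. The LSTM-SSD TFLite models in
+ # this directory typically expose three inputs (the image plus the lstm_c and
+ # lstm_h state tensors); for such models every entry of input_details needs a
+ # value, roughly as follows (random data, assuming float32 inputs):
+ #   for detail in input_details:
+ #     interpreter.set_tensor(
+ #         detail['index'],
+ #         np.random.random_sample(detail['shape']).astype(np.float32))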
+ input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32) + interpreter.set_tensor(input_details[0]['index'], input_data) + + interpreter.invoke() + output_data = interpreter.get_tensor(output_details[0]['index']) + print(output_data) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/lstm_object_detection/tflite/BUILD b/models/research/lstm_object_detection/tflite/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..66068925da4fde7eb99215d907d627e0ff1d3847 --- /dev/null +++ b/models/research/lstm_object_detection/tflite/BUILD @@ -0,0 +1,81 @@ +package( + default_visibility = ["//visibility:public"], +) + +licenses(["notice"]) + +cc_library( + name = "mobile_ssd_client", + srcs = ["mobile_ssd_client.cc"], + hdrs = ["mobile_ssd_client.h"], + deps = [ + "//protos:box_encodings_cc_proto", + "//protos:detections_cc_proto", + "//protos:labelmap_cc_proto", + "//protos:mobile_ssd_client_options_cc_proto", + "//utils:conversion_utils", + "//utils:ssd_utils", + "@com_google_absl//absl/base:core_headers", + "@com_google_absl//absl/memory", + "@com_google_absl//absl/types:span", + "@com_google_glog//:glog", + "@gemmlowp", + ], +) + +config_setting( + name = "enable_edgetpu", + define_values = {"enable_edgetpu": "true"}, + visibility = ["//visibility:public"], +) + +cc_library( + name = "mobile_ssd_tflite_client", + srcs = ["mobile_ssd_tflite_client.cc"], + hdrs = ["mobile_ssd_tflite_client.h"], + defines = select({ + "//conditions:default": [], + "enable_edgetpu": ["ENABLE_EDGETPU"], + }), + deps = [ + ":mobile_ssd_client", + "@com_google_glog//:glog", + "@com_google_absl//absl/memory", + "@org_tensorflow//tensorflow/lite:arena_planner", + "@org_tensorflow//tensorflow/lite:framework", + "@org_tensorflow//tensorflow/lite/delegates/nnapi:nnapi_delegate", + "@org_tensorflow//tensorflow/lite/kernels:builtin_ops", + "//protos:anchor_generation_options_cc_proto", + "//utils:file_utils", + "//utils:ssd_utils", + ] + select({ + "//conditions:default": [], + "enable_edgetpu": [ + "@libedgetpu//libedgetpu:header", + ], + }), + alwayslink = 1, +) + +cc_library( + name = "mobile_lstd_tflite_client", + srcs = ["mobile_lstd_tflite_client.cc"], + hdrs = ["mobile_lstd_tflite_client.h"], + defines = select({ + "//conditions:default": [], + "enable_edgetpu": ["ENABLE_EDGETPU"], + }), + deps = [ + ":mobile_ssd_client", + ":mobile_ssd_tflite_client", + "@com_google_glog//:glog", + "@com_google_absl//absl/base:core_headers", + "@org_tensorflow//tensorflow/lite/kernels:builtin_ops", + ] + select({ + "//conditions:default": [], + "enable_edgetpu": [ + "@libedgetpu//libedgetpu:header", + ], + }), + alwayslink = 1, +) diff --git a/models/research/lstm_object_detection/tflite/WORKSPACE b/models/research/lstm_object_detection/tflite/WORKSPACE new file mode 100644 index 0000000000000000000000000000000000000000..3bce3814f365ec2bcc1122d7dfc8a5ba5f7d3dcb --- /dev/null +++ b/models/research/lstm_object_detection/tflite/WORKSPACE @@ -0,0 +1,133 @@ +workspace(name = "lstm_object_detection") + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") +load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository") + +http_archive( + name = "bazel_skylib", + sha256 = "bbccf674aa441c266df9894182d80de104cabd19be98be002f6d478aaa31574d", + strip_prefix = "bazel-skylib-2169ae1c374aab4a09aa90e65efe1a3aad4e279b", + urls = ["https://github.com/bazelbuild/bazel-skylib/archive/2169ae1c374aab4a09aa90e65efe1a3aad4e279b.tar.gz"], +) 
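+# bazel_skylib must be declared before the version check below, which loads
+# versions.bzl from it.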
+load("@bazel_skylib//lib:versions.bzl", "versions") +versions.check(minimum_bazel_version = "0.23.0") + +# ABSL cpp library. +http_archive( + name = "com_google_absl", + urls = [ + "https://github.com/abseil/abseil-cpp/archive/a02f62f456f2c4a7ecf2be3104fe0c6e16fbad9a.tar.gz", + ], + sha256 = "d437920d1434c766d22e85773b899c77c672b8b4865d5dc2cd61a29fdff3cf03", + strip_prefix = "abseil-cpp-a02f62f456f2c4a7ecf2be3104fe0c6e16fbad9a", +) + +http_archive( + name = "rules_cc", + strip_prefix = "rules_cc-master", + urls = ["https://github.com/bazelbuild/rules_cc/archive/master.zip"], +) + +# GoogleTest/GoogleMock framework. Used by most unit-tests. +http_archive( + name = "com_google_googletest", + urls = ["https://github.com/google/googletest/archive/master.zip"], + strip_prefix = "googletest-master", +) + +# gflags needed by glog +http_archive( + name = "com_github_gflags_gflags", + sha256 = "6e16c8bc91b1310a44f3965e616383dbda48f83e8c1eaa2370a215057b00cabe", + strip_prefix = "gflags-77592648e3f3be87d6c7123eb81cbad75f9aef5a", + urls = [ + "https://mirror.bazel.build/github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz", + "https://github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz", + ], +) + +# glog +http_archive( + name = "com_google_glog", + sha256 = "f28359aeba12f30d73d9e4711ef356dc842886968112162bc73002645139c39c", + strip_prefix = "glog-0.4.0", + urls = ["https://github.com/google/glog/archive/v0.4.0.tar.gz"], +) + +http_archive( + name = "zlib", + build_file = "@com_google_protobuf//:third_party/zlib.BUILD", + sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1", + strip_prefix = "zlib-1.2.11", + urls = ["https://zlib.net/zlib-1.2.11.tar.gz"], +) + +http_archive( + name = "gemmlowp", + sha256 = "6678b484d929f2d0d3229d8ac4e3b815a950c86bb9f17851471d143f6d4f7834", + strip_prefix = "gemmlowp-12fed0cd7cfcd9e169bf1925bc3a7a58725fdcc3", + urls = [ + "http://mirror.tensorflow.org/github.com/google/gemmlowp/archive/12fed0cd7cfcd9e169bf1925bc3a7a58725fdcc3.zip", + "https://github.com/google/gemmlowp/archive/12fed0cd7cfcd9e169bf1925bc3a7a58725fdcc3.zip", + ], +) + +#----------------------------------------------------------------------------- +# proto +#----------------------------------------------------------------------------- +# proto_library, cc_proto_library and java_proto_library rules implicitly depend +# on @com_google_protobuf//:proto, @com_google_protobuf//:cc_toolchain and +# @com_google_protobuf//:java_toolchain, respectively. +# This statement defines the @com_google_protobuf repo. +http_archive( + name = "com_google_protobuf", + strip_prefix = "protobuf-3.8.0", + urls = ["https://github.com/google/protobuf/archive/v3.8.0.zip"], + sha256 = "1e622ce4b84b88b6d2cdf1db38d1a634fe2392d74f0b7b74ff98f3a51838ee53", +) + +# java_lite_proto_library rules implicitly depend on +# @com_google_protobuf_javalite//:javalite_toolchain, which is the JavaLite proto +# runtime (base classes and common utilities). 
+http_archive( + name = "com_google_protobuf_javalite", + strip_prefix = "protobuf-384989534b2246d413dbcd750744faab2607b516", + urls = ["https://github.com/google/protobuf/archive/384989534b2246d413dbcd750744faab2607b516.zip"], + sha256 = "79d102c61e2a479a0b7e5fc167bcfaa4832a0c6aad4a75fa7da0480564931bcc", +) + +# +# http_archive( +# name = "com_google_protobuf", +# strip_prefix = "protobuf-master", +# urls = ["https://github.com/protocolbuffers/protobuf/archive/master.zip"], +# ) + +# Needed by TensorFlow +http_archive( + name = "io_bazel_rules_closure", + sha256 = "e0a111000aeed2051f29fcc7a3f83be3ad8c6c93c186e64beb1ad313f0c7f9f9", + strip_prefix = "rules_closure-cf1e44edb908e9616030cc83d085989b8e6cd6df", + urls = [ + "http://mirror.tensorflow.org/github.com/bazelbuild/rules_closure/archive/cf1e44edb908e9616030cc83d085989b8e6cd6df.tar.gz", + "https://github.com/bazelbuild/rules_closure/archive/cf1e44edb908e9616030cc83d085989b8e6cd6df.tar.gz", # 2019-04-04 + ], +) + + +# TensorFlow r1.14-rc0 +http_archive( + name = "org_tensorflow", + strip_prefix = "tensorflow-1.14.0-rc0", + sha256 = "76404a6157a45e8d7a07e4f5690275256260130145924c2a7c73f6eda2a3de10", + urls = ["https://github.com/tensorflow/tensorflow/archive/v1.14.0-rc0.zip"], +) + +load("@org_tensorflow//tensorflow:workspace.bzl", "tf_workspace") +tf_workspace(tf_repo_name = "org_tensorflow") + +git_repository( + name = "libedgetpu", + remote = "sso://coral.googlesource.com/edgetpu-native", + commit = "83e47d1bcf22686fae5150ebb99281f6134ef062", +) diff --git a/models/research/lstm_object_detection/tflite/mobile_lstd_tflite_client.cc b/models/research/lstm_object_detection/tflite/mobile_lstd_tflite_client.cc new file mode 100644 index 0000000000000000000000000000000000000000..05a7bbac1b5c8a58c4f10476a2be4fb3a097a463 --- /dev/null +++ b/models/research/lstm_object_detection/tflite/mobile_lstd_tflite_client.cc @@ -0,0 +1,261 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "mobile_lstd_tflite_client.h" + +#include + +namespace lstm_object_detection { +namespace tflite { + +std::unique_ptr MobileLSTDTfLiteClient::Create() { + auto client = absl::make_unique(); + if (!client->InitializeClient(CreateDefaultOptions())) { + LOG(ERROR) << "Failed to initialize client"; + return nullptr; + } + return client; +} + +protos::ClientOptions MobileLSTDTfLiteClient::CreateDefaultOptions() { + const int kMaxDetections = 100; + const int kClassesPerDetection = 1; + const double kScoreThreshold = -2.0; + const double kIouThreshold = 0.5; + + protos::ClientOptions options; + options.set_max_detections(kMaxDetections); + options.set_max_categories(kClassesPerDetection); + options.set_score_threshold(kScoreThreshold); + options.set_iou_threshold(kIouThreshold); + options.set_agnostic_mode(false); + options.set_quantize(false); + options.set_num_keypoints(0); + + return options; +} + +std::unique_ptr MobileLSTDTfLiteClient::Create( + const protos::ClientOptions& options) { + auto client = absl::make_unique(); + if (!client->InitializeClient(options)) { + LOG(ERROR) << "Failed to initialize client"; + return nullptr; + } + return client; +} + +bool MobileLSTDTfLiteClient::InitializeInterpreter( + const protos::ClientOptions& options) { + if (options.prefer_nnapi_delegate()) { + LOG(ERROR) << "NNAPI not supported."; + return false; + } else { + interpreter_->UseNNAPI(false); + } + +#ifdef ENABLE_EDGETPU + interpreter_->SetExternalContext(kTfLiteEdgeTpuContext, + edge_tpu_context_.get()); +#endif + + // Inputs are: normalized_input_image_tensor, raw_inputs/init_lstm_c, + // raw_inputs/init_lstm_h + if (interpreter_->inputs().size() != 3) { + LOG(ERROR) << "Invalid number of interpreter inputs: " << + interpreter_->inputs().size(); + return false; + } + + const std::vector input_tensor_indices = interpreter_->inputs(); + const TfLiteTensor& input_lstm_c = + *interpreter_->tensor(input_tensor_indices[1]); + if (input_lstm_c.dims->size != 4) { + LOG(ERROR) << "Invalid input lstm_c dimensions: " << + input_lstm_c.dims->size; + return false; + } + if (input_lstm_c.dims->data[0] != 1) { + LOG(ERROR) << "Invalid input lstm_c batch size: " << + input_lstm_c.dims->data[0]; + return false; + } + lstm_state_width_ = input_lstm_c.dims->data[1]; + lstm_state_height_ = input_lstm_c.dims->data[2]; + lstm_state_depth_ = input_lstm_c.dims->data[3]; + lstm_state_size_ = lstm_state_width_ * lstm_state_height_ * lstm_state_depth_; + + const TfLiteTensor& input_lstm_h = + *interpreter_->tensor(input_tensor_indices[2]); + if (!ValidateStateTensor(input_lstm_h, "input lstm_h")) { + return false; + } + + // Outputs are: + // TFLite_Detection_PostProcess, + // TFLite_Detection_PostProcess:1, + // TFLite_Detection_PostProcess:2, + // TFLite_Detection_PostProcess:3, + // raw_outputs/lstm_c, raw_outputs/lstm_h + if (interpreter_->outputs().size() != 6) { + LOG(ERROR) << "Invalid number of interpreter outputs: " << + interpreter_->outputs().size(); + return false; + } + + const std::vector output_tensor_indices = interpreter_->outputs(); + const TfLiteTensor& output_lstm_c = + *interpreter_->tensor(output_tensor_indices[4]); + if (!ValidateStateTensor(output_lstm_c, "output lstm_c")) { + return false; + } + const TfLiteTensor& output_lstm_h = + *interpreter_->tensor(output_tensor_indices[5]); + if (!ValidateStateTensor(output_lstm_h, "output lstm_h")) { + return false; + } + + // Initialize state with all zeroes. 
+ lstm_c_data_.resize(lstm_state_size_); + lstm_h_data_.resize(lstm_state_size_); + lstm_c_data_uint8_.resize(lstm_state_size_); + lstm_h_data_uint8_.resize(lstm_state_size_); + + if (interpreter_->AllocateTensors() != kTfLiteOk) { + LOG(ERROR) << "Failed to allocate tensors"; + return false; + } + + return true; +} + +bool MobileLSTDTfLiteClient::ValidateStateTensor(const TfLiteTensor& tensor, + const std::string& name) { + if (tensor.dims->size != 4) { + LOG(ERROR) << "Invalid " << name << " dimensions: " << tensor.dims->size; + return false; + } + if (tensor.dims->data[0] != 1) { + LOG(ERROR) << "Invalid " << name << " batch size: " << tensor.dims->data[0]; + return false; + } + if (tensor.dims->data[1] != lstm_state_width_ || + tensor.dims->data[2] != lstm_state_height_ || + tensor.dims->data[3] != lstm_state_depth_) { + LOG(ERROR) << "Invalid " << name << " dimensions: [" << + tensor.dims->data[0] << ", " << tensor.dims->data[1] << ", " << + tensor.dims->data[2] << ", " << tensor.dims->data[3] << "]"; + return false; + } + return true; +} + +bool MobileLSTDTfLiteClient::ComputeOutputLayerCount() { + // Outputs are: raw_outputs/box_encodings, raw_outputs/class_predictions, + // raw_outputs/lstm_c, raw_outputs/lstm_h + CHECK_EQ(interpreter_->outputs().size(), 4); + num_output_layers_ = 1; + return true; +} + +bool MobileLSTDTfLiteClient::FloatInference(const uint8_t* input_data) { + // Inputs are: normalized_input_image_tensor, raw_inputs/init_lstm_c, + // raw_inputs/init_lstm_h + CHECK(input_data) << "Input data cannot be null."; + float* input = interpreter_->typed_input_tensor(0); + CHECK(input) << "Input tensor cannot be null."; + // Normalize the uint8 input image with mean_value_, std_value_. + NormalizeInputImage(input_data, input); + + // Copy input LSTM state into TFLite's input tensors. + float* lstm_c_input = interpreter_->typed_input_tensor(1); + CHECK(lstm_c_input) << "Input lstm_c tensor cannot be null."; + std::copy(lstm_c_data_.begin(), lstm_c_data_.end(), lstm_c_input); + + float* lstm_h_input = interpreter_->typed_input_tensor(2); + CHECK(lstm_h_input) << "Input lstm_h tensor cannot be null."; + std::copy(lstm_h_data_.begin(), lstm_h_data_.end(), lstm_h_input); + + // Run inference on inputs. + CHECK_EQ(interpreter_->Invoke(), kTfLiteOk) << "Invoking interpreter failed."; + + // Copy LSTM state out of TFLite's output tensors. + // Outputs are: raw_outputs/box_encodings, raw_outputs/class_predictions, + // raw_outputs/lstm_c, raw_outputs/lstm_h + float* lstm_c_output = interpreter_->typed_output_tensor(2); + CHECK(lstm_c_output) << "Output lstm_c tensor cannot be null."; + std::copy(lstm_c_output, lstm_c_output + lstm_state_size_, + lstm_c_data_.begin()); + + float* lstm_h_output = interpreter_->typed_output_tensor(3); + CHECK(lstm_h_output) << "Output lstm_h tensor cannot be null."; + std::copy(lstm_h_output, lstm_h_output + lstm_state_size_, + lstm_h_data_.begin()); + return true; +} + +bool MobileLSTDTfLiteClient::QuantizedInference(const uint8_t* input_data) { + // Inputs are: normalized_input_image_tensor, raw_inputs/init_lstm_c, + // raw_inputs/init_lstm_h + CHECK(input_data) << "Input data cannot be null."; + uint8_t* input = interpreter_->typed_input_tensor(0); + CHECK(input) << "Input tensor cannot be null."; + memcpy(input, input_data, input_size_); + + // Copy input LSTM state into TFLite's input tensors. 
+ uint8_t* lstm_c_input = interpreter_->typed_input_tensor(1); + CHECK(lstm_c_input) << "Input lstm_c tensor cannot be null."; + std::copy(lstm_c_data_uint8_.begin(), lstm_c_data_uint8_.end(), lstm_c_input); + + uint8_t* lstm_h_input = interpreter_->typed_input_tensor(2); + CHECK(lstm_h_input) << "Input lstm_h tensor cannot be null."; + std::copy(lstm_h_data_uint8_.begin(), lstm_h_data_uint8_.end(), lstm_h_input); + + // Run inference on inputs. + CHECK_EQ(interpreter_->Invoke(), kTfLiteOk) << "Invoking interpreter failed."; + + // Copy LSTM state out of TFLite's output tensors. + // Outputs are: + // TFLite_Detection_PostProcess, + // TFLite_Detection_PostProcess:1, + // TFLite_Detection_PostProcess:2, + // TFLite_Detection_PostProcess:3, + // raw_outputs/lstm_c, raw_outputs/lstm_h + uint8_t* lstm_c_output = interpreter_->typed_output_tensor(4); + CHECK(lstm_c_output) << "Output lstm_c tensor cannot be null."; + std::copy(lstm_c_output, lstm_c_output + lstm_state_size_, + lstm_c_data_uint8_.begin()); + + uint8_t* lstm_h_output = interpreter_->typed_output_tensor(5); + CHECK(lstm_h_output) << "Output lstm_h tensor cannot be null."; + std::copy(lstm_h_output, lstm_h_output + lstm_state_size_, + lstm_h_data_uint8_.begin()); + return true; +} + +bool MobileLSTDTfLiteClient::Inference(const uint8_t* input_data) { + if (input_data == nullptr) { + LOG(ERROR) << "input_data cannot be null for inference."; + return false; + } + if (IsQuantizedModel()) + return QuantizedInference(input_data); + else + return FloatInference(input_data); + return true; +} + +} // namespace tflite +} // namespace lstm_object_detection diff --git a/models/research/lstm_object_detection/tflite/mobile_lstd_tflite_client.h b/models/research/lstm_object_detection/tflite/mobile_lstd_tflite_client.h new file mode 100644 index 0000000000000000000000000000000000000000..e4f16bc945a6725025e285885967637629d0a5fc --- /dev/null +++ b/models/research/lstm_object_detection/tflite/mobile_lstd_tflite_client.h @@ -0,0 +1,74 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_MODELS_LSTM_OBJECT_DETECTION_TFLITE_MOBILE_LSTD_TFLITE_CLIENT_H_ +#define TENSORFLOW_MODELS_LSTM_OBJECT_DETECTION_TFLITE_MOBILE_LSTD_TFLITE_CLIENT_H_ + +#include +#include + +#include +#include "mobile_ssd_client.h" +#include "mobile_ssd_tflite_client.h" + +namespace lstm_object_detection { +namespace tflite { + +// Client for LSTD MobileNet TfLite model. +class MobileLSTDTfLiteClient : public MobileSSDTfLiteClient { + public: + MobileLSTDTfLiteClient() = default; + // Create with default options. 
+ static std::unique_ptr Create(); + static std::unique_ptr Create( + const protos::ClientOptions& options); + ~MobileLSTDTfLiteClient() override = default; + static protos::ClientOptions CreateDefaultOptions(); + + protected: + bool InitializeInterpreter(const protos::ClientOptions& options) override; + bool ComputeOutputLayerCount() override; + bool Inference(const uint8_t* input_data) override; + + private: + // MobileLSTDTfLiteClient is neither copyable nor movable. + MobileLSTDTfLiteClient(const MobileLSTDTfLiteClient&) = delete; + MobileLSTDTfLiteClient& operator=(const MobileLSTDTfLiteClient&) = delete; + + bool ValidateStateTensor(const TfLiteTensor& tensor, const std::string& name); + + // Helper functions used by Inference functions. + bool FloatInference(const uint8_t* input_data); + bool QuantizedInference(const uint8_t* input_data); + + // LSTM model parameters. + int lstm_state_width_ = 0; + int lstm_state_height_ = 0; + int lstm_state_depth_ = 0; + int lstm_state_size_ = 0; + + // LSTM state stored between float inference runs. + std::vector lstm_c_data_; + std::vector lstm_h_data_; + + // LSTM state stored between uint8 inference runs. + std::vector lstm_c_data_uint8_; + std::vector lstm_h_data_uint8_; +}; + +} // namespace tflite +} // namespace lstm_object_detection + +#endif // TENSORFLOW_MODELS_LSTM_OBJECT_DETECTION_TFLITE_MOBILE_LSTD_TFLITE_CLIENT_H_ diff --git a/models/research/lstm_object_detection/tflite/mobile_ssd_client.cc b/models/research/lstm_object_detection/tflite/mobile_ssd_client.cc new file mode 100644 index 0000000000000000000000000000000000000000..27bf70109e46d2b9612480bb192f01aa3c9bfde1 --- /dev/null +++ b/models/research/lstm_object_detection/tflite/mobile_ssd_client.cc @@ -0,0 +1,209 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "mobile_ssd_client.h" + +#include + +#include + +#include +#include "absl/memory/memory.h" +#include "utils/conversion_utils.h" +#include "utils/ssd_utils.h" + +namespace lstm_object_detection { +namespace tflite { + +bool MobileSSDClient::InitializeClient(const protos::ClientOptions& options) { + options_ = options; + return true; +} + +bool MobileSSDClient::Detect(const uint8_t* pixels, int width, int height, + int bytes_per_pixel, int bytes_per_row, + protos::DetectionResults* detections) { + SetInputDims(width, height); + // Grayscale input images are only compatible with grayscale models, and + // color input images are only compatible with color models. 
+ CHECK((bytes_per_pixel == 1 && input_depth_ == 1) || + (bytes_per_pixel >= 3 && input_depth_ >= 3)); + if (HasPadding(width, height, bytes_per_pixel, bytes_per_row)) { + std::vector unpadded_pixels = + RemovePadding(pixels, width, height, bytes_per_pixel, bytes_per_row); + return Detect(&unpadded_pixels[0], detections); + } else { + return Detect(pixels, detections); + } +} + +bool MobileSSDClient::Detect(const uint8_t* pixels, + protos::DetectionResults* detections) { + return BatchDetect(pixels, 1, absl::MakeSpan(&detections, 1)); +} + +bool MobileSSDClient::BatchDetect( + const uint8_t* pixels, int batch_size, + absl::Span detections) { + if (detections.size() != batch_size) { + LOG(ERROR) << "Batch size does not match output cardinality."; + return false; + } + if (batch_size != batch_size_) { + if (!SetBatchSize(batch_size)) { + LOG(ERROR) << "Couldn't set batch size."; + return false; + } + } + if (!Inference(pixels)) { + LOG(ERROR) << "Couldn't inference."; + return false; + } + for (int batch = 0; batch < batch_size; ++batch) { + if (RequiresPostProcessing()) { + LOG(ERROR) << "Post Processing not supported."; + return false; + } else { + if (!NoPostProcessNoAnchors(detections[batch])) { + LOG(ERROR) << "NoPostProcessNoAnchors failed."; + return false; + } + } + } + + return true; +} + +bool MobileSSDClient::SetBatchSize(int batch_size) { + batch_size_ = batch_size; + AllocateBuffers(); + if (batch_size != 1) { + LOG(ERROR) + << "Only single batch inference supported by default. All child " + "classes that support batched inference should override this method " + "and not return an error if the batch size is supported. (E.g. " + "MobileSSDTfLiteClient)."; + return false; + } + return true; +} + +bool MobileSSDClient::NoPostProcessNoAnchors( + protos::DetectionResults* detections) { + LOG(ERROR) << "not yet implemented"; + return false; +} + +bool MobileSSDClient::RequiresPostProcessing() const { + return anchors_.y_size() > 0; +} + +void MobileSSDClient::SetInputDims(int width, int height) { + CHECK_EQ(width, input_width_); + CHECK_EQ(height, input_height_); +} + +int MobileSSDClient::GetNumberOfLabels() const { return labelmap_.item_size(); } + +std::string MobileSSDClient::GetLabelDisplayName(const int class_index) const { + if (class_index < 0 || class_index >= GetNumberOfLabels()) { + return ""; + } + return labelmap_.item(class_index).display_name(); +} + +std::string MobileSSDClient::GetLabelName(const int class_index) const { + if (class_index < 0 || class_index >= GetNumberOfLabels()) { + return ""; + } + return labelmap_.item(class_index).name(); +} + +int MobileSSDClient::GetLabelId(const int class_index) const { + if (class_index < 0 || class_index >= GetNumberOfLabels() || + !labelmap_.item(class_index).has_id()) { + return -1; + } + return labelmap_.item(class_index).id(); +} + +void MobileSSDClient::SetLabelDisplayNameInResults( + protos::DetectionResults* detections) { + for (auto& det : *detections->mutable_detection()) { + for (const auto& class_index : det.class_index()) { + det.add_display_name(GetLabelDisplayName(class_index)); + } + } +} + +void MobileSSDClient::SetLabelNameInResults( + protos::DetectionResults* detections) { + for (auto& det : *detections->mutable_detection()) { + for (const auto& class_index : det.class_index()) { + det.add_class_name(GetLabelName(class_index)); + } + } +} + +void MobileSSDClient::InitParams(const bool agnostic_mode, + const bool quantize, + const int num_keypoints) { + num_keypoints_ = num_keypoints; + code_size_ = 4 + 2 * 
num_keypoints; + num_boxes_ = output_locations_size_ / code_size_; + if (agnostic_mode) { + num_classes_ = output_scores_size_ / num_boxes_; + } else { + num_classes_ = (output_scores_size_ / num_boxes_) - 1; + } + quantize_ = quantize; + AllocateBuffers(); +} + +void MobileSSDClient::AllocateBuffers() { + // Allocate the output vectors + output_locations_.resize(output_locations_size_ * batch_size_); + output_scores_.resize(output_scores_size_ * batch_size_); + + if (quantize_) { + quantized_output_pointers_ = + absl::make_unique>>>( + batch_size_ * num_output_layers_ * 2); + for (int batch = 0; batch < batch_size_; ++batch) { + for (int i = 0; i < num_output_layers_; ++i) { + quantized_output_pointers_->at(2 * (i + batch * num_output_layers_)) = + absl::make_unique>(output_locations_sizes_[i]); + quantized_output_pointers_->at(2 * (i + batch * num_output_layers_) + + 1) = + absl::make_unique>(output_scores_sizes_[i]); + } + } + + quantized_output_pointers_array_.reset( + new uint8_t*[batch_size_ * num_output_layers_ * 2]); + for (int i = 0; i < batch_size_ * num_output_layers_ * 2; ++i) { + quantized_output_pointers_array_[i] = + quantized_output_pointers_->at(i)->data(); + } + + gemm_context_.set_max_num_threads(1); + } else { + output_pointers_[0] = output_locations_.data(); + output_pointers_[1] = output_scores_.data(); + } +} + +} // namespace tflite +} // namespace lstm_object_detection diff --git a/models/research/lstm_object_detection/tflite/mobile_ssd_client.h b/models/research/lstm_object_detection/tflite/mobile_ssd_client.h new file mode 100644 index 0000000000000000000000000000000000000000..609bf5c9820b091c39e468cb6c6d3126ebe843c3 --- /dev/null +++ b/models/research/lstm_object_detection/tflite/mobile_ssd_client.h @@ -0,0 +1,216 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_MODELS_LSTM_OBJECT_DETECTION_TFLITE_MOBILE_SSD_CLIENT_H_ +#define TENSORFLOW_MODELS_LSTM_OBJECT_DETECTION_TFLITE_MOBILE_SSD_CLIENT_H_ + +#include +#include + +#include +#include "absl/types/span.h" +#include "public/gemmlowp.h" +#include "protos/box_encodings.pb.h" +#include "protos/detections.pb.h" +#include "protos/labelmap.pb.h" +#include "protos/mobile_ssd_client_options.pb.h" + +namespace lstm_object_detection { +namespace tflite { + +// MobileSSDClient base class. Not thread-safe. +class MobileSSDClient { + public: + MobileSSDClient() = default; + virtual ~MobileSSDClient() = default; + + // Runs detection on the image represented by 'pixels', described by the + // associated 'width', 'height', 'bytes_per_pixel' and 'bytes_per_row'. All + // these integers must be positive, 'bytes_per_row' must be sufficiently + // large, and for 'bytes_per_pixel' only values 1, 3, 4 may be passed. + // Depending on the implementation most combinations may not be allowed. 
+ bool Detect(const uint8_t* pixels, int width, int height, int bytes_per_pixel, + int bytes_per_row, protos::DetectionResults* detections); + + // Same as before, but a contiguous bytewise encoding of 'pixels' is assumed. + // That encoding can be assigned directly to the input layer of the neural + // network. + bool Detect(const uint8_t* pixels, protos::DetectionResults* detections); + + // Runs batched inference on the provided buffer. "pixels" is assumed to be a + // continuous buffer of width * height * depth * batch_size pixels. It will + // populate the detections result with batch_size DetectionResults where the + // first result corresponds to the first image contained within the pixels + // block. Note that not all models generalize correctly to multi-batch + // inference and in some cases the addition of extra batches may corrupt the + // output on the model. For example, if a network performs operations across + // batches, BatchDetect([A, B]) may not equal [Detect(A), Detect(B)]. + bool BatchDetect(const uint8_t* pixels, int batch_size, + absl::Span detections); + + // Sets the dimensions of the input image on the fly, to be effective for the + // next Detect() call. + void SetInputDims(int width, int height); + + // Returns the width of the input image which is always positive. Usually a + // constant or the width last set via 'SetInputDims()'. + int GetInputWidth() const { return input_width_; } + + // Returns the height of the input image which is always positive. Usually a + // constant or the width last set via 'SetInputDims()'. + int GetInputHeight() const { return input_height_; } + + // Returns the depth of the input image, which is the same as bytes per pixel. + // This will be 3 (for RGB images), 4 (for RGBA images), or 1 (for grayscale + // images). + int GetInputDepth() const { return input_depth_; } + + // Returns the number of possible detection labels or classes. If + // agnostic_mode is on, then this method must return 1. + int GetNumberOfLabels() const; + + // Returns human readable class labels given predicted class index. The range + // of 'label_index' is determined by 'GetNumberOfLabels()'. Returns an empty + // string if the label display name is undefined or 'label_index' is out of + // range. + std::string GetLabelDisplayName(const int class_index) const; + + // Returns Knowledge Graph MID class labels given predicted class index. The + // range of 'label_index' is determined by 'GetNumberOfLabels()'. Returns an + // empty string if the label name is undefined or 'label_index' is out of + // range. + std::string GetLabelName(const int class_index) const; + + // Returns the class/label ID for a given predicted class index. The range of + // 'label_index' is determined by 'GetNumberOfLabels()'. Returns -1 in case + // 'label_index' is out of range. + int GetLabelId(const int class_index) const; + + // Explicitly sets human readable string class name to each detection using + // the `display_name` field. + void SetLabelDisplayNameInResults(protos::DetectionResults* detections); + + // Explicitly sets string class name to each detection using the `class_name` + // fields. + void SetLabelNameInResults(protos::DetectionResults* detections); + + protected: + // Initializes the client from options. + virtual bool InitializeClient(const protos::ClientOptions& options); + + // Initializes various model specific parameters. 
+ virtual void InitParams() { + InitParams(false, false, 0); + } + + virtual void InitParams(const bool agnostic_mode, + const bool quantize, + const int num_keypoints); + + virtual void InitParams(const bool agnostic_mode, const bool quantize, + const int num_keypoints, + const protos::BoxCoder& coder) { + InitParams(agnostic_mode, quantize, num_keypoints); + *options_.mutable_box_coder() = coder; + } + + virtual void AllocateBuffers(); + + // Sets the batch size of inference. If reimplmented, overrider is responsible + // for calling parent (the returned status code may be ignored). + virtual bool SetBatchSize(int batch_size); + + // Perform client specific inference on input_data. + virtual bool Inference(const uint8_t* input_data) = 0; + + // Directly populates the results when no post-processing should take place + // and no anchors are present. This is only possible when the TensorFlow + // graph contains the customized post-processing ops. + virtual bool NoPostProcessNoAnchors(protos::DetectionResults* detections); + + // Returns true iff the model returns raw output and needs its results + // post-processed (including non-maximum suppression). If false then anchors + // do not need to be present, LoadAnchors() can be implemented empty. Note + // that almost all models around require post-processing. + bool RequiresPostProcessing() const; + + // Load client specific labelmap proto file. + virtual void LoadLabelMap() = 0; + + // Anchors for the model. + protos::CenterSizeEncoding anchors_; + // Labelmap for the model. + protos::StringIntLabelMapProto labelmap_; + // Options for the model. + protos::ClientOptions options_; + + // Buffers for storing the model predictions + float* output_pointers_[2]; + // The dimension of output_locations is [batch_size x num_anchors x 4] + std::vector output_locations_; + // The dimension of output_scores is: + // If background class is included: + // [batch_size x num_anchors x (num_classes + 1)] + // If background class is NOT included: + // [batch_size x num_anchors x num_classes] + std::vector output_scores_; + void* transient_data_; + + // Total location and score sizes. + int output_locations_size_; + int output_scores_size_; + // Output location and score sizes for each output layer. + std::vector output_locations_sizes_; + std::vector output_scores_sizes_; + + // Preproccessing related parameters + float mean_value_; + float std_value_; + std::vector location_zero_points_; + std::vector location_scales_; + std::vector score_zero_points_; + std::vector score_scales_; + + int num_output_layers_ = 1; + + // Model related parameters + int input_size_; + int num_classes_; + int num_boxes_; + int input_width_; + int input_height_; + int input_depth_ = 3; // Default value is set for backward compatibility. + int code_size_; + + int batch_size_ = 1; // Default value is set for backwards compatibility. + + // The number of keypoints by detection. Specific to faces for now. + int num_keypoints_; + // Whether to use the quantized model. + bool quantize_; + // The indices of restricted classes (empty if none was passed in the config). 
+ std::vector restricted_class_indices_; + + // Buffers for storing quantized model predictions + std::unique_ptr>>> + quantized_output_pointers_; + std::unique_ptr quantized_output_pointers_array_; + gemmlowp::GemmContext gemm_context_; +}; + +} // namespace tflite +} // namespace lstm_object_detection + +#endif // TENSORFLOW_MODELS_LSTM_OBJECT_DETECTION_TFLITE_MOBILE_SSD_CLIENT_H_ diff --git a/models/research/lstm_object_detection/tflite/mobile_ssd_tflite_client.cc b/models/research/lstm_object_detection/tflite/mobile_ssd_tflite_client.cc new file mode 100644 index 0000000000000000000000000000000000000000..f2b70a663954e211eb515524eecdc4d75b779460 --- /dev/null +++ b/models/research/lstm_object_detection/tflite/mobile_ssd_tflite_client.cc @@ -0,0 +1,579 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "mobile_ssd_tflite_client.h" + +#include +#include "tensorflow/lite/arena_planner.h" +#include "tensorflow/lite/context.h" +#include "tensorflow/lite/kernels/register.h" +#include "utils/file_utils.h" +#include "utils/ssd_utils.h" + +namespace lstm_object_detection { +namespace tflite { + +namespace { + +constexpr int kInputBatch = 1; +constexpr int kInputDepth = 1; +constexpr int kNumBoundingBoxCoordinates = 4; // xmin, ymin, width, height +constexpr int GetBoxIndex(const int layer) { return (2 * layer); } +constexpr int GetScoreIndex(const int layer) { return (2 * layer + 1); } + +} // namespace + +MobileSSDTfLiteClient::MobileSSDTfLiteClient() {} + +std::unique_ptr<::tflite::MutableOpResolver> +MobileSSDTfLiteClient::CreateOpResolver() { + return absl::make_unique<::tflite::ops::builtin::BuiltinOpResolver>(); +} + +bool MobileSSDTfLiteClient::InitializeClient( + const protos::ClientOptions& options) { + if (!MobileSSDClient::InitializeClient(options)) { + return false; + } + if (options.has_external_files()) { + if (options.external_files().model_file_name().empty() && + options.external_files().model_file_content().empty()) { + LOG(ERROR) + << "MobileSSDClient: both `external_files.model_file_name` and " + "`external_files.model_file_content` are empty which is invalid."; + } + if (!options_.external_files().model_file_content().empty()) { + model_ = ::tflite::FlatBufferModel::BuildFromBuffer( + options_.external_files().model_file_content().data(), + options_.external_files().model_file_content().size()); + } else { + const char* tflite_model_filename = reinterpret_cast( + options_.external_files().model_file_name().c_str()); + + model_ = ::tflite::FlatBufferModel::BuildFromFile(tflite_model_filename); + } + } else { + LOG(ERROR) << "Embedded model is not supported."; + return false; + } + if (!model_) { + LOG(ERROR) << "Failed to load model"; + return false; + } + + LoadLabelMap(); + + resolver_ = CreateOpResolver(); + +#ifdef ENABLE_EDGETPU + edge_tpu_context_ = + edgetpu::EdgeTpuManager::GetSingleton()->NewEdgeTpuContext(); + 
resolver_->AddCustom(edgetpu::kCustomOp, edgetpu::RegisterCustomOp()); +#endif + + ::tflite::InterpreterBuilder(*model_, *resolver_)(&interpreter_); + if (!interpreter_) { + LOG(ERROR) << "Failed to build interpreter"; + return false; + } + + if (!InitializeInterpreter(options)) { + LOG(ERROR) << "Failed to initialize interpreter"; + return false; + } + + if (RequiresPostProcessing() && !ComputeOutputSize()) { + LOG(ERROR) << "Failed to compute output size"; + return false; + } + + // Initializes number of boxes, number of keypoints, quantized model flag and + // allocates output arrays based on output size computed by + // ComputeOutputSize() + agnostic_mode_ = options.agnostic_mode(); + if (!restricted_class_indices_.empty()) { + LOG(ERROR) << "Restricted class unsupported."; + return false; + } + // Default num_keypoints will be overridden by value specified by + // GetNumberOfKeypoints() + const int num_keypoints = GetNumberOfKeypoints(); + + // Other parameters are not needed and do not make sense when the model + // contains the post-processing ops. Avoid init altogether in this case. + if (RequiresPostProcessing()) { + InitParams(IsAgnosticMode(), IsQuantizedModel(), num_keypoints, + GetBoxCoder()); + } + + SetImageNormalizationParams(); + // Getting shape of input tensors. This also checks for size consistency with + // anchors. It also makes input_width_ and input_height_ available to + // LoadAnchors + if (!SetInputShape()) { + LOG(ERROR) << "Failed to set input shape"; + return false; + } + + // Output sizes are compared to expect sizes based on number of anchors, + // number of classes, number of key points and number of values used to + // represent a bounding box. + if (RequiresPostProcessing() && !CheckOutputSizes()) { + LOG(ERROR) << "Check for output size failed"; + return false; + } + + SetZeroPointsAndScaleFactors(quantize_); + + LOG(INFO) << "Model initialized:" + << " input_size: " << input_size_ + << ", output_locations_size: " << output_locations_size_ + << ", preprocessing mean value: " << mean_value_ + << ", preprocessing std value: " << std_value_; + + return true; +} + +void MobileSSDTfLiteClient::SetImageNormalizationParams() { + mean_value_ = 127.5f; + std_value_ = 127.5f; +} + +int MobileSSDTfLiteClient::GetNumberOfKeypoints() const { + return options_.num_keypoints(); +} + +bool MobileSSDTfLiteClient::SetInputShape() { + // inputs() maps the input tensor index to the index TFLite's tensors + const int input_tensor_index = interpreter_->inputs()[0]; + const TfLiteTensor* input_tensor = interpreter_->tensor(input_tensor_index); + if ((input_tensor->type != kTfLiteUInt8) && + (input_tensor->type != kTfLiteFloat32)) { + LOG(ERROR) << "Unsupported tensor input type: " << input_tensor->type; + return false; + } + if (input_tensor->dims->size != 4) { + LOG(ERROR) << "Expected input tensor dimension size to be 4, got " + << input_tensor->dims->size; + return false; + } + input_depth_ = input_tensor->dims->data[3]; + input_width_ = input_tensor->dims->data[2]; + input_height_ = input_tensor->dims->data[1]; + input_size_ = input_height_ * input_width_ * input_depth_ * batch_size_; + return true; +} + +bool MobileSSDTfLiteClient::InitializeInterpreter( + const protos::ClientOptions& options) { + if (options.prefer_nnapi_delegate()) { + LOG(ERROR) << "NNAPI not supported."; + return false; + } + interpreter_->UseNNAPI(false); + +#ifdef ENABLE_EDGETPU + interpreter_->SetExternalContext(kTfLiteEdgeTpuContext, + edge_tpu_context_.get()); +#endif + + if 
(options.num_threads() > 0) { + interpreter_->SetNumThreads(options.num_threads()); + } + + if (interpreter_->inputs().size() != 1) { + LOG(ERROR) << "Invalid number of interpreter inputs: " + << interpreter_->inputs().size(); + return false; + } + + if (interpreter_->AllocateTensors() != kTfLiteOk) { + LOG(ERROR) << "Failed to allocate tensors!"; + return false; + } + return true; +} + +bool MobileSSDTfLiteClient::CheckOutputSizes() { + int expected_output_locations_size = + anchors_.y_size() * (kNumBoundingBoxCoordinates + 2 * num_keypoints_); + if (output_locations_size_ != expected_output_locations_size) { + LOG(ERROR) + << "The dimension of output_locations must be [num_anchors x 4]. Got " + << output_locations_size_ << " but expected " + << expected_output_locations_size; + return false; + } + + // Include background class score when not in agnostic mode + int expected_output_scores_size = + anchors_.y_size() * (labelmap_.item_size() + (IsAgnosticMode() ? 0 : 1)); + if (output_scores_size_ != expected_output_scores_size) { + LOG(ERROR) + << "The dimension of output_scores is: " + "[num_anchors x (num_classes + 1)] if background class is included. " + "[num_anchors x num_classes] if background class is not included. " + "Got " + << output_scores_size_ << " but expected " + << expected_output_scores_size; + return false; + } + return true; +} + +bool MobileSSDTfLiteClient::IsQuantizedModel() const { + const int input_tensor_index = interpreter_->inputs()[0]; + const TfLiteTensor* input_tensor = interpreter_->tensor(input_tensor_index); + return input_tensor->type == kTfLiteUInt8; +} + +void MobileSSDTfLiteClient::SetZeroPointsAndScaleFactors( + bool is_quantized_model) { + // Sets initial scale to 1 and zero_points to 0. These values are only + // written over in quantized model case. 
+ location_zero_points_.assign(num_output_layers_, 0); + location_scales_.assign(num_output_layers_, 1); + score_zero_points_.assign(num_output_layers_, 0); + score_scales_.assign(num_output_layers_, 1); + + // Set scale and zero_point for quantized model + if (is_quantized_model) { + for (int layer = 0; layer < num_output_layers_; ++layer) { + const int location_tensor_index = + interpreter_->outputs()[GetBoxIndex(layer)]; + const TfLiteTensor* location_tensor = + interpreter_->tensor(location_tensor_index); + + location_zero_points_[layer] = location_tensor->params.zero_point; + location_scales_[layer] = location_tensor->params.scale; + + // Class Scores + const int score_tensor_index = + interpreter_->outputs()[GetScoreIndex(layer)]; + const TfLiteTensor* score_tensor = + interpreter_->tensor(score_tensor_index); + + score_zero_points_[layer] = score_tensor->params.zero_point; + score_scales_[layer] = score_tensor->params.scale; + } + } +} + +bool MobileSSDTfLiteClient::ComputeOutputLocationsSize( + const TfLiteTensor* location_tensor, int layer) { + const int location_tensor_size = location_tensor->dims->size; + if (location_tensor_size == 3) { + const int location_code_size = location_tensor->dims->data[2]; + const int location_num_anchors = location_tensor->dims->data[1]; + output_locations_sizes_[layer] = location_code_size * location_num_anchors; + } else if (location_tensor_size == 4) { + const int location_depth = location_tensor->dims->data[3]; + const int location_width = location_tensor->dims->data[2]; + const int location_height = location_tensor->dims->data[1]; + output_locations_sizes_[layer] = + location_depth * location_width * location_height; + } else { + LOG(ERROR) << "Expected location_tensor_size of 3 or 4, got " + << location_tensor_size; + return false; + } + return true; +} + +bool MobileSSDTfLiteClient::ComputeOutputScoresSize( + const TfLiteTensor* score_tensor, int layer) { + const int score_tensor_size = score_tensor->dims->size; + if (score_tensor_size == 3) { + const int score_num_classes = score_tensor->dims->data[2]; + const int score_num_anchors = score_tensor->dims->data[1]; + output_scores_sizes_[layer] = score_num_classes * score_num_anchors; + } else if (score_tensor_size == 4) { + const int score_depth = score_tensor->dims->data[3]; + const int score_width = score_tensor->dims->data[2]; + const int score_height = score_tensor->dims->data[1]; + output_scores_sizes_[layer] = score_depth * score_width * score_height; + } else { + LOG(ERROR) << "Expected score_tensor_size of 3 or 4, got " + << score_tensor_size; + return false; + } + return true; +} + +bool MobileSSDTfLiteClient::ComputeOutputLayerCount() { + // Compute number of layers in the output model + const int num_outputs = interpreter_->outputs().size(); + if (num_outputs == 0) { + LOG(ERROR) << "Number of outputs cannot be zero."; + return false; + } + if (num_outputs % 2 != 0) { + LOG(ERROR) << "Number of outputs must be evenly divisible by 2. 
Actual " + "number of outputs: " + << num_outputs; + return false; + } + num_output_layers_ = num_outputs / 2; + return true; +} + +bool MobileSSDTfLiteClient::ComputeOutputSize() { + if (!ComputeOutputLayerCount()) { + return false; + } + + // Allocate output arrays for box location and class scores + output_locations_sizes_.resize(num_output_layers_); + output_scores_sizes_.resize(num_output_layers_); + output_locations_size_ = 0; + output_scores_size_ = 0; + // This loop calculates the total size of data occupied by the output as well + // as the size for everylayer of the model. For quantized case, it also stores + // the offset and scale factor needed to transform the data back to floating + // point values. + for (int layer = 0; layer < num_output_layers_; ++layer) { + // Calculate sizes of Box locations output + const int location_tensor_index = + interpreter_->outputs()[GetBoxIndex(layer)]; + const TfLiteTensor* location_tensor = + interpreter_->tensor(location_tensor_index); + if (!ComputeOutputLocationsSize(location_tensor, layer)) { + return false; + } + output_locations_size_ += output_locations_sizes_[layer]; + + // Class Scores + const int score_tensor_index = + interpreter_->outputs()[GetScoreIndex(layer)]; + const TfLiteTensor* score_tensor = interpreter_->tensor(score_tensor_index); + if (!ComputeOutputScoresSize(score_tensor, layer)) { + return false; + } + output_scores_size_ += output_scores_sizes_[layer]; + } + return true; +} + +void MobileSSDTfLiteClient::NormalizeInputImage(const uint8_t* input_data, + float* normalized_input_data) { + float reciprocal_std_value_ = (1.0f / std_value_); + for (int i = 0; i < input_size_; i++, input_data++, normalized_input_data++) { + *normalized_input_data = + reciprocal_std_value_ * (static_cast(*input_data) - mean_value_); + } +} + +void MobileSSDTfLiteClient::GetOutputBoxesAndScoreTensorsFromFloat() { + float* output_score_pointer = output_scores_.data(); + float* output_location_pointer = output_locations_.data(); + for (int batch = 0; batch < batch_size_; ++batch) { + for (int layer = 0; layer < num_output_layers_; ++layer) { + // Write output location data + const float* location_data = + interpreter_->typed_output_tensor(GetBoxIndex(layer)) + + batch * output_locations_sizes_[layer]; + memcpy(output_location_pointer, location_data, + output_locations_sizes_[layer] * sizeof(float)); + output_location_pointer += output_locations_sizes_[layer]; + + // Write output class scores + const float* score_data = + interpreter_->typed_output_tensor(GetScoreIndex(layer)) + + batch * output_scores_sizes_[layer]; + memcpy(output_score_pointer, score_data, + output_scores_sizes_[layer] * sizeof(float)); + output_score_pointer += output_scores_sizes_[layer]; + } + } +} + +void MobileSSDTfLiteClient::GetOutputBoxesAndScoreTensorsFromUInt8() { + // The box locations and score are now convert back to floating point from + // their quantized version by shifting and scaling the output tensors on an + // element-wise basis + auto output_score_it = output_scores_.begin(); + auto output_location_it = output_locations_.begin(); + for (int batch = 0; batch < batch_size_; ++batch) { + for (int layer = 0; layer < num_output_layers_; ++layer) { + // Write output location data + const auto location_scale = location_scales_[layer]; + const auto location_zero_point = location_zero_points_[layer]; + const auto* location_data = + interpreter_->typed_output_tensor(GetBoxIndex(layer)); + for (int j = 0; j < output_locations_sizes_[layer]; + ++j, 
++output_location_it) { + *output_location_it = + location_scale * + (static_cast( + location_data[j + batch * output_locations_sizes_[layer]]) - + location_zero_point); + } + + // write output class scores + const auto score_scale = score_scales_[layer]; + const auto score_zero_point = score_zero_points_[layer]; + const auto* score_data = + interpreter_->typed_output_tensor(GetScoreIndex(layer)); + for (int j = 0; j < output_scores_sizes_[layer]; ++j, ++output_score_it) { + *output_score_it = + score_scale * + (static_cast( + score_data[j + batch * output_scores_sizes_[layer]]) - + score_zero_point); + } + } + } +} + +bool MobileSSDTfLiteClient::FloatInference(const uint8_t* input_data) { + auto* input = interpreter_->typed_input_tensor(0); + if (input == nullptr) { + LOG(ERROR) << "Input tensor cannot be null for inference."; + return false; + } + // The non-quantized model assumes float input + // So we normalize the uint8 input image using mean_value_ + // and std_value_ + NormalizeInputImage(input_data, input); + // Applies model to the data. The data will be store in the output tensors + if (interpreter_->Invoke() != kTfLiteOk) { + LOG(ERROR) << "Invoking interpreter resulted in non-okay status."; + return false; + } + // Parse outputs + if (RequiresPostProcessing()) { + GetOutputBoxesAndScoreTensorsFromFloat(); + } + return true; +} + +bool MobileSSDTfLiteClient::QuantizedInference(const uint8_t* input_data) { + auto* input = interpreter_->typed_input_tensor(0); + if (input == nullptr) { + LOG(ERROR) << "Input tensor cannot be null for inference."; + return false; + } + memcpy(input, input_data, input_size_); + + // Applies model to the data. The data will be store in the output tensors + if (interpreter_->Invoke() != kTfLiteOk) { + LOG(ERROR) << "Invoking interpreter resulted in non-okay status."; + return false; + } + // Parse outputs + if (RequiresPostProcessing()) { + GetOutputBoxesAndScoreTensorsFromUInt8(); + } + return true; +} + +bool MobileSSDTfLiteClient::Inference(const uint8_t* input_data) { + if (input_data == nullptr) { + LOG(ERROR) << "input_data cannot be null for inference."; + return false; + } + if (IsQuantizedModel()) + return QuantizedInference(input_data); + else + return FloatInference(input_data); + return true; +} + +bool MobileSSDTfLiteClient::NoPostProcessNoAnchors( + protos::DetectionResults* detections) { + const float* boxes = interpreter_->typed_output_tensor(0); + const float* classes = interpreter_->typed_output_tensor(1); + const float* confidences = interpreter_->typed_output_tensor(2); + int num_detections = + static_cast(interpreter_->typed_output_tensor(3)[0]); + int max_detections = options_.max_detections() > 0 ? 
options_.max_detections() + : num_detections; + + std::vector sorted_indices; + sorted_indices.resize(num_detections); + for (int i = 0; i < num_detections; ++i) sorted_indices[i] = i; + std::sort(sorted_indices.begin(), sorted_indices.end(), + [&confidences](const int i, const int j) { + return confidences[i] > confidences[j]; + }); + + for (int i = 0; + i < num_detections && detections->detection_size() < max_detections; + ++i) { + const int index = sorted_indices[i]; + if (confidences[index] < options_.score_threshold()) { + break; + } + const int class_index = classes[index]; + protos::Detection* detection = detections->add_detection(); + detection->add_score(confidences[index]); + detection->add_class_index(class_index); + // For some reason it is not OK to add class/label names here, they appear + // to mess up the drishti graph. + // detection->add_display_name(GetLabelDisplayName(class_index)); + // detection->add_class_name(GetLabelName(class_index)); + + protos::BoxCornerEncoding* box = detection->mutable_box(); + box->add_ymin(boxes[4 * index]); + box->add_xmin(boxes[4 * index + 1]); + box->add_ymax(boxes[4 * index + 2]); + box->add_xmax(boxes[4 * index + 3]); + } + return true; +} + +bool MobileSSDTfLiteClient::SetBatchSize(int batch_size) { + if (!this->MobileSSDClient::SetBatchSize(batch_size)) { + LOG(ERROR) << "Error in SetBatchSize()"; + return false; + } + input_size_ = input_height_ * input_width_ * input_depth_ * batch_size_; + + for (int input : interpreter_->inputs()) { + auto* old_dims = interpreter_->tensor(input)->dims; + std::vector new_dims(old_dims->data, old_dims->data + old_dims->size); + new_dims[0] = batch_size; + if (interpreter_->ResizeInputTensor(input, new_dims) != kTfLiteOk) { + LOG(ERROR) << "Unable to resize input for new batch size"; + return false; + } + } + + if (interpreter_->AllocateTensors() != kTfLiteOk) { + LOG(ERROR) << "Unable to reallocate tensors"; + return false; + } + + return true; +} + +void MobileSSDTfLiteClient::LoadLabelMap() { + if (options_.has_external_files()) { + if (options_.external_files().has_label_map_file_content() || + options_.external_files().has_label_map_file_name()) { + CHECK(LoadLabelMapFromFileOrBytes( + options_.external_files().label_map_file_name(), + options_.external_files().label_map_file_content(), &labelmap_)); + } else { + LOG(ERROR) << "MobileSSDTfLiteClient: both " + "'external_files.label_map_file_content` and " + "'external_files.label_map_file_name` are empty" + " which is invalid."; + } + } +} + +} // namespace tflite +} // namespace lstm_object_detection diff --git a/models/research/lstm_object_detection/tflite/mobile_ssd_tflite_client.h b/models/research/lstm_object_detection/tflite/mobile_ssd_tflite_client.h new file mode 100644 index 0000000000000000000000000000000000000000..40af225549cefafea8fab1450ccafefbdc1aed4d --- /dev/null +++ b/models/research/lstm_object_detection/tflite/mobile_ssd_tflite_client.h @@ -0,0 +1,115 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_MODELS_LSTM_OBJECT_DETECTION_TFLITE_MOBILE_SSD_TFLITE_CLIENT_H_ +#define TENSORFLOW_MODELS_LSTM_OBJECT_DETECTION_TFLITE_MOBILE_SSD_TFLITE_CLIENT_H_ + +#include +#include + +#include "absl/memory/memory.h" +#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h" +#include "tensorflow/lite/interpreter.h" +#include "tensorflow/lite/model.h" +#include "mobile_ssd_client.h" +#include "protos/anchor_generation_options.pb.h" +#ifdef ENABLE_EDGETPU +#include "libedgetpu/edgetpu.h" +#endif // ENABLE_EDGETPU + +namespace lstm_object_detection { +namespace tflite { + +class MobileSSDTfLiteClient : public MobileSSDClient { + public: + MobileSSDTfLiteClient(); + explicit MobileSSDTfLiteClient( + std::unique_ptr<::tflite::OpResolver> resolver); + ~MobileSSDTfLiteClient() override = default; + + protected: + // By default CreateOpResolver will create + // tflite::ops::builtin::BuiltinOpResolver. Overriding the function allows the + // client to use custom op resolvers. + virtual std::unique_ptr<::tflite::MutableOpResolver> CreateOpResolver(); + + bool InitializeClient(const protos::ClientOptions& options) override; + + virtual bool InitializeInterpreter(const protos::ClientOptions& options); + virtual bool ComputeOutputLayerCount(); + + bool Inference(const uint8_t* input_data) override; + + bool NoPostProcessNoAnchors(protos::DetectionResults* detections) override; + + // Use with caution. Not all models work correctly when resized to larger + // batch sizes. This will resize the input tensor to have the given batch size + // and propagate the batch dimension throughout the graph. + bool SetBatchSize(int batch_size) override; + + // This can be overridden in a subclass to load label map from file + void LoadLabelMap() override; + + // This can be overridden in a subclass to return customized box coder. + virtual const protos::BoxCoder GetBoxCoder() { return protos::BoxCoder(); } + + virtual void SetImageNormalizationParams(); + void NormalizeInputImage(const uint8_t* input_data, + float* normalized_input_data); + void GetOutputBoxesAndScoreTensorsFromFloat(); + + virtual bool IsQuantizedModel() const; + +#ifdef ENABLE_EDGETPU + std::unique_ptr edge_tpu_context_; +#endif + + std::unique_ptr<::tflite::FlatBufferModel> model_; + std::unique_ptr<::tflite::MutableOpResolver> resolver_; + std::unique_ptr<::tflite::Interpreter> interpreter_; + + private: + // MobileSSDTfLiteClient is neither copyable nor movable. + MobileSSDTfLiteClient(const MobileSSDTfLiteClient&) = delete; + MobileSSDTfLiteClient& operator=(const MobileSSDTfLiteClient&) = delete; + + // Helper functions used by Initialize Client. + virtual int GetNumberOfKeypoints() const; + + // Returns true if the client is in class-agnostic mode. This function can be + // overridden in a subclass to return an ad-hoc value (e.g. hard-coded). + virtual bool IsAgnosticMode() const { return agnostic_mode_; } + bool CheckOutputSizes(); + bool ComputeOutputSize(); + bool SetInputShape(); + void SetZeroPointsAndScaleFactors(bool is_quantized_model); + bool ComputeOutputLocationsSize(const TfLiteTensor* location_tensor, + int layer); + bool ComputeOutputScoresSize(const TfLiteTensor* score_tensor, int layer); + + // The agnostic_mode_ field should never be directly read. Always use its + // virtual accessor method: IsAgnosticMode(). 
+ bool agnostic_mode_; + + // Helper functions used by Inference functions + bool FloatInference(const uint8_t* input_data); + bool QuantizedInference(const uint8_t* input_data); + void GetOutputBoxesAndScoreTensorsFromUInt8(); +}; + +} // namespace tflite +} // namespace lstm_object_detection + +#endif // TENSORFLOW_MODELS_LSTM_OBJECT_DETECTION_TFLITE_MOBILE_SSD_TFLITE_CLIENT_H_ diff --git a/models/research/lstm_object_detection/tflite/protos/BUILD b/models/research/lstm_object_detection/tflite/protos/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..80d50ed7ec82ad555d96805233d576e1a7326901 --- /dev/null +++ b/models/research/lstm_object_detection/tflite/protos/BUILD @@ -0,0 +1,61 @@ +package( + default_visibility = ["//visibility:public"], +) + +licenses(["notice"]) + +proto_library( + name = "box_encodings_proto", + srcs = ["box_encodings.proto"], +) + +cc_proto_library( + name = "box_encodings_cc_proto", + deps = [":box_encodings_proto"], +) + +proto_library( + name = "detections_proto", + srcs = ["detections.proto"], + deps = [":box_encodings_proto"], +) + +cc_proto_library( + name = "detections_cc_proto", + deps = [":detections_proto"], +) + +proto_library( + name = "labelmap_proto", + srcs = ["labelmap.proto"], +) + +cc_proto_library( + name = "labelmap_cc_proto", + deps = [":labelmap_proto"], +) + +proto_library( + name = "mobile_ssd_client_options_proto", + srcs = ["mobile_ssd_client_options.proto"], + deps = [ + ":anchor_generation_options_proto", + ":box_encodings_proto", + ":labelmap_proto", + ], +) + +cc_proto_library( + name = "mobile_ssd_client_options_cc_proto", + deps = [":mobile_ssd_client_options_proto"], +) + +proto_library( + name = "anchor_generation_options_proto", + srcs = ["anchor_generation_options.proto"], +) + +cc_proto_library( + name = "anchor_generation_options_cc_proto", + deps = [":anchor_generation_options_proto"], +) diff --git a/models/research/lstm_object_detection/tflite/protos/anchor_generation_options.proto b/models/research/lstm_object_detection/tflite/protos/anchor_generation_options.proto new file mode 100644 index 0000000000000000000000000000000000000000..d164c239f93f51205e360623f58cbbd441c62ab2 --- /dev/null +++ b/models/research/lstm_object_detection/tflite/protos/anchor_generation_options.proto @@ -0,0 +1,53 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +syntax = "proto2"; + +package lstm_object_detection.tflite.protos; + + + +// This is dervice from TensorFlow's SsdAnchorGenerator proto that is used to +// configures TensorFlow's anchor generator. 
+// object_detection/protos/ssd_anchor_generator.proto +message AnchorGenerationOptions { + // The input image width in pixels + optional int32 image_width = 1; + + // The input image height in pixels + optional int32 image_height = 2; + + // The base anchor width in pixels + optional int32 base_anchor_width = 3; + + // The base anchor height in pixels + optional int32 base_anchor_height = 4; + + // The minimum anchor scaling (should be < 1.0) + optional float min_anchor_scale = 5; + + // The maximum anchor scaling + optional float max_anchor_scale = 6; + + // List of aspect ratios to generate anchors for. Aspect ratio is specified as + // (width/height) + repeated float anchor_aspect_ratios = 7 [packed = true]; + + // List of strides in pixels for each layer + repeated int32 anchor_strides = 8 [packed = true]; + + // List of offset in pixels for each layer + repeated int32 anchor_offsets = 9 [packed = true]; +} diff --git a/models/research/lstm_object_detection/tflite/protos/box_encodings.proto b/models/research/lstm_object_detection/tflite/protos/box_encodings.proto new file mode 100644 index 0000000000000000000000000000000000000000..d701914e93d214b905198ca753d5f5adb1cd172f --- /dev/null +++ b/models/research/lstm_object_detection/tflite/protos/box_encodings.proto @@ -0,0 +1,97 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +syntax = "proto2"; + +package lstm_object_detection.tflite.protos; + +// The bounding box representation by center location and width/height. +// Also includes optional keypoint coordinates. +// It is a default representation in modern object detection systems. +message CenterSizeEncoding { + // Encoded anchor box center. + repeated float y = 1; + repeated float x = 2; + + // Encoded anchor box height. + repeated float h = 3; + + // Encoded anchor box width. + repeated float w = 4; + + // Encoded keypoint coordinates. + repeated float keypoint_y = 5; + repeated float keypoint_x = 6; +} + +// The scaling factors for decoding predicted offsets with CenterSizeEncoding. +// For example, given a prediction and an anchor in CenterSizeEncoding, the +// decoded location is: +// y = prediction.y / coder.y_scale() * anchor.h + anchor.y; +// x = prediction.x / coder.x_scale() * anchor.w + anchor.x; +// h = exp(prediction.h / coder.h_scale()) * anchor.h; +// w = exp(prediction.w / coder.w_scale()) * anchor.w; +// keypoint_y = prediction.keypoint_y / coder.keypoint_y_scale() * anchor.h +// + anchor.y; +// keypoint_x = prediction.keypoint_x / coder.keypoint_x_scale() * anchor.w +// + anchor.x; +// See mobile_ssd::DecodeCenterSizeBoxes for more details. +// This coder is compatible with models trained using +// object_detection.protos.FasterRcnnBoxCoder and +// object_detection.protos.KeypointBoxCoder. +message CenterSizeOffsetCoder { + // Scale factor for encoded box center offset. 
+ optional float y_scale = 1 [default = 10.0]; + optional float x_scale = 2 [default = 10.0]; + + // Scale factor for encoded box height offset. + optional float h_scale = 3 [default = 5.0]; + + // Scale factor for encoded box width offset. + optional float w_scale = 4 [default = 5.0]; + + // Scale factor for encoded keypoint coordinate offset. + optional float keypoint_y_scale = 5 [default = 10.0]; + optional float keypoint_x_scale = 6 [default = 10.0]; +} + +// The canonical representation of bounding box. +message BoxCornerEncoding { + // Box corners. + repeated float ymin = 1; + repeated float xmin = 2; + repeated float ymax = 3; + repeated float xmax = 4; + + // Keypoint coordinates. + repeated float keypoint_y = 5; + repeated float keypoint_x = 6; +} + +// The scaling value used to adjust predicted bounding box corners. +// For example, given a prediction in BoxCornerEncoding and an anchor in +// CenterSizeEncoding, the decoded location is: +// ymin = prediction.ymin * coder.stddev + anchor.y - anchor.h / 2 +// xmin = prediction.xmin * coder.stddev + anchor.x - anchor.w / 2 +// ymax = prediction.ymax * coder.stddev + anchor.y + anchor.h / 2 +// xmax = prediction.xmax * coder.stddev + anchor.x + anchor.w / 2 +// This coder doesn't support keypoints. +// See mobile_ssd::DecodeBoxCornerBoxes for more details. +// This coder is compatible with models trained using +// object_detection.protos.MeanStddevBoxCoder. +message BoxCornerOffsetCoder { + // The standard deviation used to encode and decode boxes. + optional float stddev = 1 [default = 0.01]; +} diff --git a/models/research/lstm_object_detection/tflite/protos/detections.proto b/models/research/lstm_object_detection/tflite/protos/detections.proto new file mode 100644 index 0000000000000000000000000000000000000000..7dc46a1990e4c89eba837ded12d98079ee71efd3 --- /dev/null +++ b/models/research/lstm_object_detection/tflite/protos/detections.proto @@ -0,0 +1,39 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +syntax = "proto2"; + +package lstm_object_detection.tflite.protos; + +import "protos/box_encodings.proto"; + +// DetectionResults is a list of Detection. +message DetectionResults { + repeated Detection detection = 1; +} + +// Detection consists of a bounding box, class confidences and indices. +message Detection { + // Each detection message consists of only one bounding box. + optional BoxCornerEncoding box = 1; + // A box can be associated with multiple confidences for multiple classes. + repeated float score = 2; + repeated int32 class_index = 3; + // Optional, for readability and easier access for external modules. + // A unique name that identifies the class, e.g. a MID. + repeated string class_name = 4; + // A human readable name of the class. 
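+  // For example, a class with name "/m/0frqm" might carry the display_name
+  // "Envelope".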
+ repeated string display_name = 5; +} diff --git a/models/research/lstm_object_detection/tflite/protos/labelmap.proto b/models/research/lstm_object_detection/tflite/protos/labelmap.proto new file mode 100644 index 0000000000000000000000000000000000000000..1c99931ff9d1a355b7dd577c9f563bdd03ccaad0 --- /dev/null +++ b/models/research/lstm_object_detection/tflite/protos/labelmap.proto @@ -0,0 +1,67 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// This proto defines the labelmap used in the detection models, which maps +// the numerical class index outputs to KG mid or human readable string of +// object class name. +// +// An example labelmap looks like the following: +// item { +// name: "/m/0frqm" +// id: 1 +// display_name: "Envelope" +// } +// item { +// name: "/m/02dl1y" +// id: 2 +// display_name: "Hat" +// } +// item { +// name: "/m/01krhy" +// id: 3 +// display_name: "Tiara" +// } + +syntax = "proto2"; + +package lstm_object_detection.tflite.protos; + + + +message StringIntLabelMapItem { + optional string name = 1; + optional int32 id = 2; + repeated float embedding = 3 [packed = true]; + optional string display_name = 4; + // Optional list of children used to represent a hierarchy. + // + // E.g.: + // + // item { + // name: "/m/02xwb" # Fruit + // child_name: "/m/014j1m" # Apple + // child_name: "/m/0388q" # Grape + // ... + // } + // item { + // name: "/m/014j1m" # Apple + // ... + // } + repeated string child_name = 5; +} + +message StringIntLabelMapProto { + repeated StringIntLabelMapItem item = 1; +} diff --git a/models/research/lstm_object_detection/tflite/protos/mobile_ssd_client_options.proto b/models/research/lstm_object_detection/tflite/protos/mobile_ssd_client_options.proto new file mode 100644 index 0000000000000000000000000000000000000000..d501c213c11476675250f232b430e3ab1b62dac4 --- /dev/null +++ b/models/research/lstm_object_detection/tflite/protos/mobile_ssd_client_options.proto @@ -0,0 +1,122 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +syntax = "proto2"; + +package lstm_object_detection.tflite.protos; + +import "protos/anchor_generation_options.proto"; +import "protos/box_encodings.proto"; + +// Next ID: 17 +message ClientOptions { + // The name of the Mobile SSD Client. 
+ optional string mobile_ssd_client_name = 1; + + // The maximum number of detections to return. + optional uint32 max_detections = 2 [default = 10]; + + // The maximum number of categories to return per detection. + optional uint32 max_categories = 3 [default = 1]; + + // The global score threshold below which detections are rejected. + optional float score_threshold = 4 [default = 0.0]; + + // The threshold on intersection-over-union used by non-maxima suppression. + optional float iou_threshold = 5 [default = 0.3]; + + // Optional whitelist of class names. If non-empty, detections whose class + // name is not in this set will be filtered out. Duplicate or unknown class + // names are ignored. + repeated string class_name_whitelist = 6; + + // SSD in single class agnostic model. + optional bool agnostic_mode = 7 [default = false]; + + // Fully convolutional mode, which requires on-the-fly anchor generation. + optional bool fully_conv = 8 [default = false]; + + // Quantized model. + optional bool quantize = 9 [default = false]; + + // Number of keypoints. + optional uint32 num_keypoints = 10 [default = 0]; + + // Optional anchor generations options. This can be used to generate + // anchors for an SSD model. It is utilized in + // MobileSSDTfLiteClient::LoadAnchors() + optional AnchorGenerationOptions anchor_generation_options = 12; + + // Optional box coder specifications. This can be used for models trained + // with a customized box coder. If unspecified, it will use + // CenterSizeOffsetCoder and its default parameters. + optional BoxCoder box_coder = 13; + + // The external model files used to create the detector. + // This is an alternative to registered models, where you specify external + // model via the following: + // - model using model_file_name or model_file_content + // - labelmap using label_map_file_content + // - anchors using anchor_generation_options,proto (TODO: add support for + // filename as well) + optional ExternalFiles external_files = 16; + + message ExternalFiles { + // Path to the model file in FlatBuffer format. + optional string model_file_name = 1; + + // Content of the model file. If provided, this takes precedence over the + // model_file_name field. + optional bytes model_file_content = 2; + + // Path to the label map file. + optional string label_map_file_name = 4; + + // Content of the label map file. If provided, this takes precedence over + // the label_map_file_name field. + optional bytes label_map_file_content = 3; + + // Path to the anchor file. + optional string anchor_file_name = 5; + + // Content of the anchor file. If provided, this takes precedence over + // the anchor_file_name field. + optional bytes anchor_file_content = 6; + } + + // Whether to use NNAPI delegate for hardware acceleration. + // If it fails, it will fall back to the normal CPU execution. + optional bool prefer_nnapi_delegate = 14; + + // Number of threads to be used by TFlite interpreter for SSD inference. Does + // single-threaded inference by default. 
+ optional int32 num_threads = 15 [default = 1]; + + extensions 1000 to max; +} + +message BoxCoder { + oneof box_coder_oneof { + CenterSizeOffsetCoder center_size_offset_coder = 1; + BoxCornerOffsetCoder box_corner_offset_coder = 2; + } +} + +message ModelData { + oneof source { + string model_file = 1; + bytes embedded_model = 2; + } +} diff --git a/models/research/lstm_object_detection/tflite/protos/proto_config.asciipb b/models/research/lstm_object_detection/tflite/protos/proto_config.asciipb new file mode 100644 index 0000000000000000000000000000000000000000..e01dc7c4808f5764041a25177293cb157e165d7e --- /dev/null +++ b/models/research/lstm_object_detection/tflite/protos/proto_config.asciipb @@ -0,0 +1,5 @@ +# This file necessary for Portable Proto library +allow_all: true + +# Other configuration options: +optimize_mode: LITE_RUNTIME diff --git a/models/research/lstm_object_detection/tflite/utils/BUILD b/models/research/lstm_object_detection/tflite/utils/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..6a6d19b8e130045bf0f11ab013a977ae97c39389 --- /dev/null +++ b/models/research/lstm_object_detection/tflite/utils/BUILD @@ -0,0 +1,48 @@ +package( + default_visibility = ["//visibility:public"], +) + +licenses(["notice"]) + +cc_library( + name = "conversion_utils", + srcs = ["conversion_utils.cc"], + hdrs = ["conversion_utils.h"], + deps = [ + "@com_google_absl//absl/base:core_headers", + "@com_google_glog//:glog", + ], +) + +cc_test( + name = "conversion_utils_test", + srcs = ["conversion_utils_test.cc"], + deps = [ + ":conversion_utils", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "ssd_utils", + srcs = ["ssd_utils.cc"], + hdrs = ["ssd_utils.h"], + deps = [ + "//protos:anchor_generation_options_cc_proto", + "//protos:box_encodings_cc_proto", + "//protos:detections_cc_proto", + "@com_google_absl//absl/strings", + "@com_google_glog//:glog", + ], +) + +cc_library( + name = "file_utils", + srcs = ["file_utils.cc"], + hdrs = ["file_utils.h"], + deps = [ + "//protos:labelmap_cc_proto", + "@com_google_absl//absl/strings", + "@com_google_glog//:glog", + ], +) diff --git a/models/research/lstm_object_detection/tflite/utils/conversion_utils.cc b/models/research/lstm_object_detection/tflite/utils/conversion_utils.cc new file mode 100644 index 0000000000000000000000000000000000000000..072d2ba1853877ea1907c5a9b7ede7564a25d4d5 --- /dev/null +++ b/models/research/lstm_object_detection/tflite/utils/conversion_utils.cc @@ -0,0 +1,65 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/
+
+#include "utils/conversion_utils.h"
+
+#include <glog/logging.h>
+
+namespace lstm_object_detection {
+namespace tflite {
+
+bool HasPadding(int width, int height, int bytes_per_pixel, int bytes_per_row) {
+  CHECK_LT(0, width);
+  CHECK_LT(0, height);
+  CHECK(bytes_per_pixel == 1 || bytes_per_pixel == 3 || bytes_per_pixel == 4);
+  CHECK_LE(width * bytes_per_pixel, bytes_per_row);
+
+  if (bytes_per_pixel == 4) {
+    return true;
+  }
+  return (width * bytes_per_pixel < bytes_per_row);
+}
+
+std::vector<uint8_t> RemovePadding(const uint8_t* image_data, int width,
+                                   int height, int bytes_per_pixel,
+                                   int bytes_per_row) {
+  CHECK_LT(0, width);
+  CHECK_LT(0, height);
+  CHECK(bytes_per_pixel == 1 || bytes_per_pixel == 3 || bytes_per_pixel == 4);
+  CHECK_LE(width * bytes_per_pixel, bytes_per_row);
+
+  const int unpadded_bytes_per_pixel = (bytes_per_pixel == 1 ? 1 : 3);
+  const int pixel_padding = (bytes_per_pixel == 4 ? 1 : 0);
+  std::vector<uint8_t> unpadded_image_data(width * height *
+                                           unpadded_bytes_per_pixel);
+
+  const uint8_t* row_ptr = image_data;
+  int index = 0;
+  for (int y = 0; y < height; ++y) {
+    const uint8_t* ptr = row_ptr;
+    for (int x = 0; x < width; ++x) {
+      for (int d = 0; d < unpadded_bytes_per_pixel; ++d) {
+        unpadded_image_data[index++] = *ptr++;
+      }
+      ptr += pixel_padding;
+    }
+    row_ptr += bytes_per_row;
+  }
+
+  return unpadded_image_data;
+}
+
+}  // namespace tflite
+}  // namespace lstm_object_detection
diff --git a/models/research/lstm_object_detection/tflite/utils/conversion_utils.h b/models/research/lstm_object_detection/tflite/utils/conversion_utils.h
new file mode 100644
index 0000000000000000000000000000000000000000..964a38103dd9042218e8c7bda495871efeecc516
--- /dev/null
+++ b/models/research/lstm_object_detection/tflite/utils/conversion_utils.h
@@ -0,0 +1,47 @@
+/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// Lightweight utilities related to conversion of input images.
+
+#ifndef TENSORFLOW_MODELS_LSTM_OBJECT_DETECTION_TFLITE_UTILS_CONVERSION_UTILS_H_
+#define TENSORFLOW_MODELS_LSTM_OBJECT_DETECTION_TFLITE_UTILS_CONVERSION_UTILS_H_
+
+#include <cstdint>
+
+#include <vector>
+
+namespace lstm_object_detection {
+namespace tflite {
+
+// Determines whether a call to 'RemovePadding()' is needed to make the given
+// pixel data suitable for the model input layer. All integers must be
+// positive, 'bytes_per_row' must be sufficiently large, and 'bytes_per_pixel'
+// may only be 1, 3 or 4, implying a grayscale, RGB, or RGBA image,
+// respectively. Returns true iff excessive bytes exist in the associated
+// pixel data.
+bool HasPadding(int width, int height, int bytes_per_pixel, int bytes_per_row);
+
+// Removes padding at the pixel and row level of pixel data which is stored in
+// the usual row-major order ("interleaved").
Produces pixel data which is +// suitable for model input layer. If 'HasPadding()' is false then this +// function will return an identical copy of 'image'. For restrictions on the +// integer parameters see comment on 'HasPadding()'. +std::vector RemovePadding(const uint8_t* image, int width, int height, + int bytes_per_pixel, int bytes_per_row); + +} // namespace tflite +} // namespace lstm_object_detection + +#endif // TENSORFLOW_MODELS_LSTM_OBJECT_DETECTION_TFLITE_UTILS_CONVERSION_UTILS_H_ diff --git a/models/research/lstm_object_detection/tflite/utils/conversion_utils_test.cc b/models/research/lstm_object_detection/tflite/utils/conversion_utils_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..97ddf3c7dffd98f43ff3d12a7bfb8f39bc538f03 --- /dev/null +++ b/models/research/lstm_object_detection/tflite/utils/conversion_utils_test.cc @@ -0,0 +1,163 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "utils/conversion_utils.h" + +#include + +#include +#include +#include "gtest/gtest.h" + +using testing::ContainerEq; + +namespace lstm_object_detection { +namespace tflite { +namespace { + +TEST(ConversionUtilsTests, HasPaddingNonPositiveDimensions) { + EXPECT_DEATH(HasPadding(/* width= */ 0, /* height= */ 4, + /* bytes_per_pixel= */ 4, /* bytes_per_row= */ 12), + ""); + EXPECT_DEATH(HasPadding(/* width= */ 3, /* height= */ 0, + /* bytes_per_pixel= */ 4, /* bytes_per_row= */ 12), + ""); +} + +TEST(ConversionUtilsTests, HasPaddingIllegalDepth) { + for (int bytes_per_pixel : {-1, 0, 2, 5, 6}) { + EXPECT_DEATH(HasPadding(/* width= */ 3, /* height= */ 4, bytes_per_pixel, + /* bytes_per_row= */ 12), + ""); + } +} + +TEST(ConversionUtilsTests, HasPaddingWithRGBAImage) { + const int kWidth = 3; + const int kHeight = 4; + const int kBytesPerPixel = 4; + EXPECT_DEATH( + HasPadding(kWidth, kHeight, kBytesPerPixel, /* bytes_per_row= */ 11), ""); + EXPECT_TRUE( + HasPadding(kWidth, kHeight, kBytesPerPixel, /* bytes_per_row= */ 12)); + EXPECT_TRUE( + HasPadding(kWidth, kHeight, kBytesPerPixel, /* bytes_per_row= */ 13)); +} + +TEST(ConversionUtilsTests, HasPaddingWithRGBImage) { + const int kWidth = 3; + const int kHeight = 4; + const int kBytesPerPixel = 3; + EXPECT_DEATH( + HasPadding(kWidth, kHeight, kBytesPerPixel, /* bytes_per_row= */ 8), ""); + EXPECT_FALSE( + HasPadding(kWidth, kHeight, kBytesPerPixel, /* bytes_per_row= */ 9)); + EXPECT_TRUE( + HasPadding(kWidth, kHeight, kBytesPerPixel, /* bytes_per_row= */ 10)); +} + +TEST(ConversionUtilsTests, HasPaddingWithGrayscaleImage) { + const int kWidth = 3; + const int kHeight = 4; + const int kBytesPerPixel = 1; + EXPECT_DEATH( + HasPadding(kWidth, kHeight, kBytesPerPixel, + /* bytes_per_row= */ 2), ""); + EXPECT_FALSE( + HasPadding(kWidth, kHeight, kBytesPerPixel, + /* bytes_per_row= */ 3)); + EXPECT_TRUE( + HasPadding(kWidth, kHeight, kBytesPerPixel, + /* bytes_per_row= */ 4)); +} + 
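+// The tests below run small synthetic images through RemovePadding() and
+// verify that per-pixel alpha bytes and per-row padding are stripped while the
+// RGB or grayscale payload is preserved in row-major order.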
+TEST(ConversionUtilsTests, RemovePaddingWithRGBAImage) { + constexpr int kWidth = 4; + constexpr int kHeight = 2; + constexpr int kBytesPerPixel = 4; + constexpr int kStride = kBytesPerPixel * kWidth * sizeof(uint8_t); + const std::vector kImageData{ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36}; + ASSERT_EQ(kHeight * kStride, kImageData.size()); + + std::vector actual = + RemovePadding(&kImageData[0], kWidth, kHeight, kBytesPerPixel, kStride); + + const std::vector kExpected = { + 1, 2, 3, 5, 6, 7, 9, 10, 11, 13, 14, 15, + 21, 22, 23, 25, 26, 27, 29, 30, 31, 33, 34, 35, + }; + EXPECT_EQ(3 * kWidth * kHeight, actual.size()); + EXPECT_THAT(actual, ContainerEq(kExpected)); +} + +TEST(ConversionUtilsTests, RemovePaddingWithRGBImage) { + constexpr int kWidth = 4; + constexpr int kHeight = 2; + constexpr int kBytesPerPixel = 3; + constexpr int kBytesPerRow = kBytesPerPixel * kWidth * sizeof(uint8_t); + const std::vector kImageData{1, 2, 3, 5, 6, 7, 9, 10, + 11, 13, 14, 15, 21, 22, 23, 25, + 26, 27, 29, 30, 31, 33, 34, 35}; + ASSERT_EQ(kHeight * kBytesPerRow, kImageData.size()); + + std::vector actual = RemovePadding(&kImageData[0], kWidth, kHeight, + kBytesPerPixel, kBytesPerRow); + + EXPECT_EQ(3 * kWidth * kHeight, actual.size()); + EXPECT_THAT(actual, ContainerEq(kImageData)); +} + +TEST(ConversionUtilsTests, RemovePaddingWithGrayscaleImage) { + constexpr int kWidth = 8; + constexpr int kHeight = 2; + constexpr int kBytesPerPixel = 1; + constexpr int kBytesPerRow = kBytesPerPixel * kWidth * sizeof(uint8_t); + const std::vector kImageData{ + 1, 2, 3, 4, 5, 6, 7, 8, 21, 22, 23, 24, 25, 26, 27, 28, + }; + ASSERT_EQ(kHeight * kBytesPerRow, kImageData.size()); + + std::vector actual = RemovePadding(&kImageData[0], kWidth, kHeight, + kBytesPerPixel, kBytesPerRow); + + EXPECT_EQ(kWidth * kHeight, actual.size()); + EXPECT_THAT(actual, ContainerEq(kImageData)); +} + +TEST(ConversionUtilsTests, RemovePaddingWithPadding) { + constexpr int kWidth = 8; + constexpr int kHeight = 2; + constexpr int kBytesPerPixel = 1; + // Pad each row with two bytes. + constexpr int kBytesPerRow = kBytesPerPixel * (kWidth + 2) * sizeof(uint8_t); + const std::vector kImageData{1, 2, 3, 4, 5, 6, 7, 8, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}; + ASSERT_EQ(kHeight * kBytesPerRow, kImageData.size()); + + std::vector actual = RemovePadding(&kImageData[0], kWidth, kHeight, + kBytesPerPixel, kBytesPerRow); + + const std::vector kExpected = { + 1, 2, 3, 4, 5, 6, 7, 8, 23, 24, 25, 26, 27, 28, 29, 30, + }; + EXPECT_EQ(kWidth * kHeight, actual.size()); + EXPECT_THAT(actual, ContainerEq(kExpected)); +} + +} // namespace +} // namespace tflite +} // namespace lstm_object_detection diff --git a/models/research/lstm_object_detection/tflite/utils/file_utils.cc b/models/research/lstm_object_detection/tflite/utils/file_utils.cc new file mode 100644 index 0000000000000000000000000000000000000000..e1c86ab2e36805b2978c90a935ded0bbd78c87b8 --- /dev/null +++ b/models/research/lstm_object_detection/tflite/utils/file_utils.cc @@ -0,0 +1,55 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "utils/file_utils.h" + +#include + +#include + +namespace lstm_object_detection { +namespace tflite { + +std::string ReadFileToString(absl::string_view filename) { + std::ifstream file(filename.data(), std::ios::binary | std::ios::ate); + CHECK(file.is_open()); + int filesize = file.tellg(); + std::string result; + result.resize(filesize); + CHECK_EQ(result.size(), filesize); + file.seekg(0); + CHECK(file.read(&(result)[0], filesize)); + file.close(); + return result; +} + +bool LoadLabelMapFromFileOrBytes(const std::string& labelmap_file, + const std::string& labelmap_bytes, + protos::StringIntLabelMapProto* labelmap) { + if (!labelmap_bytes.empty()) { + CHECK(labelmap->ParseFromString(labelmap_bytes)); + } else { + if (labelmap_file.empty()) { + LOG(ERROR) << "labelmap file empty."; + return false; + } + const std::string proto_bytes = ReadFileToString(labelmap_file); + CHECK(labelmap->ParseFromString(proto_bytes)); + } + return true; +} + +} // namespace tflite +} // namespace lstm_object_detection diff --git a/models/research/lstm_object_detection/tflite/utils/file_utils.h b/models/research/lstm_object_detection/tflite/utils/file_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..8eea3a10d107a8cd29a71e81be9cf66f20013ce3 --- /dev/null +++ b/models/research/lstm_object_detection/tflite/utils/file_utils.h @@ -0,0 +1,38 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_MODELS_LSTM_OBJECT_DETECTION_TFLITE_UTILS_FILE_UTILS_H_ +#define TENSORFLOW_MODELS_LSTM_OBJECT_DETECTION_TFLITE_UTILS_FILE_UTILS_H_ + +#include + +#include "absl/strings/string_view.h" +#include "protos/labelmap.pb.h" + +namespace lstm_object_detection { +namespace tflite { + +std::string ReadFileToString(absl::string_view filename); + +// Load labelmap from a binary proto file or bytes string. +// labelmap_bytes takes precedence over labelmap_file. 
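+// Returns false only when both labelmap_file and labelmap_bytes are empty; a
+// file that cannot be read or parsed terminates the program via CHECK.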
+bool LoadLabelMapFromFileOrBytes(const std::string& labelmap_file, + const std::string& labelmap_bytes, + protos::StringIntLabelMapProto* labelmap); + +} // namespace tflite +} // namespace lstm_object_detection + +#endif // TENSORFLOW_MODELS_LSTM_OBJECT_DETECTION_TFLITE_UTILS_FILE_UTILS_H_ diff --git a/models/research/lstm_object_detection/tflite/utils/ssd_utils.cc b/models/research/lstm_object_detection/tflite/utils/ssd_utils.cc new file mode 100644 index 0000000000000000000000000000000000000000..fdae8efee04357945ccf1b7b9542a21b48ed880b --- /dev/null +++ b/models/research/lstm_object_detection/tflite/utils/ssd_utils.cc @@ -0,0 +1,537 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "utils/ssd_utils.h" + +#include + +#include + +#include +#include "absl/strings/str_cat.h" + +namespace lstm_object_detection { +namespace tflite { +namespace { +using protos::AnchorGenerationOptions; +using protos::BoxCornerEncoding; +using protos::BoxCornerOffsetCoder; +using protos::CenterSizeEncoding; +using protos::CenterSizeOffsetCoder; +using protos::DetectionResults; + +void DecreasingArgSort(const std::vector& values, + std::vector* indices) { + indices->resize(values.size()); + for (int i = 0; i < values.size(); ++i) (*indices)[i] = i; + std::sort( + indices->begin(), indices->end(), + [&values](const int i, const int j) { return values[i] > values[j]; }); +} + +void DecreasingPartialArgSort(const float* values, int num_values, + int num_to_sort, int* indices) { + for (int i = 0; i < num_values; ++i) { + indices[i] = i; + } + std::partial_sort( + indices, indices + num_to_sort, indices + num_values, + [&values](const int i, const int j) { return values[i] > values[j]; }); +} + +// The row index offset is 1 if background class is included and 0 otherwise. 
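+// For example, a score tensor with 9100 values for 100 boxes and 90 classes
+// yields an offset of 9100 / 100 - 90 = 1, i.e. column 0 holds the background
+// score.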
+int GetLabelOffset(const int num_boxes, + const int num_classes, + const int score_size) { + const int label_offset = score_size / num_boxes - num_classes; + CHECK_EQ(score_size, (num_classes + label_offset) * num_boxes); + return label_offset; +} + +void ApplyThreshold(const std::vector& values, + const float threshold, + std::vector* keep_values, + std::vector* keep_indices) { + for (int i = 0; i < values.size(); i++) { + if (values[i] >= threshold) { + keep_values->emplace_back(values[i]); + keep_indices->emplace_back(i); + } + } +} + +void ValidateBoxes(const BoxCornerEncoding& boxes) { + const int num_boxes = boxes.ymin_size(); + CHECK_EQ(num_boxes, boxes.ymax_size()); + CHECK_EQ(num_boxes, boxes.xmin_size()); + CHECK_EQ(num_boxes, boxes.xmax_size()); + + for (int i = 0; i < num_boxes; ++i) { + CHECK_GE(boxes.ymax(i), boxes.ymin(i)); + CHECK_GE(boxes.xmax(i), boxes.xmin(i)); + } +} +} // namespace + + +void DecodeBoxCornerBoxes(const BoxCornerEncoding& predictions, + const CenterSizeEncoding& anchors, + const BoxCornerOffsetCoder& coder, + BoxCornerEncoding* decoded_boxes) { + const int num_boxes = predictions.ymin_size(); + CHECK_EQ(num_boxes, anchors.y_size()); + CHECK_EQ(predictions.keypoint_y_size(), 0) + << "BoxCornerOffsetCoder doesn't work with keypoints."; + + float ymin, xmin, ymax, xmax; + for (int i = 0; i < num_boxes; ++i) { + ymin = predictions.ymin(i) * coder.stddev() + + (anchors.y(i) - anchors.h(i) / 2); + xmin = predictions.xmin(i) * coder.stddev() + + (anchors.x(i) - anchors.w(i) / 2); + ymax = predictions.ymax(i) * coder.stddev() + + (anchors.y(i) + anchors.h(i) / 2); + xmax = predictions.xmax(i) * coder.stddev() + + (anchors.x(i) + anchors.w(i) / 2); + + decoded_boxes->add_ymin(ymin); + decoded_boxes->add_xmin(xmin); + decoded_boxes->add_ymax(std::max(ymax, ymin)); + decoded_boxes->add_xmax(std::max(xmax, xmin)); + } +} + +void DecodeCenterSizeBoxes(const CenterSizeEncoding& predictions, + const CenterSizeEncoding& anchors, + const CenterSizeOffsetCoder& coder, + BoxCornerEncoding* decoded_boxes) { + CHECK_EQ(predictions.y_size(), anchors.y_size()); + const int num_boxes = predictions.y_size(); + const int num_keypoints = predictions.keypoint_y_size() / num_boxes; + float ycenter, xcenter, h, w, ymin, xmin, ymax, xmax; + for (int i = 0; i < num_boxes; ++i) { + ycenter = predictions.y(i) / coder.y_scale() * anchors.h(i) + anchors.y(i); + xcenter = predictions.x(i) / coder.x_scale() * anchors.w(i) + anchors.x(i); + h = std::exp(predictions.h(i) / coder.h_scale()) * anchors.h(i); + w = std::exp(predictions.w(i) / coder.w_scale()) * anchors.w(i); + + ymin = ycenter - h / 2.; + xmin = xcenter - w / 2.; + ymax = ycenter + h / 2.; + xmax = xcenter + w / 2.; + + decoded_boxes->add_ymin(ymin); + decoded_boxes->add_xmin(xmin); + decoded_boxes->add_ymax(ymax); + decoded_boxes->add_xmax(xmax); + + // keypoints + for (int j = 0; j < num_keypoints; ++j) { + float keypoint_y = predictions.keypoint_y(num_keypoints * i + j) / + coder.keypoint_y_scale() * anchors.h(i) + anchors.y(i); + float keypoint_x = predictions.keypoint_x(num_keypoints * i + j) / + coder.keypoint_x_scale() * anchors.w(i) + anchors.x(i); + decoded_boxes->add_keypoint_y(keypoint_y); + decoded_boxes->add_keypoint_x(keypoint_x); + } + } +} + +float ComputeIOU(const BoxCornerEncoding& boxes, const int i, const int j) { + const float area_i = + (boxes.ymax(i) - boxes.ymin(i)) * (boxes.xmax(i) - boxes.xmin(i)); + const float area_j = + (boxes.ymax(j) - boxes.ymin(j)) * (boxes.xmax(j) - boxes.xmin(j)); + if (area_i <= 
0 || area_j <= 0) return 0.0; + const float intersection_ymin = std::max(boxes.ymin(i), boxes.ymin(j)); + const float intersection_xmin = std::max(boxes.xmin(i), boxes.xmin(j)); + const float intersection_ymax = std::min(boxes.ymax(i), boxes.ymax(j)); + const float intersection_xmax = std::min(boxes.xmax(i), boxes.xmax(j)); + const float intersection_area = + std::max(intersection_ymax - intersection_ymin, 0.0) * + std::max(intersection_xmax - intersection_xmin, 0.0); + return intersection_area / (area_i + area_j - intersection_area); +} + +void NonMaxSuppressionMultiClass(const BoxCornerEncoding& boxes, + const std::vector& scores, + const int num_classes, + const int max_detection_per_class, + const float score_threshold, + const float iou_threshold, + DetectionResults* detections) { + const int num_boxes = boxes.ymin_size(); + const int num_keypoints = boxes.keypoint_y_size() / num_boxes; + // The row index offset is 1 if the background class is included. + const int label_offset = + GetLabelOffset(num_boxes, num_classes, scores.size()); + + detections->Clear(); + std::vector selected; + std::vector class_scores; + class_scores.resize(num_boxes); + // For each class, perform non-max suppression. + for (int col = 0; col < num_classes; col++) { + for (int row = 0; row < num_boxes; row++) { + class_scores[row] = + scores[row * (num_classes + label_offset) + col + label_offset]; + } + NonMaxSuppression(boxes, class_scores, max_detection_per_class, + score_threshold, iou_threshold, &selected); + for (const auto& selected_index : selected) { + auto* new_detection = detections->add_detection(); + auto* new_detection_box = new_detection->mutable_box(); + new_detection_box->add_ymin(boxes.ymin(selected_index)); + new_detection_box->add_xmin(boxes.xmin(selected_index)); + new_detection_box->add_ymax(boxes.ymax(selected_index)); + new_detection_box->add_xmax(boxes.xmax(selected_index)); + new_detection->add_score(class_scores[selected_index]); + new_detection->add_class_index(col); + for (int i = 0; i < num_keypoints; ++i) { + new_detection_box->add_keypoint_y(boxes.keypoint_y( + selected_index * num_keypoints + i)); + new_detection_box->add_keypoint_x(boxes.keypoint_x( + selected_index * num_keypoints + i)); + } + } + } +} + +void NonMaxSuppressionMultiClassFast( + const BoxCornerEncoding& boxes, const std::vector& scores, + const int num_classes, const int max_detection, const int max_category, + const float score_threshold, const float iou_threshold, + DetectionResults* detections) { + const int num_boxes = boxes.ymin_size(); + const int num_keypoints = boxes.keypoint_y_size() / num_boxes; + const int label_offset = + GetLabelOffset(num_boxes, num_classes, scores.size()); + + int num_category = std::min(max_category, num_classes); + detections->Clear(); + std::vector max_scores; + max_scores.resize(num_boxes); + std::vector sorted_class_indices; + sorted_class_indices.resize(num_boxes * num_classes); + for (int row = 0; row < num_boxes; row++) { + const float* box_scores = + scores.data() + row * (num_classes + label_offset) + label_offset; + int* class_indices = sorted_class_indices.data() + row * num_classes; + DecreasingPartialArgSort(box_scores, num_classes, num_category, + class_indices); + max_scores[row] = box_scores[class_indices[0]]; + } + // Perform non-max suppression on max scores + std::vector selected; + NonMaxSuppression(boxes, max_scores, max_detection, score_threshold, + iou_threshold, &selected); + for (const auto& selected_index : selected) { + auto* new_detection = 
detections->add_detection(); + auto* new_detection_box = new_detection->mutable_box(); + new_detection_box->add_ymin(boxes.ymin(selected_index)); + new_detection_box->add_xmin(boxes.xmin(selected_index)); + new_detection_box->add_ymax(boxes.ymax(selected_index)); + new_detection_box->add_xmax(boxes.xmax(selected_index)); + const float* box_scores = scores.data() + + selected_index * (num_classes + label_offset) + + label_offset; + const int* class_indices = + sorted_class_indices.data() + selected_index * num_classes; + for (int i = 0; i < num_category; ++i) { + new_detection->add_score(box_scores[class_indices[i]]); + new_detection->add_class_index(class_indices[i]); + } + for (int i = 0; i < num_keypoints; ++i) { + new_detection_box->add_keypoint_y(boxes.keypoint_y( + selected_index * num_keypoints + i)); + new_detection_box->add_keypoint_x(boxes.keypoint_x( + selected_index * num_keypoints + i)); + } + } +} + +void NonMaxSuppressionMultiClassRestrict( + std::vector restricted_class_indices, const BoxCornerEncoding& boxes, + const std::vector& scores, const int num_classes, + const int max_detection, const int max_category, + const float score_threshold, const float iou_threshold, + DetectionResults* detections) { + int num_boxes = boxes.ymin_size(); + const int label_offset = + GetLabelOffset(num_boxes, num_classes, scores.size()); + // Slice the score matrix along columns to extract the scores of the + // restricted classes. + int restricted_num_classes = restricted_class_indices.size(); + std::vector restricted_scores; + restricted_scores.reserve(num_boxes * restricted_num_classes); + for (int i = 0; i < num_boxes; ++i) { + for (int index : restricted_class_indices) { + CHECK(index >= 0 && index < num_classes + label_offset); + restricted_scores.push_back( + scores[i * (num_classes + label_offset) + index + label_offset]); + } + } + // Apply non-maxima suppression to the sliced score matrix. + NonMaxSuppressionMultiClassFast( + boxes, restricted_scores, restricted_num_classes, max_detection, + max_category, score_threshold, iou_threshold, detections); + // Resulting indices are based on score matrix column index: remap to the + // original class indices. 
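+  // For example, with restricted_class_indices = {3, 7}, a detection reported
+  // with class_index 1 refers to column 1 of the sliced score matrix and is
+  // remapped to original class index 7.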
+ for (auto& detection : *detections->mutable_detection()) { + for (int i = 0; i < detection.class_index_size(); ++i) { + detection.set_class_index( + i, restricted_class_indices[detection.class_index(i)]); + } + } +} + +void NonMaxSuppression(const BoxCornerEncoding& boxes, + const std::vector& scores, + const int max_detection, const float score_threshold, + const float iou_threshold, std::vector* selected) { + CHECK_EQ(boxes.ymin_size(), scores.size()) + << "The number of bounding boxes and scores does not match."; + CHECK_GT(max_detection, 0) << "Maximum detections should be positive."; + CHECK_GT(iou_threshold, 0.0) << "iou_threshold should be positive."; + CHECK_LT(iou_threshold, 1.0) << "iou_threshold should be less than 1."; + ValidateBoxes(boxes); + + // threshold scores + std::vector keep_indices; + std::vector keep_scores; + ApplyThreshold(scores, score_threshold, &keep_scores, &keep_indices); + + std::vector sorted_indices; + DecreasingArgSort(keep_scores, &sorted_indices); + + const int num_boxes = keep_scores.size(); + const int output_size = std::min(num_boxes, max_detection); + std::vector active(num_boxes, true); + selected->clear(); + int num_active = active.size(); + for (int i = 0; i < num_boxes; ++i) { + if (num_active == 0 || selected->size() >= output_size) break; + if (active[i]) { + selected->push_back(keep_indices[sorted_indices[i]]); + active[i] = false; + num_active--; + } else { + continue; + } + for (int j = i + 1; j < num_boxes; ++j) { + if (active[j]) { + float iou = ComputeIOU(boxes, keep_indices[sorted_indices[i]], + keep_indices[sorted_indices[j]]); + if (iou > iou_threshold) { + active[j] = false; + num_active--; + } + } + } + } +} + +void NormalizeDetectionBoxes(const int width, const int height, + DetectionResults* boxes) { + for (auto& det : *boxes->mutable_detection()) { + auto *box = det.mutable_box(); + box->set_ymin(0, box->ymin(0) / height); + box->set_ymax(0, box->ymax(0) / height); + box->set_xmin(0, box->xmin(0) / width); + box->set_xmax(0, box->xmax(0) / width); + const int num_keypoints = box->keypoint_y_size(); + for (int i = 0; i < num_keypoints; ++i) { + box->set_keypoint_y(i, box->keypoint_y(i) / height); + box->set_keypoint_x(i, box->keypoint_x(i) / width); + } + } +} + +void DenormalizeDetectionBoxes(const int width, const int height, + DetectionResults* boxes) { + for (auto& det : *boxes->mutable_detection()) { + auto* box = det.mutable_box(); + box->set_ymin(0, box->ymin(0) * (height - 1)); + box->set_ymax(0, box->ymax(0) * (height - 1)); + box->set_xmin(0, box->xmin(0) * (width - 1)); + box->set_xmax(0, box->xmax(0) * (width - 1)); + const int num_keypoints = box->keypoint_y_size(); + for (int i = 0; i < num_keypoints; ++i) { + box->set_keypoint_y(i, box->keypoint_y(i) * (height - 1)); + box->set_keypoint_x(i, box->keypoint_x(i) * (width - 1)); + } + } +} + +void ClampBoxCoordinates(DetectionResults* boxes) { + for (auto& detection : *boxes->mutable_detection()) { + auto* box = detection.mutable_box(); + box->set_ymin(0, std::max(0.f, box->ymin(0))); + box->set_ymax(0, std::min(1.f, box->ymax(0))); + box->set_xmin(0, std::max(0.f, box->xmin(0))); + box->set_xmax(0, std::min(1.f, box->xmax(0))); + } +} + +bool GenerateSsdAnchors(const AnchorGenerationOptions& options, + CenterSizeEncoding* anchors) { + const int base_anchor_width = options.base_anchor_width(); + const int base_anchor_height = options.base_anchor_height(); + const float min_anchor_scale = options.min_anchor_scale(); + const float max_anchor_scale = 
options.max_anchor_scale(); + + const float* aspect_ratios_ptr = options.anchor_aspect_ratios().data(); + const int num_aspect_ratios = options.anchor_aspect_ratios_size(); + const std::vector anchor_aspect_ratios( + aspect_ratios_ptr, aspect_ratios_ptr + num_aspect_ratios); + + const int* strides_ptr = options.anchor_strides().data(); + const int num_strides = options.anchor_strides_size(); + const std::vector anchor_strides(strides_ptr, strides_ptr + num_strides); + + // Must set both image width and height or neither + CHECK_EQ(options.has_image_width(), options.has_image_height()); + + if (options.has_image_width() && options.has_image_height()) { + const int* offsets_ptr = options.anchor_offsets().data(); + const int num_offsets = options.anchor_offsets_size(); + const std::vector anchor_offsets(offsets_ptr, + offsets_ptr + num_offsets); + return GenerateSsdAnchors( + options.image_width(), options.image_height(), base_anchor_width, + base_anchor_height, min_anchor_scale, max_anchor_scale, + anchor_aspect_ratios, anchor_strides, anchor_offsets, anchors); + } + return GenerateSsdAnchors(base_anchor_width, base_anchor_height, + min_anchor_scale, max_anchor_scale, + anchor_aspect_ratios, anchor_strides, anchors); +} + +bool GenerateSsdAnchors(int input_width, int input_height, float min_scale, + float max_scale, + const std::vector& aspect_ratios, + const std::vector& anchor_strides, + CenterSizeEncoding* anchors) { + int num_layers = anchor_strides.size(); + std::vector anchor_offsets(num_layers); + for (int i = 0; i < num_layers; ++i) { + anchor_offsets[i] = (anchor_strides[i] + 1) / 2; + } + return GenerateSsdAnchors(input_width, + input_height, + input_width, + input_height, + min_scale, + max_scale, + aspect_ratios, + anchor_strides, + anchor_offsets, + anchors); +} + +bool GenerateSsdAnchors(int input_width, int input_height, + int base_anchor_width, int base_anchor_height, + float min_scale, float max_scale, + const std::vector& aspect_ratios, + const std::vector& anchor_strides, + const std::vector& anchor_offsets, + CenterSizeEncoding* anchors) { + constexpr float kSqrt2 = 1.414213562f; + int num_layers = anchor_strides.size(); + if (num_layers != anchor_offsets.size()) { + LOG(ERROR) << absl::StrCat("The size of anchor strides (", + anchor_strides.size(), + ") and anchor " + "offsets (", + anchor_offsets.size(), ") must be the same."); + return false; + } + std::vector scales(num_layers); + // Populate scales. + for (int i = 0; i < num_layers; ++i) { + scales[i] = min_scale + (max_scale - min_scale) * i / (num_layers - 1); + } + // Populate square roots of aspect ratios. + int num_aspect_ratios = aspect_ratios.size(); + std::vector sqrt_aspect_ratios(num_aspect_ratios); + for (int i = 0; i < num_aspect_ratios; ++i) { + sqrt_aspect_ratios[i] = std::sqrt(aspect_ratios[i]); + } + // Generate anchors. 
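+  // For each layer i, anchors are laid out on a grid with spacing
+  // anchor_strides[i] and origin anchor_offsets[i]. Layer 0 always emits three
+  // fixed anchors per cell (scale 0.1 at aspect ratio 1.0, plus the layer
+  // scale at ratios 2.0 and 0.5); every other layer emits one anchor per
+  // aspect ratio plus one extra anchor at the scale interpolated with the
+  // next layer.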
+ float normalized_width = static_cast(base_anchor_width) / input_width; + float normalized_height = + static_cast(base_anchor_height) / input_height; + anchors->Clear(); + for (int i = 0; i < num_layers; ++i) { + float scale = scales[i]; + float next_scale; + if (i == num_layers - 1) { + next_scale = 1.0; + } else { + next_scale = scales[i + 1]; + } + float interpolated_scale = std::sqrt(scale * next_scale); + float normalized_scale_width = scale * normalized_width; + float normalized_scale_height = scale * normalized_height; + int anchor_map_height = + (input_height + anchor_strides[i] - 1) / anchor_strides[i]; + int anchor_map_width = + (input_width + anchor_strides[i] - 1) / anchor_strides[i]; + for (int anchor_idx_y = 0; anchor_idx_y < anchor_map_height; + ++anchor_idx_y) { + float y = static_cast( + anchor_offsets[i] + anchor_strides[i] * anchor_idx_y) / input_height; + for (int anchor_idx_x = 0; anchor_idx_x < anchor_map_width; + ++anchor_idx_x) { + float x = static_cast( + anchor_offsets[i] + anchor_strides[i] * anchor_idx_x) / input_width; + if (i == 0) { + // Scale: 0.1, Aspect Ratio: 1.0 + anchors->add_x(x); + anchors->add_y(y); + anchors->add_w(0.1 * normalized_width); + anchors->add_h(0.1 * normalized_height); + // Scale: scale, Aspect Ratio: 2.0 + anchors->add_x(x); + anchors->add_y(y); + anchors->add_w(normalized_scale_width * kSqrt2); + anchors->add_h(normalized_scale_height / kSqrt2); + // Scale: scale, Aspect Ratio: 0.5 + anchors->add_x(x); + anchors->add_y(y); + anchors->add_w(normalized_scale_width / kSqrt2); + anchors->add_h(normalized_scale_height * kSqrt2); + continue; + } + for (int j = 0; j < num_aspect_ratios; ++j) { + // Scale: scale, Aspect Ratio: aspect_ratio + anchors->add_x(x); + anchors->add_y(y); + anchors->add_w(normalized_scale_width * sqrt_aspect_ratios[j]); + anchors->add_h(normalized_scale_height / sqrt_aspect_ratios[j]); + } + // Interpolated anchors + anchors->add_x(x); + anchors->add_y(y); + anchors->add_w(interpolated_scale * normalized_width); + anchors->add_h(interpolated_scale * normalized_height); + } + } + } + return true; +} + +} // namespace tflite +} // namespace lstm_object_detection diff --git a/models/research/lstm_object_detection/tflite/utils/ssd_utils.h b/models/research/lstm_object_detection/tflite/utils/ssd_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..a9199e7fd5ca2c5826caf9ee6db9c1afc82a4534 --- /dev/null +++ b/models/research/lstm_object_detection/tflite/utils/ssd_utils.h @@ -0,0 +1,119 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_MODELS_LSTM_OBJECT_DETECTION_TFLITE_UTILS_SSD_UTILS_H_ +#define TENSORFLOW_MODELS_LSTM_OBJECT_DETECTION_TFLITE_UTILS_SSD_UTILS_H_ + +#include "protos/anchor_generation_options.pb.h" +#include "protos/box_encodings.pb.h" +#include "protos/detections.pb.h" + +namespace lstm_object_detection { +namespace tflite { + +// Decodes bounding boxes using CenterSizeOffsetCoder given network +// predictions and anchor encodings. +void DecodeCenterSizeBoxes(const protos::CenterSizeEncoding& predictions, + const protos::CenterSizeEncoding& anchors, + const protos::CenterSizeOffsetCoder& coder, + protos::BoxCornerEncoding* decoded_boxes); + +// Decodes bounding boxes using BoxCornerOffsetCoder given network +// predictions and anchor encodings. +void DecodeBoxCornerBoxes(const protos::BoxCornerEncoding& predictions, + const protos::CenterSizeEncoding& anchors, + const protos::BoxCornerOffsetCoder& coder, + protos::BoxCornerEncoding* decoded_boxes); + +// Computes IOU overlap between two bounding boxes. +float ComputeIOU(const protos::BoxCornerEncoding& boxes, const int i, + const int j); + +// Performs Non-max suppression (multi-class) on a list of bounding boxes +// and prediction scores. +void NonMaxSuppressionMultiClass(const protos::BoxCornerEncoding& boxes, + const std::vector& scores, + const int num_classes, + const int max_detection_per_class, + const float score_threshold, + const float iou_threshold, + protos::DetectionResults* detections); + +// A fast (but not exact) version of non-max suppression (multi-class). +// Instead of computing per class non-max suppression, anchor-wise class +// maximum is computed on a list of bounding boxes and scores. This means +// that different classes can suppress each other. +void NonMaxSuppressionMultiClassFast( + const protos::BoxCornerEncoding& boxes, const std::vector& scores, + const int num_classes, const int max_detection, const int max_category, + const float score_threshold, const float iou_threshold, + protos::DetectionResults* detections); + +// Similar to NonMaxSuppressionMultiClassFast, but restricts the results to +// the provided list of class indices. This effectively filters out any class +// whose index is not in this whitelist. +void NonMaxSuppressionMultiClassRestrict( + std::vector restricted_class_indices, + const protos::BoxCornerEncoding& boxes, const std::vector& scores, + const int num_classes, const int max_detection, const int max_category, + const float score_threshold, const float iou_threshold, + protos::DetectionResults* detections); + +// Performs Non-max suppression (single class) on a list of bounding boxes +// and scores. The function implements a modified version of: +// third_party/tensorflow/core/kernels/non_max_suppression_op.cc +void NonMaxSuppression(const protos::BoxCornerEncoding& boxes, + const std::vector& scores, + const int max_detection, const float score_threshold, + const float iou_threshold, + std::vector* selected_indices); + +// Normalizes output bounding boxes such that the coordinates are in [0, 1]. +void NormalizeDetectionBoxes(const int width, const int height, + protos::DetectionResults* boxes); + +// Denormalizes output bounding boxes so that the coordinates are scaled to +// the absolute width and height. +void DenormalizeDetectionBoxes(const int width, const int height, + protos::DetectionResults* boxes); + +// Clamps detection box coordinates to be between [0, 1]. 
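+// Intended for boxes in normalized coordinates (e.g. the output of
+// NormalizeDetectionBoxes).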
+void ClampBoxCoordinates(protos::DetectionResults* boxes); + +// Generates SSD anchors for the given input and anchor parameters. These +// methods generate the anchors described in https://arxiv.org/abs/1512.02325 +// and is similar to the anchor generation logic in +// //third_party/tensorflow_models/ +// object_detection/anchor_generators/multiple_grid_anchor_generator.py. +bool GenerateSsdAnchors(int input_width, int input_height, float min_scale, + float max_scale, + const std::vector& aspect_ratios, + const std::vector& anchor_strides, + protos::CenterSizeEncoding* anchors); + +bool GenerateSsdAnchors(int input_width, int input_height, + int base_anchor_width, int base_anchor_height, + float min_scale, float max_scale, + const std::vector& aspect_ratios, + const std::vector& anchor_strides, + const std::vector& anchor_offsets, + protos::CenterSizeEncoding* anchors); + +bool GenerateSsdAnchors(const protos::AnchorGenerationOptions& options, + protos::CenterSizeEncoding* anchors); +} // namespace tflite +} // namespace lstm_object_detection + +#endif // TENSORFLOW_MODELS_LSTM_OBJECT_DETECTION_TFLITE_UTILS_SSD_UTILS_H_ diff --git a/models/research/lstm_object_detection/train.py b/models/research/lstm_object_detection/train.py new file mode 100644 index 0000000000000000000000000000000000000000..7a3dfbc5d38062984469f7c3ea92ebb6034e99d0 --- /dev/null +++ b/models/research/lstm_object_detection/train.py @@ -0,0 +1,185 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""Training executable for detection models. + +This executable is used to train DetectionModels. There are two ways of +configuring the training job: + +1) A single pipeline_pb2.TrainEvalPipelineConfig configuration file +can be specified by --pipeline_config_path. + +Example usage: + ./train \ + --logtostderr \ + --train_dir=path/to/train_dir \ + --pipeline_config_path=pipeline_config.pbtxt + +2) Three configuration files can be provided: a model_pb2.DetectionModel +configuration file to define what type of DetectionModel is being trained, an +input_reader_pb2.InputReader file to specify what training data will be used and +a train_pb2.TrainConfig file to configure training parameters. 
+ +Example usage: + ./train \ + --logtostderr \ + --train_dir=path/to/train_dir \ + --model_config_path=model_config.pbtxt \ + --train_config_path=train_config.pbtxt \ + --input_config_path=train_input_config.pbtxt + +""" + +import functools +import json +import os +from absl import flags +import tensorflow.compat.v1 as tf +from lstm_object_detection import model_builder +from lstm_object_detection import trainer +from lstm_object_detection.inputs import seq_dataset_builder +from lstm_object_detection.utils import config_util +from object_detection.builders import preprocessor_builder + +flags.DEFINE_string('master', '', 'Name of the TensorFlow master to use.') +flags.DEFINE_integer('task', 0, 'task id') +flags.DEFINE_integer('num_clones', 1, 'Number of clones to deploy per worker.') +flags.DEFINE_boolean( + 'clone_on_cpu', False, + 'Force clones to be deployed on CPU. Note that even if ' + 'set to False (allowing ops to run on gpu), some ops may ' + 'still be run on the CPU if they have no GPU kernel.') +flags.DEFINE_integer('worker_replicas', 1, 'Number of worker+trainer ' + 'replicas.') +flags.DEFINE_integer( + 'ps_tasks', 0, 'Number of parameter server tasks. If None, does not use ' + 'a parameter server.') +flags.DEFINE_string( + 'train_dir', '', + 'Directory to save the checkpoints and training summaries.') + +flags.DEFINE_string( + 'pipeline_config_path', '', + 'Path to a pipeline_pb2.TrainEvalPipelineConfig config ' + 'file. If provided, other configs are ignored') + +flags.DEFINE_string('train_config_path', '', + 'Path to a train_pb2.TrainConfig config file.') +flags.DEFINE_string('input_config_path', '', + 'Path to an input_reader_pb2.InputReader config file.') +flags.DEFINE_string('model_config_path', '', + 'Path to a model_pb2.DetectionModel config file.') + +FLAGS = flags.FLAGS + + +def main(_): + assert FLAGS.train_dir, '`train_dir` is missing.' 
+ if FLAGS.task == 0: + tf.gfile.MakeDirs(FLAGS.train_dir) + if FLAGS.pipeline_config_path: + configs = config_util.get_configs_from_pipeline_file( + FLAGS.pipeline_config_path) + if FLAGS.task == 0: + tf.gfile.Copy( + FLAGS.pipeline_config_path, + os.path.join(FLAGS.train_dir, 'pipeline.config'), + overwrite=True) + else: + configs = config_util.get_configs_from_multiple_files( + model_config_path=FLAGS.model_config_path, + train_config_path=FLAGS.train_config_path, + train_input_config_path=FLAGS.input_config_path) + if FLAGS.task == 0: + for name, config in [('model.config', FLAGS.model_config_path), + ('train.config', FLAGS.train_config_path), + ('input.config', FLAGS.input_config_path)]: + tf.gfile.Copy( + config, os.path.join(FLAGS.train_dir, name), overwrite=True) + + model_config = configs['model'] + lstm_config = configs['lstm_model'] + train_config = configs['train_config'] + input_config = configs['train_input_config'] + + model_fn = functools.partial( + model_builder.build, + model_config=model_config, + lstm_config=lstm_config, + is_training=True) + + def get_next(config, model_config, lstm_config, unroll_length): + data_augmentation_options = [ + preprocessor_builder.build(step) + for step in train_config.data_augmentation_options + ] + return seq_dataset_builder.build( + config, + model_config, + lstm_config, + unroll_length, + data_augmentation_options, + batch_size=train_config.batch_size) + + create_input_dict_fn = functools.partial(get_next, input_config, model_config, + lstm_config, + lstm_config.train_unroll_length) + + env = json.loads(os.environ.get('TF_CONFIG', '{}')) + cluster_data = env.get('cluster', None) + cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None + task_data = env.get('task', None) or {'type': 'master', 'index': 0} + task_info = type('TaskSpec', (object,), task_data) + + # Parameters for a single worker. + ps_tasks = 0 + worker_replicas = 1 + worker_job_name = 'lonely_worker' + task = 0 + is_chief = True + master = '' + + if cluster_data and 'worker' in cluster_data: + # Number of total worker replicas include "worker"s and the "master". + worker_replicas = len(cluster_data['worker']) + 1 + if cluster_data and 'ps' in cluster_data: + ps_tasks = len(cluster_data['ps']) + + if worker_replicas > 1 and ps_tasks < 1: + raise ValueError('At least 1 ps task is needed for distributed training.') + + if worker_replicas >= 1 and ps_tasks > 0: + # Set up distributed training. + server = tf.train.Server( + tf.train.ClusterSpec(cluster), + protocol='grpc', + job_name=task_info.type, + task_index=task_info.index) + if task_info.type == 'ps': + server.join() + return + + worker_job_name = '%s/task:%d' % (task_info.type, task_info.index) + task = task_info.index + is_chief = (task_info.type == 'master') + master = server.target + + trainer.train(create_input_dict_fn, model_fn, train_config, master, task, + FLAGS.num_clones, worker_replicas, FLAGS.clone_on_cpu, ps_tasks, + worker_job_name, is_chief, FLAGS.train_dir) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/lstm_object_detection/trainer.py b/models/research/lstm_object_detection/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..17ae96c8f2c16af9fbbae795d75c871d87a37e5f --- /dev/null +++ b/models/research/lstm_object_detection/trainer.py @@ -0,0 +1,414 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Detection model trainer. + +This file provides a generic training method that can be used to train a +DetectionModel. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import six +from six.moves import range +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.builders import optimizer_builder +from object_detection.core import standard_fields as fields +from object_detection.utils import ops as util_ops +from object_detection.utils import variables_helper +from deployment import model_deploy + + +def create_input_queue(create_tensor_dict_fn): + """Sets up reader, prefetcher and returns input queue. + + Args: + create_tensor_dict_fn: function to create tensor dictionary. + + Returns: + all_dict: A dictionary holds tensors for images, boxes, and targets. + """ + tensor_dict = create_tensor_dict_fn() + all_dict = {} + + num_images = len(tensor_dict[fields.InputDataFields.image]) + all_dict['batch'] = tensor_dict['batch'] + del tensor_dict['batch'] + + for i in range(num_images): + suffix = str(i) + for key, val in tensor_dict.items(): + all_dict[key + suffix] = val[i] + + all_dict[fields.InputDataFields.image + suffix] = tf.to_float( + tf.expand_dims(all_dict[fields.InputDataFields.image + suffix], 0)) + + return all_dict + + +def get_inputs(input_queue, num_classes, merge_multiple_label_boxes=False): + """Dequeues batch and constructs inputs to object detection model. + + Args: + input_queue: BatchQueue object holding enqueued tensor_dicts. + num_classes: Number of classes. + merge_multiple_label_boxes: Whether to merge boxes with multiple labels + or not. Defaults to false. Merged boxes are represented with a single + box and a k-hot encoding of the multiple labels associated with the + boxes. + + Returns: + images: a list of 3-D float tensor of images. + image_keys: a list of string keys for the images. + locations: a list of tensors of shape [num_boxes, 4] containing the corners + of the groundtruth boxes. + classes: a list of padded one-hot tensors containing target classes. + masks: a list of 3-D float tensors of shape [num_boxes, image_height, + image_width] containing instance masks for objects if present in the + input_queue. Else returns None. + keypoints: a list of 3-D float tensors of shape [num_boxes, num_keypoints, + 2] containing keypoints for objects if present in the + input queue. Else returns None. 
+ """ + read_data_list = input_queue + label_id_offset = 1 + + def extract_images_and_targets(read_data): + """Extract images and targets from the input dict.""" + suffix = 0 + + images = [] + keys = [] + locations = [] + classes = [] + masks = [] + keypoints = [] + + while fields.InputDataFields.image + str(suffix) in read_data: + image = read_data[fields.InputDataFields.image + str(suffix)] + key = '' + if fields.InputDataFields.source_id in read_data: + key = read_data[fields.InputDataFields.source_id + str(suffix)] + location_gt = ( + read_data[fields.InputDataFields.groundtruth_boxes + str(suffix)]) + classes_gt = tf.cast( + read_data[fields.InputDataFields.groundtruth_classes + str(suffix)], + tf.int32) + classes_gt -= label_id_offset + masks_gt = read_data.get( + fields.InputDataFields.groundtruth_instance_masks + str(suffix)) + keypoints_gt = read_data.get( + fields.InputDataFields.groundtruth_keypoints + str(suffix)) + + if merge_multiple_label_boxes: + location_gt, classes_gt, _ = util_ops.merge_boxes_with_multiple_labels( + location_gt, classes_gt, num_classes) + else: + classes_gt = util_ops.padded_one_hot_encoding( + indices=classes_gt, depth=num_classes, left_pad=0) + + # Batch read input data and groundtruth. Images and locations, classes by + # default should have the same number of items. + images.append(image) + keys.append(key) + locations.append(location_gt) + classes.append(classes_gt) + masks.append(masks_gt) + keypoints.append(keypoints_gt) + + suffix += 1 + + return (images, keys, locations, classes, masks, keypoints) + + return extract_images_and_targets(read_data_list) + + +def _create_losses(input_queue, create_model_fn, train_config): + """Creates loss function for a DetectionModel. + + Args: + input_queue: BatchQueue object holding enqueued tensor_dicts. + create_model_fn: A function to create the DetectionModel. + train_config: a train_pb2.TrainConfig protobuf. + """ + + detection_model = create_model_fn() + (images, _, groundtruth_boxes_list, groundtruth_classes_list, + groundtruth_masks_list, groundtruth_keypoints_list) = get_inputs( + input_queue, detection_model.num_classes, + train_config.merge_multiple_label_boxes) + + preprocessed_images = [] + true_image_shapes = [] + for image in images: + resized_image, true_image_shape = detection_model.preprocess(image) + preprocessed_images.append(resized_image) + true_image_shapes.append(true_image_shape) + + images = tf.concat(preprocessed_images, 0) + true_image_shapes = tf.concat(true_image_shapes, 0) + + if any(mask is None for mask in groundtruth_masks_list): + groundtruth_masks_list = None + if any(keypoints is None for keypoints in groundtruth_keypoints_list): + groundtruth_keypoints_list = None + + detection_model.provide_groundtruth( + groundtruth_boxes_list, groundtruth_classes_list, groundtruth_masks_list, + groundtruth_keypoints_list) + prediction_dict = detection_model.predict(images, true_image_shapes, + input_queue['batch']) + + losses_dict = detection_model.loss(prediction_dict, true_image_shapes) + for loss_tensor in losses_dict.values(): + tf.losses.add_loss(loss_tensor) + + +def get_restore_checkpoint_ops(restore_checkpoints, detection_model, + train_config): + """Restore checkpoint from saved checkpoints. + + Args: + restore_checkpoints: loaded checkpoints. + detection_model: Object detection model built from config file. + train_config: a train_pb2.TrainConfig protobuf. + + Returns: + restorers: A list ops to init the model from checkpoints. 
+ + """ + restorers = [] + vars_restored = [] + for restore_checkpoint in restore_checkpoints: + var_map = detection_model.restore_map( + fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type) + available_var_map = ( + variables_helper.get_variables_available_in_checkpoint( + var_map, restore_checkpoint)) + for var_name, var in six.iteritems(available_var_map): + if var in vars_restored: + tf.logging.info('Variable %s contained in multiple checkpoints', + var.op.name) + del available_var_map[var_name] + else: + vars_restored.append(var) + + # Initialize from ExponentialMovingAverages if possible. + available_ema_var_map = {} + ckpt_reader = tf.train.NewCheckpointReader(restore_checkpoint) + ckpt_vars_to_shape_map = ckpt_reader.get_variable_to_shape_map() + for var_name, var in six.iteritems(available_var_map): + var_name_ema = var_name + '/ExponentialMovingAverage' + if var_name_ema in ckpt_vars_to_shape_map: + available_ema_var_map[var_name_ema] = var + else: + available_ema_var_map[var_name] = var + available_var_map = available_ema_var_map + init_saver = tf.train.Saver(available_var_map) + if list(available_var_map.keys()): + restorers.append(init_saver) + else: + tf.logging.info('WARNING: Checkpoint %s has no restorable variables', + restore_checkpoint) + + return restorers + + +def train(create_tensor_dict_fn, + create_model_fn, + train_config, + master, + task, + num_clones, + worker_replicas, + clone_on_cpu, + ps_tasks, + worker_job_name, + is_chief, + train_dir, + graph_hook_fn=None): + """Training function for detection models. + + Args: + create_tensor_dict_fn: a function to create a tensor input dictionary. + create_model_fn: a function that creates a DetectionModel and generates + losses. + train_config: a train_pb2.TrainConfig protobuf. + master: BNS name of the TensorFlow master to use. + task: The task id of this training instance. + num_clones: The number of clones to run per machine. + worker_replicas: The number of work replicas to train with. + clone_on_cpu: True if clones should be forced to run on CPU. + ps_tasks: Number of parameter server tasks. + worker_job_name: Name of the worker job. + is_chief: Whether this replica is the chief replica. + train_dir: Directory to write checkpoints and training summaries to. + graph_hook_fn: Optional function that is called after the training graph is + completely built. This is helpful to perform additional changes to the + training graph such as optimizing batchnorm. The function should modify + the default graph. + """ + + detection_model = create_model_fn() + + with tf.Graph().as_default(): + # Build a configuration specifying multi-GPU and multi-replicas. + deploy_config = model_deploy.DeploymentConfig( + num_clones=num_clones, + clone_on_cpu=clone_on_cpu, + replica_id=task, + num_replicas=worker_replicas, + num_ps_tasks=ps_tasks, + worker_job_name=worker_job_name) + + # Place the global step on the device storing the variables. + with tf.device(deploy_config.variables_device()): + global_step = slim.create_global_step() + + with tf.device(deploy_config.inputs_device()): + input_queue = create_input_queue(create_tensor_dict_fn) + + # Gather initial summaries. + # TODO(rathodv): See if summaries can be added/extracted from global tf + # collections so that they don't have to be passed around. 
+ summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES)) + global_summaries = set([]) + + model_fn = functools.partial( + _create_losses, + create_model_fn=create_model_fn, + train_config=train_config) + clones = model_deploy.create_clones(deploy_config, model_fn, [input_queue]) + first_clone_scope = clones[0].scope + + # Gather update_ops from the first clone. These contain, for example, + # the updates for the batch_norm variables created by model_fn. + update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope) + + with tf.device(deploy_config.optimizer_device()): + training_optimizer, optimizer_summary_vars = optimizer_builder.build( + train_config.optimizer) + for var in optimizer_summary_vars: + tf.summary.scalar(var.op.name, var) + + sync_optimizer = None + if train_config.sync_replicas: + training_optimizer = tf.train.SyncReplicasOptimizer( + training_optimizer, + replicas_to_aggregate=train_config.replicas_to_aggregate, + total_num_replicas=train_config.worker_replicas) + sync_optimizer = training_optimizer + + # Create ops required to initialize the model from a given checkpoint. + init_fn = None + if train_config.fine_tune_checkpoint: + restore_checkpoints = [ + path.strip() for path in train_config.fine_tune_checkpoint.split(',') + ] + + restorers = get_restore_checkpoint_ops(restore_checkpoints, + detection_model, train_config) + + def initializer_fn(sess): + for i, restorer in enumerate(restorers): + restorer.restore(sess, restore_checkpoints[i]) + + init_fn = initializer_fn + + with tf.device(deploy_config.optimizer_device()): + regularization_losses = ( + None if train_config.add_regularization_loss else []) + total_loss, grads_and_vars = model_deploy.optimize_clones( + clones, + training_optimizer, + regularization_losses=regularization_losses) + total_loss = tf.check_numerics(total_loss, 'LossTensor is inf or nan.') + + # Optionally multiply bias gradients by train_config.bias_grad_multiplier. + if train_config.bias_grad_multiplier: + biases_regex_list = ['.*/biases'] + grads_and_vars = variables_helper.multiply_gradients_matching_regex( + grads_and_vars, + biases_regex_list, + multiplier=train_config.bias_grad_multiplier) + + # Optionally clip gradients + if train_config.gradient_clipping_by_norm > 0: + with tf.name_scope('clip_grads'): + grads_and_vars = slim.learning.clip_gradient_norms( + grads_and_vars, train_config.gradient_clipping_by_norm) + + moving_average_variables = slim.get_model_variables() + variable_averages = tf.train.ExponentialMovingAverage(0.9999, global_step) + update_ops.append(variable_averages.apply(moving_average_variables)) + + # Create gradient updates. + grad_updates = training_optimizer.apply_gradients( + grads_and_vars, global_step=global_step) + update_ops.append(grad_updates) + update_op = tf.group(*update_ops, name='update_barrier') + with tf.control_dependencies([update_op]): + train_tensor = tf.identity(total_loss, name='train_op') + + if graph_hook_fn: + with tf.device(deploy_config.variables_device()): + graph_hook_fn() + + # Add summaries. + for model_var in slim.get_model_variables(): + global_summaries.add(tf.summary.histogram(model_var.op.name, model_var)) + for loss_tensor in tf.losses.get_losses(): + global_summaries.add(tf.summary.scalar(loss_tensor.op.name, loss_tensor)) + global_summaries.add( + tf.summary.scalar('TotalLoss', tf.losses.get_total_loss())) + + # Add the summaries from the first clone. These contain the summaries + # created by model_fn and either optimize_clones() or _gather_clone_loss(). 
+ summaries |= set( + tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone_scope)) + summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES, 'critic_loss')) + summaries |= global_summaries + + # Merge all summaries together. + summary_op = tf.summary.merge(list(summaries), name='summary_op') + + # Soft placement allows placing on CPU ops without GPU implementation. + session_config = tf.ConfigProto( + allow_soft_placement=True, log_device_placement=False) + + # Save checkpoints regularly. + keep_checkpoint_every_n_hours = train_config.keep_checkpoint_every_n_hours + saver = tf.train.Saver( + keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours) + + slim.learning.train( + train_tensor, + logdir=train_dir, + master=master, + is_chief=is_chief, + session_config=session_config, + startup_delay_steps=train_config.startup_delay_steps, + init_fn=init_fn, + summary_op=summary_op, + number_of_steps=(train_config.num_steps + if train_config.num_steps else None), + save_summaries_secs=120, + sync_optimizer=sync_optimizer, + saver=saver) diff --git a/models/research/lstm_object_detection/utils/__init__.py b/models/research/lstm_object_detection/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/lstm_object_detection/utils/config_util.py b/models/research/lstm_object_detection/utils/config_util.py new file mode 100644 index 0000000000000000000000000000000000000000..d46d2d703c4f6439a5eb3173fa642941df80d39b --- /dev/null +++ b/models/research/lstm_object_detection/utils/config_util.py @@ -0,0 +1,106 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Added functionality to load from pipeline config for lstm framework.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from lstm_object_detection.protos import input_reader_google_pb2 # pylint: disable=unused-import +from lstm_object_detection.protos import pipeline_pb2 as internal_pipeline_pb2 +from object_detection.protos import pipeline_pb2 +from object_detection.utils import config_util + + +def get_configs_from_pipeline_file(pipeline_config_path): + """Reads configuration from a pipeline_pb2.TrainEvalPipelineConfig. + + Args: + pipeline_config_path: Path to pipeline_pb2.TrainEvalPipelineConfig text + proto. + + Returns: + Dictionary of configuration objects. Keys are `model`, `train_config`, + `train_input_config`, `eval_config`, `eval_input_config`, `lstm_model`. + Value are the corresponding config objects. 
+ """ + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + with tf.gfile.GFile(pipeline_config_path, "r") as f: + proto_str = f.read() + text_format.Merge(proto_str, pipeline_config) + configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) + if pipeline_config.HasExtension(internal_pipeline_pb2.lstm_model): + configs["lstm_model"] = pipeline_config.Extensions[ + internal_pipeline_pb2.lstm_model] + return configs + + +def create_pipeline_proto_from_configs(configs): + """Creates a pipeline_pb2.TrainEvalPipelineConfig from configs dictionary. + + This function nearly performs the inverse operation of + get_configs_from_pipeline_file(). Instead of returning a file path, it returns + a `TrainEvalPipelineConfig` object. + + Args: + configs: Dictionary of configs. See get_configs_from_pipeline_file(). + + Returns: + A fully populated pipeline_pb2.TrainEvalPipelineConfig. + """ + pipeline_config = config_util.create_pipeline_proto_from_configs(configs) + if "lstm_model" in configs: + pipeline_config.Extensions[internal_pipeline_pb2.lstm_model].CopyFrom( + configs["lstm_model"]) + return pipeline_config + + +def get_configs_from_multiple_files(model_config_path="", + train_config_path="", + train_input_config_path="", + eval_config_path="", + eval_input_config_path="", + lstm_config_path=""): + """Reads training configuration from multiple config files. + + Args: + model_config_path: Path to model_pb2.DetectionModel. + train_config_path: Path to train_pb2.TrainConfig. + train_input_config_path: Path to input_reader_pb2.InputReader. + eval_config_path: Path to eval_pb2.EvalConfig. + eval_input_config_path: Path to input_reader_pb2.InputReader. + lstm_config_path: Path to pipeline_pb2.LstmModel. + + Returns: + Dictionary of configuration objects. Keys are `model`, `train_config`, + `train_input_config`, `eval_config`, `eval_input_config`, `lstm_model`. + Key/Values are returned only for valid (non-empty) strings. + """ + configs = config_util.get_configs_from_multiple_files( + model_config_path=model_config_path, + train_config_path=train_config_path, + train_input_config_path=train_input_config_path, + eval_config_path=eval_config_path, + eval_input_config_path=eval_input_config_path) + if lstm_config_path: + lstm_config = internal_pipeline_pb2.LstmModel() + with tf.gfile.GFile(lstm_config_path, "r") as f: + text_format.Merge(f.read(), lstm_config) + configs["lstm_model"] = lstm_config + return configs diff --git a/models/research/lstm_object_detection/utils/config_util_test.py b/models/research/lstm_object_detection/utils/config_util_test.py new file mode 100644 index 0000000000000000000000000000000000000000..0bcbc39b4214a8c881721782e10003925fbf8917 --- /dev/null +++ b/models/research/lstm_object_detection/utils/config_util_test.py @@ -0,0 +1,94 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.utils.config_util.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from lstm_object_detection.protos import pipeline_pb2 as internal_pipeline_pb2 +from lstm_object_detection.utils import config_util +from object_detection.protos import pipeline_pb2 + + +def _write_config(config, config_path): + """Writes a config object to disk.""" + config_text = text_format.MessageToString(config) + with tf.gfile.Open(config_path, "wb") as f: + f.write(config_text) + + +class ConfigUtilTest(tf.test.TestCase): + + def test_get_configs_from_pipeline_file(self): + """Test that proto configs can be read from pipeline config file.""" + pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.model.ssd.num_classes = 10 + pipeline_config.train_config.batch_size = 32 + pipeline_config.train_input_reader.label_map_path = "path/to/label_map" + pipeline_config.eval_config.num_examples = 20 + pipeline_config.eval_input_reader.add().queue_capacity = 100 + + pipeline_config.Extensions[ + internal_pipeline_pb2.lstm_model].train_unroll_length = 5 + pipeline_config.Extensions[ + internal_pipeline_pb2.lstm_model].eval_unroll_length = 10 + + _write_config(pipeline_config, pipeline_config_path) + + configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) + self.assertProtoEquals(pipeline_config.model, configs["model"]) + self.assertProtoEquals(pipeline_config.train_config, + configs["train_config"]) + self.assertProtoEquals(pipeline_config.train_input_reader, + configs["train_input_config"]) + self.assertProtoEquals(pipeline_config.eval_config, configs["eval_config"]) + self.assertProtoEquals(pipeline_config.eval_input_reader, + configs["eval_input_configs"]) + self.assertProtoEquals( + pipeline_config.Extensions[internal_pipeline_pb2.lstm_model], + configs["lstm_model"]) + + def test_create_pipeline_proto_from_configs(self): + """Tests that proto can be reconstructed from configs dictionary.""" + pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") + + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.model.ssd.num_classes = 10 + pipeline_config.train_config.batch_size = 32 + pipeline_config.train_input_reader.label_map_path = "path/to/label_map" + pipeline_config.eval_config.num_examples = 20 + pipeline_config.eval_input_reader.add().queue_capacity = 100 + + pipeline_config.Extensions[ + internal_pipeline_pb2.lstm_model].train_unroll_length = 5 + pipeline_config.Extensions[ + internal_pipeline_pb2.lstm_model].eval_unroll_length = 10 + _write_config(pipeline_config, pipeline_config_path) + + configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) + pipeline_config_reconstructed = ( + config_util.create_pipeline_proto_from_configs(configs)) + self.assertEqual(pipeline_config, pipeline_config_reconstructed) + + +if __name__ == "__main__": + tf.test.main() diff --git a/models/research/marco/Automated_Marco.py b/models/research/marco/Automated_Marco.py new file mode 100644 index 0000000000000000000000000000000000000000..d0b50d336a9162a8b3e35bcbca0940d0de747e13 --- /dev/null +++ b/models/research/marco/Automated_Marco.py @@ -0,0 +1,72 @@ +#!/usr/bin/python +# Copyright 2018 The 
TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +import tensorflow as tf +import csv +import os +import argparse + + +""" +usage: +Processes all .jpg, .png, .bmp and .gif files found in the specified directory and its subdirectories. + --PATH ( Path to directory of images or path to directory with subdirectory of images). e.g Path/To/Directory/ + --MODEL_PATH path to the tensorflow model +""" + + +parser = argparse.ArgumentParser(description='Crystal Detection Program') + + +parser.add_argument('--PATH', type=str, help='path to image directory. Recursively finds all image files in directory and sub directories') # path to image directory or containing sub directories. +parser.add_argument('--MODEL_PATH', type=str, default='./savedmodel',help='the file path to the tensorflow model ') +args = vars(parser.parse_args()) +PATH = args['PATH'] +model_path = args['MODEL_PATH'] + + +crystal_images = [os.path.join(dp, f) for dp, dn, filenames in os.walk(PATH) for f in filenames if os.path.splitext(f)[1] in ['.jpg', '.png', '.bmp', '.gif']] +size = len(crystal_images) + +def load_images(file_list): + for i in file_list: + files = open(i,'rb') + yield {"image_bytes":[files.read()]},i + + + +iterator = load_images(crystal_images) + +with open(PATH +'results.csv', 'w') as csvfile: + Writer = csv.writer(csvfile, delimiter=' ',quotechar=' ', quoting=csv.QUOTE_MINIMAL) + + predicter= tf.contrib.predictor.from_saved_model(model_path) + dic = {} + + + k = 0 + for _ in range(size): + + data,name = next(iterator) + results = predicter(data) + + vals =results['scores'][0] + classes = results['classes'][0] + dictionary = dict(zip(classes,vals)) + + print('Image path: '+ name+' Crystal: '+str(dictionary[b'Crystals'])+' Other: '+ str(dictionary[b'Other'])+' Precipitate: '+ str(dictionary[b'Precipitate'])+' Clear: '+ str(dictionary[b'Clear'])) + Writer.writerow(['Image path: '+ name,'Crystal: '+str(dictionary[b'Crystals']),'Other: '+ str(dictionary[b'Other']),'Precipitate: '+ str(dictionary[b'Precipitate']),'Clear: '+ str(dictionary[b'Clear'])]) + diff --git a/models/research/marco/README.md b/models/research/marco/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d6c0b15c976f1ca88008ea9440b83e78236dc59d --- /dev/null +++ b/models/research/marco/README.md @@ -0,0 +1,81 @@ +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +Automating the Evaluation of Crystallization Experiments +======================================================== + +This is a pretrained model described in the paper: + +[Classification of crystallization outcomes using deep convolutional neural networks](https://arxiv.org/abs/1803.10342).
+ +This model takes images of crystallization experiments as an input: + +crystal sample + +It classifies it as belonging to one of four categories: crystals, precipitate, clear, or 'others'. + +The model is a variant of [Inception-v3](https://arxiv.org/abs/1512.00567) trained on data from the [MARCO](http://marco.ccr.buffalo.edu) repository. + +Model +----- + +The model can be downloaded from: + +https://storage.googleapis.com/marco-168219-model/savedmodel.zip + +Example +------- + +1. Install TensorFlow and the [Google Cloud SDK](https://cloud.google.com/sdk/gcloud/). + +2. Download and unzip the model: + + ```bash + unzip savedmodel.zip + ``` + +3. A sample image can be downloaded from: + + https://storage.googleapis.com/marco-168219-model/002s_C6_ImagerDefaults_9.jpg + + Convert your image into a JSON request using: + + ```bash + python jpeg2json.py 002s_C6_ImagerDefaults_9.jpg > request.json + ``` + +4. To issue a prediction, run: + + ```bash + gcloud ml-engine local predict --model-dir=savedmodel --json-instances=request.json + ``` + +The request should return normalized scores for each class: + +
+CLASSES                                            SCORES
+[u'Crystals', u'Other', u'Precipitate', u'Clear']  [0.926338255405426, 0.026199858635663986, 0.026074528694152832, 0.021387407556176186]
+
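+The same prediction can also be run locally in Python. The snippet below is a
+minimal sketch, assuming TensorFlow 1.x, the unzipped `savedmodel` directory and
+the sample image above; it feeds the raw JPEG bytes to the model in the same way
+as `Automated_Marco.py` in this directory.
+
+```python
+import tensorflow as tf
+
+# Load the exported SavedModel (assumes the unzipped `savedmodel` directory).
+predictor = tf.contrib.predictor.from_saved_model('savedmodel')
+
+# The model consumes the raw encoded image bytes under the `image_bytes` key.
+with open('002s_C6_ImagerDefaults_9.jpg', 'rb') as f:
+    results = predictor({'image_bytes': [f.read()]})
+
+# `classes` and `scores` are aligned, so pair them up to get per-class scores.
+for label, score in zip(results['classes'][0], results['scores'][0]):
+    print(label, score)
+```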
+ +CloudML Endpoint +---------------- + +The model can also be accessed on [Google CloudML](https://cloud.google.com/ml-engine/) by issuing: + +```bash +gcloud ml-engine predict --model marco_168219_model --json-instances request.json +``` + +Ask the author for access privileges to the CloudML instance. + +Note +---- + +`002s_C6_ImagerDefaults_9.jpg` is a sample from the +[MARCO](http://marco.ccr.buffalo.edu) repository, contributed to the dataset under the [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/) license. + +Author +------ + +[Vincent Vanhoucke](mailto:vanhoucke@google.com) (github: vincentvanhoucke) + diff --git a/models/research/marco/jpeg2json.py b/models/research/marco/jpeg2json.py new file mode 100644 index 0000000000000000000000000000000000000000..db795e05bef478d290be51ac6526f38f31b0bc65 --- /dev/null +++ b/models/research/marco/jpeg2json.py @@ -0,0 +1,35 @@ +#!/usr/bin/python +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""jpeg2json.py: Converts a JPEG image into a json request to CloudML. + +Usage: +python jpeg2json.py 002s_C6_ImagerDefaults_9.jpg > request.json + +See: +https://cloud.google.com/ml-engine/docs/concepts/prediction-overview#online_prediction_input_data +""" + +import base64 +import sys + + +def to_json(data): + return '{"image_bytes":{"b64": "%s"}}' % base64.b64encode(data) + + +if __name__ == '__main__': + file = open(sys.argv[1]) if len(sys.argv) > 1 else sys.stdin + print(to_json(file.read())) diff --git a/models/research/marco/request.json b/models/research/marco/request.json new file mode 100644 index 0000000000000000000000000000000000000000..338a00917b3d9724c77a58e1bccf31e81a7e6ed2 --- /dev/null +++ b/models/research/marco/request.json @@ -0,0 +1 @@ +{"image_bytes":{"b64": 
"/9j/4AAQSkZJRgABAQEAYABgAAD/2wBDAAYEBQYFBAYGBQYHBwYIChAKCgkJChQODwwQFxQYGBcUFhYaHSUfGhsjHBYWICwgIyYnKSopGR8tMC0oMCUoKSj/2wBDAQcHBwoIChMKChMoGhYaKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCj/wAARCAPABQADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD0iPg4J47VKB0FNAPUVIORius9ERRxyacvTJpB1GOpqVMY4GKBEeOfalGQcdqd04xR1IzQFhe3P05pyg9zSOMAHH40hOF60mIjcAjIJyPSoMke1TnGPfrVWd8nAHNBLEkKsKpk4JHbPFOkl29etUpJsE0bEMsTOFBBP41kXdwFPX9addXqYIDVy+t6rFbq21gz46Vk5Ethrd6gyOpPbJrjdRmVhnHP1NOmupLiYtIevYVk6tKYzhazb6Gb1M6+mUSYUc/U1QlYNyP50yV2eUk9KbCQ0h3HpTdiSRVJHp9KrXG3POcj1NPaf5GORWdJKSxJ5qQFlK+p/CqzkHuabJIc9aibt3qhEsTHouMUr4yRn9ahDYHFG4kmkAjDrg/gKapKt9KCSBTQOeRTAs7t67s8+lMbGOaaWwAM0EjPJpANNGfWkY/NTW60wJPwFHT0pVGQKNtACYzwOab04xzUpG1eKjYE9aAFUkGnn14FRrhakA3dKAG9u1AznpSlSOv5VLEny5P50ARnI+tHIOR+NSbcHmmyN2HSgBhHpxSoSTwMmgDJzUiDHSgAPGPyp/PFSwQruy3FEyqw+WlcCDODUhOVyeKjVR3qQ4K8UANQEngA+5qxnKYYkH271FGQvXmnPJxjpQ9QIm4anAjA5UfSnou8+op4RBgcUXERBuepp8JO7jH9aawA6U6MEjgc0AEhBBHNMGfxqXyj1OcUgUhccYoAhbrTkJ3U9YsnFSGMLQBE0meO9KGyQDTXC9e9M3fMaaVwLGflFIzcVEpOPanhC3AzVqArjWPNNCk9quwWbP2NatppDnkIfyqrJDSbMOK3duxq5BYO3YgV00Gk7BjGDVxLDHAFK/YfL3OYh0w8Zq7Fp6jrg10Caa7dBV630SVv4OKYaHNx2qqen6VZS3JPyrxXWQaC3939KuxaCwx8tK6A4tbORj6e9TJpjNjiu5TRD/dq0mjYAyBRzIRwqaVzkjr1qzFpA4yufrXdR6SBjip00tRxtpc4jh49I4GFqZdI5AC813C6ZjAxUi6cMdKXMBw6aSOygfSpRo/qK7VdPA7U4aeoHI5o5hHFDSM4yOPT0p40oD+H9K7NbFcdKcLIADgYouwOJ/skenvTRpQzytdwbJT2Gab9g44x60cwjhzpI/ujApG0kf3a7c2A/u0n2AY6UXA4VtJGBxUb6QM/cGK7s2IxyM9qjOnjuPei4HANpAPbpVZtIB7HmvQzp4OcioTp2ewp8wjzt9IA6D8KryaS46DBr0Z9LB/hFV30r/Z4o5wPOH06RfUiq0ltIvVc/hXo8mk/7NVJdHPZc/hT5kFzzuS3z1WqstkrenNd9Nouc/J+lZ9xorA5Cn8qNBcxw02nHsKoy2jp2/Su7fTmXqBioH01HGMU72HzHAtEynkU3B712N1opwSq8fWse60t4zyho0ZXMYzdeaZjrVua2dSeDiq7Ls+lJxKTGg4OKeaiJ5pwbjmpcRpjh6UPmkBNKzVIxADjFGDmlGQOKkXnrRcBpBKj0pD0qRWwp3VGxz1HWgGKjAHqalEg9W+lVs84NSBcDOaGgJi47k/jVctuYnrQxHU1GWXPBzRYCYtx1OaZk5pVUuM5pPLbBNAg5xkDNM2nHSpAfWl3fJ0oATado4GKYVPXAp2444pMnJ7ZoGQ9zQc+macFO4daklgYDd2oERA47nNMY+oyfanYPenYOc4oAg7d8mm/yqaZR2qHtgdqAHD1pyimjpThkCgY/PcHPtUTEk96cp60hKkDPWgCP64NKPSlYYPHWgZ4oAeAe/SnA00df60uMcmgCeNsfWrVtKFPP86oKakVuc00xHSWl2Aa3bS5BwR3rhopyMd61LO+KHrT3Gd9azKVAz+tbFnMoI964axvwSOea3bS9GRzUONxpncWU4Dr6D3robWRSuQRiuEsLrewyRXTabOSMcGs3oWtTdc5BqOQ7h8tAIMfXmk5I/rW0JESiV3BBpIyM89ae/Qk1AWweCK6E7madmXIyM9cVcixxisyN898mr0L5HH5VLRpuaCVIPwqvCxIIJqb6UjKcRSee1SIwBqNsYOKaCQaGrnHONjRicZFXY5FHB9KxkkIPJq3FJyCT+Fc04GXNY2Y5BjipVcY68VlpN71MswzyawaNYTL+7J60hb8qqrLxmneZ7mhM6IzJWbgZP4UjNxnrUTPxxmkL8YqkzVSJSenPvSFzj0NQh/em7/enc1UiUn8qRicfyqNn6CmbwenWnctSKAPTFSbiOOxqDqeDUkeTXedZLuwPTFOjPy8jmoic8cD6UKWB5oEWFGTj1p/CkdKihkGeuKkd1bAyM9aQxW75qF+vBqYsAPpUEyjII6UhMgkDMcKelRsmIycc09uM7eBVO+vQIWUHDYxxSbIehQubgvL5aYJ/lWRrt6tlaEs3zHoB60+S5WC3lmBweTXF6xdNITcXUh4PyL2qW9TKT0Hy3spjZiTuP6Vzc08MAkluW3OScKamvdVkeQRL8oIrAvY1dnadiD2PrWUnrqZk41SISB2GKx9Tv8AzZdxOF7VT1CZdmF7VXmX7Sq7PugUcq3JuSvcKSpH3aglIiBbPJ9Kqu5AESjJBpkrMh+fqe1FhAZy5KgUE/u+BUWRjvzTgHSNj296bEQYJbnvSmNgM0+AgNlzwKLiYOcKcCgCDvRtPamjlvapVYZ5FAEZHSjOOtSSJjk8VD1OO9ADkAJyTmnhC3OKjGFq1FINtD
ArsmGo206Zhu460zdnrQA9FPYUvRgD1pnnYXC0hbGCTzQA98s2OwpGHFNDkngcU8DjBoAWOIkbiODUqDjillcLEq/rSL/qyaQD0iLHP60jnGBnjNILn5dq8VCWyffvQA6VsHj6VGCc89aU53DPU0pOBx1pgTMu0AnFNVvmzSEtKADQo2MDnOaALABK5NNHPy0s0pChTj6iohLjvSAPLO8imhGJwM8UGQ7s8k1KW2x5BwTTAiV9jYpWYkA0zcGPrSnJwMHFFgLFtISNvelnBQ5PApYk8o7qbNKZm29KXURD5nzVcjkULx371CLUsm5WG3NJ5L8heQKNGBaW4XgMeKYzBxhartFtAOas2cqRklkBOOKEuwhqkxgnGD703fnk5qa4kMrDA/KprKwnuHwkZNaKHVgURGXORzQLeRm4Fdnp3heR8Gf5V71v2WhWNtjcvmOPaqukUoNnA2GjXNwRhD+VdJY+FmwGnYKPeu0trORgFt4Qi+o4rQt9DlmwZWOKLtlcqRytvpVrbgADefYVfgs3k4iiIHrXZWegxIclAc9a1oNMRMDaKWgORxFtoc0mN/Falt4fQY3LmuxjslGPlAqwlsB0FK/Ym5zMGjImML+lXYtLUHoK6FLX25qVLb1oEYSaeqgkj9KmSxGeBW2IAM5FL5IBGBzSEZAshxwPyp4s1GcitgQc4A5p3k4PSgZkC0XI6ipBaj0zWr5OBwKTyjnpRYVjMFqPSni2HoK0TFxnB5oSPPGMe9FgM42xA5Wj7N7VppESMn8qGQqBxxRYLGZ9nGOB1oW2xjitDy+c0KpzTsBn+Rg9DQbYHAI6VoPF6fWlEYwKLBYzTbDHTrSfZxjGK0RGR9KDHmiwGYbYelM+zAnpWqyYPPSlaMelFgsYxtwcjGBTTakHkcetaphzT/IxwB0osFjEa0yegqJrMf3elbxhC9qa0C4ziiwrGA1kOPlqNrJf7oroGtxjPWo2txjkUuULHOSacvYCq8mmqQcLXTmAY6cGo2t+vHNFibHHz6RE+dyCs+50BGyVGDXdyW4weKryWwIzijUTiecXOiyJ9zms24s9pImh49a9Qlsweo4qlcaeHGCgNHqS4voeU3WjW0+dvDHtWDf+GJcloxmvXbvQ4pM4XB9RWTcaNPHkwsfpTTa2YczR4vdabNA2HQjHtVF0ZTXsVxbN927tg49axbzw/YXIPlfu29MfSq5u6KVTuebFio5pm/mun1Lwvcw5MYDr7H6Vz81nJE2HRh+FHKnqjRSTIVc5q0hxg+tVvL21NEwwM8YqHEpMtyRqY92KgRMnmjzcnGcipFXIzgjHtU7FELxjfgU18/d9Kmd4wMgnfTQruVIXNAio5bJxSRxFiTirs0iBSpQbvWo4pQoIwDRcBsY2Ec1K5d14AxVV92SxzyakjlcDBHFKwDZCcBR19qVVdR8wp8bhW981NJFI8e4MMfWncCHeBUZUu/HSnxKrEiT8MVC29GOATQIl3hWwetSeePL2nkVVYEjcahYk8A0rDuWHcHIXvSlsrVcH3qVH28GiwDCf/wBVMZKc+S2QKcpH8XamIYvSnbqdJgKCOagyQaBj+pOaAoPIpAafuFAiNgRiinZyOaQDJ9TQAozgUMaUnt0pNuevWgBVIx7U4MDimY5xS9MUDHhiD1qxC/qaqdWPrUidRzQBr285TkH6Vq2t5Iu05rn4mOMLVj7SwAAGMdcU0wO/0u9GzJbB+tdVo2pgMuTkGvH7e+YsDk4rqtI1NQoG/kUpRuioux7BBdo7YzitCFlcHB6V53pWqFnG45ArttOkBhD7hj0zWXwGi94szJ6VQmUqSSa2ZNvljbzWdcpkY71vTncxnCxTinweTgVoW8w3DniseePac96La4IYA9a3eqIjKz1OrgfPSrac4xWNYT7sDtWzDzg9jWexrox+PYUx+BU2B25psiZHrVrUwqQuV1apVlIOaikGOv51EW9OlEoXR59SLRoxzEnkipklrLWQnFSrLgjmuWcDNSsayy8cHmpBL2rNWXsakWT3rBo2jMvmTjkmmtJz6VUEh9aaZKSN4zLZfnGcUm+qqyZPNL5naqRqplnzPzpN/Wqxcg88+lG+maKQ1epp4OCaaD3pTn6V6B6Y8YBGelKXHIpmegPWhid3bJoAlAzj0p+QeMVEucECkjDBuelIRZUjBBOapXk/lR1ZOOtZmsMqpljjFJCZVur9hGQoO7Fc7NemR/K3jOck1YuLwMr4HbGaw/JjhEk8zkKMk+9DaijBu5Hrt6kVuEzwOTXGT34u78B8eSgzxVrxBe+ajEHCPwM9q5We5W1tikRDO3U+lYbkNliS/S61QL0jU4ArJ1zUDJdkRj5E4ApiIdplDfN1qndyKrAcE9SfWpdrkFW5uN2cj8altBItu0zcIOnvVGd90megqR71jGIh9wVT2sIWKQIxlxk9hVaWQO+WOTSFzLJhQcVNcWvlxg96AIUYZBxmpHkZj83Sm2ZEZZ5OcVFLIWbd2pCFYbnOKYyEDJpY2457808tujx2pgQgD15oyQacSFBxUbdBQBIWLdaiTh6DnoKApGKAHhNzZJwKXocLTA3r6U9GHagAdQo68mmA5FSMhkHyn8ab5e3A/OgBhAyKc+CcY4ApGB61JAuASeTQA+BgFI21IgBbntRFHlck4GaDIiqQM0gGz/PyBwKUofLHX6UzJ2/WrDcBRntTAqgFiT6VNEF3DI5p4cZxildkB+UCgCKdVViRzUflkhWGaldw5wB1qaMbmWPgAUAOs7Xz2LE4VetSTwRgYU8imzSbMpF931pS67QvUmlqBnyYwOmaEwelTNbky7QRmr1tYKFLOR9TTvYCpawhnUscCtu70iGO1Ry/LDOPSqIgVblAzYTGatTSrcR7UY7R0FKTfQRiyqsTYABwauQRrIgfuBmqdxCyOec02O4ZOBnAoeuwE805U7AOtRxgtk5xUUkpZt2PpSxSNnnkU0gJPOKkDPy1MtzjIAHPFRbPNcYBrY0vQbm8YbYzt9SK0VPuG+xlsrOeh57Vpabol1dOCqHb713Gm+F7e0Aa5YM4/hrobOxdwFt4gq/TFUn2LVPucnp/heCEBrj5m9K6Oz08IoW1gA9yK6Wx0Mkgycmt+00pUUYXAqX5lXS2OTtdGllx5rH6Vt2WhRpglRn1rpoLEAD5eaux2mByBRclyMO301U6KMVfislAHH4VrJAM9KnSDp2FIkzI7TGPl/Op1teuRzWmkIHXrThEcikBnrbcc1PHCAPu9quGMZB9KcE4NAioIz/CKf5PfpU4U1JszwevWmwKgiwDnmpo0ABwKk24PTil2UAV2XnKgD0oKEqOanKkCnIvXkU2IgKYWjbn+lTlOlNK5JpAM25HAphj/A1OuQOOBQy5wKLWAhVc9qHXIx+NTBcGmlfamtR2K4TJ/pQFw2alCknkUEcCgBm3oaXYAc4/+tT+Dg07kigCApgjvSFO44FTuOOKbj5aaCxCQCvvQU9OlSYpBw1MLEIUdKcmD1qQAbs4696VVweBigdiF155696jK4PQ1aZQeQBnvTMA9RQ9A
sQBMUeWe9WSOKawyMgc0bisVjGD2ppi9quBRg8fhSsmRz3pWCxnND3qAwDHvWmy89s1BIvPToaLCsZ7QDHAqCS39K1dmaYUyelKwrGLLbDnpVR7QHtXQvBkVWeDtgUWFY5m409G+8tYt7oMUvO3B9a7prfIqtLb/KBj8aVmtiXFM8zuNJurbiI709DWPd2ltMSt1b4PrivWJ7Pd1H4Vk3mkJIpDIM/Slp1I5WtjxzUvC6uu60bcPSuYvdNntTiRG/KvbLrQWQkxZU+1Yt9ZkDbcwhl9cVab9RqbW54/kxtg9qsrfMq4IHpXaX/hmC4DPakBv7prldR0i4tWO+JsD2otGRqplGTym+YZ3UqXAVNoJDVXcMvUYNM6npzUOBdyYRSTyYXkmpms5oWBkX5T3pltIyMCCRVm4vHdQpYkd6nW49CKYDygByahVSODV2PY0Y3HBqHPmSADAA4BoTANoCsSMDFQyTADAHtU8u7oG4NVZYGCjHNCERqST157U4uysM0oj7dKa55Cn86AJA/mDGBSG1KnJ6GpEi+XcMU52Y4254pDIhZv97bxUTpjK8Zq1vmA2hjg9s0xrKXy2kyPpmn6iKh3cjNAHqOtIGIPekLZwDQBNEgc4Y0jxbGIzkUsKhj15qN9ysQ3agY5gAlMUZNICSDxxSxvg+9AD3j+XIqFW2nFTO5PtULKepOOaBE0bKeGpSnXbzUA61KhI4FAxrgg9KOanZjgZA4quwOScYoESBMcg0v3SKjDcYNKMk+1AywhOcjipVm2ocgEmoFYYA70Ow3AUCJY5Pm61o2MzeYOcVkqOOas2p+cc1SYHfaVdlGXdj8a7TT9RfA8tsg9RXmOnXKjBcnAre0/VBHIRGeO1TJcxadj1Wwvy4CSHg1cxvyc8elcjpeoiZAv8eOtdLpRkYAtgjNZp8pe4XMYYYA6VnSwlWyBXSPAoGetZ91ECSAOldEJ3MJxsVbG4ZWUd/5V0llNuUVyoQxvuArUsbjGMGnIISOoQgjP60/Gc5/OqVvLuTBPNWlOQM0QkWxkiDBx1qlIhWtVBkc4JqvPH1xW0Wc1WFzNPBGaVXO4U+VCDxUByDRKFzgnGxaWXJx1qRZOnNUd/wClOD8e1csqZCdjQEnv9KUv61QWQ5wPyp5k/wAmsXA1jItiXHFLuqkr+pp6ydOaLG0ZFrd0yaN4qtv9+fegvnvRY0UjSUDPPSncZ4pinP5UAH8K7j3BW65xmkyep60EEinBcjANADo85B7d6V3+YAjGKapwTzjHFQyPzjt61IhZLjbkHgVn38sbx4kIINVb+5ZXIBFZl3KVQySOFA6ZpSsjJsztVuY4BIw6CuV1OWSdY98mxC2duetW/EV8rRBItpGcsa5bXbtjJDHGTuCj8Kyk2zFlXX5PtF1FaRD0yao61bwWNosatumIy1RKsk138smHB+8TVHWWYAgnew75zUbNK5FyjJOzQ4BwtU2dSQoOTnmmszlMH1qLYRkk4qrE3JpkTI5471EAgOeq0juGU81CWO3AFAE1o6ifc33RU15deaMc4qkq4HJpG4/GjcBQ55FIwJPFAG0k0rOAPc0AMyUOO9KJOmaYRwCc80DGMgfSgQrEknvR2FJn5QfzpV5UkUAKeg9aUAtj0qNietOjlOMd6AHFMtxSJGWfCmg7gDxVmwdFcMw+tAEbBosD1prOMD1qa6cSzZUcVAwABoGPjUbcn9amUBUzUUbbyM8Cp5U3uqpznigRFuLE4zxUbx4Gea0GhECEEfNjNQMVcqvagCJGAQcc1MFDR7j1PFJOQABtAApJJFKYHGKQEewKDzmlg24IahAGQ881GEI5zx60wLYUyfLEvI7gUxUeJsyA5q7pdwkMeMZPUmkvLhLhnZVCmlfWwFSBWlkzjCjrUlzGUAI7jtTrcs37peM9TTZXwwUnPFPqA21jZyW5+tXTIqwlZGJPYiqskhVAqqRUIilnb5fypPUCSSbnk5pYCdpKnBqCaAwthup61IJcAIqjFP0ESSkGPGMk96gCIfT3qy8LGMEAgfzpbHTbi7lCxIWJ9BVxhcCqLfPI79BWppWg3V84EcbbfUiux0bwqkKrLft/wGuqsrVyoisYNqj+LFWnbYtU+rOc0rwrbWYV7ohn9K6mzs3lwlpD5aHvitvTfD3zbpyWb3rqbPS1TAC4HtSfmXdLY5mw0AZ3SfOx9a6K00tEAwuK2oLQLjAq7FbcDjAobZLZmQ2K8YA4q7Fa4GAPzq8kIAHFTpEO9IkpLDjnFSrEM9M1bEYB9KURjjFJiI0iGRkCn+WKlRSBg04oTyOuKBMhI54FOC5I4pwGCc88Up4ziiwr2G7Rt4NLtwcVKo+XB/KlIA4xxUhcgKHrzQmOanK8VGyBTnPHencBrD5qaBg9eKZ9ohMhQPk/WpsfKMUxBt3AdqRU6mpFI4OaXI7UuoyJhUZGD71K3DdKaykjIqmAwdSD0p+OOKaRkZGOOtOXp1osA0A9O9GeMGn/AMXNNbkgigZH2NNBHIqXA6A0wjB9qdrDsJt6c8U5R8vvTkXOacRjpzQgI26YHNJgDtxTj3ozjrTeobDdmDnOKYVwfTNTE5HTmk60tSiMKCoxS44HNOUdqD931ppCIyDimbSM55qU/dqMigGJjjmgcCnDkCjpxTsISgk8c4pp56ikz19aqwmI3Uc1GeakOev4Uw9e5+tKwxgHak2+1TdQcDFIR6Ac0rARlO3rUTxjcatEZP4daaQuPehhYoPFzxULRdBWmyDpTWi+vFSKxkvDySR9arSQde9bM0XAqsYuOR+VKxNjEltQVII5rNu9NWRSGTNdS0XFVngzn+tIlpM891HQActBlG9q56/s5ovku4fMT+9jNesz2oOeKy7zTldSCoI+lHqQ4djxXU/D1vcgvanDelcpfaXNZuQ8Z+uK9s1Tw8Mlocox9OK5y+smVTHeRBl6BsU9fUFNrc8qZtoxRGUOS1dZq3htXVpbQj1xXMXFlLbyFZEK/hRZS2NVO4yVgV+XNRCQADrzV2BFxtk4zwKhazfcxjG4ZzUWsWMhR5ThTn2qSSGWI4bNRo0kMvHBBqzNdtJGd5B4pO6ApNnOeaSUqwGBzVqKBnRmBBx2qrM+RsAwc8mgCzAR5XFR/wAX+FRxblI+YYrXtkhWHdJtI/Wlew9zN3AMF5yKhmmkCkZYD09auzCIy7oiDVa9bd94L+FMRWhQOeankijVMnB9KrLwwwcZq/FFGytuPTpikxlBTtOQacxBYknJqxLbKBlTmodgFMQhQbOOtQopBzVjoD/KmoR+H8qBipEZB+tDwnHXOO1OJMbZB4qMykuTnrSERD0p4yRxS5Xj3p3ABPQdqYwdyF60zzPlI4+tMk5XINMB6Ciwh4IPFPDYB9aaqgjOOaXac9RQA6LJPSpGGMEiokO1sGpd27g8igBU3dQOKlUlO1NBwn3T9aYzswHNAF+1uMMA5OK1dPu1SXnpXNgkHaetXLWQ5GTxTQ7npej30ZOQ2CK7fSdQ2oAhPPUntXlWjeWQHMwBHbNdro98g/d7h061nNJlxbPSIJ45
bZTuyaSWLgkDmuf0u58sbWOR2ro4JDMhJ+UelKDsOSujLuIxycc4qpG5RsE1qTiNdwzlqzJFBLEcYrpT0MHoblhPkLzWvE+celclYzFGwTj0zW/aTgkcis3oy07mrEfm4FTum4HjtVSJuRVxTn6VpGYmZ88eCaoSLg5rcmjz07VnzxYzxXRF3OSrAzW4NM3YNTyofpVZwRnH4mqcLnHKNh+/v3pwYZ5xmoM8UoJ696xlSEThvm+tO3cmqykhvapQ3fNYuBaZNvOSTRv/ABqEtgCjfnFLlNFI3I+CR608tgD0pMYIoxn2Fbn0g4kbaFYAZPSmkcZ7U0PkhcdqTEDncc5wKpXsywQu5OMD9atSsMHtj1rmtUuDJKYx90etLciTsRQSPIGdsnPIFcn4n1GQSNASQK37u+jsLN5HOSBwK4iSQ3Mkl1d4KtyKzb1cjGT6GVqMk32eOKLJldsj6VQhtLmS9Ac5bGGz6VpWF15t/NLIFxGPkBqIX/lSSO2zdITjiolJozsjn9WuBZSukZyzZGfSsO4uw5A/n3qzrkvn3RIGOayQheXAznPFEV1Zm2XJZFMWcAfhUccDS5Z/lQdzTJQVID8ACo/OZxtGSoo9AGzICcRDI9aMCNckc+9SPIq44OaqsS3XikAkjZIAFHAGOppo5z6UsPDAnGBTEK/3feoS3t+dSv8AMS2cYqPpnrQA6M+YSD0FLwFz2pigAZ701s8CgCSMbzzTipx8o49aWCPIIzk+1SM5SPaRgnrQMquDnH501eHBParOwFOeD1qArxmgRKWLj1pAxXsc09BsT5sZpQm8EgGgB8UmxSMAseKHjyO3HJpmAjLzSzMc8Hg+lAyNSVbj86uRTFFDdxVdkXC7c5PXNWYkDr8/HFADHuGl3Zp9kmSzEZI5qeyhRi/uKRpUhBVOfei/RCK8qtLMOB70jqoyF5PanGQs2eQKYjKJCe/oaAIwCjH+VAYkAEdKtR2xmyxOD6UpjRY1UDLZ5NAFUsy84OKVJMZyetWr1N6BY1JA74qkUKNz+dCAuLvyG9fSpfLjlbAzkdKhQu0ee3algDeZknGOaQhZ3EbFetPs5zGzEYBPemXCo/ziolViOnWqUeYCW5zKwYNkmnW9o7SJsG4ntV7R9JuL11Eanb/eNegaNoVvYqrSKHlrRRURxi5GFo3hqa7CyXfyRj1rr7CwgtF8qxhDt/exWxp+kzXZXcpSL0AxXX6ZosUAUKn4kU/U2UVE5vTfD8k7B7rJ/wBmutsdKSIAKoA9hWvbWSr2+laUVqAOmPwovclsyo7cRn5Y8mr1vaMcFyfwrSit1B4A5qxHAMVJJUSHHQfjVlYMirCxgDAFSx8elJiuVkhAHOCMU7YAcgVaUfNjikaPIBHWjYVyvsDH+VJswamxhqcFz9aTYrkGORSgEmpWXYKaOmR1pITZFKmTx170gX1qVuM+tN70xXGA5Ge1OJyPUCnYFIQeRxQmAo+YVDcRNIpUHGe9TIPTin/lQBj2mlJBL5jMWY+taBXYRipiBnFIRmi7e4ETjutKD9M0/wBqaAMniqQDG55OMU1SehqQYxSFM0dRkQyp5FOCgHNKw4yOlOZc89TR1GR8mlz044pwAHXj2pxA2imAwAdT0pCuG9qeQSB6UDleO1IYijbyfrQ2OSe9OHHFNY8AkfhTQEZGQfXrTQDmpMEN2+tMIxzVDDikxgUoOKTPPNUA0YDc0ueBSkAHpQPzoWwWEFBTqT3pw+8aMcYPNIZGAaTbTsc0rYByM0kBGwBppUetPPFNOe4ouA3GTj8aay5OcVIehHSjt+FNhYjP3cDFKBtABzTyvGPWnIo70XHYjIGaTb+FTAZJpjD0oFYjxg88+tI2AAOOafjpgDmlwM9KdhWInUcDvUJi44/GrJ69eKbjHpj0qbBYqNFzULwg8dK0QMrheM1HKig8ikS0Zbw9qqyQjnI5rXaMc8VBJEPSlYVjCuLMEcDrWPfaXHKhBQH6iuteIHjFVZYOpA496WxLjc8y1HQZIHZoM467a53UdOiuQ0dxEEkHQ4r2Ce1VwcjIPrWDqejRzKcrj3Ao33M3FrY8R1XRZ7Ukou6POQQM+tYxmkgJAzketeuanpcttv8Al8yLngjOK5TVNEgulZ4Rsk5yPz9qfqVGocSV887icGoZ4zGcHn6VfvbGa1lwynA9qpsTn56lxNU7iQucY3EeuDULoSxI71YkMOwbMhj15qJWJGD+dQMQowIwCR3qzBD56kBirfWlUErxj3qu0jwyddoNLcAkgeJzuJqFYzKxwwB96tSzeapGOfWolXy1+YcnvTAYItqBn5z0qRSqjC9asu1vIQCGUYqk6YJ2NgUtwHxsQcSECmS4HeomLM3PFHzAc5/GmA9fmYVNIi+X8mNxqBC3b8qA7A9ce1KwxGRxw2eKaUHXvUrSbuD1okgaOPeelAiueuKVmOAPWmFjn1qRAGxupgR9uelSIoIxjinFRnpx7UPhcbaBgF25OelMByeaTnPSnYIoENqRCFPPWomBPFKVOaALqSDaVOMVX3DPApijkZp4jGM/lQAu7PTtU8OV57VXCY5BpyykDbxmgDbtLoRAVu6dqb7ly2K4uJz1GfzrZ06cAqrHAHrVDR6lpGoSOU2tnHOD6V31nexzwDkBsc4rxvTroxSKUbr3zXd+Hr3dKFYggj1rCUbamsX0OqMAkUsG5qp9mIc56VNbmTcdn3c4qd2JXB9K1jLoZyiZcy7GyBV6zuOmTyPSq065YgkfQ1VR2jlHpnpWjVzNHX2k27HOT2rThOQMEVzNhcdOePWt61kBGQc1jezKL5OQarTJyeKnXPHpSsnHNb05Gc1cx5ojg4qm6cmtmdPUY9qz5ose1dsHc5ZRM5lx1poHHFWJFqLArRwuYuIwcHOKcTTee1BrKVMmwuc0Bs+1Jnjikbr1rNwGdKDkc8EU4EYOar52nrkVJvyPSsrn04rEgZ7VU8xmlznAqdmJG3pUE2I1JXrRcTKl/dAkoueOprC1J0jjLMSOO1Wrycbi7EDmuR1S9lnmkQOAg6A96iXkZSkZ2pXLX1xyT5a8AHvXOarcFnNvASW7Cr2sXYtLYmM5lbjFc5b3Lxb5GGZm6VjdswbLNxiyswkrZmc8gelYVzeHzTk89quXc/Pm3BDP2FYNyWlkLqO/QULXclsluHTYA2S55NVrZxHMGI+lQPuHLHJJwKmMQWEOxwT0xT2J3JL6VXztx0rPBcfd60kpxxnNKuRii1gGkkHJP50wMQeeRUkiHHXmoCCCSfyFAh2/t+tOzhQTTApYc08Y3AUABB2fKOtQnk1bY/LtXrURRe45oAYvABPJHpTgC/zKc06OLecZwBT+Nm0daBjYWKAnvTXO9qU8cCkX3oEORS5CrktS7SjENww9alicRhm4yelJkHLN1NAyMlnGcfpUiuAnApY1VlJ6Y61G4Gfl6UAI6lyMkcmppYjkBfXFRRsC6g+tSTEq/HTNADGUq2CeasW0TO2AxHue1IuGcBRnjrWoWiitAuPnPehuwJFE/uA2W5PFU5G3OfappgXOc1X
KknAGaEhE0YXC5bn61YgjQNlxkVWSJ1wSDinyy7BtFAFiW4IJ2DGeOKbbMcjPTuarpvkyQDj1q6XxAFCjNGwFhJfPPloASe+Ky7jKOwOD26VYjuPKB2nDHimvEsiFy3PakBFETnr8uO9P3ggY5NQHOeOlXtOsZryRVjUnn0rRRvqIgijaRgqgk5rr9A8NSTkTXXyp1wc1r6D4ditFWS4XMnXBrtdL0eW7K71KxDooq/Q0jDqzN03TwuIbOLA7tiut0jw8NytKCW681uaToyRKoCYA9RXRWtkFwMc0Ghn2enhNvy4xWvBa46gCrcNtgHIq3HFgYxSJZBHAB0FWUi9c1MseADipUQZ4oJZGsXA9KeEAPIzUyL0z1pzLUktjGUDOOajxg8dDUpHHBqJuDg00Sxx4P0pd2M80wHGCOlOIyBjvzQSKw3Dgc03aQcUq4z7U85HNQ0FxvDA5PNRMhHuKmcdx1puc8YzRsgTIsA9RTQMdRU7AdqY6+1FwGKOOKULnrSgcZpwPpQA3ZjntRj8akbGcdqb1zihAMII6ik6CpCM/jQADnmquBFj5qQ5z9acynHtTR074pDEIGOBRtpRTiuRz0oQyIDJ6UFSO1SAClx+VMCEjvR+FPIAGDSEdu1UgE600rhifypxHHHWgdMHqKYxmex60u4Z6dutDLzntQ4wOOlPcAYegqPB/On4GAcn6Ug6Akd6BoiHB5FB4/GnOpJ6YppzimgAdKRjzxR0Xikp2AVSQM96d1HPWmc5NOQYNFrjAgd6M5OKCPrTtpoGRnHemnoakK84PQ0mM9KQDQvAzSAcEU8D3pSuT70DsNWlHUGjGOMc08ZCnHemkA3gnimEZ6U9RnPakHNJqwxhXAxSAHPSpMZz6UBaBWI9o5NIU460/vRwRTERgYHFRScHPX61OenFQtjHI70MViFgPzqN1yDnpUxpm3IxUtCsQlBgdKhki5PpnirrDp6Uwp8vtSaJsZkkPOcVUltwQeK2XQYz1qB4e4GR1pNCOavbBZEOVBz7VyWseHjuaSAYbngcDNekyxd6o3FsrdR+NJaEygmeMalpwdWjuogD0ziuO1jQniLPHll/P+le86ro8cysCnHsK4rVNHkg3YXcnuKa8jPWG541LE6nB60ioRyTXbatoaybnhXDdxXLXNpJCxUg5HtQ0maxlcgRuPTFRSN5ko4qTCn5W4zUR2qeOfes7WLLkBSNQdvHfPNQSkPnjp2pPMBXC0x1kHIBwalAOeMlQQetRuOR7VMhyRkUSYC/dwc0wIwh8vOO9IzjGG5xU6uFU549KgdEKkjrSAhGN2CTj2obAbg5pwBHXHSlSJ2bpk0wECgkEnmnylmXb2qN1KHHfNKruhPFAETRkDgdKcuSvT2qYHcvIGKEAycEdaAIQDnBqQDGQRSTAhj0/CmuWHI70DAAE4px4XJxjpTVDAf0pWz359qACPAzjFJtYEHqKZtPPYUbm6E0CHZ59vSnoQwOTUTZ6ikVuTQMlYkdM4poGCCach7cUrEbh7UCH/dxjrU8Ltu461TZ8sfbinxyYPWhAdLpt228K7cCuy0W8MTgh/mx615taz4bIzXTaXeElfmwKbVykz1vQNVZWPmHg+prpgodMnqa8x0e8wyHOK9A0e8EybJThh0rBvldzVaqxPNGuCBj3rKu0PRcnHoK2JVzJx0qndx966IswmitYz4IXdXRafcAgAnkda5I5imH90mtqxmIwR26jNTUXUlM62J90eKsKfWsqzmBA5HSr6NkCs4SsKQ+RC3FVJo+vHH0q+Dmo5EyOK7qUzJoxpo++OKqsnNa80fJzVSSLArvg7kOJmlCDSEEDpVl0x2phT1qmjJxKxHU44o7ZqYrg5z+FMK+1ZuJm0a24kjI/GjzOgpmQM+9I3XPpXnn0orykP7VUupw7EZpbidQvNYWsah5aBY8B36GlchuxnazfIboW8Z6dTXJeJroQzIUHbBxWrqfl2cJurhvmPT3Ned6/eXF1IZBkJWT956GE3YW4k81zJkmqktykUJbPznIFWoI86SXc4ftWKys/LdM1Oj0M2QQsZ5iXJIzSyuoD469qgJMbPtPuKhdyEO48GmyQZQSMnrSTuZDgZwKiBOcn8KfGw35OM0ARtHgYPNIn3gM0sjF2wDT1AXnPSgCJ2O44HA71Fgs9W4yNhAAOagY7WIweKEA4jahApsQy3HU05XGwr/EaZuMZJzzQAu3bk5zUbtl+OKcXyoFJt56c0AAcquB+NK52r71KigQgtjNRuAWIoAkgxs3NyDTHGOneljXMZJP4UojJYAmgBQAIwT1PQUqJu5NMkXLYU5AqUH7qL+NAE3lKsRO76CqwBJOBxjvT+Q2CcilcnGR0oArxnbJ8wqby2kG4dKikUhqt2kihTG4yM5oAWLEUR4+Y8fSpkR5NoAOO5qCNx52X6elW5btdp8sYyOKAIpEA4Xn1pitGhzjLe9PX5Eyx5YZxQsCMm5jy3QUAKkhuZdgAG7inXNpFHJgPu4pGha3BZOGxnNRSE43ZOcUegFhAsMJAwSajIG0cn3qFGcgkg4pzSgjpgUWERybSxwMAU0M2AAcgUpXexCium8NeG5r11kmUpEO571pGPViWuiKWhaLNqEq4UhO5xXoelaTFZIsUKBpT3q9p1gq4tbKMYBwWFdvoGgLEFZxk+tV6m0YWM7Q9BZysk/J4/Cu507TkjUALirdjYhQMDA+lbEFvjGBijcor29oF7VfjgxgGp4ouBxVhIvxNBLZGkYz0/SpQnP1qZU/KnmPI5pCZXVNp56VIoz1p+BmlK8+lJsliBe/eg84pQSMd6UDPTgUEXGjBHPWmmPPWptuQSOKaR0zUXFch2bRjqKQL3WpSeoph74OB6UCbIySG4pQ4/E08DjHeonXnmncVhw5yB0puCM4pV+7mnE4FDAapGOfwpxAbrQV6YpyjHHakMiYY980wgZ4Jqf2qE8ZA9aaAXkcmjOad2pSAcYouFhmd1JjBpxGMelIRnmmAnt2pmOakx1xTCPXrQMQDBPpTu3JpgOKkxkZotcY3rnA5oGSeaTdg4pQ3NVYQ08mk9fWnMMUwHJ9KBoPT1pD9KWg4xmgYhw3FB4NIBS9eO9MZGvDfWjOOD0p+Oc96GXcBRcYEZGKhZOBU5PGaY6+lF7DREVz1pCncVMvIwevtTdpJHpVXCxDt9cinKARnmpNgNMwRnFCY7CqOtLkEdcYpQ2evajHNMAVcjrTW4py8HAoPXkA0JDGKOTnFAJpGyelA9sUAKw44pOce3SnEcDAyaQ5zTQWGH5c5pRnginZBx60vFK47AVB/rRgY5pccUY55osBE64P0pCCBUj9Mn8qZ169aNhWuRnnikI4xUhAB96GAIxSCxUZOcUiipyAM+tNxjoOtJMGiIrz9KMEkdakK4x3pSvQmhk2K5T8aYy9ABVph6cCoyvPvSJaKMsXPH0qBo859K0XTJqJk54FKwrGRNACp71kX+nLIrZHX2rp5IsD1NVpYc5yKVhWvoeYavoxiYvGvy1yGraOk6uUUB69rvbRZAVx19q5LWd
[base64-encoded binary file content omitted]
UxIKnqKtKwNaEa9vapV6DAqLn0p4Py1JBJx27UoOc4pq/dz2pRhST0FAxw6nP4U4k8+lN6kClJwOODTAdk5wKATn5qMgAUvXGaaACSKcD8vzUuMjmm4AHQ0xjgeOaTcCcUgx0NNIGeKBj896kDLjmoATn2p5PTA/Om0IeQO1Nx1xSnnigY6GgBjemKrTH04qw45wPzqpdsNh5/GkWtjzb4mz/uo0HuT+leK6k4aU4716j8SrjN0FyPlrye4O+5H1oqO0UjilrJm5palLXOKwb1yWmbP3e9dCP3VmfpXJ6vL5Wmyv0LGuFakTZwuoAzXLEc5PWoNjKGAFS+Z85c4pY5BvJYcn1raZlErCNlXcenal8sYyanu3HCoOBUCqTjOdprMYRnYeuM1a81AgDYzjqKriHL+wFRAYfk8D3otcCwg3Nz0pr5JO3gUrEBsqeMUoKkccUAMjGOMZz7UskjINvrShwqkg9arysG+tADgcHJ71IrDbk1Wzk0obBwOhoA0VYEirAOAMZxVKLGBmri9BQMtwsT1PNXlxtrOh68nirYchT3FSxlC9xuwOtZz9cmr94ck1QY81p0II2zUbe/ankGmH696kY+Llq9P+E1v5mrxNjOAe3sfavM7cZfn17V7X8HLMeY0xGAvQ4+vtSqu1NivY8L3YprGk701ucUxjM+tHtinYzTQKAHAYHpVqNh5eKqgZBz1qSNsD5umKAJS+fpSBgynioi29wB9KkaPyyB68mgCIbiwUdKldCp9DTC2wgjtzUm4yHI+tAye3TMYJ6k4qO44YZHA/nSwSeW4J6DpSysOe560dQEdz5YB602Bxkk/lSMQabAuZApoAtEE4OOvpR5wSTpVhwqIfXFVGQuwNCAUEli+KkbEqZx0FQF9o46CkSQhGA70CLFk5iukYV67os4aGCXOB3NeNxsd+R2r07wrcGfSwM5K4rSprG4lueoWLnC56Vr9QAB2rndKm8yBGz7VvxnKKx6etcrIMwjbcsPet3RXIfrxWLejZOG7GtDSJP36+/WtFsWjtYH+Uc1aTp61RtOVGaupx9KbKJQO1OAGfQ+lIhqTGTmjcdxjrxxmlUkZHNBzuzmkyQe1PoFyVG6D86kbnHTNQg4P9akVuKEwF4HFBA7GkGTzQPSmMPrilzgUjdM96Rj2poLinkdMVXu2Edu7E9BU3QcdayvEU/laZMfajdibsjgrgmW5kY9WYmpoU+n0qvGctk/Wr0PHHvWc3qc5agTgcVehX8arwjgYzV+AcDjIrO4iwiYAxVtF4FQxDpmpxyPWmmOw5eRxTgeOlNJ7Cl6iqKHUoIwOMU0ce1LgepNMBwFLz0HSkBFOzRcYZbOKUdaTPbvS5zxTuAvNPUAD39aYOOvSlyegoGSbfbApegHftTAcAGjd2OPrTGSDt2xS9Bn1qMEZ65NLjPGSBQMlyP/rUvbpxUS8dKeDjpxQMcBnrzS47frTUPU070oGJtGDRkd6XnpjP0pCfUHPtTGGe1KRgdD9KarYPel3DPNO4CrweBwe1OIwOaYGABPSlVh0/SmA85xgUvb3pu70NA55zQkO48fnTgcnC9femBeevFKFFMaJKcfXGBTQCKB+JFAwA9PzpQcUnUd6d6dBQMaQODTQPSn8kAY+lIOvNMYAcCl9uxo5JzzmkI6YzmhAJyOvaoQSwwCcetOOWOB09aUgbhgYFDGhFG3p+NKBtPHSjp060KeO1IYo7evpSjnoOlMGckdvWnY5poY8EgdqevfnJqEnA6Z+lHmAcHI780bjuWMjOBSYBz61H5qjHzCjcCOCMVSQD2ycDr3pAc4FMLHik3dqeiGkSEgfX1NBHTimcGnjpzSckUkOAxnH0pjrzuAwak9qDwDzSuOwKQVzil4xzTFwpOMYNPLAjGOtAxCBzRgdhQOvOKUsBVCEwMHA6UrLxznrxQp5PNIeetBSFA49+lJ34zx7UHO40nJNMB3ufypdwHemEYPHNHuKVhpj9/wAvpUbsOnUUEZ4pjcCk0MZI+OgNVJXzU8hA69KpyEAZNSDZHITzVZj81SSNjPpVV32twaZJJuwODT45egzVGSYVA10FzyKaYrnSW10ucE8VcWZWOAQa4eXVBGMk4rV8J3v9pNLIDlIzs61otri5+h06n0xTwD1piYzzmn44OM4qRIXnIBzz6UvXnpikxg89KPlFMew/cOBxmlDDv96oyoPT8KcPcH8qYxSWz0zUmCVz3pucj+VOyOh60AOXPHr6Uvfmox94jvSg4PNMBSAOaAQB2pGpmMnFA7D8+3NAbPb8aaqjvkYp+Bx60wHA980pwfqKgdiB0P1piueKFqOxMzBeTWdqL7YHI7VaZ8H3rF8Q3SxWMh44HHFG4S0R4r48uvN1GVs8dK4e1UyXqjuTW54luPNupWzxuNZuhx77ouT0rKvKyOJasv6o+yED14rivF0nl2EcXc5rrNXbfN5Y7VwPjK43XQjByF7Vz0lqjOo9Dn098VHK58zC9O1PiwSPX3psuFIIq5vUks28QlOJKSQLGTzmo4t2flPLVFKGDkHk56VAFiIZYAnANRyRbZSByM0iOV5z0pwyV3Z5PWkAkmAcHpUB4Jx0qZxtAPrTAoJ9qYEZz60jRnr2p+OTmpGJkQADAFAysRjp1pyg54pWBHFOjBJoEWoVJFXY0+UeveqsYwPar0f3RigZNCnOKuCMbeOlVoRVzPyemKljMe9TD96z3FaN8fnNZ78HitOhBEentTCM/SpD9KZipAtWSbpFHc19CfCu18nTfMx97GOPr7V4LosPm3aADvX0t4Itfs+jwjAHGenuazxHwpETdonyZmlAx9KdxTSOa0NBPWmgc4pe5Hekx81ADgBnGaJOQMdqQDHOaVOc5PWgBI8KwapXl3nkioSMZ9Kap+bjkUAWCobk9hQrhc4PtQuPKJzUeO/btQMlLZwe2ac5+UeppgJGadGSQSaAH7dqqD3pOkmR2o3lnGRxT0IG48UIRo2cP2rrwcU5bIq7DPSqNrPJCxZW4Bqy1455z170NPoUmrEE1uRGSarLEfLJPHNWROzqyt0pSjIikjg9KNhblbbtHcGu38B3ed8LHg//AF6424IHpmtTwndeRqCDOATzWq96LRL0Z7VoMv7sx5+7XU2zbk29utcRpMoS5xnAPpXYWT8DJ5rlkiXuLqCYCsBwOKfpzbJVJp1yu6L1OearQuBJlTTgwTO90+QeWv0rQjyRWFpUgaIc54rct2OASau5RMDzUytntios5PrTlbBoHck4xyKa3sKdnIpuMH2poBqgkHPSnhSMd6Qc854p45pMdwDYxS5Ipo6471IRx70x3AEGmnlvT0oBHrSMOM5HFAhD71z3i+TFiEPG41v7sdelcz4xb93Cp9TTjuKb0OYQAdquQjgGqsQGeauQ4z6VjLc5y9COavwdqoQ9sflV+A+hqQLcYqYE+uBUCYxUg7Ypool3UZ+uaYMYOacKsLj8/lSjtjikHXA5oblSF4PY0XAeBnr0peBVIi5H3XU/WgPdj+FCPWixSLvFKMepqkZ515aLP0FNjvskh4Z
AR7U7MDRB4pV69KqLeRn+Fh+FWUYNyKYyROfcelKOTz1+lM5wO1IWI6A5pjJQO/Y0uQM9M1B8x5FSKo/i5PpRcBxl2g00O7cIMD1NLtWndOO1BQRgqpyc1KvTJqMj8KcpBJGKLDuPBPf8KacjPGaXIAoOSeOlUhpijJHSkKk9RR06U7POeKGgAqAKCOnApXYYPt603qMmpRQoxnpxThjPNMzg0oIxzirQEnuO1PyCPwqNc+1OGNvA+tN6Aheh6U9T2/pR2GMGm8fjUosecYHNL0xnp2qPcaA3r+NUA/6daXrng0wHIoLZ+tAx2c8cYFMyWOOQPX1o+8w44pxwBz1pjEI/ugYFNPSn4z04pOTQgGAdCTmkYAnFOJx7ikzwTimNEfzL0ORT0bI4pQBxikI4460h2Hjn6UpXI5xUYJxyeKcpz0ouMYI0DEbevNKY8cAkU8gkZA6Uo+ancdiIKwPJyKeo45qQLxRt6YFMaAL6c0ds84pxBz6Uo5zg0WGhpBz7UjDnAp5HB9aYoLMen5UWHcaHVX2E/MegqYDPU1Xmtg8ySHhl71YXsOvpSGGM0mDyCOKfg96THBFOwxOg4NGMA9+aBgUvGPXmqAQ5pD1p2O1I2MDNAwx1xSdDRkYz0FNY/lTsFxcjHNRueKUmoJjwTUtDuRzNgVScjNSynrk1TduOamwiKVgCcGqUj81PO2AfSs+d+M9qCGiC4mxkA9OlZl1d4U81Ley4yc1hX0+A3WkRIzPEOqNFC53Y4r074ZRbPDEE3ef95wfWvFtWZJJAspyrHFfQPguNYPDWnKv3RCuPyrSTskkRS1bbNkc9Kfs785+tOC46Dmndj2BoNhgHtz6UuAV6dPWlYntjFMDcmmhjgQpJI5p+QR8p60wrk5HX0xSqMnnANMCQHIx1xSEZGT1oAx35PWpKY0RnnPPNJ3x3p+0Y7UuB1pDEC469aQg9sU8DH0ppweDQAwuQeetPHIHpTJPl6Dr3pgYjPWqCxKVJ9KiIIPNKshPBoJFGwyvKcfSuL8dXXladJgkEqa7G4O0E9RXlfxKvuBED+FNaMxqvQ8q1eUu5/vE1b0KPbAznOayb598uOp9q3rZfJ0wdiRXHXd9DnRl3bgzu57V5nrMxmvpG684rvdVmEdpPLnBIwK83lJaRmPOadPqzGW6Qm0qn16VHwUPc1IHGAMc04RrvGOBiobAbby+WBnk0v33LN0p/ljy/ftVQkgkZNAEryL82KRXwuO9MKZFNAO73oAkkkGwKeTUaEn2qXy89agHUjtQgJiucN2p+4BeKjZvlCimMeAKAJFyxFXYIkYEk/hWerEYqxHLggetFho0zEghB7U9BgACqqysVA7Vci+6PWlsMtQDn2qywwh7iqsZOTirQxtxSYGPe/frPkHNaF7jccdaznrToQM70ijsaKcnLrxSQHS+D7YzajGMd/SvpXRojDaxJjGBjFeFfDSyMt+jFeh9K9+s1IUDpgdaxq6zSMqr0SPjEjHSkXngU7qaRenFamxGRj6Uq8nNB5pVHGeMUAPfk4FNXJGMUoPPNOUcYFAET4BOaaDxx3qWRckA1E4w2PSgCcgCFfekBBC+1ITuTjtTVPPSgZPIB1B5PFRhwBgHk0h559aQoVznrQImA3OoFOkwHwOeKZFwhOcVKhHlEnBNCGWhH/owIGc0hUlApGBSQ3arDtHUVIJgE3dSaNR6FVX2McjgVNLcq6jA6VG6lkZgOKpkHoM0bi2Hs2/5jViwk8m5VunNVyjog3DrSI2DmtKbJZ7PpFz5kFvMDXcWMmcc/WvK/B9z5+nmInlen616LpM26FSetYTVnYmWp0TAlT6VlqSkuCOhrSiIZBis66G2fPTPNTF6iR1OhyjYvPtXTWpridElIIBOM9BXZWLZUGrKL5PHtR3yKTr/jQvBqugEozg5pG4AIzmgEFD6d8U4jI6gD2pILjFPJGcAVKMgZ71Dgg8c09X46UNjQ8dc4p5PvUYb86UHn60wbFHDHFJ0oJyOKQHigLit3x+Vcf4xYtNCoPTJrrWOAATiuN8XsRdxj2prcmb0MWPP61bhPQ1TUg9Ks2/TnNYswuaMJq9CcVQhHAq/DyKi4XLcfIB71OgqCLjFTq2eadxj1HenY7GmFvalU+n1qkxjwDSgUme4py0x3FwMc0uBjikXn2FLu4oKQox3NV7gbZFdQD2NS5GKVhuUg0ILiqFYfdH5U8DAwMCq1u+MxseR+tWQcgVQx3tmkA60nGeetOHGKYxRj/wCsKUDGc9fSk4/+vSkgYoC48Dj2oHXNNBGMYpQeM9qYx+e5paZnilHPbimO4/Pc0o5PGKTooo60ikKDS5HQdaQcdB0pBwO9MaBxlTn0pVOUA9qQ9xjimxnKe+cUDHngClY/lSdBz1pQc46VSGKpPfpUoAxUXGcAU8dOetMESe45NKOvOM00AY5pUyc8UkUBzj3poGWOelKw5HNNzgkDgimMf06imkZOTwKNucHP4UoHABoGOHsOaYxIFKSSQB1oI3jkfhTAbG+VzTlORz+VIEG3HQUqjAGaCkOI5BxzTcc809c/jS4yBxQMjAweO/akxjrUgUj60EE9qYxMj0FKODwM0hJHA796VBgnigY/HGCOKbH8pxxThng01+SCPxoHYkHTjpQBk8Ui8nNKAB060BYUjjNIQMHA5xRgZwetK2Rz270xkcp4xzTlGFwelIFyS2cjt9KlI9qY0MxnseaUdPpS8d+9IQSaY0Gcgd6OnUZoJGT9KQniiwDiRzk+1NYZPTBo7Y/GkK54zTHYKa2cHHIpSOo5+hoK9BTAZjHBzTfpUuB600r6d6LhykdRSAH6VKenOagmbjipZaKk3J9KpyDnmrMzc9KpTNjJ61DFcr3B+U5NZVy2FNX7hsg1kXbc461JDM29k4yCa53UZvlb69a1759oPWuX1SYgH0poxkzNbE+oRRsN2ZAK+k9Ej8nSrSPH3Y1H6V8yaFK1z4psYUPBmXI/GvqaEbI1XsBim9JJDpaxbJ0JIxmhs8g0ikcYpWwx/wAKs1SA9OT7UgYbfelwDTQO2PrTAk3fL+nFKDlQTTep6cd6U4xgHimgsOzyc8/hTuSOaYe3FKORjnFO40iTP50DOKaMj7vOKdzjPagYnzc8cUh6Z6GnqcikJA+lIZHg8Z59KcAM/wCNKRkelNB6c1XQCKUYPSoXJxVl1z1qu/GaSKsUryUCJjnHFeF+O70z6lKQflXivYNeuVgtJXJAwD3r5/8AEV15lzM2c5Y1TdkclZ3djHt18+9UA8Zre1F9luqjHSsvQYt0pkI6Vc1GTdMe6ivPm7yMuljj/Ftx5VqsOeTya4pmyuQcVteLLvzr8gcovFYABNbJcsLGG7bDJLA1b2gwMzfhUG3P0qRvlQKe1ZsYxXOBkYxSAB3J7U7PzH0xUWecDkUAOkBXHNPTbxkVEzF2BxwKsRYYsT0ApAOLjyGAqrEATyKGPU+vSmqOOuKYEgx5xyeKjcjcQBTnwF460wcnmgY70p8eMjFM+9jFTR4Bx+NAFyFPkya0oBwAKowqTGMGrk
ZKgUmMvLDtIwalWM7MmoYmPFXPmETAjFSxmBfDDms1h9K1L4fO2RWZJ1rboZEff2qSAZkApmKns/8AXKDk80luM9j+FVltXzSvHv8AjXr1omdv+c1wXw8tfK0uOTAG4f416DadBnrXPvNs56jvI+KG+nNNz6U/nNNPWtjpG1Iv3eajPSl38CgCXaCvB/ClJVV4696jXPFDKc8UWAMksCTxmklA3nmpSoAAFRso346UDEQqENES7n/WgY2kZ5p1u+xy3YDpQBKkWx/X0qOZssalklDAHOM1A2DxQAkbZOD0pRkEjNCJjJ6YpAOTigQ9EJOe9WniKqvPFWdOtWkQn1qWaAkEOelF+g7EBYLatjqaqpgHcR096sMA0fl9xUHlZXI6UWAkkO9Nw6kYqoQQenA7Vb3BUA6GmEZI9O9OOgmdD4NuzDd+XnAb/wCvXq2iy4yuOeteIabOILpGXjmvWtGut3lSqeGp1V1IZ39o+UAJOai1FcqG44qvp8ucZ596vXA3wtjk1z7MhEemzbWXnBBrtdMn3YGa88t5Nr4PUV1ujzkgVqizr1bNOJI5qvA2VFWOo5FAh6nA7c1IuSORUajJHFSlcnIGKm4xD/OlHP4UoAxjrilxxz0NMBO3rSdetOPHPemHjpTQXELEHnGKU4I4o4YZ7+9MyB9Kdxi1xXjJ8XyLjtXaEjaccVxPjA4vIyfShbkTehlJzj3q3AMdapRNxVuLp9KxZhc0IiavQn1/OqEPQVehxx24qRltD3qfBIPPNQQ9s5qZT6DigEyRRwMnPFPUc0wGlBPWqTHclAA7UufSowcDGc0vB+tNFIcT7cUhPXAoA5pVGO1UO4DJAzThgd+tKCDyOtOGO4FAIo3wZGSeMcqcN9PWrcciyKGXkEZBpzAFCrKMHrUFqBGTGenYe1PoUizznJBpQeehpM5HTmnfWmFxMjOKcOT0pOMjANOyehwKY7inNKAO9ITzg8D2pc5YdKY7ju2B2pR9aYH9elJg7gfXvSGSkjHJ5pwqIsA2MUoYdxTGSk8cUg9TTM7s4py5PBpFIOMY61GrYYjHQ5p5Xk46+lMAAds8kimMk3Z+hpRx0GaZgHkVIOnrVDFH0qVG9Kip69BTGSr14oUYzTVPJo8xQOetFh3HEDFIBjJFRPMACRyaFZ2XKjjtmkUibORzxSHOOtRqG3ZboalI60xjM9MU4MMDjFGPUUHOAKYw3DoetOHUU1cE54pwxkY5FBSHjk8HilGBznrSDHBHWlxjGD9aQxSoPTrSMpyaXPbr70hIABHWqQDQvAzTgajeVEH7xwoPrUJvI84TL/7ozRuNMtMc8YpMAj3oHzANjHsaeCPTpQhjVGD3pwY5wetC46ilIGc9adhhkdcZprZJ29R/KlIIHFIOX5//AF00gHH0/pS8ADimknGaMk5yMCnYY485puO+Mn2oJ6+nvTS3AA6daaAU80FWx6/4UgOe2PejPJNMYnoRwBS5PT9aMf40hXn2oGkO3DmgHjHpQRjHHNN/iwBigsU89elNbuRR1GKGPr+lS2FiJ+evFVJh8xOatueCc8VTmPoc1IFSWqMxwauTnjqKoz89etIkpTtjNY18+M4PPpWpcH2rFvT1qSJMxtQfKmuV1NshhnvXSXzcHngVzV9973oTMZIrfDi3M/ju2Ug/Kd/5V9QqMjmvBfhZp6nxck47RmvfV6DFNvmnc1pq0Ehy/KeeKkwO/emgc8enSn9uwrUoacHNG1scUvTIp+Plx/KmMaRj06U36ipGAwc5qM5BzjikMDnsakAwMnrSbfSncgciqAMEDnij+dIX7UbwfegYoHp0p20Hn07Uwtjp0qJ2xytDYWFdsNmoy/PTrTXY4pBQhskEnGO9RyMCDjgUxmIz/KqV3OUibBqhXscV8RNQ+z2RjDDLe/1rw7VJS8hx3Nd/8QdRE12yK3yr7154oNxeKoHGaio7KxwyfNK5uaXGILPeeM81l6rcCK1mlY844rWvW8m3SJeCR0Fcd4zuxDbiBWwT15riguaRM3ZHFXpM07Oe5qJk2cetODhiM4pLlt7D2rab1sZJDoPnYCkuQcgY+tPt/kGcdaWUbm56+9ZdRkCpg880BcZIFTSkA9cA0ZAAUdTTAgfAQY696iLEdzU8iBFBJ5NQ43N6ihAIhJND5B9qcylR9aXbuAAoGRgkketSKtMPykCngkDigBu3B5/OpogAcmohknntzinpuL9OKANKFsqKvxjgZ61nQDAAB57VpQdB60mCLluvzc1osB5B7cVStVOeauuCIiKl7j6HOX3MjVmuOfxrUvx+8PrWa/WtuhmiIDPWrmmReZeRKOpNVVGTmt7wjbfaNXtwBk7hSWmoz6D8M23kafbxgdFHtXU2gx6ZrK0yPCAeg7VtQAYxgkGueBxyd2fEuznOc0xh81TOMjmo2HPNbHaMbFNC4TIqVunbmo8/LjNACBttOLYGeee1QnJOM8Uqnn6UASliemaaAxYbqmjGc8UjkbjQAwp82cUjRlEz61LG65I7ikd94wBQAxUyo5pAuGIY1MjDy8cbulIOCe5PagZJDEWUlulM8oBjUsUh27RSORjHc0IC3DO0MS4JouZxKgwTmq7PhAuBxT7cKXG84p2W4XLMCKiDcOT+lS7EMKhce9MdwUcA8Cqcc5Hy560rXC9iUwb5gg6etLdQrGpCdQcU63kXzTnJNBG4FjkrmlrcZQ+6eRyK9C8H3vnWnllvmX/69cHdKFJPOTzWt4SvPs94oJ4Nav3ombR7VpU+9F5roI23r+GK4rSbjEhB6Hpiuss5MqAT1rmkjMq3I8uc4ra0a4wwBNZ2ooNm+madLtYVUXdFHpdg+Y1Pf3q+uMdDWBo84eIe1bsT/LyaqwmSA+vFSA4+lMBB9804jNKwXH5yfenHjioSCpJ7U9HyMj8aYDuR1NRMDzUp5+lNJGMHpTuBGCfxFNPU4p+BzzSMQf8AGkO4zOfwrivG3/H5AeeQa7U/dzjiuO8cEb7Y+uf6UR3JnsYULZI+lXYeSMms2B+Rjir8R+YZ71kznNGI9OO1XYSMDms63PuQKvQnKA9qgdy/ESPerCduwqvGe9Tq1CBEoHNKG/Cmqccd6ARmrQx4pytUYGetOH6Ux3JFpwpg7EYp3QUxocT6D9KNw9KZ/KgZ9KaKuP8AvHr0qOYELvXqPbtTh+FKeeOtO4xYn3oDn8qkyTVdVKORztNS49aLjRITwOeaAR603FIBg800xjsgilB7mkwD0HWnDOPei5Vhy++OaXOD7UgJxmhQRz0p3GKG57mnZAGBn/Cl74pehoGNUcknpT8+lJn0NIQMYoKQDIbr+NI3MwIpTjHr7UjdAe+aaAd0PT8KXePXHsaafu8UwRZ6nimNEhmUeppBK7fcXj3pyooA9qeBiquOwKJGHzMPwpdgC55Jp4PPTFIfvfSh3Y0ATCEAZptqSYgO461ID9BmoYm2yOp4xilYosdOfwpw6elMVi2QBShe/emUh24dO9IMgYpQoo6n0FMYEjFKv5Cmjpg0/qKTKF4z0oB9OlNIO7mhlJBAOM96d
gGSziMYxmq264l5TCj1Iq3HCijPUjvSkelO47dysLRCQ0pZ29CeKsxxomNiKv4YoCjv2p4xjB+uaBpDWcqRnv61IDwM9aidQyHHXtT4m3ovHPf2pjHcUuM8UnTPvS8/WmMCQB82eKUcjPX8KQjpnpQvsefamMaVPvSHJqYdOtMcckHigdhnXgdqXBGfSlwQP/rUmcdqYAO44/KjjJBBpcDseKTtjjFA0BOAeOaCeenNLkH05pCATntTGNLHoc0pbuRmhsdM005BxQMXPf8ApTW47UE4prEGpY0iKXJ+lVJieatSdD0qpOwyeoqGBSmPbv61SnPYircnf3qlPgjFDJM+5P8Ak1h3zc9a2blhtOf0rCvT14qSGYeotgHmuW1CZVOTXR6ietcVrxYk7Dk+lVFXZzzbR6X8FYxc6vNc87UQrXtyKPyryD9ny2K6FeTMPmabGfbFeuoPXn6Uo/EzoivdRKzA9zUfb1p+M8Z4oPA65zWqLEXJP0qTd68VHjnFPUAg0xEmAQcU3G0kHmnduaRj6mgYuMjigZIIzzQBjpxSgnv+dMBuykKBeak5PSkY460DIWB6nn0pobOc8VMcEc0wqDTQDCARzUTDb0HFSsD60wkYx2oGVpidpwRWB4guxa2cjk4wvBrcuG2A8jBrzr4i6iIoTCp6jHB+tMyqS5UeWeIrwy3Er7s5JNUtDh8yUysOBzVbUJPMlK571q2yi104nozDiuavLoci7iTyCS5eRj8kY6dq8x8S3putQkOc4OAO1dt4huvsOlMM/PJmvNJn3yMxOOamkrLmM5u7sRM2CKemScDqajK5xjrUijaQMc1L1ESRq6nHp3qQKckseB61IoXlmwMdKrNIzNj0pARynL/N0qUMARjrTGjO4Me/vUkbAPkc0MCCU4PNNDBTnsKsOAxJIwO1V5Vx7ZoAbv8AmyxwKlhcDJIqBY8kc/hUuMEDPSgYN8zZH05pDkHrUyj5TioW4Y9aAJEAOcnHarChflC81Tzxmp4HywoA0o02gdq0IQAoP86pQsCgq/EflAxmkxl23bOBirUxJi+U8VDbICK0ZYwtv0GakdtDlL3lzWawrW1AfOay2GWrYyQxRXc/Cq1NxrqsR8kYya4pFycV658KLEQWMlwRhpTgfhUz+EU3yxZ6xYY2g9617cc9Kx7EDaK2YPXtWaRxrc+J3HHFRkY71akTjgVVcHNWd4xh6VG4xUx6fhULHJ5oARVIBbvTRnOafzzQvHXvQA8PgVFIWY/WhhhvapIwM5OMYoAZEcAipIuWx096h/iPTrUmMNntQA8OFcgDinMwJBHU1GCOnHNKRghjQMsW7qoIK5Y1YeDcu8cH0qkDgjHWrRchdpPJFDAgUEsAeaV3ORUoZQMetK6KTkdMUAQbnwR1pIY2dhtHenoME5OKsWrIo54NMQ6OIxzjd1qYHbGy7sE1HKCxDZ4qUKrx78A4FJjKd83I7/4VFZSmKdW7g09wZGz2HU1Bja3Wrg7Es9Y0K8823jkU845rt9LuMoOma8i8HX3WImvRtLuMYIPFZTWpkzrZMSRH6VmQkxyEe9X7WQMue1UrxCk25e9ZxetgR1mgXPOCeDXVWzbl5rzrSJyjqeBXdabLvQHtWrGzWQkkYqVW45FQIfQ1KrYGOc1KE2Pxk/rQB6UisfWnFvQUxCEZHHSgHtTsnFM3c89qY7gTgnPTtQ2fxpDkdDkfypACuck0Bca3cVyPj2M+RbOP4WNdgTuHfiub8bpu0dnH8JH86E9QlscRD1HPetCMnI/nWbEeKvRnOM9qzluc7NKFvzq5Cw6VnQnJx2q5ARn2qBGpEx6VZVqpQn9atoex6UBcl7/WngAVGD+JpwODnNMZIO3oaeT0qJTmng4pjTHg96M5600ZyKcB69qaKuOA9T1pVGeaQdOetOxyKZSGlRnJFPA4yaT6c0vegoDzThwBnrTWHoeacnQZpgOFAHI96B1oB7UxjugooHSjrzTKRIo/Kn4B61CTkYzTyxJ6dqQ0x+MdKXoOtM3ZOB+FKTxTKuIBycUp/SjOcilPp2ouMQY6/rSEZGAKdgAnFKvIplIaATgdxTgeDk4pc54PFBUZy3UUwHAZFOxk0isNvFGen6UxjsUhIBppznjpSqBz6mncYpJI+UVHtxLnrmph046+tRvkc56UFE46dBTjjFMRh6U8cmmUA6cUo5HPWk5+lKOOvSmMBSgZIxQTgc0gIHHY0WKHE4x6Clx696aOvGadkg85/KgYmCBwOtJnnPemyTKhyTye1R4km/2U+nNNIBXmVWCqM5NT46YFCQqq4HX1pQNuMnihlDSuMH8KjhJR3X1ORUjnnBqOb5WVxn0pjJ+owetJvCjnpVW8uDBEGCliTgAVWhjublSZ/wB2D0Aql3JvrZGpHIsmSpDAUrjiq9tAtsjCPOCcmrGcjpQWJ6cUjLuAyehpcAjkUFcDg8CmUGcjHFIfTinY64pMYoQ7DfSj19acRzmm4IzimAduO9NOead34+lJgdDQAn60jHjpR2x2oxigdhpJ+pNRnGOakbgVBI3JIxxUspEUjYzwAPWqk7DBGKsyHI68VSlI5A/OpYMrM6n6+hqlKeSeasy8ZPcVSnPX+lSSyhcnjjoKxL0n5jWvcNWTeAbTSM5HN6ieDXIajjzDnvXaX6ZVjjmuI1pHWYYHWrhqznqaHvHwftfsvhWNsY81t/1r0Bc5yDXOeBLcW/hDS0wAxhBIroQTU0tbs67WHjqfSl5OeOaBzgU9QOSa1EJ396eML3pODnFJkE4yMVS0GSjlc5pWUEc1GD7+1OD9c9qLAL9Kdjim+9ODdqAEL4NBYMO1MYc5IqNuDzQOxIWxnBqJuSdtAOO1NJwKCkIWz160xjjp1pXwRx1qCWTZ19ODTuJlXUpRFEzsegzXhXjvUvPvX+YnGf616l4z1IW2nSfNgkE14Hrl200ztnqaHtc5K8tbFSyT7RejjvWzdNukSIfdXrVbRYhHC07DnsahvbkQ2087kZwcVwzfNLQy2WpyPjS+8668oHKrXLAAntmrF/cme5d25yeKpNuGSK2l7q5TFa6ltYxgkU1gMj1piS8D9aZ5jbqxGWCnzYzmosAUb85p6fd+bgmmASyZiUIOBUDMAQR1FTswweeBVVgCx4IoAk83PzE/hTGbdksPpUJBBz0qROR1zigBYmAOe9O27yTTNgz0p4POKBjoZMZ/rTJTls9aQoQCaCeMHpQAg5+gqeBefaoRgGrETD15oA0LfoCenpWrb9OT+VZlvjArShHygjpSYzVs0DMK0LgYgrOsiRg9DWhOMwZweal7jb0OXv8A7xrLYfMa1L7O8k1nlQWyK3MkLboXkVR1Jr3zwnai10u3iXoFrxjw/ame+iABPNe7aRHtiRfQAVEzKs9DpLIYArXtycAisuzAKgVqwDjrUHMtT4xlHGM1VZa0JNp6dRVJxzTPQKzZz7VEy/nVllPaonAxTAb1j5po5NNdugpM4PWgCZkwuT+VQnqAD1NSFiVyeoqFgQc0ADjnNP3g4xwabyQC1IwJGR0oAfEMtknp
VmcABcYxVVDhvanMd2TzQBJGR5q45Heps+bNheM8VVibA7getTQZV9wJxQMtPF5bgHn3p8u0RZXg0yeXewJ6DqKilcAf0oAAoZl3dM9afcjEvHAH60wPiJTimzMZCB0zTEaFsEdAAx5qJ2aJmC9M4qmpaJ8E1q29uJArE5LUnpqNa6EDAJbjHU9c1SmAzxVy83AjgZNV2iPl5zyaE+oMm0m5Ntcq2SOa9S0W7EsSMDwRXj6khq7nwfqGV8ljz2q5q6uYyR6zpkwaMCrl2vmRE45rndLuPu84ro4mDR1zPR3JTK1lJhwpNdrol0doB61wk4MU2fxrf0e4zgg/MOorRaopnoUT7gDUu4j6Vm6fPuVa0lO4DFMgenPOOKeDjt1qMDB5607OMc1IXHDPSmk9eePrQTk4oYYFO4XEBGeM+9KG3ZFMJwAOaCcnNFxjicjnjFZPiSLztGuIxzwD+taobnFQXSCSCRB/EMUXBnlMZweD0q5C4zgis9iVuJIyMFWINW4W5HSpqKzOdmnCelXYOtUIDjG6r0Rw3J/GsmI0YeMY5PvVlG5HOapRnPSrMZzigCyvJqQcCmJUg9KpMB69OeKcBz0po/lTgTigpMeMetL0PHX3po6ZNO79aY0O5zyaQexoOMCgnHFNFXHDrzSZyODmmk980DFMdyTNLkZpgI5xTJJPLwSM544podyfP50opqYIB7U7qaY0KOtBIHrQSBjtSigpMN3P9afj3pgNOznPNOxSYoPPp70rdyPzpF5HGcUdhQMfHSEnf7UoYDjIOabvy5A60ykSKScnsOlKDnJpBjtTh15GaEMDk9Dx708cHoKQDIzxmngY570IpMao4x0GaUjaSRTgOc0HkYzTGhh69aAc4Hb1pXUY+lGM00UGeeOaSTkH1p2PwFJ9eaoY2NjtVhVhWAFVYTlWHoalXsOtMZMxoXpzTF4ahFYFiTkHpQUiX0zwaa2OM0mRtz0AqNp8kqgy30pjHtKqDLMARUQlklOI12g9zUYgyweU5PpVpSNg4xjqBRsMbDboDk/M/qasKe1MAx+FKG+Yeh70blIk/i9CKM568UwN+dKMU7DFK5bkcCiVdyn6VIuMYzTT1yBSsUV4cFcEdOKfjnFNYbZ+PukfrTDcIZ/LU5Y8n2qrAixxx9aXv/SmkkEjrRniixSHn6Ucd/ypuTnANL069KY0GB1xS4zSZ9zSBgSQKaGKRTSTnj86CfmHelI6+9ADT3x1pGzilOdvHWkPT2phYaT+dIxpxGT60w9D6UMYyQn61Wc8GrDYxVWQAZyeahjIZmPaqshwM9zVhgOc1TmIx9KQmVJ3Izzn2qnO+0YPQ1POeWbPNZ0rdc81LJK05BDY71nXIB+npVuR8t9KpympIZlXse5ABwaxJdPW4uYldSQXAFdNIm/oKm07TzcajbYHAkBI/GqhuTy3Z6to0Ig0q0hXjZGBir+QMdabGu0AAcKMU4jOMVUFZGz1HAg80u7J9vSo8Yz6U8EZ4FaJisPyc0m0FqUAHGKU8GhDFUegpxI44pB04JFKSM5pgDDA60Bjjmg5IyOcU3PJzQMXOc+lNYHnnNLxz2FLu455osBE4yuRUZ5PvUzmom7mkMhZtrHmqV7KAh54q3NyCK5rxJfLa2cpJwQDQTN2Vzzz4i6sJJGiUnGeefrXmLnz7oD3rW8SX5uLmVyepP8AWqOjw5cyt255qa0uVHBe7uzRn/dW6Qr3rkfGN+I4vs6H64rpbu4CiSd+Ao4rzHWrs3N5IxOeeK56MdbkVH0M8tzmmliQQKGxjmnoB5Z9TTk9SRkXXHIqWaPoQKci4YZHX1qzLIrj5QMAVAFSHaWwadP97K9BULAhuO/WpVbdwKYEBbJBNSoFbqaQJhsn7tMUkE56UAOkiG3dng0wcfKBx61JI2Y1A4pnHYUDEwR05pvQ5NPbMeCRyaiZsHNAEkhJFNHAx1pw55pRtPWgBoU8kdqmgByMjimqw544qWE/MMDjNAGpaodo9BWrAPlGKy4TwMnjFalufkFJjNS1jOQQK0bpCLbk9qpWLEkYFaN1n7NyMVPUb2OQvh85BqkAd1aF8vzmqsUeWGfWtzNHWeBrPfcq5HC816xpvGOa4XwZbCK0Dkct7V32mDGMVMtzlqu7OitRhRmtWEcCsu0HC+tacPSpMkfGcinJzUBXDVcuEOapSqQN1DPQGSAAnmonGUNPdwQc1GjdAKVgK5XjNMHWrUgGGqsBkcU0BKQNgJqFupp4B249OabigBjcYGDVjZ+5yKjQAnDDrUqoeQT8ooAjjXJ6ZpHXr9anRdrnP6U4R5yaLjIMDYDSo7AfSpCm1fbNMxycc0CJACRuJolOUz3qIs2MdqceYxnk5oAcW3hVAwBVxogqxscdOapBvmXA6VrWcX2t1Q8Gh6DRRZRJLhRxVqG4MbBc/KOKSaA20jh/vCqnJY0boNiSRi0pJPB6ZpjSn/VilLqE2kciiUBUDDr1oAY0WEz3q1o90be6Rs45qp5u4HrUathwRVxfRktXPYtFvBJErevrXXafNuUZNeU+E78PEI2bkV32lXX59KxnG2hg9Gb18mU3DtS6XOY5B/WnIwkhx6iqOfKmwfXilB9Ckz0PSbkFRjjNdHAcoOcV5/ot4CAGPSu10+UMnvVMTNEdu9Ox70xTnGacGHI70iRSSaaSfx9DRnrk4NOHND0AjIPNNzx1qTGevJprLj6UBcSmtkjg0Hil7Aikxnl/iaD7Nrc4HAJB+vFVoGOP1rf+INuVubedR97Ib9K5qB+gFOWqTMZaM2IXBUCr8GCBWTbMPf8ACtCF8YHasiGaULZ71cRsdxWdE2OtW4nzx0pAX4yc81MDg+1VI36CrEZzTQideB0p1RKcdKeGH1pjuSbh1o3ZqMtimuXLIVOFHWmkUmTZwRRnHJpmcDjJozn8apDuPySe2KVfvZ45po9/zp64HJ70FJigYNPOTwOnrTByck/lTwcZAFBSHDtSnPPY03PNOBHHFMq4oP596XnOM5ozx0/OkyPxpjHHA60hYY64pD60A/lVFIcGOadg568UwcgYzUmQO/SgYh44PWhcBqB8xJP50HOQVOOeadhkvPGOtSZJ61H6mlB5PqaLFXJeAOuKN2KYcke9C89aZSHZ+bnNOPr2prD0JoLYGKLlIcOtKcZ5zSA5FLntSKQg6ZP5UoPHqKD7GkJIGTwapDIx8kzj8RUi5BycVGw/exn14NS855H5U7jQ7PA4pw9e9NB6AU9cGncqxC6MzDnC06NQvQfjTv1zQo2j+tPUaHE5/GomLIwI5HpUnIxg/WlUZ60yrXFVsgYxRk/SoiuxsgnaetSjkdfegaHduTThwuec0zOBk0oPHemUScggAinE5br0qLcOtQyTeWCzZwOaAY+4uNiHHL9hUNhasjNLKcyvyT6e1V7RmnnM825VB+RPX3rQEmDxmnboCXVj2Jx05pp6cdacDzkUnc+lUirCcHnp70ueoz+NJnk4PFKcfh6CkUO5pCKTP0oz9aBi54z3ppIzxSluOKCAeB1poAJ/Kkbpx/OkznjtSd/
880DQHjimMetOJz9aikYeppMpEbnuOtVpDzknmpZDVSYkDk9ahgRyv1GKpzN1qaR+ME4NUZn+9zxSJZVuXAbJPGOazLmTanB61YuW+Y5NZdzL830pENjZH5qo8mWI5xUdxNtzz0qk1yA3pSIuakJDHmus8K26vcq5GdvOa4WC6AYdK9N8FxF7ETFcbunuKqK6lQZ0w56cU7p7GhRt6CnHGc81ZYwD16U5eRQBnGc0Yx0qkgHg0p5pFOB7UjH8KY0B4GaQE5pQRwaH5HGKAAHrnilyMY6ioiDmgEgjuaYyTb0wc57U3GM5PNOUnHP40jcnPApbAMY8dQajZsrStgVC7fKf60AV7uTaD9K8r+ImrlVMKnrwa73xBeC2tXZiAcE8V4N4s1H7VcyNnjJAprTU5a8+hz1yxnuNvqe1a2Bb2qqv3nqjpcPmTGV/urzU11cAeZM33U6Vx1Zc0rGC8zC8WX3k2ohRsE9a4STczZx1rR1u8N1dsc8Z4qpCCcZxgVfwRsY/E7lbaT1HFS5AwCOlPYZc7elQSgqeaz3GStIGYYo8wKpUDmoEY7uhzU8Y+Uk8mgBjMc5IxmnIoAJFMOWBPpTdxztzxQA9pAQAAOPWmDLtu9etN78c05WwT6dKAGkZz6UxWKn6VJIdo6dai68d6AFkkLkHtQATjgUBeOlIjc+goGSjjjsBSqNw9qYX6gYpwf5SKAHYOOKlgySKhMny7RUtuMsBQhGrbgnHrWvax5x61l2o+7xW7Y4GOM1MikjX06IKeeRVy9GYTjOMVXs8lhxirl4P3B+lJLUqexx98MselNs4TJMi9yanukJkPA+tXtEtt9wCR3rpRj0O+0ONVgjQDAFdbp4xt44rmtLXaqjFdPYgAj2rJnHPc37Xoo7VpQ8gHHNZtp0FaUWcCkSj5Iu4Nrcisa7T94fSuz1618qQ8e9cndLubIoR6BmTKB+NQxghquMnPsahKjmgCJm3HGDTY8cg8HtSsuBketNAz7UAWQyiIYA3dDVc4DdRSxqdrHNQv6mlYBy8v1p4l2grwar5OM/rRtzznrTAtSDhSppBIVcc8Ug4i56k8UkgwAcc96AHvL1AqON9oOe9JG2TzTyOe3FACuQStDL0wafIBhSKjBy200IZYiQAbjVy1d45leM9PSqsagMFbOOtWrR/Llbd0p3Al1R/ObzAOtZzkoxz1Iqe5ly5VemaHjEuCvJA5FJaBuVYlLyZOSBUko+XHWpU2onynkHml8suuQOCaGBWWPinGElN/QCpJk2YByKswyKICpAOemaL9RCaPdta3SkHC969K0u8DbHVvlavKJAUfI611/hi+82LymbkdM1UldXMZo9XsbjcBjGKW+Xo47c1iaLd7gFY8j3rf3eZHgn6cVhsyYsdpdxtYAn613ui3QKgE15ipMM+Peus0O8+569K13Gz0ON9yg55p44IxVKxm3Jzwavbs9KTRAA4OT0qUEGoOCaevBGe9SwJCQeaTbnqcinAAr70nGKAuR4ySKCMGlJHPHFKT8ue1JoLnM+ObVrnQ5miGZEIK+vUV5xA3SvZr2FZYJEYAhhg145cQtaXcsL9UPSqWsbET7l+3fDDHFacTgjrWLC/HPWtG1fIwcVkzNmpC/PPNXIn+Y84rLifB61ajekSakbd81ajfPfFZsT5xzVyI5PNFgLgbNOBx2qFDxUmcdDVIZICcYoBA60zNOA5A7Ux3F544qQDpzj0oTGAadnJ6cUXHccoAPNGP50gPPANKMk9uaC7i9TkHrSrz70ijr2xS7sfSmNMeKAeQBUZPA5Ip2RjrTKQ8nHfmjtTATjANKD3zTKHMeB2NOUE9aaOv9akB45607DuKcADHJphO58DpSsewpUHrTKQ8AYPIFCjJ7UmMn/Gk5JPpTuUSA4/xpwwW+lRgc4PSnAgE88dKZQ9jzjNOz6daaFBpw/Si5SF+lL164oUflS9+lBSDoaAfypDg5zRnP8AhTGh/TueKB830pM89OKAxHehFDZeUyM5BGKkU5wc00gtkZx7UqZEYDcGkykORgWIyMjinAc89aqQ8XUv4Yq0DwOf1popC45yTSjk45pyjvjvRgZPr7VVxobjBpQM/SlU4FHT6UXKD261CP3fH8PanuxHSmSSxqv7xgMnHWmmNkgbPelByT60wDDZHSpMdx0pjQm7j0qh5hupyin92h+Y+p9KfqEkhUQ2+DI3U/3RU1rbrbwhRySPmPqfWnfQErsmGABtAAHalAGSehFBz06UE4B70IsdkdD0pfvA+vamAhjx2pGLYOOo6CrBDnUc4zmoXkMYOenrSC5ABEuVI6+lNj/0li5P7teg96NgJ0cEfWlyMD1FMZQeOajbcAR1FBROSQODTvfn86rQyhlIPGKmBGODQMDyeOtDe9Ge/emn1zzSGIzcEgfhUMp6H9KlJ96rSNycmkykRyN1wapXDZzg4qd2wD6VRuJMZzUiZDLJwc9az7iQZHfipZ5enPOKzZZRgH8jSIbILqT5TzWPcS4U56VZu5z81YN9PtBXPFIybIry6xxmsi5vcNljTL65KkknJ7VzGo3xDn5ufSqjG5hKVjtNBkk1DUoraIbizAH6V9BaVarZ2UMCABUUCvHfgVo5uHn1WZSNnyICOua9vTGM9QaL3nZdDppxtDXqPyQetO+gpp9qVMnGfyrRFjsEdOKaRg96kOM8UhIxyM5pjIyccg0gBY4JoY+gpytgYoAUKegp20+lIDtIp2/mhjsNJAYYNIQD0prEE1GWOcGlcCQNjg4qNjgZzUbkgn1pN+eD1o3AezZXNU7mUKCewp8z8cHAFc94g1AW1sxJ/GgmUlFHGfEHWdsbRI/LcV5Bdyma4wOQTW94s1I3V45zkZwKxrCIZLuPlHNKrLljY4G+Z3ZcwILQIo+Z+vtXNeKb8QQ+QjcnqK2r25WKF534A4ArzvU7o3Vy7k9+K5qcbu7Im9LFJ+Tk85qxChx14quRk8GrcB+Xb39ac3clIj3kN0xUcke8j1NSSMCfemq4RsnOakCNozG1TZEcbE8se1O2eZ8/Qe9V53BYAc4o3GRsT+fSm9D70pG4HB6UwEjr0oAXIPbihTxSoMrmkZcZx1NAC8uuSeKPLwu6kBKjBHFBbt2oAUk44/GmAZNPbB4XmnmPagJ60ARKMnFOxlsZ4oPCknihc4PFACKOe4q9bKMjHIqqgyRV+1TpmgDSg7bRyK2rBRjnvxWbZx5I5HNbtlGqkD9ahlo1bFGZvlHFT3o/dkd6ksWAGFApb1T5Z4pxWo6mxy00e6Tit7Qbfb8xFZyRB5uPWuj02Py0AzXQ9jlk9DodPHSujsevSsDT1GetdBZdazOWW5vWg6cVpQg+1Z1qOBWlD05pWFE+fvFFuvORyOn6V57dw4dua9j8VaeSrAcjjGT9PevLNWtzGzZHIqY7Hovc55lycVDOm3HNXVUnOBzVaeM7vmNF9RFcMuMGm7QynpUUnDcU1XIbPWnYCRQQrdMVXYZBq0CGjx0NRSgA4oAijQgEmlxwSfwqRAChpjE4GR37UAKTkgH8/SlfDL64pS
nGT6VGSQSKBgoxJk05dxOByKQfMAcVPDtRiW6GgCwkQZQW7VAqqswJ5XNAnKnb/Caa5DJ6YoQF26VcK6EYaoBPkHPX1qPz8xqD/DUR557mhAWoWDklsY/WrFk8aRSbjz2rNGVOF4qWEYkIbOO9DQrj2fzJfl6e1SxOVBBJ9qfbomWzgCmzbASB60eQ7DXYtjdyaswwFsbRgVAqMW2gE96mgnaNyOooe2gIr3SjecA8dak0y5a2uUcHAzzT22sCeCetVZBwGA5qovoTJXPTNKu9wjlU8HqQa7CwuRJGCCK8m8Nahx5Lvj05rvdIusEKTWc4nO1Zm9eJzuFT6XdGKQAnHNMVhLFxzVNgYZcj1pQl0KWp6do95vCHPHSukhYMK830C9ztBPNdzp8+4Alqtolo1CO/f0qVVwOaarAqCBSgnd7VD1JbHgAUxulOzzjvTXxjrRohDVHPYn0o5BwOKTPNKM8ZFJlAwPtXmPj2yNprCTxr8lwOSBxkAe1eoMMgdu1cz450032jOyDdLCQVwOeoz2oi7Mlq55zExH41fgkwQayoWBUY6fzq3E/OOaUlZmVjYR+hFWonzWXE5x2q7A+VHtUiNW3bmr0TdPSsiB8GtCKTIFIRoo3pUoOcZ5qpE3NWkHA6VQXJFx+FSA4FMXHGaAeaBkue46U4dOKjyO1KD0pgmSDkY6UFiqEjk03fzS5yOKCkxxOcZpeMcdKYBye9P79uaaLAnOPypW4wB0pFwTmlBOTx+dMpMVR78U4elNByeKUHjiqGiQHAoLY6U3r0pVHzZP4UFIUEgc96eCMUzqRThy360XKQ/nHP5UDjIpOvAp2CAT6Uy0xw9+46in4GMVEM9u9SqOOlNjQvYgU4AgU0kkccc0oPy0FD/XtR70zOD6igEgmgpDj1pTmmc5BFKx4xVFCk/wD6qQnLfSkU4OMU2aQIvuO1BSJAcY4+tK8qgcHJqsPNnGc7E9asRRCPtzSKRUjDtfHJ2AitFVAGB1qpKP8AS4TzyDVwY6dxTQ1oKpwRTx69Pao0Oc9c08H8jVFoUcCmSlto2DJzzUgPHFMxyTk80hkcpIhkI6qpP6Vxmn6Tc61bvd3d9Mjs52xqOBjpXaOm9ChPBGDTLW2S2t44YhtRB0FNScVoHLd6kelpPDZRxXTGSROC/wDe96t5xnBpnI6dDTwM00WRQxiMsT8zN1NS54po5J9RS4IOO9OwxTk0DpzwaQn86TPHXNMYqoqA475peAMGm57EmoZnLN5UfUjk+gpoYyUfapDHnMY6+9L5DRMWgfC/3O1SqgRNqjp+tRSxPIhAldD6gUwSHrOPuvw3vThIjD76n8apnTyVKvcyN+FVxpAXCpcSBAc49aLoNS5O0auCzrz700zhOUkRh6FqrNpUDoQ7OaSLTLT7jxDcPWi6HZlqO/hJIMigjqM0r38A58xfwOaiGl2JXm3Sm/2faJ923SldFJMnjmSeHfGcqehqN2O33pVCxoFRQqj0qCZgQRmpuUQSuSD6Vm3D/MeeBVmaTGcdKzLl+Cc0rkMhuZAO/Ssm7m2pwenUVYvJgO56ViXUpOcmkZyZWurkDdya5+9uOpzn2q1e3G4/L+tYN/PgdeaDGTKGp3J2nmuaw91eIi7iWbGM1e1G4BYgkV0Pws0T+2fFFuCMxRsHfjtVOXIrmUI+0mkfQ3w/0gaP4Zs4CoDlNzd+tdQg44x+VRIuEVQcADFTDAGCOKmjGyuz0HuKABzxinEZ5AFGMjpTeQePyra4hQQx7nNLgZJoz6Co3bnFDGh33jz0o2gkimg+lAYj60wHFcDk9PSmkEDrmjdxzTWbPSkxjS2aTeT701gc5pOMUIQ4kN7Gq04xkjrTnbae9RSzAqc8H1oYXKdzNtQ7sj3ry3x3rXMkaMceldj4r1UWsDANzivDtdv2uLl2znJpruzjrTvojPkcz3H1NW24AjHT+KoLUGNDI2Nx6CqusXgsbJiT+8cd65Jy55WRitFdmD4t1IM32eJgFUc4Fct94ZqS5kaaZnY5JNRxkbuat+6rIz3d2SBNoGcc0oyQArD60yWQuwC9BUkalRnFZjHGIBuuaryhgdwzxU5k+fHaopZdw2gUCASny9uflqJsc0jZ6Cm7S1ADwcd6YefqKcEI570Y5OaBjolJJxyKHOHHFSRHJwBjNJdxgBSMZIoAikYGmE8YzRsPGeadgEZFACxD3qedwVX2FVNxA46UoYt160AKx56VKhAHPT3oAB4IJ+lOOM4529aAEQ/NkVoWbFmA6VQHDdwKv2hHy8dO9MDdtccAVuWEYJ+bNYljgEE810enoXwMVnIuJrWSAfNg4p94AVIyKmtoWCnt6UyeIgnNVDVhU2KNrB8+SMGte2QAgVVgTBHGa0LdfmGa2kckjasFxg1vWHUA8CsSxHAFbtkMnNQc8jctenA6VpQjIz3rOteBwa04enSgS3OB8SWxZNwHOP8ACvK/Edph+R19Pwr2/U4d8eG57V5v4r0/IYKOR0/Ss1oem0eSTqIpiKr3igN1GDWnqtm4k4HNY1ykg+U5yKbRJn3CAHioNvrVtlLcnk01oyAeKYiBwQBtOaR+VyakVSWx2pZV2DANFwKwbaMEUq/OyrjFGBn3p6Dy2z+VAE8rLuwOMcVUZhu6c0ssgycDmmY4B7mhASo2SAPxp0nNQqSh6dachOTQAoG1xU8i8k9jUDjkZ/OpnPyDnpQwGnGD2qSCLILHoKi3Aj5qkSViNoFAEoCtKoXkVI8YD7R1NNtHCSbnHGKsOS65XqKNhoTb5S4PWq4UsxJ+7TpZGK4YHipraMyr04FGwtyKNmB6HHTNKkgOQcZ7VcYxKqqcdO1Z8qDfx60LUGTCFtoc9KiuDheKvSyKbaMAYYDFU3h8xwB3oT7g/IjtJWilVgT19a7/AES9FxAGDfOBzXBPGYwcgjFXdE1FrS4Xc3yE81TXMjKpG569pN4HAU9RV+4XcuRz3rk7K6H7uaM5RuvtXTWc6zQjnINYNWdzKLLOl3BilAJwfQ13ujXu9VyeRXm8oMUuR+db+hX2GUFsGtNy2eqWswZAc1aGDzXPaXdqygZrcR9yjnilYzZKM5pHzS5/D3pwJI5FIkhxzUgJ2gUEfNjjHpSjOT2oY0xAcYBpkqK6lWXIYYNOxjmhicc1LVwPF9ZsW0zU57QghUI2e4IzUMTGu2+JGml7ePUIl5j4fA65xXAQyc8GqfvRuQ0asD5HXpV6B8EVkRvz1q7A/IrNkmtG+CD3rQjbGMVjo+RnvWhbMCvJGfSgTNeFulXI2G0etZEcuOhq3FJ6ikkI0Q3HXmlBzVdHzxUwOQKpCuSdelOHqO9MBHanKfSmUO5yecmnj1HFMHt0pwOfpQNDgfT05pRxx3pqjkUoYD8apFXHdOvSjJb6UzP1FOWmO47OSKd1FNxzT0Bplocq0/gYx1pB2prHAPpSLQq55Pan96Yp+UY6YpepzTHckByOKd2Pt1pmfSnDjrVFIeOnI4PWnBuaYDzgg805up9h0p+pQ8HAx+tCn5qRc4waVaRSHHmkyBn1oYZ4FAHNB
aYpHT0pGPc9qCcDNIpLD2plEYZ3OEGB61IsIXluT6mnH6UpIOBQUKjDpipARjNMCgjgU7IXFOxSZBPxPAT05zVkcmq1x1RvQ1ZQ5OTTGhy/N160AYPPSlHC4FJk556Uy0OjIGRk07qOKYPf9KcDwaaGM3E5A4p3QUhPpxRjp3oKQY+tAPvilGe9J196ZQH6Um7NOZSeKaRzTHYM5OO9NPHfmlA+akkcIpZugpoBk8mxMAZY9BRDH5YJP3m6mmxqcmRxyeg9qlP5UxoUjngmkOOc0pPFNPHvQUH0opATikY+tADiQahkBVlkHbrTyeOKTJ+62MUx2AsD3PPtTH4GRzjtSKcfKTyKY7YU1DLRHK3B5qjNJznFWJn4x2rMuZeSM9aklsinfqfX0rKupsAjJ5FWbiXGfWsi6my2OOKRm2VbubYhJwT6Vz93OWcnJwOlXL6fjBPSufvJwXbHAH60GUmV72cLnFc9fT5yf51cvZcknP51gX1x1GRnFNIwkzKuJS87fXFfR/wL8Pf2foTX8yETXBwuf7vBBrwjwnpD6xr9naquRLKFb6V9f6TaJZ6fBbRLhIowg49Bis6r5pKCN8NCyc2W+hFTIBxUJHPJ61KowT7V0RVjckAIpjAYpT9cE0xuKYwJA4oxkdcU3BPJpyKfWhdwAgZx0pG7VIU4zUZU5OcUANJqIsQcinmmEdcc0ABPBPrTA3Jz+VO3Y69KrTZzkHNDuA5znINZmpXC28TP6cVNLIyAknArg/Guui3t5EQ4PTrSvdmVSfKjj/G2sGadkDZwa4UEyy7jyB3qbUbh7iYsT1plvGSQAfrUVp8qsjiSbZOzBIzI+Aij864PxFqJvLk4PyjitrxRqYjT7NC3Trg1yG0u57k1nTXKuZkTfM7IRMZJPT1qJs5O3pUzDsPpT4otwGalu4iBDgggcetW0bKnNMkjCrjvURViNwzgUtxgW5IHWmNgAY60+NRgt1qM5JyPyoEIpyak4Lciq54J605CQcmgY6Q/Pz0ozleO9NdtzZxQzDAI4oESE+WRilmk3lQewqA5YZpgJU0WGTd+O1PX5uO5qEvTkJFACyIA5A7VGoKt14qRmIJzyaaAOcigB4yTg0N96hSOgzUgjJ6jigAUgnrWpYrlgMVmRJhiPStayDKy8UwR0NhHyPlxXXaZEAgO3Fcvp+4jJA4FdLppdtoJIFZNXNYm+qKIhjr61XmjA6805pCEULwaaTleTWtJWJqshVfSrlqvzAkVXUdu1XLbrWkjkZsWC4HNbtiBnp1rGsh+lbVn1FRY55m1ajgetaMI96z7U1ow+uM0hRMe6TcCBXIeILbep4rtJweufrWJqdruViO4pNHpni2swGGUh1+UdK5m8tzJPmPnI5r0XxJZF5SCM4/+tXHLbhJ2DcA96jzJOTMflTNkZqvcH5uK19UjVJWBHU1jTgB6a11EyNiUGcZqOR9y8fSpGJ2H9KgU4+9TAj7/AEpznPIp8YyzdKHA5GOaYFYZJJqfIMQ5ppX5AaY2Vx70AKBk9aenEnTio8kH3NKGOcGgBzHg/pUkPzR5NVznPtUkblc+9AD2UlvapbdhGxyPxqCQ9xxSZJAGfpQBdmUBMgcnvTbeYp1PFETF1CMPpUcseF4o8gJnYSZCmrWnyAMU9RVa3j2455PWpmTyn30eQBcKFlwBmmSMCQcCh5gSWI7YqFTkZ9KYFmUhQrAZpqXBZsgYxT4lDqfaq0yhPumlvoBJc3HnLt4B71Uwyn6Gn243TZ6ip7iIIee9NPl0Dc6Hwvqo4t5TweBn8K7XT7loZQpPymvI4pDFIHGQQcjFd7oOoreWqqT+8Uf4Upx6o5pxtqd6ds0WeOeait5mgmGDjniqOmXOV8tzz61buVwNw+tRF2dhxdzu9CvzKqYPzjt612Wn3IeMd68f0e/aCRcEg16DpN8JVR1OD/EKtqwSR2CkHrTsnIqnbTBgOasg56VLRBN70o7Gowe2acGKg5qBMexB5ppHvz2pC3y8Cmg7uvAqkSQXtvHd2rwSKGRxXiOo2kmmahNaSD5ozgH14r3JlwARXB/ErSvMhTUYF+aPiTHfOBSi7St3KepxcchIGDVyGUjnPFYsU46k1dhlxQ1bQzsbcMnc1egkxisWCQd6uwy4PPeoJNxXyBircUmeBWRG+e/FXYWGB1piNVHxgirSPu61nRvxVyJu1IRaTGKkXioFapARVDuSgnuRS9OKi3Clz71SQ7jwTjml3Yx3Paow2frUiKSeaewxwy2PWpB8vXHpScAetKvJyetBSHKM81IBimj16UFuKNy0KetMk+430p45NIxBGMUyiO0fdCM9qsA5+lUNPk3GZVPCNg1fH6U7WZdx49qVTkYPApo/Sne46GgaHfj0pc8k4pp6Hin8EH9KZaEyTnnFSA4FRdT/ADpxODjPFMpEvQHvTHyeRxQxwvWonnVFyTSKRJ64yTTlyOhrLa9eRysCk+4rQt43UFm7jvTtbctE5JGCOaAOOOtJ1HWjBHIoGPxjjr9KG/GmhjTlOOT1qykIMEfMOO1Kr/h3prnkDtQevFJFEiybulOJ4HNV4wy8YzUqtyM9apaopEvagMcA/hTA2BjvSb8dRQhkh4OaVTzmolY5wafkZxmqZSH5HU9aXgjjpUfU89acPlpNFpjjnk5pucGlDA/1pWAx0oGhrDP1pjKGwHGcGnZwT1IoI4poY3OeKrajJLHayPCPnAyKsY7ikYBlKsAR0NUOxDZTGa2SRxgkcips+lJsCKAv3R2oPIJpFAeOlNJyORQDuXpTXJzzn6UwFb2PNMJP1pW4PpUZ79qYIc3JzVaZvkIp7NjJzxVWeTPFQyiCd8Dk1l3Eg5zVi5kxWVcycHFSQ2V7ubA681jXc5A6jNWbqXdn0FYV9PtGM8mkZSZQvJ8g85rBvrj0/OrV7PhiAa52+ueGyce1NIwlIgvLrjr061hzS735p13OTwDUmg6fLqurW1pCNzSuEH41SajqzKzm7I9p+AfhziXWJ14H7uPI6Hg5r3NRhaxvDGlR6RolrZxAL5cahsd2xya2l4AJPFY0U5NzfU9KyilFdByjd2pW4OaQEEEqaQnHXrXSJBnJxTjkim/xdaXc2MEUwFLY60bgMg0pIxSEce9K5Vh+7K5HpUTSA/WkOV4BqJ/mIx2pbsBzHg1ETjgdaepwcHrUcnqPxqgGSHP4dqgeQKOaWVqydSu1t42djjHSpbIk7alDxDqa21s7E4bBrxDxJqjXdw/zZGa6DxprzTSvGjVwjNuLMeaL8iuzhnPnYirubPc9BTdWu10+yOG/esKnDrbQtcTYGOlcPrWoNeXDMWOM8CuZLnldkzfKrIz7ydpp2ZiTk0sJwD9OtV2IJGKnjXgk9MVU5XM0hTja2ajExUfLRP8AKgx61CORUWAcZS55qfzgYggqKOMnoCaUQ5lCng0aDGEkd+KaW5yKkkX5ioNMkUjAXpQIj4OfSkBy1Ozgcim54GetAx74C5prfdBGOe1K7ZGBUbbgOaAJB90HPaox8zemaF4HzH8KdkHnGBQAEAH1pAdp
wTxS54yORSdTk0AKzktxThyfw5pV29aVCN1ACsu3HPWpN/alYDihV3HnpQBJb4LZP41tWPLr7dKyoI+duOtbWmR/MBigaR0enRlmUIPauvs7cRxKD1NYejw/dAH412lnaEqjMO3NYyep0QjoVDAFXkVXbitm5QbOOtZk6Y61001oY1Ssoq5arluarLz7VbtRnGDVSORm3ZDnjrW3aLyKxbPpyMVt2nUVG5zyNi2HA960YemR2qhbdBmtGIHGc5osKO5lyZPBH61RuVBUk1ot8ynOarzJgYODQ1c9JHCeIbLduMYzxXnt8kUwdSNkqV65qURJwe9ebeLLF4pmaIc98VlbWxTPO9WySd3BHFZjwlotw7Vvanay+aBIuARmsqc7QUTPpmqIMp22rgjioCNx4HNXjCWJGKpkGNzimIRRhsd6bLz9RTycneAc1G+D689aAFjfIwe1MkA6jmkUEEY+lOYYzigZGSc4H4Gn44z3ojTcevNKw6gGgQ3aTyKeD2PX1pyYZQPSmMOcgUAOIwgJ60sZDOAKa7AgAA01MrzQMtFtkwx1FFw+6XAPWoSxJ3HqamxlVJ+8aBDYy6SAZ6Vblc5+Y9etVZARgt1NSxDcp396AIZX9OlSWgDEg0yVduBTYw6nKnimBftGxOR2amX8RBPpTYJEVt7feqS5uVmQAde9T1DoRWQUg56ipWOUOe1V1kVVIXrVi3KsDk8H1oYFGXjpVvS7t7WdXB71FIqmRh2BpXj2qD2q4slq6PRNNvVniWVDzgZH5V0VtN5sYHcivKtE1BrWYKT8h4P6V3lhdAorqcqelZzjY5muVmyxMUuRXR+H9RKuoJ4rmwfMjBHXFJbzNFJkGqi7qxondHsmn3auoIOK3beTcvavM9A1XKqp6mu3066DADNDRDRubqN2OKhjcMOKf1/nUEihj9KUEZ5NMIz0PSj6daTuA9snoeagurdZ4HhlGVdcYNTDFObOefrUyV0B4J4g099F1aW1fO0H5GPcYH+NV7ebBHNepfEbQ/7T00XFuoNzB09xxmvHUmIOD+RrRPnjfqJnRW8wxg9far8EowK5u3n6c1q282QKholo3oJOmDWjBJ61g28mDwa0beTPOSaRDNu3fJx2q/E1Y8MmQO1X4pB0zigRoo2B161IrcVSRyTVhG4qloIsAg+1OFRqcVIOnpVIEOA5qccCokqQD3NA0OHzZp4GDSLgCgHnFItDyfypBzTcfN06U4cDpTRaY7PzY9qOmOtN6cnFOwccUMtGfp48u+u06ZfcPyrSBBOOayfMEet7Cf8AWJn8q1x7VbZQ9e/pS0g9utHbNBQ4dyBR0Pak3cZNRySN/AuSe1MpFgleckiq7TqMhSCfQVGIHl/1r/gDVmGFIx8qjPSjRFIhkEswwgKj3piWOeJHJ9s1fHHQUADHYGldl6EcEKRLhVAqx0HPWowwNKOvNBQ5elKCB70hIFJ05HSqRSYrLk8cGmbmU4apA3vTZMMuD1NMpAvTPWng9ARzUKkp7iplYNzxTKQ8H26U0nLcd6F45pCdwB6YplAchs5GO9Y2qS6jDIxtVVkPAz1raHGe/PNRyAkgDpTTsO1xlj5gtoxOR5uOcetWelMAxwOvrS5yOKZSHryc9D0o3Z6c+1R7ivJpVIJBHWgpEg+U0/cO1REEse1CnDc0MpEhwSeaaeD7UjONvagncD3NMoOmeKT9c9aUggZ/SkYcEc0XGJ3pp68daaFwxOcnFK2O3WgYdOKaDS7vzppznjigoR+Rz+lMbp3pze3Wo2Ye9DAgmfGQDiqE0p2nirN05I4x9ay7hyM9ai4mVrqTOfSsm7lxkd6tXcuFPOax7iQDn+dIybKd3LtVjXN6hcDJOeK072bqCa5bUrnGcdqZjJlDUrnDMQa5jULrLEd6ualcnDAGuduJSzcmqOaTHF979a9s+AHhvzrmXV7hf3UfyR5H8frXkfhfSZta1i2s4FJMrhAcdMmvsLwzpEWh6NbWUAH7tQGPq2OTWFV3tBdTqwsLe+zaiTapqQ45qMHsfSnEnp61vBWR0sXp7UooXg5oyc4NWAuaUnnmmE56UZx1pgSHgAimucc55pC9NIDZ5pMZHI5HTk0qtx05proQ2aRiTjsaa2B6ivgg9eKrs2DjvT3k59RVO5lA5HFJuwmyK7nCrnOAK818beINgdEbHatrxVrS2sDgNzXjer373lw5ZiRmiK+0zjrVL+6indztPKXdutNiXOWbhRUYXc2OazNf1NbeEwxn5u5Fc9STm7Iy0irsoeJdVMshhiPyDjiubPzfWmyyF3JJ60gJpv3VZGW+rAjDdKlRzjB6U0Lk4PWpBGACWOAKzGExUgAdajJVXHcUxxtGfWm4z1oAuCQKvWo1kxlu/aoMkmnMCFxRYADZJPepdwCkkjcars3A9qM7vcUAK2M5qJ2yR+lTMny+9QuMnihAKhwPenKoI5pjDAAHWlDcgfjQA9488iom44q15gKAfnUPBY5/GgCIknjtQOT9al2bug4qMjLYGKBD40z9KXBDcVJF1CmpJ1UYwecUDIxkkc1Mj8YqE9RUioRzjNAF2CTn3xW1pAZ5AM9TXP2ykuOOc11egW7M4Jzinsio6nceH4uVB5rubWMGEZ44HFYHh6z8uMFRknvXVwxYT5uvasN2dcVZGXfJgHHpWLOM9a3tQHp0rEnXJrshsclXcroM9RxVy1A3DjiqwUj6VatsbhxSZzSNuzxj2rXtF+YVlWK/Litm2GGGelQjnma9scjpWjD0ArOt+2K0YsY4602StzNz+dNfB9jT9tMfjjimegjKvog+Sa4XxFZvJLhR1PNejTQl+g5rC1ayVlJHDAVD0L3PLdVt45LZyF+ZPlIri9Rsfs7Ag53DIr0m4s3aecgfKTXP6/p6Pag52yLWT91huefO21ypOCBVOUF92a0biBjOy+hqK7i8qHnrVkGYRsTHSkjQNk05/mGe/akiVgTimA1kGSelNB4GKlwdrE81GeOQKAGhgh6cmmM/YUp5570gXJBOaAJFJXBHAp5be1MbgDHNCgAZ7UAOVRkimY5FOXLtwaftXaSTzQArHauMdqQOfyoB3gseopUXKk44oAk3HaAe1Sr8yAgc1CjDBBp8XzSKIyeKAHyx8qDjnvVhI1WMlevpVS5diwx1HekVyvLHrStoArqpJGKj+4pxzSmYKcYp0jqQD7U9QIkwCfSpBnscCnW8YkiY55HSnJEVGetFwGBQW/nVvyw1vlR8w6io43RQcjBPSiF8OQT8pHekBXIKc+9dB4e1TYywSN8p4Ht0rCvCB93oTUMLMrAgcirWqsyJRuj1qxuQCBnr0q5J83K81xHh/U/NAhlPPQV1lpcgqFfv3rJpxZz3cXY09PuzBJ1xg16BoepCQKpNeZv8uSv1rV0i/aFwCcAVonzI03PZbK434FaQOPeuJ0XUg6KCeK621mEiD1pMzasWuaUcdeTTQc9D+FOGD2qWTYXvTh055NN7elAHc1Ihsg+UrgYPBrw74i6E2jawbiAH7LcEkY6KeBjrXursGBx+NYviXSYtY0q
a2l5LDKn3HTuKlS9nK/QaPn+Cb5uDWra3HHX86wtQgl0zUJbScYkjbacVPa3PI5reURtHV2824A1p20vTBxXNWk44HrWtbyj16Vk0ZtHQwyj1NXoJNx61gQSE4IrUt5AQCKRLRsxP6kZq2jHOKzIH4GccVdRsAHNMRfVumKlT2qnE2WBOKux9PU072ETJ6dqePamjpTx60hoVT6/hR3oPWkzmmix3JB/lThwKaeD7Ucnn0plJinJBp2cj6UyQ4QnPWnLycmmUjD1xzBqtlL7EfqK6FegrF8QWvnxRuDjyznn8K07JxJbRuMEMM1WjSNFsWQ3PBo75/Km5GacvWkNC4zin4x05poFBOMYHA96opMcWx6D6UobABPfjimnnFKy/KCD0oLRLRkU0HIJzims3JGeMetIpDlxyR0pcbQcc96a/CDaOtB+X/wDXVFocr7j/AEp4x1qNBkU9TjH8qZSFBwcd6UdetNJ78/jSAgYyaBokXBHNMI28pyPSnAg+tOAoLG7t33e9KnA78U106svWgv2b8aZSHhsfSmF8nHanjAHA4pnygk96pFIcSAOaE6nPTtSEcZ7elJF05/KgpDyBnBo54xSHBwQelLj1pjHMeBjr601vUYzTc4z/ADpRn+LFMY/gj0NLnFRgH1yKU9MigslzkdqY+enY02NzyDTz1pFEeMLikxzz+FPwQvA4pjdePzoGhMnPGPxpCfzpG+tNJx35pjFqCVvl4HIqRzge1VJ2wp5qWMqXL4zxWRcvye4q9dvwT1zWPdSYUipM5MpXcgBIGKxbuXBPIq7eOOeRWHqMu3PNBlJmZqU4HcVympT9cmtbVJs59q5TVZyRwcDFUjmmzI1C5y5FZ2SzU+Z9zk1r+EdGk1vW7S0jUnzXAJHYZok0tWZRi5ysj2r9n/wssNm+s3SfvGPlxgj8c17gntjArN0LTY9L0y2soQAsCBR25rVC8HuaxormfO+p6llFcq6DsAknPWpljGOmaiUdeKkDsD7V1aIQ50xUTbSOTzTgxHPamyMCfalcpIjJIpG6Gms2OvSkzlaAEZj0H60iyEHgcU0n65qMnAJoAshgR7e9QyBsZWofMPUHFMlnKjg0XAY85A+YYNc7r+qraROSwDH36VZ1jUUtoi7sM4ryHxbrrXEzrGxI+tKK5mc1WpyqyMzxPrD3ty6hjtrAVdxyevagZZtx61Df3aWkRPG4jgZqKtS/uxOZK2rK+qXy2sJAPz1xV5M80rMxJzVm8uzcysXPFQOV2YHNQvdRm3zO5VCg1IF2KM0qgZOaJXLduB2qQHHJQY60wqwODnmrEEiqvIyccCmGQGQFh+FICMQF1JPAAqILjIq1JMXUj+ECqqkZoAawwwwenWrSKrQ5PWqrDc/FPyRwDQBFNjJAzQp4/lT5F+QnGKgB6ZPFAhzsxB7Ck7e9KDzx0ppILGgYpXjNNXIIJp+/gcYAoP8Aq/rQAqMpb5ulDnL5HSogfX0pQfloEPDgjBNIODnHNIAfWnY2qO1AyRG5zSlmZ6ijNSrjGT2oAkUZYDvV7CsoRce9UlG5cirlqh3jvQCNPTbT5xx2ru9AtI44csOSa5bTVUBWPfgV23h5PNaPOQM81LV0bw8js9JGIkAXAxitPawzkkUWUYSNAAeB6VbOGB6UoRN2Yt5k5FZEw59xW7ejINY1wPmOK6kcdQrD8OKs2681CAOg61at8g81Mkc8jYsQQBj862LbtisqzGMZ59K1rbrnNQjnma9vjj+VaMXQGs625FaEPIpkR3KANJtBHFKef5UqnGMjBpnfEix2z17mqWq2/wC4LDritI8kelMuMFCvWpZqjgAikyAjOOK5TXYxtaMfezzXpV7poUvJGPvda4TXbUpc5bHrms5JN3QbHlWqp5dwxAztODWdqDieEFBjHBNdl4lskEbMh5fnP+TXI7QIXVuaSd1cl6GakYCEnHtQioZBin+WxYgdBRDGFclulWSRTL+AqsAc89BzV59ufm6daicKm4/wt0pAVHVSARSNjAFPzjgdKRv85pgNPfIzS5Gwr3pGYY44NRMSc4oAmU7ScdaGzkkjApi545yalYfhQAsYABOeTR5jpwBxTS+0Db070pJIGKAIz82cGrFg5iYueg4pgVRwAOamQbFZSOKGA8lHO7bxVeduwHB7U5JgpIAyDSSDADdc9aAGqgK8c57UbGJ29KIsMSCfpVxEAhOetDYFYqYgCO9OMzBeD14ps+7hR0xmiNxtwR0oEI5c84qXeXRc8Ed6SRwUAXqKYrBYyD160APWMtLhhwankiCDBUDFRxbn5Y9qSWV/utzRrcBscpikDL97PFdjoOqLcxLHIf3g6ZrkDFiMN2NMgmaGYMpwQarSSsZzhc9Vt7jI2Meam3FGznPeuZ0XU1uogrnEg71vQy8YbrWabi9TGLcXZnU6HqZicBmrv9F1QMoDHj0rxxZDE+5a6fQtV8sgFua0tc0auexW0ocA9qtDAxmuQ0rUt+05zXTWs3mqCP1rNozaLgI6Cjg9KbkgmkDcZotYkkLYB5+oqM4NGOOmcGgc9TzUSV0CPNPi14ZN7bjU7Nf38Qw6juO5OT/SvHYJirYPrX1VcRiSNlbJBGDzXz78SvDL6JqrT2y/6NMdwx/D7dTVUJ/8u5fIpFCxucDngelblrKGAOQK4y1lIIFbtlcdMVco2E0dTbygYB61p28nQg8GudtZemK1LaXBx296zsZs6KCT5eKuxSehBrDglwQAeDWlavTIZswN8uavxNxyayoJM4z0q9C/FSybF9DnHen5AqBGAANOX5jzTRaJAcn6U8HHpTBgL2oA3ds1QDsknjpTicAY60DAHHWkHOeKZSI7gEJuyfpVlB61Dd8WxxUq5IA5o6FpkOoLutmA6ngAVDonyWIhJy8R2NzV7GF68U1ESMsVADPyfemtrFoePwxTs01cAU7PH1plIcOcUYI9KQAAfSl+8M0FoN2DzwD3qRGB5J/+vSAcYodV57GnuUgdw30HvTCik+mOaYQV78deaYLhfMwM5+lNIpFkg4ADHH0pN5B5Gfxpm52XgY/GnCPco3sTz0oLQhnA4YEfQU8SE8qOPU0mwBcjinOyLjuTTLQcs4BOcdRUi4GBUFuS5ZhwM9alaLfyx5FBSHGQbsA804kn61HgBgcVIpzxk0yh5OQAKY4DDDde3tTjwO2f501uvGCe+e1MaGoSCFPPvUg7j34piEmjPzDFMoGJLYHalb26eooc8DIpCwoLQvOOMYFAYE4xTSxB4FNOetNDHk44Ip33uM4xTONtG7rmmUkP3DpijHWos5APSnhtvPWmNDuAflGaVX7YFMDDHoMUm4n6UikTKRzmmP7AcUxTgc9aUsNo5oKGE9sUx6eevtUch9etK4+gx275qjcPgEg1PK+MjNZ1zL161LFcqXknB5xWJeSZ5wMVevHJJ9BWPdy880iGZ93KACcGuf1CXr6VqX0gGe2K53UZeDmgwkYupzYUnNclqUpJIzW5qswJIXpXMXrkuavY5psqgFmxX0L+z/4WNvBJrN3Fh3BjiBHb1rxnwT
oUuv69bWUakhmG/wBlzzX2Nomnxabp1vZ26gRQoEUDjpWFV3agup1YWnZc7NCMYxwPxqXP401QMHPWlBxzW8UkrHQOzSgg/SmA8kUhGM4xVjHPj8KiOCead2pjUAGPpTCQpPP4UrPj6VE5B5yM0n5AIXz6VDI2Bk0jyCMc9PaoJJ125Xmi5LYszjAJPFZl/eJCjszAClvbpUUszfKO1eceMPEQUMkbcmkldmFWpyoqeMPEHmO6Ix56V59PKZXyxJJp9xM9xMSSTmmXEkdjCZZcb/Q0qk7e6jlSv7zIry4S0ty8hG7sK4jU9Qa5mY5qbV9RkvJic8VjvxWaXLqyJS5h6Z5yM08uFTGKIF34HFEycnaDgVDd2FhquTx2xTk5+lSQJleTSbMMQKQWHZRFB6mkQeZzSNH+7J6elMUtt47UASzoqgAnH0qqeQdtPkLFsE84pGwOM0ANIC555qPkcsMmn8E5JpzY25H4UAQs7MAO1IMHjp704c5P8qaSM8GgQ4oMcmoyvPFDOSMDGKeDgADrQMMhV/2qXBYd8VE3DZpwkx1oARwo6dab79v505hvwelNYHj0FAEqH0pZOTjPFMiBzkdakUE80AKkZJGalEfpigAlhipSHWgAUYIBrStyiRncPmNVbePLh3qxw0nygYoWo0bekRvLOmAdleueGdPCxRFlHJzivNvCysHX5cnPXFev+HoH2I756VMzqoo3VtzswBgY4pJICi8CtCHG3kdBzUN0MLn+VaQT2Ln3OfvuRyeax5xzWxfhuSCax5AcknoO9bI45kIGAc1YgHIPBqEj2zU1v196hmDNmzPQVr246YrIsz0rYt+oPT6UrHLM1bYAgelX4TwPWs+3z74rQh/P3pNExKWBzig89BQtLj0pHchOVX1qoz/Nz1q43C4zWZNw4+tOKuaXsTP8wx+lcrr1kJhIAvuK6o5ABX0rMulaRyMketKxVzyfW7EvA0RGG6iuDnt/LuHjHTOCa9j8R6XJ5vmR5wRXE32kKLjDDLNyfWs9hNHGNCqShf4TVS7i8qUrj5fatXXLWSKUgKVI4HuKzGWRocPzihdySjJHuGagZWCnP4VPuZJscgUtyv8AF2psRQ55WkYE9T0qVlwc9qTCkZHamBEFyM8ZpSgVR696erAdsYochxuFAETNzThLkHNM2ZyDSEH0AoAcf1qUZVFJFMhXdJz0qWUhhtU/hQAxT849Kndt4x0amRqGDBjjA4ogXDZbmgBJY+m3r3pu4hSpFTXBIPy4ANREfJz1oAZCoZ+nPvU8rlRjnIqugw+d1SuzHk9qAGSFiop0IHcZzQo3ocdqI2KH5gM0ATmLbIOOMVHOg6U+SUtHk8mopJC4A60gJYXIwD17UpJMm5sUioCo9RTmTeMjORTEXvka0LAfhWVcLzkVPFM8asgPympUjWWAufvUl7o9yra3L28gZTXc6LqiXcQRzhxXn8g2vxU9pctAyujYIq2lJGU4XPUVfs3SpopTE4weveuf0TVUu0EcpAcVs57H9KzTcXZmSbi7M7PQdVKgKWNegaRqe4LzXiME7ROCDxXX6Fq/K5J/zmtWr6lvU9ghuFYdf1qZSTXLaffrIF55rftp1fHzYNZszaLgo6jk4oyOMdKcMdMcUibDDyAaxvEukw61pktrKMnGVPv2rc6elRPjPHArKceqGfLeuadNpGpy20y4KN19afZTevSvZfiT4WTV7JruBf8ASYlz16gZ968RdHtZTGwKspxg10wmqkb9SjprOfAHJrXgmOBzwa5OynJUcjIrcsps4z2qGiGjpbWXcBmtSCXGMVz1tL0rUtnHBqWZ2Ogt5DitCKTvmsS3fB571fiftU2FY1oX6VaRuQRWbExO05q4j596aAsryeB+NSg44qFDgD1qUZzmqQC/Tn1qVRjqelMTjJpSw709xojueYmqWP7gqNgSDnFODbY+TTLiPLHnA+tCk55pgNP3HbgYplkg/WnDjk0wYB+tO+8QPSgtCjnNOwc4zxQBjmlGOc0yhC3ovIpG3NxwPpUhPOKT1oRaI2jGeSWp4QAY4x1peM4600gknHFMpCMoB4OKPMYDgbsU7aM880/AxxwBTLRHHmRcnIz2p8oCISoGR3pVbHGOtJMRgKO9HUsW3UrEoIxUqA9CelIvYYoyecUFIGHGSckU1CAQeaC3WgDdyeD2plIkAGCfWmHPHpTkPGAefSmHriqKQquM8daTdzxikAA7YzTSBnAGPagY7JJJJP0p3bp1pqbsk9qXk54yKCkLkkHnGOlKp4pucDBpucGmWh+MZ7Gm9vcUOcgf0pGIpoYp5GaXdzkimE4pQ3NMpC560meopG6nsKbnrz9aBkmcGjOCcUzcCOaTOOo6UikPJ65qGQkZ6Upb14qvIwA7+9SxkM54PNZV1Jg8VbuXOSBWZdMOeeakllK5b5T61iXcmPpWjdyY9axbx/U0GTZlX8nOK5vU5tqnnJrV1CfBbnk9K5u9f5Tk9KDCTMXUZNoJJ4rAcmST6960tTkDHArS8CaBJr3iC2tkQsm8F/Ze9XoldmMYucuVHsXwC8L/AGKwk1a6TbNN8kRPXb617Oi7R14qnpdlFYWUNrAAsUShQB6VfUDAyOKxpLmbm+p6duVcq6C/TkUHg5HJoDAHGKecduldNtRDVbGTjmkJ4OKdjg4qM8UMEN3dc5o3jHrSNgjFQsdpNIZI7emPaq8jYBIod/mGelVppByCc0mJsZPJuB9Kz7q5WJMsQAKbe3axKSWrz/xP4hADpG34UJX2MKlRRRJ4p8RBEZEb8jXl9/dyXU5IJZmNSaheSXUhAySarSyxadCZJiC/XFEpqCstzltze9LYkeWLT4DJMRvxwK43VdRe9lZ3J2+lR6vqUl7KSW+XsKzcsQfSskravciUub0I5D81M344IpS2T0pSuTn1qW7iJIhjkc1MZSw2AYFRBgqgYoD4bOOO9SBIF8vt1pobYcHrTppwygAdKriTe2SKAJyN3XgY6U5ACdqECqkkjZ44pysdhOeaAuSXIwMdTVULg4NSqS2Qac6lVVvWjYCI+nagneQB0prsBgDqTTUJHI60ASzAJHgHmq6ghuTjNOkJY8mmtQISQgvxRnHvR1pyJxzxQAxjzzTTnGae4J5PQU3Gef0oGSRncRipX2jjNV0+XkVLyVoAci8e9WoICxGAKrx4wM8VopeLHDtRRmk/IBsICTHd0okfzZCEqFfmYsxqe0UbyR9adrBuWIYumT83pWpZ2OWGR1qrbIpkBb1rrtLsi6K7ABe1S3Y0irm94T00SPGcbQvWvV9ItwkIBHauL8NWqxBd2BnpXoFkQIxgYxx0pqLudkLJFlYwqkiqdzyCDWg0gZBt61nXfetkiZmHfdxiseUHBxWxe8k1lyjB4qzkmU8EVNAOR0qM9Tnipohz71LOdmtZjpzWvbE59KyrTjAI5rWtx+dTc55mnb9sDrWhCeKzoK0YD2oIjuViB680qAZOaPwpSRj3qTsQyRAQRVZoQRz+FWge56elBxinsWiDy8ID14qnNAu7Pc1edii57dqz72bMY
K/eJ7Uldl3M7UkXZ90EDivPvEVs1rdNOoO3GelejOpa3ycbvQ1i6zAsts6uoOR0qZRsO9zyrWmS8CTEbWHy1gXcASJ2HUDoK7vUdMje0cIuHUk1yWpWjCU7D0HI/OostkJnMCJZcnow5xVOdsBlPUVo+Q4uGIBHJ4qjdRkyEkfWq6kFRXJzn8KbEw3kHjNSlOeOtQFfmpgLIuG9qQEbD3IqYkPFgj5hUflbQSaAI1IOc8UYGSfwBpyjcDx9KjJxwTQA5SYzkY5pYziXLDI9KYW5GOlPMg3dOaAHygE8cVJ91KrmQA8fSpdw2CgCdIDImV5x1pCAFIJ/Go4LtoSQOQfSnSOGUkDFKzArFSXIXnFPLMFw1PgKiT5uKSQgk98UwHQMu3aepprheSfWnRqrEEdaSYFWxnNICJjg4HenxxnZu6mpHRdo9SKcnyoe1MCwsRW2DADk4pI5lAIYc9KktpM27K3IFUZMh8DuaSAmIDMfQ1JJIYkK8YIpiuCm3uKiuTujAB5o3EV8GRiQKcYip5qS2IClTTm5kx0p3ALaV4pA6Egiux0bV1uUEcpw471yjRARqVxnvVZJmim3IcGq0mrMznC56aGGMHpVi2uGgbKng1y+i6ysyiKcjPqa6ANwSpBFQm4OzMU3F6nb6LrOGAZsf5NdxpephlBYgn3rxaGZonyp4FdTour/ADAFsVo0mro03PZrWcSAE1bBNcVo+rBgBurqLa6EigA/rWdiWi7270hGVOOuaajZJ559Kf34pMixBIMjaw4ryD4qeFGik/tKxj+Rj86r68nNezMob3qpd2y3NvJDMoZHBBBrNXpy5kNM+XrdijcituymBAq9498NyaLqLvGh+zSNuUgcDJPFYVm5Ugdq6XaS5kD1OrtJQwB6VrW8o4ORzXM2kvQ/hWxby8DmsmiGjoYJd3Gea0LduetYNs5PIPNatq/TOakho24X9TxVyJ6yYn4FXYWz60CNSN/SrKngDOfeqEcgXA5NONwQOF69KaAvu4AzUe4Ek9qoG5Gc80onBxzT5iki+XGeKjD5qt5gLc5NShgadiywrd6kBqFB1J5H8qlT2OaEUPSQbsGp4xk57VVeEyHKnBp/mPEoEgOOmRVehaLR54HSl6AepqJHDDKnOafnPPWnYY4HJpGP50ZwDTe/egtCmlHtSH5uPSlHAPpTuWhepNOz8o5xTQTjjvSE8e1BaHr8o46+9G0HDc5FMBJI/lUpOMCmUh6n1oXJJ7elImCOtB57cUFiADcfWjJxj9aO3WjkDpTKHJ0wODmo2JzTlIC5brUZJJODVIYoOeKU9fSmoADzT2x1ploVT1pN2OlIpxSMOPegpDt2aQjue1MBIapDkjNBSE6Dr1pGHH68UmcZ9KQ5pjE6cZzS9D7UmR0pGboBTKRIMHvTGHWkB60p5xSKtcapAPqKcx4PBqN84PagsccGhjQjN3FVpm4zmpJTVOd8DGfpUFMqyvway7hxyT1q7cP1xWRdvwcUjJlG8kHJFc/fTehrVvHABrn72TAJP50GUmY+oSAZLVzWoTHJrW1GYkt6Vzl/LnjvTSOaTKDIZ5gFBr6I+DHhgaTpQv50H2iYZUkcgGvLvhh4ZfW9ZjaVCYYiGY44+lfTNlarFGiIAkajAX0FZVm21TXzOrDQ5Vzsso4AzjOasK3PFQgAD5f1oX5ffmt4KysbvXcnA6mgtioy5UHPSk35GMjFNvsCHF9tKzDGQeKhZgRz1FReYV4PIoTBkrnGearSSjnJ5pJ5evNZ88wU8k/WjYlsnnnwP8KytQv0hiLZGP1qpqOpLAjF2ArzrxL4kLllRqSTkc9Sryl7xN4i5Koxx7GvPr27e5lJYk1FcXMlzIeSRVG/vYrKI8gy0Snb3YnPa/vSJ7q7isoizYLkcCuL1e/kupSXJx6U3UNQeeUl2yKz3bce9ZrTVkSlzAGyRmnk4zjmmKucYp4+9UN3EMZdvzHpTM5NSuc9f/100RZOVPFIBshzgA9KUbj1pJVKgelPUEKCTxQAbgCB19adEFVmZunYVEuPM5JNOY5Y+hoAjkIJJz1pm7jHannHI7VCx+bApgTKw69qW4lJwvaolJAAwDQwLLk96QDfvfhT1HHXmmIDjNByrd6AFkGCKCBjmgtkZo3CgBF6UrH5iOopmcZI6ZpcZ/nQAbs8cYpO3FBUYFOjXLdc0ACcCnKdx9qUqFBpF696AHqpIPHFOB4wBUqLlcetEkRXaRQARke+TVu2B3be9PsYEX55O3apo08yfIGKL3HY1LC2DvGX65Brv9Fs5braiDCevtWD4Z07zWUyivTdIsxCiqg5qL3eh0wh3NLTLBIRGM9P1rqLSI8Z44FZFnbSM4bHGa6CMbI8V0JWRuhkiqueOnSqM4H4Vdmbt3qhcg446U9TOZjX3X3rKlPJrUvRWVNwDVHJMrsMsealh7cZqMgbsGpIeWqGYs1rTtWvb9qybMdM9a17ftipOeZpW+Soz1q/EOnrVC36Dpmr0PFJma3IgOaDwfSjI7daByOlCOtMjc+tM3E5HYU5wQDxzUCA59qZaC6YmIqvWqcEBbhx0rQMJZc96fGm0Ypc1kWigbYZPPSs+4sfO3HkDpWvu/eMveoZH2Z3dM9qWpSscNd6a0UsiOPvZwa43WNKkiu1Crwzdfzr1vUbcSruxlu1cpqsDMSzL9zNQ73A8p1ixa1vMuMIR/jXPX4xIcD5O1ena/HFf2JQAeapz+HNcPqmn/ZoyX5z0/WpT7ia7HLTfIcCodwwatSxsWJquYiGOR8prQgjLcjNOd8rsxyKHXHIxikQF2APfpQBHu2nnvTGG7nFWDGd20imYEbnPSgCLbhcZpjZ65qQrgkDvSeX8vOcUAIV3YNTgp5W09aRVxFmo2bJNADtvGR0pytgGmhyY8Ht6VGWz9KACRyTxUkHXnkU0xkfN2oU4NAFoEK3YcVFI53DoaiBJPXNKvr3oSAnyeOKkUBupqASFWGRUiyqDjHvQBYjKrGVyCc1A/ykng0qkcg8ClZDKQFOMUCGwnb82OBTZJAzkAdalkZQuwdaYqDDZ+9SGLbBeckdabMy7vlphHJ5xTQhHJpgSM52dSDUUg6EUA/OBipmUAj+7RsIjikZDnkHrXTaLrew+XOfl9T+NcuRuPHUU9VZRwarSWjJlFM9NjaOZd8TZHtSrM0TgrwRXEaRq8tpIAWyvcfn/jXY29zBexbo2APpmou4ehztOLOr0XWSGAJ5HFd9o2r7iuWGMeteL5eIjqPcVt6RrjW7Ksh49a2spq6KUkz3a0uhIM5rQVsr2PuK850TXY5Qu1x9M119pehsc1k1YrlNkHPSlfpVeOZelS7w2OetS1fcjlMrX9Jh1iwkt7hByDtJ7HnFeH+I9Dl0a+kiZW2AnafUZP8AhX0KeTzgGue8WaEmsWLrtXzQPlb3wf8AGohL2b8gPErd+Qc8VsWkmStZupadNpl5JFKpBBI6cHk1NZyHA9utbyXVEs6O1f14rVtpOOv0rBtXPGa1bd849ayIsbduxOM1pwEYrGtmxz2rSjf5aBGgjc8V
aiUGqUP51eiHSgRYEKY+YYNRmzjbkinqSeBzUy56CmkVcgWzVTx+dSCAKeefpUwx3pw+76U7DuRiPC89aeqjrjmlGSefwFSBeaZSFRdo96bJgg55Bp+femN1oSLIhDtH7vK57VYUEDnrTV7804EAf41RSFIycCk9c84oHQnNJ7DvTLQgzjNLgn3pQMj2pygdjnmlY0QHsKXvSE8k+lGQBziqRSBPvHmpB0zxUa4JOadyTgUFocFwCetKSc4pD2x2pCeeaZQ8MAOlMkfaBzik3EDOOtMc5PzflTHcep3DpxScD6U4HPPrUYB4zVIocGHelJyKYc5pQ2BSLQoOOD1pc8YpvakAyKChWPNKG44600kKaaWAOe1A0yQnGSaaWBGT2qNpo0++4A9zUD3cQyF+fPZeae5SZaBBX3oz2xVSOZ26Iw+tWUPtTKQpOOfelJ7+nSmE5GO9NOfYkUFIkbHPrULnAOO9PJNRSnipKI5GwDmqMxzU8z8GqFwxwT+VJhcrXT4BFY13JyQe1aFyxIOTWReMTnFSZyMy+l71zOpTZBHGa2r9+uDXN38nLHjiqRzyZiahIADzzWdYWUl/fRxICxdgBUt6xaTaBnJr1j4R+FdxGo3ScD7oIqpP2ceYzpw9pKx3PgPw/HoWjRx4HnOAzketdZEwB5GcVEBgYxilPHIrClF35md78i0pDcAe/wBKXpnI+tV1fbyKeZck7jjNdJKYrMOR2phbA5NMZuc96glmC9Dk+lJiuTNJwearST4ORmqstxtzWbd6gsYJZuaVyXNF64uMBsnGO9c9q+spBGxLcj3rD13xJHAGCvnHHWvOdZ12a7kYITtNUoX1ZzTrX0ibHiHxI9wzqrfL061yru87MWJx71Dy2XmOFrG1bVwAYrc4HrUyqX92BnZR1luW9T1aO1UxwkF/WuVvL1pSWc5NV55WkYknJqEgkeorPSJm25biuxY57U3Oc46UuzPTmlCHGf0qGwJEcgbRj0q0Ihs5IzVLaQal3kDaRmlYBHBzkjihZAsZBA570+SUbACKjZQUyOlADXfcgXHFO3ERbaYF+XJ6UqyDac9ulAEYGG54okJb7vNP3ZxxTGIxgCgBvBXnNNOD3p2Mgdu+aFXnnGKAI+/0p4cFcE0uAQahC5PHagCQHAPpTXYHAFObIT0qIj169qAAn5cY5pM44p3oaRcZHegQZz2pxyBRjBI5zTwBjOaBjSfWnRHnmmkZ6VJCpYjHSgAfJHShFOKtmDAyadBHl8HFFwsOtl2g7uvar1valyGY5HpUDYDgJg1q2q7rfAHIHJqWUiosW6Ty0HOa3LCzCSKpTc/oBWfpsLPdcA59a7jSbWO3dHPzSHtQ+xcVc3PDWmOEjeYbR2Fd5YQKHGR0rO0OzklUO/yg9BXUWtpgHJBxzxVU49zsjGyJII2RgUPBq2d2zkU+GIKAQDVgAEYYYrcqxnyIRyRxVK6Iwa1LgAA4rLuSCCM0GEzFvDycVkzHDHNat7jcaypF5pnLIh7k9qmhGcdKixg1PCOR3qGYs1bPoOBWrbg4FZVnzWtbnkVJzzNGHBAq5CeKpW56VciNBktyBG9eKkVhgD0rNhmBfrmrqOD06dKk6kyRxUYGDz0qTcM80u8dMdaZaGO4RfeoBPuHANLdqdvAPNV4gxOMYxQloXcVQTOc9alWAMTn8qd5ZDBqlXnpQ2UmV5rQKvv0waybzT0ljkVhyQf610T4PXBxWe7BpmBGBU2bLPL9X0lra9KjoehH41zniTTgbUkoxXGM46HmvVde04XOSn3xypHbr7VyOo2kz2k0UifMASOKiavZhY8hnsgQ3l8YrHvkaNMDmu1u7IW85MmQWJ4Nc5qUQVnBXOeR7U+pHQ5856c1LAjD5lycU4RsQx2mngFY1wOKokimmH0aq5bcQamnj5zUSoCQaAFlXgEUw5JxUqjrnpUbKcnBH1oAaJSYyhBo24xnPtTWwpBxxU8bBh79qAK5JHFKq/OCakP3iTxTHHAPX3oAWVznA70i9PmpoUk0/wBwaABGHPvSrgNkUirwT3p2QFx3NACSEnJHNMBOR6mg/dxTh0FAE27KAHr60qSMr/LnmoEyWx+VWoUAwz9QcUANkDFwcYJpJWKHJ61NKwJG0dO9QT5bn+dAhNytjH41ZKgqCDkVDHD8nI49aVVKn5vu0MBjArJuwDSSMrKoAOae4BUgGmqm0/MMehoASNdrDNTNtx6YpGG3bx9KZw5xg8mgCPdhsg8Vcs7+S2kBjYgDsTSw2XmEkcheoqvNCY+cECqUlsJq6O50zWIbyMJIQG9zVuaMjkEY9q85inaN8qSD7HFdPpGujiO55Hrn/wCvU8ri7xMJU7bHR2WpT2T5jLY9K7nw74vRmVJpArD1P/164IBJo98Lbh6CqksTA5UlW9RWsakamkhKTR9Eabq0VwimORWJ962YpwwBUivmrTfE19pTqHLFAeoz/jXoPhv4g2s+2O4cIT1yR7e9OVGS1jqWmnuevJIGHOOlPOCeeBXOafrMF2oaGVGUjsc1rR3Csg55HrXO49GOxheMfDseqWpkiQCZe+OvB9vevLbmxlsLhopFKsDjnvXu5dCBnvXM+KNBj1AGWFQJQPTr19qVOXL7r2IlHqeeWxOQCa1rY9wM1Sls5LWZo5lKsOnGM1atcjk/pVSRmzZtXDHHStGI54BrIgwehrRt9wxj8aRJrQNhcZq7Cx6DrWbC3A6/SrsRPFArF+LjvU64wPWqsZIPNWEYnJ7U0MlGc9qOv0pO3WnAA/4VQ0PUfWpB04NMX3+lKx20FoHbt3po64pOetOA6Uy0Lxmg80neng5GcUykI/AxSKOM5oY8UowF/pQWhSfanqABkVGDk09jxgdKCkBNIcEc1m3uqxWkojmDD0NLaalDcttUEVXK9yk0zRQ89akB5qFMZ4/WnIfmyTzSNESZ554NIenrUeT6UoPHcYplJgucnJ4o703cS2e9Ln6UxoeAAPrUYPOKeOlR00V1F98013VTzSjnio5IwTk/lQUHnDGScj2pjXDEnYjHHtUixrtHyjj2py8KeKCiq32iQjaFA96Ps8j/AH5GH+6cVa7c4oAzxTuxpIgS0jxhgW/3ualWKNT8igeuBT+c+lKuOvekzRDT17YpAcmnN1zTT1OCaaGhcZJPemnqachHrzTGHU9zQWJnuKjkJxinNx0qGQ9c8UmMrXBI44rPuHwDVyckg1mznk5pEso3Lkk88VlXLEkgVo3DZzgVQmGVJqWQzDvUwGJrlNSkwzBB3rrdSRmyFHHTisD7C9xdLGqkljxgVUFcwmr6EPhDw9Jq+qx7lJQHLelfQ2l2sdhaRwQgBEGOKwvB2hx6VYruUCVhycV0a4UYY8VnNupLyR0QioRsSkAc5+lRseppu8A+38qid89Oa1SC5IZCDj+VBkI5/SqjyANz1xVS5vljGWbkVXMQ2aE1xzwD0rPuLhV5Y4rntY8T2tnGxaZQfc151r3xADs0dqCxPfPFOMJS2MZ14xPQ9Y8QQWqMWkUEH1rzvXPGDTMy25P1rjrrUbu/YvO
7YPbNRovy5PCj1pvkp+bMPeqblqe6numJkZsGq800Nom6QjPpmqd7qsdspSPlsdc1zV/ePO2WY1lKcpvXYG1HSJe1TWHnJVDhB2FZG8seT1qA5bPJp8Le2ahu2xnvqyUQk8npSOu0E08yY4HU024+bhOfWo3GRRPirLYEfAyfaqqIQamL4BA6n1oYETNtOc81JG696gc7eWOTSKcigETO6s1NZzgjoKj3ADPJozuBoAcM4pgHNKvoafIuEPvQAwkZOMVGO+abyDR60ASM4Apmcn2puMjoacCAMUAA7c/nRyDwRjNNYYxQORQA9uRj0pHwMADPvSZGaM89aAEOO1Nzz8oxSk/pQ3txQIMnPPNOJ47VGF9c1IF55oGIoJ+mKsW6ktjdgCogOP6U5CVBxkZ60AXJ5iAFXpjFRwuwORSRoXYKe9TCHEu1SaFZAWbdMuNxrobSIyQhQAqDqayhayQqhKk7j1rrNFst8aCUn1wKiUtLmkUT6LpeZd+PkBzn1rudH06LzBLMuSPugVR0m2UNl+ABhR0rq9FtA0gdiSOwpwTerN4RNnS0IA+Qity0BJwQMVBbxDACr+VacUYC55FdEUrHSkAOMjrSngc0j464qGWQ568VdhNjJ3ABA7VnTAMDirEs2Tjiq0n3TipehhJ3Me+GDWXKBnj8q1b5c9azZF9elHQ5ZlVuTUsI5A71GRg1ND1zUsyZqWuep6VqQZ4xWZa5yDitSEcUjmmXosEVdiHHSqUI9eKvQHHrQzJbnKQzfNnPNalvcAKK5mGbDda0beb1NS0bJm+smeTUqOCetZ9tJ5i9eavQAcUI1THyDIBxxUcijgrxU7EHioyMEccUkWNZcJkg4qMOQuTwKlc4HHSoyA69xjpTRSY4McDALZ/Sq8vyOdwxn2qZFKHt9TUd2pLAjnjmi+pRnyyABsjOeKyrvy3kJKDOMdK2xACOQTn1rK1O2kCkxDlecYptJ6DTOU8Q6FBPF5oQbh8wIA/wrzfXNLZXO2MkE46dK9oglSaIpKvzdGBrmfEumbnJiT5D3x0/SueScWVpLU8YMX2aYo4z6ioZ4tsm6MZXuK6nVtHbezbfmHU461XsLSOWGWOQgNjg1TkrXIsczOmEJx1rPIIPrWzdIyM0TjofSsx1AyO9UiWVHbLU0H34qSSPaoIqIjn60wCXGRg0gO0Zyc9qac4GalUAx0AM39j3qQdMHpUW0kmnZO3BFACk4z6UisMYNNLZHFCYJ5oAVyQeOlGSV9/SmMcU4dM9qAFDcc/yp8ZUnBOKj47807ZxmgCeLAcYxg1ZkX5cY+lUYwd/Bq7vGBk/WhgRlsqcDB96jL88809sEnb0NMUgEg0CHQT7flbke9XEYSDGAR61msOeDzViEkDg5FDQD3IVzkflULMScKOlLI5yT3qSNNybgMUANZiV+br2qKFmD5XtU5cFsECmMNjEDnNCAvWd6YXbKgA9QRVe9mE8nGOew4qMZIwaSKLL5J6UrK9wEeDaDkfjUCkp3xV1ssAjVG8Chcg4pqVgLum6vNauBuJX0/ya6q01C3vUHzBZPQ4rgGyD0x71NBPJEwKMePQ05RUjOUL7HezQAqdygg+1Zd3YMh8y3Zgw9KraZrxUKk/I9f8AJroIJYbld0TAg+/NKNSdLczs0Z2meJtT0iRR5sm1T0YnHb39q7/w/wDE5SES9ODxk/l7/WuHv9PjmVuMH6VgXemywElckdutdMa1Op8SGvI+mdI8YaffxqI7iPkDjIz2/wAa6OC9ilQFHXBr4/tb+6snBilkVh/tH/H2rqdG+IOoWRRZHLqpGc5Pp7+1TLDRl8DKu1ufQmtabDexlkA8wcggf/WrlGtHt5Crqc544rn9F+KcEm1bsBSfp7e9djZa/pWsoPKmj3kdMisnTnDdaGckuhFbofl9q1IAABjrTI7Zc/uyCD+NXIYDuGRWZFixboO47VdiTAHrUUSkdRVuIcc9adhEiD0FTKKYg9KkwfxNNILDhzx/KnqppFXjng1IoplJCgYXPao+rewp0pGCBTV9qaRQoPJ7Uv0pp5OO9OUbjgdKZSHAY57mlYDp+NAzu9qRzljikaIbj5jTm5IHekGfr9KdgA5NMpACM+2cUp659aaOM8UoGSM0FIr3VtDO2ZIlY+6imw2cUbblUA+1WT97mhjjHB/CjUtAp47CpF74FRL61MDgD0plIj53U5sY4600EZzR3ODTKQ3nOe9L2pO1HYU0NEmeARUefU0uTtOeaZ2pooePyo4+tNBO6jp0oKQq9CBQOmfwoB60LwM0FIUnB96b3yKRh60oyBz1pooXdnjilXpgHp1zUbeoGPwp+TtoLQrHAphH508/d+tNP6Uihh+vSl3DA56UEDJIGB7UmMEkUykNcnmq8pwPSp36Hio5Bn1pMdihNyDWVdZJPPFatyCD1rMuRnNSJmZM2MgVRlJdsCrdwPmOKqJxKAoyP5U0rmRdi0ky25cJkn0FWfDGhJHcvNMv3emRWtpOViBYYFWpLyKJf4VB/CpbaVipcq1NRZFAA7jtio5psqQRg5rmb3xLY2gYy3EYI7bq5TWfiXptsGEcquw7Ag1cKb6IwnXit2ekvdKoJ3Csu81i3gRi8yg9eteH6x8U7iYsLVNo9/8A9dcXqXivU74nfOwU9lJH9a29j/M7HPLEt/Cj3XXPH1jaBv3ylvQEV5xrnxFubosloSAfT/8AXXnqie6OZGdvcmtK1sQOvJ9aTlTpke/U3ZJcXl9qDlriVyD2yasW9uF5Pf1pwRYl+bAHqaoX2qJECIyCaxnXlPRFqEYbmnLPDbplyM9hWHqGsNIdqHAFZdzeSTMSScVUYlsdazStqyZTciZpHdySc/WmyA7eeKWGMueuKmng2oDnPvQ5E2K8ZH8VIfvfL1pzLgcHNMLc4AqQFDYPXmrKAADPWq6DHPFSSOWjGB0oAkkZf4SKrscnA4pyKWOSaa5AyB16UAQuOcHpUiYIoCZFNI280AOcD0phO3vSs3HFRM+ScUASq3NSjDjLHiqwYcc1IXG3AoAZIQGOOlMGc0H/ADmkzTAVs4wf0pq5PFDHI5py8dOtIAJwwNB/PNI1KPl680AL05P40pXd0/Kmsx3elSBhjFAEWMHFKQT2qTbkA05kYLntQBCoIP8AQ1Io3HnNJtOelOQMOgoAk24Az1NWYLYuOgquvLD1q9ayMDgcZ9aTuCJIbZsMRwR3qzpECy3PzsAB1NW32xWeFALt1NXNJsFWMNI3WpvoWlqaLKkrDy1DgcDAro9Js2hiDclz2qPw/p8eN2MovrXW6Zai4m+RMKvc0oroaxRJplk3yNLjJ7Yrr7G38oqVXg1Rs7QqybscV0dtGABxXQkdEFYmhA3DOPwq6gDKcVUA/wDrVPCGySa1SNESFRjmq0sG5uDirJbPB61HI204GKNRNGfLbhSSeapy9CM1oSk4P51n3Jz2qXqYT8jLvCMc1nP0PNXbw8ms5zyc0WOWTISKlh61F1NTQfexUszZq2nStSE8d6zLXtWnAeKRzTL0Q6Yq5C2R6VThPpVyPihoyW
sB+eKq3DkZ5qZjxxVW455pEyZVnNULjNXJeRVScfnUmEjOlGT3zVeQfLzVyQc+lVpQcHPNIwkUGHOBU8XOO1ROOaljwGGelJmLNC3JAGa0Ys4HFZ1vjbWlD0H0pHNMuQnjHvV2KqMJq7F+lNmUdzhkthgVZit1AHSrSwc4P86nEQHbJrmchEUEQVs4xir0XSo1GMYp4yBx+VG40SA8UmSBSE9+9A5+lUjRD0OTS9TUecEU4c9eKopMeuMj0p45IxUaAnmnL1pFpj2J6GgUMOh6CjtyKEVccNoGB1oP3cd6Ewe2MUuATnnJp3LQoI2gCkYdMUjfex2pc8D3p7DuIVPHFMZfUdam/iwORSHHU9qRSKckQPGKxtVsFljIKA/hXQuAScdKryIG68iplEo8t1vRVKttFcFq2l+Wx4r3bUrESKSMH/ACPeuL1fSyysSox/n3pJktHj01sQTkc1QkhIzxmu11TTmR2GP1rnbm3weasRiuuM1G3TmtGaEDvVV4xnrSApsPTpTSBjFTMpzimMKAGAetMbrzUmPemjnrQBGQeOtIcmnkY70nagBnsacO2aMc/Wlxg0AAznFO9j0pCKKAA03GTmloxQAgHSnq2etJ9etLjIzjFAD88U1uT1pCeaXvQAoPt+FSZ/CmDOcelOIyPpQAmTnnFOVsHqKaPzoIxj3oAsA56U0Zz82KjBIIwadk4yaBFpXUjjrQSB9aq7iKeHOM0WAsooPNJKhHNQq5FTK24YOealoZXlqIDODU064J54quTg8VSEOI5waaTz605GJ4NBHPFIZG3PJB5oUY4pSOc04c5p3AQHnGeakRyOQaZjHHalxj6VSkKxaiumQjDHHpVyK9GMGsjvTlYnNVzCsbizA/danCV1OQxx9axBIR3OalS5YVSqNCsbkd5IpGCePQ1oW+ryR4+Y1zK3We3NTC4X1rVVu4uU7ix8Syxn/WEYrdsvGMiYzIeK8tWbPQ1IJmB61XNF7is0e0WvjZxj94PwrUg8csMDf+teFJdSKBz+tTLfyA4JNHLBlKUke/Q+Nl/ibNXYvGKHHIOK+fU1WQdGNWo9blUEhjU+yiUqkkfQS+L4zyePxqQeKovWvA08QPg5Y1YTxCwOcmp9kUqrPdG8SxMeCBTT4jjz1NeLL4hPqfapB4g56nn60ezH7U9l/wCEjj9eKd/wkkY968a/4SD1Y4pTr/X5jR7Maqnso8Rx/jR/wkae2a8ZbX+eGPNNbxAR/EcUeyH7Y9mPiNR0PFJ/wkyDqR9K8XbxFgHDGo28RNz8xo9kHtme1jxRGCeRxUUni2Jf4q8Rk8RsecmqsviGRs8mmqQe3Z7hJ4yTPDA1Tn8a44DAV4jJrkv941XfWJXB+Ymn7KIvbSPZLrxxIucSCsO98dzkELJ+teXSajIwwSfzqs107dz+dUowRDnJnc33i+5lB/fEVz95rEsxJaVjz61gPMe5qJpfeh1EthcvcvTXe7r1qs8x9s1VeYCommrKVRspaFsyn1phmweTVJ5ielRF2JrJu4+YuPcHtVd5iT1JqHJpeeM80hCliaQHNCjmnChsQjAU085wKU00ZBIpXGN9aO49KdjvTggNIBmOeKCOhqZV60w/eoAYBSgHNOC54py0AJgdTR5nFOYflTT34oAjY5PNM705854pg5PvQA/IxxTCaOe1IRigAJNJk96OtNPBoAUnNGOODTcUvI6UwE7AdKUdaQg7qeOg70gF6dqOKKCSeo70AMoxznmnH0pO/NADD70o60EZGaVQfxoAUZ7Zp2MjmkApQOOKAEIoIp2KMZNAAOnSpFGDSDtUirwaAJY+RU7EhcUyEDb71OsYbn8qVgGQRb26Vq2toSen6UafbBsciun0uwVsA4oZaRnWWnFnG1evpXWaR4fLbWcVqaPpaggkCuusLNRjOKTZrGCKOn6UEQDb0rbt7PC/1q7BCqrwPwq9CgHQfhVRia2K9pahCMj3rRij65HHaiMDPGKmXjGRWti0AXHHfFPQelJt5BzUiD86pIaDpTwaQ9aVOfoKuwx4+gNAznnpQDnPvS9qaHcQjn2pp6c04/rSelMBjdM1Xl6HFWSOKrTcqRipZEilLiqkwOKtyfSqk2fTNIxkVH7ZNVpjVhz6iqkpOKlowkVH4bHrzT4+ue1MkxnJHFSR/hQzBmhb8gVoRHKjms63NX4SR0qTnkXoj2OfpVyI5z6VRgIBOTV2LmqZmkf/2Q=="}} diff --git a/models/research/maskgan/README.md b/models/research/maskgan/README.md new file mode 100644 index 0000000000000000000000000000000000000000..10ee8a4c4dd546983469b07e2fb8207fc200534d --- /dev/null +++ b/models/research/maskgan/README.md @@ -0,0 +1,111 @@ +![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +# MaskGAN: Better Text Generation via Filling in the ______ + +Code for [*MaskGAN: Better Text Generation via Filling in the +______*](https://arxiv.org/abs/1801.07736) published at ICLR 2018. + +## Requirements + +* TensorFlow >= v1.5 + +## Instructions + +Warning: The open-source version of this code is still in the process of being +tested. Pretraining may not work correctly. + +For training on PTB: + +1. Follow instructions here ([Tensorflow RNN Language Model Tutorial](https://www.tensorflow.org/tutorials/sequences/recurrent)) to train a language model on PTB dataset. +Copy PTB data downloaded from the above tensorflow RNN tutorial to folder "/tmp/ptb". 
It should contain following three files: ptb.train.txt, ptb.test.txt, ptb.valid.txt +Make folder /tmp/pretrain-lm and copy checkpoints from above Tensorflow RNN tutorial under this folder. + + +2. Run MaskGAN in MLE pretraining mode. If step 1 was not run*, set +`language_model_ckpt_dir` to empty. + +```bash +python train_mask_gan.py \ + --data_dir='/tmp/ptb' \ + --batch_size=20 \ + --sequence_length=20 \ + --base_directory='/tmp/maskGAN' \ + --hparams="gen_rnn_size=650,dis_rnn_size=650,gen_num_layers=2,dis_num_layers=2,gen_learning_rate=0.00074876,dis_learning_rate=5e-4,baseline_decay=0.99,dis_train_iterations=1,gen_learning_rate_decay=0.95" \ + --mode='TRAIN' \ + --max_steps=100000 \ + --language_model_ckpt_dir=/tmp/pretrain-lm/ \ + --generator_model='seq2seq_vd' \ + --discriminator_model='rnn_zaremba' \ + --is_present_rate=0.5 \ + --summaries_every=10 \ + --print_every=250 \ + --max_num_to_print=3 \ + --gen_training_strategy=cross_entropy \ + --seq2seq_share_embedding +``` + +3. Run MaskGAN in GAN mode. If step 2 was not run, set `maskgan_ckpt` to empty. +```bash +python train_mask_gan.py \ + --data_dir='/tmp/ptb' \ + --batch_size=128 \ + --sequence_length=20 \ + --base_directory='/tmp/maskGAN' \ + --mask_strategy=contiguous \ + --maskgan_ckpt='/tmp/maskGAN' \ + --hparams="gen_rnn_size=650,dis_rnn_size=650,gen_num_layers=2,dis_num_layers=2,gen_learning_rate=0.000038877,gen_learning_rate_decay=1.0,gen_full_learning_rate_steps=2000000,gen_vd_keep_prob=0.33971,rl_discount_rate=0.89072,dis_learning_rate=5e-4,baseline_decay=0.99,dis_train_iterations=2,dis_pretrain_learning_rate=0.005,critic_learning_rate=5.1761e-7,dis_vd_keep_prob=0.71940" \ + --mode='TRAIN' \ + --max_steps=100000 \ + --generator_model='seq2seq_vd' \ + --discriminator_model='seq2seq_vd' \ + --is_present_rate=0.5 \ + --summaries_every=250 \ + --print_every=250 \ + --max_num_to_print=3 \ + --gen_training_strategy='reinforce' \ + --seq2seq_share_embedding=true \ + --baseline_method=critic \ + --attention_option=luong +``` + +4. Generate samples: +```bash +python generate_samples.py \ + --data_dir /tmp/ptb/ \ + --data_set=ptb \ + --batch_size=256 \ + --sequence_length=20 \ + --base_directory /tmp/imdbsample/ \ + --hparams="gen_rnn_size=650,dis_rnn_size=650,gen_num_layers=2,gen_vd_keep_prob=0.33971" \ + --generator_model=seq2seq_vd \ + --discriminator_model=seq2seq_vd \ + --is_present_rate=0.0 \ + --maskgan_ckpt=/tmp/maskGAN \ + --seq2seq_share_embedding=True \ + --dis_share_embedding=True \ + --attention_option=luong \ + --mask_strategy=contiguous \ + --baseline_method=critic \ + --number_epochs=4 +``` + + +* While trying to run Step 2, the following error appears: + NotFoundError (see above for traceback): Restoring from checkpoint failed. This is most likely due to a Variable name or other graph key that is missing from the checkpoint. Please ensure that you have not altered the graph expected based on the checkpoint. Original error: + + Key critic/rnn/biases not found in checkpoint + [[node save/RestoreV2 (defined at train_mask_gan.py:431) ]] + + This is an issue with seq2seq model because it uses the attention mechanism. + The issue arises if you saved the model with an earlier version (seq2seq is old) and restore with a recent one (saver.restore got updated). + The naming convention for LSTM parameters changed, e.g. cell_0/basic_lstm_cell/weights became cell_0/basic_lstm_cell/kernel. + Which is why you cannot restore them if you try to restore old checkpoints with recent TF. 
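+As a rough sketch, the conversion script linked below can be run with the old and new checkpoint prefixes (the paths here are placeholders, and the exact flags may differ across TF versions, so check the script's `--help`):
+
+```bash
+python checkpoint_convert.py \
+  /tmp/pretrain-lm/model.ckpt-OLD \
+  /tmp/pretrain-lm-converted/model.ckpt-OLD
+```
+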
+ The below script will help rename the variables and everything will work as expected. + https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/rnn/python/tools/checkpoint_convert.py + +## Contact for Issues + +* Liam Fedus, @liamb315 +* Andrew M. Dai, @a-dai diff --git a/models/research/maskgan/data/__init__.py b/models/research/maskgan/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/maskgan/data/imdb_loader.py b/models/research/maskgan/data/imdb_loader.py new file mode 100644 index 0000000000000000000000000000000000000000..8169b3336b4ac0e1a36e35dbaed4c01f38f1ec02 --- /dev/null +++ b/models/research/maskgan/data/imdb_loader.py @@ -0,0 +1,136 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""IMDB data loader and helpers.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +# Dependency imports +import numpy as np + +import tensorflow as tf + +FLAGS = tf.app.flags.FLAGS +tf.app.flags.DEFINE_boolean('prefix_label', True, 'Vocabulary file.') + +np.set_printoptions(precision=3) +np.set_printoptions(suppress=True) + +EOS_INDEX = 88892 + + +def _read_words(filename, use_prefix=True): + all_words = [] + sequence_example = tf.train.SequenceExample() + for r in tf.python_io.tf_record_iterator(filename): + sequence_example.ParseFromString(r) + + if FLAGS.prefix_label and use_prefix: + label = sequence_example.context.feature['class'].int64_list.value[0] + review_words = [EOS_INDEX + 1 + label] + else: + review_words = [] + review_words.extend([ + f.int64_list.value[0] + for f in sequence_example.feature_lists.feature_list['token_id'].feature + ]) + all_words.append(review_words) + return all_words + + +def build_vocab(vocab_file): + word_to_id = {} + + with tf.gfile.GFile(vocab_file, 'r') as f: + index = 0 + for word in f: + word_to_id[word.strip()] = index + index += 1 + word_to_id[''] = EOS_INDEX + + return word_to_id + + +def imdb_raw_data(data_path=None): + """Load IMDB raw data from data directory "data_path". + Reads IMDB tf record files containing integer ids, + and performs mini-batching of the inputs. + Args: + data_path: string path to the directory where simple-examples.tgz has + been extracted. + Returns: + tuple (train_data, valid_data) + where each of the data objects can be passed to IMDBIterator. + """ + + train_path = os.path.join(data_path, 'train_lm.tfrecords') + valid_path = os.path.join(data_path, 'test_lm.tfrecords') + + train_data = _read_words(train_path) + valid_data = _read_words(valid_path) + return train_data, valid_data + + +def imdb_iterator(raw_data, batch_size, num_steps, epoch_size_override=None): + """Iterate on the raw IMDB data. 
+ + This generates batch_size pointers into the raw IMDB data, and allows + minibatch iteration along these pointers. + + Args: + raw_data: one of the raw data outputs from imdb_raw_data. + batch_size: int, the batch size. + num_steps: int, the number of unrolls. + + Yields: + Pairs of the batched data, each a matrix of shape [batch_size, num_steps]. + The second element of the tuple is the same data time-shifted to the + right by one. The third is a set of weights with 1 indicating a word was + present and 0 not. + + Raises: + ValueError: if batch_size or num_steps are too high. + """ + del epoch_size_override + data_len = len(raw_data) + num_batches = data_len // batch_size - 1 + + for batch in range(num_batches): + x = np.zeros([batch_size, num_steps], dtype=np.int32) + y = np.zeros([batch_size, num_steps], dtype=np.int32) + w = np.zeros([batch_size, num_steps], dtype=np.float) + + for i in range(batch_size): + data_index = batch * batch_size + i + example = raw_data[data_index] + + if len(example) > num_steps: + final_x = example[:num_steps] + final_y = example[1:(num_steps + 1)] + w[i] = 1 + + else: + to_fill_in = num_steps - len(example) + final_x = example + [EOS_INDEX] * to_fill_in + final_y = final_x[1:] + [EOS_INDEX] + w[i] = [1] * len(example) + [0] * to_fill_in + + x[i] = final_x + y[i] = final_y + + yield (x, y, w) diff --git a/models/research/maskgan/data/ptb_loader.py b/models/research/maskgan/data/ptb_loader.py new file mode 100644 index 0000000000000000000000000000000000000000..43105952a667f968faf12a4561f85964f0a123ae --- /dev/null +++ b/models/research/maskgan/data/ptb_loader.py @@ -0,0 +1,123 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""PTB data loader and helpers.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import os +# Dependency imports +import numpy as np + +import tensorflow as tf + +EOS_INDEX = 0 + + +def _read_words(filename): + with tf.gfile.GFile(filename, "r") as f: + return f.read().decode("utf-8").replace("\n", "").split() + + +def build_vocab(filename): + data = _read_words(filename) + + counter = collections.Counter(data) + count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0])) + + words, _ = list(zip(*count_pairs)) + word_to_id = dict(zip(words, range(len(words)))) + print(":", word_to_id[""]) + global EOS_INDEX + EOS_INDEX = word_to_id[""] + + return word_to_id + + +def _file_to_word_ids(filename, word_to_id): + data = _read_words(filename) + return [word_to_id[word] for word in data if word in word_to_id] + + +def ptb_raw_data(data_path=None): + """Load PTB raw data from data directory "data_path". + Reads PTB text files, converts strings to integer ids, + and performs mini-batching of the inputs. 
+ The PTB dataset comes from Tomas Mikolov's webpage: + http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz + Args: + data_path: string path to the directory where simple-examples.tgz has + been extracted. + Returns: + tuple (train_data, valid_data, test_data, vocabulary) + where each of the data objects can be passed to PTBIterator. + """ + + train_path = os.path.join(data_path, "ptb.train.txt") + valid_path = os.path.join(data_path, "ptb.valid.txt") + test_path = os.path.join(data_path, "ptb.test.txt") + + word_to_id = build_vocab(train_path) + train_data = _file_to_word_ids(train_path, word_to_id) + valid_data = _file_to_word_ids(valid_path, word_to_id) + test_data = _file_to_word_ids(test_path, word_to_id) + vocabulary = len(word_to_id) + return train_data, valid_data, test_data, vocabulary + + +def ptb_iterator(raw_data, batch_size, num_steps, epoch_size_override=None): + """Iterate on the raw PTB data. + + This generates batch_size pointers into the raw PTB data, and allows + minibatch iteration along these pointers. + + Args: + raw_data: one of the raw data outputs from ptb_raw_data. + batch_size: int, the batch size. + num_steps: int, the number of unrolls. + + Yields: + Pairs of the batched data, each a matrix of shape [batch_size, num_steps]. + The second element of the tuple is the same data time-shifted to the + right by one. + + Raises: + ValueError: if batch_size or num_steps are too high. + """ + raw_data = np.array(raw_data, dtype=np.int32) + + data_len = len(raw_data) + batch_len = data_len // batch_size + data = np.full([batch_size, batch_len], EOS_INDEX, dtype=np.int32) + for i in range(batch_size): + data[i] = raw_data[batch_len * i:batch_len * (i + 1)] + + if epoch_size_override: + epoch_size = epoch_size_override + else: + epoch_size = (batch_len - 1) // num_steps + + if epoch_size == 0: + raise ValueError("epoch_size == 0, decrease batch_size or num_steps") + + # print("Number of batches per epoch: %d" % epoch_size) + for i in range(epoch_size): + x = data[:, i * num_steps:(i + 1) * num_steps] + y = data[:, i * num_steps + 1:(i + 1) * num_steps + 1] + w = np.ones_like(x) + yield (x, y, w) diff --git a/models/research/maskgan/generate_samples.py b/models/research/maskgan/generate_samples.py new file mode 100644 index 0000000000000000000000000000000000000000..d4215ebc75a074b316010eb60189bf7428dfcfc5 --- /dev/null +++ b/models/research/maskgan/generate_samples.py @@ -0,0 +1,281 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Generate samples from the MaskGAN. 
+ +Launch command: + python generate_samples.py + --data_dir=/tmp/data/imdb --data_set=imdb + --batch_size=256 --sequence_length=20 --base_directory=/tmp/imdb + --hparams="gen_rnn_size=650,dis_rnn_size=650,gen_num_layers=2, + gen_vd_keep_prob=1.0" --generator_model=seq2seq_vd + --discriminator_model=seq2seq_vd --is_present_rate=0.5 + --maskgan_ckpt=/tmp/model.ckpt-45494 + --seq2seq_share_embedding=True --dis_share_embedding=True + --attention_option=luong --mask_strategy=contiguous --baseline_method=critic + --number_epochs=4 +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from functools import partial +import os +# Dependency imports + +import numpy as np +from six.moves import xrange +import tensorflow as tf + +import train_mask_gan +from data import imdb_loader +from data import ptb_loader + +# Data. +from model_utils import helper +from model_utils import model_utils + +SAMPLE_TRAIN = 'TRAIN' +SAMPLE_VALIDATION = 'VALIDATION' + +## Sample Generation. +## Binary and setup FLAGS. +tf.app.flags.DEFINE_enum('sample_mode', 'TRAIN', + [SAMPLE_TRAIN, SAMPLE_VALIDATION], + 'Dataset to sample from.') +tf.app.flags.DEFINE_string('output_path', '/tmp', 'Model output directory.') +tf.app.flags.DEFINE_boolean( + 'output_masked_logs', False, + 'Whether to display for human evaluation (show masking).') +tf.app.flags.DEFINE_integer('number_epochs', 1, + 'The number of epochs to produce.') + +FLAGS = tf.app.flags.FLAGS + + +def get_iterator(data): + """Return the data iterator.""" + if FLAGS.data_set == 'ptb': + iterator = ptb_loader.ptb_iterator(data, FLAGS.batch_size, + FLAGS.sequence_length, + FLAGS.epoch_size_override) + elif FLAGS.data_set == 'imdb': + iterator = imdb_loader.imdb_iterator(data, FLAGS.batch_size, + FLAGS.sequence_length) + return iterator + + +def convert_to_human_readable(id_to_word, arr, p, max_num_to_print): + """Convert a np.array of indices into words using id_to_word dictionary. + Return max_num_to_print results. + """ + + assert arr.ndim == 2 + + samples = [] + for sequence_id in xrange(min(len(arr), max_num_to_print)): + sample = [] + for i, index in enumerate(arr[sequence_id, :]): + if p[sequence_id, i] == 1: + sample.append(str(id_to_word[index])) + else: + sample.append('*' + str(id_to_word[index])) + buffer_str = ' '.join(sample) + samples.append(buffer_str) + return samples + + +def write_unmasked_log(log, id_to_word, sequence_eval): + """Helper function for logging evaluated sequences without mask.""" + indices_arr = np.asarray(sequence_eval) + samples = helper.convert_to_human_readable(id_to_word, indices_arr, + FLAGS.batch_size) + for sample in samples: + log.write(sample + '\n') + log.flush() + return samples + + +def write_masked_log(log, id_to_word, sequence_eval, present_eval): + indices_arr = np.asarray(sequence_eval) + samples = convert_to_human_readable(id_to_word, indices_arr, present_eval, + FLAGS.batch_size) + for sample in samples: + log.write(sample + '\n') + log.flush() + return samples + + +def generate_logs(sess, model, log, id_to_word, feed): + """Impute Sequences using the model for a particular feed and send it to + logs. + """ + # Impute Sequences. + [p, inputs_eval, sequence_eval] = sess.run( + [model.present, model.inputs, model.fake_sequence], feed_dict=feed) + + # Add the 0th time-step for coherence. + first_token = np.expand_dims(inputs_eval[:, 0], axis=1) + sequence_eval = np.concatenate((first_token, sequence_eval), axis=1) + + # 0th token always present. 
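+  # Prepend a matching column of ones so the re-attached first token is
+  # marked as present and `p` stays aligned with the widened `sequence_eval`.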
+ p = np.concatenate((np.ones((FLAGS.batch_size, 1)), p), axis=1) + + if FLAGS.output_masked_logs: + samples = write_masked_log(log, id_to_word, sequence_eval, p) + else: + samples = write_unmasked_log(log, id_to_word, sequence_eval) + return samples + + +def generate_samples(hparams, data, id_to_word, log_dir, output_file): + """"Generate samples. + + Args: + hparams: Hyperparameters for the MaskGAN. + data: Data to evaluate. + id_to_word: Dictionary of indices to words. + log_dir: Log directory. + output_file: Output file for the samples. + """ + # Boolean indicating operational mode. + is_training = False + + # Set a random seed to keep fixed mask. + np.random.seed(0) + + with tf.Graph().as_default(): + # Construct the model. + model = train_mask_gan.create_MaskGAN(hparams, is_training) + + ## Retrieve the initial savers. + init_savers = model_utils.retrieve_init_savers(hparams) + + ## Initial saver function to supervisor. + init_fn = partial(model_utils.init_fn, init_savers) + + is_chief = FLAGS.task == 0 + + # Create the supervisor. It will take care of initialization, summaries, + # checkpoints, and recovery. + sv = tf.Supervisor( + logdir=log_dir, + is_chief=is_chief, + saver=model.saver, + global_step=model.global_step, + recovery_wait_secs=30, + summary_op=None, + init_fn=init_fn) + + # Get an initialized, and possibly recovered session. Launch the + # services: Checkpointing, Summaries, step counting. + # + # When multiple replicas of this program are running the services are + # only launched by the 'chief' replica. + with sv.managed_session( + FLAGS.master, start_standard_services=False) as sess: + + # Generator statefulness over the epoch. + [gen_initial_state_eval, fake_gen_initial_state_eval] = sess.run( + [model.eval_initial_state, model.fake_gen_initial_state]) + + for n in xrange(FLAGS.number_epochs): + print('Epoch number: %d' % n) + # print('Percent done: %.2f' % float(n) / float(FLAGS.number_epochs)) + iterator = get_iterator(data) + for x, y, _ in iterator: + if FLAGS.eval_language_model: + is_present_rate = 0. + else: + is_present_rate = FLAGS.is_present_rate + tf.logging.info( + 'Evaluating on is_present_rate=%.3f.' % is_present_rate) + + model_utils.assign_percent_real(sess, model.percent_real_update, + model.new_rate, is_present_rate) + + # Randomly mask out tokens. + p = model_utils.generate_mask() + + eval_feed = {model.inputs: x, model.targets: y, model.present: p} + + if FLAGS.data_set == 'ptb': + # Statefulness for *evaluation* Generator. + for i, (c, h) in enumerate(model.eval_initial_state): + eval_feed[c] = gen_initial_state_eval[i].c + eval_feed[h] = gen_initial_state_eval[i].h + + # Statefulness for the Generator. + for i, (c, h) in enumerate(model.fake_gen_initial_state): + eval_feed[c] = fake_gen_initial_state_eval[i].c + eval_feed[h] = fake_gen_initial_state_eval[i].h + + [gen_initial_state_eval, fake_gen_initial_state_eval, _] = sess.run( + [ + model.eval_final_state, model.fake_gen_final_state, + model.global_step + ], + feed_dict=eval_feed) + + generate_logs(sess, model, output_file, id_to_word, eval_feed) + output_file.close() + print('Closing output_file.') + return + + +def main(_): + hparams = train_mask_gan.create_hparams() + log_dir = FLAGS.base_directory + + tf.gfile.MakeDirs(FLAGS.output_path) + output_file = tf.gfile.GFile( + os.path.join(FLAGS.output_path, 'reviews.txt'), mode='w') + + # Load data set. 
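+  # The PTB loader returns (train, valid, test, vocab_size) while the IMDB
+  # loader returns (train, valid); only the train/valid splits are needed
+  # for sampling.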
+ if FLAGS.data_set == 'ptb': + raw_data = ptb_loader.ptb_raw_data(FLAGS.data_dir) + train_data, valid_data, _, _ = raw_data + elif FLAGS.data_set == 'imdb': + raw_data = imdb_loader.imdb_raw_data(FLAGS.data_dir) + train_data, valid_data = raw_data + else: + raise NotImplementedError + + # Generating more data on train set. + if FLAGS.sample_mode == SAMPLE_TRAIN: + data_set = train_data + elif FLAGS.sample_mode == SAMPLE_VALIDATION: + data_set = valid_data + else: + raise NotImplementedError + + # Dictionary and reverse dictionry. + if FLAGS.data_set == 'ptb': + word_to_id = ptb_loader.build_vocab( + os.path.join(FLAGS.data_dir, 'ptb.train.txt')) + elif FLAGS.data_set == 'imdb': + word_to_id = imdb_loader.build_vocab( + os.path.join(FLAGS.data_dir, 'vocab.txt')) + id_to_word = {v: k for k, v in word_to_id.iteritems()} + + FLAGS.vocab_size = len(id_to_word) + print('Vocab size: %d' % FLAGS.vocab_size) + + generate_samples(hparams, data_set, id_to_word, log_dir, output_file) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/maskgan/losses/__init__.py b/models/research/maskgan/losses/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/maskgan/losses/losses.py b/models/research/maskgan/losses/losses.py new file mode 100644 index 0000000000000000000000000000000000000000..38d0e7b4d13cfae9652d8c70f08bfba5c478e150 --- /dev/null +++ b/models/research/maskgan/losses/losses.py @@ -0,0 +1,186 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Losses for Generator and Discriminator.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + + +def discriminator_loss(predictions, labels, missing_tokens): + """Discriminator loss based on predictions and labels. + + Args: + predictions: Discriminator linear predictions Tensor of shape [batch_size, + sequence_length] + labels: Labels for predictions, Tensor of shape [batch_size, + sequence_length] + missing_tokens: Indicator for the missing tokens. Evaluate the loss only + on the tokens that were missing. + + Returns: + loss: Scalar tf.float32 loss. + + """ + loss = tf.losses.sigmoid_cross_entropy(labels, + predictions, + weights=missing_tokens) + loss = tf.Print( + loss, [loss, labels, missing_tokens], + message='loss, labels, missing_tokens', + summarize=25, + first_n=25) + return loss + + +def cross_entropy_loss_matrix(gen_labels, gen_logits): + """Computes the cross entropy loss for G. + + Args: + gen_labels: Labels for the correct token. + gen_logits: Generator logits. + + Returns: + loss_matrix: Loss matrix of shape [batch_size, sequence_length]. 
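+      No masking or reduction is applied here; callers that only want the
+      loss on the missing tokens mask this matrix afterwards (see
+      create_masked_cross_entropy_loss in model_utils/model_losses.py).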
+ """ + cross_entropy_loss = tf.nn.sparse_softmax_cross_entropy_with_logits( + labels=gen_labels, logits=gen_logits) + return cross_entropy_loss + + +def GAN_loss_matrix(dis_predictions): + """Computes the cross entropy loss for G. + + Args: + dis_predictions: Discriminator predictions. + + Returns: + loss_matrix: Loss matrix of shape [batch_size, sequence_length]. + """ + eps = tf.constant(1e-7, tf.float32) + gan_loss_matrix = -tf.log(dis_predictions + eps) + return gan_loss_matrix + + +def generator_GAN_loss(predictions): + """Generator GAN loss based on Discriminator predictions.""" + return -tf.log(tf.reduce_mean(predictions)) + + +def generator_blended_forward_loss(gen_logits, gen_labels, dis_predictions, + is_real_input): + """Computes the masked-loss for G. This will be a blend of cross-entropy + loss where the true label is known and GAN loss where the true label has been + masked. + + Args: + gen_logits: Generator logits. + gen_labels: Labels for the correct token. + dis_predictions: Discriminator predictions. + is_real_input: Tensor indicating whether the label is present. + + Returns: + loss: Scalar tf.float32 total loss. + """ + cross_entropy_loss = tf.nn.sparse_softmax_cross_entropy_with_logits( + labels=gen_labels, logits=gen_logits) + gan_loss = -tf.log(dis_predictions) + loss_matrix = tf.where(is_real_input, cross_entropy_loss, gan_loss) + return tf.reduce_mean(loss_matrix) + + +def wasserstein_generator_loss(gen_logits, gen_labels, dis_values, + is_real_input): + """Computes the masked-loss for G. This will be a blend of cross-entropy + loss where the true label is known and GAN loss where the true label is + missing. + + Args: + gen_logits: Generator logits. + gen_labels: Labels for the correct token. + dis_values: Discriminator values Tensor of shape [batch_size, + sequence_length]. + is_real_input: Tensor indicating whether the label is present. + + Returns: + loss: Scalar tf.float32 total loss. + """ + cross_entropy_loss = tf.nn.sparse_softmax_cross_entropy_with_logits( + labels=gen_labels, logits=gen_logits) + # Maximize the dis_values (minimize the negative) + gan_loss = -dis_values + loss_matrix = tf.where(is_real_input, cross_entropy_loss, gan_loss) + loss = tf.reduce_mean(loss_matrix) + return loss + + +def wasserstein_discriminator_loss(real_values, fake_values): + """Wasserstein discriminator loss. + + Args: + real_values: Value given by the Wasserstein Discriminator to real data. + fake_values: Value given by the Wasserstein Discriminator to fake data. + + Returns: + loss: Scalar tf.float32 loss. + + """ + real_avg = tf.reduce_mean(real_values) + fake_avg = tf.reduce_mean(fake_values) + + wasserstein_loss = real_avg - fake_avg + return wasserstein_loss + + +def wasserstein_discriminator_loss_intrabatch(values, is_real_input): + """Wasserstein discriminator loss. This is an odd variant where the value + difference is between the real tokens and the fake tokens within a single + batch. + + Args: + values: Value given by the Wasserstein Discriminator of shape [batch_size, + sequence_length] to an imputed batch (real and fake). + is_real_input: tf.bool Tensor of shape [batch_size, sequence_length]. If + true, it indicates that the label is known. + + Returns: + wasserstein_loss: Scalar tf.float32 loss. + + """ + zero_tensor = tf.constant(0., dtype=tf.float32, shape=[]) + + present = tf.cast(is_real_input, tf.float32) + missing = tf.cast(1 - present, tf.float32) + + # Counts for real and fake tokens. 
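+  # `present` and `missing` are complementary {0, 1} masks of shape
+  # [batch_size, sequence_length]; their sums normalize the separate real
+  # and fake averages computed below.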
+ real_count = tf.reduce_sum(present) + fake_count = tf.reduce_sum(missing) + + # Averages for real and fake token values. + real = tf.mul(values, present) + fake = tf.mul(values, missing) + real_avg = tf.reduce_sum(real) / real_count + fake_avg = tf.reduce_sum(fake) / fake_count + + # If there are no real or fake entries in the batch, we assign an average + # value of zero. + real_avg = tf.where(tf.equal(real_count, 0), zero_tensor, real_avg) + fake_avg = tf.where(tf.equal(fake_count, 0), zero_tensor, fake_avg) + + wasserstein_loss = real_avg - fake_avg + return wasserstein_loss diff --git a/models/research/maskgan/model_utils/__init__.py b/models/research/maskgan/model_utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/maskgan/model_utils/helper.py b/models/research/maskgan/model_utils/helper.py new file mode 100644 index 0000000000000000000000000000000000000000..36115b484a007cda715b038e5cf52cbdd0b072ba --- /dev/null +++ b/models/research/maskgan/model_utils/helper.py @@ -0,0 +1,158 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Random helper functions for converting between indices and one-hot encodings +as well as printing/logging helpers. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from six.moves import xrange +import tensorflow as tf + + +def variable_summaries(var, name): + """Attach a lot of summaries to a Tensor.""" + mean = tf.reduce_mean(var) + tf.summary.scalar('mean/' + name, mean) + with tf.name_scope('stddev'): + stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean))) + tf.summary.scalar('sttdev/' + name, stddev) + tf.summary.scalar('max/' + name, tf.reduce_max(var)) + tf.summary.scalar('min/' + name, tf.reduce_min(var)) + tf.summary.histogram(name, var) + + +def zip_seq_pred_crossent(id_to_word, sequences, predictions, cross_entropy): + """Zip together the sequences, predictions, cross entropy.""" + indices = convert_to_indices(sequences) + + batch_of_metrics = [] + + for ind_batch, pred_batch, crossent_batch in zip(indices, predictions, + cross_entropy): + metrics = [] + + for index, pred, crossent in zip(ind_batch, pred_batch, crossent_batch): + metrics.append([str(id_to_word[index]), pred, crossent]) + + batch_of_metrics.append(metrics) + return batch_of_metrics + + +def print_and_log(log, id_to_word, sequence_eval, max_num_to_print=5): + """Helper function for printing and logging evaluated sequences.""" + indices_eval = convert_to_indices(sequence_eval) + indices_arr = np.asarray(indices_eval) + samples = convert_to_human_readable(id_to_word, indices_arr, max_num_to_print) + + for i, sample in enumerate(samples): + print('Sample', i, '. ', sample) + log.write('\nSample ' + str(i) + '. 
' + sample) + log.write('\n') + print('\n') + log.flush() + + +def convert_to_human_readable(id_to_word, arr, max_num_to_print): + """Convert a np.array of indices into words using id_to_word dictionary. + Return max_num_to_print results. + """ + assert arr.ndim == 2 + + samples = [] + for sequence_id in xrange(min(len(arr), max_num_to_print)): + buffer_str = ' '.join( + [str(id_to_word[index]) for index in arr[sequence_id, :]]) + samples.append(buffer_str) + return samples + + +def index_to_vocab_array(indices, vocab_size, sequence_length): + """Convert the indices into an array with vocab_size one-hot encoding.""" + + # Extract properties of the indices. + num_batches = len(indices) + shape = list(indices.shape) + shape.append(vocab_size) + + # Construct the vocab_size array. + new_arr = np.zeros(shape) + + for n in xrange(num_batches): + indices_batch = indices[n] + new_arr_batch = new_arr[n] + + # We map all indices greater than the vocabulary size to an unknown + # character. + indices_batch = np.where(indices_batch < vocab_size, indices_batch, + vocab_size - 1) + + # Convert indices to vocab_size dimensions. + new_arr_batch[np.arange(sequence_length), indices_batch] = 1 + return new_arr + + +def convert_to_indices(sequences): + """Convert a list of size [batch_size, sequence_length, vocab_size] to + a list of size [batch_size, sequence_length] where the vocab element is + denoted by the index. + """ + batch_of_indices = [] + + for sequence in sequences: + indices = [] + for embedding in sequence: + indices.append(np.argmax(embedding)) + batch_of_indices.append(indices) + return batch_of_indices + + +def convert_and_zip(id_to_word, sequences, predictions): + """Helper function for printing or logging. Retrieves list of sequences + and predictions and zips them together. + """ + indices = convert_to_indices(sequences) + + batch_of_indices_predictions = [] + + for index_batch, pred_batch in zip(indices, predictions): + indices_predictions = [] + + for index, pred in zip(index_batch, pred_batch): + indices_predictions.append([str(id_to_word[index]), pred]) + batch_of_indices_predictions.append(indices_predictions) + return batch_of_indices_predictions + + +def recursive_length(item): + """Recursively determine the total number of elements in nested list.""" + if type(item) == list: + return sum(recursive_length(subitem) for subitem in item) + else: + return 1. + + +def percent_correct(real_sequence, fake_sequences): + """Determine the percent of tokens correctly generated within a batch.""" + identical = 0. + for fake_sequence in fake_sequences: + for real, fake in zip(real_sequence, fake_sequence): + if real == fake: + identical += 1. + return identical / recursive_length(fake_sequences) diff --git a/models/research/maskgan/model_utils/model_construction.py b/models/research/maskgan/model_utils/model_construction.py new file mode 100644 index 0000000000000000000000000000000000000000..8dfa1df343984d903ace5984a90c36cc0b67dbe3 --- /dev/null +++ b/models/research/maskgan/model_utils/model_construction.py @@ -0,0 +1,234 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Model construction.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# Dependency imports + +import tensorflow as tf +from models import bidirectional +from models import bidirectional_vd + +from models import bidirectional_zaremba +from models import cnn +from models import critic_vd +from models import feedforward +from models import rnn +from models import rnn_nas +from models import rnn_vd +from models import rnn_zaremba +from models import seq2seq +from models import seq2seq_nas +from models import seq2seq_vd +from models import seq2seq_zaremba + +FLAGS = tf.app.flags.FLAGS + + +# TODO(adai): IMDB labels placeholder to model. +def create_generator(hparams, + inputs, + targets, + present, + is_training, + is_validating, + reuse=None): + """Create the Generator model specified by the FLAGS and hparams. + + Args; + hparams: Hyperparameters for the MaskGAN. + inputs: tf.int32 Tensor of the sequence input of shape [batch_size, + sequence_length]. + present: tf.bool Tensor indicating the presence or absence of the token + of shape [batch_size, sequence_length]. + is_training: Whether the model is training. + is_validating: Whether the model is being run in validation mode for + calculating the perplexity. + reuse (Optional): Whether to reuse the model. + + Returns: + Tuple of the (sequence, logits, log_probs) of the Generator. Sequence + and logits have shape [batch_size, sequence_length, vocab_size]. The + log_probs will have shape [batch_size, sequence_length]. Log_probs + corresponds to the log probability of selecting the words. 
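+    The tuple additionally carries the Generator's initial_state and
+    final_state, and, for the 'seq2seq_vd' model (the only branch that
+    produces them), its encoder_states.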
+ """ + if FLAGS.generator_model == 'rnn': + (sequence, logits, log_probs, initial_state, final_state) = rnn.generator( + hparams, + inputs, + targets, + present, + is_training=is_training, + is_validating=is_validating, + reuse=reuse) + elif FLAGS.generator_model == 'rnn_zaremba': + (sequence, logits, log_probs, initial_state, + final_state) = rnn_zaremba.generator( + hparams, + inputs, + targets, + present, + is_training=is_training, + is_validating=is_validating, + reuse=reuse) + elif FLAGS.generator_model == 'seq2seq': + (sequence, logits, log_probs, initial_state, + final_state) = seq2seq.generator( + hparams, + inputs, + targets, + present, + is_training=is_training, + is_validating=is_validating, + reuse=reuse) + elif FLAGS.generator_model == 'seq2seq_zaremba': + (sequence, logits, log_probs, initial_state, + final_state) = seq2seq_zaremba.generator( + hparams, + inputs, + targets, + present, + is_training=is_training, + is_validating=is_validating, + reuse=reuse) + elif FLAGS.generator_model == 'rnn_nas': + (sequence, logits, log_probs, initial_state, + final_state) = rnn_nas.generator( + hparams, + inputs, + targets, + present, + is_training=is_training, + is_validating=is_validating, + reuse=reuse) + elif FLAGS.generator_model == 'seq2seq_nas': + (sequence, logits, log_probs, initial_state, + final_state) = seq2seq_nas.generator( + hparams, + inputs, + targets, + present, + is_training=is_training, + is_validating=is_validating, + reuse=reuse) + elif FLAGS.generator_model == 'seq2seq_vd': + (sequence, logits, log_probs, initial_state, final_state, + encoder_states) = seq2seq_vd.generator( + hparams, + inputs, + targets, + present, + is_training=is_training, + is_validating=is_validating, + reuse=reuse) + else: + raise NotImplementedError + return (sequence, logits, log_probs, initial_state, final_state, + encoder_states) + + +def create_discriminator(hparams, + sequence, + is_training, + reuse=None, + initial_state=None, + inputs=None, + present=None): + """Create the Discriminator model specified by the FLAGS and hparams. + + Args: + hparams: Hyperparameters for the MaskGAN. + sequence: tf.int32 Tensor sequence of shape [batch_size, sequence_length] + is_training: Whether the model is training. + reuse (Optional): Whether to reuse the model. 
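+    initial_state (Optional): Initial RNN state, used by the 'rnn_vd' and
+      'bidirectional_vd' discriminators.
+    inputs (Optional): tf.int32 Tensor of the real input sequence of shape
+      [batch_size, sequence_length], used by the 'seq2seq_vd' discriminator.
+    present (Optional): tf.bool Tensor indicating which tokens are given as
+      context, used by the 'seq2seq_vd' discriminator.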
+ + Returns: + predictions: tf.float32 Tensor of predictions of shape [batch_size, + sequence_length] + """ + if FLAGS.discriminator_model == 'cnn': + predictions = cnn.discriminator( + hparams, sequence, is_training=is_training, reuse=reuse) + elif FLAGS.discriminator_model == 'fnn': + predictions = feedforward.discriminator( + hparams, sequence, is_training=is_training, reuse=reuse) + elif FLAGS.discriminator_model == 'rnn': + predictions = rnn.discriminator( + hparams, sequence, is_training=is_training, reuse=reuse) + elif FLAGS.discriminator_model == 'bidirectional': + predictions = bidirectional.discriminator( + hparams, sequence, is_training=is_training, reuse=reuse) + elif FLAGS.discriminator_model == 'bidirectional_zaremba': + predictions = bidirectional_zaremba.discriminator( + hparams, sequence, is_training=is_training, reuse=reuse) + elif FLAGS.discriminator_model == 'seq2seq_vd': + predictions = seq2seq_vd.discriminator( + hparams, + inputs, + present, + sequence, + is_training=is_training, + reuse=reuse) + elif FLAGS.discriminator_model == 'rnn_zaremba': + predictions = rnn_zaremba.discriminator( + hparams, sequence, is_training=is_training, reuse=reuse) + elif FLAGS.discriminator_model == 'rnn_nas': + predictions = rnn_nas.discriminator( + hparams, sequence, is_training=is_training, reuse=reuse) + elif FLAGS.discriminator_model == 'rnn_vd': + predictions = rnn_vd.discriminator( + hparams, + sequence, + is_training=is_training, + reuse=reuse, + initial_state=initial_state) + elif FLAGS.discriminator_model == 'bidirectional_vd': + predictions = bidirectional_vd.discriminator( + hparams, + sequence, + is_training=is_training, + reuse=reuse, + initial_state=initial_state) + else: + raise NotImplementedError + return predictions + + +def create_critic(hparams, sequence, is_training, reuse=None): + """Create the Critic model specified by the FLAGS and hparams. + + Args: + hparams: Hyperparameters for the MaskGAN. + sequence: tf.int32 Tensor sequence of shape [batch_size, sequence_length] + is_training: Whether the model is training. + reuse (Optional): Whether to reuse the model. + + Returns: + values: tf.float32 Tensor of predictions of shape [batch_size, + sequence_length] + """ + if FLAGS.baseline_method == 'critic': + if FLAGS.discriminator_model == 'seq2seq_vd': + values = critic_vd.critic_seq2seq_vd_derivative( + hparams, sequence, is_training, reuse=reuse) + else: + raise NotImplementedError + else: + raise NotImplementedError + return values diff --git a/models/research/maskgan/model_utils/model_losses.py b/models/research/maskgan/model_utils/model_losses.py new file mode 100644 index 0000000000000000000000000000000000000000..c8f337dc48b4f1efb1cf8604327376ddaa9994ea --- /dev/null +++ b/models/research/maskgan/model_utils/model_losses.py @@ -0,0 +1,327 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Model loss construction.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# Dependency imports +import numpy as np +from six.moves import xrange +import tensorflow as tf + +# Useful for REINFORCE baseline. +from losses import losses + +FLAGS = tf.app.flags.FLAGS + + +def create_dis_loss(fake_predictions, real_predictions, targets_present): + """Compute Discriminator loss across real/fake.""" + + missing = tf.cast(targets_present, tf.int32) + missing = 1 - missing + missing = tf.cast(missing, tf.bool) + + real_labels = tf.ones([FLAGS.batch_size, FLAGS.sequence_length]) + dis_loss_real = tf.losses.sigmoid_cross_entropy( + real_labels, real_predictions, weights=missing) + dis_loss_fake = tf.losses.sigmoid_cross_entropy( + targets_present, fake_predictions, weights=missing) + + dis_loss = (dis_loss_fake + dis_loss_real) / 2. + return dis_loss, dis_loss_fake, dis_loss_real + + +def create_critic_loss(cumulative_rewards, estimated_values, present): + """Compute Critic loss in estimating the value function. This should be an + estimate only for the missing elements.""" + missing = tf.cast(present, tf.int32) + missing = 1 - missing + missing = tf.cast(missing, tf.bool) + + loss = tf.losses.mean_squared_error( + labels=cumulative_rewards, predictions=estimated_values, weights=missing) + return loss + + +def create_masked_cross_entropy_loss(targets, present, logits): + """Calculate the cross entropy loss matrices for the masked tokens.""" + cross_entropy_losses = losses.cross_entropy_loss_matrix(targets, logits) + + # Zeros matrix. + zeros_losses = tf.zeros( + shape=[FLAGS.batch_size, FLAGS.sequence_length], dtype=tf.float32) + + missing_ce_loss = tf.where(present, zeros_losses, cross_entropy_losses) + + return missing_ce_loss + + +def calculate_reinforce_objective(hparams, + log_probs, + dis_predictions, + present, + estimated_values=None): + """Calculate the REINFORCE objectives. The REINFORCE objective should + only be on the tokens that were missing. Specifically, the final Generator + reward should be based on the Discriminator predictions on missing tokens. + The log probaibilities should be only for missing tokens and the baseline + should be calculated only on the missing tokens. + + For this model, we optimize the reward is the log of the *conditional* + probability the Discriminator assigns to the distribution. Specifically, for + a Discriminator D which outputs probability of real, given the past context, + + r_t = log D(x_t|x_0,x_1,...x_{t-1}) + + And the policy for Generator G is the log-probability of taking action x2 + given the past context. + + + Args: + hparams: MaskGAN hyperparameters. + log_probs: tf.float32 Tensor of log probailities of the tokens selected by + the Generator. Shape [batch_size, sequence_length]. + dis_predictions: tf.float32 Tensor of the predictions from the + Discriminator. Shape [batch_size, sequence_length]. + present: tf.bool Tensor indicating which tokens are present. Shape + [batch_size, sequence_length]. + estimated_values: tf.float32 Tensor of estimated state values of tokens. + Shape [batch_size, sequence_length] + + Returns: + final_gen_objective: Final REINFORCE objective for the sequence. 
+ rewards: tf.float32 Tensor of rewards for sequence of shape [batch_size, + sequence_length] + advantages: tf.float32 Tensor of advantages for sequence of shape + [batch_size, sequence_length] + baselines: tf.float32 Tensor of baselines for sequence of shape + [batch_size, sequence_length] + maintain_averages_op: ExponentialMovingAverage apply average op to + maintain the baseline. + """ + # Final Generator objective. + final_gen_objective = 0. + gamma = hparams.rl_discount_rate + eps = 1e-7 + + # Generator rewards are log-probabilities. + eps = tf.constant(1e-7, tf.float32) + dis_predictions = tf.nn.sigmoid(dis_predictions) + rewards = tf.log(dis_predictions + eps) + + # Apply only for missing elements. + zeros = tf.zeros_like(present, dtype=tf.float32) + log_probs = tf.where(present, zeros, log_probs) + rewards = tf.where(present, zeros, rewards) + + # Unstack Tensors into lists. + rewards_list = tf.unstack(rewards, axis=1) + log_probs_list = tf.unstack(log_probs, axis=1) + missing = 1. - tf.cast(present, tf.float32) + missing_list = tf.unstack(missing, axis=1) + + # Cumulative Discounted Returns. The true value function V*(s). + cumulative_rewards = [] + for t in xrange(FLAGS.sequence_length): + cum_value = tf.zeros(shape=[FLAGS.batch_size]) + for s in xrange(t, FLAGS.sequence_length): + cum_value += missing_list[s] * np.power(gamma, (s - t)) * rewards_list[s] + cumulative_rewards.append(cum_value) + cumulative_rewards = tf.stack(cumulative_rewards, axis=1) + + ## REINFORCE with different baselines. + # We create a separate critic functionality for the Discriminator. This + # will need to operate unidirectionally and it may take in the past context. + if FLAGS.baseline_method == 'critic': + + # Critic loss calculated from the estimated value function \hat{V}(s) + # versus the true value function V*(s). + critic_loss = create_critic_loss(cumulative_rewards, estimated_values, + present) + + # Baselines are coming from the critic's estimated state values. + baselines = tf.unstack(estimated_values, axis=1) + + ## Calculate the Advantages, A(s,a) = Q(s,a) - \hat{V}(s). + advantages = [] + for t in xrange(FLAGS.sequence_length): + log_probability = log_probs_list[t] + cum_advantage = tf.zeros(shape=[FLAGS.batch_size]) + + for s in xrange(t, FLAGS.sequence_length): + cum_advantage += missing_list[s] * np.power(gamma, + (s - t)) * rewards_list[s] + cum_advantage -= baselines[t] + # Clip advantages. + cum_advantage = tf.clip_by_value(cum_advantage, -FLAGS.advantage_clipping, + FLAGS.advantage_clipping) + advantages.append(missing_list[t] * cum_advantage) + final_gen_objective += tf.multiply( + log_probability, missing_list[t] * tf.stop_gradient(cum_advantage)) + + maintain_averages_op = None + baselines = tf.stack(baselines, axis=1) + advantages = tf.stack(advantages, axis=1) + + # Split the batch into half. Use half for MC estimates for REINFORCE. + # Use the other half to establish a baseline. + elif FLAGS.baseline_method == 'dis_batch': + # TODO(liamfedus): Recheck. + [rewards_half, baseline_half] = tf.split( + rewards, num_or_size_splits=2, axis=0) + [log_probs_half, _] = tf.split(log_probs, num_or_size_splits=2, axis=0) + [reward_present_half, baseline_present_half] = tf.split( + present, num_or_size_splits=2, axis=0) + + # Unstack to lists. + baseline_list = tf.unstack(baseline_half, axis=1) + baseline_missing = 1. 
- tf.cast(baseline_present_half, tf.float32) + baseline_missing_list = tf.unstack(baseline_missing, axis=1) + + baselines = [] + for t in xrange(FLAGS.sequence_length): + # Calculate baseline only for missing tokens. + num_missing = tf.reduce_sum(baseline_missing_list[t]) + + avg_baseline = tf.reduce_sum( + baseline_missing_list[t] * baseline_list[t], keep_dims=True) / ( + num_missing + eps) + baseline = tf.tile(avg_baseline, multiples=[FLAGS.batch_size / 2]) + baselines.append(baseline) + + # Unstack to lists. + rewards_list = tf.unstack(rewards_half, axis=1) + log_probs_list = tf.unstack(log_probs_half, axis=1) + reward_missing = 1. - tf.cast(reward_present_half, tf.float32) + reward_missing_list = tf.unstack(reward_missing, axis=1) + + ## Calculate the Advantages, A(s,a) = Q(s,a) - \hat{V}(s). + advantages = [] + for t in xrange(FLAGS.sequence_length): + log_probability = log_probs_list[t] + cum_advantage = tf.zeros(shape=[FLAGS.batch_size / 2]) + + for s in xrange(t, FLAGS.sequence_length): + cum_advantage += reward_missing_list[s] * np.power(gamma, (s - t)) * ( + rewards_list[s] - baselines[s]) + # Clip advantages. + cum_advantage = tf.clip_by_value(cum_advantage, -FLAGS.advantage_clipping, + FLAGS.advantage_clipping) + advantages.append(reward_missing_list[t] * cum_advantage) + final_gen_objective += tf.multiply( + log_probability, + reward_missing_list[t] * tf.stop_gradient(cum_advantage)) + + # Cumulative Discounted Returns. The true value function V*(s). + cumulative_rewards = [] + for t in xrange(FLAGS.sequence_length): + cum_value = tf.zeros(shape=[FLAGS.batch_size / 2]) + for s in xrange(t, FLAGS.sequence_length): + cum_value += reward_missing_list[s] * np.power(gamma, ( + s - t)) * rewards_list[s] + cumulative_rewards.append(cum_value) + cumulative_rewards = tf.stack(cumulative_rewards, axis=1) + + rewards = rewards_half + critic_loss = None + maintain_averages_op = None + baselines = tf.stack(baselines, axis=1) + advantages = tf.stack(advantages, axis=1) + + # Exponential Moving Average baseline. + elif FLAGS.baseline_method == 'ema': + # TODO(liamfedus): Recheck. + # Lists of rewards and Log probabilities of the actions taken only for + # missing tokens. + ema = tf.train.ExponentialMovingAverage(decay=hparams.baseline_decay) + maintain_averages_op = ema.apply(rewards_list) + + baselines = [] + for r in rewards_list: + baselines.append(ema.average(r)) + + ## Calculate the Advantages, A(s,a) = Q(s,a) - \hat{V}(s). + advantages = [] + for t in xrange(FLAGS.sequence_length): + log_probability = log_probs_list[t] + + # Calculate the forward advantage only on the missing tokens. + cum_advantage = tf.zeros(shape=[FLAGS.batch_size]) + for s in xrange(t, FLAGS.sequence_length): + cum_advantage += missing_list[s] * np.power(gamma, (s - t)) * ( + rewards_list[s] - baselines[s]) + # Clip advantages. 
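+      # Bounding each cumulative advantage to
+      # [-FLAGS.advantage_clipping, FLAGS.advantage_clipping] keeps any single
+      # large reward from dominating the REINFORCE update.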
+ cum_advantage = tf.clip_by_value(cum_advantage, -FLAGS.advantage_clipping, + FLAGS.advantage_clipping) + advantages.append(missing_list[t] * cum_advantage) + final_gen_objective += tf.multiply( + log_probability, missing_list[t] * tf.stop_gradient(cum_advantage)) + + critic_loss = None + baselines = tf.stack(baselines, axis=1) + advantages = tf.stack(advantages, axis=1) + + elif FLAGS.baseline_method is None: + num_missing = tf.reduce_sum(missing) + final_gen_objective += tf.reduce_sum(rewards) / (num_missing + eps) + baselines = tf.zeros_like(rewards) + critic_loss = None + maintain_averages_op = None + advantages = cumulative_rewards + + else: + raise NotImplementedError + + return [ + final_gen_objective, log_probs, rewards, advantages, baselines, + maintain_averages_op, critic_loss, cumulative_rewards + ] + + +def calculate_log_perplexity(logits, targets, present): + """Calculate the average log perplexity per *missing* token. + + Args: + logits: tf.float32 Tensor of the logits of shape [batch_size, + sequence_length, vocab_size]. + targets: tf.int32 Tensor of the sequence target of shape [batch_size, + sequence_length]. + present: tf.bool Tensor indicating the presence or absence of the token + of shape [batch_size, sequence_length]. + + Returns: + avg_log_perplexity: Scalar indicating the average log perplexity per + missing token in the batch. + """ + # logits = tf.Print(logits, [logits], message='logits:', summarize=50) + # targets = tf.Print(targets, [targets], message='targets:', summarize=50) + eps = 1e-12 + logits = tf.reshape(logits, [-1, FLAGS.vocab_size]) + + # Only calculate log-perplexity on missing tokens. + weights = tf.cast(present, tf.float32) + weights = 1. - weights + weights = tf.reshape(weights, [-1]) + num_missing = tf.reduce_sum(weights) + + log_perplexity = tf.contrib.legacy_seq2seq.sequence_loss_by_example( + [logits], [tf.reshape(targets, [-1])], [weights]) + + avg_log_perplexity = tf.reduce_sum(log_perplexity) / (num_missing + eps) + return avg_log_perplexity diff --git a/models/research/maskgan/model_utils/model_optimization.py b/models/research/maskgan/model_utils/model_optimization.py new file mode 100644 index 0000000000000000000000000000000000000000..caae271fe8bed390f032763972a43312f7a8ce9b --- /dev/null +++ b/models/research/maskgan/model_utils/model_optimization.py @@ -0,0 +1,194 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Model optimization.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# Dependency imports + +import tensorflow as tf + +FLAGS = tf.app.flags.FLAGS + + +def create_dis_pretrain_op(hparams, dis_loss, global_step): + """Create a train op for pretraining.""" + with tf.name_scope('pretrain_generator'): + optimizer = tf.train.AdamOptimizer(hparams.dis_pretrain_learning_rate) + dis_vars = [ + v for v in tf.trainable_variables() if v.op.name.startswith('dis') + ] + if FLAGS.dis_update_share_embedding and FLAGS.dis_share_embedding: + shared_embedding = [ + v for v in tf.trainable_variables() + if v.op.name == 'gen/decoder/rnn/embedding' + ][0] + dis_vars.append(shared_embedding) + dis_grads = tf.gradients(dis_loss, dis_vars) + dis_grads_clipped, _ = tf.clip_by_global_norm(dis_grads, + FLAGS.grad_clipping) + dis_pretrain_op = optimizer.apply_gradients( + zip(dis_grads_clipped, dis_vars), global_step=global_step) + return dis_pretrain_op + + +def create_gen_pretrain_op(hparams, cross_entropy_loss, global_step): + """Create a train op for pretraining.""" + with tf.name_scope('pretrain_generator'): + optimizer = tf.train.AdamOptimizer(hparams.gen_pretrain_learning_rate) + gen_vars = [ + v for v in tf.trainable_variables() if v.op.name.startswith('gen') + ] + gen_grads = tf.gradients(cross_entropy_loss, gen_vars) + gen_grads_clipped, _ = tf.clip_by_global_norm(gen_grads, + FLAGS.grad_clipping) + gen_pretrain_op = optimizer.apply_gradients( + zip(gen_grads_clipped, gen_vars), global_step=global_step) + return gen_pretrain_op + + +def create_gen_train_op(hparams, learning_rate, gen_loss, global_step, mode): + """Create Generator train op.""" + del hparams + with tf.name_scope('train_generator'): + if FLAGS.generator_optimizer == 'sgd': + gen_optimizer = tf.train.GradientDescentOptimizer(learning_rate) + elif FLAGS.generator_optimizer == 'adam': + gen_optimizer = tf.train.AdamOptimizer(learning_rate) + else: + raise NotImplementedError + gen_vars = [ + v for v in tf.trainable_variables() if v.op.name.startswith('gen') + ] + print('Optimizing Generator vars.') + for v in gen_vars: + print(v) + if mode == 'MINIMIZE': + gen_grads = tf.gradients(gen_loss, gen_vars) + elif mode == 'MAXIMIZE': + gen_grads = tf.gradients(-gen_loss, gen_vars) + else: + raise ValueError("Must be one of 'MINIMIZE' or 'MAXIMIZE'") + gen_grads_clipped, _ = tf.clip_by_global_norm(gen_grads, + FLAGS.grad_clipping) + gen_train_op = gen_optimizer.apply_gradients( + zip(gen_grads_clipped, gen_vars), global_step=global_step) + return gen_train_op, gen_grads_clipped, gen_vars + + +def create_reinforce_gen_train_op(hparams, learning_rate, final_gen_reward, + averages_op, global_step): + """Create the Generator train_op when using REINFORCE. + + Args: + hparams: MaskGAN hyperparameters. + learning_rate: tf.Variable scalar learning rate. + final_gen_objective: Scalar final REINFORCE objective for the sequence. + averages_op: ExponentialMovingAverage apply average op to + maintain the baseline. + global_step: global_step tf.Variable. + + Returns: + gen_train_op: Generator training op. 
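+ gen_grads: Generator gradients on the REINFORCE objective (pre-clipping).
+ gen_vars: List of trainable Generator variables.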
+ """ + del hparams + with tf.name_scope('train_generator'): + if FLAGS.generator_optimizer == 'sgd': + gen_optimizer = tf.train.GradientDescentOptimizer(learning_rate) + elif FLAGS.generator_optimizer == 'adam': + gen_optimizer = tf.train.AdamOptimizer(learning_rate) + else: + raise NotImplementedError + gen_vars = [ + v for v in tf.trainable_variables() if v.op.name.startswith('gen') + ] + print('\nOptimizing Generator vars:') + for v in gen_vars: + print(v) + + # Maximize reward. + gen_grads = tf.gradients(-final_gen_reward, gen_vars) + gen_grads_clipped, _ = tf.clip_by_global_norm(gen_grads, + FLAGS.grad_clipping) + maximize_op = gen_optimizer.apply_gradients( + zip(gen_grads_clipped, gen_vars), global_step=global_step) + + # Group maintain averages op. + if averages_op: + gen_train_op = tf.group(maximize_op, averages_op) + else: + gen_train_op = maximize_op + + return [gen_train_op, gen_grads, gen_vars] + + +def create_dis_train_op(hparams, dis_loss, global_step): + """Create Discriminator train op.""" + with tf.name_scope('train_discriminator'): + dis_optimizer = tf.train.AdamOptimizer(hparams.dis_learning_rate) + dis_vars = [ + v for v in tf.trainable_variables() if v.op.name.startswith('dis') + ] + if FLAGS.dis_update_share_embedding and FLAGS.dis_share_embedding: + shared_embedding = [ + v for v in tf.trainable_variables() + if v.op.name == 'gen/decoder/rnn/embedding' + ][0] + dis_vars.append(shared_embedding) + print('\nOptimizing Discriminator vars:') + for v in dis_vars: + print(v) + dis_grads = tf.gradients(dis_loss, dis_vars) + dis_grads_clipped, _ = tf.clip_by_global_norm(dis_grads, + FLAGS.grad_clipping) + dis_train_op = dis_optimizer.apply_gradients( + zip(dis_grads_clipped, dis_vars), global_step=global_step) + return dis_train_op, dis_grads_clipped, dis_vars + + +def create_critic_train_op(hparams, critic_loss, global_step): + """Create Discriminator train op.""" + with tf.name_scope('train_critic'): + critic_optimizer = tf.train.AdamOptimizer(hparams.critic_learning_rate) + output_vars = [ + v for v in tf.trainable_variables() if v.op.name.startswith('critic') + ] + + if FLAGS.critic_update_dis_vars: + if FLAGS.discriminator_model == 'bidirectional_vd': + critic_vars = [ + v for v in tf.trainable_variables() + if v.op.name.startswith('dis/rnn') + ] + elif FLAGS.discriminator_model == 'seq2seq_vd': + critic_vars = [ + v for v in tf.trainable_variables() + if v.op.name.startswith('dis/decoder/rnn/multi_rnn_cell') + ] + critic_vars.extend(output_vars) + else: + critic_vars = output_vars + print('\nOptimizing Critic vars:') + for v in critic_vars: + print(v) + critic_grads = tf.gradients(critic_loss, critic_vars) + critic_grads_clipped, _ = tf.clip_by_global_norm(critic_grads, + FLAGS.grad_clipping) + critic_train_op = critic_optimizer.apply_gradients( + zip(critic_grads_clipped, critic_vars), global_step=global_step) + return critic_train_op, critic_grads_clipped, critic_vars diff --git a/models/research/maskgan/model_utils/model_utils.py b/models/research/maskgan/model_utils/model_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0e3183582e0f17b7d4ca54450231ea9bad039e40 --- /dev/null +++ b/models/research/maskgan/model_utils/model_utils.py @@ -0,0 +1,291 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Model utilities.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# Dependency imports +import numpy as np + +import tensorflow as tf +from model_utils import variable_mapping + +FLAGS = tf.app.flags.FLAGS + + +def generate_mask(): + """Generate the mask to be fed into the model.""" + if FLAGS.mask_strategy == 'random': + p = np.random.choice( + [True, False], + size=[FLAGS.batch_size, FLAGS.sequence_length], + p=[FLAGS.is_present_rate, 1. - FLAGS.is_present_rate]) + + elif FLAGS.mask_strategy == 'contiguous': + masked_length = int((1 - FLAGS.is_present_rate) * FLAGS.sequence_length) - 1 + # Determine location to start masking. + start_mask = np.random.randint( + 1, FLAGS.sequence_length - masked_length + 1, size=FLAGS.batch_size) + p = np.full([FLAGS.batch_size, FLAGS.sequence_length], True, dtype=bool) + + # Create contiguous masked section to be False. + for i, index in enumerate(start_mask): + p[i, index:index + masked_length] = False + + else: + raise NotImplementedError + + return p + + +def assign_percent_real(session, percent_real_update, new_rate, current_rate): + """Run assign operation where the we load the current_rate of percent + real into a Tensorflow variable. + + Args: + session: Current tf.Session. + percent_real_update: tf.assign operation. + new_rate: tf.placeholder for the new rate. + current_rate: Percent of tokens that are currently real. Fake tokens + are the ones being imputed by the Generator. + """ + session.run(percent_real_update, feed_dict={new_rate: current_rate}) + + +def assign_learning_rate(session, lr_update, lr_placeholder, new_lr): + """Run assign operation where the we load the current_rate of percent + real into a Tensorflow variable. + + Args: + session: Current tf.Session. + lr_update: tf.assign operation. + lr_placeholder: tf.placeholder for the new learning rate. + new_lr: New learning rate to use. + """ + session.run(lr_update, feed_dict={lr_placeholder: new_lr}) + + +def clip_weights(variables, c_lower, c_upper): + """Clip a list of weights to be within a certain range. + + Args: + variables: List of tf.Variable weights. + c_lower: Lower bound for weights. + c_upper: Upper bound for weights. + """ + clip_ops = [] + + for var in variables: + clipped_var = tf.clip_by_value(var, c_lower, c_upper) + + clip_ops.append(tf.assign(var, clipped_var)) + return tf.group(*clip_ops) + + +def retrieve_init_savers(hparams): + """Retrieve a dictionary of all the initial savers for the models. + + Args: + hparams: MaskGAN hyperparameters. + """ + ## Dictionary of init savers. + init_savers = {} + + ## Load Generator weights from MaskGAN checkpoint. + if FLAGS.maskgan_ckpt: + gen_vars = [ + v for v in tf.trainable_variables() if v.op.name.startswith('gen') + ] + init_saver = tf.train.Saver(var_list=gen_vars) + init_savers['init_saver'] = init_saver + + ## Load the Discriminator weights from the MaskGAN checkpoint if + # the weights are compatible. 
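+ # Currently only the seq2seq_vd Discriminator has a variable mapping onto
+ # the MaskGAN checkpoint, so it is the only case handled here.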
+ if FLAGS.discriminator_model == 'seq2seq_vd': + dis_variable_maps = variable_mapping.dis_seq2seq_vd(hparams) + dis_init_saver = tf.train.Saver(var_list=dis_variable_maps) + init_savers['dis_init_saver'] = dis_init_saver + + ## Load weights from language model checkpoint. + if FLAGS.language_model_ckpt_dir: + if FLAGS.maskgan_ckpt is None: + ## Generator Variables/Savers. + if FLAGS.generator_model == 'rnn_nas': + gen_variable_maps = variable_mapping.rnn_nas(hparams, model='gen') + gen_init_saver = tf.train.Saver(var_list=gen_variable_maps) + init_savers['gen_init_saver'] = gen_init_saver + + elif FLAGS.generator_model == 'seq2seq_nas': + # Encoder. + gen_encoder_variable_maps = variable_mapping.gen_encoder_seq2seq_nas( + hparams) + gen_encoder_init_saver = tf.train.Saver( + var_list=gen_encoder_variable_maps) + # Decoder. + gen_decoder_variable_maps = variable_mapping.gen_decoder_seq2seq_nas( + hparams) + gen_decoder_init_saver = tf.train.Saver( + var_list=gen_decoder_variable_maps) + init_savers['gen_encoder_init_saver'] = gen_encoder_init_saver + init_savers['gen_decoder_init_saver'] = gen_decoder_init_saver + + # seq2seq_vd derived from the same code base as seq2seq_zaremba. + elif (FLAGS.generator_model == 'seq2seq_zaremba' or + FLAGS.generator_model == 'seq2seq_vd'): + # Encoder. + gen_encoder_variable_maps = variable_mapping.gen_encoder_seq2seq( + hparams) + gen_encoder_init_saver = tf.train.Saver( + var_list=gen_encoder_variable_maps) + # Decoder. + gen_decoder_variable_maps = variable_mapping.gen_decoder_seq2seq( + hparams) + gen_decoder_init_saver = tf.train.Saver( + var_list=gen_decoder_variable_maps) + init_savers['gen_encoder_init_saver'] = gen_encoder_init_saver + init_savers['gen_decoder_init_saver'] = gen_decoder_init_saver + + else: + raise NotImplementedError + + ## Discriminator Variables/Savers. + if FLAGS.discriminator_model == 'rnn_nas': + dis_variable_maps = variable_mapping.rnn_nas(hparams, model='dis') + dis_init_saver = tf.train.Saver(var_list=dis_variable_maps) + init_savers['dis_init_saver'] = dis_init_saver + + # rnn_vd derived from the same code base as rnn_zaremba. + elif (FLAGS.discriminator_model == 'rnn_zaremba' or + FLAGS.discriminator_model == 'rnn_vd'): + dis_variable_maps = variable_mapping.rnn_zaremba(hparams, model='dis') + dis_init_saver = tf.train.Saver(var_list=dis_variable_maps) + init_savers['dis_init_saver'] = dis_init_saver + + elif (FLAGS.discriminator_model == 'bidirectional_zaremba' or + FLAGS.discriminator_model == 'bidirectional_vd'): + dis_fwd_variable_maps = variable_mapping.dis_fwd_bidirectional(hparams) + dis_bwd_variable_maps = variable_mapping.dis_bwd_bidirectional(hparams) + # Savers for the forward/backward Discriminator components. + dis_fwd_init_saver = tf.train.Saver(var_list=dis_fwd_variable_maps) + dis_bwd_init_saver = tf.train.Saver(var_list=dis_bwd_variable_maps) + init_savers['dis_fwd_init_saver'] = dis_fwd_init_saver + init_savers['dis_bwd_init_saver'] = dis_bwd_init_saver + + elif FLAGS.discriminator_model == 'cnn': + dis_variable_maps = variable_mapping.cnn() + dis_init_saver = tf.train.Saver(var_list=dis_variable_maps) + init_savers['dis_init_saver'] = dis_init_saver + + elif FLAGS.discriminator_model == 'seq2seq_vd': + # Encoder. + dis_encoder_variable_maps = variable_mapping.dis_encoder_seq2seq(hparams) + dis_encoder_init_saver = tf.train.Saver( + var_list=dis_encoder_variable_maps) + # Decoder. 
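+ # The decoder saver mirrors the encoder saver above; init_fn restores
+ # both from the same language model checkpoint.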
+ dis_decoder_variable_maps = variable_mapping.dis_decoder_seq2seq(hparams) + dis_decoder_init_saver = tf.train.Saver( + var_list=dis_decoder_variable_maps) + init_savers['dis_encoder_init_saver'] = dis_encoder_init_saver + init_savers['dis_decoder_init_saver'] = dis_decoder_init_saver + + return init_savers + + +def init_fn(init_savers, sess): + """The init_fn to be passed to the Supervisor. + + Args: + init_savers: Dictionary of init_savers. 'init_saver_name': init_saver. + sess: tf.Session. + """ + ## Load Generator weights from MaskGAN checkpoint. + if FLAGS.maskgan_ckpt: + print('Restoring Generator from %s.' % FLAGS.maskgan_ckpt) + tf.logging.info('Restoring Generator from %s.' % FLAGS.maskgan_ckpt) + print('Asserting Generator is a seq2seq-variant.') + tf.logging.info('Asserting Generator is a seq2seq-variant.') + assert FLAGS.generator_model.startswith('seq2seq') + init_saver = init_savers['init_saver'] + init_saver.restore(sess, FLAGS.maskgan_ckpt) + + ## Load the Discriminator weights from the MaskGAN checkpoint if + # the weights are compatible. + if FLAGS.discriminator_model == 'seq2seq_vd': + print('Restoring Discriminator from %s.' % FLAGS.maskgan_ckpt) + tf.logging.info('Restoring Discriminator from %s.' % FLAGS.maskgan_ckpt) + dis_init_saver = init_savers['dis_init_saver'] + dis_init_saver.restore(sess, FLAGS.maskgan_ckpt) + + ## Load weights from language model checkpoint. + if FLAGS.language_model_ckpt_dir: + if FLAGS.maskgan_ckpt is None: + ## Generator Models. + if FLAGS.generator_model == 'rnn_nas': + load_ckpt = tf.train.latest_checkpoint(FLAGS.language_model_ckpt_dir) + print('Restoring Generator from %s.' % load_ckpt) + tf.logging.info('Restoring Generator from %s.' % load_ckpt) + gen_init_saver = init_savers['gen_init_saver'] + gen_init_saver.restore(sess, load_ckpt) + + elif FLAGS.generator_model.startswith('seq2seq'): + load_ckpt = tf.train.latest_checkpoint(FLAGS.language_model_ckpt_dir) + print('Restoring Generator from %s.' % load_ckpt) + tf.logging.info('Restoring Generator from %s.' % load_ckpt) + gen_encoder_init_saver = init_savers['gen_encoder_init_saver'] + gen_decoder_init_saver = init_savers['gen_decoder_init_saver'] + gen_encoder_init_saver.restore(sess, load_ckpt) + gen_decoder_init_saver.restore(sess, load_ckpt) + + ## Discriminator Models. + if (FLAGS.discriminator_model == 'rnn_nas' or + FLAGS.discriminator_model == 'rnn_zaremba' or + FLAGS.discriminator_model == 'rnn_vd' or + FLAGS.discriminator_model == 'cnn'): + load_ckpt = tf.train.latest_checkpoint(FLAGS.language_model_ckpt_dir) + print('Restoring Discriminator from %s.' % load_ckpt) + tf.logging.info('Restoring Discriminator from %s.' % load_ckpt) + dis_init_saver = init_savers['dis_init_saver'] + dis_init_saver.restore(sess, load_ckpt) + + elif (FLAGS.discriminator_model == 'bidirectional_zaremba' or + FLAGS.discriminator_model == 'bidirectional_vd'): + assert FLAGS.language_model_ckpt_dir_reversed is not None, ( + 'Need a reversed directory to fill in the backward components.') + load_fwd_ckpt = tf.train.latest_checkpoint(FLAGS.language_model_ckpt_dir) + load_bwd_ckpt = tf.train.latest_checkpoint( + FLAGS.language_model_ckpt_dir_reversed) + print('Restoring Discriminator from %s and %s.' % (load_fwd_ckpt, + load_bwd_ckpt)) + tf.logging.info('Restoring Discriminator from %s and %s.' 
% + (load_fwd_ckpt, load_bwd_ckpt)) + dis_fwd_init_saver = init_savers['dis_fwd_init_saver'] + dis_bwd_init_saver = init_savers['dis_bwd_init_saver'] + dis_fwd_init_saver.restore(sess, load_fwd_ckpt) + dis_bwd_init_saver.restore(sess, load_bwd_ckpt) + + elif FLAGS.discriminator_model == 'seq2seq_vd': + load_ckpt = tf.train.latest_checkpoint(FLAGS.language_model_ckpt_dir) + print('Restoring Discriminator from %s.' % load_ckpt) + tf.logging.info('Restoring Discriminator from %s.' % load_ckpt) + dis_encoder_init_saver = init_savers['dis_encoder_init_saver'] + dis_decoder_init_saver = init_savers['dis_decoder_init_saver'] + dis_encoder_init_saver.restore(sess, load_ckpt) + dis_decoder_init_saver.restore(sess, load_ckpt) + + else: + return diff --git a/models/research/maskgan/model_utils/n_gram.py b/models/research/maskgan/model_utils/n_gram.py new file mode 100644 index 0000000000000000000000000000000000000000..b889dde849a60d95aa38c57cd8c864249233514f --- /dev/null +++ b/models/research/maskgan/model_utils/n_gram.py @@ -0,0 +1,66 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""We calculate n-Grams from the training text. We will use this as an +evaluation metric.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import xrange + + +def hash_function(input_tuple): + """Hash function for a tuple.""" + return hash(input_tuple) + + +def find_all_ngrams(dataset, n): + """Generate a list of all ngrams.""" + return zip(*[dataset[i:] for i in xrange(n)]) + + +def construct_ngrams_dict(ngrams_list): + """Construct a ngram dictionary which maps an ngram tuple to the number + of times it appears in the text.""" + counts = {} + + for t in ngrams_list: + key = hash_function(t) + if key in counts: + counts[key] += 1 + else: + counts[key] = 1 + return counts + + +def percent_unique_ngrams_in_train(train_ngrams_dict, gen_ngrams_dict): + """Compute the percent of ngrams generated by the model that are + present in the training text and are unique.""" + + # *Total* number of n-grams produced by the generator. + total_ngrams_produced = 0 + + for _, value in gen_ngrams_dict.iteritems(): + total_ngrams_produced += value + + # The unique ngrams in the training set. + unique_ngrams_in_train = 0. + + for key, _ in gen_ngrams_dict.iteritems(): + if key in train_ngrams_dict: + unique_ngrams_in_train += 1 + return float(unique_ngrams_in_train) / float(total_ngrams_produced) diff --git a/models/research/maskgan/model_utils/variable_mapping.py b/models/research/maskgan/model_utils/variable_mapping.py new file mode 100644 index 0000000000000000000000000000000000000000..0301b969716fe473ac98c2e3bba5c04662461954 --- /dev/null +++ b/models/research/maskgan/model_utils/variable_mapping.py @@ -0,0 +1,745 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# Dependency imports + +import tensorflow as tf + +FLAGS = tf.app.flags.FLAGS + + +def rnn_nas(hparams, model): + assert model == 'gen' or model == 'dis' + + # This logic is only valid for rnn_zaremba + if model == 'gen': + assert FLAGS.generator_model == 'rnn_nas' + assert hparams.gen_num_layers == 2 + + if model == 'dis': + assert FLAGS.discriminator_model == 'rnn_nas' + assert hparams.dis_num_layers == 2 + + # Output variables only for the Generator. Discriminator output biases + # will begin randomly initialized. + if model == 'gen': + softmax_b = [ + v for v in tf.trainable_variables() if v.op.name == 'gen/rnn/softmax_b' + ][0] + + # Common elements to Generator and Discriminator. + embedding = [ + v for v in tf.trainable_variables() + if v.op.name == str(model) + '/rnn/embedding' + ][0] + lstm_w_0 = [ + v for v in tf.trainable_variables() + if v.op.name == + str(model) + '/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat' + ][0] + lstm_b_0 = [ + v for v in tf.trainable_variables() + if v.op.name == str(model) + + '/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat' + ][0] + lstm_w_1 = [ + v for v in tf.trainable_variables() + if v.op.name == + str(model) + '/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat' + ][0] + lstm_b_1 = [ + v for v in tf.trainable_variables() + if v.op.name == str(model) + + '/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat' + ][0] + + # Dictionary mapping. + if model == 'gen': + variable_mapping = { + 'Model/embeddings/input_embedding': + embedding, + 'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat': + lstm_w_0, + 'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat': + lstm_b_0, + 'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat': + lstm_w_1, + 'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat': + lstm_b_1, + 'Model/softmax_b': + softmax_b + } + else: + variable_mapping = { + 'Model/embeddings/input_embedding': + embedding, + 'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat': + lstm_w_0, + 'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat': + lstm_b_0, + 'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat': + lstm_w_1, + 'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat': + lstm_b_1 + } + + return variable_mapping + + +def cnn(): + """Variable mapping for the CNN embedding. + + Returns: + variable_mapping: Dictionary with Key: ckpt_name, Value: model_var. + """ + # This logic is only valid for cnn + assert FLAGS.discriminator_model == 'cnn' + + # Retrieve CNN embedding. + embedding = [ + v for v in tf.trainable_variables() if v.op.name == 'dis/embedding' + ][0] + + # Variable mapping. 
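+ # Only the input embedding is restored from the checkpoint; the remaining
+ # CNN weights keep their random initialization.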
+ variable_mapping = {'Model/embedding': embedding} + + return variable_mapping + + +def rnn_zaremba(hparams, model): + """Returns the PTB Variable name to MaskGAN Variable dictionary mapping. This + is a highly restrictive function just for testing. This will need to be + generalized. + + Args: + hparams: Hyperparameters for the MaskGAN. + model: Model type, one of ['gen', 'dis']. + + Returns: + variable_mapping: Dictionary with Key: ckpt_name, Value: model_var. + """ + assert model == 'gen' or model == 'dis' + + # This logic is only valid for rnn_zaremba + if model == 'gen': + assert FLAGS.generator_model == 'rnn_zaremba' + assert hparams.gen_num_layers == 2 + + if model == 'dis': + assert (FLAGS.discriminator_model == 'rnn_zaremba' or + FLAGS.discriminator_model == 'rnn_vd') + assert hparams.dis_num_layers == 2 + + # Output variables only for the Generator. Discriminator output weights + # and biases will begin randomly initialized. + if model == 'gen': + softmax_w = [ + v for v in tf.trainable_variables() if v.op.name == 'gen/rnn/softmax_w' + ][0] + softmax_b = [ + v for v in tf.trainable_variables() if v.op.name == 'gen/rnn/softmax_b' + ][0] + + # Common elements to Generator and Discriminator. + if not FLAGS.dis_share_embedding or model != 'dis': + embedding = [ + v for v in tf.trainable_variables() + if v.op.name == str(model) + '/rnn/embedding' + ][0] + lstm_w_0 = [ + v for v in tf.trainable_variables() if v.op.name == str(model) + + '/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel' + ][0] + lstm_b_0 = [ + v for v in tf.trainable_variables() if v.op.name == str(model) + + '/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias' + ][0] + lstm_w_1 = [ + v for v in tf.trainable_variables() if v.op.name == str(model) + + '/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel' + ][0] + lstm_b_1 = [ + v for v in tf.trainable_variables() if v.op.name == str(model) + + '/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias' + ][0] + + # Dictionary mapping. + if model == 'gen': + variable_mapping = { + 'Model/embedding': embedding, + 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': lstm_w_0, + 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': lstm_b_0, + 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': lstm_w_1, + 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': lstm_b_1, + 'Model/softmax_w': softmax_w, + 'Model/softmax_b': softmax_b + } + else: + if FLAGS.dis_share_embedding: + variable_mapping = { + 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': lstm_w_0, + 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': lstm_b_0, + 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': lstm_w_1, + 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': lstm_b_1 + } + else: + variable_mapping = { + 'Model/embedding': embedding, + 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': lstm_w_0, + 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': lstm_b_0, + 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': lstm_w_1, + 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': lstm_b_1 + } + + return variable_mapping + + +def gen_encoder_seq2seq_nas(hparams): + """Returns the NAS Variable name to MaskGAN Variable + dictionary mapping. This is a highly restrictive function just for testing. + This is for the *unidirecitional* seq2seq_nas encoder. + + Args: + hparams: Hyperparameters for the MaskGAN. + + Returns: + variable_mapping: Dictionary with Key: ckpt_name, Value: model_varself. 
+ """ + assert FLAGS.generator_model == 'seq2seq_nas' + assert hparams.gen_num_layers == 2 + ## Encoder forward variables. + + if not FLAGS.seq2seq_share_embedding: + encoder_embedding = [ + v for v in tf.trainable_variables() + if v.op.name == 'gen/encoder/rnn/embedding' + ][0] + encoder_lstm_w_0 = [ + v for v in tf.trainable_variables() + if v.op.name == + 'gen/encoder/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat' + ][0] + encoder_lstm_b_0 = [ + v for v in tf.trainable_variables() + if v.op.name == + 'gen/encoder/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat' + ][0] + encoder_lstm_w_1 = [ + v for v in tf.trainable_variables() + if v.op.name == + 'gen/encoder/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat' + ][0] + encoder_lstm_b_1 = [ + v for v in tf.trainable_variables() + if v.op.name == + 'gen/encoder/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat' + ][0] + + if not FLAGS.seq2seq_share_embedding: + variable_mapping = { + 'Model/embeddings/input_embedding': + encoder_embedding, + 'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat': + encoder_lstm_w_0, + 'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat': + encoder_lstm_b_0, + 'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat': + encoder_lstm_w_1, + 'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat': + encoder_lstm_b_1 + } + else: + variable_mapping = { + 'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat': + encoder_lstm_w_0, + 'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat': + encoder_lstm_b_0, + 'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat': + encoder_lstm_w_1, + 'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat': + encoder_lstm_b_1 + } + return variable_mapping + + +def gen_decoder_seq2seq_nas(hparams): + assert FLAGS.generator_model == 'seq2seq_nas' + assert hparams.gen_num_layers == 2 + + decoder_embedding = [ + v for v in tf.trainable_variables() + if v.op.name == 'gen/decoder/rnn/embedding' + ][0] + decoder_lstm_w_0 = [ + v for v in tf.trainable_variables() + if v.op.name == + 'gen/decoder/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat' + ][0] + decoder_lstm_b_0 = [ + v for v in tf.trainable_variables() + if v.op.name == + 'gen/decoder/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat' + ][0] + decoder_lstm_w_1 = [ + v for v in tf.trainable_variables() + if v.op.name == + 'gen/decoder/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat' + ][0] + decoder_lstm_b_1 = [ + v for v in tf.trainable_variables() + if v.op.name == + 'gen/decoder/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat' + ][0] + + decoder_softmax_b = [ + v for v in tf.trainable_variables() + if v.op.name == 'gen/decoder/rnn/softmax_b' + ][0] + + variable_mapping = { + 'Model/embeddings/input_embedding': + decoder_embedding, + 'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat': + decoder_lstm_w_0, + 'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat': + decoder_lstm_b_0, + 'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat': + decoder_lstm_w_1, + 'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat': + decoder_lstm_b_1, + 'Model/softmax_b': + decoder_softmax_b + } + + return variable_mapping + + +def gen_encoder_seq2seq(hparams): + """Returns the PTB Variable name to MaskGAN Variable + dictionary mapping. 
This is a highly restrictive function just for testing. + This is foe the *unidirecitional* seq2seq_zaremba encoder. + + Args: + hparams: Hyperparameters for the MaskGAN. + + Returns: + variable_mapping: Dictionary with Key: ckpt_name, Value: model_varself. + """ + assert (FLAGS.generator_model == 'seq2seq_zaremba' or + FLAGS.generator_model == 'seq2seq_vd') + assert hparams.gen_num_layers == 2 + + ## Encoder forward variables. + if not FLAGS.seq2seq_share_embedding: + encoder_embedding = [ + v for v in tf.trainable_variables() + if v.op.name == 'gen/encoder/rnn/embedding' + ][0] + encoder_lstm_w_0 = [ + v for v in tf.trainable_variables() if v.op.name == + 'gen/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel' + ][0] + encoder_lstm_b_0 = [ + v for v in tf.trainable_variables() if v.op.name == + 'gen/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias' + ][0] + encoder_lstm_w_1 = [ + v for v in tf.trainable_variables() if v.op.name == + 'gen/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel' + ][0] + encoder_lstm_b_1 = [ + v for v in tf.trainable_variables() if v.op.name == + 'gen/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias' + ][0] + + if FLAGS.data_set == 'ptb': + model_str = 'Model' + else: + model_str = 'model' + + if not FLAGS.seq2seq_share_embedding: + variable_mapping = { + str(model_str) + '/embedding': + encoder_embedding, + str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': + encoder_lstm_w_0, + str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': + encoder_lstm_b_0, + str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': + encoder_lstm_w_1, + str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': + encoder_lstm_b_1 + } + else: + variable_mapping = { + str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': + encoder_lstm_w_0, + str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': + encoder_lstm_b_0, + str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': + encoder_lstm_w_1, + str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': + encoder_lstm_b_1 + } + return variable_mapping + + +def gen_decoder_seq2seq(hparams): + assert (FLAGS.generator_model == 'seq2seq_zaremba' or + FLAGS.generator_model == 'seq2seq_vd') + assert hparams.gen_num_layers == 2 + + decoder_embedding = [ + v for v in tf.trainable_variables() + if v.op.name == 'gen/decoder/rnn/embedding' + ][0] + decoder_lstm_w_0 = [ + v for v in tf.trainable_variables() if v.op.name == + 'gen/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel' + ][0] + decoder_lstm_b_0 = [ + v for v in tf.trainable_variables() if v.op.name == + 'gen/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias' + ][0] + decoder_lstm_w_1 = [ + v for v in tf.trainable_variables() if v.op.name == + 'gen/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel' + ][0] + decoder_lstm_b_1 = [ + v for v in tf.trainable_variables() if v.op.name == + 'gen/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias' + ][0] + decoder_softmax_b = [ + v for v in tf.trainable_variables() + if v.op.name == 'gen/decoder/rnn/softmax_b' + ][0] + + if FLAGS.data_set == 'ptb': + model_str = 'Model' + else: + model_str = 'model' + + variable_mapping = { + str(model_str) + '/embedding': + decoder_embedding, + str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': + decoder_lstm_w_0, + str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': + decoder_lstm_b_0, + str(model_str) + 
'/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': + decoder_lstm_w_1, + str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': + decoder_lstm_b_1, + str(model_str) + '/softmax_b': + decoder_softmax_b + } + return variable_mapping + + +def dis_fwd_bidirectional(hparams): + """Returns the *forward* PTB Variable name to MaskGAN Variable dictionary + mapping. This is a highly restrictive function just for testing. This is for + the bidirectional_zaremba discriminator. + + Args: + FLAGS: Flags for the model. + hparams: Hyperparameters for the MaskGAN. + + Returns: + variable_mapping: Dictionary with Key: ckpt_name, Value: model_varself. + """ + assert (FLAGS.discriminator_model == 'bidirectional_zaremba' or + FLAGS.discriminator_model == 'bidirectional_vd') + assert hparams.dis_num_layers == 2 + + # Forward Discriminator Elements. + if not FLAGS.dis_share_embedding: + embedding = [ + v for v in tf.trainable_variables() if v.op.name == 'dis/embedding' + ][0] + fw_lstm_w_0 = [ + v for v in tf.trainable_variables() + if v.op.name == 'dis/rnn/fw/multi_rnn_cell/cell_0/basic_lstm_cell/kernel' + ][0] + fw_lstm_b_0 = [ + v for v in tf.trainable_variables() + if v.op.name == 'dis/rnn/fw/multi_rnn_cell/cell_0/basic_lstm_cell/bias' + ][0] + fw_lstm_w_1 = [ + v for v in tf.trainable_variables() + if v.op.name == 'dis/rnn/fw/multi_rnn_cell/cell_1/basic_lstm_cell/kernel' + ][0] + fw_lstm_b_1 = [ + v for v in tf.trainable_variables() + if v.op.name == 'dis/rnn/fw/multi_rnn_cell/cell_1/basic_lstm_cell/bias' + ][0] + if FLAGS.dis_share_embedding: + variable_mapping = { + 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': fw_lstm_w_0, + 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': fw_lstm_b_0, + 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': fw_lstm_w_1, + 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': fw_lstm_b_1 + } + else: + variable_mapping = { + 'Model/embedding': embedding, + 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': fw_lstm_w_0, + 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': fw_lstm_b_0, + 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': fw_lstm_w_1, + 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': fw_lstm_b_1 + } + return variable_mapping + + +def dis_bwd_bidirectional(hparams): + """Returns the *backward* PTB Variable name to MaskGAN Variable dictionary + mapping. This is a highly restrictive function just for testing. This is for + the bidirectional_zaremba discriminator. + + Args: + hparams: Hyperparameters for the MaskGAN. + + Returns: + variable_mapping: Dictionary with Key: ckpt_name, Value: model_varself. + """ + assert (FLAGS.discriminator_model == 'bidirectional_zaremba' or + FLAGS.discriminator_model == 'bidirectional_vd') + assert hparams.dis_num_layers == 2 + + # Backward Discriminator Elements. 
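+ # These variables are restored from the reversed-corpus language model
+ # checkpoint (see init_fn in model_utils.py, which uses
+ # FLAGS.language_model_ckpt_dir_reversed).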
+ bw_lstm_w_0 = [ + v for v in tf.trainable_variables() + if v.op.name == 'dis/rnn/bw/multi_rnn_cell/cell_0/basic_lstm_cell/kernel' + ][0] + bw_lstm_b_0 = [ + v for v in tf.trainable_variables() + if v.op.name == 'dis/rnn/bw/multi_rnn_cell/cell_0/basic_lstm_cell/bias' + ][0] + bw_lstm_w_1 = [ + v for v in tf.trainable_variables() + if v.op.name == 'dis/rnn/bw/multi_rnn_cell/cell_1/basic_lstm_cell/kernel' + ][0] + bw_lstm_b_1 = [ + v for v in tf.trainable_variables() + if v.op.name == 'dis/rnn/bw/multi_rnn_cell/cell_1/basic_lstm_cell/bias' + ][0] + + variable_mapping = { + 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': bw_lstm_w_0, + 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': bw_lstm_b_0, + 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': bw_lstm_w_1, + 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': bw_lstm_b_1 + } + return variable_mapping + + +def dis_encoder_seq2seq(hparams): + """Returns the PTB Variable name to MaskGAN Variable + dictionary mapping. + + Args: + hparams: Hyperparameters for the MaskGAN. + + Returns: + variable_mapping: Dictionary with Key: ckpt_name, Value: model_varself. + """ + assert FLAGS.discriminator_model == 'seq2seq_vd' + assert hparams.dis_num_layers == 2 + + ## Encoder forward variables. + encoder_lstm_w_0 = [ + v for v in tf.trainable_variables() if v.op.name == + 'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel' + ][0] + encoder_lstm_b_0 = [ + v for v in tf.trainable_variables() if v.op.name == + 'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias' + ][0] + encoder_lstm_w_1 = [ + v for v in tf.trainable_variables() if v.op.name == + 'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel' + ][0] + encoder_lstm_b_1 = [ + v for v in tf.trainable_variables() if v.op.name == + 'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias' + ][0] + + if FLAGS.data_set == 'ptb': + model_str = 'Model' + else: + model_str = 'model' + + variable_mapping = { + str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': + encoder_lstm_w_0, + str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': + encoder_lstm_b_0, + str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': + encoder_lstm_w_1, + str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': + encoder_lstm_b_1 + } + return variable_mapping + + +def dis_decoder_seq2seq(hparams): + assert FLAGS.discriminator_model == 'seq2seq_vd' + assert hparams.dis_num_layers == 2 + + if not FLAGS.dis_share_embedding: + decoder_embedding = [ + v for v in tf.trainable_variables() + if v.op.name == 'dis/decoder/rnn/embedding' + ][0] + decoder_lstm_w_0 = [ + v for v in tf.trainable_variables() if v.op.name == + 'dis/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel' + ][0] + decoder_lstm_b_0 = [ + v for v in tf.trainable_variables() if v.op.name == + 'dis/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias' + ][0] + decoder_lstm_w_1 = [ + v for v in tf.trainable_variables() if v.op.name == + 'dis/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel' + ][0] + decoder_lstm_b_1 = [ + v for v in tf.trainable_variables() if v.op.name == + 'dis/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias' + ][0] + + if FLAGS.data_set == 'ptb': + model_str = 'Model' + else: + model_str = 'model' + + if not FLAGS.dis_share_embedding: + variable_mapping = { + str(model_str) + '/embedding': + decoder_embedding, + str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': + decoder_lstm_w_0, + str(model_str) + 
'/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': + decoder_lstm_b_0, + str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': + decoder_lstm_w_1, + str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': + decoder_lstm_b_1 + } + else: + variable_mapping = { + str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': + decoder_lstm_w_0, + str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': + decoder_lstm_b_0, + str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': + decoder_lstm_w_1, + str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': + decoder_lstm_b_1, + } + return variable_mapping + + +def dis_seq2seq_vd(hparams): + assert FLAGS.discriminator_model == 'seq2seq_vd' + assert hparams.dis_num_layers == 2 + + if not FLAGS.dis_share_embedding: + decoder_embedding = [ + v for v in tf.trainable_variables() + if v.op.name == 'dis/decoder/rnn/embedding' + ][0] + + ## Encoder variables. + encoder_lstm_w_0 = [ + v for v in tf.trainable_variables() if v.op.name == + 'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel' + ][0] + encoder_lstm_b_0 = [ + v for v in tf.trainable_variables() if v.op.name == + 'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias' + ][0] + encoder_lstm_w_1 = [ + v for v in tf.trainable_variables() if v.op.name == + 'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel' + ][0] + encoder_lstm_b_1 = [ + v for v in tf.trainable_variables() if v.op.name == + 'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias' + ][0] + + ## Attention. + if FLAGS.attention_option is not None: + decoder_attention_keys = [ + v for v in tf.trainable_variables() + if v.op.name == 'dis/decoder/attention_keys/weights' + ][0] + decoder_attention_construct_weights = [ + v for v in tf.trainable_variables() + if v.op.name == 'dis/decoder/rnn/attention_construct/weights' + ][0] + + ## Decoder. + decoder_lstm_w_0 = [ + v for v in tf.trainable_variables() if v.op.name == + 'dis/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel' + ][0] + decoder_lstm_b_0 = [ + v for v in tf.trainable_variables() if v.op.name == + 'dis/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias' + ][0] + decoder_lstm_w_1 = [ + v for v in tf.trainable_variables() if v.op.name == + 'dis/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel' + ][0] + decoder_lstm_b_1 = [ + v for v in tf.trainable_variables() if v.op.name == + 'dis/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias' + ][0] + + # Standard variable mappings. + variable_mapping = { + 'gen/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': + encoder_lstm_w_0, + 'gen/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias': + encoder_lstm_b_0, + 'gen/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': + encoder_lstm_w_1, + 'gen/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias': + encoder_lstm_b_1, + 'gen/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': + decoder_lstm_w_0, + 'gen/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias': + decoder_lstm_b_0, + 'gen/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': + decoder_lstm_w_1, + 'gen/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias': + decoder_lstm_b_1 + } + + # Optional variable mappings. 
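+ # The decoder embedding is added only when it is not shared with the
+ # Generator, and the attention weights only when attention is enabled.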
+ if not FLAGS.dis_share_embedding: + variable_mapping['gen/decoder/rnn/embedding'] = decoder_embedding + if FLAGS.attention_option is not None: + variable_mapping[ + 'gen/decoder/attention_keys/weights'] = decoder_attention_keys + variable_mapping[ + 'gen/decoder/rnn/attention_construct/weights'] = decoder_attention_construct_weights + + return variable_mapping diff --git a/models/research/maskgan/models/__init__.py b/models/research/maskgan/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/maskgan/models/attention_utils.py b/models/research/maskgan/models/attention_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4bd9e41dd3178d6210e8f81d628b7d92004a6601 --- /dev/null +++ b/models/research/maskgan/models/attention_utils.py @@ -0,0 +1,477 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Attention-based decoder functions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf +from tensorflow.python.framework import function + +__all__ = [ + "prepare_attention", "attention_decoder_fn_train", + "attention_decoder_fn_inference" +] + + +def attention_decoder_fn_train(encoder_state, + attention_keys, + attention_values, + attention_score_fn, + attention_construct_fn, + name=None): + """Attentional decoder function for `dynamic_rnn_decoder` during training. + + The `attention_decoder_fn_train` is a training function for an + attention-based sequence-to-sequence model. It should be used when + `dynamic_rnn_decoder` is in the training mode. + + The `attention_decoder_fn_train` is called with a set of the user arguments + and returns the `decoder_fn`, which can be passed to the + `dynamic_rnn_decoder`, such that + + ``` + dynamic_fn_train = attention_decoder_fn_train(encoder_state) + outputs_train, state_train = dynamic_rnn_decoder( + decoder_fn=dynamic_fn_train, ...) + ``` + + Further usage can be found in the `kernel_tests/seq2seq_test.py`. + + Args: + encoder_state: The encoded state to initialize the `dynamic_rnn_decoder`. + attention_keys: to be compared with target states. + attention_values: to be used to construct context vectors. + attention_score_fn: to compute similarity between key and target states. + attention_construct_fn: to build attention states. + name: (default: `None`) NameScope for the decoder function; + defaults to "simple_decoder_fn_train" + + Returns: + A decoder function with the required interface of `dynamic_rnn_decoder` + intended for training. 
+ """ + with tf.name_scope(name, "attention_decoder_fn_train", [ + encoder_state, attention_keys, attention_values, attention_score_fn, + attention_construct_fn + ]): + pass + + def decoder_fn(time, cell_state, cell_input, cell_output, context_state): + """Decoder function used in the `dynamic_rnn_decoder` for training. + + Args: + time: positive integer constant reflecting the current timestep. + cell_state: state of RNNCell. + cell_input: input provided by `dynamic_rnn_decoder`. + cell_output: output of RNNCell. + context_state: context state provided by `dynamic_rnn_decoder`. + + Returns: + A tuple (done, next state, next input, emit output, next context state) + where: + + done: `None`, which is used by the `dynamic_rnn_decoder` to indicate + that `sequence_lengths` in `dynamic_rnn_decoder` should be used. + + next state: `cell_state`, this decoder function does not modify the + given state. + + next input: `cell_input`, this decoder function does not modify the + given input. The input could be modified when applying e.g. attention. + + emit output: `cell_output`, this decoder function does not modify the + given output. + + next context state: `context_state`, this decoder function does not + modify the given context state. The context state could be modified when + applying e.g. beam search. + """ + with tf.name_scope( + name, "attention_decoder_fn_train", + [time, cell_state, cell_input, cell_output, context_state]): + if cell_state is None: # first call, return encoder_state + cell_state = encoder_state + + # init attention + attention = _init_attention(encoder_state) + else: + # construct attention + attention = attention_construct_fn(cell_output, attention_keys, + attention_values) + cell_output = attention + + # combine cell_input and attention + next_input = tf.concat([cell_input, attention], 1) + + return (None, cell_state, next_input, cell_output, context_state) + + return decoder_fn + + +def attention_decoder_fn_inference(output_fn, + encoder_state, + attention_keys, + attention_values, + attention_score_fn, + attention_construct_fn, + embeddings, + start_of_sequence_id, + end_of_sequence_id, + maximum_length, + num_decoder_symbols, + dtype=tf.int32, + name=None): + """Attentional decoder function for `dynamic_rnn_decoder` during inference. + + The `attention_decoder_fn_inference` is a simple inference function for a + sequence-to-sequence model. It should be used when `dynamic_rnn_decoder` is + in the inference mode. + + The `attention_decoder_fn_inference` is called with user arguments + and returns the `decoder_fn`, which can be passed to the + `dynamic_rnn_decoder`, such that + + ``` + dynamic_fn_inference = attention_decoder_fn_inference(...) + outputs_inference, state_inference = dynamic_rnn_decoder( + decoder_fn=dynamic_fn_inference, ...) + ``` + + Further usage can be found in the `kernel_tests/seq2seq_test.py`. + + Args: + output_fn: An output function to project your `cell_output` onto class + logits. + + An example of an output function; + + ``` + tf.variable_scope("decoder") as varscope + output_fn = lambda x: tf.contrib.layers.linear(x, num_decoder_symbols, + scope=varscope) + + outputs_train, state_train = seq2seq.dynamic_rnn_decoder(...) + logits_train = output_fn(outputs_train) + + varscope.reuse_variables() + logits_inference, state_inference = seq2seq.dynamic_rnn_decoder( + output_fn=output_fn, ...) + ``` + + If `None` is supplied it will act as an identity function, which + might be wanted when using the RNNCell `OutputProjectionWrapper`. 
+ + encoder_state: The encoded state to initialize the `dynamic_rnn_decoder`. + attention_keys: to be compared with target states. + attention_values: to be used to construct context vectors. + attention_score_fn: to compute similarity between key and target states. + attention_construct_fn: to build attention states. + embeddings: The embeddings matrix used for the decoder sized + `[num_decoder_symbols, embedding_size]`. + start_of_sequence_id: The start of sequence ID in the decoder embeddings. + end_of_sequence_id: The end of sequence ID in the decoder embeddings. + maximum_length: The maximum allowed of time steps to decode. + num_decoder_symbols: The number of classes to decode at each time step. + dtype: (default: `tf.int32`) The default data type to use when + handling integer objects. + name: (default: `None`) NameScope for the decoder function; + defaults to "attention_decoder_fn_inference" + + Returns: + A decoder function with the required interface of `dynamic_rnn_decoder` + intended for inference. + """ + with tf.name_scope(name, "attention_decoder_fn_inference", [ + output_fn, encoder_state, attention_keys, attention_values, + attention_score_fn, attention_construct_fn, embeddings, + start_of_sequence_id, end_of_sequence_id, maximum_length, + num_decoder_symbols, dtype + ]): + start_of_sequence_id = tf.convert_to_tensor(start_of_sequence_id, dtype) + end_of_sequence_id = tf.convert_to_tensor(end_of_sequence_id, dtype) + maximum_length = tf.convert_to_tensor(maximum_length, dtype) + num_decoder_symbols = tf.convert_to_tensor(num_decoder_symbols, dtype) + encoder_info = tf.contrib.framework.nest.flatten(encoder_state)[0] + batch_size = encoder_info.get_shape()[0].value + if output_fn is None: + output_fn = lambda x: x + if batch_size is None: + batch_size = tf.shape(encoder_info)[0] + + def decoder_fn(time, cell_state, cell_input, cell_output, context_state): + """Decoder function used in the `dynamic_rnn_decoder` for inference. + + The main difference between this decoder function and the `decoder_fn` in + `attention_decoder_fn_train` is how `next_cell_input` is calculated. In + decoder function we calculate the next input by applying an argmax across + the feature dimension of the output from the decoder. This is a + greedy-search approach. (Bahdanau et al., 2014) & (Sutskever et al., 2014) + use beam-search instead. + + Args: + time: positive integer constant reflecting the current timestep. + cell_state: state of RNNCell. + cell_input: input provided by `dynamic_rnn_decoder`. + cell_output: output of RNNCell. + context_state: context state provided by `dynamic_rnn_decoder`. + + Returns: + A tuple (done, next state, next input, emit output, next context state) + where: + + done: A boolean vector to indicate which sentences has reached a + `end_of_sequence_id`. This is used for early stopping by the + `dynamic_rnn_decoder`. When `time>=maximum_length` a boolean vector with + all elements as `true` is returned. + + next state: `cell_state`, this decoder function does not modify the + given state. + + next input: The embedding from argmax of the `cell_output` is used as + `next_input`. + + emit output: If `output_fn is None` the supplied `cell_output` is + returned, else the `output_fn` is used to update the `cell_output` + before calculating `next_input` and returning `cell_output`. + + next context state: `context_state`, this decoder function does not + modify the given context state. The context state could be modified when + applying e.g. beam search. 
+ + Raises: + ValueError: if cell_input is not None. + + """ + with tf.name_scope( + name, "attention_decoder_fn_inference", + [time, cell_state, cell_input, cell_output, context_state]): + if cell_input is not None: + raise ValueError( + "Expected cell_input to be None, but saw: %s" % cell_input) + if cell_output is None: + # invariant that this is time == 0 + next_input_id = tf.ones( + [ + batch_size, + ], dtype=dtype) * ( + start_of_sequence_id) + done = tf.zeros( + [ + batch_size, + ], dtype=tf.bool) + cell_state = encoder_state + cell_output = tf.zeros([num_decoder_symbols], dtype=tf.float32) + cell_input = tf.gather(embeddings, next_input_id) + + # init attention + attention = _init_attention(encoder_state) + else: + # construct attention + attention = attention_construct_fn(cell_output, attention_keys, + attention_values) + cell_output = attention + + # argmax decoder + cell_output = output_fn(cell_output) # logits + next_input_id = tf.cast(tf.argmax(cell_output, 1), dtype=dtype) + done = tf.equal(next_input_id, end_of_sequence_id) + cell_input = tf.gather(embeddings, next_input_id) + + # combine cell_input and attention + next_input = tf.concat([cell_input, attention], 1) + + # if time > maxlen, return all true vector + done = tf.cond( + tf.greater(time, maximum_length), + lambda: tf.ones([ + batch_size,], dtype=tf.bool), lambda: done) + return (done, cell_state, next_input, cell_output, context_state) + + return decoder_fn + + +## Helper functions ## +def prepare_attention(attention_states, attention_option, num_units, + reuse=None): + """Prepare keys/values/functions for attention. + + Args: + attention_states: hidden states to attend over. + attention_option: how to compute attention, either "luong" or "bahdanau". + num_units: hidden state dimension. + reuse: whether to reuse variable scope. + + Returns: + attention_keys: to be compared with target states. + attention_values: to be used to construct context vectors. + attention_score_fn: to compute similarity between key and target states. + attention_construct_fn: to build attention states. + """ + # Prepare attention keys / values from attention_states + with tf.variable_scope("attention_keys", reuse=reuse) as scope: + attention_keys = tf.contrib.layers.linear( + attention_states, num_units, biases_initializer=None, scope=scope) + attention_values = attention_states + + # Attention score function + attention_score_fn = _create_attention_score_fn("attention_score", num_units, + attention_option, reuse) + # Attention construction function + attention_construct_fn = _create_attention_construct_fn( + "attention_construct", num_units, attention_score_fn, reuse) + + return (attention_keys, attention_values, attention_score_fn, + attention_construct_fn) + + +def _init_attention(encoder_state): + """Initialize attention. Handling both LSTM and GRU. + + Args: + encoder_state: The encoded state to initialize the `dynamic_rnn_decoder`. + + Returns: + attn: initial zero attention vector. + """ + + # Multi- vs single-layer + # TODO(thangluong): is this the best way to check? + if isinstance(encoder_state, tuple): + top_state = encoder_state[-1] + else: + top_state = encoder_state + + # LSTM vs GRU + if isinstance(top_state, tf.contrib.rnn.LSTMStateTuple): + attn = tf.zeros_like(top_state.h) + else: + attn = tf.zeros_like(top_state) + + return attn + + +def _create_attention_construct_fn(name, num_units, attention_score_fn, reuse): + """Function to compute attention vectors. + + Args: + name: to label variables. 
+ num_units: hidden state dimension. + attention_score_fn: to compute similarity between key and target states. + reuse: whether to reuse variable scope. + + Returns: + attention_construct_fn: to build attention states. + """ + + def construct_fn(attention_query, attention_keys, attention_values): + with tf.variable_scope(name, reuse=reuse) as scope: + context = attention_score_fn(attention_query, attention_keys, + attention_values) + concat_input = tf.concat([attention_query, context], 1) + attention = tf.contrib.layers.linear( + concat_input, num_units, biases_initializer=None, scope=scope) + return attention + + return construct_fn + + +# keys: [batch_size, attention_length, attn_size] +# query: [batch_size, 1, attn_size] +# return weights [batch_size, attention_length] +@function.Defun(func_name="attn_add_fun", noinline=True) +def _attn_add_fun(v, keys, query): + return tf.reduce_sum(v * tf.tanh(keys + query), [2]) + + +@function.Defun(func_name="attn_mul_fun", noinline=True) +def _attn_mul_fun(keys, query): + return tf.reduce_sum(keys * query, [2]) + + +def _create_attention_score_fn(name, + num_units, + attention_option, + reuse, + dtype=tf.float32): + """Different ways to compute attention scores. + + Args: + name: to label variables. + num_units: hidden state dimension. + attention_option: how to compute attention, either "luong" or "bahdanau". + "bahdanau": additive (Bahdanau et al., ICLR'2015) + "luong": multiplicative (Luong et al., EMNLP'2015) + reuse: whether to reuse variable scope. + dtype: (default: `tf.float32`) data type to use. + + Returns: + attention_score_fn: to compute similarity between key and target states. + """ + with tf.variable_scope(name, reuse=reuse): + if attention_option == "bahdanau": + query_w = tf.get_variable("attnW", [num_units, num_units], dtype=dtype) + score_v = tf.get_variable("attnV", [num_units], dtype=dtype) + + def attention_score_fn(query, keys, values): + """Put attention masks on attention_values using attention_keys and query. + + Args: + query: A Tensor of shape [batch_size, num_units]. + keys: A Tensor of shape [batch_size, attention_length, num_units]. + values: A Tensor of shape [batch_size, attention_length, num_units]. + + Returns: + context_vector: A Tensor of shape [batch_size, num_units]. + + Raises: + ValueError: if attention_option is neither "luong" or "bahdanau". + + + """ + if attention_option == "bahdanau": + # transform query + query = tf.matmul(query, query_w) + + # reshape query: [batch_size, 1, num_units] + query = tf.reshape(query, [-1, 1, num_units]) + + # attn_fun + scores = _attn_add_fun(score_v, keys, query) + elif attention_option == "luong": + # reshape query: [batch_size, 1, num_units] + query = tf.reshape(query, [-1, 1, num_units]) + + # attn_fun + scores = _attn_mul_fun(keys, query) + else: + raise ValueError("Unknown attention option %s!" % attention_option) + + # Compute alignment weights + # scores: [batch_size, length] + # alignments: [batch_size, length] + # TODO(thangluong): not normalize over padding positions. + alignments = tf.nn.softmax(scores) + + # Now calculate the attention-weighted vector. 
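The two branches above implement the standard attention scores: Luong's multiplicative score `reduce_sum(keys * query)` and Bahdanau's additive score `reduce_sum(v * tanh(keys + query))` (with the query first projected by `attnW`), followed by a softmax over the time axis and the weighted sum computed next. A minimal NumPy sketch of the same arithmetic; it omits the query projection, and the shapes and values are hypothetical, not taken from the maskgan code:

import numpy as np

batch, length, num_units = 2, 5, 4
keys = np.random.randn(batch, length, num_units)     # projected encoder states
values = np.random.randn(batch, length, num_units)   # raw encoder states
query = np.random.randn(batch, 1, num_units)         # current decoder state
v = np.random.randn(num_units)                       # Bahdanau score vector

luong_scores = np.sum(keys * query, axis=2)                  # [batch, length]
bahdanau_scores = np.sum(v * np.tanh(keys + query), axis=2)  # [batch, length]

def softmax(x):
  e = np.exp(x - x.max(axis=1, keepdims=True))
  return e / e.sum(axis=1, keepdims=True)

alignments = softmax(luong_scores)                           # [batch, length]
context = np.sum(alignments[:, :, None] * values, axis=1)    # [batch, num_units]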
+ alignments = tf.expand_dims(alignments, 2) + context_vector = tf.reduce_sum(alignments * values, [1]) + context_vector.set_shape([None, num_units]) + + return context_vector + + return attention_score_fn diff --git a/models/research/maskgan/models/bidirectional.py b/models/research/maskgan/models/bidirectional.py new file mode 100644 index 0000000000000000000000000000000000000000..1e6b3fe45f9ffe7dffdeb5c0d571de7e68227498 --- /dev/null +++ b/models/research/maskgan/models/bidirectional.py @@ -0,0 +1,75 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Simple bidirectional model definitions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +# ZoneoutWrapper. +from regularization import zoneout + +FLAGS = tf.app.flags.FLAGS + + +def discriminator(hparams, sequence, is_training, reuse=None): + """Define the bidirectional Discriminator graph.""" + sequence = tf.cast(sequence, tf.int32) + + if FLAGS.dis_share_embedding: + assert hparams.dis_rnn_size == hparams.gen_rnn_size, ( + 'If you wish to share Discriminator/Generator embeddings, they must be' + ' same dimension.') + with tf.variable_scope('gen/rnn', reuse=True): + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.gen_rnn_size]) + + with tf.variable_scope('dis', reuse=reuse): + cell_fwd = tf.contrib.rnn.LayerNormBasicLSTMCell( + hparams.dis_rnn_size, forget_bias=1.0, reuse=reuse) + cell_bwd = tf.contrib.rnn.LayerNormBasicLSTMCell( + hparams.dis_rnn_size, forget_bias=1.0, reuse=reuse) + if FLAGS.zoneout_drop_prob > 0.0: + cell_fwd = zoneout.ZoneoutWrapper( + cell_fwd, + zoneout_drop_prob=FLAGS.zoneout_drop_prob, + is_training=is_training) + cell_bwd = zoneout.ZoneoutWrapper( + cell_bwd, + zoneout_drop_prob=FLAGS.zoneout_drop_prob, + is_training=is_training) + + state_fwd = cell_fwd.zero_state(FLAGS.batch_size, tf.float32) + state_bwd = cell_bwd.zero_state(FLAGS.batch_size, tf.float32) + + if not FLAGS.dis_share_embedding: + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.dis_rnn_size]) + + rnn_inputs = tf.nn.embedding_lookup(embedding, sequence) + rnn_inputs = tf.unstack(rnn_inputs, axis=1) + + with tf.variable_scope('rnn') as vs: + outputs, _, _ = tf.contrib.rnn.static_bidirectional_rnn( + cell_fwd, cell_bwd, rnn_inputs, state_fwd, state_bwd, scope=vs) + + # Prediction is linear output for Discriminator. 
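Concretely, the prediction step flagged in the comment above maps each bidirectional output to a single logit. A rough shape walk-through with hypothetical sizes (illustrative only, not part of bidirectional.py):

# Hypothetical sizes; the real values come from FLAGS and hparams.
batch_size, sequence_length, dis_rnn_size = 32, 20, 64

# embedding lookup:                  [32, 20, 64]
# tf.unstack(..., axis=1):           list of 20 tensors, each [32, 64]
# static_bidirectional_rnn outputs:  list of 20 tensors, each [32, 128]
#                                    (forward and backward states concatenated)
# linear(outputs, 1):                [20, 32, 1] once the list is packed
# transpose([1, 0, 2]) + squeeze:    [32, 20] -- one logit per token
per_step_dim = 2 * dis_rnn_size
final_shape = (batch_size, sequence_length)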
+ predictions = tf.contrib.layers.linear(outputs, 1, scope=vs) + + predictions = tf.transpose(predictions, [1, 0, 2]) + return tf.squeeze(predictions, axis=2) diff --git a/models/research/maskgan/models/bidirectional_vd.py b/models/research/maskgan/models/bidirectional_vd.py new file mode 100644 index 0000000000000000000000000000000000000000..469af9da57a8a0dbf280327308a17fa6e0277a86 --- /dev/null +++ b/models/research/maskgan/models/bidirectional_vd.py @@ -0,0 +1,116 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Simple bidirectional model definitions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf +from regularization import variational_dropout + +FLAGS = tf.app.flags.FLAGS + + +def discriminator(hparams, + sequence, + is_training, + reuse=None, + initial_state=None): + """Define the Discriminator graph.""" + sequence = tf.cast(sequence, tf.int32) + + if FLAGS.dis_share_embedding: + assert hparams.dis_rnn_size == hparams.gen_rnn_size, ( + 'If you wish to share Discriminator/Generator embeddings, they must be' + ' same dimension.') + with tf.variable_scope('gen/decoder/rnn', reuse=True): + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.gen_rnn_size]) + + with tf.variable_scope('dis', reuse=reuse): + + def lstm_cell(): + return tf.contrib.rnn.BasicLSTMCell( + hparams.dis_rnn_size, + forget_bias=0.0, + state_is_tuple=True, + reuse=reuse) + + attn_cell = lstm_cell + if is_training and hparams.dis_vd_keep_prob < 1: + + def attn_cell(): + return variational_dropout.VariationalDropoutWrapper( + lstm_cell(), FLAGS.batch_size, hparams.dis_rnn_size, + hparams.dis_vd_keep_prob, hparams.dis_vd_keep_prob) + + cell_fwd = tf.contrib.rnn.MultiRNNCell( + [attn_cell() for _ in range(hparams.dis_num_layers)], + state_is_tuple=True) + + cell_bwd = tf.contrib.rnn.MultiRNNCell( + [attn_cell() for _ in range(hparams.dis_num_layers)], + state_is_tuple=True) + + # print initial_state + # print cell_fwd.zero_state(FLAGS.batch_size, tf.float32) + if initial_state: + state_fwd = [[tf.identity(x) for x in inner_initial_state] + for inner_initial_state in initial_state] + state_bwd = cell_bwd.zero_state(FLAGS.batch_size, tf.float32) + else: + state_fwd = cell_fwd.zero_state(FLAGS.batch_size, tf.float32) + state_bwd = cell_bwd.zero_state(FLAGS.batch_size, tf.float32) + + def make_mask(keep_prob, units): + random_tensor = keep_prob + # 0. if [keep_prob, 1.0) and 1. 
if [1.0, 1.0 + keep_prob) + random_tensor += tf.random_uniform(tf.stack([FLAGS.batch_size, units])) + return tf.floor(random_tensor) / keep_prob + + if is_training: + output_mask = make_mask(hparams.dis_vd_keep_prob, + 2 * hparams.dis_rnn_size) + + if not FLAGS.dis_share_embedding: + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.dis_rnn_size]) + + rnn_inputs = tf.nn.embedding_lookup(embedding, sequence) + + rnn_inputs = tf.unstack(rnn_inputs, axis=1) + + with tf.variable_scope('rnn') as vs: + outputs, _, _ = tf.contrib.rnn.static_bidirectional_rnn( + cell_fwd, cell_bwd, rnn_inputs, state_fwd, state_bwd, scope=vs) + + if is_training: + outputs *= output_mask + + # Prediction is linear output for Discriminator. + predictions = tf.contrib.layers.linear(outputs, 1, scope=vs) + predictions = tf.transpose(predictions, [1, 0, 2]) + + if FLAGS.baseline_method == 'critic': + with tf.variable_scope('critic', reuse=reuse) as critic_scope: + values = tf.contrib.layers.linear(outputs, 1, scope=critic_scope) + values = tf.transpose(values, [1, 0, 2]) + + return tf.squeeze(predictions, axis=2), tf.squeeze(values, axis=2) + + else: + return tf.squeeze(predictions, axis=2), None diff --git a/models/research/maskgan/models/bidirectional_zaremba.py b/models/research/maskgan/models/bidirectional_zaremba.py new file mode 100644 index 0000000000000000000000000000000000000000..b0683d7cc1493a8aa0298b7dc91020a152a9da36 --- /dev/null +++ b/models/research/maskgan/models/bidirectional_zaremba.py @@ -0,0 +1,83 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
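The `make_mask` helper defined in bidirectional_vd.py above is a standard inverted-dropout mask: `keep_prob` plus a uniform [0, 1) sample floors to 1 with probability `keep_prob` and to 0 otherwise, and dividing by `keep_prob` rescales the surviving units. A small NumPy illustration with hypothetical sizes:

import numpy as np

keep_prob, batch_size, units = 0.75, 4, 6
random_tensor = keep_prob + np.random.uniform(size=(batch_size, units))
mask = np.floor(random_tensor) / keep_prob   # entries are 0.0 or 1 / keep_prob

# Each unit survives with probability keep_prob, and E[mask] == 1, so the
# expected scale of the masked activations matches the unmasked ones.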
+# ============================================================================== + +"""Simple bidirectional model definitions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +FLAGS = tf.app.flags.FLAGS + + +def discriminator(hparams, sequence, is_training, reuse=None): + """Define the bidirectional Discriminator graph.""" + sequence = tf.cast(sequence, tf.int32) + + if FLAGS.dis_share_embedding: + assert hparams.dis_rnn_size == hparams.gen_rnn_size, ( + 'If you wish to share Discriminator/Generator embeddings, they must be' + ' same dimension.') + with tf.variable_scope('gen/rnn', reuse=True): + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.gen_rnn_size]) + + with tf.variable_scope('dis', reuse=reuse): + + def lstm_cell(): + return tf.contrib.rnn.BasicLSTMCell( + hparams.dis_rnn_size, + forget_bias=0.0, + state_is_tuple=True, + reuse=reuse) + + attn_cell = lstm_cell + if is_training and FLAGS.keep_prob < 1: + + def attn_cell(): + return tf.contrib.rnn.DropoutWrapper( + lstm_cell(), output_keep_prob=FLAGS.keep_prob) + + cell_fwd = tf.contrib.rnn.MultiRNNCell( + [attn_cell() for _ in range(hparams.dis_num_layers)], + state_is_tuple=True) + + cell_bwd = tf.contrib.rnn.MultiRNNCell( + [attn_cell() for _ in range(hparams.dis_num_layers)], + state_is_tuple=True) + + state_fwd = cell_fwd.zero_state(FLAGS.batch_size, tf.float32) + state_bwd = cell_bwd.zero_state(FLAGS.batch_size, tf.float32) + + if not FLAGS.dis_share_embedding: + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.dis_rnn_size]) + + rnn_inputs = tf.nn.embedding_lookup(embedding, sequence) + if is_training and FLAGS.keep_prob < 1: + rnn_inputs = tf.nn.dropout(rnn_inputs, FLAGS.keep_prob) + rnn_inputs = tf.unstack(rnn_inputs, axis=1) + + with tf.variable_scope('rnn') as vs: + outputs, _, _ = tf.contrib.rnn.static_bidirectional_rnn( + cell_fwd, cell_bwd, rnn_inputs, state_fwd, state_bwd, scope=vs) + + # Prediction is linear output for Discriminator. + predictions = tf.contrib.layers.linear(outputs, 1, scope=vs) + + predictions = tf.transpose(predictions, [1, 0, 2]) + return tf.squeeze(predictions, axis=2) diff --git a/models/research/maskgan/models/cnn.py b/models/research/maskgan/models/cnn.py new file mode 100644 index 0000000000000000000000000000000000000000..ca682debf1630f5773cef48b874334d28d1fc6fc --- /dev/null +++ b/models/research/maskgan/models/cnn.py @@ -0,0 +1,93 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Simple CNN model definitions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +FLAGS = tf.app.flags.FLAGS + + +def discriminator(hparams, sequence, is_training, reuse=None): + """Define the Discriminator graph.""" + del is_training + sequence = tf.cast(sequence, tf.int32) + + if FLAGS.dis_share_embedding: + assert hparams.dis_rnn_size == hparams.gen_rnn_size, ( + "If you wish to share Discriminator/Generator embeddings, they must be" + " same dimension.") + with tf.variable_scope("gen/rnn", reuse=True): + embedding = tf.get_variable("embedding", + [FLAGS.vocab_size, hparams.gen_rnn_size]) + + dis_filter_sizes = [3, 4, 5, 6, 7, 8, 9, 10, 15, 20] + + with tf.variable_scope("dis", reuse=reuse): + if not FLAGS.dis_share_embedding: + embedding = tf.get_variable("embedding", + [FLAGS.vocab_size, hparams.dis_rnn_size]) + cnn_inputs = tf.nn.embedding_lookup(embedding, sequence) + + # Create a convolution layer for each filter size + conv_outputs = [] + for filter_size in dis_filter_sizes: + with tf.variable_scope("conv-%s" % filter_size): + # Convolution Layer + filter_shape = [ + filter_size, hparams.dis_rnn_size, hparams.dis_num_filters + ] + W = tf.get_variable( + name="W", initializer=tf.truncated_normal(filter_shape, stddev=0.1)) + b = tf.get_variable( + name="b", + initializer=tf.constant(0.1, shape=[hparams.dis_num_filters])) + conv = tf.nn.conv1d( + cnn_inputs, W, stride=1, padding="SAME", name="conv") + + # Apply nonlinearity + h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu") + + conv_outputs.append(h) + + # Combine all the pooled features + dis_num_filters_total = hparams.dis_num_filters * len(dis_filter_sizes) + + h_conv = tf.concat(conv_outputs, axis=2) + h_conv_flat = tf.reshape(h_conv, [-1, dis_num_filters_total]) + + # Add dropout + with tf.variable_scope("dropout"): + h_drop = tf.nn.dropout(h_conv_flat, FLAGS.keep_prob) + + with tf.variable_scope("fully_connected"): + fc = tf.contrib.layers.fully_connected( + h_drop, num_outputs=dis_num_filters_total // 2) + + # Final (unnormalized) scores and predictions + with tf.variable_scope("output"): + W = tf.get_variable( + "W", + shape=[dis_num_filters_total // 2, 1], + initializer=tf.contrib.layers.xavier_initializer()) + b = tf.get_variable(name="b", initializer=tf.constant(0.1, shape=[1])) + predictions = tf.nn.xw_plus_b(fc, W, b, name="predictions") + predictions = tf.reshape( + predictions, shape=[FLAGS.batch_size, FLAGS.sequence_length]) + return predictions diff --git a/models/research/maskgan/models/critic_vd.py b/models/research/maskgan/models/critic_vd.py new file mode 100644 index 0000000000000000000000000000000000000000..ede8b7bb77af28f562c2e3942728899fe9b16422 --- /dev/null +++ b/models/research/maskgan/models/critic_vd.py @@ -0,0 +1,108 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
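As a sanity check on the CNN discriminator in cnn.py above, the shape arithmetic works out as follows (hypothetical sizes; the real ones come from FLAGS and hparams):

batch_size, sequence_length = 32, 20
dis_rnn_size, dis_num_filters = 64, 10
dis_filter_sizes = [3, 4, 5, 6, 7, 8, 9, 10, 15, 20]

# embedding lookup:                [32, 20, 64]
# conv1d (SAME padding) per size:  [32, 20, 10]  -- sequence length preserved
# concat over the 10 filter sizes: [32, 20, 100]
dis_num_filters_total = dis_num_filters * len(dis_filter_sizes)   # 100
# reshape -> [640, 100], fully_connected -> [640, 50], xw_plus_b -> [640, 1],
# reshape back -> [32, 20]: one unnormalized score per token.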
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Critic model definitions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import xrange +import tensorflow as tf +from regularization import variational_dropout + +FLAGS = tf.app.flags.FLAGS + + +def critic_seq2seq_vd_derivative(hparams, sequence, is_training, reuse=None): + """Define the Critic graph which is derived from the seq2seq_vd + Discriminator. This will be initialized with the same parameters as the + language model and will share the forward RNN components with the + Discriminator. This estimates the V(s_t), where the state + s_t = x_0,...,x_t-1. + """ + assert FLAGS.discriminator_model == 'seq2seq_vd' + sequence = tf.cast(sequence, tf.int32) + + if FLAGS.dis_share_embedding: + assert hparams.dis_rnn_size == hparams.gen_rnn_size, ( + 'If you wish to share Discriminator/Generator embeddings, they must be' + ' same dimension.') + with tf.variable_scope('gen/decoder/rnn', reuse=True): + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.gen_rnn_size]) + else: + with tf.variable_scope('dis/decoder/rnn', reuse=True): + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.dis_rnn_size]) + + with tf.variable_scope( + 'dis/decoder/rnn/multi_rnn_cell', reuse=True) as dis_scope: + + def lstm_cell(): + return tf.contrib.rnn.BasicLSTMCell( + hparams.dis_rnn_size, + forget_bias=0.0, + state_is_tuple=True, + reuse=True) + + attn_cell = lstm_cell + if is_training and hparams.dis_vd_keep_prob < 1: + + def attn_cell(): + return variational_dropout.VariationalDropoutWrapper( + lstm_cell(), FLAGS.batch_size, hparams.dis_rnn_size, + hparams.dis_vd_keep_prob, hparams.dis_vd_keep_prob) + + cell_critic = tf.contrib.rnn.MultiRNNCell( + [attn_cell() for _ in range(hparams.dis_num_layers)], + state_is_tuple=True) + + with tf.variable_scope('critic', reuse=reuse): + state_dis = cell_critic.zero_state(FLAGS.batch_size, tf.float32) + + def make_mask(keep_prob, units): + random_tensor = keep_prob + # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob) + random_tensor += tf.random_uniform(tf.stack([FLAGS.batch_size, units])) + return tf.floor(random_tensor) / keep_prob + + if is_training: + output_mask = make_mask(hparams.dis_vd_keep_prob, hparams.dis_rnn_size) + + with tf.variable_scope('rnn') as vs: + values = [] + + rnn_inputs = tf.nn.embedding_lookup(embedding, sequence) + + for t in xrange(FLAGS.sequence_length): + if t > 0: + tf.get_variable_scope().reuse_variables() + + if t == 0: + rnn_in = tf.zeros_like(rnn_inputs[:, 0]) + else: + rnn_in = rnn_inputs[:, t - 1] + rnn_out, state_dis = cell_critic(rnn_in, state_dis, scope=dis_scope) + + if is_training: + rnn_out *= output_mask + + # Prediction is linear output for Discriminator. + value = tf.contrib.layers.linear(rnn_out, 1, scope=vs) + + values.append(value) + values = tf.stack(values, axis=1) + return tf.squeeze(values, axis=2) diff --git a/models/research/maskgan/models/evaluation_utils.py b/models/research/maskgan/models/evaluation_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..fc2a3a16f0b2c03736bfaa881c5c14546240d283 --- /dev/null +++ b/models/research/maskgan/models/evaluation_utils.py @@ -0,0 +1,280 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Evaluation utilities.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from collections import Counter +# Dependency imports +import numpy as np +from scipy.special import expit + +import tensorflow as tf + +from model_utils import helper +from model_utils import n_gram + +FLAGS = tf.app.flags.FLAGS + + +def print_and_log_losses(log, step, is_present_rate, avg_dis_loss, + avg_gen_loss): + """Prints and logs losses to the log file. + + Args: + log: GFile for logs. + step: Global step. + is_present_rate: Current masking rate. + avg_dis_loss: List of Discriminator losses. + avg_gen_loss: List of Generator losses. + """ + print('global_step: %d' % step) + print(' is_present_rate: %.3f' % is_present_rate) + print(' D train loss: %.5f' % np.mean(avg_dis_loss)) + print(' G train loss: %.5f' % np.mean(avg_gen_loss)) + log.write('\nglobal_step: %d\n' % step) + log.write((' is_present_rate: %.3f\n' % is_present_rate)) + log.write(' D train loss: %.5f\n' % np.mean(avg_dis_loss)) + log.write(' G train loss: %.5f\n' % np.mean(avg_gen_loss)) + + +def print_and_log(log, id_to_word, sequence_eval, max_num_to_print=5): + """Helper function for printing and logging evaluated sequences.""" + indices_arr = np.asarray(sequence_eval) + samples = helper.convert_to_human_readable(id_to_word, indices_arr, + max_num_to_print) + + for i, sample in enumerate(samples): + print('Sample', i, '. ', sample) + log.write('\nSample ' + str(i) + '. ' + sample) + log.write('\n') + print('\n') + log.flush() + return samples + + +def zip_seq_pred_crossent(id_to_word, sequences, predictions, cross_entropy): + """Zip together the sequences, predictions, cross entropy.""" + indices = np.asarray(sequences) + + batch_of_metrics = [] + + for ind_batch, pred_batch, crossent_batch in zip(indices, predictions, + cross_entropy): + metrics = [] + + for index, pred, crossent in zip(ind_batch, pred_batch, crossent_batch): + metrics.append([str(id_to_word[index]), pred, crossent]) + + batch_of_metrics.append(metrics) + return batch_of_metrics + + +def zip_metrics(indices, *args): + """Zip together the indices matrices with the provided metrics matrices.""" + batch_of_metrics = [] + for metrics_batch in zip(indices, *args): + + metrics = [] + for m in zip(*metrics_batch): + metrics.append(m) + batch_of_metrics.append(metrics) + return batch_of_metrics + + +def print_formatted(present, id_to_word, log, batch_of_tuples): + """Print and log metrics.""" + num_cols = len(batch_of_tuples[0][0]) + repeat_float_format = '{:<12.3f} ' + repeat_str_format = '{:<13}' + + format_str = ''.join( + ['[{:<1}] {:<20}', + str(repeat_float_format * (num_cols - 1))]) + + # TODO(liamfedus): Generalize the logging. This is sloppy. 
+ header_format_str = ''.join( + ['[{:<1}] {:<20}', + str(repeat_str_format * (num_cols - 1))]) + header_str = header_format_str.format('p', 'Word', 'p(real)', 'log-perp', + 'log(p(a))', 'r', 'R=V*(s)', 'b=V(s)', + 'A(a,s)') + + for i, batch in enumerate(batch_of_tuples): + print(' Sample: %d' % i) + log.write(' Sample %d.\n' % i) + print(' ', header_str) + log.write(' ' + str(header_str) + '\n') + + for j, t in enumerate(batch): + t = list(t) + t[0] = id_to_word[t[0]] + buffer_str = format_str.format(int(present[i][j]), *t) + print(' ', buffer_str) + log.write(' ' + str(buffer_str) + '\n') + log.flush() + + +def generate_RL_logs(sess, model, log, id_to_word, feed): + """Generate complete logs while running with REINFORCE.""" + # Impute Sequences. + [ + p, + fake_sequence_eval, + fake_predictions_eval, + _, + fake_cross_entropy_losses_eval, + _, + fake_log_probs_eval, + fake_rewards_eval, + fake_baselines_eval, + cumulative_rewards_eval, + fake_advantages_eval, + ] = sess.run( + [ + model.present, + model.fake_sequence, + model.fake_predictions, + model.real_predictions, + model.fake_cross_entropy_losses, + model.fake_logits, + model.fake_log_probs, + model.fake_rewards, + model.fake_baselines, + model.cumulative_rewards, + model.fake_advantages, + ], + feed_dict=feed) + + indices = np.asarray(fake_sequence_eval) + + # Convert Discriminator linear layer to probability. + fake_prob_eval = expit(fake_predictions_eval) + + # Add metrics. + fake_tuples = zip_metrics(indices, fake_prob_eval, + fake_cross_entropy_losses_eval, fake_log_probs_eval, + fake_rewards_eval, cumulative_rewards_eval, + fake_baselines_eval, fake_advantages_eval) + + # real_tuples = zip_metrics(indices, ) + + # Print forward sequences. + tuples_to_print = fake_tuples[:FLAGS.max_num_to_print] + print_formatted(p, id_to_word, log, tuples_to_print) + + print('Samples') + log.write('Samples\n') + samples = print_and_log(log, id_to_word, fake_sequence_eval, + FLAGS.max_num_to_print) + return samples + + +def generate_logs(sess, model, log, id_to_word, feed): + """Impute Sequences using the model for a particular feed and send it to + logs.""" + # Impute Sequences. + [ + p, sequence_eval, fake_predictions_eval, fake_cross_entropy_losses_eval, + fake_logits_eval + ] = sess.run( + [ + model.present, model.fake_sequence, model.fake_predictions, + model.fake_cross_entropy_losses, model.fake_logits + ], + feed_dict=feed) + + # Convert Discriminator linear layer to probability. + fake_prob_eval = expit(fake_predictions_eval) + + # Forward Masked Tuples. + fake_tuples = zip_seq_pred_crossent(id_to_word, sequence_eval, fake_prob_eval, + fake_cross_entropy_losses_eval) + + tuples_to_print = fake_tuples[:FLAGS.max_num_to_print] + + if FLAGS.print_verbose: + print('fake_logits_eval') + print(fake_logits_eval) + + for i, batch in enumerate(tuples_to_print): + print(' Sample %d.' % i) + log.write(' Sample %d.\n' % i) + for j, pred in enumerate(batch): + buffer_str = ('[{:<1}] {:<20} {:<7.3f} {:<7.3f}').format( + int(p[i][j]), pred[0], pred[1], pred[2]) + print(' ', buffer_str) + log.write(' ' + str(buffer_str) + '\n') + log.flush() + + print('Samples') + log.write('Samples\n') + samples = print_and_log(log, id_to_word, sequence_eval, + FLAGS.max_num_to_print) + return samples + + +def create_merged_ngram_dictionaries(indices, n): + """Generate a single dictionary for the full batch. + + Args: + indices: List of lists of indices. + n: Degree of n-grams. + + Returns: + Dictionary of hashed(n-gram tuples) to counts in the batch of indices. 
+ """ + ngram_dicts = [] + + for ind in indices: + ngrams = n_gram.find_all_ngrams(ind, n=n) + ngram_counts = n_gram.construct_ngrams_dict(ngrams) + ngram_dicts.append(ngram_counts) + + merged_gen_dict = Counter() + for ngram_dict in ngram_dicts: + merged_gen_dict += Counter(ngram_dict) + return merged_gen_dict + + +def sequence_ngram_evaluation(sess, sequence, log, feed, data_ngram_count, n): + """Calculates the percent of ngrams produced in the sequence is present in + data_ngram_count. + + Args: + sess: tf.Session. + sequence: Sequence Tensor from the MaskGAN model. + log: gFile log. + feed: Feed to evaluate. + data_ngram_count: Dictionary of hashed(n-gram tuples) to counts in the + data_set. + + Returns: + avg_percent_captured: Percent of produced ngrams that appear in the + data_ngram_count. + """ + del log + # Impute sequence. + [sequence_eval] = sess.run([sequence], feed_dict=feed) + indices = sequence_eval + + # Retrieve the counts across the batch of indices. + gen_ngram_counts = create_merged_ngram_dictionaries( + indices, n=n) + return n_gram.percent_unique_ngrams_in_train(data_ngram_count, + gen_ngram_counts) diff --git a/models/research/maskgan/models/feedforward.py b/models/research/maskgan/models/feedforward.py new file mode 100644 index 0000000000000000000000000000000000000000..d48a517d6bea65477b8a940ed770f92203da6dfd --- /dev/null +++ b/models/research/maskgan/models/feedforward.py @@ -0,0 +1,98 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Simple FNN model definitions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import xrange +import tensorflow as tf + +FLAGS = tf.app.flags.FLAGS + + +def discriminator(hparams, sequence, is_training, reuse=None): + """Define the Discriminator graph.""" + del is_training + sequence = tf.cast(sequence, tf.int32) + + if FLAGS.dis_share_embedding: + assert hparams.dis_rnn_size == hparams.gen_rnn_size, ( + "If you wish to share Discriminator/Generator embeddings, they must be" + " same dimension.") + with tf.variable_scope("gen/rnn", reuse=True): + embedding = tf.get_variable("embedding", + [FLAGS.vocab_size, hparams.gen_rnn_size]) + + with tf.variable_scope("dis", reuse=reuse): + if not FLAGS.dis_share_embedding: + embedding = tf.get_variable("embedding", + [FLAGS.vocab_size, hparams.dis_rnn_size]) + + embeddings = tf.nn.embedding_lookup(embedding, sequence) + + # Input matrices. + W = tf.get_variable( + "W", + initializer=tf.truncated_normal( + shape=[3 * hparams.dis_embedding_dim, hparams.dis_hidden_dim], + stddev=0.1)) + b = tf.get_variable( + "b", initializer=tf.constant(0.1, shape=[hparams.dis_hidden_dim])) + + # Output matrices. 
+ W_out = tf.get_variable( + "W_out", + initializer=tf.truncated_normal( + shape=[hparams.dis_hidden_dim, 1], stddev=0.1)) + b_out = tf.get_variable("b_out", initializer=tf.constant(0.1, shape=[1])) + + predictions = [] + for t in xrange(FLAGS.sequence_length): + if t > 0: + tf.get_variable_scope().reuse_variables() + + inp = embeddings[:, t] + + if t > 0: + past_inp = tf.unstack(embeddings[:, 0:t], axis=1) + avg_past_inp = tf.add_n(past_inp) / len(past_inp) + else: + avg_past_inp = tf.zeros_like(inp) + + if t < FLAGS.sequence_length: + future_inp = tf.unstack(embeddings[:, t:], axis=1) + avg_future_inp = tf.add_n(future_inp) / len(future_inp) + else: + avg_future_inp = tf.zeros_like(inp) + + # Cumulative input. + concat_inp = tf.concat([avg_past_inp, inp, avg_future_inp], axis=1) + + # Hidden activations. + hidden = tf.nn.relu(tf.nn.xw_plus_b(concat_inp, W, b, name="scores")) + + # Add dropout + with tf.variable_scope("dropout"): + hidden = tf.nn.dropout(hidden, FLAGS.keep_prob) + + # Output. + output = tf.nn.xw_plus_b(hidden, W_out, b_out, name="output") + + predictions.append(output) + predictions = tf.stack(predictions, axis=1) + return tf.squeeze(predictions, axis=2) diff --git a/models/research/maskgan/models/rnn.py b/models/research/maskgan/models/rnn.py new file mode 100644 index 0000000000000000000000000000000000000000..40b3a7aa3b85ddfd3002d845416b5004088620fc --- /dev/null +++ b/models/research/maskgan/models/rnn.py @@ -0,0 +1,211 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Simple RNN model definitions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import xrange +import tensorflow as tf + +# ZoneoutWrapper. +from regularization import zoneout + +FLAGS = tf.app.flags.FLAGS + + +def generator(hparams, + inputs, + targets, + targets_present, + is_training, + is_validating, + reuse=None): + """Define the Generator graph. + + G will now impute tokens that have been masked from the input seqeunce. + """ + tf.logging.warning( + 'Undirectional generative model is not a useful model for this MaskGAN ' + 'because future context is needed. 
Use only for debugging purposes.') + init_scale = 0.05 + initializer = tf.random_uniform_initializer(-init_scale, init_scale) + + with tf.variable_scope('gen', reuse=reuse, initializer=initializer): + + def lstm_cell(): + return tf.contrib.rnn.LayerNormBasicLSTMCell( + hparams.gen_rnn_size, reuse=reuse) + + attn_cell = lstm_cell + if FLAGS.zoneout_drop_prob > 0.0: + + def attn_cell(): + return zoneout.ZoneoutWrapper( + lstm_cell(), + zoneout_drop_prob=FLAGS.zoneout_drop_prob, + is_training=is_training) + + cell_gen = tf.contrib.rnn.MultiRNNCell( + [attn_cell() for _ in range(hparams.gen_num_layers)], + state_is_tuple=True) + + initial_state = cell_gen.zero_state(FLAGS.batch_size, tf.float32) + + with tf.variable_scope('rnn'): + sequence, logits, log_probs = [], [], [] + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.gen_rnn_size]) + softmax_w = tf.get_variable('softmax_w', + [hparams.gen_rnn_size, FLAGS.vocab_size]) + softmax_b = tf.get_variable('softmax_b', [FLAGS.vocab_size]) + + rnn_inputs = tf.nn.embedding_lookup(embedding, inputs) + + for t in xrange(FLAGS.sequence_length): + if t > 0: + tf.get_variable_scope().reuse_variables() + + # Input to the model is the first token to provide context. The + # model will then predict token t > 0. + if t == 0: + # Always provide the real input at t = 0. + state_gen = initial_state + rnn_inp = rnn_inputs[:, t] + + # If the target at the last time-step was present, read in the real. + # If the target at the last time-step was not present, read in the fake. + else: + real_rnn_inp = rnn_inputs[:, t] + fake_rnn_inp = tf.nn.embedding_lookup(embedding, fake) + + # Use teacher forcing. + if (is_training and + FLAGS.gen_training_strategy == 'cross_entropy') or is_validating: + rnn_inp = real_rnn_inp + else: + # Note that targets_t-1 == inputs_(t) + rnn_inp = tf.where(targets_present[:, t - 1], real_rnn_inp, + fake_rnn_inp) + + # RNN. + rnn_out, state_gen = cell_gen(rnn_inp, state_gen) + logit = tf.matmul(rnn_out, softmax_w) + softmax_b + + # Real sample. + real = targets[:, t] + + # Fake sample. + categorical = tf.contrib.distributions.Categorical(logits=logit) + fake = categorical.sample() + log_prob = categorical.log_prob(fake) + + # Output for Generator will either be generated or the target. + # If present: Return real. + # If not present: Return fake. + output = tf.where(targets_present[:, t], real, fake) + + # Append to lists. + sequence.append(output) + logits.append(logit) + log_probs.append(log_prob) + + # Produce the RNN state had the model operated only + # over real data. + real_state_gen = initial_state + for t in xrange(FLAGS.sequence_length): + tf.get_variable_scope().reuse_variables() + + rnn_inp = rnn_inputs[:, t] + + # RNN. + rnn_out, real_state_gen = cell_gen(rnn_inp, real_state_gen) + + final_state = real_state_gen + + return (tf.stack(sequence, axis=1), tf.stack(logits, axis=1), tf.stack( + log_probs, axis=1), initial_state, final_state) + + +def discriminator(hparams, sequence, is_training, reuse=None): + """Define the Discriminator graph. + + Args: + hparams: Hyperparameters for the MaskGAN. + FLAGS: Current flags. + sequence: [FLAGS.batch_size, FLAGS.sequence_length] + is_training: + reuse + + Returns: + predictions: + """ + tf.logging.warning( + 'Undirectional Discriminative model is not a useful model for this ' + 'MaskGAN because future context is needed. 
Use only for debugging ' + 'purposes.') + sequence = tf.cast(sequence, tf.int32) + + if FLAGS.dis_share_embedding: + assert hparams.dis_rnn_size == hparams.gen_rnn_size, ( + 'If you wish to share Discriminator/Generator embeddings, they must be' + ' same dimension.') + with tf.variable_scope('gen/rnn', reuse=True): + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.gen_rnn_size]) + + with tf.variable_scope('dis', reuse=reuse): + + def lstm_cell(): + return tf.contrib.rnn.LayerNormBasicLSTMCell( + hparams.dis_rnn_size, reuse=reuse) + + attn_cell = lstm_cell + if FLAGS.zoneout_drop_prob > 0.0: + + def attn_cell(): + return zoneout.ZoneoutWrapper( + lstm_cell(), + zoneout_drop_prob=FLAGS.zoneout_drop_prob, + is_training=is_training) + + cell_dis = tf.contrib.rnn.MultiRNNCell( + [attn_cell() for _ in range(hparams.dis_num_layers)], + state_is_tuple=True) + state_dis = cell_dis.zero_state(FLAGS.batch_size, tf.float32) + + with tf.variable_scope('rnn') as vs: + predictions = [] + if not FLAGS.dis_share_embedding: + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.dis_rnn_size]) + + rnn_inputs = tf.nn.embedding_lookup(embedding, sequence) + + for t in xrange(FLAGS.sequence_length): + if t > 0: + tf.get_variable_scope().reuse_variables() + + rnn_in = rnn_inputs[:, t] + rnn_out, state_dis = cell_dis(rnn_in, state_dis) + + # Prediction is linear output for Discriminator. + pred = tf.contrib.layers.linear(rnn_out, 1, scope=vs) + + predictions.append(pred) + predictions = tf.stack(predictions, axis=1) + return tf.squeeze(predictions, axis=2) diff --git a/models/research/maskgan/models/rnn_nas.py b/models/research/maskgan/models/rnn_nas.py new file mode 100644 index 0000000000000000000000000000000000000000..618ace2f8196fb4718ae01bc406f114523fd44cc --- /dev/null +++ b/models/research/maskgan/models/rnn_nas.py @@ -0,0 +1,234 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Simple RNN model definitions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +from six.moves import xrange +import tensorflow as tf + +# NAS Code.. +from nas_utils import configs +from nas_utils import custom_cell +from nas_utils import variational_dropout + +FLAGS = tf.app.flags.FLAGS + + +def get_config(): + return configs.AlienConfig2() + + +LSTMTuple = collections.namedtuple('LSTMTuple', ['c', 'h']) + + +def generator(hparams, + inputs, + targets, + targets_present, + is_training, + is_validating, + reuse=None): + """Define the Generator graph. + + G will now impute tokens that have been masked from the input seqeunce. + """ + tf.logging.info( + 'Undirectional generative model is not a useful model for this MaskGAN ' + 'because future context is needed. 
Use only for debugging purposes.') + config = get_config() + config.keep_prob = [hparams.gen_nas_keep_prob_0, hparams.gen_nas_keep_prob_1] + configs.print_config(config) + + init_scale = config.init_scale + initializer = tf.random_uniform_initializer(-init_scale, init_scale) + + with tf.variable_scope('gen', reuse=reuse, initializer=initializer): + # Neural architecture search cell. + cell = custom_cell.Alien(config.hidden_size) + + if is_training: + [h2h_masks, _, _, + output_mask] = variational_dropout.generate_variational_dropout_masks( + hparams, config.keep_prob) + else: + output_mask = None + + cell_gen = custom_cell.GenericMultiRNNCell([cell] * config.num_layers) + initial_state = cell_gen.zero_state(FLAGS.batch_size, tf.float32) + + with tf.variable_scope('rnn'): + sequence, logits, log_probs = [], [], [] + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.gen_rnn_size]) + softmax_w = tf.matrix_transpose(embedding) + softmax_b = tf.get_variable('softmax_b', [FLAGS.vocab_size]) + + rnn_inputs = tf.nn.embedding_lookup(embedding, inputs) + + if is_training and FLAGS.keep_prob < 1: + rnn_inputs = tf.nn.dropout(rnn_inputs, FLAGS.keep_prob) + + for t in xrange(FLAGS.sequence_length): + if t > 0: + tf.get_variable_scope().reuse_variables() + + # Input to the model is the first token to provide context. The + # model will then predict token t > 0. + if t == 0: + # Always provide the real input at t = 0. + state_gen = initial_state + rnn_inp = rnn_inputs[:, t] + + # If the input is present, read in the input at t. + # If the input is not present, read in the previously generated. + else: + real_rnn_inp = rnn_inputs[:, t] + fake_rnn_inp = tf.nn.embedding_lookup(embedding, fake) + + # While validating, the decoder should be operating in teacher + # forcing regime. Also, if we're just training with cross_entropy + # use teacher forcing. + if is_validating or (is_training and + FLAGS.gen_training_strategy == 'cross_entropy'): + rnn_inp = real_rnn_inp + else: + rnn_inp = tf.where(targets_present[:, t - 1], real_rnn_inp, + fake_rnn_inp) + + if is_training: + state_gen = list(state_gen) + for layer_num, per_layer_state in enumerate(state_gen): + per_layer_state = LSTMTuple( + per_layer_state[0], per_layer_state[1] * h2h_masks[layer_num]) + state_gen[layer_num] = per_layer_state + + # RNN. + rnn_out, state_gen = cell_gen(rnn_inp, state_gen) + + if is_training: + rnn_out = output_mask * rnn_out + + logit = tf.matmul(rnn_out, softmax_w) + softmax_b + + # Real sample. + real = targets[:, t] + + categorical = tf.contrib.distributions.Categorical(logits=logit) + fake = categorical.sample() + log_prob = categorical.log_prob(fake) + + # Output for Generator will either be generated or the input. + # + # If present: Return real. + # If not present: Return fake. + output = tf.where(targets_present[:, t], real, fake) + + # Add to lists. + sequence.append(output) + log_probs.append(log_prob) + logits.append(logit) + + # Produce the RNN state had the model operated only + # over real data. + real_state_gen = initial_state + for t in xrange(FLAGS.sequence_length): + tf.get_variable_scope().reuse_variables() + + rnn_inp = rnn_inputs[:, t] + + # RNN. 
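The selection just above, `tf.where(targets_present[:, t - 1], real_rnn_inp, fake_rnn_inp)`, is what mixes teacher forcing with free running per example: rows whose previous target was present receive the real embedding, the rest receive the embedding of the model's own sample. A minimal standalone sketch of that row selection (TF 1.x; shapes and values are hypothetical):

import numpy as np
import tensorflow as tf

batch_size, embedding_dim = 3, 4
real_rnn_inp = tf.constant(np.ones((batch_size, embedding_dim), np.float32))
fake_rnn_inp = tf.constant(np.zeros((batch_size, embedding_dim), np.float32))
targets_present = tf.constant([True, False, True])

# With a rank-1 boolean condition, tf.where acts as a row selector.
rnn_inp = tf.where(targets_present, real_rnn_inp, fake_rnn_inp)

with tf.Session() as sess:
  print(sess.run(rnn_inp))   # rows 0 and 2 are ones, row 1 is zeros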
+ rnn_out, real_state_gen = cell_gen(rnn_inp, real_state_gen) + + final_state = real_state_gen + + return (tf.stack(sequence, axis=1), tf.stack(logits, axis=1), tf.stack( + log_probs, axis=1), initial_state, final_state) + + +def discriminator(hparams, sequence, is_training, reuse=None): + """Define the Discriminator graph.""" + tf.logging.info( + 'Undirectional Discriminative model is not a useful model for this ' + 'MaskGAN because future context is needed. Use only for debugging ' + 'purposes.') + sequence = tf.cast(sequence, tf.int32) + + if FLAGS.dis_share_embedding: + assert hparams.dis_rnn_size == hparams.gen_rnn_size, ( + 'If you wish to share Discriminator/Generator embeddings, they must be' + ' same dimension.') + with tf.variable_scope('gen/rnn', reuse=True): + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.gen_rnn_size]) + + config = get_config() + config.keep_prob = [hparams.dis_nas_keep_prob_0, hparams.dis_nas_keep_prob_1] + configs.print_config(config) + + with tf.variable_scope('dis', reuse=reuse): + # Neural architecture search cell. + cell = custom_cell.Alien(config.hidden_size) + + if is_training: + [h2h_masks, _, _, + output_mask] = variational_dropout.generate_variational_dropout_masks( + hparams, config.keep_prob) + else: + output_mask = None + + cell_dis = custom_cell.GenericMultiRNNCell([cell] * config.num_layers) + state_dis = cell_dis.zero_state(FLAGS.batch_size, tf.float32) + + with tf.variable_scope('rnn') as vs: + predictions = [] + if not FLAGS.dis_share_embedding: + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.dis_rnn_size]) + + rnn_inputs = tf.nn.embedding_lookup(embedding, sequence) + + if is_training and FLAGS.keep_prob < 1: + rnn_inputs = tf.nn.dropout(rnn_inputs, FLAGS.keep_prob) + + for t in xrange(FLAGS.sequence_length): + if t > 0: + tf.get_variable_scope().reuse_variables() + + rnn_in = rnn_inputs[:, t] + + if is_training: + state_dis = list(state_dis) + for layer_num, per_layer_state in enumerate(state_dis): + per_layer_state = LSTMTuple( + per_layer_state[0], per_layer_state[1] * h2h_masks[layer_num]) + state_dis[layer_num] = per_layer_state + + # RNN. + rnn_out, state_dis = cell_dis(rnn_in, state_dis) + + if is_training: + rnn_out = output_mask * rnn_out + + # Prediction is linear output for Discriminator. + pred = tf.contrib.layers.linear(rnn_out, 1, scope=vs) + + predictions.append(pred) + predictions = tf.stack(predictions, axis=1) + return tf.squeeze(predictions, axis=2) diff --git a/models/research/maskgan/models/rnn_vd.py b/models/research/maskgan/models/rnn_vd.py new file mode 100644 index 0000000000000000000000000000000000000000..428f1a54bda7d6e5f9dd55061149664b1b3e751d --- /dev/null +++ b/models/research/maskgan/models/rnn_vd.py @@ -0,0 +1,118 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
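In the NAS generator and discriminator above, the `h2h_masks` returned by `variational_dropout.generate_variational_dropout_masks` (not shown in this diff) are sampled once and then reused at every timestep, multiplying only the hidden part of each layer's LSTM state. A schematic NumPy illustration of that recurrent-state dropout, with hypothetical sizes:

import numpy as np

batch, hidden, keep_prob = 4, 8, 0.9
# One mask per layer, sampled once and reused across all timesteps; the
# reuse across time is what makes the dropout "variational".
h2h_mask = (np.random.uniform(size=(batch, hidden)) < keep_prob) / keep_prob

c = np.random.randn(batch, hidden)   # cell state is left untouched
h = np.random.randn(batch, hidden)
h_masked = h * h2h_mask              # applied before every recurrent step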
+# ============================================================================== + +"""Simple RNN model definitions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import xrange +import tensorflow as tf +from regularization import variational_dropout + +FLAGS = tf.app.flags.FLAGS + + +def discriminator(hparams, + sequence, + is_training, + reuse=None, + initial_state=None): + """Define the Discriminator graph.""" + tf.logging.info( + 'Undirectional Discriminative model is not a useful model for this ' + 'MaskGAN because future context is needed. Use only for debugging ' + 'purposes.') + sequence = tf.cast(sequence, tf.int32) + + if FLAGS.dis_share_embedding: + assert hparams.dis_rnn_size == hparams.gen_rnn_size, ( + 'If you wish to share Discriminator/Generator embeddings, they must be' + ' same dimension.') + with tf.variable_scope('gen/decoder/rnn', reuse=True): + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.gen_rnn_size]) + + with tf.variable_scope('dis', reuse=reuse): + + def lstm_cell(): + return tf.contrib.rnn.BasicLSTMCell( + hparams.dis_rnn_size, + forget_bias=0.0, + state_is_tuple=True, + reuse=reuse) + + attn_cell = lstm_cell + if is_training and hparams.dis_vd_keep_prob < 1: + + def attn_cell(): + return variational_dropout.VariationalDropoutWrapper( + lstm_cell(), FLAGS.batch_size, hparams.dis_rnn_size, + hparams.dis_vd_keep_prob, hparams.dis_vd_keep_prob) + + cell_dis = tf.contrib.rnn.MultiRNNCell( + [attn_cell() for _ in range(hparams.dis_num_layers)], + state_is_tuple=True) + + if initial_state: + state_dis = [[tf.identity(x) for x in inner_initial_state] + for inner_initial_state in initial_state] + else: + state_dis = cell_dis.zero_state(FLAGS.batch_size, tf.float32) + + def make_mask(keep_prob, units): + random_tensor = keep_prob + # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob) + random_tensor += tf.random_uniform(tf.stack([FLAGS.batch_size, units])) + return tf.floor(random_tensor) / keep_prob + + if is_training: + output_mask = make_mask(hparams.dis_vd_keep_prob, hparams.dis_rnn_size) + + with tf.variable_scope('rnn') as vs: + predictions, rnn_outs = [], [] + + if not FLAGS.dis_share_embedding: + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.dis_rnn_size]) + + rnn_inputs = tf.nn.embedding_lookup(embedding, sequence) + + for t in xrange(FLAGS.sequence_length): + if t > 0: + tf.get_variable_scope().reuse_variables() + + rnn_in = rnn_inputs[:, t] + rnn_out, state_dis = cell_dis(rnn_in, state_dis) + + if is_training: + rnn_out *= output_mask + + # Prediction is linear output for Discriminator. 
+ pred = tf.contrib.layers.linear(rnn_out, 1, scope=vs) + predictions.append(pred) + rnn_outs.append(rnn_out) + + predictions = tf.stack(predictions, axis=1) + + if FLAGS.baseline_method == 'critic': + with tf.variable_scope('critic', reuse=reuse) as critic_scope: + rnn_outs = tf.stack(rnn_outs, axis=1) + values = tf.contrib.layers.linear(rnn_outs, 1, scope=critic_scope) + return tf.squeeze(predictions, axis=2), tf.squeeze(values, axis=2) + + else: + return tf.squeeze(predictions, axis=2), None diff --git a/models/research/maskgan/models/rnn_zaremba.py b/models/research/maskgan/models/rnn_zaremba.py new file mode 100644 index 0000000000000000000000000000000000000000..9369c77fbb849551721b46321e6868a7aeaceea6 --- /dev/null +++ b/models/research/maskgan/models/rnn_zaremba.py @@ -0,0 +1,196 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Simple RNN model definitions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import xrange +import tensorflow as tf + +FLAGS = tf.app.flags.FLAGS + + +def generator(hparams, + inputs, + targets, + targets_present, + is_training, + is_validating, + reuse=None): + """Define the Generator graph. + + G will now impute tokens that have been masked from the input seqeunce. + """ + tf.logging.warning( + 'Undirectional generative model is not a useful model for this MaskGAN ' + 'because future context is needed. Use only for debugging purposes.') + init_scale = 0.05 + initializer = tf.random_uniform_initializer(-init_scale, init_scale) + with tf.variable_scope('gen', reuse=reuse, initializer=initializer): + + def lstm_cell(): + return tf.contrib.rnn.BasicLSTMCell(hparams.gen_rnn_size, + forget_bias=0.0, + state_is_tuple=True, + reuse=reuse) + + attn_cell = lstm_cell + if is_training and FLAGS.keep_prob < 1: + + def attn_cell(): + return tf.contrib.rnn.DropoutWrapper( + lstm_cell(), output_keep_prob=FLAGS.keep_prob) + + cell_gen = tf.contrib.rnn.MultiRNNCell( + [attn_cell() for _ in range(hparams.gen_num_layers)], + state_is_tuple=True) + + initial_state = cell_gen.zero_state(FLAGS.batch_size, tf.float32) + + with tf.variable_scope('rnn'): + sequence, logits, log_probs = [], [], [] + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.gen_rnn_size]) + softmax_w = tf.get_variable('softmax_w', + [hparams.gen_rnn_size, FLAGS.vocab_size]) + softmax_b = tf.get_variable('softmax_b', [FLAGS.vocab_size]) + + rnn_inputs = tf.nn.embedding_lookup(embedding, inputs) + + if is_training and FLAGS.keep_prob < 1: + rnn_inputs = tf.nn.dropout(rnn_inputs, FLAGS.keep_prob) + + fake = None + for t in xrange(FLAGS.sequence_length): + if t > 0: + tf.get_variable_scope().reuse_variables() + + # Input to the model is the first token to provide context. The + # model will then predict token t > 0. 
+ if t == 0: + # Always provide the real input at t = 0. + state_gen = initial_state + rnn_inp = rnn_inputs[:, t] + + # If the input is present, read in the input at t. + # If the input is not present, read in the previously generated. + else: + real_rnn_inp = rnn_inputs[:, t] + fake_rnn_inp = tf.nn.embedding_lookup(embedding, fake) + + # While validating, the decoder should be operating in teacher + # forcing regime. Also, if we're just training with cross_entropy + # use teacher forcing. + if is_validating or (is_training and + FLAGS.gen_training_strategy == 'cross_entropy'): + rnn_inp = real_rnn_inp + else: + rnn_inp = tf.where(targets_present[:, t - 1], real_rnn_inp, + fake_rnn_inp) + + # RNN. + rnn_out, state_gen = cell_gen(rnn_inp, state_gen) + logit = tf.matmul(rnn_out, softmax_w) + softmax_b + + # Real sample. + real = targets[:, t] + + categorical = tf.contrib.distributions.Categorical(logits=logit) + fake = categorical.sample() + log_prob = categorical.log_prob(fake) + + # Output for Generator will either be generated or the input. + # + # If present: Return real. + # If not present: Return fake. + output = tf.where(targets_present[:, t], real, fake) + + # Add to lists. + sequence.append(output) + log_probs.append(log_prob) + logits.append(logit) + + # Produce the RNN state had the model operated only + # over real data. + real_state_gen = initial_state + for t in xrange(FLAGS.sequence_length): + tf.get_variable_scope().reuse_variables() + + rnn_inp = rnn_inputs[:, t] + + # RNN. + rnn_out, real_state_gen = cell_gen(rnn_inp, real_state_gen) + + final_state = real_state_gen + + return (tf.stack(sequence, axis=1), tf.stack(logits, axis=1), tf.stack( + log_probs, axis=1), initial_state, final_state) + + +def discriminator(hparams, sequence, is_training, reuse=None): + """Define the Discriminator graph.""" + tf.logging.warning( + 'Undirectional Discriminative model is not a useful model for this ' + 'MaskGAN because future context is needed. Use only for debugging ' + 'purposes.') + sequence = tf.cast(sequence, tf.int32) + + with tf.variable_scope('dis', reuse=reuse): + + def lstm_cell(): + return tf.contrib.rnn.BasicLSTMCell(hparams.dis_rnn_size, + forget_bias=0.0, + state_is_tuple=True, + reuse=reuse) + + attn_cell = lstm_cell + if is_training and FLAGS.keep_prob < 1: + + def attn_cell(): + return tf.contrib.rnn.DropoutWrapper( + lstm_cell(), output_keep_prob=FLAGS.keep_prob) + + cell_dis = tf.contrib.rnn.MultiRNNCell( + [attn_cell() for _ in range(hparams.dis_num_layers)], + state_is_tuple=True) + + state_dis = cell_dis.zero_state(FLAGS.batch_size, tf.float32) + + with tf.variable_scope('rnn') as vs: + predictions = [] + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.dis_rnn_size]) + + rnn_inputs = tf.nn.embedding_lookup(embedding, sequence) + + if is_training and FLAGS.keep_prob < 1: + rnn_inputs = tf.nn.dropout(rnn_inputs, FLAGS.keep_prob) + + for t in xrange(FLAGS.sequence_length): + if t > 0: + tf.get_variable_scope().reuse_variables() + + rnn_in = rnn_inputs[:, t] + rnn_out, state_dis = cell_dis(rnn_in, state_dis) + + # Prediction is linear output for Discriminator. 
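+        # Passing scope=vs (the enclosing 'rnn' scope) together with the
+        # reuse_variables() call above shares the linear projection weights
+        # across time steps.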
+ pred = tf.contrib.layers.linear(rnn_out, 1, scope=vs) + + predictions.append(pred) + predictions = tf.stack(predictions, axis=1) + return tf.squeeze(predictions, axis=2) diff --git a/models/research/maskgan/models/rollout.py b/models/research/maskgan/models/rollout.py new file mode 100644 index 0000000000000000000000000000000000000000..6919af2e31fa362f702e96e135d4a2bc06e063a2 --- /dev/null +++ b/models/research/maskgan/models/rollout.py @@ -0,0 +1,384 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Rollout RNN model definitions which call rnn_zaremba code.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +from six.moves import xrange +import tensorflow as tf + +from losses import losses +from model_utils import helper +from model_utils import model_construction +from model_utils import model_losses +from model_utils import model_optimization + +FLAGS = tf.app.flags.FLAGS + + +def create_rollout_MaskGAN(hparams, is_training): + """Create the MaskGAN model. + + Args: + hparams: Hyperparameters for the MaskGAN. + is_training: Boolean indicating operational mode (train/inference). + evaluated with a teacher forcing regime. + + Return: + model: Namedtuple for specifying the MaskGAN.""" + global_step = tf.Variable(0, name='global_step', trainable=False) + + new_learning_rate = tf.placeholder(tf.float32, [], name='new_learning_rate') + learning_rate = tf.Variable(0.0, name='learning_rate', trainable=False) + learning_rate_update = tf.assign(learning_rate, new_learning_rate) + + new_rate = tf.placeholder(tf.float32, [], name='new_rate') + percent_real_var = tf.Variable(0.0, trainable=False) + percent_real_update = tf.assign(percent_real_var, new_rate) + + ## Placeholders. + inputs = tf.placeholder( + tf.int32, shape=[FLAGS.batch_size, FLAGS.sequence_length]) + present = tf.placeholder( + tf.bool, shape=[FLAGS.batch_size, FLAGS.sequence_length]) + inv_present = tf.placeholder( + tf.bool, shape=[FLAGS.batch_size, FLAGS.sequence_length]) + + ## Rollout Generator. + fwd_gen_rollouts = rollout_generator( + hparams, inputs, present, is_training=is_training, is_validating=False) + inv_gen_rollouts = rollout_generator( + hparams, + inputs, + inv_present, + is_training=is_training, + is_validating=False, + reuse=True) + + ## Rollout Discriminator. + fwd_dis_rollouts = rollout_discriminator( + hparams, fwd_gen_rollouts, is_training=is_training) + inv_dis_rollouts = rollout_discriminator( + hparams, inv_gen_rollouts, is_training=is_training, reuse=True) + + ## Discriminator Loss. + [dis_loss, dis_loss_pred, dis_loss_inv_pred] = rollout_discriminator_loss( + fwd_dis_rollouts, present, inv_dis_rollouts, inv_present) + + ## Average log-perplexity for only missing words. 
However, to do this, + # the logits are still computed using teacher forcing, that is, the ground + # truth tokens are fed in at each time point to be valid. + # TODO(liamfedus): Fix the naming convention. + with tf.variable_scope('gen_rollout'): + _, fwd_eval_logits, _ = model_construction.create_generator( + hparams, + inputs, + present, + is_training=False, + is_validating=True, + reuse=True) + + avg_log_perplexity = model_losses.calculate_log_perplexity( + fwd_eval_logits, inputs, present) + + ## Generator Loss. + # 1. Cross Entropy losses on missing tokens. + [fwd_cross_entropy_losses, + inv_cross_entropy_losses] = rollout_masked_cross_entropy_loss( + inputs, present, inv_present, fwd_gen_rollouts, inv_gen_rollouts) + + # 2. GAN losses on missing tokens. + [fwd_RL_loss, + fwd_RL_statistics, fwd_averages_op] = rollout_reinforce_objective( + hparams, fwd_gen_rollouts, fwd_dis_rollouts, present) + [inv_RL_loss, + inv_RL_statistics, inv_averages_op] = rollout_reinforce_objective( + hparams, inv_gen_rollouts, inv_dis_rollouts, inv_present) + + # TODO(liamfedus): Generalize this to use all logs. + [fwd_sequence, fwd_logits, fwd_log_probs] = fwd_gen_rollouts[-1] + [inv_sequence, inv_logits, inv_log_probs] = inv_gen_rollouts[-1] + + # TODO(liamfedus): Generalize this to use all logs. + fwd_predictions = fwd_dis_rollouts[-1] + inv_predictions = inv_dis_rollouts[-1] + + # TODO(liamfedus): Generalize this to use all logs. + [fwd_log_probs, fwd_rewards, fwd_advantages, + fwd_baselines] = fwd_RL_statistics[-1] + [inv_log_probs, inv_rewards, inv_advantages, + inv_baselines] = inv_RL_statistics[-1] + + ## Pre-training. + if FLAGS.gen_pretrain_steps: + # TODO(liamfedus): Rewrite this. + fwd_cross_entropy_loss = tf.reduce_mean(fwd_cross_entropy_losses) + gen_pretrain_op = model_optimization.create_gen_pretrain_op( + hparams, fwd_cross_entropy_loss, global_step) + else: + gen_pretrain_op = tf.no_op('gen_pretrain_no_op') + if FLAGS.dis_pretrain_steps: + dis_pretrain_op = model_optimization.create_dis_pretrain_op( + hparams, dis_loss, global_step) + else: + dis_pretrain_op = tf.no_op('dis_pretrain_no_op') + + ## Generator Train Op. + # 1. Cross-Entropy. + if FLAGS.gen_training_strategy == 'cross_entropy': + gen_loss = tf.reduce_mean( + fwd_cross_entropy_losses + inv_cross_entropy_losses) / 2. + [gen_train_op, gen_grads, + gen_vars] = model_optimization.create_gen_train_op( + hparams, learning_rate, gen_loss, global_step, mode='MINIMIZE') + + # 2. GAN (REINFORCE) + elif FLAGS.gen_training_strategy == 'reinforce': + gen_loss = (fwd_RL_loss + inv_RL_loss) / 2. + [gen_train_op, gen_grads, + gen_vars] = model_optimization.create_reinforce_gen_train_op( + hparams, learning_rate, gen_loss, fwd_averages_op, inv_averages_op, + global_step) + + else: + raise NotImplementedError + + ## Discriminator Train Op. + dis_train_op, dis_grads, dis_vars = model_optimization.create_dis_train_op( + hparams, dis_loss, global_step) + + ## Summaries. 
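+  # TensorBoard summaries for the losses, REINFORCE statistics, logits and
+  # per-variable/gradient statistics defined below.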
+ with tf.name_scope('general'): + tf.summary.scalar('percent_real', percent_real_var) + tf.summary.scalar('learning_rate', learning_rate) + + with tf.name_scope('generator_losses'): + tf.summary.scalar('gen_loss', tf.reduce_mean(gen_loss)) + tf.summary.scalar('gen_loss_fwd_cross_entropy', + tf.reduce_mean(fwd_cross_entropy_losses)) + tf.summary.scalar('gen_loss_inv_cross_entropy', + tf.reduce_mean(inv_cross_entropy_losses)) + + with tf.name_scope('REINFORCE'): + with tf.name_scope('objective'): + tf.summary.scalar('fwd_RL_loss', tf.reduce_mean(fwd_RL_loss)) + tf.summary.scalar('inv_RL_loss', tf.reduce_mean(inv_RL_loss)) + + with tf.name_scope('rewards'): + helper.variable_summaries(fwd_rewards, 'fwd_rewards') + helper.variable_summaries(inv_rewards, 'inv_rewards') + + with tf.name_scope('advantages'): + helper.variable_summaries(fwd_advantages, 'fwd_advantages') + helper.variable_summaries(inv_advantages, 'inv_advantages') + + with tf.name_scope('baselines'): + helper.variable_summaries(fwd_baselines, 'fwd_baselines') + helper.variable_summaries(inv_baselines, 'inv_baselines') + + with tf.name_scope('log_probs'): + helper.variable_summaries(fwd_log_probs, 'fwd_log_probs') + helper.variable_summaries(inv_log_probs, 'inv_log_probs') + + with tf.name_scope('discriminator_losses'): + tf.summary.scalar('dis_loss', dis_loss) + tf.summary.scalar('dis_loss_fwd_sequence', dis_loss_pred) + tf.summary.scalar('dis_loss_inv_sequence', dis_loss_inv_pred) + + with tf.name_scope('logits'): + helper.variable_summaries(fwd_logits, 'fwd_logits') + helper.variable_summaries(inv_logits, 'inv_logits') + + for v, g in zip(gen_vars, gen_grads): + helper.variable_summaries(v, v.op.name) + helper.variable_summaries(g, 'grad/' + v.op.name) + + for v, g in zip(dis_vars, dis_grads): + helper.variable_summaries(v, v.op.name) + helper.variable_summaries(g, 'grad/' + v.op.name) + + merge_summaries_op = tf.summary.merge_all() + + # Model saver. + saver = tf.train.Saver(keep_checkpoint_every_n_hours=1, max_to_keep=5) + + # Named tuple that captures elements of the MaskGAN model. + Model = collections.namedtuple('Model', [ + 'inputs', 'present', 'inv_present', 'percent_real_update', 'new_rate', + 'fwd_sequence', 'fwd_logits', 'fwd_rewards', 'fwd_advantages', + 'fwd_log_probs', 'fwd_predictions', 'fwd_cross_entropy_losses', + 'inv_sequence', 'inv_logits', 'inv_rewards', 'inv_advantages', + 'inv_log_probs', 'inv_predictions', 'inv_cross_entropy_losses', + 'avg_log_perplexity', 'dis_loss', 'gen_loss', 'dis_train_op', + 'gen_train_op', 'gen_pretrain_op', 'dis_pretrain_op', + 'merge_summaries_op', 'global_step', 'new_learning_rate', + 'learning_rate_update', 'saver' + ]) + + model = Model( + inputs, present, inv_present, percent_real_update, new_rate, fwd_sequence, + fwd_logits, fwd_rewards, fwd_advantages, fwd_log_probs, fwd_predictions, + fwd_cross_entropy_losses, inv_sequence, inv_logits, inv_rewards, + inv_advantages, inv_log_probs, inv_predictions, inv_cross_entropy_losses, + avg_log_perplexity, dis_loss, gen_loss, dis_train_op, gen_train_op, + gen_pretrain_op, dis_pretrain_op, merge_summaries_op, global_step, + new_learning_rate, learning_rate_update, saver) + return model + + +def rollout_generator(hparams, + inputs, + input_present, + is_training, + is_validating, + reuse=None): + """Define the Generator graph which does rollouts. + + G will now impute tokens that have been masked from the input seqeunce. 
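+
+  Returns:
+    rollouts: A list of length FLAGS.num_rollouts; each entry is a
+      [sequence, logits, log_probs] triple from one pass of the Generator.
+
+  Example (illustrative sketch; placeholders as built in
+  create_rollout_MaskGAN):
+    rollouts = rollout_generator(hparams, inputs, present,
+                                 is_training=True, is_validating=False)
+    sequence, logits, log_probs = rollouts[-1]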
+ """ + rollouts = [] + + with tf.variable_scope('gen_rollout'): + for n in xrange(FLAGS.num_rollouts): + if n > 0: + # TODO(liamfedus): Why is it necessary here to manually set reuse? + reuse = True + tf.get_variable_scope().reuse_variables() + + [sequence, logits, log_probs] = model_construction.create_generator( + hparams, + inputs, + input_present, + is_training, + is_validating, + reuse=reuse) + + rollouts.append([sequence, logits, log_probs]) + + # Length assertion. + assert len(rollouts) == FLAGS.num_rollouts + + return rollouts + + +def rollout_discriminator(hparams, gen_rollouts, is_training, reuse=None): + """Define the Discriminator graph which does rollouts. + + G will now impute tokens that have been masked from the input seqeunce. + """ + rollout_predictions = [] + + with tf.variable_scope('dis_rollout'): + for n, rollout in enumerate(gen_rollouts): + if n > 0: + # TODO(liamfedus): Why is it necessary here to manually set reuse? + reuse = True + tf.get_variable_scope().reuse_variables() + + [sequence, _, _] = rollout + + predictions = model_construction.create_discriminator( + hparams, sequence, is_training=is_training, reuse=reuse) + + # Predictions for each rollout. + rollout_predictions.append(predictions) + + # Length assertion. + assert len(rollout_predictions) == FLAGS.num_rollouts + + return rollout_predictions + + +def rollout_reinforce_objective(hparams, gen_rollouts, dis_rollouts, present): + cumulative_gen_objective = 0. + cumulative_averages_op = [] + cumulative_statistics = [] + + assert len(gen_rollouts) == len(dis_rollouts) + + for gen_rollout, dis_rollout in zip(gen_rollouts, dis_rollouts): + [_, _, log_probs] = gen_rollout + dis_predictions = dis_rollout + + [ + final_gen_objective, log_probs, rewards, advantages, baselines, + maintain_averages_op + ] = model_losses.calculate_reinforce_objective(hparams, log_probs, + dis_predictions, present) + + # Accumulate results. + cumulative_gen_objective += final_gen_objective + cumulative_averages_op.append(maintain_averages_op) + cumulative_statistics.append([log_probs, rewards, advantages, baselines]) + + # Group all the averaging operations. + cumulative_averages_op = tf.group(*cumulative_averages_op) + cumulative_gen_objective /= FLAGS.num_rollouts + [log_probs, rewards, advantages, baselines] = cumulative_statistics[-1] + + # Length assertion. 
+ assert len(cumulative_statistics) == FLAGS.num_rollouts + + return [ + cumulative_gen_objective, cumulative_statistics, cumulative_averages_op + ] + + +def rollout_masked_cross_entropy_loss(inputs, present, inv_present, + fwd_rollouts, inv_rollouts): + cumulative_fwd_cross_entropy_losses = tf.zeros( + shape=[FLAGS.batch_size, FLAGS.sequence_length]) + cumulative_inv_cross_entropy_losses = tf.zeros( + shape=[FLAGS.batch_size, FLAGS.sequence_length]) + + for fwd_rollout, inv_rollout in zip(fwd_rollouts, inv_rollouts): + [_, fwd_logits, _] = fwd_rollout + [_, inv_logits, _] = inv_rollout + + [fwd_cross_entropy_losses, + inv_cross_entropy_losses] = model_losses.create_masked_cross_entropy_loss( + inputs, present, inv_present, fwd_logits, inv_logits) + + cumulative_fwd_cross_entropy_losses = tf.add( + cumulative_fwd_cross_entropy_losses, fwd_cross_entropy_losses) + cumulative_inv_cross_entropy_losses = tf.add( + cumulative_inv_cross_entropy_losses, inv_cross_entropy_losses) + + return [ + cumulative_fwd_cross_entropy_losses, cumulative_inv_cross_entropy_losses + ] + + +def rollout_discriminator_loss(fwd_rollouts, present, inv_rollouts, + inv_present): + + dis_loss = 0 + dis_loss_pred = 0 + dis_loss_inv_pred = 0 + + for fwd_predictions, inv_predictions in zip(fwd_rollouts, inv_rollouts): + dis_loss_pred += losses.discriminator_loss(fwd_predictions, present) + dis_loss_inv_pred += losses.discriminator_loss(inv_predictions, inv_present) + + dis_loss_pred /= FLAGS.num_rollouts + dis_loss_inv_pred /= FLAGS.num_rollouts + + dis_loss = (dis_loss_pred + dis_loss_inv_pred) / 2. + return [dis_loss, dis_loss_pred, dis_loss_inv_pred] diff --git a/models/research/maskgan/models/seq2seq.py b/models/research/maskgan/models/seq2seq.py new file mode 100644 index 0000000000000000000000000000000000000000..fac397c98381309f6c7c6d428fcec3c665bcff98 --- /dev/null +++ b/models/research/maskgan/models/seq2seq.py @@ -0,0 +1,277 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Simple seq2seq model definitions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf +from six.moves import xrange +from models import attention_utils + +# ZoneoutWrapper. +from regularization import zoneout + +FLAGS = tf.app.flags.FLAGS + + +def transform_input_with_is_missing_token(inputs, targets_present): + """Transforms the inputs to have missing tokens when it's masked out. The + mask is for the targets, so therefore, to determine if an input at time t is + masked, we have to check if the target at time t - 1 is masked out. + + e.g. + inputs = [a, b, c, d] + targets = [b, c, d, e] + targets_present = [1, 0, 1, 0] + + then, + transformed_input = [a, b, , d] + + Args: + inputs: tf.int32 Tensor of shape [batch_size, sequence_length] with tokens + up to, but not including, vocab_size. 
+ targets_present: tf.bool Tensor of shape [batch_size, sequence_length] with + True representing the presence of the word. + + Returns: + transformed_input: tf.int32 Tensor of shape [batch_size, sequence_length] + which takes on value of inputs when the input is present and takes on + value=vocab_size to indicate a missing token. + """ + # To fill in if the input is missing. + input_missing = tf.constant( + FLAGS.vocab_size, + dtype=tf.int32, + shape=[FLAGS.batch_size, FLAGS.sequence_length]) + + # The 0th input will always be present to MaskGAN. + zeroth_input_present = tf.constant(True, tf.bool, shape=[FLAGS.batch_size, 1]) + + # Input present mask. + inputs_present = tf.concat( + [zeroth_input_present, targets_present[:, :-1]], axis=1) + + transformed_input = tf.where(inputs_present, inputs, input_missing) + return transformed_input + + +def gen_encoder(hparams, inputs, targets_present, is_training, reuse=None): + """Define the Encoder graph.""" + # We will use the same variable from the decoder. + if FLAGS.seq2seq_share_embedding: + with tf.variable_scope('decoder/rnn'): + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.gen_rnn_size]) + + with tf.variable_scope('encoder', reuse=reuse): + + def lstm_cell(): + return tf.contrib.rnn.LayerNormBasicLSTMCell( + hparams.gen_rnn_size, reuse=reuse) + + attn_cell = lstm_cell + if FLAGS.zoneout_drop_prob > 0.0: + + def attn_cell(): + return zoneout.ZoneoutWrapper( + lstm_cell(), + zoneout_drop_prob=FLAGS.zoneout_drop_prob, + is_training=is_training) + + cell = tf.contrib.rnn.MultiRNNCell( + [attn_cell() for _ in range(hparams.gen_num_layers)], + state_is_tuple=True) + + initial_state = cell.zero_state(FLAGS.batch_size, tf.float32) + + # Add a missing token for inputs not present. + real_inputs = inputs + masked_inputs = transform_input_with_is_missing_token( + inputs, targets_present) + + with tf.variable_scope('rnn'): + hidden_states = [] + + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size + 1, hparams.gen_rnn_size]) + + real_rnn_inputs = tf.nn.embedding_lookup(embedding, real_inputs) + masked_rnn_inputs = tf.nn.embedding_lookup(embedding, masked_inputs) + + state = initial_state + for t in xrange(FLAGS.sequence_length): + if t > 0: + tf.get_variable_scope().reuse_variables() + + rnn_inp = masked_rnn_inputs[:, t] + rnn_out, state = cell(rnn_inp, state) + hidden_states.append(rnn_out) + final_masked_state = state + hidden_states = tf.stack(hidden_states, axis=1) + + # Produce the RNN state had the model operated only + # over real data. + real_state = initial_state + for t in xrange(FLAGS.sequence_length): + tf.get_variable_scope().reuse_variables() + + # RNN. + rnn_inp = real_rnn_inputs[:, t] + rnn_out, real_state = cell(rnn_inp, real_state) + final_state = real_state + + return (hidden_states, final_masked_state), initial_state, final_state + + +def gen_decoder(hparams, + inputs, + targets, + targets_present, + encoding_state, + is_training, + is_validating, + reuse=None): + """Define the Decoder graph. The Decoder will now impute tokens that + have been masked from the input seqeunce. 
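+
+  At each step the decoder reads the ground-truth token when it is present
+  and feeds back its own sampled token when it is missing; while validating,
+  or when training purely with cross_entropy, teacher forcing is used
+  throughout.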
+ """ + gen_decoder_rnn_size = hparams.gen_rnn_size + + with tf.variable_scope('decoder', reuse=reuse): + + def lstm_cell(): + return tf.contrib.rnn.LayerNormBasicLSTMCell( + gen_decoder_rnn_size, reuse=reuse) + + attn_cell = lstm_cell + if FLAGS.zoneout_drop_prob > 0.0: + + def attn_cell(): + return zoneout.ZoneoutWrapper( + lstm_cell(), + zoneout_drop_prob=FLAGS.zoneout_drop_prob, + is_training=is_training) + + cell_gen = tf.contrib.rnn.MultiRNNCell( + [attn_cell() for _ in range(hparams.gen_num_layers)], + state_is_tuple=True) + + # Hidden encoder states. + hidden_vector_encodings = encoding_state[0] + + # Carry forward the final state tuple from the encoder. + # State tuples. + state_gen = encoding_state[1] + + if FLAGS.attention_option is not None: + (attention_keys, attention_values, _, + attention_construct_fn) = attention_utils.prepare_attention( + hidden_vector_encodings, + FLAGS.attention_option, + num_units=gen_decoder_rnn_size, + reuse=reuse) + + with tf.variable_scope('rnn'): + sequence, logits, log_probs = [], [], [] + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, gen_decoder_rnn_size]) + softmax_w = tf.get_variable('softmax_w', + [gen_decoder_rnn_size, FLAGS.vocab_size]) + softmax_b = tf.get_variable('softmax_b', [FLAGS.vocab_size]) + + rnn_inputs = tf.nn.embedding_lookup(embedding, inputs) + + for t in xrange(FLAGS.sequence_length): + if t > 0: + tf.get_variable_scope().reuse_variables() + + # Input to the Decoder. + if t == 0: + # Always provide the real input at t = 0. + rnn_inp = rnn_inputs[:, t] + + # If the input is present, read in the input at t. + # If the input is not present, read in the previously generated. + else: + real_rnn_inp = rnn_inputs[:, t] + fake_rnn_inp = tf.nn.embedding_lookup(embedding, fake) + + # While validating, the decoder should be operating in teacher + # forcing regime. Also, if we're just training with cross_entropy + # use teacher forcing. + if is_validating or (is_training and + FLAGS.gen_training_strategy == 'cross_entropy'): + rnn_inp = real_rnn_inp + else: + rnn_inp = tf.where(targets_present[:, t - 1], real_rnn_inp, + fake_rnn_inp) + + # RNN. + rnn_out, state_gen = cell_gen(rnn_inp, state_gen) + + if FLAGS.attention_option is not None: + rnn_out = attention_construct_fn(rnn_out, attention_keys, + attention_values) + # # TODO(liamfedus): Assert not "monotonic" attention_type. + # # TODO(liamfedus): FLAGS.attention_type. + # context_state = revised_attention_utils._empty_state() + # rnn_out, context_state = attention_construct_fn( + # rnn_out, attention_keys, attention_values, context_state, t) + logit = tf.matmul(rnn_out, softmax_w) + softmax_b + + # Output for Decoder. + # If input is present: Return real at t+1. + # If input is not present: Return fake for t+1. + real = targets[:, t] + + categorical = tf.contrib.distributions.Categorical(logits=logit) + fake = categorical.sample() + log_prob = categorical.log_prob(fake) + + output = tf.where(targets_present[:, t], real, fake) + + # Add to lists. 
+ sequence.append(output) + log_probs.append(log_prob) + logits.append(logit) + + return (tf.stack(sequence, axis=1), tf.stack(logits, axis=1), tf.stack( + log_probs, axis=1)) + + +def generator(hparams, + inputs, + targets, + targets_present, + is_training, + is_validating, + reuse=None): + """Define the Generator graph.""" + with tf.variable_scope('gen', reuse=reuse): + encoder_states, initial_state, final_state = gen_encoder( + hparams, inputs, targets_present, is_training=is_training, reuse=reuse) + stacked_sequence, stacked_logits, stacked_log_probs = gen_decoder( + hparams, + inputs, + targets, + targets_present, + encoder_states, + is_training=is_training, + is_validating=is_validating, + reuse=reuse) + return (stacked_sequence, stacked_logits, stacked_log_probs, initial_state, + final_state) diff --git a/models/research/maskgan/models/seq2seq_nas.py b/models/research/maskgan/models/seq2seq_nas.py new file mode 100644 index 0000000000000000000000000000000000000000..cede90f5625c6e46740ad7601681712e73f07450 --- /dev/null +++ b/models/research/maskgan/models/seq2seq_nas.py @@ -0,0 +1,333 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Simple seq2seq model definitions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +from six.moves import xrange +import tensorflow as tf + +from models import attention_utils + +# NAS Code.. +from nas_utils import configs +from nas_utils import custom_cell +from nas_utils import variational_dropout + +FLAGS = tf.app.flags.FLAGS + + +def get_config(): + return configs.AlienConfig2() + + +LSTMTuple = collections.namedtuple('LSTMTuple', ['c', 'h']) + + +def transform_input_with_is_missing_token(inputs, targets_present): + """Transforms the inputs to have missing tokens when it's masked out. The + mask is for the targets, so therefore, to determine if an input at time t is + masked, we have to check if the target at time t - 1 is masked out. + + e.g. + inputs = [a, b, c, d] + targets = [b, c, d, e] + targets_present = [1, 0, 1, 0] + + then, + transformed_input = [a, b, , d] + + Args: + inputs: tf.int32 Tensor of shape [batch_size, sequence_length] with tokens + up to, but not including, vocab_size. + targets_present: tf.bool Tensor of shape [batch_size, sequence_length] with + True representing the presence of the word. + + Returns: + transformed_input: tf.int32 Tensor of shape [batch_size, sequence_length] + which takes on value of inputs when the input is present and takes on + value=vocab_size to indicate a missing token. + """ + # To fill in if the input is missing. + input_missing = tf.constant( + FLAGS.vocab_size, + dtype=tf.int32, + shape=[FLAGS.batch_size, FLAGS.sequence_length]) + + # The 0th input will always be present to MaskGAN. 
+ zeroth_input_present = tf.constant(True, tf.bool, shape=[FLAGS.batch_size, 1]) + + # Input present mask. + inputs_present = tf.concat( + [zeroth_input_present, targets_present[:, :-1]], axis=1) + + transformed_input = tf.where(inputs_present, inputs, input_missing) + return transformed_input + + +def gen_encoder(hparams, inputs, targets_present, is_training, reuse=None): + """Define the Encoder graph. + + + Args: + hparams: Hyperparameters for the MaskGAN. + inputs: tf.int32 Tensor of shape [batch_size, sequence_length] with tokens + up to, but not including, vocab_size. + targets_present: tf.bool Tensor of shape [batch_size, sequence_length] with + True representing the presence of the target. + is_training: Boolean indicating operational mode (train/inference). + reuse (Optional): Whether to reuse the variables. + + Returns: + Tuple of (hidden_states, final_state). + """ + config = get_config() + configs.print_config(config) + # We will use the same variable from the decoder. + if FLAGS.seq2seq_share_embedding: + with tf.variable_scope('decoder/rnn'): + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.gen_rnn_size]) + + with tf.variable_scope('encoder', reuse=reuse): + # Neural architecture search cell. + cell = custom_cell.Alien(config.hidden_size) + + if is_training: + [h2h_masks, h2i_masks, _, + output_mask] = variational_dropout.generate_variational_dropout_masks( + hparams, config.keep_prob) + else: + h2i_masks, output_mask = None, None + + cell = custom_cell.GenericMultiRNNCell([cell] * config.num_layers) + + initial_state = cell.zero_state(FLAGS.batch_size, tf.float32) + + # Add a missing token for inputs not present. + real_inputs = inputs + masked_inputs = transform_input_with_is_missing_token( + inputs, targets_present) + + with tf.variable_scope('rnn'): + hidden_states = [] + + # Split the embedding into two parts so that we can load the PTB + # weights into one part of the Variable. + if not FLAGS.seq2seq_share_embedding: + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.gen_rnn_size]) + missing_embedding = tf.get_variable('missing_embedding', + [1, hparams.gen_rnn_size]) + embedding = tf.concat([embedding, missing_embedding], axis=0) + + real_rnn_inputs = tf.nn.embedding_lookup(embedding, real_inputs) + masked_rnn_inputs = tf.nn.embedding_lookup(embedding, masked_inputs) + + if is_training and FLAGS.keep_prob < 1: + masked_rnn_inputs = tf.nn.dropout(masked_rnn_inputs, FLAGS.keep_prob) + + state = initial_state + for t in xrange(FLAGS.sequence_length): + if t > 0: + tf.get_variable_scope().reuse_variables() + + rnn_inp = masked_rnn_inputs[:, t] + + if is_training: + state = list(state) + for layer_num, per_layer_state in enumerate(state): + per_layer_state = LSTMTuple( + per_layer_state[0], per_layer_state[1] * h2h_masks[layer_num]) + state[layer_num] = per_layer_state + + rnn_out, state = cell(rnn_inp, state, h2i_masks) + + if is_training: + rnn_out = output_mask * rnn_out + + hidden_states.append(rnn_out) + final_masked_state = state + hidden_states = tf.stack(hidden_states, axis=1) + + # Produce the RNN state had the model operated only + # over real data. + real_state = initial_state + for t in xrange(FLAGS.sequence_length): + tf.get_variable_scope().reuse_variables() + + # RNN. 
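+        # Step the same cell over the unmasked inputs; only the final state
+        # of this pass is kept.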
+ rnn_inp = real_rnn_inputs[:, t] + rnn_out, real_state = cell(rnn_inp, real_state) + final_state = real_state + + return (hidden_states, final_masked_state), initial_state, final_state + + +def gen_decoder(hparams, + inputs, + targets, + targets_present, + encoding_state, + is_training, + is_validating, + reuse=None): + """Define the Decoder graph. The Decoder will now impute tokens that + have been masked from the input seqeunce. + """ + config = get_config() + gen_decoder_rnn_size = hparams.gen_rnn_size + + if FLAGS.seq2seq_share_embedding: + with tf.variable_scope('decoder/rnn', reuse=True): + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, gen_decoder_rnn_size]) + + with tf.variable_scope('decoder', reuse=reuse): + # Neural architecture search cell. + cell = custom_cell.Alien(config.hidden_size) + + if is_training: + [h2h_masks, _, _, + output_mask] = variational_dropout.generate_variational_dropout_masks( + hparams, config.keep_prob) + else: + output_mask = None + + cell_gen = custom_cell.GenericMultiRNNCell([cell] * config.num_layers) + + # Hidden encoder states. + hidden_vector_encodings = encoding_state[0] + + # Carry forward the final state tuple from the encoder. + # State tuples. + state_gen = encoding_state[1] + + if FLAGS.attention_option is not None: + (attention_keys, attention_values, _, + attention_construct_fn) = attention_utils.prepare_attention( + hidden_vector_encodings, + FLAGS.attention_option, + num_units=gen_decoder_rnn_size, + reuse=reuse) + + with tf.variable_scope('rnn'): + sequence, logits, log_probs = [], [], [] + + if not FLAGS.seq2seq_share_embedding: + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, gen_decoder_rnn_size]) + softmax_w = tf.matrix_transpose(embedding) + softmax_b = tf.get_variable('softmax_b', [FLAGS.vocab_size]) + + rnn_inputs = tf.nn.embedding_lookup(embedding, inputs) + + if is_training and FLAGS.keep_prob < 1: + rnn_inputs = tf.nn.dropout(rnn_inputs, FLAGS.keep_prob) + + for t in xrange(FLAGS.sequence_length): + if t > 0: + tf.get_variable_scope().reuse_variables() + + # Input to the Decoder. + if t == 0: + # Always provide the real input at t = 0. + rnn_inp = rnn_inputs[:, t] + + # If the input is present, read in the input at t. + # If the input is not present, read in the previously generated. + else: + real_rnn_inp = rnn_inputs[:, t] + fake_rnn_inp = tf.nn.embedding_lookup(embedding, fake) + + # While validating, the decoder should be operating in teacher + # forcing regime. Also, if we're just training with cross_entropy + # use teacher forcing. + if is_validating or (is_training and + FLAGS.gen_training_strategy == 'cross_entropy'): + rnn_inp = real_rnn_inp + else: + rnn_inp = tf.where(targets_present[:, t - 1], real_rnn_inp, + fake_rnn_inp) + + if is_training: + state_gen = list(state_gen) + for layer_num, per_layer_state in enumerate(state_gen): + per_layer_state = LSTMTuple( + per_layer_state[0], per_layer_state[1] * h2h_masks[layer_num]) + state_gen[layer_num] = per_layer_state + + # RNN. + rnn_out, state_gen = cell_gen(rnn_inp, state_gen) + + if is_training: + rnn_out = output_mask * rnn_out + + if FLAGS.attention_option is not None: + rnn_out = attention_construct_fn(rnn_out, attention_keys, + attention_values) + # # TODO(liamfedus): Assert not "monotonic" attention_type. + # # TODO(liamfedus): FLAGS.attention_type. 
+ # context_state = revised_attention_utils._empty_state() + # rnn_out, context_state = attention_construct_fn( + # rnn_out, attention_keys, attention_values, context_state, t) + logit = tf.matmul(rnn_out, softmax_w) + softmax_b + + # Output for Decoder. + # If input is present: Return real at t+1. + # If input is not present: Return fake for t+1. + real = targets[:, t] + + categorical = tf.contrib.distributions.Categorical(logits=logit) + fake = categorical.sample() + log_prob = categorical.log_prob(fake) + + output = tf.where(targets_present[:, t], real, fake) + + # Add to lists. + sequence.append(output) + log_probs.append(log_prob) + logits.append(logit) + + return (tf.stack(sequence, axis=1), tf.stack(logits, axis=1), tf.stack( + log_probs, axis=1)) + + +def generator(hparams, + inputs, + targets, + targets_present, + is_training, + is_validating, + reuse=None): + """Define the Generator graph.""" + with tf.variable_scope('gen', reuse=reuse): + encoder_states, initial_state, final_state = gen_encoder( + hparams, inputs, targets_present, is_training=is_training, reuse=reuse) + stacked_sequence, stacked_logits, stacked_log_probs = gen_decoder( + hparams, + inputs, + targets, + targets_present, + encoder_states, + is_training=is_training, + is_validating=is_validating, + reuse=reuse) + return (stacked_sequence, stacked_logits, stacked_log_probs, initial_state, + final_state) diff --git a/models/research/maskgan/models/seq2seq_vd.py b/models/research/maskgan/models/seq2seq_vd.py new file mode 100644 index 0000000000000000000000000000000000000000..850eda435c48c73d574a06b1b65a12f71a18f276 --- /dev/null +++ b/models/research/maskgan/models/seq2seq_vd.py @@ -0,0 +1,609 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Simple seq2seq model definitions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import xrange +import tensorflow as tf + +from models import attention_utils +from regularization import variational_dropout + +FLAGS = tf.app.flags.FLAGS + + +def transform_input_with_is_missing_token(inputs, targets_present): + """Transforms the inputs to have missing tokens when it's masked out. The + mask is for the targets, so therefore, to determine if an input at time t is + masked, we have to check if the target at time t - 1 is masked out. + + e.g. + inputs = [a, b, c, d] + targets = [b, c, d, e] + targets_present = [1, 0, 1, 0] + + which computes, + inputs_present = [1, 1, 0, 1] + + and outputs, + transformed_input = [a, b, , d] + + Args: + inputs: tf.int32 Tensor of shape [batch_size, sequence_length] with tokens + up to, but not including, vocab_size. + targets_present: tf.bool Tensor of shape [batch_size, sequence_length] with + True representing the presence of the word. 
+ + Returns: + transformed_input: tf.int32 Tensor of shape [batch_size, sequence_length] + which takes on value of inputs when the input is present and takes on + value=vocab_size to indicate a missing token. + """ + # To fill in if the input is missing. + input_missing = tf.constant( + FLAGS.vocab_size, + dtype=tf.int32, + shape=[FLAGS.batch_size, FLAGS.sequence_length]) + + # The 0th input will always be present to MaskGAN. + zeroth_input_present = tf.constant(True, tf.bool, shape=[FLAGS.batch_size, 1]) + + # Input present mask. + inputs_present = tf.concat( + [zeroth_input_present, targets_present[:, :-1]], axis=1) + + transformed_input = tf.where(inputs_present, inputs, input_missing) + return transformed_input + + +# TODO(adai): IMDB labels placeholder to encoder. +def gen_encoder(hparams, inputs, targets_present, is_training, reuse=None): + """Define the Encoder graph. + + Args: + hparams: Hyperparameters for the MaskGAN. + inputs: tf.int32 Tensor of shape [batch_size, sequence_length] with tokens + up to, but not including, vocab_size. + targets_present: tf.bool Tensor of shape [batch_size, sequence_length] with + True representing the presence of the target. + is_training: Boolean indicating operational mode (train/inference). + reuse (Optional): Whether to reuse the variables. + + Returns: + Tuple of (hidden_states, final_state). + """ + # We will use the same variable from the decoder. + if FLAGS.seq2seq_share_embedding: + with tf.variable_scope('decoder/rnn'): + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.gen_rnn_size]) + + with tf.variable_scope('encoder', reuse=reuse): + + def lstm_cell(): + return tf.contrib.rnn.BasicLSTMCell( + hparams.gen_rnn_size, + forget_bias=0.0, + state_is_tuple=True, + reuse=reuse) + + attn_cell = lstm_cell + if is_training and hparams.gen_vd_keep_prob < 1: + + def attn_cell(): + return variational_dropout.VariationalDropoutWrapper( + lstm_cell(), FLAGS.batch_size, hparams.gen_rnn_size, + hparams.gen_vd_keep_prob, hparams.gen_vd_keep_prob) + + cell = tf.contrib.rnn.MultiRNNCell( + [attn_cell() for _ in range(hparams.gen_num_layers)], + state_is_tuple=True) + + initial_state = cell.zero_state(FLAGS.batch_size, tf.float32) + + # Add a missing token for inputs not present. + real_inputs = inputs + masked_inputs = transform_input_with_is_missing_token( + inputs, targets_present) + + with tf.variable_scope('rnn') as scope: + hidden_states = [] + + # Split the embedding into two parts so that we can load the PTB + # weights into one part of the Variable. + if not FLAGS.seq2seq_share_embedding: + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.gen_rnn_size]) + missing_embedding = tf.get_variable('missing_embedding', + [1, hparams.gen_rnn_size]) + embedding = tf.concat([embedding, missing_embedding], axis=0) + + # TODO(adai): Perhaps append IMDB labels placeholder to input at + # each time point. + real_rnn_inputs = tf.nn.embedding_lookup(embedding, real_inputs) + masked_rnn_inputs = tf.nn.embedding_lookup(embedding, masked_inputs) + + state = initial_state + + def make_mask(keep_prob, units): + random_tensor = keep_prob + # 0. if [keep_prob, 1.0) and 1. 
if [1.0, 1.0 + keep_prob) + random_tensor += tf.random_uniform( + tf.stack([FLAGS.batch_size, 1, units])) + return tf.floor(random_tensor) / keep_prob + + if is_training: + output_mask = make_mask(hparams.gen_vd_keep_prob, hparams.gen_rnn_size) + + hidden_states, state = tf.nn.dynamic_rnn( + cell, masked_rnn_inputs, initial_state=state, scope=scope) + if is_training: + hidden_states *= output_mask + + final_masked_state = state + + # Produce the RNN state had the model operated only + # over real data. + real_state = initial_state + _, real_state = tf.nn.dynamic_rnn( + cell, real_rnn_inputs, initial_state=real_state, scope=scope) + final_state = real_state + + return (hidden_states, final_masked_state), initial_state, final_state + + +# TODO(adai): IMDB labels placeholder to encoder. +def gen_encoder_cnn(hparams, inputs, targets_present, is_training, reuse=None): + """Define the CNN Encoder graph.""" + del reuse + sequence = transform_input_with_is_missing_token(inputs, targets_present) + + # TODO(liamfedus): Make this a hyperparameter. + dis_filter_sizes = [3, 4, 5, 6, 7, 8, 9, 10, 15, 20] + + # Keeping track of l2 regularization loss (optional) + # l2_loss = tf.constant(0.0) + + with tf.variable_scope('encoder', reuse=True): + with tf.variable_scope('rnn'): + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.gen_rnn_size]) + + cnn_inputs = tf.nn.embedding_lookup(embedding, sequence) + + # Create a convolution layer for each filter size + conv_outputs = [] + for filter_size in dis_filter_sizes: + with tf.variable_scope('conv-%s' % filter_size): + # Convolution Layer + filter_shape = [ + filter_size, hparams.gen_rnn_size, hparams.dis_num_filters + ] + W = tf.get_variable( + name='W', initializer=tf.truncated_normal(filter_shape, stddev=0.1)) + b = tf.get_variable( + name='b', + initializer=tf.constant(0.1, shape=[hparams.dis_num_filters])) + conv = tf.nn.conv1d(cnn_inputs, W, stride=1, padding='SAME', name='conv') + + # Apply nonlinearity + h = tf.nn.relu(tf.nn.bias_add(conv, b), name='relu') + + conv_outputs.append(h) + + # Combine all the pooled features + dis_num_filters_total = hparams.dis_num_filters * len(dis_filter_sizes) + + h_conv = tf.concat(conv_outputs, axis=2) + h_conv_flat = tf.reshape(h_conv, [-1, dis_num_filters_total]) + + # Add dropout + if is_training: + with tf.variable_scope('dropout'): + h_conv_flat = tf.nn.dropout(h_conv_flat, hparams.gen_vd_keep_prob) + + # Final (unnormalized) scores and predictions + with tf.variable_scope('output'): + W = tf.get_variable( + 'W', + shape=[dis_num_filters_total, hparams.gen_rnn_size], + initializer=tf.contrib.layers.xavier_initializer()) + b = tf.get_variable( + name='b', initializer=tf.constant(0.1, shape=[hparams.gen_rnn_size])) + # l2_loss += tf.nn.l2_loss(W) + # l2_loss += tf.nn.l2_loss(b) + predictions = tf.nn.xw_plus_b(h_conv_flat, W, b, name='predictions') + predictions = tf.reshape( + predictions, + shape=[FLAGS.batch_size, FLAGS.sequence_length, hparams.gen_rnn_size]) + final_state = tf.reduce_mean(predictions, 1) + return predictions, (final_state, final_state) + + +# TODO(adai): IMDB labels placeholder to decoder. +def gen_decoder(hparams, + inputs, + targets, + targets_present, + encoding_state, + is_training, + is_validating, + reuse=None): + """Define the Decoder graph. The Decoder will now impute tokens that + have been masked from the input seqeunce. 
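+
+  When FLAGS.gen_training_strategy is 'cross_entropy', per-step sampling is
+  skipped and the logits are computed in a single batched matmul after the
+  time loop; otherwise tokens are drawn from a Categorical over the logits
+  (or its mode when FLAGS.use_gen_mode is set).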
+ """ + gen_decoder_rnn_size = hparams.gen_rnn_size + + targets = tf.Print(targets, [targets], message='targets', summarize=50) + if FLAGS.seq2seq_share_embedding: + with tf.variable_scope('decoder/rnn', reuse=True): + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.gen_rnn_size]) + + with tf.variable_scope('decoder', reuse=reuse): + + def lstm_cell(): + return tf.contrib.rnn.BasicLSTMCell( + gen_decoder_rnn_size, + forget_bias=0.0, + state_is_tuple=True, + reuse=reuse) + + attn_cell = lstm_cell + if is_training and hparams.gen_vd_keep_prob < 1: + + def attn_cell(): + return variational_dropout.VariationalDropoutWrapper( + lstm_cell(), FLAGS.batch_size, hparams.gen_rnn_size, + hparams.gen_vd_keep_prob, hparams.gen_vd_keep_prob) + + cell_gen = tf.contrib.rnn.MultiRNNCell( + [attn_cell() for _ in range(hparams.gen_num_layers)], + state_is_tuple=True) + + # Hidden encoder states. + hidden_vector_encodings = encoding_state[0] + + # Carry forward the final state tuple from the encoder. + # State tuples. + state_gen = encoding_state[1] + + if FLAGS.attention_option is not None: + (attention_keys, attention_values, _, + attention_construct_fn) = attention_utils.prepare_attention( + hidden_vector_encodings, + FLAGS.attention_option, + num_units=gen_decoder_rnn_size, + reuse=reuse) + + def make_mask(keep_prob, units): + random_tensor = keep_prob + # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob) + random_tensor += tf.random_uniform(tf.stack([FLAGS.batch_size, units])) + return tf.floor(random_tensor) / keep_prob + + if is_training: + output_mask = make_mask(hparams.gen_vd_keep_prob, hparams.gen_rnn_size) + + with tf.variable_scope('rnn'): + sequence, logits, log_probs = [], [], [] + + if not FLAGS.seq2seq_share_embedding: + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.gen_rnn_size]) + softmax_w = tf.matrix_transpose(embedding) + softmax_b = tf.get_variable('softmax_b', [FLAGS.vocab_size]) + + rnn_inputs = tf.nn.embedding_lookup(embedding, inputs) + # TODO(adai): Perhaps append IMDB labels placeholder to input at + # each time point. + + rnn_outs = [] + + fake = None + for t in xrange(FLAGS.sequence_length): + if t > 0: + tf.get_variable_scope().reuse_variables() + + # Input to the Decoder. + if t == 0: + # Always provide the real input at t = 0. + rnn_inp = rnn_inputs[:, t] + + # If the input is present, read in the input at t. + # If the input is not present, read in the previously generated. + else: + real_rnn_inp = rnn_inputs[:, t] + + # While validating, the decoder should be operating in teacher + # forcing regime. Also, if we're just training with cross_entropy + # use teacher forcing. + if is_validating or FLAGS.gen_training_strategy == 'cross_entropy': + rnn_inp = real_rnn_inp + else: + fake_rnn_inp = tf.nn.embedding_lookup(embedding, fake) + rnn_inp = tf.where(targets_present[:, t - 1], real_rnn_inp, + fake_rnn_inp) + + # RNN. + rnn_out, state_gen = cell_gen(rnn_inp, state_gen) + + if FLAGS.attention_option is not None: + rnn_out = attention_construct_fn(rnn_out, attention_keys, + attention_values) + if is_training: + rnn_out *= output_mask + + rnn_outs.append(rnn_out) + if FLAGS.gen_training_strategy != 'cross_entropy': + logit = tf.nn.bias_add(tf.matmul(rnn_out, softmax_w), softmax_b) + + # Output for Decoder. + # If input is present: Return real at t+1. + # If input is not present: Return fake for t+1. 
+ real = targets[:, t] + + categorical = tf.contrib.distributions.Categorical(logits=logit) + if FLAGS.use_gen_mode: + fake = categorical.mode() + else: + fake = categorical.sample() + log_prob = categorical.log_prob(fake) + output = tf.where(targets_present[:, t], real, fake) + + else: + real = targets[:, t] + logit = tf.zeros(tf.stack([FLAGS.batch_size, FLAGS.vocab_size])) + log_prob = tf.zeros(tf.stack([FLAGS.batch_size])) + output = real + + # Add to lists. + sequence.append(output) + log_probs.append(log_prob) + logits.append(logit) + + if FLAGS.gen_training_strategy == 'cross_entropy': + logits = tf.nn.bias_add( + tf.matmul( + tf.reshape(tf.stack(rnn_outs, 1), [-1, gen_decoder_rnn_size]), + softmax_w), softmax_b) + logits = tf.reshape(logits, + [-1, FLAGS.sequence_length, FLAGS.vocab_size]) + else: + logits = tf.stack(logits, axis=1) + + return (tf.stack(sequence, axis=1), logits, tf.stack(log_probs, axis=1)) + + +def dis_encoder(hparams, masked_inputs, is_training, reuse=None, + embedding=None): + """Define the Discriminator encoder. Reads in the masked inputs for context + and produces the hidden states of the encoder.""" + with tf.variable_scope('encoder', reuse=reuse): + + def lstm_cell(): + return tf.contrib.rnn.BasicLSTMCell( + hparams.dis_rnn_size, + forget_bias=0.0, + state_is_tuple=True, + reuse=reuse) + + attn_cell = lstm_cell + if is_training and hparams.dis_vd_keep_prob < 1: + + def attn_cell(): + return variational_dropout.VariationalDropoutWrapper( + lstm_cell(), FLAGS.batch_size, hparams.dis_rnn_size, + hparams.dis_vd_keep_prob, hparams.dis_vd_keep_prob) + + cell_dis = tf.contrib.rnn.MultiRNNCell( + [attn_cell() for _ in range(hparams.dis_num_layers)], + state_is_tuple=True) + + state_dis = cell_dis.zero_state(FLAGS.batch_size, tf.float32) + + with tf.variable_scope('rnn'): + hidden_states = [] + + missing_embedding = tf.get_variable('missing_embedding', + [1, hparams.dis_rnn_size]) + embedding = tf.concat([embedding, missing_embedding], axis=0) + masked_rnn_inputs = tf.nn.embedding_lookup(embedding, masked_inputs) + + def make_mask(keep_prob, units): + random_tensor = keep_prob + # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob) + random_tensor += tf.random_uniform(tf.stack([FLAGS.batch_size, units])) + return tf.floor(random_tensor) / keep_prob + + if is_training: + output_mask = make_mask(hparams.dis_vd_keep_prob, hparams.dis_rnn_size) + + for t in xrange(FLAGS.sequence_length): + if t > 0: + tf.get_variable_scope().reuse_variables() + + rnn_in = masked_rnn_inputs[:, t] + rnn_out, state_dis = cell_dis(rnn_in, state_dis) + if is_training: + rnn_out *= output_mask + hidden_states.append(rnn_out) + final_state = state_dis + + return (tf.stack(hidden_states, axis=1), final_state) + + +def dis_decoder(hparams, + sequence, + encoding_state, + is_training, + reuse=None, + embedding=None): + """Define the Discriminator decoder. 
Read in the sequence and predict + at each time point.""" + sequence = tf.cast(sequence, tf.int32) + + with tf.variable_scope('decoder', reuse=reuse): + + def lstm_cell(): + return tf.contrib.rnn.BasicLSTMCell( + hparams.dis_rnn_size, + forget_bias=0.0, + state_is_tuple=True, + reuse=reuse) + + attn_cell = lstm_cell + if is_training and hparams.dis_vd_keep_prob < 1: + + def attn_cell(): + return variational_dropout.VariationalDropoutWrapper( + lstm_cell(), FLAGS.batch_size, hparams.dis_rnn_size, + hparams.dis_vd_keep_prob, hparams.dis_vd_keep_prob) + + cell_dis = tf.contrib.rnn.MultiRNNCell( + [attn_cell() for _ in range(hparams.dis_num_layers)], + state_is_tuple=True) + + # Hidden encoder states. + hidden_vector_encodings = encoding_state[0] + + # Carry forward the final state tuple from the encoder. + # State tuples. + state = encoding_state[1] + + if FLAGS.attention_option is not None: + (attention_keys, attention_values, _, + attention_construct_fn) = attention_utils.prepare_attention( + hidden_vector_encodings, + FLAGS.attention_option, + num_units=hparams.dis_rnn_size, + reuse=reuse) + + def make_mask(keep_prob, units): + random_tensor = keep_prob + # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob) + random_tensor += tf.random_uniform(tf.stack([FLAGS.batch_size, units])) + return tf.floor(random_tensor) / keep_prob + + if is_training: + output_mask = make_mask(hparams.dis_vd_keep_prob, hparams.dis_rnn_size) + + with tf.variable_scope('rnn') as vs: + predictions = [] + + rnn_inputs = tf.nn.embedding_lookup(embedding, sequence) + + for t in xrange(FLAGS.sequence_length): + if t > 0: + tf.get_variable_scope().reuse_variables() + + rnn_in = rnn_inputs[:, t] + rnn_out, state = cell_dis(rnn_in, state) + + if FLAGS.attention_option is not None: + rnn_out = attention_construct_fn(rnn_out, attention_keys, + attention_values) + if is_training: + rnn_out *= output_mask + + # Prediction is linear output for Discriminator. + pred = tf.contrib.layers.linear(rnn_out, 1, scope=vs) + predictions.append(pred) + + predictions = tf.stack(predictions, axis=1) + return tf.squeeze(predictions, axis=2) + + +def discriminator(hparams, + inputs, + targets_present, + sequence, + is_training, + reuse=None): + """Define the Discriminator graph.""" + if FLAGS.dis_share_embedding: + assert hparams.dis_rnn_size == hparams.gen_rnn_size, ( + 'If you wish to share Discriminator/Generator embeddings, they must be' + ' same dimension.') + with tf.variable_scope('gen/decoder/rnn', reuse=True): + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.gen_rnn_size]) + else: + # Explicitly share the embedding. + with tf.variable_scope('dis/decoder/rnn', reuse=reuse): + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.dis_rnn_size]) + + # Mask the input sequence. + masked_inputs = transform_input_with_is_missing_token(inputs, targets_present) + + # Confirm masking. 
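+  # tf.Print is an identity op: it passes masked_inputs through unchanged
+  # while logging the first few values of each listed tensor.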
+ masked_inputs = tf.Print( + masked_inputs, [inputs, targets_present, masked_inputs, sequence], + message='inputs, targets_present, masked_inputs, sequence', + summarize=10) + + with tf.variable_scope('dis', reuse=reuse): + encoder_states = dis_encoder( + hparams, + masked_inputs, + is_training=is_training, + reuse=reuse, + embedding=embedding) + predictions = dis_decoder( + hparams, + sequence, + encoder_states, + is_training=is_training, + reuse=reuse, + embedding=embedding) + + # if FLAGS.baseline_method == 'critic': + # with tf.variable_scope('critic', reuse=reuse) as critic_scope: + # values = tf.contrib.layers.linear(rnn_outs, 1, scope=critic_scope) + # values = tf.squeeze(values, axis=2) + # else: + # values = None + + return predictions + + +# TODO(adai): IMDB labels placeholder to encoder/decoder. +def generator(hparams, + inputs, + targets, + targets_present, + is_training, + is_validating, + reuse=None): + """Define the Generator graph.""" + with tf.variable_scope('gen', reuse=reuse): + encoder_states, initial_state, final_state = gen_encoder( + hparams, inputs, targets_present, is_training=is_training, reuse=reuse) + stacked_sequence, stacked_logits, stacked_log_probs = gen_decoder( + hparams, + inputs, + targets, + targets_present, + encoder_states, + is_training=is_training, + is_validating=is_validating, + reuse=reuse) + return (stacked_sequence, stacked_logits, stacked_log_probs, initial_state, + final_state, encoder_states) diff --git a/models/research/maskgan/models/seq2seq_zaremba.py b/models/research/maskgan/models/seq2seq_zaremba.py new file mode 100644 index 0000000000000000000000000000000000000000..25f6ce44f0cb2fe650e23b332ace014ab7cdf469 --- /dev/null +++ b/models/research/maskgan/models/seq2seq_zaremba.py @@ -0,0 +1,305 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Simple seq2seq model definitions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf +from six.moves import xrange +from models import attention_utils + +FLAGS = tf.app.flags.FLAGS + + +def transform_input_with_is_missing_token(inputs, targets_present): + """Transforms the inputs to have missing tokens when it's masked out. The + mask is for the targets, so therefore, to determine if an input at time t is + masked, we have to check if the target at time t - 1 is masked out. + + e.g. + inputs = [a, b, c, d] + targets = [b, c, d, e] + targets_present = [1, 0, 1, 0] + + then, + transformed_input = [a, b, , d] + + Args: + inputs: tf.int32 Tensor of shape [batch_size, sequence_length] with tokens + up to, but not including, vocab_size. + targets_present: tf.bool Tensor of shape [batch_size, sequence_length] with + True representing the presence of the word. 
+ + Returns: + transformed_input: tf.int32 Tensor of shape [batch_size, sequence_length] + which takes on value of inputs when the input is present and takes on + value=vocab_size to indicate a missing token. + """ + # To fill in if the input is missing. + input_missing = tf.constant(FLAGS.vocab_size, + dtype=tf.int32, + shape=[FLAGS.batch_size, FLAGS.sequence_length]) + + # The 0th input will always be present to MaskGAN. + zeroth_input_present = tf.constant(True, tf.bool, shape=[FLAGS.batch_size, 1]) + + # Input present mask. + inputs_present = tf.concat( + [zeroth_input_present, targets_present[:, :-1]], axis=1) + + transformed_input = tf.where(inputs_present, inputs, input_missing) + return transformed_input + + +def gen_encoder(hparams, inputs, targets_present, is_training, reuse=None): + """Define the Encoder graph. + + + Args: + hparams: Hyperparameters for the MaskGAN. + inputs: tf.int32 Tensor of shape [batch_size, sequence_length] with tokens + up to, but not including, vocab_size. + targets_present: tf.bool Tensor of shape [batch_size, sequence_length] with + True representing the presence of the target. + is_training: Boolean indicating operational mode (train/inference). + reuse (Optional): Whether to reuse the variables. + + Returns: + Tuple of (hidden_states, final_state). + """ + with tf.variable_scope('encoder', reuse=reuse): + + def lstm_cell(): + return tf.contrib.rnn.BasicLSTMCell(hparams.gen_rnn_size, + forget_bias=0.0, + state_is_tuple=True, + reuse=reuse) + + attn_cell = lstm_cell + if is_training and FLAGS.keep_prob < 1: + + def attn_cell(): + return tf.contrib.rnn.DropoutWrapper( + lstm_cell(), output_keep_prob=FLAGS.keep_prob) + + cell = tf.contrib.rnn.MultiRNNCell( + [attn_cell() for _ in range(hparams.gen_num_layers)], + state_is_tuple=True) + + initial_state = cell.zero_state(FLAGS.batch_size, tf.float32) + + # Add a missing token for inputs not present. + real_inputs = inputs + masked_inputs = transform_input_with_is_missing_token(inputs, + targets_present) + + with tf.variable_scope('rnn'): + hidden_states = [] + + # Split the embedding into two parts so that we can load the PTB + # weights into one part of the Variable. + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.gen_rnn_size]) + missing_embedding = tf.get_variable('missing_embedding', + [1, hparams.gen_rnn_size]) + embedding = tf.concat([embedding, missing_embedding], axis=0) + + real_rnn_inputs = tf.nn.embedding_lookup(embedding, real_inputs) + masked_rnn_inputs = tf.nn.embedding_lookup(embedding, masked_inputs) + + if is_training and FLAGS.keep_prob < 1: + masked_rnn_inputs = tf.nn.dropout(masked_rnn_inputs, FLAGS.keep_prob) + + state = initial_state + for t in xrange(FLAGS.sequence_length): + if t > 0: + tf.get_variable_scope().reuse_variables() + + rnn_inp = masked_rnn_inputs[:, t] + rnn_out, state = cell(rnn_inp, state) + hidden_states.append(rnn_out) + final_masked_state = state + hidden_states = tf.stack(hidden_states, axis=1) + + # Produce the RNN state had the model operated only + # over real data. + real_state = initial_state + for t in xrange(FLAGS.sequence_length): + tf.get_variable_scope().reuse_variables() + + # RNN. + rnn_inp = real_rnn_inputs[:, t] + rnn_out, real_state = cell(rnn_inp, real_state) + final_state = real_state + + return (hidden_states, final_masked_state), initial_state, final_state + + +def gen_decoder(hparams, + inputs, + targets, + targets_present, + encoding_state, + is_training, + is_validating, + reuse=None): + """Define the Decoder graph. 
The Decoder will now impute tokens that + have been masked from the input seqeunce. + """ + gen_decoder_rnn_size = hparams.gen_rnn_size + + with tf.variable_scope('decoder', reuse=reuse): + + def lstm_cell(): + return tf.contrib.rnn.BasicLSTMCell(gen_decoder_rnn_size, + forget_bias=0.0, + state_is_tuple=True, + reuse=reuse) + + attn_cell = lstm_cell + if is_training and FLAGS.keep_prob < 1: + + def attn_cell(): + return tf.contrib.rnn.DropoutWrapper( + lstm_cell(), output_keep_prob=FLAGS.keep_prob) + + cell_gen = tf.contrib.rnn.MultiRNNCell( + [attn_cell() for _ in range(hparams.gen_num_layers)], + state_is_tuple=True) + + # Hidden encoder states. + hidden_vector_encodings = encoding_state[0] + + # Carry forward the final state tuple from the encoder. + # State tuples. + state_gen = encoding_state[1] + + if FLAGS.attention_option is not None: + (attention_keys, attention_values, _, + attention_construct_fn) = attention_utils.prepare_attention( + hidden_vector_encodings, + FLAGS.attention_option, + num_units=gen_decoder_rnn_size, + reuse=reuse) + + with tf.variable_scope('rnn'): + sequence, logits, log_probs = [], [], [] + + embedding = tf.get_variable('embedding', + [FLAGS.vocab_size, hparams.gen_rnn_size]) + softmax_w = tf.matrix_transpose(embedding) + softmax_b = tf.get_variable('softmax_b', [FLAGS.vocab_size]) + + rnn_inputs = tf.nn.embedding_lookup(embedding, inputs) + + if is_training and FLAGS.keep_prob < 1: + rnn_inputs = tf.nn.dropout(rnn_inputs, FLAGS.keep_prob) + + rnn_outs = [] + + fake = None + for t in xrange(FLAGS.sequence_length): + if t > 0: + tf.get_variable_scope().reuse_variables() + + # Input to the Decoder. + if t == 0: + # Always provide the real input at t = 0. + rnn_inp = rnn_inputs[:, t] + + # If the input is present, read in the input at t. + # If the input is not present, read in the previously generated. + else: + real_rnn_inp = rnn_inputs[:, t] + + # While validating, the decoder should be operating in teacher + # forcing regime. Also, if we're just training with cross_entropy + # use teacher forcing. + if is_validating or FLAGS.gen_training_strategy == 'cross_entropy': + rnn_inp = real_rnn_inp + else: + fake_rnn_inp = tf.nn.embedding_lookup(embedding, fake) + rnn_inp = tf.where(targets_present[:, t - 1], real_rnn_inp, + fake_rnn_inp) + + # RNN. + rnn_out, state_gen = cell_gen(rnn_inp, state_gen) + + if FLAGS.attention_option is not None: + rnn_out = attention_construct_fn(rnn_out, attention_keys, + attention_values) + rnn_outs.append(rnn_out) + if FLAGS.gen_training_strategy != 'cross_entropy': + logit = tf.nn.bias_add(tf.matmul(rnn_out, softmax_w), softmax_b) + + # Output for Decoder. + # If input is present: Return real at t+1. + # If input is not present: Return fake for t+1. + real = targets[:, t] + + categorical = tf.contrib.distributions.Categorical(logits=logit) + fake = categorical.sample() + log_prob = categorical.log_prob(fake) + + output = tf.where(targets_present[:, t], real, fake) + + else: + batch_size = tf.shape(rnn_out)[0] + logit = tf.zeros(tf.stack([batch_size, FLAGS.vocab_size])) + log_prob = tf.zeros(tf.stack([batch_size])) + output = targets[:, t] + + # Add to lists. 
+ sequence.append(output) + log_probs.append(log_prob) + logits.append(logit) + if FLAGS.gen_training_strategy == 'cross_entropy': + logits = tf.nn.bias_add( + tf.matmul( + tf.reshape(tf.stack(rnn_outs, 1), [-1, gen_decoder_rnn_size]), + softmax_w), softmax_b) + logits = tf.reshape(logits, + [-1, FLAGS.sequence_length, FLAGS.vocab_size]) + else: + logits = tf.stack(logits, axis=1) + + return (tf.stack(sequence, axis=1), logits, tf.stack(log_probs, axis=1)) + + +def generator(hparams, + inputs, + targets, + targets_present, + is_training, + is_validating, + reuse=None): + """Define the Generator graph.""" + with tf.variable_scope('gen', reuse=reuse): + encoder_states, initial_state, final_state = gen_encoder( + hparams, inputs, targets_present, is_training=is_training, reuse=reuse) + stacked_sequence, stacked_logits, stacked_log_probs = gen_decoder( + hparams, + inputs, + targets, + targets_present, + encoder_states, + is_training=is_training, + is_validating=is_validating, + reuse=reuse) + return (stacked_sequence, stacked_logits, stacked_log_probs, initial_state, + final_state) diff --git a/models/research/maskgan/nas_utils/__init__.py b/models/research/maskgan/nas_utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/maskgan/nas_utils/configs.py b/models/research/maskgan/nas_utils/configs.py new file mode 100644 index 0000000000000000000000000000000000000000..80d867c36d1de07663d59d6c161aaf9cbe241d95 --- /dev/null +++ b/models/research/maskgan/nas_utils/configs.py @@ -0,0 +1,46 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +def print_config(config): + print("-" * 10, "Configuration Specs", "-" * 10) + for item in dir(config): + if list(item)[0] != "_": + print(item, getattr(config, item)) + print("-" * 29) + + +class AlienConfig2(object): + """Base 8 740 shared embeddings, gets 64.0 (mean: std: min: max: ).""" + init_scale = 0.05 + learning_rate = 1.0 + max_grad_norm = 10 + num_layers = 2 + num_steps = 25 + hidden_size = 740 + max_epoch = 70 + max_max_epoch = 250 + keep_prob = [1 - 0.15, 1 - 0.45] + lr_decay = 0.95 + batch_size = 20 + vocab_size = 10000 + weight_decay = 1e-4 + share_embeddings = True + cell = "alien" + dropout_type = "variational" diff --git a/models/research/maskgan/nas_utils/custom_cell.py b/models/research/maskgan/nas_utils/custom_cell.py new file mode 100644 index 0000000000000000000000000000000000000000..6add7ffa4e0d69da56d2bba7d9da3875b5c4dd3b --- /dev/null +++ b/models/research/maskgan/nas_utils/custom_cell.py @@ -0,0 +1,166 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. 
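For reference, the core input-selection rule of the seq2seq_zaremba decoder above (feed the ground-truth token wherever the previous target was present, otherwise feed back the token the generator just sampled) can be sketched in NumPy; the values are hypothetical and the snippet is illustrative only, not part of this patch:

import numpy as np

def next_decoder_input(real_tokens_t, sampled_tokens_tm1, present_tm1):
  # Mirrors tf.where(targets_present[:, t - 1], real_rnn_inp, fake_rnn_inp) above,
  # but on token ids instead of embeddings.
  return np.where(present_tm1, real_tokens_t, sampled_tokens_tm1)

real_t = np.array([7, 3, 9])                 # ground-truth input tokens at step t
sampled_tm1 = np.array([5, 5, 5])            # tokens sampled by the generator at step t - 1
present_tm1 = np.array([True, False, True])  # was the target at step t - 1 present?
print(next_decoder_input(real_t, sampled_tm1, present_tm1))  # -> [7 5 9]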
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import numpy as np +import tensorflow as tf + +flags = tf.flags +FLAGS = tf.app.flags.FLAGS +LSTMTuple = collections.namedtuple('LSTMTuple', ['c', 'h']) + + +def cell_depth(num): + num /= 2 + val = np.log2(1 + num) + assert abs(val - int(val)) == 0 + return int(val) + + +class GenericMultiRNNCell(tf.contrib.rnn.RNNCell): + """More generic version of MultiRNNCell that allows you to pass in a dropout mask""" + + def __init__(self, cells): + """Create a RNN cell composed sequentially of a number of RNNCells. + + Args: + cells: list of RNNCells that will be composed in this order. + state_is_tuple: If True, accepted and returned states are n-tuples, where + `n = len(cells)`. If False, the states are all + concatenated along the column axis. This latter behavior will soon be + deprecated. + + Raises: + ValueError: if cells is empty (not allowed), or at least one of the cells + returns a state tuple but the flag `state_is_tuple` is `False`. + """ + self._cells = cells + + @property + def state_size(self): + return tuple(cell.state_size for cell in self._cells) + + @property + def output_size(self): + return self._cells[-1].output_size + + def __call__(self, inputs, state, input_masks=None, scope=None): + """Run this multi-layer cell on inputs, starting from state.""" + with tf.variable_scope(scope or type(self).__name__): + cur_inp = inputs + new_states = [] + for i, cell in enumerate(self._cells): + with tf.variable_scope('Cell%d' % i): + cur_state = state[i] + if input_masks is not None: + cur_inp *= input_masks[i] + cur_inp, new_state = cell(cur_inp, cur_state) + new_states.append(new_state) + new_states = tuple(new_states) + return cur_inp, new_states + + +class AlienRNNBuilder(tf.contrib.rnn.RNNCell): + + def __init__(self, num_units, params, additional_params, base_size): + self.num_units = num_units + self.cell_create_index = additional_params[0] + self.cell_inject_index = additional_params[1] + self.base_size = base_size + self.cell_params = params[ + -2:] # Cell injection parameters are always the last two + params = params[:-2] + self.depth = cell_depth(len(params)) + self.params = params + self.units_per_layer = [2**i for i in range(self.depth) + ][::-1] # start with the biggest layer + + def __call__(self, inputs, state, scope=None): + with tf.variable_scope(scope or type(self).__name__): + definition1 = ['add', 'elem_mult', 'max'] + definition2 = [tf.identity, tf.tanh, tf.sigmoid, tf.nn.relu, tf.sin] + layer_outputs = [[] for _ in range(self.depth)] + with tf.variable_scope('rnn_builder'): + curr_index = 0 + c, h = state + + # Run all dense matrix multiplications at once + big_h_mat = tf.get_variable( + 'big_h_mat', [self.num_units, + self.base_size * self.num_units], tf.float32) + big_inputs_mat = tf.get_variable( + 
'big_inputs_mat', [self.num_units, + self.base_size * self.num_units], tf.float32) + big_h_output = tf.matmul(h, big_h_mat) + big_inputs_output = tf.matmul(inputs, big_inputs_mat) + h_splits = tf.split(big_h_output, self.base_size, axis=1) + inputs_splits = tf.split(big_inputs_output, self.base_size, axis=1) + + for layer_num, units in enumerate(self.units_per_layer): + for unit_num in range(units): + with tf.variable_scope( + 'layer_{}_unit_{}'.format(layer_num, unit_num)): + if layer_num == 0: + prev1_mat = h_splits[unit_num] + prev2_mat = inputs_splits[unit_num] + else: + prev1_mat = layer_outputs[layer_num - 1][2 * unit_num] + prev2_mat = layer_outputs[layer_num - 1][2 * unit_num + 1] + if definition1[self.params[curr_index]] == 'add': + output = prev1_mat + prev2_mat + elif definition1[self.params[curr_index]] == 'elem_mult': + output = prev1_mat * prev2_mat + elif definition1[self.params[curr_index]] == 'max': + output = tf.maximum(prev1_mat, prev2_mat) + if curr_index / 2 == self.cell_create_index: # Take the new cell before the activation + new_c = tf.identity(output) + output = definition2[self.params[curr_index + 1]](output) + if curr_index / 2 == self.cell_inject_index: + if definition1[self.cell_params[0]] == 'add': + output += c + elif definition1[self.cell_params[0]] == 'elem_mult': + output *= c + elif definition1[self.cell_params[0]] == 'max': + output = tf.maximum(output, c) + output = definition2[self.cell_params[1]](output) + layer_outputs[layer_num].append(output) + curr_index += 2 + new_h = layer_outputs[-1][-1] + return new_h, LSTMTuple(new_c, new_h) + + @property + def state_size(self): + return LSTMTuple(self.num_units, self.num_units) + + @property + def output_size(self): + return self.num_units + + +class Alien(AlienRNNBuilder): + """Base 8 Cell.""" + + def __init__(self, num_units): + params = [ + 0, 2, 0, 3, 0, 2, 1, 3, 0, 1, 0, 2, 0, 1, 0, 2, 1, 1, 0, 1, 1, 1, 0, 2, + 1, 0, 0, 1, 1, 1, 0, 1 + ] + additional_params = [12, 8] + base_size = 8 + super(Alien, self).__init__(num_units, params, additional_params, base_size) diff --git a/models/research/maskgan/nas_utils/variational_dropout.py b/models/research/maskgan/nas_utils/variational_dropout.py new file mode 100644 index 0000000000000000000000000000000000000000..49cc29f0cd77f7bef9e3c47e7d7dae73fa877ecd --- /dev/null +++ b/models/research/maskgan/nas_utils/variational_dropout.py @@ -0,0 +1,61 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
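As a worked check of cell_depth and units_per_layer for the Alien cell defined above (illustrative only): the 32-entry parameter list reserves its last two entries for cell injection, leaving 30 entries, i.e. 15 units of 2 parameters each arranged in a binary tree.

import numpy as np

params = 32 - 2                                    # entries left after the 2 cell-injection parameters
depth = int(np.log2(1 + params / 2))               # log2(1 + 15) = 4, as computed by cell_depth()
units_per_layer = [2 ** i for i in range(depth)][::-1]
print(units_per_layer)                             # [8, 4, 2, 1]: 15 units, matching base_size=8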
+# ============================================================================== + +"""Variational Dropout.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +FLAGS = tf.app.flags.FLAGS + + +def generate_dropout_masks(keep_prob, shape, amount): + masks = [] + for _ in range(amount): + dropout_mask = tf.random_uniform(shape) + (keep_prob) + dropout_mask = tf.floor(dropout_mask) / (keep_prob) + masks.append(dropout_mask) + return masks + + +def generate_variational_dropout_masks(hparams, keep_prob): + [batch_size, num_steps, size, num_layers] = [ + FLAGS.batch_size, FLAGS.sequence_length, hparams.gen_rnn_size, + hparams.gen_num_layers + ] + if len(keep_prob) == 2: + emb_keep_prob = keep_prob[0] # keep prob for embedding matrix + h2h_keep_prob = emb_keep_prob # keep prob for hidden to hidden connections + h2i_keep_prob = keep_prob[1] # keep prob for hidden to input connections + out_keep_prob = h2i_keep_prob # keep probability for output state + else: + emb_keep_prob = keep_prob[0] # keep prob for embedding matrix + h2h_keep_prob = keep_prob[1] # keep prob for hidden to hidden connections + h2i_keep_prob = keep_prob[2] # keep prob for hidden to input connections + out_keep_prob = keep_prob[3] # keep probability for output state + h2i_masks = [] # Masks for input to recurrent connections + h2h_masks = [] # Masks for recurrent to recurrent connections + + # Input word dropout mask + emb_masks = generate_dropout_masks(emb_keep_prob, [num_steps, 1], batch_size) + output_mask = generate_dropout_masks(out_keep_prob, [batch_size, size], 1)[0] + h2i_masks = generate_dropout_masks(h2i_keep_prob, [batch_size, size], + num_layers) + h2h_masks = generate_dropout_masks(h2h_keep_prob, [batch_size, size], + num_layers) + return h2h_masks, h2i_masks, emb_masks, output_mask diff --git a/models/research/maskgan/pretrain_mask_gan.py b/models/research/maskgan/pretrain_mask_gan.py new file mode 100644 index 0000000000000000000000000000000000000000..1a9d8ee947deaa3e31cc4c332969ed529e60305e --- /dev/null +++ b/models/research/maskgan/pretrain_mask_gan.py @@ -0,0 +1,231 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Pretraining functions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# Dependency imports + +import numpy as np + +import tensorflow as tf + +from data import imdb_loader +from data import ptb_loader + +# Data. 
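What makes the dropout above "variational" is that each mask from generate_dropout_masks is sampled once and then reused at every time step of the sequence, rather than being resampled per step. A small NumPy sketch of that reuse (shapes and weights are hypothetical, illustrative only):

import numpy as np

def make_mask(keep_prob, shape, rng):
  return np.floor(keep_prob + rng.uniform(size=shape)) / keep_prob

rng = np.random.RandomState(0)
batch, hidden, seq_len = 4, 8, 5
W_hh = 0.1 * rng.randn(hidden, hidden)
x = rng.randn(seq_len, batch, hidden)
h = np.zeros((batch, hidden))
h2h_mask = make_mask(0.55, (batch, hidden), rng)   # sampled once per sequence...
for t in range(seq_len):
  h = np.tanh(x[t] + (h * h2h_mask).dot(W_hh))     # ...and applied identically at every step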
+from model_utils import model_utils
+from models import evaluation_utils
+
+tf.app.flags.DEFINE_integer(
+    'gen_pretrain_steps', None,
+    'The number of steps to pretrain the generator with cross entropy loss.')
+tf.app.flags.DEFINE_integer(
+    'dis_pretrain_steps', None,
+    'The number of steps to pretrain the discriminator.')
+
+FLAGS = tf.app.flags.FLAGS
+
+
+def pretrain_generator(sv, sess, model, data, log, id_to_word,
+                       data_ngram_counts, is_chief):
+  """Pretrain the generator with classic language modeling training."""
+  print('\nPretraining generator for %d steps.' % FLAGS.gen_pretrain_steps)
+  log.write(
+      '\nPretraining generator for %d steps.\n' % FLAGS.gen_pretrain_steps)
+
+  is_pretraining = True
+
+  while is_pretraining:
+
+    costs = 0.
+    iters = 0
+    if FLAGS.data_set == 'ptb':
+      iterator = ptb_loader.ptb_iterator(data, FLAGS.batch_size,
+                                         FLAGS.sequence_length,
+                                         FLAGS.epoch_size_override)
+    elif FLAGS.data_set == 'imdb':
+      iterator = imdb_loader.imdb_iterator(data, FLAGS.batch_size,
+                                           FLAGS.sequence_length)
+
+    for x, y, _ in iterator:
+
+      # For pretraining with cross entropy loss, we have all tokens in the
+      # forward sequence present (all True).
+      model_utils.assign_percent_real(sess, model.percent_real_update,
+                                      model.new_rate, 1.0)
+      p = np.ones(shape=[FLAGS.batch_size, FLAGS.sequence_length], dtype=bool)
+
+      pretrain_feed = {model.inputs: x, model.targets: y, model.present: p}
+
+      [losses, cost_eval, _, step] = sess.run(
+          [
+              model.fake_cross_entropy_losses, model.avg_log_perplexity,
+              model.gen_pretrain_op, model.global_step
+          ],
+          feed_dict=pretrain_feed)
+
+      costs += cost_eval
+      iters += FLAGS.sequence_length
+
+      # Calculate rolling perplexity.
+      perplexity = np.exp(costs / iters)
+
+      # Summaries.
+      if is_chief and step % FLAGS.summaries_every == 0:
+        # Graph summaries.
+        summary_str = sess.run(
+            model.merge_summaries_op, feed_dict=pretrain_feed)
+        sv.SummaryComputed(sess, summary_str)
+
+        # Additional summary. 
+ for n, data_ngram_count in data_ngram_counts.iteritems(): + avg_percent_captured = evaluation_utils.sequence_ngram_evaluation( + sess, model.fake_sequence, log, pretrain_feed, data_ngram_count, + int(n)) + summary_percent_str = tf.Summary(value=[ + tf.Summary.Value( + tag='general/%s-grams_percent_correct' % n, + simple_value=avg_percent_captured) + ]) + sv.SummaryComputed(sess, summary_percent_str, global_step=step) + + summary_perplexity_str = tf.Summary(value=[ + tf.Summary.Value(tag='general/perplexity', simple_value=perplexity) + ]) + sv.SummaryComputed(sess, summary_perplexity_str, global_step=step) + + # Printing and logging + if is_chief and step % FLAGS.print_every == 0: + print('global_step: %d' % step) + print(' generator loss: %.3f' % np.mean(losses)) + print(' perplexity: %.3f' % perplexity) + log.write('global_step: %d\n' % step) + log.write(' generator loss: %.3f\n' % np.mean(losses)) + log.write(' perplexity: %.3f\n' % perplexity) + + for n, data_ngram_count in data_ngram_counts.iteritems(): + avg_percent_captured = evaluation_utils.sequence_ngram_evaluation( + sess, model.fake_sequence, log, pretrain_feed, data_ngram_count, + int(n)) + print(' percent of %s-grams captured: %.3f.\n' % + (n, avg_percent_captured)) + log.write(' percent of %s-grams captured: %.3f.\n\n' % + (n, avg_percent_captured)) + + evaluation_utils.generate_logs(sess, model, log, id_to_word, + pretrain_feed) + + if step >= FLAGS.gen_pretrain_steps: + is_pretraining = False + break + return + + +def pretrain_discriminator(sv, sess, model, data, log, id_to_word, + data_ngram_counts, is_chief): + print('\nPretraining discriminator for %d steps.' % FLAGS.dis_pretrain_steps) + log.write( + '\nPretraining discriminator for %d steps.\n' % FLAGS.dis_pretrain_steps) + + is_pretraining = True + + while is_pretraining: + + cumulative_costs = 0. + iters = 0 + if FLAGS.data_set == 'ptb': + iterator = ptb_loader.ptb_iterator(data, FLAGS.batch_size, + FLAGS.sequence_length, + FLAGS.epoch_size_override) + elif FLAGS.data_set == 'imdb': + iterator = imdb_loader.imdb_iterator(data, FLAGS.batch_size, + FLAGS.sequence_length) + + for x, y, _ in iterator: + is_present_rate = FLAGS.is_present_rate + # is_present_rate = np.random.uniform(low=0.0, high=1.0) + model_utils.assign_percent_real(sess, model.percent_real_update, + model.new_rate, is_present_rate) + # Randomly mask out tokens. + p = model_utils.generate_mask() + + pretrain_feed = {model.inputs: x, model.targets: y, model.present: p} + + [_, dis_loss_eval, gen_log_perplexity_eval, step] = sess.run( + [ + model.dis_pretrain_op, model.dis_loss, model.avg_log_perplexity, + model.global_step + ], + feed_dict=pretrain_feed) + + cumulative_costs += gen_log_perplexity_eval + iters += 1 + + # Calulate rolling perplexity. + perplexity = np.exp(cumulative_costs / iters) + + # Summaries. + if is_chief and step % FLAGS.summaries_every == 0: + # Graph summaries. + summary_str = sess.run( + model.merge_summaries_op, feed_dict=pretrain_feed) + sv.SummaryComputed(sess, summary_str) + + # Additional summary. 
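The pretraining loops above mask tokens with model_utils.generate_mask(), which is defined elsewhere in the repository and not shown in this patch. As a rough, hypothetical stand-in for the 'random' masking strategy (the mask_strategy flag defined later also allows 'contiguous'), a Bernoulli presence mask at FLAGS.is_present_rate looks like this:

import numpy as np

def generate_mask_np(batch_size, sequence_length, is_present_rate, rng=np.random):
  # Hypothetical stand-in for model_utils.generate_mask():
  # True marks a token as present, False marks it as masked out.
  return rng.uniform(size=(batch_size, sequence_length)) < is_present_rate

p = generate_mask_np(batch_size=20, sequence_length=20, is_present_rate=0.5)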
+ for n, data_ngram_count in data_ngram_counts.iteritems(): + avg_percent_captured = evaluation_utils.sequence_ngram_evaluation( + sess, model.fake_sequence, log, pretrain_feed, data_ngram_count, + int(n)) + summary_percent_str = tf.Summary(value=[ + tf.Summary.Value( + tag='general/%s-grams_percent_correct' % n, + simple_value=avg_percent_captured) + ]) + sv.SummaryComputed(sess, summary_percent_str, global_step=step) + + summary_perplexity_str = tf.Summary(value=[ + tf.Summary.Value(tag='general/perplexity', simple_value=perplexity) + ]) + sv.SummaryComputed(sess, summary_perplexity_str, global_step=step) + + # Printing and logging + if is_chief and step % FLAGS.print_every == 0: + print('global_step: %d' % step) + print(' discriminator loss: %.3f' % dis_loss_eval) + print(' perplexity: %.3f' % perplexity) + log.write('global_step: %d\n' % step) + log.write(' discriminator loss: %.3f\n' % dis_loss_eval) + log.write(' perplexity: %.3f\n' % perplexity) + + for n, data_ngram_count in data_ngram_counts.iteritems(): + avg_percent_captured = evaluation_utils.sequence_ngram_evaluation( + sess, model.fake_sequence, log, pretrain_feed, data_ngram_count, + int(n)) + print(' percent of %s-grams captured: %.3f.\n' % + (n, avg_percent_captured)) + log.write(' percent of %s-grams captured: %.3f.\n\n' % + (n, avg_percent_captured)) + + evaluation_utils.generate_logs(sess, model, log, id_to_word, + pretrain_feed) + + if step >= FLAGS.dis_pretrain_steps + int(FLAGS.gen_pretrain_steps or 0): + is_pretraining = False + break + return diff --git a/models/research/maskgan/regularization/__init__.py b/models/research/maskgan/regularization/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/maskgan/regularization/variational_dropout.py b/models/research/maskgan/regularization/variational_dropout.py new file mode 100644 index 0000000000000000000000000000000000000000..d67fe52eee45c31012fe50e5de662d27565befae --- /dev/null +++ b/models/research/maskgan/regularization/variational_dropout.py @@ -0,0 +1,56 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Variational Dropout Wrapper.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + + +class VariationalDropoutWrapper(tf.contrib.rnn.RNNCell): + """Add variational dropout to a RNN cell.""" + + def __init__(self, cell, batch_size, input_size, recurrent_keep_prob, + input_keep_prob): + self._cell = cell + self._recurrent_keep_prob = recurrent_keep_prob + self._input_keep_prob = input_keep_prob + + def make_mask(keep_prob, units): + random_tensor = keep_prob + # 0. if [keep_prob, 1.0) and 1. 
if [1.0, 1.0 + keep_prob) + random_tensor += tf.random_uniform(tf.stack([batch_size, units])) + return tf.floor(random_tensor) / keep_prob + + self._recurrent_mask = make_mask(recurrent_keep_prob, + self._cell.state_size[0]) + self._input_mask = self._recurrent_mask + + @property + def state_size(self): + return self._cell.state_size + + @property + def output_size(self): + return self._cell.output_size + + def __call__(self, inputs, state, scope=None): + dropped_inputs = inputs * self._input_mask + dropped_state = (state[0], state[1] * self._recurrent_mask) + new_h, new_state = self._cell(dropped_inputs, dropped_state, scope) + return new_h, new_state diff --git a/models/research/maskgan/regularization/zoneout.py b/models/research/maskgan/regularization/zoneout.py new file mode 100644 index 0000000000000000000000000000000000000000..5f9ef3e3014ae6f2e7eea1a2937c5f1e2c356411 --- /dev/null +++ b/models/research/maskgan/regularization/zoneout.py @@ -0,0 +1,64 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Zoneout Wrapper""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + + +class ZoneoutWrapper(tf.contrib.rnn.RNNCell): + """Add Zoneout to a RNN cell.""" + + def __init__(self, cell, zoneout_drop_prob, is_training=True): + self._cell = cell + self._zoneout_prob = zoneout_drop_prob + self._is_training = is_training + + @property + def state_size(self): + return self._cell.state_size + + @property + def output_size(self): + return self._cell.output_size + + def __call__(self, inputs, state, scope=None): + output, new_state = self._cell(inputs, state, scope) + if not isinstance(self._cell.state_size, tuple): + new_state = tf.split(value=new_state, num_or_size_splits=2, axis=1) + state = tf.split(value=state, num_or_size_splits=2, axis=1) + final_new_state = [new_state[0], new_state[1]] + if self._is_training: + for i, state_element in enumerate(state): + random_tensor = 1 - self._zoneout_prob # keep probability + random_tensor += tf.random_uniform(tf.shape(state_element)) + # 0. if [zoneout_prob, 1.0) and 1. 
if [1.0, 1.0 + zoneout_prob) + binary_tensor = tf.floor(random_tensor) + final_new_state[ + i] = (new_state[i] - state_element) * binary_tensor + state_element + else: + for i, state_element in enumerate(state): + final_new_state[ + i] = state_element * self._zoneout_prob + new_state[i] * ( + 1 - self._zoneout_prob) + if isinstance(self._cell.state_size, tuple): + return output, tf.contrib.rnn.LSTMStateTuple( + final_new_state[0], final_new_state[1]) + + return output, tf.concat([final_new_state[0], final_new_state[1]], 1) diff --git a/models/research/maskgan/sample_shuffler.py b/models/research/maskgan/sample_shuffler.py new file mode 100644 index 0000000000000000000000000000000000000000..58c31fb573a864b33f3d6e2f17b42e42f1d0ea4d --- /dev/null +++ b/models/research/maskgan/sample_shuffler.py @@ -0,0 +1,95 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Shuffle samples for human evaluation. + +Local launch command: + python sample_shuffler.py + --input_ml_path=/tmp/ptb/seq2seq_vd_shareemb_forreal_55_3 + --input_gan_path=/tmp/ptb/MaskGAN_PTB_ari_avg_56.29_v2.0.0 + --output_file_name=/tmp/ptb/shuffled_output.txt + + python sample_shuffler.py + --input_ml_path=/tmp/generate_samples/MaskGAN_IMDB_Benchmark_87.1_v0.3.0 + --input_gan_path=/tmp/generate_samples/MaskGAN_IMDB_v1.0.1 + --output_file_name=/tmp/imdb/shuffled_output.txt +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +# Dependency imports +import numpy as np + +import tensorflow as tf + +tf.app.flags.DEFINE_string('input_ml_path', '/tmp', 'Model output directory.') +tf.app.flags.DEFINE_string('input_gan_path', '/tmp', 'Model output directory.') +tf.app.flags.DEFINE_string('output_file_name', '/tmp/ptb/shuffled_output.txt', + 'Model output file.') +tf.app.flags.DEFINE_boolean( + 'output_masked_logs', False, + 'Whether to display for human evaluation (show masking).') +tf.app.flags.DEFINE_integer('number_epochs', 1, + 'The number of epochs to produce.') + +FLAGS = tf.app.flags.FLAGS + + +def shuffle_samples(input_file_1, input_file_2): + """Shuffle the examples.""" + shuffled = [] + + # Set a random seed to keep fixed mask. 
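The zoneout update above can be summarised as: during training each state unit keeps its previous value with probability zoneout_prob and takes the newly computed value otherwise, while at evaluation time the two are mixed by their expectations. A NumPy sketch of that rule (illustrative only, not part of this patch):

import numpy as np

def zoneout_np(new_state, old_state, zoneout_prob, is_training, rng=np.random):
  if is_training:
    # floor((1 - zoneout_prob) + U[0, 1)) is 1 with probability (1 - zoneout_prob).
    keep_new = np.floor((1.0 - zoneout_prob) + rng.uniform(size=new_state.shape))
    return (new_state - old_state) * keep_new + old_state
  # Evaluation: deterministic mixture of old and new state.
  return zoneout_prob * old_state + (1.0 - zoneout_prob) * new_state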
+ np.random.seed(0) + + for line_1, line_2 in zip(input_file_1, input_file_2): + rand = np.random.randint(1, 3) + if rand == 1: + shuffled.append((rand, line_1, line_2)) + else: + shuffled.append((rand, line_2, line_1)) + input_file_1.close() + input_file_2.close() + return shuffled + + +def generate_output(shuffled_tuples, output_file_name): + output_file = tf.gfile.GFile(output_file_name, mode='w') + + for tup in shuffled_tuples: + formatted_tuple = ('\n{:<1}, {:<1}, {:<1}').format(tup[0], tup[1].rstrip(), + tup[2].rstrip()) + output_file.write(formatted_tuple) + output_file.close() + + +def main(_): + ml_samples_file = tf.gfile.GFile( + os.path.join(FLAGS.input_ml_path, 'reviews.txt'), mode='r') + gan_samples_file = tf.gfile.GFile( + os.path.join(FLAGS.input_gan_path, 'reviews.txt'), mode='r') + + # Generate shuffled tuples. + shuffled_tuples = shuffle_samples(ml_samples_file, gan_samples_file) + + # Output to file. + generate_output(shuffled_tuples, FLAGS.output_file_name) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/maskgan/train_mask_gan.py b/models/research/maskgan/train_mask_gan.py new file mode 100644 index 0000000000000000000000000000000000000000..1e70c2284a8704b1c92dcdec850ac29fc9625667 --- /dev/null +++ b/models/research/maskgan/train_mask_gan.py @@ -0,0 +1,1167 @@ +# Copyright 2017 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Launch example: + +[IMDB] +python train_mask_gan.py --data_dir +/tmp/imdb --data_set imdb --batch_size 128 +--sequence_length 20 --base_directory /tmp/maskGAN_v0.01 +--hparams="gen_rnn_size=650,gen_num_layers=2,dis_rnn_size=650,dis_num_layers=2 +,critic_learning_rate=0.0009756,dis_learning_rate=0.0000585, +dis_train_iterations=8,gen_learning_rate=0.0016624, +gen_full_learning_rate_steps=1e9,gen_learning_rate_decay=0.999999, +rl_discount_rate=0.8835659" --mode TRAIN --max_steps 1000000 +--generator_model seq2seq_vd --discriminator_model seq2seq_vd +--is_present_rate 0.5 --summaries_every 25 --print_every 25 + --max_num_to_print=3 --generator_optimizer=adam + --seq2seq_share_embedding=True --baseline_method=critic + --attention_option=luong --n_gram_eval=4 --mask_strategy=contiguous + --gen_training_strategy=reinforce --dis_pretrain_steps=100 + --perplexity_threshold=1000000 + --dis_share_embedding=True --maskgan_ckpt + /tmp/model.ckpt-171091 +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +from functools import partial +import os +import time +# Dependency imports + +import numpy as np +from six.moves import xrange +import tensorflow as tf + +import pretrain_mask_gan +from data import imdb_loader +from data import ptb_loader +from model_utils import helper +from model_utils import model_construction +from model_utils import model_losses +from model_utils import model_optimization + +# Data. 
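The --hparams string in the launch command above is parsed into tf.contrib.training.HParams, with each listed name=value pair overriding the defaults set in create_hparams below. A minimal sketch, assuming the TF 1.x contrib API this code targets:

import tensorflow as tf

hparams = tf.contrib.training.HParams(gen_rnn_size=740, dis_rnn_size=740)
hparams = hparams.parse('gen_rnn_size=650,dis_rnn_size=650')
assert hparams.gen_rnn_size == 650 and hparams.dis_rnn_size == 650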
+from model_utils import model_utils + +from model_utils import n_gram +from models import evaluation_utils + +from models import rollout + +np.set_printoptions(precision=3) +np.set_printoptions(suppress=True) + +MODE_TRAIN = 'TRAIN' +MODE_TRAIN_EVAL = 'TRAIN_EVAL' +MODE_VALIDATION = 'VALIDATION' +MODE_TEST = 'TEST' + +## Binary and setup FLAGS. +tf.app.flags.DEFINE_enum( + 'mode', 'TRAIN', [MODE_TRAIN, MODE_VALIDATION, MODE_TEST, MODE_TRAIN_EVAL], + 'What this binary will do.') +tf.app.flags.DEFINE_string('master', '', + """Name of the TensorFlow master to use.""") +tf.app.flags.DEFINE_string('eval_master', '', + """Name prefix of the Tensorflow eval master.""") +tf.app.flags.DEFINE_integer('task', 0, + """Task id of the replica running the training.""") +tf.app.flags.DEFINE_integer('ps_tasks', 0, """Number of tasks in the ps job. + If 0 no ps job is used.""") + +## General FLAGS. +tf.app.flags.DEFINE_string( + 'hparams', '', 'Comma separated list of name=value hyperparameter pairs.') +tf.app.flags.DEFINE_integer('batch_size', 20, 'The batch size.') +tf.app.flags.DEFINE_integer('vocab_size', 10000, 'The vocabulary size.') +tf.app.flags.DEFINE_integer('sequence_length', 20, 'The sequence length.') +tf.app.flags.DEFINE_integer('max_steps', 1000000, + 'Maximum number of steps to run.') +tf.app.flags.DEFINE_string( + 'mask_strategy', 'random', 'Strategy for masking the words. Determine the ' + 'characterisitics of how the words are dropped out. One of ' + "['contiguous', 'random'].") +tf.app.flags.DEFINE_float('is_present_rate', 0.5, + 'Percent of tokens present in the forward sequence.') +tf.app.flags.DEFINE_float('is_present_rate_decay', None, 'Decay rate for the ' + 'percent of words that are real (are present).') +tf.app.flags.DEFINE_string( + 'generator_model', 'seq2seq', + "Type of Generator model. One of ['rnn', 'seq2seq', 'seq2seq_zaremba'," + "'rnn_zaremba', 'rnn_nas', 'seq2seq_nas']") +tf.app.flags.DEFINE_string( + 'attention_option', None, + "Attention mechanism. One of [None, 'luong', 'bahdanau']") +tf.app.flags.DEFINE_string( + 'discriminator_model', 'bidirectional', + "Type of Discriminator model. One of ['cnn', 'rnn', 'bidirectional', " + "'rnn_zaremba', 'bidirectional_zaremba', 'rnn_nas', 'rnn_vd', 'seq2seq_vd']" +) +tf.app.flags.DEFINE_boolean('seq2seq_share_embedding', False, + 'Whether to share the ' + 'embeddings between the encoder and decoder.') +tf.app.flags.DEFINE_boolean( + 'dis_share_embedding', False, 'Whether to share the ' + 'embeddings between the generator and discriminator.') +tf.app.flags.DEFINE_boolean('dis_update_share_embedding', False, 'Whether the ' + 'discriminator should update the shared embedding.') +tf.app.flags.DEFINE_boolean('use_gen_mode', False, + 'Use the mode of the generator ' + 'to produce samples.') +tf.app.flags.DEFINE_boolean('critic_update_dis_vars', False, + 'Whether the critic ' + 'updates the discriminator variables.') + +## Training FLAGS. +tf.app.flags.DEFINE_string( + 'gen_training_strategy', 'reinforce', + "Method for training the Generator. One of ['cross_entropy', 'reinforce']") +tf.app.flags.DEFINE_string( + 'generator_optimizer', 'adam', + "Type of Generator optimizer. One of ['sgd', 'adam']") +tf.app.flags.DEFINE_float('grad_clipping', 10., 'Norm for gradient clipping.') +tf.app.flags.DEFINE_float('advantage_clipping', 5., 'Clipping for advantages.') +tf.app.flags.DEFINE_string( + 'baseline_method', None, + "Approach for baseline. 
One of ['critic', 'dis_batch', 'ema', None]") +tf.app.flags.DEFINE_float('perplexity_threshold', 15000, + 'Limit for perplexity before terminating job.') +tf.app.flags.DEFINE_float('zoneout_drop_prob', 0.1, + 'Probability for dropping parameter for zoneout.') +tf.app.flags.DEFINE_float('keep_prob', 0.5, + 'Probability for keeping parameter for dropout.') + +## Logging and evaluation FLAGS. +tf.app.flags.DEFINE_integer('print_every', 250, + 'Frequency to print and log the ' + 'outputs of the model.') +tf.app.flags.DEFINE_integer('max_num_to_print', 5, + 'Number of samples to log/print.') +tf.app.flags.DEFINE_boolean('print_verbose', False, 'Whether to print in full.') +tf.app.flags.DEFINE_integer('summaries_every', 100, + 'Frequency to compute summaries.') +tf.app.flags.DEFINE_boolean('eval_language_model', False, + 'Whether to evaluate on ' + 'all words as in language modeling.') +tf.app.flags.DEFINE_float('eval_interval_secs', 60, + 'Delay for evaluating model.') +tf.app.flags.DEFINE_integer( + 'n_gram_eval', 4, """The degree of the n-grams to use for evaluation.""") +tf.app.flags.DEFINE_integer( + 'epoch_size_override', None, + 'If an integer, this dictates the size of the epochs and will potentially ' + 'not iterate over all the data.') +tf.app.flags.DEFINE_integer('eval_epoch_size_override', None, + 'Number of evaluation steps.') + +## Directories and checkpoints. +tf.app.flags.DEFINE_string('base_directory', '/tmp/maskGAN_v0.00', + 'Base directory for the logging, events and graph.') +tf.app.flags.DEFINE_string('data_set', 'ptb', 'Data set to operate on. One of' + "['ptb', 'imdb']") +tf.app.flags.DEFINE_string('data_dir', '/tmp/data/ptb', + 'Directory for the training data.') +tf.app.flags.DEFINE_string( + 'language_model_ckpt_dir', None, + 'Directory storing checkpoints to initialize the model. Pretrained models' + 'are stored at /tmp/maskGAN/pretrained/') +tf.app.flags.DEFINE_string( + 'language_model_ckpt_dir_reversed', None, + 'Directory storing checkpoints of reversed models to initialize the model.' + 'Pretrained models stored at' + 'are stored at /tmp/PTB/pretrained_reversed') +tf.app.flags.DEFINE_string( + 'maskgan_ckpt', None, + 'Override which checkpoint file to use to restore the ' + 'model. A pretrained seq2seq_zaremba model is stored at ' + '/tmp/maskGAN/pretrain/seq2seq_zaremba/train/model.ckpt-64912') + +tf.app.flags.DEFINE_boolean('wasserstein_objective', False, + '(DEPRECATED) Whether to use the WGAN training.') +tf.app.flags.DEFINE_integer('num_rollouts', 1, + 'The number of rolled out predictions to make.') +tf.app.flags.DEFINE_float('c_lower', -0.01, 'Lower bound for weights.') +tf.app.flags.DEFINE_float('c_upper', 0.01, 'Upper bound for weights.') + +FLAGS = tf.app.flags.FLAGS + + +def create_hparams(): + """Create the hparams object for generic training hyperparameters.""" + hparams = tf.contrib.training.HParams( + gen_num_layers=2, + dis_num_layers=2, + gen_rnn_size=740, + dis_rnn_size=740, + gen_learning_rate=5e-4, + dis_learning_rate=5e-3, + critic_learning_rate=5e-3, + dis_train_iterations=1, + gen_learning_rate_decay=1.0, + gen_full_learning_rate_steps=1e7, + baseline_decay=0.999999, + rl_discount_rate=0.9, + gen_vd_keep_prob=0.5, + dis_vd_keep_prob=0.5, + dis_pretrain_learning_rate=5e-3, + dis_num_filters=128, + dis_hidden_dim=128, + gen_nas_keep_prob_0=0.85, + gen_nas_keep_prob_1=0.55, + dis_nas_keep_prob_0=0.85, + dis_nas_keep_prob_1=0.55) + # Command line flags override any of the preceding hyperparameter values. 
+ if FLAGS.hparams: + hparams = hparams.parse(FLAGS.hparams) + return hparams + + +def create_MaskGAN(hparams, is_training): + """Create the MaskGAN model. + + Args: + hparams: Hyperparameters for the MaskGAN. + is_training: Boolean indicating operational mode (train/inference). + evaluated with a teacher forcing regime. + + Return: + model: Namedtuple for specifying the MaskGAN. + """ + global_step = tf.Variable(0, name='global_step', trainable=False) + + new_learning_rate = tf.placeholder(tf.float32, [], name='new_learning_rate') + learning_rate = tf.Variable(0.0, name='learning_rate', trainable=False) + learning_rate_update = tf.assign(learning_rate, new_learning_rate) + + new_rate = tf.placeholder(tf.float32, [], name='new_rate') + percent_real_var = tf.Variable(0.0, trainable=False) + percent_real_update = tf.assign(percent_real_var, new_rate) + + ## Placeholders. + inputs = tf.placeholder( + tf.int32, shape=[FLAGS.batch_size, FLAGS.sequence_length]) + targets = tf.placeholder( + tf.int32, shape=[FLAGS.batch_size, FLAGS.sequence_length]) + present = tf.placeholder( + tf.bool, shape=[FLAGS.batch_size, FLAGS.sequence_length]) + # TODO(adai): Placeholder for IMDB label. + + ## Real Sequence is the targets. + real_sequence = targets + + ## Fakse Sequence from the Generator. + # TODO(adai): Generator must have IMDB labels placeholder. + (fake_sequence, fake_logits, fake_log_probs, fake_gen_initial_state, + fake_gen_final_state, _) = model_construction.create_generator( + hparams, + inputs, + targets, + present, + is_training=is_training, + is_validating=False) + (_, eval_logits, _, eval_initial_state, eval_final_state, + _) = model_construction.create_generator( + hparams, + inputs, + targets, + present, + is_training=False, + is_validating=True, + reuse=True) + + ## Discriminator. + fake_predictions = model_construction.create_discriminator( + hparams, + fake_sequence, + is_training=is_training, + inputs=inputs, + present=present) + real_predictions = model_construction.create_discriminator( + hparams, + real_sequence, + is_training=is_training, + reuse=True, + inputs=inputs, + present=present) + + ## Critic. + # The critic will be used to estimate the forward rewards to the Generator. + if FLAGS.baseline_method == 'critic': + est_state_values = model_construction.create_critic( + hparams, fake_sequence, is_training=is_training) + else: + est_state_values = None + + ## Discriminator Loss. + [dis_loss, dis_loss_fake, dis_loss_real] = model_losses.create_dis_loss( + fake_predictions, real_predictions, present) + + ## Average log-perplexity for only missing words. However, to do this, + # the logits are still computed using teacher forcing, that is, the ground + # truth tokens are fed in at each time point to be valid. + avg_log_perplexity = model_losses.calculate_log_perplexity( + eval_logits, targets, present) + + ## Generator Objective. + # 1. Cross Entropy losses on missing tokens. + fake_cross_entropy_losses = model_losses.create_masked_cross_entropy_loss( + targets, present, fake_logits) + + # 2. GAN REINFORCE losses. + [ + fake_RL_loss, fake_log_probs, fake_rewards, fake_advantages, + fake_baselines, fake_averages_op, critic_loss, cumulative_rewards + ] = model_losses.calculate_reinforce_objective( + hparams, fake_log_probs, fake_predictions, present, est_state_values) + + ## Pre-training. + if FLAGS.gen_pretrain_steps: + raise NotImplementedError + # # TODO(liamfedus): Rewrite this. 
+ # fwd_cross_entropy_loss = tf.reduce_mean(fwd_cross_entropy_losses) + # gen_pretrain_op = model_optimization.create_gen_pretrain_op( + # hparams, fwd_cross_entropy_loss, global_step) + else: + gen_pretrain_op = None + if FLAGS.dis_pretrain_steps: + dis_pretrain_op = model_optimization.create_dis_pretrain_op( + hparams, dis_loss, global_step) + else: + dis_pretrain_op = None + + ## Generator Train Op. + # 1. Cross-Entropy. + if FLAGS.gen_training_strategy == 'cross_entropy': + gen_loss = tf.reduce_mean(fake_cross_entropy_losses) + [gen_train_op, gen_grads, + gen_vars] = model_optimization.create_gen_train_op( + hparams, learning_rate, gen_loss, global_step, mode='MINIMIZE') + + # 2. GAN (REINFORCE) + elif FLAGS.gen_training_strategy == 'reinforce': + gen_loss = fake_RL_loss + [gen_train_op, gen_grads, + gen_vars] = model_optimization.create_reinforce_gen_train_op( + hparams, learning_rate, gen_loss, fake_averages_op, global_step) + + else: + raise NotImplementedError + + ## Discriminator Train Op. + dis_train_op, dis_grads, dis_vars = model_optimization.create_dis_train_op( + hparams, dis_loss, global_step) + + ## Critic Train Op. + if critic_loss is not None: + [critic_train_op, _, _] = model_optimization.create_critic_train_op( + hparams, critic_loss, global_step) + dis_train_op = tf.group(dis_train_op, critic_train_op) + + ## Summaries. + with tf.name_scope('general'): + tf.summary.scalar('percent_real', percent_real_var) + tf.summary.scalar('learning_rate', learning_rate) + + with tf.name_scope('generator_objectives'): + tf.summary.scalar('gen_objective', tf.reduce_mean(gen_loss)) + tf.summary.scalar('gen_loss_cross_entropy', + tf.reduce_mean(fake_cross_entropy_losses)) + + with tf.name_scope('REINFORCE'): + with tf.name_scope('objective'): + tf.summary.scalar('fake_RL_loss', tf.reduce_mean(fake_RL_loss)) + + with tf.name_scope('rewards'): + helper.variable_summaries(cumulative_rewards, 'rewards') + + with tf.name_scope('advantages'): + helper.variable_summaries(fake_advantages, 'advantages') + + with tf.name_scope('baselines'): + helper.variable_summaries(fake_baselines, 'baselines') + + with tf.name_scope('log_probs'): + helper.variable_summaries(fake_log_probs, 'log_probs') + + with tf.name_scope('discriminator_losses'): + tf.summary.scalar('dis_loss', dis_loss) + tf.summary.scalar('dis_loss_fake_sequence', dis_loss_fake) + tf.summary.scalar('dis_loss_prob_fake_sequence', tf.exp(-dis_loss_fake)) + tf.summary.scalar('dis_loss_real_sequence', dis_loss_real) + tf.summary.scalar('dis_loss_prob_real_sequence', tf.exp(-dis_loss_real)) + + if critic_loss is not None: + with tf.name_scope('critic_losses'): + tf.summary.scalar('critic_loss', critic_loss) + + with tf.name_scope('logits'): + helper.variable_summaries(fake_logits, 'fake_logits') + + for v, g in zip(gen_vars, gen_grads): + helper.variable_summaries(v, v.op.name) + helper.variable_summaries(g, 'grad/' + v.op.name) + + for v, g in zip(dis_vars, dis_grads): + helper.variable_summaries(v, v.op.name) + helper.variable_summaries(g, 'grad/' + v.op.name) + + merge_summaries_op = tf.summary.merge_all() + text_summary_placeholder = tf.placeholder(tf.string) + text_summary_op = tf.summary.text('Samples', text_summary_placeholder) + + # Model saver. + saver = tf.train.Saver(keep_checkpoint_every_n_hours=1, max_to_keep=5) + + # Named tuple that captures elements of the MaskGAN model. 
+ Model = collections.namedtuple('Model', [ + 'inputs', 'targets', 'present', 'percent_real_update', 'new_rate', + 'fake_sequence', 'fake_logits', 'fake_rewards', 'fake_baselines', + 'fake_advantages', 'fake_log_probs', 'fake_predictions', + 'real_predictions', 'fake_cross_entropy_losses', 'fake_gen_initial_state', + 'fake_gen_final_state', 'eval_initial_state', 'eval_final_state', + 'avg_log_perplexity', 'dis_loss', 'gen_loss', 'critic_loss', + 'cumulative_rewards', 'dis_train_op', 'gen_train_op', 'gen_pretrain_op', + 'dis_pretrain_op', 'merge_summaries_op', 'global_step', + 'new_learning_rate', 'learning_rate_update', 'saver', 'text_summary_op', + 'text_summary_placeholder' + ]) + + model = Model( + inputs, targets, present, percent_real_update, new_rate, fake_sequence, + fake_logits, fake_rewards, fake_baselines, fake_advantages, + fake_log_probs, fake_predictions, real_predictions, + fake_cross_entropy_losses, fake_gen_initial_state, fake_gen_final_state, + eval_initial_state, eval_final_state, avg_log_perplexity, dis_loss, + gen_loss, critic_loss, cumulative_rewards, dis_train_op, gen_train_op, + gen_pretrain_op, dis_pretrain_op, merge_summaries_op, global_step, + new_learning_rate, learning_rate_update, saver, text_summary_op, + text_summary_placeholder) + return model + + +def compute_geometric_average(percent_captured): + """Compute the geometric average of the n-gram metrics.""" + + res = 1. + for _, n_gram_percent in percent_captured.iteritems(): + res *= n_gram_percent + + return np.power(res, 1. / float(len(percent_captured))) + + +def compute_arithmetic_average(percent_captured): + """Compute the arithmetic average of the n-gram metrics.""" + N = len(percent_captured) + + res = 0. + for _, n_gram_percent in percent_captured.iteritems(): + res += n_gram_percent + + return res / float(N) + + +def get_iterator(data): + """Return the data iterator.""" + if FLAGS.data_set == 'ptb': + iterator = ptb_loader.ptb_iterator(data, FLAGS.batch_size, + FLAGS.sequence_length, + FLAGS.epoch_size_override) + elif FLAGS.data_set == 'imdb': + iterator = imdb_loader.imdb_iterator(data, FLAGS.batch_size, + FLAGS.sequence_length) + return iterator + + +def train_model(hparams, data, log_dir, log, id_to_word, data_ngram_counts): + """Train model. + + Args: + hparams: Hyperparameters for the MaskGAN. + data: Data to evaluate. + log_dir: Directory to save checkpoints. + log: Readable log for the experiment. + id_to_word: Dictionary of indices to words. + data_ngram_counts: Dictionary of hashed(n-gram tuples) to counts in the + data_set. + """ + print('Training model.') + tf.logging.info('Training model.') + + # Boolean indicating operational mode. + is_training = True + + # Write all the information to the logs. + log.write('hparams\n') + log.write(str(hparams)) + log.flush() + + is_chief = FLAGS.task == 0 + + with tf.Graph().as_default(): + with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)): + container_name = '' + with tf.container(container_name): + # Construct the model. + if FLAGS.num_rollouts == 1: + model = create_MaskGAN(hparams, is_training) + elif FLAGS.num_rollouts > 1: + model = rollout.create_rollout_MaskGAN(hparams, is_training) + else: + raise ValueError + + print('\nTrainable Variables in Graph:') + for v in tf.trainable_variables(): + print(v) + + ## Retrieve the initial savers. + init_savers = model_utils.retrieve_init_savers(hparams) + + ## Initial saver function to supervisor. + init_fn = partial(model_utils.init_fn, init_savers) + + # Create the supervisor. 
It will take care of initialization, + # summaries, checkpoints, and recovery. + sv = tf.train.Supervisor( + logdir=log_dir, + is_chief=is_chief, + saver=model.saver, + global_step=model.global_step, + save_model_secs=60, + recovery_wait_secs=30, + summary_op=None, + init_fn=init_fn) + + # Get an initialized, and possibly recovered session. Launch the + # services: Checkpointing, Summaries, step counting. + # + # When multiple replicas of this program are running the services are + # only launched by the 'chief' replica. + with sv.managed_session(FLAGS.master) as sess: + + ## Pretrain the generator. + if FLAGS.gen_pretrain_steps: + pretrain_mask_gan.pretrain_generator(sv, sess, model, data, log, + id_to_word, data_ngram_counts, + is_chief) + + ## Pretrain the discriminator. + if FLAGS.dis_pretrain_steps: + pretrain_mask_gan.pretrain_discriminator( + sv, sess, model, data, log, id_to_word, data_ngram_counts, + is_chief) + + # Initial indicators for printing and summarizing. + print_step_division = -1 + summary_step_division = -1 + + # Run iterative computation in a loop. + while not sv.ShouldStop(): + is_present_rate = FLAGS.is_present_rate + + if FLAGS.is_present_rate_decay is not None: + is_present_rate *= (1. - FLAGS.is_present_rate_decay) + + model_utils.assign_percent_real(sess, model.percent_real_update, + model.new_rate, is_present_rate) + + # GAN training. + avg_epoch_gen_loss, avg_epoch_dis_loss = [], [] + cumulative_costs = 0. + gen_iters = 0 + + # Generator and Discriminator statefulness initial evaluation. + # TODO(liamfedus): Throughout the code I am implicitly assuming + # that the Generator and Discriminator are equal sized. + [gen_initial_state_eval, fake_gen_initial_state_eval] = sess.run( + [model.eval_initial_state, model.fake_gen_initial_state]) + dis_initial_state_eval = fake_gen_initial_state_eval + + # Save zeros state to reset later. + zeros_state = fake_gen_initial_state_eval + + ## Offset Discriminator. + if FLAGS.ps_tasks == 0: + dis_offset = 1 + else: + dis_offset = FLAGS.task * 1000 + 1 + dis_iterator = get_iterator(data) + + for i in range(dis_offset): + try: + dis_x, dis_y, _ = next(dis_iterator) + except StopIteration: + dis_iterator = get_iterator(data) + dis_initial_state_eval = zeros_state + dis_x, dis_y, _ = next(dis_iterator) + + p = model_utils.generate_mask() + + # Construct the train feed. + train_feed = { + model.inputs: dis_x, + model.targets: dis_y, + model.present: p + } + + if FLAGS.data_set == 'ptb': + # Statefulness of the Generator being used for Discriminator. + for i, (c, h) in enumerate(model.fake_gen_initial_state): + train_feed[c] = dis_initial_state_eval[i].c + train_feed[h] = dis_initial_state_eval[i].h + + # Determine the state had the Generator run over real data. We + # use this state for the Discriminator. + [dis_initial_state_eval] = sess.run( + [model.fake_gen_final_state], train_feed) + + ## Training loop. 
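The compute_geometric_average and compute_arithmetic_average helpers defined earlier reduce the per-n-gram capture rates to single scores, and the training loop below logs both. An illustrative check with hypothetical capture rates (not measured values):

import numpy as np

captured = {'2': 0.40, '3': 0.12, '4': 0.03}                             # hypothetical n-gram capture rates
geometric = np.prod(list(captured.values())) ** (1.0 / len(captured))    # ~0.11
arithmetic = np.mean(list(captured.values()))                            # ~0.18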
+ iterator = get_iterator(data) + gen_initial_state_eval = zeros_state + + if FLAGS.ps_tasks > 0: + gen_offset = FLAGS.task * 1000 + 1 + for i in range(gen_offset): + try: + next(iterator) + except StopIteration: + dis_iterator = get_iterator(data) + dis_initial_state_eval = zeros_state + next(dis_iterator) + + for x, y, _ in iterator: + for _ in xrange(hparams.dis_train_iterations): + try: + dis_x, dis_y, _ = next(dis_iterator) + except StopIteration: + dis_iterator = get_iterator(data) + dis_initial_state_eval = zeros_state + dis_x, dis_y, _ = next(dis_iterator) + + if FLAGS.data_set == 'ptb': + [dis_initial_state_eval] = sess.run( + [model.fake_gen_initial_state]) + + p = model_utils.generate_mask() + + # Construct the train feed. + train_feed = { + model.inputs: dis_x, + model.targets: dis_y, + model.present: p + } + + # Statefulness for the Discriminator. + if FLAGS.data_set == 'ptb': + for i, (c, h) in enumerate(model.fake_gen_initial_state): + train_feed[c] = dis_initial_state_eval[i].c + train_feed[h] = dis_initial_state_eval[i].h + + _, dis_loss_eval, step = sess.run( + [model.dis_train_op, model.dis_loss, model.global_step], + feed_dict=train_feed) + + # Determine the state had the Generator run over real data. + # Use this state for the Discriminator. + [dis_initial_state_eval] = sess.run( + [model.fake_gen_final_state], train_feed) + + # Randomly mask out tokens. + p = model_utils.generate_mask() + + # Construct the train feed. + train_feed = {model.inputs: x, model.targets: y, model.present: p} + + # Statefulness for Generator. + if FLAGS.data_set == 'ptb': + tf.logging.info('Generator is stateful.') + print('Generator is stateful.') + # Statefulness for *evaluation* Generator. + for i, (c, h) in enumerate(model.eval_initial_state): + train_feed[c] = gen_initial_state_eval[i].c + train_feed[h] = gen_initial_state_eval[i].h + + # Statefulness for Generator. + for i, (c, h) in enumerate(model.fake_gen_initial_state): + train_feed[c] = fake_gen_initial_state_eval[i].c + train_feed[h] = fake_gen_initial_state_eval[i].h + + # Determine whether to decay learning rate. + lr_decay = hparams.gen_learning_rate_decay**max( + step + 1 - hparams.gen_full_learning_rate_steps, 0.0) + + # Assign learning rate. + gen_learning_rate = hparams.gen_learning_rate * lr_decay + model_utils.assign_learning_rate(sess, model.learning_rate_update, + model.new_learning_rate, + gen_learning_rate) + + [_, gen_loss_eval, gen_log_perplexity_eval, step] = sess.run( + [ + model.gen_train_op, model.gen_loss, + model.avg_log_perplexity, model.global_step + ], + feed_dict=train_feed) + + cumulative_costs += gen_log_perplexity_eval + gen_iters += 1 + + # Determine the state had the Generator run over real data. + [gen_initial_state_eval, fake_gen_initial_state_eval] = sess.run( + [model.eval_final_state, + model.fake_gen_final_state], train_feed) + + avg_epoch_dis_loss.append(dis_loss_eval) + avg_epoch_gen_loss.append(gen_loss_eval) + + ## Summaries. + # Calulate rolling perplexity. + perplexity = np.exp(cumulative_costs / gen_iters) + + if is_chief and (step / FLAGS.summaries_every > + summary_step_division): + summary_step_division = step / FLAGS.summaries_every + + # Confirm perplexity is not infinite. + if (not np.isfinite(perplexity) or + perplexity >= FLAGS.perplexity_threshold): + print('Training raising FloatingPoinError.') + raise FloatingPointError( + 'Training infinite perplexity: %.3f' % perplexity) + + # Graph summaries. 
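+              # model.merge_summaries_op bundles the model's summaries into a
+              # single serialized proto; sv.SummaryComputed then forwards it to
+              # the chief's summary writer under log_dir.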
+ summary_str = sess.run( + model.merge_summaries_op, feed_dict=train_feed) + sv.SummaryComputed(sess, summary_str) + + # Summary: n-gram + avg_percent_captured = {'2': 0., '3': 0., '4': 0.} + for n, data_ngram_count in data_ngram_counts.iteritems(): + batch_percent_captured = evaluation_utils.sequence_ngram_evaluation( + sess, model.fake_sequence, log, train_feed, + data_ngram_count, int(n)) + summary_percent_str = tf.Summary(value=[ + tf.Summary.Value( + tag='general/%s-grams_percent_correct' % n, + simple_value=batch_percent_captured) + ]) + sv.SummaryComputed( + sess, summary_percent_str, global_step=step) + + # Summary: geometric_avg + geometric_avg = compute_geometric_average(avg_percent_captured) + summary_geometric_avg_str = tf.Summary(value=[ + tf.Summary.Value( + tag='general/geometric_avg', simple_value=geometric_avg) + ]) + sv.SummaryComputed( + sess, summary_geometric_avg_str, global_step=step) + + # Summary: arithmetic_avg + arithmetic_avg = compute_arithmetic_average( + avg_percent_captured) + summary_arithmetic_avg_str = tf.Summary(value=[ + tf.Summary.Value( + tag='general/arithmetic_avg', + simple_value=arithmetic_avg) + ]) + sv.SummaryComputed( + sess, summary_arithmetic_avg_str, global_step=step) + + # Summary: perplexity + summary_perplexity_str = tf.Summary(value=[ + tf.Summary.Value( + tag='general/perplexity', simple_value=perplexity) + ]) + sv.SummaryComputed( + sess, summary_perplexity_str, global_step=step) + + ## Printing and logging + if is_chief and (step / FLAGS.print_every > print_step_division): + print_step_division = (step / FLAGS.print_every) + print('global_step: %d' % step) + print(' perplexity: %.3f' % perplexity) + print(' gen_learning_rate: %.6f' % gen_learning_rate) + log.write('global_step: %d\n' % step) + log.write(' perplexity: %.3f\n' % perplexity) + log.write(' gen_learning_rate: %.6f' % gen_learning_rate) + + # Average percent captured for each of the n-grams. + avg_percent_captured = {'2': 0., '3': 0., '4': 0.} + for n, data_ngram_count in data_ngram_counts.iteritems(): + batch_percent_captured = evaluation_utils.sequence_ngram_evaluation( + sess, model.fake_sequence, log, train_feed, + data_ngram_count, int(n)) + avg_percent_captured[n] = batch_percent_captured + print(' percent of %s-grams captured: %.3f.' % + (n, batch_percent_captured)) + log.write(' percent of %s-grams captured: %.3f.\n' % + (n, batch_percent_captured)) + geometric_avg = compute_geometric_average(avg_percent_captured) + print(' geometric_avg: %.3f.' % geometric_avg) + log.write(' geometric_avg: %.3f.' % geometric_avg) + arithmetic_avg = compute_arithmetic_average( + avg_percent_captured) + print(' arithmetic_avg: %.3f.' % arithmetic_avg) + log.write(' arithmetic_avg: %.3f.' % arithmetic_avg) + + evaluation_utils.print_and_log_losses( + log, step, is_present_rate, avg_epoch_dis_loss, + avg_epoch_gen_loss) + + if FLAGS.gen_training_strategy == 'reinforce': + evaluation_utils.generate_RL_logs(sess, model, log, + id_to_word, train_feed) + else: + evaluation_utils.generate_logs(sess, model, log, id_to_word, + train_feed) + log.flush() + + log.close() + + +def evaluate_once(data, sv, model, sess, train_dir, log, id_to_word, + data_ngram_counts, eval_saver): + """Evaluate model for a number of steps. + + Args: + data: Dataset. + sv: Supervisor. + model: The GAN model we have just built. + sess: A session to use. + train_dir: Path to a directory containing checkpoints. + log: Evaluation log for evaluation. + id_to_word: Dictionary of indices to words. 
+    data_ngram_counts: Dictionary of hashed(n-gram tuples) to counts in the
+      data_set.
+    eval_saver: Evaluation saver.
+  """
+  tf.logging.info('Evaluate Once.')
+  # Load the last model checkpoint, or initialize the graph.
+  model_save_path = tf.train.latest_checkpoint(train_dir)
+  if not model_save_path:
+    tf.logging.warning('No checkpoint yet in: %s', train_dir)
+    return
+
+  tf.logging.info('Starting eval of: %s' % model_save_path)
+  tf.logging.info('Only restoring trainable variables.')
+  eval_saver.restore(sess, model_save_path)
+
+  # Run the requested number of evaluation steps.
+  avg_epoch_gen_loss, avg_epoch_dis_loss = [], []
+  cumulative_costs = 0.
+
+  # Average percent captured for each of the n-grams.
+  avg_percent_captured = {'2': 0., '3': 0., '4': 0.}
+
+  # Set a random seed to keep fixed mask.
+  np.random.seed(0)
+  gen_iters = 0
+
+  # Generator statefulness over the epoch.
+  # TODO(liamfedus): Check this.
+  [gen_initial_state_eval, fake_gen_initial_state_eval] = sess.run(
+      [model.eval_initial_state, model.fake_gen_initial_state])
+
+  if FLAGS.eval_language_model:
+    is_present_rate = 0.
+    tf.logging.info('Overriding is_present_rate=0. for evaluation.')
+    print('Overriding is_present_rate=0. for evaluation.')
+
+  iterator = get_iterator(data)
+
+  for x, y, _ in iterator:
+    if FLAGS.eval_language_model:
+      is_present_rate = 0.
+    else:
+      is_present_rate = FLAGS.is_present_rate
+    tf.logging.info('Evaluating on is_present_rate=%.3f.' % is_present_rate)
+
+    model_utils.assign_percent_real(sess, model.percent_real_update,
+                                    model.new_rate, is_present_rate)
+
+    # Randomly mask out tokens.
+    p = model_utils.generate_mask()
+
+    eval_feed = {model.inputs: x, model.targets: y, model.present: p}
+
+    if FLAGS.data_set == 'ptb':
+      # Statefulness for *evaluation* Generator.
+      for i, (c, h) in enumerate(model.eval_initial_state):
+        eval_feed[c] = gen_initial_state_eval[i].c
+        eval_feed[h] = gen_initial_state_eval[i].h
+
+      # Statefulness for the Generator.
+      for i, (c, h) in enumerate(model.fake_gen_initial_state):
+        eval_feed[c] = fake_gen_initial_state_eval[i].c
+        eval_feed[h] = fake_gen_initial_state_eval[i].h
+
+    [
+        gen_log_perplexity_eval, dis_loss_eval, gen_loss_eval,
+        gen_initial_state_eval, fake_gen_initial_state_eval, step
+    ] = sess.run(
+        [
+            model.avg_log_perplexity, model.dis_loss, model.gen_loss,
+            model.eval_final_state, model.fake_gen_final_state,
+            model.global_step
+        ],
+        feed_dict=eval_feed)
+
+    for n, data_ngram_count in data_ngram_counts.iteritems():
+      batch_percent_captured = evaluation_utils.sequence_ngram_evaluation(
+          sess, model.fake_sequence, log, eval_feed, data_ngram_count, int(n))
+      avg_percent_captured[n] += batch_percent_captured
+
+    cumulative_costs += gen_log_perplexity_eval
+
+    avg_epoch_dis_loss.append(dis_loss_eval)
+    avg_epoch_gen_loss.append(gen_loss_eval)
+
+    gen_iters += 1
+
+  # Calculate rolling metrics.
+  perplexity = np.exp(cumulative_costs / gen_iters)
+  for n, _ in avg_percent_captured.iteritems():
+    avg_percent_captured[n] /= gen_iters
+
+  # Confirm perplexity is not infinite.
+  if not np.isfinite(perplexity) or perplexity >= FLAGS.perplexity_threshold:
+    print('Evaluation raising FloatingPointError.')
+    raise FloatingPointError(
+        'Evaluation infinite perplexity: %.3f' % perplexity)
+
+  ## Printing and logging.
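+  # Besides the epoch-average losses, the rolling perplexity and per-n-gram
+  # capture rates accumulated above are printed and appended to the
+  # evaluation log before the summaries are emitted.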
+ evaluation_utils.print_and_log_losses(log, step, is_present_rate, + avg_epoch_dis_loss, avg_epoch_gen_loss) + print(' perplexity: %.3f' % perplexity) + log.write(' perplexity: %.3f\n' % perplexity) + + for n, n_gram_percent in avg_percent_captured.iteritems(): + n = int(n) + print(' percent of %d-grams captured: %.3f.' % (n, n_gram_percent)) + log.write(' percent of %d-grams captured: %.3f.\n' % (n, n_gram_percent)) + + samples = evaluation_utils.generate_logs(sess, model, log, id_to_word, + eval_feed) + + ## Summaries. + summary_str = sess.run(model.merge_summaries_op, feed_dict=eval_feed) + sv.SummaryComputed(sess, summary_str) + + # Summary: text + summary_str = sess.run(model.text_summary_op, + {model.text_summary_placeholder: '\n\n'.join(samples)}) + sv.SummaryComputed(sess, summary_str, global_step=step) + + # Summary: n-gram + for n, n_gram_percent in avg_percent_captured.iteritems(): + n = int(n) + summary_percent_str = tf.Summary(value=[ + tf.Summary.Value( + tag='general/%d-grams_percent_correct' % n, + simple_value=n_gram_percent) + ]) + sv.SummaryComputed(sess, summary_percent_str, global_step=step) + + # Summary: geometric_avg + geometric_avg = compute_geometric_average(avg_percent_captured) + summary_geometric_avg_str = tf.Summary(value=[ + tf.Summary.Value(tag='general/geometric_avg', simple_value=geometric_avg) + ]) + sv.SummaryComputed(sess, summary_geometric_avg_str, global_step=step) + + # Summary: arithmetic_avg + arithmetic_avg = compute_arithmetic_average(avg_percent_captured) + summary_arithmetic_avg_str = tf.Summary(value=[ + tf.Summary.Value( + tag='general/arithmetic_avg', simple_value=arithmetic_avg) + ]) + sv.SummaryComputed(sess, summary_arithmetic_avg_str, global_step=step) + + # Summary: perplexity + summary_perplexity_str = tf.Summary(value=[ + tf.Summary.Value(tag='general/perplexity', simple_value=perplexity) + ]) + sv.SummaryComputed(sess, summary_perplexity_str, global_step=step) + + +def evaluate_model(hparams, data, train_dir, log, id_to_word, + data_ngram_counts): + """Evaluate MaskGAN model. + + Args: + hparams: Hyperparameters for the MaskGAN. + data: Data to evaluate. + train_dir: Path to a directory containing checkpoints. + id_to_word: Dictionary of indices to words. + data_ngram_counts: Dictionary of hashed(n-gram tuples) to counts in the + data_set. + """ + tf.logging.error('Evaluate model.') + + # Boolean indicating operational mode. + is_training = False + + if FLAGS.mode == MODE_VALIDATION: + logdir = FLAGS.base_directory + '/validation' + elif FLAGS.mode == MODE_TRAIN_EVAL: + logdir = FLAGS.base_directory + '/train_eval' + elif FLAGS.mode == MODE_TEST: + logdir = FLAGS.base_directory + '/test' + else: + raise NotImplementedError + + # Wait for a checkpoint to exist. + print(train_dir) + print(tf.train.latest_checkpoint(train_dir)) + while not tf.train.latest_checkpoint(train_dir): + tf.logging.error('Waiting for checkpoint...') + print('Waiting for checkpoint...') + time.sleep(10) + + with tf.Graph().as_default(): + # Use a separate container for each trial + container_name = '' + with tf.container(container_name): + + # Construct the model. + if FLAGS.num_rollouts == 1: + model = create_MaskGAN(hparams, is_training) + elif FLAGS.num_rollouts > 1: + model = rollout.create_rollout_MaskGAN(hparams, is_training) + else: + raise ValueError + + # Create the supervisor. It will take care of initialization, summaries, + # checkpoints, and recovery. 
We only pass the trainable variables + # to load since things like baselines keep batch_size which may not + # match between training and evaluation. + evaluation_variables = tf.trainable_variables() + evaluation_variables.append(model.global_step) + eval_saver = tf.train.Saver(var_list=evaluation_variables) + sv = tf.Supervisor(logdir=logdir) + sess = sv.PrepareSession(FLAGS.eval_master, start_standard_services=False) + + tf.logging.info('Before sv.Loop.') + sv.Loop(FLAGS.eval_interval_secs, evaluate_once, + (data, sv, model, sess, train_dir, log, id_to_word, + data_ngram_counts, eval_saver)) + + sv.WaitForStop() + tf.logging.info('sv.Stop().') + sv.Stop() + + +def main(_): + hparams = create_hparams() + train_dir = FLAGS.base_directory + '/train' + + # Load data set. + if FLAGS.data_set == 'ptb': + raw_data = ptb_loader.ptb_raw_data(FLAGS.data_dir) + train_data, valid_data, test_data, _ = raw_data + valid_data_flat = valid_data + elif FLAGS.data_set == 'imdb': + raw_data = imdb_loader.imdb_raw_data(FLAGS.data_dir) + # TODO(liamfedus): Get an IMDB test partition. + train_data, valid_data = raw_data + valid_data_flat = [word for review in valid_data for word in review] + else: + raise NotImplementedError + + if FLAGS.mode == MODE_TRAIN or FLAGS.mode == MODE_TRAIN_EVAL: + data_set = train_data + elif FLAGS.mode == MODE_VALIDATION: + data_set = valid_data + elif FLAGS.mode == MODE_TEST: + data_set = test_data + else: + raise NotImplementedError + + # Dictionary and reverse dictionry. + if FLAGS.data_set == 'ptb': + word_to_id = ptb_loader.build_vocab( + os.path.join(FLAGS.data_dir, 'ptb.train.txt')) + elif FLAGS.data_set == 'imdb': + word_to_id = imdb_loader.build_vocab( + os.path.join(FLAGS.data_dir, 'vocab.txt')) + id_to_word = {v: k for k, v in word_to_id.iteritems()} + + # Dictionary of Training Set n-gram counts. + bigram_tuples = n_gram.find_all_ngrams(valid_data_flat, n=2) + trigram_tuples = n_gram.find_all_ngrams(valid_data_flat, n=3) + fourgram_tuples = n_gram.find_all_ngrams(valid_data_flat, n=4) + + bigram_counts = n_gram.construct_ngrams_dict(bigram_tuples) + trigram_counts = n_gram.construct_ngrams_dict(trigram_tuples) + fourgram_counts = n_gram.construct_ngrams_dict(fourgram_tuples) + print('Unique %d-grams: %d' % (2, len(bigram_counts))) + print('Unique %d-grams: %d' % (3, len(trigram_counts))) + print('Unique %d-grams: %d' % (4, len(fourgram_counts))) + + data_ngram_counts = { + '2': bigram_counts, + '3': trigram_counts, + '4': fourgram_counts + } + + # TODO(liamfedus): This was necessary because there was a problem with our + # originally trained IMDB models. The EOS_INDEX was off by one, which means, + # two words were mapping to index 86933. The presence of '
' is going + # to throw and out of vocabulary error. + FLAGS.vocab_size = len(id_to_word) + print('Vocab size: %d' % FLAGS.vocab_size) + + tf.gfile.MakeDirs(FLAGS.base_directory) + + if FLAGS.mode == MODE_TRAIN: + log = tf.gfile.GFile( + os.path.join(FLAGS.base_directory, 'train-log.txt'), mode='w') + elif FLAGS.mode == MODE_VALIDATION: + log = tf.gfile.GFile( + os.path.join(FLAGS.base_directory, 'validation-log.txt'), mode='w') + elif FLAGS.mode == MODE_TRAIN_EVAL: + log = tf.gfile.GFile( + os.path.join(FLAGS.base_directory, 'train_eval-log.txt'), mode='w') + else: + log = tf.gfile.GFile( + os.path.join(FLAGS.base_directory, 'test-log.txt'), mode='w') + + if FLAGS.mode == MODE_TRAIN: + train_model(hparams, data_set, train_dir, log, id_to_word, + data_ngram_counts) + + elif FLAGS.mode == MODE_VALIDATION: + evaluate_model(hparams, data_set, train_dir, log, id_to_word, + data_ngram_counts) + elif FLAGS.mode == MODE_TRAIN_EVAL: + evaluate_model(hparams, data_set, train_dir, log, id_to_word, + data_ngram_counts) + + elif FLAGS.mode == MODE_TEST: + evaluate_model(hparams, data_set, train_dir, log, id_to_word, + data_ngram_counts) + + else: + raise NotImplementedError + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/namignizer/.gitignore b/models/research/namignizer/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..2dae8043534bc7a079f36caa6c673f74c39e5dfa --- /dev/null +++ b/models/research/namignizer/.gitignore @@ -0,0 +1,6 @@ +# Remove the pyc files +*.pyc + +# Ignore the model and the data +model/ +data/ diff --git a/models/research/namignizer/README.md b/models/research/namignizer/README.md new file mode 100644 index 0000000000000000000000000000000000000000..475a087541913aaa3fca9d2094b4c23de52dbb41 --- /dev/null +++ b/models/research/namignizer/README.md @@ -0,0 +1,86 @@ +![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +# Namignizer + +Use a variation of the [PTB](https://www.tensorflow.org/versions/r0.8/tutorials/recurrent/index.html#recurrent-neural-networks) model to recognize and generate names using the [Kaggle Baby Name Database](https://www.kaggle.com/kaggle/us-baby-names). + +### API +Namignizer is implemented in Tensorflow 0.8r and uses the python package `pandas` for some data processing. + +#### How to use +Download the data from Kaggle and place it in your data directory (or use the small training data provided). The example data looks like so: + +``` +Id,Name,Year,Gender,Count +1,Mary,1880,F,7065 +2,Anna,1880,F,2604 +3,Emma,1880,F,2003 +4,Elizabeth,1880,F,1939 +5,Minnie,1880,F,1746 +6,Margaret,1880,F,1578 +7,Ida,1880,F,1472 +8,Alice,1880,F,1414 +9,Bertha,1880,F,1320 +``` + +But any data with the two columns: `Name` and `Count` will work. + +With the data, we can then train the model: + +```python +train("data/SmallNames.txt", "model/namignizer", SmallConfig) +``` + +And you will get the output: + +``` +Reading Name data in data/SmallNames.txt +Epoch: 1 Learning rate: 1.000 +0.090 perplexity: 18.539 speed: 282 lps +... +0.890 perplexity: 1.478 speed: 285 lps +0.990 perplexity: 1.477 speed: 284 lps +Epoch: 13 Train Perplexity: 1.477 +``` + +This will as a side effect write model checkpoints to the `model` directory. 
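+
+Before calling the recognizer below, you can confirm that a checkpoint was
+actually written; `tf.train.latest_checkpoint` (the same call used in the
+snippets that follow) returns the newest checkpoint path in a directory, or
+`None` if there is none yet. A minimal check, assuming the default `model/`
+directory from above:
+
+```python
+import tensorflow as tf
+
+# Prints something like "model/namignizer-12" once training has finished.
+print(tf.train.latest_checkpoint("model"))
+```
+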
With this you will be able to determine the perplexity your model will give you for any arbitrary set of names like so:
+
+```python
+namignize(["mary", "ida", "gazorpazorp", "houyhnhnms", "bob"],
+          tf.train.latest_checkpoint("model"), SmallConfig)
+```
+You will provide the same config and the same checkpoint directory. This will allow you to use the model you just trained. You will then get a perplexity output for each name like so:
+
+```
+Name mary gives us a perplexity of 1.03105580807
+Name ida gives us a perplexity of 1.07770049572
+Name gazorpazorp gives us a perplexity of 175.940353394
+Name houyhnhnms gives us a perplexity of 9.53870773315
+Name bob gives us a perplexity of 6.03938627243
+```
+
+Finally, you will also be able to generate names using the model like so:
+
+```python
+namignator(tf.train.latest_checkpoint("model"), SmallConfig)
+```
+
+Again, you will need to provide the same config and the same checkpoint directory. This will allow you to use the model you just trained. You will then get a single generated name. Examples of output that I got when using the provided data are:
+
+```
+['b', 'e', 'r', 't', 'h', 'a', '`']
+['m', 'a', 'r', 'y', '`']
+['a', 'n', 'n', 'a', '`']
+['m', 'a', 'r', 'y', '`']
+['b', 'e', 'r', 't', 'h', 'a', '`']
+['a', 'n', 'n', 'a', '`']
+['e', 'l', 'i', 'z', 'a', 'b', 'e', 't', 'h', '`']
+```
+
+Notice that each name ends with a backtick. This marks the end of the name.
+
+### Contact Info
+
+Feel free to reach out to me at knt(at google) or k.nathaniel.tucker(at gmail)
diff --git a/models/research/namignizer/data_utils.py b/models/research/namignizer/data_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..4320215026ccf7a2b31ffd476c25a153ecd92b86
--- /dev/null
+++ b/models/research/namignizer/data_utils.py
@@ -0,0 +1,119 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Utilities for parsing Kaggle baby names files."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import collections
+import os
+
+import numpy as np
+import tensorflow as tf
+import pandas as pd
+
+# the default end of name rep will be zero
+_EON = 0
+
+
+def read_names(names_path):
+  """read data from downloaded file. See SmallNames.txt for example format
+  or go to https://www.kaggle.com/kaggle/us-baby-names for full lists
+
+  Args:
+    names_path: path to the csv file similar to the example type
+  Returns:
+    Dataset: a namedtuple of two elements: deduped names and their associated
+      counts.
The names contain only 26 chars and are all lower case + """ + names_data = pd.read_csv(names_path) + names_data.Name = names_data.Name.str.lower() + + name_data = names_data.groupby(by=["Name"])["Count"].sum() + name_counts = np.array(name_data.tolist()) + names_deduped = np.array(name_data.index.tolist()) + + Dataset = collections.namedtuple('Dataset', ['Name', 'Count']) + return Dataset(names_deduped, name_counts) + + +def _letter_to_number(letter): + """converts letters to numbers between 1 and 27""" + # ord of lower case 'a' is 97 + return ord(letter) - 96 + + +def namignizer_iterator(names, counts, batch_size, num_steps, epoch_size): + """Takes a list of names and counts like those output from read_names, and + makes an iterator yielding a batch_size by num_steps array of random names + separated by an end of name token. The names are chosen randomly according + to their counts. The batch may end mid-name + + Args: + names: a set of lowercase names composed of 26 characters + counts: a list of the frequency of those names + batch_size: int + num_steps: int + epoch_size: number of batches to yield + Yields: + (x, y): a batch_size by num_steps array of ints representing letters, where + x will be the input and y will be the target + """ + name_distribution = counts / counts.sum() + + for i in range(epoch_size): + data = np.zeros(batch_size * num_steps + 1) + samples = np.random.choice(names, size=batch_size * num_steps // 2, + replace=True, p=name_distribution) + + data_index = 0 + for sample in samples: + if data_index >= batch_size * num_steps: + break + for letter in map(_letter_to_number, sample) + [_EON]: + if data_index >= batch_size * num_steps: + break + data[data_index] = letter + data_index += 1 + + x = data[:batch_size * num_steps].reshape((batch_size, num_steps)) + y = data[1:batch_size * num_steps + 1].reshape((batch_size, num_steps)) + + yield (x, y) + + +def name_to_batch(name, batch_size, num_steps): + """ Takes a single name and fills a batch with it + + Args: + name: lowercase composed of 26 characters + batch_size: int + num_steps: int + Returns: + x, y: a batch_size by num_steps array of ints representing letters, where + x will be the input and y will be the target. The array is filled up + to the length of the string, the rest is filled with zeros + """ + data = np.zeros(batch_size * num_steps + 1) + + data_index = 0 + for letter in map(_letter_to_number, name) + [_EON]: + data[data_index] = letter + data_index += 1 + + x = data[:batch_size * num_steps].reshape((batch_size, num_steps)) + y = data[1:batch_size * num_steps + 1].reshape((batch_size, num_steps)) + + return x, y diff --git a/models/research/namignizer/model.py b/models/research/namignizer/model.py new file mode 100644 index 0000000000000000000000000000000000000000..72c5c5ecb61e8a92ec2e74b8cc7ca13bb6ace817 --- /dev/null +++ b/models/research/namignizer/model.py @@ -0,0 +1,136 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""RNN model with embeddings""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + + +class NamignizerModel(object): + """The Namignizer model ~ strongly based on PTB""" + + def __init__(self, is_training, config): + self.batch_size = batch_size = config.batch_size + self.num_steps = num_steps = config.num_steps + size = config.hidden_size + # will always be 27 + vocab_size = config.vocab_size + + # placeholders for inputs + self._input_data = tf.placeholder(tf.int32, [batch_size, num_steps]) + self._targets = tf.placeholder(tf.int32, [batch_size, num_steps]) + # weights for the loss function + self._weights = tf.placeholder(tf.float32, [batch_size * num_steps]) + + # lstm for our RNN cell (GRU supported too) + lstm_cells = [] + for layer in range(config.num_layers): + lstm_cell = tf.contrib.rnn.BasicLSTMCell(size, forget_bias=0.0) + if is_training and config.keep_prob < 1: + lstm_cell = tf.contrib.rnn.DropoutWrapper( + lstm_cell, output_keep_prob=config.keep_prob) + lstm_cells.append(lstm_cell) + cell = tf.contrib.rnn.MultiRNNCell(lstm_cells) + + self._initial_state = cell.zero_state(batch_size, tf.float32) + + with tf.device("/cpu:0"): + embedding = tf.get_variable("embedding", [vocab_size, size]) + inputs = tf.nn.embedding_lookup(embedding, self._input_data) + + if is_training and config.keep_prob < 1: + inputs = tf.nn.dropout(inputs, config.keep_prob) + + outputs = [] + state = self._initial_state + with tf.variable_scope("RNN"): + for time_step in range(num_steps): + if time_step > 0: + tf.get_variable_scope().reuse_variables() + (cell_output, state) = cell(inputs[:, time_step, :], state) + outputs.append(cell_output) + + output = tf.reshape(tf.concat(axis=1, values=outputs), [-1, size]) + softmax_w = tf.get_variable("softmax_w", [size, vocab_size]) + softmax_b = tf.get_variable("softmax_b", [vocab_size]) + logits = tf.matmul(output, softmax_w) + softmax_b + loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example( + [logits], + [tf.reshape(self._targets, [-1])], + [self._weights]) + self._loss = loss + self._cost = cost = tf.reduce_sum(loss) / batch_size + self._final_state = state + + # probabilities of each letter + self._activations = tf.nn.softmax(logits) + + # ability to save the model + self.saver = tf.train.Saver(tf.global_variables()) + + if not is_training: + return + + self._lr = tf.Variable(0.0, trainable=False) + tvars = tf.trainable_variables() + grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), + config.max_grad_norm) + optimizer = tf.train.GradientDescentOptimizer(self.lr) + self._train_op = optimizer.apply_gradients(zip(grads, tvars)) + + def assign_lr(self, session, lr_value): + session.run(tf.assign(self.lr, lr_value)) + + @property + def input_data(self): + return self._input_data + + @property + def targets(self): + return self._targets + + @property + def activations(self): + return self._activations + + @property + def weights(self): + return self._weights + + @property + def initial_state(self): + return self._initial_state + + @property + def cost(self): + return self._cost + + @property + def loss(self): + return self._loss + + @property + def final_state(self): + return self._final_state + + @property + def lr(self): + return self._lr + + @property + def train_op(self): + return self._train_op diff --git a/models/research/namignizer/names.py b/models/research/namignizer/names.py new file mode 100644 index 
0000000000000000000000000000000000000000..253742716391f2f4b7a0c0cf4987e40a2aaa808f --- /dev/null +++ b/models/research/namignizer/names.py @@ -0,0 +1,259 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""A library showing off sequence recognition and generation with the simple +example of names. + +We use recurrent neural nets to learn complex functions able to recognize and +generate sequences of a given form. This can be used for natural language +syntax recognition, dynamically generating maps or puzzles and of course +baby name generation. + +Before using this module, it is recommended to read the Tensorflow tutorial on +recurrent neural nets, as it explains the basic concepts of this model, and +will show off another module, the PTB module on which this model bases itself. + +Here is an overview of the functions available in this module: + +* RNN Module for sequence functions based on PTB + +* Name recognition specifically for recognizing names, but can be adapted to + recognizing sequence patterns + +* Name generations specifically for generating names, but can be adapted to + generating arbitrary sequence patterns +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import time + +import tensorflow as tf +import numpy as np + +from model import NamignizerModel +import data_utils + + +class SmallConfig(object): + """Small config.""" + init_scale = 0.1 + learning_rate = 1.0 + max_grad_norm = 5 + num_layers = 2 + num_steps = 20 + hidden_size = 200 + max_epoch = 4 + max_max_epoch = 13 + keep_prob = 1.0 + lr_decay = 0.5 + batch_size = 20 + vocab_size = 27 + epoch_size = 100 + + +class LargeConfig(object): + """Medium config.""" + init_scale = 0.05 + learning_rate = 1.0 + max_grad_norm = 5 + num_layers = 2 + num_steps = 35 + hidden_size = 650 + max_epoch = 6 + max_max_epoch = 39 + keep_prob = 0.5 + lr_decay = 0.8 + batch_size = 20 + vocab_size = 27 + epoch_size = 100 + + +class TestConfig(object): + """Tiny config, for testing.""" + init_scale = 0.1 + learning_rate = 1.0 + max_grad_norm = 1 + num_layers = 1 + num_steps = 2 + hidden_size = 2 + max_epoch = 1 + max_max_epoch = 1 + keep_prob = 1.0 + lr_decay = 0.5 + batch_size = 20 + vocab_size = 27 + epoch_size = 100 + + +def run_epoch(session, m, names, counts, epoch_size, eval_op, verbose=False): + """Runs the model on the given data for one epoch + + Args: + session: the tf session holding the model graph + m: an instance of the NamignizerModel + names: a set of lowercase names of 26 characters + counts: a list of the frequency of the above names + epoch_size: the number of batches to run + eval_op: whether to change the params or not, and how to do it + Kwargs: + verbose: whether to print out state of training during the epoch + Returns: + cost: the average cost during the last stage of the epoch + """ + start_time = time.time() + costs = 0.0 + iters = 0 + for step, (x, y) in 
enumerate(data_utils.namignizer_iterator(names, counts, + m.batch_size, m.num_steps, epoch_size)): + + cost, _ = session.run([m.cost, eval_op], + {m.input_data: x, + m.targets: y, + m.weights: np.ones(m.batch_size * m.num_steps)}) + costs += cost + iters += m.num_steps + + if verbose and step % (epoch_size // 10) == 9: + print("%.3f perplexity: %.3f speed: %.0f lps" % + (step * 1.0 / epoch_size, np.exp(costs / iters), + iters * m.batch_size / (time.time() - start_time))) + + if step >= epoch_size: + break + + return np.exp(costs / iters) + + +def train(data_dir, checkpoint_path, config): + """Trains the model with the given data + + Args: + data_dir: path to the data for the model (see data_utils for data + format) + checkpoint_path: the path to save the trained model checkpoints + config: one of the above configs that specify the model and how it + should be run and trained + Returns: + None + """ + # Prepare Name data. + print("Reading Name data in %s" % data_dir) + names, counts = data_utils.read_names(data_dir) + + with tf.Graph().as_default(), tf.Session() as session: + initializer = tf.random_uniform_initializer(-config.init_scale, + config.init_scale) + with tf.variable_scope("model", reuse=None, initializer=initializer): + m = NamignizerModel(is_training=True, config=config) + + tf.global_variables_initializer().run() + + for i in range(config.max_max_epoch): + lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0) + m.assign_lr(session, config.learning_rate * lr_decay) + + print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr))) + train_perplexity = run_epoch(session, m, names, counts, config.epoch_size, m.train_op, + verbose=True) + print("Epoch: %d Train Perplexity: %.3f" % + (i + 1, train_perplexity)) + + m.saver.save(session, checkpoint_path, global_step=i) + + +def namignize(names, checkpoint_path, config): + """Recognizes names and prints the Perplexity of the model for each names + in the list + + Args: + names: a list of names in the model format + checkpoint_path: the path to restore the trained model from, should not + include the model name, just the path to + config: one of the above configs that specify the model and how it + should be run and trained + Returns: + None + """ + with tf.Graph().as_default(), tf.Session() as session: + + with tf.variable_scope("model"): + m = NamignizerModel(is_training=False, config=config) + + m.saver.restore(session, checkpoint_path) + + for name in names: + x, y = data_utils.name_to_batch(name, m.batch_size, m.num_steps) + + cost, loss, _ = session.run([m.cost, m.loss, tf.no_op()], + {m.input_data: x, + m.targets: y, + m.weights: np.concatenate(( + np.ones(len(name)), np.zeros(m.batch_size * m.num_steps - len(name))))}) + + print("Name {} gives us a perplexity of {}".format( + name, np.exp(cost))) + + +def namignator(checkpoint_path, config): + """Generates names randomly according to a given model + + Args: + checkpoint_path: the path to restore the trained model from, should not + include the model name, just the path to + config: one of the above configs that specify the model and how it + should be run and trained + Returns: + None + """ + # mutate the config to become a name generator config + config.num_steps = 1 + config.batch_size = 1 + + with tf.Graph().as_default(), tf.Session() as session: + + with tf.variable_scope("model"): + m = NamignizerModel(is_training=False, config=config) + + m.saver.restore(session, checkpoint_path) + + activations, final_state, _ = session.run([m.activations, m.final_state, 
tf.no_op()], + {m.input_data: np.zeros((1, 1)), + m.targets: np.zeros((1, 1)), + m.weights: np.ones(1)}) + + # sample from our softmax activations + next_letter = np.random.choice(27, p=activations[0]) + name = [next_letter] + while next_letter != 0: + activations, final_state, _ = session.run([m.activations, m.final_state, tf.no_op()], + {m.input_data: [[next_letter]], + m.targets: np.zeros((1, 1)), + m.initial_state: final_state, + m.weights: np.ones(1)}) + + next_letter = np.random.choice(27, p=activations[0]) + name += [next_letter] + + print(map(lambda x: chr(x + 96), name)) + + +if __name__ == "__main__": + train("data/SmallNames.txt", "model/namignizer", SmallConfig) + + namignize(["mary", "ida", "gazorbazorb", "mmmhmm", "bob"], + tf.train.latest_checkpoint("model"), SmallConfig) + + namignator(tf.train.latest_checkpoint("model"), SmallConfig) diff --git a/models/research/neural_gpu/README.md b/models/research/neural_gpu/README.md new file mode 100644 index 0000000000000000000000000000000000000000..097ef318c4e071f59e4212b0cd901907758d73e7 --- /dev/null +++ b/models/research/neural_gpu/README.md @@ -0,0 +1,87 @@ +![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +# NeuralGPU +Code for the Neural GPU model described in http://arxiv.org/abs/1511.08228. +The extended version was described in https://arxiv.org/abs/1610.08613. + +Requirements: +* TensorFlow (see tensorflow.org for how to install) + +The model can be trained on the following algorithmic tasks: + +* `sort` - Sort a symbol list +* `kvsort` - Sort symbol keys in dictionary +* `id` - Return the same symbol list +* `rev` - Reverse a symbol list +* `rev2` - Reverse a symbol dictionary by key +* `incr` - Add one to a symbol value +* `add` - Long decimal addition +* `left` - First symbol in list +* `right` - Last symbol in list +* `left-shift` - Left shift a symbol list +* `right-shift` - Right shift a symbol list +* `bmul` - Long binary multiplication +* `mul` - Long decimal multiplication +* `dup` - Duplicate a symbol list with padding +* `badd` - Long binary addition +* `qadd` - Long quaternary addition +* `search` - Search for symbol key in dictionary + +It can also be trained on the WMT English-French translation task: + +* `wmt` - WMT English-French translation (data will be downloaded) + +The value range for symbols are defined by the `vocab_size` flag. +In particular, the values are in the range `vocab_size - 1`. +So if you set `--vocab_size=16` (the default) then `--problem=rev` +will be reversing lists of 15 symbols, and `--problem=id` will be identity +on a list of up to 15 symbols. + + +To train the model on the binary multiplication task run: + +``` +python neural_gpu_trainer.py --problem=bmul +``` + +This trains the Extended Neural GPU, to train the original model run: + +``` +python neural_gpu_trainer.py --problem=bmul --beam_size=0 +``` + +While training, interim / checkpoint model parameters will be +written to `/tmp/neural_gpu/`. + +Once the amount of error gets down to what you're comfortable +with, hit `Ctrl-C` to stop the training process. The latest +model parameters will be in `/tmp/neural_gpu/neural_gpu.ckpt-` +and used on any subsequent run. 
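+
+When you later inspect decodes, it helps to know the symbol mapping that
+`to_symbol`/`to_id` in `data_utils.py` (included further down in this diff)
+use for the algorithmic tasks: id 0 is padding, 11 renders as `+`, 12 as `*`,
+and any other id i renders as the digit i-1. A small, illustrative round trip,
+assuming `data_utils.py` is importable:
+
+```python
+import data_utils
+
+ids = [data_utils.to_id(c) for c in "3*5"]             # [4, 12, 6]
+print("".join(data_utils.to_symbol(i) for i in ids))   # prints: 3*5
+```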
+ +To evaluate a trained model on how well it decodes run: + +``` +python neural_gpu_trainer.py --problem=bmul --mode=1 +``` + +To interact with a model (experimental, see code) run: + +``` +python neural_gpu_trainer.py --problem=bmul --mode=2 +``` + +To train on WMT data, set a larger --nmaps and --vocab_size and avoid curriculum: + +``` +python neural_gpu_trainer.py --problem=wmt --vocab_size=32768 --nmaps=256 + --vec_size=256 --curriculum_seq=1.0 --max_length=60 --data_dir ~/wmt +``` + +With less memory, try lower batch size, e.g. `--batch_size=4`. With more GPUs +in your system, there will be a batch on every GPU so you can run larger models. +For example, `--batch_size=4 --num_gpus=4 --nmaps=512 --vec_size=512` will +run a large model (512-size) on 4 GPUs, with effective batches of 4*4=16. + +Maintained by Lukasz Kaiser (lukaszkaiser) diff --git a/models/research/neural_gpu/data_utils.py b/models/research/neural_gpu/data_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3c14ff701fce79408fde6505239530dc5b848dd7 --- /dev/null +++ b/models/research/neural_gpu/data_utils.py @@ -0,0 +1,458 @@ +# Copyright 2015 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Neural GPU -- data generation and batching utilities.""" + +import math +import os +import random +import sys +import time + +import numpy as np +from six.moves import xrange +import tensorflow as tf + +import program_utils + +FLAGS = tf.app.flags.FLAGS + +bins = [2 + bin_idx_i for bin_idx_i in xrange(256)] +all_tasks = ["sort", "kvsort", "id", "rev", "rev2", "incr", "add", "left", + "right", "left-shift", "right-shift", "bmul", "mul", "dup", + "badd", "qadd", "search", "progeval", "progsynth"] +log_filename = "" +vocab, rev_vocab = None, None + + +def pad(l): + for b in bins: + if b >= l: return b + return bins[-1] + + +def bin_for(l): + for i, b in enumerate(bins): + if b >= l: return i + return len(bins) - 1 + + +train_set = {} +test_set = {} +for some_task in all_tasks: + train_set[some_task] = [] + test_set[some_task] = [] + for all_max_len in xrange(10000): + train_set[some_task].append([]) + test_set[some_task].append([]) + + +def read_tmp_file(name): + """Read from a file with the given name in our log directory or above.""" + dirname = os.path.dirname(log_filename) + fname = os.path.join(dirname, name + ".txt") + if not tf.gfile.Exists(fname): + print_out("== not found file: " + fname) + fname = os.path.join(dirname, "../" + name + ".txt") + if not tf.gfile.Exists(fname): + print_out("== not found file: " + fname) + fname = os.path.join(dirname, "../../" + name + ".txt") + if not tf.gfile.Exists(fname): + print_out("== not found file: " + fname) + return None + print_out("== found file: " + fname) + res = [] + with tf.gfile.GFile(fname, mode="r") as f: + for line in f: + res.append(line.strip()) + return res + + +def write_tmp_file(name, lines): + dirname = os.path.dirname(log_filename) 
+ fname = os.path.join(dirname, name + ".txt") + with tf.gfile.GFile(fname, mode="w") as f: + for line in lines: + f.write(line + "\n") + + +def add(n1, n2, base=10): + """Add two numbers represented as lower-endian digit lists.""" + k = max(len(n1), len(n2)) + 1 + d1 = n1 + [0 for _ in xrange(k - len(n1))] + d2 = n2 + [0 for _ in xrange(k - len(n2))] + res = [] + carry = 0 + for i in xrange(k): + if d1[i] + d2[i] + carry < base: + res.append(d1[i] + d2[i] + carry) + carry = 0 + else: + res.append(d1[i] + d2[i] + carry - base) + carry = 1 + while res and res[-1] == 0: + res = res[:-1] + if res: return res + return [0] + + +def init_data(task, length, nbr_cases, nclass): + """Data initialization.""" + def rand_pair(l, task): + """Random data pair for a task. Total length should be <= l.""" + k = int((l-1)/2) + base = 10 + if task[0] == "b": base = 2 + if task[0] == "q": base = 4 + d1 = [np.random.randint(base) for _ in xrange(k)] + d2 = [np.random.randint(base) for _ in xrange(k)] + if task in ["add", "badd", "qadd"]: + res = add(d1, d2, base) + elif task in ["mul", "bmul"]: + d1n = sum([d * (base ** i) for i, d in enumerate(d1)]) + d2n = sum([d * (base ** i) for i, d in enumerate(d2)]) + if task == "bmul": + res = [int(x) for x in list(reversed(str(bin(d1n * d2n))))[:-2]] + else: + res = [int(x) for x in list(reversed(str(d1n * d2n)))] + else: + sys.exit() + sep = [12] + if task in ["add", "badd", "qadd"]: sep = [11] + inp = [d + 1 for d in d1] + sep + [d + 1 for d in d2] + return inp, [r + 1 for r in res] + + def rand_dup_pair(l): + """Random data pair for duplication task. Total length should be <= l.""" + k = int(l/2) + x = [np.random.randint(nclass - 1) + 1 for _ in xrange(k)] + inp = x + [0 for _ in xrange(l - k)] + res = x + x + [0 for _ in xrange(l - 2*k)] + return inp, res + + def rand_rev2_pair(l): + """Random data pair for reverse2 task. Total length should be <= l.""" + inp = [(np.random.randint(nclass - 1) + 1, + np.random.randint(nclass - 1) + 1) for _ in xrange(l/2)] + res = [i for i in reversed(inp)] + return [x for p in inp for x in p], [x for p in res for x in p] + + def rand_search_pair(l): + """Random data pair for search task. Total length should be <= l.""" + inp = [(np.random.randint(nclass - 1) + 1, + np.random.randint(nclass - 1) + 1) for _ in xrange(l-1/2)] + q = np.random.randint(nclass - 1) + 1 + res = 0 + for (k, v) in reversed(inp): + if k == q: + res = v + return [x for p in inp for x in p] + [q], [res] + + def rand_kvsort_pair(l): + """Random data pair for key-value sort. 
Total length should be <= l.""" + keys = [(np.random.randint(nclass - 1) + 1, i) for i in xrange(l/2)] + vals = [np.random.randint(nclass - 1) + 1 for _ in xrange(l/2)] + kv = [(k, vals[i]) for (k, i) in keys] + sorted_kv = [(k, vals[i]) for (k, i) in sorted(keys)] + return [x for p in kv for x in p], [x for p in sorted_kv for x in p] + + def prog_io_pair(prog, max_len, counter=0): + try: + ilen = np.random.randint(max_len - 3) + 1 + bound = max(15 - (counter / 20), 1) + inp = [random.choice(range(-bound, bound)) for _ in range(ilen)] + inp_toks = [program_utils.prog_rev_vocab[t] + for t in program_utils.tokenize(str(inp)) if t != ","] + out = program_utils.evaluate(prog, {"a": inp}) + out_toks = [program_utils.prog_rev_vocab[t] + for t in program_utils.tokenize(str(out)) if t != ","] + if counter > 400: + out_toks = [] + if (out_toks and out_toks[0] == program_utils.prog_rev_vocab["["] and + len(out_toks) != len([o for o in out if o == ","]) + 3): + raise ValueError("generated list with too long ints") + if (out_toks and out_toks[0] != program_utils.prog_rev_vocab["["] and + len(out_toks) > 1): + raise ValueError("generated one int but tokenized it to many") + if len(out_toks) > max_len: + raise ValueError("output too long") + return (inp_toks, out_toks) + except ValueError: + return prog_io_pair(prog, max_len, counter+1) + + def spec(inp): + """Return the target given the input for some tasks.""" + if task == "sort": + return sorted(inp) + elif task == "id": + return inp + elif task == "rev": + return [i for i in reversed(inp)] + elif task == "incr": + carry = 1 + res = [] + for i in xrange(len(inp)): + if inp[i] + carry < nclass: + res.append(inp[i] + carry) + carry = 0 + else: + res.append(1) + carry = 1 + return res + elif task == "left": + return [inp[0]] + elif task == "right": + return [inp[-1]] + elif task == "left-shift": + return [inp[l-1] for l in xrange(len(inp))] + elif task == "right-shift": + return [inp[l+1] for l in xrange(len(inp))] + else: + print_out("Unknown spec for task " + str(task)) + sys.exit() + + l = length + cur_time = time.time() + total_time = 0.0 + + is_prog = task in ["progeval", "progsynth"] + if is_prog: + inputs_per_prog = 5 + program_utils.make_vocab() + progs = read_tmp_file("programs_len%d" % (l / 10)) + if not progs: + progs = program_utils.gen(l / 10, 1.2 * nbr_cases / inputs_per_prog) + write_tmp_file("programs_len%d" % (l / 10), progs) + prog_ios = read_tmp_file("programs_len%d_io" % (l / 10)) + nbr_cases = min(nbr_cases, len(progs) * inputs_per_prog) / 1.2 + if not prog_ios: + # Generate program io data. 
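+      # Each cached line pairs a tokenized random input with the tokenized
+      # result of evaluating the program on it (via prog_io_pair above),
+      # stored as "input<TAB>output<TAB>program" so it can be re-read later.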
+ prog_ios = [] + for pidx, prog in enumerate(progs): + if pidx % 500 == 0: + print_out("== generating io pairs for program %d" % pidx) + if pidx * inputs_per_prog > nbr_cases * 1.2: + break + ptoks = [program_utils.prog_rev_vocab[t] + for t in program_utils.tokenize(prog)] + ptoks.append(program_utils.prog_rev_vocab["_EOS"]) + plen = len(ptoks) + for _ in xrange(inputs_per_prog): + if task == "progeval": + inp, out = prog_io_pair(prog, plen) + prog_ios.append(str(inp) + "\t" + str(out) + "\t" + prog) + elif task == "progsynth": + plen = max(len(ptoks), 8) + for _ in xrange(3): + inp, out = prog_io_pair(prog, plen / 2) + prog_ios.append(str(inp) + "\t" + str(out) + "\t" + prog) + write_tmp_file("programs_len%d_io" % (l / 10), prog_ios) + prog_ios_dict = {} + for s in prog_ios: + i, o, p = s.split("\t") + i_clean = "".join([c for c in i if c.isdigit() or c == " "]) + o_clean = "".join([c for c in o if c.isdigit() or c == " "]) + inp = [int(x) for x in i_clean.split()] + out = [int(x) for x in o_clean.split()] + if inp and out: + if p in prog_ios_dict: + prog_ios_dict[p].append([inp, out]) + else: + prog_ios_dict[p] = [[inp, out]] + # Use prog_ios_dict to create data. + progs = [] + for prog in prog_ios_dict: + if len([c for c in prog if c == ";"]) <= (l / 10): + progs.append(prog) + nbr_cases = min(nbr_cases, len(progs) * inputs_per_prog) / 1.2 + print_out("== %d training cases on %d progs" % (nbr_cases, len(progs))) + for pidx, prog in enumerate(progs): + if pidx * inputs_per_prog > nbr_cases * 1.2: + break + ptoks = [program_utils.prog_rev_vocab[t] + for t in program_utils.tokenize(prog)] + ptoks.append(program_utils.prog_rev_vocab["_EOS"]) + plen = len(ptoks) + dset = train_set if pidx < nbr_cases / inputs_per_prog else test_set + for _ in xrange(inputs_per_prog): + if task == "progeval": + inp, out = prog_ios_dict[prog].pop() + dset[task][bin_for(plen)].append([[ptoks, inp, [], []], [out]]) + elif task == "progsynth": + plen, ilist = max(len(ptoks), 8), [[]] + for _ in xrange(3): + inp, out = prog_ios_dict[prog].pop() + ilist.append(inp + out) + dset[task][bin_for(plen)].append([ilist, [ptoks]]) + + for case in xrange(0 if is_prog else nbr_cases): + total_time += time.time() - cur_time + cur_time = time.time() + if l > 10000 and case % 100 == 1: + print_out(" avg gen time %.4f s" % (total_time / float(case))) + if task in ["add", "badd", "qadd", "bmul", "mul"]: + i, t = rand_pair(l, task) + train_set[task][bin_for(len(i))].append([[[], i, [], []], [t]]) + i, t = rand_pair(l, task) + test_set[task][bin_for(len(i))].append([[[], i, [], []], [t]]) + elif task == "dup": + i, t = rand_dup_pair(l) + train_set[task][bin_for(len(i))].append([[i], [t]]) + i, t = rand_dup_pair(l) + test_set[task][bin_for(len(i))].append([[i], [t]]) + elif task == "rev2": + i, t = rand_rev2_pair(l) + train_set[task][bin_for(len(i))].append([[i], [t]]) + i, t = rand_rev2_pair(l) + test_set[task][bin_for(len(i))].append([[i], [t]]) + elif task == "search": + i, t = rand_search_pair(l) + train_set[task][bin_for(len(i))].append([[i], [t]]) + i, t = rand_search_pair(l) + test_set[task][bin_for(len(i))].append([[i], [t]]) + elif task == "kvsort": + i, t = rand_kvsort_pair(l) + train_set[task][bin_for(len(i))].append([[i], [t]]) + i, t = rand_kvsort_pair(l) + test_set[task][bin_for(len(i))].append([[i], [t]]) + elif task not in ["progeval", "progsynth"]: + inp = [np.random.randint(nclass - 1) + 1 for i in xrange(l)] + target = spec(inp) + train_set[task][bin_for(l)].append([[inp], [target]]) + inp = 
[np.random.randint(nclass - 1) + 1 for i in xrange(l)] + target = spec(inp) + test_set[task][bin_for(l)].append([[inp], [target]]) + + +def to_symbol(i): + """Covert ids to text.""" + if i == 0: return "" + if i == 11: return "+" + if i == 12: return "*" + return str(i-1) + + +def to_id(s): + """Covert text to ids.""" + if s == "+": return 11 + if s == "*": return 12 + return int(s) + 1 + + +def get_batch(bin_id, batch_size, data_set, height, offset=None, preset=None): + """Get a batch of data, training or testing.""" + inputs, targets = [], [] + pad_length = bins[bin_id] + for b in xrange(batch_size): + if preset is None: + elem = random.choice(data_set[bin_id]) + if offset is not None and offset + b < len(data_set[bin_id]): + elem = data_set[bin_id][offset + b] + else: + elem = preset + inpt, targett, inpl, targetl = elem[0], elem[1], [], [] + for inp in inpt: + inpl.append(inp + [0 for _ in xrange(pad_length - len(inp))]) + if len(inpl) == 1: + for _ in xrange(height - 1): + inpl.append([0 for _ in xrange(pad_length)]) + for target in targett: + targetl.append(target + [0 for _ in xrange(pad_length - len(target))]) + inputs.append(inpl) + targets.append(targetl) + res_input = np.array(inputs, dtype=np.int32) + res_target = np.array(targets, dtype=np.int32) + assert list(res_input.shape) == [batch_size, height, pad_length] + assert list(res_target.shape) == [batch_size, 1, pad_length] + return res_input, res_target + + +def print_out(s, newline=True): + """Print a message out and log it to file.""" + if log_filename: + try: + with tf.gfile.GFile(log_filename, mode="a") as f: + f.write(s + ("\n" if newline else "")) + # pylint: disable=bare-except + except: + sys.stderr.write("Error appending to %s\n" % log_filename) + sys.stdout.write(s + ("\n" if newline else "")) + sys.stdout.flush() + + +def decode(output): + return [np.argmax(o, axis=1) for o in output] + + +def accuracy(inpt_t, output, target_t, batch_size, nprint, + beam_out=None, beam_scores=None): + """Calculate output accuracy given target.""" + assert nprint < batch_size + 1 + inpt = [] + for h in xrange(inpt_t.shape[1]): + inpt.extend([inpt_t[:, h, l] for l in xrange(inpt_t.shape[2])]) + target = [target_t[:, 0, l] for l in xrange(target_t.shape[2])] + def tok(i): + if rev_vocab and i < len(rev_vocab): + return rev_vocab[i] + return str(i - 1) + def task_print(inp, output, target): + stop_bound = 0 + print_len = 0 + while print_len < len(target) and target[print_len] > stop_bound: + print_len += 1 + print_out(" i: " + " ".join([tok(i) for i in inp if i > 0])) + print_out(" o: " + + " ".join([tok(output[l]) for l in xrange(print_len)])) + print_out(" t: " + + " ".join([tok(target[l]) for l in xrange(print_len)])) + decoded_target = target + decoded_output = decode(output) + # Use beam output if given and score is high enough. 
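+  # A beam score of at least 10.0 is treated as "good enough": for those batch
+  # elements the greedy argmax decode is overwritten with the beam result
+  # before errors are counted below.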
+ if beam_out is not None: + for b in xrange(batch_size): + if beam_scores[b] >= 10.0: + for l in xrange(min(len(decoded_output), beam_out.shape[2])): + decoded_output[l][b] = int(beam_out[b, 0, l]) + total = 0 + errors = 0 + seq = [0 for b in xrange(batch_size)] + for l in xrange(len(decoded_output)): + for b in xrange(batch_size): + if decoded_target[l][b] > 0: + total += 1 + if decoded_output[l][b] != decoded_target[l][b]: + seq[b] = 1 + errors += 1 + e = 0 # Previous error index + for _ in xrange(min(nprint, sum(seq))): + while seq[e] == 0: + e += 1 + task_print([inpt[l][e] for l in xrange(len(inpt))], + [decoded_output[l][e] for l in xrange(len(decoded_target))], + [decoded_target[l][e] for l in xrange(len(decoded_target))]) + e += 1 + for b in xrange(nprint - errors): + task_print([inpt[l][b] for l in xrange(len(inpt))], + [decoded_output[l][b] for l in xrange(len(decoded_target))], + [decoded_target[l][b] for l in xrange(len(decoded_target))]) + return errors, total, sum(seq) + + +def safe_exp(x): + perp = 10000 + x = float(x) + if x < 100: perp = math.exp(x) + if perp > 10000: return 10000 + return perp diff --git a/models/research/neural_gpu/neural_gpu.py b/models/research/neural_gpu/neural_gpu.py new file mode 100644 index 0000000000000000000000000000000000000000..55b2b3e99224b31c672014195e9ef23fa1e892f7 --- /dev/null +++ b/models/research/neural_gpu/neural_gpu.py @@ -0,0 +1,747 @@ +# Copyright 2015 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""The Neural GPU Model.""" + +import time + +import numpy as np +from six.moves import xrange +import tensorflow as tf + +from tensorflow.python.framework import function +import data_utils as data + +do_jit = False # Gives more speed but experimental for now. 
+jit_scope = tf.contrib.compiler.jit.experimental_jit_scope + + +def conv_linear(args, kw, kh, nin, nout, rate, do_bias, bias_start, prefix): + """Convolutional linear map.""" + if not isinstance(args, (list, tuple)): + args = [args] + with tf.variable_scope(prefix): + with tf.device("/cpu:0"): + k = tf.get_variable("CvK", [kw, kh, nin, nout]) + if len(args) == 1: + arg = args[0] + else: + arg = tf.concat(axis=3, values=args) + res = tf.nn.convolution(arg, k, dilation_rate=(rate, 1), padding="SAME") + if not do_bias: return res + with tf.device("/cpu:0"): + bias_term = tf.get_variable( + "CvB", [nout], initializer=tf.constant_initializer(bias_start)) + bias_term = tf.reshape(bias_term, [1, 1, 1, nout]) + return res + bias_term + + +def sigmoid_cutoff(x, cutoff): + """Sigmoid with cutoff, e.g., 1.2sigmoid(x) - 0.1.""" + y = tf.sigmoid(x) + if cutoff < 1.01: return y + d = (cutoff - 1.0) / 2.0 + return tf.minimum(1.0, tf.maximum(0.0, cutoff * y - d), name="cutoff_min") + + +@function.Defun(tf.float32, noinline=True) +def sigmoid_cutoff_12(x): + """Sigmoid with cutoff 1.2, specialized for speed and memory use.""" + y = tf.sigmoid(x) + return tf.minimum(1.0, tf.maximum(0.0, 1.2 * y - 0.1), name="cutoff_min_12") + + +@function.Defun(tf.float32, noinline=True) +def sigmoid_hard(x): + """Hard sigmoid.""" + return tf.minimum(1.0, tf.maximum(0.0, 0.25 * x + 0.5)) + + +def place_at14(decided, selected, it): + """Place selected at it-th coordinate of decided, dim=1 of 4.""" + slice1 = decided[:, :it, :, :] + slice2 = decided[:, it + 1:, :, :] + return tf.concat(axis=1, values=[slice1, selected, slice2]) + + +def place_at13(decided, selected, it): + """Place selected at it-th coordinate of decided, dim=1 of 3.""" + slice1 = decided[:, :it, :] + slice2 = decided[:, it + 1:, :] + return tf.concat(axis=1, values=[slice1, selected, slice2]) + + +def tanh_cutoff(x, cutoff): + """Tanh with cutoff, e.g., 1.1tanh(x) cut to [-1. 
1].""" + y = tf.tanh(x) + if cutoff < 1.01: return y + d = (cutoff - 1.0) / 2.0 + return tf.minimum(1.0, tf.maximum(-1.0, (1.0 + d) * y)) + + +@function.Defun(tf.float32, noinline=True) +def tanh_hard(x): + """Hard tanh.""" + return tf.minimum(1.0, tf.maximum(0.0, x)) + + +def layer_norm(x, nmaps, prefix, epsilon=1e-5): + """Layer normalize the 4D tensor x, averaging over the last dimension.""" + with tf.variable_scope(prefix): + scale = tf.get_variable("layer_norm_scale", [nmaps], + initializer=tf.ones_initializer()) + bias = tf.get_variable("layer_norm_bias", [nmaps], + initializer=tf.zeros_initializer()) + mean, variance = tf.nn.moments(x, [3], keep_dims=True) + norm_x = (x - mean) / tf.sqrt(variance + epsilon) + return norm_x * scale + bias + + +def conv_gru(inpts, mem, kw, kh, nmaps, rate, cutoff, prefix, do_layer_norm, + args_len=None): + """Convolutional GRU.""" + def conv_lin(args, suffix, bias_start): + total_args_len = args_len or len(args) * nmaps + res = conv_linear(args, kw, kh, total_args_len, nmaps, rate, True, + bias_start, prefix + "/" + suffix) + if do_layer_norm: + return layer_norm(res, nmaps, prefix + "/" + suffix) + else: + return res + if cutoff == 1.2: + reset = sigmoid_cutoff_12(conv_lin(inpts + [mem], "r", 1.0)) + gate = sigmoid_cutoff_12(conv_lin(inpts + [mem], "g", 1.0)) + elif cutoff > 10: + reset = sigmoid_hard(conv_lin(inpts + [mem], "r", 1.0)) + gate = sigmoid_hard(conv_lin(inpts + [mem], "g", 1.0)) + else: + reset = sigmoid_cutoff(conv_lin(inpts + [mem], "r", 1.0), cutoff) + gate = sigmoid_cutoff(conv_lin(inpts + [mem], "g", 1.0), cutoff) + if cutoff > 10: + candidate = tanh_hard(conv_lin(inpts + [reset * mem], "c", 0.0)) + else: + # candidate = tanh_cutoff(conv_lin(inpts + [reset * mem], "c", 0.0), cutoff) + candidate = tf.tanh(conv_lin(inpts + [reset * mem], "c", 0.0)) + return gate * mem + (1 - gate) * candidate + + +CHOOSE_K = 256 + + +def memory_call(q, l, nmaps, mem_size, vocab_size, num_gpus, update_mem): + raise ValueError("Fill for experiments with additional memory structures.") + + +def memory_run(step, nmaps, mem_size, batch_size, vocab_size, + global_step, do_training, update_mem, decay_factor, num_gpus, + target_emb_weights, output_w, gpu_targets_tn, it): + """Run memory.""" + q = step[:, 0, it, :] + mlabels = gpu_targets_tn[:, it, 0] + res, mask, mem_loss = memory_call( + q, mlabels, nmaps, mem_size, vocab_size, num_gpus, update_mem) + res = tf.gather(target_emb_weights, res) * tf.expand_dims(mask[:, 0], 1) + + # Mix gold and original in the first steps, 20% later. + gold = tf.nn.dropout(tf.gather(target_emb_weights, mlabels), 0.7) + use_gold = 1.0 - tf.cast(global_step, tf.float32) / (1000. 
* decay_factor) + use_gold = tf.maximum(use_gold, 0.2) * do_training + mem = tf.cond(tf.less(tf.random_uniform([]), use_gold), + lambda: use_gold * gold + (1.0 - use_gold) * res, + lambda: res) + mem = tf.reshape(mem, [-1, 1, 1, nmaps]) + return mem, mem_loss, update_mem + + +@tf.RegisterGradient("CustomIdG") +def _custom_id_grad(_, grads): + return grads + + +def quantize(t, quant_scale, max_value=1.0): + """Quantize a tensor t with each element in [-max_value, max_value].""" + t = tf.minimum(max_value, tf.maximum(t, -max_value)) + big = quant_scale * (t + max_value) + 0.5 + with tf.get_default_graph().gradient_override_map({"Floor": "CustomIdG"}): + res = (tf.floor(big) / quant_scale) - max_value + return res + + +def quantize_weights_op(quant_scale, max_value): + ops = [v.assign(quantize(v, quant_scale, float(max_value))) + for v in tf.trainable_variables()] + return tf.group(*ops) + + +def autoenc_quantize(x, nbits, nmaps, do_training, layers=1): + """Autoencoder into nbits vectors of bits, using noise and sigmoids.""" + enc_x = tf.reshape(x, [-1, nmaps]) + for i in xrange(layers - 1): + enc_x = tf.layers.dense(enc_x, nmaps, name="autoenc_%d" % i) + enc_x = tf.layers.dense(enc_x, nbits, name="autoenc_%d" % (layers - 1)) + noise = tf.truncated_normal(tf.shape(enc_x), stddev=2.0) + dec_x = sigmoid_cutoff_12(enc_x + noise * do_training) + dec_x = tf.reshape(dec_x, [-1, nbits]) + for i in xrange(layers): + dec_x = tf.layers.dense(dec_x, nmaps, name="autodec_%d" % i) + return tf.reshape(dec_x, tf.shape(x)) + + +def make_dense(targets, noclass, low_param): + """Move a batch of targets to a dense 1-hot representation.""" + low = low_param / float(noclass - 1) + high = 1.0 - low * (noclass - 1) + targets = tf.cast(targets, tf.int64) + return tf.one_hot(targets, depth=noclass, on_value=high, off_value=low) + + +def reorder_beam(beam_size, batch_size, beam_val, output, is_first, + tensors_to_reorder): + """Reorder to minimize beam costs.""" + # beam_val is [batch_size x beam_size]; let b = batch_size * beam_size + # decided is len x b x a x b + # output is b x out_size; step is b x len x a x b; + outputs = tf.split(axis=0, num_or_size_splits=beam_size, value=tf.nn.log_softmax(output)) + all_beam_vals, all_beam_idx = [], [] + beam_range = 1 if is_first else beam_size + for i in xrange(beam_range): + top_out, top_out_idx = tf.nn.top_k(outputs[i], k=beam_size) + cur_beam_val = beam_val[:, i] + top_out = tf.Print(top_out, [top_out, top_out_idx, beam_val, i, + cur_beam_val], "GREPO", summarize=8) + all_beam_vals.append(top_out + tf.expand_dims(cur_beam_val, 1)) + all_beam_idx.append(top_out_idx) + all_beam_idx = tf.reshape(tf.transpose(tf.concat(axis=1, values=all_beam_idx), [1, 0]), + [-1]) + top_beam, top_beam_idx = tf.nn.top_k(tf.concat(axis=1, values=all_beam_vals), k=beam_size) + top_beam_idx = tf.Print(top_beam_idx, [top_beam, top_beam_idx], + "GREP", summarize=8) + reordered = [[] for _ in xrange(len(tensors_to_reorder) + 1)] + top_out_idx = [] + for i in xrange(beam_size): + which_idx = top_beam_idx[:, i] * batch_size + tf.range(batch_size) + top_out_idx.append(tf.gather(all_beam_idx, which_idx)) + which_beam = top_beam_idx[:, i] / beam_size # [batch] + which_beam = which_beam * batch_size + tf.range(batch_size) + reordered[0].append(tf.gather(output, which_beam)) + for i, t in enumerate(tensors_to_reorder): + reordered[i + 1].append(tf.gather(t, which_beam)) + new_tensors = [tf.concat(axis=0, values=t) for t in reordered] + top_out_idx = tf.concat(axis=0, values=top_out_idx) + return 
(top_beam, new_tensors[0], top_out_idx, new_tensors[1:]) + + +class NeuralGPU(object): + """Neural GPU Model.""" + + def __init__(self, nmaps, vec_size, niclass, noclass, dropout, + max_grad_norm, cutoff, nconvs, kw, kh, height, mem_size, + learning_rate, min_length, num_gpus, num_replicas, + grad_noise_scale, sampling_rate, act_noise=0.0, do_rnn=False, + atrous=False, beam_size=1, backward=True, do_layer_norm=False, + autoenc_decay=1.0): + # Feeds for parameters and ops to update them. + self.nmaps = nmaps + if backward: + self.global_step = tf.Variable(0, trainable=False, name="global_step") + self.cur_length = tf.Variable(min_length, trainable=False) + self.cur_length_incr_op = self.cur_length.assign_add(1) + self.lr = tf.Variable(learning_rate, trainable=False) + self.lr_decay_op = self.lr.assign(self.lr * 0.995) + self.do_training = tf.placeholder(tf.float32, name="do_training") + self.update_mem = tf.placeholder(tf.int32, name="update_mem") + self.noise_param = tf.placeholder(tf.float32, name="noise_param") + + # Feeds for inputs, targets, outputs, losses, etc. + self.input = tf.placeholder(tf.int32, name="inp") + self.target = tf.placeholder(tf.int32, name="tgt") + self.prev_step = tf.placeholder(tf.float32, name="prev_step") + gpu_input = tf.split(axis=0, num_or_size_splits=num_gpus, value=self.input) + gpu_target = tf.split(axis=0, num_or_size_splits=num_gpus, value=self.target) + gpu_prev_step = tf.split(axis=0, num_or_size_splits=num_gpus, value=self.prev_step) + batch_size = tf.shape(gpu_input[0])[0] + + if backward: + adam_lr = 0.005 * self.lr + adam = tf.train.AdamOptimizer(adam_lr, epsilon=1e-3) + + def adam_update(grads): + return adam.apply_gradients(zip(grads, tf.trainable_variables()), + global_step=self.global_step, + name="adam_update") + + # When switching from Adam to SGD we perform reverse-decay. + if backward: + global_step_float = tf.cast(self.global_step, tf.float32) + sampling_decay_exponent = global_step_float / 100000.0 + sampling_decay = tf.maximum(0.05, tf.pow(0.5, sampling_decay_exponent)) + self.sampling = sampling_rate * 0.05 / sampling_decay + else: + self.sampling = tf.constant(0.0) + + # Cache variables on cpu if needed. 
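+ # A dummy constant pinned to /cpu:0 supplies the caching device below, so
+ # towers on different GPUs read one cached copy of each variable instead of
+ # fetching the parameters separately per GPU.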
+ if num_replicas > 1 or num_gpus > 1: + with tf.device("/cpu:0"): + caching_const = tf.constant(0) + tf.get_variable_scope().set_caching_device(caching_const.op.device) + # partitioner = tf.variable_axis_size_partitioner(1024*256*4) + # tf.get_variable_scope().set_partitioner(partitioner) + + def gpu_avg(l): + if l[0] is None: + for elem in l: + assert elem is None + return 0.0 + if len(l) < 2: + return l[0] + return sum(l) / float(num_gpus) + + self.length_tensor = tf.placeholder(tf.int32, name="length") + + with tf.device("/cpu:0"): + emb_weights = tf.get_variable( + "embedding", [niclass, vec_size], + initializer=tf.random_uniform_initializer(-1.7, 1.7)) + if beam_size > 0: + target_emb_weights = tf.get_variable( + "target_embedding", [noclass, nmaps], + initializer=tf.random_uniform_initializer(-1.7, 1.7)) + e0 = tf.scatter_update(emb_weights, + tf.constant(0, dtype=tf.int32, shape=[1]), + tf.zeros([1, vec_size])) + output_w = tf.get_variable("output_w", [nmaps, noclass], tf.float32) + + def conv_rate(layer): + if atrous: + return 2**layer + return 1 + + # pylint: disable=cell-var-from-loop + def enc_step(step): + """Encoder step.""" + if autoenc_decay < 1.0: + quant_step = autoenc_quantize(step, 16, nmaps, self.do_training) + if backward: + exp_glob = tf.train.exponential_decay(1.0, self.global_step - 10000, + 1000, autoenc_decay) + dec_factor = 1.0 - exp_glob # * self.do_training + dec_factor = tf.cond(tf.less(self.global_step, 10500), + lambda: tf.constant(0.05), lambda: dec_factor) + else: + dec_factor = 1.0 + cur = tf.cond(tf.less(tf.random_uniform([]), dec_factor), + lambda: quant_step, lambda: step) + else: + cur = step + if dropout > 0.0001: + cur = tf.nn.dropout(cur, keep_prob) + if act_noise > 0.00001: + cur += tf.truncated_normal(tf.shape(cur)) * act_noise_scale + # Do nconvs-many CGRU steps. + if do_jit and tf.get_variable_scope().reuse: + with jit_scope(): + for layer in xrange(nconvs): + cur = conv_gru([], cur, kw, kh, nmaps, conv_rate(layer), + cutoff, "ecgru_%d" % layer, do_layer_norm) + else: + for layer in xrange(nconvs): + cur = conv_gru([], cur, kw, kh, nmaps, conv_rate(layer), + cutoff, "ecgru_%d" % layer, do_layer_norm) + return cur + + zero_tgt = tf.zeros([batch_size, nmaps, 1]) + zero_tgt.set_shape([None, nmaps, 1]) + + def dec_substep(step, decided): + """Decoder sub-step.""" + cur = step + if dropout > 0.0001: + cur = tf.nn.dropout(cur, keep_prob) + if act_noise > 0.00001: + cur += tf.truncated_normal(tf.shape(cur)) * act_noise_scale + # Do nconvs-many CGRU steps. + if do_jit and tf.get_variable_scope().reuse: + with jit_scope(): + for layer in xrange(nconvs): + cur = conv_gru([decided], cur, kw, kh, nmaps, conv_rate(layer), + cutoff, "dcgru_%d" % layer, do_layer_norm) + else: + for layer in xrange(nconvs): + cur = conv_gru([decided], cur, kw, kh, nmaps, conv_rate(layer), + cutoff, "dcgru_%d" % layer, do_layer_norm) + return cur + # pylint: enable=cell-var-from-loop + + def dec_step(step, it, it_int, decided, output_ta, tgts, + mloss, nupd_in, out_idx, beam_cost): + """Decoder step.""" + nupd, mem_loss = 0, 0.0 + if mem_size > 0: + it_incr = tf.minimum(it+1, length - 1) + mem, mem_loss, nupd = memory_run( + step, nmaps, mem_size, batch_size, noclass, self.global_step, + self.do_training, self.update_mem, 10, num_gpus, + target_emb_weights, output_w, gpu_targets_tn, it_incr) + step = dec_substep(step, decided) + output_l = tf.expand_dims(tf.expand_dims(step[:, it, 0, :], 1), 1) + # Calculate argmax output. 
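+ # The decoder state at position `it` is projected through output_w to get
+ # logits; with beam_size > 1 the beams are re-ranked by reorder_beam,
+ # otherwise a token is sampled and may be fed back (scheduled sampling via
+ # self.sampling) in place of the gold target embedding.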
+ output = tf.reshape(output_l, [-1, nmaps]) + # pylint: disable=cell-var-from-loop + output = tf.matmul(output, output_w) + if beam_size > 1: + beam_cost, output, out, reordered = reorder_beam( + beam_size, batch_size, beam_cost, output, it_int == 0, + [output_l, out_idx, step, decided]) + [output_l, out_idx, step, decided] = reordered + else: + # Scheduled sampling. + out = tf.multinomial(tf.stop_gradient(output), 1) + out = tf.to_int32(tf.squeeze(out, [1])) + out_write = output_ta.write(it, output_l[:batch_size, :, :, :]) + output = tf.gather(target_emb_weights, out) + output = tf.reshape(output, [-1, 1, nmaps]) + output = tf.concat(axis=1, values=[output] * height) + tgt = tgts[it, :, :, :] + selected = tf.cond(tf.less(tf.random_uniform([]), self.sampling), + lambda: output, lambda: tgt) + # pylint: enable=cell-var-from-loop + dec_write = place_at14(decided, tf.expand_dims(selected, 1), it) + out_idx = place_at13( + out_idx, tf.reshape(out, [beam_size * batch_size, 1, 1]), it) + if mem_size > 0: + mem = tf.concat(axis=2, values=[mem] * height) + dec_write = place_at14(dec_write, mem, it_incr) + return (step, dec_write, out_write, mloss + mem_loss, nupd_in + nupd, + out_idx, beam_cost) + + # Main model construction. + gpu_outputs = [] + gpu_losses = [] + gpu_grad_norms = [] + grads_list = [] + gpu_out_idx = [] + self.after_enc_step = [] + for gpu in xrange(num_gpus): # Multi-GPU towers, average gradients later. + length = self.length_tensor + length_float = tf.cast(length, tf.float32) + if gpu > 0: + tf.get_variable_scope().reuse_variables() + gpu_outputs.append([]) + gpu_losses.append([]) + gpu_grad_norms.append([]) + with tf.name_scope("gpu%d" % gpu), tf.device("/gpu:%d" % gpu): + # Main graph creation loop. + data.print_out("Creating model.") + start_time = time.time() + + # Embed inputs and calculate mask. + with tf.device("/cpu:0"): + tgt_shape = tf.shape(tf.squeeze(gpu_target[gpu], [1])) + weights = tf.where(tf.squeeze(gpu_target[gpu], [1]) > 0, + tf.ones(tgt_shape), tf.zeros(tgt_shape)) + + # Embed inputs and targets. + with tf.control_dependencies([e0]): + start = tf.gather(emb_weights, gpu_input[gpu]) # b x h x l x nmaps + gpu_targets_tn = gpu_target[gpu] # b x 1 x len + if beam_size > 0: + embedded_targets_tn = tf.gather(target_emb_weights, + gpu_targets_tn) + embedded_targets_tn = tf.transpose( + embedded_targets_tn, [2, 0, 1, 3]) # len x b x 1 x nmaps + embedded_targets_tn = tf.concat(axis=2, values=[embedded_targets_tn] * height) + + # First image comes from start by applying convolution and adding 0s. + start = tf.transpose(start, [0, 2, 1, 3]) # Now b x len x h x vec_s + first = conv_linear(start, 1, 1, vec_size, nmaps, 1, True, 0.0, "input") + first = layer_norm(first, nmaps, "input") + + # Computation steps. + keep_prob = dropout * 3.0 / tf.sqrt(length_float) + keep_prob = 1.0 - self.do_training * keep_prob + act_noise_scale = act_noise * self.do_training + + # Start with a convolutional gate merging previous step. + step = conv_gru([gpu_prev_step[gpu]], first, + kw, kh, nmaps, 1, cutoff, "first", do_layer_norm) + + # This is just for running a baseline RNN seq2seq model. + if do_rnn: + self.after_enc_step.append(step) # Not meaningful here, but needed. 
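+ # Baseline path (rnn_baseline flag): a seq2seq model with additive attention
+ # built from nconvs stacked LSTM cells of size height * nmaps; the encoder is
+ # a dynamic_rnn over the flattened first step and the decoder is a tf.scan
+ # over the embedded, right-shifted target inputs.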
+ def lstm_cell(): + return tf.contrib.rnn.BasicLSTMCell(height * nmaps) + cell = tf.contrib.rnn.MultiRNNCell( + [lstm_cell() for _ in range(nconvs)]) + with tf.variable_scope("encoder"): + encoder_outputs, encoder_state = tf.nn.dynamic_rnn( + cell, tf.reshape(step, [batch_size, length, height * nmaps]), + dtype=tf.float32, time_major=False) + + # Attention. + attn = tf.layers.dense( + encoder_outputs, height * nmaps, name="attn1") + + # pylint: disable=cell-var-from-loop + @function.Defun(noinline=True) + def attention_query(query, attn_v): + vecs = tf.tanh(attn + tf.expand_dims(query, 1)) + mask = tf.reduce_sum(vecs * tf.reshape(attn_v, [1, 1, -1]), 2) + mask = tf.nn.softmax(mask) + return tf.reduce_sum(encoder_outputs * tf.expand_dims(mask, 2), 1) + + with tf.variable_scope("decoder"): + def decoder_loop_fn(state__prev_cell_out__unused, cell_inp__cur_tgt): + """Decoder loop function.""" + state, prev_cell_out, _ = state__prev_cell_out__unused + cell_inp, cur_tgt = cell_inp__cur_tgt + attn_q = tf.layers.dense(prev_cell_out, height * nmaps, + name="attn_query") + attn_res = attention_query(attn_q, tf.get_variable( + "attn_v", [height * nmaps], + initializer=tf.random_uniform_initializer(-0.1, 0.1))) + concatenated = tf.reshape(tf.concat(axis=1, values=[cell_inp, attn_res]), + [batch_size, 2 * height * nmaps]) + cell_inp = tf.layers.dense( + concatenated, height * nmaps, name="attn_merge") + output, new_state = cell(cell_inp, state) + + mem_loss = 0.0 + if mem_size > 0: + res, mask, mem_loss = memory_call( + output, cur_tgt, height * nmaps, mem_size, noclass, + num_gpus, self.update_mem) + res = tf.gather(target_emb_weights, res) + res *= tf.expand_dims(mask[:, 0], 1) + output = tf.layers.dense( + tf.concat(axis=1, values=[output, res]), height * nmaps, name="rnnmem") + + return new_state, output, mem_loss + # pylint: enable=cell-var-from-loop + gpu_targets = tf.squeeze(gpu_target[gpu], [1]) # b x len + gpu_tgt_trans = tf.transpose(gpu_targets, [1, 0]) + dec_zero = tf.zeros([batch_size, 1], dtype=tf.int32) + dec_inp = tf.concat(axis=1, values=[dec_zero, gpu_targets]) + dec_inp = dec_inp[:, :length] + embedded_dec_inp = tf.gather(target_emb_weights, dec_inp) + embedded_dec_inp_proj = tf.layers.dense( + embedded_dec_inp, height * nmaps, name="dec_proj") + embedded_dec_inp_proj = tf.transpose(embedded_dec_inp_proj, + [1, 0, 2]) + init_vals = (encoder_state, + tf.zeros([batch_size, height * nmaps]), 0.0) + _, dec_outputs, mem_losses = tf.scan( + decoder_loop_fn, (embedded_dec_inp_proj, gpu_tgt_trans), + initializer=init_vals) + mem_loss = tf.reduce_mean(mem_losses) + outputs = tf.layers.dense(dec_outputs, nmaps, name="out_proj") + # Final convolution to get logits, list outputs. + outputs = tf.matmul(tf.reshape(outputs, [-1, nmaps]), output_w) + outputs = tf.reshape(outputs, [length, batch_size, noclass]) + gpu_out_idx.append(tf.argmax(outputs, 2)) + else: # Here we go with the Neural GPU. + # Encoder. + enc_length = length + step = enc_step(step) # First step hard-coded. + # pylint: disable=cell-var-from-loop + i = tf.constant(1) + c = lambda i, _s: tf.less(i, enc_length) + def enc_step_lambda(i, step): + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + new_step = enc_step(step) + return (i + 1, new_step) + _, step = tf.while_loop( + c, enc_step_lambda, [i, step], + parallel_iterations=1, swap_memory=True) + # pylint: enable=cell-var-from-loop + + self.after_enc_step.append(step) + + # Decoder. 
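+ # With beam_size > 0, dec_step is applied once and the remaining positions
+ # run inside a tf.while_loop, accumulating per-position logits in a
+ # TensorArray; tgts repeats the embedded gold targets beam_size times for
+ # scheduled sampling. With beam_size <= 0 there is no decoder at all and the
+ # encoder's final state is read out directly.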
+ if beam_size > 0: + output_ta = tf.TensorArray( + dtype=tf.float32, size=length, dynamic_size=False, + infer_shape=False, name="outputs") + out_idx = tf.zeros([beam_size * batch_size, length, 1], + dtype=tf.int32) + decided_t = tf.zeros([beam_size * batch_size, length, + height, vec_size]) + + # Prepare for beam search. + tgts = tf.concat(axis=1, values=[embedded_targets_tn] * beam_size) + beam_cost = tf.zeros([batch_size, beam_size]) + step = tf.concat(axis=0, values=[step] * beam_size) + # First step hard-coded. + step, decided_t, output_ta, mem_loss, nupd, oi, bc = dec_step( + step, 0, 0, decided_t, output_ta, tgts, 0.0, 0, out_idx, + beam_cost) + tf.get_variable_scope().reuse_variables() + # pylint: disable=cell-var-from-loop + def step_lambda(i, step, dec_t, out_ta, ml, nu, oi, bc): + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + s, d, t, nml, nu, oi, bc = dec_step( + step, i, 1, dec_t, out_ta, tgts, ml, nu, oi, bc) + return (i + 1, s, d, t, nml, nu, oi, bc) + i = tf.constant(1) + c = lambda i, _s, _d, _o, _ml, _nu, _oi, _bc: tf.less(i, length) + _, step, _, output_ta, mem_loss, nupd, out_idx, _ = tf.while_loop( + c, step_lambda, + [i, step, decided_t, output_ta, mem_loss, nupd, oi, bc], + parallel_iterations=1, swap_memory=True) + # pylint: enable=cell-var-from-loop + gpu_out_idx.append(tf.squeeze(out_idx, [2])) + outputs = output_ta.stack() + outputs = tf.squeeze(outputs, [2, 3]) # Now l x b x nmaps + else: + # If beam_size is 0 or less, we don't have a decoder. + mem_loss = 0.0 + outputs = tf.transpose(step[:, :, 1, :], [1, 0, 2]) + gpu_out_idx.append(tf.argmax(outputs, 2)) + + # Final convolution to get logits, list outputs. + outputs = tf.matmul(tf.reshape(outputs, [-1, nmaps]), output_w) + outputs = tf.reshape(outputs, [length, batch_size, noclass]) + gpu_outputs[gpu] = tf.nn.softmax(outputs) + + # Calculate cross-entropy loss and normalize it. + targets_soft = make_dense(tf.squeeze(gpu_target[gpu], [1]), + noclass, 0.1) + targets_soft = tf.reshape(targets_soft, [-1, noclass]) + targets_hard = make_dense(tf.squeeze(gpu_target[gpu], [1]), + noclass, 0.0) + targets_hard = tf.reshape(targets_hard, [-1, noclass]) + output = tf.transpose(outputs, [1, 0, 2]) + xent_soft = tf.reshape(tf.nn.softmax_cross_entropy_with_logits( + logits=tf.reshape(output, [-1, noclass]), labels=targets_soft), + [batch_size, length]) + xent_hard = tf.reshape(tf.nn.softmax_cross_entropy_with_logits( + logits=tf.reshape(output, [-1, noclass]), labels=targets_hard), + [batch_size, length]) + low, high = 0.1 / float(noclass - 1), 0.9 + const = high * tf.log(high) + float(noclass - 1) * low * tf.log(low) + weight_sum = tf.reduce_sum(weights) + 1e-20 + true_perp = tf.reduce_sum(xent_hard * weights) / weight_sum + soft_loss = tf.reduce_sum(xent_soft * weights) / weight_sum + perp_loss = soft_loss + const + # Final loss: cross-entropy + shared parameter relaxation part + extra. + mem_loss = 0.5 * tf.reduce_mean(mem_loss) / length_float + total_loss = perp_loss + mem_loss + gpu_losses[gpu].append(true_perp) + + # Gradients. 
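+ # Per-tower gradients: IndexedSlices are densified, the global norm is
+ # clipped to max_grad_norm, and optional truncated-normal noise scaled by
+ # the noise_param feed is added; the per-GPU gradient lists are averaged
+ # after the tower loop and applied by the Adam update.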
+ if backward: + data.print_out("Creating backward pass for the model.") + grads = tf.gradients( + total_loss, tf.trainable_variables(), + colocate_gradients_with_ops=True) + for g_i, g in enumerate(grads): + if isinstance(g, tf.IndexedSlices): + grads[g_i] = tf.convert_to_tensor(g) + grads, norm = tf.clip_by_global_norm(grads, max_grad_norm) + gpu_grad_norms[gpu].append(norm) + for g in grads: + if grad_noise_scale > 0.001: + g += tf.truncated_normal(tf.shape(g)) * self.noise_param + grads_list.append(grads) + else: + gpu_grad_norms[gpu].append(0.0) + data.print_out("Created model for gpu %d in %.2f s." + % (gpu, time.time() - start_time)) + + self.updates = [] + self.after_enc_step = tf.concat(axis=0, values=self.after_enc_step) # Concat GPUs. + if backward: + tf.get_variable_scope()._reuse = False + tf.get_variable_scope().set_caching_device(None) + grads = [gpu_avg([grads_list[g][i] for g in xrange(num_gpus)]) + for i in xrange(len(grads_list[0]))] + update = adam_update(grads) + self.updates.append(update) + else: + self.updates.append(tf.no_op()) + + self.losses = [gpu_avg([gpu_losses[g][i] for g in xrange(num_gpus)]) + for i in xrange(len(gpu_losses[0]))] + self.out_idx = tf.concat(axis=0, values=gpu_out_idx) + self.grad_norms = [gpu_avg([gpu_grad_norms[g][i] for g in xrange(num_gpus)]) + for i in xrange(len(gpu_grad_norms[0]))] + self.outputs = [tf.concat(axis=1, values=[gpu_outputs[g] for g in xrange(num_gpus)])] + self.quantize_op = quantize_weights_op(512, 8) + if backward: + self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=10) + + def step(self, sess, inp, target, do_backward_in, noise_param=None, + beam_size=2, eos_id=2, eos_cost=0.0, update_mem=None, state=None): + """Run a step of the network.""" + batch_size, height, length = inp.shape[0], inp.shape[1], inp.shape[2] + do_backward = do_backward_in + train_mode = True + if do_backward_in is None: + do_backward = False + train_mode = False + if update_mem is None: + update_mem = do_backward + feed_in = {} + # print " feeding sequences of length %d" % length + if state is None: + state = np.zeros([batch_size, length, height, self.nmaps]) + feed_in[self.prev_step.name] = state + feed_in[self.length_tensor.name] = length + feed_in[self.noise_param.name] = noise_param if noise_param else 0.0 + feed_in[self.do_training.name] = 1.0 if do_backward else 0.0 + feed_in[self.update_mem.name] = 1 if update_mem else 0 + if do_backward_in is False: + feed_in[self.sampling.name] = 0.0 + index = 0 # We're dynamic now. + feed_out = [] + if do_backward: + feed_out.append(self.updates[index]) + feed_out.append(self.grad_norms[index]) + if train_mode: + feed_out.append(self.losses[index]) + feed_in[self.input.name] = inp + feed_in[self.target.name] = target + feed_out.append(self.outputs[index]) + if train_mode: + # Make a full-sequence training step with one call to session.run. + res = sess.run([self.after_enc_step] + feed_out, feed_in) + after_enc_state, res = res[0], res[1:] + else: + # Make a full-sequence decoding step with one call to session.run. + feed_in[self.sampling.name] = 1.1 # Sample every time. 
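+ # Decoding path: a single session.run returns the post-encoder state, the
+ # decoded token ids and the per-position softmax outputs; per-example
+ # log-probability costs are accumulated below, and positions after a second
+ # EOS subtract eos_cost instead of adding log-probability.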
+ res = sess.run([self.after_enc_step, self.out_idx] + feed_out, feed_in) + after_enc_state, out_idx = res[0], res[1] + res = [res[2][l] for l in xrange(length)] + outputs = [out_idx[:, i] for i in xrange(length)] + cost = [0.0 for _ in xrange(beam_size * batch_size)] + seen_eos = [0 for _ in xrange(beam_size * batch_size)] + for idx, logit in enumerate(res): + best = outputs[idx] + for b in xrange(batch_size): + if seen_eos[b] > 1: + cost[b] -= eos_cost + else: + cost[b] += np.log(logit[b][best[b]]) + if best[b] in [eos_id]: + seen_eos[b] += 1 + res = [[-c for c in cost]] + outputs + # Collect and output results. + offset = 0 + norm = None + if do_backward: + offset = 2 + norm = res[1] + if train_mode: + outputs = res[offset + 1] + outputs = [outputs[l] for l in xrange(length)] + return res[offset], outputs, norm, after_enc_state diff --git a/models/research/neural_gpu/neural_gpu_trainer.py b/models/research/neural_gpu/neural_gpu_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..1f704b0da880dbde4b09bf2cc108edb034d7b1a0 --- /dev/null +++ b/models/research/neural_gpu/neural_gpu_trainer.py @@ -0,0 +1,1027 @@ +# Copyright 2015 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Neural GPU.""" + +from __future__ import print_function + +import math +import os +import random +import sys +import threading +import time + +import numpy as np +from six.moves import xrange +import tensorflow as tf + +import program_utils +import data_utils as data +import neural_gpu as ngpu +import wmt_utils as wmt + +tf.app.flags.DEFINE_float("lr", 0.1, "Learning rate.") +tf.app.flags.DEFINE_float("init_weight", 0.8, "Initial weights deviation.") +tf.app.flags.DEFINE_float("max_grad_norm", 4.0, "Clip gradients to this norm.") +tf.app.flags.DEFINE_float("cutoff", 1.2, "Cutoff at the gates.") +tf.app.flags.DEFINE_float("curriculum_ppx", 9.9, "Move curriculum if ppl < X.") +tf.app.flags.DEFINE_float("curriculum_seq", 0.3, "Move curriculum if seq < X.") +tf.app.flags.DEFINE_float("dropout", 0.1, "Dropout that much.") +tf.app.flags.DEFINE_float("grad_noise_scale", 0.0, "Gradient noise scale.") +tf.app.flags.DEFINE_float("max_sampling_rate", 0.1, "Maximal sampling rate.") +tf.app.flags.DEFINE_float("length_norm", 0.0, "Length normalization.") +tf.app.flags.DEFINE_float("train_beam_freq", 0.0, "Beam-based training.") +tf.app.flags.DEFINE_float("train_beam_anneal", 20000, "How many steps anneal.") +tf.app.flags.DEFINE_integer("eval_beam_steps", 4, "How many beam steps eval.") +tf.app.flags.DEFINE_integer("batch_size", 32, "Batch size.") +tf.app.flags.DEFINE_integer("steps_per_checkpoint", 100, "Steps per epoch.") +tf.app.flags.DEFINE_integer("nmaps", 64, "Number of floats in each cell.") +tf.app.flags.DEFINE_integer("vec_size", 64, "Size of word vectors.") +tf.app.flags.DEFINE_integer("train_data_size", 1000, "Training examples/len.") +tf.app.flags.DEFINE_integer("max_length", 40, 
"Maximum length.") +tf.app.flags.DEFINE_integer("random_seed", 125459, "Random seed.") +tf.app.flags.DEFINE_integer("nconvs", 2, "How many convolutions / 1 step.") +tf.app.flags.DEFINE_integer("kw", 3, "Kernel width.") +tf.app.flags.DEFINE_integer("kh", 3, "Kernel height.") +tf.app.flags.DEFINE_integer("height", 4, "Height.") +tf.app.flags.DEFINE_integer("mem_size", -1, "Memory size (sqrt)") +tf.app.flags.DEFINE_integer("soft_mem_size", 1024, "Softmax memory this size.") +tf.app.flags.DEFINE_integer("num_gpus", 1, "Number of GPUs to use.") +tf.app.flags.DEFINE_integer("num_replicas", 1, "Number of replicas in use.") +tf.app.flags.DEFINE_integer("beam_size", 1, "Beam size during decoding. " + "If 0, no decoder, the non-extended Neural GPU.") +tf.app.flags.DEFINE_integer("max_target_vocab", 0, + "Maximal size of target vocabulary.") +tf.app.flags.DEFINE_integer("decode_offset", 0, "Offset for decoding.") +tf.app.flags.DEFINE_integer("task", -1, "Task id when running on borg.") +tf.app.flags.DEFINE_integer("nprint", 0, "How many test examples to print out.") +tf.app.flags.DEFINE_integer("eval_bin_print", 3, "How many bins step in eval.") +tf.app.flags.DEFINE_integer("mode", 0, "Mode: 0-train other-decode.") +tf.app.flags.DEFINE_bool("atrous", False, "Whether to use atrous convs.") +tf.app.flags.DEFINE_bool("layer_norm", False, "Do layer normalization.") +tf.app.flags.DEFINE_bool("quantize", False, "Whether to quantize variables.") +tf.app.flags.DEFINE_bool("do_train", True, "If false, only update memory.") +tf.app.flags.DEFINE_bool("rnn_baseline", False, "If true build an RNN instead.") +tf.app.flags.DEFINE_bool("simple_tokenizer", False, + "If true, tokenize on spaces only, digits are 0.") +tf.app.flags.DEFINE_bool("normalize_digits", True, + "Whether to normalize digits with simple tokenizer.") +tf.app.flags.DEFINE_integer("vocab_size", 16, "Joint vocabulary size.") +tf.app.flags.DEFINE_string("data_dir", "/tmp", "Data directory") +tf.app.flags.DEFINE_string("train_dir", "/tmp/", "Directory to store models.") +tf.app.flags.DEFINE_string("test_file_prefix", "", "Files to test (.en,.fr).") +tf.app.flags.DEFINE_integer("max_train_data_size", 0, + "Limit on the size of training data (0: no limit).") +tf.app.flags.DEFINE_string("word_vector_file_en", "", + "Optional file with word vectors to start training.") +tf.app.flags.DEFINE_string("word_vector_file_fr", "", + "Optional file with word vectors to start training.") +tf.app.flags.DEFINE_string("problem", "wmt", "What problem are we solving?.") + +tf.app.flags.DEFINE_integer("ps_tasks", 0, "Number of ps tasks used.") +tf.app.flags.DEFINE_string("master", "", "Name of the TensorFlow master.") + +FLAGS = tf.app.flags.FLAGS +EXTRA_EVAL = 10 +EVAL_LEN_INCR = 8 +MAXLEN_F = 2.0 + + +def zero_split(tok_list, append=None): + """Split tok_list (list of ints) on 0s, append int to all parts if given.""" + res, cur, l = [], [], 0 + for tok in tok_list: + if tok == 0: + if append is not None: + cur.append(append) + res.append(cur) + l = max(l, len(cur)) + cur = [] + else: + cur.append(tok) + if append is not None: + cur.append(append) + res.append(cur) + l = max(l, len(cur)) + return res, l + + +def read_data(source_path, target_path, buckets, max_size=None, print_out=True): + """Read data from source and target files and put into buckets. + + Args: + source_path: path to the files with token-ids for the source language. 
+ target_path: path to the file with token-ids for the target language; + it must be aligned with the source file: n-th line contains the desired + output for n-th line from the source_path. + buckets: the buckets to use. + max_size: maximum number of lines to read, all other will be ignored; + if 0 or None, data files will be read completely (no limit). + If set to 1, no data will be returned (empty lists of the right form). + print_out: whether to print out status or not. + + Returns: + data_set: a list of length len(_buckets); data_set[n] contains a list of + (source, target) pairs read from the provided data files that fit + into the n-th bucket, i.e., such that len(source) < _buckets[n][0] and + len(target) < _buckets[n][1]; source and target are lists of token-ids. + """ + data_set = [[] for _ in buckets] + counter = 0 + if max_size != 1: + with tf.gfile.GFile(source_path, mode="r") as source_file: + with tf.gfile.GFile(target_path, mode="r") as target_file: + source, target = source_file.readline(), target_file.readline() + while source and target and (not max_size or counter < max_size): + counter += 1 + if counter % 100000 == 0 and print_out: + print(" reading data line %d" % counter) + sys.stdout.flush() + source_ids = [int(x) for x in source.split()] + target_ids = [int(x) for x in target.split()] + source_ids, source_len = zero_split(source_ids) + target_ids, target_len = zero_split(target_ids, append=wmt.EOS_ID) + for bucket_id, size in enumerate(buckets): + if source_len <= size and target_len <= size: + data_set[bucket_id].append([source_ids, target_ids]) + break + source, target = source_file.readline(), target_file.readline() + return data_set + + +global_train_set = {"wmt": []} +train_buckets_scale = {"wmt": []} + + +def calculate_buckets_scale(data_set, buckets, problem): + """Calculate buckets scales for the given data set.""" + train_bucket_sizes = [len(data_set[b]) for b in xrange(len(buckets))] + train_total_size = max(1, float(sum(train_bucket_sizes))) + + # A bucket scale is a list of increasing numbers from 0 to 1 that we'll use + # to select a bucket. Length of [scale[i], scale[i+1]] is proportional to + # the size if i-th training bucket, as used later. + if problem not in train_buckets_scale: + train_buckets_scale[problem] = [] + train_buckets_scale[problem].append( + [sum(train_bucket_sizes[:i + 1]) / train_total_size + for i in xrange(len(train_bucket_sizes))]) + return train_total_size + + +def read_data_into_global(source_path, target_path, buckets, + max_size=None, print_out=True): + """Read data into the global variables (can be in a separate thread).""" + # pylint: disable=global-variable-not-assigned + global global_train_set, train_buckets_scale + # pylint: enable=global-variable-not-assigned + data_set = read_data(source_path, target_path, buckets, max_size, print_out) + global_train_set["wmt"].append(data_set) + train_total_size = calculate_buckets_scale(data_set, buckets, "wmt") + if print_out: + print(" Finished global data reading (%d)." % train_total_size) + + +def initialize(sess=None): + """Initialize data and model.""" + global MAXLEN_F + # Create training directory if it does not exist. + if not tf.gfile.IsDirectory(FLAGS.train_dir): + data.print_out("Creating training directory %s." 
% FLAGS.train_dir) + tf.gfile.MkDir(FLAGS.train_dir) + decode_suffix = "beam%dln%d" % (FLAGS.beam_size, + int(100 * FLAGS.length_norm)) + if FLAGS.mode == 0: + decode_suffix = "" + if FLAGS.task >= 0: + data.log_filename = os.path.join(FLAGS.train_dir, + "log%d%s" % (FLAGS.task, decode_suffix)) + else: + data.log_filename = os.path.join(FLAGS.train_dir, "neural_gpu/log") + + # Set random seed. + if FLAGS.random_seed > 0: + seed = FLAGS.random_seed + max(0, FLAGS.task) + tf.set_random_seed(seed) + random.seed(seed) + np.random.seed(seed) + + # Check data sizes. + assert data.bins + max_length = min(FLAGS.max_length, data.bins[-1]) + while len(data.bins) > 1 and data.bins[-2] >= max_length + EXTRA_EVAL: + data.bins = data.bins[:-1] + if sess is None and FLAGS.task == 0 and FLAGS.num_replicas > 1: + if max_length > 60: + max_length = max_length * 1 / 2 # Save memory on chief. + min_length = min(14, max_length - 3) if FLAGS.problem == "wmt" else 3 + for p in FLAGS.problem.split("-"): + if p in ["progeval", "progsynth"]: + min_length = max(26, min_length) + assert max_length + 1 > min_length + while len(data.bins) > 1 and data.bins[-2] >= max_length + EXTRA_EVAL: + data.bins = data.bins[:-1] + + # Create checkpoint directory if it does not exist. + if FLAGS.mode == 0 or FLAGS.task < 0: + checkpoint_dir = os.path.join(FLAGS.train_dir, "neural_gpu%s" + % ("" if FLAGS.task < 0 else str(FLAGS.task))) + else: + checkpoint_dir = FLAGS.train_dir + if not tf.gfile.IsDirectory(checkpoint_dir): + data.print_out("Creating checkpoint directory %s." % checkpoint_dir) + tf.gfile.MkDir(checkpoint_dir) + + # Prepare data. + if FLAGS.problem == "wmt": + # Prepare WMT data. + data.print_out("Preparing WMT data in %s" % FLAGS.data_dir) + if FLAGS.simple_tokenizer: + MAXLEN_F = 3.5 + (en_train, fr_train, en_dev, fr_dev, + en_path, fr_path) = wmt.prepare_wmt_data( + FLAGS.data_dir, FLAGS.vocab_size, + tokenizer=wmt.space_tokenizer, + normalize_digits=FLAGS.normalize_digits) + else: + (en_train, fr_train, en_dev, fr_dev, + en_path, fr_path) = wmt.prepare_wmt_data( + FLAGS.data_dir, FLAGS.vocab_size) + + # Read data into buckets and compute their sizes. + fr_vocab, rev_fr_vocab = wmt.initialize_vocabulary(fr_path) + data.vocab = fr_vocab + data.rev_vocab = rev_fr_vocab + data.print_out("Reading development and training data (limit: %d)." + % FLAGS.max_train_data_size) + dev_set = {} + dev_set["wmt"] = read_data(en_dev, fr_dev, data.bins) + def data_read(size, print_out): + read_data_into_global(en_train, fr_train, data.bins, size, print_out) + data_read(50000, False) + read_thread_small = threading.Thread( + name="reading-data-small", target=lambda: data_read(900000, False)) + read_thread_small.start() + read_thread_full = threading.Thread( + name="reading-data-full", + target=lambda: data_read(FLAGS.max_train_data_size, True)) + read_thread_full.start() + data.print_out("Data reading set up.") + else: + # Prepare algorithmic data. + en_path, fr_path = None, None + tasks = FLAGS.problem.split("-") + data_size = FLAGS.train_data_size + for t in tasks: + data.print_out("Generating data for %s." % t) + if t in ["progeval", "progsynth"]: + data.init_data(t, data.bins[-1], 20 * data_size, FLAGS.vocab_size) + if len(program_utils.prog_vocab) > FLAGS.vocab_size - 2: + raise ValueError("Increase vocab_size to %d for prog-tasks." 
+ % (len(program_utils.prog_vocab) + 2)) + data.rev_vocab = program_utils.prog_vocab + data.vocab = program_utils.prog_rev_vocab + else: + for l in xrange(max_length + EXTRA_EVAL - 1): + data.init_data(t, l, data_size, FLAGS.vocab_size) + data.init_data(t, data.bins[-2], data_size, FLAGS.vocab_size) + data.init_data(t, data.bins[-1], data_size, FLAGS.vocab_size) + if t not in global_train_set: + global_train_set[t] = [] + global_train_set[t].append(data.train_set[t]) + calculate_buckets_scale(data.train_set[t], data.bins, t) + dev_set = data.test_set + + # Grid-search parameters. + lr = FLAGS.lr + init_weight = FLAGS.init_weight + max_grad_norm = FLAGS.max_grad_norm + if sess is not None and FLAGS.task > -1: + def job_id_factor(step): + """If jobid / step mod 3 is 0, 1, 2: say 0, 1, -1.""" + return ((((FLAGS.task / step) % 3) + 1) % 3) - 1 + lr *= math.pow(2, job_id_factor(1)) + init_weight *= math.pow(1.5, job_id_factor(3)) + max_grad_norm *= math.pow(2, job_id_factor(9)) + + # Print out parameters. + curriculum = FLAGS.curriculum_seq + msg1 = ("layers %d kw %d h %d kh %d batch %d noise %.2f" + % (FLAGS.nconvs, FLAGS.kw, FLAGS.height, FLAGS.kh, + FLAGS.batch_size, FLAGS.grad_noise_scale)) + msg2 = ("cut %.2f lr %.3f iw %.2f cr %.2f nm %d d%.4f gn %.2f %s" + % (FLAGS.cutoff, lr, init_weight, curriculum, FLAGS.nmaps, + FLAGS.dropout, max_grad_norm, msg1)) + data.print_out(msg2) + + # Create model and initialize it. + tf.get_variable_scope().set_initializer( + tf.orthogonal_initializer(gain=1.8 * init_weight)) + max_sampling_rate = FLAGS.max_sampling_rate if FLAGS.mode == 0 else 0.0 + o = FLAGS.vocab_size if FLAGS.max_target_vocab < 1 else FLAGS.max_target_vocab + ngpu.CHOOSE_K = FLAGS.soft_mem_size + do_beam_model = FLAGS.train_beam_freq > 0.0001 and FLAGS.beam_size > 1 + beam_size = FLAGS.beam_size if FLAGS.mode > 0 and not do_beam_model else 1 + beam_size = min(beam_size, FLAGS.beam_size) + beam_model = None + def make_ngpu(cur_beam_size, back): + return ngpu.NeuralGPU( + FLAGS.nmaps, FLAGS.vec_size, FLAGS.vocab_size, o, + FLAGS.dropout, max_grad_norm, FLAGS.cutoff, FLAGS.nconvs, + FLAGS.kw, FLAGS.kh, FLAGS.height, FLAGS.mem_size, + lr / math.sqrt(FLAGS.num_replicas), min_length + 3, FLAGS.num_gpus, + FLAGS.num_replicas, FLAGS.grad_noise_scale, max_sampling_rate, + atrous=FLAGS.atrous, do_rnn=FLAGS.rnn_baseline, + do_layer_norm=FLAGS.layer_norm, beam_size=cur_beam_size, backward=back) + if sess is None: + with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)): + model = make_ngpu(beam_size, True) + if do_beam_model: + tf.get_variable_scope().reuse_variables() + beam_model = make_ngpu(FLAGS.beam_size, False) + else: + model = make_ngpu(beam_size, True) + if do_beam_model: + tf.get_variable_scope().reuse_variables() + beam_model = make_ngpu(FLAGS.beam_size, False) + + sv = None + if sess is None: + # The supervisor configuration has a few overriden options. + sv = tf.train.Supervisor(logdir=checkpoint_dir, + is_chief=(FLAGS.task < 1), + saver=model.saver, + summary_op=None, + save_summaries_secs=60, + save_model_secs=15 * 60, + global_step=model.global_step) + + config = tf.ConfigProto(allow_soft_placement=True) + sess = sv.PrepareSession(FLAGS.master, config=config) + + data.print_out("Created model. Checkpoint dir %s" % checkpoint_dir) + + # Load model from parameters if a checkpoint exists. 
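+ # Restore from the newest checkpoint if its .index file exists; when running
+ # without a Supervisor (a session was passed in), variables are initialized
+ # explicitly instead.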
+ ckpt = tf.train.get_checkpoint_state(checkpoint_dir) + if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path + ".index"): + data.print_out("Reading model parameters from %s" + % ckpt.model_checkpoint_path) + model.saver.restore(sess, ckpt.model_checkpoint_path) + elif sv is None: + sess.run(tf.global_variables_initializer()) + data.print_out("Initialized variables (no supervisor mode).") + elif FLAGS.task < 1 and FLAGS.mem_size > 0: + # sess.run(model.mem_norm_op) + data.print_out("Created new model and normalized mem (on chief).") + + # Return the model and needed variables. + return (model, beam_model, min_length, max_length, checkpoint_dir, + (global_train_set, dev_set, en_path, fr_path), sv, sess) + + +def m_step(model, beam_model, sess, batch_size, inp, target, bucket, nsteps, p): + """Evaluation multi-step for program synthesis.""" + state, scores, hist = None, [[-11.0 for _ in xrange(batch_size)]], [] + for _ in xrange(nsteps): + # Get the best beam (no training, just forward model). + new_target, new_first, new_inp, new_scores = get_best_beam( + beam_model, sess, inp, target, + batch_size, FLAGS.beam_size, bucket, hist, p, test_mode=True) + hist.append(new_first) + _, _, _, state = model.step(sess, inp, new_target, False, state=state) + inp = new_inp + scores.append([max(scores[-1][i], new_scores[i]) + for i in xrange(batch_size)]) + # The final step with the true target. + loss, res, _, _ = model.step(sess, inp, target, False, state=state) + return loss, res, new_target, scores[1:] + + +def single_test(bin_id, model, sess, nprint, batch_size, dev, p, print_out=True, + offset=None, beam_model=None): + """Test model on test data of length l using the given session.""" + if not dev[p][bin_id]: + data.print_out(" bin %d (%d)\t%s\tppl NA errors NA seq-errors NA" + % (bin_id, data.bins[bin_id], p)) + return 1.0, 1.0, 0.0 + inpt, target = data.get_batch( + bin_id, batch_size, dev[p], FLAGS.height, offset) + if FLAGS.beam_size > 1 and beam_model: + loss, res, new_tgt, scores = m_step( + model, beam_model, sess, batch_size, inpt, target, bin_id, + FLAGS.eval_beam_steps, p) + score_avgs = [sum(s) / float(len(s)) for s in scores] + score_maxs = [max(s) for s in scores] + score_str = ["(%.2f, %.2f)" % (score_avgs[i], score_maxs[i]) + for i in xrange(FLAGS.eval_beam_steps)] + data.print_out(" == scores (avg, max): %s" % "; ".join(score_str)) + errors, total, seq_err = data.accuracy(inpt, res, target, batch_size, + nprint, new_tgt, scores[-1]) + else: + loss, res, _, _ = model.step(sess, inpt, target, False) + errors, total, seq_err = data.accuracy(inpt, res, target, batch_size, + nprint) + seq_err = float(seq_err) / batch_size + if total > 0: + errors = float(errors) / total + if print_out: + data.print_out(" bin %d (%d)\t%s\tppl %.2f errors %.2f seq-errors %.2f" + % (bin_id, data.bins[bin_id], p, data.safe_exp(loss), + 100 * errors, 100 * seq_err)) + return (errors, seq_err, loss) + + +def assign_vectors(word_vector_file, embedding_key, vocab_path, sess): + """Assign the embedding_key variable from the given word vectors file.""" + # For words in the word vector file, set their embedding at start. 
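+ # The word-vector file is read in the plain "word v1 v2 ... vn" text format;
+ # rows whose dimensionality does not match FLAGS.vec_size are skipped with a
+ # warning, and the modified matrix is pushed back through the variable's
+ # initializer rather than a separate assign op.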
+ if not tf.gfile.Exists(word_vector_file): + data.print_out("Word vector file does not exist: %s" % word_vector_file) + sys.exit(1) + vocab, _ = wmt.initialize_vocabulary(vocab_path) + vectors_variable = [v for v in tf.trainable_variables() + if embedding_key == v.name] + if len(vectors_variable) != 1: + data.print_out("Word vector variable not found or too many.") + sys.exit(1) + vectors_variable = vectors_variable[0] + vectors = vectors_variable.eval() + data.print_out("Pre-setting word vectors from %s" % word_vector_file) + with tf.gfile.GFile(word_vector_file, mode="r") as f: + # Lines have format: dog 0.045123 -0.61323 0.413667 ... + for line in f: + line_parts = line.split() + # The first part is the word. + word = line_parts[0] + if word in vocab: + # Remaining parts are components of the vector. + word_vector = np.array(map(float, line_parts[1:])) + if len(word_vector) != FLAGS.vec_size: + data.print_out("Warn: Word '%s', Expecting vector size %d, " + "found %d" % (word, FLAGS.vec_size, + len(word_vector))) + else: + vectors[vocab[word]] = word_vector + # Assign the modified vectors to the vectors_variable in the graph. + sess.run([vectors_variable.initializer], + {vectors_variable.initializer.inputs[1]: vectors}) + + +def print_vectors(embedding_key, vocab_path, word_vector_file): + """Print vectors from the given variable.""" + _, rev_vocab = wmt.initialize_vocabulary(vocab_path) + vectors_variable = [v for v in tf.trainable_variables() + if embedding_key == v.name] + if len(vectors_variable) != 1: + data.print_out("Word vector variable not found or too many.") + sys.exit(1) + vectors_variable = vectors_variable[0] + vectors = vectors_variable.eval() + l, s = vectors.shape[0], vectors.shape[1] + data.print_out("Printing %d word vectors from %s to %s." + % (l, embedding_key, word_vector_file)) + with tf.gfile.GFile(word_vector_file, mode="w") as f: + # Lines have format: dog 0.045123 -0.61323 0.413667 ... + for i in xrange(l): + f.write(rev_vocab[i]) + for j in xrange(s): + f.write(" %.8f" % vectors[i][j]) + f.write("\n") + + +def get_bucket_id(train_buckets_scale_c, max_cur_length, data_set): + """Get a random bucket id.""" + # Choose a bucket according to data distribution. Pick a random number + # in [0, 1] and use the corresponding interval in train_buckets_scale. 
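+ # Buckets longer than max_cur_length are usually re-drawn (10 tries, 90% of
+ # the time) with a smaller random number, biasing sampling toward buckets
+ # that fit the current curriculum length; empty buckets fall back to the
+ # nearest shorter non-empty one.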
+ random_number_01 = np.random.random_sample() + bucket_id = min([i for i in xrange(len(train_buckets_scale_c)) + if train_buckets_scale_c[i] > random_number_01]) + while bucket_id > 0 and not data_set[bucket_id]: + bucket_id -= 1 + for _ in xrange(10 if np.random.random_sample() < 0.9 else 1): + if data.bins[bucket_id] > max_cur_length: + random_number_01 = min(random_number_01, np.random.random_sample()) + bucket_id = min([i for i in xrange(len(train_buckets_scale_c)) + if train_buckets_scale_c[i] > random_number_01]) + while bucket_id > 0 and not data_set[bucket_id]: + bucket_id -= 1 + return bucket_id + + +def score_beams(beams, target, inp, history, p, + print_out=False, test_mode=False): + """Score beams.""" + if p == "progsynth": + return score_beams_prog(beams, target, inp, history, print_out, test_mode) + elif test_mode: + return beams[0], 10.0 if str(beams[0][:len(target)]) == str(target) else 0.0 + else: + history_s = [str(h) for h in history] + best, best_score, tgt, eos_id = None, -1000.0, target, None + if p == "wmt": + eos_id = wmt.EOS_ID + if eos_id and eos_id in target: + tgt = target[:target.index(eos_id)] + for beam in beams: + if eos_id and eos_id in beam: + beam = beam[:beam.index(eos_id)] + l = min(len(tgt), len(beam)) + score = len([i for i in xrange(l) if tgt[i] == beam[i]]) / float(len(tgt)) + hist_score = 20.0 if str([b for b in beam if b > 0]) in history_s else 0.0 + if score < 1.0: + score -= hist_score + if score > best_score: + best = beam + best_score = score + return best, best_score + + +def score_beams_prog(beams, target, inp, history, print_out=False, + test_mode=False): + """Score beams for program synthesis.""" + tgt_prog = linearize(target, program_utils.prog_vocab, True, 1) + hist_progs = [linearize(h, program_utils.prog_vocab, True, 1) + for h in history] + tgt_set = set(target) + if print_out: + print("target: ", tgt_prog) + inps, tgt_outs = [], [] + for i in xrange(3): + ilist = [inp[i + 1, l] for l in xrange(inp.shape[1])] + clist = [program_utils.prog_vocab[x] for x in ilist if x > 0] + olist = clist[clist.index("]") + 1:] # outputs + clist = clist[1:clist.index("]")] # inputs + inps.append([int(x) for x in clist]) + if olist[0] == "[": # olist may be [int] or just int + tgt_outs.append(str([int(x) for x in olist[1:-1]])) + else: + if len(olist) == 1: + tgt_outs.append(olist[0]) + else: + print([program_utils.prog_vocab[x] for x in ilist if x > 0]) + print(olist) + print(tgt_prog) + print(program_utils.evaluate(tgt_prog, {"a": inps[-1]})) + print("AAAAA") + tgt_outs.append(olist[0]) + if not test_mode: + for _ in xrange(7): + ilen = np.random.randint(len(target) - 3) + 1 + inps.append([random.choice(range(-15, 15)) for _ in range(ilen)]) + tgt_outs.extend([program_utils.evaluate(tgt_prog, {"a": inp}) + for inp in inps[3:]]) + best, best_prog, best_score = None, "", -1000.0 + for beam in beams: + b_prog = linearize(beam, program_utils.prog_vocab, True, 1) + b_set = set(beam) + jsim = len(tgt_set & b_set) / float(len(tgt_set | b_set)) + b_outs = [program_utils.evaluate(b_prog, {"a": inp}) for inp in inps] + errs = len([x for x in b_outs if x == "ERROR"]) + imatches = len([i for i in xrange(3) if b_outs[i] == tgt_outs[i]]) + perfect = 10.0 if imatches == 3 else 0.0 + hist_score = 20.0 if b_prog in hist_progs else 0.0 + if test_mode: + score = perfect - errs + else: + matches = len([i for i in xrange(10) if b_outs[i] == tgt_outs[i]]) + score = perfect + matches + jsim - errs + if score < 10.0: + score -= hist_score + # print b_prog + # print "jsim: 
", jsim, " errs: ", errs, " mtchs: ", matches, " s: ", score + if score > best_score: + best = beam + best_prog = b_prog + best_score = score + if print_out: + print("best score: ", best_score, " best prog: ", best_prog) + return best, best_score + + +def get_best_beam(beam_model, sess, inp, target, batch_size, beam_size, + bucket, history, p, test_mode=False): + """Run beam_model, score beams, and return the best as target and in input.""" + _, output_logits, _, _ = beam_model.step( + sess, inp, target, None, beam_size=FLAGS.beam_size) + new_targets, new_firsts, scores, new_inp = [], [], [], np.copy(inp) + for b in xrange(batch_size): + outputs = [] + history_b = [[h[b, 0, l] for l in xrange(data.bins[bucket])] + for h in history] + for beam_idx in xrange(beam_size): + outputs.append([int(o[beam_idx * batch_size + b]) + for o in output_logits]) + target_t = [target[b, 0, l] for l in xrange(data.bins[bucket])] + best, best_score = score_beams( + outputs, [t for t in target_t if t > 0], inp[b, :, :], + [[t for t in h if t > 0] for h in history_b], p, test_mode=test_mode) + scores.append(best_score) + if 1 in best: # Only until _EOS. + best = best[:best.index(1) + 1] + best += [0 for _ in xrange(len(target_t) - len(best))] + new_targets.append([best]) + first, _ = score_beams( + outputs, [t for t in target_t if t > 0], inp[b, :, :], + [[t for t in h if t > 0] for h in history_b], p, test_mode=True) + if 1 in first: # Only until _EOS. + first = first[:first.index(1) + 1] + first += [0 for _ in xrange(len(target_t) - len(first))] + new_inp[b, 0, :] = np.array(first, dtype=np.int32) + new_firsts.append([first]) + # Change target if we found a great answer. + new_target = np.array(new_targets, dtype=np.int32) + for b in xrange(batch_size): + if scores[b] >= 10.0: + target[b, 0, :] = new_target[b, 0, :] + new_first = np.array(new_firsts, dtype=np.int32) + return new_target, new_first, new_inp, scores + + +def train(): + """Train the model.""" + batch_size = FLAGS.batch_size * FLAGS.num_gpus + (model, beam_model, min_length, max_length, checkpoint_dir, + (train_set, dev_set, en_vocab_path, fr_vocab_path), sv, sess) = initialize() + with sess.as_default(): + quant_op = model.quantize_op + max_cur_length = min(min_length + 3, max_length) + prev_acc_perp = [1000000 for _ in xrange(5)] + prev_seq_err = 1.0 + is_chief = FLAGS.task < 1 + do_report = False + + # Main traning loop. + while not sv.ShouldStop(): + global_step, max_cur_length, learning_rate = sess.run( + [model.global_step, model.cur_length, model.lr]) + acc_loss, acc_l1, acc_total, acc_errors, acc_seq_err = 0.0, 0.0, 0, 0, 0 + acc_grad_norm, step_count, step_c1, step_time = 0.0, 0, 0, 0.0 + + # For words in the word vector file, set their embedding at start. 
+ bound1 = FLAGS.steps_per_checkpoint - 1 + if FLAGS.word_vector_file_en and global_step < bound1 and is_chief: + assign_vectors(FLAGS.word_vector_file_en, "embedding:0", + en_vocab_path, sess) + if FLAGS.max_target_vocab < 1: + assign_vectors(FLAGS.word_vector_file_en, "target_embedding:0", + en_vocab_path, sess) + + if FLAGS.word_vector_file_fr and global_step < bound1 and is_chief: + assign_vectors(FLAGS.word_vector_file_fr, "embedding:0", + fr_vocab_path, sess) + if FLAGS.max_target_vocab < 1: + assign_vectors(FLAGS.word_vector_file_fr, "target_embedding:0", + fr_vocab_path, sess) + + for _ in xrange(FLAGS.steps_per_checkpoint): + step_count += 1 + step_c1 += 1 + global_step = int(model.global_step.eval()) + train_beam_anneal = global_step / float(FLAGS.train_beam_anneal) + train_beam_freq = FLAGS.train_beam_freq * min(1.0, train_beam_anneal) + p = random.choice(FLAGS.problem.split("-")) + train_set = global_train_set[p][-1] + bucket_id = get_bucket_id(train_buckets_scale[p][-1], max_cur_length, + train_set) + # Prefer longer stuff 60% of time if not wmt. + if np.random.randint(100) < 60 and FLAGS.problem != "wmt": + bucket1 = get_bucket_id(train_buckets_scale[p][-1], max_cur_length, + train_set) + bucket_id = max(bucket1, bucket_id) + + # Run a step and time it. + start_time = time.time() + inp, target = data.get_batch(bucket_id, batch_size, train_set, + FLAGS.height) + noise_param = math.sqrt(math.pow(global_step + 1, -0.55) * + prev_seq_err) * FLAGS.grad_noise_scale + # In multi-step mode, we use best from beam for middle steps. + state, new_target, scores, history = None, None, None, [] + while (FLAGS.beam_size > 1 and + train_beam_freq > np.random.random_sample()): + # Get the best beam (no training, just forward model). + new_target, new_first, new_inp, scores = get_best_beam( + beam_model, sess, inp, target, + batch_size, FLAGS.beam_size, bucket_id, history, p) + history.append(new_first) + # Training step with the previous input and the best beam as target. + _, _, _, state = model.step(sess, inp, new_target, FLAGS.do_train, + noise_param, update_mem=True, state=state) + # Change input to the new one for the next step. + inp = new_inp + # If all results are great, stop (todo: not to wait for all?). + if FLAGS.nprint > 1: + print(scores) + if sum(scores) / float(len(scores)) >= 10.0: + break + # The final step with the true target. + loss, res, gnorm, _ = model.step( + sess, inp, target, FLAGS.do_train, noise_param, + update_mem=True, state=state) + step_time += time.time() - start_time + acc_grad_norm += 0.0 if gnorm is None else float(gnorm) + + # Accumulate statistics. + acc_loss += loss + acc_l1 += loss + errors, total, seq_err = data.accuracy( + inp, res, target, batch_size, 0, new_target, scores) + if FLAGS.nprint > 1: + print("seq_err: ", seq_err) + acc_total += total + acc_errors += errors + acc_seq_err += seq_err + + # Report summary every 10 steps. + if step_count + 3 > FLAGS.steps_per_checkpoint: + do_report = True # Don't polute plot too early. + if is_chief and step_count % 10 == 1 and do_report: + cur_loss = acc_l1 / float(step_c1) + acc_l1, step_c1 = 0.0, 0 + cur_perp = data.safe_exp(cur_loss) + summary = tf.Summary() + summary.value.extend( + [tf.Summary.Value(tag="log_perplexity", simple_value=cur_loss), + tf.Summary.Value(tag="perplexity", simple_value=cur_perp)]) + sv.SummaryComputed(sess, summary, global_step) + + # Normalize and print out accumulated statistics. 
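+ # Statistics are averaged over the checkpoint period; the measured sequence
+ # error feeds back into the gradient-noise magnitude for the next period and
+ # the noise vanishes once the sequence error drops below 2%.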
+ acc_loss /= step_count + step_time /= FLAGS.steps_per_checkpoint + acc_seq_err = float(acc_seq_err) / (step_count * batch_size) + prev_seq_err = max(0.0, acc_seq_err - 0.02) # No noise at error < 2%. + acc_errors = float(acc_errors) / acc_total if acc_total > 0 else 1.0 + t_size = float(sum([len(x) for x in train_set])) / float(1000000) + msg = ("step %d step-time %.2f train-size %.3f lr %.6f grad-norm %.4f" + % (global_step + 1, step_time, t_size, learning_rate, + acc_grad_norm / FLAGS.steps_per_checkpoint)) + data.print_out("%s len %d ppl %.6f errors %.2f sequence-errors %.2f" % + (msg, max_cur_length, data.safe_exp(acc_loss), + 100*acc_errors, 100*acc_seq_err)) + + # If errors are below the curriculum threshold, move curriculum forward. + is_good = FLAGS.curriculum_ppx > data.safe_exp(acc_loss) + is_good = is_good and FLAGS.curriculum_seq > acc_seq_err + if is_good and is_chief: + if FLAGS.quantize: + # Quantize weights. + data.print_out(" Quantizing parameters.") + sess.run([quant_op]) + # Increase current length (until the next with training data). + sess.run(model.cur_length_incr_op) + # Forget last perplexities if we're not yet at the end. + if max_cur_length < max_length: + prev_acc_perp.append(1000000) + + # Lower learning rate if we're worse than the last 5 checkpoints. + acc_perp = data.safe_exp(acc_loss) + if acc_perp > max(prev_acc_perp[-5:]) and is_chief: + sess.run(model.lr_decay_op) + prev_acc_perp.append(acc_perp) + + # Save checkpoint. + if is_chief: + checkpoint_path = os.path.join(checkpoint_dir, "neural_gpu.ckpt") + model.saver.save(sess, checkpoint_path, + global_step=model.global_step) + + # Run evaluation. + bin_bound = 4 + for p in FLAGS.problem.split("-"): + total_loss, total_err, tl_counter = 0.0, 0.0, 0 + for bin_id in xrange(len(data.bins)): + if bin_id < bin_bound or bin_id % FLAGS.eval_bin_print == 1: + err, _, loss = single_test(bin_id, model, sess, FLAGS.nprint, + batch_size * 4, dev_set, p, + beam_model=beam_model) + if loss > 0.0: + total_loss += loss + total_err += err + tl_counter += 1 + test_loss = total_loss / max(1, tl_counter) + test_err = total_err / max(1, tl_counter) + test_perp = data.safe_exp(test_loss) + summary = tf.Summary() + summary.value.extend( + [tf.Summary.Value(tag="test/%s/loss" % p, simple_value=test_loss), + tf.Summary.Value(tag="test/%s/error" % p, simple_value=test_err), + tf.Summary.Value(tag="test/%s/perplexity" % p, + simple_value=test_perp)]) + sv.SummaryComputed(sess, summary, global_step) + + +def linearize(output, rev_fr_vocab, simple_tokenizer=None, eos_id=wmt.EOS_ID): + # If there is an EOS symbol in outputs, cut them at that point (WMT). + if eos_id in output: + output = output[:output.index(eos_id)] + # Print out French sentence corresponding to outputs. 
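+ # With the simple tokenizer, ids map straight back to vocabulary words (out-of-range ids become "UNK"); otherwise the character-aware basic_detokenizer reassembles the sentence.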
+ if simple_tokenizer or FLAGS.simple_tokenizer: + vlen = len(rev_fr_vocab) + def vget(o): + if o < vlen: + return rev_fr_vocab[o] + return "UNK" + return " ".join([vget(o) for o in output]) + else: + return wmt.basic_detokenizer([rev_fr_vocab[o] for o in output]) + + +def evaluate(): + """Evaluate an existing model.""" + batch_size = FLAGS.batch_size * FLAGS.num_gpus + with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess: + (model, beam_model, _, _, _, + (_, dev_set, en_vocab_path, fr_vocab_path), _, sess) = initialize(sess) + for p in FLAGS.problem.split("-"): + for bin_id in xrange(len(data.bins)): + if (FLAGS.task >= 0 and bin_id > 4) or (FLAGS.nprint == 0 and + bin_id > 8 and p == "wmt"): + break + single_test(bin_id, model, sess, FLAGS.nprint, batch_size, dev_set, p, + beam_model=beam_model) + path = FLAGS.test_file_prefix + xid = "" if FLAGS.task < 0 else ("%.4d" % (FLAGS.task+FLAGS.decode_offset)) + en_path, fr_path = path + ".en" + xid, path + ".fr" + xid + # Evaluate the test file if they exist. + if path and tf.gfile.Exists(en_path) and tf.gfile.Exists(fr_path): + data.print_out("Translating test set %s" % en_path) + # Read lines. + en_lines, fr_lines = [], [] + with tf.gfile.GFile(en_path, mode="r") as f: + for line in f: + en_lines.append(line.strip()) + with tf.gfile.GFile(fr_path, mode="r") as f: + for line in f: + fr_lines.append(line.strip()) + # Tokenize and convert to ids. + en_vocab, _ = wmt.initialize_vocabulary(en_vocab_path) + _, rev_fr_vocab = wmt.initialize_vocabulary(fr_vocab_path) + if FLAGS.simple_tokenizer: + en_ids = [wmt.sentence_to_token_ids( + l, en_vocab, tokenizer=wmt.space_tokenizer, + normalize_digits=FLAGS.normalize_digits) + for l in en_lines] + else: + en_ids = [wmt.sentence_to_token_ids(l, en_vocab) for l in en_lines] + # Translate. + results = [] + for idx, token_ids in enumerate(en_ids): + if idx % 5 == 0: + data.print_out("Translating example %d of %d." % (idx, len(en_ids))) + # Which bucket does it belong to? + buckets = [b for b in xrange(len(data.bins)) + if data.bins[b] >= len(token_ids)] + if buckets: + result, result_cost = [], 100000000.0 + for bucket_id in buckets: + if data.bins[bucket_id] > MAXLEN_F * len(token_ids) + EVAL_LEN_INCR: + break + # Get a 1-element batch to feed the sentence to the model. 
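+ # The tokenized sentence is supplied through `preset`, so no dataset is read for this batch (None is passed in place of one).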
+ used_batch_size = 1 # batch_size + inp, target = data.get_batch( + bucket_id, used_batch_size, None, FLAGS.height, + preset=([token_ids], [[]])) + loss, output_logits, _, _ = model.step( + sess, inp, target, None, beam_size=FLAGS.beam_size) + outputs = [int(o[0]) for o in output_logits] + loss = loss[0] - (data.bins[bucket_id] * FLAGS.length_norm) + if FLAGS.simple_tokenizer: + cur_out = outputs + if wmt.EOS_ID in cur_out: + cur_out = cur_out[:cur_out.index(wmt.EOS_ID)] + res_tags = [rev_fr_vocab[o] for o in cur_out] + bad_words, bad_brack = wmt.parse_constraints(token_ids, res_tags) + loss += 1000.0 * bad_words + 100.0 * bad_brack + # print (bucket_id, loss) + if loss < result_cost: + result = outputs + result_cost = loss + final = linearize(result, rev_fr_vocab) + results.append("%s\t%s\n" % (final, fr_lines[idx])) + # print result_cost + sys.stderr.write(results[-1]) + sys.stderr.flush() + else: + sys.stderr.write("TOOO_LONG\t%s\n" % fr_lines[idx]) + sys.stderr.flush() + if xid: + decode_suffix = "beam%dln%dn" % (FLAGS.beam_size, + int(100 * FLAGS.length_norm)) + with tf.gfile.GFile(path + ".res" + decode_suffix + xid, mode="w") as f: + for line in results: + f.write(line) + + +def mul(l): + res = 1.0 + for s in l: + res *= s + return res + + +def interactive(): + """Interactively probe an existing model.""" + with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess: + # Initialize model. + (model, _, _, _, _, (_, _, en_path, fr_path), _, _) = initialize(sess) + # Load vocabularies. + en_vocab, rev_en_vocab = wmt.initialize_vocabulary(en_path) + _, rev_fr_vocab = wmt.initialize_vocabulary(fr_path) + # Print out vectors and variables. + if FLAGS.nprint > 0 and FLAGS.word_vector_file_en: + print_vectors("embedding:0", en_path, FLAGS.word_vector_file_en) + if FLAGS.nprint > 0 and FLAGS.word_vector_file_fr: + print_vectors("target_embedding:0", fr_path, FLAGS.word_vector_file_fr) + total = 0 + for v in tf.trainable_variables(): + shape = v.get_shape().as_list() + total += mul(shape) + print(v.name, shape, mul(shape)) + print(total) + # Start interactive loop. + sys.stdout.write("Input to Neural GPU Translation Model.\n") + sys.stdout.write("> ") + sys.stdout.flush() + inpt = sys.stdin.readline(), "" + while inpt: + cures = [] + # Get token-ids for the input sentence. + if FLAGS.simple_tokenizer: + token_ids = wmt.sentence_to_token_ids( + inpt, en_vocab, tokenizer=wmt.space_tokenizer, + normalize_digits=FLAGS.normalize_digits) + else: + token_ids = wmt.sentence_to_token_ids(inpt, en_vocab) + print([rev_en_vocab[t] for t in token_ids]) + # Which bucket does it belong to? + buckets = [b for b in xrange(len(data.bins)) + if data.bins[b] >= max(len(token_ids), len(cures))] + if cures: + buckets = [buckets[0]] + if buckets: + result, result_cost = [], 10000000.0 + for bucket_id in buckets: + if data.bins[bucket_id] > MAXLEN_F * len(token_ids) + EVAL_LEN_INCR: + break + glen = 1 + for gen_idx in xrange(glen): + # Get a 1-element batch to feed the sentence to the model. + inp, target = data.get_batch( + bucket_id, 1, None, FLAGS.height, preset=([token_ids], [cures])) + loss, output_logits, _, _ = model.step( + sess, inp, target, None, beam_size=FLAGS.beam_size, + update_mem=False) + # If it is a greedy decoder, outputs are argmaxes of output_logits. 
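+ # With beam search, output_logits already holds token ids; in the greedy case we take the argmax of each logit ourselves.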
+ if FLAGS.beam_size > 1: + outputs = [int(o) for o in output_logits] + else: + loss = loss[0] - (data.bins[bucket_id] * FLAGS.length_norm) + outputs = [int(np.argmax(logit, axis=1)) + for logit in output_logits] + print([rev_fr_vocab[t] for t in outputs]) + print(loss, data.bins[bucket_id]) + print(linearize(outputs, rev_fr_vocab)) + cures.append(outputs[gen_idx]) + print(cures) + print(linearize(cures, rev_fr_vocab)) + if FLAGS.simple_tokenizer: + cur_out = outputs + if wmt.EOS_ID in cur_out: + cur_out = cur_out[:cur_out.index(wmt.EOS_ID)] + res_tags = [rev_fr_vocab[o] for o in cur_out] + bad_words, bad_brack = wmt.parse_constraints(token_ids, res_tags) + loss += 1000.0 * bad_words + 100.0 * bad_brack + if loss < result_cost: + result = outputs + result_cost = loss + print("FINAL", result_cost) + print([rev_fr_vocab[t] for t in result]) + print(linearize(result, rev_fr_vocab)) + else: + print("TOOO_LONG") + sys.stdout.write("> ") + sys.stdout.flush() + inpt = sys.stdin.readline(), "" + + +def main(_): + if FLAGS.mode == 0: + train() + elif FLAGS.mode == 1: + evaluate() + else: + interactive() + +if __name__ == "__main__": + tf.app.run() diff --git a/models/research/neural_gpu/program_utils.py b/models/research/neural_gpu/program_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1f49d01292012487c4a01a5832fb044a378645ff --- /dev/null +++ b/models/research/neural_gpu/program_utils.py @@ -0,0 +1,444 @@ +# Copyright 2015 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Utilities for generating program synthesis and evaluation data.""" + +import contextlib +import sys +import random +import os + +try: + import StringIO +except ImportError: + from io import StringIO + +class ListType(object): + def __init__(self, arg): + self.arg = arg + + def __str__(self): + return "[" + str(self.arg) + "]" + + def __eq__(self, other): + if not isinstance(other, ListType): + return False + return self.arg == other.arg + + def __hash__(self): + return hash(self.arg) + +class VarType(object): + def __init__(self, arg): + self.arg = arg + + def __str__(self): + return str(self.arg) + + def __eq__(self, other): + if not isinstance(other, VarType): + return False + return self.arg == other.arg + + def __hash__(self): + return hash(self.arg) + +class FunctionType(object): + def __init__(self, args): + self.args = args + + def __str__(self): + return str(self.args[0]) + " -> " + str(self.args[1]) + + def __eq__(self, other): + if not isinstance(other, FunctionType): + return False + return self.args == other.args + + def __hash__(self): + return hash(tuple(self.args)) + + +class Function(object): + def __init__(self, name, arg_types, output_type, fn_arg_types = None): + self.name = name + self.arg_types = arg_types + self.fn_arg_types = fn_arg_types or [] + self.output_type = output_type + +Null = 100 +## Functions +f_head = Function("c_head", [ListType("Int")], "Int") +def c_head(xs): return xs[0] if len(xs) > 0 else Null + +f_last = Function("c_last", [ListType("Int")], "Int") +def c_last(xs): return xs[-1] if len(xs) > 0 else Null + +f_take = Function("c_take", ["Int", ListType("Int")], ListType("Int")) +def c_take(n, xs): return xs[:n] + +f_drop = Function("c_drop", ["Int", ListType("Int")], ListType("Int")) +def c_drop(n, xs): return xs[n:] + +f_access = Function("c_access", ["Int", ListType("Int")], "Int") +def c_access(n, xs): return xs[n] if n >= 0 and len(xs) > n else Null + +f_max = Function("c_max", [ListType("Int")], "Int") +def c_max(xs): return max(xs) if len(xs) > 0 else Null + +f_min = Function("c_min", [ListType("Int")], "Int") +def c_min(xs): return min(xs) if len(xs) > 0 else Null + +f_reverse = Function("c_reverse", [ListType("Int")], ListType("Int")) +def c_reverse(xs): return list(reversed(xs)) + +f_sort = Function("sorted", [ListType("Int")], ListType("Int")) +# def c_sort(xs): return sorted(xs) + +f_sum = Function("sum", [ListType("Int")], "Int") +# def c_sum(xs): return sum(xs) + + +## Lambdas +# Int -> Int +def plus_one(x): return x + 1 +def minus_one(x): return x - 1 +def times_two(x): return x * 2 +def neg(x): return x * (-1) +def div_two(x): return int(x/2) +def sq(x): return x**2 +def times_three(x): return x * 3 +def div_three(x): return int(x/3) +def times_four(x): return x * 4 +def div_four(x): return int(x/4) + +# Int -> Bool +def pos(x): return x > 0 +def neg(x): return x < 0 +def even(x): return x%2 == 0 +def odd(x): return x%2 == 1 + +# Int -> Int -> Int +def add(x, y): return x + y +def sub(x, y): return x - y +def mul(x, y): return x * y + +# HOFs +f_map = Function("map", [ListType("Int")], + ListType("Int"), + [FunctionType(["Int", "Int"])]) +f_filter = Function("filter", [ListType("Int")], + ListType("Int"), + [FunctionType(["Int", "Bool"])]) +f_count = Function("c_count", [ListType("Int")], + "Int", + [FunctionType(["Int", "Bool"])]) +def c_count(f, xs): return len([x for x in xs if f(x)]) + +f_zipwith = Function("c_zipwith", [ListType("Int"), 
ListType("Int")], + ListType("Int"), + [FunctionType(["Int", "Int", "Int"])]) #FIX +def c_zipwith(f, xs, ys): return [f(x, y) for (x, y) in zip(xs, ys)] + +f_scan = Function("c_scan", [ListType("Int")], + ListType("Int"), + [FunctionType(["Int", "Int", "Int"])]) +def c_scan(f, xs): + out = xs + for i in range(1, len(xs)): + out[i] = f(xs[i], xs[i -1]) + return out + +@contextlib.contextmanager +def stdoutIO(stdout=None): + old = sys.stdout + if stdout is None: + stdout = StringIO.StringIO() + sys.stdout = stdout + yield stdout + sys.stdout = old + + +def evaluate(program_str, input_names_to_vals, default="ERROR"): + exec_str = [] + for name, val in input_names_to_vals.iteritems(): + exec_str += name + " = " + str(val) + "; " + exec_str += program_str + if type(exec_str) is list: + exec_str = "".join(exec_str) + + with stdoutIO() as s: + # pylint: disable=bare-except + try: + exec(exec_str + " print(out)") + return s.getvalue()[:-1] + except: + return default + # pylint: enable=bare-except + + +class Statement(object): + """Statement class.""" + + def __init__(self, fn, output_var, arg_vars, fn_args=None): + self.fn = fn + self.output_var = output_var + self.arg_vars = arg_vars + self.fn_args = fn_args or [] + + def __str__(self): + return "%s = %s(%s%s%s)"%(self.output_var, + self.fn.name, + ", ".join(self.fn_args), + ", " if self.fn_args else "", + ", ".join(self.arg_vars)) + + def substitute(self, env): + self.output_var = env.get(self.output_var, self.output_var) + self.arg_vars = [env.get(v, v) for v in self.arg_vars] + + +class ProgramGrower(object): + """Grow programs.""" + + def __init__(self, functions, types_to_lambdas): + self.functions = functions + self.types_to_lambdas = types_to_lambdas + + def grow_body(self, new_var_name, dependencies, types_to_vars): + """Grow the program body.""" + choices = [] + for f in self.functions: + if all([a in types_to_vars.keys() for a in f.arg_types]): + choices.append(f) + + f = random.choice(choices) + args = [] + for t in f.arg_types: + possible_vars = random.choice(types_to_vars[t]) + var = random.choice(possible_vars) + args.append(var) + dependencies.setdefault(new_var_name, []).extend( + [var] + (dependencies[var])) + + fn_args = [random.choice(self.types_to_lambdas[t]) for t in f.fn_arg_types] + types_to_vars.setdefault(f.output_type, []).append(new_var_name) + + return Statement(f, new_var_name, args, fn_args) + + def grow(self, program_len, input_types): + """Grow the program.""" + var_names = list(reversed(map(chr, range(97, 123)))) + dependencies = dict() + types_to_vars = dict() + input_names = [] + for t in input_types: + var = var_names.pop() + dependencies[var] = [] + types_to_vars.setdefault(t, []).append(var) + input_names.append(var) + + statements = [] + for _ in range(program_len - 1): + var = var_names.pop() + statements.append(self.grow_body(var, dependencies, types_to_vars)) + statements.append(self.grow_body("out", dependencies, types_to_vars)) + + new_var_names = [c for c in map(chr, range(97, 123)) + if c not in input_names] + new_var_names.reverse() + keep_statements = [] + env = dict() + for s in statements: + if s.output_var in dependencies["out"]: + keep_statements.append(s) + env[s.output_var] = new_var_names.pop() + if s.output_var == "out": + keep_statements.append(s) + + for k in keep_statements: + k.substitute(env) + + return Program(input_names, input_types, ";".join( + [str(k) for k in keep_statements])) + + +class Program(object): + """The program class.""" + + def __init__(self, input_names, input_types, 
body): + self.input_names = input_names + self.input_types = input_types + self.body = body + + def evaluate(self, inputs): + """Evaluate this program.""" + if len(inputs) != len(self.input_names): + raise AssertionError("inputs and input_names have to" + "have the same len. inp: %s , names: %s" % + (str(inputs), str(self.input_names))) + inp_str = "" + for (name, inp) in zip(self.input_names, inputs): + inp_str += name + " = " + str(inp) + "; " + + with stdoutIO() as s: + # pylint: disable=exec-used + exec(inp_str + self.body + "; print(out)") + # pylint: enable=exec-used + return s.getvalue()[:-1] + + def flat_str(self): + out = "" + for s in self.body.split(";"): + out += s + ";" + return out + + def __str__(self): + out = "" + for (n, t) in zip(self.input_names, self.input_types): + out += n + " = " + str(t) + "\n" + for s in self.body.split(";"): + out += s + "\n" + return out + + +prog_vocab = [] +prog_rev_vocab = {} + + +def tokenize(string, tokens=None): + """Tokenize the program string.""" + if tokens is None: + tokens = prog_vocab + tokens = sorted(tokens, key=len, reverse=True) + out = [] + string = string.strip() + while string: + found = False + for t in tokens: + if string.startswith(t): + out.append(t) + string = string[len(t):] + found = True + break + if not found: + raise ValueError("Couldn't tokenize this: " + string) + string = string.strip() + return out + + +def clean_up(output, max_val=100): + o = eval(str(output)) + if isinstance(o, bool): + return o + if isinstance(o, int): + if o >= 0: + return min(o, max_val) + else: + return max(o, -1 * max_val) + if isinstance(o, list): + return [clean_up(l) for l in o] + + +def make_vocab(): + gen(2, 0) + + +def gen(max_len, how_many): + """Generate some programs.""" + functions = [f_head, f_last, f_take, f_drop, f_access, f_max, f_min, + f_reverse, f_sort, f_sum, f_map, f_filter, f_count, f_zipwith, + f_scan] + + types_to_lambdas = { + FunctionType(["Int", "Int"]): ["plus_one", "minus_one", "times_two", + "div_two", "sq", "times_three", + "div_three", "times_four", "div_four"], + FunctionType(["Int", "Bool"]): ["pos", "neg", "even", "odd"], + FunctionType(["Int", "Int", "Int"]): ["add", "sub", "mul"] + } + + tokens = [] + for f in functions: + tokens.append(f.name) + for v in types_to_lambdas.values(): + tokens.extend(v) + tokens.extend(["=", ";", ",", "(", ")", "[", "]", "Int", "out"]) + tokens.extend(map(chr, range(97, 123))) + + io_tokens = map(str, range(-220, 220)) + if not prog_vocab: + prog_vocab.extend(["_PAD", "_EOS"] + tokens + io_tokens) + for i, t in enumerate(prog_vocab): + prog_rev_vocab[t] = i + + io_tokens += [",", "[", "]", ")", "(", "None"] + grower = ProgramGrower(functions=functions, + types_to_lambdas=types_to_lambdas) + + def mk_inp(l): + return [random.choice(range(-5, 5)) for _ in range(l)] + + tar = [ListType("Int")] + inps = [[mk_inp(3)], [mk_inp(5)], [mk_inp(7)], [mk_inp(15)]] + + save_prefix = None + outcomes_to_programs = dict() + tried = set() + counter = 0 + choices = [0] if max_len == 0 else range(max_len) + while counter < 100 * how_many and len(outcomes_to_programs) < how_many: + counter += 1 + length = random.choice(choices) + t = grower.grow(length, tar) + while t in tried: + length = random.choice(choices) + t = grower.grow(length, tar) + # print(t.flat_str()) + tried.add(t) + outcomes = [clean_up(t.evaluate(i)) for i in inps] + outcome_str = str(zip(inps, outcomes)) + if outcome_str in outcomes_to_programs: + outcomes_to_programs[outcome_str] = min( + [t.flat_str(), 
outcomes_to_programs[outcome_str]], + key=lambda x: len(tokenize(x, tokens))) + else: + outcomes_to_programs[outcome_str] = t.flat_str() + if counter % 5000 == 0: + print("== proggen: tried: " + str(counter)) + print("== proggen: kept: " + str(len(outcomes_to_programs))) + + if counter % 250000 == 0 and save_prefix is not None: + print("saving...") + save_counter = 0 + progfilename = os.path.join(save_prefix, "prog_" + str(counter) + ".txt") + iofilename = os.path.join(save_prefix, "io_" + str(counter) + ".txt") + prog_token_filename = os.path.join(save_prefix, + "prog_tokens_" + str(counter) + ".txt") + io_token_filename = os.path.join(save_prefix, + "io_tokens_" + str(counter) + ".txt") + with open(progfilename, "a+") as fp, \ + open(iofilename, "a+") as fi, \ + open(prog_token_filename, "a+") as ftp, \ + open(io_token_filename, "a+") as fti: + for (o, p) in outcomes_to_programs.iteritems(): + save_counter += 1 + if save_counter % 500 == 0: + print("saving %d of %d" % (save_counter, len(outcomes_to_programs))) + fp.write(p+"\n") + fi.write(o+"\n") + ftp.write(str(tokenize(p, tokens))+"\n") + fti.write(str(tokenize(o, io_tokens))+"\n") + + return list(outcomes_to_programs.values()) diff --git a/models/research/neural_gpu/wmt_utils.py b/models/research/neural_gpu/wmt_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ef831918f9c9279eb1c6e560e5730739e5fe9521 --- /dev/null +++ b/models/research/neural_gpu/wmt_utils.py @@ -0,0 +1,437 @@ +# Copyright 2015 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utilities for downloading data from WMT, tokenizing, vocabularies.""" + +from __future__ import print_function + +import gzip +import os +import re +import tarfile + +from six.moves import urllib +import tensorflow as tf + +# Special vocabulary symbols - we always put them at the start. +_PAD = b"_PAD" +_GO = b"_GO" +_EOS = b"_EOS" +_UNK = b"_CHAR_UNK" +_SPACE = b"_SPACE" +_START_VOCAB = [_PAD, _GO, _EOS, _UNK, _SPACE] + +PAD_ID = 0 +GO_ID = 1 +EOS_ID = 2 +UNK_ID = 3 +SPACE_ID = 4 + +# Regular expressions used to tokenize. +_CHAR_MARKER = "_CHAR_" +_CHAR_MARKER_LEN = len(_CHAR_MARKER) +_SPEC_CHARS = "" + chr(226) + chr(153) + chr(128) +_PUNCTUATION = "][.,!?\"':;%$#@&*+}{|><=/^~)(_`,0123456789" + _SPEC_CHARS + "-" +_WORD_SPLIT = re.compile("([" + _PUNCTUATION + "])") +_OLD_WORD_SPLIT = re.compile(b"([.,!?\"':;)(])") +_DIGIT_RE = re.compile(br"\d") + +# URLs for WMT data. 
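+# Training data comes from the WMT'10 giga-fren corpus and development data from the WMT'15 dev release (newstest2013).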
+_WMT_ENFR_TRAIN_URL = "http://www.statmt.org/wmt10/training-giga-fren.tar" +_WMT_ENFR_DEV_URL = "http://www.statmt.org/wmt15/dev-v2.tgz" + + +def maybe_download(directory, filename, url): + """Download filename from url unless it's already in directory.""" + if not tf.gfile.Exists(directory): + print("Creating directory %s" % directory) + os.mkdir(directory) + filepath = os.path.join(directory, filename) + if not tf.gfile.Exists(filepath): + print("Downloading %s to %s" % (url, filepath)) + filepath, _ = urllib.request.urlretrieve(url, filepath) + statinfo = os.stat(filepath) + print("Successfully downloaded", filename, statinfo.st_size, "bytes") + return filepath + + +def gunzip_file(gz_path, new_path): + """Unzips from gz_path into new_path.""" + print("Unpacking %s to %s" % (gz_path, new_path)) + with gzip.open(gz_path, "rb") as gz_file: + with open(new_path, "wb") as new_file: + for line in gz_file: + new_file.write(line) + + +def get_wmt_enfr_train_set(directory): + """Download the WMT en-fr training corpus to directory unless it's there.""" + train_path = os.path.join(directory, "giga-fren.release2.fixed") + if not (tf.gfile.Exists(train_path +".fr") and + tf.gfile.Exists(train_path +".en")): + corpus_file = maybe_download(directory, "training-giga-fren.tar", + _WMT_ENFR_TRAIN_URL) + print("Extracting tar file %s" % corpus_file) + with tarfile.open(corpus_file, "r") as corpus_tar: + corpus_tar.extractall(directory) + gunzip_file(train_path + ".fr.gz", train_path + ".fr") + gunzip_file(train_path + ".en.gz", train_path + ".en") + return train_path + + +def get_wmt_enfr_dev_set(directory): + """Download the WMT en-fr training corpus to directory unless it's there.""" + dev_name = "newstest2013" + dev_path = os.path.join(directory, dev_name) + if not (tf.gfile.Exists(dev_path + ".fr") and + tf.gfile.Exists(dev_path + ".en")): + dev_file = maybe_download(directory, "dev-v2.tgz", _WMT_ENFR_DEV_URL) + print("Extracting tgz file %s" % dev_file) + with tarfile.open(dev_file, "r:gz") as dev_tar: + fr_dev_file = dev_tar.getmember("dev/" + dev_name + ".fr") + en_dev_file = dev_tar.getmember("dev/" + dev_name + ".en") + fr_dev_file.name = dev_name + ".fr" # Extract without "dev/" prefix. 
+ en_dev_file.name = dev_name + ".en" + dev_tar.extract(fr_dev_file, directory) + dev_tar.extract(en_dev_file, directory) + return dev_path + + +def is_char(token): + if len(token) > _CHAR_MARKER_LEN: + if token[:_CHAR_MARKER_LEN] == _CHAR_MARKER: + return True + return False + + +def basic_detokenizer(tokens): + """Reverse the process of the basic tokenizer below.""" + result = [] + previous_nospace = True + for t in tokens: + if is_char(t): + result.append(t[_CHAR_MARKER_LEN:]) + previous_nospace = True + elif t == _SPACE: + result.append(" ") + previous_nospace = True + elif previous_nospace: + result.append(t) + previous_nospace = False + else: + result.extend([" ", t]) + previous_nospace = False + return "".join(result) + + +old_style = False + + +def basic_tokenizer(sentence): + """Very basic tokenizer: split the sentence into a list of tokens.""" + words = [] + if old_style: + for space_separated_fragment in sentence.strip().split(): + words.extend(re.split(_OLD_WORD_SPLIT, space_separated_fragment)) + return [w for w in words if w] + for space_separated_fragment in sentence.strip().split(): + tokens = [t for t in re.split(_WORD_SPLIT, space_separated_fragment) if t] + first_is_char = False + for i, t in enumerate(tokens): + if len(t) == 1 and t in _PUNCTUATION: + tokens[i] = _CHAR_MARKER + t + if i == 0: + first_is_char = True + if words and words[-1] != _SPACE and (first_is_char or is_char(words[-1])): + tokens = [_SPACE] + tokens + spaced_tokens = [] + for i, tok in enumerate(tokens): + spaced_tokens.append(tokens[i]) + if i < len(tokens) - 1: + if tok != _SPACE and not (is_char(tok) or is_char(tokens[i+1])): + spaced_tokens.append(_SPACE) + words.extend(spaced_tokens) + return words + + +def space_tokenizer(sentence): + return sentence.strip().split() + + +def is_pos_tag(token): + """Check if token is a part-of-speech tag.""" + return(token in ["CC", "CD", "DT", "EX", "FW", "IN", "JJ", "JJR", + "JJS", "LS", "MD", "NN", "NNS", "NNP", "NNPS", "PDT", + "POS", "PRP", "PRP$", "RB", "RBR", "RBS", "RP", "SYM", "TO", + "UH", "VB", "VBD", "VBG", "VBN", "VBP", "VBZ", "WDT", "WP", + "WP$", "WRB", ".", ",", ":", ")", "-LRB-", "(", "-RRB-", + "HYPH", "$", "``", "''", "ADD", "AFX", "QTR", "BES", "-DFL-", + "GW", "HVS", "NFP"]) + + +def parse_constraints(inpt, res): + ntags = len(res) + nwords = len(inpt) + npostags = len([x for x in res if is_pos_tag(x)]) + nclose = len([x for x in res if x[0] == "/"]) + nopen = ntags - nclose - npostags + return (abs(npostags - nwords), abs(nclose - nopen)) + + +def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size, + tokenizer=None, normalize_digits=False): + """Create vocabulary file (if it does not exist yet) from data file. + + Data file is assumed to contain one sentence per line. Each sentence is + tokenized and digits are normalized (if normalize_digits is set). + Vocabulary contains the most-frequent tokens up to max_vocabulary_size. + We write it to vocabulary_path in a one-token-per-line format, so that later + token in the first line gets id=0, second line gets id=1, and so on. + + Args: + vocabulary_path: path where the vocabulary will be created. + data_path: data file that will be used to create vocabulary. + max_vocabulary_size: limit on the size of the created vocabulary. + tokenizer: a function to use to tokenize each data sentence; + if None, basic_tokenizer will be used. + normalize_digits: Boolean; if true, all digits are replaced by 0s. 
+ """ + if not tf.gfile.Exists(vocabulary_path): + print("Creating vocabulary %s from data %s" % (vocabulary_path, data_path)) + vocab, chars = {}, {} + for c in _PUNCTUATION: + chars[c] = 1 + + # Read French file. + with tf.gfile.GFile(data_path + ".fr", mode="rb") as f: + counter = 0 + for line_in in f: + line = " ".join(line_in.split()) + counter += 1 + if counter % 100000 == 0: + print(" processing fr line %d" % counter) + for c in line: + if c in chars: + chars[c] += 1 + else: + chars[c] = 1 + tokens = tokenizer(line) if tokenizer else basic_tokenizer(line) + tokens = [t for t in tokens if not is_char(t) and t != _SPACE] + for w in tokens: + word = re.sub(_DIGIT_RE, b"0", w) if normalize_digits else w + if word in vocab: + vocab[word] += 1000000000 # We want target words first. + else: + vocab[word] = 1000000000 + + # Read English file. + with tf.gfile.GFile(data_path + ".en", mode="rb") as f: + counter = 0 + for line_in in f: + line = " ".join(line_in.split()) + counter += 1 + if counter % 100000 == 0: + print(" processing en line %d" % counter) + for c in line: + if c in chars: + chars[c] += 1 + else: + chars[c] = 1 + tokens = tokenizer(line) if tokenizer else basic_tokenizer(line) + tokens = [t for t in tokens if not is_char(t) and t != _SPACE] + for w in tokens: + word = re.sub(_DIGIT_RE, b"0", w) if normalize_digits else w + if word in vocab: + vocab[word] += 1 + else: + vocab[word] = 1 + + sorted_vocab = sorted(vocab, key=vocab.get, reverse=True) + sorted_chars = sorted(chars, key=vocab.get, reverse=True) + sorted_chars = [_CHAR_MARKER + c for c in sorted_chars] + vocab_list = _START_VOCAB + sorted_chars + sorted_vocab + if tokenizer: + vocab_list = _START_VOCAB + sorted_vocab + if len(vocab_list) > max_vocabulary_size: + vocab_list = vocab_list[:max_vocabulary_size] + with tf.gfile.GFile(vocabulary_path, mode="wb") as vocab_file: + for w in vocab_list: + vocab_file.write(w + b"\n") + + +def initialize_vocabulary(vocabulary_path): + """Initialize vocabulary from file. + + We assume the vocabulary is stored one-item-per-line, so a file: + dog + cat + will result in a vocabulary {"dog": 0, "cat": 1}, and this function will + also return the reversed-vocabulary ["dog", "cat"]. + + Args: + vocabulary_path: path to the file containing the vocabulary. + + Returns: + a pair: the vocabulary (a dictionary mapping string to integers), and + the reversed vocabulary (a list, which reverses the vocabulary mapping). + + Raises: + ValueError: if the provided vocabulary_path does not exist. + """ + if tf.gfile.Exists(vocabulary_path): + rev_vocab = [] + with tf.gfile.GFile(vocabulary_path, mode="rb") as f: + rev_vocab.extend(f.readlines()) + rev_vocab = [line.strip() for line in rev_vocab] + vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)]) + return vocab, rev_vocab + else: + raise ValueError("Vocabulary file %s not found.", vocabulary_path) + + +def sentence_to_token_ids_raw(sentence, vocabulary, + tokenizer=None, normalize_digits=old_style): + """Convert a string to list of integers representing token-ids. + + For example, a sentence "I have a dog" may become tokenized into + ["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2, + "a": 4, "dog": 7"} this function will return [1, 2, 4, 7]. + + Args: + sentence: the sentence in bytes format to convert to token-ids. + vocabulary: a dictionary mapping tokens to integers. + tokenizer: a function to use to tokenize each sentence; + if None, basic_tokenizer will be used. 
+ normalize_digits: Boolean; if true, all digits are replaced by 0s. + + Returns: + a list of integers, the token-ids for the sentence. + """ + if tokenizer: + words = tokenizer(sentence) + else: + words = basic_tokenizer(sentence) + result = [] + for w in words: + if normalize_digits: + w = re.sub(_DIGIT_RE, b"0", w) + if w in vocabulary: + result.append(vocabulary[w]) + else: + if tokenizer: + result.append(UNK_ID) + else: + result.append(SPACE_ID) + for c in w: + result.append(vocabulary.get(_CHAR_MARKER + c, UNK_ID)) + result.append(SPACE_ID) + while result and result[0] == SPACE_ID: + result = result[1:] + while result and result[-1] == SPACE_ID: + result = result[:-1] + return result + + +def sentence_to_token_ids(sentence, vocabulary, + tokenizer=None, normalize_digits=old_style): + """Convert a string to list of integers representing token-ids, tab=0.""" + tab_parts = sentence.strip().split("\t") + toks = [sentence_to_token_ids_raw(t, vocabulary, tokenizer, normalize_digits) + for t in tab_parts] + res = [] + for t in toks: + res.extend(t) + res.append(0) + return res[:-1] + + +def data_to_token_ids(data_path, target_path, vocabulary_path, + tokenizer=None, normalize_digits=False): + """Tokenize data file and turn into token-ids using given vocabulary file. + + This function loads data line-by-line from data_path, calls the above + sentence_to_token_ids, and saves the result to target_path. See comment + for sentence_to_token_ids on the details of token-ids format. + + Args: + data_path: path to the data file in one-sentence-per-line format. + target_path: path where the file with token-ids will be created. + vocabulary_path: path to the vocabulary file. + tokenizer: a function to use to tokenize each sentence; + if None, basic_tokenizer will be used. + normalize_digits: Boolean; if true, all digits are replaced by 0s. + """ + if not tf.gfile.Exists(target_path): + print("Tokenizing data in %s" % data_path) + vocab, _ = initialize_vocabulary(vocabulary_path) + with tf.gfile.GFile(data_path, mode="rb") as data_file: + with tf.gfile.GFile(target_path, mode="w") as tokens_file: + counter = 0 + for line in data_file: + counter += 1 + if counter % 100000 == 0: + print(" tokenizing line %d" % counter) + token_ids = sentence_to_token_ids(line, vocab, tokenizer, + normalize_digits) + tokens_file.write(" ".join([str(tok) for tok in token_ids]) + "\n") + + +def prepare_wmt_data(data_dir, vocabulary_size, + tokenizer=None, normalize_digits=False): + """Get WMT data into data_dir, create vocabularies and tokenize data. + + Args: + data_dir: directory in which the data sets will be stored. + vocabulary_size: size of the joint vocabulary to create and use. + tokenizer: a function to use to tokenize each data sentence; + if None, basic_tokenizer will be used. + normalize_digits: Boolean; if true, all digits are replaced by 0s. + + Returns: + A tuple of 6 elements: + (1) path to the token-ids for English training data-set, + (2) path to the token-ids for French training data-set, + (3) path to the token-ids for English development data-set, + (4) path to the token-ids for French development data-set, + (5) path to the vocabulary file, + (6) path to the vocabulary file (for compatibility with non-joint vocab). + """ + # Get wmt data to the specified directory. + train_path = get_wmt_enfr_train_set(data_dir) + dev_path = get_wmt_enfr_dev_set(data_dir) + + # Create vocabularies of the appropriate sizes. 
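+ # A single joint vocabulary is built from both the .fr and .en training files and reused for source and target.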
+ vocab_path = os.path.join(data_dir, "vocab%d.txt" % vocabulary_size) + create_vocabulary(vocab_path, train_path, vocabulary_size, + tokenizer=tokenizer, normalize_digits=normalize_digits) + + # Create token ids for the training data. + fr_train_ids_path = train_path + (".ids%d.fr" % vocabulary_size) + en_train_ids_path = train_path + (".ids%d.en" % vocabulary_size) + data_to_token_ids(train_path + ".fr", fr_train_ids_path, vocab_path, + tokenizer=tokenizer, normalize_digits=normalize_digits) + data_to_token_ids(train_path + ".en", en_train_ids_path, vocab_path, + tokenizer=tokenizer, normalize_digits=normalize_digits) + + # Create token ids for the development data. + fr_dev_ids_path = dev_path + (".ids%d.fr" % vocabulary_size) + en_dev_ids_path = dev_path + (".ids%d.en" % vocabulary_size) + data_to_token_ids(dev_path + ".fr", fr_dev_ids_path, vocab_path, + tokenizer=tokenizer, normalize_digits=normalize_digits) + data_to_token_ids(dev_path + ".en", en_dev_ids_path, vocab_path, + tokenizer=tokenizer, normalize_digits=normalize_digits) + + return (en_train_ids_path, fr_train_ids_path, + en_dev_ids_path, fr_dev_ids_path, + vocab_path, vocab_path) diff --git a/models/research/neural_programmer/README.md b/models/research/neural_programmer/README.md new file mode 100644 index 0000000000000000000000000000000000000000..dcc27f6fb015ec625935a0ea37d814a2ba10d2e3 --- /dev/null +++ b/models/research/neural_programmer/README.md @@ -0,0 +1,26 @@ +![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) +![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +# Neural Programmer + +Implementation of the Neural Programmer model as described in this [paper](https://openreview.net/pdf?id=ry2YOrcge). + +Download and extract the data from the [WikiTableQuestions](https://ppasupat.github.io/WikiTableQuestions/) site. The dataset contains +11321, 2831, and 4344 examples for training, development, and testing respectively. We use their tokenization, number and date pre-processing. Please note that the above paper used the [initial release](https://github.com/ppasupat/WikiTableQuestions/releases/tag/v0.2) for training, development and testing. + +Change the `data_dir FLAG` to the location of the data. + +### Training +Run `python neural_programmer.py` + +The models are written to `FLAGS.output_dir`. + +### Testing +Run `python neural_programmer.py --evaluator_job=True` + +The models are loaded from `FLAGS.output_dir`. The evaluation is done on development data. + +In case of errors because of encoding, add `"# -*- coding: utf-8 -*-"` as the first line in `wiki_data.py` + +Maintained by Arvind Neelakantan (arvind2505) diff --git a/models/research/neural_programmer/data_utils.py b/models/research/neural_programmer/data_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4df80c66ad21d2e046fabf78446dd199ae117b44 --- /dev/null +++ b/models/research/neural_programmer/data_utils.py @@ -0,0 +1,666 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Functions for constructing vocabulary, converting the examples to integer format and building the required masks for batch computation Author: aneelakantan (Arvind Neelakantan) +""" + +from __future__ import print_function + +import copy +import numbers +import numpy as np +import wiki_data + + +def return_index(a): + for i in range(len(a)): + if (a[i] == 1.0): + return i + + +def construct_vocab(data, utility, add_word=False): + ans = [] + for example in data: + sent = "" + for word in example.question: + if (not (isinstance(word, numbers.Number))): + sent += word + " " + example.original_nc = copy.deepcopy(example.number_columns) + example.original_wc = copy.deepcopy(example.word_columns) + example.original_nc_names = copy.deepcopy(example.number_column_names) + example.original_wc_names = copy.deepcopy(example.word_column_names) + if (add_word): + continue + number_found = 0 + if (not (example.is_bad_example)): + for word in example.question: + if (isinstance(word, numbers.Number)): + number_found += 1 + else: + if (not (utility.word_ids.has_key(word))): + utility.words.append(word) + utility.word_count[word] = 1 + utility.word_ids[word] = len(utility.word_ids) + utility.reverse_word_ids[utility.word_ids[word]] = word + else: + utility.word_count[word] += 1 + for col_name in example.word_column_names: + for word in col_name: + if (isinstance(word, numbers.Number)): + number_found += 1 + else: + if (not (utility.word_ids.has_key(word))): + utility.words.append(word) + utility.word_count[word] = 1 + utility.word_ids[word] = len(utility.word_ids) + utility.reverse_word_ids[utility.word_ids[word]] = word + else: + utility.word_count[word] += 1 + for col_name in example.number_column_names: + for word in col_name: + if (isinstance(word, numbers.Number)): + number_found += 1 + else: + if (not (utility.word_ids.has_key(word))): + utility.words.append(word) + utility.word_count[word] = 1 + utility.word_ids[word] = len(utility.word_ids) + utility.reverse_word_ids[utility.word_ids[word]] = word + else: + utility.word_count[word] += 1 + + +def word_lookup(word, utility): + if (utility.word_ids.has_key(word)): + return word + else: + return utility.unk_token + + +def convert_to_int_2d_and_pad(a, utility): + ans = [] + #print a + for b in a: + temp = [] + if (len(b) > utility.FLAGS.max_entry_length): + b = b[0:utility.FLAGS.max_entry_length] + for remaining in range(len(b), utility.FLAGS.max_entry_length): + b.append(utility.dummy_token) + assert len(b) == utility.FLAGS.max_entry_length + for word in b: + temp.append(utility.word_ids[word_lookup(word, utility)]) + ans.append(temp) + #print ans + return ans + + +def convert_to_bool_and_pad(a, utility): + a = a.tolist() + for i in range(len(a)): + for j in range(len(a[i])): + if (a[i][j] < 1): + a[i][j] = False + else: + a[i][j] = True + a[i] = a[i] + [False] * (utility.FLAGS.max_elements - len(a[i])) + return a + + +seen_tables = {} + + +def partial_match(question, table, number): + answer = [] + match = {} + for i in range(len(table)): + temp = [] + for j 
in range(len(table[i])): + temp.append(0) + answer.append(temp) + for i in range(len(table)): + for j in range(len(table[i])): + for word in question: + if (number): + if (word == table[i][j]): + answer[i][j] = 1.0 + match[i] = 1.0 + else: + if (word in table[i][j]): + answer[i][j] = 1.0 + match[i] = 1.0 + return answer, match + + +def exact_match(question, table, number): + #performs exact match operation + answer = [] + match = {} + matched_indices = [] + for i in range(len(table)): + temp = [] + for j in range(len(table[i])): + temp.append(0) + answer.append(temp) + for i in range(len(table)): + for j in range(len(table[i])): + if (number): + for word in question: + if (word == table[i][j]): + match[i] = 1.0 + answer[i][j] = 1.0 + else: + table_entry = table[i][j] + for k in range(len(question)): + if (k + len(table_entry) <= len(question)): + if (table_entry == question[k:(k + len(table_entry))]): + #if(len(table_entry) == 1): + #print "match: ", table_entry, question + match[i] = 1.0 + answer[i][j] = 1.0 + matched_indices.append((k, len(table_entry))) + return answer, match, matched_indices + + +def partial_column_match(question, table, number): + answer = [] + for i in range(len(table)): + answer.append(0) + for i in range(len(table)): + for word in question: + if (word in table[i]): + answer[i] = 1.0 + return answer + + +def exact_column_match(question, table, number): + #performs exact match on column names + answer = [] + matched_indices = [] + for i in range(len(table)): + answer.append(0) + for i in range(len(table)): + table_entry = table[i] + for k in range(len(question)): + if (k + len(table_entry) <= len(question)): + if (table_entry == question[k:(k + len(table_entry))]): + answer[i] = 1.0 + matched_indices.append((k, len(table_entry))) + return answer, matched_indices + + +def get_max_entry(a): + e = {} + for w in a: + if (w != "UNK, "): + if (e.has_key(w)): + e[w] += 1 + else: + e[w] = 1 + if (len(e) > 0): + (key, val) = sorted(e.items(), key=lambda x: -1 * x[1])[0] + if (val > 1): + return key + else: + return -1.0 + else: + return -1.0 + + +def list_join(a): + ans = "" + for w in a: + ans += str(w) + ", " + return ans + + +def group_by_max(table, number): + #computes the most frequently occurring entry in a column + answer = [] + for i in range(len(table)): + temp = [] + for j in range(len(table[i])): + temp.append(0) + answer.append(temp) + for i in range(len(table)): + if (number): + curr = table[i] + else: + curr = [list_join(w) for w in table[i]] + max_entry = get_max_entry(curr) + #print i, max_entry + for j in range(len(curr)): + if (max_entry == curr[j]): + answer[i][j] = 1.0 + else: + answer[i][j] = 0.0 + return answer + + +def pick_one(a): + for i in range(len(a)): + if (1.0 in a[i]): + return True + return False + + +def check_processed_cols(col, utility): + return True in [ + True for y in col + if (y != utility.FLAGS.pad_int and y != + utility.FLAGS.bad_number_pre_process) + ] + + +def complete_wiki_processing(data, utility, train=True): + #convert to integers and padding + processed_data = [] + num_bad_examples = 0 + for example in data: + number_found = 0 + if (example.is_bad_example): + num_bad_examples += 1 + if (not (example.is_bad_example)): + example.string_question = example.question[:] + #entry match + example.processed_number_columns = example.processed_number_columns[:] + example.processed_word_columns = example.processed_word_columns[:] + example.word_exact_match, word_match, matched_indices = exact_match( + example.string_question, 
example.original_wc, number=False) + example.number_exact_match, number_match, _ = exact_match( + example.string_question, example.original_nc, number=True) + if (not (pick_one(example.word_exact_match)) and not ( + pick_one(example.number_exact_match))): + assert len(word_match) == 0 + assert len(number_match) == 0 + example.word_exact_match, word_match = partial_match( + example.string_question, example.original_wc, number=False) + #group by max + example.word_group_by_max = group_by_max(example.original_wc, False) + example.number_group_by_max = group_by_max(example.original_nc, True) + #column name match + example.word_column_exact_match, wcol_matched_indices = exact_column_match( + example.string_question, example.original_wc_names, number=False) + example.number_column_exact_match, ncol_matched_indices = exact_column_match( + example.string_question, example.original_nc_names, number=False) + if (not (1.0 in example.word_column_exact_match) and not ( + 1.0 in example.number_column_exact_match)): + example.word_column_exact_match = partial_column_match( + example.string_question, example.original_wc_names, number=False) + example.number_column_exact_match = partial_column_match( + example.string_question, example.original_nc_names, number=False) + if (len(word_match) > 0 or len(number_match) > 0): + example.question.append(utility.entry_match_token) + if (1.0 in example.word_column_exact_match or + 1.0 in example.number_column_exact_match): + example.question.append(utility.column_match_token) + example.string_question = example.question[:] + example.number_lookup_matrix = np.transpose( + example.number_lookup_matrix)[:] + example.word_lookup_matrix = np.transpose(example.word_lookup_matrix)[:] + example.columns = example.number_columns[:] + example.word_columns = example.word_columns[:] + example.len_total_cols = len(example.word_column_names) + len( + example.number_column_names) + example.column_names = example.number_column_names[:] + example.word_column_names = example.word_column_names[:] + example.string_column_names = example.number_column_names[:] + example.string_word_column_names = example.word_column_names[:] + example.sorted_number_index = [] + example.sorted_word_index = [] + example.column_mask = [] + example.word_column_mask = [] + example.processed_column_mask = [] + example.processed_word_column_mask = [] + example.word_column_entry_mask = [] + example.question_attention_mask = [] + example.question_number = example.question_number_1 = -1 + example.question_attention_mask = [] + example.ordinal_question = [] + example.ordinal_question_one = [] + new_question = [] + if (len(example.number_columns) > 0): + example.len_col = len(example.number_columns[0]) + else: + example.len_col = len(example.word_columns[0]) + for (start, length) in matched_indices: + for j in range(length): + example.question[start + j] = utility.unk_token + #print example.question + for word in example.question: + if (isinstance(word, numbers.Number) or wiki_data.is_date(word)): + if (not (isinstance(word, numbers.Number)) and + wiki_data.is_date(word)): + word = word.replace("X", "").replace("-", "") + number_found += 1 + if (number_found == 1): + example.question_number = word + if (len(example.ordinal_question) > 0): + example.ordinal_question[len(example.ordinal_question) - 1] = 1.0 + else: + example.ordinal_question.append(1.0) + elif (number_found == 2): + example.question_number_1 = word + if (len(example.ordinal_question_one) > 0): + 
example.ordinal_question_one[len(example.ordinal_question_one) - + 1] = 1.0 + else: + example.ordinal_question_one.append(1.0) + else: + new_question.append(word) + example.ordinal_question.append(0.0) + example.ordinal_question_one.append(0.0) + example.question = [ + utility.word_ids[word_lookup(w, utility)] for w in new_question + ] + example.question_attention_mask = [0.0] * len(example.question) + #when the first question number occurs before a word + example.ordinal_question = example.ordinal_question[0:len( + example.question)] + example.ordinal_question_one = example.ordinal_question_one[0:len( + example.question)] + #question-padding + example.question = [utility.word_ids[utility.dummy_token]] * ( + utility.FLAGS.question_length - len(example.question) + ) + example.question + example.question_attention_mask = [-10000.0] * ( + utility.FLAGS.question_length - len(example.question_attention_mask) + ) + example.question_attention_mask + example.ordinal_question = [0.0] * (utility.FLAGS.question_length - + len(example.ordinal_question) + ) + example.ordinal_question + example.ordinal_question_one = [0.0] * (utility.FLAGS.question_length - + len(example.ordinal_question_one) + ) + example.ordinal_question_one + if (True): + #number columns and related-padding + num_cols = len(example.columns) + start = 0 + for column in example.number_columns: + if (check_processed_cols(example.processed_number_columns[start], + utility)): + example.processed_column_mask.append(0.0) + sorted_index = sorted( + range(len(example.processed_number_columns[start])), + key=lambda k: example.processed_number_columns[start][k], + reverse=True) + sorted_index = sorted_index + [utility.FLAGS.pad_int] * ( + utility.FLAGS.max_elements - len(sorted_index)) + example.sorted_number_index.append(sorted_index) + example.columns[start] = column + [utility.FLAGS.pad_int] * ( + utility.FLAGS.max_elements - len(column)) + example.processed_number_columns[start] += [utility.FLAGS.pad_int] * ( + utility.FLAGS.max_elements - + len(example.processed_number_columns[start])) + start += 1 + example.column_mask.append(0.0) + for remaining in range(num_cols, utility.FLAGS.max_number_cols): + example.sorted_number_index.append([utility.FLAGS.pad_int] * + (utility.FLAGS.max_elements)) + example.columns.append([utility.FLAGS.pad_int] * + (utility.FLAGS.max_elements)) + example.processed_number_columns.append([utility.FLAGS.pad_int] * + (utility.FLAGS.max_elements)) + example.number_exact_match.append([0.0] * + (utility.FLAGS.max_elements)) + example.number_group_by_max.append([0.0] * + (utility.FLAGS.max_elements)) + example.column_mask.append(-100000000.0) + example.processed_column_mask.append(-100000000.0) + example.number_column_exact_match.append(0.0) + example.column_names.append([utility.dummy_token]) + #word column and related-padding + start = 0 + word_num_cols = len(example.word_columns) + for column in example.word_columns: + if (check_processed_cols(example.processed_word_columns[start], + utility)): + example.processed_word_column_mask.append(0.0) + sorted_index = sorted( + range(len(example.processed_word_columns[start])), + key=lambda k: example.processed_word_columns[start][k], + reverse=True) + sorted_index = sorted_index + [utility.FLAGS.pad_int] * ( + utility.FLAGS.max_elements - len(sorted_index)) + example.sorted_word_index.append(sorted_index) + column = convert_to_int_2d_and_pad(column, utility) + example.word_columns[start] = column + [[ + utility.word_ids[utility.dummy_token] + ] * 
utility.FLAGS.max_entry_length] * (utility.FLAGS.max_elements - + len(column)) + example.processed_word_columns[start] += [utility.FLAGS.pad_int] * ( + utility.FLAGS.max_elements - + len(example.processed_word_columns[start])) + example.word_column_entry_mask.append([0] * len(column) + [ + utility.word_ids[utility.dummy_token] + ] * (utility.FLAGS.max_elements - len(column))) + start += 1 + example.word_column_mask.append(0.0) + for remaining in range(word_num_cols, utility.FLAGS.max_word_cols): + example.sorted_word_index.append([utility.FLAGS.pad_int] * + (utility.FLAGS.max_elements)) + example.word_columns.append([[utility.word_ids[utility.dummy_token]] * + utility.FLAGS.max_entry_length] * + (utility.FLAGS.max_elements)) + example.word_column_entry_mask.append( + [utility.word_ids[utility.dummy_token]] * + (utility.FLAGS.max_elements)) + example.word_exact_match.append([0.0] * (utility.FLAGS.max_elements)) + example.word_group_by_max.append([0.0] * (utility.FLAGS.max_elements)) + example.processed_word_columns.append([utility.FLAGS.pad_int] * + (utility.FLAGS.max_elements)) + example.word_column_mask.append(-100000000.0) + example.processed_word_column_mask.append(-100000000.0) + example.word_column_exact_match.append(0.0) + example.word_column_names.append([utility.dummy_token] * + utility.FLAGS.max_entry_length) + seen_tables[example.table_key] = 1 + #convert column and word column names to integers + example.column_ids = convert_to_int_2d_and_pad(example.column_names, + utility) + example.word_column_ids = convert_to_int_2d_and_pad( + example.word_column_names, utility) + for i_em in range(len(example.number_exact_match)): + example.number_exact_match[i_em] = example.number_exact_match[ + i_em] + [0.0] * (utility.FLAGS.max_elements - + len(example.number_exact_match[i_em])) + example.number_group_by_max[i_em] = example.number_group_by_max[ + i_em] + [0.0] * (utility.FLAGS.max_elements - + len(example.number_group_by_max[i_em])) + for i_em in range(len(example.word_exact_match)): + example.word_exact_match[i_em] = example.word_exact_match[ + i_em] + [0.0] * (utility.FLAGS.max_elements - + len(example.word_exact_match[i_em])) + example.word_group_by_max[i_em] = example.word_group_by_max[ + i_em] + [0.0] * (utility.FLAGS.max_elements - + len(example.word_group_by_max[i_em])) + example.exact_match = example.number_exact_match + example.word_exact_match + example.group_by_max = example.number_group_by_max + example.word_group_by_max + example.exact_column_match = example.number_column_exact_match + example.word_column_exact_match + #answer and related mask, padding + if (example.is_lookup): + example.answer = example.calc_answer + example.number_print_answer = example.number_lookup_matrix.tolist() + example.word_print_answer = example.word_lookup_matrix.tolist() + for i_answer in range(len(example.number_print_answer)): + example.number_print_answer[i_answer] = example.number_print_answer[ + i_answer] + [0.0] * (utility.FLAGS.max_elements - + len(example.number_print_answer[i_answer])) + for i_answer in range(len(example.word_print_answer)): + example.word_print_answer[i_answer] = example.word_print_answer[ + i_answer] + [0.0] * (utility.FLAGS.max_elements - + len(example.word_print_answer[i_answer])) + example.number_lookup_matrix = convert_to_bool_and_pad( + example.number_lookup_matrix, utility) + example.word_lookup_matrix = convert_to_bool_and_pad( + example.word_lookup_matrix, utility) + for remaining in range(num_cols, utility.FLAGS.max_number_cols): + 
example.number_lookup_matrix.append([False] * + utility.FLAGS.max_elements) + example.number_print_answer.append([0.0] * utility.FLAGS.max_elements) + for remaining in range(word_num_cols, utility.FLAGS.max_word_cols): + example.word_lookup_matrix.append([False] * + utility.FLAGS.max_elements) + example.word_print_answer.append([0.0] * utility.FLAGS.max_elements) + example.print_answer = example.number_print_answer + example.word_print_answer + else: + example.answer = example.calc_answer + example.print_answer = [[0.0] * (utility.FLAGS.max_elements)] * ( + utility.FLAGS.max_number_cols + utility.FLAGS.max_word_cols) + #question_number masks + if (example.question_number == -1): + example.question_number_mask = np.zeros([utility.FLAGS.max_elements]) + else: + example.question_number_mask = np.ones([utility.FLAGS.max_elements]) + if (example.question_number_1 == -1): + example.question_number_one_mask = -10000.0 + else: + example.question_number_one_mask = np.float64(0.0) + if (example.len_col > utility.FLAGS.max_elements): + continue + processed_data.append(example) + return processed_data + + +def add_special_words(utility): + utility.words.append(utility.entry_match_token) + utility.word_ids[utility.entry_match_token] = len(utility.word_ids) + utility.reverse_word_ids[utility.word_ids[ + utility.entry_match_token]] = utility.entry_match_token + utility.entry_match_token_id = utility.word_ids[utility.entry_match_token] + print("entry match token: ", utility.word_ids[ + utility.entry_match_token], utility.entry_match_token_id) + utility.words.append(utility.column_match_token) + utility.word_ids[utility.column_match_token] = len(utility.word_ids) + utility.reverse_word_ids[utility.word_ids[ + utility.column_match_token]] = utility.column_match_token + utility.column_match_token_id = utility.word_ids[utility.column_match_token] + print("entry match token: ", utility.word_ids[ + utility.column_match_token], utility.column_match_token_id) + utility.words.append(utility.dummy_token) + utility.word_ids[utility.dummy_token] = len(utility.word_ids) + utility.reverse_word_ids[utility.word_ids[ + utility.dummy_token]] = utility.dummy_token + utility.dummy_token_id = utility.word_ids[utility.dummy_token] + utility.words.append(utility.unk_token) + utility.word_ids[utility.unk_token] = len(utility.word_ids) + utility.reverse_word_ids[utility.word_ids[ + utility.unk_token]] = utility.unk_token + + +def perform_word_cutoff(utility): + if (utility.FLAGS.word_cutoff > 0): + for word in utility.word_ids.keys(): + if (utility.word_count.has_key(word) and utility.word_count[word] < + utility.FLAGS.word_cutoff and word != utility.unk_token and + word != utility.dummy_token and word != utility.entry_match_token and + word != utility.column_match_token): + utility.word_ids.pop(word) + utility.words.remove(word) + + +def word_dropout(question, utility): + if (utility.FLAGS.word_dropout_prob > 0.0): + new_question = [] + for i in range(len(question)): + if (question[i] != utility.dummy_token_id and + utility.random.random() > utility.FLAGS.word_dropout_prob): + new_question.append(utility.word_ids[utility.unk_token]) + else: + new_question.append(question[i]) + return new_question + else: + return question + + +def generate_feed_dict(data, curr, batch_size, gr, train=False, utility=None): + #prepare feed dict dictionary + feed_dict = {} + feed_examples = [] + for j in range(batch_size): + feed_examples.append(data[curr + j]) + if (train): + feed_dict[gr.batch_question] = [ + 
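+    #word_dropout (defined above) replaces non-dummy question tokens with UNK with probability 1 - word_dropout_prob, at training time only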
word_dropout(feed_examples[j].question, utility) + for j in range(batch_size) + ] + else: + feed_dict[gr.batch_question] = [ + feed_examples[j].question for j in range(batch_size) + ] + feed_dict[gr.batch_question_attention_mask] = [ + feed_examples[j].question_attention_mask for j in range(batch_size) + ] + feed_dict[ + gr.batch_answer] = [feed_examples[j].answer for j in range(batch_size)] + feed_dict[gr.batch_number_column] = [ + feed_examples[j].columns for j in range(batch_size) + ] + feed_dict[gr.batch_processed_number_column] = [ + feed_examples[j].processed_number_columns for j in range(batch_size) + ] + feed_dict[gr.batch_processed_sorted_index_number_column] = [ + feed_examples[j].sorted_number_index for j in range(batch_size) + ] + feed_dict[gr.batch_processed_sorted_index_word_column] = [ + feed_examples[j].sorted_word_index for j in range(batch_size) + ] + feed_dict[gr.batch_question_number] = np.array( + [feed_examples[j].question_number for j in range(batch_size)]).reshape( + (batch_size, 1)) + feed_dict[gr.batch_question_number_one] = np.array( + [feed_examples[j].question_number_1 for j in range(batch_size)]).reshape( + (batch_size, 1)) + feed_dict[gr.batch_question_number_mask] = [ + feed_examples[j].question_number_mask for j in range(batch_size) + ] + feed_dict[gr.batch_question_number_one_mask] = np.array( + [feed_examples[j].question_number_one_mask for j in range(batch_size) + ]).reshape((batch_size, 1)) + feed_dict[gr.batch_print_answer] = [ + feed_examples[j].print_answer for j in range(batch_size) + ] + feed_dict[gr.batch_exact_match] = [ + feed_examples[j].exact_match for j in range(batch_size) + ] + feed_dict[gr.batch_group_by_max] = [ + feed_examples[j].group_by_max for j in range(batch_size) + ] + feed_dict[gr.batch_column_exact_match] = [ + feed_examples[j].exact_column_match for j in range(batch_size) + ] + feed_dict[gr.batch_ordinal_question] = [ + feed_examples[j].ordinal_question for j in range(batch_size) + ] + feed_dict[gr.batch_ordinal_question_one] = [ + feed_examples[j].ordinal_question_one for j in range(batch_size) + ] + feed_dict[gr.batch_number_column_mask] = [ + feed_examples[j].column_mask for j in range(batch_size) + ] + feed_dict[gr.batch_number_column_names] = [ + feed_examples[j].column_ids for j in range(batch_size) + ] + feed_dict[gr.batch_processed_word_column] = [ + feed_examples[j].processed_word_columns for j in range(batch_size) + ] + feed_dict[gr.batch_word_column_mask] = [ + feed_examples[j].word_column_mask for j in range(batch_size) + ] + feed_dict[gr.batch_word_column_names] = [ + feed_examples[j].word_column_ids for j in range(batch_size) + ] + feed_dict[gr.batch_word_column_entry_mask] = [ + feed_examples[j].word_column_entry_mask for j in range(batch_size) + ] + return feed_dict diff --git a/models/research/neural_programmer/model.py b/models/research/neural_programmer/model.py new file mode 100644 index 0000000000000000000000000000000000000000..610d66699e6e41188be58cc1f623c030d243c689 --- /dev/null +++ b/models/research/neural_programmer/model.py @@ -0,0 +1,679 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Author: aneelakantan (Arvind Neelakantan) +""" + +from __future__ import print_function + +import numpy as np +import tensorflow as tf +import nn_utils + + +class Graph(): + + def __init__(self, utility, batch_size, max_passes, mode="train"): + self.utility = utility + self.data_type = self.utility.tf_data_type[self.utility.FLAGS.data_type] + self.max_elements = self.utility.FLAGS.max_elements + max_elements = self.utility.FLAGS.max_elements + self.num_cols = self.utility.FLAGS.max_number_cols + self.num_word_cols = self.utility.FLAGS.max_word_cols + self.question_length = self.utility.FLAGS.question_length + self.batch_size = batch_size + self.max_passes = max_passes + self.mode = mode + self.embedding_dims = self.utility.FLAGS.embedding_dims + #input question and a mask + self.batch_question = tf.placeholder(tf.int32, + [batch_size, self.question_length]) + self.batch_question_attention_mask = tf.placeholder( + self.data_type, [batch_size, self.question_length]) + #ground truth scalar answer and lookup answer + self.batch_answer = tf.placeholder(self.data_type, [batch_size]) + self.batch_print_answer = tf.placeholder( + self.data_type, + [batch_size, self.num_cols + self.num_word_cols, max_elements]) + #number columns and its processed version + self.batch_number_column = tf.placeholder( + self.data_type, [batch_size, self.num_cols, max_elements + ]) #columns with numeric entries + self.batch_processed_number_column = tf.placeholder( + self.data_type, [batch_size, self.num_cols, max_elements]) + self.batch_processed_sorted_index_number_column = tf.placeholder( + tf.int32, [batch_size, self.num_cols, max_elements]) + #word columns and its processed version + self.batch_processed_word_column = tf.placeholder( + self.data_type, [batch_size, self.num_word_cols, max_elements]) + self.batch_processed_sorted_index_word_column = tf.placeholder( + tf.int32, [batch_size, self.num_word_cols, max_elements]) + self.batch_word_column_entry_mask = tf.placeholder( + tf.int32, [batch_size, self.num_word_cols, max_elements]) + #names of word and number columns along with their mask + self.batch_word_column_names = tf.placeholder( + tf.int32, + [batch_size, self.num_word_cols, self.utility.FLAGS.max_entry_length]) + self.batch_word_column_mask = tf.placeholder( + self.data_type, [batch_size, self.num_word_cols]) + self.batch_number_column_names = tf.placeholder( + tf.int32, + [batch_size, self.num_cols, self.utility.FLAGS.max_entry_length]) + self.batch_number_column_mask = tf.placeholder(self.data_type, + [batch_size, self.num_cols]) + #exact match and group by max operation + self.batch_exact_match = tf.placeholder( + self.data_type, + [batch_size, self.num_cols + self.num_word_cols, max_elements]) + self.batch_column_exact_match = tf.placeholder( + self.data_type, [batch_size, self.num_cols + self.num_word_cols]) + self.batch_group_by_max = tf.placeholder( + self.data_type, + [batch_size, self.num_cols + self.num_word_cols, max_elements]) + #numbers in the question along with their position. 
This is used to compute arguments to the comparison operations + self.batch_question_number = tf.placeholder(self.data_type, [batch_size, 1]) + self.batch_question_number_one = tf.placeholder(self.data_type, + [batch_size, 1]) + self.batch_question_number_mask = tf.placeholder( + self.data_type, [batch_size, max_elements]) + self.batch_question_number_one_mask = tf.placeholder(self.data_type, + [batch_size, 1]) + self.batch_ordinal_question = tf.placeholder( + self.data_type, [batch_size, self.question_length]) + self.batch_ordinal_question_one = tf.placeholder( + self.data_type, [batch_size, self.question_length]) + + def LSTM_question_embedding(self, sentence, sentence_length): + #LSTM processes the input question + lstm_params = "question_lstm" + hidden_vectors = [] + sentence = self.batch_question + question_hidden = tf.zeros( + [self.batch_size, self.utility.FLAGS.embedding_dims], self.data_type) + question_c_hidden = tf.zeros( + [self.batch_size, self.utility.FLAGS.embedding_dims], self.data_type) + if (self.utility.FLAGS.rnn_dropout > 0.0): + if (self.mode == "train"): + rnn_dropout_mask = tf.cast( + tf.random_uniform( + tf.shape(question_hidden), minval=0.0, maxval=1.0) < + self.utility.FLAGS.rnn_dropout, + self.data_type) / self.utility.FLAGS.rnn_dropout + else: + rnn_dropout_mask = tf.ones_like(question_hidden) + for question_iterator in range(self.question_length): + curr_word = sentence[:, question_iterator] + question_vector = nn_utils.apply_dropout( + nn_utils.get_embedding(curr_word, self.utility, self.params), + self.utility.FLAGS.dropout, self.mode) + question_hidden, question_c_hidden = nn_utils.LSTMCell( + question_vector, question_hidden, question_c_hidden, lstm_params, + self.params) + if (self.utility.FLAGS.rnn_dropout > 0.0): + question_hidden = question_hidden * rnn_dropout_mask + hidden_vectors.append(tf.expand_dims(question_hidden, 0)) + hidden_vectors = tf.concat(axis=0, values=hidden_vectors) + return question_hidden, hidden_vectors + + def history_recurrent_step(self, curr_hprev, hprev): + #A single RNN step for controller or history RNN + return tf.tanh( + tf.matmul( + tf.concat(axis=1, values=[hprev, curr_hprev]), self.params[ + "history_recurrent"])) + self.params["history_recurrent_bias"] + + def question_number_softmax(self, hidden_vectors): + #Attention on quetsion to decide the question number to passed to comparison ops + def compute_ans(op_embedding, comparison): + op_embedding = tf.expand_dims(op_embedding, 0) + #dot product of operation embedding with hidden state to the left of the number occurrence + first = tf.transpose( + tf.matmul(op_embedding, + tf.transpose( + tf.reduce_sum(hidden_vectors * tf.tile( + tf.expand_dims( + tf.transpose(self.batch_ordinal_question), 2), + [1, 1, self.utility.FLAGS.embedding_dims]), 0)))) + second = self.batch_question_number_one_mask + tf.transpose( + tf.matmul(op_embedding, + tf.transpose( + tf.reduce_sum(hidden_vectors * tf.tile( + tf.expand_dims( + tf.transpose(self.batch_ordinal_question_one), 2 + ), [1, 1, self.utility.FLAGS.embedding_dims]), 0)))) + question_number_softmax = tf.nn.softmax(tf.concat(axis=1, values=[first, second])) + if (self.mode == "test"): + cond = tf.equal(question_number_softmax, + tf.reshape( + tf.reduce_max(question_number_softmax, 1), + [self.batch_size, 1])) + question_number_softmax = tf.where( + cond, + tf.fill(tf.shape(question_number_softmax), 1.0), + tf.fill(tf.shape(question_number_softmax), 0.0)) + question_number_softmax = tf.cast(question_number_softmax, + self.data_type) + 
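+    #the comparison argument is the expectation of the two question numbers under the (soft or hard) attention weights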
ans = tf.reshape( + tf.reduce_sum(question_number_softmax * tf.concat( + axis=1, values=[self.batch_question_number, self.batch_question_number_one]), + 1), [self.batch_size, 1]) + return ans + + def compute_op_position(op_name): + for i in range(len(self.utility.operations_set)): + if (op_name == self.utility.operations_set[i]): + return i + + def compute_question_number(op_name): + op_embedding = tf.nn.embedding_lookup(self.params_unit, + compute_op_position(op_name)) + return compute_ans(op_embedding, op_name) + + curr_greater_question_number = compute_question_number("greater") + curr_lesser_question_number = compute_question_number("lesser") + curr_geq_question_number = compute_question_number("geq") + curr_leq_question_number = compute_question_number("leq") + return curr_greater_question_number, curr_lesser_question_number, curr_geq_question_number, curr_leq_question_number + + def perform_attention(self, context_vector, hidden_vectors, length, mask): + #Performs attention on hiddent_vectors using context vector + context_vector = tf.tile( + tf.expand_dims(context_vector, 0), [length, 1, 1]) #time * bs * d + attention_softmax = tf.nn.softmax( + tf.transpose(tf.reduce_sum(context_vector * hidden_vectors, 2)) + + mask) #batch_size * time + attention_softmax = tf.tile( + tf.expand_dims(tf.transpose(attention_softmax), 2), + [1, 1, self.embedding_dims]) + ans_vector = tf.reduce_sum(attention_softmax * hidden_vectors, 0) + return ans_vector + + #computes embeddings for column names using parameters of question module + def get_column_hidden_vectors(self): + #vector representations for the column names + self.column_hidden_vectors = tf.reduce_sum( + nn_utils.get_embedding(self.batch_number_column_names, self.utility, + self.params), 2) + self.word_column_hidden_vectors = tf.reduce_sum( + nn_utils.get_embedding(self.batch_word_column_names, self.utility, + self.params), 2) + + def create_summary_embeddings(self): + #embeddings for each text entry in the table using parameters of the question module + self.summary_text_entry_embeddings = tf.reduce_sum( + tf.expand_dims(self.batch_exact_match, 3) * tf.expand_dims( + tf.expand_dims( + tf.expand_dims( + nn_utils.get_embedding(self.utility.entry_match_token_id, + self.utility, self.params), 0), 1), + 2), 2) + + def compute_column_softmax(self, column_controller_vector, time_step): + #compute softmax over all the columns using column controller vector + column_controller_vector = tf.tile( + tf.expand_dims(column_controller_vector, 1), + [1, self.num_cols + self.num_word_cols, 1]) #max_cols * bs * d + column_controller_vector = nn_utils.apply_dropout( + column_controller_vector, self.utility.FLAGS.dropout, self.mode) + self.full_column_hidden_vectors = tf.concat( + axis=1, values=[self.column_hidden_vectors, self.word_column_hidden_vectors]) + self.full_column_hidden_vectors += self.summary_text_entry_embeddings + self.full_column_hidden_vectors = nn_utils.apply_dropout( + self.full_column_hidden_vectors, self.utility.FLAGS.dropout, self.mode) + column_logits = tf.reduce_sum( + column_controller_vector * self.full_column_hidden_vectors, 2) + ( + self.params["word_match_feature_column_name"] * + self.batch_column_exact_match) + self.full_column_mask + column_softmax = tf.nn.softmax(column_logits) #batch_size * max_cols + return column_softmax + + def compute_first_or_last(self, select, first=True): + #perform first ot last operation on row select with probabilistic row selection + answer = tf.zeros_like(select) + running_sum = 
tf.zeros([self.batch_size, 1], self.data_type) + for i in range(self.max_elements): + if (first): + current = tf.slice(select, [0, i], [self.batch_size, 1]) + else: + current = tf.slice(select, [0, self.max_elements - 1 - i], + [self.batch_size, 1]) + curr_prob = current * (1 - running_sum) + curr_prob = curr_prob * tf.cast(curr_prob >= 0.0, self.data_type) + running_sum += curr_prob + temp_ans = [] + curr_prob = tf.expand_dims(tf.reshape(curr_prob, [self.batch_size]), 0) + for i_ans in range(self.max_elements): + if (not (first) and i_ans == self.max_elements - 1 - i): + temp_ans.append(curr_prob) + elif (first and i_ans == i): + temp_ans.append(curr_prob) + else: + temp_ans.append(tf.zeros_like(curr_prob)) + temp_ans = tf.transpose(tf.concat(axis=0, values=temp_ans)) + answer += temp_ans + return answer + + def make_hard_softmax(self, softmax): + #converts soft selection to hard selection. used at test time + cond = tf.equal( + softmax, tf.reshape(tf.reduce_max(softmax, 1), [self.batch_size, 1])) + softmax = tf.where( + cond, tf.fill(tf.shape(softmax), 1.0), tf.fill(tf.shape(softmax), 0.0)) + softmax = tf.cast(softmax, self.data_type) + return softmax + + def compute_max_or_min(self, select, maxi=True): + #computes the argmax and argmin of a column with probabilistic row selection + answer = tf.zeros([ + self.batch_size, self.num_cols + self.num_word_cols, self.max_elements + ], self.data_type) + sum_prob = tf.zeros([self.batch_size, self.num_cols + self.num_word_cols], + self.data_type) + for j in range(self.max_elements): + if (maxi): + curr_pos = j + else: + curr_pos = self.max_elements - 1 - j + select_index = tf.slice(self.full_processed_sorted_index_column, + [0, 0, curr_pos], [self.batch_size, -1, 1]) + select_mask = tf.equal( + tf.tile( + tf.expand_dims( + tf.tile( + tf.expand_dims(tf.range(self.max_elements), 0), + [self.batch_size, 1]), 1), + [1, self.num_cols + self.num_word_cols, 1]), select_index) + curr_prob = tf.expand_dims(select, 1) * tf.cast( + select_mask, self.data_type) * self.select_bad_number_mask + curr_prob = curr_prob * tf.expand_dims((1 - sum_prob), 2) + curr_prob = curr_prob * tf.expand_dims( + tf.cast((1 - sum_prob) > 0.0, self.data_type), 2) + answer = tf.where(select_mask, curr_prob, answer) + sum_prob += tf.reduce_sum(curr_prob, 2) + return answer + + def perform_operations(self, softmax, full_column_softmax, select, + prev_select_1, curr_pass): + #performs all the 15 operations. 
computes scalar output, lookup answer and row selector + column_softmax = tf.slice(full_column_softmax, [0, 0], + [self.batch_size, self.num_cols]) + word_column_softmax = tf.slice(full_column_softmax, [0, self.num_cols], + [self.batch_size, self.num_word_cols]) + init_max = self.compute_max_or_min(select, maxi=True) + init_min = self.compute_max_or_min(select, maxi=False) + #operations that are column independent + count = tf.reshape(tf.reduce_sum(select, 1), [self.batch_size, 1]) + select_full_column_softmax = tf.tile( + tf.expand_dims(full_column_softmax, 2), + [1, 1, self.max_elements + ]) #BS * (max_cols + max_word_cols) * max_elements + select_word_column_softmax = tf.tile( + tf.expand_dims(word_column_softmax, 2), + [1, 1, self.max_elements]) #BS * max_word_cols * max_elements + select_greater = tf.reduce_sum( + self.init_select_greater * select_full_column_softmax, + 1) * self.batch_question_number_mask #BS * max_elements + select_lesser = tf.reduce_sum( + self.init_select_lesser * select_full_column_softmax, + 1) * self.batch_question_number_mask #BS * max_elements + select_geq = tf.reduce_sum( + self.init_select_geq * select_full_column_softmax, + 1) * self.batch_question_number_mask #BS * max_elements + select_leq = tf.reduce_sum( + self.init_select_leq * select_full_column_softmax, + 1) * self.batch_question_number_mask #BS * max_elements + select_max = tf.reduce_sum(init_max * select_full_column_softmax, + 1) #BS * max_elements + select_min = tf.reduce_sum(init_min * select_full_column_softmax, + 1) #BS * max_elements + select_prev = tf.concat(axis=1, values=[ + tf.slice(select, [0, 1], [self.batch_size, self.max_elements - 1]), + tf.cast(tf.zeros([self.batch_size, 1]), self.data_type) + ]) + select_next = tf.concat(axis=1, values=[ + tf.cast(tf.zeros([self.batch_size, 1]), self.data_type), tf.slice( + select, [0, 0], [self.batch_size, self.max_elements - 1]) + ]) + select_last_rs = self.compute_first_or_last(select, False) + select_first_rs = self.compute_first_or_last(select, True) + select_word_match = tf.reduce_sum(self.batch_exact_match * + select_full_column_softmax, 1) + select_group_by_max = tf.reduce_sum(self.batch_group_by_max * + select_full_column_softmax, 1) + length_content = 1 + length_select = 13 + length_print = 1 + values = tf.concat(axis=1, values=[count]) + softmax_content = tf.slice(softmax, [0, 0], + [self.batch_size, length_content]) + #compute scalar output + output = tf.reduce_sum(tf.multiply(softmax_content, values), 1) + #compute lookup answer + softmax_print = tf.slice(softmax, [0, length_content + length_select], + [self.batch_size, length_print]) + curr_print = select_full_column_softmax * tf.tile( + tf.expand_dims(select, 1), + [1, self.num_cols + self.num_word_cols, 1 + ]) #BS * max_cols * max_elements (conisders only column) + self.batch_lookup_answer = curr_print * tf.tile( + tf.expand_dims(softmax_print, 2), + [1, self.num_cols + self.num_word_cols, self.max_elements + ]) #BS * max_cols * max_elements + self.batch_lookup_answer = self.batch_lookup_answer * self.select_full_mask + #compute row select + softmax_select = tf.slice(softmax, [0, length_content], + [self.batch_size, length_select]) + select_lists = [ + tf.expand_dims(select_prev, 1), tf.expand_dims(select_next, 1), + tf.expand_dims(select_first_rs, 1), tf.expand_dims(select_last_rs, 1), + tf.expand_dims(select_group_by_max, 1), + tf.expand_dims(select_greater, 1), tf.expand_dims(select_lesser, 1), + tf.expand_dims(select_geq, 1), tf.expand_dims(select_leq, 1), + 
tf.expand_dims(select_max, 1), tf.expand_dims(select_min, 1), + tf.expand_dims(select_word_match, 1), + tf.expand_dims(self.reset_select, 1) + ] + select = tf.reduce_sum( + tf.tile(tf.expand_dims(softmax_select, 2), [1, 1, self.max_elements]) * + tf.concat(axis=1, values=select_lists), 1) + select = select * self.select_whole_mask + return output, select + + def one_pass(self, select, question_embedding, hidden_vectors, hprev, + prev_select_1, curr_pass): + #Performs one timestep which involves selecting an operation and a column + attention_vector = self.perform_attention( + hprev, hidden_vectors, self.question_length, + self.batch_question_attention_mask) #batch_size * embedding_dims + controller_vector = tf.nn.relu( + tf.matmul(hprev, self.params["controller_prev"]) + tf.matmul( + tf.concat(axis=1, values=[question_embedding, attention_vector]), self.params[ + "controller"])) + column_controller_vector = tf.nn.relu( + tf.matmul(hprev, self.params["column_controller_prev"]) + tf.matmul( + tf.concat(axis=1, values=[question_embedding, attention_vector]), self.params[ + "column_controller"])) + controller_vector = nn_utils.apply_dropout( + controller_vector, self.utility.FLAGS.dropout, self.mode) + self.operation_logits = tf.matmul(controller_vector, + tf.transpose(self.params_unit)) + softmax = tf.nn.softmax(self.operation_logits) + soft_softmax = softmax + #compute column softmax: bs * max_columns + weighted_op_representation = tf.transpose( + tf.matmul(tf.transpose(self.params_unit), tf.transpose(softmax))) + column_controller_vector = tf.nn.relu( + tf.matmul( + tf.concat(axis=1, values=[ + column_controller_vector, weighted_op_representation + ]), self.params["break_conditional"])) + full_column_softmax = self.compute_column_softmax(column_controller_vector, + curr_pass) + soft_column_softmax = full_column_softmax + if (self.mode == "test"): + full_column_softmax = self.make_hard_softmax(full_column_softmax) + softmax = self.make_hard_softmax(softmax) + output, select = self.perform_operations(softmax, full_column_softmax, + select, prev_select_1, curr_pass) + return output, select, softmax, soft_softmax, full_column_softmax, soft_column_softmax + + def compute_lookup_error(self, val): + #computes lookup error. 
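+    #cells whose gold print answer equals val keep their print error; all other cells get a large constant so reduce_min picks the matching cell, and the term is zeroed when val never occurs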
+ cond = tf.equal(self.batch_print_answer, val) + inter = tf.where( + cond, self.init_print_error, + tf.tile( + tf.reshape(tf.constant(1e10, self.data_type), [1, 1, 1]), [ + self.batch_size, self.utility.FLAGS.max_word_cols + + self.utility.FLAGS.max_number_cols, + self.utility.FLAGS.max_elements + ])) + return tf.reduce_min(tf.reduce_min(inter, 1), 1) * tf.cast( + tf.greater( + tf.reduce_sum(tf.reduce_sum(tf.cast(cond, self.data_type), 1), 1), + 0.0), self.data_type) + + def soft_min(self, x, y): + return tf.maximum(-1.0 * (1 / ( + self.utility.FLAGS.soft_min_value + 0.0)) * tf.log( + tf.exp(-self.utility.FLAGS.soft_min_value * x) + tf.exp( + -self.utility.FLAGS.soft_min_value * y)), tf.zeros_like(x)) + + def error_computation(self): + #computes the error of each example in a batch + math_error = 0.5 * tf.square(tf.subtract(self.scalar_output, self.batch_answer)) + #scale math error + math_error = math_error / self.rows + math_error = tf.minimum(math_error, self.utility.FLAGS.max_math_error * + tf.ones(tf.shape(math_error), self.data_type)) + self.init_print_error = tf.where( + self.batch_gold_select, -1 * tf.log(self.batch_lookup_answer + 1e-300 + + self.invert_select_full_mask), -1 * + tf.log(1 - self.batch_lookup_answer)) * self.select_full_mask + print_error_1 = self.init_print_error * tf.cast( + tf.equal(self.batch_print_answer, 0.0), self.data_type) + print_error = tf.reduce_sum(tf.reduce_sum((print_error_1), 1), 1) + for val in range(1, 58): + print_error += self.compute_lookup_error(val + 0.0) + print_error = print_error * self.utility.FLAGS.print_cost / self.num_entries + if (self.mode == "train"): + error = tf.where( + tf.logical_and( + tf.not_equal(self.batch_answer, 0.0), + tf.not_equal( + tf.reduce_sum(tf.reduce_sum(self.batch_print_answer, 1), 1), + 0.0)), + self.soft_min(math_error, print_error), + tf.where( + tf.not_equal(self.batch_answer, 0.0), math_error, print_error)) + else: + error = tf.where( + tf.logical_and( + tf.equal(self.scalar_output, 0.0), + tf.equal( + tf.reduce_sum(tf.reduce_sum(self.batch_lookup_answer, 1), 1), + 0.0)), + tf.ones_like(math_error), + tf.where( + tf.equal(self.scalar_output, 0.0), print_error, math_error)) + return error + + def batch_process(self): + #Computes loss and fraction of correct examples in a batch. 
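+    #operation embeddings are dropped out once and reused; the model then runs max_passes steps, each selecting an operation and a column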
+ self.params_unit = nn_utils.apply_dropout( + self.params["unit"], self.utility.FLAGS.dropout, self.mode) + batch_size = self.batch_size + max_passes = self.max_passes + num_timesteps = 1 + max_elements = self.max_elements + select = tf.cast( + tf.fill([self.batch_size, max_elements], 1.0), self.data_type) + hprev = tf.cast( + tf.fill([self.batch_size, self.embedding_dims], 0.0), + self.data_type) #running sum of the hidden states of the model + output = tf.cast(tf.fill([self.batch_size, 1], 0.0), + self.data_type) #output of the model + correct = tf.cast( + tf.fill([1], 0.0), self.data_type + ) #to compute accuracy, returns number of correct examples for this batch + total_error = 0.0 + prev_select_1 = tf.zeros_like(select) + self.create_summary_embeddings() + self.get_column_hidden_vectors() + #get question embedding + question_embedding, hidden_vectors = self.LSTM_question_embedding( + self.batch_question, self.question_length) + #compute arguments for comparison operation + greater_question_number, lesser_question_number, geq_question_number, leq_question_number = self.question_number_softmax( + hidden_vectors) + self.init_select_greater = tf.cast( + tf.greater(self.full_processed_column, + tf.expand_dims(greater_question_number, 2)), self. + data_type) * self.select_bad_number_mask #bs * max_cols * max_elements + self.init_select_lesser = tf.cast( + tf.less(self.full_processed_column, + tf.expand_dims(lesser_question_number, 2)), self. + data_type) * self.select_bad_number_mask #bs * max_cols * max_elements + self.init_select_geq = tf.cast( + tf.greater_equal(self.full_processed_column, + tf.expand_dims(geq_question_number, 2)), self. + data_type) * self.select_bad_number_mask #bs * max_cols * max_elements + self.init_select_leq = tf.cast( + tf.less_equal(self.full_processed_column, + tf.expand_dims(leq_question_number, 2)), self. 
+ data_type) * self.select_bad_number_mask #bs * max_cols * max_elements + self.init_select_word_match = 0 + if (self.utility.FLAGS.rnn_dropout > 0.0): + if (self.mode == "train"): + history_rnn_dropout_mask = tf.cast( + tf.random_uniform( + tf.shape(hprev), minval=0.0, maxval=1.0) < + self.utility.FLAGS.rnn_dropout, + self.data_type) / self.utility.FLAGS.rnn_dropout + else: + history_rnn_dropout_mask = tf.ones_like(hprev) + select = select * self.select_whole_mask + self.batch_log_prob = tf.zeros([self.batch_size], dtype=self.data_type) + #Perform max_passes and at each pass select operation and column + for curr_pass in range(max_passes): + print("step: ", curr_pass) + output, select, softmax, soft_softmax, column_softmax, soft_column_softmax = self.one_pass( + select, question_embedding, hidden_vectors, hprev, prev_select_1, + curr_pass) + prev_select_1 = select + #compute input to history RNN + input_op = tf.transpose( + tf.matmul( + tf.transpose(self.params_unit), tf.transpose( + soft_softmax))) #weighted average of emebdding of operations + input_col = tf.reduce_sum( + tf.expand_dims(soft_column_softmax, 2) * + self.full_column_hidden_vectors, 1) + history_input = tf.concat(axis=1, values=[input_op, input_col]) + history_input = nn_utils.apply_dropout( + history_input, self.utility.FLAGS.dropout, self.mode) + hprev = self.history_recurrent_step(history_input, hprev) + if (self.utility.FLAGS.rnn_dropout > 0.0): + hprev = hprev * history_rnn_dropout_mask + self.scalar_output = output + error = self.error_computation() + cond = tf.less(error, 0.0001, name="cond") + correct_add = tf.where( + cond, tf.fill(tf.shape(cond), 1.0), tf.fill(tf.shape(cond), 0.0)) + correct = tf.reduce_sum(correct_add) + error = error / batch_size + total_error = tf.reduce_sum(error) + total_correct = correct / batch_size + return total_error, total_correct + + def compute_error(self): + #Sets mask variables and performs batch processing + self.batch_gold_select = self.batch_print_answer > 0.0 + self.full_column_mask = tf.concat( + axis=1, values=[self.batch_number_column_mask, self.batch_word_column_mask]) + self.full_processed_column = tf.concat( + axis=1, + values=[self.batch_processed_number_column, self.batch_processed_word_column]) + self.full_processed_sorted_index_column = tf.concat(axis=1, values=[ + self.batch_processed_sorted_index_number_column, + self.batch_processed_sorted_index_word_column + ]) + self.select_bad_number_mask = tf.cast( + tf.logical_and( + tf.not_equal(self.full_processed_column, + self.utility.FLAGS.pad_int), + tf.not_equal(self.full_processed_column, + self.utility.FLAGS.bad_number_pre_process)), + self.data_type) + self.select_mask = tf.cast( + tf.logical_not( + tf.equal(self.batch_number_column, self.utility.FLAGS.pad_int)), + self.data_type) + self.select_word_mask = tf.cast( + tf.logical_not( + tf.equal(self.batch_word_column_entry_mask, + self.utility.dummy_token_id)), self.data_type) + self.select_full_mask = tf.concat( + axis=1, values=[self.select_mask, self.select_word_mask]) + self.select_whole_mask = tf.maximum( + tf.reshape( + tf.slice(self.select_mask, [0, 0, 0], + [self.batch_size, 1, self.max_elements]), + [self.batch_size, self.max_elements]), + tf.reshape( + tf.slice(self.select_word_mask, [0, 0, 0], + [self.batch_size, 1, self.max_elements]), + [self.batch_size, self.max_elements])) + self.invert_select_full_mask = tf.cast( + tf.concat(axis=1, values=[ + tf.equal(self.batch_number_column, self.utility.FLAGS.pad_int), + tf.equal(self.batch_word_column_entry_mask, 
+ self.utility.dummy_token_id) + ]), self.data_type) + self.batch_lookup_answer = tf.zeros(tf.shape(self.batch_gold_select)) + self.reset_select = self.select_whole_mask + self.rows = tf.reduce_sum(self.select_whole_mask, 1) + self.num_entries = tf.reshape( + tf.reduce_sum(tf.reduce_sum(self.select_full_mask, 1), 1), + [self.batch_size]) + self.final_error, self.final_correct = self.batch_process() + return self.final_error + + def create_graph(self, params, global_step): + #Creates the graph to compute error, gradient computation and updates parameters + self.params = params + batch_size = self.batch_size + learning_rate = tf.cast(self.utility.FLAGS.learning_rate, self.data_type) + self.total_cost = self.compute_error() + optimize_params = self.params.values() + optimize_names = self.params.keys() + print("optimize params ", optimize_names) + if (self.utility.FLAGS.l2_regularizer > 0.0): + reg_cost = 0.0 + for ind_param in self.params.keys(): + reg_cost += tf.nn.l2_loss(self.params[ind_param]) + self.total_cost += self.utility.FLAGS.l2_regularizer * reg_cost + grads = tf.gradients(self.total_cost, optimize_params, name="gradients") + grad_norm = 0.0 + for p, name in zip(grads, optimize_names): + print("grads: ", p, name) + if isinstance(p, tf.IndexedSlices): + grad_norm += tf.reduce_sum(p.values * p.values) + elif not (p == None): + grad_norm += tf.reduce_sum(p * p) + grad_norm = tf.sqrt(grad_norm) + max_grad_norm = np.float32(self.utility.FLAGS.clip_gradients).astype( + self.utility.np_data_type[self.utility.FLAGS.data_type]) + grad_scale = tf.minimum( + tf.cast(1.0, self.data_type), max_grad_norm / grad_norm) + clipped_grads = list() + for p in grads: + if isinstance(p, tf.IndexedSlices): + tmp = p.values * grad_scale + clipped_grads.append(tf.IndexedSlices(tmp, p.indices)) + elif not (p == None): + clipped_grads.append(p * grad_scale) + else: + clipped_grads.append(p) + grads = clipped_grads + self.global_step = global_step + params_list = self.params.values() + params_list.append(self.global_step) + adam = tf.train.AdamOptimizer( + learning_rate, + epsilon=tf.cast(self.utility.FLAGS.eps, self.data_type), + use_locking=True) + self.step = adam.apply_gradients(zip(grads, optimize_params), + global_step=self.global_step) + self.init_op = tf.global_variables_initializer() diff --git a/models/research/neural_programmer/neural_programmer.py b/models/research/neural_programmer/neural_programmer.py new file mode 100644 index 0000000000000000000000000000000000000000..145ca13d6ac8ce80d651f902440bfb3240f1c7a2 --- /dev/null +++ b/models/research/neural_programmer/neural_programmer.py @@ -0,0 +1,239 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Implementation of the Neural Programmer model described in https://openreview.net/pdf?id=ry2YOrcge + +This file calls functions to load & pre-process data, construct the TF graph +and performs training or evaluation as specified by the flag evaluator_job +Author: aneelakantan (Arvind Neelakantan) +""" +from __future__ import print_function + +import time +from random import Random +import numpy as np +import tensorflow as tf +import model +import wiki_data +import parameters +import data_utils + +tf.flags.DEFINE_integer("train_steps", 100001, "Number of steps to train") +tf.flags.DEFINE_integer("eval_cycle", 500, + "Evaluate model at every eval_cycle steps") +tf.flags.DEFINE_integer("max_elements", 100, + "maximum rows that are considered for processing") +tf.flags.DEFINE_integer( + "max_number_cols", 15, + "maximum number columns that are considered for processing") +tf.flags.DEFINE_integer( + "max_word_cols", 25, + "maximum number columns that are considered for processing") +tf.flags.DEFINE_integer("question_length", 62, "maximum question length") +tf.flags.DEFINE_integer("max_entry_length", 1, "") +tf.flags.DEFINE_integer("max_passes", 4, "number of operation passes") +tf.flags.DEFINE_integer("embedding_dims", 256, "") +tf.flags.DEFINE_integer("batch_size", 20, "") +tf.flags.DEFINE_float("clip_gradients", 1.0, "") +tf.flags.DEFINE_float("eps", 1e-6, "") +tf.flags.DEFINE_float("param_init", 0.1, "") +tf.flags.DEFINE_float("learning_rate", 0.001, "") +tf.flags.DEFINE_float("l2_regularizer", 0.0001, "") +tf.flags.DEFINE_float("print_cost", 50.0, + "weighting factor in the objective function") +tf.flags.DEFINE_string("job_id", "temp", """job id""") +tf.flags.DEFINE_string("output_dir", "../model/", + """output_dir""") +tf.flags.DEFINE_string("data_dir", "../data/", + """data_dir""") +tf.flags.DEFINE_integer("write_every", 500, "wrtie every N") +tf.flags.DEFINE_integer("param_seed", 150, "") +tf.flags.DEFINE_integer("python_seed", 200, "") +tf.flags.DEFINE_float("dropout", 0.8, "dropout keep probability") +tf.flags.DEFINE_float("rnn_dropout", 0.9, + "dropout keep probability for rnn connections") +tf.flags.DEFINE_float("pad_int", -20000.0, + "number columns are padded with pad_int") +tf.flags.DEFINE_string("data_type", "double", "float or double") +tf.flags.DEFINE_float("word_dropout_prob", 0.9, "word dropout keep prob") +tf.flags.DEFINE_integer("word_cutoff", 10, "") +tf.flags.DEFINE_integer("vocab_size", 10800, "") +tf.flags.DEFINE_boolean("evaluator_job", False, + "wehther to run as trainer/evaluator") +tf.flags.DEFINE_float( + "bad_number_pre_process", -200000.0, + "number that is added to a corrupted table entry in a number column") +tf.flags.DEFINE_float("max_math_error", 3.0, + "max square loss error that is considered") +tf.flags.DEFINE_float("soft_min_value", 5.0, "") +FLAGS = tf.flags.FLAGS + + +class Utility: + #holds FLAGS and other variables that are used in different files + def __init__(self): + global FLAGS + self.FLAGS = FLAGS + self.unk_token = "UNK" + self.entry_match_token = "entry_match" + self.column_match_token = "column_match" + self.dummy_token = "dummy_token" + self.tf_data_type = {} + self.tf_data_type["double"] = tf.float64 + self.tf_data_type["float"] = tf.float32 + self.np_data_type = {} + self.np_data_type["double"] = np.float64 + self.np_data_type["float"] = np.float32 + self.operations_set = ["count"] + [ + "prev", "next", "first_rs", "last_rs", "group_by_max", "greater", + 
"lesser", "geq", "leq", "max", "min", "word-match" + ] + ["reset_select"] + ["print"] + self.word_ids = {} + self.reverse_word_ids = {} + self.word_count = {} + self.random = Random(FLAGS.python_seed) + + +def evaluate(sess, data, batch_size, graph, i): + #computes accuracy + num_examples = 0.0 + gc = 0.0 + for j in range(0, len(data) - batch_size + 1, batch_size): + [ct] = sess.run([graph.final_correct], + feed_dict=data_utils.generate_feed_dict(data, j, batch_size, + graph)) + gc += ct * batch_size + num_examples += batch_size + print("dev set accuracy after ", i, " : ", gc / num_examples) + print(num_examples, len(data)) + print("--------") + + +def Train(graph, utility, batch_size, train_data, sess, model_dir, + saver): + #performs training + curr = 0 + train_set_loss = 0.0 + utility.random.shuffle(train_data) + start = time.time() + for i in range(utility.FLAGS.train_steps): + curr_step = i + if (i > 0 and i % FLAGS.write_every == 0): + model_file = model_dir + "/model_" + str(i) + saver.save(sess, model_file) + if curr + batch_size >= len(train_data): + curr = 0 + utility.random.shuffle(train_data) + step, cost_value = sess.run( + [graph.step, graph.total_cost], + feed_dict=data_utils.generate_feed_dict( + train_data, curr, batch_size, graph, train=True, utility=utility)) + curr = curr + batch_size + train_set_loss += cost_value + if (i > 0 and i % FLAGS.eval_cycle == 0): + end = time.time() + time_taken = end - start + print("step ", i, " ", time_taken, " seconds ") + start = end + print(" printing train set loss: ", train_set_loss / utility.FLAGS.eval_cycle) + train_set_loss = 0.0 + + +def master(train_data, dev_data, utility): + #creates TF graph and calls trainer or evaluator + batch_size = utility.FLAGS.batch_size + model_dir = utility.FLAGS.output_dir + "/model" + utility.FLAGS.job_id + "/" + #create all paramters of the model + param_class = parameters.Parameters(utility) + params, global_step, init = param_class.parameters(utility) + key = "test" if (FLAGS.evaluator_job) else "train" + graph = model.Graph(utility, batch_size, utility.FLAGS.max_passes, mode=key) + graph.create_graph(params, global_step) + prev_dev_error = 0.0 + final_loss = 0.0 + final_accuracy = 0.0 + #start session + with tf.Session() as sess: + sess.run(init.name) + sess.run(graph.init_op.name) + to_save = params.copy() + saver = tf.train.Saver(to_save, max_to_keep=500) + if (FLAGS.evaluator_job): + while True: + selected_models = {} + file_list = tf.gfile.ListDirectory(model_dir) + for model_file in file_list: + if ("checkpoint" in model_file or "index" in model_file or + "meta" in model_file): + continue + if ("data" in model_file): + model_file = model_file.split(".")[0] + model_step = int( + model_file.split("_")[len(model_file.split("_")) - 1]) + selected_models[model_step] = model_file + file_list = sorted(selected_models.items(), key=lambda x: x[0]) + if (len(file_list) > 0): + file_list = file_list[0:len(file_list) - 1] + print("list of models: ", file_list) + for model_file in file_list: + model_file = model_file[1] + print("restoring: ", model_file) + saver.restore(sess, model_dir + "/" + model_file) + model_step = int( + model_file.split("_")[len(model_file.split("_")) - 1]) + print("evaluating on dev ", model_file, model_step) + evaluate(sess, dev_data, batch_size, graph, model_step) + else: + ckpt = tf.train.get_checkpoint_state(model_dir) + print("model dir: ", model_dir) + if (not (tf.gfile.IsDirectory(utility.FLAGS.output_dir))): + print("create dir: ", utility.FLAGS.output_dir) + 
tf.gfile.MkDir(utility.FLAGS.output_dir) + if (not (tf.gfile.IsDirectory(model_dir))): + print("create dir: ", model_dir) + tf.gfile.MkDir(model_dir) + Train(graph, utility, batch_size, train_data, sess, model_dir, + saver) + +def main(args): + utility = Utility() + train_name = "random-split-1-train.examples" + dev_name = "random-split-1-dev.examples" + test_name = "pristine-unseen-tables.examples" + #load data + dat = wiki_data.WikiQuestionGenerator(train_name, dev_name, test_name, FLAGS.data_dir) + train_data, dev_data, test_data = dat.load() + utility.words = [] + utility.word_ids = {} + utility.reverse_word_ids = {} + #construct vocabulary + data_utils.construct_vocab(train_data, utility) + data_utils.construct_vocab(dev_data, utility, True) + data_utils.construct_vocab(test_data, utility, True) + data_utils.add_special_words(utility) + data_utils.perform_word_cutoff(utility) + #convert data to int format and pad the inputs + train_data = data_utils.complete_wiki_processing(train_data, utility, True) + dev_data = data_utils.complete_wiki_processing(dev_data, utility, False) + test_data = data_utils.complete_wiki_processing(test_data, utility, False) + print("# train examples ", len(train_data)) + print("# dev examples ", len(dev_data)) + print("# test examples ", len(test_data)) + print("running open source") + #construct TF graph and train or evaluate + master(train_data, dev_data, utility) + + +if __name__ == "__main__": + tf.app.run() diff --git a/models/research/neural_programmer/nn_utils.py b/models/research/neural_programmer/nn_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2f3a1a98bf7f71631410fc88982b336d33a02f52 --- /dev/null +++ b/models/research/neural_programmer/nn_utils.py @@ -0,0 +1,68 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Author: aneelakantan (Arvind Neelakantan) +""" + +import tensorflow as tf + +def get_embedding(word, utility, params): + return tf.nn.embedding_lookup(params["word"], word) + + +def apply_dropout(x, dropout_rate, mode): + if (dropout_rate > 0.0): + if (mode == "train"): + x = tf.nn.dropout(x, dropout_rate) + else: + x = x + return x + + +def LSTMCell(x, mprev, cprev, key, params): + """Create an LSTM cell. + + Implements the equations in pg.2 from + "Long Short-Term Memory Based Recurrent Neural Network Architectures + For Large Vocabulary Speech Recognition", + Hasim Sak, Andrew Senior, Francoise Beaufays. + + Args: + w: A dictionary of the weights and optional biases as returned + by LSTMParametersSplit(). + x: Inputs to this cell. + mprev: m_{t-1}, the recurrent activations (same as the output) + from the previous cell. + cprev: c_{t-1}, the cell activations from the previous cell. + keep_prob: Keep probability on the input and the outputs of a cell. + + Returns: + m: Outputs of this cell. + c: Cell Activations. 
+ """ + + i = tf.matmul(x, params[key + "_ix"]) + tf.matmul(mprev, params[key + "_im"]) + i = tf.nn.bias_add(i, params[key + "_i"]) + f = tf.matmul(x, params[key + "_fx"]) + tf.matmul(mprev, params[key + "_fm"]) + f = tf.nn.bias_add(f, params[key + "_f"]) + c = tf.matmul(x, params[key + "_cx"]) + tf.matmul(mprev, params[key + "_cm"]) + c = tf.nn.bias_add(c, params[key + "_c"]) + o = tf.matmul(x, params[key + "_ox"]) + tf.matmul(mprev, params[key + "_om"]) + o = tf.nn.bias_add(o, params[key + "_o"]) + i = tf.sigmoid(i, name="i_gate") + f = tf.sigmoid(f, name="f_gate") + o = tf.sigmoid(o, name="o_gate") + c = f * cprev + i * tf.tanh(c) + m = o * c + return m, c diff --git a/models/research/neural_programmer/parameters.py b/models/research/neural_programmer/parameters.py new file mode 100644 index 0000000000000000000000000000000000000000..c576ae822b2d93c561381e27fe65afd2902b564e --- /dev/null +++ b/models/research/neural_programmer/parameters.py @@ -0,0 +1,89 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Author: aneelakantan (Arvind Neelakantan) +""" + +import numpy as np +import tensorflow as tf + + +class Parameters: + + def __init__(self, u): + self.utility = u + self.init_seed_counter = 0 + self.word_init = {} + + def parameters(self, utility): + params = {} + inits = [] + embedding_dims = self.utility.FLAGS.embedding_dims + params["unit"] = tf.Variable( + self.RandomUniformInit([len(utility.operations_set), embedding_dims])) + params["word"] = tf.Variable( + self.RandomUniformInit([utility.FLAGS.vocab_size, embedding_dims])) + params["word_match_feature_column_name"] = tf.Variable( + self.RandomUniformInit([1])) + params["controller"] = tf.Variable( + self.RandomUniformInit([2 * embedding_dims, embedding_dims])) + params["column_controller"] = tf.Variable( + self.RandomUniformInit([2 * embedding_dims, embedding_dims])) + params["column_controller_prev"] = tf.Variable( + self.RandomUniformInit([embedding_dims, embedding_dims])) + params["controller_prev"] = tf.Variable( + self.RandomUniformInit([embedding_dims, embedding_dims])) + global_step = tf.Variable(1, name="global_step") + #weigths of question and history RNN (or LSTM) + key_list = ["question_lstm"] + for key in key_list: + # Weights going from inputs to nodes. + for wgts in ["ix", "fx", "cx", "ox"]: + params[key + "_" + wgts] = tf.Variable( + self.RandomUniformInit([embedding_dims, embedding_dims])) + # Weights going from nodes to nodes. + for wgts in ["im", "fm", "cm", "om"]: + params[key + "_" + wgts] = tf.Variable( + self.RandomUniformInit([embedding_dims, embedding_dims])) + #Biases for the gates and cell + for bias in ["i", "f", "c", "o"]: + if (bias == "f"): + print("forget gate bias") + params[key + "_" + bias] = tf.Variable( + tf.random_uniform([embedding_dims], 1.0, 1.1, self.utility. 
+ tf_data_type[self.utility.FLAGS.data_type])) + else: + params[key + "_" + bias] = tf.Variable( + self.RandomUniformInit([embedding_dims])) + params["history_recurrent"] = tf.Variable( + self.RandomUniformInit([3 * embedding_dims, embedding_dims])) + params["history_recurrent_bias"] = tf.Variable( + self.RandomUniformInit([1, embedding_dims])) + params["break_conditional"] = tf.Variable( + self.RandomUniformInit([2 * embedding_dims, embedding_dims])) + init = tf.global_variables_initializer() + return params, global_step, init + + def RandomUniformInit(self, shape): + """Returns a RandomUniform Tensor between -param_init and param_init.""" + param_seed = self.utility.FLAGS.param_seed + self.init_seed_counter += 1 + return tf.random_uniform( + shape, -1.0 * + (np.float32(self.utility.FLAGS.param_init) + ).astype(self.utility.np_data_type[self.utility.FLAGS.data_type]), + (np.float32(self.utility.FLAGS.param_init) + ).astype(self.utility.np_data_type[self.utility.FLAGS.data_type]), + self.utility.tf_data_type[self.utility.FLAGS.data_type], + param_seed + self.init_seed_counter) diff --git a/models/research/neural_programmer/wiki_data.py b/models/research/neural_programmer/wiki_data.py new file mode 100644 index 0000000000000000000000000000000000000000..c91637ca1ae537526ebddf4408b0fccd22d0f5e1 --- /dev/null +++ b/models/research/neural_programmer/wiki_data.py @@ -0,0 +1,532 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Loads the WikiQuestions dataset. + +An example consists of question, table. Additionally, we store the processed +columns which store the entries after performing number, date and other +preprocessing as done in the baseline. +columns, column names and processed columns are split into word and number +columns. 
+lookup answer (or matrix) is also split into number and word lookup matrix +Author: aneelakantan (Arvind Neelakantan) +""" +from __future__ import print_function + +import math +import os +import re +import numpy as np +import unicodedata as ud +import tensorflow as tf + +bad_number = -200000.0 #number that is added to a corrupted table entry in a number column + +def is_nan_or_inf(number): + return math.isnan(number) or math.isinf(number) + +def strip_accents(s): + u = unicode(s, "utf-8") + u_new = ''.join(c for c in ud.normalize('NFKD', u) if ud.category(c) != 'Mn') + return u_new.encode("utf-8") + + +def correct_unicode(string): + string = strip_accents(string) + string = re.sub("\xc2\xa0", " ", string).strip() + string = re.sub("\xe2\x80\x93", "-", string).strip() + #string = re.sub(ur'[\u0300-\u036F]', "", string) + string = re.sub("‚", ",", string) + string = re.sub("…", "...", string) + #string = re.sub("[·・]", ".", string) + string = re.sub("ˆ", "^", string) + string = re.sub("Ëœ", "~", string) + string = re.sub("‹", "<", string) + string = re.sub("›", ">", string) + #string = re.sub("[‘’´`]", "'", string) + #string = re.sub("[“”«»]", "\"", string) + #string = re.sub("[•†‡]", "", string) + #string = re.sub("[‐‑–—]", "-", string) + string = re.sub(r'[\u2E00-\uFFFF]', "", string) + string = re.sub("\\s+", " ", string).strip() + return string + + +def simple_normalize(string): + string = correct_unicode(string) + # Citations + string = re.sub("\[(nb ?)?\d+\]", "", string) + string = re.sub("\*+$", "", string) + # Year in parenthesis + string = re.sub("\(\d* ?-? ?\d*\)", "", string) + string = re.sub("^\"(.*)\"$", "", string) + return string + + +def full_normalize(string): + #print "an: ", string + string = simple_normalize(string) + # Remove trailing info in brackets + string = re.sub("\[[^\]]*\]", "", string) + # Remove most unicode characters in other languages + string = re.sub(r'[\u007F-\uFFFF]', "", string.strip()) + # Remove trailing info in parenthesis + string = re.sub("\([^)]*\)$", "", string.strip()) + string = final_normalize(string) + # Get rid of question marks + string = re.sub("\?", "", string).strip() + # Get rid of trailing colons (usually occur in column titles) + string = re.sub("\:$", " ", string).strip() + # Get rid of slashes + string = re.sub(r"/", " ", string).strip() + string = re.sub(r"\\", " ", string).strip() + # Replace colon, slash, and dash with space + # Note: need better replacement for this when parsing time + string = re.sub(r"\:", " ", string).strip() + string = re.sub("/", " ", string).strip() + string = re.sub("-", " ", string).strip() + # Convert empty strings to UNK + # Important to do this last or near last + if not string: + string = "UNK" + return string + +def final_normalize(string): + # Remove leading and trailing whitespace + string = re.sub("\\s+", " ", string).strip() + # Convert entirely to lowercase + string = string.lower() + # Get rid of strangely escaped newline characters + string = re.sub("\\\\n", " ", string).strip() + # Get rid of quotation marks + string = re.sub(r"\"", "", string).strip() + string = re.sub(r"\'", "", string).strip() + string = re.sub(r"`", "", string).strip() + # Get rid of * + string = re.sub("\*", "", string).strip() + return string + +def is_number(x): + try: + f = float(x) + return not is_nan_or_inf(f) + except ValueError: + return False + except TypeError: + return False + + +class WikiExample(object): + + def __init__(self, id, question, answer, table_key): + self.question_id = id + self.question = 
question + self.answer = answer + self.table_key = table_key + self.lookup_matrix = [] + self.is_bad_example = False + self.is_word_lookup = False + self.is_ambiguous_word_lookup = False + self.is_number_lookup = False + self.is_number_calc = False + self.is_unknown_answer = False + + +class TableInfo(object): + + def __init__(self, word_columns, word_column_names, word_column_indices, + number_columns, number_column_names, number_column_indices, + processed_word_columns, processed_number_columns, orig_columns): + self.word_columns = word_columns + self.word_column_names = word_column_names + self.word_column_indices = word_column_indices + self.number_columns = number_columns + self.number_column_names = number_column_names + self.number_column_indices = number_column_indices + self.processed_word_columns = processed_word_columns + self.processed_number_columns = processed_number_columns + self.orig_columns = orig_columns + + +class WikiQuestionLoader(object): + + def __init__(self, data_name, root_folder): + self.root_folder = root_folder + self.data_folder = os.path.join(self.root_folder, "data") + self.examples = [] + self.data_name = data_name + + def num_questions(self): + return len(self.examples) + + def load_qa(self): + data_source = os.path.join(self.data_folder, self.data_name) + f = tf.gfile.GFile(data_source, "r") + id_regex = re.compile("\(id ([^\)]*)\)") + for line in f: + id_match = id_regex.search(line) + id = id_match.group(1) + self.examples.append(id) + + def load(self): + self.load_qa() + + +def is_date(word): + if (not (bool(re.search("[a-z0-9]", word, re.IGNORECASE)))): + return False + if (len(word) != 10): + return False + if (word[4] != "-"): + return False + if (word[7] != "-"): + return False + for i in range(len(word)): + if (not (word[i] == "X" or word[i] == "x" or word[i] == "-" or re.search( + "[0-9]", word[i]))): + return False + return True + + +class WikiQuestionGenerator(object): + + def __init__(self, train_name, dev_name, test_name, root_folder): + self.train_name = train_name + self.dev_name = dev_name + self.test_name = test_name + self.train_loader = WikiQuestionLoader(train_name, root_folder) + self.dev_loader = WikiQuestionLoader(dev_name, root_folder) + self.test_loader = WikiQuestionLoader(test_name, root_folder) + self.bad_examples = 0 + self.root_folder = root_folder + self.data_folder = os.path.join(self.root_folder, "annotated/data") + self.annotated_examples = {} + self.annotated_tables = {} + self.annotated_word_reject = {} + self.annotated_word_reject["-lrb-"] = 1 + self.annotated_word_reject["-rrb-"] = 1 + self.annotated_word_reject["UNK"] = 1 + + def is_money(self, word): + if (not (bool(re.search("[a-z0-9]", word, re.IGNORECASE)))): + return False + for i in range(len(word)): + if (not (word[i] == "E" or word[i] == "." 
or re.search("[0-9]", + word[i]))): + return False + return True + + def remove_consecutive(self, ner_tags, ner_values): + for i in range(len(ner_tags)): + if ((ner_tags[i] == "NUMBER" or ner_tags[i] == "MONEY" or + ner_tags[i] == "PERCENT" or ner_tags[i] == "DATE") and + i + 1 < len(ner_tags) and ner_tags[i] == ner_tags[i + 1] and + ner_values[i] == ner_values[i + 1] and ner_values[i] != ""): + word = ner_values[i] + word = word.replace(">", "").replace("<", "").replace("=", "").replace( + "%", "").replace("~", "").replace("$", "").replace("£", "").replace( + "€", "") + if (re.search("[A-Z]", word) and not (is_date(word)) and not ( + self.is_money(word))): + ner_values[i] = "A" + else: + ner_values[i] = "," + return ner_tags, ner_values + + def pre_process_sentence(self, tokens, ner_tags, ner_values): + sentence = [] + tokens = tokens.split("|") + ner_tags = ner_tags.split("|") + ner_values = ner_values.split("|") + ner_tags, ner_values = self.remove_consecutive(ner_tags, ner_values) + #print "old: ", tokens + for i in range(len(tokens)): + word = tokens[i] + if (ner_values[i] != "" and + (ner_tags[i] == "NUMBER" or ner_tags[i] == "MONEY" or + ner_tags[i] == "PERCENT" or ner_tags[i] == "DATE")): + word = ner_values[i] + word = word.replace(">", "").replace("<", "").replace("=", "").replace( + "%", "").replace("~", "").replace("$", "").replace("£", "").replace( + "€", "") + if (re.search("[A-Z]", word) and not (is_date(word)) and not ( + self.is_money(word))): + word = tokens[i] + if (is_number(ner_values[i])): + word = float(ner_values[i]) + elif (is_number(word)): + word = float(word) + if (tokens[i] == "score"): + word = "score" + if (is_number(word)): + word = float(word) + if (not (self.annotated_word_reject.has_key(word))): + if (is_number(word) or is_date(word) or self.is_money(word)): + sentence.append(word) + else: + word = full_normalize(word) + if (not (self.annotated_word_reject.has_key(word)) and + bool(re.search("[a-z0-9]", word, re.IGNORECASE))): + m = re.search(",", word) + sentence.append(word.replace(",", "")) + if (len(sentence) == 0): + sentence.append("UNK") + return sentence + + def load_annotated_data(self, in_file): + self.annotated_examples = {} + self.annotated_tables = {} + f = tf.gfile.GFile(in_file, "r") + counter = 0 + for line in f: + if (counter > 0): + line = line.strip() + (question_id, utterance, context, target_value, tokens, lemma_tokens, + pos_tags, ner_tags, ner_values, target_canon) = line.split("\t") + question = self.pre_process_sentence(tokens, ner_tags, ner_values) + target_canon = target_canon.split("|") + self.annotated_examples[question_id] = WikiExample( + question_id, question, target_canon, context) + self.annotated_tables[context] = [] + counter += 1 + print("Annotated examples loaded ", len(self.annotated_examples)) + f.close() + + def is_number_column(self, a): + for w in a: + if (len(w) != 1): + return False + if (not (is_number(w[0]))): + return False + return True + + def convert_table(self, table): + answer = [] + for i in range(len(table)): + temp = [] + for j in range(len(table[i])): + temp.append(" ".join([str(w) for w in table[i][j]])) + answer.append(temp) + return answer + + def load_annotated_tables(self): + for table in self.annotated_tables.keys(): + annotated_table = table.replace("csv", "annotated") + orig_columns = [] + processed_columns = [] + f = tf.gfile.GFile(os.path.join(self.root_folder, annotated_table), "r") + counter = 0 + for line in f: + if (counter > 0): + line = line.strip() + line = line + "\t" * (13 - 
len(line.split("\t"))) + (row, col, read_id, content, tokens, lemma_tokens, pos_tags, ner_tags, + ner_values, number, date, num2, read_list) = line.split("\t") + counter += 1 + f.close() + max_row = int(row) + max_col = int(col) + for i in range(max_col + 1): + orig_columns.append([]) + processed_columns.append([]) + for j in range(max_row + 1): + orig_columns[i].append(bad_number) + processed_columns[i].append(bad_number) + #print orig_columns + f = tf.gfile.GFile(os.path.join(self.root_folder, annotated_table), "r") + counter = 0 + column_names = [] + for line in f: + if (counter > 0): + line = line.strip() + line = line + "\t" * (13 - len(line.split("\t"))) + (row, col, read_id, content, tokens, lemma_tokens, pos_tags, ner_tags, + ner_values, number, date, num2, read_list) = line.split("\t") + entry = self.pre_process_sentence(tokens, ner_tags, ner_values) + if (row == "-1"): + column_names.append(entry) + else: + orig_columns[int(col)][int(row)] = entry + if (len(entry) == 1 and is_number(entry[0])): + processed_columns[int(col)][int(row)] = float(entry[0]) + else: + for single_entry in entry: + if (is_number(single_entry)): + processed_columns[int(col)][int(row)] = float(single_entry) + break + nt = ner_tags.split("|") + nv = ner_values.split("|") + for i_entry in range(len(tokens.split("|"))): + if (nt[i_entry] == "DATE" and + is_number(nv[i_entry].replace("-", "").replace("X", ""))): + processed_columns[int(col)][int(row)] = float(nv[ + i_entry].replace("-", "").replace("X", "")) + #processed_columns[int(col)][int(row)] = float(nv[i_entry]) + if (len(entry) == 1 and (is_number(entry[0]) or is_date(entry[0]) or + self.is_money(entry[0]))): + if (len(entry) == 1 and not (is_number(entry[0])) and + is_date(entry[0])): + entry[0] = entry[0].replace("X", "x") + counter += 1 + word_columns = [] + processed_word_columns = [] + word_column_names = [] + word_column_indices = [] + number_columns = [] + processed_number_columns = [] + number_column_names = [] + number_column_indices = [] + for i in range(max_col + 1): + if (self.is_number_column(orig_columns[i])): + number_column_indices.append(i) + number_column_names.append(column_names[i]) + temp = [] + for w in orig_columns[i]: + if (is_number(w[0])): + temp.append(w[0]) + number_columns.append(temp) + processed_number_columns.append(processed_columns[i]) + else: + word_column_indices.append(i) + word_column_names.append(column_names[i]) + word_columns.append(orig_columns[i]) + processed_word_columns.append(processed_columns[i]) + table_info = TableInfo( + word_columns, word_column_names, word_column_indices, number_columns, + number_column_names, number_column_indices, processed_word_columns, + processed_number_columns, orig_columns) + self.annotated_tables[table] = table_info + f.close() + + def answer_classification(self): + lookup_questions = 0 + number_lookup_questions = 0 + word_lookup_questions = 0 + ambiguous_lookup_questions = 0 + number_questions = 0 + bad_questions = 0 + ice_bad_questions = 0 + tot = 0 + got = 0 + ice = {} + with tf.gfile.GFile( + self.root_folder + "/arvind-with-norms-2.tsv", mode="r") as f: + lines = f.readlines() + for line in lines: + line = line.strip() + if (not (self.annotated_examples.has_key(line.split("\t")[0]))): + continue + if (len(line.split("\t")) == 4): + line = line + "\t" * (5 - len(line.split("\t"))) + if (not (is_number(line.split("\t")[2]))): + ice_bad_questions += 1 + (example_id, ans_index, ans_raw, process_answer, + matched_cells) = line.split("\t") + if (ice.has_key(example_id)): + 
ice[example_id].append(line.split("\t")) + else: + ice[example_id] = [line.split("\t")] + for q_id in self.annotated_examples.keys(): + tot += 1 + example = self.annotated_examples[q_id] + table_info = self.annotated_tables[example.table_key] + # Figure out if the answer is numerical or lookup + n_cols = len(table_info.orig_columns) + n_rows = len(table_info.orig_columns[0]) + example.lookup_matrix = np.zeros((n_rows, n_cols)) + exact_matches = {} + for (example_id, ans_index, ans_raw, process_answer, + matched_cells) in ice[q_id]: + for match_cell in matched_cells.split("|"): + if (len(match_cell.split(",")) == 2): + (row, col) = match_cell.split(",") + row = int(row) + col = int(col) + if (row >= 0): + exact_matches[ans_index] = 1 + answer_is_in_table = len(exact_matches) == len(example.answer) + if (answer_is_in_table): + for (example_id, ans_index, ans_raw, process_answer, + matched_cells) in ice[q_id]: + for match_cell in matched_cells.split("|"): + if (len(match_cell.split(",")) == 2): + (row, col) = match_cell.split(",") + row = int(row) + col = int(col) + example.lookup_matrix[row, col] = float(ans_index) + 1.0 + example.lookup_number_answer = 0.0 + if (answer_is_in_table): + lookup_questions += 1 + if len(example.answer) == 1 and is_number(example.answer[0]): + example.number_answer = float(example.answer[0]) + number_lookup_questions += 1 + example.is_number_lookup = True + else: + #print "word lookup" + example.calc_answer = example.number_answer = 0.0 + word_lookup_questions += 1 + example.is_word_lookup = True + else: + if (len(example.answer) == 1 and is_number(example.answer[0])): + example.number_answer = example.answer[0] + example.is_number_calc = True + else: + bad_questions += 1 + example.is_bad_example = True + example.is_unknown_answer = True + example.is_lookup = example.is_word_lookup or example.is_number_lookup + if not example.is_word_lookup and not example.is_bad_example: + number_questions += 1 + example.calc_answer = example.answer[0] + example.lookup_number_answer = example.calc_answer + # Split up the lookup matrix into word part and number part + number_column_indices = table_info.number_column_indices + word_column_indices = table_info.word_column_indices + example.word_columns = table_info.word_columns + example.number_columns = table_info.number_columns + example.word_column_names = table_info.word_column_names + example.processed_number_columns = table_info.processed_number_columns + example.processed_word_columns = table_info.processed_word_columns + example.number_column_names = table_info.number_column_names + example.number_lookup_matrix = example.lookup_matrix[:, + number_column_indices] + example.word_lookup_matrix = example.lookup_matrix[:, word_column_indices] + + def load(self): + train_data = [] + dev_data = [] + test_data = [] + self.load_annotated_data( + os.path.join(self.data_folder, "training.annotated")) + self.load_annotated_tables() + self.answer_classification() + self.train_loader.load() + self.dev_loader.load() + for i in range(self.train_loader.num_questions()): + example = self.train_loader.examples[i] + example = self.annotated_examples[example] + train_data.append(example) + for i in range(self.dev_loader.num_questions()): + example = self.dev_loader.examples[i] + dev_data.append(self.annotated_examples[example]) + + self.load_annotated_data( + os.path.join(self.data_folder, "pristine-unseen-tables.annotated")) + self.load_annotated_tables() + self.answer_classification() + self.test_loader.load() + for i in 
range(self.test_loader.num_questions()):
+ example = self.test_loader.examples[i]
+ test_data.append(self.annotated_examples[example])
+ return train_data, dev_data, test_data
diff --git a/models/research/next_frame_prediction/README.md b/models/research/next_frame_prediction/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9aa9b6fc5a3146a5e24ce53422d985570891d42b
--- /dev/null
+++ b/models/research/next_frame_prediction/README.md
@@ -0,0 +1,89 @@
+![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg)
+![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen)
+![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg)
+
+Visual Dynamics: Probabilistic Future Frame Synthesis via Cross Convolutional Networks.
+
+Introduction
+
+https://arxiv.org/pdf/1607.02586v1.pdf
+
+This is an implementation based on my understanding, with small
+variations. It doesn't necessarily represent the paper published
+by the original authors.
+
+Authors: Xin Pan, Anelia Angelova
+
+Results:
+
+![Sample1](g3doc/cross_conv.png)
+
+![Sample2](g3doc/cross_conv2.png)
+
+![Loss](g3doc/cross_conv3.png)
+
+Prerequisites:
+
+1. Install TensorFlow (r0.12) and Bazel.
+
+2. Download the Sprites dataset or generate the moving object dataset.
+
+Sprites data is located here:
+
+http://www.scottreed.info/files/nips2015-analogy-data.tar.gz
+
+Convert the .mat files into images and use sprites_gen.py to convert them
+to tf.SequenceExample.
+
+How to run:
+
+```shell
+$ ls -R
+.:
+data next_frame_prediction WORKSPACE
+
+./data:
+tfrecords tfrecords_test
+
+./next_frame_prediction:
+cross_conv g3doc README.md
+
+./next_frame_prediction/cross_conv:
+BUILD eval.py objects_gen.py model.py reader.py sprites_gen.py train.py
+
+./next_frame_prediction/g3doc:
+cross_conv2.png cross_conv3.png cross_conv.png
+
+
+# Build everything.
+$ bazel build -c opt next_frame_prediction/...
+
+# The following example runs the generated 2d objects.
+# For the Sprites dataset, image_size should be 60 and norm_scale should be 255.0.
+# Batch size is normally 16~64, depending on your memory size.
+
+# Run training.
+$ bazel-bin/next_frame_prediction/cross_conv/train \
+ --batch_size=1 \
+ --data_filepattern=data/tfrecords \
+ --image_size=64 \
+ --log_root=/tmp/predict
+
+step: 1, loss: 24.428671
+step: 2, loss: 19.211605
+step: 3, loss: 5.543143
+step: 4, loss: 3.035339
+step: 5, loss: 1.771392
+step: 6, loss: 2.099824
+step: 7, loss: 1.747665
+step: 8, loss: 1.572436
+step: 9, loss: 1.586816
+step: 10, loss: 1.434191
+
+# Run eval.
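+# (Eval reads the z_mean.npy and z_stddev_log.npy files that training writes under --log_root, so start training first.)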
+$ bazel-bin/next_frame_prediction/cross_conv/eval \ + --batch_size=1 \ + --data_filepattern=data/tfrecords_test \ + --image_size=64 \ + --log_root=/tmp/predict +``` diff --git a/models/research/next_frame_prediction/cross_conv/BUILD b/models/research/next_frame_prediction/cross_conv/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..b435087f34f6ffbeba016119c60724d8ac3eb180 --- /dev/null +++ b/models/research/next_frame_prediction/cross_conv/BUILD @@ -0,0 +1,48 @@ +licenses(["notice"]) # Apache 2.0 + +package_group( + name = "internal", + packages = [ + "//next_frame_prediction/...", + ], +) + +package(default_visibility = [":internal"]) + +py_library( + name = "model", + srcs = ["model.py"], +) + +py_library( + name = "reader", + srcs = ["reader.py"], +) + +py_binary( + name = "train", + srcs = ["train.py"], + deps = [ + ":model", + ":reader", + ], +) + +py_binary( + name = "eval", + srcs = ["eval.py"], + deps = [ + ":model", + ":reader", + ], +) + +py_binary( + name = "example_gen", + srcs = ["example_gen.py"], +) + +py_binary( + name = "sprites_gen", + srcs = ["sprites_gen.py"], +) diff --git a/models/research/next_frame_prediction/cross_conv/eval.py b/models/research/next_frame_prediction/cross_conv/eval.py new file mode 100644 index 0000000000000000000000000000000000000000..17ebc0e0edd2911f828cbb145ee40a06db8795b5 --- /dev/null +++ b/models/research/next_frame_prediction/cross_conv/eval.py @@ -0,0 +1,119 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Eval Cross Convolutional Model.""" +import io +import os +import sys +import time + +import numpy as np +from six.moves import xrange +import tensorflow as tf + +import model as cross_conv_model +import reader + +FLAGS = tf.flags.FLAGS +tf.flags.DEFINE_string('log_root', '/tmp/moving_obj', 'The root dir of output.') +tf.flags.DEFINE_string('data_filepattern', + 'est', + 'training data file pattern.') +tf.flags.DEFINE_integer('batch_size', 1, 'Batch size.') +tf.flags.DEFINE_integer('image_size', 64, 'Image height and width.') +tf.flags.DEFINE_float('norm_scale', 1.0, 'Normalize the original image') +tf.flags.DEFINE_float('scale', 10.0, + 'Scale the image after norm_scale and move the diff ' + 'to the positive realm.') +tf.flags.DEFINE_integer('sequence_length', 2, 'tf.SequenceExample length.') +tf.flags.DEFINE_integer('eval_batch_count', 100, + 'Average the result this number of examples.') +tf.flags.DEFINE_bool('l2_loss', True, 'If true, include l2_loss.') +tf.flags.DEFINE_bool('reconstr_loss', False, 'If true, include reconstr_loss.') +tf.flags.DEFINE_bool('kl_loss', True, 'If true, include KL loss.') + +slim = tf.contrib.slim + + +def _Eval(): + params = dict() + params['batch_size'] = FLAGS.batch_size + params['seq_len'] = FLAGS.sequence_length + params['image_size'] = FLAGS.image_size + params['is_training'] = False + params['norm_scale'] = FLAGS.norm_scale + params['scale'] = FLAGS.scale + params['l2_loss'] = FLAGS.l2_loss + params['reconstr_loss'] = FLAGS.reconstr_loss + params['kl_loss'] = FLAGS.kl_loss + + eval_dir = os.path.join(FLAGS.log_root, 'eval') + + images = reader.ReadInput( + FLAGS.data_filepattern, shuffle=False, params=params) + images *= params['scale'] + # Increase the value makes training much faster. + image_diff_list = reader.SequenceToImageAndDiff(images) + model = cross_conv_model.CrossConvModel(image_diff_list, params) + model.Build() + + summary_writer = tf.summary.FileWriter(eval_dir) + saver = tf.train.Saver() + sess = tf.Session('', config=tf.ConfigProto(allow_soft_placement=True)) + tf.train.start_queue_runners(sess) + + while True: + time.sleep(60) + try: + ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root) + except tf.errors.OutOfRangeError as e: + sys.stderr.write('Cannot restore checkpoint: %s\n' % e) + continue + if not (ckpt_state and ckpt_state.model_checkpoint_path): + sys.stderr.write('No model to eval yet at %s\n' % FLAGS.log_root) + continue + sys.stderr.write('Loading checkpoint %s\n' % + ckpt_state.model_checkpoint_path) + saver.restore(sess, ckpt_state.model_checkpoint_path) + # Use the empirical distribution of z from training set. 
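+ # These statistics (z_mean.npy and z_stddev_log.npy) are written periodically by train.py; if they are not available yet, skip this round and retry later.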
+ if not tf.gfile.Exists(os.path.join(FLAGS.log_root, 'z_mean.npy')): + sys.stderr.write('No z at %s\n' % FLAGS.log_root) + continue + + with tf.gfile.Open(os.path.join(FLAGS.log_root, 'z_mean.npy')) as f: + sample_z_mean = np.load(io.BytesIO(f.read())) + with tf.gfile.Open( + os.path.join(FLAGS.log_root, 'z_stddev_log.npy')) as f: + sample_z_stddev_log = np.load(io.BytesIO(f.read())) + + total_loss = 0.0 + for _ in xrange(FLAGS.eval_batch_count): + loss_val, total_steps, summaries = sess.run( + [model.loss, model.global_step, model.summary_op], + feed_dict={model.z_mean: sample_z_mean, + model.z_stddev_log: sample_z_stddev_log}) + total_loss += loss_val + + summary_writer.add_summary(summaries, total_steps) + sys.stderr.write('steps: %d, loss: %f\n' % + (total_steps, total_loss / FLAGS.eval_batch_count)) + + +def main(_): + _Eval() + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/next_frame_prediction/cross_conv/example_gen.py b/models/research/next_frame_prediction/cross_conv/example_gen.py new file mode 100644 index 0000000000000000000000000000000000000000..bcda0bc405a60c3116e8c488cae92f502720fec4 --- /dev/null +++ b/models/research/next_frame_prediction/cross_conv/example_gen.py @@ -0,0 +1,93 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Generate examples of two objects moving in different directions.""" +import random +import sys + +import numpy as np +from six.moves import xrange +import tensorflow as tf + + +tf.flags.DEFINE_string('out_file', '', + 'Output file for the tfrecords.') + + +def _add_object(obj_type, image, image2, xpos, ypos): + """Add a moving obj to two consecutive images.""" + obj_size = random.randint(8, 10) + channel = random.randint(0, 2) + move = random.randint(6, 10) + + obj = np.zeros([obj_size, obj_size, 3]) + if obj_type == 'rectangle': + xpos2 = xpos + move + ypos2 = ypos + for i in xrange(obj_size): + obj[i, 0:i+1, channel] = [1.0 for _ in xrange(i+1)] + elif obj_type == 'square': + xpos2 = xpos + ypos2 = ypos + move + obj[:, :, channel] = 1.0 + + for x in xrange(obj_size): + for y in xrange(obj_size): + if obj[x, y, channel] == 1.0: + image[xpos+x, ypos+y, channel] = 1.0 + image2[xpos2+x, ypos2+y, channel] = 1.0 + + +def _images_to_example(image, image2): + """Convert two consecutive images to SequenceExample.""" + example = tf.SequenceExample() + feature_list = example.feature_lists.feature_list['moving_objs'] + feature = feature_list.feature.add() + feature.float_list.value.extend(np.reshape(image, [-1]).tolist()) + feature = feature_list.feature.add() + feature.float_list.value.extend(np.reshape(image2, [-1]).tolist()) + return example + + +def generate_input(): + """Generate tfrecords.""" + writer = tf.python_io.TFRecordWriter(tf.flags.FLAGS.out_file) + writer2 = tf.python_io.TFRecordWriter(tf.flags.FLAGS.out_file + '_test') + + examples = [] + for xpos in xrange(0, 40, 3): + for ypos in xrange(0, 40, 3): + for xpos2 in xrange(0, 40, 3): + for ypos2 in xrange(0, 40, 3): + image = np.zeros([64, 64, 3]) + image2 = np.zeros([64, 64, 3]) + _add_object('rectangle', image, image2, xpos, ypos) + _add_object('square', image, image2, xpos2, ypos2) + examples.append(_images_to_example(image, image2)) + + sys.stderr.write('Finish generating examples.\n') + random.shuffle(examples) + for count, ex in enumerate(examples): + if count % 10 == 0: + writer2.write(ex.SerializeToString()) + else: + writer.write(ex.SerializeToString()) + +def main(_): + generate_input() + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/next_frame_prediction/cross_conv/model.py b/models/research/next_frame_prediction/cross_conv/model.py new file mode 100644 index 0000000000000000000000000000000000000000..7b48e446e18b70fec87142f6834f33332287d02e --- /dev/null +++ b/models/research/next_frame_prediction/cross_conv/model.py @@ -0,0 +1,233 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Cross Convolutional Model. 
+ +https://arxiv.org/pdf/1607.02586v1.pdf +""" +import math +import sys + +from six.moves import xrange +import tensorflow as tf + +slim = tf.contrib.slim + + +class CrossConvModel(object): + + def __init__(self, image_diff_list, params): + """Constructor. + + Args: + image_diff_list: A list of (image, diff) tuples, with shape + [batch_size, image_size, image_size, 3] and image_sizes as + [32, 64, 128, 256]. + params: Dict of parameters. + """ + self.images = [i for (i, _) in image_diff_list] + # Move the diff to the positive realm. + self.diffs = [(d + params['scale']) / 2 for (i, d) in image_diff_list] + self.params = params + + def Build(self): + with tf.device('/gpu:0'): + with slim.arg_scope([slim.conv2d], + activation_fn=tf.nn.relu, + normalizer_fn=slim.batch_norm, + normalizer_params={'is_training': + self.params['is_training']}): + self._BuildMotionKernel() + encoded_images = self._BuildImageEncoder() + cross_conved_images = self._CrossConv(encoded_images) + self._BuildImageDecoder(cross_conved_images) + self._BuildLoss() + + image = self.images[1] + diff = self.diffs[1] + + self.global_step = tf.Variable(0, name='global_step', trainable=False) + + if self.params['is_training']: + self._BuildTrainOp() + + diff = diff * 2.0 - self.params['scale'] + diff_output = self.diff_output * 2.0 - self.params['scale'] + concat_image = tf.concat( + axis=1, values=[image, image + diff_output, image + diff, diff_output]) + tf.summary.image('origin_predict_expect_predictdiff', concat_image) + self.summary_op = tf.summary.merge_all() + return self.loss + + def _BuildTrainOp(self): + lrn_rate = tf.maximum( + 0.01, # min_lr_rate. + tf.train.exponential_decay( + self.params['learning_rate'], self.global_step, 10000, 0.5)) + tf.summary.scalar('learning rate', lrn_rate) + optimizer = tf.train.GradientDescentOptimizer(lrn_rate) + self.train_op = slim.learning.create_train_op( + self.loss, optimizer, global_step=self.global_step) + + def _BuildLoss(self): + # 1. reconstr_loss seems doesn't do better than l2 loss. + # 2. Only works when using reduce_mean. reduce_sum doesn't work. + # 3. It seems kl loss doesn't play an important role. 
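+ # The total loss below is the sum of whichever terms are enabled: an l2 penalty on the predicted diff, an optional reconstruction term, and a KL term on (z_mean, z_stddev).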
+ self.loss = 0 + with tf.variable_scope('loss'): + if self.params['l2_loss']: + l2_loss = tf.reduce_mean(tf.square(self.diff_output - self.diffs[1])) + tf.summary.scalar('l2_loss', l2_loss) + self.loss += l2_loss + if self.params['reconstr_loss']: + reconstr_loss = (-tf.reduce_mean( + self.diffs[1] * (1e-10 + self.diff_output) + + (1-self.diffs[1]) * tf.log(1e-10 + 1 - self.diff_output))) + reconstr_loss = tf.check_numerics(reconstr_loss, 'reconstr_loss') + tf.summary.scalar('reconstr_loss', reconstr_loss) + self.loss += reconstr_loss + if self.params['kl_loss']: + kl_loss = (0.5 * tf.reduce_mean( + tf.square(self.z_mean) + tf.square(self.z_stddev) - + 2 * self.z_stddev_log - 1)) + tf.summary.scalar('kl_loss', kl_loss) + self.loss += kl_loss + + tf.summary.scalar('loss', self.loss) + + def _BuildMotionKernel(self): + image = self.images[-2] + diff = self.diffs[-2] + shape = image.get_shape().as_list() + assert shape[1] == shape[2] and shape[1] == 128 + batch_size = shape[0] + + net = tf.concat(axis=3, values=[image, diff]) + with tf.variable_scope('motion_encoder'): + with slim.arg_scope([slim.conv2d], padding='VALID'): + net = slim.conv2d(net, 96, [5, 5], stride=1) + net = slim.max_pool2d(net, [2, 2]) + net = slim.conv2d(net, 96, [5, 5], stride=1) + net = slim.max_pool2d(net, [2, 2]) + net = slim.conv2d(net, 128, [5, 5], stride=1) + net = slim.conv2d(net, 128, [5, 5], stride=1) + net = slim.max_pool2d(net, [2, 2]) + net = slim.conv2d(net, 256, [4, 4], stride=1) + net = slim.conv2d(net, 256, [3, 3], stride=1) + + z = tf.reshape(net, shape=[batch_size, -1]) + self.z_mean, self.z_stddev_log = tf.split( + axis=1, num_or_size_splits=2, value=z) + self.z_stddev = tf.exp(self.z_stddev_log) + + epsilon = tf.random_normal( + self.z_mean.get_shape().as_list(), 0, 1, dtype=tf.float32) + kernel = self.z_mean + tf.multiply(self.z_stddev, epsilon) + + width = int(math.sqrt(kernel.get_shape().as_list()[1] // 128)) + kernel = tf.reshape(kernel, [batch_size, width, width, 128]) + with tf.variable_scope('kernel_decoder'): + with slim.arg_scope([slim.conv2d], padding='SAME'): + kernel = slim.conv2d(kernel, 128, [5, 5], stride=1) + self.kernel = slim.conv2d(kernel, 128, [5, 5], stride=1) + + sys.stderr.write('kernel shape: %s\n' % kernel.get_shape()) + + def _BuildImageEncoder(self): + feature_maps = [] + for (i, image) in enumerate(self.images): + with tf.variable_scope('image_encoder_%d' % i): + with slim.arg_scope([slim.conv2d, slim.max_pool2d], padding='SAME'): + net = slim.conv2d(image, 64, [5, 5], stride=1) + net = slim.conv2d(net, 64, [5, 5], stride=1) + net = slim.max_pool2d(net, [5, 5]) + net = slim.conv2d(net, 64, [5, 5], stride=1) + net = slim.conv2d(net, 32, [5, 5], stride=1) + net = slim.max_pool2d(net, [2, 2]) + sys.stderr.write('image_conv shape: %s\n' % net.get_shape()) + feature_maps.append(net) + return feature_maps + + def _CrossConvHelper(self, encoded_image, kernel): + """Cross Convolution. + + The encoded image and kernel are of the same shape. Namely + [batch_size, image_size, image_size, channels]. They are split + into [image_size, image_size] image squares [kernel_size, kernel_size] + kernel squares. kernel squares are used to convolute image squares. 
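+ Each example's kernel is applied to its own encoded image via a depthwise convolution.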
+ """ + images = tf.expand_dims(encoded_image, 0) + kernels = tf.expand_dims(kernel, 3) + return tf.nn.depthwise_conv2d(images, kernels, [1, 1, 1, 1], 'SAME') + + def _CrossConv(self, encoded_images): + """Apply the motion kernel on the encoded_images.""" + cross_conved_images = [] + kernels = tf.split(axis=3, num_or_size_splits=4, value=self.kernel) + for (i, encoded_image) in enumerate(encoded_images): + with tf.variable_scope('cross_conv_%d' % i): + kernel = kernels[i] + + encoded_image = tf.unstack(encoded_image, axis=0) + kernel = tf.unstack(kernel, axis=0) + assert len(encoded_image) == len(kernel) + assert len(encoded_image) == self.params['batch_size'] + conved_image = [] + for j in xrange(len(encoded_image)): + conved_image.append(self._CrossConvHelper( + encoded_image[j], kernel[j])) + cross_conved_images.append(tf.concat(axis=0, values=conved_image)) + sys.stderr.write('cross_conved shape: %s\n' % + cross_conved_images[-1].get_shape()) + return cross_conved_images + + def _Deconv(self, net, out_filters, kernel_size, stride): + shape = net.get_shape().as_list() + in_filters = shape[3] + kernel_shape = [kernel_size, kernel_size, out_filters, in_filters] + + weights = tf.get_variable( + name='weights', + shape=kernel_shape, + dtype=tf.float32, + initializer=tf.truncated_normal_initializer(stddev=0.01)) + + + out_height = shape[1] * stride + out_width = shape[2] * stride + batch_size = shape[0] + + output_shape = [batch_size, out_height, out_width, out_filters] + net = tf.nn.conv2d_transpose(net, weights, output_shape, + [1, stride, stride, 1], padding='SAME') + slim.batch_norm(net) + return net + + def _BuildImageDecoder(self, cross_conved_images): + """Decode the cross_conved feature maps into the predicted images.""" + nets = [] + for i, cross_conved_image in enumerate(cross_conved_images): + with tf.variable_scope('image_decoder_%d' % i): + stride = 64 / cross_conved_image.get_shape().as_list()[1] + # TODO(xpan): Alternative solution for upsampling? + nets.append(self._Deconv( + cross_conved_image, 64, kernel_size=3, stride=stride)) + + net = tf.concat(axis=3, values=nets) + net = slim.conv2d(net, 128, [9, 9], padding='SAME', stride=1) + net = slim.conv2d(net, 128, [1, 1], padding='SAME', stride=1) + net = slim.conv2d(net, 3, [1, 1], padding='SAME', stride=1) + self.diff_output = net + sys.stderr.write('diff_output shape: %s\n' % self.diff_output.get_shape()) diff --git a/models/research/next_frame_prediction/cross_conv/reader.py b/models/research/next_frame_prediction/cross_conv/reader.py new file mode 100644 index 0000000000000000000000000000000000000000..ab4ab698dda938f182be0019168aa132c1e3c5af --- /dev/null +++ b/models/research/next_frame_prediction/cross_conv/reader.py @@ -0,0 +1,86 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Read image sequence.""" + +from six.moves import xrange +import tensorflow as tf + + +def SequenceToImageAndDiff(images): + """Convert image sequence batch into image and diff batch. + + Each image pair is converted to the first image and their diff. + Batch size will increase if sequence length is larger than 2. + + Args: + images: Image sequence with shape + [batch_size, seq_len, image_size, image_size, channel] + + Returns: + the list of (image, diff) tuples with shape + [batch_size2, image_size, image_size, channel]. image_sizes are + [32, 64, 128, 256]. + """ + image_diff_list = [] + image_seq = tf.unstack(images, axis=1) + for size in [32, 64, 128, 256]: + resized_images = [ + tf.image.resize_images(i, [size, size]) for i in image_seq] + diffs = [] + for i in xrange(0, len(resized_images)-1): + diffs.append(resized_images[i+1] - resized_images[i]) + image_diff_list.append( + (tf.concat(axis=0, values=resized_images[:-1]), tf.concat(axis=0, values=diffs))) + return image_diff_list + + +def ReadInput(data_filepattern, shuffle, params): + """Read the tf.SequenceExample tfrecord files. + + Args: + data_filepattern: tf.SequenceExample tfrecord filepattern. + shuffle: Whether to shuffle the examples. + params: parameter dict. + + Returns: + image sequence batch [batch_size, seq_len, image_size, image_size, channel]. + """ + image_size = params['image_size'] + filenames = tf.gfile.Glob(data_filepattern) + filename_queue = tf.train.string_input_producer(filenames, shuffle=shuffle) + reader = tf.TFRecordReader() + _, example = reader.read(filename_queue) + feature_sepc = { + 'moving_objs': tf.FixedLenSequenceFeature( + shape=[image_size * image_size * 3], dtype=tf.float32)} + _, features = tf.parse_single_sequence_example( + example, sequence_features=feature_sepc) + moving_objs = tf.reshape( + features['moving_objs'], [params['seq_len'], image_size, image_size, 3]) + if shuffle: + examples = tf.train.shuffle_batch( + [moving_objs], + batch_size=params['batch_size'], + num_threads=64, + capacity=params['batch_size'] * 100, + min_after_dequeue=params['batch_size'] * 4) + else: + examples = tf.train.batch([moving_objs], + batch_size=params['batch_size'], + num_threads=16, + capacity=params['batch_size']) + examples /= params['norm_scale'] + return examples diff --git a/models/research/next_frame_prediction/cross_conv/sprites_gen.py b/models/research/next_frame_prediction/cross_conv/sprites_gen.py new file mode 100644 index 0000000000000000000000000000000000000000..0d36c255cd93a90797272d7a80389f16fc6f3702 --- /dev/null +++ b/models/research/next_frame_prediction/cross_conv/sprites_gen.py @@ -0,0 +1,98 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Generate the sprites tfrecords from raw_images.""" +import os +import random +import re +import sys + +import numpy as np +import scipy.misc +from six.moves import xrange +import tensorflow as tf + + +tf.flags.DEFINE_string('data_filepattern', '', 'The raw images.') +tf.flags.DEFINE_string('out_file', '', + 'File name for the tfrecord output.') + + +def _read_images(): + """Read images from image files into data structure.""" + sprites = dict() + files = tf.gfile.Glob(tf.flags.FLAGS.data_filepattern) + for f in files: + image = scipy.misc.imread(f) + m = re.search('image_([0-9]+)_([0-9]+)_([0-9]+).jpg', os.path.basename(f)) + if m.group(1) not in sprites: + sprites[m.group(1)] = dict() + character = sprites[m.group(1)] + if m.group(2) not in character: + character[m.group(2)] = dict() + pose = character[m.group(2)] + pose[int(m.group(3))] = image + return sprites + + +def _images_to_example(image, image2): + """Convert 2 consecutive image to a SequenceExample.""" + example = tf.SequenceExample() + feature_list = example.feature_lists.feature_list['moving_objs'] + feature = feature_list.feature.add() + feature.float_list.value.extend(np.reshape(image, [-1]).tolist()) + feature = feature_list.feature.add() + feature.float_list.value.extend(np.reshape(image2, [-1]).tolist()) + return example + + +def generate_input(): + """Generate tfrecords.""" + sprites = _read_images() + sys.stderr.write('Finish reading images.\n') + train_writer = tf.python_io.TFRecordWriter( + tf.flags.FLAGS.out_file.replace('sprites', 'sprites_train')) + test_writer = tf.python_io.TFRecordWriter( + tf.flags.FLAGS.out_file.replace('sprites', 'sprites_test')) + + train_examples = [] + test_examples = [] + for i in sprites: + if int(i) < 24: + examples = test_examples + else: + examples = train_examples + + character = sprites[i] + for j in character.keys(): + pose = character[j] + for k in xrange(1, len(pose), 1): + image = pose[k] + image2 = pose[k+1] + examples.append(_images_to_example(image, image2)) + + sys.stderr.write('Finish generating examples: %d, %d.\n' % + (len(train_examples), len(test_examples))) + random.shuffle(train_examples) + _ = [train_writer.write(ex.SerializeToString()) for ex in train_examples] + _ = [test_writer.write(ex.SerializeToString()) for ex in test_examples] + + +def main(_): + generate_input() + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/next_frame_prediction/cross_conv/train.py b/models/research/next_frame_prediction/cross_conv/train.py new file mode 100644 index 0000000000000000000000000000000000000000..5b9973f52cc3946b3396c1e0b87fda19901735f6 --- /dev/null +++ b/models/research/next_frame_prediction/cross_conv/train.py @@ -0,0 +1,122 @@ +# Copyright 2016 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Train the cross convolutional model.""" +import os +import sys + +import numpy as np +import tensorflow as tf + +import model as cross_conv_model +import reader + +FLAGS = tf.flags.FLAGS +tf.flags.DEFINE_string('master', '', 'Session address.') +tf.flags.DEFINE_string('log_root', '/tmp/moving_obj', 'The root dir of output.') +tf.flags.DEFINE_string('data_filepattern', '', + 'training data file pattern.') +tf.flags.DEFINE_integer('image_size', 64, 'Image height and width.') +tf.flags.DEFINE_integer('batch_size', 1, 'Batch size.') +tf.flags.DEFINE_float('norm_scale', 1.0, 'Normalize the original image') +tf.flags.DEFINE_float('scale', 10.0, + 'Scale the image after norm_scale and move the diff ' + 'to the positive realm.') +tf.flags.DEFINE_integer('sequence_length', 2, 'tf.SequenceExample length.') +tf.flags.DEFINE_float('learning_rate', 0.8, 'Learning rate.') +tf.flags.DEFINE_bool('l2_loss', True, 'If true, include l2_loss.') +tf.flags.DEFINE_bool('reconstr_loss', False, 'If true, include reconstr_loss.') +tf.flags.DEFINE_bool('kl_loss', True, 'If true, include KL loss.') + +slim = tf.contrib.slim + + +def _Train(): + params = dict() + params['batch_size'] = FLAGS.batch_size + params['seq_len'] = FLAGS.sequence_length + params['image_size'] = FLAGS.image_size + params['is_training'] = True + params['norm_scale'] = FLAGS.norm_scale + params['scale'] = FLAGS.scale + params['learning_rate'] = FLAGS.learning_rate + params['l2_loss'] = FLAGS.l2_loss + params['reconstr_loss'] = FLAGS.reconstr_loss + params['kl_loss'] = FLAGS.kl_loss + + train_dir = os.path.join(FLAGS.log_root, 'train') + + images = reader.ReadInput(FLAGS.data_filepattern, shuffle=True, params=params) + images *= params['scale'] + # Increase the value makes training much faster. + image_diff_list = reader.SequenceToImageAndDiff(images) + model = cross_conv_model.CrossConvModel(image_diff_list, params) + model.Build() + tf.contrib.tfprof.model_analyzer.print_model_analysis(tf.get_default_graph()) + + summary_writer = tf.summary.FileWriter(train_dir) + sv = tf.train.Supervisor(logdir=FLAGS.log_root, + summary_op=None, + is_chief=True, + save_model_secs=60, + global_step=model.global_step) + sess = sv.prepare_or_wait_for_session( + FLAGS.master, config=tf.ConfigProto(allow_soft_placement=True)) + + total_loss = 0.0 + step = 0 + sample_z_mean = np.zeros(model.z_mean.get_shape().as_list()) + sample_z_stddev_log = np.zeros(model.z_stddev_log.get_shape().as_list()) + sample_step = 0 + + while True: + _, loss_val, total_steps, summaries, z_mean, z_stddev_log = sess.run( + [model.train_op, model.loss, model.global_step, + model.summary_op, + model.z_mean, model.z_stddev_log]) + + sample_z_mean += z_mean + sample_z_stddev_log += z_stddev_log + total_loss += loss_val + step += 1 + sample_step += 1 + + if step % 100 == 0: + summary_writer.add_summary(summaries, total_steps) + sys.stderr.write('step: %d, loss: %f\n' % + (total_steps, total_loss / step)) + total_loss = 0.0 + step = 0 + + # Sampled z is used for eval. + # It seems 10k is better than 1k. Maybe try 100k next? 
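+ # Every 10k sampled steps, dump the running averages of z_mean and z_stddev_log so eval.py can use the empirical latent distribution, then reset the accumulators.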
+ if sample_step % 10000 == 0: + with tf.gfile.Open(os.path.join(FLAGS.log_root, 'z_mean.npy'), 'w') as f: + np.save(f, sample_z_mean / sample_step) + with tf.gfile.Open( + os.path.join(FLAGS.log_root, 'z_stddev_log.npy'), 'w') as f: + np.save(f, sample_z_stddev_log / sample_step) + sample_z_mean = np.zeros(model.z_mean.get_shape().as_list()) + sample_z_stddev_log = np.zeros( + model.z_stddev_log.get_shape().as_list()) + sample_step = 0 + + +def main(_): + _Train() + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/next_frame_prediction/g3doc/cross_conv.png b/models/research/next_frame_prediction/g3doc/cross_conv.png new file mode 100644 index 0000000000000000000000000000000000000000..13915f944188adf0b0a3dc85219fce7bcb5e7de9 Binary files /dev/null and b/models/research/next_frame_prediction/g3doc/cross_conv.png differ diff --git a/models/research/next_frame_prediction/g3doc/cross_conv2.png b/models/research/next_frame_prediction/g3doc/cross_conv2.png new file mode 100644 index 0000000000000000000000000000000000000000..c4b5e8e9d6169a1a908858a91fdc6467ae03ea2a Binary files /dev/null and b/models/research/next_frame_prediction/g3doc/cross_conv2.png differ diff --git a/models/research/next_frame_prediction/g3doc/cross_conv3.png b/models/research/next_frame_prediction/g3doc/cross_conv3.png new file mode 100644 index 0000000000000000000000000000000000000000..054d7d1edf2043c50a3ea8d332cc83a8dcb32c9b Binary files /dev/null and b/models/research/next_frame_prediction/g3doc/cross_conv3.png differ diff --git a/models/research/nst_blogpost/4_Neural_Style_Transfer_with_Eager_Execution.ipynb b/models/research/nst_blogpost/4_Neural_Style_Transfer_with_Eager_Execution.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..7ba70749cf9366a4ea63776c267a777d2e582e4c --- /dev/null +++ b/models/research/nst_blogpost/4_Neural_Style_Transfer_with_Eager_Execution.ipynb @@ -0,0 +1,1225 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "Neural Style Transfer with Eager Execution", + "version": "0.3.2", + "provenance": [], + "private_outputs": true, + "collapsed_sections": [], + "toc_visible": true + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "accelerator": "GPU" + }, + "cells": [ + { + "metadata": { + "id": "jo5PziEC4hWs", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "# Neural Style Transfer with tf.keras\n", + "\n", + "\n", + " \n", + " \n", + "
\n", + " Run in Google Colab\n", + " \n", + " View source on GitHub\n", + "
" + ] + }, + { + "metadata": { + "id": "aDyGj8DmXCJI", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "## Overview\n", + "\n", + "In this tutorial, we will learn how to use deep learning to compose images in the style of another image (ever wish you could paint like Picasso or Van Gogh?). This is known as **neural style transfer**! This is a technique outlined in [Leon A. Gatys' paper, A Neural Algorithm of Artistic Style](https://arxiv.org/abs/1508.06576), which is a great read, and you should definitely check it out. \n", + "\n", + "But, what is neural style transfer?\n", + "\n", + "Neural style transfer is an optimization technique used to take three images, a **content** image, a **style reference** image (such as an artwork by a famous painter), and the **input** image you want to style -- and blend them together such that the input image is transformed to look like the content image, but “painted” in the style of the style image.\n", + "\n", + "\n", + "For example, let’s take an image of this turtle and Katsushika Hokusai's *The Great Wave off Kanagawa*:\n", + "\n", + "\"Drawing\"\n", + "\"Drawing\"\n", + "\n", + "[Image of Green Sea Turtle](https://commons.wikimedia.org/wiki/File:Green_Sea_Turtle_grazing_seagrass.jpg)\n", + "-By P.Lindgren [CC BY-SA 3.0 (https://creativecommons.org/licenses/by-sa/3.0)], from Wikimedia Commons\n", + "\n", + "\n", + "Now how would it look like if Hokusai decided to paint the picture of this Turtle exclusively with this style? Something like this?\n", + "\n", + "\"Drawing\"\n", + "\n", + "Is this magic or just deep learning? Fortunately, this doesn’t involve any witchcraft: style transfer is a fun and interesting technique that showcases the capabilities and internal representations of neural networks. \n", + "\n", + "The principle of neural style transfer is to define two distance functions, one that describes how different the content of two images are , $L_{content}$, and one that describes the difference between two images in terms of their style, $L_{style}$. Then, given three images, a desired style image, a desired content image, and the input image (initialized with the content image), we try to transform the input image to minimize the content distance with the content image and its style distance with the style image. \n", + "In summary, we’ll take the base input image, a content image that we want to match, and the style image that we want to match. We’ll transform the base input image by minimizing the content and style distances (losses) with backpropagation, creating an image that matches the content of the content image and the style of the style image. 
\n", + "\n", + "### Specific concepts that will be covered:\n", + "In the process, we will build practical experience and develop intuition around the following concepts\n", + "\n", + "* **Eager Execution** - use TensorFlow's imperative programming environment that evaluates operations immediately \n", + " * [Learn more about eager execution](https://www.tensorflow.org/programmers_guide/eager)\n", + " * [See it in action](https://www.tensorflow.org/get_started/eager)\n", + "* ** Using [Functional API](https://keras.io/getting-started/functional-api-guide/) to define a model** - we'll build a subset of our model that will give us access to the necessary intermediate activations using the Functional API \n", + "* **Leveraging feature maps of a pretrained model** - Learn how to use pretrained models and their feature maps \n", + "* **Create custom training loops** - we'll examine how to set up an optimizer to minimize a given loss with respect to input parameters\n", + "\n", + "### We will follow the general steps to perform style transfer:\n", + "\n", + "1. Visualize data\n", + "2. Basic Preprocessing/preparing our data\n", + "3. Set up loss functions \n", + "4. Create model\n", + "5. Optimize for loss function\n", + "\n", + "**Audience:** This post is geared towards intermediate users who are comfortable with basic machine learning concepts. To get the most out of this post, you should: \n", + "* Read [Gatys' paper](https://arxiv.org/abs/1508.06576) - we'll explain along the way, but the paper will provide a more thorough understanding of the task\n", + "* [Understand reducing loss with gradient descent](https://developers.google.com/machine-learning/crash-course/reducing-loss/gradient-descent)\n", + "\n", + "**Time Estimated**: 30 min\n" + ] + }, + { + "metadata": { + "id": "U8ajP_u73s6m", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "## Setup\n", + "\n", + "### Download Images" + ] + }, + { + "metadata": { + "id": "riWE_b8k3s6o", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "import os\n", + "img_dir = '/tmp/nst'\n", + "if not os.path.exists(img_dir):\n", + " os.makedirs(img_dir)\n", + "!wget --quiet -P /tmp/nst/ https://upload.wikimedia.org/wikipedia/commons/d/d7/Green_Sea_Turtle_grazing_seagrass.jpg\n", + "!wget --quiet -P /tmp/nst/ https://upload.wikimedia.org/wikipedia/commons/0/0a/The_Great_Wave_off_Kanagawa.jpg\n", + "!wget --quiet -P /tmp/nst/ https://upload.wikimedia.org/wikipedia/commons/b/b4/Vassily_Kandinsky%2C_1913_-_Composition_7.jpg\n", + "!wget --quiet -P /tmp/nst/ https://upload.wikimedia.org/wikipedia/commons/0/00/Tuebingen_Neckarfront.jpg\n", + "!wget --quiet -P /tmp/nst/ https://upload.wikimedia.org/wikipedia/commons/6/68/Pillars_of_creation_2014_HST_WFC3-UVIS_full-res_denoised.jpg\n", + "!wget --quiet -P /tmp/nst/ https://upload.wikimedia.org/wikipedia/commons/thumb/e/ea/Van_Gogh_-_Starry_Night_-_Google_Art_Project.jpg/1024px-Van_Gogh_-_Starry_Night_-_Google_Art_Project.jpg" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "eqxUicSPUOP6", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "### Import and configure modules" + ] + }, + { + "metadata": { + "id": "sc1OLbOWhPCO", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "import matplotlib.pyplot as plt\n", + "import matplotlib as mpl\n", + "mpl.rcParams['figure.figsize'] = (10,10)\n", + "mpl.rcParams['axes.grid'] = False\n", + "\n", + "import numpy as np\n", + "from PIL 
import Image\n", + "import time\n", + "import functools" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "RYEjlrYk3s6w", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "%tensorflow_version 1.x\n", + "import tensorflow as tf\n", + "\n", + "from tensorflow.python.keras.preprocessing import image as kp_image\n", + "from tensorflow.python.keras import models \n", + "from tensorflow.python.keras import losses\n", + "from tensorflow.python.keras import layers\n", + "from tensorflow.python.keras import backend as K" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "L7sjDODq67HQ", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "We’ll begin by enabling [eager execution](https://www.tensorflow.org/guide/eager). Eager execution allows us to work through this technique in the clearest and most readable way. " + ] + }, + { + "metadata": { + "id": "sfjsSAtNrqQx", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "tf.enable_eager_execution()\n", + "print(\"Eager execution: {}\".format(tf.executing_eagerly()))" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "IOiGrIV1iERH", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "# Set up some global values here\n", + "content_path = '/tmp/nst/Green_Sea_Turtle_grazing_seagrass.jpg'\n", + "style_path = '/tmp/nst/The_Great_Wave_off_Kanagawa.jpg'" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "xE4Yt8nArTeR", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "## Visualize the input" + ] + }, + { + "metadata": { + "id": "3TLljcwv5qZs", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "def load_img(path_to_img):\n", + " max_dim = 512\n", + " img = Image.open(path_to_img)\n", + " long = max(img.size)\n", + " scale = max_dim/long\n", + " img = img.resize((round(img.size[0]*scale), round(img.size[1]*scale)), Image.ANTIALIAS)\n", + " \n", + " img = kp_image.img_to_array(img)\n", + " \n", + " # We need to broadcast the image array such that it has a batch dimension \n", + " img = np.expand_dims(img, axis=0)\n", + " return img" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "vupl0CI18aAG", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "def imshow(img, title=None):\n", + " # Remove the batch dimension\n", + " out = np.squeeze(img, axis=0)\n", + " # Normalize for display \n", + " out = out.astype('uint8')\n", + " plt.imshow(out)\n", + " if title is not None:\n", + " plt.title(title)\n", + " plt.imshow(out)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "2yAlRzJZrWM3", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "These are input content and style images. We hope to \"create\" an image with the content of our content image, but with the style of the style image. 
" + ] + }, + { + "metadata": { + "id": "_UWQmeEaiKkP", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "plt.figure(figsize=(10,10))\n", + "\n", + "content = load_img(content_path).astype('uint8')\n", + "style = load_img(style_path).astype('uint8')\n", + "\n", + "plt.subplot(1, 2, 1)\n", + "imshow(content, 'Content Image')\n", + "\n", + "plt.subplot(1, 2, 2)\n", + "imshow(style, 'Style Image')\n", + "plt.show()" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "7qMVNvEsK-_D", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "## Prepare the data\n", + "Let's create methods that will allow us to load and preprocess our images easily. We perform the same preprocessing process as are expected according to the VGG training process. VGG networks are trained on image with each channel normalized by `mean = [103.939, 116.779, 123.68]`and with channels BGR." + ] + }, + { + "metadata": { + "id": "hGwmTwJNmv2a", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "def load_and_process_img(path_to_img):\n", + " img = load_img(path_to_img)\n", + " img = tf.keras.applications.vgg19.preprocess_input(img)\n", + " return img" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "xCgooqs6tAka", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "In order to view the outputs of our optimization, we are required to perform the inverse preprocessing step. Furthermore, since our optimized image may take its values anywhere between $- \\infty$ and $\\infty$, we must clip to maintain our values from within the 0-255 range. " + ] + }, + { + "metadata": { + "id": "mjzlKRQRs_y2", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "def deprocess_img(processed_img):\n", + " x = processed_img.copy()\n", + " if len(x.shape) == 4:\n", + " x = np.squeeze(x, 0)\n", + " assert len(x.shape) == 3, (\"Input to deprocess image must be an image of \"\n", + " \"dimension [1, height, width, channel] or [height, width, channel]\")\n", + " if len(x.shape) != 3:\n", + " raise ValueError(\"Invalid input to deprocessing image\")\n", + " \n", + " # perform the inverse of the preprocessing step\n", + " x[:, :, 0] += 103.939\n", + " x[:, :, 1] += 116.779\n", + " x[:, :, 2] += 123.68\n", + " x = x[:, :, ::-1]\n", + "\n", + " x = np.clip(x, 0, 255).astype('uint8')\n", + " return x" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "GEwZ7FlwrjoZ", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "### Define content and style representations\n", + "In order to get both the content and style representations of our image, we will look at some intermediate layers within our model. As we go deeper into the model, these intermediate layers represent higher and higher order features. In this case, we are using the network architecture VGG19, a pretrained image classification network. These intermediate layers are necessary to define the representation of content and style from our images. For an input image, we will try to match the corresponding style and content target representations at these intermediate layers. \n", + "\n", + "#### Why intermediate layers?\n", + "\n", + "You may be wondering why these intermediate outputs within our pretrained image classification network allow us to define style and content representations. 
At a high level, this phenomenon can be explained by the fact that in order for a network to perform image classification (which our network has been trained to do), it must understand the image. This involves taking the raw image as input pixels and building an internal representation through transformations that turn the raw image pixels into a complex understanding of the features present within the image. This is also partly why convolutional neural networks are able to generalize well: they’re able to capture the invariances and defining features within classes (e.g., cats vs. dogs) that are agnostic to background noise and other nuisances. Thus, somewhere between where the raw image is fed in and the classification label is output, the model serves as a complex feature extractor; hence by accessing intermediate layers, we’re able to describe the content and style of input images. \n", + "\n", + "\n", + "Specifically we’ll pull out these intermediate layers from our network: \n" + ] + }, + { + "metadata": { + "id": "N4-8eUp_Kc-j", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "# Content layer where will pull our feature maps\n", + "content_layers = ['block5_conv2'] \n", + "\n", + "# Style layer we are interested in\n", + "style_layers = ['block1_conv1',\n", + " 'block2_conv1',\n", + " 'block3_conv1', \n", + " 'block4_conv1', \n", + " 'block5_conv1'\n", + " ]\n", + "\n", + "num_content_layers = len(content_layers)\n", + "num_style_layers = len(style_layers)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "Jt3i3RRrJiOX", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "## Build the Model \n", + "In this case, we load [VGG19](https://keras.io/applications/#vgg19), and feed in our input tensor to the model. This will allow us to extract the feature maps (and subsequently the content and style representations) of the content, style, and generated images.\n", + "\n", + "We use VGG19, as suggested in the paper. In addition, since VGG19 is a relatively simple model (compared with ResNet, Inception, etc) the feature maps actually work better for style transfer. " + ] + }, + { + "metadata": { + "id": "v9AnzEUU6hhx", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "In order to access the intermediate layers corresponding to our style and content feature maps, we get the corresponding outputs and using the Keras [**Functional API**](https://keras.io/getting-started/functional-api-guide/), we define our model with the desired output activations. \n", + "\n", + "With the Functional API defining a model simply involves defining the input and output: \n", + "\n", + "`model = Model(inputs, outputs)`" + ] + }, + { + "metadata": { + "id": "nfec6MuMAbPx", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "def get_model():\n", + " \"\"\" Creates our model with access to intermediate layers. \n", + " \n", + " This function will load the VGG19 model and access the intermediate layers. \n", + " These layers will then be used to create a new model that will take input image\n", + " and return the outputs from these intermediate layers from the VGG model. \n", + " \n", + " Returns:\n", + " returns a keras model that takes image inputs and outputs the style and \n", + " content intermediate layers. \n", + " \"\"\"\n", + " # Load our model. 
We load pretrained VGG, trained on imagenet data\n", + " vgg = tf.keras.applications.vgg19.VGG19(include_top=False, weights='imagenet')\n", + " vgg.trainable = False\n", + " # Get output layers corresponding to style and content layers \n", + " style_outputs = [vgg.get_layer(name).output for name in style_layers]\n", + " content_outputs = [vgg.get_layer(name).output for name in content_layers]\n", + " model_outputs = style_outputs + content_outputs\n", + " # Build model \n", + " return models.Model(vgg.input, model_outputs)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "kl6eFGa7-OtV", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "In the above code snippet, we’ll load our pretrained image classification network. Then we grab the layers of interest as we defined earlier. Then we define a Model by setting the model’s inputs to an image and the outputs to the outputs of the style and content layers. In other words, we created a model that will take an input image and output the content and style intermediate layers! \n" + ] + }, + { + "metadata": { + "id": "vJdYvJTZ4bdS", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "## Define and create our loss functions (content and style distances)" + ] + }, + { + "metadata": { + "id": "F2Hcepii7_qh", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "### Content Loss" + ] + }, + { + "metadata": { + "id": "1FvH-gwXi4nq", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "Our content loss definition is actually quite simple. We’ll pass the network both the desired content image and our base input image. This will return the intermediate layer outputs (from the layers defined above) from our model. Then we simply take the euclidean distance between the two intermediate representations of those images. \n", + "\n", + "More formally, content loss is a function that describes the distance of content from our output image $x$ and our content image, $p$. Let $C_{nn}$ be a pre-trained deep convolutional neural network. Again, in this case we use [VGG19](https://keras.io/applications/#vgg19). Let $X$ be any image, then $C_{nn}(X)$ is the network fed by X. Let $F^l_{ij}(x) \\in C_{nn}(x)$ and $P^l_{ij}(p) \\in C_{nn}(p)$ describe the respective intermediate feature representation of the network with inputs $x$ and $p$ at layer $l$. Then we describe the content distance (loss) formally as: $$L^l_{content}(p, x) = \\sum_{i, j} (F^l_{ij}(x) - P^l_{ij}(p))^2$$\n", + "\n", + "We perform backpropagation in the usual way such that we minimize this content loss. We thus change the initial image until it generates a similar response in a certain layer (defined in content_layer) as the original content image.\n", + "\n", + "This can be implemented quite simply. Again it will take as input the feature maps at a layer L in a network fed by x, our input image, and p, our content image, and return the content distance.\n", + "\n" + ] + }, + { + "metadata": { + "id": "6KsbqPA8J9DY", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "### Computing content loss\n", + "We will actually add our content losses at each desired layer. This way, each iteration when we feed our input image through the model (which in eager is simply `model(input_image)`!) all the content losses through the model will be properly compute and because we are executing eagerly, all the gradients will be computed. 
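A small aside (ours, not the original authors'): the content-loss equation above is written as a plain sum over $i, j$, while the helper defined in the next cell uses `tf.reduce_mean`; the two differ only by a constant factor, which is effectively absorbed into the content weight later on. A tiny worked example with made-up $2 \times 2$ feature maps shows what the helper computes:

```python
# Sketch: the content loss is just the mean squared difference between feature maps.
# F and P are made-up 2x2 "feature maps", used purely for illustration.
F = tf.constant([[1.0, 2.0], [3.0, 4.0]])   # features of the generated image x
P = tf.constant([[1.0, 2.0], [3.0, 5.0]])   # features of the content image p
print(tf.reduce_mean(tf.square(F - P)))     # only one entry differs by 1, so the mean is 1/4 = 0.25
```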
" + ] + }, + { + "metadata": { + "id": "d2mf7JwRMkCd", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "def get_content_loss(base_content, target):\n", + " return tf.reduce_mean(tf.square(base_content - target))" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "lGUfttK9F8d5", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "## Style Loss" + ] + }, + { + "metadata": { + "id": "I6XtkGK_YGD1", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "Computing style loss is a bit more involved, but follows the same principle, this time feeding our network the base input image and the style image. However, instead of comparing the raw intermediate outputs of the base input image and the style image, we instead compare the Gram matrices of the two outputs. \n", + "\n", + "Mathematically, we describe the style loss of the base input image, $x$, and the style image, $a$, as the distance between the style representation (the gram matrices) of these images. We describe the style representation of an image as the correlation between different filter responses given by the Gram matrix $G^l$, where $G^l_{ij}$ is the inner product between the vectorized feature map $i$ and $j$ in layer $l$. We can see that $G^l_{ij}$ generated over the feature map for a given image represents the correlation between feature maps $i$ and $j$. \n", + "\n", + "To generate a style for our base input image, we perform gradient descent from the content image to transform it into an image that matches the style representation of the original image. We do so by minimizing the mean squared distance between the feature correlation map of the style image and the input image. The contribution of each layer to the total style loss is described by\n", + "$$E_l = \\frac{1}{4N_l^2M_l^2} \\sum_{i,j}(G^l_{ij} - A^l_{ij})^2$$\n", + "\n", + "where $G^l_{ij}$ and $A^l_{ij}$ are the respective style representation in layer $l$ of $x$ and $a$. $N_l$ describes the number of feature maps, each of size $M_l = height * width$. Thus, the total style loss across each layer is \n", + "$$L_{style}(a, x) = \\sum_{l \\in L} w_l E_l$$\n", + "where we weight the contribution of each layer's loss by some factor $w_l$. In our case, we weight each layer equally ($w_l =\\frac{1}{|L|}$)" + ] + }, + { + "metadata": { + "id": "F21Hm61yLKk5", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "### Computing style loss\n", + "Again, we implement our loss as a distance metric . " + ] + }, + { + "metadata": { + "id": "N7MOqwKLLke8", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "def gram_matrix(input_tensor):\n", + " # We make the image channels first \n", + " channels = int(input_tensor.shape[-1])\n", + " a = tf.reshape(input_tensor, [-1, channels])\n", + " n = tf.shape(a)[0]\n", + " gram = tf.matmul(a, a, transpose_a=True)\n", + " return gram / tf.cast(n, tf.float32)\n", + "\n", + "def get_style_loss(base_style, gram_target):\n", + " \"\"\"Expects two images of dimension h, w, c\"\"\"\n", + " # height, width, num filters of each layer\n", + " # We scale the loss at a given layer by the size of the feature map and the number of filters\n", + " height, width, channels = base_style.get_shape().as_list()\n", + " gram_style = gram_matrix(base_style)\n", + " \n", + " return tf.reduce_mean(tf.square(gram_style - gram_target))# / (4. 
* (channels ** 2) * (width * height) ** 2)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "pXIUX6czZABh", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "## Apply style transfer to our images\n" + ] + }, + { + "metadata": { + "id": "y9r8Lyjb_m0u", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "### Run Gradient Descent \n", + "If you aren't familiar with gradient descent/backpropagation or need a refresher, you should definitely check out this [awesome resource](https://developers.google.com/machine-learning/crash-course/reducing-loss/gradient-descent).\n", + "\n", + "In this case, we use the [Adam](https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Adam)* optimizer in order to minimize our loss. We iteratively update our output image such that it minimizes our loss: we don't update the weights associated with our network, but instead we train our input image to minimize loss. In order to do this, we must know how we calculate our loss and gradients. \n", + "\n", + "\\* Note that L-BFGS, which if you are familiar with this algorithm is recommended, isn’t used in this tutorial because a primary motivation behind this tutorial was to illustrate best practices with eager execution, and, by using Adam, we can demonstrate the autograd/gradient tape functionality with custom training loops.\n" + ] + }, + { + "metadata": { + "id": "-kGzV6LTp4CU", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "We’ll define a little helper function that will load our content and style image, feed them forward through our network, which will then output the content and style feature representations from our model. " + ] + }, + { + "metadata": { + "id": "O-lj5LxgtmnI", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "def get_feature_representations(model, content_path, style_path):\n", + " \"\"\"Helper function to compute our content and style feature representations.\n", + "\n", + " This function will simply load and preprocess both the content and style \n", + " images from their path. Then it will feed them through the network to obtain\n", + " the outputs of the intermediate layers. \n", + " \n", + " Arguments:\n", + " model: The model that we are using.\n", + " content_path: The path to the content image.\n", + " style_path: The path to the style image\n", + " \n", + " Returns:\n", + " returns the style features and the content features. \n", + " \"\"\"\n", + " # Load our images in \n", + " content_image = load_and_process_img(content_path)\n", + " style_image = load_and_process_img(style_path)\n", + " \n", + " # batch compute content and style features\n", + " style_outputs = model(style_image)\n", + " content_outputs = model(content_image)\n", + " \n", + " \n", + " # Get the style and content feature representations from our model \n", + " style_features = [style_layer[0] for style_layer in style_outputs[:num_style_layers]]\n", + " content_features = [content_layer[0] for content_layer in content_outputs[num_style_layers:]]\n", + " return style_features, content_features" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "3DopXw7-lFHa", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "### Computing the loss and gradients\n", + "Here we use [**tf.GradientTape**](https://www.tensorflow.org/programmers_guide/eager#computing_gradients) to compute the gradient. 
It allows us to take advantage of the automatic differentiation available by tracing operations for computing the gradient later. It records the operations during the forward pass and then is able to compute the gradient of our loss function with respect to our input image for the backwards pass." + ] + }, + { + "metadata": { + "id": "oVDhSo8iJunf", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "def compute_loss(model, loss_weights, init_image, gram_style_features, content_features):\n", + " \"\"\"This function will compute the loss total loss.\n", + " \n", + " Arguments:\n", + " model: The model that will give us access to the intermediate layers\n", + " loss_weights: The weights of each contribution of each loss function. \n", + " (style weight, content weight, and total variation weight)\n", + " init_image: Our initial base image. This image is what we are updating with \n", + " our optimization process. We apply the gradients wrt the loss we are \n", + " calculating to this image.\n", + " gram_style_features: Precomputed gram matrices corresponding to the \n", + " defined style layers of interest.\n", + " content_features: Precomputed outputs from defined content layers of \n", + " interest.\n", + " \n", + " Returns:\n", + " returns the total loss, style loss, content loss, and total variational loss\n", + " \"\"\"\n", + " style_weight, content_weight = loss_weights\n", + " \n", + " # Feed our init image through our model. This will give us the content and \n", + " # style representations at our desired layers. Since we're using eager\n", + " # our model is callable just like any other function!\n", + " model_outputs = model(init_image)\n", + " \n", + " style_output_features = model_outputs[:num_style_layers]\n", + " content_output_features = model_outputs[num_style_layers:]\n", + " \n", + " style_score = 0\n", + " content_score = 0\n", + "\n", + " # Accumulate style losses from all layers\n", + " # Here, we equally weight each contribution of each loss layer\n", + " weight_per_style_layer = 1.0 / float(num_style_layers)\n", + " for target_style, comb_style in zip(gram_style_features, style_output_features):\n", + " style_score += weight_per_style_layer * get_style_loss(comb_style[0], target_style)\n", + " \n", + " # Accumulate content losses from all layers \n", + " weight_per_content_layer = 1.0 / float(num_content_layers)\n", + " for target_content, comb_content in zip(content_features, content_output_features):\n", + " content_score += weight_per_content_layer* get_content_loss(comb_content[0], target_content)\n", + " \n", + " style_score *= style_weight\n", + " content_score *= content_weight\n", + "\n", + " # Get total loss\n", + " loss = style_score + content_score \n", + " return loss, style_score, content_score" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "r5XTvbP6nJQa", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "Then computing the gradients is easy:" + ] + }, + { + "metadata": { + "id": "fwzYeOqOUH9_", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "def compute_grads(cfg):\n", + " with tf.GradientTape() as tape: \n", + " all_loss = compute_loss(**cfg)\n", + " # Compute gradients wrt input image\n", + " total_loss = all_loss[0]\n", + " return tape.gradient(total_loss, cfg['init_image']), all_loss" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "T9yKu2PLlBIE", + "colab_type": "text" + }, + "cell_type": "markdown", + 
"source": [ + "### Optimization loop" + ] + }, + { + "metadata": { + "id": "pj_enNo6tACQ", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "import IPython.display\n", + "\n", + "def run_style_transfer(content_path, \n", + " style_path,\n", + " num_iterations=1000,\n", + " content_weight=1e3, \n", + " style_weight=1e-2): \n", + " # We don't need to (or want to) train any layers of our model, so we set their\n", + " # trainable to false. \n", + " model = get_model() \n", + " for layer in model.layers:\n", + " layer.trainable = False\n", + " \n", + " # Get the style and content feature representations (from our specified intermediate layers) \n", + " style_features, content_features = get_feature_representations(model, content_path, style_path)\n", + " gram_style_features = [gram_matrix(style_feature) for style_feature in style_features]\n", + " \n", + " # Set initial image\n", + " init_image = load_and_process_img(content_path)\n", + " init_image = tf.Variable(init_image, dtype=tf.float32)\n", + " # Create our optimizer\n", + " opt = tf.train.AdamOptimizer(learning_rate=5, beta1=0.99, epsilon=1e-1)\n", + "\n", + " # For displaying intermediate images \n", + " iter_count = 1\n", + " \n", + " # Store our best result\n", + " best_loss, best_img = float('inf'), None\n", + " \n", + " # Create a nice config \n", + " loss_weights = (style_weight, content_weight)\n", + " cfg = {\n", + " 'model': model,\n", + " 'loss_weights': loss_weights,\n", + " 'init_image': init_image,\n", + " 'gram_style_features': gram_style_features,\n", + " 'content_features': content_features\n", + " }\n", + " \n", + " # For displaying\n", + " num_rows = 2\n", + " num_cols = 5\n", + " display_interval = num_iterations/(num_rows*num_cols)\n", + " start_time = time.time()\n", + " global_start = time.time()\n", + " \n", + " norm_means = np.array([103.939, 116.779, 123.68])\n", + " min_vals = -norm_means\n", + " max_vals = 255 - norm_means \n", + " \n", + " imgs = []\n", + " for i in range(num_iterations):\n", + " grads, all_loss = compute_grads(cfg)\n", + " loss, style_score, content_score = all_loss\n", + " opt.apply_gradients([(grads, init_image)])\n", + " clipped = tf.clip_by_value(init_image, min_vals, max_vals)\n", + " init_image.assign(clipped)\n", + " end_time = time.time() \n", + " \n", + " if loss < best_loss:\n", + " # Update best loss and best image from total loss. 
\n", + " best_loss = loss\n", + " best_img = deprocess_img(init_image.numpy())\n", + "\n", + " if i % display_interval== 0:\n", + " start_time = time.time()\n", + " \n", + " # Use the .numpy() method to get the concrete numpy array\n", + " plot_img = init_image.numpy()\n", + " plot_img = deprocess_img(plot_img)\n", + " imgs.append(plot_img)\n", + " IPython.display.clear_output(wait=True)\n", + " IPython.display.display_png(Image.fromarray(plot_img))\n", + " print('Iteration: {}'.format(i)) \n", + " print('Total loss: {:.4e}, ' \n", + " 'style loss: {:.4e}, '\n", + " 'content loss: {:.4e}, '\n", + " 'time: {:.4f}s'.format(loss, style_score, content_score, time.time() - start_time))\n", + " print('Total time: {:.4f}s'.format(time.time() - global_start))\n", + " IPython.display.clear_output(wait=True)\n", + " plt.figure(figsize=(14,4))\n", + " for i,img in enumerate(imgs):\n", + " plt.subplot(num_rows,num_cols,i+1)\n", + " plt.imshow(img)\n", + " plt.xticks([])\n", + " plt.yticks([])\n", + " \n", + " return best_img, best_loss " + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "vSVMx4burydi", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "best, best_loss = run_style_transfer(content_path, \n", + " style_path, num_iterations=1000)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "dzJTObpsO3TZ", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "Image.fromarray(best)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "dCXQ9vSnQbDy", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "To download the image from Colab uncomment the following code:" + ] + }, + { + "metadata": { + "id": "SSH6OpyyQn7w", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "#from google.colab import files\n", + "#files.download('wave_turtle.png')" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "LwiZfCW0AZwt", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "## Visualize outputs\n", + "We \"deprocess\" the output image in order to remove the processing that was applied to it. 
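One practical note before defining the display helper (this is an addition of ours, not from the original notebook): the commented-out `files.download('wave_turtle.png')` cell above assumes the stylized result has already been written to the Colab VM's disk, so a save step such as this sketch is needed first.

```python
# Sketch: write the best stylized frame to disk so it can be downloaded from Colab.
# The filename matches the one used in the commented-out download cell above.
Image.fromarray(best).save('wave_turtle.png')

# Then, inside Colab, the earlier cell can be uncommented:
# from google.colab import files
# files.download('wave_turtle.png')
```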
" + ] + }, + { + "metadata": { + "id": "lqTQN1PjulV9", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "def show_results(best_img, content_path, style_path, show_large_final=True):\n", + " plt.figure(figsize=(10, 5))\n", + " content = load_img(content_path) \n", + " style = load_img(style_path)\n", + "\n", + " plt.subplot(1, 2, 1)\n", + " imshow(content, 'Content Image')\n", + "\n", + " plt.subplot(1, 2, 2)\n", + " imshow(style, 'Style Image')\n", + "\n", + " if show_large_final: \n", + " plt.figure(figsize=(10, 10))\n", + "\n", + " plt.imshow(best_img)\n", + " plt.title('Output Image')\n", + " plt.show()" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "i6d6O50Yvs6a", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "show_results(best, content_path, style_path)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "tyGMmWh2Pss8", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "## Try it on other images\n", + "Image of Tuebingen \n", + "\n", + "Photo By: Andreas Praefcke [GFDL (http://www.gnu.org/copyleft/fdl.html) or CC BY 3.0 (https://creativecommons.org/licenses/by/3.0)], from Wikimedia Commons" + ] + }, + { + "metadata": { + "id": "x2TePU39k9lb", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "### Starry night + Tuebingen" + ] + }, + { + "metadata": { + "id": "ES9dC6ZyJBD2", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "best_starry_night, best_loss = run_style_transfer('/tmp/nst/Tuebingen_Neckarfront.jpg',\n", + " '/tmp/nst/1024px-Van_Gogh_-_Starry_Night_-_Google_Art_Project.jpg')" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "X8w8WLkKvzXu", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "show_results(best_starry_night, '/tmp/nst/Tuebingen_Neckarfront.jpg',\n", + " '/tmp/nst/1024px-Van_Gogh_-_Starry_Night_-_Google_Art_Project.jpg')" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "QcXwvViek4Br", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "### Pillars of Creation + Tuebingen" + ] + }, + { + "metadata": { + "id": "vJ3u2U-gGmgP", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "best_poc_tubingen, best_loss = run_style_transfer('/tmp/nst/Tuebingen_Neckarfront.jpg', \n", + " '/tmp/nst/Pillars_of_creation_2014_HST_WFC3-UVIS_full-res_denoised.jpg')" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "pQUq3KxpGv2O", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "show_results(best_poc_tubingen, \n", + " '/tmp/nst/Tuebingen_Neckarfront.jpg',\n", + " '/tmp/nst/Pillars_of_creation_2014_HST_WFC3-UVIS_full-res_denoised.jpg')" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "bTZdTOdW3s8H", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "### Kandinsky Composition 7 + Tuebingen" + ] + }, + { + "metadata": { + "id": "bt9mbQfl7exl", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "best_kandinsky_tubingen, best_loss = run_style_transfer('/tmp/nst/Tuebingen_Neckarfront.jpg', \n", + " '/tmp/nst/Vassily_Kandinsky,_1913_-_Composition_7.jpg')" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "Qnz8HeXSXg6P", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + 
"source": [ + "show_results(best_kandinsky_tubingen, \n", + " '/tmp/nst/Tuebingen_Neckarfront.jpg',\n", + " '/tmp/nst/Vassily_Kandinsky,_1913_-_Composition_7.jpg')" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "cg68lW2A3s8N", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "### Pillars of Creation + Sea Turtle" + ] + }, + { + "metadata": { + "id": "dl0DUot_bFST", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "best_poc_turtle, best_loss = run_style_transfer('/tmp/nst/Green_Sea_Turtle_grazing_seagrass.jpg', \n", + " '/tmp/nst/Pillars_of_creation_2014_HST_WFC3-UVIS_full-res_denoised.jpg')" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "UzJfE0I1bQn8", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "show_results(best_poc_turtle, \n", + " '/tmp/nst/Green_Sea_Turtle_grazing_seagrass.jpg',\n", + " '/tmp/nst/Pillars_of_creation_2014_HST_WFC3-UVIS_full-res_denoised.jpg')" + ], + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "sElaeNX-4Vnc", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "## Key Takeaways\n", + "\n", + "### What we covered:\n", + "\n", + "* We built several different loss functions and used backpropagation to transform our input image in order to minimize these losses\n", + " * In order to do this we had to load in a **pretrained model** and use its learned feature maps to describe the content and style representation of our images.\n", + " * Our main loss functions were primarily computing the distance in terms of these different representations\n", + "* We implemented this with a custom model and **eager execution**\n", + " * We built our custom model with the Functional API \n", + " * Eager execution allows us to dynamically work with tensors, using a natural python control flow\n", + " * We manipulated tensors directly, which makes debugging and working with tensors easier. \n", + "* We iteratively updated our image by applying our optimizers update rules using **tf.gradient**. The optimizer minimized a given loss with respect to our input image. 
" + ] + }, + { + "metadata": { + "id": "U-y02GWonqnD", + "colab_type": "text" + }, + "cell_type": "markdown", + "source": [ + "\n", + "**[Image of Tuebingen](https://commons.wikimedia.org/wiki/File:Tuebingen_Neckarfront.jpg)** \n", + "Photo By: Andreas Praefcke [GFDL (http://www.gnu.org/copyleft/fdl.html) or CC BY 3.0 (https://creativecommons.org/licenses/by/3.0)], from Wikimedia Commons\n", + "\n", + "**[Image of Green Sea Turtle](https://commons.wikimedia.org/wiki/File:Green_Sea_Turtle_grazing_seagrass.jpg)**\n", + "By P.Lindgren [CC BY-SA 3.0 (https://creativecommons.org/licenses/by-sa/3.0)], from Wikimedia Commons\n", + "\n" + ] + }, + { + "metadata": { + "id": "IpUD9W6ZkeyM", + "colab_type": "code", + "colab": {} + }, + "cell_type": "code", + "source": [ + "" + ], + "execution_count": 0, + "outputs": [] + } + ] +} diff --git a/models/research/nst_blogpost/Green_Sea_Turtle_grazing_seagrass.jpg b/models/research/nst_blogpost/Green_Sea_Turtle_grazing_seagrass.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c98791b8ace15788b09fbc6cd03d9e946e29cce7 Binary files /dev/null and b/models/research/nst_blogpost/Green_Sea_Turtle_grazing_seagrass.jpg differ diff --git a/models/research/nst_blogpost/The_Great_Wave_off_Kanagawa.jpg b/models/research/nst_blogpost/The_Great_Wave_off_Kanagawa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5d3475c9e5af1a6b403779f5f671a8f36e3f0a9f Binary files /dev/null and b/models/research/nst_blogpost/The_Great_Wave_off_Kanagawa.jpg differ diff --git a/models/research/nst_blogpost/wave_turtle.png b/models/research/nst_blogpost/wave_turtle.png new file mode 100644 index 0000000000000000000000000000000000000000..d15ee0fc9f01f1c72d442444bf7facb06a53ef2f Binary files /dev/null and b/models/research/nst_blogpost/wave_turtle.png differ diff --git a/models/research/object_detection/CONTRIBUTING.md b/models/research/object_detection/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..e3d87e3ce90fb4dd22b00a2c5368bf17c3610661 --- /dev/null +++ b/models/research/object_detection/CONTRIBUTING.md @@ -0,0 +1,13 @@ +# Contributing to the Tensorflow Object Detection API + +Patches to Tensorflow Object Detection API are welcome! + +We require contributors to fill out either the individual or corporate +Contributor License Agreement (CLA). + + * If you are an individual writing original source code and you're sure you own the intellectual property, then you'll need to sign an [individual CLA](http://code.google.com/legal/individual-cla-v1.0.html). + * If you work for a company that wants to allow you to contribute your work, then you'll need to sign a [corporate CLA](http://code.google.com/legal/corporate-cla-v1.0.html). + +Please follow the +[Tensorflow contributing guidelines](https://github.com/tensorflow/tensorflow/blob/master/CONTRIBUTING.md) +when submitting pull requests. 
diff --git a/models/research/object_detection/README.md b/models/research/object_detection/README.md new file mode 100644 index 0000000000000000000000000000000000000000..4821f3e66b758b6d2b74efab724a3034b7281f26 --- /dev/null +++ b/models/research/object_detection/README.md @@ -0,0 +1,397 @@ +![TensorFlow Requirement: 1.15](https://img.shields.io/badge/TensorFlow%20Requirement-1.15-brightgreen) +![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) + +# Tensorflow Object Detection API + +Creating accurate machine learning models capable of localizing and identifying +multiple objects in a single image remains a core challenge in computer vision. +The TensorFlow Object Detection API is an open source framework built on top of +TensorFlow that makes it easy to construct, train and deploy object detection +models. At Google we’ve certainly found this codebase to be useful for our +computer vision needs, and we hope that you will as well.
+
+Contributions to the codebase are welcome and we would love to hear back from +you if you find this API useful. Finally if you use the Tensorflow Object +Detection API for a research publication, please consider citing: + +``` +"Speed/accuracy trade-offs for modern convolutional object detectors." +Huang J, Rathod V, Sun C, Zhu M, Korattikara A, Fathi A, Fischer I, Wojna Z, +Song Y, Guadarrama S, Murphy K, CVPR 2017 +``` + +\[[link](https://arxiv.org/abs/1611.10012)\]\[[bibtex](https://scholar.googleusercontent.com/scholar.bib?q=info:l291WsrB-hQJ:scholar.google.com/&output=citation&scisig=AAGBfm0AAAAAWUIIlnPZ_L9jxvPwcC49kDlELtaeIyU-&scisf=4&ct=citation&cd=-1&hl=en&scfhb=1)\] + +
+ +## Maintainers + +Name | GitHub +-------------- | --------------------------------------------- +Jonathan Huang | [jch1](https://github.com/jch1) +Vivek Rathod | [tombstone](https://github.com/tombstone) +Ronny Votel | [ronnyvotel](https://github.com/ronnyvotel) +Derek Chow | [derekjchow](https://github.com/derekjchow) +Chen Sun | [jesu9](https://github.com/jesu9) +Menglong Zhu | [dreamdragon](https://github.com/dreamdragon) +Alireza Fathi | [afathi3](https://github.com/afathi3) +Zhichao Lu | [pkulzc](https://github.com/pkulzc) + +## Table of contents + +Setup: + +* Installation
+ +Quick Start: + +* + Quick Start: Jupyter notebook for off-the-shelf inference
+* Quick Start: Training a pet detector
+ +Customizing a Pipeline: + +* + Configuring an object detection pipeline
+* Preparing inputs
+ +Running: + +* Running locally
+* Running on the cloud
+ +Extras: + +* Tensorflow detection model zoo
+* + Exporting a trained model for inference
+* + Exporting a trained model for TPU inference
+* + Defining your own model architecture
+* + Bringing in your own dataset
+* + Supported object detection evaluation protocols
+* + Inference and evaluation on the Open Images dataset
+* + Run an instance segmentation model
+* + Run the evaluation for the Open Images Challenge 2018/2019
+* + TPU compatible detection pipelines
+* + Running object detection on mobile devices with TensorFlow Lite
+* + Context R-CNN documentation for data preparation, training, and export
+ +## Getting Help + +To get help with issues you may encounter using the Tensorflow Object Detection +API, create a new question on [StackOverflow](https://stackoverflow.com/) with +the tags "tensorflow" and "object-detection". + +Please report bugs (actually broken code, not usage questions) to the +tensorflow/models GitHub +[issue tracker](https://github.com/tensorflow/models/issues), prefixing the +issue name with "object_detection". + +Please check [FAQ](g3doc/faq.md) for frequently asked questions before reporting +an issue. + +## Release information +### June 17th, 2020 + +We have released [Context R-CNN](https://arxiv.org/abs/1912.03538), a model that +uses attention to incorporate contextual information images (e.g. from +temporally nearby frames taken by a static camera) in order to improve accuracy. +Importantly, these contextual images need not be labeled. + +* When applied to a challenging wildlife detection dataset ([Snapshot Serengeti](http://lila.science/datasets/snapshot-serengeti)), + Context R-CNN with context from up to a month of images outperforms a + single-frame baseline by 17.9% mAP, and outperforms S3D (a 3d convolution + based baseline) by 11.2% mAP. +* Context R-CNN leverages temporal context from the unlabeled frames of a + novel camera deployment to improve performance at that camera, boosting + model generalizeability. + +We have provided code for generating data with associated context +[here](g3doc/context_rcnn.md), and a sample config for a Context R-CNN +model [here](samples/configs/context_rcnn_resnet101_snapshot_serengeti_sync.config). + +Snapshot Serengeti-trained Faster R-CNN and Context R-CNN models can be found in +the [model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md#snapshot-serengeti-camera-trap-trained-models). + +A colab demonstrating Context R-CNN is provided +[here](colab_tutorials/context_rcnn_tutorial.ipynb). + +Thanks to contributors: Sara Beery, Jonathan Huang, Guanhang Wu, Vivek +Rathod, Ronny Votel, Zhichao Lu, David Ross, Pietro Perona, Tanya Birch, and +the Wildlife Insights AI Team. + +### May 19th, 2020 + +We have released [MobileDets](https://arxiv.org/abs/2004.14525), a set of +high-performance models for mobile CPUs, DSPs and EdgeTPUs. + +* MobileDets outperform MobileNetV3+SSDLite by 1.7 mAP at comparable mobile + CPU inference latencies. MobileDets also outperform MobileNetV2+SSDLite by + 1.9 mAP on mobile CPUs, 3.7 mAP on EdgeTPUs and 3.4 mAP on DSPs while + running equally fast. MobileDets also offer up to 2x speedup over MnasFPN on + EdgeTPUs and DSPs. + +For each of the three hardware platforms we have released model definition, +model checkpoints trained on the COCO14 dataset and converted TFLite models in +fp32 and/or uint8. + +Thanks to contributors: Yunyang Xiong, Hanxiao Liu, Suyog Gupta, Berkin +Akin, Gabriel Bender, Pieter-Jan Kindermans, Mingxing Tan, Vikas Singh, Bo Chen, +Quoc Le, Zhichao Lu. + +### May 7th, 2020 + +We have released a mobile model with the +[MnasFPN head](https://arxiv.org/abs/1912.01106). + +* MnasFPN with MobileNet-V2 backbone is the most accurate (26.6 mAP at 183ms + on Pixel 1) mobile detection model we have released to date. With + depth-multiplier, MnasFPN with MobileNet-V2 backbone is 1.8 mAP higher than + MobileNet-V3-Large with SSDLite (23.8 mAP vs 22.0 mAP) at similar latency + (120ms) on Pixel 1. + +We have released model definition, model checkpoints trained on the COCO14 +dataset and a converted TFLite model. 
+ +Thanks to contributors: Bo Chen, Golnaz Ghiasi, Hanxiao Liu, Tsung-Yi +Lin, Dmitry Kalenichenko, Hartwig Adam, Quoc Le, Zhichao Lu, Jonathan Huang, Hao +Xu. + +### Nov 13th, 2019 + +We have released MobileNetEdgeTPU SSDLite model. + +* SSDLite with MobileNetEdgeTPU backbone, which achieves 10% mAP higher than + MobileNetV2 SSDLite (24.3 mAP vs 22 mAP) on a Google Pixel4 at comparable + latency (6.6ms vs 6.8ms). + +Along with the model definition, we are also releasing model checkpoints trained +on the COCO dataset. + +Thanks to contributors: Yunyang Xiong, Bo Chen, Suyog Gupta, Hanxiao Liu, +Gabriel Bender, Mingxing Tan, Berkin Akin, Zhichao Lu, Quoc Le + +### Oct 15th, 2019 + +We have released two MobileNet V3 SSDLite models (presented in +[Searching for MobileNetV3](https://arxiv.org/abs/1905.02244)). + +* SSDLite with MobileNet-V3-Large backbone, which is 27% faster than Mobilenet + V2 SSDLite (119ms vs 162ms) on a Google Pixel phone CPU at the same mAP. +* SSDLite with MobileNet-V3-Small backbone, which is 37% faster than MnasNet + SSDLite reduced with depth-multiplier (43ms vs 68ms) at the same mAP. + +Along with the model definition, we are also releasing model checkpoints trained +on the COCO dataset. + +Thanks to contributors: Bo Chen, Zhichao Lu, Vivek Rathod, Jonathan Huang + +### July 1st, 2019 + +We have released an updated set of utils and an updated +[tutorial](g3doc/challenge_evaluation.md) for all three tracks of the +[Open Images Challenge 2019](https://storage.googleapis.com/openimages/web/challenge2019.html)! + +The Instance Segmentation metric for +[Open Images V5](https://storage.googleapis.com/openimages/web/index.html) and +[Challenge 2019](https://storage.googleapis.com/openimages/web/challenge2019.html) +is part of this release. Check out +[the metric description](https://storage.googleapis.com/openimages/web/evaluation.html#instance_segmentation_eval) +on the Open Images website. + +Thanks to contributors: Alina Kuznetsova, Rodrigo Benenson + +### Feb 11, 2019 + +We have released detection models trained on the Open Images Dataset V4 in our +detection model zoo, including + +* Faster R-CNN detector with Inception Resnet V2 feature extractor +* SSD detector with MobileNet V2 feature extractor +* SSD detector with ResNet 101 FPN feature extractor (aka RetinaNet-101) + +Thanks to contributors: Alina Kuznetsova, Yinxiao Li + +### Sep 17, 2018 + +We have released Faster R-CNN detectors with ResNet-50 / ResNet-101 feature +extractors trained on the +[iNaturalist Species Detection Dataset](https://github.com/visipedia/inat_comp/blob/master/2017/README.md#bounding-boxes). +The models are trained on the training split of the iNaturalist data for 4M +iterations, they achieve 55% and 58% mean AP@.5 over 2854 classes respectively. +For more details please refer to this [paper](https://arxiv.org/abs/1707.06642). + +Thanks to contributors: Chen Sun + +### July 13, 2018 + +There are many new updates in this release, extending the functionality and +capability of the API: + +* Moving from slim-based training to + [Estimator](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator)-based + training. +* Support for [RetinaNet](https://arxiv.org/abs/1708.02002), and a + [MobileNet](https://ai.googleblog.com/2017/06/mobilenets-open-source-models-for.html) + adaptation of RetinaNet. +* A novel SSD-based architecture called the + [Pooling Pyramid Network](https://arxiv.org/abs/1807.03284) (PPN). +* Releasing several [TPU](https://cloud.google.com/tpu/)-compatible models. 
+ These can be found in the `samples/configs/` directory with a comment in the + pipeline configuration files indicating TPU compatibility. +* Support for quantized training. +* Updated documentation for new binaries, Cloud training, and + [Tensorflow Lite](https://www.tensorflow.org/mobile/tflite/). + +See also our +[expanded announcement blogpost](https://ai.googleblog.com/2018/07/accelerated-training-and-inference-with.html) +and accompanying tutorial at the +[TensorFlow blog](https://medium.com/tensorflow/training-and-serving-a-realtime-mobile-object-detector-in-30-minutes-with-cloud-tpus-b78971cf1193). + +Thanks to contributors: Sara Robinson, Aakanksha Chowdhery, Derek Chow, +Pengchong Jin, Jonathan Huang, Vivek Rathod, Zhichao Lu, Ronny Votel + +### June 25, 2018 + +Additional evaluation tools for the +[Open Images Challenge 2018](https://storage.googleapis.com/openimages/web/challenge.html) +are out. Check out our short tutorial on data preparation and running evaluation +[here](g3doc/challenge_evaluation.md)! + +Thanks to contributors: Alina Kuznetsova + +### June 5, 2018 + +We have released the implementation of evaluation metrics for both tracks of the +[Open Images Challenge 2018](https://storage.googleapis.com/openimages/web/challenge.html) +as a part of the Object Detection API - see the +[evaluation protocols](g3doc/evaluation_protocols.md) for more details. +Additionally, we have released a tool for hierarchical labels expansion for the +Open Images Challenge: check out +[oid_hierarchical_labels_expansion.py](dataset_tools/oid_hierarchical_labels_expansion.py). + +Thanks to contributors: Alina Kuznetsova, Vittorio Ferrari, Jasper +Uijlings + +### April 30, 2018 + +We have released a Faster R-CNN detector with ResNet-101 feature extractor +trained on [AVA](https://research.google.com/ava/) v2.1. Compared with other +commonly used object detectors, it changes the action classification loss +function to per-class Sigmoid loss to handle boxes with multiple labels. The +model is trained on the training split of AVA v2.1 for 1.5M iterations, it +achieves mean AP of 11.25% over 60 classes on the validation split of AVA v2.1. +For more details please refer to this [paper](https://arxiv.org/abs/1705.08421). + +Thanks to contributors: Chen Sun, David Ross + +### April 2, 2018 + +Supercharge your mobile phones with the next generation mobile object detector! +We are adding support for MobileNet V2 with SSDLite presented in +[MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381). +This model is 35% faster than Mobilenet V1 SSD on a Google Pixel phone CPU +(200ms vs. 270ms) at the same accuracy. Along with the model definition, we are +also releasing a model checkpoint trained on the COCO dataset. + +Thanks to contributors: Menglong Zhu, Mark Sandler, Zhichao Lu, Vivek +Rathod, Jonathan Huang + +### February 9, 2018 + +We now support instance segmentation!! In this API update we support a number of +instance segmentation models similar to those discussed in the +[Mask R-CNN paper](https://arxiv.org/abs/1703.06870). For further details refer +to [our slides](http://presentations.cocodataset.org/Places17-GMRI.pdf) from the +2017 Coco + Places Workshop. Refer to the section on +[Running an Instance Segmentation Model](g3doc/instance_segmentation.md) for +instructions on how to configure a model that predicts masks in addition to +object bounding boxes. 
+ +Thanks to contributors: Alireza Fathi, Zhichao Lu, Vivek Rathod, Ronny +Votel, Jonathan Huang + +### November 17, 2017 + +As a part of the Open Images V3 release we have released: + +* An implementation of the Open Images evaluation metric and the + [protocol](g3doc/evaluation_protocols.md#open-images). +* Additional tools to separate inference of detection and evaluation (see + [this tutorial](g3doc/oid_inference_and_evaluation.md)). +* A new detection model trained on the Open Images V2 data release (see + [Open Images model](g3doc/detection_model_zoo.md#open-images-models)). + +See more information on the +[Open Images website](https://github.com/openimages/dataset)! + +Thanks to contributors: Stefan Popov, Alina Kuznetsova + +### November 6, 2017 + +We have re-released faster versions of our (pre-trained) models in the +model zoo. In addition to what was +available before, we are also adding Faster R-CNN models trained on COCO with +Inception V2 and Resnet-50 feature extractors, as well as a Faster R-CNN with +Resnet-101 model trained on the KITTI dataset. + +Thanks to contributors: Jonathan Huang, Vivek Rathod, Derek Chow, Tal +Remez, Chen Sun. + +### October 31, 2017 + +We have released a new state-of-the-art model for object detection using the +Faster-RCNN with the +[NASNet-A image featurization](https://arxiv.org/abs/1707.07012). This model +achieves mAP of 43.1% on the test-dev validation dataset for COCO, improving on +the best available model in the zoo by 6% in terms of absolute mAP. + +Thanks to contributors: Barret Zoph, Vijay Vasudevan, Jonathon Shlens, +Quoc Le + +### August 11, 2017 + +We have released an update to the +[Android Detect demo](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/android) +which will now run models trained using the Tensorflow Object Detection API on +an Android device. By default, it currently runs a frozen SSD w/Mobilenet +detector trained on COCO, but we encourage you to try out other detection +models! + +Thanks to contributors: Jonathan Huang, Andrew Harp + +### June 15, 2017 + +In addition to our base Tensorflow detection model definitions, this release +includes: + +* A selection of trainable detection models, including: + * Single Shot Multibox Detector (SSD) with MobileNet, + * SSD with Inception V2, + * Region-Based Fully Convolutional Networks (R-FCN) with Resnet 101, + * Faster RCNN with Resnet 101, + * Faster RCNN with Inception Resnet v2 +* Frozen weights (trained on the COCO dataset) for each of the above models to + be used for out-of-the-box inference purposes. +* A [Jupyter notebook](colab_tutorials/object_detection_tutorial.ipynb) for + performing out-of-the-box inference with one of our released models +* Convenient [local training](g3doc/running_locally.md) scripts as well as + distributed training and evaluation pipelines via + [Google Cloud](g3doc/running_on_cloud.md). 
+ +Thanks to contributors: Jonathan Huang, Vivek Rathod, Derek Chow, Chen +Sun, Menglong Zhu, Matthew Tang, Anoop Korattikara, Alireza Fathi, Ian Fischer, +Zbigniew Wojna, Yang Song, Sergio Guadarrama, Jasper Uijlings, Viacheslav +Kovalevskyi, Kevin Murphy diff --git a/models/research/object_detection/__init__.py b/models/research/object_detection/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/object_detection/anchor_generators/__init__.py b/models/research/object_detection/anchor_generators/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/object_detection/anchor_generators/flexible_grid_anchor_generator.py b/models/research/object_detection/anchor_generators/flexible_grid_anchor_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..0f340cc945e684e63d5e4d36de113c38b92558a5 --- /dev/null +++ b/models/research/object_detection/anchor_generators/flexible_grid_anchor_generator.py @@ -0,0 +1,134 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Generates grid anchors on the fly corresponding to multiple CNN layers.""" + +import tensorflow.compat.v1 as tf + +from object_detection.anchor_generators import grid_anchor_generator +from object_detection.core import anchor_generator +from object_detection.core import box_list_ops + + +class FlexibleGridAnchorGenerator(anchor_generator.AnchorGenerator): + """Generate a grid of anchors for multiple CNN layers of different scale.""" + + def __init__(self, base_sizes, aspect_ratios, anchor_strides, anchor_offsets, + normalize_coordinates=True): + """Constructs a FlexibleGridAnchorGenerator. + + This generator is more flexible than the multiple_grid_anchor_generator + and multiscale_grid_anchor_generator, and can generate any of the anchors + that they can generate, plus additional anchor configurations. In + particular, it allows the explicit specification of scale and aspect ratios + at each layer without making any assumptions between the relationship + between scales and aspect ratios between layers. + + Args: + base_sizes: list of tuples of anchor base sizes. For example, setting + base_sizes=[(1, 2, 3), (4, 5)] means that we want 3 anchors at each + grid point on the first layer with the base sizes of 1, 2, and 3, and 2 + anchors at each grid point on the second layer with the base sizes of + 4 and 5. + aspect_ratios: list or tuple of aspect ratios. For example, setting + aspect_ratios=[(1.0, 2.0, 0.5), (1.0, 2.0)] means that we want 3 anchors + at each grid point on the first layer with aspect ratios of 1.0, 2.0, + and 0.5, and 2 anchors at each grid point on the sercond layer with the + base sizes of 1.0 and 2.0. 
+ anchor_strides: list of pairs of strides in pixels (in y and x directions + respectively). For example, setting anchor_strides=[(25, 25), (50, 50)] + means that we want the anchors corresponding to the first layer to be + strided by 25 pixels and those in the second layer to be strided by 50 + pixels in both y and x directions. + anchor_offsets: list of pairs of offsets in pixels (in y and x directions + respectively). The offset specifies where we want the center of the + (0, 0)-th anchor to lie for each layer. For example, setting + anchor_offsets=[(10, 10), (20, 20)]) means that we want the + (0, 0)-th anchor of the first layer to lie at (10, 10) in pixel space + and likewise that we want the (0, 0)-th anchor of the second layer to + lie at (25, 25) in pixel space. + normalize_coordinates: whether to produce anchors in normalized + coordinates. (defaults to True). + """ + self._base_sizes = base_sizes + self._aspect_ratios = aspect_ratios + self._anchor_strides = anchor_strides + self._anchor_offsets = anchor_offsets + self._normalize_coordinates = normalize_coordinates + + def name_scope(self): + return 'FlexibleGridAnchorGenerator' + + def num_anchors_per_location(self): + """Returns the number of anchors per spatial location. + + Returns: + a list of integers, one for each expected feature map to be passed to + the Generate function. + """ + return [len(size) for size in self._base_sizes] + + def _generate(self, feature_map_shape_list, im_height=1, im_width=1): + """Generates a collection of bounding boxes to be used as anchors. + + Currently we require the input image shape to be statically defined. That + is, im_height and im_width should be integers rather than tensors. + + Args: + feature_map_shape_list: list of pairs of convnet layer resolutions in the + format [(height_0, width_0), (height_1, width_1), ...]. For example, + setting feature_map_shape_list=[(8, 8), (7, 7)] asks for anchors that + correspond to an 8x8 layer followed by a 7x7 layer. + im_height: the height of the image to generate the grid for. If both + im_height and im_width are 1, anchors can only be generated in + absolute coordinates. + im_width: the width of the image to generate the grid for. If both + im_height and im_width are 1, anchors can only be generated in + absolute coordinates. + + Returns: + boxes_list: a list of BoxLists each holding anchor boxes corresponding to + the input feature map shapes. + Raises: + ValueError: if im_height and im_width are 1, but normalized coordinates + were requested. 
+ """ + anchor_grid_list = [] + for (feat_shape, base_sizes, aspect_ratios, anchor_stride, anchor_offset + ) in zip(feature_map_shape_list, self._base_sizes, self._aspect_ratios, + self._anchor_strides, self._anchor_offsets): + anchor_grid = grid_anchor_generator.tile_anchors( + feat_shape[0], + feat_shape[1], + tf.cast(tf.convert_to_tensor(base_sizes), dtype=tf.float32), + tf.cast(tf.convert_to_tensor(aspect_ratios), dtype=tf.float32), + tf.constant([1.0, 1.0]), + tf.cast(tf.convert_to_tensor(anchor_stride), dtype=tf.float32), + tf.cast(tf.convert_to_tensor(anchor_offset), dtype=tf.float32)) + num_anchors = anchor_grid.num_boxes_static() + if num_anchors is None: + num_anchors = anchor_grid.num_boxes() + anchor_indices = tf.zeros([num_anchors]) + anchor_grid.add_field('feature_map_index', anchor_indices) + if self._normalize_coordinates: + if im_height == 1 or im_width == 1: + raise ValueError( + 'Normalized coordinates were requested upon construction of the ' + 'FlexibleGridAnchorGenerator, but a subsequent call to ' + 'generate did not supply dimension information.') + anchor_grid = box_list_ops.to_normalized_coordinates( + anchor_grid, im_height, im_width, check_range=False) + anchor_grid_list.append(anchor_grid) + + return anchor_grid_list diff --git a/models/research/object_detection/anchor_generators/flexible_grid_anchor_generator_test.py b/models/research/object_detection/anchor_generators/flexible_grid_anchor_generator_test.py new file mode 100644 index 0000000000000000000000000000000000000000..bab34b750180081ee03d5d6681f2e449c283dfd4 --- /dev/null +++ b/models/research/object_detection/anchor_generators/flexible_grid_anchor_generator_test.py @@ -0,0 +1,292 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for anchor_generators.flexible_grid_anchor_generator_test.py.""" +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.anchor_generators import flexible_grid_anchor_generator as fg +from object_detection.utils import test_case + + +class FlexibleGridAnchorGeneratorTest(test_case.TestCase): + + def test_construct_single_anchor(self): + def graph_fn(): + anchor_strides = [(32, 32),] + anchor_offsets = [(16, 16),] + base_sizes = [(128.0,)] + aspect_ratios = [(1.0,)] + im_height = 64 + im_width = 64 + feature_map_shape_list = [(2, 2)] + anchor_generator = fg.FlexibleGridAnchorGenerator( + base_sizes, aspect_ratios, anchor_strides, anchor_offsets, + normalize_coordinates=False) + anchors_list = anchor_generator.generate( + feature_map_shape_list, im_height=im_height, im_width=im_width) + anchor_corners = anchors_list[0].get() + return anchor_corners + anchor_corners_out = self.execute(graph_fn, []) + exp_anchor_corners = [[-48, -48, 80, 80], + [-48, -16, 80, 112], + [-16, -48, 112, 80], + [-16, -16, 112, 112]] + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_single_anchor_unit_dimensions(self): + def graph_fn(): + anchor_strides = [(32, 32),] + anchor_offsets = [(16, 16),] + base_sizes = [(32.0,)] + aspect_ratios = [(1.0,)] + im_height = 1 + im_width = 1 + feature_map_shape_list = [(2, 2)] + anchor_generator = fg.FlexibleGridAnchorGenerator( + base_sizes, aspect_ratios, anchor_strides, anchor_offsets, + normalize_coordinates=False) + anchors_list = anchor_generator.generate( + feature_map_shape_list, im_height=im_height, im_width=im_width) + anchor_corners = anchors_list[0].get() + return anchor_corners + # Positive offsets are produced. 
+ exp_anchor_corners = [[0, 0, 32, 32], + [0, 32, 32, 64], + [32, 0, 64, 32], + [32, 32, 64, 64]] + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_normalized_anchors_fails_with_unit_dimensions(self): + anchor_generator = fg.FlexibleGridAnchorGenerator( + [(32.0,)], [(1.0,)], [(32, 32),], [(16, 16),], + normalize_coordinates=True) + with self.assertRaisesRegexp(ValueError, 'Normalized coordinates'): + anchor_generator.generate( + feature_map_shape_list=[(2, 2)], im_height=1, im_width=1) + + def test_construct_single_anchor_in_normalized_coordinates(self): + def graph_fn(): + anchor_strides = [(32, 32),] + anchor_offsets = [(16, 16),] + base_sizes = [(128.0,)] + aspect_ratios = [(1.0,)] + im_height = 64 + im_width = 128 + feature_map_shape_list = [(2, 2)] + anchor_generator = fg.FlexibleGridAnchorGenerator( + base_sizes, aspect_ratios, anchor_strides, anchor_offsets, + normalize_coordinates=True) + anchors_list = anchor_generator.generate( + feature_map_shape_list, im_height=im_height, im_width=im_width) + anchor_corners = anchors_list[0].get() + return anchor_corners + exp_anchor_corners = [[-48./64, -48./128, 80./64, 80./128], + [-48./64, -16./128, 80./64, 112./128], + [-16./64, -48./128, 112./64, 80./128], + [-16./64, -16./128, 112./64, 112./128]] + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_num_anchors_per_location(self): + anchor_strides = [(32, 32), (64, 64)] + anchor_offsets = [(16, 16), (32, 32)] + base_sizes = [(32.0, 64.0, 96.0, 32.0, 64.0, 96.0), + (64.0, 128.0, 172.0, 64.0, 128.0, 172.0)] + aspect_ratios = [(1.0, 1.0, 1.0, 2.0, 2.0, 2.0), + (1.0, 1.0, 1.0, 2.0, 2.0, 2.0)] + anchor_generator = fg.FlexibleGridAnchorGenerator( + base_sizes, aspect_ratios, anchor_strides, anchor_offsets, + normalize_coordinates=False) + self.assertEqual(anchor_generator.num_anchors_per_location(), [6, 6]) + + def test_construct_single_anchor_dynamic_size(self): + def graph_fn(): + anchor_strides = [(32, 32),] + anchor_offsets = [(0, 0),] + base_sizes = [(128.0,)] + aspect_ratios = [(1.0,)] + im_height = tf.constant(64) + im_width = tf.constant(64) + feature_map_shape_list = [(2, 2)] + anchor_generator = fg.FlexibleGridAnchorGenerator( + base_sizes, aspect_ratios, anchor_strides, anchor_offsets, + normalize_coordinates=False) + anchors_list = anchor_generator.generate( + feature_map_shape_list, im_height=im_height, im_width=im_width) + anchor_corners = anchors_list[0].get() + return anchor_corners + # Zero offsets are used. 
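+ # With the 128-pixel base size, (32, 32) stride and (0, 0) offset used above,
+ # the box centers fall at 0 and 32 in each direction and every box extends
+ # 64 pixels from its center, which gives the corners listed below.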
+ exp_anchor_corners = [[-64, -64, 64, 64], + [-64, -32, 64, 96], + [-32, -64, 96, 64], + [-32, -32, 96, 96]] + anchor_corners_out = self.execute_cpu(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_single_anchor_with_odd_input_dimension(self): + + def graph_fn(): + anchor_strides = [(32, 32),] + anchor_offsets = [(0, 0),] + base_sizes = [(128.0,)] + aspect_ratios = [(1.0,)] + im_height = 65 + im_width = 65 + feature_map_shape_list = [(3, 3)] + anchor_generator = fg.FlexibleGridAnchorGenerator( + base_sizes, aspect_ratios, anchor_strides, anchor_offsets, + normalize_coordinates=False) + anchors_list = anchor_generator.generate( + feature_map_shape_list, im_height=im_height, im_width=im_width) + anchor_corners = anchors_list[0].get() + return (anchor_corners,) + anchor_corners_out = self.execute(graph_fn, []) + exp_anchor_corners = [[-64, -64, 64, 64], + [-64, -32, 64, 96], + [-64, 0, 64, 128], + [-32, -64, 96, 64], + [-32, -32, 96, 96], + [-32, 0, 96, 128], + [0, -64, 128, 64], + [0, -32, 128, 96], + [0, 0, 128, 128]] + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_single_anchor_on_two_feature_maps(self): + + def graph_fn(): + anchor_strides = [(32, 32), (64, 64)] + anchor_offsets = [(16, 16), (32, 32)] + base_sizes = [(128.0,), (256.0,)] + aspect_ratios = [(1.0,), (1.0,)] + im_height = 64 + im_width = 64 + feature_map_shape_list = [(2, 2), (1, 1)] + anchor_generator = fg.FlexibleGridAnchorGenerator( + base_sizes, aspect_ratios, anchor_strides, anchor_offsets, + normalize_coordinates=False) + anchors_list = anchor_generator.generate(feature_map_shape_list, + im_height=im_height, + im_width=im_width) + anchor_corners = [anchors.get() for anchors in anchors_list] + return anchor_corners + + anchor_corners_out = np.concatenate(self.execute(graph_fn, []), axis=0) + exp_anchor_corners = [[-48, -48, 80, 80], + [-48, -16, 80, 112], + [-16, -48, 112, 80], + [-16, -16, 112, 112], + [-96, -96, 160, 160]] + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_single_anchor_with_two_scales_per_octave(self): + + def graph_fn(): + anchor_strides = [(64, 64),] + anchor_offsets = [(32, 32),] + base_sizes = [(256.0, 362.03867)] + aspect_ratios = [(1.0, 1.0)] + im_height = 64 + im_width = 64 + feature_map_shape_list = [(1, 1)] + + anchor_generator = fg.FlexibleGridAnchorGenerator( + base_sizes, aspect_ratios, anchor_strides, anchor_offsets, + normalize_coordinates=False) + anchors_list = anchor_generator.generate(feature_map_shape_list, + im_height=im_height, + im_width=im_width) + anchor_corners = [anchors.get() for anchors in anchors_list] + return anchor_corners + # There are 4 set of anchors in this configuration. 
The order is: + # [[2**0.0 intermediate scale + 1.0 aspect], + # [2**0.5 intermediate scale + 1.0 aspect]] + exp_anchor_corners = [[-96., -96., 160., 160.], + [-149.0193, -149.0193, 213.0193, 213.0193]] + + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_single_anchor_with_two_scales_per_octave_and_aspect(self): + def graph_fn(): + anchor_strides = [(64, 64),] + anchor_offsets = [(32, 32),] + base_sizes = [(256.0, 362.03867, 256.0, 362.03867)] + aspect_ratios = [(1.0, 1.0, 2.0, 2.0)] + im_height = 64 + im_width = 64 + feature_map_shape_list = [(1, 1)] + anchor_generator = fg.FlexibleGridAnchorGenerator( + base_sizes, aspect_ratios, anchor_strides, anchor_offsets, + normalize_coordinates=False) + anchors_list = anchor_generator.generate(feature_map_shape_list, + im_height=im_height, + im_width=im_width) + anchor_corners = [anchors.get() for anchors in anchors_list] + return anchor_corners + # There are 4 set of anchors in this configuration. The order is: + # [[2**0.0 intermediate scale + 1.0 aspect], + # [2**0.5 intermediate scale + 1.0 aspect], + # [2**0.0 intermediate scale + 2.0 aspect], + # [2**0.5 intermediate scale + 2.0 aspect]] + + exp_anchor_corners = [[-96., -96., 160., 160.], + [-149.0193, -149.0193, 213.0193, 213.0193], + [-58.50967, -149.0193, 122.50967, 213.0193], + [-96., -224., 160., 288.]] + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_single_anchors_on_feature_maps_with_dynamic_shape(self): + + def graph_fn(feature_map1_height, feature_map1_width, feature_map2_height, + feature_map2_width): + anchor_strides = [(32, 32), (64, 64)] + anchor_offsets = [(16, 16), (32, 32)] + base_sizes = [(128.0,), (256.0,)] + aspect_ratios = [(1.0,), (1.0,)] + im_height = 64 + im_width = 64 + feature_map_shape_list = [(feature_map1_height, feature_map1_width), + (feature_map2_height, feature_map2_width)] + anchor_generator = fg.FlexibleGridAnchorGenerator( + base_sizes, aspect_ratios, anchor_strides, anchor_offsets, + normalize_coordinates=False) + anchors_list = anchor_generator.generate(feature_map_shape_list, + im_height=im_height, + im_width=im_width) + anchor_corners = [anchors.get() for anchors in anchors_list] + return anchor_corners + + anchor_corners_out = np.concatenate( + self.execute_cpu(graph_fn, [ + np.array(2, dtype=np.int32), + np.array(2, dtype=np.int32), + np.array(1, dtype=np.int32), + np.array(1, dtype=np.int32) + ]), + axis=0) + exp_anchor_corners = [[-48, -48, 80, 80], + [-48, -16, 80, 112], + [-16, -48, 112, 80], + [-16, -16, 112, 112], + [-96, -96, 160, 160]] + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/anchor_generators/grid_anchor_generator.py b/models/research/object_detection/anchor_generators/grid_anchor_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..a31bc87996d848201ac57e5e7429ee42ab274299 --- /dev/null +++ b/models/research/object_detection/anchor_generators/grid_anchor_generator.py @@ -0,0 +1,213 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Generates grid anchors on the fly as used in Faster RCNN. + +Generates grid anchors on the fly as described in: +"Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks" +Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. +""" + +import tensorflow.compat.v1 as tf + +from object_detection.core import anchor_generator +from object_detection.core import box_list +from object_detection.utils import ops + + +class GridAnchorGenerator(anchor_generator.AnchorGenerator): + """Generates a grid of anchors at given scales and aspect ratios.""" + + def __init__(self, + scales=(0.5, 1.0, 2.0), + aspect_ratios=(0.5, 1.0, 2.0), + base_anchor_size=None, + anchor_stride=None, + anchor_offset=None): + """Constructs a GridAnchorGenerator. + + Args: + scales: a list of (float) scales, default=(0.5, 1.0, 2.0) + aspect_ratios: a list of (float) aspect ratios, default=(0.5, 1.0, 2.0) + base_anchor_size: base anchor size as height, width ( + (length-2 float32 list or tensor, default=[256, 256]) + anchor_stride: difference in centers between base anchors for adjacent + grid positions (length-2 float32 list or tensor, + default=[16, 16]) + anchor_offset: center of the anchor with scale and aspect ratio 1 for the + upper left element of the grid, this should be zero for + feature networks with only VALID padding and even receptive + field size, but may need additional calculation if other + padding is used (length-2 float32 list or tensor, + default=[0, 0]) + """ + # Handle argument defaults + if base_anchor_size is None: + base_anchor_size = [256, 256] + if anchor_stride is None: + anchor_stride = [16, 16] + if anchor_offset is None: + anchor_offset = [0, 0] + + self._scales = scales + self._aspect_ratios = aspect_ratios + self._base_anchor_size = base_anchor_size + self._anchor_stride = anchor_stride + self._anchor_offset = anchor_offset + + def name_scope(self): + return 'GridAnchorGenerator' + + def num_anchors_per_location(self): + """Returns the number of anchors per spatial location. + + Returns: + a list of integers, one for each expected feature map to be passed to + the `generate` function. + """ + return [len(self._scales) * len(self._aspect_ratios)] + + def _generate(self, feature_map_shape_list): + """Generates a collection of bounding boxes to be used as anchors. + + Args: + feature_map_shape_list: list of pairs of convnet layer resolutions in the + format [(height_0, width_0)]. For example, setting + feature_map_shape_list=[(8, 8)] asks for anchors that correspond + to an 8x8 layer. For this anchor generator, only lists of length 1 are + allowed. + + Returns: + boxes_list: a list of BoxLists each holding anchor boxes corresponding to + the input feature map shapes. + + Raises: + ValueError: if feature_map_shape_list, box_specs_list do not have the same + length. 
+ ValueError: if feature_map_shape_list does not consist of pairs of + integers + """ + if not (isinstance(feature_map_shape_list, list) + and len(feature_map_shape_list) == 1): + raise ValueError('feature_map_shape_list must be a list of length 1.') + if not all([isinstance(list_item, tuple) and len(list_item) == 2 + for list_item in feature_map_shape_list]): + raise ValueError('feature_map_shape_list must be a list of pairs.') + + # Create constants in init_scope so they can be created in tf.functions + # and accessed from outside of the function. + with tf.init_scope(): + self._base_anchor_size = tf.cast(tf.convert_to_tensor( + self._base_anchor_size), dtype=tf.float32) + self._anchor_stride = tf.cast(tf.convert_to_tensor( + self._anchor_stride), dtype=tf.float32) + self._anchor_offset = tf.cast(tf.convert_to_tensor( + self._anchor_offset), dtype=tf.float32) + + grid_height, grid_width = feature_map_shape_list[0] + scales_grid, aspect_ratios_grid = ops.meshgrid(self._scales, + self._aspect_ratios) + scales_grid = tf.reshape(scales_grid, [-1]) + aspect_ratios_grid = tf.reshape(aspect_ratios_grid, [-1]) + anchors = tile_anchors(grid_height, + grid_width, + scales_grid, + aspect_ratios_grid, + self._base_anchor_size, + self._anchor_stride, + self._anchor_offset) + + num_anchors = anchors.num_boxes_static() + if num_anchors is None: + num_anchors = anchors.num_boxes() + anchor_indices = tf.zeros([num_anchors]) + anchors.add_field('feature_map_index', anchor_indices) + return [anchors] + + +def tile_anchors(grid_height, + grid_width, + scales, + aspect_ratios, + base_anchor_size, + anchor_stride, + anchor_offset): + """Create a tiled set of anchors strided along a grid in image space. + + This op creates a set of anchor boxes by placing a "basis" collection of + boxes with user-specified scales and aspect ratios centered at evenly + distributed points along a grid. The basis collection is specified via the + scale and aspect_ratios arguments. For example, setting scales=[.1, .2, .2] + and aspect ratios = [2,2,1/2] means that we create three boxes: one with scale + .1, aspect ratio 2, one with scale .2, aspect ratio 2, and one with scale .2 + and aspect ratio 1/2. Each box is multiplied by "base_anchor_size" before + placing it over its respective center. + + Grid points are specified via grid_height, grid_width parameters as well as + the anchor_stride and anchor_offset parameters. + + Args: + grid_height: size of the grid in the y direction (int or int scalar tensor) + grid_width: size of the grid in the x direction (int or int scalar tensor) + scales: a 1-d (float) tensor representing the scale of each box in the + basis set. + aspect_ratios: a 1-d (float) tensor representing the aspect ratio of each + box in the basis set. The length of the scales and aspect_ratios tensors + must be equal. 
+ base_anchor_size: base anchor size as [height, width] + (float tensor of shape [2]) + anchor_stride: difference in centers between base anchors for adjacent grid + positions (float tensor of shape [2]) + anchor_offset: center of the anchor with scale and aspect ratio 1 for the + upper left element of the grid, this should be zero for + feature networks with only VALID padding and even receptive + field size, but may need some additional calculation if other + padding is used (float tensor of shape [2]) + Returns: + a BoxList holding a collection of N anchor boxes + """ + ratio_sqrts = tf.sqrt(aspect_ratios) + heights = scales / ratio_sqrts * base_anchor_size[0] + widths = scales * ratio_sqrts * base_anchor_size[1] + + # Get a grid of box centers + y_centers = tf.cast(tf.range(grid_height), dtype=tf.float32) + y_centers = y_centers * anchor_stride[0] + anchor_offset[0] + x_centers = tf.cast(tf.range(grid_width), dtype=tf.float32) + x_centers = x_centers * anchor_stride[1] + anchor_offset[1] + x_centers, y_centers = ops.meshgrid(x_centers, y_centers) + + widths_grid, x_centers_grid = ops.meshgrid(widths, x_centers) + heights_grid, y_centers_grid = ops.meshgrid(heights, y_centers) + bbox_centers = tf.stack([y_centers_grid, x_centers_grid], axis=3) + bbox_sizes = tf.stack([heights_grid, widths_grid], axis=3) + bbox_centers = tf.reshape(bbox_centers, [-1, 2]) + bbox_sizes = tf.reshape(bbox_sizes, [-1, 2]) + bbox_corners = _center_size_bbox_to_corners_bbox(bbox_centers, bbox_sizes) + return box_list.BoxList(bbox_corners) + + +def _center_size_bbox_to_corners_bbox(centers, sizes): + """Converts bbox center-size representation to corners representation. + + Args: + centers: a tensor with shape [N, 2] representing bounding box centers + sizes: a tensor with shape [N, 2] representing bounding boxes + + Returns: + corners: tensor with shape [N, 4] representing bounding boxes in corners + representation + """ + return tf.concat([centers - .5 * sizes, centers + .5 * sizes], 1) diff --git a/models/research/object_detection/anchor_generators/grid_anchor_generator_test.py b/models/research/object_detection/anchor_generators/grid_anchor_generator_test.py new file mode 100644 index 0000000000000000000000000000000000000000..292076ea1e918607e8114b8ed317452f40062afd --- /dev/null +++ b/models/research/object_detection/anchor_generators/grid_anchor_generator_test.py @@ -0,0 +1,104 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
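The anchor construction above reduces to center/size algebra. The following plain-Python sketch recomputes the first box of the 2x2 grid test below (scale 0.5, aspect ratio 1.0, [10, 10] base anchor, [19, 19] stride, zero offset), assuming nothing beyond the formulas in tile_anchors:

import math

scale, aspect_ratio = 0.5, 1.0
base_h, base_w = 10.0, 10.0
stride, offset = 19.0, 0.0

ratio_sqrt = math.sqrt(aspect_ratio)
height = scale / ratio_sqrt * base_h            # 5.0
width = scale * ratio_sqrt * base_w             # 5.0
y_center = 0 * stride + offset                  # grid position (0, 0)
x_center = 0 * stride + offset
corners = (y_center - height / 2.0, x_center - width / 2.0,
           y_center + height / 2.0, x_center + width / 2.0)
# corners == (-2.5, -2.5, 2.5, 2.5), the first expected box in
# test_construct_anchor_grid.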
+# ============================================================================== + +"""Tests for object_detection.grid_anchor_generator.""" +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.anchor_generators import grid_anchor_generator +from object_detection.utils import test_case + + +class GridAnchorGeneratorTest(test_case.TestCase): + + def test_construct_single_anchor(self): + """Builds a 1x1 anchor grid to test the size of the output boxes.""" + def graph_fn(): + scales = [0.5, 1.0, 2.0] + aspect_ratios = [0.25, 1.0, 4.0] + anchor_offset = [7, -3] + anchor_generator = grid_anchor_generator.GridAnchorGenerator( + scales, aspect_ratios, anchor_offset=anchor_offset) + anchors_list = anchor_generator.generate(feature_map_shape_list=[(1, 1)]) + anchor_corners = anchors_list[0].get() + return (anchor_corners,) + exp_anchor_corners = [[-121, -35, 135, 29], [-249, -67, 263, 61], + [-505, -131, 519, 125], [-57, -67, 71, 61], + [-121, -131, 135, 125], [-249, -259, 263, 253], + [-25, -131, 39, 125], [-57, -259, 71, 253], + [-121, -515, 135, 509]] + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_anchor_grid(self): + def graph_fn(): + base_anchor_size = [10, 10] + anchor_stride = [19, 19] + anchor_offset = [0, 0] + scales = [0.5, 1.0, 2.0] + aspect_ratios = [1.0] + + anchor_generator = grid_anchor_generator.GridAnchorGenerator( + scales, + aspect_ratios, + base_anchor_size=base_anchor_size, + anchor_stride=anchor_stride, + anchor_offset=anchor_offset) + + anchors_list = anchor_generator.generate(feature_map_shape_list=[(2, 2)]) + anchor_corners = anchors_list[0].get() + return (anchor_corners,) + exp_anchor_corners = [[-2.5, -2.5, 2.5, 2.5], [-5., -5., 5., 5.], + [-10., -10., 10., 10.], [-2.5, 16.5, 2.5, 21.5], + [-5., 14., 5, 24], [-10., 9., 10, 29], + [16.5, -2.5, 21.5, 2.5], [14., -5., 24, 5], + [9., -10., 29, 10], [16.5, 16.5, 21.5, 21.5], + [14., 14., 24, 24], [9., 9., 29, 29]] + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_anchor_grid_with_dynamic_feature_map_shapes(self): + def graph_fn(feature_map_height, feature_map_width): + base_anchor_size = [10, 10] + anchor_stride = [19, 19] + anchor_offset = [0, 0] + scales = [0.5, 1.0, 2.0] + aspect_ratios = [1.0] + anchor_generator = grid_anchor_generator.GridAnchorGenerator( + scales, + aspect_ratios, + base_anchor_size=base_anchor_size, + anchor_stride=anchor_stride, + anchor_offset=anchor_offset) + + anchors_list = anchor_generator.generate( + feature_map_shape_list=[(feature_map_height, feature_map_width)]) + anchor_corners = anchors_list[0].get() + return (anchor_corners,) + + exp_anchor_corners = [[-2.5, -2.5, 2.5, 2.5], [-5., -5., 5., 5.], + [-10., -10., 10., 10.], [-2.5, 16.5, 2.5, 21.5], + [-5., 14., 5, 24], [-10., 9., 10, 29], + [16.5, -2.5, 21.5, 2.5], [14., -5., 24, 5], + [9., -10., 29, 10], [16.5, 16.5, 21.5, 21.5], + [14., 14., 24, 24], [9., 9., 29, 29]] + anchor_corners_out = self.execute_cpu(graph_fn, + [np.array(2, dtype=np.int32), + np.array(2, dtype=np.int32)]) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/anchor_generators/multiple_grid_anchor_generator.py b/models/research/object_detection/anchor_generators/multiple_grid_anchor_generator.py new file mode 100644 index 
0000000000000000000000000000000000000000..5da24d4192c93a0e05a7dd48cce1ae823ae6b60d --- /dev/null +++ b/models/research/object_detection/anchor_generators/multiple_grid_anchor_generator.py @@ -0,0 +1,342 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Generates grid anchors on the fly corresponding to multiple CNN layers. + +Generates grid anchors on the fly corresponding to multiple CNN layers as +described in: +"SSD: Single Shot MultiBox Detector" +Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, +Cheng-Yang Fu, Alexander C. Berg +(see Section 2.2: Choosing scales and aspect ratios for default boxes) +""" + +import numpy as np + +import tensorflow.compat.v1 as tf + +from object_detection.anchor_generators import grid_anchor_generator +from object_detection.core import anchor_generator +from object_detection.core import box_list_ops + + +class MultipleGridAnchorGenerator(anchor_generator.AnchorGenerator): + """Generate a grid of anchors for multiple CNN layers.""" + + def __init__(self, + box_specs_list, + base_anchor_size=None, + anchor_strides=None, + anchor_offsets=None, + clip_window=None): + """Constructs a MultipleGridAnchorGenerator. + + To construct anchors, at multiple grid resolutions, one must provide a + list of feature_map_shape_list (e.g., [(8, 8), (4, 4)]), and for each grid + size, a corresponding list of (scale, aspect ratio) box specifications. + + For example: + box_specs_list = [[(.1, 1.0), (.1, 2.0)], # for 8x8 grid + [(.2, 1.0), (.3, 1.0), (.2, 2.0)]] # for 4x4 grid + + To support the fully convolutional setting, we pass grid sizes in at + generation time, while scale and aspect ratios are fixed at construction + time. + + Args: + box_specs_list: list of list of (scale, aspect ratio) pairs with the + outside list having the same number of entries as feature_map_shape_list + (which is passed in at generation time). + base_anchor_size: base anchor size as [height, width] + (length-2 float numpy or Tensor, default=[1.0, 1.0]). + The height and width values are normalized to the + minimum dimension of the input height and width, so that + when the base anchor height equals the base anchor + width, the resulting anchor is square even if the input + image is not square. + anchor_strides: list of pairs of strides in pixels (in y and x directions + respectively). For example, setting anchor_strides=[(25, 25), (50, 50)] + means that we want the anchors corresponding to the first layer to be + strided by 25 pixels and those in the second layer to be strided by 50 + pixels in both y and x directions. If anchor_strides=None, they are set + to be the reciprocal of the corresponding feature map shapes. + anchor_offsets: list of pairs of offsets in pixels (in y and x directions + respectively). The offset specifies where we want the center of the + (0, 0)-th anchor to lie for each layer. 
For example, setting + anchor_offsets=[(10, 10), (20, 20)]) means that we want the + (0, 0)-th anchor of the first layer to lie at (10, 10) in pixel space + and likewise that we want the (0, 0)-th anchor of the second layer to + lie at (25, 25) in pixel space. If anchor_offsets=None, then they are + set to be half of the corresponding anchor stride. + clip_window: a tensor of shape [4] specifying a window to which all + anchors should be clipped. If clip_window is None, then no clipping + is performed. + + Raises: + ValueError: if box_specs_list is not a list of list of pairs + ValueError: if clip_window is not either None or a tensor of shape [4] + """ + if isinstance(box_specs_list, list) and all( + [isinstance(list_item, list) for list_item in box_specs_list]): + self._box_specs = box_specs_list + else: + raise ValueError('box_specs_list is expected to be a ' + 'list of lists of pairs') + if base_anchor_size is None: + base_anchor_size = [256, 256] + self._base_anchor_size = base_anchor_size + self._anchor_strides = anchor_strides + self._anchor_offsets = anchor_offsets + if clip_window is not None and clip_window.get_shape().as_list() != [4]: + raise ValueError('clip_window must either be None or a shape [4] tensor') + self._clip_window = clip_window + self._scales = [] + self._aspect_ratios = [] + for box_spec in self._box_specs: + if not all([isinstance(entry, tuple) and len(entry) == 2 + for entry in box_spec]): + raise ValueError('box_specs_list is expected to be a ' + 'list of lists of pairs') + scales, aspect_ratios = zip(*box_spec) + self._scales.append(scales) + self._aspect_ratios.append(aspect_ratios) + + for arg, arg_name in zip([self._anchor_strides, self._anchor_offsets], + ['anchor_strides', 'anchor_offsets']): + if arg and not (isinstance(arg, list) and + len(arg) == len(self._box_specs)): + raise ValueError('%s must be a list with the same length ' + 'as self._box_specs' % arg_name) + if arg and not all([ + isinstance(list_item, tuple) and len(list_item) == 2 + for list_item in arg + ]): + raise ValueError('%s must be a list of pairs.' % arg_name) + + def name_scope(self): + return 'MultipleGridAnchorGenerator' + + def num_anchors_per_location(self): + """Returns the number of anchors per spatial location. + + Returns: + a list of integers, one for each expected feature map to be passed to + the Generate function. + """ + return [len(box_specs) for box_specs in self._box_specs] + + def _generate(self, feature_map_shape_list, im_height=1, im_width=1): + """Generates a collection of bounding boxes to be used as anchors. + + The number of anchors generated for a single grid with shape MxM where we + place k boxes over each grid center is k*M^2 and thus the total number of + anchors is the sum over all grids. In our box_specs_list example + (see the constructor docstring), we would place two boxes over each grid + point on an 8x8 grid and three boxes over each grid point on a 4x4 grid and + thus end up with 2*8^2 + 3*4^2 = 176 anchors in total. The layout of the + output anchors follows the order of how the grid sizes and box_specs are + specified (with box_spec index varying the fastest, followed by width + index, then height index, then grid index). + + Args: + feature_map_shape_list: list of pairs of convnet layer resolutions in the + format [(height_0, width_0), (height_1, width_1), ...]. For example, + setting feature_map_shape_list=[(8, 8), (7, 7)] asks for anchors that + correspond to an 8x8 layer followed by a 7x7 layer. 
+ im_height: the height of the image to generate the grid for. If both + im_height and im_width are 1, the generated anchors default to + absolute coordinates, otherwise normalized coordinates are produced. + im_width: the width of the image to generate the grid for. If both + im_height and im_width are 1, the generated anchors default to + absolute coordinates, otherwise normalized coordinates are produced. + + Returns: + boxes_list: a list of BoxLists each holding anchor boxes corresponding to + the input feature map shapes. + + Raises: + ValueError: if feature_map_shape_list, box_specs_list do not have the same + length. + ValueError: if feature_map_shape_list does not consist of pairs of + integers + """ + if not (isinstance(feature_map_shape_list, list) + and len(feature_map_shape_list) == len(self._box_specs)): + raise ValueError('feature_map_shape_list must be a list with the same ' + 'length as self._box_specs') + if not all([isinstance(list_item, tuple) and len(list_item) == 2 + for list_item in feature_map_shape_list]): + raise ValueError('feature_map_shape_list must be a list of pairs.') + + im_height = tf.cast(im_height, dtype=tf.float32) + im_width = tf.cast(im_width, dtype=tf.float32) + + if not self._anchor_strides: + anchor_strides = [(1.0 / tf.cast(pair[0], dtype=tf.float32), + 1.0 / tf.cast(pair[1], dtype=tf.float32)) + for pair in feature_map_shape_list] + else: + anchor_strides = [(tf.cast(stride[0], dtype=tf.float32) / im_height, + tf.cast(stride[1], dtype=tf.float32) / im_width) + for stride in self._anchor_strides] + if not self._anchor_offsets: + anchor_offsets = [(0.5 * stride[0], 0.5 * stride[1]) + for stride in anchor_strides] + else: + anchor_offsets = [(tf.cast(offset[0], dtype=tf.float32) / im_height, + tf.cast(offset[1], dtype=tf.float32) / im_width) + for offset in self._anchor_offsets] + + for arg, arg_name in zip([anchor_strides, anchor_offsets], + ['anchor_strides', 'anchor_offsets']): + if not (isinstance(arg, list) and len(arg) == len(self._box_specs)): + raise ValueError('%s must be a list with the same length ' + 'as self._box_specs' % arg_name) + if not all([isinstance(list_item, tuple) and len(list_item) == 2 + for list_item in arg]): + raise ValueError('%s must be a list of pairs.' 
% arg_name) + + anchor_grid_list = [] + min_im_shape = tf.minimum(im_height, im_width) + scale_height = min_im_shape / im_height + scale_width = min_im_shape / im_width + if not tf.is_tensor(self._base_anchor_size): + base_anchor_size = [ + scale_height * tf.constant(self._base_anchor_size[0], + dtype=tf.float32), + scale_width * tf.constant(self._base_anchor_size[1], + dtype=tf.float32) + ] + else: + base_anchor_size = [ + scale_height * self._base_anchor_size[0], + scale_width * self._base_anchor_size[1] + ] + for feature_map_index, (grid_size, scales, aspect_ratios, stride, + offset) in enumerate( + zip(feature_map_shape_list, self._scales, + self._aspect_ratios, anchor_strides, + anchor_offsets)): + tiled_anchors = grid_anchor_generator.tile_anchors( + grid_height=grid_size[0], + grid_width=grid_size[1], + scales=scales, + aspect_ratios=aspect_ratios, + base_anchor_size=base_anchor_size, + anchor_stride=stride, + anchor_offset=offset) + if self._clip_window is not None: + tiled_anchors = box_list_ops.clip_to_window( + tiled_anchors, self._clip_window, filter_nonoverlapping=False) + num_anchors_in_layer = tiled_anchors.num_boxes_static() + if num_anchors_in_layer is None: + num_anchors_in_layer = tiled_anchors.num_boxes() + anchor_indices = feature_map_index * tf.ones([num_anchors_in_layer]) + tiled_anchors.add_field('feature_map_index', anchor_indices) + anchor_grid_list.append(tiled_anchors) + + return anchor_grid_list + + +def create_ssd_anchors(num_layers=6, + min_scale=0.2, + max_scale=0.95, + scales=None, + aspect_ratios=(1.0, 2.0, 3.0, 1.0 / 2, 1.0 / 3), + interpolated_scale_aspect_ratio=1.0, + base_anchor_size=None, + anchor_strides=None, + anchor_offsets=None, + reduce_boxes_in_lowest_layer=True): + """Creates MultipleGridAnchorGenerator for SSD anchors. + + This function instantiates a MultipleGridAnchorGenerator that reproduces + ``default box`` construction proposed by Liu et al in the SSD paper. + See Section 2.2 for details. Grid sizes are assumed to be passed in + at generation time from finest resolution to coarsest resolution --- this is + used to (linearly) interpolate scales of anchor boxes corresponding to the + intermediate grid sizes. + + Anchors that are returned by calling the `generate` method on the returned + MultipleGridAnchorGenerator object are always in normalized coordinates + and clipped to the unit square: (i.e. all coordinates lie in [0, 1]x[0, 1]). + + Args: + num_layers: integer number of grid layers to create anchors for (actual + grid sizes passed in at generation time) + min_scale: scale of anchors corresponding to finest resolution (float) + max_scale: scale of anchors corresponding to coarsest resolution (float) + scales: As list of anchor scales to use. When not None and not empty, + min_scale and max_scale are not used. + aspect_ratios: list or tuple of (float) aspect ratios to place on each + grid point. + interpolated_scale_aspect_ratio: An additional anchor is added with this + aspect ratio and a scale interpolated between the scale for a layer + and the scale for the next layer (1.0 for the last layer). + This anchor is not included if this value is 0. + base_anchor_size: base anchor size as [height, width]. + The height and width values are normalized to the minimum dimension of the + input height and width, so that when the base anchor height equals the + base anchor width, the resulting anchor is square even if the input image + is not square. + anchor_strides: list of pairs of strides in pixels (in y and x directions + respectively). 
For example, setting anchor_strides=[(25, 25), (50, 50)] + means that we want the anchors corresponding to the first layer to be + strided by 25 pixels and those in the second layer to be strided by 50 + pixels in both y and x directions. If anchor_strides=None, they are set to + be the reciprocal of the corresponding feature map shapes. + anchor_offsets: list of pairs of offsets in pixels (in y and x directions + respectively). The offset specifies where we want the center of the + (0, 0)-th anchor to lie for each layer. For example, setting + anchor_offsets=[(10, 10), (20, 20)]) means that we want the + (0, 0)-th anchor of the first layer to lie at (10, 10) in pixel space + and likewise that we want the (0, 0)-th anchor of the second layer to lie + at (25, 25) in pixel space. If anchor_offsets=None, then they are set to + be half of the corresponding anchor stride. + reduce_boxes_in_lowest_layer: a boolean to indicate whether the fixed 3 + boxes per location is used in the lowest layer. + + Returns: + a MultipleGridAnchorGenerator + """ + if base_anchor_size is None: + base_anchor_size = [1.0, 1.0] + box_specs_list = [] + if scales is None or not scales: + scales = [min_scale + (max_scale - min_scale) * i / (num_layers - 1) + for i in range(num_layers)] + [1.0] + else: + # Add 1.0 to the end, which will only be used in scale_next below and used + # for computing an interpolated scale for the largest scale in the list. + scales += [1.0] + + for layer, scale, scale_next in zip( + range(num_layers), scales[:-1], scales[1:]): + layer_box_specs = [] + if layer == 0 and reduce_boxes_in_lowest_layer: + layer_box_specs = [(0.1, 1.0), (scale, 2.0), (scale, 0.5)] + else: + for aspect_ratio in aspect_ratios: + layer_box_specs.append((scale, aspect_ratio)) + # Add one more anchor, with a scale between the current scale, and the + # scale for the next layer, with a specified aspect ratio (1.0 by + # default). + if interpolated_scale_aspect_ratio > 0.0: + layer_box_specs.append((np.sqrt(scale*scale_next), + interpolated_scale_aspect_ratio)) + box_specs_list.append(layer_box_specs) + + return MultipleGridAnchorGenerator(box_specs_list, base_anchor_size, + anchor_strides, anchor_offsets) diff --git a/models/research/object_detection/anchor_generators/multiple_grid_anchor_generator_test.py b/models/research/object_detection/anchor_generators/multiple_grid_anchor_generator_test.py new file mode 100644 index 0000000000000000000000000000000000000000..c9cc507eec6487647cdab939b508cf85b58cdf20 --- /dev/null +++ b/models/research/object_detection/anchor_generators/multiple_grid_anchor_generator_test.py @@ -0,0 +1,289 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
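For the default arguments used by the shape test below (num_layers=6, min_scale=0.2, max_scale=0.95), the linear scale schedule and the geometric-mean interpolated scale computed in create_ssd_anchors work out as in this small arithmetic sketch:

import math

num_layers, min_scale, max_scale = 6, 0.2, 0.95
scales = [min_scale + (max_scale - min_scale) * i / (num_layers - 1)
          for i in range(num_layers)] + [1.0]
# scales[:-1] == [0.2, 0.35, 0.5, 0.65, 0.8, 0.95]

# Extra anchor per layer: geometric mean of the layer scale and the next scale.
interpolated_scale_layer0 = math.sqrt(scales[0] * scales[1])  # ~0.2646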
+# ============================================================================== + +"""Tests for anchor_generators.multiple_grid_anchor_generator_test.py.""" + +import numpy as np + +import tensorflow.compat.v1 as tf + +from object_detection.anchor_generators import multiple_grid_anchor_generator as ag +from object_detection.utils import test_case + + +class MultipleGridAnchorGeneratorTest(test_case.TestCase): + + def test_construct_single_anchor_grid(self): + """Builds a 1x1 anchor grid to test the size of the output boxes.""" + def graph_fn(): + + box_specs_list = [[(.5, .25), (1.0, .25), (2.0, .25), + (.5, 1.0), (1.0, 1.0), (2.0, 1.0), + (.5, 4.0), (1.0, 4.0), (2.0, 4.0)]] + anchor_generator = ag.MultipleGridAnchorGenerator( + box_specs_list, + base_anchor_size=tf.constant([256, 256], dtype=tf.float32), + anchor_strides=[(16, 16)], + anchor_offsets=[(7, -3)]) + anchors_list = anchor_generator.generate(feature_map_shape_list=[(1, 1)]) + return anchors_list[0].get() + exp_anchor_corners = [[-121, -35, 135, 29], [-249, -67, 263, 61], + [-505, -131, 519, 125], [-57, -67, 71, 61], + [-121, -131, 135, 125], [-249, -259, 263, 253], + [-25, -131, 39, 125], [-57, -259, 71, 253], + [-121, -515, 135, 509]] + + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_anchor_grid(self): + def graph_fn(): + box_specs_list = [[(0.5, 1.0), (1.0, 1.0), (2.0, 1.0)]] + + anchor_generator = ag.MultipleGridAnchorGenerator( + box_specs_list, + base_anchor_size=tf.constant([10, 10], dtype=tf.float32), + anchor_strides=[(19, 19)], + anchor_offsets=[(0, 0)]) + anchors_list = anchor_generator.generate(feature_map_shape_list=[(2, 2)]) + return anchors_list[0].get() + exp_anchor_corners = [[-2.5, -2.5, 2.5, 2.5], [-5., -5., 5., 5.], + [-10., -10., 10., 10.], [-2.5, 16.5, 2.5, 21.5], + [-5., 14., 5, 24], [-10., 9., 10, 29], + [16.5, -2.5, 21.5, 2.5], [14., -5., 24, 5], + [9., -10., 29, 10], [16.5, 16.5, 21.5, 21.5], + [14., 14., 24, 24], [9., 9., 29, 29]] + + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_anchor_grid_non_square(self): + + def graph_fn(): + box_specs_list = [[(1.0, 1.0)]] + anchor_generator = ag.MultipleGridAnchorGenerator( + box_specs_list, base_anchor_size=tf.constant([1, 1], + dtype=tf.float32)) + anchors_list = anchor_generator.generate(feature_map_shape_list=[( + tf.constant(1, dtype=tf.int32), tf.constant(2, dtype=tf.int32))]) + return anchors_list[0].get() + + exp_anchor_corners = [[0., -0.25, 1., 0.75], [0., 0.25, 1., 1.25]] + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_dynamic_size_anchor_grid(self): + + def graph_fn(height, width): + box_specs_list = [[(1.0, 1.0)]] + anchor_generator = ag.MultipleGridAnchorGenerator( + box_specs_list, base_anchor_size=tf.constant([1, 1], + dtype=tf.float32)) + anchors_list = anchor_generator.generate(feature_map_shape_list=[(height, + width)]) + return anchors_list[0].get() + + exp_anchor_corners = [[0., -0.25, 1., 0.75], [0., 0.25, 1., 1.25]] + + anchor_corners_out = self.execute_cpu(graph_fn, + [np.array(1, dtype=np.int32), + np.array(2, dtype=np.int32)]) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_anchor_grid_normalized(self): + def graph_fn(): + box_specs_list = [[(1.0, 1.0)]] + + anchor_generator = ag.MultipleGridAnchorGenerator( + box_specs_list, 
base_anchor_size=tf.constant([1, 1], + dtype=tf.float32)) + anchors_list = anchor_generator.generate( + feature_map_shape_list=[(tf.constant(1, dtype=tf.int32), tf.constant( + 2, dtype=tf.int32))], + im_height=320, + im_width=640) + return anchors_list[0].get() + + exp_anchor_corners = [[0., 0., 1., 0.5], [0., 0.5, 1., 1.]] + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_multiple_grids(self): + + def graph_fn(): + box_specs_list = [[(1.0, 1.0), (2.0, 1.0), (1.0, 0.5)], + [(1.0, 1.0), (1.0, 0.5)]] + + anchor_generator = ag.MultipleGridAnchorGenerator( + box_specs_list, + base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32), + anchor_strides=[(.25, .25), (.5, .5)], + anchor_offsets=[(.125, .125), (.25, .25)]) + anchors_list = anchor_generator.generate(feature_map_shape_list=[(4, 4), ( + 2, 2)]) + return [anchors.get() for anchors in anchors_list] + # height and width of box with .5 aspect ratio + h = np.sqrt(2) + w = 1.0/np.sqrt(2) + exp_small_grid_corners = [[-.25, -.25, .75, .75], + [.25-.5*h, .25-.5*w, .25+.5*h, .25+.5*w], + [-.25, .25, .75, 1.25], + [.25-.5*h, .75-.5*w, .25+.5*h, .75+.5*w], + [.25, -.25, 1.25, .75], + [.75-.5*h, .25-.5*w, .75+.5*h, .25+.5*w], + [.25, .25, 1.25, 1.25], + [.75-.5*h, .75-.5*w, .75+.5*h, .75+.5*w]] + # only test first entry of larger set of anchors + exp_big_grid_corners = [[.125-.5, .125-.5, .125+.5, .125+.5], + [.125-1.0, .125-1.0, .125+1.0, .125+1.0], + [.125-.5*h, .125-.5*w, .125+.5*h, .125+.5*w],] + + anchor_corners_out = np.concatenate(self.execute(graph_fn, []), axis=0) + self.assertEquals(anchor_corners_out.shape, (56, 4)) + big_grid_corners = anchor_corners_out[0:3, :] + small_grid_corners = anchor_corners_out[48:, :] + self.assertAllClose(small_grid_corners, exp_small_grid_corners) + self.assertAllClose(big_grid_corners, exp_big_grid_corners) + + def test_construct_multiple_grids_with_clipping(self): + + def graph_fn(): + box_specs_list = [[(1.0, 1.0), (2.0, 1.0), (1.0, 0.5)], + [(1.0, 1.0), (1.0, 0.5)]] + + clip_window = tf.constant([0, 0, 1, 1], dtype=tf.float32) + anchor_generator = ag.MultipleGridAnchorGenerator( + box_specs_list, + base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32), + clip_window=clip_window) + anchors_list = anchor_generator.generate(feature_map_shape_list=[(4, 4), ( + 2, 2)]) + return [anchors.get() for anchors in anchors_list] + # height and width of box with .5 aspect ratio + h = np.sqrt(2) + w = 1.0/np.sqrt(2) + exp_small_grid_corners = [[0, 0, .75, .75], + [0, 0, .25+.5*h, .25+.5*w], + [0, .25, .75, 1], + [0, .75-.5*w, .25+.5*h, 1], + [.25, 0, 1, .75], + [.75-.5*h, 0, 1, .25+.5*w], + [.25, .25, 1, 1], + [.75-.5*h, .75-.5*w, 1, 1]] + + anchor_corners_out = np.concatenate(self.execute(graph_fn, []), axis=0) + small_grid_corners = anchor_corners_out[48:, :] + self.assertAllClose(small_grid_corners, exp_small_grid_corners) + + def test_invalid_box_specs(self): + # not all box specs are pairs + box_specs_list = [[(1.0, 1.0), (2.0, 1.0), (1.0, 0.5)], + [(1.0, 1.0), (1.0, 0.5, .3)]] + with self.assertRaises(ValueError): + ag.MultipleGridAnchorGenerator(box_specs_list) + + # box_specs_list is not a list of lists + box_specs_list = [(1.0, 1.0), (2.0, 1.0), (1.0, 0.5)] + with self.assertRaises(ValueError): + ag.MultipleGridAnchorGenerator(box_specs_list) + + def test_invalid_generate_arguments(self): + box_specs_list = [[(1.0, 1.0), (2.0, 1.0), (1.0, 0.5)], + [(1.0, 1.0), (1.0, 0.5)]] + + # incompatible lengths with box_specs_list 
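+ # (anchor_strides/anchor_offsets below do not have one entry per box spec, or
+ # the feature_map_shape_list passed to generate() has the wrong length, so the
+ # constructor or the generate call raises ValueError in each case.)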
+ with self.assertRaises(ValueError): + anchor_generator = ag.MultipleGridAnchorGenerator( + box_specs_list, + base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32), + anchor_strides=[(.25, .25)], + anchor_offsets=[(.125, .125), (.25, .25)]) + anchor_generator.generate(feature_map_shape_list=[(4, 4), (2, 2)]) + with self.assertRaises(ValueError): + anchor_generator = ag.MultipleGridAnchorGenerator( + box_specs_list, + base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32), + anchor_strides=[(.25, .25), (.5, .5)], + anchor_offsets=[(.125, .125), (.25, .25)]) + anchor_generator.generate(feature_map_shape_list=[(4, 4), (2, 2), (1, 1)]) + with self.assertRaises(ValueError): + anchor_generator = ag.MultipleGridAnchorGenerator( + box_specs_list, + base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32), + anchor_strides=[(.5, .5)], + anchor_offsets=[(.25, .25)]) + anchor_generator.generate(feature_map_shape_list=[(4, 4), (2, 2)]) + + # not pairs + with self.assertRaises(ValueError): + anchor_generator = ag.MultipleGridAnchorGenerator( + box_specs_list, + base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32), + anchor_strides=[(.25, .25), (.5, .5)], + anchor_offsets=[(.125, .125), (.25, .25)]) + anchor_generator.generate(feature_map_shape_list=[(4, 4, 4), (2, 2)]) + with self.assertRaises(ValueError): + anchor_generator = ag.MultipleGridAnchorGenerator( + box_specs_list, + base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32), + anchor_strides=[(.25, .25, .1), (.5, .5)], + anchor_offsets=[(.125, .125), (.25, .25)]) + anchor_generator.generate(feature_map_shape_list=[(4, 4), (2, 2)]) + with self.assertRaises(ValueError): + anchor_generator = ag.MultipleGridAnchorGenerator( + box_specs_list, + base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32), + anchor_strides=[(.25, .25), (.5, .5)], + anchor_offsets=[(.125, .125), (.25, .25)]) + anchor_generator.generate(feature_map_shape_list=[(4), (2, 2)]) + + +class CreateSSDAnchorsTest(test_case.TestCase): + + def test_create_ssd_anchors_returns_correct_shape(self): + + def graph_fn1(): + anchor_generator = ag.create_ssd_anchors( + num_layers=6, + min_scale=0.2, + max_scale=0.95, + aspect_ratios=(1.0, 2.0, 3.0, 1.0 / 2, 1.0 / 3), + reduce_boxes_in_lowest_layer=True) + + feature_map_shape_list = [(38, 38), (19, 19), (10, 10), + (5, 5), (3, 3), (1, 1)] + anchors_list = anchor_generator.generate( + feature_map_shape_list=feature_map_shape_list) + return [anchors.get() for anchors in anchors_list] + anchor_corners_out = np.concatenate(self.execute(graph_fn1, []), axis=0) + self.assertEquals(anchor_corners_out.shape, (7308, 4)) + + def graph_fn2(): + anchor_generator = ag.create_ssd_anchors( + num_layers=6, min_scale=0.2, max_scale=0.95, + aspect_ratios=(1.0, 2.0, 3.0, 1.0/2, 1.0/3), + reduce_boxes_in_lowest_layer=False) + + feature_map_shape_list = [(38, 38), (19, 19), (10, 10), + (5, 5), (3, 3), (1, 1)] + anchors_list = anchor_generator.generate( + feature_map_shape_list=feature_map_shape_list) + return [anchors.get() for anchors in anchors_list] + anchor_corners_out = np.concatenate(self.execute(graph_fn2, []), axis=0) + self.assertEquals(anchor_corners_out.shape, (11640, 4)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/anchor_generators/multiscale_grid_anchor_generator.py b/models/research/object_detection/anchor_generators/multiscale_grid_anchor_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..a3244e1b196e58c46713059fa68ac2f22f7962ef --- /dev/null 
+++ b/models/research/object_detection/anchor_generators/multiscale_grid_anchor_generator.py @@ -0,0 +1,152 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Generates grid anchors on the fly corresponding to multiple CNN layers. + +Generates grid anchors on the fly corresponding to multiple CNN layers as +described in: +"Focal Loss for Dense Object Detection" (https://arxiv.org/abs/1708.02002) +T.-Y. Lin, P. Goyal, R. Girshick, K. He, P. Dollar +""" + +import tensorflow.compat.v1 as tf + +from object_detection.anchor_generators import grid_anchor_generator +from object_detection.core import anchor_generator +from object_detection.core import box_list_ops + + +class MultiscaleGridAnchorGenerator(anchor_generator.AnchorGenerator): + """Generate a grid of anchors for multiple CNN layers of different scale.""" + + def __init__(self, min_level, max_level, anchor_scale, aspect_ratios, + scales_per_octave, normalize_coordinates=True): + """Constructs a MultiscaleGridAnchorGenerator. + + To construct anchors, at multiple scale resolutions, one must provide a + the minimum level and maximum levels on a scale pyramid. To define the size + of anchor, the anchor scale is provided to decide the size relatively to the + stride of the corresponding feature map. The generator allows one pixel + location on feature map maps to multiple anchors, that have different aspect + ratios and intermediate scales. + + Args: + min_level: minimum level in feature pyramid. + max_level: maximum level in feature pyramid. + anchor_scale: anchor scale and feature stride define the size of the base + anchor on an image. For example, given a feature pyramid with strides + [2^3, ..., 2^7] and anchor scale 4. The base anchor size is + 4 * [2^3, ..., 2^7]. + aspect_ratios: list or tuple of (float) aspect ratios to place on each + grid point. + scales_per_octave: integer number of intermediate scales per scale octave. + normalize_coordinates: whether to produce anchors in normalized + coordinates. (defaults to True). + """ + self._anchor_grid_info = [] + self._aspect_ratios = aspect_ratios + self._scales_per_octave = scales_per_octave + self._normalize_coordinates = normalize_coordinates + + scales = [2**(float(scale) / scales_per_octave) + for scale in range(scales_per_octave)] + aspects = list(aspect_ratios) + + for level in range(min_level, max_level + 1): + anchor_stride = [2**level, 2**level] + base_anchor_size = [2**level * anchor_scale, 2**level * anchor_scale] + self._anchor_grid_info.append({ + 'level': level, + 'info': [scales, aspects, base_anchor_size, anchor_stride] + }) + + def name_scope(self): + return 'MultiscaleGridAnchorGenerator' + + def num_anchors_per_location(self): + """Returns the number of anchors per spatial location. + + Returns: + a list of integers, one for each expected feature map to be passed to + the Generate function. 
+ """ + return len(self._anchor_grid_info) * [ + len(self._aspect_ratios) * self._scales_per_octave] + + def _generate(self, feature_map_shape_list, im_height=1, im_width=1): + """Generates a collection of bounding boxes to be used as anchors. + + For training, we require the input image shape to be statically defined. + That is, im_height and im_width should be integers rather than tensors. + For inference, im_height and im_width can be either integers (for fixed + image size), or tensors (for arbitrary image size). + + Args: + feature_map_shape_list: list of pairs of convnet layer resolutions in the + format [(height_0, width_0), (height_1, width_1), ...]. For example, + setting feature_map_shape_list=[(8, 8), (7, 7)] asks for anchors that + correspond to an 8x8 layer followed by a 7x7 layer. + im_height: the height of the image to generate the grid for. If both + im_height and im_width are 1, anchors can only be generated in + absolute coordinates. + im_width: the width of the image to generate the grid for. If both + im_height and im_width are 1, anchors can only be generated in + absolute coordinates. + + Returns: + boxes_list: a list of BoxLists each holding anchor boxes corresponding to + the input feature map shapes. + Raises: + ValueError: if im_height and im_width are not integers. + ValueError: if im_height and im_width are 1, but normalized coordinates + were requested. + """ + anchor_grid_list = [] + for feat_shape, grid_info in zip(feature_map_shape_list, + self._anchor_grid_info): + # TODO(rathodv) check the feature_map_shape_list is consistent with + # self._anchor_grid_info + level = grid_info['level'] + stride = 2**level + scales, aspect_ratios, base_anchor_size, anchor_stride = grid_info['info'] + feat_h = feat_shape[0] + feat_w = feat_shape[1] + anchor_offset = [0, 0] + if isinstance(im_height, int) and isinstance(im_width, int): + if im_height % 2.0**level == 0 or im_height == 1: + anchor_offset[0] = stride / 2.0 + if im_width % 2.0**level == 0 or im_width == 1: + anchor_offset[1] = stride / 2.0 + if tf.is_tensor(im_height) and tf.is_tensor(im_width): + anchor_offset[0] = stride / 2.0 + anchor_offset[1] = stride / 2.0 + ag = grid_anchor_generator.GridAnchorGenerator( + scales, + aspect_ratios, + base_anchor_size=base_anchor_size, + anchor_stride=anchor_stride, + anchor_offset=anchor_offset) + (anchor_grid,) = ag.generate(feature_map_shape_list=[(feat_h, feat_w)]) + + if self._normalize_coordinates: + if im_height == 1 or im_width == 1: + raise ValueError( + 'Normalized coordinates were requested upon construction of the ' + 'MultiscaleGridAnchorGenerator, but a subsequent call to ' + 'generate did not supply dimension information.') + anchor_grid = box_list_ops.to_normalized_coordinates( + anchor_grid, im_height, im_width, check_range=False) + anchor_grid_list.append(anchor_grid) + + return anchor_grid_list diff --git a/models/research/object_detection/anchor_generators/multiscale_grid_anchor_generator_test.py b/models/research/object_detection/anchor_generators/multiscale_grid_anchor_generator_test.py new file mode 100644 index 0000000000000000000000000000000000000000..82aa8d1df0b72d517c05d4dda1f6c2a7378d3d00 --- /dev/null +++ b/models/research/object_detection/anchor_generators/multiscale_grid_anchor_generator_test.py @@ -0,0 +1,308 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for anchor_generators.multiscale_grid_anchor_generator_test.py.""" +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.anchor_generators import multiscale_grid_anchor_generator as mg +from object_detection.utils import test_case + + +class MultiscaleGridAnchorGeneratorTest(test_case.TestCase): + + def test_construct_single_anchor(self): + def graph_fn(): + min_level = 5 + max_level = 5 + anchor_scale = 4.0 + aspect_ratios = [1.0] + scales_per_octave = 1 + im_height = 64 + im_width = 64 + feature_map_shape_list = [(2, 2)] + anchor_generator = mg.MultiscaleGridAnchorGenerator( + min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave, + normalize_coordinates=False) + anchors_list = anchor_generator.generate( + feature_map_shape_list, im_height=im_height, im_width=im_width) + anchor_corners = anchors_list[0].get() + return anchor_corners + + exp_anchor_corners = [[-48, -48, 80, 80], + [-48, -16, 80, 112], + [-16, -48, 112, 80], + [-16, -16, 112, 112]] + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_single_anchor_unit_dimensions(self): + def graph_fn(): + min_level = 5 + max_level = 5 + anchor_scale = 1.0 + aspect_ratios = [1.0] + scales_per_octave = 1 + im_height = 1 + im_width = 1 + feature_map_shape_list = [(2, 2)] + anchor_generator = mg.MultiscaleGridAnchorGenerator( + min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave, + normalize_coordinates=False) + anchors_list = anchor_generator.generate( + feature_map_shape_list, im_height=im_height, im_width=im_width) + anchor_corners = anchors_list[0].get() + return anchor_corners + + # Positive offsets are produced. 
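+ # Because im_height == im_width == 1, _generate picks an offset of
+ # stride / 2 = 16 at level 5 (stride 32, anchor_scale 1.0, so 32-pixel base
+ # anchors); box centers therefore fall at 16 and 48, giving the corners below.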
+ exp_anchor_corners = [[0, 0, 32, 32], + [0, 32, 32, 64], + [32, 0, 64, 32], + [32, 32, 64, 64]] + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_normalized_anchors_fails_with_unit_dimensions(self): + anchor_generator = mg.MultiscaleGridAnchorGenerator( + min_level=5, max_level=5, anchor_scale=1.0, aspect_ratios=[1.0], + scales_per_octave=1, normalize_coordinates=True) + with self.assertRaisesRegexp(ValueError, 'Normalized coordinates'): + anchor_generator.generate( + feature_map_shape_list=[(2, 2)], im_height=1, im_width=1) + + def test_construct_single_anchor_in_normalized_coordinates(self): + def graph_fn(): + min_level = 5 + max_level = 5 + anchor_scale = 4.0 + aspect_ratios = [1.0] + scales_per_octave = 1 + im_height = 64 + im_width = 128 + feature_map_shape_list = [(2, 2)] + anchor_generator = mg.MultiscaleGridAnchorGenerator( + min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave, + normalize_coordinates=True) + anchors_list = anchor_generator.generate( + feature_map_shape_list, im_height=im_height, im_width=im_width) + anchor_corners = anchors_list[0].get() + return anchor_corners + + exp_anchor_corners = [[-48./64, -48./128, 80./64, 80./128], + [-48./64, -16./128, 80./64, 112./128], + [-16./64, -48./128, 112./64, 80./128], + [-16./64, -16./128, 112./64, 112./128]] + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_num_anchors_per_location(self): + min_level = 5 + max_level = 6 + anchor_scale = 4.0 + aspect_ratios = [1.0, 2.0] + scales_per_octave = 3 + anchor_generator = mg.MultiscaleGridAnchorGenerator( + min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave, + normalize_coordinates=False) + self.assertEqual(anchor_generator.num_anchors_per_location(), [6, 6]) + + def test_construct_single_anchor_dynamic_size(self): + def graph_fn(): + min_level = 5 + max_level = 5 + anchor_scale = 4.0 + aspect_ratios = [1.0] + scales_per_octave = 1 + im_height = tf.constant(64) + im_width = tf.constant(64) + feature_map_shape_list = [(2, 2)] + anchor_generator = mg.MultiscaleGridAnchorGenerator( + min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave, + normalize_coordinates=False) + anchors_list = anchor_generator.generate( + feature_map_shape_list, im_height=im_height, im_width=im_width) + anchor_corners = anchors_list[0].get() + return anchor_corners + + exp_anchor_corners = [[-64, -64, 64, 64], + [-64, -32, 64, 96], + [-32, -64, 96, 64], + [-32, -32, 96, 96]] + # Add anchor offset. 
+ anchor_offset = 2.0**5 / 2.0 + exp_anchor_corners = [ + [b + anchor_offset for b in a] for a in exp_anchor_corners + ] + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_single_anchor_with_odd_input_dimension(self): + + def graph_fn(): + min_level = 5 + max_level = 5 + anchor_scale = 4.0 + aspect_ratios = [1.0] + scales_per_octave = 1 + im_height = 65 + im_width = 65 + feature_map_shape_list = [(3, 3)] + anchor_generator = mg.MultiscaleGridAnchorGenerator( + min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave, + normalize_coordinates=False) + anchors_list = anchor_generator.generate( + feature_map_shape_list, im_height=im_height, im_width=im_width) + anchor_corners = anchors_list[0].get() + return (anchor_corners,) + anchor_corners_out = self.execute(graph_fn, []) + exp_anchor_corners = [[-64, -64, 64, 64], + [-64, -32, 64, 96], + [-64, 0, 64, 128], + [-32, -64, 96, 64], + [-32, -32, 96, 96], + [-32, 0, 96, 128], + [0, -64, 128, 64], + [0, -32, 128, 96], + [0, 0, 128, 128]] + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_single_anchor_on_two_feature_maps(self): + + def graph_fn(): + min_level = 5 + max_level = 6 + anchor_scale = 4.0 + aspect_ratios = [1.0] + scales_per_octave = 1 + im_height = 64 + im_width = 64 + feature_map_shape_list = [(2, 2), (1, 1)] + anchor_generator = mg.MultiscaleGridAnchorGenerator( + min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave, + normalize_coordinates=False) + anchors_list = anchor_generator.generate(feature_map_shape_list, + im_height=im_height, + im_width=im_width) + anchor_corners = [anchors.get() for anchors in anchors_list] + return anchor_corners + + anchor_corners_out = np.concatenate(self.execute(graph_fn, []), axis=0) + exp_anchor_corners = [[-48, -48, 80, 80], + [-48, -16, 80, 112], + [-16, -48, 112, 80], + [-16, -16, 112, 112], + [-96, -96, 160, 160]] + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_single_anchor_with_two_scales_per_octave(self): + + def graph_fn(): + min_level = 6 + max_level = 6 + anchor_scale = 4.0 + aspect_ratios = [1.0] + scales_per_octave = 2 + im_height = 64 + im_width = 64 + feature_map_shape_list = [(1, 1)] + + anchor_generator = mg.MultiscaleGridAnchorGenerator( + min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave, + normalize_coordinates=False) + anchors_list = anchor_generator.generate(feature_map_shape_list, + im_height=im_height, + im_width=im_width) + anchor_corners = [anchors.get() for anchors in anchors_list] + return anchor_corners + # There are 4 set of anchors in this configuration. 
The order is: + # [[2**0.0 intermediate scale + 1.0 aspect], + # [2**0.5 intermediate scale + 1.0 aspect]] + exp_anchor_corners = [[-96., -96., 160., 160.], + [-149.0193, -149.0193, 213.0193, 213.0193]] + + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_single_anchor_with_two_scales_per_octave_and_aspect(self): + def graph_fn(): + min_level = 6 + max_level = 6 + anchor_scale = 4.0 + aspect_ratios = [1.0, 2.0] + scales_per_octave = 2 + im_height = 64 + im_width = 64 + feature_map_shape_list = [(1, 1)] + anchor_generator = mg.MultiscaleGridAnchorGenerator( + min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave, + normalize_coordinates=False) + anchors_list = anchor_generator.generate(feature_map_shape_list, + im_height=im_height, + im_width=im_width) + anchor_corners = [anchors.get() for anchors in anchors_list] + return anchor_corners + # There are 4 set of anchors in this configuration. The order is: + # [[2**0.0 intermediate scale + 1.0 aspect], + # [2**0.5 intermediate scale + 1.0 aspect], + # [2**0.0 intermediate scale + 2.0 aspect], + # [2**0.5 intermediate scale + 2.0 aspect]] + + exp_anchor_corners = [[-96., -96., 160., 160.], + [-149.0193, -149.0193, 213.0193, 213.0193], + [-58.50967, -149.0193, 122.50967, 213.0193], + [-96., -224., 160., 288.]] + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_single_anchors_on_feature_maps_with_dynamic_shape(self): + + def graph_fn(feature_map1_height, feature_map1_width, feature_map2_height, + feature_map2_width): + min_level = 5 + max_level = 6 + anchor_scale = 4.0 + aspect_ratios = [1.0] + scales_per_octave = 1 + im_height = 64 + im_width = 64 + feature_map_shape_list = [(feature_map1_height, feature_map1_width), + (feature_map2_height, feature_map2_width)] + anchor_generator = mg.MultiscaleGridAnchorGenerator( + min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave, + normalize_coordinates=False) + anchors_list = anchor_generator.generate(feature_map_shape_list, + im_height=im_height, + im_width=im_width) + anchor_corners = [anchors.get() for anchors in anchors_list] + return anchor_corners + + anchor_corners_out = np.concatenate( + self.execute_cpu(graph_fn, [ + np.array(2, dtype=np.int32), + np.array(2, dtype=np.int32), + np.array(1, dtype=np.int32), + np.array(1, dtype=np.int32) + ]), + axis=0) + exp_anchor_corners = [[-48, -48, 80, 80], + [-48, -16, 80, 112], + [-16, -48, 112, 80], + [-16, -16, 112, 112], + [-96, -96, 160, 160]] + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/box_coders/__init__.py b/models/research/object_detection/box_coders/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/object_detection/box_coders/faster_rcnn_box_coder.py b/models/research/object_detection/box_coders/faster_rcnn_box_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..e06c1b12d2ccff5e4e2992554bd244c2f5b1a822 --- /dev/null +++ b/models/research/object_detection/box_coders/faster_rcnn_box_coder.py @@ -0,0 +1,118 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
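The expected corners in the two-scales-per-octave tests above can be reproduced by hand. A short arithmetic sketch, assuming the usual grid-anchor convention that an aspect ratio a scales width by sqrt(a) and height by 1/sqrt(a):

import math

# Level-6 grid on a 64x64 image: a single cell centered at (32, 32) after the
# stride/2 offset, with base anchor size anchor_scale * 2**level = 256.
center, base = 32.0, 4.0 * 2 ** 6

def corners(scale, aspect):
  h = base * scale / math.sqrt(aspect)
  w = base * scale * math.sqrt(aspect)
  return [center - h / 2, center - w / 2, center + h / 2, center + w / 2]

print(corners(2 ** 0.0, 1.0))  # ~[-96.0, -96.0, 160.0, 160.0]
print(corners(2 ** 0.5, 1.0))  # ~[-149.02, -149.02, 213.02, 213.02]
print(corners(2 ** 0.0, 2.0))  # ~[-58.51, -149.02, 122.51, 213.02]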
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Faster RCNN box coder. + +Faster RCNN box coder follows the coding schema described below: + ty = (y - ya) / ha + tx = (x - xa) / wa + th = log(h / ha) + tw = log(w / wa) + where x, y, w, h denote the box's center coordinates, width and height + respectively. Similarly, xa, ya, wa, ha denote the anchor's center + coordinates, width and height. tx, ty, tw and th denote the anchor-encoded + center, width and height respectively. + + See http://arxiv.org/abs/1506.01497 for details. +""" + +import tensorflow.compat.v1 as tf + +from object_detection.core import box_coder +from object_detection.core import box_list + +EPSILON = 1e-8 + + +class FasterRcnnBoxCoder(box_coder.BoxCoder): + """Faster RCNN box coder.""" + + def __init__(self, scale_factors=None): + """Constructor for FasterRcnnBoxCoder. + + Args: + scale_factors: List of 4 positive scalars to scale ty, tx, th and tw. + If set to None, does not perform scaling. For Faster RCNN, + the open-source implementation recommends using [10.0, 10.0, 5.0, 5.0]. + """ + if scale_factors: + assert len(scale_factors) == 4 + for scalar in scale_factors: + assert scalar > 0 + self._scale_factors = scale_factors + + @property + def code_size(self): + return 4 + + def _encode(self, boxes, anchors): + """Encode a box collection with respect to anchor collection. + + Args: + boxes: BoxList holding N boxes to be encoded. + anchors: BoxList of anchors. + + Returns: + a tensor representing N anchor-encoded boxes of the format + [ty, tx, th, tw]. + """ + # Convert anchors to the center coordinate representation. + ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes() + ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes() + # Avoid NaN in division and log below. + ha += EPSILON + wa += EPSILON + h += EPSILON + w += EPSILON + + tx = (xcenter - xcenter_a) / wa + ty = (ycenter - ycenter_a) / ha + tw = tf.log(w / wa) + th = tf.log(h / ha) + # Scales location targets as used in paper for joint training. + if self._scale_factors: + ty *= self._scale_factors[0] + tx *= self._scale_factors[1] + th *= self._scale_factors[2] + tw *= self._scale_factors[3] + return tf.transpose(tf.stack([ty, tx, th, tw])) + + def _decode(self, rel_codes, anchors): + """Decode relative codes to boxes. + + Args: + rel_codes: a tensor representing N anchor-encoded boxes. + anchors: BoxList of anchors. + + Returns: + boxes: BoxList holding N bounding boxes. + """ + ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes() + + ty, tx, th, tw = tf.unstack(tf.transpose(rel_codes)) + if self._scale_factors: + ty /= self._scale_factors[0] + tx /= self._scale_factors[1] + th /= self._scale_factors[2] + tw /= self._scale_factors[3] + w = tf.exp(tw) * wa + h = tf.exp(th) * ha + ycenter = ty * ha + ycenter_a + xcenter = tx * wa + xcenter_a + ymin = ycenter - h / 2. + xmin = xcenter - w / 2. + ymax = ycenter + h / 2. + xmax = xcenter + w / 2. 
+ return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax]))) diff --git a/models/research/object_detection/box_coders/faster_rcnn_box_coder_test.py b/models/research/object_detection/box_coders/faster_rcnn_box_coder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..1cd48279af94ee67a2e7d2a96a703168965db897 --- /dev/null +++ b/models/research/object_detection/box_coders/faster_rcnn_box_coder_test.py @@ -0,0 +1,113 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.box_coder.faster_rcnn_box_coder.""" +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.box_coders import faster_rcnn_box_coder +from object_detection.core import box_list +from object_detection.utils import test_case + + +class FasterRcnnBoxCoderTest(test_case.TestCase): + + def test_get_correct_relative_codes_after_encoding(self): + boxes = np.array([[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]], + np.float32) + anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]], + np.float32) + expected_rel_codes = [[-0.5, -0.416666, -0.405465, -0.182321], + [-0.083333, -0.222222, -0.693147, -1.098612]] + def graph_fn(boxes, anchors): + boxes = box_list.BoxList(boxes) + anchors = box_list.BoxList(anchors) + coder = faster_rcnn_box_coder.FasterRcnnBoxCoder() + rel_codes = coder.encode(boxes, anchors) + return rel_codes + rel_codes_out = self.execute(graph_fn, [boxes, anchors]) + self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04, + atol=1e-04) + + def test_get_correct_relative_codes_after_encoding_with_scaling(self): + boxes = np.array([[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]], + np.float32) + anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]], + np.float32) + expected_rel_codes = [[-1., -1.25, -1.62186, -0.911608], + [-0.166667, -0.666667, -2.772588, -5.493062]] + def graph_fn(boxes, anchors): + scale_factors = [2, 3, 4, 5] + boxes = box_list.BoxList(boxes) + anchors = box_list.BoxList(anchors) + coder = faster_rcnn_box_coder.FasterRcnnBoxCoder( + scale_factors=scale_factors) + rel_codes = coder.encode(boxes, anchors) + return rel_codes + rel_codes_out = self.execute(graph_fn, [boxes, anchors]) + self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04, + atol=1e-04) + + def test_get_correct_boxes_after_decoding(self): + anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]], + np.float32) + rel_codes = np.array([[-0.5, -0.416666, -0.405465, -0.182321], + [-0.083333, -0.222222, -0.693147, -1.098612]], + np.float32) + expected_boxes = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]] + def graph_fn(rel_codes, anchors): + anchors = box_list.BoxList(anchors) + coder = faster_rcnn_box_coder.FasterRcnnBoxCoder() + boxes = coder.decode(rel_codes, anchors) + return boxes.get() + boxes_out = self.execute(graph_fn, [rel_codes, anchors]) 
+ self.assertAllClose(boxes_out, expected_boxes, rtol=1e-04, + atol=1e-04) + + def test_get_correct_boxes_after_decoding_with_scaling(self): + anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]], + np.float32) + rel_codes = np.array([[-1., -1.25, -1.62186, -0.911608], + [-0.166667, -0.666667, -2.772588, -5.493062]], + np.float32) + expected_boxes = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]] + def graph_fn(rel_codes, anchors): + scale_factors = [2, 3, 4, 5] + anchors = box_list.BoxList(anchors) + coder = faster_rcnn_box_coder.FasterRcnnBoxCoder( + scale_factors=scale_factors) + boxes = coder.decode(rel_codes, anchors).get() + return boxes + boxes_out = self.execute(graph_fn, [rel_codes, anchors]) + self.assertAllClose(expected_boxes, boxes_out, rtol=1e-04, + atol=1e-04) + + def test_very_small_Width_nan_after_encoding(self): + boxes = np.array([[10.0, 10.0, 10.0000001, 20.0]], np.float32) + anchors = np.array([[15.0, 12.0, 30.0, 18.0]], np.float32) + expected_rel_codes = [[-0.833333, 0., -21.128731, 0.510826]] + def graph_fn(boxes, anchors): + boxes = box_list.BoxList(boxes) + anchors = box_list.BoxList(anchors) + coder = faster_rcnn_box_coder.FasterRcnnBoxCoder() + rel_codes = coder.encode(boxes, anchors) + return rel_codes + rel_codes_out = self.execute(graph_fn, [boxes, anchors]) + self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04, + atol=1e-04) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/box_coders/keypoint_box_coder.py b/models/research/object_detection/box_coders/keypoint_box_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..7bb4bf8b1849499f937b6f9f1d77fe2cf96a5eda --- /dev/null +++ b/models/research/object_detection/box_coders/keypoint_box_coder.py @@ -0,0 +1,173 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Keypoint box coder. + +The keypoint box coder follows the coding schema described below (this is +similar to the FasterRcnnBoxCoder, except that it encodes keypoints in addition +to box coordinates): + ty = (y - ya) / ha + tx = (x - xa) / wa + th = log(h / ha) + tw = log(w / wa) + tky0 = (ky0 - ya) / ha + tkx0 = (kx0 - xa) / wa + tky1 = (ky1 - ya) / ha + tkx1 = (kx1 - xa) / wa + ... + where x, y, w, h denote the box's center coordinates, width and height + respectively. Similarly, xa, ya, wa, ha denote the anchor's center + coordinates, width and height. tx, ty, tw and th denote the anchor-encoded + center, width and height respectively. ky0, kx0, ky1, kx1, ... denote the + keypoints' coordinates, and tky0, tkx0, tky1, tkx1, ... denote the + anchor-encoded keypoint coordinates. 
+""" + +import tensorflow.compat.v1 as tf + +from object_detection.core import box_coder +from object_detection.core import box_list +from object_detection.core import standard_fields as fields + +EPSILON = 1e-8 + + +class KeypointBoxCoder(box_coder.BoxCoder): + """Keypoint box coder.""" + + def __init__(self, num_keypoints, scale_factors=None): + """Constructor for KeypointBoxCoder. + + Args: + num_keypoints: Number of keypoints to encode/decode. + scale_factors: List of 4 positive scalars to scale ty, tx, th and tw. + In addition to scaling ty and tx, the first 2 scalars are used to scale + the y and x coordinates of the keypoints as well. If set to None, does + not perform scaling. + """ + self._num_keypoints = num_keypoints + + if scale_factors: + assert len(scale_factors) == 4 + for scalar in scale_factors: + assert scalar > 0 + self._scale_factors = scale_factors + self._keypoint_scale_factors = None + if scale_factors is not None: + self._keypoint_scale_factors = tf.expand_dims( + tf.tile([ + tf.cast(scale_factors[0], dtype=tf.float32), + tf.cast(scale_factors[1], dtype=tf.float32) + ], [num_keypoints]), 1) + + @property + def code_size(self): + return 4 + self._num_keypoints * 2 + + def _encode(self, boxes, anchors): + """Encode a box and keypoint collection with respect to anchor collection. + + Args: + boxes: BoxList holding N boxes and keypoints to be encoded. Boxes are + tensors with the shape [N, 4], and keypoints are tensors with the shape + [N, num_keypoints, 2]. + anchors: BoxList of anchors. + + Returns: + a tensor representing N anchor-encoded boxes of the format + [ty, tx, th, tw, tky0, tkx0, tky1, tkx1, ...] where tky0 and tkx0 + represent the y and x coordinates of the first keypoint, tky1 and tkx1 + represent the y and x coordinates of the second keypoint, and so on. + """ + # Convert anchors to the center coordinate representation. + ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes() + ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes() + keypoints = boxes.get_field(fields.BoxListFields.keypoints) + keypoints = tf.transpose(tf.reshape(keypoints, + [-1, self._num_keypoints * 2])) + num_boxes = boxes.num_boxes() + + # Avoid NaN in division and log below. + ha += EPSILON + wa += EPSILON + h += EPSILON + w += EPSILON + + tx = (xcenter - xcenter_a) / wa + ty = (ycenter - ycenter_a) / ha + tw = tf.log(w / wa) + th = tf.log(h / ha) + + tiled_anchor_centers = tf.tile( + tf.stack([ycenter_a, xcenter_a]), [self._num_keypoints, 1]) + tiled_anchor_sizes = tf.tile( + tf.stack([ha, wa]), [self._num_keypoints, 1]) + tkeypoints = (keypoints - tiled_anchor_centers) / tiled_anchor_sizes + + # Scales location targets as used in paper for joint training. + if self._scale_factors: + ty *= self._scale_factors[0] + tx *= self._scale_factors[1] + th *= self._scale_factors[2] + tw *= self._scale_factors[3] + tkeypoints *= tf.tile(self._keypoint_scale_factors, [1, num_boxes]) + + tboxes = tf.stack([ty, tx, th, tw]) + return tf.transpose(tf.concat([tboxes, tkeypoints], 0)) + + def _decode(self, rel_codes, anchors): + """Decode relative codes to boxes and keypoints. + + Args: + rel_codes: a tensor with shape [N, 4 + 2 * num_keypoints] representing N + anchor-encoded boxes and keypoints + anchors: BoxList of anchors. + + Returns: + boxes: BoxList holding N bounding boxes and keypoints. 
+ """ + ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes() + + num_codes = tf.shape(rel_codes)[0] + result = tf.unstack(tf.transpose(rel_codes)) + ty, tx, th, tw = result[:4] + tkeypoints = result[4:] + if self._scale_factors: + ty /= self._scale_factors[0] + tx /= self._scale_factors[1] + th /= self._scale_factors[2] + tw /= self._scale_factors[3] + tkeypoints /= tf.tile(self._keypoint_scale_factors, [1, num_codes]) + + w = tf.exp(tw) * wa + h = tf.exp(th) * ha + ycenter = ty * ha + ycenter_a + xcenter = tx * wa + xcenter_a + ymin = ycenter - h / 2. + xmin = xcenter - w / 2. + ymax = ycenter + h / 2. + xmax = xcenter + w / 2. + decoded_boxes_keypoints = box_list.BoxList( + tf.transpose(tf.stack([ymin, xmin, ymax, xmax]))) + + tiled_anchor_centers = tf.tile( + tf.stack([ycenter_a, xcenter_a]), [self._num_keypoints, 1]) + tiled_anchor_sizes = tf.tile( + tf.stack([ha, wa]), [self._num_keypoints, 1]) + keypoints = tkeypoints * tiled_anchor_sizes + tiled_anchor_centers + keypoints = tf.reshape(tf.transpose(keypoints), + [-1, self._num_keypoints, 2]) + decoded_boxes_keypoints.add_field(fields.BoxListFields.keypoints, keypoints) + return decoded_boxes_keypoints diff --git a/models/research/object_detection/box_coders/keypoint_box_coder_test.py b/models/research/object_detection/box_coders/keypoint_box_coder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..5748255c825cfa46a9db81c082c1341f1c476fbf --- /dev/null +++ b/models/research/object_detection/box_coders/keypoint_box_coder_test.py @@ -0,0 +1,151 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.box_coder.keypoint_box_coder.""" +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.box_coders import keypoint_box_coder +from object_detection.core import box_list +from object_detection.core import standard_fields as fields +from object_detection.utils import test_case + + +class KeypointBoxCoderTest(test_case.TestCase): + + def test_get_correct_relative_codes_after_encoding(self): + boxes = np.array([[10., 10., 20., 15.], + [0.2, 0.1, 0.5, 0.4]], np.float32) + keypoints = np.array([[[15., 12.], [10., 15.]], + [[0.5, 0.3], [0.2, 0.4]]], np.float32) + num_keypoints = len(keypoints[0]) + anchors = np.array([[15., 12., 30., 18.], + [0.1, 0.0, 0.7, 0.9]], np.float32) + expected_rel_codes = [ + [-0.5, -0.416666, -0.405465, -0.182321, + -0.5, -0.5, -0.833333, 0.], + [-0.083333, -0.222222, -0.693147, -1.098612, + 0.166667, -0.166667, -0.333333, -0.055556] + ] + def graph_fn(boxes, keypoints, anchors): + boxes = box_list.BoxList(boxes) + boxes.add_field(fields.BoxListFields.keypoints, keypoints) + anchors = box_list.BoxList(anchors) + coder = keypoint_box_coder.KeypointBoxCoder(num_keypoints) + rel_codes = coder.encode(boxes, anchors) + return rel_codes + rel_codes_out = self.execute(graph_fn, [boxes, keypoints, anchors]) + self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04, + atol=1e-04) + + def test_get_correct_relative_codes_after_encoding_with_scaling(self): + boxes = np.array([[10., 10., 20., 15.], + [0.2, 0.1, 0.5, 0.4]], np.float32) + keypoints = np.array([[[15., 12.], [10., 15.]], + [[0.5, 0.3], [0.2, 0.4]]], np.float32) + num_keypoints = len(keypoints[0]) + anchors = np.array([[15., 12., 30., 18.], + [0.1, 0.0, 0.7, 0.9]], np.float32) + expected_rel_codes = [ + [-1., -1.25, -1.62186, -0.911608, + -1.0, -1.5, -1.666667, 0.], + [-0.166667, -0.666667, -2.772588, -5.493062, + 0.333333, -0.5, -0.666667, -0.166667] + ] + def graph_fn(boxes, keypoints, anchors): + scale_factors = [2, 3, 4, 5] + boxes = box_list.BoxList(boxes) + boxes.add_field(fields.BoxListFields.keypoints, keypoints) + anchors = box_list.BoxList(anchors) + coder = keypoint_box_coder.KeypointBoxCoder( + num_keypoints, scale_factors=scale_factors) + rel_codes = coder.encode(boxes, anchors) + return rel_codes + rel_codes_out = self.execute(graph_fn, [boxes, keypoints, anchors]) + self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04, + atol=1e-04) + + def test_get_correct_boxes_after_decoding(self): + anchors = np.array([[15., 12., 30., 18.], + [0.1, 0.0, 0.7, 0.9]], np.float32) + rel_codes = np.array([ + [-0.5, -0.416666, -0.405465, -0.182321, + -0.5, -0.5, -0.833333, 0.], + [-0.083333, -0.222222, -0.693147, -1.098612, + 0.166667, -0.166667, -0.333333, -0.055556] + ], np.float32) + expected_boxes = [[10., 10., 20., 15.], + [0.2, 0.1, 0.5, 0.4]] + expected_keypoints = [[[15., 12.], [10., 15.]], + [[0.5, 0.3], [0.2, 0.4]]] + num_keypoints = len(expected_keypoints[0]) + def graph_fn(rel_codes, anchors): + anchors = box_list.BoxList(anchors) + coder = keypoint_box_coder.KeypointBoxCoder(num_keypoints) + boxes = coder.decode(rel_codes, anchors) + return boxes.get(), boxes.get_field(fields.BoxListFields.keypoints) + boxes_out, keypoints_out = self.execute(graph_fn, [rel_codes, anchors]) + self.assertAllClose(keypoints_out, expected_keypoints, rtol=1e-04, + atol=1e-04) + self.assertAllClose(boxes_out, expected_boxes, rtol=1e-04, + atol=1e-04) + + def 
test_get_correct_boxes_after_decoding_with_scaling(self): + anchors = np.array([[15., 12., 30., 18.], + [0.1, 0.0, 0.7, 0.9]], np.float32) + rel_codes = np.array([ + [-1., -1.25, -1.62186, -0.911608, + -1.0, -1.5, -1.666667, 0.], + [-0.166667, -0.666667, -2.772588, -5.493062, + 0.333333, -0.5, -0.666667, -0.166667] + ], np.float32) + expected_boxes = [[10., 10., 20., 15.], + [0.2, 0.1, 0.5, 0.4]] + expected_keypoints = [[[15., 12.], [10., 15.]], + [[0.5, 0.3], [0.2, 0.4]]] + num_keypoints = len(expected_keypoints[0]) + def graph_fn(rel_codes, anchors): + scale_factors = [2, 3, 4, 5] + anchors = box_list.BoxList(anchors) + coder = keypoint_box_coder.KeypointBoxCoder( + num_keypoints, scale_factors=scale_factors) + boxes = coder.decode(rel_codes, anchors) + return boxes.get(), boxes.get_field(fields.BoxListFields.keypoints) + boxes_out, keypoints_out = self.execute(graph_fn, [rel_codes, anchors]) + self.assertAllClose(keypoints_out, expected_keypoints, rtol=1e-04, + atol=1e-04) + self.assertAllClose(boxes_out, expected_boxes, rtol=1e-04, + atol=1e-04) + + def test_very_small_width_nan_after_encoding(self): + boxes = np.array([[10., 10., 10.0000001, 20.]], np.float32) + keypoints = np.array([[[10., 10.], [10.0000001, 20.]]], np.float32) + anchors = np.array([[15., 12., 30., 18.]], np.float32) + expected_rel_codes = [[-0.833333, 0., -21.128731, 0.510826, + -0.833333, -0.833333, -0.833333, 0.833333]] + def graph_fn(boxes, keypoints, anchors): + boxes = box_list.BoxList(boxes) + boxes.add_field(fields.BoxListFields.keypoints, keypoints) + anchors = box_list.BoxList(anchors) + coder = keypoint_box_coder.KeypointBoxCoder(2) + rel_codes = coder.encode(boxes, anchors) + return rel_codes + rel_codes_out = self.execute(graph_fn, [boxes, keypoints, anchors]) + self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04, + atol=1e-04) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/box_coders/mean_stddev_box_coder.py b/models/research/object_detection/box_coders/mean_stddev_box_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..256f53fd036798cd7b3da8fcdd720c7e3c46e2e4 --- /dev/null +++ b/models/research/object_detection/box_coders/mean_stddev_box_coder.py @@ -0,0 +1,79 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Mean stddev box coder. + +This box coder use the following coding schema to encode boxes: +rel_code = (box_corner - anchor_corner_mean) / anchor_corner_stddev. +""" +from object_detection.core import box_coder +from object_detection.core import box_list + + +class MeanStddevBoxCoder(box_coder.BoxCoder): + """Mean stddev box coder.""" + + def __init__(self, stddev=0.01): + """Constructor for MeanStddevBoxCoder. + + Args: + stddev: The standard deviation used to encode and decode boxes. 
+ """ + self._stddev = stddev + + @property + def code_size(self): + return 4 + + def _encode(self, boxes, anchors): + """Encode a box collection with respect to anchor collection. + + Args: + boxes: BoxList holding N boxes to be encoded. + anchors: BoxList of N anchors. + + Returns: + a tensor representing N anchor-encoded boxes + + Raises: + ValueError: if the anchors still have deprecated stddev field. + """ + box_corners = boxes.get() + if anchors.has_field('stddev'): + raise ValueError("'stddev' is a parameter of MeanStddevBoxCoder and " + "should not be specified in the box list.") + means = anchors.get() + return (box_corners - means) / self._stddev + + def _decode(self, rel_codes, anchors): + """Decode. + + Args: + rel_codes: a tensor representing N anchor-encoded boxes. + anchors: BoxList of anchors. + + Returns: + boxes: BoxList holding N bounding boxes + + Raises: + ValueError: if the anchors still have deprecated stddev field and expects + the decode method to use stddev value from that field. + """ + means = anchors.get() + if anchors.has_field('stddev'): + raise ValueError("'stddev' is a parameter of MeanStddevBoxCoder and " + "should not be specified in the box list.") + box_corners = rel_codes * self._stddev + means + return box_list.BoxList(box_corners) diff --git a/models/research/object_detection/box_coders/mean_stddev_box_coder_test.py b/models/research/object_detection/box_coders/mean_stddev_box_coder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..d94fff1187d3eb1b53e7ca525741f819cd944cc6 --- /dev/null +++ b/models/research/object_detection/box_coders/mean_stddev_box_coder_test.py @@ -0,0 +1,61 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.box_coder.mean_stddev_boxcoder.""" +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.box_coders import mean_stddev_box_coder +from object_detection.core import box_list +from object_detection.utils import test_case + + +class MeanStddevBoxCoderTest(test_case.TestCase): + + def testGetCorrectRelativeCodesAfterEncoding(self): + boxes = np.array([[0.0, 0.0, 0.5, 0.5], [0.0, 0.0, 0.5, 0.5]], np.float32) + anchors = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8]], np.float32) + expected_rel_codes = [[0.0, 0.0, 0.0, 0.0], [-5.0, -5.0, -5.0, -3.0]] + + def graph_fn(boxes, anchors): + anchors = box_list.BoxList(anchors) + boxes = box_list.BoxList(boxes) + coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) + rel_codes = coder.encode(boxes, anchors) + return rel_codes + + rel_codes_out = self.execute(graph_fn, [boxes, anchors]) + self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04, + atol=1e-04) + + def testGetCorrectBoxesAfterDecoding(self): + rel_codes = np.array([[0.0, 0.0, 0.0, 0.0], [-5.0, -5.0, -5.0, -3.0]], + np.float32) + expected_box_corners = [[0.0, 0.0, 0.5, 0.5], [0.0, 0.0, 0.5, 0.5]] + anchors = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8]], np.float32) + + def graph_fn(rel_codes, anchors): + anchors = box_list.BoxList(anchors) + coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) + decoded_boxes = coder.decode(rel_codes, anchors).get() + return decoded_boxes + + decoded_boxes_out = self.execute(graph_fn, [rel_codes, anchors]) + self.assertAllClose(decoded_boxes_out, expected_box_corners, rtol=1e-04, + atol=1e-04) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/box_coders/square_box_coder.py b/models/research/object_detection/box_coders/square_box_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..859320fd5024c2762dc935aa23ed437a8cff886b --- /dev/null +++ b/models/research/object_detection/box_coders/square_box_coder.py @@ -0,0 +1,126 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Square box coder. + +Square box coder follows the coding schema described below: +l = sqrt(h * w) +la = sqrt(ha * wa) +ty = (y - ya) / la +tx = (x - xa) / la +tl = log(l / la) +where x, y, w, h denote the box's center coordinates, width, and height, +respectively. Similarly, xa, ya, wa, ha denote the anchor's center +coordinates, width and height. tx, ty, tl denote the anchor-encoded +center, and length, respectively. Because the encoded box is a square, only +one length is encoded. + +This has shown to provide performance improvements over the Faster RCNN box +coder when the objects being detected tend to be square (e.g. faces) and when +the input images are not distorted via resizing. 
+""" + +import tensorflow.compat.v1 as tf + +from object_detection.core import box_coder +from object_detection.core import box_list + +EPSILON = 1e-8 + + +class SquareBoxCoder(box_coder.BoxCoder): + """Encodes a 3-scalar representation of a square box.""" + + def __init__(self, scale_factors=None): + """Constructor for SquareBoxCoder. + + Args: + scale_factors: List of 3 positive scalars to scale ty, tx, and tl. + If set to None, does not perform scaling. For faster RCNN, + the open-source implementation recommends using [10.0, 10.0, 5.0]. + + Raises: + ValueError: If scale_factors is not length 3 or contains values less than + or equal to 0. + """ + if scale_factors: + if len(scale_factors) != 3: + raise ValueError('The argument scale_factors must be a list of length ' + '3.') + if any(scalar <= 0 for scalar in scale_factors): + raise ValueError('The values in scale_factors must all be greater ' + 'than 0.') + self._scale_factors = scale_factors + + @property + def code_size(self): + return 3 + + def _encode(self, boxes, anchors): + """Encodes a box collection with respect to an anchor collection. + + Args: + boxes: BoxList holding N boxes to be encoded. + anchors: BoxList of anchors. + + Returns: + a tensor representing N anchor-encoded boxes of the format + [ty, tx, tl]. + """ + # Convert anchors to the center coordinate representation. + ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes() + la = tf.sqrt(ha * wa) + ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes() + l = tf.sqrt(h * w) + # Avoid NaN in division and log below. + la += EPSILON + l += EPSILON + + tx = (xcenter - xcenter_a) / la + ty = (ycenter - ycenter_a) / la + tl = tf.log(l / la) + # Scales location targets for joint training. + if self._scale_factors: + ty *= self._scale_factors[0] + tx *= self._scale_factors[1] + tl *= self._scale_factors[2] + return tf.transpose(tf.stack([ty, tx, tl])) + + def _decode(self, rel_codes, anchors): + """Decodes relative codes to boxes. + + Args: + rel_codes: a tensor representing N anchor-encoded boxes. + anchors: BoxList of anchors. + + Returns: + boxes: BoxList holding N bounding boxes. + """ + ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes() + la = tf.sqrt(ha * wa) + + ty, tx, tl = tf.unstack(tf.transpose(rel_codes)) + if self._scale_factors: + ty /= self._scale_factors[0] + tx /= self._scale_factors[1] + tl /= self._scale_factors[2] + l = tf.exp(tl) * la + ycenter = ty * la + ycenter_a + xcenter = tx * la + xcenter_a + ymin = ycenter - l / 2. + xmin = xcenter - l / 2. + ymax = ycenter + l / 2. + xmax = xcenter + l / 2. + return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax]))) diff --git a/models/research/object_detection/box_coders/square_box_coder_test.py b/models/research/object_detection/box_coders/square_box_coder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..e6bdcb245dc783cca6999a71e6b7bfe8b118cb2f --- /dev/null +++ b/models/research/object_detection/box_coders/square_box_coder_test.py @@ -0,0 +1,114 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.box_coder.square_box_coder.""" +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.box_coders import square_box_coder +from object_detection.core import box_list +from object_detection.utils import test_case + + +class SquareBoxCoderTest(test_case.TestCase): + + def test_correct_relative_codes_with_default_scale(self): + boxes = np.array([[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]], + np.float32) + anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]], + np.float32) + expected_rel_codes = [[-0.790569, -0.263523, -0.293893], + [-0.068041, -0.272166, -0.89588]] + def graph_fn(boxes, anchors): + scale_factors = None + boxes = box_list.BoxList(boxes) + anchors = box_list.BoxList(anchors) + coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors) + rel_codes = coder.encode(boxes, anchors) + return rel_codes + rel_codes_out = self.execute(graph_fn, [boxes, anchors]) + self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04, + atol=1e-04) + + def test_correct_relative_codes_with_non_default_scale(self): + boxes = np.array([[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]], + np.float32) + anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]], + np.float32) + expected_rel_codes = [[-1.581139, -0.790569, -1.175573], + [-0.136083, -0.816497, -3.583519]] + def graph_fn(boxes, anchors): + scale_factors = [2, 3, 4] + boxes = box_list.BoxList(boxes) + anchors = box_list.BoxList(anchors) + coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors) + rel_codes = coder.encode(boxes, anchors) + return rel_codes + rel_codes_out = self.execute(graph_fn, [boxes, anchors]) + self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-03, + atol=1e-03) + + def test_correct_relative_codes_with_small_width(self): + boxes = np.array([[10.0, 10.0, 10.0000001, 20.0]], np.float32) + anchors = np.array([[15.0, 12.0, 30.0, 18.0]], np.float32) + expected_rel_codes = [[-1.317616, 0., -20.670586]] + def graph_fn(boxes, anchors): + scale_factors = None + boxes = box_list.BoxList(boxes) + anchors = box_list.BoxList(anchors) + coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors) + rel_codes = coder.encode(boxes, anchors) + return rel_codes + rel_codes_out = self.execute(graph_fn, [boxes, anchors]) + self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04, + atol=1e-04) + + def test_correct_boxes_with_default_scale(self): + anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]], + np.float32) + rel_codes = np.array([[-0.5, -0.416666, -0.405465], + [-0.083333, -0.222222, -0.693147]], np.float32) + expected_boxes = [[14.594306, 7.884875, 20.918861, 14.209432], + [0.155051, 0.102989, 0.522474, 0.470412]] + def graph_fn(rel_codes, anchors): + scale_factors = None + anchors = box_list.BoxList(anchors) + coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors) + boxes = coder.decode(rel_codes, anchors).get() + return boxes + boxes_out = self.execute(graph_fn, [rel_codes, 
anchors]) + self.assertAllClose(boxes_out, expected_boxes, rtol=1e-04, + atol=1e-04) + + def test_correct_boxes_with_non_default_scale(self): + anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]], + np.float32) + rel_codes = np.array( + [[-1., -1.25, -1.62186], [-0.166667, -0.666667, -2.772588]], np.float32) + expected_boxes = [[14.594306, 7.884875, 20.918861, 14.209432], + [0.155051, 0.102989, 0.522474, 0.470412]] + def graph_fn(rel_codes, anchors): + scale_factors = [2, 3, 4] + anchors = box_list.BoxList(anchors) + coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors) + boxes = coder.decode(rel_codes, anchors).get() + return boxes + boxes_out = self.execute(graph_fn, [rel_codes, anchors]) + self.assertAllClose(boxes_out, expected_boxes, rtol=1e-04, + atol=1e-04) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/builders/__init__.py b/models/research/object_detection/builders/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/object_detection/builders/anchor_generator_builder.py b/models/research/object_detection/builders/anchor_generator_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..7880210a8b1881af71117c1ed60a60f4f4231581 --- /dev/null +++ b/models/research/object_detection/builders/anchor_generator_builder.py @@ -0,0 +1,116 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A function to build an object detection anchor generator from config.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from six.moves import zip +from object_detection.anchor_generators import flexible_grid_anchor_generator +from object_detection.anchor_generators import grid_anchor_generator +from object_detection.anchor_generators import multiple_grid_anchor_generator +from object_detection.anchor_generators import multiscale_grid_anchor_generator +from object_detection.protos import anchor_generator_pb2 + + +def build(anchor_generator_config): + """Builds an anchor generator based on the config. + + Args: + anchor_generator_config: An anchor_generator.proto object containing the + config for the desired anchor generator. + + Returns: + Anchor generator based on the config. + + Raises: + ValueError: On empty anchor generator proto. 
+ """ + if not isinstance(anchor_generator_config, + anchor_generator_pb2.AnchorGenerator): + raise ValueError('anchor_generator_config not of type ' + 'anchor_generator_pb2.AnchorGenerator') + if anchor_generator_config.WhichOneof( + 'anchor_generator_oneof') == 'grid_anchor_generator': + grid_anchor_generator_config = anchor_generator_config.grid_anchor_generator + return grid_anchor_generator.GridAnchorGenerator( + scales=[float(scale) for scale in grid_anchor_generator_config.scales], + aspect_ratios=[float(aspect_ratio) + for aspect_ratio + in grid_anchor_generator_config.aspect_ratios], + base_anchor_size=[grid_anchor_generator_config.height, + grid_anchor_generator_config.width], + anchor_stride=[grid_anchor_generator_config.height_stride, + grid_anchor_generator_config.width_stride], + anchor_offset=[grid_anchor_generator_config.height_offset, + grid_anchor_generator_config.width_offset]) + elif anchor_generator_config.WhichOneof( + 'anchor_generator_oneof') == 'ssd_anchor_generator': + ssd_anchor_generator_config = anchor_generator_config.ssd_anchor_generator + anchor_strides = None + if ssd_anchor_generator_config.height_stride: + anchor_strides = list( + zip(ssd_anchor_generator_config.height_stride, + ssd_anchor_generator_config.width_stride)) + anchor_offsets = None + if ssd_anchor_generator_config.height_offset: + anchor_offsets = list( + zip(ssd_anchor_generator_config.height_offset, + ssd_anchor_generator_config.width_offset)) + return multiple_grid_anchor_generator.create_ssd_anchors( + num_layers=ssd_anchor_generator_config.num_layers, + min_scale=ssd_anchor_generator_config.min_scale, + max_scale=ssd_anchor_generator_config.max_scale, + scales=[float(scale) for scale in ssd_anchor_generator_config.scales], + aspect_ratios=ssd_anchor_generator_config.aspect_ratios, + interpolated_scale_aspect_ratio=( + ssd_anchor_generator_config.interpolated_scale_aspect_ratio), + base_anchor_size=[ + ssd_anchor_generator_config.base_anchor_height, + ssd_anchor_generator_config.base_anchor_width + ], + anchor_strides=anchor_strides, + anchor_offsets=anchor_offsets, + reduce_boxes_in_lowest_layer=( + ssd_anchor_generator_config.reduce_boxes_in_lowest_layer)) + elif anchor_generator_config.WhichOneof( + 'anchor_generator_oneof') == 'multiscale_anchor_generator': + cfg = anchor_generator_config.multiscale_anchor_generator + return multiscale_grid_anchor_generator.MultiscaleGridAnchorGenerator( + cfg.min_level, + cfg.max_level, + cfg.anchor_scale, + [float(aspect_ratio) for aspect_ratio in cfg.aspect_ratios], + cfg.scales_per_octave, + cfg.normalize_coordinates + ) + elif anchor_generator_config.WhichOneof( + 'anchor_generator_oneof') == 'flexible_grid_anchor_generator': + cfg = anchor_generator_config.flexible_grid_anchor_generator + base_sizes = [] + aspect_ratios = [] + strides = [] + offsets = [] + for anchor_grid in cfg.anchor_grid: + base_sizes.append(tuple(anchor_grid.base_sizes)) + aspect_ratios.append(tuple(anchor_grid.aspect_ratios)) + strides.append((anchor_grid.height_stride, anchor_grid.width_stride)) + offsets.append((anchor_grid.height_offset, anchor_grid.width_offset)) + return flexible_grid_anchor_generator.FlexibleGridAnchorGenerator( + base_sizes, aspect_ratios, strides, offsets, cfg.normalize_coordinates) + else: + raise ValueError('Empty anchor generator.') diff --git a/models/research/object_detection/builders/anchor_generator_builder_test.py b/models/research/object_detection/builders/anchor_generator_builder_test.py new file mode 100644 index 
0000000000000000000000000000000000000000..35cdfcaee26f349e84257816f06c699f384246b1 --- /dev/null +++ b/models/research/object_detection/builders/anchor_generator_builder_test.py @@ -0,0 +1,339 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for anchor_generator_builder.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math + +from six.moves import range +from six.moves import zip +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.anchor_generators import flexible_grid_anchor_generator +from object_detection.anchor_generators import grid_anchor_generator +from object_detection.anchor_generators import multiple_grid_anchor_generator +from object_detection.anchor_generators import multiscale_grid_anchor_generator +from object_detection.builders import anchor_generator_builder +from object_detection.protos import anchor_generator_pb2 + + +class AnchorGeneratorBuilderTest(tf.test.TestCase): + + def assert_almost_list_equal(self, expected_list, actual_list, delta=None): + self.assertEqual(len(expected_list), len(actual_list)) + for expected_item, actual_item in zip(expected_list, actual_list): + self.assertAlmostEqual(expected_item, actual_item, delta=delta) + + def test_build_grid_anchor_generator_with_defaults(self): + anchor_generator_text_proto = """ + grid_anchor_generator { + } + """ + anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() + text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) + anchor_generator_object = anchor_generator_builder.build( + anchor_generator_proto) + self.assertIsInstance(anchor_generator_object, + grid_anchor_generator.GridAnchorGenerator) + self.assertListEqual(anchor_generator_object._scales, []) + self.assertListEqual(anchor_generator_object._aspect_ratios, []) + self.assertAllEqual(anchor_generator_object._anchor_offset, [0, 0]) + self.assertAllEqual(anchor_generator_object._anchor_stride, [16, 16]) + self.assertAllEqual(anchor_generator_object._base_anchor_size, [256, 256]) + + def test_build_grid_anchor_generator_with_non_default_parameters(self): + anchor_generator_text_proto = """ + grid_anchor_generator { + height: 128 + width: 512 + height_stride: 10 + width_stride: 20 + height_offset: 30 + width_offset: 40 + scales: [0.4, 2.2] + aspect_ratios: [0.3, 4.5] + } + """ + anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() + text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) + anchor_generator_object = anchor_generator_builder.build( + anchor_generator_proto) + self.assertIsInstance(anchor_generator_object, + grid_anchor_generator.GridAnchorGenerator) + self.assert_almost_list_equal(anchor_generator_object._scales, + [0.4, 2.2]) + 
self.assert_almost_list_equal(anchor_generator_object._aspect_ratios, + [0.3, 4.5]) + self.assertAllEqual(anchor_generator_object._anchor_offset, [30, 40]) + self.assertAllEqual(anchor_generator_object._anchor_stride, [10, 20]) + self.assertAllEqual(anchor_generator_object._base_anchor_size, [128, 512]) + + def test_build_ssd_anchor_generator_with_defaults(self): + anchor_generator_text_proto = """ + ssd_anchor_generator { + aspect_ratios: [1.0] + } + """ + anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() + text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) + anchor_generator_object = anchor_generator_builder.build( + anchor_generator_proto) + self.assertIsInstance(anchor_generator_object, + multiple_grid_anchor_generator. + MultipleGridAnchorGenerator) + for actual_scales, expected_scales in zip( + list(anchor_generator_object._scales), + [(0.1, 0.2, 0.2), + (0.35, 0.418), + (0.499, 0.570), + (0.649, 0.721), + (0.799, 0.871), + (0.949, 0.974)]): + self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2) + for actual_aspect_ratio, expected_aspect_ratio in zip( + list(anchor_generator_object._aspect_ratios), + [(1.0, 2.0, 0.5)] + 5 * [(1.0, 1.0)]): + self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio) + self.assertAllClose(anchor_generator_object._base_anchor_size, [1.0, 1.0]) + + def test_build_ssd_anchor_generator_with_custom_scales(self): + anchor_generator_text_proto = """ + ssd_anchor_generator { + aspect_ratios: [1.0] + scales: [0.1, 0.15, 0.2, 0.4, 0.6, 0.8] + reduce_boxes_in_lowest_layer: false + } + """ + anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() + text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) + anchor_generator_object = anchor_generator_builder.build( + anchor_generator_proto) + self.assertIsInstance(anchor_generator_object, + multiple_grid_anchor_generator. + MultipleGridAnchorGenerator) + for actual_scales, expected_scales in zip( + list(anchor_generator_object._scales), + [(0.1, math.sqrt(0.1 * 0.15)), + (0.15, math.sqrt(0.15 * 0.2)), + (0.2, math.sqrt(0.2 * 0.4)), + (0.4, math.sqrt(0.4 * 0.6)), + (0.6, math.sqrt(0.6 * 0.8)), + (0.8, math.sqrt(0.8 * 1.0))]): + self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2) + + def test_build_ssd_anchor_generator_with_custom_interpolated_scale(self): + anchor_generator_text_proto = """ + ssd_anchor_generator { + aspect_ratios: [0.5] + interpolated_scale_aspect_ratio: 0.5 + reduce_boxes_in_lowest_layer: false + } + """ + anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() + text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) + anchor_generator_object = anchor_generator_builder.build( + anchor_generator_proto) + self.assertIsInstance(anchor_generator_object, + multiple_grid_anchor_generator. 
+ MultipleGridAnchorGenerator) + for actual_aspect_ratio, expected_aspect_ratio in zip( + list(anchor_generator_object._aspect_ratios), + 6 * [(0.5, 0.5)]): + self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio) + + def test_build_ssd_anchor_generator_without_reduced_boxes(self): + anchor_generator_text_proto = """ + ssd_anchor_generator { + aspect_ratios: [1.0] + reduce_boxes_in_lowest_layer: false + } + """ + anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() + text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) + anchor_generator_object = anchor_generator_builder.build( + anchor_generator_proto) + self.assertIsInstance(anchor_generator_object, + multiple_grid_anchor_generator. + MultipleGridAnchorGenerator) + + for actual_scales, expected_scales in zip( + list(anchor_generator_object._scales), + [(0.2, 0.264), + (0.35, 0.418), + (0.499, 0.570), + (0.649, 0.721), + (0.799, 0.871), + (0.949, 0.974)]): + self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2) + + for actual_aspect_ratio, expected_aspect_ratio in zip( + list(anchor_generator_object._aspect_ratios), + 6 * [(1.0, 1.0)]): + self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio) + + self.assertAllClose(anchor_generator_object._base_anchor_size, [1.0, 1.0]) + + def test_build_ssd_anchor_generator_with_non_default_parameters(self): + anchor_generator_text_proto = """ + ssd_anchor_generator { + num_layers: 2 + min_scale: 0.3 + max_scale: 0.8 + aspect_ratios: [2.0] + height_stride: 16 + height_stride: 32 + width_stride: 20 + width_stride: 30 + height_offset: 8 + height_offset: 16 + width_offset: 0 + width_offset: 10 + } + """ + anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() + text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) + anchor_generator_object = anchor_generator_builder.build( + anchor_generator_proto) + self.assertIsInstance(anchor_generator_object, + multiple_grid_anchor_generator. 
+ MultipleGridAnchorGenerator) + + for actual_scales, expected_scales in zip( + list(anchor_generator_object._scales), + [(0.1, 0.3, 0.3), (0.8, 0.894)]): + self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2) + + for actual_aspect_ratio, expected_aspect_ratio in zip( + list(anchor_generator_object._aspect_ratios), + [(1.0, 2.0, 0.5), (2.0, 1.0)]): + self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio) + + for actual_strides, expected_strides in zip( + list(anchor_generator_object._anchor_strides), [(16, 20), (32, 30)]): + self.assert_almost_list_equal(expected_strides, actual_strides) + + for actual_offsets, expected_offsets in zip( + list(anchor_generator_object._anchor_offsets), [(8, 0), (16, 10)]): + self.assert_almost_list_equal(expected_offsets, actual_offsets) + + self.assertAllClose(anchor_generator_object._base_anchor_size, [1.0, 1.0]) + + def test_raise_value_error_on_empty_anchor_genertor(self): + anchor_generator_text_proto = """ + """ + anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() + text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) + with self.assertRaises(ValueError): + anchor_generator_builder.build(anchor_generator_proto) + + def test_build_multiscale_anchor_generator_custom_aspect_ratios(self): + anchor_generator_text_proto = """ + multiscale_anchor_generator { + aspect_ratios: [1.0] + } + """ + anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() + text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) + anchor_generator_object = anchor_generator_builder.build( + anchor_generator_proto) + self.assertIsInstance(anchor_generator_object, + multiscale_grid_anchor_generator. + MultiscaleGridAnchorGenerator) + for level, anchor_grid_info in zip( + range(3, 8), anchor_generator_object._anchor_grid_info): + self.assertEqual(set(anchor_grid_info.keys()), set(['level', 'info'])) + self.assertTrue(level, anchor_grid_info['level']) + self.assertEqual(len(anchor_grid_info['info']), 4) + self.assertAllClose(anchor_grid_info['info'][0], [2**0, 2**0.5]) + self.assertTrue(anchor_grid_info['info'][1], 1.0) + self.assertAllClose(anchor_grid_info['info'][2], + [4.0 * 2**level, 4.0 * 2**level]) + self.assertAllClose(anchor_grid_info['info'][3], [2**level, 2**level]) + self.assertTrue(anchor_generator_object._normalize_coordinates) + + def test_build_multiscale_anchor_generator_with_anchors_in_pixel_coordinates( + self): + anchor_generator_text_proto = """ + multiscale_anchor_generator { + aspect_ratios: [1.0] + normalize_coordinates: false + } + """ + anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() + text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) + anchor_generator_object = anchor_generator_builder.build( + anchor_generator_proto) + self.assertIsInstance(anchor_generator_object, + multiscale_grid_anchor_generator. 
+ MultiscaleGridAnchorGenerator) + self.assertFalse(anchor_generator_object._normalize_coordinates) + + def test_build_flexible_anchor_generator(self): + anchor_generator_text_proto = """ + flexible_grid_anchor_generator { + anchor_grid { + base_sizes: [1.5] + aspect_ratios: [1.0] + height_stride: 16 + width_stride: 20 + height_offset: 8 + width_offset: 9 + } + anchor_grid { + base_sizes: [1.0, 2.0] + aspect_ratios: [1.0, 0.5] + height_stride: 32 + width_stride: 30 + height_offset: 10 + width_offset: 11 + } + } + """ + anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() + text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) + anchor_generator_object = anchor_generator_builder.build( + anchor_generator_proto) + self.assertIsInstance(anchor_generator_object, + flexible_grid_anchor_generator. + FlexibleGridAnchorGenerator) + + for actual_base_sizes, expected_base_sizes in zip( + list(anchor_generator_object._base_sizes), [(1.5,), (1.0, 2.0)]): + self.assert_almost_list_equal(expected_base_sizes, actual_base_sizes) + + for actual_aspect_ratios, expected_aspect_ratios in zip( + list(anchor_generator_object._aspect_ratios), [(1.0,), (1.0, 0.5)]): + self.assert_almost_list_equal(expected_aspect_ratios, + actual_aspect_ratios) + + for actual_strides, expected_strides in zip( + list(anchor_generator_object._anchor_strides), [(16, 20), (32, 30)]): + self.assert_almost_list_equal(expected_strides, actual_strides) + + for actual_offsets, expected_offsets in zip( + list(anchor_generator_object._anchor_offsets), [(8, 9), (10, 11)]): + self.assert_almost_list_equal(expected_offsets, actual_offsets) + + self.assertTrue(anchor_generator_object._normalize_coordinates) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/builders/box_coder_builder.py b/models/research/object_detection/builders/box_coder_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..cc13d5a2f01c5a1f66e83abc5bb5ada542047d83 --- /dev/null +++ b/models/research/object_detection/builders/box_coder_builder.py @@ -0,0 +1,66 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A function to build an object detection box coder from configuration.""" +from object_detection.box_coders import faster_rcnn_box_coder +from object_detection.box_coders import keypoint_box_coder +from object_detection.box_coders import mean_stddev_box_coder +from object_detection.box_coders import square_box_coder +from object_detection.protos import box_coder_pb2 + + +def build(box_coder_config): + """Builds a box coder object based on the box coder config. + + Args: + box_coder_config: A box_coder.proto object containing the config for the + desired box coder. + + Returns: + BoxCoder based on the config. + + Raises: + ValueError: On empty box coder proto. 
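+
+  Example (an illustrative sketch only, not part of the original API docs; it
+  assumes the object_detection protos are importable):
+
+    from google.protobuf import text_format
+    from object_detection.protos import box_coder_pb2
+
+    config = box_coder_pb2.BoxCoder()
+    text_format.Merge('faster_rcnn_box_coder { y_scale: 10.0 }', config)
+    coder = build(config)  # -> faster_rcnn_box_coder.FasterRcnnBoxCoder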
+ """ + if not isinstance(box_coder_config, box_coder_pb2.BoxCoder): + raise ValueError('box_coder_config not of type box_coder_pb2.BoxCoder.') + + if box_coder_config.WhichOneof('box_coder_oneof') == 'faster_rcnn_box_coder': + return faster_rcnn_box_coder.FasterRcnnBoxCoder(scale_factors=[ + box_coder_config.faster_rcnn_box_coder.y_scale, + box_coder_config.faster_rcnn_box_coder.x_scale, + box_coder_config.faster_rcnn_box_coder.height_scale, + box_coder_config.faster_rcnn_box_coder.width_scale + ]) + if box_coder_config.WhichOneof('box_coder_oneof') == 'keypoint_box_coder': + return keypoint_box_coder.KeypointBoxCoder( + box_coder_config.keypoint_box_coder.num_keypoints, + scale_factors=[ + box_coder_config.keypoint_box_coder.y_scale, + box_coder_config.keypoint_box_coder.x_scale, + box_coder_config.keypoint_box_coder.height_scale, + box_coder_config.keypoint_box_coder.width_scale + ]) + if (box_coder_config.WhichOneof('box_coder_oneof') == + 'mean_stddev_box_coder'): + return mean_stddev_box_coder.MeanStddevBoxCoder( + stddev=box_coder_config.mean_stddev_box_coder.stddev) + if box_coder_config.WhichOneof('box_coder_oneof') == 'square_box_coder': + return square_box_coder.SquareBoxCoder(scale_factors=[ + box_coder_config.square_box_coder.y_scale, + box_coder_config.square_box_coder.x_scale, + box_coder_config.square_box_coder.length_scale + ]) + raise ValueError('Empty box coder.') diff --git a/models/research/object_detection/builders/box_coder_builder_test.py b/models/research/object_detection/builders/box_coder_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..5db9947cb643a7ba90e66d431dc3b80b3b82e00c --- /dev/null +++ b/models/research/object_detection/builders/box_coder_builder_test.py @@ -0,0 +1,136 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for box_coder_builder.""" + +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.box_coders import faster_rcnn_box_coder +from object_detection.box_coders import keypoint_box_coder +from object_detection.box_coders import mean_stddev_box_coder +from object_detection.box_coders import square_box_coder +from object_detection.builders import box_coder_builder +from object_detection.protos import box_coder_pb2 + + +class BoxCoderBuilderTest(tf.test.TestCase): + + def test_build_faster_rcnn_box_coder_with_defaults(self): + box_coder_text_proto = """ + faster_rcnn_box_coder { + } + """ + box_coder_proto = box_coder_pb2.BoxCoder() + text_format.Merge(box_coder_text_proto, box_coder_proto) + box_coder_object = box_coder_builder.build(box_coder_proto) + self.assertIsInstance(box_coder_object, + faster_rcnn_box_coder.FasterRcnnBoxCoder) + self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0, 5.0]) + + def test_build_faster_rcnn_box_coder_with_non_default_parameters(self): + box_coder_text_proto = """ + faster_rcnn_box_coder { + y_scale: 6.0 + x_scale: 3.0 + height_scale: 7.0 + width_scale: 8.0 + } + """ + box_coder_proto = box_coder_pb2.BoxCoder() + text_format.Merge(box_coder_text_proto, box_coder_proto) + box_coder_object = box_coder_builder.build(box_coder_proto) + self.assertIsInstance(box_coder_object, + faster_rcnn_box_coder.FasterRcnnBoxCoder) + self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0, 8.0]) + + def test_build_keypoint_box_coder_with_defaults(self): + box_coder_text_proto = """ + keypoint_box_coder { + } + """ + box_coder_proto = box_coder_pb2.BoxCoder() + text_format.Merge(box_coder_text_proto, box_coder_proto) + box_coder_object = box_coder_builder.build(box_coder_proto) + self.assertIsInstance(box_coder_object, keypoint_box_coder.KeypointBoxCoder) + self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0, 5.0]) + + def test_build_keypoint_box_coder_with_non_default_parameters(self): + box_coder_text_proto = """ + keypoint_box_coder { + num_keypoints: 6 + y_scale: 6.0 + x_scale: 3.0 + height_scale: 7.0 + width_scale: 8.0 + } + """ + box_coder_proto = box_coder_pb2.BoxCoder() + text_format.Merge(box_coder_text_proto, box_coder_proto) + box_coder_object = box_coder_builder.build(box_coder_proto) + self.assertIsInstance(box_coder_object, keypoint_box_coder.KeypointBoxCoder) + self.assertEqual(box_coder_object._num_keypoints, 6) + self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0, 8.0]) + + def test_build_mean_stddev_box_coder(self): + box_coder_text_proto = """ + mean_stddev_box_coder { + } + """ + box_coder_proto = box_coder_pb2.BoxCoder() + text_format.Merge(box_coder_text_proto, box_coder_proto) + box_coder_object = box_coder_builder.build(box_coder_proto) + self.assertTrue( + isinstance(box_coder_object, + mean_stddev_box_coder.MeanStddevBoxCoder)) + + def test_build_square_box_coder_with_defaults(self): + box_coder_text_proto = """ + square_box_coder { + } + """ + box_coder_proto = box_coder_pb2.BoxCoder() + text_format.Merge(box_coder_text_proto, box_coder_proto) + box_coder_object = box_coder_builder.build(box_coder_proto) + self.assertTrue( + isinstance(box_coder_object, square_box_coder.SquareBoxCoder)) + self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0]) + + def test_build_square_box_coder_with_non_default_parameters(self): + box_coder_text_proto = """ + 
square_box_coder { + y_scale: 6.0 + x_scale: 3.0 + length_scale: 7.0 + } + """ + box_coder_proto = box_coder_pb2.BoxCoder() + text_format.Merge(box_coder_text_proto, box_coder_proto) + box_coder_object = box_coder_builder.build(box_coder_proto) + self.assertTrue( + isinstance(box_coder_object, square_box_coder.SquareBoxCoder)) + self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0]) + + def test_raise_error_on_empty_box_coder(self): + box_coder_text_proto = """ + """ + box_coder_proto = box_coder_pb2.BoxCoder() + text_format.Merge(box_coder_text_proto, box_coder_proto) + with self.assertRaises(ValueError): + box_coder_builder.build(box_coder_proto) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/builders/box_predictor_builder.py b/models/research/object_detection/builders/box_predictor_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..029649d8d9dd68877adac6bb971d5fd024f62246 --- /dev/null +++ b/models/research/object_detection/builders/box_predictor_builder.py @@ -0,0 +1,975 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Function to build box predictor from configuration.""" + +import collections +import tensorflow.compat.v1 as tf +from object_detection.predictors import convolutional_box_predictor +from object_detection.predictors import convolutional_keras_box_predictor +from object_detection.predictors import mask_rcnn_box_predictor +from object_detection.predictors import mask_rcnn_keras_box_predictor +from object_detection.predictors import rfcn_box_predictor +from object_detection.predictors import rfcn_keras_box_predictor +from object_detection.predictors.heads import box_head +from object_detection.predictors.heads import class_head +from object_detection.predictors.heads import keras_box_head +from object_detection.predictors.heads import keras_class_head +from object_detection.predictors.heads import keras_mask_head +from object_detection.predictors.heads import mask_head +from object_detection.protos import box_predictor_pb2 + + +def build_convolutional_box_predictor(is_training, + num_classes, + conv_hyperparams_fn, + min_depth, + max_depth, + num_layers_before_predictor, + use_dropout, + dropout_keep_prob, + kernel_size, + box_code_size, + apply_sigmoid_to_scores=False, + add_background_class=True, + class_prediction_bias_init=0.0, + use_depthwise=False, + box_encodings_clip_range=None): + """Builds the ConvolutionalBoxPredictor from the arguments. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). 
+ conv_hyperparams_fn: A function to generate tf-slim arg_scope with + hyperparameters for convolution ops. + min_depth: Minimum feature depth prior to predicting box encodings + and class predictions. + max_depth: Maximum feature depth prior to predicting box encodings + and class predictions. If max_depth is set to 0, no additional + feature map will be inserted before location and class predictions. + num_layers_before_predictor: Number of the additional conv layers before + the predictor. + use_dropout: Option to use dropout or not. Note that a single dropout + op is applied here prior to both box and class predictions, which stands + in contrast to the ConvolutionalBoxPredictor below. + dropout_keep_prob: Keep probability for dropout. + This is only used if use_dropout is True. + kernel_size: Size of final convolution kernel. If the + spatial resolution of the feature map is smaller than the kernel size, + then the kernel size is automatically set to be + min(feature_width, feature_height). + box_code_size: Size of encoding for each box. + apply_sigmoid_to_scores: If True, apply the sigmoid on the output + class_predictions. + add_background_class: Whether to add an implicit background class. + class_prediction_bias_init: Constant value to initialize bias of the last + conv2d layer before class prediction. + use_depthwise: Whether to use depthwise convolutions for prediction + steps. Default is False. + box_encodings_clip_range: Min and max values for clipping the box_encodings. + + Returns: + A ConvolutionalBoxPredictor class. + """ + box_prediction_head = box_head.ConvolutionalBoxHead( + is_training=is_training, + box_code_size=box_code_size, + kernel_size=kernel_size, + use_depthwise=use_depthwise, + box_encodings_clip_range=box_encodings_clip_range) + class_prediction_head = class_head.ConvolutionalClassHead( + is_training=is_training, + num_class_slots=num_classes + 1 if add_background_class else num_classes, + use_dropout=use_dropout, + dropout_keep_prob=dropout_keep_prob, + kernel_size=kernel_size, + apply_sigmoid_to_scores=apply_sigmoid_to_scores, + class_prediction_bias_init=class_prediction_bias_init, + use_depthwise=use_depthwise) + other_heads = {} + return convolutional_box_predictor.ConvolutionalBoxPredictor( + is_training=is_training, + num_classes=num_classes, + box_prediction_head=box_prediction_head, + class_prediction_head=class_prediction_head, + other_heads=other_heads, + conv_hyperparams_fn=conv_hyperparams_fn, + num_layers_before_predictor=num_layers_before_predictor, + min_depth=min_depth, + max_depth=max_depth) + + +def build_convolutional_keras_box_predictor(is_training, + num_classes, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + num_predictions_per_location_list, + min_depth, + max_depth, + num_layers_before_predictor, + use_dropout, + dropout_keep_prob, + kernel_size, + box_code_size, + add_background_class=True, + class_prediction_bias_init=0.0, + use_depthwise=False, + box_encodings_clip_range=None, + name='BoxPredictor'): + """Builds the Keras ConvolutionalBoxPredictor from the arguments. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). 
+ conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: Whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + num_predictions_per_location_list: A list of integers representing the + number of box predictions to be made per spatial location for each + feature map. + min_depth: Minimum feature depth prior to predicting box encodings + and class predictions. + max_depth: Maximum feature depth prior to predicting box encodings + and class predictions. If max_depth is set to 0, no additional + feature map will be inserted before location and class predictions. + num_layers_before_predictor: Number of the additional conv layers before + the predictor. + use_dropout: Option to use dropout or not. Note that a single dropout + op is applied here prior to both box and class predictions, which stands + in contrast to the ConvolutionalBoxPredictor below. + dropout_keep_prob: Keep probability for dropout. + This is only used if use_dropout is True. + kernel_size: Size of final convolution kernel. If the + spatial resolution of the feature map is smaller than the kernel size, + then the kernel size is automatically set to be + min(feature_width, feature_height). + box_code_size: Size of encoding for each box. + add_background_class: Whether to add an implicit background class. + class_prediction_bias_init: constant value to initialize bias of the last + conv2d layer before class prediction. + use_depthwise: Whether to use depthwise convolutions for prediction + steps. Default is False. + box_encodings_clip_range: Min and max values for clipping the box_encodings. + name: A string name scope to assign to the box predictor. If `None`, Keras + will auto-generate one from the class name. + + Returns: + A Keras ConvolutionalBoxPredictor class. 
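+
+  Example (an illustrative sketch only; `conv_hyperparams` stands for a
+  hyperparams_builder.KerasLayerHyperparams instance built elsewhere, and the
+  per-feature-map prediction counts below are placeholder values):
+
+    predictor = build_convolutional_keras_box_predictor(
+        is_training=True,
+        num_classes=90,
+        conv_hyperparams=conv_hyperparams,
+        freeze_batchnorm=False,
+        inplace_batchnorm_update=False,
+        num_predictions_per_location_list=[4, 6, 6, 6, 4, 4],
+        min_depth=0,
+        max_depth=0,
+        num_layers_before_predictor=0,
+        use_dropout=False,
+        dropout_keep_prob=0.8,
+        kernel_size=3,
+        box_code_size=4)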
+ """ + box_prediction_heads = [] + class_prediction_heads = [] + other_heads = {} + + for stack_index, num_predictions_per_location in enumerate( + num_predictions_per_location_list): + box_prediction_heads.append( + keras_box_head.ConvolutionalBoxHead( + is_training=is_training, + box_code_size=box_code_size, + kernel_size=kernel_size, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + num_predictions_per_location=num_predictions_per_location, + use_depthwise=use_depthwise, + box_encodings_clip_range=box_encodings_clip_range, + name='ConvolutionalBoxHead_%d' % stack_index)) + class_prediction_heads.append( + keras_class_head.ConvolutionalClassHead( + is_training=is_training, + num_class_slots=( + num_classes + 1 if add_background_class else num_classes), + use_dropout=use_dropout, + dropout_keep_prob=dropout_keep_prob, + kernel_size=kernel_size, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + num_predictions_per_location=num_predictions_per_location, + class_prediction_bias_init=class_prediction_bias_init, + use_depthwise=use_depthwise, + name='ConvolutionalClassHead_%d' % stack_index)) + + return convolutional_keras_box_predictor.ConvolutionalBoxPredictor( + is_training=is_training, + num_classes=num_classes, + box_prediction_heads=box_prediction_heads, + class_prediction_heads=class_prediction_heads, + other_heads=other_heads, + conv_hyperparams=conv_hyperparams, + num_layers_before_predictor=num_layers_before_predictor, + min_depth=min_depth, + max_depth=max_depth, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + name=name) + + +def build_weight_shared_convolutional_box_predictor( + is_training, + num_classes, + conv_hyperparams_fn, + depth, + num_layers_before_predictor, + box_code_size, + kernel_size=3, + add_background_class=True, + class_prediction_bias_init=0.0, + use_dropout=False, + dropout_keep_prob=0.8, + share_prediction_tower=False, + apply_batch_norm=True, + use_depthwise=False, + score_converter_fn=tf.identity, + box_encodings_clip_range=None, + keyword_args=None): + """Builds and returns a WeightSharedConvolutionalBoxPredictor class. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + conv_hyperparams_fn: A function to generate tf-slim arg_scope with + hyperparameters for convolution ops. + depth: depth of conv layers. + num_layers_before_predictor: Number of the additional conv layers before + the predictor. + box_code_size: Size of encoding for each box. + kernel_size: Size of final convolution kernel. + add_background_class: Whether to add an implicit background class. + class_prediction_bias_init: constant value to initialize bias of the last + conv2d layer before class prediction. + use_dropout: Whether to apply dropout to class prediction head. + dropout_keep_prob: Probability of keeping activiations. + share_prediction_tower: Whether to share the multi-layer tower between box + prediction and class prediction heads. + apply_batch_norm: Whether to apply batch normalization to conv layers in + this predictor. + use_depthwise: Whether to use depthwise separable conv2d instead of conv2d. + score_converter_fn: Callable score converter to perform elementwise op on + class scores. 
+ box_encodings_clip_range: Min and max values for clipping the box_encodings. + keyword_args: A dictionary with additional args. + + Returns: + A WeightSharedConvolutionalBoxPredictor class. + """ + box_prediction_head = box_head.WeightSharedConvolutionalBoxHead( + box_code_size=box_code_size, + kernel_size=kernel_size, + use_depthwise=use_depthwise, + box_encodings_clip_range=box_encodings_clip_range) + class_prediction_head = ( + class_head.WeightSharedConvolutionalClassHead( + num_class_slots=( + num_classes + 1 if add_background_class else num_classes), + kernel_size=kernel_size, + class_prediction_bias_init=class_prediction_bias_init, + use_dropout=use_dropout, + dropout_keep_prob=dropout_keep_prob, + use_depthwise=use_depthwise, + score_converter_fn=score_converter_fn)) + other_heads = {} + return convolutional_box_predictor.WeightSharedConvolutionalBoxPredictor( + is_training=is_training, + num_classes=num_classes, + box_prediction_head=box_prediction_head, + class_prediction_head=class_prediction_head, + other_heads=other_heads, + conv_hyperparams_fn=conv_hyperparams_fn, + depth=depth, + num_layers_before_predictor=num_layers_before_predictor, + kernel_size=kernel_size, + apply_batch_norm=apply_batch_norm, + share_prediction_tower=share_prediction_tower, + use_depthwise=use_depthwise) + + +def build_weight_shared_convolutional_keras_box_predictor( + is_training, + num_classes, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + num_predictions_per_location_list, + depth, + num_layers_before_predictor, + box_code_size, + kernel_size=3, + add_background_class=True, + class_prediction_bias_init=0.0, + use_dropout=False, + dropout_keep_prob=0.8, + share_prediction_tower=False, + apply_batch_norm=True, + use_depthwise=False, + score_converter_fn=tf.identity, + box_encodings_clip_range=None, + name='WeightSharedConvolutionalBoxPredictor', + keyword_args=None): + """Builds the Keras WeightSharedConvolutionalBoxPredictor from the arguments. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: Whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + num_predictions_per_location_list: A list of integers representing the + number of box predictions to be made per spatial location for each + feature map. + depth: depth of conv layers. + num_layers_before_predictor: Number of the additional conv layers before + the predictor. + box_code_size: Size of encoding for each box. + kernel_size: Size of final convolution kernel. + add_background_class: Whether to add an implicit background class. + class_prediction_bias_init: constant value to initialize bias of the last + conv2d layer before class prediction. + use_dropout: Whether to apply dropout to class prediction head. 
+ dropout_keep_prob: Probability of keeping activiations. + share_prediction_tower: Whether to share the multi-layer tower between box + prediction and class prediction heads. + apply_batch_norm: Whether to apply batch normalization to conv layers in + this predictor. + use_depthwise: Whether to use depthwise separable conv2d instead of conv2d. + score_converter_fn: Callable score converter to perform elementwise op on + class scores. + box_encodings_clip_range: Min and max values for clipping the box_encodings. + name: A string name scope to assign to the box predictor. If `None`, Keras + will auto-generate one from the class name. + keyword_args: A dictionary with additional args. + + Returns: + A Keras WeightSharedConvolutionalBoxPredictor class. + """ + if len(set(num_predictions_per_location_list)) > 1: + raise ValueError('num predictions per location must be same for all' + 'feature maps, found: {}'.format( + num_predictions_per_location_list)) + num_predictions_per_location = num_predictions_per_location_list[0] + + box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead( + box_code_size=box_code_size, + kernel_size=kernel_size, + conv_hyperparams=conv_hyperparams, + num_predictions_per_location=num_predictions_per_location, + use_depthwise=use_depthwise, + box_encodings_clip_range=box_encodings_clip_range, + name='WeightSharedConvolutionalBoxHead') + class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead( + num_class_slots=( + num_classes + 1 if add_background_class else num_classes), + use_dropout=use_dropout, + dropout_keep_prob=dropout_keep_prob, + kernel_size=kernel_size, + conv_hyperparams=conv_hyperparams, + num_predictions_per_location=num_predictions_per_location, + class_prediction_bias_init=class_prediction_bias_init, + use_depthwise=use_depthwise, + score_converter_fn=score_converter_fn, + name='WeightSharedConvolutionalClassHead') + other_heads = {} + + return ( + convolutional_keras_box_predictor.WeightSharedConvolutionalBoxPredictor( + is_training=is_training, + num_classes=num_classes, + box_prediction_head=box_prediction_head, + class_prediction_head=class_prediction_head, + other_heads=other_heads, + conv_hyperparams=conv_hyperparams, + depth=depth, + num_layers_before_predictor=num_layers_before_predictor, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + kernel_size=kernel_size, + apply_batch_norm=apply_batch_norm, + share_prediction_tower=share_prediction_tower, + use_depthwise=use_depthwise, + name=name)) + + + + +def build_mask_rcnn_keras_box_predictor(is_training, + num_classes, + fc_hyperparams, + freeze_batchnorm, + use_dropout, + dropout_keep_prob, + box_code_size, + add_background_class=True, + share_box_across_classes=False, + predict_instance_masks=False, + conv_hyperparams=None, + mask_height=14, + mask_width=14, + mask_prediction_num_conv_layers=2, + mask_prediction_conv_depth=256, + masks_are_class_agnostic=False, + convolve_then_upsample_masks=False): + """Builds and returns a MaskRCNNKerasBoxPredictor class. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). 
+ fc_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for fully connected dense ops. + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + use_dropout: Option to use dropout or not. Note that a single dropout + op is applied here prior to both box and class predictions, which stands + in contrast to the ConvolutionalBoxPredictor below. + dropout_keep_prob: Keep probability for dropout. + This is only used if use_dropout is True. + box_code_size: Size of encoding for each box. + add_background_class: Whether to add an implicit background class. + share_box_across_classes: Whether to share boxes across classes rather + than use a different box for each class. + predict_instance_masks: If True, will add a third stage mask prediction + to the returned class. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + mask_height: Desired output mask height. The default value is 14. + mask_width: Desired output mask width. The default value is 14. + mask_prediction_num_conv_layers: Number of convolution layers applied to + the image_features in mask prediction branch. + mask_prediction_conv_depth: The depth for the first conv2d_transpose op + applied to the image_features in the mask prediction branch. If set + to 0, the depth of the convolution layers will be automatically chosen + based on the number of object classes and the number of channels in the + image features. + masks_are_class_agnostic: Boolean determining if the mask-head is + class-agnostic or not. + convolve_then_upsample_masks: Whether to apply convolutions on mask + features before upsampling using nearest neighbor resizing. Otherwise, + mask features are resized to [`mask_height`, `mask_width`] using + bilinear resizing before applying convolutions. + + Returns: + A MaskRCNNKerasBoxPredictor class. + """ + box_prediction_head = keras_box_head.MaskRCNNBoxHead( + is_training=is_training, + num_classes=num_classes, + fc_hyperparams=fc_hyperparams, + freeze_batchnorm=freeze_batchnorm, + use_dropout=use_dropout, + dropout_keep_prob=dropout_keep_prob, + box_code_size=box_code_size, + share_box_across_classes=share_box_across_classes) + class_prediction_head = keras_class_head.MaskRCNNClassHead( + is_training=is_training, + num_class_slots=num_classes + 1 if add_background_class else num_classes, + fc_hyperparams=fc_hyperparams, + freeze_batchnorm=freeze_batchnorm, + use_dropout=use_dropout, + dropout_keep_prob=dropout_keep_prob) + third_stage_heads = {} + if predict_instance_masks: + third_stage_heads[ + mask_rcnn_box_predictor. 
+ MASK_PREDICTIONS] = keras_mask_head.MaskRCNNMaskHead( + is_training=is_training, + num_classes=num_classes, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + mask_height=mask_height, + mask_width=mask_width, + mask_prediction_num_conv_layers=mask_prediction_num_conv_layers, + mask_prediction_conv_depth=mask_prediction_conv_depth, + masks_are_class_agnostic=masks_are_class_agnostic, + convolve_then_upsample=convolve_then_upsample_masks) + return mask_rcnn_keras_box_predictor.MaskRCNNKerasBoxPredictor( + is_training=is_training, + num_classes=num_classes, + freeze_batchnorm=freeze_batchnorm, + box_prediction_head=box_prediction_head, + class_prediction_head=class_prediction_head, + third_stage_heads=third_stage_heads) + + +def build_mask_rcnn_box_predictor(is_training, + num_classes, + fc_hyperparams_fn, + use_dropout, + dropout_keep_prob, + box_code_size, + add_background_class=True, + share_box_across_classes=False, + predict_instance_masks=False, + conv_hyperparams_fn=None, + mask_height=14, + mask_width=14, + mask_prediction_num_conv_layers=2, + mask_prediction_conv_depth=256, + masks_are_class_agnostic=False, + convolve_then_upsample_masks=False): + """Builds and returns a MaskRCNNBoxPredictor class. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + fc_hyperparams_fn: A function to generate tf-slim arg_scope with + hyperparameters for fully connected ops. + use_dropout: Option to use dropout or not. Note that a single dropout + op is applied here prior to both box and class predictions, which stands + in contrast to the ConvolutionalBoxPredictor below. + dropout_keep_prob: Keep probability for dropout. + This is only used if use_dropout is True. + box_code_size: Size of encoding for each box. + add_background_class: Whether to add an implicit background class. + share_box_across_classes: Whether to share boxes across classes rather + than use a different box for each class. + predict_instance_masks: If True, will add a third stage mask prediction + to the returned class. + conv_hyperparams_fn: A function to generate tf-slim arg_scope with + hyperparameters for convolution ops. + mask_height: Desired output mask height. The default value is 14. + mask_width: Desired output mask width. The default value is 14. + mask_prediction_num_conv_layers: Number of convolution layers applied to + the image_features in mask prediction branch. + mask_prediction_conv_depth: The depth for the first conv2d_transpose op + applied to the image_features in the mask prediction branch. If set + to 0, the depth of the convolution layers will be automatically chosen + based on the number of object classes and the number of channels in the + image features. + masks_are_class_agnostic: Boolean determining if the mask-head is + class-agnostic or not. + convolve_then_upsample_masks: Whether to apply convolutions on mask + features before upsampling using nearest neighbor resizing. Otherwise, + mask features are resized to [`mask_height`, `mask_width`] using + bilinear resizing before applying convolutions. + + Returns: + A MaskRCNNBoxPredictor class. 
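+
+  Example (an illustrative sketch of the kind of mask_rcnn_box_predictor text
+  config that the build() function below routes to this builder; the
+  hyperparams messages are omitted for brevity):
+
+    mask_rcnn_box_predictor {
+      predict_instance_masks: true
+      mask_height: 14
+      mask_width: 14
+      masks_are_class_agnostic: false
+    }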
+ """ + box_prediction_head = box_head.MaskRCNNBoxHead( + is_training=is_training, + num_classes=num_classes, + fc_hyperparams_fn=fc_hyperparams_fn, + use_dropout=use_dropout, + dropout_keep_prob=dropout_keep_prob, + box_code_size=box_code_size, + share_box_across_classes=share_box_across_classes) + class_prediction_head = class_head.MaskRCNNClassHead( + is_training=is_training, + num_class_slots=num_classes + 1 if add_background_class else num_classes, + fc_hyperparams_fn=fc_hyperparams_fn, + use_dropout=use_dropout, + dropout_keep_prob=dropout_keep_prob) + third_stage_heads = {} + if predict_instance_masks: + third_stage_heads[ + mask_rcnn_box_predictor. + MASK_PREDICTIONS] = mask_head.MaskRCNNMaskHead( + num_classes=num_classes, + conv_hyperparams_fn=conv_hyperparams_fn, + mask_height=mask_height, + mask_width=mask_width, + mask_prediction_num_conv_layers=mask_prediction_num_conv_layers, + mask_prediction_conv_depth=mask_prediction_conv_depth, + masks_are_class_agnostic=masks_are_class_agnostic, + convolve_then_upsample=convolve_then_upsample_masks) + return mask_rcnn_box_predictor.MaskRCNNBoxPredictor( + is_training=is_training, + num_classes=num_classes, + box_prediction_head=box_prediction_head, + class_prediction_head=class_prediction_head, + third_stage_heads=third_stage_heads) + + +def build_score_converter(score_converter_config, is_training): + """Builds score converter based on the config. + + Builds one of [tf.identity, tf.sigmoid] score converters based on the config + and whether the BoxPredictor is for training or inference. + + Args: + score_converter_config: + box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.score_converter. + is_training: Indicates whether the BoxPredictor is in training mode. + + Returns: + Callable score converter op. + + Raises: + ValueError: On unknown score converter. + """ + if score_converter_config == ( + box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.IDENTITY): + return tf.identity + if score_converter_config == ( + box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.SIGMOID): + return tf.identity if is_training else tf.sigmoid + raise ValueError('Unknown score converter.') + + +BoxEncodingsClipRange = collections.namedtuple('BoxEncodingsClipRange', + ['min', 'max']) + + +def build(argscope_fn, box_predictor_config, is_training, num_classes, + add_background_class=True): + """Builds box predictor based on the configuration. + + Builds box predictor based on the configuration. See box_predictor.proto for + configurable options. Also, see box_predictor.py for more details. + + Args: + argscope_fn: A function that takes the following inputs: + * hyperparams_pb2.Hyperparams proto + * a boolean indicating if the model is in training mode. + and returns a tf slim argscope for Conv and FC hyperparameters. + box_predictor_config: box_predictor_pb2.BoxPredictor proto containing + configuration. + is_training: Whether the models is in training mode. + num_classes: Number of classes to predict. + add_background_class: Whether to add an implicit background class. + + Returns: + box_predictor: box_predictor.BoxPredictor object. + + Raises: + ValueError: On unknown box predictor. 
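+
+  Example (an illustrative sketch; it mirrors how the unit tests invoke this
+  builder, with hyperparams_builder.build supplying the argscope_fn and
+  box_predictor_text_proto standing for any of the configs shown in the tests):
+
+    from google.protobuf import text_format
+    from object_detection.builders import hyperparams_builder
+    from object_detection.protos import box_predictor_pb2
+
+    proto = box_predictor_pb2.BoxPredictor()
+    text_format.Merge(box_predictor_text_proto, proto)
+    predictor = build(hyperparams_builder.build, proto,
+                      is_training=True, num_classes=90)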
+ """ + if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor): + raise ValueError('box_predictor_config not of type ' + 'box_predictor_pb2.BoxPredictor.') + + box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof') + + if box_predictor_oneof == 'convolutional_box_predictor': + config_box_predictor = box_predictor_config.convolutional_box_predictor + conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams, + is_training) + # Optionally apply clipping to box encodings, when box_encodings_clip_range + # is set. + box_encodings_clip_range = None + if config_box_predictor.HasField('box_encodings_clip_range'): + box_encodings_clip_range = BoxEncodingsClipRange( + min=config_box_predictor.box_encodings_clip_range.min, + max=config_box_predictor.box_encodings_clip_range.max) + return build_convolutional_box_predictor( + is_training=is_training, + num_classes=num_classes, + add_background_class=add_background_class, + conv_hyperparams_fn=conv_hyperparams_fn, + use_dropout=config_box_predictor.use_dropout, + dropout_keep_prob=config_box_predictor.dropout_keep_probability, + box_code_size=config_box_predictor.box_code_size, + kernel_size=config_box_predictor.kernel_size, + num_layers_before_predictor=( + config_box_predictor.num_layers_before_predictor), + min_depth=config_box_predictor.min_depth, + max_depth=config_box_predictor.max_depth, + apply_sigmoid_to_scores=config_box_predictor.apply_sigmoid_to_scores, + class_prediction_bias_init=( + config_box_predictor.class_prediction_bias_init), + use_depthwise=config_box_predictor.use_depthwise, + box_encodings_clip_range=box_encodings_clip_range) + + if box_predictor_oneof == 'weight_shared_convolutional_box_predictor': + config_box_predictor = ( + box_predictor_config.weight_shared_convolutional_box_predictor) + conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams, + is_training) + apply_batch_norm = config_box_predictor.conv_hyperparams.HasField( + 'batch_norm') + # During training phase, logits are used to compute the loss. Only apply + # sigmoid at inference to make the inference graph TPU friendly. + score_converter_fn = build_score_converter( + config_box_predictor.score_converter, is_training) + # Optionally apply clipping to box encodings, when box_encodings_clip_range + # is set. 
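+    # If provided, the (min, max) pair is wrapped in the BoxEncodingsClipRange
+    # namedtuple defined above and passed through to the box prediction head.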
+ box_encodings_clip_range = None + if config_box_predictor.HasField('box_encodings_clip_range'): + box_encodings_clip_range = BoxEncodingsClipRange( + min=config_box_predictor.box_encodings_clip_range.min, + max=config_box_predictor.box_encodings_clip_range.max) + keyword_args = None + + return build_weight_shared_convolutional_box_predictor( + is_training=is_training, + num_classes=num_classes, + add_background_class=add_background_class, + conv_hyperparams_fn=conv_hyperparams_fn, + depth=config_box_predictor.depth, + num_layers_before_predictor=( + config_box_predictor.num_layers_before_predictor), + box_code_size=config_box_predictor.box_code_size, + kernel_size=config_box_predictor.kernel_size, + class_prediction_bias_init=( + config_box_predictor.class_prediction_bias_init), + use_dropout=config_box_predictor.use_dropout, + dropout_keep_prob=config_box_predictor.dropout_keep_probability, + share_prediction_tower=config_box_predictor.share_prediction_tower, + apply_batch_norm=apply_batch_norm, + use_depthwise=config_box_predictor.use_depthwise, + score_converter_fn=score_converter_fn, + box_encodings_clip_range=box_encodings_clip_range, + keyword_args=keyword_args) + + + if box_predictor_oneof == 'mask_rcnn_box_predictor': + config_box_predictor = box_predictor_config.mask_rcnn_box_predictor + fc_hyperparams_fn = argscope_fn(config_box_predictor.fc_hyperparams, + is_training) + conv_hyperparams_fn = None + if config_box_predictor.HasField('conv_hyperparams'): + conv_hyperparams_fn = argscope_fn( + config_box_predictor.conv_hyperparams, is_training) + return build_mask_rcnn_box_predictor( + is_training=is_training, + num_classes=num_classes, + add_background_class=add_background_class, + fc_hyperparams_fn=fc_hyperparams_fn, + use_dropout=config_box_predictor.use_dropout, + dropout_keep_prob=config_box_predictor.dropout_keep_probability, + box_code_size=config_box_predictor.box_code_size, + share_box_across_classes=( + config_box_predictor.share_box_across_classes), + predict_instance_masks=config_box_predictor.predict_instance_masks, + conv_hyperparams_fn=conv_hyperparams_fn, + mask_height=config_box_predictor.mask_height, + mask_width=config_box_predictor.mask_width, + mask_prediction_num_conv_layers=( + config_box_predictor.mask_prediction_num_conv_layers), + mask_prediction_conv_depth=( + config_box_predictor.mask_prediction_conv_depth), + masks_are_class_agnostic=( + config_box_predictor.masks_are_class_agnostic), + convolve_then_upsample_masks=( + config_box_predictor.convolve_then_upsample_masks)) + + if box_predictor_oneof == 'rfcn_box_predictor': + config_box_predictor = box_predictor_config.rfcn_box_predictor + conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams, + is_training) + box_predictor_object = rfcn_box_predictor.RfcnBoxPredictor( + is_training=is_training, + num_classes=num_classes, + conv_hyperparams_fn=conv_hyperparams_fn, + crop_size=[config_box_predictor.crop_height, + config_box_predictor.crop_width], + num_spatial_bins=[config_box_predictor.num_spatial_bins_height, + config_box_predictor.num_spatial_bins_width], + depth=config_box_predictor.depth, + box_code_size=config_box_predictor.box_code_size) + return box_predictor_object + raise ValueError('Unknown box predictor: {}'.format(box_predictor_oneof)) + + +def build_keras(hyperparams_fn, freeze_batchnorm, inplace_batchnorm_update, + num_predictions_per_location_list, box_predictor_config, + is_training, num_classes, add_background_class=True): + """Builds a Keras-based box predictor based on 
the configuration. + + Builds Keras-based box predictor based on the configuration. + See box_predictor.proto for configurable options. Also, see box_predictor.py + for more details. + + Args: + hyperparams_fn: A function that takes a hyperparams_pb2.Hyperparams + proto and returns a `hyperparams_builder.KerasLayerHyperparams` + for Conv or FC hyperparameters. + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: Whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + num_predictions_per_location_list: A list of integers representing the + number of box predictions to be made per spatial location for each + feature map. + box_predictor_config: box_predictor_pb2.BoxPredictor proto containing + configuration. + is_training: Whether the models is in training mode. + num_classes: Number of classes to predict. + add_background_class: Whether to add an implicit background class. + + Returns: + box_predictor: box_predictor.KerasBoxPredictor object. + + Raises: + ValueError: On unknown box predictor, or one with no Keras box predictor. + """ + if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor): + raise ValueError('box_predictor_config not of type ' + 'box_predictor_pb2.BoxPredictor.') + + box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof') + + if box_predictor_oneof == 'convolutional_box_predictor': + config_box_predictor = box_predictor_config.convolutional_box_predictor + conv_hyperparams = hyperparams_fn( + config_box_predictor.conv_hyperparams) + # Optionally apply clipping to box encodings, when box_encodings_clip_range + # is set. + box_encodings_clip_range = None + if config_box_predictor.HasField('box_encodings_clip_range'): + box_encodings_clip_range = BoxEncodingsClipRange( + min=config_box_predictor.box_encodings_clip_range.min, + max=config_box_predictor.box_encodings_clip_range.max) + + return build_convolutional_keras_box_predictor( + is_training=is_training, + num_classes=num_classes, + add_background_class=add_background_class, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + num_predictions_per_location_list=num_predictions_per_location_list, + use_dropout=config_box_predictor.use_dropout, + dropout_keep_prob=config_box_predictor.dropout_keep_probability, + box_code_size=config_box_predictor.box_code_size, + kernel_size=config_box_predictor.kernel_size, + num_layers_before_predictor=( + config_box_predictor.num_layers_before_predictor), + min_depth=config_box_predictor.min_depth, + max_depth=config_box_predictor.max_depth, + class_prediction_bias_init=( + config_box_predictor.class_prediction_bias_init), + use_depthwise=config_box_predictor.use_depthwise, + box_encodings_clip_range=box_encodings_clip_range) + + if box_predictor_oneof == 'weight_shared_convolutional_box_predictor': + config_box_predictor = ( + box_predictor_config.weight_shared_convolutional_box_predictor) + conv_hyperparams = hyperparams_fn(config_box_predictor.conv_hyperparams) + apply_batch_norm = config_box_predictor.conv_hyperparams.HasField( + 'batch_norm') + # During training phase, logits are used to compute the loss. 
Only apply + # sigmoid at inference to make the inference graph TPU friendly. This is + # required because during TPU inference, model.postprocess is not called. + score_converter_fn = build_score_converter( + config_box_predictor.score_converter, is_training) + # Optionally apply clipping to box encodings, when box_encodings_clip_range + # is set. + box_encodings_clip_range = None + if config_box_predictor.HasField('box_encodings_clip_range'): + box_encodings_clip_range = BoxEncodingsClipRange( + min=config_box_predictor.box_encodings_clip_range.min, + max=config_box_predictor.box_encodings_clip_range.max) + keyword_args = None + + return build_weight_shared_convolutional_keras_box_predictor( + is_training=is_training, + num_classes=num_classes, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + num_predictions_per_location_list=num_predictions_per_location_list, + depth=config_box_predictor.depth, + num_layers_before_predictor=( + config_box_predictor.num_layers_before_predictor), + box_code_size=config_box_predictor.box_code_size, + kernel_size=config_box_predictor.kernel_size, + add_background_class=add_background_class, + class_prediction_bias_init=( + config_box_predictor.class_prediction_bias_init), + use_dropout=config_box_predictor.use_dropout, + dropout_keep_prob=config_box_predictor.dropout_keep_probability, + share_prediction_tower=config_box_predictor.share_prediction_tower, + apply_batch_norm=apply_batch_norm, + use_depthwise=config_box_predictor.use_depthwise, + score_converter_fn=score_converter_fn, + box_encodings_clip_range=box_encodings_clip_range, + keyword_args=keyword_args) + + if box_predictor_oneof == 'mask_rcnn_box_predictor': + config_box_predictor = box_predictor_config.mask_rcnn_box_predictor + fc_hyperparams = hyperparams_fn(config_box_predictor.fc_hyperparams) + conv_hyperparams = None + if config_box_predictor.HasField('conv_hyperparams'): + conv_hyperparams = hyperparams_fn( + config_box_predictor.conv_hyperparams) + return build_mask_rcnn_keras_box_predictor( + is_training=is_training, + num_classes=num_classes, + add_background_class=add_background_class, + fc_hyperparams=fc_hyperparams, + freeze_batchnorm=freeze_batchnorm, + use_dropout=config_box_predictor.use_dropout, + dropout_keep_prob=config_box_predictor.dropout_keep_probability, + box_code_size=config_box_predictor.box_code_size, + share_box_across_classes=( + config_box_predictor.share_box_across_classes), + predict_instance_masks=config_box_predictor.predict_instance_masks, + conv_hyperparams=conv_hyperparams, + mask_height=config_box_predictor.mask_height, + mask_width=config_box_predictor.mask_width, + mask_prediction_num_conv_layers=( + config_box_predictor.mask_prediction_num_conv_layers), + mask_prediction_conv_depth=( + config_box_predictor.mask_prediction_conv_depth), + masks_are_class_agnostic=( + config_box_predictor.masks_are_class_agnostic), + convolve_then_upsample_masks=( + config_box_predictor.convolve_then_upsample_masks)) + + if box_predictor_oneof == 'rfcn_box_predictor': + config_box_predictor = box_predictor_config.rfcn_box_predictor + conv_hyperparams = hyperparams_fn(config_box_predictor.conv_hyperparams) + box_predictor_object = rfcn_keras_box_predictor.RfcnKerasBoxPredictor( + is_training=is_training, + num_classes=num_classes, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + crop_size=[config_box_predictor.crop_height, + config_box_predictor.crop_width], + 
num_spatial_bins=[config_box_predictor.num_spatial_bins_height, + config_box_predictor.num_spatial_bins_width], + depth=config_box_predictor.depth, + box_code_size=config_box_predictor.box_code_size) + return box_predictor_object + + raise ValueError( + 'Unknown box predictor for Keras: {}'.format(box_predictor_oneof)) diff --git a/models/research/object_detection/builders/box_predictor_builder_test.py b/models/research/object_detection/builders/box_predictor_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..7154cd2efc06e2c4581e654d718e3519152bc6bb --- /dev/null +++ b/models/research/object_detection/builders/box_predictor_builder_test.py @@ -0,0 +1,668 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for box_predictor_builder.""" + +import unittest +import mock +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import box_predictor_builder +from object_detection.builders import hyperparams_builder +from object_detection.predictors import mask_rcnn_box_predictor +from object_detection.protos import box_predictor_pb2 +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only Tests.') +class ConvolutionalBoxPredictorBuilderTest(tf.test.TestCase): + + def test_box_predictor_calls_conv_argscope_fn(self): + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + weight: 0.0003 + } + } + initializer { + truncated_normal_initializer { + mean: 0.0 + stddev: 0.3 + } + } + activation: RELU_6 + """ + hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto) + def mock_conv_argscope_builder(conv_hyperparams_arg, is_training): + return (conv_hyperparams_arg, is_training) + + box_predictor_proto = box_predictor_pb2.BoxPredictor() + box_predictor_proto.convolutional_box_predictor.conv_hyperparams.CopyFrom( + hyperparams_proto) + box_predictor = box_predictor_builder.build( + argscope_fn=mock_conv_argscope_builder, + box_predictor_config=box_predictor_proto, + is_training=False, + num_classes=10) + (conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams_fn + self.assertAlmostEqual((hyperparams_proto.regularizer. + l1_regularizer.weight), + (conv_hyperparams_actual.regularizer.l1_regularizer. + weight)) + self.assertAlmostEqual((hyperparams_proto.initializer. + truncated_normal_initializer.stddev), + (conv_hyperparams_actual.initializer. + truncated_normal_initializer.stddev)) + self.assertAlmostEqual((hyperparams_proto.initializer. + truncated_normal_initializer.mean), + (conv_hyperparams_actual.initializer. 
+ truncated_normal_initializer.mean)) + self.assertEqual(hyperparams_proto.activation, + conv_hyperparams_actual.activation) + self.assertFalse(is_training) + + def test_construct_non_default_conv_box_predictor(self): + box_predictor_text_proto = """ + convolutional_box_predictor { + min_depth: 2 + max_depth: 16 + num_layers_before_predictor: 2 + use_dropout: false + dropout_keep_probability: 0.4 + kernel_size: 3 + box_code_size: 3 + apply_sigmoid_to_scores: true + class_prediction_bias_init: 4.0 + use_depthwise: true + } + """ + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto) + def mock_conv_argscope_builder(conv_hyperparams_arg, is_training): + return (conv_hyperparams_arg, is_training) + + box_predictor_proto = box_predictor_pb2.BoxPredictor() + text_format.Merge(box_predictor_text_proto, box_predictor_proto) + box_predictor_proto.convolutional_box_predictor.conv_hyperparams.CopyFrom( + hyperparams_proto) + box_predictor = box_predictor_builder.build( + argscope_fn=mock_conv_argscope_builder, + box_predictor_config=box_predictor_proto, + is_training=False, + num_classes=10, + add_background_class=False) + class_head = box_predictor._class_prediction_head + self.assertEqual(box_predictor._min_depth, 2) + self.assertEqual(box_predictor._max_depth, 16) + self.assertEqual(box_predictor._num_layers_before_predictor, 2) + self.assertFalse(class_head._use_dropout) + self.assertAlmostEqual(class_head._dropout_keep_prob, 0.4) + self.assertTrue(class_head._apply_sigmoid_to_scores) + self.assertAlmostEqual(class_head._class_prediction_bias_init, 4.0) + self.assertEqual(class_head._num_class_slots, 10) + self.assertEqual(box_predictor.num_classes, 10) + self.assertFalse(box_predictor._is_training) + self.assertTrue(class_head._use_depthwise) + + def test_construct_default_conv_box_predictor(self): + box_predictor_text_proto = """ + convolutional_box_predictor { + conv_hyperparams { + regularizer { + l1_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + } + }""" + box_predictor_proto = box_predictor_pb2.BoxPredictor() + text_format.Merge(box_predictor_text_proto, box_predictor_proto) + box_predictor = box_predictor_builder.build( + argscope_fn=hyperparams_builder.build, + box_predictor_config=box_predictor_proto, + is_training=True, + num_classes=90) + class_head = box_predictor._class_prediction_head + self.assertEqual(box_predictor._min_depth, 0) + self.assertEqual(box_predictor._max_depth, 0) + self.assertEqual(box_predictor._num_layers_before_predictor, 0) + self.assertTrue(class_head._use_dropout) + self.assertAlmostEqual(class_head._dropout_keep_prob, 0.8) + self.assertFalse(class_head._apply_sigmoid_to_scores) + self.assertEqual(class_head._num_class_slots, 91) + self.assertEqual(box_predictor.num_classes, 90) + self.assertTrue(box_predictor._is_training) + self.assertFalse(class_head._use_depthwise) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only Tests.') +class WeightSharedConvolutionalBoxPredictorBuilderTest(tf.test.TestCase): + + def test_box_predictor_calls_conv_argscope_fn(self): + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + weight: 0.0003 + } + } + initializer { + truncated_normal_initializer { + mean: 0.0 + stddev: 0.3 + } + } + activation: RELU_6 + """ + hyperparams_proto = hyperparams_pb2.Hyperparams() 
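    # The mock argscope builder defined just below returns its arguments
    # unchanged, so build() ends up storing a (hyperparams_proto, is_training)
    # tuple in _conv_hyperparams_fn; the assertions at the end of this test
    # unpack that tuple to verify exactly which hyperparams proto and training
    # flag were forwarded to the weight-shared convolutional predictor.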
+ text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto) + def mock_conv_argscope_builder(conv_hyperparams_arg, is_training): + return (conv_hyperparams_arg, is_training) + + box_predictor_proto = box_predictor_pb2.BoxPredictor() + (box_predictor_proto.weight_shared_convolutional_box_predictor + .conv_hyperparams.CopyFrom(hyperparams_proto)) + box_predictor = box_predictor_builder.build( + argscope_fn=mock_conv_argscope_builder, + box_predictor_config=box_predictor_proto, + is_training=False, + num_classes=10) + (conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams_fn + self.assertAlmostEqual((hyperparams_proto.regularizer. + l1_regularizer.weight), + (conv_hyperparams_actual.regularizer.l1_regularizer. + weight)) + self.assertAlmostEqual((hyperparams_proto.initializer. + truncated_normal_initializer.stddev), + (conv_hyperparams_actual.initializer. + truncated_normal_initializer.stddev)) + self.assertAlmostEqual((hyperparams_proto.initializer. + truncated_normal_initializer.mean), + (conv_hyperparams_actual.initializer. + truncated_normal_initializer.mean)) + self.assertEqual(hyperparams_proto.activation, + conv_hyperparams_actual.activation) + self.assertFalse(is_training) + + def test_construct_non_default_conv_box_predictor(self): + box_predictor_text_proto = """ + weight_shared_convolutional_box_predictor { + depth: 2 + num_layers_before_predictor: 2 + kernel_size: 7 + box_code_size: 3 + class_prediction_bias_init: 4.0 + } + """ + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto) + def mock_conv_argscope_builder(conv_hyperparams_arg, is_training): + return (conv_hyperparams_arg, is_training) + + box_predictor_proto = box_predictor_pb2.BoxPredictor() + text_format.Merge(box_predictor_text_proto, box_predictor_proto) + (box_predictor_proto.weight_shared_convolutional_box_predictor. 
+ conv_hyperparams.CopyFrom(hyperparams_proto)) + box_predictor = box_predictor_builder.build( + argscope_fn=mock_conv_argscope_builder, + box_predictor_config=box_predictor_proto, + is_training=False, + num_classes=10, + add_background_class=False) + class_head = box_predictor._class_prediction_head + self.assertEqual(box_predictor._depth, 2) + self.assertEqual(box_predictor._num_layers_before_predictor, 2) + self.assertAlmostEqual(class_head._class_prediction_bias_init, 4.0) + self.assertEqual(box_predictor.num_classes, 10) + self.assertFalse(box_predictor._is_training) + self.assertEqual(box_predictor._apply_batch_norm, False) + + def test_construct_non_default_depthwise_conv_box_predictor(self): + box_predictor_text_proto = """ + weight_shared_convolutional_box_predictor { + depth: 2 + num_layers_before_predictor: 2 + kernel_size: 7 + box_code_size: 3 + class_prediction_bias_init: 4.0 + use_depthwise: true + } + """ + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto) + def mock_conv_argscope_builder(conv_hyperparams_arg, is_training): + return (conv_hyperparams_arg, is_training) + + box_predictor_proto = box_predictor_pb2.BoxPredictor() + text_format.Merge(box_predictor_text_proto, box_predictor_proto) + (box_predictor_proto.weight_shared_convolutional_box_predictor. + conv_hyperparams.CopyFrom(hyperparams_proto)) + box_predictor = box_predictor_builder.build( + argscope_fn=mock_conv_argscope_builder, + box_predictor_config=box_predictor_proto, + is_training=False, + num_classes=10, + add_background_class=False) + class_head = box_predictor._class_prediction_head + self.assertEqual(box_predictor._depth, 2) + self.assertEqual(box_predictor._num_layers_before_predictor, 2) + self.assertEqual(box_predictor._apply_batch_norm, False) + self.assertEqual(box_predictor._use_depthwise, True) + self.assertAlmostEqual(class_head._class_prediction_bias_init, 4.0) + self.assertEqual(box_predictor.num_classes, 10) + self.assertFalse(box_predictor._is_training) + + def test_construct_default_conv_box_predictor(self): + box_predictor_text_proto = """ + weight_shared_convolutional_box_predictor { + conv_hyperparams { + regularizer { + l1_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + } + }""" + box_predictor_proto = box_predictor_pb2.BoxPredictor() + text_format.Merge(box_predictor_text_proto, box_predictor_proto) + box_predictor = box_predictor_builder.build( + argscope_fn=hyperparams_builder.build, + box_predictor_config=box_predictor_proto, + is_training=True, + num_classes=90) + self.assertEqual(box_predictor._depth, 0) + self.assertEqual(box_predictor._num_layers_before_predictor, 0) + self.assertEqual(box_predictor.num_classes, 90) + self.assertTrue(box_predictor._is_training) + self.assertEqual(box_predictor._apply_batch_norm, False) + + def test_construct_default_conv_box_predictor_with_batch_norm(self): + box_predictor_text_proto = """ + weight_shared_convolutional_box_predictor { + conv_hyperparams { + regularizer { + l1_regularizer { + } + } + batch_norm { + train: true + } + initializer { + truncated_normal_initializer { + } + } + } + }""" + box_predictor_proto = box_predictor_pb2.BoxPredictor() + text_format.Merge(box_predictor_text_proto, box_predictor_proto) + box_predictor = box_predictor_builder.build( + argscope_fn=hyperparams_builder.build, + 
box_predictor_config=box_predictor_proto, + is_training=True, + num_classes=90) + self.assertEqual(box_predictor._depth, 0) + self.assertEqual(box_predictor._num_layers_before_predictor, 0) + self.assertEqual(box_predictor.num_classes, 90) + self.assertTrue(box_predictor._is_training) + self.assertEqual(box_predictor._apply_batch_norm, True) + + + + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only Tests.') +class MaskRCNNBoxPredictorBuilderTest(tf.test.TestCase): + + def test_box_predictor_builder_calls_fc_argscope_fn(self): + fc_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + weight: 0.0003 + } + } + initializer { + truncated_normal_initializer { + mean: 0.0 + stddev: 0.3 + } + } + activation: RELU_6 + op: FC + """ + hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(fc_hyperparams_text_proto, hyperparams_proto) + box_predictor_proto = box_predictor_pb2.BoxPredictor() + box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.CopyFrom( + hyperparams_proto) + mock_argscope_fn = mock.Mock(return_value='arg_scope') + box_predictor = box_predictor_builder.build( + argscope_fn=mock_argscope_fn, + box_predictor_config=box_predictor_proto, + is_training=False, + num_classes=10) + mock_argscope_fn.assert_called_with(hyperparams_proto, False) + self.assertEqual(box_predictor._box_prediction_head._fc_hyperparams_fn, + 'arg_scope') + self.assertEqual(box_predictor._class_prediction_head._fc_hyperparams_fn, + 'arg_scope') + + def test_non_default_mask_rcnn_box_predictor(self): + fc_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + activation: RELU_6 + op: FC + """ + box_predictor_text_proto = """ + mask_rcnn_box_predictor { + use_dropout: true + dropout_keep_probability: 0.8 + box_code_size: 3 + share_box_across_classes: true + } + """ + hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(fc_hyperparams_text_proto, hyperparams_proto) + def mock_fc_argscope_builder(fc_hyperparams_arg, is_training): + return (fc_hyperparams_arg, is_training) + + box_predictor_proto = box_predictor_pb2.BoxPredictor() + text_format.Merge(box_predictor_text_proto, box_predictor_proto) + box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.CopyFrom( + hyperparams_proto) + box_predictor = box_predictor_builder.build( + argscope_fn=mock_fc_argscope_builder, + box_predictor_config=box_predictor_proto, + is_training=True, + num_classes=90) + box_head = box_predictor._box_prediction_head + class_head = box_predictor._class_prediction_head + self.assertTrue(box_head._use_dropout) + self.assertTrue(class_head._use_dropout) + self.assertAlmostEqual(box_head._dropout_keep_prob, 0.8) + self.assertAlmostEqual(class_head._dropout_keep_prob, 0.8) + self.assertEqual(box_predictor.num_classes, 90) + self.assertTrue(box_predictor._is_training) + self.assertEqual(box_head._box_code_size, 3) + self.assertEqual(box_head._share_box_across_classes, True) + + def test_build_default_mask_rcnn_box_predictor(self): + box_predictor_proto = box_predictor_pb2.BoxPredictor() + box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = ( + hyperparams_pb2.Hyperparams.FC) + box_predictor = box_predictor_builder.build( + argscope_fn=mock.Mock(return_value='arg_scope'), + box_predictor_config=box_predictor_proto, + is_training=True, + num_classes=90) + box_head = box_predictor._box_prediction_head + class_head = box_predictor._class_prediction_head + self.assertFalse(box_head._use_dropout) + 
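    # Only fc_hyperparams.op was set on the proto above, so these assertions
    # exercise the mask_rcnn_box_predictor defaults: dropout disabled with a
    # keep probability of 0.5, a 4-dimensional box code, and no third-stage
    # (mask) heads.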
self.assertFalse(class_head._use_dropout) + self.assertAlmostEqual(box_head._dropout_keep_prob, 0.5) + self.assertEqual(box_predictor.num_classes, 90) + self.assertTrue(box_predictor._is_training) + self.assertEqual(box_head._box_code_size, 4) + self.assertEqual(len(box_predictor._third_stage_heads.keys()), 0) + + def test_build_box_predictor_with_mask_branch(self): + box_predictor_proto = box_predictor_pb2.BoxPredictor() + box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = ( + hyperparams_pb2.Hyperparams.FC) + box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams.op = ( + hyperparams_pb2.Hyperparams.CONV) + box_predictor_proto.mask_rcnn_box_predictor.predict_instance_masks = True + box_predictor_proto.mask_rcnn_box_predictor.mask_prediction_conv_depth = 512 + box_predictor_proto.mask_rcnn_box_predictor.mask_height = 16 + box_predictor_proto.mask_rcnn_box_predictor.mask_width = 16 + mock_argscope_fn = mock.Mock(return_value='arg_scope') + box_predictor = box_predictor_builder.build( + argscope_fn=mock_argscope_fn, + box_predictor_config=box_predictor_proto, + is_training=True, + num_classes=90) + mock_argscope_fn.assert_has_calls( + [mock.call(box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams, + True), + mock.call(box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams, + True)], any_order=True) + box_head = box_predictor._box_prediction_head + class_head = box_predictor._class_prediction_head + third_stage_heads = box_predictor._third_stage_heads + self.assertFalse(box_head._use_dropout) + self.assertFalse(class_head._use_dropout) + self.assertAlmostEqual(box_head._dropout_keep_prob, 0.5) + self.assertAlmostEqual(class_head._dropout_keep_prob, 0.5) + self.assertEqual(box_predictor.num_classes, 90) + self.assertTrue(box_predictor._is_training) + self.assertEqual(box_head._box_code_size, 4) + self.assertIn( + mask_rcnn_box_predictor.MASK_PREDICTIONS, third_stage_heads) + self.assertEqual( + third_stage_heads[mask_rcnn_box_predictor.MASK_PREDICTIONS] + ._mask_prediction_conv_depth, 512) + + def test_build_box_predictor_with_convlve_then_upsample_masks(self): + box_predictor_proto = box_predictor_pb2.BoxPredictor() + box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = ( + hyperparams_pb2.Hyperparams.FC) + box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams.op = ( + hyperparams_pb2.Hyperparams.CONV) + box_predictor_proto.mask_rcnn_box_predictor.predict_instance_masks = True + box_predictor_proto.mask_rcnn_box_predictor.mask_prediction_conv_depth = 512 + box_predictor_proto.mask_rcnn_box_predictor.mask_height = 24 + box_predictor_proto.mask_rcnn_box_predictor.mask_width = 24 + box_predictor_proto.mask_rcnn_box_predictor.convolve_then_upsample_masks = ( + True) + + mock_argscope_fn = mock.Mock(return_value='arg_scope') + box_predictor = box_predictor_builder.build( + argscope_fn=mock_argscope_fn, + box_predictor_config=box_predictor_proto, + is_training=True, + num_classes=90) + mock_argscope_fn.assert_has_calls( + [mock.call(box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams, + True), + mock.call(box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams, + True)], any_order=True) + box_head = box_predictor._box_prediction_head + class_head = box_predictor._class_prediction_head + third_stage_heads = box_predictor._third_stage_heads + self.assertFalse(box_head._use_dropout) + self.assertFalse(class_head._use_dropout) + self.assertAlmostEqual(box_head._dropout_keep_prob, 0.5) + self.assertAlmostEqual(class_head._dropout_keep_prob, 
0.5) + self.assertEqual(box_predictor.num_classes, 90) + self.assertTrue(box_predictor._is_training) + self.assertEqual(box_head._box_code_size, 4) + self.assertIn( + mask_rcnn_box_predictor.MASK_PREDICTIONS, third_stage_heads) + self.assertEqual( + third_stage_heads[mask_rcnn_box_predictor.MASK_PREDICTIONS] + ._mask_prediction_conv_depth, 512) + self.assertTrue(third_stage_heads[mask_rcnn_box_predictor.MASK_PREDICTIONS] + ._convolve_then_upsample) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only Tests.') +class RfcnBoxPredictorBuilderTest(tf.test.TestCase): + + def test_box_predictor_calls_fc_argscope_fn(self): + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + weight: 0.0003 + } + } + initializer { + truncated_normal_initializer { + mean: 0.0 + stddev: 0.3 + } + } + activation: RELU_6 + """ + hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto) + def mock_conv_argscope_builder(conv_hyperparams_arg, is_training): + return (conv_hyperparams_arg, is_training) + + box_predictor_proto = box_predictor_pb2.BoxPredictor() + box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom( + hyperparams_proto) + box_predictor = box_predictor_builder.build( + argscope_fn=mock_conv_argscope_builder, + box_predictor_config=box_predictor_proto, + is_training=False, + num_classes=10) + (conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams_fn + self.assertAlmostEqual((hyperparams_proto.regularizer. + l1_regularizer.weight), + (conv_hyperparams_actual.regularizer.l1_regularizer. + weight)) + self.assertAlmostEqual((hyperparams_proto.initializer. + truncated_normal_initializer.stddev), + (conv_hyperparams_actual.initializer. + truncated_normal_initializer.stddev)) + self.assertAlmostEqual((hyperparams_proto.initializer. + truncated_normal_initializer.mean), + (conv_hyperparams_actual.initializer. 
+ truncated_normal_initializer.mean)) + self.assertEqual(hyperparams_proto.activation, + conv_hyperparams_actual.activation) + self.assertFalse(is_training) + + def test_non_default_rfcn_box_predictor(self): + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + activation: RELU_6 + """ + box_predictor_text_proto = """ + rfcn_box_predictor { + num_spatial_bins_height: 4 + num_spatial_bins_width: 4 + depth: 4 + box_code_size: 3 + crop_height: 16 + crop_width: 16 + } + """ + hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto) + def mock_conv_argscope_builder(conv_hyperparams_arg, is_training): + return (conv_hyperparams_arg, is_training) + + box_predictor_proto = box_predictor_pb2.BoxPredictor() + text_format.Merge(box_predictor_text_proto, box_predictor_proto) + box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom( + hyperparams_proto) + box_predictor = box_predictor_builder.build( + argscope_fn=mock_conv_argscope_builder, + box_predictor_config=box_predictor_proto, + is_training=True, + num_classes=90) + self.assertEqual(box_predictor.num_classes, 90) + self.assertTrue(box_predictor._is_training) + self.assertEqual(box_predictor._box_code_size, 3) + self.assertEqual(box_predictor._num_spatial_bins, [4, 4]) + self.assertEqual(box_predictor._crop_size, [16, 16]) + + def test_default_rfcn_box_predictor(self): + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + activation: RELU_6 + """ + hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto) + def mock_conv_argscope_builder(conv_hyperparams_arg, is_training): + return (conv_hyperparams_arg, is_training) + + box_predictor_proto = box_predictor_pb2.BoxPredictor() + box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom( + hyperparams_proto) + box_predictor = box_predictor_builder.build( + argscope_fn=mock_conv_argscope_builder, + box_predictor_config=box_predictor_proto, + is_training=True, + num_classes=90) + self.assertEqual(box_predictor.num_classes, 90) + self.assertTrue(box_predictor._is_training) + self.assertEqual(box_predictor._box_code_size, 4) + self.assertEqual(box_predictor._num_spatial_bins, [3, 3]) + self.assertEqual(box_predictor._crop_size, [12, 12]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/builders/calibration_builder.py b/models/research/object_detection/builders/calibration_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..4adc170d3f1a203abc47dd74017c656ba967e74b --- /dev/null +++ b/models/research/object_detection/builders/calibration_builder.py @@ -0,0 +1,250 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tensorflow ops to calibrate class predictions and background class.""" + +import tensorflow.compat.v1 as tf +from object_detection.utils import shape_utils + + +def _find_interval_containing_new_value(x, new_value): + """Find the index of x (ascending-ordered) after which new_value occurs.""" + new_value_shape = shape_utils.combined_static_and_dynamic_shape(new_value)[0] + x_shape = shape_utils.combined_static_and_dynamic_shape(x)[0] + compare = tf.cast(tf.reshape(new_value, shape=(new_value_shape, 1)) >= + tf.reshape(x, shape=(1, x_shape)), + dtype=tf.int32) + diff = compare[:, 1:] - compare[:, :-1] + interval_idx = tf.argmin(diff, axis=1) + return interval_idx + + +def _tf_linear_interp1d(x_to_interpolate, fn_x, fn_y): + """Tensorflow implementation of 1d linear interpolation. + + Args: + x_to_interpolate: tf.float32 Tensor of shape (num_examples,) over which 1d + linear interpolation is performed. + fn_x: Monotonically-increasing, non-repeating tf.float32 Tensor of shape + (length,) used as the domain to approximate a function. + fn_y: tf.float32 Tensor of shape (length,) used as the range to approximate + a function. + + Returns: + tf.float32 Tensor of shape (num_examples,) + """ + x_pad = tf.concat([fn_x[:1] - 1, fn_x, fn_x[-1:] + 1], axis=0) + y_pad = tf.concat([fn_y[:1], fn_y, fn_y[-1:]], axis=0) + interval_idx = _find_interval_containing_new_value(x_pad, x_to_interpolate) + + # Interpolate + alpha = ( + (x_to_interpolate - tf.gather(x_pad, interval_idx)) / + (tf.gather(x_pad, interval_idx + 1) - tf.gather(x_pad, interval_idx))) + interpolation = ((1 - alpha) * tf.gather(y_pad, interval_idx) + + alpha * tf.gather(y_pad, interval_idx + 1)) + + return interpolation + + +def _function_approximation_proto_to_tf_tensors(x_y_pairs_message): + """Extracts (x,y) pairs from a XYPairs message. + + Args: + x_y_pairs_message: calibration_pb2..XYPairs proto + Returns: + tf_x: tf.float32 tensor of shape (number_xy_pairs,) for function domain. + tf_y: tf.float32 tensor of shape (number_xy_pairs,) for function range. + """ + tf_x = tf.convert_to_tensor([x_y_pair.x + for x_y_pair + in x_y_pairs_message.x_y_pair], + dtype=tf.float32) + tf_y = tf.convert_to_tensor([x_y_pair.y + for x_y_pair + in x_y_pairs_message.x_y_pair], + dtype=tf.float32) + return tf_x, tf_y + + +def _get_class_id_function_dict(calibration_config): + """Create a dictionary mapping class id to function approximations. + + Args: + calibration_config: calibration_pb2 proto containing + id_function_approximations. + Returns: + Dictionary mapping a class id to a tuple of TF tensors to be used for + function approximation. + """ + class_id_function_dict = {} + class_id_xy_pairs_map = ( + calibration_config.class_id_function_approximations.class_id_xy_pairs_map) + for class_id in class_id_xy_pairs_map: + class_id_function_dict[class_id] = ( + _function_approximation_proto_to_tf_tensors( + class_id_xy_pairs_map[class_id])) + + return class_id_function_dict + + +def build(calibration_config): + """Returns a function that calibrates Tensorflow model scores. + + All returned functions are expected to apply positive monotonic + transformations to inputs (i.e. score ordering is strictly preserved or + adjacent scores are mapped to the same score, but an input of lower value + should never be exceed an input of higher value after transformation). For + class-agnostic calibration, positive monotonicity should hold across all + scores. 
In class-specific cases, positive monotonicity should hold within each + class. + + Args: + calibration_config: calibration_pb2.CalibrationConfig proto. + Returns: + Function that that accepts class_predictions_with_background and calibrates + the output based on calibration_config's parameters. + Raises: + ValueError: No calibration builder defined for "Oneof" in + calibration_config. + """ + + # Linear Interpolation (usually used as a result of calibration via + # isotonic regression). + if calibration_config.WhichOneof('calibrator') == 'function_approximation': + + def calibration_fn(class_predictions_with_background): + """Calibrate predictions via 1-d linear interpolation. + + Predictions scores are linearly interpolated based on a class-agnostic + function approximation. Note that the 0-indexed background class is also + transformed. + + Args: + class_predictions_with_background: tf.float32 tensor of shape + [batch_size, num_anchors, num_classes + 1] containing scores on the + interval [0,1]. This is usually produced by a sigmoid or softmax layer + and the result of calling the `predict` method of a detection model. + + Returns: + tf.float32 tensor of the same shape as the input with values on the + interval [0, 1]. + """ + # Flattening Tensors and then reshaping at the end. + flat_class_predictions_with_background = tf.reshape( + class_predictions_with_background, shape=[-1]) + fn_x, fn_y = _function_approximation_proto_to_tf_tensors( + calibration_config.function_approximation.x_y_pairs) + updated_scores = _tf_linear_interp1d( + flat_class_predictions_with_background, fn_x, fn_y) + + # Un-flatten the scores + original_detections_shape = shape_utils.combined_static_and_dynamic_shape( + class_predictions_with_background) + calibrated_class_predictions_with_background = tf.reshape( + updated_scores, + shape=original_detections_shape, + name='calibrate_scores') + return calibrated_class_predictions_with_background + + elif (calibration_config.WhichOneof('calibrator') == + 'class_id_function_approximations'): + + def calibration_fn(class_predictions_with_background): + """Calibrate predictions per class via 1-d linear interpolation. + + Prediction scores are linearly interpolated with class-specific function + approximations. Note that after calibration, an anchor's class scores will + not necessarily sum to 1, and score ordering may change, depending on each + class' calibration parameters. + + Args: + class_predictions_with_background: tf.float32 tensor of shape + [batch_size, num_anchors, num_classes + 1] containing scores on the + interval [0,1]. This is usually produced by a sigmoid or softmax layer + and the result of calling the `predict` method of a detection model. + + Returns: + tf.float32 tensor of the same shape as the input with values on the + interval [0, 1]. + + Raises: + KeyError: Calibration parameters are not present for a class. + """ + class_id_function_dict = _get_class_id_function_dict(calibration_config) + + # Tensors are split by class and then recombined at the end to recover + # the input's original shape. If a class id does not have calibration + # parameters, it is left unchanged. 
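      # A minimal NumPy sketch of the per-class transform applied below
      # (illustrative only; np.interp stands in for _tf_linear_interp1d, and
      # fn_x, fn_y are one class' calibration control points):
      #
      #   import numpy as np
      #   calibrated = np.interp(flat_class_scores, fn_x, fn_y)
      #
      # Within the control-point range the two compute the same piecewise-linear
      # map; _tf_linear_interp1d pads the domain by one point on each side and
      # repeats the endpoint values, so scores just outside the range clamp to
      # fn_y[0] / fn_y[-1], matching np.interp's default behaviour.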
+ class_tensors = tf.unstack(class_predictions_with_background, axis=-1) + calibrated_class_tensors = [] + for class_id, class_tensor in enumerate(class_tensors): + flat_class_tensor = tf.reshape(class_tensor, shape=[-1]) + if class_id in class_id_function_dict: + output_tensor = _tf_linear_interp1d( + x_to_interpolate=flat_class_tensor, + fn_x=class_id_function_dict[class_id][0], + fn_y=class_id_function_dict[class_id][1]) + else: + tf.logging.info( + 'Calibration parameters for class id `%d` not not found', + class_id) + output_tensor = flat_class_tensor + calibrated_class_tensors.append(output_tensor) + + combined_calibrated_tensor = tf.stack(calibrated_class_tensors, axis=1) + input_shape = shape_utils.combined_static_and_dynamic_shape( + class_predictions_with_background) + calibrated_class_predictions_with_background = tf.reshape( + combined_calibrated_tensor, + shape=input_shape, + name='calibrate_scores') + return calibrated_class_predictions_with_background + + elif (calibration_config.WhichOneof('calibrator') == + 'temperature_scaling_calibration'): + + def calibration_fn(class_predictions_with_background): + """Calibrate predictions via temperature scaling. + + Predictions logits scores are scaled by the temperature scaler. Note that + the 0-indexed background class is also transformed. + + Args: + class_predictions_with_background: tf.float32 tensor of shape + [batch_size, num_anchors, num_classes + 1] containing logits scores. + This is usually produced before a sigmoid or softmax layer. + + Returns: + tf.float32 tensor of the same shape as the input. + + Raises: + ValueError: If temperature scaler is of incorrect value. + """ + scaler = calibration_config.temperature_scaling_calibration.scaler + if scaler <= 0: + raise ValueError('The scaler in temperature scaling must be positive.') + calibrated_class_predictions_with_background = tf.math.divide( + class_predictions_with_background, + scaler, + name='calibrate_score') + return calibrated_class_predictions_with_background + + # TODO(zbeaver): Add sigmoid calibration. + else: + raise ValueError('No calibration builder defined for "Oneof" in ' + 'calibration_config.') + + return calibration_fn diff --git a/models/research/object_detection/builders/calibration_builder_test.py b/models/research/object_detection/builders/calibration_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..a81d53a86e65bc400fe38cac8c96867aa1489607 --- /dev/null +++ b/models/research/object_detection/builders/calibration_builder_test.py @@ -0,0 +1,233 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for calibration_builder.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np +from scipy import interpolate +from six.moves import zip +import tensorflow.compat.v1 as tf +from object_detection.builders import calibration_builder +from object_detection.protos import calibration_pb2 +from object_detection.utils import test_case + + +class CalibrationBuilderTest(test_case.TestCase): + + def test_tf_linear_interp1d_map(self): + """Tests TF linear interpolation mapping to a single number.""" + def graph_fn(): + tf_x = tf.constant([0., 0.5, 1.]) + tf_y = tf.constant([0.5, 0.5, 0.5]) + new_x = tf.constant([0., 0.25, 0.5, 0.75, 1.]) + tf_map_outputs = calibration_builder._tf_linear_interp1d( + new_x, tf_x, tf_y) + return tf_map_outputs + tf_map_outputs_np = self.execute(graph_fn, []) + self.assertAllClose(tf_map_outputs_np, [0.5, 0.5, 0.5, 0.5, 0.5]) + + def test_tf_linear_interp1d_interpolate(self): + """Tests TF 1d linear interpolation not mapping to a single number.""" + def graph_fn(): + tf_x = tf.constant([0., 0.5, 1.]) + tf_y = tf.constant([0.6, 0.7, 1.0]) + new_x = tf.constant([0., 0.25, 0.5, 0.75, 1.]) + tf_interpolate_outputs = calibration_builder._tf_linear_interp1d( + new_x, tf_x, tf_y) + return tf_interpolate_outputs + tf_interpolate_outputs_np = self.execute(graph_fn, []) + self.assertAllClose(tf_interpolate_outputs_np, [0.6, 0.65, 0.7, 0.85, 1.]) + + @staticmethod + def _get_scipy_interp1d(new_x, x, y): + """Helper performing 1d linear interpolation using SciPy.""" + interpolation1d_fn = interpolate.interp1d(x, y) + return interpolation1d_fn(new_x) + + def _get_tf_interp1d(self, new_x, x, y): + """Helper performing 1d linear interpolation using Tensorflow.""" + def graph_fn(): + tf_interp_outputs = calibration_builder._tf_linear_interp1d( + tf.convert_to_tensor(new_x, dtype=tf.float32), + tf.convert_to_tensor(x, dtype=tf.float32), + tf.convert_to_tensor(y, dtype=tf.float32)) + return tf_interp_outputs + np_tf_interp_outputs = self.execute(graph_fn, []) + return np_tf_interp_outputs + + def test_tf_linear_interp1d_against_scipy_map(self): + """Tests parity of TF linear interpolation with SciPy for simple mapping.""" + length = 10 + np_x = np.linspace(0, 1, length) + + # Mapping all numbers to 0.5 + np_y_map = np.repeat(0.5, length) + + # Scipy and TF interpolations + test_data_np = np.linspace(0, 1, length * 10) + scipy_map_outputs = self._get_scipy_interp1d(test_data_np, np_x, np_y_map) + np_tf_map_outputs = self._get_tf_interp1d(test_data_np, np_x, np_y_map) + self.assertAllClose(scipy_map_outputs, np_tf_map_outputs) + + def test_tf_linear_interp1d_against_scipy_interpolate(self): + """Tests parity of TF linear interpolation with SciPy.""" + length = 10 + np_x = np.linspace(0, 1, length) + + # Requires interpolation over 0.5 to 1 domain + np_y_interp = np.linspace(0.5, 1, length) + + # Scipy interpolation for comparison + test_data_np = np.linspace(0, 1, length * 10) + scipy_interp_outputs = self._get_scipy_interp1d(test_data_np, np_x, + np_y_interp) + np_tf_interp_outputs = self._get_tf_interp1d(test_data_np, np_x, + np_y_interp) + self.assertAllClose(scipy_interp_outputs, np_tf_interp_outputs) + + @staticmethod + def _add_function_approximation_to_calibration_proto(calibration_proto, + x_array, y_array, + class_id): + """Adds a function approximation to calibration proto for a class id.""" + # Per-class 
calibration. + if class_id is not None: + function_approximation = ( + calibration_proto.class_id_function_approximations + .class_id_xy_pairs_map[class_id]) + # Class-agnostic calibration. + else: + function_approximation = ( + calibration_proto.function_approximation.x_y_pairs) + + for x, y in zip(x_array, y_array): + x_y_pair_message = function_approximation.x_y_pair.add() + x_y_pair_message.x = x + x_y_pair_message.y = y + + def test_class_agnostic_function_approximation(self): + """Tests that calibration produces correct class-agnostic values.""" + # Generate fake calibration proto. For this interpolation, any input on + # [0.0, 0.5] should be divided by 2 and any input on (0.5, 1.0] should have + # 0.25 subtracted from it. + class_agnostic_x = np.asarray([0.0, 0.5, 1.0]) + class_agnostic_y = np.asarray([0.0, 0.25, 0.75]) + calibration_config = calibration_pb2.CalibrationConfig() + self._add_function_approximation_to_calibration_proto( + calibration_config, class_agnostic_x, class_agnostic_y, class_id=None) + + def graph_fn(): + calibration_fn = calibration_builder.build(calibration_config) + # batch_size = 2, num_classes = 2, num_anchors = 2. + class_predictions_with_background = tf.constant( + [[[0.1, 0.2, 0.3], + [0.4, 0.5, 0.0]], + [[0.6, 0.7, 0.8], + [0.9, 1.0, 1.0]]], dtype=tf.float32) + + # Everything should map to 0.5 if classes are ignored. + calibrated_scores = calibration_fn(class_predictions_with_background) + return calibrated_scores + calibrated_scores_np = self.execute(graph_fn, []) + self.assertAllClose(calibrated_scores_np, [[[0.05, 0.1, 0.15], + [0.2, 0.25, 0.0]], + [[0.35, 0.45, 0.55], + [0.65, 0.75, 0.75]]]) + + def test_multiclass_function_approximations(self): + """Tests that calibration produces correct multiclass values.""" + # Background class (0-index) maps all predictions to 0.5. + class_0_x = np.asarray([0.0, 0.5, 1.0]) + class_0_y = np.asarray([0.5, 0.5, 0.5]) + calibration_config = calibration_pb2.CalibrationConfig() + self._add_function_approximation_to_calibration_proto( + calibration_config, class_0_x, class_0_y, class_id=0) + + # Class id 1 will interpolate using these values. + class_1_x = np.asarray([0.0, 0.2, 1.0]) + class_1_y = np.asarray([0.0, 0.6, 1.0]) + self._add_function_approximation_to_calibration_proto( + calibration_config, class_1_x, class_1_y, class_id=1) + + def graph_fn(): + calibration_fn = calibration_builder.build(calibration_config) + # batch_size = 2, num_classes = 2, num_anchors = 2. + class_predictions_with_background = tf.constant( + [[[0.1, 0.2], [0.9, 0.1]], + [[0.6, 0.4], [0.08, 0.92]]], + dtype=tf.float32) + calibrated_scores = calibration_fn(class_predictions_with_background) + return calibrated_scores + calibrated_scores_np = self.execute(graph_fn, []) + self.assertAllClose(calibrated_scores_np, [[[0.5, 0.6], [0.5, 0.3]], + [[0.5, 0.7], [0.5, 0.96]]]) + + def test_temperature_scaling(self): + """Tests that calibration produces correct temperature scaling values.""" + calibration_config = calibration_pb2.CalibrationConfig() + calibration_config.temperature_scaling_calibration.scaler = 2.0 + + def graph_fn(): + calibration_fn = calibration_builder.build(calibration_config) + # batch_size = 2, num_classes = 2, num_anchors = 2. 
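      # With scaler = 2.0 set above, temperature scaling simply divides every
      # logit by 2, e.g. the first anchor's scores [0.1, 0.2, 0.3] become
      # [0.05, 0.1, 0.15], which is what the assertAllClose below checks.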
+ class_predictions_with_background = tf.constant( + [[[0.1, 0.2, 0.3], [0.4, 0.5, 0.0]], + [[0.6, 0.7, 0.8], [0.9, 1.0, 1.0]]], + dtype=tf.float32) + calibrated_scores = calibration_fn(class_predictions_with_background) + return calibrated_scores + calibrated_scores_np = self.execute(graph_fn, []) + self.assertAllClose(calibrated_scores_np, + [[[0.05, 0.1, 0.15], [0.2, 0.25, 0.0]], + [[0.3, 0.35, 0.4], [0.45, 0.5, 0.5]]]) + + def test_temperature_scaling_incorrect_value_error(self): + calibration_config = calibration_pb2.CalibrationConfig() + calibration_config.temperature_scaling_calibration.scaler = 0 + + calibration_fn = calibration_builder.build(calibration_config) + class_predictions_with_background = tf.constant( + [[[0.1, 0.2, 0.3]]], dtype=tf.float32) + with self.assertRaises(ValueError): + calibration_fn(class_predictions_with_background) + + def test_skips_class_when_calibration_parameters_not_present(self): + """Tests that graph fails when parameters not present for all classes.""" + # Only adding calibration parameters for class id = 0, even though class id + # 1 is present in the data. + class_0_x = np.asarray([0.0, 0.5, 1.0]) + class_0_y = np.asarray([0.5, 0.5, 0.5]) + calibration_config = calibration_pb2.CalibrationConfig() + self._add_function_approximation_to_calibration_proto( + calibration_config, class_0_x, class_0_y, class_id=0) + def graph_fn(): + calibration_fn = calibration_builder.build(calibration_config) + # batch_size = 2, num_classes = 2, num_anchors = 2. + class_predictions_with_background = tf.constant( + [[[0.1, 0.2], [0.9, 0.1]], + [[0.6, 0.4], [0.08, 0.92]]], + dtype=tf.float32) + calibrated_scores = calibration_fn(class_predictions_with_background) + return calibrated_scores + calibrated_scores_np = self.execute(graph_fn, []) + self.assertAllClose(calibrated_scores_np, [[[0.5, 0.2], [0.5, 0.1]], + [[0.5, 0.4], [0.5, 0.92]]]) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/builders/dataset_builder.py b/models/research/object_detection/builders/dataset_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..c1c1ce3ecd17c2625585cd83f080b49c0150151a --- /dev/null +++ b/models/research/object_detection/builders/dataset_builder.py @@ -0,0 +1,202 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""tf.data.Dataset builder. + +Creates data sources for DetectionModels from an InputReader config. See +input_reader.proto for options. + +Note: If users wishes to also use their own InputReaders with the Object +Detection configuration framework, they should define their own builder function +that wraps the build function. 
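
A minimal usage sketch (illustrative only; the TFRecord path and batch size
below are placeholders, not values taken from this repository):

  from google.protobuf import text_format
  from object_detection.builders import dataset_builder
  from object_detection.protos import input_reader_pb2

  config = input_reader_pb2.InputReader()
  text_format.Merge('''
      shuffle: false
      num_readers: 1
      tf_record_input_reader { input_path: "/path/to/train-?????.tfrecord" }
  ''', config)
  # Returns a batched tf.data.Dataset of decoded examples.
  dataset = dataset_builder.build(config, batch_size=8)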
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import tensorflow.compat.v1 as tf + +from object_detection.builders import decoder_builder +from object_detection.protos import input_reader_pb2 + + +def make_initializable_iterator(dataset): + """Creates an iterator, and initializes tables. + + This is useful in cases where make_one_shot_iterator wouldn't work because + the graph contains a hash table that needs to be initialized. + + Args: + dataset: A `tf.data.Dataset` object. + + Returns: + A `tf.data.Iterator`. + """ + iterator = dataset.make_initializable_iterator() + tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer) + return iterator + + +def read_dataset(file_read_func, input_files, config, + filename_shard_fn=None): + """Reads a dataset, and handles repetition and shuffling. + + Args: + file_read_func: Function to use in tf_data.parallel_interleave, to + read every individual file into a tf.data.Dataset. + input_files: A list of file paths to read. + config: A input_reader_builder.InputReader object. + filename_shard_fn: optional, A funciton used to shard filenames across + replicas. This function takes as input a TF dataset of filenames and + is expected to return its sharded version. It is useful when the + dataset is being loaded on one of possibly many replicas and we want + to evenly shard the files between the replicas. + + Returns: + A tf.data.Dataset of (undecoded) tf-records based on config. + + Raises: + RuntimeError: If no files are found at the supplied path(s). + """ + # Shard, shuffle, and read files. + filenames = tf.gfile.Glob(input_files) + if not filenames: + raise RuntimeError('Did not find any input files matching the glob pattern ' + '{}'.format(input_files)) + num_readers = config.num_readers + if num_readers > len(filenames): + num_readers = len(filenames) + tf.logging.warning('num_readers has been reduced to %d to match input file ' + 'shards.' % num_readers) + filename_dataset = tf.data.Dataset.from_tensor_slices(filenames) + if config.shuffle: + filename_dataset = filename_dataset.shuffle( + config.filenames_shuffle_buffer_size) + elif num_readers > 1: + tf.logging.warning('`shuffle` is false, but the input data stream is ' + 'still slightly shuffled since `num_readers` > 1.') + if filename_shard_fn: + filename_dataset = filename_shard_fn(filename_dataset) + + filename_dataset = filename_dataset.repeat(config.num_epochs or None) + records_dataset = filename_dataset.apply( + tf.data.experimental.parallel_interleave( + file_read_func, + cycle_length=num_readers, + block_length=config.read_block_length, + sloppy=config.shuffle)) + if config.shuffle: + records_dataset = records_dataset.shuffle(config.shuffle_buffer_size) + return records_dataset + + +def shard_function_for_context(input_context): + """Returns a function that shards filenames based on the input context.""" + + if input_context is None: + return None + + def shard_fn(dataset): + return dataset.shard( + input_context.num_input_pipelines, input_context.input_pipeline_id) + + return shard_fn + + +def build(input_reader_config, batch_size=None, transform_input_data_fn=None, + input_context=None, reduce_to_frame_fn=None): + """Builds a tf.data.Dataset. + + Builds a tf.data.Dataset by applying the `transform_input_data_fn` on all + records. Applies a padded batch to the resulting dataset. + + Args: + input_reader_config: A input_reader_pb2.InputReader object. + batch_size: Batch size. 
If batch size is None, no batching is performed. + transform_input_data_fn: Function to apply transformation to all records, + or None if no extra decoding is required. + input_context: optional, A tf.distribute.InputContext object used to + shard filenames and compute per-replica batch_size when this function + is being called per-replica. + reduce_to_frame_fn: Function that extracts frames from tf.SequenceExample + type input data. + + Returns: + A tf.data.Dataset based on the input_reader_config. + + Raises: + ValueError: On invalid input reader proto. + ValueError: If no input paths are specified. + """ + if not isinstance(input_reader_config, input_reader_pb2.InputReader): + raise ValueError('input_reader_config not of type ' + 'input_reader_pb2.InputReader.') + + decoder = decoder_builder.build(input_reader_config) + + if input_reader_config.WhichOneof('input_reader') == 'tf_record_input_reader': + config = input_reader_config.tf_record_input_reader + if not config.input_path: + raise ValueError('At least one input path must be specified in ' + '`input_reader_config`.') + def dataset_map_fn(dataset, fn_to_map, batch_size=None, + input_reader_config=None): + """Handles whether or not to use the legacy map function. + + Args: + dataset: A tf.Dataset. + fn_to_map: The function to be mapped for that dataset. + batch_size: Batch size. If batch size is None, no batching is performed. + input_reader_config: A input_reader_pb2.InputReader object. + + Returns: + A tf.data.Dataset mapped with fn_to_map. + """ + if hasattr(dataset, 'map_with_legacy_function'): + if batch_size: + num_parallel_calls = batch_size * ( + input_reader_config.num_parallel_batches) + else: + num_parallel_calls = input_reader_config.num_parallel_map_calls + dataset = dataset.map_with_legacy_function( + fn_to_map, num_parallel_calls=num_parallel_calls) + else: + dataset = dataset.map(fn_to_map, tf.data.experimental.AUTOTUNE) + return dataset + shard_fn = shard_function_for_context(input_context) + if input_context is not None: + batch_size = input_context.get_per_replica_batch_size(batch_size) + dataset = read_dataset( + functools.partial(tf.data.TFRecordDataset, buffer_size=8 * 1000 * 1000), + config.input_path[:], input_reader_config, filename_shard_fn=shard_fn) + if input_reader_config.sample_1_of_n_examples > 1: + dataset = dataset.shard(input_reader_config.sample_1_of_n_examples, 0) + # TODO(rathodv): make batch size a required argument once the old binaries + # are deleted. + dataset = dataset_map_fn(dataset, decoder.decode, batch_size, + input_reader_config) + if reduce_to_frame_fn: + dataset = reduce_to_frame_fn(dataset, dataset_map_fn, batch_size, + input_reader_config) + if transform_input_data_fn is not None: + dataset = dataset_map_fn(dataset, transform_input_data_fn, + batch_size, input_reader_config) + if batch_size: + dataset = dataset.batch(batch_size, drop_remainder=True) + dataset = dataset.prefetch(input_reader_config.num_prefetch_batches) + return dataset + + raise ValueError('Unsupported input_reader_config.') diff --git a/models/research/object_detection/builders/dataset_builder_test.py b/models/research/object_detection/builders/dataset_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..7c3de113e92a1784462d32459197b98c51f88562 --- /dev/null +++ b/models/research/object_detection/builders/dataset_builder_test.py @@ -0,0 +1,653 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for dataset_builder.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import numpy as np +from six.moves import range +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format + +from object_detection.builders import dataset_builder +from object_detection.core import standard_fields as fields +from object_detection.dataset_tools import seq_example_util +from object_detection.protos import input_reader_pb2 +from object_detection.utils import dataset_util +from object_detection.utils import test_case + +# pylint: disable=g-import-not-at-top +try: + from tensorflow.contrib import lookup as contrib_lookup +except ImportError: + # TF 2.0 doesn't ship with contrib. + pass +# pylint: enable=g-import-not-at-top + + +def get_iterator_next_for_testing(dataset, is_tf2): + iterator = dataset.make_initializable_iterator() + if not is_tf2: + tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer) + return iterator.get_next() + + +def _get_labelmap_path(): + """Returns an absolute path to label map file.""" + parent_path = os.path.dirname(tf.resource_loader.get_data_files_path()) + return os.path.join(parent_path, 'data', + 'pet_label_map.pbtxt') + + +class DatasetBuilderTest(test_case.TestCase): + + def create_tf_record(self, has_additional_channels=False, num_shards=1, + num_examples_per_shard=1): + + def dummy_jpeg_fn(): + image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8) + additional_channels_tensor = np.random.randint( + 255, size=(4, 5, 1)).astype(np.uint8) + encoded_jpeg = tf.image.encode_jpeg(image_tensor) + encoded_additional_channels_jpeg = tf.image.encode_jpeg( + additional_channels_tensor) + + return encoded_jpeg, encoded_additional_channels_jpeg + + encoded_jpeg, encoded_additional_channels_jpeg = self.execute( + dummy_jpeg_fn, []) + + tmp_dir = self.get_temp_dir() + flat_mask = (4 * 5) * [1.0] + + for i in range(num_shards): + path = os.path.join(tmp_dir, '%05d.tfrecord' % i) + writer = tf.python_io.TFRecordWriter(path) + + for j in range(num_examples_per_shard): + if num_shards > 1: + source_id = (str(i) + '_' + str(j)).encode() + else: + source_id = str(j).encode() + + features = { + 'image/source_id': dataset_util.bytes_feature(source_id), + 'image/encoded': dataset_util.bytes_feature(encoded_jpeg), + 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')), + 'image/height': dataset_util.int64_feature(4), + 'image/width': dataset_util.int64_feature(5), + 'image/object/bbox/xmin': dataset_util.float_list_feature([0.0]), + 'image/object/bbox/xmax': dataset_util.float_list_feature([1.0]), + 'image/object/bbox/ymin': dataset_util.float_list_feature([0.0]), + 'image/object/bbox/ymax': dataset_util.float_list_feature([1.0]), + 'image/object/class/label': dataset_util.int64_list_feature([2]), + 
'image/object/mask': dataset_util.float_list_feature(flat_mask), + } + + if has_additional_channels: + additional_channels_key = 'image/additional_channels/encoded' + features[additional_channels_key] = dataset_util.bytes_list_feature( + [encoded_additional_channels_jpeg] * 2) + + example = tf.train.Example(features=tf.train.Features(feature=features)) + writer.write(example.SerializeToString()) + + writer.close() + + return os.path.join(self.get_temp_dir(), '?????.tfrecord') + + def _make_random_serialized_jpeg_images(self, num_frames, image_height, + image_width): + def graph_fn(): + images = tf.cast(tf.random.uniform( + [num_frames, image_height, image_width, 3], + maxval=256, + dtype=tf.int32), dtype=tf.uint8) + images_list = tf.unstack(images, axis=0) + encoded_images_list = [tf.io.encode_jpeg(image) for image in images_list] + return encoded_images_list + + encoded_images = self.execute(graph_fn, []) + return encoded_images + + def create_tf_record_sequence_example(self): + path = os.path.join(self.get_temp_dir(), 'seq_tfrecord') + writer = tf.python_io.TFRecordWriter(path) + + num_frames = 4 + image_height = 4 + image_width = 5 + image_source_ids = [str(i) for i in range(num_frames)] + with self.test_session(): + encoded_images = self._make_random_serialized_jpeg_images( + num_frames, image_height, image_width) + sequence_example_serialized = seq_example_util.make_sequence_example( + dataset_name='video_dataset', + video_id='video', + encoded_images=encoded_images, + image_height=image_height, + image_width=image_width, + image_source_ids=image_source_ids, + image_format='JPEG', + is_annotated=[[1], [1], [1], [1]], + bboxes=[ + [[]], # Frame 0. + [[0., 0., 1., 1.]], # Frame 1. + [[0., 0., 1., 1.], + [0.1, 0.1, 0.2, 0.2]], # Frame 2. + [[]], # Frame 3. + ], + label_strings=[ + [], # Frame 0. + ['Abyssinian'], # Frame 1. + ['Abyssinian', 'american_bulldog'], # Frame 2. 
+ [], # Frame 3 + ]).SerializeToString() + writer.write(sequence_example_serialized) + writer.close() + return path + + def test_build_tf_record_input_reader(self): + tf_record_path = self.create_tf_record() + + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path) + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + + def graph_fn(): + return get_iterator_next_for_testing( + dataset_builder.build(input_reader_proto, batch_size=1), + self.is_tf2()) + + output_dict = self.execute(graph_fn, []) + + self.assertNotIn( + fields.InputDataFields.groundtruth_instance_masks, output_dict) + self.assertEqual((1, 4, 5, 3), + output_dict[fields.InputDataFields.image].shape) + self.assertAllEqual([[2]], + output_dict[fields.InputDataFields.groundtruth_classes]) + self.assertEqual( + (1, 1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape) + self.assertAllEqual( + [0.0, 0.0, 1.0, 1.0], + output_dict[fields.InputDataFields.groundtruth_boxes][0][0]) + + def get_mock_reduce_to_frame_fn(self): + def mock_reduce_to_frame_fn(dataset, dataset_map_fn, batch_size, config): + def get_frame(tensor_dict): + out_tensor_dict = {} + out_tensor_dict[fields.InputDataFields.source_id] = ( + tensor_dict[fields.InputDataFields.source_id][0]) + return out_tensor_dict + return dataset_map_fn(dataset, get_frame, batch_size, config) + return mock_reduce_to_frame_fn + + def test_build_tf_record_input_reader_sequence_example_train(self): + tf_record_path = self.create_tf_record_sequence_example() + label_map_path = _get_labelmap_path() + input_type = 'TF_SEQUENCE_EXAMPLE' + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + input_type: {1} + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path, input_type) + input_reader_proto = input_reader_pb2.InputReader() + input_reader_proto.label_map_path = label_map_path + text_format.Merge(input_reader_text_proto, input_reader_proto) + reduce_to_frame_fn = self.get_mock_reduce_to_frame_fn() + + def graph_fn(): + return get_iterator_next_for_testing( + dataset_builder.build(input_reader_proto, batch_size=1, + reduce_to_frame_fn=reduce_to_frame_fn), + self.is_tf2()) + + output_dict = self.execute(graph_fn, []) + + self.assertEqual((1,), + output_dict[fields.InputDataFields.source_id].shape) + + def test_build_tf_record_input_reader_sequence_example_test(self): + tf_record_path = self.create_tf_record_sequence_example() + input_type = 'TF_SEQUENCE_EXAMPLE' + label_map_path = _get_labelmap_path() + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + input_type: {1} + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path, input_type) + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + input_reader_proto.label_map_path = label_map_path + reduce_to_frame_fn = self.get_mock_reduce_to_frame_fn() + def graph_fn(): + return get_iterator_next_for_testing( + dataset_builder.build(input_reader_proto, batch_size=1, + reduce_to_frame_fn=reduce_to_frame_fn), + self.is_tf2()) + + output_dict = self.execute(graph_fn, []) + + self.assertEqual((1,), + output_dict[fields.InputDataFields.source_id].shape) + + def test_build_tf_record_input_reader_and_load_instance_masks(self): + tf_record_path = self.create_tf_record() + + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + 
load_instance_masks: true + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path) + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + + def graph_fn(): + return get_iterator_next_for_testing( + dataset_builder.build(input_reader_proto, batch_size=1), + self.is_tf2() + ) + + output_dict = self.execute(graph_fn, []) + self.assertAllEqual( + (1, 1, 4, 5), + output_dict[fields.InputDataFields.groundtruth_instance_masks].shape) + + def test_build_tf_record_input_reader_with_batch_size_two(self): + tf_record_path = self.create_tf_record() + + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path) + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + + def one_hot_class_encoding_fn(tensor_dict): + tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.one_hot( + tensor_dict[fields.InputDataFields.groundtruth_classes] - 1, depth=3) + return tensor_dict + + def graph_fn(): + return dataset_builder.make_initializable_iterator( + dataset_builder.build( + input_reader_proto, + transform_input_data_fn=one_hot_class_encoding_fn, + batch_size=2)).get_next() + + output_dict = self.execute(graph_fn, []) + + self.assertAllEqual([2, 4, 5, 3], + output_dict[fields.InputDataFields.image].shape) + self.assertAllEqual( + [2, 1, 3], + output_dict[fields.InputDataFields.groundtruth_classes].shape) + self.assertAllEqual( + [2, 1, 4], output_dict[fields.InputDataFields.groundtruth_boxes].shape) + self.assertAllEqual([[[0.0, 0.0, 1.0, 1.0]], [[0.0, 0.0, 1.0, 1.0]]], + output_dict[fields.InputDataFields.groundtruth_boxes]) + + def test_build_tf_record_input_reader_with_batch_size_two_and_masks(self): + tf_record_path = self.create_tf_record() + + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + load_instance_masks: true + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path) + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + + def one_hot_class_encoding_fn(tensor_dict): + tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.one_hot( + tensor_dict[fields.InputDataFields.groundtruth_classes] - 1, depth=3) + return tensor_dict + + def graph_fn(): + return dataset_builder.make_initializable_iterator( + dataset_builder.build( + input_reader_proto, + transform_input_data_fn=one_hot_class_encoding_fn, + batch_size=2)).get_next() + + output_dict = self.execute(graph_fn, []) + + self.assertAllEqual( + [2, 1, 4, 5], + output_dict[fields.InputDataFields.groundtruth_instance_masks].shape) + + def test_raises_error_with_no_input_paths(self): + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + load_instance_masks: true + """ + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + with self.assertRaises(ValueError): + dataset_builder.build(input_reader_proto, batch_size=1) + + def test_sample_all_data(self): + tf_record_path = self.create_tf_record(num_examples_per_shard=2) + + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + sample_1_of_n_examples: 1 + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path) + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + + def 
graph_fn(): + dataset = dataset_builder.build(input_reader_proto, batch_size=1) + sample1_ds = dataset.take(1) + sample2_ds = dataset.skip(1) + iter1 = dataset_builder.make_initializable_iterator(sample1_ds) + iter2 = dataset_builder.make_initializable_iterator(sample2_ds) + + return iter1.get_next(), iter2.get_next() + + output_dict1, output_dict2 = self.execute(graph_fn, []) + self.assertAllEqual(['0'], output_dict1[fields.InputDataFields.source_id]) + self.assertEqual([b'1'], output_dict2[fields.InputDataFields.source_id]) + + def test_sample_one_of_n_shards(self): + tf_record_path = self.create_tf_record(num_examples_per_shard=4) + + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + sample_1_of_n_examples: 2 + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path) + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + + def graph_fn(): + dataset = dataset_builder.build(input_reader_proto, batch_size=1) + sample1_ds = dataset.take(1) + sample2_ds = dataset.skip(1) + iter1 = dataset_builder.make_initializable_iterator(sample1_ds) + iter2 = dataset_builder.make_initializable_iterator(sample2_ds) + + return iter1.get_next(), iter2.get_next() + + output_dict1, output_dict2 = self.execute(graph_fn, []) + self.assertAllEqual([b'0'], output_dict1[fields.InputDataFields.source_id]) + self.assertEqual([b'2'], output_dict2[fields.InputDataFields.source_id]) + + def test_no_input_context(self): + """Test that all samples are read with no input context given.""" + tf_record_path = self.create_tf_record(num_examples_per_shard=16, + num_shards=2) + + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + num_epochs: 1 + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path) + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + + for i in range(4): + + # pylint:disable=cell-var-from-loop + def graph_fn(): + dataset = dataset_builder.build(input_reader_proto, batch_size=8) + dataset = dataset.skip(i) + return get_iterator_next_for_testing(dataset, self.is_tf2()) + + batch = self.execute(graph_fn, []) + self.assertEqual(batch['image'].shape, (8, 4, 5, 3)) + + def graph_fn_last_batch(): + dataset = dataset_builder.build(input_reader_proto, batch_size=8) + dataset = dataset.skip(4) + return get_iterator_next_for_testing(dataset, self.is_tf2()) + + self.assertRaises(tf.errors.OutOfRangeError, self.execute, + compute_fn=graph_fn_last_batch, inputs=[]) + + def test_with_input_context(self): + """Test that a subset is read with input context given.""" + tf_record_path = self.create_tf_record(num_examples_per_shard=16, + num_shards=2) + + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + num_epochs: 1 + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path) + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + + input_context = tf.distribute.InputContext( + num_input_pipelines=2, input_pipeline_id=0, num_replicas_in_sync=4 + ) + + for i in range(8): + + # pylint:disable=cell-var-from-loop + def graph_fn(): + + dataset = dataset_builder.build(input_reader_proto, batch_size=8, + input_context=input_context) + dataset = dataset.skip(i) + return get_iterator_next_for_testing(dataset, self.is_tf2()) + + batch = self.execute(graph_fn, []) + self.assertEqual(batch['image'].shape, (2, 4, 5, 3)) + + def 
graph_fn_last_batch(): + dataset = dataset_builder.build(input_reader_proto, batch_size=8, + input_context=input_context) + dataset = dataset.skip(8) + return get_iterator_next_for_testing(dataset, self.is_tf2()) + + self.assertRaises(tf.errors.OutOfRangeError, self.execute, + compute_fn=graph_fn_last_batch, inputs=[]) + + +class ReadDatasetTest(test_case.TestCase): + + def setUp(self): + self._path_template = os.path.join(self.get_temp_dir(), 'examples_%s.txt') + + for i in range(5): + path = self._path_template % i + with tf.gfile.Open(path, 'wb') as f: + f.write('\n'.join([str(i + 1), str((i + 1) * 10)])) + + self._shuffle_path_template = os.path.join(self.get_temp_dir(), + 'shuffle_%s.txt') + for i in range(2): + path = self._shuffle_path_template % i + with tf.gfile.Open(path, 'wb') as f: + f.write('\n'.join([str(i)] * 5)) + + super(ReadDatasetTest, self).setUp() + + def _get_dataset_next(self, files, config, batch_size, num_batches_skip=0): + + def decode_func(value): + return [tf.string_to_number(value, out_type=tf.int32)] + + dataset = dataset_builder.read_dataset(tf.data.TextLineDataset, files, + config) + dataset = dataset.map(decode_func) + dataset = dataset.batch(batch_size) + + if num_batches_skip > 0: + dataset = dataset.skip(num_batches_skip) + + return get_iterator_next_for_testing(dataset, self.is_tf2()) + + def test_make_initializable_iterator_with_hashTable(self): + + def graph_fn(): + keys = [1, 0, -1] + dataset = tf.data.Dataset.from_tensor_slices([[1, 2, -1, 5]]) + try: + # Dynamically try to load the tf v2 lookup, falling back to contrib + lookup = tf.compat.v2.lookup + hash_table_class = tf.compat.v2.lookup.StaticHashTable + except AttributeError: + lookup = contrib_lookup + hash_table_class = contrib_lookup.HashTable + table = hash_table_class( + initializer=lookup.KeyValueTensorInitializer( + keys=keys, values=list(reversed(keys))), + default_value=100) + dataset = dataset.map(table.lookup) + return dataset_builder.make_initializable_iterator(dataset).get_next() + + result = self.execute(graph_fn, []) + self.assertAllEqual(result, [-1, 100, 1, 100]) + + def test_read_dataset(self): + config = input_reader_pb2.InputReader() + config.num_readers = 1 + config.shuffle = False + + def graph_fn(): + return self._get_dataset_next( + [self._path_template % '*'], config, batch_size=20) + + data = self.execute(graph_fn, []) + # Note that the execute function extracts single outputs if the return + # value is of size 1. + self.assertCountEqual( + data, [ + 1, 10, 2, 20, 3, 30, 4, 40, 5, 50, 1, 10, 2, 20, 3, 30, 4, 40, 5, + 50 + ]) + + def test_reduce_num_reader(self): + config = input_reader_pb2.InputReader() + config.num_readers = 10 + config.shuffle = False + + def graph_fn(): + return self._get_dataset_next( + [self._path_template % '*'], config, batch_size=20) + + data = self.execute(graph_fn, []) + # Note that the execute function extracts single outputs if the return + # value is of size 1. + self.assertCountEqual( + data, [ + 1, 10, 2, 20, 3, 30, 4, 40, 5, 50, 1, 10, 2, 20, 3, 30, 4, 40, 5, + 50 + ]) + + def test_enable_shuffle(self): + config = input_reader_pb2.InputReader() + config.num_readers = 1 + config.shuffle = True + + tf.set_random_seed(1) # Set graph level seed. 
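+    # With shuffle enabled, records from the two shuffle_*.txt files are read
+    # in a non-deterministic order, so the assertion below only checks that
+    # the batch differs from the deterministic, file-ordered output
+    # [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]; it does not pin down any particular
+    # permutation.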
+ + def graph_fn(): + return self._get_dataset_next( + [self._shuffle_path_template % '*'], config, batch_size=10) + expected_non_shuffle_output = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1] + data = self.execute(graph_fn, []) + + self.assertTrue( + np.any(np.not_equal(data, expected_non_shuffle_output))) + + def test_disable_shuffle_(self): + config = input_reader_pb2.InputReader() + config.num_readers = 1 + config.shuffle = False + + def graph_fn(): + return self._get_dataset_next( + [self._shuffle_path_template % '*'], config, batch_size=10) + expected_non_shuffle_output1 = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1] + expected_non_shuffle_output2 = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0] + + # Note that the execute function extracts single outputs if the return + # value is of size 1. + data = self.execute(graph_fn, []) + self.assertTrue(all(data == expected_non_shuffle_output1) or + all(data == expected_non_shuffle_output2)) + + def test_read_dataset_single_epoch(self): + config = input_reader_pb2.InputReader() + config.num_epochs = 1 + config.num_readers = 1 + config.shuffle = False + + def graph_fn(): + return self._get_dataset_next( + [self._path_template % '0'], config, batch_size=30) + + data = self.execute(graph_fn, []) + + # Note that the execute function extracts single outputs if the return + # value is of size 1. + self.assertAllEqual(data, [1, 10]) + + # First batch will retrieve as much as it can, second batch will fail. + def graph_fn_second_batch(): + return self._get_dataset_next( + [self._path_template % '0'], config, batch_size=30, + num_batches_skip=1) + + self.assertRaises(tf.errors.OutOfRangeError, self.execute, + compute_fn=graph_fn_second_batch, inputs=[]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/builders/decoder_builder.py b/models/research/object_detection/builders/decoder_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..59880735cd3fd6be3d4e9c567af615227d0a1fb1 --- /dev/null +++ b/models/research/object_detection/builders/decoder_builder.py @@ -0,0 +1,70 @@ +# Lint as: python2, python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""DataDecoder builder. + +Creates DataDecoders from InputReader configs. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from object_detection.data_decoders import tf_example_decoder +from object_detection.data_decoders import tf_sequence_example_decoder +from object_detection.protos import input_reader_pb2 + + +def build(input_reader_config): + """Builds a DataDecoder based only on the open source config proto. + + Args: + input_reader_config: An input_reader_pb2.InputReader object. + + Returns: + A DataDecoder based on the input_reader_config. + + Raises: + ValueError: On invalid input reader proto. 
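+
+  Example:
+    A minimal usage sketch (mirroring the unit tests; `serialized_tf_example`
+    is a stand-in for a real serialized tf.train.Example, and `text_format`
+    is google.protobuf's text_format module):
+
+      config = input_reader_pb2.InputReader()
+      text_format.Parse('tf_record_input_reader {}', config)
+      decoder = build(config)
+      tensor_dict = decoder.decode(serialized_tf_example)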
+ """ + if not isinstance(input_reader_config, input_reader_pb2.InputReader): + raise ValueError('input_reader_config not of type ' + 'input_reader_pb2.InputReader.') + + if input_reader_config.WhichOneof('input_reader') == 'tf_record_input_reader': + label_map_proto_file = None + if input_reader_config.HasField('label_map_path'): + label_map_proto_file = input_reader_config.label_map_path + input_type = input_reader_config.input_type + if input_type == input_reader_pb2.InputType.Value('TF_EXAMPLE'): + decoder = tf_example_decoder.TfExampleDecoder( + load_instance_masks=input_reader_config.load_instance_masks, + load_multiclass_scores=input_reader_config.load_multiclass_scores, + load_context_features=input_reader_config.load_context_features, + instance_mask_type=input_reader_config.mask_type, + label_map_proto_file=label_map_proto_file, + use_display_name=input_reader_config.use_display_name, + num_additional_channels=input_reader_config.num_additional_channels, + num_keypoints=input_reader_config.num_keypoints, + expand_hierarchy_labels=input_reader_config.expand_labels_hierarchy) + return decoder + elif input_type == input_reader_pb2.InputType.Value('TF_SEQUENCE_EXAMPLE'): + decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder( + label_map_proto_file=label_map_proto_file, + load_context_features=input_reader_config.load_context_features) + return decoder + raise ValueError('Unsupported input_type in config.') + + raise ValueError('Unsupported input_reader_config.') diff --git a/models/research/object_detection/builders/decoder_builder_test.py b/models/research/object_detection/builders/decoder_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..d45285fd19f7648ab4d9365b155ba35a2ce0d3ed --- /dev/null +++ b/models/research/object_detection/builders/decoder_builder_test.py @@ -0,0 +1,193 @@ +# Lint as: python2, python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for decoder_builder.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import numpy as np +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import decoder_builder +from object_detection.core import standard_fields as fields +from object_detection.dataset_tools import seq_example_util +from object_detection.protos import input_reader_pb2 +from object_detection.utils import dataset_util +from object_detection.utils import test_case + + +def _get_labelmap_path(): + """Returns an absolute path to label map file.""" + parent_path = os.path.dirname(tf.resource_loader.get_data_files_path()) + return os.path.join(parent_path, 'data', + 'pet_label_map.pbtxt') + + +class DecoderBuilderTest(test_case.TestCase): + + def _make_serialized_tf_example(self, has_additional_channels=False): + image_tensor_np = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8) + additional_channels_tensor_np = np.random.randint( + 255, size=(4, 5, 1)).astype(np.uint8) + flat_mask = (4 * 5) * [1.0] + def graph_fn(image_tensor): + encoded_jpeg = tf.image.encode_jpeg(image_tensor) + return encoded_jpeg + encoded_jpeg = self.execute_cpu(graph_fn, [image_tensor_np]) + encoded_additional_channels_jpeg = self.execute_cpu( + graph_fn, [additional_channels_tensor_np]) + + features = { + 'image/source_id': dataset_util.bytes_feature('0'.encode()), + 'image/encoded': dataset_util.bytes_feature(encoded_jpeg), + 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')), + 'image/height': dataset_util.int64_feature(4), + 'image/width': dataset_util.int64_feature(5), + 'image/object/bbox/xmin': dataset_util.float_list_feature([0.0]), + 'image/object/bbox/xmax': dataset_util.float_list_feature([1.0]), + 'image/object/bbox/ymin': dataset_util.float_list_feature([0.0]), + 'image/object/bbox/ymax': dataset_util.float_list_feature([1.0]), + 'image/object/class/label': dataset_util.int64_list_feature([2]), + 'image/object/mask': dataset_util.float_list_feature(flat_mask), + } + if has_additional_channels: + additional_channels_key = 'image/additional_channels/encoded' + features[additional_channels_key] = dataset_util.bytes_list_feature( + [encoded_additional_channels_jpeg] * 2) + example = tf.train.Example(features=tf.train.Features(feature=features)) + return example.SerializeToString() + + def _make_random_serialized_jpeg_images(self, num_frames, image_height, + image_width): + def graph_fn(): + images = tf.cast(tf.random.uniform( + [num_frames, image_height, image_width, 3], + maxval=256, + dtype=tf.int32), dtype=tf.uint8) + images_list = tf.unstack(images, axis=0) + encoded_images = [tf.io.encode_jpeg(image) for image in images_list] + return encoded_images + return self.execute_cpu(graph_fn, []) + + def _make_serialized_tf_sequence_example(self): + num_frames = 4 + image_height = 20 + image_width = 30 + image_source_ids = [str(i) for i in range(num_frames)] + encoded_images = self._make_random_serialized_jpeg_images( + num_frames, image_height, image_width) + sequence_example_serialized = seq_example_util.make_sequence_example( + dataset_name='video_dataset', + video_id='video', + encoded_images=encoded_images, + image_height=image_height, + image_width=image_width, + image_source_ids=image_source_ids, + image_format='JPEG', + is_annotated=[[1], [1], [1], [1]], + bboxes=[ + [[]], # Frame 0. 
+ [[0., 0., 1., 1.]], # Frame 1. + [[0., 0., 1., 1.], + [0.1, 0.1, 0.2, 0.2]], # Frame 2. + [[]], # Frame 3. + ], + label_strings=[ + [], # Frame 0. + ['Abyssinian'], # Frame 1. + ['Abyssinian', 'american_bulldog'], # Frame 2. + [], # Frame 3 + ]).SerializeToString() + return sequence_example_serialized + + def test_build_tf_record_input_reader(self): + input_reader_text_proto = 'tf_record_input_reader {}' + input_reader_proto = input_reader_pb2.InputReader() + text_format.Parse(input_reader_text_proto, input_reader_proto) + + decoder = decoder_builder.build(input_reader_proto) + serialized_seq_example = self._make_serialized_tf_example() + def graph_fn(): + tensor_dict = decoder.decode(serialized_seq_example) + return (tensor_dict[fields.InputDataFields.image], + tensor_dict[fields.InputDataFields.groundtruth_classes], + tensor_dict[fields.InputDataFields.groundtruth_boxes]) + + (image, groundtruth_classes, + groundtruth_boxes) = self.execute_cpu(graph_fn, []) + self.assertEqual((4, 5, 3), image.shape) + self.assertAllEqual([2], groundtruth_classes) + self.assertEqual((1, 4), groundtruth_boxes.shape) + self.assertAllEqual([0.0, 0.0, 1.0, 1.0], groundtruth_boxes[0]) + + def test_build_tf_record_input_reader_sequence_example(self): + label_map_path = _get_labelmap_path() + input_reader_text_proto = """ + input_type: TF_SEQUENCE_EXAMPLE + tf_record_input_reader {} + """ + input_reader_proto = input_reader_pb2.InputReader() + input_reader_proto.label_map_path = label_map_path + text_format.Parse(input_reader_text_proto, input_reader_proto) + + serialized_seq_example = self._make_serialized_tf_sequence_example() + def graph_fn(): + decoder = decoder_builder.build(input_reader_proto) + tensor_dict = decoder.decode(serialized_seq_example) + return (tensor_dict[fields.InputDataFields.image], + tensor_dict[fields.InputDataFields.groundtruth_classes], + tensor_dict[fields.InputDataFields.groundtruth_boxes], + tensor_dict[fields.InputDataFields.num_groundtruth_boxes]) + (actual_image, actual_groundtruth_classes, actual_groundtruth_boxes, + actual_num_groundtruth_boxes) = self.execute_cpu(graph_fn, []) + expected_groundtruth_classes = [[-1, -1], [1, -1], [1, 2], [-1, -1]] + expected_groundtruth_boxes = [[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 1.0, 1.0], [0.1, 0.1, 0.2, 0.2]], + [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]] + expected_num_groundtruth_boxes = [0, 1, 2, 0] + + # Sequence example images are encoded. 
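+    # The sequence decoder returns the per-frame JPEG strings rather than
+    # decoded pixel arrays, so the image tensor has one entry per frame,
+    # i.e. shape (num_frames,) = (4,), unlike the decoded (height, width, 3)
+    # image checked in test_build_tf_record_input_reader above.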
+ self.assertEqual((4,), actual_image.shape) + self.assertAllEqual(expected_groundtruth_classes, + actual_groundtruth_classes) + self.assertAllClose(expected_groundtruth_boxes, + actual_groundtruth_boxes) + self.assertAllClose( + expected_num_groundtruth_boxes, actual_num_groundtruth_boxes) + + def test_build_tf_record_input_reader_and_load_instance_masks(self): + input_reader_text_proto = """ + load_instance_masks: true + tf_record_input_reader {} + """ + input_reader_proto = input_reader_pb2.InputReader() + text_format.Parse(input_reader_text_proto, input_reader_proto) + + decoder = decoder_builder.build(input_reader_proto) + serialized_seq_example = self._make_serialized_tf_example() + def graph_fn(): + tensor_dict = decoder.decode(serialized_seq_example) + return tensor_dict[fields.InputDataFields.groundtruth_instance_masks] + masks = self.execute_cpu(graph_fn, []) + self.assertAllEqual((1, 4, 5), masks.shape) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/builders/graph_rewriter_builder.py b/models/research/object_detection/builders/graph_rewriter_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..9cbeb4a1f687a6c194d3d8226fea1629e2f34a18 --- /dev/null +++ b/models/research/object_detection/builders/graph_rewriter_builder.py @@ -0,0 +1,53 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Functions for quantized training and evaluation.""" + +import tensorflow.compat.v1 as tf +import tf_slim as slim +# pylint: disable=g-import-not-at-top +try: + from tensorflow.contrib import quantize as contrib_quantize +except ImportError: + # TF 2.0 doesn't ship with contrib. + pass +# pylint: enable=g-import-not-at-top + + +def build(graph_rewriter_config, is_training): + """Returns a function that modifies default graph based on options. + + Args: + graph_rewriter_config: graph_rewriter_pb2.GraphRewriter proto. + is_training: whether in training of eval mode. 
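+
+  Returns:
+    A no-argument function that, when called, rewrites the default graph in
+    place for quantized training (if `is_training` is True) or evaluation.
+
+  Example:
+    A minimal usage sketch (the field values mirror the unit test and are
+    illustrative only):
+
+      rewriter_config = graph_rewriter_pb2.GraphRewriter()
+      rewriter_config.quantization.delay = 10
+      rewriter_config.quantization.weight_bits = 8
+      rewriter_config.quantization.activation_bits = 8
+      graph_rewrite_fn = build(rewriter_config, is_training=True)
+      graph_rewrite_fn()  # Inserts fake-quantization ops into the graph.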
+ """ + def graph_rewrite_fn(): + """Function to quantize weights and activation of the default graph.""" + if (graph_rewriter_config.quantization.weight_bits != 8 or + graph_rewriter_config.quantization.activation_bits != 8): + raise ValueError('Only 8bit quantization is supported') + + # Quantize the graph by inserting quantize ops for weights and activations + if is_training: + contrib_quantize.experimental_create_training_graph( + input_graph=tf.get_default_graph(), + quant_delay=graph_rewriter_config.quantization.delay + ) + else: + contrib_quantize.experimental_create_eval_graph( + input_graph=tf.get_default_graph() + ) + slim.summarize_collection('quant_vars') + + return graph_rewrite_fn diff --git a/models/research/object_detection/builders/graph_rewriter_builder_tf1_test.py b/models/research/object_detection/builders/graph_rewriter_builder_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..8af8fe9627bf3041b0499909cc298d1790810753 --- /dev/null +++ b/models/research/object_detection/builders/graph_rewriter_builder_tf1_test.py @@ -0,0 +1,67 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for graph_rewriter_builder.""" +import unittest +import mock +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.builders import graph_rewriter_builder +from object_detection.protos import graph_rewriter_pb2 +from object_detection.utils import tf_version + + +if tf_version.is_tf1(): + from tensorflow.contrib import quantize as contrib_quantize # pylint: disable=g-import-not-at-top + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class QuantizationBuilderTest(tf.test.TestCase): + + def testQuantizationBuilderSetsUpCorrectTrainArguments(self): + with mock.patch.object( + contrib_quantize, + 'experimental_create_training_graph') as mock_quant_fn: + with mock.patch.object(slim, + 'summarize_collection') as mock_summarize_col: + graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter() + graph_rewriter_proto.quantization.delay = 10 + graph_rewriter_proto.quantization.weight_bits = 8 + graph_rewriter_proto.quantization.activation_bits = 8 + graph_rewrite_fn = graph_rewriter_builder.build( + graph_rewriter_proto, is_training=True) + graph_rewrite_fn() + _, kwargs = mock_quant_fn.call_args + self.assertEqual(kwargs['input_graph'], tf.get_default_graph()) + self.assertEqual(kwargs['quant_delay'], 10) + mock_summarize_col.assert_called_with('quant_vars') + + def testQuantizationBuilderSetsUpCorrectEvalArguments(self): + with mock.patch.object(contrib_quantize, + 'experimental_create_eval_graph') as mock_quant_fn: + with mock.patch.object(slim, + 'summarize_collection') as mock_summarize_col: + graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter() + graph_rewriter_proto.quantization.delay = 10 + graph_rewrite_fn = graph_rewriter_builder.build( + graph_rewriter_proto, 
is_training=False) + graph_rewrite_fn() + _, kwargs = mock_quant_fn.call_args + self.assertEqual(kwargs['input_graph'], tf.get_default_graph()) + mock_summarize_col.assert_called_with('quant_vars') + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/builders/hyperparams_builder.py b/models/research/object_detection/builders/hyperparams_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..f34e1112a81bd9dad1c30ba39af6b1a20a252d2c --- /dev/null +++ b/models/research/object_detection/builders/hyperparams_builder.py @@ -0,0 +1,425 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Builder function to construct tf-slim arg_scope for convolution, fc ops.""" +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.core import freezable_batch_norm +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import context_manager + +# pylint: enable=g-import-not-at-top + + +class KerasLayerHyperparams(object): + """ + A hyperparameter configuration object for Keras layers used in + Object Detection models. + """ + + def __init__(self, hyperparams_config): + """Builds keras hyperparameter config for layers based on the proto config. + + It automatically converts from Slim layer hyperparameter configs to + Keras layer hyperparameters. Namely, it: + - Builds Keras initializers/regularizers instead of Slim ones + - sets weights_regularizer/initializer to kernel_regularizer/initializer + - converts batchnorm decay to momentum + - converts Slim l2 regularizer weights to the equivalent Keras l2 weights + + Contains a hyperparameter configuration for ops that specifies kernel + initializer, kernel regularizer, activation. Also contains parameters for + batch norm operators based on the configuration. + + Note that if the batch_norm parameters are not specified in the config + (i.e. left to default) then batch norm is excluded from the config. + + Args: + hyperparams_config: hyperparams.proto object containing + hyperparameters. + + Raises: + ValueError: if hyperparams_config is not of type hyperparams.Hyperparams. + """ + if not isinstance(hyperparams_config, + hyperparams_pb2.Hyperparams): + raise ValueError('hyperparams_config not of type ' + 'hyperparams_pb.Hyperparams.') + + self._batch_norm_params = None + if hyperparams_config.HasField('batch_norm'): + self._batch_norm_params = _build_keras_batch_norm_params( + hyperparams_config.batch_norm) + + self._activation_fn = _build_activation_fn(hyperparams_config.activation) + # TODO(kaftan): Unclear if these kwargs apply to separable & depthwise conv + # (Those might use depthwise_* instead of kernel_*) + # We should probably switch to using build_conv2d_layer and + # build_depthwise_conv2d_layer methods instead. 
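+    # _op_params holds the Keras-style construction kwargs described in the
+    # class docstring: the Slim weights_regularizer/weights_initializer pair
+    # is exposed here under the Keras names kernel_regularizer and
+    # kernel_initializer, alongside the activation function.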
+ self._op_params = { + 'kernel_regularizer': _build_keras_regularizer( + hyperparams_config.regularizer), + 'kernel_initializer': _build_initializer( + hyperparams_config.initializer, build_for_keras=True), + 'activation': _build_activation_fn(hyperparams_config.activation) + } + + def use_batch_norm(self): + return self._batch_norm_params is not None + + def batch_norm_params(self, **overrides): + """Returns a dict containing batchnorm layer construction hyperparameters. + + Optionally overrides values in the batchnorm hyperparam dict. Overrides + only apply to individual calls of this method, and do not affect + future calls. + + Args: + **overrides: keyword arguments to override in the hyperparams dictionary + + Returns: dict containing the layer construction keyword arguments, with + values overridden by the `overrides` keyword arguments. + """ + if self._batch_norm_params is None: + new_batch_norm_params = dict() + else: + new_batch_norm_params = self._batch_norm_params.copy() + new_batch_norm_params.update(overrides) + return new_batch_norm_params + + def build_batch_norm(self, training=None, **overrides): + """Returns a Batch Normalization layer with the appropriate hyperparams. + + If the hyperparams are configured to not use batch normalization, + this will return a Keras Lambda layer that only applies tf.Identity, + without doing any normalization. + + Optionally overrides values in the batch_norm hyperparam dict. Overrides + only apply to individual calls of this method, and do not affect + future calls. + + Args: + training: if True, the normalization layer will normalize using the batch + statistics. If False, the normalization layer will be frozen and will + act as if it is being used for inference. If None, the layer + will look up the Keras learning phase at `call` time to decide what to + do. + **overrides: batch normalization construction args to override from the + batch_norm hyperparams dictionary. + + Returns: Either a FreezableBatchNorm layer (if use_batch_norm() is True), + or a Keras Lambda layer that applies the identity (if use_batch_norm() + is False) + """ + if self.use_batch_norm(): + return freezable_batch_norm.FreezableBatchNorm( + training=training, + **self.batch_norm_params(**overrides) + ) + else: + return tf.keras.layers.Lambda(tf.identity) + + def build_activation_layer(self, name='activation'): + """Returns a Keras layer that applies the desired activation function. + + Args: + name: The name to assign the Keras layer. + Returns: A Keras lambda layer that applies the activation function + specified in the hyperparam config, or applies the identity if the + activation function is None. + """ + if self._activation_fn: + return tf.keras.layers.Lambda(self._activation_fn, name=name) + else: + return tf.keras.layers.Lambda(tf.identity, name=name) + + def params(self, include_activation=False, **overrides): + """Returns a dict containing the layer construction hyperparameters to use. + + Optionally overrides values in the returned dict. Overrides + only apply to individual calls of this method, and do not affect + future calls. + + Args: + include_activation: If False, activation in the returned dictionary will + be set to `None`, and the activation must be applied via a separate + layer created by `build_activation_layer`. If True, `activation` in the + output param dictionary will be set to the activation function + specified in the hyperparams config. + **overrides: keyword arguments to override in the hyperparams dictionary. 
+ + Returns: dict containing the layer construction keyword arguments, with + values overridden by the `overrides` keyword arguments. + """ + new_params = self._op_params.copy() + new_params['activation'] = None + if include_activation: + new_params['activation'] = self._activation_fn + if self.use_batch_norm() and self.batch_norm_params()['center']: + new_params['use_bias'] = False + else: + new_params['use_bias'] = True + new_params.update(**overrides) + return new_params + + +def build(hyperparams_config, is_training): + """Builds tf-slim arg_scope for convolution ops based on the config. + + Returns an arg_scope to use for convolution ops containing weights + initializer, weights regularizer, activation function, batch norm function + and batch norm parameters based on the configuration. + + Note that if no normalization parameters are specified in the config, + (i.e. left to default) then both batch norm and group norm are excluded + from the arg_scope. + + The batch norm parameters are set for updates based on `is_training` argument + and conv_hyperparams_config.batch_norm.train parameter. During training, they + are updated only if batch_norm.train parameter is true. However, during eval, + no updates are made to the batch norm variables. In both cases, their current + values are used during forward pass. + + Args: + hyperparams_config: hyperparams.proto object containing + hyperparameters. + is_training: Whether the network is in training mode. + + Returns: + arg_scope_fn: A function to construct tf-slim arg_scope containing + hyperparameters for ops. + + Raises: + ValueError: if hyperparams_config is not of type hyperparams.Hyperparams. + """ + if not isinstance(hyperparams_config, + hyperparams_pb2.Hyperparams): + raise ValueError('hyperparams_config not of type ' + 'hyperparams_pb.Hyperparams.') + + normalizer_fn = None + batch_norm_params = None + if hyperparams_config.HasField('batch_norm'): + normalizer_fn = slim.batch_norm + batch_norm_params = _build_batch_norm_params( + hyperparams_config.batch_norm, is_training) + if hyperparams_config.HasField('group_norm'): + normalizer_fn = slim.group_norm + affected_ops = [slim.conv2d, slim.separable_conv2d, slim.conv2d_transpose] + if hyperparams_config.HasField('op') and ( + hyperparams_config.op == hyperparams_pb2.Hyperparams.FC): + affected_ops = [slim.fully_connected] + def scope_fn(): + with (slim.arg_scope([slim.batch_norm], **batch_norm_params) + if batch_norm_params is not None else + context_manager.IdentityContextManager()): + with slim.arg_scope( + affected_ops, + weights_regularizer=_build_slim_regularizer( + hyperparams_config.regularizer), + weights_initializer=_build_initializer( + hyperparams_config.initializer), + activation_fn=_build_activation_fn(hyperparams_config.activation), + normalizer_fn=normalizer_fn) as sc: + return sc + + return scope_fn + + +def _build_activation_fn(activation_fn): + """Builds a callable activation from config. + + Args: + activation_fn: hyperparams_pb2.Hyperparams.activation + + Returns: + Callable activation function. + + Raises: + ValueError: On unknown activation function. 
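+
+  For example, hyperparams_pb2.Hyperparams.RELU_6 maps to tf.nn.relu6 and
+  NONE maps to None (no activation), per the mapping in the function body.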
+ """ + if activation_fn == hyperparams_pb2.Hyperparams.NONE: + return None + if activation_fn == hyperparams_pb2.Hyperparams.RELU: + return tf.nn.relu + if activation_fn == hyperparams_pb2.Hyperparams.RELU_6: + return tf.nn.relu6 + if activation_fn == hyperparams_pb2.Hyperparams.SWISH: + return tf.nn.swish + raise ValueError('Unknown activation function: {}'.format(activation_fn)) + + +def _build_slim_regularizer(regularizer): + """Builds a tf-slim regularizer from config. + + Args: + regularizer: hyperparams_pb2.Hyperparams.regularizer proto. + + Returns: + tf-slim regularizer. + + Raises: + ValueError: On unknown regularizer. + """ + regularizer_oneof = regularizer.WhichOneof('regularizer_oneof') + if regularizer_oneof == 'l1_regularizer': + return slim.l1_regularizer(scale=float(regularizer.l1_regularizer.weight)) + if regularizer_oneof == 'l2_regularizer': + return slim.l2_regularizer(scale=float(regularizer.l2_regularizer.weight)) + if regularizer_oneof is None: + return None + raise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof)) + + +def _build_keras_regularizer(regularizer): + """Builds a keras regularizer from config. + + Args: + regularizer: hyperparams_pb2.Hyperparams.regularizer proto. + + Returns: + Keras regularizer. + + Raises: + ValueError: On unknown regularizer. + """ + regularizer_oneof = regularizer.WhichOneof('regularizer_oneof') + if regularizer_oneof == 'l1_regularizer': + return tf.keras.regularizers.l1(float(regularizer.l1_regularizer.weight)) + if regularizer_oneof == 'l2_regularizer': + # The Keras L2 regularizer weight differs from the Slim L2 regularizer + # weight by a factor of 2 + return tf.keras.regularizers.l2( + float(regularizer.l2_regularizer.weight * 0.5)) + if regularizer_oneof is None: + return None + raise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof)) + + +def _build_initializer(initializer, build_for_keras=False): + """Build a tf initializer from config. + + Args: + initializer: hyperparams_pb2.Hyperparams.regularizer proto. + build_for_keras: Whether the initializers should be built for Keras + operators. If false builds for Slim. + + Returns: + tf initializer. + + Raises: + ValueError: On unknown initializer. + """ + initializer_oneof = initializer.WhichOneof('initializer_oneof') + if initializer_oneof == 'truncated_normal_initializer': + return tf.truncated_normal_initializer( + mean=initializer.truncated_normal_initializer.mean, + stddev=initializer.truncated_normal_initializer.stddev) + if initializer_oneof == 'random_normal_initializer': + return tf.random_normal_initializer( + mean=initializer.random_normal_initializer.mean, + stddev=initializer.random_normal_initializer.stddev) + if initializer_oneof == 'variance_scaling_initializer': + enum_descriptor = (hyperparams_pb2.VarianceScalingInitializer. + DESCRIPTOR.enum_types_by_name['Mode']) + mode = enum_descriptor.values_by_number[initializer. + variance_scaling_initializer. + mode].name + if build_for_keras: + if initializer.variance_scaling_initializer.uniform: + return tf.variance_scaling_initializer( + scale=initializer.variance_scaling_initializer.factor, + mode=mode.lower(), + distribution='uniform') + else: + # In TF 1.9 release and earlier, the truncated_normal distribution was + # not supported correctly. So, in these earlier versions of tensorflow, + # the ValueError will be raised, and we manually truncate the + # distribution scale. 
+ # + # It is insufficient to just set distribution to `normal` from the + # start, because the `normal` distribution in newer Tensorflow versions + # creates a truncated distribution, whereas it created untruncated + # distributions in older versions. + try: + return tf.variance_scaling_initializer( + scale=initializer.variance_scaling_initializer.factor, + mode=mode.lower(), + distribution='truncated_normal') + except ValueError: + truncate_constant = 0.87962566103423978 + truncated_scale = initializer.variance_scaling_initializer.factor / ( + truncate_constant * truncate_constant + ) + return tf.variance_scaling_initializer( + scale=truncated_scale, + mode=mode.lower(), + distribution='normal') + + else: + return slim.variance_scaling_initializer( + factor=initializer.variance_scaling_initializer.factor, + mode=mode, + uniform=initializer.variance_scaling_initializer.uniform) + if initializer_oneof is None: + return None + raise ValueError('Unknown initializer function: {}'.format( + initializer_oneof)) + + +def _build_batch_norm_params(batch_norm, is_training): + """Build a dictionary of batch_norm params from config. + + Args: + batch_norm: hyperparams_pb2.ConvHyperparams.batch_norm proto. + is_training: Whether the models is in training mode. + + Returns: + A dictionary containing batch_norm parameters. + """ + batch_norm_params = { + 'decay': batch_norm.decay, + 'center': batch_norm.center, + 'scale': batch_norm.scale, + 'epsilon': batch_norm.epsilon, + # Remove is_training parameter from here and deprecate it in the proto + # once we refactor Faster RCNN models to set is_training through an outer + # arg_scope in the meta architecture. + 'is_training': is_training and batch_norm.train, + } + return batch_norm_params + + +def _build_keras_batch_norm_params(batch_norm): + """Build a dictionary of Keras BatchNormalization params from config. + + Args: + batch_norm: hyperparams_pb2.ConvHyperparams.batch_norm proto. + + Returns: + A dictionary containing Keras BatchNormalization parameters. + """ + # Note: Although decay is defined to be 1 - momentum in batch_norm, + # decay in the slim batch_norm layers was erroneously defined and is + # actually the same as momentum in the Keras batch_norm layers. + # For context, see: github.com/keras-team/keras/issues/6839 + batch_norm_params = { + 'momentum': batch_norm.decay, + 'center': batch_norm.center, + 'scale': batch_norm.scale, + 'epsilon': batch_norm.epsilon, + } + return batch_norm_params diff --git a/models/research/object_detection/builders/hyperparams_builder_test.py b/models/research/object_detection/builders/hyperparams_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..2c6fcd5af70a64003e62fe41a38b0238863fea97 --- /dev/null +++ b/models/research/object_detection/builders/hyperparams_builder_test.py @@ -0,0 +1,916 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests object_detection.core.hyperparams_builder.""" + +import unittest +import numpy as np +import tensorflow.compat.v1 as tf +import tf_slim as slim +from google.protobuf import text_format + +from object_detection.builders import hyperparams_builder +from object_detection.core import freezable_batch_norm +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import tf_version + + +def _get_scope_key(op): + return getattr(op, '_key_op', str(op)) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only tests.') +class HyperparamsBuilderTest(tf.test.TestCase): + + def test_default_arg_scope_has_conv2d_op(self): + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + self.assertIn(_get_scope_key(slim.conv2d), scope) + + def test_default_arg_scope_has_separable_conv2d_op(self): + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + self.assertIn(_get_scope_key(slim.separable_conv2d), scope) + + def test_default_arg_scope_has_conv2d_transpose_op(self): + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + self.assertIn(_get_scope_key(slim.conv2d_transpose), scope) + + def test_explicit_fc_op_arg_scope_has_fully_connected_op(self): + conv_hyperparams_text_proto = """ + op: FC + regularizer { + l1_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + self.assertIn(_get_scope_key(slim.fully_connected), scope) + + def test_separable_conv2d_and_conv2d_and_transpose_have_same_parameters(self): + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + kwargs_1, kwargs_2, kwargs_3 = scope.values() + self.assertDictEqual(kwargs_1, kwargs_2) + self.assertDictEqual(kwargs_1, kwargs_3) + + def test_return_l1_regularized_weights(self): + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + weight: 0.5 + } + } + initializer { + truncated_normal_initializer { + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + 
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = list(scope.values())[0] + regularizer = conv_scope_arguments['weights_regularizer'] + weights = np.array([1., -1, 4., 2.]) + with self.test_session() as sess: + result = sess.run(regularizer(tf.constant(weights))) + self.assertAllClose(np.abs(weights).sum() * 0.5, result) + + def test_return_l2_regularizer_weights(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + weight: 0.42 + } + } + initializer { + truncated_normal_initializer { + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + + regularizer = conv_scope_arguments['weights_regularizer'] + weights = np.array([1., -1, 4., 2.]) + with self.test_session() as sess: + result = sess.run(regularizer(tf.constant(weights))) + self.assertAllClose(np.power(weights, 2).sum() / 2.0 * 0.42, result) + + def test_return_non_default_batch_norm_params_with_train_during_train(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + batch_norm { + decay: 0.7 + center: false + scale: true + epsilon: 0.03 + train: true + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm) + batch_norm_params = scope[_get_scope_key(slim.batch_norm)] + self.assertAlmostEqual(batch_norm_params['decay'], 0.7) + self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03) + self.assertFalse(batch_norm_params['center']) + self.assertTrue(batch_norm_params['scale']) + self.assertTrue(batch_norm_params['is_training']) + + def test_return_batch_norm_params_with_notrain_during_eval(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + batch_norm { + decay: 0.7 + center: false + scale: true + epsilon: 0.03 + train: true + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=False) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm) + batch_norm_params = scope[_get_scope_key(slim.batch_norm)] + self.assertAlmostEqual(batch_norm_params['decay'], 0.7) + self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03) + self.assertFalse(batch_norm_params['center']) + self.assertTrue(batch_norm_params['scale']) + self.assertFalse(batch_norm_params['is_training']) + + def test_return_batch_norm_params_with_notrain_when_train_is_false(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + batch_norm { + decay: 0.7 + center: false + scale: true + epsilon: 0.03 + train: false + } 
+ """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm) + batch_norm_params = scope[_get_scope_key(slim.batch_norm)] + self.assertAlmostEqual(batch_norm_params['decay'], 0.7) + self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03) + self.assertFalse(batch_norm_params['center']) + self.assertTrue(batch_norm_params['scale']) + self.assertFalse(batch_norm_params['is_training']) + + def test_do_not_use_batch_norm_if_default(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + self.assertEqual(conv_scope_arguments['normalizer_fn'], None) + + def test_use_none_activation(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + activation: NONE + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + self.assertEqual(conv_scope_arguments['activation_fn'], None) + + def test_use_relu_activation(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + activation: RELU + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.relu) + + def test_use_relu_6_activation(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + activation: RELU_6 + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.relu6) + + def test_use_swish_activation(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + activation: SWISH + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.swish) + + def 
_assert_variance_in_range(self, initializer, shape, variance, + tol=1e-2): + with tf.Graph().as_default() as g: + with self.test_session(graph=g) as sess: + var = tf.get_variable( + name='test', + shape=shape, + dtype=tf.float32, + initializer=initializer) + sess.run(tf.global_variables_initializer()) + values = sess.run(var) + self.assertAllClose(np.var(values), variance, tol, tol) + + def test_variance_in_range_with_variance_scaling_initializer_fan_in(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + variance_scaling_initializer { + factor: 2.0 + mode: FAN_IN + uniform: false + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + initializer = conv_scope_arguments['weights_initializer'] + self._assert_variance_in_range(initializer, shape=[100, 40], + variance=2. / 100.) + + def test_variance_in_range_with_variance_scaling_initializer_fan_out(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + variance_scaling_initializer { + factor: 2.0 + mode: FAN_OUT + uniform: false + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + initializer = conv_scope_arguments['weights_initializer'] + self._assert_variance_in_range(initializer, shape=[100, 40], + variance=2. / 40.) + + def test_variance_in_range_with_variance_scaling_initializer_fan_avg(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + variance_scaling_initializer { + factor: 2.0 + mode: FAN_AVG + uniform: false + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + initializer = conv_scope_arguments['weights_initializer'] + self._assert_variance_in_range(initializer, shape=[100, 40], + variance=4. / (100. + 40.)) + + def test_variance_in_range_with_variance_scaling_initializer_uniform(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + variance_scaling_initializer { + factor: 2.0 + mode: FAN_IN + uniform: true + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + initializer = conv_scope_arguments['weights_initializer'] + self._assert_variance_in_range(initializer, shape=[100, 40], + variance=2. / 100.) 
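+
+  # The expected variances in the variance_scaling_initializer tests above
+  # follow directly from its definition, variance = factor / n, where n is
+  # fan_in, fan_out, or their average depending on the mode. For a [100, 40]
+  # weight with factor 2.0 this gives 2.0 / 100 (FAN_IN), 2.0 / 40 (FAN_OUT)
+  # and 2.0 / ((100 + 40) / 2) = 4.0 / 140 (FAN_AVG), matching the values
+  # asserted in those tests.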
+ + def test_variance_in_range_with_truncated_normal_initializer(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + mean: 0.0 + stddev: 0.8 + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + initializer = conv_scope_arguments['weights_initializer'] + self._assert_variance_in_range(initializer, shape=[100, 40], + variance=0.49, tol=1e-1) + + def test_variance_in_range_with_random_normal_initializer(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + random_normal_initializer { + mean: 0.0 + stddev: 0.8 + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + initializer = conv_scope_arguments['weights_initializer'] + self._assert_variance_in_range(initializer, shape=[100, 40], + variance=0.64, tol=1e-1) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only tests.') +class KerasHyperparamsBuilderTest(tf.test.TestCase): + + def _assert_variance_in_range(self, initializer, shape, variance, + tol=1e-2): + var = tf.Variable(initializer(shape=shape, dtype=tf.float32)) + self.assertAllClose(np.var(var.numpy()), variance, tol, tol) + + def test_return_l1_regularized_weights_keras(self): + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + weight: 0.5 + } + } + initializer { + truncated_normal_initializer { + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + + regularizer = keras_config.params()['kernel_regularizer'] + weights = np.array([1., -1, 4., 2.]) + result = regularizer(tf.constant(weights)).numpy() + self.assertAllClose(np.abs(weights).sum() * 0.5, result) + + def test_return_l2_regularizer_weights_keras(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + weight: 0.42 + } + } + initializer { + truncated_normal_initializer { + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + + regularizer = keras_config.params()['kernel_regularizer'] + weights = np.array([1., -1, 4., 2.]) + result = regularizer(tf.constant(weights)).numpy() + self.assertAllClose(np.power(weights, 2).sum() / 2.0 * 0.42, result) + + def test_return_non_default_batch_norm_params_keras( + self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + batch_norm { + decay: 0.7 + center: false + scale: true + epsilon: 0.03 + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + + self.assertTrue(keras_config.use_batch_norm()) + 
batch_norm_params = keras_config.batch_norm_params() + self.assertAlmostEqual(batch_norm_params['momentum'], 0.7) + self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03) + self.assertFalse(batch_norm_params['center']) + self.assertTrue(batch_norm_params['scale']) + + batch_norm_layer = keras_config.build_batch_norm() + self.assertIsInstance(batch_norm_layer, + freezable_batch_norm.FreezableBatchNorm) + + def test_return_non_default_batch_norm_params_keras_override( + self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + batch_norm { + decay: 0.7 + center: false + scale: true + epsilon: 0.03 + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + + self.assertTrue(keras_config.use_batch_norm()) + batch_norm_params = keras_config.batch_norm_params(momentum=0.4) + self.assertAlmostEqual(batch_norm_params['momentum'], 0.4) + self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03) + self.assertFalse(batch_norm_params['center']) + self.assertTrue(batch_norm_params['scale']) + + def test_do_not_use_batch_norm_if_default_keras(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + self.assertFalse(keras_config.use_batch_norm()) + self.assertEqual(keras_config.batch_norm_params(), {}) + + # The batch norm builder should build an identity Lambda layer + identity_layer = keras_config.build_batch_norm() + self.assertIsInstance(identity_layer, + tf.keras.layers.Lambda) + + def test_use_none_activation_keras(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + activation: NONE + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + self.assertIsNone(keras_config.params()['activation']) + self.assertIsNone( + keras_config.params(include_activation=True)['activation']) + activation_layer = keras_config.build_activation_layer() + self.assertIsInstance(activation_layer, tf.keras.layers.Lambda) + self.assertEqual(activation_layer.function, tf.identity) + + def test_use_relu_activation_keras(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + activation: RELU + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + self.assertIsNone(keras_config.params()['activation']) + self.assertEqual( + keras_config.params(include_activation=True)['activation'], tf.nn.relu) + activation_layer = keras_config.build_activation_layer() + self.assertIsInstance(activation_layer, tf.keras.layers.Lambda) + self.assertEqual(activation_layer.function, tf.nn.relu) + + def test_use_relu_6_activation_keras(self): + conv_hyperparams_text_proto = """ + 
regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + activation: RELU_6 + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + self.assertIsNone(keras_config.params()['activation']) + self.assertEqual( + keras_config.params(include_activation=True)['activation'], tf.nn.relu6) + activation_layer = keras_config.build_activation_layer() + self.assertIsInstance(activation_layer, tf.keras.layers.Lambda) + self.assertEqual(activation_layer.function, tf.nn.relu6) + + def test_use_swish_activation_keras(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + activation: SWISH + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + self.assertIsNone(keras_config.params()['activation']) + self.assertEqual( + keras_config.params(include_activation=True)['activation'], tf.nn.swish) + activation_layer = keras_config.build_activation_layer() + self.assertIsInstance(activation_layer, tf.keras.layers.Lambda) + self.assertEqual(activation_layer.function, tf.nn.swish) + + def test_override_activation_keras(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + activation: RELU_6 + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + new_params = keras_config.params(activation=tf.nn.relu) + self.assertEqual(new_params['activation'], tf.nn.relu) + + def test_variance_in_range_with_variance_scaling_initializer_fan_in_keras( + self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + variance_scaling_initializer { + factor: 2.0 + mode: FAN_IN + uniform: false + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + initializer = keras_config.params()['kernel_initializer'] + self._assert_variance_in_range(initializer, shape=[100, 40], + variance=2. / 100.) + + def test_variance_in_range_with_variance_scaling_initializer_fan_out_keras( + self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + variance_scaling_initializer { + factor: 2.0 + mode: FAN_OUT + uniform: false + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + initializer = keras_config.params()['kernel_initializer'] + self._assert_variance_in_range(initializer, shape=[100, 40], + variance=2. / 40.) 
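+ # Note on the *_normal_initializer expectations in these tests: truncated + # normal initializers re-draw samples that fall more than two standard + # deviations from the mean, which shrinks the variance to roughly + # 0.77 * stddev**2 (about 0.49 for stddev = 0.8), whereas a plain + # random_normal initializer keeps the full stddev**2 = 0.64.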
+ + def test_variance_in_range_with_variance_scaling_initializer_fan_avg_keras( + self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + variance_scaling_initializer { + factor: 2.0 + mode: FAN_AVG + uniform: false + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + initializer = keras_config.params()['kernel_initializer'] + self._assert_variance_in_range(initializer, shape=[100, 40], + variance=4. / (100. + 40.)) + + def test_variance_in_range_with_variance_scaling_initializer_uniform_keras( + self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + variance_scaling_initializer { + factor: 2.0 + mode: FAN_IN + uniform: true + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + initializer = keras_config.params()['kernel_initializer'] + self._assert_variance_in_range(initializer, shape=[100, 40], + variance=2. / 100.) + + def test_variance_in_range_with_truncated_normal_initializer_keras(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + mean: 0.0 + stddev: 0.8 + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + initializer = keras_config.params()['kernel_initializer'] + self._assert_variance_in_range(initializer, shape=[100, 40], + variance=0.49, tol=1e-1) + + def test_variance_in_range_with_random_normal_initializer_keras(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + random_normal_initializer { + mean: 0.0 + stddev: 0.8 + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + initializer = keras_config.params()['kernel_initializer'] + self._assert_variance_in_range(initializer, shape=[100, 40], + variance=0.64, tol=1e-1) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/builders/image_resizer_builder.py b/models/research/object_detection/builders/image_resizer_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..1a3f096f1786bd47f9084a559c2657f72a164da0 --- /dev/null +++ b/models/research/object_detection/builders/image_resizer_builder.py @@ -0,0 +1,187 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Builder function for image resizing operations.""" +import functools +import tensorflow.compat.v1 as tf + +from object_detection.core import preprocessor +from object_detection.protos import image_resizer_pb2 + + +def _tf_resize_method(resize_method): + """Maps image resize method from enumeration type to TensorFlow. + + Args: + resize_method: The resize_method attribute of keep_aspect_ratio_resizer or + fixed_shape_resizer. + + Returns: + method: The corresponding TensorFlow ResizeMethod. + + Raises: + ValueError: if `resize_method` is of unknown type. + """ + dict_method = { + image_resizer_pb2.BILINEAR: + tf.image.ResizeMethod.BILINEAR, + image_resizer_pb2.NEAREST_NEIGHBOR: + tf.image.ResizeMethod.NEAREST_NEIGHBOR, + image_resizer_pb2.BICUBIC: + tf.image.ResizeMethod.BICUBIC, + image_resizer_pb2.AREA: + tf.image.ResizeMethod.AREA + } + if resize_method in dict_method: + return dict_method[resize_method] + else: + raise ValueError('Unknown resize_method') + + +def build(image_resizer_config): + """Builds callable for image resizing operations. + + Args: + image_resizer_config: image_resizer.proto object containing parameters for + an image resizing operation. + + Returns: + image_resizer_fn: Callable for image resizing. This callable always takes + a rank-3 image tensor (corresponding to a single image) and returns a + rank-3 image tensor, possibly with new spatial dimensions. + + Raises: + ValueError: if `image_resizer_config` is of incorrect type. + ValueError: if `image_resizer_config.image_resizer_oneof` is not of the + expected type. + ValueError: if min_dimension > max_dimension when keep_aspect_ratio_resizer + is used. + """ + if not isinstance(image_resizer_config, image_resizer_pb2.ImageResizer): + raise ValueError('image_resizer_config not of type ' + 'image_resizer_pb2.ImageResizer.') + + image_resizer_oneof = image_resizer_config.WhichOneof('image_resizer_oneof') + if image_resizer_oneof == 'keep_aspect_ratio_resizer': + keep_aspect_ratio_config = image_resizer_config.keep_aspect_ratio_resizer + if not (keep_aspect_ratio_config.min_dimension <= + keep_aspect_ratio_config.max_dimension): + raise ValueError('min_dimension > max_dimension') + method = _tf_resize_method(keep_aspect_ratio_config.resize_method) + per_channel_pad_value = (0, 0, 0) + if keep_aspect_ratio_config.per_channel_pad_value: + per_channel_pad_value = tuple(keep_aspect_ratio_config.
+ per_channel_pad_value) + image_resizer_fn = functools.partial( + preprocessor.resize_to_range, + min_dimension=keep_aspect_ratio_config.min_dimension, + max_dimension=keep_aspect_ratio_config.max_dimension, + method=method, + pad_to_max_dimension=keep_aspect_ratio_config.pad_to_max_dimension, + per_channel_pad_value=per_channel_pad_value) + if not keep_aspect_ratio_config.convert_to_grayscale: + return image_resizer_fn + elif image_resizer_oneof == 'fixed_shape_resizer': + fixed_shape_resizer_config = image_resizer_config.fixed_shape_resizer + method = _tf_resize_method(fixed_shape_resizer_config.resize_method) + image_resizer_fn = functools.partial( + preprocessor.resize_image, + new_height=fixed_shape_resizer_config.height, + new_width=fixed_shape_resizer_config.width, + method=method) + if not fixed_shape_resizer_config.convert_to_grayscale: + return image_resizer_fn + elif image_resizer_oneof == 'identity_resizer': + def image_resizer_fn(image, masks=None, **kwargs): + del kwargs + if masks is None: + return [image, tf.shape(image)] + else: + return [image, masks, tf.shape(image)] + return image_resizer_fn + elif image_resizer_oneof == 'conditional_shape_resizer': + conditional_shape_resize_config = ( + image_resizer_config.conditional_shape_resizer) + method = _tf_resize_method(conditional_shape_resize_config.resize_method) + + if conditional_shape_resize_config.condition == ( + image_resizer_pb2.ConditionalShapeResizer.GREATER): + image_resizer_fn = functools.partial( + preprocessor.resize_to_max_dimension, + max_dimension=conditional_shape_resize_config.size_threshold, + method=method) + + elif conditional_shape_resize_config.condition == ( + image_resizer_pb2.ConditionalShapeResizer.SMALLER): + image_resizer_fn = functools.partial( + preprocessor.resize_to_min_dimension, + min_dimension=conditional_shape_resize_config.size_threshold, + method=method) + else: + raise ValueError( + 'Invalid image resizer condition option for ' + 'ConditionalShapeResizer: \'%s\'.' + % conditional_shape_resize_config.condition) + if not conditional_shape_resize_config.convert_to_grayscale: + return image_resizer_fn + elif image_resizer_oneof == 'pad_to_multiple_resizer': + pad_to_multiple_resizer_config = ( + image_resizer_config.pad_to_multiple_resizer) + + if pad_to_multiple_resizer_config.multiple < 0: + raise ValueError('`multiple` for pad_to_multiple_resizer should be > 0.') + + else: + image_resizer_fn = functools.partial( + preprocessor.resize_pad_to_multiple, + multiple=pad_to_multiple_resizer_config.multiple) + + if not pad_to_multiple_resizer_config.convert_to_grayscale: + return image_resizer_fn + else: + raise ValueError( + 'Invalid image resizer option: \'%s\'.' % image_resizer_oneof) + + def grayscale_image_resizer(image, masks=None): + """Convert to grayscale before applying image_resizer_fn. + + Args: + image: A 3D tensor of shape [height, width, 3] + masks: (optional) rank 3 float32 tensor with shape [num_instances, height, + width] containing instance masks. + + Returns: + Note that the position of the resized_image_shape changes based on whether + masks are present. + resized_image: A 3D tensor of shape [new_height, new_width, 1], + where the image has been resized (with bilinear interpolation) so that + min(new_height, new_width) == min_dimension or + max(new_height, new_width) == max_dimension. + resized_masks: If masks is not None, also outputs masks. A 3D tensor of + shape [num_instances, new_height, new_width]. 
+ resized_image_shape: A 1D tensor of shape [3] containing shape of the + resized image. + """ + # image_resizer_fn returns [resized_image, resized_image_shape] if + # mask==None, otherwise it returns + # [resized_image, resized_mask, resized_image_shape]. In either case, we + # only deal with first and last element of the returned list. + retval = image_resizer_fn(image, masks) + resized_image = retval[0] + resized_image_shape = retval[-1] + retval[0] = preprocessor.rgb_to_gray(resized_image) + retval[-1] = tf.concat([resized_image_shape[:-1], [1]], 0) + return retval + + return functools.partial(grayscale_image_resizer) diff --git a/models/research/object_detection/builders/image_resizer_builder_test.py b/models/research/object_detection/builders/image_resizer_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..dfc456eab1da1ea7952d17be4d14fab9ca8bf9a4 --- /dev/null +++ b/models/research/object_detection/builders/image_resizer_builder_test.py @@ -0,0 +1,243 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for object_detection.builders.image_resizer_builder.""" +import numpy as np +import tensorflow.compat.v1 as tf +from google.protobuf import text_format +from object_detection.builders import image_resizer_builder +from object_detection.protos import image_resizer_pb2 +from object_detection.utils import test_case + + +class ImageResizerBuilderTest(test_case.TestCase): + + def _shape_of_resized_random_image_given_text_proto(self, input_shape, + text_proto): + image_resizer_config = image_resizer_pb2.ImageResizer() + text_format.Merge(text_proto, image_resizer_config) + image_resizer_fn = image_resizer_builder.build(image_resizer_config) + def graph_fn(): + images = tf.cast( + tf.random_uniform(input_shape, minval=0, maxval=255, dtype=tf.int32), + dtype=tf.float32) + resized_images, _ = image_resizer_fn(images) + return resized_images + return self.execute_cpu(graph_fn, []).shape + + def test_build_keep_aspect_ratio_resizer_returns_expected_shape(self): + image_resizer_text_proto = """ + keep_aspect_ratio_resizer { + min_dimension: 10 + max_dimension: 20 + } + """ + input_shape = (50, 25, 3) + expected_output_shape = (20, 10, 3) + output_shape = self._shape_of_resized_random_image_given_text_proto( + input_shape, image_resizer_text_proto) + self.assertEqual(output_shape, expected_output_shape) + + def test_build_keep_aspect_ratio_resizer_grayscale(self): + image_resizer_text_proto = """ + keep_aspect_ratio_resizer { + min_dimension: 10 + max_dimension: 20 + convert_to_grayscale: true + } + """ + input_shape = (50, 25, 3) + expected_output_shape = (20, 10, 1) + output_shape = self._shape_of_resized_random_image_given_text_proto( + input_shape, image_resizer_text_proto) + self.assertEqual(output_shape, expected_output_shape) + + def test_build_keep_aspect_ratio_resizer_with_padding(self): + 
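"""Checks that pad_to_max_dimension pads the resized output to a max_dimension square, filling new pixels with per_channel_pad_value.""" +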
image_resizer_text_proto = """ + keep_aspect_ratio_resizer { + min_dimension: 10 + max_dimension: 20 + pad_to_max_dimension: true + per_channel_pad_value: 3 + per_channel_pad_value: 4 + per_channel_pad_value: 5 + } + """ + input_shape = (50, 25, 3) + expected_output_shape = (20, 20, 3) + output_shape = self._shape_of_resized_random_image_given_text_proto( + input_shape, image_resizer_text_proto) + self.assertEqual(output_shape, expected_output_shape) + + def test_built_fixed_shape_resizer_returns_expected_shape(self): + image_resizer_text_proto = """ + fixed_shape_resizer { + height: 10 + width: 20 + } + """ + input_shape = (50, 25, 3) + expected_output_shape = (10, 20, 3) + output_shape = self._shape_of_resized_random_image_given_text_proto( + input_shape, image_resizer_text_proto) + self.assertEqual(output_shape, expected_output_shape) + + def test_built_fixed_shape_resizer_grayscale(self): + image_resizer_text_proto = """ + fixed_shape_resizer { + height: 10 + width: 20 + convert_to_grayscale: true + } + """ + input_shape = (50, 25, 3) + expected_output_shape = (10, 20, 1) + output_shape = self._shape_of_resized_random_image_given_text_proto( + input_shape, image_resizer_text_proto) + self.assertEqual(output_shape, expected_output_shape) + + def test_identity_resizer_returns_expected_shape(self): + image_resizer_text_proto = """ + identity_resizer { + } + """ + input_shape = (10, 20, 3) + expected_output_shape = (10, 20, 3) + output_shape = self._shape_of_resized_random_image_given_text_proto( + input_shape, image_resizer_text_proto) + self.assertEqual(output_shape, expected_output_shape) + + def test_raises_error_on_invalid_input(self): + invalid_input = 'invalid_input' + with self.assertRaises(ValueError): + image_resizer_builder.build(invalid_input) + + def _resized_image_given_text_proto(self, image, text_proto): + image_resizer_config = image_resizer_pb2.ImageResizer() + text_format.Merge(text_proto, image_resizer_config) + image_resizer_fn = image_resizer_builder.build(image_resizer_config) + def graph_fn(image): + resized_image, _ = image_resizer_fn(image) + return resized_image + return self.execute_cpu(graph_fn, [image]) + + def test_fixed_shape_resizer_nearest_neighbor_method(self): + image_resizer_text_proto = """ + fixed_shape_resizer { + height: 1 + width: 1 + resize_method: NEAREST_NEIGHBOR + } + """ + image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + image = np.expand_dims(image, axis=2) + image = np.tile(image, (1, 1, 3)) + image = np.expand_dims(image, axis=0) + resized_image = self._resized_image_given_text_proto( + image, image_resizer_text_proto) + vals = np.unique(resized_image).tolist() + self.assertEqual(len(vals), 1) + self.assertEqual(vals[0], 1) + + def test_build_conditional_shape_resizer_greater_returns_expected_shape(self): + image_resizer_text_proto = """ + conditional_shape_resizer { + condition: GREATER + size_threshold: 30 + } + """ + input_shape = (60, 30, 3) + expected_output_shape = (30, 15, 3) + output_shape = self._shape_of_resized_random_image_given_text_proto( + input_shape, image_resizer_text_proto) + self.assertEqual(output_shape, expected_output_shape) + + def test_build_conditional_shape_resizer_same_shape_with_no_resize(self): + image_resizer_text_proto = """ + conditional_shape_resizer { + condition: GREATER + size_threshold: 30 + } + """ + input_shape = (15, 15, 3) + expected_output_shape = (15, 15, 3) + output_shape = self._shape_of_resized_random_image_given_text_proto( + input_shape, image_resizer_text_proto) + 
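# Both input dimensions are at or below the 30-pixel size_threshold, so the + # GREATER condition never fires and the image should pass through unchanged. +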
self.assertEqual(output_shape, expected_output_shape) + + def test_build_conditional_shape_resizer_smaller_returns_expected_shape(self): + image_resizer_text_proto = """ + conditional_shape_resizer { + condition: SMALLER + size_threshold: 30 + } + """ + input_shape = (30, 15, 3) + expected_output_shape = (60, 30, 3) + output_shape = self._shape_of_resized_random_image_given_text_proto( + input_shape, image_resizer_text_proto) + self.assertEqual(output_shape, expected_output_shape) + + def test_build_conditional_shape_resizer_grayscale(self): + image_resizer_text_proto = """ + conditional_shape_resizer { + condition: GREATER + size_threshold: 30 + convert_to_grayscale: true + } + """ + input_shape = (60, 30, 3) + expected_output_shape = (30, 15, 1) + output_shape = self._shape_of_resized_random_image_given_text_proto( + input_shape, image_resizer_text_proto) + self.assertEqual(output_shape, expected_output_shape) + + def test_build_conditional_shape_resizer_error_on_invalid_condition(self): + invalid_image_resizer_text_proto = """ + conditional_shape_resizer { + condition: INVALID + size_threshold: 30 + } + """ + with self.assertRaises(ValueError): + image_resizer_builder.build(invalid_image_resizer_text_proto) + + def test_build_pad_to_multiple_resizer(self): + """Test building a pad_to_multiple_resizer from proto.""" + image_resizer_text_proto = """ + pad_to_multiple_resizer { + multiple: 32 + } + """ + input_shape = (60, 30, 3) + expected_output_shape = (64, 32, 3) + output_shape = self._shape_of_resized_random_image_given_text_proto( + input_shape, image_resizer_text_proto) + self.assertEqual(output_shape, expected_output_shape) + + def test_build_pad_to_multiple_resizer_invalid_multiple(self): + """Test that building a pad_to_multiple_resizer errors with invalid multiple.""" + + image_resizer_text_proto = """ + pad_to_multiple_resizer { + multiple: -10 + } + """ + + with self.assertRaises(ValueError): + image_resizer_builder.build(image_resizer_text_proto) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/builders/input_reader_builder.py b/models/research/object_detection/builders/input_reader_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..c7755177e70d528984ea425f21fb9afaf11d9eaa --- /dev/null +++ b/models/research/object_detection/builders/input_reader_builder.py @@ -0,0 +1,91 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Input reader builder. + +Creates data sources for DetectionModels from an InputReader config. See +input_reader.proto for options. + +Note: If users wish to also use their own InputReaders with the Object +Detection configuration framework, they should define their own builder function +that wraps the build function.
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.data_decoders import tf_example_decoder +from object_detection.data_decoders import tf_sequence_example_decoder +from object_detection.protos import input_reader_pb2 + +parallel_reader = slim.parallel_reader + + +def build(input_reader_config): + """Builds a tensor dictionary based on the InputReader config. + + Args: + input_reader_config: A input_reader_pb2.InputReader object. + + Returns: + A tensor dict based on the input_reader_config. + + Raises: + ValueError: On invalid input reader proto. + ValueError: If no input paths are specified. + """ + if not isinstance(input_reader_config, input_reader_pb2.InputReader): + raise ValueError('input_reader_config not of type ' + 'input_reader_pb2.InputReader.') + + if input_reader_config.WhichOneof('input_reader') == 'tf_record_input_reader': + config = input_reader_config.tf_record_input_reader + if not config.input_path: + raise ValueError('At least one input path must be specified in ' + '`input_reader_config`.') + _, string_tensor = parallel_reader.parallel_read( + config.input_path[:], # Convert `RepeatedScalarContainer` to list. + reader_class=tf.TFRecordReader, + num_epochs=(input_reader_config.num_epochs + if input_reader_config.num_epochs else None), + num_readers=input_reader_config.num_readers, + shuffle=input_reader_config.shuffle, + dtypes=[tf.string, tf.string], + capacity=input_reader_config.queue_capacity, + min_after_dequeue=input_reader_config.min_after_dequeue) + + label_map_proto_file = None + if input_reader_config.HasField('label_map_path'): + label_map_proto_file = input_reader_config.label_map_path + input_type = input_reader_config.input_type + if input_type == input_reader_pb2.InputType.Value('TF_EXAMPLE'): + decoder = tf_example_decoder.TfExampleDecoder( + load_instance_masks=input_reader_config.load_instance_masks, + instance_mask_type=input_reader_config.mask_type, + label_map_proto_file=label_map_proto_file, + load_context_features=input_reader_config.load_context_features) + return decoder.decode(string_tensor) + elif input_type == input_reader_pb2.InputType.Value('TF_SEQUENCE_EXAMPLE'): + decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder( + label_map_proto_file=label_map_proto_file, + load_context_features=input_reader_config.load_context_features) + return decoder.decode(string_tensor) + raise ValueError('Unsupported input_type.') + raise ValueError('Unsupported input_reader_config.') diff --git a/models/research/object_detection/builders/input_reader_builder_tf1_test.py b/models/research/object_detection/builders/input_reader_builder_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..6049128b03f55501ddcd2a1b3334821800d826a1 --- /dev/null +++ b/models/research/object_detection/builders/input_reader_builder_tf1_test.py @@ -0,0 +1,306 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for input_reader_builder.""" + +import os +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format + +from object_detection.builders import input_reader_builder +from object_detection.core import standard_fields as fields +from object_detection.dataset_tools import seq_example_util +from object_detection.protos import input_reader_pb2 +from object_detection.utils import dataset_util +from object_detection.utils import tf_version + + +def _get_labelmap_path(): + """Returns an absolute path to label map file.""" + parent_path = os.path.dirname(tf.resource_loader.get_data_files_path()) + return os.path.join(parent_path, 'data', + 'pet_label_map.pbtxt') + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class InputReaderBuilderTest(tf.test.TestCase): + + def create_tf_record(self): + path = os.path.join(self.get_temp_dir(), 'tfrecord') + writer = tf.python_io.TFRecordWriter(path) + + image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8) + flat_mask = (4 * 5) * [1.0] + with self.test_session(): + encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval() + example = tf.train.Example(features=tf.train.Features(feature={ + 'image/encoded': dataset_util.bytes_feature(encoded_jpeg), + 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')), + 'image/height': dataset_util.int64_feature(4), + 'image/width': dataset_util.int64_feature(5), + 'image/object/bbox/xmin': dataset_util.float_list_feature([0.0]), + 'image/object/bbox/xmax': dataset_util.float_list_feature([1.0]), + 'image/object/bbox/ymin': dataset_util.float_list_feature([0.0]), + 'image/object/bbox/ymax': dataset_util.float_list_feature([1.0]), + 'image/object/class/label': dataset_util.int64_list_feature([2]), + 'image/object/mask': dataset_util.float_list_feature(flat_mask), + })) + writer.write(example.SerializeToString()) + writer.close() + + return path + + def _make_random_serialized_jpeg_images(self, num_frames, image_height, + image_width): + images = tf.cast(tf.random.uniform( + [num_frames, image_height, image_width, 3], + maxval=256, + dtype=tf.int32), dtype=tf.uint8) + images_list = tf.unstack(images, axis=0) + encoded_images_list = [tf.io.encode_jpeg(image) for image in images_list] + with tf.Session() as sess: + encoded_images = sess.run(encoded_images_list) + return encoded_images + + def create_tf_record_sequence_example(self): + path = os.path.join(self.get_temp_dir(), 'tfrecord') + writer = tf.python_io.TFRecordWriter(path) + num_frames = 4 + image_height = 20 + image_width = 30 + image_source_ids = [str(i) for i in range(num_frames)] + with self.test_session(): + encoded_images = self._make_random_serialized_jpeg_images( + num_frames, image_height, image_width) + sequence_example_serialized = seq_example_util.make_sequence_example( + dataset_name='video_dataset', + video_id='video', + encoded_images=encoded_images, + image_height=image_height, + image_width=image_width, + image_source_ids=image_source_ids, + image_format='JPEG', + is_annotated=[[1], [1], [1], [1]], + bboxes=[ + [[]], # Frame 0. + [[0., 0., 1., 1.]], # Frame 1. + [[0., 0., 1., 1.], + [0.1, 0.1, 0.2, 0.2]], # Frame 2. + [[]], # Frame 3. + ], + label_strings=[ + [], # Frame 0. + ['Abyssinian'], # Frame 1. 
+ ['Abyssinian', 'american_bulldog'], # Frame 2. + [], # Frame 3 + ]).SerializeToString() + + writer.write(sequence_example_serialized) + writer.close() + + return path + + def create_tf_record_with_context(self): + path = os.path.join(self.get_temp_dir(), 'tfrecord') + writer = tf.python_io.TFRecordWriter(path) + + image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8) + flat_mask = (4 * 5) * [1.0] + context_features = (10 * 3) * [1.0] + with self.test_session(): + encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval() + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature('jpeg'.encode('utf8')), + 'image/height': + dataset_util.int64_feature(4), + 'image/width': + dataset_util.int64_feature(5), + 'image/object/bbox/xmin': + dataset_util.float_list_feature([0.0]), + 'image/object/bbox/xmax': + dataset_util.float_list_feature([1.0]), + 'image/object/bbox/ymin': + dataset_util.float_list_feature([0.0]), + 'image/object/bbox/ymax': + dataset_util.float_list_feature([1.0]), + 'image/object/class/label': + dataset_util.int64_list_feature([2]), + 'image/object/mask': + dataset_util.float_list_feature(flat_mask), + 'image/context_features': + dataset_util.float_list_feature(context_features), + 'image/context_feature_length': + dataset_util.int64_list_feature([10]), + })) + writer.write(example.SerializeToString()) + writer.close() + + return path + + def test_build_tf_record_input_reader(self): + tf_record_path = self.create_tf_record() + + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path) + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + tensor_dict = input_reader_builder.build(input_reader_proto) + + with tf.train.MonitoredSession() as sess: + output_dict = sess.run(tensor_dict) + + self.assertNotIn(fields.InputDataFields.groundtruth_instance_masks, + output_dict) + self.assertEqual((4, 5, 3), output_dict[fields.InputDataFields.image].shape) + self.assertEqual([2], + output_dict[fields.InputDataFields.groundtruth_classes]) + self.assertEqual( + (1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape) + self.assertAllEqual( + [0.0, 0.0, 1.0, 1.0], + output_dict[fields.InputDataFields.groundtruth_boxes][0]) + + def test_build_tf_record_input_reader_sequence_example(self): + tf_record_path = self.create_tf_record_sequence_example() + + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + input_type: TF_SEQUENCE_EXAMPLE + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path) + input_reader_proto = input_reader_pb2.InputReader() + input_reader_proto.label_map_path = _get_labelmap_path() + text_format.Merge(input_reader_text_proto, input_reader_proto) + tensor_dict = input_reader_builder.build(input_reader_proto) + + with tf.train.MonitoredSession() as sess: + output_dict = sess.run(tensor_dict) + + expected_groundtruth_classes = [[-1, -1], [1, -1], [1, 2], [-1, -1]] + expected_groundtruth_boxes = [[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 1.0, 1.0], [0.1, 0.1, 0.2, 0.2]], + [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]] + expected_num_groundtruth_boxes = [0, 1, 2, 0] + + self.assertNotIn( + fields.InputDataFields.groundtruth_instance_masks, output_dict) + # 
sequence example images are encoded + self.assertEqual((4,), output_dict[fields.InputDataFields.image].shape) + self.assertAllEqual(expected_groundtruth_classes, + output_dict[fields.InputDataFields.groundtruth_classes]) + self.assertEqual( + (4, 2, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape) + self.assertAllClose(expected_groundtruth_boxes, + output_dict[fields.InputDataFields.groundtruth_boxes]) + self.assertAllClose( + expected_num_groundtruth_boxes, + output_dict[fields.InputDataFields.num_groundtruth_boxes]) + + def test_build_tf_record_input_reader_with_context(self): + tf_record_path = self.create_tf_record_with_context() + + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path) + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + input_reader_proto.load_context_features = True + tensor_dict = input_reader_builder.build(input_reader_proto) + + with tf.train.MonitoredSession() as sess: + output_dict = sess.run(tensor_dict) + + self.assertNotIn(fields.InputDataFields.groundtruth_instance_masks, + output_dict) + self.assertEqual((4, 5, 3), output_dict[fields.InputDataFields.image].shape) + self.assertEqual([2], + output_dict[fields.InputDataFields.groundtruth_classes]) + self.assertEqual( + (1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape) + self.assertAllEqual( + [0.0, 0.0, 1.0, 1.0], + output_dict[fields.InputDataFields.groundtruth_boxes][0]) + self.assertAllEqual( + [0.0, 0.0, 1.0, 1.0], + output_dict[fields.InputDataFields.groundtruth_boxes][0]) + self.assertAllEqual( + (3, 10), output_dict[fields.InputDataFields.context_features].shape) + self.assertAllEqual( + (10), output_dict[fields.InputDataFields.context_feature_length]) + + def test_build_tf_record_input_reader_and_load_instance_masks(self): + tf_record_path = self.create_tf_record() + + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + load_instance_masks: true + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path) + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + tensor_dict = input_reader_builder.build(input_reader_proto) + + with tf.train.MonitoredSession() as sess: + output_dict = sess.run(tensor_dict) + + self.assertEqual((4, 5, 3), output_dict[fields.InputDataFields.image].shape) + self.assertEqual([2], + output_dict[fields.InputDataFields.groundtruth_classes]) + self.assertEqual( + (1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape) + self.assertAllEqual( + [0.0, 0.0, 1.0, 1.0], + output_dict[fields.InputDataFields.groundtruth_boxes][0]) + self.assertAllEqual( + (1, 4, 5), + output_dict[fields.InputDataFields.groundtruth_instance_masks].shape) + + def test_raises_error_with_no_input_paths(self): + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + load_instance_masks: true + """ + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + with self.assertRaises(ValueError): + input_reader_builder.build(input_reader_proto) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/builders/losses_builder.py b/models/research/object_detection/builders/losses_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..5a69c9b602c95ab6c8368638b2e38448ae113b9c --- 
/dev/null +++ b/models/research/object_detection/builders/losses_builder.py @@ -0,0 +1,260 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A function to build localization and classification losses from config.""" + +import functools +from object_detection.core import balanced_positive_negative_sampler as sampler +from object_detection.core import losses +from object_detection.protos import losses_pb2 +from object_detection.utils import ops + + +def build(loss_config): + """Build losses based on the config. + + Builds the classification and localization losses, and optionally a hard + example miner, based on the config. + + Args: + loss_config: A losses_pb2.Loss object. + + Returns: + classification_loss: Classification loss object. + localization_loss: Localization loss object. + classification_weight: Classification loss weight. + localization_weight: Localization loss weight. + hard_example_miner: Hard example miner object. + random_example_sampler: BalancedPositiveNegativeSampler object. + expected_loss_weights_fn: Function for computing expected classification + loss weights, or None. + + Raises: + ValueError: If hard_example_miner is used with sigmoid_focal_loss. + ValueError: If random_example_sampler is given a non-positive value as the + desired positive example fraction. + """ + classification_loss = _build_classification_loss( + loss_config.classification_loss) + localization_loss = _build_localization_loss( + loss_config.localization_loss) + classification_weight = loss_config.classification_weight + localization_weight = loss_config.localization_weight + hard_example_miner = None + if loss_config.HasField('hard_example_miner'): + if (loss_config.classification_loss.WhichOneof('classification_loss') == + 'weighted_sigmoid_focal'): + raise ValueError('HardExampleMiner should not be used with sigmoid focal ' + 'loss') + hard_example_miner = build_hard_example_miner( + loss_config.hard_example_miner, + classification_weight, + localization_weight) + random_example_sampler = None + if loss_config.HasField('random_example_sampler'): + if loss_config.random_example_sampler.positive_sample_fraction <= 0: + raise ValueError('RandomExampleSampler should not use a non-positive ' + 'value as the positive sample fraction.') + random_example_sampler = sampler.BalancedPositiveNegativeSampler( + positive_fraction=loss_config.random_example_sampler.
+ positive_sample_fraction) + + if loss_config.expected_loss_weights == loss_config.NONE: + expected_loss_weights_fn = None + elif loss_config.expected_loss_weights == loss_config.EXPECTED_SAMPLING: + expected_loss_weights_fn = functools.partial( + ops.expected_classification_loss_by_expected_sampling, + min_num_negative_samples=loss_config.min_num_negative_samples, + desired_negative_sampling_ratio=loss_config + .desired_negative_sampling_ratio) + elif (loss_config.expected_loss_weights == loss_config + .REWEIGHTING_UNMATCHED_ANCHORS): + expected_loss_weights_fn = functools.partial( + ops.expected_classification_loss_by_reweighting_unmatched_anchors, + min_num_negative_samples=loss_config.min_num_negative_samples, + desired_negative_sampling_ratio=loss_config + .desired_negative_sampling_ratio) + else: + raise ValueError('Not a valid value for expected_classification_loss.') + + return (classification_loss, localization_loss, classification_weight, + localization_weight, hard_example_miner, random_example_sampler, + expected_loss_weights_fn) + + +def build_hard_example_miner(config, + classification_weight, + localization_weight): + """Builds hard example miner based on the config. + + Args: + config: A losses_pb2.HardExampleMiner object. + classification_weight: Classification loss weight. + localization_weight: Localization loss weight. + + Returns: + Hard example miner. + + """ + loss_type = None + if config.loss_type == losses_pb2.HardExampleMiner.BOTH: + loss_type = 'both' + if config.loss_type == losses_pb2.HardExampleMiner.CLASSIFICATION: + loss_type = 'cls' + if config.loss_type == losses_pb2.HardExampleMiner.LOCALIZATION: + loss_type = 'loc' + + max_negatives_per_positive = None + num_hard_examples = None + if config.max_negatives_per_positive > 0: + max_negatives_per_positive = config.max_negatives_per_positive + if config.num_hard_examples > 0: + num_hard_examples = config.num_hard_examples + hard_example_miner = losses.HardExampleMiner( + num_hard_examples=num_hard_examples, + iou_threshold=config.iou_threshold, + loss_type=loss_type, + cls_loss_weight=classification_weight, + loc_loss_weight=localization_weight, + max_negatives_per_positive=max_negatives_per_positive, + min_negatives_per_image=config.min_negatives_per_image) + return hard_example_miner + + +def build_faster_rcnn_classification_loss(loss_config): + """Builds a classification loss for Faster RCNN based on the loss config. + + Args: + loss_config: A losses_pb2.ClassificationLoss object. + + Returns: + Loss based on the config. + + Raises: + ValueError: On invalid loss_config. 
+ """ + if not isinstance(loss_config, losses_pb2.ClassificationLoss): + raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.') + + loss_type = loss_config.WhichOneof('classification_loss') + + if loss_type == 'weighted_sigmoid': + return losses.WeightedSigmoidClassificationLoss() + if loss_type == 'weighted_softmax': + config = loss_config.weighted_softmax + return losses.WeightedSoftmaxClassificationLoss( + logit_scale=config.logit_scale) + if loss_type == 'weighted_logits_softmax': + config = loss_config.weighted_logits_softmax + return losses.WeightedSoftmaxClassificationAgainstLogitsLoss( + logit_scale=config.logit_scale) + if loss_type == 'weighted_sigmoid_focal': + config = loss_config.weighted_sigmoid_focal + alpha = None + if config.HasField('alpha'): + alpha = config.alpha + return losses.SigmoidFocalClassificationLoss( + gamma=config.gamma, + alpha=alpha) + + # By default, Faster RCNN second stage classifier uses Softmax loss + # with anchor-wise outputs. + config = loss_config.weighted_softmax + return losses.WeightedSoftmaxClassificationLoss( + logit_scale=config.logit_scale) + + +def _build_localization_loss(loss_config): + """Builds a localization loss based on the loss config. + + Args: + loss_config: A losses_pb2.LocalizationLoss object. + + Returns: + Loss based on the config. + + Raises: + ValueError: On invalid loss_config. + """ + if not isinstance(loss_config, losses_pb2.LocalizationLoss): + raise ValueError('loss_config not of type losses_pb2.LocalizationLoss.') + + loss_type = loss_config.WhichOneof('localization_loss') + + if loss_type == 'weighted_l2': + return losses.WeightedL2LocalizationLoss() + + if loss_type == 'weighted_smooth_l1': + return losses.WeightedSmoothL1LocalizationLoss( + loss_config.weighted_smooth_l1.delta) + + if loss_type == 'weighted_iou': + return losses.WeightedIOULocalizationLoss() + + if loss_type == 'l1_localization_loss': + return losses.L1LocalizationLoss() + + raise ValueError('Empty loss config.') + + +def _build_classification_loss(loss_config): + """Builds a classification loss based on the loss config. + + Args: + loss_config: A losses_pb2.ClassificationLoss object. + + Returns: + Loss based on the config. + + Raises: + ValueError: On invalid loss_config. 
+ """ + if not isinstance(loss_config, losses_pb2.ClassificationLoss): + raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.') + + loss_type = loss_config.WhichOneof('classification_loss') + + if loss_type == 'weighted_sigmoid': + return losses.WeightedSigmoidClassificationLoss() + + if loss_type == 'weighted_sigmoid_focal': + config = loss_config.weighted_sigmoid_focal + alpha = None + if config.HasField('alpha'): + alpha = config.alpha + return losses.SigmoidFocalClassificationLoss( + gamma=config.gamma, + alpha=alpha) + + if loss_type == 'weighted_softmax': + config = loss_config.weighted_softmax + return losses.WeightedSoftmaxClassificationLoss( + logit_scale=config.logit_scale) + + if loss_type == 'weighted_logits_softmax': + config = loss_config.weighted_logits_softmax + return losses.WeightedSoftmaxClassificationAgainstLogitsLoss( + logit_scale=config.logit_scale) + + if loss_type == 'bootstrapped_sigmoid': + config = loss_config.bootstrapped_sigmoid + return losses.BootstrappedSigmoidClassificationLoss( + alpha=config.alpha, + bootstrap_type=('hard' if config.hard_bootstrap else 'soft')) + + if loss_type == 'penalty_reduced_logistic_focal_loss': + config = loss_config.penalty_reduced_logistic_focal_loss + return losses.PenaltyReducedLogisticFocalLoss( + alpha=config.alpha, beta=config.beta) + + raise ValueError('Empty loss config.') diff --git a/models/research/object_detection/builders/losses_builder_test.py b/models/research/object_detection/builders/losses_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b37b7f3195427b951e2c508f0df191f176b9d835 --- /dev/null +++ b/models/research/object_detection/builders/losses_builder_test.py @@ -0,0 +1,558 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for losses_builder.""" + +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import losses_builder +from object_detection.core import losses +from object_detection.protos import losses_pb2 +from object_detection.utils import ops + + +class LocalizationLossBuilderTest(tf.test.TestCase): + + def test_build_weighted_l2_localization_loss(self): + losses_text_proto = """ + localization_loss { + weighted_l2 { + } + } + classification_loss { + weighted_softmax { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + _, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(localization_loss, + losses.WeightedL2LocalizationLoss) + + def test_build_weighted_smooth_l1_localization_loss_default_delta(self): + losses_text_proto = """ + localization_loss { + weighted_smooth_l1 { + } + } + classification_loss { + weighted_softmax { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + _, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(localization_loss, + losses.WeightedSmoothL1LocalizationLoss) + self.assertAlmostEqual(localization_loss._delta, 1.0) + + def test_build_weighted_smooth_l1_localization_loss_non_default_delta(self): + losses_text_proto = """ + localization_loss { + weighted_smooth_l1 { + delta: 0.1 + } + } + classification_loss { + weighted_softmax { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + _, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(localization_loss, + losses.WeightedSmoothL1LocalizationLoss) + self.assertAlmostEqual(localization_loss._delta, 0.1) + + def test_build_weighted_iou_localization_loss(self): + losses_text_proto = """ + localization_loss { + weighted_iou { + } + } + classification_loss { + weighted_softmax { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + _, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(localization_loss, + losses.WeightedIOULocalizationLoss) + + def test_anchorwise_output(self): + losses_text_proto = """ + localization_loss { + weighted_smooth_l1 { + } + } + classification_loss { + weighted_softmax { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + _, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(localization_loss, + losses.WeightedSmoothL1LocalizationLoss) + predictions = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]]) + targets = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]]) + weights = tf.constant([[1.0, 1.0]]) + loss = localization_loss(predictions, targets, weights=weights) + self.assertEqual(loss.shape, [1, 2]) + + def test_raise_error_on_empty_localization_config(self): + losses_text_proto = """ + classification_loss { + weighted_softmax { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + with self.assertRaises(ValueError): + losses_builder._build_localization_loss(losses_proto) + + + +class ClassificationLossBuilderTest(tf.test.TestCase): + + def test_build_weighted_sigmoid_classification_loss(self): + losses_text_proto = """ 
+ classification_loss { + weighted_sigmoid { + } + } + localization_loss { + weighted_l2 { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(classification_loss, + losses.WeightedSigmoidClassificationLoss) + + def test_build_weighted_sigmoid_focal_classification_loss(self): + losses_text_proto = """ + classification_loss { + weighted_sigmoid_focal { + } + } + localization_loss { + weighted_l2 { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(classification_loss, + losses.SigmoidFocalClassificationLoss) + self.assertAlmostEqual(classification_loss._alpha, None) + self.assertAlmostEqual(classification_loss._gamma, 2.0) + + def test_build_weighted_sigmoid_focal_loss_non_default(self): + losses_text_proto = """ + classification_loss { + weighted_sigmoid_focal { + alpha: 0.25 + gamma: 3.0 + } + } + localization_loss { + weighted_l2 { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(classification_loss, + losses.SigmoidFocalClassificationLoss) + self.assertAlmostEqual(classification_loss._alpha, 0.25) + self.assertAlmostEqual(classification_loss._gamma, 3.0) + + def test_build_weighted_softmax_classification_loss(self): + losses_text_proto = """ + classification_loss { + weighted_softmax { + } + } + localization_loss { + weighted_l2 { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(classification_loss, + losses.WeightedSoftmaxClassificationLoss) + + def test_build_weighted_logits_softmax_classification_loss(self): + losses_text_proto = """ + classification_loss { + weighted_logits_softmax { + } + } + localization_loss { + weighted_l2 { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance( + classification_loss, + losses.WeightedSoftmaxClassificationAgainstLogitsLoss) + + def test_build_weighted_softmax_classification_loss_with_logit_scale(self): + losses_text_proto = """ + classification_loss { + weighted_softmax { + logit_scale: 2.0 + } + } + localization_loss { + weighted_l2 { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(classification_loss, + losses.WeightedSoftmaxClassificationLoss) + + def test_build_bootstrapped_sigmoid_classification_loss(self): + losses_text_proto = """ + classification_loss { + bootstrapped_sigmoid { + alpha: 0.5 + } + } + localization_loss { + weighted_l2 { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(classification_loss, + losses.BootstrappedSigmoidClassificationLoss) + + def test_anchorwise_output(self): + losses_text_proto = """ + classification_loss { + weighted_sigmoid { + anchorwise_output: true + } + 
} + localization_loss { + weighted_l2 { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(classification_loss, + losses.WeightedSigmoidClassificationLoss) + predictions = tf.constant([[[0.0, 1.0, 0.0], [0.0, 0.5, 0.5]]]) + targets = tf.constant([[[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]]) + weights = tf.constant([[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]) + loss = classification_loss(predictions, targets, weights=weights) + self.assertEqual(loss.shape, [1, 2, 3]) + + def test_raise_error_on_empty_config(self): + losses_text_proto = """ + localization_loss { + weighted_l2 { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + with self.assertRaises(ValueError): + losses_builder.build(losses_proto) + + + +class HardExampleMinerBuilderTest(tf.test.TestCase): + + def test_do_not_build_hard_example_miner_by_default(self): + losses_text_proto = """ + localization_loss { + weighted_l2 { + } + } + classification_loss { + weighted_softmax { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + _, _, _, _, hard_example_miner, _, _ = losses_builder.build(losses_proto) + self.assertEqual(hard_example_miner, None) + + def test_build_hard_example_miner_for_classification_loss(self): + losses_text_proto = """ + localization_loss { + weighted_l2 { + } + } + classification_loss { + weighted_softmax { + } + } + hard_example_miner { + loss_type: CLASSIFICATION + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + _, _, _, _, hard_example_miner, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(hard_example_miner, losses.HardExampleMiner) + self.assertEqual(hard_example_miner._loss_type, 'cls') + + def test_build_hard_example_miner_for_localization_loss(self): + losses_text_proto = """ + localization_loss { + weighted_l2 { + } + } + classification_loss { + weighted_softmax { + } + } + hard_example_miner { + loss_type: LOCALIZATION + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + _, _, _, _, hard_example_miner, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(hard_example_miner, losses.HardExampleMiner) + self.assertEqual(hard_example_miner._loss_type, 'loc') + + def test_build_hard_example_miner_with_non_default_values(self): + losses_text_proto = """ + localization_loss { + weighted_l2 { + } + } + classification_loss { + weighted_softmax { + } + } + hard_example_miner { + num_hard_examples: 32 + iou_threshold: 0.5 + loss_type: LOCALIZATION + max_negatives_per_positive: 10 + min_negatives_per_image: 3 + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + _, _, _, _, hard_example_miner, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(hard_example_miner, losses.HardExampleMiner) + self.assertEqual(hard_example_miner._num_hard_examples, 32) + self.assertAlmostEqual(hard_example_miner._iou_threshold, 0.5) + self.assertEqual(hard_example_miner._max_negatives_per_positive, 10) + self.assertEqual(hard_example_miner._min_negatives_per_image, 3) + + +class LossBuilderTest(tf.test.TestCase): + + def test_build_all_loss_parameters(self): + losses_text_proto = """ + localization_loss { + weighted_l2 { + } + } + classification_loss { + weighted_softmax { + } + } + hard_example_miner { + } + 
classification_weight: 0.8 + localization_weight: 0.2 + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + (classification_loss, localization_loss, classification_weight, + localization_weight, hard_example_miner, _, + _) = losses_builder.build(losses_proto) + self.assertIsInstance(hard_example_miner, losses.HardExampleMiner) + self.assertIsInstance(classification_loss, + losses.WeightedSoftmaxClassificationLoss) + self.assertIsInstance(localization_loss, + losses.WeightedL2LocalizationLoss) + self.assertAlmostEqual(classification_weight, 0.8) + self.assertAlmostEqual(localization_weight, 0.2) + + def test_build_expected_sampling(self): + losses_text_proto = """ + localization_loss { + weighted_l2 { + } + } + classification_loss { + weighted_softmax { + } + } + hard_example_miner { + } + classification_weight: 0.8 + localization_weight: 0.2 + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + (classification_loss, localization_loss, classification_weight, + localization_weight, hard_example_miner, _, + _) = losses_builder.build(losses_proto) + self.assertIsInstance(hard_example_miner, losses.HardExampleMiner) + self.assertIsInstance(classification_loss, + losses.WeightedSoftmaxClassificationLoss) + self.assertIsInstance(localization_loss, losses.WeightedL2LocalizationLoss) + self.assertAlmostEqual(classification_weight, 0.8) + self.assertAlmostEqual(localization_weight, 0.2) + + + def test_build_reweighting_unmatched_anchors(self): + losses_text_proto = """ + localization_loss { + weighted_l2 { + } + } + classification_loss { + weighted_softmax { + } + } + hard_example_miner { + } + classification_weight: 0.8 + localization_weight: 0.2 + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + (classification_loss, localization_loss, classification_weight, + localization_weight, hard_example_miner, _, + _) = losses_builder.build(losses_proto) + self.assertIsInstance(hard_example_miner, losses.HardExampleMiner) + self.assertIsInstance(classification_loss, + losses.WeightedSoftmaxClassificationLoss) + self.assertIsInstance(localization_loss, losses.WeightedL2LocalizationLoss) + self.assertAlmostEqual(classification_weight, 0.8) + self.assertAlmostEqual(localization_weight, 0.2) + + def test_raise_error_when_both_focal_loss_and_hard_example_miner(self): + losses_text_proto = """ + localization_loss { + weighted_l2 { + } + } + classification_loss { + weighted_sigmoid_focal { + } + } + hard_example_miner { + } + classification_weight: 0.8 + localization_weight: 0.2 + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + with self.assertRaises(ValueError): + losses_builder.build(losses_proto) + + +class FasterRcnnClassificationLossBuilderTest(tf.test.TestCase): + + def test_build_sigmoid_loss(self): + losses_text_proto = """ + weighted_sigmoid { + } + """ + losses_proto = losses_pb2.ClassificationLoss() + text_format.Merge(losses_text_proto, losses_proto) + classification_loss = losses_builder.build_faster_rcnn_classification_loss( + losses_proto) + self.assertIsInstance(classification_loss, + losses.WeightedSigmoidClassificationLoss) + + def test_build_softmax_loss(self): + losses_text_proto = """ + weighted_softmax { + } + """ + losses_proto = losses_pb2.ClassificationLoss() + text_format.Merge(losses_text_proto, losses_proto) + classification_loss = losses_builder.build_faster_rcnn_classification_loss( + losses_proto) + 
self.assertIsInstance(classification_loss, + losses.WeightedSoftmaxClassificationLoss) + + def test_build_logits_softmax_loss(self): + losses_text_proto = """ + weighted_logits_softmax { + } + """ + losses_proto = losses_pb2.ClassificationLoss() + text_format.Merge(losses_text_proto, losses_proto) + classification_loss = losses_builder.build_faster_rcnn_classification_loss( + losses_proto) + self.assertTrue( + isinstance(classification_loss, + losses.WeightedSoftmaxClassificationAgainstLogitsLoss)) + + def test_build_sigmoid_focal_loss(self): + losses_text_proto = """ + weighted_sigmoid_focal { + } + """ + losses_proto = losses_pb2.ClassificationLoss() + text_format.Merge(losses_text_proto, losses_proto) + classification_loss = losses_builder.build_faster_rcnn_classification_loss( + losses_proto) + self.assertIsInstance(classification_loss, + losses.SigmoidFocalClassificationLoss) + + def test_build_softmax_loss_by_default(self): + losses_text_proto = """ + """ + losses_proto = losses_pb2.ClassificationLoss() + text_format.Merge(losses_text_proto, losses_proto) + classification_loss = losses_builder.build_faster_rcnn_classification_loss( + losses_proto) + self.assertIsInstance(classification_loss, + losses.WeightedSoftmaxClassificationLoss) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/builders/matcher_builder.py b/models/research/object_detection/builders/matcher_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..086f74b5c45f81cd555207f0ad593a52a0c0f307 --- /dev/null +++ b/models/research/object_detection/builders/matcher_builder.py @@ -0,0 +1,58 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A function to build an object detection matcher from configuration.""" + +from object_detection.matchers import argmax_matcher +from object_detection.protos import matcher_pb2 +from object_detection.utils import tf_version + +if tf_version.is_tf1(): + from object_detection.matchers import bipartite_matcher # pylint: disable=g-import-not-at-top + + +def build(matcher_config): + """Builds a matcher object based on the matcher config. + + Args: + matcher_config: A matcher.proto object containing the config for the desired + Matcher. + + Returns: + Matcher based on the config. + + Raises: + ValueError: On empty matcher proto. 
+ """ + if not isinstance(matcher_config, matcher_pb2.Matcher): + raise ValueError('matcher_config not of type matcher_pb2.Matcher.') + if matcher_config.WhichOneof('matcher_oneof') == 'argmax_matcher': + matcher = matcher_config.argmax_matcher + matched_threshold = unmatched_threshold = None + if not matcher.ignore_thresholds: + matched_threshold = matcher.matched_threshold + unmatched_threshold = matcher.unmatched_threshold + return argmax_matcher.ArgMaxMatcher( + matched_threshold=matched_threshold, + unmatched_threshold=unmatched_threshold, + negatives_lower_than_unmatched=matcher.negatives_lower_than_unmatched, + force_match_for_each_row=matcher.force_match_for_each_row, + use_matmul_gather=matcher.use_matmul_gather) + if matcher_config.WhichOneof('matcher_oneof') == 'bipartite_matcher': + if tf_version.is_tf2(): + raise ValueError('bipartite_matcher is not supported in TF 2.X') + matcher = matcher_config.bipartite_matcher + return bipartite_matcher.GreedyBipartiteMatcher(matcher.use_matmul_gather) + raise ValueError('Empty matcher.') diff --git a/models/research/object_detection/builders/matcher_builder_test.py b/models/research/object_detection/builders/matcher_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..cfa55ff94fb7a12dbf78787ffbbf762d1890e3bc --- /dev/null +++ b/models/research/object_detection/builders/matcher_builder_test.py @@ -0,0 +1,105 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for matcher_builder.""" + +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import matcher_builder +from object_detection.matchers import argmax_matcher +from object_detection.protos import matcher_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + +if tf_version.is_tf1(): + from object_detection.matchers import bipartite_matcher # pylint: disable=g-import-not-at-top + + +class MatcherBuilderTest(test_case.TestCase): + + def test_build_arg_max_matcher_with_defaults(self): + matcher_text_proto = """ + argmax_matcher { + } + """ + matcher_proto = matcher_pb2.Matcher() + text_format.Merge(matcher_text_proto, matcher_proto) + matcher_object = matcher_builder.build(matcher_proto) + self.assertIsInstance(matcher_object, argmax_matcher.ArgMaxMatcher) + self.assertAlmostEqual(matcher_object._matched_threshold, 0.5) + self.assertAlmostEqual(matcher_object._unmatched_threshold, 0.5) + self.assertTrue(matcher_object._negatives_lower_than_unmatched) + self.assertFalse(matcher_object._force_match_for_each_row) + + def test_build_arg_max_matcher_without_thresholds(self): + matcher_text_proto = """ + argmax_matcher { + ignore_thresholds: true + } + """ + matcher_proto = matcher_pb2.Matcher() + text_format.Merge(matcher_text_proto, matcher_proto) + matcher_object = matcher_builder.build(matcher_proto) + self.assertIsInstance(matcher_object, argmax_matcher.ArgMaxMatcher) + self.assertEqual(matcher_object._matched_threshold, None) + self.assertEqual(matcher_object._unmatched_threshold, None) + self.assertTrue(matcher_object._negatives_lower_than_unmatched) + self.assertFalse(matcher_object._force_match_for_each_row) + + def test_build_arg_max_matcher_with_non_default_parameters(self): + matcher_text_proto = """ + argmax_matcher { + matched_threshold: 0.7 + unmatched_threshold: 0.3 + negatives_lower_than_unmatched: false + force_match_for_each_row: true + use_matmul_gather: true + } + """ + matcher_proto = matcher_pb2.Matcher() + text_format.Merge(matcher_text_proto, matcher_proto) + matcher_object = matcher_builder.build(matcher_proto) + self.assertIsInstance(matcher_object, argmax_matcher.ArgMaxMatcher) + self.assertAlmostEqual(matcher_object._matched_threshold, 0.7) + self.assertAlmostEqual(matcher_object._unmatched_threshold, 0.3) + self.assertFalse(matcher_object._negatives_lower_than_unmatched) + self.assertTrue(matcher_object._force_match_for_each_row) + self.assertTrue(matcher_object._use_matmul_gather) + + def test_build_bipartite_matcher(self): + if tf_version.is_tf2(): + self.skipTest('BipartiteMatcher unsupported in TF 2.X. 
Skipping.') + matcher_text_proto = """ + bipartite_matcher { + } + """ + matcher_proto = matcher_pb2.Matcher() + text_format.Merge(matcher_text_proto, matcher_proto) + matcher_object = matcher_builder.build(matcher_proto) + self.assertIsInstance(matcher_object, + bipartite_matcher.GreedyBipartiteMatcher) + + def test_raise_error_on_empty_matcher(self): + matcher_text_proto = """ + """ + matcher_proto = matcher_pb2.Matcher() + text_format.Merge(matcher_text_proto, matcher_proto) + with self.assertRaises(ValueError): + matcher_builder.build(matcher_proto) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/builders/model_builder.py b/models/research/object_detection/builders/model_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..cdb17e88bf1d18714909ef56ec7bc2bb501ffeeb --- /dev/null +++ b/models/research/object_detection/builders/model_builder.py @@ -0,0 +1,950 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A function to build a DetectionModel from configuration.""" + +import functools +from object_detection.builders import anchor_generator_builder +from object_detection.builders import box_coder_builder +from object_detection.builders import box_predictor_builder +from object_detection.builders import hyperparams_builder +from object_detection.builders import image_resizer_builder +from object_detection.builders import losses_builder +from object_detection.builders import matcher_builder +from object_detection.builders import post_processing_builder +from object_detection.builders import region_similarity_calculator_builder as sim_calc +from object_detection.core import balanced_positive_negative_sampler as sampler +from object_detection.core import post_processing +from object_detection.core import target_assigner +from object_detection.meta_architectures import center_net_meta_arch +from object_detection.meta_architectures import context_rcnn_meta_arch +from object_detection.meta_architectures import faster_rcnn_meta_arch +from object_detection.meta_architectures import rfcn_meta_arch +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.predictors.heads import mask_head +from object_detection.protos import losses_pb2 +from object_detection.protos import model_pb2 +from object_detection.utils import label_map_util +from object_detection.utils import ops +from object_detection.utils import tf_version + +## Feature Extractors for TF +## This section conditionally imports different feature extractors based on the +## Tensorflow version. 
+## +# pylint: disable=g-import-not-at-top +if tf_version.is_tf2(): + from object_detection.models import center_net_hourglass_feature_extractor + from object_detection.models import center_net_resnet_feature_extractor + from object_detection.models import center_net_resnet_v1_fpn_feature_extractor + from object_detection.models import faster_rcnn_inception_resnet_v2_keras_feature_extractor as frcnn_inc_res_keras + from object_detection.models import faster_rcnn_resnet_keras_feature_extractor as frcnn_resnet_keras + from object_detection.models import ssd_resnet_v1_fpn_keras_feature_extractor as ssd_resnet_v1_fpn_keras + from object_detection.models.ssd_mobilenet_v1_fpn_keras_feature_extractor import SSDMobileNetV1FpnKerasFeatureExtractor + from object_detection.models.ssd_mobilenet_v1_keras_feature_extractor import SSDMobileNetV1KerasFeatureExtractor + from object_detection.models.ssd_mobilenet_v2_fpn_keras_feature_extractor import SSDMobileNetV2FpnKerasFeatureExtractor + from object_detection.models.ssd_mobilenet_v2_keras_feature_extractor import SSDMobileNetV2KerasFeatureExtractor + from object_detection.predictors import rfcn_keras_box_predictor + +if tf_version.is_tf1(): + from object_detection.models import faster_rcnn_inception_resnet_v2_feature_extractor as frcnn_inc_res + from object_detection.models import faster_rcnn_inception_v2_feature_extractor as frcnn_inc_v2 + from object_detection.models import faster_rcnn_nas_feature_extractor as frcnn_nas + from object_detection.models import faster_rcnn_pnas_feature_extractor as frcnn_pnas + from object_detection.models import faster_rcnn_resnet_v1_feature_extractor as frcnn_resnet_v1 + from object_detection.models import ssd_resnet_v1_fpn_feature_extractor as ssd_resnet_v1_fpn + from object_detection.models import ssd_resnet_v1_ppn_feature_extractor as ssd_resnet_v1_ppn + from object_detection.models.embedded_ssd_mobilenet_v1_feature_extractor import EmbeddedSSDMobileNetV1FeatureExtractor + from object_detection.models.ssd_inception_v2_feature_extractor import SSDInceptionV2FeatureExtractor + from object_detection.models.ssd_mobilenet_v2_fpn_feature_extractor import SSDMobileNetV2FpnFeatureExtractor + from object_detection.models.ssd_mobilenet_v2_mnasfpn_feature_extractor import SSDMobileNetV2MnasFPNFeatureExtractor + from object_detection.models.ssd_inception_v3_feature_extractor import SSDInceptionV3FeatureExtractor + from object_detection.models.ssd_mobilenet_edgetpu_feature_extractor import SSDMobileNetEdgeTPUFeatureExtractor + from object_detection.models.ssd_mobilenet_v1_feature_extractor import SSDMobileNetV1FeatureExtractor + from object_detection.models.ssd_mobilenet_v1_fpn_feature_extractor import SSDMobileNetV1FpnFeatureExtractor + from object_detection.models.ssd_mobilenet_v1_ppn_feature_extractor import SSDMobileNetV1PpnFeatureExtractor + from object_detection.models.ssd_mobilenet_v2_feature_extractor import SSDMobileNetV2FeatureExtractor + from object_detection.models.ssd_mobilenet_v3_feature_extractor import SSDMobileNetV3LargeFeatureExtractor + from object_detection.models.ssd_mobilenet_v3_feature_extractor import SSDMobileNetV3SmallFeatureExtractor + from object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetCPUFeatureExtractor + from object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetDSPFeatureExtractor + from object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetEdgeTPUFeatureExtractor + from object_detection.models.ssd_mobiledet_feature_extractor 
import SSDMobileDetGPUFeatureExtractor + from object_detection.models.ssd_pnasnet_feature_extractor import SSDPNASNetFeatureExtractor + from object_detection.predictors import rfcn_box_predictor +# pylint: enable=g-import-not-at-top + +if tf_version.is_tf2(): + SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP = { + 'ssd_mobilenet_v1_keras': SSDMobileNetV1KerasFeatureExtractor, + 'ssd_mobilenet_v1_fpn_keras': SSDMobileNetV1FpnKerasFeatureExtractor, + 'ssd_mobilenet_v2_keras': SSDMobileNetV2KerasFeatureExtractor, + 'ssd_mobilenet_v2_fpn_keras': SSDMobileNetV2FpnKerasFeatureExtractor, + 'ssd_resnet50_v1_fpn_keras': + ssd_resnet_v1_fpn_keras.SSDResNet50V1FpnKerasFeatureExtractor, + 'ssd_resnet101_v1_fpn_keras': + ssd_resnet_v1_fpn_keras.SSDResNet101V1FpnKerasFeatureExtractor, + 'ssd_resnet152_v1_fpn_keras': + ssd_resnet_v1_fpn_keras.SSDResNet152V1FpnKerasFeatureExtractor, + } + + FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP = { + 'faster_rcnn_resnet50_keras': + frcnn_resnet_keras.FasterRCNNResnet50KerasFeatureExtractor, + 'faster_rcnn_resnet101_keras': + frcnn_resnet_keras.FasterRCNNResnet101KerasFeatureExtractor, + 'faster_rcnn_resnet152_keras': + frcnn_resnet_keras.FasterRCNNResnet152KerasFeatureExtractor, + 'faster_rcnn_inception_resnet_v2_keras': + frcnn_inc_res_keras.FasterRCNNInceptionResnetV2KerasFeatureExtractor, + } + + CENTER_NET_EXTRACTOR_FUNCTION_MAP = { + 'resnet_v2_50': center_net_resnet_feature_extractor.resnet_v2_50, + 'resnet_v2_101': center_net_resnet_feature_extractor.resnet_v2_101, + 'resnet_v1_50_fpn': + center_net_resnet_v1_fpn_feature_extractor.resnet_v1_50_fpn, + 'resnet_v1_101_fpn': + center_net_resnet_v1_fpn_feature_extractor.resnet_v1_101_fpn, + 'hourglass_104': center_net_hourglass_feature_extractor.hourglass_104, + } + + FEATURE_EXTRACTOR_MAPS = [ + CENTER_NET_EXTRACTOR_FUNCTION_MAP, + FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP, + SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP + ] + +if tf_version.is_tf1(): + SSD_FEATURE_EXTRACTOR_CLASS_MAP = { + 'ssd_inception_v2': + SSDInceptionV2FeatureExtractor, + 'ssd_inception_v3': + SSDInceptionV3FeatureExtractor, + 'ssd_mobilenet_v1': + SSDMobileNetV1FeatureExtractor, + 'ssd_mobilenet_v1_fpn': + SSDMobileNetV1FpnFeatureExtractor, + 'ssd_mobilenet_v1_ppn': + SSDMobileNetV1PpnFeatureExtractor, + 'ssd_mobilenet_v2': + SSDMobileNetV2FeatureExtractor, + 'ssd_mobilenet_v2_fpn': + SSDMobileNetV2FpnFeatureExtractor, + 'ssd_mobilenet_v2_mnasfpn': + SSDMobileNetV2MnasFPNFeatureExtractor, + 'ssd_mobilenet_v3_large': + SSDMobileNetV3LargeFeatureExtractor, + 'ssd_mobilenet_v3_small': + SSDMobileNetV3SmallFeatureExtractor, + 'ssd_mobilenet_edgetpu': + SSDMobileNetEdgeTPUFeatureExtractor, + 'ssd_resnet50_v1_fpn': + ssd_resnet_v1_fpn.SSDResnet50V1FpnFeatureExtractor, + 'ssd_resnet101_v1_fpn': + ssd_resnet_v1_fpn.SSDResnet101V1FpnFeatureExtractor, + 'ssd_resnet152_v1_fpn': + ssd_resnet_v1_fpn.SSDResnet152V1FpnFeatureExtractor, + 'ssd_resnet50_v1_ppn': + ssd_resnet_v1_ppn.SSDResnet50V1PpnFeatureExtractor, + 'ssd_resnet101_v1_ppn': + ssd_resnet_v1_ppn.SSDResnet101V1PpnFeatureExtractor, + 'ssd_resnet152_v1_ppn': + ssd_resnet_v1_ppn.SSDResnet152V1PpnFeatureExtractor, + 'embedded_ssd_mobilenet_v1': + EmbeddedSSDMobileNetV1FeatureExtractor, + 'ssd_pnasnet': + SSDPNASNetFeatureExtractor, + 'ssd_mobiledet_cpu': + SSDMobileDetCPUFeatureExtractor, + 'ssd_mobiledet_dsp': + SSDMobileDetDSPFeatureExtractor, + 'ssd_mobiledet_edgetpu': + SSDMobileDetEdgeTPUFeatureExtractor, + 'ssd_mobiledet_gpu': + SSDMobileDetGPUFeatureExtractor, + } + + 
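+  # As with SSD_FEATURE_EXTRACTOR_CLASS_MAP above, this maps the `type` string
+  # from the feature_extractor config proto to the corresponding TF1 (slim)
+  # Faster R-CNN feature extractor class.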
FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP = { + 'faster_rcnn_nas': + frcnn_nas.FasterRCNNNASFeatureExtractor, + 'faster_rcnn_pnas': + frcnn_pnas.FasterRCNNPNASFeatureExtractor, + 'faster_rcnn_inception_resnet_v2': + frcnn_inc_res.FasterRCNNInceptionResnetV2FeatureExtractor, + 'faster_rcnn_inception_v2': + frcnn_inc_v2.FasterRCNNInceptionV2FeatureExtractor, + 'faster_rcnn_resnet50': + frcnn_resnet_v1.FasterRCNNResnet50FeatureExtractor, + 'faster_rcnn_resnet101': + frcnn_resnet_v1.FasterRCNNResnet101FeatureExtractor, + 'faster_rcnn_resnet152': + frcnn_resnet_v1.FasterRCNNResnet152FeatureExtractor, + } + + FEATURE_EXTRACTOR_MAPS = [ + SSD_FEATURE_EXTRACTOR_CLASS_MAP, + FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP + ] + + +def _check_feature_extractor_exists(feature_extractor_type): + feature_extractors = set().union(*FEATURE_EXTRACTOR_MAPS) + if feature_extractor_type not in feature_extractors: + raise ValueError('{} is not supported. See `model_builder.py` for features ' + 'extractors compatible with different versions of ' + 'Tensorflow'.format(feature_extractor_type)) + + +def _build_ssd_feature_extractor(feature_extractor_config, + is_training, + freeze_batchnorm, + reuse_weights=None): + """Builds a ssd_meta_arch.SSDFeatureExtractor based on config. + + Args: + feature_extractor_config: A SSDFeatureExtractor proto config from ssd.proto. + is_training: True if this feature extractor is being built for training. + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + reuse_weights: if the feature extractor should reuse weights. + + Returns: + ssd_meta_arch.SSDFeatureExtractor based on config. + + Raises: + ValueError: On invalid feature extractor type. 
+ """ + feature_type = feature_extractor_config.type + depth_multiplier = feature_extractor_config.depth_multiplier + min_depth = feature_extractor_config.min_depth + pad_to_multiple = feature_extractor_config.pad_to_multiple + use_explicit_padding = feature_extractor_config.use_explicit_padding + use_depthwise = feature_extractor_config.use_depthwise + + is_keras = tf_version.is_tf2() + if is_keras: + conv_hyperparams = hyperparams_builder.KerasLayerHyperparams( + feature_extractor_config.conv_hyperparams) + else: + conv_hyperparams = hyperparams_builder.build( + feature_extractor_config.conv_hyperparams, is_training) + override_base_feature_extractor_hyperparams = ( + feature_extractor_config.override_base_feature_extractor_hyperparams) + + if not is_keras and feature_type not in SSD_FEATURE_EXTRACTOR_CLASS_MAP: + raise ValueError('Unknown ssd feature_extractor: {}'.format(feature_type)) + + if is_keras: + feature_extractor_class = SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP[ + feature_type] + else: + feature_extractor_class = SSD_FEATURE_EXTRACTOR_CLASS_MAP[feature_type] + kwargs = { + 'is_training': + is_training, + 'depth_multiplier': + depth_multiplier, + 'min_depth': + min_depth, + 'pad_to_multiple': + pad_to_multiple, + 'use_explicit_padding': + use_explicit_padding, + 'use_depthwise': + use_depthwise, + 'override_base_feature_extractor_hyperparams': + override_base_feature_extractor_hyperparams + } + + if feature_extractor_config.HasField('replace_preprocessor_with_placeholder'): + kwargs.update({ + 'replace_preprocessor_with_placeholder': + feature_extractor_config.replace_preprocessor_with_placeholder + }) + + if feature_extractor_config.HasField('num_layers'): + kwargs.update({'num_layers': feature_extractor_config.num_layers}) + + if is_keras: + kwargs.update({ + 'conv_hyperparams': conv_hyperparams, + 'inplace_batchnorm_update': False, + 'freeze_batchnorm': freeze_batchnorm + }) + else: + kwargs.update({ + 'conv_hyperparams_fn': conv_hyperparams, + 'reuse_weights': reuse_weights, + }) + + + if feature_extractor_config.HasField('fpn'): + kwargs.update({ + 'fpn_min_level': + feature_extractor_config.fpn.min_level, + 'fpn_max_level': + feature_extractor_config.fpn.max_level, + 'additional_layer_depth': + feature_extractor_config.fpn.additional_layer_depth, + }) + + + return feature_extractor_class(**kwargs) + + +def _build_ssd_model(ssd_config, is_training, add_summaries): + """Builds an SSD detection model based on the model config. + + Args: + ssd_config: A ssd.proto object containing the config for the desired + SSDMetaArch. + is_training: True if this model is being built for training purposes. + add_summaries: Whether to add tf summaries in the model. + Returns: + SSDMetaArch based on the config. + + Raises: + ValueError: If ssd_config.type is not recognized (i.e. not registered in + model_class_map). 
+ """ + num_classes = ssd_config.num_classes + _check_feature_extractor_exists(ssd_config.feature_extractor.type) + + # Feature extractor + feature_extractor = _build_ssd_feature_extractor( + feature_extractor_config=ssd_config.feature_extractor, + freeze_batchnorm=ssd_config.freeze_batchnorm, + is_training=is_training) + + box_coder = box_coder_builder.build(ssd_config.box_coder) + matcher = matcher_builder.build(ssd_config.matcher) + region_similarity_calculator = sim_calc.build( + ssd_config.similarity_calculator) + encode_background_as_zeros = ssd_config.encode_background_as_zeros + negative_class_weight = ssd_config.negative_class_weight + anchor_generator = anchor_generator_builder.build( + ssd_config.anchor_generator) + if feature_extractor.is_keras_model: + ssd_box_predictor = box_predictor_builder.build_keras( + hyperparams_fn=hyperparams_builder.KerasLayerHyperparams, + freeze_batchnorm=ssd_config.freeze_batchnorm, + inplace_batchnorm_update=False, + num_predictions_per_location_list=anchor_generator + .num_anchors_per_location(), + box_predictor_config=ssd_config.box_predictor, + is_training=is_training, + num_classes=num_classes, + add_background_class=ssd_config.add_background_class) + else: + ssd_box_predictor = box_predictor_builder.build( + hyperparams_builder.build, ssd_config.box_predictor, is_training, + num_classes, ssd_config.add_background_class) + image_resizer_fn = image_resizer_builder.build(ssd_config.image_resizer) + non_max_suppression_fn, score_conversion_fn = post_processing_builder.build( + ssd_config.post_processing) + (classification_loss, localization_loss, classification_weight, + localization_weight, hard_example_miner, random_example_sampler, + expected_loss_weights_fn) = losses_builder.build(ssd_config.loss) + normalize_loss_by_num_matches = ssd_config.normalize_loss_by_num_matches + normalize_loc_loss_by_codesize = ssd_config.normalize_loc_loss_by_codesize + + equalization_loss_config = ops.EqualizationLossConfig( + weight=ssd_config.loss.equalization_loss.weight, + exclude_prefixes=ssd_config.loss.equalization_loss.exclude_prefixes) + + target_assigner_instance = target_assigner.TargetAssigner( + region_similarity_calculator, + matcher, + box_coder, + negative_class_weight=negative_class_weight) + + ssd_meta_arch_fn = ssd_meta_arch.SSDMetaArch + kwargs = {} + + return ssd_meta_arch_fn( + is_training=is_training, + anchor_generator=anchor_generator, + box_predictor=ssd_box_predictor, + box_coder=box_coder, + feature_extractor=feature_extractor, + encode_background_as_zeros=encode_background_as_zeros, + image_resizer_fn=image_resizer_fn, + non_max_suppression_fn=non_max_suppression_fn, + score_conversion_fn=score_conversion_fn, + classification_loss=classification_loss, + localization_loss=localization_loss, + classification_loss_weight=classification_weight, + localization_loss_weight=localization_weight, + normalize_loss_by_num_matches=normalize_loss_by_num_matches, + hard_example_miner=hard_example_miner, + target_assigner_instance=target_assigner_instance, + add_summaries=add_summaries, + normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize, + freeze_batchnorm=ssd_config.freeze_batchnorm, + inplace_batchnorm_update=ssd_config.inplace_batchnorm_update, + add_background_class=ssd_config.add_background_class, + explicit_background_class=ssd_config.explicit_background_class, + random_example_sampler=random_example_sampler, + expected_loss_weights_fn=expected_loss_weights_fn, + use_confidences_as_targets=ssd_config.use_confidences_as_targets, + 
implicit_example_weight=ssd_config.implicit_example_weight, + equalization_loss_config=equalization_loss_config, + return_raw_detections_during_predict=( + ssd_config.return_raw_detections_during_predict), + **kwargs) + + +def _build_faster_rcnn_feature_extractor( + feature_extractor_config, is_training, reuse_weights=True, + inplace_batchnorm_update=False): + """Builds a faster_rcnn_meta_arch.FasterRCNNFeatureExtractor based on config. + + Args: + feature_extractor_config: A FasterRcnnFeatureExtractor proto config from + faster_rcnn.proto. + is_training: True if this feature extractor is being built for training. + reuse_weights: if the feature extractor should reuse weights. + inplace_batchnorm_update: Whether to update batch_norm inplace during + training. This is required for batch norm to work correctly on TPUs. When + this is false, user must add a control dependency on + tf.GraphKeys.UPDATE_OPS for train/loss op in order to update the batch + norm moving average parameters. + + Returns: + faster_rcnn_meta_arch.FasterRCNNFeatureExtractor based on config. + + Raises: + ValueError: On invalid feature extractor type. + """ + if inplace_batchnorm_update: + raise ValueError('inplace batchnorm updates not supported.') + feature_type = feature_extractor_config.type + first_stage_features_stride = ( + feature_extractor_config.first_stage_features_stride) + batch_norm_trainable = feature_extractor_config.batch_norm_trainable + + if feature_type not in FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP: + raise ValueError('Unknown Faster R-CNN feature_extractor: {}'.format( + feature_type)) + feature_extractor_class = FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP[ + feature_type] + return feature_extractor_class( + is_training, first_stage_features_stride, + batch_norm_trainable, reuse_weights=reuse_weights) + + +def _build_faster_rcnn_keras_feature_extractor( + feature_extractor_config, is_training, + inplace_batchnorm_update=False): + """Builds a faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor from config. + + Args: + feature_extractor_config: A FasterRcnnFeatureExtractor proto config from + faster_rcnn.proto. + is_training: True if this feature extractor is being built for training. + inplace_batchnorm_update: Whether to update batch_norm inplace during + training. This is required for batch norm to work correctly on TPUs. When + this is false, user must add a control dependency on + tf.GraphKeys.UPDATE_OPS for train/loss op in order to update the batch + norm moving average parameters. + + Returns: + faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor based on config. + + Raises: + ValueError: On invalid feature extractor type. + """ + if inplace_batchnorm_update: + raise ValueError('inplace batchnorm updates not supported.') + feature_type = feature_extractor_config.type + first_stage_features_stride = ( + feature_extractor_config.first_stage_features_stride) + batch_norm_trainable = feature_extractor_config.batch_norm_trainable + + if feature_type not in FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP: + raise ValueError('Unknown Faster R-CNN feature_extractor: {}'.format( + feature_type)) + feature_extractor_class = FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP[ + feature_type] + return feature_extractor_class( + is_training, first_stage_features_stride, + batch_norm_trainable) + + +def _build_faster_rcnn_model(frcnn_config, is_training, add_summaries): + """Builds a Faster R-CNN or R-FCN detection model based on the model config. 
+ + Builds R-FCN model if the second_stage_box_predictor in the config is of type + `rfcn_box_predictor` else builds a Faster R-CNN model. + + Args: + frcnn_config: A faster_rcnn.proto object containing the config for the + desired FasterRCNNMetaArch or RFCNMetaArch. + is_training: True if this model is being built for training purposes. + add_summaries: Whether to add tf summaries in the model. + + Returns: + FasterRCNNMetaArch based on the config. + + Raises: + ValueError: If frcnn_config.type is not recognized (i.e. not registered in + model_class_map). + """ + num_classes = frcnn_config.num_classes + image_resizer_fn = image_resizer_builder.build(frcnn_config.image_resizer) + _check_feature_extractor_exists(frcnn_config.feature_extractor.type) + is_keras = tf_version.is_tf2() + + if is_keras: + feature_extractor = _build_faster_rcnn_keras_feature_extractor( + frcnn_config.feature_extractor, is_training, + inplace_batchnorm_update=frcnn_config.inplace_batchnorm_update) + else: + feature_extractor = _build_faster_rcnn_feature_extractor( + frcnn_config.feature_extractor, is_training, + inplace_batchnorm_update=frcnn_config.inplace_batchnorm_update) + + number_of_stages = frcnn_config.number_of_stages + first_stage_anchor_generator = anchor_generator_builder.build( + frcnn_config.first_stage_anchor_generator) + + first_stage_target_assigner = target_assigner.create_target_assigner( + 'FasterRCNN', + 'proposal', + use_matmul_gather=frcnn_config.use_matmul_gather_in_matcher) + first_stage_atrous_rate = frcnn_config.first_stage_atrous_rate + if is_keras: + first_stage_box_predictor_arg_scope_fn = ( + hyperparams_builder.KerasLayerHyperparams( + frcnn_config.first_stage_box_predictor_conv_hyperparams)) + else: + first_stage_box_predictor_arg_scope_fn = hyperparams_builder.build( + frcnn_config.first_stage_box_predictor_conv_hyperparams, is_training) + first_stage_box_predictor_kernel_size = ( + frcnn_config.first_stage_box_predictor_kernel_size) + first_stage_box_predictor_depth = frcnn_config.first_stage_box_predictor_depth + first_stage_minibatch_size = frcnn_config.first_stage_minibatch_size + use_static_shapes = frcnn_config.use_static_shapes and ( + frcnn_config.use_static_shapes_for_eval or is_training) + first_stage_sampler = sampler.BalancedPositiveNegativeSampler( + positive_fraction=frcnn_config.first_stage_positive_balance_fraction, + is_static=(frcnn_config.use_static_balanced_label_sampler and + use_static_shapes)) + first_stage_max_proposals = frcnn_config.first_stage_max_proposals + if (frcnn_config.first_stage_nms_iou_threshold < 0 or + frcnn_config.first_stage_nms_iou_threshold > 1.0): + raise ValueError('iou_threshold not in [0, 1.0].') + if (is_training and frcnn_config.second_stage_batch_size > + first_stage_max_proposals): + raise ValueError('second_stage_batch_size should be no greater than ' + 'first_stage_max_proposals.') + first_stage_non_max_suppression_fn = functools.partial( + post_processing.batch_multiclass_non_max_suppression, + score_thresh=frcnn_config.first_stage_nms_score_threshold, + iou_thresh=frcnn_config.first_stage_nms_iou_threshold, + max_size_per_class=frcnn_config.first_stage_max_proposals, + max_total_size=frcnn_config.first_stage_max_proposals, + use_static_shapes=use_static_shapes, + use_partitioned_nms=frcnn_config.use_partitioned_nms_in_first_stage, + use_combined_nms=frcnn_config.use_combined_nms_in_first_stage) + first_stage_loc_loss_weight = ( + frcnn_config.first_stage_localization_loss_weight) + first_stage_obj_loss_weight = 
frcnn_config.first_stage_objectness_loss_weight + + initial_crop_size = frcnn_config.initial_crop_size + maxpool_kernel_size = frcnn_config.maxpool_kernel_size + maxpool_stride = frcnn_config.maxpool_stride + + second_stage_target_assigner = target_assigner.create_target_assigner( + 'FasterRCNN', + 'detection', + use_matmul_gather=frcnn_config.use_matmul_gather_in_matcher) + if is_keras: + second_stage_box_predictor = box_predictor_builder.build_keras( + hyperparams_builder.KerasLayerHyperparams, + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[1], + box_predictor_config=frcnn_config.second_stage_box_predictor, + is_training=is_training, + num_classes=num_classes) + else: + second_stage_box_predictor = box_predictor_builder.build( + hyperparams_builder.build, + frcnn_config.second_stage_box_predictor, + is_training=is_training, + num_classes=num_classes) + second_stage_batch_size = frcnn_config.second_stage_batch_size + second_stage_sampler = sampler.BalancedPositiveNegativeSampler( + positive_fraction=frcnn_config.second_stage_balance_fraction, + is_static=(frcnn_config.use_static_balanced_label_sampler and + use_static_shapes)) + (second_stage_non_max_suppression_fn, second_stage_score_conversion_fn + ) = post_processing_builder.build(frcnn_config.second_stage_post_processing) + second_stage_localization_loss_weight = ( + frcnn_config.second_stage_localization_loss_weight) + second_stage_classification_loss = ( + losses_builder.build_faster_rcnn_classification_loss( + frcnn_config.second_stage_classification_loss)) + second_stage_classification_loss_weight = ( + frcnn_config.second_stage_classification_loss_weight) + second_stage_mask_prediction_loss_weight = ( + frcnn_config.second_stage_mask_prediction_loss_weight) + + hard_example_miner = None + if frcnn_config.HasField('hard_example_miner'): + hard_example_miner = losses_builder.build_hard_example_miner( + frcnn_config.hard_example_miner, + second_stage_classification_loss_weight, + second_stage_localization_loss_weight) + + crop_and_resize_fn = ( + ops.matmul_crop_and_resize if frcnn_config.use_matmul_crop_and_resize + else ops.native_crop_and_resize) + clip_anchors_to_image = ( + frcnn_config.clip_anchors_to_image) + + common_kwargs = { + 'is_training': + is_training, + 'num_classes': + num_classes, + 'image_resizer_fn': + image_resizer_fn, + 'feature_extractor': + feature_extractor, + 'number_of_stages': + number_of_stages, + 'first_stage_anchor_generator': + first_stage_anchor_generator, + 'first_stage_target_assigner': + first_stage_target_assigner, + 'first_stage_atrous_rate': + first_stage_atrous_rate, + 'first_stage_box_predictor_arg_scope_fn': + first_stage_box_predictor_arg_scope_fn, + 'first_stage_box_predictor_kernel_size': + first_stage_box_predictor_kernel_size, + 'first_stage_box_predictor_depth': + first_stage_box_predictor_depth, + 'first_stage_minibatch_size': + first_stage_minibatch_size, + 'first_stage_sampler': + first_stage_sampler, + 'first_stage_non_max_suppression_fn': + first_stage_non_max_suppression_fn, + 'first_stage_max_proposals': + first_stage_max_proposals, + 'first_stage_localization_loss_weight': + first_stage_loc_loss_weight, + 'first_stage_objectness_loss_weight': + first_stage_obj_loss_weight, + 'second_stage_target_assigner': + second_stage_target_assigner, + 'second_stage_batch_size': + second_stage_batch_size, + 'second_stage_sampler': + second_stage_sampler, + 'second_stage_non_max_suppression_fn': + second_stage_non_max_suppression_fn, + 
'second_stage_score_conversion_fn': + second_stage_score_conversion_fn, + 'second_stage_localization_loss_weight': + second_stage_localization_loss_weight, + 'second_stage_classification_loss': + second_stage_classification_loss, + 'second_stage_classification_loss_weight': + second_stage_classification_loss_weight, + 'hard_example_miner': + hard_example_miner, + 'add_summaries': + add_summaries, + 'crop_and_resize_fn': + crop_and_resize_fn, + 'clip_anchors_to_image': + clip_anchors_to_image, + 'use_static_shapes': + use_static_shapes, + 'resize_masks': + frcnn_config.resize_masks, + 'return_raw_detections_during_predict': + frcnn_config.return_raw_detections_during_predict, + 'output_final_box_features': + frcnn_config.output_final_box_features + } + + if ((not is_keras and isinstance(second_stage_box_predictor, + rfcn_box_predictor.RfcnBoxPredictor)) or + (is_keras and + isinstance(second_stage_box_predictor, + rfcn_keras_box_predictor.RfcnKerasBoxPredictor))): + return rfcn_meta_arch.RFCNMetaArch( + second_stage_rfcn_box_predictor=second_stage_box_predictor, + **common_kwargs) + elif frcnn_config.HasField('context_config'): + context_config = frcnn_config.context_config + common_kwargs.update({ + 'attention_bottleneck_dimension': + context_config.attention_bottleneck_dimension, + 'attention_temperature': + context_config.attention_temperature + }) + return context_rcnn_meta_arch.ContextRCNNMetaArch( + initial_crop_size=initial_crop_size, + maxpool_kernel_size=maxpool_kernel_size, + maxpool_stride=maxpool_stride, + second_stage_mask_rcnn_box_predictor=second_stage_box_predictor, + second_stage_mask_prediction_loss_weight=( + second_stage_mask_prediction_loss_weight), + **common_kwargs) + else: + return faster_rcnn_meta_arch.FasterRCNNMetaArch( + initial_crop_size=initial_crop_size, + maxpool_kernel_size=maxpool_kernel_size, + maxpool_stride=maxpool_stride, + second_stage_mask_rcnn_box_predictor=second_stage_box_predictor, + second_stage_mask_prediction_loss_weight=( + second_stage_mask_prediction_loss_weight), + **common_kwargs) + +EXPERIMENTAL_META_ARCH_BUILDER_MAP = { +} + + +def _build_experimental_model(config, is_training, add_summaries=True): + return EXPERIMENTAL_META_ARCH_BUILDER_MAP[config.name]( + is_training, add_summaries) + + +# The class ID in the groundtruth/model architecture is usually 0-based while +# the ID in the label map is 1-based. The offset is used to convert between the +# the two. 
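+# For example, a (hypothetical) label map entry with id: 1 corresponds to class
+# index 0 in the model's outputs, i.e. class_index = id - CLASS_ID_OFFSET.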
+CLASS_ID_OFFSET = 1 +KEYPOINT_STD_DEV_DEFAULT = 1.0 + + +def keypoint_proto_to_params(kp_config, keypoint_map_dict): + """Converts CenterNet.KeypointEstimation proto to parameter namedtuple.""" + label_map_item = keypoint_map_dict[kp_config.keypoint_class_name] + + classification_loss, localization_loss, _, _, _, _, _ = ( + losses_builder.build(kp_config.loss)) + + keypoint_indices = [ + keypoint.id for keypoint in label_map_item.keypoints + ] + keypoint_labels = [ + keypoint.label for keypoint in label_map_item.keypoints + ] + keypoint_std_dev_dict = { + label: KEYPOINT_STD_DEV_DEFAULT for label in keypoint_labels + } + if kp_config.keypoint_label_to_std: + for label, value in kp_config.keypoint_label_to_std.items(): + keypoint_std_dev_dict[label] = value + keypoint_std_dev = [keypoint_std_dev_dict[label] for label in keypoint_labels] + return center_net_meta_arch.KeypointEstimationParams( + task_name=kp_config.task_name, + class_id=label_map_item.id - CLASS_ID_OFFSET, + keypoint_indices=keypoint_indices, + classification_loss=classification_loss, + localization_loss=localization_loss, + keypoint_labels=keypoint_labels, + keypoint_std_dev=keypoint_std_dev, + task_loss_weight=kp_config.task_loss_weight, + keypoint_regression_loss_weight=kp_config.keypoint_regression_loss_weight, + keypoint_heatmap_loss_weight=kp_config.keypoint_heatmap_loss_weight, + keypoint_offset_loss_weight=kp_config.keypoint_offset_loss_weight, + heatmap_bias_init=kp_config.heatmap_bias_init, + keypoint_candidate_score_threshold=( + kp_config.keypoint_candidate_score_threshold), + num_candidates_per_keypoint=kp_config.num_candidates_per_keypoint, + peak_max_pool_kernel_size=kp_config.peak_max_pool_kernel_size, + unmatched_keypoint_score=kp_config.unmatched_keypoint_score, + box_scale=kp_config.box_scale, + candidate_search_scale=kp_config.candidate_search_scale, + candidate_ranking_mode=kp_config.candidate_ranking_mode, + offset_peak_radius=kp_config.offset_peak_radius, + per_keypoint_offset=kp_config.per_keypoint_offset) + + +def object_detection_proto_to_params(od_config): + """Converts CenterNet.ObjectDetection proto to parameter namedtuple.""" + loss = losses_pb2.Loss() + # Add dummy classification loss to avoid the loss_builder throwing error. + # TODO(yuhuic): update the loss builder to take the classification loss + # directly. + loss.classification_loss.weighted_sigmoid.CopyFrom( + losses_pb2.WeightedSigmoidClassificationLoss()) + loss.localization_loss.CopyFrom(od_config.localization_loss) + _, localization_loss, _, _, _, _, _ = (losses_builder.build(loss)) + return center_net_meta_arch.ObjectDetectionParams( + localization_loss=localization_loss, + scale_loss_weight=od_config.scale_loss_weight, + offset_loss_weight=od_config.offset_loss_weight, + task_loss_weight=od_config.task_loss_weight) + + +def object_center_proto_to_params(oc_config): + """Converts CenterNet.ObjectCenter proto to parameter namedtuple.""" + loss = losses_pb2.Loss() + # Add dummy localization loss to avoid the loss_builder throwing error. + # TODO(yuhuic): update the loss builder to take the localization loss + # directly. 
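+  # losses_builder.build raises a ValueError if either the classification or
+  # the localization loss is left unset (see losses_builder_test above), so a
+  # placeholder weighted_l2 localization loss is supplied here.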
+ loss.localization_loss.weighted_l2.CopyFrom( + losses_pb2.WeightedL2LocalizationLoss()) + loss.classification_loss.CopyFrom(oc_config.classification_loss) + classification_loss, _, _, _, _, _, _ = (losses_builder.build(loss)) + return center_net_meta_arch.ObjectCenterParams( + classification_loss=classification_loss, + object_center_loss_weight=oc_config.object_center_loss_weight, + heatmap_bias_init=oc_config.heatmap_bias_init, + min_box_overlap_iou=oc_config.min_box_overlap_iou, + max_box_predictions=oc_config.max_box_predictions, + use_labeled_classes=oc_config.use_labeled_classes) + + +def mask_proto_to_params(mask_config): + """Converts CenterNet.MaskEstimation proto to parameter namedtuple.""" + loss = losses_pb2.Loss() + # Add dummy localization loss to avoid the loss_builder throwing error. + loss.localization_loss.weighted_l2.CopyFrom( + losses_pb2.WeightedL2LocalizationLoss()) + loss.classification_loss.CopyFrom(mask_config.classification_loss) + classification_loss, _, _, _, _, _, _ = (losses_builder.build(loss)) + return center_net_meta_arch.MaskParams( + classification_loss=classification_loss, + task_loss_weight=mask_config.task_loss_weight, + mask_height=mask_config.mask_height, + mask_width=mask_config.mask_width, + score_threshold=mask_config.score_threshold, + heatmap_bias_init=mask_config.heatmap_bias_init) + + +def _build_center_net_model(center_net_config, is_training, add_summaries): + """Build a CenterNet detection model. + + Args: + center_net_config: A CenterNet proto object with model configuration. + is_training: True if this model is being built for training purposes. + add_summaries: Whether to add tf summaries in the model. + + Returns: + CenterNetMetaArch based on the config. + + """ + + image_resizer_fn = image_resizer_builder.build( + center_net_config.image_resizer) + _check_feature_extractor_exists(center_net_config.feature_extractor.type) + feature_extractor = _build_center_net_feature_extractor( + center_net_config.feature_extractor) + object_center_params = object_center_proto_to_params( + center_net_config.object_center_params) + + object_detection_params = None + if center_net_config.HasField('object_detection_task'): + object_detection_params = object_detection_proto_to_params( + center_net_config.object_detection_task) + + keypoint_params_dict = None + if center_net_config.keypoint_estimation_task: + label_map_proto = label_map_util.load_labelmap( + center_net_config.keypoint_label_map_path) + keypoint_map_dict = { + item.name: item for item in label_map_proto.item if item.keypoints + } + keypoint_params_dict = {} + keypoint_class_id_set = set() + all_keypoint_indices = [] + for task in center_net_config.keypoint_estimation_task: + kp_params = keypoint_proto_to_params(task, keypoint_map_dict) + keypoint_params_dict[task.task_name] = kp_params + all_keypoint_indices.extend(kp_params.keypoint_indices) + if kp_params.class_id in keypoint_class_id_set: + raise ValueError(('Multiple keypoint tasks map to the same class id is ' + 'not allowed: %d' % kp_params.class_id)) + else: + keypoint_class_id_set.add(kp_params.class_id) + if len(all_keypoint_indices) > len(set(all_keypoint_indices)): + raise ValueError('Some keypoint indices are used more than once.') + + mask_params = None + if center_net_config.HasField('mask_estimation_task'): + mask_params = mask_proto_to_params(center_net_config.mask_estimation_task) + + return center_net_meta_arch.CenterNetMetaArch( + is_training=is_training, + add_summaries=add_summaries, + 
num_classes=center_net_config.num_classes, + feature_extractor=feature_extractor, + image_resizer_fn=image_resizer_fn, + object_center_params=object_center_params, + object_detection_params=object_detection_params, + keypoint_params_dict=keypoint_params_dict, + mask_params=mask_params) + + +def _build_center_net_feature_extractor( + feature_extractor_config): + """Build a CenterNet feature extractor from the given config.""" + + if feature_extractor_config.type not in CENTER_NET_EXTRACTOR_FUNCTION_MAP: + raise ValueError('\'{}\' is not a known CenterNet feature extractor type' + .format(feature_extractor_config.type)) + + return CENTER_NET_EXTRACTOR_FUNCTION_MAP[feature_extractor_config.type]( + channel_means=list(feature_extractor_config.channel_means), + channel_stds=list(feature_extractor_config.channel_stds), + bgr_ordering=feature_extractor_config.bgr_ordering + ) + + +META_ARCH_BUILDER_MAP = { + 'ssd': _build_ssd_model, + 'faster_rcnn': _build_faster_rcnn_model, + 'experimental_model': _build_experimental_model, + 'center_net': _build_center_net_model +} + + +def build(model_config, is_training, add_summaries=True): + """Builds a DetectionModel based on the model config. + + Args: + model_config: A model.proto object containing the config for the desired + DetectionModel. + is_training: True if this model is being built for training purposes. + add_summaries: Whether to add tensorflow summaries in the model graph. + Returns: + DetectionModel based on the config. + + Raises: + ValueError: On invalid meta architecture or model. + """ + if not isinstance(model_config, model_pb2.DetectionModel): + raise ValueError('model_config not of type model_pb2.DetectionModel.') + + meta_architecture = model_config.WhichOneof('model') + + if meta_architecture not in META_ARCH_BUILDER_MAP: + raise ValueError('Unknown meta architecture: {}'.format(meta_architecture)) + else: + build_func = META_ARCH_BUILDER_MAP[meta_architecture] + return build_func(getattr(model_config, meta_architecture), is_training, + add_summaries) diff --git a/models/research/object_detection/builders/model_builder_test.py b/models/research/object_detection/builders/model_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..511c640f3e7413bf7e1e31948547efb9daf76e4f --- /dev/null +++ b/models/research/object_detection/builders/model_builder_test.py @@ -0,0 +1,352 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for object_detection.models.model_builder.""" + +from absl.testing import parameterized + +from google.protobuf import text_format +from object_detection.builders import model_builder +from object_detection.meta_architectures import faster_rcnn_meta_arch +from object_detection.meta_architectures import rfcn_meta_arch +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.protos import hyperparams_pb2 +from object_detection.protos import losses_pb2 +from object_detection.protos import model_pb2 +from object_detection.utils import test_case + + +class ModelBuilderTest(test_case.TestCase, parameterized.TestCase): + + def default_ssd_feature_extractor(self): + raise NotImplementedError + + def default_faster_rcnn_feature_extractor(self): + raise NotImplementedError + + def ssd_feature_extractors(self): + raise NotImplementedError + + def faster_rcnn_feature_extractors(self): + raise NotImplementedError + + def create_model(self, model_config, is_training=True): + """Builds a DetectionModel based on the model config. + + Args: + model_config: A model.proto object containing the config for the desired + DetectionModel. + is_training: True if this model is being built for training purposes. + + Returns: + DetectionModel based on the config. + """ + return model_builder.build(model_config, is_training=is_training) + + def create_default_ssd_model_proto(self): + """Creates a DetectionModel proto with ssd model fields populated.""" + model_text_proto = """ + ssd { + feature_extractor { + conv_hyperparams { + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + } + override_base_feature_extractor_hyperparams: true + } + box_coder { + faster_rcnn_box_coder { + } + } + matcher { + argmax_matcher { + } + } + similarity_calculator { + iou_similarity { + } + } + anchor_generator { + ssd_anchor_generator { + aspect_ratios: 1.0 + } + } + image_resizer { + fixed_shape_resizer { + height: 320 + width: 320 + } + } + box_predictor { + convolutional_box_predictor { + conv_hyperparams { + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + } + } + } + loss { + classification_loss { + weighted_softmax { + } + } + localization_loss { + weighted_smooth_l1 { + } + } + } + }""" + model_proto = model_pb2.DetectionModel() + text_format.Merge(model_text_proto, model_proto) + model_proto.ssd.feature_extractor.type = (self. 
+ default_ssd_feature_extractor()) + return model_proto + + def create_default_faster_rcnn_model_proto(self): + """Creates a DetectionModel proto with FasterRCNN model fields populated.""" + model_text_proto = """ + faster_rcnn { + inplace_batchnorm_update: false + num_classes: 3 + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 600 + max_dimension: 1024 + } + } + first_stage_anchor_generator { + grid_anchor_generator { + scales: [0.25, 0.5, 1.0, 2.0] + aspect_ratios: [0.5, 1.0, 2.0] + height_stride: 16 + width_stride: 16 + } + } + first_stage_box_predictor_conv_hyperparams { + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + } + initial_crop_size: 14 + maxpool_kernel_size: 2 + maxpool_stride: 2 + second_stage_box_predictor { + mask_rcnn_box_predictor { + conv_hyperparams { + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + } + fc_hyperparams { + op: FC + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + } + } + } + second_stage_post_processing { + batch_non_max_suppression { + score_threshold: 0.01 + iou_threshold: 0.6 + max_detections_per_class: 100 + max_total_detections: 300 + } + score_converter: SOFTMAX + } + }""" + model_proto = model_pb2.DetectionModel() + text_format.Merge(model_text_proto, model_proto) + (model_proto.faster_rcnn.feature_extractor.type + ) = self.default_faster_rcnn_feature_extractor() + return model_proto + + def test_create_ssd_models_from_config(self): + model_proto = self.create_default_ssd_model_proto() + for extractor_type, extractor_class in self.ssd_feature_extractors().items( + ): + model_proto.ssd.feature_extractor.type = extractor_type + model = model_builder.build(model_proto, is_training=True) + self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch) + self.assertIsInstance(model._feature_extractor, extractor_class) + + def test_create_ssd_fpn_model_from_config(self): + model_proto = self.create_default_ssd_model_proto() + model_proto.ssd.feature_extractor.fpn.min_level = 3 + model_proto.ssd.feature_extractor.fpn.max_level = 7 + model = model_builder.build(model_proto, is_training=True) + self.assertEqual(model._feature_extractor._fpn_min_level, 3) + self.assertEqual(model._feature_extractor._fpn_max_level, 7) + + + @parameterized.named_parameters( + { + 'testcase_name': 'mask_rcnn_with_matmul', + 'use_matmul_crop_and_resize': False, + 'enable_mask_prediction': True + }, + { + 'testcase_name': 'mask_rcnn_without_matmul', + 'use_matmul_crop_and_resize': True, + 'enable_mask_prediction': True + }, + { + 'testcase_name': 'faster_rcnn_with_matmul', + 'use_matmul_crop_and_resize': False, + 'enable_mask_prediction': False + }, + { + 'testcase_name': 'faster_rcnn_without_matmul', + 'use_matmul_crop_and_resize': True, + 'enable_mask_prediction': False + }, + ) + def test_create_faster_rcnn_models_from_config(self, + use_matmul_crop_and_resize, + enable_mask_prediction): + model_proto = self.create_default_faster_rcnn_model_proto() + faster_rcnn_config = model_proto.faster_rcnn + faster_rcnn_config.use_matmul_crop_and_resize = use_matmul_crop_and_resize + if enable_mask_prediction: + faster_rcnn_config.second_stage_mask_prediction_loss_weight = 3.0 + mask_predictor_config = ( + faster_rcnn_config.second_stage_box_predictor.mask_rcnn_box_predictor) + mask_predictor_config.predict_instance_masks = True + + for extractor_type, extractor_class in ( + self.faster_rcnn_feature_extractors().items()): + 
faster_rcnn_config.feature_extractor.type = extractor_type + model = model_builder.build(model_proto, is_training=True) + self.assertIsInstance(model, faster_rcnn_meta_arch.FasterRCNNMetaArch) + self.assertIsInstance(model._feature_extractor, extractor_class) + if enable_mask_prediction: + self.assertAlmostEqual(model._second_stage_mask_loss_weight, 3.0) + + def test_create_faster_rcnn_model_from_config_with_example_miner(self): + model_proto = self.create_default_faster_rcnn_model_proto() + model_proto.faster_rcnn.hard_example_miner.num_hard_examples = 64 + model = model_builder.build(model_proto, is_training=True) + self.assertIsNotNone(model._hard_example_miner) + + def test_create_rfcn_model_from_config(self): + model_proto = self.create_default_faster_rcnn_model_proto() + rfcn_predictor_config = ( + model_proto.faster_rcnn.second_stage_box_predictor.rfcn_box_predictor) + rfcn_predictor_config.conv_hyperparams.op = hyperparams_pb2.Hyperparams.CONV + for extractor_type, extractor_class in ( + self.faster_rcnn_feature_extractors().items()): + model_proto.faster_rcnn.feature_extractor.type = extractor_type + model = model_builder.build(model_proto, is_training=True) + self.assertIsInstance(model, rfcn_meta_arch.RFCNMetaArch) + self.assertIsInstance(model._feature_extractor, extractor_class) + + @parameterized.parameters(True, False) + def test_create_faster_rcnn_from_config_with_crop_feature( + self, output_final_box_features): + model_proto = self.create_default_faster_rcnn_model_proto() + model_proto.faster_rcnn.output_final_box_features = ( + output_final_box_features) + _ = model_builder.build(model_proto, is_training=True) + + def test_invalid_model_config_proto(self): + model_proto = '' + with self.assertRaisesRegex( + ValueError, 'model_config not of type model_pb2.DetectionModel.'): + model_builder.build(model_proto, is_training=True) + + def test_unknown_meta_architecture(self): + model_proto = model_pb2.DetectionModel() + with self.assertRaisesRegex(ValueError, 'Unknown meta architecture'): + model_builder.build(model_proto, is_training=True) + + def test_unknown_ssd_feature_extractor(self): + model_proto = self.create_default_ssd_model_proto() + model_proto.ssd.feature_extractor.type = 'unknown_feature_extractor' + with self.assertRaises(ValueError): + model_builder.build(model_proto, is_training=True) + + def test_unknown_faster_rcnn_feature_extractor(self): + model_proto = self.create_default_faster_rcnn_model_proto() + model_proto.faster_rcnn.feature_extractor.type = 'unknown_feature_extractor' + with self.assertRaises(ValueError): + model_builder.build(model_proto, is_training=True) + + def test_invalid_first_stage_nms_iou_threshold(self): + model_proto = self.create_default_faster_rcnn_model_proto() + model_proto.faster_rcnn.first_stage_nms_iou_threshold = 1.1 + with self.assertRaisesRegex(ValueError, + r'iou_threshold not in \[0, 1\.0\]'): + model_builder.build(model_proto, is_training=True) + model_proto.faster_rcnn.first_stage_nms_iou_threshold = -0.1 + with self.assertRaisesRegex(ValueError, + r'iou_threshold not in \[0, 1\.0\]'): + model_builder.build(model_proto, is_training=True) + + def test_invalid_second_stage_batch_size(self): + model_proto = self.create_default_faster_rcnn_model_proto() + model_proto.faster_rcnn.first_stage_max_proposals = 1 + model_proto.faster_rcnn.second_stage_batch_size = 2 + with self.assertRaisesRegex( + ValueError, 'second_stage_batch_size should be no greater ' + 'than first_stage_max_proposals.'): + model_builder.build(model_proto, 
is_training=True) + + def test_invalid_faster_rcnn_batchnorm_update(self): + model_proto = self.create_default_faster_rcnn_model_proto() + model_proto.faster_rcnn.inplace_batchnorm_update = True + with self.assertRaisesRegex(ValueError, + 'inplace batchnorm updates not supported'): + model_builder.build(model_proto, is_training=True) + + def test_create_experimental_model(self): + + model_text_proto = """ + experimental_model { + name: 'model42' + }""" + + build_func = lambda *args: 42 + model_builder.EXPERIMENTAL_META_ARCH_BUILDER_MAP['model42'] = build_func + model_proto = model_pb2.DetectionModel() + text_format.Merge(model_text_proto, model_proto) + + self.assertEqual(model_builder.build(model_proto, is_training=True), 42) diff --git a/models/research/object_detection/builders/model_builder_tf1_test.py b/models/research/object_detection/builders/model_builder_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..083275ac466250220dd532f52288bab9a5a66daf --- /dev/null +++ b/models/research/object_detection/builders/model_builder_tf1_test.py @@ -0,0 +1,55 @@ +# Lint as: python2, python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for model_builder under TensorFlow 1.X.""" +import unittest +from absl.testing import parameterized +import tensorflow.compat.v1 as tf + +from object_detection.builders import model_builder +from object_detection.builders import model_builder_test +from object_detection.meta_architectures import context_rcnn_meta_arch +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.protos import losses_pb2 +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class ModelBuilderTF1Test(model_builder_test.ModelBuilderTest): + + def default_ssd_feature_extractor(self): + return 'ssd_resnet50_v1_fpn' + + def default_faster_rcnn_feature_extractor(self): + return 'faster_rcnn_resnet101' + + def ssd_feature_extractors(self): + return model_builder.SSD_FEATURE_EXTRACTOR_CLASS_MAP + + def faster_rcnn_feature_extractors(self): + return model_builder.FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP + + + @parameterized.parameters(True, False) + def test_create_context_rcnn_from_config_with_params(self, is_training): + model_proto = self.create_default_faster_rcnn_model_proto() + model_proto.faster_rcnn.context_config.attention_bottleneck_dimension = 10 + model_proto.faster_rcnn.context_config.attention_temperature = 0.5 + model = model_builder.build(model_proto, is_training=is_training) + self.assertIsInstance(model, context_rcnn_meta_arch.ContextRCNNMetaArch) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/builders/model_builder_tf2_test.py b/models/research/object_detection/builders/model_builder_tf2_test.py new file mode 100644 index 
0000000000000000000000000000000000000000..c2cd237292ab8cb534aa760380fb31e7a68f1e43 --- /dev/null +++ b/models/research/object_detection/builders/model_builder_tf2_test.py @@ -0,0 +1,261 @@ +# Lint as: python2, python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for model_builder under TensorFlow 2.X.""" + +import os +import unittest + +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import model_builder +from object_detection.builders import model_builder_test +from object_detection.core import losses +from object_detection.models import center_net_resnet_feature_extractor +from object_detection.protos import center_net_pb2 +from object_detection.protos import model_pb2 +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class ModelBuilderTF2Test(model_builder_test.ModelBuilderTest): + + def default_ssd_feature_extractor(self): + return 'ssd_resnet50_v1_fpn_keras' + + def default_faster_rcnn_feature_extractor(self): + return 'faster_rcnn_resnet101_keras' + + def ssd_feature_extractors(self): + return model_builder.SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP + + def faster_rcnn_feature_extractors(self): + return model_builder.FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP + + def get_fake_label_map_file_path(self): + keypoint_spec_text = """ + item { + name: "/m/01g317" + id: 1 + display_name: "person" + keypoints { + id: 0 + label: 'nose' + } + keypoints { + id: 1 + label: 'left_shoulder' + } + keypoints { + id: 2 + label: 'right_shoulder' + } + keypoints { + id: 3 + label: 'hip' + } + } + """ + keypoint_label_map_path = os.path.join( + self.get_temp_dir(), 'keypoint_label_map') + with tf.gfile.Open(keypoint_label_map_path, 'wb') as f: + f.write(keypoint_spec_text) + return keypoint_label_map_path + + def get_fake_keypoint_proto(self): + task_proto_txt = """ + task_name: "human_pose" + task_loss_weight: 0.9 + keypoint_regression_loss_weight: 1.0 + keypoint_heatmap_loss_weight: 0.1 + keypoint_offset_loss_weight: 0.5 + heatmap_bias_init: 2.14 + keypoint_class_name: "/m/01g317" + loss { + classification_loss { + penalty_reduced_logistic_focal_loss { + alpha: 3.0 + beta: 4.0 + } + } + localization_loss { + l1_localization_loss { + } + } + } + keypoint_label_to_std { + key: "nose" + value: 0.3 + } + keypoint_label_to_std { + key: "hip" + value: 0.0 + } + keypoint_candidate_score_threshold: 0.3 + num_candidates_per_keypoint: 12 + peak_max_pool_kernel_size: 5 + unmatched_keypoint_score: 0.05 + box_scale: 1.7 + candidate_search_scale: 0.2 + candidate_ranking_mode: "score_distance_ratio" + offset_peak_radius: 3 + per_keypoint_offset: true + """ + config = text_format.Merge(task_proto_txt, + center_net_pb2.CenterNet.KeypointEstimation()) + return config + + def get_fake_object_center_proto(self): + proto_txt = """ + 
object_center_loss_weight: 0.5 + heatmap_bias_init: 3.14 + min_box_overlap_iou: 0.2 + max_box_predictions: 15 + classification_loss { + penalty_reduced_logistic_focal_loss { + alpha: 3.0 + beta: 4.0 + } + } + """ + return text_format.Merge(proto_txt, + center_net_pb2.CenterNet.ObjectCenterParams()) + + def get_fake_object_detection_proto(self): + proto_txt = """ + task_loss_weight: 0.5 + offset_loss_weight: 0.1 + scale_loss_weight: 0.2 + localization_loss { + l1_localization_loss { + } + } + """ + return text_format.Merge(proto_txt, + center_net_pb2.CenterNet.ObjectDetection()) + + def get_fake_mask_proto(self): + proto_txt = """ + task_loss_weight: 0.7 + classification_loss { + weighted_softmax {} + } + mask_height: 8 + mask_width: 8 + score_threshold: 0.7 + heatmap_bias_init: -2.0 + """ + return text_format.Merge(proto_txt, + center_net_pb2.CenterNet.MaskEstimation()) + + def test_create_center_net_model(self): + """Test building a CenterNet model from proto txt.""" + proto_txt = """ + center_net { + num_classes: 10 + feature_extractor { + type: "resnet_v2_101" + channel_stds: [4, 5, 6] + bgr_ordering: true + } + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 512 + max_dimension: 512 + pad_to_max_dimension: true + } + } + } + """ + # Set up the configuration proto. + config = text_format.Merge(proto_txt, model_pb2.DetectionModel()) + config.center_net.object_center_params.CopyFrom( + self.get_fake_object_center_proto()) + config.center_net.object_detection_task.CopyFrom( + self.get_fake_object_detection_proto()) + config.center_net.keypoint_estimation_task.append( + self.get_fake_keypoint_proto()) + config.center_net.keypoint_label_map_path = ( + self.get_fake_label_map_file_path()) + config.center_net.mask_estimation_task.CopyFrom( + self.get_fake_mask_proto()) + + # Build the model from the configuration. + model = model_builder.build(config, is_training=True) + + # Check object center related parameters. + self.assertEqual(model._num_classes, 10) + self.assertIsInstance(model._center_params.classification_loss, + losses.PenaltyReducedLogisticFocalLoss) + self.assertEqual(model._center_params.classification_loss._alpha, 3.0) + self.assertEqual(model._center_params.classification_loss._beta, 4.0) + self.assertAlmostEqual(model._center_params.min_box_overlap_iou, 0.2) + self.assertAlmostEqual( + model._center_params.heatmap_bias_init, 3.14, places=4) + self.assertEqual(model._center_params.max_box_predictions, 15) + + # Check object detection related parameters. + self.assertAlmostEqual(model._od_params.offset_loss_weight, 0.1) + self.assertAlmostEqual(model._od_params.scale_loss_weight, 0.2) + self.assertAlmostEqual(model._od_params.task_loss_weight, 0.5) + self.assertIsInstance(model._od_params.localization_loss, + losses.L1LocalizationLoss) + + # Check keypoint estimation related parameters. 
+ kp_params = model._kp_params_dict['human_pose'] + self.assertAlmostEqual(kp_params.task_loss_weight, 0.9) + self.assertAlmostEqual(kp_params.keypoint_regression_loss_weight, 1.0) + self.assertAlmostEqual(kp_params.keypoint_offset_loss_weight, 0.5) + self.assertAlmostEqual(kp_params.heatmap_bias_init, 2.14, places=4) + self.assertEqual(kp_params.classification_loss._alpha, 3.0) + self.assertEqual(kp_params.keypoint_indices, [0, 1, 2, 3]) + self.assertEqual(kp_params.keypoint_labels, + ['nose', 'left_shoulder', 'right_shoulder', 'hip']) + self.assertAllClose(kp_params.keypoint_std_dev, [0.3, 1.0, 1.0, 0.0]) + self.assertEqual(kp_params.classification_loss._beta, 4.0) + self.assertIsInstance(kp_params.localization_loss, + losses.L1LocalizationLoss) + self.assertAlmostEqual(kp_params.keypoint_candidate_score_threshold, 0.3) + self.assertEqual(kp_params.num_candidates_per_keypoint, 12) + self.assertEqual(kp_params.peak_max_pool_kernel_size, 5) + self.assertAlmostEqual(kp_params.unmatched_keypoint_score, 0.05) + self.assertAlmostEqual(kp_params.box_scale, 1.7) + self.assertAlmostEqual(kp_params.candidate_search_scale, 0.2) + self.assertEqual(kp_params.candidate_ranking_mode, 'score_distance_ratio') + self.assertEqual(kp_params.offset_peak_radius, 3) + self.assertEqual(kp_params.per_keypoint_offset, True) + + # Check mask related parameters. + self.assertAlmostEqual(model._mask_params.task_loss_weight, 0.7) + self.assertIsInstance(model._mask_params.classification_loss, + losses.WeightedSoftmaxClassificationLoss) + self.assertEqual(model._mask_params.mask_height, 8) + self.assertEqual(model._mask_params.mask_width, 8) + self.assertAlmostEqual(model._mask_params.score_threshold, 0.7) + self.assertAlmostEqual( + model._mask_params.heatmap_bias_init, -2.0, places=4) + + # Check feature extractor parameters. + self.assertIsInstance( + model._feature_extractor, + center_net_resnet_feature_extractor.CenterNetResnetFeatureExtractor) + self.assertAllClose(model._feature_extractor._channel_means, [0, 0, 0]) + self.assertAllClose(model._feature_extractor._channel_stds, [4, 5, 6]) + self.assertTrue(model._feature_extractor._bgr_ordering) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/builders/optimizer_builder.py b/models/research/object_detection/builders/optimizer_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..d602bad1292e222b5cbc532a873299dd918ef011 --- /dev/null +++ b/models/research/object_detection/builders/optimizer_builder.py @@ -0,0 +1,205 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
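The optimizer builder added below exposes a single build() function that returns an (optimizer, summary_vars) pair and dispatches to a TF1 or TF2 implementation depending on whether eager execution is enabled. A minimal sketch of driving it from a text-format Optimizer proto; the hyperparameter values are illustrative, mirroring the accompanying tests:

    from google.protobuf import text_format
    from object_detection.builders import optimizer_builder
    from object_detection.protos import optimizer_pb2

    optimizer_proto = optimizer_pb2.Optimizer()
    text_format.Merge("""
      momentum_optimizer {
        learning_rate { constant_learning_rate { learning_rate: 0.01 } }
        momentum_optimizer_value: 0.9
      }
      use_moving_average: false
    """, optimizer_proto)
    # Yields tf.train.MomentumOptimizer in graph mode and
    # tf.keras.optimizers.SGD in eager mode.
    optimizer, summary_vars = optimizer_builder.build(optimizer_proto)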
+# ============================================================================== + +"""Functions to build DetectionModel training optimizers.""" + +import tensorflow.compat.v1 as tf + +from object_detection.utils import learning_schedules + +try: + from tensorflow.contrib import opt as tf_opt # pylint: disable=g-import-not-at-top +except: # pylint: disable=bare-except + pass + + +def build_optimizers_tf_v1(optimizer_config, global_step=None): + """Create a TF v1 compatible optimizer based on config. + + Args: + optimizer_config: A Optimizer proto message. + global_step: A variable representing the current step. + If None, defaults to tf.train.get_or_create_global_step() + + Returns: + An optimizer and a list of variables for summary. + + Raises: + ValueError: when using an unsupported input data type. + """ + optimizer_type = optimizer_config.WhichOneof('optimizer') + optimizer = None + + summary_vars = [] + if optimizer_type == 'rms_prop_optimizer': + config = optimizer_config.rms_prop_optimizer + learning_rate = _create_learning_rate(config.learning_rate, + global_step=global_step) + summary_vars.append(learning_rate) + optimizer = tf.train.RMSPropOptimizer( + learning_rate, + decay=config.decay, + momentum=config.momentum_optimizer_value, + epsilon=config.epsilon) + + if optimizer_type == 'momentum_optimizer': + config = optimizer_config.momentum_optimizer + learning_rate = _create_learning_rate(config.learning_rate, + global_step=global_step) + summary_vars.append(learning_rate) + optimizer = tf.train.MomentumOptimizer( + learning_rate, + momentum=config.momentum_optimizer_value) + + if optimizer_type == 'adam_optimizer': + config = optimizer_config.adam_optimizer + learning_rate = _create_learning_rate(config.learning_rate, + global_step=global_step) + summary_vars.append(learning_rate) + optimizer = tf.train.AdamOptimizer(learning_rate, epsilon=config.epsilon) + + + if optimizer is None: + raise ValueError('Optimizer %s not supported.' % optimizer_type) + + if optimizer_config.use_moving_average: + optimizer = tf_opt.MovingAverageOptimizer( + optimizer, average_decay=optimizer_config.moving_average_decay) + + return optimizer, summary_vars + + +def build_optimizers_tf_v2(optimizer_config, global_step=None): + """Create a TF v2 compatible optimizer based on config. + + Args: + optimizer_config: A Optimizer proto message. + global_step: A variable representing the current step. + If None, defaults to tf.train.get_or_create_global_step() + + Returns: + An optimizer and a list of variables for summary. + + Raises: + ValueError: when using an unsupported input data type. 
+ """ + optimizer_type = optimizer_config.WhichOneof('optimizer') + optimizer = None + + summary_vars = [] + if optimizer_type == 'rms_prop_optimizer': + config = optimizer_config.rms_prop_optimizer + learning_rate = _create_learning_rate(config.learning_rate, + global_step=global_step) + summary_vars.append(learning_rate) + optimizer = tf.keras.optimizers.RMSprop( + learning_rate, + decay=config.decay, + momentum=config.momentum_optimizer_value, + epsilon=config.epsilon) + + if optimizer_type == 'momentum_optimizer': + config = optimizer_config.momentum_optimizer + learning_rate = _create_learning_rate(config.learning_rate, + global_step=global_step) + summary_vars.append(learning_rate) + optimizer = tf.keras.optimizers.SGD( + learning_rate, + momentum=config.momentum_optimizer_value) + + if optimizer_type == 'adam_optimizer': + config = optimizer_config.adam_optimizer + learning_rate = _create_learning_rate(config.learning_rate, + global_step=global_step) + summary_vars.append(learning_rate) + optimizer = tf.keras.optimizers.Adam(learning_rate, epsilon=config.epsilon) + + if optimizer is None: + raise ValueError('Optimizer %s not supported.' % optimizer_type) + + if optimizer_config.use_moving_average: + raise ValueError('Moving average not supported in eager mode.') + + return optimizer, summary_vars + + +def build(config, global_step=None): + + if tf.executing_eagerly(): + return build_optimizers_tf_v2(config, global_step) + else: + return build_optimizers_tf_v1(config, global_step) + + +def _create_learning_rate(learning_rate_config, global_step=None): + """Create optimizer learning rate based on config. + + Args: + learning_rate_config: A LearningRate proto message. + global_step: A variable representing the current step. + If None, defaults to tf.train.get_or_create_global_step() + + Returns: + A learning rate. + + Raises: + ValueError: when using an unsupported input data type. 
+ """ + if global_step is None: + global_step = tf.train.get_or_create_global_step() + learning_rate = None + learning_rate_type = learning_rate_config.WhichOneof('learning_rate') + if learning_rate_type == 'constant_learning_rate': + config = learning_rate_config.constant_learning_rate + learning_rate = tf.constant(config.learning_rate, dtype=tf.float32, + name='learning_rate') + + if learning_rate_type == 'exponential_decay_learning_rate': + config = learning_rate_config.exponential_decay_learning_rate + learning_rate = learning_schedules.exponential_decay_with_burnin( + global_step, + config.initial_learning_rate, + config.decay_steps, + config.decay_factor, + burnin_learning_rate=config.burnin_learning_rate, + burnin_steps=config.burnin_steps, + min_learning_rate=config.min_learning_rate, + staircase=config.staircase) + + if learning_rate_type == 'manual_step_learning_rate': + config = learning_rate_config.manual_step_learning_rate + if not config.schedule: + raise ValueError('Empty learning rate schedule.') + learning_rate_step_boundaries = [x.step for x in config.schedule] + learning_rate_sequence = [config.initial_learning_rate] + learning_rate_sequence += [x.learning_rate for x in config.schedule] + learning_rate = learning_schedules.manual_stepping( + global_step, learning_rate_step_boundaries, + learning_rate_sequence, config.warmup) + + if learning_rate_type == 'cosine_decay_learning_rate': + config = learning_rate_config.cosine_decay_learning_rate + learning_rate = learning_schedules.cosine_decay_with_warmup( + global_step, + config.learning_rate_base, + config.total_steps, + config.warmup_learning_rate, + config.warmup_steps, + config.hold_base_rate_steps) + + if learning_rate is None: + raise ValueError('Learning_rate %s not supported.' % learning_rate_type) + + return learning_rate diff --git a/models/research/object_detection/builders/optimizer_builder_tf1_test.py b/models/research/object_detection/builders/optimizer_builder_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..350ecb84b11b3fbd87e584a5d8d23ae877089078 --- /dev/null +++ b/models/research/object_detection/builders/optimizer_builder_tf1_test.py @@ -0,0 +1,224 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for optimizer_builder.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest +import six +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format + +from object_detection.builders import optimizer_builder +from object_detection.protos import optimizer_pb2 +from object_detection.utils import tf_version + +# pylint: disable=g-import-not-at-top +if tf_version.is_tf1(): + from tensorflow.contrib import opt as contrib_opt +# pylint: enable=g-import-not-at-top + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class LearningRateBuilderTest(tf.test.TestCase): + + def testBuildConstantLearningRate(self): + learning_rate_text_proto = """ + constant_learning_rate { + learning_rate: 0.004 + } + """ + learning_rate_proto = optimizer_pb2.LearningRate() + text_format.Merge(learning_rate_text_proto, learning_rate_proto) + learning_rate = optimizer_builder._create_learning_rate( + learning_rate_proto) + self.assertTrue( + six.ensure_str(learning_rate.op.name).endswith('learning_rate')) + with self.test_session(): + learning_rate_out = learning_rate.eval() + self.assertAlmostEqual(learning_rate_out, 0.004) + + def testBuildExponentialDecayLearningRate(self): + learning_rate_text_proto = """ + exponential_decay_learning_rate { + initial_learning_rate: 0.004 + decay_steps: 99999 + decay_factor: 0.85 + staircase: false + } + """ + learning_rate_proto = optimizer_pb2.LearningRate() + text_format.Merge(learning_rate_text_proto, learning_rate_proto) + learning_rate = optimizer_builder._create_learning_rate( + learning_rate_proto) + self.assertTrue( + six.ensure_str(learning_rate.op.name).endswith('learning_rate')) + self.assertIsInstance(learning_rate, tf.Tensor) + + def testBuildManualStepLearningRate(self): + learning_rate_text_proto = """ + manual_step_learning_rate { + initial_learning_rate: 0.002 + schedule { + step: 100 + learning_rate: 0.006 + } + schedule { + step: 90000 + learning_rate: 0.00006 + } + warmup: true + } + """ + learning_rate_proto = optimizer_pb2.LearningRate() + text_format.Merge(learning_rate_text_proto, learning_rate_proto) + learning_rate = optimizer_builder._create_learning_rate( + learning_rate_proto) + self.assertIsInstance(learning_rate, tf.Tensor) + + def testBuildCosineDecayLearningRate(self): + learning_rate_text_proto = """ + cosine_decay_learning_rate { + learning_rate_base: 0.002 + total_steps: 20000 + warmup_learning_rate: 0.0001 + warmup_steps: 1000 + hold_base_rate_steps: 20000 + } + """ + learning_rate_proto = optimizer_pb2.LearningRate() + text_format.Merge(learning_rate_text_proto, learning_rate_proto) + learning_rate = optimizer_builder._create_learning_rate( + learning_rate_proto) + self.assertIsInstance(learning_rate, tf.Tensor) + + def testRaiseErrorOnEmptyLearningRate(self): + learning_rate_text_proto = """ + """ + learning_rate_proto = optimizer_pb2.LearningRate() + text_format.Merge(learning_rate_text_proto, learning_rate_proto) + with self.assertRaises(ValueError): + optimizer_builder._create_learning_rate(learning_rate_proto) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class OptimizerBuilderTest(tf.test.TestCase): + + def testBuildRMSPropOptimizer(self): + optimizer_text_proto = """ + rms_prop_optimizer: { + learning_rate: { + exponential_decay_learning_rate { + initial_learning_rate: 0.004 + decay_steps: 800720 + 
decay_factor: 0.95 + } + } + momentum_optimizer_value: 0.9 + decay: 0.9 + epsilon: 1.0 + } + use_moving_average: false + """ + optimizer_proto = optimizer_pb2.Optimizer() + text_format.Merge(optimizer_text_proto, optimizer_proto) + optimizer, _ = optimizer_builder.build(optimizer_proto) + self.assertIsInstance(optimizer, tf.train.RMSPropOptimizer) + + def testBuildMomentumOptimizer(self): + optimizer_text_proto = """ + momentum_optimizer: { + learning_rate: { + constant_learning_rate { + learning_rate: 0.001 + } + } + momentum_optimizer_value: 0.99 + } + use_moving_average: false + """ + optimizer_proto = optimizer_pb2.Optimizer() + text_format.Merge(optimizer_text_proto, optimizer_proto) + optimizer, _ = optimizer_builder.build(optimizer_proto) + self.assertIsInstance(optimizer, tf.train.MomentumOptimizer) + + def testBuildAdamOptimizer(self): + optimizer_text_proto = """ + adam_optimizer: { + epsilon: 1e-6 + learning_rate: { + constant_learning_rate { + learning_rate: 0.002 + } + } + } + use_moving_average: false + """ + optimizer_proto = optimizer_pb2.Optimizer() + text_format.Merge(optimizer_text_proto, optimizer_proto) + optimizer, _ = optimizer_builder.build(optimizer_proto) + self.assertIsInstance(optimizer, tf.train.AdamOptimizer) + + def testBuildMovingAverageOptimizer(self): + optimizer_text_proto = """ + adam_optimizer: { + learning_rate: { + constant_learning_rate { + learning_rate: 0.002 + } + } + } + use_moving_average: True + """ + optimizer_proto = optimizer_pb2.Optimizer() + text_format.Merge(optimizer_text_proto, optimizer_proto) + optimizer, _ = optimizer_builder.build(optimizer_proto) + self.assertIsInstance(optimizer, contrib_opt.MovingAverageOptimizer) + + def testBuildMovingAverageOptimizerWithNonDefaultDecay(self): + optimizer_text_proto = """ + adam_optimizer: { + learning_rate: { + constant_learning_rate { + learning_rate: 0.002 + } + } + } + use_moving_average: True + moving_average_decay: 0.2 + """ + optimizer_proto = optimizer_pb2.Optimizer() + text_format.Merge(optimizer_text_proto, optimizer_proto) + optimizer, _ = optimizer_builder.build(optimizer_proto) + self.assertIsInstance(optimizer, contrib_opt.MovingAverageOptimizer) + # TODO(rathodv): Find a way to not depend on the private members. + self.assertAlmostEqual(optimizer._ema._decay, 0.2) + + def testBuildEmptyOptimizer(self): + optimizer_text_proto = """ + """ + optimizer_proto = optimizer_pb2.Optimizer() + text_format.Merge(optimizer_text_proto, optimizer_proto) + with self.assertRaises(ValueError): + optimizer_builder.build(optimizer_proto) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/builders/optimizer_builder_tf2_test.py b/models/research/object_detection/builders/optimizer_builder_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..2c555f9a0f4c22b7c27955c92eaa3655c8fae5c6 --- /dev/null +++ b/models/research/object_detection/builders/optimizer_builder_tf2_test.py @@ -0,0 +1,104 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for optimizer_builder.""" +import unittest +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format + +from object_detection.builders import optimizer_builder +from object_detection.protos import optimizer_pb2 +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class OptimizerBuilderV2Test(tf.test.TestCase): + """Test building optimizers in V2 mode.""" + + def testBuildRMSPropOptimizer(self): + optimizer_text_proto = """ + rms_prop_optimizer: { + learning_rate: { + exponential_decay_learning_rate { + initial_learning_rate: 0.004 + decay_steps: 800720 + decay_factor: 0.95 + } + } + momentum_optimizer_value: 0.9 + decay: 0.9 + epsilon: 1.0 + } + use_moving_average: false + """ + optimizer_proto = optimizer_pb2.Optimizer() + text_format.Merge(optimizer_text_proto, optimizer_proto) + optimizer, _ = optimizer_builder.build(optimizer_proto) + self.assertIsInstance(optimizer, tf.keras.optimizers.RMSprop) + + def testBuildMomentumOptimizer(self): + optimizer_text_proto = """ + momentum_optimizer: { + learning_rate: { + constant_learning_rate { + learning_rate: 0.001 + } + } + momentum_optimizer_value: 0.99 + } + use_moving_average: false + """ + optimizer_proto = optimizer_pb2.Optimizer() + text_format.Merge(optimizer_text_proto, optimizer_proto) + optimizer, _ = optimizer_builder.build(optimizer_proto) + self.assertIsInstance(optimizer, tf.keras.optimizers.SGD) + + def testBuildAdamOptimizer(self): + optimizer_text_proto = """ + adam_optimizer: { + learning_rate: { + constant_learning_rate { + learning_rate: 0.002 + } + } + } + use_moving_average: false + """ + optimizer_proto = optimizer_pb2.Optimizer() + text_format.Merge(optimizer_text_proto, optimizer_proto) + optimizer, _ = optimizer_builder.build(optimizer_proto) + self.assertIsInstance(optimizer, tf.keras.optimizers.Adam) + + def testMovingAverageOptimizerUnsupported(self): + optimizer_text_proto = """ + adam_optimizer: { + learning_rate: { + constant_learning_rate { + learning_rate: 0.002 + } + } + } + use_moving_average: True + """ + optimizer_proto = optimizer_pb2.Optimizer() + text_format.Merge(optimizer_text_proto, optimizer_proto) + with self.assertRaises(ValueError): + optimizer_builder.build(optimizer_proto) + + +if __name__ == '__main__': + tf.enable_v2_behavior() + tf.test.main() diff --git a/models/research/object_detection/builders/post_processing_builder.py b/models/research/object_detection/builders/post_processing_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..18795f58ccb4a382bdc457c2d15e069d1bb52662 --- /dev/null +++ b/models/research/object_detection/builders/post_processing_builder.py @@ -0,0 +1,182 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
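The post-processing builder that follows returns two callables: a non-max suppression function and a score converter. A minimal sketch of building them from a text-format PostProcessing proto; the thresholds are illustrative, in line with the tests further below:

    from google.protobuf import text_format
    from object_detection.builders import post_processing_builder
    from object_detection.protos import post_processing_pb2

    pp_config = post_processing_pb2.PostProcessing()
    text_format.Merge("""
      batch_non_max_suppression {
        score_threshold: 0.5
        iou_threshold: 0.6
        max_detections_per_class: 100
        max_total_detections: 300
      }
      score_converter: SIGMOID
    """, pp_config)
    non_max_suppressor_fn, score_converter_fn = post_processing_builder.build(
        pp_config)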
+# ============================================================================== + +"""Builder function for post processing operations.""" +import functools + +import tensorflow.compat.v1 as tf +from object_detection.builders import calibration_builder +from object_detection.core import post_processing +from object_detection.protos import post_processing_pb2 + + +def build(post_processing_config): + """Builds callables for post-processing operations. + + Builds callables for non-max suppression, score conversion, and (optionally) + calibration based on the configuration. + + Non-max suppression callable takes `boxes`, `scores`, and optionally + `clip_window`, `parallel_iterations`, `masks`, and `scope` as inputs. It returns + `nms_boxes`, `nms_scores`, `nms_classes`, `nms_masks` and `num_detections`. See + post_processing.batch_multiclass_non_max_suppression for the type and shape + of these tensors. + + Score converter callable should be called with an `input` tensor. The callable + returns the output from one of 3 tf operations based on the configuration - + tf.identity, tf.sigmoid or tf.nn.softmax. If a calibration config is provided, + score_converter also applies calibration transformations, as defined in + calibration_builder.py. See tensorflow documentation for argument and return + value descriptions. + + Args: + post_processing_config: post_processing.proto object containing the + parameters for the post-processing operations. + + Returns: + non_max_suppressor_fn: Callable for non-max suppression. + score_converter_fn: Callable for score conversion. + + Raises: + ValueError: if the post_processing_config is of incorrect type. + """ + if not isinstance(post_processing_config, post_processing_pb2.PostProcessing): + raise ValueError('post_processing_config not of type ' + 'post_processing_pb2.Postprocessing.') + non_max_suppressor_fn = _build_non_max_suppressor( + post_processing_config.batch_non_max_suppression) + score_converter_fn = _build_score_converter( + post_processing_config.score_converter, + post_processing_config.logit_scale) + if post_processing_config.HasField('calibration_config'): + score_converter_fn = _build_calibrated_score_converter( + score_converter_fn, + post_processing_config.calibration_config) + return non_max_suppressor_fn, score_converter_fn + + +def _build_non_max_suppressor(nms_config): + """Builds non-max suppression based on the nms config. + + Args: + nms_config: post_processing_pb2.PostProcessing.BatchNonMaxSuppression proto. + + Returns: + non_max_suppressor_fn: Callable non-max suppressor. + + Raises: + ValueError: On incorrect iou_threshold or on incompatible values of + max_total_detections and max_detections_per_class or on negative + soft_nms_sigma.
+ """ + if nms_config.iou_threshold < 0 or nms_config.iou_threshold > 1.0: + raise ValueError('iou_threshold not in [0, 1.0].') + if nms_config.max_detections_per_class > nms_config.max_total_detections: + raise ValueError('max_detections_per_class should be no greater than ' + 'max_total_detections.') + if nms_config.soft_nms_sigma < 0.0: + raise ValueError('soft_nms_sigma should be non-negative.') + if nms_config.use_combined_nms and nms_config.use_class_agnostic_nms: + raise ValueError('combined_nms does not support class_agnostic_nms.') + non_max_suppressor_fn = functools.partial( + post_processing.batch_multiclass_non_max_suppression, + score_thresh=nms_config.score_threshold, + iou_thresh=nms_config.iou_threshold, + max_size_per_class=nms_config.max_detections_per_class, + max_total_size=nms_config.max_total_detections, + use_static_shapes=nms_config.use_static_shapes, + use_class_agnostic_nms=nms_config.use_class_agnostic_nms, + max_classes_per_detection=nms_config.max_classes_per_detection, + soft_nms_sigma=nms_config.soft_nms_sigma, + use_partitioned_nms=nms_config.use_partitioned_nms, + use_combined_nms=nms_config.use_combined_nms, + change_coordinate_frame=nms_config.change_coordinate_frame, + use_hard_nms=nms_config.use_hard_nms) + + return non_max_suppressor_fn + + +def _score_converter_fn_with_logit_scale(tf_score_converter_fn, logit_scale): + """Create a function to scale logits then apply a Tensorflow function.""" + def score_converter_fn(logits): + scaled_logits = tf.multiply(logits, 1.0 / logit_scale, name='scale_logits') + return tf_score_converter_fn(scaled_logits, name='convert_scores') + score_converter_fn.__name__ = '%s_with_logit_scale' % ( + tf_score_converter_fn.__name__) + return score_converter_fn + + +def _build_score_converter(score_converter_config, logit_scale): + """Builds score converter based on the config. + + Builds one of [tf.identity, tf.sigmoid, tf.softmax] score converters based on + the config. + + Args: + score_converter_config: post_processing_pb2.PostProcessing.score_converter. + logit_scale: temperature to use for SOFTMAX score_converter. + + Returns: + Callable score converter op. + + Raises: + ValueError: On unknown score converter. + """ + if score_converter_config == post_processing_pb2.PostProcessing.IDENTITY: + return _score_converter_fn_with_logit_scale(tf.identity, logit_scale) + if score_converter_config == post_processing_pb2.PostProcessing.SIGMOID: + return _score_converter_fn_with_logit_scale(tf.sigmoid, logit_scale) + if score_converter_config == post_processing_pb2.PostProcessing.SOFTMAX: + return _score_converter_fn_with_logit_scale(tf.nn.softmax, logit_scale) + raise ValueError('Unknown score converter.') + + +def _build_calibrated_score_converter(score_converter_fn, calibration_config): + """Wraps a score_converter_fn, adding a calibration step. + + Builds a score converter function with a calibration transformation according + to calibration_builder.py. The score conversion function may be applied before + or after the calibration transformation, depending on the calibration method. + If the method is temperature scaling, the score conversion is + after the calibration transformation. Otherwise, the score conversion is + before the calibration transformation. Calibration applies positive monotonic + transformations to inputs (i.e. score ordering is strictly preserved or + adjacent scores are mapped to the same score). 
When calibration is + class-agnostic, the highest-scoring class remains unchanged, unless two + adjacent scores are mapped to the same value and one class is arbitrarily + selected to break the tie. In per-class calibration, it's possible (though + rare in practice) that the highest-scoring class will change, since positive + monotonicity is only required to hold within each class. + + Args: + score_converter_fn: callable that takes logit scores as input. + calibration_config: post_processing_pb2.PostProcessing.calibration_config. + + Returns: + Callable calibrated score converter op. + """ + calibration_fn = calibration_builder.build(calibration_config) + def calibrated_score_converter_fn(logits): + if (calibration_config.WhichOneof('calibrator') == + 'temperature_scaling_calibration'): + calibrated_logits = calibration_fn(logits) + return score_converter_fn(calibrated_logits) + else: + converted_logits = score_converter_fn(logits) + return calibration_fn(converted_logits) + + calibrated_score_converter_fn.__name__ = ( + 'calibrate_with_%s' % calibration_config.WhichOneof('calibrator')) + return calibrated_score_converter_fn diff --git a/models/research/object_detection/builders/post_processing_builder_test.py b/models/research/object_detection/builders/post_processing_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b7383c92f99637ebf660d40a6074c65b03abd3c5 --- /dev/null +++ b/models/research/object_detection/builders/post_processing_builder_test.py @@ -0,0 +1,185 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
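As the _build_calibrated_score_converter docstring above explains, the ordering depends on the calibrator: temperature scaling is applied to the logits before score conversion, while other calibrators run on the already-converted scores. A short sketch of building a calibrated converter; the scaler value is illustrative and matches the test below:

    from google.protobuf import text_format
    from object_detection.builders import post_processing_builder
    from object_detection.protos import post_processing_pb2

    config = post_processing_pb2.PostProcessing()
    text_format.Merge("""
      score_converter: SOFTMAX
      calibration_config {
        temperature_scaling_calibration { scaler: 2.0 }
      }
    """, config)
    _, calibrated_fn = post_processing_builder.build(config)
    # calibrated_fn first rescales the logits (temperature scaling), then
    # applies softmax to produce calibrated scores.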
+# ============================================================================== + +"""Tests for post_processing_builder.""" + +import tensorflow.compat.v1 as tf +from google.protobuf import text_format +from object_detection.builders import post_processing_builder +from object_detection.protos import post_processing_pb2 +from object_detection.utils import test_case + + +class PostProcessingBuilderTest(test_case.TestCase): + + def test_build_non_max_suppressor_with_correct_parameters(self): + post_processing_text_proto = """ + batch_non_max_suppression { + score_threshold: 0.7 + iou_threshold: 0.6 + max_detections_per_class: 100 + max_total_detections: 300 + soft_nms_sigma: 0.4 + } + """ + post_processing_config = post_processing_pb2.PostProcessing() + text_format.Merge(post_processing_text_proto, post_processing_config) + non_max_suppressor, _ = post_processing_builder.build( + post_processing_config) + self.assertEqual(non_max_suppressor.keywords['max_size_per_class'], 100) + self.assertEqual(non_max_suppressor.keywords['max_total_size'], 300) + self.assertAlmostEqual(non_max_suppressor.keywords['score_thresh'], 0.7) + self.assertAlmostEqual(non_max_suppressor.keywords['iou_thresh'], 0.6) + self.assertAlmostEqual(non_max_suppressor.keywords['soft_nms_sigma'], 0.4) + + def test_build_non_max_suppressor_with_correct_parameters_classagnostic_nms( + self): + post_processing_text_proto = """ + batch_non_max_suppression { + score_threshold: 0.7 + iou_threshold: 0.6 + max_detections_per_class: 10 + max_total_detections: 300 + use_class_agnostic_nms: True + max_classes_per_detection: 1 + } + """ + post_processing_config = post_processing_pb2.PostProcessing() + text_format.Merge(post_processing_text_proto, post_processing_config) + non_max_suppressor, _ = post_processing_builder.build( + post_processing_config) + self.assertEqual(non_max_suppressor.keywords['max_size_per_class'], 10) + self.assertEqual(non_max_suppressor.keywords['max_total_size'], 300) + self.assertEqual(non_max_suppressor.keywords['max_classes_per_detection'], + 1) + self.assertEqual(non_max_suppressor.keywords['use_class_agnostic_nms'], + True) + self.assertAlmostEqual(non_max_suppressor.keywords['score_thresh'], 0.7) + self.assertAlmostEqual(non_max_suppressor.keywords['iou_thresh'], 0.6) + + def test_build_identity_score_converter(self): + post_processing_text_proto = """ + score_converter: IDENTITY + """ + post_processing_config = post_processing_pb2.PostProcessing() + text_format.Merge(post_processing_text_proto, post_processing_config) + _, score_converter = post_processing_builder.build( + post_processing_config) + self.assertEqual(score_converter.__name__, 'identity_with_logit_scale') + def graph_fn(): + inputs = tf.constant([1, 1], tf.float32) + outputs = score_converter(inputs) + return outputs + converted_scores = self.execute_cpu(graph_fn, []) + self.assertAllClose(converted_scores, [1, 1]) + + def test_build_identity_score_converter_with_logit_scale(self): + post_processing_text_proto = """ + score_converter: IDENTITY + logit_scale: 2.0 + """ + post_processing_config = post_processing_pb2.PostProcessing() + text_format.Merge(post_processing_text_proto, post_processing_config) + _, score_converter = post_processing_builder.build(post_processing_config) + self.assertEqual(score_converter.__name__, 'identity_with_logit_scale') + + def graph_fn(): + inputs = tf.constant([1, 1], tf.float32) + outputs = score_converter(inputs) + return outputs + converted_scores = self.execute_cpu(graph_fn, []) + 
self.assertAllClose(converted_scores, [.5, .5]) + + def test_build_sigmoid_score_converter(self): + post_processing_text_proto = """ + score_converter: SIGMOID + """ + post_processing_config = post_processing_pb2.PostProcessing() + text_format.Merge(post_processing_text_proto, post_processing_config) + _, score_converter = post_processing_builder.build(post_processing_config) + self.assertEqual(score_converter.__name__, 'sigmoid_with_logit_scale') + + def test_build_softmax_score_converter(self): + post_processing_text_proto = """ + score_converter: SOFTMAX + """ + post_processing_config = post_processing_pb2.PostProcessing() + text_format.Merge(post_processing_text_proto, post_processing_config) + _, score_converter = post_processing_builder.build(post_processing_config) + self.assertEqual(score_converter.__name__, 'softmax_with_logit_scale') + + def test_build_softmax_score_converter_with_temperature(self): + post_processing_text_proto = """ + score_converter: SOFTMAX + logit_scale: 2.0 + """ + post_processing_config = post_processing_pb2.PostProcessing() + text_format.Merge(post_processing_text_proto, post_processing_config) + _, score_converter = post_processing_builder.build(post_processing_config) + self.assertEqual(score_converter.__name__, 'softmax_with_logit_scale') + + def test_build_calibrator_with_nonempty_config(self): + """Test that identity function used when no calibration_config specified.""" + # Calibration config maps all scores to 0.5. + post_processing_text_proto = """ + score_converter: SOFTMAX + calibration_config { + function_approximation { + x_y_pairs { + x_y_pair { + x: 0.0 + y: 0.5 + } + x_y_pair { + x: 1.0 + y: 0.5 + }}}}""" + post_processing_config = post_processing_pb2.PostProcessing() + text_format.Merge(post_processing_text_proto, post_processing_config) + _, calibrated_score_conversion_fn = post_processing_builder.build( + post_processing_config) + self.assertEqual(calibrated_score_conversion_fn.__name__, + 'calibrate_with_function_approximation') + + def graph_fn(): + input_scores = tf.constant([1, 1], tf.float32) + outputs = calibrated_score_conversion_fn(input_scores) + return outputs + calibrated_scores = self.execute_cpu(graph_fn, []) + self.assertAllClose(calibrated_scores, [0.5, 0.5]) + + def test_build_temperature_scaling_calibrator(self): + post_processing_text_proto = """ + score_converter: SOFTMAX + calibration_config { + temperature_scaling_calibration { + scaler: 2.0 + }}""" + post_processing_config = post_processing_pb2.PostProcessing() + text_format.Merge(post_processing_text_proto, post_processing_config) + _, calibrated_score_conversion_fn = post_processing_builder.build( + post_processing_config) + self.assertEqual(calibrated_score_conversion_fn.__name__, + 'calibrate_with_temperature_scaling_calibration') + + def graph_fn(): + input_scores = tf.constant([1, 1], tf.float32) + outputs = calibrated_score_conversion_fn(input_scores) + return outputs + calibrated_scores = self.execute_cpu(graph_fn, []) + self.assertAllClose(calibrated_scores, [0.5, 0.5]) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/builders/preprocessor_builder.py b/models/research/object_detection/builders/preprocessor_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..aa6a6bc963f8635827aa8776252889d6c5f8d0e9 --- /dev/null +++ b/models/research/object_detection/builders/preprocessor_builder.py @@ -0,0 +1,412 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Builder for preprocessing steps.""" + +import tensorflow.compat.v1 as tf + +from object_detection.core import preprocessor +from object_detection.protos import preprocessor_pb2 + + +def _get_step_config_from_proto(preprocessor_step_config, step_name): + """Returns the value of a field named step_name from proto. + + Args: + preprocessor_step_config: A preprocessor_pb2.PreprocessingStep object. + step_name: Name of the field to get value from. + + Returns: + value: a sub proto message from preprocessor_step_config which will be + later converted to a dictionary. + + Raises: + ValueError: If field does not exist in proto. + """ + for field, value in preprocessor_step_config.ListFields(): + if field.name == step_name: + return value + + raise ValueError('Could not get field %s from proto!' % step_name) + + +def _get_dict_from_proto(config): + """Helper function to put all proto fields into a dictionary. + + For many preprocessing steps, there's a trivial 1-1 mapping from proto fields + to function arguments. This function automatically populates a dictionary with + the arguments from the proto. + + Protos that CANNOT be trivially populated include: + * nested messages. + * steps that check if an optional field is set (ie. where None != 0). + * protos that don't map 1-1 to arguments (ie. list should be reshaped). + * fields requiring additional validation (ie. repeated field has n elements). + + Args: + config: A protobuf object that does not violate the conditions above. + + Returns: + result_dict: |config| converted into a python dictionary. + """ + result_dict = {} + for field, value in config.ListFields(): + result_dict[field.name] = value + return result_dict + + +# A map from a PreprocessingStep proto config field name to the preprocessing +# function that should be used. The PreprocessingStep proto should be parsable +# with _get_dict_from_proto.
+PREPROCESSING_FUNCTION_MAP = { + 'normalize_image': + preprocessor.normalize_image, + 'random_pixel_value_scale': + preprocessor.random_pixel_value_scale, + 'random_image_scale': + preprocessor.random_image_scale, + 'random_rgb_to_gray': + preprocessor.random_rgb_to_gray, + 'random_adjust_brightness': + preprocessor.random_adjust_brightness, + 'random_adjust_contrast': + preprocessor.random_adjust_contrast, + 'random_adjust_hue': + preprocessor.random_adjust_hue, + 'random_adjust_saturation': + preprocessor.random_adjust_saturation, + 'random_distort_color': + preprocessor.random_distort_color, + 'random_jitter_boxes': + preprocessor.random_jitter_boxes, + 'random_crop_to_aspect_ratio': + preprocessor.random_crop_to_aspect_ratio, + 'random_black_patches': + preprocessor.random_black_patches, + 'random_jpeg_quality': + preprocessor.random_jpeg_quality, + 'random_downscale_to_target_pixels': + preprocessor.random_downscale_to_target_pixels, + 'random_patch_gaussian': + preprocessor.random_patch_gaussian, + 'rgb_to_gray': + preprocessor.rgb_to_gray, + 'scale_boxes_to_pixel_coordinates': ( + preprocessor.scale_boxes_to_pixel_coordinates), + 'subtract_channel_mean': + preprocessor.subtract_channel_mean, + 'convert_class_logits_to_softmax': + preprocessor.convert_class_logits_to_softmax, +} + + +# A map to convert from preprocessor_pb2.ResizeImage.Method enum to +# tf.image.ResizeMethod. +RESIZE_METHOD_MAP = { + preprocessor_pb2.ResizeImage.AREA: tf.image.ResizeMethod.AREA, + preprocessor_pb2.ResizeImage.BICUBIC: tf.image.ResizeMethod.BICUBIC, + preprocessor_pb2.ResizeImage.BILINEAR: tf.image.ResizeMethod.BILINEAR, + preprocessor_pb2.ResizeImage.NEAREST_NEIGHBOR: ( + tf.image.ResizeMethod.NEAREST_NEIGHBOR), +} + + +def build(preprocessor_step_config): + """Builds preprocessing step based on the configuration. + + Args: + preprocessor_step_config: PreprocessingStep configuration proto. + + Returns: + function, argmap: A callable function and an argument map to call function + with. + + Raises: + ValueError: On invalid configuration. 
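+
+  Example (illustrative only; text_format refers to google.protobuf.text_format,
+  which this module does not itself import):
+    step = preprocessor_pb2.PreprocessingStep()
+    text_format.Merge('random_rgb_to_gray { probability: 0.3 }', step)
+    preprocess_fn, kwargs = build(step)
+    # preprocess_fn is preprocessor.random_rgb_to_gray and kwargs is
+    # {'probability': 0.3}. Such (function, args) pairs are typically passed
+    # to preprocessor.preprocess as entries of its preprocess_options list.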
+ """ + step_type = preprocessor_step_config.WhichOneof('preprocessing_step') + + if step_type in PREPROCESSING_FUNCTION_MAP: + preprocessing_function = PREPROCESSING_FUNCTION_MAP[step_type] + step_config = _get_step_config_from_proto(preprocessor_step_config, + step_type) + function_args = _get_dict_from_proto(step_config) + return (preprocessing_function, function_args) + + if step_type == 'random_horizontal_flip': + config = preprocessor_step_config.random_horizontal_flip + return (preprocessor.random_horizontal_flip, + { + 'keypoint_flip_permutation': tuple( + config.keypoint_flip_permutation) or None, + }) + + if step_type == 'random_vertical_flip': + config = preprocessor_step_config.random_vertical_flip + return (preprocessor.random_vertical_flip, + { + 'keypoint_flip_permutation': tuple( + config.keypoint_flip_permutation) or None, + }) + + if step_type == 'random_rotation90': + return (preprocessor.random_rotation90, {}) + + if step_type == 'random_crop_image': + config = preprocessor_step_config.random_crop_image + return (preprocessor.random_crop_image, + { + 'min_object_covered': config.min_object_covered, + 'aspect_ratio_range': (config.min_aspect_ratio, + config.max_aspect_ratio), + 'area_range': (config.min_area, config.max_area), + 'overlap_thresh': config.overlap_thresh, + 'clip_boxes': config.clip_boxes, + 'random_coef': config.random_coef, + }) + + if step_type == 'random_pad_image': + config = preprocessor_step_config.random_pad_image + min_image_size = None + if (config.HasField('min_image_height') != + config.HasField('min_image_width')): + raise ValueError('min_image_height and min_image_width should be either ' + 'both set or both unset.') + if config.HasField('min_image_height'): + min_image_size = (config.min_image_height, config.min_image_width) + + max_image_size = None + if (config.HasField('max_image_height') != + config.HasField('max_image_width')): + raise ValueError('max_image_height and max_image_width should be either ' + 'both set or both unset.') + if config.HasField('max_image_height'): + max_image_size = (config.max_image_height, config.max_image_width) + + pad_color = config.pad_color or None + if pad_color: + if len(pad_color) != 3: + tf.logging.warn('pad_color should have 3 elements (RGB) if set!') + + pad_color = tf.cast([x for x in config.pad_color], dtype=tf.float32) + return (preprocessor.random_pad_image, + { + 'min_image_size': min_image_size, + 'max_image_size': max_image_size, + 'pad_color': pad_color, + }) + + if step_type == 'random_absolute_pad_image': + config = preprocessor_step_config.random_absolute_pad_image + + max_height_padding = config.max_height_padding or 1 + max_width_padding = config.max_width_padding or 1 + + pad_color = config.pad_color or None + if pad_color: + if len(pad_color) != 3: + tf.logging.warn('pad_color should have 3 elements (RGB) if set!') + + pad_color = tf.cast([x for x in config.pad_color], dtype=tf.float32) + + return (preprocessor.random_absolute_pad_image, + { + 'max_height_padding': max_height_padding, + 'max_width_padding': max_width_padding, + 'pad_color': pad_color, + }) + if step_type == 'random_crop_pad_image': + config = preprocessor_step_config.random_crop_pad_image + min_padded_size_ratio = config.min_padded_size_ratio + if min_padded_size_ratio and len(min_padded_size_ratio) != 2: + raise ValueError('min_padded_size_ratio should have 2 elements if set!') + max_padded_size_ratio = config.max_padded_size_ratio + if max_padded_size_ratio and len(max_padded_size_ratio) != 2: + raise 
ValueError('max_padded_size_ratio should have 2 elements if set!') + pad_color = config.pad_color or None + if pad_color: + if len(pad_color) != 3: + tf.logging.warn('pad_color should have 3 elements (RGB) if set!') + + pad_color = tf.cast([x for x in config.pad_color], dtype=tf.float32) + + kwargs = { + 'min_object_covered': config.min_object_covered, + 'aspect_ratio_range': (config.min_aspect_ratio, + config.max_aspect_ratio), + 'area_range': (config.min_area, config.max_area), + 'overlap_thresh': config.overlap_thresh, + 'clip_boxes': config.clip_boxes, + 'random_coef': config.random_coef, + 'pad_color': pad_color, + } + if min_padded_size_ratio: + kwargs['min_padded_size_ratio'] = tuple(min_padded_size_ratio) + if max_padded_size_ratio: + kwargs['max_padded_size_ratio'] = tuple(max_padded_size_ratio) + return (preprocessor.random_crop_pad_image, kwargs) + + if step_type == 'random_resize_method': + config = preprocessor_step_config.random_resize_method + return (preprocessor.random_resize_method, + { + 'target_size': [config.target_height, config.target_width], + }) + + if step_type == 'resize_image': + config = preprocessor_step_config.resize_image + method = RESIZE_METHOD_MAP[config.method] + return (preprocessor.resize_image, + { + 'new_height': config.new_height, + 'new_width': config.new_width, + 'method': method + }) + + if step_type == 'random_self_concat_image': + config = preprocessor_step_config.random_self_concat_image + return (preprocessor.random_self_concat_image, { + 'concat_vertical_probability': config.concat_vertical_probability, + 'concat_horizontal_probability': config.concat_horizontal_probability + }) + + if step_type == 'ssd_random_crop': + config = preprocessor_step_config.ssd_random_crop + if config.operations: + min_object_covered = [op.min_object_covered for op in config.operations] + aspect_ratio_range = [(op.min_aspect_ratio, op.max_aspect_ratio) + for op in config.operations] + area_range = [(op.min_area, op.max_area) for op in config.operations] + overlap_thresh = [op.overlap_thresh for op in config.operations] + clip_boxes = [op.clip_boxes for op in config.operations] + random_coef = [op.random_coef for op in config.operations] + return (preprocessor.ssd_random_crop, + { + 'min_object_covered': min_object_covered, + 'aspect_ratio_range': aspect_ratio_range, + 'area_range': area_range, + 'overlap_thresh': overlap_thresh, + 'clip_boxes': clip_boxes, + 'random_coef': random_coef, + }) + return (preprocessor.ssd_random_crop, {}) + + if step_type == 'autoaugment_image': + config = preprocessor_step_config.autoaugment_image + return (preprocessor.autoaugment_image, { + 'policy_name': config.policy_name, + }) + + if step_type == 'drop_label_probabilistically': + config = preprocessor_step_config.drop_label_probabilistically + return (preprocessor.drop_label_probabilistically, { + 'dropped_label': config.label, + 'drop_probability': config.drop_probability, + }) + + if step_type == 'remap_labels': + config = preprocessor_step_config.remap_labels + return (preprocessor.remap_labels, { + 'original_labels': config.original_labels, + 'new_label': config.new_label + }) + + if step_type == 'ssd_random_crop_pad': + config = preprocessor_step_config.ssd_random_crop_pad + if config.operations: + min_object_covered = [op.min_object_covered for op in config.operations] + aspect_ratio_range = [(op.min_aspect_ratio, op.max_aspect_ratio) + for op in config.operations] + area_range = [(op.min_area, op.max_area) for op in config.operations] + overlap_thresh = [op.overlap_thresh 
for op in config.operations] + clip_boxes = [op.clip_boxes for op in config.operations] + random_coef = [op.random_coef for op in config.operations] + min_padded_size_ratio = [tuple(op.min_padded_size_ratio) + for op in config.operations] + max_padded_size_ratio = [tuple(op.max_padded_size_ratio) + for op in config.operations] + pad_color = [(op.pad_color_r, op.pad_color_g, op.pad_color_b) + for op in config.operations] + return (preprocessor.ssd_random_crop_pad, + { + 'min_object_covered': min_object_covered, + 'aspect_ratio_range': aspect_ratio_range, + 'area_range': area_range, + 'overlap_thresh': overlap_thresh, + 'clip_boxes': clip_boxes, + 'random_coef': random_coef, + 'min_padded_size_ratio': min_padded_size_ratio, + 'max_padded_size_ratio': max_padded_size_ratio, + 'pad_color': pad_color, + }) + return (preprocessor.ssd_random_crop_pad, {}) + + if step_type == 'ssd_random_crop_fixed_aspect_ratio': + config = preprocessor_step_config.ssd_random_crop_fixed_aspect_ratio + if config.operations: + min_object_covered = [op.min_object_covered for op in config.operations] + area_range = [(op.min_area, op.max_area) for op in config.operations] + overlap_thresh = [op.overlap_thresh for op in config.operations] + clip_boxes = [op.clip_boxes for op in config.operations] + random_coef = [op.random_coef for op in config.operations] + return (preprocessor.ssd_random_crop_fixed_aspect_ratio, + { + 'min_object_covered': min_object_covered, + 'aspect_ratio': config.aspect_ratio, + 'area_range': area_range, + 'overlap_thresh': overlap_thresh, + 'clip_boxes': clip_boxes, + 'random_coef': random_coef, + }) + return (preprocessor.ssd_random_crop_fixed_aspect_ratio, {}) + + if step_type == 'ssd_random_crop_pad_fixed_aspect_ratio': + config = preprocessor_step_config.ssd_random_crop_pad_fixed_aspect_ratio + kwargs = {} + aspect_ratio = config.aspect_ratio + if aspect_ratio: + kwargs['aspect_ratio'] = aspect_ratio + min_padded_size_ratio = config.min_padded_size_ratio + if min_padded_size_ratio: + if len(min_padded_size_ratio) != 2: + raise ValueError('min_padded_size_ratio should have 2 elements if set!') + kwargs['min_padded_size_ratio'] = tuple(min_padded_size_ratio) + max_padded_size_ratio = config.max_padded_size_ratio + if max_padded_size_ratio: + if len(max_padded_size_ratio) != 2: + raise ValueError('max_padded_size_ratio should have 2 elements if set!') + kwargs['max_padded_size_ratio'] = tuple(max_padded_size_ratio) + if config.operations: + kwargs['min_object_covered'] = [op.min_object_covered + for op in config.operations] + kwargs['aspect_ratio_range'] = [(op.min_aspect_ratio, op.max_aspect_ratio) + for op in config.operations] + kwargs['area_range'] = [(op.min_area, op.max_area) + for op in config.operations] + kwargs['overlap_thresh'] = [op.overlap_thresh for op in config.operations] + kwargs['clip_boxes'] = [op.clip_boxes for op in config.operations] + kwargs['random_coef'] = [op.random_coef for op in config.operations] + return (preprocessor.ssd_random_crop_pad_fixed_aspect_ratio, kwargs) + + if step_type == 'random_square_crop_by_scale': + config = preprocessor_step_config.random_square_crop_by_scale + return preprocessor.random_square_crop_by_scale, { + 'scale_min': config.scale_min, + 'scale_max': config.scale_max, + 'max_border': config.max_border, + 'num_scales': config.num_scales + } + + raise ValueError('Unknown preprocessing step.') diff --git a/models/research/object_detection/builders/preprocessor_builder_test.py 
b/models/research/object_detection/builders/preprocessor_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..4c283238c59695dace4d769b0a0fe0941a6a027c --- /dev/null +++ b/models/research/object_detection/builders/preprocessor_builder_test.py @@ -0,0 +1,747 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for preprocessor_builder.""" + +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format + +from object_detection.builders import preprocessor_builder +from object_detection.core import preprocessor +from object_detection.protos import preprocessor_pb2 + + +class PreprocessorBuilderTest(tf.test.TestCase): + + def assert_dictionary_close(self, dict1, dict2): + """Helper to check if two dicts with floatst or integers are close.""" + self.assertEqual(sorted(dict1.keys()), sorted(dict2.keys())) + for key in dict1: + value = dict1[key] + if isinstance(value, float): + self.assertAlmostEqual(value, dict2[key]) + else: + self.assertEqual(value, dict2[key]) + + def test_build_normalize_image(self): + preprocessor_text_proto = """ + normalize_image { + original_minval: 0.0 + original_maxval: 255.0 + target_minval: -1.0 + target_maxval: 1.0 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.normalize_image) + self.assertEqual(args, { + 'original_minval': 0.0, + 'original_maxval': 255.0, + 'target_minval': -1.0, + 'target_maxval': 1.0, + }) + + def test_build_random_horizontal_flip(self): + preprocessor_text_proto = """ + random_horizontal_flip { + keypoint_flip_permutation: 1 + keypoint_flip_permutation: 0 + keypoint_flip_permutation: 2 + keypoint_flip_permutation: 3 + keypoint_flip_permutation: 5 + keypoint_flip_permutation: 4 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_horizontal_flip) + self.assertEqual(args, {'keypoint_flip_permutation': (1, 0, 2, 3, 5, 4)}) + + def test_build_random_vertical_flip(self): + preprocessor_text_proto = """ + random_vertical_flip { + keypoint_flip_permutation: 1 + keypoint_flip_permutation: 0 + keypoint_flip_permutation: 2 + keypoint_flip_permutation: 3 + keypoint_flip_permutation: 5 + keypoint_flip_permutation: 4 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_vertical_flip) + self.assertEqual(args, {'keypoint_flip_permutation': (1, 0, 2, 3, 5, 4)}) + + def 
test_build_random_rotation90(self): + preprocessor_text_proto = """ + random_rotation90 {} + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_rotation90) + self.assertEqual(args, {}) + + def test_build_random_pixel_value_scale(self): + preprocessor_text_proto = """ + random_pixel_value_scale { + minval: 0.8 + maxval: 1.2 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_pixel_value_scale) + self.assert_dictionary_close(args, {'minval': 0.8, 'maxval': 1.2}) + + def test_build_random_image_scale(self): + preprocessor_text_proto = """ + random_image_scale { + min_scale_ratio: 0.8 + max_scale_ratio: 2.2 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_image_scale) + self.assert_dictionary_close(args, {'min_scale_ratio': 0.8, + 'max_scale_ratio': 2.2}) + + def test_build_random_rgb_to_gray(self): + preprocessor_text_proto = """ + random_rgb_to_gray { + probability: 0.8 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_rgb_to_gray) + self.assert_dictionary_close(args, {'probability': 0.8}) + + def test_build_random_adjust_brightness(self): + preprocessor_text_proto = """ + random_adjust_brightness { + max_delta: 0.2 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_adjust_brightness) + self.assert_dictionary_close(args, {'max_delta': 0.2}) + + def test_build_random_adjust_contrast(self): + preprocessor_text_proto = """ + random_adjust_contrast { + min_delta: 0.7 + max_delta: 1.1 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_adjust_contrast) + self.assert_dictionary_close(args, {'min_delta': 0.7, 'max_delta': 1.1}) + + def test_build_random_adjust_hue(self): + preprocessor_text_proto = """ + random_adjust_hue { + max_delta: 0.01 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_adjust_hue) + self.assert_dictionary_close(args, {'max_delta': 0.01}) + + def test_build_random_adjust_saturation(self): + preprocessor_text_proto = """ + random_adjust_saturation { + min_delta: 0.75 + max_delta: 1.15 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, 
preprocessor.random_adjust_saturation) + self.assert_dictionary_close(args, {'min_delta': 0.75, 'max_delta': 1.15}) + + def test_build_random_distort_color(self): + preprocessor_text_proto = """ + random_distort_color { + color_ordering: 1 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_distort_color) + self.assertEqual(args, {'color_ordering': 1}) + + def test_build_random_jitter_boxes(self): + preprocessor_text_proto = """ + random_jitter_boxes { + ratio: 0.1 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_jitter_boxes) + self.assert_dictionary_close(args, {'ratio': 0.1}) + + def test_build_random_crop_image(self): + preprocessor_text_proto = """ + random_crop_image { + min_object_covered: 0.75 + min_aspect_ratio: 0.75 + max_aspect_ratio: 1.5 + min_area: 0.25 + max_area: 0.875 + overlap_thresh: 0.5 + clip_boxes: False + random_coef: 0.125 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_crop_image) + self.assertEqual(args, { + 'min_object_covered': 0.75, + 'aspect_ratio_range': (0.75, 1.5), + 'area_range': (0.25, 0.875), + 'overlap_thresh': 0.5, + 'clip_boxes': False, + 'random_coef': 0.125, + }) + + def test_build_random_pad_image(self): + preprocessor_text_proto = """ + random_pad_image { + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_pad_image) + self.assertEqual(args, { + 'min_image_size': None, + 'max_image_size': None, + 'pad_color': None, + }) + + def test_build_random_absolute_pad_image(self): + preprocessor_text_proto = """ + random_absolute_pad_image { + max_height_padding: 50 + max_width_padding: 100 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_absolute_pad_image) + self.assertEqual(args, { + 'max_height_padding': 50, + 'max_width_padding': 100, + 'pad_color': None, + }) + + def test_build_random_crop_pad_image(self): + preprocessor_text_proto = """ + random_crop_pad_image { + min_object_covered: 0.75 + min_aspect_ratio: 0.75 + max_aspect_ratio: 1.5 + min_area: 0.25 + max_area: 0.875 + overlap_thresh: 0.5 + clip_boxes: False + random_coef: 0.125 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_crop_pad_image) + self.assertEqual(args, { + 'min_object_covered': 0.75, + 'aspect_ratio_range': (0.75, 1.5), + 'area_range': (0.25, 0.875), + 'overlap_thresh': 0.5, + 'clip_boxes': False, + 'random_coef': 0.125, + 'pad_color': None, + }) + + def 
test_build_random_crop_pad_image_with_optional_parameters(self): + preprocessor_text_proto = """ + random_crop_pad_image { + min_object_covered: 0.75 + min_aspect_ratio: 0.75 + max_aspect_ratio: 1.5 + min_area: 0.25 + max_area: 0.875 + overlap_thresh: 0.5 + clip_boxes: False + random_coef: 0.125 + min_padded_size_ratio: 0.5 + min_padded_size_ratio: 0.75 + max_padded_size_ratio: 0.5 + max_padded_size_ratio: 0.75 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_crop_pad_image) + self.assertEqual(args, { + 'min_object_covered': 0.75, + 'aspect_ratio_range': (0.75, 1.5), + 'area_range': (0.25, 0.875), + 'overlap_thresh': 0.5, + 'clip_boxes': False, + 'random_coef': 0.125, + 'min_padded_size_ratio': (0.5, 0.75), + 'max_padded_size_ratio': (0.5, 0.75), + 'pad_color': None, + }) + + def test_build_random_crop_to_aspect_ratio(self): + preprocessor_text_proto = """ + random_crop_to_aspect_ratio { + aspect_ratio: 0.85 + overlap_thresh: 0.35 + clip_boxes: False + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_crop_to_aspect_ratio) + self.assert_dictionary_close(args, {'aspect_ratio': 0.85, + 'overlap_thresh': 0.35, + 'clip_boxes': False}) + + def test_build_random_black_patches(self): + preprocessor_text_proto = """ + random_black_patches { + max_black_patches: 20 + probability: 0.95 + size_to_image_ratio: 0.12 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_black_patches) + self.assert_dictionary_close(args, {'max_black_patches': 20, + 'probability': 0.95, + 'size_to_image_ratio': 0.12}) + + def test_build_random_jpeg_quality(self): + preprocessor_text_proto = """ + random_jpeg_quality { + random_coef: 0.5 + min_jpeg_quality: 40 + max_jpeg_quality: 90 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Parse(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_jpeg_quality) + self.assert_dictionary_close(args, {'random_coef': 0.5, + 'min_jpeg_quality': 40, + 'max_jpeg_quality': 90}) + + def test_build_random_downscale_to_target_pixels(self): + preprocessor_text_proto = """ + random_downscale_to_target_pixels { + random_coef: 0.5 + min_target_pixels: 200 + max_target_pixels: 900 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Parse(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_downscale_to_target_pixels) + self.assert_dictionary_close(args, { + 'random_coef': 0.5, + 'min_target_pixels': 200, + 'max_target_pixels': 900 + }) + + def test_build_random_patch_gaussian(self): + preprocessor_text_proto = """ + random_patch_gaussian { + random_coef: 0.5 + min_patch_size: 10 + max_patch_size: 300 + min_gaussian_stddev: 0.2 + max_gaussian_stddev: 1.5 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + 
text_format.Parse(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_patch_gaussian) + self.assert_dictionary_close(args, { + 'random_coef': 0.5, + 'min_patch_size': 10, + 'max_patch_size': 300, + 'min_gaussian_stddev': 0.2, + 'max_gaussian_stddev': 1.5 + }) + + def test_auto_augment_image(self): + preprocessor_text_proto = """ + autoaugment_image { + policy_name: 'v0' + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.autoaugment_image) + self.assert_dictionary_close(args, {'policy_name': 'v0'}) + + def test_drop_label_probabilistically(self): + preprocessor_text_proto = """ + drop_label_probabilistically{ + label: 2 + drop_probability: 0.5 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.drop_label_probabilistically) + self.assert_dictionary_close(args, { + 'dropped_label': 2, + 'drop_probability': 0.5 + }) + + def test_remap_labels(self): + preprocessor_text_proto = """ + remap_labels{ + original_labels: 1 + original_labels: 2 + new_label: 3 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.remap_labels) + self.assert_dictionary_close(args, { + 'original_labels': [1, 2], + 'new_label': 3 + }) + + def test_build_random_resize_method(self): + preprocessor_text_proto = """ + random_resize_method { + target_height: 75 + target_width: 100 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_resize_method) + self.assert_dictionary_close(args, {'target_size': [75, 100]}) + + def test_build_scale_boxes_to_pixel_coordinates(self): + preprocessor_text_proto = """ + scale_boxes_to_pixel_coordinates {} + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.scale_boxes_to_pixel_coordinates) + self.assertEqual(args, {}) + + def test_build_resize_image(self): + preprocessor_text_proto = """ + resize_image { + new_height: 75 + new_width: 100 + method: BICUBIC + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.resize_image) + self.assertEqual(args, {'new_height': 75, + 'new_width': 100, + 'method': tf.image.ResizeMethod.BICUBIC}) + + def test_build_rgb_to_gray(self): + preprocessor_text_proto = """ + rgb_to_gray {} + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.rgb_to_gray) + 
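# rgb_to_gray has no configurable fields, so the expected kwargs dict is empty. +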
self.assertEqual(args, {}) + + def test_build_subtract_channel_mean(self): + preprocessor_text_proto = """ + subtract_channel_mean { + means: [1.0, 2.0, 3.0] + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.subtract_channel_mean) + self.assertEqual(args, {'means': [1.0, 2.0, 3.0]}) + + def test_random_self_concat_image(self): + preprocessor_text_proto = """ + random_self_concat_image { + concat_vertical_probability: 0.5 + concat_horizontal_probability: 0.25 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_self_concat_image) + self.assertEqual(args, {'concat_vertical_probability': 0.5, + 'concat_horizontal_probability': 0.25}) + + def test_build_ssd_random_crop(self): + preprocessor_text_proto = """ + ssd_random_crop { + operations { + min_object_covered: 0.0 + min_aspect_ratio: 0.875 + max_aspect_ratio: 1.125 + min_area: 0.5 + max_area: 1.0 + overlap_thresh: 0.0 + clip_boxes: False + random_coef: 0.375 + } + operations { + min_object_covered: 0.25 + min_aspect_ratio: 0.75 + max_aspect_ratio: 1.5 + min_area: 0.5 + max_area: 1.0 + overlap_thresh: 0.25 + clip_boxes: True + random_coef: 0.375 + } + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.ssd_random_crop) + self.assertEqual(args, {'min_object_covered': [0.0, 0.25], + 'aspect_ratio_range': [(0.875, 1.125), (0.75, 1.5)], + 'area_range': [(0.5, 1.0), (0.5, 1.0)], + 'overlap_thresh': [0.0, 0.25], + 'clip_boxes': [False, True], + 'random_coef': [0.375, 0.375]}) + + def test_build_ssd_random_crop_empty_operations(self): + preprocessor_text_proto = """ + ssd_random_crop { + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.ssd_random_crop) + self.assertEqual(args, {}) + + def test_build_ssd_random_crop_pad(self): + preprocessor_text_proto = """ + ssd_random_crop_pad { + operations { + min_object_covered: 0.0 + min_aspect_ratio: 0.875 + max_aspect_ratio: 1.125 + min_area: 0.5 + max_area: 1.0 + overlap_thresh: 0.0 + clip_boxes: False + random_coef: 0.375 + min_padded_size_ratio: [1.0, 1.0] + max_padded_size_ratio: [2.0, 2.0] + pad_color_r: 0.5 + pad_color_g: 0.5 + pad_color_b: 0.5 + } + operations { + min_object_covered: 0.25 + min_aspect_ratio: 0.75 + max_aspect_ratio: 1.5 + min_area: 0.5 + max_area: 1.0 + overlap_thresh: 0.25 + clip_boxes: True + random_coef: 0.375 + min_padded_size_ratio: [1.0, 1.0] + max_padded_size_ratio: [2.0, 2.0] + pad_color_r: 0.5 + pad_color_g: 0.5 + pad_color_b: 0.5 + } + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.ssd_random_crop_pad) + self.assertEqual(args, {'min_object_covered': [0.0, 0.25], + 'aspect_ratio_range': [(0.875, 1.125), (0.75, 1.5)], + 
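# One list element per `operations` block in the ssd_random_crop_pad config. +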
'area_range': [(0.5, 1.0), (0.5, 1.0)], + 'overlap_thresh': [0.0, 0.25], + 'clip_boxes': [False, True], + 'random_coef': [0.375, 0.375], + 'min_padded_size_ratio': [(1.0, 1.0), (1.0, 1.0)], + 'max_padded_size_ratio': [(2.0, 2.0), (2.0, 2.0)], + 'pad_color': [(0.5, 0.5, 0.5), (0.5, 0.5, 0.5)]}) + + def test_build_ssd_random_crop_fixed_aspect_ratio(self): + preprocessor_text_proto = """ + ssd_random_crop_fixed_aspect_ratio { + operations { + min_object_covered: 0.0 + min_area: 0.5 + max_area: 1.0 + overlap_thresh: 0.0 + clip_boxes: False + random_coef: 0.375 + } + operations { + min_object_covered: 0.25 + min_area: 0.5 + max_area: 1.0 + overlap_thresh: 0.25 + clip_boxes: True + random_coef: 0.375 + } + aspect_ratio: 0.875 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.ssd_random_crop_fixed_aspect_ratio) + self.assertEqual(args, {'min_object_covered': [0.0, 0.25], + 'aspect_ratio': 0.875, + 'area_range': [(0.5, 1.0), (0.5, 1.0)], + 'overlap_thresh': [0.0, 0.25], + 'clip_boxes': [False, True], + 'random_coef': [0.375, 0.375]}) + + def test_build_ssd_random_crop_pad_fixed_aspect_ratio(self): + preprocessor_text_proto = """ + ssd_random_crop_pad_fixed_aspect_ratio { + operations { + min_object_covered: 0.0 + min_aspect_ratio: 0.875 + max_aspect_ratio: 1.125 + min_area: 0.5 + max_area: 1.0 + overlap_thresh: 0.0 + clip_boxes: False + random_coef: 0.375 + } + operations { + min_object_covered: 0.25 + min_aspect_ratio: 0.75 + max_aspect_ratio: 1.5 + min_area: 0.5 + max_area: 1.0 + overlap_thresh: 0.25 + clip_boxes: True + random_coef: 0.375 + } + aspect_ratio: 0.875 + min_padded_size_ratio: [1.0, 1.0] + max_padded_size_ratio: [2.0, 2.0] + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, + preprocessor.ssd_random_crop_pad_fixed_aspect_ratio) + self.assertEqual(args, {'min_object_covered': [0.0, 0.25], + 'aspect_ratio': 0.875, + 'aspect_ratio_range': [(0.875, 1.125), (0.75, 1.5)], + 'area_range': [(0.5, 1.0), (0.5, 1.0)], + 'overlap_thresh': [0.0, 0.25], + 'clip_boxes': [False, True], + 'random_coef': [0.375, 0.375], + 'min_padded_size_ratio': (1.0, 1.0), + 'max_padded_size_ratio': (2.0, 2.0)}) + + def test_build_normalize_image_convert_class_logits_to_softmax(self): + preprocessor_text_proto = """ + convert_class_logits_to_softmax { + temperature: 2 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.convert_class_logits_to_softmax) + self.assertEqual(args, {'temperature': 2}) + + def test_random_crop_by_scale(self): + preprocessor_text_proto = """ + random_square_crop_by_scale { + scale_min: 0.25 + scale_max: 2.0 + num_scales: 8 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_square_crop_by_scale) + self.assertEqual(args, { + 'scale_min': 0.25, + 'scale_max': 2.0, + 'num_scales': 8, + 'max_border': 128 + }) + + +if __name__ == '__main__': + 
tf.test.main() diff --git a/models/research/object_detection/builders/region_similarity_calculator_builder.py b/models/research/object_detection/builders/region_similarity_calculator_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..8f35087ff40ed9e08e7c889803b704687ac3c770 --- /dev/null +++ b/models/research/object_detection/builders/region_similarity_calculator_builder.py @@ -0,0 +1,59 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Builder for region similarity calculators.""" + +from object_detection.core import region_similarity_calculator +from object_detection.protos import region_similarity_calculator_pb2 + + +def build(region_similarity_calculator_config): + """Builds region similarity calculator based on the configuration. + + Builds one of [IouSimilarity, IoaSimilarity, NegSqDistSimilarity] objects. See + core/region_similarity_calculator.proto for details. + + Args: + region_similarity_calculator_config: RegionSimilarityCalculator + configuration proto. + + Returns: + region_similarity_calculator: RegionSimilarityCalculator object. + + Raises: + ValueError: On unknown region similarity calculator. + """ + + if not isinstance( + region_similarity_calculator_config, + region_similarity_calculator_pb2.RegionSimilarityCalculator): + raise ValueError( + 'region_similarity_calculator_config not of type ' + 'region_similarity_calculator_pb2.RegionsSimilarityCalculator') + + similarity_calculator = region_similarity_calculator_config.WhichOneof( + 'region_similarity') + if similarity_calculator == 'iou_similarity': + return region_similarity_calculator.IouSimilarity() + if similarity_calculator == 'ioa_similarity': + return region_similarity_calculator.IoaSimilarity() + if similarity_calculator == 'neg_sq_dist_similarity': + return region_similarity_calculator.NegSqDistSimilarity() + if similarity_calculator == 'thresholded_iou_similarity': + return region_similarity_calculator.ThresholdedIouSimilarity( + region_similarity_calculator_config.thresholded_iou_similarity + .iou_threshold) + + raise ValueError('Unknown region similarity calculator.') diff --git a/models/research/object_detection/builders/region_similarity_calculator_builder_test.py b/models/research/object_detection/builders/region_similarity_calculator_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..da72e7360ee47d142ced9c90787bdc56813901dc --- /dev/null +++ b/models/research/object_detection/builders/region_similarity_calculator_builder_test.py @@ -0,0 +1,67 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for region_similarity_calculator_builder.""" + +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import region_similarity_calculator_builder +from object_detection.core import region_similarity_calculator +from object_detection.protos import region_similarity_calculator_pb2 as sim_calc_pb2 + + +class RegionSimilarityCalculatorBuilderTest(tf.test.TestCase): + + def testBuildIoaSimilarityCalculator(self): + similarity_calc_text_proto = """ + ioa_similarity { + } + """ + similarity_calc_proto = sim_calc_pb2.RegionSimilarityCalculator() + text_format.Merge(similarity_calc_text_proto, similarity_calc_proto) + similarity_calc = region_similarity_calculator_builder.build( + similarity_calc_proto) + self.assertTrue(isinstance(similarity_calc, + region_similarity_calculator.IoaSimilarity)) + + def testBuildIouSimilarityCalculator(self): + similarity_calc_text_proto = """ + iou_similarity { + } + """ + similarity_calc_proto = sim_calc_pb2.RegionSimilarityCalculator() + text_format.Merge(similarity_calc_text_proto, similarity_calc_proto) + similarity_calc = region_similarity_calculator_builder.build( + similarity_calc_proto) + self.assertTrue(isinstance(similarity_calc, + region_similarity_calculator.IouSimilarity)) + + def testBuildNegSqDistSimilarityCalculator(self): + similarity_calc_text_proto = """ + neg_sq_dist_similarity { + } + """ + similarity_calc_proto = sim_calc_pb2.RegionSimilarityCalculator() + text_format.Merge(similarity_calc_text_proto, similarity_calc_proto) + similarity_calc = region_similarity_calculator_builder.build( + similarity_calc_proto) + self.assertTrue(isinstance(similarity_calc, + region_similarity_calculator. + NegSqDistSimilarity)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/builders/target_assigner_builder.py b/models/research/object_detection/builders/target_assigner_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..f6434f653c8426733e90ff5ed04dc69b7d9e34af --- /dev/null +++ b/models/research/object_detection/builders/target_assigner_builder.py @@ -0,0 +1,40 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+
+"""A function to build a target assigner from configuration."""
+from object_detection.builders import box_coder_builder
+from object_detection.builders import matcher_builder
+from object_detection.builders import region_similarity_calculator_builder
+from object_detection.core import target_assigner
+
+
+def build(target_assigner_config):
+  """Builds a TargetAssigner object based on the config.
+
+  Args:
+    target_assigner_config: A target_assigner proto message containing config
+      for the desired target assigner.
+
+  Returns:
+    TargetAssigner object based on the config.
+  """
+  matcher_instance = matcher_builder.build(target_assigner_config.matcher)
+  similarity_calc_instance = region_similarity_calculator_builder.build(
+      target_assigner_config.similarity_calculator)
+  box_coder = box_coder_builder.build(target_assigner_config.box_coder)
+  return target_assigner.TargetAssigner(
+      matcher=matcher_instance,
+      similarity_calc=similarity_calc_instance,
+      box_coder_instance=box_coder)
diff --git a/models/research/object_detection/builders/target_assigner_builder_test.py b/models/research/object_detection/builders/target_assigner_builder_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..279600214844c617056453890cc0d9d471ec5e82
--- /dev/null
+++ b/models/research/object_detection/builders/target_assigner_builder_test.py
@@ -0,0 +1,50 @@
+"""Tests for google3.third_party.tensorflow_models.object_detection.builders.target_assigner_builder."""
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================== + +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format + + +from object_detection.builders import target_assigner_builder +from object_detection.core import target_assigner +from object_detection.protos import target_assigner_pb2 + + +class TargetAssignerBuilderTest(tf.test.TestCase): + + def test_build_a_target_assigner(self): + target_assigner_text_proto = """ + matcher { + argmax_matcher {matched_threshold: 0.5} + } + similarity_calculator { + iou_similarity {} + } + box_coder { + faster_rcnn_box_coder {} + } + """ + target_assigner_proto = target_assigner_pb2.TargetAssigner() + text_format.Merge(target_assigner_text_proto, target_assigner_proto) + target_assigner_instance = target_assigner_builder.build( + target_assigner_proto) + self.assertIsInstance(target_assigner_instance, + target_assigner.TargetAssigner) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/colab_tutorials/context_rcnn_tutorial.ipynb b/models/research/object_detection/colab_tutorials/context_rcnn_tutorial.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..b735cfbcea0e2c5b7e7c44e706e68a59d98b68ec --- /dev/null +++ b/models/research/object_detection/colab_tutorials/context_rcnn_tutorial.ipynb @@ -0,0 +1,1500 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "name": "context_rcnn_tutorial.ipynb", + "provenance": [], + "collapsed_sections": [] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "jZc1kMel3sZP", + "colab_type": "text" + }, + "source": [ + "# Context R-CNN Demo\n", + "\n", + "
\n", + " \n", + " Run in Google Colab\n", + " \n", + "\n", + " \n", + " View source on GitHub\n", + "
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "XuHWvdag3_b9", + "colab_type": "text" + }, + "source": [ + " This notebook will walk you step by step through the process of using a pre-trained model to build up a contextual memory bank for a set of images, and then detect objects in those images+context using [Context R-CNN](https://arxiv.org/abs/1912.03538)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "u0e-OOtn4hQ8", + "colab_type": "text" + }, + "source": [ + "# Setup" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "w-UrhxBw4iLA", + "colab_type": "text" + }, + "source": [ + "Important: If you're running on a local machine, be sure to follow the [installation instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md). This notebook includes only what's necessary to run in Colab." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SAqMxS4V4lqS", + "colab_type": "text" + }, + "source": [ + "### Install" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "BPkovrxF4o8n", + "colab_type": "code", + "outputId": "e1b8debc-ab73-4b3e-9e44-c86446c7cda1", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 785 + } + }, + "source": [ + "!pip install -U --pre tensorflow==\"2.*\"\n", + "!pip install tf_slim" + ], + "execution_count": 1, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Requirement already up-to-date: tensorflow==2.* in /usr/local/lib/python3.6/dist-packages (2.2.0)\n", + "Requirement already satisfied, skipping upgrade: scipy==1.4.1; python_version >= \"3\" in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (1.4.1)\n", + "Requirement already satisfied, skipping upgrade: protobuf>=3.8.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (3.10.0)\n", + "Requirement already satisfied, skipping upgrade: h5py<2.11.0,>=2.10.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (2.10.0)\n", + "Requirement already satisfied, skipping upgrade: opt-einsum>=2.3.2 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (3.2.1)\n", + "Requirement already satisfied, skipping upgrade: numpy<2.0,>=1.16.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (1.18.5)\n", + "Requirement already satisfied, skipping upgrade: wheel>=0.26; python_version >= \"3\" in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (0.34.2)\n", + "Requirement already satisfied, skipping upgrade: absl-py>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (0.9.0)\n", + "Requirement already satisfied, skipping upgrade: tensorflow-estimator<2.3.0,>=2.2.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (2.2.0)\n", + "Requirement already satisfied, skipping upgrade: google-pasta>=0.1.8 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (0.2.0)\n", + "Requirement already satisfied, skipping upgrade: grpcio>=1.8.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (1.29.0)\n", + "Requirement already satisfied, skipping upgrade: tensorboard<2.3.0,>=2.2.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (2.2.2)\n", + "Requirement already satisfied, skipping upgrade: gast==0.3.3 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (0.3.3)\n", + "Requirement already satisfied, skipping upgrade: astunparse==1.6.3 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (1.6.3)\n", + "Requirement already 
satisfied, skipping upgrade: keras-preprocessing>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (1.1.2)\n", + "Requirement already satisfied, skipping upgrade: termcolor>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (1.1.0)\n", + "Requirement already satisfied, skipping upgrade: six>=1.12.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (1.12.0)\n", + "Requirement already satisfied, skipping upgrade: wrapt>=1.11.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (1.12.1)\n", + "Requirement already satisfied, skipping upgrade: setuptools in /usr/local/lib/python3.6/dist-packages (from protobuf>=3.8.0->tensorflow==2.*) (47.1.1)\n", + "Requirement already satisfied, skipping upgrade: google-auth<2,>=1.6.3 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (1.7.2)\n", + "Requirement already satisfied, skipping upgrade: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (1.0.1)\n", + "Requirement already satisfied, skipping upgrade: requests<3,>=2.21.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (2.23.0)\n", + "Requirement already satisfied, skipping upgrade: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (0.4.1)\n", + "Requirement already satisfied, skipping upgrade: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (3.2.2)\n", + "Requirement already satisfied, skipping upgrade: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (1.6.0.post3)\n", + "Requirement already satisfied, skipping upgrade: cachetools<3.2,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (3.1.1)\n", + "Requirement already satisfied, skipping upgrade: rsa<4.1,>=3.1.4 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (4.0)\n", + "Requirement already satisfied, skipping upgrade: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (0.2.8)\n", + "Requirement already satisfied, skipping upgrade: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (2.9)\n", + "Requirement already satisfied, skipping upgrade: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (3.0.4)\n", + "Requirement already satisfied, skipping upgrade: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (2020.4.5.1)\n", + "Requirement already satisfied, skipping upgrade: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (1.24.3)\n", + "Requirement already satisfied, skipping upgrade: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (1.3.0)\n", + "Requirement already satisfied, skipping upgrade: importlib-metadata; python_version < \"3.8\" in 
/usr/local/lib/python3.6/dist-packages (from markdown>=2.6.8->tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (1.6.0)\n", + "Requirement already satisfied, skipping upgrade: pyasn1>=0.1.3 in /usr/local/lib/python3.6/dist-packages (from rsa<4.1,>=3.1.4->google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (0.4.8)\n", + "Requirement already satisfied, skipping upgrade: oauthlib>=3.0.0 in /usr/local/lib/python3.6/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (3.1.0)\n", + "Requirement already satisfied, skipping upgrade: zipp>=0.5 in /usr/local/lib/python3.6/dist-packages (from importlib-metadata; python_version < \"3.8\"->markdown>=2.6.8->tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (3.1.0)\n", + "Collecting tf_slim\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/02/97/b0f4a64df018ca018cc035d44f2ef08f91e2e8aa67271f6f19633a015ff7/tf_slim-1.1.0-py2.py3-none-any.whl (352kB)\n", + "\u001b[K |████████████████████████████████| 358kB 2.8MB/s \n", + "\u001b[?25hRequirement already satisfied: absl-py>=0.2.2 in /usr/local/lib/python3.6/dist-packages (from tf_slim) (0.9.0)\n", + "Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from absl-py>=0.2.2->tf_slim) (1.12.0)\n", + "Installing collected packages: tf-slim\n", + "Successfully installed tf-slim-1.1.0\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zpKF8a2x4tec", + "colab_type": "text" + }, + "source": [ + "Make sure you have `pycocotools` installed" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "owcrp0AW4uCg", + "colab_type": "code", + "outputId": "001148a8-b0a8-43a1-f6df-225d86d90b8f", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 34 + } + }, + "source": [ + "!pip install pycocotools" + ], + "execution_count": 2, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Requirement already satisfied: pycocotools in /usr/local/lib/python3.6/dist-packages (2.0.0)\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "wHFSRVaO4wuq", + "colab_type": "text" + }, + "source": [ + "Get `tensorflow/models` or `cd` to parent directory of the repository." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "E0ZuGKoi4wTn", + "colab_type": "code", + "outputId": "2b5d93cb-3548-4347-9b76-ce12bea44a56", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 136 + } + }, + "source": [ + "import os\n", + "import pathlib\n", + "\n", + "\n", + "if \"models\" in pathlib.Path.cwd().parts:\n", + " while \"models\" in pathlib.Path.cwd().parts:\n", + " os.chdir('..')\n", + "elif not pathlib.Path('models').exists():\n", + " !git clone --depth 1 https://github.com/tensorflow/models" + ], + "execution_count": 3, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Cloning into 'models'...\n", + "remote: Enumerating objects: 2694, done.\u001b[K\n", + "remote: Counting objects: 100% (2694/2694), done.\u001b[K\n", + "remote: Compressing objects: 100% (2370/2370), done.\u001b[K\n", + "remote: Total 2694 (delta 520), reused 1332 (delta 290), pack-reused 0\u001b[K\n", + "Receiving objects: 100% (2694/2694), 34.10 MiB | 29.32 MiB/s, done.\n", + "Resolving deltas: 100% (520/520), done.\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "GkqRm-WY47MR", + "colab_type": "text" + }, + "source": [ + "Compile protobufs and install the object_detection package" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "62Dn1_YU45O2", + "colab_type": "code", + "outputId": "439166dd-6202-4ff9-897d-100a35ae5af5", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 54 + } + }, + "source": [ + "%%bash\n", + "cd models/research/\n", + "protoc object_detection/protos/*.proto --python_out=." + ], + "execution_count": 4, + "outputs": [ + { + "output_type": "stream", + "text": [ + "object_detection/protos/input_reader.proto: warning: Import object_detection/protos/image_resizer.proto but not used.\n" + ], + "name": "stderr" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "83kNiD-24-ZB", + "colab_type": "code", + "outputId": "aa148939-7dcc-4fbd-ea48-41236523712c", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 343 + } + }, + "source": [ + "%%bash \n", + "cd models/research\n", + "pip install ." 
+ ], + "execution_count": 5, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Processing /content/models/research\n", + "Requirement already satisfied: Pillow>=1.0 in /usr/local/lib/python3.6/dist-packages (from object-detection==0.1) (7.0.0)\n", + "Requirement already satisfied: Matplotlib>=2.1 in /usr/local/lib/python3.6/dist-packages (from object-detection==0.1) (3.2.1)\n", + "Requirement already satisfied: Cython>=0.28.1 in /usr/local/lib/python3.6/dist-packages (from object-detection==0.1) (0.29.19)\n", + "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from Matplotlib>=2.1->object-detection==0.1) (0.10.0)\n", + "Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from Matplotlib>=2.1->object-detection==0.1) (2.4.7)\n", + "Requirement already satisfied: numpy>=1.11 in /usr/local/lib/python3.6/dist-packages (from Matplotlib>=2.1->object-detection==0.1) (1.18.5)\n", + "Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from Matplotlib>=2.1->object-detection==0.1) (2.8.1)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from Matplotlib>=2.1->object-detection==0.1) (1.2.0)\n", + "Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from cycler>=0.10->Matplotlib>=2.1->object-detection==0.1) (1.12.0)\n", + "Building wheels for collected packages: object-detection\n", + " Building wheel for object-detection (setup.py): started\n", + " Building wheel for object-detection (setup.py): finished with status 'done'\n", + " Created wheel for object-detection: filename=object_detection-0.1-cp36-none-any.whl size=1141324 sha256=1dff68de415a4ccc3af0e20b8f409a73d147d79720a713dcdc30f9bc8d4ab3a2\n", + " Stored in directory: /tmp/pip-ephem-wheel-cache-rlyj8yrw/wheels/94/49/4b/39b051683087a22ef7e80ec52152a27249d1a644ccf4e442ea\n", + "Successfully built object-detection\n", + "Installing collected packages: object-detection\n", + "Successfully installed object-detection-0.1\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "LBdjK2G5ywuc" + }, + "source": [ + "### Imports" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "hV4P5gyTWKMI", + "colab": {} + }, + "source": [ + "import numpy as np\n", + "import os\n", + "import six\n", + "import six.moves.urllib as urllib\n", + "import sys\n", + "import tarfile\n", + "import tensorflow as tf\n", + "import zipfile\n", + "import pathlib\n", + "import json\n", + "import datetime\n", + "import matplotlib.pyplot as plt\n", + "\n", + "from collections import defaultdict\n", + "from io import StringIO\n", + "from matplotlib import pyplot as plt\n", + "from PIL import Image\n", + "from IPython.display import display" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "r5FNuiRPWKMN" + }, + "source": [ + "Import the object detection module." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "4-IMl4b6BdGO", + "colab": {} + }, + "source": [ + "from object_detection.utils import ops as utils_ops\n", + "from object_detection.utils import label_map_util\n", + "from object_detection.utils import visualization_utils as vis_utils" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "RYPCiag2iz_q" + }, + "source": [ + "Patches:" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "mF-YlMl8c_bM", + "colab": {} + }, + "source": [ + "# patch tf1 into `utils.ops`\n", + "utils_ops.tf = tf.compat.v1\n", + "\n", + "# Patch the location of gfile\n", + "tf.gfile = tf.io.gfile" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "cfn_tRFOWKMO" + }, + "source": [ + "# Model preparation " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "7ai8pLZZWKMS" + }, + "source": [ + "## Loader" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "zm8xp-0eoItE", + "colab": {} + }, + "source": [ + "def load_model(model_name):\n", + " base_url = 'http://download.tensorflow.org/models/object_detection/'\n", + " model_file = model_name + '.tar.gz'\n", + " model_dir = tf.keras.utils.get_file(\n", + " fname=model_name,\n", + " origin=base_url + model_file,\n", + " untar=True)\n", + "\n", + " model_dir = pathlib.Path(model_dir)/\"saved_model\"\n", + " model = tf.saved_model.load(str(model_dir))\n", + " model = model.signatures['serving_default']\n", + "\n", + " return model" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "_1MVVTcLWKMW" + }, + "source": [ + "## Loading label map\n", + "Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `zebra`. 
Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "hDbpHkiWWKMX", + "colab": {} + }, + "source": [ + "# List of the strings that is used to add correct label for each box.\n", + "PATH_TO_LABELS = 'models/research/object_detection/data/snapshot_serengeti_label_map.pbtxt'\n", + "category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=False)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "oVU3U_J6IJVb" + }, + "source": [ + "We will test on a context group of images from one month at one camera from the Snapshot Serengeti val split defined on [LILA.science](http://lila.science/datasets/snapshot-serengeti), which was not seen during model training:\n", + "\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "jG-zn5ykWKMd", + "outputId": "c7bbbb2f-0f6e-4380-fd92-c88c088bd766", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 85 + } + }, + "source": [ + "# If you want to test the code with your images, just add path to the images to\n", + "# the TEST_IMAGE_PATHS.\n", + "PATH_TO_TEST_IMAGES_DIR = pathlib.Path('models/research/object_detection/test_images/snapshot_serengeti')\n", + "TEST_IMAGE_PATHS = sorted(list(PATH_TO_TEST_IMAGES_DIR.glob(\"*.jpeg\")))\n", + "TEST_IMAGE_PATHS" + ], + "execution_count": 11, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[PosixPath('models/research/object_detection/test_images/snapshot_serengeti/S1_E03_R3_PICT0038.jpeg'),\n", + " PosixPath('models/research/object_detection/test_images/snapshot_serengeti/S1_E03_R3_PICT0039.jpeg'),\n", + " PosixPath('models/research/object_detection/test_images/snapshot_serengeti/S1_E03_R3_PICT0040.jpeg'),\n", + " PosixPath('models/research/object_detection/test_images/snapshot_serengeti/S1_E03_R3_PICT0041.jpeg')]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 11 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "oBcQzptnQ-x6", + "colab_type": "text" + }, + "source": [ + "Load the metadata for each image" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "ZLLINOHcQ-An", + "colab_type": "code", + "colab": {} + }, + "source": [ + "test_data_json = 'models/research/object_detection/test_images/snapshot_serengeti/context_rcnn_demo_metadata.json'\n", + "with open(test_data_json, 'r') as f:\n", + " test_metadata = json.load(f)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "BgGTPHhkOAel", + "colab_type": "code", + "outputId": "1421a32a-c208-498f-931f-1bfeb25d6488", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 105 + } + }, + "source": [ + "image_id_to_datetime = {im['id']:im['date_captured'] for im in test_metadata['images']}\n", + "image_path_to_id = {im['file_name']: im['id'] \n", + " for im in test_metadata['images']}\n", + "image_path_to_id" + ], + "execution_count": 13, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "{'models/research/object_detection/test_images/snapshot_serengeti/S1_E03_R3_PICT0038.jpeg': 'S1/E03/E03_R3/S1_E03_R3_PICT0038',\n", + " 'models/research/object_detection/test_images/snapshot_serengeti/S1_E03_R3_PICT0039.jpeg': 'S1/E03/E03_R3/S1_E03_R3_PICT0039',\n", + " 
'models/research/object_detection/test_images/snapshot_serengeti/S1_E03_R3_PICT0040.jpeg': 'S1/E03/E03_R3/S1_E03_R3_PICT0040',\n", + " 'models/research/object_detection/test_images/snapshot_serengeti/S1_E03_R3_PICT0041.jpeg': 'S1/E03/E03_R3/S1_E03_R3_PICT0041'}" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 13 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "H0_1AGhrWKMc" + }, + "source": [ + "# Generate Context Features for each image" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "kt3_pPQOj7ii", + "colab_type": "code", + "outputId": "fc72e978-f576-43f4-bcf1-3eb49fef5726", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 88 + } + }, + "source": [ + "faster_rcnn_model_name = 'faster_rcnn_resnet101_snapshot_serengeti_2020_06_10'\n", + "faster_rcnn_model = load_model(faster_rcnn_model_name)" + ], + "execution_count": 14, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Downloading data from http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet101_snapshot_serengeti_2020_06_10.tar.gz\n", + "588832768/588829839 [==============================] - 3s 0us/step\n", + "INFO:tensorflow:Saver not created because there are no variables in the graph to restore\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "k6Clkv_mBo_U", + "colab_type": "text" + }, + "source": [ + "Check the model's input signature, it expects a batch of 3-color images of type uint8." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "H1qNlFESBsTR", + "colab_type": "code", + "outputId": "9b8b84e0-d7a8-4ec9-d6e0-22d574cb6209", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 34 + } + }, + "source": [ + "faster_rcnn_model.inputs" + ], + "execution_count": 15, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 15 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "eYS8KpRCBtBH", + "colab_type": "text" + }, + "source": [ + "And it returns several outputs. Note this model has been exported with additional output 'detection_features' which will be used to build the contextual memory bank." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "5M-1yxgfkmQl", + "colab_type": "code", + "outputId": "1da98c3b-79c5-4d19-d64c-3e9dbadc97c0", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 153 + } + }, + "source": [ + "faster_rcnn_model.output_dtypes" + ], + "execution_count": 16, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "{'detection_boxes': tf.float32,\n", + " 'detection_classes': tf.float32,\n", + " 'detection_features': tf.float32,\n", + " 'detection_multiclass_scores': tf.float32,\n", + " 'detection_scores': tf.float32,\n", + " 'num_detections': tf.float32,\n", + " 'raw_detection_boxes': tf.float32,\n", + " 'raw_detection_scores': tf.float32}" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 16 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "zVjNFFNIDCst", + "colab_type": "code", + "outputId": "edb46db0-05fb-4952-bc88-db09d7811b01", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 153 + } + }, + "source": [ + "faster_rcnn_model.output_shapes" + ], + "execution_count": 17, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "{'detection_boxes': TensorShape([None, 300, 4]),\n", + " 'detection_classes': TensorShape([None, 300]),\n", + " 'detection_features': TensorShape([None, None, None, None, None]),\n", + " 'detection_multiclass_scores': TensorShape([None, 300, 49]),\n", + " 'detection_scores': TensorShape([None, 300]),\n", + " 'num_detections': TensorShape([None]),\n", + " 'raw_detection_boxes': TensorShape([None, 300, 4]),\n", + " 'raw_detection_scores': TensorShape([None, 300, 49])}" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 17 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "JP5qZ7sXJpwG" + }, + "source": [ + "Add a wrapper function to call the model, and cleanup the outputs:" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "ajmR_exWyN76", + "colab": {} + }, + "source": [ + "def run_inference_for_single_image(model, image):\n", + " '''Run single image through tensorflow object detection saved_model.\n", + "\n", + " This function runs a saved_model on a (single) provided image and returns\n", + " inference results in numpy arrays.\n", + "\n", + " Args:\n", + " model: tensorflow saved_model. 
This model can be obtained using \n", + " export_inference_graph.py.\n", + " image: uint8 numpy array with shape (img_height, img_width, 3)\n", + "\n", + " Returns:\n", + " output_dict: a dictionary holding the following entries:\n", + " `num_detections`: an integer\n", + " `detection_boxes`: a numpy (float32) array of shape [N, 4]\n", + " `detection_classes`: a numpy (uint8) array of shape [N]\n", + " `detection_scores`: a numpy (float32) array of shape [N]\n", + " `detection_features`: a numpy (float32) array of shape [N, 7, 7, 2048]\n", + " '''\n", + " image = np.asarray(image)\n", + " # The input needs to be a tensor, convert it using `tf.convert_to_tensor`.\n", + " input_tensor = tf.convert_to_tensor(image)\n", + " # The model expects a batch of images, so add an axis with `tf.newaxis`.\n", + " input_tensor = input_tensor[tf.newaxis,...]\n", + "\n", + " # Run inference\n", + " output_dict = model(input_tensor)\n", + " # All outputs are batches tensors.\n", + " # Convert to numpy arrays, and take index [0] to remove the batch dimension.\n", + " # We're only interested in the first num_detections.\n", + " num_dets = output_dict.pop('num_detections')\n", + " num_detections = int(num_dets)\n", + " for key,value in output_dict.items():\n", + " output_dict[key] = value[0, :num_detections].numpy() \n", + " output_dict['num_detections'] = num_detections\n", + "\n", + " # detection_classes should be ints.\n", + " output_dict['detection_classes'] = output_dict['detection_classes'].astype(\n", + " np.int64)\n", + " return output_dict" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "un5SXxIxMaaV", + "colab_type": "text" + }, + "source": [ + "Functions for embedding context features" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "qvtvAZFDMoTM", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def embed_date_captured(date_captured):\n", + " \"\"\"Encodes the datetime of the image.\n", + "\n", + " Takes a datetime object and encodes it into a normalized embedding of shape \n", + " [5], using hard-coded normalization factors for year, month, day, hour,\n", + " minute.\n", + "\n", + " Args:\n", + " date_captured: A datetime object.\n", + "\n", + " Returns:\n", + " A numpy float32 embedding of shape [5].\n", + " \"\"\"\n", + " embedded_date_captured = []\n", + " month_max = 12.0\n", + " day_max = 31.0\n", + " hour_max = 24.0\n", + " minute_max = 60.0\n", + " min_year = 1990.0\n", + " max_year = 2030.0\n", + "\n", + " year = (date_captured.year-min_year)/float(max_year-min_year)\n", + " embedded_date_captured.append(year)\n", + "\n", + " month = (date_captured.month-1)/month_max\n", + " embedded_date_captured.append(month)\n", + "\n", + " day = (date_captured.day-1)/day_max\n", + " embedded_date_captured.append(day)\n", + "\n", + " hour = date_captured.hour/hour_max\n", + " embedded_date_captured.append(hour)\n", + "\n", + " minute = date_captured.minute/minute_max\n", + " embedded_date_captured.append(minute)\n", + "\n", + " return np.asarray(embedded_date_captured)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "xN8k5daOOA7b", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def embed_position_and_size(box):\n", + " \"\"\"Encodes the bounding box of the object of interest.\n", + "\n", + " Takes a bounding box and encodes it into a normalized embedding of shape \n", + " [4] - the center point (x,y) and width and height of the box.\n", + "\n", + " Args:\n", 
+ " box: A bounding box, formatted as [ymin, xmin, ymax, xmax].\n", + "\n", + " Returns:\n", + " A numpy float32 embedding of shape [4].\n", + " \"\"\"\n", + " ymin = box[0]\n", + " xmin = box[1]\n", + " ymax = box[2]\n", + " xmax = box[3]\n", + " w = xmax - xmin\n", + " h = ymax - ymin\n", + " x = xmin + w / 2.0\n", + " y = ymin + h / 2.0\n", + " return np.asarray([x, y, w, h])" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "lJe2qy8HPc6Z", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def get_context_feature_embedding(date_captured, detection_boxes,\n", + " detection_features, detection_scores):\n", + " \"\"\"Extracts representative feature embedding for a given input image.\n", + "\n", + " Takes outputs of a detection model and focuses on the highest-confidence\n", + " detected object. Starts with detection_features and uses average pooling to\n", + " remove the spatial dimensions, then appends an embedding of the box position\n", + " and size, and an embedding of the date and time the image was captured,\n", + " returning a one-dimensional representation of the object.\n", + "\n", + " Args:\n", + " date_captured: A datetime string of format '%Y-%m-%d %H:%M:%S'.\n", + " detection_features: A numpy (float32) array of shape [N, 7, 7, 2048].\n", + " detection_boxes: A numpy (float32) array of shape [N, 4].\n", + " detection_scores: A numpy (float32) array of shape [N].\n", + "\n", + " Returns:\n", + " A numpy float32 embedding of shape [2057].\n", + " \"\"\"\n", + " date_captured = datetime.datetime.strptime(date_captured,'%Y-%m-%d %H:%M:%S')\n", + " temporal_embedding = embed_date_captured(date_captured)\n", + " embedding = detection_features[0]\n", + " pooled_embedding = np.mean(np.mean(embedding, axis=1), axis=0)\n", + " box = detection_boxes[0]\n", + " position_embedding = embed_position_and_size(box)\n", + " bb_embedding = np.concatenate((pooled_embedding, position_embedding))\n", + " embedding = np.expand_dims(np.concatenate((bb_embedding,temporal_embedding)),\n", + " axis=0)\n", + " score = detection_scores[0]\n", + " return embedding, score" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "z1wq0LVyMRR_" + }, + "source": [ + "Run it on each test image and use the output detection features and metadata to build up a context feature bank:" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "DWh_1zz6aqxs", + "colab": {} + }, + "source": [ + "def run_inference(model, image_path, date_captured, resize_image=True):\n", + " \"\"\"Runs inference over a single input image and extracts contextual features.\n", + "\n", + " Args:\n", + " model: A tensorflow saved_model object.\n", + " image_path: Absolute path to the input image.\n", + " date_captured: A datetime string of format '%Y-%m-%d %H:%M:%S'.\n", + " resize_image: Whether to resize the input image before running inference.\n", + "\n", + " Returns:\n", + " context_feature: A numpy float32 array of shape [2057].\n", + " score: A numpy float32 object score for the embedded object.\n", + " output_dict: The saved_model output dictionary for the image.\n", + " \"\"\"\n", + " with open(image_path,'rb') as f:\n", + " image = Image.open(f)\n", + " if resize_image:\n", + " image.thumbnail((640,640),Image.ANTIALIAS)\n", + " image_np = np.array(image)\n", + "\n", + " # Actual detection.\n", + " output_dict = run_inference_for_single_image(model, image_np)\n", + "\n", + 
" context_feature, score = get_context_feature_embedding(\n", + " date_captured, output_dict['detection_boxes'],\n", + " output_dict['detection_features'], output_dict['detection_scores'])\n", + " return context_feature, score, output_dict" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "3a5wMHN8WKMh", + "colab": {} + }, + "source": [ + "context_features = []\n", + "scores = []\n", + "faster_rcnn_results = {}\n", + "for image_path in TEST_IMAGE_PATHS:\n", + " image_id = image_path_to_id[str(image_path)]\n", + " date_captured = image_id_to_datetime[image_id]\n", + " context_feature, score, results = run_inference(\n", + " faster_rcnn_model, image_path, date_captured)\n", + " faster_rcnn_results[image_id] = results\n", + " context_features.append(context_feature)\n", + " scores.append(score)\n", + "\n", + "# Concatenate all extracted context embeddings into a contextual memory bank.\n", + "context_features_matrix = np.concatenate(context_features, axis=0)\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "DsspMPX3Cssg" + }, + "source": [ + "## Run Detection With Context" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "f7aOtOlebK7h" + }, + "source": [ + "Load a context r-cnn object detection model:" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "1XNT0wxybKR6", + "outputId": "cc5b0677-cf16-46c2-9ae5-32681725f856", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 88 + } + }, + "source": [ + "context_rcnn_model_name = 'context_rcnn_resnet101_snapshot_serengeti_2020_06_10'\n", + "context_rcnn_model = load_model(context_rcnn_model_name)\n" + ], + "execution_count": 24, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Downloading data from http://download.tensorflow.org/models/object_detection/context_rcnn_resnet101_snapshot_serengeti_2020_06_10.tar.gz\n", + "724664320/724658931 [==============================] - 3s 0us/step\n", + "INFO:tensorflow:Saver not created because there are no variables in the graph to restore\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "G6IGGtGqBH6y", + "colab_type": "text" + }, + "source": [ + "We need to define the expected context padding size for the\n", + "model, this must match the definition in the model config (max_num_context_features)." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "4oh9XNLBjkTL", + "colab_type": "code", + "colab": {} + }, + "source": [ + "context_padding_size = 2000" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "yN1AYfAEJIGp" + }, + "source": [ + "Check the model's input signature, it expects a batch of 3-color images of type uint8, plus context_features padded to the maximum context feature size for this model (2000) and valid_context_size to represent the non-padded context features: " + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "CK4cnry6wsHY", + "outputId": "d77af014-769f-4e20-b4ac-bfdd40502128", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 68 + } + }, + "source": [ + "context_rcnn_model.inputs" + ], + "execution_count": 26, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[,\n", + " ,\n", + " ]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 26 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "Q8u3BjpMJXZF" + }, + "source": [ + "And returns several outputs:" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "oLSZpfaYwuSk", + "outputId": "63a3903f-529b-41f9-b742-9b81c4c5e096", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 136 + } + }, + "source": [ + "context_rcnn_model.output_dtypes" + ], + "execution_count": 27, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "{'detection_boxes': tf.float32,\n", + " 'detection_classes': tf.float32,\n", + " 'detection_multiclass_scores': tf.float32,\n", + " 'detection_scores': tf.float32,\n", + " 'num_detections': tf.float32,\n", + " 'raw_detection_boxes': tf.float32,\n", + " 'raw_detection_scores': tf.float32}" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 27 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "FZyKUJeuxvpT", + "outputId": "d2feeaba-2bb2-4779-a96a-94a8a0aff362", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 136 + } + }, + "source": [ + "context_rcnn_model.output_shapes" + ], + "execution_count": 28, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "{'detection_boxes': TensorShape([1, 300, 4]),\n", + " 'detection_classes': TensorShape([1, 300]),\n", + " 'detection_multiclass_scores': TensorShape([1, 300, 49]),\n", + " 'detection_scores': TensorShape([1, 300]),\n", + " 'num_detections': TensorShape([1]),\n", + " 'raw_detection_boxes': TensorShape([1, 300, 4]),\n", + " 'raw_detection_scores': TensorShape([1, 300, 49])}" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 28 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "CzkVv_n2MxKC", + "colab": {} + }, + "source": [ + "def run_context_rcnn_inference_for_single_image(\n", + " model, image, context_features, context_padding_size):\n", + " '''Run single image through a Context R-CNN saved_model.\n", + "\n", + " This function runs a saved_model on a (single) provided image and provided \n", + " contextual features and returns inference results in numpy arrays.\n", + "\n", + " Args:\n", + " model: tensorflow Context R-CNN saved_model. This model can be obtained\n", + " using export_inference_graph.py and setting side_input fields. 
\n", + " Example export call - \n", + " python export_inference_graph.py \\\n", + " --input_type image_tensor \\\n", + " --pipeline_config_path /path/to/context_rcnn_model.config \\\n", + " --trained_checkpoint_prefix /path/to/context_rcnn_model.ckpt \\\n", + " --output_directory /path/to/output_dir \\\n", + " --use_side_inputs True \\\n", + " --side_input_shapes 1,2000,2057/1 \\\n", + " --side_input_names context_features,valid_context_size \\\n", + " --side_input_types float,int \\\n", + " --input_shape 1,-1,-1,3\n", + "\n", + " image: uint8 numpy array with shape (img_height, img_width, 3)\n", + " context_features: A numpy float32 contextual memory bank of shape \n", + " [num_context_examples, 2057]\n", + " context_padding_size: The amount of expected padding in the contextual\n", + " memory bank, defined in the Context R-CNN config as \n", + " max_num_context_features.\n", + "\n", + " Returns:\n", + " output_dict: a dictionary holding the following entries:\n", + " `num_detections`: an integer\n", + " `detection_boxes`: a numpy (float32) array of shape [N, 4]\n", + " `detection_classes`: a numpy (uint8) array of shape [N]\n", + " `detection_scores`: a numpy (float32) array of shape [N]\n", + " '''\n", + " image = np.asarray(image)\n", + " # The input image needs to be a tensor, convert it using \n", + " # `tf.convert_to_tensor`.\n", + " image_tensor = tf.convert_to_tensor(\n", + " image, name='image_tensor')[tf.newaxis,...]\n", + "\n", + " context_features = np.asarray(context_features)\n", + " valid_context_size = context_features.shape[0]\n", + " valid_context_size_tensor = tf.convert_to_tensor(\n", + " valid_context_size, name='valid_context_size')[tf.newaxis,...]\n", + " padded_context_features = np.pad(\n", + " context_features,\n", + " ((0,context_padding_size-valid_context_size),(0,0)), mode='constant')\n", + " padded_context_features_tensor = tf.convert_to_tensor(\n", + " padded_context_features,\n", + " name='context_features',\n", + " dtype=tf.float32)[tf.newaxis,...]\n", + "\n", + " # Run inference\n", + " output_dict = model(\n", + " inputs=image_tensor,\n", + " context_features=padded_context_features_tensor,\n", + " valid_context_size=valid_context_size_tensor)\n", + " # All outputs are batches tensors.\n", + " # Convert to numpy arrays, and take index [0] to remove the batch dimension.\n", + " # We're only interested in the first num_detections.\n", + " num_dets = output_dict.pop('num_detections')\n", + " num_detections = int(num_dets)\n", + " for key,value in output_dict.items():\n", + " output_dict[key] = value[0, :num_detections].numpy() \n", + " output_dict['num_detections'] = num_detections\n", + "\n", + " # detection_classes should be ints.\n", + " output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int64)\n", + " return output_dict" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "0FqVkR3Agc6U", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def show_context_rcnn_inference(\n", + " model, image_path, context_features, faster_rcnn_output_dict,\n", + " context_padding_size, resize_image=True):\n", + " \"\"\"Runs inference over a single input image and visualizes Faster R-CNN vs. 
\n", + " Context R-CNN results.\n", + "\n", + " Args:\n", + " model: A tensorflow saved_model object.\n", + " image_path: Absolute path to the input image.\n", + " context_features: A numpy float32 contextual memory bank of shape \n", + " [num_context_examples, 2057]\n", + " faster_rcnn_output_dict: The output_dict corresponding to this input image\n", + " from the single-frame Faster R-CNN model, which was previously used to\n", + " build the memory bank.\n", + " context_padding_size: The amount of expected padding in the contextual\n", + " memory bank, defined in the Context R-CNN config as \n", + " max_num_context_features.\n", + " resize_image: Whether to resize the input image before running inference.\n", + "\n", + " Returns:\n", + " context_rcnn_image_np: Numpy image array showing Context R-CNN Results.\n", + " faster_rcnn_image_np: Numpy image array showing Faster R-CNN Results.\n", + " \"\"\"\n", + "\n", + " # the array based representation of the image will be used later in order to prepare the\n", + " # result image with boxes and labels on it.\n", + " with open(image_path,'rb') as f:\n", + " image = Image.open(f)\n", + " if resize_image:\n", + " image.thumbnail((640,640),Image.ANTIALIAS)\n", + " image_np = np.array(image)\n", + " image.thumbnail((400,400),Image.ANTIALIAS)\n", + " context_rcnn_image_np = np.array(image)\n", + " \n", + " faster_rcnn_image_np = np.copy(context_rcnn_image_np)\n", + "\n", + " # Actual detection.\n", + " output_dict = run_context_rcnn_inference_for_single_image(\n", + " model, image_np, context_features, context_padding_size)\n", + "\n", + " # Visualization of the results of a context_rcnn detection.\n", + " vis_utils.visualize_boxes_and_labels_on_image_array(\n", + " context_rcnn_image_np,\n", + " output_dict['detection_boxes'],\n", + " output_dict['detection_classes'],\n", + " output_dict['detection_scores'],\n", + " category_index,\n", + " use_normalized_coordinates=True,\n", + " line_thickness=2)\n", + " \n", + " # Visualization of the results of a faster_rcnn detection.\n", + " vis_utils.visualize_boxes_and_labels_on_image_array(\n", + " faster_rcnn_image_np,\n", + " faster_rcnn_output_dict['detection_boxes'],\n", + " faster_rcnn_output_dict['detection_classes'],\n", + " faster_rcnn_output_dict['detection_scores'],\n", + " category_index,\n", + " use_normalized_coordinates=True,\n", + " line_thickness=2)\n", + " return context_rcnn_image_np, faster_rcnn_image_np" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "3cYa2B8uAYx0", + "colab_type": "text" + }, + "source": [ + "Define Matplotlib parameters for pretty visualizations" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "9F8okR1uAQ0T", + "colab_type": "code", + "colab": {} + }, + "source": [ + "%matplotlib inline\n", + "plt.rcParams['axes.grid'] = False\n", + "plt.rcParams['xtick.labelsize'] = False\n", + "plt.rcParams['ytick.labelsize'] = False\n", + "plt.rcParams['xtick.top'] = False\n", + "plt.rcParams['xtick.bottom'] = False\n", + "plt.rcParams['ytick.left'] = False\n", + "plt.rcParams['ytick.right'] = False\n", + "plt.rcParams['figure.figsize'] = [15,10]" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "YGj7nXXQAaQ7", + "colab_type": "text" + }, + "source": [ + "Run Context R-CNN inference and compare results to Faster R-CNN" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "vQ2Sj2VIOZLA", + "outputId": 
"1c043894-09e5-4c9f-a99d-ae21d6e72d0c", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000 + } + }, + "source": [ + "for image_path in TEST_IMAGE_PATHS:\n", + " image_id = image_path_to_id[str(image_path)]\n", + " faster_rcnn_output_dict = faster_rcnn_results[image_id]\n", + " context_rcnn_image, faster_rcnn_image = show_context_rcnn_inference(\n", + " context_rcnn_model, image_path, context_features_matrix,\n", + " faster_rcnn_output_dict, context_padding_size)\n", + " plt.subplot(1,2,1)\n", + " plt.imshow(faster_rcnn_image)\n", + " plt.title('Faster R-CNN')\n", + " plt.subplot(1,2,2)\n", + " plt.imshow(context_rcnn_image)\n", + " plt.title('Context R-CNN')\n", + " plt.show()" + ], + "execution_count": 32, + "outputs": [ + { + "output_type": "display_data", + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA1wAAAFDCAYAAAAu+g+jAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+j8jraAAAgAElEQVR4nOx9d5xsWVnt2tXVfe8dYJhAGEYFRAxk8JFBER9ZUBAFFRQkCKiAJAcQRMkgUQUlKAxZFCQHA2EMAyoITwUe8BjCEGeGGSbduV3Vtd8fp77qVavWPtV9b0+4t7/v9+tfV52zw5fXPt/Z51SptSIpKSkpKSkpKSkpKSlp52lwaTOQlJSUlJSUlJSUlJR0pFJecCUlJSUlJSUlJSUlJV1MlBdcSUlJSUlJSUlJSUlJFxPlBVdSUlJSUlJSUlJSUtLFRHnBlZSUlJSUlJSUlJSUdDFRXnAlJSUlJSUlJSUlJSVdTJQXXElJSUlJSUlJSUlJSRcT5QVX0mWaSilfLqXsL6WcT38nHuRYDyyl/PMO8/fAUsrGlK9zSymfLqXcfUmfo0spLymlfHXa7/9Nv19pev7LpZTvlFIuR30eUkr5CH2vpZT/KqUM6NgzSymv3Un5kpKSkpIuG1RK+ZVSyn9MceObpZT3l1JuuwPjvraU8swd4nHpWFP8umAqx9dLKS8qpaws6dOUvZTyB9Mx70Pth9Nj1yS+ainl5tTm2qWU/DHapEuE8oIr6XCge9RaL09/37g0mCilDBunTq21Xh7AMQBeDuAtpZRjGmOsAfhHANcDcBcARwO4FYCzANycmq4AePQSlk4E8EtbFiApKSkp6bCkUspjAbwEwLMBXBXA1dHhzc9dmnwdAt1oipu3A3BfAA9qNdyi7N8F8IdLLty+C2BHLiyTkrZLecGVdNhRKeXYUsp7SilnlFLOnn7+fjr/wFLKl0op55VSTiul3K+Uch0Afw7gVtMK2TnTtntKKS+Y3m36dinlz0sp+6bnfqqUcnop5aRSyrcAvKaPr1rrBMDrAVwOwA83mv0aOrC4V631M7XWSa31O7XWZ9Ra30ft/gjA41sXblN6PjqAaV0IJiUlJSUd5lRKuSKApwP4rVrr22utF9RaR7XWd9danzBts2e6U+Ib07+XlFL2TM8Flj1uunvim6WUX5+e+w0A9wPwu1NsfPf0+ImllLdNcfa0UsqjpsePm451j+n3y5dSvlhK+bXWWH1Ua/0igH8BcOODlX1KHwCwDuD+PdOdDOCGpZTbLeMrKWmnKS+4kg5HGqC7+LkGuouX/QD+FACm2/D+GMBda61XAHBrAJ+qtX4WwMMxvRtVa40LmecC+BF0yf7aAL4PwO/TXCcAOG4612/0MTWtrP06gBGArzSa3QHAB2qt5y+R8T8AfATA43vavB3AuQAeuGSspKSkpKTDl24FYC+Av+1p83sAbokOy26EbsfEU+j8CQCuiA7jHgzgZaWUY2utrwTwRgDPn2LjPaZb1d8N4NPT9v8bwO+UUu5ca/0uurtRryqlXAXAi9Fh7OvcWMsEK6X8GICfAPDFQ5AdACqApwJ4WilltdHmQnR3yZ61jK+kpJ2mvOBKOhzoHaWUc6Z/76i1nlVrfVut9cJa63nokidXrCYArl9K2Vdr/Wat9X/coKWUgu4i6jG11u9Ox3o25rfpTQA8rdZ6oNa6v8HfLad3zC4C8AIA96+1fqfR9ngA39yi3L8P4JGllCs3zgfAPHW6VTEpKSkp6cij4wGcWWsd97S5H4CnT3dMnAHgDwH8Kp0fTc+Pprspzgfwo42xbgbgyrXWp9da12utXwLwKkyxsdb6dwD+Gt32+LsBeNhByPTJUsoFAD6Lrrj48ka7rciOKV/vAnAGgIf0NHsFgKuXUu66PXaTkg6N8oIr6XCge9Zaj5n+3bOUclQp5RWllK+UUs4FcAqAY0opK7XWC9DtB384gG+WUt47raA5ujKAowB8Ii7o0G1L4AucM2qtFy3h72PTO2bHAngXumodSilXL/Syj2nbswBcbStC11r/G8B7ADyxp837AJyOgwO8pKSkpKTLPp0F4EpLto+fiPmdFV+ZHpuNIRctFwK4fGOsawA4kQqd5wB4Mrrnp4JeCeD6AF5baz1ri3Iw/fh0/vsCuAW6rfiYvgwjcPN+2JrsTE9Bd7dvrztZaz0A4BnTv6SkS4zygivpcKTHoavM3aLWejSAn5weLwBQa/1grfWO6C5sPoeuMgd0d4SYzkS3HfF6dEF3xemDvGj0adJ0m+AjAPxqKeUmtdav8ss+ps3+AcCdC72BcAk9DcBD0W3raNHvoQPDo7bKa1JSUlLSYUOnAjgA4J49bb6B7kIp6OrTY1shxbmvATiNcPGYWusVaq13A2bb518J4HUAfrOUcu2esdqTdvRWdPL9/vTYXQk334ityc5j/j267Ym/2dPsNehecvXzW+U1KelQKS+4kg5HugK6C6VzSinHobsoAQCUUq5aSvm56QXNAXTbJibT098G8P2x/W76kotXAXjxdC86SinfV0q588EyNt3f/mrMPwfG9Hp0YPa2UsqPlVIGpZTjSylPLqXczYz3RQB/BeBRPXN+BMB/A3jAwfKdlJSUlHTZpFrr99BhystKKbHLY7WUctdSyvOnzd4M4CmllCuX7idGfh/AG7Y4xbcBXIu+/xuA86YvjNpXSlkppVy/lHKz6fkno7uwehC6Fz
y9rmy+HVDH2go9F8BDSykn6Iktyq70ewB+tzXZ9E7f0wCctE0+k5IOmvKCK+lwpJcA2IfuDtXH0G0DDBoAeCy6yt530T3b9YjpuQ8B+B8A3yqlnDk9dhK6atjHptsT/wHtfe3b4e9upZQb6onpdoY7oLvz9vfoXnrxbwCuBODjjfGejul2ix56CrqXeyQlJSUlHWFUa30hOmx7CrrnlL4G4LcBvGPa5JnoXrb0fwD8F4BPYuuvQP8LANel56Q3ANwd3Qs4TkOHta8GcMVSyv+a8vFr03bPQ3fx9UQ31hZl+y90jwY8oXF+meza/l/Q4WofvRlbf546KemQqdSav/mWlJSUlJSUlJSUlJR0cVDe4UpKSkpKSkpKSkpKSrqYKC+4kpKSkpKSkpKSkpKSLibKC66kpKSkpKSkpKSkpKSLifKCKykpKSkpKSkpKSkp6WKirf6QXJNKKbWUglorSimz4/oyjjgXx/U7H2sd176tuVrzbuWz9nVjL+PTUczTkmG7tNV5D3bslo63O5cbK+myQZeUbVpxtNO0XXmcT7fyVmuO7eQv7d/67HjT8fpylZ5fli+3koulzZm1Vv5x8KQeSoxMjNzuWEmXDUqMTIzsm6PFc63VKnlLF1yllFvWWj/WOj8cDjGZTDAYDGaTbmxszJhS4QaD+RtrtdZZfwCYTCZz5xms4nO0nQo36xM8xPdSCgaDwYICneNxHz7vjDEYDOb4jGNqUB0rPkd/5wA8jh5zjqDyLgM17ss6VLsMBgNsbGzM8eDax+doz2PH97CLk8fpjL/zXGpLF2Tx54LK2U31GLIAi77IsvF5lodtwHExmUy2nDDi88rKyoJMPHdrodVaIAWvwXeMu7KyMsez0yn7h9OB8zv1Y+4f/DvdqG9xrMTxrSz23Lnop3OyzjSeVlZWZrHQ8sEW8Lg8oDy5/OQ+u3zq8ivHlfKsMab613iM/vF9NBp9pangXUjL8BFIjORjiZGJkYmRm5QYeWRh5Gg0auvWGWM7VEqp4Yg8eQjFjDqBgtEw1HA4nAOICKaNjY05h9fgUgWEA/YpKHgCsJDY4/vKyspCklWKthwAzujqdAp4PGcLRDTgGIhZT8yHI9XfysrKbP445gCPdajE8qvMeo4DOuyufZyeWTdBmqD1e8se4XetBUccH41GC0mHx+TEzOPy4or9noOTZQM27eD8nG3L7XVsBjM+x2OzbVU3LslqXCuIBsUcrh/zE+cjSTOwuHlbCxFuy+O7BWjEiYJf9OHco2NxPmnFo/ZRe7PdOM7dAqQVhzGW5je1dSsf8YJJ24aPqW8zb6UUrK+vf6LWelMkbYkSIxMjmR+Wkz8nRiZGMj9xPjHy8MLI8XiMyWRy8He4lpE6QCjAJXxWACso+sTxcLQYUxOkVn+ibyQHdTQ2Mjsa88PjRnKbTCZzytfxmB9VvDqRS8bMj7bVANIKRhDzosDnAE0raTwG08bGBtbW1jCZTDAej2e8Md8KChoU8T3s4vh01VwAs8XDeDye+YAmFk3oPKcmJ1eV0sUk8zcejxeC3iVrTa7MKwM9+4mzjy6EVCdhC+cfLYB1vs4A5IAn7OHiyOlVq4EKsOoPcZz9phVfSq6yvbq6OhtLie0VOUSrzDEO5w7VmQILJ12Wx/Gvvu4qvWFXznma/N34ChSsF81vzFMsdoIPjk/2Gc4VLR0nLafEyMTIxMjEyBgjMfLIxciWXYAdemmGOlfLGTgRMGP8P4zsmHcK43N6q77FI/MRx7mawkbQZMM8slytoNMr8WjPjh9Jk4OZK3MuiQHovX2rFSUmBXFeAKjt1tfXZ8lckzzLGlVOlll1Gvy4aqhL6qw71Y3Tu55rJRfWQyspq69qXwWp8CHuC3SJDti0VfAZ/VhX2l/nDr9jf2nFB/MW5zR5BS/BR3xX4GzZSIEi/nMMKx9MLimGfOxj2p4TXSR2XWgp/6w/9fcYR/MAt2H52I782fmS2jD8Mtop3yEL68QtONxig8dXXqONxpSTuTVmK3aTllNiZGJkYmRiZPxPjDxyMdK1C9qJl2bMCRlM8DllkJ2SlaKBoUHlgITn5CqJS6LaR8dmQzOg9M2vAKWVF55L+8exSNZMCgZ6nvlpASx/j+B0FcBW1Zp1wsGiulOnd8mOb6Oz7Go3tXvMzVWrGCuSQIzLFYiYQ2VjwNRA1QTLfDMItsBMfYZ9nPXnKp3OflxFU3/gai6DE9sm/FH9nRdDCqhcKYq2ansHErHVoBU/TlYF12i3vr6+UM1zxHPo8b524aOql5BHFzzRXvkO3jU/OHuobdgGMR/7nCZxB+z8X0FNFzhaRVbZuU/LTjxe0tYpMTIxMjFyfr7EyMRItceRgpF9tCPo6RK3Cs/CspJUsCBWhIKKJrAWLxxw0S6qKKpMNjYnAXYyTmh8hdy64m8RVwqcrvgcV3VUt8FH3NZ3c2qicFf93Fb7AlgIJNaLOnGcZz1zP+ZHt7ZsbGzMBRmPyQlNE5wGsepR28W8uujgwNQAU531LSj4M2/10CqRI9UnzxHfp3uE5/q1bt+34oV1wn2ULwUSJn4uBfAPBmtss6108RBjaIJUQGK7OZvzZ47Z0BP3c4u9vuPqM25x5o6r3mMOlqela80VPK4DmvCziFuWlxcXGpPRj3OO85Gk7VNiZGJkYmRiZGIkmsePFIzsox17houFHAwGC7f9HLngU6Wx4Cwgz8vt+bNTBM/HiUsdSfu54HcJXGXSczxfOBwbXfuoflhPrJPoFw9Y9oEsj+2AbFkfl4SZL7Y7g3HLId1igsklmGjPyUITgIIB92VAcgHJ/qeJ3QV0yB1zctDyosb5a3xnPhUsWA7ew+yATYFe9cvf4yHjvoWd6i744X6uLcdrS15XpebPaj/1WSc7n2d7cBtXmVf+9POyHBHHVVat5i+LN23Pfujm1sVPjKvbuhy1wMkBYQs4k5ZTYmRbJj3H8yVGzs+TGJkYmRh52cbIPjrkO1w6uUv0ShzsmvDVEdQJuD/PzQrmylqMyYrg5NACnJibx3SApoblylVfO9WTJrqYTx0rSKuSTrfOVnpe/6tOHcg7p2K52T46FydVtoPzlVayVR24INPEzDxrAKuu1c5qH9azG8+dc9UX53PODgxSGh99ccbVwr5FQh+AuKQZ7d0tf3dLfiu60/gE5p+DaOmupW+dTz/3bQ1woKt6dvmH27Nv69aLOKYx5e4g8HeurOn5Vvy4xYvTUeikBXy6aE3aOiVGJkaq3ImRiZGJkbsPIw/5gkuTbmtCDZL4HMzr7XYWximLg4znZ8M5fhhstK2TwQVCvJ1JgaZPB/HdJVMNvr5Ac07UAmP33fGm8ro+fX2VLz7W4jH+MyDw974xVTYFDrWd+lDcSla/0geduW9LV8pLjK8VTAf6bkzHtwN8YN6XXJxwZVOTV/zXxZHjh+XQeGU+mFxVTuPbyahjuLzB49Ra5xZfEZ8a25oDOA+0Fil9pAsMBRS3AFHe44/buqTNeub2DtyYn5ZeW4sEtoUCIp9P2h4lRiZGKl98LDEyMZJlSYw8cjFyR7YUxkScJ
FuCOmDgvpoU9areGZHn01vVOgff1nYB3JIrFNuqXHHSjGNuHG7fBwwKVK2xlNjRHEi5hO4SfrR3gLsM5FUXqkc+1wIcHov9I/rwmNFuWYVM5wM2fUoriZxo+HY786bjcl+XoNiu6t8sE/s5b7Vw9nNjqt1Zlwx0GgvOZjyXxpXaknl0sdpKRMti0Y2l211Yz1pBDD072WK+sK9b3Do+Wz7Y0pfypDHQiiPWAcuoumz5CcvXl3e5rS7wQt99QJK0nBIjEyPdeE6PiZGJkaq7xMjDHyN3ZEuhfnYCqRDxPwzd2hagc7lEoVfAmoD4FiPzyGPwZwccTk5N9GpM/nNz8ndNNsyTc1zVt/LgzrEOefxW4u/TFfOrC4UWoDl/YBtykLTG7ks8mljVH92boVQmt+VDx3J25B8mbOksApWf3XB+wvIo2PTJy+dVBp7TVfGY1Id1TJZF7aAJk22nCyzHs/MbllXHVOJkyOO6qpibx+m6lY/YJ91CRuVS3275amu+PmKbaa5TH2R+W2O4WNaxkrZGiZGJkYmRiZGJkbsDI/toR7YUBiPMlJ5zCZL7OAWwEdxvDrgg0OMuuFsJvhWIuq3BJRk1UHznqpA7Fsc1ADixsRz6P/roNgF2GievUkvvPKZuZXDAweS2pXBg8zhuYbAsMasu9HN814DlMeKYPsDO53grhyMF/JBPda+A0bc1hO2qPqRysJ/oGDy2S3Kuj9MX89PSsat4uSSpY7cAPubSOHC+zT6jOmc7aluex/VX/WkSVrvzb+04kFT5NL54Ya0+x/p3/sLHeQwXJ2ybINanAz4eN2nrlBiZGJkYmRiZGLk7MLKVP4Adei18a5JQoN6KVkfid+pH2zCMm6MFBn2JJL7zcU7ubtz4rm9qYmfW18FqwDn+3BaIlpOq7H3yBj/s0NyHx3XJwvHjAlcdVgOqFZAxh9rVteOgUnuxDpkXB0Sqq1aCdVt8og37B//ngFV9xwJB53Q+xvbQZOHAg8+55O4onqlQ3XB/JwPrp8+2SqrnVkVJQVvlVn6DFKjjmPvcAnPlw8mjiz/lh+OkVc1kedSvtQ3nJNaB5kcdX/ngthwXvK2ipQd9fTLzn3TwlBiZGJkYmRgZlBi5+zByxy64HHEyr7UuJLloo32Axd9M0KtaTvZKqkjuowpxSZUNwglM+7FsmgTcmE5m5+gacDxG6DD0oWDIx/mH61o8sB55DpeM+5KG06kGRIylPyoYFPNqtZSptU2Dz/NvX/Txpzz2JXpNpMFny690C1C01USkulV/WBbECnSa1OJc+ImrGMb38BuWKXjQ+FN9Rn/VmwONFji0dNnKEfqd+2ncxzFd6CkYc+w7HnkeBUI+pouR1gKhpRNdeIUPuj6txVdLn25x38pPOtdWFi5JW6fESD9HYmRiZBxLjJzXT2Ik5o4fThi5I89wxcN5zmHUwGoANjAnfw1cFW4mgBiR54pxNIBaV9k6t55vAQ+TGlJ55DaxJcIBipK7EmedAps/9sdjq114LOWb5eLEwFs3VBYGWtWhyssArPpgPlvB3peQhsMhSilzABpzqc5U1laQu4TkdMn65C0iXBF0/Vmv7JNciWQ+VQbVr4Iq94vtILow4u9c9QneFZiUD5WHxw0e3cJQx+Qc0LcIYNk1xpYtdlinWrFv+YbaQucNvuO7e3A2fqzUJezWMQUflpkXXH0gxDHM/GtuVX3G+Dyms0fS1igxMjFS+8V4iZGJkYmRuwcjD/kthTwxC89BxUlQE7VzAD4WffX2fZAmH5eIQlkRIO7qtY+vkEUN1JeQ+bwDLxfYffyoM2h7Bxou4Su/qjP+r20nkwnG47EFRwYRVwHihD4YDGZAB2D2w4IuIateNaHGWNGGAXSZjlUGBnk+z/K4RZPqXX1HfzSR4yR8MRYCMUfog6uxmhhZduePk8kEw+HQAiD34+OaNBlY2J4OnLli7MaNxSOP3wceajOVvaV7tbvGZYynx1sLDuUh8gBXQ2utczZj2Vvz8efBYDAXWyqj8ql8KWAwKOgCjxdoPJ+Or3lhK3ZKWqTEyDbfiZGJkYmRiZGt+fjzkYCRO/ZaeA0SdhROwHE+bpm3Kls6rlZx3PzKB5MCXV+icX2DZ5fYXBJujeOSzjInZh4A/wBjjOOCiMePwHbAzw7GAaE8OTBVPridOrvypvxzBa5Pd1rh0oSgVTXl21UonO9xsLskyECmCYXn18TibK0+yeNrxSmOLQMzAHPPgDh9sr4UrHVcTp68MIi5eKHAQMcJLsZpVQl5Ps4jrJeWvVhuB2ot/9eFCceK82MGRuaJSfu75M0AyO24r8aL04eb1+mUx+NFheNfwX5lZQXj8Xhh3KTllBiZGJkYmRgZekiMnOfpSMHIVn4CduiCSycKBjWwo60eU+IqBo/P/VUo/c7gw3xFNYJfUeoAzAVeVGI02XI/5UPbOl1FhYVlqbXOADcCkB1IdeISSJxzlQ4HdtpfnUyDm5OAs28c5yTCztuqamoA1lpnP9TnZGA/4gqUVr1Y97Hvdzwez+ku+AteYxtG6NEtanix46qYcSz8x9nQ6RvotoHEAjd4CtDiedhXnB31tj7bQBcnvLhWP2d/4vEDaFWevspjfFZQZjnYHnwubMGL0mjDfLuFEJNW+NnWrCPlP3wo7Kt9op2bT2XUnMCxxboLmzPgOUDVeNVcwf0cULDMbuyk7VNi5Ga/xMjESLZZYmRipM6nMh4pGLljd7iY0VbiY+FYSWwAZV4Tvd7+4/GZWhUgDQIdhz9Hko/2/J3HCedtGY8NpklXnZV50Ad6FWQdoNZaZ07nqh2uesDEjstztpyKq1UsY3znRMoJlufmpMuByM6+vr6+APycQDRZqv5bcio/3D/szQnLBZ8LTH3LVPDnkr7yyPodjUYLY7C/cV/t7yrjet7FXoypidwlRl20udhWfUffOM8LEtW3Vh1VZ+xr2s7lAK3YOlKeXZ6K4yy/06EbixdFnAe0r8Y5t2X7tRY6LJ8uUlSPSi2bJx0aJUYmRoaMiZGJkTpm6DD6xvnEyCMDI3fkgqsVVC7w+DM7ghop/rPyl4FOfF5ZWZkL2lZgMY8uwFj53J/51zF5y00roHgcdphop1U71S2TAgbLxW1cxUUrZZrwmd+oPLmxNQnzmGy7GEO3yozH47n96apX59DBnwYz61srdy0e3dhuHN4G4Ko+3CcqS+ETnNh4QaT+qDZ04K6AGomfx2cbMtjwG33ivy7QeDGi8akLO9VnEFcqdRGjCdWBnCZCZyP1MZY/qvO6IGA9a2XdJWJdJIQtWAfL/JTbqd+y/K5SyrxprDKvqkcdg3Mb24F1qy8nUJ0kHTwlRiZGJkYmRiZG7m6MLIcKpKWUGreUgzjxqKCsCHUaDXANHK0UaHLUufj4YDCYS1r82lW+CuaEGgrnfbga5JwsnSzqmHqeeVCHaH3WIIjjOoex1ULyaunSkQtwtZFLfvH7FsxzbEVQYqBhWdUPlE+2m+pME3/YMs5H0tOHaznxORnZxg4A+4AqqLXI6bO/UvDqEjTPw3MEMMZDqLzg4DbM
I88XvKi8Gg9sQ+XPJUunK61MBUVcjkajBRB2IOFk4LnCxppUOV8pn61FgVbe2QbcX3Mg6yk+K4/xWSuh3Jb50jlaOnGLHVdtHY/Hn6i13nRhgCRLiZGJkYmRiZGJkbsDI6c50CaIHdtSyAHLFYVgiBXLitAEywpTRS4LkviuyTYMw1UJdhbl3yVdt//WORcnH+7fCh5nVAUKbs+Ap+Np8nG6dv10HpbPXeUz6S1pBXKtDAVPq6urM175P8vBY7D9OQEPh8NZlYl1x/rXqqMGWACJ+qT6ZotirNhL7oCHx9MFQQsgdHHlFko8nvqUiy0dP6pb/FYhXgRwHLO+FLT5e8QaJ9awEbfl/goWumh0yX/Zq3S1v0uWGrsuSeuihftpruI3gGk7XaiqvtgvXQ5RmVgejlXlS3NcLKaYP26n+YNjMOngKTFyc67EyM3PiZGJkYmRuwMjd3RLYTDMTDJz8dkl4TimVQa9CmXHYgdkMFAgUaOoAtXwTK4axMlNFc86URDpS9xaMeCxHWCpPvm4BiJTHwDxfwcefYDCjqzyq0zKBycxYL5iEMHWGlMrbqGPVrXVUQCOyt8HJi5JcJLl6rAmRF2oOAq+AyzjmFvQhJzqF47vPh1wktfFiEv6KpfmgTgXANtKsi1ds05dQo1FnuODY6EVD8xLqz+w+GC0W6SxPYJnl+BdPIYfBwDHwrUV9xoDrp3eDWBe+SH4rcR5X05I2holRiZGJkYmRiZGHvkY2YeT/hVI2yRWNCc2ZUiZ4e+8v3ZZIDihWkJygGg7TVTxX694WZnqQKroMGLsjw25NNkukzGcUdvH8ZhLr6o58UQwhvO76gYnKHXQ6Mu/Su906ABKjztwVEBWmUMeltnNyUmcE0+rusp6ir3jLdBfdox50ucStE0kWAauZTpxQBKf+S1iqntOImq3lkzO19mHGag18XJFWR+G1vGXASj7AY/v9M+88OfYShF6Z3m5csUyudylABg65i1iYYs4r3cJeBHM1XCeU3nlObk6rTrgNiyjto0tJQzOnM94Dh5D3/SVdHCUGJkYyfpNjEyMVEqMPPIx8pARVJlX47iEGJ9d0LFDuyTeUm60c4DERmeF6bjKV8jC7bkqw2OzLlrBrcHM1amWsRQ4Odm0dBXfOWhcgm+BggKMJjmWm89zoLjEz/bRiuPGxsbcb3joNg2etyWz6kd50UCNRMFvOXL20T/Vc9hOH2pWe+tn5xMKkgEYkaQ1ubH/BGkFnNsp76xT1aNWRuO4gkLIzkbi9KcAACAASURBVLHBNuZ5NXadPqOP+oCzTWuxpPLHeLxodfMyvyyvytXKEzGm6lhlYbs4X2KeWJYWWHCeYoDSCrbGM+cQXnzFMc4huiBO2holRiZGJkYmRiZGJkbuSMkyHuzUBM4Jm5PAbHK5padjAFjo16oOAlhwclYgK5uDXoOFDRt93L525wjq3C6hqUPxXOpoqhPm1yUHdaRSyuy3Obg/66ulD5aJ5+eqiOqN9cTbP5Q0uateXLDzWMoDy6f+ocHCc6l/qh9GkmRdRrVP7RWfGRBZh/oGIJ7XycoyMuDG/udoo37i4sL5B8sffzGna8Pfl1VTWfc6v+pZK4bcn9tyFanFh/obgzDrUWMkjkU8uAWL2pNtqbrVnKa89bXjhN+qyLkFi86vudjx4XTi5tSFQ9L2KTEyMTIxMjGS+WD7JkYeORjZimlgBy64IlCcQTm5OmpdoYcyNWEvU7IjHov7sNM453XzRXJx55Und0tS+Yr/LhkGqWPoHEq6L5rBTeWNMfXBSqVWQLmg4/OsE8cD64iTAwe7kxtYfMvXMuDSeVQOZzfVX6vyw2PxOQYOHc8lVh2T+enzMbUVb13gBMm61N864fNa5eSxo61baLRkcMma5dMc4WJGFx6uWsZjK0i5MeMY5ymt7qvcAeour3EstLaGcGywP+hdDpanb0HaWjS1ZOmrnDJ/zLsuNpO2R4mRiZF8js8nRiZGxvfEyM02hzNG9tGOlitbSVMTNIA5weO4qxwEKajomNoXWNyrHZ9Voc7oYUSuonB7bsNztuR2Y2tSaCVtTdR6+5a/O107wHJgyqTJ3tm2pbdWdYfBRGVo7dlWEGDduYBd9tmBgVucxPjxmefnPnGMK2rRnve9tyqeOqfGBx/Xh5Z5YRNzuGqW+qjGg+7XVl0rYOsiUedTWfoqhn2AqnHQ0pFbKPHcyjv/6VYTNyefYz07Up5cX+fL2s7psmXXOMZy8Tzhq1ztdTK3AFTbJx08JUa25XZjJ0ZuUmJkYqTaJTFyXpeXdYzcsdfC8+RhQD4exMbjBKptOaG4OYKcI/YlEZdA+VjMqVfz6iT81iE3px5z+nJO4Y6rHlSHgH9wUUGmFQBKk8nmQ7iuGqg8x/zu9nifDpYFsXNgBSSWmwHSBYH6Avfnt9Iw7zw+99XkxAGviZbnDT25hQ7L4Kp1PF4pZWELhkuufbpnvl28qmwqvxu/T+9KrmqnCw0HBG5u3crEIMKvoV0Wqy1gZOBx7VnO8KtWfDt9OWBTnl3e4+Ouustz1rr5+mHHu+NN40wXgUlbp8TIxMjEyMTIxMgjGyP7aMdemuEmYuWyI7h+HBA8VijY3erksVu3JzUQ1BmCuFIVgczGiOPBS99tb/eZqz0a+My7JlOXUJ1M7pyzRXzWIHaOpcDhKkNOdrWzJlq1J/tJi2f+rr7E59SvIrHoMU76cZ6TBW85iD7BX18SVN8NvanNVS61Kfujjhdz6vYNp5c+/uKcq3zHOK2k6BYMreTE86qP8/hu+0dLPzqPjss2Zt3x1qCWXA6c+ZzjQbeCtMCWY0uTtG5LUKBzPq5zOr0on84XWu0d70nbo8TIxMjEyMRIPsa8qL4SI49cjNyRZ7hYEK46aED03WJUJ3CJoiWMc3KXBNjommDcWK3g5DF1ruDBfXbzRZuW8+m8zvE5aJhnBTrHt55XwHUJ0AG+qyjyeZWXnVfbu0TK/CqPTgb+7ObneVwy7PseY3JlM3TA4KS+qA+1qj412FuJxVUUlyXGUua3ckR79/skzLv6QZ9+mNwWo1bSDzu4BSHbiKtiyrPTqdtHHrHfijWeqyVna8HGVbFWnIZe2MdV78pDy19iTleN43Yazy4fLZNxWW5KalNiZGJkYmRipFJi5O7DyB29w8UOH5NrYog2rFRg8wFFvYIPJ9Pg4vOt49tppwmI23KljmVoOZ4m2iAegxOL47EPyFi/LRByAOfk1bk42BSsHGgD3sY8brThxOEAw8nKetGA1HnU57RdH1gooDuQ4fnjj3WnW0ZYPvUd9pvWwsYdc4CtPCixz/UlQRfD/J3Hc4sBrdLF/2VbB9TOyjd/V1voYoL/8xuleAHB86pvqc0dkGqsKHD27V/XMVR/Tm7VV3x2C1rnqy1S2V2OZP62ut0qaZ4SIxMjEyMTI2OcxMjdi5E7docrJmPDuS0FfGtak5g6hgKLngcWA5wNolUUl/A4sDUhMf96S52Jx28ZVZMvkzqFBin/Zx1zYDge+DjLxaSyRzC0wAyYfxBV9apBGf91+4kmMJd
IVW/RzyVonS8+q2+5W/LRRh9K5jm4Lf8QZBD7WitZqV2cHzuw0PG5/WAw6LUHH4tX5vKYDIo6L4MGJ0i3LcQBu0vMqrNSFhcqrkIXMuqbo4LcvCoH86p2cXESY7kk35JTx2npQfU3mcy/USza69wt3+zTu8u/erx1jPu14jSpnxIjEyMTIxMjEyN3B0b20Y6ULMPYnMydkHxLmc+3glirZg6kol+c56qQzsM8AFhQkCZTnjM+89j6o3d9SY3nG4/HC0Ed7Xg7h4Kt8qHJQbeCqFxODk4oyi8HEo+vcvIeYLavA9dWsPO8LSd3wc26V8DXBYXyxQkmfpSQx4ngZnso8Ib9XHJS8HOV3IgZ1hvzrGOzDhUcOSZ420XwoLGp/uD05Krp+tpnNwbbpLVYUKByIBzfdSuCi69o70Av5OC3YvGChtuoXpwNGMQdvy1gc/I7v9H4VNkd4DJvnD9b2ziUTx3LyaMgnrQ1SoxMjIzziZGJkYmRRy5G9tGOvKWQg7J1Lj6rQC0A50B0zq6JIZTnEhjzoUmJDcDJIo5FsPOP6WkFbzBYfFOR+90VbqdyayBwUuNKooKovga1pWsGIDe3OqRzzBhXF14O8FUu1RvzyyAYPOhWAtabJudIEhpoKp/qhxc/8WOMLM94PF6Yh+3hkh1TABT7sdolkpKrZvMcfXYLMAxeYyxdWLGPu7hzyYRjiiuwnPxD96oDrRSpLh2AMS+tOOLzLkHqnG5Bw0DOsnM7tpfSxsaGfYDX2Vd5DN25vkyqszjWV113Mte6ufDtA/Ug9h8+lnTwlBiZGJkYmRiZGLm7MfJieS28Jvw4zw7EytZqmiYynYPb6Rzs/OpsPD+THuOg0XECENgZ9XWpkeQ5ACM5OZnUaRkcmR8HWvrZOTMnSdfGOabTu/ZXnTmg5zFc0Gvwq+NrhaXFs0vCGlAMjA4QOFBVp5o4uXLrKHhX2+mrklU+9zn8SfsxgLqFEcvMMnA1yCUq9jvlQyvnAOZ0Ebpp+Z3q1cns+GefYHlaiw83Z6tq73zdAVjYsjW2Vs1VFs0lDBYK4i6Hxlzq+7robi0cWvpXWVt2Szo0SoxMjEyM3KTEyMRIleVIx8hDvuAqpcyqFOHwbGB1YBXCgYo6sSa+mIMrATyXggiPpUlWFcpGUEAJYqDgfiE//6nj9iX7OB/zqi5Z56xHDjx2NE5ELBPrTZOk0mQywXA4nFWyXPJ3+lM5WkmAiSt0zl90HlfdZZ0oMGjC0QSgPqXjaiLlz3yM/YYfSlUfDR6CmN/4rImZda5xwgsVBmJ+2F51pHrluV27VvWNq1mtBYBb3MWYXMFVG/JcTkcsm8sFLdKEzFsp9DyP7cZRPUbfiBu36FEAbYEL67gFMtHPgaADZ64Wc46Ivs7HWc9JW6fEyMRIp7/EyMTIxMgjDyP79Llj6MmKnkwmC79/EMSGdpWAlrK0yhdtgcUrfz7GClMHc0mIz7uxFQyYZw1avp3K8yg/qkf9zHwrYDjeoo8mrOBTA5p1zkkoSJO2yqIByP14m4nO62RRfmJMrkIB89sbXPJyuuexA0jCZsPh0OogZGC7cvU2iMFG9R99OIDZxgE2qjsFYCbVN/u9LrKcTtmWLLN7TSzzqraPtgyWqm8HyrpI4jYbGxtzW1UcKMU59W3lXRcBHAM6P8vP/V3+cP2D1Pdan1s/yun0HW2Dd44dfSCcF7LxneeJmBwOh7NFMINNtOE7BayjpIOjxMjESNVXYmRiZGLkkYWRzi+CdmRLIf9QWkzqbtfFOWWQSa9ANUG45B7n1VD8i9FRYYzzzglcEtOE3JKXZY3kwGPomM4wTjeufQvsVH98jG87h+6UYq5IrDoHO1volHWjAaFVTg3SSBgqvwN+5SPG5yAL0kqZ9i9l86FW9tM4xttaeH5OnGwb1g3PpQDPi6AIaJWR5VLQ6fPb4I9l4s8si1aCuW3MET7Dizgdm2XVhQ8nJ52X36akCZYr3py0V1ZWZtuSOKbcgkuPsf1C57yQ0HH4TwHIkcY4LzZ4HPVJfREC+8lWwDz+u6pexFefLph39S/ND8uAJKmfEiMTI+N8YiTmvidGJkbuFozcsWe4ggF9i00wHAwB807PwraSpho2junDgqxMNQjzpElJ5wK6ao4LIg6Q+OMEHYmpj1o88nnlx4GAc27tx4lV9a5JezAYYDwezxKrJhqVrZXwnH5dMLJNGIhZz9FOgSr07pKJs6vSeDyeq3qEbLFoiX76PED4nXvNLI/DMkVyZh65WsikvqP2jvFiPrdvXRdkmsg0ebF+OdnyQkBlY1srCGtidwDYWqy45Bb+qjEZ/GoCjDE4H/H8bmHjbKm61yq2Lk50caSg5HTAPq88OH1wf40dtRUvpt24MYabm/XaiqGk7VNiZGJkYmRiZGLk5ri7CSOLS2bbocFgUIfD+es2rnIsTEiMta4UJ5PJXAWJ/7vk4ebgtqoUdfZWIuIKhjpFkCYubqfgxX3C8PxaVU34y5yxlZwj0TKQaL9WEnZAzvpknYSO3Nh6q1Xf0sP60WqS8sXnXLJVX4ugdv7Hfbj6F4knbBK3i1v79nURo4mT5+Jg5wDXxOLs5OTWpAZgAVhbi6m+GGKAZburfK3KDi8IVHcMVjym8ze2qavwOp1rjLjqlBJX2VQGt5DSvMFjsC+p3VmP4XcxF4O2W1yrvp392AZM6k88X6sPjxdjsBwAMB6PP1FrvantmLRAiZGJkYmRiZGJkbsDI6dbee0V2I5ecCmTqhgnIDsfGy/aal91jj5Q4ePsDFzxiPmCj1Aet+ekx4ZdWVnBaDRqJnh19uirFU51zFYS1IQfPPA2EJ07gtG9GpfBlp3M2UsTX+jFOTDPr8e1jQNcFywho1YVo50D/DiuyUBlDAo/VP1zYEe7OM4+EdUllUUBVBNdKxG29AdgYUwGcE1g0T8SmdqgVW1mXlUXGhNaSV9Gqjsno9qIY4b9khcpLH9LTo5JYN6ntMqq/qkLIOercVzzlSbm1gJOfSLm69MrAznr170Jjv3YAZ4uZLVfXnBtjxIjEyOZEiMTIxMjj1yM7LvgOuSXZmiAuiDlc055+hDeZDKxDh4PsMUYzqA6NgdunNPEFO1Go9HcG1MUYJg/dgyVi2XgByxblbRoH/y0gjJ44Fv1zC+DIAMYt1eg1sTHFYRor4HEdoux4r8LFE4CwX+M6x48ZH5i7NXV1Tk7s9whH+uuVTl1cwSxPC3w4WSicjkbx4PL6ustO7NPse+GXTjR8PMB+hspsWBx8eBi0cnJdmmNE37Hx/mcA0GueKnelA/VC7fXh5f1f/DC8ccyuFjQGHB88TG2kcrN8RDzhu9rbCsA8p/LhTyX+mX8sf4VLB2/3I75Yn/aykIhaZ4SIxMjEyM32yRGJkbuVozckbcUsrJKKTYoOSGxMUMJfMsyiIMoBGIBNWmxofgKVdvqeE55ratlDiB2PnYuVnrLObUKpcDW0jPzrfpkfp3jMEiz44Y8GjjMS60Vo9HIVnP4s3uDjbZj3lWvPAZvZYi/aBNVCZc0NcFxZYl16b6rDlvHmJ
8gfeCc/YD9Mb5rctREX0q3JxuYByUX+LoA4M/On12FOOZTIOP/QQwKfMzpNRYMGpO82FF9cnLVcVluB+CsT27PeuUFTfTRRZHK4cYB5p/T4Dbsr5of3DgODGNM5VHBQccNPSvYqh21UsftnD5auSmpnxIjEyPjc2JkYqSOmRh55GCki+OZjvpOboUGg0FdXV2dCchXjqwIFp6dhitp7PSRhFvjBTH/7tZrGNVVIxhk5pQiAcLG4fN9yUaBUW89uySjySLGCx41STojq76VeByVP47x9heVR+3A+olxGSjjT18py0lL9Rc2Y73HMX7bDMujPLRsxvbQwHPbTngO1jPzyonD2VcXFvFmHF706LxsG31TUIBp7AGPLR6txOT8vBX3LX9SP+RFiluoKZ/s5+zHbDutasYx9oXghfOIJj3lgW2oulJfU9258fi4ApbaUXXI7fv8mOXXZyRczLhFCp93ecGBphLrKvxwY2MjtxRugxIjEyMTIxMjEyN3B0ZOLy5tpx3ZUqiG48DigItzwOZtY3Yul6xYUcvmj+/8n0HMJam+4GG+gflb2Ty/qwK2ErUbl53SObV+Vwd0zqWg03Ic5SV45dujLtkzyLPN9DWebAOeZzwe925P0AogPzjNsusCQW/FOz1wZYf17xYWTFwpjPauMuuAROdVH1Swjnn0mQVOGvGcRV8yDPm4isdy82f2Qx1Tfdkt8py9VU8cL5PJZPbGL1cVVkDUsdQXYtHodBFtuUqo1WqNK10gOLDg3OT8x+lYgUOBio/FItQtPpaNq9SXRx2xjrSSmLR1SoxMjEyMTIxMjNwdGNlHO/pa+HBsJpfE1ADRjo9pclEn4bYMGC7xOcM50mDmIGYeHfC0ZODP7m0+rQqEytj32T3kCbQfWtSg4fF4zGUgpMDvEiKPxf04meo8GqwuSce5VrBxcDEvwU9f0DlwccDKCYqBrbU4jXG1euQWMKoXrv4GiHDS48WK6sHxy/rXSq6zu+MxiPu7GOB5FYD65nPArMeVXAyHvlwllfe4t5KtxnFLn335z/XTz9zHLcZCltYCUPWj/blyyXy7/KPAFHK1YiZpa5QYmRiZGJkYyX0SI48sjOy78DrkO1wtY/N5dhhO+uH0TuFMTnnKg7uKdgmKxwpeXCAF8ULD8RjfW47EPLlADB6UR5ZDxwra2NiY7Vt2DsT/VV+sA8cTz8320j/mjXWhxE7JSVDn6tMvgwjPw/ywvRiolWetDvL8XFVmnQUxiDFPPJdW5ZzuXRVR/zO/+jparVxy++AxyPlYH0AocLItVEYlTtyhT632cl/93vJnBgWNkb4E7fyGt+CwL2ocqF5bOmPf6wM8tidXZFUmXjyw7tUeGjsuF+q8Lb31UV/+TeqnxMjEyMTIxEimxMjdiZFLL7hKKbcopVx1qxMFs6zoVjVD+7uEz4bWc3E+jrVug7aM20qICi4a3PHHD6xGX74dq/Nz9SWOq6O3QM0FdisIXDLS7zyHOu1kMr9dQl8RrHK29KhgxPZRHhjYdS7+r7JoILlkqeNwEuf2ehteFx7xnx8AZTladnT8Rxttr8SgoLYL3TpAaulJ/cEBJ+tMFwZqV/YFB6TOFznGeFyNXwckHOua+FXPLpbic1SyXMVO81GQ6lrzDIOV8z0mzhmab5h//qw6cbHtQE7bxB/n6rCbvv53mU52O20FH4HEyMTIxMjEyMTI3YCRfbR0S2Gt9eNLR5EJNbEw6TmnxL6x3Vh6G5QD280X313As0G0rSYsTgZ9TsHzsnGZbzYe928FoQMXx3+fbjkB8LwtJ9W5WDf6mdu2FhEMpKGL1u3Y1q3s4FH50iTdup3MvDEwx6105lX3vLuqC+uRx1Xw0gq2I92yEnvV++za8uE4FzK6H5d0vtVajIQM2i/k0dc3q15cfDA4abxpPGhc88LHje/aho11Hv7MoOXiTvXr4k/n5346Z0uuvhhiX1Rd8ziuqtfSj+uzFUDZbbQdfJy2B5AYmRg5z1diZGKk6iUx8sjDyB1/LXzruLZhQVvH+pKUtgf8A5l6xR39HJ/8PcZ2jqEJq3Ve52Mn1S0IwSPLEI6scqjczLcmXb0CV1BoARU7lP7QoZ53/DhADhm4eqn6iz9OKNxWE2BfUDJvfRUq/uwqjOq/rAutnuocLRDk7RYa+EG8NUB1ybzxPNyHdRbf4zP/1on+GKTqxenJyRoUx+N3YHihFLwq3zrespjVCm+LFx2X5+67g9NXteRxl+WJvvhQefp+nFUrmnq+9Z2PKbj0ycGycJ5LOjhKjEyMTIxMjAxKjNydGLljL81QJp2Q6uRBWsngJBJjukBi5XA/Hp+VwoksEocmAk7kPEbcVtYk2kreeuvd6UhBhckFxzJwZYr59dfN1aE0ibeShuokkhYDnsraAgRNjly1acmviwauzjHQulcnt4C3BYBOB8q/nnNjtRZRrXYKfDyP+p4b182hPHC8KO9hY1cF0rYxn/ISMmhlb9l47GM6hy4g3LhBrdcWA/NVVPVB5cXp0VHLntHH8a4AwZXi0B3rV8d2ixeNa3fnwsUBV6DVp3iBknTolBi5KQuQGJkYmRipfPWNlxh5+GPkjl1wRYDrvtFgrhXQwKaS9PYzt3NAwWPorX+nUFZWyzk0+XESZefgJMr9dD4GyJbczI9eKfMYqo+WTlh+HU8BVJOe8qdy61jxXfnlMRko3IJBdQwsLjDClupfDsSYv9Yig8/HZ2eXvgSjsul51oMCpgteJZfoeFxd4LAuOOlobKmNHNjw+KozAHabQa2bP6zK8nFSaumzlWyd/6tuWZ98zOlDSdv06T70yNtoHLHeNVb5T+OOK8dONse/+pHLvxqvHE+tXKILK37rV9LBUWJkYqTylhiZGMl9EiMPf4zsox274NJqlXNmDfSgaKuvZAyhdCw9zuPEsb5KkOOvFdAOCDRAnSw8PsuiPKicqifWr8rKPLrb8nrLn3lY1j8qncELO6I6nwMDto0LIB1T+yivoWvdV90H0uojCoiDwWDhQdOYi+VQQHI2VrljfzbPqT92yTJoondAwElA5YiFDS+iNHkFD6EL5YHt68DO8RTJtdbNH+oEMDvu4qAv3lgetUOQ2pHlYH91PsBjOX/j+GuBC7dvndM5XA5x7Vk/6rv6fIT6+3b4U5nZB4HF1+HmXa5Dp8TIxfOJkYmRiZGJkbsFIw/5Ga6YkAVVcg7F/V1C18qQBgUHD//x23H0vHMiTbIAFh5m5Fus0SeCm/vzbXsem/WketH+LX1o4LPRHQDxeMEXf2+BsM7HjsRJy/HqAp/7hY8wucSgsnJi5h9W1DlirKgg6bjqXyrvMt3zd+7vkpi257aaZHVODeioFrnfqGGfd/4Uc8Z4umjgH40MXatcQRoPnPSHw+FCe60c8xj8XXXA/sn27gOh4CvaKvA6/bo4cHHGf3yXgvXGvPPiKsbSKjHbVgGaY08X6cxbC2g1B/OcbJeW7lrzsO6Stk6JkYmRiZGJkYmRuwMj+2hHXprhBGClcBJwzh7Jgh9SbCUAJ5wLfq6GKDlQY0dS4An+d
AwG0WjL43NSYMM4x1be1IFd0mZndDzr+Jok2MG1gjccDhd0O5lMMBwO7dgcfOqYEXwxH9sYmK9qcaDF9xgzEpbauqXDIFfJbQU9gJkfht9GO060WkFsLQy0qsn7kBXY3QKHdRTn1B8Y2BS41IbhLwG4al/n77x4aCWXGJcrQTG3JuhWTLqFGOcCF9MrKysLixTVn9qhNb/qS8GY2/LbsNzCxOlOY0P9L+SJMfQ3Vdi2qi9HMX5rgaNx6kCWF9JbqeAleUqM3ByDx0+MTIxMjEyM3C0YuWNbCoN5TgpskGXEjsgO6K40NSmqw/Jn/i2BCBbnKBqM7HThLOPxeC6oV1ZW5qqFeuud+XGJYtmWDpZXkxG35zckMf+6tcCBM7DpPJwAuTLJOlP9cLJ3Acs61B8h1O0Sal8NoNA386OJhfURfEXCUTu0dB7+wok3xm0tGFq8u7l4W4G21fHUf7iPJmddEPDYoa/QA1cQQ7YYI+bleTgZ6lYQtR/riZMR66AvBhUMONY4cXO1SxM6J3n1Rda1ArHqQHkKYt9VudjnVS5dJKmtol/YSHXnQIFzp6PxeDy3mGvJxPmilU9dPk7aOiVGJkZye+YzMTIxMjHy8MfIPjrkCy4GYTdpMBOMcjJc9pBZi3mXkLXiE4HV5yhMatggddJoG0DF88RnTvwKTK2kweMqTxrYrAdOHurY0Sf0r7fbmV83L5O+9nZjY2OhwqcgoosJBeeWnMp7jDUajQAAq6urC+NqImEAYp5Yhy7Zc9BxxY5BNj5rFZN5ZX70t0K4SqZVQAWWsHEr8DUp60JMgcgtKPriVm0HzPtZAK/z69a4cU71FfrlRR+DUvRZXV3FeDy2sczytYBMK2T6gG8LwJ0Mbh7lmfXGMcQLLM6hautWHnS+EmMF6DMPfYDUN5cuXJO2R4mRiZGJkYmRiZG7AyNbOgF2aEuhS0aRaOO8Bi4nGzZGJGVWEjsYC6ROE07N1RceV5NyBKm7Zan77RW4gly1hG9xcuVQ5XTy8rjOiH0A4/TK8rnEpAHAtlKbcaUygCT41zlDplbycAHLwBY24CALYIhE4hyfeXdBonJqtVV9xiVf3S/Pc6h9hsPhbI7V1dW57S08Pid+vsUd/3lREHxq/xYYqOy68In+DG7sw+ofWhVTCvuxHdUPNMlyX5afKY5peye3Hmd/idw0HA7nkrn2ieMa4+zr3I/Hb1GrOsa61piIWAvS8zwOH3eLCgV7Z98WoPTZPKmfEiM3KTEyMZLnSYxMjGQ6kjGyaBLbLpVS6urq6hxjOqkKGkHBTg34W4/8v+XcMaZTXrTR6h73cYmGHVaTspKTI8bRsXlMraYFAPYlSR432ruExAmbx2Ubqe0VrHmulq5a7Xlft0vqzjG1auOqTywDA7aTo1XNZH1z/6hQhm3iWGxv0AQddo+x+DwH+fr6+hz4RhuuIrV+0I8TFsvNPPSRS1Iqoy4eeEz2USd7fOZq4jIg19jkMTkhKxiwzZhayY/nUn9luzPPWqFVmZmHrcRS+LJW+ALI2O4xN4OmrlREAgAAIABJREFUxhPzqTlPbdSS351z+nG2HwwGWF9f/0St9aYLSk+ylBiZGJkYmRiZGLk7MHJ6sWqvug75gmswGNRwJL6qZ0OoEXifqSZgFTb6RRutckQ7Djydj9soaULSdn1ApwDpDKZJlasGEdAtvngcTWR8jB1CHW4wGMx+zXyrCUlBJ+RzVRvniKqDFsi32vH8KrcLBNWjjq+V1Dim1SAH5uoL7L+sF15MxPiRrLmS7HTkAl79kZO3yu9syIko2vODs+PxuOnDnFBZ725M5Y91oHKx7hmAXLIHFn/7ydmsz99ai6MYi2VTvbE+lNwiiNvqPm83htquBaJuO4cbl6udHLNqo2XyqM2db45Go7zg2gYlRsL2S4xMjEyMTIw80jByenfZXnAd8jNcmvDZWBqoQXFV6JKSJi8noBo/2nB/9wBhjMEJTedgZ+UA1qSmyZ4TOANb7KVm59FqIFc9OGFrEtZA1uAOoGJZOXAd3ypD/Ocr960ARt/xkIXtoclN2zJpZYzH5+SnyUeTSszNb7bhcaLNYDDAaDSae3CXkyW/3tUtXEqZfwU0BzXHgbNxHI+HODlOWgsW9jfd7sG6CwDhSqJSyOniQROx+iv7iXu4VPluxRbzov10MaILnxYga4Jt5ZJWwlV9K//s186v3Fi8gG4tKJYBuPLjfEZzs7O7yseyBC9uIZi0nBIjEyOXHU+MTIxUG8c4iZGbsh7uGLnjbykE5m87M7EitELAY7jKFYCFBBCkgcufteLHAcDA03JSlS36u+CKz5okWB6XTLTi4drGMXUidTStLEXy4AqAAiy31XG46sh9XaAqf60KiSaFllxsM3V0V6VloGC54jxXrFh+lmdlZQXr6+sLld+wYwuMeTxdkDg7KXiEnzLgtXQ1mXT74tnvtV+Q8lxKWagGhX+o77H+HRDyA/FaVeM51HdYf24h46qHXLmLN6HxVgP1HwcgjlS3vIVE8wwDBduT//PcDgRYDrZd32JJeeV5NMeGX7jtN6znlozBE/PBC4wWOCZtjRIjEyMTIxMjgcTIIxUj+2hHnuFaW1tbSPJsSGDzAVJ+ZakLaq3AhEBBGsxaCXNJsi9psXFCYRo0jjTJqQHU4MwfO5PKH/01WXN7dVJNBk5/LfDQJOj0pe0d6GpAaNJo6XvZ3Jz8eFtOJBQXLI5ntY2zl/LL57XS1LfgiWQ4GHQP2Y7HY2xsbGB1dXWuwsZ6Cl7ZB52v6kKGx2BQUX2wDNE2xoyHlqMCumzxpgslx6fqToFex1S9aFJWu/IY6tssbyyKdDGi/4HNRaX6hsqtczmdqC76co7rp/M5P3ALFvUnN17Y2Y2tvOvCtJSSz3BtkxIjEyMTIxMjEyN3B0ZerFsKnXHcea2o6H9Nyi4wNOi5P3/mxMRJySXVIDYqz8dVG2cwDno2WnxnY0TQsh64GtaqDLHsLpC1X9zm572urmKnc+h8EeD62leVT51ZAZB54MUE64u3Ujjg5LFcMnX9WCZdJKgcIWM8AKzVCl7ksE87vpjfSJD8ml6n7+jL21QCNMOeLaCPqmT4oiYptl3ox52L81pJdL7HiyQH2iwbg4iLgZiTk7mzoY7tco/GYAC6LoI0rwQPTk+OB5fruE9LH24Mltct1HQul3P0DgDnFY1d9pMYw1VvFaiSDo4SIxMjEyMTIxMj58fdjRh5yK+Fd4mFA9EpNBxVr6o56LWaw/1Vsa19zlol287CQR0sxghgqXXz1jQ7IAel8s/JpBWIzK+TO6or+vslLpBaPx7Ic/FrZSPxRaWVgT0Src6puuGEo7KoLjiphFytSksrseiCQoNE+7QSACdzrkhxslM7Mc8aA2pPnU+J+RsOhzM+OBGvrq7OdMt77NkPmQ9dGPDf6urq3JYMtbGCact3VB61lyb8kCXm0vEd0KhNWc/OjqyHeJaAeeExGUDVt1kH3J6TcR/guUWOtlMZdHzmm/nTKhvrFdiMJ81BLX7ZXxxfLhcnbY0SIxMjWTeJkYmR
iZHzMu0WjNyR3+FyDIWRVClOQL7yDMajvQYKEztHzM3AwueB/nf/a/A5cs6txFUTBhU1FCcwHpvHYN6iPW8d4P3JnOh5Dg4WlU/l5PFUbnZyBtU4r6DrQCDGUWDkMZ1vsC/FOOwXnFB5QcJ6VV60UsN+ybZROwZPqn9OTMy76jd4dwlLdamVZLaLJuLBYDCXOJVUPqWWvXTuWMho8mJ5lQe3sIjj7FcuebtFAVe6VUYnl4Jg6Inn5MUjLxYdQCjgqZ/EOCyH2pJjQKuzum0kFnzMY5xj3nlPuv6WDes/9McVO7fA1yofz510cJQYmRjJcydGJkbG8cTIIwMjne8E7ciWQiekS57cx906DqGVYVYIO4WSztMKGuWhbz515Pivb5jhcdWAGkgsi+unzg3M3yJ1wMOOpQCuSZ77Kvi3ko2O5eZk/YVD87mWPTggnY50EcLfuVq2bBHAnzUp6duhmB99DSm/Qlj/oj+/ZUxtoHpXG6pOXaUqxgg9s/14fG4fFWOnk4gHTrK8EGF5WU/qTyEPy6L8awwqcHKCC9u4eZ29o40bL8gtKtlGrAeWgXXEeonPvJBlmfnh+mjb8munV20bfLjfIwo+9WFpllOr7H0xr/m2FWNJbUqMTIxMjEyMjHaJkfN8xHy7ASN3ZEsh/3fn3HdlyjlHBIabK4K8jx+uBrmE6hSkjszfuR0bRKtOmkBcEub5mFd1MCdvOIsmkWijwczyqK70Tx2a/7hCqJW3Fgi07BPJzQGytg+ZtNrJc2giYOoDGa2qhK5acrXG0Soc8+Qq2S6pK68Myuwn3K712yVObq0usZ/EsZYd1NYOtFqLBrdwZPncwor5Cnu4hYMmyzjW0kXw7WRlsHSytHJd+JFWAnVcp9OWH2s/jj/WG8vGvqJyA/OVSSeb+8667wOSpDYlRiZGJkYmRvJ8iZG7EyN3ZEuhKiWMHgK75Bl93C1SHZv/83xuzPiu/bkdn1OjqAFZDq2yAPOvrFT+XfLQuRQwFFR4bk36vI2ilM3bwOw0raBgkObKhF7Zqz3VNqorZ0sFb+af5+B+bouE8t8KrDivt3sVHFQO/n0OXQy4hKe6cODhki4Aq2e1h8oXn/l2vtOR++6ShbbRiqP6oiOWNW77q8/rnC6muU0c1y0RrbsDfD54VqBkcvM732A/ZXlbecr5SZxT/+vjqRVrGi8ud4UeNL/oHNy2FbOtfJm0fUqMTIxMjEyMTIw88jGyj3bsLYWc2Dg4NBEzc8tudTujqXCcCLUv/1dyQaKJnXls8RM8uCoj64dlVKfjhB392SFUFpcU3HhcodmKfl0Cd+c12PqAQx1aba6220qwqW5DVnfrl4HQAQmPo69bZnmdrzhgcf4Rt7VblUUNfAYhfmBb5+Fb9E4m5lP1F8Ch/g1s3t7vk5cXP+r/qjfVc4vXFr+uSqgVwT7Ac3M5O6icOp/LFUGawFvAwn3dQgFYfJtS/O/bAqJ5mH1+mV7c+byjtXOUGJkYmRiZGJkYmRi5Yy/NaBmjlUjZ+SL58XcNLv7fomjPrw3VxO7au3OtZOiCtJVU4rsmS9eOx+EHEZeBYTiQ48Nd4btKo+Pf6cLx4PTVAkHlvbUAcPO7hM2+FJXLqHDog+F8rgUMyrfqVcfjduz3LphV325+rdoFuKkvt/hn0OFkEHrmB2FV3y0fcMd5oRXjhx0YZNwigWPd6cuBm9qNbck2Uftsxb806bPd3KIw+I2xnSy8sNZ5mP+Wv7tqeSu3MG8xp/Mv1iHPo+36FsV6LGl7lBiZGKnyJEYmRrpxEiMPX4zsox274GKG47szijLHzsWCcLVPjaL9OGA5kahj9t22doAW5xmcuCKmFbLgRQGU+dWk06o+sl5bQcn/45x7awzzpceUZ/5jHmMu56g6vm674L58G5uThf4ORPiSBjDrwy0YYi6eb5n/hYzBd4wfn9nPaq1ze4BZpviuvhBjhVyOT07AHEuuwu2SdgswOSZK2XyugPurLfuSrtNhkCZWBW7mW/Wjfqd24xjv49EtFljPjli3LuHz3MqHm8MBVGvhpf2VT7ZJa1Gh43O8xhz6GyKt+fW48tLSYdJySoxMjEyMTIzkcRIjdxdGHvKWQsdUkLuS5HZxjt/U4xJ7kCpWlRPHRqPRnEO7dpE8nGNrsuNgbiV+dbJIOpzcow0nVAYrbqe3/2Ps2EMd5zgJOp40GbIeHAjG2Gon5Z95CBn4nIKwo77FRhxnXtx+bL5dz7pSIOL/TK3b3MyH49fZWwHKgVzwynrVfpwAWMdsRwadIF6YMLCwnzOAx+fRaDQ75uI15uFYZaBVf+I4cbrXfKGgwvJwHPW14/mVHChxfDlq8ag2Go/Hs9fWjsfjGb9uHI03/s85T/OEtlee+btu+Vnmy0xsf/7PizeXl5OWU2JkYmRiZGJkYuSRj5FOv0E7csEVxM6vzLMxHLHy1GDcXysp+hskTiE8h4IGz9dqz4EMzCtd+6p8LFPwORgMLIDG2C7ZqPx3fflTcKXrXKsbE0DLxApSMR6fc0mvOw+EONGklPY4PMeM7ymDXb/58Wafe/hXvhYDaXOcue8x4RbHnfWfVLz1ro9Y8JEIKLYZ9+VKj1abQz8rKyuzRNO30GpVAyOZR2LVpKW8MM8OWPUBcl0AOGB2YzFw9CVLTYgOSFwlP2TSN65polabKPHii19ZzXnAzd1a7ACwtlD5+3IR60BzhfoA21SPsx1bvLdylfMN57MuvyVtjxIjLzmM1NzLd0fcIk55Y37HD30eJj96UzZGNAJKQWFbEvZsYmDF2GDkuBQESM7kobEZixkk4+MCdhrcc37SfS/TEbZAIaPMD8yPMDeizjvXrz33nB5c7qVx2vqqwPpF2PN7d58bNzEyMfLixkhX2AnakQsuFUoXnSwIf1al8Xj6XY3jAID3JOtYqlT9/QvHY4sXNbgGBTBfUdCqBs83HA4xGo0WEoCruHH/q9zgR3Diza+/IG/SoVGVgNTqq/NbXmioj/PFU/w2CbD4o5LRTpM5J8CYS3+zgn2D5+XqGv8AZSzCeEE2GAxmVTwGEScHL1KAzdfuDofD2XcFM5aB9ROy6lYibcNjsa51HqfHOfua8eN4q5rnFizaVj9z1VRjXon54DasD62Uaj508jgQU2pV/RXgnF6Stk6JkZc8RrJcEYt8p2k0Gs1djOqCbW5xfI3rol7/Ngu6m8m8hePLIsid34lxXZtdEc37z0+MTIy8TGHkjt3h4knDmHr7Um/D83HdHsAOH8TH2aHZqVzCjfl4bB0zxlPw0bFYLndbkhfJLRBkfviH2fiVq7GgVn5mi/QsNF8ixL4BbAafbnGJtlxJ1S0/nDjjv1ab+TOw6Sc8P1egOMHEefVlHofPcczym544HnVMvavGc8TdN5ZVFzEqG4/ZSn66CHP6CuJfpOe5NFZdAnaAxTzpfFyFb+mWx1VedXsK+w/riPs7/TgA0Xn5vFv8at7cCngkbY8SIy9hjCT+g69WbId9VF99C7Gkyz6FjyRGJkaqXnRePn9xYeSOXHC1knArscf3vuSviTiIDaf
OpwDj+OkzBu/95vYMdGxoNZ4bk4PJVeRahlOQ48C/OBZDb7jLw/DNT35GjhY8/tsfPSjQeeNdH45vfOJ/AAC//bn3YN9xVzwovt7zsD/EZ//2H+aOxXgvPPH2mIzHWFlbxWNP/1DvOP/83Ffj1BedDAC495uej2vd4VZ4ze0egDM/+yUAwKNP+zusXW4fAF+R5aqqLjT6znEVhxcZcS7aRmLiLUe6IOGFh9vC4MZnHw1e9A5e7KvmxOVkaS1k+Pv6+vrcNgznO27hFd+dfLGobG0V0LzB57mdzsux7ezuYo3PK2AEGKjeWslb+VQbs84d0DmeHa9935087C+tvknbo8TISx8jOZeXUrC6uorRaIRSytzzJSHf3DyowO/eBfjCJ4FXfhK48vfPDz46ANznB4BjrgK85r8X5r7E6FE/CXztc8DJnwOOPu7S46NFj74d8NXPdp/fdBqw73LAZALc+4Tu2NFXAk7WdYjQi38TOOVvpp8/AlzzusADrgOce1Z37O3fAkrbjxIjEyOV177vO4mRO/IEtF4luitOrcZHkmSn42oEOxg7HVcUHDDEL66zE8U47pkXnlMdQ/uw0gHMGZkVz+OpQ43H4wWjcRvmi/nm/wdzAbSM7vO2l2DfccfgwjPOxkP//a+wfv6FuPCM7x70eL/4Ny+ejVcnB79QO3Du+bjwjLNx7zc9H4/64vvxqC++H3uPPRovOOF2OP+bZ+CRn38vzvv6d/DCE2/fHOPf/vRN+MjTXobbPOFBuOZP3Rxv/tnfxukf/z+46LvfwwM+9BrUjQlQK5533K3nklEkNK6qcrDrloTxeNzcQx3jsd+PRqPZRVb0izhRP48Y4vFWV1ftbXldnDG/GxsbWF1dne075gULJ4vwU6D/hwz53MrKClZXVy0I9SVolbPl4/HgrW7j4Dm0SqgPkfM5BlAFU+3DPGvFNI5p7mqBH9sj/vMxbuPyIX9n3oIHzT/cPj7rb9e4hXz4i/KdtH1KjLzkMTK2b0U/5pH/x+fWAnoWx+efDZxzRneBoDRcA974ReDlH1s8d0nSed/teKzt50guNXr8HYH/+mfghf+4eZE1HgH3OBY47xzg1Z8Cvvo54EE3bI/xit8F3vsq4NEvA078IeARNwO+9RXge2d2/c87B5hU4GePB4DEyMTIOd4ubYzc8S2FrARmUKsGDCQsQAjH59m5nKOxMp2BWjyFwvQ491HS+V3gsrMqwMY5xwt/1/3z2hYV+LMb/jzO/NyX8PhvfRQvvdZdMLpwPwYrA5x09sfwnKNvDgA47tpXx2995l0AgM+980P46198LADg9s94JG570oMBAG+6+2/i//3dv2Iy6p4xWrv8UbN5n7nnJhjuWcNjTv8Q/ujKtwUAnHDjH8OdXvB4vO4OD8GNfu1n8bOvfjo+/LQ/xT8/59UAgF9+98tw7TvfBmVlU0/PO/ZWWL9gP55y4D8PauH2xrs+HCgFj/ri+7Hn6MvjMV/t7noNhl2yWD//wmbfjdEYG+sjrOxdw2B1iPH+A91FFoBX/PgvYDIa43nH3RonnX2q9ZVISLFQ0eDihY4uFmILQfSNO1kR0Jo8+TPHRoyvccTfNzY2sLa2Nrf9gF91GvNubGxgfX197qHY4C3GW1lZmbt1r4uU+K8+HaTtIqZ14RnHuEINdGDG52rtHmDWfOC2YcT5ucUS8cXxyXbWyryOq7I58Na2rW1KMdfq6uqcvaJd6EIXq3yR73hhmTT3MGC63KhAqu1Vl0nbo8TISxYjQ5+xhYrzpItdzauqoxnd79pAAfDuc4A9+4A77QXiqajjTgBe9GHggdeZ7/MLvwOs7QPe9Jz546/+FHCN63af73EscNEF3d2Z934PuNsV5tve4CeAF/1j9/ltLwX+/And5ye/Hrj9fYFH/yTw5f+Zzndi9//9FwCDFeDOe7vvx1wFeOkpwAN+bH7sez8a2Hs54I3P7r4/850dr5/5GPD6LwAnXGOTP6C7wHz/+dgWXXRhdyG47/JAKcD+af8LzwOGQ2DfFQBU4KKecdcPABvjTu+Dlc0xAeCXrglsjIC7Xg5477nAeB1AYmRi5CWLkSof047c4YqraiaXXPlqkZmOxacqk7cv8JiqwGjDDqpOwJU/ndsBiVsIcx/32V3hqmx65R3ntU/ccla++NjDP/U3uPxVj8fzr3RbHPjeeXjy+f+O8YERnnXU/8Lqvr149GkfxJmf/RJeedP74Msf/jf81b0ejRvc72fw0898FP7xiS/Gv7/8LXjbLz8BX3jvKXjgR0+evfWQ6ckX/AfWz78QL73mHfE7X/47TEZjTEZj1EntPo83cOqLTsYpT/9z3PnFJ+G6974j3nS3h+P0j316NsYfXeUncNE5HX+lFIwPrOPpKzfsvStFQgMAHvCR1+L4H74GXnLNO+HCs87BytoqnnP0LfCM1RtjZW0VJ539r8vHEnr4p9+O39v/Cew7/hj87pn/jNWjOlDSQOTEoz8eGfaIpB1VkfivleTwBQ5i9bf4zHNtqqMbY21tbXZxx7wEn+yPXNWJsfbs2TNLzrpQY59duNDH/MKGkw4nH+4TCZATv1soqV7iGIPNvGssvhqbZY+cxItTruZFX63Gsb2UL53fLfZiTLYJz+cWmAwabsHKuU15VDBjPTM/8d/dSWF5nD36QCRpOSVGXjoYqQspnivygctn7q7cjN7w+e7i4G6X7xb/H9wPvOd73d2a8Qg48VrAy07tPt/ibsBjXwG85Y+A4Spwt4d0x5/xDuCHfxz49esD3/4KcM8rA+efA7zvvG574t2PAf72O13bq1y9u3P2nx8Cnnov4AOvBf70McD9nwL8zEOBp/8y8G8f2NxeBwB//XXggxcBK0PgjmsASjfemV8HnnCnbrzgdzwC3vrC7gLm7r/Rfa8VGI+7z6jAva7a8TceAR+4sJMXAM7+NvC/V4CH3mRRT0p//E/AD16/u2D93pmbx//+wPRO1zHAFa8MvOELy8dSevu3gQ/s73T8vvO6/0BiZGLkZQojD/kOV6114eqTJ2fm1Mn0apavSkvZrDxwEETy1ESpY3P7ONZSPhtIKxXch43PDqBJncGI9aIOEvzxOOFMXBXRc92xihKBVCuedP6/Y2VtFU8dfQrPGN4IB849Hy+95p0xGA5RBoPZq2k/ffK7MFhZmR6n8QcF7kUcZTDV1aTOLn4WiHi81+ufi3u+7tkYrAznzp909qkYrHbHhnvW8NTxp91IC3Sv1z0HP/+G53aylulctbtr9aTzPo7BygqevnJDPO/YW+NJ5358S2MGTTYmeNl17oH9Z52D5x9/G0zGG3jq+NMLiVT9je3jqiphW73VHHe62A84mcR4WunmtjG/e5A8YkYTYfyPOOKkzwsd9jn1Xa0IcUxzLLCuok0kdZZLEyvPzbrQeGJ96yKsZSNOsrx4dPmKqbWYVH7dIpl1znmMz+k8mg9DlwrQyi8DrYIR51nNc2wHxz+TA6ikrVFi5KWDkbwVPP7zdi4XG+4ZtYVcXDZxd9p481jHyOb3UuZxs1AbHiP+R7/JRMaUOe
O3VkrpPtfa8RETlAJMpls9339Bd4frnlfeHKMMgBvcFrjvE4BT3gZc+8bdHadWfL/9W8DPHte1ucNqd0Hz/guAY68K/MPY91Gqk+75t5UhcK+rbF503XFPN947zwJ+5mjg/j8MvOlLWxszaLLRXRSOR8BdjurkeNd3Z7ZOjOwoMfLix8g+OuQ7XOp8PLGrDinDYWTuz8pkRwhn4DfEqVHU2ZknHZONqMbR3y7RW6kcPK5awbJygAcfCsAR6FxdYKdQ+QBgtP+i2ffRhfG5YPWovVi7wuVw0jmn4qRzTsUDP/rabuw9a7jh/e8+O36TB98bwz1rKIMBxvsPdHufZ2NN57hgPwBg9ai9KKVguG8P6mSC8UUHOr2Op4GyOsTGgXW87VeegOdc4Rb4xn9sPjg83LcXzzv2Vjhw7vlYv2A/aq0YXXgRxvsvwjL62199Iv7vOz/c9ZtUDPftAQrwou+7PZ65dpPZVsLZ3anJBOsX7Mdo/4HZGCvDlY6/9RHqeIyVPWsoKwP85W3uj1/7x7/AvuOPwWO//uHZ9kSXTNl/2B68j5svrLh/K0E7O3MCAbpEzBeAOpfzeU3KSsr3ZDLB+vr6XGJXX48tkXFMnznT/c7sy67qpvzwMbcQ5H6cA/gCV23TqhgyCLFOterPnzXOWU4dy/XhnBay8Xe3kOTvMZ76Sh+/jtcWkLJsuugGYKu5SVujxMhLByP1XCy2XZzzwpC3kW3J3y+6ADhwQTAAHKDt7Rsb3R2rlWG3DS9o/aLuAmRtb3dxsOeo6VjTvnuP2mxbK3Bgf3eRtLpnOtZqt2VuPAJW17q7U0C3bRGl4+Eex3R3yu68t5v7rV9bLstwOtboIqBOL9gO7O/G++vTu22UKN289/mBTXnXl2M5nnBn4E57gK98tusXMu89Cqgk+57u5VWYbHS6Xd/E8pmsowPdRWno737XBv7qK51e3nXWrHliZGLkJY2RfVQOFUAHg0Hl266tJBgMqcL1ORYXCNFWjR394rgGj45Za7dNi/fs6tV4fO8zDPdjWVsLEq00sDwKZDqHVi6i7QP+6WS8/9HPxVlf+PKs/eO/dQoGwxWMD6zjRd93+9nxq97wR/GAD/0lPv/eU/COBzxpdvyOz38cbvKgn8df/+JjcdqH23eHVtbW8LhvfBgAcPZpp+NVN7vv3PmbPeKXMNy3Z/YWwF94ywtxrTvcEn9x6/vjrM+fhkd+/n348xvfG6ML96MMBnjs6R/CC0/8KVzuysfhtz777ua8APDu3/gDfPbtfz/7/rD/fBuu+APdG41ecMLturcUrq7icd/8CADgnNO+jlfe7D74/lveCL/ynpfP+v3Tc16FU1/4WgDAvd/8AvzQHW81O/cnP/Iz2P/dczodfvsUvPiY29gtOlolif+6mGA/CttzEonAdD+CGvuy+bkxYH5hEnOG329sbMwelOUEEwlkMplgPB7PnmVwsdnywxhr2Q/7cTy1QEzba3xz9V6TsC5MQ7ZWLAPz1TxNzK49x69uSwD8MyOsc32LpMob7fk788G5T0EqZG3lWB6DZVJdaRu3hcN95zw0Go0+UWulX4FN6qPEyEsHI6O/Xszx3QTWl7sLGd/Xn/kuTN7yR8DnP9k9d/WY2wPnnd3d+fn5E+aZutoPAo97JfAbP95dwBx1BeDnHgE86BnAS34TeOefdceGa8Cf/Atw9R/t+t3n6t3FSxkA7/gOcMG5wN2v2H2/wrHA9W4FPHuKl+/8M+Avn9p9ftwrgJ+89+b8D7kxcMbp3ee3fRO499Xm+TvhmsBJrwFe9STgXr8F/Ot7gB+8fneNPj/nAAAgAElEQVRxc9/HAy98WHfXq4+Gq93Y55zRvSHwmtcDXvrR/j4A8Mif2HxL4Vu+3D3PNZl0d7wA4IpXAl73ue7zZz4GPOnuwE/dB3jMJpbjRQ8HPjp9S+FLT9ncRgl0so5HHX9v+AIuf5+rJUYmRi6MwTKprrTNdjFyNBphMplY4+7IBVfcpufbkcsSZTg5J0LnDEEKOAFIrkLIfZzx+4LB9Ws5PRuUF9JO7lZw9QFg9GNwjeMP+KeTceLNb9CUPengqE4meNEVb71wCzxsrvbSBMTn2W9qrQsLJ/4cfblS4xZXGgM8J28viGowvwExxnXPLcTcLtHowk4BqG/Bw+NrbLjFPCd93ubjfm9IF3Q6j26fCBu1FlbMF+cYNw4DDFfFnewuJ2hOcUDD8R524MWz8q5tXc5xuufjbI+WjvKCa3uUGHnpYCTrMOaO/4PBYLbAjpzAdwijX/A5ftZ7UG92p6Ye52g8Aj7xD8AT7wbc7M7AU94IHH189wzUnzwK+LvXA088Gfjp+3Z3rBzV2t0J+vXrAVe9RvdM2PFX822TPO0/H5f7xRMSI5EYqfpQm7VkdHywPZyO+i64duQthbxojP/qNC0nisQXxuPjqiB1nvgMLF5tK2DE/HycK1zO4KrgmNMBpZORP3MCD4fWB4X77pT0gWbSztJVrnftma1miaVOMCjTKnGtGJTNtxCiAIMy9ddBQUFZCPy4c8UJL3xEK0vqE+xHkzpBQSMJDgpWBt1ioqJ2LzaRylQkR+WPFyz8w6W1Vow3NjCY8nTu17+N/Wef2wQO5YlJ5XYJy+WMlu+7i1smfV4u+mheCdKc0lqIak5iO7nkr3O1Fre8SGVyIMTnXD7k8ePzVqqqPI+OnXTwlBh5yWMk51e9eOXx+MJKj890up0QOOubwJ89vnv74He+Brz1RcBDngV88GTg/36iO/7m5wE/dCPg2jfyY2yMgD/4xc03GL70t4Cnv30bTCRhMEC9+nWwUTD7aZo6GGCwMr3QqsAo7Fym/jKZ+sFQMBKLGFn2n4+VM0+3ccR9EyMTI2d9Ws6wVRoMBlXf6BFMuwVkEF8V8+KODcaLw+jDwBVzRdWKBXeAInwv8Bf9HVDEcbcY5rsh6oiuahZzaXVEnYJBlXkBgF895bU48WbX37KdkpJ2gt730D/Af73hPXPHeFETlTGuvmlctIDEVQwnk8ncq255jL7FpfKnAMCLUE20nDcYUBwpzyqzyyl94Ojm1IWggpzKrxfUqgfmk/nj3OaAJL6vr6/nHa5tUGLkpYORcWx1dXXueRx+SyEvjnXxxf9Hz3o36k23eIcraVfQymdOxeWedJeZ77DPuIuwxMjdgZHTXHPx3eHSyd2tfb2qBDDnePxbGfE/3urG/dS5XcKNMVvAwAHRdxXvHCL4CPnYEHzbmm8/Rxv9/Q+9Da3EfXicWmu35QDA1/7pk7MXWNRagVIAdq75AUPAze/T9rP/0/OhlTrrVzZH47Y8Hs8TctI4c61Y7mn7QnOXUmZzzyUe4r2GLcy4hfQx49yA8pyOws4kbYyjfsI24b84x74SfLLtZvyoLoXcmZlOSc/MP0pBnavOltmcM3kGgzk+5uxDOov2x1zr+3HsD/3AgnxM4/F4dp6TmHsgHmi/En/GN7CQA4DFN+a5+FawcBTxFf00JpUiJykfDtgU/JivSPKaEzmXuHymIMRjhY7duE4velx5YJm2U/VL8pQYecljZ
PSLu4Mhc8QR34Vjft3CdSb3f/8LcNEF3S4DAreKOpczof1KAQNL1346QIk8CzCYRZ8abyGcNmT9TJWwOVf4VPTD9O4OscuY0eEny1JnmGNjXjGHxxHcQFmOkQXApG7yWcE4OdUT48KmyrdGzC+NVwgjQX7D/QrFTPBTClDX9mFyg9vOYoRjS3eUKCVG7m6M3LEfPlYm4nvftgcmFZIV55yMj2uCVmOp0/WNp+f1v7bVQGi1D5nUoZSHOO4CZa7f9Px7H/w0nHv6t2bBz0Go/Lgr/9CZ6jv++JfWNUA1iPiNO3Gc23LyiHPj8XjuHP8P0t+aiX5ha30rYFBLXk3+bHP11/iubxiK39WJ//GbMLrAGI1Gs6S1sbGB8Xgce3wxGo3m5HOLL60kqc71v1bRgm9+cLbWzR9IbCX3eHA+fOBWT3wwbvvUhy3w57ZGsa7Zn/i8JlcGLpd8NWeoH8WcurBTe/SBF59XvreSBzTZa5wsGy/8nhedC4s20X+MqZV5x7/LdS3AU0BrLSCStk6JkZcwRlJbPcb8sC3i4mwBM9At8lf/+LdRvvrZxMhdjJH1qtfAhX/xP7M1WOBX6yInMTIxMmjH7nDpd2XOKZaFic9qdCbtG3OxA22FPx1Tx2q1c7ceOeidTKoTllPBzRmU53KybEw27JW9ytKqZqp86nQu0FtjBGnFR3WnCdLpR28981YTro5GQudqSSTU6BdbSOI4v1aYj/Nc8Tl+TV6BbjAYzN52xeDPeguf5Ip0gMh4PMb+/fsxGo1m33lrC48Rn4On0E3rHOtWx4sx40FWtVkrTnlMPs52iO8u6bXAxfmSbkFQvSqQsay8NUllXpagQ/9sN/6x2lZ8t2JCc5uO0eJJz4e/O13GYkFjPeSP52B4XAVtltvlXrftJWl7lBh5yWNktNOLkZYsfRhZNzvNzRFzJ0buIoyU2HKxzf8TI3cXRvbRxXKHiyd2Dsl91PmY8RY4BIVi1CBc5eHxXSWf+2kyZzl4Tie36sA5OhtWq5NuDnYapslkAkgbdSgelwGHgcwlcAY21ic7GlfNmAcXPEEc3PGKVuWTK0QKjlyNUaCJClp8Xl1dnf0FGESCUdCIublKxy+N4HMO0JnnSNKsD04GXOmbTCbYu3cvNjY2sL6+PvuL6t54PF6o6qk/sW7DfqxTB2jqWwr0/Nr5ufgom3bVbQLO9/g8J/q+pK7xy/pX+fk7Hw8bK+gwD6wz5tXFvdtW5RawrcUVgyL34zld3mA/by3eeIGiOlD96NwMumxvJyfzmnTwlBh5yWIkxwHnBeUN2BpGAkCdTOa2mSdG7j6MHMf2PYOF8VnHSIzcPRjZh5OHfMHVSrytZNxyJmA++YewGiAqrPZ3iuvrrw4Xx121qzXmVs7Fd95PznOx7ByAwduiTqe8TsdRO6jhNTkzfzwn667PuZwdWjrg4GBn1yqf40WDMcaJpDwcDrG2tjYDgeFwOAOSqNSp7By4PGeQJlFd8PA4WglheXWRxMknqn+1VuzZswfj8XgGJgcOHMBFF12E8Xg8AxV3EdRKvsqjsysvBpRYpoW94QbEggLMNcnrZ80T6kusJ+7L59h3NUZ1XPXPvphmuzkA1Rjg77qNtyWjjqO8qk9xe/Urp+dYDOmiU4HEyap6cMCXtD1KjLx0MNLF4UFjJLq7XPGGu8TI3YuRMULo3v12E8uUGLm7MLKPDvmCyzlnS/F6LL5zkER/l6x4LicoAwKAWSWFgaElgzqcS8hA5zD84CP/V7lbhnLHNHgcMOvn6UA2mMJh9IFqptCZJvkYj5OVc0aWhc+3FgmTyQRra2uodfNB53jgk/UebcJ2tdbZ76UAwNraGvbu3TsDDd4fHtW7GM8Bido3vjMvITNXg9hPuPrB8jJ4sT9wBVV9kaso4/EYa2tr2Ldv31xVL6p86nsaO0oKnCpH6+6W+m6RC3wdr7V4dD6nutFzWrVvJV533gFV31yOZyX2F/1hR+VN5eVFkIsR9RXWvY6l+a41Zl/FUUl51hymeWyZrpIWKTHy0sHI1mLoYDASkS/L/EsMEiN3H0YqaRwG74mRiZFKO7KlsHUb3jlny5lYAeGsC9uaptRKolpl0e9OOY6vPid0t3xZBlW4q0C25OHvIbs62+bnKW/El/IzGAwwGo0WHD7OtfTpbimHPbmaFlseYk4G2jgW7UopWF1dnemn70/1FmAR1bn/z9677MiRbed/KyIyM/JaVSySre5zWgLOHwLOQIDHHhnwzC/iN/AzeOAX8DP4KSwIguCRJgY00cCQjk73IdkkqyrvGTcPMn87v1i1M3mp+pOCMjZAVmZkxL6sy/ftWHvFDsgEghkMBqGPCuYxwIvJVfPb9cFelYPKinaUdFQ33tG9PakekKsff57nlud5yF/fbrdBvrvd7lGkNzYubcPbR7/fb91o8+AygBkb16Gy1jFd0tfjKtNTPnwKyDzh6fn+M9+xScWiU+TBJEttQqOrKkffL69nrS/WV+3fKTA/df4psojVc8rHP0XmMZn667ytdeXLS8eR354j+U393PfnczmyVW9ddxx5wRyZSP/O3axzrOPIy+LIc+XZUgpjjSvweGDmdwzPR3NigzqnIK1Dz9E69J0m+jtvmUdgSmTeqGJExnX+WEw+MXLQ4knKO9axT+07aiVhxqpLot4wuZZ+9Pv9sDOQ6g35aQRUHdb3lYIMiUpp/zXdAV3oUjN5/BAFEbo0TcNnUiSwmSRJQvoBOjwFcqoH1ZuXoY9aqk3H9KLApO35na88KSs5+f5kWWaDwSD8q+valsulVVXV2t3Jy1D7SFu8fFl9IE33KSW73S6MERlxvtplYu3JDv31qRc6hpgv6XmeSLnGE6DKxstKdenBmKI3kjoujR7rWGPF+w91+pVBtT3VQwwfYjtwKdF52Xob5HPs2thnPeeUHyRJ0tK/b68rn186jvw+HOl9+ikcSV+qujJz+N1x5OVxpMorduPRceTx86Vx5Lly9oYrSZL/0cz+v6Zp3pw559F3BU41CG9MDFCV4CNu3mB0YKcIi+t832Jk5Jcn6Y8fm4/A+WijGkuMAGPGFVOOl5M6ke+TmVlV1Y8cwZPHKQdTY6Xf2jbH/Racsf77JW4/oQiEJUShqQk4Ju0SrYNM9JiSS5LsI1Fc5/uuQK0AqGTHX52Q+L4rCHmH9HJRGSip+ZcTan/os9qlRr8Yf9M0NhgMbLfb2Xa7DTs5qYy9HSnA8b2u65BWom2r3/j0J188IPr61Q78dTGb1Pa8vGO+q33T8fJZiVpJXovav/dp317Mh2ITulOfY5MFbUPTdfy1qr9TutDrdCLkcTiGVSprj4n++q4cS8eR/7k5UieyT+HIsE1h0/bDjiMvjyNDDUnyiCNPcUfHkZfDkefK2Ruupmn+n0/WcCjeyc6lCKjDxM7xxuaFJP1rXRszMq/MyBjPGpmOz0f91AAV0D2AekLxSlQQ8aCn12vfGlIKE2v1g2t8JNO3qbLRaCW6A/gUjGMy0jY9oass1D76/X4gDlIu6EuWZZbneesBX9IhYqRC/5G/JwTt
g8pTd1qCmJRUfIREI80+KqJkiD2VZRnk5tNKtK3YpELHYnaMumkfkMtmswntbbfbENFTmWvUij7o81pqyzo+tSPszYON9pk6YwDqz9fCeWpPpyZ7moahdXofjmGR99kYUPqIr8cl/zk2Dp/+EpvU+r7HJmmxSZvHBF11UJmpbXlf8CRGfZ5MdTIZk3FXOo78z8yRKrencqTu0kr9HUdeJkdiMfzV1DvVe8eRl8mRsTRxyrOlFMYIwzuJDi6mSF+nBwF/jheeB3Ft1//1RKSfFRz4rEamCtQokR+/J0M1ao1uqfIxfv0tNpYgh8NfyI/rlaD43cvN7BjlKooijJHcZT9WvT4mN/+ZerUvnlQUkHnAV1MklFiQd5IkIY8c8Ne2T5EB0S+KB1O1Of4qmCFbX7f+rpE66tEXKvK7gn6SJOHlln4sKie1W+y93+/bcDi03W5n/X7f1uu1bbfb8AJJjUwp6HjiV5uM2W1TtwFNb9K1xCZdOh4PtnqeEldsguiJwfdT7c7r0PsSckY32IxGTLW+c5NYf51OQlWmMZLXlCFfL3I4F12kbV9HrN2Tk0kp6geKQz7lpyufXzqO/D4c6SeGT+HIMtmvbHQc2XGk15/HZh1nx5GXx5HnyrPsUqgAqMe1c954VFAxwPRkFBvIKUHobyqUmJHE2vHOY2YtYXINwObvaGMO5vvNMXUsb7RmR6N/JN/wt+18gKaC06l3VXC+LnWHZXORjfbPGzJ/1dh1jPwFENnGVesgEpemaXjYN0mSQCyQCSDtl9U9OXq5+/Fqf/V6H2Wj75wTs3FAhPM04qbkhg6wQdriPJV/XdfBLmKRnX6/b9vtNti2RjTJ++dB4liftY+0h3zLsgxRUZWZJ7lYBPnUBFLH4idm/ruPGGm0lH6ScuP91tuq9lEBMjaZo03v+zoZ8aSMbLQujukzL8jLg3iMvPwkUq/T9mNj1uJtRscSIxg/YVLMZPLmdduVzysdR34fjqRe7zdfw5F6vNdx5EVz5I5NT5ydabob9qh1dhzZceSz7FKog9FB+KKK1AiTjxy0wK3Xe6QYHIK2fITAK0MBWgWkRZ2E8cQISEss2kBd5yKJarD6EKs3Vj2XurzhNPb4+S1kkCRJ2BHJj1NliMECSJpq4Z1KyS32uxqf9kF3ZlJA1V2U+v2+5Xneehkjv9NX6oQoFeyxJQUg/U3tAfvzwEZqgk5yPKl4W/Ljquv4W+W97hUUNULnbRMbiaUubDabcEyjnuzUtF6vA3kraDImCEWJis/6oGzdHKJDETtSeVBUvrEJm0bWdDKmelPC9+TlJ3dq31q/RjCxH48bPsoX8z3vl1qP70NZlq0oIyWWduSv12iyvyY2Tj7rOacI59TkXHWktkF/Yv3typeVjiO/D0d6WX8tR6o/dhx52Rw56Pdttx9QmyNlHKe4oeNIC23+V+XIc+VZbri0s+pMKlAFS47RWXVI/aupCF6QupsMwKKCi5GZGpD2W4u/VoFL+6a/Mx5+i0Xr9FpviLGxc84psvLjUuNh6RzZqiFpP31d/X4/OIKSsJ4DKQC43uk0VYFJAECs0Z/BYBBSIci1VjCkHj57wtQJBKkTuvyskSKVP/XGbEsBUKOAsUkF7TFmtW8f7dPxq21AEgCq/sY16FBlTT+9vHVSRopomqYhfYJjGnHkOurWh4RjPpQme9nou05839XONHIcszs/GfG/KV7EfMb7h5+c+omqXqdy8PXGIvjUo1G+U+f7SYz+ppjHudqGAvo5MI/hwKlJtp8MaJ9UT17u/nyv5658Xuk48ttz5Kl+fBVHHr72+32rDhjcceSFcqQbt7c56uw48jI58lx5tvdwqTAVeE8Z5CmANTsqDSWhlJjCY46ubavBeKHGhOuv1+/+GnXMWJ2+3574YtERvyz9qXSKxNrRPMBMo3Y+nUPBmP5otErBNkZaHFdi4R/1qFz4Xte1DYfDQCBJckyJIGqn7xPRes2OqRAxsodU9Dwf3WN8uvSrfyEvlQuy9xsHqF2qbmOyTZKklRYBYBCZRu9+sqVgqeTOWKqqssFgECZWROyQH//SNLXdbtfqY1EUrQgjctfILVvslmUZXnxc11XrHCW2GPCpLDxh6Hj0HD3m/f7UMZWP2rtiik4QtK5Yv3V8fPc+rn6ppOn15P1f/Vz9Se2DOpmEmFmUKPwENman6pfalhKpJ2W/6hHDgq58fuk48vtwZKyPX8ORlLIqLbGOI/l+qRxJGy2OlDZ13B1HdhxJeZZNM2hU96T3d386aAUDvSv3xuQHrsWDnT9Pz1eDARhUUHqdj1qpEflxa19jBun7Rv1ab6xvSi6e8Hwkom7a0R0AXsHJEw4gg764To1bI66qJ5W/1yUOoNcpUEIWXE/Ebjgc2mAwCISgETQfteG4Opg6BLqjHdWxkoMe1/HFJi96zAMF9hIDR3Xguq5bD1rTF10tUrvQfHYPHNoXbR+ZVlUVyIpjq9UqjGm73YY20Fee59GHPoMMmse7pSmA+YkN1zJeTxJ6jOPed2NEHcMHT1IeLM2OKQwesH0AgxKbKPrJrH6mXrVTtSlfvFw86TEOxSO/8qHj1f54n/HPQHjZKgbFJjO+f135stJx5PfjSO33UznSzKzYFZYe0s86jrxMjkyRjbM7+qG23HHk8fOlcKTHYS3PsmkGzkBURJ3Cg6IKAGfSiLkHMu+83mA94KoyY797IWk9McfRooas9avxeiDyBqEk4kkyFlnkuseRiQNgpcelfcZ/aux6TI1HDSh21855ALdGnMh9ps4YuKNP8s55Q/xgMGhF66jTE5Xvj4/CcR2FcVCnJz0FFJWJ6l2Jh3MVuCkaLdG+eT3mef6oXd++n1ghO6JrkLVGMZumCfnoPFRNPzQaN51ObTAYWFEUoU/kbldVFbbOVTmxS5QHQ7UJby/qB3rMTy79pE0jTCoT/d1PwnykSj8jK9Wn9sdP4mJjoz4Pzr5NtR/+klLhCesUqfrJipeBXhPDIbUbP2GFnOi3J2TfD2+HOt7uhuvLS8eR34cj9Tw//i/lyIbXsKTxVTTO6zjyvz5HmtiLPvsVs6WOI9v1XQJH+tVyLc/2DJd2Tg0FR8ZQPHH4NzXrwLX+mFJiNwcxA1QQ8HVr8XfyMRL032PA7Z0PQ1ZFesWpYQOE6oAYyFHeDZ1s9UdBkO+lROR8hIk6aS8WifLAzjtCYs6sTpTneWhzMBiEv8PhMNRD1E5JhH+ae+1l5mWu4+McTcugb7F8bgVfrVNlqPajelZyoP4kSWy73YbPfoKkcuW3FrkfZEdUTB8WVv9Q8FKSh1SwQ/xuOByGMWZZZtvt1na7XdCZkjM2E8afoIe2bDwp629qu/pZbUr1p+CrhEF9p4BPiUVt2k8itHiCocSIxU+QtS+ePJWoPG557IldG7Mv1aWOXfvpScrb2ylc1QmOb/tUe1358tJx5PfgyHj/voYjQ32WWNJx5EVzZGAamXDH0ks7juw40pdn26WQRj3Aa8djd+aAUiz3mOINVh2Lun1fEIYahILQOYNQ0Pft+j7Gfud4DLj1PDUW3x5
/PYHQv+MKV/YoSqHG4eVC0SVuJT305OVMrrLKTfut0Rf6wXa1vDsEcOc7wKbvC/E68E7MeWmahveXUABvrldyaJqm1Y5GAFVGAG+/3xdZt6OuOmaVs+a9h/QD0a8SCHUSPfOrjHzWZxW8zfnIDXWp7NApZI2OVI5N0wQ/pE3Ko0mcPQYWiMj7HPJUgPUg6FdMvIz1XP4qyfOd9mPRbZURxBMDZP3ux+j177FDJ6/+Gu2/nh+zdb776KbqXcdDnd5fVI/at9gYqddH+bwMvE668vml48hvz5E6kdrtdk/myMbM0iy1RCZ3HUdeHkcadiI2G8N+35+OIzuOfLYbLgxDIxaqNB2ICt9Hr3QJNkYsMXCNGaqvy1+vDumVoX1RA9K3v3ti9ADsiYw2tM8qp5gs1RgfOdlhqFXV3omKB0LVEPnsyVejXSo/8qj5Lc9zMzNbrVahHX1w1sxCzjO7KdEHyAMgY5cl2tc+etl5clEApJ8+3QaA9vaDXTLOWP66AjL90JU/LeiN49i+viBW/5Efrf6BfSowUrf2U0FQ++rJiUmZtoOdEqVDJ3Vdh1z2PM9ts9m0bF7HXNd1eIYrFXv3kyvv39hbjERivh2zffQNlui4YgTj6/ZkRTux9n1RbIqRCG17fDlFiNq21umfl9FImvZf24xNCPw5qhetJxat89fw3f/mb8i78nml48jvwJGH4ndr/BqOLOChJLX0cIPUceTlcqSZhc1TOKYY3HHk5XLkObk9yw2XGsupu3FvKLHOmrVBg+OnjM8L0B/T+nXJ95QB+M8xZz1nPP6YjoFzFPh8e754cmqRZsP1j+WhhOflGTtXnVGjR1xfFEUASQU35Kp/aY9InW5jC/grIXh56U2gyoE2qFsJOLa9MtFEvRYS1IgcTuUjJb5Or2fVh0bFfB55zMn5rrLWdvQFmB6INVqpgED/NXqn8tVnR0hR2Ww2geB5AaROpmL+pfpS0vD91N9Vdj4C7H1HJ0Y68YtNxrQfugWv4o3Kx0e/PAh7kNXrz/mpH5/XtydbPvv+6G86eUAGp/LMPaZou9gDvuzx1BOeJ02Py135utJx5LfnSP9Zx/7FHFnvP5dVab267jgyUuelcCQtNfbYl9SPO460Vl0dRz7TLoUYUyyCRge1Y77DalAMWuvzdXqiwDBijqnn+/r0fB8Z8f0GmDyhsNStgKP9onjDUTLw7XnjVwJ8RAr2eOnVy0nHoX3Sh3m1b9qeAlpd163nCRSwAVR9CSP552mahpxp1bcSiBqv15vmlHMM+dC2khDt851jp5zap+poGxrB4btGjryDQiSnrkXuKj8/9pg98BdQAUCIQvpIIvW3cs9lxyXkSkSa39H3I/K0tg/HiEbHEItkqu7PXU/xESZt+5RNozcFZHSiMtF6qMM/3K51nvJrvp+aJMeKxy/VrScxre9UxE3r9LajKSWxumP90vM1kv05Y+vK49Jx5PflSK2PcXs5fYoj2TSjqY+rkh1HXiZH1sjmRB9iNttx5OVw5LnyLLsUeoPh+CmwjBmDFzAAYPY4fzOmTBWWAquCY6zvMcHG+uKVcAq8zxkDhq5AdC4a58s5I/DjiRG1gpXKTOunXxohIxKVZZkVRfEoQgRRaISu1+uFlzZSH0TDWAFYjfx5olJSUPIws9bDrQoiSia+Xi9nrtXoiKZI+LaVIDTKqQSjNqD5555kYoBCWxpV9BMt1bMSh+8j1/vJXsxXNbedc1sRTTE7T6Te9vyk6JStxgA7Ri6eyHykNea//vM5IlI5x3DC/+YnPz7qp3qM9e/U+GL+7Y+pjGKRzpi8PL748zxm+QmSJ6qufFnpOPL7caTvy9dyZGJmKp2OIy+XI8NxZzexvx1HXh5HnuPJZ0sp1EY8SMY674sfbGyQMcONERFFI03njCrWF21HJ5+xtrXEDNw7mjeyGPh7Y1FZq4OmTr4eeLSvGg2MyUT7PJlMbD6fh9+oj2iPgrZ/yNd/b5qm9e4QNXCiT4Csj7j5NAz6SF3Ih3YgIG2DumLkq7I+1a7+jQElRKHApBOpNE0fveNKyUjB3vfLzFrypg7fH7UJBWnq9/3S8TJJ4BxvCxiRFmIAACAASURBVHVdh5Qa/1usaA5+DAxjAH4KqLQ/Hku831BiE0ElAD1+inhix9WnPJ55EtXjsYml1q02HZvs+Yix1uPHfYqMPiUnf40SJ8XbVVc+v3QceSzfiiM9RjyFI/24zTqOvHSO1KL+feoc317HkZfHkc+6LXwM/FSICkxe4LEtK/31p4Db/37qszqSjxxoW6eOK0H6h/jUQWL9568uP6qivWJj5BxTvjfsmOGeIzJ+j0VIe72e7Xa7cG1RFCE6Z2ZhuV3TFSARfd8FY/AAppEs9OLJhnYUFLxuvONpDnesTi8HwJy2fFTKT45ielY5+2Vvs+PuTPqCPcgvZqM6fm8Pqj/65glUU1xC7vmhXSZZmr9PXUo6/uF3bfuUvcYmSnqOt8VTIEtdyEcji57Q1Gb95DPm0zF/V6KIyfkUwehvXo++nAJ0D/gqo1Oy9sfPYZ/XifedU330Y4jJpyufVzqO/D4cqWN4CkfG2u048jI5sj42eHKDmI4jL5cjz20q9Sw3XNrRc4CsHVRQiAEn15xSrv9dAUmBnfM+tVTor+U8vaP20TBfPmcy4kHCjyXWz7jCGy58JEcPhp8CJe8IOK9Z26EVwHjYN0mSELHL89yGw2ErV52i4AWAe3A+lTahjqX53HVdt9pQIlBy8nV42+A756VpGohTx+xtWGXo88wV5DX1gXr0AVbVA/Wfszev0xhZel/zhOmjjSoLtSfkQUqhnwT5v97OToG6jl3BEx/QiUbsulOkoPJRMNWot/ZRJyF6/BRQ+wmNH7dvx197yhdV536MMVlrHbEJViyCG5uQesLyY4pNJLrydaXjyG/NkY8nV1/LkfCsJcd+dBx5oRzp7Mzzhx7vOLLjSC3PsmmGVy7CxDAoKij/ux/UqTYoXqAK+vzuI3TeYPz3T6Vr+Hb1blaP6+49MbL0MgOMABit7zRBJeF/ztF3XOi4PDCpTGPtJEli6/Xarq6urGmaEMFDr1mWWZ7nNplMwvs4+v2+5XkeXuSYJEkgG/Sjy/YakVMiV2D0cjVrR37Rn0ahYhMFrlEb8Y7p/0JSHqBpL9afpmk/VHoOaHX83kf8A6tKBFo0qhsDVM051z4o0cZs3EeFkyQ5bpphj/3CT160eJ/09qZ9ixGQYopixanIX8x/ud4f9zKJEZSv2/uzH4vqJkYgvk6VnZ4fk5OXl++nv0bHrPLS6LKWGNHG7LcrX1Y6jvw+HOnxgna/liO1dBx5uRxJLY3Uo9fGbLfjyI4jzZ45pdDssXB8J72D651nDMxjbZwCwNh5KEgf8ozV6RXpHcaPBTBQElBQw+A9mOsydkx5p4qOr2masGtSXR8Nh774cezPe7z7VMwwubau69Zb7rmGyB3AnOe5DQaD8I+cdH+eyt1HWJFjTH/eWZCh5r5rFM8DZMypvY1iG7RHHTpRUNnEiE4nD/64rzcWbSHy6FNyPDCoDT
E+P6lR+/LkpN95jmC327VsS/WkYzAza+rHDz57O/UPF2vU6BRgqo2o78SKJ2uVUawd5B5bFYhNLGLtcszbiteX2vE5/46RsNpVDHv8uPQ6f55fidAo/Lni6z23ktKVzy8dR357jtT6kPvXcmTi2ug4suPIxE5wZMS/vJ12HPlflyPPXf8suxTGDEqXZs3iZIAj00nAyud+c64Cha+La2JkFVOG7wffvWH54iNlMaPxbauzaBRGyc1HZ7yR6+fWeGRb+LIsrdfrtQhFndRHSmMRQD5DTHVdW57nliT7/HR2PVIHJk9dyYStbrUPSn5m7QdVY85MX/wyt4K91oujA8xqN3Vdt873Tu/TN9CxP6ZjUN3QBu3qBANSgPSa5vgOEuqKrSopKHu5MZ4kOT5U7VNJvPMjMw9WkD999uTh6/ETIQUqve7chOnUJE314Z9Z8bbvgwbet/0YfL/U9nzxmKFtavF2qe3pd+0Px/RZhViU1ROv76/qWn3E+5SmPiHbmL/FZOf70ZUvLx1Hfh+O9P7/FI4Mqxn7nMKOIy+YIz1b+DpiNt9x5L5cOkc+2wqXF5wng1Pn0Xn9zRuWmT0ClBjw81lBk6ICVyVoH7witc4YyajReIP0TqyK9O3GDDdGJK2xHz6nYhwqPyXqmOOfkrPZHkQnk0mr71m2f/kfueiDwcBGo1FIkdAIHufQjkaPPGjGJiIqC6JTfnKi50IeGh1TkDez1m/kzut7JxTAff98JMTrXX+jXk9WHshjdqqTAiVrJT8lTa2j3++3CNODsY5R5ai7lJ0CGL0uRpyceyod5VNg7MlSf/O+orbAtfzu++T1hhz89R609RjXaPH1aXuxSWzMzmN1KXb6cWuhbiUMfw56jeEncvVErnLXz94muvLlpePIb8+ROnFUn9HfP5cjfek4st2/S+LINHls397+abfjyI4jtTzLDZcKG8Cg83qHrefw3ed3++i6CjfmxDpgPebz1fkeU9I5Yfrj3kEUnD2xeKfyY/PFAwjH/PF9HY/7TrSN64jmER1Qg4+1S93k1/PZzEJkjjzz0Whkw+HQBoOBjcdjy/M8kIw+mEsfkD1b5tIP3hiPfgF0dQL+KiEwBv1ddaE68//UGb3jxyJRftekmL0rQPnJBfUhV28zlKIowvmqR4qCpZdPzB/8JCIWIVS/ybLMdrvdI5BJksQkp6ZlL2matvRLORWB17b1uLdLiFdJVEGfemKgj5z8BE5J3o8vNmHT/p2ajGrfz6UjxOpXDKQO/CXWfkx2KoMYOcReXBzzfz/pjJXYDUJXPq90HPntOTLmD1/LkaF920+4O468XI7cunP0L587jrxcjjx3zrPccMXAP9ZpL1AVkApDz1EBqSOfG5Qul7NsHRM05/goTiz6lyTt/FMlBr1jj8nFF0+QumLAbwr+MTmQ5FCLoccIHAJYrVahXh27gqqOR5f1NWIHePAQMORByoTmjUMK1IcuID21B3VECJB/fhKhION1QJ1KaEro3iHUEX1dClIeeDwQIFvGqITiU4IU3PS49lWfpyCax5iwQ03D8AAHeejYYyDEhCHLskBmKhc/+UnEfnlZpT+naRrr9/tWFEXUJ2Lg5mXMbx4ztO963BOJnufTLvxkjetbL7V07atteXtCN34C6K+N2V0MQzzp0T8dd2zSeiqSGMNiX3Ryovjj5fYpwulKvHQc+e050vvjUziSPDLq6DjygjkSu7LHwQ9vvx1HXh5HnivPvmmGfldD8AM/NzhvVP54DFhV2doO5+lv3kB95CBmwNTnDc5HcnxbnOPrMzuS3na7DQrM87xl4D5qoM5rZpalacuRAHAfkVIy9jLTY/xbLpc2m81CtE3JAcLQuqmHc/h9NBoF4Oz3+wEYiYhpv0hj0PpUtirjmH4hITOLRitjEwrVOf9ikTWKgr5eqxMDioKYRs/0XHXaU+ktFAVJ+kk93CxpXxiDTiL0en6PRbRV7/sTj9fGcsf1s05oPMnrOQpgsUmj9k1l5CO3vg+0i6/GCM2TkX+HiSeB2KRD6/P1xnQZsyc9R2Xg7cmPWXXs6+H7Kdw5V7Rej69fWldXjqXjyPY534IjY/jytRzJCn9THyfvHUdeKEfShj3mLpV3x5Ht+ny9l8iRz/YeLrPHd6R6N+mPafHCPFU356qzK1CogerdMMfVMNWYYmTj+81nfUhT+3/OobyRNs3xre9ExDabzaNlUurWXFPdTlf7forkttutDQaDlgzVUdM0tel0akVRhAhfmqY2HA5tOBy2ABqiUIfJ87wVKSPy4AnazMKb2mlXZaHRP9oiokQEsWmakPIWs7HdbhcijQoESnScS/3IUPuqETgvU42KKKl5EinLMkwM/FbGfktb9Ir80JO+VBNyVjtTgNFJhD+P4qOICu6etIi4BvJMkUPSsgXthwdelVUMCzhPH9RWX9U6vdx1TEoc9EcnVDFiiIGjnyj4yZhO6ugbv6v+1f5VxhT1Q188Keqx2CRIj8cwTicKPprtxxqbGFP8eLrydaXjSGud+9+TI9XXzexJHEmp6sr6B5l1HHmZHKk+0OJIZ5cdR3Yc6cuz3nB5B/a/xb574XmQ1siDrwMBeLLgXH7HYPV4zKgUPGLC1j6ZPU730Bx4DEoVyAOz2+3WzPago+AbIyTqpZ2Q9xsM+die2dGQR6NR2M5UncQ7qdmecHq9nr148cKqqrLtdmvX19eWpmmI3uk7NzQ3XYEb4lHZeQLTZ4SQDwTAP16miP407171i4EzdgVVZKHHkQ1yoI8aiVL5+5sQ72CxiJ0CqYK2Tl4Gg0GI5imIq9ywe0iG+pRUFIypi89+LIxZyaosSyuKopXWoPJV0PH1aVGgiQGkykrPUTJELzE/0Ikc/kD095R+lEBi/h0bi8euU0TqCUWxJIZhftxKtNio5pH79um71ql/tZ1YdN/3Xc/Xnet0khebCJ8is658fuk48hty5KHgR0/hSKR6c3Njyf2040jrONIcR3qc19Jx5OVw5DmefLYbLm1EheePnxK0ClKdQUEpZoj6u48QeAJQo/S708TGoIrQO3cFKwU5lKFbkKZp2iKR7XYb+kOkajQaWa/Xs/V6bWVZ2mg0ejRergnfD/0a5INWVE3Hk2WZvXjxwlarla3X65aB0Feum0wmlqapPTw8hC1ukRn90eiXgh7RN823JlIXczaAj/SIXq9nw+HQkuSYigHI+7fN0z5Aq/LxEwpPEoxBJwTIiwipytw7sIKmymK320UBEvDWZXvtEzn76tgApu7CpP3VPGhkjowgErVLBfmyLIOOfP5xkiQ2HA5ts9m0nu/wK6pN086jRq+Qnvov49MJBjKj/2pnalPIhAdkfcnzPHxWQPYRJiUnH0nUiZnHmVPE5nWI7PirY1RAVxnqQ/raT61DccqTh8dDJSePZb7/WnQizjXaVz/+c0TSlU+XjiO/LUfSpzTdp5M9hSMpg/7ANoe0xo4jL5MjV27MuqJqdtz9ruPIY+k48jCus79+ZjlFGAxOB+qJxndY69PlXX8edcaASs/RPqlRcTwGkHqtlnP1aFtmF
lINAJP5fB4AljFSF2kHaZraarUKoKI53I/6cvjLA7n0BQfNsizkvUNmEAppCEyuX758aZvNxtbrtU2nUzPbO9p4PG6NiX4rMHAMcoQEFBBj5KUPD/u8dz+58G3q74w9lsrhZadEoOc0TdPaqjZGGFpfbFLideSjaCovBfQYyVAfNkA/dOw+WuZTBLRO9RXsgjQUnQCgL/0Xop7JgWAlOqoPedOOL/4YcvTRWPqtxKG6YjwKgE3Tzl9XGSrpcr6fEKrPet9WWeskxU+I/bWeMH2dagN6Xuwv+kK//KaErrrTnbC8j6oeFJdUB1yL/Gg3pseufFnpOPLbcyTyzPO8tVLyNRyZpqlVZjZfzO3msB18x5GXy5GHhoJcdZxsP2/WcWTs2kvmyGdPKaR4wI0dV4VrZIR6GKwHIk9S1KuftQ2KpjNonaqoT92hal16vjeCoijC28npFw/EqrzSNLX7+3vL89yqqrLJZGJ1vd/6EsDd7XatyEzTNGGFq9/r2Ww2a+Whl2UZ6iFqMxwOw2d1JracffHiRThnOByG6BiyIrrGOFRf9M2nfmC0+kJdM2vt2kR+O3n6TdOEyAzL+TiGj75QJ06ETJUkFEg0+qkApGDgAVj16a/1kbpTgKRy12VxTXNQm0BvOja+a1SOf17GGplDZ/zVfigw+5UsJWiVm26JjA6LomhFrgF+PyFQ4vKTN/7y4lC1UQ9w9A9dq561bi0qV+TgyVfb1Do8Lp0iCOpBJv5cPvuIpmKbPzfWDvLRyYR/fkH7qpMBJijoykeJfR3djdbzlo4jvx1H+hcNP4UjN72eFWY2m80sX+cdR0bs8VI4si+po6w0ohd4pePIjiNj5ck3XDiVEoMCNb9rp2Kd1u8qaH+XrefzWdv1d/V6DvUBclyjhtI0TWuZ0t+50z+N9HAt36uqCuCjqQSqWDV+jYgURWFZloWXKvJ7WZYhkpNlmaUHxxrkuY3HY5tOp7Zer22z2dh4PA511nUdgBoQJ2WDNpbLZSAyxpLneWunJVI7GINGCpIkaZGkWXsiwDj0gWfSJJRYYsaL/P3yvgKt6iIWDeYYfY1NAPy/wWDQcjpPTrEJj/8es0ONSAOOpDFwnY7Xg61vi/ErUDMm7YMHdx42VvnQN42gKbkeOhLsgfaUzOmrRnI5prrx4K/ypf/qe/q7XqNRTA/yMf9XH1bMUjl4Pccmuh7bfH904uvxKjYR5hy1/3MTXN9Xs2M0FV0PBoPwfIq3G9pRe4z5iL9B+NzJdleOpePI78OR/X7fxuNxqOspHEkZjUbhpq3jyMvkyFJuKtVPkIEGDzqOvDyOPFeefMOFgyBcFZRG2bSD2jFPMnqH7g1E2zA7Oh51K6AooWHUapTathoPv2l0DiNRo1HwUwfGCb0haOQQR9SicgL4AQhNpwA4ksMaV3mInHAuIAhZLBaL8J4RgIeoIFGlyWRiTbPPKdddl4bDYSAzQB+yQf4qN00RUBugTcYIMfnvGDQRBY2qqqHrMSUKrtfIFN8VFLATtRWveyYDRFI1ouSdWG0pSZKwa1IAaImYcq4HLe2vjisma/SmfsAxjeopICowA6R5nofI2znQ09I7XMf5XBsbp58M+r6of2ibTA4UAxR8Fdz1uPdl1YG3E8hOwV/twsxak4jYzYZOCHR8flzoSNvBzrxt67WKZ9QRa8fbuUby9FkPoq4qP198ak5s3F35stJx5PfhSDOz9Xod2nsKR9LHsihtJKsUHUdeHkcyImxc/VNXkzqO7DjSl2d7hgvBKlCfIgz/mQH6ZW41Rm+kXK8Gr8qNObvWG7t717tpPzYAW+96vXK1be2D76/KREENg4AYhsNhOJ9c8yDLw9CSg/PxEr3RaBSih4PBIEThFKTMjjmqWZaF9AjdXpZIwN/93d/Zzz//bEVR2Hq9tvV6HVbE9IWZRBfVuDXyR8QPfQ0GA0uSpBUVBEi8kXtn53ydICBTwEf1Th0edFTnPm3C25Ae807mr/EgcGoMCiKMB3kpOaufMCZsV5+70rF7svK2TyHKi/w9idHO0e6O0T+1I+rysvXkof2K+YCPbPlrNGLuiQq5aB9imKGTBp0UAraa507bOh50oIR6CnhV5tq29kn7Sd3eX7Uu7Ia+q54YE/Vrm36CoeNT3Z+y8a58fek48ttzpN4QPpUjg1zNOo68cI7041Uf0To7juw40pdnffExzq2DP3WuGrj+hmJ9FC5GUhQfCYgpL6asGLjEwIPiU0IUAPldIxiqfO1PXdfhYVu2ZKc+QHy73Yax93q94PRE1bL0GAEgT137NBwObbfb2WAweAR0fuxmx0iFpjSYmd3e3trPP/9sw+EwbIP78PBgv/zyi/3pT3+y9+/f22azCedr5Ix66T+kQr/RhdoCzul1ii1AYlzP7keQlDoLdalDaxQFmXhZeNtSQPHg7O1HnR156DVapwdY7Y+OQcEP2ejEA/noRARZqCxVjloHn73tqvzD9cnR/9RuPShhBwr21K0TBj/JYxLiwUx1C4GpfNTmaF8j+LEILalL9EEj2VpvDNhVTopJGin2duPtR4u3qVN697pR+TFWcvzxER2j2qpiV2zy4W1T2+7Kl5WOI78DRx4mW6QHPpUjzSysZHUc2batS+PIw0AfTcy1D+i248jL4shz5Vk3zaBxT8wx59PB40BeKDGC1/PUALXt2J2y9k//xiaWMSDSSJfWmyTthxJjkQf/GSXmeR5yhGNRx7qurSgK2263lud5SFlIkiSscKUiJxxEgcID5ykHT9M0vG9CU8b+8R//0f7pn/7Jrq6u7Obmxn7/+9/bH/7wB/vbv/1b++Mf/2j/9m//Zv/8z/9s79+/b8medki9APD1hi42MfB6ZVzq6EqajIuUBmSr0U2v/xjIKtkoaMTsxgOcBx1IW6NASoaehJSwPHB5AjKzVlqE+onKXknNA59GXbUfPs9fo4fBX9J2vjZ/Y6Dr/Vd9QwnNpxyYxf1Bz9FrNPVBz9Gceu2fRr00f/6UryIbjxEqG7UJ/irRezs/RfgeyNVfT2GrRjxjtqPnqa6V1P0kRvWnOvsUoXTl06XjyG/DkVmWhWufypFBtoeVsY4jL5cjE3dT8Ygjk44jOf8SOfJcebYVLgSmd9FavAN5A411mvpU4b5dPa7EpGDif9fjMdJB6D6CFCMes+PuJyhUDUwdS0FQ3wav/fUGpO0VRREcJ7QtOzEBuqQI4jTqUGbH5Xja1HxWTXvQt9Lvdjv7+PGj3d/f27/+67/a1dWV/fVf/7W9fPkyvC1eSQ07IIqgaS2n5KmA6SOsOgZy5JGFgqhf3vf2kiTJo4iF/qY68CCttqQ2r/32bamTe8flbyyK53P0lVw9Ceo5atMeMCjqU2qvmo+t4Kt2nMpn3m0TI1c/IWNiwQRJZUx92Ig+XI6NMmafMhNLDfARRR23nyAqbiErP2aVh9qrtkXdijvn0qd83XqeTqCQifqrgr72y5+ndXvCiZFHDE8VN3Ry1JUvKx1HfnuOBFvhxqdwJH+pu+PIy+XIWnTE+NCpn/B3HNlxpJZnffHxqU75
z96x+F1zT/WOWyd/vj7fZuxc388YcMT6FQMU//I//cwb5+mHkpL2T3/Xfnrlq8zINdcd5MzMGnEOgLZpGttut48iSHqeB3b6rzrAqQECIhh1Xdt8Prd/+Zd/eSRP5AWJePmpAatzKaHoEj7pEXqsrusQqfPRQeSo0SEfdfA6V9ulaH0qN99f/T0G8vpdbU7/Me7YZMqfT0EX+psHCk9EdV0HWRIB9CkCqn99gDTNjs9wAUoqK3Si6Rr0PZZHrfJTW9dVNi8vvxOWBgp8nafk7nFKJ2ZKnip/LWq7jEXtgnMYR2ySEeuXEp0nZ7UHb3candU2Yr6lffff9Xra8+PyeNWVLysdR35bjvQ+8BSOpJ3+oN9a3eo48jI50myfZKQcCe6rnlRWHUdeBkeeK8/64uMYMMQIhUGoE6rR6vlqACpYflcD8NG2GHDSngfyU5MJDzTc5bP0T33eIRWA9KFJNcRerxfy01Ve/h/X4Uzkrh96GK5VR/TpAgoGvV7P8jwPgAJwhsm1fAfQ2VCDdgaDQWunKc03J+JH5EEjEErQCqTeydUuaJN/1EX/yX/X9zroX3VGnaxgN+q0qju1PY1I+gkHMtKJjLbF9fqbjySprSkQaL1KNpwTi86hJ4qSCr8hKyYg3g4YbyC5QxOawqq2ouPhO3aiulDfQa86sarrOuhS9aK2o+Skfqp2SF98HrvKX21NSU8ngYyL8ai+dPLiJ7p6nvZP8dFPnjlPx+ntCHl5+9PrFeO83Wsb6Fl/V1uk6Ng/RShdiZeOI78PR7JJAdd+LUeG0TXWwr2OIy+PIxPs5hBA0Bs3728dR14eR54rz7bC5SMVFC/EGGBnWRaWUP0Svnc0JQbflgKVCpeHbDEWv8zLtTGQaJr27ksc7/V6NplMArBj8DgJMlFQpW5A0Gy/5E+6AQ+3ah/0Rkg/0/c0S1sOpIahzqH18U+NlMijlydGxMPFPjqj9dEvXtTIP0hQ5a1OrKCogEc7mgqC7tAnevEPPapTeQdUB1bH94RHIULoJzUqK3VoxqSg539TgEfWZu13lCiIe9noOQpA1KFEzHX6LhOdwAwGg/ASUvRHn7BVlSuTGD+x0920+v2+rdfrAFia5qCTKSLECmoK4HrzxzMIHqwVeH2EEd/gPK9j3TpYf2f8XF9VVZCZ3nx4/dC24oCfJHnc8ViGTsAqj0u+v2pXHPeTJD+B0rZVXsjck4rW1ZWvKx1HfluOpE7146dw5EFYJydaHUdeBkcyIla4Yhyp11I6jrwMjjxXnj2lUJWsoOMFq0rlO5/5rp/1LhWBqIB8BEbb1N/I3dZIFvUBShptou5erxeADJDY7XbBgTabTWusvFPDExuEw/fZbGb9fj/spKTKph+0w00R4GZmllj8+Rs1LIoHd3RkZq2cYAU2BWWAmTGqoytREU1T4PHGTVEC9KkmnO91q2Sz2WxCNAo50LaXRyyCoXqJHde61Ca9/HBCdc6YYyq48bJOjWgRKeU4kw+iWaoj1TvAqKkl/FNfwIawYYiW9r0f+iiw7uxjZq13saD/sizDTRtgTX95p0lMHh4Y+QcRKUDHQFzlDVGpT/mJKfryabpqI9oXjcByPf1CNt7fkD3X0ieNrsVWjzQ9CYxClkpwKku1PSUmHTu/6e9+3H5Cq89zfIpQunK6dBz5bTkS+epN4ddypE6yVfYdR14eR1bYi/CCmYUbXP8OuY4jL4sjz5VnTSnksypZlaGd9ESDIaqAFCBVAB4Q1DAQtObK4jxme+Pm7dIedFG4dzSz4zKvkgEPbabpcRtaVgnMLOyYtFqtwnclP2QDqBDFpO8qEwXtXq9nKf1Ok0BsOBtyUwDiOiVglqV9rrNGxRRYcQQiKRg6x3TSrvrxEzUdP/rXqAv1aFSQOnUljgek1+u1bbfbkALAeHXsCvrqXLqqpxE1Pc/bn9q6n+B4kGNc6tRJkthwOAykkWVZePZgs9lY0+xfzjmZTGw+n9tqtQqpNR40tA+azqLEpn7ix6IPc2dZ1vIN7EkBB30AhkR0IYnxeBzARx9MBwjRn6Yx0F99R4najU5adrvdI6xAFoAudq+TLsUZHS+yY0yKYdgLNqKyRM7YGXXoZAlfpB21P/UH/xyA2pCOnXoUt3T8Kjcdu07YtI+Kz1q0H568lfy78vml48hvz5HIwm9w8TUcqaXjyMvmyJ6kACpHZllmo9Eo2HjHkZfJkf5cLc+6SyGDM2vfmXsH08iY/o4R0mEcjd8VFGLOrf1QYzGzkI+tkZKY42vkToFTt1Tlr+6IBAHhQDou3lauTq136kpUvOcDoKFO2glgwJhlQsxYe71ey+ExajVET/Aa+SBapFEN1RkGrOBFUbnruRp5hvP5yQAAIABJREFUoy1NG8FZAJRYRKWu60DWOvHjOoBPnUBtxU9ukI8u22+32xB9Qhbk2+u7TbB1BTxAUu2K8eo2swAqkcckSWw+n4cXZ+p7atI0tclkYrPZzMqytNVq1Yqg+ufwNG1ACYWJg+oEnerEQEld8/3ZCne73YU28RUiypCgJ3V0pECkUSH6zTnUrRFI5DkcDlsYgBx0wxeddGJrWqf2CZLzN5e6sqckpO36iY73L/rCJFHloeerLaFHjSwydsVC+qATVMUwrY9x0L4SEL8plp7yF/rZlS8vHUd+e46s6zp6s2T25RyZWHsS1XHk5XLkjvmW2JfKmz53HHmZHHmuPNsKl94ZIhyUqSSgg/JkgHI9wfOZpVraImpAaoIqGyOhLp1MqpPpcjH9Q/lE/NUI2eEIAOOzLifTN1IcMFIUTztEEc0sPDhJ38hP1UgW44MoKPSBdjQPmePIXIlMScBHAWKRbCVa2qqqKuhMJwMAi6ZAAKhch17UKVUPGDO501xDpIi21YmpXx1SbQ/5qKMBskrGRMrQqYKdAgjyihGrghN9xjYeHh7C70VR2GazCe9B45rhcGi///3vg21MJhO7urqysizt/v4+RKBpV4kUu1ZQ0Jtr9IkfrFarR5M8BUQLx/bHR6NRsCGuS9PUNpuNjcfjMFaibQrK6J1JGvKjbxpt11QCzkPPnKOTQ32WQImcCRV6QQbj8ThsP037noST5PjyVNrTvHa1A09amkqh4OwfXgcbsFeupW+eSPis7Wo/GA+RTPUPLT466QmTwm/dDdfXlY4jvz1HchOAfT+JIw+uoxPHjiMvkyPJLjJ309A0Teu9bx1HXiZH+uNanvUZrlhR8FSwUsH7O0lPJihCo2L8w8n1GSQ1PgABw1WjVkD2Y0F4GM1kMnlkyD7dwN8Fa0QFZWIUAMRwOGwRVFVVtl6vw3eVjY8OmJntDlFBooZ3d3eWZVmIcmDUeZ6H/uFo6IbziGDoGDTyoeNTQvJREY3QqRH7CIPKjL4o0CuQaX9wsO12a5vNxjabTdgS2JOnj4bweblc2mazsfv7e1sul5amqY1GIxuPx5bneYgiQiTYigKI6gdbUUBXO0Xeu93OlstlSKFRwmHyQfvYCqkUjDtNU7u6ugppIjpJUp9TnUDK2IK/sfJEgz/hewG4D/n/TKSIbmp6j0a
dlVC8/yNDbwd8JnLF5ClJkhBZpg/4jk5e1VfwNZ4viUUIPf7QL8UYJoXab7/rl+bEM16izWon6JpJnz53om00TdOajOrEXP8xbsVaZIjtaoSVa7Qt7aMfP7bEuLrydaXjyG/LkUySdrudDYfDJ3Fk6G+WPhpDx5GXyZHWHJ8NpP/YFLjeceTlceQpnDd7phsuGkaRasR0lO90iGiXFw4D1+s96CmxmFkLnCgKZpoL6wGZ6BF1cG6/3w8RIwyv3+8Hp6b+0Whk2+02jI0onbapjkR/AH/u7olEonwFDhSohs7n8Whkd+/ubL1eW7/ft/F43DIg2sPIaUuNT+WqUQ/aR7+a345xaaRIZV3XtW2323B9mqYhMqUPlapRewP2kwcmDByHeCBmQAtHZQwaMWJ8gNBoNLI8z225XNp8Pre//OUvwbmHw6GZmY1GowDy/NMHvj3hIzdSL9DtbrdrRcDMLNRH3waDQejbbrezxWJhWZbZZDIJYFMUha1WK5tMJnZ7e2t/+ctfAqihd78JCjIFiAFgbICXZ9NfxsZY+oN+qEv1DoFoZNPbqurVT2YUA6jfbD/JImKGHnq9/VbN/oWoCqZ+MqoTSvTAzmn4/na7DePk+RVNp0JnmkKjeKV90LQkiAIi84Snz5l4v1I8VfzgXL1OiVPJVH+H9FXGYKaXPQW/RJaxiXdXPq90HPntOVJT3bbb7ZM4Uid4HUdeNkeu+kcuhBN0ou/13nHkZXHkufLkGy6NCJ363QsHB+ez1mN2jLogWE11UIDVO28PvGYWQF2jdHoO0ZTxeNxSclEUwakxeKJlHnD1zhrHUaMGTLQPfhx83+12YecafTiSv610hPogqyxrOZg6r/aFKChF7/T5rsbOeFSHntQ4h/4DNrrETVmv1y1dmh23Otblaq8bxg4JUedoNAoOpLnf/kFljYxxTq/XCw+3TiaTACir1SpE8haLhb148cI+fPhgd3d3ttlsLEmOW6Zrn8Pqj+hfI1YACn2A8Jumsdls9miygQzRyXK5DHXr9r/v3r2zJElsOp3acrls+aIHf/xB7c1sD5Cr1cr6/X54SHm5XIZob4hGpe13yCgJKEl7nyeiVVVVAFeVmdnxoXYmANgFsuD5Dt09SvPytW3GSeoJ6Q7Uh9yxyevraxsMBq08dbVBjplZa3z6N8v2D0nrBI+CDQ6Hw2AXSsKaRsP5yEwx00dhfdRRJ+CKP/gEstNrFDuxFb5T0BN994TTlU+XjiO/D0fS9+Fw+CwcaWZWFqX1JZLeceTlcWQlr03huUduBHUnyI4jL5Mjz5Vne4ZLQZZOKBBqbqgORDusd/Qa/dJzOOaJA6UpgNEn+qUgqIRVFIWNRqNWHd5JsiwLu8soWI9GowD0AGuSJMFYtF1IRSN49FEVTnRFwZBrkyQJ0QYzs94BvDQaQV+IRnAM0FUZ6vlqvCxRIy+vB43KcY0Sri5Hx/QOIABQ2BBGixMQiUS2RLaqqgp9HI1GtlgsHkVpcW4loRjBDAYDW61WNhqNbDqdBnLu9Xr2008/2Xq9DmTy+vVre/v2rS0WCzOzkNe9Xq8DoPzhD3+wpmns3bt31u/37erqygaDQbA3ojaAAgTloyUKekp8RJiaprFff/3VfvrpJ7u6urL379+3InLYIXWVZRns9ePHjzadTlvECyBNJpOgG+9n1IvPKCjqBK+u69YD/ZATclX/U3vyPu8ncDqx0QmP2hg4QqqBkgNRSraoxifwA8aH3fi6+K4RTaLu2HtsckmUTycJ+h4RJX+N4NE2RX1bZaMRf9WXTqKxHerw2yhjJ0rSiqHdzdbXl44jvz1HIhPs/ikcqbsCI+eOIy+TIwOfNO3nIH3fOo7sONKXZ3+GC6FzzAO+Cm00GrUGh/I4FzDDUHWJWonK3zzojQPnUK9uw0ndGMR0Og2K5Px+vx9SIqqqstFoZPP53Mbjcegn48D4WbrWu+jxeBzGzZ38arWyq6urICOWhxkLhoTiWeYn79fMrKr257x48SLka19fX5uZhQiHgj594nqcSUmBPqphamoH49bxeeJQWaAPBVNkT0oFS8oYPjLMsv0Knjqu9gcnoj0ciogZoKaRGyVMiBs5syRO7jEOrw+Gz2az8NArD5SSI7/ZbOyPf/yjvX792v7+7/8+1Ec9jI+b5qZpWnpnzJpLXJalrdfrkBqDvADXP//5zyEKiK8AzJ6ssdOrq6sgC6JXu93ORqNRS0ecHyZztk9f0Bt/7EztgoibkqdO9nTiwT+uZyJH3TrB0joVX7AP+ooM0CHPp9Auk0P6MhgMgmwBfZ0QK4F4EqLPfvIKAWs0USOE1KVkzUQTn/U+pUTLNZzDOGM3yPgq9Xp/BifN2mkRGhVUXOzKl5eOI78tRy6XyyDrp3JkfZBlv3fc6a3jyMvkyJQb6qT9ugLS7PR5qo4jL48jOSdWzt5wJUnyP5nZ/9s0zcdPnBeEiQAU5PU8vSPUu0glAr88icAV5BGYKoKl0abZP0QJkCghKLDRPyUpVRIPHRK1U0IqiiIAro4d4WNsOIGmM1DHZrNpRTeIyCgJKKi0DO+wMXxVV8G5cHj6iSx5KFHBg6VPTW1QQMBo1DEZG/ngGsGjPY2gUDR6RnQOXWiO8GazsZubGxuNRrZcLkPf/C5bgEuvt38nB9EglpiJ2ul4iIBgE0Rt+b3X69nV1ZXN5/MwxvV6HWyTdhaLhZVlGVIJ7u/vW3ZdFIX98ssvdn19bUVR2PX1tS2Xy+DkSZLYZrNp2YYCKTbkozAQL3KAeLGH+/v7lt0BZuhQbU8jYLormPqqRl/b0dtjHjQTAAVUfUAYIlJSQv5+EqhtEcHC5zQyroBOe2pbukpA24o/vAuIcTMhASewY/qrslL5EjXWl7EqrlAfPq0EyVixYyaFOhGC3LV9xQAlcIrWrfqmfcVlPxnUqKHaDbL09tiVfek48j8vR+rYzexJHNnUxw0rwPKOIy+TI0kaU1+AD/W5uI4jO4705ewNV9M0/3D26sfnB8XrnbqSDY6jEQV9QFHP95EmwIx/SiwIOM/zsJTe7/dtMpm0AASQ4a5ac2N1+ZD8dDMLoD+ZTMzMbDabhaXxJEmCUWFsLMkCmDzcOBqNWsDIg45ZltnNzY3N5/NAhGYWHiBVg8PQY3fR9EXfXQEwakQAoDBrvzsEkFIDVqNXwMCwcDrVCf3DadXx0SvpIoD2YrEIKyeApC6rowvkQz951giwB6j9jaTmuhPFgXxJRyA3HT1ix6PRyNbrdZAtADYYDGy5XNp4PA7HSLf49ddfw4OnmgaELbJbFNvdAs5pmgY7QY9ElubzuU2n0xZh4AMKhvgeYKhL/ICaAgdki140Eq7pPWYWonu0RySa39nKt9frBV+kTnzePzBPQUa0qzrQ70RWsWMPlGYWJhuAvfpPnufheFHs36XDDmvYAv2hHYhAfU0f3FbC0xe74iv6YDB1a/oScsQmaE/JkXGqH+vEQ/EVW9ebZfV1H2TR37mG9hUTPIFdeuk48j8vR3JDqT7wtRzJewiRaceRl8uR+mY31QHy0zF0HH
l5HHmuPHtKoZKCvynw3zFkPpu1X8CrwMdgYiktOnicBqWRy212fKu97yeOosLUO3VPAoA2AKLRDRwLA+Rf0zQ2mUyCsRI1RGGawtHv98OyNG2orNL0+CLGxNqpJUQA8jwPedt8JzrHuIkSqL58tEKNWMlFo45cpwSGrum/RlnMLORGL5dLe3h4sMlk0kqVVGdV0KzrY65wWe7zrXu9Xoj2AfyQsI6Dh5KVFHFy5IpzbrfboI9er2fT6dT6/X4AIjYQePnypY1GI3t4eLD5fB4e0CYqiSMiI6K+9Im6lWwBIAWf8XjcyonXGyT6jGz8iy/V1hkr+tKorAKYgjNjMrMwQdhutyHioySGbdN2nufB5plkoQudiNAfiJ7+47MQsJ+M4jsQEBMNxgqZp+nxAWfGyEQmyzK7vr4O2yerDaK7NE3DhHC9XltVVbZarez6+joQKH2jH+hPI8v4DXLBzsEafSeLJwbkq+PXvqq9gBXYBgW56oRE2/N4jM6p41OE0pXTpePIb8uRTESR/VM40hzudBx5uRxpgoHYOoEBfAnu6Tjy8jjyXHmWGy4FD777zqEQCsDDwLjD9aCkgKZgp+0BrDiGX8LXKBCEpAQFqAN4GrUhegLwaiRJI1YsX6vTNk0THjQGFDEc6qAtjEqXwwFTcnKJdBIRM9tHEuu7ZSA8opBMmmkL44Fs+U11h+HpMwYYvoKcRgEYl5KwtqNEqHKA2IqisJubm5CzO51OLcsye3h4CO1TF9cBplVVBdCfTCa2Wq0C0X/48KFFtn6yg61sNpsQaWUpnTp1uZ5xoX/0zZa55DoTcSUffTqdBv1VVRWiyRqxpm8KAEqmukTPGDimuizL0l68eBH6PhwOQ194aSTAoJMfCENTOuq6Dtvx7nY7q5v2ygbj88v2mpakaT96I6eRRp0wMl7dKUojj5zXNM2jVCVIx2MMdWr0lEgsflYURSBgbVttBv9irESaqRvfTZIkkI6ZtUhQJzikvphZ0As6VkBXn1Ic9asXyAHd+/PVnxWXaVeLTuzBZz8Z6cqXlY4jvz1HMlkfj8et1bOv4ciAzXXHkR1HHvuiHKmrHMpjHUdeFkeeC0o+y7bwDBZjpMNq/PzTTiIQFQ5RD8DSL7VqZCk2eAxQCYCldBxYl/c1ymF2fImfOhVOQ92Mg3qU2FgJMLNAGDgrRsndvo6fvkKmXAuhKYHu/x5fDofBILfJZBIIgaVlHwFUQtXIHL8BmMg3BshcQ/t855jqlnpYWmZHI8aJvtfrtY3H4+BU6CZJkrCpA3aCTHa7XXgHB3XQR40EaVQWGeN8gDDpD4Ae9tjr9ez6+trevn0bIiVqFxAcOgXEFeRWq1VoDzvxfeVcjWQxRh03RW1CCWu1WrX0QloJzyZomgFL9ZoSwLgDkBxMnBVV/AhfpO+MCzmXZWmTySQAftM0tlgsWs99aGQrSZLWREpJAMJXPNCxA9T8pZ9q+5AmMmXM2+3WBoNBSJvQSSGEw3mKERqBVPImioq+ISn1ZWSnkzr6iq4VayBjxSD8DFloNA9f1O86eY/ZEW0hA+qn6MShK59XOo78Xhx5fFEuUfmv5UiTCZ7eGHYceXkcmWVHjFWOBK/By44jL5Mjz5VnCVfSEYShgtJOck6WHbeDNTuSgUbUFKiVQPx3PYbhaGREz8FguWvWPFnAWCN9+lJHnBiA5E4e4+Y6CFSdjnFTD0BPu54gMZqiKML7T3RZvKqqQAC7A1HQnhI1ctxsNmH5ViNRAAVyQgc4LoSiUQqNEGhURclE7YL+0naSJIEgkR2RKF0KJspFpAZgQgbYCnJ/eHiw6XRq0+k0bEOLnWEfOklgskOf0zQNBEedi8UiOODNzU2IDtLnoijCzkfIXseJnbDNLORP6o2+s0H7yTmQD/UAgrPZLDwPQQpFDCj0BlpvoBRwFGyJNHENUWlkYmbWOwApdgogT6dTm8/nZmYhL520gfF43HoomlQFJgZFUYTUAfwF/8JudXzj8dg2m00rGob8scuY7VZVZePx2K6ursIzJ1qH2jj2gS551kVthEmD2i6EjX+jS8UtlZ3ZcVc4TY9SvMKH6BORax9VRpcK/vSLvz5CqxNptVsf+cZHPJ535fNKx5HfgSPNQj+egyMP2uk48tI5cpDjqGZ23CEUnTFu7LTjyI4jKU++4WLQ6qQ0qoNCWTgHoKUdVuD3RetUxekdKHWgFKIvV1dXtl6vw3kYEMrA0QBJjIWxKWhyjYIThq5pBRAWfVXFmx23p2Ss2oZGLhmXEkRRFOEZLmSDAWiuuy7HaiqQRmNoH+PEgQEMXe5nnKofJXlPdKojnMsv+Q+Hw0CuRN806sbYAE/a4XpIh12bkmSfoqDv26B9xseNBWMhjxkC9ZGoLNtvEfvw8BAiNNg5xJIk+yXw6+vrMAlBx9jfYrFopcB4+8AG8AvSN3a7XRgHBLZcLkMfyJXX6DTAqxEgQDPPc3t4eAjncR2gNxgMwkPsPnI7zPOQKoBMeTB4uVy2dI+dMy70PRqNQhuqnyRJWvnZyIUxQMZMPoiI62RPbRRbIt0D/y6KIuhWVwsUoCEBHQPnE8VTIKcdZK2+rH5GO7ShKWJq2/iOTojLsgzRQfqhvqx+o5NVxRd+17oZv+qCgo3p9658Wek48vtwJH6iq0hP4Ugzs+owme448nI5shCMZ2MWdAivcXPUceTlceS58iwphTSK8pRgfKQAY9dlSwU2lh+pl4Hyu5KN3rVTL2ChS7lmx7QD8oo1UqYAiLPHiIpzMAQMhn5p9EmvgQw0GnDquBKuRreyLAtAt297X8d4NLJSdmPCsBkDTshxZEWbamRECNTIbm5u7O7uznq9nt3d3YVzNTqm0TutnygYY63rfSoH4KjEojoA2DSvHtJnXLzEcLlc2mw2syRJbLFYhNSJFy9ehIgRpMpkoCzLEDki/5qIoUY/0APfb29vrSgKu7u7a+V2QzzUD0g2TROO8zCx10GStHf/UpkwXo0OQliDwf5FlCznQwrogLQIjfiQpkAOOfpL0zTUxVin06mZWfBRBUn0Sp9JBSAKBYlxHhE+zieXXwFZdzxjQkD7+MByuQwTRcUHyBB7h2Swb7VnJUh8z0c5zY7Eq22hT01XgLywE3SmPoXvYRMU+q6RScavBKMROB2PTjy0eJ/jfMal44eA9IaAttT3FJu68mWl48jvw5Hc3CVJEnaz+1qOpFR11eLNjiMvjyPVznWTCfTVceRlc6Se48uzPQGNILVRjJBNHnjoTvN+EYoKTw2agSmZ+MiOWTtnmrY5DmgQxer3+y2lI3CiNrpEyWf+cievUSx1diUr7SN9A9wYK7JhdxsiXACQAi0RFDML29QCDhiutsmKBQ8RsxuNGj0Go2SMnIbDoY3HY7u7u7PZbGYfPnwIwED0C115+Sm5Uj/99QBMHyAQ3XkJ4CVqpNHFyWRid3d3Np1OA8ET2YKMiaSpcyNbxkg0FyDRaAyg/Pbt20AUpDoANER6iLTpw6I8bAqwcr6SJ8c1NUCBR+2ccRCtpO7xeBxkjn6IhpGecHV1FVIGs
GFeSkm0W6OJCnzqT/yeJEnYAStN0xClJI+c9+hAvETMIB8ikGbHB2HR/3A4DHn2mgJSlmVIF1mv10EeyA5/VP+BUElb2Ww2wb704Xf1ac1x18iv2ozqCV9WfVGvn3RqnzQyzvg9fvgJO/3iZaf4DxihbetknOuUEHTir+coNioWq0105ctKx5HfmCNlwslE/6s5UrAYXO848jI5Evuypr36qTc2+HvHkZfHkefKs24LH4scoQiN1NV1bYvForVsqBET7TzLzr3e/v0RAJa2RfvcXXPToH0AkGJgj+DMLLyjYjqdtu5y1bEwam94GAYKwYk1WgiRMlZvyF6hOKreSZtZK6WQgsMjU81FTtM0bNep+eBKdDiM9oM+j8djm81mtlgsAnCpYevyP47sJ++67M1fTW1R+aITyDvPc7u5uQlAwIOZ6BtSxvnfv39v19fX4aFTnVwACrPZLIDAbDYLOc/sPMQ7QsyO271CxAr62CR2x8spGZfaiZIBgER0EXny/gtsj6gVETXsbTQa2Wq1sjzP7fr6OshPJ2HqA+Px2JqmCeRBtJKo6mq1CjYACWADOmnQSCo6apr9w+68FPP6+tpWq1Ug5Lquw65HfqLmCbZpGvv48WPQMfZ4c3MTiIMdkUiBwBaURJUMyJPnGQPO2Ww2rZQdzqdeZMCKAxMtdrXCb8ys9RA9kx/GoxNc1Q/PXNAO0UH1Z1YkeLicKKh+Vj35G2V+10k6GKYEE7vB1uv8OV35stJx5LflSJ8mSB1fw5GUujpGuzuOvEyO1FVa5UiPyR1HXiZHnivPti28grIOQKNPLPkreBAF0vd8oFQIQXcQ0vZwbF1W1CVF3d0Hw6NPCpb0F0cCsABfzqFfvd5+lx8igdvtNkTKNEpDferQZhYcgndhAIS6NKwpFIyR/vptunUyjKz0oWhtU7cK1cghE+mqqsKOM0mS2Hq9tru7O7u+vraff/7Zfv31V5tOpyHiVdf7h4f9g5v0geiRkrJGN4iqcJ0+HEn/ifq8fv3aVquVffz4MURi1JF4UBTy1iikAh+AB4g3zfEBZLW5Fy9e2HA4DC/bxPaIFs7n80eRECJTq9Wq9RAt6QmAOdFrjdqoPjWqRcTNzEK6SVEU9jd/8zf222+/hQeMqZs0BwUCIomQ63A4tNvbW/uP//iP0I72kVQGyL6x9u5pRNnW67UNBoMQUR0Oh/bbb7+FXZc2m41Np1MbjUatyDh56Ni8Eiq/F8V+O2Te61KWpV1fX4frJpNJ+AxRaxoLxKBEvN1ubTqd2nq9Dg8FI2/sWSc/+ryCgj/1xSYqHqxjmMhxfAZ5aJQUX9Q8dsUCzvF1M2Zkq3KmnzoRVDuJTdDVLj9FKF2Jl44jvz1H6qT3qRzJC9/RXceRl8uRzeiwfbgdV1vhHfWTjiM7jvTlWW64NAKFg2hUSIFR75J5+/hyuQxgQue5+0Z4XI/y+Y36VAiqEE3J0CiKLuUjKLNjXu5yuQzpeiiaOngvxWQyaQGF3pWbHVM1fPqFEoNG4AB5vZYxECUgQsh5GDd1QWqMib5g2ETeyrIMoIPBbbdbWywWwYjK8vgA4ps3bwKIAPDkyPOAKLJSp9YUA84hKqMOQI55v98PYE8fiKRqigFgolGoNE3DFrKvXr16lCcOCfz666+hferTyYg+DEp/yWnHcXkRo+Zwm1kgaICqLEu7ubmxjx8/htQLTZdhRQnZoXNepDgcDm06ndpsNrP5fG7T6dS22629efMmRMhWq1V4mJkJgE7cFAx7vf0D8tgC/QeQAUH8OvjcYUUV0Oe33W4XZP3mzZtgh0QVx+Nx8A2VaZoec+KxFY1esm0z/tg0jf32229WVZXd3t62Ipx5nrfkDjFoG4yV8eouXUxMFbPwG8iESQYEgt2qfzF5mc1moS58RSd6RAZVvpoGhR9g/3oTrJMQJS6NfCJHjfiDIdiYkojiN332qyiKOV358tJx5LfnSH3PD/L/Wo7knPFkbHZYIeg48jI5ssQeJcCg9szKaceRl8mR53jyWTbN4I5bj/nIGMdQAMvELBkDBhiXChlDVABAIAhBDcHn/+qSK+2og2jfVdlFUdjV1VWIwIzH4xDNu7m5seVyGYiEvmlkUJXFDQ3GAOhrXi0P9uodfsvR5eaKUjsC5XfAl0gYMtCIAVEjZH19fR1yjJNk/1Bmr9ezH374wR4eHuzu7s4Gg0ErugBIA9SLxSJcR9SJ+rMsCw6NLKjPzGw+n9tgMLDpdBp0myRJAGbGw3hHo5G9f//e1uu1vX79OshzNpuFvqEPcrb7/f07WVjunk6nYZlbSQ/w6ff7dn19HW5MAe/ZbGY3NzdhNz/smgkIBLPb7V/yiEywe/QPOTBWTQcws5D/PZlMwvsveFiWfH7SH8jP14kX9TKJQHc3Nzf28PBgi8XCXr16FaJ+TOIARmxkMpns5X6wJeyZyCipB/gJxIWu1e90IgjR8fD2ZDIJPoPfECVl8ok9MHb6qFFwMwuEMxwOQ8QdWafpPp9+u92GZwrwCX1+RSfAikv0HfLSSTR4pA+A43dgIf1EV5AWtkr0k8kR/1S3OlHVyTL90THQP45xLmPW6zVqp7+Du135stJx5PfhSE1DQgZfy5HI7vWr19bf/a7jyAuI4zY6AAAgAElEQVTmyI/5ngvTg3yxEVJxGWPHkZfJkefKs6UU6t0kAgNQUC531Wbt3URQJMbOgMza70cgGkJdYRCyNBj7x4OJPpKm4K93rPTBrJ0HjjLI5QU4NeqvJKW5o/yGIxINYOyMjfQ+UgQwBpz5keyT445QRPZ4UFD1gMEBfIxd+0CUQ0Gedm9vbwMwmO2jSkQyRqNRiGyxaxT6yrIsPCCLc43H45AWgX6IUqRpGtIMzCw8m8BnXerHCc0sRLDqura/+qu/CqBNSormLk+nU8uyzFarlV1fX1vTNCHKU9f7t6yjp4eHh2DjODq2St9IQ9Bt1G9vb20+nwfQJnKJP2B3ClyQrdoEhMzWw7/99luIHK/Xa7u9vQ2A9vLlywDygO1ms7HVatWyz/v7+7BF7Xq9tslkYjc3N/bLL7+E9BV8BQAMUZzq+A4PjhdFYR8+fAj2ANAyVkBbo+4AFPaEDrJsv71wr9ezxWIRUofG43HISecZkjRNbTKZBCLCzna7XcjFB9Rns5nd39/ber0Ozy0URWGvXr0K6TBgg4/8E3UkyqdRN8UKfJ9z1O88jlD8qoMSk5IO7emEGDvUibWPripGolP1fbBPU0zAX43yeXzuypeVjiO/PUfSb+3z13Iksu44suNIuDAxa3Gkpjx2HHm5HKl1+vLkGy6AA6XTEb0b5DMDZ2kXA0WBOI/Z8cVw1N80TSAMjW4BoBrBY5ceoi3cJeMACA8FKsGYWYiEaLSfKFSv17P7+/tgsP1+vxVBgfjou0ZRIEb6C6ABGCgcY1TA1XzjXq9nabIf6x//9//Vmqr9bgCNlPJdjUdLkiSWWGKNNY+uQTc6SaAN1YPq+FXdmCXWqpO+JunxQV/qNjOr09SSprHZoZ0mOW4JkjaNNYe+7JLEsrq2cdPY
7iDT2UEuVZJY71DfXOwQZ12mqa0SyeM3s6Rp7L2LZvB7laS2To/L0N6J7l10ViMs6zS13cEGdmlmVX2wqzS1uqosaRobiJxJ3Omr7BszS5hMpLbK9vIdH3xg0DTWbxprssxeH8ip3+ubJWY5QJIkljeN9SQVoElTm1SVFVlmo6qy/1YUluZDq9PEXu6K0KYHo+xqjMG0dntSsFTQ6fV6Np/Pw4SE1I3VamVNc3w2Av9iguCJoq73zz+QTkIkMfjBgSzQG9FW/G0ymdhsNrPJZBKImIeAN5uN/fLLL3Z7exvSHZj8EP3WiR/4tlqtQtoQvqx2gP8TfVcQ1okj+EfEk75BJqoHjepT8EfIG8LyBOhTHXQir36iBKVtKMGAZV35/NJx5PfhSOSMrzPuXq/X+h2fYRLGjQ39TISP/uF/+d8srQqz5oDTiePIJMKR1pg1Bx1bY01dh5fmmu25Mjm85iVN0ihHpgf8r+va0iTdn3+AAniW8dRVbY0dbjQbs/LvjnJpmn1fev1eaLtuamvqwyrroR91Ve/HVh828rDGmgO3W2NWN3Xgdvgt2RN/6FeSJmF+UTdtjkyTwyppXVmWplbVdZBRVVctvNINwsKxw1xC5xlZevCBqtzL0Y433GVx2JCm3wv9Qe5N3VhVV2Gegg/Sv6Io7P/Oc0uT1Hb/w86a9LhTIyuEGgDA7juOvEyO/O96w2V2TE/Qu08a1jt8jXwAsv7OlvQAjIQ6EABGpQNjiZsHLbkeYCWawR0+USSExfmMgwihph/M5/OwbA4pqBMRkcE5dOxmx7tnlskx6CzLwrsgrq6uggERKVIi1PQSdDr8/evnUOF3KYn7HJvK6TmHexDF9NY1nPv4rQvxY2Zm5Ynj/nyflXsqS5e+Ua/WX8k5MZc8d4z+ZJFzvRPrwnYS+T2Tv3xuIufFyjDPbddrby2ruxvd3NxYmqb26tUre/fuXXiR5Xa7DekU+MJisWiBY7/fD9FMoq28KJKiz1MMBoNQP30hVWW9Xtt0Og3pSHd3d2GLXQiOrWt5XoJIZ13XIcILhoA/WZaF6DKkSJ/YJMBH9jTYAQ4R3dfJJpikMuF8sERXG8yOE3ewleN5noeJtJKTTrwgIg2yaGRP+0X7bNzQlS8rHUd+e46kv0TSWU1L0zTsJMjzKqSn8dwKKVYhze4w8Vte//jtjKYr/2nKPHIszbLwnBo+yUoTONlx5OVxpK7O+fLkGy4GQO4sxqLLwXSSu8SyLEPuLxECs70xsWTaNE3YtWW5XIb8dB6URHhFUYS7eiKAZu2IIZ+plzthTT8gmrharUIkbT6fh8gFD9LtdrtAaIwR59KlR+7geWP4fD4PxHa8aUrCzjoomPc/MD5IFlmRw/rL//F/WX8w2G+H2u+bNY3dP9wfDDw/EO/QsjSz2dXsEGEs7P3732w4HFk+GFhyIJbisNtNfRjPcJjberUOOfcsvRdlaVm6fylfY7Z/aLrXs7Iq7erq2nq9XtjmtK4qG0/Gltjh4Ubbr8CMRiPb7nZWFoXNrma23e6sqkqbTvbOu9lurGmOL5/bbrf7nZUOW/WORiNbHyI8TV1bdnDe6hANyXqZNfWemFfrtdWH9Irtbmv1YSVwu9uF3Xvevn1rvV5mk8nUBoO+NY3Z/OHBBnlu0+nEhvnQ3n94b3XdhBz45WFjkeFoZGb7aF14QLkqrWnMRqOhPTw82HK5sqauraprGw2Htit2VhSlrdcrK3aFNWaWARJmlqbHSVFV1VZVpQ36A9sVO7u/fwhRryxN7er6ysqytFcvX5mZ2ce7uyCXXVFYWRbW7x1y4Q/2OR6NbL5Y2NXVlQ0GA1suFnufSo7bxC4WC8vS1Hr9npXFPqpVVpXl+cDqzR4A8TV84Pr6upWmsVwug00XRRFy3bEnUj6Kogg7mJHbv16vbblc2s3NjSVJYnd3d7ZarcLuUxoZJC2C6FpVVeFdOK9fv7bNZmNpun9QuNfrBT82O0bNmACORqPgYzw/AZEC6OALmKMTaCZvyBHsAMhJVdIHy4lmssmAYhOkB4YxYWcSToSxruvQpkbmwFwf/UdnEDD4QoHkOFcJpytfXjqO/D4cyaYCi8UibBpwf38fngHS3c24yWQHOW7W+Ps//8P/advyGAXPh0Nbr1dHjkwzG+QDK4vS0iy18Wjf5z0/96wsK7s+pIHBkVVd2WQ8CX1HZ6PxyHbbXXg+brvdWllVNj1wFjjiOXItHMkqCPJJksTKA5dkvZ41TW1Xs/3z2lV94Mjtzup6v+qw27Y5Muv1bDqZ2GCw39784eHB8jy3yXRqwzy39x8+WFPXgSMXi4UlaWKj4SjorixLm0wnVpWHTS9GI3t4eLDVYUMYVizZAZJUR7PDCl/N6lZqxW5nWa9ndVVZWVU26PdtVxT2cH8vO0umdn21DwC8fPXSzMzuPu45sm5qK3ZF8MWi3K9a4lML4cjFYmHD0dASa3NkL2m/V44gA3zUcWTHkb48yw2XbtfI4HAys3a+Jh3H4DAslKxv4t5sNuE9EDxUeHd3F87TAQLcLBNqbqUuy3JHjuABX4AEZdJ38tqHw6GtVqsQXaA+riFvvWma4HhJkth8PrfXr19bnufhAVSMcbfb2YcPH8LuQ7r8m2VZ68FHriGvuFnvLG1SW3+4D1GLq94+z7tvme3KypbvPu6NZL0LkYxX4/0kvV5t9zdQo0P6RLK/2ZnP57YhIjAaWdk0Nr25sc3DUVfp9vCOkm1paWWWVZVtPz5YOhpZvVrb9hAlucnHliRmSb1PP8kqs3q5sdUhP7pfNFZuS9uuVrYr91vW9svDsm6xteFgYE3Z2KBsLEv7tix29vDwW0i3ybLMfvjxxz2BlY0Vm6UNDy+fHFlm5Xq9f9fHtrRJ2rer2z3BrJvU+k1q1ba02+HENpuNzd/8Fh5G7hW1DbLGBqXZdj23frF39mq4j34WD/uI0rDZ33zeXN/YmzdvLK8Ty6rE/vznP4dJ02g0svKQvlMvN5ZUlSVFYZOkb4tie7Czvb2XB0Cd5rkt53t5v3rxwsqytMWmsNvhJADNdDyxzf1yP1F5+37//pUP90fbbhqzqrZitX/B5WCwf6nju3fvbJim1iw31hS13Y72D3J//PjRNofta+vl2tJezzbVIa+9qqwuS7PKrJemVpodb34PD4+/fv06gBwTJgCx1+uFiLGmUdFXMwuTS8B2uVzar7/+anmehx2jFotFmJwBcmxJTMF/2OQD3+RZAk0bUrwwsxCJY4IKSbGKAMEQWSe6yAPQRP118wImoOBO0zQBC5bLpY1GI7s73CgzKSH9g755wIfQkKOmQUB64JimSyj5K8Yp7mlahf5GFLBLKfzy0nHk9+FIbpaKogj+TmoU8mQSx8rWaDSyH3/8sYUTZVnaJMts2k9ss9nafD633Tyx/uH8ptlvsLBZPdgwy6yf9S2vD5s81FvrVdU+ZW3x0fLRyKrtysrl0pKmsevJHluSQyQ9SytLtkvb3t/bdDi0KytsXW9ttV5ZL9lPfnM7vKbjkIa2TkobJ6VN89SWZWHrDw9WkZK
aZfbj7YEjq9LK9d5ePnz8YP2eWble2+ubGyvLrV0NUru6ut1zZNZYv9dYVW2tP9unlm0+vLH0wJFmhQ2TzKZJacVybddW2Ga3scFm/3zadr1/rmuU7W+Qb26u7c2bNzZLayvTyv785z/bwuFwnueW7JbWryorysImg8QWu/3E1urjLnf9ft9u8tyWy7llWWYvbg8cWW1sNhseOXI6ts3q3saDgVUf3trNzY3dzz8Eu8ubxqqmsnK9stxx5Isstf5uaQMr7OZqFDiyYIv37f69X9uq/Vw9E3/spuPIy+NIromVJ99wAXaAPXeldFiX6RAsOaa6vMq5kAlgy4Oa3LlzB43QmHizlLvZbMKOLUSxMBwMgJx2lIPB8mZuSIelVoxHH3RUMkLhjJsoATvp6AsBs2z/QOarV69ClIKlW+7iieJBIkQYiPgQjeBhZM05J3ownU7D7kA8aFyW+zeQk2PPmMbjcdj9aDab2XQ6tX//938P73MgcjYY7N8pofUSITTbO+54PG5tqwuQMC5eDInRrtfrsMxMfwArokI8BFrXtd3e3trd3V2oi3dm1HVtv/zyi1VVZa9evQrtEhnabrdhwqEP7w4Gg9C/N2/e2G63s59//tnu7/c3stPp1FarVYi8JUliV1dXZmZBrsjn/fv3Ybl9NpvZbrezt2/fBlDZrx4O7erqKqTL8GJGorjYAw/A/vjjj7Zer0MkkwmW5m7zcsGffvrJ/vSnP4WoEb6SZZn98MMPtjus7JlZAG7sCZDmZpYomeY56/I+edvY9cePH0PEiAja5BAR1QfLIZjVahXy1pEtOsev9RiEALjxDpfZbBYmT7rjErpmInp/f98aAy8pXa/XoS+bzSakfkAM5KuT9gQgM3aiqeAV+IKcvO6xeUgObEM2ZhbwAJl53KJoGgP9oy10pn+J4ClGc76vC//U38DxrnxZ6Tiy48iOIzuO7DjyMjjyXHmWbeFRNh3X5Tq94+c87j4BosViEUDbzMJL17wx9Xq9AM4QFFEsgJ/VHhSOMQN45KLSd66DKHSZPkn2O+mYWdiVJ01T+/DhQzDu29tbe//+/V6YveOWliiOupEDeeOAObnjRAfY5agoirCNLHf0P/zwQ9hFhqV2HIc7a+TO9p6QbFmW9vDwYOv1Oiwxm1nY/tU7y6tXrwKoqQMkyXHXI30Ikd1vdEmcyRnPBZA3T14wEwK2mkUOuq1umqa2XC73y/qH80gVmc1mtt1u7eHhwTabjf3ud78Lu2JVVRV25kEub968CWQCMLEDEc7+8uVLu7q6so8fP9rHjx/t559/ttlsFlKC0D85zGxZy1I9bZuZvXz5MkSDAAPsGCJBpkRIqAd/QvcvDitdb9++tbIsQ9RoPp+H96f97ne/sx9++MHm83mILKOL5XIZHjx/+/ZtSCcAJOq6DikIkAokzWSMyQ72ohHy+/v7MFHBx66urkKO9Gw2CwRAqsr9/X2YOBDdK4rCJpOJvXv3LpDW3d2djcdju76+DlvAYpek07AKoGkJROrwBdKGWBngYXy2CWbChB+DZ2b7CScpT9iqvkcGO9MIINcTrdM0Kgr64TcAnMk5pIa/IXclGNJXwEoIh3rBQSb0mi6BD2KDtMHvGt3EP7vyZaXjyI4jO47sOLLjyMvgyHPlyTdcdIwlxabZb13p7xIpAIwuYZKTPRgMgmNnWWaLxSJcA2mQ3gAh4XQYA/ml/KZEROQJo93tdmF7TYB+vV6HO2mMGYUSxXn9+rX99ttvIb+VLUUhApaIIbymaQJp7nY7u76+tlevXlnTNDafz0N/9X0c+q6IqqpChAaQWCwW9vLlS/vpp5+srmt79+5d+M0/IEnure5ARfSHOquqCvKm/6SmECXbbv9/9t6s140sudpeyXlmcj7nSCqpym4DbTQM3xrwT7ONegH7T7z/xlffVRuw4X7bVa6uKpV0Rs5MzmSS3wX7CQbVbXXJpb4pZQKCjnTIHPaOiBW59orYG9M1EwAATILNcrk0wGKcGSeuw3z6LjYECFhBbANmJZ1Oq9lsKpVKqV6v63A46He/+51evHhhS+YA02g00vPnzzUYDBSGoQELwH08njTo9Xr9QqoThqG1ks3n8+p0OorjWJPJRIXCaXNFAMonQa1WS+Px2Gzt9vZWlUpFj4+PmkwmxqRhUxSx5nI5C9ip1GmjyHq9bvNAYGY8J5OJyuWyMXZxHFtgzmaz2mw26na7xrhyENAymYxtVooGG5/DH/gs7BF/40NIIYIgsOBO8e3NzY3u7u4sCZHODBMJFgGS9r/T6VSHw0Gz2cwkPyR5+F0YhhcAwbN4xouxlWRBkM/zTL6OhPHd7XYaDocmywLQeWZ8Ab9gg1EYUnTvqVTKWHs/hqxEkFASJyQZYOPzXDMIAi0WC5O4kGgSF7ElxtWzcDw/8+tjEd/nc5wDG+Fn5kyS+SiAlBwffiQYmWBkgpEJRiYY+Wlg5J9VUijJ3holWQEbb9LchH/rZEJ5u8S4YVTy+bzm87kFGRgF3y7TD5rXa/oHZ7D5u1Kp2Bs37Bg/AxS8mR+PR5MjjEYjpdPnzisY/Ww2M+0pS7gAgSRbnvbANR6P9fT0pGfPnlkQ4DnjOFar1TJ9qzc2AHS5XBqwcL10Oq3r62u9fv1a9Xpd9Xrd2JrxeGxzUS6XDfwKhYIFeMaAQME8NptNAyVAHH19JpNRvV63+/D7ePi5IHAQeGezmY0hQESQRBO82WwuJAQADcEDpoMAP5vNDARwxMVioaurKwussH6r1UqpVEqtVstkNTwPy/qwIYC/l7uwHI9dzedzYyVJPDKZc1tjlsMJNLVazVqvem01YweziX1LZ10y44mWm2SAIun5fK4wDJXL5TQajSTJgly9Xtd4PDZ7QbJQLBY1nU4VhqEFR8+qEsiKxaKy2VOXImofeEZJur6+VhRFxrLCgMPcITNBugCT2Gg0TBKFfSAVgV2rVCqKokiLxUL1et2KfIkL3C8gQGDlGSaTid3ner229rL41Xw+N+DE3ulgRLLL+fE9ZGK1Wk1xHOvx8dFswBdD+9joWTviHhII/BuGMAxDY+kJ+gAw88p3iYOAALYM6JPU+gOmzvu3Z949C4gP8r3k+PAjwcgEIxOMTDAywcifP0Z68uzd46O8cDGAGI0HZwI5N8zDEijRbPIZjPl4PNoSK+yHX0L27CBOxSQXCgX7DM4gyd6EPait1+sLrSmDztKwdG6py7/R1RP82FgunU6bQfGWzdsvy/Gwaen0Sf8O80PwhDnyy78813a7tSVprkOg5jOVSkXj8dgCcqvV0mq1svvBiHibx2h8UEKugaOiO8eBfd0Ajgd7Kp2ZAGQCh8NBi8VC5XLZlnhht2gjTOAhWUAT7JeTMXi+U61WTbKyXq+tC9Bms9F4PFa9XrcaAekUsGFnOR/sYqVSMcDCxgCHarVqbJB0lghxbqQ9z54902g0MmZuv9+r0WjY92CKASrmj/nAvgBEgrkkk0CwV8bq9w1BODc23+/3zVc8i42NIzeo1Wo2BswnjCkFuSQ4jD9z420+l8upWCxqNB
ppOp0qm80qiiKzN+aRuSP4zufzUwev3/uhlxrBlkqnpGYymRigwLLxfPv9/g+YO37mvHyWhAdfqtVqJjvBTgEmzkO88Cwzn4P5BhioveE+YNCwJy8jIukmhvEcPugz1iRR2Wz2YjXByyt4VuIUvoodS+dkyMdh4su798B1OS8Jgh/b5PjxR4KRCUYmGJlgZIKRP3+M/LOvcDEoMFkwZwycf7OG6fBBO5vN2nJpuVzWcDi09pPT6VTlcvnigaWztp3JIKBlMhm12209PDxcGCjsCRPD75gQb/C8KbNk6N+aYbzK5fLFbtwsffvlSNgAjIBWt6lU6iL41Wo1m/xyuWwFs5VK5aI7C0XA6XRajUZDh8O5qxUGM5lMLAijBQe8JVkw9V1seG6WaxkPGCIMGPYPVjKKIu33p13SmQ8KbfkuwYWgArABRgS1xWJhYMm8Mn60UmXJnWXgRqNh2u3ZbGbX4l6QBgBiOMl4PDapAbp79tNgrqMoUrlcNtvD+QEg7ocAO5lMDFA8O/f09KRarWaaaBjD2WxmwRvGkLFHB49fAXiNRsPsKggCKzomOMxmM0VRpF6vZ+MnnZfl8U0CZ7PZNIYHxpJgA7tO+99UKmUsLfbmtfjYS61WMzui+Jtzorkm8cHukBJQGA4wMmcE8VarZb4Js+RZKM9S8UwkdLDIzKN/gYDx47m5X4CBayDlItnyyRK2iQ35+wDICfz4CZIjmE5+9isb+DcrAB7gmHd82cspOLy+3Y859uVlKHyGccHvuQ7JaHJ8+JFgZIKRCUYmGJlg5M8fI9+3wpX6H3/zAQfMB0HTg4iXIKBB90zSfD6/YH1ggxiITObcptIzTv6tPQhO+yNMp1NjWWBxJNkbP4ONA3sma7Va2bIpg+kZQQYag1yv13r27JkFFO4RIyOoSmftLMv2FFxy4ISetfKsWrvdNr23X+71+mbuwzOOOArLz9vt1kCSzjMYMu1x0StjxB6QcBicKJs9FWIvFgvrwFMul634FyCgw1Ucn3TisIF0AyLIZTKnYnHOsd1urbtRNpu15/DsrXSWttB5yjvhfD63olTOy3foEITNYp/j8ViSjJVDYoNMAJve7XaaTqe2eeHXX39twJDL5QygkFV4QOB+9/u9de/h3zDQBEi/s/p2u1UYhqrX68YGe0ZmvV5btyEABz0/kodUKqVms6nnz58b8BCoSC4YI8+qw5pJMh02dlapVNRqtVQsFhWGoVqtlur1utky18Au+H+fyHh/5B6CIFCj0bCWupyLOAEYkhgSIDkPtQDFYlG1Ws0ShHq9bjZHcCdG4cskqcQC7ANpC3GHcfAvJJ6Z4/mYIwAFGyqXyxdJFPfB8xIfPZuOj+OTsJQ+bvAs+JcHRy/F8mweenbG08vH+G5yfPiRYGSCkQlGJhiZYOTPHyPfd3yUphkwMdLlGyIPirMSgDKZjGmJfcDhISl6RBfLg6RSKZXL5YtN/aRzYVu5XLaOOLAnsCK0b53NZgYo3D9vpn/s7TmOY+tglE6fusGgM8eAYFAIhn7ZExYLUMQw9vu9SSx4ZgwIg/Ng6o0nDMOL5f44Pml4YX0YV9/WliLtdDptBdg4A0EijmNjTcfjsXK5nBV78jueD503OnhYMZyCfb88yGLUzOV6vVaxWDQWFF0vDgtANZvNCwcksB0OB9MXY1dBEBggE0QAvPl8bppfWEjGFbkKbJlPWPg3SQGBB2lPoVDQZ599prdv35qUBvumVsF3DEK/jlwAVoikxgcpH6z8c/qEBBv3GwNWq1WzH2okPGu8Xq/1u9/9zsAadhvfI7C9G1jS6bTtj4GtFYtFNRoNS0rCMFSlUtF0Or1gYX2Cw1xxDS/Jwd6ps6ATGb7KGHnmkDGhhoVAzT0CkofDaXNOxh4mmSJj6gVgwGBOfcvtIAiM1cUujseTfKZarf7BObBp/s+/tBAjuTeCN37Gs/F7xgFmjXMBJNg08YrvwjwT3/xqBckyAOSB3rOknh1Njh9/JBiZYGSCkQlGJhj5aWDk+46PIilkUgnADIBn3PgMn/OD65k1lhZ5oyYoMyB+aZlgHsfn7kvb7XmHdq5FtyLewgnYsCIMPgHTX5tz47xM0HK51MPDg3q93gVY4hyFQsGCIvcAI0UAXywW1uXncDi15oyi6ALMstmsXZ/AQNtSHIixRItOkOEcjCkO47vxrNdrdTodYxuRdDC+GHcmk9HV7zcZZg4IjjB+sK4UUHNfMC2MN22LG42GwjC0+ywUCrZkDsD75V8KVwFvv3xNEEV3TJCo1+uqVComVwCEfSKTSqUMXPr9vtmXTzjYa4QgyPK27xhF16x2u61vv/3WQECSsaewV1zDs68EwP1+f9HVC438eDy2Ll8AjWfYYKVIgLytE9BTqdMGjqnUSV5xc3Ojer2u6XRqTCCBcb/fW4D3HaM8eAEu+/3e2jH3ej1LgnhWWF/aFdM6l7kiUMM4cc4wDFWr1azAmfjCmNGBikDK7/Fr6iEIiiRS2+3W/IfjXdkTsi8kNcwPLDX245MNmGbOxUoBIIRO3dsrMcUnzf6ZeG7/t3/x4WcSS+adsSAWwHpzLz7p90BxOBwuQMcn1skK1//uSDAywcgEIxOMTDDy54+R7zs+iqSQm/MA4t+sYVr4f/87bp6bhi1hMP0SI+fACPwE+EFhif/d6/I9WAR+hhFh0pncarWqVCplQQQ5wW63s6XgbDZrTohhUazLvfFMdD1iuRtw5Pf7/WkPCZ4nn8/bcj4gwDI7zkSggNGkixJBG+cE2DA6DJl7QKoB+NVqNdt88OnpyX7f7/dtXGEbJdm+HTj4fr83FoMAwrzWajVb6vdspXTSXjM23qkmk4mm0+mFxINrxPG5vSdzwrPASHA9AgWOjwQD5yZhgP1C3+4TJja+5N73+9OGjvhTmNsAACAASURBVCQEkowN4/ez2UzH41m77IGG5IGf+RzJB2PqGbXtdmsyFtjSbrdrn/HJDe1xCYq0qW02m8aMUROBTxCg2JMFJpw/BGFAdDQaWZHvdrtVFEVWME1y4pfrU6mTFIHxRz5TqVQs2eAzkqzzF+f3cYdAh9yE//NMFj5FEgnzS72E36eEJMLLHOI4NruN49gkOAAbc8m88v+cG9/Ab4lf+BtzhQ4d/+I7BHR+9lIx/iZW+rjM83OPjBl/sDn+jV+QXDNmnqVMjg8/EoxMMDLByAQjE4z8+WPk+46PssLFTfmlPAaCIOFZGAI8AZZBknTRoQhw4u3eDxLsCg/MGzWDBZhw3SiKzFCZLO6NJWLewn1w47yAjn9DZ6mcCZFk90xAOxwOtg8BBZwsTxM4PIPJs3ANZCQYng+cBCACyLv7HTBWAAa68t1uZ4GFQNJoNLRarayjE0HwcDiY5IAuOz7Io5uHnfFADFvhx5nAzv8R4JFAFAoFawWLZCGbzWoymWg+n6ter6tcLmu5XFrxryQNBgNjbH2nHkBHkrULppUvLCfyHc4lyZbkkQMQpCSZLIFC2SAIrHNVsVhUsVi05MQ/I7IYdOvMGcEllUpZIN1utxdJznK5VKVSseSCu
SL7Pp/PzUFnMhl1u13TtRMQofVOpVJqt9sGmO6/w37C2GPHOHDOLP8G6x0IBOzfkVyxHm4Nh1vvgE1ixy5bD0AAPPg5V95G1mS3O+r82Q/uXToGX8g6/icw8a4/f3kY6WGkh5EeRnoY+fPHyO+7fvQLF6l2DNaVRsTj8ZNCWgybN1aYKRgoDJcFAIhwCrBa6HRhm1gk2AeXwej3+8pmsyYxcFk20pT7/V6j0cgWH2OXHluwUvjKm7DbeYVUOo4BY0bXDesCSDH4DceOHODs7MwMHBkHrBUGAVhz2HBwGBCHzDXQ4XCo9XqtTCZjzj4UChkQoLmFaeFnarWa9vvH7i/r9VrpdFpXV1dWKBsOh02KglaWA4rTcQulKWZ+eHhQrVYzFpYLo+cZMH73MAIyru2hM14sFlYczAyPSCSiSqVijBCHBafEIYEhAbxms5nVDJTLZQ0GA9sL2qLiVN16AwCQ4BS7dnXaLoDA1OTzeU2nU93c3Bjj6qbk+Uycm3voh8Oh2TgBDc+4Xq8N4Pg82EDkMsvlUqPRSOfn51qv1ybNwLmzh0gzXHkNLCPdu3jeTCZjRegU91Kj0ev1TubCYKuRSESz2cxqBjgnFBinUikr1AccaEkdCoXU6/XMFwBET58+VTAYVL1et6COugR8CZ91OBw14c1mU6vVSk+ePLHADlkJAeZut7OgKxwO21mECWevYNLQrrOXaMj3++OAztlspmg0qul0akENRb/YiBtkwQJyRmHYuAdJJ6ww60kwhF92/TMsLbaGZAbQdzMg3vXDLg8jPYz0MNLDSA8j3w+M/L7rR79wwYJwqCUZGwALx0O4bUL5mqsrPhyOWmb34MB2oKd2Haf7tgm7BiOIcaBvZ4HYQBaY1Oq3Qck1jGAwaOwHP8fPSjr5bEnGHsFoTadTK2rE0SQSCTOGbreri4sLhcNh9ft9Y318Pp+xCqRFYfBYb9gDF0jQ7CIBIA3NvRUKBTtsSEvQiLsFkDBpGNlwOFQmk7FhkbT33e12Go/HZvg+n88OibuujUbDpr4D6KTAF4uFCoWC/H6/sVXYUzgctgODw8ERplIpY6/cw+LWMSQSCcXjcd3c3Fj7YKQFrEEikbA9px1oOBzW27dvtV6vValUVCgU1O/3TXvMAQsEAup2u7bfqVRKy+XSpBW0CMYGkN6wHwAee5XNZq2ugqAEUODMEbi8fPlSn376qYEkAzUBKZgdSVas3W637T4A8u12a0BCgAcQIKlA/uAGfsih0H7zu8fjsRXOJ5NJk0s8PDyYo0QOROqeICqZTJqzhlXK5/PmvAFNAjoAhsDNldxcXFxoNptZfUOv11OxWNSLFy90eXlp9sgFqGJvnEWCIs47Mqx+v2+a/X6/r8PhYPtPK1pXtw9ryH4B7m4wPBgMzIfiE5HMfFtuw+eylqFQyM4LwTIg6IIMhenu97MOBEz4VEDF9fHe9cMvDyM9jPQw0sNIDyPfD4x01+rb108iKZR04mRJ5eGMMMxA4DhALBw+du4ZDAZWGImEgQdkgfnDIsI8SbLP5oHR27IpvHXicF0WcLlcWuoeFo3UJYPVOJAY+m63UzabNYaQ4mMcO8YGI8hzrVYrO7ykYykU7Xa7BqA4JJehYT1YYxyeJDvsfA1ns9vtzKkcDgfVajVJR4fBQalWq9bJCC37mzdv5PMdC3QHg4ExQZVK5YQ943e/efNGhUJBi8VC7XZbyWRShUJB0WjUZAOwmTxfOHwcIBgIBDQYDHR5ealOp2MBAmtJjQAHkSBBeiwQXS6XyuVyVgAO4Lv63XA4rNFoZIdvs9mYBCSTyZgzoKVwMBhUNpvVbDZTOBzWxx9/bOtKgMNBhxGFnaELD5KMfr9v9sA9w2ziVNgvnhP74D4AcVd6gU1LMmkF600hOXYDqwXwAYTIGABU1h/W1+0UhV3TQQnmcb/fW/vYi4sLGwR6dnZmE+HRoiOXcYvqYeywEewROch0OpUk23fXEcfjcdPww8Jls9mTGUIMdf3mm290dXVlTBVSm91uZ7p6QHAymRgokSHA/qbTqfb7vTnxbrcrn+9Y1E0dSyQSse5x+CVaa/O9MIibzUbVatXOhd9/rEtIp9NKJBLq9/sWZCAxQk6BHAbWEGYPZhWfy+9FykLgia0RIGDjgBD3ji/HJ/1PDJ53/fnLw0gPIz2M9DDSw8ifN0Z+Hyn5k71wcZEuxcli1Lxhut/D2zlsC2n5zWZjm+gyaDgJ3sx5MFKHGCGHolAonLyFkqoMh8OWfnRZLxZwNBrZhsHEMQDSZfYmk4kCgcCJ/pZ0ZSAQOJF1cBjQp8Jk0gq31+spEAgYK4AmlAOM8cGKAozr9ePMFgCbA7/fP9YCkKYej8c6HA7WgQcjdjX2HK5AIGAsKwcRXavLGPJssCqAuvQIcovFcUgkxg4b0O/3Lf0Na8tnAp44T+4NJrPf78vv96tSqWgymdhhRzohHSUQd3d3KpVKpjFmPXGkFKn6/X4bVkmavlwum7Pb7XZ23ziPxWKh0Whk9h2LxdRsNtVsNm0+DjIiAg7a1LoMMBKV+XxuM0Ly+byBlvTI9K3XaxWLRRvqORqNlEwm1e127XMBPQpkmVUCew0zBYtaLBa1Wh1ntTBwE+CByYpEIsYosZaLxcLkP9hDo9HQxx9/bCwTgQDnnHPDvUoy22f96vW6SbAAYQYYss/sMcEhxfTlctnY6dFopOvraxWLRVtr/NJ+v7ehlgyMbDQa1iksn8+bDblyumazaZ8XDAatq1soFLLACF8XCoVMWjOZTOwcce+xWMxkMolEwoLc7fY4WyWXy2kymajf7xsjTt0Ea4AMAzvjjBCcE5Djb92aH2xru92arIuvAVjYIAG6d/24y8NIDyM9jPQw0sPInydGft8V+Oyzz773G/6n6x//8R8/y2QylpqHJcOJ8eaPobGIfC+AgB6VBcPR4+RJOfPGiTG4DBwHDYAKBoOmkSVlyqK6unnkGDgE7m0+n6tarapQKNim7PeP2lM2IRQKneiv+d2hUMjajNKVCGDj2TEcGDLezmG3kEYAqDgz1hUQdtupkvpPJBI6HA7WMYbDyRt5o9FQMpm0LkSwprRF5V4AQZjWYDCoUqlkQB+NRpXL5QywYVhSqdTJTJJ6vW6MHsXBMDiwCxwMvsa8EEnG+NBNCm0wBZoUMFMUPpvN1Ol0LKCJxWI2RBDGCl19MBjUaDSydr8cOFgngBr7RYLB5Hg6c5VKJY1GIwUCAXMEgcCxSJQaB7oK+f1+q22AJYtGj0MWb25ubL4KmnuceDwe12w203Q6VaVSUb/fVy6Xk9/vPxmoCUNTLBbV7XaVyWR0OBw0HA4Vj8f18PCgeDxuAdJmszHAwJZYB2pN1uu1tbJlnzKZjO1NOBxWt9s1BopggICCYuVUKmVBJTZE3YXP99ihiza3fr/fNOK9Xs/0+5w3WDECA+49n88rFotpuVyqXC6fAHAmk1G73dZ//ud/mj0iUYnFYif1GvgjbJcsAv+/2RybAazXa2PRuEdJJ75uvV4b40qns9FopHK5bF2ykKiEw2EbCBsM
Bu1Mw2wC2Pgmn8+nwWCg8XhsPpcgkgCdoARb3m63J1kOSQacBJoAyX9JYxqfffbZ//tRwPEeXR5GehjpYaSHkR5Gvh8YOZlM9Pd///f/97uw4CehK2E+ccZoq2E5cOAwCrzR8ne//3EaOmlnpAK8eTKfw73Q0gYCAUvtk+pNJpPmGNbrtXj+xbMAACAASURBVPr9vmmuccSkVgEf6XE2BUzDYnGcsfHw8GAsHm1l2XhJlt6czWbW8QmZAIeJTYJVQ3NfLpeNLUGfS1EgTJ6kk6JTQCkejxvj12q1JMk64wAOsAKZTMbui841dAUCkPP5vNUc4EgwSgwccIJBkmTgk0qltFgsjO2BuQCkASsOZjQatbaj9XrdZlkEg8fiTxwqQzpxljgg2FYOII6Q2R3McRkOh1oul2YDANdgMFClUrEWxNPpVMPhULPZzOaLJJNJSTKmZDgcml0TsMBcxuNxXVxc6ObmRtvt1rpZ8b273XFqO8/I1wksgsGgMUEEIsgy0um0yXmw9fl8rg8//NDOEXaCPAJ2tFwu63A4WIrf5/MZ63U4HDQej1Uul624d71eG/vY6/UM6HD+1B3M53Pl83kDR+QeBDOcDexOkp3lcDhs5wF72G63qlarkqR///d/V61WM5YzGj3O9snn8+Zoe72e9vu9crmc2cNgMDBgdWUyAAUBz2azUa1WU7vdNu21K0lxAZYzDKPd6/WUzWa1WBxnw1APUSwW7dzD7lLwDKtNMBmJHAeiutKuXC5nUgt8TDAYtEwE/oZuXDQGkB47gtG5DYAnCOd8utIkGF3YePw5Ugn2RzoFRO96t8vDSA8jPYz0MNLDyJ8/Rn7f9ZPM4XJT6dIjM8LbLXpxWC3e2mHmUqnUSeFcInGckA5bJMk+jxQfg/ZIndIhBkYBx4uUAXkBCyPJZpK49xWNRu2Nm9/Fxvv9x3kak8lEs9nMWuKSvq9Wq/Z70bPifChk5SDTAYf7mU6nGo1G5uAAUnSp/H5ACBkFn8cBicWO09TPzs6USqVUKpVUrVaVy+UUiRznt8A20A2IVOt4PFav1zPgGAwGxizwPMgU+v2+FVtziF12kWJISScHCpYJ/Tup/2QyaYcYQIHtASww7vF4bMyb6+zcYmkYZHTdyFM46ADkeDw2/S37zfyL/X5vXydQ2u/3toawaTA2rL1rX/P53PTxsEB0wcJ2CYDo/IU0p9VqGbBxj7BgzWbTHA3MEmzWarWyblLz+Vw3NzcGKG5A5Z6pw+FwUjy+Xq9Nh57JZOxnYHJLpZKy2ayKxaJCoZAajYY59fV6rUajYZIImPFKpaKLiwvbEwq/eaZUKmWB2mAwUKFQUKlUsu5ShULBirUnk4nJNMgYwNLW63VjY5HipNNpq89AhoJfuLy8NHkGzBvsJC8ZAOhqtTJQwB7q9boFdDwbARId27gPSeaf6BYFy8vnwrwVi0ULoJAAsWcUlBN0TCYTkxu5kin0/9KjrISuW9w/QOPKxjgrh8PBfhd24l3vdnkY6WGkh5EeRnoY+X5g5PddP1pS+Lvf/c7kEjw4Tps3XjbFTcv5fMcuPLw18pa9XC5NU45T9vl89ibNwUJbC2uDkfOzwWDQev0HAqdT7SmIJCXPv8diMQMdn89nTpvvAdhcrSZvxiw6hwSNKqlc2niig8bxoiNfr9dKJpPm9NA9Azowdjha7h2nMZlMlMlkFIlEbMgfTpM06WQysQnrrjOGFUBLv1gsbB9jsZgNH4RNdB0qbJ5b1ApzAMOQTqfV7/dVKBQsJU1wwfqgjeVQwLDBEgFGbqoZcB4MBvL5fMrn8ydMmmv8OFr2FVaPguFcLmdtYl+/fm3rRPEwAUsoFLK9Z8Ah6zQajdRsNhUKhfTq1St9/PHHVljMfsPWSjJnA9NGEIaTaDabxkJGo8d2z3RyQmKRTCbNNpDurFYr6+IDc/bmzRtjZkmhu8xlqVQ6GR7oBkUw65zXSCSiVCqlwWBgAQDMDoHiYrFQvV7Xcrk0UGO/2Nv1em11GaxJLpfTH/7wB5XLZVUqFb1588ZYNr/ff1LH0Wg0Ts4NGQFJBjbUbdBi94svvlC9XjdHTcczghAkW3SvglkF9JD8jMdjxeNx6zDV6/UUjUZVLBY1HA5PuiMh5RiNRhacAt6wpwTdi8VCs9lMpVJJsVhM/X5f+/3ewAawgOHjrLjrCsu8Xq/NnwKU0mOwjx+JxWL2d4IL7nG3250UFXuSwne/PIz0MNLDSA8jPYx8PzByNpv9WUnhT/LClc/n7RDAfgEu3Bh6TtLMOGhXawyTxeLAIuH4ACmcAJIGNpWUNFpOOt9IMiZnsViYU2SRYFVczelmszFdOhvnDnpjIjxvwRgFn03HJobtcZBwwDBx/C4OwX5/7PBye3urVCqlcrlsG1sqlaytKg7M5/OpUCjYYUfzulqt7A/SiW63a44EPTSsItrf1WqlTqdjaV0OJqCO8dIpikJFijCRgLBPrA/24fP5TA6DHIC9Yr9gQ9HcM5F+PB5bmrvb7ZpcJpFImN45nU7bWuM4qU84HA7W8haHwYGMx+PmYAD4aPQ4t4Hvw44BApx8oVAwaUK1WlW73dZmsznpRFWr1RSNRjUej60T0X6/t/WrVCqm+Q8GgyqXy1oul1Z8SoEqDC1McbVaNXmSe77Yq3Q6rVwupxcvXugPf/iDOSm0/LC0yJw4w1xondkbJA90nbq/v7fgiz2inoB/C4fDajabur29tc8KBoMn9SwAPLUCMKKpVMq02NhWNBpVrVZTNpu1s7RYLE5Y58lkolgspkwmozdv3qjX6+np06eKRqNWJDyfz0260e12TV8uHSVHzIKBmd5sNqbN9/l8enh4sCGvyElgEQF8/IKr+8dvIItCe//27Vs7W6FQyAZHTqdT9Xo9qyEheHCf2917/CfBGAAvydheAmOCNQJx7h9QJSMDUIVCIfX7fe+F6x0uDyM9jPQw0sNIDyPfD4yczWb67W9/+5d54fqHf/iHz2irib4zGo1aqjUcDls3FB4QYwJUWq2WstmsstmsGR9Oxe0YAjvkSjD4OynjQCBg3Z/QBdO5xa1BcGUVbHY6nf5vBZvodBlYSNcYmEO00hwKSaYxPRwOJ4xJOHxs9xqNRk90oYFAwNLykiwNXy6XrRVrPp+3AkuAFXZvv9/bYEHWHX03xoTTZHjjer02/ar0qK9HwxoOhy3dXyqVjI3DGe73e/V6Peu8tFgsLC3LZ2LAHDb0761WS71eT7/85S/tcBAw4BTdYIKDidOXjuzW+fm5Pv/8c83nc33wwQcnaW1YEA47em4Oi8/ns6/v93u1Wi1z0JFIRN1uV5JULBYtxUwqH2aZlDlBRyAQMB1xvV7Xf/zHf1jBqM/nU7lctj13Z9/EYjGTZlDgyqDCQqFgrVthYfr9vvL5vLHYpMVJybvdpUKhY2vfUCikt2/fGtu0Wq2MSeUMoeNnL5H1UOCNc4FppWMTbY9LpZIxXkiB0KAD7ofDQQ8PD5bSn8/nVnQuSV9++aWKxaI+//xz0/szNLHb7do6wzA/PDwYeAJeblvoeDy
uL7/8UtfX1woGg0qn0zo7O7PPogHBkydPdH9/r4eHB5VKJWUyGQuC1uu1Xr58aQHybrczCdF6vVYul1Or1bIaDzpo4eQBYlhApBiAcL/fN9lEIBAwWUO327XZONgJgSTgxnkLBAJWAwDQEGSFQiELwmBn2RNXmw5Th33ze/gd+NXBYOC9cL3D5WGkh5EeRnoY6WHk+4GRo9HoL5fhogMTkgYeEsPBgVO45nbdcVN9GAtv50gwWGTYqlwuZ738t9utTdeGLWJxwuFju0YOM06GhWPIG2wab+gwb65ml03HAWNkOC8KV/l8nDUbiBFFIhE9efJE+/3eJrJT3NjpdLRcLg0A3QGYgC8OBTBD2sEwOdYcZwETRCrZbYO72+0sFQ+TA3OZz+etw896fWyx63ZhwlGEQiFzsIAVsgIKmpn1wFT6WOzY0jSXy1lhMEPnXKaUe3YlE/v9Xslk0pwbbBFzKyQZgKJjJiCQZN8HoPZ6PW02x9ayo9FIb9++Va1WM8aOegWfz3cSvMDK7nY7PTw8mDOFsfv666+N3Ws2myZloKDXnYvi6o5hVAks+v2+drudDdFstVrabDZWbIs0BufD3A303KFQ6GSIJRpvSVZrsFqtlEqlVK/X9fLlS0kyhtcNDmFvYBJns5nu7+9NHpROp5XJZNRsNg0YACAKfWHLYA2z2ayBJ2xuNBrV69evVSwW9fLlSxWLRZMowHQj61kul7q7u1MmkzG5y3a7tcJ6Oj0Fg0GdnZ1pOBzqxYsXOj8/N1kOz4+zh211g9DZbKblcqlsNmsSB0B1MBjY+eEPzjgQCFjgC5MOK83zwgb3ej1jwe/u7tTv9/Xhhx/q888/V6vVskCE80BTA5w9gQa+NBAI2JmDwYc5hLkDgF2J2Lf/jk8j0AsGg+r1et4L1ztcHkZ6GCl5GOlhpIeR7wNGTiaTv1yG63e/+91nzNbg4NFtR5K9IbspPEmmW0WvyYMEg0ErWqR4D6YMQPD5fBqPxxqNRvY7WCicP6l6jESSgZfbtYRFprMNxsLXfD6fFdC5AyBJ81PEy7/DXHKfk8lEy+VSlUrFGDy//1gQO51OVSqVFIlE9PDwYIYGSxAOh61IlEMCAFIgSxvS5fLY0pPPxyGSDoZB5TCGQiHlcjlVKhWtVitLXaP3HY1GarfbxqQiieHtfjqdqlgsmqaW9eWe6MQkSaVSSZ1Ox/TWFxcXqtfrurm5MZYDJgzjJdhYLBambXZlKICNz+fTaDSyVDc2grOYTCa2l5FIxA48axaJRFQoFNTv9y0gAPw5eKT/W62WcrmcRqORDdwDQJn7Qi0AUpDlcml7TLtSvhfwgPXkGVOplM7OzhQIBHRzc6Ovv/5aV1dXJr358ssvlUgkVCgU9Ic//EFPnz41lok2yRRhz2YzDQYD+2ykO9Pp1Fgntzh2s9kY8FATAtsZCBxbyhJIUHuAjXQ6HQt8ttutBZKcDwaYck7C4ePgxkgkYgwdNSqr1UpnZ2em7ae+A5CHFYOJ6vf7xpLiL9Dmw1D2ej2TJ1xcXNizTSYTdTodCygBCpeJLJfLqlarJ8FeKpUyNhlwj0QiVkTf7/fV6/UMSNiLVqtlv4OaDVh2AiokP69fv1Y+n7cuV7CqBMWsMcEqjh9/RDZkuVyesHK0+PX5fP+NuePzCRDxg3zdy3C92+VhpIeRHkZ6GOlh5PuBkX9RSSEFwbxZoivlDRA2D3YNwGABWBS/329v0xTQdTodY4pgeEhVYoTozFlYJpAjRyAdz2JLsunhqVTK3vK5V4yDA0WHJpy3e+hJb8MWhEIh+/9AIGAs2cXFhXK5nG1oJBIxoEwkEtZxBUfHJruFthzO1WplrXUZcIcGHJ0+MgrYUNafN3nWAWBCksLvhU0CiBl4Rwce9oeuTuwlHYMAREmqVqtmkN1uV9PpVM+ePZPP59OLFy/UbDaNsYIBdhm37XarwWCgXq+n0WikbDarRCKh0WhkBbZ0F6pWq1osFqZnRnYQDodtoCRFuJLUaDQkSefn55JkuvmzszP1ej0dDgedn5/bzJB+v69sNmugiiOVZG1x3YLadrttRcPn5+d2rzCOfr/fugjhjKLRqNLptG5vb61+4nA4tqRdLBY6Pz9XrVaz9clmsxbUDIdD6zo0m8306tUr0+sjU6CYF3kGUpder6dUKmW2FY1GjSmFHcdGYZ9wfsykoT30ZDI5kUxJRwkRjPxyuTSGqVAomGSKYCeTySibzUqSzeBwnR6Odr1em6yHFsPVatWY8/+Sv0mSZQey2awqlYqKxaIx5ovFwiQiw+HQdODcF76Gc4LMQDqVBlG0i69CzsL3EvDsdjsb8Ih/JNPR7XaVSCSsk9V+v7eOUjh1np36EgJpnpkzBCC4ungkUuyd9DjkknPv3rvL1LOX3gvXu10eRnoY6WGkh5EeRr4fGDkej/9yc7hYBN4Q3ZQboMG/SY9MGywRbAfOjnS33+9XOp02Haz0WEy62WwMsNhYd4MpeuOQ4jTpXsTvgwVAMkERIW/nq9VK4/HYhrOVy2U73LPZTLVaTb1ez/TQrgHsdscWsZPJRJVKRalU6gQckH58W69cq9V0f39vwwqlRz00mnfSmJPJxCQRw+FQ/X7fhlgOh0Mz9NFoZIWWOGD2AHYFNmK9XptjSCaTymazVuRZKpVOmL7FYmGzW0ajkRXODodD+12NRsOYUaQm/X5f0WjUnARyGwqa5/O5Xrx4oUAgoGw2a7IOtNir1UrZbNbSxu76sJ7ZbFb7/V7lclmbzbGNK+nuw+GgbDarjz76SM1mU69fv7ZJ6KTXU6mUscOr1UqxWMwKeWGp6T6EXAD7415x6jDKOOm3b99aZy+kRQQhAMNutzN5Bd2IYH+wM3Td33zzjbXRpRsRuvxOp6N8Pm8FrrCGu91OzWZT2WxWgUDA6hxgeQlWKCiWHougGcCKDAI5SSaT0WQysZbVBF+DwcDYK5/vWMBODYTf71ez2dTh8Nj9B5Z9Op2ajGM8Hutv/uZvrNV1Npu1ItlYLKanT59KOspA0IXjJ/AH0hFUaM8Lw0mwNZ1OT4pqcabz+dxAKRqNGvjudjvT/nOO8HO0CeZ53Ja++DCYd5gzPn+73ardbhugwyjThQw5C0Gmu24EisiJCHD5DLIoBPCSrG2zKwEDUAjUkePw+d71wy8PIz2M9DDSw0gPI98PjMSnfNf1o1+4WDzAAwPB4fHWiPY7GAyeTBanEBTmjFQ2hxejkGSMRSgUsm4nOFe3uBanO5/PzYHAYh0OB5MQcJA51DjRXC5nb/6ka3k+mKVQKGSOjNQyLWV5W57NZiesByyHJGMGs9msFZ/W63VLUZNW5llh2Ugrw6LwrOVyWavVSr1eT9vt1liOXq+n8XhshcnRaNQAwu/3mwNMpVIGojgNnPhqdZx/USwWtdlsTF8+Go00Ho9VrVZNb81cEZ/PZ/MxAA708be3t7q8vFS1WlUqldLt7a3NgKAAG1uKx4/T3svlst0j68D+0OqU/eQwxeNxDYdDDQYDpdNp05DXajW1Wi3bz7u7O7Oth4cHXV9fW+
DBnsJ2TCYTY/AIiObzuYbDoTFp3W5Xq9VK1WpVh8NjVy5kOX6/X5VKxQ4tz9NoNMwGALVgMKibmxsVi0WdnZ3p5ubG2CvWloGPBCQ4zlQqZewg5wEJAA6UgAzwhf2eTCZmuwQMkmygJ06K8wmoYNv39/fKZDJmo67+HDac9UbWBHvHui4Wx1kqtVrNpB20R45Go8rlciaXwaaxP5fRpwgXQIUNzefzVscBiw8I0iIZW+QeX716ZYXFfv+xeP7+/t46SEkyWQy+yGX6XH+Cb4zFYhoOh5KO0iKY93a7bUw0jCcAiOzMlai5gbff/zgzxJVA4G/5msvaucE560dAQCDH373rh18eRnoY6WGkh5EeRr4fGMm+fdf1k2S4uHEeFu2qJHsIWDIMGPkCqViMkf/n7Vl6nAqPTpyHZzEp/OTzOVQUVsIqzGYzm5dAKpffxRsxz0MaFcOAaeQtezqdWjFkqVRSMplUs9m0rkQwfRjB4XCwAYpIJrbbrUk/AEd0smhqeeuGKVmvj/NN+H0cXjSnACfpaAp63cPu9/utgw8yFveNPRQKqVKp2Bs77A1MJWsXiUR0eXlp4IfGeb/fq9PpGANL0evz58+VTqfV7XbtIMP4NRoNdTodzedzFQoFA3Qc/suXL3V5eWlgHYlENBgMrBibgkraoEYiETUaDU2nU2OKYCNYy0QioSdPniifz6vdbuvZs2dqt9uWskZn/e26BmQ/sL8wScgfYEUpIMfeYWbcWgA6PmHfsG2Hw0GNRsOc7zfffKPLy0uTruCEr6+vVS6XDUQ4H/w/mm7qCG5ubvSLX/zCag7c9P96fRyWisPmvMXjcbMxHBHzLmDPXJ0+afxisSifz6darXbipJAmwDBx1tmf5fI4H6NSqeiLL75QuVxWvV5Xp9MxnTrr7kqn6GzEJHsAnCGVtLAlyEQy4TK6i8XCdPR+v1/ZbFapVErL5dJYZ/xHvV7XbDazM3Bzc6NarWb+ARABqKbTqQEIATPntVwum3wsGo1aC2j8HAA8n89PwAlbxP8hvYKJJfhysxYE1pIsC3I4HOwPzCCfD8MIi+dd73Z5GOlhpIeRHkZ6GPl+YCRS4e+6/D8WTHBQOEDeIiXZwaXb0XQ6tYJOFgAWje4kgA9s32azMeeAVIEHZVHcNB9FkDBY/A6+nwUkvQ1DwlstrJvbIpXiYxiodrt9Ag6wN7wJ+/3HdpP5fN403DAOhULBDIpDj3OXdNJKczKZ2BR2ihnRhZPedtnT0WikVqtla49mvl6vm+OjGBWDc50rBx4HzcEGZEhh03Upk8lYS83pdGpD/qgNoIsUjvTq6spmO8AEwGK22+0Th4vjGA6HmkwmqtVqms/n+vrrr3V7e6tAIKDz83NFIhHTFTOLASC9vb1VMplUvV6XdGRAYG9g8pDrwFRmMhljBUmVD4dDrVYra+/77WdjWCdF2QDwaDTS+fm5MpmMdSPiMMPiwTqHw2EbXHhzc2Np8t1udzIbg+BmOp2a5p39wY7pzgUDutlsNBwOdX5+Lor3CfAIjpArULMBexUKhUyLjg0RcKB9B/BZ016vp6urK2PWGBbZ6/WsqB5bonDWZRNZYxjRTqejbrdrRazJZNICK+oVKM6dTCamA5/P59bFCgkWa//VV19pOBxaUINenGAN1oogZD6fazKZWKADSCHNSSQSBmIAMI6ZQBrgm8/n6nQ66nQ6VrAOu0lxM+fO9U3IiSgCJhjkD5/j8z22KuYPHdVYY+wQ+wFQJP23r7msIxIu7/rhl4eRHkZ6GOlhpIeR7wdG/kUlhYHA43R5nPy3b0ySpTE5PK7Gl8XnzZAbxuhg2TgEvOnztgsIsNB8nYVArgBjttlsTqQZq9XK2k/CDG63W9NGk/pfLBYqFAp6eHgwJoCiSdgzDpyrJeWAk3KHqeFCq9xsNi0tzRrx9syGw6DALPAWztdoU0u6HZAulUrWcYg3epwCoAqgUFSLfAI2BXCXHp2P9Ng6EyDn/nkuHEAoFFK/37c9yGQyur6+1osXL6zzDh1vCA7evHmj7XarTz75RP1+X51OR1dXV2o0GuYYer2ems2mPvzwQ9OBMzUd5gH2AyYFeQnabLoA0YIYhkZ6TD27xZ3fZqeRUpTLZQMXn89nbVjRM8Pe4Sxms5mtLfcmSS9fvtTFxYW63a6CwaDy+bzu7+9Vr9e12+10fX1tQEo7Yp6VegcKc9HeZzIZFQoF228cHufGDa6wBRh0V6tN+h3ngh11Oh0bVBiLxUyWQdH6aDQ6kS7hD7h3LkDJrSHpdDoKBI4FxMhyYOoYdgkDR6taV150OBzbDz958sSCwLu7OxUKBR0OBwsGAFAkFfgkpB4UNRMgYss+37E98GQysQ5y7DFsOhIw7IWfG4/H5kc5f/gNF/ABbRh3GFDAzw20XcDgfgEdAI7Al4sgH/t2bYM9RsblXT/88jDSw0gPIz2M9DDy/cBIvv5d108iKWTB3JvnzZOb3G63xgLgYLh5ug7BJEmyVO5ut7OiPRyMqxVn0WANASbecH0+3wlIuRuBQePYADo2KZFIWLobZkM6OhdmO9AuFaAC+CjAhUmIx+O6vr62YXKkiHFarpMG/AhueBa6+fC8pERhOHFedAyClbq/v1etVrPvlWSOlD+hUOhktgrspOtsw+GwMW8ArKSTYXakzLfbY0cndLwwEzhx2EValdbrde33e93f39tes78ffPCB6appmdtoNNRoNOygVCoV1Wo1rVYrY4XOz881m83U7XZVLpfNkR0OB5NdwKphp6PRyBw+TDN6YuyWQmECF5zCcnmcel8qlbTZbOy/HEAKt5FH+P1+s3k+OxQK6dmzZ3p4eLA2vMzXePXqlYrFopLJpMrlsn0WLC6dkXDEBDvBYFCNRsP2DsANh8O6vLw0xnU+n5v9up13ABg3gHBlT8gmMpmMnT++B3a33++b1hspEEES5w7pDCwybBP7hZTDDQ5gfOkutlgslM1mbU8pmn54eLDWv+fn58rn8xqPx4pEIgYotLN1pVdci8XCno92xuzpeDy29dlut7q7u1OpVDrR20vHNtkEoXw2YNDr9ez8EIi7TCnrwl678ofdbmd1L9QLYE+cUxhRlzlm//CPfKabkQHIpMfOc971bpeHkR5GehjpYaSHke8HRv5FX7gwuPV6bSk2HpgHBTBYFDdFhyMDgEgJxuNxax1JURw/z0a4hu0WXR4Ox+JQCn/d3+dKDNB/7nbHTiocbj4fjSmsIUxWqVSy1DdvygCDJLuHyWSiXq+nUOg4/BBw4fcDlv1+X4VCQel02hiOSCRielZS5/wuDhCFyBQ74kQikYgViOLwBoOB6vW6GS+F2y57Nx6PrdUr97rZbKxFKkWyHBIA1WUOCoWCIpGI7u/vdTgc9MEHHygaPQ6/4+0fyUY0GlW329X19bUGg4H6/b4+/fRT3dzc2JDAXC6nq6srjUYj07tPJhN99NFHdvjPz88tVQ2IlctlY15Z52QyeZJaxmGMx+MT9hbmFYaYWSGz2cwCTr7utl5mDQmElsuler2eyQoYFshBRuMO8
+sWXdbrdd3e3kqSfS2TyZjuGwZ6v9+r2+2azGE8HluraNL4MMU4fuoIKCjGwbF23Nu3zwyBGJ+BVpv1yOVypoXH+VAjst0e60Skx6JiGCqXLZQeZwFJMkeHs8Zm0cDjGJfLpTlNv99vXYVWq5UGg4F8Pp/q9brdMwCJPAK5CSw7No0zpbUuMhpJ1oEL34XPQZ4SCoVOWstS50FdAr4Pto4Algv/yDNSOOxKXPB/+EU32Oasur4appjvcf0Y98bnuoCFjbvg4l0/7PIw0sNIDyM9jPQw8v3ASL72XdePfuHioWHupMcWrTwcN8T38He02XQUIfUM20faFCMjDT2fz+3zDoeD9eSn3SULSmtOCl5h/GazmdLptAGW9GispMrd3++mlg+Hg2lyWWy+Dovhyge+fSDpysRbOcbJZnE/3DtO2nVGGBMOZLc7FmNT+MphgSV5o2NRkwAAIABJREFU8uSJer2eteRcLBb2cxjScrk08AA80RzDbLKvGC7TxSlsdBlY5B6kmJfLpfb7Y0Hj5eWl3QssCKBWKpXMwcK8AIB0NGJQ5vX1tTqdjvb7vQaDge7u7owhvry8tPs8HA7WCcfVWA+HQ0uDE9Dw3/V6rUwmo263a3vI53DwDoeDyU4AfXTI2WxW/X7f7I0gizayMCs4bp/PZ3NnRqOROp2OMV+z2cy6LDUaDeXzeQukaAGLPAVpDqwprVYzmYyxktjeeDxWu90+SaEnEglj8Xhu6kewe86Ly4xzrjm/q9VK+Xxes9nMWuvyGfw+tz5ltzvqqJE+hELHQmmKvynKxTlvNhsLBmGggsGgEomE1XZQg0L7Zkk2WR5Qor6EmTWsD8EbAOz3+63OBvBBguIGp4fDQaVSyYIp5B3ou9Go53I5zWYz9ft9AxdAHzkJNrjbPWr26Y4GsPL9wWDwpOUujD5r48pc+HzACr/s1gS5wQ7BpQvy3vVul4eRHkZ6GOlhpIeRP3+M/L4Xrp+kaQbpc36pCyau7pV/w0hxIuiwXQNbrVZWHMtDUYQZiUROBs4FAscBi0gTeGjSlBwwukPxNVeDzffhqCkyxMgXi4UVvMJY0AmGDi4YDAdGkkk3MN58Pm9v0RgR8zIwVheAWT+Mh/UF1NLptKW2/X6/sTHSo6ZYkrEAGBegg9TBZUFYP2ZVkHqnuBJnz88BjqxltVrVJ598Yp13AFsONxrsxWKhfD5vDpbBi2dnZ0qn08aIUTQJ45HJZNRsNs3pz+dzYx1hn9wCSFg6nCLdtfgvXWsIBEiLu8ELgcN2u9VkMjGgdlPQh8OxoHg4HBqz6fP5NJ/PTZLjpqM5P36/3+of2Nv5fG6tc7Eh7qvT6ZywXtFo1IqWfb7jDA9kFhRbj0YjC94IjLC/4XBoBdXdbtdmgYTDYTs3PMNms7GAaLM5dhNbr9caj8f2ecFgUP1+X4fDQfl83p4RLT7PA0iwhtg6Dg4ZB22bYbgBUIIXHF46nTaGC8Y/n8+bXWHj2BIBFMFJv983uQxnJRgMWrE7basBUQICzqokkxDhA1xWkjOcz+dtzabTqcmKaETA7+RzCShd58/vxbaRIyF/4txJj3U++A8CcUDTDaDwIXTdIhNBcIzv864ffnkY6WGkh5EeRnoY+X5gpJsx+/b1o1+4cFSk6qXH9owsursgHFj0k0gDeCPFWboaTkm2caSvSSHDqPD70WaS+uYeSJmzMPwONgFHDsiRauSNdzqdWktWuqhIj+lFDurhcLCOMwDRbDbTmzdvNBwOlclkbNL5ZDKxjdztdrYWbDjPwO/huTabjbXwxQmh/XXTocglkALwu7jPeDxuRbOk0GEt0frC9sGI8JzSkT2kGxTrBGNHETAMzHq9NueNTpxhfofDwdLq2AVsKLIAQBpnCVNVKpXsPhi+ibwBuQl2Apiw14FAQMlkUrFYzBzAdntsQzwajWwtCWrcQMINVlhXnO1kMlGz2VQkErECaAqzGfRIe2QONCz1fD7XcrnUxcWFza/Y74+af7/fr2q1qs1mo06nYxKKZDJpw1A3m8f2w9j+V199ZUHWfn8sssX5LxYLtVotm8XCM/R6PXteHCuShMFgoMlkom63a1KE9fo40f7u7k7hcFjD4VCvXr3S4XAs0nVb8JZKJaXTae33e9Ns84yLxUKTyUSDwcDWAseLbTSbTd3c3OjFixe2fjhlGPDtdmsypGq1av+OPfN3BnniT4LBY9E3MgtJxnbSNjibzSqfz6tYLJ5IEPi9DPLEfrE5VybSarXMJnK5nHa7nXVZw4fhVwnYCRwY/ol/AyQp3gVIONOxWMxAG38Mg8q9Exzy71wE1zDU7te864dfHkZ6GOlhpIeRHkb+/DHy+66ftGmGJEu1cfFgbCJ/B2B4CN7e0b26b72ACs6fwXNoo2EvYHtI7QUCj0WGvJnSdhdGzE3vspD8G+yguzF0MUK7itbblTZst9sTxjAYPA76Y/4IzoWDzTpReAvrBCPEs+CwATDu1y1qvb+/N2032mkYIP7ARjCPA2YSmcV4PDaWBNYUAIbBIl0NCwIj4xY4h8Nh9Xo9xWIxAzba1br6YQYjTqdTAzqAnEMsye6HQk7sDI2tJCtqLpfL9jthLVz2A7sixQwbw765+mgOEoEOrJxr43RPglUdj8d6eHhQPB5Xq9VSLpc7Odw4bfTmpLulx7oLHE2n09F2u1W9XjfbiUQimkwmBqqu4zkcjgNJS6WSdXByuy3lcjkbIImMiD+JREL9fl+z2cykKzz/fD63YAdpEE4Odpbi61gspru7OzWbTT19+tQkCdhNNps1x8fP45ilR/YLlhmHeHd3p263ax2TaFVL8Ob3H7sdwYLhA2AcOaPMFpFks0Ww78PhOGhxv99bK1pJ9juy2az5OreOAbkJLZNTqZT9DjejwUwhv99vbZSxA+yJgnS35sS1YVhOAI/7hW0jWEJvD0Bi12jNARP2AjkQ5x121v1/73q3y8NIDyM9jPQw0sPI9wMjv09S+L0vXD6f7/9I+tPhcBj8ue/hQPKAaCxd3aObIkbvyaGBMeNGObCk8Uml8kaMk4eJwxlyoDFIipR9Pp8V8sFukWaG0eD72DgKF11nn0qlTjrycMDYPADH1a/T8Yj7brVaur6+1m6302AwMCDudrs2IG46nSqbzSoSiRjbBSCRiqa1Js5Ekrrdrtbrteld0TqTTuWzLy8v7S0dmcV8Pj9JoTO1nM/CsAqFgtrttjlBNOlIjWD+uJii3u12td0eOy5VKhVjwPL5vB4eHnR1dWUHodlsqlAomIPHAXK4XFYK6QX/xowJF4Bg/8bjsRaLhT0j7ODhcLCZMQQ2/EGCwMGjZSxgA2u73+9tH/b7vd1/r9fT06dP1Wg0jLGOxWLWPQn2hI4+2WzWApvNZqPLy0tJMnDn+6fTqWq12klnnUAgYMzPdrtVoVAw0Mpms9ZJjO5IboelZDJpem709q6ECQeCY6GTE46Vc75er3VxcaHlcmnP8vnnn6tarSqfz6vb7do8Eeoa4vG42TcF+DCVnU7H6ij8/uMg1NevX9s6EgTB7I/HY6vZ
IFMAg9tut1WpVAx0XTYcFg92cLPZmG0R/G63j/N2YL86nY52u51pzWOxmM7Pz9Xv9zWdTrVYLFSpVE5qb77lX5XL5ewcA9w+n8+kSYCVK/lhf8ggcHbpBEXjAtd/AaQwnIAKQQL3yNrANLu+F5DyrsfLw0gPIz2M9DDSw0gPI8FIl2j49vW9L1yHw+H/+76vY2BozF39J//O31lMV47gtmmE+QCcYFz4GVJ9pG75PDdVu9s9do7x+48zONAWIxdA8w0ThN42HA4bU8Wi4+A4LIFAwLoH9Xq9E30oQwElmdOTZAxDoVDQmzdvlMvl7I37/Pz8xHkAFBTh0kUJKQUsF0wRDB+Ajc4b44Kp22yOcx5gyrLZrILB4xBTHCNOge9B8oJ2XpKlfqVjsAC7gBwBwwb4cPocbg4TIM7cjHa7rUwmY99DChwGAvYkmUxqMBhou93q1atXSiQSur6+NvYFY08mkxY0SDLHt9vtNJlMLJBw95SUPfIObAB2lmdBpy3J2Fy+B0eJFIaf++CDDyzgYC7Mzc2NOY77+3vrNoX8gbUNhUI2f2e7Pc69wZl0u137/dgKzoZhmbBZ3B8p/el0anr3ZDJptRs+31ETPR6PrdUu6xEIPBZ5Uz/CYMRisWiDIzl/wWBQb968UaPR0OXlpclnCJT++Mc/qlQq6YMPPlAqlTKJBP4gnU6bXeVyOQNVniEWi5lTpWaCwI1nns/nuri40G9+8xvd3t7q/v7edNfSkRGG3ez3+yZHAIQ4jzCXkUhE2WxWw+FQm83G2jBLx5a2iUTCzgOgQ3ALa0wROrIHfCBF625g4zYY4HziCwEBwBkgpsYkEDjWNCDX2Gw2J13I8JucScAUqRuzYmD/OPfe9Xh5GOlhpIeRHkZ6GOlhJBiJL/iu6yfpUsgBJEXupt8ADw6wJEufw7jh3ElPFgoF7ffHrjowDHQkYiGRXPBmi3FwQGBvVquVFZ3iAGCEXGeHM2aBMUS/32/3S2tQChkXi4UVzqEX5+DTKSWbzdrBPDs7k8/nU6VSMQDbbDZ68uSJrQUD8Pb7vX7xi1+YJMJts4lR4tQ48IVCwcBpuz0OEsSQ+QwcJFpWvo5uulwuWwo7n8/bfAbmNjCPAfaR1qocLN7ukYiMRiNlMhlL/TPkEDbk/PxcnU7HGCbkDTBjOGoO1np97Mjz8ccfm03l83ml02lLZ9PNirTzaDSyQwdbzKEklY6T6Pf7KhaLFtCgSca5LxYL69hEMTesWigUsqJngprVaqXxeKyPPvpINzc3KpVKxrIEg0Fj35AINZtNZTIZZbNZDQYDpdNpmzUCq3t+fi6/32/DGmESAdDFYqE3b94Y+4fzJzDJZDI29HO9XtsZ5PzSSQpQx9EATgQQ19fXNsgSpi8Wi6nZbKpSqcjn8+nXv/61ut2usaiSLDgolUpWXwETl0gkrH6BtWc/AfvJZGLgHI/HbS8JQHw+38l8nFevXqlWq5l0BNYcBpOfoYX0ZDIxFn86nZ7oypfLpbrdrhaLhWq1mvz+YzF3KpU6ke1st1tjDwlM8As46+VyqXa7bfUI0nFeD8wdrCAyL84V7B7Mr3u28QlkEFztOkGidAQvshHSo3yCP9gvZ4jP5fm8690uDyM9jPQw0sNIDyN//hj5fdePfuHy+R7nJPCg3CCsFxtGyhbjR4PK4uIsSIXudjtbADYch4Ox86aK1tNNraLLRjOPg4Kl4/DBKsEEwoZgeBzCeDxuaW6fz6d8Pq9kMmndU3D8sDTb7fakyBFjq1ar+uqrr6zoFiar2WzavAhJVqjI4RiPx/Zs6/Wxk1IikTC9PU4ymUxqPB6r2+2qVqvpcDiYjGE6ndrndrtdjcdja0cajUZtICT6+UKhYKntdrutYDBozmw8HpsufbFYaLvdmq59vV7bYeXzf//73+ujjz4yBhGGgo5K19fXBugYNcwtOmq3hSjpZQpbcYIUKM/nc1t7nBE1DOv1WsPhUO12W/f39/rbv/1ba6VM0LJcHudYuAwuTh92ms9D3xyNRq1FLXaJw8xms8aC5XI5A02KUbPZrKbTqb7++mtJ0l/91V9ZATlsynw+PwGReDxuzCNOHgYLVomfzWazKpVK6vV66vV6qtVqZhPpdNqYcTrwcB6oA+FsxONxJZNJFYtFBQIBXVxcqNVq2YT4xWKhRqOhfr9vrGSr1bKADE15KpUyp/7ixQv1ej07NzB9sM+5XM58DQEfzDhBD3M9YCKpq3j79q1ms5kKhYKeP3+u9XqtYrGobrd70iK3Uqmc+AQccaFQ0Ha7tc5UzWZT+/1eT58+NSkSjOFms9HV1ZV6vZ6x8vzhHOKkg8HHmUN+v9/YaaQbBOjYFbY3nU5NMoI/mM/nFswmEglNJhOTvDCElDVBngJo4x/dGgdJdr5gvmHwvOvdLg8jPYz0MNLDSA8jPYz8STJcMFpsAOwdbB0HjwXlTRunjCZdkqX2+V4K+3K5nB0WDFaSAQRv+qFQyN6kSd3DrCUSCUs5InWAHSKFGgwGrVVotVrVq1evDKjYWFiO+fxxxsBkMjHdNwwULBUGtFqtdHNzo3w+b1pXWJVQKKS7uzuVy2VtNhsr2EXyIckKByXZoZvNZuaIhsOhtQVOJpN6/vy5er2eMXZ0GkLL2+/3NZlMrAvRer22AwAzBbOCpGAymajRaJj+n9Q6LUgLhYI5umg0qkKhoOFwqFwup1/96leSZAzXcrk0bfR2e+yYk8/nbe9Z8+l0qnw+b2wp9+T3+21ex2w2M505RdcEJjC8rB8BDNKSDz74wGQFpLHb7bZms5ntAQ4DqUs8Hrc2si4LPRgMDISm06mq1apqtZrtN4EDYEydgCSTM1SrVb1580b/8i//or/7u7/Tr371K71+/dpS5wBhqVSS3+9XJpNRu922Qtt+v69cLnfijOLxuEKhkG5ubkw6Ua1WLTgLBAKq1WpaLBbGCFEgjt3s93s7d6FQSA8PD7q7u1MgEDAgjEQiKpfLev78uQKBgLUiJoDZ7/e2brlcTvV63RyZqx8nGKQddblc1r/+678ql8vp4uJCX3/9tQWxAB8MGfcN81gsFq0AGAZxNpvZmtO1C8kTLY2xMQAPoCEzgIQlk8no/v7ezs1kMrEAlK+7GY7VamXBKkHAZDKxomjqHWBN8T98P2cYxrjf70t6rDnAxxJ8b7dbGxaLLMOVoLm1NtTbuIG+y0Z7ksL/3eVhpIeRHkZ6GOlh5M8fI7/v+kleuNAU4ygOh8fiahwsmkuGzfHWSqEZBiod2QRYO7TOLCCAT5EeqULYuWDwODE+EomY/hOtNguOHCEej1tak0nnsFCkNnmDh2Hka6S7D4eDbm9vzYnxvX6/X6VSydLsMDfr9fqkxeZyuVS1WtVgMFAulzMtKBpd2E7Sn61WS4vFwli6drut9frYUpbBjZlMxoYaApCkQyninc1mms1mxg6iiWb+AZ2q9vu9nj17psPhYFrXcDhsBbYMw8Po0PD2+30ryhyNRlosFjo7O5Pf7zf2AEeP/KTX6xlYx2IxZTIZ9Xo9Y4xIqS+XS6VSKQN
xn89nTCjsGb/XrXuAAeRQ7vd7+zotQQFuSfrwww81mUx0f39vTCx6XhhqagH2+70VtJZKJU0mE0v95/N5PX/+XP1+39L2Z2dnqlQq6vf7BlBo/yuVip49e6bhcKh/+qd/0m9+8xubTUNdABIfl8GcTCbG8MG6Ii/ZbrcWbKRSKaVSKTWbTdVqNesyBNAS5OVyOW23j/M2cGxIZXK5nJ48eaLb21ubJ7PZbPTNN9/YeeVevvzyS3344YcaDAbGiBHErFYrA1y3/iGRSGg8Huvs7EydTkfD4VB3d3f667/+65MA9HA4mKyAzkcUzWezWY1GI5M3AQTMQCkUCvb7kMrA9OIrYOy3263Ozs6s2BeAjsViqtfrJk9AmoPfIpAGWPhZmF9qZJibxLnn2QiICZI2m43NN6LlsCsPIjigIBypBIGC67Nh9vDX/Cw2ze+nUJxmCd71bpeHkR5GehjpYaSHkT9/jPy+Gq7AZ5999kPw4s9e//iP//gZzA83xds4jCjG6d4kxWccTBd0MAQ2D+dHAafbTcllC4vFojneQqGgSCSiTqej1WqlTCaj/X6vXC5ngBSJRCylyAYCMsgx3ALjYDBoBxHWEceOJATGgM9iM6PRqCqViskr6FqFjleSsXwwCNlsVrVazaQm3BPruFgsNBwOFY/HVS6XjQ1zGclKpXICqty3q5Wl5SoSA54Dw2m1WvZvnU5H5XLZnMVm89jN5nA4GIu1Xq9PWs4iOymVSsZWxWIxSysDwLvdzuQnLjMDaxSLxSwt7/P59PDwoPF4rOVyqV6vp3Q6bX9ndgOH3z3YFG2n02l9+OGHkqTnz59rPB4bW8HQvVAoZBp4wI7DNZlMTgo5s9msnjx5crI+pVLJWMhKpaJOp6MvvvjCgldsb7vdWgtbnAuMeDAYNHYJfTvMIfpz5prADFFEij4adq1YLFrggvZ+OByq1+up2+1avcNqdWwx7Rbcco63262xu+v1Wm/evDHm9e3btyoWiwZ4biqeICGXy9n5eP36tWUAOGfpdNpm0FQqFd3d3SmVSqlarVq3Ith3Aj6CGQYvUnOx3+91eXmpTqdj8hN3KCTZgtXqOLj08vLS9gYnyxlLJpNKJpMW0ODckUPxPUi50OwPBgNj2gkWkXvMZjPzXwynBdTdi6CRPaVegJoY7BsbhTFkPgvnm/0AuAFYzjB7QCDh+o9AIKBOp9P47LPP/t+PAo736PIw0sNIDyM9jPQw8v3AyMlkot/+9rf/97uw4CeZw+XqxXGeFPi6BaI8KKDABpK6I12YSCSs0xAHJ5PJGCjBDrKAMFnSsSC40+moVCrZXAPSvP8VLJzcO4cK5qBYLBrbwR9JJtXAISMXQIfMfdAVBZYKo/f7/WZcu91OZ2dnko4FgOfn5/q3f/s3M9DpdGqskCRrRQoziIM7HI5TynEcsICkOrl8Pp/S6bQZR7/ftwNIAfXd3Z0Nq2s0GsYi+Hw+K7pMpVK6urpSPB631CwA6M7dwMExi4GCwsPhYO1aO52OZrOZyUlg+jqdjgKBgDkEimUpnGS+DM+SyWRMZgED1el0tN/vdXZ2pnw+b9KG7XarVqtljkGSer2e7u/v1Wq11Gq19Omnn1rKGxuBIWQ44WKxME0xLEk2m7V76fV61haWgAnHf3Nzo08++cQKrAk+mP+CE0faU6/XlUwmNRwOTR8Ne8MZYB8Aamw2HA5rNBoZq5zP560DGFIAtPcugNMxzN1LbMHV6iMbWq1Wenh4kCR98skn+vTTT81+M5mM/vjHP1pL58PhoC+++EKRSERXV1cql8sWPLJH/J3idPTZnJdvvvlG+Xze7osieWpPttutsbAEkO12W7vdTqPRyLpFIX8A1GDPABqACHCgyB8WzGV7CdKCwaD5LP4NAKaAlxkvBB8wrjDtsJCbzUbj8dgC3mg0antGBoFABKCnID4Wi6nT6Vjba2o++B34LPwT0i2YcAJAfob99uq43v3yMNLDSA8jPYz0MPL9wMjvkxX++dzXO1yuk41Go3ajPp/PUrosKg9MCpafQfIgyXSXpCsBHd5oXQd/OByMDWNDWBBaS6J1XSwWNq0dNmI0GqnX6xnLR4Efn5XP5607z/n5uTlXGEBSkhwyQGk2m510ZKG4sdVqSZKq1apJIdbrtRVA0l5zv98bg8nb9WKxMElENBpVsVg0p46enSJR3vDRVaMZ3+12xhwCAIlEQo1GQ41GQ9vtVi9fvrRDzbBM9NSLxULT6VS3t7c2g4O98Pl81j2oVCopEAhoPB6bPaCpv729tVaibiABCLbbbWP+crmcDakMBAK6u7szm0DmkUgkFI/HrbNVKBRSoVCQ3+832Ua5XLYhhTCJMB/r9Vq5XE61Wk2hUEhff/21AVqtVtMnn3yiarVqmnNsPhqNqlwuG2gSuLBn2+3WCtKbzaY5xM8//1yRSMTaGqdSKZMDYPdomZvNpv0utxCUwA05jvQ4MHA0GplEJR6PKxwOG6sEEwgw5XI5LRYLPX/+XMVi0SbOo9VvNpvmDGHV+HsoFDLd85MnT9TpdPTP//zPuru706tXr4w9urq6UiRynBFDDUksFjN9/X6/14cffqhf//rXtg+r1crmh7RaLbPj/f6xVSuBKDUcOD2CQBxou93Wzc2N7dFmc5zfgpwDH+H3+63rEeCDNMhlSvmMxWJhbYoBZPwbTp2iZ/TuMK0ACLIQzrDbUQn2HJZ4NBoZcBwOx2J5gBQwoBibmUPdbvekvgCm3ZUMwdhRZE8htcv081ze9b+7PIz0MNLDSA8jPYx8vzHyJ6nhQv4As8YbXjAYNEeDNhr9Nzrp7XZrDGA4HLbPIh0oySQW6KxJA+OoYTK4GD7osliAnVtkCrPS6/UUCASsYJGNYT5BOp22t2/0z6Q8Kf4D6OjK4/cfO8zM53MVi0V74wbYSFninIbDoa0L07TRHKdSqROWZjAYaL1e2/BAuiqtVis9ffrU2AW6zJDuXq2OQydhqdHrIjlxWTQ37UrnFtaNIk8G7pGaRu5wd3dnXYMCgYDy+bwFEDAFDPRrNBpWMHk4HFStVs3pkJbfbrfmjPP5vEajkdLptHq9nrbbY9cn2gXTXpe1XCwW+vLLL21eAmwRwQCsFAERNkfxMM9Hi+ZCoXAi4YB9BDhISdOud7c7DvOkW1e327Vhhblczpw9rPNgMLC2z6TYb25urKsW8iOXseYeOQcwfOz7fn8c3Hl3d6d4PG61Eq9fvzamrlQqGcs7Go00GAysVTDBIBIAVzMNYxsKhXR1daVGo2HsMg50PB5rt9up2WwqkUjo6upKq9XKpCmcNYDA5/Pp7OzM2FIK/rFNOk6Fw2G1222Vy2U7T51Ox4AQNu7zzz/XbrfTRx99pOfPn6vb7dp+uM7UDUYnk4nq9bpCoZDValQqFYXDYUUiEQv0aHncaDTMb3U6HVsjSXYG2S+3EJzAeTqdqt/vmy31+32TbTEniYDVPcvsfzgctsCFsx0MBjWfz60bE8HMZDI5ARgYUgI0WEiYZXwGMjHvevfLw0gPIz2M9DDSw8ifP0bSoOO7rp
+saQYggmGThoPB4i0dJ4yOGRaBnwc4EomEOV0OOs4abTrpdHcRksmker2eNpuNvWG7bFY8HjdGCR0yB4auShhCoVDQbDaz1CgOnZ+BtcNpouuMRqPKZDKaz+dWuIdzqVarZrRucSBMFGu0Wq20Wh0H51G4vN/vrQvPYrHQ27dvlU6nlcvlzLCy2axt/HA4VKPRUCQSUb1et58nPYxumXkaPM/FxYV8Pp8B3HQ6NcNijXkmahPYG54DJopnIZggHQ1zRecqilFJqSM3SKVSJtcIh49zSh4eHozhgBFJpVL605/+pFqtZvNGcCqhUEiZTMakCPP53BzVarXSxcWFMZFPnjxRtVrV27dvFQweu3FFIhFjCdH1wiiHw2GTPiyXSxUKBW02G7VaLZVKJfX7fXvubrercDisZ8+eqdPpWNDAbI3r62tjXH0+n0qlkmq1mvr9vhqNhs7PzyU9ynIIIGj7DEgDikiXYOzOzs5sbs9gMFA0GlUikTDdOzNpkLUsl0tdX1/bvm63Ww0GAytW73a7xtYvFguVy2X98pe/1O9//3vl83nNZjMNBgOTPdGtDJnIYDDQxcWFEomEFRmHw2Hr8AXwr9dru29J5hvq9bpKpZJpsMfjsZLJpElGaHl8OBwLf2E0kRjQWhqbI6CCOUMegSQCSQIMLT5kNBqdFAIDyovFwoZ7EuBgOwTD4/HYiv1d2RlBNi15eXa+j9oYfpZGQh9IAAAgAElEQVRgDB/E82w2G5PCRCIR5XI56zBFYOKycwAL98f/8zXv+t9dHkZ6GOlhpIeRHka+3xj5k0gKMWTXSfF3NgD2zH2rhTXCUaNPxVBhyVhcJBc4dUm2QbASOCvSm+l02tgOtMb8fpdRcwuX2cj5fK7hcGhg0ev1rLC02+1aMajP51O5XFatVlOhULCZBBgyAMEbNGlNn8+ni4uLE/bYHQqIfls6AjbPTKFvNBo1ZuPp06fGOtApqNFoWAEjvw/giMVidigoEsRoU6mUGR8OdLk8ztpwO0oVCgVlMhnbG4qRI5Fja1GGB5L6dpme0Whk6wGgwcSVy2Xlcjlz5tzLbrez7kM4coqUGXI3Ho/NaUiybjyAQTAY1NXVlUKhkIrFoh16isjX62Or43g8bvMeKNx8+/atTY4fDocmtRkMBsagzGYzayPMPUYiEbVaLWPper2eBUGHw8G6ja1WK2NaKHz2+/2WTqdL2O3trRXBU6wJY0dws9vtjE3EYWy3W5VKJVWrVT19+lSlUulEm4wNwPTB8IzHY3U6HasR4ExytpPJpAGoJF1eXqpWq9lZo+NYPp+3TlKSTPZCAAYbyP6+evXK6gTOzs7MYa7Xa3U6HTUaDbNZipz5LNg89PkAVqFQsCL+1Wqlu7s7s3kYL4Jdl9lLJpMajUbq9/sWYGDrMKfBYNCCPwIk/EMwGLQA2mXb8EFuy20Aw+/3W0czSVbjglwER+8y58ggSqWSJFnAACiNx2MlEgmTomEjZE3wjRR6A3ysM+y2d73b5WGkh5EeRnoY6WHkzx8j2bvvun6SFy6MwtWPf5tV+Da4oE9HYwtrwOfxcHweGmtS6aRROegUZ7LYfA1g2e/31vYWJpA3aelx6jT3ig57v38sCAwGgyedeHhTJpWM4+/1enawACfS1n/605/UbrctDUpBMqwGQOLq9+nw8vDwoOFwqM1mY88bDAb18uVL7XbHlr9fffWVdSFCq47DRiZB6pWWrBSHSsd2wzh99LiA4cXFhQ3cRFfLzwHirCldbmAzKMrEWUynU61WKzssdLxx9fPSIzOMVGM4HFp7YBg2utjkcjlj63AsOANqDyjYTiQSJs/o9/vWsWqxWOjFixe6uroyZgknjz4XB8kh456x+91uZ8W5SC1g6mjrezgci2UZkOgWegLK7MtqtbIZK7vdzlgt0thusBUMHjsSoc8mIJrNZqbNp3tUIBBQoVAwB4qdZ7NZVatVmyfT7/dP5qlw0S4Z9jsWi1kgEQo9zr4ARLEjzsl4PFa73VaxWDTAxOaDwaDZE0wutkjBKoDA3iKvgeGnZiWfz9vPwLoCCJFIxCQFoVDIZu+Ew2Elk0mVSiWTl+CwN5uNOXtsCnkRQZsbOAPWrDtgzzPBBJK1wIZDoZBJuWD4OQfr9dokZzBuAAFNCmgwwGdhX0gyCEj442ZR8C08C4EG/tK73u3yMNLDSA8jPYz0MPLnj5H46O+6fvQLF04NJ8WFE+UNGGeH9pnv4TNYDHSibuqQQlYOLRIGDIm0KBtC4RwbV6lUrIMT7Bgp9kqlonQ6LUkn0g6cL6wU947+lA5D2WzW2pTydr/dbpXL5U5as6LNpjB4OByaQ1qtVna4eDacA0YHE8gQQwor0VA3m01jysbjsQaDgTk3ZA4AMgWjOHpXH4z2HObQ1dU+PDzI5ztqtNGsY9SwQAA5WmmcN/uNfAImEecHk4DB45AozIRZJyWOrr7VaqlSqVjXqGAwaAP6cDDcPw620+mYk8dRw4xIR83u7e2tyuWyrq+vVSwWrUOS29UL1hknC3sGSwwz7Pf7bbjlYrFQvV5XoVAwpw+TAxi49kgg4AJaMpk0CYnLuiHlIT0P00qKPxwO2/fhEHe7Y0FtOp02CQbn2OfzWbtgHCOyGSQd2+1W7XbbnNpqtVKpVDJGrNPpWP0GzwArhcwHtgz9NAwhbXG3262++uorq0fAZ0iywGC321ndBsEj60VtCd+HzQHMkUhEw+HQ2LhYLGbrB7it12sLHGCEeV5XpuLWSOCzkIEQpBKUSPr/27vy6Lir83pn31eNNBotlmzLNsY4psQrhgSzGBuMDYQlTWh6TmtImxQCDUsoISVpSxsOZGvJH06zkBKawGFtWVowkBwIa8DYBmyDLVn7MpoZzaLR7P1DvZ/fCFm2QA7YfvccHYw0y+/3lu++3333fZ+cy2HfUFkkAdAaxjnMNic5kiDVv1cqFcTjcVFG0+l0lY2N446LYMZX9bNLpZJ8PgmFcVFjetAcqTlSc6TmSM2RxwdHst0nw4ykhVfVJNUWofrWeVEMjnx65JO6agtgsGFnTHbYmCTF96mV1jk48/nxCtrNzc3o7++XA45UnvhUzBoDJDn+l5PbYrHIFiT9nlTwOEl4PQwyDocDFotFbA5UKT0eD8bGxuthNDU1SRDn5GQAYGBQi7mR5KhccNuVh15dLhfq6+sRjUaFSDnwAYgKQ6IGIBOd3+/xeGAymeTgc7lclsOq2WwWs2fPFrUpk8kIcXPLmJ7o2tpaUTC59cogw+DHH9XioZIblZG+vj65HuCAauT3+8V/z8nMBQy/gwRGdYwKELMDsZYLbSQcZ6Ojo9i7dy+sVqu0CRcaVKl4IJpjiQGXWXH4O47PZDKJmpoapNNphEIh8YPzWmmboX0kmUxKdjEeAOecYABS5wAXDDwYSuUomUwKOXPBzIxXtLLwfWrbqxYim80mJErQqqKqj7xf2lqY6lb1wVPl4pxRPd1UbklY9HUnk0lZLOXzebH0FItFsQtwsUerDOMSxxTvmWc0eHDZZrOJ2uXz+aoWtlSz2ea8Z
+ [notebook output cells omitted: base64-encoded image/png payloads for matplotlib figure outputs, together with their display_data metadata ("text/plain" figure placeholders, "tags": [], "needs_background": "light")]
ewiak0EbQUzGL/AGIAfbfb2Wq1cuKo1+t+nC338mb0VqtljUaj9D2gEwGC76JsaEMJCdlEQ1IA1EgR7VWrVW9Dr1eDIyKmEUXq1Hp1qV6X/jlSV0E6RQqRyBQoo45oL44RWWpEilSUmIpBPaQ6qEOrfJAXMqbf9D1eW6vVPFqKDoui8MmFRmDq9brt93v/zsxKaTMKJEqGcYzaP/VDzU9X0FZi17z01G8lTLUJlVskEp0QUBT81BbUv9QO9boIihEoGWe0k5i+orakaQwRc7TfjJ9rFNhpR+1WdRQJKMo3kojKREmLz2J/1J5V/9pOHJPiiUaEc3l6yRxZth/8NXNk5sincOT+/nRL3sOWOTJzpOpL7e3PxZGph0DKszxwqdNFw6CzKQWoc0dhKpFwrYKn1p0yYOrWiJEKMSUY/UyfdOMY+E77pk/KkAH93W63tlqtzOzhQxifmZk1Gg3rdDrWbDZLBsLSOtfh9Jp+xnjpU6PRSEZGVDaqC8al7Sow6Y/2WQ1XCQu5aN5zdHjVF/Vpe6moDEXTQaLs1Qb0PvQfiVivZ+w6GUHG6/W65JzUr+Adoznq+JvNprQhV/tIUfmxygWZcC+yiu0TyYo6UbslNSdFzKmJBO3pA6HqV+UXJxJKrlEPagtRzmZHouS7er1eWgnUvishRpmrPGIUDZnESWoE/Ig5yIo2wRjtk06C+Ju6I+FR9O9ToB3JKZboW1qv6lk/j3gdJ2d6rWJiLo8vmSPDClmlyBxpmSOfypEFr86+bzNzZObIWP7cHKl9jOVZHrjUsU6BU+ppUwWj1+iTdErZDAqgLoryE7YamRpyVN6pp3X9jChTVIwqSI2Yz5fLZemaZrPp4Iwj8+LGer1u7XbbP99ut9br9ew3v/mNfffdd9Zut+3HH3+0f//3f7fFYlEiOGSgYKZEBcCYPdxDEJ1MI02RgKhXgYrf0cCxA42UpCYCati0QxqE2ouCZGoioGPnOyVerUvlxn38DYDxv/Zd88nVbuJnKfvQCYGCkxIi7TSbTf+b/kPK1Ae5YD+LxaKkNyJ9OgZIvdlsPojCqt1qSo1OAoimqw4AeE6uon6NDqtNqP1EffC/7hWgAMpx8qb2HCdHsW2dZFB/TFPR9qgrVU+sP9pUnLRq27FEoNfrtE5KjLohd+2bErrKT3VHWzEyeQ7ncvnlJXPkPY7w2W6fOTJzZKlvj+FIVriq9ymD9D9zZOZIykvmyGd74FJnj8DCAKPDKVGogxFh0Dp1wDFCoYZFG5rnrManT+6RSCipHGk+p02tlxLBoiiKEnmwNByjGoy5Wq3ab3/7W/uHf/gH+8Mf/mD7/d5ub29ttVrZdDq1oigclGJ6idatBkP9qo/o0NE44+QAoEKH0eBjRNCsHGFSwjMzBzt17EhiKZJPRf60jagX7CmOHbBDvxGAAN5Yp/ZBZc7fCjjYIP+nUltixAjZIFNIkToqlYqt12uP5HU6He+LApXqSu09BcpaaFvHovfqJEGjfDH1Zb/f+8liGtlS3ahe9HsFT+Svk1RkoZOAKMMI7IwpgrRGXeMkKeocm9C24qRV+6WpMtQTT5SLslHdqM3q2PRv1YHaoH6+2WySJKbRR/4/RRwR43J5eskceV9fpRyhzhyZOfIpHMl4asI1mSMzR+rY9O8/B0eeK8/ywBUjC/rkbnZMdYgPJHFyyQBZJjZ7CNwp41MQVINKkZBZOZKk4BQdPxJJXP7EgbSvqrho1ACmRpr4+5/+6Z/s7//+763f79uPP/5o//Iv/2I//PCDLRYLjwSqvFSuRXGIBOLARGAiaGj6hjq3Gnw0WnQDweu9Wn+MBjEu+qqOTV9iVDE6VrxG/1Y5KjjpeE9NDk+NFdtdrVYlmcT+UreStYKjjol+Rp9AlpvNxnUMeTD2Tqdj0+nUOp2OdTodu7m5cVtUG2y326UJmOb/0xfVJaCmUb5o6wqk3KfXq5zVVhhzBGUliEhUUW9cx+lO0cdSE03NBVe/0/aV5Ima6+Z89XvaUhtQn009hChB8XnEGu1LnHxFIipNVAOWads6ZnAiNZmIPqj60PqQEW2p/vID1y8rmSPv+3p/aIYV6Y3omSMzR2p7kSN326OsMkdmjnyJHHmuPNux8FrikzIlGqFGycweOr9+HzfxaUkNWBWsf6sxRLA3e/geE75PRQKJuKWAiToU/NkAyj0AVK1Ws6urK/vXf/1X+7d/+ze7ubmxRqNh7XbbWq2WG308zY46iPAAsnqiURyjgp0akkYmNF2FehW4ozHzE50X/aXu0z7xvYJYrAuZK3BrH2KEIqVbnYzo/xR9maVGCrUuPo9AoxMLnbholFUjJKvVylarleuVSI7aq5n5BIHorRJ4tXpIR9ntdn6d6leBV8kiZZ+aFx7ll4qWoQddmtdIo0bzVA6pyZBGmKhXI1unwD1GxtS3VVdqc2bHHPhT+JMiv0i4gL/qIfbp1ORXr4v/077+rf1XncUJt7bD2JmYoyd8W+0jtqP/Rz/N5XnK3zpHmowzc2TmSLNHcuR9SmFFuCVzZOZI7f9L5shnSymMf2tH4/8KIAoC6rx6T8ogSuB9XzQ6pU/R2gfa0TZV+WqoKkSNDmrkTu/jJ0acUCKGx/Uodj6f2z//8z+bmdlgMLBut2tmx/dnEP3RcQPS2p7KVMeu7aosAKKoS34igSuwqmNHMj1lfEo8tKsRIZUJ12jEyaxMempH9EntQyNmGkVkIhDHmwIuBVO1S340OkPbeuIQ7en/6/Xao4TL5dL3HGgkstFo2Hg8tk6n4/cowDQajQd2eMrG6Dt9UJDX9mazmY9Dc8lVzwpkCqbUlYroQlYK9FqfpmJEH1LggzQiwAGUUWdxskI/sX1NL1Abir6iE5nYR5VRtBHGq2Cv/sTf6lcpbNLr9EfxRyN1vNxVJ+JFcXxvDf7AGDQNLEZd1W/i5CuXx5fMkff3hQlT5sjMkU/hSOqvVg4Hk2WOzBz50jjyXHn2Y+EpCmwqoNQ1Kkw+N7OS4er30WhP9SGShD4I6XfRcFLGFIEVwWt0hrxg7o1P6MhDjQtgI92hKArfYKnGFx2bCJ2mRmi/tZ0of61bgfLUpED1RElFy2K0BLmoU+jn6syqG0okFtWZAqL2/1zUmO91NUnJF4KJS9yxnthW3EAdc99Vz+v1uvSzXC5ts9n40cYAY61W8827rVbLms1m6QSoRqNhy+XS+6MTiGq1WkqXiPbPb6JOajtMUrDt1CRBbUHBDmLQFAedjEFumpag0djod4wzRq11MsJ9qh+1pdSEVidDii/UEfvAGGKELdrZOQKOQKyEqv/HvurkIO53SPmD6tDsGI2lPsbB5Ez9SmUY8VgntLk8vWSOvPdvmdCluCdzZPnzzJFljqxV7/e9VTJHUmfmyJfFkXEcWp4tpTAKBUXpdxEUtaMUBSQFCR2wGrTWo4bCdZE01Kli3059pkpSYFIgVIcByNV5tf9q/KokIi0K9lEmGBJH2uomTmRmdgQ4NYAUEOh4iuJ4ek/UoZ7AoyCl5ZSeVObxM3UElY/KPMow2oROXKJTxsiV6
hiZY4Oa5qM6iEQWN5urLNAnsicHfb1ee3rEcrm05XLpefA8bJHqstls7O7uzt8Ps91urd1uO3nQtqbeqN1jI5pnT1ESV9mSz4z+GX8qhYVxa7tRtuq7Uc/6fpcUycVr9LhbbUNtRfUb7T5OWGO6TfR3jWzpxCGOOU4yo99GTFH/UyxRuwboqTtGraMuUhindqykrragMjlFehFT1fdzeXrJHFmxbagjc2TmyKdwJDLabraZIzNHvkiOPFd+9QNXBAqK5peq8+tA1CHjtQo4qchZfDJPCSQVDYh/IygFykhQ8bdZ+cVn9FPrAeB13GqgGkFBXkoa+ps+1et1j9ZplEUNWfuCU6kjaDRJ5brfH5bIiSAyPo1EIS9KypmiIyIjfqucot70vjgOrtX3XdCvFBjQZtS/gor+HfWhfaPvyBxA1igKK5u73c4jJsiYH81Fh2CKovDjbeNDupIChLNcLkuROewhRml0osP/Cra6qoaekS8vmtRTtJAlf2uqDbauNhhlHMen6SE6GYq+q6CpNl2pHN+5o/qLE5oIhhG4I8hGX4o2pEV9XIv6QapoX/lbZak2yXjjmLg2ksKpyaLKGP9Q4tK+Rb1FHeTytJI58shRu+CnmSMzRz6FI2mrWq2WXiOQOTJz5EvhyFNjMnuGB644kJQTpwbM51Ho55YkIxlEwDolZP0+FZ3BERDyKVJTBSsYRkNQQMLZtR7aTCkLEqY/Clw4kDqv9k3HkgJO2mIDJsSieiOKo23r/qJISgqAUX6qN9WXjl0ncCnyiBORaFPaf72P8Wr9sR4FW7WpqHeVMzJR8iQVQtMwNCK72Wxc5kTvIJhK5RiB5XqNqu12x82+/X7f/6d//M0m10ql4n3RsWu6q06ezMyazaan6GjRsauM1ZdUhnod7SmAIRtyp7H3qLNYl04+daKmNpJaeUEWOtml35EoIVKVW7S/OAFRMsGvIkbFFQz9W6PWSq6RUBX8T/lWJI5IcHElJI5P740yjP6Ry9NK5kjZ9yNtIYfMkZkjH8uR2JTqO3Nk5siXxJHnyrOkFKae2mOJAzAr582eW4JXx1KQiMKN9/IZQEe+ry69qgOlhE2b6lQAbbvdLgGJ9kfJJzpBJF6KRkS0H5GYuU+NXr/TfqqxEE0iz5loUlEUnv/MZtaoW50kYPjaLx17lOWpfuqPAmgkB+7Vcev7GtQZVC76uYKS2TE3Wx1eIxmAVZwM6LiIwEW9qby175VKxcmFPpEugQwrlYo1m01/UWO9Xrfr62u/ttls2ng8tv1+b61WyyqV43tHkCOnOwGQjAfQ4loAsNfrlZbmiSbSPkSousJeVe4awWu32zabzVwm+l4Z/KBaLb9PSP0ggrrqj8+Rr6aVaERO9Ug98Tt8WSc3qUml2rnWpRM/tVk9oUnbRx5qp/q3TlSUDCOgp/CUkpp8IVfuUXtGH0r42k+t8+cIJZd0yRy5cb/mPngxc2TmyMdyJOPlZMrMkZkj/5I48tkOzdCS6qwOUKMeMYoVAVYHBgCcAogYRcB4MeTtdmutVssWi0XJodVwzB5GosyOx9Xy2+yQH9xsNq1ardp8Pvf+0abeH0mK3wCYylCXrqNc6TPXKVGpLPSEIYx6u916jjSAAzgoEFBHyhmiY+vYoqPqZmUlUv0sEkkknpT9UH9Mo4myZuzaFg7D5ypPJQTNW1ebVSLBhvlc71Hn16hcvV63brfr6QoKtOgInS6XSz/6mE22amdqY6oztRuivfzoZIH+qF7RG/qFZPBVff8P7wAB9KhvOp36hmZsGZ+KLxnU/ujLKFVvatMKcGrzmrZSFIeoJvYRbVX9UnWfirSp3Wk0W9tCX9gx9aJLtTXGEP1MJ51q32o/MSKLf6rPxIkVnzMGfbkudcb74uT+VL25PL5kjjxgl070daWJzzJHZo48x5G1+kHfjUbDms1m5sjMkS+OI8/x5H/LoRmp/7VjCIBO8pCiS48ogbqiwCLInfoNcKgxc/QjCsQxiSaYWenzWIe+wwNAbjQa7oQ4cLvdtqIobD6fm9kxx1lloBE2BS6VHzJUsMLAopKjA6l8id5plEPbw0EYv5I+8tR+x/FEAuDFeZEYkQ+OHaNtWhd9U9lE8o3OH6MdEfBj1CkVeavVaqW9VXpiDVE7BQm1b50gqUzr9bo1Gg2bz+dWFIW9f//e5vO5LZdL63a71u/3bTQaWVEcoqn9ft9ubm5sv997zvhut7NOp+MrYEVxOLGr0Wi43jTSpnZbFEUpj51ooMpZN5KrjdXrdWu321atVv0lzZXK4WheZLZarazT6bj+Wq3WA9DCPiMxqP+rjejnKlPsU+1d/TliCSCqKRyqe/0sRtV0Ykb7ShqRcOLkWscV7UN9VydO1KM4xTVqayn7i31V+464eqoP/GgaRy6/rmSObNhGeCBzZOZI2n8sR/a6PTMz6/f7NhgOM0dmjvyL4shnOzSDQasjmz08UQml6/dxw5uCVSQR/czMStGFaHBmR6PUZV82vqaAC0Ur2AK+3FOpHHKK2aip5/mrEdO+Rv60vUi89E2NKipXDSxG0VROGIyCn+ZM01eiNSoD2k1FP6if/zVKSV28/0LzkBWgUuNGrkpY2lfapWgUAluJ+eEaRUNufBaji+ieyFmn07FKpWKr1crm87ktFgtbLBaehkDaQ7V62LyrY1U5QkwAGuMkurTf763X69nFxYW/5wNyGA6HHiFut9ulNEMdDxMhonlMXjRaqpFv5IOesFMIiT7GSDnAirz1VEVSPiCdeF8k3liXgjMkx7sy0BmTE+0juuVzvoMYqU/TQVRHXB9JAP1phFTHwHfq5xqFxG7pF3aj6aOMCTkr6VMUD3QDtE7mdEIaCZgftcdIhLEopqusNKqXy+NL5sgjR07v8aZer7svZI7MHPlYjqzV7lcy7m0tc2TmyJfGkazup8qzHJqB4PhfSyrywzXxiVZBSgVGG6oIfitZaf0YeCQhjJj7lMj0aX+/31uj0SjVYVY+jrXZbLpB6XIx0SIFa3LC43i5Vg0jZfRqyDgHAKERGl1aNTu+rZ17WN7WaEUcPwbJ3wAhzhBBkvuRB58rqNN//o4EriQGyGjkgWvVPojEofcYZYFI9TrAv9Vq+Xtd6DM/yGK73TqJTCYTu729fQCK3K+kqvLU1JrVauV28vbtW/vpp5/M7AAYi8XCRqORrVYr1+3333/vE5eiKKzb7Vqr1fL3kiBjxq4TJ9IrmMhwXbPZdDuh74xT7UllTTvL5dL1QmRTo0tc0+l0vD/IX+1GI+Cbzcb7p7aEPKPfRBJivLqZF79TEEVnqt9q9bDHZD6f+//YeQRcJacY4Vfw1zZpTyeiiovaN7V1xSydaCk2aR3q7+hH/4+TNO2zYptONGhLx6U4n8vjS+bII0duZRUGP80cmTlS9XCOI5fLw7Wbe1vJHJk58qVxZOoBjfJsh2YoWJiVBa1PlvoUqYDDwBRQoyCjkjQaQX0IQAdNX/R6M3vwslmMkX7wGYbAGKiTF87p+FNPxEQd1Cl0PEoecbWM/mjUAECGGFQPGqGbz+d+LZ+f
ijKpsdEOuiA6osCtBsiDppnZcrm08XjsdkDECdkALEqggB5kBOCblXNwFYxoW51b9agyYpz1et1arZa1223rdDqeBqoRLu4njx/bBHAqlYqNx2OrVqueDoCdYif0j5OWIBG1oY8fP5qZWbfbtd1u9yClRidReoQuwPTq1Stbr9c2nU6tWq3aaDSyzWbj0Ue1CQXBojik8WBz1WrVN4KbHUCJQzu4F3lCRApi6/XaSSmCtvqXRsV1fBph04leihywYY2SUacGOLA1bKQoCut0OqVTpiJ5qf6RGzhA5A9b4G/kpBO57XZry+XS2u22bTYbWywWJb/ChxTf8C0iumpP2LaSjxKuTvgi7ukeHCUtxRHVWZywakFHSjC5PL5kjmT8D9Mo6WvmyMyRP8eRu/099teP9pI5MnMkun8JHKkPl7E8ywNXHBidjWkM+p2SiD6Rch0ABhCqM5hZMhpmVn7zOu2jTAVxQIMnfzUG+qBP8hpVQ+gYDdEH2k+BvxoqfTazEpEocagyGYMSoT7B63f7/SGSqJEWlvb1iR1g0LaRK+0rKEXC0ze6s6SsZbFY2HQ6LYEwfaF92gV0IJV2u+2nW6FHxogesJ1UREHzx7GHdrvtm2vb7ba1Wq1SxJi6N5uN//D53d2db3KlrtVq5XnaEI+CPvbKd2p7vNBRD8tQoOVvUiQ2m41Hy5B7rVazq6sra7Va9v333z+wAQCKKC/+sVgsPC2CyBr6ZVKAjLlPT4hi2V8JQAENW0O//ADk9FPtR4FKgxb6vcqKvuuEBEJReeqkUXEHkIYkICAlL65XAEZui8WihBm0T906Fl6+ij2aHVMPsBUIRHP2sQFtXwk1Tm6VHNQGtXBNCoeVVGhfr9M6cnl6yRx54MjCyuk3jDVzZObIx3Ak46gUx5P+MkdmjnxJHHmuPMseLp6EGYQSggIonY3RHwVwMystreqTvAovLhtyvxKILhUTRcDRNLpDfjFghzPxgsNouMvl0iMeCsT8z1I19+oStBoHfVZjVrmhfHU2ZKDRO3VYfjM+ltbp3ynni/neyELlrASJE+IsGvlGdrVazS4vLz0SZHY8zlQfTHe7nYPrer22+XzukQzq5ndMPcF+AG8iZnxH/9nMSt45JAAgqI1CRvV63ZbLpZ96hHyIfh1SHJalSRHjjNE6CCraCTLGxtExUT/NY0fPjUbDbm9vbT6f2+9//3tbrVb26dMnlx92hmzpF5E/+o7tEfVjzwUnP2nUB/us1WqlcREtxMb5DFlyH0AY0yaQDTpQIsCHGo2G2xCfq8+rXSoBKfHirzohW6/X1ul0fO+KRuzQoYKyTgz5X6OWy+XSiS/6MBMz+hSxEf3rCgJjRX7IVNMZdDzUF+vWySxkrzgUZae4oxjLdbk8rWSOLKcfMabMkZkjn8qRKt/MkZkjXyJHnivPssKlkTM6hMPEa3SAKFENDEdigCpEBqrLpHxnVt7IpsvBMSqnyuHhqNvt+hIoQEvBsVxo93XzdA5IxOuVmKJh0jd96o4yik/iEZxwDo24cZyt9ol+ahQslWoRjVJlhU7ol/ZDJw/UoXn7/K0bhReLhdex3W5tNpvZcrl02S6XS5vNZh7dQH9m5vcqMSrQAhCQWqvVOpxw1Os5qWgaQnQYiABgxZ4qlYoNBgMbj8cOGrPZrGTrpDLs93tPYVitVrZYLNyRVQiCZAAAIABJREFUlWgWi4W12+2kbDWiggwA/KIobLFY2Pfff2/fffedrVYrm06nLgN0gV1Qd6fTcfkjI3SHTyrY6mSFOpmwkS6ifqd9jZMjdIu9xUkSn+skQ9Mt1DY0oogN6WRT29bC5BS/J7KqMsM38Vvu08mX2fGdNPo+F61DUz6YpMT7mfDSpral/+Pv+GkkInya8fIZeqe+uKKhZKSkyXc6GdFrc3layRy58fb5XtODMkdmjnwUR+6P8s0cmTnyL40jn+WBi8YZQFzCRql0CEXriS86IFUGAyLyENvVJ2jAixJJgT5G4+Y+XkSn4FQUhTtwpXLYUDmfz5109vu956Kqg2jb+kSu0QZkgzOoE0W5ae6zroQogUynU3c0Uhi4l6gEulKDpS410lOREU8NKQrr9Q5HtKrMAWiIlLoqlYqfJoTOkCd1LZdLq1YPuenT6dQuLy9Ly9n0XTe46phwTiVpxqfL2jg1QBLTGdA1aQ1qmzr+SqVil5eXNh6P/R0z2NLbt2/t06dPtt/v/QQniASARAbITNNxACLIiAgzIN1qtWw6ndpkMrGffvrJLi4ubDweu+6Rv0bVFDy73W4JkJgYNJtNnwjRvm6aXq1WLsf5fF5KY4i+hg4UlDUaHaP2EBUTK9WL1qHXcz/fk3qjEWBNHVFf6/V6btdE5Sn4MwSJ/4IFSvgxlQM/QacaZaMv2KuSBH6j0UHqpH3FJiVvJTMlZ3SlkUHFaXSSIhXVDXXqpDKXx5fMkQeOXIttZ47MHPlUjmQP194sc2TmyBfJkefK2Qeuoij+TzP7//b7/c3ZWmTg+nSJc9JBOqwTS1U8AKxgR12AAm2pIWF0GskhMqhAgkBRMv3FAIvicLIMb5eHvMiV3u/3TiLb7dajLoADCqcdBTOu0ydqM3NjikrlPmShMqY+TvXhKNZarWa9Xs+63W5JTjg1YEH0BkfHSPlbl++ZGJB/zTJ7q9Xygx50UgAIaBQDfWhdAKk6Y71et36/746hdtTpdNzWVquVR46InCA/jQqrnjXComQcSRQnwy6J3EEC+tlud0zzAPQhE8A2FX3GBgF+BX1kgT50AkFfIaB2u23L5dK+fv1qq9XKut1uyeGxKyUmBbft9rAJFX3xuUa00J/6FnrWKDZjIpceclL/UBDTyaX6HbJW7FD/Xi6Xrkd8n/6Ylfen6KQFuTJZISKmUTuO2dXIJzJRsEZnihXYB/iGvJjI0S/6gK1Wq+WTTcGi7XbrtsV48CO1UZ240mftu05S4wSffigu4QN6vf7WaGMumSOfypHaP3wtc2TmSOz/5ziSFa7CjoHJzJGZI18SR+pDeixnH7j2+/3/c+57igI/g4hP8xrBUlDEoPXpkM9TTq5P6ziuOgcvmQNcMRZygXGAoij8TeXcz5K22THlARBF6WaHl+5xLU6Fo3MtY9YX4Jkdl2EdQKx8hC9tx6iGgjNOCrDhjNVq1SNqOCv3M2YMEwDQo1HRERE1NTB9+leZahux39gGfSZaUq1Wrd/vm5m5M1arVX+fh26qZILHAy0n/rApk2jEdDp1OdGukhHyJv1BoyEadUN3erpRp9Oxu7s7j9wAbrVazZbLpednA5jb7da+fv1q7Xa79CCFDtVuzMyjhcib04Lm87nbMZFlbAn7I0K1Wq2s3+/7BEOjxfhSvV53G+A7xozM414C+oj8kUG1WvV3iUAW6/XaJpOJVatVt0etB5vTiSZYoSAOkQLGCoike0Ai9DVGvJlAMsnTVCGNxrNBmsnKbDYryQ0fpS4lSPX7aK9K2mZW2hegpI186/W6p87M5/PSJC+FBXwe/VsJX/0Q+cUVKpUf9cTVBcbI/7kcS+bIp3FkrXo
8XZDJTubIzJGP5cj7M1es3WlbvV7PHJk58sVx5LnyLCmFNKq/1ZF5SuR7BX8GqEpqNBq+/KxLiSokvT8qXyM0mrNZrx/eYk6Elr6xkU8NAHCDRIricGwmQMB7FHT5EcLCQQEpfUrXvkfA16d0vVcn44Aiho08W62Wj5PoVrfbtclkYvv93oFN0xlUX/wdCUiJrFI55Gcvl0vPEdf8XRyg2Wy6/NR40Qmy4Rrkj4PSj1Tkq9FoOMkzeQB4eBu9bvDUaCLL4fSZSAl6Rs7b7dZPj2q3D8AOiNFH6uC9H3d3d3Z3d+fHDAM+9AVZMQZNaQHIdSM3/dOIEe9FAXQ0GoVNQFpqR5AofhJ9FN0gL/wVnbOZlYkRezI0aka92OF2e3wnED6jUapT4KTj1/0Mmkah4MYmaLPyBBBg170ATHohRbCFqDGTGWwB/CBqpmlSq9XKZrOZXVxc2HQ6dRCHCNvttmMSMqAPOtFWDNztdn7SFwXbwN6QAdFwMJPvlFiYKMZJa5yg0yeKkrSSiZJMLk8vmSP3ZoI3ZuW0qsyRmSN/jiOrlXus2x3xP3Nk5si/FI58tj1cNKYDwjARHoJAyBgGhqiRLb1PFcB96vwUBq4n9BABiCCp/cY5eOrmWpaLMWpyv7XP1LPZHE9aYqxEtzAOHD5GUABZlA0Amh0jo/pUbmZ+Og+O3u12PQeatonkQCSMVyM6kawxTI2C4VCdTsdlj0wrlUopWhSdlHErUBGhM7PSyTiVSsV6vZ4VReG6QJb0mZxxXeYH8HC4fr9vt7e3DkI6dvqq4ITTUcd8PrfZbOb6RKaqN8h8MBhYvV73lBqie+ie+9AhtqAkiB0hH9JJut2ujUYjvxcZagQH++VUMI2Kkk4BoGALtEU9Zlaya/TfbDZdJnGfmY4JO2GSpbJTm9VoluoFsgRUkUulUinZM3s00JvigmJIJBtsq1ar2Xw+9xOm8F2N0ilGIUf8RAv6pn0mqkQh8QFSlDgJDXIAx7TouGlTo/yKXcheJy460VYdxcm34gD1xiidtqnf64pMLo8vmSPvOVImReBK5sjMkY/lyL3tSzLJHJk58qVx5KkHZbNnOhZenUKNCwOL0QTu0WgcoImQNWKjUS4AiSVT+kCdCJon+c1mY9Pp1J0XJ9f+0B4gpW1rBATgISoAIO12u5LRYZxKnPrkHiN2CrY4Id/r30rKOAfLqwAqkR2NDDGBTxE+MqVPOIPmwQLA9LXb7XrELeZkq4HrGJEFUTUeUGmT+mezmfV6Pc8hRr+QENEhxgbx9Pt9q9frpXeXIE+NXqgetN9m5iQwmUy8Tsit0+nY5eWlffr0yW1LI8+z2cyX9pfLpZMe+tcoZ1EUvnyuG47VjvGboig8UkcEjwiPRkYHg4HbWa/XcxLCZpCLvrARnyMiRRQYe9SNxfid2TESpcSiAIis0S26pO4vX754FFZ9hz4Bumo72AY5+PQLEI82qylY2icmX/pga2ae9jIYDGy3O6aNaA4646FvOtnBJtAbtkDf6AM2rRNLrk/5uhKmjg+f0MkoOlAC0Uk10VMlmUi+SjA6aUYX+YHr6SVz5JEj1X70oTFzZObIR3Gk6CNzZObIl8iR58qvfuDSzsaIWuwQnymYIwgzezDR1KdbnEkFzI8OUu9TINE8bNrHUGgbgGY5kvxvCEiVQL1EQ8ih1qdvjQwoEWhKgBolT+CMV8eohESEjAgEoE6/FbD2+72Nx2OPZOG8tB+JjUgJUZTlcukArhttAVPq02gl7Uc7AVjQA/aCDjDs7XZrvV7PnYt0AH6QmYLB3d2d9Xo9u7i4sNFo5IRP1ET7RJvciwOuViu7ubmx/X5vr169chtqt9s2HA49d5l7ttutjwf7J51Bl6l1gzSkD7BqlAngBDQmk0np3RT0nTz/zWbjG7uxMQhR5Q+IIj/Vo5LEer32Sdd+fzxtq1arlTbLAi6QEIQ3Go2csBhzs9n0l2Ay3maz6cTIOObzeQm4iDqS0qLRvG636/6m9sVkRaN52Dc22uv1rNFolMg/RqyQna4u4G+kfXKvpg8jXyUyxQoFe8WwSCSMVwkEfMC/lDQiISgGM2GBzOLDPX/TXpxoqQ2pbHJ5fMkceeRItTnkkDnyqL/Mkec5snbfn1arZfV6PXNk5sgXx5HnyrOkFDIoLfyv32k0jKdnPsOoI4jyv9apT7AapUERGlUiwqUbePXpmCdu6udJG+PRyI9GKTW/WKNVOLTZMQ9aFafgrUTM//zodTgIy9eAFk5PvrPZMSoBMe52u9I7N8jBVblqX5ARfW+3256LD6DgMLqxEtLVSKDWjX4BUQCX6AvtEpXSiN9ut/P8aupE7gBVv9+32WzmUb3ZbOY6AHiwMyYO6A0i4USpy8tL34tgdgD6i4sLm0wmnpai8gP4yGdfr9cO8vSXomAAkCB3IsKAPcfSxojtcDi0z58/23q9LgE3Y0Q3RVH4ARwa0a7Vav4+EiYNyIRJG2TPJIl6K5WKR7oAoFar5VFFjfSgC8pyubROp+Mv1iSiCNFUKocNutgxEwLsic/UzxVvdBKl8laCIfrOKVFqpxqpJHKJPGhTU58UF9Q2FQ8iCNMOsiFNB9vFHrE7ZMnnTAaq1aqfhgaJKlGAj0o2fKd94fsYmWO88b6Y4pHL40rmyF3Jzor7PmWOzBxJfynnOJL26F/myMyRL40jI85rebY9XGbHU0oAEAAORWv0qt1u23g89vvMjptdNQ2BAanz6xMw96kxxeVEs2MESDfWxadpjSggaL2OsSjIxWVtjaRo9EAjSLQXyRMFasoE42s0Di8nJPLBEjQgjhzpJ+OChKjf7HjiD+PQKAP9Q2ZXV1ceGTQ7rsTgQGzIRLe0qRMC5ASZsNn24uLCZcMyNH1arValt7NjN4yt3+/bfD63yWRivV7PKpWKjUYjz8EmZUCjMxqZAGTMDidZcerN69evfYyME6K7vr629XptX758KUVSyE9fLBY2mUzs6urKJpPJA3tVAtHoFwTNBIUJhEattB8aScImlbCwRc1ZbrfbDvadTscnHESIOG2KfhKVxp65jjETAUWvRMO5logf9sbEw8x8vwRRJcAV3+MaiAs7n81m1mw2S+PmAVWJWH2Y/iiZbjYbrx870OjoKSBl8oJNKS5pdFgnh1yLjmKUDn1DnBGXdCKqEz5wSCOO2g7915SMFEZofdyrtkRdKodcnl4yR+5KE73CHr7HKnNk5sif40jX936XOTJz5F8cRz7bKYVaIuArCABaRBc0yqSRPhRNfXq/ArBGQxgw9/E50SbqIDJhdnQWnoABCRWwkobm8lI3ILnfH17gl8q3VSCLJAN5qvGrYbK83m63fSnd7JheooajIMgSMdEYokrIVI1D5Ug9w+HQWq2WLZdLu7i48AiDkjfOrUel6vdm5s6ufev1etZqtTzSCrjEqCh9Yol8sVh4xIqNv4PBwOVxcXHh+wyQK9+pnWy3W0+lmM1m1m637fr6upTfDumu12v76aeffJNtq9Uq2QppPJwmZWYO9owZ3TMpwLY0GsQkYbfbPZggQKx3d3
d2cXHh0Uo2txJZhayI8lIPkTeIAhm1Wi3fSK7EzY9GDnXTMCCj0W/6ZGa+R4KNynzGBKRer/sLVFU/mkrBhmY2tqMvTgHTF2kS0VKZIldIgGN4OToY38MPdHKHjzHhAJ+Q2XK5LE2gIDXkUq1W/RrGxnHLGlWDbJSQwBT8MmIdOIdNENlW3GPs4JpiIzhDUXJRTIgklsvzlb9VjpzJODNHZo58Kkdik9VKtWT7mSMzR/4lcOSzP3ABAnRGDdLseMrIZDJxgOEJ3uyoUAUmQBPlqFBi1EJza6mfevlec0rNykKczWZWq9U8RcDM/Hp9+iaHmydws2OOOtEZlQPj5h6MhfrpB2M2O4Bwv9+3brfrjgsoABzITqOUlUrFwQwy3O127oCQi0a1aFfrhVT3+73nqJM2gTMAMtSpdUG6elqTygwA0EiaGjbXQTIXFxelDZw8oBDxxSYqlYpviG02mzafz22z2ZTyoYuicB1eXl7au3fv/DuipPv98QWH2B7L7bqcrtGU9Xpt4/HY3rx5Y9PptJSqAPk0Gg0HSnRmZp7moO+5wO41PWW73Tro4lPdbtftHVvQSCWEjA4AZeyn1+vZbDbzNCOO7o1AxkSQ8RRF4XJtt9s2mUysUqnYxcWFjcdj191+f3gxp9qjTqIYCz5yc3PjsqYPRHshGz26GpAnKsd3tFGv120wGNh0OvWJ7GKx8HeM6EoDY2R/A+1jE+iKI4BZtWCcyILJlxadLGMvRL91sso18T01TG4UN7ET7tPoP/0DX5XMUv3SiYROHHJ5vvK3zJEeJa8e32uk/cocmTnyHEdyLPxmu8kcmTnyL44jn+WUQjVeDE8jXxgdDyPkDJsdNj+aWWmpEOPHmHFoBoiBAfK0reQAkPGZ5k5rNBBBA1yVSsU3J3Y6HT/tRqM+ZubLuFzPeOiLRlwUFABRnIZ0AwwS0iHaMBgMShuXATwzK4E9Y9NxEoXY7/c2m81K6Skqa5XRdru1fr/vdXBE6GQysX6/7xuEcTIAltx5QIJoxG6383djYOQaVYJsIAROjELH3G9m9vr1a5vNZnZ7e+vkprbX6/U87QEgV9Lr9XpWrVbt69ev1mg07NWrV55bPRwO7e7uzuUCyRDd0+gypzKpreHsADG52OrYEAl2S0S1Uqn4yUJKohr1rVar/o4LooJv3761yWRS2gBMpFojNvRzuVxav9/3jb/D4dA+fPjgBMqLMrfbrf8NgWl0HGDFDprNpk9ems2mffv2zV+yOZ/Prd/ve+QO39PUHPYo0A8mPuv12gaDQemUsX6/7/pBn6RdqAywLU0NggCq1UNut75PhMicpkLoRBFf1Ykcfowv64QIUlKgR+eKTfzgL7pfQSPp8XrFgkgQMTKIDdEO5KrYQX1q0/qZRqBzeVrJHHnkSH/AupdN5sjMkU/hyHrjfh9eJXNk5siXyZHnyrOscCn4awSF7zBQiMDsmHNtZv7iQRU0BgG4IDAVqBonURSN1ul1umzJNTEygWDZUHp5eVk6AhRlzGYzByGWc5XUaI+IIXu7iDJAhIyXtAEAleiEAiH9g7Q6nY6P2eyYdhHBa7/fe9409at8KJBCu932aCHtt1ot+/r1q7cF2BPBATj7/b5Hb7gfvRExQZ8aLQGguU/lSZ9wHMa22Ww8YsWG50qlYpPJxKbTqTUaDV++3263NhgMbDgc+nJ5s9m0d+/eWVEUTjq8HwPnR3fkL/d6PZfzxcVFybZ3u50fNzwYDDytYbPZ2Js3b+zr16/+rgmigUQH9/u9XV5e2nw+9+gS6R462TAzB83FYmFXV1eenqAbs3VjNL7Efeiz2+1ap9PxyNt6vfbrdZJGdBZdQXLohd/tdtt++uknj/xyelSv13Nb0Qkk5KikSZR3Op16nWaHaF2327Xb21vbbDY2HA5LAQ6irdVq1cbjcSnSxf2dTsfTRDabjZ9Khkzpg5ImslJSUPzAFukLtgHJMQFWAgWD9JQz2tNoqZIKkz+iuIppyE1XSZRMlDRoB3tVvFS8AB90jJT8wPXLSubIVWnVClzJHJk58ikciT/U6rXMkZkj/Z6XxJHnyrM8cNFxBBGX2vRpUzva6XScSBgIub8aXQI49akSgeDQZlZSuEb5lJy4DqWqAjQysVgsHCRZKqe/RVFYt9u18XjsS9eqYIyEfumTL/1hPKQAEN1T2SnQYfCMGYdW0Fcjp47pdOr5uEQjNDLG9ZvNYUMu71igrzjlYrFwB1QSrdfr1uv1bDgcWqVyyMEdDAYeKcEZNUJK1EofLDabjUeMut2u6wonoqBH7OLz58++AXi1WtlwOLR+v2/b7dajnkRDO52O1Wo1u76+9n71ej2Psulxr6RiQPhMRsbjsU0mExsMBnZxceGn50ynU2s2m/ab3/zGBoOBvw+F9IbLy0ur1+s2Ho9LKwXdbtcjhESssE2imZ1O5xDduweGfr9vo9HI9Y98m82mE6xObiBKokf7/d6ur6/t7u7Obm9v7e3btw5U2JjmbhOZ4zNNN8CWibhtNptSHrq+jwa9MTaNSo9GIzM7HiFNwfY0lQAS0pQg9GVmpXQY6mw0Gjafz30fhUZ7F4uFdTodB3P8ER9iwgDh6qRwOp26jPEl7kf/ELLqHR8Ax/ABsAOSZz8JE0fFBp1gayROcVbHgDzBGPxJ69SoXcQi/s7l6SVzZPNBP8B+s8yRmSMfx5FgemHH/V2ZIzNH4gcvgSPPlWdLKeRHB6HRHR0IT42kTTBQM/MlSIyeKJ8CH1Esoiss6ZI/qtEi/iZKQMHgEJw+xRKtoK8YDsY2HA7t5ubGVquVDQYDB3Sz49vBFUC1HwBErVbzjbb6FM+mQUCcfmkOq/Zb+7xer221WnnkjxUTTY/A+HgwxACJAiC3SqXiGzGJXNVqNU8N0fSSXq/nS9p8plFJojXoQN/0XqlUnIxZymZCYWYuK0CNyM7l5aXbGMfVbrdbm8/n9t1337nsybmGGMzMXr165RG0i4sL2+12JdLWaKxGg/Q3gAi4ElECcH/729+6PCAQfc+HHqyCrWPPRHuZYEDYi8XCvn796lHO+Xxul5eXNplMfFyMWfcMcPISBD4ajazVatlkMrHZbGadTsd6vZ798MMP3oe7uzv3ZfUhfBDgZnP2aDTyfmnEfTab+SZtjUgr2QHQtNnv9223O+xhAVir1arXM5lM/H7y6km1MDtE05iAkqrDiVzL5dInELvdzq6vr0uTLXyPseMH+Jvmq6M77AFZ4cM6eQJvNMqmoK6TUY3w4Uv0h2v5XDEXXCXSrZFDMyutQihJMDbGpNgHxtOW4mQujyuZI4Uji/KBH5kjM0c+hSP3O3knV62WOTJz5IvjyHNByWd54FJh0KhG0TRKBkDx9M3SNJEJcm0VUKiP6L+ZuQJxSlUQm/N48tcnV340Egbwo2zAnad1TqsxOyzNslmRDZaMWxXI3xqtA2Ax8nq97qffkKJAfYAw40IOajBsgiR/WI8gnc1mdnNz4xGd8XjsgIA+er2ebbdbj04SOaFtQJyTnOjXdDq1f/zHf7TZbGbfvn1zkiDnG
b2ytEs9lUrFN5qScsBkgo2P/X7fSQPCQ57IgHFDHKQMAAK8CwQAph5+SAd49epV6TvA3uy4hF6tVh3U9LhVolV6vZmVIn77/d4jvJBor9ezyWRi9frxBCI29wKKCmK8fHA6nXqUj3bMDkB8eXnpBEjEikgQk6Fq9ZCTfXt7a71ez/74xz/acDi09+/fO+BjN9vt1qOFTGLoE7ntKb2S30206vb21uvA7ni/TVwV5r0j+OtqtfL0lNlsZm/evPFJAeAK4bbbbfcxcuXREUTZ6/Xs9vbW5dDpdGw0GtnHjx/t6urK04UgTjNz/wfUmdSiE7CCCYYGXbBVfFdJo1qtOqZgP7rhnnaUfChglkbW8X10oRN57FDJTiN8ZuUNxJEsqFNxPpenlcyRx3GbcJlGhzNHZo58DEfW6vd2Wq04T2SOzBz5kjjyXHmWBy6cPTYMKEPS+qRLpARHwTCoB+fmAYbBAp71+vHt0gAMhowgWTqOGyt5qo5LgxiI5ssS4WKJnHs4hQglojja5n4X9L0c9IQb8liXy6XNZjN/O7r2hxPrAHd1bsagUZ7lcml/+tOf7Orqyi4vL83sGK2az+fuXMisUqnYly9fSiczERl6+/atk0y/33cHJ2//7u7ONwJjqM1ms/R+i1rtkKsMAP3d3/2dbbflDafYC/JCv4vFohRVYxJQqVQ8EnN1deV2hK6ZmJATjl2Ro4/M+BuZEpkholqpVOzq6sqjigAjzqv2SanX6x5FAoh2u0PuOkvr1WrVo160v91uXW5EEyGa1WplP/30kwMCJLfdbm00Gtnbt2/t06dP3k+NkCMz/t9ut/b69evS5KAoChuPx7ZcLu3y8tLu7u6s0Tic9sUYaZNIOZOvarXqef1s/oWo+RziJ/JLnri+BBLZ4Z/9ft9TUYhO6uQUnwLYsSXkwjtsAN3JZOIRRvwEQl8sFo4r6Lfb7brtICNkRl4+KRpgFXsbFNDNjies4SPYDBMAnXhyPTavugPz0KMSFbrU1QsmSnp9jNDRXw0Q6XWQPT6jJJTL40rmyCNHru4nu9vdNnNk5sgnc6Tdw89umzkyc+TL5Mhz5Vc/cNGwkoE+vWrHddkNRSkocswsxssRlxxN2W63/ekdwSlZYGAIgUgL/QI8zY4b3Yh4EBFaLpceSbu7u/OIG8YEIGu6BU6sEUQz8yhIp9Oxb9++ORmQx8sLADGg7XZbimjpEiUyJiLDJliictVq1b59+2aj0cgGg4EVRWFXV1dWrR5eRjgcDu3jx49u8Lrc/erVq2Nu9D3JFEXhubxspiQ9oNVq+eZMgLvf7/vSPJE3yPHy8rJE9Eri5N+yoZg3g+OoZubHw0K43NNoNGw8HnvUiWVxbIk9UURM1YnJ1765ubHdbufgtd/vnYRYtiedAYLmJCvNpa7Vap7njnOTqqKRJR6oIEP6ROS6UjmkjSh5spF4Mpk4UVcqFT9NCfC+vb21/X7vpKfL7SqL//qv//Kc7fF47CmMRPCI9jH5wSc1ckYaAhM69gcwydITuvBd7LharZYitkR0e72eH1G8Xq/d/29vb32vyGKxsHq97idt6d4VSP7bt2+2Wq18EmVmvn8AQgdgu92uRws1pYaJRoyAcS+f4ZeMjRQwAFi/o06ic9giaSUatdMoPaSC3OkDk1kz80kt/gXZg8vsJcA++YkYg1/GVQMmJvmB6+klc+SRI/dEie34wJk5MnPk4znyYK/z+dzMLHNk5sgXx5Hnyq9+4KJzGBrRGIRJB5UAACycDSES7UPoRLi4f7PZ+HsLADyeNlEcRszgUZoafrwXoNRIAoogeqb9jcAOILCRlQkzUT+AUZfxIaZv3755rjXLtLyLQSNcECfkx/hIZWg2m/bmzRu7urpyh8Thbm5ubDgc2u9+9zv7+vWrt8N9pG4QVWPTMwWQ10gsMkAObIBlP8Jut7PhcOjXsrTLuNGJkiuGj56U8JCamhEFAAAgAElEQVTHZDLxY1aLorDXr1/7/YDDx48fvS9sDuZdLbPZzCNKm83hPRuz2cwjkXp6DiDDQxAOySSl0WhYt9u1fr9v3759cxv74YcffJLDplciptgX+dH8j33zHe+6+e6772y5XNpoNCodoQx4Q1rdbtffl6H6YTm/2Wza9fW1ffz40Sdvm83GU0bu7u4cwPANIln0TyPv2Dy6Jjee+1arlW8EhjyYpEFImorEBAc7mM1mrkcmc5ycxf9MdCaTiddJ1Lzb7Xo/mcRUKhWXIfbFxFUjxOiaaDbvLdEIOTKlT9g4RKL4qJFSM3MZbDaHzdO3t7fuj2blY635nwk4EzgmpkTUIAfSL7AT6lW806gc1yI7tR3GoisyOrZcHlcyRx45kskQ32eOzBz5FI4s7mXbbrdt83mTOTJzpP//UjjyXPnVD1yankDnUDSD0ygeoM/GvuFw6IJRUmIJmSVFBIlBIAyif0SVdrudb8BsNBqe/81TPvWQt1sUhR8BulwufUmX5clut+skiQNhHCicXHvytAE+HIEUCZxtNBrZq1evrFI55HpzUhHRLJaL2QiJ0ROBqVQqnmML+DKWzeZwMtPV1VUpRQCS/e677+zu7s7TI1ju5ojSRuPwwsEffvjBx7VYLDyKQF5+JACMVSN9AFC323UZ9Ho9P/ocPeOcSk7YDWkatMfkoigOaQ2DwcCPuf348aNHbaifJfDb21uXGZE9dDccDm0+n9vnz59tuVzad999Z9Pp1KOkyIZ+XV5eOhgsl0uPqH39+tUjT4PBwNbrtX3+/Nkdlqji5eWl2yXgBChMp1OXcafTsbdv3/qGVU4LGgwG1ul0PMWBnPI3b97Yx48fHSixjVqt5vZweXlZmmQRLSUyzniwQ7MDULHpWiPzTKR2u50fdYzeP3365Kk0jUajlAZVq9VcTtgfUT6AiwkmZAMYMyHBR3kxJXjB5JCJBRMk3kuzWCxcr91u1/0PHeETRCyJ3seJqaZpgAWk9jBhVZ0uFgvP+dc9GpCfrnIS4dQInK5SKLYic3yQd7vQp1TKjJn5d0z48VWIn++RAeUxpJJLuWSOLHOk2XHl5r+XI5cviiPt13DkfQrhYzlyb39dHLlYLq1WrVqr2fJrM0dmjnyJHHmuPMsDF1EgGtbIneY/6rIjIN5oNHx5GOciFxsyUULSPHT9jFxa3q+AgNgEC9BPp1OPjkEYGkHUSKKZ2fX1tZmZzWYzX1r9/PmzO+DFxYV9+fLFzMzb0dzTWq1WymPudDoO0IyVJ+/FYmHNZrOUQoDRtFot30wKuOIUPGFryg/pDLQ5Ho/txx9/tDdv3vhyOhEJ0khwxu126ycK8XJJjJFxomPN0zU75uCaHaMA5HKzqZX6WOpFTvSd+pDZdDq10Wjk7z+BPEktgRzfvXtn4/HY88s5PQlb+Pjxo0dP6QORL7PDUvTFxYW9evXKRqORp56wL6HT6ZiZ2d3dnRMbJxkBQGxs3e12dnV15ekGrFrhE9gsusP+GXfclDocDu3i4sI+fPjg0VI2tQLy79+/t83meHwudQPe2+3hiN9Pnz7Zer226+trjwxtt1sn
BAURAJ3UESKf+KfZIbo6Go18gzwTlYuLC9d5v9/3d79AADc3N6XIL+k1vV7Pvv/+eyeV8XjsugHkNXrd7XZLRMdLWcEPNoWDV/RxNBo5+WDf+LBGWrFRjYQiF8gREhgMBjYajUpAT6oIbUOERDaZqOAzGk0jjYOJKxNG8JNJuEZUiZ7GfSIaLVZ/I1iitoiMkRsRx/zA9fSSOfLIkUywfvwf/8t+/B//6/9vVeTy31X+j0dc8/r5mqvf27JZ5sjMkS+LI8+VZ9nDpcCuT/Wa60mneWJl+VyfXBuNhk9q2XBaq9V8GVqXDTWvnDaJJpE/Tmk2m25A5G4TqcOwK5WKH9tqdkwLxGn2+70fD8ob12u1mudLIwcepNjwyfhZ/bq5ubHXr1+XJubIZzAYuEFwOhKOzRM/4yK3/f3797bf7+3Lly+eOsHyrhoibzLnwQpwwDh5QEE3RAKIvnG06MXFhRuxpoeQ40w/iXgQ8WI5n5xs+snDBZG/xWLhUR2Wzc0OD5+vXr0yM7PhcGjb7db+8z//0/7whz/4caY40NevX+39+/f25csXz42nDgDy9vbWLi8vrdfrudxIqSCXmZN5bm9vrdlslt43Q858vV63q6sru729dZl8+PDBut2uff361U9ZYowAO3bNwzUpqeyLINKMnfb7ffvy5YtHQoku9no9tzcil0Qq8RezY/48+dlm5hE6XqiJfRME0NRYdMy7ZczMX9aMzb97984+fPhg4/HYjzUGI8yOOdTz+dzli9wgncFgYJvNxv16t9t5O0T4mDhymhttKD7QL1KMmGgia1JuFouFjUYj6/f7nupRFIWnopBKRLSNPQikZvH+NCalBCD4gZQVjME4ItJ8hy+AI6TFaGqDEgB2bWY+WSfqZnZ80aemqDE28Ig6dB8FuKARe/BZ28zlcSVz5JEj59WqFdu1mfHgvrfCivuzEOL+wMNOr33p88LOPvIX5aPg/eoCXdz/WRybO9Qv9fJdqqF9uMZS1xUPqnh4W/wkXlnqve3v5RS7UiT+fnDNfRP7cOWD2AlfpcZo8lnieq/1VJ2pjp265LxIzPZmtUrhwezMkZkjXxJHnivP8uJjOocyeGpliZhB6ESaSTmgr4pCyQiMp1wiEpALRKQrKkVRuMEhEAyBt6qThoCgeQhgky1L+yxvsmkUo7q+vnanQGFm5u926Pf7vsRMagFRhvF4bJ8/f7br62tPEdCjPNnkyak1jJmVsS9fvvjqGSknTPp/+OEHu7q6souLCxuPx7bZbGw0GrlR6xJxt9v1ZWUz84ehzWbjOeS8iLBaPb7xnIesVqtVyhunz/yvhthoNDzlgNN0eNjiCGGW6DH29XrtUQ/ARVfgSGXhfRgXFxc2mUz8IWaxWNjbt2/9oQ3iZ2n86urK02qK4pBbzAZfQJQoGBMMHo6RJxOUP/3pT76hXDdzE2W5vLz0Fb7BYGCfP392uSIr7ifFUFNgsHd0w3WAT6vV8gdm3m1yd3fnttlsNv29OEzg0BMbnC8uLvxkJMaGf5H6o5t1V6uV73XYbrf2u9/9zk9hQreswpL6c3197bn2pK4MBoPSSUTk4ROR40H47u7OZrOZDQYDJy/SebA1bIRABxHQjx8/ur8D/Ho6E34MyFcqFV+R3G63boekfWCDugH98+fPnj6hq5qkUGi6GFFHTelBj6PRyBqNhkc7wQ4IlAd06oDY+F/THcyOpyzpJJ+/dZIPbiuhcK1Gc/MK1y8rmSMP9t/7f/8v+5//+n/bxcVFkiOr1cN7nfb7w4tnm82m3d7e+j4QDo7QqD2Y3ul0rNls2ocPH3yS+fr1a38PWa1Wsx9++MF6vZ5zJPu4wJKf48jRaGSbzcbbBouZENZqh6Pc2XulHEmmymaz8Yl8rVZzXez3e9efmfnq4H6/d45EZ3AT2ESmChkg8NTXr1/t97//vX38+NHTJ7GJ9+/fux3yYmLlyHa7XeJIAo1gxHq9do7/9u2btdtte/Pmja+WYFv8VnzkOHTmGhzIMZ1O7c2bN/b582fPIPn69avbHvZ3s7mx/f3qb+bIzJEviSPPlWd54KpWq346DhNmBWs6RTQMwEYo+oTJ8ikrK7r5U5dviRRSdDkSR8bAAAZyrwEDjIt6cR5ditXVCfo1n8/t1atX9uHDByuKwhVPFEXTMPhRg8CgWXHi4YnlTfLOSb9ATqvVyvOeuQaQIg+41+vZ169fPS/91atXNplMHCAwIsYMWQM6nNxD1IOoKECLQ7FpWCOxSvyQQKfTsfV67SROvnm/37fPnz/7EjiEjrzZmKsTE0Di8vLSIz3NZtMGg4GDFps5v337ZsPh0HOkzcydUceOjnnhMPnQmrMMwavcGDendpkdjhb++vWrR8Qmk4lH1zhRiNQe0hAhOCJT2DLph/QBgm21Wvb582fb7Q4nJiETcrF56aOmKmFf+M5ut/NN6fyPn7VaLX+I1QdmwJAIDuPDpyeTid3e3lqtVrPxeOx10Qc9zUtPDQJ4OSoX/8D2WWHsdDoeBAAgNWIFCBJlNCtveN/tdr6hH19CfnqyG3WhB/pAAWQhS3ACWXENxMbeAqJq+Ab6YfKsuMkPKWjUybX4CrhJUbyjfv7WfQWQoUZmlWwo9BufR8a5PL1kjswcmTkyc2TmyL9+jjwXlHy2B65KpeL55bpBjU4TBZhMJqW9SQDwaDRyIXJSUavVcqcCuBiw5pRj0GYHwOj3+/bjjz/6ZygNwfK0T3uAA3UTTeGpGME2m8f3Z7TbbRsOh54GBxHxRM7qDY6tGxaJKA0GAzMzfxM60QjeFM+SMH0gEsRKmJI0UZSbmxsrisJTRiAYyJuNqDp5Qkc4p9lxkzOkst/vPeJ2e3tr7XbbJpOJrddrfxEj4yLiQV2r1cojTqTTcQoToEakgqNzSV8BiLAxHB5512o1Gw6HNhqNXN/IQwGbCMx+v/fThjiRSeXa7XZ9GZmN4qyyYQfYC85MW6vVyn772996tHcwGPgpQrvdYb/bhw8fzMxsPB47SDPxgeh1/wQTHyJt+/3eBoOBywp9mpmNRiObTCald8MAnOzbIAWgUqnY+/fvPTrJkr4CECt4RPvQt9kxwmVm7ofVatUnC+Sh39zcuI2xKVYnmI1GwyORyIR8bWwI+7+6unI/NDumDKg96yoMGERbpMWiO+QOmdMvndRALGAboE4Uj1UGXUFA7kxUkJniDlE5CAPZQCpMzCDv7XZbSuFiIoCtQ2xcS791AsRYwQx8CdJAZugSTNS0NMaSy9NK5sjMkZkjM0dmjvzr58hz5fyh8Y8sAL+SCEACiCBMOsV1k8nEB6QpEESRUB7fozD+16f88Xjs93C06X6/9xf/cZ8aHsJmmZ80MAVbjB1DWSwWDhyqjFgfho3R8vJKooson2gVKQ0KIkVR2HA4tG63a41Gw6N8ACkRGTU46gRIOBVJIzHk1hKF0ONEOUEJQgTciTBxYhTRq9lsZjc3N7ZeH452ZaOqmTmwEAFjYyrpDwAYoAKJkGoRr9e9cWbHF/0RveI39kXKCOCkZK8v7sOOSI/BkbALdeB
ms+k6nEwm9uXLF7u7u7P/+I//KOU1s5RuZr7HjP0RkCipGaRlYhdEvswOm9jpz253yNe+uroq2Qt2O51OPfKGLJlMEPGt1Wr2+vVre/funb1+/dpJTE9LSoEmxElkVqNCl5eX9urVK+t2u94/NgiDBxz2wsQDPwF4sTF+aO/q6srevn3rQAvRsZeQqBg/SrCk/ZCORDoRkxaNGJKOontbNILOJA05Ine1Q1IQwA7aQKbgDvbJZIw9AuAM0TlkYWY+uaNNbBmfh1zBAn4gMXBYo7b0SVct1LfAPr0+l6eXzJGZIzNHZo7MHPnXz5G6AhbLsxyaQQ40HeDJEzBEMBgRIKeGxJIhudkYHifEoQAA1cz8M+rp9Xp+OAOn62Fol5eXVq0eXuKHQ6FglKgRGV1CBIhbrZZvdmRZXCNzOjYcgXEDPjghhMdeLXJ6WU7HkJTI+JvIHZtFAUYIB+NgCbzRaJSO2SR6imFhbGbmaQq8M0MjWkQlMEaW9TH229tb32CN/NEdkQecU52egy2ItkFgHB5xeXnpkR4di9khWkvUhzFARpBjo9HwqA0TgW63a+v18ZhlPgMQlKSZoBAl4nMAqNFo2O9+9zv78OGDR54B3+FwaEVR+EmIRIEqlUppzx9yIloNSNImEUKi4xH0Go2GpykQ4YPIdQkdO57NZvbHP/7Rc8DxK1KUsCPIgz4Q0ePloNglvjedTv09N+gPPySViDERhaMefZdHURS+Lw/CJ3o5n8/9OjCAqBapFsiIMSADJpfsJcR++v2+VSoV/xwf2mwOp2myURk8u7i48IgxusLeAHeNJuokWiNlTETBsKIoSgfOcB0Yo3gKUeDXkLwWcIeonGKz1gNO0kdwUUmePPpcnlYyR2aOzByZOTJz5N8GR55b5XqWlEI6qZsU6awuX+rAeVrUlAqz4xGf5LAjQOrRZV3uNzN3CI675IWC2+1ho+SrV69KT8YYm5l5vSwxs5zPNfSPtslz/vHHH+3du3fuaDxNkyfKtTg6S59slOT4XQiMI0eRJb9x7qI45MJ3Oh379OmT1ev10pvb37x548v/GiEh2qURN6KJy+XSc8QhQp7aAS5I9u3bt755ErkRrWKDcVEU9u3bN19Ox1DJeeZvctQhLzPzDaeAlpl5+0VR2Hg89nzu7Xbr0TGIisgGmz1JSen3+07kuryuQEAqB5uE6QP2xVI7h4YAeCx712o1f4Hk9fW1/fGPf/R6Npvj5lAicQos2LhGY8lBxg4gP2wTMNWJDxvKd7udR2wZJ3ZmdiB9yOzVq1cOjJy8ZGYexWMJnzQGnXgT0UEekMz19bXbOmPiPkCbCYeSbLV6PFaWiOZwOLR+v+8pKOqvTCABZQ5bMTtG04h46eQDLGJcyIxcdTbWYgO6oR9fI3KKfaMLM/MJh05+8CEiy9Vq1SdipA4xJrU7xsP3iqcUtWXGpVjKeBmDYjF9wYbAclYdsEuIGYLL5Wklc2TmyMyRmSMzR/71c+S5Fa5nSSlU5bCkpsuGOAwdQbFmViIKJRSigTi7CgCBQ1pmx6ghigbYtW8AO9EAje7oUy51Y6goG2AjqsgTOxETPY0QMuHJVzcED4dDj0gRTSLix0sAASKUTR4/wMiYcUo2umrKB44B0RJN4xrdwEiEgzYvLy89kvPp0ycf66dPn5yQkVe1WnUSBvh2u52NRiMriqK0KZYNvKQTMLFQECMCiH53u53d3Nz4Ua46Bk6NAuCwI/QKAOjGUJ0kcMQr0Sp0TYSUNA1stFKpWLfb9Q2sTIbu7u4cGNAL0Y7dbucnb5FCgI0hf7VxtWGIG1lRt066OLHo9evXflIkciDVgckLpDqfz63X65WiVrqsDhi3Wi0/IRMCRN/4V6/Xs7u7O7u5uXE5EGnklC9sGB83M58M0Q+IHxsnAsskqlKp+BjMrORfEDBACHYAonoPIE4UE1JS+eKDRNZ2u13JjnWTvaY/gCvUUa1WHQ/MjtFy9AYW8coC+kz71K+2oZMRcAk71Kickh8+gK/pyoqSExjLmBWnwbhcflnJHJk5MnNk5sjMkX/dHHnugetZVrjMzJ1BB61/6xMi1wNi8emap8xG4/ieIp5cNaoDKKAoAA2Hp/79/nCU5rt37zwKQu5mo9EovTsBobMUznI9dREhoH0iGnd3d94vjfIBMPX64ZhNPTIdGWmajkYjIT+uJ6JCtEojAkSSAFDahxy4j/Hr+xem06kNh0M/2pQlewCXjbE3Nzeed45xATQ6GatUKv6OCRwZ4ACgmHSQikF/2AjNCU8Q4+3trc3nc08LmU6ndnNzY1dXV1YUhX358sWGw6Ht93uP8kHWpAqQWz8cDj2dBJ3x/gkcjnQAoo30w8z8BC3SWiqVikc1GSfpB5A6gAdwmB33E2Dr2C37BZDrarXysbOUzpGxmkM/HA6d0LHXyWTixMFekHfv3tlkMvHIJ+9fw+bxOSYq2JimJpiZ7y9Al7Vaza6urmy5XNp0OrXLy8vS5ItoFptokXelckzBwSdIo0FvCnq6wRwb1f5xdPDHjx99MqqTXXTGZEiJGaCHmLBn/J0JGLbD5ADiVOxiIoC/QPDoSP/WqB7ta1/BUMVRSAL8AAcYh07O6SM2SNGJnP5PFI96ICadDOTytJI5MnNk5sjMkZkj/7o5Mq6uafnV7ImA9OmOJ0dvRJ6G9SlTB25m7nwsaTI4JSF+a1SQ+iECQIoX36HwzWZjvV7PnYe8Y5bKAQLGMp/PPbpAlA0hm5lHFiBG+o1xkHLAE7+Z+d+Ax3K59NOSttutDQYDX24lFYE66TObUpENxLTf7+39+/f+Fm+ioBAchILMAfJarebvNgAYiuJ4EgxLyj/99JODCdEpImY4Ju+ighiKovD3Rrx7985Go5HnzXN6FdcDBgA3dU+nU3/xHqkWRBcARMgIWZKWQB9JEYGEiWrtdjtPCYA89vu9HyOMfFQXkCPXb7dbPyUKWyWCQ/oJ7wFBVwAr48Zpa7WaAxnRzs3m8MJsPZkJm6DNm5sbq9frdn19bRcXF3Z3d+d2vFgs/D0n9BsQg1QBDPSoPsZRvUTI9F58Bt2hGyJcRXHMzVZwZIJCW+iR77EN7SOTRY2yKxgrqHc6HZcBmAE4YlcKxJoigX4pCsykURFBw59pB33wPfZB9BD5Uw8TK7ALf2bywSQbW+c76ke2+/2+dEQuY0a+TESRE31hfPymbl1pURLJD1xPL5kjM0dmjswcmTnyb4Mjzz1w/eqUQl2WY1B8ztKrCp8OoggGygDY7MkJNCgEgOEJVkEdZ+Rlb0QNOGWFVAEcuigKf98FS5n8EHnA8ZQkJ5OJRx4gCU4xioLWZU7eeaAbhVmSNjM/tYZlY31yJoJodozo0a/4RA4wcTINhomzcRoPBs2LBXmBHqc7EXWMusXgITImEcgDIICUcAYlMIipKAqPIk6nU79HAZgc61rtkPv95s0bJ9SLiwtr/m/23qVHjiy5/jweHu/3OzKZmSSrit0FqSFo0asRtNH30qIH0KeYb6PVbCRADQitRlU1ySIzM94e73eE+yxCP8sbVI
n/ooqaRdEdKDSbzIxwv9fMjvm5x8wyGVunSqVie1Aul0377Dod0gjsinvkiJrAhWQCMJJknZxgcABFvt/t5sT9M+8D+YXrkNgGdQRcy+XS2j+7jDUMM98LA91sNi/kQfwuWv9arWascS6Xs8GmnuepXq+r3W5fdHtKJpOWQOVyORv6jAyD5AJ/df/zfV+j0UiSrCgYX8PnSeBYi81mo9VqZYmk53nWJelwOM+mcY/u2U8AhUQBAHH9jrbV7ukB8geXgef5iSP4Fc/IQFV8kT0FMNlv7AltOp2fWDc3ccD38VHsjfoHEjr3VMSNC+wDa+qCiasldyUk7ve6yTgggk3x3R/GWtdu4+vnXzFGxhgZY2SMkTFGfhkY6Z6G/Rcs+G//5Wde7uK6b62uBIKf4ecIQqfT6WIjOIJ29dM4HobD57AoOCIbgjESVK6urpROPw3R4/K8cwFlvV63N35+jqNqfs7zPBtYx/dhaLA6bIYbvIrF4kWAd1k/mCNaoPJ5HFmzee5xMGuGxIHZGLyRFwoFjUYjTadT+z2OPwGTVqtlIE+ggn1y5SUEkfV6rfl8riiKNJ1OdTqdFASB+v2+zZyIoshYRQIFQfrh4cGmuyOdoAsQjgBLBeBjxABZMplUp9NRIpGw1rzsF7+TTqdN8gCjBBBJT7MWPmSZYYNg/Ha7nSUagC2spPTE8hDosXH+LplMWlH6arWytZ9OpyZ/cFmdMAztWfAH7s9lU9lz1ot7wuaZVzOfzzWbzWzgZRiGarVaFrTwR1cWAfuGfwHEJFs8C4kTdRb8O+sYhqF9Fv4Jo03CgR0TnEkoCFhIIQA1hlmSFJHsuOwUsiXiiCu9QLvvMunED9htfJa4RDzBRmhxG0XRhf7f9S+3IJjPgDF1k0/3HlxGk9/F7tlzwBL/cGMnv4PvbTYbAwSS7w/Z/zAMLxIY1svV4WN/bvLjMtvx9WlXjJExRsYYGWNkjJFfBkZ+7PosbeGlpzkc7pv2hzfDW6LLTOGk7sPBlOFw7pu0718Wz7GxBIkPNaEMH9xsNrq/v1ej0ZD0NA0cfbD0xETCKqIBhkGgow3Huq7BuiCZzWbVaDTkeZ51ROKeMXwCCnMvZrOZMRtuMGUNYQ3ZaNaK+8Zpfd9Xt9uVJOvWBAjC0FFwCWAcj0f7WUAXJ16v16rValoulwqCQJ73NBfG8zx7NhyL+8AODoeDGo2GMbhMup9MJhfdkaIosrkZyElWq5WWy6UFEeyG4EJXo+12q3K5bGC+XC5VLpcVRdEFw0gg3Gw2tu9ukTB/T/thpBbT6dQcE6kN+wnrRHAAPBKJhNrttjk6n5tIJAxs2HtJNuHd930r8KZOIpFImK5eOjN8FLA+PDyYbyWTSU0mEy0WC6VS54L28XisWq12kZDAADOHh3vn3gAWN4jAygJk+/3+Yqij5507ZLH3FEDTsprPYbBrrVazmAEozmYzkznk83k1Gg1LNPD9w+GgarVqSRd25spTACliDLbEcwKyJBcwXMQZpA0uwwhAAZ7UgnBvDGKVZDUGsNbEB5JKPp97AARI8mDk3ToU/Nz1excgeG6SW/fkxJVTYZ8/ddIAuLksnctqkgzH16ddMUbGGBljZIyRMUbGGPlZuhR+CBIfsqEYMpvPTfPvrsNjeFEUmbGh/YUdco/x+H0+bzKZ6Hg82hu3JHuj5WgeJoAWnSwabFCxWNRqtTJWjyJUghGFf2EYWoEtgRDmyPefZmjApGEkBMvj8aj1eq3dbmeda9BuM5wwkUioXC6bRhhj4+ifgJdOpzUajZROp9VoNFQsFi/2BgbjdDpdHA/zjI1GwxhGZBmr1Uqed571EASBMTnZbFblclnPnj2ztrMEHOyAQNlut9VqtYwRxcBLpZIxpVdXV2YDLjvl6uOlc9CqVCpmE659kWTg3NgG7B7ORuBjP5LJ8/BIgjd2mEwm9fj4aEGR4OoWomI3/B6SBhgeEoQoigwg5vO5BVWO1nkOipDT6acBlkhCCOYAXyKR0GKxMA04SQrdq+7v7y+OuFutlv0OmvtCoWCF7Mgr8BcGpAIgJFTu+khSs9k0trZSqVgwm8/nZm/YhZtwAX5IP/CHTCZj68Jk+Z9im9g7WDtiDiBK0jGZTNTtdjWbzYx9Ph6Pms1m9mckKOv12mIYfybYA6y0UfY8T9Vq1ZLM6+trNZtN1Wo1kySNx2NLNlzmFFum3sCNge6JB8/kBnr2ivbEblx1mUtiJXUx+Nj9/scAACAASURBVAmxz0362Qv3hQAf4vc/BKD4+rQrxsgYI2OMjDEyxsgvGyN/8QuX53n/RbfpHrURXHjzc49/MTYeCMPj2BRGA8Pg3/gcFoTP52f7/b6xgsyNyOfzNmeE4C7JJtTDlHEUXyqVTDM+mUwURZFtfjJ57kzT6XRUrVZtLWjLSVCUZEEc+QTfwRs1nVY42s9ms6rVahaQMDqKawFDPuN0OqlSqVhb2jAMTedOy16cC4dFD3w6naxlLJ2X+BnYMpKC2WymZrOpZDKpdrutZDKpZrOpr7/+2oAW9qNQKKjZbJochc8EYHe783yOcrms8Xis8XhsNuGyTLAb0+nU2BKCOkzb4XDQixcvTLsNC7Ldbi+O2kks3I5HrryDble5XM6AkI5OJC8UqLrH8slkUsVi0eyGgaTYEI7KsEDXQVer1UWHKDchw2ZYA7doGkZxvV4bsMAmY4d8TyKRsFk2o9HI9gGWCtnMdrs1sIZRJNEB2Pv9vhaLhbU93u/3Gg6HGgwGtjfYKcGRPYIdlJ6KVKMosmGQsHHMeNntdjbQkfWma9RqtTIbJ7mSnlhsWDISQ3yfBJKfo66DZJS6jXw+r0KhoHK5rEKhYHp5N5nyfd+Y7uPxqJubGxUKBV1fXysMw4uZLcQcN2YSL7E/6akdLnbMxc9++Lv4UbFYNCkOiS/Pst/vtVwujcX7ULaGbyB5cWUR7qkL/x9QjK+ff8UYGWNkjJExRsYY+WVg5Mdeuj4begIMrrGyQWwWLAz/jl4ZQ3b1r8woIPDzsCwmD4VT8O+pVMo69mBoMB7H49GYuQ9BhcBdrVatc1MulzMWz/M8jcdjc3IMlGN/JoYjqfjwaBag4PjbZRtcJtNla4rFohkNGw8bQIEnAZx7Oh6P1r4XJgdD47gZFkuSFUDDRqbTaXU6HXMqWtx2Oh3VajX96U9/UrVaVb1e1+FwMJbpdDpZQC0WizoejzbwEm02Gtp8Pm9OfX19rVwup8lkYgwaz8nRPS1Ms9msVquVKpWKAYXneWZjbqJxOBysPgBAkZ66J7ka+XK5bAEpl8tZUS17CZMDW0NNBTbFs7gyFkCVBIBhh2iMsV0SEDcoEkhoq+oWlbZaLc1mM9Nx83zoo/G/fD6vyWRiazAej61QfLPZ2IwcmPHj8aharWYyCGocoihSqVRSNpu1ZAoZy83NjQV86Zy4tVot05673bhcfyDQucyQy0ifTieTl5DUZDIZ6xIGsMP6FQoFY3P5XPYI6Us+n9fhcLBif5fJhCVl7Uk2+E4YUt/3rbMbQTwMQ/X7fd3f31s9A/UB6
/XaZBQusGA7PDuATfAvlUpWTI7/w+yTcH0oV8JWsQHWmJgp6aLVtHsvJAYAPn4EqLj7RCIQX59+xRgZY2SMkTFGxhj568bIj12fTVLofhEGzmLwM4ALG8MDe55ng9ZcVg5jOZ1OJmvgwWFF+Gw0qQRPGKper6disWiyApgVQIujRe41kXhqTyrJOudkMhm9evVKzWbTQGE0Gmk4HBobANux3W6N4ZNkwY6j1MFgYG/KnU7HnhtpAiwSIDwcDjUajczROXZPJpNm/G6AzefzqtVqJhMhyLlH+xheIpHQcDhUt9s1LX86nVa73dbd3Z05wt3dnTkl69Jqtez70JrTBYf9IVi6Rg6wclwLMwKbVS6X7ei+3W6r3W5bR55KpWIMp+d5arfb2u3Os1Nevnx5oSHGOWhnjE4Y3TBdnGCH+fxaraZWq6V2u22diLBPbA6QAVzW67V9RxiehzjC8gCE3Ec6nb6QAABqy+VSk8nEwIuhmJvNxpg79rPRaOj6+lqtVkudTkfPnj2zOgBYPDpbVatVtdttZTIZK4LFx9gb1oVORjBY2Ml6vbaAUyqVVKvVjJ0tlUr2PC6rRNImyRhCWDXpqSMZ9wAD2Gw2lUgk9P79e7tffIS1QcP+m9/8Ru122zT4+D2yJPYJnX673TYbRNJCQGePYVIBG5dl9zzPEp90+tyJqlqtKggCDQYDPTw8mKae/SKuMHuG2AJoEdfcPeGzqSkhRvFnpF3YB8kwjDLrj3QMG8cWsUtkLIARa0OSLsmSOWpd4uvTrxgjY4yMMTLGyBgjf/0Y+f/LCReGJj1NtHdZNZwuis4FgLB73CAbyGLwlg7TxFs5RsuiwzrxvRiuK+FIJBKaTqfWChVmDi0tx7B8BozUarXS1dXVRUcmAiYDDiuVir09f1jcTJCpVCoqlUrGOFBAO5vNDNhgzhKJhBqNhgaDgQEK6wA7uNvtDCAbjYYFGGQXmcx5fgcsJIGAe3MBJgxD1Wo1OwavVqtaLpeqVqtaLBZ2LJxIJEybS+CkMBM2ZbPZWDch2MbJZKLdbqdqtap0Om2aataKe8PRjsfzPA0X/On2czqdTDrRaDSsuDidTuvh4UH1el2TycT2qt/vGxiwljjo+/fvjS2EWUE6kkwm9fz5c02nU33//fcql8va7/cKgsBAG+aXTkXIIZgoT1vZKIpsoCGsG3/nsiJhGBrrRXCDbaI49nQ6mSxmv9/r5ubGfAzNNGsoyX4H20Hakc2e2xwTZIbDodkhQQTAhw2G7SSB4b77/b5evHihbDarXq9nzPtqtVK9Xre4wPORVFGkD+ATF9iHZrOp5XKp169fq9PpWACGjfQ8T61WS7vdeSglAE2iByPNsEvskoJu4goaeZi+0+lkMhVJViuCD0gy1ng8HlsyiD9LsmDNoFQCOc9AEGdvqJlAQkVnLGoE6vW6DalFJkQCAJMP+wdrCngT/9xEnXjrdrPCPpAQSbqQuODPMMrx9elXjJExRsYYGWNkjJG/bowkbvzU9Vm6FHLci+SEQM/CYywc1RGwT6eTDZwrlUqaz+cXx55uwavLrG42G9twGDsWiP/cVrK5XE7VatUWjInkSBYo4kSeQKec9+/f6+rqSl999ZXevHmjIAjUbDZN895ut7Xf7zWbzezYnKP3w+Fgx+6wVqlUSvf392YcyWTShhXS6YnOPovFQs1mU5JsrdDTUhQLEzCbzezoXZK1osXBMAZABJ0uwdBl547Hc5Hyn//8ZwNw1mk4HJrevFKpmJQE+Qe6aBIIZpc0Gg0DbphO2nhyv+l02owfDTtsE4AC+5NMJjUYDCw4sR4EPPbXZQpxTgLparXSbDa7CBywMAR0CsbpFkQCgjwG1hJ5Ap2pYLwIZKlUymwMh0QzzBG5q8tH7099ButULpct+Dw8POj9+/dqt9sKgsBmh0jS1dWV+d5isbgAZtYnkUjoN7/5jd69e2c2tlwujT0GpEgMs9msqtWqBZvZbGZ7NB6PdXt7q1arpYeHB0nngLnZbKxI+eHhwToNudIqbJ7PZu9Ho5Ha7bZGo5Gxru76NJtNpVIp/fM//7MymYwVPHO5TBh7t9vtLiQfhUJBi8Xion6BdUOi4fu+SRLYV6Qmy+VSvV5PnU7HfI06CWy2XC5bcoS23QUWEnA3nuHn1EwghdrtdsbmMe+GxJrYCkC6HadceQQMJOBNAuu2NAZ8XCCS4sYZ/9MrxsgYI2OMjDEyxsgvAyM/dv3iFy6O3j9803MfAjYBI8K4uUlYIjShfCaBUJKxW4ANhoVz8vMwWAR0Nr1UKhnQofUloMG0oJPmTbhQKKjX65n8IIoia386nU5tcB3Ho2w+b9+dTsdaX65WKysyRtf97NkzO/qEVSAowgIBLjgHmlPWhgCKQXIvsCxXV1dmfMfj0VgTCjc971y8/OzZM7VaLW23W3333XdmYGjOE4mEvvrqK2M3arWaVquVSWAAEI7WYWIAUZe5pR0tR8+TyUTVatXmh/C/OBT3xRrBVuF0ruaW/XULWYMgsAAA89Rut81+UqmUMXfusXIYhmq327Y/mUzGClavrq7MuagfABRw0lQqpcViYSBAsMAWuRcCBECH45JEwES5pwuwqGi2+/2+ttutKpWKyuWyttutgiBQIpHQ27dvTQYEqxaGoWnLff+paDwIAgvcyWTS9up0Ohee48vVatXsE105do0vBkFg4EXAjqJIo9HI2CvWYTgcGhDD1nqeZxKjyWRibaQ5CfjLX/5ihcF0a8Im7u7uNJlMLEa9ffvW9qRYLGo6nZpUJplMajgcmvwD6ZX778iEsCt80vfPgyw7nY7u7+9t//iMzWZjtSSn00mTycRYVp6fE4RKpWKyDBIUkhokVPv9/iI5LRaL1g5bktkRvgDYRFFkw0qTyXPBPEX+xEPiLvEWOyAhjl+2/mdXjJExRsYYGWNkjJExRn62OVw4JiwEF2+Q/CxBhCBEgSpBgEBOUSkBUtJFgR0LJJ0DqlsQx9E6hXuz2czYOe6NhWaGRaFQ0GQyMcOmAwsa0EqlYmxOqVTS7373OwsiFA7yhs5mo+WFDSG4+r6vyWSier1u9wtLFASBrq6uLtaTNeLo1tU7Y6AcdwNuGDrGjEQlnU7bYEeCGABIRxlYQHTGdMfhXpgLgm4fNuhwOLd3peUn2tjRaGTPin4cxjGKIlWrVdN2wya4Om+AHeMuFovqdrsX81M48m6321qv11osFuYgBFcCb6PR0HK5NHBqNBryfV9BECidTpv+mlqD8Xhsx9CpVMqK1avVqkajkWmUsTlJFuAARrpqdbtd0wHD4A6HQ51OJ6spgDnBJ7bbrVKplEajkTGksK6wStlsVv1+X/v93maMNBoNTSYTZbNZTSYTffPNN9rtdur3+7q9vbWWrDwTnXxcRhEbxDfdfY+iSA8PD1YYm0ql9Pz5c3W7XbPh0WhkDDvrkcvlzJ7DMDTWjqSUzmEUf1cqFWNaKfh/eHjQdrvVV199Ze2DXSkWbOrj46N1W5LOiSvxoFarmRykXq+bpIKgTIzhmel6hd9FUWSs+2QyMRkI
yQSDV4kJ6XTa9hz/PJ1Oxsy57YeJXbBxSCeIOQA2Ugu+h3oQulMhJcHv3CJvt3BaktVOwC66pyPslctyxtfPu2KMjDEyxsgYI2OM/DIw8mMvXZ/lhAtw4MsIJBgfm8FxufvWigwCVg99KQ9P4R0XAER3G4wY7SaFa2jBWUyOoHkD5nPdIEtBL6zecrk07fl8Prc3dooSX716ZcEDTTjFo4fDQUEQKIoi0wlToEmA2e/Pg9tqtZodq1YqFY3HY2Ni3I5NrjSBYsXlcmlFgrVazQwAMBsOh9rtdjbRG+Cg1e9oNLJ9QDsPkwYo04HneDyaNAGgASBIxD5kxFKplMrlsjl3uVy2v8f4WdswDO1zWZtsNmsBm8TB8zzV63Vrvdput3V1daXRaKRyuaxarab7+3tjb0gkcFAYIoLPdDrVzc2N3UMul7NCUZcJ5nfpdBWGoUksYJWz2fNATzTfjUbjwjZwVKbcJxIJjcdjSTKpCAwbQQaJCqBCLUCpVFK9XjfZDfUOyCt4bhgyJDg//PCD3r17p9vbW5vzwu9UKhVNp1MLogRhujJtt1vTnddqNb19+1bj8Vi/+93vzEbcRADWCXafxAxma7FYqFAo6MWLF/J9X999950Fr/1+r3q9rul0aq2l379/r2q1Ks/z1Ol0LOjDcHHfnAzALO73eytQRvJAong8Hq199Ww2swSTAmnYceweW0deVa/XrXjb931LNjiJQIpCTOQ/AAC5ECBFwCYOJhIJS47q9box5STPxF0Ai+Gr+DXaekDWbbPs7gusJHEUm0Q6g+Qkvj7tijEyxsgYI2OMjDHyy8DI/9UXLhg0l8WzD//P4/PT6WRvhrA8aNMJ5kgeuGn0o0gsACCOGVkoPgd2xZVusFjpdPpisjpvpjAUfA9acxgT3nrdAASADYdDhWGo58+fq9FoWAHuYDDQ4XAwPTzadd/39fLlS1UqFWOemGDOd0qyIAw7gPQEYJ7P5/ZdvLW7xa6wYYAzb/E4F4boslqz2cyCPm/4gGSz2dTV1ZX6/b6xXBgXQYzPjqLoYmYLCQPOR3cmAKNcLtu8CPTKOASSj2QyaYkeTg2rAvjBsMGuUuyK3bXb7YvEYL/fazwemwPyrDAuMPmAN8kRPyPJkoZKpaKrqyv7fgIMwISMAMZmuVxqt9vp5cuXFqyZdj+bzUzLfzqdDNATiXNBu1uMy14izygUCmq32xoMBgZy6Me5Dwqg/+qv/krv3783BpL7jqJIi8VCq9XKillJDGDNKFButVoaDAZqNpvy/XPxK4XQbgKAL8IYufIWgCWfz1ub2FarZUGebmcklgAMLDJscTabNX8hcZTOkgUkFhQ1Y48ESeIUdoUUiWSDxCadThsjCZtNzUWxWNT9/b3VefR6PUuwSG7DMDSfdU80iGX4OMwojDUyJ+Qj2CS/j6wjkUhoMpkY6MBop1IpS6KXy6WtEbUCJGkkhvwMvibJkmdJ8QvX/+CKMTLGyBgjY4yMMfLLwEh8/aeuz9KlkJvihgg0vEUTRDAkHNQFHoIIoMIbuu/79sbMGygPD9vBw1Noiu6WIMUGNJtNe9N2Nc3cB8Gbe+M7OdZkMznS3Ww2Gg6HqlararVams/nxkZks1nN5/MLnTUOi4PDagIK6IYBZjqp8FYvnQNysVg0QEY6gUQD3XgQBJJkR+kAKGAIyAF0aHDZA8/zTJOM9rfRaGg8HpvjUnDr6v9hHTHaSqViQe94PA+L7Pf7KpVKKhQK6vf7ur6+Vq/XM+YRllGSOaW7F7Ty5D64UqnUBTCzVm6w4bh7NptZwMjn8xqPx8pkMup0OhawYT2Qu1BYClPk+74xZtQvrNdrCyLUS6zXa9Mow5oQfPCXWq1myYrLKmGXJA3ZbNY076fTSff397q6ujLggG08Ho8KgsCSjEQiYYlCuVw2XbJ0WU+C3SwWC7Mfjt2TyaSePXtmz+BKAYIgMFDGN/A7twaEzmEMWkWnHoZPLXGxO6QAsIzYNwkDNoAkCYBxWTQCJwBDQEf7T5wCJJBMIZ8iqMMSUqvAOqNNxwYoVMc/kZ5QDE68Yt8lmWQCMCGZZX3ZA+lJN46v8HPFYtGGpEqyWMg+RlGkSqViBeLYCsl2GIb2vPgdPst3foy5i6+PXzFGxhgZY2SMkTFGftkY+YvncLk3w02zUAQvl8EhyLjFahSeAhBsAkV1BBECGIwfgRL2jXtx742F4ZjeLXIkUAFg2WzWAjObipHDpvBG3Gg0VCicp38ztJCAy3PR6pTNWK1W6na71vml3W4rm82q3W5bIEZbjIOjNWdTXWaLQArjgAHQZhZHx5EJbHwef4/GHA03yYD0dFzKbA+CKCwowMn9HQ4HC8I4AAwILBTOw3E5AwQJDqwztrRYLGyWC2wu3WKQMpCweJ5n3ZU8z7O2uNghOt9isXhROE2Ng+s8x+NR1WrVEoIwDE36QDIgSUEQWMFqGIZWdI1spVQqKYoiC7bJZNJmYSwWC9tzik7ZYyQOaPm32611qapUKmo2m1bbgO6YPWSdJVmRMAAEUwurhu8QfA+Hgw3z5DMymYwxwqfTyZIR5rCgiX7//r0ymYyq1arpxumsxjoDHplM5mI/ma+CvUm6aCd8PB4tIePeACP8gRMYNPx8J/vA0ElmiiCbGA6HBkT4HFpySQbQSBZgFbEht/DZHUgL+4/kCC0/AOEy3QRvEleXlYcNZp/dhG6326nb7WqxWFzUhrj1AMReCorxe5dFxK/53dPpaT6KG2vi69OuGCNjjIwxMsbIGCO/DIz82EvXL37h4tiawMVC4LwEIx6et0AWTpIxb+5R5GazsaNrAityC4IdwILzcbTM58/nc1WrVVuk+Xyut2/fWptSjJH7gymDxWMBmWOwWCysGLZQKFjrTXTcu93ughEARBuNhqQzE/L4+GgMVTKZVKfTsSNntMU4GYENYwVkAMH9fm+AgKZ3uVzasTiFvRgDwUZ6YmwwaoL2w8ODMTXNZtOcnuJBAIP1cvW2BHvWlHVj33e7nbV6PR6P6vV6Wi6X6na71qaXe0skLgfaYV98FmvB3vF9ODrSGgp/YUWfPXumSqViUgySEkm29wT9drttQRibg92kNSusMKwONRXYF+BDXUKtVjNGk3XHjmkjC9C7PsHPsL//8R//oSiK1Ol0rJAXWZEkYxsrlYpNgc/n88ba4U/r9Vp3d3cWxMMwVKPRUL1eN7YNW0XDDsAej8eLuRrb7VatVsv23/fPxeWVSsV09e7zFgoFK3hNJBLWTclljrBf5Ee0lXalGKfTuSsY7WzdOMQ+07Y2iiJj/oIg0P39vfkts2tgWSmyJzngz4AMrapHo5ElLPgJEgueD8aQpIG9TaVSVjtB96XNZnPh7zwnbN9ms7kAl2KxaInI8Xi0xOpwOBjo8Xx8ryuXYJ2J59ITS/hhzMZX4uvnXzFGxhgZY2SMkTFGfhkY+bHrs5xw8abMMS9vlxzDS+e3X5cF4fd4Q2QhKECUnqa++75vAex4PF68zfIfAc0Neq4Olf8Ps8VxPd8NKwKD5wZfNp2AlEgkrA0pm87
nS+d5DTg4a8IaNRoNjUYjeZ6n+XyuVqtlb+qlUslADKdgrWAA3E5TGI77777v2/BKmAsc0j12dcGdY3468sxmMx0OB9VqNdPH4rzL5dLYGBgA1oy9QAssnaU0zC8AmDKZc4eZyWRywUrB9rh1B7BNOE4YhhqPxwYYURTZ92YyGfsdpDKTycRmXXDEDTjDOLOG4/HYJAZIRGBYsIn5fG7FtxToZjKZiy5PrVZLz58/1+l0bldKkGi328rlcrZ2rBFH9K4Ofr/fG+iRaDBD5HQ6D7f87rvvTKPNZ5K4ADqAU7FY1MPDwwUjDnNKMgbzid6ZVsduUCZA4h8uk8exPb6USqXML5ETACz8HQkBvsj+E1CRQ/BdAAVxAlvBRtG7AzwE2mKxaN8lnesL0JtTgM36uj7C/ruf5xarU9NAHQhDRMMwtP/vJqmw2NgNySI+T20KtQAkUMQe9g9dPKcVyWRS1WpV+XzewIO9ItEiXpMsk1AANO5pCzER32K/4+vTrxgjY4yMMTLGyBgjY4z8qD7E87z/S9LrKIr6H/kZJZPnAX5hGGowGEiSsVeSzKDQihLEMAr3zRQHZBP4Dhgy3spdVo+Wq4DXer22Y0OmqvM7ODfHjCwugY/7ZQEJYhh+Op22t/1EImHHrhgIUg4MBN0pzsbzwJDV63W9fv3ajJPiV0CUYIDswtXGSmfWZLfbmRFLMkBwJSYUw/q+bxpV7pn/TwtZvov1D8PQGAm0wNK5WJh1QIpRq9XMuTFkHAKGCoaMQAE7Aagdj0crSOQImmN89vlwOBgLSwBAbsL6uA4Fszcej629Ky1tU6mUsbyTycTABXsjkBG4M5mMBT9AZjabKQzPtRiDwUD5fN702bRQBhTdWR10c4Ihmc1m1n6V55Zkg0lZS4LYfr83PTXrGkWRtdwNw1DNZvPClg+Hw8Vx/Y8//qh0Om1ghk+xf/gH95hIJKwwnSCIBAif5+/wMbTnJBbM3Iiis27a7ZAEg5dOpzWZTEwSgtwJZhP9NskB9+6eIBSLRS0WC5PflMtls21qCXK5nHWFItBKTww3Ng57xRoEQaDNZmP3jw+58geYND4TmyK+ELvQ8OMHh8PBmHISD2IYiSZAyt/B6uGz7BlSDpcd3+/3BmjsIX6Mj7kxlvt1k/34ijEyxsgYI2OMjDEyxsgnjPzYS9dHT7iiKPp/PwYk//kz9kWu8btyA3eB2DCMAAmEJCv0o5DN1WASPFkoWA1AiuCBERHkYWLYqDAM7YiXoAcjwO/DhhAQWVh+xj06RhcK2+JKFjA+9w2do/8wDI0Fevv2rcIwtKNV3s75Tnc9MRyOh1OplBXerlYrLZdLTSYTeybf900y4BYCEhiSyaR9jvTEsrryBOanhGFoQQCNfxAEpod293c6nV4wechMSCQ4buY4ndakMBqJxFMhLmuWTCY1nU6NLaIIF+0zTsAxvsuuTqdT03oj/yD4EJxxPJc15l6Q5wDMMDewky7oIxPBjiki7vV6BqA8K4mYu/4EWIYyep5nvsH9UhzstkmFQWY+TqvVsu968+aNdYlyJSLX19fGksH8kgRxj9JT0T/MHvuHL/j+uQtTvV5XsVg0JowABrC6ds56wQqHYWisaz6f1+FwMEkKiUaxWJQkY9SJLwTX4/Go6XSq/X5vcgeelc8nbiDHQgvPCQFJLp8HIJJAIKVgTUh88ZtkMml+AsPtAgP+zJ65iQX75SZ2FMi7+wYgAnLcu6s/Px6PF6cS2A73AAC64OYm0viC+z38bnydrxgjY4yMMTLGSCnGyBgjn144/7vrs0gKXY0zGw9zJD21Eua4j4chYHjeU8Eob5MsPAvIQ+NcsGQsIm/ZrmyBzk0uY8FbPw4jyY4LCSoYFs4FqHFUn8lkrAjyeDya/pf7x+lZA4pscZ5araZqtar9fq/vvvvODHi32xk7ASAwR4IZC6yNJDv6z+VyFtRhtWCgKNClaJV75PfDMLQAgHYdffbhcDCtNfvrTlknALiBDHnFer22uQrr9do6H3HPyCywGRy81WqpXq9bgGbdcAiGfTJdnQAFexeG4UVdw2Kx0Gg00mq10mKxUKVSsQGTJCTYgeed55VgtzBIgBkFvBwtY48f6vZLpZLJNPb78ywOV+LCsD8kJNhlOp22IOUybbBqgDU27fu+ptOphsOhJV/UPpxOJwVBYM/P9zAt3rUjNP3J5LlYHl/ATrAZQI3iZ/aNjkuu1pw9Zo1gjNCowzoS8EiQAElYZXwJv4chhfF3WVXiDvIdNxjTFWwwGBh7j+wDLT8nB3w/rK2bFBPkU6mUBX4K2CXZM6TTadP2k+y4NoesC2YzlUoZ+0diw3diy+6MD+yPBOdDoMCv3D+7AAYQs27EV5dl5PvdOM//j6+ff8UYGWNkjJExRsYY+WVg5MdIyc/ywsXbO2+OPBTH7uiKeZMEuBOJhLFcvLEjRXD11Ry1ujpO3ipxKv5XkjFybpDDIHmLZRYCn0sRLsbi3uNut7to4UlQlmQFljjFx8dUvAAAIABJREFUdDo15oOg6xZ5ugCAobE20+nUjAOG5urqyiaJc2xM8CDIJJNJ9ft9C9Bo3jEYjnCRPWCEH0pS3KCNlIACSJxfOks0xuOxrUE6nTYZRSJx1jWjpyYgEwz5OYqZYSpdFot9JjC5DkOBLQWphULB2Eja27qsMFKbSqVyAer7/d72n8CI8xMkAEeYQO4Fh9rtdsZOEmhyuZyKxaKGw6EB8ePjow0KPJ3OheouWMIKEcAJvPV63fyMWTD4Bs8GW0RgOBwOtu4APsWwsKWw2/w+98/zEawBZf4DDChCX61WlgTANiPxIQEheWPfeBZiBYDK2oXhuXOZy9Div6fTyb4bf4ZhA8yxd4p2AXFADZ8cjUbGRGKjJCSe59mMGvYEeVUul7MEk1krzOUANFgHYgdMIBf+zj25cgXAEPYdJs0FJ+ydhJvnBHQ+ZP5IhGBoWUvWAvaQxB6AYV0NLP4T2OLr064YI2OMjDEyxsgYI78MjOTl86euz9KlMIoiAwGAhcVhM3hDdI2V4zr3zdwFEh7GXVgCHkfxGC4bB5vEG6vLAMGscaQNewjrwPOwyRQuwiAAbI+PjxoMBnbftVrNvpuOQjB8HDXyth5FZ40s60AHJ2Qiy+VSrVbLDHu/f5pQD2PJ/cNKYITNZtMkGNVq1WQM6K/L5bIBZ61WuwiQMA6Hw8H+F+ccDoeaz+e21mjMcW7XgWBWAW1JJk9wAz5H2QR9tLYEOYIRbAYgwd8RxAEhZla4DDL3QoF2IpEwppWgh3PAVnieZ0fy+/1e3W5XURRZMTY1CJ7n2TNwzwRUkonBYGAsFADLvXEkjiQB8AfE3OQIppnvn06nF8+G3AT2BdYUMGKdASrqGZDYrNdrkxCx59g6QyiZ28OwR7djlptMYjswYewZdlar1Ywdc6VSuVxO7XbbBnJyr7BeyE9gx1yJCfGB9Sc+EEdYR/T4icS59TKsGaBG0gcbSxxIp9OqVqu27zCGo9HI4htgRqE4n0cdBhIeaqCIJwAAReVo5vnuDwEDMHGZPG
Ijzw6wuIweTCp7A/MJYLIPgDz/7mr++fz4+rQrxsgYI2OMjDEyxsgvAyP/V0+4uHgQtM68qROocAgCu/v2zbE9VyqVMkPlYd1iWumJOeHnMSAYExbydDpZEGYRYX5gcNLpp3ahfAYFunwHm+l55+nvMCIcl6IX73Q6ms1mms/nBnIchXOvFM22221zvpubGxUKBQVBYGzYer3W4+OjTVNnTRh4hyZ+uVxaNxm06gRL5iq02221Wi2752w2e3HMjJPCXmSzWdP4onvFyThednWsFOe6QZYAzt4UCgVjMAmMAAOGjMESTIrFommKcVxAEVDdbDbWxWgymRg7yf9K58LISqVi30V7ZECbn/E8zwpPSRYqlYoFIRwfxgMnxuETiYRarZaur6/NZr/66ivVajW1222TXPA7BDOCNnURODSs0vX1ta3tfr+3QYWuTINnoBMPjBk+VCgUTFOObVJc6zI52A9BmOeExSKRo+CXAAmT5vvneTk8PwMUSQBgZqUzY0QxMwkSa4OE5/Hx0favVqtZMbQbR9zkhgDO3rBnlUrFgjgMOqBJMshzk/whMTgej9bdaDQamSyIOhi+HxDie4lN2J0kk3W4JxDEK1ha4gb368ocSH5d38V3+Dy+g2dlv4mhgA6f6/7Hz7onD+yJaxPx9WlXjJExRsYYGWNkjJG/boz82PWLX7g4HseoMDpuGgBxWRVujsV23+L5PTbHLch1N8fVusImsLjuESBOwhEkjJj0FLzo+uP7vgHLbrczo+VzwzC0wMkmMB+Eo2lmFGBgvG1L59kMxWLRhhjye6vVyrTU6XTapBAMtHt8fJQkK/aNoqcJ9oAGYNZoNEyS8Pz5c2sXCqBxhM06rNdr9ft9k4BwH/v9XtPp1AbfEUDdt/r9/tyRqdPpGAMEi8vvuIMw2UekJ2joU6mUdRPCeVw5BYwW+0Y3Lvaz3+8b6wAAlctlY0Vd2YXv+8b0NRoNaysLu8IMFBwONgmnxaESifMMFIKcy1CtViubVL9arXR1dXXBPnqeZzUSBJfD4WDFvQA+vkXBKvtdq9VMUsGRP0wkvw+gtNtt3d7eWiCi4Nb3fSuSP51OqtVqurm5MYbI7VSE1AL5UzKZNKAAqJCiwP7CLmFr+XxetVrtQhdOm2PugfUnuPu+b3YB+8ysEfe+kFnwd/wu9gX7RAKCzIP7BRRcQCa5zGbPrZGDIDA7xD4bjcbF78Akw6oC0tiN+7sEedaDpMdNYDlx4PfcGOh+ntuJivvhO7F5WDwSuQ9jLfZOHHZjqPszfH58/fwrxsgYI2OMjDEyxsgvAyM/dn20LfzPvVytIw+y3W5tkVxJA4aI0X14NA/rQvBaLBZWPAtzkEhczgKBvWCxXZkGm+we97mBnADFWzdvxrAL+/1e5XJZk8nEjAlpgud51hUmm83q4eFBr169Ur1eN6OBQdlut8rlcqYXzmTOszaQi0jnDlTVatWKeD3PU7Va1el0npWB8xHENpuNzbRwW/ny9l6pVMxBOYpnj2AfPpQLZDIZG8QIE1soFNTr9UwrzHEtcxtg9kqlkjF6MLDtdtv2drVaGVNzPB6tQxGOxBG7q7tHZiPJZAkEDFguGDuO9LFBmAy3yDsMz52v0PKjwXbZmmq1aowsNuMeocM0J5NJG2LIfhGsJBlbCCOLnWMPMKWAFhp+EgSAAbYMdvVwOJjNkURJ50CELcP8kHDxDKwtweZwOHeQ2m63urq6Mp9mD6WnQlj8jEn2BLv1em32zWe78ynYD5hyEprdbnch/YAJJjACrolEwiQnMJzYMlIJ4gvyJbpQeZ6n4XBoMgYK2t2EF6kQzCmJoCRjMLHv7XZroFEoFDSdTi/0+Ng9+wrTDXhyny5TDRiSjLgF6qyfK69gD/k71g2QJQ6GYWi1IawpfwaQuAcAF5Bi3d37B5Dj69OvGCNjjIwxMsbIGCN//Rj5sesXoyfBHHaoUCiY07iLy9uwJJMawDzA5gEgHOFyVL9areyokw2GYeEzWUDYPhYDo3CPhQEXFpZjdwI1QXwymVig4m08iiK1223NZjPl83lNp1OVy2U1Gg153rl1J8WnMDt8TyaTuWA52u223r59a0fgjUbDdL8YcaFQUKfTMXBEw1upVNTtdlWtVq2jz36/t+NbGBnYA4Lm8XjUu3fvVCgU1Gw2FQSBGV2pVLKhfsgZEomEgcDt7a3m87kKhYIFGZyC+RcuA0BAciUF/DuMDUEVNpLAStDGgQBlAlEqlVKv17NBluwrQEPB9263s04+2AQgVqvVbDYIQSydTms+n5ts4HA46PXr1xfSHlgvJCwwpCQXQRBY16xut6tut6tvvvnG2CCYSZxakunPgyDQdrs13TjH9LBIzEbhXth//IggjQQEAEaGQK0A//7NN9+YxADW6McffzRmlCApncETXT176hZ1kzz1ej2t12u1221LWug45fu+yWh4Bhhy9PvEitFopFQqpWazqWTyXPSOH/Oc0pkVT6fTms1mGo/HFyBG4X2z2dRyubR1Zw1hto7HpzlCxCYAh+5SJMEvX75Ut9s1xhhdOckp/pxMJjWZTAz4XWkO4O5+v6QLZpNE07U7EoLD4WAJB3HQZdY/9DOSPAAL7T9+DENHQuBKOPBzaini69OuGCNjjIwxMsbIGCO/DIx0lQgfXp/lhatYLJomlMCFAeM4MDA8qPRUfJjP541RYfFwAtgA3iDdt9RUKmWyCd7ApbOswPd9OzJH70kRsOedNccc5eMIMCzc03A4NAAJwyd99XK5NG1vKpWyo2PXiNy3/263qzAMjT1Ip9PWZQZdcq1WMyaEoMPled5FEF8sFmq1Wmq1WrZeri4fdjCRSNjvZbNZKxh2n7Ner5tuGGYClsU90p/NZrq5uZEkO4KFQSNxQJcMo8L+0wEKJ+H+cAaCjauLRrbyISsMq8NnbzYbYwhLpZKq1aoxmeinF4uFNpuNBQv27HQ66ebmRt1u14CfbkvSebo97BhMM92osOnBYKBWq6VGo6HJZGLMCjIP7IxAnsvlLAngZ9lrGBVA0x1uSjBIJpOq1WqSZCwTiRX/GwSBnUZgE9gxdppMnlsrVyoVnU7nNr2SbB+QMcCYkaQBmgQa6Ym9p5D6dDpZi+blcql6va5k8lyoPJvNVCwW1Ww2lU6nrV0u9w/gTCYTSxAPh4O1rEUuwd8T/JbLpa0hDCfMFs9CEGUt9/tzlyvf983mJZld7XY7tdtttdttA810+mnQJEkHa1Iuly1xdpO64XCo4/Hcvpo14jvdRAoZBWuCVtxdbyRH/Nll3Ug4eH709O5Jhe/7lryTXBIT0awTJ0kSALqfI5mIr/96xRgZY2SMkTFGxhj5ZWDkx65f/MLF2yEBH401N+MavCtl4DgclgqjpePOfD63z8UgWCz3TZQFcI+hYTESiYSxRq78gTdzWBIYMTYG/SaFp1F07gDT75/nW/KGj6xhs9no66+/VqFQ0Gaz0fPnzxWGobExo9FIxWLRWA+e9+HhQVEU2ZTzfr9vQIMkAlaGI9jdbqfRaKTb21sDW47qWVMYLdin1
Wql+Xyu+XyuMDzPFPE8T+Px2JyoXq+bHIA12u/PbTLR3O/3e2PyxuOxvdEDiKVSyRwjn88bA8TzzGYztVot05APh0OVSiULJq5GGKagUCjYcT5yiGazqdlspnK5bHYG+8aMDMAZBuRwOGg8HhubgwTk66+/VqPRsH3hM0kYJNm+8ow4LKzM27dvjV2Ooki3t7fGvqVSKb148cIkD/gMDBE2x7BF1t89xneP09kjWL4oivTixQsLLuwTrLpbB7Df7zWZTGxtaN2KbISaCIIdgC6dk8B2u63RaGSAgM8TbLj/6+trC6bPnj1TPp/XarUyXf18Prc4wfePRiNJTy20qWshMB8OBzUaDQu4h8NBz54902Kx0Js3bxSGoV68eGHd0FizKIrs90lWSAyQBO33505b7Euv1zMZEuvYaDSMJX94eFA+n7ei78fHR5NmkFQRC5HEAKbL5dK6OVFTQFCnhgbfdyVlrnyIP7sSGBJEkl5+DpaP54YtJ96eTk8dmEjY2UtiNTGUZCK+Pu2KMTLGyBgjY4x0Y0GMkb9ejPzY5f/hD3/4JViif/qnf/oDg/BwQAwYRo+AyJE2b4V06gFUOKLzfd/e/mFZ+Bl+zmUROALme9lMpBu+71tQ4ag9iiILEPw8bFSxWDQmBmaRt9jhcKjnz59LkhkqbGG1WtXDw4MymczFNHGOf6vVqrEvfMdsNrtg0HK5nOnbaV2K1jYMQytgbTQaNuAQJhNmAGaItZJkE+d3u53K5bJ9B78P61iv1+34nCnsu91O9Xrd5BhupyYKwTH4/X5vTCy1BqlUSsvlUovFwtioWq2m9Xptx9fVatW66xBUkOGwFrvdU4ceijpLpZK63a4FUZ6FlqjcH+wV63Q8HvXnP/9Zw+FQf/M3f2OSGRhInFWS3TP2kkic55EgKeE4Gq36brdTv9+3gHBzc2OOLZ2Z0NVqpWKxaKw2DEyhUNB2e543A0gCkJJsSCM2SZE5jBzFqDCCAJIrJ5rNZsYUk6ghd3LrH7AHLgq+SRKiKLIgut1uDQiR8ywWi4t7x39Zl9FopMPhYHUWSGH4/4vFQqVSSd9++622260VRrus/r/9278Zg3p9fW3SpmfPnmk6nWo6nV5Ilij4hlErFosKgsAKkaktoDXv/f291Y0cj0e7z0ajYTUbV1dX2m63VmieTCbt3gFA4gXPvtvtLtbSjY38OzYIYHOigayHOEjyTuAncZV0kViTHCFLgdUEeHihcuVOMLNIzTzP02Qy6f7hD3/4f34RcHxBV4yRMUbGGBljZIyRXwZGLpdL/eM//uP//VNY8Fkkhb7vm3Pncjlj2dwOMYA1mnWO6KQzo+a2oyTwoyWGeXMlARyd4ww4JItFkSIbwv1UKhXTk/L9p9O5YxAbxiahnS0WixYQYaJgHG5ubhRFkb3tF4tFjUYjO7an4BW9Kdr41WqlbrerUql0MQsC7SlBBV06Gmi0wvv9Xu1229icTqdj80RccHXZTQwLmQmBaL/f6/Xr1/ZZxWJRi8XCZC9BEFjbWRym0+lotVppNpup0WgYUOXzedN8E3TRvk+nU5Ou0PZ1Pp+rVqsZ6BEseO75fG7Gjo4epyEpQYfvarjRvsM8SGdpSCqVsnX67W9/q263q7/85S/667/+a+uExb5JZ0kC+uRyuXxxFL1YLHR/f2/Ovt1urW4BhjSTyWg6narf71uihN6coE2ygf/sdruLZINAAxu5XC6tIBaW6HA4qNlsKpPJWFCH+UG6Qicpl4XBz5BmkBRxbI7Pksxxn+jOAVH0+uv1WrlcTtPp1KQM1KOsVisDVGQhrDf1HRTe5/Pn+TsEXUkW/LfbrRqNht68eaPZbKZSqaRMJqN3797pdDrpq6++skR2u91qPp8bQz4ej1WpVC5aPi8WC0t4YZ5hoj3P08PDg4IgUCqVsnsjBgRBYHZOQuj7viUzm83G4pdbt8O6cJIAQ+2y9cQhkjiYefyBJAEbR67BfSQSCVWrVZP8kLAAbiTVsMWsMSwg+4Qdubr/+Pr5V4yRMUbGGBljZIyRXwZGfkwF8otfuLg5tNMc67utQwmiLByBniN6ijcrlYp6vZ5JFnjTl2RvlRwPcmSNXpMOL4vFwsABZ+U4kWPw4/Fc/MfRMCACo8jiDgYDVatVVatVzWYzLRYLJZNJM8jNZmMabjaj0Wjo4eHBptpLsuJbmEp08TCU5XJZb968MWNmrXibJ3hiLLSTPZ1OxpShz63X63bkj2SD5yX4Iq/AcDebjbXuBWhw2kQiobu7O2th2mq17BgX54ZRq9frJnfhuH+73Vphq1v8CThT6LhcLm16PckBwXEymVhQBGyRacBiUTROMsNRMGwaLAcAWSgUTCbB0ErsB8BCrsM8EuQr7DV1DtjPYDAwGcTd3Z3evHmjcrlsumCCOTUWPBMthtPptIIgME0/iRRMHMEbBqfZbCqXy2k+n5sPsfbuUftut7OCcJg+gqDLrBNkZrPZhWwIWyK4EtypLaCYHd8nGWAfWWNXd48NYPMUWWcyGeuAlUwmNRqNNB6P5fu+Xr9+rUKhoOfPn2u/32s4HNpaUQwcRZHu7u6sfiSTOQ9XXS6Xajaburu703Q6le/7NvMFWQ5d3ZCNcHIAc85cE4DIjW/L5dJqF1ztOPaP/7rSg3Q6rYeHB4t9rCt1DEhouAj+7AOAQoxkH7FNN4l3texujZCbwJJ8RVFkf8+fXcYwvj7tijEyxsgYI2OMjDHyy8BIbP+nrs/S49ct3OMYDmdhcXhLRBtMgR0sGgwGumbeHo/Hox2BptNp06fy0LzhUpRLJy2YPBaFgMPv0apWeiqQ5DsTiYS9KR+PRwVBYGwczrDdnucmfP/992q323ZsnM/n1Wq1tN1uTXLB/cCALJdLm2LOkTGSAJjCzWaj5XJ5IU2oVqv2tg5rQcHr8+fPFQSBZrOZFb/CKnqep36/b4GeI36YiyAITJKx2+00Ho/V6XR0Op2MmZlMJnp4eNDd3Z0xE91u19gc1hVGkISBfaYL0mw2Uy6XM3aMZwDwAQOCymQy0X6/1/X1tTFCy+XS2LB0Oq12u63VaqXlcmmSizAMtVgs1Ov1LDEheDJUs9frGav1L//yL/r9739vNkeyAwPU6XQsOeHoGIBEC73f7w2I3aNvl3Epl8u2J1dXV5pOp9rv95ZsAO7FYlHValXb7XmoIc9AATT1FcPh0GQjaJRhkFz2zWXNAXHuG9ACDGj3i52n0+e5N0htCEwkdLB2gIGki+RuvV6bjImgiFQjl8tZ1y98kHUlCZJkUiQKcd+/f2+yG9hpbH65XOrm5sbaRUvScDhUv9/XN998o+12azYwnU7V6XRsHfg+ng1wuL+/N7lOo9EwkCfhmE6n6na7evXq1UVQzmQyliCRJLpAe319rdVqpSAI1G635XmeJYjUwJAIH49HA18SHjqvsW6AID6w2+2Mlee0g1iLP7gadfYAcHFlaUgr4uvTrxgjY4yMMTLGyBgjf/0Y+bHrFw8+JjCzWMlk8kJzS+CHAQiCQJJMNiA9aZH5DByB4I8jcAzJ78Lu
+            [base64-encoded PNG payload of the notebook's figure outputs omitted — only the surrounding output scaffolding is recoverable: "output_type": "display_data" entries carrying "image/png" data (matplotlib-rendered figures) with metadata "tags": [] and "needs_background": "light"]
z4FMbHverZSZsfaaTOAiJ0i1E8ea/80AeCA4CFY3nyVBtmRT0XcTKGnWVkquOJwNT79SpEpxXqyg79Zn0w0TgydP153ymOZKNfX7x4E7u5AnPdTMLBj4bIGLlU4sk+kNKdr4tgklA8/giy5OJiL+i7jpzffG8PrtPuw0lGyttzqPGp045hsaUbujjtAF14NUstx/lYgymCmuqjCXbcxxvO8sd8gHnx7+TL1ZVE7BwbOYKt4nPl+I+2Sb5ygxQBITJb1xOH3MNTJquRo2RjZGNkY2RjZH3Jkau0bU8w+UOJ0pJ3Nu50q7S57LxaOiqi7dKU9CncfnjfHJgVgNnVTvKpeOU8+DgYOeXsmd9HJhn5OCdEtXMOb39siw7FQ8fn07qyVbH3CE1psZL8mqs1K9qnqQUBGznPJPvVOV1HRwdHZ0lWwYe++mz29B1rjFUUfWqiPsNeaJNJRdfj5zkIF/OJ/Xi86UElmTlQnCMze/snJ6enm3R8WqPj+8g69tdSA54h4eHdXx8fGGbEvmbAe7h4eGZD83I5U5boWYLgLRIS75GfSYQuyzWZ3Knv5SVMZoA2AGfYzNu1/TXlKkxcnfexsjGSJ5vjGyMvJcwco2u5Q6XJ+W1Sf02MonOqiBMAOXj0CHHOL9Sd8emMmX0VA0ieFAuObEnTAcZ6iElG/KuMd053JF5O/oyo7oczpd0Q12q+kAA9dfIcgz10zyuVwaHbOLEapEDl+ssAZfPqe9OvngQX/Q1T7ZMPAnw3XccFOQT5DktotjXY0NtyIvLKt0SrF339GcHYcq3lmh9geBJVX7DsbhA8IWXxiGfPr/rjVVy8pESvl8kcBzJrkVdks9l98UO2/rixuXiefpv8qk1XjQOY4bnfcF22ULU7e7+wzZXzTlNmRojGyNdr42RjZGNkfcXRl7bBRcnTEqZJXb/7A7px/V5bS4GoR9zw3uFJilShmPlw9sdHx/vyJr4oTOqKpEclHwISGZJOgFkAlwec2fUA93UV6rIeXVN51z/XvHyAKLuOZbP4XOlPdDs58mFuq6qs4qpV8Coc/qWwM6TE3+7wRcBnkw0v+uEfkW+2c8/p+8c3+3gSdDjidsy0twcYyanP/xLAODzG+5/HF/H+T1VGXWcfuIgQt3y/PHx8QXdcEuK7MxFAqu6XCSk8RN4EmioW8lAnlIc8zv5TjbiAigBmgOpPvtCyeX3OdYWHE1zaoxsjGyMbIxsjLz3MXKNru0thS6cBBFTFJTt0lW2K0JCzSptpFTxYyUwGYrzegJkW47DIPOtDwogzS0DpeTJtp7APXmnxU4ai0FN/ZBHb5fmYRKs2k3mKZhmgO8Jmbe/aVcHPi4KVE3UdyXDlBRcnuTjs4Ujk1+yp/tFGssXLLS/J6bEF+fjOJp7tkhaW6BpvlnCTYnFq3scl3HAePWELMBSX/qj2oivg4ODnUUZea+6CDBJRh73ve2esJPufEGWdJsq+NQXfSXpnDrzOPRFYQJV+kOa3wGE+ddtR5l9sZwWRlu99lsKb4MaIxsjGyMbI6m7xsh7FyO3sRaD564vuMYYy+Hh4YWrXE/UfnXOednHA9jHmyV/H6vqIogkB0mOqe++n9UDhQZZ40/y+5xJ5gRUDsKp2qi/3PtOWbktYy2RUpYUvA7uOs/goGyuj1nS8+ByfvSdQZaAhJ9nAepJnrzyoc4ZyM18wduor7YzpHEcUDxevILk+vJ48SSjOS6rnvPVvGsx4bbiYiQ9GzDzd/KW8oTPmwCR4yVAE60tclLCTCCrua+ib5ePY6R4ZjvO68DLuX0eyirZ1kDXY9qByRdQor29vX4t/G1SY2RjZGNkY2RVY6TriX3uFYzcXkQ+vBdcTJTJ6WeM87i3rdoohbcVZ8lpTXFUDuee3X6v2nXaWeLQVbG34WtYE3/qr4coXZ5ZciT5rXzXn8ZVNSQlcLVzkEl2cD9xe5BmycvnZdskt6of1GeaJwWVJ2S1T74iEOE+bwILE8Ks2pmSEuX280nfKXlxfk9eOpYWKklOB9eZDZks3Qa+aEyJem3xwLHd3uTXk/Bawna9pTndLg441Bl5dJ6T/bw6dhVyPXryp2/zYfBUuXVdOVG3fJjaZfcFjI+hc8fHx33BdRvUGNkY2RjZGOlyNUau0yMVI7e+Fie662e43GmSI9FZZg5NB+JYcmRPUO4wrpzZrb+1RKDv+/v7Z7duyVd66NKTvDupJ1A3LNvSsVwP+i49+FuSnKjrNf5cHvJNfrzKpeBKCcyrK86X/l6WiKRz8enJU/0S0PC8v8rVk44n5zQm2yUwUl/tyfagTX08CVK3nFdvb2Ilj9W2FFOpspTiZeYjBFSPO+rYeZ8labVn4p3J659dNgKdVxBJs3H5vIfOeaJ2+ZI+mWCd76Qbt4+PT79OMZQWaVpQ6pjP4znEKdnAgTPps+nq1BjZGNkY2RjZGNkYeW0vzSBjfswZ8UScmKRACQxc6Q5ETBBrvPAzDTgDr1S5cF74nQAgHmaJccYjeRBvbEti9UlATF7XKAWB+ioJ8HdSWGFgok2A7fKxeuEBOnvI1EErjZvs4wHjScp/xJG8pPY+lmzpvyMiIODzC25/D2T6kCeA2cLB+6gt+3t1iecTGItP+lDa9uE6Ej/uo7RhAhy2TWBMXdPfEsAn/xBfjCH3D++fZKHfz5Ir+U68uL1PT0/PfF6vEKZs6pP80ytuCdTYnjkugU/yhbVzTVejxsjGyMbIxkjy0xh5b2LkzAerqvIL82+TZiCiyf0H6lxJsyShSggF9sCkQt2xLuN5Nr8cbe2fg4B4S8f11x2MDjsLbE82JB5X4B8dHZ2NyR+QVPKaAUqal2NrTK+KJjD0JOjyiWTLNd2wD48rMFgFoQxuQ+qAFQ/X+9HR0Vl7VXEJGDN7UBaSb1eh7zOhkVKyZnvplDL5Z/kDwZhvjtI8M39j4jk+Pj5LdkmnapNiaSa7yO0tvSd9uG6SX8ySHWMzLV5SPy5sNHaKK18AesVTfTmPeE45g+RAxtyT8ogvKFxvWhyoDRdSvthI/ZvujBojGyM5TmPkOTVGNkaq772Okddyh0uOR8E9Uc4SZLri1N9U3dN5D2yvvM0ClFfdbMOkRsDzJOyVF/LlQZEqGNTXwcFBPfTQQzu/z8CxfO6UeHic7VKFYVY18qTCSoDG9C0HHtSuNxETHXXFz3RsD5QEQmpLn0t2pIyz19yyv/ucL26cdyZcb5uAxxOt+usvdU19emWG47idaEMCC2VyWZikUszxmLdx23Fs+q/bdpYjfKHmFVG3jdvNda3P3HI1i3/3saSfpPcxdn/XyPn0udbIX4PrFW5f/FBvXOAl/dDn9GxMysvyHS4irwImTXNqjGyMbIxsjGyMvL8x8lrucDkz+szjrgAPgJQwSFKIO57O+XfywL9ugORAPo635/g+h8sxk0v7YTkfr5jpID63O5AnMe1nTuPMdOvnmTCY9LyP8+R9nFefd/bZbU15xJ+Szuw3P9Q/VVI4p9tO/bQ9hO1Y/VPgul6qNknBbZn8gv7BSjf/Uc6kr7WK8SxGkh0o+y
yW04IiHfc48wowE/SyLDvVQdfPwcHBBbBw3hU7s4qo5ubYPgZlTv7v4My5U1zSn9ifff2zyAHzsvghz2qfPnMBlfIN23G8tChsun1qjGyM9D6NkY2RjZH3Dkau0V1fcNEhHDBcEBmFxz0Rsa0ndc7nBmQCUB83CIN/FtguR0qEKTns7e3t3HpMVRC29d/JcB1IHncY6tvH5oOOyfiSf3Zr2HWt7+l1rdQTg5j9UiJLTu5j+puxEph7JYXjUPczOzC5aXzZhAGmtpKPv4FxOwuRGYBxTh3j7faUoNmPbdnOkzj1m3zZ+fctCzo/k5HVK/LHeZO+vPqZkpq2FMwWZrPj1DM/e5VxllCdDwKDxmE+4eKFdqNtE+iKfPHkPkNd6vwMYBKAJHl8AcWxOecsbzRdTo2RG2qMbIxsjGyMvJ8x8tILrjHGu48xnnJJm81gKxWXyxJYUgydkQl05pj+eeZgPO4Gc2NQ2c7vzLHJfwKTlLAIiJf1pxNUXbyy9irWmnycKwHsZWO6g/rYfJ5AFTf6gn933XBxIFk5v1dEnIfZQiAlOspDnn2clBw8IbPv/v7+heTsSSf5mR9nQmXS9qTsScLlXvNbT24kBx7nlbfpPTm5bRg/TNJpsUkeCYhpbPfbFEMz3jS/L1YJdP77PdKB79/nZ/Z3G0lejeMLwWQz9z2X2T9LHvqq+5/GIx8zoGo6p6vg47ZdVTVG+tiNkY2RVY2RyTaNkfceRl76DNeyLD92WRu03fmcHNdBJQWiPtMICUg4Hx+8pLLJj7/q8jJek1wcS/PRWDSGy8mxXFZv5xUG58OdggGwluD53ZOUOzT1PrtV7ElY5GCb2s+SGudw21BGBmqq0iVwcOBy/hLN/I56mumCIOjA6HPws8Z2XYh4bs3XZvbhOLOYSN/dN8iLHxNo0HccgHyuZDMd95hIdve/szhLcqnNmn9yPvJwenr+FiUS+Ve/xFvihcept5k8CazofzNwozxpvLU573e6HXzctt/53Bi5KyfHaoxsjPQ5+LkxsjGSxx8JGPmwPMPFREWBnDzBsF0CELZJSTIZmv2593e2bYJj6rPfFuUc7JvAz+XxOVIFhOeS4SWrzns1LCWXpN/kzEzadN4UBGuB4Trl9gRPFl4l8qrWTN+s/Lls/Ot6c3+aAWgKagcWyqXvtI/rlvL7NqOZb5P35AueWP3cLH4cuL0vbbHGm9vEq7Ts4/rSeY93EcGIuqQ9+M/9NvkX+UhjsY94S+PqMxeRSd/pzkPKBzO/9Sqb93GbzPLBTE8c0+dNVeGmO6fGyMZIp8bIXd02RjZGPpIxco2u5S2FYlQM8NacztE5maBFnhR97JmwyQApsTFhpLkSv57I9Z0ONqMEjJxLfZlQ/Ao7yU++3KFnzql+DO6ZjtbkSAknJWz/uwbay3L+dpyktxmvHvCup1QlSmOlBY3zLkq38pOe9ZeJkH18zqTbJL/s7K/XdWBLx5jY5H/+VirKmxYb5DUtClzXLntK6smv0/wpCft4nMMTbPIR12+KrWQTArf7h1dcva/rxs9Rf+5Liadko1nbWT9W8D1PpJzQdGfUGLlLjZGNkY2RjZHe13Xj56i/RxpGXssdrhQgUjoTuBJwAgD/7NWP1IbjMLmvBfjMWRLAsS8/J74TuKT25M+NRSf1vmmeNX3OkuOMN4LpbCGgfkm3ntxSckh2STrwdq4/VoAY1Jw/BTSrKORLY+j1qs6bgn9Nn2v6mJHr2f85764L8bXms0k3HCf5iSdc10caexYTDg6XjeF8ypZ8DbDnF+pitmCgnhwQZsnfdaHvXqn1ahnbXZbUHRx9Lo3pNp75i+dbl8s/S5bUh1tAEnA3XZ0aIxsjGyPn+phRY+TFMRojH7kYeW0omhKO/vHHzDyx+2fRWnJec6hEM0f29hxT51KFjoanYzIRpVvX6sO2KaHQWT3wkp5cJzye2s3Go8wKFAcvPhDJ40lm119KPN4m8Z4SXkoAngxTQkzBN5vbwT0l+TQ37eigxPHTbe1ZTNDfdC69GSslJRH3T3u1V2OmpKLE4jJ4Wx2jvvxzkt115/7AOGCVyftyDPfHpPc121N29yfqyuV2mTg2eeUWrKSHtfHSIsWribM8cJnv8hj9d23upqtRY2RjZGNkY2Rj5K5MHPtewMg1uusLLk2ajM3AWktm7JNusXpQsa8L60pPylAy9wRBJ2RC4K9Oc440vhsk/fNfzua5lIw4J+cjOHmSS5Wtte0SDhocP+kv8V11EQB1zl/Nmvonn6FNFIDiK/0qudvHbx17wnR/ok45rm9RcBs58Ih86xB1Sr/yBYZIeqP9HZDdZ10PGlsPsftihX5M0HDg9rE1b6qa0R+og+TrrkPJItut6XiW2BPYyIcSuKVFQsotLqfriG19ET2rqjm/tAPP8zv16XHF6rbPzT4O4q4/+sEslzZdTo2RjZGNkY2RjZH3B0au0bXc4RIjclY6gQtBBp05d2SC1MxodF4PYneYdG7mHBqTtwx1Tj/25/0JEskR3GiJL5eFBve/Xhlz+Rw8PMnybwJB34rhOnbedCzZzZOW9/U+s+BiIpa/8W1YPn7yM87t56QT6sdB1f1H/eQT/qavo6OjC/Zz4Heg0vnj4+Od7+zvQOuLHo1Nnt3mnJNbEhxQPJZPT0/r1q1bceGgPvv7+3V4eLjDD5O524E+4GCuY5yffRMoc5HK8/wrvn0RS53RPvTFmzdvXsgdrj+Xze3v8eGvR55t5XA/SDFMoPZ+Dvbej6+q9opp0+1TY2RjJI81RjZGSg+NkfcPRt71SzNmQvBzEsaTUkr+vG3vCYhOoYD3/ing6CA06Fqid8ejAycHcUfSfMkxvK0fT3pLwKIENnM6yeHfKUOaz+Uiv5IrVVSTgzq4iQc6tCdZ/161G2CSiTzwvNvKdUl5We3gAmlmZ6/yyg4eePQ/90fN7UDPpMJqjAMr+fLxaEP3Rbe9gITklfhUWffku7e3d7bP3yuXx8fHO5Vmjp1s5lViP05wlL5OT093Xn/t8rCvJ3TJ5LkoxdXBwUEdHR3t2Jjt/XhaKLA9Fx6UW7qk7Er0XuX0eeg/iafE4yyXp5hpuho1RjZGNkY2Rqp9Y+S9jZFrdNcXXAwcVu3EwNrVpwOPB4AHWNVuQFXt/lo3nWMtqc6Up+9ySoKOjOpjM7BSNc0rac6L+Jk59wy4qnarROkK25NUSggJpDwROs2cl32SD5BntfMKCX2JCc+Tjcaqqp1KquwwA33Xz8nJSR0cHFxYCHgCdfJEx2PHx8dnx09OTnaqi5rT/T8lOepFOmACOTg4qP39/Z0fFHSefBzqheBD4lwpBhOY8zhlOT4+3rE5ZXBZnVd9Z1slU88rAhCXg/z4YsETPOdS+1kSPT4+3skJItepZE9VYI+3FEt8O5nrOOU06tYXCe7baSzqyP3NF4lNV6PGyMbIpN/GyMbIxsh7DyNn+qi6ptfCJydOzuLOx75Vu4qjMdxx2IeO4r9wrb7u5ExYa7zqOx+kJLCwXZItJWId55wpmJ1SkvH2BD8mjDF2Hx71fuQ7J
VEHO8pGORwMfXzqSue9oid+Dw4O6vT0/M07HrRJnyL19aRK2dKCQG0EMAxMyeH6JoB5O/HiCXG2eHDAVCJ2/WjBw8UP3x6VfMtt7wtngrAqkO5j7iP6q0rW4eHhhfjl4s+TuNt8zaZ+LC0YXUfig3r1pJgWCzNf5jGPh9SGtqaeZ/K4HzIWuABJCwWvlnOx4Hz6oj/p0n088dh0dWqMbIxsjGyMbIy89zFyja5lS6Enh+QEIt7q1TkKQ6dm8PMqVIpg1cb7eMIWf8uyXEgW7uS6alUgcK6k5BRoTpSLx6jHGc0CQImPekxjU8d0pAR81MdMljSP28d50lhe+UsLhBS0M9CVf3jCZhLjcS0w3GfpC7IVKxdecXKe5CMaX3rxCkkCcPJDPREUyZP+qWJFPc4WTkwulEE8u27Fn1e+UuIXH9Q1+6ZE53Zn/Ign6legSh36oscXJjMgJS/8y3FmCyoCtvSWKqPkdea/VbsLVc5DfaSqKMdnW5HzlRbA9NMEZmtg2XR1aoxsjKxqjGyMbIy8HzByja5lS6E7WEoiqU/VbjLyvkw+7hypGuDJVpU7BZ+f41zJET34RQSGWUIUJbCSUfkAJpMNKzMEWZe9Klc8Ka9kY7J3QE8JguTJl0lXOlPw387Y7vypD4PYq1tMiBwvBZsDmMbyh3c5fgpiBy7Oy6qXjjFZJxDzWKH/sVInvVTVWTVnTUaOTR/yRZP057pT7Lifu8+77XzRxIVYSs4pEaak5/ZL7ZMdZxVIr3Sm+E7ypVznldDZOOwvu/oihXN5zFG3vmifLdCkf9fN7M6Fj8G2nKfp6tQY2RjZGNkY2Rh5ke5FjFyja7nDxb/87AaqOld+qi6wvx+XIql0oIjNyQAAIABJREFUJzqIB0DV7p5nv73sCc8TtMvgTkqnIGB5AHM+Jh2Ny+BJVQFP2EkPVRUrO+KJY/O4kiH33aZEPEtKLoe3pZ5mAaNjbju9iYigxblImtffXkR7Or9e3XCbM7H6P/EuHVLHlE/VuOQLDHzqXvPrYdoxxs7+d38I1vWfFmPuNx6LWujwTUBu57RI8KTMBZPrfsaL+3NKnnt7ezv78rmVgP08mafFCG3Eth7PfBYi5YvkY7PPCQi5uEm5h7pMgMr2tIM+M49KDp33cXysNFfT1akxsjGyMbIxUv0aI+9tjFy7+LqWZ7gSE7xqrzoP8JQw1I+K1XcfZ+aEnpQ8wTPgFYTsx8+uMB+Hyk1X354YnT/ONwswr2B5X+rSKxF0Rtcvv3v1yMfjeY7JhKfjqdKV5KRtHKSSPVnt4NiemD34Z0CusQlSDtwkJjOXg7zrn8YTEVyoQ98OoLl8QVJ1vhXFq5QOVJo/+Z+3d/LqsAOJ+if/T5Uu54Gyuq8k29PmvqhyX+JxLmLIDyn1d13pe3r9rPPsOYfnKavzSf91HTl/qthyIU79p7sPyb/Iiy80yD/Hc5003R01RjZGrsnZGNkY2Rj5yMXINbq2Cy46lQOFM1y1+2YSEpMDBXZFcB6Cj88tYh9Pojyuvgx2Vv0uU3LiLR13WfTdjeoAzCqb9OUO5kmDvLgdvH3i0/lL8nrF0vXsCYhBmhxfY3HuND/lTIuH1I9A5YuB5D+Shwsb9wWd51z6K+Bj9c154RjJPj62V7W8v4N6shvfNKY2DkIpjrnQSePO/Puqi6Pkg55k+fpcB1/q2nUxS7Scl/MwZrSIcn/SeJoz5b00n9q434r4Gmsuksgrc9/MDiTGlMb3rVQzXdwOsDRdpMbIi/M0RjZGVjVGJlm9b2PkIx8jr+UZLp/MkwiPiXjrT+ROSkdhwDh54NBAnMOdk4HhCWVt/OTEHvwOAD4er7h9m4LrwStLDigOJOzPwEoyqM3MlnRwBy3qjPL7w8G89S7n9yRIchBhhVB8c/uAv6WL4yZA8gSabKOxqCf/HY6U/FMwqqKXqiUkxoz7pffVvARlr3KLB/4mheuae8ddb57cU4WIOk/6pM+4Xznw0lf5cLVs7briZ9enf14DPbedz8WY0TaNGRCleE88UA/p910Sn75gczk9j+l8AjfPGYk3fp6BVtPl1BjZGNkY2RjZGHl/YOQaXTt6KmATEx7Y/MzEyKtWvnklEUGH4zuw+LweKGqXzs9ALJHasnowM3ACWyUIjsEELP14UvKH/jxRryVs8k49OWCxOpEcTu1cLvHjgEPe00OLSQ6nlDTII+3hfLjPUR6CFuehL7u/uc95HCSZ5ONpQcM+WkD4AosAQv/14HefdN1xm4XrXPOxikn5PcbcPrPYpV/Tn8k7fV1zCWzYn/YcY5xtH+Hiwuembjz+yTtl8C1gtJ8DrI+TFnVuGwellCN8DOczxb4fS4usGc1ySNOdUWNkY2RjZGNk0otTY+RF2zySMfJan+FKimRgJIUTOLxi4InRK1Gcl87ggc3+bJ8SF/+mc5QryTtrOzOIO6+3dxk5hssj4sOSnhBYoZjJr8+0mT8wS517NSxVTmkD3+7hcyfbiLzS4cl1pjPnI/EonTooJF7cl1M1RHP7w6SeVFJVzKvdrBrRHuQ7VTEdKD3x+rYVJh3/zH7+mQs69w/qKIEL/VJyut9yPuqQn+nzrLCRH7VX28PDwzMgTQnVfWgNMK7y2ft4FT0Buff3aqjzSDmUX/VbNxyLb4CjfSTbzNZNd06NkfO2jZGNkY2RjZH3MkZeywUXHdO3njBY2Dbd/vWEr79MWjrmSvcgdGMwOOmcbkApODkWg1XjXAYoHpQ8R/nUjv88QaX+rld3Vh5328wogYbLlWRkHw/4xA8TywxU3bmZMBwgZwCVAsEXHImHBGqpbQIkHWcCJfDOtqisAXzV7kJBWwrcnqzg+VYVxlgi8UtQcF7c1rN9224/f3tRSorehp+5mPR5HHxTjFD/6uMLIfeVxIfnt/R3Rq5X2cIXFOlOxFUWgEkOX3zyWAIjX0CwfdOdUWNkY2RjZGNkY+T9jZHXfocrKTcJl5KflOiGIqXkNBM0JSXOn273emXFDe88MZklXtWGPPt3BprGSwmfAUP+vQLg1Q7nidWLBBaeZC9LrP55BkDpdrF4cb1IFl9oOND5a0AZ6L5AEZ8p8B30mfCZwJIuHPxnicb1s5Z400Ip+bD49PNsJ7+hr1I28UT9zpKWx5T7alqAeKxfFdxpf771awZyrt80h+sj+d0auDggeZ5byx+ktPj0vrM4Sjy6b6Y7FYkXX6Snha3aNd09NUZe5FVtGiMbI10/jZG74zdGPrIx8touuNwRGNhkTLd1KbD6e+VN48yAJzk9gYIB7v2dV+dbPK05JZNHAlLniTrguP49JZFZ4iE/5JdB47KnYF9z+FkSE+/SgetIx91hRUzSs+cQvMLjweL8XyUpu4wEqFniW0v8Sac8R534gonycYxUqXFgnAW4y+8Vaso7Gyv5iycWyTFbLHmF3nVJnhwY0/wOYB4fM549Qc+qYAng+TktONyfEy9rwCo98hjnSAtLzuFx7jlP416WJ2ffU15oujNqjGyMdB3peGNk7bRpjGyMvBcx8touuOgUvCWYFJkcwJOjt+d3
H4/9FCzce5kczQEhzT/jwR3BX03pQZr6zoDTkyIDODkE2yRnTLpiUJHPJLsDFNu4/WZJluM7L/SVtCDx+ZJPpQBd44tyscKX+ngC0S12Bzn/MceZv1Sd7wl3m+jvWn8HgttJ1Elv6U1oM59fAzOXcXY++WhKWMkX6EupajjzF+dlpj8HxWR/lzXpLsWXz8uxZ0C+BpDOx1rMzwBy7djs/O2AS9MuNUY2Rl4JI8eo4w/9tF2elqrTvW3fRYe2MtSoZTmtCrY+s+HYq9Pl9Kz9Ge+1HW9s+dueWyrbN+HZZoht3xFyd23mORt/nB/TfKfL6c6xpZY63p7TH/HJMWtZtofP+54uF18QQT7LU9io6EPL6WmNsbejizU6+Jkfrr2f/jeNkSZrY+Q5XfsdLk2u777vkkDii1ZRupWrMZzSMQcGOcravtxk5OSclMX5TcnewcMX1l7d9HHd2ejQY5wvmF129RO4p/ETgHrwpaDy8ZclPxfgCcCP8/Z82jubeE4BPQtMTxqzIOV5Xnh59YltXV8k+rx4mv0go4+R9Mx42dvbO3srlI8xq+ZxoecxWFVnY7KiSH6pG09atCNlSQujpEdv59thJJf7oc9FPXA+X9i6HOTF7XFZkk3gkHSQFnKe//wOwGwBl/hNPk7ZlSf0UPBVwJsyeS5oujNqjGyMvBJGVtXJMz51aoc7oZPLmzTdJY3/43Nr/NQPNUaC7keMXKNr25ifFpLucL6P1heBVRdfmcpjM2HoXHJs3n5Pzswkv+YInnCTwdPtztTHF7nUkwcU+3NfvMbgMXci7+vnrjJHugCaLbhnIOtzJjunvi6b2yHJQ1lSAMxk8jYJAAmYLhtfV+s+u5ZIki95hZYxw/6MG1LSJWVRXKRX50pWJRzfwuMLBo+BmWyJ57To4D/5xmwLjtvBdUm+mE+cl7XYph65PYSUchR1zUXmzDYJsDyfiAfqxfOs+8MMMMiL64Pz0yck04zfpqtRY2Rj5O1gZNMjixojGyMvi927vsPFRVtKtElhUgiZ5t0OtmOicwVyPl6l82FXvyqeKYUVQ0/4DhIzWfl5lnBdJy5Xkik5yRj518zp0NIFj7kdfFy1IwCPcfHNRz6Oyyr+OI/sqLcCJZtSbspJW3oQ0/Zqm/a7MyHSlmkRkHTkxDFm+uRbkiiX80253bbUAXXtx9Jvz7AvF1RMsvrRSS76aB/vQxn9GGOGcye7uu64vZHjpP3XabHHcZPd1N77O18+NvWW5nPdSg8zgEvxwq2qqS9lSDFDPaovXx+cqoMpdyaw8zzSdHvUGNkYeVsYebpUHR9Vffqfq3r046s+9ZvqAv3wd1R9z9dUfcBHV73n0y+ef0PQQ79V9Zl/sepNn1L1iV/7O8PDGj30+qrP/KDN5zf/vVV/68uq/tOPV33D83bbfdCzq97lT83H+ZT3r1qWqhsPVH3WP636r6+o+ry/sjn39u9W9Zc/46xpY2Rj5BpO3vUFV1okixFfkM62pohRH4N91xbFnMcX5TQ0xxHPPkaSy/vTAQmCBCIHHq+WufwJ9FyfdAoF38HBwc55OqfOpe0cM0Bj9TP9gJ22myWnmlUUq85/SV7POqXFgweixuDcp6enF5IOgSsFIn2ASY6y++9z8KIjBRn1PQN86kXHNebMDrNkxQTIX5hfluXMzkyE1BcXbrz4Ojg42LnYYnXI+fAFlPOfkq/sIXv7ooQJbZaUXd+u07TATG0ku2Tzi8okq/OlMZkLHCx9bo7p5DHAcf13Vmb8+XjyhVk//mUckOgvau/2aro6NUY2RlKfl2LkwX4dLadVL/qeqic85cIYVbVZ6D/uSVVv+Tb5/BuCTo43PL7FW//O8TCj46OqZ/+pqpf+XNUnfHXV8z+i6vS06j2etuH5vf9C1aMeU/V9X1f1J//ifJy/8yerfurfVH3RD1Z9/PtU/d2nby7cXvqzVR/8CVUv+t6qn/iXVb/4H6sq5+bGyMZI0bU8w8Uqkyu/ave3Rxx8EoOesHWMgnHuqvPnThKocQ62T9vOtCgmL+48Xnl0OWbgJUpX0un2anJyHiOw+I/h8TMX+QQrt5PGoX1E/vsx1JUHdrLzwcHBTgWAwHSZs/J2tV9geRLTXR5/Zor2k5zJBu4LM//zQKc+9FcXRloscE4uDmQvzkP9Ofmig3OuJXXZzuNKF17uC1wAaWwuMMQ3K22MCSYkze8JnHP6hbafd7t4bHobt2eqLCoPUK7ZwjKR+77nFM6dFlqzSl3yO/efdLdDY56cnFyQifp1sJyBu8eF3wFouho1RjZGzmx0ASMLMv3Gq6qe+U5VT327qs/89qr/5wer/re/fX7+GX+36ud/qurHvm9XgV/6b6s+8X03d3lGVS1V9eCbVH35j27Ov+w/be5OVVX9sfev+t1vX/Utn787xqd8Y9XbvvPm89/641Wvfc1mrK/56c3F1l//o5tzr/zlDY+//12qnvP1Vf/uu6q+ZvsM2jOeU/ULL6760e/ZHftLfqjqk59W9du/VTVG1Vf+RNWz/nDVY9+06ot/qOqX/3PVc//Cefs//SFVH/KcujItS9XPvmhzUfp277q5G/cL/6HqWc+v+tqfrnrsk6q++6suH+dnfmTz9x3+6OaC7ed+bPP913+l6tu+oOo1r6x68u+q+vBPr/ED33jhIryqMfJ+w8g1XVwLelJwCisGtKgmQIhJOgV/XV0K8M+z81W7iwG/xar5uI3AE6y2frlSVX3gHA6emtNBcub4M1B0PfKY91c/BorGPTw83BmHdzZ8fHdugrknD7WhLpON1F/tOb/4VkUv2ZcB7/7k+uQx+piDLeehTalPgfDh4eHOWORBixZPfGOc/1aMEhXJFyX+2f1X53VRpRdbHB4e7iTCqs2zVynQ6eunp6d1dHRUVXUhqe/v79fBwcHZXLQxfcP9n4sMyux5IFXmNSbtzEVIqgaTh2RPteE5T5bus4xZ6tztRn79u8ej5ybOxb5r7Umek7jgJY+yN+Xw3OA8uU45ZmrfdPvUGNkY6Ta6DCOrarOl8OO+tOpHvrPqeR9c9Qfevepz/s+qP/YBVS958eaC7IM/fnOh8pIXV33M51e99lVVf+UPVj3n6zZ3Yl7yM1Wf9e2bi4WPfreqV7y06tnvs9ki9+f/ZtULv3RzMfLO770Z42nPrHrcEzcXRL/0/1Z93HtVvfhHqp77gs0do2f8vqr9g6pPf8GGxye8+Yanj/rcqp/8V1Wf98zNRc4f+hNVX/o3q97ibaoe/6TN2Pr3V9+56pP+cdXLfm7zfVk2f1/6s1W/9rLNnaWXvLjq8U/ejP30j9rM9Q2fVfW/vnXVv//BCzrfoYPDqn/8M1W/8erNxaLoTR5b9TbvVPUT/6LqhV9S9eHPrXqvP78+ltOT3qrqG36u6qOev5Hzmc+retJbXci3jZG73+8XjFyjaytXMmFVnVfefUHNz0xuPO6A4U7J4yJPXE50Uu/v89Px04WC+qTkTl2IH78w8KC5zLlJHjBVdXYB4ORgSYDwcwpo8Uu+vT3BNS0evCLhQcRAkDy6ANPYnky
kM1UlFDTUhQM45VKA8cLLE5l49Isp58ft7UmP7VndYozogkcyu8/TT5w32YmLH+mPcvGv+5jG5HNf9Dmfm7J7xdovzOQDbOe6Skk9Hee47keuUz/vsSX7eH/PR/Qhty0pxSLnmeUN12fSAY+57KndzB5smwoA1G0al3MnfTZdnRojGyOvhJEUaW9/cwfl+FbVq3616oE32Wzhe+wTz9s8/s2qbj64+fykt6raO9hctLzZUzd3j6o2zzAtS9UrXra5O/WqX6m6cXOzZfH1v7k59ujHb8d78ma8V7+86uhW1a/9UtVyWvWU37M5/6u/uBn3zbffDw42PD3xLare6b2qPuIzq/71t1R9z9duLnhuPlh181FVf/+7qp769lVf8X9X/fbrqp7y1Av2qKqqJ75V1ad+8+bzi3+46mPfverbvnDz/YOeXfWPfqzqf/hjua9ojKrf+weqXvjyqr/3zy+ef91vVP3mazYXqg8+en0sp4PDqlu/XfUFz9pciH7kO1Z99afsxGpj5Pn4jZHndO2/w8WFnC8OSek3QJJyfAGoz2xHRbmB3Jlp/DVHYeDwfLp16IbyIPIFte/vJj8z53TyRXQKMtmC+9RpB8qhisDR0dEOSMp2HnTen7xwC53O37hxY6eK686u89QrfSPZl+fdN5hkZreZvdJK25EP8pBs73fP9Ka/qt07SfzHZxLctyiDkk9VnVVkdWfQ/c+rWqzsyvZqc3x8vPMsWEo0HJdbDj1+bty4sZPoOIZXvVNVjP7A78wfmi9tBU18yZ+pD7e1b//wqh/jlluOkv+lZ/PUhnq4bOGcdOPysQ+rp6kffSzN6T7nOlB7r1I23T41RjZGUr4ZRp4+EkPsF15c9Te2F0EnR1Xv95FVj3ps1Qv+wXmbxzxhc2fssU+sGiv229/fXLh9z3+r+rHv39zZ+9bP31wYPePvbp69uoyOb1V9wJM2d+q+0O6G/eC3Vn3lJ2+2KH7Ax9y+rK94WdXnfnjVc7+16tu/sOpPfFDVz/9ULW/y6MbIxshY2Dkba3rmNkjKpJLpRFz8+ULXr+ZdaCqHbZmoWbFzUHAw0Tg6547pIEDlLcvFZ1jcOXy7gf7yH43DB/icV5+DPJM3r6akq/xULeB8rADSOTmXFt8JVNyGaQyNrd+REtF+HpAuD53ak0NVnW2L84B3XfLCx4PLFxwir15IJw60XlmV39AfPDDVhomMbalH/eVF1snJSfx9LsmopHN0dBSrt4kH15Xzpba3bt2K+9Y9VkmS33VA/9Fcrhf6Em0280vazWM/LfycL47l/2Z64hz+cC/9i7x6vnNbekwty/kdUrZPVTa/GOdf8jrjI1Vim65OjZGNkVfFyGVlwVb/7jur/syNqq/4xM33L/vbVf/6W8/Pf9Qf2tzd+qe/srkbthm86mmP2VywfNsvbbb4/e8/vnk+6TP+l6o/86FVH43ntz73L1f9+PdXffG/qXrrd6j6pv+8uUj6gDfd3An7/tdv2j34mM14L39J1bPeper3vdPmGa5bv131nV+5uUi6jL7vdVVLVT1te5fpNa/cyPe+D1Z94JOqPucZm+NPf1bVX/qkqi9/9ub8i753fdz9w6pv+cXN3bgPe7uqt37Hqi/615tzJ0dVRw9VHd6sOrxx3ufv/MnN2L/6i+fHvvc3N3ccn/bojT6//b9Uvdnvrvqyf1f1wKM2L8z4gr9WVbt3iBsj71+MXKPhk94u7e3tLXp+ZG9vb5psxVhamBJEBAwuFMfwcTWvK4CUXljgbX0uOp74Iwhqbg8Y/6624iEBX7p4mI23LMuFasLMKf2Cgo4v/pUYtChgsLqePcj84oT8C4B8kcHAZYVv5szLsnmjnp5j8kAmX15dTW15oeUXe6yQJP0mO6taeXh4eOEhTK+eJr7TXMme9PMbN26cPY9FOysW6Wdc0Om7+NEcrHT5Vk0lLfqB5FSi5++RccHkPkRiHHiskQ/GnPPu/uxtkt1SwvYFIPXkfj/zB9HMV/Q35USX0/tzAe7Ax2NruYAyuTzkMR2jTEdHRz+5LMu7XhC8KVJjZGPk7WDkyVJ19H2/tblQ2hzMn88mqKrn/M+bi6Sv+emqt37HTbuqzQXE6UnVDxxtjmlRuSwYc/vfP35u1Td99uZlGe/zIZu7UBqHMo1xfpzjTPkbtbmq2p4b+DvG7tgzGrXhZzndDgUeZuS8uew+hviYHecYPv6oOnjB8+vGN3/Otllj5P2KkdvCTDTotW0pZIVHzHBB50IlBVxmJG/LeZKzaO5kCAJburJVMGhB7TL4lgbnW+05d5rP5XIi4OpfuihQW1/Aq+LmthARSJgklBw84D1QEw/Op8/p+p4FpOY7PDzcufPE56Jc97QrK46sBM0CTDyPMc5eUOFz+gWcAHhvb+/sooTfdceNOqadXHavROsCU9sUJeNDDz101scTjD/bpTb+Q6ca0+1O/XEOr5oxFhgrrKalv253T6Yif7uXX0yzb4qtZC9997cuug9TJ7JVujj2vsmmbOPxk6pia77tdvHtrB4DfswXg2netOB20G66fWqMbIzUfGsYubn+sUX/7HNV1ed82OZlFfsHVX/tD1d9889vnrd6/ydsJ9yvevrjqr7vN3fH4Dgv/JKqFzx/M8Y/eObmLX7v/n5U8gW9x3ESf+dSnZ9jm9nYcb69qqumoMTb2vEZH1eVfW+/lr2DqlF1OkbV2Ob2cVx7Y9Te1l+Oj49r7KM4vLcUvXrU2F6bLjW2dVC1c7zY29/eadqOv7csmzukY1TtbS8Eauuby1JjOW2MfJgxco3u+g7XGGM5PDw8Yzqc30n2M7DwhfDawioBEft4P5Icyd9SwvNOHD/d5uR8lyk9GUbj0tFZBfXKo+aRIxEMpG9WDNkmJXo6tTuqHFCLc/5uE0FS310/qbqpMQiKiSSjXpfLu1sE9VkVQ3+TzxEcKTftwAVI8q0E9K6TlCyUxKgb+isvmtL2S/qGLwyks52kHCpeTGYug+uA/jR7zk5jzBKp2yHpi+2r6uyOZprL284WiilnUK4ZeVJ2v/ZFLWOXoJNyhuZOdxX1L1XwWGigv6ZcRzuqD3NAyrWuY7+TovO3bt3qO1y3QY2RjZG3hZF7e/X678bFUVPTNdHBV31S7f+zL22MrIcPI7dbGB+eO1wuPI9LaF+4UWne3hepHNf7pkWa33ZVOxqUlawZuPlCjsk8LRQT6FWdG0QVAb9oSJU87sf1Rbk7KOU9OjqqGzdu1MHBQd26desCT6J094Myux0FbjzvD3gncKWu0yLBb0NT39y3rwqjPu/t7V24+yE+6Wv865+p09kWEM3LbXokXySlhOX+oXjwB0jZxoOYCwza339HRhembu9094q64rNfvKDjAkl86OFf34YjmdPD4/RX+sasCi2biD//nbCZ3i47n+y1Rs4X77AxhqcLJ+jF8xp9Ly14XWfJj8kD27iuuRVqTW4WIBS7Kcc23R41RjZGsu+lGKm3ZixL1WtffWG+s3ntVs/ufZI6+/mtq9BV2nobfRcX/Lzb6qpkI6wNvJz9V1XjguzO77
hwxhu5ZBt9jtBbmj4/t1zQwxsVLbX5CYAH36SW092ihKgx8g2Dkde2pVAkYVOVi7fueWtf/SSQHvL3BagvaF1AGlf/Dg4O4laomWPQOWZ3DDgXQdLPc4xl2X0wkPLy4kHHEn/sJx3RgV2n0h2f9aFTUQ53VjoV+yYg1zzkL/Gs77qY8gUAx0gAd3p6erbg98AiOLqd04Udx9SFgj4zkGc/ckm51I7+R/3xmMbz5wv82QVd9GgBQt50XMf8rYiMGz3ozaRSVWdbJqnvBP6ygVd7nd9ZH/qXvvOCzv2EvqW/3NLgvuHH/M7drB9tQ14oC2VIdwTF22VvXyKvJG05TdtcpSe/KE15jzwzDtbaej/ZlPqRPunLfdF199QYeX6eYzRGbnWyt+Xt9a+tB//S776Q9yWH46jePMvfaeQFfMpRHM8v9t1WfmfCL3STr3Icx8N0jHn7djGS5+TXaUs+5XOM5Dl/dMHjhrL5hQr1RL917PS1CfGG38kDY4U2SHyd8fuBH1snf+NLdo41Rr7hMfJaLrg4uSdRMuuM+UJJ3/XyAU/Svtjz+R0QWK3nlSiTjfgT8RkU3mZMbUV0sJSsXE7XiTu/VwHc8T3ZiUfqiHO6bjyx+jk6jS+O02LXkwLlFqXqLgM/BdIY42x+PcfFJOXJW8DMwEjVkZQ4HJQI8rOk4/ojMGvhVLUBDgE/jwk03VYM6mXZbFERmJ6entatW7fO9OZ80xas5mhebm1yHbgf8a1c9FfqNPk7+3sl3oHW558lbbaZgUoCRI8VzwEpd5FH8cFq+VWTtN9J0DGNq22yKbacJz9HWWfFBC5S5UfpItt1SaI/Jzs3XY0aIxsjr4yRp7t2a4xsjOT8d4KRPm9j5IYeDoxcw8mH5Q6XBPTJkwN5whHzM5pdzTKpucKrLiqJQeeGEv+s6Pj52RU7P7vRE8ixHSsBKegcBD0gWPnhOExUbOfnNLYSORMpkxb50zisOLpePQG7A3vydZ3zWR5PvOSBY7h8budkd7ajbj2QUuALBMbYbIF74IEHzsbTGxbVxsFFMno1zWOBOhegHB0dnb3qXYmeiwVPpAIwycRKmgOjqoP6y7loD/pESjzUUYpbkceE54rkV37nbWZ7XzBRxx4nbO+xO+vjC0+ku69/AAAgAElEQVQCottuVr30efWd47A/ZfDvugviVWj1T4tjl9X58vZNd0aNkY2Rl2GkU2NkY2TVnWPkSfCpxsiHByPX6K4vuDQJEwCFcod1B/Ereo2VBEsOlZTtvGluApzzq/Z0VBotAVwCKJ5zuQmWDgisSnmfBI6cw/W+t7d3tqdcSUbj+auBUxCx4pKSgAdzqrA6WHu7VN1Nr+f1Sp2/Wc/14qCncSgL5+RcmkO6c9B1P5Oe9fyUwOHmzZtnn9VGcghYpGMtWPTd3/aVZBtj1IMPPnjBf1ipJu9rFVh/OJ52pEzS59HR0Vk18fT0dOe7qkN88DTpmuQ+Qh/yRVnydX52u1M3M3/wsXjMFzWMI9nf50u5yGVPoOWLZJHHIiktNGa+yr5cOLhO0iKDczkPTZdTY+Q5NUZeHSNJjZGNkXeLkZsDF5+zkj4aI68HI9foWl6aQSY92biyXeFuGJJXsjhf6ucKYPB4EtZfnqMzM+kzuDknZVlzVg+Kmf7W9Mj2qfLIfrxNzXEcuBN4UibKxmoQdZbAiHrSX38OZ2ZT6kjJjmBC2yR9E6hdBudV5LeVPWkxoAQOAoX9/f26ceNGjbHZ5sOqHKt2XqWj/8yASvyICBI8xgUEZaCNCES8RS/Q0GduMyGp/c2bN+vGjRtnutQLN05OTs4qe69//et3tnfQZm5zki+aqCf3e9pU+koAxrESqM2+r+UMt5PnOB5PY3lecFl8oeztkm9W7VblvJ0vCF1HpKsAR9PVqTGyMfJ2MdKpMbIx0u1/Oxg586nGyDcsRl7blsLZ1SeZdePLQej4VbtVCncqUTKIxvKKD9tpvLXFG+d1ct7JjxuPxAqNJ2IFGpO+J9jZ55nzpnGczxkQUvfk2ykFp7fjGA4Ivh1lBpQEMY2VXlnMIHAfcP9Mi560cCB/AgIByMHBQR0eHp6BBrch6DN/9Ljq4gtDxKsvVFISlEwaj3HhIEs7eNWaPPDhe26zSbbwxQ3B9caNG2eg+qhHPeqsmnfr1q2zSp8Ahtsi6CcpefP7LHZmY/D82lshExjxvM/nVbyrgJQoLW6cD8+JyQbk0V8okPqs6c1lvWq+bbo9aoxsjExyX8DI/d0XRzRGNkaKl7vFyFHzLcuNkdeDkWt0bRdcXEin25oUSMbwqo6USKOlwBexLYmK9IpUApdZotQcHNcTkDtUOn+ZIZJTs9+sclZ1vh+WuksA6/pzZ3cAXwswH29WlaGOuWXAtyGwrQOAfIRjqWIkfSgZeuUu+WICX77tiCS/EWAIIPb3988qdTzHyh33mxPs9AaplJgT0Yfox6mKtyzL2Rt93DYzvUjPBBUHV/VJMe4VQ4138+bNeuCBB862U6i6d3R0VLdu3dqp7DkYyq6eBFMCpg59EeJ5gMdSrPpCjfP7fLSJt+M4s9y19p12TgDrfPO32hwAGFcph8xA0PPBZcDbdDk1RjZGcnzOQYw8wbCNkY2R14qR21fdN0bu8in/uA6MXKNrueBaS8ZM4p6oPBHPFOFjUkBXjPfzpK7vXtlKVSpPPAywFPwOADw+q5SpbXLaJHdKBGOMnVebul45TkoIHJ8An+zHBMO/qdLp8q2Bb+rH755UOS+rWklf4iEtPEge/Pv7+3Xz5s2zv16p01+9HYpJ0HWl6hG3JiSf8O+6QJ0lyJk89N3L9LqW1GaLLq8ySkb54eHh4dnxmzdv1snJyQ7oHh8fX3iQWePSngS2mbyMi5kcPMY48TZpEeV5YvYGuBnN8tjsePo+m2+2YE19Z/pIx9h/zc+arkaNkeftGyMv8r2LkVlfqR+/N0Y2Rl4lV/N8Y+Ru34cbI6/ttfBiwhWUvsugnnDpnDQujTcDHPZPwTfjh2P5ea+yeNB7X8rnPCbZOGcCSB+LgOcJicmLOmDVIt0GVXsFsctAnlzPlHEGximhs58DgficAT2BTrfnvdJAvc8AJOlvjHFWkavavB72wQcfPKvg6Z+2SrCyRz1ybsqnql7yB7cJFweevJTQPDFSDk+WKSHSJgQ+6WT22xfO/97e3tnDwBybthMPBJPDw8OzbRUClhRvlF8+QJD0O6aXJXe15V9RinPGEXVAvXJ+n2OWA1IuY6zwOOea5TAfK/F1O2OR56vqtSlTY2Rj5FUxcm9/f6dfY2Rj5N1i5Dlzu363Ro2Rd4aRa3RtWwrlNK6wFOSeqHScfZ14m5iJ0RO+b8Fwg3F8T67kOSVtn1Nz6ZzfXqbT+IOW1FWqiFEeOrLf9qUMnPPk5OQsKaZtFNTrzGYCfCVLfXZ+Lwsg8SEdcPuD610ye5WLiUsy7e/v19HR0Y5+OR7H9YTs8vIBXwGKgMPBhFU832Kg79Kt/IN7iGeJTvzNEgn9IP2mDG3KcTQ3v+s87Z/8whcw7n+UVe24KOH2DbVnl
ZM6Pzo6qtPT0wu/L0M5KR/H9O8JJPzzDBBmCTct/pwcPD2e1uImLUI5L4+tLZTczzyPpIUh4+EyP226M2qMbIwkX1OMHPm5uMbIxsg7xcizNjUv/DRGPvwYea2/wyUnmN1OdONU5QdlPTDo1OpPgdme7ZRwWN2RQdN8XoHysZksCCqemJIc6u8A5m0INvruCdrbOyB5wLqTSg+udwK1SD86KNlVqeF4/Os8+DnakiBFmdlGPOp3LjTeyclJ3bp1awfImbwdOLgHXef1WcBw48aNnYd9b968eXaLX6+x5VuVpJuq8wTtCyTagrZPt6CZmBOgCMxpe7c79Uy/ZiKWnsTTrVu3dt4MpTc0cX4CO6t6GttlYvXYEyr14tVT2U/VQC2MpJOUgFNSZ6xRJ74AJH+0wwwAOL4nY0/2GivZmvaajc04neWMJPvMr/h5toBMvLuMTXdOjZGNkZdh5OlyftGgbWeNkbu6bIy8PYx0/elcY+TF73eLkWu0esE1xniPqnrJsiyvuKRdTLxOzpgb3BVPYdOYPpaTnIVJKhmbx2f8e9XCgz0p2xMBA43Jx3n2ZDDjiXwQBKs2IMDAdfnZl4DB+dMFEYNrxht1qjFUxeGtbo6rW+jiIemSSVby6fWr+qy3/TB5qL/G1F5pgYP2mOstQgIPVe4IKuJD9lsLMso6WzzMwJiy6jMfJvaErr4eDz6+qnhMJgSolLQIFvxHHyYAqS1lIdgQ2Jks3e6q5Gkc34aR9Oix6Hr0pD0jgoXnKR+H7al7LWTT4oF+kfIm7ZCqcbM5Pa/R7ul8IvcrzpvennU/U2PkLu883hi5S1fFSOa2xsjGyOvAyDPfWxojH26MXNPZ6gXXsiwvWjuPdjtGI8OerF3Zs3M+DhVHR3HHSoEkQ84CzJO3VwBdVlbM5OyetGbA5T9c6M7KsRSw7hju3CkBsPoxAyV3EiYAJa61Koy+swqYbMTXnKqtxhYAVJ2DmsZiNXiMcQYgriPJJLBK+5fFl8bSQ757e3t148aNeuCBB+rg4GDnsyp1euBXAETfk/4kp87pOPVH4mtnkz9wDq/SVe3Gjj7TF2lT8uI+oPkfeuihs2TNrSds61VFgjYBSW9zEj9aSLA/F+60O2NVf/mgsfuYX8Crvcc+5fB4SsmTunZfEjmfM53RpjzvzyrQHs6z5vNj/O650+NS/Lj8nDeBxUyGpsZIfXZZGyPvAiPxWvjGyMZIt/udYKS0crqcViFm5B+NkW8YjLzWLYVkzBlIxvHkyc9Vu0b14E1tHCQUhB4E+qu+npiTI9IocgS/5Ur52cfnIm8Mek8Ya07s8mtsJTVWQtYSvwLv+Pg4JhONq0QuHn1LTAI3T7DLstmrrm0Oquwx+aovgUxyPfTQQzuAoGTCoDw+Pj5LTn6RN8Y4q87p7Uk3btyoBx988GzP+Y0bN872oN+8eXNHZwQSjus+xy064o3APgMM2pRJlQsYJVYnrw75AoJ2TQDDyuDR0dGFhQh9X33VTm1YnZXN5AOerGVD394i33rwwQfr1q1bF/hVG5G23NCXZxXm2RiMUfcb2ojtNZbrhDFPvlP+Sjx6vvHj5Nf7M4/yeYhUbSSvzgurqaR0rOnOqDGyMXKOkTfqdds+ym+NkY2Rd4ORx2PUaVXt7+3XwVaOxsiHByOTX4mu7S2FnrQTE1T0DGicvG3VbnVNbbwPqzo+P4+pPZM9kyDHk3weYMmwLteaMRmQPOeVQY3j35P8+sukQL4V0L5/n3vQqYfZPBzX+fO/h4eHdXR0dNbOq0nuP+KPYMDP5EEXcqq6qSJIYBNQ7O3tnQHIAw88cLYvnSAivfGHGj0puA/RLvIjbSMQqAhsqFfqk4swJh9/sJZyafxUxdbxVNHc2zvfpiJdayuJ+3VKIrJXAgRuc0j+JL64MOCiRfF2cHBQR0dHZ8e5eKCc7pMeJ+JP86dFoo5Jp7PKH4l65wIt5cEE0PqbYnqNh8sWqtQJt9b44lVjc6HhY3HbR9OdUWNkY2TiL2Iknv9qjGyMvA6MPNNJZcwQD42RDy9GXssFFwNATEmAlMzpKLpCZyJhRWxmRCpIbVxo9fFKiN+m5Lxqn+Zk0tUc6RZr6jcDPzqMgyb15Qnb5fPqAC9o6LTJIfxhWX/g18HZg1RjcGwlJ/U5Ojq6oDcfi3pwgK3a/FaF/IU860cD1Ye+oYR548aNs33m2oeuPejcl672SqhMsvIX+af0xcTLc1Wb1/IqOYpfJkTy7VtcaEdP2JqH/u5VGtnNQZug7PvKE6iLB4LcWkV4jHEGAF695Zj8S73IF/whZfkVeUmxkRK2+qc40nz0XS5YCG6kFK8+VuLN56YcvuhLRL1zPsaPLx5muk/PjfjieLYYabo6NUY2Rl4ZIzGvMMH12xjZGNkY+cjDyGv9HS4JQXLlMPB0t4NXzd7HydvKEL7Xne0dqDgO50vGc8MpWXFMzqFxFchMvu4kvIL2hyTJK48zUDQWK3DkWXMkINWYBB1Vcvgr9QpkypWCUH/1I4gnJydnD+dW1dmDuxpDiZtys9JUdb5vmZVTBbrulp2cnJw9uCveuajZ39/f2XvOH2nUVgnxot8s4dxcqNDGBDvZybdG6Dv9ixUvT1rSmydezV11EbRJBPzkiwJZgpkvFCiTdCkefRuBJxlfVPqC0B+QTn7Eaie/J/BM41Be+jnlpE8xnmlvH1f8+NgpLryK5rnM49NlSbnPF9i+UHS9emXOn6Ok3imT88W9/t636erUGNkYeXWMPOf98PCwMbIxcof3u8LIpXZkaoy8foxco2u7w8Xqho5ROVK2J1SeS4lSbXwsnqciRepDB3JQm13dktKtQipY5/w2rPOS5qCOKINX+pgcEs9MTNo24HqjbhmsStDan1x1XnGiAztYkV/plr8/4lUj6UcAIlkeeuihszln82hMBgMrPQqcMcZO5VFbH1i103dV6x544IHa29vb2Rah30Shvv9/9t6lR5LkOts87nG/ZURmZXdXsSlSoogP2gjiguvB7PUL9C8G80Pmh4y2Wmg9m5GWI0H4BAgCRVHdXd1VeY27x8VnEfmceP20R1ZmVRaJTzQDEhnh4W5udm6v+WvHzBUwNHDEgF2nt2jHtFX1gEy4tz6Mal15ftgRilz9GPSi3REs6+QTB3c6+FHgoz1Rd9iO1olNqVzo36nBHrbDix6zLKu8O0aZPAagMZjG/sc4ooBSF4O0Tm1brDfGHJWjxpqotzjwqLtPBAGtQ9Nr1N7rdBivV+BUm4gxO8YH6mOQpYPDVJ5XEkYmjHwyRnY69CxhZMLIhJH2vxZGPlZebIZLHS86WF1g1SdcPTeeV2d8MQBHkMBB42/arnhNPE+dgOM6/V8HZKecOoLkqXvVteNUUePWa3A2M6ts/6rOxOLXzWZTCWaa021mHtjoO9dTp6ZEaLtUPxoIcAp1Dt0uXtupzqkBnTZst1s3ctpOwAOAYOpg6/r9vvV6PWfsyFfPsswZQGW1YD2i7tSuCfpqlzHnX1mXaEfIpQ6o1Ue0Lk0zoZ3K0iqDrv7HvZke1yCjdlM3
MIr9QHcRUBQwVX91cSCCV91grw7UtG8xiNYNdPT3GDi1nIoVMUBzb7VJ/ayDO+qru2dsT53t151fFxN1sFk3MI7xKIJWPFftS+NCnJ1J5WklYWTCSG3XhzDy4UxfL5QwMmHkJ2Ek+snqyYCEkS+HkXV1U170gUuFVjc1x7lm9Qar50Tj0rrUsfT+Kux4bTxHj+l5UcAoJ6YkxDZovWZWa/x6LxwwBqpTgBIBgfNVJtpXsyrTpPdR5zKzygsTtc/oSHVFnQTeyALo1D5BHXvQoMAWqQT1oih+1FdNUVCmE5DT9qnRdzoda7Vafn2v17N+v2/9ft86nY51u11PoWARMICiOuHefCcA056oF5V1nf2pbdTpCj3EoK160yCg91TgiQMcBToNznHAhG0ha02XqGO91I7wDwVkBiz8HtuiclWfUMDVQUZkj7QdsU49Tt/0f6wjXlMnJ60/zk7UAVAM7Kf0GW2Mcgo4om4ViJBlnQ1G2cf2ah+jHdXJLpWnl4SRCSOfipG7XXUgmTAyYeRLYKSZWbmvxzv6pv9jHfGahJHPx8gX3RYewdSxoHUBLXZWGSI9JwZmFQaGFwWldT729B1BiGPartgmSp1w9ZrYX47jGHWGFsFXr8F59bzYbhyXBa2AQl0OOywOMoxORNE88izLrNvt+vUAhtlxy1P6BrumgGJ2ZH1UD6RrKLuU57kzjBo44kBFA46+gJF0iX6/b91u1/93u93KTkv6ssZOp1OxDb0fhfbpfdGNsij0Txddqo3WBe0YFKKdxQFIXZCNQUxtTtuljBYsZGTa1Df0/Lo8efqw3W6t0+n8aLGxyqnONxgUcH/0gE1oPIifI6tH25XVfMy/o6wi28j5detgYtyJ/Yzsahz41Q0oI6OppS4man+i/8Y+RH1RZ0yF0TSbeG0qH1cSRiaM/BBGRpkkjEwY+RIYaWaW5T8mQRJGVsvnxMhPfuCKN4hGEqeSzawSgDQoqXHUCZsAcwoYtA3R2FAo56tS6wCA43WKVeOnP9GgIjhof+K91Di0/VHOMchp2/R8DTD0VQ1FnRPQ0XrViLiG93JEQCN45PkhVYEAEgOe5pPrAJm+60BC26m6J+2BNBCOmZkDA6kgnU7Her2e56R3u13/T7pEo9Hw7W5VDnERJPeiYEtqq/zpsQg2cfATbTMGPtqgKTs6kCIAxEFWtBMNajGYK9BooNT+1gXOeA6FHHNtJ23kmlMDIwWPOLDQwVxM3YmDjRic1Z8i0MZ+xd/1e51fkvKjRZkvlZ/GnVNxQevXeFcXP/Q8jU1Zlnmb4kCoThZxYKDAXQfCqTyvJIxMGPksjMyqG4EkjEwY+akYmee5KRIkjPzDYOQnP3DFjsYbx6BCUaOKxhSv0XrV4fTcqIRTDsmT6SnB1PUlDuJgYyKo1BlxlFXdZ2QQnT+2J16rDEMETwVUfUt5PB5nn2IbCeplWVpRFN4+8r75rkEtAj4Ao/cgRSH2nWChU+4apAES9LjfHxcsAgoKfu12uwIkyu7pCx5VPtp/DXr8VwdVO1QAj31iVg85cB9lWgBdPmtdeg5/yEP9QXWiYK3BGFnRbvVFtUe1MfWHyOqojuNgkHoAqxgctd8MONQH4kCQ3yLwnAq2sZ1RDqpr9Y1Yr+obv4iBOgK9tlfbpHGwDqDi4KMuxsUBZV3spJ0aE3SwEeurk0es90OAksqPS8LIhJHPwsjsQW9WfSlwwsiEkR+LkcfVZwkj/5AY+WIphdp5sx8/9aqyYoPjtVqnsjkRuPT6yCxRVJh8N6susqQ+VU68LrY59jveUx2K6+rOj8ZSZzhcX2d00VFUHrpdaAwOFKbq6XdsM9cqK6cAk2WHvG7qwsF0NosAr30muNCmOgOv03XUG+1kITDgwZ/mo7MAWF/iqECr9akeNCDqb3VBsSzLCnulQKhBUfsY+xl1oXKPQBbrjIwY1+pLp/W4BhwK4Kqgp/KBVdO6Go2GbTYbt5eYVx4HS9Gfox9GcKtrdwQW1U1kmOOAKfqo6rkuzuj9qV/jTvRbBfJoJ1Gvdf3UojZfFyNjKcvTm1tEZjfqV+UaGfW6WJfK00vCyOo9E0aewEiYfas+yCSMTBip8ngORpbS9oSRnxcjHysv+sClzoByTjFlOgDXTuh3rSsqQO9r9mMGjWtxCAWbOkZcr/1Q+dDWj48JXRWIUz12XZ3D1TGYmnKiIEy9ek2Uqd5H/7Isq+TRx0AedaEpFRpIMEymcKlT86XR336/9wDPd7NqcFTZESi73a6nUwAomo/OVreACgwewS/eLwY32sBxGEZsSXWpwVaBUN+foWlE1B2ZV+pSue9rAqfKkOvr9B8HFNqOGLw3m02ljmh7dcFfWVDOiywidWk/aLOuqdA2K1MX9RR9Q2VTp8MYVyJTpnVzfhy4qgzjvevqODUAVp/V8yLo1V2rJYJClHUscRZF/V3PqYsPcTCUytNLwsiqLB777Y8aI6W/CSMTRqqMPhYj67wtYeTnwcjHYtuLvYdLZyxUOHUDobqgFQOkCo/vlFivOmQ89zGDitfXtb1OwBxXR4wlGmhsR+wXdUYwPCVD/a+BQhd1KnOBc34IPDWYb7dbf/eDyhZA1r6fAhllDmmfnqOMFAFF8301tzkCiqY96EsayUEfDod+XFk9WDvuQ/11LJjaKSkPkVnTzwp+cVClwZx+q35i8FcGlN9jW9XOog1pXTqg0mP7/d4XY9MeZXSpo64d9AXw0bZhe7rIty4YRp9H3+SlRz/Rdj4WYGPsiAyl9ieWU21UuZ4CzSiDeL1eF2cStN4oF9Vzna7VBnX74RhPYh9OxT69XgGvLmak8uGSMDJh5HMxUn9LGJkwkvo/BiPzLLO9mWX2Ywzj2oSRnx8jX2yGSwNt3U1PKY3/dQBRJ4x4vgLJKYD40P3rjtUxa6cA6RRgqVyoMxq4GldsawwMsT/xCVuL1h37F9uo9zQz6/f7VhSFb1eKI8J26W5CWmfcvED7EGcklNHRa5WBjAENsKFNpD6wm1K73bbBYODvERkMBg4kAA7XkDJBG+qClAZ7Dci6AYhZdecg5AObRWCOtvShAYK2S0FJjyPHOhtQ/4k2GAO7LrrlexyEaD1al4IL/3UNgfpBtIs624vBjv+6iF3tKvb/Qz6qdcdgWqeHCPKR7Ytpsafacyq2RaYwxrb4Wa+NvmZWXYejJcaz2I5T8TH+/hiYpPJ4SRiZMPIpGHncubu6TiZhZMLIePypGLnnmqxaX8LIY3kpjHysvNh7uPRJlIY8Jkg+x2OUuinWuvvGe0VAgwWpA5dTiqkzpDpwjM4WFa/lMfA55VgRUGNwjfepq6suQMc2qAz1v077R5atTmdRXvo5z3Nbr9dmVt3elrqUXYnyRI9lWTrzZmb+npA8zysAoe8OUQDhOAAEMO12x5dCok+YGO6rbYsyjzKokzNBR1+Qp/dT0KDEAcZ+vz/5no264HKqbVFfcYcn1XW0CUq0M/qi7Jv6Rl3R3yPw1fkgcqzLM9dUkbo+1+nt1D10QKnH6nyHYxFE6mQW/T36i4L0qXIKSPR/9Pd
T8bbuuPYxpmCd0mMqHy4JIxNG1smrDiPLh10KM7NKuxJGJoz8WIw8da72KWHk58fIF30PlxoepU4J+rmOtXrsOj1PjV9/ix2PqRgaoPTaCEpaZzTy2N/ouKf6FJ3rFFhEY4gyiEAb7183PR3/n7p3URS+qLUucGZZZp1Ox8yqufrqWLFeZK1MmU7fRzkRKLRuDbqx/8o8KQACKuTOAxK6SJnvyE3voTamMtXFsqeCVN3v2u6o37rjWmfUaV0bsyyrXT+BbKLOYzBT+6zbYSjaWbT/2Hb0GFNJo+9Gu499rNuNUG2rrmhfTvlKXYA8FXfqmDltr9rTqT5pe7Rv2p86f9b66uSm8SnGtThI07Y/9pnvyhQ/BnSpfLgkjKyXidb/R4+RnGvVOJcwMmGk1vccjPQ2BRdMGHmUQ5THx2LkqThn9oJruD4UCGMjomHqlKyew/+6IK4CoA1xyr4OwLTdj/0WnSTeXxVad5+6Ptcdr7v/KcDUa1W5BP/IdkbHUCPW4+goz3Pfqef8/Nyur69rQa3X63ngWq1Wld/rgiC6UceKjIrZcVGrOpiZ/ejaaD9xZyJ1HMBC66hjjXRgw0Mh56rMVBd1wVWDZzwv2qfaUtQt99Vtf6MOqSPuiBQDdgxSdcE1BjT1gTqwiz7BeXGRcYwBKke1S87RhcrRPlU2db/V+Zzadjz/Q9/rZBb1rf81kMdBgraHPsQUhrqYRp8/1I4Yl2JRu43xTeup01fduak8vSSMTBj5ZIyUdTYJIxNGvgRGakkY+YfDyBd54FLHpLF1ASsGXbMf57XT0br3GMT6Yp11xlYXVKNR6++x3aecUa/RPkcl1QHOqXrqFBs/xyDC/Tqdju+aw2/KwEXjrJOd3m+z2Xi6AaABs0XqQ2T4uF5zifU33YIXwNJgzvRslGcMKpwLG6cLgzWdYrvd+hvdYe3UUTQNLQKm6pHPKtv4vc7BdVGr7jAVByLx4YHfdH2Atg0dRKCLM4fRTuqCfGyrsmR1dnlqUKL915xtvU5lTx8jM6Q2hlx0MFL30lBKBD8F9VPsu7atrr8awOOATtcoRNDU79wj+kOUnQJCXTzVcup+dXGK/ut1dTp9DGQ+xNyl8nhJGJkwUtv4KEZ6XKnOuCSMTBj5sRjpssura6ASRh7LS2Fk3diE8skPXDEIo/wYxPmMUZxi67g+vtwtKrJOOfE+MYByTd20Yd158ZpTCqkDOT1WF2iiPOoMjXbFp3/9zG/L5dKyLHOHi307taON9kX1ws5LZXlkjzRwFkVhRVGYWTXViBdeaps1tcDs+KZ11TFAyFayql9kT4oD7xNBRiwGZutbficw7ff7ysJcgESDAX3P87zy3o4YEFRv8fOp3wiGdXJRG4m60fZpO9EJYFhnI3qvyHbFQQRtUNaJe+luPtHe0eA/0oQAACAASURBVAG7r8X0Cr139K8oqwgQ2CM2puCv+tJ+xT7Guk+t84qgF48zENFztD9aZwSluKbjlL60nacGkSpTBZDYBq5jcPYUplDbWGf7dd9TeVpJGJkw8lkY6btmZJWZm4SRCSM/FiON6+QZIWHk58HIx8qLruGqazDCq2ucCkR/U8HVBb+oWI5rPfFN91oiGxLvGdkU/c9n+kodui1sFLoac/ytDsRwJJVRXRvUMHe7nbNWdWCY53nlfSF1hqJ96vf7tl6vnQGL8iaIw+RR4rbHnU7Hdrudp1TQ58iIKstVlqXXTTBBp5TBYODvFOHhDRDS93/sdjtbr9fORsZ3i9BfHbToAJ7jEZijHdEHfov56xqg+T0CkTJadUFP26GgX2evcfClJQY76jA7vu0+9iHajMoJuWCLGjT13Bj8VPYxOHJ/zon57U8p8Vz1tRjQY50RIGJ7uJb66mKftuEpA1baRb0aU+qK6lF1GHWuA8HYN21njA3RPulnKh9fEkYmjPwQRsqjScLIhJEvipFlWMSVMPJ43kthZJ09UT75gSsaSZ2C6pg6ijpOnJLTTsU69bM6mBpp3K1Fgy+Fe6oiIsDUgY46UxSwGltk/OJ56oDqqBFo+a5MmgIUQQ6WK74xvQ7IYlsUyJAV9bbbbQeWVqtlw+HQdzQaDAa2WCysKArb7XbW7/dttVrZZrOxTqfj12mb1MCz7PBiRrNjkN1sNp4O0Wg0HDDYuhYQ0YW+KlP9HVAgcOqAQ5mZGCCj7akeY8BUXW82G/9dA2U8ho3UgUe0eXRPH7bbbe1ufcq86EBD7ardbjuj2Wq1almxOtuI/gkYab8i46cyi+CrMtN6NIhFVlD7pTpStvYU4Kje4gA26jh+xy/UX7XOeF4MxBEste3aB87V+KH6OcXAxbYQC+JAVPt9CijUl+rsMJXnlYSRCSM/BiOj7hNGJoz8WIw0kVXCyM+LkY+VF5vhUuHFxhJcNW81CloHTFqHGqSyAHotRYUXn2SjoUZWINYRnTv2R+8RDVzbX8c6aF+j48JA1U09x37VgRwOrkyQtq9OJtqHeJ3e69WrVzafz83ssCCYoN3pdGw4HNr9/b3NZjMrisK3ns2yzLrdrs3nc9vv987AFUVheZ5br9ezVqtlvV7P1uu1zWazylqv4XDoQEJaBEChoDAcDq3X61m/33fGkKDGNvQ4KYCr98FpsdMYOOvSQHSApIP9GExU1/xpAFbmkjpjnrgCnv7XFzDGQK3BIfqM6lbboX4RfZI+85/24tcx0OkAqI7xVBvT64ui8N+LoqjITQNdZOejPvT4qbjxnAeKOn+NJcaJukEh/YixC+aUc+vAQIsypiobjbXR1yOrq21Qm1OWOsoxlY8rCSMTRj4HI80Og8KEkQkjKQkj/9fGyBd54NKnUApGqU+hkZWOwZBjUdCqhGgYUTiUU/dSgUZj1nr0eDxWd09VTgzwauTadpgn3kCOQaBQ3a0nBjeVDbLWN5m32+1KUKC/sQ5yuWHeaENRFNbtdm0ymTjrdnZ2Zo1Gw9brtS0WC2u1WjaZTKzb7dpsNrNut2tmh1x53umxXC6t0+lUGEUAYDAY2HA4rNhMv9/3lIbRaFQJWOSgNxoNz0MfjUY2Go2ckaLvb968sV/+8pd2c3Nj//RP/1RhhzQQoQ/uo7pSHWrbI0jU2YCZOVipDutYKu4XmbM4aMEeFAT1GPeONhPvmWXHBd70GQBVljemdKicon9qXTCmCkjRP+hTzBnnemV6WUugwU0/q13XxRbVSQTDCIBaYlzTgB11rT6vwVwLdemCbWVYY2xT2aNPBSjaFO/NZ7Ur5BkHHFpiXdEPHgPaVB4vCSMTRj4dIx9ilGU2Go0SRiaM/GSMVM9PGPmHw8gXe+DSTtZ1Ts9VRik2Ujsag7KCTAQcDLgu+GKIUVkUzq1z5DohxoCsCjoV7NXItP+9Xs8DtLIg0fm1XmVZVKaq/F6vZ4vFonIdsuB8AKvT6ThTwq5K3W7Xut2uDYdDB4Tdbuegs1gsrNPp2GKxsKurK8uyzNMTCHKz2cyyLLPFYmHn5+fWbrft7u7Oer2e7fd7u7y89Bz4fr/vQaPb7Vqv1zOzQ9DgRYwwd4PBwM7OzhykcM
avv/7afv3rX9tf/MVf2OXlpf3ud7+zf/iHf6joTx03sol6jsq9TqcRMLAdzeXH7gjSMWUFm4CFiQ6sTGJsewz61If9xAcStRPO0YBOW5X1jMErBtZ4z1h0kIOcI5DqOgL6S8qFDiA5X9NEVB86KKDeU0y06kf7FeNQ1HVcqK5sl7aD+2gd2vd4rtqZtiGCnYKMglpsvwIUrGC0Ydhrzo+ghSzRodpOKs8vCSMTRj4ZI++nh7bkWcLIhJEvgpEqz4SRnxcjH3voepEHLhVSnCrWhkbDUwHGzur58Tx1Co6rk0QBK+NQ93SqDEI0hseAMrYpOpQey7LMgyELaAnO1MO56uhaV1276s6DyTo/P7eyLG2xWDizp6BqZr7gttPpWL/ftzzP7e7uzvr9vr8QkXsAOO122waDgQdy8scViNQG9vu9p1dwv1arZYvFwkajkc1mMzs7O7PBYODpEvP53EGEAsBlWWar1cr2+7397Gc/s7/8y7+0X//61/b69WubzWb2z//8z/a3f/u39u2339p+v3eGUvsOcBN0sizz9A6+x7zgaCcK4KQExDURylorQxsdH/3j/Pqb2l9c+Kmgwvl1v6udaOoC2+qy+FQXU2uAisCjOfIq07rBYWQtdYAV7RGfMKumgtAfZFXnn9HfonzUf2Lb6nxNi14XU0kUVFRm+l/7zLE4eIj9Upavrh3RNmiHMqlxMKly1DgTBwkUZSLr5JLK00rCyISRT8XI9WZri4d+JIxMGPkSGJkHzONzwsiXx8jHcPJFdylUFqIuQNORmKcen6L1XGXhuIcqgd+iUPisCuQ8FFXHZESDiMapQUmdRhkCzmfaX5/6dZtY2kCO9/39vX9nOjvKF5mhZPK61ZE0z7Xf75vZIYVBp/wjgwpbNp1ObTgcWrfbtc1m44t6lR29vr62drvteiZlwuwASqvVytbrtfX7fbu7u3O2r91uV9IhyGvvdruW54eccdhBjrXb7crv3W7XfvGLX9hf/dVf2a9+9St78+aNFUVh//Iv/2J///d/b//+7//uAY/3oygrpHZBnfwWg7/mVWuwiOCAPmJAjQ8BahsE7xhQIoOsQZw6uRcLj7FbAgl/GrC4Z/RHbYOyNWr38b/aftz1SwO+Au6pABuZR9qgwInPKFBqP2IaiRZ0qwFU5ax+G31Y45deH2OH6isOSPU3jUPIOTLFWk7FOGKmzkrE+9XpTAcRkTXWdun9Y1x+DExS+XBJGJkw8kMYOej37doOKYX9fj9hZMLIyvGPwUjFiISRx/I5MPKx8iIPXOpU2pE64RCAzI5TdirQSuPCdp5m1W04IyjofWKwr1Mk37XNtAMmJ7ILajCnlEgQhM3a7XYemJkK1h2HyOUeDAbOsrHjEUWDFNdmWebnERAwhsFg4CkYsIaLxcID63a7teVyaWZmFxcXtl6vbT6f2/n5ue12O5vP5/bll18600gfR6ORb4e7Wq2s2+36FDsAY2a+C1Ov13O5ttttG41GNp/PrdFo2MXFheeg6zT4cDi0drvt/Wm329ZoNGy5XNpf//Vf29/8zd/Y3d2d/cd//If93d/9nf3mN7+x1WrlC4tJ21Cdax9g7PjPnw7gzY6gHJli6onHYcPQM/eMqQuRyYoBLQ5oOEZKgbIzBDnkhy/qgKfOZ/QBRYEk+oQe3+12lUGFBrs4wFIQjz6i7Vfw5joFmDgI4xyVF+dG0I7AFa/nT/WoQf4UgJpVY96pOKN9PRWw9R5RRqqPGCPrUkbqgLDue52s9J46YKm7NpXnl4SRCSOfipHDs6797qHtCSMTRr4ERio5orE/YeTLY+Rj5cVSCinKMkenMKsKTdkEFZjZ8clVmRKtQ5++Y/0Ytk4tR0DgCRjB6T1VcJyjQq5ThBpcWR62bCVoUD+BCQfj2Hw+93uQ6oDTt1ot34WG882OC3nZfnaz2fi11MOiYK4bjUbO4PH0D/tycXHheerD4dD2+31l1yFAq9k8vK1+vV7bcDisBCbaCdumej0/P7d+v29FUdhkMnFQjMzF+fm56wJA0J2U/vEf/9H+7d/+zd6/f2/r9doBpN/vu6y1zshOKJCoHOgHxzVwqgz5HtMQGBihE71Ot5UlcNPHGJB0AGFWZW7U1vU4Oz4iK/UHBQMN4lE2yCQCH3LhHAUsDWzq18oqKfBE345+CwhSv26TrP3W/tcNWlXfnKf31LikA0/awLEIsMpmaZuV/Y9t4XsEhtiWyHAq0Or96oK69kMHv7EewDbODMSBNedFm41xOJWnl4SRCSOfjJGbBwzILGFkwsiK/3wsRnodVn0wShj58hj5WHnRB64o7NhY7Tif46BJv+sONjqVq05Rdw+cPualR4OkRBBRhauhxPNRkA5GYnBQY4U14zxl3ii73WEBbL/f9/YQ5HkxoQIBwXG3OyzmvL+/t7Ozsx/lZJMOYGb+gsXhcGi73c5ub2+draON3W7XWbPVauXXw0SSjoC8V6uVFUVhvV7PVquVbbdbm0wmdnV1ZUVRWKfTcT0Oh0ObTqcOPvSH1A89hr4IUNfX13Zzc2O9Xs/bF4MguogDAAUOZe0AVAV5rlXGuc5+Tg2EYtCK4EO7tF61PQ3WEWA0QOt5apf8KTMJMGgg1Tp0gEWbou3redTfarU8QOm9CFw6aNP6dGCmLD7y0f5Hf9fgq8dUHmoL2ufIZkag4d4sblc5aLsioJ0CEj0/6kltJaZ1nKpX69d4SL06mFa9aL9PDc4pMYbVgXcqTy8JIxNGPgcjD0qwhJEJI18EI91v8/pZ64SRL4eRj5UXSymMiq27eQzqdYMmFSABTM9B6ByPT6FqDNq+unbU1a2BQR1RX1QXFaJMUJ0ctN8xvUNBCPZNGT5SKxqNhg0GA39BIiDEAl0CVq/XcyZvMBjY7e2tg9N6vXZmT8FhNBrZdru1xWJhg8HA70NbSV2AySPvG3Ajvx4GDxlut1s7OzurrNf69ttv7d27dzYYDJzlUmAEzEih6HQ6/rsuWkUv6EblCrggewUOrkfuyDYO1DlWluWPXkZpVg0uce2QBkRsFJnE9AG1WW2DHov3pB70oYMh+otu1VY1aPAbIKx2Dcip/0SA4Xw9D71oUVDRdmjghqXV/iGDGMTjQE91zP3U5+r8NQ42drud254OQuP9VE7IIvp83UCjDkyUGY2/x77rNbHoIEHBSe1dz9X/pwZHek9+1zpTeX5JGJkw8skY+fbtQ8ctYaQljHwpjDyYVJYw8jNj5GPlxVMKtVF1wuX8qCQVany61Xrq6mRqW3/DSKmDe8Xro8C0LRgaCtO6tC/6pF33pBsZPq6Jgxh97wPv8KBfq9Wq0l5daEyAv7+/dzDRwA8Y0KZGo1GZ0p9MJrbf7221WtlqtfI0CICs3W5bURS+wDnLMme8SMngN+pkQTEB+ebmxn7+85/bdru1u7s7azQO2/yen597oFEHo6/b7daZOpWt2gjAhjwjQCkA8Z/fsAkNhOqYtA37gCUliEbGiWuRvwbgyDCiizjgUP/IsqyyiNzsx+/+iEDIudofPa72xyJsBRL1FexS70UbG
DBowNQArECnYB8HXJvNpjKFrwOXKLMoozgQiHWrXEj/YX2H/o5/qI2dAgcNrPQ1Low+BRD6exzssDYl1qHX8llloNcoS49fYsdRvnWyijEae8Y/6mJ9Kh8uCSMTRj4ZI3cPOi8tYaQljDT7dIz0vmc/xsaEkS+LkY+VF9ulUDtOo3DsaJzKvCh7oB3gPK1THSjWx3cdgNUF9WgMWpfWQ4lK04ClDEA8JwY9s2p+c7fbtWazaavVqtLudrttu111209ABZah2+06e0N/2J0IAyJnnSDP1qbtdtud1+y4gw71wuzRT34HKFi8bGaVzxRSO5rNpi8Khl3bbDZ2dnbmrF+WZZ6jrjZCPnfUgzJQ1ElbNY+ZPnC+pkdEvaJL2qH3igMMZZewW47H4K6faS/61OCPvqhH+xEDONfpIIJ7aSDgevVDDUBlWVZSHGjHKXYoBka9pzKFMd1E2ST1SfVv5KjX0i8GBHzmfjqw00GSykh/x1a4p96LPqrM6/yWunUwqQFcdUGdMVhHYKkDCLWNOnulPWoLEdzMrLKhAcexsyhzHbSjL2X3VVdx0JLK00vCyISRZk/DyIOQzTNBEkYmjPwUjHT8y6rrBhNGvjxGPlZe9MXH+rSuzlIXuCk61Xfq6VGFXje4imxfPM/sCCSxbVqPKpuiThkdNDIvyhLqYBD2g9+URTgFfBjMdru11WpV2fo13os2NRqH3ZbYaakoCn/fR3RQwEVZLZUnKRcYagQYDSQwOKROwNo1GsdtZ3u9nt3d3dlwOHS2cL/f+1a4yIIFx8p+RUYUh6AN0WGQh+pX7SIGXpVlBA/awGf+lP1QpirqRlNpCN4VxqnGzkgVMTPXE/fhPGWL1I/qAqPattpMZPiQs/YpDvAo6q/q308JwnGQybkMInQNB/YV260lBmvsOuqzjhk7FWfioFXlqPFGz4l2qm2ra6fKiD5rbIr9iSksWmd88IwDoSh31bsOJNQOtH9xwJTK80rCyISRT8XIwWh0aLNl3taEkQkjPwUjS/txWnLCyM+DkY+VF30PVwQDPc5/GqmGS4lsnSpZz4lCQhjRUKIRRGHXKULvqQYY/9QIYZFUKZQIICgVkNA6ooPqvTGEaMywbto3ArqmAZHfbXZ8GzgAENktNTC9P4uMFUgAGoBMdbzb7ZwRzPPclsult58F3vriRuyCxcPr9bqyEFztivZp0AaINFVCUyZUrpHNU/tRAIn/deAJoCjTSD0qO154qbtkqW1Qh9om/1VeAA39VYYsglO0cwUd9SWO6YLlaOcRmJC/+pgyWGq72Kj2R+/NoEoHLKQNYEONRsMXsWuQVSZO05qU1eb+0W649jF2SgGiDlSjLLRf2l+NF1pHlEcM+gpe9E2vqdOH9lHbo3XFGY7YR42peq94n1SeXxJGJoz8MEZW9Z0wMmHkp2Jks/Hw8JlXZ0cTRr48Rj5WPvmBq+4mMaA+1hgVoH6vq+cUWMTPdUqkvsjinKo7sol155tVgQglxfxhNSA9Nyo6Ojy/a9AltzYCMu0lHxUnjKwS94tsT57nDnhlWTqDRhqBglKdkQNQ1Mk0936/d7Do9XqW57m/+0RZrt3u+K6VqMuY269MDm0CFEkP4U/bpn/UjewiIOBIuvia82KKj9aP3tXW0I0yNZEZi2DFvSjUEe0l3iPamgKSyo7+a8BQxrRualzlrjaq9hDTXPR3vRd9VfuhTo4VReFpNvqb1ht9UnWrbCH3RG5q/zogif4U5afnqx5iMK7Th8okDpK1Dj1HQToCkYJLHGCrDKKvq5wU/HQAQIkzFKk8vySMTBj5HIzMGPBnxzRFs4SRCSM/HiNdLpYw8g+JkZ/8wKWdrzNAZZ0oKuS6/+qIsRMqEDXUGPj1nrQNVkEZhnhcjU3vF8FBDTL2TduurJ4GYj7jMFGJ3EMBgzx1WC1SEXR6OTpQXfsJvtGZNWh3Oh1brVbebpw6OpKCVgz6qhtSKpAZ2+Xy0kiYtwj21M29CHjKwClwRHBTp4nMCiWCvy5QLoqiNhc9y47Mp96T34ui8POpgzbAMKEvdpzSYE8AA8jovwYYBXS1m1M+o3YZg1YcnGhwqwuYWh82oDrjeAzIyFUHTrCb3Bdbo8/YYEwXiPYewTIOIiP4K3hr7KB+jVNKDiD/CEjR5zSmaHu5v54f64uBW+WocUdlim41dik4qq3ENsZ76Dl1Mkjl6SVhZMLI52AksxH0J2FkwshPxcg9DxuWMPJzY+Rj5UXXcNEZ/uvNVTHxWJZlbjQ4pSpAp1spavgoQAWD46pycGRVPOfp9apoVQJ10ybeeo/DE9h0AWKWHff6px1qVCzubTQOu/YxVazBT4MGzA/5yWog6vwApS7YVTmo4apcKNyHdrKYkAXKZuZtpa/qEDG/nYClLA3nahvVmHFE/lTO1BPBBHlENlFtVIMfdsfxyGDpblYU5AHbCDjrYGG5XFpRFLZYLGy9Xju4ANR5nvvOVtpHBQ1NYdDAzX8FhDiAqvNNztXgUTeAUTnFc9SmVWY6kFBgj6ClzCVAwvmnWDGu13toneq7Kqs4uNJ3FmlQxu50IKSDSo1Nmq5CG8zM/YO6+K6DXtWD+kUcOKtPsT4hxk6Nf6o7PRb7pIBTNwCJthQH7nosleeVhJEJI1Uuj2Fk3niIg7KFd8LIhJGfgpFGn8tq3EkY+fIY+Vh5sQcuvVF8yqsTxCnB8p3fMUquo5MqPFWY2Y9zwakHIyZ/W+tXwel1OA7BHqM2M0+N03dl0AdybvmOcdA++txut322h75FoND3jOz3exsMBt5vXRiq/dC2qPzUsON0qxodjqeG3mgc3/9R54gK+vEY/VZ5wmiRkqHOTq77brerpCwo80H9Coy6y4zaTt1gpy4VAuYOhlQX5CrIozf+KyByTVEUdn9/b0VRWFEUDkxmZpPJxPI8t7dv31qn0/F3sRRFYXmeO6u7XC4dmOiHDpjqmHEt6heqX3SMzelxM6uwhvym9WCvHNeBltqLDgr1XSw6SNOgqrao9wKkuC4OCLWeeB33ZNcpbAa/QvcKgOo7cRCmfWRgp3LnGGkeer4yxaq/CCgMQKkLMNQ+xTZq0fjIPfEnPV/1qu2I8UTjbCrPLwkjE0Y+ByPNjrMRCSMTRn4qRmbi43XXJYx8OYyM99HyYptmqJPWgYmeF8FAp5K1EKDV8OqKCkZnT5R9wpDqghxt1vurwRIY+MyiTlgL6ucaAku/37csy+z+/t72+71vhR4DQrfb9TfNKwtAm2ADMHZd8Esb9cWGyiIpI0K7kAH16HQ9sqON9AkGI8uOaQJch6OrI5ZlaavVygaDQeXJH6Aoy+rLEumn5sTv98dUAx1gqO1E9ikyE1pvnYPRN44BItvt1gGAvjUaxx2lAEHef0JBTpvNxubzucsHWa1WK+v1erZYLOzt27fWbDat1+tVrtnv99btdm0ymdh6vbbxeGzb7daurq4qgBrTBxTE9bjKTfVMcEImfMeudWARBxcKvBqQlNWiLchWfTUOAh9jo+L/siwdBNS+ARPemaIDDlKKFHB0UKf3AAQjaMcB
ivYHX9G4o36HrKiP39THTwEZJfovnxngUl8EuwjscXCg/Yz31u/pYevTSsLIhJFPwci9HWNFfEhPGJkwUnX5HIw0M2s0D/pJGPmHwcgX2zRDAyAOpI3Rc/V8Da50mo6q0DXQ6RMwymLqnrr1M/VFRksVXyc0ZeT0dx4aYFhYrEvQ56k7vh9EUxIajeNCRwI60+44H3LkfST0VaexeaeVypTAoEaEk1N/NBTkoKwI7ad+8qqpH7nptTq9iwxJCeA7W/jifLwwUq+DOYuBIbJE9EOBmrpoD/dRW9V6YNW4X1EUtt1uK+9iAThgcQHV6LjUs1qtvC5AifrH47HXfXZ2ZmVZ2vX1dSXQLxYL++GHH6zVatlwOLTdbmfn5+e2Xq9tPp//yLEVxNXf0FEEetU16w6wK80NBzQ0aCvYwKip/FUv2ibAWtfzMUgidkRWMfYxstXEGxguzqEuZEBdGvC5lmsUnCNTqDKM9anMsRUFjxjU9bj6lAIs10S/iDGUvwhisU0K5jrorqsv9kt9V2WUytNKwsiEkc/ByHJfnelMGJkw8lMxsgwP3QkjPx9Gxnq1vMimGQi5rgH8ro1WBalToMB4fpZlHoz1CVzf0g6bYnbMrY5PuziIpkjQZjVo2sGOR8pkKLPU7Xa9TWqUGBNt1QWgOkCEtYMVoux2OwcrfcKPLAMGouANixef5DEg+q8OolO6ej3nKiupqYBqvJqmAJtFINLUB10EnGWHl1vynW1NqSPaEv1AfmbV91hoX7Rv/M4xBT/YwaIobLVaORCsVqsKQwljq87LNdhNWR7y0mezmU2nU69nNptVZPLDDz9Ys9m0s7Mzr4dATv91UHNzc+O67vV6NplM7P37935fDdzqT6p7ftOUFGRG3disgghBkfO328NWzdg5dqUDCw182rbo23zmuw584hS/ppJwLx2k6cMWdepAUEEPe40DvAjQgJ3qnfNUluo/6jP6m+arUxRANOVH45ACMXV+SN/aHo0Bek48HgfM6lN6bZrlen5JGJkwEt09BSObMhtUlmXCyISRn4yRTTbcsCxhpH1ejHysvEhKoXZSWTMapn8Yq1n1bdA63a5CpC6cVXNcqT8GPH0S16d1vReC0cFaBLyyLJ2NghHQc3kXRmQGolIAF9ITYOTMjlulw+I1m00PZBg+dUew1qAwHA4tz3O7vr42s8N7RgAwgrY+scfgity73a73mfeD8LsGljw/5GivVqsK69RoHBfqEpBgrVhAjeECzARQ/mtAU90TDGBLK/nuD+1TxlcBWJkZBhvKJi2XS1+QrYOHLMscEMgRR1/L5bIy0KceZf8Wi0VFn/1+39m/Xq/n0/sRMOm3+sput7PFYmG73c4uLy+tKAq7urqy6XTq98RWdQtg6kH/yrzyXYGTvscBIjInqOvghHe70FbtSwzGAIaeRztUd9hnHFgQyKmDNsf3i9Bv/FTBCX/M89zf9aPxJg42eUcMcijLA4MfU0aIQ8SBuL6BuokJDFQYdGHDdUClMSXGKuQOa89xHSBEecd6+K6gr/5Vd34qTysJIxNGPhUjC2f+j+utEkYmjKQdqrunYmT+YAPNVrPyAJow8veLkS+SUqjMp3acYwyCNJhpoxRUNHAom0ddKEtBhQCEcWu9WZZ5kKZobnfcKQjl81mfkjHe5XLpT/2wbDABGuRxbNqtv8HmBwasyQAAIABJREFU5PnhvVTr9dodFSBQppHvGsg6nY71+327u7uzxWLhTo1h6n1hqABbDE+NGD3xH70WReFgpEEIWTNVruBUFIUHNILner2unGNmtlwuKwNndKq7USmzSioi7dBBA7pG95o/r4wEx3e7nYMIC2811YXgic0CPgxASNMheAAeMHir1cq63a51Oh1rtVrW7XZtsVjYYDDwdnMtwUqBW4OSWRXE7+/v7fz83H71q1/Zv/7rv9r79+8dXFerlU2nU5eF6gZ5Ktir/DSQRhaKulqtlgdodK0PPLqrUgyGajsKNBrky7J05hofVbvFLyMbiF9rHFBbQR7qX5QY3DVo01bdeUxtOOpJfVZ9jQEMII+9qt8hI+KfxhCNd3EwqLrTmQIdkMTBI21XsKP9+J6CcnrY+riSMDJh5HMwEutA+wkjE0Z+KkZqnbQ1YeTvHyNfZIZLDTDLju+80CdmDEufirUD8QlXpw8xUoo+Javh81mDP4pWBgAhI0xVHO3vdDqudD1Hc+G5J8bHPbIsq+yqhPKQQcwdV2UpKGLAODJ16xN+s9m0fr/vzsH5WXZg36gH+SkDR5uVJcQIqYMgsVqtfIGzmXkAYYGsMj7UQ/vqBhNmVsmthxnTz5FRjfaDfcECapCEYYrMLABEWoTaEGAOy/bVV1/ZbDaz0WjkC4WRIfej77CApFCQAlKWpQ0GA+97URQ2Go1st9vZbDarAKUCJ/JTW6ad6Prt27fW7/ftl7/8pduK2gg58ciaAVi327XpdOrrD9DlaDSy7XZri8WiYkeAnQKP2jl6IDAqq4dN0B/N+UeW7GSl9qN+0u/3HUCwH7UnjTP6hz9wb/qDPFlsrfaEjyPD2PfIMMK+6iCF2EGMMjPrdrsOvJxLDEKmGthhXbEFZKIgovWrXNSe4kMp5yk7S1EGmdik91PfTeV5JWFkwsgnY+SDG5ZWJoxMGPkiGOljL0sY+bkx8rHyYrsUUmLn+E+jMTycUQusBU//BBV9us3zYx66PlFyX4wMISCwyqLUsrpzD3USGHBWNVpYEwIghgnrosFK2TnaBEMGoGgaBkGKYziBMgy0AxlxbWRbVJZmVgnoMDEALOfBrnEN8o16m8/n1u12PWArM4PhKlsJS0KfzMzZN/ShjBMBQ0GAgNjv971N1KdOgqOqDumvDjb0PAAfmXa7XRuPxzYej22xWFiWZR740d9wOLTFYlGxI+RQFIWt12t79eqV9Xo9++abb9yuaDvpKHxnRyaVt4IzrA/HsUGzAwv1n//5n/bLX/7SfvKTn9jbt2/9OAMovTdMNTIhP34+n7vfcRzAhaWkHuTOIEPZKbPqDkzKxEXWlGCtYIOt6e6VcWDFMWW0dLCELeBL9EuBmdQWBooaULEtQIU2qn8oq4nv4I8wjfiosp+qRx3A0l98iHPU97hGAVyvo90KCBGMdLACk89gV2VHH+vAJj1wfXpJGJkwku+1GNnpehtpW8LIhJGfgpFu9w/hO2Gk+XHkwfdPxcjHyout4dL/UfAc0wbB/PA0r4wAzo1SKARirscINRVCQSeyXjrAV0UiUBxNmTYMYbFYWFmWNh6PbTqdOlOCce73e893JXVOn7KVjcTZ1+u19Xo9Z340OPf7fRuNRv5d2blWq2Wz2eygQMmbx2F7vV4FJDTgaXBQPen6APLJ6Y+ZeeDFkaOTKbjAaKBD+k8wMTvuBEWAIEjSRlI5Go3jYmmcHNniPLQDfamtqD2p/VH4Tn41YIlt9no9m8/nHlzIU4eRw5Gpa7Va2VdffWVff/21fffdd/4+GH5nIKIpLfQbW0Ff9BVZA86bzcbZoGazab/97W/t8vLSgzspPMhM2V4GQDB
1AEar1bLFYuGyRi/YEHLRaXTWJmhQVnviN7MjI6tpIMqOo0u1S2WClWGmXwpcyuRyjf6u9osvjUYjlz9AjV1pWowOSrFT2qhBnwGJDlyRHfbMMfwNP1Bd8x996yBRwVhtGbkp+04cpN8UfZEo1yrgRAaVc+hjKs8vCSMTRj4XI7GXhJEJIz8VI50oKy1h5GfGyMceuh594Mqy7H8zs38uy/LmsfNwBDquCqBR2nAz82lMhBCfNAlY1EUdKtzIUGnAxPhQuE4/UqiTKX8Ur31pNBqeJsB9er2e7XY7Z5T0KZg2afBXNgn5wOKUZWnD4dCNFSDC+XACBUO+K4gSwJUJQm7KLuq5yFenvEkX6HQ6tlwu/RhtRmaRIVXwVPZJAw0sDDInrUFBnQCjDo48kaXam5lVnJmgjz7UCQBbdQi1mdFo5O1otY67KbbbbZtOpxU2ErCO6QKbzcbu7u7s4uLCQV5Zlmaz6QtQsWNsHv0iJ2VvdFCFLQJkm83Grq6ubDAYVPpNQFG5KTu+3x92EaM9g8HAzMzZNnShAQm7BuSVNcdmGcwoG6UsloKrDkIiANQxewAUflTHruGT6iPqPwTuzWbjawewkxholdkiZnE96Uir1aoSq5AL59BXGHntF4vbNT7yR50qGwZIyl4rCKFXfFTZQrMjcKoeVD6xKLBrnE7lUBJGJox8aYzc7auvXkgYmTDyUzGyjihLGPl5MPKx8ugDV1mW/8+jVz8UGkYnECr/eTJXpkOns5X144lWAwHnq/AADDU2zUFlarjT6XiARnkKWLSBa2A4yJmFLcmywzSymdlwOKwEaGUZYVkwVhS63++t1+u5MxNYMJCzszObzWa+IxPGpPViKCzQVTnTR/KN0Qvy5LvKHKNS48bwYIHoP/duNBrucOqgOg2PE9DvLMtssVh4kKG/i8XC5vN5ZSGsphLgZAoe6F2ZPGSquldw16Cudofs0RP95EEQ9mIwGPhCXphNAivXrNdrZ7P2+719//33vkNVHOxgp9iNpv2UZem52gpCZlZZLI4cleVDHzoVrlP3DJR0gKEMK+sQkAFtiaCA/xA0ucdgMLD5fO4+q4NKZZcAMw10BH1ld3UgyOd+v++61AGTp+M82KzeDxvAZ3SggC5YpI2tKUBxb37H9tCLsn7IjtiELnSNjcqQtrbbbR+cKJuLz0VWVwFWByN6rC74Kzun/dNr8Tkd2Gl8TuVYEkYmjHxpjDxumnGQccLIhJGfipF542GGvZEnjPwDYuSLreEiQMHSaJCmE6c6iNBgt/RJlmCl4IQTwW5jiHHaVp9qCaqcQ1F2StkUrs3zvLLgl/qHw6GtVit/8kfgAAAsAQphin02m7kMCPL8jnOqrHa7nTMAtDsypDApGCaANhqNbDab+TSu9hXWjvYiH6bDcYiiKHzBsQ5wdQpfdQWrCfunMxDqhBpAYYvMDo7Z7XZtNpu53jBsdNnr9fwa7st/dUIKTgxbqiwIQQMWBz0CRvw+HA4dFDUQmh3Yqvv7+8q7Sdh5CbDBB0h1wDewr/1+b+v12nUTwRnWDN/QAKPn9Xq9CnvHfXkXCqCrdWG/XENApj1cw3fsRgeJyvBpAGSARoDs9XqVtRfKKqEXvisocF+NKzrwVH+g//iVDiR18NNut20+n1tZHhZtMyCgXmSqg1UGp+v12hqNho3HY7u7u/PBIXYOKwc7jY9p/5SB5T4UjXXYIsdiepnGUHSqa3j0WvUJdKay06Ky5Ht64Pr4kjAyYeRTMLIhqWEJIxNGvgRGuj1Z1QYSRr48Rj5WXnTTDASJ8vW4OrzuaIIxIghViB7nWjq02+1cQfwRCBVgzI4bNfAH28N1OLOmNOjTq+adA3babnVm2sRv5KITQPr9vrM/Zla5L8ZK/jlOiyPSF3Jku92uvxRxMBh4CgT1wjQxDY68kaUuStZcaWQHA9rr9Wy1WnlwQKcEBtpIXerk6kxmh2BDXRi6pjdosFEHwFk2m41Np9MK4CnTlWWZgxEyQ8bqVDAugDigyba0Wi9T+tvt1nWHrWEXRVHYYrGo9Au9EzjK8rCb0Ha79fsrM8o13E+ZKLY3pm30SwG3KAqbTCZmZr5jFsER+8J3Go3qzkLcS4MHDDa7SuGfMQ1IwY7fVWZq49gIMlRZIQ/Ai7aaVXdN4/4cj2shOA4zRzuQ02Kx8FQHBnAEaK7VmIJN3d3dubx6vd6P9EYa1Waz8RSr9XrtslV71T5wL/oMk4dMkCvt53xNvaDdtEdjqN5DjytzT2yIJcZeBbxUnl8SRiaM/DBGPryM+mGXwoSRCSM/FSNbzcNnZo45njDy5THysYeuF3kPl+Zd0mCz43S92fEN3So4TTOgLv3M+Rq8AX0MUoGLNAWupz5lxlC8thWD4zetE4CIQmW6lXti7ICG2dG48zz3haEYHE/maizcQ41cA6tZ9ame72ogGD4AyXahKnOz45bspCzgfN1u15mH5XLpdaO/3W7nfYBh4lqCTVEUlel45IT8zY5gx2faRz0K/DgRZbPZOHsKo0S/aY+yXVwL88EUMMDcbret3+9bs9m05XLpqY/7/eFlmTrdzb2xnf1+7+8T2W63/pu2q9Pp+MJi9KYPB+qoyNDsyLAQJLBTArWmNcAWk7YQ0zBgPBl8aIAlqOmaBPUJBoAarOgD9RMgkS3nqR4mk4nleW6z2cztjJx+M6u0i/7xnYEMQKNMv7LgyFvlijw11UAZMey01Wr57lO8sFPXkcBg6gJ1lSE+zICQ9wbhKxr/sFEd9FIn8Y1UC2U10XF88MGHYBGVbdffFWwUGDTmxWuIUfqAkMrTS8LIhJHPwcgyYJ1ZwsiEkZ+Gke4bjdxne1WuCSN/Pxj5YjNc+gSpHUTxCA+h8PRPIYjHJ1TtIMrAkPRpXp9OFVBQNsGDtiiLoKyhBmumWmFYlE2hr/oUHNsbn3YxAAJCnucVwIGpwHjW67WnORDcABzuT9qAsp8qJ7MDk8MOQ8hNZW1mFdaR9vf7fZvP564r2sp9NNggz3b78CLExWLhU+AAEvceDocVecDicAxdq2OpjeGE5H8TsMwOaQ13d3cVMINBoc2AJbnO2Ol6vbb9fu8LnwmCWZbZeDy2b775xrrdboVhRoaLxcImk0llkSwARO4zbNhyuXSwp8Am0z4YM4IZQZ7petYxsDMYAVhTT/ABZYrUB7EBTV/o9/tuawykYCHRh9YDcOpgBSaROpVlZ8F6q9Wq7Ex0c3PjekKm2As7kdG/0Wjkete+YI/Yi7Kv2EKz2fRti7vdrg+YAL7oXwqsLJ7Gb/VdMrqoWNO3suy4UJogrzFAGT0dhCrTrCDOoAHAjucgC/2vtqDxSGcfNOYCHnqNxtdUPq4kjEwY+RSMzPKjj+nDX8LIhJEfi5HuD1lesauEkb9fjHyRBy46oUBCJ1EQ382OT9E4rwa4yEphACoIZWR4WiXgYlwYc7PZ9G1lOYf7xKdpNQYVcAQ6gmhkVriOQKaMCwyEgpfZkUXTttAvBUyCeWQvYR2U3QOEcNZer+e7TLnixeFIUaDt1IPTZFnm+dYs+A
S0lIkkR562q23AupAmQHCEIdGApFPkjUbDJpOJTadTT1Ug0BGop9OplWVpt7e3P8ozx8YovV7PXr165e0gWPKOE/qgdtBut52V++KLL7xO5EVeM+8WWS6XDh6bzcbOzs5sPp974EIP6JSAzz3RM4wvcl6tVjYcDq3VatlyubRms+myw5Zgl/A3wAd7ZK3DbDbzwRWDJA16sEf6/hlsDlslWCuTqAMrbJXPpIkgA9pSFIXNZjNnppSZL8vSt5HVtB78SO0LX6Tv2BNrMpQxZ1Cgvo4MFOhg0JWx1H7pMXRI0RkNHVwSFxjgMPjCZ3QQFXPRSZPqdDqVnbz0Phr4iREak4kVOgDWgSUDOv40NioYpfL0kjAyYeSTMVLew4UME0YmjPwUjNyF2Z6EkZ8PIx8rL7ppBg2nUzFYI8g8z63X61Wm8QEKdXwFh6hMfsfI9ToFIM7DqQhCmt/Kee12u5KeQJ6oAkdkBkmXoI88ZSu4qNLokyqW81TxOA6BRgENhoc24/BaJ2kHGCMBCflpbjLTv3xXAD07O6vk8+t/wAYZq1NpTq4yRuiBfHCuVfAyqy54xflvb28dfJj+ZwHuer225XJp0+nUBoOBdbtdu76+tv3+kDIyGAys3W7bZDLxdIiiKDzX3+yYc0zuOXKYzWb21Vdf2Zs3b2yz2di7d+887x4WaTgcOqDSZg1EvV7PmTbsFduBwWLKHZnA4iFv0gmU9QJwI/vKOdyv0+l4wKKdysrOZrNK0OQdKrBvkbHFLtkKeDwe2/v3792XlP1BttyPRcuwdeT9A2C8WLPdbttisfA1HQAgbBx9pV3q68rE4ZP4GrLX/jBAAayIDXEAir0om42+uUaDsv6m37U+7qvrUBQYIlunaTSco+1U0IwDcI232i6V1Sm2juOpfFxJGJkw8ikY2e4cY5BemzAyYeTHYmSrWX0JcsLIz4eRj5VPfuCikQhAAzg3V2ZIWWgcAcMgmCmDpwJFiNopNRazat6v2fFdH9wLB8GoED7GhzA1gOuUpgZTnYqlXgxN20d7MFxlHnq9njUaDVsul34+AQXGi8WYtF+ZtzzPnZnThcYEGK7TaxQMYGjIaScvd7fbVXKIR6NRZaEs8lQWj7YrSAJ0yJM0geFw6CyTgp8Cq5m5zZyfn9v19bUtl0tf9Jplmc1mMxsMBt6Xdrttt7e31u12bTKZ2H6/t9Fo5FPu2CGsLo6KrM3MQbwoCpvP53Z7e2v9ft+Wy6X1+/0KEGdZ5jpkCp3peO612Ww8mDJIgfXSIMRgxuz48kNdAzCbzWw+n9vr16/t7u7OmanBYOD58Z1Ox8F2u936FrYsTNf+sdibB2BYI2VvaBsskzJQAAb3++KLL5xR4r6NRsOm06n1+33r9/se7MvyMMsJqOT5IdV0Op3aZDKxdrtt5+fnDkDY8Xa79XfB6BvslcHWXHEd4AGg3BcdwFriN1mWOfOqi5bVlwB5ZeMYiGrsgoXjfhrc6Q+D6rIs3TaIo/ibDrjQI78Rn3RRPHER3+Se2uYYp5AzctRzI7Ck8vSSMDJh5LMwsnUY0GaWeVpcwsiEkZ+KkWZmjebxQTJh5O8fI190DZcqj4CsncMoeeongGiAoi6CCkEII2B6m0JHKQRcnnYRqhqYWXWXFb3v3d2d5y6jHH5TFk4Xs+p3nU6lb8o+KsvIdQpG+mSP4dAWNSRNs9Cnct1mVY0SGcBgwnDqeWowMEUwY69evfLAwBvcleWjP8iKhba0D3aJevmdNnCOTqGX5SFHnnSPr7/+2nc5ItASpEejkX377bd+72azaT/96U99IIHMdAq73+97PWzrSz5+p9OxTqdj7969s263a/f3986+6QAAffHwyM5ZBHjs3+zAYGXZgfWFFSUQEdwbjYYtFgu/DgaPYLxer+3s7Myur689vaPRaPguW8hLWWEdEJHTT447AY33qGDH+Fr0XV2sO5vNbDgc2tnZmWVZZufn5/btt99aq9WyL7/80mazmRVFYa9evbI8z309QFked/8i2F5cXDhTuNlsbDabeToMgfjVq1fOcpK3T9GUVR3EmFlFRjCYeZ77Am0AXAsxhKCPnlSOGmNoV2Tq8T0N5HoPYiSDONYHMDiD3dN6GQw3m023MYAmDo6JyXzX2BNjpz4UUDQuKDOZyvNLwsiEkU/CSAa3mSWMTBj5IhiJTzfyho1Go4SRnxEjHysv8sClN+eGOtWogkA4sB84kE75mx3ZCwIuggJcNHczdhphau4v1+EUCiooJz4NYyRxoEFQItiuVqtKUMBwqIOigEJ+qdavhqksJ/1Fnhi5tovfCAD0RQFVHQf2AVYPkGKb3bIs3WEB//Pzc5tOp84UUC+fkS1yxeC1XrUTBUCVy3q9doas2WzaaDQyM7Of//znVhSFfffdd3Z2duZ963Q6zvhkWWaXl5dWlsfUMuwDGSjrSmDo9/t2dXVl33zzjXU6HRsOh/bnf/7n1mw27erqysF1t9vZeDy25XJZm+4Da4Zdj0YjT+XA1gBN6ms0Gg6i6IJBF+woW96yQxDpH3me2/X1tW/Xi/4BTeSK7Nkaud/v28XFhb19+9ZznpfLpdsH7Bu6Jqiu12ubzWZ2dnbmbeGdMpvNxgaDgf3ud7+z8/NzazabNp1O7eLiwnVAv5QtZ6E0dcGmkU6hjCZpHMiQOKOpSTrQwZZ1gEg+OH3UXdPMzNumrDssogZpZa65DhujnxofOa5+Tb/4XePTfr+vvBSSe8bCuRrsub8OEBXgsFn6yTnKSlM3dvoh9i6V0yVhZMLIp2NklVVPGJkw8lMxUmVHfxNG/v4x8kW2ha9rZGTiCJDK1PFkzvsgGDSZHXdVQniwMPokSTDXYGx2ZNR0UK9CI+grQKEQnsrn87mzDZqfDbvAAlGAJDIAtA1gMjsyUvyGbOgjv3NP+kM+L8ZFoMS4YQeQM3KnHciPaXP6oQAU2UXdPrbX69nV1ZU7OTsIoXcFVmUXabO2lynlqAPWc8EmwhDCUAHQ7Xbbg8h4PLbdbmeTycRTIshXVyYD/YzHY2s2m3Z9fV1JjyDokOIwGo38t/F4bGVZ+pQ/djgcDm2/Py6wxQ7IbwY8VquVjcdju76+9kCCnna7nQcL0lGQrcqLY+12287OzqzRaNhwOLTJZOILabH5LMv83jDWBFDaRi79aDSyq6sr2+12njIDYKBb6lHWE712Oh27v7+3brdr/X7f3r5964GXvPXz83O3OXQD6LOombQEdKKDDOya9QZFUdj5+bm3EZuDyeYlptQDsAO2q9WqsrsVg0f6qSkN3FdjCqkSCga6IDvPc5tMJpUHQHxdGXMAlRgXAZEdr7DPLKu+eFMHCNSPr2nfaaOCCn02qzLuGs8pCjh8TuV5JWFkwsjnYmS0nYSRCSM/BSOxqUbzuOYsYeTnwcjHyovMcNEZNTgaxneEq0+gvEAPxSjYoGCMT5+aIyOnBqhMlzJfBEIN5vqdJ3EErNufcl673fYdWprNpucIDwYD76tZNSVCmR1VGEY7GAwqObg4qhqE5vVrXTGPVZkEjB8QQW6R1aCNg
wtjVkejka6vr803SX7xGxKrer2uYDCodrttiacLTK68oNVqKZ/PK51OK5/PmzRnNBqp2+0qk8koGAzaoE0aBNBwQJIln/F43BIvAIX7I6GABQYAXalMMBg0SQodzZDkIGNhHQHj7XZr8iYkZ5JOTkWwHxJYEmxiuX992OVjpI+RPkb6GOlj5B8/RvKzP3b9LOjpBmw3EHDcRwAiOGMc7o0SaDF02BwCnStdcI/00B1zNO8Why4WC1WrVU0mE3sTl2SBFiPmSJ/jX/TS4/FYZ2dnqtfrevv2rW0Uml8cG6NutVoqFAoWAGHu0um0dWGBCQgEjt2AcPZwOKzBYGBgFAqF1O/37Xu4T4oe6W4DQ0UhbDAYNEbAnSkSDoeNNQSEW62WLi4udH19bUWTBCL+l/apzPCAwSsUChascYr9fm8B0u1KNJ1Otd/vdX19rXfv3mk0Gqlardp+wkwwCNNNTOLxuBVIUnyMo8K0JBIJKz6OxU6nvqN15rnz+bzpk/f7vUkpPM9Tr9ez+2CN0F1jb/F43BjUePw4PJLj/2QyqXQ6rU6no3K5rFwup3a7LUkajUa2nhRLw+qwP7BvkUhEb968MbsJh8N2j5FI5KSV7Pn5ufr9vkkHYHdgXUKhkPL5vIbDoUql0smatFotlUolRSIRdTodY0Txpd1uZ3IFAguJC3sKEBEwQ6GQXr58qXa7rVwuZ/IUfGW1WhnjRcIVDh+7R43HY9N04yfD4dAGNDJhfjabqVwuW+CVpGKxqHg8rna7bQlKNBrVJ598os1mo16vp6urK7XbbdXrdXuOwWCgb775Rn/xF39hNQisTzKZtLUl0FLAjwxHknq9nsliqtWqJcj1et0YU5IralHoAkYSyV7B6kqy/ez1epYsk4TSoY5YSVxC306C4wb/4XB4kqxLj6cN/H/u0z3NYl8BFP/68MvHSB8jfYz0MdLHyD9+jHwvDnwgbvzoFQweO81wo9wQN0ExGjdMcOQImTd3iiuRCkiPGlS3MxK/yzEhR7eTycSMMJ1O29vz4XCw43wc5HA42EYQkGCsSCy4l6urK0UiEXW7XQUCAWPC2GwAYzKZWEHqbrfTZDLRYrGwt2QCEs+USqV0OBys6JBiXdgbGJJkMmngx7E6TABHxJ7nqd1u232z9tvtsYUpQZFOQBzv4pyTycSO1zkmJ1jDCuCAq9VKzWZTlUrFiiFd3fdsNlMsFrMgxB7AiHEkTlee/X5vXXE4Bsem4vG4JRrUDpAosH6wqLB/2+3WNMwEX4qiAVh09Tc3N6rVahqPx6pUKhoMBsYCEoRTqdQJu8hawSoj14hEjh20wuGwvv32W5ul0uv1bO8AbdYzm81awMJhSWiQAMDm0lkLCQMF0NfX18ZWss6w6Pf398ZwSlK73TbWlQQEGU29XtdgMLC/p2C43W6bJGC32534HZ2h8OVA4KgJl2TsErKUHzKiyF54lu12a0XHnufpyy+/1OXl5UlNSTweV7lctkBKzQXtpwEO/IaEFdYKaRMyiXw+r0KhYPuLLAj/JUFB3gCQDQYD+9nxeKxCoWCSCNc/pUemjhbgyLw8z7OkZrPZWCevd+/eGdhiY4VCwZJoGHXAmAQb8HBPBrARYo9bjE5C63meSZMAZxJuLlcW518ffvkY6WOkj5E+RvoY+cePke874frJdCVv9Hw5X0rgiceP/fsxtHA4fFL8hhaTFo+bzcYmuANEMDy8US6XS83nc02nU+sMwx8cij8UqmL8fC/BDHDi+J6jeN6GuUfP84x14PsZPscch1qtZmwimnSCz2q1siN2GCOcU5JpgmEwuUee0y1CjsVi1smII1TYKYylVCqZQ9XrdRWLRe33e5s0z9F+Nps1XWqz2VS32zXtOse/sIi0hB0OhxZ0eSYSMZgAijBhAQEIin5Ho9GJYxeLRS0WixMGDobOPXInWMAC7XY7KzR2kweY30QiYayIy+xhV25gJDABWNw3ml2KMAuFgoF6Mpk0vTXyDwqL2UuSCuyQYZ7L5dK03zDHrL3nebq9vdVkMtF4PLbaDJKSVqtlARzNOvrn2WxmgDiZTNRsNq0ugGder9emYUf3T7AaDof2mclkUtls1toUs4ZnZ2fK5/O2vr1ezz5vMpnom2++sUJeJDzVatWYM2owYN5JnphPMhgMVK/Xbb5LNpu1IZ6wn0hAgsGg+WkikdDl5aUxc7DAMH/dbtcSDcAsnU7r/PzcAIrkDTB0AVQ6MrH8/uFw0IsXL6yltBuvAArP80ziQLINEMZiMWux2+/3TVdOC9/D4aBMJmM1IiQd7gDZ3e7YlY1YQJwgDnNy4jZf4PQEf+CUBFsHCInBMJh+DdeHXz5G+hjpY6SPkT5GfhwY+b5TruAXX3zxk8Dkn/7pn75ggBnMHBtAkMK5uEneKtlsWBGOBQlM7gPzILyJEkgJ0vl83mQMvB2PRiN78+etGeBwtdmweLz1suDMJSDwEvgJHmjTXdaEokfaWwYCAZMBwAJJjzpnEhi07ARmQCiTySiTydjbdSqVsueHXQyFQsaixGIxY7MoCCbwEiBgVzESl7kIBALW9QomkaLZWCxmDAwzKiKRiMbjsQ6Hw4lsJhqNGnuSTCatCxXO4nnHLjYEQD6b42QYNoIcrWphG13ZzXA4VCAQULFYPAENEp3D4WDaX0kG6DBLvV5PxWJRuVxOkvT69WsrEKZugoCCVh5WiPtIJpMaDodqNpuKxWJ69eqVPv30U5PvBINBG655dnZmiRNgRTJAAEun0ydtmNkbggzJBUfu3AcBh+N7pBV3d3emt5aOLHq/31exWNR0Oj2RqkSjxzkx+BdzNgCdWOzYVQmQA0RJJGCW0JOjgyfZ5Fk2m41Go5Gi0agqlYpJIv7jP/5D1WpVhUJBL1++1OFwUL1el3Q88o9Go8pkMrq/vzfAS6VSSqfTtl7D4VCdTueEgSwUCrq9vT2JQ9RNPDw8SJLFLFhSZC74P3KIwWBgMSqbzVq9R7lcNvkEtkJCzeBJYgxMtSQlEgnrltTtdnV+fq5YLKZWqyXpKNtBboVPE1OxdXybhAHb5xJGow0AACAASURBVGQKXyJuwfDDnLogg5/D9AEiwWBQw+Hw4Ysvvvi/Pwk4PqLLx0gfI32M9DHSx8iPAyOn06n+4R/+4f/8GBb85Beuf/zHf/yCzizb7dYM8XB4nIlBMOdBARxkDev12tqnSrIFQLMJE8Pi8SYsyQIwbEosFrMA6R6ZU8BKa1AK4jhCxYlc9o42tXxeo9Ew5iKXyxnoobuVZKwjhkNrVe49FAqZJp23cIyYt+3xeKybmxul02nVajUzAroNuVp/3v453sUwOcqG3QS8cSAkJgBjoVA4YRtZ/0DgsZBxv9+bzh+Zw3A41Gq1MtbFfVb2H408wL7dbpVMJjWdTk3ywD5Go1GTHOA0JBDT6dSkG7B/SCNIDAi0sA8E6PF4bEDJ/fHs2FmhULACS5gxSQZksC6ed2z/S6DgqLzf7+v8/FwPDw/2mYBnrVZTMpnUbDZTNps1lm8wGGizOXbjIiGiDgHwZF2q1apJKKrV
qjzPMzaU+0Vig7SIgtU3b97o66+/Nh03IBAMHgt0qSeAzZFk907XKPxTkrHpodDjsFJ8iQTRZev7/b5ub2/V6/XMD/BPYgRBnXoIwHI+n5s8BjA9Pz9XPp+3gmW3i9doNDLmOJvN6uuvv9Z0OtX19bXZADIVZp7AHN/f32u9Xpu0CCa+0+lIkg2c9DxPzWbT/PvNmzcqFAqSjgwYrXeRE8GSuh208AEkWN1u1yQXAAn1FIAKCZord2DNiWuu9IE4QTwOhUI2y4X/xk/ZQyQ7brEw/x4KhTQYDPwXrg+4fIz0MdLHSB8jfYz8ODByNpv9771w/fM///MXbCgMCawQgZ+3Q96sYY94WI5DM5mMMVO8XfNwBCccmOCx3+8NkFzQgenbbrfWwjQYDNpCo0lnc0KhkLF1FCkTzFKplE3ZpnsTAQ4mgntGDw57RxEtDpPP55VKpSwYwcJwvAozlMvlbE1gABOJhEkCQqGQBX3WAHYHMCQIYWi0U6UWAO04xk7A5YiceQuVSsX07ExsDwQCdryLRh9mgn2BWUDTDuvXaDTUbDb1+eefKxR6nJ2Bg1B/AOBSFMt6cxT85MkTffXVVxoMBnrx4oU5RTD4OBOGQlrkONgSTsVaNBoN25to9DinhWC2Xq9Nv45tkQAsl0uVy2X7u2KxqEAgoPPzc3333Xe2vyQnJEp8dywWM3uZzWbGsNHOtVar6fr62hhhZADlctnAcLc7FhWz19Pp1EA7Go3a997e3podUQOCjAV5TbvdNhnDdru1I3YXyAEVjvUBuGKxaEweQxmJBYBXLBZTo9Ew5hRWmELpRqOhdDqt3/zmN6b37/f7xrJiF9RAvHv3zmoQYKhZJ+nYvvjVq1d6/vy5MW0vXrwwZm21OrZ7rtVq1q0Ilpd7XywWarVaZisAKcw9a0hgpwuWm4DwfdglJwG73c7kF9QwYH8UmzcaDWO7+T2YcgAdm3flX7DjxDvshPoEfAVfZZ8AEPwM4EF7779wfdjlY6SPkT5G+hjpY+THgZHD4fB/94ULvS6buNls7NgNyQDaSR4Eto+HjMfj5sCJRMI6DcViMWUyGTNEpovjLLy5EsRcVgvAgNlzfwbncBMJgHA0GtmmuW1fYcFYfIyBzjtsgtutZj6f28T3UChkk+ORGMC+tFotY7lYD9aUo33a1UajUWMCgsGgafAJ7LAGrBl6djTeGBh1AMzT4DspHu33+zb9nc4wdBuC0SwWi6brhe1brVZW5EmAzOfzmkwmJisoFArmtNgGTFAoFLLvwTEkGeuHc6xWx0nlg8HAgBhJCoMZXRYStjabzZq9bTYblctljUYj3d/fm36aWge38xGsGBpzSXYMDwMci8X03XffGVjf3d2ZLTCpPZPJGKgnEgk7kqaNL1rt5XKpN2/eaDweq16vq9vtajKZ2IT6QCCgh4cHFQoFNZtNJRIJTadT9Xo9u9/ZbGZsaDqdNukDrW4pzK3X63rz5o0xm/yhExHJIK2NSWpYRzoiMTMH0MNHYGFTqZQymYxJi0jCYDaj0ajevXuni4sLffvtt0qlUnr27NlJjQbM7Hq9tkQUzT86f5hMmN1isajhcKjvv/9elUrFurKtViuTfrhgDtuPP0ajUVtzWNV4PG6nAXQvwz632+MASfYV0OBnaSBAVzFmk1CU3mg09Mtf/lJffvml7u7ujHUlIUJHLsmAiv/P3mezWWvXCyNHPCYOcmJA3ACk8DmSVSRDwWBQ/X7ff+H6gMvHSB8jfYz0MdLHyI8DI6fTqX7961//6AvXT+5S6LI0sVjM9Khoi1l4NNJoaTnWdAMKQR1GZzgcql6vG+jwXbvd7qSYkM5LgUDAjpkJuiwGzrDf7621KG/Truad4342jGNXnIHPYeI4xZMAHIwEzjKbzSxAsxmwNv1+X+v12gr/OGIl+PDcvJkfDo+tg3e7na33fr83jTGgF4s9DuPD+VhrjC6VSpkkBLYDAx0MBqbvBZTQS8NolEolhUIhm7/BcfVms1Emk9F4PLbE4OHhQd1uV9lsVp9++qkSiYRev35t3+sekXueZ+tOIS3Bgha7BOLFYqHxeGwdoNbrtQqFgjExzHxARuDq3Ukm2He67MA+wszAKt/c3OjZs2darVZqt9t2XN/pdEyvT6CnEDMcDhs4UwtB0Ad4sVXWIRwO22fP53Pd3t5ancTTp0/1L//yL2q1Wrq6urLCX9hXiorpFhYKhXR/f281GwRvAJvOVDc3N8ZWsx6AtMtA3d3dmZQAm7y6ulIgEFCz2TTbhJ07HA7qdDoKh8MajUYWxAAohi92u13F43Fj5drttl68eGGAhz0AxtJROsBQ0Ldv3+rs7Mx8d7VamQ2SRCG7uLu704sXL2yAIjUEJHXMDqG+gz3L5XK6u7szAEKeVCwWNZvNDMyGw6HNBVmtjm2tSbIpXKd2gKJq2D5suVKpaDQaqdfrKZfLWXtdN9CTnLsAKj12O1sulxaXXaYXMHF168RUQMkFbFhx4op/fdjlY6SPkT5G+hjpY+THgZGc5P7Y9ZNfuHBQijApcHVBBYbODeRu0Sk36R7R8raIJICBcWiiYeZg13Ac2Lper2daeUCIoj4kE7A4gA2B1G1/S/ck3sxhkFh8tJ8EXBgKz/PsSL5QKKhQKJhulqNhhqwx04BNDAaD1nkJvTRGsd1u/1vhNAA7mUxspgr3DkhyzMs6BoNBtVotM0hkHvx/z/N0cXFx0gkG3TfrSoteAnO329V6vbYhi2iOYR0Xi4W++eYb/d3f/Z0kqdvtajQaWXccdLnSqSZ8Npup3W5rt9vp/PxcodCx4w/MyPX1tRUWD4dDY6/ocIVzsQ6sD8fewWDQ2pju93tVKhXrOkWBZyKRMKAkeLtH0dgmTE8wGNT9/b2xab/4xS9MzkOBdSaTsT0iiUHj//r1a5PXAOTY2V//9V+r1+up2+3q4uLCpEB0JoIlfPfunU20j8fjevfundbrta6urkx+wBrd39+rWq2q1+tJkkqlkklF2IftdmvDMV0Wnn/bbDY6OzszWQOsaSqVUi6XM605bPFut1O9XreaCD6PjkPSEVy73a7tHRdyqGQyqfl8rlarpf1+r6urKysCpj6DRGK1Wunq6spAB3kBgzmRZyyXS93d3enJkyfGIEuyltZIarAVpE8kh8QXhjsi4XDlDbvdzvYL9hswSSaTymQyWq1WqlarOjs7036/N98l2UQihg8TE0jIWT/XzhnACfDANOLfzGEiEaLBgSQ73fCvD7t8jPQx0sdIHyN9jPw4MJIXsh+7fvILFwYgPb5Nonl0H5y/R25QLpft2BVpBTILgIGe+xzzSbJCNbc4EcYHxhC2jTdTz/OMFYnFYsYIciSK9GI4HJ44NcBVKBQUDodVLpft7Xk8HqtcLqvf7xvLRzEqR5NozqvVqvL5vBXicl8wNMvl0sD18vJS7Xbb2Ad0+TAXsHbhcNicMhQKqd1uq9frqVKpWHBAesLMBXS87MF6vdZwODQAp/sT7BwBYLFYaDKZqFQqmZY6EolYoWQ8Hrd5DJJOjsxvb29tfkUmk1G
z2VS/31cymVSv1zMNLIFZOgLJy5cvJR1ZukKhYKwsGlr2hCALA4OdcbTNTJXpdKpisWjPmEgk9Itf/ELNZlPffPONzs/Pjbk7HA7KZrM2rR0GlWBIsgDYz2Yz69g0m820Xq+1XC51eXlpPx8IBCxpePv2re0bbAprGovFLMAwIBLAXC6XJ52V+FxkCa7OPJ1OKxgM6vb21oIkTC1rDIBgU65tDYdDCyYERQCWNYQ5putVIpEwSQx+HAwG1el0NJ1OrRC1Wq2eFMw2m01LOLHzSOTYFrlSqajdbuv29lZ/9Vd/pc1mo/l8rlwup06nYzUwz54902KxUK/Xs+93ExIYZtYE6Qygg5yBGAETjB+hiz87O1O321Wz2dR6vVa9XreEmmLnZDKpXC5nDDKfBVsHiOGD1Hcg3drtdjaThQSSpIXYStLuAgfJCiwhLC0xy/0+1oKkGUBxWXxOXfg7ZGj+9WGXj5E+RvoY6WOkj5EfB0a+74XrZ2kLz/wKHBl2yC2cBEh4q4f9ouuPWwDKAwIEu93jDBM6iVCYy3+z8OFw2BZhtVoZGwIoocfleJp7xDnQ8UqytpWweQS8QODYNpL2rDCAsDDcK+0ss9msKpWKJJnem+/M5/NqNBparVa6uLiQJGttyWcGg0HTv8MMuEwlGnnYU6Ql4XDY5rCwN67OFokABciwCGj3YfNgALmXdrttxaSDwUCJRMKSBtgrtNEYHyDCZ9DGt1arWcHjdrs9aW0rHVmkdrttQYvi8+12awMLYY45MqboNhKJmKab9ZtMJioUCqYd3myOM0uwi+l0qlqtdlL3EIkcO9u4cgQGBabTaWuLCsCuVsehl7RpBYhgwsbjsc20oD2xJJudQgcfGKZ3796pUqmoVqtZa1YYKvyMOT2AArIHkjXYOiQMBGX8jUQIqQRJjOc9FlNjv64kgZ/ZbrfmYyQN0WhU6XRau93OkpBkMmm/EwqF1O12LbmCBVwsForH41qtVlbUTXAOhUKWaGKvs9lMuVxOq9XKfAtmi1oUGKh8Pm9SApIDPo8C+v3+OK+FomvX5+7u7iwgwxBjC9g18dDVeCPT4Ts4KWIdALRCoWCtl0ejkbG7rmwKmQbJE3sI4OBjMNeAnZvIIgfiHtxiYJJxPhefxq5Go5Ffw/UBl4+RPkb6GOljpI+RHwdGjsfj/7Fpxk8efMwiE/wojCRw43wYGhO7J5OJHTMTsDgODQaD9mAwOzAEaGVhPXB4zzt2d0JTDCOIocCwhcNh06QzNHA2m9kbNW/VGDWOSeHmcrk0EOQoNBAI2KyGm5sbY6dKpZIymYwFBhgLju5hDHB4z/PUbreNVYBhjEQiVsAIWKClpnsLQEbgSKfTNgiOAOwaUDqdNolQPB43UEZ6AlMpyYpCkSm4jvH06VP7DjpGeZ6nfr+vxWJh2vx0Oq3xeKxQKKTBYKDBYKD5fK5Go6FwOKxOp6OvvvpKv/3tb/Wf//mfxrawt61Wy+QVAM5gMLA9Go/H6vV61lJUOs4KcWsg3D2GITo7O9Pz58+13W51cXGhfD6vfr9vbBeBknkaDOvkvgDyTqdjQzBns5nS6bTu7u6seNo9Jnd9BnsmWLt+0Ww2Lch+8803ikQiury8VCqV0uvXr61jFlIRSSf1H9QnoOlfLpf63e9+ZwGcBIJkYb8/1jlgS/g39RTRaNTaAk+nU2PyYJg2m43VZ8CwI7HIZDIql8tma7PZzFq8kvy4Re7MfWm325pMJiqXy4pGo8bS0llqt9tZ3chqtVKr1TJbJmCzFkiIYDuZVE/QzGaz5tMkJeVyWZeXlwZugEsoFFKtVtN6vVYul1M+n7fiW7TwfDfSmslkYrGGf+dZ6vW6DQulMJe5I4AZTDInF4ApSboLIK7mnriIfQAeMIAu6LhsNacYyLcAHP/6sMvHSB8jfYz0MdLHyI8DI//Xa7h4MB4AFonAxBsgb+W8ZbpSC9gm92iOByUAI4Hg39wCWRyVzeQ70Q3D8HmeZ8fdBCxYEt5qaePJfaAj3e12NqU8FAopl8up1WqdDHFkUxlUyLE4QYFuPQQSWAVa4w6HQ9OD02YXEIH1oIgQPTjfORgMNB6PVa1WTf++2+1MMxwOh40ZgGGESZVkgYG9QZuPjhr20J2Mjv6VgYYYJEkARZ/RaFTX19eKxWJ2XI2GdzQamVaeDjYUQk4mE/V6PZ2dnelwOOjrr79WKBTSZ599puvra7MJumYNBgM9efLEBvk9efJElUrF9hSAIDBwv4lEwlgmJDKAFBIAgmYoFLJCVWyaeRG1Wk2dTkfZbNb2IpfL2WwWlxHD9gh+sVjMClbT6bQFsOfPn9sRPbrl4XCoTz/99CSIELDwPexfksbjsS4uLixo4A/UDgAodCxyEzWAhPtmP10myvM8G9C4Wq10eXlpQRy/6Ha75gvEA4DAHcgoyYpr0+m0zThhHog7Z6dYLJqtD4dDtdtt26v/Oo0xuQD+O5/P9fLlS2WzWaVSKTUaDZNdzGYzSyBc+YbnHWtkrq+vrQUuvo1Pu+2tKSpnjyOR45wV9gpmH0kU7DCdxZAyeZ5njCfNBTjFgOGGwSXu0BXK9W+YdJcZJ267MQB/4MLeia/EXv/6wy8fI32M9DHSx0gfIz8OjOTU7seun/zCxYf/UI/uGiPMGSDAcSTHchx5wx7xJxAIWPtPjuJxhO12a2/RPCiLgeHwGbAhuVzOFs7zjl1v0LtT3Mtib7fHYYHFYlGHw8G6qRSLRTWbTRWLRS0WC5sBIulktgBFdzCazJTgqBOwggGJRCLWtpQ1pFXper024OTNHlBkXQCnxWKhdrttgx1Zh1qtdjJ93gVm3tIpPKZTzeFwsOfEaGF5kCS4/991EtgQ2AUKOmFACSBPnz5Vo9Gwgt/hcGgOt9vtdHt7q/l8rs8++0zL5VLT6VRnZ2c25A5pTLPZ1OXlpRXPDodDu3eKG3HGcDisarVqz0jtwnQ6ta5Sq9XKAj7MEgENJ4Udpr5hsVjo/PzcmFHkFIFAQOPx2AKo2x1nOp1aQkD703w+r9evX+uTTz4xOUs2m1W73bZ9+OUvf3nSZYt9owaCgIGsgGGSMFbYEQwPyQ5SF1gn/A0GCqaIC/sMh8PGxi2XS52dnRmAslbj8dhsnWN9kj3WWjoG3/l8bl2ZIpGIhsOhIpGISqWSdaZCXpDNZo2JAoSRrbD3+/1e3W7XJByr1Uq3t7cql8vGYC8WC0sqYDK32635MzOJNpuN2TYtcVkLGDyCsisFoaaAJJrYM5lMTtaU74e9Jx6w9/gbwEPcdcGYmITEwi2OB7zcmMn3cvFzJPfILZCZ+dcffvkY6WOkj5E+RvoY+XFgJD/7Y9fP0jTDvQnYOm5A0gnDxjE7b9ewDjwsxkcXGY7+OFLF0DFwjvcAERYABsINeDg9b+Toa3FgF4jQ+sICUhzJ50+nUyWTSRWLRfV6PWN2eF4MaDKZaLFYKJ1Oq1AoaLlcnoAfnZHQmsIqJZNJWxOe02UWfhjEYWOSyaQGg8HJEbAbaF3tLwEFQOHzpaNDwyKyP26QJxAC6tvt1t
oAoxdHrxsIBAysK5WKPWM+n1en01EgENDl5aUlFTA6BN1PP/1U0WhUNzc3isfjVrz79u1bed6x+9fV1ZUuLi4UCBznbsznc5XLZc3nc3U6HV1cXKhSqRgzgQQACYV0BA26MAHMrJObyNDKl8QB+9tut2o2myqVSprP5ydMLQGNImIc02W6CQRnZ2d238Vi0SQfDw8PVkj/5MkTAxIuZsaEQiGbYYNcYzQaabVamWSIAHZxcWEgPZlMTIqErAH7cH0L0MP3CDTIF0h2YEUlmYYcFmy3250w3uw1a0IiSYxBbkOheSj0OEiUdr9IiiSZbMbzPGUyGd3d3WkwGCgej6tWq6lQKGgwGOji4kLVatX2nuSHKxKJ2LqRWDIYMRg81nu0223lcjkDgIeHhxPGGI2+2xEOeQm6fthqkkZqDrCNUChkcYBk2gUs5BfEOOwgFAqdJN0k8MQ9bN99CXBjNVKLw+Fgpyj+9WGXj5E+RvoY6WOkj5EfB0a+74XrJ9dw4XgEOW7MdRTpcb4ChsbP8IAEJJgit6e+y3CgGyWA8geGCLDYbDY2l4Hgh/4d5oCAcDgcTNONVjgQCFi70sFgYOzYdrtVsVi0trfu5vHWzZ/pdKput6ter2eaT4weY9rv99YhyC1GzGazxioSMDGi1WplcgTkHDwXMxpoq3s4HIt9aduKk3qeZ87AsffhcFC/37euRpPJxKaquyCIE/f7ffsOHBc2ZbFYqN/vK5PJKJfLKZ1O25R1prpzPF2r1VSv1xUMBvWLX/xCxWJRT5480dOnT3V9fa3r62tzOuQVT58+tdkLz549069+9SsLVOl0WrVazdgSnA12ikAAa8e6ISHheBy2DSCllgHbIwDBMMOmAQrL5XEqPO1cSY7wDYJVMpm0+0WGQV1Cp9OxIYisF4kYdj8YDLTdbq09a7fblfTIZBGA8UXkPd1uV4PBwJ6PhAZGjUBG8TCFr0h4SLQAHTof1Wq1k8Sg1Wpps9kYE4etSDI23WXvFouFsZyuJhpQcxs4uFKG0Wh0MuuH9aAF8pMnT07kBCQMxC7ihvQoG+D78V0A2gVXZDPEkXA4bN2q0PnD/BLTYGvT6bTVPeCXbnLuSheQouCPsKIku/x/Vy4GWPzwv2E1sUtiATGb+ATjzuXXcH345WOkj5E+RvoY6WPkx4GR7MWPXT9LDRdOCLvhvmFiOO5RG4GdmyQoI2tgUTgO5CgR+QBSA4JYOp1WJpM56eZE8ME42EC06DgaDo4z0VGFRWUOAwwCGnf3eBO9sud5Bo6SrFgX1pFja+4Hg85kMieaeYyQ9QFMJBnDzHNLj4YBGwdTMJ1OTc9OsSwF0ejQYSsWi4V1XgKwkJoQzFhzSVYYDRtIMMU4YRvC4bANtORIv16vaz6fmwyAtYjFYtYhZ7/f27R5WMVqtSpJ6vf7Go1Guri4sK5Fb9++Va/XMyCuVqsGzgRSnh9bGwwGxsLCcrG/m83GuiZJMhaOZ2MtdrudtX5lKvxwOLSWrOwpwQc2hyCDfR4OB41GI9Nbf/XVV0okEiqVShZQ8vm8SXW4z8lkYoMdkUIg++B+a7XaSQJF8J/P5ybBwF+QGriyIZIaEhrpsX4Ev/c8z0AaeZTLnqZSKUuCsCuYYP7/fr+3+0bTDXsKkw6zBji5yVUmk1Emk9FisdBgMFChULC5M7CRDHqUZKA0n8+tzTDABJjDisF6kTgGAgENh0Nls1klk0lJj6w1nbL2+721J2ZtGfhIA4Fut2ssPMCIXAdmkmSFpB1fwiddlo1nWK1WJyckbjKOPRNb2L9QKGQsKs8Jm0vS7L9wffjlY6SPkT5G+hjpY+THgZHve+H6WboUwuAQZHkrdP/ODb4uqIRCIetQxCLxZj4cDu0Ij4CNkbGBvGXT3QRgw3FhBnjjha1zu6RQhErACAQCSiQSymQydl+TyUTD4VDL5dKChzv8kaDF/eEoBBg2B0nB4XCUgXD8zHrB6LlOC5ixtjis5x3nUMBQATpoonE+wAWWIRAIWFtT1oz7x4gPh4PpcXFY5pH8UDqBMXvecaZJpVLRZ599prOzM9t/l0WCvZ3P59a5JhQ6drSJx+Oq1+vK5/PmGHRZIlFBosLfA4LhcNiKRgEKl9lApgEIog+ez+fWkcnzPBuwCbvC50jH4DmbzYxtQu/uau6n06mGw6FpkWE7sQX2j4AQCoWMCSVoIeXodDqKRI4zQujQwwwaHB1NPlp8ZtcMBgOtVis1Gg11Oh0LumisYf/o5AXjSjJFMKdwlwQQmQx+sN/vNR6PzSYDgePcE4rHYTnn87nZLM/E/4fdd5lN/JbfpR3saDSyImHsUXqUwIzHYyumz+fzqtVqxgQSjEmqJpOJxuOxlsvj/BbaFyOVQXqCtj6VSlniSYIA0ynJ1oF9JDZiP9TJwIzv98dC5Hw+r9FoZHNOaEDAeuJvgClr5NZGSI+nJG7yDhuHP3GRCJBsATCxWOxkFhGxBImZf33Y5WOkj5E+RvoY6WPkx4GR7onbD6+f/MJFEJQej/bcxXN/jsBIgKEQ1S1o49/c4kY07bz1M2gOxyRIwDawQO6m49AEThZnuVxqtzt2KMFZCJIYSCAQ0Hw+V6/XM0NFY+uyG6wFwQYGcDKZ6OXLlzbjgcLc6XRqGzkYDKzgkWPbSOSxexPAi/HQsrfT6WixWCibzSoSidj3EqzRTcOOsUdokOk0xHE44MsQPpgEF7C5H6Z9szeSbF8BSY6TWevJZGL34na+cTsURaNRk7LAihF0Q6FjxxuCVDabVaFQUKFQMPCnm4/LvLAXMBmwZnTrYX33+2M3J7edLnIHQMhNSGA2SARWq+NEeEBgOp1aouJKHprNpnq9niUZJAOLxbGd8vn5uSqVig6Hg9rttskTSqWSttutut2uAoGATZynQBWbpUuS53n69ttvT5JlWCTstN1uazgcqtvtmmYasCYJc4/psQcCMOuQSCRsiORsNtObN2+sboFEMZFIqFgsmnQgk8mYZAC5CYkbbDzH9sHgsSal2+2q0Wjo7du3lgQhr8D/DoeDddKq1WoWn9g3kuD1eq1UKmVATzBer9cajUaW1OGHJED5fF6FQuGksBl9PKw5fkZ8wo6Wy6UxrsgodrvdSQLEUE6XNQbgkfngc6wNJwjcL/YeiUTMh1gHkm0AE5tGIkLcI7EEkN6nT/evH798jPQx0sdIHyN9jPw4MPJ910+WFOIE3IhbaMtbHxdH1vwdi8kNo2d1j+9YSLdgGLYNwAiFQhoOh/aWi96UxXC14wRNDA92izdT3nKlx45D7vEl2lYYGAIETNgP5R4wFqPRyAY6ugV5MJswKhQH9N105gAAIABJREFUM4gOIHODK4ELBojAEQgEdH9/r1gspmq1aoWL1BAAHrAUAIEbPDHOcrmszWaj6XRqjo9kAxClwJHjXBe0kR70+31jgUKhkLW2BcSQDywWC7sPOuoANDC0DNlEU813si/syWw2U7lc1mw2M8djT7BTHIQ9R4cOwwmLCjvJ8yID2e/3J0Afi8Vs78PhYyvZ29tbZbNZ9ft9u0905
+      [base64-encoded PNG data elided: two consecutive "display_data" notebook outputs, each an "image/png" matplotlib figure with "metadata": {"tags": [], "needs_background": "light"}; the binary image payloads carry no recoverable text beyond the embedded "matplotlib version 3.2.1" software tag.]
h0G7/AjHZf5ORuqHjqGA0Q3t2qHmN+maaZiK2l+SozcPTFykhka2DyfGLlWp8TIjcFIpsTIvQcju2iX0VOTrEu+DAiqpBpaqzxqCG7nJtglLpVXx3Tn1MF5DNeP5VEncoClzs5bMWoO6GSNz8oz/q5W4kp0tE7FfOM8V3HUiYHp/bBOr5gPTTIuSHWu+/0+Nm3aNKmeRVKNY7zvnAEl+DNouECu+WGASOjG1RIXyFxt4kTdBZacMLh66mwU4wwGg4ktgvcBBxwwORdbKnbu3ImdO3dix44dky0eDgyB6UVIzHU8l6BJXRd1bmHC/oOJzf3zHQ6Ywl/Yj1ycq++7XDFrIaRVWpWlloj/P3vvsiNJsp3rLXePe0Teqqq7du9N8uiQ4pxDjvQKAgFqwAGHegUO+BYEBBAgwIHAZ9ADUCOBU+FMCBAiBXV37a7q6spL3C+uQeRn8fufFlHV3VkHWUk3IJERHu52WZf/N1++zFxt14E8l/6jdWk/cxjEtT6x4nydFOlvOvnIjd1l65ioMsj1SY+1a7h+fmk58mlzpMtL/bzlyJYjPwtH2jrpliOfD0eeKo8SrswBKcWdL1fcYd04tBwDWB+0G4U/JtVHte70PhZXIPUruGj9px5LqjGq/I7JJ0fObsSeO5+TWXrXU32IaDq4KcAdk6XrJReNVNlqPX5c26dNonO9Xq+RW64LfclF55qPkYiSfc5xOF/f/ULKQQ4UVC4alVUyOUb0+lmdPxel4lpd5JmLjtZ1nUi13+/HZDKJzWbTIBYifXVdN+yTiYza03q9ToSiOqZf2n5u4tK0wQNwnloL6JMcJRUnIm0jR9I5n4Eo/VyfuOX8h+Ik6ITiBOjE4sTEuRRPhXDs+RjGfczmVD56jk/2fJzeXlt+fmk58olyJNcUzUloy5FNXbYc+fk4UuXQcuSXz5GnyqPuUpgDqIjIGp8Kk0dxHgXQ81xppyYCDhpacoaswtPioKtOf6wPx5R2iij8dzXSY1EIigJ0rt4D2R0AJAeKWtxJOOb9VRB14jxGVj5pIPIGOXS73UQomm8OuQD2usBX/0Mc/Gm6DH1RYtF+93q9xnH0rX3NLYrMAb/agJITv/nvHtH0SCEpHDnAVR1CbshyMBjEcDiM+XyeInqr1aqxGYnK8GPkmSs52y6PyEjH7HXmQNDtSmWXq5//x6J+fq7/7rrxcsy2fbKau8bHlptU5tpm0pKrE33rZJLobo4QaEeL+3COGE/JpC0fLy1HPlGOTP/z70jL9cPbbzmy5chjdqK20Dzw8LeWI58/Rz7aDZcDjxZVMN+1o16OGZMe0wG6cNRg9frcoz8nOCe+HDE4OPrOTD5OJ5VjY/7YecfAfLezd4uY3Bt1xHFi0/P10a2OR2Wv8tV2HYw5hz7qOZCEpkHoZ6J2mhKhkxR1Av/OMZcXn91u1JmJGiq4RzwkC60bmSiwIAOiYQr6p+Tm7areSdGApPQxvV5blmWDjCGV1WoV8/k8bfHrkxZ/2arK16O12q8GeJXHiciBi3IsHYXPepyJgutCz3V/PkU0uZLrp9qR9vEU2Kp95IhC/6sN5rBAdavkqv6rvqvlY7L1/nkf9H9bfl5pOfJpcmSuHLPxliNbjqT8Wo7MBQlajnz+HPkoN1wOYjnQ9Q6p4ZwiilPFDZo2vS3tnwosd9fqY8iNw4+rcbqh+mfPDXVQ0j4fcyQHsYimI7oxNGRZR6MNb1cjXnzXOnJyoSiIHCNejbxVVRX9fj8RBlE7SAQg1Oicg7mO0fvlDq/99MiX1wFgq1y0HNOJ1qmy02t8kqM6izjsduP648WMTl51fcipVzlznP+9Xi8Gg0GsVqsYjUaxXC5jNpvFer1uRPQ8Su3jpF0H9LzPPtRPjuyPEafrSM91u8yBYU5XuQ0MTmGOT1xzuKW6PoaFei4y1vPdZ/iskwwvuTSp3KTSo3C0lyNh92mV76eQcFselpYjnyZHuqz8e8uRzfNbjnx8jnQecNm1HPllceSp8mg3XA6A6vR+A6DXeXEDpTjQupJcWKrY3HHtzzHwVVDKGam2z/+cQ+h1Tmp6nrfjgORjz6WfuLz5firtAjnoY9acc6rj62+5R/0+XsAHslDC0FQJXwSseteFt0QtFfBVrm4jXJMD+BxQ+DiV0BhfLl0B4I6IRqpGzsaOAayPQ9vTseRSYPQ6rnXg7fV6SZ7dbje9z4TIntoQxOb1RjR3njo2Fh0V/VA7OvY4ni2FtU2uU7JzQHY5q/05JrhN5HTiPu/Y5P1z28udq4SvW2nruTmZMHaVpY9RiVbbVBtSLMi14b9xzTGybcvHS8uRT5cjIyLqeMh73seWI1uOfCyOzMmF81uO/LI58lR59DVcqvQccHqHVBmqADUcV6rfQasAdXtPPy9HOO4g7pR6Hcanu+v4mPS4//dxn1JizsCdaI7JnH52u92DQRSkDTT75X30yA2pGK4L7Zv3j+uRXa/XS3nndV1Hr9dLx5RUNFrnoMNxBzkf9zEdYlPUredwXMGN1A61k5z+KDmHo29OLhS1Uy3Imd+YYCkQaF+5xo85yHrEXPP/Na99vV6nrXNVXnqtTxjclhmlTmRypJoDWtehyzbnT1pHTgdat8s7d33Od3MY4d/dbvV3x6pj23fnIoTHMMDrPjZOT6PhGNfmsEjH0pZfV1qOfJoc2YGr5Em882LLkS1Hfg6O9LG0HPn8OfLRdinUu8NTIK4ddSXUdTOdQB2c87RoG2qMgL2epwCHM+SM0vuo/+mTlqMTzowRHHsMrd+VDDA8leMx8sj1tQH45BJGUx+qt1xkwoFQ+6Ggoo6k57ErULfbTd+J0mkeuqZQ5ACc3zRipOeqHFVHOX155IdjvnhYx6fpCNi424Be5/3wF206Qao+1DacRHKRUb4rWNGW60jBTMle5dDpdB68xDSXquEyzU0K3Z70v/uoFnz4WMoQRUn0GAG4DTg+OQmdIiva9PqdHPw61YPLxD+fkqPK2sepdvepOsnVm9PLMZm05dNKy5FPmyMpehPRcmTLkf89ONI5QP+3HPm8OPLkDVdRFH8eEf9PXde/P3VeDgi9c24s0saDY25IWr9fq8LiGgAQ8HbA83o45xTROGnk6srdtbsTqCHkDF3r1Lts748/stdI1m63i36/n2SxdziurI8SgKd6AHZsg4uzKMDm9FcURYrO8R/QIlJENI86PErkRKJywT5YHOwyhWiQSQ7QtD2OscMTstGJUVEUDxZdc463rfZEhE51cQrAqE+BCTsG0PWY2izAeszfcv6j+tJ+R0R6OaXv1pTTt6d07I6072TkMlRQzo0jB8w5kFfC+hiQe6Sf4pHVHMj6pML7quPMEcCpiWhuHCoz+pg730nWdevXOK6d6l9bDqXlyIdtfWkcGXF4Eq/4qvJoObLlyMfmSP3N8aHlyHhw7Dlw5Mkbrrqu/6+P1vDwmsaA3Hh0IA6AgACO50bobThIuECoB3LhHRIR+S0+KQhe+3TszpxrXalusDkgdyLR67Ru3REn13YOlHa7XWOL1qp66BQ5IlHAjoj0yFzfbUGf
9M3a1KVpD5qH3u/3E3Gw6xKypT3VRc6wtV86cVBAyJEQn9XB9BzqA8jUJnUXJj1Px5xz0rIsY71eJxl5REzbUsJTMqzrfdRM3/viKSIqA77nfIU+MQ59oSQkRYR0u91Gp9NJaRMaMdVyrL3dbheVnOo26hMwlaOSsPuQk0bOx7yPGpV2+8pFBrXPbnvepgO9PzXQtrwd/6xY6e34NTlZUoeTIvX6C1p11y6XlxLlsYlJW/al5cgvmyMrGSNj8uh+y5EtRz4+Rz7E6JYjnz9HPkpKoXfKwV4d2ElFwUk7nnsng16j7TqYKwlomxqNUcG78DkPg/Y7/9w1OYPMKeEY2fo41AldXsdkrwaNAR0iEA8fYatcdAEmQL9arR5EViiaPgEYKJlozjmRoara77ikRK6Lf7V/rrecTLVv7sjr9TpNJOq6TgBJXdibg3G3220Atr/p3nWY65/Kliiq6g7bol7qpv7d7uFiX9r0SRB6c5moPLQuZKF1dDqdWK1WyVb43u12GxMw7SPFU3A8quwy+hRfVvLzCcuxSZXbs8oyFyGP2E9MNCKq1+aIyfXghKTR0xzeUb+TsOpVsYuJhOOUk5j2w/FAdaa61/NP4Wyu/235+aXlyKfLkblrW45sOVLreGyOrKuHONpy5KH+L5kjT5VH3RY+51QUBc6yPLzkzo3DBwbQeO60DlqVzG8azVFhOkBxzA3nmJBpU4nKgVYjOjllHCMTNQ5AVknVr9e3kisBQQaNu/kkt9NGo4CrIKsGrUBIe5pjrjspkRah7wpBRvqnUTJ1VgUWZKT24GBGVKLb7SYZqdw4j+M6RpXDsQmHk787s+qn3+837EnfscLiaK/bb1hU5gpCbre5CZ3aFHUqYSppKsmiE3ZjWi6XsdvtEvCqDx3zce+LT7hUhg6S/rvK5tiESgH52PWOHUoEOTyhqC7UXryo37uO1E7cpnUcub7nMMrb/xjW5Pqi4z1W58fqb8unlZYjnyZHuo5o0+XXcmTLkY/JkYoD+ELLkc+fIx/lhssFrM6e66jeUeciaJzvURPq00HqnbY6ot71H2s7Z8R6XBXoxOdj4PxchN/byZGIy6Cu60b0TeuHJNXBvA8cS3KwMQLgSliASFEUjZxkNSQlrLquG9E6ctGJ0rEYuCiKlKMeEY0UFsbhawrULlQHRDWoExkoqeSitNoe7eiEgJQUz432SC51uK58MqG/6XfSEZyAvF4nDvrm19NPX3CsEzfAXHeA0kmHy4/ftX/L5TLJ0SNe9Fcjhjmf1clCbpz0X2Wt9ausdUKFzareTgGv44LjAzo75ctKHHqtjjdXcuSSu0btxrHBMTV3Lb+pvdBvxY3cRDY3eeWajxFKW/Kl5cinyZG5CazqqOXIliM/F0f6uFqOPFz/JXPkqfKrb7jUqCkfIxIE5WCnQtDOq/C1Xb1OB+x3+doPFKSGUhQPd09xAaqTaR+8HXVm71+OpBpPoTLt6m/0lWM5J9KIWFNHjK8ZadWIDOC8XC4bzs1/3thO/yAQUiK63W4MBoNEJBq16/V6aXwOOiobBfhjUUQiX/QLUqRoZO6YY0MegKrK1wErF1Fz8nCZKjkyZp/4aG49RY+pXNwOkAckoaSh/VJZaOqDRkt9IqITBfqJ/rbbbXS73Viv142JjBJ3RESn2su0iAOhoetc8cmKHnOQ03MpGpU+1oaCKN9zdu2ReR+b44USmN+caL0+6UNH+t1loLr0sTgeogedNHjkVNvVlCcdQ843c8Tblk8rLUc+XY6s6WvdbKflyJYjPydHKopyfcuRz4MjT5VffcOVU1yORBCkKt076ODv9SkRcdwNC8f1yJf3SwV3ysAVVL2/ubt3JwMfm5/r5OOE6gaTM24HPyUHf2kc8tG7+LI8bHuq9er1OkEAwPWv0+nEcDiMwWDwYBEw5+ujeMBK68bwc7LGdnACJVYe/6scFGzdXhg30St19qrav2PDScgnnDnHy0WPtS/q6Or4bktaj0e21D6UlP036qYPRP5yPsa1uviXokRIm25TastEVrdxHwWNOoH1p0yYchNBl6vqQa/RP5Wjt6HlVHsuB4qPw/WsWKS651onnVxbPpFw4sxNenyi6BM07b9jqY8xV98xnGzL6dJy5NPlSDgo12bLkS1HqmwfkyMjmnJrOfL5cOSpzYMe7cXHDqA6QH73R5t6vjuFD0h/yzkyhbtvvcE41WcFkhx4Uyeg5OCk9TmB0GfqyBk0oOp51Hq9GqKDgUcjOKYGc19ro34eowLoGsWr6zqBKp89t5o/3h+iC4D1mC4GZiwOUEp67ngu37Is0/k5MKW+HOCqPlTv9G+326Ux0747q+tHz9Pf3KE1UshCTwUEd2qfJClI+USM4wpiuQiQyt0nU+qbudx9CMEjdvzG+bmIJHXmMCEnW/VL1S//fWy5cSoIEuHUcx1XXMauk9ykU7GAyQlYQV1aT25C6XW7j2u/j5GT9jEnB7dbZOiT1twYta/qV235eaXlyCfKkavVfYXNoGTLkS1HflaOlLHknrq0HNnU25fEkafKo22aoYNVZ/AohQ5AFcExNTB3CgcXbUfPcUGduvaYMWu/lQD9XAcel4v3wxVc13XaVjY3LpWNbvOukZmI5uN/ImR6vRaVq+pGSdUfuQK4LLQFmCAbyERTJCB1PmvUUEGNot9zMtZFtNTjNuLRGHUSBQfNc9d2iXTgOETyaNPJPSIeROTUB9wudFLhkxjPW1cZ8xt9oM+0zXcdq47XZaFFbUkjRegcItGUC0CT86ifsaX1ANGcODAu7Y/7Sa4vuSgm57sOnXTVFxyrlBD4zTFHdaZ+rOSqdpAjuhxIe1TOx+MY6fio1ziR6G86bu2L2qHLXOVE27nNCdryaaXlyKfJkXXRxEja00lry5EtRz42R/p7KluOfD4ceao8yhoubVg7oxMwFYwDsDuM1s3g/biDhf4G0BwTsAo2ZwyuZAXYXHRRz8sZqyoR5aiDeR06Dh2ngp8DZkrlksfiCgZ1/TA/WPvnzlnXh6iiAzg56brol2OAmJKHG3Kv12s4Hn2ALHI3ido3lbXfXKotuUwZsxIHelCi1vO0fh3PsSgP50FCnK9Eo+fngMtJnnb0sTfRVaKw3heftOQmMy5nQNMnEvRNZcBnP8fJoY7T+nRyU0AnUqu4oICam3h4wWZVX9oO9qK6csD2KF6u7+rvGvX083LYc2ri4WM6ds0pvPNx49tOJBHR0L3jwinfbMvx0nLk0+XIlPhRxwOM4LqWI1uO1L48BkdKwlGDn7y0HPnlceSpNh5lDZcKzIXiinbjxGi8Hj1+7M4/J0TOd6d1I8k5mYKbFxW6OrwrVtvKkacqi+Ln0R/fVceV6UYPkeTI8CCHw7l1fXgSwWcFDd3piEW/+r/T6cRgMEiLgDlONA9SgWToP9FKdKQLfCOaL3h0O1AdUT+RJLXBXGSYMfoER3cfUvvR6xXIdeLkzkVfGIPrXPvlkx6fPLg/5SZQqmuIxSPO7p8e6VOfVALzqI63nQMXyOjge03/cFL1iZrarfeHY8cmRn7MJ2a5lAm
d/OauO1av25X3LzfJ8DqoX21Y69aJgf7ucvc++mRB8Tg30c6RcM4ucvbelo+XliOfMkc+jITrxKrlyJYjPwtHyjF9OtRy5JfPkafKo6UUesfUWHUAGv3IAa52+BhZqHC1KNjw3fvl/VXH4L87To6EtL/u6PrdHZrfeeysMsotttN+q7N5FEn7TFQn4kAwVcUuQweZq6z10a/3eTAYpL5rWgQkMhgMGkTD+0UAe5UhY1UyyclVx8y5Hr1ygvH3jbic6I/qCV1QFFioU/ui7XMOYAiJUI8SmE88FNjdXtw+cwBPfzS6Sr0aPVIbczLW37U+bYdjbmfaRs4+nRRO+Zy2SfFJY6745CHnr+g9518uf71edavyU3t1XXlkzcfhY9Y+ut3r+a6rHFZq8Qhsrp85ObsuHdO07rb8vNJy5BPlyOJQBzc1OhFvObLlyM/CkZbK2nLk8+HIY7qIeKQbLu2YCjEH7K4oSs4hdCDHlKztHhMuAOZORf1atE49z4sKOkckbnS54xEHg8gZugI/RceROx8j0jSA/XUPZe3RDJWlXsvxqqpiNBrFcDiMotinPfT7/RgOh2nLW6J6RMWUpBTUHWRVdw6watjHJheuRwcCtzW+q9xz9uN6875Qj5OLj+MYoGrajOtBAdzTSzwy4xMU9wf3I/SZI5+iKGK9XjfkootdPS9bJyc+zmM6c1/xiLvKJ3ety1p/z33Xul0OuQmfyiw38clFJL2/bps5AtKxez+OTYbVX9Sn9BzFz2OTRIrbn2KMy/gUmbTl46XlyCfGkdRZNPXRcmTLkZ+VI4uPB01ajjzI9kviyFPlUdZwqWMeA3SKOkVOsEooagg5UOAaP4ei52vkS6/1x545ElEAPEWG7rwuI63bI3E5J1Dj9t+93x4NdUCudweD1nxfrZstYPUzdZDLHRGNHZdIl9AXOxK508XDRbGPcJG3HXF4vK/6c/kpEKtT50jfI5uu62OA5iABYKrsXZ6uV2Sm42C3Jc7zrXQ/lQAV5BU8dNw6DvctB21knyMs6s8t4lUSIqKncvX0nnspZX1M29bfFBiPTdS0qH1gpznZOsEq4TuGHNOHyt0ng96O98Fxg/N0YqATFq3vUz7nxqE6c1w8dp72S+0G/3W5tuXjpeXIgxyeGkcG19TRGJv6eMuRLUcin8fiyKJzeBKp8mg58nlz5KOs4coBvYOcHlcw1YE5eLoBqOD8Oiciv/HQnWq0Hj57/zj24MZFxprrn59z7JiC+DEidPLzenKy9X4mYEzXN+tSQNhut2kbW8YF6BCVw1khC6J2vvWtG/Ixg9aoEDahYKJOqETi8nF7UntQx8/ZpUY31CbcLtwOFZi0PcBGAZyJljrksbY+FQjVDj36pb8zdo7R7rGIupOnT0C8uIzruo6iLB6c4wCeIwc91+oiUf0AACAASURBVAlR//tYjpGRTwRzxKbH3Id0fE6MOf/O+WxufLkxeV9O4aPKQLHOJ70+Xs7L4ZjLPkd6H7tZaMvD0nLkE+ZIwx+vr+XIliNz9f1ajvRdCjmn5cjDubkxfQkceao8WkqhO76X3DE97uDBsWMC1HaP1efO4g57zGD9GidMJzzvS84pXUYUDIHFt06K2p9c/3OTICe6/XX3xhIPH/FST10f3icC+BGJ8+1sdZycp4Tj46RuLaciF7kIhpJADhS0P9SvgK7Hcs7v8lcZamTE7Un/syDX687t7uTO7cdzYKY2rQSq+szZs45ZgTAXlXTdAs5899xn6s9GoaN4AHDanuv5Y5OqB/VnfEs/u1xyUfwcbuVIzonZbdBTX9xfnei1KPG5jarefMzefz0392TCr9Nzj9kCJRcVb8unl5Yjnx5HpnPj4YS+5ciWI2n7sTlS283JreXIL5cjT910PUpKoX/2CZafp99zA8gRgtbLdV48CqMK1N+85BRLyS3m5JxT4J9z6FNFAcuv07adpHRMCkQK6A3Z1w+BRdvZ7XaxWq2S4Wg7vnsRDrDZbGKz2aRInC8G1igVYMSYtX8qQ40w8F3PcedUXalMc4RJ0e852eZsTWWuv+eigRHNFKKcfRLZc7LwcbgufDKTm+i4TN0//bMSk+qI9AjAUm+s/H+SaYW55SNDOTzQ/ru8VWcfA9Vc3epfOfLwunP1ap8UqE9hi9ppzuf0nJwfHJPZKULR63yCkeuf207OL73dtnx6aTny6XJkbow+mWs5suXIR+fI4uHNssu45ciH53wJHHmqPPqmGTkj887klKi/qSByhpRzoIjmI+FTgsgZh5/v52jUIne9f3bF54yPknN4LTkj47sbqvZfjaMokE1eJzhbXe8jUJ1OJ21Nq3nrjN8fSUNAnJ/TkS4kRR6aB65ACHiqzj1S6QB8jAxcPznbYDweqXKdql16tMZllGvbCc1lRNG0Cm6eOaYRHvp1LKriExS91mWpRHks6uxj8/6ksdyvGSzi4dhc/m7XLpNjwOi612u9f3rNqcmUjzVXThFh7inQMZ/W9r1/6Cont2NjPRWJ1ja9D/iVRs6xtVwf2/LLS8uRT4wjad9wquXIliM/N0f6+FqOfFjHc+PIR7nhcgLwzuYGcQy8c8ajBn5sgDmAzNXrER8/T+vUzyiK6JQa+zECOVZ/ThaqeO2jAmMO1PjTl7S5jPdtPhyzA4jKebvdxmg0is1m01goSiGXnT7udrtYLpep3l6v1yD5Yy/MPCYvZHBsESJ6UBnodTmAVTmqjHO7J+XINzdRog86Lo0iEtH06/jvgKH1cJ7urKRj22w2Dbvkt2NjoA6VncpC6/d+ADb8cX1uQXdVVVHU9/os9mNiW+EcUOdswrEjhzHuEy4jbYe+6vVOpl6OkaD3E5kQ4cxhgvury0Kv0ehorm0f/zH5aTv+u+tcz8vZ67GJQls+rbQc+TQ5cncfGGJ7+NxEsOXIliM/B0e6LluOfP4c+WgvPtZBaPmYMuiogqkb3Kk6P+V4rn9q+OoQx87TejSKpH3OAZDW50am0R+vJ9cfr1PPGwwGsVgsEsj7I1wXTa4uwBHj1GicRuUABqJ7mhaxWCxis9nEYrFobIGr7eljea7LASBOpQauESyu9bE64GhUgt/VYbX/GpXkGm3PJwX85gt9Oc/J0MeTi6QpsSth6BbFPumiDurPRTl1LGqvOm7aqKoqrZnQ96ZoH5xwqEfHU9fN6LzqSvvu9qx/HkU9BrQeFeeYyswJXCepObLS/9iDyohrdU2Cg/2piWpuwuJ25tjhx3KTptzYfTveY9js9voxDG/L6dJy5NPlyMPFD2XUcmTLkZ+LIx2zW458Phx5qjxqSmEOFFxBKFOjOQ6yFBV2xMMtcSk5ZalS1ID9d3/E6JEN7cPHlJxzCu279kNBxiM4DgbUmTM0fpvP5xER0ev1Gm0eHmVDWs1x6GPSoigScUAEZVmmLW0VaNm+FufhpY673S69VHK5XKbdmWhLc5w532VDf9QZADU9T4GYz+owDhqqd5UzOeTaDrbhW6jqtQp4qpMcoOZs5BiwO9D6Ggmu13acBF2eOdJWe2Os+s4VJxElYfdF/SvLMqUURt
QN+eYAWuXh/cuBnutVZZUD8Fx7KiOXZ64OPU9t1CceORnr2HLgrBiV669fp/aZm3B68etVd35tbjxtebzScuTT4kjdTVUny9rHliNbjvwcHKml5cjnw5E5OVIe9YZLy6dGVHxA+vmYUfjxnIHiHKokBOiREi3+mxuYt6PjcFI4ZpA6Bo+AeMlFdbQPfMdhy3L/aJqUjsN51HPIrdYx4uxE287OzqLf76dc9cMLlJu7MkVErFardJ6CMXrY7XapDh1rLg1Ax6nEpZMQjZq443rEQsHYiUgjeJznUT3Vres+NyGhT/SRKBjn894RBXgfv4+L491uN73bw2XkoK79cVtysFe96PtDtA70jp5Vr6vVqkG8+/HJpCaDAznfhrS8f1pc5u4fOUJxks2VYxO73DnaX5/A+Hj9mhxQnwJoLzqJ8nG6/XCutu1E4rimpK/n5fTVll9XWo58Ghzp18INLUe2HEl5bI7Um7ZjONBy5JfJkad48lF2KXQH1t9cUMfAk2tzERcVHN9z53ONtuGg4XV4X90AFYSozw3VFwrnDDNXv57rsvOoostSDUblgbN7tCW1I2TAb1VVpWO8pHEwGMRut4vxeBz9fj8Bh5KWR/50TPr4WPN2kQtjPEbuXAsQc163200vSnSZQDYagVUZ58gBGeUc0eXvoKHEEdEEUScZJUC1h1yb2h72Vtd1Sl84BRK5fmtdWtT2uF7bVPvY7fbrD5RUOS+XbpLWcMVDG8xF4fnsYJYrx0iTa9X3c9fmxk//mYTlJiXqa7mS29ZY23JgjojGZMN9mnPV545NHlyOp8Z5agz+5EB1BU6s1+vstW3Jl5Yjny5HlnY+fWo5suVIlcujc2R1mHrrjX3LkV8+Rx67LuKR1nDxX0Gcz7k7cT92DFR04GrYlBxoa39yxJEjvMZE0QyJ+mnDgVD7o2BxjFz0XH7Xt6+jMPKS3TAoustRRDwAa62/8U4I6RvRtrIso9frRa/Xi8FgEFVVxZ/92Z/FX/zFX8Tf//3fx3Q6jcvLyxSxU2cF2CEplRntaKQK0lLnR16qR9rwcdZ1nSJgGv3I2Y7KWsH2mANyrdqt6hswP+awEB3Xd7vdWK/X6bj3i7ZyEwtNJXHwYLxKml4HcnL7y9VHPaTAIF+1HfVBPrseVBbyLR1zcnDgVR0xdpWP6kPtxev2icSxiZvqXBeX52zIbYxjucid68EnCnqcCQg24td4G4qveszr9/4d64NPrDQq7ph6DM/bcrq0HPl0OTKiiS06rpYjW478XBy5qx/e6LQc+Tw40nWo5VHZUzuiwtCOE105Brp6voIN9fOnA9QBq3Ep2Lsg1JG8nxqVorhgVZEAZY6kvPi4u91urFardJzFvA4yKgf67SSMYW42m+j3+2kXpP219+O7rwO5dLvdlNNeVfsXM/b7/Xjz5k28fPky/uEf/iH+/M//PJbLZcpHV2DlepWzj4++QzYQDy+A1HQMrtUoSi6SGXEAx16v17A5jpNTr5EPnaTocW/bSUSJ0OtTu6B9/ezOrLIiurler2O9Xqcdr1i/4GktbudKUppKARCrXSJj1QHy5zi7ajFGnSA4gSro07f1ev3AXvEF1yPXqL/55IzrdcxaV44otK+5yLB+VwD1/up3+qO+7tilUUytX9NPdAzapj6RPiZrxUfFQi257zpO2qX/al8uzxy5tk+3fl1pOfJpcWS/32u0q3W0HNly5OfkSLW7liOfD0eeuuF6tG3hNQJGpxGMd4ri5KF1OZkoaOI4/MbjeD9XBelgGnF8Z5hj5+jvCjYRh8fh/FeD04IhQRr9fj+qqkoLepVY3Cj5nJOl3/kPh8NU5/7cJsDpS/uUjBjD9fV1/O3f/m381V/9VfzN3/xN/Ou//mv88z//c/z7v/97ROxJ6M2bN2mbWwVKwEkNFZDq9/ufBML0zQ342ATAHY/f1YaoT/PQXe+6u462jZyQjxeNYvlEBcLgHLVf9RUHxpye1+t1AgGP3qkcsDP1GdYt5Ap9JmXM++lyVvthXJy73W4boRyVId+PAbaStEYO3Se03x5R10g1RY/l8MhtjJKbyKhc0Yfbag53dBzUrbZHW4pdTpraV2/Dx+eR2tyYc33yuvEZTQdqy88rLUc+TY6c0ffiwA8tR7YcmSuPzpGmL86lnpYjnx9HPsoNlxuFK0YNMuJhuoQ6DwPkOn0smHOu3N2qboueMy4VEm25M/tnJwUfC/X7nb2SYKfTicFg0HDW5XIZg8EgptNpI4IV8fCxr45DwVDbKMsy5ZOfnZ3FbreLu7u7oPtFNMeii4chGXZQ6na78U//9E/xL//yL/GXf/mX8dd//dfR7XajLMv4t3/7t/jHf/zH+O6771JKA4CrIK657+pUjHW326XrdTtVlb8+Nck5LoZe13UjvUKJgTFqhDNiv2OVb+OaczCNeijIKRGw0BdS5k/BFjByPTJWftMIH+25XWrOOeNhHAosahs6TupXvTCZcRmoXtSO1+t1g1h0HBQHe9WhY8EpX6Nu/eyRdtrOYYViCef675oLniMu75u3rbJEtrl2c2BP0UXRWr8CvfbPcUaxiPNU7toHx16VWY743Cba8mml5cinyZGTySRuZMyR+dxyZMuRj82RuyM80nLk4bwvlSNPlUd7whXRjLbwWR3DO+kOkvuu/7W4MWoffIGunq+gqec4efCfHG6O5UjJDUaV2+120/axu93hxYdlWcZoNEqRx6urq5jNZhGxBzh2NdL+U7f+Ua8CG20MBoMYDof7tIQVqn54p05OclEUsVgsImIPwKvVKs7Pz+P777+Pv/u7v4tXr17F119/HUVRxNu3b+Pu7i76/X5DLkokZXlIi6DOsmymMXC+ThycKABqyEKjUAr02Jo6DEUJQ/VMuoHuKqTy5TwIQvtN2Ww2jfeo8AcJQihqh+ymRIEcjkWe6LtHdZAd8qA9HadORLT/PvGjPq9TfYVUDo6rv0JuZVlGcb82ooiiUSdFZazf1YeULOmrRit9gqIEz/V6nUbatE6XbY6I1L6O5bKfwiufzOUA24lVi05aVKdaJ3X5ccU+HWuuX9onHZe225afX1qOfJoc2ZWx6Q0OpeXIliM/B0d2Op1Y3tfHdS1HPg+OPFUe5YZLlaOg647njkpRReWErnefOlitT43OgYBzATF1No38qKFp3XxnrCrUnICLokgLa+n7bDZrREY4ryz3+dX8Xy6Xqa8aSfAxY0zD4TAWi0WDtKuqil6vF+v1OuWOTybjiIjodLoxGo1ivV4nwhqPx7Hb7WI+n8dgMIjVahXT6TSB62QyiW63Gzc3N3F9fZ0IQZ0d59VImeoBoGQculhVo1+MUSMQlN1ul14kqQCNcwF0Sp5Ksjmdqd0gd0Az56BOnLShkTn6wTVKEhCigh39YwxqNzoGjUxruoXbv/fNQVtJk2MRkaKPGnGkeO677qqlRAXZ97r3ayOKaNiy6ssn8roQmXrVX/lTQPcImU8wlQA1Gq7+59HY3CRTJyx6ntpEjqRyxYneU15chx7F1z468am95qKPLqNj9uJ9VTm15eeXl
iOb5alw5Op+TWJVVXF5eRnb7TZWq1VERMuRUqfaYMuRv54j1/XBD0glbTnyUL5kjjxVHvU9XApyKnw6Q6cduPldBwsg+AJD6sh993pVwBibLu50otLcche2kkxEMx9dx881m80mtUV9eq6C8HQ6TZ8Hg0HqF7nry+WyMTaIh8jYeDxOL1AkKkRUjXrLct92p1Olp17Us91uGy9xBNgXi0Xqy3A4TIt/WeSrulWZUwf94/OxSCty1PdvIEfXj5M79fnuSG742BVRQLc72mGjEb/Woyf+G/Xrn0dN1C/UXpWUNXUF0lDQ1jF6PQ4e2jbfNYqp1yk4RkSDtJSsvaBfFjNjsx6ZpM3chJLPbMigMtEJiMoTO9FxOwmrjymxY3MuU353/audK8b57y5vPaa45gCu/cR2Ne3H9aw7tqk/+OTCbTCHhz5WlZXqUNNx2vLLS8uRT4sj1/ebYRRxeL8W/tVyZNPuWo58RI4Uv6Zd2mw58vly5KPdcCEkBc5chEANSwfiv+vdYs6BGLQLxJWpxzTC4ICiUQIenXu9fPfrI5rvr9A+adQKQ0GxCq4omNQFSAUH1WgVb6Zn56S6rlME7sOHD3FxcZEW3kIyZQFARyLos7Oz2G63MZ/PY71ex3g8Tg7DfxyPyCOgojnnbrBKJDinRlzU6KlD/3KpDf4dW1O5067qFaCk3w48lLquo9/vJ3nrubStoKvA5TaIPUFc6gMKQlqnXoutYms6Tp1Y6DXIDLk4UDOB0Cijyoo20auCh4KvykDXgdDXNHHYMc7mO1jUBlRvqmPts+fmq6ydAGmb3709xSds0oFc61LS4HfadH8+hkU6IVCMyUXXlCyO4ZgTEwX/UvKtquqBHjXVQ+1RbVknTt43x722fHppOfLpcWRvNLrvfKSbqeVyGYvFouXIliM/K0dqaTny+XDkqfKoT7gimsCtCnIjQZmqoIhokJAqkwFFPMzj5FpVqLfn0Rc9x5Wjv2nxMXnfvB9coyUXiQF8VAZE4cqyTItw1TiJdGiksSj2Oy+xyJhFm1VVxXg4jlhEVNU+xYJ+EunTNgAJ3a6Wdl32jB2S9v/qHErSHv1w4lDZKeh6FJjv9Fkfo6vdqaMreGne+3K5TP+V5Fx/3lf6oW2qromO8lllf8yOiDIxdrVhJQ69jrY9kqqA4WByDADpq0+aXJ70R/VSFEX0OveR4+Iw+fJxqt41sqmTNtcj/aMutweP2Dlm+DoDZK161olkDiP0N43iA9yKebmJq+tO5cZ/1mC43eq5Sp4qT5/4emrNMd2r3eikQfXmBNWWn19ajnxaHEl9k8kkFotFdDqdliNbjvy8HCnjwRZajowH/dXypXCk+4OWR08p1MFqcYDmvy8S9DtyPdfbcBDSNlCw5njnDNP7miMZjyQ6yFEUPLQNrZf/aui0g/Oyq02v12tE3Xa7XTbtIWIPpvP5PHq9Xmw2m1itVjEcDhNRHPpxcAKu6/f7DVAghYIoLH2MePjo2CM1CnpMGHSM2mcHd40SkVOsQKoTEKIVXAtxqvw94qRgro7nNqS25mCkvzM2/h8jLpcR7eukwiOWyFmje2rzSmB81mih+4mPKedj/hge/0HWmqNOnzWSx4LbHABRn/ZR/ZJ6FPzUl3XMPglTvahfefvarupUI3A5AFUbcptR/Soxqm5ywK11q535DU0uQqm4k8M8Pa6RQOzfz/HJN3Vr5NOjeW35ZaXlyKfFkd0jOmo5suXIz8uRD1OFW458/hz52Z5wqQL1NwfrXDQmd+0pR9f23Pi4FmA8phxtx+tWA80ZpjqnO6p/ZnwoeblcNlIKAPXtdr94d7VaRafTSYuGO51OuonSnGxy+amDhb3azr4PzW1WAWTNT0cngAjgf0w3GrFT4Hf9KJAjK6Ja7hR6vQKiRyA0IujX6h9gAABSnxKz6hIw0DQPJ0fG5RFJ768SsdqS1qXgo9E5XU+h0bucbLSunN25fR+b9Km8FEQ9UtTtdqOq9jt1uW8d+tR8QScTlRyQ0VcnX/Sci1Q5AahctChZqYyQreOWkxI+4mNUkuWzyjwXUeN37adGWBUnVO4+keMzdulpKVpfRKT1H+5v2odj/u11teWXl5Yjnw5Hwov1rk5PbTylq+XIliO1PAZH1lXzCZfKqeXIQ33azy+FI0+VR90Wns/6506tAvDO6ragqjD9nnOAnCHkzlWjdSPGwDRSp/2kzhzxqNLps/fRDX2326V8aJeVRlE2m02K5pGzThv85xqIZrFYRFmWDXIpyyJit+9HFVVjvOySw/jUoNWxGaOSx6H+5hvRNTKiRMMxZKdpH27EHOd8rsF5lUjoh1+n8ta/iGj0XaORSrAaIXTbUdtQEuM3QADZeR0+WVKSVVtR2yDSpHJUe/P+qk6U1Kjr2OQnF/lUWeluSeikEe2pH/ookWWPOmrkmD5rJDkikp/ouepbqg/FDAdn/ELtxK/3ftMf9VPkrHXrRCRXjxYlNJ0IOX4qSSADxqz9U6JVfTsWkYblky4dv9qETpbaG65fXlqOfJocmZ4eFs2JmftSy5EtR6qO9ZxfwpFsYuY+0nLkoXypHHmqPMoNV27Bm3ZOO0Zx54qIBnjxW+5OnnZcaU4Qeo0qwZWGAnJ1qIJ8PHqd98vr4DGoRmKIrnmhDq1bUzGIZCyXyyiKIu3UNBgMUlQFw8MYy93hkW6nOuTMOvjnwE/TAgCSVK8QSlEcopIOtK5TJwhkqVG2zWbzINqGvXG9k51er4DJ54hIu1cx7qqqEqHmSBTd81uOCLRvOha1UVI7fReo3Gd1dCUQPaZgriCi/dbvTibeHv3X8xUcNXrkkUT3jcDn41BUfj5RU2Dmd41O0wedJOnkBp1rf31S6ADc6G88xCT1O9rVSJ/atvcz9/1YoT/gEhFtJ0V8S6OFToLqTzmcQ4+Kg6o7JSGX1cfG0ZbjpeXIp8mRafzR3P5a15W0HNlypP5/FI4U93OfbznyYfmSOPJUeZQbLhSqitDOaKcUQJwM/LsLQet3pTkx5c6hr240x+52VRFejzstAAuYuRMqSXGc89UAvA3OZSGvkorKHMdjo4zVatUgkJAXHmuUUZ9KYKiuK67RPwBXZcQf9eccG8dQG1FnUDLgT989wuP53W6X3uPCNrVOIB7503EoaeT668TBdwdnrVvBQ6NTWocSjF6vgMJvDlRaB3pSW9EIppKPA4n7lH52EHHQdZ9yAlDC2Z/YJAtfm6B1OPirXnxXIuyMCK72wycn1HkqlUGLf8/hlYK5ygsZnppU59pQ4vLx+KRL+6w45jr1sZFSpW06IWpxPPaJfVs+vbQc+TQ58pjvtxzZcuRn5Uh7UNRy5PPhSL0h9/LoKYURkRVe7vdcZIfrcgCbIyAtSgh616vGqVED2s1FrmgTwNf69RyNKtGm90PBi/Mhn/l8/sBR1Ei1n9TBm+7ZNYmInj5OVcPUPqoT6uJMQFg/k4+rgKMRLwfWHAlrn6iXPilxa+66EoD2v673efZ3d3fpN2TAtdQFCZFugkwUcJCF6hsZaLTE9a0TpmNEoI7skRJk6ekc
mneuclUi0ImE2reSIzatNkT9ERGr1arx7h4AHoDUa9z2fNJ4jHTYpTAnOyUk0id80lHXh7ULGjnziCj2leurytNBXe3aZa54o3XqRFQjoLkJk/5XG/DfdYKgvx0jfdW9nq+RXG1LI3U6llxxP+eY/m/LLystRx7af0oc6QlA6ustR7Yc+dk4Uj7rzVbLkc+bIx/thisnfP3OedrRXHRCwVsdVx3EDcQNWqMJGp0pyzIBpF7vEQ9/VE4/tD9lWaZImYIB56sjaJTO84sHg0FyZkAvR5hECBU8/JE/eeyMwXXCGPWPPtJvNW6/UVND0xQJBTuPLPnEQeWg5+vb0709xtDtdmM0GkW/309P8IqiaMhU69cXayrYluU+79nH6frVNiBgB2kd2ynZ6dhoB/tRgOA7fy4TndggE9Ym8FLP3HqA1WoVi8UiZrNZLJfLw2Lx+vB+GsbpkUcFTh0TgK32CuAOeoOGfJC9Rug0DceJkTb1vSXs8IS80XtukkDBt9SmHFB9supyRgZ6Hv7lMvFJaA5fkJv2lcgax9S2tQ4l5xyp6zjUBrED7bO2lZOJRiCdKNvy80rLkU+TI7dMmqJ+gAEtR7YcSd2PzZHR6T44r+XI58GRp8qj3HDlQP+YoPnMfwah7xOgDr5rpMiV4KDjTuEEQYRCDVONwxcl81vEHuwxBP3e6XQS8GCsRJUcVCA36phMJo01VxpBUYfVKNNgMGjcXdN3cqxxXvrEeRQFeT4rYfrNmeoFGfIOBE+Z0AiDgqoCrho12/OqHVGv6j9iD5zj8Tgmk0nc3t42rjv1biCAS7f2nc/nDXmo0zqIIWMIRZ0QXfpkSv84pjY7mUxis9nEfD6Pfr8fk8kk6rqOu7u7RCSDwSBub29jPp83FsQiD17syX/fQQvA5Q33fGZnL+pZrVbJxtRO0VdVVQ+iQ/wVRXOhL7bc7XQj6v35vIQbm8TeIiIt+MVm1G+dHDy9iHUW6qO6BkT9jv7qAnwnONWPykIJArnym8pNf9fF6qp7tTOdAOn12JhHOzXS6XblhHCMMP0z5zleqzxcjm35+aXlyCfKkaYn6m05suXIz8mRa5nn8863liOfB0f6MS2PcsOFwJyM9e6PDuu5Hs3AqakTR9aB59pmoFovbVIfBk+EQO/qtT1VoEZVUGTEIcdZ0w1YZEpfMKherxfz+TyBvRNqxB4U+K+LIAGv9Xrd2PFIH2+v1+vodrsJWHVsTWCvHxxDVgr+2j/VC3JDDmqACizoC1lHHCYAOBfgz/+c7QAyfNZtdzk2nU5Tn1SfSura7nq9Tn/dbjd6vV7KbY+IRmoFZInOAGaNjuqEQ8ndiVr7s9ls4urqKjabTbx79y5NAObzeex2u5hOp2k8l5eXcXd3F2dnZ1EURbx79y4Wi0UMBoMYjUYpkqlEyRhUb0RKt9ttLJfLB+8igSg4D1JQH6VutXGihuyGSdrOfm1EpBuu0WjUIEOdtNGvuq6j3+835Lvb7dKWz0qQ+JtGzBkv7yjR6Je+t0Sj9/RFfcF9QHFEcUx9VH2ZNCQlCJWx+j4+55E5/U2/6/bBuf9KUscmjE5E+l/xWtvWyF0Og9vy8dJy5NPkyMPgmhOwliNbjvycHLlcrtI5BAdajnz+HPmrb7gwkGMd+ZROAMzqlNTtA3Wj8rvV3ONejquzeJSA8333E4B6vV4nCqw1ugAAIABJREFU4FHnBdxRNMbKeDqdTnosro+FdVtbAIxH3kVRpCgHJMT2q4xVo444j47ZjbkqqsO28FXVMCwlcSIfEDnRCQUfvUbr06ibGjBOrzsq8Z3rVO/aPnLRSCHyA9wUONXR6SM2QooAEVMlU+QFWZK2gE7QsxKkP4p3nwAk1fYiImazWSKP3W4X8/k8lstliqgR1fvhhx8a8h2NRik/vyiKuLy8bERyqQMbUdmrH63X6wd+wEsZKZvNpmFz6lvYAePic1EUMRwOG/Wo76NntVNNEVLwBlOYYDEG1QF90+i2grNO4HQyqCCuEUoFYL5DREpoapuOSchU/UPloIvXdRKGfTg5uY40ku4+TFFc41w9RwnRP3u0XW2bSUFbfl5pOfJpc+RekAeuVB21HNly5OfgyNXu4Os6wW858svnyFPlV99waQf10aEep8N0RoXFIFVBqmi9G9W6MSo1bhdQxCEyFnHYQYTfnGyoT42GR5ekNLCtbFmWaQcgojp6d03kp6qq5LzqJET2ICqiOICJPoKGrBQk+fMoAo/EFWw7nU4U0cyRBuw0okb71KVtIhttR8ncQUJ/08/b7TYtaHadIleNYrhOFMhU1/oYHNn4o/OyLGM4HMbZ2Vmyt/l83rhGbQRQp45+v5+iZT6JUJnQnk5qkO9ut4vr6+sk38VikcbIJGSz2cRyuWyAPtdfXl5GxD5VZ7FYxGg0iqraLyzXhb7UhcxVjhp5pK+AuoIS41HSzdkPuqqqKm5vb2M0GkWn6EasIoooEiFTiE5qNJT+0BbgzW5butMSciWCpxFtB1WO0UfIBSIj6q4TSvqhdqhypC/ad17OulwuG+sYeKeH2qtjDO2q3Six6nh8so3dOXHkongqUyVBxV2K+ibj1CheWz69tBz5tDkyYo9TfjPccmTLkZ+NIydnqQ7lCkrLkc+TIx/lCVcORFSpfveoys3VpXWgPH5HeAqEKF7vvFUwCjjUBfhrNEAjdtQHSLHQMuIAnLrol0fUOKoCBI4DIUE01KsAXlVVyhfOkS/yIzcewjk7O4u6ruP3v/99nJ+fp/qT8fJmcwF17SPAQVSRdomcKViq4WuUSwFKAY22AEfXOTLyiUOn04nZbJbqp6/oAEf1tQYKOl6wFcY5Ho8TgK/X6xThm81mMZlM4u7uLm5vbxNBK3kyISC6io1Qh+pH7Z6/xWKR5MQ40D2yJ62jrvfpBPP5PMqyjK+++ir6/X4sl8uUv45fAPx8Rm7oVbdORocqLyYyEYcoHn6lgKKgFxEN36o7zYWnjF8XaEMIEZGi1T7hoV58hPZpq9frpbGiW/qm/RkOh402sCkHbCcRnSQhA+pnYXpENMhSJ8Hz+bwRkVMC0kmc2gW60kmf4qFGVLVvyKEoDjufuU+oPjw6R1Gs9uO03ZZPLy1HPl2OHI1G9wJobkzA+FuObDkSHT42R6JbbaflyC+fI3O4TXmUJ1y5O0xVkgK+Ak2OXPRuVuv14woqGo1A2E5mg8GgISTNf+73+7HbHV6GCJihEB0XAIFCeczsTkW/qNOjjIPBILbbbYxGo2TkOD+gwfh8vLoVLTm9t7e30el04vz8PPUXY9rtdof37NWHqJJHNwBYxgqIIRslc4+0Kcg46QB0qksilkQqB4NBLBaLlDO+WCwSQOMcOmngM5FAdKKRJp1g5KI8GolivB6VY7EtY4DUSE2Yz+eJJDUyBAlEHICClBmN1Cnwo4PhcBj9fj8Gg0HDfrGvwWAQ33//fbx+/Tp+97vfxbt375IN9nq9BC6LxaKRloFt6mQPPRIx0wmaApgCm0aDkAVjfeB70VxzgjxUn9g37RCVo2+QKmPYbDYpqkcdObD
XSS56pt8Uxaq6rlPKCmPjenRaFEU6pyiKhCuKGyobJqYaVVUcVKJgbEosnKsyVL9FnvxXn/Px60RN/VQndBzX+rkWvbTl55WWI58uR7qe9May5ciWI9WuH5Mji7IZuGo58vlwpC+p0PJoa7gU+DXy5OfpnTsg5v+56wekcECMMOLhxI72SUEAHHAyVxqOQf0KBNvtNjmvEh4LU1m0iaEAAuqoSkIAB8AE+GAkPDZXUNe26S/9ADjpkz7WJz1Eo4hVVcVmu2nUhfwVmJGR61GjHOoEWocCBPJW+SDj5XLZ2JaVx/0R+7xxyLGu67T4VevQlJx+vx/T6bQxJiUy/qNbtVN1GEgU2wL0h8NhLBaLRjRKdYjcF4tFYxvZ7XYbHz58aERVcGJIFLlBGufn542dlLB3zkWfEYfJyrfffhvD4TC++uqr+O677xKBoUv+MwZNv1DdLpfLKMsy5YIvl8uGXJCH+zvgy7bNTJo6nU7Um0NkSBfjayQe+2bRr4Itdok942/YFqCJjDQyjHw09x3bxFa73W7MZrM0qcNXiabr5Fb7Sts6cdRIOH1WfImIhCe6pbFOtrU+jbzRZ/U7+srYdDKnn5msYTcewfOJrvqEkgznt+u3fllpOfLpciSTYL1GJ5ctR7Yc+Tk4suoeghmDwSBWq1XLkc+EIxWTvDzKLoU4OB3Vu2mPIimhoAgM1Z2d4whTHzVyjgIdSqcelKIRQwVLBXyuw2lVIfyuxovSeVdFRPNxMuSiURnaJyViOBw2QJzj3W43BoNBAqfdbpfkwfm0jRPRP0iKtqgv7oNndX1YrKyO6capDohReSREH8mjc9UZKR3oij5qighbhgPKu90uRdF0Ia5Gjei/ArTqU3Wsutb8XE3PoL8acSFCRDQNub58+TKm02kiy6LY78KnEZKvv/46ttttfPfdd7Fer+Pi4iI56Gw2SzalpIWM0CVAjY6QMRMPJg5v3ryJyWQSr1+/jh9//DEBDtFG1a3ayXq9TkROexqB1DUVCmIU6mKTDLXPFCHaHM5V0FMMQPZeP36tkUeIGL9XUvNJjeJHp3N4ezzjI58cHSsZ4MsamOAz42ZiofnzekOiu2Bh505u+IDbLzrJTSjV/zQKr7KkjrQ+xSbqnKckxhhV52AShHOKSNpyurQc+TQ5Um2aGwr3l5YjW458bI7cyaYZ4GvLkc+fIx/lhktzM7XQGVc4nSLvVI/ro1l9tKzRDAULvasmAuCAqHfcREP0cSjkAGjpnfVms0kRiOVyGRcXFzGbzVJ0jDEADOy2pBEIojSAYL/fj263G/P5PC4uLmKz2cTd3V1EHF4mOBwO03hwbj53Op20+JPow2QySU49HA5ju92m3FmPTKuRquFqtAAC05x6lS19RQfoSEEFZ2bcrA9A/rSpqRg6PurnPPoJuJIfzjg5zyMPamvYgkakADN0g/PP5/OU/0wd8/k81YFc0lOduo7ZbBa9Xi/+5E/+JN68eRMXFxeNx+aTySTJhP7NZrPkR4wPn0KugCdEQtSt1+vFt99+Gy9fvmwQ+G63S1EzoqZE5QaDQWOShN6xa/50gTEy1UmDR622222SVxnNHbU08oMvq0/7pNOJhHpZJ0A92AXy8ogjOnLCqOs6JpNJ8jdSNJTIsUP6iL+hO/pCvfrEAUzSqLVOgjSlSifeREOVhClOuOoTXK9PKdxvnHCQc674BFPJqy0/v7Qc+TQ58lj6T8uRLUd+To7cCIzCHS1HPg+OPFVO3nAVRfE/RcT/Xdf1TydrkU7hqHTA7w4ZrDq8P8oEUFSg/I4jq5B1kAwa8NDIoC4IhAyUhAB9gBOj1RzpoihSNIfInToaDkb0Q9+xoBEewIr2SRPAMOfzebpWZUAkAgNnfNpPxoizrlarWMchcgBQeE63tsVY1aA80qNExLk4vJIP9esjcXRMdAO5EFG6ublpRGMYG7JF9rPZ7EE0jrr1EbJOLtQusb+ISCQc0XyBIO8zidhvK7vdbqPf7yew0XSD5XIZP/30U8qb73a7iQCQAe+cwb70MTUkq2sjdEJB1I71C+Tx//jjj2liQd/RhU+4AN71ep3sDtlrBFH1haywce23Tk6m02mMRqPoVt2Iw3szG5ND2tDfiERXVdVIO4E0NZqFzWLb6NZ3OtNJzWAwiNlslvQGYQwGgzTRY2yOQ/Sl3+8nDKqqKkajUVqgrRhG/RGRrlFsUtlpRF5lpLiC7HW9CP1QYkCPKgPVl0Ye1S+oiz5yzDH5GPH8Zy4tR37ZHKnYC4/oU56WI1uO/BwcObjfpbCOww1Xy5HPgyNP8eTJTePruv4/P4VIdECqdDqvkR2NQCm4qhCUSPjuBMJdMREcjUZoGobfhVIXhIfjRkTagUcBXh/bkt5wdXWVFhETffLICxES+tPpdPbbZd+TnEZ0IiLtmtTtdmM4HKYooMoHo9XIE7nHKJlcYcgG+ZTlIVJFRJP+Ih+IQo0UB4V8VNfUozqmDl20CahpNO7Fixdp8TLAu9vtX2qoL6jUCQc2AZl77q2mFfBZ/xgzclcCres6gT4RXMZIjjQLddEDC0PH43GKzkwmkxgOh/Hdd99FVVVJP/Rdt7LVaI+2ix0wRiY+EYcJg5KnTjQARkh2t9vFYrGI2WyWJjEaOaJ+XcCM32jb2BfjwGY6nU6a9JDK0SCLupneop91oqGP9tGR+yyAyg5Sepydq5CpTj49VYJ0HAi6KIo4Ozt7UK9O7sAJbBP9MaHTJxj6tAC/YRz0TfGC6GNRFGkROPLGJuiPRvCQlZ6jgA/Wqv7oO78r8agP5yb8H4ve/WcsLUd++RwZcXh9Be22HNly5OfkSH4rovmOvZYjnzdHnn5L1ycWBkqDniKhd4RqVKooflPC4DuC0DtyvTbi4AD6+FLJRRWld7kKhBg3j4KJfHQ6+xxlUhi2222cn5/HcDhMxkS/NdrhTjqZTBKo8zuOg6OMRqPG42PSBvQuXImJggPU9eFt8WdnZ0mO9AHQ4rPKNRfNVlkvl8vG+0GoA4fhjwgLbXHcnRr5rNfrmM1mKVJG+xqlYWzYF33SFBf6BCBEROqPpmSojTIWJ0UiRsgI3Y9Go5hMJiky3O/345tvvonBYBB3d3cpyqMvjlR75pE776fpdrsxmUzSd3xBbQeS0F2ZPMK62+2SvQKCADT+BklpGgtFXyKK/NQGPJLHeBQokROTDC9E0TQSqxNKbAW78jQa/E39DPuoqipN9pTo1C+VoJgEcD6TAGwN3erErJbofsQ+kss1kC5t9nq9mEwmifywX50E00/qRg+NiWBxiLBpJBZ5KoboZE1JWtOJmIhovYoDyF4xQuvV9tvy6aXlyCfOkWXR2Hyg5ciWI5EN5TE5MqWyFge7Qy8tRz5fjnyUNVx0BGPUwergAHFVOOew2I8OI2AlpNxAdLA4GG1ikBwjMsLveiet0UKNhpRlmR55cg79ZEyQD2NSYuI9H4Dk2dlZeu8AhSgJ//nMgkPNsdfUjbIs48WLF7FcLlMESdMVGHOv14vd6t44MlEsCnKGWI4Z0mKxaMjco3fIgHo4T5
0SwtPdd6qqivPz8wZxuv4BeUhIHzsDtJPJJG5ubpJTK1GqHRDZIscd29BUhRwZdbvdFC1ii14W/dZ13Vg70O12YzqdRlVV6fE8ExSAX2Wu4DAej+P6+jrlQfPIH31wrUZNVR9EJIuiiPF4nEgpornQnYmWRpRZI6HpHhrRcpDD3ln4mkinaL4PBB0iQ504IG/sRSNWmq+P/NQXVY4qFyVB9EwqEn6EHaid6mQEXQDmdb1/N41Gf5ExUdzxeJxwjQmP6p2ceM5B/4xbcQy7YPyKG8hOsRHf43qtz3FUsdgjdykKW7SphI9RWo58ehyZbLs+bMiADlqObDnyc3HkaltjUOlFwPhAy5HPlyMf5YZLH/uq0/pvGg3Q/ygDI+C45hirAendKecjFK4lArHd7jePIL3B3yCt0QHAh75BIPRRDYfFojgIClWyIsrU6XRiPB5HcR/NWK/XiThIzcDIaJd+sDgQEKZOPvM+Dq5ZrVYpcrTdbtNiZjVCjEnJWh1TiQzA5HwFFOTkxqbEgjxIM9AII9drvv1sNouzs7O0QFMjDuSlO+nx+ezsrBGR4jeNyGBPABrH2AVps9kkUtrtdmkbWiYCb9++TfplbLe3t420g+12m3Ks0SH643eiuEQ26Rf9jDjs4sPYIcz5fJ52EFK/YVxFUSRQWywWMZlMYjKZNGwEf8K2AFvkRjRRAYvzVd/4u9p6WZbRjU7E8rAWY7fbJVu+vb2Nfr8fZ2dnD1JH1CcgKSVlXYSveMI1HOM/YI4dsIYA/SpOQBoRkXyGl0GCG9SnOIe+lDSRFekrGl3XCRf6IFKt5KlPF5jcEpVFVoC+RrPBKScZ9KVRTyU4JRLqraqqscFBW35ZaTnyaXLknCcccZiAq523HNly5OfgyKJ7/+TwXoakgLYc+bw58tGecCEcJww67B3FuYhCaYqAGisKom5VnBuOkpQCmkbDcGgcn/ZZGKlkgmOiPJSpkUjtj4JlRDN1IqK5CFCVqGSm4AwJ+kJAbVONW/ukBsiixftOpSge4+QaNXqiN0RuElBI1FL1Bwji6JpLqzIlSqTgQcFWiH4QOWOh6+3tbapbt5lFNlVVxeXlZVxfXzciKxGRHn1TsEmcXkmVNgHkXq8X5+fnaRGuRgEhmrre7+jDo2naXK/XKfJD9AxbVEJV+8EWZ7NZ+m21WsX5+Xl0Oocc8Kurq2Sj2B3AxCRK0y2I7mIfEC92DVmjb01/0tQBXTehkyh8tNPpRLk7+OJkMom7u7tEuKT6IIvtdtvYWSoiUqSMdAQInyACEzL1N2xYvyvYElG9uLiIxWKRztf0DWxWSZa69P1FRB81cqkROsau/sw4kDnn+0RRI286JiYUeq36jkbi1Z+cPLB/x0/kpbilNwGc05ZfXlqOfHocGbFfw6U3HspfLUe2HPnoHFndv2srIsbjcYxGo5Yj43lwpAdXtDzae7i8MXVmjUrhHDgI16jQ9XyUzHF9FOokojcZRJ663W7jKQ+O5pEfVag+mtT2OAax0D/q4YV6+lgVB3TyAWA1GsJ/ImkaoVDHoF4nDQBpsVjEcDhMkb1+vx/dXTdiddARslTjpD+keETsFyqT48o5kJ8apEYLiqJoRNlYsE2+NQuvIZder5eIjOtURxH7FA0cHJ3zHcD/8OFDnJ2dxdnZWcp3p170PxgMEijRP/qPbgEMZF6W+0Xgd3d3KZqjAL7dblPUD/3T/4iIq6uruL29TakL6JBxMBb0ji8ApLShdd3e3iZ5cn4unWc8HifCq6p9DnddH7bmRW8adeZ8osIUbHI6nSYbL4p9mgAAn+z1HlABQUibPhBJI0qM7WmagD6lVYxwX9KIJ76lUX6IA7uNOLxksdPppHaISnMMQtVdlMAPCJU0L8ZK/7BdxQiKgjv4QLoG42CMKddf8EjX2eQK7TqRYWfqV8hOo/v6xznU0ZZfVlqOfJoc2bl/agPu6SS65ciWIz8fRxaNulqO/M/BkY9yw4Ww9O6PTvidKwLUhYM6mBxQOSkpgHM+jq2PE7V9QIDHswpYAAuLPomAcOdMlABF6di0rzgM56s8MH5Iif7yvhHAjN+UnBT4lIwYL2PUlAvVzWAwiN62HzGLKMoyOkWnYTBKlBg6EbPRaBTT6TQuLi7i3bt3qR0lXZ0kIBPyoDmPsUBcy+UyveNhs9kkwGEsmiJChAoCLooirq6u4s2bNzGbzVKu8GaziZubm7i4uIjJZBI//vhjbDabODs7S9E+FnRWVZWioxCP585Dgujz5cuXsVqt4ubmJpbLZQLiTqeTCBiwRm/k0fd6vSRT7Fl3ktrtdmnBL33QSQypBpPJJIEb0RzsGRKDNIn2EY1EH4BnVR22O14sFo3oJuPBT3SCh81AWiyWh2DI+deCL0VEepGn+lW3u18kSy7/1dVVDIfDuLu7S4TEug7doQtdat+xvdyEFr+BMNE5fsSYiGiqH6EH5MGYHGu4Dn1QP9ihmIXvUxdpH9omNqh4gy9Rt064HZeUKHzi6n3T8/QcjnmEsC2fVlqOfLocSX084eC6liNbjvxcHDlbHTbN6PV6Kf2x5cgvnyNPlUdLKQSE9G6VY+SGq5BHo1ESKEahjx4ZMILGaXRAGrmCqDQao8SmkT3u0PV3HIE7fxYyMiba5zpAkXa73W4CmIhoADqfqUMXNwLMmleNwRMt0XcsAADIRxe8avQBkozYO9ewHkT8dP8+h24vGSwLjqkPWRRFEefn53F+fp5yxt+9e5fAEBJUYyRCRxQOGaFH5E+EkBxo6iQKpgQLEZEaMp1O08Rgs9lv7fr69esk+/Pz87i7u0vAEXHYqpZH7Bq1wbY0YovOAbvNZhPffvtt0g3ghU56vcMLB9frdZyfn6dF00Q+iezpBMEjUES1PfIDOUyn03j79m3U9SF6O5vNGqkj6PHVq1cpnRRyByjxR0jQI7PYkQIKOlcgZ2chSPfVq1epvUF/QG+S7iByZDCbzZLOqQfi7ff7aREzbc5ms5RKQwRYdYA/46OKD9hBp9NJ7eJLSjbqg6p/CL/T6TS2o9ZJGTaBr+tnjcTp5Bv7AEP06QJY6BNSnbDtdrvG7lnqx0oGSlIUbDBHMoqzXI9ftOWXlZYjnx5HriRtkHVdPLVoObLlyM/FkTHfr5Eq4pA613Lk8+DIU+VX33Bp4xoJoxNEJxgsAry9vU0DBDggFVUs10A2FJTMORR98RoFIyDvF9KiXu3v7e1teoQPqEJSPGqsqiqBMoDIIkM1qIjD9q6QjPYLYPU7cSVZJWM1Jn2Rod7ZAw44gxJERERXIk39fj+m02na1UeJl4gZ0afhcBhfffVVSkmAePVRc7fbTZGZ8Xgc/X6/8V4mJWYFFrUb+otDaTTl4uIiAcX19XX0er24urqKXq8Xl5eX8eOPPybne/v2bVTVPl/9/Pw8EQA2Qx+2232OOS/oY73bxcVFijytVqtEDvSJR/48wsYH2N1nPB7H3d1dIkSiXEqmEKBGOsuyTOka9PXDhw8pP36xWMTXX3+dtmsF/Hk54W63i/F4nKKIFMZJXjXrD3q9XoqAA
rIAtfstUaSqqhJB93q9tNh4OBzGdDqNbrcbL168iPiPiKI47PY0Go2SX2vKCbajZHt9fZ0mMdjH1dVVwozhcBg//vhjg0DQMRMPtemqquLi4iJms1kicOSvC7Y1OqYgiq3SPum6agNgBuPwaBtjBb/op7ajOfEaUdZJN5FtJguMDx/TaBz2hx6VEJQw6GsuYqnErJPetnxaaTny6XLk2iZcbAyhsmo5suXIx+bITreZct9y5PPhyFPlV99woQCNutEwndC0CG4EAFEATt/pgAL4Ayz5DUBBuDgUfUBo+jhY79D1PO0vTr1ardJNiYIgY9DHmgCIKg0FaPoAdeidPlEJDIw6SR2IiAd9jThsCwoBMS6VA1GTAylH6j8yZ2eeu7u7iDjkgCNnFkDe3d3Fzc1N/O53v0vXEd2EbAAW6mZdAI/0NQqpxg0w8J33OKleIZRXr17FZDKJd+/eRVEU8dVXX8W7d+8S0azX67i9vU16g0A1Uswi1KraL0zFeSaTSWy327i5uUkR3MvLy1gul41oD9dpNFltQyNQRKFID9FoHgtsSc2hfYi3ruuYz+dJvjg2+fXobrfbxZs3b9LC28ViEefn541ouUYjR6NRip6/fPkyRWT7/X58+PAhyV1JV4GOfP/z8/NUF2Ol/jdv3qR8a3yBHbiInjtw4ytE5m5vbxOx6nbOZ2dnqX9KzjoRxZ/4Db2wyHs8HqcXXZZlmVJJ8DlPgWASx2J22sAelHiQt0fvNB2D48iC3/mMzukLNqQ+j43zp/1F5ooL9AvMzj0N0T5zHudqJLEtP6+0HPl0OTJFuONwDjdEES1Hthz5eTjy+od3yVbhpJYjnwdHniqP8oRL7xhpkP8oQgGqLMtk0BGRFhhqXiZ3/Z5qofUTvUKpGjlUslGnR1l+t46SqGc2myWA4jyAiMfykCLX5wgLgijLw45OEXujUEDWCBCOTyGPua7r9GgbUkAu3NHrVqvIYP95f0yjEIAAC0vn83l69KpGrpExQENfWjibzdJjZByLfqpDEXEAkNQ4z87OUptEH6kHZ/dIx9XVVVxfX8fXX38dl5eXDVv65ptvUr43ETJemPnu3bskB33cTIQNsijLfZpJURTJHgAuiMgBhtSF6XSaxnp2dhYfPnxI5MwYt9ttahsCICIEGaP78/PzBKa9Xi9evnyZdshCd7xolEkKBA8hMyZe9jkej+P9+/eJ1BiLRnLUZ/EBQBxwHgwGMZlM4rvvvku+MJ1OscIUWcM2WYyru2eyBTEYQAQU+xkMBvHu3btYrVZxdXXV2CZWX65KGg3RM2yN9Jz5fB7r9f4lokxSKEQ2dYLGegGO+85ZRVGk3aOYOBFxVGwBwxQXsHed5HANTyGUhIjqaqqWYl9Ec5tnnYQrtiiJ0BeNnHOcc/XmoL3h+vml5ciny5FJB3F40qUL7luObDnyc3Bk1TnssqcviW458svnyFPl0d7DpR2OaOaOI1gdGJGem5ubdIetd/cqGMBWCUoFoHeuXK+RM0CD9lFAxCFH0+/WWcyHwwC+8/k8AQmO7MCu0Qlkg8J18gmIeLQSB8KIAW0lr91ul5wROendNm1CyhBKWTW3zOTRLyCz2WzSjjXj8TiqqoqXL1/GYrFIfSESgkPzqD5iD+bkQi8Wi/T4lyiPRmhwVqIGjEffZh4RCYQwfOyHRbLUudls4vLyMjabTZqsMNGpqiqRy3Q6jbIsYzqdxvn5ebx8+TJms1mcn5/HxcVF7Ha7BskCokRu5/N5nJ2dpUWrABS2OhqN0qRjt9unnrDNK/am5LVer+Pu7i6220N+PtE9PmNntPMHf/AH8eHDh1iv13F1dZXSOSaTSfaRPTLFl66urmI6ncaHDx/iN7/5TYooQtj4JLImYs2EAeAm8kpuPpHvbsmOToedwfAH+oFdlOV+l6S7u7sExETuhsNheicJOtZNOYiK6gSR3wF9ZDmbzRIO1v8QAAAgAElEQVTwQ3T0Gz3QR/ADPbG2wUlGc+TV1hgj48sRCjLFtrkeO43Y5+Qr3oEN+LtGjxUHmLDTF35XjKZeL+gfndEu+NaWn19ajnyaHKk3NDrBajmy5cjPyZHj0TjZHHppOfL5c+SjPOEC6PWxHQPe7Q756QAuAgccUZ7eQXJnjUD07pE6OY8oiW9NiSNBCPQp4vBIdLfbpbtgBEeON+DGhLGu6/To9/3796l+xgw4YySAsq6nAtTm83lSGI5cVVVawKnGjCEhR2RAm4yXqA2yoz+dTifKe2DYbQ+LmJGr6hIAo47VahWz2SwRBgSiqRpK3kTdkOl2u00vyiNCAYERweLdHNiFvl9EdY8eLy4u4uLiIsqyjMvLy9hut2m9w3w+j2+++SY51GKxaKRzAKzdbjeBF7KChNFBWe7zpNX51a403xswQrYvXryIm5ubRDakEkREAjCNjqFTInZEcJAjMnnz5k36jcf+2M7V1VXKWUaHjJP+E8GaTqcxnU5TPv1kMokffvghimL/8krGDaF63jS+Bmjd3Nwkkul2u1Fs7xc6F2XyJyZdmmagk8B3797FbDaLV69eJbLHB8uyTBFM3jVDX6fTaSI6nTACzP1+Py4uLuL9+/fphpC0HCYSTJSYOOhElag7dpLWQ96f71FATaeiHr5rlI1Jp/oev/FfJ9CKtfiWPtUATxTT+E2xQic6PpHWSajqm3PpY1s+vbQc+XQ58qCkQ7qQvkuq5ciWIz8HR1adw6Ywt7e3aXfHliO/fI5s4IqVX82egG1E86WFDFaVgDB4NJrbhpOiu5wgvNwOTaQ2AOIUjGI2mzWib0SAEBp91TtWfZyI8DHUzWYT33//fdp6FOdQYmS8qkDN7aVvCnZlWSYjRtlnZ2cN4yN3nf7WdZ3el4GcGDslPSaHiOtDKgQOTZ0YGAAC+E+n02Ro6Pq3v/1t3N7exvv371OaBo4bcdh2tKqqBLT6yJxH2PqIeb1eJ7kSEdM+YvDD4TDG43FcX1/Hn/7pn6ZoHGRxc3MTk8kkRSM17xvZLpfLuLy8TAQMqEMk6LTb7cbt7W3Sma4p0FxyBS/6GxHpZYabzSZNRCDY+XwedV2nRdGLxSJFg5hkQLLYDXKo6zpevnwZL168iLdv38bl5WUDrIgE0jfs+P379/HixYv4/vvvYzwexx/90R8lsiVaHBFpO2LGx4RqsVg8eDFnv9+Pm5ubhg8Puof3bujidXL90RUTj6Io4vLysjHZGY/HKZ0F/4bYsC2Ijc+9Xi+1hcyxKfya4x8+fIhvv/02Xr58mfpOfRGHxbQsXtYnCxHRWHsBgOsTAPTkUTsmafSZCSdjxLd1UbNOHrV+jdpzHH2Dj7Th/dNcd/8NDKFeZKqTz7Z8Wmk58ulyZJrMxUEH+HjLkS1Hfi6OLMvD1Fs5ruXIL58jT5VHCVcSnaJjALYCJCClhFOW+8eOLLDvdDqNrUK5Vu9w9REkhgiI8S4DhEBuLu3xWBQnwElRNk5CrrxG+tgNZrFYpDQCInIYB3nAkJqCvhoIj3yJ0tBvdjYCFIhwQVIQBwatj+XJXe33+ymiA1n3+/3o
zA8LjNlKFCOOiPSSPxx1uVymnOnJZBLn5+cpisGjf/LINX95t9ulRcE4G9GkzWYTf/iHfxjT6TTJkagQ0TnkivNzLToDXG9vb2O9XsfZ2VmKBgLwjIGc8MvLy5STrLZGW8gOPVxdXaWJxWQySQTDjkzIkfQC7Bryoe/oApsimsvkibFQP3nxTH7wqfV6nd6XglzLsoy3b9/Gb37zm9hu93nuTr4KGAD2brffoenFixcNooC8Ly4u0uSBFAtsFn8lRYGxkLN+d3eX3mvy6vI3Ef8toiiLlEfPblbI67DO6zBZgsQmk0l6v83d3V0aCzZHSsvt7W2yP+RKlPrs7CxNjGazWZoUIGMImokQemIbYdaOINu6rhtPjyMiyUNTurBV+oO/6mQX3OF6j8pHREo50kggddEHwD6XEqYRNz7r5EInGdiNRhU1igi56Bja8uml5cinyZFMHBVfyrJMa3Bajmw58nNwZNk92B07JrYc+Tw48lR5lF0KFbARIh0gtxpHj4h0p6pRAR4ns5vRZrNfSNnv73eGAVS4idD0A4xA28bwMDp+RzGqFMBrMBjEbDZLBn1zc5Ouj4j06JbcbAogERHpbhlDHQ6HcXFxEb///e9T9A1gqes6RV0gGPKrVYGAPiC53e4XnkI2OP1ms0mECVmV5X771vPu64j/NyLkbpwoEYtM0RF53+xQwyQaRyHyQc45uup2uzGdTuP29jaREH1XsgbIIyIBZ7fbTTs7QaT0n2jedDpNUS/A8ebmJjqdTgK4s7Oz9PLK5XKZ5IBNkZNM9Oj6+jr1jbSOm5ubBIQ64Tk/P4/Ly8u4vr5OwIGtRER89dVXKfpKPjS2wHgA5bu7uxSpwj9IOeAcjvPOkuvr68aLMDn+1VdfpVQHdArBAA6z2SwR/X/8x39EVe23gKUP3W43Li4u0rgYE5MZjdYRKYUs1+vDe1VIt1ku9qRL20S6selOp5PkQT+x8eVyGbe3t2mB8/v372O328XFxUXyC2TL4madXIIXX3/9dUpbUpuAGIh+Y4c8USDPXuvUxbgaSUTe+BQRN7ARX4qIlCKjE1jIgz4hZ2xAJ5eQBrrRpw76pIOCv9I2uOkkohNwf9pCfxlne8P180vLkU+XI/v3/bt+9V/i//hf//eow9Zw7L8cJm5R3B+MPZeWZUS9vyr5zf01ZVlGvdvtf7s/tr8uDnVkikbky+J+17Sopf77i4uHL1wtoohdLWsF633Qi+vS+OiP9uV+HHosTSLr+4Op6aIhq91/bU5y+cx5/E8YI9dHLbtEFkWSt9aldTb6mrr+8P2AjL8sy1S/9x1ZNK6uI8qqjHpX30ueTVXq2Fm/vD8P6pK+1Ls66vr+ybBMzuGzliOfB0fmbJHyq2+4eBwN0KlAUIRHDxA8IDwcDtNEGwPb7XbpBX5EgDqdTtzc3KTcS72jxBgjIk38iNiRp6sLJbX/EMl2u21E7oqiSHfPvOOBu3EIMgHjPckQSeMY7yUilxjgJyLw9u3beP36dYpCMvnnHIg3IpKciUBBwkREzs7OGtdcX1/HaDSKd+/eRbn7Jl5ERFlWybGVgLmWvGUmzkymkT+LK7mpQO+84ZxIELqk3+RX49xledhCVSOUkPBms0nRSnKt7+7uEhnzDow//uM/TuTM2N+/f58im+Q0TyaT9I6Ju7u7ZD8XFxdpS18ilkyCuKHgUTMTGiYi3BiyIxCO/P333zdu8NCpplEQzSQlAVuMiHSD1+1246uvvkpA8/r164iIFIHjHRlEMH/66af0vg/GwGSo1+ulHPTRaJQmdy9evIi6ruP6+jqlTaAfIpXq0xGHd04QsSqKIu2AFbEHrsXykPaAf5FSgD2ja84D7LGLb7/9Nrrdbpyfn0dEpIgbKScQ0t3dXfIBJl6sBSBapaCP3TPR1acOyI4bT+THjmvUga3e3t4mHQP67EqFHZPugc0RIIDgdF2DkpESjGIe7TNZRs88CUB+jAu/0wi74h9jV1vUm3eOER1uy88rLUc+XY788eZmL5uyitXw7L+LPTzL0sLCLy74dMuRz4MjTz3p+tU3XDxm0w4CtHQIQTA4Bjufz+Pq6ioNkEk2RkV0nBsCiIh2Iw53yBjZZrNp5EAzyeVFjfSHnZQiIi0YJI+YyT8TVcgBotMx8hkFowjyTzE07rIHg0Hc3NzEy5cv000D0Q0enS8Wi7i4uEiPW3lHBm8RJ41E0wh0u9nNZhMvXrxIj5uJEkREWtirW37yQj+eRL169SrevHmTno4x0RoMBnF3d5cm7Dgwd/dMrnWhbFmWKSobESntgggFT6F4csJ46rpOT5VYlAyZcv2rV69SNHe1WsX79+9juVzG7373u6Q/IjHckOm7OGjz5cuXMRgM4sOHD3FzcxNff/11eiLEEzVypYmcUrhJjdinZ2hUqa7r+OGHHxq66vf7MZlM0s46TKqI6Eyn05Q/PRgM4vXr16n+xWIRNzc3cXl5mXaXms1m8e7du/S0ivfEAKzohV206Bf9YbLEE0YmX/SJCct2u427u7t0DEAEMK+vr5NMut1ufPf9D/E/xoEkNOWiqqq0CJenf3Vdp8ke/k1knptzfA0grev9DmaaesIT2ohIN/zz+Txub28T7tAnblqJ6HGt4xnH1O+5IWY3Nuqg7bqukw6I2lEPtgxugY88hdCnzJBmWZYJUzQNC0ImwIDPkTZCYMQJk0I9njIBjoF3jKctP7+0HPmEOXKxiP/5f/tfolNVMZ5MYjgYxGg0itvb25jOZlGVVYxGwxjdc2QRexyeTCbx/fdv4uLyIob3vlpHxHAwiLvpNL569Srm80XcTe9iPBpF3D9VGQ2HUdw/+drrcBVlWcRgOIzFfB51RLx6+TLe//RTdKoqRqNx3N7dxnw2i+FwFJPJJDqdfQBwvdlEdZ+BslqtYjqbxfZeL7f3m0BcnJ/HixcvYrFYxHQ6jW+/+y4m43H89ne/i6qsYlfvYrPe48ZiuYjzs/MYT/Zct1wsoijL2G420el2Y3a/Y1/V6cTre46cTM5iNBrG27fvoigiOt1u9LrdB09wzu7fZUY62vY+AybqOn4vHNmpquj3BzGejGM+m8VqtY5OpzpwZF3H9P5lyQQBvvnmtzGbzeKnn36K+WIetzc3cXF5GePRKGb3mTHdbjcuLy7j3Y/v0tOObrcbnftgatXpxG9+85tY3NtWHQeugVeKoojt/bXL5TLmi0V0qirKqorlYhHre37rVFWUZRXd3h7/o65js9lGp9uJ3ZY1mEV8++3/t3/6NBi0HPmfhCN/9Qpo7vroCHfE3GVyjLxqCKaqqvTOB11wB+BOJpME5Pyud5MIHadjog8JoEQm/CgWxyE3fDKZxGAwSDcO3JVj+C9evIivv/461UuEDMX+9re/bWwDSl447+DgKdBoNEqPf1+/fp0m/KTzETHgTpxUOYyoKPa7+jBWnlAgJ9VHXdfx9u3b+Omnn1IKYFkeSLhTdaLe1bHb1bFarqPe1VEWVey2u+h2erFerePli5exWq6jU3Wi1+1FEWVsN7vodftR1xGD/jDGo0ms15u
od3WMhqMoijKKKKMqqyiKMjpVJ4qijOndLCbjs4g6YjFfRL8/iHoXsd3soio70e/1Y9AfRr3bP7rv9wYxHAyj3xtEp9ON+Wwes+k8Ioq4uryKy4vLuLp6EednF7Feb+L9+5/i5uY2Xn/9OupdHd1OL6qyE7e3d7FcLKPe7QHvu+++j9///oeYz+bR7XRju9nFYr6IqIsoiyr6vX5cXV7Fi6sXEXURP757H71uL87OzmM8nsSLFy9j0B/EbDaPu9u7iDribHIW2+0uyqKKquzEZDyJy4urKIsyLs4v4/zsPIaD4f34iyiijKgjRsNxXJxfRrfTi35/EFXZiarqxGg0jl6vH0WUURZlrJbrGA5GcX52Eb/5zTcxGo5itVzFzc1t9Hv92Kw38f7Hn6KIMv6H//Jf4/zsIsbjSRRRRr3bP9HsVN2YTWdR7+oYj8axXCzj/Y8/xWazjSKK6HV70ev14+52Gpv1NkajcVRltZfbehvbzTZGw1H0e4NYLVcRUURVdWKz3sZ6tY5BfxC3N7fR6/Xj7Ow8NpttDAd7IK93dbphJYp+fn5+P3HopIhTWR7WTVxeXiZ/7/f7MZ1OU1Sa83nSqjsqdTr/P3tv1mPJlVxrLvczz/MQkcwki0SVAF3osQH1j5PAC5TQ/+H+HAGNBvSiF5VuFWcyM8Yzz7N7P5z8LOxEUVmVYl5AYroDQUZGnPBhbzNb5msvs522VrQkjQA87BpkAJr/0WhkSaB/WSWGeCLAS6FSqZRWq5WBCJ/h7wjIJJQ8K2wcCTTxRpIxcUEQWEIMWQL7xnNJsmSXv+dc1AHwM4DgOVD7ewTMiHun02UrXB/Tk+P9jgQj/2tjZCaQyoW8Dpu1fvj2G02GA6UDSceDdDrosN1Ix4PSihUfD8qlUzpsN2o3ajpuN0oHUi6dUiqOFB8PyqdTCqKTirmMKoW8TvuddDqonM8ppVipOFI6kMI4UiaUUoq1XS5ULRWViiPt1isVsxkF0Unxca9sGKiQzaiYyyiMT0opViGbUTmfUyGbUTYVardeabdaKowjteo1tWpVtWpV1StlnfY7TUdDLWdTXXc7CqKT8umUsqlA6/lMx91GYXxSfNjr4faNhvd32q9XyqVT0vGg/WZt91zIZtSu19Ru1BXGkaajgfKZtOqVkqqlorrNhkr5nHarpTaLuVJxpHq5pCA6KRMGyqZC1coltWpVZQKpUa2oXi6pnM+pVioqjCOlFCkVRyoX8mpUy8qlUypkM+e/DwNVigXlM2mlFCsdSKf9VqV8VvVKSS96XZULeZ12Wy1nUxUyaUX7nWajoVKK9Pmrl6qXS6oWC0orVhCdlA6kbBhou1woiE6qFAvar1eaDgeK9jul4kiFTFr5dErrxVzRYa9SPqe0Yul0VLTfnX+Wy6qQSeuwPY9nJpCi/U6n/U7FXEbr+Uz5TFq1cknRYadiLqtauWSqoAQjfx0Y+a7jg9RwSU+7O8NUMRDc5HPdo19yZCM8ChIJoLwhA1ic0wdUBt8vu/rB9VIOSQYsGEGtVrNAjnSNQM25MBokZM1mU5PJRGEYGojxBk59EAaAEUvnpePZbKZaraZ2u60oijSbzS6YPt7GvUyN1R5JxoKwqtFut00SRvGpX96Vzl2Aym+VxcXb/6Hf3P6PXzrt/6mj4b7Pv/1/RlLJ/Tx6+8Uxf/sZBGtrPakXKCctuXPkJC3efu+Nu/L2S5IOb7/4zMad5yTpVmcmIv/2e46V+z54+2//s+fHxp0nentvsaTls8/hoqm3Xzn3u+mzzzaf/fuab/7f89iU33696+j+Bz9/6b5v/4VzPD/67ns/zwpkTBKSJnz4cDhYa10CPl2zYLXiOFaj0dByeR41/AtAQF6A9EB6KqSF4QMcKCxmZQCGbDQaqVarSZLpzv0mkx5AkH2QNCKllWT+TkxCguXrnvDPMDzX8fkEm+swHv7efS0djCasm2fZAD9iGj8DmCXZuDDGz4GOpF3SRezi2ZLj/Y4EI/97YGSlUtGLFy8smWLlzSdXxCGaNxyPR02nUxWLRVNy8IJL0kuiSzJMcszPuX9WGlk52O126nQ6Vtsbx+eW+8w7q3uoJajDlZ5WHF6/fq2XL19aY4vtdqtisaiHhwd98cUXGg6H1vyA+EuMoWaYumjuGdVHGJ73utrv9xoOh/Zizj5j0nnPMezBx6PHx0cVCgUNh0OLg8Rv4h9yOFZmaAcfBE+118zBbDZTpVLRaDSyldD9fq/lcmnjvNls1Gw2VS6XrRW9j3Uk5j4uI9lF0rvZbKy2nNpwfJZYTczfbrdqNBpar9e2KfTV1ZVubm70+Pho7fr52wQjZfHrvzNG/h+VFHIhAi0SAG6QYI4RwaJJsmVUScZ8URjsO7V42YUPTH4wvJYziqKLc/B2ykDCKPKm7TWkvmsOzo3sCkemmcJ8Pr/QsyPLY9+Kw+FwESBp/ToYDHR1dWXGyj0RQKixgn0g0KZSqYvAdzgcbP+IRqOh+/t726RwMBjY3hapVErH6KBTuFMQnOtfgwAj8zNJYSmln4H7fPBWJWAVtv+BNfxM2WjsSk+Dp3OTWJz/f3kO/29/j4EoAmYPBp7hsgCXa3E5X3TMmc7fP/370lGexoE/+bNrWSnt23+9vVgcxZdF0U9V0/YDxvTpA5fjxxxdjsHTuEmugNud/+kZ355Dcv+J7VxP86CLa/H905g/FRoruLwPPw7+s/xtEEhx6qlNNS2DR6ORMejdblez2cykrcvl0uo5FovFhTxyMpnYfiUwZSQK+Xzekhy6oVEs/PDwYLGDlr0EfEkWAwAKpLHUo+Df+CixiOQjiiINh0Nj4X2tE2yYP5BpUiNJjIQRDMPQkr3NZqP9fm+JG0BIssp5SAphEr3sgXsGfDxweOaQBB+mkHOQAJLwJ8f7HwlG/tfHSGRmxKlisWit2Bkznl2SyRp5AUSyTqJ8OJw7BOL/jAMvaxw0g0I6Tvyi8QgvVbwk8DLh5V3U7sDA7/d7lctlux/k+STlcRxrOp2q2+1a8s0qIysHjUbDpOK8MGKjJPV0yOPlnJd1pF7Y++3trcUp3/AEuSvznE6n1Wg0NBwO7Zqz2czsE9wgDhKjsHMarRDfiWPUDbGH1f39vd0zOJLNZq1hlq8RLJfLmk6nqtVqVg4gPa1oMA403uJFhhUi5qVWq2k+n1s3T/wVjEsw8un474yRXo74/PggL1wEOYIzS3XPpRR+uZE2j7Q5ZQJ4a+VN3mu3pcv9Mzwr4pc1mVCuT7BCO+qL42jw4Fks2EC07mEYWtMHGjTU6/WLeh1qlFjGpQUvzwsDBUBJsmLndDptOnzGkzHzzwU7Rce7bPa8k/xsNjOHrVQq5jzr9dqaLizD7/X1//X/WHAmePi3/91up9lsZqDC5ofH41M3LLrgwFzx98+ZB79Mi34cBo5mEuPRSPV6XZPJxGwnDM/1QeVy2ZaUYU7T6fRZ4/72mabTqa5evNBisbC2sYfDQZu31280GrZPy2q1Urlctg6D1AnAhubzeXU6HbMBmC0A+XA46Pr62s5FcIRFOh6P6vV62m
w2uru7s2BFspROpy+6IDIH/jNhGNqmi5vNRtm3bMvpdDK2rlgsajAY2JwwRrDIzWZTo9HIgn4YhmYvsFeACGwQ7Yc9k7hYLCzBxhdhwBgbdqvHB9GAw8gCWpkwY/ZBrcVsNtNoNDIfgNHKZrMXdWWwlyRPxAESTe7RB0FAcbfbWXJDkgKw7nY7Y3PxVz6DDxITCKr4J9eGZUbO5OMIKwYkg7D7HF7//ly6gM8TI6iZYUzpHupZP74AfM/icX/+3p/HU4DSr7R4RhDQTiSF/7kjwcj/+hjJfFD3ywoGL7C8YBIneYGSZPGYOO+TLi/39HGEuFQuly8wElwtlUq2EkIxPnOWTqcNP5h3ksvj8WjPlM2eu8kSd0ulkknIptOpGo2GrUTwIk+Cy7Pz+Xw+r0ajYbaAhI3YzQsrLzfPMfJwOBhG0n6cBJzxwIbCMLSXPe6ZXICxpLU/ZAbY/i6MBKPIJ3hGSAPm6ng8Wq7CXLMBMjYFRm42GxsDXjJ52ab2jJc5ru9XZ+j4mWDkrwMj33X84hcu3vji+Kn7BwHTO5JnIJgcJgD5AsFwMplcGKnf64DAQ9BmEkjuoihSs9k0RgU2jDdbmD4fpLl/Jt6zLn7pWDo3fTgczm1laUzBEiuDjpHTMY4A5IM0DpBKpS6Kf2lggWFwcH8Ec5gtzyzsdjtNJhMb83q9blp5goOfM4wFQ/VLtz6QUkPAcvF0Or3Q/2OEfI7rw6Kk0+eCVF4kPLOKFnmxWFgxMvUBAKeXruz3ey0WCxWLRdPm83KWyZw3YMQeeDGqVCqqVqtWSD0cDrXb7UwaEMexdfnxLx4AFWDiX0D8c/P74/Goq6sr3d6ehYjlclmvX79WrVYzVtuzzQQjX4wJO/zceU+nk+r1uj3Pfr+3YmmCxXw+t5fHyWRiYMPLKMGZvWM6nY6CINBgMDAbYux5fjqfxXFsbBq2h9zAv2zX63Ubl1qtZqCJxAJmD1vCtqXzC1sul7NCXewU/8SemTPP6BNrGDfPTJHMwVYDdDwzCYYPnn6lgXnHFmFGpSc2Gnun0Fh6ajPrQZB7IUnAJokhJFuMC4wyCQYMPPaD5v95IswYPdfmw17jT9yjvz8Or8knifhLgJIcf34kGJlgZIKRCUYmGPlxYOS7jl/cNEOSsSi8efr/w3DwJumXKjGw58GMzzCoyBcYKCYYI8IBYPBhX9BKExB4o8ZIMBqW4GENYAa9vpXBZeKWy6Wur68vzgFgsQrCxOHsdAYEcHibhxVjAzx+z5JotVpVsVg0qQLLtH7iuUdWNTyrAKPBs2CYrN7s93tjBNBNE9zY+Z153O/3pt+OoshaqhNQOL/XnTNHzAuaXr97OZpj6RyEuV/uMZM5t/RlCZzzSTJ5DAEMO2Elzp+DwEhCQhIAw8OyvCRjOpGkMLdo3mFIJ5OJVquV/v3f/13L5dICCnUUQRBYstTtdm2sCEQwdwRxxotn9K3p9/u9Go2G6vW6Fc5z71F0LiTHBoIgULlcNlYSdi6bzarT6ejVq1dqtVq2LI+PsGpJ8oNd4QP8PAgCA+R6va5ut6swDK343W9+SrBj6Z/z4O88O89IApHL5VSv19Xv902Hz3zB6Pnxgq0iACNhQv9fKpVsTPg77yuMIzbimTsAli/m1SemXlLgVy687MDfL8kDDQS83EZ6kqLhuyQPkgxYvQSMcfPxAwAi3gBezxN+bIYDAOIeGIPkeP8jwcgEIxOMTDAywchfP0a+6/jFK1zesFh69/pMBtcPLI7Ig/IWCwPC0iVSJ/9mikaTwaDjChPNciETzkRSKDmdThUEwcVbMvdKYOGtXjoHFLSiURSZNI0OSSyXw0Z5potz8n+ChnQuYJzP51aAikN5gMCIAVsAsFarmaGwDF4sFk1P7usDGG//Fs8Gt8yZZzrQ+cLQISVIpZ46ZsGkMkeSNJvNFMexOp2OMYnoh2lviqEimyCAEdCWy6U5DS3oYa0IPLClBGdYOuYCWQIAiq4X5hC9L5I6WFLGkHkm0SGYYwveSSnSzGazevHihe7u7rTZbFStVo1FZMw4N8XAkqwAF3YZ8OP8jDN2PpvNzEb97wgk/J7gDrsK20fgQcf93Xffaf52Hxr8EP9Cq01y4Bnz1WplyQefqVQqZi/NZlOVSsVa4sOMYpckDx5QfMBDHoCUhMSiVquZNt6z7dI5cHt2ClbMB+7j8dxmu9VqGQMMAAO0bORKvMFn2ZQbn/KMOPeQTqdVqUvlvewAACAASURBVFSsDgB2kXjjE21kFSTVXg7h/YQg7gGFMQP0SE7iOL6QbpDk+3N4SYR/eeIZfJLO54lxMKnJ8X5HgpEJRkoJRiYYmWDkx4CR7zo+SA0XRkLg5o2YQOXfCLlpX//jAzHLkgR0vyzKwCO/wJgJbL7zDMyBJNvLhEEicfBvylyDJXr05ofDwWQgBD+ebTAYGKvAz6MoMiaKt990+rz5GwbFMud+v7/QZ9NtiAmUZF1cMEKYPJa4Ac3dbqd2u20BmOujyZXOgQ2AxfGpJWJvK8YbPT/gEcexut2uMSsAHYENSctkMlEmkzHAYj5LpZLm87kFJOarVqtZwGKMAQXsCsPebDaq1+vGxsHA+Pag1J7lcjkrRi6Xy8bO4kwkH2h5keysVisLnHSskWSaZ2yOgEsBahA8Lce3Wi3byR0751yADHYBIHn2BZuTnpb/vQwHRg1n5/5rtZqxaYAugdRviDwej5VKpazou16vazQamQyCAIw/SLrYmBEWm2cHjBeLhbHa+Dl2DEBzX5wTqQsyIvwDOQ6M7XQ6vYgl2CRMM77KuUkGKTznb9h3hFjE2JCw4m/YYyaT0Wq1svsjgBOksQXslaCL3RP8uZ7XyMNO+/H20isv38DusBWeMYoimzMvy+DfnpEjhhJXib983oMWPycZ9WCTHO9/JBiZYGSCkQlGJhj568fIdx0fRFLo35J5u+XGCQQ4wM8tJ3rZgHS55H86nczInv8NMgz+7Q2oUCjYwDDpSBG4DpOKY/t7TqVSpmkm4PguO2wmh7yAyTscDhfL8Eway8SVSsWWu3Ec2L/j8dx+k8nFmAHnXC5nrWwxfknmoCx78zvuAVaG+4QBYVkZACRgwPg0Gg0dj0eNx2NjVx4fH20eT6eTjT8gB0MFGMOKEPTT6bSq1aoFTw8cOCl6fZ9QrNdrzWYzez7GerFYXLCIOBOghe2wDwX3jGOWy2UD2VQqZawgLVCz2ax9jyPSvYr7i6JI0+nU2KFUKmUgQkLFPMPm8fw4u/+ecctkMgaAz/fCIeHgK5/P6+rq6oIF9iz44XDQarWysQNokVL4JIQEgnHj2n7JH1Y6kznvGTIajTSbzYypZZNkui5hPyRLPD9jSUctJA2efcIekacQS7wUATaVxEySNVshSDN/BPBs9txdKpVKWeGyT5AABeIbiSbSLECe2MD5mSfuEekM8YnYw/jg24yvZ4l93PM2yM+4N/8z/zkv4WLOfLz05+f6+Ab261+2fNxJjr/+SDAywcgEIxOMTDDy14+R73rx+iArXARNHoDJgOHBAP0NYwB0x
+EmYUSeTyx/w8+YDA9eBPbT6azp5vzIHDBaGD6/1IvsAHbC/75eryuOL4viuD/fhhJQ5F54Q4eNgKnj85499IwYIMrbv2dhjsfjhTyAscHwPTvnDQX2C6bCX2+9XqvRaBhLSSEtAAc439/f294qPFs+f94JHKeDHfFMGewiz+ABnODnwYQuMwTN0+mk+Xx+3k+sXFa9Xtd8PjdJB78HoNEkw5ZwH+v1WtvtVs1m056PQA4IAIieZaMzlWdJYHmeMyeZTMYYJeyBz8K8UDAKeyM96e0pwmUu8AdYSWQGdAokWZnP59aVERYLKcl6vbaWwIfDQVdXV1Y/4TtG+XHmnryWnODpgxL2GwRnSQes7XK5VLVaVTqdtn11mHPYrlQqdRFkkSOQ8JBUYEskVdiGZ5tIpqQnlvru7u6iiJUY4u0KnwKkAREA2Eu8fIJAxzR+79lY7Jv6Fq7t4xa2gxQLP+X3XgrCffpEmPjK8/A5QJJx8Ywj98u9MO4+VsAaejkVKyYwmMnx/keCkQlGJhiZYGSCkb9+jORzP3d8kC6FBDVYIyaEgOxvgOAnPS0F81A+oDDJ/m95KH7mgzEOTfFvsVjUdru1tpeAFpMHmHk2h7dwjtVqZUwTwdizZsg6uCfAhsAFI8ObuWcCYCYITDyD36QPI+f+uDabK/o3dYzg6urKnpvuOX6pHamGJAvymcy5cxFsFOcEdLlfltlxPGQcsG/SU4DxYLpcLpXP59Xr9S46LeFwBBEChj9XEJw3H5xOp1Y07YEKFo+/gcmElQEkCMTYqqQLZgxARuNMq2WWnKWnBAJWCDv3S+LMg9df++V1pCHMn08sAGYPdBS9TiYTky4wLiTABNhUKqVer6dWq2WbJeInzDfP7tlvmGUCvfdnOj0xFnxJT2yOD54U5cMa8nvugzoNEj+fYPp5L5fLxjZyj+jVPdOZSqUs6UmlnjaJLZVKmkwm5huSzAcI/iQ6/I7xYp44CPJhGP7ZRrPYhAcL7IDfU8T7/DyetfNsuGeXOa8HB+93+JsHC796QozyMYD788k45/DxVJLFZ35OgpQcf/2RYGSCkQlGJhiZYOTHgZHveuH6xZJC3tq5iL8hgo53RowgDEMzWhyPQYZ184bv9avPjY9zAy7NZlNBEFhXE7oV+X2nWOaFYYA1IhCxbOwHH52qX+5Fz8xnvKFxUFDrO6fgCFEUWTEv7KJfMvXdZGD0GG+ACudGAuE1tgA2zF61WjX5AsvTFDtS+MheGN5YcRrGkiDGfDKHjNfxeDSZCQxVsVhUvV63651OJ9M449QwJOl02paZYYXo2HQ4HKz9KUG1Wq1aMlitVu25sBmCF5IRH5ABeezWF5RTFHw4HGyZXZLW67U5Oywm5yUJGAwG9jO/ZwpOTDCDwYrj2Ng2WGdAjv1RptOpadfRwmMTuVzONPSZzHmvj3q9bgXByHVoqdxsNtXtdg1opSf5A4XU+B3tabFBAh7PAlM1Ho8lyTpEAZwcJGT4CcBNET3A32q1TB4hPbFf+BbnwHeLxaLtJ4LUgPnHfhlPzgf4wLz5oIw9B8G57oDaAWKN15djF/gr8hDG1QOmfw5sg8QiDEOzT2yR+yEZxeY84BH38HtsFlv2gAGweIAillJ7Ij215uVzAJ1nQ5PjrzsSjEwwMsHIBCMTjPw4MBL//rnjg9Rw+eU+BsrrIglqAAuDyGeeMwosDxN8paeuPRiAHxwG3Ac8DKLdbtvu3H4gCICACfrZzWZjy7Ee/ObzuV3DM45eZsOz4BDs5QBrwXUJ8Mfj0TaUlWTX9ku/jCHAyZI7Rkx3GxjLyWSix8dHrddrc3yMIp/PW0DGGeI4NodmPCXZPgqLxcI26xsOh9biFekEemE/Xz5I39zcaD6fq9/vW1FuuVy2TQX5W5yPwEpA5NkovGZHd9gmOmvB1GFHtAjGEQEvHJE5hHElkK9WK2N//BwyZz5wwJB4xoUlc5gw5m46nVpgee432FoURRfJAwkStkoXLQI37FcYPi27L5dLLZdLkyfEcaxGo3Fh84wHHYQAKuwYIAeUkWeQHMCGw8B6lpE9cBgX2Gh8GF/1gY+AdzgcLljvRqNh9gU76IvUJRlzCGBh0/hQLnfuysZnqIfAt57HLb7nXvEr4hUSFZ8YRFFkDDT2x7MjN/G1BPzeB2cYOR8jAUq+eHYPjEEQGIAdj0fzWz8HxA3GjfthvLBR/MKPoz+Ht9nkeL8jwcgEIxOMTDAywchfP0a+64Xrg3UphIki6Pk3TwaPN8UgCOxt8fmSHZOE0/KWTZEghgtwcDA5LIFLMoBgYN68eaNut6sgCIwdQTctyUCFCV0ulyoUChaY+R0TQ/tUjM3LJtgwbz6f22Z2nIMiSb9EPx6P7X48+8d5+TlabsbbyzwwxtFopO12q2q1agboQRqN+mazsXNQ0EsXKALr6XTuwrNYLDQejy3oMsa73e5CCgAY+BarzWbTmLBMJqPpdKrT6dzKlkASRZExcpxzvV6bphpggD0FSAAtz0otFgtrk4tTY19o31mupr4AmUQcP7UVXSwWqtVqpt9Gpx5FkfL5vLGlPgB5rX273baAyTJ/Pp+3xISkh4CADZIs1Ot1Y+rW67WxSGx0mslkdHt7a/eLvdHpqlgsajQaqV6vm83wfPl83rTp3o8kmcaaxIVkEN2zJKt1INjA5sIUSzLAg50KgsC6XJHQeJ8YDocqFAp2/81mU9vt1iQwBFE6hvkgSBE+cYVn9ckXz0kw5d4Jvj6R8geMo28FjIwmm82qUqlosVhYbQZdn2AuYT+xEX5H8AYAAApWEfyKBfdN3OOefGJDcs6zA0aMk48XfsWB//O9f6nCN6UnwEuO9z8SjEwwMsHIBCMTjPz1YySf+bnjF69wEag9m8HPPKvB4PBQXotKsANkpDNAYYQMnGc7eJvm39wLXWDoqIKkAUZxsViYVpN7YoK4X4oqYTHq9brp3llyPx6P5lC0DsVwABdfeBxFkTFFMJosr3JfOCPL2oArbBfLrxTo4jz8ezAYKJ1Oq9frqV6v29I95wQIYVxOp/NGf6VSSa1WyxyFsWfZt1gsajweG9PI0nS/31etVrNAgSECQNxLt9u1cUauIUnT6VS5XE6ffPKJsQm+iw6sBPebzWatzS22RODCjpBQYC+wO2j2sR3pSW9+OBysjgFHzGazGgwGenx81OFwsEABCw2gYWOwoz6B4prc2+Fw0Gw2s2VsQIagRktlgACpCLbii5Cz2axtWEnQKpVK1i3qp59+uggE7XZbqVRKi8XCgm+tVjPfSKfTxnCTMCCxgcUFbBmfKIrUbDYtsfL7mvgiYwDpuXwKMAHomGuYPLpEef8meWAe0PR7CQ8F18fjUZPJRKPRSPP5XIPBwOxisVgYgBMzfID2bBySjXT6XDeCHItGAfl8Xv1+X+12W61Wy/b6mUwm5kM8l/TEopLk+JhG3OQ5Saj9F3aOr3mZBuNJ3QPPge8Tn4lB3BN+wjhLly1yAZt3gUly/PyRYGSCkQlGJhiZYOTHgZHvOj6YpNAvb/PQ0lMLRh7cL8F5
Xaa/eb90iqHz914T7AHhdDrZhNzf39s5N5uNJpOJ4jhWq9Wy5WwcDT02y5iwDYVCQfV6XaVSSePx2DbgA8AymYztQo6DsLGdJGOZaLXKsitjheFst1sdj0f7TCbztFcEQQm25XQ6GYPmg1qz2dR4PLYl1kwmYzKCMDx3/CGwSrJAgdMTNOhcFEWR7VYvnQP8bDZTvV5XJpNRp9NRuVxWq9XSF198YZITGLZisahOp6NGo2HMHewErCDzfnt7q/l8bgYL+wVDEkXndrJoxgHczWZjmvFXr15ZoCbQwNDBynNvu93OWBRvT2iUAeZMJqN2u21sC3aG0+52OwNd2tjSrhV2t1qtmq0jg8HOU6mUlsvlBSB4KQf3g89IT8E0nU5rPB4b04otcF720yFxgGFm3xMYIuQ0MF7sx0LwhnmGlRwOh5rNZhqNRsYUDYdD3d/fm+9htwRFGG58luV6gIY6AoLncrm08VosFsa+B0FgbWY3m40Vh8NY+6AI2JEo1Wo19ft9RdG5Exufg8nCFkhEkMlQS8EXNkSsAHzX67V6vZ6KxaJ6vZ7JVgBLEh+embhIUsQcI/cBvPgMCYq3gzAMzZb9nCH14VmiKLLNPolT3AuMLDHLvwj42E2cYe6S4/2PBCMTjEwwMsHIBCN//Rj5rpeuD9bjlwvzxunfuGGR/Ju7l1ewPM4yZaFQ0Gw2sweEKYEl8Ut8sCKeodrtdprNZjb5sGbS01KmX17k/tPp8w7z6MRbrZYmk4mOx/NmdL4VaLlcVrvd1uPjo52f5/BvuxRSZrNZa7cLw0BAxyhgvVjihYXYbDbG5LDrOQGC5w7D8270OCUOh8HQMtXLK3huv2+HJHW7XWUy50Lq6XRq165Wq/r2228vCjbRiqNVpnj3cDio0+koDEML8rAmsDxRFKnX6ymfz2s8HqtUKlmSAfPg5zWfz1srVb9TOfYBA4sdVqtVAwCckU5F3jHL5bJdk93RJVnHI5gt2gJjp9gmBZgw1QQ0/p5uULCjJF2pVMrYNtgtQAAQw2YBEiQE2AG+Rccm5jCTyejx8VHValVxHGsymZhPetYQezwej7Z/DfOORABJQ7FYVBAEWiwWmkwm+vTTTy0BIUlrt9tmwzBCSH0kmdaaawKw2BuJTxzHBnAAH13CUqmUASm6c7T7MF9cEz05HcAI8kEQ2N+QPMC4Y0uZTEbL5dKkW1wPwCNZuLu70+3trTHmo9HI5DsUdxP7AHySBuyH36/XaxWLRWN9iYs+8fG+SswB7DgviWkcxzamPqkj9gIkvt0wYE+chYFNjl92JBiZYGSCkQlGJhj58WLkB+lSiBHzVumlBzgYb6jcFBIJBhYNOM7kB16SGQ8MDwwWAyDJWCiKJ0+nk+7u7lQsFs2hD4eDOQWMBxppvkqlki3Xs7yfzWb12Wefqd/vm777/v5ew+FQmUzG5A2wjkEQWHEk4FKpVFSv1zUcDm3SYBUI8rCJfpl4sVhoOByaPIOlZJ4nDEML4AQDNuvzbCngy9hhNOPxWG/evDHNdRAEajQaevHihbFZ19fXxgpSnPjixQsdj+c2wujb0SwzFshW/PxIT1rb0+lkLAnsDDvWR1Gkfr+vq6srC6IwiBS5Xl1dWUD47LPPtFgsTKrBs2y3WwM6mChJ5oAwbsfjUfV6XfV6Xf1+35a+qY1gc0uAaLlcGrOH3p3f+9aoOOTpdLLuRoAH4HI6nff3mM1mdl4AA1uWnhi8arWqq6srNRoN9Xo99ft9C2YEnvl8bvrpZrNpiY3fBBSAIyGCgST5wUd5ziAIVK1WTV4Di8uzMM+SjJnmgHn0RcnECAJ+GIZWpzCZTDQcDiXJgt9qtVIcx6bR/+1vf6tOp6Nms3kRcEkiYIzn87kqlYra7baNg3RmKJn75XJp9+slFUijYOBhQdGO12o1zWYzPTw86OHhwVhdEh3kEjBrHD5B9qsS0hNzN5vNLsZ+u91aAsBzsBoBoBNbYZrL5fKFzAUwR/YF6CLDIOaymgLDR2xPjvc7EoxMMDLByAQjE4z8ODDyXccH2/hYkg0IxsEyqfS0LwIaVF9c6IOPNzi/TAfbxUNxXpYYcUiuz98xMQT5drut5XKpUqlkBkGXF36GDnu5XKrX60k6OwYbQ3Y6HY1GI2OiABMCNEGFnd8bjYbq9boZa7FYtGDhJSP8fafT0WAwMJ06zwYTyr3CPPliRwCSJdPT6WmHdPYm8W/wktTpdDSbzTQYDFSpVKwgkWLc4/Gofr9vzMJ6vdZut1O/379gTCmYxCApdN5ut3Y/dPKBraAYEv3/8XjUbDYztgGWkk5Io9HI5BzUBlQqFX3zzTe2VM3+GqPRSOVy2SQrBPf1eq03b95YsoCdTCYTG5t+v6/5fK5vv/1WlUrFClaRF8DwIa1ZLBY6Ho8GehRNE2xYXgfsYZEJkizT+z1HttutbWSJJvrm5saCO6xeHMeqVqtWaAsLTFG0T+RIBNDiB0Gg29tbY3ZhcwnsyBO473Q6bTKU0+mkh4cHffbZZyoWi7q7u1Mul9NoNLKNQulmBpsHY4kN4e+A6+Fw0Hw+V7Va1Xa71ePjo0kiUqmUte/NZrPqdrsGwCSRSGJIEGezmZrNpiUTzJtfYSBppRMVKwvpdNrAFTAhoQ3DUJPJRL1eT/v93mwAm0ilUsYek2z74nFiHswocSqKImNtkZrUajUVi0VjUwFrDsbYJwBINgAqnpMmDCSWPAu2ir1wj14ehn8mx/sfCUYmGJlgZIKRCUb++jHyXccvfuHyA+KX7pkwb8iAzOFwuOgygsaXf8M4wcB4CcJzZoTrMjgMBGwaOs1ms2n3ixPx94fDuaUmrB0tcu/u7nR9fa1Xr17p22+/1Ww2MzZjMpnoxYsX2u/31maUjj04Asa1Wq3UbDZVLpf1zTffKJfLqdfrGVMDQzifz3V9fa0oijQYDIyRgPGioBW2CUNB48rSMIafTqdtXD2bdTwerVsRgfbq6sqMbrfb6euvvzZ2jbf6x8dHC6ydTkebzcaSAlgXb3iMbbPZvJDLsI8CycJisbDgCggDMAQuWDdqBAaDgQWIXC6nzz//3JZ8AVu63rA8Pp/Plclk1Gq1tNvtNJ/P7fNxHKtWq1nRKOMznU7VaDQURZEFQdgQDxRhGGo+nxtAI19AakIwoUMYLI4PpiRDvtgznX7ayd7vRfPw8KD7+3u12+2L9sTSOTkg2MOCIhEpFouaz+c6nU7627/9W/30008Kw9DsL5fLXfgRASaXy5lef7fbaTAYWGI0Ho9Vq9XU7XZ1d3cnSXY+2OjHx0d7PoIrrFg+f27FjF0GQaCHhwe1Wi0ryJeeNqcslUqWOPzzP/+zFbTj3/wfOQBjh/8QvMvlsqbTqd0PMQb2D609zwnI8P1isdDj46MajYYlv7Br2HK1WtVoNDKWLJfLGVuIf2GjBG7YZpi/QqFgtrfZbCxx4++YP2wyk8lYly/GggSF2MxnaSzgZSE+pgK4gCyJV3L89UeCkQlGJhiZYGSCkQlG/uIXLpb9+eLG+d6zotwgRs5ArFYrCwws6RGMYKM
VuHYDCIaDQqxQe7u7thtVrhcDgwefJkbN26FTt27IDf78d//Md/4IILLpCCaiwuZrPZEI/H0dTUJAGCVMJyuRyi0SjmzJmD5557DoODg7j88ssRDAZlQi9atAhWqxUrV66U4D+Xy4UzzzwT8+bNk0nDauKsBt7V1YVoNIrt27fD5/Nh8uTJAKqdzwJ0iUQCu3fvht1uF5WiUCjUBCXTV51BeVQINTS+6RitoFksFkSjUSQSCQDVQqEbN27EwMAAMpkMzjjjDKxatQqbNm2Cw+GQjZjT6cTevXthsVikyGcwGEQymUQ2m4XJZMLu3bvxySef4OKLL4bVasU//dM/yXoh6urqcM011+AHP/gBtm7diieffBLhcBjnnnsunnrqKVl3Pp8PwWAQu3fvluKwJJH9vZfdbkc4HMbOnTsPa6OnboqBfTE3zc3NSCaTcLlcuPbaa7F+/Xr89re/RVtbG84880xYLBb4/X5ccsklAPYpklarFWeffTYWL16MXC6HG2+8EbNnz8aNN96IUCiE1atX44UXXkB9fb2QQGNjI5qamuQ5pVIJv/nNb/DFF1/ggQcewPLly4WsWltbUSqV8Oqrr6K3txcNDQ1obW3Vh60/AjRHao7UOH6gOfLA/QJojlQxpgcuVo3+4IMP0NXVhVQqhWXLlonq9eKLL2LTpk0AgLlz5+Kyyy7De++9h3vuuQdOpxNutxt/8Rd/Abvdjuuuuw6/+tWvcMsttyCVSsHn82HWrFlob2/HK6+8gg0bNqCrqwuXX345WlpacP755+Ppp5/G0NAQLrzwQphMJpx33nlYtWoV1q5dC6fTiWw2i9mzZ0smGDVzkmEYaGxsRFtbG95//3383d/9HebOnYtLL70Ud999NyZOnIjHH38cLpcLF1xwAVatWoWbb74ZX3zxBcaPH49Fixahvb0da9aswY4dO5BIJDB//nx89NFHGBkZQS6XQz6fl/9qNx6NbwvUuVosFtHV1YXGxkZs2rQJu3btwr333ou2tjbs2rULK1euxF/91V8hGo3CZrOJ8bZarUJAhmEgm80iHo8jFApJkOy6deswODgom7/6+noMDw/XGDyHwwGz2YzPPvsMP/vZzxCPx3Hrrbdi6tSpNVnVkskk4vG4qGr7eyd1w5fL5RCLxRAKheD1eg/byPIZ9A3v6OhAMBhEIBDAhg0b8NBDD6FcLuPmm2/GlClTAOzLBGWxWGC321Eul9HW1obbbrsNc+fORU9PD+x2O3p6ehCLxTAwMIBTTz0VV199NZ5//nmsX78e06dPR6FQkBsSfn7lypVwOBzYuXMnhoeHsXHjRqxevRr//d//jSlTpuC2227D1q1b8fjjj+ODDz7ApZdeikqlcsBMVxpjC82RmiM1jj9ojjwwNEfuw5geuAKBAJYsWQKHw4GmpibMmDEDF1xwATZu3Ihzzz0Xg4OD4kpRKpVw+umn48EHH8SLL76IZDKJ+++/H9/97ndhNptx0003wefzYdOmTZg+fTouueQSzJ07F8FgEOFwGJ2dnbjkkktwySWXIBQK4YwzzsCVV16JkZERLFiwAIZhYPHixXj44YexZs0aFAoF3HHHHfje976HZDKJ73//+zjllFNqTvGRSATLly9HMBhEX18fpk2bBo/Hg+nTp6OlpQWVSgV2ux0//vGP0dzcjM2bN2Px4sW46qqrMGvWLIRCIbz00kvo6OjAlVdeiblz56KtrQ0nnXQSotGopK6MxWKygDQ0vskwDANNTU2wWCzYu3cvBgcHsW3bNjQ3N2NwcBDJZBITJkzAnDlzYLfb8cknn+Dcc8/FWWedVfOccrmMN998E2+//TZ27tyJ1tZWdHZ2oqWlBcFgEMPDw1izZg0ikQimT58Oh8OBq6++umazR2QyGTz++OMYGBjAihUrMG/ePJTLZbS0tOCDDz5AIpFAPB5HT08PLr30UlitVhhGta6G2WwW4zlr1iysX78eX375JUqlEj777DPMnTsXoVDosPtn9uzZWLNmDdrb29Ha2orf//73aGtrQ7FYxOOPPw6z2Yzbb78dM2fOlDicrq4uPPbYY5g7dy6WLl0Km82Gf/7nf0apVMK8efMki1tdXR1+9atfYfv27Xj00UexcOFCrFq1Ch0dHTAMA1u3bsVTTz2FhQsX4vzzz5dsd7lcDh9++CHK5TL27NmDXbt24bPPPoPf78ef/umfolAoIJlMoru7ewxmiMaRQHOk5kiN4wuaIw8OzZH7MCYHLhrkKVOm4NFHH4XZbBY1rq6uDmeccQamTp1aU/isrq4O4XAYS5cuxbx581AoFNDQ0ACPx4NKpYJx48bhxhtvRCqVgtPpRCgUgtlsxowZM9DW1oahoSH4/X5JHRsIBHDHHXcAAEKhEEwmE7xeL6666iosXLgQ5XIZwWBQ2nTLLbfA5XLVTFKTyYR58+Zh0qRJyOfzCIVC4mdqs9nkPSdOnIibbroJqVQKDocDkUgEZrMZM2fOxEknnYRsNgu/3w+n04nx48fDZrOJn2i5XEaxWJQJq116NL6JUNMxn3baaTjllFPwxBNP4JVXXsEnn3yCFStWYOrUqXjmmWdw22234dRTT8Unn3yCa665Bna7HVar9SsFBWfMmIHp06fjl7/8JZ577jl0dXXhoosuQigUwvbt27F+/XosWLAAkUgEAOByuWqUQ6rub731Fl588UWcfPLJePPNN/H6669j/vz5OOecc7B27Vrce++9yGQycDqdWLBggaj0w8PDUqvDbDZj6dKlePrpp7Fs2TI0NDQgl8th8eLFEpNyMJcmrtsrr7wSTz31FK6//npMmTIFu3fvxk033YR33nkHr7/+OmbMmIH/+Z//wcsvv4yFCxfirLPOgs1mw4QJExCJRITgvvOd7+Cee+6B1+vFtm3b4PP5sGjRImzZsgWPPPII/H4/gGoihebmZumftrY2BINBmEwmRKNR3H///SgUCshms3jppZcwf/58XHbZZejq6sK//Mu/4MYbb0RnZydcLhemTp06tpNG44DQHKk5UuP4guZIzZFHCvN99913VA944okn7lu+fDkMwxDfS6/XC5fLJQbYbrfD5/PB7/cjEAggEAjA6/VKsLDX64Xf75esRzxp2+12eRYD3sxmMxwOB7xeb00aXcMw4Ha74fF4aq7+LBYL3G53zedJdpzwqoJnNpvhcrng8/mk+JvT6YTT6aypPM22sbgdUCUjp9Mp32UymeBwOGC32+FwOOBwOOB0OmsKs2ky0fgmg+tq/PjxMAwDhUIBF198MZYuXYpJkyZh6tSp4gZ00UUX4aabbhJ3g9F/PB4PJkyYIC4GS5cuxYUXXohAIIBEIoGBgQFceeWVmDhxoqx39Q99sHft2gWz2YxoNCpxIo2NjViwYAHGjx+PgYEB+Hw+3HDDDZg3b56s82QyCb/fjyVLlsgaP/nkk5FOp+HxeHDttddi8eLFNRvHg6FSqRavnTFjBvr6+mA2m3HNNdfg4osvxp49e2C1WhGJRKSNbW1tmDhxIjweD6ZNm4a2tjbY7XYA1YDdpqYmdHd3o6GhAbff
fjvOOOMMjB8/HlarFVu2bMHAwACuuOIKXH755airq4PX68W0adPQ3Nwstsrj8SAYDMJms+GDDz7AOeecg3PPPRennHIK6uvrkUgk0NjYiOuvvx7nnHPOYb+rivvvv7/nvvvue+KoJtYJBM2RmiM1jl9ojjwwTkSOfOKJJ3DDDTfcv9+5crR+0rNnz6589NFH0rlf+QLDOGJf7AP9zsH+ffT3H+4zRnfiodp6pG071LM0NL7JYEAvq9aPjIzIpsgwDHEBKpVKsNvtqKurq9l0qc8BqgUbh4eHUSwWZYNlMplEcaqrq5Nq8+oz+PtU4XK5XM1647OKxSKGhoZQqVTgdrvFVQIAhoaGkMvlJBMS03MPDg6iUqmgrq6uZkN7OH0DVP3N0+m0BD7bbDYhELWN3EgCqPGNp6/8yMiIBEB7PB7px2w2K6qj2+2WWweOzf7aWyqVEIvFUFdXB4/Hg3K5jHw+j+HhYclY9XUOW/9/TD6uVCqzj+iXTmBojjz4vx/qWRoa32Rojjx43wAnFkfOnj0bH3300X5/YUwPXBoaGscf1PpANF40dGpgLQBRww/1HOCrWZBUA3u4z+B/+b0Her5hy11OXQAAAj1JREFUVONiRrtBjA4MPlLjyu9i4DPbMrqd/Hc1CcFosH3q+6j9Qhyoffsj79HP589G31ocCfSB68igOVJD4/iG5siD9w3bThzPHHmwA9cxqcOloaFx/EA1yPv798PN3nOw5xzKH3z05/dnBA/VztFkQWJU2/91FfXRbT+YoT7Qv+/vGfvrl8Np4/5uE77OczQ0NDQ0Dg7NkYeG5kh94NLQ0DgMHKlhPFbPOdTnDvbzA5HM0WAsDfLBCHKsnqehoaGhMfbQHPn12nO0z/o2caSuLKihoaGhoaGhoaGhoXGMcNQxXIZhxAB0jE1zNDQ0NDS+4RhfqVQif+xGfFugOVJDQ0PjhMEB+fGoD1waGhoaGhoaGhoaGhoa+4d2KdTQ0NDQ0NDQ0NDQ0DhG0AcuDQ0NDQ0NDQ0NDQ2NYwR94NLQ0NDQ0NDQ0NDQ0DhG0AcuDQ0NDQ0NDQ0NDQ2NYwR94NLQ0NDQ0NDQ0NDQ0DhG0AcuDQ0NDQ0NDQ0NDQ2NY4SjPnAZhnH6WDTkjwXDMOYZhtHwx27H14FhGKd/i9t+tmEYgT92O74uDMM441vc9+d8y/v+rG9r3wPf/v7XODJ8mzlS8+MfD99mjvw28yPw7bfR32aO/Lb3/cGg63BpaGhoaGhoaGhoaGgcI2iXQg0NDQ0NDQ0NDQ0NjWMEfeDS0NDQ0NDQ0NDQ0NA4RtAHLg0NDQ0NDQ0NDQ0NjWMEfeDS0NDQ0NDQ0NDQ0NA4RtAHLg0NDQ0NDQ0NDQ0NjWOE/weewGggFgupFgAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "tags": [], + "needs_background": "light" + } + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "lMombPr0GF9a", + "colab_type": "text" + }, + "source": [ + "The images used in this demo are from the [Snapshot Serengeti dataset](http://lila.science/datasets/snapshot-serengeti), and released under the [Community Data License Agreement (permissive variant)](https://cdla.io/permissive-1-0/)." + ] + } + ] +} \ No newline at end of file diff --git a/models/research/object_detection/colab_tutorials/object_detection_tutorial.ipynb b/models/research/object_detection/colab_tutorials/object_detection_tutorial.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..9063f2cd33aa8fffe160b138b3a3ec69c0d3abdb --- /dev/null +++ b/models/research/object_detection/colab_tutorials/object_detection_tutorial.ipynb @@ -0,0 +1,712 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "V8-yl-s-WKMG" + }, + "source": [ + "# Object Detection API Demo\n", + "\n", + "\u003ctable align=\"left\"\u003e\u003ctd\u003e\n", + " \u003ca target=\"_blank\" href=\"https://colab.sandbox.google.com/github/tensorflow/models/blob/master/research/object_detection/colab_tutorials/colab_tutorials/object_detection_tutorial.ipynb\"\u003e\n", + " \u003cimg src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /\u003eRun in Google Colab\n", + " \u003c/a\u003e\n", + "\u003c/td\u003e\u003ctd\u003e\n", + " \u003ca target=\"_blank\" href=\"https://github.com/tensorflow/models/blob/master/research/object_detection/colab_tutorials/colab_tutorials/object_detection_tutorial.ipynb\"\u003e\n", + " \u003cimg width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /\u003eView source on GitHub\u003c/a\u003e\n", + "\u003c/td\u003e\u003c/table\u003e" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "3cIrseUv6WKz" + }, + "source": [ + "Welcome to the [Object Detection API](https://github.com/tensorflow/models/tree/master/research/object_detection). This notebook will walk you step by step through the process of using a pre-trained model to detect objects in an image." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "VrJaG0cYN9yh" + }, + "source": [ + "\u003e **Important**: This tutorial is to help you through the first step towards using [Object Detection API](https://github.com/tensorflow/models/tree/master/research/object_detection) to build models. If you just just need an off the shelf model that does the job, see the [TFHub object detection example](https://colab.sandbox.google.com/github/tensorflow/hub/blob/master/examples/colab/object_detection.ipynb)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "kFSqkTCdWKMI" + }, + "source": [ + "# Setup" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "awjrpqy-6MaQ" + }, + "source": [ + "Important: If you're running on a local machine, be sure to follow the [installation instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md). This notebook includes only what's necessary to run in Colab." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "p3UGXxUii5Ym" + }, + "source": [ + "### Install" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "hGL97-GXjSUw" + }, + "outputs": [], + "source": [ + "!pip install -U --pre tensorflow==\"2.*\"\n", + "!pip install tf_slim" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "n_ap_s9ajTHH" + }, + "source": [ + "Make sure you have `pycocotools` installed" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "Bg8ZyA47i3pY" + }, + "outputs": [], + "source": [ + "!pip install pycocotools" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "-vsOL3QR6kqs" + }, + "source": [ + "Get `tensorflow/models` or `cd` to parent directory of the repository." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "ykA0c-om51s1" + }, + "outputs": [], + "source": [ + "import os\n", + "import pathlib\n", + "\n", + "\n", + "if \"models\" in pathlib.Path.cwd().parts:\n", + " while \"models\" in pathlib.Path.cwd().parts:\n", + " os.chdir('..')\n", + "elif not pathlib.Path('models').exists():\n", + " !git clone --depth 1 https://github.com/tensorflow/models" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "O219m6yWAj9l" + }, + "source": [ + "Compile protobufs and install the object_detection package" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "PY41vdYYNlXc" + }, + "outputs": [], + "source": [ + "%%bash\n", + "cd models/research/\n", + "protoc object_detection/protos/*.proto --python_out=." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "s62yJyQUcYbp" + }, + "outputs": [], + "source": [ + "%%bash \n", + "cd models/research\n", + "pip install ." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "LBdjK2G5ywuc" + }, + "source": [ + "### Imports" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "hV4P5gyTWKMI" + }, + "outputs": [], + "source": [ + "import numpy as np\n", + "import os\n", + "import six.moves.urllib as urllib\n", + "import sys\n", + "import tarfile\n", + "import tensorflow as tf\n", + "import zipfile\n", + "\n", + "from collections import defaultdict\n", + "from io import StringIO\n", + "from matplotlib import pyplot as plt\n", + "from PIL import Image\n", + "from IPython.display import display" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "r5FNuiRPWKMN" + }, + "source": [ + "Import the object detection module." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "4-IMl4b6BdGO" + }, + "outputs": [], + "source": [ + "from object_detection.utils import ops as utils_ops\n", + "from object_detection.utils import label_map_util\n", + "from object_detection.utils import visualization_utils as vis_util" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "RYPCiag2iz_q" + }, + "source": [ + "Patches:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "mF-YlMl8c_bM" + }, + "outputs": [], + "source": [ + "# patch tf1 into `utils.ops`\n", + "utils_ops.tf = tf.compat.v1\n", + "\n", + "# Patch the location of gfile\n", + "tf.gfile = tf.io.gfile" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "cfn_tRFOWKMO" + }, + "source": [ + "# Model preparation " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "X_sEBLpVWKMQ" + }, + "source": [ + "## Variables\n", + "\n", + "Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing the path.\n", + "\n", + "By default we use an \"SSD with Mobilenet\" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "7ai8pLZZWKMS" + }, + "source": [ + "## Loader" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "zm8xp-0eoItE" + }, + "outputs": [], + "source": [ + "def load_model(model_name):\n", + " base_url = 'http://download.tensorflow.org/models/object_detection/'\n", + " model_file = model_name + '.tar.gz'\n", + " model_dir = tf.keras.utils.get_file(\n", + " fname=model_name, \n", + " origin=base_url + model_file,\n", + " untar=True)\n", + "\n", + " model_dir = pathlib.Path(model_dir)/\"saved_model\"\n", + "\n", + " model = tf.saved_model.load(str(model_dir))\n", + " model = model.signatures['serving_default']\n", + "\n", + " return model" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "_1MVVTcLWKMW" + }, + "source": [ + "## Loading label map\n", + "Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. 
Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "hDbpHkiWWKMX" + }, + "outputs": [], + "source": [ + "# List of the strings that is used to add correct label for each box.\n", + "PATH_TO_LABELS = 'models/research/object_detection/data/mscoco_label_map.pbtxt'\n", + "category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "oVU3U_J6IJVb" + }, + "source": [ + "For the sake of simplicity we will test on 2 images:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "jG-zn5ykWKMd" + }, + "outputs": [], + "source": [ + "# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.\n", + "PATH_TO_TEST_IMAGES_DIR = pathlib.Path('models/research/object_detection/test_images')\n", + "TEST_IMAGE_PATHS = sorted(list(PATH_TO_TEST_IMAGES_DIR.glob(\"*.jpg\")))\n", + "TEST_IMAGE_PATHS" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "H0_1AGhrWKMc" + }, + "source": [ + "# Detection" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "f7aOtOlebK7h" + }, + "source": [ + "Load an object detection model:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "1XNT0wxybKR6" + }, + "outputs": [], + "source": [ + "model_name = 'ssd_mobilenet_v1_coco_2017_11_17'\n", + "detection_model = load_model(model_name)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "yN1AYfAEJIGp" + }, + "source": [ + "Check the model's input signature, it expects a batch of 3-color images of type uint8: " + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "CK4cnry6wsHY" + }, + "outputs": [], + "source": [ + "print(detection_model.inputs)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "Q8u3BjpMJXZF" + }, + "source": [ + "And returns several outputs:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "oLSZpfaYwuSk" + }, + "outputs": [], + "source": [ + "detection_model.output_dtypes" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "FZyKUJeuxvpT" + }, + "outputs": [], + "source": [ + "detection_model.output_shapes" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "JP5qZ7sXJpwG" + }, + "source": [ + "Add a wrapper function to call the model, and cleanup the outputs:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "ajmR_exWyN76" + }, + "outputs": [], + "source": [ + "def run_inference_for_single_image(model, image):\n", + " image = np.asarray(image)\n", + " # The input needs to be a tensor, convert it using `tf.convert_to_tensor`.\n", + " input_tensor = tf.convert_to_tensor(image)\n", + " # The model expects a batch of images, so add an axis with `tf.newaxis`.\n", + " input_tensor = input_tensor[tf.newaxis,...]\n", + "\n", + " # Run 
inference\n", + " output_dict = model(input_tensor)\n", + "\n", + " # All outputs are batches tensors.\n", + " # Convert to numpy arrays, and take index [0] to remove the batch dimension.\n", + " # We're only interested in the first num_detections.\n", + " num_detections = int(output_dict.pop('num_detections'))\n", + " output_dict = {key:value[0, :num_detections].numpy() \n", + " for key,value in output_dict.items()}\n", + " output_dict['num_detections'] = num_detections\n", + "\n", + " # detection_classes should be ints.\n", + " output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int64)\n", + " \n", + " # Handle models with masks:\n", + " if 'detection_masks' in output_dict:\n", + " # Reframe the the bbox mask to the image size.\n", + " detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(\n", + " output_dict['detection_masks'], output_dict['detection_boxes'],\n", + " image.shape[0], image.shape[1]) \n", + " detection_masks_reframed = tf.cast(detection_masks_reframed \u003e 0.5,\n", + " tf.uint8)\n", + " output_dict['detection_masks_reframed'] = detection_masks_reframed.numpy()\n", + " \n", + " return output_dict" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "z1wq0LVyMRR_" + }, + "source": [ + "Run it on each test image and show the results:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "DWh_1zz6aqxs" + }, + "outputs": [], + "source": [ + "def show_inference(model, image_path):\n", + " # the array based representation of the image will be used later in order to prepare the\n", + " # result image with boxes and labels on it.\n", + " image_np = np.array(Image.open(image_path))\n", + " # Actual detection.\n", + " output_dict = run_inference_for_single_image(model, image_np)\n", + " # Visualization of the results of a detection.\n", + " vis_util.visualize_boxes_and_labels_on_image_array(\n", + " image_np,\n", + " output_dict['detection_boxes'],\n", + " output_dict['detection_classes'],\n", + " output_dict['detection_scores'],\n", + " category_index,\n", + " instance_masks=output_dict.get('detection_masks_reframed', None),\n", + " use_normalized_coordinates=True,\n", + " line_thickness=8)\n", + "\n", + " display(Image.fromarray(image_np))" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "3a5wMHN8WKMh" + }, + "outputs": [], + "source": [ + "for image_path in TEST_IMAGE_PATHS:\n", + " show_inference(detection_model, image_path)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "DsspMPX3Cssg" + }, + "source": [ + "## Instance Segmentation" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "CzkVv_n2MxKC" + }, + "outputs": [], + "source": [ + "model_name = \"mask_rcnn_inception_resnet_v2_atrous_coco_2018_01_28\"\n", + "masking_model = load_model(model_name)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "0S7aZi8ZOhVV" + }, + "source": [ + "The instance segmentation model includes a `detection_masks` output:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "vQ2Sj2VIOZLA" + }, + "outputs": [], + "source": [ + "masking_model.output_shapes" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + 
"colab_type": "code", + "id": "AS57rZlnNL7W" + }, + "outputs": [], + "source": [ + "for image_path in TEST_IMAGE_PATHS:\n", + " show_inference(masking_model, image_path)" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "nLlmm9JojEKm" + }, + "outputs": [], + "source": [ + "" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "last_runtime": { + "build_target": "//learning/brain/python/client:colab_notebook", + "kind": "private" + }, + "name": "object_detection_tutorial.ipynb", + "private_outputs": true, + "provenance": [ + { + "file_id": "1LNYL6Zsn9Xlil2CVNOTsgDZQSBKeOjCh", + "timestamp": 1566498233247 + }, + { + "file_id": "/piper/depot/google3/third_party/tensorflow_models/object_detection/object_detection_tutorial.ipynb?workspaceId=markdaoust:copybara_AFABFE845DCD573AD3D43A6BAFBE77D4_0::citc", + "timestamp": 1566488313397 + }, + { + "file_id": "/piper/depot/google3/third_party/py/tensorflow_docs/g3doc/en/r2/tutorials/generative/object_detection_tutorial.ipynb?workspaceId=markdaoust:copybara_AFABFE845DCD573AD3D43A6BAFBE77D4_0::citc", + "timestamp": 1566145894046 + }, + { + "file_id": "1nBPoWynOV0auSIy40eQcBIk9C6YRSkI8", + "timestamp": 1566145841085 + }, + { + "file_id": "/piper/depot/google3/third_party/tensorflow_models/object_detection/object_detection_tutorial.ipynb?workspaceId=markdaoust:copybara_AFABFE845DCD573AD3D43A6BAFBE77D4_0::citc", + "timestamp": 1556295408037 + }, + { + "file_id": "1layerger-51XwWOwYMY_5zHaCavCeQkO", + "timestamp": 1556214267924 + }, + { + "file_id": "/piper/depot/google3/third_party/tensorflow_models/object_detection/object_detection_tutorial.ipynb?workspaceId=markdaoust:copybara_AFABFE845DCD573AD3D43A6BAFBE77D4_0::citc", + "timestamp": 1556207836484 + }, + { + "file_id": "1w6mqQiNV3liPIX70NOgitOlDF1_4sRMw", + "timestamp": 1556154824101 + }, + { + "file_id": "https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb", + "timestamp": 1556150293326 + } + ], + "version": "0.3.2" + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/models/research/object_detection/core/__init__.py b/models/research/object_detection/core/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/models/research/object_detection/core/__init__.py @@ -0,0 +1 @@ + diff --git a/models/research/object_detection/core/anchor_generator.py b/models/research/object_detection/core/anchor_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..69e29d84db8817c79f00f4fdf4ee4aa14b9828a1 --- /dev/null +++ b/models/research/object_detection/core/anchor_generator.py @@ -0,0 +1,171 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Base anchor generator. + +The job of the anchor generator is to create (or load) a collection +of bounding boxes to be used as anchors. + +Generated anchors are assumed to match some convolutional grid or list of grid +shapes. For example, we might want to generate anchors matching an 8x8 +feature map and a 4x4 feature map. If we place 3 anchors per grid location +on the first feature map and 6 anchors per grid location on the second feature +map, then 3*8*8 + 6*4*4 = 288 anchors are generated in total. + +To support fully convolutional settings, feature map shapes are passed +dynamically at generation time. The number of anchors to place at each location +is static --- implementations of AnchorGenerator must always be able return +the number of anchors that it uses per location for each feature map. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from abc import ABCMeta +from abc import abstractmethod + +import six +from six.moves import zip +import tensorflow.compat.v1 as tf + + +class AnchorGenerator(six.with_metaclass(ABCMeta, object)): + """Abstract base class for anchor generators.""" + + @abstractmethod + def name_scope(self): + """Name scope. + + Must be defined by implementations. + + Returns: + a string representing the name scope of the anchor generation operation. + """ + pass + + @property + def check_num_anchors(self): + """Whether to dynamically check the number of anchors generated. + + Can be overridden by implementations that would like to disable this + behavior. + + Returns: + a boolean controlling whether the Generate function should dynamically + check the number of anchors generated against the mathematically + expected number of anchors. + """ + return True + + @abstractmethod + def num_anchors_per_location(self): + """Returns the number of anchors per spatial location. + + Returns: + a list of integers, one for each expected feature map to be passed to + the `generate` function. + """ + pass + + def generate(self, feature_map_shape_list, **params): + """Generates a collection of bounding boxes to be used as anchors. + + TODO(rathodv): remove **params from argument list and make stride and + offsets (for multiple_grid_anchor_generator) constructor arguments. + + Args: + feature_map_shape_list: list of (height, width) pairs in the format + [(height_0, width_0), (height_1, width_1), ...] that the generated + anchors must align with. Pairs can be provided as 1-dimensional + integer tensors of length 2 or simply as tuples of integers. + **params: parameters for anchor generation op + + Returns: + boxes_list: a list of BoxLists each holding anchor boxes corresponding to + the input feature map shapes. + + Raises: + ValueError: if the number of feature map shapes does not match the length + of NumAnchorsPerLocation. 
+ """ + if self.check_num_anchors and ( + len(feature_map_shape_list) != len(self.num_anchors_per_location())): + raise ValueError('Number of feature maps is expected to equal the length ' + 'of `num_anchors_per_location`.') + with tf.name_scope(self.name_scope()): + anchors_list = self._generate(feature_map_shape_list, **params) + if self.check_num_anchors: + with tf.control_dependencies([ + self._assert_correct_number_of_anchors( + anchors_list, feature_map_shape_list)]): + for item in anchors_list: + item.set(tf.identity(item.get())) + return anchors_list + + @abstractmethod + def _generate(self, feature_map_shape_list, **params): + """To be overridden by implementations. + + Args: + feature_map_shape_list: list of (height, width) pairs in the format + [(height_0, width_0), (height_1, width_1), ...] that the generated + anchors must align with. + **params: parameters for anchor generation op + + Returns: + boxes_list: a list of BoxList, each holding a collection of N anchor + boxes. + """ + pass + + def anchor_index_to_feature_map_index(self, boxlist_list): + """Returns a 1-D array of feature map indices for each anchor. + + Args: + boxlist_list: a list of Boxlist, each holding a collection of N anchor + boxes. This list is produced in self.generate(). + + Returns: + A [num_anchors] integer array, where each element indicates which feature + map index the anchor belongs to. + """ + feature_map_indices_list = [] + for i, boxes in enumerate(boxlist_list): + feature_map_indices_list.append( + i * tf.ones([boxes.num_boxes()], dtype=tf.int32)) + return tf.concat(feature_map_indices_list, axis=0) + + def _assert_correct_number_of_anchors(self, anchors_list, + feature_map_shape_list): + """Assert that correct number of anchors was generated. + + Args: + anchors_list: A list of box_list.BoxList object holding anchors generated. + feature_map_shape_list: list of (height, width) pairs in the format + [(height_0, width_0), (height_1, width_1), ...] that the generated + anchors must align with. + Returns: + Op that raises InvalidArgumentError if the number of anchors does not + match the number of expected anchors. + """ + expected_num_anchors = 0 + actual_num_anchors = 0 + for num_anchors_per_location, feature_map_shape, anchors in zip( + self.num_anchors_per_location(), feature_map_shape_list, anchors_list): + expected_num_anchors += (num_anchors_per_location + * feature_map_shape[0] + * feature_map_shape[1]) + actual_num_anchors += anchors.num_boxes() + return tf.assert_equal(expected_num_anchors, actual_num_anchors) diff --git a/models/research/object_detection/core/balanced_positive_negative_sampler.py b/models/research/object_detection/core/balanced_positive_negative_sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..6e09537d20e449d587d0ac027e1348df294bfb4b --- /dev/null +++ b/models/research/object_detection/core/balanced_positive_negative_sampler.py @@ -0,0 +1,262 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Class to subsample minibatches by balancing positives and negatives. + +Subsamples minibatches based on a pre-specified positive fraction in range +[0,1]. The class presumes there are many more negatives than positive examples: +if the desired batch_size cannot be achieved with the pre-specified positive +fraction, it fills the rest with negative examples. If this is not sufficient +for obtaining the desired batch_size, it returns fewer examples. + +The main function to call is Subsample(self, indicator, labels). For convenience +one can also call SubsampleWeights(self, weights, labels) which is defined in +the minibatch_sampler base class. + +When is_static is True, it implements a method that guarantees static shapes. +It also ensures the length of output of the subsample is always batch_size, even +when number of examples set to True in indicator is less than batch_size. +""" + +import tensorflow.compat.v1 as tf + +from object_detection.core import minibatch_sampler + + +class BalancedPositiveNegativeSampler(minibatch_sampler.MinibatchSampler): + """Subsamples minibatches to a desired balance of positives and negatives.""" + + def __init__(self, positive_fraction=0.5, is_static=False): + """Constructs a minibatch sampler. + + Args: + positive_fraction: desired fraction of positive examples (scalar in [0,1]) + in the batch. + is_static: If True, uses an implementation with static shape guarantees. + + Raises: + ValueError: if positive_fraction < 0, or positive_fraction > 1 + """ + if positive_fraction < 0 or positive_fraction > 1: + raise ValueError('positive_fraction should be in range [0,1]. ' + 'Received: %s.' % positive_fraction) + self._positive_fraction = positive_fraction + self._is_static = is_static + + def _get_num_pos_neg_samples(self, sorted_indices_tensor, sample_size): + """Counts the number of positives and negatives numbers to be sampled. + + Args: + sorted_indices_tensor: A sorted int32 tensor of shape [N] which contains + the signed indices of the examples where the sign is based on the label + value. The examples that cannot be sampled are set to 0. It samples + atmost sample_size*positive_fraction positive examples and remaining + from negative examples. + sample_size: Size of subsamples. + + Returns: + A tuple containing the number of positive and negative labels in the + subsample. + """ + input_length = tf.shape(sorted_indices_tensor)[0] + valid_positive_index = tf.greater(sorted_indices_tensor, + tf.zeros(input_length, tf.int32)) + num_sampled_pos = tf.reduce_sum(tf.cast(valid_positive_index, tf.int32)) + max_num_positive_samples = tf.constant( + int(sample_size * self._positive_fraction), tf.int32) + num_positive_samples = tf.minimum(max_num_positive_samples, num_sampled_pos) + num_negative_samples = tf.constant(sample_size, + tf.int32) - num_positive_samples + + return num_positive_samples, num_negative_samples + + def _get_values_from_start_and_end(self, input_tensor, num_start_samples, + num_end_samples, total_num_samples): + """slices num_start_samples and last num_end_samples from input_tensor. + + Args: + input_tensor: An int32 tensor of shape [N] to be sliced. + num_start_samples: Number of examples to be sliced from the beginning + of the input tensor. + num_end_samples: Number of examples to be sliced from the end of the + input tensor. 
+ total_num_samples: Sum of is num_start_samples and num_end_samples. This + should be a scalar. + + Returns: + A tensor containing the first num_start_samples and last num_end_samples + from input_tensor. + + """ + input_length = tf.shape(input_tensor)[0] + start_positions = tf.less(tf.range(input_length), num_start_samples) + end_positions = tf.greater_equal( + tf.range(input_length), input_length - num_end_samples) + selected_positions = tf.logical_or(start_positions, end_positions) + selected_positions = tf.cast(selected_positions, tf.float32) + indexed_positions = tf.multiply(tf.cumsum(selected_positions), + selected_positions) + one_hot_selector = tf.one_hot(tf.cast(indexed_positions, tf.int32) - 1, + total_num_samples, + dtype=tf.float32) + return tf.cast(tf.tensordot(tf.cast(input_tensor, tf.float32), + one_hot_selector, axes=[0, 0]), tf.int32) + + def _static_subsample(self, indicator, batch_size, labels): + """Returns subsampled minibatch. + + Args: + indicator: boolean tensor of shape [N] whose True entries can be sampled. + N should be a complie time constant. + batch_size: desired batch size. This scalar cannot be None. + labels: boolean tensor of shape [N] denoting positive(=True) and negative + (=False) examples. N should be a complie time constant. + + Returns: + sampled_idx_indicator: boolean tensor of shape [N], True for entries which + are sampled. It ensures the length of output of the subsample is always + batch_size, even when number of examples set to True in indicator is + less than batch_size. + + Raises: + ValueError: if labels and indicator are not 1D boolean tensors. + """ + # Check if indicator and labels have a static size. + if not indicator.shape.is_fully_defined(): + raise ValueError('indicator must be static in shape when is_static is' + 'True') + if not labels.shape.is_fully_defined(): + raise ValueError('labels must be static in shape when is_static is' + 'True') + if not isinstance(batch_size, int): + raise ValueError('batch_size has to be an integer when is_static is' + 'True.') + + input_length = tf.shape(indicator)[0] + + # Set the number of examples set True in indicator to be at least + # batch_size. + num_true_sampled = tf.reduce_sum(tf.cast(indicator, tf.float32)) + additional_false_sample = tf.less_equal( + tf.cumsum(tf.cast(tf.logical_not(indicator), tf.float32)), + batch_size - num_true_sampled) + indicator = tf.logical_or(indicator, additional_false_sample) + + # Shuffle indicator and label. Need to store the permutation to restore the + # order post sampling. + permutation = tf.random_shuffle(tf.range(input_length)) + indicator = tf.gather(indicator, permutation, axis=0) + labels = tf.gather(labels, permutation, axis=0) + + # index (starting from 1) when indicator is True, 0 when False + indicator_idx = tf.where( + indicator, tf.range(1, input_length + 1), + tf.zeros(input_length, tf.int32)) + + # Replace -1 for negative, +1 for positive labels + signed_label = tf.where( + labels, tf.ones(input_length, tf.int32), + tf.scalar_mul(-1, tf.ones(input_length, tf.int32))) + # negative of index for negative label, positive index for positive label, + # 0 when indicator is False. 
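+ # Sorting the signed indices in descending order places positive candidates + # first and negative candidates last, with the zero entries for examples that + # cannot be sampled in between, so fixed-size slices taken from both ends + # yield the desired positive/negative mix while keeping shapes fully static.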
+ signed_indicator_idx = tf.multiply(indicator_idx, signed_label) + sorted_signed_indicator_idx = tf.nn.top_k( + signed_indicator_idx, input_length, sorted=True).values + + [num_positive_samples, + num_negative_samples] = self._get_num_pos_neg_samples( + sorted_signed_indicator_idx, batch_size) + + sampled_idx = self._get_values_from_start_and_end( + sorted_signed_indicator_idx, num_positive_samples, + num_negative_samples, batch_size) + + # Shift the indices to start from 0 and remove any samples that are set as + # False. + sampled_idx = tf.abs(sampled_idx) - tf.ones(batch_size, tf.int32) + sampled_idx = tf.multiply( + tf.cast(tf.greater_equal(sampled_idx, tf.constant(0)), tf.int32), + sampled_idx) + + sampled_idx_indicator = tf.cast(tf.reduce_sum( + tf.one_hot(sampled_idx, depth=input_length), + axis=0), tf.bool) + + # project back the order based on stored permutations + idx_indicator = tf.scatter_nd( + tf.expand_dims(permutation, -1), sampled_idx_indicator, + shape=(input_length,)) + return idx_indicator + + def subsample(self, indicator, batch_size, labels, scope=None): + """Returns subsampled minibatch. + + Args: + indicator: boolean tensor of shape [N] whose True entries can be sampled. + batch_size: desired batch size. If None, keeps all positive samples and + randomly selects negative samples so that the positive sample fraction + matches self._positive_fraction. It cannot be None is is_static is True. + labels: boolean tensor of shape [N] denoting positive(=True) and negative + (=False) examples. + scope: name scope. + + Returns: + sampled_idx_indicator: boolean tensor of shape [N], True for entries which + are sampled. + + Raises: + ValueError: if labels and indicator are not 1D boolean tensors. + """ + if len(indicator.get_shape().as_list()) != 1: + raise ValueError('indicator must be 1 dimensional, got a tensor of ' + 'shape %s' % indicator.get_shape()) + if len(labels.get_shape().as_list()) != 1: + raise ValueError('labels must be 1 dimensional, got a tensor of ' + 'shape %s' % labels.get_shape()) + if labels.dtype != tf.bool: + raise ValueError('labels should be of type bool. Received: %s' % + labels.dtype) + if indicator.dtype != tf.bool: + raise ValueError('indicator should be of type bool. 
Received: %s' % + indicator.dtype) + with tf.name_scope(scope, 'BalancedPositiveNegativeSampler'): + if self._is_static: + return self._static_subsample(indicator, batch_size, labels) + + else: + # Only sample from indicated samples + negative_idx = tf.logical_not(labels) + positive_idx = tf.logical_and(labels, indicator) + negative_idx = tf.logical_and(negative_idx, indicator) + + # Sample positive and negative samples separately + if batch_size is None: + max_num_pos = tf.reduce_sum(tf.cast(positive_idx, dtype=tf.int32)) + else: + max_num_pos = int(self._positive_fraction * batch_size) + sampled_pos_idx = self.subsample_indicator(positive_idx, max_num_pos) + num_sampled_pos = tf.reduce_sum(tf.cast(sampled_pos_idx, tf.int32)) + if batch_size is None: + negative_positive_ratio = ( + 1 - self._positive_fraction) / self._positive_fraction + max_num_neg = tf.cast( + negative_positive_ratio * + tf.cast(num_sampled_pos, dtype=tf.float32), + dtype=tf.int32) + else: + max_num_neg = batch_size - num_sampled_pos + sampled_neg_idx = self.subsample_indicator(negative_idx, max_num_neg) + + return tf.logical_or(sampled_pos_idx, sampled_neg_idx) diff --git a/models/research/object_detection/core/balanced_positive_negative_sampler_test.py b/models/research/object_detection/core/balanced_positive_negative_sampler_test.py new file mode 100644 index 0000000000000000000000000000000000000000..10b8ca740448c776b3ed8c642ba5722776175e5e --- /dev/null +++ b/models/research/object_detection/core/balanced_positive_negative_sampler_test.py @@ -0,0 +1,212 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.core.balanced_positive_negative_sampler.""" + +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.core import balanced_positive_negative_sampler +from object_detection.utils import test_case + + +class BalancedPositiveNegativeSamplerTest(test_case.TestCase): + + def test_subsample_all_examples(self): + if self.has_tpu(): return + numpy_labels = np.random.permutation(300) + indicator = np.array(np.ones(300) == 1, np.bool) + numpy_labels = (numpy_labels - 200) > 0 + + labels = np.array(numpy_labels, np.bool) + + def graph_fn(indicator, labels): + sampler = ( + balanced_positive_negative_sampler.BalancedPositiveNegativeSampler()) + return sampler.subsample(indicator, 64, labels) + + is_sampled = self.execute_cpu(graph_fn, [indicator, labels]) + self.assertEqual(sum(is_sampled), 64) + self.assertEqual(sum(np.logical_and(numpy_labels, is_sampled)), 32) + self.assertEqual(sum(np.logical_and( + np.logical_not(numpy_labels), is_sampled)), 32) + + def test_subsample_all_examples_static(self): + if not self.has_tpu(): return + numpy_labels = np.random.permutation(300) + indicator = np.array(np.ones(300) == 1, np.bool) + numpy_labels = (numpy_labels - 200) > 0 + + labels = np.array(numpy_labels, np.bool) + + def graph_fn(indicator, labels): + sampler = ( + balanced_positive_negative_sampler.BalancedPositiveNegativeSampler( + is_static=True)) + return sampler.subsample(indicator, 64, labels) + + is_sampled = self.execute_tpu(graph_fn, [indicator, labels]) + self.assertEqual(sum(is_sampled), 64) + self.assertEqual(sum(np.logical_and(numpy_labels, is_sampled)), 32) + self.assertEqual(sum(np.logical_and( + np.logical_not(numpy_labels), is_sampled)), 32) + + def test_subsample_selection(self): + if self.has_tpu(): return + # Test random sampling when only some examples can be sampled: + # 100 samples, 20 positives, 10 positives cannot be sampled. + numpy_labels = np.arange(100) + numpy_indicator = numpy_labels < 90 + indicator = np.array(numpy_indicator, np.bool) + numpy_labels = (numpy_labels - 80) >= 0 + + labels = np.array(numpy_labels, np.bool) + + def graph_fn(indicator, labels): + sampler = ( + balanced_positive_negative_sampler.BalancedPositiveNegativeSampler()) + return sampler.subsample(indicator, 64, labels) + + is_sampled = self.execute_cpu(graph_fn, [indicator, labels]) + self.assertEqual(sum(is_sampled), 64) + self.assertEqual(sum(np.logical_and(numpy_labels, is_sampled)), 10) + self.assertEqual(sum(np.logical_and( + np.logical_not(numpy_labels), is_sampled)), 54) + self.assertAllEqual(is_sampled, np.logical_and(is_sampled, numpy_indicator)) + + def test_subsample_selection_static(self): + if not self.has_tpu(): return + # Test random sampling when only some examples can be sampled: + # 100 samples, 20 positives, 10 positives cannot be sampled. 
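+ # With the default positive_fraction of 0.5, a batch of 64 can hold at most + # 32 positives; only 10 of the 20 positives are sampleable here, so the + # remaining 54 slots are filled with negatives.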
+ numpy_labels = np.arange(100) + numpy_indicator = numpy_labels < 90 + indicator = np.array(numpy_indicator, np.bool) + numpy_labels = (numpy_labels - 80) >= 0 + + labels = np.array(numpy_labels, np.bool) + + def graph_fn(indicator, labels): + sampler = ( + balanced_positive_negative_sampler.BalancedPositiveNegativeSampler( + is_static=True)) + return sampler.subsample(indicator, 64, labels) + + is_sampled = self.execute_tpu(graph_fn, [indicator, labels]) + self.assertEqual(sum(is_sampled), 64) + self.assertEqual(sum(np.logical_and(numpy_labels, is_sampled)), 10) + self.assertEqual(sum(np.logical_and( + np.logical_not(numpy_labels), is_sampled)), 54) + self.assertAllEqual(is_sampled, np.logical_and(is_sampled, numpy_indicator)) + + def test_subsample_selection_larger_batch_size(self): + if self.has_tpu(): return + # Test random sampling when total number of examples that can be sampled are + # less than batch size: + # 100 samples, 50 positives, 40 positives cannot be sampled, batch size 64. + # It should still return 64 samples, with 4 of them that couldn't have been + # sampled. + numpy_labels = np.arange(100) + numpy_indicator = numpy_labels < 60 + indicator = np.array(numpy_indicator, np.bool) + numpy_labels = (numpy_labels - 50) >= 0 + + labels = np.array(numpy_labels, np.bool) + + def graph_fn(indicator, labels): + sampler = ( + balanced_positive_negative_sampler.BalancedPositiveNegativeSampler()) + return sampler.subsample(indicator, 64, labels) + + is_sampled = self.execute_cpu(graph_fn, [indicator, labels]) + self.assertEqual(sum(is_sampled), 60) + self.assertGreaterEqual(sum(np.logical_and(numpy_labels, is_sampled)), 10) + self.assertGreaterEqual( + sum(np.logical_and(np.logical_not(numpy_labels), is_sampled)), 50) + self.assertEqual(sum(np.logical_and(is_sampled, numpy_indicator)), 60) + + def test_subsample_selection_larger_batch_size_static(self): + if not self.has_tpu(): return + # Test random sampling when total number of examples that can be sampled are + # less than batch size: + # 100 samples, 50 positives, 40 positives cannot be sampled, batch size 64. + # It should still return 64 samples, with 4 of them that couldn't have been + # sampled. + numpy_labels = np.arange(100) + numpy_indicator = numpy_labels < 60 + indicator = np.array(numpy_indicator, np.bool) + numpy_labels = (numpy_labels - 50) >= 0 + + labels = np.array(numpy_labels, np.bool) + + def graph_fn(indicator, labels): + sampler = ( + balanced_positive_negative_sampler.BalancedPositiveNegativeSampler( + is_static=True)) + return sampler.subsample(indicator, 64, labels) + + is_sampled = self.execute_tpu(graph_fn, [indicator, labels]) + self.assertEqual(sum(is_sampled), 64) + self.assertGreaterEqual(sum(np.logical_and(numpy_labels, is_sampled)), 10) + self.assertGreaterEqual( + sum(np.logical_and(np.logical_not(numpy_labels), is_sampled)), 50) + self.assertEqual(sum(np.logical_and(is_sampled, numpy_indicator)), 60) + + def test_subsample_selection_no_batch_size(self): + if self.has_tpu(): return + # Test random sampling when only some examples can be sampled: + # 1000 samples, 6 positives (5 can be sampled). + numpy_labels = np.arange(1000) + numpy_indicator = numpy_labels < 999 + numpy_labels = (numpy_labels - 994) >= 0 + + def graph_fn(indicator, labels): + sampler = (balanced_positive_negative_sampler. 
+ BalancedPositiveNegativeSampler(0.01)) + is_sampled = sampler.subsample(indicator, None, labels) + return is_sampled + is_sampled_out = self.execute_cpu(graph_fn, [numpy_indicator, numpy_labels]) + self.assertEqual(sum(is_sampled_out), 500) + self.assertEqual(sum(np.logical_and(numpy_labels, is_sampled_out)), 5) + self.assertEqual(sum(np.logical_and( + np.logical_not(numpy_labels), is_sampled_out)), 495) + self.assertAllEqual(is_sampled_out, np.logical_and(is_sampled_out, + numpy_indicator)) + + def test_subsample_selection_no_batch_size_static(self): + labels = tf.constant([[True, False, False]]) + indicator = tf.constant([True, False, True]) + sampler = ( + balanced_positive_negative_sampler.BalancedPositiveNegativeSampler()) + with self.assertRaises(ValueError): + sampler.subsample(indicator, None, labels) + + def test_raises_error_with_incorrect_label_shape(self): + labels = tf.constant([[True, False, False]]) + indicator = tf.constant([True, False, True]) + sampler = (balanced_positive_negative_sampler. + BalancedPositiveNegativeSampler()) + with self.assertRaises(ValueError): + sampler.subsample(indicator, 64, labels) + + def test_raises_error_with_incorrect_indicator_shape(self): + labels = tf.constant([True, False, False]) + indicator = tf.constant([[True, False, True]]) + sampler = (balanced_positive_negative_sampler. + BalancedPositiveNegativeSampler()) + with self.assertRaises(ValueError): + sampler.subsample(indicator, 64, labels) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/core/batch_multiclass_nms_test.py b/models/research/object_detection/core/batch_multiclass_nms_test.py new file mode 100644 index 0000000000000000000000000000000000000000..06f17103b2b6bd7df5d449a270f0bddfd3514249 --- /dev/null +++ b/models/research/object_detection/core/batch_multiclass_nms_test.py @@ -0,0 +1,686 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for google3.third_party.tensorflow_models.object_detection.core.batch_multiclass_nms.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from absl.testing import parameterized +import numpy as np +from six.moves import range +import tensorflow.compat.v1 as tf +from object_detection.core import post_processing +from object_detection.utils import test_case + + +class BatchMulticlassNonMaxSuppressionTest(test_case.TestCase, + parameterized.TestCase): + + def test_batch_multiclass_nms_with_batch_size_1(self): + boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], + [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], + [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], + [[0, 10, 1, 11], [0, 10, 1, 11]], + [[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], + [[0, 100, 1, 101], [0, 100, 1, 101]], + [[0, 1000, 1, 1002], [0, 999, 2, 1004]], + [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]], + np.float32) + scores = np.array([[[.9, 0.01], [.75, 0.05], + [.6, 0.01], [.95, 0], + [.5, 0.01], [.3, 0.01], + [.01, .85], [.01, .5]]], np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + + exp_nms_corners = [[[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 999, 2, 1004], + [0, 100, 1, 101]]] + exp_nms_scores = [[.95, .9, .85, .3]] + exp_nms_classes = [[0, 0, 1, 0]] + def graph_fn(boxes, scores): + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + nmsed_additional_fields, num_detections + ) = post_processing.batch_multiclass_non_max_suppression( + boxes, scores, score_thresh, iou_thresh, + max_size_per_class=max_output_size, + max_total_size=max_output_size) + self.assertIsNone(nmsed_masks) + self.assertIsNone(nmsed_additional_fields) + return (nmsed_boxes, nmsed_scores, nmsed_classes, num_detections) + + (nmsed_boxes, nmsed_scores, nmsed_classes, + num_detections) = self.execute_cpu(graph_fn, [boxes, scores]) + self.assertAllClose(nmsed_boxes, exp_nms_corners) + self.assertAllClose(nmsed_scores, exp_nms_scores) + self.assertAllClose(nmsed_classes, exp_nms_classes) + self.assertEqual(num_detections, [4]) + + def test_batch_iou_with_negative_data(self): + def graph_fn(): + boxes = tf.constant([[[0, -0.01, 0.1, 1.1], [0, 0.2, 0.2, 5.0], + [0, -0.01, 0.1, 1.], [-1, -1, -1, -1]]], tf.float32) + iou = post_processing.batch_iou(boxes, boxes) + return iou + iou = self.execute_cpu(graph_fn, []) + expected_iou = [[[0.99999994, 0.0917431, 0.9099099, -1.], + [0.0917431, 1., 0.08154944, -1.], + [0.9099099, 0.08154944, 1., -1.], [-1., -1., -1., -1.]]] + self.assertAllClose(iou, expected_iou) + + @parameterized.parameters(False, True) + def test_batch_multiclass_nms_with_batch_size_2(self, use_dynamic_map_fn): + boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], + [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], + [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], + [[0, 10, 1, 11], [0, 10, 1, 11]]], + [[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], + [[0, 100, 1, 101], [0, 100, 1, 101]], + [[0, 1000, 1, 1002], [0, 999, 2, 1004]], + [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]], + np.float32) + scores = np.array([[[.9, 0.01], [.75, 0.05], + [.6, 0.01], [.95, 0]], + [[.5, 0.01], [.3, 0.01], + [.01, .85], [.01, .5]]], np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + + exp_nms_corners = np.array([[[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 0, 0, 0], + [0, 0, 0, 0]], + [[0, 999, 2, 1004], + [0, 10.1, 1, 11.1], + [0, 100, 1, 101], + [0, 0, 0, 0]]]) + exp_nms_scores = np.array([[.95, .9, 0, 0], + 
[.85, .5, .3, 0]]) + exp_nms_classes = np.array([[0, 0, 0, 0], + [1, 0, 0, 0]]) + def graph_fn(boxes, scores): + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + nmsed_additional_fields, num_detections + ) = post_processing.batch_multiclass_non_max_suppression( + boxes, scores, score_thresh, iou_thresh, + max_size_per_class=max_output_size, + max_total_size=max_output_size, + use_dynamic_map_fn=use_dynamic_map_fn) + self.assertIsNone(nmsed_masks) + self.assertIsNone(nmsed_additional_fields) + # Check static shapes + self.assertAllEqual(nmsed_boxes.shape.as_list(), + exp_nms_corners.shape) + self.assertAllEqual(nmsed_scores.shape.as_list(), + exp_nms_scores.shape) + self.assertAllEqual(nmsed_classes.shape.as_list(), + exp_nms_classes.shape) + self.assertEqual(num_detections.shape.as_list(), [2]) + return (nmsed_boxes, nmsed_scores, nmsed_classes, num_detections) + + (nmsed_boxes, nmsed_scores, nmsed_classes, + num_detections) = self.execute_cpu(graph_fn, [boxes, scores]) + self.assertAllClose(nmsed_boxes, exp_nms_corners) + self.assertAllClose(nmsed_scores, exp_nms_scores) + self.assertAllClose(nmsed_classes, exp_nms_classes) + self.assertAllClose(num_detections, [2, 3]) + + def test_batch_multiclass_nms_with_per_batch_clip_window(self): + boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], + [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], + [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], + [[0, 10, 1, 11], [0, 10, 1, 11]]], + [[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], + [[0, 100, 1, 101], [0, 100, 1, 101]], + [[0, 1000, 1, 1002], [0, 999, 2, 1004]], + [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]], + np.float32) + scores = np.array([[[.9, 0.01], [.75, 0.05], + [.6, 0.01], [.95, 0]], + [[.5, 0.01], [.3, 0.01], + [.01, .85], [.01, .5]]], np.float32) + clip_window = np.array([0., 0., 200., 200.], np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + + exp_nms_corners = np.array([[[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 0, 0, 0], + [0, 0, 0, 0]], + [[0, 10.1, 1, 11.1], + [0, 100, 1, 101], + [0, 0, 0, 0], + [0, 0, 0, 0]]]) + exp_nms_scores = np.array([[.95, .9, 0, 0], + [.5, .3, 0, 0]]) + exp_nms_classes = np.array([[0, 0, 0, 0], + [0, 0, 0, 0]]) + def graph_fn(boxes, scores, clip_window): + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + nmsed_additional_fields, num_detections + ) = post_processing.batch_multiclass_non_max_suppression( + boxes, scores, score_thresh, iou_thresh, + max_size_per_class=max_output_size, max_total_size=max_output_size, + clip_window=clip_window) + self.assertIsNone(nmsed_masks) + self.assertIsNone(nmsed_additional_fields) + # Check static shapes + self.assertAllEqual(nmsed_boxes.shape.as_list(), + exp_nms_corners.shape) + self.assertAllEqual(nmsed_scores.shape.as_list(), + exp_nms_scores.shape) + self.assertAllEqual(nmsed_classes.shape.as_list(), + exp_nms_classes.shape) + self.assertEqual(num_detections.shape.as_list(), [2]) + return nmsed_boxes, nmsed_scores, nmsed_classes, num_detections + + (nmsed_boxes, nmsed_scores, nmsed_classes, + num_detections) = self.execute_cpu(graph_fn, [boxes, scores, clip_window]) + self.assertAllClose(nmsed_boxes, exp_nms_corners) + self.assertAllClose(nmsed_scores, exp_nms_scores) + self.assertAllClose(nmsed_classes, exp_nms_classes) + self.assertAllClose(num_detections, [2, 2]) + + def test_batch_multiclass_nms_with_per_image_clip_window(self): + boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], + [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], + [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], + [[0, 10, 1, 11], [0, 10, 1, 11]]], + 
[[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], + [[0, 100, 1, 101], [0, 100, 1, 101]], + [[0, 1000, 1, 1002], [0, 999, 2, 1004]], + [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]], + np.float32) + scores = np.array([[[.9, 0.01], [.75, 0.05], + [.6, 0.01], [.95, 0]], + [[.5, 0.01], [.3, 0.01], + [.01, .85], [.01, .5]]], np.float32) + clip_window = np.array([[0., 0., 5., 5.], + [0., 0., 200., 200.]], np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + + exp_nms_corners = np.array([[[0, 0, 1, 1], + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], + [[0, 10.1, 1, 11.1], + [0, 100, 1, 101], + [0, 0, 0, 0], + [0, 0, 0, 0]]]) + exp_nms_scores = np.array([[.9, 0., 0., 0.], + [.5, .3, 0, 0]]) + exp_nms_classes = np.array([[0, 0, 0, 0], + [0, 0, 0, 0]]) + + def graph_fn(boxes, scores, clip_window): + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + nmsed_additional_fields, num_detections + ) = post_processing.batch_multiclass_non_max_suppression( + boxes, scores, score_thresh, iou_thresh, + max_size_per_class=max_output_size, max_total_size=max_output_size, + clip_window=clip_window) + self.assertIsNone(nmsed_masks) + self.assertIsNone(nmsed_additional_fields) + # Check static shapes + self.assertAllEqual(nmsed_boxes.shape.as_list(), + exp_nms_corners.shape) + self.assertAllEqual(nmsed_scores.shape.as_list(), + exp_nms_scores.shape) + self.assertAllEqual(nmsed_classes.shape.as_list(), + exp_nms_classes.shape) + self.assertEqual(num_detections.shape.as_list(), [2]) + return nmsed_boxes, nmsed_scores, nmsed_classes, num_detections + + (nmsed_boxes, nmsed_scores, nmsed_classes, + num_detections) = self.execute_cpu(graph_fn, [boxes, scores, clip_window]) + self.assertAllClose(nmsed_boxes, exp_nms_corners) + self.assertAllClose(nmsed_scores, exp_nms_scores) + self.assertAllClose(nmsed_classes, exp_nms_classes) + self.assertAllClose(num_detections, [1, 2]) + + def test_batch_multiclass_nms_with_masks(self): + boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], + [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], + [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], + [[0, 10, 1, 11], [0, 10, 1, 11]]], + [[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], + [[0, 100, 1, 101], [0, 100, 1, 101]], + [[0, 1000, 1, 1002], [0, 999, 2, 1004]], + [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]], + np.float32) + scores = np.array([[[.9, 0.01], [.75, 0.05], + [.6, 0.01], [.95, 0]], + [[.5, 0.01], [.3, 0.01], + [.01, .85], [.01, .5]]], np.float32) + masks = np.array([[[[[0, 1], [2, 3]], [[1, 2], [3, 4]]], + [[[2, 3], [4, 5]], [[3, 4], [5, 6]]], + [[[4, 5], [6, 7]], [[5, 6], [7, 8]]], + [[[6, 7], [8, 9]], [[7, 8], [9, 10]]]], + [[[[8, 9], [10, 11]], [[9, 10], [11, 12]]], + [[[10, 11], [12, 13]], [[11, 12], [13, 14]]], + [[[12, 13], [14, 15]], [[13, 14], [15, 16]]], + [[[14, 15], [16, 17]], [[15, 16], [17, 18]]]]], + np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + + exp_nms_corners = np.array([[[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 0, 0, 0], + [0, 0, 0, 0]], + [[0, 999, 2, 1004], + [0, 10.1, 1, 11.1], + [0, 100, 1, 101], + [0, 0, 0, 0]]]) + exp_nms_scores = np.array([[.95, .9, 0, 0], + [.85, .5, .3, 0]]) + exp_nms_classes = np.array([[0, 0, 0, 0], + [1, 0, 0, 0]]) + exp_nms_masks = np.array([[[[6, 7], [8, 9]], + [[0, 1], [2, 3]], + [[0, 0], [0, 0]], + [[0, 0], [0, 0]]], + [[[13, 14], [15, 16]], + [[8, 9], [10, 11]], + [[10, 11], [12, 13]], + [[0, 0], [0, 0]]]]) + + def graph_fn(boxes, scores, masks): + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + nmsed_additional_fields, num_detections + ) = 
post_processing.batch_multiclass_non_max_suppression( + boxes, scores, score_thresh, iou_thresh, + max_size_per_class=max_output_size, max_total_size=max_output_size, + masks=masks) + self.assertIsNone(nmsed_additional_fields) + # Check static shapes + self.assertAllEqual(nmsed_boxes.shape.as_list(), exp_nms_corners.shape) + self.assertAllEqual(nmsed_scores.shape.as_list(), exp_nms_scores.shape) + self.assertAllEqual(nmsed_classes.shape.as_list(), exp_nms_classes.shape) + self.assertAllEqual(nmsed_masks.shape.as_list(), exp_nms_masks.shape) + self.assertEqual(num_detections.shape.as_list(), [2]) + return (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + num_detections) + + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + num_detections) = self.execute_cpu(graph_fn, [boxes, scores, masks]) + self.assertAllClose(nmsed_boxes, exp_nms_corners) + self.assertAllClose(nmsed_scores, exp_nms_scores) + self.assertAllClose(nmsed_classes, exp_nms_classes) + self.assertAllClose(num_detections, [2, 3]) + self.assertAllClose(nmsed_masks, exp_nms_masks) + + def test_batch_multiclass_nms_with_additional_fields(self): + boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], + [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], + [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], + [[0, 10, 1, 11], [0, 10, 1, 11]]], + [[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], + [[0, 100, 1, 101], [0, 100, 1, 101]], + [[0, 1000, 1, 1002], [0, 999, 2, 1004]], + [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]], + np.float32) + scores = np.array([[[.9, 0.01], [.75, 0.05], + [.6, 0.01], [.95, 0]], + [[.5, 0.01], [.3, 0.01], + [.01, .85], [.01, .5]]], np.float32) + keypoints = np.array( + [[[[6, 7], [8, 9]], + [[0, 1], [2, 3]], + [[0, 0], [0, 0]], + [[0, 0], [0, 0]]], + [[[13, 14], [15, 16]], + [[8, 9], [10, 11]], + [[10, 11], [12, 13]], + [[0, 0], [0, 0]]]], + np.float32) + size = np.array( + [[[[6], [8]], [[0], [2]], [[0], [0]], [[0], [0]]], + [[[13], [15]], [[8], [10]], [[10], [12]], [[0], [0]]]], np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + + exp_nms_corners = np.array([[[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 0, 0, 0], + [0, 0, 0, 0]], + [[0, 999, 2, 1004], + [0, 10.1, 1, 11.1], + [0, 100, 1, 101], + [0, 0, 0, 0]]]) + exp_nms_scores = np.array([[.95, .9, 0, 0], + [.85, .5, .3, 0]]) + exp_nms_classes = np.array([[0, 0, 0, 0], + [1, 0, 0, 0]]) + exp_nms_additional_fields = { + 'keypoints': np.array([[[[0, 0], [0, 0]], + [[6, 7], [8, 9]], + [[0, 0], [0, 0]], + [[0, 0], [0, 0]]], + [[[10, 11], [12, 13]], + [[13, 14], [15, 16]], + [[8, 9], [10, 11]], + [[0, 0], [0, 0]]]]) + } + exp_nms_additional_fields['size'] = np.array([[[[0], [0]], [[6], [8]], + [[0], [0]], [[0], [0]]], + [[[10], [12]], [[13], [15]], + [[8], [10]], [[0], [0]]]]) + + def graph_fn(boxes, scores, keypoints, size): + additional_fields = {'keypoints': keypoints, 'size': size} + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + nmsed_additional_fields, num_detections + ) = post_processing.batch_multiclass_non_max_suppression( + boxes, scores, score_thresh, iou_thresh, + max_size_per_class=max_output_size, max_total_size=max_output_size, + additional_fields=additional_fields) + self.assertIsNone(nmsed_masks) + # Check static shapes + self.assertAllEqual(nmsed_boxes.shape.as_list(), exp_nms_corners.shape) + self.assertAllEqual(nmsed_scores.shape.as_list(), exp_nms_scores.shape) + self.assertAllEqual(nmsed_classes.shape.as_list(), exp_nms_classes.shape) + self.assertEqual(len(nmsed_additional_fields), + len(exp_nms_additional_fields)) + for key in 
exp_nms_additional_fields: + self.assertAllEqual(nmsed_additional_fields[key].shape.as_list(), + exp_nms_additional_fields[key].shape) + self.assertEqual(num_detections.shape.as_list(), [2]) + return (nmsed_boxes, nmsed_scores, nmsed_classes, + nmsed_additional_fields['keypoints'], + nmsed_additional_fields['size'], + num_detections) + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_keypoints, nmsed_size, + num_detections) = self.execute_cpu(graph_fn, [boxes, scores, keypoints, + size]) + + self.assertAllClose(nmsed_boxes, exp_nms_corners) + self.assertAllClose(nmsed_scores, exp_nms_scores) + self.assertAllClose(nmsed_classes, exp_nms_classes) + self.assertAllClose(nmsed_keypoints, + exp_nms_additional_fields['keypoints']) + self.assertAllClose(nmsed_size, + exp_nms_additional_fields['size']) + self.assertAllClose(num_detections, [2, 3]) + + def test_batch_multiclass_nms_with_masks_and_num_valid_boxes(self): + boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], + [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], + [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], + [[0, 10, 1, 11], [0, 10, 1, 11]]], + [[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], + [[0, 100, 1, 101], [0, 100, 1, 101]], + [[0, 1000, 1, 1002], [0, 999, 2, 1004]], + [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]], + np.float32) + scores = np.array([[[.9, 0.01], [.75, 0.05], + [.6, 0.01], [.95, 0]], + [[.5, 0.01], [.3, 0.01], + [.01, .85], [.01, .5]]], np.float32) + masks = np.array([[[[[0, 1], [2, 3]], [[1, 2], [3, 4]]], + [[[2, 3], [4, 5]], [[3, 4], [5, 6]]], + [[[4, 5], [6, 7]], [[5, 6], [7, 8]]], + [[[6, 7], [8, 9]], [[7, 8], [9, 10]]]], + [[[[8, 9], [10, 11]], [[9, 10], [11, 12]]], + [[[10, 11], [12, 13]], [[11, 12], [13, 14]]], + [[[12, 13], [14, 15]], [[13, 14], [15, 16]]], + [[[14, 15], [16, 17]], [[15, 16], [17, 18]]]]], + np.float32) + num_valid_boxes = np.array([1, 1], np.int32) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + + exp_nms_corners = [[[0, 0, 1, 1], + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], + [[0, 10.1, 1, 11.1], + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]]] + exp_nms_scores = [[.9, 0, 0, 0], + [.5, 0, 0, 0]] + exp_nms_classes = [[0, 0, 0, 0], + [0, 0, 0, 0]] + exp_nms_masks = [[[[0, 1], [2, 3]], + [[0, 0], [0, 0]], + [[0, 0], [0, 0]], + [[0, 0], [0, 0]]], + [[[8, 9], [10, 11]], + [[0, 0], [0, 0]], + [[0, 0], [0, 0]], + [[0, 0], [0, 0]]]] + + def graph_fn(boxes, scores, masks, num_valid_boxes): + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + nmsed_additional_fields, num_detections + ) = post_processing.batch_multiclass_non_max_suppression( + boxes, scores, score_thresh, iou_thresh, + max_size_per_class=max_output_size, max_total_size=max_output_size, + masks=masks, num_valid_boxes=num_valid_boxes) + self.assertIsNone(nmsed_additional_fields) + return (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + num_detections) + + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + num_detections) = self.execute_cpu(graph_fn, [boxes, scores, masks, + num_valid_boxes]) + self.assertAllClose(nmsed_boxes, exp_nms_corners) + self.assertAllClose(nmsed_scores, exp_nms_scores) + self.assertAllClose(nmsed_classes, exp_nms_classes) + self.assertAllClose(num_detections, [1, 1]) + self.assertAllClose(nmsed_masks, exp_nms_masks) + + def test_batch_multiclass_nms_with_additional_fields_and_num_valid_boxes( + self): + boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], + [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], + [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], + [[0, 10, 1, 11], [0, 10, 1, 11]]], + [[[0, 10.1, 1, 11.1], 
[0, 10.1, 1, 11.1]], + [[0, 100, 1, 101], [0, 100, 1, 101]], + [[0, 1000, 1, 1002], [0, 999, 2, 1004]], + [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]], + np.float32) + scores = np.array([[[.9, 0.01], [.75, 0.05], + [.6, 0.01], [.95, 0]], + [[.5, 0.01], [.3, 0.01], + [.01, .85], [.01, .5]]], np.float32) + keypoints = np.array( + [[[[6, 7], [8, 9]], + [[0, 1], [2, 3]], + [[0, 0], [0, 0]], + [[0, 0], [0, 0]]], + [[[13, 14], [15, 16]], + [[8, 9], [10, 11]], + [[10, 11], [12, 13]], + [[0, 0], [0, 0]]]], + np.float32) + size = np.array( + [[[[7], [9]], [[1], [3]], [[0], [0]], [[0], [0]]], + [[[14], [16]], [[9], [11]], [[11], [13]], [[0], [0]]]], np.float32) + + num_valid_boxes = np.array([1, 1], np.int32) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + + exp_nms_corners = [[[0, 0, 1, 1], + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], + [[0, 10.1, 1, 11.1], + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]]] + exp_nms_scores = [[.9, 0, 0, 0], + [.5, 0, 0, 0]] + exp_nms_classes = [[0, 0, 0, 0], + [0, 0, 0, 0]] + exp_nms_additional_fields = { + 'keypoints': np.array([[[[6, 7], [8, 9]], + [[0, 0], [0, 0]], + [[0, 0], [0, 0]], + [[0, 0], [0, 0]]], + [[[13, 14], [15, 16]], + [[0, 0], [0, 0]], + [[0, 0], [0, 0]], + [[0, 0], [0, 0]]]]) + } + + exp_nms_additional_fields['size'] = np.array([[[[7], [9]], [[0], [0]], + [[0], [0]], [[0], [0]]], + [[[14], [16]], [[0], [0]], + [[0], [0]], [[0], [0]]]]) + def graph_fn(boxes, scores, keypoints, size, num_valid_boxes): + additional_fields = {'keypoints': keypoints, 'size': size} + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + nmsed_additional_fields, num_detections + ) = post_processing.batch_multiclass_non_max_suppression( + boxes, scores, score_thresh, iou_thresh, + max_size_per_class=max_output_size, max_total_size=max_output_size, + num_valid_boxes=num_valid_boxes, + additional_fields=additional_fields) + self.assertIsNone(nmsed_masks) + return (nmsed_boxes, nmsed_scores, nmsed_classes, + nmsed_additional_fields['keypoints'], + nmsed_additional_fields['size'], num_detections) + + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_keypoints, nmsed_size, + num_detections) = self.execute_cpu(graph_fn, [boxes, scores, keypoints, + size, num_valid_boxes]) + + self.assertAllClose(nmsed_boxes, exp_nms_corners) + self.assertAllClose(nmsed_scores, exp_nms_scores) + self.assertAllClose(nmsed_classes, exp_nms_classes) + self.assertAllClose(nmsed_keypoints, + exp_nms_additional_fields['keypoints']) + self.assertAllClose(nmsed_size, + exp_nms_additional_fields['size']) + self.assertAllClose(num_detections, [1, 1]) + + def test_combined_nms_with_batch_size_2(self): + """Test use_combined_nms.""" + boxes = np.array([[[[0, 0, 0.1, 0.1], [0, 0, 0.1, 0.1]], + [[0, 0.01, 1, 0.11], [0, 0.6, 0.1, 0.7]], + [[0, -0.01, 0.1, 0.09], [0, -0.1, 0.1, 0.09]], + [[0, 0.11, 0.1, 0.2], [0, 0.11, 0.1, 0.2]]], + [[[0, 0, 0.2, 0.2], [0, 0, 0.2, 0.2]], + [[0, 0.02, 0.2, 0.22], [0, 0.02, 0.2, 0.22]], + [[0, -0.02, 0.2, 0.19], [0, -0.02, 0.2, 0.19]], + [[0, 0.21, 0.2, 0.3], [0, 0.21, 0.2, 0.3]]]], + np.float32) + scores = np.array([[[.1, 0.9], [.75, 0.8], + [.6, 0.3], [0.95, 0.1]], + [[.1, 0.9], [.75, 0.8], + [.6, .3], [.95, .1]]], np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 3 + + exp_nms_corners = np.array([[[0, 0.11, 0.1, 0.2], + [0, 0, 0.1, 0.1], + [0, 0.6, 0.1, 0.7]], + [[0, 0.21, 0.2, 0.3], + [0, 0, 0.2, 0.2], + [0, 0.02, 0.2, 0.22]]]) + exp_nms_scores = np.array([[.95, .9, 0.8], + [.95, .9, .75]]) + exp_nms_classes = np.array([[0, 1, 1], + 
[0, 1, 0]]) + + def graph_fn(boxes, scores): + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + nmsed_additional_fields, num_detections + ) = post_processing.batch_multiclass_non_max_suppression( + boxes, scores, score_thresh, iou_thresh, + max_size_per_class=max_output_size, max_total_size=max_output_size, + use_static_shapes=True, + use_combined_nms=True) + self.assertIsNone(nmsed_masks) + self.assertIsNone(nmsed_additional_fields) + return (nmsed_boxes, nmsed_scores, nmsed_classes, num_detections) + + (nmsed_boxes, nmsed_scores, nmsed_classes, + num_detections) = self.execute_cpu(graph_fn, [boxes, scores]) + self.assertAllClose(nmsed_boxes, exp_nms_corners) + self.assertAllClose(nmsed_scores, exp_nms_scores) + self.assertAllClose(nmsed_classes, exp_nms_classes) + self.assertListEqual(num_detections.tolist(), [3, 3]) + + def test_batch_multiclass_nms_with_use_static_shapes(self): + boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], + [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], + [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], + [[0, 10, 1, 11], [0, 10, 1, 11]]], + [[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], + [[0, 100, 1, 101], [0, 100, 1, 101]], + [[0, 1000, 1, 1002], [0, 999, 2, 1004]], + [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]], + np.float32) + scores = np.array([[[.9, 0.01], [.75, 0.05], + [.6, 0.01], [.95, 0]], + [[.5, 0.01], [.3, 0.01], + [.01, .85], [.01, .5]]], + np.float32) + clip_window = np.array([[0., 0., 5., 5.], + [0., 0., 200., 200.]], + np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + + exp_nms_corners = np.array([[[0, 0, 1, 1], + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], + [[0, 10.1, 1, 11.1], + [0, 100, 1, 101], + [0, 0, 0, 0], + [0, 0, 0, 0]]]) + exp_nms_scores = np.array([[.9, 0., 0., 0.], + [.5, .3, 0, 0]]) + exp_nms_classes = np.array([[0, 0, 0, 0], + [0, 0, 0, 0]]) + + def graph_fn(boxes, scores, clip_window): + (nmsed_boxes, nmsed_scores, nmsed_classes, _, _, num_detections + ) = post_processing.batch_multiclass_non_max_suppression( + boxes, scores, score_thresh, iou_thresh, + max_size_per_class=max_output_size, clip_window=clip_window, + use_static_shapes=True) + return nmsed_boxes, nmsed_scores, nmsed_classes, num_detections + + (nmsed_boxes, nmsed_scores, nmsed_classes, + num_detections) = self.execute(graph_fn, [boxes, scores, clip_window]) + for i in range(len(num_detections)): + self.assertAllClose(nmsed_boxes[i, 0:num_detections[i]], + exp_nms_corners[i, 0:num_detections[i]]) + self.assertAllClose(nmsed_scores[i, 0:num_detections[i]], + exp_nms_scores[i, 0:num_detections[i]]) + self.assertAllClose(nmsed_classes[i, 0:num_detections[i]], + exp_nms_classes[i, 0:num_detections[i]]) + self.assertAllClose(num_detections, [1, 2]) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/core/batcher.py b/models/research/object_detection/core/batcher.py new file mode 100644 index 0000000000000000000000000000000000000000..26832e30efa43a15436070e8676b1d020712a794 --- /dev/null +++ b/models/research/object_detection/core/batcher.py @@ -0,0 +1,141 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Provides functions to batch a dictionary of input tensors.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +from six.moves import range +import tensorflow.compat.v1 as tf + +from object_detection.core import prefetcher + +rt_shape_str = '_runtime_shapes' + + +class BatchQueue(object): + """BatchQueue class. + + This class creates a batch queue to asynchronously enqueue tensors_dict. + It also adds a FIFO prefetcher so that the batches are readily available + for the consumers. Dequeue ops for a BatchQueue object can be created via + the Dequeue method which evaluates to a batch of tensor_dict. + + Example input pipeline with batching: + ------------------------------------ + key, string_tensor = slim.parallel_reader.parallel_read(...) + tensor_dict = decoder.decode(string_tensor) + tensor_dict = preprocessor.preprocess(tensor_dict, ...) + batch_queue = batcher.BatchQueue(tensor_dict, + batch_size=32, + batch_queue_capacity=2000, + num_batch_queue_threads=8, + prefetch_queue_capacity=20) + tensor_dict = batch_queue.dequeue() + outputs = Model(tensor_dict) + ... + ----------------------------------- + + Notes: + ----- + This class batches tensors of unequal sizes by zero padding and unpadding + them after generating a batch. This can be computationally expensive when + batching tensors (such as images) that are of vastly different sizes. So it is + recommended that the shapes of such tensors be fully defined in tensor_dict + while other lightweight tensors such as bounding box corners and class labels + can be of varying sizes. Use either crop or resize operations to fully define + the shape of an image in tensor_dict. + + It is also recommended to perform any preprocessing operations on tensors + before passing to BatchQueue and subsequently calling the Dequeue method. + + Another caveat is that this class does not read the last batch if it is not + full. The current implementation makes it hard to support that use case. So, + for evaluation, when it is critical to run all the examples through your + network use the input pipeline example mentioned in core/prefetcher.py. + """ + + def __init__(self, tensor_dict, batch_size, batch_queue_capacity, + num_batch_queue_threads, prefetch_queue_capacity): + """Constructs a batch queue holding tensor_dict. + + Args: + tensor_dict: dictionary of tensors to batch. + batch_size: batch size. + batch_queue_capacity: max capacity of the queue from which the tensors are + batched. + num_batch_queue_threads: number of threads to use for batching. + prefetch_queue_capacity: max capacity of the queue used to prefetch + assembled batches. + """ + # Remember static shapes to set shapes of batched tensors. + static_shapes = collections.OrderedDict( + {key: tensor.get_shape() for key, tensor in tensor_dict.items()}) + # Remember runtime shapes to unpad tensors after batching. 
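+    # As an illustrative sketch of the pad/unpad mechanism: batching a [2, 4]
+    # tensor together with a [3, 4] tensor under dynamic_pad=True produces a
+    # padded [batch_size, 3, 4] tensor, and the runtime shapes recorded here
+    # ([2, 4] and [3, 4]) are what dequeue() later uses to slice each example
+    # back to its original, unpadded size.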
+ runtime_shapes = collections.OrderedDict( + {(key + rt_shape_str): tf.shape(tensor) + for key, tensor in tensor_dict.items()}) + + all_tensors = tensor_dict + all_tensors.update(runtime_shapes) + batched_tensors = tf.train.batch( + all_tensors, + capacity=batch_queue_capacity, + batch_size=batch_size, + dynamic_pad=True, + num_threads=num_batch_queue_threads) + + self._queue = prefetcher.prefetch(batched_tensors, + prefetch_queue_capacity) + self._static_shapes = static_shapes + self._batch_size = batch_size + + def dequeue(self): + """Dequeues a batch of tensor_dict from the BatchQueue. + + TODO: use allow_smaller_final_batch to allow running over the whole eval set + + Returns: + A list of tensor_dicts of the requested batch_size. + """ + batched_tensors = self._queue.dequeue() + # Separate input tensors from tensors containing their runtime shapes. + tensors = {} + shapes = {} + for key, batched_tensor in batched_tensors.items(): + unbatched_tensor_list = tf.unstack(batched_tensor) + for i, unbatched_tensor in enumerate(unbatched_tensor_list): + if rt_shape_str in key: + shapes[(key[:-len(rt_shape_str)], i)] = unbatched_tensor + else: + tensors[(key, i)] = unbatched_tensor + + # Undo that padding using shapes and create a list of size `batch_size` that + # contains tensor dictionaries. + tensor_dict_list = [] + batch_size = self._batch_size + for batch_id in range(batch_size): + tensor_dict = {} + for key in self._static_shapes: + tensor_dict[key] = tf.slice(tensors[(key, batch_id)], + tf.zeros_like(shapes[(key, batch_id)]), + shapes[(key, batch_id)]) + tensor_dict[key].set_shape(self._static_shapes[key]) + tensor_dict_list.append(tensor_dict) + + return tensor_dict_list diff --git a/models/research/object_detection/core/batcher_tf1_test.py b/models/research/object_detection/core/batcher_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..1688b87cdf08bc29ddb2413776757066047c80da --- /dev/null +++ b/models/research/object_detection/core/batcher_tf1_test.py @@ -0,0 +1,165 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.core.batcher.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest +import numpy as np +from six.moves import range +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.core import batcher +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class BatcherTest(tf.test.TestCase): + + def test_batch_and_unpad_2d_tensors_of_different_sizes_in_1st_dimension(self): + with self.test_session() as sess: + batch_size = 3 + num_batches = 2 + examples = tf.Variable(tf.constant(2, dtype=tf.int32)) + counter = examples.count_up_to(num_batches * batch_size + 2) + boxes = tf.tile( + tf.reshape(tf.range(4), [1, 4]), tf.stack([counter, tf.constant(1)])) + batch_queue = batcher.BatchQueue( + tensor_dict={'boxes': boxes}, + batch_size=batch_size, + batch_queue_capacity=100, + num_batch_queue_threads=1, + prefetch_queue_capacity=100) + batch = batch_queue.dequeue() + + for tensor_dict in batch: + for tensor in tensor_dict.values(): + self.assertAllEqual([None, 4], tensor.get_shape().as_list()) + + tf.initialize_all_variables().run() + with slim.queues.QueueRunners(sess): + i = 2 + for _ in range(num_batches): + batch_np = sess.run(batch) + for tensor_dict in batch_np: + for tensor in tensor_dict.values(): + self.assertAllEqual(tensor, np.tile(np.arange(4), (i, 1))) + i += 1 + with self.assertRaises(tf.errors.OutOfRangeError): + sess.run(batch) + + def test_batch_and_unpad_2d_tensors_of_different_sizes_in_all_dimensions( + self): + with self.test_session() as sess: + batch_size = 3 + num_batches = 2 + examples = tf.Variable(tf.constant(2, dtype=tf.int32)) + counter = examples.count_up_to(num_batches * batch_size + 2) + image = tf.reshape( + tf.range(counter * counter), tf.stack([counter, counter])) + batch_queue = batcher.BatchQueue( + tensor_dict={'image': image}, + batch_size=batch_size, + batch_queue_capacity=100, + num_batch_queue_threads=1, + prefetch_queue_capacity=100) + batch = batch_queue.dequeue() + + for tensor_dict in batch: + for tensor in tensor_dict.values(): + self.assertAllEqual([None, None], tensor.get_shape().as_list()) + + tf.initialize_all_variables().run() + with slim.queues.QueueRunners(sess): + i = 2 + for _ in range(num_batches): + batch_np = sess.run(batch) + for tensor_dict in batch_np: + for tensor in tensor_dict.values(): + self.assertAllEqual(tensor, np.arange(i * i).reshape((i, i))) + i += 1 + with self.assertRaises(tf.errors.OutOfRangeError): + sess.run(batch) + + def test_batch_and_unpad_2d_tensors_of_same_size_in_all_dimensions(self): + with self.test_session() as sess: + batch_size = 3 + num_batches = 2 + examples = tf.Variable(tf.constant(1, dtype=tf.int32)) + counter = examples.count_up_to(num_batches * batch_size + 1) + image = tf.reshape(tf.range(1, 13), [4, 3]) * counter + batch_queue = batcher.BatchQueue( + tensor_dict={'image': image}, + batch_size=batch_size, + batch_queue_capacity=100, + num_batch_queue_threads=1, + prefetch_queue_capacity=100) + batch = batch_queue.dequeue() + + for tensor_dict in batch: + for tensor in tensor_dict.values(): + self.assertAllEqual([4, 3], tensor.get_shape().as_list()) + + tf.initialize_all_variables().run() + with slim.queues.QueueRunners(sess): + i = 1 + for _ in range(num_batches): + batch_np = sess.run(batch) + for tensor_dict in batch_np: + 
for tensor in tensor_dict.values(): + self.assertAllEqual(tensor, np.arange(1, 13).reshape((4, 3)) * i) + i += 1 + with self.assertRaises(tf.errors.OutOfRangeError): + sess.run(batch) + + def test_batcher_when_batch_size_is_one(self): + with self.test_session() as sess: + batch_size = 1 + num_batches = 2 + examples = tf.Variable(tf.constant(2, dtype=tf.int32)) + counter = examples.count_up_to(num_batches * batch_size + 2) + image = tf.reshape( + tf.range(counter * counter), tf.stack([counter, counter])) + batch_queue = batcher.BatchQueue( + tensor_dict={'image': image}, + batch_size=batch_size, + batch_queue_capacity=100, + num_batch_queue_threads=1, + prefetch_queue_capacity=100) + batch = batch_queue.dequeue() + + for tensor_dict in batch: + for tensor in tensor_dict.values(): + self.assertAllEqual([None, None], tensor.get_shape().as_list()) + + tf.initialize_all_variables().run() + with slim.queues.QueueRunners(sess): + i = 2 + for _ in range(num_batches): + batch_np = sess.run(batch) + for tensor_dict in batch_np: + for tensor in tensor_dict.values(): + self.assertAllEqual(tensor, np.arange(i * i).reshape((i, i))) + i += 1 + with self.assertRaises(tf.errors.OutOfRangeError): + sess.run(batch) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/core/box_coder.py b/models/research/object_detection/core/box_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..c6e54a44033f4f17dac0976132e1449ea3fedc3d --- /dev/null +++ b/models/research/object_detection/core/box_coder.py @@ -0,0 +1,158 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Base box coder. + +Box coders convert between coordinate frames, namely image-centric +(with (0,0) on the top left of image) and anchor-centric (with (0,0) being +defined by a specific anchor). + +Users of a BoxCoder can call two methods: + encode: which encodes a box with respect to a given anchor + (or rather, a tensor of boxes wrt a corresponding tensor of anchors) and + decode: which inverts this encoding with a decode operation. +In both cases, the arguments are assumed to be in 1-1 correspondence already; +it is not the job of a BoxCoder to perform matching. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from abc import ABCMeta +from abc import abstractmethod +from abc import abstractproperty + +import six +import tensorflow.compat.v1 as tf + +from object_detection.utils import shape_utils + + +# Box coder types. +FASTER_RCNN = 'faster_rcnn' +KEYPOINT = 'keypoint' +MEAN_STDDEV = 'mean_stddev' +SQUARE = 'square' + + +class BoxCoder(six.with_metaclass(ABCMeta, object)): + """Abstract base class for box coder.""" + + @abstractproperty + def code_size(self): + """Return the size of each code. 
+ + This number is a constant and should agree with the output of the `encode` + op (e.g. if rel_codes is the output of self.encode(...), then it should have + shape [N, code_size()]). This abstractproperty should be overridden by + implementations. + + Returns: + an integer constant + """ + pass + + def encode(self, boxes, anchors): + """Encode a box list relative to an anchor collection. + + Args: + boxes: BoxList holding N boxes to be encoded + anchors: BoxList of N anchors + + Returns: + a tensor representing N relative-encoded boxes + """ + with tf.name_scope('Encode'): + return self._encode(boxes, anchors) + + def decode(self, rel_codes, anchors): + """Decode boxes that are encoded relative to an anchor collection. + + Args: + rel_codes: a tensor representing N relative-encoded boxes + anchors: BoxList of anchors + + Returns: + boxlist: BoxList holding N boxes encoded in the ordinary way (i.e., + with corners y_min, x_min, y_max, x_max) + """ + with tf.name_scope('Decode'): + return self._decode(rel_codes, anchors) + + @abstractmethod + def _encode(self, boxes, anchors): + """Method to be overriden by implementations. + + Args: + boxes: BoxList holding N boxes to be encoded + anchors: BoxList of N anchors + + Returns: + a tensor representing N relative-encoded boxes + """ + pass + + @abstractmethod + def _decode(self, rel_codes, anchors): + """Method to be overriden by implementations. + + Args: + rel_codes: a tensor representing N relative-encoded boxes + anchors: BoxList of anchors + + Returns: + boxlist: BoxList holding N boxes encoded in the ordinary way (i.e., + with corners y_min, x_min, y_max, x_max) + """ + pass + + +def batch_decode(encoded_boxes, box_coder, anchors): + """Decode a batch of encoded boxes. + + This op takes a batch of encoded bounding boxes and transforms + them to a batch of bounding boxes specified by their corners in + the order of [y_min, x_min, y_max, x_max]. + + Args: + encoded_boxes: a float32 tensor of shape [batch_size, num_anchors, + code_size] representing the location of the objects. + box_coder: a BoxCoder object. + anchors: a BoxList of anchors used to encode `encoded_boxes`. + + Returns: + decoded_boxes: a float32 tensor of shape [batch_size, num_anchors, + coder_size] representing the corners of the objects in the order + of [y_min, x_min, y_max, x_max]. + + Raises: + ValueError: if batch sizes of the inputs are inconsistent, or if + the number of anchors inferred from encoded_boxes and anchors are + inconsistent. + """ + encoded_boxes.get_shape().assert_has_rank(3) + if (shape_utils.get_dim_as_int(encoded_boxes.get_shape()[1]) + != anchors.num_boxes_static()): + raise ValueError('The number of anchors inferred from encoded_boxes' + ' and anchors are inconsistent: shape[1] of encoded_boxes' + ' %s should be equal to the number of anchors: %s.' % + (shape_utils.get_dim_as_int(encoded_boxes.get_shape()[1]), + anchors.num_boxes_static())) + + decoded_boxes = tf.stack([ + box_coder.decode(boxes, anchors).get() + for boxes in tf.unstack(encoded_boxes) + ]) + return decoded_boxes diff --git a/models/research/object_detection/core/box_coder_test.py b/models/research/object_detection/core/box_coder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..52765a9d06c990c483aaf87dcba3ecfe604d7adc --- /dev/null +++ b/models/research/object_detection/core/box_coder_test.py @@ -0,0 +1,62 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.core.box_coder.""" +import tensorflow.compat.v1 as tf + +from object_detection.core import box_coder +from object_detection.core import box_list +from object_detection.utils import test_case + + +class MockBoxCoder(box_coder.BoxCoder): + """Test BoxCoder that encodes/decodes using the multiply-by-two function.""" + + def code_size(self): + return 4 + + def _encode(self, boxes, anchors): + return 2.0 * boxes.get() + + def _decode(self, rel_codes, anchors): + return box_list.BoxList(rel_codes / 2.0) + + +class BoxCoderTest(test_case.TestCase): + + def test_batch_decode(self): + + expected_boxes = [[[0.0, 0.1, 0.5, 0.6], [0.5, 0.6, 0.7, 0.8]], + [[0.1, 0.2, 0.3, 0.4], [0.7, 0.8, 0.9, 1.0]]] + + def graph_fn(): + mock_anchor_corners = tf.constant( + [[0, 0.1, 0.2, 0.3], [0.2, 0.4, 0.4, 0.6]], tf.float32) + mock_anchors = box_list.BoxList(mock_anchor_corners) + mock_box_coder = MockBoxCoder() + + encoded_boxes_list = [mock_box_coder.encode( + box_list.BoxList(tf.constant(boxes)), mock_anchors) + for boxes in expected_boxes] + encoded_boxes = tf.stack(encoded_boxes_list) + decoded_boxes = box_coder.batch_decode( + encoded_boxes, mock_box_coder, mock_anchors) + return decoded_boxes + decoded_boxes_result = self.execute(graph_fn, []) + self.assertAllClose(expected_boxes, decoded_boxes_result) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/core/box_list.py b/models/research/object_detection/core/box_list.py new file mode 100644 index 0000000000000000000000000000000000000000..7b6b97e995f483918aa2416c5f3b2e5c8c94a0c3 --- /dev/null +++ b/models/research/object_detection/core/box_list.py @@ -0,0 +1,210 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Bounding Box List definition. + +BoxList represents a list of bounding boxes as tensorflow +tensors, where each bounding box is represented as a row of 4 numbers, +[y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes +within a given list correspond to a single image. See also +box_list_ops.py for common box related operations (such as area, iou, etc). + +Optionally, users can add additional related fields (such as weights). 
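+
+A minimal usage sketch (illustrative values only):
+  boxes = tf.constant([[0.25, 0.25, 0.75, 0.75]], tf.float32)
+  boxlist = BoxList(boxes)
+  boxlist.add_field('scores', tf.constant([0.9]))
+  num_boxes = boxlist.num_boxes()
+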
+We assume the following things to be true about fields: +* they correspond to boxes in the box_list along the 0th dimension +* they have inferrable rank at graph construction time +* all dimensions except for possibly the 0th can be inferred + (i.e., not None) at graph construction time. + +Some other notes: + * Following tensorflow conventions, we use height, width ordering, + and correspondingly, y,x (or ymin, xmin, ymax, xmax) ordering + * Tensors are always provided as (flat) [N, 4] tensors. +""" + +import tensorflow.compat.v1 as tf + +from object_detection.utils import shape_utils + + +class BoxList(object): + """Box collection.""" + + def __init__(self, boxes): + """Constructs box collection. + + Args: + boxes: a tensor of shape [N, 4] representing box corners + + Raises: + ValueError: if invalid dimensions for bbox data or if bbox data is not in + float32 format. + """ + if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4: + raise ValueError('Invalid dimensions for box data: {}'.format( + boxes.shape)) + if boxes.dtype != tf.float32: + raise ValueError('Invalid tensor type: should be tf.float32') + self.data = {'boxes': boxes} + + def num_boxes(self): + """Returns number of boxes held in collection. + + Returns: + a tensor representing the number of boxes held in the collection. + """ + return tf.shape(self.data['boxes'])[0] + + def num_boxes_static(self): + """Returns number of boxes held in collection. + + This number is inferred at graph construction time rather than run-time. + + Returns: + Number of boxes held in collection (integer) or None if this is not + inferrable at graph construction time. + """ + return shape_utils.get_dim_as_int(self.data['boxes'].get_shape()[0]) + + def get_all_fields(self): + """Returns all fields.""" + return self.data.keys() + + def get_extra_fields(self): + """Returns all non-box fields (i.e., everything not named 'boxes').""" + return [k for k in self.data.keys() if k != 'boxes'] + + def add_field(self, field, field_data): + """Add field to box list. + + This method can be used to add related box data such as + weights/labels, etc. + + Args: + field: a string key to access the data via `get` + field_data: a tensor containing the data to store in the BoxList + """ + self.data[field] = field_data + + def has_field(self, field): + return field in self.data + + def get(self): + """Convenience function for accessing box coordinates. + + Returns: + a tensor with shape [N, 4] representing box coordinates. + """ + return self.get_field('boxes') + + def set(self, boxes): + """Convenience function for setting box coordinates. + + Args: + boxes: a tensor of shape [N, 4] representing box corners + + Raises: + ValueError: if invalid dimensions for bbox data + """ + if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4: + raise ValueError('Invalid dimensions for box data.') + self.data['boxes'] = boxes + + def get_field(self, field): + """Accesses a box collection and associated fields. + + This function returns specified field with object; if no field is specified, + it returns the box coordinates. + + Args: + field: this optional string parameter can be used to specify + a related field to be accessed. + + Returns: + a tensor representing the box collection or an associated field. + + Raises: + ValueError: if invalid field + """ + if not self.has_field(field): + raise ValueError('field ' + str(field) + ' does not exist') + return self.data[field] + + def set_field(self, field, value): + """Sets the value of a field. 
+ + Updates the field of a box_list with a given value. + + Args: + field: (string) name of the field to set value. + value: the value to assign to the field. + + Raises: + ValueError: if the box_list does not have specified field. + """ + if not self.has_field(field): + raise ValueError('field %s does not exist' % field) + self.data[field] = value + + def get_center_coordinates_and_sizes(self, scope=None): + """Computes the center coordinates, height and width of the boxes. + + Args: + scope: name scope of the function. + + Returns: + a list of 4 1-D tensors [ycenter, xcenter, height, width]. + """ + with tf.name_scope(scope, 'get_center_coordinates_and_sizes'): + box_corners = self.get() + ymin, xmin, ymax, xmax = tf.unstack(tf.transpose(box_corners)) + width = xmax - xmin + height = ymax - ymin + ycenter = ymin + height / 2. + xcenter = xmin + width / 2. + return [ycenter, xcenter, height, width] + + def transpose_coordinates(self, scope=None): + """Transpose the coordinate representation in a boxlist. + + Args: + scope: name scope of the function. + """ + with tf.name_scope(scope, 'transpose_coordinates'): + y_min, x_min, y_max, x_max = tf.split( + value=self.get(), num_or_size_splits=4, axis=1) + self.set(tf.concat([x_min, y_min, x_max, y_max], 1)) + + def as_tensor_dict(self, fields=None): + """Retrieves specified fields as a dictionary of tensors. + + Args: + fields: (optional) list of fields to return in the dictionary. + If None (default), all fields are returned. + + Returns: + tensor_dict: A dictionary of tensors specified by fields. + + Raises: + ValueError: if specified field is not contained in boxlist. + """ + tensor_dict = {} + if fields is None: + fields = self.get_all_fields() + for field in fields: + if not self.has_field(field): + raise ValueError('boxlist must contain all specified fields') + tensor_dict[field] = self.get_field(field) + return tensor_dict diff --git a/models/research/object_detection/core/box_list_ops.py b/models/research/object_detection/core/box_list_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..159845b690d5f10ac4e38ca167faa2a6051cc023 --- /dev/null +++ b/models/research/object_detection/core/box_list_ops.py @@ -0,0 +1,1166 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Bounding Box List operations. + +Example box operations that are supported: + * areas: compute bounding box areas + * iou: pairwise intersection-over-union scores + * sq_dist: pairwise distances between bounding boxes + +Whenever box_list_ops functions output a BoxList, the fields of the incoming +BoxList are retained unless documented otherwise. 
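+
+A minimal sketch of the calling pattern (illustrative values only):
+  boxlist1 = box_list.BoxList(tf.constant([[0., 0., 1., 1.]], tf.float32))
+  boxlist2 = box_list.BoxList(tf.constant([[0., 0., 2., 2.]], tf.float32))
+  pairwise_iou = iou(boxlist1, boxlist2)  # shape [1, 1]; here 1 / 4 = 0.25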
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range +import tensorflow.compat.v1 as tf + +from object_detection.core import box_list +from object_detection.utils import ops +from object_detection.utils import shape_utils + + +class SortOrder(object): + """Enum class for sort order. + + Attributes: + ascend: ascend order. + descend: descend order. + """ + ascend = 1 + descend = 2 + + +def area(boxlist, scope=None): + """Computes area of boxes. + + Args: + boxlist: BoxList holding N boxes + scope: name scope. + + Returns: + a tensor with shape [N] representing box areas. + """ + with tf.name_scope(scope, 'Area'): + y_min, x_min, y_max, x_max = tf.split( + value=boxlist.get(), num_or_size_splits=4, axis=1) + return tf.squeeze((y_max - y_min) * (x_max - x_min), [1]) + + +def height_width(boxlist, scope=None): + """Computes height and width of boxes in boxlist. + + Args: + boxlist: BoxList holding N boxes + scope: name scope. + + Returns: + Height: A tensor with shape [N] representing box heights. + Width: A tensor with shape [N] representing box widths. + """ + with tf.name_scope(scope, 'HeightWidth'): + y_min, x_min, y_max, x_max = tf.split( + value=boxlist.get(), num_or_size_splits=4, axis=1) + return tf.squeeze(y_max - y_min, [1]), tf.squeeze(x_max - x_min, [1]) + + +def scale(boxlist, y_scale, x_scale, scope=None): + """scale box coordinates in x and y dimensions. + + Args: + boxlist: BoxList holding N boxes + y_scale: (float) scalar tensor + x_scale: (float) scalar tensor + scope: name scope. + + Returns: + boxlist: BoxList holding N boxes + """ + with tf.name_scope(scope, 'Scale'): + y_scale = tf.cast(y_scale, tf.float32) + x_scale = tf.cast(x_scale, tf.float32) + y_min, x_min, y_max, x_max = tf.split( + value=boxlist.get(), num_or_size_splits=4, axis=1) + y_min = y_scale * y_min + y_max = y_scale * y_max + x_min = x_scale * x_min + x_max = x_scale * x_max + scaled_boxlist = box_list.BoxList( + tf.concat([y_min, x_min, y_max, x_max], 1)) + return _copy_extra_fields(scaled_boxlist, boxlist) + + +def scale_height_width(boxlist, y_scale, x_scale, scope=None): + """Scale the height and width of boxes, leaving centers unchanged. + + Args: + boxlist: BoxList holding N boxes + y_scale: (float) scalar tensor + x_scale: (float) scalar tensor + scope: name scope. + + Returns: + boxlist: BoxList holding N boxes + """ + with tf.name_scope(scope, 'ScaleHeightWidth'): + y_scale = tf.cast(y_scale, tf.float32) + x_scale = tf.cast(x_scale, tf.float32) + yc, xc, height_orig, width_orig = boxlist.get_center_coordinates_and_sizes() + y_min = yc - 0.5 * y_scale * height_orig + y_max = yc + 0.5 * y_scale * height_orig + x_min = xc - 0.5 * x_scale * width_orig + x_max = xc + 0.5 * x_scale * width_orig + scaled_boxlist = box_list.BoxList( + tf.stack([y_min, x_min, y_max, x_max], 1)) + return _copy_extra_fields(scaled_boxlist, boxlist) + + +def clip_to_window(boxlist, window, filter_nonoverlapping=True, scope=None): + """Clip bounding boxes to a window. + + This op clips any input bounding boxes (represented by bounding box + corners) to a window, optionally filtering out boxes that do not + overlap at all with the window. + + Args: + boxlist: BoxList holding M_in boxes + window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] + window to which the op should clip boxes. + filter_nonoverlapping: whether to filter out boxes that do not overlap at + all with the window. + scope: name scope. 
+ + Returns: + a BoxList holding M_out boxes where M_out <= M_in + """ + with tf.name_scope(scope, 'ClipToWindow'): + y_min, x_min, y_max, x_max = tf.split( + value=boxlist.get(), num_or_size_splits=4, axis=1) + win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) + y_min_clipped = tf.maximum(tf.minimum(y_min, win_y_max), win_y_min) + y_max_clipped = tf.maximum(tf.minimum(y_max, win_y_max), win_y_min) + x_min_clipped = tf.maximum(tf.minimum(x_min, win_x_max), win_x_min) + x_max_clipped = tf.maximum(tf.minimum(x_max, win_x_max), win_x_min) + clipped = box_list.BoxList( + tf.concat([y_min_clipped, x_min_clipped, y_max_clipped, x_max_clipped], + 1)) + clipped = _copy_extra_fields(clipped, boxlist) + if filter_nonoverlapping: + areas = area(clipped) + nonzero_area_indices = tf.cast( + tf.reshape(tf.where(tf.greater(areas, 0.0)), [-1]), tf.int32) + clipped = gather(clipped, nonzero_area_indices) + return clipped + + +def prune_outside_window(boxlist, window, scope=None): + """Prunes bounding boxes that fall outside a given window. + + This function prunes bounding boxes that even partially fall outside the given + window. See also clip_to_window which only prunes bounding boxes that fall + completely outside the window, and clips any bounding boxes that partially + overflow. + + Args: + boxlist: a BoxList holding M_in boxes. + window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax] + of the window + scope: name scope. + + Returns: + pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in + valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes + in the input tensor. + """ + with tf.name_scope(scope, 'PruneOutsideWindow'): + y_min, x_min, y_max, x_max = tf.split( + value=boxlist.get(), num_or_size_splits=4, axis=1) + win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) + coordinate_violations = tf.concat([ + tf.less(y_min, win_y_min), tf.less(x_min, win_x_min), + tf.greater(y_max, win_y_max), tf.greater(x_max, win_x_max) + ], 1) + valid_indices = tf.reshape( + tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1]) + return gather(boxlist, valid_indices), valid_indices + + +def prune_completely_outside_window(boxlist, window, scope=None): + """Prunes bounding boxes that fall completely outside of the given window. + + The function clip_to_window prunes bounding boxes that fall + completely outside the window, but also clips any bounding boxes that + partially overflow. This function does not clip partially overflowing boxes. + + Args: + boxlist: a BoxList holding M_in boxes. + window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax] + of the window + scope: name scope. + + Returns: + pruned_boxlist: a new BoxList with all bounding boxes partially or fully in + the window. + valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes + in the input tensor. 
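+
+  For example (an illustrative sketch): with window [0., 0., 1., 1.], a box
+  [0.5, 0.5, 1.5, 1.5] straddling the boundary is kept by this function (but
+  would be removed by prune_outside_window), while a box [1.2, 1.2, 1.5, 1.5]
+  lying entirely outside the window is removed.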
+ """ + with tf.name_scope(scope, 'PruneCompleteleyOutsideWindow'): + y_min, x_min, y_max, x_max = tf.split( + value=boxlist.get(), num_or_size_splits=4, axis=1) + win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) + coordinate_violations = tf.concat([ + tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max), + tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min) + ], 1) + valid_indices = tf.reshape( + tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1]) + return gather(boxlist, valid_indices), valid_indices + + +def intersection(boxlist1, boxlist2, scope=None): + """Compute pairwise intersection areas between boxes. + + Args: + boxlist1: BoxList holding N boxes + boxlist2: BoxList holding M boxes + scope: name scope. + + Returns: + a tensor with shape [N, M] representing pairwise intersections + """ + with tf.name_scope(scope, 'Intersection'): + y_min1, x_min1, y_max1, x_max1 = tf.split( + value=boxlist1.get(), num_or_size_splits=4, axis=1) + y_min2, x_min2, y_max2, x_max2 = tf.split( + value=boxlist2.get(), num_or_size_splits=4, axis=1) + all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2)) + all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2)) + intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin) + all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2)) + all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2)) + intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin) + return intersect_heights * intersect_widths + + +def matched_intersection(boxlist1, boxlist2, scope=None): + """Compute intersection areas between corresponding boxes in two boxlists. + + Args: + boxlist1: BoxList holding N boxes + boxlist2: BoxList holding N boxes + scope: name scope. + + Returns: + a tensor with shape [N] representing pairwise intersections + """ + with tf.name_scope(scope, 'MatchedIntersection'): + y_min1, x_min1, y_max1, x_max1 = tf.split( + value=boxlist1.get(), num_or_size_splits=4, axis=1) + y_min2, x_min2, y_max2, x_max2 = tf.split( + value=boxlist2.get(), num_or_size_splits=4, axis=1) + min_ymax = tf.minimum(y_max1, y_max2) + max_ymin = tf.maximum(y_min1, y_min2) + intersect_heights = tf.maximum(0.0, min_ymax - max_ymin) + min_xmax = tf.minimum(x_max1, x_max2) + max_xmin = tf.maximum(x_min1, x_min2) + intersect_widths = tf.maximum(0.0, min_xmax - max_xmin) + return tf.reshape(intersect_heights * intersect_widths, [-1]) + + +def iou(boxlist1, boxlist2, scope=None): + """Computes pairwise intersection-over-union between box collections. + + Args: + boxlist1: BoxList holding N boxes + boxlist2: BoxList holding M boxes + scope: name scope. + + Returns: + a tensor with shape [N, M] representing pairwise iou scores. + """ + with tf.name_scope(scope, 'IOU'): + intersections = intersection(boxlist1, boxlist2) + areas1 = area(boxlist1) + areas2 = area(boxlist2) + unions = ( + tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections) + return tf.where( + tf.equal(intersections, 0.0), + tf.zeros_like(intersections), tf.truediv(intersections, unions)) + + +def matched_iou(boxlist1, boxlist2, scope=None): + """Compute intersection-over-union between corresponding boxes in boxlists. + + Args: + boxlist1: BoxList holding N boxes + boxlist2: BoxList holding N boxes + scope: name scope. + + Returns: + a tensor with shape [N] representing pairwise iou scores. 
+ """ + with tf.name_scope(scope, 'MatchedIOU'): + intersections = matched_intersection(boxlist1, boxlist2) + areas1 = area(boxlist1) + areas2 = area(boxlist2) + unions = areas1 + areas2 - intersections + return tf.where( + tf.equal(intersections, 0.0), + tf.zeros_like(intersections), tf.truediv(intersections, unions)) + + +def ioa(boxlist1, boxlist2, scope=None): + """Computes pairwise intersection-over-area between box collections. + + intersection-over-area (IOA) between two boxes box1 and box2 is defined as + their intersection area over box2's area. Note that ioa is not symmetric, + that is, ioa(box1, box2) != ioa(box2, box1). + + Args: + boxlist1: BoxList holding N boxes + boxlist2: BoxList holding M boxes + scope: name scope. + + Returns: + a tensor with shape [N, M] representing pairwise ioa scores. + """ + with tf.name_scope(scope, 'IOA'): + intersections = intersection(boxlist1, boxlist2) + areas = tf.expand_dims(area(boxlist2), 0) + return tf.truediv(intersections, areas) + + +def prune_non_overlapping_boxes( + boxlist1, boxlist2, min_overlap=0.0, scope=None): + """Prunes the boxes in boxlist1 that overlap less than thresh with boxlist2. + + For each box in boxlist1, we want its IOA to be more than minoverlap with + at least one of the boxes in boxlist2. If it does not, we remove it. + + Args: + boxlist1: BoxList holding N boxes. + boxlist2: BoxList holding M boxes. + min_overlap: Minimum required overlap between boxes, to count them as + overlapping. + scope: name scope. + + Returns: + new_boxlist1: A pruned boxlist with size [N', 4]. + keep_inds: A tensor with shape [N'] indexing kept bounding boxes in the + first input BoxList `boxlist1`. + """ + with tf.name_scope(scope, 'PruneNonOverlappingBoxes'): + ioa_ = ioa(boxlist2, boxlist1) # [M, N] tensor + ioa_ = tf.reduce_max(ioa_, reduction_indices=[0]) # [N] tensor + keep_bool = tf.greater_equal(ioa_, tf.constant(min_overlap)) + keep_inds = tf.squeeze(tf.where(keep_bool), axis=[1]) + new_boxlist1 = gather(boxlist1, keep_inds) + return new_boxlist1, keep_inds + + +def prune_small_boxes(boxlist, min_side, scope=None): + """Prunes small boxes in the boxlist which have a side smaller than min_side. + + Args: + boxlist: BoxList holding N boxes. + min_side: Minimum width AND height of box to survive pruning. + scope: name scope. + + Returns: + A pruned boxlist. + """ + with tf.name_scope(scope, 'PruneSmallBoxes'): + height, width = height_width(boxlist) + is_valid = tf.logical_and(tf.greater_equal(width, min_side), + tf.greater_equal(height, min_side)) + return gather(boxlist, tf.reshape(tf.where(is_valid), [-1])) + + +def change_coordinate_frame(boxlist, window, scope=None): + """Change coordinate frame of the boxlist to be relative to window's frame. + + Given a window of the form [ymin, xmin, ymax, xmax], + changes bounding box coordinates from boxlist to be relative to this window + (e.g., the min corner maps to (0,0) and the max corner maps to (1,1)). + + An example use case is data augmentation: where we are given groundtruth + boxes (boxlist) and would like to randomly crop the image to some + window (window). In this case we need to change the coordinate frame of + each groundtruth box to be relative to this new window. + + Args: + boxlist: A BoxList object holding N boxes. + window: A rank 1 tensor [4]. + scope: name scope. + + Returns: + Returns a BoxList object with N boxes. 
+ """ + with tf.name_scope(scope, 'ChangeCoordinateFrame'): + win_height = window[2] - window[0] + win_width = window[3] - window[1] + boxlist_new = scale(box_list.BoxList( + boxlist.get() - [window[0], window[1], window[0], window[1]]), + 1.0 / win_height, 1.0 / win_width) + boxlist_new = _copy_extra_fields(boxlist_new, boxlist) + return boxlist_new + + +def sq_dist(boxlist1, boxlist2, scope=None): + """Computes the pairwise squared distances between box corners. + + This op treats each box as if it were a point in a 4d Euclidean space and + computes pairwise squared distances. + + Mathematically, we are given two matrices of box coordinates X and Y, + where X(i,:) is the i'th row of X, containing the 4 numbers defining the + corners of the i'th box in boxlist1. Similarly Y(j,:) corresponds to + boxlist2. We compute + Z(i,j) = ||X(i,:) - Y(j,:)||^2 + = ||X(i,:)||^2 + ||Y(j,:)||^2 - 2 X(i,:)' * Y(j,:), + + Args: + boxlist1: BoxList holding N boxes + boxlist2: BoxList holding M boxes + scope: name scope. + + Returns: + a tensor with shape [N, M] representing pairwise distances + """ + with tf.name_scope(scope, 'SqDist'): + sqnorm1 = tf.reduce_sum(tf.square(boxlist1.get()), 1, keep_dims=True) + sqnorm2 = tf.reduce_sum(tf.square(boxlist2.get()), 1, keep_dims=True) + innerprod = tf.matmul(boxlist1.get(), boxlist2.get(), + transpose_a=False, transpose_b=True) + return sqnorm1 + tf.transpose(sqnorm2) - 2.0 * innerprod + + +def boolean_mask(boxlist, indicator, fields=None, scope=None, + use_static_shapes=False, indicator_sum=None): + """Select boxes from BoxList according to indicator and return new BoxList. + + `boolean_mask` returns the subset of boxes that are marked as "True" by the + indicator tensor. By default, `boolean_mask` returns boxes corresponding to + the input index list, as well as all additional fields stored in the boxlist + (indexing into the first dimension). However one can optionally only draw + from a subset of fields. + + Args: + boxlist: BoxList holding N boxes + indicator: a rank-1 boolean tensor + fields: (optional) list of fields to also gather from. If None (default), + all fields are gathered from. Pass an empty fields list to only gather + the box coordinates. + scope: name scope. + use_static_shapes: Whether to use an implementation with static shape + gurantees. + indicator_sum: An integer containing the sum of `indicator` vector. Only + required if `use_static_shape` is True. + + Returns: + subboxlist: a BoxList corresponding to the subset of the input BoxList + specified by indicator + Raises: + ValueError: if `indicator` is not a rank-1 boolean tensor. 
+ """ + with tf.name_scope(scope, 'BooleanMask'): + if indicator.shape.ndims != 1: + raise ValueError('indicator should have rank 1') + if indicator.dtype != tf.bool: + raise ValueError('indicator should be a boolean tensor') + if use_static_shapes: + if not (indicator_sum and isinstance(indicator_sum, int)): + raise ValueError('`indicator_sum` must be a of type int') + selected_positions = tf.cast(indicator, dtype=tf.float32) + indexed_positions = tf.cast( + tf.multiply( + tf.cumsum(selected_positions), selected_positions), + dtype=tf.int32) + one_hot_selector = tf.one_hot( + indexed_positions - 1, indicator_sum, dtype=tf.float32) + sampled_indices = tf.cast( + tf.tensordot( + tf.cast(tf.range(tf.shape(indicator)[0]), dtype=tf.float32), + one_hot_selector, + axes=[0, 0]), + dtype=tf.int32) + return gather(boxlist, sampled_indices, use_static_shapes=True) + else: + subboxlist = box_list.BoxList(tf.boolean_mask(boxlist.get(), indicator)) + if fields is None: + fields = boxlist.get_extra_fields() + for field in fields: + if not boxlist.has_field(field): + raise ValueError('boxlist must contain all specified fields') + subfieldlist = tf.boolean_mask(boxlist.get_field(field), indicator) + subboxlist.add_field(field, subfieldlist) + return subboxlist + + +def gather(boxlist, indices, fields=None, scope=None, use_static_shapes=False): + """Gather boxes from BoxList according to indices and return new BoxList. + + By default, `gather` returns boxes corresponding to the input index list, as + well as all additional fields stored in the boxlist (indexing into the + first dimension). However one can optionally only gather from a + subset of fields. + + Args: + boxlist: BoxList holding N boxes + indices: a rank-1 tensor of type int32 / int64 + fields: (optional) list of fields to also gather from. If None (default), + all fields are gathered from. Pass an empty fields list to only gather + the box coordinates. + scope: name scope. + use_static_shapes: Whether to use an implementation with static shape + gurantees. + + Returns: + subboxlist: a BoxList corresponding to the subset of the input BoxList + specified by indices + Raises: + ValueError: if specified field is not contained in boxlist or if the + indices are not of type int32 + """ + with tf.name_scope(scope, 'Gather'): + if len(indices.shape.as_list()) != 1: + raise ValueError('indices should have rank 1') + if indices.dtype != tf.int32 and indices.dtype != tf.int64: + raise ValueError('indices should be an int32 / int64 tensor') + gather_op = tf.gather + if use_static_shapes: + gather_op = ops.matmul_gather_on_zeroth_axis + subboxlist = box_list.BoxList(gather_op(boxlist.get(), indices)) + if fields is None: + fields = boxlist.get_extra_fields() + fields += ['boxes'] + for field in fields: + if not boxlist.has_field(field): + raise ValueError('boxlist must contain all specified fields') + subfieldlist = gather_op(boxlist.get_field(field), indices) + subboxlist.add_field(field, subfieldlist) + return subboxlist + + +def concatenate(boxlists, fields=None, scope=None): + """Concatenate list of BoxLists. + + This op concatenates a list of input BoxLists into a larger BoxList. It also + handles concatenation of BoxList fields as long as the field tensor shapes + are equal except for the first dimension. + + Args: + boxlists: list of BoxList objects + fields: optional list of fields to also concatenate. By default, all + fields from the first BoxList in the list are included in the + concatenation. + scope: name scope. 
+ + Returns: + a BoxList with number of boxes equal to + sum([boxlist.num_boxes() for boxlist in BoxList]) + Raises: + ValueError: if boxlists is invalid (i.e., is not a list, is empty, or + contains non BoxList objects), or if requested fields are not contained in + all boxlists + """ + with tf.name_scope(scope, 'Concatenate'): + if not isinstance(boxlists, list): + raise ValueError('boxlists should be a list') + if not boxlists: + raise ValueError('boxlists should have nonzero length') + for boxlist in boxlists: + if not isinstance(boxlist, box_list.BoxList): + raise ValueError('all elements of boxlists should be BoxList objects') + concatenated = box_list.BoxList( + tf.concat([boxlist.get() for boxlist in boxlists], 0)) + if fields is None: + fields = boxlists[0].get_extra_fields() + for field in fields: + first_field_shape = boxlists[0].get_field(field).get_shape().as_list() + first_field_shape[0] = -1 + if None in first_field_shape: + raise ValueError('field %s must have fully defined shape except for the' + ' 0th dimension.' % field) + for boxlist in boxlists: + if not boxlist.has_field(field): + raise ValueError('boxlist must contain all requested fields') + field_shape = boxlist.get_field(field).get_shape().as_list() + field_shape[0] = -1 + if field_shape != first_field_shape: + raise ValueError('field %s must have same shape for all boxlists ' + 'except for the 0th dimension.' % field) + concatenated_field = tf.concat( + [boxlist.get_field(field) for boxlist in boxlists], 0) + concatenated.add_field(field, concatenated_field) + return concatenated + + +def sort_by_field(boxlist, field, order=SortOrder.descend, scope=None): + """Sort boxes and associated fields according to a scalar field. + + A common use case is reordering the boxes according to descending scores. + + Args: + boxlist: BoxList holding N boxes. + field: A BoxList field for sorting and reordering the BoxList. + order: (Optional) descend or ascend. Default is descend. + scope: name scope. + + Returns: + sorted_boxlist: A sorted BoxList with the field in the specified order. + + Raises: + ValueError: if specified field does not exist + ValueError: if the order is not either descend or ascend + """ + with tf.name_scope(scope, 'SortByField'): + if order != SortOrder.descend and order != SortOrder.ascend: + raise ValueError('Invalid sort order') + + field_to_sort = boxlist.get_field(field) + if len(field_to_sort.shape.as_list()) != 1: + raise ValueError('Field should have rank 1') + + num_boxes = boxlist.num_boxes() + num_entries = tf.size(field_to_sort) + length_assert = tf.Assert( + tf.equal(num_boxes, num_entries), + ['Incorrect field size: actual vs expected.', num_entries, num_boxes]) + + with tf.control_dependencies([length_assert]): + _, sorted_indices = tf.nn.top_k(field_to_sort, num_boxes, sorted=True) + + if order == SortOrder.ascend: + sorted_indices = tf.reverse_v2(sorted_indices, [0]) + + return gather(boxlist, sorted_indices) + + +def visualize_boxes_in_image(image, boxlist, normalized=False, scope=None): + """Overlay bounding box list on image. + + Currently this visualization plots a 1 pixel thick red bounding box on top + of the image. Note that tf.image.draw_bounding_boxes essentially is + 1 indexed. + + Args: + image: an image tensor with shape [height, width, 3] + boxlist: a BoxList + normalized: (boolean) specify whether corners are to be interpreted + as absolute coordinates in image space or normalized with respect to the + image size. + scope: name scope. 
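+
+  Example (illustrative; boxes are given in absolute pixel coordinates, the
+  interpretation used when normalized=False):
+
+    image = tf.zeros((6, 4, 3))
+    boxes = box_list.BoxList(tf.constant([[0., 0., 5., 3.]]))
+    annotated = visualize_boxes_in_image(image, boxes)
+    # annotated is a [6, 4, 3] tensor with a 1 pixel thick red box drawn on it.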
+ + Returns: + image_and_boxes: an image tensor with shape [height, width, 3] + """ + with tf.name_scope(scope, 'VisualizeBoxesInImage'): + if not normalized: + height, width, _ = tf.unstack(tf.shape(image)) + boxlist = scale(boxlist, + 1.0 / tf.cast(height, tf.float32), + 1.0 / tf.cast(width, tf.float32)) + corners = tf.expand_dims(boxlist.get(), 0) + image = tf.expand_dims(image, 0) + return tf.squeeze(tf.image.draw_bounding_boxes(image, corners), [0]) + + +def filter_field_value_equals(boxlist, field, value, scope=None): + """Filter to keep only boxes with field entries equal to the given value. + + Args: + boxlist: BoxList holding N boxes. + field: field name for filtering. + value: scalar value. + scope: name scope. + + Returns: + a BoxList holding M boxes where M <= N + + Raises: + ValueError: if boxlist not a BoxList object or if it does not have + the specified field. + """ + with tf.name_scope(scope, 'FilterFieldValueEquals'): + if not isinstance(boxlist, box_list.BoxList): + raise ValueError('boxlist must be a BoxList') + if not boxlist.has_field(field): + raise ValueError('boxlist must contain the specified field') + filter_field = boxlist.get_field(field) + gather_index = tf.reshape(tf.where(tf.equal(filter_field, value)), [-1]) + return gather(boxlist, gather_index) + + +def filter_greater_than(boxlist, thresh, scope=None): + """Filter to keep only boxes with score exceeding a given threshold. + + This op keeps the collection of boxes whose corresponding scores are + greater than the input threshold. + + TODO(jonathanhuang): Change function name to filter_scores_greater_than + + Args: + boxlist: BoxList holding N boxes. Must contain a 'scores' field + representing detection scores. + thresh: scalar threshold + scope: name scope. + + Returns: + a BoxList holding M boxes where M <= N + + Raises: + ValueError: if boxlist not a BoxList object or if it does not + have a scores field + """ + with tf.name_scope(scope, 'FilterGreaterThan'): + if not isinstance(boxlist, box_list.BoxList): + raise ValueError('boxlist must be a BoxList') + if not boxlist.has_field('scores'): + raise ValueError('input boxlist must have \'scores\' field') + scores = boxlist.get_field('scores') + if len(scores.shape.as_list()) > 2: + raise ValueError('Scores should have rank 1 or 2') + if len(scores.shape.as_list()) == 2 and scores.shape.as_list()[1] != 1: + raise ValueError('Scores should have rank 1 or have shape ' + 'consistent with [None, 1]') + high_score_indices = tf.cast(tf.reshape( + tf.where(tf.greater(scores, thresh)), + [-1]), tf.int32) + return gather(boxlist, high_score_indices) + + +def non_max_suppression(boxlist, thresh, max_output_size, scope=None): + """Non maximum suppression. + + This op greedily selects a subset of detection bounding boxes, pruning + away boxes that have high IOU (intersection over union) overlap (> thresh) + with already selected boxes. Note that this only works for a single class --- + to apply NMS to multi-class predictions, use MultiClassNonMaxSuppression. + + Args: + boxlist: BoxList holding N boxes. Must contain a 'scores' field + representing detection scores. + thresh: scalar threshold + max_output_size: maximum number of retained boxes + scope: name scope. 
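+
+  Example (illustrative; the first two boxes overlap heavily, so only one of
+  them survives suppression):
+
+    boxes = box_list.BoxList(tf.constant([[0., 0., 1., 1.],
+                                          [0., 0.1, 1., 1.1],
+                                          [0., 10., 1., 11.]]))
+    boxes.add_field('scores', tf.constant([.9, .75, .6]))
+    nms = non_max_suppression(boxes, thresh=0.5, max_output_size=3)
+    # nms keeps rows 0 and 2 (the two distinct clusters), ordered by score.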
+ + Returns: + a BoxList holding M boxes where M <= max_output_size + Raises: + ValueError: if thresh is not in [0, 1] + """ + with tf.name_scope(scope, 'NonMaxSuppression'): + if not 0 <= thresh <= 1.0: + raise ValueError('thresh must be between 0 and 1') + if not isinstance(boxlist, box_list.BoxList): + raise ValueError('boxlist must be a BoxList') + if not boxlist.has_field('scores'): + raise ValueError('input boxlist must have \'scores\' field') + selected_indices = tf.image.non_max_suppression( + boxlist.get(), boxlist.get_field('scores'), + max_output_size, iou_threshold=thresh) + return gather(boxlist, selected_indices) + + +def _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from): + """Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to. + + Args: + boxlist_to_copy_to: BoxList to which extra fields are copied. + boxlist_to_copy_from: BoxList from which fields are copied. + + Returns: + boxlist_to_copy_to with extra fields. + """ + for field in boxlist_to_copy_from.get_extra_fields(): + boxlist_to_copy_to.add_field(field, boxlist_to_copy_from.get_field(field)) + return boxlist_to_copy_to + + +def to_normalized_coordinates(boxlist, height, width, + check_range=True, scope=None): + """Converts absolute box coordinates to normalized coordinates in [0, 1]. + + Usually one uses the dynamic shape of the image or conv-layer tensor: + boxlist = box_list_ops.to_normalized_coordinates(boxlist, + tf.shape(images)[1], + tf.shape(images)[2]), + + This function raises an assertion failed error at graph execution time when + the maximum coordinate is smaller than 1.01 (which means that coordinates are + already normalized). The value 1.01 is to deal with small rounding errors. + + Args: + boxlist: BoxList with coordinates in terms of pixel-locations. + height: Maximum value for height of absolute box coordinates. + width: Maximum value for width of absolute box coordinates. + check_range: If True, checks if the coordinates are normalized or not. + scope: name scope. + + Returns: + boxlist with normalized coordinates in [0, 1]. + """ + with tf.name_scope(scope, 'ToNormalizedCoordinates'): + height = tf.cast(height, tf.float32) + width = tf.cast(width, tf.float32) + + if check_range: + max_val = tf.reduce_max(boxlist.get()) + max_assert = tf.Assert(tf.greater(max_val, 1.01), + ['max value is lower than 1.01: ', max_val]) + with tf.control_dependencies([max_assert]): + width = tf.identity(width) + + return scale(boxlist, 1 / height, 1 / width) + + +def to_absolute_coordinates(boxlist, + height, + width, + check_range=True, + maximum_normalized_coordinate=1.1, + scope=None): + """Converts normalized box coordinates to absolute pixel coordinates. + + This function raises an assertion failed error when the maximum box coordinate + value is larger than maximum_normalized_coordinate (in which case coordinates + are already absolute). + + Args: + boxlist: BoxList with coordinates in range [0, 1]. + height: Maximum value for height of absolute box coordinates. + width: Maximum value for width of absolute box coordinates. + check_range: If True, checks if the coordinates are normalized or not. + maximum_normalized_coordinate: Maximum coordinate value to be considered + as normalized, default to 1.1. + scope: name scope. + + Returns: + boxlist with absolute coordinates in terms of the image size. + + """ + with tf.name_scope(scope, 'ToAbsoluteCoordinates'): + height = tf.cast(height, tf.float32) + width = tf.cast(width, tf.float32) + + # Ensure range of input boxes is correct. 
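+    # The assertion below fires at run time if any coordinate exceeds
+    # maximum_normalized_coordinate, i.e. if the boxes already look like
+    # absolute (pixel) coordinates; it is attached to `width` through
+    # control_dependencies so that it is evaluated before scaling.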
+ if check_range: + box_maximum = tf.reduce_max(boxlist.get()) + max_assert = tf.Assert( + tf.greater_equal(maximum_normalized_coordinate, box_maximum), + ['maximum box coordinate value is larger ' + 'than %f: ' % maximum_normalized_coordinate, box_maximum]) + with tf.control_dependencies([max_assert]): + width = tf.identity(width) + + return scale(boxlist, height, width) + + +def refine_boxes_multi_class(pool_boxes, + num_classes, + nms_iou_thresh, + nms_max_detections, + voting_iou_thresh=0.5): + """Refines a pool of boxes using non max suppression and box voting. + + Box refinement is done independently for each class. + + Args: + pool_boxes: (BoxList) A collection of boxes to be refined. pool_boxes must + have a rank 1 'scores' field and a rank 1 'classes' field. + num_classes: (int scalar) Number of classes. + nms_iou_thresh: (float scalar) iou threshold for non max suppression (NMS). + nms_max_detections: (int scalar) maximum output size for NMS. + voting_iou_thresh: (float scalar) iou threshold for box voting. + + Returns: + BoxList of refined boxes. + + Raises: + ValueError: if + a) nms_iou_thresh or voting_iou_thresh is not in [0, 1]. + b) pool_boxes is not a BoxList. + c) pool_boxes does not have a scores and classes field. + """ + if not 0.0 <= nms_iou_thresh <= 1.0: + raise ValueError('nms_iou_thresh must be between 0 and 1') + if not 0.0 <= voting_iou_thresh <= 1.0: + raise ValueError('voting_iou_thresh must be between 0 and 1') + if not isinstance(pool_boxes, box_list.BoxList): + raise ValueError('pool_boxes must be a BoxList') + if not pool_boxes.has_field('scores'): + raise ValueError('pool_boxes must have a \'scores\' field') + if not pool_boxes.has_field('classes'): + raise ValueError('pool_boxes must have a \'classes\' field') + + refined_boxes = [] + for i in range(num_classes): + boxes_class = filter_field_value_equals(pool_boxes, 'classes', i) + refined_boxes_class = refine_boxes(boxes_class, nms_iou_thresh, + nms_max_detections, voting_iou_thresh) + refined_boxes.append(refined_boxes_class) + return sort_by_field(concatenate(refined_boxes), 'scores') + + +def refine_boxes(pool_boxes, + nms_iou_thresh, + nms_max_detections, + voting_iou_thresh=0.5): + """Refines a pool of boxes using non max suppression and box voting. + + Args: + pool_boxes: (BoxList) A collection of boxes to be refined. pool_boxes must + have a rank 1 'scores' field. + nms_iou_thresh: (float scalar) iou threshold for non max suppression (NMS). + nms_max_detections: (int scalar) maximum output size for NMS. + voting_iou_thresh: (float scalar) iou threshold for box voting. + + Returns: + BoxList of refined boxes. + + Raises: + ValueError: if + a) nms_iou_thresh or voting_iou_thresh is not in [0, 1]. + b) pool_boxes is not a BoxList. + c) pool_boxes does not have a scores field. + """ + if not 0.0 <= nms_iou_thresh <= 1.0: + raise ValueError('nms_iou_thresh must be between 0 and 1') + if not 0.0 <= voting_iou_thresh <= 1.0: + raise ValueError('voting_iou_thresh must be between 0 and 1') + if not isinstance(pool_boxes, box_list.BoxList): + raise ValueError('pool_boxes must be a BoxList') + if not pool_boxes.has_field('scores'): + raise ValueError('pool_boxes must have a \'scores\' field') + + nms_boxes = non_max_suppression( + pool_boxes, nms_iou_thresh, nms_max_detections) + return box_voting(nms_boxes, pool_boxes, voting_iou_thresh) + + +def box_voting(selected_boxes, pool_boxes, iou_thresh=0.5): + """Performs box voting as described in S. Gidaris and N. Komodakis, ICCV 2015. 
+ + Performs box voting as described in 'Object detection via a multi-region & + semantic segmentation-aware CNN model', Gidaris and Komodakis, ICCV 2015. For + each box 'B' in selected_boxes, we find the set 'S' of boxes in pool_boxes + with iou overlap >= iou_thresh. The location of B is set to the weighted + average location of boxes in S (scores are used for weighting). And the score + of B is set to the average score of boxes in S. + + Args: + selected_boxes: BoxList containing a subset of boxes in pool_boxes. These + boxes are usually selected from pool_boxes using non max suppression. + pool_boxes: BoxList containing a set of (possibly redundant) boxes. + iou_thresh: (float scalar) iou threshold for matching boxes in + selected_boxes and pool_boxes. + + Returns: + BoxList containing averaged locations and scores for each box in + selected_boxes. + + Raises: + ValueError: if + a) selected_boxes or pool_boxes is not a BoxList. + b) if iou_thresh is not in [0, 1]. + c) pool_boxes does not have a scores field. + """ + if not 0.0 <= iou_thresh <= 1.0: + raise ValueError('iou_thresh must be between 0 and 1') + if not isinstance(selected_boxes, box_list.BoxList): + raise ValueError('selected_boxes must be a BoxList') + if not isinstance(pool_boxes, box_list.BoxList): + raise ValueError('pool_boxes must be a BoxList') + if not pool_boxes.has_field('scores'): + raise ValueError('pool_boxes must have a \'scores\' field') + + iou_ = iou(selected_boxes, pool_boxes) + match_indicator = tf.cast(tf.greater(iou_, iou_thresh), dtype=tf.float32) + num_matches = tf.reduce_sum(match_indicator, 1) + # TODO(kbanoop): Handle the case where some boxes in selected_boxes do not + # match to any boxes in pool_boxes. For such boxes without any matches, we + # should return the original boxes without voting. + match_assert = tf.Assert( + tf.reduce_all(tf.greater(num_matches, 0)), + ['Each box in selected_boxes must match with at least one box ' + 'in pool_boxes.']) + + scores = tf.expand_dims(pool_boxes.get_field('scores'), 1) + scores_assert = tf.Assert( + tf.reduce_all(tf.greater_equal(scores, 0)), + ['Scores must be non negative.']) + + with tf.control_dependencies([scores_assert, match_assert]): + sum_scores = tf.matmul(match_indicator, scores) + averaged_scores = tf.reshape(sum_scores, [-1]) / num_matches + + box_locations = tf.matmul(match_indicator, + pool_boxes.get() * scores) / sum_scores + averaged_boxes = box_list.BoxList(box_locations) + _copy_extra_fields(averaged_boxes, selected_boxes) + averaged_boxes.add_field('scores', averaged_scores) + return averaged_boxes + + +def pad_or_clip_box_list(boxlist, num_boxes, scope=None): + """Pads or clips all fields of a BoxList. + + Args: + boxlist: A BoxList with arbitrary of number of boxes. + num_boxes: First num_boxes in boxlist are kept. + The fields are zero-padded if num_boxes is bigger than the + actual number of boxes. + scope: name scope. + + Returns: + BoxList with all fields padded or clipped. + """ + with tf.name_scope(scope, 'PadOrClipBoxList'): + subboxlist = box_list.BoxList(shape_utils.pad_or_clip_tensor( + boxlist.get(), num_boxes)) + for field in boxlist.get_extra_fields(): + subfield = shape_utils.pad_or_clip_tensor( + boxlist.get_field(field), num_boxes) + subboxlist.add_field(field, subfield) + return subboxlist + + +def select_random_box(boxlist, + default_box=None, + seed=None, + scope=None): + """Selects a random bounding box from a `BoxList`. + + Args: + boxlist: A BoxList. + default_box: A [1, 4] float32 tensor. 
If no boxes are present in `boxlist`, + this default box will be returned. If None, will use a default box of + [[-1., -1., -1., -1.]]. + seed: Random seed. + scope: Name scope. + + Returns: + bbox: A [1, 4] tensor with a random bounding box. + valid: A bool tensor indicating whether a valid bounding box is returned + (True) or whether the default box is returned (False). + """ + with tf.name_scope(scope, 'SelectRandomBox'): + bboxes = boxlist.get() + combined_shape = shape_utils.combined_static_and_dynamic_shape(bboxes) + number_of_boxes = combined_shape[0] + default_box = default_box or tf.constant([[-1., -1., -1., -1.]]) + + def select_box(): + random_index = tf.random_uniform([], + maxval=number_of_boxes, + dtype=tf.int32, + seed=seed) + return tf.expand_dims(bboxes[random_index], axis=0), tf.constant(True) + + return tf.cond( + tf.greater_equal(number_of_boxes, 1), + true_fn=select_box, + false_fn=lambda: (default_box, tf.constant(False))) + + +def get_minimal_coverage_box(boxlist, + default_box=None, + scope=None): + """Creates a single bounding box which covers all boxes in the boxlist. + + Args: + boxlist: A Boxlist. + default_box: A [1, 4] float32 tensor. If no boxes are present in `boxlist`, + this default box will be returned. If None, will use a default box of + [[0., 0., 1., 1.]]. + scope: Name scope. + + Returns: + A [1, 4] float32 tensor with a bounding box that tightly covers all the + boxes in the box list. If the boxlist does not contain any boxes, the + default box is returned. + """ + with tf.name_scope(scope, 'CreateCoverageBox'): + num_boxes = boxlist.num_boxes() + + def coverage_box(bboxes): + y_min, x_min, y_max, x_max = tf.split( + value=bboxes, num_or_size_splits=4, axis=1) + y_min_coverage = tf.reduce_min(y_min, axis=0) + x_min_coverage = tf.reduce_min(x_min, axis=0) + y_max_coverage = tf.reduce_max(y_max, axis=0) + x_max_coverage = tf.reduce_max(x_max, axis=0) + return tf.stack( + [y_min_coverage, x_min_coverage, y_max_coverage, x_max_coverage], + axis=1) + + default_box = default_box or tf.constant([[0., 0., 1., 1.]]) + return tf.cond( + tf.greater_equal(num_boxes, 1), + true_fn=lambda: coverage_box(boxlist.get()), + false_fn=lambda: default_box) + + +def sample_boxes_by_jittering(boxlist, + num_boxes_to_sample, + stddev=0.1, + scope=None): + """Samples num_boxes_to_sample boxes by jittering around boxlist boxes. + + It is possible that this function might generate boxes with size 0. The larger + the stddev, this is more probable. For a small stddev of 0.1 this probability + is very small. + + Args: + boxlist: A boxlist containing N boxes in normalized coordinates. + num_boxes_to_sample: A positive integer containing the number of boxes to + sample. + stddev: Standard deviation. This is used to draw random offsets for the + box corners from a normal distribution. The offset is multiplied by the + box size so will be larger in terms of pixels for larger boxes. + scope: Name scope. + + Returns: + sampled_boxlist: A boxlist containing num_boxes_to_sample boxes in + normalized coordinates. 
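+
+  Example (illustrative):
+
+    boxes = box_list.BoxList(tf.constant([[0.1, 0.1, 0.4, 0.4],
+                                          [0.5, 0.5, 0.9, 0.9]]))
+    jittered = sample_boxes_by_jittering(boxes, num_boxes_to_sample=10)
+    # jittered holds 10 boxes, each a randomly perturbed copy of one of the
+    # inputs, clipped to the unit window.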
+ """ + with tf.name_scope(scope, 'SampleBoxesByJittering'): + num_boxes = boxlist.num_boxes() + box_indices = tf.random_uniform( + [num_boxes_to_sample], + minval=0, + maxval=num_boxes, + dtype=tf.int32) + sampled_boxes = tf.gather(boxlist.get(), box_indices) + sampled_boxes_height = sampled_boxes[:, 2] - sampled_boxes[:, 0] + sampled_boxes_width = sampled_boxes[:, 3] - sampled_boxes[:, 1] + rand_miny_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev) + rand_minx_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev) + rand_maxy_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev) + rand_maxx_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev) + miny = rand_miny_gaussian * sampled_boxes_height + sampled_boxes[:, 0] + minx = rand_minx_gaussian * sampled_boxes_width + sampled_boxes[:, 1] + maxy = rand_maxy_gaussian * sampled_boxes_height + sampled_boxes[:, 2] + maxx = rand_maxx_gaussian * sampled_boxes_width + sampled_boxes[:, 3] + maxy = tf.maximum(miny, maxy) + maxx = tf.maximum(minx, maxx) + sampled_boxes = tf.stack([miny, minx, maxy, maxx], axis=1) + sampled_boxes = tf.maximum(tf.minimum(sampled_boxes, 1.0), 0.0) + return box_list.BoxList(sampled_boxes) diff --git a/models/research/object_detection/core/box_list_ops_test.py b/models/research/object_detection/core/box_list_ops_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b572dff9e1cdd1b7ae59a63df3ae294d4b01a9a5 --- /dev/null +++ b/models/research/object_detection/core/box_list_ops_test.py @@ -0,0 +1,1079 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.core.box_list_ops.""" +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.core import box_list +from object_detection.core import box_list_ops +from object_detection.utils import test_case + + +class BoxListOpsTest(test_case.TestCase): + """Tests for common bounding box operations.""" + + def test_area(self): + def graph_fn(): + corners = tf.constant([[0.0, 0.0, 10.0, 20.0], [1.0, 2.0, 3.0, 4.0]]) + boxes = box_list.BoxList(corners) + areas = box_list_ops.area(boxes) + return areas + areas_out = self.execute(graph_fn, []) + exp_output = [200.0, 4.0] + self.assertAllClose(areas_out, exp_output) + + def test_height_width(self): + def graph_fn(): + corners = tf.constant([[0.0, 0.0, 10.0, 20.0], [1.0, 2.0, 3.0, 4.0]]) + boxes = box_list.BoxList(corners) + return box_list_ops.height_width(boxes) + heights_out, widths_out = self.execute(graph_fn, []) + exp_output_heights = [10., 2.] + exp_output_widths = [20., 2.] 
+ self.assertAllClose(heights_out, exp_output_heights) + self.assertAllClose(widths_out, exp_output_widths) + + def test_scale(self): + def graph_fn(): + corners = tf.constant([[0, 0, 100, 200], [50, 120, 100, 140]], + dtype=tf.float32) + boxes = box_list.BoxList(corners) + boxes.add_field('extra_data', tf.constant([[1], [2]])) + + y_scale = tf.constant(1.0/100) + x_scale = tf.constant(1.0/200) + scaled_boxes = box_list_ops.scale(boxes, y_scale, x_scale) + return scaled_boxes.get(), scaled_boxes.get_field('extra_data') + scaled_corners_out, extra_data_out = self.execute(graph_fn, []) + exp_output = [[0, 0, 1, 1], [0.5, 0.6, 1.0, 0.7]] + self.assertAllClose(scaled_corners_out, exp_output) + self.assertAllEqual(extra_data_out, [[1], [2]]) + + def test_scale_height_width(self): + def graph_fn(): + corners = tf.constant([[-10, -20, 10, 20], [0, 100, 100, 200]], + dtype=tf.float32) + boxes = box_list.BoxList(corners) + boxes.add_field('extra_data', tf.constant([[1], [2]])) + + y_scale = tf.constant(2.) + x_scale = tf.constant(0.5) + scaled_boxes = box_list_ops.scale_height_width(boxes, y_scale, x_scale) + return scaled_boxes.get(), scaled_boxes.get_field('extra_data') + exp_output = [ + [-20., -10, 20., 10], + [-50., 125, 150., 175.]] + scaled_corners_out, extra_data_out = self.execute(graph_fn, []) + self.assertAllClose(scaled_corners_out, exp_output) + self.assertAllEqual(extra_data_out, [[1], [2]]) + + def test_clip_to_window_filter_boxes_which_fall_outside_the_window( + self): + def graph_fn(): + window = tf.constant([0, 0, 9, 14], tf.float32) + corners = tf.constant([[5.0, 5.0, 6.0, 6.0], + [-1.0, -2.0, 4.0, 5.0], + [2.0, 3.0, 5.0, 9.0], + [0.0, 0.0, 9.0, 14.0], + [-100.0, -100.0, 300.0, 600.0], + [-10.0, -10.0, -9.0, -9.0]]) + boxes = box_list.BoxList(corners) + boxes.add_field('extra_data', tf.constant([[1], [2], [3], [4], [5], [6]])) + pruned = box_list_ops.clip_to_window( + boxes, window, filter_nonoverlapping=True) + return pruned.get(), pruned.get_field('extra_data') + exp_output = [[5.0, 5.0, 6.0, 6.0], [0.0, 0.0, 4.0, 5.0], + [2.0, 3.0, 5.0, 9.0], [0.0, 0.0, 9.0, 14.0], + [0.0, 0.0, 9.0, 14.0]] + pruned_output, extra_data_out = self.execute_cpu(graph_fn, []) + self.assertAllClose(pruned_output, exp_output) + self.assertAllEqual(extra_data_out, [[1], [2], [3], [4], [5]]) + + def test_clip_to_window_without_filtering_boxes_which_fall_outside_the_window( + self): + def graph_fn(): + window = tf.constant([0, 0, 9, 14], tf.float32) + corners = tf.constant([[5.0, 5.0, 6.0, 6.0], + [-1.0, -2.0, 4.0, 5.0], + [2.0, 3.0, 5.0, 9.0], + [0.0, 0.0, 9.0, 14.0], + [-100.0, -100.0, 300.0, 600.0], + [-10.0, -10.0, -9.0, -9.0]]) + boxes = box_list.BoxList(corners) + boxes.add_field('extra_data', tf.constant([[1], [2], [3], [4], [5], [6]])) + pruned = box_list_ops.clip_to_window( + boxes, window, filter_nonoverlapping=False) + return pruned.get(), pruned.get_field('extra_data') + pruned_output, extra_data_out = self.execute(graph_fn, []) + exp_output = [[5.0, 5.0, 6.0, 6.0], [0.0, 0.0, 4.0, 5.0], + [2.0, 3.0, 5.0, 9.0], [0.0, 0.0, 9.0, 14.0], + [0.0, 0.0, 9.0, 14.0], [0.0, 0.0, 0.0, 0.0]] + self.assertAllClose(pruned_output, exp_output) + self.assertAllEqual(extra_data_out, [[1], [2], [3], [4], [5], [6]]) + + def test_prune_outside_window_filters_boxes_which_fall_outside_the_window( + self): + def graph_fn(): + window = tf.constant([0, 0, 9, 14], tf.float32) + corners = tf.constant([[5.0, 5.0, 6.0, 6.0], + [-1.0, -2.0, 4.0, 5.0], + [2.0, 3.0, 5.0, 9.0], + [0.0, 0.0, 9.0, 14.0], + [-10.0, -10.0, -9.0, 
-9.0], + [-100.0, -100.0, 300.0, 600.0]]) + boxes = box_list.BoxList(corners) + boxes.add_field('extra_data', tf.constant([[1], [2], [3], [4], [5], [6]])) + pruned, keep_indices = box_list_ops.prune_outside_window(boxes, window) + return pruned.get(), pruned.get_field('extra_data'), keep_indices + pruned_output, extra_data_out, keep_indices_out = self.execute_cpu(graph_fn, + []) + exp_output = [[5.0, 5.0, 6.0, 6.0], + [2.0, 3.0, 5.0, 9.0], + [0.0, 0.0, 9.0, 14.0]] + self.assertAllClose(pruned_output, exp_output) + self.assertAllEqual(keep_indices_out, [0, 2, 3]) + self.assertAllEqual(extra_data_out, [[1], [3], [4]]) + + def test_prune_completely_outside_window(self): + def graph_fn(): + window = tf.constant([0, 0, 9, 14], tf.float32) + corners = tf.constant([[5.0, 5.0, 6.0, 6.0], + [-1.0, -2.0, 4.0, 5.0], + [2.0, 3.0, 5.0, 9.0], + [0.0, 0.0, 9.0, 14.0], + [-10.0, -10.0, -9.0, -9.0], + [-100.0, -100.0, 300.0, 600.0]]) + boxes = box_list.BoxList(corners) + boxes.add_field('extra_data', tf.constant([[1], [2], [3], [4], [5], [6]])) + pruned, keep_indices = box_list_ops.prune_completely_outside_window( + boxes, window) + return pruned.get(), pruned.get_field('extra_data'), keep_indices + pruned_output, extra_data_out, keep_indices_out = self.execute(graph_fn, []) + exp_output = [[5.0, 5.0, 6.0, 6.0], + [-1.0, -2.0, 4.0, 5.0], + [2.0, 3.0, 5.0, 9.0], + [0.0, 0.0, 9.0, 14.0], + [-100.0, -100.0, 300.0, 600.0]] + self.assertAllClose(pruned_output, exp_output) + self.assertAllEqual(keep_indices_out, [0, 1, 2, 3, 5]) + self.assertAllEqual(extra_data_out, [[1], [2], [3], [4], [6]]) + + def test_prune_completely_outside_window_with_empty_boxlist(self): + def graph_fn(): + window = tf.constant([0, 0, 9, 14], tf.float32) + corners = tf.zeros(shape=[0, 4], dtype=tf.float32) + boxes = box_list.BoxList(corners) + boxes.add_field('extra_data', tf.zeros(shape=[0], dtype=tf.int32)) + pruned, keep_indices = box_list_ops.prune_completely_outside_window( + boxes, window) + pruned_boxes = pruned.get() + extra = pruned.get_field('extra_data') + return pruned_boxes, extra, keep_indices + + pruned_boxes_out, extra_out, keep_indices_out = self.execute(graph_fn, []) + exp_pruned_boxes = np.zeros(shape=[0, 4], dtype=np.float32) + exp_extra = np.zeros(shape=[0], dtype=np.int32) + self.assertAllClose(exp_pruned_boxes, pruned_boxes_out) + self.assertAllEqual([], keep_indices_out) + self.assertAllEqual(exp_extra, extra_out) + + def test_intersection(self): + def graph_fn(): + corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) + corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], + [0.0, 0.0, 20.0, 20.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + intersect = box_list_ops.intersection(boxes1, boxes2) + return intersect + exp_output = [[2.0, 0.0, 6.0], [1.0, 0.0, 5.0]] + intersect_out = self.execute(graph_fn, []) + self.assertAllClose(intersect_out, exp_output) + + def test_matched_intersection(self): + def graph_fn(): + corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) + corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + intersect = box_list_ops.matched_intersection(boxes1, boxes2) + return intersect + exp_output = [2.0, 0.0] + intersect_out = self.execute(graph_fn, []) + self.assertAllClose(intersect_out, exp_output) + + def test_iou(self): + def graph_fn(): + corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) + 
corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) + corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], + [0.0, 0.0, 20.0, 20.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + iou = box_list_ops.iou(boxes1, boxes2) + return iou + exp_output = [[2.0 / 16.0, 0, 6.0 / 400.0], [1.0 / 16.0, 0.0, 5.0 / 400.0]] + iou_output = self.execute(graph_fn, []) + self.assertAllClose(iou_output, exp_output) + + def test_matched_iou(self): + def graph_fn(): + corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) + corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + iou = box_list_ops.matched_iou(boxes1, boxes2) + return iou + exp_output = [2.0 / 16.0, 0] + iou_output = self.execute(graph_fn, []) + self.assertAllClose(iou_output, exp_output) + + def test_iouworks_on_empty_inputs(self): + def graph_fn(): + corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) + corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], + [0.0, 0.0, 20.0, 20.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + boxes_empty = box_list.BoxList(tf.zeros((0, 4))) + iou_empty_1 = box_list_ops.iou(boxes1, boxes_empty) + iou_empty_2 = box_list_ops.iou(boxes_empty, boxes2) + iou_empty_3 = box_list_ops.iou(boxes_empty, boxes_empty) + return iou_empty_1, iou_empty_2, iou_empty_3 + iou_output_1, iou_output_2, iou_output_3 = self.execute(graph_fn, []) + self.assertAllEqual(iou_output_1.shape, (2, 0)) + self.assertAllEqual(iou_output_2.shape, (0, 3)) + self.assertAllEqual(iou_output_3.shape, (0, 0)) + + def test_ioa(self): + def graph_fn(): + corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) + corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], + [0.0, 0.0, 20.0, 20.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + ioa_1 = box_list_ops.ioa(boxes1, boxes2) + ioa_2 = box_list_ops.ioa(boxes2, boxes1) + return ioa_1, ioa_2 + exp_output_1 = [[2.0 / 12.0, 0, 6.0 / 400.0], + [1.0 / 12.0, 0.0, 5.0 / 400.0]] + exp_output_2 = [[2.0 / 6.0, 1.0 / 5.0], + [0, 0], + [6.0 / 6.0, 5.0 / 5.0]] + ioa_output_1, ioa_output_2 = self.execute(graph_fn, []) + self.assertAllClose(ioa_output_1, exp_output_1) + self.assertAllClose(ioa_output_2, exp_output_2) + + def test_prune_non_overlapping_boxes(self): + def graph_fn(): + corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) + corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], + [0.0, 0.0, 20.0, 20.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + minoverlap = 0.5 + + exp_output_1 = boxes1 + exp_output_2 = box_list.BoxList(tf.constant(0.0, shape=[0, 4])) + output_1, keep_indices_1 = box_list_ops.prune_non_overlapping_boxes( + boxes1, boxes2, min_overlap=minoverlap) + output_2, keep_indices_2 = box_list_ops.prune_non_overlapping_boxes( + boxes2, boxes1, min_overlap=minoverlap) + return (output_1.get(), keep_indices_1, output_2.get(), keep_indices_2, + exp_output_1.get(), exp_output_2.get()) + + (output_1_, keep_indices_1_, output_2_, keep_indices_2_, exp_output_1_, + exp_output_2_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(output_1_, exp_output_1_) + self.assertAllClose(output_2_, exp_output_2_) + self.assertAllEqual(keep_indices_1_, [0, 1]) + self.assertAllEqual(keep_indices_2_, []) + + def test_prune_small_boxes(self): 
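+    # With min_side=3 only boxes whose height and width are both >= 3 survive:
+    # the 3x4 box and the 20x20 box.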
+ def graph_fn(): + boxes = tf.constant([[4.0, 3.0, 7.0, 5.0], + [5.0, 6.0, 10.0, 7.0], + [3.0, 4.0, 6.0, 8.0], + [14.0, 14.0, 15.0, 15.0], + [0.0, 0.0, 20.0, 20.0]]) + boxes = box_list.BoxList(boxes) + pruned_boxes = box_list_ops.prune_small_boxes(boxes, 3) + return pruned_boxes.get() + exp_boxes = [[3.0, 4.0, 6.0, 8.0], + [0.0, 0.0, 20.0, 20.0]] + pruned_boxes = self.execute(graph_fn, []) + self.assertAllEqual(pruned_boxes, exp_boxes) + + def test_prune_small_boxes_prunes_boxes_with_negative_side(self): + def graph_fn(): + boxes = tf.constant([[4.0, 3.0, 7.0, 5.0], + [5.0, 6.0, 10.0, 7.0], + [3.0, 4.0, 6.0, 8.0], + [14.0, 14.0, 15.0, 15.0], + [0.0, 0.0, 20.0, 20.0], + [2.0, 3.0, 1.5, 7.0], # negative height + [2.0, 3.0, 5.0, 1.7]]) # negative width + boxes = box_list.BoxList(boxes) + pruned_boxes = box_list_ops.prune_small_boxes(boxes, 3) + return pruned_boxes.get() + exp_boxes = [[3.0, 4.0, 6.0, 8.0], + [0.0, 0.0, 20.0, 20.0]] + pruned_boxes = self.execute_cpu(graph_fn, []) + self.assertAllEqual(pruned_boxes, exp_boxes) + + def test_change_coordinate_frame(self): + def graph_fn(): + corners = tf.constant([[0.25, 0.5, 0.75, 0.75], [0.5, 0.0, 1.0, 1.0]]) + window = tf.constant([0.25, 0.25, 0.75, 0.75]) + boxes = box_list.BoxList(corners) + + expected_corners = tf.constant([[0, 0.5, 1.0, 1.0], + [0.5, -0.5, 1.5, 1.5]]) + expected_boxes = box_list.BoxList(expected_corners) + output = box_list_ops.change_coordinate_frame(boxes, window) + return output.get(), expected_boxes.get() + output_, expected_boxes_ = self.execute(graph_fn, []) + self.assertAllClose(output_, expected_boxes_) + + def test_ioaworks_on_empty_inputs(self): + def graph_fn(): + corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) + corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], + [0.0, 0.0, 20.0, 20.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + boxes_empty = box_list.BoxList(tf.zeros((0, 4))) + ioa_empty_1 = box_list_ops.ioa(boxes1, boxes_empty) + ioa_empty_2 = box_list_ops.ioa(boxes_empty, boxes2) + ioa_empty_3 = box_list_ops.ioa(boxes_empty, boxes_empty) + return ioa_empty_1, ioa_empty_2, ioa_empty_3 + ioa_output_1, ioa_output_2, ioa_output_3 = self.execute(graph_fn, []) + self.assertAllEqual(ioa_output_1.shape, (2, 0)) + self.assertAllEqual(ioa_output_2.shape, (0, 3)) + self.assertAllEqual(ioa_output_3.shape, (0, 0)) + + def test_pairwise_distances(self): + def graph_fn(): + corners1 = tf.constant([[0.0, 0.0, 0.0, 0.0], + [1.0, 1.0, 0.0, 2.0]]) + corners2 = tf.constant([[3.0, 4.0, 1.0, 0.0], + [-4.0, 0.0, 0.0, 3.0], + [0.0, 0.0, 0.0, 0.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + dist_matrix = box_list_ops.sq_dist(boxes1, boxes2) + return dist_matrix + exp_output = [[26, 25, 0], [18, 27, 6]] + dist_output = self.execute(graph_fn, []) + self.assertAllClose(dist_output, exp_output) + + def test_boolean_mask(self): + def graph_fn(): + corners = tf.constant( + [4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]]) + indicator = tf.constant([True, False, True, False, True], tf.bool) + boxes = box_list.BoxList(corners) + subset = box_list_ops.boolean_mask(boxes, indicator) + return subset.get() + expected_subset = [4 * [0.0], 4 * [2.0], 4 * [4.0]] + subset_output = self.execute_cpu(graph_fn, []) + self.assertAllClose(subset_output, expected_subset) + + def test_static_boolean_mask_with_field(self): + + def graph_fn(corners, weights, indicator): + boxes = box_list.BoxList(corners) + boxes.add_field('weights', 
weights) + subset = box_list_ops.boolean_mask( + boxes, + indicator, ['weights'], + use_static_shapes=True, + indicator_sum=3) + return (subset.get_field('boxes'), subset.get_field('weights')) + + corners = np.array( + [4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]], + dtype=np.float32) + indicator = np.array([True, False, True, False, True], dtype=np.bool) + weights = np.array([[.1], [.3], [.5], [.7], [.9]], dtype=np.float32) + result_boxes, result_weights = self.execute_cpu( + graph_fn, [corners, weights, indicator]) + expected_boxes = [4 * [0.0], 4 * [2.0], 4 * [4.0]] + expected_weights = [[.1], [.5], [.9]] + + self.assertAllClose(result_boxes, expected_boxes) + self.assertAllClose(result_weights, expected_weights) + + def test_gather(self): + def graph_fn(): + corners = tf.constant( + [4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]]) + indices = tf.constant([0, 2, 4], tf.int32) + boxes = box_list.BoxList(corners) + subset = box_list_ops.gather(boxes, indices) + return subset.get() + expected_subset = [4 * [0.0], 4 * [2.0], 4 * [4.0]] + subset_output = self.execute(graph_fn, []) + self.assertAllClose(subset_output, expected_subset) + + def test_static_gather_with_field(self): + + def graph_fn(corners, weights, indices): + boxes = box_list.BoxList(corners) + boxes.add_field('weights', weights) + subset = box_list_ops.gather( + boxes, indices, ['weights'], use_static_shapes=True) + return (subset.get_field('boxes'), subset.get_field('weights')) + + corners = np.array([4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], + 4 * [4.0]], dtype=np.float32) + weights = np.array([[.1], [.3], [.5], [.7], [.9]], dtype=np.float32) + indices = np.array([0, 2, 4], dtype=np.int32) + + result_boxes, result_weights = self.execute(graph_fn, + [corners, weights, indices]) + expected_boxes = [4 * [0.0], 4 * [2.0], 4 * [4.0]] + expected_weights = [[.1], [.5], [.9]] + self.assertAllClose(result_boxes, expected_boxes) + self.assertAllClose(result_weights, expected_weights) + + def test_gather_with_invalid_field(self): + corners = tf.constant([4 * [0.0], 4 * [1.0]]) + indices = tf.constant([0, 1], tf.int32) + weights = tf.constant([[.1], [.3]], tf.float32) + + boxes = box_list.BoxList(corners) + boxes.add_field('weights', weights) + with self.assertRaises(ValueError): + box_list_ops.gather(boxes, indices, ['foo', 'bar']) + + def test_gather_with_invalid_inputs(self): + corners = tf.constant( + [4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]]) + indices_float32 = tf.constant([0, 2, 4], tf.float32) + boxes = box_list.BoxList(corners) + with self.assertRaises(ValueError): + _ = box_list_ops.gather(boxes, indices_float32) + indices_2d = tf.constant([[0, 2, 4]], tf.int32) + boxes = box_list.BoxList(corners) + with self.assertRaises(ValueError): + _ = box_list_ops.gather(boxes, indices_2d) + + def test_gather_with_dynamic_indexing(self): + def graph_fn(): + corners = tf.constant( + [4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]]) + weights = tf.constant([.5, .3, .7, .1, .9], tf.float32) + indices = tf.reshape(tf.where(tf.greater(weights, 0.4)), [-1]) + boxes = box_list.BoxList(corners) + boxes.add_field('weights', weights) + subset = box_list_ops.gather(boxes, indices, ['weights']) + return subset.get(), subset.get_field('weights') + expected_subset = [4 * [0.0], 4 * [2.0], 4 * [4.0]] + expected_weights = [.5, .7, .9] + subset_output, weights_output = self.execute(graph_fn, []) + self.assertAllClose(subset_output, expected_subset) + self.assertAllClose(weights_output, expected_weights) + + def 
test_sort_by_field_ascending_order(self): + exp_corners = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], + [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]] + exp_scores = [.95, .9, .75, .6, .5, .3] + exp_weights = [.2, .45, .6, .75, .8, .92] + + def graph_fn(): + shuffle = [2, 4, 0, 5, 1, 3] + corners = tf.constant([exp_corners[i] for i in shuffle], tf.float32) + boxes = box_list.BoxList(corners) + boxes.add_field('scores', tf.constant( + [exp_scores[i] for i in shuffle], tf.float32)) + boxes.add_field('weights', tf.constant( + [exp_weights[i] for i in shuffle], tf.float32)) + sort_by_weight = box_list_ops.sort_by_field( + boxes, + 'weights', + order=box_list_ops.SortOrder.ascend) + return [sort_by_weight.get(), sort_by_weight.get_field('scores'), + sort_by_weight.get_field('weights')] + corners_out, scores_out, weights_out = self.execute(graph_fn, []) + self.assertAllClose(corners_out, exp_corners) + self.assertAllClose(scores_out, exp_scores) + self.assertAllClose(weights_out, exp_weights) + + def test_sort_by_field_descending_order(self): + exp_corners = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], + [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]] + exp_scores = [.95, .9, .75, .6, .5, .3] + exp_weights = [.2, .45, .6, .75, .8, .92] + + def graph_fn(): + shuffle = [2, 4, 0, 5, 1, 3] + corners = tf.constant([exp_corners[i] for i in shuffle], tf.float32) + boxes = box_list.BoxList(corners) + boxes.add_field('scores', tf.constant( + [exp_scores[i] for i in shuffle], tf.float32)) + boxes.add_field('weights', tf.constant( + [exp_weights[i] for i in shuffle], tf.float32)) + sort_by_score = box_list_ops.sort_by_field(boxes, 'scores') + return (sort_by_score.get(), sort_by_score.get_field('scores'), + sort_by_score.get_field('weights')) + + corners_out, scores_out, weights_out = self.execute(graph_fn, []) + self.assertAllClose(corners_out, exp_corners) + self.assertAllClose(scores_out, exp_scores) + self.assertAllClose(weights_out, exp_weights) + + def test_sort_by_field_invalid_inputs(self): + corners = tf.constant([4 * [0.0], 4 * [0.5], 4 * [1.0], 4 * [2.0], 4 * + [3.0], 4 * [4.0]]) + misc = tf.constant([[.95, .9], [.5, .3]], tf.float32) + weights = tf.constant([[.1, .2]], tf.float32) + boxes = box_list.BoxList(corners) + boxes.add_field('misc', misc) + boxes.add_field('weights', weights) + + with self.assertRaises(ValueError): + box_list_ops.sort_by_field(boxes, 'area') + + with self.assertRaises(ValueError): + box_list_ops.sort_by_field(boxes, 'misc') + + with self.assertRaises(ValueError): + box_list_ops.sort_by_field(boxes, 'weights') + + def test_visualize_boxes_in_image(self): + def graph_fn(): + image = tf.zeros((6, 4, 3)) + corners = tf.constant([[0, 0, 5, 3], + [0, 0, 3, 2]], tf.float32) + boxes = box_list.BoxList(corners) + image_and_boxes = box_list_ops.visualize_boxes_in_image(image, boxes) + image_and_boxes_bw = tf.cast( + tf.greater(tf.reduce_sum(image_and_boxes, 2), 0.0), dtype=tf.float32) + return image_and_boxes_bw + exp_result = [[1, 1, 1, 0], + [1, 1, 1, 0], + [1, 1, 1, 0], + [1, 0, 1, 0], + [1, 1, 1, 0], + [0, 0, 0, 0]] + output = self.execute_cpu(graph_fn, []) + self.assertAllEqual(output.astype(int), exp_result) + + def test_filter_field_value_equals(self): + def graph_fn(): + corners = tf.constant([[0, 0, 1, 1], + [0, 0.1, 1, 1.1], + [0, -0.1, 1, 0.9], + [0, 10, 1, 11], + [0, 10.1, 1, 11.1], + [0, 100, 1, 101]], tf.float32) + boxes = box_list.BoxList(corners) + boxes.add_field('classes', tf.constant([1, 2, 1, 2, 2, 1])) + filtered_boxes1 = 
box_list_ops.filter_field_value_equals( + boxes, 'classes', 1) + filtered_boxes2 = box_list_ops.filter_field_value_equals( + boxes, 'classes', 2) + return filtered_boxes1.get(), filtered_boxes2.get() + exp_output1 = [[0, 0, 1, 1], [0, -0.1, 1, 0.9], [0, 100, 1, 101]] + exp_output2 = [[0, 0.1, 1, 1.1], [0, 10, 1, 11], [0, 10.1, 1, 11.1]] + filtered_output1, filtered_output2 = self.execute_cpu(graph_fn, []) + self.assertAllClose(filtered_output1, exp_output1) + self.assertAllClose(filtered_output2, exp_output2) + + def test_filter_greater_than(self): + def graph_fn(): + corners = tf.constant([[0, 0, 1, 1], + [0, 0.1, 1, 1.1], + [0, -0.1, 1, 0.9], + [0, 10, 1, 11], + [0, 10.1, 1, 11.1], + [0, 100, 1, 101]], tf.float32) + boxes = box_list.BoxList(corners) + boxes.add_field('scores', tf.constant([.1, .75, .9, .5, .5, .8])) + thresh = .6 + filtered_boxes = box_list_ops.filter_greater_than(boxes, thresh) + return filtered_boxes.get() + exp_output = [[0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], [0, 100, 1, 101]] + filtered_output = self.execute_cpu(graph_fn, []) + self.assertAllClose(filtered_output, exp_output) + + def test_clip_box_list(self): + def graph_fn(): + boxlist = box_list.BoxList( + tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5], + [0.6, 0.6, 0.8, 0.8], [0.2, 0.2, 0.3, 0.3]], tf.float32)) + boxlist.add_field('classes', tf.constant([0, 0, 1, 1])) + boxlist.add_field('scores', tf.constant([0.75, 0.65, 0.3, 0.2])) + num_boxes = 2 + clipped_boxlist = box_list_ops.pad_or_clip_box_list(boxlist, num_boxes) + return (clipped_boxlist.get(), clipped_boxlist.get_field('classes'), + clipped_boxlist.get_field('scores')) + + expected_boxes = [[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]] + expected_classes = [0, 0] + expected_scores = [0.75, 0.65] + boxes_out, classes_out, scores_out = self.execute(graph_fn, []) + + self.assertAllClose(expected_boxes, boxes_out) + self.assertAllEqual(expected_classes, classes_out) + self.assertAllClose(expected_scores, scores_out) + + def test_pad_box_list(self): + def graph_fn(): + boxlist = box_list.BoxList( + tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]], tf.float32)) + boxlist.add_field('classes', tf.constant([0, 1])) + boxlist.add_field('scores', tf.constant([0.75, 0.2])) + num_boxes = 4 + padded_boxlist = box_list_ops.pad_or_clip_box_list(boxlist, num_boxes) + return (padded_boxlist.get(), padded_boxlist.get_field('classes'), + padded_boxlist.get_field('scores')) + expected_boxes = [[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5], + [0, 0, 0, 0], [0, 0, 0, 0]] + expected_classes = [0, 1, 0, 0] + expected_scores = [0.75, 0.2, 0, 0] + boxes_out, classes_out, scores_out = self.execute(graph_fn, []) + self.assertAllClose(expected_boxes, boxes_out) + self.assertAllEqual(expected_classes, classes_out) + self.assertAllClose(expected_scores, scores_out) + + def test_select_random_box(self): + boxes = [[0., 0., 1., 1.], + [0., 1., 2., 3.], + [0., 2., 3., 4.]] + def graph_fn(): + corners = tf.constant(boxes, dtype=tf.float32) + boxlist = box_list.BoxList(corners) + random_bbox, valid = box_list_ops.select_random_box(boxlist) + return random_bbox, valid + random_bbox_out, valid_out = self.execute(graph_fn, []) + norm_small = any( + [np.linalg.norm(random_bbox_out - box) < 1e-6 for box in boxes]) + self.assertTrue(norm_small) + self.assertTrue(valid_out) + + def test_select_random_box_with_empty_boxlist(self): + def graph_fn(): + corners = tf.constant([], shape=[0, 4], dtype=tf.float32) + boxlist = box_list.BoxList(corners) + random_bbox, valid = 
box_list_ops.select_random_box(boxlist) + return random_bbox, valid + random_bbox_out, valid_out = self.execute_cpu(graph_fn, []) + expected_bbox_out = np.array([[-1., -1., -1., -1.]], dtype=np.float32) + self.assertAllEqual(expected_bbox_out, random_bbox_out) + self.assertFalse(valid_out) + + def test_get_minimal_coverage_box(self): + def graph_fn(): + boxes = [[0., 0., 1., 1.], + [-1., 1., 2., 3.], + [0., 2., 3., 4.]] + corners = tf.constant(boxes, dtype=tf.float32) + boxlist = box_list.BoxList(corners) + coverage_box = box_list_ops.get_minimal_coverage_box(boxlist) + return coverage_box + coverage_box_out = self.execute(graph_fn, []) + expected_coverage_box = [[-1., 0., 3., 4.]] + self.assertAllClose(expected_coverage_box, coverage_box_out) + + def test_get_minimal_coverage_box_with_empty_boxlist(self): + def graph_fn(): + corners = tf.constant([], shape=[0, 4], dtype=tf.float32) + boxlist = box_list.BoxList(corners) + coverage_box = box_list_ops.get_minimal_coverage_box(boxlist) + return coverage_box + coverage_box_out = self.execute(graph_fn, []) + self.assertAllClose([[0.0, 0.0, 1.0, 1.0]], coverage_box_out) + + +class ConcatenateTest(test_case.TestCase): + + def test_invalid_input_box_list_list(self): + with self.assertRaises(ValueError): + box_list_ops.concatenate(None) + with self.assertRaises(ValueError): + box_list_ops.concatenate([]) + with self.assertRaises(ValueError): + corners = tf.constant([[0, 0, 0, 0]], tf.float32) + boxlist = box_list.BoxList(corners) + box_list_ops.concatenate([boxlist, 2]) + + def test_concatenate_with_missing_fields(self): + corners1 = tf.constant([[0, 0, 0, 0], [1, 2, 3, 4]], tf.float32) + scores1 = tf.constant([1.0, 2.1]) + corners2 = tf.constant([[0, 3, 1, 6], [2, 4, 3, 8]], tf.float32) + boxlist1 = box_list.BoxList(corners1) + boxlist1.add_field('scores', scores1) + boxlist2 = box_list.BoxList(corners2) + with self.assertRaises(ValueError): + box_list_ops.concatenate([boxlist1, boxlist2]) + + def test_concatenate_with_incompatible_field_shapes(self): + corners1 = tf.constant([[0, 0, 0, 0], [1, 2, 3, 4]], tf.float32) + scores1 = tf.constant([1.0, 2.1]) + corners2 = tf.constant([[0, 3, 1, 6], [2, 4, 3, 8]], tf.float32) + scores2 = tf.constant([[1.0, 1.0], [2.1, 3.2]]) + boxlist1 = box_list.BoxList(corners1) + boxlist1.add_field('scores', scores1) + boxlist2 = box_list.BoxList(corners2) + boxlist2.add_field('scores', scores2) + with self.assertRaises(ValueError): + box_list_ops.concatenate([boxlist1, boxlist2]) + + def test_concatenate_is_correct(self): + def graph_fn(): + corners1 = tf.constant([[0, 0, 0, 0], [1, 2, 3, 4]], tf.float32) + scores1 = tf.constant([1.0, 2.1]) + corners2 = tf.constant([[0, 3, 1, 6], [2, 4, 3, 8], [1, 0, 5, 10]], + tf.float32) + scores2 = tf.constant([1.0, 2.1, 5.6]) + boxlist1 = box_list.BoxList(corners1) + boxlist1.add_field('scores', scores1) + boxlist2 = box_list.BoxList(corners2) + boxlist2.add_field('scores', scores2) + result = box_list_ops.concatenate([boxlist1, boxlist2]) + return result.get(), result.get_field('scores') + exp_corners = [[0, 0, 0, 0], + [1, 2, 3, 4], + [0, 3, 1, 6], + [2, 4, 3, 8], + [1, 0, 5, 10]] + exp_scores = [1.0, 2.1, 1.0, 2.1, 5.6] + corners_output, scores_output = self.execute(graph_fn, []) + self.assertAllClose(corners_output, exp_corners) + self.assertAllClose(scores_output, exp_scores) + + +class NonMaxSuppressionTest(test_case.TestCase): + + def test_select_from_three_clusters(self): + def graph_fn(): + corners = tf.constant([[0, 0, 1, 1], + [0, 0.1, 1, 1.1], + [0, -0.1, 1, 0.9], + 
[0, 10, 1, 11], + [0, 10.1, 1, 11.1], + [0, 100, 1, 101]], tf.float32) + boxes = box_list.BoxList(corners) + boxes.add_field('scores', tf.constant([.9, .75, .6, .95, .5, .3])) + iou_thresh = .5 + max_output_size = 3 + nms = box_list_ops.non_max_suppression( + boxes, iou_thresh, max_output_size) + return nms.get() + exp_nms = [[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 100, 1, 101]] + nms_output = self.execute_cpu(graph_fn, []) + self.assertAllClose(nms_output, exp_nms) + + def test_select_at_most_two_boxes_from_three_clusters(self): + def graph_fn(): + corners = tf.constant([[0, 0, 1, 1], + [0, 0.1, 1, 1.1], + [0, -0.1, 1, 0.9], + [0, 10, 1, 11], + [0, 10.1, 1, 11.1], + [0, 100, 1, 101]], tf.float32) + boxes = box_list.BoxList(corners) + boxes.add_field('scores', tf.constant([.9, .75, .6, .95, .5, .3])) + iou_thresh = .5 + max_output_size = 2 + nms = box_list_ops.non_max_suppression( + boxes, iou_thresh, max_output_size) + return nms.get() + exp_nms = [[0, 10, 1, 11], + [0, 0, 1, 1]] + nms_output = self.execute_cpu(graph_fn, []) + self.assertAllClose(nms_output, exp_nms) + + def test_select_at_most_thirty_boxes_from_three_clusters(self): + def graph_fn(): + corners = tf.constant([[0, 0, 1, 1], + [0, 0.1, 1, 1.1], + [0, -0.1, 1, 0.9], + [0, 10, 1, 11], + [0, 10.1, 1, 11.1], + [0, 100, 1, 101]], tf.float32) + boxes = box_list.BoxList(corners) + boxes.add_field('scores', tf.constant([.9, .75, .6, .95, .5, .3])) + iou_thresh = .5 + max_output_size = 30 + nms = box_list_ops.non_max_suppression( + boxes, iou_thresh, max_output_size) + return nms.get() + exp_nms = [[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 100, 1, 101]] + nms_output = self.execute_cpu(graph_fn, []) + self.assertAllClose(nms_output, exp_nms) + + def test_select_single_box(self): + def graph_fn(): + corners = tf.constant([[0, 0, 1, 1]], tf.float32) + boxes = box_list.BoxList(corners) + boxes.add_field('scores', tf.constant([.9])) + iou_thresh = .5 + max_output_size = 3 + nms = box_list_ops.non_max_suppression( + boxes, iou_thresh, max_output_size) + return nms.get() + exp_nms = [[0, 0, 1, 1]] + nms_output = self.execute_cpu(graph_fn, []) + self.assertAllClose(nms_output, exp_nms) + + def test_select_from_ten_identical_boxes(self): + def graph_fn(): + corners = tf.constant(10 * [[0, 0, 1, 1]], tf.float32) + boxes = box_list.BoxList(corners) + boxes.add_field('scores', tf.constant(10 * [.9])) + iou_thresh = .5 + max_output_size = 3 + nms = box_list_ops.non_max_suppression( + boxes, iou_thresh, max_output_size) + return nms.get() + exp_nms = [[0, 0, 1, 1]] + nms_output = self.execute_cpu(graph_fn, []) + self.assertAllClose(nms_output, exp_nms) + + def test_copy_extra_fields(self): + tensor1 = np.array([[1], [4]]) + tensor2 = np.array([[1, 1], [2, 2]]) + def graph_fn(): + corners = tf.constant([[0, 0, 1, 1], + [0, 0.1, 1, 1.1]], tf.float32) + boxes = box_list.BoxList(corners) + + boxes.add_field('tensor1', tf.constant(tensor1)) + boxes.add_field('tensor2', tf.constant(tensor2)) + new_boxes = box_list.BoxList(tf.constant([[0, 0, 10, 10], + [1, 3, 5, 5]], tf.float32)) + new_boxes = box_list_ops._copy_extra_fields(new_boxes, boxes) + return new_boxes.get_field('tensor1'), new_boxes.get_field('tensor2') + tensor1_out, tensor2_out = self.execute_cpu(graph_fn, []) + self.assertAllClose(tensor1, tensor1_out) + self.assertAllClose(tensor2, tensor2_out) + + +class CoordinatesConversionTest(test_case.TestCase): + + def test_to_normalized_coordinates(self): + def graph_fn(): + coordinates = tf.constant([[0, 0, 100, 100], + [25, 25, 75, 75]], tf.float32) + 
img = tf.ones((128, 100, 100, 3)) + boxlist = box_list.BoxList(coordinates) + normalized_boxlist = box_list_ops.to_normalized_coordinates( + boxlist, tf.shape(img)[1], tf.shape(img)[2]) + return normalized_boxlist.get() + expected_boxes = [[0, 0, 1, 1], + [0.25, 0.25, 0.75, 0.75]] + normalized_boxes = self.execute(graph_fn, []) + self.assertAllClose(normalized_boxes, expected_boxes) + + def test_to_normalized_coordinates_already_normalized(self): + def graph_fn(): + coordinates = tf.constant([[0, 0, 1, 1], + [0.25, 0.25, 0.75, 0.75]], tf.float32) + img = tf.ones((128, 100, 100, 3)) + boxlist = box_list.BoxList(coordinates) + normalized_boxlist = box_list_ops.to_normalized_coordinates( + boxlist, tf.shape(img)[1], tf.shape(img)[2]) + return normalized_boxlist.get() + with self.assertRaisesOpError('assertion failed'): + self.execute_cpu(graph_fn, []) + + def test_to_absolute_coordinates(self): + def graph_fn(): + coordinates = tf.constant([[0, 0, 1, 1], + [0.25, 0.25, 0.75, 0.75]], tf.float32) + img = tf.ones((128, 100, 100, 3)) + boxlist = box_list.BoxList(coordinates) + absolute_boxlist = box_list_ops.to_absolute_coordinates(boxlist, + tf.shape(img)[1], + tf.shape(img)[2]) + return absolute_boxlist.get() + expected_boxes = [[0, 0, 100, 100], + [25, 25, 75, 75]] + absolute_boxes = self.execute(graph_fn, []) + self.assertAllClose(absolute_boxes, expected_boxes) + + def test_to_absolute_coordinates_already_abolute(self): + def graph_fn(): + coordinates = tf.constant([[0, 0, 100, 100], + [25, 25, 75, 75]], tf.float32) + img = tf.ones((128, 100, 100, 3)) + boxlist = box_list.BoxList(coordinates) + absolute_boxlist = box_list_ops.to_absolute_coordinates(boxlist, + tf.shape(img)[1], + tf.shape(img)[2]) + return absolute_boxlist.get() + with self.assertRaisesOpError('assertion failed'): + self.execute_cpu(graph_fn, []) + + def test_convert_to_normalized_and_back(self): + coordinates = np.random.uniform(size=(100, 4)) + coordinates = np.round(np.sort(coordinates) * 200) + coordinates[:, 2:4] += 1 + coordinates[99, :] = [0, 0, 201, 201] + def graph_fn(): + img = tf.ones((128, 202, 202, 3)) + + boxlist = box_list.BoxList(tf.constant(coordinates, tf.float32)) + boxlist = box_list_ops.to_normalized_coordinates(boxlist, + tf.shape(img)[1], + tf.shape(img)[2]) + boxlist = box_list_ops.to_absolute_coordinates(boxlist, + tf.shape(img)[1], + tf.shape(img)[2]) + return boxlist.get() + out = self.execute(graph_fn, []) + self.assertAllClose(out, coordinates) + + def test_convert_to_absolute_and_back(self): + coordinates = np.random.uniform(size=(100, 4)) + coordinates = np.sort(coordinates) + coordinates[99, :] = [0, 0, 1, 1] + def graph_fn(): + img = tf.ones((128, 202, 202, 3)) + boxlist = box_list.BoxList(tf.constant(coordinates, tf.float32)) + boxlist = box_list_ops.to_absolute_coordinates(boxlist, + tf.shape(img)[1], + tf.shape(img)[2]) + boxlist = box_list_ops.to_normalized_coordinates(boxlist, + tf.shape(img)[1], + tf.shape(img)[2]) + return boxlist.get() + out = self.execute(graph_fn, []) + self.assertAllClose(out, coordinates) + + def test_to_absolute_coordinates_maximum_coordinate_check(self): + def graph_fn(): + coordinates = tf.constant([[0, 0, 1.2, 1.2], + [0.25, 0.25, 0.75, 0.75]], tf.float32) + img = tf.ones((128, 100, 100, 3)) + boxlist = box_list.BoxList(coordinates) + absolute_boxlist = box_list_ops.to_absolute_coordinates( + boxlist, + tf.shape(img)[1], + tf.shape(img)[2], + maximum_normalized_coordinate=1.1) + return absolute_boxlist.get() + with self.assertRaisesOpError('assertion failed'): 
+ self.execute_cpu(graph_fn, []) + + +class BoxRefinementTest(test_case.TestCase): + + def test_box_voting(self): + def graph_fn(): + candidates = box_list.BoxList( + tf.constant([[0.1, 0.1, 0.4, 0.4], [0.6, 0.6, 0.8, 0.8]], tf.float32)) + candidates.add_field('ExtraField', tf.constant([1, 2])) + pool = box_list.BoxList( + tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5], + [0.6, 0.6, 0.8, 0.8]], tf.float32)) + pool.add_field('scores', tf.constant([0.75, 0.25, 0.3])) + averaged_boxes = box_list_ops.box_voting(candidates, pool) + return (averaged_boxes.get(), averaged_boxes.get_field('scores'), + averaged_boxes.get_field('ExtraField')) + + expected_boxes = [[0.1, 0.1, 0.425, 0.425], [0.6, 0.6, 0.8, 0.8]] + expected_scores = [0.5, 0.3] + boxes_out, scores_out, extra_field_out = self.execute(graph_fn, []) + self.assertAllClose(expected_boxes, boxes_out) + self.assertAllClose(expected_scores, scores_out) + self.assertAllEqual(extra_field_out, [1, 2]) + + def test_box_voting_fails_with_negative_scores(self): + def graph_fn(): + candidates = box_list.BoxList( + tf.constant([[0.1, 0.1, 0.4, 0.4]], tf.float32)) + pool = box_list.BoxList(tf.constant([[0.1, 0.1, 0.4, 0.4]], tf.float32)) + pool.add_field('scores', tf.constant([-0.2])) + averaged_boxes = box_list_ops.box_voting(candidates, pool) + return averaged_boxes.get() + + with self.assertRaisesOpError('Scores must be non negative'): + self.execute_cpu(graph_fn, []) + + def test_box_voting_fails_when_unmatched(self): + def graph_fn(): + candidates = box_list.BoxList( + tf.constant([[0.1, 0.1, 0.4, 0.4]], tf.float32)) + pool = box_list.BoxList(tf.constant([[0.6, 0.6, 0.8, 0.8]], tf.float32)) + pool.add_field('scores', tf.constant([0.2])) + averaged_boxes = box_list_ops.box_voting(candidates, pool) + return averaged_boxes.get() + with self.assertRaisesOpError('Each box in selected_boxes must match ' + 'with at least one box in pool_boxes.'): + self.execute_cpu(graph_fn, []) + + def test_refine_boxes(self): + def graph_fn(): + pool = box_list.BoxList( + tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5], + [0.6, 0.6, 0.8, 0.8]], tf.float32)) + pool.add_field('ExtraField', tf.constant([1, 2, 3])) + pool.add_field('scores', tf.constant([0.75, 0.25, 0.3])) + averaged_boxes = box_list_ops.refine_boxes(pool, 0.5, 10) + return (averaged_boxes.get(), averaged_boxes.get_field('scores'), + averaged_boxes.get_field('ExtraField')) + boxes_out, scores_out, extra_field_out = self.execute_cpu(graph_fn, []) + expected_boxes = [[0.1, 0.1, 0.425, 0.425], [0.6, 0.6, 0.8, 0.8]] + expected_scores = [0.5, 0.3] + self.assertAllClose(expected_boxes, boxes_out) + self.assertAllClose(expected_scores, scores_out) + self.assertAllEqual(extra_field_out, [1, 3]) + + def test_refine_boxes_multi_class(self): + def graph_fn(): + pool = box_list.BoxList( + tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5], + [0.6, 0.6, 0.8, 0.8], [0.2, 0.2, 0.3, 0.3]], tf.float32)) + pool.add_field('classes', tf.constant([0, 0, 1, 1])) + pool.add_field('scores', tf.constant([0.75, 0.25, 0.3, 0.2])) + averaged_boxes = box_list_ops.refine_boxes_multi_class(pool, 3, 0.5, 10) + return (averaged_boxes.get(), averaged_boxes.get_field('scores'), + averaged_boxes.get_field('classes')) + boxes_out, scores_out, extra_field_out = self.execute_cpu(graph_fn, []) + expected_boxes = [[0.1, 0.1, 0.425, 0.425], [0.6, 0.6, 0.8, 0.8], + [0.2, 0.2, 0.3, 0.3]] + expected_scores = [0.5, 0.3, 0.2] + self.assertAllClose(expected_boxes, boxes_out) + self.assertAllClose(expected_scores, scores_out) + 
self.assertAllEqual(extra_field_out, [0, 1, 1]) + + def test_sample_boxes_by_jittering(self): + def graph_fn(): + boxes = box_list.BoxList( + tf.constant([[0.1, 0.1, 0.4, 0.4], + [0.1, 0.1, 0.5, 0.5], + [0.6, 0.6, 0.8, 0.8], + [0.2, 0.2, 0.3, 0.3]], tf.float32)) + sampled_boxes = box_list_ops.sample_boxes_by_jittering( + boxlist=boxes, num_boxes_to_sample=10) + iou = box_list_ops.iou(boxes, sampled_boxes) + iou_max = tf.reduce_max(iou, axis=0) + return sampled_boxes.get(), iou_max + np_sampled_boxes, np_iou_max = self.execute(graph_fn, []) + self.assertAllEqual(np_sampled_boxes.shape, [10, 4]) + self.assertAllGreater(np_iou_max, 0.3) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/core/box_list_test.py b/models/research/object_detection/core/box_list_test.py new file mode 100644 index 0000000000000000000000000000000000000000..c1389dbf8ae51f82ee28780d59ca599b1eff0d3e --- /dev/null +++ b/models/research/object_detection/core/box_list_test.py @@ -0,0 +1,121 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.core.box_list.""" +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.core import box_list +from object_detection.utils import test_case + + +class BoxListTest(test_case.TestCase): + """Tests for BoxList class.""" + + def test_num_boxes(self): + def graph_fn(): + data = tf.constant([[0, 0, 1, 1], [1, 1, 2, 3], [3, 4, 5, 5]], tf.float32) + boxes = box_list.BoxList(data) + return boxes.num_boxes() + num_boxes_out = self.execute(graph_fn, []) + self.assertEqual(num_boxes_out, 3) + + def test_get_correct_center_coordinates_and_sizes(self): + boxes = np.array([[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]], + np.float32) + def graph_fn(boxes): + boxes = box_list.BoxList(boxes) + centers_sizes = boxes.get_center_coordinates_and_sizes() + return centers_sizes + centers_sizes_out = self.execute(graph_fn, [boxes]) + expected_centers_sizes = [[15, 0.35], [12.5, 0.25], [10, 0.3], [5, 0.3]] + self.assertAllClose(centers_sizes_out, expected_centers_sizes) + + def test_create_box_list_with_dynamic_shape(self): + def graph_fn(): + data = tf.constant([[0, 0, 1, 1], [1, 1, 2, 3], [3, 4, 5, 5]], tf.float32) + indices = tf.reshape(tf.where(tf.greater([1, 0, 1], 0)), [-1]) + data = tf.gather(data, indices) + assert data.get_shape().as_list() == [None, 4] + boxes = box_list.BoxList(data) + return boxes.num_boxes() + num_boxes = self.execute(graph_fn, []) + self.assertEqual(num_boxes, 2) + + def test_transpose_coordinates(self): + boxes = np.array([[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]], + np.float32) + def graph_fn(boxes): + boxes = box_list.BoxList(boxes) + boxes.transpose_coordinates() + return boxes.get() + transpoded_boxes = self.execute(graph_fn, [boxes]) + expected_corners = [[10.0, 10.0, 15.0, 20.0], [0.1, 0.2, 0.4, 0.5]] + 
self.assertAllClose(transpoded_boxes, expected_corners) + + def test_box_list_invalid_inputs(self): + data0 = tf.constant([[[0, 0, 1, 1], [3, 4, 5, 5]]], tf.float32) + data1 = tf.constant([[0, 0, 1], [1, 1, 2], [3, 4, 5]], tf.float32) + data2 = tf.constant([[0, 0, 1], [1, 1, 2], [3, 4, 5]], tf.int32) + + with self.assertRaises(ValueError): + _ = box_list.BoxList(data0) + with self.assertRaises(ValueError): + _ = box_list.BoxList(data1) + with self.assertRaises(ValueError): + _ = box_list.BoxList(data2) + + def test_num_boxes_static(self): + box_corners = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]] + boxes = box_list.BoxList(tf.constant(box_corners)) + self.assertEqual(boxes.num_boxes_static(), 2) + self.assertEqual(type(boxes.num_boxes_static()), int) + + def test_as_tensor_dict(self): + boxes = tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]], + tf.float32) + boxlist = box_list.BoxList(boxes) + classes = tf.constant([0, 1]) + boxlist.add_field('classes', classes) + scores = tf.constant([0.75, 0.2]) + boxlist.add_field('scores', scores) + tensor_dict = boxlist.as_tensor_dict() + + self.assertDictEqual(tensor_dict, {'scores': scores, 'classes': classes, + 'boxes': boxes}) + + def test_as_tensor_dict_with_features(self): + boxes = tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]], + tf.float32) + boxlist = box_list.BoxList(boxes) + classes = tf.constant([0, 1]) + boxlist.add_field('classes', classes) + scores = tf.constant([0.75, 0.2]) + boxlist.add_field('scores', scores) + tensor_dict = boxlist.as_tensor_dict(['scores', 'classes']) + + self.assertDictEqual(tensor_dict, {'scores': scores, 'classes': classes}) + + def test_as_tensor_dict_missing_field(self): + boxlist = box_list.BoxList( + tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]], tf.float32)) + boxlist.add_field('classes', tf.constant([0, 1])) + boxlist.add_field('scores', tf.constant([0.75, 0.2])) + with self.assertRaises(ValueError): + boxlist.as_tensor_dict(['foo', 'bar']) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/core/box_predictor.py b/models/research/object_detection/core/box_predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..342bca832d3bfc9c19c0240a441039d23ef16b37 --- /dev/null +++ b/models/research/object_detection/core/box_predictor.py @@ -0,0 +1,227 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Box predictor for object detectors. + +Box predictors are classes that take a high level +image feature map as input and produce two predictions, +(1) a tensor encoding box locations, and +(2) a tensor encoding classes for each box. + +These components are passed directly to loss functions +in our detection models. + +These modules are separated from the main model since the same +few box predictor architectures are shared across many models. 
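+
+A minimal usage sketch (illustrative only; `my_box_predictor` and `feature_map`
+stand in for a concrete subclass and an image feature tensor defined elsewhere):
+
+  predictions = my_box_predictor.predict(
+      image_features=[feature_map],  # list of [batch, H_i, W_i, C_i] tensors
+      num_predictions_per_location=[1])
+  box_encodings = predictions[BOX_ENCODINGS]
+  class_predictions = predictions[CLASS_PREDICTIONS_WITH_BACKGROUND]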
+""" +from abc import abstractmethod +import tensorflow.compat.v1 as tf + +BOX_ENCODINGS = 'box_encodings' +CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background' +MASK_PREDICTIONS = 'mask_predictions' + + +class BoxPredictor(object): + """BoxPredictor.""" + + def __init__(self, is_training, num_classes): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + """ + self._is_training = is_training + self._num_classes = num_classes + + @property + def is_keras_model(self): + return False + + @property + def num_classes(self): + return self._num_classes + + def predict(self, image_features, num_predictions_per_location, + scope=None, **params): + """Computes encoded object locations and corresponding confidences. + + Takes a list of high level image feature maps as input and produces a list + of box encodings and a list of class scores where each element in the output + lists correspond to the feature maps in the input list. + + Args: + image_features: A list of float tensors of shape [batch_size, height_i, + width_i, channels_i] containing features for a batch of images. + num_predictions_per_location: A list of integers representing the number + of box predictions to be made per spatial location for each feature map. + scope: Variable and Op scope name. + **params: Additional keyword arguments for specific implementations of + BoxPredictor. + + Returns: + A dictionary containing at least the following tensors. + box_encodings: A list of float tensors. Each entry in the list + corresponds to a feature map in the input `image_features` list. All + tensors in the list have one of the two following shapes: + a. [batch_size, num_anchors_i, q, code_size] representing the location + of the objects, where q is 1 or the number of classes. + b. [batch_size, num_anchors_i, code_size]. + class_predictions_with_background: A list of float tensors of shape + [batch_size, num_anchors_i, num_classes + 1] representing the class + predictions for the proposals. Each entry in the list corresponds to a + feature map in the input `image_features` list. + + Raises: + ValueError: If length of `image_features` is not equal to length of + `num_predictions_per_location`. + """ + if len(image_features) != len(num_predictions_per_location): + raise ValueError('image_feature and num_predictions_per_location must ' + 'be of same length, found: {} vs {}'. + format(len(image_features), + len(num_predictions_per_location))) + if scope is not None: + with tf.variable_scope(scope): + return self._predict(image_features, num_predictions_per_location, + **params) + return self._predict(image_features, num_predictions_per_location, + **params) + + # TODO(rathodv): num_predictions_per_location could be moved to constructor. + # This is currently only used by ConvolutionalBoxPredictor. + @abstractmethod + def _predict(self, image_features, num_predictions_per_location, **params): + """Implementations must override this method. + + Args: + image_features: A list of float tensors of shape [batch_size, height_i, + width_i, channels_i] containing features for a batch of images. 
+ num_predictions_per_location: A list of integers representing the number + of box predictions to be made per spatial location for each feature map. + **params: Additional keyword arguments for specific implementations of + BoxPredictor. + + Returns: + A dictionary containing at least the following tensors. + box_encodings: A list of float tensors. Each entry in the list + corresponds to a feature map in the input `image_features` list. All + tensors in the list have one of the two following shapes: + a. [batch_size, num_anchors_i, q, code_size] representing the location + of the objects, where q is 1 or the number of classes. + b. [batch_size, num_anchors_i, code_size]. + class_predictions_with_background: A list of float tensors of shape + [batch_size, num_anchors_i, num_classes + 1] representing the class + predictions for the proposals. Each entry in the list corresponds to a + feature map in the input `image_features` list. + """ + pass + + +class KerasBoxPredictor(tf.keras.Model): + """Keras-based BoxPredictor.""" + + def __init__(self, is_training, num_classes, freeze_batchnorm, + inplace_batchnorm_update, name=None): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: Whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + name: A string name scope to assign to the model. If `None`, Keras + will auto-generate one from the class name. + """ + super(KerasBoxPredictor, self).__init__(name=name) + + self._is_training = is_training + self._num_classes = num_classes + self._freeze_batchnorm = freeze_batchnorm + self._inplace_batchnorm_update = inplace_batchnorm_update + + @property + def is_keras_model(self): + return True + + @property + def num_classes(self): + return self._num_classes + + def call(self, image_features, **kwargs): + """Computes encoded object locations and corresponding confidences. + + Takes a list of high level image feature maps as input and produces a list + of box encodings and a list of class scores where each element in the output + lists correspond to the feature maps in the input list. + + Args: + image_features: A list of float tensors of shape [batch_size, height_i, + width_i, channels_i] containing features for a batch of images. + **kwargs: Additional keyword arguments for specific implementations of + BoxPredictor. + + Returns: + A dictionary containing at least the following tensors. + box_encodings: A list of float tensors. Each entry in the list + corresponds to a feature map in the input `image_features` list. All + tensors in the list have one of the two following shapes: + a. [batch_size, num_anchors_i, q, code_size] representing the location + of the objects, where q is 1 or the number of classes. + b. [batch_size, num_anchors_i, code_size]. 
+ class_predictions_with_background: A list of float tensors of shape + [batch_size, num_anchors_i, num_classes + 1] representing the class + predictions for the proposals. Each entry in the list corresponds to a + feature map in the input `image_features` list. + """ + return self._predict(image_features, **kwargs) + + @abstractmethod + def _predict(self, image_features, **kwargs): + """Implementations must override this method. + + Args: + image_features: A list of float tensors of shape [batch_size, height_i, + width_i, channels_i] containing features for a batch of images. + **kwargs: Additional keyword arguments for specific implementations of + BoxPredictor. + + Returns: + A dictionary containing at least the following tensors. + box_encodings: A list of float tensors. Each entry in the list + corresponds to a feature map in the input `image_features` list. All + tensors in the list have one of the two following shapes: + a. [batch_size, num_anchors_i, q, code_size] representing the location + of the objects, where q is 1 or the number of classes. + b. [batch_size, num_anchors_i, code_size]. + class_predictions_with_background: A list of float tensors of shape + [batch_size, num_anchors_i, num_classes + 1] representing the class + predictions for the proposals. Each entry in the list corresponds to a + feature map in the input `image_features` list. + """ + raise NotImplementedError diff --git a/models/research/object_detection/core/class_agnostic_nms_test.py b/models/research/object_detection/core/class_agnostic_nms_test.py new file mode 100644 index 0000000000000000000000000000000000000000..ed205c51d3651a473facae987aa02451dac4135a --- /dev/null +++ b/models/research/object_detection/core/class_agnostic_nms_test.py @@ -0,0 +1,144 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for google3.third_party.tensorflow_models.object_detection.core.class_agnostic_nms.""" +from absl.testing import parameterized +import tensorflow.compat.v1 as tf +from object_detection.core import post_processing +from object_detection.core import standard_fields as fields +from object_detection.utils import test_case + + +class ClassAgnosticNonMaxSuppressionTest(test_case.TestCase, + parameterized.TestCase): + + def test_class_agnostic_nms_select_with_shared_boxes(self): + def graph_fn(): + boxes = tf.constant( + [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], + [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], + [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], tf.float32) + scores = tf.constant([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], + [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]]) + score_thresh = 0.1 + iou_thresh = .5 + max_classes_per_detection = 1 + max_output_size = 4 + nms, _ = post_processing.class_agnostic_non_max_suppression( + boxes, scores, score_thresh, iou_thresh, max_classes_per_detection, + max_output_size) + return (nms.get(), nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes)) + + exp_nms_corners = [[0, 10, 1, 11], [0, 0, 1, 1], [0, 1000, 1, 1002], + [0, 100, 1, 101]] + exp_nms_scores = [.95, .9, .85, .3] + exp_nms_classes = [0, 0, 1, 0] + + (nms_corners_output, nms_scores_output, + nms_classes_output) = self.execute_cpu(graph_fn, []) + self.assertAllClose(nms_corners_output, exp_nms_corners) + self.assertAllClose(nms_scores_output, exp_nms_scores) + self.assertAllClose(nms_classes_output, exp_nms_classes) + + + def test_class_agnostic_nms_select_with_per_class_boxes(self): + def graph_fn(): + boxes = tf.constant( + [[[4, 5, 9, 10], [0, 0, 1, 1]], + [[0, 0.1, 1, 1.1], [4, 5, 9, 10]], + [[0, -0.1, 1, 0.9], [4, 5, 9, 10]], + [[0, 10, 1, 11], [4, 5, 9, 10]], + [[0, 10.1, 1, 11.1], [4, 5, 9, 10]], + [[0, 100, 1, 101], [4, 5, 9, 10]], + [[4, 5, 9, 10], [0, 1000, 1, 1002]], + [[4, 5, 9, 10], [0, 1000, 1, 1002.1]]], tf.float32) + scores = tf.constant([[.01, 0.9], + [.75, 0.05], + [.6, 0.01], + [.95, 0], + [.5, 0.01], + [.3, 0.01], + [.01, .85], + [.01, .5]]) + score_thresh = 0.1 + iou_thresh = .5 + max_classes_per_detection = 1 + max_output_size = 4 + nms, _ = post_processing.class_agnostic_non_max_suppression( + boxes, scores, score_thresh, iou_thresh, max_classes_per_detection, + max_output_size) + return (nms.get(), nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes)) + (nms_corners_output, nms_scores_output, + nms_classes_output) = self.execute_cpu(graph_fn, []) + exp_nms_corners = [[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 1000, 1, 1002], + [0, 100, 1, 101]] + exp_nms_scores = [.95, .9, .85, .3] + exp_nms_classes = [0, 1, 1, 0] + self.assertAllClose(nms_corners_output, exp_nms_corners) + self.assertAllClose(nms_scores_output, exp_nms_scores) + self.assertAllClose(nms_classes_output, exp_nms_classes) + + # Two cases will be tested here: using / not using static shapes. + # Named the two test cases for easier control during testing, with a flag of + # '--test_filter=ClassAgnosticNonMaxSuppressionTest.test_batch_classagnostic_nms_with_batch_size_1' + # or + # '--test_filter=ClassAgnosticNonMaxSuppressionTest.test_batch_classagnostic_nms_with_batch_size_1_use_static_shapes'. 
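+  # (The boolean in each named parameter below is forwarded to the test method
+  # as `use_static_shapes`, so the same test body runs once per configuration.)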
+ @parameterized.named_parameters(('', False), ('_use_static_shapes', True)) + def test_batch_classagnostic_nms_with_batch_size_1(self, + use_static_shapes=False): + def graph_fn(): + boxes = tf.constant( + [[[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], + [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], + [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]]], tf.float32) + scores = tf.constant([[[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], + [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]]]) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + max_classes_per_detection = 1 + use_class_agnostic_nms = True + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + nmsed_additional_fields, + num_detections) = post_processing.batch_multiclass_non_max_suppression( + boxes, + scores, + score_thresh, + iou_thresh, + max_size_per_class=max_output_size, + max_total_size=max_output_size, + use_class_agnostic_nms=use_class_agnostic_nms, + use_static_shapes=use_static_shapes, + max_classes_per_detection=max_classes_per_detection) + self.assertIsNone(nmsed_masks) + self.assertIsNone(nmsed_additional_fields) + return (nmsed_boxes, nmsed_scores, nmsed_classes, num_detections) + exp_nms_corners = [[[0, 10, 1, 11], [0, 0, 1, 1], [0, 1000, 1, 1002], + [0, 100, 1, 101]]] + exp_nms_scores = [[.95, .9, .85, .3]] + exp_nms_classes = [[0, 0, 1, 0]] + (nmsed_boxes, nmsed_scores, nmsed_classes, + num_detections) = self.execute_cpu(graph_fn, []) + self.assertAllClose(nmsed_boxes, exp_nms_corners) + self.assertAllClose(nmsed_scores, exp_nms_scores) + self.assertAllClose(nmsed_classes, exp_nms_classes) + self.assertEqual(num_detections, [4]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/core/data_decoder.py b/models/research/object_detection/core/data_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..87ddf72c1b04eba7a78f8584a38dc9f859cf8dfa --- /dev/null +++ b/models/research/object_detection/core/data_decoder.py @@ -0,0 +1,44 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Interface for data decoders. + +Data decoders decode the input data and return a dictionary of tensors keyed by +the entries in core.reader.Fields. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from abc import ABCMeta +from abc import abstractmethod +import six + + +class DataDecoder(six.with_metaclass(ABCMeta, object)): + """Interface for data decoders.""" + + @abstractmethod + def decode(self, data): + """Return a single image and associated labels. + + Args: + data: a string tensor holding a serialized protocol buffer corresponding + to data for a single image. + + Returns: + tensor_dict: a dictionary containing tensors. Possible keys are defined in + reader.Fields. 
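+
+    A minimal usage sketch (`MyExampleDecoder` is a hypothetical concrete
+    subclass):
+
+      decoder = MyExampleDecoder()
+      tensor_dict = decoder.decode(serialized_example)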
+ """ + pass diff --git a/models/research/object_detection/core/data_parser.py b/models/research/object_detection/core/data_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..889545db78fbc8adffaa2f082e4301cc15d7698f --- /dev/null +++ b/models/research/object_detection/core/data_parser.py @@ -0,0 +1,45 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Interface for data parsers. + +Data parser parses input data and returns a dictionary of numpy arrays +keyed by the entries in standard_fields.py. Since the parser parses records +to numpy arrays (materialized tensors) directly, it is used to read data for +evaluation/visualization; to parse the data during training, DataDecoder should +be used. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from abc import ABCMeta +from abc import abstractmethod +import six + + +class DataToNumpyParser(six.with_metaclass(ABCMeta, object)): + """Abstract interface for data parser that produces numpy arrays.""" + + @abstractmethod + def parse(self, input_data): + """Parses input and returns a numpy array or a dictionary of numpy arrays. + + Args: + input_data: an input data + + Returns: + A numpy array or a dictionary of numpy arrays or None, if input + cannot be parsed. + """ + pass diff --git a/models/research/object_detection/core/freezable_batch_norm.py b/models/research/object_detection/core/freezable_batch_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..7f08fa5df12163e8178f233dbb1d766fe27d8742 --- /dev/null +++ b/models/research/object_detection/core/freezable_batch_norm.py @@ -0,0 +1,68 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A freezable batch norm layer that uses Keras batch normalization.""" +import tensorflow.compat.v1 as tf + + +class FreezableBatchNorm(tf.keras.layers.BatchNormalization): + """Batch normalization layer (Ioffe and Szegedy, 2014). + + This is a `freezable` batch norm layer that supports setting the `training` + parameter in the __init__ method rather than having to set it either via + the Keras learning phase or via the `call` method parameter. 
This layer will + forward all other parameters to the default Keras `BatchNormalization` + layer + + This is class is necessary because Object Detection model training sometimes + requires batch normalization layers to be `frozen` and used as if it was + evaluation time, despite still training (and potentially using dropout layers) + + Like the default Keras BatchNormalization layer, this will normalize the + activations of the previous layer at each batch, + i.e. applies a transformation that maintains the mean activation + close to 0 and the activation standard deviation close to 1. + + Arguments: + training: If False, the layer will normalize using the moving average and + std. dev, without updating the learned avg and std. dev. + If None or True, the layer will follow the keras BatchNormalization layer + strategy of checking the Keras learning phase at `call` time to decide + what to do. + **kwargs: The keyword arguments to forward to the keras BatchNormalization + layer constructor. + + Input shape: + Arbitrary. Use the keyword argument `input_shape` + (tuple of integers, does not include the samples axis) + when using this layer as the first layer in a model. + + Output shape: + Same shape as input. + + References: + - [Batch Normalization: Accelerating Deep Network Training by Reducing + Internal Covariate Shift](https://arxiv.org/abs/1502.03167) + """ + + def __init__(self, training=None, **kwargs): + super(FreezableBatchNorm, self).__init__(**kwargs) + self._training = training + + def call(self, inputs, training=None): + # Override the call arg only if the batchnorm is frozen. (Ignore None) + if self._training is False: # pylint: disable=g-bool-id-comparison + training = self._training + return super(FreezableBatchNorm, self).call(inputs, training=training) diff --git a/models/research/object_detection/core/freezable_batch_norm_tf2_test.py b/models/research/object_detection/core/freezable_batch_norm_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..4cc42ae3ef7da9b3412d2f461d7f9db62420e603 --- /dev/null +++ b/models/research/object_detection/core/freezable_batch_norm_tf2_test.py @@ -0,0 +1,198 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.core.freezable_batch_norm.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import unittest +import numpy as np +from six.moves import zip +import tensorflow.compat.v1 as tf + + +from object_detection.core import freezable_batch_norm +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class FreezableBatchNormTest(tf.test.TestCase): + """Tests for FreezableBatchNorm operations.""" + + def _build_model(self, training=None): + model = tf.keras.models.Sequential() + norm = freezable_batch_norm.FreezableBatchNorm(training=training, + input_shape=(10,), + momentum=0.8) + model.add(norm) + return model, norm + + def _copy_weights(self, source_weights, target_weights): + for source, target in zip(source_weights, target_weights): + target.assign(source) + + def _train_freezable_batch_norm(self, training_mean, training_var): + model, _ = self._build_model() + model.compile(loss='mse', optimizer='sgd') + + # centered on training_mean, variance training_var + train_data = np.random.normal( + loc=training_mean, + scale=training_var, + size=(1000, 10)) + model.fit(train_data, train_data, epochs=4, verbose=0) + return model.weights + + def _test_batchnorm_layer( + self, norm, should_be_training, test_data, + testing_mean, testing_var, training_arg, training_mean, training_var): + out_tensor = norm(tf.convert_to_tensor(test_data, dtype=tf.float32), + training=training_arg) + out = out_tensor + out -= norm.beta + out /= norm.gamma + + if not should_be_training: + out *= training_var + out += (training_mean - testing_mean) + out /= testing_var + + np.testing.assert_allclose(out.numpy().mean(), 0.0, atol=1.5e-1) + np.testing.assert_allclose(out.numpy().std(), 1.0, atol=1.5e-1) + + def test_batchnorm_freezing_training_none(self): + training_mean = 5.0 + training_var = 10.0 + + testing_mean = -10.0 + testing_var = 5.0 + + # Initially train the batch norm, and save the weights + trained_weights = self._train_freezable_batch_norm(training_mean, + training_var) + + # Load the batch norm weights, freezing training to True. + # Apply the batch norm layer to testing data and ensure it is normalized + # according to the batch statistics. 
+ model, norm = self._build_model(training=True) + self._copy_weights(trained_weights, model.weights) + + # centered on testing_mean, variance testing_var + test_data = np.random.normal( + loc=testing_mean, + scale=testing_var, + size=(1000, 10)) + + # Test with training=True passed to the call method: + training_arg = True + should_be_training = True + self._test_batchnorm_layer(norm, should_be_training, test_data, + testing_mean, testing_var, training_arg, + training_mean, training_var) + + # Reset the weights, because they may have been updating by + # running with training=True + self._copy_weights(trained_weights, model.weights) + + # Test with training=False passed to the call method: + training_arg = False + should_be_training = False + self._test_batchnorm_layer(norm, should_be_training, test_data, + testing_mean, testing_var, training_arg, + training_mean, training_var) + + # Test the layer in various Keras learning phase scopes: + training_arg = None + should_be_training = False + self._test_batchnorm_layer(norm, should_be_training, test_data, + testing_mean, testing_var, training_arg, + training_mean, training_var) + + tf.keras.backend.set_learning_phase(True) + should_be_training = True + self._test_batchnorm_layer(norm, should_be_training, test_data, + testing_mean, testing_var, training_arg, + training_mean, training_var) + + # Reset the weights, because they may have been updating by + # running with training=True + self._copy_weights(trained_weights, model.weights) + + tf.keras.backend.set_learning_phase(False) + should_be_training = False + self._test_batchnorm_layer(norm, should_be_training, test_data, + testing_mean, testing_var, training_arg, + training_mean, training_var) + + def test_batchnorm_freezing_training_false(self): + training_mean = 5.0 + training_var = 10.0 + + testing_mean = -10.0 + testing_var = 5.0 + + # Initially train the batch norm, and save the weights + trained_weights = self._train_freezable_batch_norm(training_mean, + training_var) + + # Load the batch norm back up, freezing training to False. + # Apply the batch norm layer to testing data and ensure it is normalized + # according to the training data's statistics. 
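+    # (Constructing FreezableBatchNorm with training=False makes it override
+    # the call-time `training` argument, so the frozen moving averages are
+    # used for normalization below.)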
+ model, norm = self._build_model(training=False) + self._copy_weights(trained_weights, model.weights) + + # centered on testing_mean, variance testing_var + test_data = np.random.normal( + loc=testing_mean, + scale=testing_var, + size=(1000, 10)) + + # Make sure that the layer is never training + # Test with training=True passed to the call method: + training_arg = True + should_be_training = False + self._test_batchnorm_layer(norm, should_be_training, test_data, + testing_mean, testing_var, training_arg, + training_mean, training_var) + + # Test with training=False passed to the call method: + training_arg = False + should_be_training = False + self._test_batchnorm_layer(norm, should_be_training, test_data, + testing_mean, testing_var, training_arg, + training_mean, training_var) + + # Test the layer in various Keras learning phase scopes: + training_arg = None + should_be_training = False + self._test_batchnorm_layer(norm, should_be_training, test_data, + testing_mean, testing_var, training_arg, + training_mean, training_var) + + tf.keras.backend.set_learning_phase(True) + should_be_training = False + self._test_batchnorm_layer(norm, should_be_training, test_data, + testing_mean, testing_var, training_arg, + training_mean, training_var) + + tf.keras.backend.set_learning_phase(False) + should_be_training = False + self._test_batchnorm_layer(norm, should_be_training, test_data, + testing_mean, testing_var, training_arg, + training_mean, training_var) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/core/keypoint_ops.py b/models/research/object_detection/core/keypoint_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..e321783d986b3c330300f347158c261a7e3f94a6 --- /dev/null +++ b/models/research/object_detection/core/keypoint_ops.py @@ -0,0 +1,366 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Keypoint operations. + +Keypoints are represented as tensors of shape [num_instances, num_keypoints, 2], +where the last dimension holds rank 2 tensors of the form [y, x] representing +the coordinates of the keypoint. +""" +import numpy as np +import tensorflow.compat.v1 as tf + + +def scale(keypoints, y_scale, x_scale, scope=None): + """Scales keypoint coordinates in x and y dimensions. + + Args: + keypoints: a tensor of shape [num_instances, num_keypoints, 2] + y_scale: (float) scalar tensor + x_scale: (float) scalar tensor + scope: name scope. + + Returns: + new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] + """ + with tf.name_scope(scope, 'Scale'): + y_scale = tf.cast(y_scale, tf.float32) + x_scale = tf.cast(x_scale, tf.float32) + new_keypoints = keypoints * [[[y_scale, x_scale]]] + return new_keypoints + + +def clip_to_window(keypoints, window, scope=None): + """Clips keypoints to a window. + + This op clips any input keypoints to a window. 
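+
+  For example (illustrative values only):
+
+    keypoints = tf.constant([[[0.25, 0.5], [1.5, -0.1]]], tf.float32)
+    window = tf.constant([0.0, 0.0, 1.0, 1.0], tf.float32)
+    clip_to_window(keypoints, window)  # -> [[[0.25, 0.5], [1.0, 0.0]]]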
+ + Args: + keypoints: a tensor of shape [num_instances, num_keypoints, 2] + window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] + window to which the op should clip the keypoints. + scope: name scope. + + Returns: + new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] + """ + with tf.name_scope(scope, 'ClipToWindow'): + y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2) + win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) + y = tf.maximum(tf.minimum(y, win_y_max), win_y_min) + x = tf.maximum(tf.minimum(x, win_x_max), win_x_min) + new_keypoints = tf.concat([y, x], 2) + return new_keypoints + + +def prune_outside_window(keypoints, window, scope=None): + """Prunes keypoints that fall outside a given window. + + This function replaces keypoints that fall outside the given window with nan. + See also clip_to_window which clips any keypoints that fall outside the given + window. + + Args: + keypoints: a tensor of shape [num_instances, num_keypoints, 2] + window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] + window outside of which the op should prune the keypoints. + scope: name scope. + + Returns: + new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] + """ + with tf.name_scope(scope, 'PruneOutsideWindow'): + y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2) + win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) + + valid_indices = tf.logical_and( + tf.logical_and(y >= win_y_min, y <= win_y_max), + tf.logical_and(x >= win_x_min, x <= win_x_max)) + + new_y = tf.where(valid_indices, y, np.nan * tf.ones_like(y)) + new_x = tf.where(valid_indices, x, np.nan * tf.ones_like(x)) + new_keypoints = tf.concat([new_y, new_x], 2) + + return new_keypoints + + +def change_coordinate_frame(keypoints, window, scope=None): + """Changes coordinate frame of the keypoints to be relative to window's frame. + + Given a window of the form [y_min, x_min, y_max, x_max], changes keypoint + coordinates from keypoints of shape [num_instances, num_keypoints, 2] + to be relative to this window. + + An example use case is data augmentation: where we are given groundtruth + keypoints and would like to randomly crop the image to some window. In this + case we need to change the coordinate frame of each groundtruth keypoint to be + relative to this new window. + + Args: + keypoints: a tensor of shape [num_instances, num_keypoints, 2] + window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] + window we should change the coordinate frame to. + scope: name scope. + + Returns: + new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] + """ + with tf.name_scope(scope, 'ChangeCoordinateFrame'): + win_height = window[2] - window[0] + win_width = window[3] - window[1] + new_keypoints = scale(keypoints - [window[0], window[1]], 1.0 / win_height, + 1.0 / win_width) + return new_keypoints + + +def keypoints_to_enclosing_bounding_boxes(keypoints): + """Creates enclosing bounding boxes from keypoints. + + Args: + keypoints: a [num_instances, num_keypoints, 2] float32 tensor with keypoints + in [y, x] format. + + Returns: + A [num_instances, 4] float32 tensor that tightly covers all the keypoints + for each instance. 
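+
+    For example (illustrative values only):
+
+      keypoints = tf.constant([[[0.2, 0.4], [0.6, 0.1]]], tf.float32)
+      keypoints_to_enclosing_bounding_boxes(keypoints)
+      # -> [[0.2, 0.1, 0.6, 0.4]] as [ymin, xmin, ymax, xmax] per instance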
+ """ + ymin = tf.math.reduce_min(keypoints[:, :, 0], axis=1) + xmin = tf.math.reduce_min(keypoints[:, :, 1], axis=1) + ymax = tf.math.reduce_max(keypoints[:, :, 0], axis=1) + xmax = tf.math.reduce_max(keypoints[:, :, 1], axis=1) + return tf.stack([ymin, xmin, ymax, xmax], axis=1) + + +def to_normalized_coordinates(keypoints, height, width, + check_range=True, scope=None): + """Converts absolute keypoint coordinates to normalized coordinates in [0, 1]. + + Usually one uses the dynamic shape of the image or conv-layer tensor: + keypoints = keypoint_ops.to_normalized_coordinates(keypoints, + tf.shape(images)[1], + tf.shape(images)[2]), + + This function raises an assertion failed error at graph execution time when + the maximum coordinate is smaller than 1.01 (which means that coordinates are + already normalized). The value 1.01 is to deal with small rounding errors. + + Args: + keypoints: A tensor of shape [num_instances, num_keypoints, 2]. + height: Maximum value for y coordinate of absolute keypoint coordinates. + width: Maximum value for x coordinate of absolute keypoint coordinates. + check_range: If True, checks if the coordinates are normalized. + scope: name scope. + + Returns: + tensor of shape [num_instances, num_keypoints, 2] with normalized + coordinates in [0, 1]. + """ + with tf.name_scope(scope, 'ToNormalizedCoordinates'): + height = tf.cast(height, tf.float32) + width = tf.cast(width, tf.float32) + + if check_range: + max_val = tf.reduce_max(keypoints) + max_assert = tf.Assert(tf.greater(max_val, 1.01), + ['max value is lower than 1.01: ', max_val]) + with tf.control_dependencies([max_assert]): + width = tf.identity(width) + + return scale(keypoints, 1.0 / height, 1.0 / width) + + +def to_absolute_coordinates(keypoints, height, width, + check_range=True, scope=None): + """Converts normalized keypoint coordinates to absolute pixel coordinates. + + This function raises an assertion failed error when the maximum keypoint + coordinate value is larger than 1.01 (in which case coordinates are already + absolute). + + Args: + keypoints: A tensor of shape [num_instances, num_keypoints, 2] + height: Maximum value for y coordinate of absolute keypoint coordinates. + width: Maximum value for x coordinate of absolute keypoint coordinates. + check_range: If True, checks if the coordinates are normalized or not. + scope: name scope. + + Returns: + tensor of shape [num_instances, num_keypoints, 2] with absolute coordinates + in terms of the image size. + + """ + with tf.name_scope(scope, 'ToAbsoluteCoordinates'): + height = tf.cast(height, tf.float32) + width = tf.cast(width, tf.float32) + + # Ensure range of input keypoints is correct. + if check_range: + max_val = tf.reduce_max(keypoints) + max_assert = tf.Assert(tf.greater_equal(1.01, max_val), + ['maximum keypoint coordinate value is larger ' + 'than 1.01: ', max_val]) + with tf.control_dependencies([max_assert]): + width = tf.identity(width) + + return scale(keypoints, height, width) + + +def flip_horizontal(keypoints, flip_point, flip_permutation, scope=None): + """Flips the keypoints horizontally around the flip_point. + + This operation flips the x coordinate for each keypoint around the flip_point + and also permutes the keypoints in a manner specified by flip_permutation. + + Args: + keypoints: a tensor of shape [num_instances, num_keypoints, 2] + flip_point: (float) scalar tensor representing the x coordinate to flip the + keypoints around. + flip_permutation: rank 1 int32 tensor containing the keypoint flip + permutation. 
This specifies the mapping from original keypoint indices + to the flipped keypoint indices. This is used primarily for keypoints + that are not reflection invariant. E.g. Suppose there are 3 keypoints + representing ['head', 'right_eye', 'left_eye'], then a logical choice for + flip_permutation might be [0, 2, 1] since we want to swap the 'left_eye' + and 'right_eye' after a horizontal flip. + scope: name scope. + + Returns: + new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] + """ + with tf.name_scope(scope, 'FlipHorizontal'): + keypoints = tf.transpose(keypoints, [1, 0, 2]) + keypoints = tf.gather(keypoints, flip_permutation) + v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2) + u = flip_point * 2.0 - u + new_keypoints = tf.concat([v, u], 2) + new_keypoints = tf.transpose(new_keypoints, [1, 0, 2]) + return new_keypoints + + +def flip_vertical(keypoints, flip_point, flip_permutation, scope=None): + """Flips the keypoints vertically around the flip_point. + + This operation flips the y coordinate for each keypoint around the flip_point + and also permutes the keypoints in a manner specified by flip_permutation. + + Args: + keypoints: a tensor of shape [num_instances, num_keypoints, 2] + flip_point: (float) scalar tensor representing the y coordinate to flip the + keypoints around. + flip_permutation: rank 1 int32 tensor containing the keypoint flip + permutation. This specifies the mapping from original keypoint indices + to the flipped keypoint indices. This is used primarily for keypoints + that are not reflection invariant. E.g. Suppose there are 3 keypoints + representing ['head', 'right_eye', 'left_eye'], then a logical choice for + flip_permutation might be [0, 2, 1] since we want to swap the 'left_eye' + and 'right_eye' after a horizontal flip. + scope: name scope. + + Returns: + new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] + """ + with tf.name_scope(scope, 'FlipVertical'): + keypoints = tf.transpose(keypoints, [1, 0, 2]) + keypoints = tf.gather(keypoints, flip_permutation) + v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2) + v = flip_point * 2.0 - v + new_keypoints = tf.concat([v, u], 2) + new_keypoints = tf.transpose(new_keypoints, [1, 0, 2]) + return new_keypoints + + +def rot90(keypoints, scope=None): + """Rotates the keypoints counter-clockwise by 90 degrees. + + Args: + keypoints: a tensor of shape [num_instances, num_keypoints, 2] + scope: name scope. + + Returns: + new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] + """ + with tf.name_scope(scope, 'Rot90'): + keypoints = tf.transpose(keypoints, [1, 0, 2]) + v, u = tf.split(value=keypoints[:, :, ::-1], num_or_size_splits=2, axis=2) + v = 1.0 - v + new_keypoints = tf.concat([v, u], 2) + new_keypoints = tf.transpose(new_keypoints, [1, 0, 2]) + return new_keypoints + + +def keypoint_weights_from_visibilities(keypoint_visibilities, + per_keypoint_weights=None): + """Returns a keypoint weights tensor. + + During training, it is often beneficial to consider only those keypoints that + are labeled. This function returns a weights tensor that combines default + per-keypoint weights, as well as the visibilities of individual keypoints. + + The returned tensor satisfies: + keypoint_weights[i, k] = per_keypoint_weights[k] * keypoint_visibilities[i, k] + where per_keypoint_weights[k] is set to 1 if not provided. 
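+
+  For example (values mirror test_keypoint_weights_from_visibilities in
+  keypoint_ops_test.py): keypoint_visibilities = [[True, True, False]] with
+  per_keypoint_weights = [1.0, 2.0, 3.0] yields weights [[1.0, 2.0, 0.0]].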
+ + Args: + keypoint_visibilities: A [num_instances, num_keypoints] boolean tensor + indicating whether a keypoint is labeled (and perhaps even visible). + per_keypoint_weights: A list or 1-d tensor of length `num_keypoints` with + per-keypoint weights. If None, will use 1 for each visible keypoint + weight. + + Returns: + A [num_instances, num_keypoints] float32 tensor with keypoint weights. Those + keypoints deemed visible will have the provided per-keypoint weight, and + all others will be set to zero. + """ + if per_keypoint_weights is None: + num_keypoints = keypoint_visibilities.shape.as_list()[1] + per_keypoint_weight_mult = tf.ones((1, num_keypoints,), dtype=tf.float32) + else: + per_keypoint_weight_mult = tf.expand_dims(per_keypoint_weights, axis=0) + return per_keypoint_weight_mult * tf.cast(keypoint_visibilities, tf.float32) + + +def set_keypoint_visibilities(keypoints, initial_keypoint_visibilities=None): + """Sets keypoint visibilities based on valid/invalid keypoints. + + Some keypoint operations set invisible keypoints (e.g. cropped keypoints) to + NaN, without affecting any keypoint "visibility" variables. This function is + used to update (or create) keypoint visibilities to agree with visible / + invisible keypoint coordinates. + + Args: + keypoints: a float32 tensor of shape [num_instances, num_keypoints, 2]. + initial_keypoint_visibilities: a boolean tensor of shape + [num_instances, num_keypoints]. If provided, will maintain the visibility + designation of a keypoint, so long as the corresponding coordinates are + not NaN. If not provided, will create keypoint visibilities directly from + the values in `keypoints` (i.e. NaN coordinates map to False, otherwise + they map to True). + + Returns: + keypoint_visibilities: a bool tensor of shape [num_instances, num_keypoints] + indicating whether a keypoint is visible or not. + """ + if initial_keypoint_visibilities is not None: + keypoint_visibilities = tf.cast(initial_keypoint_visibilities, tf.bool) + else: + keypoint_visibilities = tf.ones_like(keypoints[:, :, 0], dtype=tf.bool) + + keypoints_with_nan = tf.math.reduce_any(tf.math.is_nan(keypoints), axis=2) + keypoint_visibilities = tf.where( + keypoints_with_nan, + tf.zeros_like(keypoint_visibilities, dtype=tf.bool), + keypoint_visibilities) + return keypoint_visibilities diff --git a/models/research/object_detection/core/keypoint_ops_test.py b/models/research/object_detection/core/keypoint_ops_test.py new file mode 100644 index 0000000000000000000000000000000000000000..695e8fa1c6efcac8900577cd4657393b01d6d8d1 --- /dev/null +++ b/models/research/object_detection/core/keypoint_ops_test.py @@ -0,0 +1,317 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.core.keypoint_ops.""" +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.core import keypoint_ops +from object_detection.utils import test_case + + +class KeypointOpsTest(test_case.TestCase): + """Tests for common keypoint operations.""" + + def test_scale(self): + def graph_fn(): + keypoints = tf.constant([ + [[0.0, 0.0], [100.0, 200.0]], + [[50.0, 120.0], [100.0, 140.0]] + ]) + y_scale = tf.constant(1.0 / 100) + x_scale = tf.constant(1.0 / 200) + + expected_keypoints = tf.constant([ + [[0., 0.], [1.0, 1.0]], + [[0.5, 0.6], [1.0, 0.7]] + ]) + output = keypoint_ops.scale(keypoints, y_scale, x_scale) + return output, expected_keypoints + output, expected_keypoints = self.execute(graph_fn, []) + self.assertAllClose(output, expected_keypoints) + + def test_clip_to_window(self): + def graph_fn(): + keypoints = tf.constant([ + [[0.25, 0.5], [0.75, 0.75]], + [[0.5, 0.0], [1.0, 1.0]] + ]) + window = tf.constant([0.25, 0.25, 0.75, 0.75]) + + expected_keypoints = tf.constant([ + [[0.25, 0.5], [0.75, 0.75]], + [[0.5, 0.25], [0.75, 0.75]] + ]) + output = keypoint_ops.clip_to_window(keypoints, window) + return output, expected_keypoints + output, expected_keypoints = self.execute(graph_fn, []) + self.assertAllClose(output, expected_keypoints) + + def test_prune_outside_window(self): + def graph_fn(): + keypoints = tf.constant([ + [[0.25, 0.5], [0.75, 0.75]], + [[0.5, 0.0], [1.0, 1.0]] + ]) + window = tf.constant([0.25, 0.25, 0.75, 0.75]) + + expected_keypoints = tf.constant([[[0.25, 0.5], [0.75, 0.75]], + [[np.nan, np.nan], [np.nan, np.nan]]]) + output = keypoint_ops.prune_outside_window(keypoints, window) + return output, expected_keypoints + output, expected_keypoints = self.execute(graph_fn, []) + self.assertAllClose(output, expected_keypoints) + + def test_change_coordinate_frame(self): + def graph_fn(): + keypoints = tf.constant([ + [[0.25, 0.5], [0.75, 0.75]], + [[0.5, 0.0], [1.0, 1.0]] + ]) + window = tf.constant([0.25, 0.25, 0.75, 0.75]) + + expected_keypoints = tf.constant([ + [[0, 0.5], [1.0, 1.0]], + [[0.5, -0.5], [1.5, 1.5]] + ]) + output = keypoint_ops.change_coordinate_frame(keypoints, window) + return output, expected_keypoints + output, expected_keypoints = self.execute(graph_fn, []) + self.assertAllClose(output, expected_keypoints) + + def test_keypoints_to_enclosing_bounding_boxes(self): + def graph_fn(): + keypoints = tf.constant( + [ + [ # Instance 0. + [5., 10.], + [3., 20.], + [8., 4.], + ], + [ # Instance 1. + [2., 12.], + [0., 3.], + [5., 19.], + ], + ], dtype=tf.float32) + bboxes = keypoint_ops.keypoints_to_enclosing_bounding_boxes(keypoints) + return bboxes + output = self.execute(graph_fn, []) + expected_bboxes = np.array( + [ + [3., 4., 8., 20.], + [0., 3., 5., 19.] 
+ ]) + self.assertAllClose(expected_bboxes, output) + + def test_to_normalized_coordinates(self): + def graph_fn(): + keypoints = tf.constant([ + [[10., 30.], [30., 45.]], + [[20., 0.], [40., 60.]] + ]) + output = keypoint_ops.to_normalized_coordinates( + keypoints, 40, 60) + expected_keypoints = tf.constant([ + [[0.25, 0.5], [0.75, 0.75]], + [[0.5, 0.0], [1.0, 1.0]] + ]) + return output, expected_keypoints + output, expected_keypoints = self.execute(graph_fn, []) + self.assertAllClose(output, expected_keypoints) + + def test_to_normalized_coordinates_already_normalized(self): + if self.has_tpu(): return + def graph_fn(): + keypoints = tf.constant([ + [[0.25, 0.5], [0.75, 0.75]], + [[0.5, 0.0], [1.0, 1.0]] + ]) + output = keypoint_ops.to_normalized_coordinates( + keypoints, 40, 60) + return output + with self.assertRaisesOpError('assertion failed'): + self.execute_cpu(graph_fn, []) + + def test_to_absolute_coordinates(self): + def graph_fn(): + keypoints = tf.constant([ + [[0.25, 0.5], [0.75, 0.75]], + [[0.5, 0.0], [1.0, 1.0]] + ]) + output = keypoint_ops.to_absolute_coordinates( + keypoints, 40, 60) + expected_keypoints = tf.constant([ + [[10., 30.], [30., 45.]], + [[20., 0.], [40., 60.]] + ]) + return output, expected_keypoints + output, expected_keypoints = self.execute(graph_fn, []) + self.assertAllClose(output, expected_keypoints) + + def test_to_absolute_coordinates_already_absolute(self): + if self.has_tpu(): return + def graph_fn(): + keypoints = tf.constant([ + [[10., 30.], [30., 45.]], + [[20., 0.], [40., 60.]] + ]) + output = keypoint_ops.to_absolute_coordinates( + keypoints, 40, 60) + return output + with self.assertRaisesOpError('assertion failed'): + self.execute_cpu(graph_fn, []) + + def test_flip_horizontal(self): + def graph_fn(): + keypoints = tf.constant([ + [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], + [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]] + ]) + flip_permutation = [0, 2, 1] + + expected_keypoints = tf.constant([ + [[0.1, 0.9], [0.3, 0.7], [0.2, 0.8]], + [[0.4, 0.6], [0.6, 0.4], [0.5, 0.5]], + ]) + output = keypoint_ops.flip_horizontal(keypoints, 0.5, flip_permutation) + return output, expected_keypoints + output, expected_keypoints = self.execute(graph_fn, []) + self.assertAllClose(output, expected_keypoints) + + def test_flip_vertical(self): + def graph_fn(): + keypoints = tf.constant([ + [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], + [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]] + ]) + flip_permutation = [0, 2, 1] + + expected_keypoints = tf.constant([ + [[0.9, 0.1], [0.7, 0.3], [0.8, 0.2]], + [[0.6, 0.4], [0.4, 0.6], [0.5, 0.5]], + ]) + output = keypoint_ops.flip_vertical(keypoints, 0.5, flip_permutation) + return output, expected_keypoints + output, expected_keypoints = self.execute(graph_fn, []) + self.assertAllClose(output, expected_keypoints) + + def test_rot90(self): + def graph_fn(): + keypoints = tf.constant([ + [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], + [[0.4, 0.6], [0.5, 0.6], [0.6, 0.7]] + ]) + expected_keypoints = tf.constant([ + [[0.9, 0.1], [0.8, 0.2], [0.7, 0.3]], + [[0.4, 0.4], [0.4, 0.5], [0.3, 0.6]], + ]) + output = keypoint_ops.rot90(keypoints) + return output, expected_keypoints + output, expected_keypoints = self.execute(graph_fn, []) + self.assertAllClose(output, expected_keypoints) + + def test_keypoint_weights_from_visibilities(self): + def graph_fn(): + keypoint_visibilities = tf.constant([ + [True, True, False], + [False, True, False] + ]) + per_keypoint_weights = [1.0, 2.0, 3.0] + keypoint_weights = keypoint_ops.keypoint_weights_from_visibilities( + 
keypoint_visibilities, per_keypoint_weights) + return keypoint_weights + expected_keypoint_weights = [ + [1.0, 2.0, 0.0], + [0.0, 2.0, 0.0] + ] + output = self.execute(graph_fn, []) + self.assertAllClose(output, expected_keypoint_weights) + + def test_keypoint_weights_from_visibilities_no_per_kpt_weights(self): + def graph_fn(): + keypoint_visibilities = tf.constant([ + [True, True, False], + [False, True, False] + ]) + keypoint_weights = keypoint_ops.keypoint_weights_from_visibilities( + keypoint_visibilities) + return keypoint_weights + expected_keypoint_weights = [ + [1.0, 1.0, 0.0], + [0.0, 1.0, 0.0] + ] + output = self.execute(graph_fn, []) + self.assertAllClose(expected_keypoint_weights, output) + + def test_set_keypoint_visibilities_no_initial_kpt_vis(self): + keypoints_np = np.array( + [ + [[np.nan, 0.2], + [np.nan, np.nan], + [-3., 7.]], + [[0.5, 0.2], + [4., 1.0], + [-3., np.nan]], + ], dtype=np.float32) + def graph_fn(): + keypoints = tf.constant(keypoints_np, dtype=tf.float32) + keypoint_visibilities = keypoint_ops.set_keypoint_visibilities( + keypoints) + return keypoint_visibilities + + expected_kpt_vis = [ + [False, False, True], + [True, True, False] + ] + output = self.execute(graph_fn, []) + self.assertAllEqual(expected_kpt_vis, output) + + def test_set_keypoint_visibilities(self): + keypoints_np = np.array( + [ + [[np.nan, 0.2], + [np.nan, np.nan], + [-3., 7.]], + [[0.5, 0.2], + [4., 1.0], + [-3., np.nan]], + ], dtype=np.float32) + initial_keypoint_visibilities_np = np.array( + [ + [False, + True, # Will be overriden by NaN coords. + False], # Will be maintained, even though non-NaN coords. + [True, + False, # Will be maintained, even though non-NaN coords. + False] + ]) + def graph_fn(): + keypoints = tf.constant(keypoints_np, dtype=tf.float32) + initial_keypoint_visibilities = tf.constant( + initial_keypoint_visibilities_np, dtype=tf.bool) + keypoint_visibilities = keypoint_ops.set_keypoint_visibilities( + keypoints, initial_keypoint_visibilities) + return keypoint_visibilities + + expected_kpt_vis = [ + [False, False, False], + [True, False, False] + ] + output = self.execute(graph_fn, []) + self.assertAllEqual(expected_kpt_vis, output) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/core/losses.py b/models/research/object_detection/core/losses.py new file mode 100644 index 0000000000000000000000000000000000000000..c4d499e7e6c4ed5da803c48ff3d8908e713a3c2e --- /dev/null +++ b/models/research/object_detection/core/losses.py @@ -0,0 +1,775 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Classification and regression loss functions for object detection. 
+ +Localization losses: + * WeightedL2LocalizationLoss + * WeightedSmoothL1LocalizationLoss + * WeightedIOULocalizationLoss + +Classification losses: + * WeightedSigmoidClassificationLoss + * WeightedSoftmaxClassificationLoss + * WeightedSoftmaxClassificationAgainstLogitsLoss + * BootstrappedSigmoidClassificationLoss +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import abc +import six +import tensorflow.compat.v1 as tf +from object_detection.core import box_list +from object_detection.core import box_list_ops +from object_detection.utils import ops + + +class Loss(six.with_metaclass(abc.ABCMeta, object)): + """Abstract base class for loss functions.""" + + def __call__(self, + prediction_tensor, + target_tensor, + ignore_nan_targets=False, + losses_mask=None, + scope=None, + **params): + """Call the loss function. + + Args: + prediction_tensor: an N-d tensor of shape [batch, anchors, ...] + representing predicted quantities. + target_tensor: an N-d tensor of shape [batch, anchors, ...] representing + regression or classification targets. + ignore_nan_targets: whether to ignore nan targets in the loss computation. + E.g. can be used if the target tensor is missing groundtruth data that + shouldn't be factored into the loss. + losses_mask: A [batch] boolean tensor that indicates whether losses should + be applied to individual images in the batch. For elements that + are False, corresponding prediction, target, and weight tensors will not + contribute to loss computation. If None, no filtering will take place + prior to loss computation. + scope: Op scope name. Defaults to 'Loss' if None. + **params: Additional keyword arguments for specific implementations of + the Loss. + + Returns: + loss: a tensor representing the value of the loss function. + """ + with tf.name_scope(scope, 'Loss', + [prediction_tensor, target_tensor, params]) as scope: + if ignore_nan_targets: + target_tensor = tf.where(tf.is_nan(target_tensor), + prediction_tensor, + target_tensor) + if losses_mask is not None: + tensor_multiplier = self._get_loss_multiplier_for_tensor( + prediction_tensor, + losses_mask) + prediction_tensor *= tensor_multiplier + target_tensor *= tensor_multiplier + + if 'weights' in params: + params['weights'] = tf.convert_to_tensor(params['weights']) + weights_multiplier = self._get_loss_multiplier_for_tensor( + params['weights'], + losses_mask) + params['weights'] *= weights_multiplier + return self._compute_loss(prediction_tensor, target_tensor, **params) + + def _get_loss_multiplier_for_tensor(self, tensor, losses_mask): + loss_multiplier_shape = tf.stack([-1] + [1] * (len(tensor.shape) - 1)) + return tf.cast(tf.reshape(losses_mask, loss_multiplier_shape), tf.float32) + + @abc.abstractmethod + def _compute_loss(self, prediction_tensor, target_tensor, **params): + """Method to be overridden by implementations. + + Args: + prediction_tensor: a tensor representing predicted quantities + target_tensor: a tensor representing regression or classification targets + **params: Additional keyword arguments for specific implementations of + the Loss. + + Returns: + loss: an N-d tensor of shape [batch, anchors, ...] containing the loss per + anchor + """ + pass + + +class WeightedL2LocalizationLoss(Loss): + """L2 localization loss function with anchorwise output support. 
+ + Loss[b,a] = .5 * ||weights[b,a] * (prediction[b,a,:] - target[b,a,:])||^2 + """ + + def _compute_loss(self, prediction_tensor, target_tensor, weights): + """Compute loss function. + + Args: + prediction_tensor: A float tensor of shape [batch_size, num_anchors, + code_size] representing the (encoded) predicted locations of objects. + target_tensor: A float tensor of shape [batch_size, num_anchors, + code_size] representing the regression targets + weights: a float tensor of shape [batch_size, num_anchors] + + Returns: + loss: a float tensor of shape [batch_size, num_anchors] tensor + representing the value of the loss function. + """ + weighted_diff = (prediction_tensor - target_tensor) * tf.expand_dims( + weights, 2) + square_diff = 0.5 * tf.square(weighted_diff) + return tf.reduce_sum(square_diff, 2) + + +class WeightedSmoothL1LocalizationLoss(Loss): + """Smooth L1 localization loss function aka Huber Loss.. + + The smooth L1_loss is defined elementwise as .5 x^2 if |x| <= delta and + delta * (|x|- 0.5*delta) otherwise, where x is the difference between + predictions and target. + + See also Equation (3) in the Fast R-CNN paper by Ross Girshick (ICCV 2015) + """ + + def __init__(self, delta=1.0): + """Constructor. + + Args: + delta: delta for smooth L1 loss. + """ + super(WeightedSmoothL1LocalizationLoss, self).__init__() + self._delta = delta + + def _compute_loss(self, prediction_tensor, target_tensor, weights): + """Compute loss function. + + Args: + prediction_tensor: A float tensor of shape [batch_size, num_anchors, + code_size] representing the (encoded) predicted locations of objects. + target_tensor: A float tensor of shape [batch_size, num_anchors, + code_size] representing the regression targets + weights: a float tensor of shape [batch_size, num_anchors] + + Returns: + loss: a float tensor of shape [batch_size, num_anchors] tensor + representing the value of the loss function. + """ + return tf.reduce_sum(tf.losses.huber_loss( + target_tensor, + prediction_tensor, + delta=self._delta, + weights=tf.expand_dims(weights, axis=2), + loss_collection=None, + reduction=tf.losses.Reduction.NONE + ), axis=2) + + +class WeightedIOULocalizationLoss(Loss): + """IOU localization loss function. + + Sums the IOU for corresponding pairs of predicted/groundtruth boxes + and for each pair assign a loss of 1 - IOU. We then compute a weighted + sum over all pairs which is returned as the total loss. + """ + + def _compute_loss(self, prediction_tensor, target_tensor, weights): + """Compute loss function. + + Args: + prediction_tensor: A float tensor of shape [batch_size, num_anchors, 4] + representing the decoded predicted boxes + target_tensor: A float tensor of shape [batch_size, num_anchors, 4] + representing the decoded target boxes + weights: a float tensor of shape [batch_size, num_anchors] + + Returns: + loss: a float tensor of shape [batch_size, num_anchors] tensor + representing the value of the loss function. + """ + predicted_boxes = box_list.BoxList(tf.reshape(prediction_tensor, [-1, 4])) + target_boxes = box_list.BoxList(tf.reshape(target_tensor, [-1, 4])) + per_anchor_iou_loss = 1.0 - box_list_ops.matched_iou(predicted_boxes, + target_boxes) + return tf.reshape(weights, [-1]) * per_anchor_iou_loss + + +class WeightedSigmoidClassificationLoss(Loss): + """Sigmoid cross entropy classification loss function.""" + + def _compute_loss(self, + prediction_tensor, + target_tensor, + weights, + class_indices=None): + """Compute loss function. 
+ + Args: + prediction_tensor: A float tensor of shape [batch_size, num_anchors, + num_classes] representing the predicted logits for each class + target_tensor: A float tensor of shape [batch_size, num_anchors, + num_classes] representing one-hot encoded classification targets + weights: a float tensor of shape, either [batch_size, num_anchors, + num_classes] or [batch_size, num_anchors, 1]. If the shape is + [batch_size, num_anchors, 1], all the classses are equally weighted. + class_indices: (Optional) A 1-D integer tensor of class indices. + If provided, computes loss only for the specified class indices. + + Returns: + loss: a float tensor of shape [batch_size, num_anchors, num_classes] + representing the value of the loss function. + """ + if class_indices is not None: + weights *= tf.reshape( + ops.indices_to_dense_vector(class_indices, + tf.shape(prediction_tensor)[2]), + [1, 1, -1]) + per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits( + labels=target_tensor, logits=prediction_tensor)) + return per_entry_cross_ent * weights + + +class SigmoidFocalClassificationLoss(Loss): + """Sigmoid focal cross entropy loss. + + Focal loss down-weights well classified examples and focusses on the hard + examples. See https://arxiv.org/pdf/1708.02002.pdf for the loss definition. + """ + + def __init__(self, gamma=2.0, alpha=0.25): + """Constructor. + + Args: + gamma: exponent of the modulating factor (1 - p_t) ^ gamma. + alpha: optional alpha weighting factor to balance positives vs negatives. + """ + super(SigmoidFocalClassificationLoss, self).__init__() + self._alpha = alpha + self._gamma = gamma + + def _compute_loss(self, + prediction_tensor, + target_tensor, + weights, + class_indices=None): + """Compute loss function. + + Args: + prediction_tensor: A float tensor of shape [batch_size, num_anchors, + num_classes] representing the predicted logits for each class + target_tensor: A float tensor of shape [batch_size, num_anchors, + num_classes] representing one-hot encoded classification targets + weights: a float tensor of shape, either [batch_size, num_anchors, + num_classes] or [batch_size, num_anchors, 1]. If the shape is + [batch_size, num_anchors, 1], all the classses are equally weighted. + class_indices: (Optional) A 1-D integer tensor of class indices. + If provided, computes loss only for the specified class indices. + + Returns: + loss: a float tensor of shape [batch_size, num_anchors, num_classes] + representing the value of the loss function. + """ + if class_indices is not None: + weights *= tf.reshape( + ops.indices_to_dense_vector(class_indices, + tf.shape(prediction_tensor)[2]), + [1, 1, -1]) + per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits( + labels=target_tensor, logits=prediction_tensor)) + prediction_probabilities = tf.sigmoid(prediction_tensor) + p_t = ((target_tensor * prediction_probabilities) + + ((1 - target_tensor) * (1 - prediction_probabilities))) + modulating_factor = 1.0 + if self._gamma: + modulating_factor = tf.pow(1.0 - p_t, self._gamma) + alpha_weight_factor = 1.0 + if self._alpha is not None: + alpha_weight_factor = (target_tensor * self._alpha + + (1 - target_tensor) * (1 - self._alpha)) + focal_cross_entropy_loss = (modulating_factor * alpha_weight_factor * + per_entry_cross_ent) + return focal_cross_entropy_loss * weights + + +class WeightedSoftmaxClassificationLoss(Loss): + """Softmax loss function.""" + + def __init__(self, logit_scale=1.0): + """Constructor. 
+ + Args: + logit_scale: When this value is high, the prediction is "diffused" and + when this value is low, the prediction is made peakier. + (default 1.0) + + """ + super(WeightedSoftmaxClassificationLoss, self).__init__() + self._logit_scale = logit_scale + + def _compute_loss(self, prediction_tensor, target_tensor, weights): + """Compute loss function. + + Args: + prediction_tensor: A float tensor of shape [batch_size, num_anchors, + num_classes] representing the predicted logits for each class + target_tensor: A float tensor of shape [batch_size, num_anchors, + num_classes] representing one-hot encoded classification targets + weights: a float tensor of shape, either [batch_size, num_anchors, + num_classes] or [batch_size, num_anchors, 1]. If the shape is + [batch_size, num_anchors, 1], all the classses are equally weighted. + + Returns: + loss: a float tensor of shape [batch_size, num_anchors] + representing the value of the loss function. + """ + weights = tf.reduce_mean(weights, axis=2) + num_classes = prediction_tensor.get_shape().as_list()[-1] + prediction_tensor = tf.divide( + prediction_tensor, self._logit_scale, name='scale_logit') + per_row_cross_ent = (tf.nn.softmax_cross_entropy_with_logits( + labels=tf.reshape(target_tensor, [-1, num_classes]), + logits=tf.reshape(prediction_tensor, [-1, num_classes]))) + return tf.reshape(per_row_cross_ent, tf.shape(weights)) * weights + + +class WeightedSoftmaxClassificationAgainstLogitsLoss(Loss): + """Softmax loss function against logits. + + Targets are expected to be provided in logits space instead of "one hot" or + "probability distribution" space. + """ + + def __init__(self, logit_scale=1.0): + """Constructor. + + Args: + logit_scale: When this value is high, the target is "diffused" and + when this value is low, the target is made peakier. + (default 1.0) + + """ + super(WeightedSoftmaxClassificationAgainstLogitsLoss, self).__init__() + self._logit_scale = logit_scale + + def _scale_and_softmax_logits(self, logits): + """Scale logits then apply softmax.""" + scaled_logits = tf.divide(logits, self._logit_scale, name='scale_logits') + return tf.nn.softmax(scaled_logits, name='convert_scores') + + def _compute_loss(self, prediction_tensor, target_tensor, weights): + """Compute loss function. + + Args: + prediction_tensor: A float tensor of shape [batch_size, num_anchors, + num_classes] representing the predicted logits for each class + target_tensor: A float tensor of shape [batch_size, num_anchors, + num_classes] representing logit classification targets + weights: a float tensor of shape, either [batch_size, num_anchors, + num_classes] or [batch_size, num_anchors, 1]. If the shape is + [batch_size, num_anchors, 1], all the classses are equally weighted. + + Returns: + loss: a float tensor of shape [batch_size, num_anchors] + representing the value of the loss function. + """ + weights = tf.reduce_mean(weights, axis=2) + num_classes = prediction_tensor.get_shape().as_list()[-1] + target_tensor = self._scale_and_softmax_logits(target_tensor) + prediction_tensor = tf.divide(prediction_tensor, self._logit_scale, + name='scale_logits') + + per_row_cross_ent = (tf.nn.softmax_cross_entropy_with_logits( + labels=tf.reshape(target_tensor, [-1, num_classes]), + logits=tf.reshape(prediction_tensor, [-1, num_classes]))) + return tf.reshape(per_row_cross_ent, tf.shape(weights)) * weights + + +class BootstrappedSigmoidClassificationLoss(Loss): + """Bootstrapped sigmoid cross entropy classification loss function. 
+ + This loss uses a convex combination of training labels and the current model's + predictions as training targets in the classification loss. The idea is that + as the model improves over time, its predictions can be trusted more and we + can use these predictions to mitigate the damage of noisy/incorrect labels, + because incorrect labels are likely to be eventually highly inconsistent with + other stimuli predicted to have the same label by the model. + + In "soft" bootstrapping, we use all predicted class probabilities, whereas in + "hard" bootstrapping, we use the single class favored by the model. + + See also Training Deep Neural Networks On Noisy Labels with Bootstrapping by + Reed et al. (ICLR 2015). + """ + + def __init__(self, alpha, bootstrap_type='soft'): + """Constructor. + + Args: + alpha: a float32 scalar tensor between 0 and 1 representing interpolation + weight + bootstrap_type: set to either 'hard' or 'soft' (default) + + Raises: + ValueError: if bootstrap_type is not either 'hard' or 'soft' + """ + super(BootstrappedSigmoidClassificationLoss, self).__init__() + if bootstrap_type != 'hard' and bootstrap_type != 'soft': + raise ValueError('Unrecognized bootstrap_type: must be one of ' + '\'hard\' or \'soft.\'') + self._alpha = alpha + self._bootstrap_type = bootstrap_type + + def _compute_loss(self, prediction_tensor, target_tensor, weights): + """Compute loss function. + + Args: + prediction_tensor: A float tensor of shape [batch_size, num_anchors, + num_classes] representing the predicted logits for each class + target_tensor: A float tensor of shape [batch_size, num_anchors, + num_classes] representing one-hot encoded classification targets + weights: a float tensor of shape, either [batch_size, num_anchors, + num_classes] or [batch_size, num_anchors, 1]. If the shape is + [batch_size, num_anchors, 1], all the classses are equally weighted. + + Returns: + loss: a float tensor of shape [batch_size, num_anchors, num_classes] + representing the value of the loss function. + """ + if self._bootstrap_type == 'soft': + bootstrap_target_tensor = self._alpha * target_tensor + ( + 1.0 - self._alpha) * tf.sigmoid(prediction_tensor) + else: + bootstrap_target_tensor = self._alpha * target_tensor + ( + 1.0 - self._alpha) * tf.cast( + tf.sigmoid(prediction_tensor) > 0.5, tf.float32) + per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits( + labels=bootstrap_target_tensor, logits=prediction_tensor)) + return per_entry_cross_ent * weights + + +class HardExampleMiner(object): + """Hard example mining for regions in a list of images. + + Implements hard example mining to select a subset of regions to be + back-propagated. For each image, selects the regions with highest losses, + subject to the condition that a newly selected region cannot have + an IOU > iou_threshold with any of the previously selected regions. + This can be achieved by re-using a greedy non-maximum suppression algorithm. + A constraint on the number of negatives mined per positive region can also be + enforced. + + Reference papers: "Training Region-based Object Detectors with Online + Hard Example Mining" (CVPR 2016) by Srivastava et al., and + "SSD: Single Shot MultiBox Detector" (ECCV 2016) by Liu et al. + """ + + def __init__(self, + num_hard_examples=64, + iou_threshold=0.7, + loss_type='both', + cls_loss_weight=0.05, + loc_loss_weight=0.06, + max_negatives_per_positive=None, + min_negatives_per_image=0): + """Constructor. 
+ + The hard example mining implemented by this class can replicate the behavior + in the two aforementioned papers (Srivastava et al., and Liu et al). + To replicate the A2 paper (Srivastava et al), num_hard_examples is set + to a fixed parameter (64 by default) and iou_threshold is set to .7 for + running non-max-suppression the predicted boxes prior to hard mining. + In order to replicate the SSD paper (Liu et al), num_hard_examples should + be set to None, max_negatives_per_positive should be 3 and iou_threshold + should be 1.0 (in order to effectively turn off NMS). + + Args: + num_hard_examples: maximum number of hard examples to be + selected per image (prior to enforcing max negative to positive ratio + constraint). If set to None, all examples obtained after NMS are + considered. + iou_threshold: minimum intersection over union for an example + to be discarded during NMS. + loss_type: use only classification losses ('cls', default), + localization losses ('loc') or both losses ('both'). + In the last case, cls_loss_weight and loc_loss_weight are used to + compute weighted sum of the two losses. + cls_loss_weight: weight for classification loss. + loc_loss_weight: weight for location loss. + max_negatives_per_positive: maximum number of negatives to retain for + each positive anchor. By default, num_negatives_per_positive is None, + which means that we do not enforce a prespecified negative:positive + ratio. Note also that num_negatives_per_positives can be a float + (and will be converted to be a float even if it is passed in otherwise). + min_negatives_per_image: minimum number of negative anchors to sample for + a given image. Setting this to a positive number allows sampling + negatives in an image without any positive anchors and thus not biased + towards at least one detection per image. + """ + self._num_hard_examples = num_hard_examples + self._iou_threshold = iou_threshold + self._loss_type = loss_type + self._cls_loss_weight = cls_loss_weight + self._loc_loss_weight = loc_loss_weight + self._max_negatives_per_positive = max_negatives_per_positive + self._min_negatives_per_image = min_negatives_per_image + if self._max_negatives_per_positive is not None: + self._max_negatives_per_positive = float(self._max_negatives_per_positive) + self._num_positives_list = None + self._num_negatives_list = None + + def __call__(self, + location_losses, + cls_losses, + decoded_boxlist_list, + match_list=None): + """Computes localization and classification losses after hard mining. + + Args: + location_losses: a float tensor of shape [num_images, num_anchors] + representing anchorwise localization losses. + cls_losses: a float tensor of shape [num_images, num_anchors] + representing anchorwise classification losses. + decoded_boxlist_list: a list of decoded BoxList representing location + predictions for each image. + match_list: an optional list of matcher.Match objects encoding the match + between anchors and groundtruth boxes for each image of the batch, + with rows of the Match objects corresponding to groundtruth boxes + and columns corresponding to anchors. Match objects in match_list are + used to reference which anchors are positive, negative or ignored. If + self._max_negatives_per_positive exists, these are then used to enforce + a prespecified negative to positive ratio. + + Returns: + mined_location_loss: a float scalar with sum of localization losses from + selected hard examples. + mined_cls_loss: a float scalar with sum of classification losses from + selected hard examples. 
+ Raises: + ValueError: if location_losses, cls_losses and decoded_boxlist_list do + not have compatible shapes (i.e., they must correspond to the same + number of images). + ValueError: if match_list is specified but its length does not match + len(decoded_boxlist_list). + """ + mined_location_losses = [] + mined_cls_losses = [] + location_losses = tf.unstack(location_losses) + cls_losses = tf.unstack(cls_losses) + num_images = len(decoded_boxlist_list) + if not match_list: + match_list = num_images * [None] + if not len(location_losses) == len(decoded_boxlist_list) == len(cls_losses): + raise ValueError('location_losses, cls_losses and decoded_boxlist_list ' + 'do not have compatible shapes.') + if not isinstance(match_list, list): + raise ValueError('match_list must be a list.') + if len(match_list) != len(decoded_boxlist_list): + raise ValueError('match_list must either be None or have ' + 'length=len(decoded_boxlist_list).') + num_positives_list = [] + num_negatives_list = [] + for ind, detection_boxlist in enumerate(decoded_boxlist_list): + box_locations = detection_boxlist.get() + match = match_list[ind] + image_losses = cls_losses[ind] + if self._loss_type == 'loc': + image_losses = location_losses[ind] + elif self._loss_type == 'both': + image_losses *= self._cls_loss_weight + image_losses += location_losses[ind] * self._loc_loss_weight + if self._num_hard_examples is not None: + num_hard_examples = self._num_hard_examples + else: + num_hard_examples = detection_boxlist.num_boxes() + selected_indices = tf.image.non_max_suppression( + box_locations, image_losses, num_hard_examples, self._iou_threshold) + if self._max_negatives_per_positive is not None and match: + (selected_indices, num_positives, + num_negatives) = self._subsample_selection_to_desired_neg_pos_ratio( + selected_indices, match, self._max_negatives_per_positive, + self._min_negatives_per_image) + num_positives_list.append(num_positives) + num_negatives_list.append(num_negatives) + mined_location_losses.append( + tf.reduce_sum(tf.gather(location_losses[ind], selected_indices))) + mined_cls_losses.append( + tf.reduce_sum(tf.gather(cls_losses[ind], selected_indices))) + location_loss = tf.reduce_sum(tf.stack(mined_location_losses)) + cls_loss = tf.reduce_sum(tf.stack(mined_cls_losses)) + if match and self._max_negatives_per_positive: + self._num_positives_list = num_positives_list + self._num_negatives_list = num_negatives_list + return (location_loss, cls_loss) + + def summarize(self): + """Summarize the number of positives and negatives after mining.""" + if self._num_positives_list and self._num_negatives_list: + avg_num_positives = tf.reduce_mean( + tf.cast(self._num_positives_list, dtype=tf.float32)) + avg_num_negatives = tf.reduce_mean( + tf.cast(self._num_negatives_list, dtype=tf.float32)) + tf.summary.scalar('HardExampleMiner/NumPositives', avg_num_positives) + tf.summary.scalar('HardExampleMiner/NumNegatives', avg_num_negatives) + + def _subsample_selection_to_desired_neg_pos_ratio(self, + indices, + match, + max_negatives_per_positive, + min_negatives_per_image=0): + """Subsample a collection of selected indices to a desired neg:pos ratio. + + This function takes a subset of M indices (indexing into a large anchor + collection of N anchors where M=0, + meaning that column i is matched with row match_results[i]. + (2) match_results[i]=-1, meaning that column i is not matched. + (3) match_results[i]=-2, meaning that column i is ignored. 
+ use_matmul_gather: Use matrix multiplication based gather instead of + standard tf.gather. (Default: False). + + Raises: + ValueError: if match_results does not have rank 1 or is not an + integer int32 scalar tensor + """ + if match_results.shape.ndims != 1: + raise ValueError('match_results should have rank 1') + if match_results.dtype != tf.int32: + raise ValueError('match_results should be an int32 or int64 scalar ' + 'tensor') + self._match_results = match_results + self._gather_op = tf.gather + if use_matmul_gather: + self._gather_op = ops.matmul_gather_on_zeroth_axis + + @property + def match_results(self): + """The accessor for match results. + + Returns: + the tensor which encodes the match results. + """ + return self._match_results + + def matched_column_indices(self): + """Returns column indices that match to some row. + + The indices returned by this op are always sorted in increasing order. + + Returns: + column_indices: int32 tensor of shape [K] with column indices. + """ + return self._reshape_and_cast(tf.where(tf.greater(self._match_results, -1))) + + def matched_column_indicator(self): + """Returns column indices that are matched. + + Returns: + column_indices: int32 tensor of shape [K] with column indices. + """ + return tf.greater_equal(self._match_results, 0) + + def num_matched_columns(self): + """Returns number (int32 scalar tensor) of matched columns.""" + return tf.size(self.matched_column_indices()) + + def unmatched_column_indices(self): + """Returns column indices that do not match any row. + + The indices returned by this op are always sorted in increasing order. + + Returns: + column_indices: int32 tensor of shape [K] with column indices. + """ + return self._reshape_and_cast(tf.where(tf.equal(self._match_results, -1))) + + def unmatched_column_indicator(self): + """Returns column indices that are unmatched. + + Returns: + column_indices: int32 tensor of shape [K] with column indices. + """ + return tf.equal(self._match_results, -1) + + def num_unmatched_columns(self): + """Returns number (int32 scalar tensor) of unmatched columns.""" + return tf.size(self.unmatched_column_indices()) + + def ignored_column_indices(self): + """Returns column indices that are ignored (neither Matched nor Unmatched). + + The indices returned by this op are always sorted in increasing order. + + Returns: + column_indices: int32 tensor of shape [K] with column indices. + """ + return self._reshape_and_cast(tf.where(self.ignored_column_indicator())) + + def ignored_column_indicator(self): + """Returns boolean column indicator where True means the colum is ignored. + + Returns: + column_indicator: boolean vector which is True for all ignored column + indices. + """ + return tf.equal(self._match_results, -2) + + def num_ignored_columns(self): + """Returns number (int32 scalar tensor) of matched columns.""" + return tf.size(self.ignored_column_indices()) + + def unmatched_or_ignored_column_indices(self): + """Returns column indices that are unmatched or ignored. + + The indices returned by this op are always sorted in increasing order. + + Returns: + column_indices: int32 tensor of shape [K] with column indices. + """ + return self._reshape_and_cast(tf.where(tf.greater(0, self._match_results))) + + def matched_row_indices(self): + """Returns row indices that match some column. + + The indices returned by this op are ordered so as to be in correspondence + with the output of matched_column_indicator(). 
For example if + self.matched_column_indicator() is [0,2], and self.matched_row_indices() is + [7, 3], then we know that column 0 was matched to row 7 and column 2 was + matched to row 3. + + Returns: + row_indices: int32 tensor of shape [K] with row indices. + """ + return self._reshape_and_cast( + self._gather_op(tf.cast(self._match_results, dtype=tf.float32), + self.matched_column_indices())) + + def num_matched_rows(self): + """Returns number (int32 scalar tensor) of matched rows.""" + unique_rows, _ = tf.unique(self.matched_row_indices()) + return tf.size(unique_rows) + + def _reshape_and_cast(self, t): + return tf.cast(tf.reshape(t, [-1]), tf.int32) + + def gather_based_on_match(self, input_tensor, unmatched_value, + ignored_value): + """Gathers elements from `input_tensor` based on match results. + + For columns that are matched to a row, gathered_tensor[col] is set to + input_tensor[match_results[col]]. For columns that are unmatched, + gathered_tensor[col] is set to unmatched_value. Finally, for columns that + are ignored gathered_tensor[col] is set to ignored_value. + + Note that the input_tensor.shape[1:] must match with unmatched_value.shape + and ignored_value.shape + + Args: + input_tensor: Tensor to gather values from. + unmatched_value: Constant tensor value for unmatched columns. + ignored_value: Constant tensor value for ignored columns. + + Returns: + gathered_tensor: A tensor containing values gathered from input_tensor. + The shape of the gathered tensor is [match_results.shape[0]] + + input_tensor.shape[1:]. + """ + input_tensor = tf.concat( + [tf.stack([ignored_value, unmatched_value]), + input_tensor], + axis=0) + gather_indices = tf.maximum(self.match_results + 2, 0) + gathered_tensor = self._gather_op(input_tensor, gather_indices) + return gathered_tensor + + +class Matcher(six.with_metaclass(abc.ABCMeta, object)): + """Abstract base class for matcher. + """ + + def __init__(self, use_matmul_gather=False): + """Constructs a Matcher. + + Args: + use_matmul_gather: Force constructed match objects to use matrix + multiplication based gather instead of standard tf.gather. + (Default: False). + """ + self._use_matmul_gather = use_matmul_gather + + def match(self, similarity_matrix, valid_rows=None, scope=None): + """Computes matches among row and column indices and returns the result. + + Computes matches among the row and column indices based on the similarity + matrix and optional arguments. + + Args: + similarity_matrix: Float tensor of shape [N, M] with pairwise similarity + where higher value means more similar. + valid_rows: A boolean tensor of shape [N] indicating the rows that are + valid for matching. + scope: Op scope name. Defaults to 'Match' if None. + + Returns: + A Match object with the results of matching. + """ + with tf.name_scope(scope, 'Match') as scope: + if valid_rows is None: + valid_rows = tf.ones(tf.shape(similarity_matrix)[0], dtype=tf.bool) + return Match(self._match(similarity_matrix, valid_rows), + self._use_matmul_gather) + + @abc.abstractmethod + def _match(self, similarity_matrix, valid_rows): + """Method to be overridden by implementations. + + Args: + similarity_matrix: Float tensor of shape [N, M] with pairwise similarity + where higher value means more similar. + valid_rows: A boolean tensor of shape [N] indicating the rows that are + valid for matching. 
+ Returns: + match_results: Integer tensor of shape [M]: match_results[i]>=0 means + that column i is matched to row match_results[i], match_results[i]=-1 + means that the column is not matched. match_results[i]=-2 means that + the column is ignored (usually this happens when there is a very weak + match which one neither wants as positive nor negative example). + """ + pass diff --git a/models/research/object_detection/core/matcher_test.py b/models/research/object_detection/core/matcher_test.py new file mode 100644 index 0000000000000000000000000000000000000000..ad64075397e8ba2b6aea74b039036a84204f6631 --- /dev/null +++ b/models/research/object_detection/core/matcher_test.py @@ -0,0 +1,191 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.core.matcher.""" +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.core import matcher +from object_detection.utils import test_case + + +class MatchTest(test_case.TestCase): + + def test_get_correct_matched_columnIndices(self): + def graph_fn(): + match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) + match = matcher.Match(match_results) + matched_column_indices = match.matched_column_indices() + return matched_column_indices + expected_column_indices = [0, 1, 3, 5] + matched_column_indices = self.execute(graph_fn, []) + self.assertAllEqual(matched_column_indices, expected_column_indices) + + def test_get_correct_counts(self): + def graph_fn(): + match_results = tf.constant([3, 1, -1, 0, -1, 1, -2]) + match = matcher.Match(match_results) + num_matched_columns = match.num_matched_columns() + num_unmatched_columns = match.num_unmatched_columns() + num_ignored_columns = match.num_ignored_columns() + num_matched_rows = match.num_matched_rows() + return [num_matched_columns, num_unmatched_columns, num_ignored_columns, + num_matched_rows] + (num_matched_columns_out, num_unmatched_columns_out, + num_ignored_columns_out, + num_matched_rows_out) = self.execute_cpu(graph_fn, []) + exp_num_matched_columns = 4 + exp_num_unmatched_columns = 2 + exp_num_ignored_columns = 1 + exp_num_matched_rows = 3 + self.assertAllEqual(num_matched_columns_out, exp_num_matched_columns) + self.assertAllEqual(num_unmatched_columns_out, exp_num_unmatched_columns) + self.assertAllEqual(num_ignored_columns_out, exp_num_ignored_columns) + self.assertAllEqual(num_matched_rows_out, exp_num_matched_rows) + + def testGetCorrectUnmatchedColumnIndices(self): + def graph_fn(): + match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) + match = matcher.Match(match_results) + unmatched_column_indices = match.unmatched_column_indices() + return unmatched_column_indices + unmatched_column_indices = self.execute(graph_fn, []) + expected_column_indices = [2, 4] + self.assertAllEqual(unmatched_column_indices, expected_column_indices) + + def testGetCorrectMatchedRowIndices(self): + def graph_fn(): + 
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) + match = matcher.Match(match_results) + matched_row_indices = match.matched_row_indices() + return matched_row_indices + matched_row_indices = self.execute(graph_fn, []) + expected_row_indices = [3, 1, 0, 5] + self.assertAllEqual(matched_row_indices, expected_row_indices) + + def test_get_correct_ignored_column_indices(self): + def graph_fn(): + match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) + match = matcher.Match(match_results) + ignored_column_indices = match.ignored_column_indices() + return ignored_column_indices + ignored_column_indices = self.execute(graph_fn, []) + expected_column_indices = [6] + self.assertAllEqual(ignored_column_indices, expected_column_indices) + + def test_get_correct_matched_column_indicator(self): + def graph_fn(): + match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) + match = matcher.Match(match_results) + matched_column_indicator = match.matched_column_indicator() + return matched_column_indicator + expected_column_indicator = [True, True, False, True, False, True, False] + matched_column_indicator = self.execute(graph_fn, []) + self.assertAllEqual(matched_column_indicator, expected_column_indicator) + + def test_get_correct_unmatched_column_indicator(self): + def graph_fn(): + match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) + match = matcher.Match(match_results) + unmatched_column_indicator = match.unmatched_column_indicator() + return unmatched_column_indicator + expected_column_indicator = [False, False, True, False, True, False, False] + unmatched_column_indicator = self.execute(graph_fn, []) + self.assertAllEqual(unmatched_column_indicator, expected_column_indicator) + + def test_get_correct_ignored_column_indicator(self): + def graph_fn(): + match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) + match = matcher.Match(match_results) + ignored_column_indicator = match.ignored_column_indicator() + return ignored_column_indicator + expected_column_indicator = [False, False, False, False, False, False, True] + ignored_column_indicator = self.execute(graph_fn, []) + self.assertAllEqual(ignored_column_indicator, expected_column_indicator) + + def test_get_correct_unmatched_ignored_column_indices(self): + def graph_fn(): + match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) + match = matcher.Match(match_results) + unmatched_ignored_column_indices = (match. 
+ unmatched_or_ignored_column_indices()) + return unmatched_ignored_column_indices + expected_column_indices = [2, 4, 6] + unmatched_ignored_column_indices = self.execute(graph_fn, []) + self.assertAllEqual(unmatched_ignored_column_indices, + expected_column_indices) + + def test_all_columns_accounted_for(self): + # Note: deliberately setting to small number so not always + # all possibilities appear (matched, unmatched, ignored) + def graph_fn(): + match_results = tf.random_uniform( + [num_matches], minval=-2, maxval=5, dtype=tf.int32) + match = matcher.Match(match_results) + matched_column_indices = match.matched_column_indices() + unmatched_column_indices = match.unmatched_column_indices() + ignored_column_indices = match.ignored_column_indices() + return (matched_column_indices, unmatched_column_indices, + ignored_column_indices) + num_matches = 10 + matched, unmatched, ignored = self.execute(graph_fn, []) + all_indices = np.hstack((matched, unmatched, ignored)) + all_indices_sorted = np.sort(all_indices) + self.assertAllEqual(all_indices_sorted, + np.arange(num_matches, dtype=np.int32)) + + def test_scalar_gather_based_on_match(self): + def graph_fn(): + match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) + input_tensor = tf.constant([0, 1, 2, 3, 4, 5, 6, 7], dtype=tf.float32) + match = matcher.Match(match_results) + gathered_tensor = match.gather_based_on_match(input_tensor, + unmatched_value=100., + ignored_value=200.) + return gathered_tensor + expected_gathered_tensor = [3, 1, 100, 0, 100, 5, 200] + gathered_tensor_out = self.execute(graph_fn, []) + self.assertAllEqual(expected_gathered_tensor, gathered_tensor_out) + + def test_multidimensional_gather_based_on_match(self): + def graph_fn(): + match_results = tf.constant([1, -1, -2]) + input_tensor = tf.constant([[0, 0.5, 0, 0.5], [0, 0, 0.5, 0.5]], + dtype=tf.float32) + match = matcher.Match(match_results) + gathered_tensor = match.gather_based_on_match(input_tensor, + unmatched_value=tf.zeros(4), + ignored_value=tf.zeros(4)) + return gathered_tensor + expected_gathered_tensor = [[0, 0, 0.5, 0.5], [0, 0, 0, 0], [0, 0, 0, 0]] + gathered_tensor_out = self.execute(graph_fn, []) + self.assertAllEqual(expected_gathered_tensor, gathered_tensor_out) + + def test_multidimensional_gather_based_on_match_with_matmul_gather_op(self): + def graph_fn(): + match_results = tf.constant([1, -1, -2]) + input_tensor = tf.constant([[0, 0.5, 0, 0.5], [0, 0, 0.5, 0.5]], + dtype=tf.float32) + match = matcher.Match(match_results, use_matmul_gather=True) + gathered_tensor = match.gather_based_on_match(input_tensor, + unmatched_value=tf.zeros(4), + ignored_value=tf.zeros(4)) + return gathered_tensor + expected_gathered_tensor = [[0, 0, 0.5, 0.5], [0, 0, 0, 0], [0, 0, 0, 0]] + gathered_tensor_out = self.execute(graph_fn, []) + self.assertAllEqual(expected_gathered_tensor, gathered_tensor_out) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/core/minibatch_sampler.py b/models/research/object_detection/core/minibatch_sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..9a5b0a7242530202c5510c8ea29c4f5857f12b3b --- /dev/null +++ b/models/research/object_detection/core/minibatch_sampler.py @@ -0,0 +1,94 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Base minibatch sampler module. + +The job of the minibatch_sampler is to subsample a minibatch based on some +criterion. + +The main function call is: + subsample(indicator, batch_size, **params). +Indicator is a 1d boolean tensor where True denotes which examples can be +sampled. It returns a boolean indicator where True denotes an example has been +sampled.. + +Subclasses should implement the Subsample function and can make use of the +@staticmethod SubsampleIndicator. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from abc import ABCMeta +from abc import abstractmethod + +import six +import tensorflow.compat.v1 as tf + +from object_detection.utils import ops + + +class MinibatchSampler(six.with_metaclass(ABCMeta, object)): + """Abstract base class for subsampling minibatches.""" + + def __init__(self): + """Constructs a minibatch sampler.""" + pass + + @abstractmethod + def subsample(self, indicator, batch_size, **params): + """Returns subsample of entries in indicator. + + Args: + indicator: boolean tensor of shape [N] whose True entries can be sampled. + batch_size: desired batch size. + **params: additional keyword arguments for specific implementations of + the MinibatchSampler. + + Returns: + sample_indicator: boolean tensor of shape [N] whose True entries have been + sampled. If sum(indicator) >= batch_size, sum(is_sampled) = batch_size + """ + pass + + @staticmethod + def subsample_indicator(indicator, num_samples): + """Subsample indicator vector. + + Given a boolean indicator vector with M elements set to `True`, the function + assigns all but `num_samples` of these previously `True` elements to + `False`. If `num_samples` is greater than M, the original indicator vector + is returned. + + Args: + indicator: a 1-dimensional boolean tensor indicating which elements + are allowed to be sampled and which are not. + num_samples: int32 scalar tensor + + Returns: + a boolean tensor with the same shape as input (indicator) tensor + """ + indices = tf.where(indicator) + indices = tf.random_shuffle(indices) + indices = tf.reshape(indices, [-1]) + + num_samples = tf.minimum(tf.size(indices), num_samples) + selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1])) + + selected_indicator = ops.indices_to_dense_vector(selected_indices, + tf.shape(indicator)[0]) + + return tf.equal(selected_indicator, 1) diff --git a/models/research/object_detection/core/minibatch_sampler_test.py b/models/research/object_detection/core/minibatch_sampler_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b3ddadd25eb587c2087e23a20807488fee955882 --- /dev/null +++ b/models/research/object_detection/core/minibatch_sampler_test.py @@ -0,0 +1,71 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+"""Tests for google3.research.vale.object_detection.minibatch_sampler."""
+
+import numpy as np
+import tensorflow.compat.v1 as tf
+
+from object_detection.core import minibatch_sampler
+from object_detection.utils import test_case
+
+
+class MinibatchSamplerTest(test_case.TestCase):
+
+  def test_subsample_indicator_when_more_true_elements_than_num_samples(self):
+    np_indicator = np.array([True, False, True, False, True, True, False])
+    def graph_fn(indicator):
+      samples = minibatch_sampler.MinibatchSampler.subsample_indicator(
+          indicator, 3)
+      return samples
+    samples_out = self.execute(graph_fn, [np_indicator])
+    self.assertEqual(np.sum(samples_out), 3)
+    self.assertAllEqual(samples_out,
+                        np.logical_and(samples_out, np_indicator))
+
+  def test_subsample_indicator_when_less_true_elements_than_num_samples(self):
+    np_indicator = np.array([True, False, True, False, True, True, False])
+    def graph_fn(indicator):
+      samples = minibatch_sampler.MinibatchSampler.subsample_indicator(
+          indicator, 5)
+      return samples
+    samples_out = self.execute(graph_fn, [np_indicator])
+    self.assertEqual(np.sum(samples_out), 4)
+    self.assertAllEqual(samples_out,
+                        np.logical_and(samples_out, np_indicator))
+
+  def test_subsample_indicator_when_num_samples_is_zero(self):
+    np_indicator = np.array([True, False, True, False, True, True, False])
+    def graph_fn(indicator):
+      samples_none = minibatch_sampler.MinibatchSampler.subsample_indicator(
+          indicator, 0)
+      return samples_none
+    samples_out = self.execute(graph_fn, [np_indicator])
+    self.assertAllEqual(
+        np.zeros_like(samples_out, dtype=bool),
+        samples_out)
+
+  def test_subsample_indicator_when_indicator_all_false(self):
+    indicator_empty = np.zeros([0], dtype=bool)
+    def graph_fn(indicator):
+      samples_empty = minibatch_sampler.MinibatchSampler.subsample_indicator(
+          indicator, 4)
+      return samples_empty
+    samples_out = self.execute(graph_fn, [indicator_empty])
+    self.assertEqual(0, samples_out.size)
+
+
+if __name__ == '__main__':
+  tf.test.main()
diff --git a/models/research/object_detection/core/model.py b/models/research/object_detection/core/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..0430b37b5c31c6e1ce9604898aaa8e73319400f8
--- /dev/null
+++ b/models/research/object_detection/core/model.py
@@ -0,0 +1,445 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Abstract detection model.
+ +This file defines a generic base class for detection models. Programs that are +designed to work with arbitrary detection models should only depend on this +class. We intend for the functions in this class to follow tensor-in/tensor-out +design, thus all functions have tensors or lists/dictionaries holding tensors as +inputs and outputs. + +Abstractly, detection models predict output tensors given input images +which can be passed to a loss function at training time or passed to a +postprocessing function at eval time. The computation graphs at a high level +consequently look as follows: + +Training time: +inputs (images tensor) -> preprocess -> predict -> loss -> outputs (loss tensor) + +Evaluation time: +inputs (images tensor) -> preprocess -> predict -> postprocess + -> outputs (boxes tensor, scores tensor, classes tensor, num_detections tensor) + +DetectionModels must thus implement four functions (1) preprocess, (2) predict, +(3) postprocess and (4) loss. DetectionModels should make no assumptions about +the input size or aspect ratio --- they are responsible for doing any +resize/reshaping necessary (see docstring for the preprocess function). +Output classes are always integers in the range [0, num_classes). Any mapping +of these integers to semantic labels is to be handled outside of this class. + +Images are resized in the `preprocess` method. All of `preprocess`, `predict`, +and `postprocess` should be reentrant. + +The `preprocess` method runs `image_resizer_fn` that returns resized_images and +`true_image_shapes`. Since `image_resizer_fn` can pad the images with zeros, +true_image_shapes indicate the slices that contain the image without padding. +This is useful for padding images to be a fixed size for batching. + +The `postprocess` method uses the true image shapes to clip predictions that lie +outside of images. + +By default, DetectionModels produce bounding box detections; However, we support +a handful of auxiliary annotations associated with each bounding box, namely, +instance masks and keypoints. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import abc +import six +import tensorflow.compat.v1 as tf + +from object_detection.core import standard_fields as fields + + +# If using a new enough version of TensorFlow, detection models should be a +# tf module or keras model for tracking. +try: + _BaseClass = tf.keras.layers.Layer +except AttributeError: + _BaseClass = object + + +class DetectionModel(six.with_metaclass(abc.ABCMeta, _BaseClass)): + """Abstract base class for detection models. + + Extends tf.Module to guarantee variable tracking. + """ + + def __init__(self, num_classes): + """Constructor. + + Args: + num_classes: number of classes. Note that num_classes *does not* include + background categories that might be implicitly predicted in various + implementations. + """ + self._num_classes = num_classes + self._groundtruth_lists = {} + + super(DetectionModel, self).__init__() + + @property + def num_classes(self): + return self._num_classes + + def groundtruth_lists(self, field): + """Access list of groundtruth tensors. + + Args: + field: a string key, options are + fields.BoxListFields.{boxes,classes,masks,keypoints, + keypoint_visibilities} or + fields.InputDataFields.is_annotated. + + Returns: + a list of tensors holding groundtruth information (see also + provide_groundtruth function below), with one entry for each image in the + batch. 
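For illustration, a minimal usage sketch of the groundtruth accessors described above (an editorial example, not part of the patch; `my_model` stands in for any concrete DetectionModel subclass and the tensors are placeholders):

  import tensorflow.compat.v1 as tf
  from object_detection.core import standard_fields as fields

  # my_model = SomeConcreteDetectionModel(num_classes=3)  # hypothetical subclass
  my_model.provide_groundtruth(
      groundtruth_boxes_list=[tf.constant([[0.1, 0.1, 0.5, 0.5]])],
      groundtruth_classes_list=[tf.constant([[0., 1., 0.]])])
  if my_model.groundtruth_has_field(fields.BoxListFields.boxes):
    boxes_list = my_model.groundtruth_lists(fields.BoxListFields.boxes)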
+    Raises:
+      RuntimeError: if the field has not been provided via provide_groundtruth.
+    """
+    if field not in self._groundtruth_lists:
+      raise RuntimeError('Groundtruth tensor {} has not been provided'.format(
+          field))
+    return self._groundtruth_lists[field]
+
+  def groundtruth_has_field(self, field):
+    """Determines whether the groundtruth includes the given field.
+
+    Args:
+      field: a string key, options are
+        fields.BoxListFields.{boxes,classes,masks,keypoints,
+        keypoint_visibilities} or
+        fields.InputDataFields.is_annotated.
+
+    Returns:
+      True if the groundtruth includes the given field, False otherwise.
+    """
+    return field in self._groundtruth_lists
+
+  @staticmethod
+  def get_side_inputs(features):
+    """Get side inputs from input features.
+
+    This placeholder method provides a way for a meta-architecture to specify
+    how to grab additional side inputs from input features (in addition to the
+    image itself) and allows models to depend on contextual information. By
+    default, detection models do not use side information (and thus this method
+    returns an empty dictionary); however, it can be overridden if side inputs
+    are necessary.
+
+    Args:
+      features: A dictionary of tensors.
+
+    Returns:
+      An empty dictionary by default.
+    """
+    return {}
+
+  @abc.abstractmethod
+  def preprocess(self, inputs):
+    """Input preprocessing.
+
+    To be overridden by implementations.
+
+    This function is responsible for any scaling/shifting of input values that
+    is necessary prior to running the detector on an input image.
+    It is also responsible for any resizing and padding that might be necessary,
+    as images are assumed to arrive in arbitrary sizes. While this function
+    could conceivably be part of the predict method (below), it is often
+    convenient to keep these separate --- for example, we may want to preprocess
+    on one device, place onto a queue, and let another device (e.g., the GPU)
+    handle prediction.
+
+    A few important notes about the preprocess function:
+
+    We assume that this operation does not have any trainable variables nor
+    does it affect the groundtruth annotations in any way (thus data
+    augmentation operations such as random cropping should be performed
+    externally).
+
+    There is no assumption that the batch size in this function is the same as
+    the batch size in the predict function. In fact, we recommend calling the
+    preprocess function prior to calling any batching operations (which should
+    happen outside of the model) and thus assuming that batch sizes are equal
+    to 1 in the preprocess function.
+
+    There is also no explicit assumption that the output resolutions
+    must be fixed across inputs --- this is to support "fully convolutional"
+    settings in which input images can have different shapes/resolutions.
+
+    Args:
+      inputs: a [batch, height_in, width_in, channels] float32 tensor
+        representing a batch of images with values between 0 and 255.0.
+
+    Returns:
+      preprocessed_inputs: a [batch, height_out, width_out, channels] float32
+        tensor representing a batch of images.
+      true_image_shapes: int32 tensor of shape [batch, 3] where each row is
+        of the form [height, width, channels] indicating the shapes
+        of true images in the resized images, as resized images can be padded
+        with zeros.
+    """
+    pass
+
+  @abc.abstractmethod
+  def predict(self, preprocessed_inputs, true_image_shapes, **side_inputs):
+    """Predict prediction tensors from inputs tensor.
+
+    Outputs of this function can be passed to loss or postprocess functions.
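To make the tensor-in/tensor-out contract concrete, here is an illustrative sketch (editorial, not from the original patch) of how preprocess, predict, loss and postprocess compose, again assuming `my_model` is a hypothetical concrete DetectionModel subclass:

  import tensorflow.compat.v1 as tf

  images = tf.zeros([1, 300, 300, 3], dtype=tf.float32)  # placeholder batch
  preprocessed_inputs, true_image_shapes = my_model.preprocess(images)
  prediction_dict = my_model.predict(preprocessed_inputs, true_image_shapes)
  losses_dict = my_model.loss(prediction_dict, true_image_shapes)        # training time
  detections = my_model.postprocess(prediction_dict, true_image_shapes)  # eval time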
+ + Args: + preprocessed_inputs: a [batch, height, width, channels] float32 tensor + representing a batch of images. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + **side_inputs: additional tensors that are required by the network. + + Returns: + prediction_dict: a dictionary holding prediction tensors to be + passed to the Loss or Postprocess functions. + """ + pass + + @abc.abstractmethod + def postprocess(self, prediction_dict, true_image_shapes, **params): + """Convert predicted output tensors to final detections. + + This stage typically performs a few things such as + * Non-Max Suppression to remove overlapping detection boxes. + * Score conversion and background class removal. + + Outputs adhere to the following conventions: + * Classes are integers in [0, num_classes); background classes are removed + and the first non-background class is mapped to 0. If the model produces + class-agnostic detections, then no output is produced for classes. + * Boxes are to be interpreted as being in [y_min, x_min, y_max, x_max] + format and normalized relative to the image window. + * `num_detections` is provided for settings where detections are padded to a + fixed number of boxes. + * We do not specifically assume any kind of probabilistic interpretation + of the scores --- the only important thing is their relative ordering. + Thus implementations of the postprocess function are free to output + logits, probabilities, calibrated probabilities, or anything else. + + Args: + prediction_dict: a dictionary holding prediction tensors. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + **params: Additional keyword arguments for specific implementations of + DetectionModel. + + Returns: + detections: a dictionary containing the following fields + detection_boxes: [batch, max_detections, 4] + detection_scores: [batch, max_detections] + detection_classes: [batch, max_detections] + (If a model is producing class-agnostic detections, this field may be + missing) + instance_masks: [batch, max_detections, image_height, image_width] + (optional) + keypoints: [batch, max_detections, num_keypoints, 2] (optional) + num_detections: [batch] + + In addition to the above fields this stage also outputs the following + raw tensors: + + raw_detection_boxes: [batch, total_detections, 4] tensor containing + all detection boxes from `prediction_dict` in the format + [ymin, xmin, ymax, xmax] and normalized co-ordinates. + raw_detection_scores: [batch, total_detections, + num_classes_with_background] tensor of class score logits for + raw detection boxes. + """ + pass + + @abc.abstractmethod + def loss(self, prediction_dict, true_image_shapes): + """Compute scalar loss tensors with respect to provided groundtruth. + + Calling this function requires that groundtruth tensors have been + provided via the provide_groundtruth function. + + Args: + prediction_dict: a dictionary holding predicted tensors + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. 
+ + Returns: + a dictionary mapping strings (loss names) to scalar tensors representing + loss values. + """ + pass + + def provide_groundtruth(self, + groundtruth_boxes_list, + groundtruth_classes_list, + groundtruth_masks_list=None, + groundtruth_keypoints_list=None, + groundtruth_keypoint_visibilities_list=None, + groundtruth_weights_list=None, + groundtruth_confidences_list=None, + groundtruth_is_crowd_list=None, + groundtruth_group_of_list=None, + groundtruth_area_list=None, + is_annotated_list=None, + groundtruth_labeled_classes=None): + """Provide groundtruth tensors. + + Args: + groundtruth_boxes_list: a list of 2-D tf.float32 tensors of shape + [num_boxes, 4] containing coordinates of the groundtruth boxes. + Groundtruth boxes are provided in [y_min, x_min, y_max, x_max] + format and assumed to be normalized and clipped + relative to the image window with y_min <= y_max and x_min <= x_max. + groundtruth_classes_list: a list of 2-D tf.float32 one-hot (or k-hot) + tensors of shape [num_boxes, num_classes] containing the class targets + with the 0th index assumed to map to the first non-background class. + groundtruth_masks_list: a list of 3-D tf.float32 tensors of + shape [num_boxes, height_in, width_in] containing instance + masks with values in {0, 1}. If None, no masks are provided. + Mask resolution `height_in`x`width_in` must agree with the resolution + of the input image tensor provided to the `preprocess` function. + groundtruth_keypoints_list: a list of 3-D tf.float32 tensors of + shape [num_boxes, num_keypoints, 2] containing keypoints. + Keypoints are assumed to be provided in normalized coordinates and + missing keypoints should be encoded as NaN (but it is recommended to use + `groundtruth_keypoint_visibilities_list`). + groundtruth_keypoint_visibilities_list: a list of 3-D tf.bool tensors + of shape [num_boxes, num_keypoints] containing keypoint visibilities. + groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape + [num_boxes] containing weights for groundtruth boxes. + groundtruth_confidences_list: A list of 2-D tf.float32 tensors of shape + [num_boxes, num_classes] containing class confidences for groundtruth + boxes. + groundtruth_is_crowd_list: A list of 1-D tf.bool tensors of shape + [num_boxes] containing is_crowd annotations. + groundtruth_group_of_list: A list of 1-D tf.bool tensors of shape + [num_boxes] containing group_of annotations. + groundtruth_area_list: A list of 1-D tf.float32 tensors of shape + [num_boxes] containing the area (in the original absolute coordinates) + of the annotations. + is_annotated_list: A list of scalar tf.bool tensors indicating whether + images have been labeled or not. + groundtruth_labeled_classes: A list of 1-D tf.float32 tensors of shape + [num_classes], containing label indices encoded as k-hot of the classes + that are exhaustively annotated. + """ + self._groundtruth_lists[fields.BoxListFields.boxes] = groundtruth_boxes_list + self._groundtruth_lists[ + fields.BoxListFields.classes] = groundtruth_classes_list + if groundtruth_weights_list: + self._groundtruth_lists[fields.BoxListFields. + weights] = groundtruth_weights_list + if groundtruth_confidences_list: + self._groundtruth_lists[fields.BoxListFields. 
+                              confidences] = groundtruth_confidences_list
+    if groundtruth_masks_list:
+      self._groundtruth_lists[
+          fields.BoxListFields.masks] = groundtruth_masks_list
+    if groundtruth_keypoints_list:
+      self._groundtruth_lists[
+          fields.BoxListFields.keypoints] = groundtruth_keypoints_list
+    if groundtruth_keypoint_visibilities_list:
+      self._groundtruth_lists[
+          fields.BoxListFields.keypoint_visibilities] = (
+              groundtruth_keypoint_visibilities_list)
+    if groundtruth_is_crowd_list:
+      self._groundtruth_lists[
+          fields.BoxListFields.is_crowd] = groundtruth_is_crowd_list
+    if groundtruth_group_of_list:
+      self._groundtruth_lists[
+          fields.BoxListFields.group_of] = groundtruth_group_of_list
+    if groundtruth_area_list:
+      self._groundtruth_lists[
+          fields.InputDataFields.groundtruth_area] = groundtruth_area_list
+    if is_annotated_list:
+      self._groundtruth_lists[
+          fields.InputDataFields.is_annotated] = is_annotated_list
+    if groundtruth_labeled_classes:
+      self._groundtruth_lists[
+          fields.InputDataFields
+          .groundtruth_labeled_classes] = groundtruth_labeled_classes
+
+  @abc.abstractmethod
+  def regularization_losses(self):
+    """Returns a list of regularization losses for this model.
+
+    Returns a list of regularization losses for this model that the estimator
+    needs to use during training/optimization.
+
+    Returns:
+      A list of regularization loss tensors.
+    """
+    pass
+
+  @abc.abstractmethod
+  def restore_map(self, fine_tune_checkpoint_type='detection'):
+    """Returns a map of variables to load from a foreign checkpoint.
+
+    Returns a map of variable names to load from a checkpoint to variables in
+    the model graph. This enables the model to initialize based on weights from
+    another task. For example, the feature extractor variables from a
+    classification model can be used to bootstrap training of an object
+    detector. When loading from an object detection model, the checkpoint model
+    should have the same parameters as this detection model with the exception
+    of the num_classes parameter.
+
+    Args:
+      fine_tune_checkpoint_type: whether to restore from a full detection
+        checkpoint (with compatible variable names) or to restore from a
+        classification checkpoint for initialization prior to training.
+        Valid values: `detection`, `classification`. Default 'detection'.
+
+    Returns:
+      A dict mapping variable names (to load from a checkpoint) to variables in
+      the model graph.
+    """
+    pass
+
+  @abc.abstractmethod
+  def updates(self):
+    """Returns a list of update operators for this model.
+
+    Returns a list of update operators for this model that must be executed at
+    each training step. The estimator's train op needs to have a control
+    dependency on these updates.
+
+    Returns:
+      A list of update operators.
+    """
+    pass
+
+  def call(self, images):
+    """Returns detections from a batch of images.
+
+    This method calls the preprocess, predict and postprocess functions
+    sequentially and returns the output.
+
+    Args:
+      images: a [batch_size, height, width, channels] float tensor.
+
+    Returns:
+      detections: The dict of tensors returned by the postprocess function.
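As a brief editorial usage note (not part of the patch): because DetectionModel extends tf.keras.layers.Layer when available, a built model can be invoked directly on an image batch, which chains the three stages through this `call` method; model_test.py exercises the FakeModel in exactly this way:

  detection_model = FakeModel()  # or any concrete DetectionModel subclass
  detections = detection_model(tf.zeros((1, 128, 128, 3)))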
+ """ + + preprocessed_images, shapes = self.preprocess(images) + prediction_dict = self.predict(preprocessed_images, shapes) + return self.postprocess(prediction_dict, shapes) diff --git a/models/research/object_detection/core/model_test.py b/models/research/object_detection/core/model_test.py new file mode 100644 index 0000000000000000000000000000000000000000..2bb1ab343a6634ffc8df9f71378e83371921da7a --- /dev/null +++ b/models/research/object_detection/core/model_test.py @@ -0,0 +1,98 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for model API.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf + +from object_detection.core import model +from object_detection.utils import test_case + + +class FakeModel(model.DetectionModel): + + def __init__(self): + + # sub-networks containing weights of different shapes. + self._network1 = tf.keras.Sequential([ + tf.keras.layers.Conv2D(8, 1) + ]) + + self._network2 = tf.keras.Sequential([ + tf.keras.layers.Conv2D(16, 1) + ]) + + super(FakeModel, self).__init__(num_classes=0) + + def preprocess(self, images): + return images, tf.shape(images) + + def predict(self, images, shapes): + return {'prediction': self._network2(self._network1(images))} + + def postprocess(self, prediction_dict, shapes): + return prediction_dict + + def loss(self): + return tf.constant(0.0) + + def updates(self): + return [] + + def restore_map(self): + return {} + + def regularization_losses(self): + return [] + + +class ModelTest(test_case.TestCase): + + def test_model_call(self): + + detection_model = FakeModel() + + def graph_fn(): + return detection_model(tf.zeros((1, 128, 128, 3))) + + result = self.execute(graph_fn, []) + self.assertEqual(result['prediction'].shape, + (1, 128, 128, 16)) + + def test_freeze(self): + + detection_model = FakeModel() + detection_model(tf.zeros((1, 128, 128, 3))) + + net1_var_shapes = [tuple(var.get_shape().as_list()) for var in + detection_model._network1.trainable_variables] + + del detection_model + + detection_model = FakeModel() + detection_model._network2.trainable = False + detection_model(tf.zeros((1, 128, 128, 3))) + + var_shapes = [tuple(var.get_shape().as_list()) for var in + detection_model._network1.trainable_variables] + + self.assertEqual(set(net1_var_shapes), set(var_shapes)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/core/multiclass_nms_test.py b/models/research/object_detection/core/multiclass_nms_test.py new file mode 100644 index 0000000000000000000000000000000000000000..80be89da926115bc55eaab5a5c471d4f5ae0bca1 --- /dev/null +++ b/models/research/object_detection/core/multiclass_nms_test.py @@ -0,0 +1,583 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for tensorflow_models.object_detection.core.post_processing.""" +import numpy as np +import tensorflow.compat.v1 as tf +from object_detection.core import post_processing +from object_detection.core import standard_fields as fields +from object_detection.utils import test_case + + +class MulticlassNonMaxSuppressionTest(test_case.TestCase): + + def test_multiclass_nms_select_with_shared_boxes_cpu_only(self): + boxes = np.array( + [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], + [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], + [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) + scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], + [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], + np.float32) + + def graph_fn(boxes, scores): + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + nms, _ = post_processing.multiclass_non_max_suppression( + boxes, scores, score_thresh, iou_thresh, max_output_size) + return (nms.get(), nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes)) + + exp_nms_corners = [[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 1000, 1, 1002], + [0, 100, 1, 101]] + exp_nms_scores = [.95, .9, .85, .3] + exp_nms_classes = [0, 0, 1, 0] + (nms_corners_output, nms_scores_output, + nms_classes_output) = self.execute_cpu(graph_fn, [boxes, scores]) + self.assertAllClose(nms_corners_output, exp_nms_corners) + self.assertAllClose(nms_scores_output, exp_nms_scores) + self.assertAllClose(nms_classes_output, exp_nms_classes) + + def test_multiclass_nms_select_with_shared_boxes_pad_to_max_output_size(self): + boxes = np.array([[[0, 0, 1, 1]], + [[0, 0.1, 1, 1.1]], + [[0, -0.1, 1, 0.9]], + [[0, 10, 1, 11]], + [[0, 10.1, 1, 11.1]], + [[0, 100, 1, 101]], + [[0, 1000, 1, 1002]], + [[0, 1000, 1, 1002.1]]], np.float32) + scores = np.array([[.9, 0.01], [.75, 0.05], + [.6, 0.01], [.95, 0], + [.5, 0.01], [.3, 0.01], + [.01, .85], [.01, .5]], np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_size_per_class = 4 + max_output_size = 5 + + exp_nms_corners = [[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 1000, 1, 1002], + [0, 100, 1, 101]] + exp_nms_scores = [.95, .9, .85, .3] + exp_nms_classes = [0, 0, 1, 0] + + def graph_fn(boxes, scores): + nms, num_valid_nms_boxes = post_processing.multiclass_non_max_suppression( + boxes, + scores, + score_thresh, + iou_thresh, + max_size_per_class, + max_total_size=max_output_size, + pad_to_max_output_size=True) + return [nms.get(), nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes), num_valid_nms_boxes] + + [nms_corners_output, nms_scores_output, nms_classes_output, + num_valid_nms_boxes] = self.execute(graph_fn, [boxes, scores]) + + self.assertEqual(num_valid_nms_boxes, 4) + self.assertAllClose(nms_corners_output[0:num_valid_nms_boxes], + exp_nms_corners) + self.assertAllClose(nms_scores_output[0:num_valid_nms_boxes], + 
exp_nms_scores) + self.assertAllClose(nms_classes_output[0:num_valid_nms_boxes], + exp_nms_classes) + + def test_multiclass_nms_select_with_shared_boxes_given_keypoints(self): + boxes = np.array( + [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], + [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], + [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) + scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], + [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], + np.float32) + num_keypoints = 6 + keypoints = np.tile(np.reshape(range(8), [8, 1, 1]), + [1, num_keypoints, 2]).astype(np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + + def graph_fn(boxes, scores, keypoints): + nms, nms_valid = post_processing.multiclass_non_max_suppression( + boxes, + scores, + score_thresh, + iou_thresh, + max_output_size, + pad_to_max_output_size=True, + additional_fields={fields.BoxListFields.keypoints: keypoints}) + return [ + nms.get(), + nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes), + nms.get_field(fields.BoxListFields.keypoints), nms_valid + ] + + exp_nms_corners = [[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 1000, 1, 1002], + [0, 100, 1, 101]] + exp_nms_scores = [.95, .9, .85, .3] + exp_nms_classes = [0, 0, 1, 0] + exp_nms_keypoints = np.tile( + np.reshape(np.array([3, 0, 6, 5], np.float32), [4, 1, 1]), + [1, num_keypoints, 2]) + (nms_corners_output, nms_scores_output, nms_classes_output, nms_keypoints, + nms_valid) = self.execute(graph_fn, [boxes, scores, keypoints]) + + self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) + self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) + self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) + self.assertAllEqual(nms_keypoints[:nms_valid], exp_nms_keypoints) + + def test_multiclass_nms_with_shared_boxes_given_keypoint_heatmaps(self): + boxes = np.array( + [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], + [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], + [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) + + scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], + [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], + np.float32) + + num_boxes = boxes.shape[0] + heatmap_height = 5 + heatmap_width = 5 + num_keypoints = 17 + keypoint_heatmaps = np.ones( + [num_boxes, heatmap_height, heatmap_width, num_keypoints], + dtype=np.float32) + + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + exp_nms_corners = [[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 1000, 1, 1002], + [0, 100, 1, 101]] + + exp_nms_scores = [.95, .9, .85, .3] + exp_nms_classes = [0, 0, 1, 0] + exp_nms_keypoint_heatmaps = np.ones( + (4, heatmap_height, heatmap_width, num_keypoints), dtype=np.float32) + + def graph_fn(boxes, scores, keypoint_heatmaps): + nms, nms_valid = post_processing.multiclass_non_max_suppression( + boxes, + scores, + score_thresh, + iou_thresh, + max_output_size, + pad_to_max_output_size=True, + additional_fields={ + fields.BoxListFields.keypoint_heatmaps: keypoint_heatmaps + }) + return [ + nms.get(), + nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes), + nms.get_field(fields.BoxListFields.keypoint_heatmaps), nms_valid + ] + + (nms_corners_output, nms_scores_output, nms_classes_output, + nms_keypoint_heatmaps, + nms_valid) = self.execute(graph_fn, [boxes, scores, keypoint_heatmaps]) + self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) + 
self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) + self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) + self.assertAllEqual(nms_keypoint_heatmaps[:nms_valid], + exp_nms_keypoint_heatmaps) + + def test_multiclass_nms_with_additional_fields(self): + boxes = np.array( + [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], + [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], + [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) + + scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], + [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], + np.float32) + + coarse_boxes_key = 'coarse_boxes' + coarse_boxes = np.array( + [[0.1, 0.1, 1.1, 1.1], [0.1, 0.2, 1.1, 1.2], [0.1, -0.2, 1.1, 1.0], + [0.1, 10.1, 1.1, 11.1], [0.1, 10.2, 1.1, 11.2], [ + 0.1, 100.1, 1.1, 101.1 + ], [0.1, 1000.1, 1.1, 1002.1], [0.1, 1000.1, 1.1, 1002.2]], np.float32) + + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + + exp_nms_corners = np.array([[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 1000, 1, 1002], + [0, 100, 1, 101]], dtype=np.float32) + + exp_nms_coarse_corners = np.array([[0.1, 10.1, 1.1, 11.1], + [0.1, 0.1, 1.1, 1.1], + [0.1, 1000.1, 1.1, 1002.1], + [0.1, 100.1, 1.1, 101.1]], + dtype=np.float32) + + exp_nms_scores = [.95, .9, .85, .3] + exp_nms_classes = [0, 0, 1, 0] + + def graph_fn(boxes, scores, coarse_boxes): + nms, nms_valid = post_processing.multiclass_non_max_suppression( + boxes, + scores, + score_thresh, + iou_thresh, + max_output_size, + pad_to_max_output_size=True, + additional_fields={coarse_boxes_key: coarse_boxes}) + return [ + nms.get(), + nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes), + nms.get_field(coarse_boxes_key), + nms_valid, + ] + + (nms_corners_output, nms_scores_output, nms_classes_output, + nms_coarse_corners, + nms_valid) = self.execute(graph_fn, [boxes, scores, coarse_boxes]) + self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) + self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) + self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) + self.assertAllEqual(nms_coarse_corners[:nms_valid], exp_nms_coarse_corners) + + def test_multiclass_nms_select_with_shared_boxes_given_masks(self): + boxes = np.array( + [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], + [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], + [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) + scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], + [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], + np.float32) + num_classes = 2 + mask_height = 3 + mask_width = 3 + masks = np.tile( + np.reshape(range(8), [8, 1, 1, 1]), + [1, num_classes, mask_height, mask_width]) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + + exp_nms_corners = [[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 1000, 1, 1002], + [0, 100, 1, 101]] + exp_nms_scores = [.95, .9, .85, .3] + exp_nms_classes = [0, 0, 1, 0] + exp_nms_masks_tensor = np.tile( + np.reshape(np.array([3, 0, 6, 5], np.float32), [4, 1, 1]), + [1, mask_height, mask_width]) + + def graph_fn(boxes, scores, masks): + nms, nms_valid = post_processing.multiclass_non_max_suppression( + boxes, + scores, + score_thresh, + iou_thresh, + max_output_size, + masks=masks, + pad_to_max_output_size=True) + return [ + nms.get(), + nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes), + nms.get_field(fields.BoxListFields.masks), nms_valid + ] + + (nms_corners_output, 
nms_scores_output, nms_classes_output, nms_masks, + nms_valid) = self.execute(graph_fn, [boxes, scores, masks]) + self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) + self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) + self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) + self.assertAllEqual(nms_masks[:nms_valid], exp_nms_masks_tensor) + + def test_multiclass_nms_select_with_clip_window(self): + boxes = np.array([[[0, 0, 10, 10]], [[1, 1, 11, 11]]], np.float32) + scores = np.array([[.9], [.75]], np.float32) + clip_window = np.array([5, 4, 8, 7], np.float32) + score_thresh = 0.0 + iou_thresh = 0.5 + max_output_size = 100 + + exp_nms_corners = [[5, 4, 8, 7]] + exp_nms_scores = [.9] + exp_nms_classes = [0] + + def graph_fn(boxes, scores, clip_window): + nms, nms_valid = post_processing.multiclass_non_max_suppression( + boxes, + scores, + score_thresh, + iou_thresh, + max_output_size, + pad_to_max_output_size=True, + clip_window=clip_window) + return [ + nms.get(), + nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes), nms_valid + ] + + (nms_corners_output, nms_scores_output, nms_classes_output, + nms_valid) = self.execute(graph_fn, [boxes, scores, clip_window]) + self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) + self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) + self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) + + def test_multiclass_nms_select_with_clip_window_change_coordinate_frame(self): + boxes = np.array([[[0, 0, 10, 10]], [[1, 1, 11, 11]]], np.float32) + scores = np.array([[.9], [.75]], np.float32) + clip_window = np.array([5, 4, 8, 7], np.float32) + score_thresh = 0.0 + iou_thresh = 0.5 + max_output_size = 100 + + exp_nms_corners = [[0, 0, 1, 1]] + exp_nms_scores = [.9] + exp_nms_classes = [0] + + def graph_fn(boxes, scores, clip_window): + nms, nms_valid = post_processing.multiclass_non_max_suppression( + boxes, + scores, + score_thresh, + iou_thresh, + max_output_size, + clip_window=clip_window, + pad_to_max_output_size=True, + change_coordinate_frame=True) + return [ + nms.get(), + nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes), nms_valid + ] + + (nms_corners_output, nms_scores_output, nms_classes_output, + nms_valid) = self.execute(graph_fn, [boxes, scores, clip_window]) + self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) + self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) + self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) + + def test_multiclass_nms_select_with_per_class_cap(self): + boxes = np.array( + [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], + [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], + [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) + scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], + [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], + np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_size_per_class = 2 + + exp_nms_corners = [[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 1000, 1, 1002]] + exp_nms_scores = [.95, .9, .85] + exp_nms_classes = [0, 0, 1] + + def graph_fn(boxes, scores): + nms, nms_valid = post_processing.multiclass_non_max_suppression( + boxes, + scores, + score_thresh, + iou_thresh, + max_size_per_class, + pad_to_max_output_size=True) + return [ + nms.get(), + nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes), + nms_valid 
+ ] + + (nms_corners_output, nms_scores_output, + nms_classes_output, nms_valid) = self.execute(graph_fn, [boxes, scores]) + self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) + self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) + self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) + + def test_multiclass_nms_select_with_total_cap(self): + boxes = np.array( + [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], + [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], + [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) + scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], + [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], + np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_size_per_class = 4 + max_total_size = 2 + + exp_nms_corners = [[0, 10, 1, 11], + [0, 0, 1, 1]] + exp_nms_scores = [.95, .9] + exp_nms_classes = [0, 0] + + def graph_fn(boxes, scores): + nms, nms_valid = post_processing.multiclass_non_max_suppression( + boxes, + scores, + score_thresh, + iou_thresh, + max_size_per_class, + max_total_size, + pad_to_max_output_size=True) + return [ + nms.get(), + nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes), + nms_valid + ] + + (nms_corners_output, nms_scores_output, + nms_classes_output, nms_valid) = self.execute(graph_fn, [boxes, scores]) + self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) + self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) + self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) + + def test_multiclass_nms_threshold_then_select_with_shared_boxes(self): + boxes = np.array( + [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], + [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], + [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) + scores = np.array([[.9], [.75], [.6], [.95], [.5], [.3], [.01], [.01]], + np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 3 + + exp_nms = [[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 100, 1, 101]] + + def graph_fn(boxes, scores): + nms, nms_valid = post_processing.multiclass_non_max_suppression( + boxes, + scores, + score_thresh, + iou_thresh, + max_output_size, + pad_to_max_output_size=True) + return nms.get(), nms_valid + + nms_output, nms_valid = self.execute(graph_fn, [boxes, scores]) + self.assertAllClose(nms_output[:nms_valid], exp_nms) + + def test_multiclass_nms_select_with_separate_boxes(self): + boxes = np.array( + [[[0, 0, 1, 1], [0, 0, 4, 5]], [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], + [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], [[0, 10, 1, 11], [ + 0, 10, 1, 11 + ]], [[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], + [[0, 100, 1, 101], [0, 100, 1, 101]], + [[0, 1000, 1, 1002], [0, 999, 2, 1004]], + [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]], np.float32) + scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], + [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], + np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + + exp_nms_corners = [[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 999, 2, 1004], + [0, 100, 1, 101]] + exp_nms_scores = [.95, .9, .85, .3] + exp_nms_classes = [0, 0, 1, 0] + + def graph_fn(boxes, scores): + nms, nms_valid = post_processing.multiclass_non_max_suppression( + boxes, + scores, + score_thresh, + iou_thresh, + max_output_size, + pad_to_max_output_size=True) + return [ + nms.get(), + nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes), + 
nms_valid + ] + + (nms_corners_output, nms_scores_output, + nms_classes_output, nms_valid) = self.execute(graph_fn, [boxes, scores]) + self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) + self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) + self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) + + def test_multiclass_soft_nms_select_with_shared_boxes_cpu_only(self): + boxes = np.array( + [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], + [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], + [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) + scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], + [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], + np.float32) + score_thresh = 0.1 + iou_thresh = 1.0 + max_output_size = 4 + + exp_nms_corners = [[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 1000, 1, 1002], + [0, 0.1, 1, 1.1]] + exp_nms_scores = [.95, .9, .85, .384] + exp_nms_classes = [0, 0, 1, 0] + + def graph_fn(boxes, scores): + nms, _ = post_processing.multiclass_non_max_suppression( + boxes, + scores, + score_thresh, + iou_thresh, + max_size_per_class=max_output_size, + max_total_size=max_output_size, + soft_nms_sigma=0.5) + return [ + nms.get(), + nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes) + ] + + (nms_corners_output, nms_scores_output, + nms_classes_output) = self.execute_cpu(graph_fn, [boxes, scores]) + self.assertAllClose( + nms_corners_output, exp_nms_corners, rtol=1e-2, atol=1e-2) + self.assertAllClose(nms_scores_output, exp_nms_scores, rtol=1e-2, atol=1e-2) + self.assertAllClose( + nms_classes_output, exp_nms_classes, rtol=1e-2, atol=1e-2) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/core/post_processing.py b/models/research/object_detection/core/post_processing.py new file mode 100644 index 0000000000000000000000000000000000000000..e425cd08a2f11b4ebcc999b01ac4025101a93a8c --- /dev/null +++ b/models/research/object_detection/core/post_processing.py @@ -0,0 +1,1226 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Post-processing operations on detected boxes.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import numpy as np +from six.moves import range +from six.moves import zip +import tensorflow.compat.v1 as tf + +from object_detection.core import box_list +from object_detection.core import box_list_ops +from object_detection.core import standard_fields as fields +from object_detection.utils import shape_utils + +_NMS_TILE_SIZE = 512 + + +def batch_iou(boxes1, boxes2): + """Calculates the overlap between proposal and ground truth boxes. + + Some `boxes2` may have been padded. The returned `iou` tensor for these + boxes will be -1. 
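A small worked example of the IOU quantity computed here (editorial sketch in plain NumPy; the boxes are chosen only for illustration):

  import numpy as np

  # Boxes are [ymin, xmin, ymax, xmax].
  box1 = np.array([0., 0., 1., 1.])    # unit box, area 1.0
  box2 = np.array([0., 0.5, 1., 1.5])  # shifted right by 0.5, area 1.0

  inter_h = max(0., min(box1[2], box2[2]) - max(box1[0], box2[0]))  # 1.0
  inter_w = max(0., min(box1[3], box2[3]) - max(box1[1], box2[1]))  # 0.5
  intersection = inter_h * inter_w                                  # 0.5
  area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
  area2 = (box2[2] - box2[0]) * (box2[3] - box2[1])
  iou = intersection / (area1 + area2 - intersection)               # ~0.333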
+ + Args: + boxes1: a tensor with a shape of [batch_size, N, 4]. N is the number of + proposals before groundtruth assignment. The last dimension is the pixel + coordinates in [ymin, xmin, ymax, xmax] form. + boxes2: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES, 4]. This + tensor might have paddings with a negative value. + + Returns: + iou: a tensor with as a shape of [batch_size, N, MAX_NUM_INSTANCES]. + """ + with tf.name_scope('BatchIOU'): + y1_min, x1_min, y1_max, x1_max = tf.split( + value=boxes1, num_or_size_splits=4, axis=2) + y2_min, x2_min, y2_max, x2_max = tf.split( + value=boxes2, num_or_size_splits=4, axis=2) + + # Calculates the intersection area. + intersection_xmin = tf.maximum(x1_min, tf.transpose(x2_min, [0, 2, 1])) + intersection_xmax = tf.minimum(x1_max, tf.transpose(x2_max, [0, 2, 1])) + intersection_ymin = tf.maximum(y1_min, tf.transpose(y2_min, [0, 2, 1])) + intersection_ymax = tf.minimum(y1_max, tf.transpose(y2_max, [0, 2, 1])) + intersection_area = tf.maximum( + (intersection_xmax - intersection_xmin), 0) * tf.maximum( + (intersection_ymax - intersection_ymin), 0) + + # Calculates the union area. + area1 = (y1_max - y1_min) * (x1_max - x1_min) + area2 = (y2_max - y2_min) * (x2_max - x2_min) + # Adds a small epsilon to avoid divide-by-zero. + union_area = area1 + tf.transpose(area2, + [0, 2, 1]) - intersection_area + 1e-8 + + # Calculates IoU. + iou = intersection_area / union_area + + # Fills -1 for padded ground truth boxes. + padding_mask = tf.logical_and( + tf.less(intersection_xmax, 0), tf.less(intersection_ymax, 0)) + iou = tf.where(padding_mask, -tf.ones_like(iou), iou) + + return iou + + +def _self_suppression(iou, iou_threshold, loop_condition, iou_sum): + """Bounding-boxes self-suppression loop body. + + Args: + iou: A float Tensor with shape [1, num_boxes, max_num_instance]: IOUs. + iou_threshold: A scalar, representing IOU threshold. + loop_condition: The loop condition returned from last iteration. + iou_sum: iou_sum_new returned from last iteration. + + Returns: + iou_suppressed: A float Tensor with shape [1, num_boxes, max_num_instance], + IOU after suppression. + iou_threshold: A scalar, representing IOU threshold. + loop_condition: Bool Tensor of shape [], the loop condition. + iou_sum_new: The new IOU sum. + """ + del loop_condition + can_suppress_others = tf.cast( + tf.reshape(tf.reduce_max(iou, 1) <= iou_threshold, [1, -1, 1]), iou.dtype) + iou_suppressed = tf.reshape( + tf.cast( + tf.reduce_max(can_suppress_others * iou, 1) <= iou_threshold, + iou.dtype), [1, -1, 1]) * iou + iou_sum_new = tf.reduce_sum(iou_suppressed, [1, 2]) + return [ + iou_suppressed, iou_threshold, + tf.reduce_any(iou_sum - iou_sum_new > iou_threshold), iou_sum_new + ] + + +def _cross_suppression(boxes, box_slice, iou_threshold, inner_idx): + """Bounding-boxes cross-suppression loop body. + + Args: + boxes: A float Tensor of shape [1, anchors, 4], representing boxes. + box_slice: A float Tensor of shape [1, _NMS_TILE_SIZE, 4], the box tile + returned from last iteration + iou_threshold: A scalar, representing IOU threshold. + inner_idx: A scalar, representing inner index. + + Returns: + boxes: A float Tensor of shape [1, anchors, 4], representing boxes. + ret_slice: A float Tensor of shape [1, _NMS_TILE_SIZE, 4], the box tile + after suppression + iou_threshold: A scalar, representing IOU threshold. + inner_idx: A scalar, inner index incremented. 
+ """ + new_slice = tf.slice(boxes, [0, inner_idx * _NMS_TILE_SIZE, 0], + [1, _NMS_TILE_SIZE, 4]) + iou = batch_iou(new_slice, box_slice) + ret_slice = tf.expand_dims( + tf.cast(tf.reduce_all(iou < iou_threshold, [1]), box_slice.dtype), + 2) * box_slice + return boxes, ret_slice, iou_threshold, inner_idx + 1 + + +def _suppression_loop_body(boxes, iou_threshold, output_size, idx): + """Process boxes in the range [idx*_NMS_TILE_SIZE, (idx+1)*_NMS_TILE_SIZE). + + Args: + boxes: a tensor with a shape of [1, anchors, 4]. + iou_threshold: a float representing the threshold for deciding whether boxes + overlap too much with respect to IOU. + output_size: an int32 tensor of size [1]. Representing the number of + selected boxes. + idx: an integer scalar representing induction variable. + + Returns: + boxes: updated boxes. + iou_threshold: pass down iou_threshold to the next iteration. + output_size: the updated output_size. + idx: the updated induction variable. + """ + num_tiles = tf.shape(boxes)[1] // _NMS_TILE_SIZE + + # Iterates over tiles that can possibly suppress the current tile. + box_slice = tf.slice(boxes, [0, idx * _NMS_TILE_SIZE, 0], + [1, _NMS_TILE_SIZE, 4]) + _, box_slice, _, _ = tf.while_loop( + lambda _boxes, _box_slice, _threshold, inner_idx: inner_idx < idx, + _cross_suppression, [boxes, box_slice, iou_threshold, + tf.constant(0)]) + + # Iterates over the current tile to compute self-suppression. + iou = batch_iou(box_slice, box_slice) + mask = tf.expand_dims( + tf.reshape(tf.range(_NMS_TILE_SIZE), [1, -1]) > tf.reshape( + tf.range(_NMS_TILE_SIZE), [-1, 1]), 0) + iou *= tf.cast(tf.logical_and(mask, iou >= iou_threshold), iou.dtype) + suppressed_iou, _, _, _ = tf.while_loop( + lambda _iou, _threshold, loop_condition, _iou_sum: loop_condition, + _self_suppression, + [iou, iou_threshold, + tf.constant(True), + tf.reduce_sum(iou, [1, 2])]) + suppressed_box = tf.reduce_sum(suppressed_iou, 1) > 0 + box_slice *= tf.expand_dims(1.0 - tf.cast(suppressed_box, box_slice.dtype), 2) + + # Uses box_slice to update the input boxes. + mask = tf.reshape( + tf.cast(tf.equal(tf.range(num_tiles), idx), boxes.dtype), [1, -1, 1, 1]) + boxes = tf.tile(tf.expand_dims(box_slice, [1]), + [1, num_tiles, 1, 1]) * mask + tf.reshape( + boxes, [1, num_tiles, _NMS_TILE_SIZE, 4]) * (1 - mask) + boxes = tf.reshape(boxes, [1, -1, 4]) + + # Updates output_size. + output_size += tf.reduce_sum( + tf.cast(tf.reduce_any(box_slice > 0, [2]), tf.int32), [1]) + return boxes, iou_threshold, output_size, idx + 1 + + +def partitioned_non_max_suppression_padded(boxes, + scores, + max_output_size, + iou_threshold=0.5, + score_threshold=float('-inf')): + """A tiled version of [`tf.image.non_max_suppression_padded`](https://www.tensorflow.org/api_docs/python/tf/image/non_max_suppression_padded). + + The overall design of the algorithm is to handle boxes tile-by-tile: + + boxes = boxes.pad_to_multiple_of(tile_size) + num_tiles = len(boxes) // tile_size + output_boxes = [] + for i in range(num_tiles): + box_tile = boxes[i*tile_size : (i+1)*tile_size] + for j in range(i - 1): + suppressing_tile = boxes[j*tile_size : (j+1)*tile_size] + iou = batch_iou(box_tile, suppressing_tile) + # if the box is suppressed in iou, clear it to a dot + box_tile *= _update_boxes(iou) + # Iteratively handle the diagonal tile. 
+ iou = _box_overlap(box_tile, box_tile) + iou_changed = True + while iou_changed: + # boxes that are not suppressed by anything else + suppressing_boxes = _get_suppressing_boxes(iou) + # boxes that are suppressed by suppressing_boxes + suppressed_boxes = _get_suppressed_boxes(iou, suppressing_boxes) + # clear iou to 0 for boxes that are suppressed, as they cannot be used + # to suppress other boxes any more + new_iou = _clear_iou(iou, suppressed_boxes) + iou_changed = (new_iou != iou) + iou = new_iou + # remaining boxes that can still suppress others, are selected boxes. + output_boxes.append(_get_suppressing_boxes(iou)) + if len(output_boxes) >= max_output_size: + break + + Args: + boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`. + scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single + score corresponding to each box (each row of boxes). + max_output_size: a scalar integer `Tensor` representing the maximum number + of boxes to be selected by non max suppression. + iou_threshold: a float representing the threshold for deciding whether boxes + overlap too much with respect to IOU. + score_threshold: A float representing the threshold for deciding when to + remove boxes based on score. + + Returns: + selected_indices: a tensor of shape [anchors]. + num_valid_boxes: a scalar int tensor. + nms_proposals: a tensor with a shape of [anchors, 4]. It has + same dtype as input boxes. + nms_scores: a tensor with a shape of [anchors]. It has same + dtype as input scores. + argsort_ids: a tensor of shape [anchors], mapping from input order of boxes + to output order of boxes. + """ + num_boxes = tf.shape(boxes)[0] + pad = tf.cast( + tf.ceil(tf.cast(num_boxes, tf.float32) / _NMS_TILE_SIZE), + tf.int32) * _NMS_TILE_SIZE - num_boxes + + scores, argsort_ids = tf.nn.top_k(scores, k=num_boxes, sorted=True) + boxes = tf.gather(boxes, argsort_ids) + num_boxes = tf.shape(boxes)[0] + num_boxes += pad + boxes = tf.pad( + tf.cast(boxes, tf.float32), [[0, pad], [0, 0]], constant_values=-1) + scores = tf.pad(tf.cast(scores, tf.float32), [[0, pad]]) + + # mask boxes to -1 by score threshold + scores_mask = tf.expand_dims( + tf.cast(scores > score_threshold, boxes.dtype), axis=1) + boxes = ((boxes + 1.) * scores_mask) - 1. + + boxes = tf.expand_dims(boxes, axis=0) + scores = tf.expand_dims(scores, axis=0) + + def _loop_cond(unused_boxes, unused_threshold, output_size, idx): + return tf.logical_and( + tf.reduce_min(output_size) < max_output_size, + idx < num_boxes // _NMS_TILE_SIZE) + + selected_boxes, _, output_size, _ = tf.while_loop( + _loop_cond, _suppression_loop_body, + [boxes, iou_threshold, + tf.zeros([1], tf.int32), + tf.constant(0)]) + idx = num_boxes - tf.cast( + tf.nn.top_k( + tf.cast(tf.reduce_any(selected_boxes > 0, [2]), tf.int32) * + tf.expand_dims(tf.range(num_boxes, 0, -1), 0), max_output_size)[0], + tf.int32) + idx = tf.minimum(idx, num_boxes - 1 - pad) + idx = tf.reshape(idx + tf.reshape(tf.range(1) * num_boxes, [-1, 1]), [-1]) + num_valid_boxes = tf.reduce_sum(output_size) + return (idx, num_valid_boxes, tf.reshape(boxes, [-1, 4]), + tf.reshape(scores, [-1]), argsort_ids) + + +def _validate_boxes_scores_iou_thresh(boxes, scores, iou_thresh, + change_coordinate_frame, clip_window): + """Validates boxes, scores and iou_thresh. + + This function validates the boxes, scores, iou_thresh + and if change_coordinate_frame is True, clip_window must be specified. + + Args: + boxes: A [k, q, 4] float32 tensor containing k detections. 
`q` can be either + number of classes or 1 depending on whether a separate box is predicted + per class. + scores: A [k, num_classes] float32 tensor containing the scores for each of + the k detections. The scores have to be non-negative when + pad_to_max_output_size is True. + iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap + with previously selected boxes are removed). + change_coordinate_frame: Whether to normalize coordinates after clipping + relative to clip_window (this can only be set to True if a clip_window is + provided) + clip_window: A float32 tensor of the form [y_min, x_min, y_max, x_max] + representing the window to clip and normalize boxes to before performing + non-max suppression. + + Raises: + ValueError: if iou_thresh is not in [0, 1] or if input boxlist does not + have a valid scores field. + """ + if not 0 <= iou_thresh <= 1.0: + raise ValueError('iou_thresh must be between 0 and 1') + if scores.shape.ndims != 2: + raise ValueError('scores field must be of rank 2') + if shape_utils.get_dim_as_int(scores.shape[1]) is None: + raise ValueError('scores must have statically defined second ' 'dimension') + if boxes.shape.ndims != 3: + raise ValueError('boxes must be of rank 3.') + if not (shape_utils.get_dim_as_int( + boxes.shape[1]) == shape_utils.get_dim_as_int(scores.shape[1]) or + shape_utils.get_dim_as_int(boxes.shape[1]) == 1): + raise ValueError('second dimension of boxes must be either 1 or equal ' + 'to the second dimension of scores') + if shape_utils.get_dim_as_int(boxes.shape[2]) != 4: + raise ValueError('last dimension of boxes must be of size 4.') + if change_coordinate_frame and clip_window is None: + raise ValueError('if change_coordinate_frame is True, then a clip_window' + 'must be specified.') + + +def _clip_window_prune_boxes(sorted_boxes, clip_window, pad_to_max_output_size, + change_coordinate_frame): + """Prune boxes with zero area. + + Args: + sorted_boxes: A BoxList containing k detections. + clip_window: A float32 tensor of the form [y_min, x_min, y_max, x_max] + representing the window to clip and normalize boxes to before performing + non-max suppression. + pad_to_max_output_size: flag indicating whether to pad to max output size or + not. + change_coordinate_frame: Whether to normalize coordinates after clipping + relative to clip_window (this can only be set to True if a clip_window is + provided). + + Returns: + sorted_boxes: A BoxList containing k detections after pruning. + num_valid_nms_boxes_cumulative: Number of valid NMS boxes + """ + sorted_boxes = box_list_ops.clip_to_window( + sorted_boxes, + clip_window, + filter_nonoverlapping=not pad_to_max_output_size) + # Set the scores of boxes with zero area to -1 to keep the default + # behaviour of pruning out zero area boxes. 
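  # (Editorial sketch, not part of the original patch: the lines below are
  #  equivalent to keeping a score only when the clipped box has positive
  #  area, i.e. roughly
  #      score_i = score_i if area(box_i) > 0 else -1.0
  #  so zero-area boxes sort to the end and drop out of the valid-box count.)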
+ sorted_boxes_size = tf.shape(sorted_boxes.get())[0] + non_zero_box_area = tf.cast(box_list_ops.area(sorted_boxes), tf.bool) + sorted_boxes_scores = tf.where( + non_zero_box_area, sorted_boxes.get_field(fields.BoxListFields.scores), + -1 * tf.ones(sorted_boxes_size)) + sorted_boxes.add_field(fields.BoxListFields.scores, sorted_boxes_scores) + num_valid_nms_boxes_cumulative = tf.reduce_sum( + tf.cast(tf.greater_equal(sorted_boxes_scores, 0), tf.int32)) + sorted_boxes = box_list_ops.sort_by_field(sorted_boxes, + fields.BoxListFields.scores) + if change_coordinate_frame: + sorted_boxes = box_list_ops.change_coordinate_frame(sorted_boxes, + clip_window) + return sorted_boxes, num_valid_nms_boxes_cumulative + + +def multiclass_non_max_suppression(boxes, + scores, + score_thresh, + iou_thresh, + max_size_per_class, + max_total_size=0, + clip_window=None, + change_coordinate_frame=False, + masks=None, + boundaries=None, + pad_to_max_output_size=False, + use_partitioned_nms=False, + additional_fields=None, + soft_nms_sigma=0.0, + use_hard_nms=False, + scope=None): + """Multi-class version of non maximum suppression. + + This op greedily selects a subset of detection bounding boxes, pruning + away boxes that have high IOU (intersection over union) overlap (> thresh) + with already selected boxes. It operates independently for each class for + which scores are provided (via the scores field of the input box_list), + pruning boxes with score less than a provided threshold prior to + applying NMS. + + Please note that this operation is performed on *all* classes, therefore any + background classes should be removed prior to calling this function. + + Selected boxes are guaranteed to be sorted in decreasing order by score (but + the sort is not guaranteed to be stable). + + Args: + boxes: A [k, q, 4] float32 tensor containing k detections. `q` can be either + number of classes or 1 depending on whether a separate box is predicted + per class. + scores: A [k, num_classes] float32 tensor containing the scores for each of + the k detections. The scores have to be non-negative when + pad_to_max_output_size is True. + score_thresh: scalar threshold for score (low scoring boxes are removed). + iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap + with previously selected boxes are removed). + max_size_per_class: maximum number of retained boxes per class. + max_total_size: maximum number of boxes retained over all classes. By + default returns all boxes retained after capping boxes per class. + clip_window: A float32 tensor of the form [y_min, x_min, y_max, x_max] + representing the window to clip and normalize boxes to before performing + non-max suppression. + change_coordinate_frame: Whether to normalize coordinates after clipping + relative to clip_window (this can only be set to True if a clip_window + is provided) + masks: (optional) a [k, q, mask_height, mask_width] float32 tensor + containing box masks. `q` can be either number of classes or 1 depending + on whether a separate mask is predicted per class. + boundaries: (optional) a [k, q, boundary_height, boundary_width] float32 + tensor containing box boundaries. `q` can be either number of classes or 1 + depending on whether a separate boundary is predicted per class. + pad_to_max_output_size: If true, the output nmsed boxes are padded to be of + length `max_size_per_class`. Defaults to false. + use_partitioned_nms: If true, use partitioned version of + non_max_suppression. 
+ additional_fields: (optional) If not None, a dictionary that maps keys to + tensors whose first dimensions are all of size `k`. After non-maximum + suppression, all tensors corresponding to the selected boxes will be + added to resulting BoxList. + soft_nms_sigma: A scalar float representing the Soft NMS sigma parameter; + See Bodla et al, https://arxiv.org/abs/1704.04503). When + `soft_nms_sigma=0.0` (which is default), we fall back to standard (hard) + NMS. Soft NMS is currently only supported when pad_to_max_output_size is + False. + use_hard_nms: Enforce the usage of hard NMS. + scope: name scope. + + Returns: + A tuple of sorted_boxes and num_valid_nms_boxes. The sorted_boxes is a + BoxList holds M boxes with a rank-1 scores field representing + corresponding scores for each box with scores sorted in decreasing order + and a rank-1 classes field representing a class label for each box. The + num_valid_nms_boxes is a 0-D integer tensor representing the number of + valid elements in `BoxList`, with the valid elements appearing first. + + Raises: + ValueError: if iou_thresh is not in [0, 1] or if input boxlist does not have + a valid scores field. + ValueError: if Soft NMS (tf.image.non_max_suppression_with_scores) is not + supported in the current TF version and `soft_nms_sigma` is nonzero. + """ + _validate_boxes_scores_iou_thresh(boxes, scores, iou_thresh, + change_coordinate_frame, clip_window) + if pad_to_max_output_size and soft_nms_sigma != 0.0: + raise ValueError('Soft NMS (soft_nms_sigma != 0.0) is currently not ' + 'supported when pad_to_max_output_size is True.') + + with tf.name_scope(scope, 'MultiClassNonMaxSuppression'): + num_scores = tf.shape(scores)[0] + num_classes = shape_utils.get_dim_as_int(scores.get_shape()[1]) + + selected_boxes_list = [] + num_valid_nms_boxes_cumulative = tf.constant(0) + per_class_boxes_list = tf.unstack(boxes, axis=1) + if masks is not None: + per_class_masks_list = tf.unstack(masks, axis=1) + if boundaries is not None: + per_class_boundaries_list = tf.unstack(boundaries, axis=1) + boxes_ids = (range(num_classes) if len(per_class_boxes_list) > 1 + else [0] * num_classes) + for class_idx, boxes_idx in zip(range(num_classes), boxes_ids): + per_class_boxes = per_class_boxes_list[boxes_idx] + boxlist_and_class_scores = box_list.BoxList(per_class_boxes) + class_scores = tf.reshape( + tf.slice(scores, [0, class_idx], tf.stack([num_scores, 1])), [-1]) + + boxlist_and_class_scores.add_field(fields.BoxListFields.scores, + class_scores) + if masks is not None: + per_class_masks = per_class_masks_list[boxes_idx] + boxlist_and_class_scores.add_field(fields.BoxListFields.masks, + per_class_masks) + if boundaries is not None: + per_class_boundaries = per_class_boundaries_list[boxes_idx] + boxlist_and_class_scores.add_field(fields.BoxListFields.boundaries, + per_class_boundaries) + if additional_fields is not None: + for key, tensor in additional_fields.items(): + boxlist_and_class_scores.add_field(key, tensor) + + nms_result = None + selected_scores = None + if pad_to_max_output_size: + max_selection_size = max_size_per_class + if use_partitioned_nms: + (selected_indices, num_valid_nms_boxes, + boxlist_and_class_scores.data['boxes'], + boxlist_and_class_scores.data['scores'], + _) = partitioned_non_max_suppression_padded( + boxlist_and_class_scores.get(), + boxlist_and_class_scores.get_field(fields.BoxListFields.scores), + max_selection_size, + iou_threshold=iou_thresh, + score_threshold=score_thresh) + else: + selected_indices, num_valid_nms_boxes = ( + 
tf.image.non_max_suppression_padded( + boxlist_and_class_scores.get(), + boxlist_and_class_scores.get_field( + fields.BoxListFields.scores), + max_selection_size, + iou_threshold=iou_thresh, + score_threshold=score_thresh, + pad_to_max_output_size=True)) + nms_result = box_list_ops.gather(boxlist_and_class_scores, + selected_indices) + selected_scores = nms_result.get_field(fields.BoxListFields.scores) + else: + max_selection_size = tf.minimum(max_size_per_class, + boxlist_and_class_scores.num_boxes()) + if (hasattr(tf.image, 'non_max_suppression_with_scores') and + tf.compat.forward_compatible(2019, 6, 6) and not use_hard_nms): + (selected_indices, selected_scores + ) = tf.image.non_max_suppression_with_scores( + boxlist_and_class_scores.get(), + boxlist_and_class_scores.get_field(fields.BoxListFields.scores), + max_selection_size, + iou_threshold=iou_thresh, + score_threshold=score_thresh, + soft_nms_sigma=soft_nms_sigma) + num_valid_nms_boxes = tf.shape(selected_indices)[0] + selected_indices = tf.concat( + [selected_indices, + tf.zeros(max_selection_size-num_valid_nms_boxes, tf.int32)], 0) + selected_scores = tf.concat( + [selected_scores, + tf.zeros(max_selection_size-num_valid_nms_boxes, + tf.float32)], -1) + nms_result = box_list_ops.gather(boxlist_and_class_scores, + selected_indices) + else: + if soft_nms_sigma != 0: + raise ValueError('Soft NMS not supported in current TF version!') + selected_indices = tf.image.non_max_suppression( + boxlist_and_class_scores.get(), + boxlist_and_class_scores.get_field(fields.BoxListFields.scores), + max_selection_size, + iou_threshold=iou_thresh, + score_threshold=score_thresh) + num_valid_nms_boxes = tf.shape(selected_indices)[0] + selected_indices = tf.concat( + [selected_indices, + tf.zeros(max_selection_size-num_valid_nms_boxes, tf.int32)], 0) + nms_result = box_list_ops.gather(boxlist_and_class_scores, + selected_indices) + selected_scores = nms_result.get_field(fields.BoxListFields.scores) + # Make the scores -1 for invalid boxes. + valid_nms_boxes_indices = tf.less( + tf.range(max_selection_size), num_valid_nms_boxes) + + nms_result.add_field( + fields.BoxListFields.scores, + tf.where(valid_nms_boxes_indices, + selected_scores, -1*tf.ones(max_selection_size))) + num_valid_nms_boxes_cumulative += num_valid_nms_boxes + + nms_result.add_field( + fields.BoxListFields.classes, (tf.zeros_like( + nms_result.get_field(fields.BoxListFields.scores)) + class_idx)) + selected_boxes_list.append(nms_result) + selected_boxes = box_list_ops.concatenate(selected_boxes_list) + sorted_boxes = box_list_ops.sort_by_field(selected_boxes, + fields.BoxListFields.scores) + if clip_window is not None: + # When pad_to_max_output_size is False, it prunes the boxes with zero + # area. + sorted_boxes, num_valid_nms_boxes_cumulative = _clip_window_prune_boxes( + sorted_boxes, clip_window, pad_to_max_output_size, + change_coordinate_frame) + + if max_total_size: + max_total_size = tf.minimum(max_total_size, sorted_boxes.num_boxes()) + sorted_boxes = box_list_ops.gather(sorted_boxes, tf.range(max_total_size)) + num_valid_nms_boxes_cumulative = tf.where( + max_total_size > num_valid_nms_boxes_cumulative, + num_valid_nms_boxes_cumulative, max_total_size) + # Select only the valid boxes if pad_to_max_output_size is False. 
+ if not pad_to_max_output_size: + sorted_boxes = box_list_ops.gather( + sorted_boxes, tf.range(num_valid_nms_boxes_cumulative)) + + return sorted_boxes, num_valid_nms_boxes_cumulative + + +def class_agnostic_non_max_suppression(boxes, + scores, + score_thresh, + iou_thresh, + max_classes_per_detection=1, + max_total_size=0, + clip_window=None, + change_coordinate_frame=False, + masks=None, + boundaries=None, + pad_to_max_output_size=False, + use_partitioned_nms=False, + additional_fields=None, + soft_nms_sigma=0.0, + scope=None): + """Class-agnostic version of non maximum suppression. + + This op greedily selects a subset of detection bounding boxes, pruning + away boxes that have high IOU (intersection over union) overlap (> thresh) + with already selected boxes. It operates on all the boxes using + max scores across all classes for which scores are provided (via the scores + field of the input box_list), pruning boxes with score less than a provided + threshold prior to applying NMS. + + Please note that this operation is performed in a class-agnostic way, + therefore any background classes should be removed prior to calling this + function. + + Selected boxes are guaranteed to be sorted in decreasing order by score (but + the sort is not guaranteed to be stable). + + Args: + boxes: A [k, q, 4] float32 tensor containing k detections. `q` can be either + number of classes or 1 depending on whether a separate box is predicted + per class. + scores: A [k, num_classes] float32 tensor containing the scores for each of + the k detections. The scores have to be non-negative when + pad_to_max_output_size is True. + score_thresh: scalar threshold for score (low scoring boxes are removed). + iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap + with previously selected boxes are removed). + max_classes_per_detection: maximum number of retained classes per detection + box in class-agnostic NMS. + max_total_size: maximum number of boxes retained over all classes. By + default returns all boxes retained after capping boxes per class. + clip_window: A float32 tensor of the form [y_min, x_min, y_max, x_max] + representing the window to clip and normalize boxes to before performing + non-max suppression. + change_coordinate_frame: Whether to normalize coordinates after clipping + relative to clip_window (this can only be set to True if a clip_window is + provided) + masks: (optional) a [k, q, mask_height, mask_width] float32 tensor + containing box masks. `q` can be either number of classes or 1 depending + on whether a separate mask is predicted per class. + boundaries: (optional) a [k, q, boundary_height, boundary_width] float32 + tensor containing box boundaries. `q` can be either number of classes or 1 + depending on whether a separate boundary is predicted per class. + pad_to_max_output_size: If true, the output nmsed boxes are padded to be of + length `max_size_per_class`. Defaults to false. + use_partitioned_nms: If true, use partitioned version of + non_max_suppression. + additional_fields: (optional) If not None, a dictionary that maps keys to + tensors whose first dimensions are all of size `k`. After non-maximum + suppression, all tensors corresponding to the selected boxes will be added + to resulting BoxList. + soft_nms_sigma: A scalar float representing the Soft NMS sigma parameter; + See Bodla et al, https://arxiv.org/abs/1704.04503). When + `soft_nms_sigma=0.0` (which is default), we fall back to standard (hard) + NMS. 
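      (For intuition, and assuming the Gaussian variant described in that
      paper: instead of discarding a box that overlaps an already selected box
      with IOU `o`, its score is decayed, roughly
      score *= exp(-o**2 / soft_nms_sigma), so heavily overlapping boxes fade
      rather than vanish outright.)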
Soft NMS is currently only supported when pad_to_max_output_size is + False. + scope: name scope. + + Returns: + A tuple of sorted_boxes and num_valid_nms_boxes. The sorted_boxes is a + BoxList holds M boxes with a rank-1 scores field representing + corresponding scores for each box with scores sorted in decreasing order + and a rank-1 classes field representing a class label for each box. The + num_valid_nms_boxes is a 0-D integer tensor representing the number of + valid elements in `BoxList`, with the valid elements appearing first. + + Raises: + ValueError: if iou_thresh is not in [0, 1] or if input boxlist does not have + a valid scores field or if non-zero soft_nms_sigma is provided when + pad_to_max_output_size is True. + """ + _validate_boxes_scores_iou_thresh(boxes, scores, iou_thresh, + change_coordinate_frame, clip_window) + if pad_to_max_output_size and soft_nms_sigma != 0.0: + raise ValueError('Soft NMS (soft_nms_sigma != 0.0) is currently not ' + 'supported when pad_to_max_output_size is True.') + + if max_classes_per_detection > 1: + raise ValueError('Max classes per detection box >1 not supported.') + q = shape_utils.get_dim_as_int(boxes.shape[1]) + if q > 1: + class_ids = tf.expand_dims( + tf.argmax(scores, axis=1, output_type=tf.int32), axis=1) + boxes = tf.batch_gather(boxes, class_ids) + if masks is not None: + masks = tf.batch_gather(masks, class_ids) + if boundaries is not None: + boundaries = tf.batch_gather(boundaries, class_ids) + boxes = tf.squeeze(boxes, axis=[1]) + if masks is not None: + masks = tf.squeeze(masks, axis=[1]) + if boundaries is not None: + boundaries = tf.squeeze(boundaries, axis=[1]) + + with tf.name_scope(scope, 'ClassAgnosticNonMaxSuppression'): + boxlist_and_class_scores = box_list.BoxList(boxes) + max_scores = tf.reduce_max(scores, axis=-1) + classes_with_max_scores = tf.argmax(scores, axis=-1) + boxlist_and_class_scores.add_field(fields.BoxListFields.scores, max_scores) + if masks is not None: + boxlist_and_class_scores.add_field(fields.BoxListFields.masks, masks) + if boundaries is not None: + boxlist_and_class_scores.add_field(fields.BoxListFields.boundaries, + boundaries) + + if additional_fields is not None: + for key, tensor in additional_fields.items(): + boxlist_and_class_scores.add_field(key, tensor) + + nms_result = None + selected_scores = None + if pad_to_max_output_size: + max_selection_size = max_total_size + if use_partitioned_nms: + (selected_indices, num_valid_nms_boxes, + boxlist_and_class_scores.data['boxes'], + boxlist_and_class_scores.data['scores'], + argsort_ids) = partitioned_non_max_suppression_padded( + boxlist_and_class_scores.get(), + boxlist_and_class_scores.get_field(fields.BoxListFields.scores), + max_selection_size, + iou_threshold=iou_thresh, + score_threshold=score_thresh) + classes_with_max_scores = tf.gather(classes_with_max_scores, + argsort_ids) + else: + selected_indices, num_valid_nms_boxes = ( + tf.image.non_max_suppression_padded( + boxlist_and_class_scores.get(), + boxlist_and_class_scores.get_field(fields.BoxListFields.scores), + max_selection_size, + iou_threshold=iou_thresh, + score_threshold=score_thresh, + pad_to_max_output_size=True)) + nms_result = box_list_ops.gather(boxlist_and_class_scores, + selected_indices) + selected_scores = nms_result.get_field(fields.BoxListFields.scores) + else: + max_selection_size = tf.minimum(max_total_size, + boxlist_and_class_scores.num_boxes()) + if (hasattr(tf.image, 'non_max_suppression_with_scores') and + tf.compat.forward_compatible(2019, 6, 6)): + 
(selected_indices, selected_scores + ) = tf.image.non_max_suppression_with_scores( + boxlist_and_class_scores.get(), + boxlist_and_class_scores.get_field(fields.BoxListFields.scores), + max_selection_size, + iou_threshold=iou_thresh, + score_threshold=score_thresh, + soft_nms_sigma=soft_nms_sigma) + num_valid_nms_boxes = tf.shape(selected_indices)[0] + selected_indices = tf.concat([ + selected_indices, + tf.zeros(max_selection_size - num_valid_nms_boxes, tf.int32) + ], 0) + selected_scores = tf.concat( + [selected_scores, + tf.zeros(max_selection_size-num_valid_nms_boxes, tf.float32)], -1) + nms_result = box_list_ops.gather(boxlist_and_class_scores, + selected_indices) + else: + if soft_nms_sigma != 0: + raise ValueError('Soft NMS not supported in current TF version!') + selected_indices = tf.image.non_max_suppression( + boxlist_and_class_scores.get(), + boxlist_and_class_scores.get_field(fields.BoxListFields.scores), + max_selection_size, + iou_threshold=iou_thresh, + score_threshold=score_thresh) + num_valid_nms_boxes = tf.shape(selected_indices)[0] + selected_indices = tf.concat( + [selected_indices, + tf.zeros(max_selection_size-num_valid_nms_boxes, tf.int32)], 0) + nms_result = box_list_ops.gather(boxlist_and_class_scores, + selected_indices) + selected_scores = nms_result.get_field(fields.BoxListFields.scores) + valid_nms_boxes_indices = tf.less( + tf.range(max_selection_size), num_valid_nms_boxes) + nms_result.add_field( + fields.BoxListFields.scores, + tf.where(valid_nms_boxes_indices, + selected_scores, -1*tf.ones(max_selection_size))) + + selected_classes = tf.gather(classes_with_max_scores, selected_indices) + selected_classes = tf.cast(selected_classes, tf.float32) + nms_result.add_field(fields.BoxListFields.classes, selected_classes) + selected_boxes = nms_result + sorted_boxes = box_list_ops.sort_by_field(selected_boxes, + fields.BoxListFields.scores) + + if clip_window is not None: + # When pad_to_max_output_size is False, it prunes the boxes with zero + # area. + sorted_boxes, num_valid_nms_boxes = _clip_window_prune_boxes( + sorted_boxes, clip_window, pad_to_max_output_size, + change_coordinate_frame) + + if max_total_size: + max_total_size = tf.minimum(max_total_size, sorted_boxes.num_boxes()) + sorted_boxes = box_list_ops.gather(sorted_boxes, tf.range(max_total_size)) + num_valid_nms_boxes = tf.where(max_total_size > num_valid_nms_boxes, + num_valid_nms_boxes, max_total_size) + # Select only the valid boxes if pad_to_max_output_size is False. + if not pad_to_max_output_size: + sorted_boxes = box_list_ops.gather(sorted_boxes, + tf.range(num_valid_nms_boxes)) + + return sorted_boxes, num_valid_nms_boxes + + +def batch_multiclass_non_max_suppression(boxes, + scores, + score_thresh, + iou_thresh, + max_size_per_class, + max_total_size=0, + clip_window=None, + change_coordinate_frame=False, + num_valid_boxes=None, + masks=None, + additional_fields=None, + soft_nms_sigma=0.0, + scope=None, + use_static_shapes=False, + use_partitioned_nms=False, + parallel_iterations=32, + use_class_agnostic_nms=False, + max_classes_per_detection=1, + use_dynamic_map_fn=False, + use_combined_nms=False, + use_hard_nms=False): + """Multi-class version of non maximum suppression that operates on a batch. + + This op is similar to `multiclass_non_max_suppression` but operates on a batch + of boxes and scores. See documentation for `multiclass_non_max_suppression` + for details. + + Args: + boxes: A [batch_size, num_anchors, q, 4] float32 tensor containing + detections. 
If `q` is 1 then same boxes are used for all classes + otherwise, if `q` is equal to number of classes, class-specific boxes are + used. + scores: A [batch_size, num_anchors, num_classes] float32 tensor containing + the scores for each of the `num_anchors` detections. The scores have to be + non-negative when use_static_shapes is set True. + score_thresh: scalar threshold for score (low scoring boxes are removed). + iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap + with previously selected boxes are removed). + max_size_per_class: maximum number of retained boxes per class. + max_total_size: maximum number of boxes retained over all classes. By + default returns all boxes retained after capping boxes per class. + clip_window: A float32 tensor of shape [batch_size, 4] where each entry is + of the form [y_min, x_min, y_max, x_max] representing the window to clip + boxes to before performing non-max suppression. This argument can also be + a tensor of shape [4] in which case, the same clip window is applied to + all images in the batch. If clip_widow is None, all boxes are used to + perform non-max suppression. + change_coordinate_frame: Whether to normalize coordinates after clipping + relative to clip_window (this can only be set to True if a clip_window is + provided) + num_valid_boxes: (optional) a Tensor of type `int32`. A 1-D tensor of shape + [batch_size] representing the number of valid boxes to be considered for + each image in the batch. This parameter allows for ignoring zero + paddings. + masks: (optional) a [batch_size, num_anchors, q, mask_height, mask_width] + float32 tensor containing box masks. `q` can be either number of classes + or 1 depending on whether a separate mask is predicted per class. + additional_fields: (optional) If not None, a dictionary that maps keys to + tensors whose dimensions are [batch_size, num_anchors, ...]. + soft_nms_sigma: A scalar float representing the Soft NMS sigma parameter; + See Bodla et al, https://arxiv.org/abs/1704.04503). When + `soft_nms_sigma=0.0` (which is default), we fall back to standard (hard) + NMS. Soft NMS is currently only supported when pad_to_max_output_size is + False. + scope: tf scope name. + use_static_shapes: If true, the output nmsed boxes are padded to be of + length `max_size_per_class` and it doesn't clip boxes to max_total_size. + Defaults to false. + use_partitioned_nms: If true, use partitioned version of + non_max_suppression. + parallel_iterations: (optional) number of batch items to process in + parallel. + use_class_agnostic_nms: If true, this uses class-agnostic non max + suppression + max_classes_per_detection: Maximum number of retained classes per detection + box in class-agnostic NMS. + use_dynamic_map_fn: If true, images in the batch will be processed within a + dynamic loop. Otherwise, a static loop will be used if possible. + use_combined_nms: If true, it uses tf.image.combined_non_max_suppression ( + multi-class version of NMS that operates on a batch). + It greedily selects a subset of detection bounding boxes, pruning away + boxes that have high IOU (intersection over union) overlap (> thresh) with + already selected boxes. It operates independently for each batch. + Within each batch, it operates independently for each class for which + scores are provided (via the scores field of the input box_list), + pruning boxes with score less than a provided threshold prior to applying + NMS. 
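      As a rough sketch of what this path reduces to (argument names as used by
      this function; see the implementation below for the exact call):

        nmsed_boxes, nmsed_scores, nmsed_classes, num_detections = (
            tf.image.combined_non_max_suppression(
                boxes=boxes,
                scores=scores,
                max_output_size_per_class=max_size_per_class,
                max_total_size=max_total_size,
                iou_threshold=iou_thresh,
                score_threshold=score_thresh,
                pad_per_class=use_static_shapes))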
This operation is performed on *all* batches and *all* classes + in the batch, therefore any background classes should be removed prior to + calling this function. + Masks and additional fields are not supported. + See argument checks in the code below for unsupported arguments. + use_hard_nms: Enforce the usage of hard NMS. + + Returns: + 'nmsed_boxes': A [batch_size, max_detections, 4] float32 tensor + containing the non-max suppressed boxes. + 'nmsed_scores': A [batch_size, max_detections] float32 tensor containing + the scores for the boxes. + 'nmsed_classes': A [batch_size, max_detections] float32 tensor + containing the class for boxes. + 'nmsed_masks': (optional) a + [batch_size, max_detections, mask_height, mask_width] float32 tensor + containing masks for each selected box. This is set to None if input + `masks` is None. + 'nmsed_additional_fields': (optional) a dictionary of + [batch_size, max_detections, ...] float32 tensors corresponding to the + tensors specified in the input `additional_fields`. This is not returned + if input `additional_fields` is None. + 'num_detections': A [batch_size] int32 tensor indicating the number of + valid detections per batch item. Only the top num_detections[i] entries in + nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the + entries are zero paddings. + + Raises: + ValueError: if `q` in boxes.shape is not 1 or not equal to number of + classes as inferred from scores.shape. + """ + if use_combined_nms: + if change_coordinate_frame: + raise ValueError( + 'change_coordinate_frame (normalizing coordinates' + ' relative to clip_window) is not supported by combined_nms.') + if num_valid_boxes is not None: + raise ValueError('num_valid_boxes is not supported by combined_nms.') + if masks is not None: + raise ValueError('masks is not supported by combined_nms.') + if soft_nms_sigma != 0.0: + raise ValueError('Soft NMS is not supported by combined_nms.') + if use_class_agnostic_nms: + raise ValueError('class-agnostic NMS is not supported by combined_nms.') + if clip_window is not None: + tf.logging.warning( + 'clip_window is not supported by combined_nms unless it is' + ' [0. 0. 1. 1.] for each image.') + if additional_fields is not None: + tf.logging.warning('additional_fields is not supported by combined_nms.') + if parallel_iterations != 32: + tf.logging.warning('Number of batch items to be processed in parallel is' + ' not configurable by combined_nms.') + if max_classes_per_detection > 1: + tf.logging.warning( + 'max_classes_per_detection is not configurable by combined_nms.') + + with tf.name_scope(scope, 'CombinedNonMaxSuppression'): + (batch_nmsed_boxes, batch_nmsed_scores, batch_nmsed_classes, + batch_num_detections) = tf.image.combined_non_max_suppression( + boxes=boxes, + scores=scores, + max_output_size_per_class=max_size_per_class, + max_total_size=max_total_size, + iou_threshold=iou_thresh, + score_threshold=score_thresh, + pad_per_class=use_static_shapes) + # Not supported by combined_non_max_suppression. + batch_nmsed_masks = None + # Not supported by combined_non_max_suppression. 
+ batch_nmsed_additional_fields = None + return (batch_nmsed_boxes, batch_nmsed_scores, batch_nmsed_classes, + batch_nmsed_masks, batch_nmsed_additional_fields, + batch_num_detections) + + q = shape_utils.get_dim_as_int(boxes.shape[2]) + num_classes = shape_utils.get_dim_as_int(scores.shape[2]) + if q != 1 and q != num_classes: + raise ValueError('third dimension of boxes must be either 1 or equal ' + 'to the third dimension of scores.') + if change_coordinate_frame and clip_window is None: + raise ValueError('if change_coordinate_frame is True, then a clip_window' + 'must be specified.') + original_masks = masks + + # Create ordered dictionary using the sorted keys from + # additional fields to ensure getting the same key value assignment + # in _single_image_nms_fn(). The dictionary is thus a sorted version of + # additional_fields. + if additional_fields is None: + ordered_additional_fields = collections.OrderedDict() + else: + ordered_additional_fields = collections.OrderedDict( + sorted(additional_fields.items(), key=lambda item: item[0])) + + with tf.name_scope(scope, 'BatchMultiClassNonMaxSuppression'): + boxes_shape = boxes.shape + batch_size = shape_utils.get_dim_as_int(boxes_shape[0]) + num_anchors = shape_utils.get_dim_as_int(boxes_shape[1]) + + if batch_size is None: + batch_size = tf.shape(boxes)[0] + if num_anchors is None: + num_anchors = tf.shape(boxes)[1] + + # If num valid boxes aren't provided, create one and mark all boxes as + # valid. + if num_valid_boxes is None: + num_valid_boxes = tf.ones([batch_size], dtype=tf.int32) * num_anchors + + # If masks aren't provided, create dummy masks so we can only have one copy + # of _single_image_nms_fn and discard the dummy masks after map_fn. + if masks is None: + masks_shape = tf.stack([batch_size, num_anchors, q, 1, 1]) + masks = tf.zeros(masks_shape) + + if clip_window is None: + clip_window = tf.stack([ + tf.reduce_min(boxes[:, :, :, 0]), + tf.reduce_min(boxes[:, :, :, 1]), + tf.reduce_max(boxes[:, :, :, 2]), + tf.reduce_max(boxes[:, :, :, 3]) + ]) + if clip_window.shape.ndims == 1: + clip_window = tf.tile(tf.expand_dims(clip_window, 0), [batch_size, 1]) + + def _single_image_nms_fn(args): + """Runs NMS on a single image and returns padded output. + + Args: + args: A list of tensors consisting of the following: + per_image_boxes - A [num_anchors, q, 4] float32 tensor containing + detections. If `q` is 1 then same boxes are used for all classes + otherwise, if `q` is equal to number of classes, class-specific + boxes are used. + per_image_scores - A [num_anchors, num_classes] float32 tensor + containing the scores for each of the `num_anchors` detections. + per_image_masks - A [num_anchors, q, mask_height, mask_width] float32 + tensor containing box masks. `q` can be either number of classes + or 1 depending on whether a separate mask is predicted per class. + per_image_clip_window - A 1D float32 tensor of the form + [ymin, xmin, ymax, xmax] representing the window to clip the boxes + to. + per_image_additional_fields - (optional) A variable number of float32 + tensors each with size [num_anchors, ...]. + per_image_num_valid_boxes - A tensor of type `int32`. A 1-D tensor of + shape [batch_size] representing the number of valid boxes to be + considered for each image in the batch. This parameter allows for + ignoring zero paddings. + + Returns: + 'nmsed_boxes': A [max_detections, 4] float32 tensor containing the + non-max suppressed boxes. 
+ 'nmsed_scores': A [max_detections] float32 tensor containing the scores + for the boxes. + 'nmsed_classes': A [max_detections] float32 tensor containing the class + for boxes. + 'nmsed_masks': (optional) a [max_detections, mask_height, mask_width] + float32 tensor containing masks for each selected box. This is set to + None if input `masks` is None. + 'nmsed_additional_fields': (optional) A variable number of float32 + tensors each with size [max_detections, ...] corresponding to the + input `per_image_additional_fields`. + 'num_detections': A [batch_size] int32 tensor indicating the number of + valid detections per batch item. Only the top num_detections[i] + entries in nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The + rest of the entries are zero paddings. + """ + per_image_boxes = args[0] + per_image_scores = args[1] + per_image_masks = args[2] + per_image_clip_window = args[3] + # Make sure that the order of elements passed in args is aligned with + # the iteration order of ordered_additional_fields + per_image_additional_fields = { + key: value + for key, value in zip(ordered_additional_fields, args[4:-1]) + } + per_image_num_valid_boxes = args[-1] + if use_static_shapes: + total_proposals = tf.shape(per_image_scores) + per_image_scores = tf.where( + tf.less(tf.range(total_proposals[0]), per_image_num_valid_boxes), + per_image_scores, + tf.fill(total_proposals, np.finfo('float32').min)) + else: + per_image_boxes = tf.reshape( + tf.slice(per_image_boxes, 3 * [0], + tf.stack([per_image_num_valid_boxes, -1, -1])), [-1, q, 4]) + per_image_scores = tf.reshape( + tf.slice(per_image_scores, [0, 0], + tf.stack([per_image_num_valid_boxes, -1])), + [-1, num_classes]) + per_image_masks = tf.reshape( + tf.slice(per_image_masks, 4 * [0], + tf.stack([per_image_num_valid_boxes, -1, -1, -1])), + [-1, q, shape_utils.get_dim_as_int(per_image_masks.shape[2]), + shape_utils.get_dim_as_int(per_image_masks.shape[3])]) + if per_image_additional_fields is not None: + for key, tensor in per_image_additional_fields.items(): + additional_field_shape = tensor.get_shape() + additional_field_dim = len(additional_field_shape) + per_image_additional_fields[key] = tf.reshape( + tf.slice( + per_image_additional_fields[key], + additional_field_dim * [0], + tf.stack([per_image_num_valid_boxes] + + (additional_field_dim - 1) * [-1])), [-1] + [ + shape_utils.get_dim_as_int(dim) + for dim in additional_field_shape[1:] + ]) + if use_class_agnostic_nms: + nmsed_boxlist, num_valid_nms_boxes = class_agnostic_non_max_suppression( + per_image_boxes, + per_image_scores, + score_thresh, + iou_thresh, + max_classes_per_detection, + max_total_size, + clip_window=per_image_clip_window, + change_coordinate_frame=change_coordinate_frame, + masks=per_image_masks, + pad_to_max_output_size=use_static_shapes, + use_partitioned_nms=use_partitioned_nms, + additional_fields=per_image_additional_fields, + soft_nms_sigma=soft_nms_sigma) + else: + nmsed_boxlist, num_valid_nms_boxes = multiclass_non_max_suppression( + per_image_boxes, + per_image_scores, + score_thresh, + iou_thresh, + max_size_per_class, + max_total_size, + clip_window=per_image_clip_window, + change_coordinate_frame=change_coordinate_frame, + masks=per_image_masks, + pad_to_max_output_size=use_static_shapes, + use_partitioned_nms=use_partitioned_nms, + additional_fields=per_image_additional_fields, + soft_nms_sigma=soft_nms_sigma, + use_hard_nms=use_hard_nms) + + if not use_static_shapes: + nmsed_boxlist = box_list_ops.pad_or_clip_box_list( + nmsed_boxlist, 
max_total_size) + num_detections = num_valid_nms_boxes + nmsed_boxes = nmsed_boxlist.get() + nmsed_scores = nmsed_boxlist.get_field(fields.BoxListFields.scores) + nmsed_classes = nmsed_boxlist.get_field(fields.BoxListFields.classes) + nmsed_masks = nmsed_boxlist.get_field(fields.BoxListFields.masks) + nmsed_additional_fields = [] + # Sorting is needed here to ensure that the values stored in + # nmsed_additional_fields are always kept in the same order + # across different execution runs. + for key in sorted(per_image_additional_fields.keys()): + nmsed_additional_fields.append(nmsed_boxlist.get_field(key)) + return ([nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks] + + nmsed_additional_fields + [num_detections]) + + num_additional_fields = 0 + if ordered_additional_fields: + num_additional_fields = len(ordered_additional_fields) + num_nmsed_outputs = 4 + num_additional_fields + + if use_dynamic_map_fn: + map_fn = tf.map_fn + else: + map_fn = shape_utils.static_or_dynamic_map_fn + + batch_outputs = map_fn( + _single_image_nms_fn, + elems=([boxes, scores, masks, clip_window] + + list(ordered_additional_fields.values()) + [num_valid_boxes]), + dtype=(num_nmsed_outputs * [tf.float32] + [tf.int32]), + parallel_iterations=parallel_iterations) + + batch_nmsed_boxes = batch_outputs[0] + batch_nmsed_scores = batch_outputs[1] + batch_nmsed_classes = batch_outputs[2] + batch_nmsed_masks = batch_outputs[3] + batch_nmsed_values = batch_outputs[4:-1] + + batch_nmsed_additional_fields = {} + if num_additional_fields > 0: + # Sort the keys to ensure arranging elements in same order as + # in _single_image_nms_fn. + batch_nmsed_keys = list(ordered_additional_fields.keys()) + for i in range(len(batch_nmsed_keys)): + batch_nmsed_additional_fields[ + batch_nmsed_keys[i]] = batch_nmsed_values[i] + + batch_num_detections = batch_outputs[-1] + + if original_masks is None: + batch_nmsed_masks = None + + if not ordered_additional_fields: + batch_nmsed_additional_fields = None + + return (batch_nmsed_boxes, batch_nmsed_scores, batch_nmsed_classes, + batch_nmsed_masks, batch_nmsed_additional_fields, + batch_num_detections) diff --git a/models/research/object_detection/core/prefetcher.py b/models/research/object_detection/core/prefetcher.py new file mode 100644 index 0000000000000000000000000000000000000000..31e93eae80e25abde3166a56d212645ed4f17a5a --- /dev/null +++ b/models/research/object_detection/core/prefetcher.py @@ -0,0 +1,61 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Provides functions to prefetch tensors to feed into models.""" +import tensorflow.compat.v1 as tf + + +def prefetch(tensor_dict, capacity): + """Creates a prefetch queue for tensors. + + Creates a FIFO queue to asynchronously enqueue tensor_dicts and returns a + dequeue op that evaluates to a tensor_dict. 
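  (The returned queue also exports a scalar summary,
  queue/<queue_name>/fraction_of_<capacity>_full, which is handy for checking
  that the prefetcher is keeping up with its consumers.)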
This function is useful in + prefetching preprocessed tensors so that the data is readily available for + consumers. + + Example input pipeline when you don't need batching: + ---------------------------------------------------- + key, string_tensor = slim.parallel_reader.parallel_read(...) + tensor_dict = decoder.decode(string_tensor) + tensor_dict = preprocessor.preprocess(tensor_dict, ...) + prefetch_queue = prefetcher.prefetch(tensor_dict, capacity=20) + tensor_dict = prefetch_queue.dequeue() + outputs = Model(tensor_dict) + ... + ---------------------------------------------------- + + For input pipelines with batching, refer to core/batcher.py + + Args: + tensor_dict: a dictionary of tensors to prefetch. + capacity: the size of the prefetch queue. + + Returns: + a FIFO prefetcher queue + """ + names = list(tensor_dict.keys()) + dtypes = [t.dtype for t in tensor_dict.values()] + shapes = [t.get_shape() for t in tensor_dict.values()] + prefetch_queue = tf.PaddingFIFOQueue(capacity, dtypes=dtypes, + shapes=shapes, + names=names, + name='prefetch_queue') + enqueue_op = prefetch_queue.enqueue(tensor_dict) + tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner( + prefetch_queue, [enqueue_op])) + tf.summary.scalar( + 'queue/%s/fraction_of_%d_full' % (prefetch_queue.name, capacity), + tf.cast(prefetch_queue.size(), dtype=tf.float32) * (1. / capacity)) + return prefetch_queue diff --git a/models/research/object_detection/core/prefetcher_tf1_test.py b/models/research/object_detection/core/prefetcher_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..95e9155e5e38c762cee915389f55f0cc69334ae9 --- /dev/null +++ b/models/research/object_detection/core/prefetcher_tf1_test.py @@ -0,0 +1,109 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.core.prefetcher.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest +from six.moves import range +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.core import prefetcher +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class PrefetcherTest(tf.test.TestCase): + """Test class for prefetcher.""" + + def test_prefetch_tensors_with_fully_defined_shapes(self): + with self.test_session() as sess: + batch_size = 10 + image_size = 32 + num_batches = 5 + examples = tf.Variable(tf.constant(0, dtype=tf.int64)) + counter = examples.count_up_to(num_batches) + image = tf.random_normal([batch_size, image_size, + image_size, 3], + dtype=tf.float32, + name='images') + label = tf.random_uniform([batch_size, 1], 0, 10, + dtype=tf.int32, name='labels') + + prefetch_queue = prefetcher.prefetch(tensor_dict={'counter': counter, + 'image': image, + 'label': label}, + capacity=100) + tensor_dict = prefetch_queue.dequeue() + + self.assertAllEqual(tensor_dict['image'].get_shape().as_list(), + [batch_size, image_size, image_size, 3]) + self.assertAllEqual(tensor_dict['label'].get_shape().as_list(), + [batch_size, 1]) + + tf.initialize_all_variables().run() + with slim.queues.QueueRunners(sess): + for _ in range(num_batches): + results = sess.run(tensor_dict) + self.assertEquals(results['image'].shape, + (batch_size, image_size, image_size, 3)) + self.assertEquals(results['label'].shape, (batch_size, 1)) + with self.assertRaises(tf.errors.OutOfRangeError): + sess.run(tensor_dict) + + def test_prefetch_tensors_with_partially_defined_shapes(self): + with self.test_session() as sess: + batch_size = 10 + image_size = 32 + num_batches = 5 + examples = tf.Variable(tf.constant(0, dtype=tf.int64)) + counter = examples.count_up_to(num_batches) + image = tf.random_normal([batch_size, + tf.Variable(image_size), + tf.Variable(image_size), 3], + dtype=tf.float32, + name='image') + image.set_shape([batch_size, None, None, 3]) + label = tf.random_uniform([batch_size, tf.Variable(1)], 0, + 10, dtype=tf.int32, name='label') + label.set_shape([batch_size, None]) + + prefetch_queue = prefetcher.prefetch(tensor_dict={'counter': counter, + 'image': image, + 'label': label}, + capacity=100) + tensor_dict = prefetch_queue.dequeue() + + self.assertAllEqual(tensor_dict['image'].get_shape().as_list(), + [batch_size, None, None, 3]) + self.assertAllEqual(tensor_dict['label'].get_shape().as_list(), + [batch_size, None]) + + tf.initialize_all_variables().run() + with slim.queues.QueueRunners(sess): + for _ in range(num_batches): + results = sess.run(tensor_dict) + self.assertEquals(results['image'].shape, + (batch_size, image_size, image_size, 3)) + self.assertEquals(results['label'].shape, (batch_size, 1)) + with self.assertRaises(tf.errors.OutOfRangeError): + sess.run(tensor_dict) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/core/preprocessor.py b/models/research/object_detection/core/preprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..8b8fdff5e5446f0739396eafd10b4b5d39bd14b5 --- /dev/null +++ b/models/research/object_detection/core/preprocessor.py @@ -0,0 +1,4308 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Preprocess images and bounding boxes for detection. + +We perform two sets of operations in preprocessing stage: +(a) operations that are applied to both training and testing data, +(b) operations that are applied only to training data for the purpose of + data augmentation. + +A preprocessing function receives a set of inputs, +e.g. an image and bounding boxes, +performs an operation on them, and returns them. +Some examples are: randomly cropping the image, randomly mirroring the image, + randomly changing the brightness, contrast, hue and + randomly jittering the bounding boxes. + +The preprocess function receives a tensor_dict which is a dictionary that maps +different field names to their tensors. For example, +tensor_dict[fields.InputDataFields.image] holds the image tensor. +The image is a rank 4 tensor: [1, height, width, channels] with +dtype=tf.float32. The groundtruth_boxes is a rank 2 tensor: [N, 4] where +in each row there is a box with [ymin xmin ymax xmax]. +Boxes are in normalized coordinates meaning +their coordinate values range in [0, 1] + +To preprocess multiple images with the same operations in cases where +nondeterministic operations are used, a preprocessor_cache.PreprocessorCache +object can be passed into the preprocess function or individual operations. +All nondeterministic operations except random_jitter_boxes support caching. +E.g. +Let tensor_dict{1,2,3,4,5} be copies of the same inputs. +Let preprocess_options contain nondeterministic operation(s) excluding +random_jitter_boxes. + +cache1 = preprocessor_cache.PreprocessorCache() +cache2 = preprocessor_cache.PreprocessorCache() +a = preprocess(tensor_dict1, preprocess_options, preprocess_vars_cache=cache1) +b = preprocess(tensor_dict2, preprocess_options, preprocess_vars_cache=cache1) +c = preprocess(tensor_dict3, preprocess_options, preprocess_vars_cache=cache2) +d = preprocess(tensor_dict4, preprocess_options, preprocess_vars_cache=cache2) +e = preprocess(tensor_dict5, preprocess_options) + +Then correspondings tensors of object pairs (a,b) and (c,d) +are guaranteed to be equal element-wise, but the equality of any other object +pair cannot be determined. + +Important Note: In tensor_dict, images is a rank 4 tensor, but preprocessing +functions receive a rank 3 tensor for processing the image. Thus, inside the +preprocess function we squeeze the image to become a rank 3 tensor and then +we pass it to the functions. At the end of the preprocess we expand the image +back to rank 4. 
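A minimal, purely illustrative call (the field names are the standard ones
from standard_fields; the single-option list here is only an example):

  tensor_dict = {
      fields.InputDataFields.image:
          tf.zeros([1, 200, 300, 3], dtype=tf.float32),
      fields.InputDataFields.groundtruth_boxes:
          tf.constant([[0.1, 0.2, 0.6, 0.8]], dtype=tf.float32),
  }
  preprocess_options = [(random_horizontal_flip, {})]
  tensor_dict = preprocess(tensor_dict, preprocess_options)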
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import inspect +import sys + +import six +from six.moves import range +from six.moves import zip +import tensorflow.compat.v1 as tf + +from tensorflow.python.ops import control_flow_ops +from object_detection.core import box_list +from object_detection.core import box_list_ops +from object_detection.core import keypoint_ops +from object_detection.core import preprocessor_cache +from object_detection.core import standard_fields as fields +from object_detection.utils import autoaugment_utils +from object_detection.utils import ops +from object_detection.utils import patch_ops +from object_detection.utils import shape_utils + + +def _apply_with_random_selector(x, + func, + num_cases, + preprocess_vars_cache=None, + key=''): + """Computes func(x, sel), with sel sampled from [0...num_cases-1]. + + If both preprocess_vars_cache AND key are the same between two calls, sel will + be the same value in both calls. + + Args: + x: input Tensor. + func: Python function to apply. + num_cases: Python int32, number of cases to sample sel from. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + key: variable identifier for preprocess_vars_cache. + + Returns: + The result of func(x, sel), where func receives the value of the + selector as a python integer, but sel is sampled dynamically. + """ + generator_func = functools.partial( + tf.random_uniform, [], maxval=num_cases, dtype=tf.int32) + rand_sel = _get_or_create_preprocess_rand_vars( + generator_func, preprocessor_cache.PreprocessorCache.SELECTOR, + preprocess_vars_cache, key) + + # Pass the real x only to one of the func calls. + return control_flow_ops.merge([func( + control_flow_ops.switch(x, tf.equal(rand_sel, case))[1], case) + for case in range(num_cases)])[0] + + +def _apply_with_random_selector_tuples(x, + func, + num_cases, + preprocess_vars_cache=None, + key=''): + """Computes func(x, sel), with sel sampled from [0...num_cases-1]. + + If both preprocess_vars_cache AND key are the same between two calls, sel will + be the same value in both calls. + + Args: + x: A tuple of input tensors. + func: Python function to apply. + num_cases: Python int32, number of cases to sample sel from. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + key: variable identifier for preprocess_vars_cache. + + Returns: + The result of func(x, sel), where func receives the value of the + selector as a python integer, but sel is sampled dynamically. + """ + num_inputs = len(x) + generator_func = functools.partial( + tf.random_uniform, [], maxval=num_cases, dtype=tf.int32) + rand_sel = _get_or_create_preprocess_rand_vars( + generator_func, preprocessor_cache.PreprocessorCache.SELECTOR_TUPLES, + preprocess_vars_cache, key) + + # Pass the real x only to one of the func calls. 
+ tuples = [list() for t in x] + for case in range(num_cases): + new_x = [control_flow_ops.switch(t, tf.equal(rand_sel, case))[1] for t in x] + output = func(tuple(new_x), case) + for j in range(num_inputs): + tuples[j].append(output[j]) + + for i in range(num_inputs): + tuples[i] = control_flow_ops.merge(tuples[i])[0] + return tuple(tuples) + + +def _get_or_create_preprocess_rand_vars(generator_func, + function_id, + preprocess_vars_cache, + key=''): + """Returns a tensor stored in preprocess_vars_cache or using generator_func. + + If the tensor was previously generated and appears in the PreprocessorCache, + the previously generated tensor will be returned. Otherwise, a new tensor + is generated using generator_func and stored in the cache. + + Args: + generator_func: A 0-argument function that generates a tensor. + function_id: identifier for the preprocessing function used. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + key: identifier for the variable stored. + Returns: + The generated tensor. + """ + if preprocess_vars_cache is not None: + var = preprocess_vars_cache.get(function_id, key) + if var is None: + var = generator_func() + preprocess_vars_cache.update(function_id, key, var) + else: + var = generator_func() + return var + + +def _random_integer(minval, maxval, seed): + """Returns a random 0-D tensor between minval and maxval. + + Args: + minval: minimum value of the random tensor. + maxval: maximum value of the random tensor. + seed: random seed. + + Returns: + A random 0-D tensor between minval and maxval. + """ + return tf.random_uniform( + [], minval=minval, maxval=maxval, dtype=tf.int32, seed=seed) + + +# TODO(mttang): This method is needed because the current +# tf.image.rgb_to_grayscale method does not support quantization. Replace with +# tf.image.rgb_to_grayscale after quantization support is added. +def _rgb_to_grayscale(images, name=None): + """Converts one or more images from RGB to Grayscale. + + Outputs a tensor of the same `DType` and rank as `images`. The size of the + last dimension of the output is 1, containing the Grayscale value of the + pixels. + + Args: + images: The RGB tensor to convert. Last dimension must have size 3 and + should contain RGB values. + name: A name for the operation (optional). + + Returns: + The converted grayscale image(s). + """ + with tf.name_scope(name, 'rgb_to_grayscale', [images]) as name: + images = tf.convert_to_tensor(images, name='images') + # Remember original dtype to so we can convert back if needed + orig_dtype = images.dtype + flt_image = tf.image.convert_image_dtype(images, tf.float32) + + # Reference for converting between RGB and grayscale. + # https://en.wikipedia.org/wiki/Luma_%28video%29 + rgb_weights = [0.2989, 0.5870, 0.1140] + rank_1 = tf.expand_dims(tf.rank(images) - 1, 0) + gray_float = tf.reduce_sum( + flt_image * rgb_weights, rank_1, keep_dims=True) + gray_float.set_shape(images.get_shape()[:-1].concatenate([1])) + return tf.image.convert_image_dtype(gray_float, orig_dtype, name=name) + + +def normalize_image(image, original_minval, original_maxval, target_minval, + target_maxval): + """Normalizes pixel values in the image. + + Moves the pixel values from the current [original_minval, original_maxval] + range to a the [target_minval, target_maxval] range. 
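  For example, mapping an image from [0, 255] to [-1, 1] computes roughly

    image = (image - 0.0) * (1.0 - (-1.0)) / (255.0 - 0.0) + (-1.0)

  so a pixel value of 127.5 ends up at 0.0. (Worked example added for clarity;
  it mirrors the arithmetic in the function body.)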
+ + Args: + image: rank 3 float32 tensor containing 1 + image -> [height, width, channels]. + original_minval: current image minimum value. + original_maxval: current image maximum value. + target_minval: target image minimum value. + target_maxval: target image maximum value. + + Returns: + image: image which is the same shape as input image. + """ + with tf.name_scope('NormalizeImage', values=[image]): + original_minval = float(original_minval) + original_maxval = float(original_maxval) + target_minval = float(target_minval) + target_maxval = float(target_maxval) + image = tf.cast(image, dtype=tf.float32) + image = tf.subtract(image, original_minval) + image = tf.multiply(image, (target_maxval - target_minval) / + (original_maxval - original_minval)) + image = tf.add(image, target_minval) + return image + + +def retain_boxes_above_threshold(boxes, + labels, + label_weights, + label_confidences=None, + multiclass_scores=None, + masks=None, + keypoints=None, + threshold=0.0): + """Retains boxes whose label weight is above a given threshold. + + If the label weight for a box is missing (represented by NaN), the box is + retained. The boxes that don't pass the threshold will not appear in the + returned tensor. + + Args: + boxes: float32 tensor of shape [num_instance, 4] representing boxes + location in normalized coordinates. + labels: rank 1 int32 tensor of shape [num_instance] containing the object + classes. + label_weights: float32 tensor of shape [num_instance] representing the + weight for each box. + label_confidences: float32 tensor of shape [num_instance] representing the + confidence for each box. + multiclass_scores: (optional) float32 tensor of shape + [num_instances, num_classes] representing the score for each box for each + class. + masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. The masks are of + the same height, width as the input `image`. + keypoints: (optional) rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized + coordinates. + threshold: scalar python float. 
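  A small worked example (added for illustration): with
  label_weights = [0.9, 0.2, float('nan')] and threshold = 0.5, boxes 0 and 2
  are retained (NaN weights are treated as missing and kept) while box 1 is
  dropped.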
+
+  Returns:
+    retained_boxes: [num_retained_instance, 4]
+    retained_labels: [num_retained_instance]
+    retained_label_weights: [num_retained_instance]
+
+    If multiclass_scores, masks, or keypoints are not None, the function also
+      returns:
+
+    retained_multiclass_scores: [num_retained_instance, num_classes]
+    retained_masks: [num_retained_instance, height, width]
+    retained_keypoints: [num_retained_instance, num_keypoints, 2]
+  """
+  with tf.name_scope('RetainBoxesAboveThreshold',
+                     values=[boxes, labels, label_weights]):
+    indices = tf.where(
+        tf.logical_or(label_weights > threshold, tf.is_nan(label_weights)))
+    indices = tf.squeeze(indices, axis=1)
+    retained_boxes = tf.gather(boxes, indices)
+    retained_labels = tf.gather(labels, indices)
+    retained_label_weights = tf.gather(label_weights, indices)
+    result = [retained_boxes, retained_labels, retained_label_weights]
+
+    if label_confidences is not None:
+      retained_label_confidences = tf.gather(label_confidences, indices)
+      result.append(retained_label_confidences)
+
+    if multiclass_scores is not None:
+      retained_multiclass_scores = tf.gather(multiclass_scores, indices)
+      result.append(retained_multiclass_scores)
+
+    if masks is not None:
+      retained_masks = tf.gather(masks, indices)
+      result.append(retained_masks)
+
+    if keypoints is not None:
+      retained_keypoints = tf.gather(keypoints, indices)
+      result.append(retained_keypoints)
+
+    return result
+
+
+def drop_label_probabilistically(boxes,
+                                 labels,
+                                 label_weights,
+                                 label_confidences=None,
+                                 multiclass_scores=None,
+                                 masks=None,
+                                 keypoints=None,
+                                 dropped_label=None,
+                                 drop_probability=0.0,
+                                 seed=None):
+  """Drops boxes of a certain label with probability drop_probability.
+
+  Boxes of the label dropped_label will not appear in the returned tensor.
+
+  Args:
+    boxes: float32 tensor of shape [num_instance, 4] representing boxes
+      location in normalized coordinates.
+    labels: rank 1 int32 tensor of shape [num_instance] containing the object
+      classes.
+    label_weights: float32 tensor of shape [num_instance] representing the
+      weight for each box.
+    label_confidences: float32 tensor of shape [num_instance] representing the
+      confidence for each box.
+    multiclass_scores: (optional) float32 tensor of shape
+      [num_instances, num_classes] representing the score for each box for each
+      class.
+    masks: (optional) rank 3 float32 tensor with shape
+      [num_instances, height, width] containing instance masks. The masks are of
+      the same height, width as the input `image`.
+    keypoints: (optional) rank 3 float32 tensor with shape
+      [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
+      coordinates.
+    dropped_label: int32 id of label to drop.
+    drop_probability: float32 probability of dropping a label.
+    seed: random seed.
+
+  Returns:
+    retained_boxes: [num_retained_instance, 4]
+    retained_labels: [num_retained_instance]
+    retained_label_weights: [num_retained_instance]
+
+    If multiclass_scores, masks, or keypoints are not None, the function also
+      returns:
+
+    retained_multiclass_scores: [num_retained_instance, num_classes]
+    retained_masks: [num_retained_instance, height, width]
+    retained_keypoints: [num_retained_instance, num_keypoints, 2]
+  """
+  with tf.name_scope('DropLabelProbabilistically',
+                     values=[boxes, labels]):
+    indices = tf.where(
+        tf.logical_or(
+            tf.random_uniform(tf.shape(labels), seed=seed) > drop_probability,
+            tf.not_equal(labels, dropped_label)))
+    indices = tf.squeeze(indices, axis=1)
+
+    retained_boxes = tf.gather(boxes, indices)
+    retained_labels = tf.gather(labels, indices)
+    retained_label_weights = tf.gather(label_weights, indices)
+    result = [retained_boxes, retained_labels, retained_label_weights]
+
+    if label_confidences is not None:
+      retained_label_confidences = tf.gather(label_confidences, indices)
+      result.append(retained_label_confidences)
+
+    if multiclass_scores is not None:
+      retained_multiclass_scores = tf.gather(multiclass_scores, indices)
+      result.append(retained_multiclass_scores)
+
+    if masks is not None:
+      retained_masks = tf.gather(masks, indices)
+      result.append(retained_masks)
+
+    if keypoints is not None:
+      retained_keypoints = tf.gather(keypoints, indices)
+      result.append(retained_keypoints)
+
+    return result
+
+
+def remap_labels(labels,
+                 original_labels=None,
+                 new_label=None):
+  """Remaps labels that have an id in original_labels to new_label.
+
+  Args:
+    labels: rank 1 int32 tensor of shape [num_instance] containing the object
+      classes.
+    original_labels: int list of original labels that should be mapped from.
+    new_label: int label to map to.
+  Returns:
+    Remapped labels.
+  """
+  new_labels = labels
+  for original_label in original_labels:
+    change = tf.where(
+        tf.equal(new_labels, original_label),
+        tf.add(tf.zeros_like(new_labels), new_label - original_label),
+        tf.zeros_like(new_labels))
+    new_labels = tf.add(
+        new_labels,
+        change)
+  new_labels = tf.reshape(new_labels, tf.shape(labels))
+  return new_labels
+
+
+def _flip_boxes_left_right(boxes):
+  """Left-right flip the boxes.
+
+  Args:
+    boxes: Float32 tensor containing the bounding boxes -> [..., 4].
+           Boxes are in normalized form meaning their coordinates vary
+           between [0, 1].
+           Each last dimension is in the form of [ymin, xmin, ymax, xmax].
+
+  Returns:
+    Flipped boxes.
+  """
+  ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=-1)
+  flipped_xmin = tf.subtract(1.0, xmax)
+  flipped_xmax = tf.subtract(1.0, xmin)
+  flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], axis=-1)
+  return flipped_boxes
+
+
+def _flip_boxes_up_down(boxes):
+  """Up-down flip the boxes.
+
+  Args:
+    boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
+           Boxes are in normalized form meaning their coordinates vary
+           between [0, 1].
+           Each row is in the form of [ymin, xmin, ymax, xmax].
+
+  Returns:
+    Flipped boxes.
+  """
+  ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
+  flipped_ymin = tf.subtract(1.0, ymax)
+  flipped_ymax = tf.subtract(1.0, ymin)
+  flipped_boxes = tf.concat([flipped_ymin, xmin, flipped_ymax, xmax], 1)
+  return flipped_boxes
+
+
+def _rot90_boxes(boxes):
+  """Rotate boxes counter-clockwise by 90 degrees.
+
+  Args:
+    boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
+ Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + + Returns: + Rotated boxes. + """ + ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1) + rotated_ymin = tf.subtract(1.0, xmax) + rotated_ymax = tf.subtract(1.0, xmin) + rotated_xmin = ymin + rotated_xmax = ymax + rotated_boxes = tf.concat( + [rotated_ymin, rotated_xmin, rotated_ymax, rotated_xmax], 1) + return rotated_boxes + + +def _flip_masks_left_right(masks): + """Left-right flip masks. + + Args: + masks: rank 3 float32 tensor with shape + [num_instances, height, width] representing instance masks. + + Returns: + flipped masks: rank 3 float32 tensor with shape + [num_instances, height, width] representing instance masks. + """ + return masks[:, :, ::-1] + + +def _flip_masks_up_down(masks): + """Up-down flip masks. + + Args: + masks: rank 3 float32 tensor with shape + [num_instances, height, width] representing instance masks. + + Returns: + flipped masks: rank 3 float32 tensor with shape + [num_instances, height, width] representing instance masks. + """ + return masks[:, ::-1, :] + + +def _rot90_masks(masks): + """Rotate masks counter-clockwise by 90 degrees. + + Args: + masks: rank 3 float32 tensor with shape + [num_instances, height, width] representing instance masks. + + Returns: + rotated masks: rank 3 float32 tensor with shape + [num_instances, height, width] representing instance masks. + """ + masks = tf.transpose(masks, [0, 2, 1]) + return masks[:, ::-1, :] + + +def random_horizontal_flip(image, + boxes=None, + masks=None, + keypoints=None, + keypoint_visibilities=None, + keypoint_flip_permutation=None, + seed=None, + preprocess_vars_cache=None): + """Randomly flips the image and detections horizontally. + + The probability of flipping the image is 50%. + + Args: + image: rank 3 float32 tensor with shape [height, width, channels]. + boxes: (optional) rank 2 float32 tensor with shape [N, 4] + containing the bounding boxes. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. The masks + are of the same height, width as the input `image`. + keypoints: (optional) rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2]. The keypoints are in y-x + normalized coordinates. + keypoint_visibilities: (optional) rank 2 bool tensor with shape + [num_instances, num_keypoints]. + keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip + permutation. + seed: random seed + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same shape as input image. + + If boxes, masks, keypoints, keypoint_visibilities, and + keypoint_flip_permutation are not None,the function also returns the + following tensors. + + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + masks: rank 3 float32 tensor with shape [num_instances, height, width] + containing instance masks. 
+ keypoints: rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2] + keypoint_visibilities: rank 2 bool tensor with shape + [num_instances, num_keypoints]. + + Raises: + ValueError: if keypoints are provided but keypoint_flip_permutation is not. + """ + + def _flip_image(image): + # flip image + image_flipped = tf.image.flip_left_right(image) + return image_flipped + + if keypoints is not None and keypoint_flip_permutation is None: + raise ValueError( + 'keypoints are provided but keypoints_flip_permutation is not provided') + + with tf.name_scope('RandomHorizontalFlip', values=[image, boxes]): + result = [] + # random variable defining whether to do flip or not + generator_func = functools.partial(tf.random_uniform, [], seed=seed) + do_a_flip_random = _get_or_create_preprocess_rand_vars( + generator_func, + preprocessor_cache.PreprocessorCache.HORIZONTAL_FLIP, + preprocess_vars_cache) + do_a_flip_random = tf.greater(do_a_flip_random, 0.5) + + # flip image + image = tf.cond(do_a_flip_random, lambda: _flip_image(image), lambda: image) + result.append(image) + + # flip boxes + if boxes is not None: + boxes = tf.cond(do_a_flip_random, lambda: _flip_boxes_left_right(boxes), + lambda: boxes) + result.append(boxes) + + # flip masks + if masks is not None: + masks = tf.cond(do_a_flip_random, lambda: _flip_masks_left_right(masks), + lambda: masks) + result.append(masks) + + # flip keypoints + if keypoints is not None and keypoint_flip_permutation is not None: + permutation = keypoint_flip_permutation + keypoints = tf.cond( + do_a_flip_random, + lambda: keypoint_ops.flip_horizontal(keypoints, 0.5, permutation), + lambda: keypoints) + result.append(keypoints) + + # flip keypoint visibilities + if (keypoint_visibilities is not None and + keypoint_flip_permutation is not None): + permutation = keypoint_flip_permutation + kpt_flip_perm = keypoint_flip_permutation + keypoint_visibilities = tf.cond( + do_a_flip_random, + lambda: tf.gather(keypoint_visibilities, kpt_flip_perm, axis=1), + lambda: keypoint_visibilities) + result.append(keypoint_visibilities) + + return tuple(result) + + +def random_vertical_flip(image, + boxes=None, + masks=None, + keypoints=None, + keypoint_flip_permutation=None, + seed=None, + preprocess_vars_cache=None): + """Randomly flips the image and detections vertically. + + The probability of flipping the image is 50%. + + Args: + image: rank 3 float32 tensor with shape [height, width, channels]. + boxes: (optional) rank 2 float32 tensor with shape [N, 4] + containing the bounding boxes. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. The masks + are of the same height, width as the input `image`. + keypoints: (optional) rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2]. The keypoints are in y-x + normalized coordinates. + keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip + permutation. + seed: random seed + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same shape as input image. + + If boxes, masks, keypoints, and keypoint_flip_permutation are not None, + the function also returns the following tensors. 
+ + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + masks: rank 3 float32 tensor with shape [num_instances, height, width] + containing instance masks. + keypoints: rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2] + + Raises: + ValueError: if keypoints are provided but keypoint_flip_permutation is not. + """ + + def _flip_image(image): + # flip image + image_flipped = tf.image.flip_up_down(image) + return image_flipped + + if keypoints is not None and keypoint_flip_permutation is None: + raise ValueError( + 'keypoints are provided but keypoints_flip_permutation is not provided') + + with tf.name_scope('RandomVerticalFlip', values=[image, boxes]): + result = [] + # random variable defining whether to do flip or not + generator_func = functools.partial(tf.random_uniform, [], seed=seed) + do_a_flip_random = _get_or_create_preprocess_rand_vars( + generator_func, preprocessor_cache.PreprocessorCache.VERTICAL_FLIP, + preprocess_vars_cache) + do_a_flip_random = tf.greater(do_a_flip_random, 0.5) + + # flip image + image = tf.cond(do_a_flip_random, lambda: _flip_image(image), lambda: image) + result.append(image) + + # flip boxes + if boxes is not None: + boxes = tf.cond(do_a_flip_random, lambda: _flip_boxes_up_down(boxes), + lambda: boxes) + result.append(boxes) + + # flip masks + if masks is not None: + masks = tf.cond(do_a_flip_random, lambda: _flip_masks_up_down(masks), + lambda: masks) + result.append(masks) + + # flip keypoints + if keypoints is not None and keypoint_flip_permutation is not None: + permutation = keypoint_flip_permutation + keypoints = tf.cond( + do_a_flip_random, + lambda: keypoint_ops.flip_vertical(keypoints, 0.5, permutation), + lambda: keypoints) + result.append(keypoints) + + return tuple(result) + + +def random_rotation90(image, + boxes=None, + masks=None, + keypoints=None, + seed=None, + preprocess_vars_cache=None): + """Randomly rotates the image and detections 90 degrees counter-clockwise. + + The probability of rotating the image is 50%. This can be combined with + random_horizontal_flip and random_vertical_flip to produce an output with a + uniform distribution of the eight possible 90 degree rotation / reflection + combinations. + + Args: + image: rank 3 float32 tensor with shape [height, width, channels]. + boxes: (optional) rank 2 float32 tensor with shape [N, 4] + containing the bounding boxes. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. The masks + are of the same height, width as the input `image`. + keypoints: (optional) rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2]. The keypoints are in y-x + normalized coordinates. + seed: random seed + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same shape as input image. + + If boxes, masks, and keypoints, are not None, + the function also returns the following tensors. + + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. 
+ masks: rank 3 float32 tensor with shape [num_instances, height, width] + containing instance masks. + keypoints: rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2] + """ + + def _rot90_image(image): + # flip image + image_rotated = tf.image.rot90(image) + return image_rotated + + with tf.name_scope('RandomRotation90', values=[image, boxes]): + result = [] + + # random variable defining whether to rotate by 90 degrees or not + generator_func = functools.partial(tf.random_uniform, [], seed=seed) + do_a_rot90_random = _get_or_create_preprocess_rand_vars( + generator_func, preprocessor_cache.PreprocessorCache.ROTATION90, + preprocess_vars_cache) + do_a_rot90_random = tf.greater(do_a_rot90_random, 0.5) + + # flip image + image = tf.cond(do_a_rot90_random, lambda: _rot90_image(image), + lambda: image) + result.append(image) + + # flip boxes + if boxes is not None: + boxes = tf.cond(do_a_rot90_random, lambda: _rot90_boxes(boxes), + lambda: boxes) + result.append(boxes) + + # flip masks + if masks is not None: + masks = tf.cond(do_a_rot90_random, lambda: _rot90_masks(masks), + lambda: masks) + result.append(masks) + + # flip keypoints + if keypoints is not None: + keypoints = tf.cond( + do_a_rot90_random, + lambda: keypoint_ops.rot90(keypoints), + lambda: keypoints) + result.append(keypoints) + + return tuple(result) + + +def random_pixel_value_scale(image, + minval=0.9, + maxval=1.1, + seed=None, + preprocess_vars_cache=None): + """Scales each value in the pixels of the image. + + This function scales each pixel independent of the other ones. + For each value in image tensor, draws a random number between + minval and maxval and multiples the values with them. + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 255]. + minval: lower ratio of scaling pixel values. + maxval: upper ratio of scaling pixel values. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same shape as input image. + """ + with tf.name_scope('RandomPixelValueScale', values=[image]): + generator_func = functools.partial( + tf.random_uniform, tf.shape(image), + minval=minval, maxval=maxval, + dtype=tf.float32, seed=seed) + color_coef = _get_or_create_preprocess_rand_vars( + generator_func, + preprocessor_cache.PreprocessorCache.PIXEL_VALUE_SCALE, + preprocess_vars_cache) + + image = tf.multiply(image, color_coef) + image = tf.clip_by_value(image, 0.0, 255.0) + + return image + + +def random_image_scale(image, + masks=None, + min_scale_ratio=0.5, + max_scale_ratio=2.0, + seed=None, + preprocess_vars_cache=None): + """Scales the image size. + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels]. + masks: (optional) rank 3 float32 tensor containing masks with + size [height, width, num_masks]. The value is set to None if there are no + masks. + min_scale_ratio: minimum scaling ratio. + max_scale_ratio: maximum scaling ratio. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same rank as input image. 
+ masks: If masks is not none, resized masks which are the same rank as input + masks will be returned. + """ + with tf.name_scope('RandomImageScale', values=[image]): + result = [] + image_shape = tf.shape(image) + image_height = image_shape[0] + image_width = image_shape[1] + generator_func = functools.partial( + tf.random_uniform, [], + minval=min_scale_ratio, maxval=max_scale_ratio, + dtype=tf.float32, seed=seed) + size_coef = _get_or_create_preprocess_rand_vars( + generator_func, preprocessor_cache.PreprocessorCache.IMAGE_SCALE, + preprocess_vars_cache) + + image_newysize = tf.cast( + tf.multiply(tf.cast(image_height, dtype=tf.float32), size_coef), + dtype=tf.int32) + image_newxsize = tf.cast( + tf.multiply(tf.cast(image_width, dtype=tf.float32), size_coef), + dtype=tf.int32) + image = tf.image.resize_images( + image, [image_newysize, image_newxsize], align_corners=True) + result.append(image) + if masks is not None: + masks = tf.image.resize_images( + masks, [image_newysize, image_newxsize], + method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, + align_corners=True) + result.append(masks) + return tuple(result) + + +def _augment_only_rgb_channels(image, augment_function): + """Augments only the RGB slice of an image with additional channels.""" + rgb_slice = image[:, :, :3] + augmented_rgb_slice = augment_function(rgb_slice) + image = tf.concat([augmented_rgb_slice, image[:, :, 3:]], -1) + return image + + +def random_rgb_to_gray(image, + probability=0.1, + seed=None, + preprocess_vars_cache=None): + """Changes the image from RGB to Grayscale with the given probability. + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 255]. + probability: the probability of returning a grayscale image. + The probability should be a number between [0, 1]. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same shape as input image. + """ + def _image_to_gray(image): + image_gray1 = _rgb_to_grayscale(image) + image_gray3 = tf.image.grayscale_to_rgb(image_gray1) + return image_gray3 + + with tf.name_scope('RandomRGBtoGray', values=[image]): + # random variable defining whether to change to grayscale or not + generator_func = functools.partial(tf.random_uniform, [], seed=seed) + do_gray_random = _get_or_create_preprocess_rand_vars( + generator_func, preprocessor_cache.PreprocessorCache.RGB_TO_GRAY, + preprocess_vars_cache) + + image = tf.cond( + tf.greater(do_gray_random, probability), lambda: image, + lambda: _augment_only_rgb_channels(image, _image_to_gray)) + + return image + + +def random_adjust_brightness(image, + max_delta=0.2, + seed=None, + preprocess_vars_cache=None): + """Randomly adjusts brightness. + + Makes sure the output image is still between 0 and 255. + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 255]. + max_delta: how much to change the brightness. A value between [0, 1). + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same shape as input image. 
+ boxes: boxes which is the same shape as input boxes. + """ + with tf.name_scope('RandomAdjustBrightness', values=[image]): + generator_func = functools.partial(tf.random_uniform, [], + -max_delta, max_delta, seed=seed) + delta = _get_or_create_preprocess_rand_vars( + generator_func, + preprocessor_cache.PreprocessorCache.ADJUST_BRIGHTNESS, + preprocess_vars_cache) + + def _adjust_brightness(image): + image = tf.image.adjust_brightness(image / 255, delta) * 255 + image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0) + return image + + image = _augment_only_rgb_channels(image, _adjust_brightness) + return image + + +def random_adjust_contrast(image, + min_delta=0.8, + max_delta=1.25, + seed=None, + preprocess_vars_cache=None): + """Randomly adjusts contrast. + + Makes sure the output image is still between 0 and 255. + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 255]. + min_delta: see max_delta. + max_delta: how much to change the contrast. Contrast will change with a + value between min_delta and max_delta. This value will be + multiplied to the current contrast of the image. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same shape as input image. + """ + with tf.name_scope('RandomAdjustContrast', values=[image]): + generator_func = functools.partial(tf.random_uniform, [], + min_delta, max_delta, seed=seed) + contrast_factor = _get_or_create_preprocess_rand_vars( + generator_func, + preprocessor_cache.PreprocessorCache.ADJUST_CONTRAST, + preprocess_vars_cache) + + def _adjust_contrast(image): + image = tf.image.adjust_contrast(image / 255, contrast_factor) * 255 + image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0) + return image + image = _augment_only_rgb_channels(image, _adjust_contrast) + return image + + +def random_adjust_hue(image, + max_delta=0.02, + seed=None, + preprocess_vars_cache=None): + """Randomly adjusts hue. + + Makes sure the output image is still between 0 and 255. + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 255]. + max_delta: change hue randomly with a value between 0 and max_delta. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same shape as input image. + """ + with tf.name_scope('RandomAdjustHue', values=[image]): + generator_func = functools.partial(tf.random_uniform, [], + -max_delta, max_delta, seed=seed) + delta = _get_or_create_preprocess_rand_vars( + generator_func, preprocessor_cache.PreprocessorCache.ADJUST_HUE, + preprocess_vars_cache) + def _adjust_hue(image): + image = tf.image.adjust_hue(image / 255, delta) * 255 + image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0) + return image + image = _augment_only_rgb_channels(image, _adjust_hue) + return image + + +def random_adjust_saturation(image, + min_delta=0.8, + max_delta=1.25, + seed=None, + preprocess_vars_cache=None): + """Randomly adjusts saturation. 
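+
+  The saturation is scaled by a single factor drawn uniformly from
+  [min_delta, max_delta] (applied to the RGB channels via
+  tf.image.adjust_saturation).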
+ + Makes sure the output image is still between 0 and 255. + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 255]. + min_delta: see max_delta. + max_delta: how much to change the saturation. Saturation will change with a + value between min_delta and max_delta. This value will be + multiplied to the current saturation of the image. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same shape as input image. + """ + with tf.name_scope('RandomAdjustSaturation', values=[image]): + generator_func = functools.partial(tf.random_uniform, [], + min_delta, max_delta, seed=seed) + saturation_factor = _get_or_create_preprocess_rand_vars( + generator_func, + preprocessor_cache.PreprocessorCache.ADJUST_SATURATION, + preprocess_vars_cache) + def _adjust_saturation(image): + image = tf.image.adjust_saturation(image / 255, saturation_factor) * 255 + image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0) + return image + image = _augment_only_rgb_channels(image, _adjust_saturation) + return image + + +def random_distort_color(image, color_ordering=0, preprocess_vars_cache=None): + """Randomly distorts color. + + Randomly distorts color using a combination of brightness, hue, contrast and + saturation changes. Makes sure the output image is still between 0 and 255. + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 255]. + color_ordering: Python int, a type of distortion (valid values: 0, 1). + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same shape as input image. + + Raises: + ValueError: if color_ordering is not in {0, 1}. + """ + with tf.name_scope('RandomDistortColor', values=[image]): + if color_ordering == 0: + image = random_adjust_brightness( + image, max_delta=32. / 255., + preprocess_vars_cache=preprocess_vars_cache) + image = random_adjust_saturation( + image, min_delta=0.5, max_delta=1.5, + preprocess_vars_cache=preprocess_vars_cache) + image = random_adjust_hue( + image, max_delta=0.2, + preprocess_vars_cache=preprocess_vars_cache) + image = random_adjust_contrast( + image, min_delta=0.5, max_delta=1.5, + preprocess_vars_cache=preprocess_vars_cache) + + elif color_ordering == 1: + image = random_adjust_brightness( + image, max_delta=32. / 255., + preprocess_vars_cache=preprocess_vars_cache) + image = random_adjust_contrast( + image, min_delta=0.5, max_delta=1.5, + preprocess_vars_cache=preprocess_vars_cache) + image = random_adjust_saturation( + image, min_delta=0.5, max_delta=1.5, + preprocess_vars_cache=preprocess_vars_cache) + image = random_adjust_hue( + image, max_delta=0.2, + preprocess_vars_cache=preprocess_vars_cache) + else: + raise ValueError('color_ordering must be in {0, 1}') + return image + + +def random_jitter_boxes(boxes, ratio=0.05, seed=None): + """Randomly jitter boxes in image. + + Args: + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. 
+ Each row is in the form of [ymin, xmin, ymax, xmax]. + ratio: The ratio of the box width and height that the corners can jitter. + For example if the width is 100 pixels and ratio is 0.05, + the corners can jitter up to 5 pixels in the x direction. + seed: random seed. + + Returns: + boxes: boxes which is the same shape as input boxes. + """ + def random_jitter_box(box, ratio, seed): + """Randomly jitter box. + + Args: + box: bounding box [1, 1, 4]. + ratio: max ratio between jittered box and original box, + a number between [0, 0.5]. + seed: random seed. + + Returns: + jittered_box: jittered box. + """ + rand_numbers = tf.random_uniform( + [1, 1, 4], minval=-ratio, maxval=ratio, dtype=tf.float32, seed=seed) + box_width = tf.subtract(box[0, 0, 3], box[0, 0, 1]) + box_height = tf.subtract(box[0, 0, 2], box[0, 0, 0]) + hw_coefs = tf.stack([box_height, box_width, box_height, box_width]) + hw_rand_coefs = tf.multiply(hw_coefs, rand_numbers) + jittered_box = tf.add(box, hw_rand_coefs) + jittered_box = tf.clip_by_value(jittered_box, 0.0, 1.0) + return jittered_box + + with tf.name_scope('RandomJitterBoxes', values=[boxes]): + # boxes are [N, 4]. Lets first make them [N, 1, 1, 4] + boxes_shape = tf.shape(boxes) + boxes = tf.expand_dims(boxes, 1) + boxes = tf.expand_dims(boxes, 2) + + distorted_boxes = tf.map_fn( + lambda x: random_jitter_box(x, ratio, seed), boxes, dtype=tf.float32) + + distorted_boxes = tf.reshape(distorted_boxes, boxes_shape) + + return distorted_boxes + + +def _strict_random_crop_image(image, + boxes, + labels, + label_weights, + label_confidences=None, + multiclass_scores=None, + masks=None, + keypoints=None, + keypoint_visibilities=None, + min_object_covered=1.0, + aspect_ratio_range=(0.75, 1.33), + area_range=(0.1, 1.0), + overlap_thresh=0.3, + clip_boxes=True, + preprocess_vars_cache=None): + """Performs random crop. + + Note: Keypoint coordinates that are outside the crop will be set to NaN, which + is consistent with the original keypoint encoding for non-existing keypoints. + This function always crops the image and is supposed to be used by + `random_crop_image` function which sometimes returns the image unchanged. + + Args: + image: rank 3 float32 tensor containing 1 image -> [height, width, channels] + with pixel values varying between [0, 1]. + boxes: rank 2 float32 tensor containing the bounding boxes with shape + [num_instances, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + labels: rank 1 int32 tensor containing the object classes. + label_weights: float32 tensor of shape [num_instances] representing the + weight for each box. + label_confidences: (optional) float32 tensor of shape [num_instances] + representing the confidence for each box. + multiclass_scores: (optional) float32 tensor of shape + [num_instances, num_classes] representing the score for each box for each + class. + masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. The masks + are of the same height, width as the input `image`. + keypoints: (optional) rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2]. The keypoints are in y-x + normalized coordinates. + keypoint_visibilities: (optional) rank 2 bool tensor with shape + [num_instances, num_keypoints]. + min_object_covered: the cropped image must cover at least this fraction of + at least one of the input bounding boxes. 
+ aspect_ratio_range: allowed range for aspect ratio of cropped image. + area_range: allowed range for area ratio between cropped image and the + original image. + overlap_thresh: minimum overlap thresh with new cropped + image to keep the box. + clip_boxes: whether to clip the boxes to the cropped image. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same rank as input image. + boxes: boxes which is the same rank as input boxes. + Boxes are in normalized form. + labels: new labels. + + If label_weights, multiclass_scores, masks, keypoints, or + keypoint_visibilities is not None, the function also returns: + label_weights: rank 1 float32 tensor with shape [num_instances]. + multiclass_scores: rank 2 float32 tensor with shape + [num_instances, num_classes] + masks: rank 3 float32 tensor with shape [num_instances, height, width] + containing instance masks. + keypoints: rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2] + keypoint_visibilities: rank 2 bool tensor with shape + [num_instances, num_keypoints] + """ + with tf.name_scope('RandomCropImage', values=[image, boxes]): + image_shape = tf.shape(image) + + # boxes are [N, 4]. Lets first make them [N, 1, 4]. + boxes_expanded = tf.expand_dims( + tf.clip_by_value( + boxes, clip_value_min=0.0, clip_value_max=1.0), 1) + + generator_func = functools.partial( + tf.image.sample_distorted_bounding_box, + image_shape, + bounding_boxes=boxes_expanded, + min_object_covered=min_object_covered, + aspect_ratio_range=aspect_ratio_range, + area_range=area_range, + max_attempts=100, + use_image_if_no_bounding_boxes=True) + + # for ssd cropping, each value of min_object_covered has its own + # cached random variable + sample_distorted_bounding_box = _get_or_create_preprocess_rand_vars( + generator_func, + preprocessor_cache.PreprocessorCache.STRICT_CROP_IMAGE, + preprocess_vars_cache, key=min_object_covered) + + im_box_begin, im_box_size, im_box = sample_distorted_bounding_box + im_box_end = im_box_begin + im_box_size + new_image = image[im_box_begin[0]:im_box_end[0], + im_box_begin[1]:im_box_end[1], :] + new_image.set_shape([None, None, image.get_shape()[2]]) + + # [1, 4] + im_box_rank2 = tf.squeeze(im_box, axis=[0]) + # [4] + im_box_rank1 = tf.squeeze(im_box) + + boxlist = box_list.BoxList(boxes) + boxlist.add_field('labels', labels) + + if label_weights is not None: + boxlist.add_field('label_weights', label_weights) + + if label_confidences is not None: + boxlist.add_field('label_confidences', label_confidences) + + if multiclass_scores is not None: + boxlist.add_field('multiclass_scores', multiclass_scores) + + im_boxlist = box_list.BoxList(im_box_rank2) + + # remove boxes that are outside cropped image + boxlist, inside_window_ids = box_list_ops.prune_completely_outside_window( + boxlist, im_box_rank1) + + # remove boxes that are outside image + overlapping_boxlist, keep_ids = box_list_ops.prune_non_overlapping_boxes( + boxlist, im_boxlist, overlap_thresh) + + # change the coordinate of the remaining boxes + new_labels = overlapping_boxlist.get_field('labels') + new_boxlist = box_list_ops.change_coordinate_frame(overlapping_boxlist, + im_box_rank1) + new_boxes = new_boxlist.get() + if clip_boxes: + new_boxes = tf.clip_by_value( + new_boxes, clip_value_min=0.0, clip_value_max=1.0) + + result = [new_image, 
              new_boxes, new_labels]
+
+    if label_weights is not None:
+      new_label_weights = overlapping_boxlist.get_field('label_weights')
+      result.append(new_label_weights)
+
+    if label_confidences is not None:
+      new_label_confidences = overlapping_boxlist.get_field('label_confidences')
+      result.append(new_label_confidences)
+
+    if multiclass_scores is not None:
+      new_multiclass_scores = overlapping_boxlist.get_field('multiclass_scores')
+      result.append(new_multiclass_scores)
+
+    if masks is not None:
+      masks_of_boxes_inside_window = tf.gather(masks, inside_window_ids)
+      masks_of_boxes_completely_inside_window = tf.gather(
+          masks_of_boxes_inside_window, keep_ids)
+      new_masks = masks_of_boxes_completely_inside_window[:, im_box_begin[
+          0]:im_box_end[0], im_box_begin[1]:im_box_end[1]]
+      result.append(new_masks)
+
+    if keypoints is not None:
+      keypoints_of_boxes_inside_window = tf.gather(keypoints, inside_window_ids)
+      keypoints_of_boxes_completely_inside_window = tf.gather(
+          keypoints_of_boxes_inside_window, keep_ids)
+      new_keypoints = keypoint_ops.change_coordinate_frame(
+          keypoints_of_boxes_completely_inside_window, im_box_rank1)
+      if clip_boxes:
+        new_keypoints = keypoint_ops.prune_outside_window(new_keypoints,
+                                                          [0.0, 0.0, 1.0, 1.0])
+      result.append(new_keypoints)
+
+    if keypoint_visibilities is not None:
+      kpt_vis_of_boxes_inside_window = tf.gather(keypoint_visibilities,
+                                                 inside_window_ids)
+      kpt_vis_of_boxes_completely_inside_window = tf.gather(
+          kpt_vis_of_boxes_inside_window, keep_ids)
+      # Keep the pruned visibilities by default so the append below is always
+      # defined, even when clip_boxes is False.
+      new_kpt_visibilities = kpt_vis_of_boxes_completely_inside_window
+      if clip_boxes:
+        # Set any keypoints with NaN coordinates to invisible.
+        new_kpt_visibilities = keypoint_ops.set_keypoint_visibilities(
+            new_keypoints, kpt_vis_of_boxes_completely_inside_window)
+      result.append(new_kpt_visibilities)
+
+    return tuple(result)
+
+
+def random_crop_image(image,
+                      boxes,
+                      labels,
+                      label_weights,
+                      label_confidences=None,
+                      multiclass_scores=None,
+                      masks=None,
+                      keypoints=None,
+                      keypoint_visibilities=None,
+                      min_object_covered=1.0,
+                      aspect_ratio_range=(0.75, 1.33),
+                      area_range=(0.1, 1.0),
+                      overlap_thresh=0.3,
+                      clip_boxes=True,
+                      random_coef=0.0,
+                      seed=None,
+                      preprocess_vars_cache=None):
+  """Randomly crops the image.
+
+  Given the input image and its bounding boxes, this op randomly
+  crops a subimage. Given a user-provided set of input constraints,
+  the crop window is resampled until it satisfies these constraints.
+  If within 100 trials it is unable to find a valid crop, the original
+  image is returned. See the Args section for a description of the input
+  constraints. Both input boxes and returned Boxes are in normalized
+  form (e.g., lie in the unit square [0, 1]).
+  This function will return the original image with probability random_coef.
+
+  Note: Keypoint coordinates that are outside the crop will be set to NaN, which
+  is consistent with the original keypoint encoding for non-existing keypoints.
+  Also, the keypoint visibility will be set to False.
+
+  Args:
+    image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
+      with pixel values varying between [0, 1].
+    boxes: rank 2 float32 tensor containing the bounding boxes with shape
+      [num_instances, 4].
+      Boxes are in normalized form meaning their coordinates vary
+      between [0, 1].
+      Each row is in the form of [ymin, xmin, ymax, xmax].
+    labels: rank 1 int32 tensor containing the object classes.
+    label_weights: float32 tensor of shape [num_instances] representing the
+      weight for each box.
+    label_confidences: (optional) float32 tensor of shape [num_instances].
+ representing the confidence for each box. + multiclass_scores: (optional) float32 tensor of shape + [num_instances, num_classes] representing the score for each box for each + class. + masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. The masks + are of the same height, width as the input `image`. + keypoints: (optional) rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2]. The keypoints are in y-x + normalized coordinates. + keypoint_visibilities: (optional) rank 2 bool tensor with shape + [num_instances, num_keypoints]. + min_object_covered: the cropped image must cover at least this fraction of + at least one of the input bounding boxes. + aspect_ratio_range: allowed range for aspect ratio of cropped image. + area_range: allowed range for area ratio between cropped image and the + original image. + overlap_thresh: minimum overlap thresh with new cropped + image to keep the box. + clip_boxes: whether to clip the boxes to the cropped image. + random_coef: a random coefficient that defines the chance of getting the + original image. If random_coef is 0, we will always get the + cropped image, and if it is 1.0, we will always get the + original image. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: Image shape will be [new_height, new_width, channels]. + boxes: boxes which is the same rank as input boxes. Boxes are in normalized + form. + labels: new labels. + + If label_weights, multiclass_scores, masks, keypoints, keypoint_visibilities + is not None, the function also returns: + label_weights: rank 1 float32 tensor with shape [num_instances]. + multiclass_scores: rank 2 float32 tensor with shape + [num_instances, num_classes] + masks: rank 3 float32 tensor with shape [num_instances, height, width] + containing instance masks. + keypoints: rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2] + keypoint_visibilities: rank 2 bool tensor with shape + [num_instances, num_keypoints] + """ + + def strict_random_crop_image_fn(): + return _strict_random_crop_image( + image, + boxes, + labels, + label_weights, + label_confidences=label_confidences, + multiclass_scores=multiclass_scores, + masks=masks, + keypoints=keypoints, + keypoint_visibilities=keypoint_visibilities, + min_object_covered=min_object_covered, + aspect_ratio_range=aspect_ratio_range, + area_range=area_range, + overlap_thresh=overlap_thresh, + clip_boxes=clip_boxes, + preprocess_vars_cache=preprocess_vars_cache) + + # avoids tf.cond to make faster RCNN training on borg. See b/140057645. 
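+  # When random_coef is effectively zero the crop is always applied, so the
+  # strict crop can be called directly; otherwise a cached uniform draw below
+  # chooses between the cropped outputs and the unmodified inputs, e.g. a
+  # random_coef of 0.25 keeps the original image roughly a quarter of the time.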
+ if random_coef < sys.float_info.min: + result = strict_random_crop_image_fn() + else: + generator_func = functools.partial(tf.random_uniform, [], seed=seed) + do_a_crop_random = _get_or_create_preprocess_rand_vars( + generator_func, preprocessor_cache.PreprocessorCache.CROP_IMAGE, + preprocess_vars_cache) + do_a_crop_random = tf.greater(do_a_crop_random, random_coef) + + outputs = [image, boxes, labels] + + if label_weights is not None: + outputs.append(label_weights) + if label_confidences is not None: + outputs.append(label_confidences) + if multiclass_scores is not None: + outputs.append(multiclass_scores) + if masks is not None: + outputs.append(masks) + if keypoints is not None: + outputs.append(keypoints) + if keypoint_visibilities is not None: + outputs.append(keypoint_visibilities) + + result = tf.cond(do_a_crop_random, strict_random_crop_image_fn, + lambda: tuple(outputs)) + return result + + +def random_pad_image(image, + boxes, + masks=None, + keypoints=None, + min_image_size=None, + max_image_size=None, + pad_color=None, + seed=None, + preprocess_vars_cache=None): + """Randomly pads the image. + + This function randomly pads the image with zeros. The final size of the + padded image will be between min_image_size and max_image_size. + if min_image_size is smaller than the input image size, min_image_size will + be set to the input image size. The same for max_image_size. The input image + will be located at a uniformly random location inside the padded image. + The relative location of the boxes to the original image will remain the same. + + Args: + image: rank 3 float32 tensor containing 1 image -> [height, width, channels] + with pixel values varying between [0, 1]. + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + masks: (optional) rank 3 float32 tensor with shape + [N, height, width] containing instance masks. The masks + are of the same height, width as the input `image`. + keypoints: (optional) rank 3 float32 tensor with shape + [N, num_keypoints, 2]. The keypoints are in y-x normalized + coordinates. + min_image_size: a tensor of size [min_height, min_width], type tf.int32. + If passed as None, will be set to image size + [height, width]. + max_image_size: a tensor of size [max_height, max_width], type tf.int32. + If passed as None, will be set to twice the + image [height * 2, width * 2]. + pad_color: padding color. A rank 1 tensor of [channels] with dtype= + tf.float32. if set as None, it will be set to average color of + the input image. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: Image shape will be [new_height, new_width, channels]. + boxes: boxes which is the same rank as input boxes. Boxes are in normalized + form. 
+ + if masks is not None, the function also returns: + masks: rank 3 float32 tensor with shape [N, new_height, new_width] + if keypoints is not None, the function also returns: + keypoints: rank 3 float32 tensor with shape [N, num_keypoints, 2] + """ + if pad_color is None: + pad_color = tf.reduce_mean(image, axis=[0, 1]) + + image_shape = tf.shape(image) + image_height = image_shape[0] + image_width = image_shape[1] + + if max_image_size is None: + max_image_size = tf.stack([image_height * 2, image_width * 2]) + max_image_size = tf.maximum(max_image_size, + tf.stack([image_height, image_width])) + + if min_image_size is None: + min_image_size = tf.stack([image_height, image_width]) + min_image_size = tf.maximum(min_image_size, + tf.stack([image_height, image_width])) + + target_height = tf.cond( + max_image_size[0] > min_image_size[0], + lambda: _random_integer(min_image_size[0], max_image_size[0], seed), + lambda: max_image_size[0]) + + target_width = tf.cond( + max_image_size[1] > min_image_size[1], + lambda: _random_integer(min_image_size[1], max_image_size[1], seed), + lambda: max_image_size[1]) + + offset_height = tf.cond( + target_height > image_height, + lambda: _random_integer(0, target_height - image_height, seed), + lambda: tf.constant(0, dtype=tf.int32)) + + offset_width = tf.cond( + target_width > image_width, + lambda: _random_integer(0, target_width - image_width, seed), + lambda: tf.constant(0, dtype=tf.int32)) + + gen_func = lambda: (target_height, target_width, offset_height, offset_width) + params = _get_or_create_preprocess_rand_vars( + gen_func, preprocessor_cache.PreprocessorCache.PAD_IMAGE, + preprocess_vars_cache) + target_height, target_width, offset_height, offset_width = params + + new_image = tf.image.pad_to_bounding_box( + image, + offset_height=offset_height, + offset_width=offset_width, + target_height=target_height, + target_width=target_width) + + # Setting color of the padded pixels + image_ones = tf.ones_like(image) + image_ones_padded = tf.image.pad_to_bounding_box( + image_ones, + offset_height=offset_height, + offset_width=offset_width, + target_height=target_height, + target_width=target_width) + image_color_padded = (1.0 - image_ones_padded) * pad_color + new_image += image_color_padded + + # setting boxes + new_window = tf.cast( + tf.stack([ + -offset_height, -offset_width, target_height - offset_height, + target_width - offset_width + ]), + dtype=tf.float32) + new_window /= tf.cast( + tf.stack([image_height, image_width, image_height, image_width]), + dtype=tf.float32) + boxlist = box_list.BoxList(boxes) + new_boxlist = box_list_ops.change_coordinate_frame(boxlist, new_window) + new_boxes = new_boxlist.get() + + result = [new_image, new_boxes] + + if masks is not None: + new_masks = tf.image.pad_to_bounding_box( + masks[:, :, :, tf.newaxis], + offset_height=offset_height, + offset_width=offset_width, + target_height=target_height, + target_width=target_width)[:, :, :, 0] + result.append(new_masks) + + if keypoints is not None: + new_keypoints = keypoint_ops.change_coordinate_frame(keypoints, new_window) + result.append(new_keypoints) + + return tuple(result) + + +def random_absolute_pad_image(image, + boxes, + masks=None, + keypoints=None, + max_height_padding=None, + max_width_padding=None, + pad_color=None, + seed=None, + preprocess_vars_cache=None): + """Randomly pads the image by small absolute amounts. 
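+
+  For instance, max_height_padding=64 and max_width_padding=64 pad a 300x400
+  image to a random size between 300x400 and 363x463, placing the original
+  image at a random offset inside the padded canvas.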
+ + As random_pad_image above, but the padding is of size [0, max_height_padding] + or [0, max_width_padding] instead of padding to a fixed size of + max_height_padding for all images. + + Args: + image: rank 3 float32 tensor containing 1 image -> [height, width, channels] + with pixel values varying between [0, 1]. + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + masks: (optional) rank 3 float32 tensor with shape + [N, height, width] containing instance masks. The masks + are of the same height, width as the input `image`. + keypoints: (optional) rank 3 float32 tensor with shape + [N, num_keypoints, 2]. The keypoints are in y-x normalized + coordinates. + max_height_padding: a scalar tf.int32 tensor denoting the maximum amount of + height padding. The padding will be chosen uniformly at + random from [0, max_height_padding). + max_width_padding: a scalar tf.int32 tensor denoting the maximum amount of + width padding. The padding will be chosen uniformly at + random from [0, max_width_padding). + pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32. + if set as None, it will be set to average color of the input + image. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: Image shape will be [new_height, new_width, channels]. + boxes: boxes which is the same rank as input boxes. Boxes are in normalized + form. + if masks is not None, the function also returns: + masks: rank 3 float32 tensor with shape [N, new_height, new_width] + if keypoints is not None, the function also returns: + keypoints: rank 3 float32 tensor with shape [N, num_keypoints, 2] + """ + min_image_size = tf.shape(image)[:2] + max_image_size = min_image_size + tf.cast( + [max_height_padding, max_width_padding], dtype=tf.int32) + return random_pad_image( + image, + boxes, + masks=masks, + keypoints=keypoints, + min_image_size=min_image_size, + max_image_size=max_image_size, + pad_color=pad_color, + seed=seed, + preprocess_vars_cache=preprocess_vars_cache) + + +def random_crop_pad_image(image, + boxes, + labels, + label_weights, + label_confidences=None, + multiclass_scores=None, + min_object_covered=1.0, + aspect_ratio_range=(0.75, 1.33), + area_range=(0.1, 1.0), + overlap_thresh=0.3, + clip_boxes=True, + random_coef=0.0, + min_padded_size_ratio=(1.0, 1.0), + max_padded_size_ratio=(2.0, 2.0), + pad_color=None, + seed=None, + preprocess_vars_cache=None): + """Randomly crops and pads the image. + + Given an input image and its bounding boxes, this op first randomly crops + the image and then randomly pads the image with background values. Parameters + min_padded_size_ratio and max_padded_size_ratio, determine the range of the + final output image size. Specifically, the final image size will have a size + in the range of min_padded_size_ratio * tf.shape(image) and + max_padded_size_ratio * tf.shape(image). Note that these ratios are with + respect to the size of the original image, so we can't capture the same + effect easily by independently applying RandomCropImage + followed by RandomPadImage. + + Args: + image: rank 3 float32 tensor containing 1 image -> [height, width, channels] + with pixel values varying between [0, 1]. 
+ boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + labels: rank 1 int32 tensor containing the object classes. + label_weights: rank 1 float32 containing the label weights. + label_confidences: rank 1 float32 containing the label confidences. + multiclass_scores: (optional) float32 tensor of shape + [num_instances, num_classes] representing the score for each box for each + class. + min_object_covered: the cropped image must cover at least this fraction of + at least one of the input bounding boxes. + aspect_ratio_range: allowed range for aspect ratio of cropped image. + area_range: allowed range for area ratio between cropped image and the + original image. + overlap_thresh: minimum overlap thresh with new cropped + image to keep the box. + clip_boxes: whether to clip the boxes to the cropped image. + random_coef: a random coefficient that defines the chance of getting the + original image. If random_coef is 0, we will always get the + cropped image, and if it is 1.0, we will always get the + original image. + min_padded_size_ratio: min ratio of padded image height and width to the + input image's height and width. + max_padded_size_ratio: max ratio of padded image height and width to the + input image's height and width. + pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32. + if set as None, it will be set to average color of the randomly + cropped image. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + padded_image: padded image. + padded_boxes: boxes which is the same rank as input boxes. Boxes are in + normalized form. + cropped_labels: cropped labels. + if label_weights is not None also returns: + cropped_label_weights: cropped label weights. + if multiclass_scores is not None also returns: + cropped_multiclass_scores: cropped_multiclass_scores. 
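+
+  Example (illustrative sketch; assumes the four required inputs are tensors,
+    so the cropped label weights are also returned):
+      padded_image, padded_boxes, cropped_labels, cropped_label_weights = (
+          random_crop_pad_image(image, boxes, labels, label_weights,
+                                min_padded_size_ratio=(1.0, 1.0),
+                                max_padded_size_ratio=(2.0, 2.0)))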
+ + """ + image_size = tf.shape(image) + image_height = image_size[0] + image_width = image_size[1] + result = random_crop_image( + image=image, + boxes=boxes, + labels=labels, + label_weights=label_weights, + label_confidences=label_confidences, + multiclass_scores=multiclass_scores, + min_object_covered=min_object_covered, + aspect_ratio_range=aspect_ratio_range, + area_range=area_range, + overlap_thresh=overlap_thresh, + clip_boxes=clip_boxes, + random_coef=random_coef, + seed=seed, + preprocess_vars_cache=preprocess_vars_cache) + + cropped_image, cropped_boxes, cropped_labels = result[:3] + + min_image_size = tf.cast( + tf.cast(tf.stack([image_height, image_width]), dtype=tf.float32) * + min_padded_size_ratio, + dtype=tf.int32) + max_image_size = tf.cast( + tf.cast(tf.stack([image_height, image_width]), dtype=tf.float32) * + max_padded_size_ratio, + dtype=tf.int32) + + padded_image, padded_boxes = random_pad_image( + cropped_image, + cropped_boxes, + min_image_size=min_image_size, + max_image_size=max_image_size, + pad_color=pad_color, + seed=seed, + preprocess_vars_cache=preprocess_vars_cache) + + cropped_padded_output = (padded_image, padded_boxes, cropped_labels) + + index = 3 + if label_weights is not None: + cropped_label_weights = result[index] + cropped_padded_output += (cropped_label_weights,) + index += 1 + + if label_confidences is not None: + cropped_label_confidences = result[index] + cropped_padded_output += (cropped_label_confidences,) + index += 1 + + if multiclass_scores is not None: + cropped_multiclass_scores = result[index] + cropped_padded_output += (cropped_multiclass_scores,) + + return cropped_padded_output + + +def random_crop_to_aspect_ratio(image, + boxes, + labels, + label_weights, + label_confidences=None, + multiclass_scores=None, + masks=None, + keypoints=None, + aspect_ratio=1.0, + overlap_thresh=0.3, + clip_boxes=True, + seed=None, + preprocess_vars_cache=None): + """Randomly crops an image to the specified aspect ratio. + + Randomly crops the a portion of the image such that the crop is of the + specified aspect ratio, and the crop is as large as possible. If the specified + aspect ratio is larger than the aspect ratio of the image, this op will + randomly remove rows from the top and bottom of the image. If the specified + aspect ratio is less than the aspect ratio of the image, this op will randomly + remove cols from the left and right of the image. If the specified aspect + ratio is the same as the aspect ratio of the image, this op will return the + image. + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 1]. + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + labels: rank 1 int32 tensor containing the object classes. + label_weights: float32 tensor of shape [num_instances] representing the + weight for each box. + label_confidences: (optional) float32 tensor of shape [num_instances] + representing the confidence for each box. + multiclass_scores: (optional) float32 tensor of shape + [num_instances, num_classes] representing the score for each box for each + class. + masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. The masks + are of the same height, width as the input `image`. 
+ keypoints: (optional) rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2]. The keypoints are in y-x + normalized coordinates. + aspect_ratio: the aspect ratio of cropped image. + overlap_thresh: minimum overlap thresh with new cropped + image to keep the box. + clip_boxes: whether to clip the boxes to the cropped image. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same rank as input image. + boxes: boxes which is the same rank as input boxes. + Boxes are in normalized form. + labels: new labels. + + If label_weights, masks, keypoints, or multiclass_scores is not None, the + function also returns: + label_weights: rank 1 float32 tensor with shape [num_instances]. + masks: rank 3 float32 tensor with shape [num_instances, height, width] + containing instance masks. + keypoints: rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2] + multiclass_scores: rank 2 float32 tensor with shape + [num_instances, num_classes] + + Raises: + ValueError: If image is not a 3D tensor. + """ + if len(image.get_shape()) != 3: + raise ValueError('Image should be 3D tensor') + + with tf.name_scope('RandomCropToAspectRatio', values=[image]): + image_shape = tf.shape(image) + orig_height = image_shape[0] + orig_width = image_shape[1] + orig_aspect_ratio = tf.cast( + orig_width, dtype=tf.float32) / tf.cast( + orig_height, dtype=tf.float32) + new_aspect_ratio = tf.constant(aspect_ratio, dtype=tf.float32) + + def target_height_fn(): + return tf.cast( + tf.round(tf.cast(orig_width, dtype=tf.float32) / new_aspect_ratio), + dtype=tf.int32) + + target_height = tf.cond(orig_aspect_ratio >= new_aspect_ratio, + lambda: orig_height, target_height_fn) + + def target_width_fn(): + return tf.cast( + tf.round(tf.cast(orig_height, dtype=tf.float32) * new_aspect_ratio), + dtype=tf.int32) + + target_width = tf.cond(orig_aspect_ratio <= new_aspect_ratio, + lambda: orig_width, target_width_fn) + + # either offset_height = 0 and offset_width is randomly chosen from + # [0, offset_width - target_width), or else offset_width = 0 and + # offset_height is randomly chosen from [0, offset_height - target_height) + offset_height = _random_integer(0, orig_height - target_height + 1, seed) + offset_width = _random_integer(0, orig_width - target_width + 1, seed) + + generator_func = lambda: (offset_height, offset_width) + offset_height, offset_width = _get_or_create_preprocess_rand_vars( + generator_func, + preprocessor_cache.PreprocessorCache.CROP_TO_ASPECT_RATIO, + preprocess_vars_cache) + + new_image = tf.image.crop_to_bounding_box( + image, offset_height, offset_width, target_height, target_width) + + im_box = tf.stack([ + tf.cast(offset_height, dtype=tf.float32) / + tf.cast(orig_height, dtype=tf.float32), + tf.cast(offset_width, dtype=tf.float32) / + tf.cast(orig_width, dtype=tf.float32), + tf.cast(offset_height + target_height, dtype=tf.float32) / + tf.cast(orig_height, dtype=tf.float32), + tf.cast(offset_width + target_width, dtype=tf.float32) / + tf.cast(orig_width, dtype=tf.float32) + ]) + + boxlist = box_list.BoxList(boxes) + boxlist.add_field('labels', labels) + + boxlist.add_field('label_weights', label_weights) + + if label_confidences is not None: + boxlist.add_field('label_confidences', label_confidences) + + if multiclass_scores is not None: + 
boxlist.add_field('multiclass_scores', multiclass_scores) + + im_boxlist = box_list.BoxList(tf.expand_dims(im_box, 0)) + + # remove boxes whose overlap with the image is less than overlap_thresh + overlapping_boxlist, keep_ids = box_list_ops.prune_non_overlapping_boxes( + boxlist, im_boxlist, overlap_thresh) + + # change the coordinate of the remaining boxes + new_labels = overlapping_boxlist.get_field('labels') + new_boxlist = box_list_ops.change_coordinate_frame(overlapping_boxlist, + im_box) + if clip_boxes: + new_boxlist = box_list_ops.clip_to_window( + new_boxlist, tf.constant([0.0, 0.0, 1.0, 1.0], tf.float32)) + new_boxes = new_boxlist.get() + + result = [new_image, new_boxes, new_labels] + + new_label_weights = overlapping_boxlist.get_field('label_weights') + result.append(new_label_weights) + + if label_confidences is not None: + new_label_confidences = ( + overlapping_boxlist.get_field('label_confidences')) + result.append(new_label_confidences) + + if multiclass_scores is not None: + new_multiclass_scores = overlapping_boxlist.get_field('multiclass_scores') + result.append(new_multiclass_scores) + + if masks is not None: + masks_inside_window = tf.gather(masks, keep_ids) + masks_box_begin = tf.stack([0, offset_height, offset_width]) + masks_box_size = tf.stack([-1, target_height, target_width]) + new_masks = tf.slice(masks_inside_window, masks_box_begin, masks_box_size) + result.append(new_masks) + + if keypoints is not None: + keypoints_inside_window = tf.gather(keypoints, keep_ids) + new_keypoints = keypoint_ops.change_coordinate_frame( + keypoints_inside_window, im_box) + if clip_boxes: + new_keypoints = keypoint_ops.prune_outside_window(new_keypoints, + [0.0, 0.0, 1.0, 1.0]) + result.append(new_keypoints) + + return tuple(result) + + +def random_pad_to_aspect_ratio(image, + boxes, + masks=None, + keypoints=None, + aspect_ratio=1.0, + min_padded_size_ratio=(1.0, 1.0), + max_padded_size_ratio=(2.0, 2.0), + seed=None, + preprocess_vars_cache=None): + """Randomly zero pads an image to the specified aspect ratio. + + Pads the image so that the resulting image will have the specified aspect + ratio without scaling less than the min_padded_size_ratio or more than the + max_padded_size_ratio. If the min_padded_size_ratio or max_padded_size_ratio + is lower than what is possible to maintain the aspect ratio, then this method + will use the least padding to achieve the specified aspect ratio. + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 1]. + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. The masks + are of the same height, width as the input `image`. + keypoints: (optional) rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2]. The keypoints are in y-x + normalized coordinates. + aspect_ratio: aspect ratio of the final image. + min_padded_size_ratio: min ratio of padded image height and width to the + input image's height and width. + max_padded_size_ratio: max ratio of padded image height and width to the + input image's height and width. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. 
If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same rank as input image. + boxes: boxes which is the same rank as input boxes. + Boxes are in normalized form. + labels: new labels. + + If masks, or keypoints is not None, the function also returns: + masks: rank 3 float32 tensor with shape [num_instances, height, width] + containing instance masks. + keypoints: rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2] + + Raises: + ValueError: If image is not a 3D tensor. + """ + if len(image.get_shape()) != 3: + raise ValueError('Image should be 3D tensor') + + with tf.name_scope('RandomPadToAspectRatio', values=[image]): + image_shape = tf.shape(image) + image_height = tf.cast(image_shape[0], dtype=tf.float32) + image_width = tf.cast(image_shape[1], dtype=tf.float32) + image_aspect_ratio = image_width / image_height + new_aspect_ratio = tf.constant(aspect_ratio, dtype=tf.float32) + target_height = tf.cond( + image_aspect_ratio <= new_aspect_ratio, + lambda: image_height, + lambda: image_width / new_aspect_ratio) + target_width = tf.cond( + image_aspect_ratio >= new_aspect_ratio, + lambda: image_width, + lambda: image_height * new_aspect_ratio) + + min_height = tf.maximum( + min_padded_size_ratio[0] * image_height, target_height) + min_width = tf.maximum( + min_padded_size_ratio[1] * image_width, target_width) + max_height = tf.maximum( + max_padded_size_ratio[0] * image_height, target_height) + max_width = tf.maximum( + max_padded_size_ratio[1] * image_width, target_width) + + max_scale = tf.minimum(max_height / target_height, max_width / target_width) + min_scale = tf.minimum( + max_scale, + tf.maximum(min_height / target_height, min_width / target_width)) + + generator_func = functools.partial(tf.random_uniform, [], + min_scale, max_scale, seed=seed) + scale = _get_or_create_preprocess_rand_vars( + generator_func, + preprocessor_cache.PreprocessorCache.PAD_TO_ASPECT_RATIO, + preprocess_vars_cache) + + target_height = tf.round(scale * target_height) + target_width = tf.round(scale * target_width) + + new_image = tf.image.pad_to_bounding_box( + image, 0, 0, tf.cast(target_height, dtype=tf.int32), + tf.cast(target_width, dtype=tf.int32)) + + im_box = tf.stack([ + 0.0, + 0.0, + target_height / image_height, + target_width / image_width + ]) + boxlist = box_list.BoxList(boxes) + new_boxlist = box_list_ops.change_coordinate_frame(boxlist, im_box) + new_boxes = new_boxlist.get() + + result = [new_image, new_boxes] + + if masks is not None: + new_masks = tf.expand_dims(masks, -1) + new_masks = tf.image.pad_to_bounding_box( + new_masks, 0, 0, tf.cast(target_height, dtype=tf.int32), + tf.cast(target_width, dtype=tf.int32)) + new_masks = tf.squeeze(new_masks, [-1]) + result.append(new_masks) + + if keypoints is not None: + new_keypoints = keypoint_ops.change_coordinate_frame(keypoints, im_box) + result.append(new_keypoints) + + return tuple(result) + + +def random_black_patches(image, + max_black_patches=10, + probability=0.5, + size_to_image_ratio=0.1, + random_seed=None, + preprocess_vars_cache=None): + """Randomly adds some black patches to the image. + + This op adds up to max_black_patches square black patches of a fixed size + to the image where size is specified via the size_to_image_ratio parameter. + + Args: + image: rank 3 float32 tensor containing 1 image -> [height, width, channels] + with pixel values varying between [0, 1]. 
+ max_black_patches: number of times that the function tries to add a + black box to the image. + probability: at each try, what is the chance of adding a box. + size_to_image_ratio: Determines the ratio of the size of the black patches + to the size of the image. + box_size = size_to_image_ratio * + min(image_width, image_height) + random_seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image + """ + def add_black_patch_to_image(image, idx): + """Function for adding one patch to the image. + + Args: + image: image + idx: counter for number of patches that could have been added + + Returns: + image with a randomly added black box + """ + image_shape = tf.shape(image) + image_height = image_shape[0] + image_width = image_shape[1] + box_size = tf.cast( + tf.multiply( + tf.minimum( + tf.cast(image_height, dtype=tf.float32), + tf.cast(image_width, dtype=tf.float32)), size_to_image_ratio), + dtype=tf.int32) + + generator_func = functools.partial(tf.random_uniform, [], minval=0.0, + maxval=(1.0 - size_to_image_ratio), + seed=random_seed) + normalized_y_min = _get_or_create_preprocess_rand_vars( + generator_func, + preprocessor_cache.PreprocessorCache.ADD_BLACK_PATCH, + preprocess_vars_cache, key=str(idx) + 'y') + normalized_x_min = _get_or_create_preprocess_rand_vars( + generator_func, + preprocessor_cache.PreprocessorCache.ADD_BLACK_PATCH, + preprocess_vars_cache, key=str(idx) + 'x') + + y_min = tf.cast( + normalized_y_min * tf.cast(image_height, dtype=tf.float32), + dtype=tf.int32) + x_min = tf.cast( + normalized_x_min * tf.cast(image_width, dtype=tf.float32), + dtype=tf.int32) + black_box = tf.ones([box_size, box_size, 3], dtype=tf.float32) + mask = 1.0 - tf.image.pad_to_bounding_box(black_box, y_min, x_min, + image_height, image_width) + image = tf.multiply(image, mask) + return image + + with tf.name_scope('RandomBlackPatchInImage', values=[image]): + for idx in range(max_black_patches): + generator_func = functools.partial(tf.random_uniform, [], + minval=0.0, maxval=1.0, + dtype=tf.float32, seed=random_seed) + random_prob = _get_or_create_preprocess_rand_vars( + generator_func, + preprocessor_cache.PreprocessorCache.BLACK_PATCHES, + preprocess_vars_cache, key=idx) + image = tf.cond( + tf.greater(random_prob, probability), lambda: image, + functools.partial(add_black_patch_to_image, image=image, idx=idx)) + return image + + +def random_jpeg_quality(image, + min_jpeg_quality=0, + max_jpeg_quality=100, + random_coef=0.0, + seed=None, + preprocess_vars_cache=None): + """Randomly encode the image to a random JPEG quality level. + + Args: + image: rank 3 float32 tensor with shape [height, width, channels] and + values in the range [0, 255]. + min_jpeg_quality: An int for the lower bound for selecting a random jpeg + quality level. + max_jpeg_quality: An int for the upper bound for selecting a random jpeg + quality level. + random_coef: a random coefficient that defines the chance of getting the + original image. If random_coef is 0, we will always get the encoded image, + and if it is 1.0, we will always get the original image. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this function is called + multiple times with the same non-null cache, it will perform + deterministically. 
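+
+  Example usage (an illustrative sketch, not part of the original docstring;
+  `image` is assumed to be a [height, width, 3] float32 tensor with values in
+  [0, 255]):
+    distorted_image = random_jpeg_quality(image, min_jpeg_quality=50,
+                                          max_jpeg_quality=95)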
+ + Returns: + image: image which is the same shape as input image. + """ + def _adjust_jpeg_quality(): + """Encodes the image as jpeg with a random quality and then decodes.""" + generator_func = functools.partial( + tf.random_uniform, [], + minval=min_jpeg_quality, + maxval=max_jpeg_quality, + dtype=tf.int32, + seed=seed) + quality = _get_or_create_preprocess_rand_vars( + generator_func, preprocessor_cache.PreprocessorCache.JPEG_QUALITY, + preprocess_vars_cache, key='quality') + + # Need to convert to uint8 before calling adjust_jpeg_quality since it + # assumes that float features are in the range [0, 1], where herein the + # range is [0, 255]. + image_uint8 = tf.cast(image, tf.uint8) + adjusted_image = tf.image.adjust_jpeg_quality(image_uint8, quality) + return tf.cast(adjusted_image, tf.float32) + + with tf.name_scope('RandomJpegQuality', values=[image]): + generator_func = functools.partial(tf.random_uniform, [], seed=seed) + do_encoding_random = _get_or_create_preprocess_rand_vars( + generator_func, preprocessor_cache.PreprocessorCache.JPEG_QUALITY, + preprocess_vars_cache) + do_encoding_random = tf.greater_equal(do_encoding_random, random_coef) + image = tf.cond(do_encoding_random, _adjust_jpeg_quality, + lambda: tf.cast(image, tf.float32)) + + return image + + +def random_downscale_to_target_pixels(image, + masks=None, + min_target_pixels=300000, + max_target_pixels=800000, + random_coef=0.0, + seed=None, + preprocess_vars_cache=None): + """Randomly downscales the image to a target number of pixels. + + If the image contains less than the chosen target number of pixels, it will + not be downscaled. + + Args: + image: Rank 3 float32 tensor with shape [height, width, channels] and + values in the range [0, 255]. + masks: (optional) Rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. The masks are of + the same height, width as the input `image`. + min_target_pixels: Integer. An inclusive lower bound for for the target + number of pixels. + max_target_pixels: Integer. An exclusive upper bound for for the target + number of pixels. + random_coef: Float. Random coefficient that defines the chance of getting + the original image. If random_coef is 0, we will always apply downscaling, + and if it is 1.0, we will always get the original image. + seed: (optional) Integer. Random seed. + preprocess_vars_cache: (optional) PreprocessorCache object that records + previously performed augmentations. Updated in-place. If this function is + called multiple times with the same non-null cache, it will perform + deterministically. + + Returns: + Tuple with elements: + image: Resized image which is the same rank as input image. + masks: If masks is not None, resized masks which are the same rank as + the input masks. + + Raises: + ValueError: If min_target_pixels or max_target_pixels are not positive. 
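+
+  Example usage (an illustrative sketch, not part of the original docstring;
+  `image` is assumed to be a [height, width, 3] float32 tensor with values in
+  [0, 255]; note that a one-element tuple is returned when `masks` is None):
+    downscaled_image, = random_downscale_to_target_pixels(
+        image, min_target_pixels=300000, max_target_pixels=500000)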
+ """ + if min_target_pixels <= 0: + raise ValueError('Minimum target pixels must be positive') + if max_target_pixels <= 0: + raise ValueError('Maximum target pixels must be positive') + + def _resize_image_to_target(target_height, target_width): + # pylint: disable=unbalanced-tuple-unpacking + new_image, _ = resize_image(image, None, target_height, target_width) + return (new_image,) + + def _resize_image_and_masks_to_target(target_height, target_width): + # pylint: disable=unbalanced-tuple-unpacking + new_image, new_masks, _ = resize_image(image, masks, target_height, + target_width) + return new_image, new_masks + + with tf.name_scope('RandomDownscaleToTargetPixels', values=[image]): + generator_fn = functools.partial(tf.random_uniform, [], seed=seed) + do_downscale_random = _get_or_create_preprocess_rand_vars( + generator_fn, + preprocessor_cache.PreprocessorCache.DOWNSCALE_TO_TARGET_PIXELS, + preprocess_vars_cache) + do_downscale_random = tf.greater_equal(do_downscale_random, random_coef) + + generator_fn = functools.partial( + tf.random_uniform, [], + minval=min_target_pixels, + maxval=max_target_pixels, + dtype=tf.int32, + seed=seed) + target_pixels = _get_or_create_preprocess_rand_vars( + generator_fn, + preprocessor_cache.PreprocessorCache.DOWNSCALE_TO_TARGET_PIXELS, + preprocess_vars_cache, + key='target_pixels') + + image_shape = tf.shape(image) + image_height = image_shape[0] + image_width = image_shape[1] + image_pixels = image_height * image_width + scale_factor = tf.sqrt( + tf.cast(target_pixels, dtype=tf.float32) / + tf.cast(image_pixels, dtype=tf.float32)) + target_height = tf.cast( + scale_factor * tf.cast(image_height, dtype=tf.float32), dtype=tf.int32) + target_width = tf.cast( + scale_factor * tf.cast(image_width, dtype=tf.float32), dtype=tf.int32) + image_larger_than_target = tf.greater(image_pixels, target_pixels) + + should_apply_resize = tf.logical_and(do_downscale_random, + image_larger_than_target) + if masks is not None: + resize_fn = functools.partial(_resize_image_and_masks_to_target, + target_height, target_width) + return tf.cond(should_apply_resize, resize_fn, + lambda: (tf.cast(image, dtype=tf.float32), masks)) + else: + resize_fn = lambda: _resize_image_to_target(target_height, target_width) + return tf.cond(should_apply_resize, resize_fn, + lambda: (tf.cast(image, dtype=tf.float32),)) + + +def random_patch_gaussian(image, + min_patch_size=1, + max_patch_size=250, + min_gaussian_stddev=0.0, + max_gaussian_stddev=1.0, + random_coef=0.0, + seed=None, + preprocess_vars_cache=None): + """Randomly applies gaussian noise to a random patch on the image. + + The gaussian noise is applied to the image with values scaled to the range + [0.0, 1.0]. The result of applying gaussian noise to the scaled image is + clipped to be within the range [0.0, 1.0], equivalent to the range + [0.0, 255.0] after rescaling the image back. + + See "Improving Robustness Without Sacrificing Accuracy with Patch Gaussian + Augmentation " by Lopes et al., 2019, for further details. + https://arxiv.org/abs/1906.02611 + + Args: + image: Rank 3 float32 tensor with shape [height, width, channels] and + values in the range [0.0, 255.0]. + min_patch_size: Integer. An inclusive lower bound for the patch size. + max_patch_size: Integer. An exclusive upper bound for the patch size. + min_gaussian_stddev: Float. An inclusive lower bound for the standard + deviation of the gaussian noise. + max_gaussian_stddev: Float. An exclusive upper bound for the standard + deviation of the gaussian noise. 
+ random_coef: Float. Random coefficient that defines the chance of getting + the original image. If random_coef is 0.0, we will always apply + downscaling, and if it is 1.0, we will always get the original image. + seed: (optional) Integer. Random seed. + preprocess_vars_cache: (optional) PreprocessorCache object that records + previously performed augmentations. Updated in-place. If this function is + called multiple times with the same non-null cache, it will perform + deterministically. + + Returns: + Rank 3 float32 tensor with same shape as the input image and with gaussian + noise applied within a random patch. + + Raises: + ValueError: If min_patch_size is < 1. + """ + if min_patch_size < 1: + raise ValueError('Minimum patch size must be >= 1.') + + get_or_create_rand_vars_fn = functools.partial( + _get_or_create_preprocess_rand_vars, + function_id=preprocessor_cache.PreprocessorCache.PATCH_GAUSSIAN, + preprocess_vars_cache=preprocess_vars_cache) + + def _apply_patch_gaussian(image): + """Applies a patch gaussian with random size, location, and stddev.""" + patch_size = get_or_create_rand_vars_fn( + functools.partial( + tf.random_uniform, [], + minval=min_patch_size, + maxval=max_patch_size, + dtype=tf.int32, + seed=seed), + key='patch_size') + gaussian_stddev = get_or_create_rand_vars_fn( + functools.partial( + tf.random_uniform, [], + minval=min_gaussian_stddev, + maxval=max_gaussian_stddev, + dtype=tf.float32, + seed=seed), + key='gaussian_stddev') + + image_shape = tf.shape(image) + y = get_or_create_rand_vars_fn( + functools.partial( + tf.random_uniform, [], + minval=0, + maxval=image_shape[0], + dtype=tf.int32, + seed=seed), + key='y') + x = get_or_create_rand_vars_fn( + functools.partial( + tf.random_uniform, [], + minval=0, + maxval=image_shape[1], + dtype=tf.int32, + seed=seed), + key='x') + gaussian = get_or_create_rand_vars_fn( + functools.partial( + tf.random.normal, + image_shape, + stddev=gaussian_stddev, + dtype=tf.float32, + seed=seed), + key='gaussian') + + scaled_image = image / 255.0 + image_plus_gaussian = tf.clip_by_value(scaled_image + gaussian, 0.0, 1.0) + patch_mask = patch_ops.get_patch_mask(y, x, patch_size, image_shape) + patch_mask = tf.expand_dims(patch_mask, -1) + patch_mask = tf.tile(patch_mask, [1, 1, image_shape[2]]) + patched_image = tf.where(patch_mask, image_plus_gaussian, scaled_image) + return patched_image * 255.0 + + with tf.name_scope('RandomPatchGaussian', values=[image]): + image = tf.cast(image, tf.float32) + patch_gaussian_random = get_or_create_rand_vars_fn( + functools.partial(tf.random_uniform, [], seed=seed)) + do_patch_gaussian = tf.greater_equal(patch_gaussian_random, random_coef) + image = tf.cond(do_patch_gaussian, + lambda: _apply_patch_gaussian(image), + lambda: image) + return image + + +# TODO(barretzoph): Put in AutoAugment Paper link when paper is live. +def autoaugment_image(image, boxes, policy_name='v0'): + """Apply an autoaugment policy to the image and boxes. + + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 255]. + boxes: rank 2 float32 tensor containing the bounding boxes with shape + [num_instances, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + policy_name: The name of the AutoAugment policy to use. The available + options are `v0`, `v1`, `v2`, `v3` and `test`. 
`v0` is the policy used for + all of the results in the paper and was found to achieve the best results + on the COCO dataset. `v1`, `v2` and `v3` are additional good policies + found on the COCO dataset that have slight variation in what operations + were used during the search procedure along with how many operations are + applied in parallel to a single image (2 vs 3). + + + Returns: + image: the augmented image. + boxes: boxes which is the same rank as input boxes. Boxes are in normalized + form. boxes will have been augmented along with image. + """ + return autoaugment_utils.distort_image_with_autoaugment( + image, boxes, policy_name) + + +def image_to_float(image): + """Used in Faster R-CNN. Casts image pixel values to float. + + Args: + image: input image which might be in tf.uint8 or sth else format + + Returns: + image: image in tf.float32 format. + """ + with tf.name_scope('ImageToFloat', values=[image]): + image = tf.cast(image, dtype=tf.float32) + return image + + +def random_resize_method(image, target_size, preprocess_vars_cache=None): + """Uses a random resize method to resize the image to target size. + + Args: + image: a rank 3 tensor. + target_size: a list of [target_height, target_width] + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + resized image. + """ + + resized_image = _apply_with_random_selector( + image, + lambda x, method: tf.image.resize_images(x, target_size, method), + num_cases=4, + preprocess_vars_cache=preprocess_vars_cache, + key=preprocessor_cache.PreprocessorCache.RESIZE_METHOD) + + return resized_image + + +def resize_to_range(image, + masks=None, + min_dimension=None, + max_dimension=None, + method=tf.image.ResizeMethod.BILINEAR, + align_corners=False, + pad_to_max_dimension=False, + per_channel_pad_value=(0, 0, 0)): + """Resizes an image so its dimensions are within the provided value. + + The output size can be described by two cases: + 1. If the image can be rescaled so its minimum dimension is equal to the + provided value without the other dimension exceeding max_dimension, + then do so. + 2. Otherwise, resize so the largest dimension is equal to max_dimension. + + Args: + image: A 3D tensor of shape [height, width, channels] + masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. + min_dimension: (optional) (scalar) desired size of the smaller image + dimension. + max_dimension: (optional) (scalar) maximum allowed size + of the larger image dimension. + method: (optional) interpolation method used in resizing. Defaults to + BILINEAR. + align_corners: bool. If true, exactly align all 4 corners of the input + and output. Defaults to False. + pad_to_max_dimension: Whether to resize the image and pad it with zeros + so the resulting image is of the spatial size + [max_dimension, max_dimension]. If masks are included they are padded + similarly. + per_channel_pad_value: A tuple of per-channel scalar value to use for + padding. By default pads zeros. + + Returns: + Note that the position of the resized_image_shape changes based on whether + masks are present. + resized_image: A 3D tensor of shape [new_height, new_width, channels], + where the image has been resized (with bilinear interpolation) so that + min(new_height, new_width) == min_dimension or + max(new_height, new_width) == max_dimension. 
+ resized_masks: If masks is not None, also outputs masks. A 3D tensor of + shape [num_instances, new_height, new_width]. + resized_image_shape: A 1D tensor of shape [3] containing shape of the + resized image. + + Raises: + ValueError: if the image is not a 3D tensor. + """ + if len(image.get_shape()) != 3: + raise ValueError('Image should be 3D tensor') + + def _resize_landscape_image(image): + # resize a landscape image + return tf.image.resize_images( + image, tf.stack([min_dimension, max_dimension]), method=method, + align_corners=align_corners, preserve_aspect_ratio=True) + + def _resize_portrait_image(image): + # resize a portrait image + return tf.image.resize_images( + image, tf.stack([max_dimension, min_dimension]), method=method, + align_corners=align_corners, preserve_aspect_ratio=True) + + with tf.name_scope('ResizeToRange', values=[image, min_dimension]): + if image.get_shape().is_fully_defined(): + if image.get_shape()[0] < image.get_shape()[1]: + new_image = _resize_landscape_image(image) + else: + new_image = _resize_portrait_image(image) + new_size = tf.constant(new_image.get_shape().as_list()) + else: + new_image = tf.cond( + tf.less(tf.shape(image)[0], tf.shape(image)[1]), + lambda: _resize_landscape_image(image), + lambda: _resize_portrait_image(image)) + new_size = tf.shape(new_image) + + if pad_to_max_dimension: + channels = tf.unstack(new_image, axis=2) + if len(channels) != len(per_channel_pad_value): + raise ValueError('Number of channels must be equal to the length of ' + 'per-channel pad value.') + new_image = tf.stack( + [ + tf.pad( + channels[i], [[0, max_dimension - new_size[0]], + [0, max_dimension - new_size[1]]], + constant_values=per_channel_pad_value[i]) + for i in range(len(channels)) + ], + axis=2) + new_image.set_shape([max_dimension, max_dimension, 3]) + + result = [new_image] + if masks is not None: + new_masks = tf.expand_dims(masks, 3) + new_masks = tf.image.resize_images( + new_masks, + new_size[:-1], + method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, + align_corners=align_corners) + if pad_to_max_dimension: + new_masks = tf.image.pad_to_bounding_box( + new_masks, 0, 0, max_dimension, max_dimension) + new_masks = tf.squeeze(new_masks, 3) + result.append(new_masks) + + result.append(new_size) + return result + + +def _get_image_info(image): + """Returns the height, width and number of channels in the image.""" + image_height = tf.shape(image)[0] + image_width = tf.shape(image)[1] + num_channels = tf.shape(image)[2] + return (image_height, image_width, num_channels) + + +# TODO(alirezafathi): Make sure the static shapes are preserved. +def resize_to_min_dimension(image, masks=None, min_dimension=600, + method=tf.image.ResizeMethod.BILINEAR): + """Resizes image and masks given the min size maintaining the aspect ratio. + + If one of the image dimensions is smaller than min_dimension, it will scale + the image such that its smallest dimension is equal to min_dimension. + Otherwise, will keep the image size as is. + + Args: + image: a tensor of size [height, width, channels]. + masks: (optional) a tensors of size [num_instances, height, width]. + min_dimension: minimum image dimension. + method: (optional) interpolation method used in resizing. Defaults to + BILINEAR. + + Returns: + An array containing resized_image, resized_masks, and resized_image_shape. + Note that the position of the resized_image_shape changes based on whether + masks are present. + resized_image: A tensor of size [new_height, new_width, channels]. 
+ resized_masks: If masks is not None, also outputs masks. A 3D tensor of + shape [num_instances, new_height, new_width] + resized_image_shape: A 1D tensor of shape [3] containing the shape of the + resized image. + + Raises: + ValueError: if the image is not a 3D tensor. + """ + if len(image.get_shape()) != 3: + raise ValueError('Image should be 3D tensor') + + with tf.name_scope('ResizeGivenMinDimension', values=[image, min_dimension]): + (image_height, image_width, num_channels) = _get_image_info(image) + min_image_dimension = tf.minimum(image_height, image_width) + min_target_dimension = tf.maximum(min_image_dimension, min_dimension) + target_ratio = tf.cast(min_target_dimension, dtype=tf.float32) / tf.cast( + min_image_dimension, dtype=tf.float32) + target_height = tf.cast( + tf.cast(image_height, dtype=tf.float32) * target_ratio, dtype=tf.int32) + target_width = tf.cast( + tf.cast(image_width, dtype=tf.float32) * target_ratio, dtype=tf.int32) + image = tf.image.resize_images( + tf.expand_dims(image, axis=0), size=[target_height, target_width], + method=method, + align_corners=True) + result = [tf.squeeze(image, axis=0)] + + if masks is not None: + masks = tf.image.resize_nearest_neighbor( + tf.expand_dims(masks, axis=3), + size=[target_height, target_width], + align_corners=True) + result.append(tf.squeeze(masks, axis=3)) + + result.append(tf.stack([target_height, target_width, num_channels])) + return result + + +def resize_to_max_dimension(image, masks=None, max_dimension=600, + method=tf.image.ResizeMethod.BILINEAR): + """Resizes image and masks given the max size maintaining the aspect ratio. + + If one of the image dimensions is greater than max_dimension, it will scale + the image such that its largest dimension is equal to max_dimension. + Otherwise, will keep the image size as is. + + Args: + image: a tensor of size [height, width, channels]. + masks: (optional) a tensors of size [num_instances, height, width]. + max_dimension: maximum image dimension. + method: (optional) interpolation method used in resizing. Defaults to + BILINEAR. + + Returns: + An array containing resized_image, resized_masks, and resized_image_shape. + Note that the position of the resized_image_shape changes based on whether + masks are present. + resized_image: A tensor of size [new_height, new_width, channels]. + resized_masks: If masks is not None, also outputs masks. A 3D tensor of + shape [num_instances, new_height, new_width] + resized_image_shape: A 1D tensor of shape [3] containing the shape of the + resized image. + + Raises: + ValueError: if the image is not a 3D tensor. 
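+
+  Example usage (an illustrative sketch, not part of the original docstring;
+  `image` is assumed to be a [height, width, channels] tensor):
+    resized_image, resized_image_shape = resize_to_max_dimension(
+        image, max_dimension=1024)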
+ """ + if len(image.get_shape()) != 3: + raise ValueError('Image should be 3D tensor') + + with tf.name_scope('ResizeGivenMaxDimension', values=[image, max_dimension]): + (image_height, image_width, num_channels) = _get_image_info(image) + max_image_dimension = tf.maximum(image_height, image_width) + max_target_dimension = tf.minimum(max_image_dimension, max_dimension) + target_ratio = tf.cast(max_target_dimension, dtype=tf.float32) / tf.cast( + max_image_dimension, dtype=tf.float32) + target_height = tf.cast( + tf.cast(image_height, dtype=tf.float32) * target_ratio, dtype=tf.int32) + target_width = tf.cast( + tf.cast(image_width, dtype=tf.float32) * target_ratio, dtype=tf.int32) + image = tf.image.resize_images( + tf.expand_dims(image, axis=0), size=[target_height, target_width], + method=method, + align_corners=True) + result = [tf.squeeze(image, axis=0)] + + if masks is not None: + masks = tf.image.resize_nearest_neighbor( + tf.expand_dims(masks, axis=3), + size=[target_height, target_width], + align_corners=True) + result.append(tf.squeeze(masks, axis=3)) + + result.append(tf.stack([target_height, target_width, num_channels])) + return result + + +def resize_pad_to_multiple(image, masks=None, multiple=1): + """Resize an image by zero padding it to the specified multiple. + + For example, with an image of size (101, 199, 3) and multiple=4, + the returned image will have shape (104, 200, 3). + + Args: + image: a tensor of shape [height, width, channels] + masks: (optional) a tensor of shape [num_instances, height, width] + multiple: int, the multiple to which the height and width of the input + will be padded. + + Returns: + resized_image: The image with 0 padding applied, such that output + dimensions are divisible by `multiple` + resized_masks: If masks are given, they are resized to the same + spatial dimensions as the image. + resized_image_shape: An integer tensor of shape [3] which holds + the shape of the input image. + + """ + + if len(image.get_shape()) != 3: + raise ValueError('Image should be 3D tensor') + + with tf.name_scope('ResizePadToMultiple', values=[image, multiple]): + image_height, image_width, num_channels = _get_image_info(image) + image = image[tf.newaxis, :, :, :] + image = ops.pad_to_multiple(image, multiple)[0, :, :, :] + + if masks is not None: + masks = tf.transpose(masks, (1, 2, 0)) + masks = masks[tf.newaxis, :, :, :] + + masks = ops.pad_to_multiple(masks, multiple)[0, :, :, :] + masks = tf.transpose(masks, (2, 0, 1)) + + if masks is None: + return image, tf.stack([image_height, image_width, num_channels]) + else: + return image, masks, tf.stack([image_height, image_width, num_channels]) + + +def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None): + """Scales boxes from normalized to pixel coordinates. + + Args: + image: A 3D float32 tensor of shape [height, width, channels]. + boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding + boxes in normalized coordinates. Each row is of the form + [ymin, xmin, ymax, xmax]. + keypoints: (optional) rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized + coordinates. + + Returns: + image: unchanged input image. + scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the + bounding boxes in pixel coordinates. + scaled_keypoints: a 3D float32 tensor with shape + [num_instances, num_keypoints, 2] containing the keypoints in pixel + coordinates. 
+ """ + boxlist = box_list.BoxList(boxes) + image_height = tf.shape(image)[0] + image_width = tf.shape(image)[1] + scaled_boxes = box_list_ops.scale(boxlist, image_height, image_width).get() + result = [image, scaled_boxes] + if keypoints is not None: + scaled_keypoints = keypoint_ops.scale(keypoints, image_height, image_width) + result.append(scaled_keypoints) + return tuple(result) + + +# TODO(alirezafathi): Investigate if instead the function should return None if +# masks is None. +# pylint: disable=g-doc-return-or-yield +def resize_image(image, + masks=None, + new_height=600, + new_width=1024, + method=tf.image.ResizeMethod.BILINEAR, + align_corners=False): + """Resizes images to the given height and width. + + Args: + image: A 3D tensor of shape [height, width, channels] + masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. + new_height: (optional) (scalar) desired height of the image. + new_width: (optional) (scalar) desired width of the image. + method: (optional) interpolation method used in resizing. Defaults to + BILINEAR. + align_corners: bool. If true, exactly align all 4 corners of the input + and output. Defaults to False. + + Returns: + Note that the position of the resized_image_shape changes based on whether + masks are present. + resized_image: A tensor of size [new_height, new_width, channels]. + resized_masks: If masks is not None, also outputs masks. A 3D tensor of + shape [num_instances, new_height, new_width] + resized_image_shape: A 1D tensor of shape [3] containing the shape of the + resized image. + """ + with tf.name_scope( + 'ResizeImage', + values=[image, new_height, new_width, method, align_corners]): + new_image = tf.image.resize_images( + image, tf.stack([new_height, new_width]), + method=method, + align_corners=align_corners) + image_shape = shape_utils.combined_static_and_dynamic_shape(image) + result = [new_image] + if masks is not None: + num_instances = tf.shape(masks)[0] + new_size = tf.stack([new_height, new_width]) + def resize_masks_branch(): + new_masks = tf.expand_dims(masks, 3) + new_masks = tf.image.resize_nearest_neighbor( + new_masks, new_size, align_corners=align_corners) + new_masks = tf.squeeze(new_masks, axis=3) + return new_masks + + def reshape_masks_branch(): + # The shape function will be computed for both branches of the + # condition, regardless of which branch is actually taken. Make sure + # that we don't trigger an assertion in the shape function when trying + # to reshape a non empty tensor into an empty one. + new_masks = tf.reshape(masks, [-1, new_size[0], new_size[1]]) + return new_masks + + masks = tf.cond(num_instances > 0, resize_masks_branch, + reshape_masks_branch) + result.append(masks) + + result.append(tf.stack([new_height, new_width, image_shape[2]])) + return result + + +def subtract_channel_mean(image, means=None): + """Normalizes an image by subtracting a mean from each channel. + + Args: + image: A 3D tensor of shape [height, width, channels] + means: float list containing a mean for each channel + Returns: + normalized_images: a tensor of shape [height, width, channels] + Raises: + ValueError: if images is not a 4D tensor or if the number of means is not + equal to the number of channels. 
+ """ + with tf.name_scope('SubtractChannelMean', values=[image, means]): + if len(image.get_shape()) != 3: + raise ValueError('Input must be of size [height, width, channels]') + if len(means) != image.get_shape()[-1]: + raise ValueError('len(means) must match the number of channels') + return image - [[means]] + + +def one_hot_encoding(labels, num_classes=None): + """One-hot encodes the multiclass labels. + + Example usage: + labels = tf.constant([1, 4], dtype=tf.int32) + one_hot = OneHotEncoding(labels, num_classes=5) + one_hot.eval() # evaluates to [0, 1, 0, 0, 1] + + Args: + labels: A tensor of shape [None] corresponding to the labels. + num_classes: Number of classes in the dataset. + Returns: + onehot_labels: a tensor of shape [num_classes] corresponding to the one hot + encoding of the labels. + Raises: + ValueError: if num_classes is not specified. + """ + with tf.name_scope('OneHotEncoding', values=[labels]): + if num_classes is None: + raise ValueError('num_classes must be specified') + + labels = tf.one_hot(labels, num_classes, 1, 0) + return tf.reduce_max(labels, 0) + + +def rgb_to_gray(image): + """Converts a 3 channel RGB image to a 1 channel grayscale image. + + Args: + image: Rank 3 float32 tensor containing 1 image -> [height, width, 3] + with pixel values varying between [0, 1]. + + Returns: + image: A single channel grayscale image -> [image, height, 1]. + """ + return _rgb_to_grayscale(image) + + +def random_self_concat_image( + image, boxes, labels, label_weights, label_confidences=None, + multiclass_scores=None, concat_vertical_probability=0.1, + concat_horizontal_probability=0.1, seed=None, + preprocess_vars_cache=None): + """Randomly concatenates the image with itself. + + This function randomly concatenates the image with itself; the random + variables for vertical and horizontal concatenation are independent. + Afterwards, we adjust the old bounding boxes, and add new bounding boxes + for the new objects. + + Args: + image: rank 3 float32 tensor containing 1 image -> [height, width, channels] + with pixel values varying between [0, 1]. + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + labels: rank 1 int32 tensor containing the object classes. + label_weights: rank 1 float32 containing the label weights. + label_confidences: (optional) rank 1 float32 containing the label + confidences. + multiclass_scores: (optional) float32 tensor of shape + [num_instances, num_classes] representing the score for + each box for each class. + concat_vertical_probability: (optional) a tf.float32 scalar denoting the + probability of a vertical concatenation. + concat_horizontal_probability: (optional) a tf.float32 scalar denoting the + probability of a horizontal concatenation. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: Image shape will be [new_height, new_width, channels]. + boxes: boxes which is the same rank as input boxes. Boxes are in normalized + form. + if label_confidences is not None also returns: + maybe_concat_label_confidences: cropped label weights. + if multiclass_scores is not None also returns: + maybe_concat_multiclass_scores: cropped_multiclass_scores. 
+ """ + + concat_vertical = (tf.random_uniform([], seed=seed) < + concat_vertical_probability) + # Note the seed + 1 so we get some semblance of independence even with + # fixed seeds. + concat_horizontal = (tf.random_uniform([], seed=seed + 1 if seed else None) + < concat_horizontal_probability) + + gen_func = lambda: (concat_vertical, concat_horizontal) + params = _get_or_create_preprocess_rand_vars( + gen_func, preprocessor_cache.PreprocessorCache.SELF_CONCAT_IMAGE, + preprocess_vars_cache) + concat_vertical, concat_horizontal = params + + def _concat_image(image, boxes, labels, label_weights, axis): + """Concats the image to itself on `axis`.""" + output_images = tf.concat([image, image], axis=axis) + + if axis == 0: + # Concat vertically, so need to reduce the y coordinates. + old_scaling = tf.constant([0.5, 1.0, 0.5, 1.0]) + new_translation = tf.constant([0.5, 0.0, 0.5, 0.0]) + elif axis == 1: + old_scaling = tf.constant([1.0, 0.5, 1.0, 0.5]) + new_translation = tf.constant([0.0, 0.5, 0.0, 0.5]) + + old_boxes = old_scaling * boxes + new_boxes = old_boxes + new_translation + all_boxes = tf.concat([old_boxes, new_boxes], axis=0) + + return [output_images, all_boxes, tf.tile(labels, [2]), tf.tile( + label_weights, [2])] + + image, boxes, labels, label_weights = tf.cond( + concat_vertical, + lambda: _concat_image(image, boxes, labels, label_weights, axis=0), + lambda: [image, boxes, labels, label_weights], + strict=True) + + outputs = tf.cond( + concat_horizontal, + lambda: _concat_image(image, boxes, labels, label_weights, axis=1), + lambda: [image, boxes, labels, label_weights], + strict=True) + + if label_confidences is not None: + label_confidences = tf.cond(concat_vertical, + lambda: tf.tile(label_confidences, [2]), + lambda: label_confidences) + outputs.append(tf.cond(concat_horizontal, + lambda: tf.tile(label_confidences, [2]), + lambda: label_confidences)) + + if multiclass_scores is not None: + multiclass_scores = tf.cond(concat_vertical, + lambda: tf.tile(multiclass_scores, [2, 1]), + lambda: multiclass_scores) + outputs.append(tf.cond(concat_horizontal, + lambda: tf.tile(multiclass_scores, [2, 1]), + lambda: multiclass_scores)) + + return outputs + + +def ssd_random_crop(image, + boxes, + labels, + label_weights, + label_confidences=None, + multiclass_scores=None, + masks=None, + keypoints=None, + min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), + aspect_ratio_range=((0.5, 2.0),) * 7, + area_range=((0.1, 1.0),) * 7, + overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), + clip_boxes=(True,) * 7, + random_coef=(0.15,) * 7, + seed=None, + preprocess_vars_cache=None): + """Random crop preprocessing with default parameters as in SSD paper. + + Liu et al., SSD: Single shot multibox detector. + For further information on random crop preprocessing refer to RandomCrop + function above. + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 1]. + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + labels: rank 1 int32 tensor containing the object classes. + label_weights: rank 1 float32 tensor containing the weights. + label_confidences: rank 1 float32 tensor containing the confidences. + multiclass_scores: (optional) float32 tensor of shape + [num_instances, num_classes] representing the score for each box for each + class. 
+ masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. The masks + are of the same height, width as the input `image`. + keypoints: (optional) rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2]. The keypoints are in y-x + normalized coordinates. + min_object_covered: the cropped image must cover at least this fraction of + at least one of the input bounding boxes. + aspect_ratio_range: allowed range for aspect ratio of cropped image. + area_range: allowed range for area ratio between cropped image and the + original image. + overlap_thresh: minimum overlap thresh with new cropped + image to keep the box. + clip_boxes: whether to clip the boxes to the cropped image. + random_coef: a random coefficient that defines the chance of getting the + original image. If random_coef is 0, we will always get the + cropped image, and if it is 1.0, we will always get the + original image. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same rank as input image. + boxes: boxes which is the same rank as input boxes. + Boxes are in normalized form. + labels: new labels. + + If label_weights, multiclass_scores, masks, or keypoints is not None, the + function also returns: + label_weights: rank 1 float32 tensor with shape [num_instances]. + multiclass_scores: rank 2 float32 tensor with shape + [num_instances, num_classes] + masks: rank 3 float32 tensor with shape [num_instances, height, width] + containing instance masks. + keypoints: rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2] + """ + + def random_crop_selector(selected_result, index): + """Applies random_crop_image to selected result. + + Args: + selected_result: A tuple containing image, boxes, labels, keypoints (if + not None), and masks (if not None). + index: The index that was randomly selected. + + Returns: A tuple containing image, boxes, labels, keypoints (if not None), + and masks (if not None). 
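+
+    Note: the optional entries of `selected_result` follow (image, boxes,
+    labels) in the fixed order label_weights, label_confidences,
+    multiclass_scores, masks, keypoints; entries whose corresponding
+    top-level argument is None are omitted, mirroring the unpacking logic
+    below.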
+ """ + + i = 3 + image, boxes, labels = selected_result[:i] + selected_label_weights = None + selected_label_confidences = None + selected_multiclass_scores = None + selected_masks = None + selected_keypoints = None + if label_weights is not None: + selected_label_weights = selected_result[i] + i += 1 + if label_confidences is not None: + selected_label_confidences = selected_result[i] + i += 1 + if multiclass_scores is not None: + selected_multiclass_scores = selected_result[i] + i += 1 + if masks is not None: + selected_masks = selected_result[i] + i += 1 + if keypoints is not None: + selected_keypoints = selected_result[i] + + return random_crop_image( + image=image, + boxes=boxes, + labels=labels, + label_weights=selected_label_weights, + label_confidences=selected_label_confidences, + multiclass_scores=selected_multiclass_scores, + masks=selected_masks, + keypoints=selected_keypoints, + min_object_covered=min_object_covered[index], + aspect_ratio_range=aspect_ratio_range[index], + area_range=area_range[index], + overlap_thresh=overlap_thresh[index], + clip_boxes=clip_boxes[index], + random_coef=random_coef[index], + seed=seed, + preprocess_vars_cache=preprocess_vars_cache) + + result = _apply_with_random_selector_tuples( + tuple( + t for t in (image, boxes, labels, label_weights, label_confidences, + multiclass_scores, masks, keypoints) if t is not None), + random_crop_selector, + num_cases=len(min_object_covered), + preprocess_vars_cache=preprocess_vars_cache, + key=preprocessor_cache.PreprocessorCache.SSD_CROP_SELECTOR_ID) + return result + + +def ssd_random_crop_pad(image, + boxes, + labels, + label_weights, + label_confidences=None, + multiclass_scores=None, + min_object_covered=(0.1, 0.3, 0.5, 0.7, 0.9, 1.0), + aspect_ratio_range=((0.5, 2.0),) * 6, + area_range=((0.1, 1.0),) * 6, + overlap_thresh=(0.1, 0.3, 0.5, 0.7, 0.9, 1.0), + clip_boxes=(True,) * 6, + random_coef=(0.15,) * 6, + min_padded_size_ratio=((1.0, 1.0),) * 6, + max_padded_size_ratio=((2.0, 2.0),) * 6, + pad_color=(None,) * 6, + seed=None, + preprocess_vars_cache=None): + """Random crop preprocessing with default parameters as in SSD paper. + + Liu et al., SSD: Single shot multibox detector. + For further information on random crop preprocessing refer to RandomCrop + function above. + + Args: + image: rank 3 float32 tensor containing 1 image -> [height, width, channels] + with pixel values varying between [0, 1]. + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + labels: rank 1 int32 tensor containing the object classes. + label_weights: float32 tensor of shape [num_instances] representing the + weight for each box. + label_confidences: float32 tensor of shape [num_instances] representing the + confidences for each box. + multiclass_scores: (optional) float32 tensor of shape + [num_instances, num_classes] representing the score for each box for each + class. + min_object_covered: the cropped image must cover at least this fraction of + at least one of the input bounding boxes. + aspect_ratio_range: allowed range for aspect ratio of cropped image. + area_range: allowed range for area ratio between cropped image and the + original image. + overlap_thresh: minimum overlap thresh with new cropped + image to keep the box. + clip_boxes: whether to clip the boxes to the cropped image. 
+ random_coef: a random coefficient that defines the chance of getting the + original image. If random_coef is 0, we will always get the + cropped image, and if it is 1.0, we will always get the + original image. + min_padded_size_ratio: min ratio of padded image height and width to the + input image's height and width. + max_padded_size_ratio: max ratio of padded image height and width to the + input image's height and width. + pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32. + if set as None, it will be set to average color of the randomly + cropped image. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: Image shape will be [new_height, new_width, channels]. + boxes: boxes which is the same rank as input boxes. Boxes are in normalized + form. + new_labels: new labels. + new_label_weights: new label weights. + """ + + def random_crop_pad_selector(image_boxes_labels, index): + """Random crop preprocessing helper.""" + i = 3 + image, boxes, labels = image_boxes_labels[:i] + selected_label_weights = None + selected_label_confidences = None + selected_multiclass_scores = None + if label_weights is not None: + selected_label_weights = image_boxes_labels[i] + i += 1 + if label_confidences is not None: + selected_label_confidences = image_boxes_labels[i] + i += 1 + if multiclass_scores is not None: + selected_multiclass_scores = image_boxes_labels[i] + + return random_crop_pad_image( + image, + boxes, + labels, + label_weights=selected_label_weights, + label_confidences=selected_label_confidences, + multiclass_scores=selected_multiclass_scores, + min_object_covered=min_object_covered[index], + aspect_ratio_range=aspect_ratio_range[index], + area_range=area_range[index], + overlap_thresh=overlap_thresh[index], + clip_boxes=clip_boxes[index], + random_coef=random_coef[index], + min_padded_size_ratio=min_padded_size_ratio[index], + max_padded_size_ratio=max_padded_size_ratio[index], + pad_color=pad_color[index], + seed=seed, + preprocess_vars_cache=preprocess_vars_cache) + + return _apply_with_random_selector_tuples( + tuple(t for t in (image, boxes, labels, label_weights, label_confidences, + multiclass_scores) if t is not None), + random_crop_pad_selector, + num_cases=len(min_object_covered), + preprocess_vars_cache=preprocess_vars_cache, + key=preprocessor_cache.PreprocessorCache.SSD_CROP_PAD_SELECTOR_ID) + + +def ssd_random_crop_fixed_aspect_ratio( + image, + boxes, + labels, + label_weights, + label_confidences=None, + multiclass_scores=None, + masks=None, + keypoints=None, + min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), + aspect_ratio=1.0, + area_range=((0.1, 1.0),) * 7, + overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), + clip_boxes=(True,) * 7, + random_coef=(0.15,) * 7, + seed=None, + preprocess_vars_cache=None): + """Random crop preprocessing with default parameters as in SSD paper. + + Liu et al., SSD: Single shot multibox detector. + For further information on random crop preprocessing refer to RandomCrop + function above. + + The only difference is that the aspect ratio of the crops are fixed. + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 1]. + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. 
+ Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + labels: rank 1 int32 tensor containing the object classes. + label_weights: float32 tensor of shape [num_instances] representing the + weight for each box. + label_confidences: (optional) float32 tensor of shape [num_instances] + representing the confidences for each box. + multiclass_scores: (optional) float32 tensor of shape + [num_instances, num_classes] representing the score for each box for each + class. + masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. The masks + are of the same height, width as the input `image`. + keypoints: (optional) rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2]. The keypoints are in y-x + normalized coordinates. + min_object_covered: the cropped image must cover at least this fraction of + at least one of the input bounding boxes. + aspect_ratio: aspect ratio of the cropped image. + area_range: allowed range for area ratio between cropped image and the + original image. + overlap_thresh: minimum overlap thresh with new cropped + image to keep the box. + clip_boxes: whether to clip the boxes to the cropped image. + random_coef: a random coefficient that defines the chance of getting the + original image. If random_coef is 0, we will always get the + cropped image, and if it is 1.0, we will always get the + original image. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same rank as input image. + boxes: boxes which is the same rank as input boxes. + Boxes are in normalized form. + labels: new labels. + + If multiclass_scores, masks, or keypoints is not None, the function also + returns: + + multiclass_scores: rank 2 float32 tensor with shape + [num_instances, num_classes] + masks: rank 3 float32 tensor with shape [num_instances, height, width] + containing instance masks. 
+ keypoints: rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2] + """ + aspect_ratio_range = ((aspect_ratio, aspect_ratio),) * len(area_range) + + crop_result = ssd_random_crop( + image, + boxes, + labels, + label_weights=label_weights, + label_confidences=label_confidences, + multiclass_scores=multiclass_scores, + masks=masks, + keypoints=keypoints, + min_object_covered=min_object_covered, + aspect_ratio_range=aspect_ratio_range, + area_range=area_range, + overlap_thresh=overlap_thresh, + clip_boxes=clip_boxes, + random_coef=random_coef, + seed=seed, + preprocess_vars_cache=preprocess_vars_cache) + i = 3 + new_image, new_boxes, new_labels = crop_result[:i] + new_label_weights = None + new_label_confidences = None + new_multiclass_scores = None + new_masks = None + new_keypoints = None + if label_weights is not None: + new_label_weights = crop_result[i] + i += 1 + if label_confidences is not None: + new_label_confidences = crop_result[i] + i += 1 + if multiclass_scores is not None: + new_multiclass_scores = crop_result[i] + i += 1 + if masks is not None: + new_masks = crop_result[i] + i += 1 + if keypoints is not None: + new_keypoints = crop_result[i] + + result = random_crop_to_aspect_ratio( + new_image, + new_boxes, + new_labels, + label_weights=new_label_weights, + label_confidences=new_label_confidences, + multiclass_scores=new_multiclass_scores, + masks=new_masks, + keypoints=new_keypoints, + aspect_ratio=aspect_ratio, + clip_boxes=clip_boxes, + seed=seed, + preprocess_vars_cache=preprocess_vars_cache) + + return result + + +def ssd_random_crop_pad_fixed_aspect_ratio( + image, + boxes, + labels, + label_weights, + label_confidences=None, + multiclass_scores=None, + masks=None, + keypoints=None, + min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), + aspect_ratio=1.0, + aspect_ratio_range=((0.5, 2.0),) * 7, + area_range=((0.1, 1.0),) * 7, + overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), + clip_boxes=(True,) * 7, + random_coef=(0.15,) * 7, + min_padded_size_ratio=(1.0, 1.0), + max_padded_size_ratio=(2.0, 2.0), + seed=None, + preprocess_vars_cache=None): + """Random crop and pad preprocessing with default parameters as in SSD paper. + + Liu et al., SSD: Single shot multibox detector. + For further information on random crop preprocessing refer to RandomCrop + function above. + + The only difference is that after the initial crop, images are zero-padded + to a fixed aspect ratio instead of being resized to that aspect ratio. + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 1]. + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + labels: rank 1 int32 tensor containing the object classes. + label_weights: float32 tensor of shape [num_instances] representing the + weight for each box. + label_confidences: (optional) float32 tensor of shape [num_instances] + representing the confidence for each box. + multiclass_scores: (optional) float32 tensor of shape + [num_instances, num_classes] representing the score for each box for each + class. + masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. The masks + are of the same height, width as the input `image`. + keypoints: (optional) rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2]. 
The keypoints are in y-x + normalized coordinates. + min_object_covered: the cropped image must cover at least this fraction of + at least one of the input bounding boxes. + aspect_ratio: the final aspect ratio to pad to. + aspect_ratio_range: allowed range for aspect ratio of cropped image. + area_range: allowed range for area ratio between cropped image and the + original image. + overlap_thresh: minimum overlap thresh with new cropped + image to keep the box. + clip_boxes: whether to clip the boxes to the cropped image. + random_coef: a random coefficient that defines the chance of getting the + original image. If random_coef is 0, we will always get the + cropped image, and if it is 1.0, we will always get the + original image. + min_padded_size_ratio: min ratio of padded image height and width to the + input image's height and width. + max_padded_size_ratio: max ratio of padded image height and width to the + input image's height and width. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same rank as input image. + boxes: boxes which is the same rank as input boxes. + Boxes are in normalized form. + labels: new labels. + + If multiclass_scores, masks, or keypoints is not None, the function also + returns: + + multiclass_scores: rank 2 with shape [num_instances, num_classes] + masks: rank 3 float32 tensor with shape [num_instances, height, width] + containing instance masks. + keypoints: rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2] + """ + crop_result = ssd_random_crop( + image, + boxes, + labels, + label_weights=label_weights, + label_confidences=label_confidences, + multiclass_scores=multiclass_scores, + masks=masks, + keypoints=keypoints, + min_object_covered=min_object_covered, + aspect_ratio_range=aspect_ratio_range, + area_range=area_range, + overlap_thresh=overlap_thresh, + clip_boxes=clip_boxes, + random_coef=random_coef, + seed=seed, + preprocess_vars_cache=preprocess_vars_cache) + i = 3 + new_image, new_boxes, new_labels = crop_result[:i] + new_label_weights = None + new_label_confidences = None + new_multiclass_scores = None + new_masks = None + new_keypoints = None + if label_weights is not None: + new_label_weights = crop_result[i] + i += 1 + if label_confidences is not None: + new_label_confidences = crop_result[i] + i += 1 + if multiclass_scores is not None: + new_multiclass_scores = crop_result[i] + i += 1 + if masks is not None: + new_masks = crop_result[i] + i += 1 + if keypoints is not None: + new_keypoints = crop_result[i] + + result = random_pad_to_aspect_ratio( + new_image, + new_boxes, + masks=new_masks, + keypoints=new_keypoints, + aspect_ratio=aspect_ratio, + min_padded_size_ratio=min_padded_size_ratio, + max_padded_size_ratio=max_padded_size_ratio, + seed=seed, + preprocess_vars_cache=preprocess_vars_cache) + + result = list(result) + i = 3 + result.insert(2, new_labels) + if new_label_weights is not None: + result.insert(i, new_label_weights) + i += 1 + if new_label_confidences is not None: + result.insert(i, new_label_confidences) + i += 1 + if multiclass_scores is not None: + result.insert(i, new_multiclass_scores) + result = tuple(result) + + return result + + +def convert_class_logits_to_softmax(multiclass_scores, temperature=1.0): + """Converts multiclass logits to softmax scores after 
applying temperature.
+
+  Args:
+    multiclass_scores: float32 tensor of shape
+      [num_instances, num_classes] representing the score for each box for each
+      class.
+    temperature: Scale factor to use prior to applying softmax. Larger
+      temperatures give more uniform distributions after softmax.
+
+  Returns:
+    multiclass_scores: float32 tensor of shape
+      [num_instances, num_classes] with scaling and softmax applied.
+  """
+
+  # Multiclass scores must be stored as logits. Apply temp and softmax.
+  multiclass_scores_scaled = tf.multiply(
+      multiclass_scores, 1.0 / temperature, name='scale_logits')
+  multiclass_scores = tf.nn.softmax(multiclass_scores_scaled, name='softmax')
+
+  return multiclass_scores
+
+
+def _get_crop_border(border, size):
+  border = tf.cast(border, tf.float32)
+  size = tf.cast(size, tf.float32)
+
+  i = tf.ceil(tf.log(2.0 * border / size) / tf.log(2.0))
+  divisor = tf.pow(2.0, i)
+  divisor = tf.clip_by_value(divisor, 1, border)
+  divisor = tf.cast(divisor, tf.int32)
+
+  return tf.cast(border, tf.int32) // divisor
+
+
+def random_square_crop_by_scale(image, boxes, labels, label_weights,
+                                masks=None, keypoints=None, max_border=128,
+                                scale_min=0.6, scale_max=1.3, num_scales=8,
+                                seed=None, preprocess_vars_cache=None):
+  """Randomly crop a square in proportion to scale and image size.
+
+  Extract a square sized crop from an image whose side length is sampled by
+  randomly scaling the maximum spatial dimension of the image. If part of
+  the crop falls outside the image, it is filled with zeros.
+  The augmentation is borrowed from [1]
+  [1]: https://arxiv.org/abs/1904.07850
+
+  Args:
+    image: rank 3 float32 tensor containing 1 image ->
+      [height, width, channels].
+    boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
+      Boxes are in normalized form meaning their coordinates vary
+      between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax].
+      Boxes on the crop boundary are clipped to the boundary and boxes
+      falling outside the crop are ignored.
+    labels: rank 1 int32 tensor containing the object classes.
+    label_weights: float32 tensor of shape [num_instances] representing the
+      weight for each box.
+    masks: (optional) rank 3 float32 tensor with shape
+      [num_instances, height, width] containing instance masks. The masks
+      are of the same height, width as the input `image`.
+    keypoints: (optional) rank 3 float32 tensor with shape
+      [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
+      coordinates.
+    max_border: The maximum size of the border. The border defines distance in
+      pixels to the image boundaries that will not be considered as a center of
+      a crop. To make sure that the border does not go over the center of the
+      image, we choose the border value by computing the minimum k, such that
+      (max_border / (2**k)) < image_dimension/2.
+    scale_min: float, the minimum value for scale.
+    scale_max: float, the maximum value for scale.
+    num_scales: int, the number of discrete scale values to sample between
+      [scale_min, scale_max]
+    seed: random seed.
+    preprocess_vars_cache: PreprocessorCache object that records previously
+                           performed augmentations. Updated in-place. If this
+                           function is called multiple times with the same
+                           non-null cache, it will perform deterministically.
+
+
+  Returns:
+    image: image which is the same rank as input image.
+    boxes: boxes which is the same rank as input boxes.
+           Boxes are in normalized form.
+    labels: new labels.
+    label_weights: rank 1 float32 tensor with shape [num_instances].
+ masks: rank 3 float32 tensor with shape [num_instances, height, width] + containing instance masks. + + """ + + img_shape = tf.shape(image) + height, width = img_shape[0], img_shape[1] + scales = tf.linspace(scale_min, scale_max, num_scales) + + scale = _get_or_create_preprocess_rand_vars( + lambda: scales[_random_integer(0, num_scales, seed)], + preprocessor_cache.PreprocessorCache.SQUARE_CROP_BY_SCALE, + preprocess_vars_cache, 'scale') + + image_size = scale * tf.cast(tf.maximum(height, width), tf.float32) + image_size = tf.cast(image_size, tf.int32) + h_border = _get_crop_border(max_border, height) + w_border = _get_crop_border(max_border, width) + + def y_function(): + y = _random_integer(h_border, + tf.cast(height, tf.int32) - h_border + 1, + seed) + return y + + def x_function(): + x = _random_integer(w_border, + tf.cast(width, tf.int32) - w_border + 1, + seed) + return x + + y_center = _get_or_create_preprocess_rand_vars( + y_function, + preprocessor_cache.PreprocessorCache.SQUARE_CROP_BY_SCALE, + preprocess_vars_cache, 'y_center') + + x_center = _get_or_create_preprocess_rand_vars( + x_function, + preprocessor_cache.PreprocessorCache.SQUARE_CROP_BY_SCALE, + preprocess_vars_cache, 'x_center') + + half_size = tf.cast(image_size / 2, tf.int32) + crop_ymin, crop_ymax = y_center - half_size, y_center + half_size + crop_xmin, crop_xmax = x_center - half_size, x_center + half_size + + ymin = tf.maximum(crop_ymin, 0) + xmin = tf.maximum(crop_xmin, 0) + ymax = tf.minimum(crop_ymax, height - 1) + xmax = tf.minimum(crop_xmax, width - 1) + + cropped_image = image[ymin:ymax, xmin:xmax] + offset_y = tf.maximum(0, ymin - crop_ymin) + offset_x = tf.maximum(0, xmin - crop_xmin) + + oy_i = offset_y + ox_i = offset_x + + output_image = tf.image.pad_to_bounding_box( + cropped_image, offset_height=oy_i, offset_width=ox_i, + target_height=image_size, target_width=image_size) + + if ymin == 0: + # We might be padding the image. + box_ymin = -offset_y + else: + box_ymin = crop_ymin + + if xmin == 0: + # We might be padding the image. 
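+    # As with ymin above: offset_x is the amount of zero padding added on the
+    # left, so -offset_x is the crop window's (possibly negative) left edge in
+    # the original image's pixel coordinates.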
+ box_xmin = -offset_x + else: + box_xmin = crop_xmin + + box_ymax = box_ymin + image_size + box_xmax = box_xmin + image_size + + image_box = [box_ymin / height, box_xmin / width, + box_ymax / height, box_xmax / width] + boxlist = box_list.BoxList(boxes) + boxlist = box_list_ops.change_coordinate_frame(boxlist, image_box) + boxlist, indices = box_list_ops.prune_completely_outside_window( + boxlist, [0.0, 0.0, 1.0, 1.0]) + boxlist = box_list_ops.clip_to_window(boxlist, [0.0, 0.0, 1.0, 1.0], + filter_nonoverlapping=False) + + return_values = [output_image, boxlist.get(), + tf.gather(labels, indices), + tf.gather(label_weights, indices)] + + if masks is not None: + new_masks = tf.expand_dims(masks, -1) + new_masks = new_masks[:, ymin:ymax, xmin:xmax] + new_masks = tf.image.pad_to_bounding_box( + new_masks, oy_i, ox_i, image_size, image_size) + new_masks = tf.squeeze(new_masks, [-1]) + return_values.append(tf.gather(new_masks, indices)) + + if keypoints is not None: + keypoints = tf.gather(keypoints, indices) + keypoints = keypoint_ops.change_coordinate_frame(keypoints, image_box) + keypoints = keypoint_ops.prune_outside_window(keypoints, + [0.0, 0.0, 1.0, 1.0]) + return_values.append(keypoints) + + return return_values + + +def get_default_func_arg_map(include_label_weights=True, + include_label_confidences=False, + include_multiclass_scores=False, + include_instance_masks=False, + include_keypoints=False, + include_keypoint_visibilities=False): + """Returns the default mapping from a preprocessor function to its args. + + Args: + include_label_weights: If True, preprocessing functions will modify the + label weights, too. + include_label_confidences: If True, preprocessing functions will modify the + label confidences, too. + include_multiclass_scores: If True, preprocessing functions will modify the + multiclass scores, too. + include_instance_masks: If True, preprocessing functions will modify the + instance masks, too. + include_keypoints: If True, preprocessing functions will modify the + keypoints, too. + include_keypoint_visibilities: If True, preprocessing functions will modify + the keypoint visibilities, too. + + Returns: + A map from preprocessing functions to the arguments they receive. 
+ """ + groundtruth_label_weights = None + if include_label_weights: + groundtruth_label_weights = ( + fields.InputDataFields.groundtruth_weights) + + groundtruth_label_confidences = None + if include_label_confidences: + groundtruth_label_confidences = ( + fields.InputDataFields.groundtruth_confidences) + + multiclass_scores = None + if include_multiclass_scores: + multiclass_scores = (fields.InputDataFields.multiclass_scores) + + groundtruth_instance_masks = None + if include_instance_masks: + groundtruth_instance_masks = ( + fields.InputDataFields.groundtruth_instance_masks) + + groundtruth_keypoints = None + if include_keypoints: + groundtruth_keypoints = fields.InputDataFields.groundtruth_keypoints + + groundtruth_keypoint_visibilities = None + if include_keypoint_visibilities: + groundtruth_keypoint_visibilities = ( + fields.InputDataFields.groundtruth_keypoint_visibilities) + + prep_func_arg_map = { + normalize_image: (fields.InputDataFields.image,), + random_horizontal_flip: ( + fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + groundtruth_instance_masks, + groundtruth_keypoints, + groundtruth_keypoint_visibilities, + ), + random_vertical_flip: ( + fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + groundtruth_instance_masks, + groundtruth_keypoints, + ), + random_rotation90: ( + fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + groundtruth_instance_masks, + groundtruth_keypoints, + ), + random_pixel_value_scale: (fields.InputDataFields.image,), + random_image_scale: ( + fields.InputDataFields.image, + groundtruth_instance_masks, + ), + random_rgb_to_gray: (fields.InputDataFields.image,), + random_adjust_brightness: (fields.InputDataFields.image,), + random_adjust_contrast: (fields.InputDataFields.image,), + random_adjust_hue: (fields.InputDataFields.image,), + random_adjust_saturation: (fields.InputDataFields.image,), + random_distort_color: (fields.InputDataFields.image,), + random_jitter_boxes: (fields.InputDataFields.groundtruth_boxes,), + random_crop_image: (fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes, + groundtruth_label_weights, + groundtruth_label_confidences, multiclass_scores, + groundtruth_instance_masks, groundtruth_keypoints, + groundtruth_keypoint_visibilities), + random_pad_image: + (fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, groundtruth_instance_masks, + groundtruth_keypoints), + random_absolute_pad_image: + (fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, groundtruth_instance_masks, + groundtruth_keypoints), + random_crop_pad_image: (fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes, + groundtruth_label_weights, + groundtruth_label_confidences, multiclass_scores), + random_crop_to_aspect_ratio: ( + fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes, + groundtruth_label_weights, + groundtruth_label_confidences, + multiclass_scores, + groundtruth_instance_masks, + groundtruth_keypoints, + ), + random_pad_to_aspect_ratio: ( + fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + groundtruth_instance_masks, + groundtruth_keypoints, + ), + random_black_patches: (fields.InputDataFields.image,), + random_jpeg_quality: (fields.InputDataFields.image,), + random_downscale_to_target_pixels: ( + 
fields.InputDataFields.image, + groundtruth_instance_masks, + ), + random_patch_gaussian: (fields.InputDataFields.image,), + autoaugment_image: ( + fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + ), + retain_boxes_above_threshold: ( + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes, + groundtruth_label_weights, + groundtruth_label_confidences, + multiclass_scores, + groundtruth_instance_masks, + groundtruth_keypoints, + ), + drop_label_probabilistically: ( + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes, + groundtruth_label_weights, + groundtruth_label_confidences, + multiclass_scores, + groundtruth_instance_masks, + groundtruth_keypoints, + ), + remap_labels: (fields.InputDataFields.groundtruth_classes,), + image_to_float: (fields.InputDataFields.image,), + random_resize_method: (fields.InputDataFields.image,), + resize_to_range: ( + fields.InputDataFields.image, + groundtruth_instance_masks, + ), + resize_to_min_dimension: ( + fields.InputDataFields.image, + groundtruth_instance_masks, + ), + scale_boxes_to_pixel_coordinates: ( + fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + groundtruth_keypoints, + ), + resize_image: ( + fields.InputDataFields.image, + groundtruth_instance_masks, + ), + subtract_channel_mean: (fields.InputDataFields.image,), + one_hot_encoding: (fields.InputDataFields.groundtruth_image_classes,), + rgb_to_gray: (fields.InputDataFields.image,), + random_self_concat_image: + (fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes, + groundtruth_label_weights, groundtruth_label_confidences, + multiclass_scores), + ssd_random_crop: (fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes, + groundtruth_label_weights, + groundtruth_label_confidences, multiclass_scores, + groundtruth_instance_masks, groundtruth_keypoints), + ssd_random_crop_pad: (fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes, + groundtruth_label_weights, + groundtruth_label_confidences, multiclass_scores), + ssd_random_crop_fixed_aspect_ratio: + (fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes, + groundtruth_label_weights, groundtruth_label_confidences, + multiclass_scores, groundtruth_instance_masks, groundtruth_keypoints + ), + ssd_random_crop_pad_fixed_aspect_ratio: ( + fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes, + groundtruth_label_weights, + groundtruth_label_confidences, + multiclass_scores, + groundtruth_instance_masks, + groundtruth_keypoints, + ), + convert_class_logits_to_softmax: (multiclass_scores,), + random_square_crop_by_scale: + (fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes, + groundtruth_label_weights, groundtruth_instance_masks, + groundtruth_keypoints), + } + + return prep_func_arg_map + + +def preprocess(tensor_dict, + preprocess_options, + func_arg_map=None, + preprocess_vars_cache=None): + """Preprocess images and bounding boxes. + + Various types of preprocessing (to be implemented) based on the + preprocess_options dictionary e.g. "crop image" (affects image and possibly + boxes), "white balance image" (affects only image), etc. 
If self._options + is None, no preprocessing is done. + + Args: + tensor_dict: dictionary that contains images, boxes, and can contain other + things as well. + images-> rank 4 float32 tensor contains + 1 image -> [1, height, width, 3]. + with pixel values varying between [0, 1] + boxes-> rank 2 float32 tensor containing + the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning + their coordinates vary between [0, 1]. + Each row is in the form + of [ymin, xmin, ymax, xmax]. + preprocess_options: It is a list of tuples, where each tuple contains a + function and a dictionary that contains arguments and + their values. + func_arg_map: mapping from preprocessing functions to arguments that they + expect to receive and return. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + tensor_dict: which contains the preprocessed images, bounding boxes, etc. + + Raises: + ValueError: (a) If the functions passed to Preprocess + are not in func_arg_map. + (b) If the arguments that a function needs + do not exist in tensor_dict. + (c) If image in tensor_dict is not rank 4 + """ + if func_arg_map is None: + func_arg_map = get_default_func_arg_map() + + # changes the images to image (rank 4 to rank 3) since the functions + # receive rank 3 tensor for image + if fields.InputDataFields.image in tensor_dict: + images = tensor_dict[fields.InputDataFields.image] + if len(images.get_shape()) != 4: + raise ValueError('images in tensor_dict should be rank 4') + image = tf.squeeze(images, axis=0) + tensor_dict[fields.InputDataFields.image] = image + + # Preprocess inputs based on preprocess_options + for option in preprocess_options: + func, params = option + if func not in func_arg_map: + raise ValueError('The function %s does not exist in func_arg_map' % + (func.__name__)) + arg_names = func_arg_map[func] + for a in arg_names: + if a is not None and a not in tensor_dict: + raise ValueError('The function %s requires argument %s' % + (func.__name__, a)) + + def get_arg(key): + return tensor_dict[key] if key is not None else None + + args = [get_arg(a) for a in arg_names] + if preprocess_vars_cache is not None: + if six.PY2: + # pylint: disable=deprecated-method + arg_spec = inspect.getargspec(func) + # pylint: enable=deprecated-method + else: + arg_spec = inspect.getfullargspec(func) + if 'preprocess_vars_cache' in arg_spec.args: + params['preprocess_vars_cache'] = preprocess_vars_cache + + results = func(*args, **params) + if not isinstance(results, (list, tuple)): + results = (results,) + # Removes None args since the return values will not contain those. 
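+    # zip() below pairs each returned value with its surviving argument name
+    # and writes it back into tensor_dict in order.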
+ arg_names = [arg_name for arg_name in arg_names if arg_name is not None] + for res, arg_name in zip(results, arg_names): + tensor_dict[arg_name] = res + + # changes the image to images (rank 3 to rank 4) to be compatible to what + # we received in the first place + if fields.InputDataFields.image in tensor_dict: + image = tensor_dict[fields.InputDataFields.image] + images = tf.expand_dims(image, 0) + tensor_dict[fields.InputDataFields.image] = images + + return tensor_dict diff --git a/models/research/object_detection/core/preprocessor_cache.py b/models/research/object_detection/core/preprocessor_cache.py new file mode 100644 index 0000000000000000000000000000000000000000..948710564cfba22f294b98f0078ee632aa15854c --- /dev/null +++ b/models/research/object_detection/core/preprocessor_cache.py @@ -0,0 +1,109 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Records previous preprocessing operations and allows them to be repeated. + +Used with object_detection.core.preprocessor. Passing a PreprocessorCache +into individual data augmentation functions or the general preprocess() function +will store all randomly generated variables in the PreprocessorCache. When +a preprocessor function is called multiple times with the same +PreprocessorCache object, that function will perform the same augmentation +on all calls. +""" + +import collections + + +class PreprocessorCache(object): + """Dictionary wrapper storing random variables generated during preprocessing. 
+  """
+
+  # Constant keys representing different preprocessing functions
+  ROTATION90 = 'rotation90'
+  HORIZONTAL_FLIP = 'horizontal_flip'
+  VERTICAL_FLIP = 'vertical_flip'
+  PIXEL_VALUE_SCALE = 'pixel_value_scale'
+  IMAGE_SCALE = 'image_scale'
+  RGB_TO_GRAY = 'rgb_to_gray'
+  ADJUST_BRIGHTNESS = 'adjust_brightness'
+  ADJUST_CONTRAST = 'adjust_contrast'
+  ADJUST_HUE = 'adjust_hue'
+  ADJUST_SATURATION = 'adjust_saturation'
+  DISTORT_COLOR = 'distort_color'
+  STRICT_CROP_IMAGE = 'strict_crop_image'
+  CROP_IMAGE = 'crop_image'
+  PAD_IMAGE = 'pad_image'
+  CROP_TO_ASPECT_RATIO = 'crop_to_aspect_ratio'
+  RESIZE_METHOD = 'resize_method'
+  PAD_TO_ASPECT_RATIO = 'pad_to_aspect_ratio'
+  BLACK_PATCHES = 'black_patches'
+  ADD_BLACK_PATCH = 'add_black_patch'
+  SELECTOR = 'selector'
+  SELECTOR_TUPLES = 'selector_tuples'
+  SELF_CONCAT_IMAGE = 'self_concat_image'
+  SSD_CROP_SELECTOR_ID = 'ssd_crop_selector_id'
+  SSD_CROP_PAD_SELECTOR_ID = 'ssd_crop_pad_selector_id'
+  JPEG_QUALITY = 'jpeg_quality'
+  DOWNSCALE_TO_TARGET_PIXELS = 'downscale_to_target_pixels'
+  PATCH_GAUSSIAN = 'patch_gaussian'
+  SQUARE_CROP_BY_SCALE = 'square_crop_scale'
+
+  # 28 permitted function ids
+  _VALID_FNS = [ROTATION90, HORIZONTAL_FLIP, VERTICAL_FLIP, PIXEL_VALUE_SCALE,
+                IMAGE_SCALE, RGB_TO_GRAY, ADJUST_BRIGHTNESS, ADJUST_CONTRAST,
+                ADJUST_HUE, ADJUST_SATURATION, DISTORT_COLOR, STRICT_CROP_IMAGE,
+                CROP_IMAGE, PAD_IMAGE, CROP_TO_ASPECT_RATIO, RESIZE_METHOD,
+                PAD_TO_ASPECT_RATIO, BLACK_PATCHES, ADD_BLACK_PATCH, SELECTOR,
+                SELECTOR_TUPLES, SELF_CONCAT_IMAGE, SSD_CROP_SELECTOR_ID,
+                SSD_CROP_PAD_SELECTOR_ID, JPEG_QUALITY,
+                DOWNSCALE_TO_TARGET_PIXELS, PATCH_GAUSSIAN,
+                SQUARE_CROP_BY_SCALE]
+
+  def __init__(self):
+    self._history = collections.defaultdict(dict)
+
+  def clear(self):
+    """Resets cache."""
+    self._history = collections.defaultdict(dict)
+
+  def get(self, function_id, key):
+    """Gets stored value given a function id and key.
+
+    Args:
+      function_id: identifier for the preprocessing function used.
+      key: identifier for the variable stored.
+    Returns:
+      value: the corresponding value, expected to be a tensor or
+        nested structure of tensors.
+    Raises:
+      ValueError: if function_id is not one of the 28 valid function ids.
+    """
+    if function_id not in self._VALID_FNS:
+      raise ValueError('Function id not recognized: %s.' % str(function_id))
+    return self._history[function_id].get(key)
+
+  def update(self, function_id, key, value):
+    """Adds a value to the dictionary.
+
+    Args:
+      function_id: identifier for the preprocessing function used.
+      key: identifier for the variable stored.
+      value: the value to store, expected to be a tensor or nested structure
+        of tensors.
+    Raises:
+      ValueError: if function_id is not one of the 28 valid function ids.
+    """
+    if function_id not in self._VALID_FNS:
+      raise ValueError('Function id not recognized: %s.' % str(function_id))
+    self._history[function_id][key] = value
diff --git a/models/research/object_detection/core/preprocessor_test.py b/models/research/object_detection/core/preprocessor_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..fdb56c90a0b85db6772bccc4cfb8793d2012f8ed
--- /dev/null
+++ b/models/research/object_detection/core/preprocessor_test.py
@@ -0,0 +1,3785 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.core.preprocessor.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest +from absl.testing import parameterized +import numpy as np +import six +from six.moves import range +from six.moves import zip +import tensorflow.compat.v1 as tf + +from object_detection.core import preprocessor +from object_detection.core import preprocessor_cache +from object_detection.core import standard_fields as fields +from object_detection.utils import test_case +from object_detection.utils import tf_version + +if six.PY2: + import mock # pylint: disable=g-import-not-at-top +else: + mock = unittest.mock # pylint: disable=g-import-not-at-top + + +class PreprocessorTest(test_case.TestCase, parameterized.TestCase): + + def createColorfulTestImage(self): + ch255 = tf.fill([1, 100, 200, 1], tf.constant(255, dtype=tf.uint8)) + ch128 = tf.fill([1, 100, 200, 1], tf.constant(128, dtype=tf.uint8)) + ch0 = tf.fill([1, 100, 200, 1], tf.constant(0, dtype=tf.uint8)) + imr = tf.concat([ch255, ch0, ch0], 3) + img = tf.concat([ch255, ch255, ch0], 3) + imb = tf.concat([ch255, ch0, ch255], 3) + imw = tf.concat([ch128, ch128, ch128], 3) + imu = tf.concat([imr, img], 2) + imd = tf.concat([imb, imw], 2) + im = tf.concat([imu, imd], 1) + return im + + def createTestImages(self): + images_r = tf.constant([[[128, 128, 128, 128], [0, 0, 128, 128], + [0, 128, 128, 128], [192, 192, 128, 128]]], + dtype=tf.uint8) + images_r = tf.expand_dims(images_r, 3) + images_g = tf.constant([[[0, 0, 128, 128], [0, 0, 128, 128], + [0, 128, 192, 192], [192, 192, 128, 192]]], + dtype=tf.uint8) + images_g = tf.expand_dims(images_g, 3) + images_b = tf.constant([[[128, 128, 192, 0], [0, 0, 128, 192], + [0, 128, 128, 0], [192, 192, 192, 128]]], + dtype=tf.uint8) + images_b = tf.expand_dims(images_b, 3) + images = tf.concat([images_r, images_g, images_b], 3) + return images + + def createEmptyTestBoxes(self): + boxes = tf.constant([[]], dtype=tf.float32) + return boxes + + def createTestBoxes(self): + boxes = tf.constant( + [[0.0, 0.25, 0.75, 1.0], [0.25, 0.5, 0.75, 1.0]], dtype=tf.float32) + return boxes + + def createTestGroundtruthWeights(self): + return tf.constant([1.0, 0.5], dtype=tf.float32) + + def createTestMasks(self): + mask = np.array([ + [[255.0, 0.0, 0.0], + [255.0, 0.0, 0.0], + [255.0, 0.0, 0.0]], + [[255.0, 255.0, 0.0], + [255.0, 255.0, 0.0], + [255.0, 255.0, 0.0]]]) + return tf.constant(mask, dtype=tf.float32) + + def createTestKeypoints(self): + keypoints_np = np.array([ + [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], + [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]], + ]) + keypoints = tf.constant(keypoints_np, dtype=tf.float32) + keypoint_visibilities = tf.constant( + [ + [True, True, False], + [False, True, True] + ]) + return keypoints, keypoint_visibilities + + def createTestKeypointsInsideCrop(self): + keypoints = np.array([ + [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]], + [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]], + ]) + return tf.constant(keypoints, 
dtype=tf.float32) + + def createTestKeypointsOutsideCrop(self): + keypoints = np.array([ + [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], + [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], + ]) + return tf.constant(keypoints, dtype=tf.float32) + + def createKeypointFlipPermutation(self): + return np.array([0, 2, 1], dtype=np.int32) + + def createTestLabels(self): + labels = tf.constant([1, 2], dtype=tf.int32) + return labels + + def createTestLabelsLong(self): + labels = tf.constant([1, 2, 4], dtype=tf.int32) + return labels + + def createTestBoxesOutOfImage(self): + boxes = tf.constant( + [[-0.1, 0.25, 0.75, 1], [0.25, 0.5, 0.75, 1.1]], dtype=tf.float32) + return boxes + + def createTestMultiClassScores(self): + return tf.constant([[1.0, 0.0], [0.5, 0.5]], dtype=tf.float32) + + def expectedImagesAfterNormalization(self): + images_r = tf.constant([[[0, 0, 0, 0], [-1, -1, 0, 0], + [-1, 0, 0, 0], [0.5, 0.5, 0, 0]]], + dtype=tf.float32) + images_r = tf.expand_dims(images_r, 3) + images_g = tf.constant([[[-1, -1, 0, 0], [-1, -1, 0, 0], + [-1, 0, 0.5, 0.5], [0.5, 0.5, 0, 0.5]]], + dtype=tf.float32) + images_g = tf.expand_dims(images_g, 3) + images_b = tf.constant([[[0, 0, 0.5, -1], [-1, -1, 0, 0.5], + [-1, 0, 0, -1], [0.5, 0.5, 0.5, 0]]], + dtype=tf.float32) + images_b = tf.expand_dims(images_b, 3) + images = tf.concat([images_r, images_g, images_b], 3) + return images + + def expectedMaxImageAfterColorScale(self): + images_r = tf.constant([[[0.1, 0.1, 0.1, 0.1], [-0.9, -0.9, 0.1, 0.1], + [-0.9, 0.1, 0.1, 0.1], [0.6, 0.6, 0.1, 0.1]]], + dtype=tf.float32) + images_r = tf.expand_dims(images_r, 3) + images_g = tf.constant([[[-0.9, -0.9, 0.1, 0.1], [-0.9, -0.9, 0.1, 0.1], + [-0.9, 0.1, 0.6, 0.6], [0.6, 0.6, 0.1, 0.6]]], + dtype=tf.float32) + images_g = tf.expand_dims(images_g, 3) + images_b = tf.constant([[[0.1, 0.1, 0.6, -0.9], [-0.9, -0.9, 0.1, 0.6], + [-0.9, 0.1, 0.1, -0.9], [0.6, 0.6, 0.6, 0.1]]], + dtype=tf.float32) + images_b = tf.expand_dims(images_b, 3) + images = tf.concat([images_r, images_g, images_b], 3) + return images + + def expectedMinImageAfterColorScale(self): + images_r = tf.constant([[[-0.1, -0.1, -0.1, -0.1], [-1, -1, -0.1, -0.1], + [-1, -0.1, -0.1, -0.1], [0.4, 0.4, -0.1, -0.1]]], + dtype=tf.float32) + images_r = tf.expand_dims(images_r, 3) + images_g = tf.constant([[[-1, -1, -0.1, -0.1], [-1, -1, -0.1, -0.1], + [-1, -0.1, 0.4, 0.4], [0.4, 0.4, -0.1, 0.4]]], + dtype=tf.float32) + images_g = tf.expand_dims(images_g, 3) + images_b = tf.constant([[[-0.1, -0.1, 0.4, -1], [-1, -1, -0.1, 0.4], + [-1, -0.1, -0.1, -1], [0.4, 0.4, 0.4, -0.1]]], + dtype=tf.float32) + images_b = tf.expand_dims(images_b, 3) + images = tf.concat([images_r, images_g, images_b], 3) + return images + + def expectedImagesAfterLeftRightFlip(self): + images_r = tf.constant([[[0, 0, 0, 0], [0, 0, -1, -1], + [0, 0, 0, -1], [0, 0, 0.5, 0.5]]], + dtype=tf.float32) + images_r = tf.expand_dims(images_r, 3) + images_g = tf.constant([[[0, 0, -1, -1], [0, 0, -1, -1], + [0.5, 0.5, 0, -1], [0.5, 0, 0.5, 0.5]]], + dtype=tf.float32) + images_g = tf.expand_dims(images_g, 3) + images_b = tf.constant([[[-1, 0.5, 0, 0], [0.5, 0, -1, -1], + [-1, 0, 0, -1], [0, 0.5, 0.5, 0.5]]], + dtype=tf.float32) + images_b = tf.expand_dims(images_b, 3) + images = tf.concat([images_r, images_g, images_b], 3) + return images + + def expectedImagesAfterUpDownFlip(self): + images_r = tf.constant([[[0.5, 0.5, 0, 0], [-1, 0, 0, 0], + [-1, -1, 0, 0], [0, 0, 0, 0]]], + dtype=tf.float32) + images_r = tf.expand_dims(images_r, 3) + images_g = tf.constant([[[0.5, 0.5, 0, 
0.5], [-1, 0, 0.5, 0.5], + [-1, -1, 0, 0], [-1, -1, 0, 0]]], + dtype=tf.float32) + images_g = tf.expand_dims(images_g, 3) + images_b = tf.constant([[[0.5, 0.5, 0.5, 0], [-1, 0, 0, -1], + [-1, -1, 0, 0.5], [0, 0, 0.5, -1]]], + dtype=tf.float32) + images_b = tf.expand_dims(images_b, 3) + images = tf.concat([images_r, images_g, images_b], 3) + return images + + def expectedImagesAfterRot90(self): + images_r = tf.constant([[[0, 0, 0, 0], [0, 0, 0, 0], + [0, -1, 0, 0.5], [0, -1, -1, 0.5]]], + dtype=tf.float32) + images_r = tf.expand_dims(images_r, 3) + images_g = tf.constant([[[0, 0, 0.5, 0.5], [0, 0, 0.5, 0], + [-1, -1, 0, 0.5], [-1, -1, -1, 0.5]]], + dtype=tf.float32) + images_g = tf.expand_dims(images_g, 3) + images_b = tf.constant([[[-1, 0.5, -1, 0], [0.5, 0, 0, 0.5], + [0, -1, 0, 0.5], [0, -1, -1, 0.5]]], + dtype=tf.float32) + images_b = tf.expand_dims(images_b, 3) + images = tf.concat([images_r, images_g, images_b], 3) + return images + + def expectedBoxesAfterLeftRightFlip(self): + boxes = tf.constant([[0.0, 0.0, 0.75, 0.75], [0.25, 0.0, 0.75, 0.5]], + dtype=tf.float32) + return boxes + + def expectedBoxesAfterUpDownFlip(self): + boxes = tf.constant([[0.25, 0.25, 1.0, 1.0], [0.25, 0.5, 0.75, 1.0]], + dtype=tf.float32) + return boxes + + def expectedBoxesAfterRot90(self): + boxes = tf.constant( + [[0.0, 0.0, 0.75, 0.75], [0.0, 0.25, 0.5, 0.75]], dtype=tf.float32) + return boxes + + def expectedMasksAfterLeftRightFlip(self): + mask = np.array([ + [[0.0, 0.0, 255.0], + [0.0, 0.0, 255.0], + [0.0, 0.0, 255.0]], + [[0.0, 255.0, 255.0], + [0.0, 255.0, 255.0], + [0.0, 255.0, 255.0]]]) + return tf.constant(mask, dtype=tf.float32) + + def expectedMasksAfterUpDownFlip(self): + mask = np.array([ + [[255.0, 0.0, 0.0], + [255.0, 0.0, 0.0], + [255.0, 0.0, 0.0]], + [[255.0, 255.0, 0.0], + [255.0, 255.0, 0.0], + [255.0, 255.0, 0.0]]]) + return tf.constant(mask, dtype=tf.float32) + + def expectedMasksAfterRot90(self): + mask = np.array([ + [[0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [255.0, 255.0, 255.0]], + [[0.0, 0.0, 0.0], + [255.0, 255.0, 255.0], + [255.0, 255.0, 255.0]]]) + return tf.constant(mask, dtype=tf.float32) + + def expectedLabelScoresAfterThresholding(self): + return tf.constant([1.0], dtype=tf.float32) + + def expectedBoxesAfterThresholding(self): + return tf.constant([[0.0, 0.25, 0.75, 1.0]], dtype=tf.float32) + + def expectedLabelsAfterThresholding(self): + return tf.constant([1], dtype=tf.float32) + + def expectedMultiClassScoresAfterThresholding(self): + return tf.constant([[1.0, 0.0]], dtype=tf.float32) + + def expectedMasksAfterThresholding(self): + mask = np.array([ + [[255.0, 0.0, 0.0], + [255.0, 0.0, 0.0], + [255.0, 0.0, 0.0]]]) + return tf.constant(mask, dtype=tf.float32) + + def expectedKeypointsAfterThresholding(self): + keypoints = np.array([ + [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]] + ]) + return tf.constant(keypoints, dtype=tf.float32) + + def expectedLabelScoresAfterThresholdingWithMissingScore(self): + return tf.constant([np.nan], dtype=tf.float32) + + def expectedBoxesAfterThresholdingWithMissingScore(self): + return tf.constant([[0.25, 0.5, 0.75, 1]], dtype=tf.float32) + + def expectedLabelsAfterThresholdingWithMissingScore(self): + return tf.constant([2], dtype=tf.float32) + + def expectedLabelScoresAfterDropping(self): + return tf.constant([0.5], dtype=tf.float32) + + def expectedBoxesAfterDropping(self): + return tf.constant([[0.25, 0.5, 0.75, 1.0]], dtype=tf.float32) + + def expectedLabelsAfterDropping(self): + return tf.constant([2], dtype=tf.float32) + + def 
expectedMultiClassScoresAfterDropping(self): + return tf.constant([[0.5, 0.5]], dtype=tf.float32) + + def expectedMasksAfterDropping(self): + masks = np.array([[[255.0, 255.0, 0.0], [255.0, 255.0, 0.0], + [255.0, 255.0, 0.0]]]) + return tf.constant(masks, dtype=tf.float32) + + def expectedKeypointsAfterDropping(self): + keypoints = np.array([[[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]]]) + return tf.constant(keypoints, dtype=tf.float32) + + def expectedLabelsAfterRemapping(self): + return tf.constant([3, 3, 4], dtype=tf.float32) + + def testRgbToGrayscale(self): + def graph_fn(): + images = self.createTestImages() + grayscale_images = preprocessor._rgb_to_grayscale(images) + expected_images = tf.image.rgb_to_grayscale(images) + return grayscale_images, expected_images + (grayscale_images, expected_images) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(expected_images, grayscale_images) + + def testNormalizeImage(self): + def graph_fn(): + preprocess_options = [(preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 256, + 'target_minval': -1, + 'target_maxval': 1 + })] + images = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images} + tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) + images = tensor_dict[fields.InputDataFields.image] + images_expected = self.expectedImagesAfterNormalization() + return images, images_expected + images_, images_expected_ = self.execute_cpu(graph_fn, []) + images_shape_ = images_.shape + images_expected_shape_ = images_expected_.shape + expected_shape = [1, 4, 4, 3] + self.assertAllEqual(images_expected_shape_, images_shape_) + self.assertAllEqual(images_shape_, expected_shape) + self.assertAllClose(images_, images_expected_) + + def testRetainBoxesAboveThreshold(self): + def graph_fn(): + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + (retained_boxes, retained_labels, + retained_weights) = preprocessor.retain_boxes_above_threshold( + boxes, labels, weights, threshold=0.6) + return [ + retained_boxes, retained_labels, retained_weights, + self.expectedBoxesAfterThresholding(), + self.expectedLabelsAfterThresholding(), + self.expectedLabelScoresAfterThresholding() + ] + + (retained_boxes_, retained_labels_, retained_weights_, + expected_retained_boxes_, expected_retained_labels_, + expected_retained_weights_) = self.execute_cpu(graph_fn, []) + self.assertAllClose( + retained_boxes_, expected_retained_boxes_) + self.assertAllClose( + retained_labels_, expected_retained_labels_) + self.assertAllClose( + retained_weights_, expected_retained_weights_) + + def testRetainBoxesAboveThresholdWithMultiClassScores(self): + def graph_fn(): + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + multiclass_scores = self.createTestMultiClassScores() + (_, _, _, + retained_multiclass_scores) = preprocessor.retain_boxes_above_threshold( + boxes, + labels, + weights, + multiclass_scores=multiclass_scores, + threshold=0.6) + return [ + retained_multiclass_scores, + self.expectedMultiClassScoresAfterThresholding() + ] + + (retained_multiclass_scores_, + expected_retained_multiclass_scores_) = self.execute(graph_fn, []) + self.assertAllClose(retained_multiclass_scores_, + expected_retained_multiclass_scores_) + + def testRetainBoxesAboveThresholdWithMasks(self): + def graph_fn(): + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = 
self.createTestGroundtruthWeights() + masks = self.createTestMasks() + _, _, _, retained_masks = preprocessor.retain_boxes_above_threshold( + boxes, labels, weights, masks, threshold=0.6) + return [ + retained_masks, self.expectedMasksAfterThresholding()] + retained_masks_, expected_retained_masks_ = self.execute_cpu(graph_fn, []) + + self.assertAllClose( + retained_masks_, expected_retained_masks_) + + def testRetainBoxesAboveThresholdWithKeypoints(self): + def graph_fn(): + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + keypoints, _ = self.createTestKeypoints() + (_, _, _, retained_keypoints) = preprocessor.retain_boxes_above_threshold( + boxes, labels, weights, keypoints=keypoints, threshold=0.6) + return [retained_keypoints, self.expectedKeypointsAfterThresholding()] + + (retained_keypoints_, + expected_retained_keypoints_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(retained_keypoints_, expected_retained_keypoints_) + + def testDropLabelProbabilistically(self): + def graph_fn(): + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + (retained_boxes, retained_labels, + retained_weights) = preprocessor.drop_label_probabilistically( + boxes, labels, weights, dropped_label=1, drop_probability=1.0) + return [ + retained_boxes, retained_labels, retained_weights, + self.expectedBoxesAfterDropping(), + self.expectedLabelsAfterDropping(), + self.expectedLabelScoresAfterDropping() + ] + + (retained_boxes_, retained_labels_, retained_weights_, + expected_retained_boxes_, expected_retained_labels_, + expected_retained_weights_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(retained_boxes_, expected_retained_boxes_) + self.assertAllClose(retained_labels_, expected_retained_labels_) + self.assertAllClose(retained_weights_, expected_retained_weights_) + + def testDropLabelProbabilisticallyWithMultiClassScores(self): + def graph_fn(): + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + multiclass_scores = self.createTestMultiClassScores() + (_, _, _, + retained_multiclass_scores) = preprocessor.drop_label_probabilistically( + boxes, + labels, + weights, + multiclass_scores=multiclass_scores, + dropped_label=1, + drop_probability=1.0) + return [retained_multiclass_scores, + self.expectedMultiClassScoresAfterDropping()] + (retained_multiclass_scores_, + expected_retained_multiclass_scores_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(retained_multiclass_scores_, + expected_retained_multiclass_scores_) + + def testDropLabelProbabilisticallyWithMasks(self): + def graph_fn(): + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + masks = self.createTestMasks() + (_, _, _, retained_masks) = preprocessor.drop_label_probabilistically( + boxes, + labels, + weights, + masks=masks, + dropped_label=1, + drop_probability=1.0) + return [retained_masks, self.expectedMasksAfterDropping()] + (retained_masks_, expected_retained_masks_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(retained_masks_, expected_retained_masks_) + + def testDropLabelProbabilisticallyWithKeypoints(self): + def graph_fn(): + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + keypoints, _ = self.createTestKeypoints() + (_, _, _, retained_keypoints) = 
preprocessor.drop_label_probabilistically( + boxes, + labels, + weights, + keypoints=keypoints, + dropped_label=1, + drop_probability=1.0) + return [retained_keypoints, self.expectedKeypointsAfterDropping()] + + (retained_keypoints_, + expected_retained_keypoints_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(retained_keypoints_, expected_retained_keypoints_) + + def testRemapLabels(self): + def graph_fn(): + labels = self.createTestLabelsLong() + remapped_labels = preprocessor.remap_labels(labels, [1, 2], 3) + return [remapped_labels, self.expectedLabelsAfterRemapping()] + + (remapped_labels_, expected_remapped_labels_) = self.execute_cpu(graph_fn, + []) + self.assertAllClose(remapped_labels_, expected_remapped_labels_) + + def testFlipBoxesLeftRight(self): + def graph_fn(): + boxes = self.createTestBoxes() + flipped_boxes = preprocessor._flip_boxes_left_right(boxes) + expected_boxes = self.expectedBoxesAfterLeftRightFlip() + return flipped_boxes, expected_boxes + flipped_boxes, expected_boxes = self.execute_cpu(graph_fn, []) + self.assertAllEqual(flipped_boxes.flatten(), expected_boxes.flatten()) + + def testFlipBoxesUpDown(self): + def graph_fn(): + boxes = self.createTestBoxes() + flipped_boxes = preprocessor._flip_boxes_up_down(boxes) + expected_boxes = self.expectedBoxesAfterUpDownFlip() + return flipped_boxes, expected_boxes + flipped_boxes, expected_boxes = self.execute_cpu(graph_fn, []) + self.assertAllEqual(flipped_boxes.flatten(), expected_boxes.flatten()) + + def testRot90Boxes(self): + def graph_fn(): + boxes = self.createTestBoxes() + rotated_boxes = preprocessor._rot90_boxes(boxes) + expected_boxes = self.expectedBoxesAfterRot90() + return rotated_boxes, expected_boxes + rotated_boxes, expected_boxes = self.execute_cpu(graph_fn, []) + self.assertAllEqual(rotated_boxes.flatten(), expected_boxes.flatten()) + + def testFlipMasksLeftRight(self): + def graph_fn(): + test_mask = self.createTestMasks() + flipped_mask = preprocessor._flip_masks_left_right(test_mask) + expected_mask = self.expectedMasksAfterLeftRightFlip() + return flipped_mask, expected_mask + flipped_mask, expected_mask = self.execute_cpu(graph_fn, []) + self.assertAllEqual(flipped_mask.flatten(), expected_mask.flatten()) + + def testFlipMasksUpDown(self): + def graph_fn(): + test_mask = self.createTestMasks() + flipped_mask = preprocessor._flip_masks_up_down(test_mask) + expected_mask = self.expectedMasksAfterUpDownFlip() + return flipped_mask, expected_mask + flipped_mask, expected_mask = self.execute_cpu(graph_fn, []) + self.assertAllEqual(flipped_mask.flatten(), expected_mask.flatten()) + + def testRot90Masks(self): + def graph_fn(): + test_mask = self.createTestMasks() + rotated_mask = preprocessor._rot90_masks(test_mask) + expected_mask = self.expectedMasksAfterRot90() + return [rotated_mask, expected_mask] + rotated_mask, expected_mask = self.execute(graph_fn, []) + self.assertAllEqual(rotated_mask.flatten(), expected_mask.flatten()) + + def _testPreprocessorCache(self, + preprocess_options, + test_boxes=False, + test_masks=False, + test_keypoints=False): + if self.is_tf2(): return + def graph_fn(): + cache = preprocessor_cache.PreprocessorCache() + images = self.createTestImages() + boxes = self.createTestBoxes() + weights = self.createTestGroundtruthWeights() + classes = self.createTestLabels() + masks = self.createTestMasks() + keypoints, _ = self.createTestKeypoints() + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_instance_masks=test_masks, 
include_keypoints=test_keypoints) + out = [] + for _ in range(2): + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_weights: weights + } + if test_boxes: + tensor_dict[fields.InputDataFields.groundtruth_boxes] = boxes + tensor_dict[fields.InputDataFields.groundtruth_classes] = classes + if test_masks: + tensor_dict[fields.InputDataFields.groundtruth_instance_masks] = masks + if test_keypoints: + tensor_dict[fields.InputDataFields.groundtruth_keypoints] = keypoints + out.append( + preprocessor.preprocess(tensor_dict, preprocess_options, + preprocessor_arg_map, cache)) + return out + + out1, out2 = self.execute_cpu_tf1(graph_fn, []) + for (_, v1), (_, v2) in zip(out1.items(), out2.items()): + self.assertAllClose(v1, v2) + + def testRandomHorizontalFlip(self): + def graph_fn(): + preprocess_options = [(preprocessor.random_horizontal_flip, {})] + images = self.expectedImagesAfterNormalization() + boxes = self.createTestBoxes() + tensor_dict = {fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes} + images_expected1 = self.expectedImagesAfterLeftRightFlip() + boxes_expected1 = self.expectedBoxesAfterLeftRightFlip() + images_expected2 = images + boxes_expected2 = boxes + tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) + images = tensor_dict[fields.InputDataFields.image] + boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] + + boxes_diff1 = tf.squared_difference(boxes, boxes_expected1) + boxes_diff2 = tf.squared_difference(boxes, boxes_expected2) + boxes_diff = tf.multiply(boxes_diff1, boxes_diff2) + boxes_diff_expected = tf.zeros_like(boxes_diff) + + images_diff1 = tf.squared_difference(images, images_expected1) + images_diff2 = tf.squared_difference(images, images_expected2) + images_diff = tf.multiply(images_diff1, images_diff2) + images_diff_expected = tf.zeros_like(images_diff) + return [images_diff, images_diff_expected, boxes_diff, + boxes_diff_expected] + (images_diff_, images_diff_expected_, boxes_diff_, + boxes_diff_expected_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(boxes_diff_, boxes_diff_expected_) + self.assertAllClose(images_diff_, images_diff_expected_) + + def testRandomHorizontalFlipWithEmptyBoxes(self): + def graph_fn(): + preprocess_options = [(preprocessor.random_horizontal_flip, {})] + images = self.expectedImagesAfterNormalization() + boxes = self.createEmptyTestBoxes() + tensor_dict = {fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes} + images_expected1 = self.expectedImagesAfterLeftRightFlip() + boxes_expected = self.createEmptyTestBoxes() + images_expected2 = images + tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) + images = tensor_dict[fields.InputDataFields.image] + boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] + + images_diff1 = tf.squared_difference(images, images_expected1) + images_diff2 = tf.squared_difference(images, images_expected2) + images_diff = tf.multiply(images_diff1, images_diff2) + images_diff_expected = tf.zeros_like(images_diff) + return [images_diff, images_diff_expected, boxes, boxes_expected] + (images_diff_, images_diff_expected_, boxes_, + boxes_expected_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(boxes_, boxes_expected_) + self.assertAllClose(images_diff_, images_diff_expected_) + + def testRandomHorizontalFlipWithCache(self): + keypoint_flip_permutation = self.createKeypointFlipPermutation() + preprocess_options = [ + 
(preprocessor.random_horizontal_flip, + {'keypoint_flip_permutation': keypoint_flip_permutation})] + self._testPreprocessorCache(preprocess_options, + test_boxes=True, + test_masks=True, + test_keypoints=True) + + def testRunRandomHorizontalFlipWithMaskAndKeypoints(self): + + def graph_fn(): + preprocess_options = [(preprocessor.random_horizontal_flip, {})] + image_height = 3 + image_width = 3 + images = tf.random_uniform([1, image_height, image_width, 3]) + boxes = self.createTestBoxes() + masks = self.createTestMasks() + keypoints, keypoint_visibilities = self.createTestKeypoints() + keypoint_flip_permutation = self.createKeypointFlipPermutation() + tensor_dict = { + fields.InputDataFields.image: + images, + fields.InputDataFields.groundtruth_boxes: + boxes, + fields.InputDataFields.groundtruth_instance_masks: + masks, + fields.InputDataFields.groundtruth_keypoints: + keypoints, + fields.InputDataFields.groundtruth_keypoint_visibilities: + keypoint_visibilities + } + preprocess_options = [(preprocessor.random_horizontal_flip, { + 'keypoint_flip_permutation': keypoint_flip_permutation + })] + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_instance_masks=True, + include_keypoints=True, + include_keypoint_visibilities=True) + tensor_dict = preprocessor.preprocess( + tensor_dict, preprocess_options, func_arg_map=preprocessor_arg_map) + boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] + masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks] + keypoints = tensor_dict[fields.InputDataFields.groundtruth_keypoints] + keypoint_visibilities = tensor_dict[ + fields.InputDataFields.groundtruth_keypoint_visibilities] + return [boxes, masks, keypoints, keypoint_visibilities] + + boxes, masks, keypoints, keypoint_visibilities = self.execute_cpu( + graph_fn, []) + self.assertIsNotNone(boxes) + self.assertIsNotNone(masks) + self.assertIsNotNone(keypoints) + self.assertIsNotNone(keypoint_visibilities) + + def testRandomVerticalFlip(self): + + def graph_fn(): + preprocess_options = [(preprocessor.random_vertical_flip, {})] + images = self.expectedImagesAfterNormalization() + boxes = self.createTestBoxes() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes + } + images_expected1 = self.expectedImagesAfterUpDownFlip() + boxes_expected1 = self.expectedBoxesAfterUpDownFlip() + images_expected2 = images + boxes_expected2 = boxes + tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) + images = tensor_dict[fields.InputDataFields.image] + boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] + + boxes_diff1 = tf.squared_difference(boxes, boxes_expected1) + boxes_diff2 = tf.squared_difference(boxes, boxes_expected2) + boxes_diff = tf.multiply(boxes_diff1, boxes_diff2) + boxes_diff_expected = tf.zeros_like(boxes_diff) + + images_diff1 = tf.squared_difference(images, images_expected1) + images_diff2 = tf.squared_difference(images, images_expected2) + images_diff = tf.multiply(images_diff1, images_diff2) + images_diff_expected = tf.zeros_like(images_diff) + return [ + images_diff, images_diff_expected, boxes_diff, boxes_diff_expected + ] + + (images_diff_, images_diff_expected_, boxes_diff_, + boxes_diff_expected_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(boxes_diff_, boxes_diff_expected_) + self.assertAllClose(images_diff_, images_diff_expected_) + + def testRandomVerticalFlipWithEmptyBoxes(self): + + def graph_fn(): + preprocess_options = 
[(preprocessor.random_vertical_flip, {})] + images = self.expectedImagesAfterNormalization() + boxes = self.createEmptyTestBoxes() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes + } + images_expected1 = self.expectedImagesAfterUpDownFlip() + boxes_expected = self.createEmptyTestBoxes() + images_expected2 = images + tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) + images = tensor_dict[fields.InputDataFields.image] + boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] + + images_diff1 = tf.squared_difference(images, images_expected1) + images_diff2 = tf.squared_difference(images, images_expected2) + images_diff = tf.multiply(images_diff1, images_diff2) + images_diff_expected = tf.zeros_like(images_diff) + return [images_diff, images_diff_expected, boxes, boxes_expected] + + (images_diff_, images_diff_expected_, boxes_, + boxes_expected_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(boxes_, boxes_expected_) + self.assertAllClose(images_diff_, images_diff_expected_) + + def testRandomVerticalFlipWithCache(self): + keypoint_flip_permutation = self.createKeypointFlipPermutation() + preprocess_options = [ + (preprocessor.random_vertical_flip, + {'keypoint_flip_permutation': keypoint_flip_permutation})] + self._testPreprocessorCache(preprocess_options, + test_boxes=True, + test_masks=True, + test_keypoints=True) + + def testRunRandomVerticalFlipWithMaskAndKeypoints(self): + preprocess_options = [(preprocessor.random_vertical_flip, {})] + image_height = 3 + image_width = 3 + images = tf.random_uniform([1, image_height, image_width, 3]) + boxes = self.createTestBoxes() + masks = self.createTestMasks() + keypoints, _ = self.createTestKeypoints() + keypoint_flip_permutation = self.createKeypointFlipPermutation() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_instance_masks: masks, + fields.InputDataFields.groundtruth_keypoints: keypoints + } + preprocess_options = [ + (preprocessor.random_vertical_flip, + {'keypoint_flip_permutation': keypoint_flip_permutation})] + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_instance_masks=True, include_keypoints=True) + tensor_dict = preprocessor.preprocess( + tensor_dict, preprocess_options, func_arg_map=preprocessor_arg_map) + boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] + masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks] + keypoints = tensor_dict[fields.InputDataFields.groundtruth_keypoints] + self.assertIsNotNone(boxes) + self.assertIsNotNone(masks) + self.assertIsNotNone(keypoints) + + def testRandomRotation90(self): + + def graph_fn(): + preprocess_options = [(preprocessor.random_rotation90, {})] + images = self.expectedImagesAfterNormalization() + boxes = self.createTestBoxes() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes + } + images_expected1 = self.expectedImagesAfterRot90() + boxes_expected1 = self.expectedBoxesAfterRot90() + images_expected2 = images + boxes_expected2 = boxes + tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) + images = tensor_dict[fields.InputDataFields.image] + boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] + + boxes_diff1 = tf.squared_difference(boxes, boxes_expected1) + boxes_diff2 = tf.squared_difference(boxes, boxes_expected2) + boxes_diff = tf.multiply(boxes_diff1, 
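+      # random_rotation90 is applied based on a random coin flip, so the
+      # output must equal either the rotated or the original tensors; the
+      # product of the two squared differences is all zeros exactly when at
+      # least one of them matches.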
boxes_diff2) + boxes_diff_expected = tf.zeros_like(boxes_diff) + + images_diff1 = tf.squared_difference(images, images_expected1) + images_diff2 = tf.squared_difference(images, images_expected2) + images_diff = tf.multiply(images_diff1, images_diff2) + images_diff_expected = tf.zeros_like(images_diff) + return [ + images_diff, images_diff_expected, boxes_diff, boxes_diff_expected + ] + + (images_diff_, images_diff_expected_, boxes_diff_, + boxes_diff_expected_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(boxes_diff_, boxes_diff_expected_) + self.assertAllClose(images_diff_, images_diff_expected_) + + def testRandomRotation90WithEmptyBoxes(self): + + def graph_fn(): + preprocess_options = [(preprocessor.random_rotation90, {})] + images = self.expectedImagesAfterNormalization() + boxes = self.createEmptyTestBoxes() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes + } + images_expected1 = self.expectedImagesAfterRot90() + boxes_expected = self.createEmptyTestBoxes() + images_expected2 = images + tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) + images = tensor_dict[fields.InputDataFields.image] + boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] + + images_diff1 = tf.squared_difference(images, images_expected1) + images_diff2 = tf.squared_difference(images, images_expected2) + images_diff = tf.multiply(images_diff1, images_diff2) + images_diff_expected = tf.zeros_like(images_diff) + return [images_diff, images_diff_expected, boxes, boxes_expected] + + (images_diff_, images_diff_expected_, boxes_, + boxes_expected_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(boxes_, boxes_expected_) + self.assertAllClose(images_diff_, images_diff_expected_) + + def testRandomRotation90WithCache(self): + preprocess_options = [(preprocessor.random_rotation90, {})] + self._testPreprocessorCache(preprocess_options, + test_boxes=True, + test_masks=True, + test_keypoints=True) + + def testRunRandomRotation90WithMaskAndKeypoints(self): + preprocess_options = [(preprocessor.random_rotation90, {})] + image_height = 3 + image_width = 3 + images = tf.random_uniform([1, image_height, image_width, 3]) + boxes = self.createTestBoxes() + masks = self.createTestMasks() + keypoints, _ = self.createTestKeypoints() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_instance_masks: masks, + fields.InputDataFields.groundtruth_keypoints: keypoints + } + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_instance_masks=True, include_keypoints=True) + tensor_dict = preprocessor.preprocess( + tensor_dict, preprocess_options, func_arg_map=preprocessor_arg_map) + boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] + masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks] + keypoints = tensor_dict[fields.InputDataFields.groundtruth_keypoints] + self.assertIsNotNone(boxes) + self.assertIsNotNone(masks) + self.assertIsNotNone(keypoints) + + def testRandomPixelValueScale(self): + + def graph_fn(): + preprocessing_options = [] + preprocessing_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocessing_options.append((preprocessor.random_pixel_value_scale, {})) + images = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images} + tensor_dict = 
preprocessor.preprocess(tensor_dict, preprocessing_options) + images_min = tf.cast(images, dtype=tf.float32) * 0.9 / 255.0 + images_max = tf.cast(images, dtype=tf.float32) * 1.1 / 255.0 + images = tensor_dict[fields.InputDataFields.image] + values_greater = tf.greater_equal(images, images_min) + values_less = tf.less_equal(images, images_max) + values_true = tf.fill([1, 4, 4, 3], True) + return [values_greater, values_less, values_true] + + (values_greater_, values_less_, + values_true_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(values_greater_, values_true_) + self.assertAllClose(values_less_, values_true_) + + def testRandomPixelValueScaleWithCache(self): + preprocess_options = [] + preprocess_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocess_options.append((preprocessor.random_pixel_value_scale, {})) + self._testPreprocessorCache(preprocess_options, + test_boxes=True, + test_masks=False, + test_keypoints=False) + + def testRandomImageScale(self): + + def graph_fn(): + preprocess_options = [(preprocessor.random_image_scale, {})] + images_original = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images_original} + tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) + images_scaled = tensor_dict[fields.InputDataFields.image] + images_original_shape = tf.shape(images_original) + images_scaled_shape = tf.shape(images_scaled) + return [images_original_shape, images_scaled_shape] + + (images_original_shape_, + images_scaled_shape_) = self.execute_cpu(graph_fn, []) + self.assertLessEqual(images_original_shape_[1] * 0.5, + images_scaled_shape_[1]) + self.assertGreaterEqual(images_original_shape_[1] * 2.0, + images_scaled_shape_[1]) + self.assertLessEqual(images_original_shape_[2] * 0.5, + images_scaled_shape_[2]) + self.assertGreaterEqual(images_original_shape_[2] * 2.0, + images_scaled_shape_[2]) + + def testRandomImageScaleWithCache(self): + preprocess_options = [(preprocessor.random_image_scale, {})] + self._testPreprocessorCache(preprocess_options, + test_boxes=False, + test_masks=False, + test_keypoints=False) + + def testRandomRGBtoGray(self): + + def graph_fn(): + preprocess_options = [(preprocessor.random_rgb_to_gray, {})] + images_original = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images_original} + tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) + images_gray = tensor_dict[fields.InputDataFields.image] + images_gray_r, images_gray_g, images_gray_b = tf.split( + value=images_gray, num_or_size_splits=3, axis=3) + images_r, images_g, images_b = tf.split( + value=images_original, num_or_size_splits=3, axis=3) + images_r_diff1 = tf.squared_difference( + tf.cast(images_r, dtype=tf.float32), + tf.cast(images_gray_r, dtype=tf.float32)) + images_r_diff2 = tf.squared_difference( + tf.cast(images_gray_r, dtype=tf.float32), + tf.cast(images_gray_g, dtype=tf.float32)) + images_r_diff = tf.multiply(images_r_diff1, images_r_diff2) + images_g_diff1 = tf.squared_difference( + tf.cast(images_g, dtype=tf.float32), + tf.cast(images_gray_g, dtype=tf.float32)) + images_g_diff2 = tf.squared_difference( + tf.cast(images_gray_g, dtype=tf.float32), + tf.cast(images_gray_b, dtype=tf.float32)) + images_g_diff = tf.multiply(images_g_diff1, images_g_diff2) + images_b_diff1 = tf.squared_difference( + tf.cast(images_b, dtype=tf.float32), + tf.cast(images_gray_b, dtype=tf.float32)) + images_b_diff2 = 
tf.squared_difference( + tf.cast(images_gray_b, dtype=tf.float32), + tf.cast(images_gray_r, dtype=tf.float32)) + images_b_diff = tf.multiply(images_b_diff1, images_b_diff2) + image_zero1 = tf.constant(0, dtype=tf.float32, shape=[1, 4, 4, 1]) + return [images_r_diff, images_g_diff, images_b_diff, image_zero1] + + (images_r_diff_, images_g_diff_, images_b_diff_, + image_zero1_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(images_r_diff_, image_zero1_) + self.assertAllClose(images_g_diff_, image_zero1_) + self.assertAllClose(images_b_diff_, image_zero1_) + + def testRandomRGBtoGrayWithCache(self): + preprocess_options = [( + preprocessor.random_rgb_to_gray, {'probability': 0.5})] + self._testPreprocessorCache(preprocess_options, + test_boxes=False, + test_masks=False, + test_keypoints=False) + + def testRandomAdjustBrightness(self): + + def graph_fn(): + preprocessing_options = [] + preprocessing_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocessing_options.append((preprocessor.random_adjust_brightness, {})) + images_original = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images_original} + tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) + images_bright = tensor_dict[fields.InputDataFields.image] + image_original_shape = tf.shape(images_original) + image_bright_shape = tf.shape(images_bright) + return [image_original_shape, image_bright_shape] + + (image_original_shape_, + image_bright_shape_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(image_original_shape_, image_bright_shape_) + + def testRandomAdjustBrightnessWithCache(self): + preprocess_options = [] + preprocess_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocess_options.append((preprocessor.random_adjust_brightness, {})) + self._testPreprocessorCache(preprocess_options, + test_boxes=False, + test_masks=False, + test_keypoints=False) + + def testRandomAdjustContrast(self): + + def graph_fn(): + preprocessing_options = [] + preprocessing_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocessing_options.append((preprocessor.random_adjust_contrast, {})) + images_original = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images_original} + tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) + images_contrast = tensor_dict[fields.InputDataFields.image] + image_original_shape = tf.shape(images_original) + image_contrast_shape = tf.shape(images_contrast) + return [image_original_shape, image_contrast_shape] + + (image_original_shape_, + image_contrast_shape_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(image_original_shape_, image_contrast_shape_) + + def testRandomAdjustContrastWithCache(self): + preprocess_options = [] + preprocess_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocess_options.append((preprocessor.random_adjust_contrast, {})) + self._testPreprocessorCache(preprocess_options, + test_boxes=False, + test_masks=False, + test_keypoints=False) + + def testRandomAdjustHue(self): + + def graph_fn(): + preprocessing_options = [] + 
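+      # As with the brightness/contrast tests above, the image is normalized
+      # to [0, 1] before the random hue shift, and only the output shape is
+      # checked because the shift itself is random.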
preprocessing_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocessing_options.append((preprocessor.random_adjust_hue, {})) + images_original = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images_original} + tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) + images_hue = tensor_dict[fields.InputDataFields.image] + image_original_shape = tf.shape(images_original) + image_hue_shape = tf.shape(images_hue) + return [image_original_shape, image_hue_shape] + + (image_original_shape_, image_hue_shape_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(image_original_shape_, image_hue_shape_) + + def testRandomAdjustHueWithCache(self): + preprocess_options = [] + preprocess_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocess_options.append((preprocessor.random_adjust_hue, {})) + self._testPreprocessorCache(preprocess_options, + test_boxes=False, + test_masks=False, + test_keypoints=False) + + def testRandomDistortColor(self): + + def graph_fn(): + preprocessing_options = [] + preprocessing_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocessing_options.append((preprocessor.random_distort_color, {})) + images_original = self.createTestImages() + images_original_shape = tf.shape(images_original) + tensor_dict = {fields.InputDataFields.image: images_original} + tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) + images_distorted_color = tensor_dict[fields.InputDataFields.image] + images_distorted_color_shape = tf.shape(images_distorted_color) + return [images_original_shape, images_distorted_color_shape] + + (images_original_shape_, + images_distorted_color_shape_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(images_original_shape_, images_distorted_color_shape_) + + def testRandomDistortColorWithCache(self): + preprocess_options = [] + preprocess_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocess_options.append((preprocessor.random_distort_color, {})) + self._testPreprocessorCache(preprocess_options, + test_boxes=False, + test_masks=False, + test_keypoints=False) + + def testRandomJitterBoxes(self): + + def graph_fn(): + preprocessing_options = [] + preprocessing_options.append((preprocessor.random_jitter_boxes, {})) + boxes = self.createTestBoxes() + boxes_shape = tf.shape(boxes) + tensor_dict = {fields.InputDataFields.groundtruth_boxes: boxes} + tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) + distorted_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] + distorted_boxes_shape = tf.shape(distorted_boxes) + return [boxes_shape, distorted_boxes_shape] + + (boxes_shape_, distorted_boxes_shape_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(boxes_shape_, distorted_boxes_shape_) + + def testRandomCropImage(self): + + def graph_fn(): + preprocessing_options = [] + preprocessing_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocessing_options.append((preprocessor.random_crop_image, {})) + images = self.createTestImages() + boxes = 
self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + } + distorted_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + distorted_images = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + boxes_rank = tf.rank(boxes) + distorted_boxes_rank = tf.rank(distorted_boxes) + images_rank = tf.rank(images) + distorted_images_rank = tf.rank(distorted_images) + return [ + boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank + ] + + (boxes_rank_, distorted_boxes_rank_, images_rank_, + distorted_images_rank_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) + self.assertAllEqual(images_rank_, distorted_images_rank_) + + def testRandomCropImageWithCache(self): + preprocess_options = [(preprocessor.random_rgb_to_gray, + {'probability': 0.5}), + (preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1, + }), + (preprocessor.random_crop_image, {})] + self._testPreprocessorCache(preprocess_options, + test_boxes=True, + test_masks=False, + test_keypoints=False) + + def testRandomCropImageGrayscale(self): + + def graph_fn(): + preprocessing_options = [(preprocessor.rgb_to_gray, {}), + (preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1, + }), (preprocessor.random_crop_image, {})] + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + } + distorted_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + distorted_images = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + boxes_rank = tf.rank(boxes) + distorted_boxes_rank = tf.rank(distorted_boxes) + images_rank = tf.rank(images) + distorted_images_rank = tf.rank(distorted_images) + return [ + boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank + ] + + (boxes_rank_, distorted_boxes_rank_, images_rank_, + distorted_images_rank_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) + self.assertAllEqual(images_rank_, distorted_images_rank_) + + def testRandomCropImageWithBoxOutOfImage(self): + + def graph_fn(): + preprocessing_options = [] + preprocessing_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocessing_options.append((preprocessor.random_crop_image, {})) + images = self.createTestImages() + boxes = self.createTestBoxesOutOfImage() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + 
fields.InputDataFields.groundtruth_weights: weights, + } + distorted_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + distorted_images = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + boxes_rank = tf.rank(boxes) + distorted_boxes_rank = tf.rank(distorted_boxes) + images_rank = tf.rank(images) + distorted_images_rank = tf.rank(distorted_images) + return [ + boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank + ] + + (boxes_rank_, distorted_boxes_rank_, images_rank_, + distorted_images_rank_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) + self.assertAllEqual(images_rank_, distorted_images_rank_) + + def testRandomCropImageWithRandomCoefOne(self): + + def graph_fn(): + preprocessing_options = [(preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })] + + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights + } + tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) + images = tensor_dict[fields.InputDataFields.image] + + preprocessing_options = [(preprocessor.random_crop_image, { + 'random_coef': 1.0 + })] + distorted_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + + distorted_images = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + distorted_labels = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_classes] + distorted_weights = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_weights] + boxes_shape = tf.shape(boxes) + distorted_boxes_shape = tf.shape(distorted_boxes) + images_shape = tf.shape(images) + distorted_images_shape = tf.shape(distorted_images) + return [ + boxes_shape, distorted_boxes_shape, images_shape, + distorted_images_shape, images, distorted_images, boxes, + distorted_boxes, labels, distorted_labels, weights, distorted_weights + ] + + (boxes_shape_, distorted_boxes_shape_, images_shape_, + distorted_images_shape_, images_, distorted_images_, boxes_, + distorted_boxes_, labels_, distorted_labels_, weights_, + distorted_weights_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(boxes_shape_, distorted_boxes_shape_) + self.assertAllEqual(images_shape_, distorted_images_shape_) + self.assertAllClose(images_, distorted_images_) + self.assertAllClose(boxes_, distorted_boxes_) + self.assertAllEqual(labels_, distorted_labels_) + self.assertAllEqual(weights_, distorted_weights_) + + def testRandomCropWithMockSampleDistortedBoundingBox(self): + def graph_fn(): + preprocessing_options = [(preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })] + + images = self.createColorfulTestImage() + boxes = tf.constant([[0.1, 0.1, 0.8, 0.3], [0.2, 0.4, 0.75, 0.75], + [0.3, 0.1, 0.4, 0.7]], + dtype=tf.float32) + labels = tf.constant([1, 7, 11], dtype=tf.int32) + weights = tf.constant([1.0, 0.5, 0.6], dtype=tf.float32) + + tensor_dict = { + fields.InputDataFields.image: images, + 
fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + } + tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + images = tensor_dict[fields.InputDataFields.image] + + preprocessing_options = [(preprocessor.random_crop_image, {})] + + with mock.patch.object(tf.image, 'sample_distorted_bounding_box' + ) as mock_sample_distorted_bounding_box: + mock_sample_distorted_bounding_box.return_value = (tf.constant( + [6, 143, 0], dtype=tf.int32), tf.constant( + [190, 237, -1], dtype=tf.int32), tf.constant( + [[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)) + distorted_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + distorted_labels = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_classes] + distorted_weights = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_weights] + expected_boxes = tf.constant( + [[0.178947, 0.07173, 0.75789469, 0.66244733], + [0.28421, 0.0, 0.38947365, 0.57805908]], + dtype=tf.float32) + expected_labels = tf.constant([7, 11], dtype=tf.int32) + expected_weights = tf.constant([0.5, 0.6], dtype=tf.float32) + return [ + distorted_boxes, distorted_labels, distorted_weights, + expected_boxes, expected_labels, expected_weights + ] + + (distorted_boxes_, distorted_labels_, distorted_weights_, expected_boxes_, + expected_labels_, expected_weights_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(distorted_boxes_, expected_boxes_) + self.assertAllEqual(distorted_labels_, expected_labels_) + self.assertAllEqual(distorted_weights_, expected_weights_) + + def testRandomCropWithoutClipBoxes(self): + + def graph_fn(): + preprocessing_options = [(preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })] + + images = self.createColorfulTestImage() + boxes = tf.constant([[0.1, 0.1, 0.8, 0.3], + [0.2, 0.4, 0.75, 0.75], + [0.3, 0.1, 0.4, 0.7]], dtype=tf.float32) + keypoints = tf.constant([ + [[0.1, 0.1], [0.8, 0.3]], + [[0.2, 0.4], [0.75, 0.75]], + [[0.3, 0.1], [0.4, 0.7]], + ], dtype=tf.float32) + labels = tf.constant([1, 7, 11], dtype=tf.int32) + weights = tf.constant([1.0, 0.5, 0.6], dtype=tf.float32) + + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_keypoints: keypoints, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + } + tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) + + preprocessing_options = [(preprocessor.random_crop_image, { + 'clip_boxes': False, + })] + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_keypoints=True) + with mock.patch.object(tf.image, 'sample_distorted_bounding_box' + ) as mock_sample_distorted_bounding_box: + mock_sample_distorted_bounding_box.return_value = (tf.constant( + [6, 143, 0], dtype=tf.int32), tf.constant( + [190, 237, -1], dtype=tf.int32), tf.constant( + [[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)) + distorted_tensor_dict = preprocessor.preprocess( + tensor_dict, preprocessing_options, + func_arg_map=preprocessor_arg_map) + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + distorted_keypoints = distorted_tensor_dict[ + 
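+          # With clip_boxes=False the cropped boxes and keypoints keep their
+          # coordinates relative to the crop window even when they fall
+          # outside [0, 1] (note the negative values expected below).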
fields.InputDataFields.groundtruth_keypoints] + distorted_labels = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_classes] + distorted_weights = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_weights] + expected_boxes = tf.constant( + [[0.178947, 0.07173, 0.75789469, 0.66244733], + [0.28421, -0.434599, 0.38947365, 0.57805908]], + dtype=tf.float32) + expected_keypoints = tf.constant( + [[[0.178947, 0.07173], [0.75789469, 0.66244733]], + [[0.28421, -0.434599], [0.38947365, 0.57805908]]], + dtype=tf.float32) + expected_labels = tf.constant([7, 11], dtype=tf.int32) + expected_weights = tf.constant([0.5, 0.6], dtype=tf.float32) + return [distorted_boxes, distorted_keypoints, distorted_labels, + distorted_weights, expected_boxes, expected_keypoints, + expected_labels, expected_weights] + + (distorted_boxes_, distorted_keypoints_, distorted_labels_, + distorted_weights_, expected_boxes_, expected_keypoints_, expected_labels_, + expected_weights_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(distorted_boxes_, expected_boxes_) + self.assertAllClose(distorted_keypoints_, expected_keypoints_) + self.assertAllEqual(distorted_labels_, expected_labels_) + self.assertAllEqual(distorted_weights_, expected_weights_) + + def testRandomCropImageWithMultiClassScores(self): + def graph_fn(): + preprocessing_options = [] + preprocessing_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocessing_options.append((preprocessor.random_crop_image, {})) + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + multiclass_scores = self.createTestMultiClassScores() + + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + fields.InputDataFields.multiclass_scores: multiclass_scores + } + distorted_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + distorted_images = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + distorted_multiclass_scores = distorted_tensor_dict[ + fields.InputDataFields.multiclass_scores] + boxes_rank = tf.rank(boxes) + distorted_boxes_rank = tf.rank(distorted_boxes) + images_rank = tf.rank(images) + distorted_images_rank = tf.rank(distorted_images) + multiclass_scores_rank = tf.rank(multiclass_scores) + distorted_multiclass_scores_rank = tf.rank(distorted_multiclass_scores) + return [ + boxes_rank, distorted_boxes, distorted_boxes_rank, images_rank, + distorted_images_rank, multiclass_scores_rank, + distorted_multiclass_scores_rank, distorted_multiclass_scores + ] + + (boxes_rank_, distorted_boxes_, distorted_boxes_rank_, images_rank_, + distorted_images_rank_, multiclass_scores_rank_, + distorted_multiclass_scores_rank_, + distorted_multiclass_scores_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) + self.assertAllEqual(images_rank_, distorted_images_rank_) + self.assertAllEqual(multiclass_scores_rank_, + distorted_multiclass_scores_rank_) + self.assertAllEqual(distorted_boxes_.shape[0], + distorted_multiclass_scores_.shape[0]) + + def testStrictRandomCropImageWithGroundtruthWeights(self): + def graph_fn(): + image = 
self.createColorfulTestImage()[0] + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + with mock.patch.object( + tf.image, + 'sample_distorted_bounding_box' + ) as mock_sample_distorted_bounding_box: + mock_sample_distorted_bounding_box.return_value = ( + tf.constant([6, 143, 0], dtype=tf.int32), + tf.constant([190, 237, -1], dtype=tf.int32), + tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)) + new_image, new_boxes, new_labels, new_groundtruth_weights = ( + preprocessor._strict_random_crop_image( + image, boxes, labels, weights)) + return [new_image, new_boxes, new_labels, new_groundtruth_weights] + (new_image, new_boxes, _, + new_groundtruth_weights) = self.execute_cpu(graph_fn, []) + expected_boxes = np.array( + [[0.0, 0.0, 0.75789469, 1.0], + [0.23157893, 0.24050637, 0.75789469, 1.0]], dtype=np.float32) + self.assertAllEqual(new_image.shape, [190, 237, 3]) + self.assertAllEqual(new_groundtruth_weights, [1.0, 0.5]) + self.assertAllClose( + new_boxes.flatten(), expected_boxes.flatten()) + + def testStrictRandomCropImageWithMasks(self): + def graph_fn(): + image = self.createColorfulTestImage()[0] + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + masks = tf.random_uniform([2, 200, 400], dtype=tf.float32) + with mock.patch.object( + tf.image, + 'sample_distorted_bounding_box' + ) as mock_sample_distorted_bounding_box: + mock_sample_distorted_bounding_box.return_value = ( + tf.constant([6, 143, 0], dtype=tf.int32), + tf.constant([190, 237, -1], dtype=tf.int32), + tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)) + new_image, new_boxes, new_labels, new_weights, new_masks = ( + preprocessor._strict_random_crop_image( + image, boxes, labels, weights, masks=masks)) + return [new_image, new_boxes, new_labels, new_weights, new_masks] + (new_image, new_boxes, _, _, + new_masks) = self.execute_cpu(graph_fn, []) + expected_boxes = np.array( + [[0.0, 0.0, 0.75789469, 1.0], + [0.23157893, 0.24050637, 0.75789469, 1.0]], dtype=np.float32) + self.assertAllEqual(new_image.shape, [190, 237, 3]) + self.assertAllEqual(new_masks.shape, [2, 190, 237]) + self.assertAllClose( + new_boxes.flatten(), expected_boxes.flatten()) + + def testStrictRandomCropImageWithKeypoints(self): + def graph_fn(): + image = self.createColorfulTestImage()[0] + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + keypoints, keypoint_visibilities = self.createTestKeypoints() + with mock.patch.object( + tf.image, + 'sample_distorted_bounding_box' + ) as mock_sample_distorted_bounding_box: + mock_sample_distorted_bounding_box.return_value = ( + tf.constant([6, 143, 0], dtype=tf.int32), + tf.constant([190, 237, -1], dtype=tf.int32), + tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)) + (new_image, new_boxes, new_labels, new_weights, new_keypoints, + new_keypoint_visibilities) = preprocessor._strict_random_crop_image( + image, boxes, labels, weights, keypoints=keypoints, + keypoint_visibilities=keypoint_visibilities) + return [new_image, new_boxes, new_labels, new_weights, new_keypoints, + new_keypoint_visibilities] + (new_image, new_boxes, _, _, new_keypoints, + new_keypoint_visibilities) = self.execute_cpu(graph_fn, []) + expected_boxes = np.array([ + [0.0, 0.0, 0.75789469, 1.0], + [0.23157893, 0.24050637, 0.75789469, 1.0],], dtype=np.float32) + expected_keypoints = np.array([ + [[np.nan, np.nan], + 
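+        # The first instance's keypoints lie outside the mocked crop window,
+        # so they come back as NaN with visibility False.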
[np.nan, np.nan], + [np.nan, np.nan]], + [[0.38947368, 0.07173], + [0.49473682, 0.24050637], + [0.60000002, 0.40928277]] + ], dtype=np.float32) + expected_keypoint_visibilities = [ + [False, False, False], + [False, True, True] + ] + self.assertAllEqual(new_image.shape, [190, 237, 3]) + self.assertAllClose( + new_boxes, expected_boxes) + self.assertAllClose( + new_keypoints, expected_keypoints) + self.assertAllEqual( + new_keypoint_visibilities, expected_keypoint_visibilities) + + def testRunRandomCropImageWithMasks(self): + def graph_fn(): + image = self.createColorfulTestImage() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + masks = tf.random_uniform([2, 200, 400], dtype=tf.float32) + + tensor_dict = { + fields.InputDataFields.image: image, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + fields.InputDataFields.groundtruth_instance_masks: masks, + } + + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_instance_masks=True) + + preprocessing_options = [(preprocessor.random_crop_image, {})] + + with mock.patch.object( + tf.image, + 'sample_distorted_bounding_box' + ) as mock_sample_distorted_bounding_box: + mock_sample_distorted_bounding_box.return_value = ( + tf.constant([6, 143, 0], dtype=tf.int32), + tf.constant([190, 237, -1], dtype=tf.int32), + tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)) + distorted_tensor_dict = preprocessor.preprocess( + tensor_dict, + preprocessing_options, + func_arg_map=preprocessor_arg_map) + distorted_image = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + distorted_labels = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_classes] + distorted_masks = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_instance_masks] + return [distorted_image, distorted_boxes, distorted_labels, + distorted_masks] + (distorted_image_, distorted_boxes_, distorted_labels_, + distorted_masks_) = self.execute_cpu(graph_fn, []) + expected_boxes = np.array([ + [0.0, 0.0, 0.75789469, 1.0], + [0.23157893, 0.24050637, 0.75789469, 1.0], + ], dtype=np.float32) + self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3]) + self.assertAllEqual(distorted_masks_.shape, [2, 190, 237]) + self.assertAllEqual(distorted_labels_, [1, 2]) + self.assertAllClose( + distorted_boxes_.flatten(), expected_boxes.flatten()) + + def testRunRandomCropImageWithKeypointsInsideCrop(self): + def graph_fn(): + image = self.createColorfulTestImage() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + keypoints = self.createTestKeypointsInsideCrop() + + tensor_dict = { + fields.InputDataFields.image: image, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_keypoints: keypoints, + fields.InputDataFields.groundtruth_weights: weights + } + + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_keypoints=True) + + preprocessing_options = [(preprocessor.random_crop_image, {})] + + with mock.patch.object( + tf.image, + 'sample_distorted_bounding_box' + ) as mock_sample_distorted_bounding_box: + mock_sample_distorted_bounding_box.return_value = ( + tf.constant([6, 143, 0], dtype=tf.int32), + tf.constant([190, 237, 
-1], dtype=tf.int32), + tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)) + distorted_tensor_dict = preprocessor.preprocess( + tensor_dict, + preprocessing_options, + func_arg_map=preprocessor_arg_map) + distorted_image = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + distorted_labels = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_classes] + distorted_keypoints = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_keypoints] + return [distorted_image, distorted_boxes, distorted_labels, + distorted_keypoints] + (distorted_image_, distorted_boxes_, distorted_labels_, + distorted_keypoints_) = self.execute_cpu(graph_fn, []) + expected_boxes = np.array([ + [0.0, 0.0, 0.75789469, 1.0], + [0.23157893, 0.24050637, 0.75789469, 1.0], + ], dtype=np.float32) + expected_keypoints = np.array([ + [[0.38947368, 0.07173], + [0.49473682, 0.24050637], + [0.60000002, 0.40928277]], + [[0.38947368, 0.07173], + [0.49473682, 0.24050637], + [0.60000002, 0.40928277]] + ]) + self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3]) + self.assertAllEqual(distorted_labels_, [1, 2]) + self.assertAllClose( + distorted_boxes_.flatten(), expected_boxes.flatten()) + self.assertAllClose( + distorted_keypoints_.flatten(), expected_keypoints.flatten()) + + def testRunRandomCropImageWithKeypointsOutsideCrop(self): + def graph_fn(): + image = self.createColorfulTestImage() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + keypoints = self.createTestKeypointsOutsideCrop() + + tensor_dict = { + fields.InputDataFields.image: image, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + fields.InputDataFields.groundtruth_keypoints: keypoints + } + + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_keypoints=True) + + preprocessing_options = [(preprocessor.random_crop_image, {})] + + with mock.patch.object( + tf.image, + 'sample_distorted_bounding_box' + ) as mock_sample_distorted_bounding_box: + mock_sample_distorted_bounding_box.return_value = ( + tf.constant([6, 143, 0], dtype=tf.int32), + tf.constant([190, 237, -1], dtype=tf.int32), + tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)) + distorted_tensor_dict = preprocessor.preprocess( + tensor_dict, + preprocessing_options, + func_arg_map=preprocessor_arg_map) + distorted_image = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + distorted_labels = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_classes] + distorted_keypoints = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_keypoints] + return [distorted_image, distorted_boxes, distorted_labels, + distorted_keypoints] + (distorted_image_, distorted_boxes_, distorted_labels_, + distorted_keypoints_) = self.execute_cpu(graph_fn, []) + + expected_boxes = np.array([ + [0.0, 0.0, 0.75789469, 1.0], + [0.23157893, 0.24050637, 0.75789469, 1.0], + ], dtype=np.float32) + expected_keypoints = np.array([ + [[np.nan, np.nan], + [np.nan, np.nan], + [np.nan, np.nan]], + [[np.nan, np.nan], + [np.nan, np.nan], + [np.nan, np.nan]], + ]) + self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3]) + self.assertAllEqual(distorted_labels_, [1, 2]) + self.assertAllClose( + 
distorted_boxes_.flatten(), expected_boxes.flatten()) + self.assertAllClose( + distorted_keypoints_.flatten(), expected_keypoints.flatten()) + + def testRunRetainBoxesAboveThreshold(self): + def graph_fn(): + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + + tensor_dict = { + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + } + + preprocessing_options = [ + (preprocessor.retain_boxes_above_threshold, {'threshold': 0.6}) + ] + preprocessor_arg_map = preprocessor.get_default_func_arg_map() + retained_tensor_dict = preprocessor.preprocess( + tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) + retained_boxes = retained_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + retained_labels = retained_tensor_dict[ + fields.InputDataFields.groundtruth_classes] + retained_weights = retained_tensor_dict[ + fields.InputDataFields.groundtruth_weights] + return [retained_boxes, retained_labels, retained_weights, + self.expectedBoxesAfterThresholding(), + self.expectedLabelsAfterThresholding(), + self.expectedLabelScoresAfterThresholding()] + + (retained_boxes_, retained_labels_, retained_weights_, + expected_retained_boxes_, expected_retained_labels_, + expected_retained_weights_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(retained_boxes_, expected_retained_boxes_) + self.assertAllClose(retained_labels_, expected_retained_labels_) + self.assertAllClose( + retained_weights_, expected_retained_weights_) + + def testRunRetainBoxesAboveThresholdWithMasks(self): + def graph_fn(): + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + masks = self.createTestMasks() + + tensor_dict = { + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + fields.InputDataFields.groundtruth_instance_masks: masks + } + + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_label_weights=True, + include_instance_masks=True) + + preprocessing_options = [ + (preprocessor.retain_boxes_above_threshold, {'threshold': 0.6}) + ] + + retained_tensor_dict = preprocessor.preprocess( + tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) + retained_masks = retained_tensor_dict[ + fields.InputDataFields.groundtruth_instance_masks] + return [retained_masks, self.expectedMasksAfterThresholding()] + (retained_masks_, expected_masks_) = self.execute(graph_fn, []) + self.assertAllClose(retained_masks_, expected_masks_) + + def testRunRetainBoxesAboveThresholdWithKeypoints(self): + def graph_fn(): + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + keypoints, _ = self.createTestKeypoints() + + tensor_dict = { + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + fields.InputDataFields.groundtruth_keypoints: keypoints + } + + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_keypoints=True) + + preprocessing_options = [ + (preprocessor.retain_boxes_above_threshold, {'threshold': 0.6}) + ] + + retained_tensor_dict = preprocessor.preprocess( + tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) + retained_keypoints = 
retained_tensor_dict[ + fields.InputDataFields.groundtruth_keypoints] + return [retained_keypoints, self.expectedKeypointsAfterThresholding()] + (retained_keypoints_, expected_keypoints_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(retained_keypoints_, expected_keypoints_) + + def testRandomCropToAspectRatioWithCache(self): + preprocess_options = [(preprocessor.random_crop_to_aspect_ratio, {})] + self._testPreprocessorCache(preprocess_options, + test_boxes=True, + test_masks=False, + test_keypoints=False) + + def testRunRandomCropToAspectRatioWithMasks(self): + def graph_fn(): + image = self.createColorfulTestImage() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + masks = tf.random_uniform([2, 200, 400], dtype=tf.float32) + + tensor_dict = { + fields.InputDataFields.image: image, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + fields.InputDataFields.groundtruth_instance_masks: masks + } + + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_instance_masks=True) + + preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {})] + + with mock.patch.object(preprocessor, + '_random_integer') as mock_random_integer: + mock_random_integer.return_value = tf.constant(0, dtype=tf.int32) + distorted_tensor_dict = preprocessor.preprocess( + tensor_dict, + preprocessing_options, + func_arg_map=preprocessor_arg_map) + distorted_image = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + distorted_labels = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_classes] + distorted_masks = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_instance_masks] + return [ + distorted_image, distorted_boxes, distorted_labels, distorted_masks + ] + + (distorted_image_, distorted_boxes_, distorted_labels_, + distorted_masks_) = self.execute_cpu(graph_fn, []) + expected_boxes = np.array([0.0, 0.5, 0.75, 1.0], dtype=np.float32) + self.assertAllEqual(distorted_image_.shape, [1, 200, 200, 3]) + self.assertAllEqual(distorted_labels_, [1]) + self.assertAllClose(distorted_boxes_.flatten(), + expected_boxes.flatten()) + self.assertAllEqual(distorted_masks_.shape, [1, 200, 200]) + + def testRunRandomCropToAspectRatioWithKeypoints(self): + def graph_fn(): + image = self.createColorfulTestImage() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + keypoints, _ = self.createTestKeypoints() + + tensor_dict = { + fields.InputDataFields.image: image, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + fields.InputDataFields.groundtruth_keypoints: keypoints + } + + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_keypoints=True) + + preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {})] + + with mock.patch.object(preprocessor, + '_random_integer') as mock_random_integer: + mock_random_integer.return_value = tf.constant(0, dtype=tf.int32) + distorted_tensor_dict = preprocessor.preprocess( + tensor_dict, + preprocessing_options, + func_arg_map=preprocessor_arg_map) + distorted_image = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + 
fields.InputDataFields.groundtruth_boxes] + distorted_labels = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_classes] + distorted_keypoints = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_keypoints] + return [distorted_image, distorted_boxes, distorted_labels, + distorted_keypoints] + (distorted_image_, distorted_boxes_, distorted_labels_, + distorted_keypoints_) = self.execute_cpu(graph_fn, []) + expected_boxes = np.array([0.0, 0.5, 0.75, 1.0], dtype=np.float32) + expected_keypoints = np.array( + [[0.1, 0.2], [0.2, 0.4], [0.3, 0.6]], dtype=np.float32) + self.assertAllEqual(distorted_image_.shape, [1, 200, 200, 3]) + self.assertAllEqual(distorted_labels_, [1]) + self.assertAllClose(distorted_boxes_.flatten(), + expected_boxes.flatten()) + self.assertAllClose(distorted_keypoints_.flatten(), + expected_keypoints.flatten()) + + def testRandomPadToAspectRatioWithCache(self): + preprocess_options = [(preprocessor.random_pad_to_aspect_ratio, {})] + self._testPreprocessorCache(preprocess_options, + test_boxes=True, + test_masks=True, + test_keypoints=True) + + def testRunRandomPadToAspectRatioWithMinMaxPaddedSizeRatios(self): + def graph_fn(): + image = self.createColorfulTestImage() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + + tensor_dict = { + fields.InputDataFields.image: image, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels + } + + preprocessor_arg_map = preprocessor.get_default_func_arg_map() + preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, + {'min_padded_size_ratio': (4.0, 4.0), + 'max_padded_size_ratio': (4.0, 4.0)})] + + distorted_tensor_dict = preprocessor.preprocess( + tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) + distorted_image = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + distorted_labels = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_classes] + return [distorted_image, distorted_boxes, distorted_labels] + + distorted_image_, distorted_boxes_, distorted_labels_ = self.execute_cpu( + graph_fn, []) + expected_boxes = np.array( + [[0.0, 0.125, 0.1875, 0.5], [0.0625, 0.25, 0.1875, 0.5]], + dtype=np.float32) + self.assertAllEqual(distorted_image_.shape, [1, 800, 800, 3]) + self.assertAllEqual(distorted_labels_, [1, 2]) + self.assertAllClose(distorted_boxes_.flatten(), + expected_boxes.flatten()) + + def testRunRandomPadToAspectRatioWithMasks(self): + def graph_fn(): + image = self.createColorfulTestImage() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + masks = tf.random_uniform([2, 200, 400], dtype=tf.float32) + + tensor_dict = { + fields.InputDataFields.image: image, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_instance_masks: masks + } + + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_instance_masks=True) + + preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, {})] + + distorted_tensor_dict = preprocessor.preprocess( + tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) + distorted_image = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + distorted_labels = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_classes] + distorted_masks = 
distorted_tensor_dict[ + fields.InputDataFields.groundtruth_instance_masks] + return [ + distorted_image, distorted_boxes, distorted_labels, distorted_masks + ] + + (distorted_image_, distorted_boxes_, distorted_labels_, + distorted_masks_) = self.execute_cpu(graph_fn, []) + expected_boxes = np.array( + [[0.0, 0.25, 0.375, 1.0], [0.125, 0.5, 0.375, 1.0]], dtype=np.float32) + self.assertAllEqual(distorted_image_.shape, [1, 400, 400, 3]) + self.assertAllEqual(distorted_labels_, [1, 2]) + self.assertAllClose(distorted_boxes_.flatten(), + expected_boxes.flatten()) + self.assertAllEqual(distorted_masks_.shape, [2, 400, 400]) + + def testRunRandomPadToAspectRatioWithKeypoints(self): + def graph_fn(): + image = self.createColorfulTestImage() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + keypoints, _ = self.createTestKeypoints() + + tensor_dict = { + fields.InputDataFields.image: image, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_keypoints: keypoints + } + + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_keypoints=True) + + preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, {})] + + distorted_tensor_dict = preprocessor.preprocess( + tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) + distorted_image = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + distorted_labels = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_classes] + distorted_keypoints = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_keypoints] + return [ + distorted_image, distorted_boxes, distorted_labels, + distorted_keypoints + ] + + (distorted_image_, distorted_boxes_, distorted_labels_, + distorted_keypoints_) = self.execute_cpu(graph_fn, []) + expected_boxes = np.array( + [[0.0, 0.25, 0.375, 1.0], [0.125, 0.5, 0.375, 1.0]], dtype=np.float32) + expected_keypoints = np.array([ + [[0.05, 0.1], [0.1, 0.2], [0.15, 0.3]], + [[0.2, 0.4], [0.25, 0.5], [0.3, 0.6]], + ], dtype=np.float32) + self.assertAllEqual(distorted_image_.shape, [1, 400, 400, 3]) + self.assertAllEqual(distorted_labels_, [1, 2]) + self.assertAllClose(distorted_boxes_.flatten(), + expected_boxes.flatten()) + self.assertAllClose(distorted_keypoints_.flatten(), + expected_keypoints.flatten()) + + def testRandomPadImageWithCache(self): + preprocess_options = [(preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1,}), (preprocessor.random_pad_image, {})] + self._testPreprocessorCache(preprocess_options, + test_boxes=True, + test_masks=True, + test_keypoints=True) + + def testRandomPadImage(self): + def graph_fn(): + preprocessing_options = [(preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })] + + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + } + tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) + images = tensor_dict[fields.InputDataFields.image] + + preprocessing_options = [(preprocessor.random_pad_image, {})] + padded_tensor_dict = preprocessor.preprocess(tensor_dict, + 
preprocessing_options) + + padded_images = padded_tensor_dict[fields.InputDataFields.image] + padded_boxes = padded_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + boxes_shape = tf.shape(boxes) + padded_boxes_shape = tf.shape(padded_boxes) + images_shape = tf.shape(images) + padded_images_shape = tf.shape(padded_images) + return [boxes_shape, padded_boxes_shape, images_shape, + padded_images_shape, boxes, padded_boxes] + (boxes_shape_, padded_boxes_shape_, images_shape_, + padded_images_shape_, boxes_, padded_boxes_) = self.execute_cpu(graph_fn, + []) + self.assertAllEqual(boxes_shape_, padded_boxes_shape_) + self.assertTrue((images_shape_[1] >= padded_images_shape_[1] * 0.5).all) + self.assertTrue((images_shape_[2] >= padded_images_shape_[2] * 0.5).all) + self.assertTrue((images_shape_[1] <= padded_images_shape_[1]).all) + self.assertTrue((images_shape_[2] <= padded_images_shape_[2]).all) + self.assertTrue(np.all((boxes_[:, 2] - boxes_[:, 0]) >= ( + padded_boxes_[:, 2] - padded_boxes_[:, 0]))) + self.assertTrue(np.all((boxes_[:, 3] - boxes_[:, 1]) >= ( + padded_boxes_[:, 3] - padded_boxes_[:, 1]))) + + def testRandomPadImageWithKeypointsAndMasks(self): + def graph_fn(): + preprocessing_options = [(preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })] + + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + masks = self.createTestMasks() + keypoints, _ = self.createTestKeypoints() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_instance_masks: masks, + fields.InputDataFields.groundtruth_keypoints: keypoints, + } + tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) + images = tensor_dict[fields.InputDataFields.image] + + preprocessing_options = [(preprocessor.random_pad_image, {})] + func_arg_map = preprocessor.get_default_func_arg_map( + include_instance_masks=True, + include_keypoints=True, + include_keypoint_visibilities=True) + padded_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options, + func_arg_map=func_arg_map) + + padded_images = padded_tensor_dict[fields.InputDataFields.image] + padded_boxes = padded_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + padded_masks = padded_tensor_dict[ + fields.InputDataFields.groundtruth_instance_masks] + padded_keypoints = padded_tensor_dict[ + fields.InputDataFields.groundtruth_keypoints] + boxes_shape = tf.shape(boxes) + padded_boxes_shape = tf.shape(padded_boxes) + padded_masks_shape = tf.shape(padded_masks) + keypoints_shape = tf.shape(keypoints) + padded_keypoints_shape = tf.shape(padded_keypoints) + images_shape = tf.shape(images) + padded_images_shape = tf.shape(padded_images) + return [boxes_shape, padded_boxes_shape, padded_masks_shape, + keypoints_shape, padded_keypoints_shape, images_shape, + padded_images_shape, boxes, padded_boxes, keypoints, + padded_keypoints] + + (boxes_shape_, padded_boxes_shape_, padded_masks_shape_, + keypoints_shape_, padded_keypoints_shape_, images_shape_, + padded_images_shape_, boxes_, padded_boxes_, + keypoints_, padded_keypoints_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(boxes_shape_, padded_boxes_shape_) + self.assertAllEqual(keypoints_shape_, padded_keypoints_shape_) + self.assertTrue((images_shape_[1] >= padded_images_shape_[1] * 0.5).all) + 
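+    # random_pad_image only adds pixels: the padded image is at least as large
+    # as the original and, by default, at most twice its size, so normalized
+    # box and keypoint extents can only shrink or stay the same.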
self.assertTrue((images_shape_[2] >= padded_images_shape_[2] * 0.5).all) + self.assertTrue((images_shape_[1] <= padded_images_shape_[1]).all) + self.assertTrue((images_shape_[2] <= padded_images_shape_[2]).all) + self.assertAllEqual(padded_masks_shape_[1:3], padded_images_shape_[1:3]) + self.assertTrue(np.all((boxes_[:, 2] - boxes_[:, 0]) >= ( + padded_boxes_[:, 2] - padded_boxes_[:, 0]))) + self.assertTrue(np.all((boxes_[:, 3] - boxes_[:, 1]) >= ( + padded_boxes_[:, 3] - padded_boxes_[:, 1]))) + self.assertTrue(np.all((keypoints_[1, :, 0] - keypoints_[0, :, 0]) >= ( + padded_keypoints_[1, :, 0] - padded_keypoints_[0, :, 0]))) + self.assertTrue(np.all((keypoints_[1, :, 1] - keypoints_[0, :, 1]) >= ( + padded_keypoints_[1, :, 1] - padded_keypoints_[0, :, 1]))) + + def testRandomAbsolutePadImage(self): + height_padding = 10 + width_padding = 20 + def graph_fn(): + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + tensor_dict = { + fields.InputDataFields.image: tf.cast(images, dtype=tf.float32), + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + } + preprocessing_options = [(preprocessor.random_absolute_pad_image, { + 'max_height_padding': height_padding, + 'max_width_padding': width_padding})] + padded_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + original_shape = tf.shape(images) + final_shape = tf.shape(padded_tensor_dict[fields.InputDataFields.image]) + return original_shape, final_shape + for _ in range(100): + original_shape, output_shape = self.execute_cpu(graph_fn, []) + _, height, width, _ = original_shape + self.assertGreaterEqual(output_shape[1], height) + self.assertLess(output_shape[1], height + height_padding) + self.assertGreaterEqual(output_shape[2], width) + self.assertLess(output_shape[2], width + width_padding) + + def testRandomAbsolutePadImageWithKeypoints(self): + height_padding = 10 + width_padding = 20 + def graph_fn(): + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + keypoints, _ = self.createTestKeypoints() + tensor_dict = { + fields.InputDataFields.image: tf.cast(images, dtype=tf.float32), + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_keypoints: keypoints, + } + + preprocessing_options = [(preprocessor.random_absolute_pad_image, { + 'max_height_padding': height_padding, + 'max_width_padding': width_padding + })] + padded_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + original_shape = tf.shape(images) + final_shape = tf.shape(padded_tensor_dict[fields.InputDataFields.image]) + padded_keypoints = padded_tensor_dict[ + fields.InputDataFields.groundtruth_keypoints] + return (original_shape, final_shape, padded_keypoints) + for _ in range(100): + original_shape, output_shape, padded_keypoints_ = self.execute_cpu( + graph_fn, []) + _, height, width, _ = original_shape + self.assertGreaterEqual(output_shape[1], height) + self.assertLess(output_shape[1], height + height_padding) + self.assertGreaterEqual(output_shape[2], width) + self.assertLess(output_shape[2], width + width_padding) + # Verify the keypoints are populated. The correctness of the keypoint + # coordinates are already tested in random_pad_image function. 
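+ # createTestKeypoints yields 2 instances with 3 keypoints each, so the padded
+ # keypoints should keep the static shape (2, 3, 2) of (y, x) coordinates.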
+ self.assertEqual(padded_keypoints_.shape, (2, 3, 2)) + + def testRandomCropPadImageWithCache(self): + preprocess_options = [(preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1,}), (preprocessor.random_crop_pad_image, {})] + self._testPreprocessorCache(preprocess_options, + test_boxes=True, + test_masks=True, + test_keypoints=True) + + def testRandomCropPadImageWithRandomCoefOne(self): + def graph_fn(): + preprocessing_options = [(preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })] + + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + } + tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) + images = tensor_dict[fields.InputDataFields.image] + + preprocessing_options = [(preprocessor.random_crop_pad_image, { + 'random_coef': 1.0 + })] + padded_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + + padded_images = padded_tensor_dict[fields.InputDataFields.image] + padded_boxes = padded_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + boxes_shape = tf.shape(boxes) + padded_boxes_shape = tf.shape(padded_boxes) + images_shape = tf.shape(images) + padded_images_shape = tf.shape(padded_images) + return [boxes_shape, padded_boxes_shape, images_shape, + padded_images_shape, boxes, padded_boxes] + (boxes_shape_, padded_boxes_shape_, images_shape_, + padded_images_shape_, boxes_, padded_boxes_) = self.execute_cpu(graph_fn, + []) + self.assertAllEqual(boxes_shape_, padded_boxes_shape_) + self.assertTrue((images_shape_[1] >= padded_images_shape_[1] * 0.5).all) + self.assertTrue((images_shape_[2] >= padded_images_shape_[2] * 0.5).all) + self.assertTrue((images_shape_[1] <= padded_images_shape_[1]).all) + self.assertTrue((images_shape_[2] <= padded_images_shape_[2]).all) + self.assertTrue(np.all((boxes_[:, 2] - boxes_[:, 0]) >= ( + padded_boxes_[:, 2] - padded_boxes_[:, 0]))) + self.assertTrue(np.all((boxes_[:, 3] - boxes_[:, 1]) >= ( + padded_boxes_[:, 3] - padded_boxes_[:, 1]))) + + def testRandomCropToAspectRatio(self): + def graph_fn(): + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + } + tensor_dict = preprocessor.preprocess(tensor_dict, []) + images = tensor_dict[fields.InputDataFields.image] + + preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, { + 'aspect_ratio': 2.0 + })] + cropped_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + + cropped_images = cropped_tensor_dict[fields.InputDataFields.image] + cropped_boxes = cropped_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + boxes_shape = tf.shape(boxes) + cropped_boxes_shape = tf.shape(cropped_boxes) + images_shape = tf.shape(images) + cropped_images_shape = tf.shape(cropped_images) + return [ + boxes_shape, cropped_boxes_shape, images_shape, 
cropped_images_shape + ] + + (boxes_shape_, cropped_boxes_shape_, images_shape_, + cropped_images_shape_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(boxes_shape_, cropped_boxes_shape_) + self.assertEqual(images_shape_[1], cropped_images_shape_[1] * 2) + self.assertEqual(images_shape_[2], cropped_images_shape_[2]) + + def testRandomPadToAspectRatio(self): + def graph_fn(): + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + } + tensor_dict = preprocessor.preprocess(tensor_dict, []) + images = tensor_dict[fields.InputDataFields.image] + + preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, { + 'aspect_ratio': 2.0 + })] + padded_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + + padded_images = padded_tensor_dict[fields.InputDataFields.image] + padded_boxes = padded_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + boxes_shape = tf.shape(boxes) + padded_boxes_shape = tf.shape(padded_boxes) + images_shape = tf.shape(images) + padded_images_shape = tf.shape(padded_images) + return [ + boxes_shape, padded_boxes_shape, images_shape, padded_images_shape + ] + + (boxes_shape_, padded_boxes_shape_, images_shape_, + padded_images_shape_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(boxes_shape_, padded_boxes_shape_) + self.assertEqual(images_shape_[1], padded_images_shape_[1]) + self.assertEqual(2 * images_shape_[2], padded_images_shape_[2]) + + def testRandomBlackPatchesWithCache(self): + preprocess_options = [] + preprocess_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocess_options.append((preprocessor.random_black_patches, { + 'size_to_image_ratio': 0.5 + })) + self._testPreprocessorCache(preprocess_options, + test_boxes=True, + test_masks=True, + test_keypoints=True) + + def testRandomBlackPatches(self): + def graph_fn(): + preprocessing_options = [] + preprocessing_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocessing_options.append((preprocessor.random_black_patches, { + 'size_to_image_ratio': 0.5 + })) + images = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images} + blacked_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + blacked_images = blacked_tensor_dict[fields.InputDataFields.image] + images_shape = tf.shape(images) + blacked_images_shape = tf.shape(blacked_images) + return [images_shape, blacked_images_shape] + (images_shape_, blacked_images_shape_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(images_shape_, blacked_images_shape_) + + def testRandomJpegQuality(self): + def graph_fn(): + preprocessing_options = [(preprocessor.random_jpeg_quality, { + 'min_jpeg_quality': 0, + 'max_jpeg_quality': 100 + })] + images = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images} + processed_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + encoded_images = processed_tensor_dict[fields.InputDataFields.image] + images_shape = tf.shape(images) + encoded_images_shape = tf.shape(encoded_images) + return [images_shape, encoded_images_shape] + images_shape_out, encoded_images_shape_out = 
self.execute_cpu(graph_fn, []) + self.assertAllEqual(images_shape_out, encoded_images_shape_out) + + def testRandomJpegQualityKeepsStaticChannelShape(self): + # Set at least three weeks past the forward compatibility horizon for + # tf 1.14 of 2019/11/01. + # https://github.com/tensorflow/tensorflow/blob/v1.14.0/tensorflow/python/compat/compat.py#L30 + if not tf.compat.forward_compatible(year=2019, month=12, day=1): + self.skipTest('Skipping test for future functionality.') + preprocessing_options = [(preprocessor.random_jpeg_quality, { + 'min_jpeg_quality': 0, + 'max_jpeg_quality': 100 + })] + images = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images} + processed_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + encoded_images = processed_tensor_dict[fields.InputDataFields.image] + images_static_channels = images.shape[-1] + encoded_images_static_channels = encoded_images.shape[-1] + self.assertEqual(images_static_channels, encoded_images_static_channels) + + def testRandomJpegQualityWithCache(self): + preprocessing_options = [(preprocessor.random_jpeg_quality, { + 'min_jpeg_quality': 0, + 'max_jpeg_quality': 100 + })] + self._testPreprocessorCache(preprocessing_options) + + def testRandomJpegQualityWithRandomCoefOne(self): + def graph_fn(): + preprocessing_options = [(preprocessor.random_jpeg_quality, { + 'random_coef': 1.0 + })] + images = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images} + processed_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + encoded_images = processed_tensor_dict[fields.InputDataFields.image] + images_shape = tf.shape(images) + encoded_images_shape = tf.shape(encoded_images) + return [images, encoded_images, images_shape, encoded_images_shape] + + (images_out, encoded_images_out, images_shape_out, + encoded_images_shape_out) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(images_shape_out, encoded_images_shape_out) + self.assertAllEqual(images_out, encoded_images_out) + + def testRandomDownscaleToTargetPixels(self): + def graph_fn(): + preprocessing_options = [(preprocessor.random_downscale_to_target_pixels, + { + 'min_target_pixels': 100, + 'max_target_pixels': 101 + })] + images = tf.random_uniform([1, 25, 100, 3]) + tensor_dict = {fields.InputDataFields.image: images} + processed_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + downscaled_images = processed_tensor_dict[fields.InputDataFields.image] + downscaled_shape = tf.shape(downscaled_images) + return downscaled_shape + expected_shape = [1, 5, 20, 3] + downscaled_shape_out = self.execute_cpu(graph_fn, []) + self.assertAllEqual(downscaled_shape_out, expected_shape) + + def testRandomDownscaleToTargetPixelsWithMasks(self): + def graph_fn(): + preprocessing_options = [(preprocessor.random_downscale_to_target_pixels, + { + 'min_target_pixels': 100, + 'max_target_pixels': 101 + })] + images = tf.random_uniform([1, 25, 100, 3]) + masks = tf.random_uniform([10, 25, 100]) + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_instance_masks: masks + } + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_instance_masks=True) + processed_tensor_dict = preprocessor.preprocess( + tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) + downscaled_images = processed_tensor_dict[fields.InputDataFields.image] + downscaled_masks = processed_tensor_dict[ + 
fields.InputDataFields.groundtruth_instance_masks] + downscaled_images_shape = tf.shape(downscaled_images) + downscaled_masks_shape = tf.shape(downscaled_masks) + return [downscaled_images_shape, downscaled_masks_shape] + expected_images_shape = [1, 5, 20, 3] + expected_masks_shape = [10, 5, 20] + (downscaled_images_shape_out, + downscaled_masks_shape_out) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(downscaled_images_shape_out, expected_images_shape) + self.assertAllEqual(downscaled_masks_shape_out, expected_masks_shape) + + @parameterized.parameters( + {'test_masks': False}, + {'test_masks': True} + ) + def testRandomDownscaleToTargetPixelsWithCache(self, test_masks): + preprocessing_options = [(preprocessor.random_downscale_to_target_pixels, { + 'min_target_pixels': 100, + 'max_target_pixels': 999 + })] + self._testPreprocessorCache(preprocessing_options, test_masks=test_masks) + + def testRandomDownscaleToTargetPixelsWithRandomCoefOne(self): + def graph_fn(): + preprocessing_options = [(preprocessor.random_downscale_to_target_pixels, + { + 'random_coef': 1.0, + 'min_target_pixels': 10, + 'max_target_pixels': 20, + })] + images = tf.random_uniform([1, 25, 100, 3]) + tensor_dict = {fields.InputDataFields.image: images} + processed_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + downscaled_images = processed_tensor_dict[fields.InputDataFields.image] + images_shape = tf.shape(images) + downscaled_images_shape = tf.shape(downscaled_images) + return [images, downscaled_images, images_shape, downscaled_images_shape] + (images_out, downscaled_images_out, images_shape_out, + downscaled_images_shape_out) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(images_shape_out, downscaled_images_shape_out) + self.assertAllEqual(images_out, downscaled_images_out) + + def testRandomDownscaleToTargetPixelsIgnoresSmallImages(self): + def graph_fn(): + preprocessing_options = [(preprocessor.random_downscale_to_target_pixels, + { + 'min_target_pixels': 1000, + 'max_target_pixels': 1001 + })] + images = tf.random_uniform([1, 10, 10, 3]) + tensor_dict = {fields.InputDataFields.image: images} + processed_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + downscaled_images = processed_tensor_dict[fields.InputDataFields.image] + images_shape = tf.shape(images) + downscaled_images_shape = tf.shape(downscaled_images) + return [images, downscaled_images, images_shape, downscaled_images_shape] + (images_out, downscaled_images_out, images_shape_out, + downscaled_images_shape_out) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(images_shape_out, downscaled_images_shape_out) + self.assertAllEqual(images_out, downscaled_images_out) + + def testRandomPatchGaussianShape(self): + preprocessing_options = [(preprocessor.random_patch_gaussian, { + 'min_patch_size': 1, + 'max_patch_size': 200, + 'min_gaussian_stddev': 0.0, + 'max_gaussian_stddev': 2.0 + })] + images = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images} + processed_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + patched_images = processed_tensor_dict[fields.InputDataFields.image] + images_shape = tf.shape(images) + patched_images_shape = tf.shape(patched_images) + self.assertAllEqual(images_shape, patched_images_shape) + + def testRandomPatchGaussianClippedToLowerBound(self): + def graph_fn(): + preprocessing_options = [(preprocessor.random_patch_gaussian, { + 'min_patch_size': 20, + 'max_patch_size': 40, + 
'min_gaussian_stddev': 50, + 'max_gaussian_stddev': 100 + })] + images = tf.zeros([1, 5, 4, 3]) + tensor_dict = {fields.InputDataFields.image: images} + processed_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + patched_images = processed_tensor_dict[fields.InputDataFields.image] + return patched_images + patched_images = self.execute_cpu(graph_fn, []) + self.assertAllGreaterEqual(patched_images, 0.0) + + def testRandomPatchGaussianClippedToUpperBound(self): + def graph_fn(): + preprocessing_options = [(preprocessor.random_patch_gaussian, { + 'min_patch_size': 20, + 'max_patch_size': 40, + 'min_gaussian_stddev': 50, + 'max_gaussian_stddev': 100 + })] + images = tf.constant(255.0, shape=[1, 5, 4, 3]) + tensor_dict = {fields.InputDataFields.image: images} + processed_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + patched_images = processed_tensor_dict[fields.InputDataFields.image] + return patched_images + patched_images = self.execute_cpu(graph_fn, []) + self.assertAllLessEqual(patched_images, 255.0) + + def testRandomPatchGaussianWithCache(self): + preprocessing_options = [(preprocessor.random_patch_gaussian, { + 'min_patch_size': 1, + 'max_patch_size': 200, + 'min_gaussian_stddev': 0.0, + 'max_gaussian_stddev': 2.0 + })] + self._testPreprocessorCache(preprocessing_options) + + def testRandomPatchGaussianWithRandomCoefOne(self): + def graph_fn(): + preprocessing_options = [(preprocessor.random_patch_gaussian, { + 'random_coef': 1.0 + })] + images = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images} + processed_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + patched_images = processed_tensor_dict[fields.InputDataFields.image] + images_shape = tf.shape(images) + patched_images_shape = tf.shape(patched_images) + return patched_images_shape, patched_images, images_shape, images + (patched_images_shape, patched_images, images_shape, + images) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(images_shape, patched_images_shape) + self.assertAllEqual(images, patched_images) + + @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') + def testAutoAugmentImage(self): + def graph_fn(): + preprocessing_options = [] + preprocessing_options.append((preprocessor.autoaugment_image, { + 'policy_name': 'v1' + })) + images = self.createTestImages() + boxes = self.createTestBoxes() + tensor_dict = {fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes} + autoaugment_tensor_dict = preprocessor.preprocess( + tensor_dict, preprocessing_options) + augmented_images = autoaugment_tensor_dict[fields.InputDataFields.image] + augmented_boxes = autoaugment_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + images_shape = tf.shape(images) + boxes_shape = tf.shape(boxes) + augmented_images_shape = tf.shape(augmented_images) + augmented_boxes_shape = tf.shape(augmented_boxes) + return [images_shape, boxes_shape, augmented_images_shape, + augmented_boxes_shape] + (images_shape_, boxes_shape_, augmented_images_shape_, + augmented_boxes_shape_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(images_shape_, augmented_images_shape_) + self.assertAllEqual(boxes_shape_, augmented_boxes_shape_) + + def testRandomResizeMethodWithCache(self): + preprocess_options = [] + preprocess_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + 
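+ # random_resize_method resizes to target_size using a randomly chosen
+ # interpolation method; running it through _testPreprocessorCache checks that
+ # the cached random choice is reproducible.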
preprocess_options.append((preprocessor.random_resize_method, { + 'target_size': (75, 150) + })) + self._testPreprocessorCache(preprocess_options, + test_boxes=True, + test_masks=True, + test_keypoints=True) + + def testRandomResizeMethod(self): + def graph_fn(): + preprocessing_options = [] + preprocessing_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocessing_options.append((preprocessor.random_resize_method, { + 'target_size': (75, 150) + })) + images = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images} + resized_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + resized_images = resized_tensor_dict[fields.InputDataFields.image] + resized_images_shape = tf.shape(resized_images) + expected_images_shape = tf.constant([1, 75, 150, 3], dtype=tf.int32) + return [expected_images_shape, resized_images_shape] + (expected_images_shape_, resized_images_shape_) = self.execute_cpu(graph_fn, + []) + self.assertAllEqual(expected_images_shape_, + resized_images_shape_) + + def testResizeImageWithMasks(self): + """Tests image resizing, checking output sizes.""" + in_image_shape_list = [[60, 40, 3], [15, 30, 3]] + in_masks_shape_list = [[15, 60, 40], [10, 15, 30]] + height = 50 + width = 100 + expected_image_shape_list = [[50, 100, 3], [50, 100, 3]] + expected_masks_shape_list = [[15, 50, 100], [10, 50, 100]] + def graph_fn(in_image_shape, in_masks_shape): + in_image = tf.random_uniform(in_image_shape) + in_masks = tf.random_uniform(in_masks_shape) + out_image, out_masks, _ = preprocessor.resize_image( + in_image, in_masks, new_height=height, new_width=width) + out_image_shape = tf.shape(out_image) + out_masks_shape = tf.shape(out_masks) + return out_image_shape, out_masks_shape + for (in_image_shape, expected_image_shape, in_masks_shape, + expected_mask_shape) in zip(in_image_shape_list, + expected_image_shape_list, + in_masks_shape_list, + expected_masks_shape_list): + (out_image_shape, + out_masks_shape) = self.execute_cpu(graph_fn, [ + np.array(in_image_shape, np.int32), + np.array(in_masks_shape, np.int32) + ]) + self.assertAllEqual(out_image_shape, expected_image_shape) + self.assertAllEqual(out_masks_shape, expected_mask_shape) + + def testResizeImageWithMasksTensorInputHeightAndWidth(self): + """Tests image resizing, checking output sizes.""" + in_image_shape_list = [[60, 40, 3], [15, 30, 3]] + in_masks_shape_list = [[15, 60, 40], [10, 15, 30]] + expected_image_shape_list = [[50, 100, 3], [50, 100, 3]] + expected_masks_shape_list = [[15, 50, 100], [10, 50, 100]] + def graph_fn(in_image_shape, in_masks_shape): + height = tf.constant(50, dtype=tf.int32) + width = tf.constant(100, dtype=tf.int32) + in_image = tf.random_uniform(in_image_shape) + in_masks = tf.random_uniform(in_masks_shape) + out_image, out_masks, _ = preprocessor.resize_image( + in_image, in_masks, new_height=height, new_width=width) + out_image_shape = tf.shape(out_image) + out_masks_shape = tf.shape(out_masks) + return out_image_shape, out_masks_shape + for (in_image_shape, expected_image_shape, in_masks_shape, + expected_mask_shape) in zip(in_image_shape_list, + expected_image_shape_list, + in_masks_shape_list, + expected_masks_shape_list): + (out_image_shape, + out_masks_shape) = self.execute_cpu(graph_fn, [ + np.array(in_image_shape, np.int32), + np.array(in_masks_shape, np.int32) + ]) + self.assertAllEqual(out_image_shape, expected_image_shape) + 
self.assertAllEqual(out_masks_shape, expected_mask_shape) + + def testResizeImageWithNoInstanceMask(self): + """Tests image resizing, checking output sizes.""" + in_image_shape_list = [[60, 40, 3], [15, 30, 3]] + in_masks_shape_list = [[0, 60, 40], [0, 15, 30]] + height = 50 + width = 100 + expected_image_shape_list = [[50, 100, 3], [50, 100, 3]] + expected_masks_shape_list = [[0, 50, 100], [0, 50, 100]] + def graph_fn(in_image_shape, in_masks_shape): + in_image = tf.random_uniform(in_image_shape) + in_masks = tf.random_uniform(in_masks_shape) + out_image, out_masks, _ = preprocessor.resize_image( + in_image, in_masks, new_height=height, new_width=width) + out_image_shape = tf.shape(out_image) + out_masks_shape = tf.shape(out_masks) + return out_image_shape, out_masks_shape + for (in_image_shape, expected_image_shape, in_masks_shape, + expected_mask_shape) in zip(in_image_shape_list, + expected_image_shape_list, + in_masks_shape_list, + expected_masks_shape_list): + (out_image_shape, + out_masks_shape) = self.execute_cpu(graph_fn, [ + np.array(in_image_shape, np.int32), + np.array(in_masks_shape, np.int32) + ]) + self.assertAllEqual(out_image_shape, expected_image_shape) + self.assertAllEqual(out_masks_shape, expected_mask_shape) + + def testResizeToRangePreservesStaticSpatialShape(self): + """Tests image resizing, checking output sizes.""" + in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]] + min_dim = 50 + max_dim = 100 + expected_shape_list = [[75, 50, 3], [50, 100, 3], [30, 100, 3]] + + for in_shape, expected_shape in zip(in_shape_list, expected_shape_list): + in_image = tf.random_uniform(in_shape) + out_image, _ = preprocessor.resize_to_range( + in_image, min_dimension=min_dim, max_dimension=max_dim) + self.assertAllEqual(out_image.get_shape().as_list(), expected_shape) + + def testResizeToRangeWithDynamicSpatialShape(self): + """Tests image resizing, checking output sizes.""" + in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]] + min_dim = 50 + max_dim = 100 + expected_shape_list = [[75, 50, 3], [50, 100, 3], [30, 100, 3]] + def graph_fn(in_image_shape): + in_image = tf.random_uniform(in_image_shape) + out_image, _ = preprocessor.resize_to_range( + in_image, min_dimension=min_dim, max_dimension=max_dim) + out_image_shape = tf.shape(out_image) + return out_image_shape + for in_shape, expected_shape in zip(in_shape_list, expected_shape_list): + out_image_shape = self.execute_cpu(graph_fn, [np.array(in_shape, + np.int32)]) + self.assertAllEqual(out_image_shape, expected_shape) + + def testResizeToRangeWithPadToMaxDimensionReturnsCorrectShapes(self): + in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]] + min_dim = 50 + max_dim = 100 + expected_shape_list = [[100, 100, 3], [100, 100, 3], [100, 100, 3]] + def graph_fn(in_image): + out_image, _ = preprocessor.resize_to_range( + in_image, + min_dimension=min_dim, + max_dimension=max_dim, + pad_to_max_dimension=True) + return tf.shape(out_image) + for in_shape, expected_shape in zip(in_shape_list, expected_shape_list): + out_image_shape = self.execute_cpu( + graph_fn, [np.random.rand(*in_shape).astype('f')]) + self.assertAllEqual(out_image_shape, expected_shape) + + def testResizeToRangeWithPadToMaxDimensionReturnsCorrectTensor(self): + in_image_np = np.array([[[0, 1, 2]]], np.float32) + ex_image_np = np.array( + [[[0, 1, 2], [123.68, 116.779, 103.939]], + [[123.68, 116.779, 103.939], [123.68, 116.779, 103.939]]], np.float32) + min_dim = 1 + max_dim = 2 + def graph_fn(in_image): + out_image, _ = preprocessor.resize_to_range( + 
in_image, + min_dimension=min_dim, + max_dimension=max_dim, + pad_to_max_dimension=True, + per_channel_pad_value=(123.68, 116.779, 103.939)) + return out_image + out_image_np = self.execute_cpu(graph_fn, [in_image_np]) + self.assertAllClose(ex_image_np, out_image_np) + + def testResizeToRangeWithMasksPreservesStaticSpatialShape(self): + """Tests image resizing, checking output sizes.""" + in_image_shape_list = [[60, 40, 3], [15, 30, 3]] + in_masks_shape_list = [[15, 60, 40], [10, 15, 30]] + min_dim = 50 + max_dim = 100 + expected_image_shape_list = [[75, 50, 3], [50, 100, 3]] + expected_masks_shape_list = [[15, 75, 50], [10, 50, 100]] + + for (in_image_shape, expected_image_shape, in_masks_shape, + expected_mask_shape) in zip(in_image_shape_list, + expected_image_shape_list, + in_masks_shape_list, + expected_masks_shape_list): + in_image = tf.random_uniform(in_image_shape) + in_masks = tf.random_uniform(in_masks_shape) + out_image, out_masks, _ = preprocessor.resize_to_range( + in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim) + self.assertAllEqual(out_masks.get_shape().as_list(), expected_mask_shape) + self.assertAllEqual(out_image.get_shape().as_list(), expected_image_shape) + + def testResizeToRangeWithMasksAndPadToMaxDimension(self): + """Tests image resizing, checking output sizes.""" + in_image_shape_list = [[60, 40, 3], [15, 30, 3]] + in_masks_shape_list = [[15, 60, 40], [10, 15, 30]] + min_dim = 50 + max_dim = 100 + expected_image_shape_list = [[100, 100, 3], [100, 100, 3]] + expected_masks_shape_list = [[15, 100, 100], [10, 100, 100]] + def graph_fn(in_image, in_masks): + out_image, out_masks, _ = preprocessor.resize_to_range( + in_image, in_masks, min_dimension=min_dim, + max_dimension=max_dim, pad_to_max_dimension=True) + out_image_shape = tf.shape(out_image) + out_masks_shape = tf.shape(out_masks) + return [out_image_shape, out_masks_shape] + for (in_image_shape, expected_image_shape, in_masks_shape, + expected_mask_shape) in zip(in_image_shape_list, + expected_image_shape_list, + in_masks_shape_list, + expected_masks_shape_list): + out_image_shape, out_masks_shape = self.execute_cpu( + graph_fn, [ + np.random.rand(*in_image_shape).astype('f'), + np.random.rand(*in_masks_shape).astype('f'), + ]) + self.assertAllEqual(out_image_shape, expected_image_shape) + self.assertAllEqual(out_masks_shape, expected_mask_shape) + + def testResizeToRangeWithMasksAndDynamicSpatialShape(self): + """Tests image resizing, checking output sizes.""" + in_image_shape_list = [[60, 40, 3], [15, 30, 3]] + in_masks_shape_list = [[15, 60, 40], [10, 15, 30]] + min_dim = 50 + max_dim = 100 + expected_image_shape_list = [[75, 50, 3], [50, 100, 3]] + expected_masks_shape_list = [[15, 75, 50], [10, 50, 100]] + def graph_fn(in_image, in_masks): + out_image, out_masks, _ = preprocessor.resize_to_range( + in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim) + out_image_shape = tf.shape(out_image) + out_masks_shape = tf.shape(out_masks) + return [out_image_shape, out_masks_shape] + for (in_image_shape, expected_image_shape, in_masks_shape, + expected_mask_shape) in zip(in_image_shape_list, + expected_image_shape_list, + in_masks_shape_list, + expected_masks_shape_list): + out_image_shape, out_masks_shape = self.execute_cpu( + graph_fn, [ + np.random.rand(*in_image_shape).astype('f'), + np.random.rand(*in_masks_shape).astype('f'), + ]) + self.assertAllEqual(out_image_shape, expected_image_shape) + self.assertAllEqual(out_masks_shape, expected_mask_shape) + + def 
testResizeToRangeWithInstanceMasksTensorOfSizeZero(self): + """Tests image resizing, checking output sizes.""" + in_image_shape_list = [[60, 40, 3], [15, 30, 3]] + in_masks_shape_list = [[0, 60, 40], [0, 15, 30]] + min_dim = 50 + max_dim = 100 + expected_image_shape_list = [[75, 50, 3], [50, 100, 3]] + expected_masks_shape_list = [[0, 75, 50], [0, 50, 100]] + def graph_fn(in_image, in_masks): + out_image, out_masks, _ = preprocessor.resize_to_range( + in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim) + out_image_shape = tf.shape(out_image) + out_masks_shape = tf.shape(out_masks) + return [out_image_shape, out_masks_shape] + for (in_image_shape, expected_image_shape, in_masks_shape, + expected_mask_shape) in zip(in_image_shape_list, + expected_image_shape_list, + in_masks_shape_list, + expected_masks_shape_list): + out_image_shape, out_masks_shape = self.execute_cpu( + graph_fn, [ + np.random.rand(*in_image_shape).astype('f'), + np.random.rand(*in_masks_shape).astype('f'), + ]) + self.assertAllEqual(out_image_shape, expected_image_shape) + self.assertAllEqual(out_masks_shape, expected_mask_shape) + + def testResizeToRange4DImageTensor(self): + image = tf.random_uniform([1, 200, 300, 3]) + with self.assertRaises(ValueError): + preprocessor.resize_to_range(image, 500, 600) + + def testResizeToRangeSameMinMax(self): + """Tests image resizing, checking output sizes.""" + in_shape_list = [[312, 312, 3], [299, 299, 3]] + min_dim = 320 + max_dim = 320 + expected_shape_list = [[320, 320, 3], [320, 320, 3]] + def graph_fn(in_shape): + in_image = tf.random_uniform(in_shape) + out_image, _ = preprocessor.resize_to_range( + in_image, min_dimension=min_dim, max_dimension=max_dim) + out_image_shape = tf.shape(out_image) + return out_image_shape + for in_shape, expected_shape in zip(in_shape_list, expected_shape_list): + out_image_shape = self.execute_cpu(graph_fn, [np.array(in_shape, + np.int32)]) + self.assertAllEqual(out_image_shape, expected_shape) + + def testResizeToMaxDimensionTensorShapes(self): + """Tests both cases where image should and shouldn't be resized.""" + in_image_shape_list = [[100, 50, 3], [15, 30, 3]] + in_masks_shape_list = [[15, 100, 50], [10, 15, 30]] + max_dim = 50 + expected_image_shape_list = [[50, 25, 3], [15, 30, 3]] + expected_masks_shape_list = [[15, 50, 25], [10, 15, 30]] + def graph_fn(in_image_shape, in_masks_shape): + in_image = tf.random_uniform(in_image_shape) + in_masks = tf.random_uniform(in_masks_shape) + out_image, out_masks, _ = preprocessor.resize_to_max_dimension( + in_image, in_masks, max_dimension=max_dim) + out_image_shape = tf.shape(out_image) + out_masks_shape = tf.shape(out_masks) + return [out_image_shape, out_masks_shape] + for (in_image_shape, expected_image_shape, in_masks_shape, + expected_mask_shape) in zip(in_image_shape_list, + expected_image_shape_list, + in_masks_shape_list, + expected_masks_shape_list): + out_image_shape, out_masks_shape = self.execute_cpu( + graph_fn, [ + np.array(in_image_shape, np.int32), + np.array(in_masks_shape, np.int32) + ]) + self.assertAllEqual(out_image_shape, expected_image_shape) + self.assertAllEqual(out_masks_shape, expected_mask_shape) + + def testResizeToMaxDimensionWithInstanceMasksTensorOfSizeZero(self): + """Tests both cases where image should and shouldn't be resized.""" + in_image_shape_list = [[100, 50, 3], [15, 30, 3]] + in_masks_shape_list = [[0, 100, 50], [0, 15, 30]] + max_dim = 50 + expected_image_shape_list = [[50, 25, 3], [15, 30, 3]] + expected_masks_shape_list = [[0, 50, 25], [0, 
15, 30]] + + def graph_fn(in_image_shape, in_masks_shape): + in_image = tf.random_uniform(in_image_shape) + in_masks = tf.random_uniform(in_masks_shape) + out_image, out_masks, _ = preprocessor.resize_to_max_dimension( + in_image, in_masks, max_dimension=max_dim) + out_image_shape = tf.shape(out_image) + out_masks_shape = tf.shape(out_masks) + return [out_image_shape, out_masks_shape] + + for (in_image_shape, expected_image_shape, in_masks_shape, + expected_mask_shape) in zip(in_image_shape_list, + expected_image_shape_list, + in_masks_shape_list, + expected_masks_shape_list): + out_image_shape, out_masks_shape = self.execute_cpu( + graph_fn, [ + np.array(in_image_shape, np.int32), + np.array(in_masks_shape, np.int32) + ]) + self.assertAllEqual(out_image_shape, expected_image_shape) + self.assertAllEqual(out_masks_shape, expected_mask_shape) + + def testResizeToMaxDimensionRaisesErrorOn4DImage(self): + image = tf.random_uniform([1, 200, 300, 3]) + with self.assertRaises(ValueError): + preprocessor.resize_to_max_dimension(image, 500) + + def testResizeToMinDimensionTensorShapes(self): + in_image_shape_list = [[60, 55, 3], [15, 30, 3]] + in_masks_shape_list = [[15, 60, 55], [10, 15, 30]] + min_dim = 50 + expected_image_shape_list = [[60, 55, 3], [50, 100, 3]] + expected_masks_shape_list = [[15, 60, 55], [10, 50, 100]] + def graph_fn(in_image_shape, in_masks_shape): + in_image = tf.random_uniform(in_image_shape) + in_masks = tf.random_uniform(in_masks_shape) + out_image, out_masks, _ = preprocessor.resize_to_min_dimension( + in_image, in_masks, min_dimension=min_dim) + out_image_shape = tf.shape(out_image) + out_masks_shape = tf.shape(out_masks) + return [out_image_shape, out_masks_shape] + for (in_image_shape, expected_image_shape, in_masks_shape, + expected_mask_shape) in zip(in_image_shape_list, + expected_image_shape_list, + in_masks_shape_list, + expected_masks_shape_list): + out_image_shape, out_masks_shape = self.execute_cpu( + graph_fn, [ + np.array(in_image_shape, np.int32), + np.array(in_masks_shape, np.int32) + ]) + self.assertAllEqual(out_image_shape, expected_image_shape) + self.assertAllEqual(out_masks_shape, expected_mask_shape) + + def testResizeToMinDimensionWithInstanceMasksTensorOfSizeZero(self): + """Tests image resizing, checking output sizes.""" + in_image_shape_list = [[60, 40, 3], [15, 30, 3]] + in_masks_shape_list = [[0, 60, 40], [0, 15, 30]] + min_dim = 50 + expected_image_shape_list = [[75, 50, 3], [50, 100, 3]] + expected_masks_shape_list = [[0, 75, 50], [0, 50, 100]] + def graph_fn(in_image_shape, in_masks_shape): + in_image = tf.random_uniform(in_image_shape) + in_masks = tf.random_uniform(in_masks_shape) + out_image, out_masks, _ = preprocessor.resize_to_min_dimension( + in_image, in_masks, min_dimension=min_dim) + out_image_shape = tf.shape(out_image) + out_masks_shape = tf.shape(out_masks) + return [out_image_shape, out_masks_shape] + for (in_image_shape, expected_image_shape, in_masks_shape, + expected_mask_shape) in zip(in_image_shape_list, + expected_image_shape_list, + in_masks_shape_list, + expected_masks_shape_list): + out_image_shape, out_masks_shape = self.execute_cpu( + graph_fn, [ + np.array(in_image_shape, np.int32), + np.array(in_masks_shape, np.int32) + ]) + self.assertAllEqual(out_image_shape, expected_image_shape) + self.assertAllEqual(out_masks_shape, expected_mask_shape) + + def testResizeToMinDimensionRaisesErrorOn4DImage(self): + image = tf.random_uniform([1, 200, 300, 3]) + with self.assertRaises(ValueError): + 
preprocessor.resize_to_min_dimension(image, 500) + + def testResizePadToMultipleNoMasks(self): + """Tests resizing when padding to multiple without masks.""" + def graph_fn(): + image = tf.ones((200, 100, 3), dtype=tf.float32) + out_image, out_shape = preprocessor.resize_pad_to_multiple( + image, multiple=32) + return out_image, out_shape + + out_image, out_shape = self.execute_cpu(graph_fn, []) + self.assertAllClose(out_image.sum(), 200 * 100 * 3) + self.assertAllEqual(out_shape, (200, 100, 3)) + self.assertAllEqual(out_image.shape, (224, 128, 3)) + + def testResizePadToMultipleWithMasks(self): + """Tests resizing when padding to multiple with masks.""" + def graph_fn(): + image = tf.ones((200, 100, 3), dtype=tf.float32) + masks = tf.ones((10, 200, 100), dtype=tf.float32) + + _, out_masks, out_shape = preprocessor.resize_pad_to_multiple( + image, multiple=32, masks=masks) + return [out_masks, out_shape] + + out_masks, out_shape = self.execute_cpu(graph_fn, []) + self.assertAllClose(out_masks.sum(), 200 * 100 * 10) + self.assertAllEqual(out_shape, (200, 100, 3)) + self.assertAllEqual(out_masks.shape, (10, 224, 128)) + + def testResizePadToMultipleEmptyMasks(self): + """Tests resizing when padding to multiple with an empty mask.""" + def graph_fn(): + image = tf.ones((200, 100, 3), dtype=tf.float32) + masks = tf.ones((0, 200, 100), dtype=tf.float32) + _, out_masks, out_shape = preprocessor.resize_pad_to_multiple( + image, multiple=32, masks=masks) + return [out_masks, out_shape] + out_masks, out_shape = self.execute_cpu(graph_fn, []) + self.assertAllEqual(out_shape, (200, 100, 3)) + self.assertAllEqual(out_masks.shape, (0, 224, 128)) + + def testScaleBoxesToPixelCoordinates(self): + """Tests box scaling, checking scaled values.""" + def graph_fn(): + in_shape = [60, 40, 3] + in_boxes = [[0.1, 0.2, 0.4, 0.6], + [0.5, 0.3, 0.9, 0.7]] + in_image = tf.random_uniform(in_shape) + in_boxes = tf.constant(in_boxes) + _, out_boxes = preprocessor.scale_boxes_to_pixel_coordinates( + in_image, boxes=in_boxes) + return out_boxes + expected_boxes = [[6., 8., 24., 24.], + [30., 12., 54., 28.]] + out_boxes = self.execute_cpu(graph_fn, []) + self.assertAllClose(out_boxes, expected_boxes) + + def testScaleBoxesToPixelCoordinatesWithKeypoints(self): + """Tests box and keypoint scaling, checking scaled values.""" + def graph_fn(): + in_shape = [60, 40, 3] + in_boxes = self.createTestBoxes() + in_keypoints, _ = self.createTestKeypoints() + in_image = tf.random_uniform(in_shape) + (_, out_boxes, + out_keypoints) = preprocessor.scale_boxes_to_pixel_coordinates( + in_image, boxes=in_boxes, keypoints=in_keypoints) + return out_boxes, out_keypoints + expected_boxes = [[0., 10., 45., 40.], + [15., 20., 45., 40.]] + expected_keypoints = [ + [[6., 4.], [12., 8.], [18., 12.]], + [[24., 16.], [30., 20.], [36., 24.]], + ] + out_boxes_, out_keypoints_ = self.execute_cpu(graph_fn, []) + self.assertAllClose(out_boxes_, expected_boxes) + self.assertAllClose(out_keypoints_, expected_keypoints) + + def testSubtractChannelMean(self): + """Tests whether channel means have been subtracted.""" + def graph_fn(): + image = tf.zeros((240, 320, 3)) + means = [1, 2, 3] + actual = preprocessor.subtract_channel_mean(image, means=means) + return actual + actual = self.execute_cpu(graph_fn, []) + self.assertTrue((actual[:, :, 0], -1)) + self.assertTrue((actual[:, :, 1], -2)) + self.assertTrue((actual[:, :, 2], -3)) + + def testOneHotEncoding(self): + """Tests one hot encoding of multiclass labels.""" + def graph_fn(): + labels = 
tf.constant([1, 4, 2], dtype=tf.int32) + one_hot = preprocessor.one_hot_encoding(labels, num_classes=5) + return one_hot + one_hot = self.execute_cpu(graph_fn, []) + self.assertAllEqual([0, 1, 1, 0, 1], one_hot) + + def testRandomSelfConcatImageVertically(self): + + def graph_fn(): + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + confidences = weights + scores = self.createTestMultiClassScores() + + tensor_dict = { + fields.InputDataFields.image: tf.cast(images, dtype=tf.float32), + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + fields.InputDataFields.groundtruth_confidences: confidences, + fields.InputDataFields.multiclass_scores: scores, + } + + preprocessing_options = [(preprocessor.random_self_concat_image, { + 'concat_vertical_probability': 1.0, + 'concat_horizontal_probability': 0.0, + })] + func_arg_map = preprocessor.get_default_func_arg_map( + True, True, True) + output_tensor_dict = preprocessor.preprocess( + tensor_dict, preprocessing_options, func_arg_map=func_arg_map) + + original_shape = tf.shape(images)[1:3] + final_shape = tf.shape(output_tensor_dict[fields.InputDataFields.image])[ + 1:3] + return [ + original_shape, + boxes, + labels, + confidences, + scores, + final_shape, + output_tensor_dict[fields.InputDataFields.groundtruth_boxes], + output_tensor_dict[fields.InputDataFields.groundtruth_classes], + output_tensor_dict[fields.InputDataFields.groundtruth_confidences], + output_tensor_dict[fields.InputDataFields.multiclass_scores], + ] + (original_shape, boxes, labels, confidences, scores, final_shape, new_boxes, + new_labels, new_confidences, new_scores) = self.execute(graph_fn, []) + self.assertAllEqual(final_shape, original_shape * np.array([2, 1])) + self.assertAllEqual(2 * boxes.size, new_boxes.size) + self.assertAllEqual(2 * labels.size, new_labels.size) + self.assertAllEqual(2 * confidences.size, new_confidences.size) + self.assertAllEqual(2 * scores.size, new_scores.size) + + def testRandomSelfConcatImageHorizontally(self): + def graph_fn(): + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + confidences = weights + scores = self.createTestMultiClassScores() + + tensor_dict = { + fields.InputDataFields.image: tf.cast(images, dtype=tf.float32), + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + fields.InputDataFields.groundtruth_confidences: confidences, + fields.InputDataFields.multiclass_scores: scores, + } + + preprocessing_options = [(preprocessor.random_self_concat_image, { + 'concat_vertical_probability': 0.0, + 'concat_horizontal_probability': 1.0, + })] + func_arg_map = preprocessor.get_default_func_arg_map( + True, True, True) + output_tensor_dict = preprocessor.preprocess( + tensor_dict, preprocessing_options, func_arg_map=func_arg_map) + + original_shape = tf.shape(images)[1:3] + final_shape = tf.shape(output_tensor_dict[fields.InputDataFields.image])[ + 1:3] + return [ + original_shape, + boxes, + labels, + confidences, + scores, + final_shape, + output_tensor_dict[fields.InputDataFields.groundtruth_boxes], + output_tensor_dict[fields.InputDataFields.groundtruth_classes], + 
output_tensor_dict[fields.InputDataFields.groundtruth_confidences], + output_tensor_dict[fields.InputDataFields.multiclass_scores], + ] + (original_shape, boxes, labels, confidences, scores, final_shape, new_boxes, + new_labels, new_confidences, new_scores) = self.execute(graph_fn, []) + self.assertAllEqual(final_shape, original_shape * np.array([1, 2])) + self.assertAllEqual(2 * boxes.size, new_boxes.size) + self.assertAllEqual(2 * labels.size, new_labels.size) + self.assertAllEqual(2 * confidences.size, new_confidences.size) + self.assertAllEqual(2 * scores.size, new_scores.size) + + def testSSDRandomCropWithCache(self): + preprocess_options = [ + (preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + }), + (preprocessor.ssd_random_crop, {})] + self._testPreprocessorCache(preprocess_options, + test_boxes=True, + test_masks=False, + test_keypoints=False) + + def testSSDRandomCrop(self): + def graph_fn(): + preprocessing_options = [ + (preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + }), + (preprocessor.ssd_random_crop, {})] + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + } + distorted_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + distorted_images = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + + images_rank = tf.rank(images) + distorted_images_rank = tf.rank(distorted_images) + boxes_rank = tf.rank(boxes) + distorted_boxes_rank = tf.rank(distorted_boxes) + return [boxes_rank, distorted_boxes_rank, images_rank, + distorted_images_rank] + (boxes_rank_, distorted_boxes_rank_, images_rank_, + distorted_images_rank_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) + self.assertAllEqual(images_rank_, distorted_images_rank_) + + def testSSDRandomCropWithMultiClassScores(self): + def graph_fn(): + preprocessing_options = [(preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + }), (preprocessor.ssd_random_crop, {})] + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + multiclass_scores = self.createTestMultiClassScores() + + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.multiclass_scores: multiclass_scores, + fields.InputDataFields.groundtruth_weights: weights, + } + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_multiclass_scores=True) + distorted_tensor_dict = preprocessor.preprocess( + tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) + distorted_images = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + distorted_multiclass_scores = distorted_tensor_dict[ + fields.InputDataFields.multiclass_scores] + + images_rank = 
tf.rank(images) + distorted_images_rank = tf.rank(distorted_images) + boxes_rank = tf.rank(boxes) + distorted_boxes_rank = tf.rank(distorted_boxes) + multiclass_scores_rank = tf.rank(multiclass_scores) + distorted_multiclass_scores_rank = tf.rank(distorted_multiclass_scores) + return [ + boxes_rank, distorted_boxes, distorted_boxes_rank, images_rank, + distorted_images_rank, multiclass_scores_rank, + distorted_multiclass_scores, distorted_multiclass_scores_rank + ] + + (boxes_rank_, distorted_boxes_, distorted_boxes_rank_, images_rank_, + distorted_images_rank_, multiclass_scores_rank_, + distorted_multiclass_scores_, + distorted_multiclass_scores_rank_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) + self.assertAllEqual(images_rank_, distorted_images_rank_) + self.assertAllEqual(multiclass_scores_rank_, + distorted_multiclass_scores_rank_) + self.assertAllEqual(distorted_boxes_.shape[0], + distorted_multiclass_scores_.shape[0]) + + def testSSDRandomCropPad(self): + def graph_fn(): + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + preprocessing_options = [ + (preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + }), + (preprocessor.ssd_random_crop_pad, {})] + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + } + distorted_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + distorted_images = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + + images_rank = tf.rank(images) + distorted_images_rank = tf.rank(distorted_images) + boxes_rank = tf.rank(boxes) + distorted_boxes_rank = tf.rank(distorted_boxes) + return [ + boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank + ] + (boxes_rank_, distorted_boxes_rank_, images_rank_, + distorted_images_rank_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) + self.assertAllEqual(images_rank_, distorted_images_rank_) + + def testSSDRandomCropFixedAspectRatioWithCache(self): + preprocess_options = [ + (preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + }), + (preprocessor.ssd_random_crop_fixed_aspect_ratio, {})] + self._testPreprocessorCache(preprocess_options, + test_boxes=True, + test_masks=False, + test_keypoints=False) + + def _testSSDRandomCropFixedAspectRatio(self, + include_multiclass_scores, + include_instance_masks, + include_keypoints): + def graph_fn(): + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + preprocessing_options = [(preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + }), (preprocessor.ssd_random_crop_fixed_aspect_ratio, {})] + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights + } + if include_multiclass_scores: + multiclass_scores = self.createTestMultiClassScores() 
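+ # multiclass_scores has shape [num_boxes, num_classes]; registering it in the
+ # tensor dict (with include_multiclass_scores=True below) lets the crop op
+ # prune the scores in sync with the boxes it keeps.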
+ tensor_dict[fields.InputDataFields.multiclass_scores] = ( + multiclass_scores) + if include_instance_masks: + masks = self.createTestMasks() + tensor_dict[fields.InputDataFields.groundtruth_instance_masks] = masks + if include_keypoints: + keypoints, _ = self.createTestKeypoints() + tensor_dict[fields.InputDataFields.groundtruth_keypoints] = keypoints + + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_multiclass_scores=include_multiclass_scores, + include_instance_masks=include_instance_masks, + include_keypoints=include_keypoints) + distorted_tensor_dict = preprocessor.preprocess( + tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) + distorted_images = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + images_rank = tf.rank(images) + distorted_images_rank = tf.rank(distorted_images) + boxes_rank = tf.rank(boxes) + distorted_boxes_rank = tf.rank(distorted_boxes) + return [boxes_rank, distorted_boxes_rank, images_rank, + distorted_images_rank] + + (boxes_rank_, distorted_boxes_rank_, images_rank_, + distorted_images_rank_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) + self.assertAllEqual(images_rank_, distorted_images_rank_) + + def testSSDRandomCropFixedAspectRatio(self): + self._testSSDRandomCropFixedAspectRatio(include_multiclass_scores=False, + include_instance_masks=False, + include_keypoints=False) + + def testSSDRandomCropFixedAspectRatioWithMultiClassScores(self): + self._testSSDRandomCropFixedAspectRatio(include_multiclass_scores=True, + include_instance_masks=False, + include_keypoints=False) + + def testSSDRandomCropFixedAspectRatioWithMasksAndKeypoints(self): + self._testSSDRandomCropFixedAspectRatio(include_multiclass_scores=False, + include_instance_masks=True, + include_keypoints=True) + + def testSSDRandomCropFixedAspectRatioWithLabelScoresMasksAndKeypoints(self): + self._testSSDRandomCropFixedAspectRatio(include_multiclass_scores=False, + include_instance_masks=True, + include_keypoints=True) + + def testConvertClassLogitsToSoftmax(self): + def graph_fn(): + multiclass_scores = tf.constant( + [[1.0, 0.0], [0.5, 0.5], [1000, 1]], dtype=tf.float32) + temperature = 2.0 + + converted_multiclass_scores = ( + preprocessor.convert_class_logits_to_softmax( + multiclass_scores=multiclass_scores, temperature=temperature)) + return converted_multiclass_scores + converted_multiclass_scores_ = self.execute_cpu(graph_fn, []) + expected_converted_multiclass_scores = [[0.62245935, 0.37754068], + [0.5, 0.5], + [1, 0]] + self.assertAllClose(converted_multiclass_scores_, + expected_converted_multiclass_scores) + + @parameterized.named_parameters( + ('scale_1', 1.0), + ('scale_1.5', 1.5), + ('scale_0.5', 0.5) + ) + def test_square_crop_by_scale(self, scale): + def graph_fn(): + image = np.random.randn(256, 256, 1) + + masks = tf.constant(image[:, :, 0].reshape(1, 256, 256)) + image = tf.constant(image) + keypoints = tf.constant([[[0.25, 0.25], [0.75, 0.75]]]) + + boxes = tf.constant([[0.25, .25, .75, .75]]) + labels = tf.constant([[1]]) + label_weights = tf.constant([[1.]]) + + (new_image, new_boxes, _, _, new_masks, + new_keypoints) = preprocessor.random_square_crop_by_scale( + image, + boxes, + labels, + label_weights, + masks=masks, + keypoints=keypoints, + max_border=256, + scale_min=scale, + scale_max=scale) + return new_image, new_boxes, new_masks, new_keypoints + image, boxes, masks, keypoints = 
self.execute_cpu(graph_fn, []) + ymin, xmin, ymax, xmax = boxes[0] + self.assertAlmostEqual(ymax - ymin, 0.5 / scale) + self.assertAlmostEqual(xmax - xmin, 0.5 / scale) + + k1 = keypoints[0, 0] + k2 = keypoints[0, 1] + self.assertAlmostEqual(k2[0] - k1[0], 0.5 / scale) + self.assertAlmostEqual(k2[1] - k1[1], 0.5 / scale) + + size = max(image.shape) + self.assertAlmostEqual(scale * 256.0, size) + + self.assertAllClose(image[:, :, 0], + masks[0, :, :]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/core/region_similarity_calculator.py b/models/research/object_detection/core/region_similarity_calculator.py new file mode 100644 index 0000000000000000000000000000000000000000..fd75d52f9f7b2d79e846f733fc312ec24176bf95 --- /dev/null +++ b/models/research/object_detection/core/region_similarity_calculator.py @@ -0,0 +1,159 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Region Similarity Calculators for BoxLists. + +Region Similarity Calculators compare a pairwise measure of similarity +between the boxes in two BoxLists. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from abc import ABCMeta +from abc import abstractmethod + +import six +import tensorflow.compat.v1 as tf + +from object_detection.core import box_list_ops +from object_detection.core import standard_fields as fields + + +class RegionSimilarityCalculator(six.with_metaclass(ABCMeta, object)): + """Abstract base class for region similarity calculator.""" + + def compare(self, boxlist1, boxlist2, scope=None): + """Computes matrix of pairwise similarity between BoxLists. + + This op (to be overridden) computes a measure of pairwise similarity between + the boxes in the given BoxLists. Higher values indicate more similarity. + + Note that this method simply measures similarity and does not explicitly + perform a matching. + + Args: + boxlist1: BoxList holding N boxes. + boxlist2: BoxList holding M boxes. + scope: Op scope name. Defaults to 'Compare' if None. + + Returns: + a (float32) tensor of shape [N, M] with pairwise similarity score. + """ + with tf.name_scope(scope, 'Compare', [boxlist1, boxlist2]) as scope: + return self._compare(boxlist1, boxlist2) + + @abstractmethod + def _compare(self, boxlist1, boxlist2): + pass + + +class IouSimilarity(RegionSimilarityCalculator): + """Class to compute similarity based on Intersection over Union (IOU) metric. + + This class computes pairwise similarity between two BoxLists based on IOU. + """ + + def _compare(self, boxlist1, boxlist2): + """Compute pairwise IOU similarity between the two BoxLists. + + Args: + boxlist1: BoxList holding N boxes. + boxlist2: BoxList holding M boxes. + + Returns: + A tensor with shape [N, M] representing pairwise iou scores. 
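+ IOU is computed as intersection area divided by union area, so each
+ score lies in [0, 1].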
+ """ + return box_list_ops.iou(boxlist1, boxlist2) + + +class NegSqDistSimilarity(RegionSimilarityCalculator): + """Class to compute similarity based on the squared distance metric. + + This class computes pairwise similarity between two BoxLists based on the + negative squared distance metric. + """ + + def _compare(self, boxlist1, boxlist2): + """Compute matrix of (negated) sq distances. + + Args: + boxlist1: BoxList holding N boxes. + boxlist2: BoxList holding M boxes. + + Returns: + A tensor with shape [N, M] representing negated pairwise squared distance. + """ + return -1 * box_list_ops.sq_dist(boxlist1, boxlist2) + + +class IoaSimilarity(RegionSimilarityCalculator): + """Class to compute similarity based on Intersection over Area (IOA) metric. + + This class computes pairwise similarity between two BoxLists based on their + pairwise intersections divided by the areas of second BoxLists. + """ + + def _compare(self, boxlist1, boxlist2): + """Compute pairwise IOA similarity between the two BoxLists. + + Args: + boxlist1: BoxList holding N boxes. + boxlist2: BoxList holding M boxes. + + Returns: + A tensor with shape [N, M] representing pairwise IOA scores. + """ + return box_list_ops.ioa(boxlist1, boxlist2) + + +class ThresholdedIouSimilarity(RegionSimilarityCalculator): + """Class to compute similarity based on thresholded IOU and score. + + This class computes pairwise similarity between two BoxLists based on IOU and + a 'score' present in boxlist1. If IOU > threshold, then the entry in the + output pairwise tensor will contain `score`, otherwise 0. + """ + + def __init__(self, iou_threshold=0): + """Initialize the ThresholdedIouSimilarity. + + Args: + iou_threshold: For a given pair of boxes, if the IOU is > iou_threshold, + then the comparison result will be the foreground probability of + the first box, otherwise it will be zero. + """ + super(ThresholdedIouSimilarity, self).__init__() + self._iou_threshold = iou_threshold + + def _compare(self, boxlist1, boxlist2): + """Compute pairwise IOU similarity between the two BoxLists and score. + + Args: + boxlist1: BoxList holding N boxes. Must have a score field. + boxlist2: BoxList holding M boxes. + + Returns: + A tensor with shape [N, M] representing scores threholded by pairwise + iou scores. + """ + ious = box_list_ops.iou(boxlist1, boxlist2) + scores = boxlist1.get_field(fields.BoxListFields.scores) + scores = tf.expand_dims(scores, axis=1) + row_replicated_scores = tf.tile(scores, [1, tf.shape(ious)[-1]]) + thresholded_ious = tf.where(ious > self._iou_threshold, + row_replicated_scores, tf.zeros_like(ious)) + + return thresholded_ious diff --git a/models/research/object_detection/core/region_similarity_calculator_test.py b/models/research/object_detection/core/region_similarity_calculator_test.py new file mode 100644 index 0000000000000000000000000000000000000000..9f9a10b637fa127dfd83637f43508f9dbdb80c18 --- /dev/null +++ b/models/research/object_detection/core/region_similarity_calculator_test.py @@ -0,0 +1,98 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for region_similarity_calculator.""" +import tensorflow.compat.v1 as tf + +from object_detection.core import box_list +from object_detection.core import region_similarity_calculator +from object_detection.core import standard_fields as fields +from object_detection.utils import test_case + + +class RegionSimilarityCalculatorTest(test_case.TestCase): + + def test_get_correct_pairwise_similarity_based_on_iou(self): + def graph_fn(): + corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) + corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], + [0.0, 0.0, 20.0, 20.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + iou_similarity_calculator = region_similarity_calculator.IouSimilarity() + iou_similarity = iou_similarity_calculator.compare(boxes1, boxes2) + return iou_similarity + exp_output = [[2.0 / 16.0, 0, 6.0 / 400.0], [1.0 / 16.0, 0.0, 5.0 / 400.0]] + iou_output = self.execute(graph_fn, []) + self.assertAllClose(iou_output, exp_output) + + def test_get_correct_pairwise_similarity_based_on_squared_distances(self): + def graph_fn(): + corners1 = tf.constant([[0.0, 0.0, 0.0, 0.0], + [1.0, 1.0, 0.0, 2.0]]) + corners2 = tf.constant([[3.0, 4.0, 1.0, 0.0], + [-4.0, 0.0, 0.0, 3.0], + [0.0, 0.0, 0.0, 0.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + dist_similarity_calc = region_similarity_calculator.NegSqDistSimilarity() + dist_similarity = dist_similarity_calc.compare(boxes1, boxes2) + return dist_similarity + exp_output = [[-26, -25, 0], [-18, -27, -6]] + iou_output = self.execute(graph_fn, []) + self.assertAllClose(iou_output, exp_output) + + def test_get_correct_pairwise_similarity_based_on_ioa(self): + def graph_fn(): + corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) + corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], + [0.0, 0.0, 20.0, 20.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + ioa_similarity_calculator = region_similarity_calculator.IoaSimilarity() + ioa_similarity_1 = ioa_similarity_calculator.compare(boxes1, boxes2) + ioa_similarity_2 = ioa_similarity_calculator.compare(boxes2, boxes1) + return ioa_similarity_1, ioa_similarity_2 + exp_output_1 = [[2.0 / 12.0, 0, 6.0 / 400.0], + [1.0 / 12.0, 0.0, 5.0 / 400.0]] + exp_output_2 = [[2.0 / 6.0, 1.0 / 5.0], + [0, 0], + [6.0 / 6.0, 5.0 / 5.0]] + iou_output_1, iou_output_2 = self.execute(graph_fn, []) + self.assertAllClose(iou_output_1, exp_output_1) + self.assertAllClose(iou_output_2, exp_output_2) + + def test_get_correct_pairwise_similarity_based_on_thresholded_iou(self): + def graph_fn(): + corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) + corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], + [0.0, 0.0, 20.0, 20.0]]) + scores = tf.constant([.3, .6]) + iou_threshold = .013 + boxes1 = box_list.BoxList(corners1) + boxes1.add_field(fields.BoxListFields.scores, scores) + boxes2 = box_list.BoxList(corners2) + iou_similarity_calculator = ( + region_similarity_calculator.ThresholdedIouSimilarity( + iou_threshold=iou_threshold)) + iou_similarity = iou_similarity_calculator.compare(boxes1, boxes2) + return iou_similarity + exp_output = tf.constant([[0.3, 0., 0.3], [0.6, 0., 0.]]) + iou_output = self.execute(graph_fn, []) + 
self.assertAllClose(iou_output, exp_output) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/core/standard_fields.py b/models/research/object_detection/core/standard_fields.py new file mode 100644 index 0000000000000000000000000000000000000000..df995b4a429ec4e587d83cf8a94fb8c223ad4dca --- /dev/null +++ b/models/research/object_detection/core/standard_fields.py @@ -0,0 +1,300 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Contains classes specifying naming conventions used for object detection. + + +Specifies: + InputDataFields: standard fields used by reader/preprocessor/batcher. + DetectionResultFields: standard fields returned by object detector. + BoxListFields: standard field used by BoxList + TfExampleFields: standard fields for tf-example data format (go/tf-example). +""" + + +class InputDataFields(object): + """Names for the input tensors. + + Holds the standard data field names to use for identifying input tensors. This + should be used by the decoder to identify keys for the returned tensor_dict + containing input tensors. And it should be used by the model to identify the + tensors it needs. + + Attributes: + image: image. + image_additional_channels: additional channels. + original_image: image in the original input size. + original_image_spatial_shape: image in the original input size. + key: unique key corresponding to image. + source_id: source of the original image. + filename: original filename of the dataset (without common path). + groundtruth_image_classes: image-level class labels. + groundtruth_image_confidences: image-level class confidences. + groundtruth_labeled_classes: image-level annotation that indicates the + classes for which an image has been labeled. + groundtruth_boxes: coordinates of the ground truth boxes in the image. + groundtruth_classes: box-level class labels. + groundtruth_confidences: box-level class confidences. The shape should be + the same as the shape of groundtruth_classes. + groundtruth_label_types: box-level label types (e.g. explicit negative). + groundtruth_is_crowd: [DEPRECATED, use groundtruth_group_of instead] + is the groundtruth a single object or a crowd. + groundtruth_area: area of a groundtruth segment. + groundtruth_difficult: is a `difficult` object + groundtruth_group_of: is a `group_of` objects, e.g. multiple objects of the + same class, forming a connected group, where instances are heavily + occluding each other. + proposal_boxes: coordinates of object proposal boxes. + proposal_objectness: objectness score of each proposal. + groundtruth_instance_masks: ground truth instance masks. + groundtruth_instance_boundaries: ground truth instance boundaries. + groundtruth_instance_classes: instance mask-level class labels. + groundtruth_keypoints: ground truth keypoints. 
+ groundtruth_keypoint_visibilities: ground truth keypoint visibilities. + groundtruth_keypoint_weights: groundtruth weight factor for keypoints. + groundtruth_label_weights: groundtruth label weights. + groundtruth_weights: groundtruth weight factor for bounding boxes. + num_groundtruth_boxes: number of groundtruth boxes. + is_annotated: whether an image has been labeled or not. + true_image_shapes: true shapes of images in the resized images, as resized + images can be padded with zeros. + multiclass_scores: the label score per class for each box. + context_features: a flattened list of contextual features. + context_feature_length: the fixed length of each feature in + context_features, used for reshaping. + valid_context_size: the valid context size, used in filtering the padded + context features. + image_format: format for the images, used to decode + image_height: height of images, used to decode + image_width: width of images, used to decode + """ + image = 'image' + image_additional_channels = 'image_additional_channels' + original_image = 'original_image' + original_image_spatial_shape = 'original_image_spatial_shape' + key = 'key' + source_id = 'source_id' + filename = 'filename' + groundtruth_image_classes = 'groundtruth_image_classes' + groundtruth_image_confidences = 'groundtruth_image_confidences' + groundtruth_labeled_classes = 'groundtruth_labeled_classes' + groundtruth_boxes = 'groundtruth_boxes' + groundtruth_classes = 'groundtruth_classes' + groundtruth_confidences = 'groundtruth_confidences' + groundtruth_label_types = 'groundtruth_label_types' + groundtruth_is_crowd = 'groundtruth_is_crowd' + groundtruth_area = 'groundtruth_area' + groundtruth_difficult = 'groundtruth_difficult' + groundtruth_group_of = 'groundtruth_group_of' + proposal_boxes = 'proposal_boxes' + proposal_objectness = 'proposal_objectness' + groundtruth_instance_masks = 'groundtruth_instance_masks' + groundtruth_instance_boundaries = 'groundtruth_instance_boundaries' + groundtruth_instance_classes = 'groundtruth_instance_classes' + groundtruth_keypoints = 'groundtruth_keypoints' + groundtruth_keypoint_visibilities = 'groundtruth_keypoint_visibilities' + groundtruth_keypoint_weights = 'groundtruth_keypoint_weights' + groundtruth_label_weights = 'groundtruth_label_weights' + groundtruth_weights = 'groundtruth_weights' + num_groundtruth_boxes = 'num_groundtruth_boxes' + is_annotated = 'is_annotated' + true_image_shape = 'true_image_shape' + multiclass_scores = 'multiclass_scores' + context_features = 'context_features' + context_feature_length = 'context_feature_length' + valid_context_size = 'valid_context_size' + image_timestamps = 'image_timestamps' + image_format = 'image_format' + image_height = 'image_height' + image_width = 'image_width' + + +class DetectionResultFields(object): + """Naming conventions for storing the output of the detector. + + Attributes: + source_id: source of the original image. + key: unique key corresponding to image. + detection_boxes: coordinates of the detection boxes in the image. + detection_scores: detection scores for the detection boxes in the image. + detection_multiclass_scores: class score distribution (including background) + for detection boxes in the image including background class. + detection_classes: detection-level class labels. + detection_masks: contains a segmentation mask for each detection box. + detection_boundaries: contains an object boundary for each detection box. + detection_keypoints: contains detection keypoints for each detection box. 
+ detection_keypoint_scores: contains detection keypoint scores. + num_detections: number of detections in the batch. + raw_detection_boxes: contains decoded detection boxes without Non-Max + suppression. + raw_detection_scores: contains class score logits for raw detection boxes. + detection_anchor_indices: The anchor indices of the detections after NMS. + detection_features: contains extracted features for each detected box + after NMS. + """ + + source_id = 'source_id' + key = 'key' + detection_boxes = 'detection_boxes' + detection_scores = 'detection_scores' + detection_multiclass_scores = 'detection_multiclass_scores' + detection_features = 'detection_features' + detection_classes = 'detection_classes' + detection_masks = 'detection_masks' + detection_boundaries = 'detection_boundaries' + detection_keypoints = 'detection_keypoints' + detection_keypoint_scores = 'detection_keypoint_scores' + num_detections = 'num_detections' + raw_detection_boxes = 'raw_detection_boxes' + raw_detection_scores = 'raw_detection_scores' + detection_anchor_indices = 'detection_anchor_indices' + + +class BoxListFields(object): + """Naming conventions for BoxLists. + + Attributes: + boxes: bounding box coordinates. + classes: classes per bounding box. + scores: scores per bounding box. + weights: sample weights per bounding box. + objectness: objectness score per bounding box. + masks: masks per bounding box. + boundaries: boundaries per bounding box. + keypoints: keypoints per bounding box. + keypoint_heatmaps: keypoint heatmaps per bounding box. + is_crowd: is_crowd annotation per bounding box. + """ + boxes = 'boxes' + classes = 'classes' + scores = 'scores' + weights = 'weights' + confidences = 'confidences' + objectness = 'objectness' + masks = 'masks' + boundaries = 'boundaries' + keypoints = 'keypoints' + keypoint_visibilities = 'keypoint_visibilities' + keypoint_heatmaps = 'keypoint_heatmaps' + is_crowd = 'is_crowd' + group_of = 'group_of' + + +class PredictionFields(object): + """Naming conventions for standardized prediction outputs. + + Attributes: + feature_maps: List of feature maps for prediction. + anchors: Generated anchors. + raw_detection_boxes: Decoded detection boxes without NMS. + raw_detection_feature_map_indices: Feature map indices from which each raw + detection box was produced. + """ + feature_maps = 'feature_maps' + anchors = 'anchors' + raw_detection_boxes = 'raw_detection_boxes' + raw_detection_feature_map_indices = 'raw_detection_feature_map_indices' + + +class TfExampleFields(object): + """TF-example proto feature names for object detection. + + Holds the standard feature names to load from an Example proto for object + detection. + + Attributes: + image_encoded: JPEG encoded string + image_format: image format, e.g. "JPEG" + filename: filename + channels: number of channels of image + colorspace: colorspace, e.g. "RGB" + height: height of image in pixels, e.g. 462 + width: width of image in pixels, e.g. 581 + source_id: original source of the image + image_class_text: image-level label in text format + image_class_label: image-level label in numerical format + image_class_confidence: image-level confidence of the label + object_class_text: labels in text format, e.g. ["person", "cat"] + object_class_label: labels in numbers, e.g. [16, 8] + object_bbox_xmin: xmin coordinates of groundtruth box, e.g. 10, 30 + object_bbox_xmax: xmax coordinates of groundtruth box, e.g. 50, 40 + object_bbox_ymin: ymin coordinates of groundtruth box, e.g. 
40, 50 + object_bbox_ymax: ymax coordinates of groundtruth box, e.g. 80, 70 + object_view: viewpoint of object, e.g. ["frontal", "left"] + object_truncated: is object truncated, e.g. [true, false] + object_occluded: is object occluded, e.g. [true, false] + object_difficult: is object difficult, e.g. [true, false] + object_group_of: is object a single object or a group of objects + object_depiction: is object a depiction + object_is_crowd: [DEPRECATED, use object_group_of instead] + is the object a single object or a crowd + object_segment_area: the area of the segment. + object_weight: a weight factor for the object's bounding box. + instance_masks: instance segmentation masks. + instance_boundaries: instance boundaries. + instance_classes: Classes for each instance segmentation mask. + detection_class_label: class label in numbers. + detection_bbox_ymin: ymin coordinates of a detection box. + detection_bbox_xmin: xmin coordinates of a detection box. + detection_bbox_ymax: ymax coordinates of a detection box. + detection_bbox_xmax: xmax coordinates of a detection box. + detection_score: detection score for the class label and box. + """ + image_encoded = 'image/encoded' + image_format = 'image/format' # format is reserved keyword + filename = 'image/filename' + channels = 'image/channels' + colorspace = 'image/colorspace' + height = 'image/height' + width = 'image/width' + source_id = 'image/source_id' + image_class_text = 'image/class/text' + image_class_label = 'image/class/label' + image_class_confidence = 'image/class/confidence' + object_class_text = 'image/object/class/text' + object_class_label = 'image/object/class/label' + object_bbox_ymin = 'image/object/bbox/ymin' + object_bbox_xmin = 'image/object/bbox/xmin' + object_bbox_ymax = 'image/object/bbox/ymax' + object_bbox_xmax = 'image/object/bbox/xmax' + object_view = 'image/object/view' + object_truncated = 'image/object/truncated' + object_occluded = 'image/object/occluded' + object_difficult = 'image/object/difficult' + object_group_of = 'image/object/group_of' + object_depiction = 'image/object/depiction' + object_is_crowd = 'image/object/is_crowd' + object_segment_area = 'image/object/segment/area' + object_weight = 'image/object/weight' + instance_masks = 'image/segmentation/object' + instance_boundaries = 'image/boundaries/object' + instance_classes = 'image/segmentation/object/class' + detection_class_label = 'image/detection/label' + detection_bbox_ymin = 'image/detection/bbox/ymin' + detection_bbox_xmin = 'image/detection/bbox/xmin' + detection_bbox_ymax = 'image/detection/bbox/ymax' + detection_bbox_xmax = 'image/detection/bbox/xmax' + detection_score = 'image/detection/score' + +# Sequence fields for SequenceExample inputs. +# All others are considered context fields. +SEQUENCE_FIELDS = [InputDataFields.image, + InputDataFields.source_id, + InputDataFields.groundtruth_boxes, + InputDataFields.num_groundtruth_boxes, + InputDataFields.groundtruth_classes, + InputDataFields.groundtruth_weights, + InputDataFields.source_id, + InputDataFields.is_annotated] diff --git a/models/research/object_detection/core/target_assigner.py b/models/research/object_detection/core/target_assigner.py new file mode 100644 index 0000000000000000000000000000000000000000..fd9020ebeac12c2610449afcbdd1f29dd3237f85 --- /dev/null +++ b/models/research/object_detection/core/target_assigner.py @@ -0,0 +1,1659 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Base target assigner module. + +The job of a TargetAssigner is, for a given set of anchors (bounding boxes) and +groundtruth detections (bounding boxes), to assign classification and regression +targets to each anchor as well as weights to each anchor (specifying, e.g., +which anchors should not contribute to training loss). + +It assigns classification/regression targets by performing the following steps: +1) Computing pairwise similarity between anchors and groundtruth boxes using a + provided RegionSimilarity Calculator +2) Computing a matching based on the similarity matrix using a provided Matcher +3) Assigning regression targets based on the matching and a provided BoxCoder +4) Assigning classification targets based on the matching and groundtruth labels + +Note that TargetAssigners only operate on detections from a single +image at a time, so any logic for applying a TargetAssigner to multiple +images must be handled externally. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range +from six.moves import zip +import tensorflow.compat.v1 as tf +import tensorflow.compat.v2 as tf2 + +from object_detection.box_coders import faster_rcnn_box_coder +from object_detection.box_coders import mean_stddev_box_coder +from object_detection.core import box_coder +from object_detection.core import box_list +from object_detection.core import box_list_ops +from object_detection.core import keypoint_ops +from object_detection.core import matcher as mat +from object_detection.core import region_similarity_calculator as sim_calc +from object_detection.core import standard_fields as fields +from object_detection.matchers import argmax_matcher +from object_detection.utils import shape_utils +from object_detection.utils import target_assigner_utils as ta_utils +from object_detection.utils import tf_version + +if tf_version.is_tf1(): + from object_detection.matchers import bipartite_matcher # pylint: disable=g-import-not-at-top + +ResizeMethod = tf2.image.ResizeMethod + +_DEFAULT_KEYPOINT_OFFSET_STD_DEV = 1.0 + + +class TargetAssigner(object): + """Target assigner to compute classification and regression targets.""" + + def __init__(self, + similarity_calc, + matcher, + box_coder_instance, + negative_class_weight=1.0): + """Construct Object Detection Target Assigner. + + Args: + similarity_calc: a RegionSimilarityCalculator + matcher: an object_detection.core.Matcher used to match groundtruth to + anchors. + box_coder_instance: an object_detection.core.BoxCoder used to encode + matching groundtruth boxes with respect to anchors. + negative_class_weight: classification weight to be associated to negative + anchors (default: 1.0). The weight must be in [0., 1.]. 
+ + Raises: + ValueError: if similarity_calc is not a RegionSimilarityCalculator or + if matcher is not a Matcher or if box_coder is not a BoxCoder + """ + if not isinstance(similarity_calc, sim_calc.RegionSimilarityCalculator): + raise ValueError('similarity_calc must be a RegionSimilarityCalculator') + if not isinstance(matcher, mat.Matcher): + raise ValueError('matcher must be a Matcher') + if not isinstance(box_coder_instance, box_coder.BoxCoder): + raise ValueError('box_coder must be a BoxCoder') + self._similarity_calc = similarity_calc + self._matcher = matcher + self._box_coder = box_coder_instance + self._negative_class_weight = negative_class_weight + + @property + def box_coder(self): + return self._box_coder + + # TODO(rathodv): move labels, scores, and weights to groundtruth_boxes fields. + def assign(self, + anchors, + groundtruth_boxes, + groundtruth_labels=None, + unmatched_class_label=None, + groundtruth_weights=None): + """Assign classification and regression targets to each anchor. + + For a given set of anchors and groundtruth detections, match anchors + to groundtruth_boxes and assign classification and regression targets to + each anchor as well as weights based on the resulting match (specifying, + e.g., which anchors should not contribute to training loss). + + Anchors that are not matched to anything are given a classification target + of self._unmatched_cls_target which can be specified via the constructor. + + Args: + anchors: a BoxList representing N anchors + groundtruth_boxes: a BoxList representing M groundtruth boxes + groundtruth_labels: a tensor of shape [M, d_1, ... d_k] + with labels for each of the ground_truth boxes. The subshape + [d_1, ... d_k] can be empty (corresponding to scalar inputs). When set + to None, groundtruth_labels assumes a binary problem where all + ground_truth boxes get a positive label (of 1). + unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k] + which is consistent with the classification target for each + anchor (and can be empty for scalar targets). This shape must thus be + compatible with the groundtruth labels that are passed to the "assign" + function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]). + If set to None, unmatched_cls_target is set to be [0] for each anchor. + groundtruth_weights: a float tensor of shape [M] indicating the weight to + assign to all anchors match to a particular groundtruth box. The weights + must be in [0., 1.]. If None, all weights are set to 1. Generally no + groundtruth boxes with zero weight match to any anchors as matchers are + aware of groundtruth weights. Additionally, `cls_weights` and + `reg_weights` are calculated using groundtruth weights as an added + safety. + + Returns: + cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], + where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels + which has shape [num_gt_boxes, d_1, d_2, ... d_k]. + cls_weights: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], + representing weights for each element in cls_targets. + reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension] + reg_weights: a float32 tensor with shape [num_anchors] + match: an int32 tensor of shape [num_anchors] containing result of anchor + groundtruth matching. Each position in the tensor indicates an anchor + and holds the following meaning: + (1) if match[i] >= 0, anchor i is matched with groundtruth match[i]. + (2) if match[i]=-1, anchor i is marked to be background . 
+ (3) if match[i]=-2, anchor i is ignored since it is not background and + does not have sufficient overlap to call it a foreground. + + Raises: + ValueError: if anchors or groundtruth_boxes are not of type + box_list.BoxList + """ + if not isinstance(anchors, box_list.BoxList): + raise ValueError('anchors must be an BoxList') + if not isinstance(groundtruth_boxes, box_list.BoxList): + raise ValueError('groundtruth_boxes must be an BoxList') + + if unmatched_class_label is None: + unmatched_class_label = tf.constant([0], tf.float32) + + if groundtruth_labels is None: + groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(), + 0)) + groundtruth_labels = tf.expand_dims(groundtruth_labels, -1) + + unmatched_shape_assert = shape_utils.assert_shape_equal( + shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[1:], + shape_utils.combined_static_and_dynamic_shape(unmatched_class_label)) + labels_and_box_shapes_assert = shape_utils.assert_shape_equal( + shape_utils.combined_static_and_dynamic_shape( + groundtruth_labels)[:1], + shape_utils.combined_static_and_dynamic_shape( + groundtruth_boxes.get())[:1]) + + if groundtruth_weights is None: + num_gt_boxes = groundtruth_boxes.num_boxes_static() + if not num_gt_boxes: + num_gt_boxes = groundtruth_boxes.num_boxes() + groundtruth_weights = tf.ones([num_gt_boxes], dtype=tf.float32) + + # set scores on the gt boxes + scores = 1 - groundtruth_labels[:, 0] + groundtruth_boxes.add_field(fields.BoxListFields.scores, scores) + + with tf.control_dependencies( + [unmatched_shape_assert, labels_and_box_shapes_assert]): + match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes, + anchors) + match = self._matcher.match(match_quality_matrix, + valid_rows=tf.greater(groundtruth_weights, 0)) + reg_targets = self._create_regression_targets(anchors, + groundtruth_boxes, + match) + cls_targets = self._create_classification_targets(groundtruth_labels, + unmatched_class_label, + match) + reg_weights = self._create_regression_weights(match, groundtruth_weights) + + cls_weights = self._create_classification_weights(match, + groundtruth_weights) + # convert cls_weights from per-anchor to per-class. + class_label_shape = tf.shape(cls_targets)[1:] + weights_shape = tf.shape(cls_weights) + weights_multiple = tf.concat( + [tf.ones_like(weights_shape), class_label_shape], + axis=0) + for _ in range(len(cls_targets.get_shape()[1:])): + cls_weights = tf.expand_dims(cls_weights, -1) + cls_weights = tf.tile(cls_weights, weights_multiple) + + num_anchors = anchors.num_boxes_static() + if num_anchors is not None: + reg_targets = self._reset_target_shape(reg_targets, num_anchors) + cls_targets = self._reset_target_shape(cls_targets, num_anchors) + reg_weights = self._reset_target_shape(reg_weights, num_anchors) + cls_weights = self._reset_target_shape(cls_weights, num_anchors) + + return (cls_targets, cls_weights, reg_targets, reg_weights, + match.match_results) + + def _reset_target_shape(self, target, num_anchors): + """Sets the static shape of the target. + + Args: + target: the target tensor. Its first dimension will be overwritten. + num_anchors: the number of anchors, which is used to override the target's + first dimension. + + Returns: + A tensor with the shape info filled in. + """ + target_shape = target.get_shape().as_list() + target_shape[0] = num_anchors + target.set_shape(target_shape) + return target + + def _create_regression_targets(self, anchors, groundtruth_boxes, match): + """Returns a regression target for each anchor. 
+ + Args: + anchors: a BoxList representing N anchors + groundtruth_boxes: a BoxList representing M groundtruth_boxes + match: a matcher.Match object + + Returns: + reg_targets: a float32 tensor with shape [N, box_code_dimension] + """ + matched_gt_boxes = match.gather_based_on_match( + groundtruth_boxes.get(), + unmatched_value=tf.zeros(4), + ignored_value=tf.zeros(4)) + matched_gt_boxlist = box_list.BoxList(matched_gt_boxes) + if groundtruth_boxes.has_field(fields.BoxListFields.keypoints): + groundtruth_keypoints = groundtruth_boxes.get_field( + fields.BoxListFields.keypoints) + matched_keypoints = match.gather_based_on_match( + groundtruth_keypoints, + unmatched_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]), + ignored_value=tf.zeros(groundtruth_keypoints.get_shape()[1:])) + matched_gt_boxlist.add_field(fields.BoxListFields.keypoints, + matched_keypoints) + matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors) + match_results_shape = shape_utils.combined_static_and_dynamic_shape( + match.match_results) + + # Zero out the unmatched and ignored regression targets. + unmatched_ignored_reg_targets = tf.tile( + self._default_regression_target(), [match_results_shape[0], 1]) + matched_anchors_mask = match.matched_column_indicator() + reg_targets = tf.where(matched_anchors_mask, + matched_reg_targets, + unmatched_ignored_reg_targets) + return reg_targets + + def _default_regression_target(self): + """Returns the default target for anchors to regress to. + + Default regression targets are set to zero (though in + this implementation what these targets are set to should + not matter as the regression weight of any box set to + regress to the default target is zero). + + Returns: + default_target: a float32 tensor with shape [1, box_code_dimension] + """ + return tf.constant([self._box_coder.code_size*[0]], tf.float32) + + def _create_classification_targets(self, groundtruth_labels, + unmatched_class_label, match): + """Create classification targets for each anchor. + + Assign a classification target of for each anchor to the matching + groundtruth label that is provided by match. Anchors that are not matched + to anything are given the target self._unmatched_cls_target + + Args: + groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k] + with labels for each of the ground_truth boxes. The subshape + [d_1, ... d_k] can be empty (corresponding to scalar labels). + unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k] + which is consistent with the classification target for each + anchor (and can be empty for scalar targets). This shape must thus be + compatible with the groundtruth labels that are passed to the "assign" + function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]). + match: a matcher.Match object that provides a matching between anchors + and groundtruth boxes. + + Returns: + a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the + subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has + shape [num_gt_boxes, d_1, d_2, ... d_k]. + """ + return match.gather_based_on_match( + groundtruth_labels, + unmatched_value=unmatched_class_label, + ignored_value=unmatched_class_label) + + def _create_regression_weights(self, match, groundtruth_weights): + """Set regression weight for each anchor. + + Only positive anchors are set to contribute to the regression loss, so this + method returns a weight of 1 for every positive anchor and 0 for every + negative anchor. 
+ + Args: + match: a matcher.Match object that provides a matching between anchors + and groundtruth boxes. + groundtruth_weights: a float tensor of shape [M] indicating the weight to + assign to all anchors match to a particular groundtruth box. + + Returns: + a float32 tensor with shape [num_anchors] representing regression weights. + """ + return match.gather_based_on_match( + groundtruth_weights, ignored_value=0., unmatched_value=0.) + + def _create_classification_weights(self, + match, + groundtruth_weights): + """Create classification weights for each anchor. + + Positive (matched) anchors are associated with a weight of + positive_class_weight and negative (unmatched) anchors are associated with + a weight of negative_class_weight. When anchors are ignored, weights are set + to zero. By default, both positive/negative weights are set to 1.0, + but they can be adjusted to handle class imbalance (which is almost always + the case in object detection). + + Args: + match: a matcher.Match object that provides a matching between anchors + and groundtruth boxes. + groundtruth_weights: a float tensor of shape [M] indicating the weight to + assign to all anchors match to a particular groundtruth box. + + Returns: + a float32 tensor with shape [num_anchors] representing classification + weights. + """ + return match.gather_based_on_match( + groundtruth_weights, + ignored_value=0., + unmatched_value=self._negative_class_weight) + + def get_box_coder(self): + """Get BoxCoder of this TargetAssigner. + + Returns: + BoxCoder object. + """ + return self._box_coder + + +# TODO(rathodv): This method pulls in all the implementation dependencies into +# core. Therefore its best to have this factory method outside of core. +def create_target_assigner(reference, stage=None, + negative_class_weight=1.0, use_matmul_gather=False): + """Factory function for creating standard target assigners. + + Args: + reference: string referencing the type of TargetAssigner. + stage: string denoting stage: {proposal, detection}. + negative_class_weight: classification weight to be associated to negative + anchors (default: 1.0) + use_matmul_gather: whether to use matrix multiplication based gather which + are better suited for TPUs. + + Returns: + TargetAssigner: desired target assigner. + + Raises: + ValueError: if combination reference+stage is invalid. + """ + if reference == 'Multibox' and stage == 'proposal': + if tf_version.is_tf2(): + raise ValueError('GreedyBipartiteMatcher is not supported in TF 2.X.') + similarity_calc = sim_calc.NegSqDistSimilarity() + matcher = bipartite_matcher.GreedyBipartiteMatcher() + box_coder_instance = mean_stddev_box_coder.MeanStddevBoxCoder() + + elif reference == 'FasterRCNN' and stage == 'proposal': + similarity_calc = sim_calc.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.7, + unmatched_threshold=0.3, + force_match_for_each_row=True, + use_matmul_gather=use_matmul_gather) + box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder( + scale_factors=[10.0, 10.0, 5.0, 5.0]) + + elif reference == 'FasterRCNN' and stage == 'detection': + similarity_calc = sim_calc.IouSimilarity() + # Uses all proposals with IOU < 0.5 as candidate negatives. 
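+    # Note (added comment, based on the ArgMaxMatcher defaults): with no
+    # unmatched_threshold given, ArgMaxMatcher falls back to matched_threshold
+    # (0.5) for both, so negatives_lower_than_unmatched=True leaves no "ignore"
+    # band and every proposal with IOU below 0.5 is labeled negative.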
+ matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, + negatives_lower_than_unmatched=True, + use_matmul_gather=use_matmul_gather) + box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder( + scale_factors=[10.0, 10.0, 5.0, 5.0]) + + elif reference == 'FastRCNN': + similarity_calc = sim_calc.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, + unmatched_threshold=0.1, + force_match_for_each_row=False, + negatives_lower_than_unmatched=False, + use_matmul_gather=use_matmul_gather) + box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder() + + else: + raise ValueError('No valid combination of reference and stage.') + + return TargetAssigner(similarity_calc, matcher, box_coder_instance, + negative_class_weight=negative_class_weight) + + +def batch_assign(target_assigner, + anchors_batch, + gt_box_batch, + gt_class_targets_batch, + unmatched_class_label=None, + gt_weights_batch=None): + """Batched assignment of classification and regression targets. + + Args: + target_assigner: a target assigner. + anchors_batch: BoxList representing N box anchors or list of BoxList objects + with length batch_size representing anchor sets. + gt_box_batch: a list of BoxList objects with length batch_size + representing groundtruth boxes for each image in the batch + gt_class_targets_batch: a list of tensors with length batch_size, where + each tensor has shape [num_gt_boxes_i, classification_target_size] and + num_gt_boxes_i is the number of boxes in the ith boxlist of + gt_box_batch. + unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k] + which is consistent with the classification target for each + anchor (and can be empty for scalar targets). This shape must thus be + compatible with the groundtruth labels that are passed to the "assign" + function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]). + gt_weights_batch: A list of 1-D tf.float32 tensors of shape + [num_boxes] containing weights for groundtruth boxes. + + Returns: + batch_cls_targets: a tensor with shape [batch_size, num_anchors, + num_classes], + batch_cls_weights: a tensor with shape [batch_size, num_anchors, + num_classes], + batch_reg_targets: a tensor with shape [batch_size, num_anchors, + box_code_dimension] + batch_reg_weights: a tensor with shape [batch_size, num_anchors], + match: an int32 tensor of shape [batch_size, num_anchors] containing result + of anchor groundtruth matching. Each position in the tensor indicates an + anchor and holds the following meaning: + (1) if match[x, i] >= 0, anchor i is matched with groundtruth match[x, i]. + (2) if match[x, i]=-1, anchor i is marked to be background . + (3) if match[x, i]=-2, anchor i is ignored since it is not background and + does not have sufficient overlap to call it a foreground. + + Raises: + ValueError: if input list lengths are inconsistent, i.e., + batch_size == len(gt_box_batch) == len(gt_class_targets_batch) + and batch_size == len(anchors_batch) unless anchors_batch is a single + BoxList. 
+ """ + if not isinstance(anchors_batch, list): + anchors_batch = len(gt_box_batch) * [anchors_batch] + if not all( + isinstance(anchors, box_list.BoxList) for anchors in anchors_batch): + raise ValueError('anchors_batch must be a BoxList or list of BoxLists.') + if not (len(anchors_batch) + == len(gt_box_batch) + == len(gt_class_targets_batch)): + raise ValueError('batch size incompatible with lengths of anchors_batch, ' + 'gt_box_batch and gt_class_targets_batch.') + cls_targets_list = [] + cls_weights_list = [] + reg_targets_list = [] + reg_weights_list = [] + match_list = [] + if gt_weights_batch is None: + gt_weights_batch = [None] * len(gt_class_targets_batch) + for anchors, gt_boxes, gt_class_targets, gt_weights in zip( + anchors_batch, gt_box_batch, gt_class_targets_batch, gt_weights_batch): + (cls_targets, cls_weights, + reg_targets, reg_weights, match) = target_assigner.assign( + anchors, gt_boxes, gt_class_targets, unmatched_class_label, gt_weights) + cls_targets_list.append(cls_targets) + cls_weights_list.append(cls_weights) + reg_targets_list.append(reg_targets) + reg_weights_list.append(reg_weights) + match_list.append(match) + batch_cls_targets = tf.stack(cls_targets_list) + batch_cls_weights = tf.stack(cls_weights_list) + batch_reg_targets = tf.stack(reg_targets_list) + batch_reg_weights = tf.stack(reg_weights_list) + batch_match = tf.stack(match_list) + return (batch_cls_targets, batch_cls_weights, batch_reg_targets, + batch_reg_weights, batch_match) + + +# Assign an alias to avoid large refactor of existing users. +batch_assign_targets = batch_assign + + +def batch_get_targets(batch_match, groundtruth_tensor_list, + groundtruth_weights_list, unmatched_value, + unmatched_weight): + """Returns targets based on anchor-groundtruth box matching results. + + Args: + batch_match: An int32 tensor of shape [batch, num_anchors] containing the + result of target assignment returned by TargetAssigner.assign(..). + groundtruth_tensor_list: A list of groundtruth tensors of shape + [num_groundtruth, d_1, d_2, ..., d_k]. The tensors can be of any type. + groundtruth_weights_list: A list of weights, one per groundtruth tensor, of + shape [num_groundtruth]. + unmatched_value: A tensor of shape [d_1, d_2, ..., d_k] of the same type as + groundtruth tensor containing target value for anchors that remain + unmatched. + unmatched_weight: Scalar weight to assign to anchors that remain unmatched. + + Returns: + targets: A tensor of shape [batch, num_anchors, d_1, d_2, ..., d_k] + containing targets for anchors. + weights: A float tensor of shape [batch, num_anchors] containing the weights + to assign to each target. 
+ """ + match_list = tf.unstack(batch_match) + targets_list = [] + weights_list = [] + for match_tensor, groundtruth_tensor, groundtruth_weight in zip( + match_list, groundtruth_tensor_list, groundtruth_weights_list): + match_object = mat.Match(match_tensor) + targets = match_object.gather_based_on_match( + groundtruth_tensor, + unmatched_value=unmatched_value, + ignored_value=unmatched_value) + targets_list.append(targets) + weights = match_object.gather_based_on_match( + groundtruth_weight, + unmatched_value=unmatched_weight, + ignored_value=tf.zeros_like(unmatched_weight)) + weights_list.append(weights) + return tf.stack(targets_list), tf.stack(weights_list) + + +def batch_assign_confidences(target_assigner, + anchors_batch, + gt_box_batch, + gt_class_confidences_batch, + gt_weights_batch=None, + unmatched_class_label=None, + include_background_class=True, + implicit_class_weight=1.0): + """Batched assignment of classification and regression targets. + + This differences between batch_assign_confidences and batch_assign_targets: + - 'batch_assign_targets' supports scalar (agnostic), vector (multiclass) and + tensor (high-dimensional) targets. 'batch_assign_confidences' only support + scalar (agnostic) and vector (multiclass) targets. + - 'batch_assign_targets' assumes the input class tensor using the binary + one/K-hot encoding. 'batch_assign_confidences' takes the class confidence + scores as the input, where 1 means positive classes, 0 means implicit + negative classes, and -1 means explicit negative classes. + - 'batch_assign_confidences' assigns the targets in the similar way as + 'batch_assign_targets' except that it gives different weights for implicit + and explicit classes. This allows user to control the negative gradients + pushed differently for implicit and explicit examples during the training. + + Args: + target_assigner: a target assigner. + anchors_batch: BoxList representing N box anchors or list of BoxList objects + with length batch_size representing anchor sets. + gt_box_batch: a list of BoxList objects with length batch_size + representing groundtruth boxes for each image in the batch + gt_class_confidences_batch: a list of tensors with length batch_size, where + each tensor has shape [num_gt_boxes_i, classification_target_size] and + num_gt_boxes_i is the number of boxes in the ith boxlist of + gt_box_batch. Note that in this tensor, 1 means explicit positive class, + -1 means explicit negative class, and 0 means implicit negative class. + gt_weights_batch: A list of 1-D tf.float32 tensors of shape + [num_gt_boxes_i] containing weights for groundtruth boxes. + unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k] + which is consistent with the classification target for each + anchor (and can be empty for scalar targets). This shape must thus be + compatible with the groundtruth labels that are passed to the "assign" + function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]). + include_background_class: whether or not gt_class_confidences_batch includes + the background class. + implicit_class_weight: the weight assigned to implicit examples. 
+ + Returns: + batch_cls_targets: a tensor with shape [batch_size, num_anchors, + num_classes], + batch_cls_weights: a tensor with shape [batch_size, num_anchors, + num_classes], + batch_reg_targets: a tensor with shape [batch_size, num_anchors, + box_code_dimension] + batch_reg_weights: a tensor with shape [batch_size, num_anchors], + match: an int32 tensor of shape [batch_size, num_anchors] containing result + of anchor groundtruth matching. Each position in the tensor indicates an + anchor and holds the following meaning: + (1) if match[x, i] >= 0, anchor i is matched with groundtruth match[x, i]. + (2) if match[x, i]=-1, anchor i is marked to be background . + (3) if match[x, i]=-2, anchor i is ignored since it is not background and + does not have sufficient overlap to call it a foreground. + + Raises: + ValueError: if input list lengths are inconsistent, i.e., + batch_size == len(gt_box_batch) == len(gt_class_targets_batch) + and batch_size == len(anchors_batch) unless anchors_batch is a single + BoxList, or if any element in gt_class_confidences_batch has rank > 2. + """ + if not isinstance(anchors_batch, list): + anchors_batch = len(gt_box_batch) * [anchors_batch] + if not all( + isinstance(anchors, box_list.BoxList) for anchors in anchors_batch): + raise ValueError('anchors_batch must be a BoxList or list of BoxLists.') + if not (len(anchors_batch) + == len(gt_box_batch) + == len(gt_class_confidences_batch)): + raise ValueError('batch size incompatible with lengths of anchors_batch, ' + 'gt_box_batch and gt_class_confidences_batch.') + + cls_targets_list = [] + cls_weights_list = [] + reg_targets_list = [] + reg_weights_list = [] + match_list = [] + if gt_weights_batch is None: + gt_weights_batch = [None] * len(gt_class_confidences_batch) + for anchors, gt_boxes, gt_class_confidences, gt_weights in zip( + anchors_batch, gt_box_batch, gt_class_confidences_batch, + gt_weights_batch): + + if (gt_class_confidences is not None and + len(gt_class_confidences.get_shape().as_list()) > 2): + raise ValueError('The shape of the class target is not supported. 
', + gt_class_confidences.get_shape()) + + cls_targets, _, reg_targets, _, match = target_assigner.assign( + anchors, gt_boxes, gt_class_confidences, unmatched_class_label, + groundtruth_weights=gt_weights) + + if include_background_class: + cls_targets_without_background = tf.slice( + cls_targets, [0, 1], [-1, -1]) + else: + cls_targets_without_background = cls_targets + + positive_mask = tf.greater(cls_targets_without_background, 0.0) + negative_mask = tf.less(cls_targets_without_background, 0.0) + explicit_example_mask = tf.logical_or(positive_mask, negative_mask) + positive_anchors = tf.reduce_any(positive_mask, axis=-1) + + regression_weights = tf.cast(positive_anchors, dtype=tf.float32) + regression_targets = ( + reg_targets * tf.expand_dims(regression_weights, axis=-1)) + regression_weights_expanded = tf.expand_dims(regression_weights, axis=-1) + + cls_targets_without_background = ( + cls_targets_without_background * + (1 - tf.cast(negative_mask, dtype=tf.float32))) + cls_weights_without_background = ((1 - implicit_class_weight) * tf.cast( + explicit_example_mask, dtype=tf.float32) + implicit_class_weight) + + if include_background_class: + cls_weights_background = ( + (1 - implicit_class_weight) * regression_weights_expanded + + implicit_class_weight) + classification_weights = tf.concat( + [cls_weights_background, cls_weights_without_background], axis=-1) + cls_targets_background = 1 - regression_weights_expanded + classification_targets = tf.concat( + [cls_targets_background, cls_targets_without_background], axis=-1) + else: + classification_targets = cls_targets_without_background + classification_weights = cls_weights_without_background + + cls_targets_list.append(classification_targets) + cls_weights_list.append(classification_weights) + reg_targets_list.append(regression_targets) + reg_weights_list.append(regression_weights) + match_list.append(match) + batch_cls_targets = tf.stack(cls_targets_list) + batch_cls_weights = tf.stack(cls_weights_list) + batch_reg_targets = tf.stack(reg_targets_list) + batch_reg_weights = tf.stack(reg_weights_list) + batch_match = tf.stack(match_list) + return (batch_cls_targets, batch_cls_weights, batch_reg_targets, + batch_reg_weights, batch_match) + + +def _smallest_positive_root(a, b, c): + """Returns the smallest positive root of a quadratic equation.""" + + discriminant = tf.sqrt(b ** 2 - 4 * a * c) + + # TODO(vighneshb) We are currently using the slightly incorrect + # CenterNet implementation. The commented lines implement the fixed version + # in https://github.com/princeton-vl/CornerNet. Change the implementation + # after verifying it has no negative impact. + # root1 = (-b - discriminant) / (2 * a) + # root2 = (-b + discriminant) / (2 * a) + + # return tf.where(tf.less(root1, 0), root2, root1) + + return (-b + discriminant) / (2.0) + + +def max_distance_for_overlap(height, width, min_iou): + """Computes how far apart bbox corners can lie while maintaining the iou. + + Given a bounding box size, this function returns a lower bound on how far + apart the corners of another box can lie while still maintaining the given + IoU. The implementation is based on the `gaussian_radius` function in the + Objects as Points github repo: https://github.com/xingyizhou/CenterNet + + Args: + height: A 1-D float Tensor representing height of the ground truth boxes. + width: A 1-D float Tensor representing width of the ground truth boxes. + min_iou: A float representing the minimum IoU desired. 
+ + Returns: + distance: A 1-D Tensor of distances, of the same length as the input + height and width tensors. + """ + + # Given that the detected box is displaced at a distance `d`, the exact + # IoU value will depend on the angle at which each corner is displaced. + # We simplify our computation by assuming that each corner is displaced by + # a distance `d` in both x and y direction. This gives us a lower IoU than + # what is actually realizable and ensures that any box with corners less + # than `d` distance apart will always have an IoU greater than or equal + # to `min_iou` + + # The following 3 cases can be worked on geometrically and come down to + # solving a quadratic inequality. In each case, to ensure `min_iou` we use + # the smallest positive root of the equation. + + # Case where detected box is offset from ground truth and no box completely + # contains the other. + + distance_detection_offset = _smallest_positive_root( + a=1, b=-(height + width), + c=width * height * ((1 - min_iou) / (1 + min_iou)) + ) + + # Case where detection is smaller than ground truth and completely contained + # in it. + distance_detection_in_gt = _smallest_positive_root( + a=4, b=-2 * (height + width), + c=(1 - min_iou) * width * height + ) + + # Case where ground truth is smaller than detection and completely contained + # in it. + distance_gt_in_detection = _smallest_positive_root( + a=4 * min_iou, b=(2 * min_iou) * (width + height), + c=(min_iou - 1) * width * height + ) + + return tf.reduce_min([distance_detection_offset, + distance_gt_in_detection, + distance_detection_in_gt], axis=0) + + +def get_batch_predictions_from_indices(batch_predictions, indices): + """Gets the values of predictions in a batch at the given indices. + + The indices are expected to come from the offset targets generation functions + in this library. The returned value is intended to be used inside a loss + function. + + Args: + batch_predictions: A tensor of shape [batch_size, height, width, 2] for + single class offsets and [batch_size, height, width, class, 2] for + multiple classes offsets (e.g. keypoint joint offsets) representing the + (height, width) or (y_offset, x_offset) predictions over a batch. + indices: A tensor of shape [num_instances, 3] for single class offset and + [num_instances, 4] for multiple classes offsets representing the indices + in the batch to be penalized in a loss function + + Returns: + values: A tensor of shape [num_instances, 2] holding the predicted values + at the given indices. + """ + return tf.gather_nd(batch_predictions, indices) + + +def _compute_std_dev_from_box_size(boxes_height, boxes_width, min_overlap): + """Computes the standard deviation of the Gaussian kernel from box size. + + Args: + boxes_height: A 1D tensor with shape [num_instances] representing the height + of each box. + boxes_width: A 1D tensor with shape [num_instances] representing the width + of each box. + min_overlap: The minimum IOU overlap that boxes need to have to not be + penalized. + + Returns: + A 1D tensor with shape [num_instances] representing the computed Gaussian + sigma for each of the box. + """ + # We are dividing by 3 so that points closer than the computed + # distance have a >99% CDF. 
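+  # Added note: max_distance_for_overlap returns the largest corner
+  # displacement that still guarantees `min_overlap` IoU; (2 * floor(sigma) + 1)
+  # below is the corresponding odd kernel window width, and dividing it by 6
+  # picks a standard deviation whose +/- 3 sigma range spans that window.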
+ sigma = max_distance_for_overlap(boxes_height, boxes_width, min_overlap) + sigma = (2 * tf.math.maximum(tf.math.floor(sigma), 0.0) + 1) / 6.0 + return sigma + + +class CenterNetCenterHeatmapTargetAssigner(object): + """Wrapper to compute the object center heatmap.""" + + def __init__(self, stride, min_overlap=0.7): + """Initializes the target assigner. + + Args: + stride: int, the stride of the network in output pixels. + min_overlap: The minimum IOU overlap that boxes need to have to not be + penalized. + """ + + self._stride = stride + self._min_overlap = min_overlap + + def assign_center_targets_from_boxes(self, + height, + width, + gt_boxes_list, + gt_classes_list, + gt_weights_list=None): + """Computes the object center heatmap target. + + Args: + height: int, height of input to the model. This is used to + determine the height of the output. + width: int, width of the input to the model. This is used to + determine the width of the output. + gt_boxes_list: A list of float tensors with shape [num_boxes, 4] + representing the groundtruth detection bounding boxes for each sample in + the batch. The box coordinates are expected in normalized coordinates. + gt_classes_list: A list of float tensors with shape [num_boxes, + num_classes] representing the one-hot encoded class labels for each box + in the gt_boxes_list. + gt_weights_list: A list of float tensors with shape [num_boxes] + representing the weight of each groundtruth detection box. + + Returns: + heatmap: A Tensor of size [batch_size, output_height, output_width, + num_classes] representing the per class center heatmap. output_height + and output_width are computed by dividing the input height and width by + the stride specified during initialization. + """ + + out_height = tf.cast(height // self._stride, tf.float32) + out_width = tf.cast(width // self._stride, tf.float32) + # Compute the yx-grid to be used to generate the heatmap. Each returned + # tensor has shape of [out_height, out_width] + (y_grid, x_grid) = ta_utils.image_shape_to_grids(out_height, out_width) + + heatmaps = [] + if gt_weights_list is None: + gt_weights_list = [None] * len(gt_boxes_list) + # TODO(vighneshb) Replace the for loop with a batch version. + for boxes, class_targets, weights in zip(gt_boxes_list, gt_classes_list, + gt_weights_list): + boxes = box_list.BoxList(boxes) + # Convert the box coordinates to absolute output image dimension space. + boxes = box_list_ops.to_absolute_coordinates(boxes, + height // self._stride, + width // self._stride) + # Get the box center coordinates. Each returned tensors have the shape of + # [num_instances] + (y_center, x_center, boxes_height, + boxes_width) = boxes.get_center_coordinates_and_sizes() + + # Compute the sigma from box size. The tensor shape: [num_instances]. + sigma = _compute_std_dev_from_box_size(boxes_height, boxes_width, + self._min_overlap) + # Apply the Gaussian kernel to the center coordinates. Returned heatmap + # has shape of [out_height, out_width, num_classes] + heatmap = ta_utils.coordinates_to_heatmap( + y_grid=y_grid, + x_grid=x_grid, + y_coordinates=y_center, + x_coordinates=x_center, + sigma=sigma, + channel_onehot=class_targets, + channel_weights=weights) + heatmaps.append(heatmap) + + # Return the stacked heatmaps over the batch. + return tf.stack(heatmaps, axis=0) + + +class CenterNetBoxTargetAssigner(object): + """Wrapper to compute target tensors for the object detection task. 
+ + This class has methods that take as input a batch of ground truth tensors + (in the form of a list) and return the targets required to train the object + detection task. + """ + + def __init__(self, stride): + """Initializes the target assigner. + + Args: + stride: int, the stride of the network in output pixels. + """ + + self._stride = stride + + def assign_size_and_offset_targets(self, + height, + width, + gt_boxes_list, + gt_weights_list=None): + """Returns the box height/width and center offset targets and their indices. + + The returned values are expected to be used with predicted tensors + of size (batch_size, height//self._stride, width//self._stride, 2). The + predicted values at the relevant indices can be retrieved with the + get_batch_predictions_from_indices function. + + Args: + height: int, height of input to the model. This is used to determine the + height of the output. + width: int, width of the input to the model. This is used to determine the + width of the output. + gt_boxes_list: A list of float tensors with shape [num_boxes, 4] + representing the groundtruth detection bounding boxes for each sample in + the batch. The coordinates are expected in normalized coordinates. + gt_weights_list: A list of tensors with shape [num_boxes] corresponding to + the weight of each groundtruth detection box. + + Returns: + batch_indices: an integer tensor of shape [num_boxes, 3] holding the + indices inside the predicted tensor which should be penalized. The + first column indicates the index along the batch dimension and the + second and third columns indicate the index along the y and x + dimensions respectively. + batch_box_height_width: a float tensor of shape [num_boxes, 2] holding + expected height and width of each box in the output space. + batch_offsets: a float tensor of shape [num_boxes, 2] holding the + expected y and x offset of each box in the output space. + batch_weights: a float tensor of shape [num_boxes] indicating the + weight of each prediction. + """ + + if gt_weights_list is None: + gt_weights_list = [None] * len(gt_boxes_list) + + batch_indices = [] + batch_box_height_width = [] + batch_weights = [] + batch_offsets = [] + + for i, (boxes, weights) in enumerate(zip(gt_boxes_list, gt_weights_list)): + boxes = box_list.BoxList(boxes) + boxes = box_list_ops.to_absolute_coordinates(boxes, + height // self._stride, + width // self._stride) + # Get the box center coordinates. Each returned tensors have the shape of + # [num_boxes] + (y_center, x_center, boxes_height, + boxes_width) = boxes.get_center_coordinates_and_sizes() + num_boxes = tf.shape(x_center) + + # Compute the offsets and indices of the box centers. Shape: + # offsets: [num_boxes, 2] + # indices: [num_boxes, 2] + (offsets, indices) = ta_utils.compute_floor_offsets_with_indices( + y_source=y_center, x_source=x_center) + + # Assign ones if weights are not provided. + if weights is None: + weights = tf.ones(num_boxes, dtype=tf.float32) + + # Shape of [num_boxes, 1] integer tensor filled with current batch index. 
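+      # As an illustration (values made up for this comment): a box whose
+      # center falls at (10.4, 6.1) in the output grid contributes the index
+      # row (i, 10, 6), the offset target (0.4, 0.1), and a height/width
+      # target equal to the box size measured in output pixels.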
+      batch_index = i * tf.ones_like(indices[:, 0:1], dtype=tf.int32)
+      batch_indices.append(tf.concat([batch_index, indices], axis=1))
+      batch_box_height_width.append(
+          tf.stack([boxes_height, boxes_width], axis=1))
+      batch_weights.append(weights)
+      batch_offsets.append(offsets)
+
+    batch_indices = tf.concat(batch_indices, axis=0)
+    batch_box_height_width = tf.concat(batch_box_height_width, axis=0)
+    batch_weights = tf.concat(batch_weights, axis=0)
+    batch_offsets = tf.concat(batch_offsets, axis=0)
+    return (batch_indices, batch_box_height_width, batch_offsets, batch_weights)
+
+
+# TODO(yuhuic): Update this class to handle the instance/keypoint weights.
+# Currently those weights are used as "mask" to indicate whether an
+# instance/keypoint should be considered or not (expecting only either 0 or 1
+# value). In reality, the weights can be any value and this class should handle
+# those values properly.
+class CenterNetKeypointTargetAssigner(object):
+  """Wrapper to compute target tensors for the CenterNet keypoint estimation.
+
+  This class has methods that take as input a batch of groundtruth tensors
+  (in the form of a list) and return the targets required to train the
+  CenterNet model for keypoint estimation. Specifically, the class methods
+  expect the groundtruth in the following formats (consistent with the
+  standard Object Detection API). Note that usually the groundtruth tensors are
+  packed in a list which represents the batch dimension:
+
+  gt_classes_list: [Required] a list of 2D tf.float32 one-hot
+    (or k-hot) tensors of shape [num_instances, num_classes] containing the
+    class targets with the 0th index assumed to map to the first non-background
+    class.
+  gt_keypoints_list: [Required] a list of 3D tf.float32 tensors of
+    shape [num_instances, num_total_keypoints, 2] containing keypoint
+    coordinates. Note that the "num_total_keypoints" should be the sum of the
+    num_keypoints over all possible keypoint types, e.g. human pose, face.
+    For example, if a dataset contains both 17 human pose keypoints and 5 face
+    keypoints, then num_total_keypoints = 17 + 5 = 22.
+    If an instance contains only a subset of keypoints (e.g. human pose
+    keypoints but not face keypoints), the face keypoints will be filled with
+    zeros.
+    Also note that keypoints are assumed to be provided in normalized
+    coordinates and missing keypoints should be encoded as NaN.
+  gt_keypoints_weights_list: [Optional] a list of 2D tf.float32 tensors of
+    shape [num_instances, num_total_keypoints] representing the weight of each
+    keypoint. If not provided, then all non-NaN keypoints will be equally
+    weighted.
+  gt_boxes_list: [Optional] a list of 2D tf.float32 tensors of shape
+    [num_instances, 4] containing coordinates of the groundtruth boxes.
+    Groundtruth boxes are provided in [y_min, x_min, y_max, x_max] format and
+    assumed to be normalized and clipped relative to the image window with
+    y_min <= y_max and x_min <= x_max.
+    Note that the boxes are only used to compute the center targets but are not
+    considered as required output of the keypoint task. If the boxes were not
+    provided, the center targets will be inferred from the keypoints
+    [not implemented yet].
+  gt_weights_list: [Optional] A list of 1D tf.float32 tensors of shape
+    [num_instances] containing weights for groundtruth boxes. Only useful when
+    gt_boxes_list is also provided.
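+
+  Illustrative usage (a minimal sketch; `keypoints`, `classes` and `boxes`
+  below are placeholder tensors in the formats described above, and the
+  constructor arguments are example values, not defaults):
+
+    assigner = CenterNetKeypointTargetAssigner(
+        stride=4, class_id=0, keypoint_indices=[0, 1, 2])
+    heatmap, num_instances, valid_mask = (
+        assigner.assign_keypoint_heatmap_targets(
+            height=128, width=128,
+            gt_keypoints_list=[keypoints], gt_classes_list=[classes],
+            gt_boxes_list=[boxes]))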
+
+  """
+
+  def __init__(self,
+               stride,
+               class_id,
+               keypoint_indices,
+               keypoint_std_dev=None,
+               per_keypoint_offset=False,
+               peak_radius=0):
+    """Initializes a CenterNet keypoints target assigner.
+
+    Args:
+      stride: int, the stride of the network in output pixels.
+      class_id: int, the ID of the class (0-indexed) that contains the target
+        keypoints to consider in this task. For example, if the task is human
+        pose estimation, the class id should correspond to the "human" class.
+      keypoint_indices: A list of integers representing the indices of the
+        keypoints to be considered in this task. This is used to retrieve the
+        subset of the keypoints from gt_keypoints that should be considered in
+        this task.
+      keypoint_std_dev: A list of floats representing the standard deviation of
+        the Gaussian kernel used to generate the keypoint heatmap (in the unit
+        of output pixels). It provides the flexibility of using a different
+        size of Gaussian kernel for each keypoint type. If not provided, then
+        all standard deviations will be the same as the default value (10.0 in
+        the output pixel space). If provided, the length of keypoint_std_dev
+        needs to be the same as the length of keypoint_indices, indicating the
+        standard deviation of each keypoint type.
+      per_keypoint_offset: boolean, indicating whether to assign offsets for
+        each keypoint channel. If set False, the output offset target will have
+        the shape [batch_size, out_height, out_width, 2]. If set True, the
+        output offset target will have the shape [batch_size, out_height,
+        out_width, 2 * num_keypoints].
+      peak_radius: int, the radius (in the unit of output pixels) around the
+        heatmap peak to assign the offset targets.
+    """
+
+    self._stride = stride
+    self._class_id = class_id
+    self._keypoint_indices = keypoint_indices
+    self._per_keypoint_offset = per_keypoint_offset
+    self._peak_radius = peak_radius
+    if keypoint_std_dev is None:
+      self._keypoint_std_dev = ([_DEFAULT_KEYPOINT_OFFSET_STD_DEV] *
+                                len(keypoint_indices))
+    else:
+      assert len(keypoint_indices) == len(keypoint_std_dev)
+      self._keypoint_std_dev = keypoint_std_dev
+
+  def _preprocess_keypoints_and_weights(self, out_height, out_width, keypoints,
+                                        class_onehot, class_weights,
+                                        keypoint_weights):
+    """Preprocesses the keypoints and the corresponding keypoint weights.
+
+    This function performs several common steps to preprocess the keypoints and
+    keypoint weights features, including:
+    1) Select the subset of keypoints based on the keypoint indices, fill the
+       keypoint NaN values with zeros and convert to absolute coordinates.
+    2) Generate the weights of the keypoints using the following information:
+       a. The class of the instance.
+       b. The NaN values of the keypoint coordinates.
+       c. The provided keypoint weights.
+
+    Args:
+      out_height: An integer or an integer tensor indicating the output height
+        of the model.
+      out_width: An integer or an integer tensor indicating the output width of
+        the model.
+      keypoints: A float tensor of shape [num_instances, num_total_keypoints, 2]
+        representing the original keypoint groundtruth coordinates.
+      class_onehot: A float tensor of shape [num_instances, num_classes]
+        containing the class targets with the 0th index assumed to map to the
+        first non-background class.
+      class_weights: A float tensor of shape [num_instances] containing weights
+        for groundtruth instances.
+      keypoint_weights: A float tensor of shape
+        [num_instances, num_total_keypoints] representing the weight of each
+        keypoint.
+ + Returns: + A tuple of two tensors: + keypoint_absolute: A float tensor of shape + [num_instances, num_keypoints, 2] which is the selected and updated + keypoint coordinates. + keypoint_weights: A float tensor of shape [num_instances, num_keypoints] + representing the updated weight of each keypoint. + """ + # Select the targets keypoints by their type ids and generate the mask + # of valid elements. + valid_mask, keypoints = ta_utils.get_valid_keypoint_mask_for_class( + keypoint_coordinates=keypoints, + class_id=self._class_id, + class_onehot=class_onehot, + class_weights=class_weights, + keypoint_indices=self._keypoint_indices) + # Keypoint coordinates in absolute coordinate system. + # The shape of the tensors: [num_instances, num_keypoints, 2]. + keypoints_absolute = keypoint_ops.to_absolute_coordinates( + keypoints, out_height, out_width) + # Assign default weights for the keypoints. + if keypoint_weights is None: + keypoint_weights = tf.ones_like(keypoints[:, :, 0]) + else: + keypoint_weights = tf.gather( + keypoint_weights, indices=self._keypoint_indices, axis=1) + keypoint_weights = keypoint_weights * valid_mask + return keypoints_absolute, keypoint_weights + + def assign_keypoint_heatmap_targets(self, + height, + width, + gt_keypoints_list, + gt_classes_list, + gt_keypoints_weights_list=None, + gt_weights_list=None, + gt_boxes_list=None): + """Returns the keypoint heatmap targets for the CenterNet model. + + Args: + height: int, height of input to the CenterNet model. This is used to + determine the height of the output. + width: int, width of the input to the CenterNet model. This is used to + determine the width of the output. + gt_keypoints_list: A list of float tensors with shape [num_instances, + num_total_keypoints, 2]. See class-level description for more detail. + gt_classes_list: A list of float tensors with shape [num_instances, + num_classes]. See class-level description for more detail. + gt_keypoints_weights_list: A list of tensors with shape [num_instances, + num_total_keypoints] corresponding to the weight of each keypoint. + gt_weights_list: A list of float tensors with shape [num_instances]. See + class-level description for more detail. + gt_boxes_list: A list of float tensors with shape [num_instances, 4]. See + class-level description for more detail. If provided, the keypoint + standard deviations will be scaled based on the box sizes. + + Returns: + heatmap: A float tensor of shape [batch_size, output_height, output_width, + num_keypoints] representing the per keypoint type center heatmap. + output_height and output_width are computed by dividing the input height + and width by the stride specified during initialization. Note that the + "num_keypoints" is defined by the length of keypoint_indices, which is + not necessarily equal to "num_total_keypoints". + num_instances_batch: A 2D int tensor of shape + [batch_size, num_keypoints] representing number of instances for each + keypoint type. + valid_mask: A float tensor with shape [batch_size, output_height, + output_width] where all values within the regions of the blackout boxes + are 0.0 and 1.0 else where. + """ + out_width = tf.cast(width // self._stride, tf.float32) + out_height = tf.cast(height // self._stride, tf.float32) + # Compute the yx-grid to be used to generate the heatmap. 
Each returned + # tensor has shape of [out_height, out_width] + y_grid, x_grid = ta_utils.image_shape_to_grids(out_height, out_width) + + if gt_keypoints_weights_list is None: + gt_keypoints_weights_list = [None] * len(gt_keypoints_list) + if gt_weights_list is None: + gt_weights_list = [None] * len(gt_classes_list) + if gt_boxes_list is None: + gt_boxes_list = [None] * len(gt_keypoints_list) + + heatmaps = [] + num_instances_list = [] + valid_mask_list = [] + for keypoints, classes, kp_weights, weights, boxes in zip( + gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list, + gt_weights_list, gt_boxes_list): + keypoints_absolute, kp_weights = self._preprocess_keypoints_and_weights( + out_height=out_height, + out_width=out_width, + keypoints=keypoints, + class_onehot=classes, + class_weights=weights, + keypoint_weights=kp_weights) + num_instances, num_keypoints, _ = ( + shape_utils.combined_static_and_dynamic_shape(keypoints_absolute)) + + # A tensor of shape [num_instances, num_keypoints] with + # each element representing the type dimension for each corresponding + # keypoint: + # [[0, 1, ..., k-1], + # [0, 1, ..., k-1], + # : + # [0, 1, ..., k-1]] + keypoint_types = tf.tile( + input=tf.expand_dims(tf.range(num_keypoints), axis=0), + multiples=[num_instances, 1]) + + # A tensor of shape [num_instances, num_keypoints] with + # each element representing the sigma of the Gaussian kernel for each + # keypoint. + keypoint_std_dev = tf.tile( + input=tf.expand_dims(tf.constant(self._keypoint_std_dev), axis=0), + multiples=[num_instances, 1]) + + # If boxes is not None, then scale the standard deviation based on the + # size of the object bounding boxes similar to object center heatmap. + if boxes is not None: + boxes = box_list.BoxList(boxes) + # Convert the box coordinates to absolute output image dimension space. + boxes = box_list_ops.to_absolute_coordinates(boxes, + height // self._stride, + width // self._stride) + # Get the box height and width. Each returned tensors have the shape + # of [num_instances] + (_, _, boxes_height, + boxes_width) = boxes.get_center_coordinates_and_sizes() + + # Compute the sigma from box size. The tensor shape: [num_instances]. + sigma = _compute_std_dev_from_box_size(boxes_height, boxes_width, 0.7) + keypoint_std_dev = keypoint_std_dev * tf.stack( + [sigma] * num_keypoints, axis=1) + + # Generate the valid region mask to ignore regions with target class but + # no corresponding keypoints. + # Shape: [num_instances]. + blackout = tf.logical_and(classes[:, self._class_id] > 0, + tf.reduce_max(kp_weights, axis=1) < 1e-3) + valid_mask = ta_utils.blackout_pixel_weights_by_box_regions( + out_height, out_width, boxes.get(), blackout) + valid_mask_list.append(valid_mask) + + # Apply the Gaussian kernel to the keypoint coordinates. Returned heatmap + # has shape of [out_height, out_width, num_keypoints]. 
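+      # Note that the per-instance keypoints are flattened into a single list
+      # of coordinates and the keypoint type index is used as the channel
+      # (via the one-hot encoding below), so keypoints of the same type from
+      # different instances are rendered into the same heatmap channel.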
+ heatmap = ta_utils.coordinates_to_heatmap( + y_grid=y_grid, + x_grid=x_grid, + y_coordinates=tf.keras.backend.flatten(keypoints_absolute[:, :, 0]), + x_coordinates=tf.keras.backend.flatten(keypoints_absolute[:, :, 1]), + sigma=tf.keras.backend.flatten(keypoint_std_dev), + channel_onehot=tf.one_hot( + tf.keras.backend.flatten(keypoint_types), depth=num_keypoints), + channel_weights=tf.keras.backend.flatten(kp_weights)) + num_instances_list.append( + tf.cast(tf.reduce_sum(kp_weights, axis=0), dtype=tf.int32)) + heatmaps.append(heatmap) + return (tf.stack(heatmaps, axis=0), tf.stack(num_instances_list, axis=0), + tf.stack(valid_mask_list, axis=0)) + + def _get_keypoint_types(self, num_instances, num_keypoints, num_neighbors): + """Gets keypoint type index tensor. + + The function prepares the tensor of keypoint indices with shape + [num_instances, num_keypoints, num_neighbors]. Each element represents the + keypoint type index for each corresponding keypoint and tiled along the 3rd + axis: + [[0, 1, ..., num_keypoints - 1], + [0, 1, ..., num_keypoints - 1], + : + [0, 1, ..., num_keypoints - 1]] + + Args: + num_instances: int, the number of instances, used to define the 1st + dimension. + num_keypoints: int, the number of keypoint types, used to define the 2nd + dimension. + num_neighbors: int, the number of neighborhood pixels to consider for each + keypoint, used to define the 3rd dimension. + + Returns: + A integer tensor of shape [num_instances, num_keypoints, num_neighbors]. + """ + keypoint_types = tf.range(num_keypoints)[tf.newaxis, :, tf.newaxis] + tiled_keypoint_types = tf.tile(keypoint_types, + multiples=[num_instances, 1, num_neighbors]) + return tiled_keypoint_types + + def assign_keypoints_offset_targets(self, + height, + width, + gt_keypoints_list, + gt_classes_list, + gt_keypoints_weights_list=None, + gt_weights_list=None): + """Returns the offsets and indices of the keypoints for location refinement. + + The returned values are used to refine the location of each keypoints in the + heatmap. The predicted values at the relevant indices can be retrieved with + the get_batch_predictions_from_indices function. + + Args: + height: int, height of input to the CenterNet model. This is used to + determine the height of the output. + width: int, width of the input to the CenterNet model. This is used to + determine the width of the output. + gt_keypoints_list: A list of tensors with shape [num_instances, + num_total_keypoints]. See class-level description for more detail. + gt_classes_list: A list of tensors with shape [num_instances, + num_classes]. See class-level description for more detail. + gt_keypoints_weights_list: A list of tensors with shape [num_instances, + num_total_keypoints] corresponding to the weight of each keypoint. + gt_weights_list: A list of float tensors with shape [num_instances]. See + class-level description for more detail. + + Returns: + batch_indices: an integer tensor of shape [num_total_instances, 3] (or + [num_total_instances, 4] if 'per_keypoint_offset' is set True) holding + the indices inside the predicted tensor which should be penalized. The + first column indicates the index along the batch dimension and the + second and third columns indicate the index along the y and x + dimensions respectively. The fourth column corresponds to the channel + dimension (if 'per_keypoint_offset' is set True). + batch_offsets: a float tensor of shape [num_total_instances, 2] holding + the expected y and x offset of each box in the output space. 
+ batch_weights: a float tensor of shape [num_total_instances] indicating + the weight of each prediction. + Note that num_total_instances = batch_size * num_instances * + num_keypoints * num_neighbors + """ + + batch_indices = [] + batch_offsets = [] + batch_weights = [] + + if gt_keypoints_weights_list is None: + gt_keypoints_weights_list = [None] * len(gt_keypoints_list) + if gt_weights_list is None: + gt_weights_list = [None] * len(gt_classes_list) + for i, (keypoints, classes, kp_weights, weights) in enumerate( + zip(gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list, + gt_weights_list)): + keypoints_absolute, kp_weights = self._preprocess_keypoints_and_weights( + out_height=height // self._stride, + out_width=width // self._stride, + keypoints=keypoints, + class_onehot=classes, + class_weights=weights, + keypoint_weights=kp_weights) + num_instances, num_keypoints, _ = ( + shape_utils.combined_static_and_dynamic_shape(keypoints_absolute)) + + # [num_instances * num_keypoints] + y_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 0]) + x_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 1]) + + # All keypoint coordinates and their neighbors: + # [num_instance * num_keypoints, num_neighbors] + (y_source_neighbors, x_source_neighbors, + valid_sources) = ta_utils.get_surrounding_grids(height // self._stride, + width // self._stride, + y_source, x_source, + self._peak_radius) + _, num_neighbors = shape_utils.combined_static_and_dynamic_shape( + y_source_neighbors) + + # Update the valid keypoint weights. + # [num_instance * num_keypoints, num_neighbors] + valid_keypoints = tf.cast( + valid_sources, dtype=tf.float32) * tf.stack( + [tf.keras.backend.flatten(kp_weights)] * num_neighbors, axis=-1) + + # Compute the offsets and indices of the box centers. Shape: + # offsets: [num_instances * num_keypoints, num_neighbors, 2] + # indices: [num_instances * num_keypoints, num_neighbors, 2] + offsets, indices = ta_utils.compute_floor_offsets_with_indices( + y_source=y_source_neighbors, + x_source=x_source_neighbors, + y_target=y_source, + x_target=x_source) + # Reshape to: + # offsets: [num_instances * num_keypoints * num_neighbors, 2] + # indices: [num_instances * num_keypoints * num_neighbors, 2] + offsets = tf.reshape(offsets, [-1, 2]) + indices = tf.reshape(indices, [-1, 2]) + + # Prepare the batch indices to be prepended. + batch_index = tf.fill( + [num_instances * num_keypoints * num_neighbors, 1], i) + if self._per_keypoint_offset: + tiled_keypoint_types = self._get_keypoint_types( + num_instances, num_keypoints, num_neighbors) + batch_indices.append( + tf.concat([batch_index, indices, + tf.reshape(tiled_keypoint_types, [-1, 1])], axis=1)) + else: + batch_indices.append(tf.concat([batch_index, indices], axis=1)) + batch_offsets.append(offsets) + batch_weights.append(tf.keras.backend.flatten(valid_keypoints)) + + # Concatenate the tensors in the batch in the first dimension: + # shape: [batch_size * num_instances * num_keypoints * num_neighbors, 3] or + # [batch_size * num_instances * num_keypoints * num_neighbors, 4] if + # 'per_keypoint_offset' is set to True. 
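+    # When peak_radius is 0 (the default) each keypoint has a single neighbor,
+    # its own floored pixel, so num_neighbors is 1 and the concatenation below
+    # reduces to one row per (instance, keypoint) pair.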
+    batch_indices = tf.concat(batch_indices, axis=0)
+    # shape: [batch_size * num_instances * num_keypoints * num_neighbors]
+    batch_weights = tf.concat(batch_weights, axis=0)
+    # shape: [batch_size * num_instances * num_keypoints * num_neighbors, 2]
+    batch_offsets = tf.concat(batch_offsets, axis=0)
+    return (batch_indices, batch_offsets, batch_weights)
+
+  def assign_joint_regression_targets(self,
+                                      height,
+                                      width,
+                                      gt_keypoints_list,
+                                      gt_classes_list,
+                                      gt_boxes_list=None,
+                                      gt_keypoints_weights_list=None,
+                                      gt_weights_list=None):
+    """Returns the joint regression from center grid to keypoints.
+
+    The joint regression is used as the grouping cue from the estimated
+    keypoints to the instance center. The offsets are the vectors from the
+    floored object center coordinates to the keypoint coordinates.
+
+    Args:
+      height: int, height of input to the CenterNet model. This is used to
+        determine the height of the output.
+      width: int, width of the input to the CenterNet model. This is used to
+        determine the width of the output.
+      gt_keypoints_list: A list of float tensors with shape [num_instances,
+        num_total_keypoints, 2]. See class-level description for more detail.
+      gt_classes_list: A list of float tensors with shape [num_instances,
+        num_classes]. See class-level description for more detail.
+      gt_boxes_list: A list of float tensors with shape [num_instances, 4]. See
+        class-level description for more detail. If provided, then the center
+        targets will be computed based on the center of the boxes.
+      gt_keypoints_weights_list: A list of float tensors with shape
+        [num_instances, num_total_keypoints] representing the weight of each
+        keypoint.
+      gt_weights_list: A list of float tensors with shape [num_instances]. See
+        class-level description for more detail.
+
+    Returns:
+      batch_indices: an integer tensor of shape [num_total_instances, 4] holding
+        the indices inside the predicted tensor which should be penalized. The
+        first column indicates the index along the batch dimension and the
+        second and third columns indicate the index along the y and x
+        dimensions respectively; the last column refers to the keypoint type
+        dimension.
+      batch_offsets: a float tensor of shape [num_total_instances, 2] holding
+        the expected y and x offset of each box in the output space.
+      batch_weights: a float tensor of shape [num_total_instances] indicating
+        the weight of each prediction.
+      Note that num_total_instances = batch_size * num_instances * num_keypoints
+
+    Raises:
+      NotImplementedError: currently the object center coordinates need to be
+        computed from groundtruth bounding boxes. The functionality of
+        generating the object center coordinates from keypoints is not
+        implemented yet.
+ """ + + batch_indices = [] + batch_offsets = [] + batch_weights = [] + batch_size = len(gt_keypoints_list) + if gt_keypoints_weights_list is None: + gt_keypoints_weights_list = [None] * batch_size + if gt_boxes_list is None: + gt_boxes_list = [None] * batch_size + if gt_weights_list is None: + gt_weights_list = [None] * len(gt_classes_list) + for i, (keypoints, classes, boxes, kp_weights, weights) in enumerate( + zip(gt_keypoints_list, gt_classes_list, + gt_boxes_list, gt_keypoints_weights_list, gt_weights_list)): + keypoints_absolute, kp_weights = self._preprocess_keypoints_and_weights( + out_height=height // self._stride, + out_width=width // self._stride, + keypoints=keypoints, + class_onehot=classes, + class_weights=weights, + keypoint_weights=kp_weights) + num_instances, num_keypoints, _ = ( + shape_utils.combined_static_and_dynamic_shape(keypoints_absolute)) + + # If boxes are provided, compute the joint center from it. + if boxes is not None: + # Compute joint center from boxes. + boxes = box_list.BoxList(boxes) + boxes = box_list_ops.to_absolute_coordinates(boxes, + height // self._stride, + width // self._stride) + y_center, x_center, _, _ = boxes.get_center_coordinates_and_sizes() + else: + # TODO(yuhuic): Add the logic to generate object centers from keypoints. + raise NotImplementedError(( + 'The functionality of generating object centers from keypoints is' + ' not implemented yet. Please provide groundtruth bounding boxes.' + )) + + # Tile the yx center coordinates to be the same shape as keypoints. + y_center_tiled = tf.tile( + tf.reshape(y_center, shape=[num_instances, 1]), + multiples=[1, num_keypoints]) + x_center_tiled = tf.tile( + tf.reshape(x_center, shape=[num_instances, 1]), + multiples=[1, num_keypoints]) + # [num_instance * num_keypoints, num_neighbors] + (y_source_neighbors, x_source_neighbors, + valid_sources) = ta_utils.get_surrounding_grids( + height // self._stride, width // self._stride, + tf.keras.backend.flatten(y_center_tiled), + tf.keras.backend.flatten(x_center_tiled), self._peak_radius) + + _, num_neighbors = shape_utils.combined_static_and_dynamic_shape( + y_source_neighbors) + valid_keypoints = tf.cast( + valid_sources, dtype=tf.float32) * tf.stack( + [tf.keras.backend.flatten(kp_weights)] * num_neighbors, axis=-1) + + # Compute the offsets and indices of the box centers. Shape: + # offsets: [num_instances * num_keypoints, 2] + # indices: [num_instances * num_keypoints, 2] + (offsets, indices) = ta_utils.compute_floor_offsets_with_indices( + y_source=y_source_neighbors, + x_source=x_source_neighbors, + y_target=tf.keras.backend.flatten(keypoints_absolute[:, :, 0]), + x_target=tf.keras.backend.flatten(keypoints_absolute[:, :, 1])) + # Reshape to: + # offsets: [num_instances * num_keypoints * num_neighbors, 2] + # indices: [num_instances * num_keypoints * num_neighbors, 2] + offsets = tf.reshape(offsets, [-1, 2]) + indices = tf.reshape(indices, [-1, 2]) + + # keypoint type tensor: [num_instances, num_keypoints, num_neighbors]. 
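+      # The keypoint type becomes the fourth column of batch_indices below, so
+      # each regressed joint offset is penalized in its own keypoint channel of
+      # the prediction tensor.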
+ tiled_keypoint_types = self._get_keypoint_types( + num_instances, num_keypoints, num_neighbors) + + batch_index = tf.fill( + [num_instances * num_keypoints * num_neighbors, 1], i) + batch_indices.append( + tf.concat([batch_index, indices, + tf.reshape(tiled_keypoint_types, [-1, 1])], axis=1)) + batch_offsets.append(offsets) + batch_weights.append(tf.keras.backend.flatten(valid_keypoints)) + + # Concatenate the tensors in the batch in the first dimension: + # shape: [batch_size * num_instances * num_keypoints, 4] + batch_indices = tf.concat(batch_indices, axis=0) + # shape: [batch_size * num_instances * num_keypoints] + batch_weights = tf.concat(batch_weights, axis=0) + # shape: [batch_size * num_instances * num_keypoints, 2] + batch_offsets = tf.concat(batch_offsets, axis=0) + return (batch_indices, batch_offsets, batch_weights) + + +class CenterNetMaskTargetAssigner(object): + """Wrapper to compute targets for segmentation masks.""" + + def __init__(self, stride): + self._stride = stride + + def assign_segmentation_targets( + self, gt_masks_list, gt_classes_list, + mask_resize_method=ResizeMethod.BILINEAR): + """Computes the segmentation targets. + + This utility produces a semantic segmentation mask for each class, starting + with whole image instance segmentation masks. Effectively, each per-class + segmentation target is the union of all masks from that class. + + Args: + gt_masks_list: A list of float tensors with shape [num_boxes, + input_height, input_width] with values in {0, 1} representing instance + masks for each object. + gt_classes_list: A list of float tensors with shape [num_boxes, + num_classes] representing the one-hot encoded class labels for each box + in the gt_boxes_list. + mask_resize_method: A `tf.compat.v2.image.ResizeMethod`. The method to use + when resizing masks from input resolution to output resolution. + + Returns: + segmentation_targets: An int32 tensor of size [batch_size, output_height, + output_width, num_classes] representing the class of each location in + the output space. + """ + # TODO(ronnyvotel): Handle groundtruth weights. + _, num_classes = shape_utils.combined_static_and_dynamic_shape( + gt_classes_list[0]) + + _, input_height, input_width = ( + shape_utils.combined_static_and_dynamic_shape(gt_masks_list[0])) + output_height = input_height // self._stride + output_width = input_width // self._stride + + segmentation_targets_list = [] + for gt_masks, gt_classes in zip(gt_masks_list, gt_classes_list): + # Resize segmentation masks to conform to output dimensions. Use TF2 + # image resize because TF1's version is buggy: + # https://yaqs.corp.google.com/eng/q/4970450458378240 + gt_masks = tf2.image.resize( + gt_masks[:, :, :, tf.newaxis], + size=(output_height, output_width), + method=mask_resize_method) + gt_classes_reshaped = tf.reshape(gt_classes, [-1, 1, 1, num_classes]) + # Shape: [h, w, num_classes]. + segmentations_for_image = tf.reduce_max( + gt_masks * gt_classes_reshaped, axis=0) + segmentation_targets_list.append(segmentations_for_image) + + segmentation_target = tf.stack(segmentation_targets_list, axis=0) + return segmentation_target diff --git a/models/research/object_detection/core/target_assigner_test.py b/models/research/object_detection/core/target_assigner_test.py new file mode 100644 index 0000000000000000000000000000000000000000..5a0ca43e558beac27076bce02ae8f62d6624d6f7 --- /dev/null +++ b/models/research/object_detection/core/target_assigner_test.py @@ -0,0 +1,1911 @@ +# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.core.target_assigner.""" +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.box_coders import keypoint_box_coder +from object_detection.box_coders import mean_stddev_box_coder +from object_detection.core import box_list +from object_detection.core import region_similarity_calculator +from object_detection.core import standard_fields as fields +from object_detection.core import target_assigner as targetassigner +from object_detection.matchers import argmax_matcher +from object_detection.utils import np_box_ops +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +class TargetAssignerTest(test_case.TestCase): + + def test_assign_agnostic(self): + def graph_fn(anchor_means, groundtruth_box_corners): + similarity_calc = region_similarity_calculator.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, + unmatched_threshold=0.5) + box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) + target_assigner = targetassigner.TargetAssigner( + similarity_calc, matcher, box_coder) + anchors_boxlist = box_list.BoxList(anchor_means) + groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) + result = target_assigner.assign( + anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None) + (cls_targets, cls_weights, reg_targets, reg_weights, _) = result + return (cls_targets, cls_weights, reg_targets, reg_weights) + + anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 1.0, 0.8], + [0, 0.5, .5, 1.0]], dtype=np.float32) + groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.9, 0.9]], + dtype=np.float32) + exp_cls_targets = [[1], [1], [0]] + exp_cls_weights = [[1], [1], [1]] + exp_reg_targets = [[0, 0, 0, 0], + [0, 0, -1, 1], + [0, 0, 0, 0]] + exp_reg_weights = [1, 1, 0] + + (cls_targets_out, + cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( + graph_fn, [anchor_means, groundtruth_box_corners]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + self.assertEqual(cls_targets_out.dtype, np.float32) + self.assertEqual(cls_weights_out.dtype, np.float32) + self.assertEqual(reg_targets_out.dtype, np.float32) + self.assertEqual(reg_weights_out.dtype, np.float32) + + def test_assign_class_agnostic_with_ignored_matches(self): + # Note: test is very similar to above. The third box matched with an IOU + # of 0.35, which is between the matched and unmatched threshold. This means + # That like above the expected classification targets are [1, 1, 0]. + # Unlike above, the third target is ignored and therefore expected + # classification weights are [1, 1, 0]. 
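+    # With matched_threshold=0.5 and unmatched_threshold=0.3 below, an anchor
+    # whose best IOU falls between the two thresholds is ignored by the
+    # ArgMaxMatcher: it keeps the background classification target but receives
+    # a classification weight of 0 so it does not contribute to the loss.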
+ def graph_fn(anchor_means, groundtruth_box_corners): + similarity_calc = region_similarity_calculator.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, + unmatched_threshold=0.3) + box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) + target_assigner = targetassigner.TargetAssigner( + similarity_calc, matcher, box_coder) + anchors_boxlist = box_list.BoxList(anchor_means) + groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) + result = target_assigner.assign( + anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None) + (cls_targets, cls_weights, reg_targets, reg_weights, _) = result + return (cls_targets, cls_weights, reg_targets, reg_weights) + + anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 1.0, 0.8], + [0.0, 0.5, .9, 1.0]], dtype=np.float32) + groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.9, 0.9]], dtype=np.float32) + exp_cls_targets = [[1], [1], [0]] + exp_cls_weights = [[1], [1], [0]] + exp_reg_targets = [[0, 0, 0, 0], + [0, 0, -1, 1], + [0, 0, 0, 0]] + exp_reg_weights = [1, 1, 0] + (cls_targets_out, + cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( + graph_fn, [anchor_means, groundtruth_box_corners]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + self.assertEqual(cls_targets_out.dtype, np.float32) + self.assertEqual(cls_weights_out.dtype, np.float32) + self.assertEqual(reg_targets_out.dtype, np.float32) + self.assertEqual(reg_weights_out.dtype, np.float32) + + def test_assign_agnostic_with_keypoints(self): + def graph_fn(anchor_means, groundtruth_box_corners, + groundtruth_keypoints): + similarity_calc = region_similarity_calculator.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, + unmatched_threshold=0.5) + box_coder = keypoint_box_coder.KeypointBoxCoder( + num_keypoints=6, scale_factors=[10.0, 10.0, 5.0, 5.0]) + target_assigner = targetassigner.TargetAssigner( + similarity_calc, matcher, box_coder) + anchors_boxlist = box_list.BoxList(anchor_means) + groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) + groundtruth_boxlist.add_field(fields.BoxListFields.keypoints, + groundtruth_keypoints) + result = target_assigner.assign( + anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None) + (cls_targets, cls_weights, reg_targets, reg_weights, _) = result + return (cls_targets, cls_weights, reg_targets, reg_weights) + + anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 1.0, 1.0], + [0.0, 0.5, .9, 1.0]], dtype=np.float32) + groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], + [0.45, 0.45, 0.95, 0.95]], + dtype=np.float32) + groundtruth_keypoints = np.array( + [[[0.1, 0.2], [0.1, 0.3], [0.2, 0.2], [0.2, 0.2], [0.1, 0.1], [0.9, 0]], + [[0, 0.3], [0.2, 0.4], [0.5, 0.6], [0, 0.6], [0.8, 0.2], [0.2, 0.4]]], + dtype=np.float32) + exp_cls_targets = [[1], [1], [0]] + exp_cls_weights = [[1], [1], [1]] + exp_reg_targets = [[0, 0, 0, 0, -3, -1, -3, 1, -1, -1, -1, -1, -3, -3, 13, + -5], + [-1, -1, 0, 0, -15, -9, -11, -7, -5, -3, -15, -3, 1, -11, + -11, -7], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] + exp_reg_weights = [1, 1, 0] + (cls_targets_out, cls_weights_out, reg_targets_out, + reg_weights_out) = self.execute(graph_fn, [anchor_means, + groundtruth_box_corners, + groundtruth_keypoints]) + 
self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + self.assertEqual(cls_targets_out.dtype, np.float32) + self.assertEqual(cls_weights_out.dtype, np.float32) + self.assertEqual(reg_targets_out.dtype, np.float32) + self.assertEqual(reg_weights_out.dtype, np.float32) + + def test_assign_class_agnostic_with_keypoints_and_ignored_matches(self): + # Note: test is very similar to above. The third box matched with an IOU + # of 0.35, which is between the matched and unmatched threshold. This means + # That like above the expected classification targets are [1, 1, 0]. + # Unlike above, the third target is ignored and therefore expected + # classification weights are [1, 1, 0]. + def graph_fn(anchor_means, groundtruth_box_corners, + groundtruth_keypoints): + similarity_calc = region_similarity_calculator.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, + unmatched_threshold=0.5) + box_coder = keypoint_box_coder.KeypointBoxCoder( + num_keypoints=6, scale_factors=[10.0, 10.0, 5.0, 5.0]) + target_assigner = targetassigner.TargetAssigner( + similarity_calc, matcher, box_coder) + anchors_boxlist = box_list.BoxList(anchor_means) + groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) + groundtruth_boxlist.add_field(fields.BoxListFields.keypoints, + groundtruth_keypoints) + result = target_assigner.assign( + anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None) + (cls_targets, cls_weights, reg_targets, reg_weights, _) = result + return (cls_targets, cls_weights, reg_targets, reg_weights) + + anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 1.0, 1.0], + [0.0, 0.5, .9, 1.0]], dtype=np.float32) + groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], + [0.45, 0.45, 0.95, 0.95]], + dtype=np.float32) + groundtruth_keypoints = np.array( + [[[0.1, 0.2], [0.1, 0.3], [0.2, 0.2], [0.2, 0.2], [0.1, 0.1], [0.9, 0]], + [[0, 0.3], [0.2, 0.4], [0.5, 0.6], [0, 0.6], [0.8, 0.2], [0.2, 0.4]]], + dtype=np.float32) + exp_cls_targets = [[1], [1], [0]] + exp_cls_weights = [[1], [1], [1]] + exp_reg_targets = [[0, 0, 0, 0, -3, -1, -3, 1, -1, -1, -1, -1, -3, -3, 13, + -5], + [-1, -1, 0, 0, -15, -9, -11, -7, -5, -3, -15, -3, 1, -11, + -11, -7], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] + exp_reg_weights = [1, 1, 0] + (cls_targets_out, cls_weights_out, reg_targets_out, + reg_weights_out) = self.execute(graph_fn, [anchor_means, + groundtruth_box_corners, + groundtruth_keypoints]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + self.assertEqual(cls_targets_out.dtype, np.float32) + self.assertEqual(cls_weights_out.dtype, np.float32) + self.assertEqual(reg_targets_out.dtype, np.float32) + self.assertEqual(reg_weights_out.dtype, np.float32) + + def test_assign_multiclass(self): + + def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels): + similarity_calc = region_similarity_calculator.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, + unmatched_threshold=0.5) + box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) + unmatched_class_label = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32) + target_assigner = targetassigner.TargetAssigner( + similarity_calc, 
matcher, box_coder) + + anchors_boxlist = box_list.BoxList(anchor_means) + groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) + result = target_assigner.assign( + anchors_boxlist, + groundtruth_boxlist, + groundtruth_labels, + unmatched_class_label=unmatched_class_label) + (cls_targets, cls_weights, reg_targets, reg_weights, _) = result + return (cls_targets, cls_weights, reg_targets, reg_weights) + + anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 1.0, 0.8], + [0, 0.5, .5, 1.0], + [.75, 0, 1.0, .25]], dtype=np.float32) + groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.9, 0.9], + [.75, 0, .95, .27]], dtype=np.float32) + groundtruth_labels = np.array([[0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 1, 0, 0, 0]], dtype=np.float32) + + exp_cls_targets = [[0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0], + [1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0]] + exp_cls_weights = [[1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1]] + exp_reg_targets = [[0, 0, 0, 0], + [0, 0, -1, 1], + [0, 0, 0, 0], + [0, 0, -.5, .2]] + exp_reg_weights = [1, 1, 0, 1] + + (cls_targets_out, + cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( + graph_fn, [anchor_means, groundtruth_box_corners, groundtruth_labels]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + self.assertEqual(cls_targets_out.dtype, np.float32) + self.assertEqual(cls_weights_out.dtype, np.float32) + self.assertEqual(reg_targets_out.dtype, np.float32) + self.assertEqual(reg_weights_out.dtype, np.float32) + + def test_assign_multiclass_with_groundtruth_weights(self): + + def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels, + groundtruth_weights): + similarity_calc = region_similarity_calculator.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, + unmatched_threshold=0.5) + box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) + unmatched_class_label = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32) + target_assigner = targetassigner.TargetAssigner( + similarity_calc, matcher, box_coder) + + anchors_boxlist = box_list.BoxList(anchor_means) + groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) + result = target_assigner.assign( + anchors_boxlist, + groundtruth_boxlist, + groundtruth_labels, + unmatched_class_label=unmatched_class_label, + groundtruth_weights=groundtruth_weights) + (_, cls_weights, _, reg_weights, _) = result + return (cls_weights, reg_weights) + + anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 1.0, 0.8], + [0, 0.5, .5, 1.0], + [.75, 0, 1.0, .25]], dtype=np.float32) + groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.9, 0.9], + [.75, 0, .95, .27]], dtype=np.float32) + groundtruth_labels = np.array([[0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 1, 0, 0, 0]], dtype=np.float32) + groundtruth_weights = np.array([0.3, 0., 0.5], dtype=np.float32) + + # background class gets weight of 1. + exp_cls_weights = [[0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3], + [0, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 1], + [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]] + exp_reg_weights = [0.3, 0., 0., 0.5] # background class gets weight of 0. 
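+    # Each anchor matched to a groundtruth box inherits that box's weight for
+    # both targets (0.3, 0.0 and 0.5 here), while the unmatched third anchor
+    # falls back to a classification weight of 1 (background) and a regression
+    # weight of 0.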
+ + (cls_weights_out, reg_weights_out) = self.execute(graph_fn, [ + anchor_means, groundtruth_box_corners, groundtruth_labels, + groundtruth_weights + ]) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_weights_out, exp_reg_weights) + + def test_assign_multidimensional_class_targets(self): + + def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels): + similarity_calc = region_similarity_calculator.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, + unmatched_threshold=0.5) + box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) + + unmatched_class_label = tf.constant([[0, 0], [0, 0]], tf.float32) + target_assigner = targetassigner.TargetAssigner( + similarity_calc, matcher, box_coder) + + anchors_boxlist = box_list.BoxList(anchor_means) + groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) + result = target_assigner.assign( + anchors_boxlist, + groundtruth_boxlist, + groundtruth_labels, + unmatched_class_label=unmatched_class_label) + (cls_targets, cls_weights, reg_targets, reg_weights, _) = result + return (cls_targets, cls_weights, reg_targets, reg_weights) + + anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 1.0, 0.8], + [0, 0.5, .5, 1.0], + [.75, 0, 1.0, .25]], dtype=np.float32) + groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.9, 0.9], + [.75, 0, .95, .27]], dtype=np.float32) + + groundtruth_labels = np.array([[[0, 1], [1, 0]], + [[1, 0], [0, 1]], + [[0, 1], [1, .5]]], np.float32) + + exp_cls_targets = [[[0, 1], [1, 0]], + [[1, 0], [0, 1]], + [[0, 0], [0, 0]], + [[0, 1], [1, .5]]] + exp_cls_weights = [[[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[1, 1], [1, 1]]] + exp_reg_targets = [[0, 0, 0, 0], + [0, 0, -1, 1], + [0, 0, 0, 0], + [0, 0, -.5, .2]] + exp_reg_weights = [1, 1, 0, 1] + (cls_targets_out, + cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( + graph_fn, [anchor_means, groundtruth_box_corners, groundtruth_labels]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + self.assertEqual(cls_targets_out.dtype, np.float32) + self.assertEqual(cls_weights_out.dtype, np.float32) + self.assertEqual(reg_targets_out.dtype, np.float32) + self.assertEqual(reg_weights_out.dtype, np.float32) + + def test_assign_empty_groundtruth(self): + + def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels): + similarity_calc = region_similarity_calculator.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, + unmatched_threshold=0.5) + box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) + unmatched_class_label = tf.constant([0, 0, 0], tf.float32) + anchors_boxlist = box_list.BoxList(anchor_means) + groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) + target_assigner = targetassigner.TargetAssigner( + similarity_calc, matcher, box_coder) + result = target_assigner.assign( + anchors_boxlist, + groundtruth_boxlist, + groundtruth_labels, + unmatched_class_label=unmatched_class_label) + (cls_targets, cls_weights, reg_targets, reg_weights, _) = result + return (cls_targets, cls_weights, reg_targets, reg_weights) + + groundtruth_box_corners = np.zeros((0, 4), dtype=np.float32) + groundtruth_labels = np.zeros((0, 3), dtype=np.float32) + anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 1.0, 0.8], 
+ [0, 0.5, .5, 1.0], + [.75, 0, 1.0, .25]], + dtype=np.float32) + exp_cls_targets = [[0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0]] + exp_cls_weights = [[1, 1, 1], + [1, 1, 1], + [1, 1, 1], + [1, 1, 1]] + exp_reg_targets = [[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]] + exp_reg_weights = [0, 0, 0, 0] + (cls_targets_out, + cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( + graph_fn, [anchor_means, groundtruth_box_corners, groundtruth_labels]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + self.assertEqual(cls_targets_out.dtype, np.float32) + self.assertEqual(cls_weights_out.dtype, np.float32) + self.assertEqual(reg_targets_out.dtype, np.float32) + self.assertEqual(reg_weights_out.dtype, np.float32) + + def test_raises_error_on_incompatible_groundtruth_boxes_and_labels(self): + similarity_calc = region_similarity_calculator.NegSqDistSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(0.5) + box_coder = mean_stddev_box_coder.MeanStddevBoxCoder() + unmatched_class_label = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32) + target_assigner = targetassigner.TargetAssigner( + similarity_calc, matcher, box_coder) + + prior_means = tf.constant([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 1.0, 0.8], + [0, 0.5, .5, 1.0], + [.75, 0, 1.0, .25]]) + priors = box_list.BoxList(prior_means) + + box_corners = [[0.0, 0.0, 0.5, 0.5], + [0.0, 0.0, 0.5, 0.8], + [0.5, 0.5, 0.9, 0.9], + [.75, 0, .95, .27]] + boxes = box_list.BoxList(tf.constant(box_corners)) + + groundtruth_labels = tf.constant([[0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 1, 0, 0, 0]], tf.float32) + with self.assertRaisesRegexp(ValueError, 'Unequal shapes'): + target_assigner.assign( + priors, + boxes, + groundtruth_labels, + unmatched_class_label=unmatched_class_label) + + def test_raises_error_on_invalid_groundtruth_labels(self): + similarity_calc = region_similarity_calculator.NegSqDistSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(0.5) + box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=1.0) + unmatched_class_label = tf.constant([[0, 0], [0, 0], [0, 0]], tf.float32) + target_assigner = targetassigner.TargetAssigner( + similarity_calc, matcher, box_coder) + + prior_means = tf.constant([[0.0, 0.0, 0.5, 0.5]]) + priors = box_list.BoxList(prior_means) + + box_corners = [[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.9, 0.9], + [.75, 0, .95, .27]] + boxes = box_list.BoxList(tf.constant(box_corners)) + groundtruth_labels = tf.constant([[[0, 1], [1, 0]]], tf.float32) + + with self.assertRaises(ValueError): + target_assigner.assign( + priors, + boxes, + groundtruth_labels, + unmatched_class_label=unmatched_class_label) + + +class BatchTargetAssignerTest(test_case.TestCase): + + def _get_target_assigner(self): + similarity_calc = region_similarity_calculator.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, + unmatched_threshold=0.5) + box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) + return targetassigner.TargetAssigner(similarity_calc, matcher, box_coder) + + def test_batch_assign_targets(self): + + def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2): + box_list1 = box_list.BoxList(groundtruth_boxlist1) + box_list2 = box_list.BoxList(groundtruth_boxlist2) + gt_box_batch = [box_list1, box_list2] + gt_class_targets = [None, None] + anchors_boxlist = 
box_list.BoxList(anchor_means) + agnostic_target_assigner = self._get_target_assigner() + (cls_targets, cls_weights, reg_targets, reg_weights, + _) = targetassigner.batch_assign_targets( + agnostic_target_assigner, anchors_boxlist, gt_box_batch, + gt_class_targets) + return (cls_targets, cls_weights, reg_targets, reg_weights) + + groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32) + groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1], + [0.015789, 0.0985, 0.55789, 0.3842]], + dtype=np.float32) + anchor_means = np.array([[0, 0, .25, .25], + [0, .25, 1, 1], + [0, .1, .5, .5], + [.75, .75, 1, 1]], dtype=np.float32) + + exp_cls_targets = [[[1], [0], [0], [0]], + [[0], [1], [1], [0]]] + exp_cls_weights = [[[1], [1], [1], [1]], + [[1], [1], [1], [1]]] + exp_reg_targets = [[[0, 0, -0.5, -0.5], + [0, 0, 0, 0], + [0, 0, 0, 0,], + [0, 0, 0, 0,],], + [[0, 0, 0, 0,], + [0, 0.01231521, 0, 0], + [0.15789001, -0.01500003, 0.57889998, -1.15799987], + [0, 0, 0, 0]]] + exp_reg_weights = [[1, 0, 0, 0], + [0, 1, 1, 0]] + + (cls_targets_out, + cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( + graph_fn, [anchor_means, groundtruth_boxlist1, groundtruth_boxlist2]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + + def test_batch_assign_multiclass_targets(self): + + def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, + class_targets1, class_targets2): + box_list1 = box_list.BoxList(groundtruth_boxlist1) + box_list2 = box_list.BoxList(groundtruth_boxlist2) + gt_box_batch = [box_list1, box_list2] + gt_class_targets = [class_targets1, class_targets2] + anchors_boxlist = box_list.BoxList(anchor_means) + multiclass_target_assigner = self._get_target_assigner() + num_classes = 3 + unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32) + (cls_targets, cls_weights, reg_targets, reg_weights, + _) = targetassigner.batch_assign_targets( + multiclass_target_assigner, anchors_boxlist, gt_box_batch, + gt_class_targets, unmatched_class_label) + return (cls_targets, cls_weights, reg_targets, reg_weights) + + groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32) + groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1], + [0.015789, 0.0985, 0.55789, 0.3842]], + dtype=np.float32) + class_targets1 = np.array([[0, 1, 0, 0]], dtype=np.float32) + class_targets2 = np.array([[0, 0, 0, 1], + [0, 0, 1, 0]], dtype=np.float32) + + anchor_means = np.array([[0, 0, .25, .25], + [0, .25, 1, 1], + [0, .1, .5, .5], + [.75, .75, 1, 1]], dtype=np.float32) + exp_cls_targets = [[[0, 1, 0, 0], + [1, 0, 0, 0], + [1, 0, 0, 0], + [1, 0, 0, 0]], + [[1, 0, 0, 0], + [0, 0, 0, 1], + [0, 0, 1, 0], + [1, 0, 0, 0]]] + exp_cls_weights = [[[1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1]], + [[1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1]]] + exp_reg_targets = [[[0, 0, -0.5, -0.5], + [0, 0, 0, 0], + [0, 0, 0, 0,], + [0, 0, 0, 0,],], + [[0, 0, 0, 0,], + [0, 0.01231521, 0, 0], + [0.15789001, -0.01500003, 0.57889998, -1.15799987], + [0, 0, 0, 0]]] + exp_reg_weights = [[1, 0, 0, 0], + [0, 1, 1, 0]] + + (cls_targets_out, cls_weights_out, reg_targets_out, + reg_weights_out) = self.execute(graph_fn, [ + anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, + class_targets1, class_targets2 + ]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + 
self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + + def test_batch_assign_multiclass_targets_with_padded_groundtruth(self): + + def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, + class_targets1, class_targets2, groundtruth_weights1, + groundtruth_weights2): + box_list1 = box_list.BoxList(groundtruth_boxlist1) + box_list2 = box_list.BoxList(groundtruth_boxlist2) + gt_box_batch = [box_list1, box_list2] + gt_class_targets = [class_targets1, class_targets2] + gt_weights = [groundtruth_weights1, groundtruth_weights2] + anchors_boxlist = box_list.BoxList(anchor_means) + multiclass_target_assigner = self._get_target_assigner() + num_classes = 3 + unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32) + (cls_targets, cls_weights, reg_targets, reg_weights, + _) = targetassigner.batch_assign_targets( + multiclass_target_assigner, anchors_boxlist, gt_box_batch, + gt_class_targets, unmatched_class_label, gt_weights) + return (cls_targets, cls_weights, reg_targets, reg_weights) + + groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2], + [0., 0., 0., 0.]], dtype=np.float32) + groundtruth_weights1 = np.array([1, 0], dtype=np.float32) + groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1], + [0.015789, 0.0985, 0.55789, 0.3842], + [0, 0, 0, 0]], + dtype=np.float32) + groundtruth_weights2 = np.array([1, 1, 0], dtype=np.float32) + class_targets1 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], dtype=np.float32) + class_targets2 = np.array([[0, 0, 0, 1], + [0, 0, 1, 0], + [0, 0, 0, 0]], dtype=np.float32) + + anchor_means = np.array([[0, 0, .25, .25], + [0, .25, 1, 1], + [0, .1, .5, .5], + [.75, .75, 1, 1]], dtype=np.float32) + + exp_cls_targets = [[[0, 1, 0, 0], + [1, 0, 0, 0], + [1, 0, 0, 0], + [1, 0, 0, 0]], + [[1, 0, 0, 0], + [0, 0, 0, 1], + [0, 0, 1, 0], + [1, 0, 0, 0]]] + exp_cls_weights = [[[1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1]], + [[1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1]]] + exp_reg_targets = [[[0, 0, -0.5, -0.5], + [0, 0, 0, 0], + [0, 0, 0, 0,], + [0, 0, 0, 0,],], + [[0, 0, 0, 0,], + [0, 0.01231521, 0, 0], + [0.15789001, -0.01500003, 0.57889998, -1.15799987], + [0, 0, 0, 0]]] + exp_reg_weights = [[1, 0, 0, 0], + [0, 1, 1, 0]] + + (cls_targets_out, cls_weights_out, reg_targets_out, + reg_weights_out) = self.execute(graph_fn, [ + anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, + class_targets1, class_targets2, groundtruth_weights1, + groundtruth_weights2 + ]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + + def test_batch_assign_multidimensional_targets(self): + + def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, + class_targets1, class_targets2): + box_list1 = box_list.BoxList(groundtruth_boxlist1) + box_list2 = box_list.BoxList(groundtruth_boxlist2) + gt_box_batch = [box_list1, box_list2] + gt_class_targets = [class_targets1, class_targets2] + anchors_boxlist = box_list.BoxList(anchor_means) + multiclass_target_assigner = self._get_target_assigner() + target_dimensions = (2, 3) + unmatched_class_label = tf.constant(np.zeros(target_dimensions), + tf.float32) + (cls_targets, cls_weights, reg_targets, reg_weights, + _) = targetassigner.batch_assign_targets( + multiclass_target_assigner, anchors_boxlist, 
gt_box_batch, + gt_class_targets, unmatched_class_label) + return (cls_targets, cls_weights, reg_targets, reg_weights) + + groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32) + groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1], + [0.015789, 0.0985, 0.55789, 0.3842]], + dtype=np.float32) + class_targets1 = np.array([[[0, 1, 1], + [1, 1, 0]]], dtype=np.float32) + class_targets2 = np.array([[[0, 1, 1], + [1, 1, 0]], + [[0, 0, 1], + [0, 0, 1]]], dtype=np.float32) + + anchor_means = np.array([[0, 0, .25, .25], + [0, .25, 1, 1], + [0, .1, .5, .5], + [.75, .75, 1, 1]], dtype=np.float32) + + exp_cls_targets = [[[[0., 1., 1.], + [1., 1., 0.]], + [[0., 0., 0.], + [0., 0., 0.]], + [[0., 0., 0.], + [0., 0., 0.]], + [[0., 0., 0.], + [0., 0., 0.]]], + [[[0., 0., 0.], + [0., 0., 0.]], + [[0., 1., 1.], + [1., 1., 0.]], + [[0., 0., 1.], + [0., 0., 1.]], + [[0., 0., 0.], + [0., 0., 0.]]]] + exp_cls_weights = [[[[1., 1., 1.], + [1., 1., 1.]], + [[1., 1., 1.], + [1., 1., 1.]], + [[1., 1., 1.], + [1., 1., 1.]], + [[1., 1., 1.], + [1., 1., 1.]]], + [[[1., 1., 1.], + [1., 1., 1.]], + [[1., 1., 1.], + [1., 1., 1.]], + [[1., 1., 1.], + [1., 1., 1.]], + [[1., 1., 1.], + [1., 1., 1.]]]] + exp_reg_targets = [[[0, 0, -0.5, -0.5], + [0, 0, 0, 0], + [0, 0, 0, 0,], + [0, 0, 0, 0,],], + [[0, 0, 0, 0,], + [0, 0.01231521, 0, 0], + [0.15789001, -0.01500003, 0.57889998, -1.15799987], + [0, 0, 0, 0]]] + exp_reg_weights = [[1, 0, 0, 0], + [0, 1, 1, 0]] + + (cls_targets_out, cls_weights_out, reg_targets_out, + reg_weights_out) = self.execute(graph_fn, [ + anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, + class_targets1, class_targets2 + ]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + + def test_batch_assign_empty_groundtruth(self): + + def graph_fn(anchor_means, groundtruth_box_corners, gt_class_targets): + groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) + gt_box_batch = [groundtruth_boxlist] + gt_class_targets_batch = [gt_class_targets] + anchors_boxlist = box_list.BoxList(anchor_means) + + multiclass_target_assigner = self._get_target_assigner() + num_classes = 3 + unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32) + (cls_targets, cls_weights, reg_targets, reg_weights, + _) = targetassigner.batch_assign_targets( + multiclass_target_assigner, anchors_boxlist, + gt_box_batch, gt_class_targets_batch, unmatched_class_label) + return (cls_targets, cls_weights, reg_targets, reg_weights) + + groundtruth_box_corners = np.zeros((0, 4), dtype=np.float32) + anchor_means = np.array([[0, 0, .25, .25], + [0, .25, 1, 1]], dtype=np.float32) + exp_cls_targets = [[[1, 0, 0, 0], + [1, 0, 0, 0]]] + exp_cls_weights = [[[1, 1, 1, 1], + [1, 1, 1, 1]]] + exp_reg_targets = [[[0, 0, 0, 0], + [0, 0, 0, 0]]] + exp_reg_weights = [[0, 0]] + num_classes = 3 + pad = 1 + gt_class_targets = np.zeros((0, num_classes + pad), dtype=np.float32) + + (cls_targets_out, + cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( + graph_fn, [anchor_means, groundtruth_box_corners, gt_class_targets]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + + +class BatchGetTargetsTest(test_case.TestCase): + + def test_scalar_targets(self): + 
batch_match = np.array([[1, 0, 1], + [-2, -1, 1]], dtype=np.int32) + groundtruth_tensors_list = np.array([[11, 12], [13, 14]], dtype=np.int32) + groundtruth_weights_list = np.array([[1.0, 1.0], [1.0, 0.5]], + dtype=np.float32) + unmatched_value = np.array(99, dtype=np.int32) + unmatched_weight = np.array(0.0, dtype=np.float32) + + def graph_fn(batch_match, groundtruth_tensors_list, + groundtruth_weights_list, unmatched_value, unmatched_weight): + targets, weights = targetassigner.batch_get_targets( + batch_match, tf.unstack(groundtruth_tensors_list), + tf.unstack(groundtruth_weights_list), + unmatched_value, unmatched_weight) + return (targets, weights) + + (targets_np, weights_np) = self.execute(graph_fn, [ + batch_match, groundtruth_tensors_list, groundtruth_weights_list, + unmatched_value, unmatched_weight + ]) + self.assertAllEqual([[12, 11, 12], + [99, 99, 14]], targets_np) + self.assertAllClose([[1.0, 1.0, 1.0], + [0.0, 0.0, 0.5]], weights_np) + + def test_1d_targets(self): + batch_match = np.array([[1, 0, 1], + [-2, -1, 1]], dtype=np.int32) + groundtruth_tensors_list = np.array([[[11, 12], [12, 13]], + [[13, 14], [14, 15]]], + dtype=np.float32) + groundtruth_weights_list = np.array([[1.0, 1.0], [1.0, 0.5]], + dtype=np.float32) + unmatched_value = np.array([99, 99], dtype=np.float32) + unmatched_weight = np.array(0.0, dtype=np.float32) + + def graph_fn(batch_match, groundtruth_tensors_list, + groundtruth_weights_list, unmatched_value, unmatched_weight): + targets, weights = targetassigner.batch_get_targets( + batch_match, tf.unstack(groundtruth_tensors_list), + tf.unstack(groundtruth_weights_list), + unmatched_value, unmatched_weight) + return (targets, weights) + + (targets_np, weights_np) = self.execute(graph_fn, [ + batch_match, groundtruth_tensors_list, groundtruth_weights_list, + unmatched_value, unmatched_weight + ]) + self.assertAllClose([[[12, 13], [11, 12], [12, 13]], + [[99, 99], [99, 99], [14, 15]]], targets_np) + self.assertAllClose([[1.0, 1.0, 1.0], + [0.0, 0.0, 0.5]], weights_np) + + +class BatchTargetAssignConfidencesTest(test_case.TestCase): + + def _get_target_assigner(self): + similarity_calc = region_similarity_calculator.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, + unmatched_threshold=0.5) + box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) + return targetassigner.TargetAssigner(similarity_calc, matcher, box_coder) + + def test_batch_assign_empty_groundtruth(self): + + def graph_fn(anchor_means, groundtruth_box_corners, gt_class_confidences): + groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) + gt_box_batch = [groundtruth_boxlist] + gt_class_confidences_batch = [gt_class_confidences] + anchors_boxlist = box_list.BoxList(anchor_means) + + num_classes = 3 + implicit_class_weight = 0.5 + unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32) + multiclass_target_assigner = self._get_target_assigner() + (cls_targets, cls_weights, reg_targets, reg_weights, + _) = targetassigner.batch_assign_confidences( + multiclass_target_assigner, + anchors_boxlist, + gt_box_batch, + gt_class_confidences_batch, + unmatched_class_label=unmatched_class_label, + include_background_class=True, + implicit_class_weight=implicit_class_weight) + return (cls_targets, cls_weights, reg_targets, reg_weights) + + groundtruth_box_corners = np.zeros((0, 4), dtype=np.float32) + anchor_means = np.array([[0, 0, .25, .25], + [0, .25, 1, 1]], dtype=np.float32) + num_classes = 3 + pad = 1 + gt_class_confidences = 
np.zeros((0, num_classes + pad), dtype=np.float32) + + exp_cls_targets = [[[1, 0, 0, 0], + [1, 0, 0, 0]]] + exp_cls_weights = [[[0.5, 0.5, 0.5, 0.5], + [0.5, 0.5, 0.5, 0.5]]] + exp_reg_targets = [[[0, 0, 0, 0], + [0, 0, 0, 0]]] + exp_reg_weights = [[0, 0]] + + (cls_targets_out, + cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( + graph_fn, + [anchor_means, groundtruth_box_corners, gt_class_confidences]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + + def test_batch_assign_confidences_agnostic(self): + + def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2): + box_list1 = box_list.BoxList(groundtruth_boxlist1) + box_list2 = box_list.BoxList(groundtruth_boxlist2) + gt_box_batch = [box_list1, box_list2] + gt_class_confidences_batch = [None, None] + anchors_boxlist = box_list.BoxList(anchor_means) + agnostic_target_assigner = self._get_target_assigner() + implicit_class_weight = 0.5 + (cls_targets, cls_weights, reg_targets, reg_weights, + _) = targetassigner.batch_assign_confidences( + agnostic_target_assigner, + anchors_boxlist, + gt_box_batch, + gt_class_confidences_batch, + include_background_class=False, + implicit_class_weight=implicit_class_weight) + return (cls_targets, cls_weights, reg_targets, reg_weights) + + groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32) + groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1], + [0.015789, 0.0985, 0.55789, 0.3842]], + dtype=np.float32) + anchor_means = np.array([[0, 0, .25, .25], + [0, .25, 1, 1], + [0, .1, .5, .5], + [.75, .75, 1, 1]], dtype=np.float32) + + exp_cls_targets = [[[1], [0], [0], [0]], + [[0], [1], [1], [0]]] + exp_cls_weights = [[[1], [0.5], [0.5], [0.5]], + [[0.5], [1], [1], [0.5]]] + exp_reg_targets = [[[0, 0, -0.5, -0.5], + [0, 0, 0, 0], + [0, 0, 0, 0,], + [0, 0, 0, 0,],], + [[0, 0, 0, 0,], + [0, 0.01231521, 0, 0], + [0.15789001, -0.01500003, 0.57889998, -1.15799987], + [0, 0, 0, 0]]] + exp_reg_weights = [[1, 0, 0, 0], + [0, 1, 1, 0]] + + (cls_targets_out, + cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( + graph_fn, [anchor_means, groundtruth_boxlist1, groundtruth_boxlist2]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + + def test_batch_assign_confidences_multiclass(self): + + def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, + class_targets1, class_targets2): + box_list1 = box_list.BoxList(groundtruth_boxlist1) + box_list2 = box_list.BoxList(groundtruth_boxlist2) + gt_box_batch = [box_list1, box_list2] + gt_class_confidences_batch = [class_targets1, class_targets2] + anchors_boxlist = box_list.BoxList(anchor_means) + multiclass_target_assigner = self._get_target_assigner() + num_classes = 3 + implicit_class_weight = 0.5 + unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32) + (cls_targets, cls_weights, reg_targets, reg_weights, + _) = targetassigner.batch_assign_confidences( + multiclass_target_assigner, + anchors_boxlist, + gt_box_batch, + gt_class_confidences_batch, + unmatched_class_label=unmatched_class_label, + include_background_class=True, + implicit_class_weight=implicit_class_weight) + return (cls_targets, cls_weights, reg_targets, reg_weights) + + 
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32) + groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1], + [0.015789, 0.0985, 0.55789, 0.3842]], + dtype=np.float32) + class_targets1 = np.array([[0, 1, 0, 0]], dtype=np.float32) + class_targets2 = np.array([[0, 0, 0, 1], + [0, 0, -1, 0]], dtype=np.float32) + + anchor_means = np.array([[0, 0, .25, .25], + [0, .25, 1, 1], + [0, .1, .5, .5], + [.75, .75, 1, 1]], dtype=np.float32) + exp_cls_targets = [[[0, 1, 0, 0], + [1, 0, 0, 0], + [1, 0, 0, 0], + [1, 0, 0, 0]], + [[1, 0, 0, 0], + [0, 0, 0, 1], + [1, 0, 0, 0], + [1, 0, 0, 0]]] + exp_cls_weights = [[[1, 1, 0.5, 0.5], + [0.5, 0.5, 0.5, 0.5], + [0.5, 0.5, 0.5, 0.5], + [0.5, 0.5, 0.5, 0.5]], + [[0.5, 0.5, 0.5, 0.5], + [1, 0.5, 0.5, 1], + [0.5, 0.5, 1, 0.5], + [0.5, 0.5, 0.5, 0.5]]] + exp_reg_targets = [[[0, 0, -0.5, -0.5], + [0, 0, 0, 0], + [0, 0, 0, 0,], + [0, 0, 0, 0,],], + [[0, 0, 0, 0,], + [0, 0.01231521, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]]] + exp_reg_weights = [[1, 0, 0, 0], + [0, 1, 0, 0]] + + (cls_targets_out, cls_weights_out, reg_targets_out, + reg_weights_out) = self.execute(graph_fn, [ + anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, + class_targets1, class_targets2 + ]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + + def test_batch_assign_confidences_multiclass_with_padded_groundtruth(self): + + def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, + class_targets1, class_targets2, groundtruth_weights1, + groundtruth_weights2): + box_list1 = box_list.BoxList(groundtruth_boxlist1) + box_list2 = box_list.BoxList(groundtruth_boxlist2) + gt_box_batch = [box_list1, box_list2] + gt_class_confidences_batch = [class_targets1, class_targets2] + gt_weights = [groundtruth_weights1, groundtruth_weights2] + anchors_boxlist = box_list.BoxList(anchor_means) + multiclass_target_assigner = self._get_target_assigner() + num_classes = 3 + unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32) + implicit_class_weight = 0.5 + (cls_targets, cls_weights, reg_targets, reg_weights, + _) = targetassigner.batch_assign_confidences( + multiclass_target_assigner, + anchors_boxlist, + gt_box_batch, + gt_class_confidences_batch, + gt_weights, + unmatched_class_label=unmatched_class_label, + include_background_class=True, + implicit_class_weight=implicit_class_weight) + + return (cls_targets, cls_weights, reg_targets, reg_weights) + + groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2], + [0., 0., 0., 0.]], dtype=np.float32) + groundtruth_weights1 = np.array([1, 0], dtype=np.float32) + groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1], + [0.015789, 0.0985, 0.55789, 0.3842], + [0, 0, 0, 0]], + dtype=np.float32) + groundtruth_weights2 = np.array([1, 1, 0], dtype=np.float32) + class_targets1 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], dtype=np.float32) + class_targets2 = np.array([[0, 0, 0, 1], + [0, 0, -1, 0], + [0, 0, 0, 0]], dtype=np.float32) + anchor_means = np.array([[0, 0, .25, .25], + [0, .25, 1, 1], + [0, .1, .5, .5], + [.75, .75, 1, 1]], dtype=np.float32) + + exp_cls_targets = [[[0, 1, 0, 0], + [1, 0, 0, 0], + [1, 0, 0, 0], + [1, 0, 0, 0]], + [[1, 0, 0, 0], + [0, 0, 0, 1], + [1, 0, 0, 0], + [1, 0, 0, 0]]] + exp_cls_weights = [[[1, 1, 0.5, 0.5], + [0.5, 0.5, 0.5, 0.5], + [0.5, 0.5, 0.5, 0.5], + [0.5, 0.5, 0.5, 0.5]], + [[0.5, 0.5, 0.5, 0.5], + [1, 0.5, 0.5, 1], + 
[0.5, 0.5, 1, 0.5], + [0.5, 0.5, 0.5, 0.5]]] + exp_reg_targets = [[[0, 0, -0.5, -0.5], + [0, 0, 0, 0], + [0, 0, 0, 0,], + [0, 0, 0, 0,],], + [[0, 0, 0, 0,], + [0, 0.01231521, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]]] + exp_reg_weights = [[1, 0, 0, 0], + [0, 1, 0, 0]] + + (cls_targets_out, cls_weights_out, reg_targets_out, + reg_weights_out) = self.execute(graph_fn, [ + anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, + class_targets1, class_targets2, groundtruth_weights1, + groundtruth_weights2 + ]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + + def test_batch_assign_confidences_multidimensional(self): + + def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, + class_targets1, class_targets2): + box_list1 = box_list.BoxList(groundtruth_boxlist1) + box_list2 = box_list.BoxList(groundtruth_boxlist2) + gt_box_batch = [box_list1, box_list2] + gt_class_confidences_batch = [class_targets1, class_targets2] + anchors_boxlist = box_list.BoxList(anchor_means) + multiclass_target_assigner = self._get_target_assigner() + target_dimensions = (2, 3) + unmatched_class_label = tf.constant(np.zeros(target_dimensions), + tf.float32) + implicit_class_weight = 0.5 + (cls_targets, cls_weights, reg_targets, reg_weights, + _) = targetassigner.batch_assign_confidences( + multiclass_target_assigner, + anchors_boxlist, + gt_box_batch, + gt_class_confidences_batch, + unmatched_class_label=unmatched_class_label, + include_background_class=True, + implicit_class_weight=implicit_class_weight) + return (cls_targets, cls_weights, reg_targets, reg_weights) + + groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32) + groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1], + [0.015789, 0.0985, 0.55789, 0.3842]], + dtype=np.float32) + class_targets1 = np.array([[0, 1, 0, 0]], dtype=np.float32) + class_targets2 = np.array([[0, 0, 0, 1], + [0, 0, 1, 0]], dtype=np.float32) + class_targets1 = np.array([[[0, 1, 1], + [1, 1, 0]]], dtype=np.float32) + class_targets2 = np.array([[[0, 1, 1], + [1, 1, 0]], + [[0, 0, 1], + [0, 0, 1]]], dtype=np.float32) + + anchor_means = np.array([[0, 0, .25, .25], + [0, .25, 1, 1], + [0, .1, .5, .5], + [.75, .75, 1, 1]], dtype=np.float32) + + with self.assertRaises(ValueError): + _, _, _, _ = self.execute(graph_fn, [ + anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, + class_targets1, class_targets2 + ]) + + +class CreateTargetAssignerTest(test_case.TestCase): + + def test_create_target_assigner(self): + """Tests that named constructor gives working target assigners. + + TODO(rathodv): Make this test more general. + """ + corners = [[0.0, 0.0, 1.0, 1.0]] + groundtruth = box_list.BoxList(tf.constant(corners)) + + priors = box_list.BoxList(tf.constant(corners)) + if tf_version.is_tf1(): + multibox_ta = (targetassigner + .create_target_assigner('Multibox', stage='proposal')) + multibox_ta.assign(priors, groundtruth) + # No tests on output, as that may vary arbitrarily as new target assigners + # are added. As long as it is constructed correctly and runs without errors, + # tests on the individual assigners cover correctness of the assignments. 
+ + anchors = box_list.BoxList(tf.constant(corners)) + faster_rcnn_proposals_ta = (targetassigner + .create_target_assigner('FasterRCNN', + stage='proposal')) + faster_rcnn_proposals_ta.assign(anchors, groundtruth) + + fast_rcnn_ta = (targetassigner + .create_target_assigner('FastRCNN')) + fast_rcnn_ta.assign(anchors, groundtruth) + + faster_rcnn_detection_ta = (targetassigner + .create_target_assigner('FasterRCNN', + stage='detection')) + faster_rcnn_detection_ta.assign(anchors, groundtruth) + + with self.assertRaises(ValueError): + targetassigner.create_target_assigner('InvalidDetector', + stage='invalid_stage') + + +def _array_argmax(array): + return np.unravel_index(np.argmax(array), array.shape) + + +class CenterNetCenterHeatmapTargetAssignerTest(test_case.TestCase): + + def setUp(self): + super(CenterNetCenterHeatmapTargetAssignerTest, self).setUp() + + self._box_center = [0.0, 0.0, 1.0, 1.0] + self._box_center_small = [0.25, 0.25, 0.75, 0.75] + self._box_lower_left = [0.5, 0.0, 1.0, 0.5] + self._box_center_offset = [0.1, 0.05, 1.0, 1.0] + self._box_odd_coordinates = [0.1625, 0.2125, 0.5625, 0.9625] + + def test_center_location(self): + """Test that the centers are at the correct location.""" + def graph_fn(): + box_batch = [tf.constant([self._box_center, self._box_lower_left])] + classes = [ + tf.one_hot([0, 1], depth=4), + ] + assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4) + targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch, + classes) + return targets + targets = self.execute(graph_fn, []) + self.assertEqual((10, 10), _array_argmax(targets[0, :, :, 0])) + self.assertAlmostEqual(1.0, targets[0, 10, 10, 0]) + self.assertEqual((15, 5), _array_argmax(targets[0, :, :, 1])) + self.assertAlmostEqual(1.0, targets[0, 15, 5, 1]) + + def test_center_batch_shape(self): + """Test that the shape of the target for a batch is correct.""" + def graph_fn(): + box_batch = [ + tf.constant([self._box_center, self._box_lower_left]), + tf.constant([self._box_center]), + tf.constant([self._box_center_small]), + ] + classes = [ + tf.one_hot([0, 1], depth=4), + tf.one_hot([2], depth=4), + tf.one_hot([3], depth=4), + ] + assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4) + targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch, + classes) + return targets + targets = self.execute(graph_fn, []) + self.assertEqual((3, 20, 20, 4), targets.shape) + + def test_center_overlap_maximum(self): + """Test that when boxes overlap we, are computing the maximum.""" + def graph_fn(): + box_batch = [ + tf.constant([ + self._box_center, self._box_center_offset, self._box_center, + self._box_center_offset + ]) + ] + classes = [ + tf.one_hot([0, 0, 1, 2], depth=4), + ] + + assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4) + targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch, + classes) + return targets + targets = self.execute(graph_fn, []) + class0_targets = targets[0, :, :, 0] + class1_targets = targets[0, :, :, 1] + class2_targets = targets[0, :, :, 2] + np.testing.assert_allclose(class0_targets, + np.maximum(class1_targets, class2_targets)) + + def test_size_blur(self): + """Test that the heatmap of a larger box is more blurred.""" + def graph_fn(): + box_batch = [tf.constant([self._box_center, self._box_center_small])] + + classes = [ + tf.one_hot([0, 1], depth=4), + ] + assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4) + targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch, + classes) 
+ return targets + targets = self.execute(graph_fn, []) + self.assertGreater( + np.count_nonzero(targets[:, :, :, 0]), + np.count_nonzero(targets[:, :, :, 1])) + + def test_weights(self): + """Test that the weights correctly ignore ground truth.""" + def graph1_fn(): + box_batch = [ + tf.constant([self._box_center, self._box_lower_left]), + tf.constant([self._box_center]), + tf.constant([self._box_center_small]), + ] + classes = [ + tf.one_hot([0, 1], depth=4), + tf.one_hot([2], depth=4), + tf.one_hot([3], depth=4), + ] + assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4) + targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch, + classes) + return targets + + targets = self.execute(graph1_fn, []) + self.assertAlmostEqual(1.0, targets[0, :, :, 0].max()) + self.assertAlmostEqual(1.0, targets[0, :, :, 1].max()) + self.assertAlmostEqual(1.0, targets[1, :, :, 2].max()) + self.assertAlmostEqual(1.0, targets[2, :, :, 3].max()) + self.assertAlmostEqual(0.0, targets[0, :, :, [2, 3]].max()) + self.assertAlmostEqual(0.0, targets[1, :, :, [0, 1, 3]].max()) + self.assertAlmostEqual(0.0, targets[2, :, :, :3].max()) + + def graph2_fn(): + weights = [ + tf.constant([0., 1.]), + tf.constant([1.]), + tf.constant([1.]), + ] + box_batch = [ + tf.constant([self._box_center, self._box_lower_left]), + tf.constant([self._box_center]), + tf.constant([self._box_center_small]), + ] + classes = [ + tf.one_hot([0, 1], depth=4), + tf.one_hot([2], depth=4), + tf.one_hot([3], depth=4), + ] + assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4) + targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch, + classes, + weights) + return targets + targets = self.execute(graph2_fn, []) + self.assertAlmostEqual(1.0, targets[0, :, :, 1].max()) + self.assertAlmostEqual(1.0, targets[1, :, :, 2].max()) + self.assertAlmostEqual(1.0, targets[2, :, :, 3].max()) + self.assertAlmostEqual(0.0, targets[0, :, :, [0, 2, 3]].max()) + self.assertAlmostEqual(0.0, targets[1, :, :, [0, 1, 3]].max()) + self.assertAlmostEqual(0.0, targets[2, :, :, :3].max()) + + def test_low_overlap(self): + def graph1_fn(): + box_batch = [tf.constant([self._box_center])] + classes = [ + tf.one_hot([0], depth=2), + ] + assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner( + 4, min_overlap=0.1) + targets_low_overlap = assigner.assign_center_targets_from_boxes( + 80, 80, box_batch, classes) + return targets_low_overlap + targets_low_overlap = self.execute(graph1_fn, []) + self.assertLess(1, np.count_nonzero(targets_low_overlap)) + + def graph2_fn(): + box_batch = [tf.constant([self._box_center])] + classes = [ + tf.one_hot([0], depth=2), + ] + assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner( + 4, min_overlap=0.6) + targets_medium_overlap = assigner.assign_center_targets_from_boxes( + 80, 80, box_batch, classes) + return targets_medium_overlap + targets_medium_overlap = self.execute(graph2_fn, []) + self.assertLess(1, np.count_nonzero(targets_medium_overlap)) + + def graph3_fn(): + box_batch = [tf.constant([self._box_center])] + classes = [ + tf.one_hot([0], depth=2), + ] + assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner( + 4, min_overlap=0.99) + targets_high_overlap = assigner.assign_center_targets_from_boxes( + 80, 80, box_batch, classes) + return targets_high_overlap + + targets_high_overlap = self.execute(graph3_fn, []) + self.assertTrue(np.all(targets_low_overlap >= targets_medium_overlap)) + self.assertTrue(np.all(targets_medium_overlap >= targets_high_overlap)) + + 
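+ # The tests above pin down the heatmap semantics: each box contributes a peak of 1.0 at its center in the channel of its class, contributions to the same class channel are merged with an elementwise maximum, and larger boxes or smaller `min_overlap` values spread the peak over more pixels.
+ # A minimal usage sketch, assuming TF2 eager execution and this file's imports (the box, class depth, and min_overlap values below are arbitrary examples):
+ #   assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4, min_overlap=0.7)
+ #   heatmap = assigner.assign_center_targets_from_boxes(
+ #       80, 80,
+ #       [tf.constant([[0.25, 0.25, 0.75, 0.75]])],  # normalized [ymin, xmin, ymax, xmax]
+ #       [tf.one_hot([0], depth=4)])  # one-hot class per box
+ #   # heatmap.shape == (1, 20, 20, 4) for an 80x80 input at stride 4.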
def test_empty_box_list(self): + """Test that an empty box list gives an all 0 heatmap.""" + def graph_fn(): + box_batch = [ + tf.zeros((0, 4), dtype=tf.float32), + ] + + classes = [ + tf.zeros((0, 5), dtype=tf.float32), + ] + + assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner( + 4, min_overlap=0.1) + targets = assigner.assign_center_targets_from_boxes( + 80, 80, box_batch, classes) + return targets + targets = self.execute(graph_fn, []) + np.testing.assert_allclose(targets, 0.) + + +class CenterNetBoxTargetAssignerTest(test_case.TestCase): + + def setUp(self): + super(CenterNetBoxTargetAssignerTest, self).setUp() + self._box_center = [0.0, 0.0, 1.0, 1.0] + self._box_center_small = [0.25, 0.25, 0.75, 0.75] + self._box_lower_left = [0.5, 0.0, 1.0, 0.5] + self._box_center_offset = [0.1, 0.05, 1.0, 1.0] + self._box_odd_coordinates = [0.1625, 0.2125, 0.5625, 0.9625] + + def test_max_distance_for_overlap(self): + """Test that the returned distance guarantees the minimum IoU for random boxes.""" + + # TODO(vighneshb) remove this after the `_smallest_positive_root` + # function is fixed. + self.skipTest(('Skipping test because we are using an incorrect version of ' + 'the `max_distance_for_overlap` function to reproduce' + ' results.')) + + rng = np.random.RandomState(0) + n_samples = 100 + + width = rng.uniform(1, 100, size=n_samples) + height = rng.uniform(1, 100, size=n_samples) + min_iou = rng.uniform(0.1, 1.0, size=n_samples) + + def graph_fn(): + max_dist = targetassigner.max_distance_for_overlap(height, width, min_iou) + return max_dist + max_dist = self.execute(graph_fn, []) + xmin1 = np.zeros(n_samples) + ymin1 = np.zeros(n_samples) + xmax1 = np.zeros(n_samples) + width + ymax1 = np.zeros(n_samples) + height + + xmin2 = max_dist * np.cos(rng.uniform(0, 2 * np.pi)) + ymin2 = max_dist * np.sin(rng.uniform(0, 2 * np.pi)) + xmax2 = width + max_dist * np.cos(rng.uniform(0, 2 * np.pi)) + ymax2 = height + max_dist * np.sin(rng.uniform(0, 2 * np.pi)) + + boxes1 = np.vstack([ymin1, xmin1, ymax1, xmax1]).T + boxes2 = np.vstack([ymin2, xmin2, ymax2, xmax2]).T + + iou = np.diag(np_box_ops.iou(boxes1, boxes2)) + + self.assertTrue(np.all(iou >= min_iou)) + + def test_max_distance_for_overlap_centernet(self): + """Test the version of the function used in the CenterNet paper.""" + + def graph_fn(): + distance = targetassigner.max_distance_for_overlap(10, 5, 0.5) + return distance + distance = self.execute(graph_fn, []) + self.assertAlmostEqual(2.807764064, distance) + + def test_assign_size_and_offset_targets(self): + """Test the assign_size_and_offset_targets function.""" + def graph_fn(): + box_batch = [ + tf.constant([self._box_center, self._box_lower_left]), + tf.constant([self._box_center_offset]), + tf.constant([self._box_center_small, self._box_odd_coordinates]), + ] + + assigner = targetassigner.CenterNetBoxTargetAssigner(4) + indices, hw, yx_offset, weights = assigner.assign_size_and_offset_targets( + 80, 80, box_batch) + return indices, hw, yx_offset, weights + indices, hw, yx_offset, weights = self.execute(graph_fn, []) + self.assertEqual(indices.shape, (5, 3)) + self.assertEqual(hw.shape, (5, 2)) + self.assertEqual(yx_offset.shape, (5, 2)) + self.assertEqual(weights.shape, (5,)) + np.testing.assert_array_equal( + indices, + [[0, 10, 10], [0, 15, 5], [1, 11, 10], [2, 10, 10], [2, 7, 11]]) + np.testing.assert_array_equal( + hw, [[20, 20], [10, 10], [18, 19], [10, 10], [8, 15]]) + np.testing.assert_array_equal( + yx_offset, [[0, 0], [0, 0], [0, 0.5], [0, 0], [0.25, 0.75]]) +
np.testing.assert_array_equal(weights, 1) + + def test_assign_size_and_offset_targets_weights(self): + """Test the assign_size_and_offset_targets function with box weights.""" + def graph_fn(): + box_batch = [ + tf.constant([self._box_center, self._box_lower_left]), + tf.constant([self._box_lower_left, self._box_center_small]), + tf.constant([self._box_center_small, self._box_odd_coordinates]), + ] + + cn_assigner = targetassigner.CenterNetBoxTargetAssigner(4) + weights_batch = [ + tf.constant([0.0, 1.0]), + tf.constant([1.0, 1.0]), + tf.constant([0.0, 0.0]) + ] + indices, hw, yx_offset, weights = cn_assigner.assign_size_and_offset_targets( + 80, 80, box_batch, weights_batch) + return indices, hw, yx_offset, weights + indices, hw, yx_offset, weights = self.execute(graph_fn, []) + self.assertEqual(indices.shape, (6, 3)) + self.assertEqual(hw.shape, (6, 2)) + self.assertEqual(yx_offset.shape, (6, 2)) + self.assertEqual(weights.shape, (6,)) + np.testing.assert_array_equal(indices, + [[0, 10, 10], [0, 15, 5], [1, 15, 5], + [1, 10, 10], [2, 10, 10], [2, 7, 11]]) + np.testing.assert_array_equal( + hw, [[20, 20], [10, 10], [10, 10], [10, 10], [10, 10], [8, 15]]) + np.testing.assert_array_equal( + yx_offset, [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0.25, 0.75]]) + np.testing.assert_array_equal(weights, [0, 1, 1, 1, 0, 0]) + + def test_get_batch_predictions_from_indices(self): + """Test the get_batch_predictions_from_indices function. + + This test verifies that the indices returned by + assign_size_and_offset_targets function work as expected with a predicted + tensor. + + """ + def graph_fn(): + box_batch = [ + tf.constant([self._box_center, self._box_lower_left]), + tf.constant([self._box_center_small, self._box_odd_coordinates]), + ] + + pred_array = np.ones((2, 40, 20, 2), dtype=np.int32) * -1000 + pred_array[0, 20, 10] = [1, 2] + pred_array[0, 30, 5] = [3, 4] + pred_array[1, 20, 10] = [5, 6] + pred_array[1, 14, 11] = [7, 8] + + pred_tensor = tf.constant(pred_array) + + cn_assigner = targetassigner.CenterNetBoxTargetAssigner(4) + indices, _, _, _ = cn_assigner.assign_size_and_offset_targets( + 160, 80, box_batch) + + preds = targetassigner.get_batch_predictions_from_indices( + pred_tensor, indices) + return preds + preds = self.execute(graph_fn, []) + np.testing.assert_array_equal(preds, [[1, 2], [3, 4], [5, 6], [7, 8]]) + + +class CenterNetKeypointTargetAssignerTest(test_case.TestCase): + + def test_keypoint_heatmap_targets(self): + def graph_fn(): + gt_classes_list = [ + tf.one_hot([0, 1, 0, 1], depth=4), + ] + coordinates = tf.expand_dims( + tf.constant( + np.array([[0.1, 0.2, 0.3, 0.4, 0.5], + [float('nan'), 0.7, float('nan'), 0.9, 1.0], + [0.4, 0.1, 0.4, 0.2, 0.1], + [float('nan'), 0.1, 0.5, 0.7, 0.6]]), + dtype=tf.float32), + axis=2) + gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)] + gt_boxes_list = [ + tf.constant( + np.array([[0.0, 0.0, 0.3, 0.3], + [0.0, 0.0, 0.5, 0.5], + [0.0, 0.0, 0.5, 0.5], + [0.0, 0.0, 1.0, 1.0]]), + dtype=tf.float32) + ] + + cn_assigner = targetassigner.CenterNetKeypointTargetAssigner( + stride=4, + class_id=1, + keypoint_indices=[0, 2]) + (targets, num_instances_batch, + valid_mask) = cn_assigner.assign_keypoint_heatmap_targets( + 120, + 80, + gt_keypoints_list, + gt_classes_list, + gt_boxes_list=gt_boxes_list) + return targets, num_instances_batch, valid_mask + + targets, num_instances_batch, valid_mask = self.execute(graph_fn, []) + # keypoint (0.5, 0.5) is selected. The peak is expected to appear at the + # center of the image. 
+ self.assertEqual((15, 10), _array_argmax(targets[0, :, :, 1])) + self.assertAlmostEqual(1.0, targets[0, 15, 10, 1]) + # No peak for the first class since NaN is selected. + self.assertAlmostEqual(0.0, targets[0, 15, 10, 0]) + # Verify the output heatmap shape. + self.assertAllEqual([1, 30, 20, 2], targets.shape) + # Verify the number of instances is correct. + np.testing.assert_array_almost_equal([[0, 1]], + num_instances_batch) + # When calling the function, we specify the class id to be 1 (the instances + # at indices 1 and 3) and the keypoint indices to be [0, 2], meaning that + # the instance at index 1 belongs to the target class but has no valid + # keypoints in it. As a result, the region of that instance's bounding box + # (0.0, 0.0, 0.5, 0.5), which maps to (0, 0, 15, 10) in absolute output + # space, should be blacked out. + self.assertAlmostEqual(np.sum(valid_mask[:, 0:16, 0:11]), 0.0) + # All other values are 1.0, so the sum is: 30 * 20 - 16 * 11 = 424. + self.assertAlmostEqual(np.sum(valid_mask), 424.0) + + def test_assign_keypoints_offset_targets(self): + def graph_fn(): + gt_classes_list = [ + tf.one_hot([0, 1, 0, 1], depth=4), + ] + coordinates = tf.expand_dims( + tf.constant( + np.array([[0.1, 0.2, 0.3, 0.4, 0.5], + [float('nan'), 0.7, float('nan'), 0.9, 0.4], + [0.4, 0.1, 0.4, 0.2, 0.0], + [float('nan'), 0.0, 0.12, 0.7, 0.4]]), + dtype=tf.float32), + axis=2) + gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)] + + cn_assigner = targetassigner.CenterNetKeypointTargetAssigner( + stride=4, + class_id=1, + keypoint_indices=[0, 2]) + (indices, offsets, weights) = cn_assigner.assign_keypoints_offset_targets( + height=120, + width=80, + gt_keypoints_list=gt_keypoints_list, + gt_classes_list=gt_classes_list) + return indices, weights, offsets + indices, weights, offsets = self.execute(graph_fn, []) + # Only the last element has positive weight. + np.testing.assert_array_almost_equal( + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], weights) + # Validate the last element's indices and offsets. + np.testing.assert_array_equal([0, 3, 2], indices[7, :]) + np.testing.assert_array_almost_equal([0.6, 0.4], offsets[7, :]) + + def test_assign_keypoints_offset_targets_radius(self): + def graph_fn(): + gt_classes_list = [ + tf.one_hot([0, 1, 0, 1], depth=4), + ] + coordinates = tf.expand_dims( + tf.constant( + np.array([[0.1, 0.2, 0.3, 0.4, 0.5], + [float('nan'), 0.7, float('nan'), 0.9, 0.4], + [0.4, 0.1, 0.4, 0.2, 0.0], + [float('nan'), 0.0, 0.12, 0.7, 0.4]]), + dtype=tf.float32), + axis=2) + gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)] + + cn_assigner = targetassigner.CenterNetKeypointTargetAssigner( + stride=4, + class_id=1, + keypoint_indices=[0, 2], + peak_radius=1, + per_keypoint_offset=True) + (indices, offsets, weights) = cn_assigner.assign_keypoints_offset_targets( + height=120, + width=80, + gt_keypoints_list=gt_keypoints_list, + gt_classes_list=gt_classes_list) + return indices, weights, offsets + indices, weights, offsets = self.execute(graph_fn, []) + + # There are a total of 8 * 5 (neighbors) = 40 targets. + self.assertAllEqual(indices.shape, [40, 4]) + self.assertAllEqual(offsets.shape, [40, 2]) + self.assertAllEqual(weights.shape, [40]) + # Only the last 5 elements (radius 1 generates 5 valid points) have + # positive weight.
+ np.testing.assert_array_almost_equal([ + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0 + ], weights) + # Validate the last element's (with neighbors) indices and offsets. + np.testing.assert_array_equal([0, 2, 2, 1], indices[35, :]) + np.testing.assert_array_equal([0, 3, 1, 1], indices[36, :]) + np.testing.assert_array_equal([0, 3, 2, 1], indices[37, :]) + np.testing.assert_array_equal([0, 3, 3, 1], indices[38, :]) + np.testing.assert_array_equal([0, 4, 2, 1], indices[39, :]) + np.testing.assert_array_almost_equal([1.6, 0.4], offsets[35, :]) + np.testing.assert_array_almost_equal([0.6, 1.4], offsets[36, :]) + np.testing.assert_array_almost_equal([0.6, 0.4], offsets[37, :]) + np.testing.assert_array_almost_equal([0.6, -0.6], offsets[38, :]) + np.testing.assert_array_almost_equal([-0.4, 0.4], offsets[39, :]) + + def test_assign_joint_regression_targets(self): + def graph_fn(): + gt_boxes_list = [ + tf.constant( + np.array([[0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 1.0]]), + dtype=tf.float32) + ] + gt_classes_list = [ + tf.one_hot([0, 1, 0, 1], depth=4), + ] + coordinates = tf.expand_dims( + tf.constant( + np.array([[0.1, 0.2, 0.3, 0.4, 0.5], + [float('nan'), 0.7, float('nan'), 0.9, 0.4], + [0.4, 0.1, 0.4, 0.2, 0.0], + [float('nan'), 0.0, 0.12, 0.7, 0.4]]), + dtype=tf.float32), + axis=2) + gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)] + + cn_assigner = targetassigner.CenterNetKeypointTargetAssigner( + stride=4, + class_id=1, + keypoint_indices=[0, 2]) + (indices, offsets, weights) = cn_assigner.assign_joint_regression_targets( + height=120, + width=80, + gt_keypoints_list=gt_keypoints_list, + gt_classes_list=gt_classes_list, + gt_boxes_list=gt_boxes_list) + return indices, offsets, weights + indices, offsets, weights = self.execute(graph_fn, []) + np.testing.assert_array_almost_equal( + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], weights) + np.testing.assert_array_equal([0, 15, 10, 1], indices[7, :]) + np.testing.assert_array_almost_equal([-11.4, -7.6], offsets[7, :]) + + def test_assign_joint_regression_targets_radius(self): + def graph_fn(): + gt_boxes_list = [ + tf.constant( + np.array([[0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 1.0]]), + dtype=tf.float32) + ] + gt_classes_list = [ + tf.one_hot([0, 1, 0, 1], depth=4), + ] + coordinates = tf.expand_dims( + tf.constant( + np.array([[0.1, 0.2, 0.3, 0.4, 0.5], + [float('nan'), 0.7, float('nan'), 0.9, 0.4], + [0.4, 0.1, 0.4, 0.2, 0.0], + [float('nan'), 0.0, 0.12, 0.7, 0.4]]), + dtype=tf.float32), + axis=2) + gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)] + + cn_assigner = targetassigner.CenterNetKeypointTargetAssigner( + stride=4, + class_id=1, + keypoint_indices=[0, 2], + peak_radius=1) + (indices, offsets, weights) = cn_assigner.assign_joint_regression_targets( + height=120, + width=80, + gt_keypoints_list=gt_keypoints_list, + gt_classes_list=gt_classes_list, + gt_boxes_list=gt_boxes_list) + return indices, offsets, weights + indices, offsets, weights = self.execute(graph_fn, []) + + # There are total 8 * 5 (neighbors) = 40 targets. + self.assertAllEqual(indices.shape, [40, 4]) + self.assertAllEqual(offsets.shape, [40, 2]) + self.assertAllEqual(weights.shape, [40]) + # Only the last 5 (radius 1 generates 5 valid points) element has positive + # weight. 
+ np.testing.assert_array_almost_equal([ + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0 + ], weights) + # Test the values of the indices and offsets of the last 5 elements. + np.testing.assert_array_equal([0, 14, 10, 1], indices[35, :]) + np.testing.assert_array_equal([0, 15, 9, 1], indices[36, :]) + np.testing.assert_array_equal([0, 15, 10, 1], indices[37, :]) + np.testing.assert_array_equal([0, 15, 11, 1], indices[38, :]) + np.testing.assert_array_equal([0, 16, 10, 1], indices[39, :]) + np.testing.assert_array_almost_equal([-10.4, -7.6], offsets[35, :]) + np.testing.assert_array_almost_equal([-11.4, -6.6], offsets[36, :]) + np.testing.assert_array_almost_equal([-11.4, -7.6], offsets[37, :]) + np.testing.assert_array_almost_equal([-11.4, -8.6], offsets[38, :]) + np.testing.assert_array_almost_equal([-12.4, -7.6], offsets[39, :]) + + +class CenterNetMaskTargetAssignerTest(test_case.TestCase): + + def test_assign_segmentation_targets(self): + def graph_fn(): + gt_masks_list = [ + # Example 0. + tf.constant([ + [ + [1., 0., 0., 0.], + [1., 1., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 0.], + ], + [ + [0., 0., 0., 0.], + [0., 0., 0., 1.], + [0., 0., 0., 0.], + [0., 0., 0., 0.], + ], + [ + [1., 1., 0., 0.], + [1., 1., 0., 0.], + [0., 0., 1., 1.], + [0., 0., 1., 1.], + ] + ], dtype=tf.float32), + # Example 1. + tf.constant([ + [ + [1., 1., 0., 1.], + [1., 1., 1., 1.], + [0., 0., 1., 1.], + [0., 0., 0., 1.], + ], + [ + [0., 0., 0., 0.], + [0., 0., 0., 0.], + [1., 1., 0., 0.], + [1., 1., 0., 0.], + ], + ], dtype=tf.float32), + ] + gt_classes_list = [ + # Example 0. + tf.constant([[1., 0., 0.], + [0., 1., 0.], + [1., 0., 0.]], dtype=tf.float32), + # Example 1. 
+ tf.constant([[0., 1., 0.], + [0., 1., 0.]], dtype=tf.float32) + ] + cn_assigner = targetassigner.CenterNetMaskTargetAssigner(stride=2) + segmentation_target = cn_assigner.assign_segmentation_targets( + gt_masks_list=gt_masks_list, + gt_classes_list=gt_classes_list, + mask_resize_method=targetassigner.ResizeMethod.NEAREST_NEIGHBOR) + return segmentation_target + segmentation_target = self.execute(graph_fn, []) + + expected_seg_target = np.array([ + # Example 0 [[class 0, class 1], [background, class 0]] + [[[1, 0, 0], [0, 1, 0]], + [[0, 0, 0], [1, 0, 0]]], + # Example 1 [[class 1, class 1], [class 1, class 1]] + [[[0, 1, 0], [0, 1, 0]], + [[0, 1, 0], [0, 1, 0]]], + ], dtype=np.float32) + np.testing.assert_array_almost_equal( + expected_seg_target, segmentation_target) + + +if __name__ == '__main__': + tf.enable_v2_behavior() + tf.test.main() diff --git a/models/research/object_detection/data/ava_label_map_v2.1.pbtxt b/models/research/object_detection/data/ava_label_map_v2.1.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..5e2c485682830919a09300ac851e6b0e4bdf3efb --- /dev/null +++ b/models/research/object_detection/data/ava_label_map_v2.1.pbtxt @@ -0,0 +1,240 @@ +item { + name: "bend/bow (at the waist)" + id: 1 +} +item { + name: "crouch/kneel" + id: 3 +} +item { + name: "dance" + id: 4 +} +item { + name: "fall down" + id: 5 +} +item { + name: "get up" + id: 6 +} +item { + name: "jump/leap" + id: 7 +} +item { + name: "lie/sleep" + id: 8 +} +item { + name: "martial art" + id: 9 +} +item { + name: "run/jog" + id: 10 +} +item { + name: "sit" + id: 11 +} +item { + name: "stand" + id: 12 +} +item { + name: "swim" + id: 13 +} +item { + name: "walk" + id: 14 +} +item { + name: "answer phone" + id: 15 +} +item { + name: "carry/hold (an object)" + id: 17 +} +item { + name: "climb (e.g., a mountain)" + id: 20 +} +item { + name: "close (e.g., a door, a box)" + id: 22 +} +item { + name: "cut" + id: 24 +} +item { + name: "dress/put on clothing" + id: 26 +} +item { + name: "drink" + id: 27 +} +item { + name: "drive (e.g., a car, a truck)" + id: 28 +} +item { + name: "eat" + id: 29 +} +item { + name: "enter" + id: 30 +} +item { + name: "hit (an object)" + id: 34 +} +item { + name: "lift/pick up" + id: 36 +} +item { + name: "listen (e.g., to music)" + id: 37 +} +item { + name: "open (e.g., a window, a car door)" + id: 38 +} +item { + name: "play musical instrument" + id: 41 +} +item { + name: "point to (an object)" + id: 43 +} +item { + name: "pull (an object)" + id: 45 +} +item { + name: "push (an object)" + id: 46 +} +item { + name: "put down" + id: 47 +} +item { + name: "read" + id: 48 +} +item { + name: "ride (e.g., a bike, a car, a horse)" + id: 49 +} +item { + name: "sail boat" + id: 51 +} +item { + name: "shoot" + id: 52 +} +item { + name: "smoke" + id: 54 +} +item { + name: "take a photo" + id: 56 +} +item { + name: "text on/look at a cellphone" + id: 57 +} +item { + name: "throw" + id: 58 +} +item { + name: "touch (an object)" + id: 59 +} +item { + name: "turn (e.g., a screwdriver)" + id: 60 +} +item { + name: "watch (e.g., TV)" + id: 61 +} +item { + name: "work on a computer" + id: 62 +} +item { + name: "write" + id: 63 +} +item { + name: "fight/hit (a person)" + id: 64 +} +item { + name: "give/serve (an object) to (a person)" + id: 65 +} +item { + name: "grab (a person)" + id: 66 +} +item { + name: "hand clap" + id: 67 +} +item { + name: "hand shake" + id: 68 +} +item { + name: "hand wave" + id: 69 +} +item { + name: "hug (a person)" + id: 70 +} +item { + name: "kiss 
(a person)" + id: 72 +} +item { + name: "lift (a person)" + id: 73 +} +item { + name: "listen to (a person)" + id: 74 +} +item { + name: "push (another person)" + id: 76 +} +item { + name: "sing to (e.g., self, a person, a group)" + id: 77 +} +item { + name: "take (an object) from (a person)" + id: 78 +} +item { + name: "talk to (e.g., self, a person, a group)" + id: 79 +} +item { + name: "watch (a person)" + id: 80 +} diff --git a/models/research/object_detection/data/face_label_map.pbtxt b/models/research/object_detection/data/face_label_map.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..1c7355db1fd0d0bc468e42f881e65d1dc6d8b8e3 --- /dev/null +++ b/models/research/object_detection/data/face_label_map.pbtxt @@ -0,0 +1,6 @@ +item { + name: "face" + id: 1 + display_name: "face" +} + diff --git a/models/research/object_detection/data/face_person_with_keypoints_label_map.pbtxt b/models/research/object_detection/data/face_person_with_keypoints_label_map.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..181f11b289b7e0520ccc012514cf11338875a635 --- /dev/null +++ b/models/research/object_detection/data/face_person_with_keypoints_label_map.pbtxt @@ -0,0 +1,102 @@ +item: { + id: 1 + name: 'face' + display_name: 'face' + keypoints { + id: 0 + label: "left_eye_center" + } + keypoints { + id: 1 + label: "right_eye_center" + } + keypoints { + id: 2 + label: "nose_tip" + } + keypoints { + id: 3 + label: "mouth_center" + } + keypoints { + id: 4 + label: "left_ear_tragion" + } + keypoints { + id: 5 + label: "right_ear_tragion" + } +} +item: { + id: 2 + name: 'Person' + display_name: 'PERSON' + keypoints { + id: 6 + label: "NOSE_TIP" + } + keypoints { + id: 7 + label: "LEFT_EYE" + } + keypoints { + id: 8 + label: "RIGHT_EYE" + } + keypoints { + id: 9 + label: "LEFT_EAR_TRAGION" + } + keypoints { + id: 10 + label: "RIGHT_EAR_TRAGION" + } + keypoints { + id: 11 + label: "LEFT_SHOULDER" + } + keypoints { + id: 12 + label: "RIGHT_SHOULDER" + } + keypoints { + id: 13 + label: "LEFT_ELBOW" + } + keypoints { + id: 14 + label: "RIGHT_ELBOW" + } + keypoints { + id: 15 + label: "LEFT_WRIST" + } + keypoints { + id: 16 + label: "RIGHT_WRIST" + } + keypoints { + id: 17 + label: "LEFT_HIP" + } + keypoints { + id: 18 + label: "RIGHT_HIP" + } + keypoints { + id: 19 + label: "LEFT_KNEE" + } + keypoints { + id: 20 + label: "RIGHT_KNEE" + } + keypoints { + id: 21 + label: "LEFT_ANKLE" + } + keypoints { + id: 22 + label: "RIGHT_ANKLE" + } +} diff --git a/models/research/object_detection/data/fgvc_2854_classes_label_map.pbtxt b/models/research/object_detection/data/fgvc_2854_classes_label_map.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..009797f046a136ba45aa224f74e420973af19527 --- /dev/null +++ b/models/research/object_detection/data/fgvc_2854_classes_label_map.pbtxt @@ -0,0 +1,14270 @@ +item { + name: "147457" + id: 1 + display_name: "Nicrophorus tomentosus" +} +item { + name: "81923" + id: 2 + display_name: "Halyomorpha halys" +} +item { + name: "7" + id: 3 + display_name: "Aramus guarauna" +} +item { + name: "201041" + id: 4 + display_name: "Rupornis magnirostris" +} +item { + name: "65551" + id: 5 + display_name: "Hyla eximia" +} +item { + name: "106516" + id: 6 + display_name: "Nannothemis bella" +} +item { + name: "154287" + id: 7 + display_name: "Acalymma vittatum" +} +item { + name: "32798" + id: 8 + display_name: "Ramphotyphlops braminus" +} +item { + name: "8229" + id: 9 + display_name: "Cyanocitta cristata" +} +item { + name: "73766" + 
id: 10 + display_name: "Drymarchon melanurus" +} +item { + name: "409639" + id: 11 + display_name: "Aenetus virescens" +} +item { + name: "8234" + id: 12 + display_name: "Cyanocitta stelleri" +} +item { + name: "228593" + id: 13 + display_name: "Polygrammate hebraeicum" +} +item { + name: "53" + id: 14 + display_name: "Balearica regulorum" +} +item { + name: "57399" + id: 15 + display_name: "Fistularia commersonii" +} +item { + name: "81979" + id: 16 + display_name: "Syritta pipiens" +} +item { + name: "73788" + id: 17 + display_name: "Plestiodon fasciatus" +} +item { + name: "73790" + id: 18 + display_name: "Plestiodon inexpectatus" +} +item { + name: "16447" + id: 19 + display_name: "Pyrocephalus rubinus" +} +item { + name: "73792" + id: 20 + display_name: "Plestiodon laticeps" +} +item { + name: "49219" + id: 21 + display_name: "Anguilla rostrata" +} +item { + name: "73797" + id: 22 + display_name: "Plestiodon obsoletus" +} +item { + name: "73803" + id: 23 + display_name: "Plestiodon tetragrammus" +} +item { + name: "122956" + id: 24 + display_name: "Syntomoides imaon" +} +item { + name: "82003" + id: 25 + display_name: "Arion ater" +} +item { + name: "32854" + id: 26 + display_name: "Chamaeleo dilepis" +} +item { + name: "42341" + id: 27 + display_name: "Tragelaphus scriptus" +} +item { + name: "82018" + id: 28 + display_name: "Taeniopoda eques" +} +item { + name: "57443" + id: 29 + display_name: "Libellula quadrimaculata" +} +item { + name: "4885" + id: 30 + display_name: "Recurvirostra americana" +} +item { + name: "178403" + id: 31 + display_name: "Phalaenophana pyramusalis" +} +item { + name: "135027" + id: 32 + display_name: "Agalychnis dacnicolor" +} +item { + name: "49262" + id: 33 + display_name: "Haemulon sciurus" +} +item { + name: "98417" + id: 34 + display_name: "Cordulegaster diastatops" +} +item { + name: "57458" + id: 35 + display_name: "Ladona julia" +} +item { + name: "115" + id: 36 + display_name: "Ardeotis kori" +} +item { + name: "49269" + id: 37 + display_name: "Diodon holocanthus" +} +item { + name: "57463" + id: 38 + display_name: "Papilio canadensis" +} +item { + name: "82043" + id: 39 + display_name: "Monochamus scutellatus" +} +item { + name: "147580" + id: 40 + display_name: "Ceratotherium simum simum" +} +item { + name: "98430" + id: 41 + display_name: "Cordulia shurtleffii" +} +item { + name: "8319" + id: 42 + display_name: "Pica nuttalli" +} +item { + name: "43712" + id: 43 + display_name: "Dasyprocta punctata" +} +item { + name: "8335" + id: 44 + display_name: "Perisoreus canadensis" +} +item { + name: "508048" + id: 45 + display_name: "Antigone canadensis" +} +item { + name: "49297" + id: 46 + display_name: "Aetobatus narinari" +} +item { + name: "82069" + id: 47 + display_name: "Phyciodes pulchella" +} +item { + name: "73149" + id: 48 + display_name: "Parkesia noveboracensis" +} +item { + name: "180379" + id: 49 + display_name: "Ardea herodias occidentalis" +} +item { + name: "73884" + id: 50 + display_name: "Pantherophis emoryi" +} +item { + name: "106653" + id: 51 + display_name: "Nehalennia irene" +} +item { + name: "73887" + id: 52 + display_name: "Pantherophis guttatus" +} +item { + name: "73888" + id: 53 + display_name: "Pantherophis obsoletus" +} +item { + name: "162" + id: 54 + display_name: "Porzana carolina" +} +item { + name: "245925" + id: 55 + display_name: "Siproeta stelenes biplagiata" +} +item { + name: "117302" + id: 56 + display_name: "Physalia physalis" +} +item { + name: "57516" + id: 57 + display_name: "Bombus terrestris" +} +item { 
+ name: "204995" + id: 58 + display_name: "Anas platyrhynchos diazi" +} +item { + name: "49348" + id: 59 + display_name: "Hyles lineata" +} +item { + name: "82117" + id: 60 + display_name: "Dolomedes tenebrosus" +} +item { + name: "114891" + id: 61 + display_name: "Varanus salvator" +} +item { + name: "319695" + id: 62 + display_name: "Epilachna mexicana" +} +item { + name: "41168" + id: 63 + display_name: "Desmodus rotundus" +} +item { + name: "13688" + id: 64 + display_name: "Motacilla cinerea" +} +item { + name: "57556" + id: 65 + display_name: "Papio ursinus" +} +item { + name: "16598" + id: 66 + display_name: "Empidonax difficilis" +} +item { + name: "16602" + id: 67 + display_name: "Empidonax minimus" +} +item { + name: "16604" + id: 68 + display_name: "Empidonax fulvifrons" +} +item { + name: "409181" + id: 69 + display_name: "Trite planiceps" +} +item { + name: "82144" + id: 70 + display_name: "Hemileuca eglanterina" +} +item { + name: "16611" + id: 71 + display_name: "Empidonax traillii" +} +item { + name: "82153" + id: 72 + display_name: "Ceratomia undulosa" +} +item { + name: "82155" + id: 73 + display_name: "Bittacomorpha clavipes" +} +item { + name: "205036" + id: 74 + display_name: "Xanthorhoe lacustrata" +} +item { + name: "16624" + id: 75 + display_name: "Empidonax hammondii" +} +item { + name: "16625" + id: 76 + display_name: "Empidonax occidentalis" +} +item { + name: "243" + id: 77 + display_name: "Rallus limicola" +} +item { + name: "41" + id: 78 + display_name: "Grus grus" +} +item { + name: "49402" + id: 79 + display_name: "Abudefduf saxatilis" +} +item { + name: "58550" + id: 80 + display_name: "Callophrys niphon" +} +item { + name: "205055" + id: 81 + display_name: "Zopherus nodulosus haldemani" +} +item { + name: "82177" + id: 82 + display_name: "Hermetia illucens" +} +item { + name: "9601" + id: 83 + display_name: "Quiscalus major" +} +item { + name: "7101" + id: 84 + display_name: "Branta leucopsis" +} +item { + name: "8470" + id: 85 + display_name: "Cyanocorax yucatanicus" +} +item { + name: "74009" + id: 86 + display_name: "Zamenis longissimus" +} +item { + name: "8474" + id: 87 + display_name: "Cyanocorax yncas" +} +item { + name: "82204" + id: 88 + display_name: "Nadata gibbosa" +} +item { + name: "123168" + id: 89 + display_name: "Ensatina eschscholtzii xanthoptica" +} +item { + name: "82210" + id: 90 + display_name: "Heterocampa biundata" +} +item { + name: "48284" + id: 91 + display_name: "Oniscus asellus" +} +item { + name: "4146" + id: 92 + display_name: "Oceanites oceanicus" +} +item { + name: "82225" + id: 93 + display_name: "Lophocampa caryae" +} +item { + name: "9609" + id: 94 + display_name: "Quiscalus niger" +} +item { + name: "65849" + id: 95 + display_name: "Incilius nebulifer" +} +item { + name: "207583" + id: 96 + display_name: "Miomantis caffra" +} +item { + name: "491839" + id: 97 + display_name: "Pyrausta insequalis" +} +item { + name: "74048" + id: 98 + display_name: "Alces americanus" +} +item { + name: "57665" + id: 99 + display_name: "Cotinis mutabilis" +} +item { + name: "65860" + id: 100 + display_name: "Incilius valliceps" +} +item { + name: "52911" + id: 101 + display_name: "Dolichovespula maculata" +} +item { + name: "8524" + id: 102 + display_name: "Psilorhinus morio" +} +item { + name: "49491" + id: 103 + display_name: "Thalassoma bifasciatum" +} +item { + name: "41301" + id: 104 + display_name: "Tadarida brasiliensis" +} +item { + name: "57687" + id: 105 + display_name: "Xylocopa varipuncta" +} +item { + name: "57689" + id: 106 + 
display_name: "Bombus vosnesenskii" +} +item { + name: "57690" + id: 107 + display_name: "Bombus sonorus" +} +item { + name: "33118" + id: 108 + display_name: "Basiliscus vittatus" +} +item { + name: "205151" + id: 109 + display_name: "Phlogophora meticulosa" +} +item { + name: "49504" + id: 110 + display_name: "Callinectes sapidus" +} +item { + name: "16737" + id: 111 + display_name: "Megarynchus pitangua" +} +item { + name: "357" + id: 112 + display_name: "Gallinula tenebrosa" +} +item { + name: "82278" + id: 113 + display_name: "Ameiurus melas" +} +item { + name: "82279" + id: 114 + display_name: "Automeris io" +} +item { + name: "505478" + id: 115 + display_name: "Gallus gallus domesticus" +} +item { + name: "33135" + id: 116 + display_name: "Crotaphytus collaris" +} +item { + name: "41328" + id: 117 + display_name: "Lavia frons" +} +item { + name: "196979" + id: 118 + display_name: "Anaxyrus boreas halophilus" +} +item { + name: "44902" + id: 119 + display_name: "Sigmodon hispidus" +} +item { + name: "1428" + id: 120 + display_name: "Numida meleagris" +} +item { + name: "119153" + id: 121 + display_name: "Junco hyemalis caniceps" +} +item { + name: "49539" + id: 122 + display_name: "Pisaster brevispinus" +} +item { + name: "328068" + id: 123 + display_name: "Belocaulus angustipes" +} +item { + name: "120214" + id: 124 + display_name: "Clostera albosigma" +} +item { + name: "16779" + id: 125 + display_name: "Tyrannus vociferans" +} +item { + name: "16782" + id: 126 + display_name: "Tyrannus tyrannus" +} +item { + name: "16783" + id: 127 + display_name: "Tyrannus forficatus" +} +item { + name: "16784" + id: 128 + display_name: "Tyrannus crassirostris" +} +item { + name: "57745" + id: 129 + display_name: "Linckia laevigata" +} +item { + name: "205202" + id: 130 + display_name: "Ecliptopera silaceata" +} +item { + name: "205203" + id: 131 + display_name: "Dyspteris abortivaria" +} +item { + name: "16791" + id: 132 + display_name: "Tyrannus verticalis" +} +item { + name: "16793" + id: 133 + display_name: "Tyrannus savana" +} +item { + name: "205213" + id: 134 + display_name: "Caripeta divisata" +} +item { + name: "49566" + id: 135 + display_name: "Cicindela sexguttata" +} +item { + name: "491935" + id: 136 + display_name: "Thylacodes squamigerus" +} +item { + name: "205216" + id: 137 + display_name: "Cerma cerintha" +} +item { + name: "39665" + id: 138 + display_name: "Caretta caretta" +} +item { + name: "147881" + id: 139 + display_name: "Trichechus manatus latirostris" +} +item { + name: "28743" + id: 140 + display_name: "Salvadora hexalepis" +} +item { + name: "205231" + id: 141 + display_name: "Idaea dimidiata" +} +item { + name: "205233" + id: 142 + display_name: "Iridopsis larvaria" +} +item { + name: "205235" + id: 143 + display_name: "Leuconycta diphteroides" +} +item { + name: "436" + id: 144 + display_name: "Gallirallus australis" +} +item { + name: "205238" + id: 145 + display_name: "Metanema inatomaria" +} +item { + name: "49591" + id: 146 + display_name: "Lepomis macrochirus" +} +item { + name: "229817" + id: 147 + display_name: "Raphia frater" +} +item { + name: "49594" + id: 148 + display_name: "Pomoxis nigromaculatus" +} +item { + name: "65979" + id: 149 + display_name: "Lithobates catesbeianus" +} +item { + name: "49596" + id: 150 + display_name: "Salvelinus fontinalis" +} +item { + name: "65982" + id: 151 + display_name: "Lithobates clamitans" +} +item { + name: "8649" + id: 152 + display_name: "Calocitta formosa" +} +item { + name: "8650" + id: 153 + display_name: 
"Calocitta colliei" +} +item { + name: "82379" + id: 154 + display_name: "Hemaris thysbe" +} +item { + name: "49614" + id: 155 + display_name: "Lepomis gibbosus" +} +item { + name: "63028" + id: 156 + display_name: "Hypercompe scribonia" +} +item { + name: "39672" + id: 157 + display_name: "Eretmochelys imbricata" +} +item { + name: "66003" + id: 158 + display_name: "Lithobates pipiens" +} +item { + name: "197077" + id: 159 + display_name: "Vanessa kershawi" +} +item { + name: "473" + id: 160 + display_name: "Fulica americana" +} +item { + name: "147930" + id: 161 + display_name: "Rabidosa rabida" +} +item { + name: "147931" + id: 162 + display_name: "Panoquina ocola" +} +item { + name: "66012" + id: 163 + display_name: "Lithobates sylvaticus" +} +item { + name: "8671" + id: 164 + display_name: "Pachyramphus aglaiae" +} +item { + name: "41440" + id: 165 + display_name: "Phocoena phocoena" +} +item { + name: "27388" + id: 166 + display_name: "Carphophis amoenus" +} +item { + name: "82418" + id: 167 + display_name: "Cicindela punctulata" +} +item { + name: "25078" + id: 168 + display_name: "Gastrophryne carolinensis" +} +item { + name: "82425" + id: 169 + display_name: "Cicindela repanda" +} +item { + name: "143446" + id: 170 + display_name: "Paonias myops" +} +item { + name: "41478" + id: 171 + display_name: "Eschrichtius robustus" +} +item { + name: "5200" + id: 172 + display_name: "Buteo lagopus" +} +item { + name: "148908" + id: 173 + display_name: "Chrysodeixis includens" +} +item { + name: "41482" + id: 174 + display_name: "Tursiops truncatus" +} +item { + name: "6914" + id: 175 + display_name: "Cygnus atratus" +} +item { + name: "464301" + id: 176 + display_name: "Philesturnus rufusater" +} +item { + name: "129226" + id: 177 + display_name: "Chytolita morbidalis" +} +item { + name: "180759" + id: 178 + display_name: "Aphonopelma iodius" +} +item { + name: "135318" + id: 179 + display_name: "Apantesis phalerata" +} +item { + name: "49699" + id: 180 + display_name: "Pisaster ochraceus" +} +item { + name: "49700" + id: 181 + display_name: "Coluber lateralis lateralis" +} +item { + name: "61532" + id: 182 + display_name: "Propylea quatuordecimpunctata" +} +item { + name: "4368" + id: 183 + display_name: "Larus marinus" +} +item { + name: "41521" + id: 184 + display_name: "Orcinus orca" +} +item { + name: "49716" + id: 185 + display_name: "Paonias excaecata" +} +item { + name: "41526" + id: 186 + display_name: "Delphinus delphis" +} +item { + name: "49723" + id: 187 + display_name: "Pugettia producta" +} +item { + name: "16956" + id: 188 + display_name: "Pitangus sulphuratus" +} +item { + name: "210607" + id: 189 + display_name: "Diastictis fracturalis" +} +item { + name: "148030" + id: 190 + display_name: "Equus asinus" +} +item { + name: "6924" + id: 191 + display_name: "Anas rubripes" +} +item { + name: "30844" + id: 192 + display_name: "Bothriechis schlegelii" +} +item { + name: "123628" + id: 193 + display_name: "Argynnis paphia" +} +item { + name: "131676" + id: 194 + display_name: "Anthus novaeseelandiae novaeseelandiae" +} +item { + name: "41566" + id: 195 + display_name: "Megaptera novaeangliae" +} +item { + name: "49759" + id: 196 + display_name: "Pyrgus oileus" +} +item { + name: "49761" + id: 197 + display_name: "Anartia jatrophae" +} +item { + name: "49766" + id: 198 + display_name: "Heliconius charithonia" +} +item { + name: "33383" + id: 199 + display_name: "Coleonyx brevis" +} +item { + name: "33384" + id: 200 + display_name: "Coleonyx elegans" +} +item { + name: "312764" + 
id: 201 + display_name: "Euptoieta hegesia meridiania" +} +item { + name: "82538" + id: 202 + display_name: "Vanessa gonerilla" +} +item { + name: "33387" + id: 203 + display_name: "Coleonyx variegatus" +} +item { + name: "56082" + id: 204 + display_name: "Aeshna canadensis" +} +item { + name: "17008" + id: 205 + display_name: "Sayornis phoebe" +} +item { + name: "200808" + id: 206 + display_name: "Sceloporus graciosus vandenburgianus" +} +item { + name: "17013" + id: 207 + display_name: "Sayornis nigricans" +} +item { + name: "122381" + id: 208 + display_name: "Cupido comyntas" +} +item { + name: "123516" + id: 209 + display_name: "Mydas clavatus" +} +item { + name: "8834" + id: 210 + display_name: "Tityra semifasciata" +} +item { + name: "146199" + id: 211 + display_name: "Lampropeltis californiae" +} +item { + name: "17858" + id: 212 + display_name: "Dryocopus lineatus" +} +item { + name: "334616" + id: 213 + display_name: "Battus philenor hirsuta" +} +item { + name: "82582" + id: 214 + display_name: "Labidomera clivicollis" +} +item { + name: "204699" + id: 215 + display_name: "Pseudothyatira cymatophoroides" +} +item { + name: "41638" + id: 216 + display_name: "Ursus americanus" +} +item { + name: "27420" + id: 217 + display_name: "Desmognathus fuscus" +} +item { + name: "81584" + id: 218 + display_name: "Anisota virginiensis" +} +item { + name: "49848" + id: 219 + display_name: "Navanax inermis" +} +item { + name: "143476" + id: 220 + display_name: "Calledapteryx dryopterata" +} +item { + name: "41663" + id: 221 + display_name: "Procyon lotor" +} +item { + name: "49857" + id: 222 + display_name: "Aplysia vaccaria" +} +item { + name: "41673" + id: 223 + display_name: "Nasua narica" +} +item { + name: "41676" + id: 224 + display_name: "Bassariscus astutus" +} +item { + name: "27427" + id: 225 + display_name: "Aneides lugubris" +} +item { + name: "418530" + id: 226 + display_name: "Porphyrio melanotus" +} +item { + name: "311419" + id: 227 + display_name: "Neobernaya spadicea" +} +item { + name: "113502" + id: 228 + display_name: "Sympetrum costiferum" +} +item { + name: "66278" + id: 229 + display_name: "Oophaga pumilio" +} +item { + name: "6951" + id: 230 + display_name: "Anas bahamensis" +} +item { + name: "213740" + id: 231 + display_name: "Antaeotricha schlaegeri" +} +item { + name: "143485" + id: 232 + display_name: "Xanthorhoe ferrugata" +} +item { + name: "120275" + id: 233 + display_name: "Euphyia intermediata" +} +item { + name: "48035" + id: 234 + display_name: "Strongylocentrotus purpuratus" +} +item { + name: "41728" + id: 235 + display_name: "Mirounga angustirostris" +} +item { + name: "41733" + id: 236 + display_name: "Halichoerus grypus" +} +item { + name: "41740" + id: 237 + display_name: "Zalophus californianus" +} +item { + name: "118914" + id: 238 + display_name: "Echinargus isola" +} +item { + name: "4936" + id: 239 + display_name: "Egretta novaehollandiae" +} +item { + name: "131862" + id: 240 + display_name: "Typocerus velutinus" +} +item { + name: "55401" + id: 241 + display_name: "Pieris brassicae" +} +item { + name: "41752" + id: 242 + display_name: "Arctocephalus forsteri" +} +item { + name: "41755" + id: 243 + display_name: "Eumetopias jubatus" +} +item { + name: "123676" + id: 244 + display_name: "Anas crecca carolinensis" +} +item { + name: "41763" + id: 245 + display_name: "Phocarctos hookeri" +} +item { + name: "181034" + id: 246 + display_name: "Cervus elaphus canadensis" +} +item { + name: "49964" + id: 247 + display_name: "Ginglymostoma cirratum" +} 
+item { + name: "213809" + id: 248 + display_name: "Anticarsia gemmatalis" +} +item { + name: "49972" + id: 249 + display_name: "Battus philenor" +} +item { + name: "205623" + id: 250 + display_name: "Microstylum morosum" +} +item { + name: "336697" + id: 251 + display_name: "Arctia villica" +} +item { + name: "41789" + id: 252 + display_name: "Taxidea taxus" +} +item { + name: "48724" + id: 253 + display_name: "Phidiana hiltoni" +} +item { + name: "123713" + id: 254 + display_name: "Neoscona oaxacensis" +} +item { + name: "33602" + id: 255 + display_name: "Tarentola mauritanica" +} +item { + name: "846" + id: 256 + display_name: "Alectoris chukar" +} +item { + name: "41808" + id: 257 + display_name: "Mustela erminea" +} +item { + name: "50001" + id: 258 + display_name: "Terrapene carolina carolina" +} +item { + name: "41810" + id: 259 + display_name: "Mustela frenata" +} +item { + name: "82774" + id: 260 + display_name: "Oryctes nasicornis" +} +item { + name: "41815" + id: 261 + display_name: "Mustela nivalis" +} +item { + name: "4239" + id: 262 + display_name: "Tachybaptus dominicus" +} +item { + name: "344926" + id: 263 + display_name: "Artemisiospiza belli" +} +item { + name: "82792" + id: 264 + display_name: "Celastrina neglecta" +} +item { + name: "41841" + id: 265 + display_name: "Meles meles" +} +item { + name: "882" + id: 266 + display_name: "Gallus gallus" +} +item { + name: "125758" + id: 267 + display_name: "Mercenaria mercenaria" +} +item { + name: "9081" + id: 268 + display_name: "Cardinalis sinuatus" +} +item { + name: "9083" + id: 269 + display_name: "Cardinalis cardinalis" +} +item { + name: "9092" + id: 270 + display_name: "Melospiza lincolnii" +} +item { + name: "4246" + id: 271 + display_name: "Podilymbus podiceps" +} +item { + name: "9096" + id: 272 + display_name: "Melospiza georgiana" +} +item { + name: "906" + id: 273 + display_name: "Meleagris gallopavo" +} +item { + name: "50059" + id: 274 + display_name: "Limacia cockerelli" +} +item { + name: "394124" + id: 275 + display_name: "Orthodera novaezealandiae" +} +item { + name: "82832" + id: 276 + display_name: "Cosmopepla lintneriana" +} +item { + name: "913" + id: 277 + display_name: "Meleagris ocellata" +} +item { + name: "41877" + id: 278 + display_name: "Conepatus leuconotus" +} +item { + name: "196419" + id: 279 + display_name: "Euborellia annulipes" +} +item { + name: "50071" + id: 280 + display_name: "Erynnis horatius" +} +item { + name: "41880" + id: 281 + display_name: "Mephitis mephitis" +} +item { + name: "50073" + id: 282 + display_name: "Dryas iulia" +} +item { + name: "173793" + id: 283 + display_name: "Diphthera festiva" +} +item { + name: "41886" + id: 284 + display_name: "Crocuta crocuta" +} +item { + name: "30683" + id: 285 + display_name: "Agkistrodon contortrix contortrix" +} +item { + name: "931" + id: 286 + display_name: "Lagopus lagopus" +} +item { + name: "41901" + id: 287 + display_name: "Herpestes javanicus" +} +item { + name: "143517" + id: 288 + display_name: "Biston betularia" +} +item { + name: "9139" + id: 289 + display_name: "Spizella atrogularis" +} +item { + name: "8350" + id: 290 + display_name: "Pyrrhocorax graculus" +} +item { + name: "9144" + id: 291 + display_name: "Spizella breweri" +} +item { + name: "12936" + id: 292 + display_name: "Sialia currucoides" +} +item { + name: "9152" + id: 293 + display_name: "Spizella pusilla" +} +item { + name: "68229" + id: 294 + display_name: "Tramea carolina" +} +item { + name: "6987" + id: 295 + display_name: "Anas superciliosa" +} +item { 
+ name: "9156" + id: 296 + display_name: "Passerella iliaca" +} +item { + name: "202315" + id: 297 + display_name: "Romaleon antennarium" +} +item { + name: "4257" + id: 298 + display_name: "Phoenicopterus ruber" +} +item { + name: "25545" + id: 299 + display_name: "Rana aurora" +} +item { + name: "15282" + id: 300 + display_name: "Sylvia atricapilla" +} +item { + name: "103927" + id: 301 + display_name: "Ladona deplanata" +} +item { + name: "17356" + id: 302 + display_name: "Vireo bellii" +} +item { + name: "26765" + id: 303 + display_name: "Ambystoma mavortium" +} +item { + name: "205777" + id: 304 + display_name: "Plectrodera scalator" +} +item { + name: "17362" + id: 305 + display_name: "Vireo plumbeus" +} +item { + name: "99283" + id: 306 + display_name: "Didymops transversa" +} +item { + name: "17364" + id: 307 + display_name: "Vireo philadelphicus" +} +item { + name: "17365" + id: 308 + display_name: "Vireo flavifrons" +} +item { + name: "17366" + id: 309 + display_name: "Vireo olivaceus" +} +item { + name: "9182" + id: 310 + display_name: "Zonotrichia querula" +} +item { + name: "17375" + id: 311 + display_name: "Vireo huttoni" +} +item { + name: "9184" + id: 312 + display_name: "Zonotrichia albicollis" +} +item { + name: "9185" + id: 313 + display_name: "Zonotrichia atricapilla" +} +item { + name: "50147" + id: 314 + display_name: "Celithemis eponina" +} +item { + name: "47585" + id: 315 + display_name: "Crassostrea virginica" +} +item { + name: "9195" + id: 316 + display_name: "Emberiza citrinella" +} +item { + name: "41964" + id: 317 + display_name: "Panthera leo" +} +item { + name: "6994" + id: 318 + display_name: "Bucephala islandica" +} +item { + name: "52506" + id: 319 + display_name: "Adalia bipunctata" +} +item { + name: "9201" + id: 320 + display_name: "Emberiza schoeniclus" +} +item { + name: "17394" + id: 321 + display_name: "Vireo gilvus" +} +item { + name: "25591" + id: 322 + display_name: "Rana temporaria" +} +item { + name: "41976" + id: 323 + display_name: "Lynx rufus" +} +item { + name: "214015" + id: 324 + display_name: "Apoda y-inversum" +} +item { + name: "50176" + id: 325 + display_name: "Enallagma vesperum" +} +item { + name: "99331" + id: 326 + display_name: "Diplacodes trivialis" +} +item { + name: "50181" + id: 327 + display_name: "Loxosceles reclusa" +} +item { + name: "74758" + id: 328 + display_name: "Neovison vison" +} +item { + name: "123912" + id: 329 + display_name: "Charaxes jasius" +} +item { + name: "41997" + id: 330 + display_name: "Leopardus pardalis" +} +item { + name: "123920" + id: 331 + display_name: "Dorcus parallelipipedus" +} +item { + name: "132334" + id: 332 + display_name: "Urbanus procne" +} +item { + name: "123922" + id: 333 + display_name: "Abudefduf sordidus" +} +item { + name: "9236" + id: 334 + display_name: "Serinus serinus" +} +item { + name: "42007" + id: 335 + display_name: "Puma concolor" +} +item { + name: "9240" + id: 336 + display_name: "Serinus mozambicus" +} +item { + name: "148506" + id: 337 + display_name: "Melanis pixe" +} +item { + name: "58399" + id: 338 + display_name: "Urosalpinx cinerea" +} +item { + name: "312353" + id: 339 + display_name: "Leptophobia aripa elodia" +} +item { + name: "148517" + id: 340 + display_name: "Heliopetes laviana" +} +item { + name: "73905" + id: 341 + display_name: "Phrynosoma cornutum" +} +item { + name: "39772" + id: 342 + display_name: "Chrysemys picta marginata" +} +item { + name: "25646" + id: 343 + display_name: "Rana boylii" +} +item { + name: "62984" + id: 344 + display_name: 
"Aedes albopictus" +} +item { + name: "123959" + id: 345 + display_name: "Ensatina eschscholtzii oregonensis" +} +item { + name: "1081" + id: 346 + display_name: "Lophura leucomelanos" +} +item { + name: "39775" + id: 347 + display_name: "Chrysemys picta picta" +} +item { + name: "42046" + id: 348 + display_name: "Canis mesomelas" +} +item { + name: "42048" + id: 349 + display_name: "Canis lupus" +} +item { + name: "42051" + id: 350 + display_name: "Canis latrans" +} +item { + name: "9284" + id: 351 + display_name: "Euphonia elegantissima" +} +item { + name: "25669" + id: 352 + display_name: "Rana dalmatina" +} +item { + name: "9287" + id: 353 + display_name: "Euphonia hirundinacea" +} +item { + name: "9291" + id: 354 + display_name: "Euphonia affinis" +} +item { + name: "222284" + id: 355 + display_name: "Iridopsis defectaria" +} +item { + name: "74832" + id: 356 + display_name: "Papio anubis" +} +item { + name: "148563" + id: 357 + display_name: "Myscelia ethusa" +} +item { + name: "42069" + id: 358 + display_name: "Vulpes vulpes" +} +item { + name: "9743" + id: 359 + display_name: "Agelaius tricolor" +} +item { + name: "42076" + id: 360 + display_name: "Urocyon cinereoargenteus" +} +item { + name: "509025" + id: 361 + display_name: "Momotus lessonii" +} +item { + name: "17506" + id: 362 + display_name: "Zosterops japonicus" +} +item { + name: "4283" + id: 363 + display_name: "Phalacrocorax pelagicus" +} +item { + name: "58469" + id: 364 + display_name: "Thorybes pylades" +} +item { + name: "9319" + id: 365 + display_name: "Icterus cucullatus" +} +item { + name: "58473" + id: 366 + display_name: "Erynnis icelus" +} +item { + name: "58475" + id: 367 + display_name: "Erynnis juvenalis" +} +item { + name: "42093" + id: 368 + display_name: "Lycaon pictus" +} +item { + name: "58478" + id: 369 + display_name: "Erynnis baptisiae" +} +item { + name: "9328" + id: 370 + display_name: "Icterus graduacauda" +} +item { + name: "58481" + id: 371 + display_name: "Ancyloxypha numitor" +} +item { + name: "132210" + id: 372 + display_name: "Deloyala guttata" +} +item { + name: "58484" + id: 373 + display_name: "Thymelicus lineola" +} +item { + name: "13701" + id: 374 + display_name: "Motacilla aguimp" +} +item { + name: "410743" + id: 375 + display_name: "Anas superciliosa \303\227 platyrhynchos" +} +item { + name: "9336" + id: 376 + display_name: "Icterus pustulatus" +} +item { + name: "9339" + id: 377 + display_name: "Icterus gularis" +} +item { + name: "124031" + id: 378 + display_name: "Agrius convolvuli" +} +item { + name: "42113" + id: 379 + display_name: "Pecari tajacu" +} +item { + name: "132227" + id: 380 + display_name: "Lethe appalachia" +} +item { + name: "113516" + id: 381 + display_name: "Sympetrum madidum" +} +item { + name: "58509" + id: 382 + display_name: "Anatrytone logan" +} +item { + name: "83086" + id: 383 + display_name: "Eurytides marcellus" +} +item { + name: "58511" + id: 384 + display_name: "Poanes viator" +} +item { + name: "83090" + id: 385 + display_name: "Epimecis hortaria" +} +item { + name: "115859" + id: 386 + display_name: "Micrurus tener tener" +} +item { + name: "129902" + id: 387 + display_name: "Camponotus pennsylvanicus" +} +item { + name: "42134" + id: 388 + display_name: "Sus scrofa" +} +item { + name: "58519" + id: 389 + display_name: "Pompeius verna" +} +item { + name: "205977" + id: 390 + display_name: "Coccinella undecimpunctata" +} +item { + name: "58523" + id: 391 + display_name: "Papilio polyxenes" +} +item { + name: "58525" + id: 392 + display_name: 
"Papilio troilus" +} +item { + name: "410783" + id: 393 + display_name: "Hypoblemum albovittatum" +} +item { + name: "9376" + id: 394 + display_name: "Carduelis cannabina" +} +item { + name: "58531" + id: 395 + display_name: "Colias philodice" +} +item { + name: "50340" + id: 396 + display_name: "Hylephila phyleus" +} +item { + name: "42149" + id: 397 + display_name: "Hippopotamus amphibius" +} +item { + name: "50342" + id: 398 + display_name: "Erythrodiplax umbrata" +} +item { + name: "12883" + id: 399 + display_name: "Catharus minimus" +} +item { + name: "28557" + id: 400 + display_name: "Storeria occipitomaculata" +} +item { + name: "199" + id: 401 + display_name: "Amaurornis phoenicurus" +} +item { + name: "58541" + id: 402 + display_name: "Satyrium liparops" +} +item { + name: "58543" + id: 403 + display_name: "Callophrys augustinus" +} +item { + name: "42161" + id: 404 + display_name: "Dama dama" +} +item { + name: "61508" + id: 405 + display_name: "Ischnura elegans" +} +item { + name: "1204" + id: 406 + display_name: "Pavo cristatus" +} +item { + name: "42166" + id: 407 + display_name: "Axis axis" +} +item { + name: "146797" + id: 408 + display_name: "Platynota idaeusalis" +} +item { + name: "58556" + id: 409 + display_name: "Celastrina ladon" +} +item { + name: "367477" + id: 410 + display_name: "Rallus crepitans" +} +item { + name: "58561" + id: 411 + display_name: "Libytheana carinenta" +} +item { + name: "58563" + id: 412 + display_name: "Speyeria aphrodite" +} +item { + name: "58564" + id: 413 + display_name: "Boloria bellona" +} +item { + name: "413489" + id: 414 + display_name: "Nestor meridionalis septentrionalis" +} +item { + name: "42184" + id: 415 + display_name: "Capreolus capreolus" +} +item { + name: "9419" + id: 416 + display_name: "Pipilo chlorurus" +} +item { + name: "9420" + id: 417 + display_name: "Pipilo maculatus" +} +item { + name: "9424" + id: 418 + display_name: "Pipilo erythrophthalmus" +} +item { + name: "99539" + id: 419 + display_name: "Dorocordulia libera" +} +item { + name: "58580" + id: 420 + display_name: "Polygonia progne" +} +item { + name: "58581" + id: 421 + display_name: "Nymphalis vaualbum" +} +item { + name: "42199" + id: 422 + display_name: "Rangifer tarandus" +} +item { + name: "58586" + id: 423 + display_name: "Limenitis archippus" +} +item { + name: "58587" + id: 424 + display_name: "Asterocampa clyton" +} +item { + name: "42206" + id: 425 + display_name: "Cervus elaphus" +} +item { + name: "312543" + id: 426 + display_name: "Anartia jatrophae luteipicta" +} +item { + name: "204094" + id: 427 + display_name: "Cairina moschata domestica" +} +item { + name: "4304" + id: 428 + display_name: "Phalacrocorax varius" +} +item { + name: "42210" + id: 429 + display_name: "Cervus nippon" +} +item { + name: "17638" + id: 430 + display_name: "Picoides dorsalis" +} +item { + name: "132330" + id: 431 + display_name: "Chlosyne janais" +} +item { + name: "58603" + id: 432 + display_name: "Megisto cymela" +} +item { + name: "42220" + id: 433 + display_name: "Odocoileus hemionus" +} +item { + name: "17645" + id: 434 + display_name: "Picoides nuttallii" +} +item { + name: "58606" + id: 435 + display_name: "Cercyonis pegala" +} +item { + name: "42223" + id: 436 + display_name: "Odocoileus virginianus" +} +item { + name: "58609" + id: 437 + display_name: "Lepisosteus osseus" +} +item { + name: "17650" + id: 438 + display_name: "Picoides scalaris" +} +item { + name: "132339" + id: 439 + display_name: "Anthanassa texana" +} +item { + name: "58612" + id: 440 + 
display_name: "Carassius auratus" +} +item { + name: "1406" + id: 441 + display_name: "Callipepla gambelii" +} +item { + name: "9462" + id: 442 + display_name: "Pyrrhula pyrrhula" +} +item { + name: "4308" + id: 443 + display_name: "Phalacrocorax brasilianus" +} +item { + name: "17660" + id: 444 + display_name: "Picoides pubescens" +} +item { + name: "1280" + id: 445 + display_name: "Colinus virginianus" +} +item { + name: "129920" + id: 446 + display_name: "Calliostoma ligatum" +} +item { + name: "58627" + id: 447 + display_name: "Perca flavescens" +} +item { + name: "148742" + id: 448 + display_name: "Hamadryas februa" +} +item { + name: "39809" + id: 449 + display_name: "Terrapene ornata ornata" +} +item { + name: "115979" + id: 450 + display_name: "Plestiodon skiltonianus skiltonianus" +} +item { + name: "9484" + id: 451 + display_name: "Sporophila torqueola" +} +item { + name: "17678" + id: 452 + display_name: "Picoides villosus" +} +item { + name: "3862" + id: 453 + display_name: "Calidris pusilla" +} +item { + name: "70421" + id: 454 + display_name: "Acris blanchardi" +} +item { + name: "124183" + id: 455 + display_name: "Phlogophora periculosa" +} +item { + name: "124184" + id: 456 + display_name: "Plodia interpunctella" +} +item { + name: "99609" + id: 457 + display_name: "Dromogomphus spinosus" +} +item { + name: "99610" + id: 458 + display_name: "Dromogomphus spoliatus" +} +item { + name: "17694" + id: 459 + display_name: "Picoides arcticus" +} +item { + name: "113521" + id: 460 + display_name: "Sympetrum pallipes" +} +item { + name: "320801" + id: 461 + display_name: "Aspidoscelis tesselata" +} +item { + name: "7047" + id: 462 + display_name: "Aythya marila" +} +item { + name: "4317" + id: 463 + display_name: "Phaethon aethereus" +} +item { + name: "81606" + id: 464 + display_name: "Littorina littorea" +} +item { + name: "99891" + id: 465 + display_name: "Enallagma aspersum" +} +item { + name: "9528" + id: 466 + display_name: "Sturnella magna" +} +item { + name: "99641" + id: 467 + display_name: "Dythemis fugax" +} +item { + name: "99644" + id: 468 + display_name: "Dythemis nigrescens" +} +item { + name: "39818" + id: 469 + display_name: "Terrapene carolina triunguis" +} +item { + name: "99647" + id: 470 + display_name: "Dythemis velox" +} +item { + name: "148800" + id: 471 + display_name: "Chioides albofasciatus" +} +item { + name: "19339" + id: 472 + display_name: "Melopsittacus undulatus" +} +item { + name: "47509" + id: 473 + display_name: "Diaulula sandiegensis" +} +item { + name: "148810" + id: 474 + display_name: "Anaea aidea" +} +item { + name: "123070" + id: 475 + display_name: "Capra hircus" +} +item { + name: "7054" + id: 476 + display_name: "Aythya affinis" +} +item { + name: "99897" + id: 477 + display_name: "Enallagma civile" +} +item { + name: "42328" + id: 478 + display_name: "Kobus ellipsiprymnus" +} +item { + name: "48328" + id: 479 + display_name: "Aurelia aurita" +} +item { + name: "132445" + id: 480 + display_name: "Conchylodes ovulalis" +} +item { + name: "215271" + id: 481 + display_name: "Bleptina caradrinalis" +} +item { + name: "83297" + id: 482 + display_name: "Scarus rubroviolaceus" +} +item { + name: "42347" + id: 483 + display_name: "Rupicapra rupicapra" +} +item { + name: "7058" + id: 484 + display_name: "Aythya novaeseelandiae" +} +item { + name: "52457" + id: 485 + display_name: "Chaetodon auriga" +} +item { + name: "1392" + id: 486 + display_name: "Cyrtonyx montezumae" +} +item { + name: "4328" + id: 487 + display_name: "Pelecanus occidentalis" 
+} +item { + name: "7647" + id: 488 + display_name: "Cinclus cinclus" +} +item { + name: "148856" + id: 489 + display_name: "Anteos clorinde" +} +item { + name: "7060" + id: 490 + display_name: "Chen rossii" +} +item { + name: "58750" + id: 491 + display_name: "Nomophila nearctica" +} +item { + name: "1409" + id: 492 + display_name: "Callipepla californica" +} +item { + name: "9602" + id: 493 + display_name: "Quiscalus quiscula" +} +item { + name: "296326" + id: 494 + display_name: "Oncopeltus sexmaculatus" +} +item { + name: "9607" + id: 495 + display_name: "Quiscalus mexicanus" +} +item { + name: "319724" + id: 496 + display_name: "Euphoria kernii" +} +item { + name: "1419" + id: 497 + display_name: "Callipepla squamata" +} +item { + name: "148883" + id: 498 + display_name: "Eantis tamenund" +} +item { + name: "42391" + id: 499 + display_name: "Ovis canadensis" +} +item { + name: "107937" + id: 500 + display_name: "Orthemis discolor" +} +item { + name: "42405" + id: 501 + display_name: "Syncerus caffer" +} +item { + name: "42408" + id: 502 + display_name: "Bison bison" +} +item { + name: "116137" + id: 503 + display_name: "Sceloporus cowlesi" +} +item { + name: "326296" + id: 504 + display_name: "Bufo bufo" +} +item { + name: "148907" + id: 505 + display_name: "Cydia latiferreana" +} +item { + name: "42414" + id: 506 + display_name: "Oreamnos americanus" +} +item { + name: "116143" + id: 507 + display_name: "Sceloporus tristichus" +} +item { + name: "99912" + id: 508 + display_name: "Enallagma geminatum" +} +item { + name: "226889" + id: 509 + display_name: "Pangrapta decoralis" +} +item { + name: "42429" + id: 510 + display_name: "Antilocapra americana" +} +item { + name: "17855" + id: 511 + display_name: "Dryocopus pileatus" +} +item { + name: "107974" + id: 512 + display_name: "Orthetrum sabina" +} +item { + name: "56225" + id: 513 + display_name: "Polygonia c-album" +} +item { + name: "67016" + id: 514 + display_name: "Rana draytonii" +} +item { + name: "132553" + id: 515 + display_name: "Strymon istapa" +} +item { + name: "73155" + id: 516 + display_name: "Passerina caerulea" +} +item { + name: "26074" + id: 517 + display_name: "Crocodylus moreletii" +} +item { + name: "171903" + id: 518 + display_name: "Oligyra orbiculata" +} +item { + name: "26085" + id: 519 + display_name: "Crocodylus acutus" +} +item { + name: "143613" + id: 520 + display_name: "Homophoberia apicosa" +} +item { + name: "5715" + id: 521 + display_name: "Amazilia beryllina" +} +item { + name: "9721" + id: 522 + display_name: "Geothlypis trichas" +} +item { + name: "154446" + id: 523 + display_name: "Lambdina fiscellaria" +} +item { + name: "236841" + id: 524 + display_name: "Lichanura orcutti" +} +item { + name: "20737" + id: 525 + display_name: "Trogon melanocephalus" +} +item { + name: "124431" + id: 526 + display_name: "Cycloneda sanguinea" +} +item { + name: "124432" + id: 527 + display_name: "Deroceras reticulatum" +} +item { + name: "39566" + id: 528 + display_name: "Apalone ferox" +} +item { + name: "149017" + id: 529 + display_name: "Chlorochlamys chloroleucaria" +} +item { + name: "15281" + id: 530 + display_name: "Sylvia communis" +} +item { + name: "312873" + id: 531 + display_name: "Anartia fatima fatima" +} +item { + name: "9771" + id: 532 + display_name: "Pinicola enucleator" +} +item { + name: "39858" + id: 533 + display_name: "Graptemys geographica" +} +item { + name: "26159" + id: 534 + display_name: "Alligator mississippiensis" +} +item { + name: "304690" + id: 535 + display_name: "Naupactus 
cervinus" +} +item { + name: "124467" + id: 536 + display_name: "Pseudosphinx tetrio" +} +item { + name: "99892" + id: 537 + display_name: "Enallagma basidens" +} +item { + name: "99895" + id: 538 + display_name: "Enallagma carunculatum" +} +item { + name: "67129" + id: 539 + display_name: "Rhinella marina" +} +item { + name: "83515" + id: 540 + display_name: "Oxybelis aeneus" +} +item { + name: "81681" + id: 541 + display_name: "Campaea perlata" +} +item { + name: "99901" + id: 542 + display_name: "Enallagma cyathigerum" +} +item { + name: "99911" + id: 543 + display_name: "Enallagma exsulans" +} +item { + name: "9800" + id: 544 + display_name: "Coccothraustes vespertinus" +} +item { + name: "9801" + id: 545 + display_name: "Coccothraustes coccothraustes" +} +item { + name: "154551" + id: 546 + display_name: "Leptoglossus zonatus" +} +item { + name: "9807" + id: 547 + display_name: "Vermivora chrysoptera" +} +item { + name: "61157" + id: 548 + display_name: "Trichodes ornatus" +} +item { + name: "99924" + id: 549 + display_name: "Enallagma signatum" +} +item { + name: "1626" + id: 550 + display_name: "Opisthocomus hoazin" +} +item { + name: "132704" + id: 551 + display_name: "Setophaga coronata coronata" +} +item { + name: "119056" + id: 552 + display_name: "Centruroides vittatus" +} +item { + name: "50786" + id: 553 + display_name: "Vanessa annabella" +} +item { + name: "60347" + id: 554 + display_name: "Pituophis catenifer sayi" +} +item { + name: "9833" + id: 555 + display_name: "Diglossa baritula" +} +item { + name: "132718" + id: 556 + display_name: "Scathophaga stercoraria" +} +item { + name: "132719" + id: 557 + display_name: "Calopteron reticulatum" +} +item { + name: "116340" + id: 558 + display_name: "Dreissena polymorpha" +} +item { + name: "134078" + id: 559 + display_name: "Scoliopteryx libatrix" +} +item { + name: "9850" + id: 560 + display_name: "Saltator coerulescens" +} +item { + name: "117695" + id: 561 + display_name: "Cucumaria miniata" +} +item { + name: "9854" + id: 562 + display_name: "Saltator atriceps" +} +item { + name: "132736" + id: 563 + display_name: "Urola nivalis" +} +item { + name: "34435" + id: 564 + display_name: "Hemidactylus turcicus" +} +item { + name: "9864" + id: 565 + display_name: "Sicalis flaveola" +} +item { + name: "7106" + id: 566 + display_name: "Aix galericulata" +} +item { + name: "485010" + id: 567 + display_name: "Chinavia hilaris" +} +item { + name: "132764" + id: 568 + display_name: "Junco hyemalis hyemalis" +} +item { + name: "367558" + id: 569 + display_name: "Eupsittula canicularis" +} +item { + name: "370351" + id: 570 + display_name: "Microcarbo melanoleucos" +} +item { + name: "50867" + id: 571 + display_name: "Argiope bruennichi" +} +item { + name: "67252" + id: 572 + display_name: "Trachycephalus typhonius" +} +item { + name: "132789" + id: 573 + display_name: "Clepsis peritana" +} +item { + name: "9915" + id: 574 + display_name: "Piranga rubra" +} +item { + name: "50880" + id: 575 + display_name: "Limenitis lorquini" +} +item { + name: "9921" + id: 576 + display_name: "Piranga olivacea" +} +item { + name: "100034" + id: 577 + display_name: "Epiaeschna heros" +} +item { + name: "9924" + id: 578 + display_name: "Piranga flava" +} +item { + name: "42339" + id: 579 + display_name: "Tragelaphus strepsiceros" +} +item { + name: "50892" + id: 580 + display_name: "Euphydryas chalcedona" +} +item { + name: "130348" + id: 581 + display_name: "Dione moneta" +} +item { + name: "394966" + id: 582 + display_name: "Phaulacridium marginale" +} 
+item { + name: "9943" + id: 583 + display_name: "Amphispiza bilineata" +} +item { + name: "4388" + id: 584 + display_name: "Larus dominicanus" +} +item { + name: "1758" + id: 585 + display_name: "Piaya cayana" +} +item { + name: "50913" + id: 586 + display_name: "Hyalophora euryalus" +} +item { + name: "9958" + id: 587 + display_name: "Aimophila ruficeps" +} +item { + name: "59115" + id: 588 + display_name: "Gambusia affinis" +} +item { + name: "64346" + id: 589 + display_name: "Natrix tessellata" +} +item { + name: "59119" + id: 590 + display_name: "Pontia protodice" +} +item { + name: "18160" + id: 591 + display_name: "Melanerpes lewis" +} +item { + name: "18161" + id: 592 + display_name: "Melanerpes uropygialis" +} +item { + name: "50931" + id: 593 + display_name: "Strymon melinus" +} +item { + name: "59124" + id: 594 + display_name: "Anthocharis sara" +} +item { + name: "59127" + id: 595 + display_name: "Lycaena helloides" +} +item { + name: "59128" + id: 596 + display_name: "Atlides halesus" +} +item { + name: "67324" + id: 597 + display_name: "Eurema daira" +} +item { + name: "9981" + id: 598 + display_name: "Passerculus sandwichensis" +} +item { + name: "59134" + id: 599 + display_name: "Satyrium sylvinus" +} +item { + name: "67327" + id: 600 + display_name: "Schistocerca obscura" +} +item { + name: "67328" + id: 601 + display_name: "Pholcus phalangioides" +} +item { + name: "59138" + id: 602 + display_name: "Satyrium saepium" +} +item { + name: "132867" + id: 603 + display_name: "Microtia elva" +} +item { + name: "18181" + id: 604 + display_name: "Melanerpes pucherani" +} +item { + name: "7486" + id: 605 + display_name: "Salpinctes obsoletus" +} +item { + name: "108303" + id: 606 + display_name: "Paltothemis lineatipes" +} +item { + name: "59152" + id: 607 + display_name: "Leptotes marina" +} +item { + name: "132881" + id: 608 + display_name: "Catocala ultronia" +} +item { + name: "143662" + id: 609 + display_name: "Orthosoma brunneum" +} +item { + name: "59164" + id: 610 + display_name: "Plebejus icarioides" +} +item { + name: "18205" + id: 611 + display_name: "Melanerpes carolinus" +} +item { + name: "18206" + id: 612 + display_name: "Melanerpes chrysogenys" +} +item { + name: "83744" + id: 613 + display_name: "Amblyomma americanum" +} +item { + name: "18209" + id: 614 + display_name: "Melanerpes formicivorus" +} +item { + name: "116517" + id: 615 + display_name: "Caiman crocodilus" +} +item { + name: "59176" + id: 616 + display_name: "Phyciodes mylitta" +} +item { + name: "59182" + id: 617 + display_name: "Euphydryas editha" +} +item { + name: "43997" + id: 618 + display_name: "Myocastor coypus" +} +item { + name: "59185" + id: 619 + display_name: "Coenonympha tullia" +} +item { + name: "59187" + id: 620 + display_name: "Erynnis propertius" +} +item { + name: "59188" + id: 621 + display_name: "Erynnis funeralis" +} +item { + name: "59189" + id: 622 + display_name: "Erynnis tristis" +} +item { + name: "59190" + id: 623 + display_name: "Heliopetes ericetorum" +} +item { + name: "34615" + id: 624 + display_name: "Gekko gecko" +} +item { + name: "42808" + id: 625 + display_name: "Trichosurus vulpecula" +} +item { + name: "59194" + id: 626 + display_name: "Ochlodes sylvanoides" +} +item { + name: "59195" + id: 627 + display_name: "Lerodea eufala" +} +item { + name: "18236" + id: 628 + display_name: "Colaptes auratus" +} +item { + name: "10045" + id: 629 + display_name: "Basileuterus rufifrons" +} +item { + name: "59202" + id: 630 + display_name: "Larus michahellis" +} +item { + 
name: "10053" + id: 631 + display_name: "Ramphocelus passerinii" +} +item { + name: "19975" + id: 632 + display_name: "Athene cunicularia" +} +item { + name: "82231" + id: 633 + display_name: "Periplaneta americana" +} +item { + name: "67409" + id: 634 + display_name: "Gobiesox maeandricus" +} +item { + name: "83795" + id: 635 + display_name: "Cipangopaludina chinensis" +} +item { + name: "59220" + id: 636 + display_name: "Branta hutchinsii" +} +item { + name: "10069" + id: 637 + display_name: "Fringilla montifringilla" +} +item { + name: "10070" + id: 638 + display_name: "Fringilla coelebs" +} +item { + name: "83802" + id: 639 + display_name: "Megacyllene robiniae" +} +item { + name: "83804" + id: 640 + display_name: "Dynastes tityus" +} +item { + name: "51039" + id: 641 + display_name: "Cepaea hortensis" +} +item { + name: "68062" + id: 642 + display_name: "Menemerus bivittatus" +} +item { + name: "47527" + id: 643 + display_name: "Ostracion meleagris" +} +item { + name: "67435" + id: 644 + display_name: "Urbanus proteus" +} +item { + name: "10094" + id: 645 + display_name: "Junco hyemalis" +} +item { + name: "67440" + id: 646 + display_name: "Utetheisa ornatrix" +} +item { + name: "100210" + id: 647 + display_name: "Epitheca canis" +} +item { + name: "1907" + id: 648 + display_name: "Cuculus canorus" +} +item { + name: "100215" + id: 649 + display_name: "Epitheca princeps" +} +item { + name: "27826" + id: 650 + display_name: "Taricha granulosa" +} +item { + name: "129147" + id: 651 + display_name: "Ammophila procera" +} +item { + name: "10111" + id: 652 + display_name: "Junco phaeonotus" +} +item { + name: "83844" + id: 653 + display_name: "Oxyopes salticus" +} +item { + name: "144107" + id: 654 + display_name: "Tetracis crocallata" +} +item { + name: "51097" + id: 655 + display_name: "Papilio zelicaon" +} +item { + name: "10138" + id: 656 + display_name: "Ammodramus nelsoni" +} +item { + name: "10139" + id: 657 + display_name: "Ammodramus savannarum" +} +item { + name: "10147" + id: 658 + display_name: "Ammodramus maritimus" +} +item { + name: "59300" + id: 659 + display_name: "Anagrapha falcifera" +} +item { + name: "51110" + id: 660 + display_name: "Xylocopa virginica" +} +item { + name: "1960" + id: 661 + display_name: "Coccyzus erythropthalmus" +} +item { + name: "42652" + id: 662 + display_name: "Didelphis virginiana" +} +item { + name: "428606" + id: 663 + display_name: "Heraclides rumiko" +} +item { + name: "127303" + id: 664 + display_name: "Callophrys henrici" +} +item { + name: "1964" + id: 665 + display_name: "Coccyzus minor" +} +item { + name: "1965" + id: 666 + display_name: "Coccyzus americanus" +} +item { + name: "8520" + id: 667 + display_name: "Nucifraga columbiana" +} +item { + name: "116658" + id: 668 + display_name: "Siphanta acuta" +} +item { + name: "1972" + id: 669 + display_name: "Crotophaga sulcirostris" +} +item { + name: "10168" + id: 670 + display_name: "Pooecetes gramineus" +} +item { + name: "53893" + id: 671 + display_name: "Chlosyne palla" +} +item { + name: "10173" + id: 672 + display_name: "Arremonops rufivirgatus" +} +item { + name: "1986" + id: 673 + display_name: "Geococcyx californianus" +} +item { + name: "1987" + id: 674 + display_name: "Geococcyx velox" +} +item { + name: "116680" + id: 675 + display_name: "Tabanus atratus" +} +item { + name: "116681" + id: 676 + display_name: "Atteva aurea" +} +item { + name: "124875" + id: 677 + display_name: "Spodoptera litura" +} +item { + name: "26575" + id: 678 + display_name: "Diadophis punctatus" +} 
+item { + name: "10199" + id: 679 + display_name: "Coereba flaveola" +} +item { + name: "26591" + id: 680 + display_name: "Diadophis punctatus edwardsii" +} +item { + name: "59360" + id: 681 + display_name: "Neverita duplicata" +} +item { + name: "68263" + id: 682 + display_name: "Papilio multicaudata" +} +item { + name: "26598" + id: 683 + display_name: "Diadophis punctatus amabilis" +} +item { + name: "42983" + id: 684 + display_name: "Phascolarctos cinereus" +} +item { + name: "67560" + id: 685 + display_name: "Adelpha californica" +} +item { + name: "10224" + id: 686 + display_name: "Passerina ciris" +} +item { + name: "2038" + id: 687 + display_name: "Alectura lathami" +} +item { + name: "10232" + id: 688 + display_name: "Passerina leclancherii" +} +item { + name: "10234" + id: 689 + display_name: "Passerina amoena" +} +item { + name: "10243" + id: 690 + display_name: "Icteria virens" +} +item { + name: "2052" + id: 691 + display_name: "Crax rubra" +} +item { + name: "94551" + id: 692 + display_name: "Argia immunda" +} +item { + name: "2062" + id: 693 + display_name: "Penelope purpurascens" +} +item { + name: "204490" + id: 694 + display_name: "Copsychus malabaricus" +} +item { + name: "10257" + id: 695 + display_name: "Paroaria capitata" +} +item { + name: "51221" + id: 696 + display_name: "Procambarus clarkii" +} +item { + name: "10262" + id: 697 + display_name: "Cyanerpes cyaneus" +} +item { + name: "508249" + id: 698 + display_name: "Microcarbo melanoleucos brevirostris" +} +item { + name: "18460" + id: 699 + display_name: "Sphyrapicus thyroideus" +} +item { + name: "10271" + id: 700 + display_name: "Pheucticus ludovicianus" +} +item { + name: "18464" + id: 701 + display_name: "Sphyrapicus ruber" +} +item { + name: "10274" + id: 702 + display_name: "Pheucticus melanocephalus" +} +item { + name: "18467" + id: 703 + display_name: "Sphyrapicus nuchalis" +} +item { + name: "100391" + id: 704 + display_name: "Erythrodiplax berenice" +} +item { + name: "2089" + id: 705 + display_name: "Ortalis poliocephala" +} +item { + name: "2090" + id: 706 + display_name: "Ortalis vetula" +} +item { + name: "8038" + id: 707 + display_name: "Corvus albus" +} +item { + name: "67629" + id: 708 + display_name: "Oligocottus maculosus" +} +item { + name: "10286" + id: 709 + display_name: "Mniotilta varia" +} +item { + name: "10288" + id: 710 + display_name: "Volatinia jacarina" +} +item { + name: "100403" + id: 711 + display_name: "Erythrodiplax minuscula" +} +item { + name: "84023" + id: 712 + display_name: "Amorpha juglandis" +} +item { + name: "84024" + id: 713 + display_name: "Galasa nigrinodis" +} +item { + name: "10297" + id: 714 + display_name: "Thraupis palmarum" +} +item { + name: "67642" + id: 715 + display_name: "Pantherophis spiloides" +} +item { + name: "67653" + id: 716 + display_name: "Phoebis agarithe" +} +item { + name: "84038" + id: 717 + display_name: "Haploa lecontei" +} +item { + name: "26695" + id: 718 + display_name: "Scaphiopus holbrookii" +} +item { + name: "84040" + id: 719 + display_name: "Chauliognathus marginatus" +} +item { + name: "51275" + id: 720 + display_name: "Pentatoma rufipes" +} +item { + name: "2124" + id: 721 + display_name: "Momotus mexicanus" +} +item { + name: "26702" + id: 722 + display_name: "Spea hammondii" +} +item { + name: "10325" + id: 723 + display_name: "Euphagus cyanocephalus" +} +item { + name: "43102" + id: 724 + display_name: "Sylvilagus palustris" +} +item { + name: "49509" + id: 725 + display_name: "Lutjanus griseus" +} +item { + name: "116834" + 
id: 726 + display_name: "Cacatua galerita" +} +item { + name: "127188" + id: 727 + display_name: "Junco hyemalis oreganus" +} +item { + name: "26725" + id: 728 + display_name: "Ambystoma jeffersonianum" +} +item { + name: "43111" + id: 729 + display_name: "Sylvilagus floridanus" +} +item { + name: "43112" + id: 730 + display_name: "Sylvilagus bachmani" +} +item { + name: "67691" + id: 731 + display_name: "Lophocampa maculata" +} +item { + name: "51311" + id: 732 + display_name: "Urbanus dorantes" +} +item { + name: "67700" + id: 733 + display_name: "Caracolus caracolla" +} +item { + name: "43128" + id: 734 + display_name: "Lepus europaeus" +} +item { + name: "26745" + id: 735 + display_name: "Ambystoma texanum" +} +item { + name: "67706" + id: 736 + display_name: "Argiope argentata" +} +item { + name: "26747" + id: 737 + display_name: "Ambystoma gracile" +} +item { + name: "67708" + id: 738 + display_name: "Argiope trifasciata" +} +item { + name: "26749" + id: 739 + display_name: "Ambystoma tigrinum" +} +item { + name: "4896" + id: 740 + display_name: "Pluvialis fulva" +} +item { + name: "10369" + id: 741 + display_name: "Molothrus aeneus" +} +item { + name: "26754" + id: 742 + display_name: "Ambystoma macrodactylum" +} +item { + name: "10373" + id: 743 + display_name: "Molothrus ater" +} +item { + name: "2185" + id: 744 + display_name: "Merops pusillus" +} +item { + name: "84109" + id: 745 + display_name: "Pisaurina mira" +} +item { + name: "67726" + id: 746 + display_name: "Aeshna palmata" +} +item { + name: "2191" + id: 747 + display_name: "Merops apiaster" +} +item { + name: "67731" + id: 748 + display_name: "Anax junius" +} +item { + name: "198804" + id: 749 + display_name: "Satyrium titus" +} +item { + name: "51349" + id: 750 + display_name: "Pyrgus communis" +} +item { + name: "18584" + id: 751 + display_name: "Pteroglossus torquatus" +} +item { + name: "67737" + id: 752 + display_name: "Rhionaeschna multicolor" +} +item { + name: "198812" + id: 753 + display_name: "Lethe anthedon" +} +item { + name: "321697" + id: 754 + display_name: "Melanchroia chephise" +} +item { + name: "198821" + id: 755 + display_name: "Pieris oleracea" +} +item { + name: "26790" + id: 756 + display_name: "Ambystoma maculatum" +} +item { + name: "10411" + id: 757 + display_name: "Loxia curvirostra" +} +item { + name: "133295" + id: 758 + display_name: "Melitaea didyma" +} +item { + name: "67760" + id: 759 + display_name: "Popillia japonica" +} +item { + name: "43188" + id: 760 + display_name: "Ochotona princeps" +} +item { + name: "2229" + id: 761 + display_name: "Merops orientalis" +} +item { + name: "10423" + id: 762 + display_name: "Loxia leucoptera" +} +item { + name: "67771" + id: 763 + display_name: "Leptoglossus occidentalis" +} +item { + name: "84162" + id: 764 + display_name: "Chrysochus auratus" +} +item { + name: "26822" + id: 765 + display_name: "Dicamptodon tenebrosus" +} +item { + name: "26823" + id: 766 + display_name: "Dicamptodon ensatus" +} +item { + name: "51402" + id: 767 + display_name: "Megalops atlanticus" +} +item { + name: "67725" + id: 768 + display_name: "Aeshna interrupta" +} +item { + name: "411858" + id: 769 + display_name: "Vanessa gonerilla gonerilla" +} +item { + name: "26835" + id: 770 + display_name: "Drymobius margaritiferus" +} +item { + name: "84185" + id: 771 + display_name: "Megalopyge opercularis" +} +item { + name: "2266" + id: 772 + display_name: "Coracias garrulus" +} +item { + name: "141531" + id: 773 + display_name: "Lethe eurydice" +} +item { + name: "2269" + 
id: 774 + display_name: "Coracias caudatus" +} +item { + name: "133346" + id: 775 + display_name: "Melittia cucurbitae" +} +item { + name: "2275" + id: 776 + display_name: "Coracias benghalensis" +} +item { + name: "84196" + id: 777 + display_name: "Pontania californica" +} +item { + name: "10470" + id: 778 + display_name: "Xanthocephalus xanthocephalus" +} +item { + name: "10479" + id: 779 + display_name: "Chondestes grammacus" +} +item { + name: "51440" + id: 780 + display_name: "Pituophis catenifer catenifer" +} +item { + name: "54087" + id: 781 + display_name: "Pieris napi" +} +item { + name: "59635" + id: 782 + display_name: "Phragmatopoma californica" +} +item { + name: "10487" + id: 783 + display_name: "Dolichonyx oryzivorus" +} +item { + name: "67835" + id: 784 + display_name: "Danaus chrysippus" +} +item { + name: "59644" + id: 785 + display_name: "Pantherophis alleghaniensis" +} +item { + name: "59646" + id: 786 + display_name: "Pantherophis bairdi" +} +item { + name: "116999" + id: 787 + display_name: "Pandion haliaetus" +} +item { + name: "117002" + id: 788 + display_name: "Phainopepla nitens" +} +item { + name: "16770" + id: 789 + display_name: "Tyrannus couchii" +} +item { + name: "84239" + id: 790 + display_name: "Callophrys gryneus" +} +item { + name: "104553" + id: 791 + display_name: "Leucorrhinia proxima" +} +item { + name: "117016" + id: 792 + display_name: "Phylloscopus collybita" +} +item { + name: "49540" + id: 793 + display_name: "Gasteracantha cancriformis" +} +item { + name: "59675" + id: 794 + display_name: "Pyrrharctia isabella" +} +item { + name: "469277" + id: 795 + display_name: "Neotibicen superbus" +} +item { + name: "236973" + id: 796 + display_name: "Circus cyaneus hudsonius" +} +item { + name: "59683" + id: 797 + display_name: "Porpita porpita" +} +item { + name: "26916" + id: 798 + display_name: "Contia tenuis" +} +item { + name: "51493" + id: 799 + display_name: "Trimerotropis pallidipennis" +} +item { + name: "51495" + id: 800 + display_name: "Anthocharis cardamines" +} +item { + name: "133416" + id: 801 + display_name: "Phoebis philea" +} +item { + name: "8583" + id: 802 + display_name: "Grallina cyanoleuca" +} +item { + name: "395569" + id: 803 + display_name: "Prionoplus reticularis" +} +item { + name: "59698" + id: 804 + display_name: "Velella velella" +} +item { + name: "141626" + id: 805 + display_name: "Lygaeus turcicus" +} +item { + name: "84286" + id: 806 + display_name: "Diapheromera femorata" +} +item { + name: "117059" + id: 807 + display_name: "Plectrophenax nivalis" +} +item { + name: "133447" + id: 808 + display_name: "Crambus agitatellus" +} +item { + name: "133448" + id: 809 + display_name: "Climaciella brunnea" +} +item { + name: "51534" + id: 810 + display_name: "Leptotes cassius" +} +item { + name: "205197" + id: 811 + display_name: "Eutrapela clemataria" +} +item { + name: "51536" + id: 812 + display_name: "Ascia monuste" +} +item { + name: "10585" + id: 813 + display_name: "Calamospiza melanocorys" +} +item { + name: "49552" + id: 814 + display_name: "Scutigera coleoptrata" +} +item { + name: "51555" + id: 815 + display_name: "Sympetrum illotum" +} +item { + name: "51557" + id: 816 + display_name: "Bombylius major" +} +item { + name: "117095" + id: 817 + display_name: "Regulus calendula" +} +item { + name: "117097" + id: 818 + display_name: "Regulus ignicapilla" +} +item { + name: "117099" + id: 819 + display_name: "Regulus regulus" +} +item { + name: "117100" + id: 820 + display_name: "Regulus satrapa" +} +item { + name: "84333" 
+ id: 821 + display_name: "Eudryas grata" +} +item { + name: "215409" + id: 822 + display_name: "Bradybaena similaris" +} +item { + name: "16787" + id: 823 + display_name: "Tyrannus melancholicus" +} +item { + name: "46225" + id: 824 + display_name: "Tamias dorsalis" +} +item { + name: "59774" + id: 825 + display_name: "Pachydiplax longipennis" +} +item { + name: "59776" + id: 826 + display_name: "Perithemis tenera" +} +item { + name: "119014" + id: 827 + display_name: "Argia fumipennis violacea" +} +item { + name: "4326" + id: 828 + display_name: "Pelecanus conspicillatus" +} +item { + name: "18833" + id: 829 + display_name: "Aulacorhynchus prasinus" +} +item { + name: "43411" + id: 830 + display_name: "Ateles geoffroyi" +} +item { + name: "141725" + id: 831 + display_name: "Nezara viridula" +} +item { + name: "51614" + id: 832 + display_name: "Eurema hecabe" +} +item { + name: "125343" + id: 833 + display_name: "Crepidula fornicata" +} +item { + name: "2464" + id: 834 + display_name: "Todiramphus sanctus" +} +item { + name: "43432" + id: 835 + display_name: "Cebus capucinus" +} +item { + name: "43436" + id: 836 + display_name: "Alouatta palliata" +} +item { + name: "43439" + id: 837 + display_name: "Alouatta pigra" +} +item { + name: "9357" + id: 838 + display_name: "Icterus bullockii" +} +item { + name: "84403" + id: 839 + display_name: "Phyllopalpus pulchellus" +} +item { + name: "10676" + id: 840 + display_name: "Spiza americana" +} +item { + name: "16798" + id: 841 + display_name: "Tyrannus dominicensis" +} +item { + name: "141752" + id: 842 + display_name: "Biblis hyperia" +} +item { + name: "4512" + id: 843 + display_name: "Chlidonias niger" +} +item { + name: "43460" + id: 844 + display_name: "Macaca mulatta" +} +item { + name: "51654" + id: 845 + display_name: "Junonia almana" +} +item { + name: "51659" + id: 846 + display_name: "Anthopleura xanthogrammica" +} +item { + name: "84428" + id: 847 + display_name: "Drepana arcuata" +} +item { + name: "10702" + id: 848 + display_name: "Oriturus superciliosus" +} +item { + name: "68047" + id: 849 + display_name: "Psarocolius montezuma" +} +item { + name: "12707" + id: 850 + display_name: "Turdus pilaris" +} +item { + name: "84437" + id: 851 + display_name: "Nicrophorus orbicollis" +} +item { + name: "84438" + id: 852 + display_name: "Platyprepia virginalis" +} +item { + name: "117209" + id: 853 + display_name: "Notiomystis cincta" +} +item { + name: "343393" + id: 854 + display_name: "Hypsopygia olinalis" +} +item { + name: "27101" + id: 855 + display_name: "Eurycea longicauda" +} +item { + name: "117214" + id: 856 + display_name: "Sagittarius serpentarius" +} +item { + name: "18911" + id: 857 + display_name: "Psittacula krameri" +} +item { + name: "117218" + id: 858 + display_name: "Verrucosa arenata" +} +item { + name: "117221" + id: 859 + display_name: "Dasymutilla occidentalis" +} +item { + name: "35303" + id: 860 + display_name: "Ctenosaura similis" +} +item { + name: "18920" + id: 861 + display_name: "Platycercus eximius" +} +item { + name: "10729" + id: 862 + display_name: "Protonotaria citrea" +} +item { + name: "35306" + id: 863 + display_name: "Ctenosaura pectinata" +} +item { + name: "109650" + id: 864 + display_name: "Platycnemis pennipes" +} +item { + name: "27120" + id: 865 + display_name: "Eurycea bislineata" +} +item { + name: "27123" + id: 866 + display_name: "Eurycea lucifuga" +} +item { + name: "51702" + id: 867 + display_name: "Coccinella septempunctata" +} +item { + name: "2552" + id: 868 + display_name: "Megaceryle 
torquata" +} +item { + name: "133625" + id: 869 + display_name: "Zanclognatha jacchusalis" +} +item { + name: "18943" + id: 870 + display_name: "Nestor meridionalis" +} +item { + name: "84481" + id: 871 + display_name: "Calopteryx maculata" +} +item { + name: "35330" + id: 872 + display_name: "Sauromalus ater" +} +item { + name: "27140" + id: 873 + display_name: "Coluber constrictor priapus" +} +item { + name: "199179" + id: 874 + display_name: "Polistes chinensis" +} +item { + name: "51724" + id: 875 + display_name: "Mopalia lignosa" +} +item { + name: "27149" + id: 876 + display_name: "Coluber constrictor constrictor" +} +item { + name: "35342" + id: 877 + display_name: "Iguana iguana" +} +item { + name: "27153" + id: 878 + display_name: "Coluber constrictor flaviventris" +} +item { + name: "35347" + id: 879 + display_name: "Amblyrhynchus cristatus" +} +item { + name: "125461" + id: 880 + display_name: "Ursus arctos horribilis" +} +item { + name: "84507" + id: 881 + display_name: "Lygus lineolaris" +} +item { + name: "35356" + id: 882 + display_name: "Dipsosaurus dorsalis" +} +item { + name: "51743" + id: 883 + display_name: "Danaus gilippus" +} +item { + name: "18976" + id: 884 + display_name: "Amazona viridigenalis" +} +item { + name: "125475" + id: 885 + display_name: "Plusiodonta compressipalpis" +} +item { + name: "51748" + id: 886 + display_name: "Danaus gilippus thersippus" +} +item { + name: "68137" + id: 887 + display_name: "Chlorocebus pygerythrus" +} +item { + name: "133675" + id: 888 + display_name: "Coenobita clypeatus" +} +item { + name: "215596" + id: 889 + display_name: "Buprestis aurulenta" +} +item { + name: "117293" + id: 890 + display_name: "Oecophylla smaragdina" +} +item { + name: "68142" + id: 891 + display_name: "Prenolepis imparis" +} +item { + name: "27184" + id: 892 + display_name: "Plethodon glutinosus" +} +item { + name: "27186" + id: 893 + display_name: "Plethodon cinereus" +} +item { + name: "18995" + id: 894 + display_name: "Amazona albifrons" +} +item { + name: "51765" + id: 895 + display_name: "Poanes melane" +} +item { + name: "18998" + id: 896 + display_name: "Amazona oratrix" +} +item { + name: "41396" + id: 897 + display_name: "Rhynchonycteris naso" +} +item { + name: "27194" + id: 898 + display_name: "Plethodon vehiculum" +} +item { + name: "51773" + id: 899 + display_name: "Nathalis iole" +} +item { + name: "12908" + id: 900 + display_name: "Saxicola rubetra" +} +item { + name: "68165" + id: 901 + display_name: "Linepithema humile" +} +item { + name: "154721" + id: 902 + display_name: "Brachygastra mellifica" +} +item { + name: "338504" + id: 903 + display_name: "Xanthocnemis zealandica" +} +item { + name: "338505" + id: 904 + display_name: "Melangyna novaezelandiae" +} +item { + name: "27093" + id: 905 + display_name: "Eurycea cirrigera" +} +item { + name: "65975" + id: 906 + display_name: "Lithobates berlandieri" +} +item { + name: "19020" + id: 907 + display_name: "Ara militaris" +} +item { + name: "474210" + id: 908 + display_name: "Spizelloides arborea" +} +item { + name: "205240" + id: 909 + display_name: "Pantographa limata" +} +item { + name: "27226" + id: 910 + display_name: "Plethodon albagula" +} +item { + name: "318545" + id: 911 + display_name: "Coreus marginatus" +} +item { + name: "2662" + id: 912 + display_name: "Ceryle rudis" +} +item { + name: "109161" + id: 913 + display_name: "Perithemis intensa" +} +item { + name: "51824" + id: 914 + display_name: "Calopteryx splendens" +} +item { + name: "27250" + id: 915 + display_name: 
"Ensatina eschscholtzii" +} +item { + name: "2676" + id: 916 + display_name: "Chloroceryle aenea" +} +item { + name: "2679" + id: 917 + display_name: "Chloroceryle amazona" +} +item { + name: "84602" + id: 918 + display_name: "Zale lunata" +} +item { + name: "133756" + id: 919 + display_name: "Leptoglossus oppositus" +} +item { + name: "35453" + id: 920 + display_name: "Zootoca vivipara" +} +item { + name: "84612" + id: 921 + display_name: "Polyphylla decemlineata" +} +item { + name: "133765" + id: 922 + display_name: "Eumenes fraternus" +} +item { + name: "68230" + id: 923 + display_name: "Brachymesia gravida" +} +item { + name: "49601" + id: 924 + display_name: "Mola mola" +} +item { + name: "68232" + id: 925 + display_name: "Papilio palamedes" +} +item { + name: "68233" + id: 926 + display_name: "Orthemis ferruginea" +} +item { + name: "68239" + id: 927 + display_name: "Parnassius clodius" +} +item { + name: "68240" + id: 928 + display_name: "Chlosyne lacinia" +} +item { + name: "68244" + id: 929 + display_name: "Euptoieta claudia" +} +item { + name: "68249" + id: 930 + display_name: "Dymasia dymas" +} +item { + name: "68251" + id: 931 + display_name: "Limenitis weidemeyerii" +} +item { + name: "133790" + id: 932 + display_name: "Chalybion californicum" +} +item { + name: "84644" + id: 933 + display_name: "Phalangium opilio" +} +item { + name: "68262" + id: 934 + display_name: "Polygonia faunus" +} +item { + name: "133799" + id: 935 + display_name: "Xenox tigrinus" +} +item { + name: "68264" + id: 936 + display_name: "Asterocampa celtis" +} +item { + name: "132892" + id: 937 + display_name: "Anacridium aegyptium" +} +item { + name: "68268" + id: 938 + display_name: "Euptoieta hegesia" +} +item { + name: "68269" + id: 939 + display_name: "Aglais milberti" +} +item { + name: "43694" + id: 940 + display_name: "Loxodonta africana" +} +item { + name: "59165" + id: 941 + display_name: "Apodemia mormo" +} +item { + name: "68274" + id: 942 + display_name: "Phyciodes phaon" +} +item { + name: "68275" + id: 943 + display_name: "Battus polydamas" +} +item { + name: "84662" + id: 944 + display_name: "Celastrina lucia" +} +item { + name: "16842" + id: 945 + display_name: "Myiozetetes similis" +} +item { + name: "133826" + id: 946 + display_name: "Zelus longipes" +} +item { + name: "14912" + id: 947 + display_name: "Toxostoma curvirostre" +} +item { + name: "53708" + id: 948 + display_name: "Pacifastacus leniusculus" +} +item { + name: "117452" + id: 949 + display_name: "Sphinx kalmiae" +} +item { + name: "182997" + id: 950 + display_name: "Megisto rubricata" +} +item { + name: "223965" + id: 951 + display_name: "Lithacodia musta" +} +item { + name: "125663" + id: 952 + display_name: "Kelletia kelletii" +} +item { + name: "125669" + id: 953 + display_name: "Rumina decollata" +} +item { + name: "68328" + id: 954 + display_name: "Oxythyrea funesta" +} +item { + name: "179324" + id: 955 + display_name: "Dactylotum bicolor" +} +item { + name: "68330" + id: 956 + display_name: "Arctia caja" +} +item { + name: "2548" + id: 957 + display_name: "Megaceryle alcyon" +} +item { + name: "207600" + id: 958 + display_name: "Thasus neocalifornicus" +} +item { + name: "207601" + id: 959 + display_name: "Palpita quadristigmalis" +} +item { + name: "51954" + id: 960 + display_name: "Sphecius speciosus" +} +item { + name: "207603" + id: 961 + display_name: "Prolimacodes badia" +} +item { + name: "7294" + id: 962 + display_name: "Eremophila alpestris" +} +item { + name: "19196" + id: 963 + display_name: "Alisterus 
scapularis" +} +item { + name: "145194" + id: 964 + display_name: "Cinnyris jugularis" +} +item { + name: "27390" + id: 965 + display_name: "Desmognathus ochrophaeus" +} +item { + name: "207615" + id: 966 + display_name: "Polistes apachus" +} +item { + name: "63275" + id: 967 + display_name: "Tremex columba" +} +item { + name: "61910" + id: 968 + display_name: "Orgyia antiqua" +} +item { + name: "199438" + id: 969 + display_name: "Orgyia postica" +} +item { + name: "43794" + id: 970 + display_name: "Castor canadensis" +} +item { + name: "84755" + id: 971 + display_name: "Arion rufus" +} +item { + name: "51996" + id: 972 + display_name: "Daphnis nerii" +} +item { + name: "194075" + id: 973 + display_name: "Drymarchon melanurus erebennus" +} +item { + name: "133923" + id: 974 + display_name: "Mermiria bivittata" +} +item { + name: "84778" + id: 975 + display_name: "Leptinotarsa decemlineata" +} +item { + name: "11051" + id: 976 + display_name: "Xiphorhynchus flavigaster" +} +item { + name: "121992" + id: 977 + display_name: "Cervus elaphus roosevelti" +} +item { + name: "27459" + id: 978 + display_name: "Batrachoseps attenuatus" +} +item { + name: "84806" + id: 979 + display_name: "Acanalonia conica" +} +item { + name: "52043" + id: 980 + display_name: "Spoladea recurvalis" +} +item { + name: "27468" + id: 981 + display_name: "Batrachoseps major" +} +item { + name: "133966" + id: 982 + display_name: "Lomographa vestaliata" +} +item { + name: "27474" + id: 983 + display_name: "Batrachoseps nigriventris" +} +item { + name: "101204" + id: 984 + display_name: "Gambusia holbrooki" +} +item { + name: "52055" + id: 985 + display_name: "Crocothemis servilia" +} +item { + name: "4580" + id: 986 + display_name: "Jacana jacana" +} +item { + name: "346970" + id: 987 + display_name: "Callophrys dumetorum" +} +item { + name: "27486" + id: 988 + display_name: "Pseudotriton ruber" +} +item { + name: "52075" + id: 989 + display_name: "Atalopedes campestris" +} +item { + name: "27500" + id: 990 + display_name: "Gyrinophilus porphyriticus" +} +item { + name: "73203" + id: 991 + display_name: "Phalaropus fulicarius" +} +item { + name: "322417" + id: 992 + display_name: "Limacus flavus" +} +item { + name: "40083" + id: 993 + display_name: "Gopherus berlandieri" +} +item { + name: "68469" + id: 994 + display_name: "Papilio demodocus" +} +item { + name: "2938" + id: 995 + display_name: "Streptopelia turtur" +} +item { + name: "117633" + id: 996 + display_name: "Mopalia muscosa" +} +item { + name: "117641" + id: 997 + display_name: "Nucella lamellosa" +} +item { + name: "322443" + id: 998 + display_name: "Thasus gigas" +} +item { + name: "68492" + id: 999 + display_name: "Hemidactylus mabouia" +} +item { + name: "143853" + id: 1000 + display_name: "Pica hudsonia" +} +item { + name: "144757" + id: 1001 + display_name: "Corvus cornix" +} +item { + name: "117650" + id: 1002 + display_name: "Mytilus edulis" +} +item { + name: "19349" + id: 1003 + display_name: "Myiopsitta monachus" +} +item { + name: "2969" + id: 1004 + display_name: "Streptopelia decaocto" +} +item { + name: "9919" + id: 1005 + display_name: "Piranga ludoviciana" +} +item { + name: "5009" + id: 1006 + display_name: "Ixobrychus exilis" +} +item { + name: "117666" + id: 1007 + display_name: "Pleuroncodes planipes" +} +item { + name: "7603" + id: 1008 + display_name: "Auriparus flaviceps" +} +item { + name: "117674" + id: 1009 + display_name: "Ligia occidentalis" +} +item { + name: "145223" + id: 1010 + display_name: "Geothlypis tolmiei" +} +item { + 
name: "60341" + id: 1011 + display_name: "Lithobates sphenocephalus" +} +item { + name: "60342" + id: 1012 + display_name: "Thamnophis proximus" +} +item { + name: "52155" + id: 1013 + display_name: "Dermacentor variabilis" +} +item { + name: "60349" + id: 1014 + display_name: "Scincella lateralis" +} +item { + name: "52158" + id: 1015 + display_name: "Schistocerca nitens" +} +item { + name: "117696" + id: 1016 + display_name: "Dendraster excentricus" +} +item { + name: "232391" + id: 1017 + display_name: "Tetracha carolina" +} +item { + name: "3017" + id: 1018 + display_name: "Columba livia" +} +item { + name: "145229" + id: 1019 + display_name: "Setophaga citrina" +} +item { + name: "84950" + id: 1020 + display_name: "Alypia octomaculata" +} +item { + name: "52188" + id: 1021 + display_name: "Rhincodon typus" +} +item { + name: "494559" + id: 1022 + display_name: "Polydrusus formosus" +} +item { + name: "145232" + id: 1023 + display_name: "Setophaga cerulea" +} +item { + name: "3048" + id: 1024 + display_name: "Columba palumbus" +} +item { + name: "9922" + id: 1025 + display_name: "Piranga bidentata" +} +item { + name: "44026" + id: 1026 + display_name: "Erethizon dorsatum" +} +item { + name: "61505" + id: 1027 + display_name: "Manduca sexta" +} +item { + name: "84994" + id: 1028 + display_name: "Acanthocephala declivis" +} +item { + name: "27652" + id: 1029 + display_name: "Hemidactylium scutatum" +} +item { + name: "117767" + id: 1030 + display_name: "Cervus elaphus nannodes" +} +item { + name: "494603" + id: 1031 + display_name: "Hermissenda opalescens" +} +item { + name: "39819" + id: 1032 + display_name: "Terrapene carolina bauri" +} +item { + name: "3093" + id: 1033 + display_name: "Patagioenas leucocephala" +} +item { + name: "205316" + id: 1034 + display_name: "Aidemona azteca" +} +item { + name: "216093" + id: 1035 + display_name: "Caracolus marginella" +} +item { + name: "44062" + id: 1036 + display_name: "Thomomys bottae" +} +item { + name: "85024" + id: 1037 + display_name: "Heraclides cresphontes" +} +item { + name: "3108" + id: 1038 + display_name: "Patagioenas fasciata" +} +item { + name: "213510" + id: 1039 + display_name: "Anageshna primordialis" +} +item { + name: "85030" + id: 1040 + display_name: "Crocothemis erythraea" +} +item { + name: "85034" + id: 1041 + display_name: "Neoscona crucifera" +} +item { + name: "3117" + id: 1042 + display_name: "Patagioenas flavirostris" +} +item { + name: "207924" + id: 1043 + display_name: "Synchlora frondaria" +} +item { + name: "35900" + id: 1044 + display_name: "Lacerta bilineata" +} +item { + name: "24382" + id: 1045 + display_name: "Osteopilus septentrionalis" +} +item { + name: "145249" + id: 1046 + display_name: "Setophaga discolor" +} +item { + name: "52297" + id: 1047 + display_name: "Triakis semifasciata" +} +item { + name: "27726" + id: 1048 + display_name: "Salamandra salamandra" +} +item { + name: "27727" + id: 1049 + display_name: "Bogertophis subocularis" +} +item { + name: "143043" + id: 1050 + display_name: "Cycnia tenera" +} +item { + name: "52313" + id: 1051 + display_name: "Diodon hystrix" +} +item { + name: "143316" + id: 1052 + display_name: "Schinia florida" +} +item { + name: "61968" + id: 1053 + display_name: "Graphosoma lineatum" +} +item { + name: "502885" + id: 1054 + display_name: "Lissachatina fulica" +} +item { + name: "71029" + id: 1055 + display_name: "Crotalus cerastes cerastes" +} +item { + name: "207977" + id: 1056 + display_name: "Aglais io" +} +item { + name: "19577" + id: 1057 + display_name: 
"Chordeiles minor" +} +item { + name: "93312" + id: 1058 + display_name: "Acropora palmata" +} +item { + name: "52354" + id: 1059 + display_name: "Ambystoma laterale" +} +item { + name: "19587" + id: 1060 + display_name: "Chordeiles acutipennis" +} +item { + name: "58585" + id: 1061 + display_name: "Limenitis arthemis astyanax" +} +item { + name: "134277" + id: 1062 + display_name: "Gastrophryne olivacea" +} +item { + name: "60551" + id: 1063 + display_name: "Papilio glaucus" +} +item { + name: "3731" + id: 1064 + display_name: "Platalea leucorodia" +} +item { + name: "232593" + id: 1065 + display_name: "Thyris sepulchralis" +} +item { + name: "19609" + id: 1066 + display_name: "Phalaenoptilus nuttallii" +} +item { + name: "126106" + id: 1067 + display_name: "Haploa clymene" +} +item { + name: "27805" + id: 1068 + display_name: "Notophthalmus viridescens" +} +item { + name: "199840" + id: 1069 + display_name: "Haemorhous mexicanus" +} +item { + name: "199841" + id: 1070 + display_name: "Haemorhous purpureus" +} +item { + name: "219719" + id: 1071 + display_name: "Eudryas unio" +} +item { + name: "27818" + id: 1072 + display_name: "Taricha torosa" +} +item { + name: "19627" + id: 1073 + display_name: "Nyctidromus albicollis" +} +item { + name: "28750" + id: 1074 + display_name: "Salvadora grahamiae lineata" +} +item { + name: "27824" + id: 1075 + display_name: "Taricha rivularis" +} +item { + name: "146632" + id: 1076 + display_name: "Toxomerus politus" +} +item { + name: "52402" + id: 1077 + display_name: "Cetonia aurata" +} +item { + name: "18291" + id: 1078 + display_name: "Campephilus guatemalensis" +} +item { + name: "60598" + id: 1079 + display_name: "Ixodes scapularis" +} +item { + name: "199870" + id: 1080 + display_name: "Pyralis farinalis" +} +item { + name: "60607" + id: 1081 + display_name: "Limenitis arthemis" +} +item { + name: "205241" + id: 1082 + display_name: "Plagodis phlogosaria" +} +item { + name: "14898" + id: 1083 + display_name: "Toxostoma rufum" +} +item { + name: "126153" + id: 1084 + display_name: "Amphion floridensis" +} +item { + name: "126155" + id: 1085 + display_name: "Vespula germanica" +} +item { + name: "51392" + id: 1086 + display_name: "Morone saxatilis" +} +item { + name: "3280" + id: 1087 + display_name: "Leptotila verreauxi" +} +item { + name: "19670" + id: 1088 + display_name: "Nyctibius jamaicensis" +} +item { + name: "6929" + id: 1089 + display_name: "Anas penelope" +} +item { + name: "97738" + id: 1090 + display_name: "Chromagrion conditum" +} +item { + name: "52449" + id: 1091 + display_name: "Rhinecanthus rectangulus" +} +item { + name: "52451" + id: 1092 + display_name: "Naso lituratus" +} +item { + name: "56529" + id: 1093 + display_name: "Papilio machaon" +} +item { + name: "199913" + id: 1094 + display_name: "Buteo plagiatus" +} +item { + name: "199914" + id: 1095 + display_name: "Selasphorus calliope" +} +item { + name: "85227" + id: 1096 + display_name: "Hemideina crassidens" +} +item { + name: "36076" + id: 1097 + display_name: "Cophosaurus texanus" +} +item { + name: "36077" + id: 1098 + display_name: "Cophosaurus texanus texanus" +} +item { + name: "208112" + id: 1099 + display_name: "Palpita magniferalis" +} +item { + name: "85235" + id: 1100 + display_name: "Deinacrida rugosa" +} +item { + name: "93429" + id: 1101 + display_name: "Aeshna constricta" +} +item { + name: "36086" + id: 1102 + display_name: "Callisaurus draconoides rhodostictus" +} +item { + name: "126204" + id: 1103 + display_name: "Synchlora aerata" +} +item { + name: 
"93437" + id: 1104 + display_name: "Aeshna mixta" +} +item { + name: "126207" + id: 1105 + display_name: "Schizura unicornis" +} +item { + name: "126209" + id: 1106 + display_name: "Metcalfa pruinosa" +} +item { + name: "126211" + id: 1107 + display_name: "Poecilocapsus lineatus" +} +item { + name: "36100" + id: 1108 + display_name: "Uta stansburiana elegans" +} +item { + name: "48342" + id: 1109 + display_name: "Hemigrapsus nudus" +} +item { + name: "199942" + id: 1110 + display_name: "Strategus aloeus" +} +item { + name: "126215" + id: 1111 + display_name: "Monobia quadridens" +} +item { + name: "101640" + id: 1112 + display_name: "Gomphaeschna furcillata" +} +item { + name: "126217" + id: 1113 + display_name: "Pyrausta orphisalis" +} +item { + name: "36107" + id: 1114 + display_name: "Urosaurus ornatus" +} +item { + name: "51940" + id: 1115 + display_name: "Hemidactylus frenatus" +} +item { + name: "36121" + id: 1116 + display_name: "Urosaurus graciosus" +} +item { + name: "19743" + id: 1117 + display_name: "Megascops kennicottii" +} +item { + name: "68901" + id: 1118 + display_name: "Salticus scenicus" +} +item { + name: "44326" + id: 1119 + display_name: "Microtus californicus" +} +item { + name: "82481" + id: 1120 + display_name: "Pieris marginalis" +} +item { + name: "474332" + id: 1121 + display_name: "Porphyrio poliocephalus" +} +item { + name: "81674" + id: 1122 + display_name: "Rivula propinqualis" +} +item { + name: "126252" + id: 1123 + display_name: "Mastigoproctus giganteus" +} +item { + name: "36142" + id: 1124 + display_name: "Sceloporus undulatus" +} +item { + name: "68911" + id: 1125 + display_name: "Libellula needhami" +} +item { + name: "68912" + id: 1126 + display_name: "Dysdera crocata" +} +item { + name: "42888" + id: 1127 + display_name: "Macropus giganteus" +} +item { + name: "19765" + id: 1128 + display_name: "Megascops asio" +} +item { + name: "68918" + id: 1129 + display_name: "Poecilanthrax lucifer" +} +item { + name: "333705" + id: 1130 + display_name: "Pantherophis obsoletus lindheimeri" +} +item { + name: "126267" + id: 1131 + display_name: "Coleomegilla maculata" +} +item { + name: "101693" + id: 1132 + display_name: "Gomphus vastus" +} +item { + name: "85221" + id: 1133 + display_name: "Hemideina thoracica" +} +item { + name: "126276" + id: 1134 + display_name: "Agrotis ipsilon" +} +item { + name: "85317" + id: 1135 + display_name: "Eurosta solidaginis" +} +item { + name: "36169" + id: 1136 + display_name: "Sceloporus spinosus" +} +item { + name: "60752" + id: 1137 + display_name: "Hermeuptychia sosybius" +} +item { + name: "60754" + id: 1138 + display_name: "Pyromorpha dimidiata" +} +item { + name: "126291" + id: 1139 + display_name: "Prosapia bicincta" +} +item { + name: "52564" + id: 1140 + display_name: "Anthopleura elegantissima" +} +item { + name: "126293" + id: 1141 + display_name: "Prionoxystus robiniae" +} +item { + name: "120719" + id: 1142 + display_name: "Pseudacris hypochondriaca" +} +item { + name: "36189" + id: 1143 + display_name: "Sceloporus poinsettii" +} +item { + name: "52576" + id: 1144 + display_name: "Uroctonus mordax" +} +item { + name: "36198" + id: 1145 + display_name: "Sceloporus orcutti" +} +item { + name: "52584" + id: 1146 + display_name: "Pantala hymenaea" +} +item { + name: "44395" + id: 1147 + display_name: "Peromyscus leucopus" +} +item { + name: "36204" + id: 1148 + display_name: "Sceloporus occidentalis" +} +item { + name: "52589" + id: 1149 + display_name: "Coenonympha pamphilus" +} +item { + name: "3439" + id: 1150 + 
display_name: "Zenaida auriculata" +} +item { + name: "36208" + id: 1151 + display_name: "Sceloporus occidentalis bocourtii" +} +item { + name: "72936" + id: 1152 + display_name: "Hymenolaimus malacorhynchos" +} +item { + name: "85362" + id: 1153 + display_name: "Sphex ichneumoneus" +} +item { + name: "36217" + id: 1154 + display_name: "Sceloporus merriami" +} +item { + name: "68993" + id: 1155 + display_name: "Liometopum occidentale" +} +item { + name: "199916" + id: 1156 + display_name: "Setophaga caerulescens" +} +item { + name: "52620" + id: 1157 + display_name: "Cicindela oregona" +} +item { + name: "36243" + id: 1158 + display_name: "Sceloporus jarrovii" +} +item { + name: "52628" + id: 1159 + display_name: "Araneus diadematus" +} +item { + name: "180007" + id: 1160 + display_name: "Otospermophilus beecheyi" +} +item { + name: "85408" + id: 1161 + display_name: "Erythemis collocata" +} +item { + name: "36262" + id: 1162 + display_name: "Sceloporus grammicus" +} +item { + name: "60839" + id: 1163 + display_name: "Spilosoma virginica" +} +item { + name: "16968" + id: 1164 + display_name: "Camptostoma imberbe" +} +item { + name: "4715" + id: 1165 + display_name: "Caracara plancus" +} +item { + name: "313246" + id: 1166 + display_name: "Olla v-nigrum" +} +item { + name: "126393" + id: 1167 + display_name: "Stomolophus meleagris" +} +item { + name: "126397" + id: 1168 + display_name: "Halysidota harrisii" +} +item { + name: "64221" + id: 1169 + display_name: "Bipalium kewense" +} +item { + name: "28102" + id: 1170 + display_name: "Virginia striatula" +} +item { + name: "150985" + id: 1171 + display_name: "Planorbella trivolvis" +} +item { + name: "36306" + id: 1172 + display_name: "Phrynosoma modestum" +} +item { + name: "36307" + id: 1173 + display_name: "Phrynosoma orbiculare" +} +item { + name: "199929" + id: 1174 + display_name: "Plagiometriona clavata" +} +item { + name: "3545" + id: 1175 + display_name: "Columbina passerina" +} +item { + name: "36315" + id: 1176 + display_name: "Phrynosoma hernandesi" +} +item { + name: "367556" + id: 1177 + display_name: "Eupsittula nana" +} +item { + name: "371963" + id: 1178 + display_name: "Lampropeltis multifasciata" +} +item { + name: "36339" + id: 1179 + display_name: "Holbrookia propinqua" +} +item { + name: "36094" + id: 1180 + display_name: "Uta stansburiana" +} +item { + name: "36343" + id: 1181 + display_name: "Holbrookia maculata" +} +item { + name: "52766" + id: 1182 + display_name: "Megaphasma denticrus" +} +item { + name: "18941" + id: 1183 + display_name: "Nestor notabilis" +} +item { + name: "3580" + id: 1184 + display_name: "Columbina talpacoti" +} +item { + name: "123690" + id: 1185 + display_name: "Caranx melampygus" +} +item { + name: "52482" + id: 1186 + display_name: "Episyrphus balteatus" +} +item { + name: "28762" + id: 1187 + display_name: "Rhinocheilus lecontei" +} +item { + name: "3607" + id: 1188 + display_name: "Geopelia striata" +} +item { + name: "52484" + id: 1189 + display_name: "Celastrina echo" +} +item { + name: "61293" + id: 1190 + display_name: "Thaumetopoea pityocampa" +} +item { + name: "19998" + id: 1191 + display_name: "Athene noctua" +} +item { + name: "44575" + id: 1192 + display_name: "Rattus rattus" +} +item { + name: "44576" + id: 1193 + display_name: "Rattus norvegicus" +} +item { + name: "133250" + id: 1194 + display_name: "Tettigonia viridissima" +} +item { + name: "52774" + id: 1195 + display_name: "Bombus fervidus" +} +item { + name: "49756" + id: 1196 + display_name: "Nephila clavipes" +} +item 
{ + name: "52779" + id: 1197 + display_name: "Bombus bimaculatus" +} +item { + name: "52782" + id: 1198 + display_name: "Melissodes bimaculata" +} +item { + name: "126513" + id: 1199 + display_name: "Larinioides cornutus" +} +item { + name: "69170" + id: 1200 + display_name: "Hemigrapsus oregonensis" +} +item { + name: "1971" + id: 1201 + display_name: "Crotophaga ani" +} +item { + name: "12942" + id: 1202 + display_name: "Sialia sialis" +} +item { + name: "126532" + id: 1203 + display_name: "Toxomerus geminatus" +} +item { + name: "216649" + id: 1204 + display_name: "Chauliognathus pensylvanicus" +} +item { + name: "3734" + id: 1205 + display_name: "Platalea alba" +} +item { + name: "216651" + id: 1206 + display_name: "Chelinidea vittiger" +} +item { + name: "20044" + id: 1207 + display_name: "Bubo virginianus" +} +item { + name: "11855" + id: 1208 + display_name: "Petrochelidon fulva" +} +item { + name: "28246" + id: 1209 + display_name: "Arizona elegans" +} +item { + name: "224855" + id: 1210 + display_name: "Melipotis indomita" +} +item { + name: "11867" + id: 1211 + display_name: "Progne subis" +} +item { + name: "126562" + id: 1212 + display_name: "Setophaga coronata auduboni" +} +item { + name: "126568" + id: 1213 + display_name: "Manduca rustica" +} +item { + name: "11882" + id: 1214 + display_name: "Hirundo neoxena" +} +item { + name: "11901" + id: 1215 + display_name: "Hirundo rustica" +} +item { + name: "52865" + id: 1216 + display_name: "Tramea lacerata" +} +item { + name: "142978" + id: 1217 + display_name: "Simyra insularis" +} +item { + name: "123499" + id: 1218 + display_name: "Notophthalmus viridescens viridescens" +} +item { + name: "339592" + id: 1219 + display_name: "Calidris virgata" +} +item { + name: "339593" + id: 1220 + display_name: "Calidris pugnax" +} +item { + name: "44311" + id: 1221 + display_name: "Microtus pennsylvanicus" +} +item { + name: "142988" + id: 1222 + display_name: "Lerema accius" +} +item { + name: "142990" + id: 1223 + display_name: "Autographa precationis" +} +item { + name: "142995" + id: 1224 + display_name: "Hymenia perspectalis" +} +item { + name: "129423" + id: 1225 + display_name: "Zelus luridus" +} +item { + name: "3733" + id: 1226 + display_name: "Platalea regia" +} +item { + name: "470678" + id: 1227 + display_name: "Cerithideopsis californica" +} +item { + name: "146713" + id: 1228 + display_name: "Elaphria grata" +} +item { + name: "143002" + id: 1229 + display_name: "Orthonama obstipata" +} +item { + name: "11931" + id: 1230 + display_name: "Tachycineta thalassina" +} +item { + name: "143005" + id: 1231 + display_name: "Costaconvexa centrostrigaria" +} +item { + name: "3743" + id: 1232 + display_name: "Bostrychia hagedash" +} +item { + name: "143009" + id: 1233 + display_name: "Ectropis crepuscularia" +} +item { + name: "36514" + id: 1234 + display_name: "Anolis carolinensis" +} +item { + name: "143012" + id: 1235 + display_name: "Zanclognatha pedipilalis" +} +item { + name: "11941" + id: 1236 + display_name: "Riparia riparia" +} +item { + name: "52902" + id: 1237 + display_name: "Palthis asopialis" +} +item { + name: "3751" + id: 1238 + display_name: "Eudocimus albus" +} +item { + name: "52906" + id: 1239 + display_name: "Chytonix palliatricula" +} +item { + name: "3756" + id: 1240 + display_name: "Plegadis falcinellus" +} +item { + name: "3759" + id: 1241 + display_name: "Plegadis chihi" +} +item { + name: "143024" + id: 1242 + display_name: "Eusarca confusaria" +} +item { + name: "62067" + id: 1243 + display_name: "Orthetrum 
cancellatum" +} +item { + name: "28340" + id: 1244 + display_name: "Thamnophis sauritus" +} +item { + name: "28345" + id: 1245 + display_name: "Thamnophis cyrtopsis" +} +item { + name: "143034" + id: 1246 + display_name: "Hippodamia variegata" +} +item { + name: "28347" + id: 1247 + display_name: "Thamnophis cyrtopsis ocellatus" +} +item { + name: "52925" + id: 1248 + display_name: "Phyciodes tharos" +} +item { + name: "8010" + id: 1249 + display_name: "Corvus corax" +} +item { + name: "11970" + id: 1250 + display_name: "Stelgidopteryx serripennis" +} +item { + name: "28362" + id: 1251 + display_name: "Thamnophis sirtalis" +} +item { + name: "3788" + id: 1252 + display_name: "Sula dactylatra" +} +item { + name: "44749" + id: 1253 + display_name: "Neotoma fuscipes" +} +item { + name: "52943" + id: 1254 + display_name: "Trichodezia albovittata" +} +item { + name: "3793" + id: 1255 + display_name: "Sula sula" +} +item { + name: "101667" + id: 1256 + display_name: "Gomphus exilis" +} +item { + name: "3797" + id: 1257 + display_name: "Sula leucogaster" +} +item { + name: "118486" + id: 1258 + display_name: "Macaria aemulataria" +} +item { + name: "3801" + id: 1259 + display_name: "Morus serrator" +} +item { + name: "28378" + id: 1260 + display_name: "Thamnophis radix" +} +item { + name: "118492" + id: 1261 + display_name: "Helicoverpa zea" +} +item { + name: "148793" + id: 1262 + display_name: "Asterocampa leilia" +} +item { + name: "28384" + id: 1263 + display_name: "Thamnophis proximus rubrilineatus" +} +item { + name: "257761" + id: 1264 + display_name: "Phocides polybius" +} +item { + name: "28387" + id: 1265 + display_name: "Thamnophis proximus orarius" +} +item { + name: "28390" + id: 1266 + display_name: "Thamnophis marcianus" +} +item { + name: "118503" + id: 1267 + display_name: "Darapsa myron" +} +item { + name: "3817" + id: 1268 + display_name: "Eudyptula minor" +} +item { + name: "36135" + id: 1269 + display_name: "Uma scoparia" +} +item { + name: "28396" + id: 1270 + display_name: "Thamnophis hammondii" +} +item { + name: "28400" + id: 1271 + display_name: "Thamnophis elegans elegans" +} +item { + name: "118513" + id: 1272 + display_name: "Hypena scabra" +} +item { + name: "28403" + id: 1273 + display_name: "Thamnophis elegans vagrans" +} +item { + name: "201342" + id: 1274 + display_name: "Chalcoela iphitalis" +} +item { + name: "3831" + id: 1275 + display_name: "Megadyptes antipodes" +} +item { + name: "126712" + id: 1276 + display_name: "Corydalus cornutus" +} +item { + name: "30676" + id: 1277 + display_name: "Agkistrodon piscivorus leucostoma" +} +item { + name: "3834" + id: 1278 + display_name: "Scopus umbretta" +} +item { + name: "213631" + id: 1279 + display_name: "Anicla infecta" +} +item { + name: "143105" + id: 1280 + display_name: "Pleuroprucha insulsaria" +} +item { + name: "28418" + id: 1281 + display_name: "Thamnophis atratus" +} +item { + name: "118531" + id: 1282 + display_name: "Parallelia bistriaris" +} +item { + name: "145363" + id: 1283 + display_name: "Troglodytes troglodytes" +} +item { + name: "3845" + id: 1284 + display_name: "Calidris canutus" +} +item { + name: "12038" + id: 1285 + display_name: "Lanius collurio" +} +item { + name: "143114" + id: 1286 + display_name: "Phragmatobia fuliginosa" +} +item { + name: "3851" + id: 1287 + display_name: "Calidris bairdii" +} +item { + name: "324226" + id: 1288 + display_name: "Meleagris gallopavo intermedia" +} +item { + name: "143118" + id: 1289 + display_name: "Pseudeustrotia carneola" +} +item { + name: "3855" 
+ id: 1290 + display_name: "Calidris mauri" +} +item { + name: "3856" + id: 1291 + display_name: "Calidris maritima" +} +item { + name: "3857" + id: 1292 + display_name: "Calidris alpina" +} +item { + name: "143124" + id: 1293 + display_name: "Parapediasia teterrella" +} +item { + name: "143125" + id: 1294 + display_name: "Hypena madefactalis" +} +item { + name: "3863" + id: 1295 + display_name: "Calidris ferruginea" +} +item { + name: "118552" + id: 1296 + display_name: "Felis catus" +} +item { + name: "3865" + id: 1297 + display_name: "Calidris melanotos" +} +item { + name: "3869" + id: 1298 + display_name: "Limnodromus griseus" +} +item { + name: "118558" + id: 1299 + display_name: "Manduca quinquemaculata" +} +item { + name: "118559" + id: 1300 + display_name: "Tetraopes tetrophthalmus" +} +item { + name: "12065" + id: 1301 + display_name: "Malurus cyaneus" +} +item { + name: "3878" + id: 1302 + display_name: "Tringa nebularia" +} +item { + name: "101681" + id: 1303 + display_name: "Gomphus militaris" +} +item { + name: "413483" + id: 1304 + display_name: "Todiramphus sanctus vagans" +} +item { + name: "3885" + id: 1305 + display_name: "Tringa ochropus" +} +item { + name: "3888" + id: 1306 + display_name: "Tringa glareola" +} +item { + name: "126770" + id: 1307 + display_name: "Vulpes vulpes fulvus" +} +item { + name: "3892" + id: 1308 + display_name: "Tringa melanoleuca" +} +item { + name: "3893" + id: 1309 + display_name: "Tringa flavipes" +} +item { + name: "126775" + id: 1310 + display_name: "Cervus elaphus nelsoni" +} +item { + name: "3896" + id: 1311 + display_name: "Numenius arquata" +} +item { + name: "126777" + id: 1312 + display_name: "Peucetia viridans" +} +item { + name: "3901" + id: 1313 + display_name: "Numenius phaeopus" +} +item { + name: "32058" + id: 1314 + display_name: "Elgaria multicarinata webbii" +} +item { + name: "413506" + id: 1315 + display_name: "Phalacrocorax carbo novaehollandiae" +} +item { + name: "413508" + id: 1316 + display_name: "Petroica macrocephala macrocephala" +} +item { + name: "413512" + id: 1317 + display_name: "Petroica australis longipes" +} +item { + name: "61258" + id: 1318 + display_name: "Junonia evarete" +} +item { + name: "28493" + id: 1319 + display_name: "Tantilla nigriceps" +} +item { + name: "413522" + id: 1320 + display_name: "Prosthemadera novaeseelandiae novaeseelandiae" +} +item { + name: "58506" + id: 1321 + display_name: "Polites themistocles" +} +item { + name: "28505" + id: 1322 + display_name: "Tantilla gracilis" +} +item { + name: "20315" + id: 1323 + display_name: "Asio flammeus" +} +item { + name: "143196" + id: 1324 + display_name: "Schinia arcigera" +} +item { + name: "413533" + id: 1325 + display_name: "Rhipidura fuliginosa fuliginosa" +} +item { + name: "3936" + id: 1326 + display_name: "Scolopax minor" +} +item { + name: "3938" + id: 1327 + display_name: "Arenaria interpres" +} +item { + name: "3941" + id: 1328 + display_name: "Arenaria melanocephala" +} +item { + name: "413543" + id: 1329 + display_name: "Rhipidura fuliginosa placabilis" +} +item { + name: "3947" + id: 1330 + display_name: "Limosa limosa" +} +item { + name: "3950" + id: 1331 + display_name: "Limosa haemastica" +} +item { + name: "126269" + id: 1332 + display_name: "Austrolestes colensonis" +} +item { + name: "3954" + id: 1333 + display_name: "Limosa fedoa" +} +item { + name: "199998" + id: 1334 + display_name: "Pedicia albivitta" +} +item { + name: "3959" + id: 1335 + display_name: "Phalaropus lobatus" +} +item { + name: "3962" + id: 1336 + 
display_name: "Bartramia longicauda" +} +item { + name: "199999" + id: 1337 + display_name: "Callopistria mollissima" +} +item { + name: "104426" + id: 1338 + display_name: "Lestes disjunctus" +} +item { + name: "126848" + id: 1339 + display_name: "Delphinia picta" +} +item { + name: "3951" + id: 1340 + display_name: "Limosa lapponica" +} +item { + name: "20356" + id: 1341 + display_name: "Aegolius acadicus" +} +item { + name: "121792" + id: 1342 + display_name: "Polistes carolina" +} +item { + name: "3978" + id: 1343 + display_name: "Actitis hypoleucos" +} +item { + name: "53911" + id: 1344 + display_name: "Cyprinus carpio" +} +item { + name: "135055" + id: 1345 + display_name: "Bufotes balearicus" +} +item { + name: "19121" + id: 1346 + display_name: "Trichoglossus haematodus" +} +item { + name: "28562" + id: 1347 + display_name: "Storeria dekayi" +} +item { + name: "28563" + id: 1348 + display_name: "Storeria dekayi texana" +} +item { + name: "20372" + id: 1349 + display_name: "Surnia ulula" +} +item { + name: "135064" + id: 1350 + display_name: "Bufotes viridis" +} +item { + name: "28570" + id: 1351 + display_name: "Storeria dekayi dekayi" +} +item { + name: "61341" + id: 1352 + display_name: "Narceus americanus" +} +item { + name: "7493" + id: 1353 + display_name: "Polioptila caerulea" +} +item { + name: "29339" + id: 1354 + display_name: "Natrix natrix" +} +item { + name: "9135" + id: 1355 + display_name: "Spizella passerina" +} +item { + name: "126889" + id: 1356 + display_name: "Toxomerus marginatus" +} +item { + name: "143274" + id: 1357 + display_name: "Gluphisia septentrionis" +} +item { + name: "343021" + id: 1358 + display_name: "Anguis fragilis" +} +item { + name: "14591" + id: 1359 + display_name: "Pycnonotus jocosus" +} +item { + name: "10227" + id: 1360 + display_name: "Passerina cyanea" +} +item { + name: "10228" + id: 1361 + display_name: "Passerina versicolor" +} +item { + name: "61371" + id: 1362 + display_name: "Panulirus interruptus" +} +item { + name: "143294" + id: 1363 + display_name: "Colias croceus" +} +item { + name: "135104" + id: 1364 + display_name: "Ichthyosaura alpestris" +} +item { + name: "83958" + id: 1365 + display_name: "Phryganidia californica" +} +item { + name: "143302" + id: 1366 + display_name: "Megapallifera mutabilis" +} +item { + name: "12231" + id: 1367 + display_name: "Manorina melanocephala" +} +item { + name: "200661" + id: 1368 + display_name: "Coluber constrictor mormon" +} +item { + name: "3681" + id: 1369 + display_name: "Ocyphaps lophotes" +} +item { + name: "4773" + id: 1370 + display_name: "Jabiru mycteria" +} +item { + name: "135140" + id: 1371 + display_name: "Taricha sierrae" +} +item { + name: "28649" + id: 1372 + display_name: "Sonora semiannulata" +} +item { + name: "53226" + id: 1373 + display_name: "Boisea rubrolineata" +} +item { + name: "53227" + id: 1374 + display_name: "Boisea trivittata" +} +item { + name: "14593" + id: 1375 + display_name: "Pycnonotus cafer" +} +item { + name: "61428" + id: 1376 + display_name: "Arion subfuscus" +} +item { + name: "333822" + id: 1377 + display_name: "Anser cygnoides domesticus" +} +item { + name: "41641" + id: 1378 + display_name: "Ursus arctos" +} +item { + name: "56602" + id: 1379 + display_name: "Plebejus lupini" +} +item { + name: "55295" + id: 1380 + display_name: "Grapsus grapsus" +} +item { + name: "36181" + id: 1381 + display_name: "Sceloporus cyanogenys" +} +item { + name: "41708" + id: 1382 + display_name: "Phoca vitulina" +} +item { + name: "118788" + id: 1383 + 
display_name: "Desmia funeralis" +} +item { + name: "61445" + id: 1384 + display_name: "Acanthocephala terminalis" +} +item { + name: "30721" + id: 1385 + display_name: "Crotalus triseriatus" +} +item { + name: "180010" + id: 1386 + display_name: "Callospermophilus lateralis" +} +item { + name: "53875" + id: 1387 + display_name: "Ocypode quadrata" +} +item { + name: "18358" + id: 1388 + display_name: "Picus viridis" +} +item { + name: "143390" + id: 1389 + display_name: "Oxidus gracilis" +} +item { + name: "55785" + id: 1390 + display_name: "Ochlodes agricola" +} +item { + name: "4141" + id: 1391 + display_name: "Phoebastria nigripes" +} +item { + name: "20526" + id: 1392 + display_name: "Struthio camelus" +} +item { + name: "32093" + id: 1393 + display_name: "Boa constrictor" +} +item { + name: "4144" + id: 1394 + display_name: "Phoebastria immutabilis" +} +item { + name: "74442" + id: 1395 + display_name: "Hydrochoerus hydrochaeris" +} +item { + name: "61492" + id: 1396 + display_name: "Chrysopilus thoracicus" +} +item { + name: "61495" + id: 1397 + display_name: "Erythemis simplicicollis" +} +item { + name: "389177" + id: 1398 + display_name: "Eriophora pustulosa" +} +item { + name: "61503" + id: 1399 + display_name: "Ascalapha odorata" +} +item { + name: "118855" + id: 1400 + display_name: "Calosoma scrutator" +} +item { + name: "61513" + id: 1401 + display_name: "Adelges tsugae" +} +item { + name: "28749" + id: 1402 + display_name: "Salvadora grahamiae" +} +item { + name: "143440" + id: 1403 + display_name: "Ceratomia catalpae" +} +item { + name: "61523" + id: 1404 + display_name: "Helix pomatia" +} +item { + name: "4180" + id: 1405 + display_name: "Fulmarus glacialis" +} +item { + name: "143445" + id: 1406 + display_name: "Pachysphinx modesta" +} +item { + name: "233560" + id: 1407 + display_name: "Vespula squamosa" +} +item { + name: "126308" + id: 1408 + display_name: "Marpesia chiron" +} +item { + name: "61536" + id: 1409 + display_name: "Calopteryx virgo" +} +item { + name: "685" + id: 1410 + display_name: "Francolinus pondicerianus" +} +item { + name: "60774" + id: 1411 + display_name: "Psychomorpha epimenis" +} +item { + name: "135271" + id: 1412 + display_name: "Amphibolips confluenta" +} +item { + name: "69736" + id: 1413 + display_name: "Schistocerca americana" +} +item { + name: "69737" + id: 1414 + display_name: "Xylophanes tersa" +} +item { + name: "6141" + id: 1415 + display_name: "Cynanthus latirostris" +} +item { + name: "4205" + id: 1416 + display_name: "Podiceps nigricollis" +} +item { + name: "69743" + id: 1417 + display_name: "Wallengrenia otho" +} +item { + name: "4208" + id: 1418 + display_name: "Podiceps cristatus" +} +item { + name: "4209" + id: 1419 + display_name: "Podiceps auritus" +} +item { + name: "118901" + id: 1420 + display_name: "Hyles gallii" +} +item { + name: "17871" + id: 1421 + display_name: "Dendrocopos major" +} +item { + name: "143484" + id: 1422 + display_name: "Blepharomastix ranalis" +} +item { + name: "4224" + id: 1423 + display_name: "Podiceps grisegena" +} +item { + name: "200834" + id: 1424 + display_name: "Sphenodon punctatus" +} +item { + name: "179995" + id: 1425 + display_name: "Urocitellus beldingi" +} +item { + name: "322024" + id: 1426 + display_name: "Apatura ilia" +} +item { + name: "44396" + id: 1427 + display_name: "Peromyscus maniculatus" +} +item { + name: "4237" + id: 1428 + display_name: "Tachybaptus ruficollis" +} +item { + name: "118930" + id: 1429 + display_name: "Spodoptera ornithogalli" +} +item { + name: "118936" + 
id: 1430 + display_name: "Euplagia quadripunctaria" +} +item { + name: "4804" + id: 1431 + display_name: "Charadrius montanus" +} +item { + name: "127133" + id: 1432 + display_name: "Hyphantria cunea" +} +item { + name: "143518" + id: 1433 + display_name: "Prochoerodes lineola" +} +item { + name: "52592" + id: 1434 + display_name: "Pararge aegeria" +} +item { + name: "36149" + id: 1435 + display_name: "Sceloporus torquatus" +} +item { + name: "118951" + id: 1436 + display_name: "Pterophylla camellifolia" +} +item { + name: "4265" + id: 1437 + display_name: "Phalacrocorax auritus" +} +item { + name: "4270" + id: 1438 + display_name: "Phalacrocorax carbo" +} +item { + name: "446640" + id: 1439 + display_name: "Neomonachus schauinslandi" +} +item { + name: "118961" + id: 1440 + display_name: "Conocephalus brevipennis" +} +item { + name: "28850" + id: 1441 + display_name: "Regina septemvittata" +} +item { + name: "4277" + id: 1442 + display_name: "Phalacrocorax penicillatus" +} +item { + name: "4234" + id: 1443 + display_name: "Aechmophorus clarkii" +} +item { + name: "118967" + id: 1444 + display_name: "Psyllobora vigintimaculata" +} +item { + name: "118968" + id: 1445 + display_name: "Allograpta obliqua" +} +item { + name: "118970" + id: 1446 + display_name: "Bombus impatiens" +} +item { + name: "123594" + id: 1447 + display_name: "Anaxyrus americanus americanus" +} +item { + name: "69838" + id: 1448 + display_name: "Cyanea capillata" +} +item { + name: "69844" + id: 1449 + display_name: "Anthocharis midea" +} +item { + name: "48505" + id: 1450 + display_name: "Junonia coenia" +} +item { + name: "151769" + id: 1451 + display_name: "Diaphania hyalinata" +} +item { + name: "151770" + id: 1452 + display_name: "Peridea angulosa" +} +item { + name: "53467" + id: 1453 + display_name: "Leucauge venusta" +} +item { + name: "119013" + id: 1454 + display_name: "Ctenucha virginica" +} +item { + name: "4327" + id: 1455 + display_name: "Pelecanus onocrotalus" +} +item { + name: "143592" + id: 1456 + display_name: "Spragueia leo" +} +item { + name: "200938" + id: 1457 + display_name: "Diaethria anna" +} +item { + name: "4334" + id: 1458 + display_name: "Pelecanus erythrorhynchos" +} +item { + name: "151794" + id: 1459 + display_name: "Atta texana" +} +item { + name: "3454" + id: 1460 + display_name: "Zenaida macroura" +} +item { + name: "4872" + id: 1461 + display_name: "Vanellus miles" +} +item { + name: "4345" + id: 1462 + display_name: "Larus occidentalis" +} +item { + name: "143610" + id: 1463 + display_name: "Besma quercivoraria" +} +item { + name: "20733" + id: 1464 + display_name: "Trogon massena" +} +item { + name: "143615" + id: 1465 + display_name: "Udea rubigalis" +} +item { + name: "4352" + id: 1466 + display_name: "Larus thayeri" +} +item { + name: "4353" + id: 1467 + display_name: "Larus heermanni" +} +item { + name: "4354" + id: 1468 + display_name: "Larus livens" +} +item { + name: "4356" + id: 1469 + display_name: "Larus canus" +} +item { + name: "220826" + id: 1470 + display_name: "Habrosyne scripta" +} +item { + name: "4361" + id: 1471 + display_name: "Larus glaucoides" +} +item { + name: "4364" + id: 1472 + display_name: "Larus delawarensis" +} +item { + name: "102672" + id: 1473 + display_name: "Hetaerina titia" +} +item { + name: "20754" + id: 1474 + display_name: "Trogon collaris" +} +item { + name: "479512" + id: 1475 + display_name: "Acronicta fallax" +} +item { + name: "3460" + id: 1476 + display_name: "Zenaida asiatica" +} +item { + name: "119066" + id: 1477 + display_name: 
"Idia lubricalis" +} +item { + name: "119068" + id: 1478 + display_name: "Apodemia virgulti" +} +item { + name: "4381" + id: 1479 + display_name: "Larus fuscus" +} +item { + name: "4385" + id: 1480 + display_name: "Larus californicus" +} +item { + name: "69922" + id: 1481 + display_name: "Oncorhynchus nerka" +} +item { + name: "12580" + id: 1482 + display_name: "Prosthemadera novaeseelandiae" +} +item { + name: "69925" + id: 1483 + display_name: "Clinocardium nuttallii" +} +item { + name: "20781" + id: 1484 + display_name: "Trogon elegans" +} +item { + name: "4399" + id: 1485 + display_name: "Larus glaucescens" +} +item { + name: "94513" + id: 1486 + display_name: "Archilestes grandis" +} +item { + name: "119090" + id: 1487 + display_name: "Eremnophila aureonotata" +} +item { + name: "20787" + id: 1488 + display_name: "Trogon citreolus" +} +item { + name: "69940" + id: 1489 + display_name: "Hemiargus ceraunus" +} +item { + name: "61749" + id: 1490 + display_name: "Lucanus cervus" +} +item { + name: "4415" + id: 1491 + display_name: "Cepphus columba" +} +item { + name: "4832" + id: 1492 + display_name: "Himantopus leucocephalus" +} +item { + name: "4418" + id: 1493 + display_name: "Cepphus grylle" +} +item { + name: "12612" + id: 1494 + display_name: "Anthornis melanura" +} +item { + name: "125627" + id: 1495 + display_name: "Ellychnia corrusca" +} +item { + name: "201031" + id: 1496 + display_name: "Leptoptilos crumenifer" +} +item { + name: "201032" + id: 1497 + display_name: "Threskiornis moluccus" +} +item { + name: "60812" + id: 1498 + display_name: "Lucanus capreolus" +} +item { + name: "10295" + id: 1499 + display_name: "Thraupis episcopus" +} +item { + name: "209233" + id: 1500 + display_name: "Equus caballus" +} +item { + name: "119122" + id: 1501 + display_name: "Araneus trifolium" +} +item { + name: "201043" + id: 1502 + display_name: "Geranoaetus albicaudatus" +} +item { + name: "61781" + id: 1503 + display_name: "Ochlodes sylvanus" +} +item { + name: "49133" + id: 1504 + display_name: "Vanessa atalanta" +} +item { + name: "94556" + id: 1505 + display_name: "Argia lugens" +} +item { + name: "94557" + id: 1506 + display_name: "Argia moesta" +} +item { + name: "61524" + id: 1507 + display_name: "Forficula auricularia" +} +item { + name: "4449" + id: 1508 + display_name: "Sterna paradisaea" +} +item { + name: "4450" + id: 1509 + display_name: "Sterna hirundo" +} +item { + name: "348515" + id: 1510 + display_name: "Nyctemera annulata" +} +item { + name: "110625" + id: 1511 + display_name: "Progomphus obscurus" +} +item { + name: "94566" + id: 1512 + display_name: "Argia plana" +} +item { + name: "4457" + id: 1513 + display_name: "Sterna forsteri" +} +item { + name: "94571" + id: 1514 + display_name: "Argia sedula" +} +item { + name: "61804" + id: 1515 + display_name: "Olivella biplicata" +} +item { + name: "204532" + id: 1516 + display_name: "Lanius excubitor" +} +item { + name: "29038" + id: 1517 + display_name: "Pituophis deppei" +} +item { + name: "143728" + id: 1518 + display_name: "Choristoneura rosaceana" +} +item { + name: "94577" + id: 1519 + display_name: "Argia translata" +} +item { + name: "130451" + id: 1520 + display_name: "Dione juno" +} +item { + name: "29044" + id: 1521 + display_name: "Pituophis catenifer" +} +item { + name: "70005" + id: 1522 + display_name: "Ilyanassa obsoleta" +} +item { + name: "143734" + id: 1523 + display_name: "Eupithecia miserulata" +} +item { + name: "20856" + id: 1524 + display_name: "Pharomachrus mocinno" +} +item { + name: "29049" + id: 
1525 + display_name: "Pituophis catenifer deserticola" +} +item { + name: "29052" + id: 1526 + display_name: "Pituophis catenifer affinis" +} +item { + name: "29053" + id: 1527 + display_name: "Pituophis catenifer annectens" +} +item { + name: "4478" + id: 1528 + display_name: "Sterna striata" +} +item { + name: "407459" + id: 1529 + display_name: "Dolomedes minor" +} +item { + name: "4489" + id: 1530 + display_name: "Stercorarius parasiticus" +} +item { + name: "4491" + id: 1531 + display_name: "Stercorarius pomarinus" +} +item { + name: "6969" + id: 1532 + display_name: "Anas gracilis" +} +item { + name: "4494" + id: 1533 + display_name: "Rissa tridactyla" +} +item { + name: "4496" + id: 1534 + display_name: "Rynchops niger" +} +item { + name: "4501" + id: 1535 + display_name: "Alca torda" +} +item { + name: "4504" + id: 1536 + display_name: "Fratercula arctica" +} +item { + name: "4509" + id: 1537 + display_name: "Fratercula cirrhata" +} +item { + name: "26693" + id: 1538 + display_name: "Scaphiopus hurterii" +} +item { + name: "94624" + id: 1539 + display_name: "Arigomphus submedianus" +} +item { + name: "94625" + id: 1540 + display_name: "Arigomphus villosipes" +} +item { + name: "120720" + id: 1541 + display_name: "Pseudacris sierra" +} +item { + name: "70057" + id: 1542 + display_name: "Agrilus planipennis" +} +item { + name: "127402" + id: 1543 + display_name: "Grammia virgo" +} +item { + name: "51271" + id: 1544 + display_name: "Trachemys scripta elegans" +} +item { + name: "12716" + id: 1545 + display_name: "Turdus merula" +} +item { + name: "12718" + id: 1546 + display_name: "Turdus plumbeus" +} +item { + name: "12720" + id: 1547 + display_name: "Turdus grayi" +} +item { + name: "63697" + id: 1548 + display_name: "Metacarcinus magister" +} +item { + name: "12727" + id: 1549 + display_name: "Turdus migratorius" +} +item { + name: "26698" + id: 1550 + display_name: "Spea multiplicata" +} +item { + name: "12735" + id: 1551 + display_name: "Turdus viscivorus" +} +item { + name: "26699" + id: 1552 + display_name: "Spea bombifrons" +} +item { + name: "127431" + id: 1553 + display_name: "Emmelina monodactyla" +} +item { + name: "4553" + id: 1554 + display_name: "Cerorhinca monocerata" +} +item { + name: "12748" + id: 1555 + display_name: "Turdus philomelos" +} +item { + name: "233933" + id: 1556 + display_name: "Zale horrida" +} +item { + name: "1468" + id: 1557 + display_name: "Galbula ruficauda" +} +item { + name: "111055" + id: 1558 + display_name: "Pseudoleon superbus" +} +item { + name: "61908" + id: 1559 + display_name: "Orgyia vetusta" +} +item { + name: "43086" + id: 1560 + display_name: "Procavia capensis" +} +item { + name: "143830" + id: 1561 + display_name: "Eumorpha vitis" +} +item { + name: "67663" + id: 1562 + display_name: "Leptysma marginicollis" +} +item { + name: "127457" + id: 1563 + display_name: "Idia americalis" +} +item { + name: "4578" + id: 1564 + display_name: "Jacana spinosa" +} +item { + name: "127460" + id: 1565 + display_name: "Idia aemula" +} +item { + name: "201192" + id: 1566 + display_name: "Saxicola rubicola" +} +item { + name: "20969" + id: 1567 + display_name: "Upupa epops" +} +item { + name: "94699" + id: 1568 + display_name: "Aspidoscelis marmorata" +} +item { + name: "10322" + id: 1569 + display_name: "Euphagus carolinus" +} +item { + name: "53743" + id: 1570 + display_name: "Uca pugilator" +} +item { + name: "61256" + id: 1571 + display_name: "Leptoglossus phyllopus" +} +item { + name: "29438" + id: 1572 + display_name: "Coluber flagellum 
piceus" +} +item { + name: "53750" + id: 1573 + display_name: "Lottia gigantea" +} +item { + name: "143865" + id: 1574 + display_name: "Odocoileus hemionus hemionus" +} +item { + name: "143867" + id: 1575 + display_name: "Protoboarmia porcelaria" +} +item { + name: "209405" + id: 1576 + display_name: "Cenopis reticulatana" +} +item { + name: "49920" + id: 1577 + display_name: "Nymphalis californica" +} +item { + name: "53762" + id: 1578 + display_name: "Scolopendra polymorpha" +} +item { + name: "127492" + id: 1579 + display_name: "Megalographa biloba" +} +item { + name: "62470" + id: 1580 + display_name: "Limax maximus" +} +item { + name: "4621" + id: 1581 + display_name: "Gavia pacifica" +} +item { + name: "14884" + id: 1582 + display_name: "Mimus gilvus" +} +item { + name: "29200" + id: 1583 + display_name: "Opheodrys aestivus" +} +item { + name: "201233" + id: 1584 + display_name: "Passer italiae" +} +item { + name: "4626" + id: 1585 + display_name: "Gavia immer" +} +item { + name: "4627" + id: 1586 + display_name: "Gavia stellata" +} +item { + name: "12822" + id: 1587 + display_name: "Oenanthe oenanthe" +} +item { + name: "4631" + id: 1588 + display_name: "Fregata magnificens" +} +item { + name: "4636" + id: 1589 + display_name: "Fregata minor" +} +item { + name: "70174" + id: 1590 + display_name: "Hypolimnas bolina" +} +item { + name: "4643" + id: 1591 + display_name: "Falco subbuteo" +} +item { + name: "4644" + id: 1592 + display_name: "Falco mexicanus" +} +item { + name: "4645" + id: 1593 + display_name: "Falco femoralis" +} +item { + name: "4647" + id: 1594 + display_name: "Falco peregrinus" +} +item { + name: "119340" + id: 1595 + display_name: "Amphipyra pyramidoides" +} +item { + name: "61997" + id: 1596 + display_name: "Steatoda grossa" +} +item { + name: "70191" + id: 1597 + display_name: "Ischnura ramburii" +} +item { + name: "53809" + id: 1598 + display_name: "Phidippus audax" +} +item { + name: "143213" + id: 1599 + display_name: "Frontinella communis" +} +item { + name: "4664" + id: 1600 + display_name: "Falco rufigularis" +} +item { + name: "4665" + id: 1601 + display_name: "Falco sparverius" +} +item { + name: "19893" + id: 1602 + display_name: "Strix varia" +} +item { + name: "4672" + id: 1603 + display_name: "Falco columbarius" +} +item { + name: "201281" + id: 1604 + display_name: "Phyllodesma americana" +} +item { + name: "201282" + id: 1605 + display_name: "Gallinula chloropus" +} +item { + name: "152131" + id: 1606 + display_name: "Bagrada hilaris" +} +item { + name: "145276" + id: 1607 + display_name: "Cardellina pusilla" +} +item { + name: "12878" + id: 1608 + display_name: "Catharus ustulatus" +} +item { + name: "4690" + id: 1609 + display_name: "Falco novaeseelandiae" +} +item { + name: "53843" + id: 1610 + display_name: "Brephidium exilis" +} +item { + name: "36281" + id: 1611 + display_name: "Sceloporus clarkii" +} +item { + name: "12890" + id: 1612 + display_name: "Catharus guttatus" +} +item { + name: "62045" + id: 1613 + display_name: "Lygaeus kalmii" +} +item { + name: "47075" + id: 1614 + display_name: "Dasypus novemcinctus" +} +item { + name: "12901" + id: 1615 + display_name: "Catharus fuscescens" +} +item { + name: "4714" + id: 1616 + display_name: "Caracara cheriway" +} +item { + name: "53867" + id: 1617 + display_name: "Erythemis plebeja" +} +item { + name: "62060" + id: 1618 + display_name: "Palomena prasina" +} +item { + name: "53869" + id: 1619 + display_name: "Ocypus olens" +} +item { + name: "4719" + id: 1620 + display_name: "Herpetotheres 
cachinnans" +} +item { + name: "116840" + id: 1621 + display_name: "Calcarius lapponicus" +} +item { + name: "4726" + id: 1622 + display_name: "Milvago chimachima" +} +item { + name: "29304" + id: 1623 + display_name: "Nerodia taxispilota" +} +item { + name: "29305" + id: 1624 + display_name: "Nerodia sipedon" +} +item { + name: "29306" + id: 1625 + display_name: "Nerodia sipedon sipedon" +} +item { + name: "142783" + id: 1626 + display_name: "Myodocha serripes" +} +item { + name: "4733" + id: 1627 + display_name: "Ciconia ciconia" +} +item { + name: "29310" + id: 1628 + display_name: "Nerodia rhombifer" +} +item { + name: "201343" + id: 1629 + display_name: "Lithacodes fasciola" +} +item { + name: "21121" + id: 1630 + display_name: "Dendrobates auratus" +} +item { + name: "127618" + id: 1631 + display_name: "Epirrhoe alternata" +} +item { + name: "43115" + id: 1632 + display_name: "Sylvilagus audubonii" +} +item { + name: "29317" + id: 1633 + display_name: "Nerodia fasciata" +} +item { + name: "4742" + id: 1634 + display_name: "Mycteria americana" +} +item { + name: "53895" + id: 1635 + display_name: "Stenopelmatus fuscus" +} +item { + name: "4744" + id: 1636 + display_name: "Mycteria ibis" +} +item { + name: "12937" + id: 1637 + display_name: "Sialia mexicana" +} +item { + name: "29322" + id: 1638 + display_name: "Nerodia fasciata confluens" +} +item { + name: "29324" + id: 1639 + display_name: "Nerodia clarkii clarkii" +} +item { + name: "29327" + id: 1640 + display_name: "Nerodia cyclopion" +} +item { + name: "29328" + id: 1641 + display_name: "Nerodia erythrogaster" +} +item { + name: "53905" + id: 1642 + display_name: "Mantis religiosa" +} +item { + name: "4754" + id: 1643 + display_name: "Ephippiorhynchus senegalensis" +} +item { + name: "127635" + id: 1644 + display_name: "Plecia nearctica" +} +item { + name: "4756" + id: 1645 + display_name: "Cathartes aura" +} +item { + name: "29334" + id: 1646 + display_name: "Nerodia erythrogaster flavigaster" +} +item { + name: "12951" + id: 1647 + display_name: "Myadestes townsendi" +} +item { + name: "4761" + id: 1648 + display_name: "Cathartes burrovianus" +} +item { + name: "4763" + id: 1649 + display_name: "Sarcoramphus papa" +} +item { + name: "4765" + id: 1650 + display_name: "Coragyps atratus" +} +item { + name: "19890" + id: 1651 + display_name: "Strix nebulosa" +} +item { + name: "26736" + id: 1652 + display_name: "Ambystoma opacum" +} +item { + name: "66331" + id: 1653 + display_name: "Pelophylax perezi" +} +item { + name: "4776" + id: 1654 + display_name: "Anastomus lamelligerus" +} +item { + name: "4892" + id: 1655 + display_name: "Pluvialis squatarola" +} +item { + name: "4778" + id: 1656 + display_name: "Gymnogyps californianus" +} +item { + name: "12971" + id: 1657 + display_name: "Muscicapa striata" +} +item { + name: "56776" + id: 1658 + display_name: "Glaucopsyche lygdamus" +} +item { + name: "127669" + id: 1659 + display_name: "Jadera haematoloma" +} +item { + name: "4793" + id: 1660 + display_name: "Charadrius vociferus" +} +item { + name: "209594" + id: 1661 + display_name: "Scantius aegyptius" +} +item { + name: "4795" + id: 1662 + display_name: "Charadrius wilsonia" +} +item { + name: "48586" + id: 1663 + display_name: "Cepaea nemoralis" +} +item { + name: "4798" + id: 1664 + display_name: "Charadrius melodus" +} +item { + name: "12992" + id: 1665 + display_name: "Phoenicurus phoenicurus" +} +item { + name: "45763" + id: 1666 + display_name: "Ondatra zibethicus" +} +item { + name: "119492" + id: 1667 + display_name: 
"Smerinthus cerisyi" +} +item { + name: "13000" + id: 1668 + display_name: "Phoenicurus ochruros" +} +item { + name: "4811" + id: 1669 + display_name: "Charadrius dubius" +} +item { + name: "64973" + id: 1670 + display_name: "Anaxyrus cognatus" +} +item { + name: "2168" + id: 1671 + display_name: "Eumomota superciliosa" +} +item { + name: "6980" + id: 1672 + display_name: "Anas querquedula" +} +item { + name: "64975" + id: 1673 + display_name: "Anaxyrus debilis" +} +item { + name: "43130" + id: 1674 + display_name: "Lepus californicus" +} +item { + name: "67707" + id: 1675 + display_name: "Argiope aurantia" +} +item { + name: "4836" + id: 1676 + display_name: "Himantopus mexicanus" +} +item { + name: "4838" + id: 1677 + display_name: "Haematopus bachmani" +} +item { + name: "43132" + id: 1678 + display_name: "Lepus americanus" +} +item { + name: "144106" + id: 1679 + display_name: "Pica pica" +} +item { + name: "4843" + id: 1680 + display_name: "Haematopus ostralegus" +} +item { + name: "67709" + id: 1681 + display_name: "Antrodiaetus riversi" +} +item { + name: "4848" + id: 1682 + display_name: "Haematopus unicolor" +} +item { + name: "4857" + id: 1683 + display_name: "Vanellus vanellus" +} +item { + name: "29435" + id: 1684 + display_name: "Coluber flagellum testaceus" +} +item { + name: "119550" + id: 1685 + display_name: "Feltia jaculifera" +} +item { + name: "4866" + id: 1686 + display_name: "Vanellus spinosus" +} +item { + name: "4870" + id: 1687 + display_name: "Vanellus armatus" +} +item { + name: "54024" + id: 1688 + display_name: "Satyrium californica" +} +item { + name: "13071" + id: 1689 + display_name: "Luscinia svecica" +} +item { + name: "3544" + id: 1690 + display_name: "Columbina inca" +} +item { + name: "4883" + id: 1691 + display_name: "Recurvirostra avosetta" +} +item { + name: "204701" + id: 1692 + display_name: "Melanchra adjuncta" +} +item { + name: "56083" + id: 1693 + display_name: "Armadillidium vulgare" +} +item { + name: "981" + id: 1694 + display_name: "Phasianus colchicus" +} +item { + name: "4893" + id: 1695 + display_name: "Pluvialis dominica" +} +item { + name: "103200" + id: 1696 + display_name: "Hypsiglena jani" +} +item { + name: "127777" + id: 1697 + display_name: "Vespula vulgaris" +} +item { + name: "7643" + id: 1698 + display_name: "Cinclus mexicanus" +} +item { + name: "13094" + id: 1699 + display_name: "Erithacus rubecula" +} +item { + name: "41777" + id: 1700 + display_name: "Lontra canadensis" +} +item { + name: "64988" + id: 1701 + display_name: "Anaxyrus terrestris" +} +item { + name: "18167" + id: 1702 + display_name: "Melanerpes aurifrons" +} +item { + name: "54064" + id: 1703 + display_name: "Polygonia comma" +} +item { + name: "209713" + id: 1704 + display_name: "Phigalia titea" +} +item { + name: "54068" + id: 1705 + display_name: "Boloria selene" +} +item { + name: "104585" + id: 1706 + display_name: "Libellula semifasciata" +} +item { + name: "119608" + id: 1707 + display_name: "Theba pisana" +} +item { + name: "4801" + id: 1708 + display_name: "Charadrius hiaticula" +} +item { + name: "104586" + id: 1709 + display_name: "Libellula vibrans" +} +item { + name: "4935" + id: 1710 + display_name: "Egretta gularis" +} +item { + name: "4937" + id: 1711 + display_name: "Egretta caerulea" +} +item { + name: "4938" + id: 1712 + display_name: "Egretta tricolor" +} +item { + name: "4940" + id: 1713 + display_name: "Egretta thula" +} +item { + name: "340813" + id: 1714 + display_name: "Hyalymenus tarsatus" +} +item { + name: "4943" + id: 1715 + 
display_name: "Egretta garzetta" +} +item { + name: "4947" + id: 1716 + display_name: "Egretta sacra" +} +item { + name: "13141" + id: 1717 + display_name: "Monticola solitarius" +} +item { + name: "4952" + id: 1718 + display_name: "Ardea cocoi" +} +item { + name: "4954" + id: 1719 + display_name: "Ardea cinerea" +} +item { + name: "67727" + id: 1720 + display_name: "Aeshna umbrosa" +} +item { + name: "4956" + id: 1721 + display_name: "Ardea herodias" +} +item { + name: "144223" + id: 1722 + display_name: "Chlosyne theona" +} +item { + name: "201568" + id: 1723 + display_name: "Diabrotica undecimpunctata undecimpunctata" +} +item { + name: "47383" + id: 1724 + display_name: "Latrodectus geometricus" +} +item { + name: "119664" + id: 1725 + display_name: "Cacyreus marshalli" +} +item { + name: "62321" + id: 1726 + display_name: "Rutpela maculata" +} +item { + name: "217970" + id: 1727 + display_name: "Cyclophora pendulinaria" +} +item { + name: "4981" + id: 1728 + display_name: "Nycticorax nycticorax" +} +item { + name: "12714" + id: 1729 + display_name: "Turdus rufopalliatus" +} +item { + name: "4994" + id: 1730 + display_name: "Ardeola ralloides" +} +item { + name: "4999" + id: 1731 + display_name: "Nyctanassa violacea" +} +item { + name: "37769" + id: 1732 + display_name: "Plestiodon skiltonianus" +} +item { + name: "213826" + id: 1733 + display_name: "Apamea amputatrix" +} +item { + name: "67736" + id: 1734 + display_name: "Rhionaeschna californica" +} +item { + name: "155380" + id: 1735 + display_name: "Andricus crystallinus" +} +item { + name: "144280" + id: 1736 + display_name: "Aramides cajaneus" +} +item { + name: "5017" + id: 1737 + display_name: "Bubulcus ibis" +} +item { + name: "5020" + id: 1738 + display_name: "Butorides virescens" +} +item { + name: "144285" + id: 1739 + display_name: "Porphyrio martinicus" +} +item { + name: "81729" + id: 1740 + display_name: "Feniseca tarquinius" +} +item { + name: "127905" + id: 1741 + display_name: "Bombus ternarius" +} +item { + name: "5034" + id: 1742 + display_name: "Botaurus lentiginosus" +} +item { + name: "29330" + id: 1743 + display_name: "Nerodia erythrogaster transversa" +} +item { + name: "5036" + id: 1744 + display_name: "Cochlearius cochlearius" +} +item { + name: "46001" + id: 1745 + display_name: "Sciurus vulgaris" +} +item { + name: "46005" + id: 1746 + display_name: "Sciurus variegatoides" +} +item { + name: "127928" + id: 1747 + display_name: "Autochton cellus" +} +item { + name: "340923" + id: 1748 + display_name: "Scolypopa australis" +} +item { + name: "46017" + id: 1749 + display_name: "Sciurus carolinensis" +} +item { + name: "46018" + id: 1750 + display_name: "Sciurus aberti" +} +item { + name: "447427" + id: 1751 + display_name: "Neverita lewisii" +} +item { + name: "46020" + id: 1752 + display_name: "Sciurus niger" +} +item { + name: "5061" + id: 1753 + display_name: "Anhinga novaehollandiae" +} +item { + name: "46023" + id: 1754 + display_name: "Sciurus griseus" +} +item { + name: "122375" + id: 1755 + display_name: "Carterocephalus palaemon" +} +item { + name: "5066" + id: 1756 + display_name: "Anhinga rufa" +} +item { + name: "145289" + id: 1757 + display_name: "Melozone fusca" +} +item { + name: "5074" + id: 1758 + display_name: "Aquila chrysaetos" +} +item { + name: "49998" + id: 1759 + display_name: "Thamnophis sirtalis infernalis" +} +item { + name: "13270" + id: 1760 + display_name: "Hylocichla mustelina" +} +item { + name: "62423" + id: 1761 + display_name: "Cimbex americana" +} +item { + name: "62424" + 
id: 1762 + display_name: "Sitochroa palealis" +} +item { + name: "111578" + id: 1763 + display_name: "Regina grahamii" +} +item { + name: "144207" + id: 1764 + display_name: "Aphelocoma wollweberi" +} +item { + name: "62429" + id: 1765 + display_name: "Pyronia tithonus" +} +item { + name: "47934" + id: 1766 + display_name: "Libellula luctuosa" +} +item { + name: "50000" + id: 1767 + display_name: "Clemmys guttata" +} +item { + name: "5097" + id: 1768 + display_name: "Accipiter striatus" +} +item { + name: "119789" + id: 1769 + display_name: "Cisseps fulvicollis" +} +item { + name: "5106" + id: 1770 + display_name: "Accipiter nisus" +} +item { + name: "5108" + id: 1771 + display_name: "Accipiter gentilis" +} +item { + name: "62456" + id: 1772 + display_name: "Rhagonycha fulva" +} +item { + name: "4948" + id: 1773 + display_name: "Egretta rufescens" +} +item { + name: "46082" + id: 1774 + display_name: "Marmota marmota" +} +item { + name: "6990" + id: 1775 + display_name: "Bucephala clangula" +} +item { + name: "4535" + id: 1776 + display_name: "Anous stolidus" +} +item { + name: "46087" + id: 1777 + display_name: "Marmota caligata" +} +item { + name: "72458" + id: 1778 + display_name: "Actitis macularius" +} +item { + name: "4951" + id: 1779 + display_name: "Ardea purpurea" +} +item { + name: "128012" + id: 1780 + display_name: "Eumorpha fasciatus" +} +item { + name: "472078" + id: 1781 + display_name: "Todiramphus chloris" +} +item { + name: "46095" + id: 1782 + display_name: "Marmota monax" +} +item { + name: "34" + id: 1783 + display_name: "Grus americana" +} +item { + name: "4835" + id: 1784 + display_name: "Himantopus himantopus" +} +item { + name: "122374" + id: 1785 + display_name: "Eurema mexicana" +} +item { + name: "19812" + id: 1786 + display_name: "Glaucidium gnoma" +} +item { + name: "73823" + id: 1787 + display_name: "Hierophis viridiflavus" +} +item { + name: "5168" + id: 1788 + display_name: "Circus approximans" +} +item { + name: "143110" + id: 1789 + display_name: "Hypagyrtis unipunctata" +} +item { + name: "65976" + id: 1790 + display_name: "Lithobates blairi" +} +item { + name: "5173" + id: 1791 + display_name: "Circus aeruginosus" +} +item { + name: "54327" + id: 1792 + display_name: "Vespa crabro" +} +item { + name: "4273" + id: 1793 + display_name: "Phalacrocorax sulcirostris" +} +item { + name: "5180" + id: 1794 + display_name: "Buteo albonotatus" +} +item { + name: "103485" + id: 1795 + display_name: "Ischnura denticollis" +} +item { + name: "62528" + id: 1796 + display_name: "Butorides striata" +} +item { + name: "62529" + id: 1797 + display_name: "Platalea ajaja" +} +item { + name: "5186" + id: 1798 + display_name: "Buteo brachyurus" +} +item { + name: "103494" + id: 1799 + display_name: "Ischnura hastata" +} +item { + name: "144455" + id: 1800 + display_name: "Ardea alba" +} +item { + name: "103497" + id: 1801 + display_name: "Ischnura perparva" +} +item { + name: "103498" + id: 1802 + display_name: "Ischnura posita" +} +item { + name: "5196" + id: 1803 + display_name: "Buteo swainsoni" +} +item { + name: "128079" + id: 1804 + display_name: "Grammia ornata" +} +item { + name: "29777" + id: 1805 + display_name: "Lampropeltis triangulum" +} +item { + name: "867" + id: 1806 + display_name: "Alectoris rufa" +} +item { + name: "5206" + id: 1807 + display_name: "Buteo lineatus" +} +item { + name: "29783" + id: 1808 + display_name: "Lampropeltis triangulum triangulum" +} +item { + name: "122383" + id: 1809 + display_name: "Plebejus melissa" +} +item { + name: "5212" + 
id: 1810 + display_name: "Buteo jamaicensis" +} +item { + name: "81495" + id: 1811 + display_name: "Libellula pulchella" +} +item { + name: "35003" + id: 1812 + display_name: "Heloderma suspectum" +} +item { + name: "46180" + id: 1813 + display_name: "Cynomys gunnisoni" +} +item { + name: "144485" + id: 1814 + display_name: "Charadrius nivosus" +} +item { + name: "144490" + id: 1815 + display_name: "Tringa incana" +} +item { + name: "144491" + id: 1816 + display_name: "Tringa semipalmata" +} +item { + name: "25185" + id: 1817 + display_name: "Hypopachus variolosus" +} +item { + name: "5231" + id: 1818 + display_name: "Terathopius ecaudatus" +} +item { + name: "144496" + id: 1819 + display_name: "Gallinago delicata" +} +item { + name: "5233" + id: 1820 + display_name: "Buteogallus anthracinus" +} +item { + name: "211035" + id: 1821 + display_name: "Speranza pustularia" +} +item { + name: "29813" + id: 1822 + display_name: "Lampropeltis getula" +} +item { + name: "144502" + id: 1823 + display_name: "Chroicocephalus philadelphia" +} +item { + name: "5242" + id: 1824 + display_name: "Circaetus gallicus" +} +item { + name: "144507" + id: 1825 + display_name: "Chroicocephalus novaehollandiae" +} +item { + name: "144510" + id: 1826 + display_name: "Chroicocephalus ridibundus" +} +item { + name: "52757" + id: 1827 + display_name: "Polistes fuscatus" +} +item { + name: "144514" + id: 1828 + display_name: "Leucophaeus atricilla" +} +item { + name: "144515" + id: 1829 + display_name: "Leucophaeus pipixcan" +} +item { + name: "46217" + id: 1830 + display_name: "Tamias striatus" +} +item { + name: "144525" + id: 1831 + display_name: "Onychoprion fuscatus" +} +item { + name: "46222" + id: 1832 + display_name: "Tamias minimus" +} +item { + name: "144530" + id: 1833 + display_name: "Sternula antillarum" +} +item { + name: "46230" + id: 1834 + display_name: "Tamias merriami" +} +item { + name: "144537" + id: 1835 + display_name: "Hydroprogne caspia" +} +item { + name: "144539" + id: 1836 + display_name: "Thalasseus maximus" +} +item { + name: "144540" + id: 1837 + display_name: "Thalasseus bergii" +} +item { + name: "5277" + id: 1838 + display_name: "Elanus leucurus" +} +item { + name: "324766" + id: 1839 + display_name: "Epicallima argenticinctella" +} +item { + name: "72486" + id: 1840 + display_name: "Alopochen aegyptiaca" +} +item { + name: "62229" + id: 1841 + display_name: "Ischnura cervula" +} +item { + name: "144550" + id: 1842 + display_name: "Streptopelia senegalensis" +} +item { + name: "46256" + id: 1843 + display_name: "Ammospermophilus harrisii" +} +item { + name: "94559" + id: 1844 + display_name: "Argia nahuana" +} +item { + name: "46259" + id: 1845 + display_name: "Tamiasciurus douglasii" +} +item { + name: "46260" + id: 1846 + display_name: "Tamiasciurus hudsonicus" +} +item { + name: "119989" + id: 1847 + display_name: "Stagmomantis carolina" +} +item { + name: "13494" + id: 1848 + display_name: "Gerygone igata" +} +item { + name: "5305" + id: 1849 + display_name: "Haliaeetus leucocephalus" +} +item { + name: "7596" + id: 1850 + display_name: "Cistothorus platensis" +} +item { + name: "5308" + id: 1851 + display_name: "Haliaeetus vocifer" +} +item { + name: "218301" + id: 1852 + display_name: "Diacme elealis" +} +item { + name: "95422" + id: 1853 + display_name: "Basiaeschna janata" +} +item { + name: "46272" + id: 1854 + display_name: "Glaucomys volans" +} +item { + name: "120010" + id: 1855 + display_name: "Polistes metricus" +} +item { + name: "144594" + id: 1856 + display_name: "Bubo 
scandiacus" +} +item { + name: "52771" + id: 1857 + display_name: "Gonepteryx rhamni" +} +item { + name: "144597" + id: 1858 + display_name: "Ciccaba virgata" +} +item { + name: "890" + id: 1859 + display_name: "Bonasa umbellus" +} +item { + name: "52773" + id: 1860 + display_name: "Poanes zabulon" +} +item { + name: "120033" + id: 1861 + display_name: "Lapara bombycoides" +} +item { + name: "5346" + id: 1862 + display_name: "Busarellus nigricollis" +} +item { + name: "5349" + id: 1863 + display_name: "Rostrhamus sociabilis" +} +item { + name: "36391" + id: 1864 + display_name: "Anolis equestris" +} +item { + name: "46316" + id: 1865 + display_name: "Trichechus manatus" +} +item { + name: "5267" + id: 1866 + display_name: "Milvus milvus" +} +item { + name: "128241" + id: 1867 + display_name: "Darapsa choerilus" +} +item { + name: "128242" + id: 1868 + display_name: "Palthis angulalis" +} +item { + name: "5366" + id: 1869 + display_name: "Gyps fulvus" +} +item { + name: "204512" + id: 1870 + display_name: "Ficedula hypoleuca" +} +item { + name: "54526" + id: 1871 + display_name: "Crassadoma gigantea" +} +item { + name: "144642" + id: 1872 + display_name: "Momotus coeruliceps" +} +item { + name: "120070" + id: 1873 + display_name: "Strongylocentrotus droebachiensis" +} +item { + name: "54538" + id: 1874 + display_name: "Syngnathus leptorhynchus" +} +item { + name: "81746" + id: 1875 + display_name: "Necrophila americana" +} +item { + name: "300301" + id: 1876 + display_name: "Pseudomyrmex gracilis" +} +item { + name: "202003" + id: 1877 + display_name: "Apiomerus spissipes" +} +item { + name: "41860" + id: 1878 + display_name: "Enhydra lutris" +} +item { + name: "4817" + id: 1879 + display_name: "Charadrius semipalmatus" +} +item { + name: "36145" + id: 1880 + display_name: "Sceloporus variabilis" +} +item { + name: "202012" + id: 1881 + display_name: "Steatoda capensis" +} +item { + name: "62749" + id: 1882 + display_name: "Iphiclides podalirius" +} +item { + name: "5406" + id: 1883 + display_name: "Haliastur indus" +} +item { + name: "62751" + id: 1884 + display_name: "Andricus kingi" +} +item { + name: "5363" + id: 1885 + display_name: "Gyps africanus" +} +item { + name: "5416" + id: 1886 + display_name: "Ictinia mississippiensis" +} +item { + name: "62766" + id: 1887 + display_name: "Issoria lathonia" +} +item { + name: "62768" + id: 1888 + display_name: "Scolia dubia" +} +item { + name: "126206" + id: 1889 + display_name: "Dissosteira carolina" +} +item { + name: "269875" + id: 1890 + display_name: "Mallodon dasystomus" +} +item { + name: "155030" + id: 1891 + display_name: "Limenitis reducta" +} +item { + name: "62345" + id: 1892 + display_name: "Duttaphrynus melanostictus" +} +item { + name: "52519" + id: 1893 + display_name: "Aeshna cyanea" +} +item { + name: "10001" + id: 1894 + display_name: "Dives dives" +} +item { + name: "460365" + id: 1895 + display_name: "Tegula funebralis" +} +item { + name: "13631" + id: 1896 + display_name: "Baeolophus atricristatus" +} +item { + name: "13632" + id: 1897 + display_name: "Baeolophus bicolor" +} +item { + name: "13633" + id: 1898 + display_name: "Baeolophus inornatus" +} +item { + name: "9100" + id: 1899 + display_name: "Melospiza melodia" +} +item { + name: "62796" + id: 1900 + display_name: "Crotaphytus bicinctores" +} +item { + name: "62797" + id: 1901 + display_name: "Gambelia wislizenii" +} +item { + name: "46009" + id: 1902 + display_name: "Sciurus aureogaster" +} +item { + name: "112867" + id: 1903 + display_name: "Sparisoma viride" +} 
+item { + name: "70997" + id: 1904 + display_name: "Pelecinus polyturator" +} +item { + name: "62806" + id: 1905 + display_name: "Mytilus californianus" +} +item { + name: "120156" + id: 1906 + display_name: "Musca domestica" +} +item { + name: "136548" + id: 1907 + display_name: "Euclea delphinii" +} +item { + name: "50065" + id: 1908 + display_name: "Danaus eresimus" +} +item { + name: "43239" + id: 1909 + display_name: "Tachyglossus aculeatus" +} +item { + name: "145303" + id: 1910 + display_name: "Spinus spinus" +} +item { + name: "120183" + id: 1911 + display_name: "Araneus marmoreus" +} +item { + name: "71032" + id: 1912 + display_name: "Crotalus scutulatus scutulatus" +} +item { + name: "71034" + id: 1913 + display_name: "Tenodera sinensis" +} +item { + name: "143121" + id: 1914 + display_name: "Ochropleura implecta" +} +item { + name: "13695" + id: 1915 + display_name: "Motacilla alba" +} +item { + name: "7458" + id: 1916 + display_name: "Certhia americana" +} +item { + name: "38293" + id: 1917 + display_name: "Lampropholis delicata" +} +item { + name: "144281" + id: 1918 + display_name: "Bucorvus leadbeateri" +} +item { + name: "120217" + id: 1919 + display_name: "Halysidota tessellaris" +} +item { + name: "226718" + id: 1920 + display_name: "Otiorhynchus sulcatus" +} +item { + name: "464287" + id: 1921 + display_name: "Anteaeolidiella oliviae" +} +item { + name: "226720" + id: 1922 + display_name: "Oxychilus draparnaudi" +} +item { + name: "13729" + id: 1923 + display_name: "Anthus pratensis" +} +item { + name: "13732" + id: 1924 + display_name: "Anthus rubescens" +} +item { + name: "11930" + id: 1925 + display_name: "Tachycineta albilinea" +} +item { + name: "71085" + id: 1926 + display_name: "Varanus niloticus" +} +item { + name: "144814" + id: 1927 + display_name: "Poecile carolinensis" +} +item { + name: "144815" + id: 1928 + display_name: "Poecile atricapillus" +} +item { + name: "144816" + id: 1929 + display_name: "Poecile gambeli" +} +item { + name: "144820" + id: 1930 + display_name: "Poecile rufescens" +} +item { + name: "144823" + id: 1931 + display_name: "Periparus ater" +} +item { + name: "10485" + id: 1932 + display_name: "Chlorophanes spiza" +} +item { + name: "40523" + id: 1933 + display_name: "Lasiurus cinereus" +} +item { + name: "47719" + id: 1934 + display_name: "Datana ministra" +} +item { + name: "13770" + id: 1935 + display_name: "Estrilda astrild" +} +item { + name: "144849" + id: 1936 + display_name: "Cyanistes caeruleus" +} +item { + name: "218587" + id: 1937 + display_name: "Discus rotundatus" +} +item { + name: "47105" + id: 1938 + display_name: "Tamandua mexicana" +} +item { + name: "18463" + id: 1939 + display_name: "Sphyrapicus varius" +} +item { + name: "11858" + id: 1940 + display_name: "Petrochelidon pyrrhonota" +} +item { + name: "144882" + id: 1941 + display_name: "Troglodytes pacificus" +} +item { + name: "144883" + id: 1942 + display_name: "Troglodytes hiemalis" +} +item { + name: "153076" + id: 1943 + display_name: "Nephelodes minians" +} +item { + name: "62978" + id: 1944 + display_name: "Chlosyne nycteis" +} +item { + name: "128517" + id: 1945 + display_name: "Catocala ilia" +} +item { + name: "153102" + id: 1946 + display_name: "Dysphania militaris" +} +item { + name: "59651" + id: 1947 + display_name: "Aquarius remigis" +} +item { + name: "13851" + id: 1948 + display_name: "Passer montanus" +} +item { + name: "13858" + id: 1949 + display_name: "Passer domesticus" +} +item { + name: "39742" + id: 1950 + display_name: "Kinosternon 
flavescens" +} +item { + name: "506118" + id: 1951 + display_name: "Aphelocoma californica" +} +item { + name: "5672" + id: 1952 + display_name: "Amazilia yucatanensis" +} +item { + name: "5676" + id: 1953 + display_name: "Amazilia tzacatl" +} +item { + name: "204503" + id: 1954 + display_name: "Dicrurus adsimilis" +} +item { + name: "52785" + id: 1955 + display_name: "Megachile sculpturalis" +} +item { + name: "126905" + id: 1956 + display_name: "Harrisina americana" +} +item { + name: "55773" + id: 1957 + display_name: "Promachus hinei" +} +item { + name: "84752" + id: 1958 + display_name: "Microcentrum rhombifolium" +} +item { + name: "5698" + id: 1959 + display_name: "Amazilia violiceps" +} +item { + name: "145539" + id: 1960 + display_name: "Ovis canadensis nelsoni" +} +item { + name: "104004" + id: 1961 + display_name: "Lampropeltis splendida" +} +item { + name: "13893" + id: 1962 + display_name: "Lonchura punctulata" +} +item { + name: "63048" + id: 1963 + display_name: "Nuttallina californica" +} +item { + name: "226901" + id: 1964 + display_name: "Panopoda rufimargo" +} +item { + name: "194134" + id: 1965 + display_name: "Anthanassa tulcis" +} +item { + name: "5049" + id: 1966 + display_name: "Tigrisoma mexicanum" +} +item { + name: "407130" + id: 1967 + display_name: "Porphyrio melanotus melanotus" +} +item { + name: "226910" + id: 1968 + display_name: "Panthea furcilla" +} +item { + name: "130661" + id: 1969 + display_name: "Catasticta nimbice" +} +item { + name: "120215" + id: 1970 + display_name: "Bombus griseocollis" +} +item { + name: "144220" + id: 1971 + display_name: "Melanitta americana" +} +item { + name: "9148" + id: 1972 + display_name: "Spizella pallida" +} +item { + name: "320610" + id: 1973 + display_name: "Sceloporus magister" +} +item { + name: "54900" + id: 1974 + display_name: "Papilio polyxenes asterius" +} +item { + name: "36080" + id: 1975 + display_name: "Callisaurus draconoides" +} +item { + name: "5758" + id: 1976 + display_name: "Amazilia rutila" +} +item { + name: "3465" + id: 1977 + display_name: "Zenaida aurita" +} +item { + name: "116461" + id: 1978 + display_name: "Anolis sagrei" +} +item { + name: "61295" + id: 1979 + display_name: "Aporia crataegi" +} +item { + name: "131673" + id: 1980 + display_name: "Tetracis cachexiata" +} +item { + name: "63113" + id: 1981 + display_name: "Blarina brevicauda" +} +item { + name: "26904" + id: 1982 + display_name: "Coronella austriaca" +} +item { + name: "94575" + id: 1983 + display_name: "Argia tibialis" +} +item { + name: "237166" + id: 1984 + display_name: "Lycaena phlaeas hypophlaeas" +} +item { + name: "129305" + id: 1985 + display_name: "Melanoplus bivittatus" +} +item { + name: "63128" + id: 1986 + display_name: "Speyeria atlantis" +} +item { + name: "113514" + id: 1987 + display_name: "Sympetrum internum" +} +item { + name: "48757" + id: 1988 + display_name: "Echinothrix calamaris" +} +item { + name: "128670" + id: 1989 + display_name: "Bombus vagans" +} +item { + name: "13988" + id: 1990 + display_name: "Prunella modularis" +} +item { + name: "54951" + id: 1991 + display_name: "Anartia fatima" +} +item { + name: "54952" + id: 1992 + display_name: "Cardisoma guanhumi" +} +item { + name: "325295" + id: 1993 + display_name: "Cydalima perspectalis" +} +item { + name: "63160" + id: 1994 + display_name: "Celithemis elisa" +} +item { + name: "210615" + id: 1995 + display_name: "Pyrausta volupialis" +} +item { + name: "472766" + id: 1996 + display_name: "Falco tinnunculus" +} +item { + name: "29927" + id: 1997 
+ display_name: "Heterodon nasicus" +} +item { + name: "145088" + id: 1998 + display_name: "Ixoreus naevius" +} +item { + name: "6432" + id: 1999 + display_name: "Archilochus colubris" +} +item { + name: "5827" + id: 2000 + display_name: "Lampornis clemenciae" +} +item { + name: "15990" + id: 2001 + display_name: "Myiarchus tuberculifer" +} +item { + name: "128712" + id: 2002 + display_name: "Coccinella californica" +} +item { + name: "67559" + id: 2003 + display_name: "Adelpha eulalia" +} +item { + name: "128719" + id: 2004 + display_name: "Echinometra mathaei" +} +item { + name: "10247" + id: 2005 + display_name: "Setophaga ruticilla" +} +item { + name: "202451" + id: 2006 + display_name: "Copaeodes minima" +} +item { + name: "95958" + id: 2007 + display_name: "Boyeria vinosa" +} +item { + name: "16016" + id: 2008 + display_name: "Myiarchus tyrannulus" +} +item { + name: "36202" + id: 2009 + display_name: "Sceloporus olivaceus" +} +item { + name: "95982" + id: 2010 + display_name: "Brachymesia furcata" +} +item { + name: "126589" + id: 2011 + display_name: "Calycopis isobeon" +} +item { + name: "120578" + id: 2012 + display_name: "Micrathena sagittata" +} +item { + name: "194690" + id: 2013 + display_name: "Pogonomyrmex barbatus" +} +item { + name: "120583" + id: 2014 + display_name: "Parasteatoda tepidariorum" +} +item { + name: "202505" + id: 2015 + display_name: "Zosterops lateralis" +} +item { + name: "38671" + id: 2016 + display_name: "Aspidoscelis tigris" +} +item { + name: "38672" + id: 2017 + display_name: "Aspidoscelis tigris stejnegeri" +} +item { + name: "9176" + id: 2018 + display_name: "Zonotrichia leucophrys" +} +item { + name: "120596" + id: 2019 + display_name: "Aphonopelma hentzi" +} +item { + name: "9744" + id: 2020 + display_name: "Agelaius phoeniceus" +} +item { + name: "38684" + id: 2021 + display_name: "Aspidoscelis tigris mundus" +} +item { + name: "62426" + id: 2022 + display_name: "Aphantopus hyperantus" +} +item { + name: "30494" + id: 2023 + display_name: "Micrurus tener" +} +item { + name: "58578" + id: 2024 + display_name: "Euphydryas phaeton" +} +item { + name: "96036" + id: 2025 + display_name: "Brechmorhoga mendax" +} +item { + name: "333608" + id: 2026 + display_name: "Leukoma staminea" +} +item { + name: "38703" + id: 2027 + display_name: "Aspidoscelis sexlineata sexlineata" +} +item { + name: "126600" + id: 2028 + display_name: "Chortophaga viridifasciata" +} +item { + name: "63287" + id: 2029 + display_name: "Megalorchestia californiana" +} +item { + name: "128824" + id: 2030 + display_name: "Lucilia sericata" +} +item { + name: "104249" + id: 2031 + display_name: "Lepisosteus oculatus" +} +item { + name: "203153" + id: 2032 + display_name: "Parus major" +} +item { + name: "9183" + id: 2033 + display_name: "Zonotrichia capensis" +} +item { + name: "82201" + id: 2034 + display_name: "Hypena baltimoralis" +} +item { + name: "145217" + id: 2035 + display_name: "Oreothlypis peregrina" +} +item { + name: "145218" + id: 2036 + display_name: "Oreothlypis celata" +} +item { + name: "145221" + id: 2037 + display_name: "Oreothlypis ruficapilla" +} +item { + name: "145224" + id: 2038 + display_name: "Geothlypis philadelphia" +} +item { + name: "145225" + id: 2039 + display_name: "Geothlypis formosa" +} +item { + name: "448331" + id: 2040 + display_name: "Ambigolimax valentianus" +} +item { + name: "128845" + id: 2041 + display_name: "Copestylum mexicanum" +} +item { + name: "145231" + id: 2042 + display_name: "Setophaga tigrina" +} +item { + name: "145233" + id: 
2043 + display_name: "Setophaga americana" +} +item { + name: "145235" + id: 2044 + display_name: "Setophaga magnolia" +} +item { + name: "145236" + id: 2045 + display_name: "Setophaga castanea" +} +item { + name: "145237" + id: 2046 + display_name: "Setophaga fusca" +} +item { + name: "145238" + id: 2047 + display_name: "Setophaga petechia" +} +item { + name: "145240" + id: 2048 + display_name: "Setophaga striata" +} +item { + name: "145242" + id: 2049 + display_name: "Setophaga palmarum" +} +item { + name: "179855" + id: 2050 + display_name: "Polites vibex" +} +item { + name: "145244" + id: 2051 + display_name: "Setophaga pinus" +} +item { + name: "145245" + id: 2052 + display_name: "Setophaga coronata" +} +item { + name: "145246" + id: 2053 + display_name: "Setophaga dominica" +} +item { + name: "5987" + id: 2054 + display_name: "Campylopterus hemileucurus" +} +item { + name: "17382" + id: 2055 + display_name: "Vireo cassinii" +} +item { + name: "145254" + id: 2056 + display_name: "Setophaga nigrescens" +} +item { + name: "145255" + id: 2057 + display_name: "Setophaga townsendi" +} +item { + name: "145256" + id: 2058 + display_name: "Setophaga occidentalis" +} +item { + name: "145257" + id: 2059 + display_name: "Setophaga chrysoparia" +} +item { + name: "145258" + id: 2060 + display_name: "Setophaga virens" +} +item { + name: "48786" + id: 2061 + display_name: "Pollicipes polymerus" +} +item { + name: "36207" + id: 2062 + display_name: "Sceloporus occidentalis longipes" +} +item { + name: "22392" + id: 2063 + display_name: "Eleutherodactylus marnockii" +} +item { + name: "22393" + id: 2064 + display_name: "Eleutherodactylus cystignathoides" +} +item { + name: "145275" + id: 2065 + display_name: "Cardellina canadensis" +} +item { + name: "145277" + id: 2066 + display_name: "Cardellina rubra" +} +item { + name: "7829" + id: 2067 + display_name: "Aphelocoma coerulescens" +} +item { + name: "41963" + id: 2068 + display_name: "Panthera pardus" +} +item { + name: "142998" + id: 2069 + display_name: "Pyrausta acrionalis" +} +item { + name: "18204" + id: 2070 + display_name: "Melanerpes erythrocephalus" +} +item { + name: "47425" + id: 2071 + display_name: "Tonicella lineata" +} +item { + name: "148460" + id: 2072 + display_name: "Charadra deridens" +} +item { + name: "145291" + id: 2073 + display_name: "Emberiza calandra" +} +item { + name: "52523" + id: 2074 + display_name: "Carcinus maenas" +} +item { + name: "46994" + id: 2075 + display_name: "Scapanus latimanus" +} +item { + name: "114314" + id: 2076 + display_name: "Tramea onusta" +} +item { + name: "145300" + id: 2077 + display_name: "Acanthis flammea" +} +item { + name: "63382" + id: 2078 + display_name: "Dermasterias imbricata" +} +item { + name: "126772" + id: 2079 + display_name: "Ursus americanus californiensis" +} +item { + name: "145304" + id: 2080 + display_name: "Spinus pinus" +} +item { + name: "10294" + id: 2081 + display_name: "Thraupis abbas" +} +item { + name: "145308" + id: 2082 + display_name: "Spinus psaltria" +} +item { + name: "145309" + id: 2083 + display_name: "Spinus lawrencei" +} +item { + name: "145310" + id: 2084 + display_name: "Spinus tristis" +} +item { + name: "3739" + id: 2085 + display_name: "Threskiornis aethiopicus" +} +item { + name: "47014" + id: 2086 + display_name: "Scalopus aquaticus" +} +item { + name: "4566" + id: 2087 + display_name: "Gygis alba" +} +item { + name: "43335" + id: 2088 + display_name: "Equus quagga" +} +item { + name: "41970" + id: 2089 + display_name: "Panthera onca" +} +item { + 
name: "128950" + id: 2090 + display_name: "Lycomorpha pholus" +} +item { + name: "11935" + id: 2091 + display_name: "Tachycineta bicolor" +} +item { + name: "333759" + id: 2092 + display_name: "Larus dominicanus dominicanus" +} +item { + name: "143008" + id: 2093 + display_name: "Herpetogramma pertextalis" +} +item { + name: "235341" + id: 2094 + display_name: "Coenonympha tullia california" +} +item { + name: "44705" + id: 2095 + display_name: "Mus musculus" +} +item { + name: "145352" + id: 2096 + display_name: "Lonchura oryzivora" +} +item { + name: "4840" + id: 2097 + display_name: "Haematopus palliatus" +} +item { + name: "244845" + id: 2098 + display_name: "Apiomerus californicus" +} +item { + name: "145360" + id: 2099 + display_name: "Chloris chloris" +} +item { + name: "5112" + id: 2100 + display_name: "Accipiter cooperii" +} +item { + name: "30675" + id: 2101 + display_name: "Agkistrodon piscivorus" +} +item { + name: "341972" + id: 2102 + display_name: "Crocodylus niloticus" +} +item { + name: "30677" + id: 2103 + display_name: "Agkistrodon piscivorus conanti" +} +item { + name: "30678" + id: 2104 + display_name: "Agkistrodon contortrix" +} +item { + name: "52900" + id: 2105 + display_name: "Caenurgina crassiuscula" +} +item { + name: "30682" + id: 2106 + display_name: "Agkistrodon contortrix laticinctus" +} +item { + name: "47067" + id: 2107 + display_name: "Bradypus variegatus" +} +item { + name: "55260" + id: 2108 + display_name: "Erythemis vesiculosa" +} +item { + name: "17402" + id: 2109 + display_name: "Vireo solitarius" +} +item { + name: "6369" + id: 2110 + display_name: "Selasphorus platycercus" +} +item { + name: "104416" + id: 2111 + display_name: "Lestes alacer" +} +item { + name: "128993" + id: 2112 + display_name: "Narceus annularus" +} +item { + name: "104422" + id: 2113 + display_name: "Lestes congener" +} +item { + name: "227307" + id: 2114 + display_name: "Patalene olyzonaria" +} +item { + name: "104429" + id: 2115 + display_name: "Lestes dryas" +} +item { + name: "194542" + id: 2116 + display_name: "Phyciodes graphica" +} +item { + name: "52904" + id: 2117 + display_name: "Microcrambus elegans" +} +item { + name: "129363" + id: 2118 + display_name: "Calephelis nemesis" +} +item { + name: "144506" + id: 2119 + display_name: "Chroicocephalus scopulinus" +} +item { + name: "30713" + id: 2120 + display_name: "Crotalus oreganus helleri" +} +item { + name: "47101" + id: 2121 + display_name: "Choloepus hoffmanni" +} +item { + name: "210942" + id: 2122 + display_name: "Caedicia simplex" +} +item { + name: "30719" + id: 2123 + display_name: "Crotalus scutulatus" +} +item { + name: "30724" + id: 2124 + display_name: "Crotalus ruber" +} +item { + name: "47110" + id: 2125 + display_name: "Triopha maculata" +} +item { + name: "4235" + id: 2126 + display_name: "Aechmophorus occidentalis" +} +item { + name: "30731" + id: 2127 + display_name: "Crotalus molossus" +} +item { + name: "30733" + id: 2128 + display_name: "Crotalus molossus nigrescens" +} +item { + name: "30735" + id: 2129 + display_name: "Crotalus mitchellii" +} +item { + name: "30740" + id: 2130 + display_name: "Crotalus lepidus" +} +item { + name: "30746" + id: 2131 + display_name: "Crotalus horridus" +} +item { + name: "63518" + id: 2132 + display_name: "Melanoplus differentialis" +} +item { + name: "30751" + id: 2133 + display_name: "Crotalus cerastes" +} +item { + name: "126640" + id: 2134 + display_name: "Caenurgina erechtea" +} +item { + name: "46086" + id: 2135 + display_name: "Marmota flaviventris" +} +item 
{ + name: "194599" + id: 2136 + display_name: "Heliomata cycladata" +} +item { + name: "30764" + id: 2137 + display_name: "Crotalus atrox" +} +item { + name: "204520" + id: 2138 + display_name: "Hemiphaga novaeseelandiae" +} +item { + name: "128141" + id: 2139 + display_name: "Crepidula adunca" +} +item { + name: "121183" + id: 2140 + display_name: "Mythimna unipuncta" +} +item { + name: "40827" + id: 2141 + display_name: "Eidolon helvum" +} +item { + name: "4571" + id: 2142 + display_name: "Xema sabini" +} +item { + name: "211007" + id: 2143 + display_name: "Nepytia canosaria" +} +item { + name: "47171" + id: 2144 + display_name: "Flabellina iodinea" +} +item { + name: "211012" + id: 2145 + display_name: "Maliattha synochitis" +} +item { + name: "30798" + id: 2146 + display_name: "Bothrops asper" +} +item { + name: "47188" + id: 2147 + display_name: "Pachygrapsus crassipes" +} +item { + name: "55387" + id: 2148 + display_name: "Esox lucius" +} +item { + name: "58583" + id: 2149 + display_name: "Limenitis arthemis arthemis" +} +item { + name: "104548" + id: 2150 + display_name: "Leucorrhinia frigida" +} +item { + name: "104550" + id: 2151 + display_name: "Leucorrhinia hudsonica" +} +item { + name: "104551" + id: 2152 + display_name: "Leucorrhinia intacta" +} +item { + name: "47209" + id: 2153 + display_name: "Hermissenda crassicornis" +} +item { + name: "55655" + id: 2154 + display_name: "Lycaena phlaeas" +} +item { + name: "202861" + id: 2155 + display_name: "Otala lactea" +} +item { + name: "143037" + id: 2156 + display_name: "Lineodes integra" +} +item { + name: "47219" + id: 2157 + display_name: "Apis mellifera" +} +item { + name: "24254" + id: 2158 + display_name: "Pseudacris cadaverina" +} +item { + name: "47226" + id: 2159 + display_name: "Papilio rutulus" +} +item { + name: "104572" + id: 2160 + display_name: "Libellula comanche" +} +item { + name: "104574" + id: 2161 + display_name: "Libellula croceipennis" +} +item { + name: "104575" + id: 2162 + display_name: "Libellula cyanea" +} +item { + name: "145538" + id: 2163 + display_name: "Ovis canadensis canadensis" +} +item { + name: "104580" + id: 2164 + display_name: "Libellula incesta" +} +item { + name: "24257" + id: 2165 + display_name: "Pseudacris streckeri" +} +item { + name: "53866" + id: 2166 + display_name: "Calpodes ethlius" +} +item { + name: "18796" + id: 2167 + display_name: "Ramphastos sulfuratus" +} +item { + name: "2413" + id: 2168 + display_name: "Dacelo novaeguineae" +} +item { + name: "482" + id: 2169 + display_name: "Fulica atra" +} +item { + name: "47251" + id: 2170 + display_name: "Sphyraena barracuda" +} +item { + name: "358549" + id: 2171 + display_name: "Hemaris diffinis" +} +item { + name: "81526" + id: 2172 + display_name: "Crotalus viridis" +} +item { + name: "342169" + id: 2173 + display_name: "Hirundo rustica erythrogaster" +} +item { + name: "39280" + id: 2174 + display_name: "Leiocephalus carinatus" +} +item { + name: "47269" + id: 2175 + display_name: "Dasyatis americana" +} +item { + name: "55467" + id: 2176 + display_name: "Sabulodes aegrotata" +} +item { + name: "6316" + id: 2177 + display_name: "Calypte costae" +} +item { + name: "6317" + id: 2178 + display_name: "Calypte anna" +} +item { + name: "47280" + id: 2179 + display_name: "Pterois volitans" +} +item { + name: "81608" + id: 2180 + display_name: "Geukensia demissa" +} +item { + name: "121012" + id: 2181 + display_name: "Euglandina rosea" +} +item { + name: "236980" + id: 2182 + display_name: "Colaptes auratus cafer" +} +item { + name: 
"38673" + id: 2183 + display_name: "Aspidoscelis tigris tigris" +} +item { + name: "3786" + id: 2184 + display_name: "Sula nebouxii" +} +item { + name: "55487" + id: 2185 + display_name: "Diabrotica undecimpunctata" +} +item { + name: "243904" + id: 2186 + display_name: "Phrynosoma platyrhinos" +} +item { + name: "55489" + id: 2187 + display_name: "Cycloneda munda" +} +item { + name: "204491" + id: 2188 + display_name: "Copsychus saularis" +} +item { + name: "55492" + id: 2189 + display_name: "Cycloneda polita" +} +item { + name: "129222" + id: 2190 + display_name: "Heterophleps triguttaria" +} +item { + name: "129223" + id: 2191 + display_name: "Pasiphila rectangulata" +} +item { + name: "28365" + id: 2192 + display_name: "Thamnophis sirtalis sirtalis" +} +item { + name: "47316" + id: 2193 + display_name: "Chaetodon lunula" +} +item { + name: "6359" + id: 2194 + display_name: "Selasphorus sasin" +} +item { + name: "62500" + id: 2195 + display_name: "Leptophobia aripa" +} +item { + name: "6363" + id: 2196 + display_name: "Selasphorus rufus" +} +item { + name: "96480" + id: 2197 + display_name: "Calopteryx aequabilis" +} +item { + name: "55521" + id: 2198 + display_name: "Papilio eurymedon" +} +item { + name: "6371" + id: 2199 + display_name: "Calothorax lucifer" +} +item { + name: "129263" + id: 2200 + display_name: "Syrbula admirabilis" +} +item { + name: "28371" + id: 2201 + display_name: "Thamnophis sirtalis fitchi" +} +item { + name: "243962" + id: 2202 + display_name: "Charina bottae" +} +item { + name: "145659" + id: 2203 + display_name: "Acronicta americana" +} +item { + name: "14588" + id: 2204 + display_name: "Pycnonotus barbatus" +} +item { + name: "480298" + id: 2205 + display_name: "Cornu aspersum" +} +item { + name: "51584" + id: 2206 + display_name: "Melanitis leda" +} +item { + name: "243970" + id: 2207 + display_name: "Larus glaucescens \303\227 occidentalis" +} +item { + name: "55556" + id: 2208 + display_name: "Oncopeltus fasciatus" +} +item { + name: "506117" + id: 2209 + display_name: "Aphelocoma woodhouseii" +} +item { + name: "63750" + id: 2210 + display_name: "Anavitrinella pampinaria" +} +item { + name: "30983" + id: 2211 + display_name: "Sistrurus miliarius" +} +item { + name: "211210" + id: 2212 + display_name: "Holocnemus pluchei" +} +item { + name: "49587" + id: 2213 + display_name: "Micropterus salmoides" +} +item { + name: "6417" + id: 2214 + display_name: "Florisuga mellivora" +} +item { + name: "47381" + id: 2215 + display_name: "Latrodectus mactans" +} +item { + name: "47382" + id: 2216 + display_name: "Latrodectus hesperus" +} +item { + name: "4851" + id: 2217 + display_name: "Haematopus finschi" +} +item { + name: "51588" + id: 2218 + display_name: "Papilio polytes" +} +item { + name: "144431" + id: 2219 + display_name: "Falcipennis canadensis" +} +item { + name: "118490" + id: 2220 + display_name: "Haematopis grataria" +} +item { + name: "6433" + id: 2221 + display_name: "Archilochus alexandri" +} +item { + name: "52956" + id: 2222 + display_name: "Chaetodon capistratus" +} +item { + name: "203050" + id: 2223 + display_name: "Junonia genoveva" +} +item { + name: "5170" + id: 2224 + display_name: "Circus cyaneus" +} +item { + name: "84332" + id: 2225 + display_name: "Panorpa nuptialis" +} +item { + name: "47414" + id: 2226 + display_name: "Emerita analoga" +} +item { + name: "129335" + id: 2227 + display_name: "Gibbifer californicus" +} +item { + name: "55610" + id: 2228 + display_name: "Pyrrhocoris apterus" +} +item { + name: "58421" + id: 2229 + 
display_name: "Phidippus johnsoni" +} +item { + name: "208608" + id: 2230 + display_name: "Trachymela sloanei" +} +item { + name: "68138" + id: 2231 + display_name: "Sympetrum corruptum" +} +item { + name: "129350" + id: 2232 + display_name: "Photinus pyralis" +} +item { + name: "55625" + id: 2233 + display_name: "Sympetrum striolatum" +} +item { + name: "55626" + id: 2234 + display_name: "Pieris rapae" +} +item { + name: "203084" + id: 2235 + display_name: "Ardea alba modesta" +} +item { + name: "129362" + id: 2236 + display_name: "Zerene cesonia" +} +item { + name: "55638" + id: 2237 + display_name: "Anania hortulata" +} +item { + name: "148537" + id: 2238 + display_name: "Astraptes fulgerator" +} +item { + name: "55640" + id: 2239 + display_name: "Celastrina argiolus" +} +item { + name: "55641" + id: 2240 + display_name: "Polyommatus icarus" +} +item { + name: "16028" + id: 2241 + display_name: "Myiarchus crinitus" +} +item { + name: "55643" + id: 2242 + display_name: "Araschnia levana" +} +item { + name: "121180" + id: 2243 + display_name: "Megastraea undosa" +} +item { + name: "47454" + id: 2244 + display_name: "Triopha catalinae" +} +item { + name: "28389" + id: 2245 + display_name: "Thamnophis ordinoides" +} +item { + name: "68139" + id: 2246 + display_name: "Sympetrum vicinum" +} +item { + name: "55651" + id: 2247 + display_name: "Autographa gamma" +} +item { + name: "55653" + id: 2248 + display_name: "Maniola jurtina" +} +item { + name: "84369" + id: 2249 + display_name: "Libellula forensis" +} +item { + name: "47135" + id: 2250 + display_name: "Badumna longinqua" +} +item { + name: "48213" + id: 2251 + display_name: "Ariolimax californicus" +} +item { + name: "121196" + id: 2252 + display_name: "Acanthurus coeruleus" +} +item { + name: "47469" + id: 2253 + display_name: "Doris montereyensis" +} +item { + name: "5181" + id: 2254 + display_name: "Buteo regalis" +} +item { + name: "47472" + id: 2255 + display_name: "Acanthodoris lutea" +} +item { + name: "129415" + id: 2256 + display_name: "Copaeodes aurantiaca" +} +item { + name: "47505" + id: 2257 + display_name: "Geitodoris heathi" +} +item { + name: "28398" + id: 2258 + display_name: "Thamnophis elegans" +} +item { + name: "6553" + id: 2259 + display_name: "Aeronautes saxatalis" +} +item { + name: "47516" + id: 2260 + display_name: "Oncorhynchus mykiss" +} +item { + name: "6557" + id: 2261 + display_name: "Chaetura vauxi" +} +item { + name: "47518" + id: 2262 + display_name: "Salmo trutta" +} +item { + name: "55711" + id: 2263 + display_name: "Ladona depressa" +} +item { + name: "55719" + id: 2264 + display_name: "Eristalis tenax" +} +item { + name: "6571" + id: 2265 + display_name: "Chaetura pelagica" +} +item { + name: "119881" + id: 2266 + display_name: "Chrysochus cobaltinus" +} +item { + name: "145239" + id: 2267 + display_name: "Setophaga pensylvanica" +} +item { + name: "154043" + id: 2268 + display_name: "Bombus huntii" +} +item { + name: "41955" + id: 2269 + display_name: "Acinonyx jubatus" +} +item { + name: "55746" + id: 2270 + display_name: "Misumena vatia" +} +item { + name: "12024" + id: 2271 + display_name: "Lanius ludovicianus" +} +item { + name: "5063" + id: 2272 + display_name: "Anhinga anhinga" +} +item { + name: "59892" + id: 2273 + display_name: "Prionus californicus" +} +item { + name: "52986" + id: 2274 + display_name: "Largus californicus" +} +item { + name: "204454" + id: 2275 + display_name: "Acridotheres tristis" +} +item { + name: "14816" + id: 2276 + display_name: "Sitta pygmaea" +} +item { + name: 
"148560" + id: 2277 + display_name: "Mestra amymone" +} +item { + name: "4585" + id: 2278 + display_name: "Actophilornis africanus" +} +item { + name: "47590" + id: 2279 + display_name: "Phloeodes diabolicus" +} +item { + name: "14823" + id: 2280 + display_name: "Sitta canadensis" +} +item { + name: "14824" + id: 2281 + display_name: "Sitta europaea" +} +item { + name: "14825" + id: 2282 + display_name: "Sitta pusilla" +} +item { + name: "67598" + id: 2283 + display_name: "Solenopsis invicta" +} +item { + name: "6638" + id: 2284 + display_name: "Apus apus" +} +item { + name: "301557" + id: 2285 + display_name: "Euphoria basalis" +} +item { + name: "132070" + id: 2286 + display_name: "Phaneroptera nana" +} +item { + name: "14850" + id: 2287 + display_name: "Sturnus vulgaris" +} +item { + name: "62550" + id: 2288 + display_name: "Seiurus aurocapilla" +} +item { + name: "64006" + id: 2289 + display_name: "Corbicula fluminea" +} +item { + name: "204545" + id: 2290 + display_name: "Motacilla flava" +} +item { + name: "47632" + id: 2291 + display_name: "Katharina tunicata" +} +item { + name: "325309" + id: 2292 + display_name: "Chortophaga viridifasciata viridifasciata" +} +item { + name: "104993" + id: 2293 + display_name: "Macrodiplax balteata" +} +item { + name: "17408" + id: 2294 + display_name: "Vireo griseus" +} +item { + name: "14895" + id: 2295 + display_name: "Toxostoma longirostre" +} +item { + name: "47664" + id: 2296 + display_name: "Henricia leviuscula" +} +item { + name: "31281" + id: 2297 + display_name: "Calotes versicolor" +} +item { + name: "119086" + id: 2298 + display_name: "Agrius cingulata" +} +item { + name: "3849" + id: 2299 + display_name: "Calidris alba" +} +item { + name: "14906" + id: 2300 + display_name: "Toxostoma redivivum" +} +item { + name: "144479" + id: 2301 + display_name: "Gallinula galeata" +} +item { + name: "3850" + id: 2302 + display_name: "Calidris himantopus" +} +item { + name: "117520" + id: 2303 + display_name: "Enhydra lutris nereis" +} +item { + name: "51491" + id: 2304 + display_name: "Myliobatis californica" +} +item { + name: "121612" + id: 2305 + display_name: "Estigmene acrea" +} +item { + name: "105034" + id: 2306 + display_name: "Macromia illinoiensis" +} +item { + name: "6498" + id: 2307 + display_name: "Eugenes fulgens" +} +item { + name: "46179" + id: 2308 + display_name: "Cynomys ludovicianus" +} +item { + name: "105049" + id: 2309 + display_name: "Macromia taeniolata" +} +item { + name: "94045" + id: 2310 + display_name: "Anax longipes" +} +item { + name: "143119" + id: 2311 + display_name: "Galgula partita" +} +item { + name: "9317" + id: 2312 + display_name: "Icterus wagleri" +} +item { + name: "122704" + id: 2313 + display_name: "Nucella ostrina" +} +item { + name: "146709" + id: 2314 + display_name: "Grylloprociphilus imbricator" +} +item { + name: "9318" + id: 2315 + display_name: "Icterus parisorum" +} +item { + name: "85333" + id: 2316 + display_name: "Micrathena gracilis" +} +item { + name: "126737" + id: 2317 + display_name: "Anania funebris" +} +item { + name: "49053" + id: 2318 + display_name: "Cryptochiton stelleri" +} +item { + name: "47721" + id: 2319 + display_name: "Parastichopus californicus" +} +item { + name: "34050" + id: 2320 + display_name: "Phelsuma laticauda" +} +item { + name: "154219" + id: 2321 + display_name: "Notarctia proxima" +} +item { + name: "51781" + id: 2322 + display_name: "Tyria jacobaeae" +} +item { + name: "24230" + id: 2323 + display_name: "Acris crepitans" +} +item { + name: "146032" + id: 2324 + 
display_name: "Coluber flagellum" +} +item { + name: "146033" + id: 2325 + display_name: "Coluber flagellum flagellum" +} +item { + name: "244340" + id: 2326 + display_name: "Hordnia atropunctata" +} +item { + name: "146037" + id: 2327 + display_name: "Coluber taeniatus" +} +item { + name: "244344" + id: 2328 + display_name: "Scopula rubraria" +} +item { + name: "47737" + id: 2329 + display_name: "Harpaphe haydeniana" +} +item { + name: "5227" + id: 2330 + display_name: "Buteo platypterus" +} +item { + name: "39556" + id: 2331 + display_name: "Apalone spinifera" +} +item { + name: "39560" + id: 2332 + display_name: "Apalone spinifera emoryi" +} +item { + name: "318836" + id: 2333 + display_name: "Gallinago gallinago" +} +item { + name: "105098" + id: 2334 + display_name: "Magicicada septendecim" +} +item { + name: "96907" + id: 2335 + display_name: "Celithemis fasciata" +} +item { + name: "9325" + id: 2336 + display_name: "Icterus spurius" +} +item { + name: "3864" + id: 2337 + display_name: "Calidris minutilla" +} +item { + name: "14995" + id: 2338 + display_name: "Dumetella carolinensis" +} +item { + name: "424597" + id: 2339 + display_name: "Porphyrio hochstetteri" +} +item { + name: "47768" + id: 2340 + display_name: "Doriopsilla albopunctata" +} +item { + name: "498116" + id: 2341 + display_name: "Aeolidia papillosa" +} +item { + name: "244378" + id: 2342 + display_name: "Mallophora fautrix" +} +item { + name: "3866" + id: 2343 + display_name: "Calidris fuscicollis" +} +item { + name: "47776" + id: 2344 + display_name: "Ariolimax columbianus" +} +item { + name: "144497" + id: 2345 + display_name: "Phalaropus tricolor" +} +item { + name: "39824" + id: 2346 + display_name: "Pseudemys nelsoni" +} +item { + name: "236979" + id: 2347 + display_name: "Colaptes auratus auratus" +} +item { + name: "55990" + id: 2348 + display_name: "Podarcis muralis" +} +item { + name: "244407" + id: 2349 + display_name: "Zelus renardii" +} +item { + name: "47802" + id: 2350 + display_name: "Lymantria dispar" +} +item { + name: "15035" + id: 2351 + display_name: "Melanotis caerulescens" +} +item { + name: "51658" + id: 2352 + display_name: "Anthopleura artemisia" +} +item { + name: "121534" + id: 2353 + display_name: "Oreta rosea" +} +item { + name: "73504" + id: 2354 + display_name: "Tiaris olivaceus" +} +item { + name: "15045" + id: 2355 + display_name: "Oreoscoptes montanus" +} +item { + name: "3873" + id: 2356 + display_name: "Limnodromus scolopaceus" +} +item { + name: "47673" + id: 2357 + display_name: "Pycnopodia helianthoides" +} +item { + name: "47817" + id: 2358 + display_name: "Libellula saturata" +} +item { + name: "56644" + id: 2359 + display_name: "Polygonia satyrus" +} +item { + name: "47826" + id: 2360 + display_name: "Cancer productus" +} +item { + name: "3875" + id: 2361 + display_name: "Tringa solitaria" +} +item { + name: "39782" + id: 2362 + display_name: "Trachemys scripta" +} +item { + name: "143140" + id: 2363 + display_name: "Cyllopsis gemma" +} +item { + name: "29818" + id: 2364 + display_name: "Lampropeltis holbrooki" +} +item { + name: "56293" + id: 2365 + display_name: "Macroglossum stellatarum" +} +item { + name: "154340" + id: 2366 + display_name: "Gryllodes sigillatus" +} +item { + name: "14801" + id: 2367 + display_name: "Sitta carolinensis" +} +item { + name: "121578" + id: 2368 + display_name: "Ovis aries" +} +item { + name: "3879" + id: 2369 + display_name: "Tringa totanus" +} +item { + name: "6893" + id: 2370 + display_name: "Dendrocygna autumnalis" +} +item { + name: 
"154353" + id: 2371 + display_name: "Sunira bicolorago" +} +item { + name: "6898" + id: 2372 + display_name: "Dendrocygna viduata" +} +item { + name: "6899" + id: 2373 + display_name: "Dendrocygna bicolor" +} +item { + name: "9342" + id: 2374 + display_name: "Icterus abeillei" +} +item { + name: "39670" + id: 2375 + display_name: "Lepidochelys olivacea" +} +item { + name: "4867" + id: 2376 + display_name: "Vanellus chilensis" +} +item { + name: "39677" + id: 2377 + display_name: "Dermochelys coriacea" +} +item { + name: "113407" + id: 2378 + display_name: "Stylurus plagiatus" +} +item { + name: "39682" + id: 2379 + display_name: "Chelydra serpentina" +} +item { + name: "6915" + id: 2380 + display_name: "Cygnus buccinator" +} +item { + name: "6916" + id: 2381 + display_name: "Cygnus cygnus" +} +item { + name: "6917" + id: 2382 + display_name: "Cygnus columbianus" +} +item { + name: "29825" + id: 2383 + display_name: "Lampropeltis calligaster calligaster" +} +item { + name: "6921" + id: 2384 + display_name: "Cygnus olor" +} +item { + name: "146186" + id: 2385 + display_name: "Intellagama lesueurii" +} +item { + name: "9346" + id: 2386 + display_name: "Icterus galbula" +} +item { + name: "126765" + id: 2387 + display_name: "Plutella xylostella" +} +item { + name: "71154" + id: 2388 + display_name: "Aphis nerii" +} +item { + name: "6930" + id: 2389 + display_name: "Anas platyrhynchos" +} +item { + name: "6933" + id: 2390 + display_name: "Anas acuta" +} +item { + name: "39703" + id: 2391 + display_name: "Sternotherus odoratus" +} +item { + name: "6937" + id: 2392 + display_name: "Anas crecca" +} +item { + name: "64287" + id: 2393 + display_name: "Lottia digitalis" +} +item { + name: "6944" + id: 2394 + display_name: "Anas cyanoptera" +} +item { + name: "39713" + id: 2395 + display_name: "Kinosternon subrubrum" +} +item { + name: "26691" + id: 2396 + display_name: "Scaphiopus couchii" +} +item { + name: "6948" + id: 2397 + display_name: "Anas fulvigula" +} +item { + name: "6953" + id: 2398 + display_name: "Anas discors" +} +item { + name: "47914" + id: 2399 + display_name: "Eumorpha pandorus" +} +item { + name: "47916" + id: 2400 + display_name: "Actias luna" +} +item { + name: "6957" + id: 2401 + display_name: "Anas strepera" +} +item { + name: "47919" + id: 2402 + display_name: "Antheraea polyphemus" +} +item { + name: "119953" + id: 2403 + display_name: "Hypoprepia fucosa" +} +item { + name: "6961" + id: 2404 + display_name: "Anas clypeata" +} +item { + name: "134119" + id: 2405 + display_name: "Anisomorpha buprestoides" +} +item { + name: "51678" + id: 2406 + display_name: "Coenagrion puella" +} +item { + name: "72502" + id: 2407 + display_name: "Anas chlorotis" +} +item { + name: "49060" + id: 2408 + display_name: "Epiactis prolifera" +} +item { + name: "42122" + id: 2409 + display_name: "Phacochoerus africanus" +} +item { + name: "58507" + id: 2410 + display_name: "Poanes hobomok" +} +item { + name: "121669" + id: 2411 + display_name: "Stenopus hispidus" +} +item { + name: "8143" + id: 2412 + display_name: "Rhipidura leucophrys" +} +item { + name: "6985" + id: 2413 + display_name: "Anas americana" +} +item { + name: "6993" + id: 2414 + display_name: "Bucephala albeola" +} +item { + name: "121682" + id: 2415 + display_name: "Tetraclita rubescens" +} +item { + name: "6996" + id: 2416 + display_name: "Mergus serrator" +} +item { + name: "113498" + id: 2417 + display_name: "Sympetrum ambiguum" +} +item { + name: "39771" + id: 2418 + display_name: "Chrysemys picta" +} +item { + name: "7004" + 
id: 2419 + display_name: "Mergus merganser" +} +item { + name: "39773" + id: 2420 + display_name: "Chrysemys picta bellii" +} +item { + name: "113503" + id: 2421 + display_name: "Sympetrum danae" +} +item { + name: "113507" + id: 2422 + display_name: "Sympetrum fonscolombii" +} +item { + name: "154469" + id: 2423 + display_name: "Isa textula" +} +item { + name: "47975" + id: 2424 + display_name: "Argia apicalis" +} +item { + name: "7018" + id: 2425 + display_name: "Anser anser" +} +item { + name: "7019" + id: 2426 + display_name: "Anser albifrons" +} +item { + name: "47980" + id: 2427 + display_name: "Speyeria cybele" +} +item { + name: "58514" + id: 2428 + display_name: "Euphyes vestris" +} +item { + name: "113519" + id: 2429 + display_name: "Sympetrum obtrusum" +} +item { + name: "7024" + id: 2430 + display_name: "Somateria mollissima" +} +item { + name: "39793" + id: 2431 + display_name: "Trachemys scripta scripta" +} +item { + name: "367475" + id: 2432 + display_name: "Rallus obsoletus" +} +item { + name: "121716" + id: 2433 + display_name: "Uresiphita reversalis" +} +item { + name: "113525" + id: 2434 + display_name: "Sympetrum sanguineum" +} +item { + name: "113526" + id: 2435 + display_name: "Sympetrum semicinctum" +} +item { + name: "18921" + id: 2436 + display_name: "Platycercus elegans" +} +item { + name: "7032" + id: 2437 + display_name: "Melanitta fusca" +} +item { + name: "5268" + id: 2438 + display_name: "Milvus migrans" +} +item { + name: "144536" + id: 2439 + display_name: "Gelochelidon nilotica" +} +item { + name: "413503" + id: 2440 + display_name: "Ninox novaeseelandiae novaeseelandiae" +} +item { + name: "7036" + id: 2441 + display_name: "Melanitta perspicillata" +} +item { + name: "64382" + id: 2442 + display_name: "Lissotriton vulgaris" +} +item { + name: "39807" + id: 2443 + display_name: "Terrapene ornata" +} +item { + name: "39808" + id: 2444 + display_name: "Terrapene ornata luteola" +} +item { + name: "7044" + id: 2445 + display_name: "Aythya collaris" +} +item { + name: "7045" + id: 2446 + display_name: "Aythya ferina" +} +item { + name: "7046" + id: 2447 + display_name: "Aythya fuligula" +} +item { + name: "146314" + id: 2448 + display_name: "Opheodrys vernalis" +} +item { + name: "3906" + id: 2449 + display_name: "Numenius americanus" +} +item { + name: "39823" + id: 2450 + display_name: "Pseudemys gorzugi" +} +item { + name: "178991" + id: 2451 + display_name: "Sypharochiton pelliserpentis" +} +item { + name: "7061" + id: 2452 + display_name: "Chen caerulescens" +} +item { + name: "39830" + id: 2453 + display_name: "Pseudemys concinna" +} +item { + name: "127490" + id: 2454 + display_name: "Parrhasius m-album" +} +item { + name: "15256" + id: 2455 + display_name: "Chamaea fasciata" +} +item { + name: "39836" + id: 2456 + display_name: "Malaclemys terrapin" +} +item { + name: "133764" + id: 2457 + display_name: "Trichopoda pennipes" +} +item { + name: "334753" + id: 2458 + display_name: "Hypselonotus punctiventris" +} +item { + name: "58611" + id: 2459 + display_name: "Amia calva" +} +item { + name: "56240" + id: 2460 + display_name: "Argia vivida" +} +item { + name: "7089" + id: 2461 + display_name: "Branta canadensis" +} +item { + name: "146354" + id: 2462 + display_name: "Phrynosoma blainvillii" +} +item { + name: "56243" + id: 2463 + display_name: "Plebejus acmon" +} +item { + name: "144542" + id: 2464 + display_name: "Thalasseus elegans" +} +item { + name: "121783" + id: 2465 + display_name: "Lithobates clamitans melanota" +} +item { + name: "39865" + id: 
2466 + display_name: "Glyptemys insculpta" +} +item { + name: "39867" + id: 2467 + display_name: "Emys orbicularis" +} +item { + name: "7104" + id: 2468 + display_name: "Branta sandvicensis" +} +item { + name: "50336" + id: 2469 + display_name: "Siproeta stelenes" +} +item { + name: "7056" + id: 2470 + display_name: "Aythya americana" +} +item { + name: "7107" + id: 2471 + display_name: "Aix sponsa" +} +item { + name: "7109" + id: 2472 + display_name: "Lophodytes cucullatus" +} +item { + name: "7111" + id: 2473 + display_name: "Histrionicus histrionicus" +} +item { + name: "367562" + id: 2474 + display_name: "Aratinga nenday" +} +item { + name: "39885" + id: 2475 + display_name: "Emydoidea blandingii" +} +item { + name: "367566" + id: 2476 + display_name: "Psittacara holochlorus" +} +item { + name: "143181" + id: 2477 + display_name: "Marimatha nigrofimbria" +} +item { + name: "7120" + id: 2478 + display_name: "Cairina moschata" +} +item { + name: "7122" + id: 2479 + display_name: "Netta rufina" +} +item { + name: "130003" + id: 2480 + display_name: "Phaeoura quernaria" +} +item { + name: "367572" + id: 2481 + display_name: "Psittacara erythrogenys" +} +item { + name: "17009" + id: 2482 + display_name: "Sayornis saya" +} +item { + name: "154582" + id: 2483 + display_name: "Ennomos magnaria" +} +item { + name: "58532" + id: 2484 + display_name: "Colias eurytheme" +} +item { + name: "121821" + id: 2485 + display_name: "Sceliphron caementarium" +} +item { + name: "48094" + id: 2486 + display_name: "Dryocampa rubicunda" +} +item { + name: "7057" + id: 2487 + display_name: "Aythya valisineria" +} +item { + name: "17646" + id: 2488 + display_name: "Picoides albolarvatus" +} +item { + name: "201551" + id: 2489 + display_name: "Procyon lotor lotor" +} +item { + name: "58534" + id: 2490 + display_name: "Lycaena hyllus" +} +item { + name: "73553" + id: 2491 + display_name: "Vermivora cyanoptera" +} +item { + name: "359401" + id: 2492 + display_name: "Exomala orientalis" +} +item { + name: "8018" + id: 2493 + display_name: "Corvus caurinus" +} +item { + name: "490478" + id: 2494 + display_name: "Tegula brunnea" +} +item { + name: "20307" + id: 2495 + display_name: "Asio otus" +} +item { + name: "227466" + id: 2496 + display_name: "Peridea ferruginea" +} +item { + name: "122172" + id: 2497 + display_name: "Pyrisitia lisa" +} +item { + name: "133631" + id: 2498 + display_name: "Polites peckius" +} +item { + name: "8021" + id: 2499 + display_name: "Corvus brachyrhynchos" +} +item { + name: "7170" + id: 2500 + display_name: "Clangula hyemalis" +} +item { + name: "58539" + id: 2501 + display_name: "Satyrium calanus" +} +item { + name: "27137" + id: 2502 + display_name: "Coluber constrictor" +} +item { + name: "7176" + id: 2503 + display_name: "Chenonetta jubata" +} +item { + name: "42157" + id: 2504 + display_name: "Giraffa camelopardalis" +} +item { + name: "144541" + id: 2505 + display_name: "Thalasseus sandvicensis" +} +item { + name: "23572" + id: 2506 + display_name: "Litoria aurea" +} +item { + name: "354820" + id: 2507 + display_name: "Patiriella regularis" +} +item { + name: "55887" + id: 2508 + display_name: "Andricus quercuscalifornicus" +} +item { + name: "46255" + id: 2509 + display_name: "Ammospermophilus leucurus" +} +item { + name: "334341" + id: 2510 + display_name: "Oryctolagus cuniculus domesticus" +} +item { + name: "144560" + id: 2511 + display_name: "Eolophus roseicapilla" +} +item { + name: "94043" + id: 2512 + display_name: "Anax imperator" +} +item { + name: "425004" + id: 2513 + 
display_name: "Dryas iulia moderata" +} +item { + name: "269359" + id: 2514 + display_name: "Cactophagus spinolae" +} +item { + name: "72755" + id: 2515 + display_name: "Colaptes rubiginosus" +} +item { + name: "319123" + id: 2516 + display_name: "Meleagris gallopavo silvestris" +} +item { + name: "130846" + id: 2517 + display_name: "Lyssa zampa" +} +item { + name: "203831" + id: 2518 + display_name: "Nemoria bistriaria" +} +item { + name: "367678" + id: 2519 + display_name: "Ptiliogonys cinereus" +} +item { + name: "5301" + id: 2520 + display_name: "Elanoides forficatus" +} +item { + name: "9398" + id: 2521 + display_name: "Carduelis carduelis" +} +item { + name: "143201" + id: 2522 + display_name: "Coryphista meadii" +} +item { + name: "104419" + id: 2523 + display_name: "Lestes australis" +} +item { + name: "367693" + id: 2524 + display_name: "Cassiculus melanicterus" +} +item { + name: "143452" + id: 2525 + display_name: "Deidamia inscriptum" +} +item { + name: "466003" + id: 2526 + display_name: "Romalea microptera" +} +item { + name: "84494" + id: 2527 + display_name: "Paraphidippus aurantius" +} +item { + name: "203866" + id: 2528 + display_name: "Rabdophaga strobiloides" +} +item { + name: "72797" + id: 2529 + display_name: "Dendragapus fuliginosus" +} +item { + name: "7266" + id: 2530 + display_name: "Psaltriparus minimus" +} +item { + name: "120920" + id: 2531 + display_name: "Odocoileus virginianus clavium" +} +item { + name: "7278" + id: 2532 + display_name: "Aegithalos caudatus" +} +item { + name: "30681" + id: 2533 + display_name: "Agkistrodon contortrix mokasen" +} +item { + name: "413547" + id: 2534 + display_name: "Zosterops lateralis lateralis" +} +item { + name: "48262" + id: 2535 + display_name: "Apatelodes torrefacta" +} +item { + name: "121993" + id: 2536 + display_name: "Lampides boeticus" +} +item { + name: "48267" + id: 2537 + display_name: "Crotalus oreganus oreganus" +} +item { + name: "48268" + id: 2538 + display_name: "Crotalus oreganus" +} +item { + name: "147309" + id: 2539 + display_name: "Feltia herilis" +} +item { + name: "146413" + id: 2540 + display_name: "Sceloporus consobrinus" +} +item { + name: "326764" + id: 2541 + display_name: "Cyprinus carpio haematopterus" +} +item { + name: "5315" + id: 2542 + display_name: "Haliaeetus leucogaster" +} +item { + name: "4519" + id: 2543 + display_name: "Uria aalge" +} +item { + name: "40085" + id: 2544 + display_name: "Gopherus polyphemus" +} +item { + name: "23702" + id: 2545 + display_name: "Agalychnis callidryas" +} +item { + name: "210116" + id: 2546 + display_name: "Tringa semipalmata inornatus" +} +item { + name: "40092" + id: 2547 + display_name: "Stigmochelys pardalis" +} +item { + name: "59931" + id: 2548 + display_name: "Acanthurus triostegus" +} +item { + name: "48292" + id: 2549 + display_name: "Philoscia muscorum" +} +item { + name: "146601" + id: 2550 + display_name: "Scolopendra heros" +} +item { + name: "244906" + id: 2551 + display_name: "Panchlora nivea" +} +item { + name: "48302" + id: 2552 + display_name: "Limulus polyphemus" +} +item { + name: "180008" + id: 2553 + display_name: "Otospermophilus variegatus" +} +item { + name: "7347" + id: 2554 + display_name: "Alauda arvensis" +} +item { + name: "43459" + id: 2555 + display_name: "Macaca fascicularis" +} +item { + name: "113846" + id: 2556 + display_name: "Telebasis salva" +} +item { + name: "7356" + id: 2557 + display_name: "Galerida cristata" +} +item { + name: "64705" + id: 2558 + display_name: "Delichon urbicum" +} +item { + name: 
"145932" + id: 2559 + display_name: "Aspidoscelis hyperythra beldingi" +} +item { + name: "72912" + id: 2560 + display_name: "Helmitheros vermivorum" +} +item { + name: "69805" + id: 2561 + display_name: "Octogomphus specularis" +} +item { + name: "129572" + id: 2562 + display_name: "Aphomia sociella" +} +item { + name: "31964" + id: 2563 + display_name: "Barisia imbricata" +} +item { + name: "244625" + id: 2564 + display_name: "Halmus chalybeus" +} +item { + name: "58576" + id: 2565 + display_name: "Phyciodes cocyta" +} +item { + name: "72931" + id: 2566 + display_name: "Hylocharis leucotis" +} +item { + name: "104449" + id: 2567 + display_name: "Lestes rectangularis" +} +item { + name: "14886" + id: 2568 + display_name: "Mimus polyglottos" +} +item { + name: "23783" + id: 2569 + display_name: "Hyla versicolor" +} +item { + name: "23784" + id: 2570 + display_name: "Hyla plicata" +} +item { + name: "8575" + id: 2571 + display_name: "Gymnorhina tibicen" +} +item { + name: "2599" + id: 2572 + display_name: "Alcedo atthis" +} +item { + name: "61152" + id: 2573 + display_name: "Pyrrhosoma nymphula" +} +item { + name: "58579" + id: 2574 + display_name: "Polygonia interrogationis" +} +item { + name: "31993" + id: 2575 + display_name: "Ophisaurus attenuatus attenuatus" +} +item { + name: "53985" + id: 2576 + display_name: "Odocoileus hemionus californicus" +} +item { + name: "144549" + id: 2577 + display_name: "Streptopelia chinensis" +} +item { + name: "105730" + id: 2578 + display_name: "Micrathyria hagenii" +} +item { + name: "7428" + id: 2579 + display_name: "Bombycilla cedrorum" +} +item { + name: "7429" + id: 2580 + display_name: "Bombycilla garrulus" +} +item { + name: "50391" + id: 2581 + display_name: "Polygonia gracilis" +} +item { + name: "7067" + id: 2582 + display_name: "Tadorna tadorna" +} +item { + name: "413513" + id: 2583 + display_name: "Petroica australis australis" +} +item { + name: "39469" + id: 2584 + display_name: "Varanus varius" +} +item { + name: "58479" + id: 2585 + display_name: "Pholisora catullus" +} +item { + name: "127929" + id: 2586 + display_name: "Achalarus lyciades" +} +item { + name: "48403" + id: 2587 + display_name: "Gasterosteus aculeatus" +} +item { + name: "18990" + id: 2588 + display_name: "Amazona autumnalis" +} +item { + name: "1241" + id: 2589 + display_name: "Dendragapus obscurus" +} +item { + name: "228634" + id: 2590 + display_name: "Ponometia erastrioides" +} +item { + name: "64806" + id: 2591 + display_name: "Pelophylax" +} +item { + name: "51761" + id: 2592 + display_name: "Hetaerina americana" +} +item { + name: "7464" + id: 2593 + display_name: "Catherpes mexicanus" +} +item { + name: "318761" + id: 2594 + display_name: "Sceloporus uniformis" +} +item { + name: "7068" + id: 2595 + display_name: "Tadorna ferruginea" +} +item { + name: "204077" + id: 2596 + display_name: "Achyra rantalis" +} +item { + name: "7470" + id: 2597 + display_name: "Campylorhynchus brunneicapillus" +} +item { + name: "32048" + id: 2598 + display_name: "Gerrhonotus infernalis" +} +item { + name: "204081" + id: 2599 + display_name: "Pyrausta laticlavia" +} +item { + name: "7476" + id: 2600 + display_name: "Campylorhynchus rufinucha" +} +item { + name: "32055" + id: 2601 + display_name: "Elgaria multicarinata" +} +item { + name: "244276" + id: 2602 + display_name: "Rhipidura fuliginosa" +} +item { + name: "144187" + id: 2603 + display_name: "Pyrisitia proterpia" +} +item { + name: "32059" + id: 2604 + display_name: "Elgaria multicarinata multicarinata" +} +item { + name: 
"32061" + id: 2605 + display_name: "Elgaria kingii" +} +item { + name: "146750" + id: 2606 + display_name: "Lascoria ambigualis" +} +item { + name: "32064" + id: 2607 + display_name: "Elgaria coerulea" +} +item { + name: "23873" + id: 2608 + display_name: "Hyla squirella" +} +item { + name: "48450" + id: 2609 + display_name: "Peltodoris nobilis" +} +item { + name: "64146" + id: 2610 + display_name: "Fissurella volcano" +} +item { + name: "48259" + id: 2611 + display_name: "Pelidnota punctata" +} +item { + name: "122185" + id: 2612 + display_name: "Pantherophis alleghaniensis quadrivittata" +} +item { + name: "7498" + id: 2613 + display_name: "Polioptila melanura" +} +item { + name: "56652" + id: 2614 + display_name: "Haliotis rufescens" +} +item { + name: "122191" + id: 2615 + display_name: "Pelecanus occidentalis carolinensis" +} +item { + name: "73041" + id: 2616 + display_name: "Melozone aberti" +} +item { + name: "199381" + id: 2617 + display_name: "Homalodisca vitripennis" +} +item { + name: "73044" + id: 2618 + display_name: "Melozone crissalis" +} +item { + name: "83290" + id: 2619 + display_name: "Zanclus cornutus" +} +item { + name: "7513" + id: 2620 + display_name: "Thryothorus ludovicianus" +} +item { + name: "28559" + id: 2621 + display_name: "Storeria occipitomaculata occipitomaculata" +} +item { + name: "24255" + id: 2622 + display_name: "Pseudacris maculata" +} +item { + name: "130398" + id: 2623 + display_name: "Melanargia galathea" +} +item { + name: "29925" + id: 2624 + display_name: "Heterodon platirhinos" +} +item { + name: "48484" + id: 2625 + display_name: "Harmonia axyridis" +} +item { + name: "122214" + id: 2626 + display_name: "Odontotaenius disjunctus" +} +item { + name: "39484" + id: 2627 + display_name: "Xantusia vigilis" +} +item { + name: "73919" + id: 2628 + display_name: "Podarcis sicula" +} +item { + name: "154553" + id: 2629 + display_name: "Leptoglossus clypealis" +} +item { + name: "23922" + id: 2630 + display_name: "Hyla intermedia" +} +item { + name: "122228" + id: 2631 + display_name: "Acharia stimulea" +} +item { + name: "108344" + id: 2632 + display_name: "Pantala flavescens" +} +item { + name: "118538" + id: 2633 + display_name: "Cotinis nitida" +} +item { + name: "23930" + id: 2634 + display_name: "Hyla chrysoscelis" +} +item { + name: "23933" + id: 2635 + display_name: "Hyla arenicolor" +} +item { + name: "122238" + id: 2636 + display_name: "Porcellio scaber" +} +item { + name: "479803" + id: 2637 + display_name: "Dioprosopa clavata" +} +item { + name: "5355" + id: 2638 + display_name: "Parabuteo unicinctus" +} +item { + name: "146822" + id: 2639 + display_name: "Texola elada" +} +item { + name: "236935" + id: 2640 + display_name: "Anas platyrhynchos domesticus" +} +item { + name: "7562" + id: 2641 + display_name: "Troglodytes aedon" +} +item { + name: "339444" + id: 2642 + display_name: "Buteo lineatus elegans" +} +item { + name: "42221" + id: 2643 + display_name: "Odocoileus hemionus columbianus" +} +item { + name: "15764" + id: 2644 + display_name: "Thamnophilus doliatus" +} +item { + name: "122261" + id: 2645 + display_name: "Cucullia convexipennis" +} +item { + name: "122262" + id: 2646 + display_name: "Brachystola magna" +} +item { + name: "7576" + id: 2647 + display_name: "Thryomanes bewickii" +} +item { + name: "143015" + id: 2648 + display_name: "Eubaphe mendica" +} +item { + name: "73592" + id: 2649 + display_name: "Actinemys marmorata" +} +item { + name: "84549" + id: 2650 + display_name: "Plathemis lydia" +} +item { + name: "23969" + 
id: 2651 + display_name: "Hyla cinerea" +} +item { + name: "318882" + id: 2652 + display_name: "Ancistrocerus gazella" +} +item { + name: "7072" + id: 2653 + display_name: "Tadorna variegata" +} +item { + name: "48548" + id: 2654 + display_name: "Vanessa cardui" +} +item { + name: "48549" + id: 2655 + display_name: "Vanessa virginiensis" +} +item { + name: "122278" + id: 2656 + display_name: "Pomacea canaliculata" +} +item { + name: "9457" + id: 2657 + display_name: "Myioborus miniatus" +} +item { + name: "122280" + id: 2658 + display_name: "Pyrgus albescens" +} +item { + name: "122281" + id: 2659 + display_name: "Calycopis cecrops" +} +item { + name: "130474" + id: 2660 + display_name: "Achlyodes pallida" +} +item { + name: "338503" + id: 2661 + display_name: "Phalacrocorax varius varius" +} +item { + name: "9458" + id: 2662 + display_name: "Myioborus pictus" +} +item { + name: "73629" + id: 2663 + display_name: "Anolis nebulosus" +} +item { + name: "122291" + id: 2664 + display_name: "Larus argentatus smithsonianus" +} +item { + name: "56756" + id: 2665 + display_name: "Murgantia histrionica" +} +item { + name: "73148" + id: 2666 + display_name: "Parkesia motacilla" +} +item { + name: "48575" + id: 2667 + display_name: "Okenia rosacea" +} +item { + name: "56768" + id: 2668 + display_name: "Sula granti" +} +item { + name: "48578" + id: 2669 + display_name: "Anteos maerula" +} +item { + name: "64968" + id: 2670 + display_name: "Anaxyrus americanus" +} +item { + name: "64970" + id: 2671 + display_name: "Anaxyrus boreas" +} +item { + name: "115549" + id: 2672 + display_name: "Crotalus lepidus lepidus" +} +item { + name: "64977" + id: 2673 + display_name: "Anaxyrus fowleri" +} +item { + name: "19022" + id: 2674 + display_name: "Ara macao" +} +item { + name: "24259" + id: 2675 + display_name: "Pseudacris regilla" +} +item { + name: "64984" + id: 2676 + display_name: "Anaxyrus punctatus" +} +item { + name: "64985" + id: 2677 + display_name: "Anaxyrus quercicus" +} +item { + name: "73178" + id: 2678 + display_name: "Peucaea ruficauda" +} +item { + name: "64987" + id: 2679 + display_name: "Anaxyrus speciosus" +} +item { + name: "64989" + id: 2680 + display_name: "Anaxyrus woodhousii" +} +item { + name: "339596" + id: 2681 + display_name: "Calidris subruficollis" +} +item { + name: "56552" + id: 2682 + display_name: "Carabus nemoralis" +} +item { + name: "84722" + id: 2683 + display_name: "Ischnura verticalis" +} +item { + name: "122356" + id: 2684 + display_name: "Eumorpha achemon" +} +item { + name: "318965" + id: 2685 + display_name: "Chrysolina bankii" +} +item { + name: "228855" + id: 2686 + display_name: "Protodeltote muscosula" +} +item { + name: "146940" + id: 2687 + display_name: "Agriphila vulgivagella" +} +item { + name: "56832" + id: 2688 + display_name: "Nymphalis antiopa" +} +item { + name: "61355" + id: 2689 + display_name: "Vespula pensylvanica" +} +item { + name: "48645" + id: 2690 + display_name: "Megathura crenulata" +} +item { + name: "73222" + id: 2691 + display_name: "Phoenicopterus roseus" +} +item { + name: "363354" + id: 2692 + display_name: "Lobatus gigas" +} +item { + name: "3802" + id: 2693 + display_name: "Morus bassanus" +} +item { + name: "62722" + id: 2694 + display_name: "Apalone spinifera spinifera" +} +item { + name: "48655" + id: 2695 + display_name: "Aplysia californica" +} +item { + name: "54468" + id: 2696 + display_name: "Aglais urticae" +} +item { + name: "48662" + id: 2697 + display_name: "Danaus plexippus" +} +item { + name: "49071" + id: 2698 + 
display_name: "Metridium senile" +} +item { + name: "228899" + id: 2699 + display_name: "Psamatodes abydata" +} +item { + name: "133102" + id: 2700 + display_name: "Oncometopia orbona" +} +item { + name: "39659" + id: 2701 + display_name: "Chelonia mydas" +} +item { + name: "121437" + id: 2702 + display_name: "Dolomedes triton" +} +item { + name: "94545" + id: 2703 + display_name: "Argia fumipennis" +} +item { + name: "56887" + id: 2704 + display_name: "Bombus pensylvanicus" +} +item { + name: "40509" + id: 2705 + display_name: "Eptesicus fuscus" +} +item { + name: "58635" + id: 2706 + display_name: "Lepomis megalotis" +} +item { + name: "100369" + id: 2707 + display_name: "Erpetogomphus designatus" +} +item { + name: "58636" + id: 2708 + display_name: "Lepomis cyanellus" +} +item { + name: "40522" + id: 2709 + display_name: "Lasiurus borealis" +} +item { + name: "102006" + id: 2710 + display_name: "Hagenius brevistylus" +} +item { + name: "50283" + id: 2711 + display_name: "Marpesia petreus" +} +item { + name: "123829" + id: 2712 + display_name: "Pelecanus occidentalis californicus" +} +item { + name: "62453" + id: 2713 + display_name: "Anthidium manicatum" +} +item { + name: "56925" + id: 2714 + display_name: "Graphocephala coccinea" +} +item { + name: "48738" + id: 2715 + display_name: "Sphex pensylvanicus" +} +item { + name: "43151" + id: 2716 + display_name: "Oryctolagus cuniculus" +} +item { + name: "19822" + id: 2717 + display_name: "Glaucidium brasilianum" +} +item { + name: "48750" + id: 2718 + display_name: "Lottia scabra" +} +item { + name: "335071" + id: 2719 + display_name: "Elophila obliteralis" +} +item { + name: "81521" + id: 2720 + display_name: "Vipera berus" +} +item { + name: "43697" + id: 2721 + display_name: "Elephas maximus" +} +item { + name: "7079" + id: 2722 + display_name: "Oxyura jamaicensis" +} +item { + name: "43042" + id: 2723 + display_name: "Erinaceus europaeus" +} +item { + name: "40086" + id: 2724 + display_name: "Gopherus agassizii" +} +item { + name: "81545" + id: 2725 + display_name: "Lumbricus terrestris" +} +item { + name: "16010" + id: 2726 + display_name: "Myiarchus cinerascens" +} +item { + name: "2669" + id: 2727 + display_name: "Chloroceryle americana" +} +item { + name: "9535" + id: 2728 + display_name: "Sturnella neglecta" +} +item { + name: "81554" + id: 2729 + display_name: "Ictalurus punctatus" +} +item { + name: "339907" + id: 2730 + display_name: "Ramphastos ambiguus" +} +item { + name: "39814" + id: 2731 + display_name: "Terrapene carolina" +} +item { + name: "10254" + id: 2732 + display_name: "Paroaria coronata" +} +item { + name: "40614" + id: 2733 + display_name: "Antrozous pallidus" +} +item { + name: "502385" + id: 2734 + display_name: "Probole amicaria" +} +item { + name: "24233" + id: 2735 + display_name: "Acris gryllus" +} +item { + name: "81579" + id: 2736 + display_name: "Steatoda triangulosa" +} +item { + name: "81580" + id: 2737 + display_name: "Callosamia promethea" +} +item { + name: "146034" + id: 2738 + display_name: "Coluber lateralis" +} +item { + name: "81582" + id: 2739 + display_name: "Hyalophora cecropia" +} +item { + name: "81583" + id: 2740 + display_name: "Anisota senatoria" +} +item { + name: "66002" + id: 2741 + display_name: "Lithobates palustris" +} +item { + name: "81586" + id: 2742 + display_name: "Citheronia regalis" +} +item { + name: "40629" + id: 2743 + display_name: "Lasionycteris noctivagans" +} +item { + name: "81590" + id: 2744 + display_name: "Eacles imperialis" +} +item { + name: "204472" + id: 
2745 + display_name: "Buteo buteo" +} +item { + name: "65212" + id: 2746 + display_name: "Craugastor augusti" +} +item { + name: "48830" + id: 2747 + display_name: "Patiria miniata" +} +item { + name: "48833" + id: 2748 + display_name: "Pisaster giganteus" +} +item { + name: "16071" + id: 2749 + display_name: "Myiodynastes luteiventris" +} +item { + name: "81610" + id: 2750 + display_name: "Balanus glandula" +} +item { + name: "24268" + id: 2751 + display_name: "Pseudacris crucifer" +} +item { + name: "16079" + id: 2752 + display_name: "Contopus sordidulus" +} +item { + name: "204496" + id: 2753 + display_name: "Corvus corone" +} +item { + name: "204498" + id: 2754 + display_name: "Cyanoramphus novaezelandiae" +} +item { + name: "24277" + id: 2755 + display_name: "Smilisca baudinii" +} +item { + name: "22631" + id: 2756 + display_name: "Eleutherodactylus planirostris" +} +item { + name: "16100" + id: 2757 + display_name: "Contopus virens" +} +item { + name: "42278" + id: 2758 + display_name: "Aepyceros melampus" +} +item { + name: "16106" + id: 2759 + display_name: "Contopus pertinax" +} +item { + name: "16110" + id: 2760 + display_name: "Contopus cooperi" +} +item { + name: "42280" + id: 2761 + display_name: "Connochaetes taurinus" +} +item { + name: "47455" + id: 2762 + display_name: "Octopus rubescens" +} +item { + name: "204533" + id: 2763 + display_name: "Larus argentatus" +} +item { + name: "81656" + id: 2764 + display_name: "Nematocampa resistaria" +} +item { + name: "81657" + id: 2765 + display_name: "Lacinipolia renigera" +} +item { + name: "204519" + id: 2766 + display_name: "Halcyon smyrnensis" +} +item { + name: "62762" + id: 2767 + display_name: "Cordulegaster dorsalis" +} +item { + name: "81663" + id: 2768 + display_name: "Malacosoma disstria" +} +item { + name: "32512" + id: 2769 + display_name: "Rena dulcis" +} +item { + name: "81665" + id: 2770 + display_name: "Orgyia leucostigma" +} +item { + name: "130821" + id: 2771 + display_name: "Haploa confusa" +} +item { + name: "81672" + id: 2772 + display_name: "Clemensia albata" +} +item { + name: "204554" + id: 2773 + display_name: "Onychognathus morio" +} +item { + name: "81677" + id: 2774 + display_name: "Euchaetes egle" +} +item { + name: "81680" + id: 2775 + display_name: "Scopula limboundata" +} +item { + name: "318497" + id: 2776 + display_name: "Hemipenthes sinuosa" +} +item { + name: "179987" + id: 2777 + display_name: "Ictidomys parvidens" +} +item { + name: "179988" + id: 2778 + display_name: "Ictidomys tridecemlineatus" +} +item { + name: "81685" + id: 2779 + display_name: "Evergestis pallidata" +} +item { + name: "81687" + id: 2780 + display_name: "Noctua pronuba" +} +item { + name: "179992" + id: 2781 + display_name: "Xerospermophilus spilosoma" +} +item { + name: "179994" + id: 2782 + display_name: "Urocitellus armatus" +} +item { + name: "9519" + id: 2783 + display_name: "Cyanocompsa parellina" +} +item { + name: "179998" + id: 2784 + display_name: "Urocitellus columbianus" +} +item { + name: "114463" + id: 2785 + display_name: "Trithemis annulata" +} +item { + name: "199169" + id: 2786 + display_name: "Catocala maestosa" +} +item { + name: "143323" + id: 2787 + display_name: "Tolype velleda" +} +item { + name: "120113" + id: 2788 + display_name: "Anthrenus verbasci" +} +item { + name: "7601" + id: 2789 + display_name: "Cistothorus palustris" +} +item { + name: "81706" + id: 2790 + display_name: "Alaus oculatus" +} +item { + name: "220974" + id: 2791 + display_name: "Harrisimemna trisignata" +} +item { + name: 
"20445" + id: 2792 + display_name: "Tyto alba" +} +item { + name: "73523" + id: 2793 + display_name: "Trogon caligatus" +} +item { + name: "49590" + id: 2794 + display_name: "Micropterus dolomieu" +} +item { + name: "41729" + id: 2795 + display_name: "Mirounga leonina" +} +item { + name: "48957" + id: 2796 + display_name: "Arilus cristatus" +} +item { + name: "81727" + id: 2797 + display_name: "Abaeis nicippe" +} +item { + name: "8000" + id: 2798 + display_name: "Corvus monedula" +} +item { + name: "8001" + id: 2799 + display_name: "Corvus ossifragus" +} +item { + name: "171843" + id: 2800 + display_name: "Rabdotus dealbatus" +} +item { + name: "81734" + id: 2801 + display_name: "Neophasia menapia" +} +item { + name: "258813" + id: 2802 + display_name: "Clogmia albipunctata" +} +item { + name: "332243" + id: 2803 + display_name: "Lepturobosca chrysocoma" +} +item { + name: "81744" + id: 2804 + display_name: "Heliconius erato" +} +item { + name: "218424" + id: 2805 + display_name: "Dicymolomia julianalis" +} +item { + name: "3813" + id: 2806 + display_name: "Spheniscus demersus" +} +item { + name: "81749" + id: 2807 + display_name: "Malacosoma americanum" +} +item { + name: "81752" + id: 2808 + display_name: "Pyrausta tyralis" +} +item { + name: "48987" + id: 2809 + display_name: "Hippodamia convergens" +} +item { + name: "8029" + id: 2810 + display_name: "Corvus frugilegus" +} +item { + name: "8031" + id: 2811 + display_name: "Corvus splendens" +} +item { + name: "147298" + id: 2812 + display_name: "Lasiommata megera" +} +item { + name: "7087" + id: 2813 + display_name: "Branta bernicla" +} +item { + name: "48550" + id: 2814 + display_name: "Phoebis sennae" +} +item { + name: "4349" + id: 2815 + display_name: "Larus hyperboreus" +} +item { + name: "84027" + id: 2816 + display_name: "Trigonopeltastes delta" +} +item { + name: "194762" + id: 2817 + display_name: "Vanessa itea" +} +item { + name: "311163" + id: 2818 + display_name: "Pseudomops septentrionalis" +} +item { + name: "55957" + id: 2819 + display_name: "Scudderia furcata" +} +item { + name: "39822" + id: 2820 + display_name: "Pseudemys texana" +} +item { + name: "204685" + id: 2821 + display_name: "Chlosyne ehrenbergii" +} +item { + name: "122767" + id: 2822 + display_name: "Columba livia domestica" +} +item { + name: "55960" + id: 2823 + display_name: "Sceloporus graciosus" +} +item { + name: "121823" + id: 2824 + display_name: "Autographa californica" +} +item { + name: "8088" + id: 2825 + display_name: "Garrulus glandarius" +} +item { + name: "65433" + id: 2826 + display_name: "Ecnomiohyla miotympanum" +} +item { + name: "49051" + id: 2827 + display_name: "Anthopleura sola" +} +item { + name: "125815" + id: 2828 + display_name: "Coenonympha arcania" +} +item { + name: "55963" + id: 2829 + display_name: "Malacosoma californicum" +} +item { + name: "120479" + id: 2830 + display_name: "Anser anser domesticus" +} +item { + name: "133788" + id: 2831 + display_name: "Xylocopa micans" +} +item { + name: "81559" + id: 2832 + display_name: "Epargyreus clarus" +} +item { + name: "81839" + id: 2833 + display_name: "Platycryptus undatus" +} +item { + name: "133791" + id: 2834 + display_name: "Polistes exclamans" +} +item { + name: "84640" + id: 2835 + display_name: "Polistes dominula" +} +item { + name: "73666" + id: 2836 + display_name: "Aspidoscelis exsanguis" +} +item { + name: "73669" + id: 2837 + display_name: "Aspidoscelis gularis" +} +item { + name: "16326" + id: 2838 + display_name: "Mitrephanes phaeocercus" +} +item { + name: 
"49095" + id: 2839 + display_name: "Pagurus samuelis" +} +item { + name: "73672" + id: 2840 + display_name: "Aspidoscelis hyperythra" +} +item { + name: "59192" + id: 2841 + display_name: "Polites sabuleti" +} +item { + name: "81561" + id: 2842 + display_name: "Anaea andria" +} +item { + name: "81881" + id: 2843 + display_name: "Amphipsalta zelandica" +} +item { + name: "73690" + id: 2844 + display_name: "Aspidoscelis sexlineata" +} +item { + name: "73694" + id: 2845 + display_name: "Aspidoscelis velox" +} +item { + name: "335840" + id: 2846 + display_name: "Pyrausta inornatalis" +} +item { + name: "49126" + id: 2847 + display_name: "Strongylocentrotus franciscanus" +} +item { + name: "204775" + id: 2848 + display_name: "Kricogonia lyside" +} +item { + name: "475115" + id: 2849 + display_name: "Ardenna creatopus" +} +item { + name: "475120" + id: 2850 + display_name: "Ardenna gravis" +} +item { + name: "62803" + id: 2851 + display_name: "Monadenia fidelis" +} +item { + name: "49150" + id: 2852 + display_name: "Agraulis vanillae" +} +item { + name: "83929" + id: 2853 + display_name: "Phanaeus vindex" +} +item { + name: "199839" + id: 2854 + display_name: "Haemorhous cassinii" +} diff --git a/models/research/object_detection/data/kitti_label_map.pbtxt b/models/research/object_detection/data/kitti_label_map.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..0afcc6936ebdb37ecbc7c3245929fcf178a02c0b --- /dev/null +++ b/models/research/object_detection/data/kitti_label_map.pbtxt @@ -0,0 +1,9 @@ +item { + id: 1 + name: 'car' +} + +item { + id: 2 + name: 'pedestrian' +} diff --git a/models/research/object_detection/data/mscoco_complete_label_map.pbtxt b/models/research/object_detection/data/mscoco_complete_label_map.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..d73fc065a4bd4f024670f242acbc79c1fd8e82fe --- /dev/null +++ b/models/research/object_detection/data/mscoco_complete_label_map.pbtxt @@ -0,0 +1,455 @@ +item { + name: "background" + id: 0 + display_name: "background" +} +item { + name: "/m/01g317" + id: 1 + display_name: "person" +} +item { + name: "/m/0199g" + id: 2 + display_name: "bicycle" +} +item { + name: "/m/0k4j" + id: 3 + display_name: "car" +} +item { + name: "/m/04_sv" + id: 4 + display_name: "motorcycle" +} +item { + name: "/m/05czz6l" + id: 5 + display_name: "airplane" +} +item { + name: "/m/01bjv" + id: 6 + display_name: "bus" +} +item { + name: "/m/07jdr" + id: 7 + display_name: "train" +} +item { + name: "/m/07r04" + id: 8 + display_name: "truck" +} +item { + name: "/m/019jd" + id: 9 + display_name: "boat" +} +item { + name: "/m/015qff" + id: 10 + display_name: "traffic light" +} +item { + name: "/m/01pns0" + id: 11 + display_name: "fire hydrant" +} +item { + name: "12" + id: 12 + display_name: "12" +} +item { + name: "/m/02pv19" + id: 13 + display_name: "stop sign" +} +item { + name: "/m/015qbp" + id: 14 + display_name: "parking meter" +} +item { + name: "/m/0cvnqh" + id: 15 + display_name: "bench" +} +item { + name: "/m/015p6" + id: 16 + display_name: "bird" +} +item { + name: "/m/01yrx" + id: 17 + display_name: "cat" +} +item { + name: "/m/0bt9lr" + id: 18 + display_name: "dog" +} +item { + name: "/m/03k3r" + id: 19 + display_name: "horse" +} +item { + name: "/m/07bgp" + id: 20 + display_name: "sheep" +} +item { + name: "/m/01xq0k1" + id: 21 + display_name: "cow" +} +item { + name: "/m/0bwd_0j" + id: 22 + display_name: "elephant" +} +item { + name: "/m/01dws" + id: 23 + display_name: "bear" +} +item { + name: "/m/0898b" + 
id: 24 + display_name: "zebra" +} +item { + name: "/m/03bk1" + id: 25 + display_name: "giraffe" +} +item { + name: "26" + id: 26 + display_name: "26" +} +item { + name: "/m/01940j" + id: 27 + display_name: "backpack" +} +item { + name: "/m/0hnnb" + id: 28 + display_name: "umbrella" +} +item { + name: "29" + id: 29 + display_name: "29" +} +item { + name: "30" + id: 30 + display_name: "30" +} +item { + name: "/m/080hkjn" + id: 31 + display_name: "handbag" +} +item { + name: "/m/01rkbr" + id: 32 + display_name: "tie" +} +item { + name: "/m/01s55n" + id: 33 + display_name: "suitcase" +} +item { + name: "/m/02wmf" + id: 34 + display_name: "frisbee" +} +item { + name: "/m/071p9" + id: 35 + display_name: "skis" +} +item { + name: "/m/06__v" + id: 36 + display_name: "snowboard" +} +item { + name: "/m/018xm" + id: 37 + display_name: "sports ball" +} +item { + name: "/m/02zt3" + id: 38 + display_name: "kite" +} +item { + name: "/m/03g8mr" + id: 39 + display_name: "baseball bat" +} +item { + name: "/m/03grzl" + id: 40 + display_name: "baseball glove" +} +item { + name: "/m/06_fw" + id: 41 + display_name: "skateboard" +} +item { + name: "/m/019w40" + id: 42 + display_name: "surfboard" +} +item { + name: "/m/0dv9c" + id: 43 + display_name: "tennis racket" +} +item { + name: "/m/04dr76w" + id: 44 + display_name: "bottle" +} +item { + name: "45" + id: 45 + display_name: "45" +} +item { + name: "/m/09tvcd" + id: 46 + display_name: "wine glass" +} +item { + name: "/m/08gqpm" + id: 47 + display_name: "cup" +} +item { + name: "/m/0dt3t" + id: 48 + display_name: "fork" +} +item { + name: "/m/04ctx" + id: 49 + display_name: "knife" +} +item { + name: "/m/0cmx8" + id: 50 + display_name: "spoon" +} +item { + name: "/m/04kkgm" + id: 51 + display_name: "bowl" +} +item { + name: "/m/09qck" + id: 52 + display_name: "banana" +} +item { + name: "/m/014j1m" + id: 53 + display_name: "apple" +} +item { + name: "/m/0l515" + id: 54 + display_name: "sandwich" +} +item { + name: "/m/0cyhj_" + id: 55 + display_name: "orange" +} +item { + name: "/m/0hkxq" + id: 56 + display_name: "broccoli" +} +item { + name: "/m/0fj52s" + id: 57 + display_name: "carrot" +} +item { + name: "/m/01b9xk" + id: 58 + display_name: "hot dog" +} +item { + name: "/m/0663v" + id: 59 + display_name: "pizza" +} +item { + name: "/m/0jy4k" + id: 60 + display_name: "donut" +} +item { + name: "/m/0fszt" + id: 61 + display_name: "cake" +} +item { + name: "/m/01mzpv" + id: 62 + display_name: "chair" +} +item { + name: "/m/02crq1" + id: 63 + display_name: "couch" +} +item { + name: "/m/03fp41" + id: 64 + display_name: "potted plant" +} +item { + name: "/m/03ssj5" + id: 65 + display_name: "bed" +} +item { + name: "66" + id: 66 + display_name: "66" +} +item { + name: "/m/04bcr3" + id: 67 + display_name: "dining table" +} +item { + name: "68" + id: 68 + display_name: "68" +} +item { + name: "69" + id: 69 + display_name: "69" +} +item { + name: "/m/09g1w" + id: 70 + display_name: "toilet" +} +item { + name: "71" + id: 71 + display_name: "71" +} +item { + name: "/m/07c52" + id: 72 + display_name: "tv" +} +item { + name: "/m/01c648" + id: 73 + display_name: "laptop" +} +item { + name: "/m/020lf" + id: 74 + display_name: "mouse" +} +item { + name: "/m/0qjjc" + id: 75 + display_name: "remote" +} +item { + name: "/m/01m2v" + id: 76 + display_name: "keyboard" +} +item { + name: "/m/050k8" + id: 77 + display_name: "cell phone" +} +item { + name: "/m/0fx9l" + id: 78 + display_name: "microwave" +} +item { + name: "/m/029bxz" + id: 79 + display_name: "oven" +} +item { + 
name: "/m/01k6s3" + id: 80 + display_name: "toaster" +} +item { + name: "/m/0130jx" + id: 81 + display_name: "sink" +} +item { + name: "/m/040b_t" + id: 82 + display_name: "refrigerator" +} +item { + name: "83" + id: 83 + display_name: "83" +} +item { + name: "/m/0bt_c3" + id: 84 + display_name: "book" +} +item { + name: "/m/01x3z" + id: 85 + display_name: "clock" +} +item { + name: "/m/02s195" + id: 86 + display_name: "vase" +} +item { + name: "/m/01lsmm" + id: 87 + display_name: "scissors" +} +item { + name: "/m/0kmg4" + id: 88 + display_name: "teddy bear" +} +item { + name: "/m/03wvsk" + id: 89 + display_name: "hair drier" +} +item { + name: "/m/012xff" + id: 90 + display_name: "toothbrush" +} diff --git a/models/research/object_detection/data/mscoco_label_map.pbtxt b/models/research/object_detection/data/mscoco_label_map.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..1f4872bd0c7f53e70beecf88af005c07a5df9e08 --- /dev/null +++ b/models/research/object_detection/data/mscoco_label_map.pbtxt @@ -0,0 +1,400 @@ +item { + name: "/m/01g317" + id: 1 + display_name: "person" +} +item { + name: "/m/0199g" + id: 2 + display_name: "bicycle" +} +item { + name: "/m/0k4j" + id: 3 + display_name: "car" +} +item { + name: "/m/04_sv" + id: 4 + display_name: "motorcycle" +} +item { + name: "/m/05czz6l" + id: 5 + display_name: "airplane" +} +item { + name: "/m/01bjv" + id: 6 + display_name: "bus" +} +item { + name: "/m/07jdr" + id: 7 + display_name: "train" +} +item { + name: "/m/07r04" + id: 8 + display_name: "truck" +} +item { + name: "/m/019jd" + id: 9 + display_name: "boat" +} +item { + name: "/m/015qff" + id: 10 + display_name: "traffic light" +} +item { + name: "/m/01pns0" + id: 11 + display_name: "fire hydrant" +} +item { + name: "/m/02pv19" + id: 13 + display_name: "stop sign" +} +item { + name: "/m/015qbp" + id: 14 + display_name: "parking meter" +} +item { + name: "/m/0cvnqh" + id: 15 + display_name: "bench" +} +item { + name: "/m/015p6" + id: 16 + display_name: "bird" +} +item { + name: "/m/01yrx" + id: 17 + display_name: "cat" +} +item { + name: "/m/0bt9lr" + id: 18 + display_name: "dog" +} +item { + name: "/m/03k3r" + id: 19 + display_name: "horse" +} +item { + name: "/m/07bgp" + id: 20 + display_name: "sheep" +} +item { + name: "/m/01xq0k1" + id: 21 + display_name: "cow" +} +item { + name: "/m/0bwd_0j" + id: 22 + display_name: "elephant" +} +item { + name: "/m/01dws" + id: 23 + display_name: "bear" +} +item { + name: "/m/0898b" + id: 24 + display_name: "zebra" +} +item { + name: "/m/03bk1" + id: 25 + display_name: "giraffe" +} +item { + name: "/m/01940j" + id: 27 + display_name: "backpack" +} +item { + name: "/m/0hnnb" + id: 28 + display_name: "umbrella" +} +item { + name: "/m/080hkjn" + id: 31 + display_name: "handbag" +} +item { + name: "/m/01rkbr" + id: 32 + display_name: "tie" +} +item { + name: "/m/01s55n" + id: 33 + display_name: "suitcase" +} +item { + name: "/m/02wmf" + id: 34 + display_name: "frisbee" +} +item { + name: "/m/071p9" + id: 35 + display_name: "skis" +} +item { + name: "/m/06__v" + id: 36 + display_name: "snowboard" +} +item { + name: "/m/018xm" + id: 37 + display_name: "sports ball" +} +item { + name: "/m/02zt3" + id: 38 + display_name: "kite" +} +item { + name: "/m/03g8mr" + id: 39 + display_name: "baseball bat" +} +item { + name: "/m/03grzl" + id: 40 + display_name: "baseball glove" +} +item { + name: "/m/06_fw" + id: 41 + display_name: "skateboard" +} +item { + name: "/m/019w40" + id: 42 + display_name: "surfboard" +} +item { + name: 
"/m/0dv9c" + id: 43 + display_name: "tennis racket" +} +item { + name: "/m/04dr76w" + id: 44 + display_name: "bottle" +} +item { + name: "/m/09tvcd" + id: 46 + display_name: "wine glass" +} +item { + name: "/m/08gqpm" + id: 47 + display_name: "cup" +} +item { + name: "/m/0dt3t" + id: 48 + display_name: "fork" +} +item { + name: "/m/04ctx" + id: 49 + display_name: "knife" +} +item { + name: "/m/0cmx8" + id: 50 + display_name: "spoon" +} +item { + name: "/m/04kkgm" + id: 51 + display_name: "bowl" +} +item { + name: "/m/09qck" + id: 52 + display_name: "banana" +} +item { + name: "/m/014j1m" + id: 53 + display_name: "apple" +} +item { + name: "/m/0l515" + id: 54 + display_name: "sandwich" +} +item { + name: "/m/0cyhj_" + id: 55 + display_name: "orange" +} +item { + name: "/m/0hkxq" + id: 56 + display_name: "broccoli" +} +item { + name: "/m/0fj52s" + id: 57 + display_name: "carrot" +} +item { + name: "/m/01b9xk" + id: 58 + display_name: "hot dog" +} +item { + name: "/m/0663v" + id: 59 + display_name: "pizza" +} +item { + name: "/m/0jy4k" + id: 60 + display_name: "donut" +} +item { + name: "/m/0fszt" + id: 61 + display_name: "cake" +} +item { + name: "/m/01mzpv" + id: 62 + display_name: "chair" +} +item { + name: "/m/02crq1" + id: 63 + display_name: "couch" +} +item { + name: "/m/03fp41" + id: 64 + display_name: "potted plant" +} +item { + name: "/m/03ssj5" + id: 65 + display_name: "bed" +} +item { + name: "/m/04bcr3" + id: 67 + display_name: "dining table" +} +item { + name: "/m/09g1w" + id: 70 + display_name: "toilet" +} +item { + name: "/m/07c52" + id: 72 + display_name: "tv" +} +item { + name: "/m/01c648" + id: 73 + display_name: "laptop" +} +item { + name: "/m/020lf" + id: 74 + display_name: "mouse" +} +item { + name: "/m/0qjjc" + id: 75 + display_name: "remote" +} +item { + name: "/m/01m2v" + id: 76 + display_name: "keyboard" +} +item { + name: "/m/050k8" + id: 77 + display_name: "cell phone" +} +item { + name: "/m/0fx9l" + id: 78 + display_name: "microwave" +} +item { + name: "/m/029bxz" + id: 79 + display_name: "oven" +} +item { + name: "/m/01k6s3" + id: 80 + display_name: "toaster" +} +item { + name: "/m/0130jx" + id: 81 + display_name: "sink" +} +item { + name: "/m/040b_t" + id: 82 + display_name: "refrigerator" +} +item { + name: "/m/0bt_c3" + id: 84 + display_name: "book" +} +item { + name: "/m/01x3z" + id: 85 + display_name: "clock" +} +item { + name: "/m/02s195" + id: 86 + display_name: "vase" +} +item { + name: "/m/01lsmm" + id: 87 + display_name: "scissors" +} +item { + name: "/m/0kmg4" + id: 88 + display_name: "teddy bear" +} +item { + name: "/m/03wvsk" + id: 89 + display_name: "hair drier" +} +item { + name: "/m/012xff" + id: 90 + display_name: "toothbrush" +} diff --git a/models/research/object_detection/data/mscoco_minival_ids.txt b/models/research/object_detection/data/mscoco_minival_ids.txt new file mode 100644 index 0000000000000000000000000000000000000000..5bbff3c18d4efed835bcf022f3a5fbc11da0e496 --- /dev/null +++ b/models/research/object_detection/data/mscoco_minival_ids.txt @@ -0,0 +1,8059 @@ +25096 +251824 +35313 +546011 +524186 +205866 +511403 +313916 +47471 +258628 +233560 +576017 +404517 +410056 +178690 +248980 +511724 +429718 +163076 +244111 +126766 +313182 +191981 +139992 +325237 +248129 +214519 +175438 +493321 +174103 +563762 +536795 +289960 +473720 +515540 +292118 +360851 +267175 +532876 +171613 +581415 +259819 +441841 +381682 +58157 +4980 +473929 +70626 +93773 +283412 +36765 +495020 +278401 +329307 +192810 +491784 +506416 +225495 +553747 +86442 +242208 +132686 
+385877 +290248 +525705 +5476 +486521 +332512 +138556 +348083 +284375 +40018 +296994 +38685 +432429 +183407 +434358 +472164 +530494 +570693 +193401 +392612 +98872 +445766 +532209 +98322 +285114 +267725 +51605 +314812 +91105 +535506 +540264 +375341 +449828 +277659 +68933 +76873 +217554 +213592 +190776 +516224 +474479 +343599 +578813 +128669 +546292 +475365 +377626 +128833 +427091 +547227 +11742 +80213 +462241 +374574 +121572 +29151 +13892 +262394 +303667 +198724 +7320 +448492 +419080 +460379 +483965 +556516 +139181 +1103 +308715 +207507 +213827 +216083 +445597 +240275 +379585 +116389 +138124 +559051 +326898 +419386 +503660 +519460 +23893 +24458 +518109 +462982 +151492 +514254 +2477 +147165 +570394 +548766 +250083 +364341 +351967 +386277 +328084 +511299 +499349 +315501 +234965 +428562 +219771 +288150 +136021 +168619 +298316 +75118 +189752 +243857 +296222 +554002 +533628 +384596 +202981 +498350 +391463 +183991 +528062 +451084 +7899 +408534 +329030 +318566 +22492 +361285 +226973 +213356 +417265 +105622 +161169 +261487 +167477 +233370 +142999 +256713 +305833 +103579 +352538 +135763 +392144 +61181 +200302 +456908 +286858 +179850 +488075 +174511 +194755 +317822 +2302 +304596 +172556 +548275 +341678 +55299 +134760 +352936 +545129 +377012 +141328 +103757 +552837 +28246 +125167 +328745 +278760 +337133 +403389 +146825 +502558 +265916 +428985 +492041 +113403 +372037 +306103 +287574 +187495 +479805 +336309 +162043 +95899 +43133 +464248 +149115 +247438 +74030 +130645 +282841 +127092 +101172 +536743 +179642 +58133 +49667 +170605 +11347 +365277 +201970 +292663 +217219 +463226 +41924 +281102 +357816 +490878 +100343 +525058 +133503 +416145 +29341 +415413 +125527 +507951 +262609 +240210 +581781 +345137 +526342 +268641 +328777 +32001 +137538 +39115 +415958 +6771 +421865 +64909 +383601 +206907 +420840 +370980 +28452 +571893 +153520 +185890 +392991 +547013 +257359 +279879 +478614 +131919 +40937 +22874 +173375 +106344 +44801 +205401 +312870 +400886 +351530 +344013 +173500 +470423 +396729 +402499 +276585 +377097 +367619 +518908 +263866 +332292 +67805 +152211 +515025 +221350 +525247 +78490 +504342 +95908 +82668 +256199 +220270 +552065 +242379 +84866 +152281 +228464 +223122 +67537 +456968 +368349 +101985 +14681 +543551 +107558 +372009 +99054 +126540 +86877 +492785 +482585 +571564 +501116 +296871 +20395 +181518 +568041 +121154 +56187 +190018 +97156 +310325 +393274 +214574 +243222 +289949 +452121 +150508 +341752 +310757 +24040 +228551 +335589 +12020 +529597 +459884 +344888 +229713 +51948 +370929 +552061 +261072 +120070 +332067 +263014 +158993 +451714 +397327 +20965 +414340 +574946 +370266 +487534 +492246 +264771 +73702 +43997 +235124 +301093 +400048 +77681 +58472 +331386 +13783 +242513 +419158 +59325 +383033 +393258 +529041 +249276 +182775 +351793 +9727 +334069 +566771 +539355 +38662 +423617 +47559 +120592 +508303 +462565 +47916 +218208 +182362 +562101 +441442 +71239 +395378 +522637 +25603 +484450 +872 +171483 +527248 +323155 +240754 +15032 +419144 +313214 +250917 +333430 +242757 +221914 +283190 +194297 +228506 +550691 +172513 +312192 +530619 +113867 +323552 +374115 +35435 +160239 +62877 +441873 +196574 +62858 +557114 +427612 +242869 +356733 +304828 +24880 +490509 +407083 +457877 +402788 +536416 +385912 +544121 +500389 +451102 +12120 +483476 +70987 +482799 +542549 +49236 +424258 +435783 +182366 +438093 +501824 +232845 +53965 +223198 +288933 +450458 +285664 +196484 +408930 +519815 +290981 +398567 +315792 +490683 +257136 +75611 +302498 +332153 +82293 +416911 +558608 +564659 +536195 +370260 +57904 +527270 +6593 +145620 
+551650 +470832 +515785 +251404 +287331 +150788 +334006 +266117 +10039 +579158 +328397 +468351 +550400 +31745 +405970 +16761 +323515 +459598 +558457 +570736 +476939 +472610 +72155 +112517 +13659 +530905 +458768 +43486 +560893 +493174 +31217 +262736 +412204 +142722 +151231 +480643 +197245 +398666 +444869 +110999 +191724 +479057 +492420 +170638 +277329 +301908 +395644 +537611 +141887 +47149 +403432 +34818 +372495 +67994 +337497 +478586 +249815 +533462 +281032 +289941 +151911 +271215 +407868 +360700 +508582 +103873 +353658 +369081 +406403 +331692 +26430 +105655 +572630 +37181 +91336 +484587 +318284 +113019 +33055 +25293 +229324 +374052 +384111 +213951 +315195 +319283 +539453 +17655 +308974 +326243 +539436 +417876 +526940 +356347 +221932 +73753 +292648 +262284 +304924 +558587 +374858 +253518 +311744 +539636 +40924 +136624 +334305 +365997 +63355 +191226 +526732 +367128 +575198 +500657 +50637 +17182 +424792 +565353 +563040 +383494 +74458 +155142 +197125 +223857 +428241 +440830 +371289 +437303 +330449 +93771 +82715 +499631 +381257 +563951 +192834 +528600 +404273 +270554 +208053 +188613 +484760 +432016 +129800 +91756 +523097 +317018 +487282 +444913 +159500 +126822 +540564 +105812 +560756 +306099 +471226 +123842 +513219 +154877 +497034 +283928 +564003 +238602 +194780 +462728 +558640 +524373 +455624 +3690 +560367 +316351 +455772 +223777 +161517 +243034 +250440 +239975 +441008 +324715 +152106 +246973 +462805 +296521 +412767 +530913 +370165 +292526 +107244 +217440 +330204 +220176 +577735 +197022 +127451 +518701 +212322 +204887 +27696 +348474 +119233 +282804 +230040 +425690 +409241 +296825 +296353 +375909 +123136 +573891 +338256 +198247 +373375 +151051 +500084 +557596 +120478 +44989 +283380 +149005 +522065 +626 +17198 +309633 +524245 +291589 +322714 +455847 +248468 +371948 +444928 +20438 +481670 +147195 +95022 +548159 +553165 +395324 +391371 +86884 +561121 +219737 +38875 +338159 +377881 +185472 +359277 +114861 +378048 +126226 +10217 +320246 +15827 +178236 +370279 +352978 +408101 +77615 +337044 +223714 +20796 +352445 +263834 +156704 +377867 +119402 +399567 +1180 +257941 +560675 +390471 +209290 +258382 +466339 +56437 +195042 +384230 +203214 +36077 +283038 +38323 +158770 +532381 +395903 +375461 +397857 +326798 +371699 +369503 +495626 +464328 +462211 +397719 +434089 +424793 +476770 +531852 +303538 +525849 +480917 +419653 +265063 +48956 +5184 +279149 +396727 +374266 +124429 +36124 +240213 +147556 +339512 +577182 +288599 +257169 +178254 +393869 +122314 +28713 +48133 +540681 +100974 +368459 +500110 +73634 +460982 +203878 +578344 +443602 +502012 +399666 +103603 +22090 +257529 +176328 +536656 +408873 +116881 +460972 +33835 +460781 +51223 +46463 +89395 +407646 +337453 +461715 +16257 +426987 +234889 +3125 +165643 +517472 +451435 +206800 +112128 +331236 +163306 +94185 +498716 +532732 +146509 +458567 +153832 +105996 +353398 +546976 +283060 +247624 +110048 +243491 +154798 +543600 +149962 +355256 +352900 +203081 +372203 +284605 +516244 +190494 +150301 +326082 +64146 +402858 +413538 +399510 +460251 +94336 +458721 +57345 +424162 +423508 +69356 +567220 +509786 +37038 +111535 +341318 +372067 +358120 +244909 +180653 +39852 +438560 +357041 +67065 +51928 +171717 +520430 +552395 +431355 +528084 +20913 +309610 +262323 +573784 +449485 +154846 +283438 +430871 +199578 +516318 +563912 +348483 +485613 +143440 +94922 +168817 +74457 +45830 +66297 +514173 +99186 +296236 +230903 +452312 +476444 +568981 +100811 +237350 +194724 +453622 +49559 +270609 +113701 +415393 +92173 +137004 +188795 +148280 +448114 +575964 +163155 +518719 +219329 
+214247 +363927 +65357 +87617 +552612 +457817 +124796 +47740 +560463 +513968 +273637 +354212 +95959 +261061 +307265 +316237 +191342 +463272 +169273 +396518 +93261 +572733 +407386 +202658 +446497 +420852 +229274 +432724 +34900 +352533 +49891 +66144 +146831 +467484 +97988 +561647 +301155 +507421 +173217 +577584 +451940 +99927 +350639 +178941 +485155 +175948 +360673 +92963 +361321 +48739 +577310 +517795 +93405 +506458 +394681 +167920 +16995 +519573 +270532 +527750 +563403 +494608 +557780 +178691 +8676 +186927 +550173 +361656 +575911 +281315 +534377 +57570 +340894 +37624 +143103 +538243 +425077 +376545 +108129 +170974 +7522 +408906 +264279 +79415 +344025 +186797 +234349 +226472 +123639 +225177 +237984 +38714 +223671 +358247 +152465 +521405 +453722 +361111 +557117 +235832 +309341 +268469 +108353 +532531 +357279 +537280 +437618 +122953 +7088 +36693 +127659 +431901 +57244 +567565 +568111 +202926 +504516 +555685 +322369 +347620 +110231 +568982 +295340 +529798 +300341 +158160 +73588 +119476 +387216 +154994 +259755 +211282 +433971 +263588 +299468 +570138 +123017 +355106 +540172 +406215 +8401 +548844 +161820 +396432 +495348 +222407 +53123 +491556 +108130 +440617 +448309 +22596 +346841 +213829 +135076 +56326 +233139 +487418 +227326 +137763 +383389 +47882 +207797 +167452 +112065 +150703 +421109 +171753 +158279 +240800 +66821 +152886 +163640 +475466 +301799 +106712 +470885 +536370 +420389 +396768 +281950 +18903 +357529 +33650 +168243 +201004 +389295 +557150 +185327 +181256 +557396 +182025 +61564 +301928 +332455 +199403 +18444 +177452 +204206 +38465 +215906 +153103 +445019 +324527 +299207 +429281 +574675 +157067 +241269 +100850 +502818 +576566 +296775 +873 +280363 +355240 +383445 +286182 +67327 +422778 +494855 +337246 +266853 +47516 +381991 +44081 +403862 +381430 +370798 +173383 +387173 +22396 +484066 +349414 +262235 +492814 +65238 +209420 +336276 +453328 +407286 +420490 +360328 +158440 +398534 +489475 +477389 +297108 +69750 +507833 +198992 +99736 +546444 +514914 +482574 +54355 +63478 +191693 +61684 +412914 +267408 +424641 +56872 +318080 +30290 +33441 +199310 +337403 +26731 +453390 +506137 +188945 +185950 +239843 +357944 +290570 +523637 +551952 +513397 +357870 +523517 +277048 +259879 +186991 +521943 +21900 +281074 +187194 +526723 +568147 +513037 +177338 +243831 +203488 +208494 +188460 +289943 +399177 +404668 +160761 +271143 +76087 +478922 +440045 +449432 +61025 +331138 +227019 +147577 +548337 +444294 +458663 +236837 +6854 +444926 +484816 +516641 +397863 +188534 +64822 +213453 +66561 +43218 +514901 +322844 +498453 +488788 +391656 +298994 +64088 +464706 +193720 +199017 +186427 +15278 +350386 +342335 +372024 +550939 +35594 +381382 +235902 +26630 +213765 +550001 +129706 +577149 +353096 +376891 +28499 +427041 +314965 +231163 +5728 +347836 +184388 +27476 +284860 +476872 +301317 +99546 +147653 +529515 +311922 +20777 +2613 +59463 +430670 +560744 +60677 +332087 +296724 +353321 +103306 +363887 +76431 +423058 +120340 +119452 +6723 +462327 +163127 +402723 +489382 +183181 +107656 +375409 +355228 +430762 +512468 +409125 +270544 +559113 +495388 +529434 +38355 +422025 +379667 +131386 +183409 +573536 +581317 +425404 +350084 +472 +28532 +329717 +230220 +187196 +484166 +97434 +224595 +87483 +516998 +314876 +32610 +514586 +344816 +394418 +402330 +305993 +371497 +315790 +294908 +207431 +561014 +26584 +368671 +374990 +54747 +47571 +449424 +283761 +84735 +522127 +120473 +524656 +479659 +131627 +450959 +153300 +580908 +207785 +49115 +284991 +96505 +278306 +291655 +1404 +489304 +557459 +37740 +157465 +390475 +119166 +33871 
+247428 +75905 +20779 +65035 +333556 +375415 +383676 +505243 +87327 +16451 +287235 +70190 +245067 +417520 +229234 +183786 +333018 +554156 +198915 +108021 +128262 +412443 +242543 +555050 +436511 +445233 +207886 +156397 +526257 +521357 +413043 +427189 +401614 +94823 +351130 +105945 +182314 +305879 +526197 +64409 +496800 +236461 +138175 +43816 +185904 +345711 +72536 +526737 +360400 +556537 +426053 +59044 +28290 +222548 +434915 +418623 +246454 +111801 +12448 +427133 +459117 +11262 +169045 +469996 +304390 +513096 +322822 +196371 +504977 +395364 +243950 +216218 +417217 +106736 +58194 +504101 +478522 +379314 +30432 +207027 +297146 +91844 +176031 +98287 +278095 +196053 +343692 +523137 +220224 +349485 +376193 +407067 +185781 +37871 +336464 +46331 +44244 +80274 +170147 +361106 +468499 +537864 +467457 +267343 +291528 +287828 +555648 +388284 +576085 +531973 +350122 +422253 +509811 +78093 +410019 +133090 +581205 +343976 +9007 +92478 +450674 +486306 +503978 +46378 +335578 +404071 +225558 +217923 +406217 +138054 +575815 +234990 +336257 +159240 +399516 +226408 +531126 +138599 +61693 +89861 +29504 +163296 +477906 +48419 +25595 +195594 +97592 +392555 +203849 +139248 +245651 +275755 +245426 +127279 +521359 +517623 +235747 +475906 +11198 +336101 +70134 +505447 +218996 +30080 +484457 +120441 +575643 +132703 +197915 +505576 +90956 +99741 +517819 +240918 +150834 +207306 +132682 +88250 +213599 +462584 +413321 +361521 +496081 +410583 +440027 +417284 +397069 +280498 +473171 +129739 +279774 +29370 +518899 +509867 +85556 +434930 +280710 +55077 +348793 +157756 +281111 +190689 +281447 +502854 +232894 +268742 +199553 +220808 +137330 +256903 +116017 +466416 +41635 +110906 +340934 +557501 +146767 +517617 +487159 +1561 +417281 +489014 +292463 +113533 +412247 +263973 +515444 +343561 +310200 +293804 +225867 +150320 +183914 +9707 +89999 +177842 +296524 +287829 +68300 +363654 +465986 +159969 +313948 +522779 +219820 +198352 +12959 +266727 +8016 +175804 +497867 +307892 +287527 +309638 +205854 +114119 +23023 +322586 +383341 +134198 +553522 +70426 +329138 +105367 +175597 +187791 +17944 +366611 +93493 +242422 +41842 +558840 +32203 +19667 +124297 +383726 +252625 +234794 +498228 +102906 +287967 +69021 +51326 +243896 +509423 +440124 +122582 +344325 +34455 +442478 +23587 +236904 +185633 +349841 +44294 +112568 +186296 +71914 +3837 +135486 +223747 +557517 +385181 +265313 +404263 +26564 +516867 +497096 +332351 +345139 +444304 +510877 +356387 +561214 +311471 +408789 +561729 +291380 +174671 +45710 +435136 +388858 +361693 +50811 +531134 +573605 +340175 +534988 +382671 +327047 +348400 +547137 +401037 +490711 +499266 +236370 +449075 +334015 +107234 +232315 +462953 +252048 +186822 +410168 +28994 +45550 +453626 +417957 +468577 +106338 +391684 +375143 +217622 +357903 +347648 +142182 +213843 +299148 +352587 +436676 +161875 +144655 +304741 +235017 +181799 +211042 +335507 +553731 +412531 +229740 +437129 +423830 +561806 +337666 +52016 +138057 +70254 +494393 +73119 +262425 +565395 +305329 +489611 +377080 +569450 +549766 +332940 +235302 +53893 +203781 +38449 +114870 +18699 +396338 +449839 +423613 +379767 +369594 +375812 +359219 +229311 +291675 +224907 +416885 +32964 +573406 +17282 +103375 +81860 +576886 +461334 +35672 +243442 +217269 +445055 +211112 +455675 +412384 +88967 +550643 +24223 +504074 +9275 +155546 +329542 +172658 +331600 +315492 +194208 +162867 +324614 +432017 +140860 +157944 +406616 +486079 +361172 +258346 +494140 +315384 +451014 +242619 +413684 +386187 +408501 +121089 +343603 +232538 +558671 +551596 +32992 +406647 +435260 +11156 +40896 
+175382 +110560 +252968 +189694 +63154 +564816 +72004 +164788 +434583 +453104 +111878 +268484 +290768 +473215 +450620 +32673 +277479 +529917 +315868 +562419 +378347 +398637 +84097 +120527 +134193 +431472 +400238 +86426 +208830 +524535 +22213 +516813 +526044 +386193 +246672 +386739 +559252 +153344 +236123 +246074 +323615 +92644 +408621 +323231 +499940 +296105 +578902 +150098 +145015 +131431 +318618 +68409 +497928 +362520 +467755 +112702 +163219 +277289 +192362 +497674 +525439 +56267 +465868 +407570 +551608 +345211 +179653 +55295 +97315 +534041 +505822 +411082 +132375 +25378 +272008 +536605 +123511 +148737 +577712 +493751 +29587 +468297 +528458 +491058 +558976 +181421 +209685 +147545 +486964 +570516 +168662 +19446 +395997 +242911 +232511 +317035 +354527 +5961 +513793 +124390 +370123 +113397 +195790 +252813 +326919 +432414 +409239 +458221 +115667 +212239 +279279 +375554 +546622 +317188 +260818 +286021 +377111 +209868 +243148 +132037 +560624 +459721 +193498 +22623 +254164 +112841 +383470 +62692 +227940 +471335 +44858 +213649 +179898 +102837 +474078 +44478 +256197 +309492 +182923 +421139 +275695 +104965 +480780 +449749 +76513 +578591 +336695 +247474 +320490 +246105 +53183 +485740 +575823 +510735 +290741 +37017 +348708 +279784 +453634 +567644 +434192 +482719 +435324 +544299 +106896 +569926 +301574 +492885 +103462 +487151 +513585 +219647 +303685 +459645 +76292 +188579 +154883 +207728 +425074 +310493 +27221 +371694 +119404 +399665 +273556 +454577 +580698 +267664 +295769 +423740 +22461 +22667 +508443 +390401 +369997 +524627 +193349 +132223 +576743 +130586 +487741 +107542 +501420 +520109 +308156 +540581 +231362 +86471 +472930 +351133 +463605 +575577 +159842 +39504 +223020 +63525 +298627 +139883 +375205 +303549 +16838 +495680 +408112 +394474 +188044 +472143 +463751 +31481 +378139 +190853 +442614 +172006 +140270 +133051 +178028 +495090 +88455 +13232 +46323 +346275 +425905 +487013 +433136 +514402 +521906 +4157 +61418 +567205 +213351 +304008 +296492 +506561 +408120 +415961 +323186 +480379 +349199 +201918 +135023 +456483 +136173 +237917 +4972 +99081 +331569 +150007 +36450 +93400 +487461 +203629 +218093 +487181 +113935 +139512 +210981 +358883 +47419 +248382 +80357 +462663 +83097 +26159 +80429 +283055 +452676 +50159 +12326 +29430 +303264 +158122 +569070 +52925 +534876 +46975 +426376 +170293 +434417 +235517 +218476 +445008 +482774 +305632 +116848 +557252 +229270 +453485 +382214 +54759 +59171 +193328 +17152 +238071 +148531 +409725 +75434 +65358 +473057 +415408 +579415 +48636 +269606 +298784 +162799 +356400 +326854 +24601 +66499 +340247 +20992 +190218 +548464 +122203 +405306 +495376 +536028 +5713 +206831 +9395 +503939 +194440 +474253 +395849 +165141 +204935 +412621 +402922 +87141 +570664 +202622 +137362 +221737 +78947 +112129 +341957 +169562 +164780 +360216 +107641 +415015 +444955 +559102 +123070 +176592 +309366 +116461 +222075 +530470 +214363 +414487 +471567 +292123 +370210 +364243 +510254 +396350 +141524 +220310 +398604 +145436 +392476 +17482 +78032 +336171 +130812 +489743 +346638 +418854 +139072 +263860 +458240 +383443 +337533 +182334 +535608 +517946 +489924 +308117 +129945 +59973 +538364 +513458 +449433 +25165 +335851 +487688 +153834 +347612 +349689 +443688 +486008 +479149 +442286 +61108 +315338 +511546 +506444 +775 +121839 +291412 +497626 +387223 +367095 +557896 +196118 +530652 +447991 +215622 +232160 +296731 +272273 +473415 +364705 +235790 +479950 +141278 +547903 +66523 +353989 +121875 +237735 +100083 +348941 +288983 +390083 +168248 +120776 +489764 +219135 +551713 +256035 +309005 +112493 +579759 +114972 
+458992 +295768 +158497 +309696 +363844 +507966 +313491 +280779 +327130 +292901 +127761 +183843 +456521 +164475 +224281 +443713 +72514 +567383 +476215 +565650 +17708 +474471 +248334 +196313 +164759 +212453 +319024 +332916 +35436 +113139 +172716 +7570 +161609 +144534 +137475 +561411 +45844 +332027 +36990 +190160 +421231 +283210 +365611 +511407 +400887 +485071 +481214 +347203 +153506 +397403 +229599 +357322 +76034 +101189 +567444 +92363 +526767 +218811 +362812 +339120 +579696 +399269 +10705 +549012 +410428 +105623 +535307 +419235 +119911 +236604 +515779 +188173 +66397 +549119 +478742 +256180 +128224 +440539 +112818 +315434 +97513 +171970 +433483 +226008 +83217 +424548 +343753 +350334 +479280 +208808 +43266 +399893 +444386 +47687 +499093 +565269 +465835 +167486 +433460 +169872 +299640 +158466 +241373 +50576 +161567 +73560 +349804 +181745 +352684 +450357 +532693 +88335 +256518 +94926 +541197 +14629 +276149 +539439 +498738 +25654 +291330 +146465 +160190 +513064 +75748 +499007 +164464 +134042 +422416 +543315 +34056 +303197 +394801 +293071 +44964 +529083 +414522 +331180 +227599 +581040 +382850 +159898 +176841 +205352 +540782 +406591 +184499 +14380 +350230 +458175 +528786 +314935 +111086 +2191 +20371 +337042 +558371 +296907 +539937 +511463 +574856 +87864 +403817 +152598 +169712 +533227 +173545 +478862 +19455 +258433 +373440 +460229 +525682 +176857 +525050 +277025 +156416 +206784 +415179 +183204 +210374 +312868 +514366 +65208 +376342 +515792 +383066 +85247 +119132 +338007 +88748 +206705 +495808 +532164 +150686 +35474 +207860 +111165 +391199 +346011 +537721 +11390 +487482 +360983 +400347 +92795 +347506 +324322 +371958 +101280 +222842 +563604 +210299 +150616 +96351 +330455 +273551 +228749 +248051 +495252 +372265 +52664 +191874 +157416 +446428 +136681 +1228 +321811 +93791 +477867 +192520 +157124 +40620 +200541 +103904 +329494 +60093 +112573 +489125 +513115 +322968 +561619 +74309 +572462 +248252 +375376 +217312 +243213 +79878 +452218 +349754 +554291 +434043 +460373 +452591 +567787 +504711 +196007 +511153 +312416 +296056 +308849 +203667 +253223 +331230 +465545 +363048 +69392 +301506 +216198 +147979 +6005 +381870 +56983 +320972 +144122 +210855 +151480 +299288 +462486 +103931 +321079 +4134 +239861 +540006 +413805 +221222 +198943 +450790 +380597 +388298 +58737 +246197 +160726 +398554 +513946 +222235 +323851 +364703 +125643 +169800 +445662 +223764 +575372 +489207 +559474 +7155 +453819 +402720 +102355 +415076 +287436 +35705 +111076 +395865 +310862 +570834 +54728 +215778 +80053 +35148 +350488 +524140 +190097 +36661 +302110 +96884 +383397 +245462 +446958 +138937 +424712 +561814 +276964 +148034 +411068 +357824 +103257 +322149 +508899 +580294 +214386 +114419 +271429 +168260 +209835 +573072 +252269 +31980 +161308 +281508 +192714 +247599 +188948 +180563 +419601 +233660 +154804 +311846 +181499 +5535 +175082 +531018 +412338 +166995 +441411 +427820 +516846 +287366 +67959 +271266 +330845 +74209 +508167 +542699 +66485 +453756 +158412 +443784 +118097 +265050 +29074 +152623 +532493 +292988 +530384 +192660 +502336 +472648 +151657 +351626 +241010 +115070 +268356 +539557 +304698 +251140 +497158 +527445 +385428 +179200 +512394 +184978 +141910 +36311 +579457 +19129 +424960 +181714 +126216 +512911 +488360 +379533 +337551 +325410 +364587 +468885 +211107 +90062 +500446 +105960 +451951 +431431 +134178 +164548 +173826 +373988 +15157 +3091 +393557 +380011 +75372 +37403 +209995 +493610 +315899 +353299 +355040 +547000 +86133 +58174 +377326 +510230 +480583 +158588 +432529 +311206 +127626 +239980 +166340 +104185 +405174 +507211 +542782 
+448078 +253477 +542694 +567308 +214853 +288824 +283268 +480757 +503200 +221089 +112388 +171539 +124452 +224200 +206362 +428754 +256192 +119414 +351620 +330050 +547504 +216398 +94261 +19916 +163242 +432588 +143824 +361103 +271138 +260150 +313627 +141086 +308263 +388453 +153217 +372794 +514787 +251910 +351335 +92683 +465836 +18442 +404128 +208476 +47873 +303219 +201622 +367489 +32760 +436174 +401926 +338419 +45248 +328464 +312216 +156282 +315702 +300701 +345401 +515350 +29094 +284296 +466449 +351057 +110672 +364853 +10014 +415828 +397522 +451412 +433124 +158277 +93476 +183387 +109889 +223326 +105547 +530061 +256301 +526778 +80974 +86650 +45835 +202154 +92678 +315991 +423919 +455044 +491168 +272253 +146627 +285349 +86001 +44171 +162332 +257328 +432820 +519275 +380639 +269436 +236016 +543215 +346752 +575970 +423498 +136926 +195648 +126634 +133078 +138656 +490012 +122388 +195165 +434900 +533625 +504167 +333697 +216576 +538775 +125072 +391154 +545007 +150292 +566717 +367362 +490991 +356623 +141271 +402795 +516786 +39499 +536716 +293324 +212853 +276381 +57124 +325992 +394659 +452178 +117674 +461172 +518586 +497021 +462345 +526570 +17328 +202928 +62566 +411277 +256983 +49473 +211206 +398031 +277955 +531178 +453959 +27946 +252844 +30273 +536933 +500298 +229111 +7977 +27642 +303726 +79927 +110313 +527691 +442205 +33345 +365851 +233236 +239157 +409221 +400803 +32947 +422516 +359727 +215872 +559454 +289716 +450247 +57827 +312298 +530383 +260048 +35857 +224222 +299533 +13296 +325907 +117869 +54088 +391011 +340478 +205344 +347823 +468604 +78701 +101414 +197499 +490871 +89273 +380343 +441974 +35974 +486114 +354398 +535536 +294030 +7276 +278742 +137028 +98721 +372764 +429802 +72105 +220307 +116845 +195406 +333000 +130401 +264382 +125458 +363036 +286994 +531070 +113801 +4108 +47603 +130118 +573924 +302990 +237566 +21470 +577926 +139436 +425925 +36844 +63602 +399791 +35894 +347228 +225617 +504813 +245320 +466007 +553931 +166731 +164885 +19090 +457262 +247806 +502895 +167593 +352491 +520 +26386 +497348 +352000 +386164 +32901 +730 +30925 +333167 +150361 +231747 +462244 +504958 +260738 +313762 +346645 +486118 +202998 +541613 +183884 +230245 +83172 +126638 +51844 +421673 +118625 +377723 +229427 +371326 +104345 +361687 +114246 +397354 +104137 +120850 +260516 +389168 +234555 +26348 +78522 +409784 +303024 +377949 +69887 +546983 +113736 +298197 +476810 +137315 +376321 +410337 +492905 +119785 +158167 +185930 +354061 +106563 +328452 +506587 +536517 +480173 +570688 +376441 +252127 +247720 +132554 +41923 +400317 +170041 +151938 +198650 +6437 +49091 +221820 +455966 +309859 +300659 +15850 +388014 +253386 +65415 +238228 +548882 +302155 +93483 +371869 +397287 +315249 +360564 +448410 +21382 +477474 +144862 +517515 +230190 +322353 +231568 +14940 +132719 +498942 +182469 +113720 +168890 +94852 +246077 +117535 +52596 +419116 +522020 +255338 +125228 +564332 +106375 +249534 +220915 +177758 +293057 +222430 +196878 +554980 +375606 +173081 +84936 +418907 +562229 +457616 +125700 +66038 +239274 +574110 +305540 +98431 +167347 +53345 +438481 +286010 +5569 +343606 +168898 +191301 +236338 +291394 +715 +520237 +236954 +192212 +524002 +471625 +476029 +413124 +203455 +483328 +476417 +114389 +372428 +369221 +322654 +388157 +561314 +264540 +418680 +359540 +426182 +521613 +92248 +74478 +398905 +554273 +125909 +430583 +418959 +503522 +382999 +403145 +536375 +352618 +108193 +279696 +163253 +439007 +204536 +552186 +269926 +372147 +399921 +201418 +240565 +471483 +91619 +393971 +331648 +385856 +567440 +81922 +391722 +372894 +535997 +134096 +545958 
+239943 +186929 +34222 +177714 +277812 +197111 +281878 +532003 +557172 +142890 +196116 +385454 +322845 +374987 +123137 +255112 +111207 +304819 +523526 +336046 +42893 +241273 +240049 +90659 +271364 +408008 +253282 +167067 +354278 +178317 +229653 +93333 +163666 +566920 +495199 +100329 +218119 +558864 +257382 +406152 +206587 +420339 +325919 +278853 +555763 +293200 +151000 +209664 +79380 +197177 +353953 +464522 +392260 +46144 +154202 +164366 +206025 +511236 +24921 +497907 +393226 +318138 +364125 +157321 +492395 +187857 +109939 +441500 +144251 +368581 +51403 +283498 +43555 +89356 +404601 +23272 +425762 +460682 +544629 +209829 +322029 +199247 +307262 +571242 +124236 +162393 +104829 +250766 +563938 +237399 +131516 +483001 +21994 +97958 +540187 +264497 +384808 +343187 +51277 +6712 +566103 +435384 +292082 +359039 +165157 +267972 +263796 +489313 +392722 +541924 +554433 +571034 +146112 +201934 +518716 +64116 +294992 +289586 +159970 +479617 +269006 +140465 +513260 +554805 +6579 +452696 +34445 +548296 +372983 +509656 +199339 +130030 +128372 +449454 +139306 +247914 +99024 +499134 +536653 +468917 +412813 +404338 +215303 +455414 +413497 +574988 +397117 +188631 +378701 +241867 +143129 +419884 +412749 +496954 +317732 +16977 +398309 +162363 +147576 +100016 +209018 +92660 +173302 +525732 +449198 +99734 +12733 +172946 +168032 +210988 +340697 +4795 +534887 +483553 +278323 +178175 +190095 +357542 +230432 +227460 +334609 +562121 +378126 +555357 +325666 +451859 +526837 +531710 +297249 +294839 +499785 +254976 +527220 +173057 +11760 +163012 +215998 +114420 +57812 +563712 +513887 +201859 +36333 +291990 +338375 +460621 +518889 +337502 +133050 +80172 +537007 +295270 +335644 +227852 +336044 +204137 +82259 +165675 +295713 +343937 +442567 +356002 +346932 +62985 +180925 +525381 +13081 +377406 +159774 +462643 +359105 +185821 +390201 +84168 +128059 +80340 +481159 +491902 +306619 +353807 +390569 +541562 +292616 +64621 +439224 +96288 +449798 +160927 +496324 +90778 +126145 +97230 +572767 +11570 +539075 +350988 +3779 +208135 +551315 +216449 +169606 +502 +67765 +281414 +118594 +146127 +543985 +124927 +471394 +385508 +373783 +501315 +140974 +42757 +527054 +202387 +513056 +329931 +153973 +510152 +520812 +534601 +131282 +386638 +508538 +234779 +229329 +396568 +153568 +229478 +153574 +356299 +436694 +324139 +299409 +212462 +478155 +393266 +117836 +190760 +213605 +196 +444382 +445211 +363845 +433277 +521141 +464786 +169076 +301402 +4495 +177258 +328962 +183757 +452966 +416059 +113233 +559417 +280678 +481398 +328372 +234910 +30667 +343062 +383046 +370953 +258089 +404229 +456931 +535183 +300867 +60507 +262672 +7288 +81100 +575395 +539951 +347848 +437594 +352005 +14941 +196453 +528386 +466939 +482187 +293468 +494077 +217285 +362951 +435751 +411480 +517315 +480015 +60610 +353001 +376442 +430265 +478338 +303069 +525344 +437331 +389315 +8179 +31981 +313872 +330920 +515465 +258905 +142249 +323128 +389699 +565012 +124636 +488693 +376608 +309424 +370596 +261940 +39871 +226984 +152866 +515050 +116861 +412876 +120411 +550452 +565273 +273791 +181466 +183155 +293505 +336113 +569997 +303738 +331049 +147030 +74058 +198176 +23991 +198841 +79816 +85183 +261535 +566756 +386291 +318200 +569849 +57429 +36049 +420827 +519271 +24391 +172087 +158795 +133002 +522198 +133698 +499365 +79261 +258860 +457718 +179948 +421875 +558073 +206684 +529762 +456756 +65773 +425722 +53102 +294264 +416730 +38574 +176275 +404297 +127494 +242060 +272212 +189244 +510861 +421370 +208516 +206431 +248457 +39502 +375087 +130839 +308730 +572453 +263474 +544611 +255708 +412604 
+390094 +578131 +234463 +493563 +9450 +381914 +148999 +32300 +423576 +569758 +347253 +92939 +112212 +13923 +39472 +363736 +289659 +269949 +88349 +188522 +488915 +129054 +573823 +316000 +440562 +408818 +539302 +199575 +122300 +340047 +322816 +472878 +313922 +228071 +265648 +400166 +169166 +10040 +125245 +148766 +31281 +172599 +431067 +208236 +441824 +175611 +15148 +431199 +521587 +50025 +443139 +349822 +515056 +27530 +571970 +82367 +7115 +424333 +157601 +537506 +447187 +115182 +547597 +5586 +143040 +31650 +196336 +279818 +206273 +403104 +514248 +243190 +558642 +548246 +16848 +391539 +89614 +284589 +191314 +259452 +208380 +209441 +465463 +385005 +321385 +223569 +11727 +87574 +566470 +210890 +323598 +427193 +425676 +401240 +94021 +259571 +447553 +456053 +84693 +14278 +119995 +234595 +408696 +136271 +143560 +357578 +28071 +36561 +157102 +293789 +392251 +356622 +180274 +48320 +475779 +301326 +100977 +413551 +574010 +404479 +80725 +552221 +575441 +197424 +124601 +215633 +359546 +25386 +73199 +334466 +156572 +124614 +34121 +460049 +327623 +441695 +292488 +476514 +464018 +348571 +113413 +125208 +129690 +446218 +493761 +383413 +460390 +343149 +374041 +525211 +451263 +333683 +385194 +107427 +102872 +517249 +475879 +575755 +147787 +297180 +343774 +112437 +142240 +384503 +511111 +51089 +145408 +143582 +408138 +162858 +71850 +126925 +222781 +314616 +425609 +203928 +337563 +223300 +52644 +272566 +232597 +374430 +469075 +267164 +265851 +28134 +308889 +465795 +47263 +233727 +42 +493117 +124621 +533378 +361259 +458750 +429033 +383289 +490927 +520964 +174420 +64425 +378859 +401850 +281475 +46508 +205300 +280736 +110961 +230679 +151956 +321497 +73665 +488736 +165353 +365983 +556230 +21465 +581226 +448861 +3793 +347335 +150726 +75319 +2521 +285894 +133876 +104589 +346013 +63516 +83656 +491515 +326256 +49942 +28508 +475413 +270222 +235839 +48554 +327777 +111179 +507171 +425973 +449490 +205239 +82375 +459575 +432300 +91885 +340922 +270239 +195894 +121417 +344831 +439651 +232148 +391688 +480793 +534275 +260823 +469294 +8688 +255654 +191300 +383464 +81594 +21240 +478077 +517596 +555953 +294119 +402234 +459500 +564280 +106849 +167501 +98328 +267411 +145512 +272599 +50054 +414156 +161129 +418226 +11796 +502090 +390350 +440500 +240727 +104406 +163682 +437910 +143767 +358901 +527631 +500543 +28377 +231097 +227985 +556703 +421566 +73201 +478393 +280347 +15497 +131969 +515760 +295440 +462527 +42147 +120007 +212895 +425361 +454143 +5758 +366782 +213932 +229848 +458861 +132791 +476664 +150365 +343038 +529649 +180515 +499810 +329041 +15660 +419228 +396295 +502644 +321085 +245049 +34193 +217323 +446455 +528046 +375573 +15802 +147448 +407291 +84000 +280891 +150487 +510606 +163025 +249964 +126123 +233771 +118507 +97278 +357386 +23121 +10580 +2153 +176017 +371472 +373289 +173908 +296797 +334083 +301107 +577522 +125404 +278359 +575032 +273002 +266371 +108315 +255633 +503490 +250051 +143927 +117407 +198271 +447043 +329789 +399991 +458388 +87489 +228411 +494634 +260802 +454161 +446322 +231079 +438373 +395665 +244539 +212427 +356660 +347276 +183287 +498374 +21167 +544522 +418533 +288493 +245660 +406103 +406976 +367313 +455555 +117337 +384465 +185697 +160393 +463825 +276852 +181462 +176288 +452816 +102497 +54277 +225791 +361046 +197278 +9857 +227736 +398992 +55868 +170914 +181677 +467803 +560470 +264599 +540372 +559442 +201207 +137227 +267643 +355471 +245431 +555669 +344498 +84783 +193474 +102411 +401860 +119469 +448786 +449990 +568082 +340472 +307573 +231828 +307547 +82052 +15140 +493612 +503972 +386592 +473219 +495557 +159440 
+355869 +311531 +209733 +240119 +415048 +296098 +249482 +15663 +151432 +263011 +488539 +463913 +502798 +174276 +495613 +407861 +229304 +146742 +545039 +161202 +295134 +162144 +453317 +52759 +335201 +222903 +20333 +559550 +336049 +346140 +491223 +306611 +102746 +455355 +449921 +477288 +77821 +289712 +452663 +147758 +129571 +490869 +345961 +94501 +160394 +432993 +178796 +372494 +316323 +383435 +194940 +74583 +148911 +518027 +431827 +32724 +158548 +227227 +500330 +54679 +321024 +471175 +252074 +476569 +573258 +337247 +294373 +558661 +148898 +563267 +163112 +411968 +193565 +455210 +349344 +337160 +160456 +255158 +553678 +123843 +549687 +381968 +579471 +100604 +379841 +357526 +197263 +14756 +412639 +210915 +47204 +539251 +166255 +490199 +260363 +91654 +170550 +187888 +97362 +285418 +176993 +292741 +361901 +296988 +223496 +493753 +114907 +151358 +316534 +472509 +499802 +348519 +347747 +58851 +104790 +396779 +130528 +2255 +19624 +526800 +233950 +505945 +131207 +290750 +114090 +196665 +8708 +134688 +394715 +115088 +492196 +530099 +518729 +291572 +421457 +445365 +78929 +415461 +551796 +210002 +207913 +344878 +303893 +149196 +353275 +122413 +553361 +519132 +467135 +431439 +17089 +322119 +228214 +35062 +105689 +366141 +285651 +60409 +472671 +401446 +492846 +21023 +421952 +374100 +265200 +506628 +62298 +243626 +212122 +350648 +409921 +428140 +399212 +388267 +198921 +429246 +202040 +570001 +261346 +61171 +131815 +455448 +82696 +554607 +102174 +386803 +188421 +191846 +209898 +380117 +321064 +119617 +188651 +132210 +244299 +174072 +542910 +378334 +118405 +543347 +183657 +581180 +395289 +64760 +265584 +29573 +493720 +94795 +315601 +416596 +260106 +244019 +463884 +579468 +112085 +300972 +238528 +382542 +57672 +165298 +46889 +289497 +337180 +481252 +7913 +432150 +288161 +403758 +257336 +565331 +346589 +270785 +205670 +231580 +508580 +98871 +239997 +554579 +160057 +404922 +78771 +380756 +171199 +148077 +22892 +145378 +26967 +235200 +176007 +90349 +554377 +189744 +257053 +270515 +66508 +113890 +291983 +558927 +420916 +140908 +58384 +438226 +575776 +106935 +40602 +468993 +494810 +210408 +365685 +483722 +39430 +258793 +272615 +51476 +189919 +443887 +391648 +422670 +445135 +198959 +405529 +459757 +465489 +81827 +262576 +408289 +309237 +76249 +460091 +512630 +45959 +280320 +200492 +404652 +48475 +18480 +457097 +65889 +162256 +265950 +520752 +299082 +51500 +499313 +104906 +35438 +167647 +7274 +387824 +242139 +173166 +399830 +12014 +510642 +154053 +67785 +78170 +514118 +87998 +52703 +203539 +534533 +85926 +274438 +401653 +458790 +509262 +144481 +387515 +246649 +503207 +235131 +501531 +62025 +43286 +272323 +326128 +561889 +167529 +171067 +50778 +301282 +469719 +509388 +480317 +379055 +546428 +192763 +445602 +420882 +232790 +174332 +232865 +292822 +511145 +119502 +312591 +110330 +281353 +116244 +58778 +428079 +64902 +520840 +232054 +473214 +572574 +296684 +351590 +217997 +178761 +71618 +226496 +285212 +381195 +499903 +232849 +468997 +345559 +503097 +578570 +396404 +405223 +578752 +403500 +188958 +504498 +491623 +462929 +525762 +395550 +574227 +240751 +169356 +524694 +40886 +571635 +487774 +86220 +95677 +268987 +502599 +155270 +103855 +125100 +241355 +220214 +391774 +110618 +154587 +134483 +458781 +360877 +465963 +194595 +346934 +127153 +188078 +553869 +102665 +400547 +33759 +42779 +397587 +140295 +151807 +549136 +470288 +89738 +328368 +546934 +164255 +563683 +399988 +360951 +217303 +326781 +546133 +135399 +94666 +330037 +569839 +411070 +497466 +404805 +417854 +318442 +255036 +457230 +346863 +307438 +370448 +5124 
+152582 +38118 +12179 +58462 +308420 +329456 +74920 +250368 +186428 +556073 +111806 +361244 +80273 +230964 +156754 +503101 +75173 +389404 +195538 +88848 +286018 +245481 +140929 +533721 +268378 +70048 +315467 +46269 +372807 +192403 +387328 +163033 +481314 +65306 +192529 +321107 +112232 +441216 +412399 +565391 +220670 +61471 +463290 +346707 +67587 +147624 +13031 +396754 +278601 +439426 +42834 +281829 +376209 +353148 +556562 +97579 +217989 +319530 +82551 +235319 +431799 +53892 +52853 +54533 +88897 +225093 +386777 +546742 +273684 +413900 +245447 +577995 +16249 +188414 +485142 +199602 +89258 +109679 +502397 +14494 +13632 +51674 +244999 +305050 +455956 +426795 +560700 +327306 +410301 +343803 +539422 +156740 +527845 +100582 +9941 +466585 +61515 +231895 +157052 +41271 +148128 +141172 +320232 +78565 +539883 +391300 +365182 +322194 +116517 +323496 +473783 +519874 +440706 +361587 +265153 +329946 +342814 +32258 +153510 +194555 +309317 +245006 +300303 +97767 +218224 +370170 +290477 +207178 +456730 +209480 +513775 +199516 +581542 +32524 +416337 +96241 +506279 +422893 +248911 +509855 +355183 +201220 +234914 +333436 +68198 +429074 +328430 +160531 +467854 +280688 +140661 +349525 +267315 +565543 +313162 +25751 +232574 +560358 +505213 +494427 +160308 +287335 +99182 +413260 +558808 +290839 +122954 +229221 +192007 +243189 +117645 +552824 +366111 +102056 +356949 +566298 +97899 +422545 +343769 +13127 +179273 +104486 +37660 +304099 +517570 +20207 +36484 +36492 +155974 +107257 +534019 +522371 +222825 +96183 +509227 +302260 +95078 +280918 +367582 +317033 +347982 +73209 +290521 +187243 +425151 +483723 +573796 +187249 +144114 +132992 +35887 +546067 +426532 +45626 +461805 +129989 +541478 +485489 +578498 +485483 +144784 +248224 +372362 +92050 +423519 +473118 +177207 +105455 +276434 +157767 +384335 +509497 +338191 +224010 +327388 +96988 +43376 +67867 +320743 +555197 +104453 +14439 +512194 +396387 +252559 +108953 +461262 +66320 +97946 +238065 +306139 +572408 +577864 +81004 +464526 +89378 +193389 +259049 +85665 +381134 +412419 +308947 +557510 +502084 +288290 +254609 +188752 +439525 +13980 +140513 +240173 +305268 +38678 +394050 +402926 +364079 +159260 +293034 +55429 +289640 +291028 +211120 +48050 +93887 +361029 +486026 +388374 +207803 +540174 +530630 +430359 +36420 +120099 +199764 +492911 +84498 +200882 +139843 +4975 +421209 +259513 +520324 +211317 +236457 +419344 +3867 +287846 +50434 +26624 +507235 +16238 +103705 +497555 +440060 +175825 +245460 +308276 +178535 +391735 +206391 +201550 +400945 +194634 +262360 +554142 +407574 +225225 +246057 +498627 +486172 +226571 +461751 +459733 +345869 +503841 +286460 +45644 +22861 +285599 +580284 +569565 +286778 +150024 +542101 +484075 +538153 +20470 +128034 +544120 +357109 +450728 +550968 +326230 +558809 +76334 +555387 +47121 +523978 +11081 +378134 +116279 +364884 +488250 +551957 +322824 +545564 +255573 +286327 +355453 +361933 +434897 +32597 +226761 +166482 +557564 +208166 +232115 +283520 +137395 +555894 +103509 +174284 +458313 +316147 +344059 +370701 +548930 +89894 +373662 +572095 +19324 +574411 +45746 +480122 +63950 +92339 +201111 +157053 +401539 +427956 +339099 +274651 +159537 +556101 +323399 +564337 +514915 +556025 +66427 +322357 +173737 +369128 +420230 +45176 +509675 +374677 +272311 +109797 +384723 +383678 +453040 +91080 +301634 +533003 +40361 +221605 +216228 +104002 +161011 +146123 +214421 +496252 +264948 +9759 +138856 +316189 +145734 +50411 +325157 +259099 +516856 +529668 +135976 +467130 +367433 +385598 +520933 +102805 +30066 +436696 +216837 +380754 +350457 +126974 +565374 +73832 
+214703 +110501 +380609 +135872 +140231 +251816 +133836 +398866 +230362 +426815 +2240 +51484 +546325 +224093 +221190 +525024 +238806 +99908 +165795 +109146 +537727 +496571 +183803 +211175 +433845 +168692 +526394 +368402 +256309 +468972 +139169 +398440 +171678 +547341 +64332 +533589 +483249 +406000 +330348 +439188 +572886 +252829 +242724 +139127 +404568 +45809 +52257 +458727 +334509 +559665 +60992 +290896 +503106 +27972 +536891 +410855 +31202 +457882 +403315 +87399 +395291 +322141 +226377 +202799 +420826 +553034 +212077 +97693 +266370 +101656 +504142 +342933 +87567 +342060 +268854 +437028 +20175 +198625 +405047 +382374 +338291 +403975 +527906 +322429 +545550 +140043 +107389 +74059 +315621 +110138 +78381 +295576 +494438 +106335 +472349 +15818 +162358 +366484 +44604 +66524 +118606 +366873 +270721 +556478 +350789 +298628 +163314 +262800 +459428 +491725 +285421 +406332 +498280 +34535 +524282 +315744 +226592 +218294 +459141 +242034 +114164 +293733 +248242 +452881 +441496 +54358 +177489 +372861 +349489 +483941 +572802 +356494 +193875 +146570 +58253 +21338 +6220 +341933 +533368 +1818 +428248 +293026 +227656 +193021 +326938 +512966 +226020 +343059 +249720 +540106 +375278 +300023 +126512 +517135 +472540 +361439 +132702 +503294 +109537 +540669 +332007 +245266 +313999 +10386 +225715 +311567 +103837 +302405 +248616 +102654 +155087 +124756 +379659 +569272 +160166 +428234 +422280 +174425 +133412 +174503 +216581 +345063 +52949 +69536 +216161 +272728 +200870 +120792 +193480 +493923 +445567 +558539 +51938 +422706 +416271 +244160 +437898 +327352 +305480 +349459 +522418 +485219 +225133 +361400 +546569 +190015 +348216 +421822 +457683 +178683 +40894 +234526 +465074 +518725 +168096 +210190 +139605 +35195 +463640 +286770 +141651 +112022 +532552 +325327 +227224 +17272 +84163 +331475 +126065 +289309 +8583 +52952 +189427 +579693 +437947 +187565 +215982 +356424 +453731 +463522 +372316 +251797 +70187 +280515 +556608 +341635 +391067 +469480 +476298 +57917 +146672 +122747 +394328 +12209 +80013 +573291 +278449 +129659 +579560 +557190 +227468 +334782 +51157 +23774 +9426 +86582 +39211 +275751 +131597 +51250 +357255 +9041 +346482 +9647 +157019 +409016 +273416 +114414 +298172 +388854 +275025 +58079 +518034 +503518 +146710 +120632 +474680 +303713 +259097 +479630 +208318 +437298 +173704 +361831 +371638 +344279 +230175 +72507 +417980 +72621 +163057 +92894 +543525 +577364 +263696 +472732 +66027 +391584 +197745 +131019 +65604 +91318 +535934 +212646 +576354 +482071 +160556 +120129 +7260 +344881 +447548 +318193 +30383 +527002 +34904 +35677 +526222 +105261 +401897 +399452 +25660 +524595 +384512 +117543 +514600 +268944 +112664 +222340 +569058 +495332 +192153 +75591 +286711 +174888 +577065 +25508 +169972 +401820 +425475 +290700 +173091 +559101 +122418 +244124 +198645 +325519 +276437 +528276 +146614 +45574 +417804 +326420 +250594 +27353 +310407 +370103 +274957 +561160 +167598 +397166 +257458 +404546 +148392 +373396 +62230 +493522 +563665 +274240 +269815 +79024 +527427 +84674 +486788 +267690 +443347 +149304 +412285 +207041 +412916 +10764 +151338 +299000 +17882 +475510 +398188 +558213 +70493 +180779 +347210 +280211 +58146 +379022 +504125 +537604 +464858 +329573 +568623 +228309 +454444 +552775 +557884 +435671 +168706 +142257 +571437 +574845 +387773 +321008 +574208 +405811 +375426 +321887 +256852 +433554 +517029 +125870 +80395 +497139 +490008 +405279 +571857 +225738 +514913 +456239 +499402 +96440 +487607 +370999 +319617 +370233 +60760 +352703 +478575 +84170 +134112 +77689 +185036 +73738 +547502 +104782 +213276 +136908 +436273 +442149 
+355000 +374061 +249884 +105711 +136464 +146997 +76351 +388487 +99115 +124135 +24721 +132931 +1149 +182403 +386089 +81691 +480657 +441522 +60989 +268000 +55840 +514321 +577959 +359638 +457986 +533596 +60332 +367082 +772 +535842 +473541 +270677 +409009 +259216 +302318 +117036 +331372 +231125 +384486 +405214 +20760 +579760 +172995 +359110 +83110 +410068 +109916 +328757 +299261 +19028 +515660 +40757 +10256 +442695 +553097 +185903 +74388 +425120 +241326 +299609 +29397 +328728 +283881 +344029 +367336 +27075 +163628 +127263 +488979 +460147 +473050 +405762 +221547 +131581 +561187 +406489 +140696 +452721 +530466 +118965 +398803 +218365 +298738 +19441 +521550 +120157 +498687 +4754 +365866 +70865 +235156 +133386 +142742 +221183 +262391 +567053 +520982 +121349 +448779 +440354 +3983 +578993 +519691 +160703 +103307 +300408 +137106 +488377 +523660 +318022 +132578 +302520 +153040 +408817 +145227 +311190 +159662 +202923 +256775 +359864 +384848 +336404 +185303 +421703 +362682 +464622 +246590 +422729 +165500 +42563 +219216 +520232 +95063 +265547 +532686 +290558 +112591 +448211 +315281 +545475 +225850 +232460 +82740 +272880 +347254 +122047 +352151 +541486 +97249 +200252 +544782 +499571 +379014 +303534 +479909 +305464 +323682 +181524 +273855 +190783 +567801 +119752 +241503 +536429 +327323 +128756 +349868 +500495 +372260 +315824 +484986 +364993 +124759 +300124 +329319 +68628 +14549 +121897 +506595 +115709 +199610 +230150 +31717 +139549 +222332 +534161 +360393 +541664 +507167 +286523 +158660 +66926 +195750 +80022 +589 +252220 +47255 +247014 +49881 +455005 +232453 +445722 +516805 +544122 +541917 +469356 +370042 +130522 +502163 +307866 +408894 +524247 +52233 +177861 +348881 +357943 +295303 +475389 +431691 +61316 +143998 +503483 +340155 +488785 +133636 +133567 +251627 +470095 +34873 +88815 +261178 +468612 +127477 +157960 +15687 +303089 +572331 +456708 +190515 +126131 +239194 +332074 +129765 +107167 +478184 +421833 +359715 +112440 +331317 +74492 +505386 +247839 +534210 +134503 +422700 +352111 +98674 +546219 +520508 +503008 +461953 +101913 +362092 +22103 +359128 +316666 +335579 +414750 +297980 +365652 +53635 +547601 +97589 +570515 +7125 +99828 +321437 +80671 +426275 +294883 +212605 +424293 +338108 +25005 +6949 +234291 +428399 +7149 +343076 +575287 +431848 +307611 +293909 +542511 +564739 +573843 +356878 +472864 +336793 +121904 +161060 +254004 +269873 +216428 +77172 +346517 +498555 +203690 +348973 +117704 +552672 +275270 +208107 +314016 +427518 +278134 +53420 +318777 +238980 +350614 +467315 +61233 +272188 +550797 +125051 +553965 +187286 +282912 +102532 +156076 +467848 +130875 +531585 +523470 +507684 +332582 +438989 +489209 +125944 +127474 +371957 +570349 +283286 +541635 +547106 +253630 +388677 +572525 +542302 +554537 +367205 +228300 +443498 +356432 +123946 +490441 +211063 +224542 +116574 +434510 +33116 +353136 +134167 +128291 +542510 +433963 +147453 +365766 +374806 +336600 +38238 +165476 +535578 +127788 +157099 +173640 +114348 +496722 +58141 +467296 +235864 +5154 +22775 +422536 +136820 +453438 +446359 +41990 +422240 +39267 +391392 +233825 +308504 +478250 +87328 +4079 +127074 +267709 +377635 +353231 +185768 +487897 +124215 +249757 +341681 +557552 +280733 +374734 +281601 +456420 +222266 +491947 +432732 +467157 +94025 +410328 +428291 +397639 +163528 +234697 +557573 +208363 +515962 +358658 +373075 +438995 +425672 +450169 +216103 +254638 +288591 +53626 +43417 +372252 +5038 +218357 +120860 +399349 +485509 +530261 +477087 +352302 +96075 +495443 +133928 +197175 +134074 +212553 +448181 +152000 +254277 +105734 +75481 +343662 
+479350 +554347 +71090 +297426 +22176 +277622 +469235 +163041 +221272 +154263 +89296 +68411 +192871 +183217 +258141 +53058 +540529 +566414 +560948 +254535 +246076 +135972 +420069 +431023 +343643 +32682 +515176 +222635 +377155 +547041 +513283 +26017 +366096 +252133 +138078 +25685 +321798 +549361 +14088 +423048 +570810 +374974 +447501 +492544 +554046 +575357 +420791 +6019 +340451 +66800 +565575 +148055 +330432 +483038 +455004 +288765 +11034 +86988 +347142 +450559 +543581 +293757 +556901 +533032 +333020 +260266 +22420 +13948 +512657 +214124 +231236 +177149 +560879 +491793 +35767 +312878 +118542 +450596 +423773 +48653 +224523 +509577 +462677 +75405 +350023 +452122 +42008 +302555 +382309 +468483 +368684 +372580 +31333 +153697 +124876 +330023 +315672 +53990 +136533 +82815 +356836 +414821 +268717 +7333 +77544 +525373 +371042 +227048 +576327 +419309 +239773 +8119 +424135 +297425 +222711 +489909 +393995 +31019 +539326 +517612 +102461 +199989 +483374 +44952 +103863 +528980 +441543 +85381 +247234 +50924 +483994 +87456 +424271 +356091 +534669 +378831 +560662 +298773 +257896 +498274 +305800 +40517 +183949 +276840 +84442 +297620 +298252 +119088 +233315 +283977 +345154 +287649 +427311 +63399 +4700 +463611 +224104 +209388 +431655 +364190 +28864 +412455 +283290 +228541 +422200 +985 +133596 +323853 +503081 +130732 +224675 +199688 +230862 +21396 +485390 +1532 +125778 +235541 +370478 +522478 +514292 +384338 +531707 +178746 +532747 +62915 +519491 +140691 +112093 +358024 +263687 +297595 +506085 +102446 +325768 +29558 +222054 +466965 +316254 +546500 +216785 +194184 +464390 +348371 +231582 +208995 +464339 +308856 +340946 +214604 +570586 +182227 +248441 +89078 +376310 +73450 +115924 +308235 +15994 +8749 +429679 +37751 +122040 +284286 +388707 +248163 +11320 +427997 +282062 +237600 +376751 +223314 +86215 +12443 +163255 +564940 +462640 +522713 +306303 +460675 +126833 +26201 +224757 +357899 +546782 +96427 +480944 +479556 +569273 +520528 +190690 +344832 +462466 +270354 +559776 +279259 +280909 +227781 +163798 +491098 +439658 +416088 +107375 +74132 +379800 +511654 +346687 +226161 +578849 +544272 +146149 +570624 +178299 +126671 +356380 +530766 +175954 +158798 +422095 +55780 +512276 +560626 +187329 +513125 +347216 +306486 +161840 +180917 +188192 +421437 +93120 +324891 +252216 +488476 +578347 +101959 +10693 +170038 +213586 +210439 +469202 +381463 +343248 +127785 +287328 +538690 +16382 +293022 +112378 +435785 +56092 +381504 +284365 +406129 +233119 +53629 +188509 +191053 +81056 +82252 +538319 +38439 +181948 +439710 +529344 +434035 +342958 +563882 +37734 +364743 +330986 +546226 +463211 +62210 +442724 +232241 +293858 +119345 +61953 +577033 +522015 +381587 +350107 +4936 +511307 +228771 +177811 +231450 +176168 +84540 +259408 +264238 +539738 +255827 +459382 +221105 +431742 +204337 +227741 +336356 +37655 +167159 +59352 +165937 +53956 +378712 +88462 +495786 +542938 +566498 +367228 +157577 +442661 +62363 +390689 +480664 +521540 +414249 +20571 +160855 +451683 +156832 +570045 +326542 +568276 +568717 +563311 +113579 +218268 +546095 +160661 +341118 +150649 +462632 +198972 +220025 +61720 +430681 +524011 +457217 +40064 +285583 +314493 +78023 +470882 +298722 +555597 +489829 +314779 +367818 +138503 +243737 +580255 +444565 +386677 +190841 +493074 +234347 +466988 +227033 +519039 +351554 +390585 +443303 +140983 +81079 +538005 +169757 +368780 +457322 +341804 +409116 +181805 +284292 +551358 +344548 +503569 +336587 +417055 +522315 +58705 +148955 +375530 +474934 +577893 +28881 +360772 +445267 +244737 +355777 +72811 +190788 +54513 +243075 +518551 
+487530 +292169 +69293 +397303 +129285 +429996 +109532 +53802 +340573 +91280 +535602 +270908 +381925 +549220 +488573 +47131 +32735 +117525 +279085 +43961 +188906 +394677 +395 +185201 +189365 +127596 +32712 +504810 +3703 +182874 +146981 +306755 +453093 +520503 +169808 +225670 +91063 +348584 +461802 +572555 +185922 +131497 +46736 +536006 +256505 +214975 +13445 +350736 +98115 +50304 +361180 +511333 +564820 +429717 +222500 +40083 +538230 +349438 +371250 +528578 +240418 +302380 +261758 +535809 +308388 +578878 +509451 +46919 +562592 +499950 +90374 +318146 +195353 +355325 +314515 +237277 +203024 +238911 +32039 +145591 +16030 +135411 +229350 +421757 +48034 +183704 +307292 +97974 +275999 +448256 +451915 +119113 +143503 +494141 +50124 +306553 +35526 +255279 +560908 +247264 +367599 +192782 +511324 +574350 +67569 +204360 +111907 +2839 +513971 +245201 +185240 +339468 +540101 +539673 +194425 +22168 +520150 +301595 +96006 +68286 +131280 +356662 +182441 +284749 +107108 +49761 +386718 +55244 +187990 +248678 +147721 +425727 +360350 +310797 +76765 +400489 +247639 +279864 +44699 +356145 +69138 +445041 +560598 +165464 +536343 +7818 +322831 +334760 +451463 +348730 +285967 +286353 +201887 +166165 +359 +465591 +519359 +550444 +402711 +3661 +132706 +534983 +306281 +150317 +15978 +580029 +496090 +267127 +210980 +384015 +222559 +2235 +255649 +278168 +440840 +27326 +202562 +230268 +362712 +1573 +107661 +464515 +373132 +447242 +547440 +43613 +200143 +260883 +250901 +64693 +408480 +204757 +319933 +147471 +381332 +518197 +27656 +260257 +434580 +159203 +568630 +497441 +499597 +60179 +574804 +343254 +501762 +220704 +524536 +86946 +456046 +62937 +49633 +144305 +475593 +478553 +574145 +63648 +3794 +303177 +1340 +82835 +371427 +156747 +448694 +219567 +75095 +242615 +492077 +132776 +199125 +349622 +195754 +455548 +181873 +138185 +338044 +362797 +180953 +505826 +69773 +304834 +162580 +154090 +519853 +319687 +132328 +27969 +52166 +100547 +568131 +415218 +348045 +478159 +402869 +10211 +26547 +551692 +105432 +313340 +182348 +383419 +570947 +345353 +226883 +255784 +214199 +262262 +283261 +449708 +299970 +392391 +245997 +330410 +343571 +519542 +37470 +42144 +342521 +498537 +10935 +443860 +512648 +146099 +98599 +123932 +489861 +262895 +184700 +218587 +363581 +21001 +481404 +249356 +64240 +492349 +199236 +481064 +353405 +116479 +132024 +138768 +524665 +434511 +326970 +138784 +340368 +312081 +366615 +171942 +21232 +473850 +93686 +295574 +51054 +162692 +174091 +20070 +270066 +492816 +20904 +484500 +147140 +242972 +420081 +63563 +261712 +316396 +49413 +520787 +510955 +393840 +142487 +19817 +261180 +413736 +230619 +484614 +337011 +496575 +4338 +552545 +5601 +75426 +568863 +184227 +170629 +438567 +505132 +541353 +284674 +322567 +182423 +312051 +18896 +40471 +321725 +188850 +37119 +95569 +187362 +397133 +528972 +487131 +174989 +370325 +223554 +385633 +103485 +537574 +63240 +256566 +86467 +401092 +486968 +308441 +280017 +527464 +131965 +310479 +125556 +220160 +532963 +310052 +107963 +293841 +388534 +45603 +368949 +391825 +5107 +569705 +231549 +250108 +152933 +206433 +358817 +434006 +283904 +152808 +539975 +24629 +410231 +13465 +502318 +51961 +445594 +209062 +38726 +295420 +430079 +240147 +561512 +35795 +102589 +505619 +565469 +271772 +520561 +372300 +178807 +492805 +1083 +303704 +125635 +217521 +278032 +208688 +335325 +140435 +313990 +143822 +320857 +549230 +76844 +424219 +463876 +243199 +2988 +215170 +30012 +377738 +408568 +490624 +404839 +138316 +157206 +404461 +122934 +263346 +21327 +99913 +67975 +339676 +391891 +365305 +337055 +233834 
+125524 +46869 +32577 +304744 +104176 +167356 +210404 +307989 +217223 +196046 +454414 +16356 +244487 +543660 +197461 +199681 +476787 +455085 +307074 +260547 +107468 +334769 +29437 +166837 +53838 +502979 +82678 +288860 +535523 +311950 +237723 +98656 +223123 +273930 +58057 +544334 +324857 +198043 +535326 +316505 +12991 +576820 +43611 +107839 +275749 +456695 +78188 +375786 +466239 +184830 +537128 +434513 +244344 +374576 +69140 +434247 +555009 +510857 +220819 +20598 +99416 +74967 +533129 +515577 +213361 +330974 +548848 +431557 +503278 +130043 +402570 +320554 +559884 +252629 +364596 +423484 +271230 +105552 +143143 +285751 +49994 +204162 +80646 +381393 +123415 +118417 +30932 +425412 +388130 +551243 +468337 +484893 +25014 +174390 +463781 +124647 +60823 +361964 +425702 +575110 +532390 +230881 +84592 +189997 +221307 +361472 +32364 +71918 +316365 +492378 +234251 +48504 +418070 +89884 +562045 +506552 +66360 +122962 +262605 +529939 +345229 +294853 +344397 +56091 +8599 +459823 +175785 +226128 +259983 +354515 +379144 +384995 +205253 +116786 +441432 +448810 +83452 +465129 +506906 +90616 +551959 +406404 +157891 +362090 +439630 +45099 +61960 +478430 +489605 +127050 +579872 +475798 +64510 +447733 +33066 +102848 +538819 +323760 +200401 +179765 +251317 +239376 +83836 +578092 +522452 +393056 +278848 +27787 +377239 +473427 +83065 +377005 +576539 +248019 +473370 +536369 +92648 +332461 +437609 +274800 +388846 +323048 +193407 +541898 +480140 +46526 +26432 +339738 +325991 +37705 +528033 +542922 +313420 +190463 +531000 +454907 +26448 +238199 +476652 +457147 +364256 +72632 +430380 +315448 +353320 +18158 +91527 +454252 +546987 +386370 +38064 +19763 +64152 +453216 +55223 +361860 +522566 +509531 +438432 +31164 +163290 +389197 +333440 +173464 +447842 +381615 +99961 +156126 +103134 +394940 +165638 +261706 +378311 +534081 +373848 +401642 +338019 +378096 +289610 +547421 +174672 +133343 +191360 +293751 +520892 +145214 +167668 +37456 +460962 +465267 +292804 +347529 +203661 +10766 +27371 +203845 +155736 +136715 +463588 +26640 +547612 +131453 +184274 +442456 +265085 +223256 +129420 +23019 +536467 +194532 +127585 +392637 +330408 +524775 +31993 +433924 +502852 +553129 +559364 +297343 +71360 +225537 +271148 +345499 +475893 +237463 +5278 +501243 +413235 +444236 +541071 +380088 +468063 +94858 +225913 +295614 +210276 +170975 +205570 +422375 +550365 +308702 +484627 +565031 +98979 +480345 +579548 +272673 +436875 +287874 +16502 +274917 +281809 +442968 +289263 +347766 +160933 +84533 +266409 +122199 +396200 +30958 +504541 +1591 +89432 +387150 +306383 +15260 +154515 +50752 +166913 +102644 +100196 +160278 +349579 +442536 +17923 +310564 +62020 +152004 +578330 +126299 +527025 +83494 +226400 +268435 +445334 +310391 +505156 +19157 +44677 +318171 +447765 +354369 +527486 +329939 +184771 +134856 +467675 +517133 +89697 +447080 +70685 +144938 +519673 +485758 +454957 +564851 +189451 +408757 +192616 +280734 +305060 +243946 +99179 +303971 +170519 +48917 +549965 +300245 +384101 +576607 +186709 +516341 +241668 +133470 +134811 +500825 +464689 +29833 +343820 +213429 +387434 +279305 +444207 +210777 +372043 +189868 +572229 +8495 +370090 +450282 +277080 +199158 +109612 +567708 +245659 +485129 +268363 +23448 +5352 +235597 +6871 +348720 +94113 +314613 +63729 +114458 +215394 +460460 +240387 +398726 +135604 +571728 +415770 +286908 +138151 +146272 +344094 +345209 +241187 +282768 +113037 +545583 +219283 +145873 +285957 +489235 +157271 +197458 +502671 +499845 +334884 +79084 +505573 +115618 +561491 +354202 +279838 +190734 +134738 +269450 +482784 +144610 +52774 
+290659 +440646 +25807 +442952 +159215 +318224 +73445 +211653 +527960 +401862 +431026 +488755 +292278 +400554 +272630 +382668 +470298 +166426 +129645 +28820 +161227 +417696 +560677 +283216 +28978 +310302 +154419 +230450 +328289 +73118 +104691 +15085 +405574 +510548 +470005 +102928 +569249 +413126 +77282 +96732 +359020 +42182 +250875 +106206 +354929 +320796 +453341 +237318 +254834 +137265 +399865 +292685 +152252 +319579 +81484 +16599 +162257 +351034 +396051 +502275 +308278 +34483 +13333 +320290 +321579 +349794 +99219 +200162 +369470 +487583 +62703 +251639 +138246 +157170 +477112 +283963 +74860 +307057 +364075 +295491 +34757 +400161 +170194 +120874 +492817 +3817 +183973 +135436 +512989 +114744 +379210 +201072 +293785 +578385 +237420 +7888 +18224 +155317 +522406 +441440 +110482 +173400 +183348 +552504 +475660 +166948 +147025 +443259 +578792 +245227 +546687 +474519 +393284 +249668 +87493 +151651 +100306 +540466 +546556 +212675 +282942 +21310 +385535 +7304 +303409 +386116 +574297 +514550 +217133 +533553 +447152 +578703 +45392 +166205 +180154 +25143 +338802 +330110 +261389 +343506 +442726 +285388 +554934 +421316 +479912 +85192 +34874 +487266 +226173 +20748 +360660 +574509 +543364 +1554 +125539 +566931 +312889 +466945 +444804 +257187 +568587 +427160 +71123 +563849 +138589 +162841 +129663 +107226 +140686 +321663 +437117 +179808 +321718 +62398 +16497 +468933 +219841 +355430 +293554 +293044 +109516 +485887 +490620 +579893 +427135 +31636 +217919 +432441 +314396 +119802 +393682 +201764 +146193 +116358 +84825 +208311 +419774 +177468 +72052 +142585 +519598 +464006 +556083 +412136 +169361 +442929 +84567 +549932 +75560 +74656 +93314 +393838 +383018 +372433 +431281 +556278 +5513 +108503 +500478 +148588 +138713 +368153 +22646 +303778 +270758 +276706 +275429 +492025 +169111 +494328 +35891 +70258 +400528 +165229 +460494 +269311 +307658 +98283 +369294 +319345 +414578 +541550 +425388 +129855 +99477 +383073 +387906 +293124 +155873 +549224 +266021 +52869 +1584 +421902 +498535 +277235 +153013 +452013 +553561 +138040 +20820 +58483 +423506 +569001 +325153 +383039 +213421 +38825 +453283 +384661 +127702 +238147 +104893 +577826 +64974 +240655 +459153 +145665 +49810 +65008 +545385 +125070 +46433 +143329 +429174 +52947 +321314 +253341 +157365 +453162 +111910 +339019 +239575 +362219 +80652 +247317 +460286 +365724 +160875 +372220 +483389 +572181 +146190 +580975 +54761 +348488 +416104 +468778 +18833 +251537 +234366 +510078 +14723 +338595 +153797 +513098 +467138 +404618 +261982 +545730 +135846 +108244 +562557 +180524 +227370 +341856 +131743 +255691 +497878 +68878 +430640 +441473 +347664 +214369 +347018 +225238 +421762 +317024 +6180 +172004 +303101 +22488 +193494 +199346 +409627 +315350 +263463 +190722 +523292 +363902 +573778 +437290 +389812 +517082 +145073 +37907 +489763 +456261 +270386 +508917 +566823 +543897 +362482 +130966 +66632 +181962 +274613 +135708 +549746 +323766 +366714 +353295 +318813 +153307 +213693 +293378 +149446 +199927 +580543 +331727 +238488 +472833 +308645 +424225 +228746 +110435 +495377 +240646 +274491 +130921 +140006 +4688 +115241 +76962 +66650 +47718 +224991 +434187 +272048 +11169 +158222 +154000 +507436 +443499 +109937 +309692 +534018 +22797 +163339 +168683 +210098 +246069 +137954 +143320 +262587 +414795 +226938 +536831 +128791 +459590 +50514 +30067 +317479 +378655 +229968 +522702 +11122 +515266 +136600 +224509 +149912 +97656 +120747 +349480 +155199 +528731 +523807 +168544 +325664 +229981 +434410 +431208 +508996 +63791 +89225 +513690 +136740 +224364 +515424 +508302 +418175 +465552 +439907 +272097 
+451087 +396304 +342273 +52507 +300066 +380089 +326248 +167906 +37846 +262993 +60090 +499249 +90432 +74456 +264660 +325598 +480985 +245411 +425644 +224724 +475439 +246478 +487438 +563731 +441854 +522665 +245915 +85747 +315162 +108761 +407521 +388528 +389453 +298331 +447791 +368820 +440034 +305677 +122208 +182369 +543531 +151820 +63650 +457580 +563381 +320899 +14869 +137260 +61925 +376307 +80367 +269089 +203705 +274835 +267321 +418106 +471273 +74037 +227855 +519758 +89045 +321217 +324203 +479129 +503431 +368528 +527718 +278579 +13525 +291582 +301837 +31667 +68120 +14007 +114158 +124262 +33626 +53949 +187585 +192247 +208844 +212766 +318671 +575012 +439339 +364073 +419624 +178078 +427783 +302159 +339368 +190680 +23807 +288579 +312720 +15778 +553558 +571834 +574376 +122161 +493815 +472376 +483432 +149123 +51628 +264628 +26609 +23696 +485081 +441323 +451679 +42055 +378795 +86439 +366493 +520996 +332869 +18014 +554523 +83476 +6040 +421834 +424392 +308160 +335233 +249809 +349098 +358090 +187349 +61782 +35498 +386514 +207108 +578418 +84447 +104108 +126107 +211674 +111909 +490708 +477025 +206757 +556205 +142484 +454296 +464366 +358254 +215482 +468548 +82680 +100909 +405432 +85764 +94651 +63973 +8131 +288592 +257470 +47597 +321557 +34520 +134066 +246701 +317797 +282365 +78176 +29577 +311075 +331937 +190395 +5802 +245112 +111032 +140556 +199127 +376491 +305253 +300375 +545903 +357782 +377911 +74963 +329336 +25057 +3244 +252020 +293474 +171050 +239306 +189772 +238090 +160031 +36761 +445675 +252716 +152214 +239466 +55155 +479829 +420281 +445812 +118106 +434576 +451104 +316708 +438535 +300322 +167952 +390072 +487220 +20247 +9400 +43944 +35770 +487351 +425462 +212203 +9668 +8981 +574241 +332096 +535563 +192944 +498733 +276151 +550645 +507037 +9769 +404249 +236747 +376416 +306415 +45966 +191296 +576875 +493932 +225075 +536444 +79920 +561681 +60700 +99874 +219437 +509819 +466665 +579326 +428739 +394611 +263083 +379554 +279391 +178516 +133690 +77396 +300137 +6861 +435359 +314108 +444152 +500139 +92749 +89188 +300233 +414201 +443204 +211097 diff --git a/models/research/object_detection/data/oid_bbox_trainable_label_map.pbtxt b/models/research/object_detection/data/oid_bbox_trainable_label_map.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..863e4f31d719cd148fd56c981e219257334f9c7e --- /dev/null +++ b/models/research/object_detection/data/oid_bbox_trainable_label_map.pbtxt @@ -0,0 +1,2725 @@ +item { + name: "/m/01g317" + id: 1 + display_name: "Person" +} +item { + name: "/m/09j2d" + id: 2 + display_name: "Clothing" +} +item { + name: "/m/04yx4" + id: 3 + display_name: "Man" +} +item { + name: "/m/0dzct" + id: 4 + display_name: "Face" +} +item { + name: "/m/07j7r" + id: 5 + display_name: "Tree" +} +item { + name: "/m/05s2s" + id: 6 + display_name: "Plant" +} +item { + name: "/m/03bt1vf" + id: 7 + display_name: "Woman" +} +item { + name: "/m/07yv9" + id: 8 + display_name: "Vehicle" +} +item { + name: "/m/0cgh4" + id: 9 + display_name: "Building" +} +item { + name: "/m/01prls" + id: 10 + display_name: "Land vehicle" +} +item { + name: "/m/09j5n" + id: 11 + display_name: "Footwear" +} +item { + name: "/m/05r655" + id: 12 + display_name: "Girl" +} +item { + name: "/m/0jbk" + id: 13 + display_name: "Animal" +} +item { + name: "/m/0k4j" + id: 14 + display_name: "Car" +} +item { + name: "/m/02wbm" + id: 15 + display_name: "Food" +} +item { + name: "/m/083wq" + id: 16 + display_name: "Wheel" +} +item { + name: "/m/0c9ph5" + id: 17 + display_name: "Flower" +} +item { + name: "/m/0c_jw" + id: 
18 + display_name: "Furniture" +} +item { + name: "/m/0d4v4" + id: 19 + display_name: "Window" +} +item { + name: "/m/03jm5" + id: 20 + display_name: "House" +} +item { + name: "/m/01bl7v" + id: 21 + display_name: "Boy" +} +item { + name: "/m/0463sg" + id: 22 + display_name: "Fashion accessory" +} +item { + name: "/m/04bcr3" + id: 23 + display_name: "Table" +} +item { + name: "/m/0jyfg" + id: 24 + display_name: "Glasses" +} +item { + name: "/m/01xyhv" + id: 25 + display_name: "Suit" +} +item { + name: "/m/08dz3q" + id: 26 + display_name: "Auto part" +} +item { + name: "/m/015p6" + id: 27 + display_name: "Bird" +} +item { + name: "/m/05y5lj" + id: 28 + display_name: "Sports equipment" +} +item { + name: "/m/01d40f" + id: 29 + display_name: "Dress" +} +item { + name: "/m/0bt9lr" + id: 30 + display_name: "Dog" +} +item { + name: "/m/01lrl" + id: 31 + display_name: "Carnivore" +} +item { + name: "/m/02p0tk3" + id: 32 + display_name: "Human body" +} +item { + name: "/m/0fly7" + id: 33 + display_name: "Jeans" +} +item { + name: "/m/04szw" + id: 34 + display_name: "Musical instrument" +} +item { + name: "/m/0271t" + id: 35 + display_name: "Drink" +} +item { + name: "/m/019jd" + id: 36 + display_name: "Boat" +} +item { + name: "/m/03q69" + id: 37 + display_name: "Hair" +} +item { + name: "/m/0h9mv" + id: 38 + display_name: "Tire" +} +item { + name: "/m/04hgtk" + id: 39 + display_name: "Head" +} +item { + name: "/m/01yrx" + id: 40 + display_name: "Cat" +} +item { + name: "/m/01rzcn" + id: 41 + display_name: "Watercraft" +} +item { + name: "/m/01mzpv" + id: 42 + display_name: "Chair" +} +item { + name: "/m/0199g" + id: 43 + display_name: "Bike" +} +item { + name: "/m/01fdzj" + id: 44 + display_name: "Tower" +} +item { + name: "/m/04rky" + id: 45 + display_name: "Mammal" +} +item { + name: "/m/079cl" + id: 46 + display_name: "Skyscraper" +} +item { + name: "/m/0dzf4" + id: 47 + display_name: "Arm" +} +item { + name: "/m/0138tl" + id: 48 + display_name: "Toy" +} +item { + name: "/m/06msq" + id: 49 + display_name: "Sculpture" +} +item { + name: "/m/03xxp" + id: 50 + display_name: "Invertebrate" +} +item { + name: "/m/0hg7b" + id: 51 + display_name: "Microphone" +} +item { + name: "/m/01n5jq" + id: 52 + display_name: "Poster" +} +item { + name: "/m/03vt0" + id: 53 + display_name: "Insect" +} +item { + name: "/m/0342h" + id: 54 + display_name: "Guitar" +} +item { + name: "/m/0k0pj" + id: 55 + display_name: "Nose" +} +item { + name: "/m/02dl1y" + id: 56 + display_name: "Hat" +} +item { + name: "/m/04brg2" + id: 57 + display_name: "Tableware" +} +item { + name: "/m/02dgv" + id: 58 + display_name: "Door" +} +item { + name: "/m/01bqk0" + id: 59 + display_name: "Bicycle wheel" +} +item { + name: "/m/017ftj" + id: 60 + display_name: "Sunglasses" +} +item { + name: "/m/052lwg6" + id: 61 + display_name: "Baked goods" +} +item { + name: "/m/014sv8" + id: 62 + display_name: "Eye" +} +item { + name: "/m/0270h" + id: 63 + display_name: "Dessert" +} +item { + name: "/m/0283dt1" + id: 64 + display_name: "Mouth" +} +item { + name: "/m/0k5j" + id: 65 + display_name: "Aircraft" +} +item { + name: "/m/0cmf2" + id: 66 + display_name: "Airplane" +} +item { + name: "/m/07jdr" + id: 67 + display_name: "Train" +} +item { + name: "/m/032b3c" + id: 68 + display_name: "Jacket" +} +item { + name: "/m/033rq4" + id: 69 + display_name: "Street light" +} +item { + name: "/m/0k65p" + id: 70 + display_name: "Hand" +} +item { + name: "/m/01ww8y" + id: 71 + display_name: "Snack" +} +item { + name: "/m/0zvk5" + id: 72 + display_name: 
"Helmet" +} +item { + name: "/m/07mhn" + id: 73 + display_name: "Trousers" +} +item { + name: "/m/04dr76w" + id: 74 + display_name: "Bottle" +} +item { + name: "/m/03fp41" + id: 75 + display_name: "Houseplant" +} +item { + name: "/m/03k3r" + id: 76 + display_name: "Horse" +} +item { + name: "/m/01y9k5" + id: 77 + display_name: "Desk" +} +item { + name: "/m/0cdl1" + id: 78 + display_name: "Palm tree" +} +item { + name: "/m/0f4s2w" + id: 79 + display_name: "Vegetable" +} +item { + name: "/m/02xwb" + id: 80 + display_name: "Fruit" +} +item { + name: "/m/035r7c" + id: 81 + display_name: "Leg" +} +item { + name: "/m/0bt_c3" + id: 82 + display_name: "Book" +} +item { + name: "/m/01_bhs" + id: 83 + display_name: "Fast food" +} +item { + name: "/m/01599" + id: 84 + display_name: "Beer" +} +item { + name: "/m/03120" + id: 85 + display_name: "Flag" +} +item { + name: "/m/026t6" + id: 86 + display_name: "Drum" +} +item { + name: "/m/01bjv" + id: 87 + display_name: "Bus" +} +item { + name: "/m/07r04" + id: 88 + display_name: "Truck" +} +item { + name: "/m/018xm" + id: 89 + display_name: "Ball" +} +item { + name: "/m/01rkbr" + id: 90 + display_name: "Tie" +} +item { + name: "/m/0fm3zh" + id: 91 + display_name: "Flowerpot" +} +item { + name: "/m/02_n6y" + id: 92 + display_name: "Goggles" +} +item { + name: "/m/04_sv" + id: 93 + display_name: "Motorcycle" +} +item { + name: "/m/06z37_" + id: 94 + display_name: "Picture frame" +} +item { + name: "/m/01bfm9" + id: 95 + display_name: "Shorts" +} +item { + name: "/m/0h8mhzd" + id: 96 + display_name: "Sports uniform" +} +item { + name: "/m/0d_2m" + id: 97 + display_name: "Moths and butterflies" +} +item { + name: "/m/0gjbg72" + id: 98 + display_name: "Shelf" +} +item { + name: "/m/01n4qj" + id: 99 + display_name: "Shirt" +} +item { + name: "/m/0ch_cf" + id: 100 + display_name: "Fish" +} +item { + name: "/m/06m11" + id: 101 + display_name: "Rose" +} +item { + name: "/m/01jfm_" + id: 102 + display_name: "Licence plate" +} +item { + name: "/m/02crq1" + id: 103 + display_name: "Couch" +} +item { + name: "/m/083kb" + id: 104 + display_name: "Weapon" +} +item { + name: "/m/01c648" + id: 105 + display_name: "Laptop" +} +item { + name: "/m/09tvcd" + id: 106 + display_name: "Wine glass" +} +item { + name: "/m/0h2r6" + id: 107 + display_name: "Van" +} +item { + name: "/m/081qc" + id: 108 + display_name: "Wine" +} +item { + name: "/m/09ddx" + id: 109 + display_name: "Duck" +} +item { + name: "/m/03p3bw" + id: 110 + display_name: "Bicycle helmet" +} +item { + name: "/m/0cyf8" + id: 111 + display_name: "Butterfly" +} +item { + name: "/m/0b_rs" + id: 112 + display_name: "Swimming pool" +} +item { + name: "/m/039xj_" + id: 113 + display_name: "Ear" +} +item { + name: "/m/021sj1" + id: 114 + display_name: "Office" +} +item { + name: "/m/0dv5r" + id: 115 + display_name: "Camera" +} +item { + name: "/m/01lynh" + id: 116 + display_name: "Stairs" +} +item { + name: "/m/06bt6" + id: 117 + display_name: "Reptile" +} +item { + name: "/m/01226z" + id: 118 + display_name: "Football" +} +item { + name: "/m/0fszt" + id: 119 + display_name: "Cake" +} +item { + name: "/m/050k8" + id: 120 + display_name: "Mobile phone" +} +item { + name: "/m/02wbtzl" + id: 121 + display_name: "Sun hat" +} +item { + name: "/m/02p5f1q" + id: 122 + display_name: "Coffee cup" +} +item { + name: "/m/025nd" + id: 123 + display_name: "Christmas tree" +} +item { + name: "/m/02522" + id: 124 + display_name: "Computer monitor" +} +item { + name: "/m/09ct_" + id: 125 + display_name: "Helicopter" +} +item { + name: 
"/m/0cvnqh" + id: 126 + display_name: "Bench" +} +item { + name: "/m/0d5gx" + id: 127 + display_name: "Castle" +} +item { + name: "/m/01xygc" + id: 128 + display_name: "Coat" +} +item { + name: "/m/04m6gz" + id: 129 + display_name: "Porch" +} +item { + name: "/m/01gkx_" + id: 130 + display_name: "Swimwear" +} +item { + name: "/m/01s105" + id: 131 + display_name: "Cabinetry" +} +item { + name: "/m/01j61q" + id: 132 + display_name: "Tent" +} +item { + name: "/m/0hnnb" + id: 133 + display_name: "Umbrella" +} +item { + name: "/m/01j51" + id: 134 + display_name: "Balloon" +} +item { + name: "/m/01knjb" + id: 135 + display_name: "Billboard" +} +item { + name: "/m/03__z0" + id: 136 + display_name: "Bookcase" +} +item { + name: "/m/01m2v" + id: 137 + display_name: "Computer keyboard" +} +item { + name: "/m/0167gd" + id: 138 + display_name: "Doll" +} +item { + name: "/m/0284d" + id: 139 + display_name: "Dairy" +} +item { + name: "/m/03ssj5" + id: 140 + display_name: "Bed" +} +item { + name: "/m/02fq_6" + id: 141 + display_name: "Fedora" +} +item { + name: "/m/06nwz" + id: 142 + display_name: "Seafood" +} +item { + name: "/m/0220r2" + id: 143 + display_name: "Fountain" +} +item { + name: "/m/01mqdt" + id: 144 + display_name: "Traffic sign" +} +item { + name: "/m/0268lbt" + id: 145 + display_name: "Hiking equipment" +} +item { + name: "/m/07c52" + id: 146 + display_name: "Television" +} +item { + name: "/m/0grw1" + id: 147 + display_name: "Salad" +} +item { + name: "/m/01h3n" + id: 148 + display_name: "Bee" +} +item { + name: "/m/078n6m" + id: 149 + display_name: "Coffee table" +} +item { + name: "/m/01xq0k1" + id: 150 + display_name: "Cattle" +} +item { + name: "/m/0gd2v" + id: 151 + display_name: "Marine mammal" +} +item { + name: "/m/0dbvp" + id: 152 + display_name: "Goose" +} +item { + name: "/m/03rszm" + id: 153 + display_name: "Curtain" +} +item { + name: "/m/0h8n5zk" + id: 154 + display_name: "Kitchen & dining room table" +} +item { + name: "/m/019dx1" + id: 155 + display_name: "Home appliance" +} +item { + name: "/m/03hl4l9" + id: 156 + display_name: "Marine invertebrates" +} +item { + name: "/m/0b3fp9" + id: 157 + display_name: "Countertop" +} +item { + name: "/m/02rdsp" + id: 158 + display_name: "Office supplies" +} +item { + name: "/m/0hf58v5" + id: 159 + display_name: "Luggage and bags" +} +item { + name: "/m/04h7h" + id: 160 + display_name: "Lighthouse" +} +item { + name: "/m/024g6" + id: 161 + display_name: "Cocktail" +} +item { + name: "/m/0cffdh" + id: 162 + display_name: "Maple" +} +item { + name: "/m/03q5c7" + id: 163 + display_name: "Saucer" +} +item { + name: "/m/014y4n" + id: 164 + display_name: "Paddle" +} +item { + name: "/m/01yx86" + id: 165 + display_name: "Bronze sculpture" +} +item { + name: "/m/020jm" + id: 166 + display_name: "Beetle" +} +item { + name: "/m/025dyy" + id: 167 + display_name: "Box" +} +item { + name: "/m/01llwg" + id: 168 + display_name: "Necklace" +} +item { + name: "/m/08pbxl" + id: 169 + display_name: "Monkey" +} +item { + name: "/m/02d9qx" + id: 170 + display_name: "Whiteboard" +} +item { + name: "/m/02pkr5" + id: 171 + display_name: "Plumbing fixture" +} +item { + name: "/m/0h99cwc" + id: 172 + display_name: "Kitchen appliance" +} +item { + name: "/m/050gv4" + id: 173 + display_name: "Plate" +} +item { + name: "/m/02vqfm" + id: 174 + display_name: "Coffee" +} +item { + name: "/m/09kx5" + id: 175 + display_name: "Deer" +} +item { + name: "/m/019w40" + id: 176 + display_name: "Surfboard" +} +item { + name: "/m/09dzg" + id: 177 + display_name: "Turtle" +} 
+item { + name: "/m/07k1x" + id: 178 + display_name: "Tool" +} +item { + name: "/m/080hkjn" + id: 179 + display_name: "Handbag" +} +item { + name: "/m/07qxg_" + id: 180 + display_name: "Football helmet" +} +item { + name: "/m/0ph39" + id: 181 + display_name: "Canoe" +} +item { + name: "/m/018p4k" + id: 182 + display_name: "Cart" +} +item { + name: "/m/02h19r" + id: 183 + display_name: "Scarf" +} +item { + name: "/m/015h_t" + id: 184 + display_name: "Beard" +} +item { + name: "/m/0fqfqc" + id: 185 + display_name: "Drawer" +} +item { + name: "/m/025rp__" + id: 186 + display_name: "Cowboy hat" +} +item { + name: "/m/01x3z" + id: 187 + display_name: "Clock" +} +item { + name: "/m/0crjs" + id: 188 + display_name: "Convenience store" +} +item { + name: "/m/0l515" + id: 189 + display_name: "Sandwich" +} +item { + name: "/m/015qff" + id: 190 + display_name: "Traffic light" +} +item { + name: "/m/09kmb" + id: 191 + display_name: "Spider" +} +item { + name: "/m/09728" + id: 192 + display_name: "Bread" +} +item { + name: "/m/071qp" + id: 193 + display_name: "Squirrel" +} +item { + name: "/m/02s195" + id: 194 + display_name: "Vase" +} +item { + name: "/m/06c54" + id: 195 + display_name: "Rifle" +} +item { + name: "/m/01xqw" + id: 196 + display_name: "Cello" +} +item { + name: "/m/05zsy" + id: 197 + display_name: "Pumpkin" +} +item { + name: "/m/0bwd_0j" + id: 198 + display_name: "Elephant" +} +item { + name: "/m/04m9y" + id: 199 + display_name: "Lizard" +} +item { + name: "/m/052sf" + id: 200 + display_name: "Mushroom" +} +item { + name: "/m/03grzl" + id: 201 + display_name: "Baseball glove" +} +item { + name: "/m/01z1kdw" + id: 202 + display_name: "Juice" +} +item { + name: "/m/02wv6h6" + id: 203 + display_name: "Skirt" +} +item { + name: "/m/016m2d" + id: 204 + display_name: "Skull" +} +item { + name: "/m/0dtln" + id: 205 + display_name: "Lamp" +} +item { + name: "/m/057cc" + id: 206 + display_name: "Musical keyboard" +} +item { + name: "/m/06k2mb" + id: 207 + display_name: "High heels" +} +item { + name: "/m/0f6wt" + id: 208 + display_name: "Falcon" +} +item { + name: "/m/0cxn2" + id: 209 + display_name: "Ice cream" +} +item { + name: "/m/02jvh9" + id: 210 + display_name: "Mug" +} +item { + name: "/m/0gjkl" + id: 211 + display_name: "Watch" +} +item { + name: "/m/01b638" + id: 212 + display_name: "Boot" +} +item { + name: "/m/071p9" + id: 213 + display_name: "Ski" +} +item { + name: "/m/0pg52" + id: 214 + display_name: "Taxi" +} +item { + name: "/m/0ftb8" + id: 215 + display_name: "Sunflower" +} +item { + name: "/m/0hnyx" + id: 216 + display_name: "Pastry" +} +item { + name: "/m/02jz0l" + id: 217 + display_name: "Tap" +} +item { + name: "/m/04kkgm" + id: 218 + display_name: "Bowl" +} +item { + name: "/m/0174n1" + id: 219 + display_name: "Glove" +} +item { + name: "/m/0gv1x" + id: 220 + display_name: "Parrot" +} +item { + name: "/m/09csl" + id: 221 + display_name: "Eagle" +} +item { + name: "/m/02jnhm" + id: 222 + display_name: "Tin can" +} +item { + name: "/m/099ssp" + id: 223 + display_name: "Platter" +} +item { + name: "/m/03nfch" + id: 224 + display_name: "Sandal" +} +item { + name: "/m/07y_7" + id: 225 + display_name: "Violin" +} +item { + name: "/m/05z6w" + id: 226 + display_name: "Penguin" +} +item { + name: "/m/03m3pdh" + id: 227 + display_name: "Sofa bed" +} +item { + name: "/m/09ld4" + id: 228 + display_name: "Frog" +} +item { + name: "/m/09b5t" + id: 229 + display_name: "Chicken" +} +item { + name: "/m/054xkw" + id: 230 + display_name: "Lifejacket" +} +item { + name: "/m/0130jx" + id: 231 
+ display_name: "Sink" +} +item { + name: "/m/07fbm7" + id: 232 + display_name: "Strawberry" +} +item { + name: "/m/01dws" + id: 233 + display_name: "Bear" +} +item { + name: "/m/01tcjp" + id: 234 + display_name: "Muffin" +} +item { + name: "/m/0dftk" + id: 235 + display_name: "Swan" +} +item { + name: "/m/0c06p" + id: 236 + display_name: "Candle" +} +item { + name: "/m/034c16" + id: 237 + display_name: "Pillow" +} +item { + name: "/m/09d5_" + id: 238 + display_name: "Owl" +} +item { + name: "/m/03hlz0c" + id: 239 + display_name: "Kitchen utensil" +} +item { + name: "/m/0ft9s" + id: 240 + display_name: "Dragonfly" +} +item { + name: "/m/011k07" + id: 241 + display_name: "Tortoise" +} +item { + name: "/m/054_l" + id: 242 + display_name: "Mirror" +} +item { + name: "/m/0jqgx" + id: 243 + display_name: "Lily" +} +item { + name: "/m/0663v" + id: 244 + display_name: "Pizza" +} +item { + name: "/m/0242l" + id: 245 + display_name: "Coin" +} +item { + name: "/m/014trl" + id: 246 + display_name: "Cosmetics" +} +item { + name: "/m/05r5c" + id: 247 + display_name: "Piano" +} +item { + name: "/m/07j87" + id: 248 + display_name: "Tomato" +} +item { + name: "/m/05kyg_" + id: 249 + display_name: "Chest of drawers" +} +item { + name: "/m/0kmg4" + id: 250 + display_name: "Teddy bear" +} +item { + name: "/m/07cmd" + id: 251 + display_name: "Tank" +} +item { + name: "/m/0dv77" + id: 252 + display_name: "Squash" +} +item { + name: "/m/096mb" + id: 253 + display_name: "Lion" +} +item { + name: "/m/01gmv2" + id: 254 + display_name: "Brassiere" +} +item { + name: "/m/07bgp" + id: 255 + display_name: "Sheep" +} +item { + name: "/m/0cmx8" + id: 256 + display_name: "Spoon" +} +item { + name: "/m/029tx" + id: 257 + display_name: "Dinosaur" +} +item { + name: "/m/073bxn" + id: 258 + display_name: "Tripod" +} +item { + name: "/m/0bh9flk" + id: 259 + display_name: "Tablet computer" +} +item { + name: "/m/06mf6" + id: 260 + display_name: "Rabbit" +} +item { + name: "/m/06_fw" + id: 261 + display_name: "Skateboard" +} +item { + name: "/m/078jl" + id: 262 + display_name: "Snake" +} +item { + name: "/m/0fbdv" + id: 263 + display_name: "Shellfish" +} +item { + name: "/m/0h23m" + id: 264 + display_name: "Sparrow" +} +item { + name: "/m/014j1m" + id: 265 + display_name: "Apple" +} +item { + name: "/m/03fwl" + id: 266 + display_name: "Goat" +} +item { + name: "/m/02y6n" + id: 267 + display_name: "French fries" +} +item { + name: "/m/06c7f7" + id: 268 + display_name: "Lipstick" +} +item { + name: "/m/026qbn5" + id: 269 + display_name: "studio couch" +} +item { + name: "/m/0cdn1" + id: 270 + display_name: "Hamburger" +} +item { + name: "/m/07clx" + id: 271 + display_name: "Tea" +} +item { + name: "/m/07cx4" + id: 272 + display_name: "Telephone" +} +item { + name: "/m/03g8mr" + id: 273 + display_name: "Baseball bat" +} +item { + name: "/m/0cnyhnx" + id: 274 + display_name: "Bull" +} +item { + name: "/m/01b7fy" + id: 275 + display_name: "Headphones" +} +item { + name: "/m/04gth" + id: 276 + display_name: "Lavender" +} +item { + name: "/m/0cyfs" + id: 277 + display_name: "Parachute" +} +item { + name: "/m/021mn" + id: 278 + display_name: "Cookie" +} +item { + name: "/m/07dm6" + id: 279 + display_name: "Tiger" +} +item { + name: "/m/0k1tl" + id: 280 + display_name: "Pen" +} +item { + name: "/m/0dv9c" + id: 281 + display_name: "Racket" +} +item { + name: "/m/0dt3t" + id: 282 + display_name: "Fork" +} +item { + name: "/m/04yqq2" + id: 283 + display_name: "Bust" +} +item { + name: "/m/01cmb2" + id: 284 + display_name: "Miniskirt" +} 
+item { + name: "/m/0gd36" + id: 285 + display_name: "Sea lion" +} +item { + name: "/m/033cnk" + id: 286 + display_name: "Egg" +} +item { + name: "/m/06ncr" + id: 287 + display_name: "Saxophone" +} +item { + name: "/m/03bk1" + id: 288 + display_name: "Giraffe" +} +item { + name: "/m/0bjyj5" + id: 289 + display_name: "Waste container" +} +item { + name: "/m/06__v" + id: 290 + display_name: "Snowboard" +} +item { + name: "/m/0qmmr" + id: 291 + display_name: "Wheelchair" +} +item { + name: "/m/01xgg_" + id: 292 + display_name: "Medical equipment" +} +item { + name: "/m/0czz2" + id: 293 + display_name: "Antelope" +} +item { + name: "/m/02l8p9" + id: 294 + display_name: "Harbor seal" +} +item { + name: "/m/09g1w" + id: 295 + display_name: "Toilet" +} +item { + name: "/m/0ll1f78" + id: 296 + display_name: "Shrimp" +} +item { + name: "/m/0cyhj_" + id: 297 + display_name: "Orange" +} +item { + name: "/m/0642b4" + id: 298 + display_name: "Cupboard" +} +item { + name: "/m/0h8mzrc" + id: 299 + display_name: "Wall clock" +} +item { + name: "/m/068zj" + id: 300 + display_name: "Pig" +} +item { + name: "/m/02z51p" + id: 301 + display_name: "Nightstand" +} +item { + name: "/m/0h8nr_l" + id: 302 + display_name: "Bathroom accessory" +} +item { + name: "/m/0388q" + id: 303 + display_name: "Grape" +} +item { + name: "/m/02hj4" + id: 304 + display_name: "Dolphin" +} +item { + name: "/m/01jfsr" + id: 305 + display_name: "Lantern" +} +item { + name: "/m/07gql" + id: 306 + display_name: "Trumpet" +} +item { + name: "/m/0h8my_4" + id: 307 + display_name: "Tennis racket" +} +item { + name: "/m/0n28_" + id: 308 + display_name: "Crab" +} +item { + name: "/m/0120dh" + id: 309 + display_name: "Sea turtle" +} +item { + name: "/m/020kz" + id: 310 + display_name: "Cannon" +} +item { + name: "/m/0mkg" + id: 311 + display_name: "Accordion" +} +item { + name: "/m/03c7gz" + id: 312 + display_name: "Door handle" +} +item { + name: "/m/09k_b" + id: 313 + display_name: "Lemon" +} +item { + name: "/m/031n1" + id: 314 + display_name: "Foot" +} +item { + name: "/m/04rmv" + id: 315 + display_name: "Mouse" +} +item { + name: "/m/084rd" + id: 316 + display_name: "Wok" +} +item { + name: "/m/02rgn06" + id: 317 + display_name: "Volleyball" +} +item { + name: "/m/05z55" + id: 318 + display_name: "Pasta" +} +item { + name: "/m/01r546" + id: 319 + display_name: "Earrings" +} +item { + name: "/m/09qck" + id: 320 + display_name: "Banana" +} +item { + name: "/m/012w5l" + id: 321 + display_name: "Ladder" +} +item { + name: "/m/01940j" + id: 322 + display_name: "Backpack" +} +item { + name: "/m/09f_2" + id: 323 + display_name: "Crocodile" +} +item { + name: "/m/02p3w7d" + id: 324 + display_name: "Roller skates" +} +item { + name: "/m/057p5t" + id: 325 + display_name: "Scoreboard" +} +item { + name: "/m/0d8zb" + id: 326 + display_name: "Jellyfish" +} +item { + name: "/m/01nq26" + id: 327 + display_name: "Sock" +} +item { + name: "/m/01x_v" + id: 328 + display_name: "Camel" +} +item { + name: "/m/05gqfk" + id: 329 + display_name: "Plastic bag" +} +item { + name: "/m/0cydv" + id: 330 + display_name: "Caterpillar" +} +item { + name: "/m/07030" + id: 331 + display_name: "Sushi" +} +item { + name: "/m/084zz" + id: 332 + display_name: "Whale" +} +item { + name: "/m/0c29q" + id: 333 + display_name: "Leopard" +} +item { + name: "/m/02zn6n" + id: 334 + display_name: "Barrel" +} +item { + name: "/m/03tw93" + id: 335 + display_name: "Fireplace" +} +item { + name: "/m/0fqt361" + id: 336 + display_name: "Stool" +} +item { + name: "/m/0f9_l" + id: 337 + 
display_name: "Snail" +} +item { + name: "/m/0gm28" + id: 338 + display_name: "Candy" +} +item { + name: "/m/09rvcxw" + id: 339 + display_name: "Rocket" +} +item { + name: "/m/01nkt" + id: 340 + display_name: "Cheese" +} +item { + name: "/m/04p0qw" + id: 341 + display_name: "Billiard table" +} +item { + name: "/m/03hj559" + id: 342 + display_name: "Mixing bowl" +} +item { + name: "/m/07pj7bq" + id: 343 + display_name: "Bowling equipment" +} +item { + name: "/m/04ctx" + id: 344 + display_name: "Knife" +} +item { + name: "/m/0703r8" + id: 345 + display_name: "Loveseat" +} +item { + name: "/m/03qrc" + id: 346 + display_name: "Hamster" +} +item { + name: "/m/020lf" + id: 347 + display_name: "Mouse" +} +item { + name: "/m/0by6g" + id: 348 + display_name: "Shark" +} +item { + name: "/m/01fh4r" + id: 349 + display_name: "Teapot" +} +item { + name: "/m/07c6l" + id: 350 + display_name: "Trombone" +} +item { + name: "/m/03bj1" + id: 351 + display_name: "Panda" +} +item { + name: "/m/0898b" + id: 352 + display_name: "Zebra" +} +item { + name: "/m/02x984l" + id: 353 + display_name: "Mechanical fan" +} +item { + name: "/m/0fj52s" + id: 354 + display_name: "Carrot" +} +item { + name: "/m/0cd4d" + id: 355 + display_name: "Cheetah" +} +item { + name: "/m/02068x" + id: 356 + display_name: "Gondola" +} +item { + name: "/m/01vbnl" + id: 357 + display_name: "Bidet" +} +item { + name: "/m/0449p" + id: 358 + display_name: "Jaguar" +} +item { + name: "/m/0gj37" + id: 359 + display_name: "Ladybug" +} +item { + name: "/m/0nl46" + id: 360 + display_name: "Crown" +} +item { + name: "/m/0152hh" + id: 361 + display_name: "Snowman" +} +item { + name: "/m/03dnzn" + id: 362 + display_name: "Bathtub" +} +item { + name: "/m/05_5p_0" + id: 363 + display_name: "Table tennis racket" +} +item { + name: "/m/02jfl0" + id: 364 + display_name: "Sombrero" +} +item { + name: "/m/01dxs" + id: 365 + display_name: "Brown bear" +} +item { + name: "/m/0cjq5" + id: 366 + display_name: "Lobster" +} +item { + name: "/m/040b_t" + id: 367 + display_name: "Refrigerator" +} +item { + name: "/m/0_cp5" + id: 368 + display_name: "Oyster" +} +item { + name: "/m/0gxl3" + id: 369 + display_name: "Handgun" +} +item { + name: "/m/029bxz" + id: 370 + display_name: "Oven" +} +item { + name: "/m/02zt3" + id: 371 + display_name: "Kite" +} +item { + name: "/m/03d443" + id: 372 + display_name: "Rhinoceros" +} +item { + name: "/m/0306r" + id: 373 + display_name: "Fox" +} +item { + name: "/m/0h8l4fh" + id: 374 + display_name: "Light bulb" +} +item { + name: "/m/0633h" + id: 375 + display_name: "Polar bear" +} +item { + name: "/m/01s55n" + id: 376 + display_name: "Suitcase" +} +item { + name: "/m/0hkxq" + id: 377 + display_name: "Broccoli" +} +item { + name: "/m/0cn6p" + id: 378 + display_name: "Otter" +} +item { + name: "/m/0dbzx" + id: 379 + display_name: "Mule" +} +item { + name: "/m/01dy8n" + id: 380 + display_name: "Woodpecker" +} +item { + name: "/m/01h8tj" + id: 381 + display_name: "Starfish" +} +item { + name: "/m/03s_tn" + id: 382 + display_name: "Kettle" +} +item { + name: "/m/01xs3r" + id: 383 + display_name: "Jet ski" +} +item { + name: "/m/031b6r" + id: 384 + display_name: "Window blind" +} +item { + name: "/m/06j2d" + id: 385 + display_name: "Raven" +} +item { + name: "/m/0hqkz" + id: 386 + display_name: "Grapefruit" +} +item { + name: "/m/01_5g" + id: 387 + display_name: "Chopsticks" +} +item { + name: "/m/02zvsm" + id: 388 + display_name: "Tart" +} +item { + name: "/m/0kpqd" + id: 389 + display_name: "Watermelon" +} +item { + name: "/m/015x4r" + 
id: 390 + display_name: "Cucumber" +} +item { + name: "/m/061hd_" + id: 391 + display_name: "Infant bed" +} +item { + name: "/m/04ylt" + id: 392 + display_name: "Missile" +} +item { + name: "/m/02wv84t" + id: 393 + display_name: "Gas stove" +} +item { + name: "/m/04y4h8h" + id: 394 + display_name: "Bathroom cabinet" +} +item { + name: "/m/01gllr" + id: 395 + display_name: "Beehive" +} +item { + name: "/m/0pcr" + id: 396 + display_name: "Alpaca" +} +item { + name: "/m/0jy4k" + id: 397 + display_name: "Doughnut" +} +item { + name: "/m/09f20" + id: 398 + display_name: "Hippopotamus" +} +item { + name: "/m/0mcx2" + id: 399 + display_name: "Ipod" +} +item { + name: "/m/04c0y" + id: 400 + display_name: "Kangaroo" +} +item { + name: "/m/0_k2" + id: 401 + display_name: "Ant" +} +item { + name: "/m/0jg57" + id: 402 + display_name: "Bell pepper" +} +item { + name: "/m/03fj2" + id: 403 + display_name: "Goldfish" +} +item { + name: "/m/03ldnb" + id: 404 + display_name: "Ceiling fan" +} +item { + name: "/m/06nrc" + id: 405 + display_name: "Shotgun" +} +item { + name: "/m/01btn" + id: 406 + display_name: "Barge" +} +item { + name: "/m/05vtc" + id: 407 + display_name: "Potato" +} +item { + name: "/m/08hvt4" + id: 408 + display_name: "Jug" +} +item { + name: "/m/0fx9l" + id: 409 + display_name: "Microwave oven" +} +item { + name: "/m/01h44" + id: 410 + display_name: "Bat" +} +item { + name: "/m/05n4y" + id: 411 + display_name: "Ostrich" +} +item { + name: "/m/0jly1" + id: 412 + display_name: "Turkey" +} +item { + name: "/m/06y5r" + id: 413 + display_name: "Sword" +} +item { + name: "/m/05ctyq" + id: 414 + display_name: "Tennis ball" +} +item { + name: "/m/0fp6w" + id: 415 + display_name: "Pineapple" +} +item { + name: "/m/0d4w1" + id: 416 + display_name: "Closet" +} +item { + name: "/m/02pv19" + id: 417 + display_name: "Stop sign" +} +item { + name: "/m/07crc" + id: 418 + display_name: "Taco" +} +item { + name: "/m/01dwwc" + id: 419 + display_name: "Pancake" +} +item { + name: "/m/01b9xk" + id: 420 + display_name: "Hot dog" +} +item { + name: "/m/013y1f" + id: 421 + display_name: "Organ" +} +item { + name: "/m/0m53l" + id: 422 + display_name: "Rays and skates" +} +item { + name: "/m/0174k2" + id: 423 + display_name: "Washing machine" +} +item { + name: "/m/01dwsz" + id: 424 + display_name: "Waffle" +} +item { + name: "/m/04vv5k" + id: 425 + display_name: "Snowplow" +} +item { + name: "/m/04cp_" + id: 426 + display_name: "Koala" +} +item { + name: "/m/0fz0h" + id: 427 + display_name: "Honeycomb" +} +item { + name: "/m/0llzx" + id: 428 + display_name: "Sewing machine" +} +item { + name: "/m/0319l" + id: 429 + display_name: "Horn" +} +item { + name: "/m/04v6l4" + id: 430 + display_name: "Frying pan" +} +item { + name: "/m/0dkzw" + id: 431 + display_name: "Seat belt" +} +item { + name: "/m/027pcv" + id: 432 + display_name: "Zucchini" +} +item { + name: "/m/0323sq" + id: 433 + display_name: "Golf cart" +} +item { + name: "/m/054fyh" + id: 434 + display_name: "Pitcher" +} +item { + name: "/m/01pns0" + id: 435 + display_name: "Fire hydrant" +} +item { + name: "/m/012n7d" + id: 436 + display_name: "Ambulance" +} +item { + name: "/m/044r5d" + id: 437 + display_name: "Golf ball" +} +item { + name: "/m/01krhy" + id: 438 + display_name: "Tiara" +} +item { + name: "/m/0dq75" + id: 439 + display_name: "Raccoon" +} +item { + name: "/m/0176mf" + id: 440 + display_name: "Belt" +} +item { + name: "/m/0h8lkj8" + id: 441 + display_name: "Corded phone" +} +item { + name: "/m/04tn4x" + id: 442 + display_name: "Swim cap" +} 
+item { + name: "/m/06l9r" + id: 443 + display_name: "Red panda" +} +item { + name: "/m/0cjs7" + id: 444 + display_name: "Asparagus" +} +item { + name: "/m/01lsmm" + id: 445 + display_name: "Scissors" +} +item { + name: "/m/01lcw4" + id: 446 + display_name: "Limousine" +} +item { + name: "/m/047j0r" + id: 447 + display_name: "Filing cabinet" +} +item { + name: "/m/01fb_0" + id: 448 + display_name: "Bagel" +} +item { + name: "/m/04169hn" + id: 449 + display_name: "Wood-burning stove" +} +item { + name: "/m/076bq" + id: 450 + display_name: "Segway" +} +item { + name: "/m/0hdln" + id: 451 + display_name: "Ruler" +} +item { + name: "/m/01g3x7" + id: 452 + display_name: "Bow and arrow" +} +item { + name: "/m/0l3ms" + id: 453 + display_name: "Balance beam" +} +item { + name: "/m/058qzx" + id: 454 + display_name: "Kitchen knife" +} +item { + name: "/m/0h8n6ft" + id: 455 + display_name: "Cake stand" +} +item { + name: "/m/018j2" + id: 456 + display_name: "Banjo" +} +item { + name: "/m/0l14j_" + id: 457 + display_name: "Flute" +} +item { + name: "/m/0wdt60w" + id: 458 + display_name: "Rugby ball" +} +item { + name: "/m/02gzp" + id: 459 + display_name: "Dagger" +} +item { + name: "/m/0h8n6f9" + id: 460 + display_name: "Dog bed" +} +item { + name: "/m/0fbw6" + id: 461 + display_name: "Cabbage" +} +item { + name: "/m/07kng9" + id: 462 + display_name: "Picnic basket" +} +item { + name: "/m/0dj6p" + id: 463 + display_name: "Peach" +} +item { + name: "/m/06pcq" + id: 464 + display_name: "Submarine sandwich" +} +item { + name: "/m/061_f" + id: 465 + display_name: "Pear" +} +item { + name: "/m/04g2r" + id: 466 + display_name: "Lynx" +} +item { + name: "/m/0jwn_" + id: 467 + display_name: "Pomegranate" +} +item { + name: "/m/02f9f_" + id: 468 + display_name: "Shower" +} +item { + name: "/m/01f8m5" + id: 469 + display_name: "Blue jay" +} +item { + name: "/m/01m4t" + id: 470 + display_name: "Printer" +} +item { + name: "/m/0cl4p" + id: 471 + display_name: "Hedgehog" +} +item { + name: "/m/07xyvk" + id: 472 + display_name: "Coffeemaker" +} +item { + name: "/m/084hf" + id: 473 + display_name: "Worm" +} +item { + name: "/m/03v5tg" + id: 474 + display_name: "Drinking straw" +} +item { + name: "/m/0qjjc" + id: 475 + display_name: "Remote control" +} +item { + name: "/m/015x5n" + id: 476 + display_name: "Radish" +} +item { + name: "/m/0ccs93" + id: 477 + display_name: "Canary" +} +item { + name: "/m/0nybt" + id: 478 + display_name: "Seahorse" +} +item { + name: "/m/02vkqh8" + id: 479 + display_name: "Wardrobe" +} +item { + name: "/m/09gtd" + id: 480 + display_name: "Toilet paper" +} +item { + name: "/m/019h78" + id: 481 + display_name: "Centipede" +} +item { + name: "/m/015wgc" + id: 482 + display_name: "Croissant" +} +item { + name: "/m/01x3jk" + id: 483 + display_name: "Snowmobile" +} +item { + name: "/m/01j3zr" + id: 484 + display_name: "Burrito" +} +item { + name: "/m/0c568" + id: 485 + display_name: "Porcupine" +} +item { + name: "/m/02pdsw" + id: 486 + display_name: "Cutting board" +} +item { + name: "/m/029b3" + id: 487 + display_name: "Dice" +} +item { + name: "/m/03q5t" + id: 488 + display_name: "Harpsichord" +} +item { + name: "/m/0p833" + id: 489 + display_name: "Perfume" +} +item { + name: "/m/01d380" + id: 490 + display_name: "Drill" +} +item { + name: "/m/024d2" + id: 491 + display_name: "Calculator" +} +item { + name: "/m/0mw_6" + id: 492 + display_name: "Willow" +} +item { + name: "/m/01f91_" + id: 493 + display_name: "Pretzel" +} +item { + name: "/m/02g30s" + id: 494 + display_name: "Guacamole" +} 
+item { + name: "/m/01hrv5" + id: 495 + display_name: "Popcorn" +} +item { + name: "/m/03m5k" + id: 496 + display_name: "Harp" +} +item { + name: "/m/0162_1" + id: 497 + display_name: "Towel" +} +item { + name: "/m/063rgb" + id: 498 + display_name: "Mixer" +} +item { + name: "/m/06_72j" + id: 499 + display_name: "Digital clock" +} +item { + name: "/m/046dlr" + id: 500 + display_name: "Alarm clock" +} +item { + name: "/m/047v4b" + id: 501 + display_name: "Artichoke" +} +item { + name: "/m/04zpv" + id: 502 + display_name: "Milk" +} +item { + name: "/m/043nyj" + id: 503 + display_name: "Common fig" +} +item { + name: "/m/03bbps" + id: 504 + display_name: "Power plugs and sockets" +} +item { + name: "/m/02w3r3" + id: 505 + display_name: "Paper towel" +} +item { + name: "/m/02pjr4" + id: 506 + display_name: "Blender" +} +item { + name: "/m/0755b" + id: 507 + display_name: "Scorpion" +} +item { + name: "/m/02lbcq" + id: 508 + display_name: "Stretcher" +} +item { + name: "/m/0fldg" + id: 509 + display_name: "Mango" +} +item { + name: "/m/012074" + id: 510 + display_name: "Magpie" +} +item { + name: "/m/035vxb" + id: 511 + display_name: "Isopod" +} +item { + name: "/m/02w3_ws" + id: 512 + display_name: "Personal care" +} +item { + name: "/m/0f6nr" + id: 513 + display_name: "Unicycle" +} +item { + name: "/m/0420v5" + id: 514 + display_name: "Punching bag" +} +item { + name: "/m/0frqm" + id: 515 + display_name: "Envelope" +} +item { + name: "/m/03txqz" + id: 516 + display_name: "Scale" +} +item { + name: "/m/0271qf7" + id: 517 + display_name: "Wine rack" +} +item { + name: "/m/074d1" + id: 518 + display_name: "Submarine" +} +item { + name: "/m/08p92x" + id: 519 + display_name: "Cream" +} +item { + name: "/m/01j4z9" + id: 520 + display_name: "Chainsaw" +} +item { + name: "/m/0kpt_" + id: 521 + display_name: "Cantaloupe" +} +item { + name: "/m/0h8n27j" + id: 522 + display_name: "Serving tray" +} +item { + name: "/m/03y6mg" + id: 523 + display_name: "Food processor" +} +item { + name: "/m/04h8sr" + id: 524 + display_name: "Dumbbell" +} +item { + name: "/m/065h6l" + id: 525 + display_name: "Jacuzzi" +} +item { + name: "/m/02tsc9" + id: 526 + display_name: "Slow cooker" +} +item { + name: "/m/012ysf" + id: 527 + display_name: "Syringe" +} +item { + name: "/m/0ky7b" + id: 528 + display_name: "Dishwasher" +} +item { + name: "/m/02wg_p" + id: 529 + display_name: "Tree house" +} +item { + name: "/m/0584n8" + id: 530 + display_name: "Briefcase" +} +item { + name: "/m/03kt2w" + id: 531 + display_name: "Stationary bicycle" +} +item { + name: "/m/05kms" + id: 532 + display_name: "Oboe" +} +item { + name: "/m/030610" + id: 533 + display_name: "Treadmill" +} +item { + name: "/m/0lt4_" + id: 534 + display_name: "Binoculars" +} +item { + name: "/m/076lb9" + id: 535 + display_name: "Bench" +} +item { + name: "/m/02ctlc" + id: 536 + display_name: "Cricket ball" +} +item { + name: "/m/02x8cch" + id: 537 + display_name: "Salt and pepper shakers" +} +item { + name: "/m/09gys" + id: 538 + display_name: "Squid" +} +item { + name: "/m/03jbxj" + id: 539 + display_name: "Light switch" +} +item { + name: "/m/012xff" + id: 540 + display_name: "Toothbrush" +} +item { + name: "/m/0h8kx63" + id: 541 + display_name: "Spice rack" +} +item { + name: "/m/073g6" + id: 542 + display_name: "Stethoscope" +} +item { + name: "/m/02cvgx" + id: 543 + display_name: "Winter melon" +} +item { + name: "/m/027rl48" + id: 544 + display_name: "Ladle" +} +item { + name: "/m/01kb5b" + id: 545 + display_name: "Flashlight" +} diff --git 
a/models/research/object_detection/data/oid_object_detection_challenge_500_label_map.pbtxt b/models/research/object_detection/data/oid_object_detection_challenge_500_label_map.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..044f6d4c813729a693cac761f43a2246e07f7b6a --- /dev/null +++ b/models/research/object_detection/data/oid_object_detection_challenge_500_label_map.pbtxt @@ -0,0 +1,2500 @@ +item { + name: "/m/061hd_" + id: 1 + display_name: "Infant bed" +} +item { + name: "/m/06m11" + id: 2 + display_name: "Rose" +} +item { + name: "/m/03120" + id: 3 + display_name: "Flag" +} +item { + name: "/m/01kb5b" + id: 4 + display_name: "Flashlight" +} +item { + name: "/m/0120dh" + id: 5 + display_name: "Sea turtle" +} +item { + name: "/m/0dv5r" + id: 6 + display_name: "Camera" +} +item { + name: "/m/0jbk" + id: 7 + display_name: "Animal" +} +item { + name: "/m/0174n1" + id: 8 + display_name: "Glove" +} +item { + name: "/m/09f_2" + id: 9 + display_name: "Crocodile" +} +item { + name: "/m/01xq0k1" + id: 10 + display_name: "Cattle" +} +item { + name: "/m/03jm5" + id: 11 + display_name: "House" +} +item { + name: "/m/02g30s" + id: 12 + display_name: "Guacamole" +} +item { + name: "/m/05z6w" + id: 13 + display_name: "Penguin" +} +item { + name: "/m/01jfm_" + id: 14 + display_name: "Vehicle registration plate" +} +item { + name: "/m/076lb9" + id: 15 + display_name: "Training bench" +} +item { + name: "/m/0gj37" + id: 16 + display_name: "Ladybug" +} +item { + name: "/m/0k0pj" + id: 17 + display_name: "Human nose" +} +item { + name: "/m/0kpqd" + id: 18 + display_name: "Watermelon" +} +item { + name: "/m/0l14j_" + id: 19 + display_name: "Flute" +} +item { + name: "/m/0cyf8" + id: 20 + display_name: "Butterfly" +} +item { + name: "/m/0174k2" + id: 21 + display_name: "Washing machine" +} +item { + name: "/m/0dq75" + id: 22 + display_name: "Raccoon" +} +item { + name: "/m/076bq" + id: 23 + display_name: "Segway" +} +item { + name: "/m/07crc" + id: 24 + display_name: "Taco" +} +item { + name: "/m/0d8zb" + id: 25 + display_name: "Jellyfish" +} +item { + name: "/m/0fszt" + id: 26 + display_name: "Cake" +} +item { + name: "/m/0k1tl" + id: 27 + display_name: "Pen" +} +item { + name: "/m/020kz" + id: 28 + display_name: "Cannon" +} +item { + name: "/m/09728" + id: 29 + display_name: "Bread" +} +item { + name: "/m/07j7r" + id: 30 + display_name: "Tree" +} +item { + name: "/m/0fbdv" + id: 31 + display_name: "Shellfish" +} +item { + name: "/m/03ssj5" + id: 32 + display_name: "Bed" +} +item { + name: "/m/03qrc" + id: 33 + display_name: "Hamster" +} +item { + name: "/m/02dl1y" + id: 34 + display_name: "Hat" +} +item { + name: "/m/01k6s3" + id: 35 + display_name: "Toaster" +} +item { + name: "/m/02jfl0" + id: 36 + display_name: "Sombrero" +} +item { + name: "/m/01krhy" + id: 37 + display_name: "Tiara" +} +item { + name: "/m/04kkgm" + id: 38 + display_name: "Bowl" +} +item { + name: "/m/0ft9s" + id: 39 + display_name: "Dragonfly" +} +item { + name: "/m/0d_2m" + id: 40 + display_name: "Moths and butterflies" +} +item { + name: "/m/0czz2" + id: 41 + display_name: "Antelope" +} +item { + name: "/m/0f4s2w" + id: 42 + display_name: "Vegetable" +} +item { + name: "/m/07dd4" + id: 43 + display_name: "Torch" +} +item { + name: "/m/0cgh4" + id: 44 + display_name: "Building" +} +item { + name: "/m/03bbps" + id: 45 + display_name: "Power plugs and sockets" +} +item { + name: "/m/02pjr4" + id: 46 + display_name: "Blender" +} +item { + name: "/m/04p0qw" + id: 47 + display_name: "Billiard table" +} +item { + 
name: "/m/02pdsw" + id: 48 + display_name: "Cutting board" +} +item { + name: "/m/01yx86" + id: 49 + display_name: "Bronze sculpture" +} +item { + name: "/m/09dzg" + id: 50 + display_name: "Turtle" +} +item { + name: "/m/0hkxq" + id: 51 + display_name: "Broccoli" +} +item { + name: "/m/07dm6" + id: 52 + display_name: "Tiger" +} +item { + name: "/m/054_l" + id: 53 + display_name: "Mirror" +} +item { + name: "/m/01dws" + id: 54 + display_name: "Bear" +} +item { + name: "/m/027pcv" + id: 55 + display_name: "Zucchini" +} +item { + name: "/m/01d40f" + id: 56 + display_name: "Dress" +} +item { + name: "/m/02rgn06" + id: 57 + display_name: "Volleyball" +} +item { + name: "/m/0342h" + id: 58 + display_name: "Guitar" +} +item { + name: "/m/06bt6" + id: 59 + display_name: "Reptile" +} +item { + name: "/m/0323sq" + id: 60 + display_name: "Golf cart" +} +item { + name: "/m/02zvsm" + id: 61 + display_name: "Tart" +} +item { + name: "/m/02fq_6" + id: 62 + display_name: "Fedora" +} +item { + name: "/m/01lrl" + id: 63 + display_name: "Carnivore" +} +item { + name: "/m/0k4j" + id: 64 + display_name: "Car" +} +item { + name: "/m/04h7h" + id: 65 + display_name: "Lighthouse" +} +item { + name: "/m/07xyvk" + id: 66 + display_name: "Coffeemaker" +} +item { + name: "/m/03y6mg" + id: 67 + display_name: "Food processor" +} +item { + name: "/m/07r04" + id: 68 + display_name: "Truck" +} +item { + name: "/m/03__z0" + id: 69 + display_name: "Bookcase" +} +item { + name: "/m/019w40" + id: 70 + display_name: "Surfboard" +} +item { + name: "/m/09j5n" + id: 71 + display_name: "Footwear" +} +item { + name: "/m/0cvnqh" + id: 72 + display_name: "Bench" +} +item { + name: "/m/01llwg" + id: 73 + display_name: "Necklace" +} +item { + name: "/m/0c9ph5" + id: 74 + display_name: "Flower" +} +item { + name: "/m/015x5n" + id: 75 + display_name: "Radish" +} +item { + name: "/m/0gd2v" + id: 76 + display_name: "Marine mammal" +} +item { + name: "/m/04v6l4" + id: 77 + display_name: "Frying pan" +} +item { + name: "/m/02jz0l" + id: 78 + display_name: "Tap" +} +item { + name: "/m/0dj6p" + id: 79 + display_name: "Peach" +} +item { + name: "/m/04ctx" + id: 80 + display_name: "Knife" +} +item { + name: "/m/080hkjn" + id: 81 + display_name: "Handbag" +} +item { + name: "/m/01c648" + id: 82 + display_name: "Laptop" +} +item { + name: "/m/01j61q" + id: 83 + display_name: "Tent" +} +item { + name: "/m/012n7d" + id: 84 + display_name: "Ambulance" +} +item { + name: "/m/025nd" + id: 85 + display_name: "Christmas tree" +} +item { + name: "/m/09csl" + id: 86 + display_name: "Eagle" +} +item { + name: "/m/01lcw4" + id: 87 + display_name: "Limousine" +} +item { + name: "/m/0h8n5zk" + id: 88 + display_name: "Kitchen & dining room table" +} +item { + name: "/m/0633h" + id: 89 + display_name: "Polar bear" +} +item { + name: "/m/01fdzj" + id: 90 + display_name: "Tower" +} +item { + name: "/m/01226z" + id: 91 + display_name: "Football" +} +item { + name: "/m/0mw_6" + id: 92 + display_name: "Willow" +} +item { + name: "/m/04hgtk" + id: 93 + display_name: "Human head" +} +item { + name: "/m/02pv19" + id: 94 + display_name: "Stop sign" +} +item { + name: "/m/09qck" + id: 95 + display_name: "Banana" +} +item { + name: "/m/063rgb" + id: 96 + display_name: "Mixer" +} +item { + name: "/m/0lt4_" + id: 97 + display_name: "Binoculars" +} +item { + name: "/m/0270h" + id: 98 + display_name: "Dessert" +} +item { + name: "/m/01h3n" + id: 99 + display_name: "Bee" +} +item { + name: "/m/01mzpv" + id: 100 + display_name: "Chair" +} +item { + name: "/m/04169hn" + id: 101 + 
display_name: "Wood-burning stove" +} +item { + name: "/m/0fm3zh" + id: 102 + display_name: "Flowerpot" +} +item { + name: "/m/0d20w4" + id: 103 + display_name: "Beaker" +} +item { + name: "/m/0_cp5" + id: 104 + display_name: "Oyster" +} +item { + name: "/m/01dy8n" + id: 105 + display_name: "Woodpecker" +} +item { + name: "/m/03m5k" + id: 106 + display_name: "Harp" +} +item { + name: "/m/03dnzn" + id: 107 + display_name: "Bathtub" +} +item { + name: "/m/0h8mzrc" + id: 108 + display_name: "Wall clock" +} +item { + name: "/m/0h8mhzd" + id: 109 + display_name: "Sports uniform" +} +item { + name: "/m/03d443" + id: 110 + display_name: "Rhinoceros" +} +item { + name: "/m/01gllr" + id: 111 + display_name: "Beehive" +} +item { + name: "/m/0642b4" + id: 112 + display_name: "Cupboard" +} +item { + name: "/m/09b5t" + id: 113 + display_name: "Chicken" +} +item { + name: "/m/04yx4" + id: 114 + display_name: "Man" +} +item { + name: "/m/01f8m5" + id: 115 + display_name: "Blue jay" +} +item { + name: "/m/015x4r" + id: 116 + display_name: "Cucumber" +} +item { + name: "/m/01j51" + id: 117 + display_name: "Balloon" +} +item { + name: "/m/02zt3" + id: 118 + display_name: "Kite" +} +item { + name: "/m/03tw93" + id: 119 + display_name: "Fireplace" +} +item { + name: "/m/01jfsr" + id: 120 + display_name: "Lantern" +} +item { + name: "/m/04ylt" + id: 121 + display_name: "Missile" +} +item { + name: "/m/0bt_c3" + id: 122 + display_name: "Book" +} +item { + name: "/m/0cmx8" + id: 123 + display_name: "Spoon" +} +item { + name: "/m/0hqkz" + id: 124 + display_name: "Grapefruit" +} +item { + name: "/m/071qp" + id: 125 + display_name: "Squirrel" +} +item { + name: "/m/0cyhj_" + id: 126 + display_name: "Orange" +} +item { + name: "/m/01xygc" + id: 127 + display_name: "Coat" +} +item { + name: "/m/0420v5" + id: 128 + display_name: "Punching bag" +} +item { + name: "/m/0898b" + id: 129 + display_name: "Zebra" +} +item { + name: "/m/01knjb" + id: 130 + display_name: "Billboard" +} +item { + name: "/m/0199g" + id: 131 + display_name: "Bicycle" +} +item { + name: "/m/03c7gz" + id: 132 + display_name: "Door handle" +} +item { + name: "/m/02x984l" + id: 133 + display_name: "Mechanical fan" +} +item { + name: "/m/04zwwv" + id: 134 + display_name: "Ring binder" +} +item { + name: "/m/04bcr3" + id: 135 + display_name: "Table" +} +item { + name: "/m/0gv1x" + id: 136 + display_name: "Parrot" +} +item { + name: "/m/01nq26" + id: 137 + display_name: "Sock" +} +item { + name: "/m/02s195" + id: 138 + display_name: "Vase" +} +item { + name: "/m/083kb" + id: 139 + display_name: "Weapon" +} +item { + name: "/m/06nrc" + id: 140 + display_name: "Shotgun" +} +item { + name: "/m/0jyfg" + id: 141 + display_name: "Glasses" +} +item { + name: "/m/0nybt" + id: 142 + display_name: "Seahorse" +} +item { + name: "/m/0176mf" + id: 143 + display_name: "Belt" +} +item { + name: "/m/01rzcn" + id: 144 + display_name: "Watercraft" +} +item { + name: "/m/0d4v4" + id: 145 + display_name: "Window" +} +item { + name: "/m/03bk1" + id: 146 + display_name: "Giraffe" +} +item { + name: "/m/096mb" + id: 147 + display_name: "Lion" +} +item { + name: "/m/0h9mv" + id: 148 + display_name: "Tire" +} +item { + name: "/m/07yv9" + id: 149 + display_name: "Vehicle" +} +item { + name: "/m/0ph39" + id: 150 + display_name: "Canoe" +} +item { + name: "/m/01rkbr" + id: 151 + display_name: "Tie" +} +item { + name: "/m/0gjbg72" + id: 152 + display_name: "Shelf" +} +item { + name: "/m/06z37_" + id: 153 + display_name: "Picture frame" +} +item { + name: "/m/01m4t" + id: 154 + 
display_name: "Printer" +} +item { + name: "/m/035r7c" + id: 155 + display_name: "Human leg" +} +item { + name: "/m/019jd" + id: 156 + display_name: "Boat" +} +item { + name: "/m/02tsc9" + id: 157 + display_name: "Slow cooker" +} +item { + name: "/m/015wgc" + id: 158 + display_name: "Croissant" +} +item { + name: "/m/0c06p" + id: 159 + display_name: "Candle" +} +item { + name: "/m/01dwwc" + id: 160 + display_name: "Pancake" +} +item { + name: "/m/034c16" + id: 161 + display_name: "Pillow" +} +item { + name: "/m/0242l" + id: 162 + display_name: "Coin" +} +item { + name: "/m/02lbcq" + id: 163 + display_name: "Stretcher" +} +item { + name: "/m/03nfch" + id: 164 + display_name: "Sandal" +} +item { + name: "/m/03bt1vf" + id: 165 + display_name: "Woman" +} +item { + name: "/m/01lynh" + id: 166 + display_name: "Stairs" +} +item { + name: "/m/03q5t" + id: 167 + display_name: "Harpsichord" +} +item { + name: "/m/0fqt361" + id: 168 + display_name: "Stool" +} +item { + name: "/m/01bjv" + id: 169 + display_name: "Bus" +} +item { + name: "/m/01s55n" + id: 170 + display_name: "Suitcase" +} +item { + name: "/m/0283dt1" + id: 171 + display_name: "Human mouth" +} +item { + name: "/m/01z1kdw" + id: 172 + display_name: "Juice" +} +item { + name: "/m/016m2d" + id: 173 + display_name: "Skull" +} +item { + name: "/m/02dgv" + id: 174 + display_name: "Door" +} +item { + name: "/m/07y_7" + id: 175 + display_name: "Violin" +} +item { + name: "/m/01_5g" + id: 176 + display_name: "Chopsticks" +} +item { + name: "/m/06_72j" + id: 177 + display_name: "Digital clock" +} +item { + name: "/m/0ftb8" + id: 178 + display_name: "Sunflower" +} +item { + name: "/m/0c29q" + id: 179 + display_name: "Leopard" +} +item { + name: "/m/0jg57" + id: 180 + display_name: "Bell pepper" +} +item { + name: "/m/02l8p9" + id: 181 + display_name: "Harbor seal" +} +item { + name: "/m/078jl" + id: 182 + display_name: "Snake" +} +item { + name: "/m/0llzx" + id: 183 + display_name: "Sewing machine" +} +item { + name: "/m/0dbvp" + id: 184 + display_name: "Goose" +} +item { + name: "/m/09ct_" + id: 185 + display_name: "Helicopter" +} +item { + name: "/m/0dkzw" + id: 186 + display_name: "Seat belt" +} +item { + name: "/m/02p5f1q" + id: 187 + display_name: "Coffee cup" +} +item { + name: "/m/0fx9l" + id: 188 + display_name: "Microwave oven" +} +item { + name: "/m/01b9xk" + id: 189 + display_name: "Hot dog" +} +item { + name: "/m/0b3fp9" + id: 190 + display_name: "Countertop" +} +item { + name: "/m/0h8n27j" + id: 191 + display_name: "Serving tray" +} +item { + name: "/m/0h8n6f9" + id: 192 + display_name: "Dog bed" +} +item { + name: "/m/01599" + id: 193 + display_name: "Beer" +} +item { + name: "/m/017ftj" + id: 194 + display_name: "Sunglasses" +} +item { + name: "/m/044r5d" + id: 195 + display_name: "Golf ball" +} +item { + name: "/m/01dwsz" + id: 196 + display_name: "Waffle" +} +item { + name: "/m/0cdl1" + id: 197 + display_name: "Palm tree" +} +item { + name: "/m/07gql" + id: 198 + display_name: "Trumpet" +} +item { + name: "/m/0hdln" + id: 199 + display_name: "Ruler" +} +item { + name: "/m/0zvk5" + id: 200 + display_name: "Helmet" +} +item { + name: "/m/012w5l" + id: 201 + display_name: "Ladder" +} +item { + name: "/m/021sj1" + id: 202 + display_name: "Office building" +} +item { + name: "/m/0bh9flk" + id: 203 + display_name: "Tablet computer" +} +item { + name: "/m/09gtd" + id: 204 + display_name: "Toilet paper" +} +item { + name: "/m/0jwn_" + id: 205 + display_name: "Pomegranate" +} +item { + name: "/m/02wv6h6" + id: 206 + display_name: "Skirt" 
+} +item { + name: "/m/02wv84t" + id: 207 + display_name: "Gas stove" +} +item { + name: "/m/021mn" + id: 208 + display_name: "Cookie" +} +item { + name: "/m/018p4k" + id: 209 + display_name: "Cart" +} +item { + name: "/m/06j2d" + id: 210 + display_name: "Raven" +} +item { + name: "/m/033cnk" + id: 211 + display_name: "Egg" +} +item { + name: "/m/01j3zr" + id: 212 + display_name: "Burrito" +} +item { + name: "/m/03fwl" + id: 213 + display_name: "Goat" +} +item { + name: "/m/058qzx" + id: 214 + display_name: "Kitchen knife" +} +item { + name: "/m/06_fw" + id: 215 + display_name: "Skateboard" +} +item { + name: "/m/02x8cch" + id: 216 + display_name: "Salt and pepper shakers" +} +item { + name: "/m/04g2r" + id: 217 + display_name: "Lynx" +} +item { + name: "/m/01b638" + id: 218 + display_name: "Boot" +} +item { + name: "/m/099ssp" + id: 219 + display_name: "Platter" +} +item { + name: "/m/071p9" + id: 220 + display_name: "Ski" +} +item { + name: "/m/01gkx_" + id: 221 + display_name: "Swimwear" +} +item { + name: "/m/0b_rs" + id: 222 + display_name: "Swimming pool" +} +item { + name: "/m/03v5tg" + id: 223 + display_name: "Drinking straw" +} +item { + name: "/m/01j5ks" + id: 224 + display_name: "Wrench" +} +item { + name: "/m/026t6" + id: 225 + display_name: "Drum" +} +item { + name: "/m/0_k2" + id: 226 + display_name: "Ant" +} +item { + name: "/m/039xj_" + id: 227 + display_name: "Human ear" +} +item { + name: "/m/01b7fy" + id: 228 + display_name: "Headphones" +} +item { + name: "/m/0220r2" + id: 229 + display_name: "Fountain" +} +item { + name: "/m/015p6" + id: 230 + display_name: "Bird" +} +item { + name: "/m/0fly7" + id: 231 + display_name: "Jeans" +} +item { + name: "/m/07c52" + id: 232 + display_name: "Television" +} +item { + name: "/m/0n28_" + id: 233 + display_name: "Crab" +} +item { + name: "/m/0hg7b" + id: 234 + display_name: "Microphone" +} +item { + name: "/m/019dx1" + id: 235 + display_name: "Home appliance" +} +item { + name: "/m/04vv5k" + id: 236 + display_name: "Snowplow" +} +item { + name: "/m/020jm" + id: 237 + display_name: "Beetle" +} +item { + name: "/m/047v4b" + id: 238 + display_name: "Artichoke" +} +item { + name: "/m/01xs3r" + id: 239 + display_name: "Jet ski" +} +item { + name: "/m/03kt2w" + id: 240 + display_name: "Stationary bicycle" +} +item { + name: "/m/03q69" + id: 241 + display_name: "Human hair" +} +item { + name: "/m/01dxs" + id: 242 + display_name: "Brown bear" +} +item { + name: "/m/01h8tj" + id: 243 + display_name: "Starfish" +} +item { + name: "/m/0dt3t" + id: 244 + display_name: "Fork" +} +item { + name: "/m/0cjq5" + id: 245 + display_name: "Lobster" +} +item { + name: "/m/0h8lkj8" + id: 246 + display_name: "Corded phone" +} +item { + name: "/m/0271t" + id: 247 + display_name: "Drink" +} +item { + name: "/m/03q5c7" + id: 248 + display_name: "Saucer" +} +item { + name: "/m/0fj52s" + id: 249 + display_name: "Carrot" +} +item { + name: "/m/03vt0" + id: 250 + display_name: "Insect" +} +item { + name: "/m/01x3z" + id: 251 + display_name: "Clock" +} +item { + name: "/m/0d5gx" + id: 252 + display_name: "Castle" +} +item { + name: "/m/0h8my_4" + id: 253 + display_name: "Tennis racket" +} +item { + name: "/m/03ldnb" + id: 254 + display_name: "Ceiling fan" +} +item { + name: "/m/0cjs7" + id: 255 + display_name: "Asparagus" +} +item { + name: "/m/0449p" + id: 256 + display_name: "Jaguar" +} +item { + name: "/m/04szw" + id: 257 + display_name: "Musical instrument" +} +item { + name: "/m/07jdr" + id: 258 + display_name: "Train" +} +item { + name: "/m/01yrx" + id: 259 
+ display_name: "Cat" +} +item { + name: "/m/06c54" + id: 260 + display_name: "Rifle" +} +item { + name: "/m/04h8sr" + id: 261 + display_name: "Dumbbell" +} +item { + name: "/m/050k8" + id: 262 + display_name: "Mobile phone" +} +item { + name: "/m/0pg52" + id: 263 + display_name: "Taxi" +} +item { + name: "/m/02f9f_" + id: 264 + display_name: "Shower" +} +item { + name: "/m/054fyh" + id: 265 + display_name: "Pitcher" +} +item { + name: "/m/09k_b" + id: 266 + display_name: "Lemon" +} +item { + name: "/m/03xxp" + id: 267 + display_name: "Invertebrate" +} +item { + name: "/m/0jly1" + id: 268 + display_name: "Turkey" +} +item { + name: "/m/06k2mb" + id: 269 + display_name: "High heels" +} +item { + name: "/m/04yqq2" + id: 270 + display_name: "Bust" +} +item { + name: "/m/0bwd_0j" + id: 271 + display_name: "Elephant" +} +item { + name: "/m/02h19r" + id: 272 + display_name: "Scarf" +} +item { + name: "/m/02zn6n" + id: 273 + display_name: "Barrel" +} +item { + name: "/m/07c6l" + id: 274 + display_name: "Trombone" +} +item { + name: "/m/05zsy" + id: 275 + display_name: "Pumpkin" +} +item { + name: "/m/025dyy" + id: 276 + display_name: "Box" +} +item { + name: "/m/07j87" + id: 277 + display_name: "Tomato" +} +item { + name: "/m/09ld4" + id: 278 + display_name: "Frog" +} +item { + name: "/m/01vbnl" + id: 279 + display_name: "Bidet" +} +item { + name: "/m/0dzct" + id: 280 + display_name: "Human face" +} +item { + name: "/m/03fp41" + id: 281 + display_name: "Houseplant" +} +item { + name: "/m/0h2r6" + id: 282 + display_name: "Van" +} +item { + name: "/m/0by6g" + id: 283 + display_name: "Shark" +} +item { + name: "/m/0cxn2" + id: 284 + display_name: "Ice cream" +} +item { + name: "/m/04tn4x" + id: 285 + display_name: "Swim cap" +} +item { + name: "/m/0f6wt" + id: 286 + display_name: "Falcon" +} +item { + name: "/m/05n4y" + id: 287 + display_name: "Ostrich" +} +item { + name: "/m/0gxl3" + id: 288 + display_name: "Handgun" +} +item { + name: "/m/02d9qx" + id: 289 + display_name: "Whiteboard" +} +item { + name: "/m/04m9y" + id: 290 + display_name: "Lizard" +} +item { + name: "/m/05z55" + id: 291 + display_name: "Pasta" +} +item { + name: "/m/01x3jk" + id: 292 + display_name: "Snowmobile" +} +item { + name: "/m/0h8l4fh" + id: 293 + display_name: "Light bulb" +} +item { + name: "/m/031b6r" + id: 294 + display_name: "Window blind" +} +item { + name: "/m/01tcjp" + id: 295 + display_name: "Muffin" +} +item { + name: "/m/01f91_" + id: 296 + display_name: "Pretzel" +} +item { + name: "/m/02522" + id: 297 + display_name: "Computer monitor" +} +item { + name: "/m/0319l" + id: 298 + display_name: "Horn" +} +item { + name: "/m/0c_jw" + id: 299 + display_name: "Furniture" +} +item { + name: "/m/0l515" + id: 300 + display_name: "Sandwich" +} +item { + name: "/m/0306r" + id: 301 + display_name: "Fox" +} +item { + name: "/m/0crjs" + id: 302 + display_name: "Convenience store" +} +item { + name: "/m/0ch_cf" + id: 303 + display_name: "Fish" +} +item { + name: "/m/02xwb" + id: 304 + display_name: "Fruit" +} +item { + name: "/m/01r546" + id: 305 + display_name: "Earrings" +} +item { + name: "/m/03rszm" + id: 306 + display_name: "Curtain" +} +item { + name: "/m/0388q" + id: 307 + display_name: "Grape" +} +item { + name: "/m/03m3pdh" + id: 308 + display_name: "Sofa bed" +} +item { + name: "/m/03k3r" + id: 309 + display_name: "Horse" +} +item { + name: "/m/0hf58v5" + id: 310 + display_name: "Luggage and bags" +} +item { + name: "/m/01y9k5" + id: 311 + display_name: "Desk" +} +item { + name: "/m/05441v" + id: 312 + 
display_name: "Crutch" +} +item { + name: "/m/03p3bw" + id: 313 + display_name: "Bicycle helmet" +} +item { + name: "/m/0175cv" + id: 314 + display_name: "Tick" +} +item { + name: "/m/0cmf2" + id: 315 + display_name: "Airplane" +} +item { + name: "/m/0ccs93" + id: 316 + display_name: "Canary" +} +item { + name: "/m/02d1br" + id: 317 + display_name: "Spatula" +} +item { + name: "/m/0gjkl" + id: 318 + display_name: "Watch" +} +item { + name: "/m/0jqgx" + id: 319 + display_name: "Lily" +} +item { + name: "/m/0h99cwc" + id: 320 + display_name: "Kitchen appliance" +} +item { + name: "/m/047j0r" + id: 321 + display_name: "Filing cabinet" +} +item { + name: "/m/0k5j" + id: 322 + display_name: "Aircraft" +} +item { + name: "/m/0h8n6ft" + id: 323 + display_name: "Cake stand" +} +item { + name: "/m/0gm28" + id: 324 + display_name: "Candy" +} +item { + name: "/m/0130jx" + id: 325 + display_name: "Sink" +} +item { + name: "/m/04rmv" + id: 326 + display_name: "Mouse" +} +item { + name: "/m/081qc" + id: 327 + display_name: "Wine" +} +item { + name: "/m/0qmmr" + id: 328 + display_name: "Wheelchair" +} +item { + name: "/m/03fj2" + id: 329 + display_name: "Goldfish" +} +item { + name: "/m/040b_t" + id: 330 + display_name: "Refrigerator" +} +item { + name: "/m/02y6n" + id: 331 + display_name: "French fries" +} +item { + name: "/m/0fqfqc" + id: 332 + display_name: "Drawer" +} +item { + name: "/m/030610" + id: 333 + display_name: "Treadmill" +} +item { + name: "/m/07kng9" + id: 334 + display_name: "Picnic basket" +} +item { + name: "/m/029b3" + id: 335 + display_name: "Dice" +} +item { + name: "/m/0fbw6" + id: 336 + display_name: "Cabbage" +} +item { + name: "/m/07qxg_" + id: 337 + display_name: "Football helmet" +} +item { + name: "/m/068zj" + id: 338 + display_name: "Pig" +} +item { + name: "/m/01g317" + id: 339 + display_name: "Person" +} +item { + name: "/m/01bfm9" + id: 340 + display_name: "Shorts" +} +item { + name: "/m/02068x" + id: 341 + display_name: "Gondola" +} +item { + name: "/m/0fz0h" + id: 342 + display_name: "Honeycomb" +} +item { + name: "/m/0jy4k" + id: 343 + display_name: "Doughnut" +} +item { + name: "/m/05kyg_" + id: 344 + display_name: "Chest of drawers" +} +item { + name: "/m/01prls" + id: 345 + display_name: "Land vehicle" +} +item { + name: "/m/01h44" + id: 346 + display_name: "Bat" +} +item { + name: "/m/08pbxl" + id: 347 + display_name: "Monkey" +} +item { + name: "/m/02gzp" + id: 348 + display_name: "Dagger" +} +item { + name: "/m/04brg2" + id: 349 + display_name: "Tableware" +} +item { + name: "/m/031n1" + id: 350 + display_name: "Human foot" +} +item { + name: "/m/02jvh9" + id: 351 + display_name: "Mug" +} +item { + name: "/m/046dlr" + id: 352 + display_name: "Alarm clock" +} +item { + name: "/m/0h8ntjv" + id: 353 + display_name: "Pressure cooker" +} +item { + name: "/m/0k65p" + id: 354 + display_name: "Human hand" +} +item { + name: "/m/011k07" + id: 355 + display_name: "Tortoise" +} +item { + name: "/m/03grzl" + id: 356 + display_name: "Baseball glove" +} +item { + name: "/m/06y5r" + id: 357 + display_name: "Sword" +} +item { + name: "/m/061_f" + id: 358 + display_name: "Pear" +} +item { + name: "/m/01cmb2" + id: 359 + display_name: "Miniskirt" +} +item { + name: "/m/01mqdt" + id: 360 + display_name: "Traffic sign" +} +item { + name: "/m/05r655" + id: 361 + display_name: "Girl" +} +item { + name: "/m/02p3w7d" + id: 362 + display_name: "Roller skates" +} +item { + name: "/m/029tx" + id: 363 + display_name: "Dinosaur" +} +item { + name: "/m/04m6gz" + id: 364 + display_name: 
"Porch" +} +item { + name: "/m/015h_t" + id: 365 + display_name: "Human beard" +} +item { + name: "/m/06pcq" + id: 366 + display_name: "Submarine sandwich" +} +item { + name: "/m/01bms0" + id: 367 + display_name: "Screwdriver" +} +item { + name: "/m/07fbm7" + id: 368 + display_name: "Strawberry" +} +item { + name: "/m/09tvcd" + id: 369 + display_name: "Wine glass" +} +item { + name: "/m/06nwz" + id: 370 + display_name: "Seafood" +} +item { + name: "/m/0dv9c" + id: 371 + display_name: "Racket" +} +item { + name: "/m/083wq" + id: 372 + display_name: "Wheel" +} +item { + name: "/m/0gd36" + id: 373 + display_name: "Sea lion" +} +item { + name: "/m/0138tl" + id: 374 + display_name: "Toy" +} +item { + name: "/m/07clx" + id: 375 + display_name: "Tea" +} +item { + name: "/m/05ctyq" + id: 376 + display_name: "Tennis ball" +} +item { + name: "/m/0bjyj5" + id: 377 + display_name: "Waste container" +} +item { + name: "/m/0dbzx" + id: 378 + display_name: "Mule" +} +item { + name: "/m/02ctlc" + id: 379 + display_name: "Cricket ball" +} +item { + name: "/m/0fp6w" + id: 380 + display_name: "Pineapple" +} +item { + name: "/m/0djtd" + id: 381 + display_name: "Coconut" +} +item { + name: "/m/0167gd" + id: 382 + display_name: "Doll" +} +item { + name: "/m/078n6m" + id: 383 + display_name: "Coffee table" +} +item { + name: "/m/0152hh" + id: 384 + display_name: "Snowman" +} +item { + name: "/m/04gth" + id: 385 + display_name: "Lavender" +} +item { + name: "/m/0ll1f78" + id: 386 + display_name: "Shrimp" +} +item { + name: "/m/0cffdh" + id: 387 + display_name: "Maple" +} +item { + name: "/m/025rp__" + id: 388 + display_name: "Cowboy hat" +} +item { + name: "/m/02_n6y" + id: 389 + display_name: "Goggles" +} +item { + name: "/m/0wdt60w" + id: 390 + display_name: "Rugby ball" +} +item { + name: "/m/0cydv" + id: 391 + display_name: "Caterpillar" +} +item { + name: "/m/01n5jq" + id: 392 + display_name: "Poster" +} +item { + name: "/m/09rvcxw" + id: 393 + display_name: "Rocket" +} +item { + name: "/m/013y1f" + id: 394 + display_name: "Organ" +} +item { + name: "/m/06ncr" + id: 395 + display_name: "Saxophone" +} +item { + name: "/m/015qff" + id: 396 + display_name: "Traffic light" +} +item { + name: "/m/024g6" + id: 397 + display_name: "Cocktail" +} +item { + name: "/m/05gqfk" + id: 398 + display_name: "Plastic bag" +} +item { + name: "/m/0dv77" + id: 399 + display_name: "Squash" +} +item { + name: "/m/052sf" + id: 400 + display_name: "Mushroom" +} +item { + name: "/m/0cdn1" + id: 401 + display_name: "Hamburger" +} +item { + name: "/m/03jbxj" + id: 402 + display_name: "Light switch" +} +item { + name: "/m/0cyfs" + id: 403 + display_name: "Parachute" +} +item { + name: "/m/0kmg4" + id: 404 + display_name: "Teddy bear" +} +item { + name: "/m/02cvgx" + id: 405 + display_name: "Winter melon" +} +item { + name: "/m/09kx5" + id: 406 + display_name: "Deer" +} +item { + name: "/m/057cc" + id: 407 + display_name: "Musical keyboard" +} +item { + name: "/m/02pkr5" + id: 408 + display_name: "Plumbing fixture" +} +item { + name: "/m/057p5t" + id: 409 + display_name: "Scoreboard" +} +item { + name: "/m/03g8mr" + id: 410 + display_name: "Baseball bat" +} +item { + name: "/m/0frqm" + id: 411 + display_name: "Envelope" +} +item { + name: "/m/03m3vtv" + id: 412 + display_name: "Adhesive tape" +} +item { + name: "/m/0584n8" + id: 413 + display_name: "Briefcase" +} +item { + name: "/m/014y4n" + id: 414 + display_name: "Paddle" +} +item { + name: "/m/01g3x7" + id: 415 + display_name: "Bow and arrow" +} +item { + name: "/m/07cx4" + id: 416 + 
display_name: "Telephone" +} +item { + name: "/m/07bgp" + id: 417 + display_name: "Sheep" +} +item { + name: "/m/032b3c" + id: 418 + display_name: "Jacket" +} +item { + name: "/m/01bl7v" + id: 419 + display_name: "Boy" +} +item { + name: "/m/0663v" + id: 420 + display_name: "Pizza" +} +item { + name: "/m/0cn6p" + id: 421 + display_name: "Otter" +} +item { + name: "/m/02rdsp" + id: 422 + display_name: "Office supplies" +} +item { + name: "/m/02crq1" + id: 423 + display_name: "Couch" +} +item { + name: "/m/01xqw" + id: 424 + display_name: "Cello" +} +item { + name: "/m/0cnyhnx" + id: 425 + display_name: "Bull" +} +item { + name: "/m/01x_v" + id: 426 + display_name: "Camel" +} +item { + name: "/m/018xm" + id: 427 + display_name: "Ball" +} +item { + name: "/m/09ddx" + id: 428 + display_name: "Duck" +} +item { + name: "/m/084zz" + id: 429 + display_name: "Whale" +} +item { + name: "/m/01n4qj" + id: 430 + display_name: "Shirt" +} +item { + name: "/m/07cmd" + id: 431 + display_name: "Tank" +} +item { + name: "/m/04_sv" + id: 432 + display_name: "Motorcycle" +} +item { + name: "/m/0mkg" + id: 433 + display_name: "Accordion" +} +item { + name: "/m/09d5_" + id: 434 + display_name: "Owl" +} +item { + name: "/m/0c568" + id: 435 + display_name: "Porcupine" +} +item { + name: "/m/02wbtzl" + id: 436 + display_name: "Sun hat" +} +item { + name: "/m/05bm6" + id: 437 + display_name: "Nail" +} +item { + name: "/m/01lsmm" + id: 438 + display_name: "Scissors" +} +item { + name: "/m/0dftk" + id: 439 + display_name: "Swan" +} +item { + name: "/m/0dtln" + id: 440 + display_name: "Lamp" +} +item { + name: "/m/0nl46" + id: 441 + display_name: "Crown" +} +item { + name: "/m/05r5c" + id: 442 + display_name: "Piano" +} +item { + name: "/m/06msq" + id: 443 + display_name: "Sculpture" +} +item { + name: "/m/0cd4d" + id: 444 + display_name: "Cheetah" +} +item { + name: "/m/05kms" + id: 445 + display_name: "Oboe" +} +item { + name: "/m/02jnhm" + id: 446 + display_name: "Tin can" +} +item { + name: "/m/0fldg" + id: 447 + display_name: "Mango" +} +item { + name: "/m/073bxn" + id: 448 + display_name: "Tripod" +} +item { + name: "/m/029bxz" + id: 449 + display_name: "Oven" +} +item { + name: "/m/020lf" + id: 450 + display_name: "Computer mouse" +} +item { + name: "/m/01btn" + id: 451 + display_name: "Barge" +} +item { + name: "/m/02vqfm" + id: 452 + display_name: "Coffee" +} +item { + name: "/m/06__v" + id: 453 + display_name: "Snowboard" +} +item { + name: "/m/043nyj" + id: 454 + display_name: "Common fig" +} +item { + name: "/m/0grw1" + id: 455 + display_name: "Salad" +} +item { + name: "/m/03hl4l9" + id: 456 + display_name: "Marine invertebrates" +} +item { + name: "/m/0hnnb" + id: 457 + display_name: "Umbrella" +} +item { + name: "/m/04c0y" + id: 458 + display_name: "Kangaroo" +} +item { + name: "/m/0dzf4" + id: 459 + display_name: "Human arm" +} +item { + name: "/m/07v9_z" + id: 460 + display_name: "Measuring cup" +} +item { + name: "/m/0f9_l" + id: 461 + display_name: "Snail" +} +item { + name: "/m/0703r8" + id: 462 + display_name: "Loveseat" +} +item { + name: "/m/01xyhv" + id: 463 + display_name: "Suit" +} +item { + name: "/m/01fh4r" + id: 464 + display_name: "Teapot" +} +item { + name: "/m/04dr76w" + id: 465 + display_name: "Bottle" +} +item { + name: "/m/0pcr" + id: 466 + display_name: "Alpaca" +} +item { + name: "/m/03s_tn" + id: 467 + display_name: "Kettle" +} +item { + name: "/m/07mhn" + id: 468 + display_name: "Trousers" +} +item { + name: "/m/01hrv5" + id: 469 + display_name: "Popcorn" +} +item { + name: 
"/m/019h78" + id: 470 + display_name: "Centipede" +} +item { + name: "/m/09kmb" + id: 471 + display_name: "Spider" +} +item { + name: "/m/0h23m" + id: 472 + display_name: "Sparrow" +} +item { + name: "/m/050gv4" + id: 473 + display_name: "Plate" +} +item { + name: "/m/01fb_0" + id: 474 + display_name: "Bagel" +} +item { + name: "/m/02w3_ws" + id: 475 + display_name: "Personal care" +} +item { + name: "/m/014j1m" + id: 476 + display_name: "Apple" +} +item { + name: "/m/01gmv2" + id: 477 + display_name: "Brassiere" +} +item { + name: "/m/04y4h8h" + id: 478 + display_name: "Bathroom cabinet" +} +item { + name: "/m/026qbn5" + id: 479 + display_name: "Studio couch" +} +item { + name: "/m/01m2v" + id: 480 + display_name: "Computer keyboard" +} +item { + name: "/m/05_5p_0" + id: 481 + display_name: "Table tennis racket" +} +item { + name: "/m/07030" + id: 482 + display_name: "Sushi" +} +item { + name: "/m/01s105" + id: 483 + display_name: "Cabinetry" +} +item { + name: "/m/033rq4" + id: 484 + display_name: "Street light" +} +item { + name: "/m/0162_1" + id: 485 + display_name: "Towel" +} +item { + name: "/m/02z51p" + id: 486 + display_name: "Nightstand" +} +item { + name: "/m/06mf6" + id: 487 + display_name: "Rabbit" +} +item { + name: "/m/02hj4" + id: 488 + display_name: "Dolphin" +} +item { + name: "/m/0bt9lr" + id: 489 + display_name: "Dog" +} +item { + name: "/m/08hvt4" + id: 490 + display_name: "Jug" +} +item { + name: "/m/084rd" + id: 491 + display_name: "Wok" +} +item { + name: "/m/01pns0" + id: 492 + display_name: "Fire hydrant" +} +item { + name: "/m/014sv8" + id: 493 + display_name: "Human eye" +} +item { + name: "/m/079cl" + id: 494 + display_name: "Skyscraper" +} +item { + name: "/m/01940j" + id: 495 + display_name: "Backpack" +} +item { + name: "/m/05vtc" + id: 496 + display_name: "Potato" +} +item { + name: "/m/02w3r3" + id: 497 + display_name: "Paper towel" +} +item { + name: "/m/054xkw" + id: 498 + display_name: "Lifejacket" +} +item { + name: "/m/01bqk0" + id: 499 + display_name: "Bicycle wheel" +} +item { + name: "/m/09g1w" + id: 500 + display_name: "Toilet" +} diff --git a/models/research/object_detection/data/oid_v4_label_map.pbtxt b/models/research/object_detection/data/oid_v4_label_map.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..643b9e8ed5d9239a3248b895fb32f3b51caa92f3 --- /dev/null +++ b/models/research/object_detection/data/oid_v4_label_map.pbtxt @@ -0,0 +1,3005 @@ +item { + name: "/m/011k07" + id: 1 + display_name: "Tortoise" +} +item { + name: "/m/011q46kg" + id: 2 + display_name: "Container" +} +item { + name: "/m/012074" + id: 3 + display_name: "Magpie" +} +item { + name: "/m/0120dh" + id: 4 + display_name: "Sea turtle" +} +item { + name: "/m/01226z" + id: 5 + display_name: "Football" +} +item { + name: "/m/012n7d" + id: 6 + display_name: "Ambulance" +} +item { + name: "/m/012w5l" + id: 7 + display_name: "Ladder" +} +item { + name: "/m/012xff" + id: 8 + display_name: "Toothbrush" +} +item { + name: "/m/012ysf" + id: 9 + display_name: "Syringe" +} +item { + name: "/m/0130jx" + id: 10 + display_name: "Sink" +} +item { + name: "/m/0138tl" + id: 11 + display_name: "Toy" +} +item { + name: "/m/013y1f" + id: 12 + display_name: "Organ" +} +item { + name: "/m/01432t" + id: 13 + display_name: "Cassette deck" +} +item { + name: "/m/014j1m" + id: 14 + display_name: "Apple" +} +item { + name: "/m/014sv8" + id: 15 + display_name: "Human eye" +} +item { + name: "/m/014trl" + id: 16 + display_name: "Cosmetics" +} +item { + name: "/m/014y4n" + id: 17 + 
display_name: "Paddle" +} +item { + name: "/m/0152hh" + id: 18 + display_name: "Snowman" +} +item { + name: "/m/01599" + id: 19 + display_name: "Beer" +} +item { + name: "/m/01_5g" + id: 20 + display_name: "Chopsticks" +} +item { + name: "/m/015h_t" + id: 21 + display_name: "Human beard" +} +item { + name: "/m/015p6" + id: 22 + display_name: "Bird" +} +item { + name: "/m/015qbp" + id: 23 + display_name: "Parking meter" +} +item { + name: "/m/015qff" + id: 24 + display_name: "Traffic light" +} +item { + name: "/m/015wgc" + id: 25 + display_name: "Croissant" +} +item { + name: "/m/015x4r" + id: 26 + display_name: "Cucumber" +} +item { + name: "/m/015x5n" + id: 27 + display_name: "Radish" +} +item { + name: "/m/0162_1" + id: 28 + display_name: "Towel" +} +item { + name: "/m/0167gd" + id: 29 + display_name: "Doll" +} +item { + name: "/m/016m2d" + id: 30 + display_name: "Skull" +} +item { + name: "/m/0174k2" + id: 31 + display_name: "Washing machine" +} +item { + name: "/m/0174n1" + id: 32 + display_name: "Glove" +} +item { + name: "/m/0175cv" + id: 33 + display_name: "Tick" +} +item { + name: "/m/0176mf" + id: 34 + display_name: "Belt" +} +item { + name: "/m/017ftj" + id: 35 + display_name: "Sunglasses" +} +item { + name: "/m/018j2" + id: 36 + display_name: "Banjo" +} +item { + name: "/m/018p4k" + id: 37 + display_name: "Cart" +} +item { + name: "/m/018xm" + id: 38 + display_name: "Ball" +} +item { + name: "/m/01940j" + id: 39 + display_name: "Backpack" +} +item { + name: "/m/0199g" + id: 40 + display_name: "Bicycle" +} +item { + name: "/m/019dx1" + id: 41 + display_name: "Home appliance" +} +item { + name: "/m/019h78" + id: 42 + display_name: "Centipede" +} +item { + name: "/m/019jd" + id: 43 + display_name: "Boat" +} +item { + name: "/m/019w40" + id: 44 + display_name: "Surfboard" +} +item { + name: "/m/01b638" + id: 45 + display_name: "Boot" +} +item { + name: "/m/01b7fy" + id: 46 + display_name: "Headphones" +} +item { + name: "/m/01b9xk" + id: 47 + display_name: "Hot dog" +} +item { + name: "/m/01bfm9" + id: 48 + display_name: "Shorts" +} +item { + name: "/m/01_bhs" + id: 49 + display_name: "Fast food" +} +item { + name: "/m/01bjv" + id: 50 + display_name: "Bus" +} +item { + name: "/m/01bl7v" + id: 51 + display_name: "Boy" +} +item { + name: "/m/01bms0" + id: 52 + display_name: "Screwdriver" +} +item { + name: "/m/01bqk0" + id: 53 + display_name: "Bicycle wheel" +} +item { + name: "/m/01btn" + id: 54 + display_name: "Barge" +} +item { + name: "/m/01c648" + id: 55 + display_name: "Laptop" +} +item { + name: "/m/01cmb2" + id: 56 + display_name: "Miniskirt" +} +item { + name: "/m/01d380" + id: 57 + display_name: "Drill" +} +item { + name: "/m/01d40f" + id: 58 + display_name: "Dress" +} +item { + name: "/m/01dws" + id: 59 + display_name: "Bear" +} +item { + name: "/m/01dwsz" + id: 60 + display_name: "Waffle" +} +item { + name: "/m/01dwwc" + id: 61 + display_name: "Pancake" +} +item { + name: "/m/01dxs" + id: 62 + display_name: "Brown bear" +} +item { + name: "/m/01dy8n" + id: 63 + display_name: "Woodpecker" +} +item { + name: "/m/01f8m5" + id: 64 + display_name: "Blue jay" +} +item { + name: "/m/01f91_" + id: 65 + display_name: "Pretzel" +} +item { + name: "/m/01fb_0" + id: 66 + display_name: "Bagel" +} +item { + name: "/m/01fdzj" + id: 67 + display_name: "Tower" +} +item { + name: "/m/01fh4r" + id: 68 + display_name: "Teapot" +} +item { + name: "/m/01g317" + id: 69 + display_name: "Person" +} +item { + name: "/m/01g3x7" + id: 70 + display_name: "Bow and arrow" +} +item { + name: "/m/01gkx_" 
+ id: 71 + display_name: "Swimwear" +} +item { + name: "/m/01gllr" + id: 72 + display_name: "Beehive" +} +item { + name: "/m/01gmv2" + id: 73 + display_name: "Brassiere" +} +item { + name: "/m/01h3n" + id: 74 + display_name: "Bee" +} +item { + name: "/m/01h44" + id: 75 + display_name: "Bat" +} +item { + name: "/m/01h8tj" + id: 76 + display_name: "Starfish" +} +item { + name: "/m/01hrv5" + id: 77 + display_name: "Popcorn" +} +item { + name: "/m/01j3zr" + id: 78 + display_name: "Burrito" +} +item { + name: "/m/01j4z9" + id: 79 + display_name: "Chainsaw" +} +item { + name: "/m/01j51" + id: 80 + display_name: "Balloon" +} +item { + name: "/m/01j5ks" + id: 81 + display_name: "Wrench" +} +item { + name: "/m/01j61q" + id: 82 + display_name: "Tent" +} +item { + name: "/m/01jfm_" + id: 83 + display_name: "Vehicle registration plate" +} +item { + name: "/m/01jfsr" + id: 84 + display_name: "Lantern" +} +item { + name: "/m/01k6s3" + id: 85 + display_name: "Toaster" +} +item { + name: "/m/01kb5b" + id: 86 + display_name: "Flashlight" +} +item { + name: "/m/01knjb" + id: 87 + display_name: "Billboard" +} +item { + name: "/m/01krhy" + id: 88 + display_name: "Tiara" +} +item { + name: "/m/01lcw4" + id: 89 + display_name: "Limousine" +} +item { + name: "/m/01llwg" + id: 90 + display_name: "Necklace" +} +item { + name: "/m/01lrl" + id: 91 + display_name: "Carnivore" +} +item { + name: "/m/01lsmm" + id: 92 + display_name: "Scissors" +} +item { + name: "/m/01lynh" + id: 93 + display_name: "Stairs" +} +item { + name: "/m/01m2v" + id: 94 + display_name: "Computer keyboard" +} +item { + name: "/m/01m4t" + id: 95 + display_name: "Printer" +} +item { + name: "/m/01mqdt" + id: 96 + display_name: "Traffic sign" +} +item { + name: "/m/01mzpv" + id: 97 + display_name: "Chair" +} +item { + name: "/m/01n4qj" + id: 98 + display_name: "Shirt" +} +item { + name: "/m/01n5jq" + id: 99 + display_name: "Poster" +} +item { + name: "/m/01nkt" + id: 100 + display_name: "Cheese" +} +item { + name: "/m/01nq26" + id: 101 + display_name: "Sock" +} +item { + name: "/m/01pns0" + id: 102 + display_name: "Fire hydrant" +} +item { + name: "/m/01prls" + id: 103 + display_name: "Land vehicle" +} +item { + name: "/m/01r546" + id: 104 + display_name: "Earrings" +} +item { + name: "/m/01rkbr" + id: 105 + display_name: "Tie" +} +item { + name: "/m/01rzcn" + id: 106 + display_name: "Watercraft" +} +item { + name: "/m/01s105" + id: 107 + display_name: "Cabinetry" +} +item { + name: "/m/01s55n" + id: 108 + display_name: "Suitcase" +} +item { + name: "/m/01tcjp" + id: 109 + display_name: "Muffin" +} +item { + name: "/m/01vbnl" + id: 110 + display_name: "Bidet" +} +item { + name: "/m/01ww8y" + id: 111 + display_name: "Snack" +} +item { + name: "/m/01x3jk" + id: 112 + display_name: "Snowmobile" +} +item { + name: "/m/01x3z" + id: 113 + display_name: "Clock" +} +item { + name: "/m/01xgg_" + id: 114 + display_name: "Medical equipment" +} +item { + name: "/m/01xq0k1" + id: 115 + display_name: "Cattle" +} +item { + name: "/m/01xqw" + id: 116 + display_name: "Cello" +} +item { + name: "/m/01xs3r" + id: 117 + display_name: "Jet ski" +} +item { + name: "/m/01x_v" + id: 118 + display_name: "Camel" +} +item { + name: "/m/01xygc" + id: 119 + display_name: "Coat" +} +item { + name: "/m/01xyhv" + id: 120 + display_name: "Suit" +} +item { + name: "/m/01y9k5" + id: 121 + display_name: "Desk" +} +item { + name: "/m/01yrx" + id: 122 + display_name: "Cat" +} +item { + name: "/m/01yx86" + id: 123 + display_name: "Bronze sculpture" +} +item { + name: "/m/01z1kdw" + id: 
124 + display_name: "Juice" +} +item { + name: "/m/02068x" + id: 125 + display_name: "Gondola" +} +item { + name: "/m/020jm" + id: 126 + display_name: "Beetle" +} +item { + name: "/m/020kz" + id: 127 + display_name: "Cannon" +} +item { + name: "/m/020lf" + id: 128 + display_name: "Computer mouse" +} +item { + name: "/m/021mn" + id: 129 + display_name: "Cookie" +} +item { + name: "/m/021sj1" + id: 130 + display_name: "Office building" +} +item { + name: "/m/0220r2" + id: 131 + display_name: "Fountain" +} +item { + name: "/m/0242l" + id: 132 + display_name: "Coin" +} +item { + name: "/m/024d2" + id: 133 + display_name: "Calculator" +} +item { + name: "/m/024g6" + id: 134 + display_name: "Cocktail" +} +item { + name: "/m/02522" + id: 135 + display_name: "Computer monitor" +} +item { + name: "/m/025dyy" + id: 136 + display_name: "Box" +} +item { + name: "/m/025fsf" + id: 137 + display_name: "Stapler" +} +item { + name: "/m/025nd" + id: 138 + display_name: "Christmas tree" +} +item { + name: "/m/025rp__" + id: 139 + display_name: "Cowboy hat" +} +item { + name: "/m/0268lbt" + id: 140 + display_name: "Hiking equipment" +} +item { + name: "/m/026qbn5" + id: 141 + display_name: "Studio couch" +} +item { + name: "/m/026t6" + id: 142 + display_name: "Drum" +} +item { + name: "/m/0270h" + id: 143 + display_name: "Dessert" +} +item { + name: "/m/0271qf7" + id: 144 + display_name: "Wine rack" +} +item { + name: "/m/0271t" + id: 145 + display_name: "Drink" +} +item { + name: "/m/027pcv" + id: 146 + display_name: "Zucchini" +} +item { + name: "/m/027rl48" + id: 147 + display_name: "Ladle" +} +item { + name: "/m/0283dt1" + id: 148 + display_name: "Human mouth" +} +item { + name: "/m/0284d" + id: 149 + display_name: "Dairy" +} +item { + name: "/m/029b3" + id: 150 + display_name: "Dice" +} +item { + name: "/m/029bxz" + id: 151 + display_name: "Oven" +} +item { + name: "/m/029tx" + id: 152 + display_name: "Dinosaur" +} +item { + name: "/m/02bm9n" + id: 153 + display_name: "Ratchet" +} +item { + name: "/m/02crq1" + id: 154 + display_name: "Couch" +} +item { + name: "/m/02ctlc" + id: 155 + display_name: "Cricket ball" +} +item { + name: "/m/02cvgx" + id: 156 + display_name: "Winter melon" +} +item { + name: "/m/02d1br" + id: 157 + display_name: "Spatula" +} +item { + name: "/m/02d9qx" + id: 158 + display_name: "Whiteboard" +} +item { + name: "/m/02ddwp" + id: 159 + display_name: "Pencil sharpener" +} +item { + name: "/m/02dgv" + id: 160 + display_name: "Door" +} +item { + name: "/m/02dl1y" + id: 161 + display_name: "Hat" +} +item { + name: "/m/02f9f_" + id: 162 + display_name: "Shower" +} +item { + name: "/m/02fh7f" + id: 163 + display_name: "Eraser" +} +item { + name: "/m/02fq_6" + id: 164 + display_name: "Fedora" +} +item { + name: "/m/02g30s" + id: 165 + display_name: "Guacamole" +} +item { + name: "/m/02gzp" + id: 166 + display_name: "Dagger" +} +item { + name: "/m/02h19r" + id: 167 + display_name: "Scarf" +} +item { + name: "/m/02hj4" + id: 168 + display_name: "Dolphin" +} +item { + name: "/m/02jfl0" + id: 169 + display_name: "Sombrero" +} +item { + name: "/m/02jnhm" + id: 170 + display_name: "Tin can" +} +item { + name: "/m/02jvh9" + id: 171 + display_name: "Mug" +} +item { + name: "/m/02jz0l" + id: 172 + display_name: "Tap" +} +item { + name: "/m/02l8p9" + id: 173 + display_name: "Harbor seal" +} +item { + name: "/m/02lbcq" + id: 174 + display_name: "Stretcher" +} +item { + name: "/m/02mqfb" + id: 175 + display_name: "Can opener" +} +item { + name: "/m/02_n6y" + id: 176 + display_name: "Goggles" +} 
+item { + name: "/m/02p0tk3" + id: 177 + display_name: "Human body" +} +item { + name: "/m/02p3w7d" + id: 178 + display_name: "Roller skates" +} +item { + name: "/m/02p5f1q" + id: 179 + display_name: "Coffee cup" +} +item { + name: "/m/02pdsw" + id: 180 + display_name: "Cutting board" +} +item { + name: "/m/02pjr4" + id: 181 + display_name: "Blender" +} +item { + name: "/m/02pkr5" + id: 182 + display_name: "Plumbing fixture" +} +item { + name: "/m/02pv19" + id: 183 + display_name: "Stop sign" +} +item { + name: "/m/02rdsp" + id: 184 + display_name: "Office supplies" +} +item { + name: "/m/02rgn06" + id: 185 + display_name: "Volleyball" +} +item { + name: "/m/02s195" + id: 186 + display_name: "Vase" +} +item { + name: "/m/02tsc9" + id: 187 + display_name: "Slow cooker" +} +item { + name: "/m/02vkqh8" + id: 188 + display_name: "Wardrobe" +} +item { + name: "/m/02vqfm" + id: 189 + display_name: "Coffee" +} +item { + name: "/m/02vwcm" + id: 190 + display_name: "Whisk" +} +item { + name: "/m/02w3r3" + id: 191 + display_name: "Paper towel" +} +item { + name: "/m/02w3_ws" + id: 192 + display_name: "Personal care" +} +item { + name: "/m/02wbm" + id: 193 + display_name: "Food" +} +item { + name: "/m/02wbtzl" + id: 194 + display_name: "Sun hat" +} +item { + name: "/m/02wg_p" + id: 195 + display_name: "Tree house" +} +item { + name: "/m/02wmf" + id: 196 + display_name: "Flying disc" +} +item { + name: "/m/02wv6h6" + id: 197 + display_name: "Skirt" +} +item { + name: "/m/02wv84t" + id: 198 + display_name: "Gas stove" +} +item { + name: "/m/02x8cch" + id: 199 + display_name: "Salt and pepper shakers" +} +item { + name: "/m/02x984l" + id: 200 + display_name: "Mechanical fan" +} +item { + name: "/m/02xb7qb" + id: 201 + display_name: "Face powder" +} +item { + name: "/m/02xqq" + id: 202 + display_name: "Fax" +} +item { + name: "/m/02xwb" + id: 203 + display_name: "Fruit" +} +item { + name: "/m/02y6n" + id: 204 + display_name: "French fries" +} +item { + name: "/m/02z51p" + id: 205 + display_name: "Nightstand" +} +item { + name: "/m/02zn6n" + id: 206 + display_name: "Barrel" +} +item { + name: "/m/02zt3" + id: 207 + display_name: "Kite" +} +item { + name: "/m/02zvsm" + id: 208 + display_name: "Tart" +} +item { + name: "/m/030610" + id: 209 + display_name: "Treadmill" +} +item { + name: "/m/0306r" + id: 210 + display_name: "Fox" +} +item { + name: "/m/03120" + id: 211 + display_name: "Flag" +} +item { + name: "/m/0319l" + id: 212 + display_name: "Horn" +} +item { + name: "/m/031b6r" + id: 213 + display_name: "Window blind" +} +item { + name: "/m/031n1" + id: 214 + display_name: "Human foot" +} +item { + name: "/m/0323sq" + id: 215 + display_name: "Golf cart" +} +item { + name: "/m/032b3c" + id: 216 + display_name: "Jacket" +} +item { + name: "/m/033cnk" + id: 217 + display_name: "Egg" +} +item { + name: "/m/033rq4" + id: 218 + display_name: "Street light" +} +item { + name: "/m/0342h" + id: 219 + display_name: "Guitar" +} +item { + name: "/m/034c16" + id: 220 + display_name: "Pillow" +} +item { + name: "/m/035r7c" + id: 221 + display_name: "Human leg" +} +item { + name: "/m/035vxb" + id: 222 + display_name: "Isopod" +} +item { + name: "/m/0388q" + id: 223 + display_name: "Grape" +} +item { + name: "/m/039xj_" + id: 224 + display_name: "Human ear" +} +item { + name: "/m/03bbps" + id: 225 + display_name: "Power plugs and sockets" +} +item { + name: "/m/03bj1" + id: 226 + display_name: "Panda" +} +item { + name: "/m/03bk1" + id: 227 + display_name: "Giraffe" +} +item { + name: "/m/03bt1vf" + id: 228 + 
display_name: "Woman" +} +item { + name: "/m/03c7gz" + id: 229 + display_name: "Door handle" +} +item { + name: "/m/03d443" + id: 230 + display_name: "Rhinoceros" +} +item { + name: "/m/03dnzn" + id: 231 + display_name: "Bathtub" +} +item { + name: "/m/03fj2" + id: 232 + display_name: "Goldfish" +} +item { + name: "/m/03fp41" + id: 233 + display_name: "Houseplant" +} +item { + name: "/m/03fwl" + id: 234 + display_name: "Goat" +} +item { + name: "/m/03g8mr" + id: 235 + display_name: "Baseball bat" +} +item { + name: "/m/03grzl" + id: 236 + display_name: "Baseball glove" +} +item { + name: "/m/03hj559" + id: 237 + display_name: "Mixing bowl" +} +item { + name: "/m/03hl4l9" + id: 238 + display_name: "Marine invertebrates" +} +item { + name: "/m/03hlz0c" + id: 239 + display_name: "Kitchen utensil" +} +item { + name: "/m/03jbxj" + id: 240 + display_name: "Light switch" +} +item { + name: "/m/03jm5" + id: 241 + display_name: "House" +} +item { + name: "/m/03k3r" + id: 242 + display_name: "Horse" +} +item { + name: "/m/03kt2w" + id: 243 + display_name: "Stationary bicycle" +} +item { + name: "/m/03l9g" + id: 244 + display_name: "Hammer" +} +item { + name: "/m/03ldnb" + id: 245 + display_name: "Ceiling fan" +} +item { + name: "/m/03m3pdh" + id: 246 + display_name: "Sofa bed" +} +item { + name: "/m/03m3vtv" + id: 247 + display_name: "Adhesive tape" +} +item { + name: "/m/03m5k" + id: 248 + display_name: "Harp" +} +item { + name: "/m/03nfch" + id: 249 + display_name: "Sandal" +} +item { + name: "/m/03p3bw" + id: 250 + display_name: "Bicycle helmet" +} +item { + name: "/m/03q5c7" + id: 251 + display_name: "Saucer" +} +item { + name: "/m/03q5t" + id: 252 + display_name: "Harpsichord" +} +item { + name: "/m/03q69" + id: 253 + display_name: "Human hair" +} +item { + name: "/m/03qhv5" + id: 254 + display_name: "Heater" +} +item { + name: "/m/03qjg" + id: 255 + display_name: "Harmonica" +} +item { + name: "/m/03qrc" + id: 256 + display_name: "Hamster" +} +item { + name: "/m/03rszm" + id: 257 + display_name: "Curtain" +} +item { + name: "/m/03ssj5" + id: 258 + display_name: "Bed" +} +item { + name: "/m/03s_tn" + id: 259 + display_name: "Kettle" +} +item { + name: "/m/03tw93" + id: 260 + display_name: "Fireplace" +} +item { + name: "/m/03txqz" + id: 261 + display_name: "Scale" +} +item { + name: "/m/03v5tg" + id: 262 + display_name: "Drinking straw" +} +item { + name: "/m/03vt0" + id: 263 + display_name: "Insect" +} +item { + name: "/m/03wvsk" + id: 264 + display_name: "Hair dryer" +} +item { + name: "/m/03_wxk" + id: 265 + display_name: "Kitchenware" +} +item { + name: "/m/03wym" + id: 266 + display_name: "Indoor rower" +} +item { + name: "/m/03xxp" + id: 267 + display_name: "Invertebrate" +} +item { + name: "/m/03y6mg" + id: 268 + display_name: "Food processor" +} +item { + name: "/m/03__z0" + id: 269 + display_name: "Bookcase" +} +item { + name: "/m/040b_t" + id: 270 + display_name: "Refrigerator" +} +item { + name: "/m/04169hn" + id: 271 + display_name: "Wood-burning stove" +} +item { + name: "/m/0420v5" + id: 272 + display_name: "Punching bag" +} +item { + name: "/m/043nyj" + id: 273 + display_name: "Common fig" +} +item { + name: "/m/0440zs" + id: 274 + display_name: "Cocktail shaker" +} +item { + name: "/m/0449p" + id: 275 + display_name: "Jaguar" +} +item { + name: "/m/044r5d" + id: 276 + display_name: "Golf ball" +} +item { + name: "/m/0463sg" + id: 277 + display_name: "Fashion accessory" +} +item { + name: "/m/046dlr" + id: 278 + display_name: "Alarm clock" +} +item { + name: "/m/047j0r" + id: 279 
+ display_name: "Filing cabinet" +} +item { + name: "/m/047v4b" + id: 280 + display_name: "Artichoke" +} +item { + name: "/m/04bcr3" + id: 281 + display_name: "Table" +} +item { + name: "/m/04brg2" + id: 282 + display_name: "Tableware" +} +item { + name: "/m/04c0y" + id: 283 + display_name: "Kangaroo" +} +item { + name: "/m/04cp_" + id: 284 + display_name: "Koala" +} +item { + name: "/m/04ctx" + id: 285 + display_name: "Knife" +} +item { + name: "/m/04dr76w" + id: 286 + display_name: "Bottle" +} +item { + name: "/m/04f5ws" + id: 287 + display_name: "Bottle opener" +} +item { + name: "/m/04g2r" + id: 288 + display_name: "Lynx" +} +item { + name: "/m/04gth" + id: 289 + display_name: "Lavender" +} +item { + name: "/m/04h7h" + id: 290 + display_name: "Lighthouse" +} +item { + name: "/m/04h8sr" + id: 291 + display_name: "Dumbbell" +} +item { + name: "/m/04hgtk" + id: 292 + display_name: "Human head" +} +item { + name: "/m/04kkgm" + id: 293 + display_name: "Bowl" +} +item { + name: "/m/04lvq_" + id: 294 + display_name: "Humidifier" +} +item { + name: "/m/04m6gz" + id: 295 + display_name: "Porch" +} +item { + name: "/m/04m9y" + id: 296 + display_name: "Lizard" +} +item { + name: "/m/04p0qw" + id: 297 + display_name: "Billiard table" +} +item { + name: "/m/04rky" + id: 298 + display_name: "Mammal" +} +item { + name: "/m/04rmv" + id: 299 + display_name: "Mouse" +} +item { + name: "/m/04_sv" + id: 300 + display_name: "Motorcycle" +} +item { + name: "/m/04szw" + id: 301 + display_name: "Musical instrument" +} +item { + name: "/m/04tn4x" + id: 302 + display_name: "Swim cap" +} +item { + name: "/m/04v6l4" + id: 303 + display_name: "Frying pan" +} +item { + name: "/m/04vv5k" + id: 304 + display_name: "Snowplow" +} +item { + name: "/m/04y4h8h" + id: 305 + display_name: "Bathroom cabinet" +} +item { + name: "/m/04ylt" + id: 306 + display_name: "Missile" +} +item { + name: "/m/04yqq2" + id: 307 + display_name: "Bust" +} +item { + name: "/m/04yx4" + id: 308 + display_name: "Man" +} +item { + name: "/m/04z4wx" + id: 309 + display_name: "Waffle iron" +} +item { + name: "/m/04zpv" + id: 310 + display_name: "Milk" +} +item { + name: "/m/04zwwv" + id: 311 + display_name: "Ring binder" +} +item { + name: "/m/050gv4" + id: 312 + display_name: "Plate" +} +item { + name: "/m/050k8" + id: 313 + display_name: "Mobile phone" +} +item { + name: "/m/052lwg6" + id: 314 + display_name: "Baked goods" +} +item { + name: "/m/052sf" + id: 315 + display_name: "Mushroom" +} +item { + name: "/m/05441v" + id: 316 + display_name: "Crutch" +} +item { + name: "/m/054fyh" + id: 317 + display_name: "Pitcher" +} +item { + name: "/m/054_l" + id: 318 + display_name: "Mirror" +} +item { + name: "/m/054xkw" + id: 319 + display_name: "Lifejacket" +} +item { + name: "/m/05_5p_0" + id: 320 + display_name: "Table tennis racket" +} +item { + name: "/m/05676x" + id: 321 + display_name: "Pencil case" +} +item { + name: "/m/057cc" + id: 322 + display_name: "Musical keyboard" +} +item { + name: "/m/057p5t" + id: 323 + display_name: "Scoreboard" +} +item { + name: "/m/0584n8" + id: 324 + display_name: "Briefcase" +} +item { + name: "/m/058qzx" + id: 325 + display_name: "Kitchen knife" +} +item { + name: "/m/05bm6" + id: 326 + display_name: "Nail" +} +item { + name: "/m/05ctyq" + id: 327 + display_name: "Tennis ball" +} +item { + name: "/m/05gqfk" + id: 328 + display_name: "Plastic bag" +} +item { + name: "/m/05kms" + id: 329 + display_name: "Oboe" +} +item { + name: "/m/05kyg_" + id: 330 + display_name: "Chest of drawers" +} +item { + name: 
"/m/05n4y" + id: 331 + display_name: "Ostrich" +} +item { + name: "/m/05r5c" + id: 332 + display_name: "Piano" +} +item { + name: "/m/05r655" + id: 333 + display_name: "Girl" +} +item { + name: "/m/05s2s" + id: 334 + display_name: "Plant" +} +item { + name: "/m/05vtc" + id: 335 + display_name: "Potato" +} +item { + name: "/m/05w9t9" + id: 336 + display_name: "Hair spray" +} +item { + name: "/m/05y5lj" + id: 337 + display_name: "Sports equipment" +} +item { + name: "/m/05z55" + id: 338 + display_name: "Pasta" +} +item { + name: "/m/05z6w" + id: 339 + display_name: "Penguin" +} +item { + name: "/m/05zsy" + id: 340 + display_name: "Pumpkin" +} +item { + name: "/m/061_f" + id: 341 + display_name: "Pear" +} +item { + name: "/m/061hd_" + id: 342 + display_name: "Infant bed" +} +item { + name: "/m/0633h" + id: 343 + display_name: "Polar bear" +} +item { + name: "/m/063rgb" + id: 344 + display_name: "Mixer" +} +item { + name: "/m/0642b4" + id: 345 + display_name: "Cupboard" +} +item { + name: "/m/065h6l" + id: 346 + display_name: "Jacuzzi" +} +item { + name: "/m/0663v" + id: 347 + display_name: "Pizza" +} +item { + name: "/m/06_72j" + id: 348 + display_name: "Digital clock" +} +item { + name: "/m/068zj" + id: 349 + display_name: "Pig" +} +item { + name: "/m/06bt6" + id: 350 + display_name: "Reptile" +} +item { + name: "/m/06c54" + id: 351 + display_name: "Rifle" +} +item { + name: "/m/06c7f7" + id: 352 + display_name: "Lipstick" +} +item { + name: "/m/06_fw" + id: 353 + display_name: "Skateboard" +} +item { + name: "/m/06j2d" + id: 354 + display_name: "Raven" +} +item { + name: "/m/06k2mb" + id: 355 + display_name: "High heels" +} +item { + name: "/m/06l9r" + id: 356 + display_name: "Red panda" +} +item { + name: "/m/06m11" + id: 357 + display_name: "Rose" +} +item { + name: "/m/06mf6" + id: 358 + display_name: "Rabbit" +} +item { + name: "/m/06msq" + id: 359 + display_name: "Sculpture" +} +item { + name: "/m/06ncr" + id: 360 + display_name: "Saxophone" +} +item { + name: "/m/06nrc" + id: 361 + display_name: "Shotgun" +} +item { + name: "/m/06nwz" + id: 362 + display_name: "Seafood" +} +item { + name: "/m/06pcq" + id: 363 + display_name: "Submarine sandwich" +} +item { + name: "/m/06__v" + id: 364 + display_name: "Snowboard" +} +item { + name: "/m/06y5r" + id: 365 + display_name: "Sword" +} +item { + name: "/m/06z37_" + id: 366 + display_name: "Picture frame" +} +item { + name: "/m/07030" + id: 367 + display_name: "Sushi" +} +item { + name: "/m/0703r8" + id: 368 + display_name: "Loveseat" +} +item { + name: "/m/071p9" + id: 369 + display_name: "Ski" +} +item { + name: "/m/071qp" + id: 370 + display_name: "Squirrel" +} +item { + name: "/m/073bxn" + id: 371 + display_name: "Tripod" +} +item { + name: "/m/073g6" + id: 372 + display_name: "Stethoscope" +} +item { + name: "/m/074d1" + id: 373 + display_name: "Submarine" +} +item { + name: "/m/0755b" + id: 374 + display_name: "Scorpion" +} +item { + name: "/m/076bq" + id: 375 + display_name: "Segway" +} +item { + name: "/m/076lb9" + id: 376 + display_name: "Training bench" +} +item { + name: "/m/078jl" + id: 377 + display_name: "Snake" +} +item { + name: "/m/078n6m" + id: 378 + display_name: "Coffee table" +} +item { + name: "/m/079cl" + id: 379 + display_name: "Skyscraper" +} +item { + name: "/m/07bgp" + id: 380 + display_name: "Sheep" +} +item { + name: "/m/07c52" + id: 381 + display_name: "Television" +} +item { + name: "/m/07c6l" + id: 382 + display_name: "Trombone" +} +item { + name: "/m/07clx" + id: 383 + display_name: "Tea" +} +item { + name: 
"/m/07cmd" + id: 384 + display_name: "Tank" +} +item { + name: "/m/07crc" + id: 385 + display_name: "Taco" +} +item { + name: "/m/07cx4" + id: 386 + display_name: "Telephone" +} +item { + name: "/m/07dd4" + id: 387 + display_name: "Torch" +} +item { + name: "/m/07dm6" + id: 388 + display_name: "Tiger" +} +item { + name: "/m/07fbm7" + id: 389 + display_name: "Strawberry" +} +item { + name: "/m/07gql" + id: 390 + display_name: "Trumpet" +} +item { + name: "/m/07j7r" + id: 391 + display_name: "Tree" +} +item { + name: "/m/07j87" + id: 392 + display_name: "Tomato" +} +item { + name: "/m/07jdr" + id: 393 + display_name: "Train" +} +item { + name: "/m/07k1x" + id: 394 + display_name: "Tool" +} +item { + name: "/m/07kng9" + id: 395 + display_name: "Picnic basket" +} +item { + name: "/m/07mcwg" + id: 396 + display_name: "Cooking spray" +} +item { + name: "/m/07mhn" + id: 397 + display_name: "Trousers" +} +item { + name: "/m/07pj7bq" + id: 398 + display_name: "Bowling equipment" +} +item { + name: "/m/07qxg_" + id: 399 + display_name: "Football helmet" +} +item { + name: "/m/07r04" + id: 400 + display_name: "Truck" +} +item { + name: "/m/07v9_z" + id: 401 + display_name: "Measuring cup" +} +item { + name: "/m/07xyvk" + id: 402 + display_name: "Coffeemaker" +} +item { + name: "/m/07y_7" + id: 403 + display_name: "Violin" +} +item { + name: "/m/07yv9" + id: 404 + display_name: "Vehicle" +} +item { + name: "/m/080hkjn" + id: 405 + display_name: "Handbag" +} +item { + name: "/m/080n7g" + id: 406 + display_name: "Paper cutter" +} +item { + name: "/m/081qc" + id: 407 + display_name: "Wine" +} +item { + name: "/m/083kb" + id: 408 + display_name: "Weapon" +} +item { + name: "/m/083wq" + id: 409 + display_name: "Wheel" +} +item { + name: "/m/084hf" + id: 410 + display_name: "Worm" +} +item { + name: "/m/084rd" + id: 411 + display_name: "Wok" +} +item { + name: "/m/084zz" + id: 412 + display_name: "Whale" +} +item { + name: "/m/0898b" + id: 413 + display_name: "Zebra" +} +item { + name: "/m/08dz3q" + id: 414 + display_name: "Auto part" +} +item { + name: "/m/08hvt4" + id: 415 + display_name: "Jug" +} +item { + name: "/m/08ks85" + id: 416 + display_name: "Pizza cutter" +} +item { + name: "/m/08p92x" + id: 417 + display_name: "Cream" +} +item { + name: "/m/08pbxl" + id: 418 + display_name: "Monkey" +} +item { + name: "/m/096mb" + id: 419 + display_name: "Lion" +} +item { + name: "/m/09728" + id: 420 + display_name: "Bread" +} +item { + name: "/m/099ssp" + id: 421 + display_name: "Platter" +} +item { + name: "/m/09b5t" + id: 422 + display_name: "Chicken" +} +item { + name: "/m/09csl" + id: 423 + display_name: "Eagle" +} +item { + name: "/m/09ct_" + id: 424 + display_name: "Helicopter" +} +item { + name: "/m/09d5_" + id: 425 + display_name: "Owl" +} +item { + name: "/m/09ddx" + id: 426 + display_name: "Duck" +} +item { + name: "/m/09dzg" + id: 427 + display_name: "Turtle" +} +item { + name: "/m/09f20" + id: 428 + display_name: "Hippopotamus" +} +item { + name: "/m/09f_2" + id: 429 + display_name: "Crocodile" +} +item { + name: "/m/09g1w" + id: 430 + display_name: "Toilet" +} +item { + name: "/m/09gtd" + id: 431 + display_name: "Toilet paper" +} +item { + name: "/m/09gys" + id: 432 + display_name: "Squid" +} +item { + name: "/m/09j2d" + id: 433 + display_name: "Clothing" +} +item { + name: "/m/09j5n" + id: 434 + display_name: "Footwear" +} +item { + name: "/m/09k_b" + id: 435 + display_name: "Lemon" +} +item { + name: "/m/09kmb" + id: 436 + display_name: "Spider" +} +item { + name: "/m/09kx5" + id: 437 + 
display_name: "Deer" +} +item { + name: "/m/09ld4" + id: 438 + display_name: "Frog" +} +item { + name: "/m/09qck" + id: 439 + display_name: "Banana" +} +item { + name: "/m/09rvcxw" + id: 440 + display_name: "Rocket" +} +item { + name: "/m/09tvcd" + id: 441 + display_name: "Wine glass" +} +item { + name: "/m/0b3fp9" + id: 442 + display_name: "Countertop" +} +item { + name: "/m/0bh9flk" + id: 443 + display_name: "Tablet computer" +} +item { + name: "/m/0bjyj5" + id: 444 + display_name: "Waste container" +} +item { + name: "/m/0b_rs" + id: 445 + display_name: "Swimming pool" +} +item { + name: "/m/0bt9lr" + id: 446 + display_name: "Dog" +} +item { + name: "/m/0bt_c3" + id: 447 + display_name: "Book" +} +item { + name: "/m/0bwd_0j" + id: 448 + display_name: "Elephant" +} +item { + name: "/m/0by6g" + id: 449 + display_name: "Shark" +} +item { + name: "/m/0c06p" + id: 450 + display_name: "Candle" +} +item { + name: "/m/0c29q" + id: 451 + display_name: "Leopard" +} +item { + name: "/m/0c2jj" + id: 452 + display_name: "Axe" +} +item { + name: "/m/0c3m8g" + id: 453 + display_name: "Hand dryer" +} +item { + name: "/m/0c3mkw" + id: 454 + display_name: "Soap dispenser" +} +item { + name: "/m/0c568" + id: 455 + display_name: "Porcupine" +} +item { + name: "/m/0c9ph5" + id: 456 + display_name: "Flower" +} +item { + name: "/m/0ccs93" + id: 457 + display_name: "Canary" +} +item { + name: "/m/0cd4d" + id: 458 + display_name: "Cheetah" +} +item { + name: "/m/0cdl1" + id: 459 + display_name: "Palm tree" +} +item { + name: "/m/0cdn1" + id: 460 + display_name: "Hamburger" +} +item { + name: "/m/0cffdh" + id: 461 + display_name: "Maple" +} +item { + name: "/m/0cgh4" + id: 462 + display_name: "Building" +} +item { + name: "/m/0ch_cf" + id: 463 + display_name: "Fish" +} +item { + name: "/m/0cjq5" + id: 464 + display_name: "Lobster" +} +item { + name: "/m/0cjs7" + id: 465 + display_name: "Asparagus" +} +item { + name: "/m/0c_jw" + id: 466 + display_name: "Furniture" +} +item { + name: "/m/0cl4p" + id: 467 + display_name: "Hedgehog" +} +item { + name: "/m/0cmf2" + id: 468 + display_name: "Airplane" +} +item { + name: "/m/0cmx8" + id: 469 + display_name: "Spoon" +} +item { + name: "/m/0cn6p" + id: 470 + display_name: "Otter" +} +item { + name: "/m/0cnyhnx" + id: 471 + display_name: "Bull" +} +item { + name: "/m/0_cp5" + id: 472 + display_name: "Oyster" +} +item { + name: "/m/0cqn2" + id: 473 + display_name: "Horizontal bar" +} +item { + name: "/m/0crjs" + id: 474 + display_name: "Convenience store" +} +item { + name: "/m/0ct4f" + id: 475 + display_name: "Bomb" +} +item { + name: "/m/0cvnqh" + id: 476 + display_name: "Bench" +} +item { + name: "/m/0cxn2" + id: 477 + display_name: "Ice cream" +} +item { + name: "/m/0cydv" + id: 478 + display_name: "Caterpillar" +} +item { + name: "/m/0cyf8" + id: 479 + display_name: "Butterfly" +} +item { + name: "/m/0cyfs" + id: 480 + display_name: "Parachute" +} +item { + name: "/m/0cyhj_" + id: 481 + display_name: "Orange" +} +item { + name: "/m/0czz2" + id: 482 + display_name: "Antelope" +} +item { + name: "/m/0d20w4" + id: 483 + display_name: "Beaker" +} +item { + name: "/m/0d_2m" + id: 484 + display_name: "Moths and butterflies" +} +item { + name: "/m/0d4v4" + id: 485 + display_name: "Window" +} +item { + name: "/m/0d4w1" + id: 486 + display_name: "Closet" +} +item { + name: "/m/0d5gx" + id: 487 + display_name: "Castle" +} +item { + name: "/m/0d8zb" + id: 488 + display_name: "Jellyfish" +} +item { + name: "/m/0dbvp" + id: 489 + display_name: "Goose" +} +item { + name: "/m/0dbzx" 
+ id: 490 + display_name: "Mule" +} +item { + name: "/m/0dftk" + id: 491 + display_name: "Swan" +} +item { + name: "/m/0dj6p" + id: 492 + display_name: "Peach" +} +item { + name: "/m/0djtd" + id: 493 + display_name: "Coconut" +} +item { + name: "/m/0dkzw" + id: 494 + display_name: "Seat belt" +} +item { + name: "/m/0dq75" + id: 495 + display_name: "Raccoon" +} +item { + name: "/m/0_dqb" + id: 496 + display_name: "Chisel" +} +item { + name: "/m/0dt3t" + id: 497 + display_name: "Fork" +} +item { + name: "/m/0dtln" + id: 498 + display_name: "Lamp" +} +item { + name: "/m/0dv5r" + id: 499 + display_name: "Camera" +} +item { + name: "/m/0dv77" + id: 500 + display_name: "Squash" +} +item { + name: "/m/0dv9c" + id: 501 + display_name: "Racket" +} +item { + name: "/m/0dzct" + id: 502 + display_name: "Human face" +} +item { + name: "/m/0dzf4" + id: 503 + display_name: "Human arm" +} +item { + name: "/m/0f4s2w" + id: 504 + display_name: "Vegetable" +} +item { + name: "/m/0f571" + id: 505 + display_name: "Diaper" +} +item { + name: "/m/0f6nr" + id: 506 + display_name: "Unicycle" +} +item { + name: "/m/0f6wt" + id: 507 + display_name: "Falcon" +} +item { + name: "/m/0f8s22" + id: 508 + display_name: "Chime" +} +item { + name: "/m/0f9_l" + id: 509 + display_name: "Snail" +} +item { + name: "/m/0fbdv" + id: 510 + display_name: "Shellfish" +} +item { + name: "/m/0fbw6" + id: 511 + display_name: "Cabbage" +} +item { + name: "/m/0fj52s" + id: 512 + display_name: "Carrot" +} +item { + name: "/m/0fldg" + id: 513 + display_name: "Mango" +} +item { + name: "/m/0fly7" + id: 514 + display_name: "Jeans" +} +item { + name: "/m/0fm3zh" + id: 515 + display_name: "Flowerpot" +} +item { + name: "/m/0fp6w" + id: 516 + display_name: "Pineapple" +} +item { + name: "/m/0fqfqc" + id: 517 + display_name: "Drawer" +} +item { + name: "/m/0fqt361" + id: 518 + display_name: "Stool" +} +item { + name: "/m/0frqm" + id: 519 + display_name: "Envelope" +} +item { + name: "/m/0fszt" + id: 520 + display_name: "Cake" +} +item { + name: "/m/0ft9s" + id: 521 + display_name: "Dragonfly" +} +item { + name: "/m/0ftb8" + id: 522 + display_name: "Sunflower" +} +item { + name: "/m/0fx9l" + id: 523 + display_name: "Microwave oven" +} +item { + name: "/m/0fz0h" + id: 524 + display_name: "Honeycomb" +} +item { + name: "/m/0gd2v" + id: 525 + display_name: "Marine mammal" +} +item { + name: "/m/0gd36" + id: 526 + display_name: "Sea lion" +} +item { + name: "/m/0gj37" + id: 527 + display_name: "Ladybug" +} +item { + name: "/m/0gjbg72" + id: 528 + display_name: "Shelf" +} +item { + name: "/m/0gjkl" + id: 529 + display_name: "Watch" +} +item { + name: "/m/0gm28" + id: 530 + display_name: "Candy" +} +item { + name: "/m/0grw1" + id: 531 + display_name: "Salad" +} +item { + name: "/m/0gv1x" + id: 532 + display_name: "Parrot" +} +item { + name: "/m/0gxl3" + id: 533 + display_name: "Handgun" +} +item { + name: "/m/0h23m" + id: 534 + display_name: "Sparrow" +} +item { + name: "/m/0h2r6" + id: 535 + display_name: "Van" +} +item { + name: "/m/0h8jyh6" + id: 536 + display_name: "Grinder" +} +item { + name: "/m/0h8kx63" + id: 537 + display_name: "Spice rack" +} +item { + name: "/m/0h8l4fh" + id: 538 + display_name: "Light bulb" +} +item { + name: "/m/0h8lkj8" + id: 539 + display_name: "Corded phone" +} +item { + name: "/m/0h8mhzd" + id: 540 + display_name: "Sports uniform" +} +item { + name: "/m/0h8my_4" + id: 541 + display_name: "Tennis racket" +} +item { + name: "/m/0h8mzrc" + id: 542 + display_name: "Wall clock" +} +item { + name: "/m/0h8n27j" + id: 543 + 
display_name: "Serving tray" +} +item { + name: "/m/0h8n5zk" + id: 544 + display_name: "Kitchen & dining room table" +} +item { + name: "/m/0h8n6f9" + id: 545 + display_name: "Dog bed" +} +item { + name: "/m/0h8n6ft" + id: 546 + display_name: "Cake stand" +} +item { + name: "/m/0h8nm9j" + id: 547 + display_name: "Cat furniture" +} +item { + name: "/m/0h8nr_l" + id: 548 + display_name: "Bathroom accessory" +} +item { + name: "/m/0h8nsvg" + id: 549 + display_name: "Facial tissue holder" +} +item { + name: "/m/0h8ntjv" + id: 550 + display_name: "Pressure cooker" +} +item { + name: "/m/0h99cwc" + id: 551 + display_name: "Kitchen appliance" +} +item { + name: "/m/0h9mv" + id: 552 + display_name: "Tire" +} +item { + name: "/m/0hdln" + id: 553 + display_name: "Ruler" +} +item { + name: "/m/0hf58v5" + id: 554 + display_name: "Luggage and bags" +} +item { + name: "/m/0hg7b" + id: 555 + display_name: "Microphone" +} +item { + name: "/m/0hkxq" + id: 556 + display_name: "Broccoli" +} +item { + name: "/m/0hnnb" + id: 557 + display_name: "Umbrella" +} +item { + name: "/m/0hnyx" + id: 558 + display_name: "Pastry" +} +item { + name: "/m/0hqkz" + id: 559 + display_name: "Grapefruit" +} +item { + name: "/m/0j496" + id: 560 + display_name: "Band-aid" +} +item { + name: "/m/0jbk" + id: 561 + display_name: "Animal" +} +item { + name: "/m/0jg57" + id: 562 + display_name: "Bell pepper" +} +item { + name: "/m/0jly1" + id: 563 + display_name: "Turkey" +} +item { + name: "/m/0jqgx" + id: 564 + display_name: "Lily" +} +item { + name: "/m/0jwn_" + id: 565 + display_name: "Pomegranate" +} +item { + name: "/m/0jy4k" + id: 566 + display_name: "Doughnut" +} +item { + name: "/m/0jyfg" + id: 567 + display_name: "Glasses" +} +item { + name: "/m/0k0pj" + id: 568 + display_name: "Human nose" +} +item { + name: "/m/0k1tl" + id: 569 + display_name: "Pen" +} +item { + name: "/m/0_k2" + id: 570 + display_name: "Ant" +} +item { + name: "/m/0k4j" + id: 571 + display_name: "Car" +} +item { + name: "/m/0k5j" + id: 572 + display_name: "Aircraft" +} +item { + name: "/m/0k65p" + id: 573 + display_name: "Human hand" +} +item { + name: "/m/0km7z" + id: 574 + display_name: "Skunk" +} +item { + name: "/m/0kmg4" + id: 575 + display_name: "Teddy bear" +} +item { + name: "/m/0kpqd" + id: 576 + display_name: "Watermelon" +} +item { + name: "/m/0kpt_" + id: 577 + display_name: "Cantaloupe" +} +item { + name: "/m/0ky7b" + id: 578 + display_name: "Dishwasher" +} +item { + name: "/m/0l14j_" + id: 579 + display_name: "Flute" +} +item { + name: "/m/0l3ms" + id: 580 + display_name: "Balance beam" +} +item { + name: "/m/0l515" + id: 581 + display_name: "Sandwich" +} +item { + name: "/m/0ll1f78" + id: 582 + display_name: "Shrimp" +} +item { + name: "/m/0llzx" + id: 583 + display_name: "Sewing machine" +} +item { + name: "/m/0lt4_" + id: 584 + display_name: "Binoculars" +} +item { + name: "/m/0m53l" + id: 585 + display_name: "Rays and skates" +} +item { + name: "/m/0mcx2" + id: 586 + display_name: "Ipod" +} +item { + name: "/m/0mkg" + id: 587 + display_name: "Accordion" +} +item { + name: "/m/0mw_6" + id: 588 + display_name: "Willow" +} +item { + name: "/m/0n28_" + id: 589 + display_name: "Crab" +} +item { + name: "/m/0nl46" + id: 590 + display_name: "Crown" +} +item { + name: "/m/0nybt" + id: 591 + display_name: "Seahorse" +} +item { + name: "/m/0p833" + id: 592 + display_name: "Perfume" +} +item { + name: "/m/0pcr" + id: 593 + display_name: "Alpaca" +} +item { + name: "/m/0pg52" + id: 594 + display_name: "Taxi" +} +item { + name: "/m/0ph39" + id: 595 + 
display_name: "Canoe" +} +item { + name: "/m/0qjjc" + id: 596 + display_name: "Remote control" +} +item { + name: "/m/0qmmr" + id: 597 + display_name: "Wheelchair" +} +item { + name: "/m/0wdt60w" + id: 598 + display_name: "Rugby ball" +} +item { + name: "/m/0xfy" + id: 599 + display_name: "Armadillo" +} +item { + name: "/m/0xzly" + id: 600 + display_name: "Maracas" +} +item { + name: "/m/0zvk5" + id: 601 + display_name: "Helmet" +} diff --git a/models/research/object_detection/data/pascal_label_map.pbtxt b/models/research/object_detection/data/pascal_label_map.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..c9e9e2affcd73ae5cb272a51b44306a74cf22eea --- /dev/null +++ b/models/research/object_detection/data/pascal_label_map.pbtxt @@ -0,0 +1,99 @@ +item { + id: 1 + name: 'aeroplane' +} + +item { + id: 2 + name: 'bicycle' +} + +item { + id: 3 + name: 'bird' +} + +item { + id: 4 + name: 'boat' +} + +item { + id: 5 + name: 'bottle' +} + +item { + id: 6 + name: 'bus' +} + +item { + id: 7 + name: 'car' +} + +item { + id: 8 + name: 'cat' +} + +item { + id: 9 + name: 'chair' +} + +item { + id: 10 + name: 'cow' +} + +item { + id: 11 + name: 'diningtable' +} + +item { + id: 12 + name: 'dog' +} + +item { + id: 13 + name: 'horse' +} + +item { + id: 14 + name: 'motorbike' +} + +item { + id: 15 + name: 'person' +} + +item { + id: 16 + name: 'pottedplant' +} + +item { + id: 17 + name: 'sheep' +} + +item { + id: 18 + name: 'sofa' +} + +item { + id: 19 + name: 'train' +} + +item { + id: 20 + name: 'tvmonitor' +} diff --git a/models/research/object_detection/data/pet_label_map.pbtxt b/models/research/object_detection/data/pet_label_map.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..54d7d3518941ceb0d2dc3465bdf702d4eaac3f07 --- /dev/null +++ b/models/research/object_detection/data/pet_label_map.pbtxt @@ -0,0 +1,184 @@ +item { + id: 1 + name: 'Abyssinian' +} + +item { + id: 2 + name: 'american_bulldog' +} + +item { + id: 3 + name: 'american_pit_bull_terrier' +} + +item { + id: 4 + name: 'basset_hound' +} + +item { + id: 5 + name: 'beagle' +} + +item { + id: 6 + name: 'Bengal' +} + +item { + id: 7 + name: 'Birman' +} + +item { + id: 8 + name: 'Bombay' +} + +item { + id: 9 + name: 'boxer' +} + +item { + id: 10 + name: 'British_Shorthair' +} + +item { + id: 11 + name: 'chihuahua' +} + +item { + id: 12 + name: 'Egyptian_Mau' +} + +item { + id: 13 + name: 'english_cocker_spaniel' +} + +item { + id: 14 + name: 'english_setter' +} + +item { + id: 15 + name: 'german_shorthaired' +} + +item { + id: 16 + name: 'great_pyrenees' +} + +item { + id: 17 + name: 'havanese' +} + +item { + id: 18 + name: 'japanese_chin' +} + +item { + id: 19 + name: 'keeshond' +} + +item { + id: 20 + name: 'leonberger' +} + +item { + id: 21 + name: 'Maine_Coon' +} + +item { + id: 22 + name: 'miniature_pinscher' +} + +item { + id: 23 + name: 'newfoundland' +} + +item { + id: 24 + name: 'Persian' +} + +item { + id: 25 + name: 'pomeranian' +} + +item { + id: 26 + name: 'pug' +} + +item { + id: 27 + name: 'Ragdoll' +} + +item { + id: 28 + name: 'Russian_Blue' +} + +item { + id: 29 + name: 'saint_bernard' +} + +item { + id: 30 + name: 'samoyed' +} + +item { + id: 31 + name: 'scottish_terrier' +} + +item { + id: 32 + name: 'shiba_inu' +} + +item { + id: 33 + name: 'Siamese' +} + +item { + id: 34 + name: 'Sphynx' +} + +item { + id: 35 + name: 'staffordshire_bull_terrier' +} + +item { + id: 36 + name: 'wheaten_terrier' +} + +item { + id: 37 + name: 'yorkshire_terrier' +} diff --git 
a/models/research/object_detection/data/snapshot_serengeti_label_map.pbtxt b/models/research/object_detection/data/snapshot_serengeti_label_map.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..57555d179f968e479557fbec940d7dce4252d764 --- /dev/null +++ b/models/research/object_detection/data/snapshot_serengeti_label_map.pbtxt @@ -0,0 +1,240 @@ +item { + id: 1 + name: 'human' +} + +item { + id: 2 + name: 'gazelleGrants' +} + +item { + id: 3 + name: 'reedbuck' +} + +item { + id: 4 + name: 'dikDik' +} + +item { + id: 5 + name: 'zebra' +} + +item { + id: 6 + name: 'porcupine' +} + +item { + id: 7 + name: 'gazelleThomsons' +} + +item { + id: 8 + name: 'hyenaSpotted' +} + +item { + id: 9 + name: 'warthog' +} + +item { + id: 10 + name: 'impala' +} + +item { + id: 11 + name: 'elephant' +} + +item { + id: 12 + name: 'giraffe' +} + +item { + id: 13 + name: 'mongoose' +} + +item { + id: 14 + name: 'buffalo' +} + +item { + id: 15 + name: 'hartebeest' +} + +item { + id: 16 + name: 'guineaFowl' +} + +item { + id: 17 + name: 'wildebeest' +} + +item { + id: 18 + name: 'leopard' +} + +item { + id: 19 + name: 'ostrich' +} + +item { + id: 20 + name: 'lionFemale' +} + +item { + id: 21 + name: 'koriBustard' +} + +item { + id: 22 + name: 'otherBird' +} + +item { + id: 23 + name: 'batEaredFox' +} + +item { + id: 24 + name: 'bushbuck' +} + +item { + id: 25 + name: 'jackal' +} + +item { + id: 26 + name: 'cheetah' +} + +item { + id: 27 + name: 'eland' +} + +item { + id: 28 + name: 'aardwolf' +} + +item { + id: 29 + name: 'hippopotamus' +} + +item { + id: 30 + name: 'hyenaStriped' +} + +item { + id: 31 + name: 'aardvark' +} + +item { + id: 32 + name: 'hare' +} + +item { + id: 33 + name: 'baboon' +} + +item { + id: 34 + name: 'vervetMonkey' +} + +item { + id: 35 + name: 'waterbuck' +} + +item { + id: 36 + name: 'secretaryBird' +} + +item { + id: 37 + name: 'serval' +} + +item { + id: 38 + name: 'lionMale' +} + +item { + id: 39 + name: 'topi' +} + +item { + id: 40 + name: 'honeyBadger' +} + +item { + id: 41 + name: 'rodents' +} + +item { + id: 42 + name: 'wildcat' +} + +item { + id: 43 + name: 'civet' +} + +item { + id: 44 + name: 'genet' +} + +item { + id: 45 + name: 'caracal' +} + +item { + id: 46 + name: 'rhinoceros' +} + +item { + id: 47 + name: 'reptiles' +} + +item { + id: 48 + name: 'zorilla' +} + diff --git a/models/research/object_detection/data_decoders/__init__.py b/models/research/object_detection/data_decoders/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/object_detection/data_decoders/tf_example_decoder.py b/models/research/object_detection/data_decoders/tf_example_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..bd1fa2c771ec61a0ebc438392a966c08aff6faad --- /dev/null +++ b/models/research/object_detection/data_decoders/tf_example_decoder.py @@ -0,0 +1,764 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tensorflow Example proto decoder for object detection. + +A decoder to decode string tensors containing serialized tensorflow.Example +protos for object detection. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import enum +import numpy as np +from six.moves import zip +import tensorflow.compat.v1 as tf +from tf_slim import tfexample_decoder as slim_example_decoder +from object_detection.core import data_decoder +from object_detection.core import standard_fields as fields +from object_detection.protos import input_reader_pb2 +from object_detection.utils import label_map_util + +# pylint: disable=g-import-not-at-top +try: + from tensorflow.contrib import lookup as contrib_lookup + +except ImportError: + # TF 2.0 doesn't ship with contrib. + pass +# pylint: enable=g-import-not-at-top + +_LABEL_OFFSET = 1 + + +class Visibility(enum.Enum): + """Visibility definitions. + + This follows the MS Coco convention (http://cocodataset.org/#format-data). + """ + # Keypoint is not labeled. + UNLABELED = 0 + # Keypoint is labeled but falls outside the object segment (e.g. occluded). + NOT_VISIBLE = 1 + # Keypoint is labeled and visible. + VISIBLE = 2 + + +class _ClassTensorHandler(slim_example_decoder.Tensor): + """An ItemHandler to fetch class ids from class text.""" + + def __init__(self, + tensor_key, + label_map_proto_file, + shape_keys=None, + shape=None, + default_value=''): + """Initializes the LookupTensor handler. + + Simply calls a vocabulary (most often, a label mapping) lookup. + + Args: + tensor_key: the name of the `TFExample` feature to read the tensor from. + label_map_proto_file: File path to a text format LabelMapProto message + mapping class text to id. + shape_keys: Optional name or list of names of the TF-Example feature in + which the tensor shape is stored. If a list, then each corresponds to + one dimension of the shape. + shape: Optional output shape of the `Tensor`. If provided, the `Tensor` is + reshaped accordingly. + default_value: The value used when the `tensor_key` is not found in a + particular `TFExample`. + + Raises: + ValueError: if both `shape_keys` and `shape` are specified. + """ + name_to_id = label_map_util.get_label_map_dict( + label_map_proto_file, use_display_name=False) + # We use a default_value of -1, but we expect all labels to be contained + # in the label map. + try: + # Dynamically try to load the tf v2 lookup, falling back to contrib + lookup = tf.compat.v2.lookup + hash_table_class = tf.compat.v2.lookup.StaticHashTable + except AttributeError: + lookup = contrib_lookup + hash_table_class = contrib_lookup.HashTable + name_to_id_table = hash_table_class( + initializer=lookup.KeyValueTensorInitializer( + keys=tf.constant(list(name_to_id.keys())), + values=tf.constant(list(name_to_id.values()), dtype=tf.int64)), + default_value=-1) + display_name_to_id = label_map_util.get_label_map_dict( + label_map_proto_file, use_display_name=True) + # We use a default_value of -1, but we expect all labels to be contained + # in the label map. 
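Both lookup tables built in this constructor follow the same pattern: a static hash table keyed by class text, with -1 as the default for anything missing from the label map. A minimal standalone sketch of that pattern, assuming the TF 2.x tf.lookup API preferred by the try/except above (the toy mapping below is illustrative, not taken from this repository):

import tensorflow as tf

toy_name_to_id = {'/m/01yrx': 122, '/m/0bt9lr': 446}  # tiny subset of a label map
table = tf.lookup.StaticHashTable(
    tf.lookup.KeyValueTensorInitializer(
        keys=tf.constant(list(toy_name_to_id.keys())),
        values=tf.constant(list(toy_name_to_id.values()), dtype=tf.int64)),
    default_value=-1)

# Known class text maps to its id; anything else falls back to the -1 default,
# mirroring the behaviour the comment above describes.
print(table.lookup(tf.constant(['/m/01yrx', '/m/does_not_exist'])))  # [122 -1]
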
+ display_name_to_id_table = hash_table_class( + initializer=lookup.KeyValueTensorInitializer( + keys=tf.constant(list(display_name_to_id.keys())), + values=tf.constant( + list(display_name_to_id.values()), dtype=tf.int64)), + default_value=-1) + + self._name_to_id_table = name_to_id_table + self._display_name_to_id_table = display_name_to_id_table + super(_ClassTensorHandler, self).__init__(tensor_key, shape_keys, shape, + default_value) + + def tensors_to_item(self, keys_to_tensors): + unmapped_tensor = super(_ClassTensorHandler, + self).tensors_to_item(keys_to_tensors) + return tf.maximum(self._name_to_id_table.lookup(unmapped_tensor), + self._display_name_to_id_table.lookup(unmapped_tensor)) + + +class _BackupHandler(slim_example_decoder.ItemHandler): + """An ItemHandler that tries two ItemHandlers in order.""" + + def __init__(self, handler, backup): + """Initializes the BackupHandler handler. + + If the first Handler's tensors_to_item returns a Tensor with no elements, + the second Handler is used. + + Args: + handler: The primary ItemHandler. + backup: The backup ItemHandler. + + Raises: + ValueError: if either is not an ItemHandler. + """ + if not isinstance(handler, slim_example_decoder.ItemHandler): + raise ValueError('Primary handler is of type %s instead of ItemHandler' % + type(handler)) + if not isinstance(backup, slim_example_decoder.ItemHandler): + raise ValueError( + 'Backup handler is of type %s instead of ItemHandler' % type(backup)) + self._handler = handler + self._backup = backup + super(_BackupHandler, self).__init__(handler.keys + backup.keys) + + def tensors_to_item(self, keys_to_tensors): + item = self._handler.tensors_to_item(keys_to_tensors) + return tf.cond( + pred=tf.equal(tf.reduce_prod(tf.shape(item)), 0), + true_fn=lambda: self._backup.tensors_to_item(keys_to_tensors), + false_fn=lambda: item) + + +class TfExampleDecoder(data_decoder.DataDecoder): + """Tensorflow Example proto decoder.""" + + def __init__(self, + load_instance_masks=False, + instance_mask_type=input_reader_pb2.NUMERICAL_MASKS, + label_map_proto_file=None, + use_display_name=False, + dct_method='', + num_keypoints=0, + num_additional_channels=0, + load_multiclass_scores=False, + load_context_features=False, + expand_hierarchy_labels=False): + """Constructor sets keys_to_features and items_to_handlers. + + Args: + load_instance_masks: whether or not to load and handle instance masks. + instance_mask_type: type of instance masks. Options are provided in + input_reader.proto. This is only used if `load_instance_masks` is True. + label_map_proto_file: a file path to a + object_detection.protos.StringIntLabelMap proto. If provided, then the + mapped IDs of 'image/object/class/text' will take precedence over the + existing 'image/object/class/label' ID. Also, if provided, it is + assumed that 'image/object/class/text' will be in the data. + use_display_name: whether or not to use the `display_name` for label + mapping (instead of `name`). Only used if label_map_proto_file is + provided. + dct_method: An optional string. Defaults to None. It only takes + effect when image format is jpeg, used to specify a hint about the + algorithm used for jpeg decompression. Currently valid values + are ['INTEGER_FAST', 'INTEGER_ACCURATE']. The hint may be ignored, for + example, the jpeg library does not have that specific option. + num_keypoints: the number of keypoints per object. + num_additional_channels: how many additional channels to use. 
+ load_multiclass_scores: Whether to load multiclass scores associated with + boxes. + load_context_features: Whether to load information from context_features, + to provide additional context to a detection model for training and/or + inference. + expand_hierarchy_labels: Expands the object and image labels taking into + account the provided hierarchy in the label_map_proto_file. For positive + classes, the labels are extended to ancestor. For negative classes, + the labels are expanded to descendants. + + Raises: + ValueError: If `instance_mask_type` option is not one of + input_reader_pb2.DEFAULT, input_reader_pb2.NUMERICAL, or + input_reader_pb2.PNG_MASKS. + ValueError: If `expand_labels_hierarchy` is True, but the + `label_map_proto_file` is not provided. + """ + # TODO(rathodv): delete unused `use_display_name` argument once we change + # other decoders to handle label maps similarly. + del use_display_name + self.keys_to_features = { + 'image/encoded': + tf.FixedLenFeature((), tf.string, default_value=''), + 'image/format': + tf.FixedLenFeature((), tf.string, default_value='jpeg'), + 'image/filename': + tf.FixedLenFeature((), tf.string, default_value=''), + 'image/key/sha256': + tf.FixedLenFeature((), tf.string, default_value=''), + 'image/source_id': + tf.FixedLenFeature((), tf.string, default_value=''), + 'image/height': + tf.FixedLenFeature((), tf.int64, default_value=1), + 'image/width': + tf.FixedLenFeature((), tf.int64, default_value=1), + # Image-level labels. + 'image/class/text': + tf.VarLenFeature(tf.string), + 'image/class/label': + tf.VarLenFeature(tf.int64), + 'image/class/confidence': + tf.VarLenFeature(tf.float32), + # Object boxes and classes. + 'image/object/bbox/xmin': + tf.VarLenFeature(tf.float32), + 'image/object/bbox/xmax': + tf.VarLenFeature(tf.float32), + 'image/object/bbox/ymin': + tf.VarLenFeature(tf.float32), + 'image/object/bbox/ymax': + tf.VarLenFeature(tf.float32), + 'image/object/class/label': + tf.VarLenFeature(tf.int64), + 'image/object/class/text': + tf.VarLenFeature(tf.string), + 'image/object/area': + tf.VarLenFeature(tf.float32), + 'image/object/is_crowd': + tf.VarLenFeature(tf.int64), + 'image/object/difficult': + tf.VarLenFeature(tf.int64), + 'image/object/group_of': + tf.VarLenFeature(tf.int64), + 'image/object/weight': + tf.VarLenFeature(tf.float32), + + } + # We are checking `dct_method` instead of passing it directly in order to + # ensure TF version 1.6 compatibility. + if dct_method: + image = slim_example_decoder.Image( + image_key='image/encoded', + format_key='image/format', + channels=3, + dct_method=dct_method) + additional_channel_image = slim_example_decoder.Image( + image_key='image/additional_channels/encoded', + format_key='image/format', + channels=1, + repeated=True, + dct_method=dct_method) + else: + image = slim_example_decoder.Image( + image_key='image/encoded', format_key='image/format', channels=3) + additional_channel_image = slim_example_decoder.Image( + image_key='image/additional_channels/encoded', + format_key='image/format', + channels=1, + repeated=True) + self.items_to_handlers = { + fields.InputDataFields.image: + image, + fields.InputDataFields.source_id: ( + slim_example_decoder.Tensor('image/source_id')), + fields.InputDataFields.key: ( + slim_example_decoder.Tensor('image/key/sha256')), + fields.InputDataFields.filename: ( + slim_example_decoder.Tensor('image/filename')), + # Image-level labels. 
+ fields.InputDataFields.groundtruth_image_confidences: ( + slim_example_decoder.Tensor('image/class/confidence')), + # Object boxes and classes. + fields.InputDataFields.groundtruth_boxes: ( + slim_example_decoder.BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'], + 'image/object/bbox/')), + fields.InputDataFields.groundtruth_area: + slim_example_decoder.Tensor('image/object/area'), + fields.InputDataFields.groundtruth_is_crowd: ( + slim_example_decoder.Tensor('image/object/is_crowd')), + fields.InputDataFields.groundtruth_difficult: ( + slim_example_decoder.Tensor('image/object/difficult')), + fields.InputDataFields.groundtruth_group_of: ( + slim_example_decoder.Tensor('image/object/group_of')), + fields.InputDataFields.groundtruth_weights: ( + slim_example_decoder.Tensor('image/object/weight')), + + } + if load_multiclass_scores: + self.keys_to_features[ + 'image/object/class/multiclass_scores'] = tf.VarLenFeature(tf.float32) + self.items_to_handlers[fields.InputDataFields.multiclass_scores] = ( + slim_example_decoder.Tensor('image/object/class/multiclass_scores')) + + if load_context_features: + self.keys_to_features[ + 'image/context_features'] = tf.VarLenFeature(tf.float32) + self.items_to_handlers[fields.InputDataFields.context_features] = ( + slim_example_decoder.ItemHandlerCallback( + ['image/context_features', 'image/context_feature_length'], + self._reshape_context_features)) + + self.keys_to_features[ + 'image/context_feature_length'] = tf.FixedLenFeature((), tf.int64) + self.items_to_handlers[fields.InputDataFields.context_feature_length] = ( + slim_example_decoder.Tensor('image/context_feature_length')) + + if num_additional_channels > 0: + self.keys_to_features[ + 'image/additional_channels/encoded'] = tf.FixedLenFeature( + (num_additional_channels,), tf.string) + self.items_to_handlers[ + fields.InputDataFields. 
+ image_additional_channels] = additional_channel_image + self._num_keypoints = num_keypoints + if num_keypoints > 0: + self.keys_to_features['image/object/keypoint/x'] = ( + tf.VarLenFeature(tf.float32)) + self.keys_to_features['image/object/keypoint/y'] = ( + tf.VarLenFeature(tf.float32)) + self.keys_to_features['image/object/keypoint/visibility'] = ( + tf.VarLenFeature(tf.int64)) + self.items_to_handlers[fields.InputDataFields.groundtruth_keypoints] = ( + slim_example_decoder.ItemHandlerCallback( + ['image/object/keypoint/y', 'image/object/keypoint/x'], + self._reshape_keypoints)) + kpt_vis_field = fields.InputDataFields.groundtruth_keypoint_visibilities + self.items_to_handlers[kpt_vis_field] = ( + slim_example_decoder.ItemHandlerCallback( + ['image/object/keypoint/x', 'image/object/keypoint/visibility'], + self._reshape_keypoint_visibilities)) + if load_instance_masks: + if instance_mask_type in (input_reader_pb2.DEFAULT, + input_reader_pb2.NUMERICAL_MASKS): + self.keys_to_features['image/object/mask'] = ( + tf.VarLenFeature(tf.float32)) + self.items_to_handlers[ + fields.InputDataFields.groundtruth_instance_masks] = ( + slim_example_decoder.ItemHandlerCallback( + ['image/object/mask', 'image/height', 'image/width'], + self._reshape_instance_masks)) + elif instance_mask_type == input_reader_pb2.PNG_MASKS: + self.keys_to_features['image/object/mask'] = tf.VarLenFeature(tf.string) + self.items_to_handlers[ + fields.InputDataFields.groundtruth_instance_masks] = ( + slim_example_decoder.ItemHandlerCallback( + ['image/object/mask', 'image/height', 'image/width'], + self._decode_png_instance_masks)) + else: + raise ValueError('Did not recognize the `instance_mask_type` option.') + if label_map_proto_file: + # If the label_map_proto is provided, try to use it in conjunction with + # the class text, and fall back to a materialized ID. + label_handler = _BackupHandler( + _ClassTensorHandler( + 'image/object/class/text', label_map_proto_file, + default_value=''), + slim_example_decoder.Tensor('image/object/class/label')) + image_label_handler = _BackupHandler( + _ClassTensorHandler( + fields.TfExampleFields.image_class_text, + label_map_proto_file, + default_value=''), + slim_example_decoder.Tensor(fields.TfExampleFields.image_class_label)) + else: + label_handler = slim_example_decoder.Tensor('image/object/class/label') + image_label_handler = slim_example_decoder.Tensor( + fields.TfExampleFields.image_class_label) + self.items_to_handlers[ + fields.InputDataFields.groundtruth_classes] = label_handler + self.items_to_handlers[ + fields.InputDataFields.groundtruth_image_classes] = image_label_handler + + self._expand_hierarchy_labels = expand_hierarchy_labels + self._ancestors_lut = None + self._descendants_lut = None + if expand_hierarchy_labels: + if label_map_proto_file: + ancestors_lut, descendants_lut = ( + label_map_util.get_label_map_hierarchy_lut(label_map_proto_file, + True)) + self._ancestors_lut = tf.constant(ancestors_lut, dtype=tf.int64) + self._descendants_lut = tf.constant(descendants_lut, dtype=tf.int64) + else: + raise ValueError('In order to expand labels, the label_map_proto_file ' + 'has to be provided.') + + def decode(self, tf_example_string_tensor): + """Decodes serialized tensorflow example and returns a tensor dictionary. + + Args: + tf_example_string_tensor: a string tensor holding a serialized tensorflow + example proto. + + Returns: + A dictionary of the following tensors. + fields.InputDataFields.image - 3D uint8 tensor of shape [None, None, 3] + containing image. 
+ fields.InputDataFields.original_image_spatial_shape - 1D int32 tensor of + shape [2] containing shape of the image. + fields.InputDataFields.source_id - string tensor containing original + image id. + fields.InputDataFields.key - string tensor with unique sha256 hash key. + fields.InputDataFields.filename - string tensor with original dataset + filename. + fields.InputDataFields.groundtruth_boxes - 2D float32 tensor of shape + [None, 4] containing box corners. + fields.InputDataFields.groundtruth_classes - 1D int64 tensor of shape + [None] containing classes for the boxes. + fields.InputDataFields.groundtruth_weights - 1D float32 tensor of + shape [None] indicating the weights of groundtruth boxes. + fields.InputDataFields.groundtruth_area - 1D float32 tensor of shape + [None] containing containing object mask area in pixel squared. + fields.InputDataFields.groundtruth_is_crowd - 1D bool tensor of shape + [None] indicating if the boxes enclose a crowd. + + Optional: + fields.InputDataFields.groundtruth_image_confidences - 1D float tensor of + shape [None] indicating if a class is present in the image (1.0) or + a class is not present in the image (0.0). + fields.InputDataFields.image_additional_channels - 3D uint8 tensor of + shape [None, None, num_additional_channels]. 1st dim is height; 2nd dim + is width; 3rd dim is the number of additional channels. + fields.InputDataFields.groundtruth_difficult - 1D bool tensor of shape + [None] indicating if the boxes represent `difficult` instances. + fields.InputDataFields.groundtruth_group_of - 1D bool tensor of shape + [None] indicating if the boxes represent `group_of` instances. + fields.InputDataFields.groundtruth_keypoints - 3D float32 tensor of + shape [None, num_keypoints, 2] containing keypoints, where the + coordinates of the keypoints are ordered (y, x). + fields.InputDataFields.groundtruth_keypoint_visibilities - 2D bool + tensor of shape [None, num_keypoints] containing keypoint visibilites. + fields.InputDataFields.groundtruth_instance_masks - 3D float32 tensor of + shape [None, None, None] containing instance masks. + fields.InputDataFields.groundtruth_image_classes - 1D int64 of shape + [None] containing classes for the boxes. + fields.InputDataFields.multiclass_scores - 1D float32 tensor of shape + [None * num_classes] containing flattened multiclass scores for + groundtruth boxes. 
+ fields.InputDataFields.context_features - 1D float32 tensor of shape + [context_feature_length * num_context_features] + fields.InputDataFields.context_feature_length - int32 tensor specifying + the length of each feature in context_features + """ + serialized_example = tf.reshape(tf_example_string_tensor, shape=[]) + decoder = slim_example_decoder.TFExampleDecoder(self.keys_to_features, + self.items_to_handlers) + keys = decoder.list_items() + tensors = decoder.decode(serialized_example, items=keys) + tensor_dict = dict(zip(keys, tensors)) + is_crowd = fields.InputDataFields.groundtruth_is_crowd + tensor_dict[is_crowd] = tf.cast(tensor_dict[is_crowd], dtype=tf.bool) + tensor_dict[fields.InputDataFields.image].set_shape([None, None, 3]) + tensor_dict[fields.InputDataFields.original_image_spatial_shape] = tf.shape( + tensor_dict[fields.InputDataFields.image])[:2] + + if fields.InputDataFields.image_additional_channels in tensor_dict: + channels = tensor_dict[fields.InputDataFields.image_additional_channels] + channels = tf.squeeze(channels, axis=3) + channels = tf.transpose(channels, perm=[1, 2, 0]) + tensor_dict[fields.InputDataFields.image_additional_channels] = channels + + def default_groundtruth_weights(): + return tf.ones( + [tf.shape(tensor_dict[fields.InputDataFields.groundtruth_boxes])[0]], + dtype=tf.float32) + + tensor_dict[fields.InputDataFields.groundtruth_weights] = tf.cond( + tf.greater( + tf.shape( + tensor_dict[fields.InputDataFields.groundtruth_weights])[0], + 0), lambda: tensor_dict[fields.InputDataFields.groundtruth_weights], + default_groundtruth_weights) + + if fields.InputDataFields.groundtruth_keypoints in tensor_dict: + # Set all keypoints that are not labeled to NaN. + gt_kpt_fld = fields.InputDataFields.groundtruth_keypoints + gt_kpt_vis_fld = fields.InputDataFields.groundtruth_keypoint_visibilities + visibilities_tiled = tf.tile( + tf.expand_dims(tensor_dict[gt_kpt_vis_fld], -1), + [1, 1, 2]) + tensor_dict[gt_kpt_fld] = tf.where( + visibilities_tiled, + tensor_dict[gt_kpt_fld], + np.nan * tf.ones_like(tensor_dict[gt_kpt_fld])) + + if self._expand_hierarchy_labels: + input_fields = fields.InputDataFields + image_classes, image_confidences = self._expand_image_label_hierarchy( + tensor_dict[input_fields.groundtruth_image_classes], + tensor_dict[input_fields.groundtruth_image_confidences]) + tensor_dict[input_fields.groundtruth_image_classes] = image_classes + tensor_dict[input_fields.groundtruth_image_confidences] = ( + image_confidences) + + box_fields = [ + fields.InputDataFields.groundtruth_group_of, + fields.InputDataFields.groundtruth_is_crowd, + fields.InputDataFields.groundtruth_difficult, + fields.InputDataFields.groundtruth_area, + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_weights, + ] + + def expand_field(field_name): + return self._expansion_box_field_labels( + tensor_dict[input_fields.groundtruth_classes], + tensor_dict[field_name]) + + # pylint: disable=cell-var-from-loop + for field in box_fields: + if field in tensor_dict: + tensor_dict[field] = tf.cond( + tf.size(tensor_dict[field]) > 0, lambda: expand_field(field), + lambda: tensor_dict[field]) + # pylint: enable=cell-var-from-loop + + tensor_dict[input_fields.groundtruth_classes] = ( + self._expansion_box_field_labels( + tensor_dict[input_fields.groundtruth_classes], + tensor_dict[input_fields.groundtruth_classes], True)) + + if fields.InputDataFields.groundtruth_group_of in tensor_dict: + group_of = fields.InputDataFields.groundtruth_group_of + 
tensor_dict[group_of] = tf.cast(tensor_dict[group_of], dtype=tf.bool) + + return tensor_dict + + def _reshape_keypoints(self, keys_to_tensors): + """Reshape keypoints. + + The keypoints are reshaped to [num_instances, num_keypoints, 2]. + + Args: + keys_to_tensors: a dictionary from keys to tensors. Expected keys are: + 'image/object/keypoint/x' + 'image/object/keypoint/y' + + Returns: + A 3-D float tensor of shape [num_instances, num_keypoints, 2] with values + in [0, 1]. + """ + y = keys_to_tensors['image/object/keypoint/y'] + if isinstance(y, tf.SparseTensor): + y = tf.sparse_tensor_to_dense(y) + y = tf.expand_dims(y, 1) + x = keys_to_tensors['image/object/keypoint/x'] + if isinstance(x, tf.SparseTensor): + x = tf.sparse_tensor_to_dense(x) + x = tf.expand_dims(x, 1) + keypoints = tf.concat([y, x], 1) + keypoints = tf.reshape(keypoints, [-1, self._num_keypoints, 2]) + return keypoints + + def _reshape_keypoint_visibilities(self, keys_to_tensors): + """Reshape keypoint visibilities. + + The keypoint visibilities are reshaped to [num_instances, + num_keypoints]. + + The raw keypoint visibilities are expected to conform to the + MSCoco definition. See Visibility enum. + + The returned boolean is True for the labeled case (either + Visibility.NOT_VISIBLE or Visibility.VISIBLE). These are the same categories + that COCO uses to evaluate keypoint detection performance: + http://cocodataset.org/#keypoints-eval + + If image/object/keypoint/visibility is not provided, visibilities will be + set to True for finite keypoint coordinate values, and 0 if the coordinates + are NaN. + + Args: + keys_to_tensors: a dictionary from keys to tensors. Expected keys are: + 'image/object/keypoint/x' + 'image/object/keypoint/visibility' + + Returns: + A 2-D bool tensor of shape [num_instances, num_keypoints] with values + in {0, 1}. 1 if the keypoint is labeled, 0 otherwise. + """ + x = keys_to_tensors['image/object/keypoint/x'] + vis = keys_to_tensors['image/object/keypoint/visibility'] + if isinstance(vis, tf.SparseTensor): + vis = tf.sparse_tensor_to_dense(vis) + if isinstance(x, tf.SparseTensor): + x = tf.sparse_tensor_to_dense(x) + + default_vis = tf.where( + tf.math.is_nan(x), + Visibility.UNLABELED.value * tf.ones_like(x, dtype=tf.int64), + Visibility.VISIBLE.value * tf.ones_like(x, dtype=tf.int64)) + # Use visibility if provided, otherwise use the default visibility. + vis = tf.cond(tf.equal(tf.size(x), tf.size(vis)), + true_fn=lambda: vis, + false_fn=lambda: default_vis) + vis = tf.math.logical_or( + tf.math.equal(vis, Visibility.NOT_VISIBLE.value), + tf.math.equal(vis, Visibility.VISIBLE.value)) + vis = tf.reshape(vis, [-1, self._num_keypoints]) + return vis + + def _reshape_instance_masks(self, keys_to_tensors): + """Reshape instance segmentation masks. + + The instance segmentation masks are reshaped to [num_instances, height, + width]. + + Args: + keys_to_tensors: a dictionary from keys to tensors. + + Returns: + A 3-D float tensor of shape [num_instances, height, width] with values + in {0, 1}. + """ + height = keys_to_tensors['image/height'] + width = keys_to_tensors['image/width'] + to_shape = tf.cast(tf.stack([-1, height, width]), tf.int32) + masks = keys_to_tensors['image/object/mask'] + if isinstance(masks, tf.SparseTensor): + masks = tf.sparse_tensor_to_dense(masks) + masks = tf.reshape( + tf.cast(tf.greater(masks, 0.0), dtype=tf.float32), to_shape) + return tf.cast(masks, tf.float32) + + def _reshape_context_features(self, keys_to_tensors): + """Reshape context features. 
+ + The instance context_features are reshaped to + [num_context_features, context_feature_length] + + Args: + keys_to_tensors: a dictionary from keys to tensors. + + Returns: + A 2-D float tensor of shape [num_context_features, context_feature_length] + """ + context_feature_length = keys_to_tensors['image/context_feature_length'] + to_shape = tf.cast(tf.stack([-1, context_feature_length]), tf.int32) + context_features = keys_to_tensors['image/context_features'] + if isinstance(context_features, tf.SparseTensor): + context_features = tf.sparse_tensor_to_dense(context_features) + context_features = tf.reshape(context_features, to_shape) + return context_features + + def _decode_png_instance_masks(self, keys_to_tensors): + """Decode PNG instance segmentation masks and stack into dense tensor. + + The instance segmentation masks are reshaped to [num_instances, height, + width]. + + Args: + keys_to_tensors: a dictionary from keys to tensors. + + Returns: + A 3-D float tensor of shape [num_instances, height, width] with values + in {0, 1}. + """ + + def decode_png_mask(image_buffer): + image = tf.squeeze( + tf.image.decode_image(image_buffer, channels=1), axis=2) + image.set_shape([None, None]) + image = tf.cast(tf.greater(image, 0), dtype=tf.float32) + return image + + png_masks = keys_to_tensors['image/object/mask'] + height = keys_to_tensors['image/height'] + width = keys_to_tensors['image/width'] + if isinstance(png_masks, tf.SparseTensor): + png_masks = tf.sparse_tensor_to_dense(png_masks, default_value='') + return tf.cond( + tf.greater(tf.size(png_masks), 0), + lambda: tf.map_fn(decode_png_mask, png_masks, dtype=tf.float32), + lambda: tf.zeros(tf.cast(tf.stack([0, height, width]), dtype=tf.int32))) + + def _expand_image_label_hierarchy(self, image_classes, image_confidences): + """Expand image level labels according to the hierarchy. + + Args: + image_classes: Int64 tensor with the image level class ids for a sample. + image_confidences: Float tensor signaling whether a class id is present in + the image (1.0) or not present (0.0). + + Returns: + new_image_classes: Int64 tensor equal to expanding image_classes. + new_image_confidences: Float tensor equal to expanding image_confidences. + """ + + def expand_labels(relation_tensor, confidence_value): + """Expand to ancestors or descendants depending on arguments.""" + mask = tf.equal(image_confidences, confidence_value) + target_image_classes = tf.boolean_mask(image_classes, mask) + expanded_indices = tf.reduce_any((tf.gather( + relation_tensor, target_image_classes - _LABEL_OFFSET, axis=0) > 0), + axis=0) + expanded_indices = tf.where(expanded_indices)[:, 0] + _LABEL_OFFSET + new_groundtruth_image_classes = ( + tf.concat([ + tf.boolean_mask(image_classes, tf.logical_not(mask)), + expanded_indices, + ], + axis=0)) + new_groundtruth_image_confidences = ( + tf.concat([ + tf.boolean_mask(image_confidences, tf.logical_not(mask)), + tf.ones([tf.shape(expanded_indices)[0]], + dtype=image_confidences.dtype) * confidence_value, + ], + axis=0)) + return new_groundtruth_image_classes, new_groundtruth_image_confidences + + image_classes, image_confidences = expand_labels(self._ancestors_lut, 1.0) + new_image_classes, new_image_confidences = expand_labels( + self._descendants_lut, 0.0) + return new_image_classes, new_image_confidences + + def _expansion_box_field_labels(self, + object_classes, + object_field, + copy_class_id=False): + """Expand the labels of a specific object field according to the hierarchy. 
+ + Args: + object_classes: Int64 tensor with the class id for each element in + object_field. + object_field: Tensor to be expanded. + copy_class_id: Boolean to choose whether to use class id values in the + output tensor instead of replicating the original values. + + Returns: + A tensor with the result of expanding object_field. + """ + expanded_indices = tf.gather( + self._ancestors_lut, object_classes - _LABEL_OFFSET, axis=0) + if copy_class_id: + new_object_field = tf.where(expanded_indices > 0)[:, 1] + _LABEL_OFFSET + else: + new_object_field = tf.repeat( + object_field, tf.reduce_sum(expanded_indices, axis=1), axis=0) + return new_object_field diff --git a/models/research/object_detection/data_decoders/tf_example_decoder_test.py b/models/research/object_detection/data_decoders/tf_example_decoder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..9cbed32fc05f3d6b2c9e3233633627412482e0f5 --- /dev/null +++ b/models/research/object_detection/data_decoders/tf_example_decoder_test.py @@ -0,0 +1,1350 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for object_detection.data_decoders.tf_example_decoder.""" + +import os +import numpy as np +import six +import tensorflow.compat.v1 as tf + +from object_detection.core import standard_fields as fields +from object_detection.data_decoders import tf_example_decoder +from object_detection.protos import input_reader_pb2 +from object_detection.utils import dataset_util +from object_detection.utils import test_case + + +class TfExampleDecoderTest(test_case.TestCase): + + def _create_encoded_and_decoded_data(self, data, encoding_type): + if encoding_type == 'jpeg': + encode_fn = tf.image.encode_jpeg + decode_fn = tf.image.decode_jpeg + elif encoding_type == 'png': + encode_fn = tf.image.encode_png + decode_fn = tf.image.decode_png + else: + raise ValueError('Invalid encoding type.') + + def prepare_data_fn(): + encoded_data = encode_fn(data) + decoded_data = decode_fn(encoded_data) + return encoded_data, decoded_data + + return self.execute_cpu(prepare_data_fn, []) + + def testDecodeAdditionalChannels(self): + image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data(image, 'jpeg') + + additional_channel = np.random.randint(256, size=(4, 5, 1)).astype(np.uint8) + (encoded_additional_channel, + decoded_additional_channel) = self._create_encoded_and_decoded_data( + additional_channel, 'jpeg') + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/additional_channels/encoded': + dataset_util.bytes_list_feature( + [encoded_additional_channel] * 2), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/source_id': + dataset_util.bytes_feature(six.b('image_id')), + })).SerializeToString() + + 
example_decoder = tf_example_decoder.TfExampleDecoder( + num_additional_channels=2) + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual( + np.concatenate([decoded_additional_channel] * 2, axis=2), + tensor_dict[fields.InputDataFields.image_additional_channels]) + + def testDecodeJpegImage(self): + image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, decoded_jpeg = self._create_encoded_and_decoded_data( + image, 'jpeg') + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/source_id': + dataset_util.bytes_feature(six.b('image_id')), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + output = example_decoder.decode(tf.convert_to_tensor(example)) + self.assertAllEqual( + (output[fields.InputDataFields.image].get_shape().as_list()), + [None, None, 3]) + self.assertAllEqual( + (output[fields.InputDataFields.original_image_spatial_shape] + .get_shape().as_list()), [2]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual(decoded_jpeg, tensor_dict[fields.InputDataFields.image]) + self.assertAllEqual([4, 5], tensor_dict[fields.InputDataFields. + original_image_spatial_shape]) + self.assertEqual( + six.b('image_id'), tensor_dict[fields.InputDataFields.source_id]) + + def testDecodeImageKeyAndFilename(self): + image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data(image, 'jpeg') + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/key/sha256': + dataset_util.bytes_feature(six.b('abc')), + 'image/filename': + dataset_util.bytes_feature(six.b('filename')) + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertEqual(six.b('abc'), tensor_dict[fields.InputDataFields.key]) + self.assertEqual( + six.b('filename'), tensor_dict[fields.InputDataFields.filename]) + + def testDecodePngImage(self): + image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_png, decoded_png = self._create_encoded_and_decoded_data( + image, 'png') + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_png), + 'image/format': + dataset_util.bytes_feature(six.b('png')), + 'image/source_id': + dataset_util.bytes_feature(six.b('image_id')) + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + output = example_decoder.decode(tf.convert_to_tensor(example)) + self.assertAllEqual( + (output[fields.InputDataFields.image].get_shape().as_list()), + [None, None, 3]) + self.assertAllEqual( + (output[fields.InputDataFields.original_image_spatial_shape] + .get_shape().as_list()), [2]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual(decoded_png, tensor_dict[fields.InputDataFields.image]) + self.assertAllEqual([4, 5], tensor_dict[fields.InputDataFields. 
+ original_image_spatial_shape]) + self.assertEqual( + six.b('image_id'), tensor_dict[fields.InputDataFields.source_id]) + + def testDecodePngInstanceMasks(self): + image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_png, _ = self._create_encoded_and_decoded_data(image, 'png') + mask_1 = np.random.randint(0, 2, size=(10, 10, 1)).astype(np.uint8) + mask_2 = np.random.randint(0, 2, size=(10, 10, 1)).astype(np.uint8) + encoded_png_1, _ = self._create_encoded_and_decoded_data(mask_1, 'png') + decoded_png_1 = np.squeeze(mask_1.astype(np.float32)) + encoded_png_2, _ = self._create_encoded_and_decoded_data(mask_2, 'png') + decoded_png_2 = np.squeeze(mask_2.astype(np.float32)) + encoded_masks = [encoded_png_1, encoded_png_2] + decoded_masks = np.stack([decoded_png_1, decoded_png_2]) + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_png), + 'image/format': + dataset_util.bytes_feature(six.b('png')), + 'image/object/mask': + dataset_util.bytes_list_feature(encoded_masks) + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder( + load_instance_masks=True, + instance_mask_type=input_reader_pb2.PNG_MASKS) + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual( + decoded_masks, + tensor_dict[fields.InputDataFields.groundtruth_instance_masks]) + + def testDecodeEmptyPngInstanceMasks(self): + image_tensor = np.random.randint(256, size=(10, 10, 3)).astype(np.uint8) + encoded_png, _ = self._create_encoded_and_decoded_data(image_tensor, 'png') + encoded_masks = [] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_png), + 'image/format': + dataset_util.bytes_feature(six.b('png')), + 'image/object/mask': + dataset_util.bytes_list_feature(encoded_masks), + 'image/height': + dataset_util.int64_feature(10), + 'image/width': + dataset_util.int64_feature(10), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder( + load_instance_masks=True, + instance_mask_type=input_reader_pb2.PNG_MASKS) + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual( + tensor_dict[fields.InputDataFields.groundtruth_instance_masks].shape, + [0, 10, 10]) + + def testDecodeBoundingBox(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_ymins = [0.0, 4.0] + bbox_xmins = [1.0, 5.0] + bbox_ymaxs = [2.0, 6.0] + bbox_xmaxs = [3.0, 7.0] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/bbox/ymin': + dataset_util.float_list_feature(bbox_ymins), + 'image/object/bbox/xmin': + dataset_util.float_list_feature(bbox_xmins), + 'image/object/bbox/ymax': + dataset_util.float_list_feature(bbox_ymaxs), + 'image/object/bbox/xmax': + dataset_util.float_list_feature(bbox_xmaxs), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + output = example_decoder.decode(tf.convert_to_tensor(example)) + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_boxes].get_shape().as_list()), + [None, 4]) + 
return output + + tensor_dict = self.execute_cpu(graph_fn, []) + expected_boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs, + bbox_xmaxs]).transpose() + self.assertAllEqual(expected_boxes, + tensor_dict[fields.InputDataFields.groundtruth_boxes]) + + def testDecodeKeypoint(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_ymins = [0.0, 4.0] + bbox_xmins = [1.0, 5.0] + bbox_ymaxs = [2.0, 6.0] + bbox_xmaxs = [3.0, 7.0] + keypoint_ys = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0] + keypoint_xs = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0] + keypoint_visibility = [1, 2, 0, 1, 0, 2] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/bbox/ymin': + dataset_util.float_list_feature(bbox_ymins), + 'image/object/bbox/xmin': + dataset_util.float_list_feature(bbox_xmins), + 'image/object/bbox/ymax': + dataset_util.float_list_feature(bbox_ymaxs), + 'image/object/bbox/xmax': + dataset_util.float_list_feature(bbox_xmaxs), + 'image/object/keypoint/y': + dataset_util.float_list_feature(keypoint_ys), + 'image/object/keypoint/x': + dataset_util.float_list_feature(keypoint_xs), + 'image/object/keypoint/visibility': + dataset_util.int64_list_feature(keypoint_visibility), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder(num_keypoints=3) + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_boxes].get_shape().as_list()), + [None, 4]) + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_keypoints].get_shape().as_list()), + [2, 3, 2]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + expected_boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs, + bbox_xmaxs]).transpose() + self.assertAllEqual(expected_boxes, + tensor_dict[fields.InputDataFields.groundtruth_boxes]) + + expected_keypoints = [ + [[0.0, 1.0], [1.0, 2.0], [np.nan, np.nan]], + [[3.0, 4.0], [np.nan, np.nan], [5.0, 6.0]]] + self.assertAllClose( + expected_keypoints, + tensor_dict[fields.InputDataFields.groundtruth_keypoints]) + + expected_visibility = ( + (np.array(keypoint_visibility) > 0).reshape((2, 3))) + self.assertAllEqual( + expected_visibility, + tensor_dict[fields.InputDataFields.groundtruth_keypoint_visibilities]) + + def testDecodeKeypointNoVisibilities(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_ymins = [0.0, 4.0] + bbox_xmins = [1.0, 5.0] + bbox_ymaxs = [2.0, 6.0] + bbox_xmaxs = [3.0, 7.0] + keypoint_ys = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0] + keypoint_xs = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/bbox/ymin': + dataset_util.float_list_feature(bbox_ymins), + 'image/object/bbox/xmin': + dataset_util.float_list_feature(bbox_xmins), + 'image/object/bbox/ymax': + dataset_util.float_list_feature(bbox_ymaxs), + 'image/object/bbox/xmax': + dataset_util.float_list_feature(bbox_xmaxs), + 'image/object/keypoint/y': + dataset_util.float_list_feature(keypoint_ys), + 'image/object/keypoint/x': + 
dataset_util.float_list_feature(keypoint_xs), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder(num_keypoints=3) + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_boxes].get_shape().as_list()), + [None, 4]) + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_keypoints].get_shape().as_list()), + [2, 3, 2]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + expected_boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs, + bbox_xmaxs]).transpose() + self.assertAllEqual(expected_boxes, + tensor_dict[fields.InputDataFields.groundtruth_boxes]) + + expected_keypoints = ( + np.vstack([keypoint_ys, keypoint_xs]).transpose().reshape((2, 3, 2))) + self.assertAllEqual( + expected_keypoints, + tensor_dict[fields.InputDataFields.groundtruth_keypoints]) + + expected_visibility = np.ones((2, 3)) + self.assertAllEqual( + expected_visibility, + tensor_dict[fields.InputDataFields.groundtruth_keypoint_visibilities]) + + def testDecodeDefaultGroundtruthWeights(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_ymins = [0.0, 4.0] + bbox_xmins = [1.0, 5.0] + bbox_ymaxs = [2.0, 6.0] + bbox_xmaxs = [3.0, 7.0] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/bbox/ymin': + dataset_util.float_list_feature(bbox_ymins), + 'image/object/bbox/xmin': + dataset_util.float_list_feature(bbox_xmins), + 'image/object/bbox/ymax': + dataset_util.float_list_feature(bbox_ymaxs), + 'image/object/bbox/xmax': + dataset_util.float_list_feature(bbox_xmaxs), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_boxes].get_shape().as_list()), + [None, 4]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllClose(tensor_dict[fields.InputDataFields.groundtruth_weights], + np.ones(2, dtype=np.float32)) + + def testDecodeObjectLabel(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_classes = [0, 1] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/class/label': + dataset_util.int64_list_feature(bbox_classes), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_classes].get_shape().as_list()), + [2]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + + self.assertAllEqual(bbox_classes, + tensor_dict[fields.InputDataFields.groundtruth_classes]) + + def testDecodeMultiClassScores(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_ymins = [0.0, 4.0] + bbox_xmins = [1.0, 5.0] + bbox_ymaxs = [2.0, 6.0] + 
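+ # Two groundtruth boxes, each with scores for two classes below; the decoder + # returns multiclass_scores still flattened to shape [num_boxes * num_classes].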
bbox_xmaxs = [3.0, 7.0] + flattened_multiclass_scores = [100., 50.] + [20., 30.] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/class/multiclass_scores': + dataset_util.float_list_feature( + flattened_multiclass_scores), + 'image/object/bbox/ymin': + dataset_util.float_list_feature(bbox_ymins), + 'image/object/bbox/xmin': + dataset_util.float_list_feature(bbox_xmins), + 'image/object/bbox/ymax': + dataset_util.float_list_feature(bbox_ymaxs), + 'image/object/bbox/xmax': + dataset_util.float_list_feature(bbox_xmaxs), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder( + load_multiclass_scores=True) + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual(flattened_multiclass_scores, + tensor_dict[fields.InputDataFields.multiclass_scores]) + + def testDecodeEmptyMultiClassScores(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_ymins = [0.0, 4.0] + bbox_xmins = [1.0, 5.0] + bbox_ymaxs = [2.0, 6.0] + bbox_xmaxs = [3.0, 7.0] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/bbox/ymin': + dataset_util.float_list_feature(bbox_ymins), + 'image/object/bbox/xmin': + dataset_util.float_list_feature(bbox_xmins), + 'image/object/bbox/ymax': + dataset_util.float_list_feature(bbox_ymaxs), + 'image/object/bbox/xmax': + dataset_util.float_list_feature(bbox_xmaxs), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder( + load_multiclass_scores=True) + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertEqual( + (0,), tensor_dict[fields.InputDataFields.multiclass_scores].shape) + + def testDecodeObjectLabelNoText(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_classes = [1, 2] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/class/label': + dataset_util.int64_list_feature(bbox_classes), + })).SerializeToString() + label_map_string = """ + item { + id:1 + name:'cat' + } + item { + id:2 + name:'dog' + } + """ + label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt') + with tf.gfile.Open(label_map_path, 'wb') as f: + f.write(label_map_string) + + example_decoder = tf_example_decoder.TfExampleDecoder( + label_map_proto_file=label_map_path) + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_classes].get_shape().as_list()), + [None]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + + self.assertAllEqual(bbox_classes, + tensor_dict[fields.InputDataFields.groundtruth_classes]) + + def testDecodeObjectLabelWithText(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = 
self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_classes_text = [six.b('cat'), six.b('dog')] + # Annotation label gets overridden by labelmap id. + annotated_bbox_classes = [3, 4] + expected_bbox_classes = [1, 2] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/class/text': + dataset_util.bytes_list_feature(bbox_classes_text), + 'image/object/class/label': + dataset_util.int64_list_feature(annotated_bbox_classes), + })).SerializeToString() + label_map_string = """ + item { + id:1 + name:'cat' + } + item { + id:2 + name:'dog' + } + """ + label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt') + with tf.gfile.Open(label_map_path, 'wb') as f: + f.write(label_map_string) + + example_decoder = tf_example_decoder.TfExampleDecoder( + label_map_proto_file=label_map_path) + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn, []) + + self.assertAllEqual(expected_bbox_classes, + tensor_dict[fields.InputDataFields.groundtruth_classes]) + + def testDecodeObjectLabelUnrecognizedName(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_classes_text = [six.b('cat'), six.b('cheetah')] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/class/text': + dataset_util.bytes_list_feature(bbox_classes_text), + })).SerializeToString() + + label_map_string = """ + item { + id:2 + name:'cat' + } + item { + id:1 + name:'dog' + } + """ + label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt') + with tf.gfile.Open(label_map_path, 'wb') as f: + f.write(label_map_string) + example_decoder = tf_example_decoder.TfExampleDecoder( + label_map_proto_file=label_map_path) + output = example_decoder.decode(tf.convert_to_tensor(example)) + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_classes].get_shape().as_list()), + [None]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual([2, -1], + tensor_dict[fields.InputDataFields.groundtruth_classes]) + + def testDecodeObjectLabelWithMappingWithDisplayName(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_classes_text = [six.b('cat'), six.b('dog')] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/class/text': + dataset_util.bytes_list_feature(bbox_classes_text), + })).SerializeToString() + + label_map_string = """ + item { + id:3 + display_name:'cat' + } + item { + id:1 + display_name:'dog' + } + """ + label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt') + with tf.gfile.Open(label_map_path, 'wb') as f: + f.write(label_map_string) + example_decoder = tf_example_decoder.TfExampleDecoder( + label_map_proto_file=label_map_path) + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual((output[ + 
fields.InputDataFields.groundtruth_classes].get_shape().as_list()), + [None]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual([3, 1], + tensor_dict[fields.InputDataFields.groundtruth_classes]) + + def testDecodeObjectLabelUnrecognizedNameWithMappingWithDisplayName(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_classes_text = [six.b('cat'), six.b('cheetah')] + bbox_classes_id = [5, 6] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/class/text': + dataset_util.bytes_list_feature(bbox_classes_text), + 'image/object/class/label': + dataset_util.int64_list_feature(bbox_classes_id), + })).SerializeToString() + + label_map_string = """ + item { + name:'/m/cat' + id:3 + display_name:'cat' + } + item { + name:'/m/dog' + id:1 + display_name:'dog' + } + """ + label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt') + with tf.gfile.Open(label_map_path, 'wb') as f: + f.write(label_map_string) + example_decoder = tf_example_decoder.TfExampleDecoder( + label_map_proto_file=label_map_path) + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual([3, -1], + tensor_dict[fields.InputDataFields.groundtruth_classes]) + + def testDecodeObjectLabelWithMappingWithName(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_classes_text = [six.b('cat'), six.b('dog')] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/class/text': + dataset_util.bytes_list_feature(bbox_classes_text), + })).SerializeToString() + + label_map_string = """ + item { + id:3 + name:'cat' + } + item { + id:1 + name:'dog' + } + """ + label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt') + with tf.gfile.Open(label_map_path, 'wb') as f: + f.write(label_map_string) + example_decoder = tf_example_decoder.TfExampleDecoder( + label_map_proto_file=label_map_path) + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_classes].get_shape().as_list()), + [None]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual([3, 1], + tensor_dict[fields.InputDataFields.groundtruth_classes]) + + def testDecodeObjectArea(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + object_area = [100., 174.] 
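+ # Per-object mask areas in pixels squared; the decoder is expected to pass + # these through unchanged as groundtruth_area.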
+ + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/area': + dataset_util.float_list_feature(object_area), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_area].get_shape().as_list()), [2]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + + self.assertAllEqual(object_area, + tensor_dict[fields.InputDataFields.groundtruth_area]) + + def testDecodeObjectIsCrowd(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + object_is_crowd = [0, 1] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/is_crowd': + dataset_util.int64_list_feature(object_is_crowd), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_is_crowd].get_shape().as_list()), + [2]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual( + [bool(item) for item in object_is_crowd], + tensor_dict[fields.InputDataFields.groundtruth_is_crowd]) + + def testDecodeObjectDifficult(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + object_difficult = [0, 1] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/difficult': + dataset_util.int64_list_feature(object_difficult), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_difficult].get_shape().as_list()), + [2]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual( + [bool(item) for item in object_difficult], + tensor_dict[fields.InputDataFields.groundtruth_difficult]) + + def testDecodeObjectGroupOf(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + object_group_of = [0, 1] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/group_of': + dataset_util.int64_list_feature(object_group_of), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_group_of].get_shape().as_list()), + [2]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual( + [bool(item) for item in 
object_group_of], + tensor_dict[fields.InputDataFields.groundtruth_group_of]) + + def testDecodeObjectWeight(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + object_weights = [0.75, 1.0] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/weight': + dataset_util.float_list_feature(object_weights), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_weights].get_shape().as_list()), + [None]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + + self.assertAllEqual(object_weights, + tensor_dict[fields.InputDataFields.groundtruth_weights]) + + def testDecodeClassConfidence(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + class_confidence = [0.0, 1.0, 0.0] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/class/confidence': + dataset_util.float_list_feature(class_confidence), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual( + (output[fields.InputDataFields.groundtruth_image_confidences] + .get_shape().as_list()), [3]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual( + class_confidence, + tensor_dict[fields.InputDataFields.groundtruth_image_confidences]) + + def testDecodeInstanceSegmentation(self): + num_instances = 4 + image_height = 5 + image_width = 3 + + # Randomly generate image. + image_tensor = np.random.randint( + 256, size=(image_height, image_width, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + + # Randomly generate instance segmentation masks. + instance_masks = ( + np.random.randint(2, size=(num_instances, image_height, + image_width)).astype(np.float32)) + instance_masks_flattened = np.reshape(instance_masks, [-1]) + + # Randomly generate class labels for each instance. 
+ object_classes = np.random.randint( + 100, size=(num_instances)).astype(np.int64) + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/height': + dataset_util.int64_feature(image_height), + 'image/width': + dataset_util.int64_feature(image_width), + 'image/object/mask': + dataset_util.float_list_feature(instance_masks_flattened), + 'image/object/class/label': + dataset_util.int64_list_feature(object_classes) + })).SerializeToString() + example_decoder = tf_example_decoder.TfExampleDecoder( + load_instance_masks=True) + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual( + (output[fields.InputDataFields.groundtruth_instance_masks].get_shape( + ).as_list()), [4, 5, 3]) + + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_classes].get_shape().as_list()), + [4]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + + self.assertAllEqual( + instance_masks.astype(np.float32), + tensor_dict[fields.InputDataFields.groundtruth_instance_masks]) + self.assertAllEqual(object_classes, + tensor_dict[fields.InputDataFields.groundtruth_classes]) + + def testInstancesNotAvailableByDefault(self): + num_instances = 4 + image_height = 5 + image_width = 3 + # Randomly generate image. + image_tensor = np.random.randint( + 256, size=(image_height, image_width, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + + # Randomly generate instance segmentation masks. + instance_masks = ( + np.random.randint(2, size=(num_instances, image_height, + image_width)).astype(np.float32)) + instance_masks_flattened = np.reshape(instance_masks, [-1]) + + # Randomly generate class labels for each instance. 
+ object_classes = np.random.randint( + 100, size=(num_instances)).astype(np.int64) + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/height': + dataset_util.int64_feature(image_height), + 'image/width': + dataset_util.int64_feature(image_width), + 'image/object/mask': + dataset_util.float_list_feature(instance_masks_flattened), + 'image/object/class/label': + dataset_util.int64_list_feature(object_classes) + })).SerializeToString() + example_decoder = tf_example_decoder.TfExampleDecoder() + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertTrue( + fields.InputDataFields.groundtruth_instance_masks not in tensor_dict) + + def testDecodeImageLabels(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + + def graph_fn_1(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': dataset_util.bytes_feature(encoded_jpeg), + 'image/format': dataset_util.bytes_feature(six.b('jpeg')), + 'image/class/label': dataset_util.int64_list_feature([1, 2]), + })).SerializeToString() + example_decoder = tf_example_decoder.TfExampleDecoder() + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn_1, []) + self.assertTrue( + fields.InputDataFields.groundtruth_image_classes in tensor_dict) + self.assertAllEqual( + tensor_dict[fields.InputDataFields.groundtruth_image_classes], + np.array([1, 2])) + + def graph_fn_2(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/class/text': + dataset_util.bytes_list_feature( + [six.b('dog'), six.b('cat')]), + })).SerializeToString() + label_map_string = """ + item { + id:3 + name:'cat' + } + item { + id:1 + name:'dog' + } + """ + label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt') + with tf.gfile.Open(label_map_path, 'wb') as f: + f.write(label_map_string) + example_decoder = tf_example_decoder.TfExampleDecoder( + label_map_proto_file=label_map_path) + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn_2, []) + self.assertTrue( + fields.InputDataFields.groundtruth_image_classes in tensor_dict) + self.assertAllEqual( + tensor_dict[fields.InputDataFields.groundtruth_image_classes], + np.array([1, 3])) + + def testDecodeContextFeatures(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_ymins = [0.0, 4.0] + bbox_xmins = [1.0, 5.0] + bbox_ymaxs = [2.0, 6.0] + bbox_xmaxs = [3.0, 7.0] + num_features = 8 + context_feature_length = 10 + context_features = np.random.random(num_features*context_feature_length) + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/context_features': + dataset_util.float_list_feature(context_features), + 'image/context_feature_length': + dataset_util.int64_feature(context_feature_length), + 
'image/object/bbox/ymin': + dataset_util.float_list_feature(bbox_ymins), + 'image/object/bbox/xmin': + dataset_util.float_list_feature(bbox_xmins), + 'image/object/bbox/ymax': + dataset_util.float_list_feature(bbox_ymaxs), + 'image/object/bbox/xmax': + dataset_util.float_list_feature(bbox_xmaxs), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder( + load_context_features=True) + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllClose( + context_features.reshape(num_features, context_feature_length), + tensor_dict[fields.InputDataFields.context_features]) + self.assertAllEqual( + context_feature_length, + tensor_dict[fields.InputDataFields.context_feature_length]) + + def testContextFeaturesNotAvailableByDefault(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_ymins = [0.0, 4.0] + bbox_xmins = [1.0, 5.0] + bbox_ymaxs = [2.0, 6.0] + bbox_xmaxs = [3.0, 7.0] + num_features = 10 + context_feature_length = 10 + context_features = np.random.random(num_features*context_feature_length) + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/context_features': + dataset_util.float_list_feature(context_features), + 'image/context_feature_length': + dataset_util.int64_feature(context_feature_length), + 'image/object/bbox/ymin': + dataset_util.float_list_feature(bbox_ymins), + 'image/object/bbox/xmin': + dataset_util.float_list_feature(bbox_xmins), + 'image/object/bbox/ymax': + dataset_util.float_list_feature(bbox_ymaxs), + 'image/object/bbox/xmax': + dataset_util.float_list_feature(bbox_xmaxs), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertNotIn(fields.InputDataFields.context_features, + tensor_dict) + + def testExpandLabels(self): + label_map_string = """ + item { + id:1 + name:'cat' + ancestor_ids: 2 + } + item { + id:2 + name:'animal' + descendant_ids: 1 + } + item { + id:3 + name:'man' + ancestor_ids: 5 + } + item { + id:4 + name:'woman' + display_name:'woman' + ancestor_ids: 5 + } + item { + id:5 + name:'person' + descendant_ids: 3 + descendant_ids: 4 + } + """ + + label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt') + with tf.gfile.Open(label_map_path, 'wb') as f: + f.write(label_map_string) + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_ymins = [0.0, 4.0] + bbox_xmins = [1.0, 5.0] + bbox_ymaxs = [2.0, 6.0] + bbox_xmaxs = [3.0, 7.0] + bbox_classes_text = [six.b('cat'), six.b('cat')] + bbox_group_of = [0, 1] + image_class_text = [six.b('cat'), six.b('person')] + image_confidence = [1.0, 0.0] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/bbox/ymin': + dataset_util.float_list_feature(bbox_ymins), + 'image/object/bbox/xmin': + dataset_util.float_list_feature(bbox_xmins), + 'image/object/bbox/ymax': + 
dataset_util.float_list_feature(bbox_ymaxs), + 'image/object/bbox/xmax': + dataset_util.float_list_feature(bbox_xmaxs), + 'image/object/class/text': + dataset_util.bytes_list_feature(bbox_classes_text), + 'image/object/group_of': + dataset_util.int64_list_feature(bbox_group_of), + 'image/class/text': + dataset_util.bytes_list_feature(image_class_text), + 'image/class/confidence': + dataset_util.float_list_feature(image_confidence), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder( + label_map_proto_file=label_map_path, expand_hierarchy_labels=True) + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn, []) + + boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs, + bbox_xmaxs]).transpose() + expected_boxes = np.stack( + [boxes[0, :], boxes[0, :], boxes[1, :], boxes[1, :]], axis=0) + expected_boxes_class = np.array([1, 2, 1, 2]) + expected_boxes_group_of = np.array([0, 0, 1, 1]) + expected_image_class = np.array([1, 2, 3, 4, 5]) + expected_image_confidence = np.array([1.0, 1.0, 0.0, 0.0, 0.0]) + self.assertAllEqual(expected_boxes, + tensor_dict[fields.InputDataFields.groundtruth_boxes]) + self.assertAllEqual(expected_boxes_class, + tensor_dict[fields.InputDataFields.groundtruth_classes]) + self.assertAllEqual( + expected_boxes_group_of, + tensor_dict[fields.InputDataFields.groundtruth_group_of]) + self.assertAllEqual( + expected_image_class, + tensor_dict[fields.InputDataFields.groundtruth_image_classes]) + self.assertAllEqual( + expected_image_confidence, + tensor_dict[fields.InputDataFields.groundtruth_image_confidences]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/data_decoders/tf_sequence_example_decoder.py b/models/research/object_detection/data_decoders/tf_sequence_example_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..1565a910eb1726ce0846e9c78488a7e8d4f97fdf --- /dev/null +++ b/models/research/object_detection/data_decoders/tf_sequence_example_decoder.py @@ -0,0 +1,314 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Sequence example decoder for object detection.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import zip +import tensorflow.compat.v1 as tf +from tf_slim import tfexample_decoder as slim_example_decoder + +from object_detection.core import data_decoder +from object_detection.core import standard_fields as fields +from object_detection.utils import label_map_util + +# pylint: disable=g-import-not-at-top +try: + from tensorflow.contrib import lookup as contrib_lookup +except ImportError: + # TF 2.0 doesn't ship with contrib. 
+ pass +# pylint: enable=g-import-not-at-top + + +class _ClassTensorHandler(slim_example_decoder.Tensor): + """An ItemHandler to fetch class ids from class text.""" + + def __init__(self, + tensor_key, + label_map_proto_file, + shape_keys=None, + shape=None, + default_value=''): + """Initializes the LookupTensor handler. + + Simply calls a vocabulary (most often, a label mapping) lookup. + + Args: + tensor_key: the name of the `TFExample` feature to read the tensor from. + label_map_proto_file: File path to a text format LabelMapProto message + mapping class text to id. + shape_keys: Optional name or list of names of the TF-Example feature in + which the tensor shape is stored. If a list, then each corresponds to + one dimension of the shape. + shape: Optional output shape of the `Tensor`. If provided, the `Tensor` is + reshaped accordingly. + default_value: The value used when the `tensor_key` is not found in a + particular `TFExample`. + + Raises: + ValueError: if both `shape_keys` and `shape` are specified. + """ + name_to_id = label_map_util.get_label_map_dict( + label_map_proto_file, use_display_name=False) + # We use a default_value of -1, but we expect all labels to be contained + # in the label map. + try: + # Dynamically try to load the tf v2 lookup, falling back to contrib + lookup = tf.compat.v2.lookup + hash_table_class = tf.compat.v2.lookup.StaticHashTable + except AttributeError: + lookup = contrib_lookup + hash_table_class = contrib_lookup.HashTable + name_to_id_table = hash_table_class( + initializer=lookup.KeyValueTensorInitializer( + keys=tf.constant(list(name_to_id.keys())), + values=tf.constant(list(name_to_id.values()), dtype=tf.int64)), + default_value=-1) + + self._name_to_id_table = name_to_id_table + super(_ClassTensorHandler, self).__init__(tensor_key, shape_keys, shape, + default_value) + + def tensors_to_item(self, keys_to_tensors): + unmapped_tensor = super(_ClassTensorHandler, + self).tensors_to_item(keys_to_tensors) + return self._name_to_id_table.lookup(unmapped_tensor) + + +class TfSequenceExampleDecoder(data_decoder.DataDecoder): + """Tensorflow Sequence Example proto decoder for Object Detection. + + Sequence examples contain sequences of images which share common + features. The structure of TfSequenceExamples can be seen in + dataset_tools/seq_example_util.py + + For the TFODAPI, the following fields are required: + Shared features: + 'image/format' + 'image/height' + 'image/width' + + Features with an entry for each image, where bounding box features can + be empty lists if the image does not contain any objects: + 'image/encoded' + 'image/source_id' + 'region/bbox/xmin' + 'region/bbox/xmax' + 'region/bbox/ymin' + 'region/bbox/ymax' + 'region/label/string' + + Optionally, the sequence example can include context_features for use in + Context R-CNN (see https://arxiv.org/abs/1912.03538): + 'image/context_features' + 'image/context_feature_length' + """ + + def __init__(self, + label_map_proto_file, + load_context_features=False, + use_display_name=False, + fully_annotated=False): + """Constructs `TfSequenceExampleDecoder` object. + + Args: + label_map_proto_file: a file path to a + object_detection.protos.StringIntLabelMap proto. The + label map will be used to map IDs of 'region/label/string'. + It is assumed that 'region/label/string' will be in the data. 
+ load_context_features: Whether to load information from context_features, + to provide additional context to a detection model for training and/or + inference + use_display_name: whether or not to use the `display_name` for label + mapping (instead of `name`). Only used if label_map_proto_file is + provided. + fully_annotated: If True, will assume that every frame (whether it has + boxes or not), has been fully annotated. If False, a + 'region/is_annotated' field must be provided in the dataset which + indicates which frames have annotations. Default False. + """ + # Specifies how the tf.SequenceExamples are decoded. + self._context_keys_to_features = { + 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'), + 'image/height': tf.FixedLenFeature((), tf.int64), + 'image/width': tf.FixedLenFeature((), tf.int64), + } + self._sequence_keys_to_feature_lists = { + 'image/encoded': tf.FixedLenSequenceFeature([], dtype=tf.string), + 'image/source_id': tf.FixedLenSequenceFeature([], dtype=tf.string), + 'region/bbox/xmin': tf.VarLenFeature(dtype=tf.float32), + 'region/bbox/xmax': tf.VarLenFeature(dtype=tf.float32), + 'region/bbox/ymin': tf.VarLenFeature(dtype=tf.float32), + 'region/bbox/ymax': tf.VarLenFeature(dtype=tf.float32), + 'region/label/string': tf.VarLenFeature(dtype=tf.string), + 'region/label/confidence': tf.VarLenFeature(dtype=tf.float32), + } + + self._items_to_handlers = { + # Context. + fields.InputDataFields.image_height: + slim_example_decoder.Tensor('image/height'), + fields.InputDataFields.image_width: + slim_example_decoder.Tensor('image/width'), + + # Sequence. + fields.InputDataFields.num_groundtruth_boxes: + slim_example_decoder.NumBoxesSequence('region/bbox/xmin'), + fields.InputDataFields.groundtruth_boxes: + slim_example_decoder.BoundingBoxSequence( + prefix='region/bbox/', default_value=0.0), + fields.InputDataFields.groundtruth_weights: + slim_example_decoder.Tensor('region/label/confidence'), + } + + # If the dataset is sparsely annotated, parse sequence features which + # indicate which frames have been labeled. + if not fully_annotated: + self._sequence_keys_to_feature_lists['region/is_annotated'] = ( + tf.FixedLenSequenceFeature([], dtype=tf.int64)) + self._items_to_handlers[fields.InputDataFields.is_annotated] = ( + slim_example_decoder.Tensor('region/is_annotated')) + + self._items_to_handlers[fields.InputDataFields.image] = ( + slim_example_decoder.Tensor('image/encoded')) + self._items_to_handlers[fields.InputDataFields.source_id] = ( + slim_example_decoder.Tensor('image/source_id')) + + label_handler = _ClassTensorHandler( + 'region/label/string', label_map_proto_file, default_value='') + + self._items_to_handlers[ + fields.InputDataFields.groundtruth_classes] = label_handler + + if load_context_features: + self._context_keys_to_features['image/context_features'] = ( + tf.VarLenFeature(dtype=tf.float32)) + self._items_to_handlers[fields.InputDataFields.context_features] = ( + slim_example_decoder.ItemHandlerCallback( + ['image/context_features', 'image/context_feature_length'], + self._reshape_context_features)) + + self._context_keys_to_features['image/context_feature_length'] = ( + tf.FixedLenFeature((), tf.int64)) + self._items_to_handlers[fields.InputDataFields.context_feature_length] = ( + slim_example_decoder.Tensor('image/context_feature_length')) + self._fully_annotated = fully_annotated + + def decode(self, tf_seq_example_string_tensor): + """Decodes serialized `tf.SequenceExample`s and returns a tensor dictionary. 
+ + Args: + tf_seq_example_string_tensor: a string tensor holding a serialized + `tf.SequenceExample`. + + Returns: + A list of dictionaries with (at least) the following tensors: + fields.InputDataFields.source_id: a [num_frames] string tensor with a + unique ID for each frame. + fields.InputDataFields.num_groundtruth_boxes: a [num_frames] int32 tensor + specifying the number of boxes in each frame. + fields.InputDataFields.groundtruth_boxes: a [num_frames, num_boxes, 4] + float32 tensor with bounding boxes for each frame. Note that num_boxes + is the maximum boxes seen in any individual frame. Any frames with fewer + boxes are padded with 0.0. + fields.InputDataFields.groundtruth_classes: a [num_frames, num_boxes] + int32 tensor with class indices for each box in each frame. + fields.InputDataFields.groundtruth_weights: a [num_frames, num_boxes] + float32 tensor with weights of the groundtruth boxes. + fields.InputDataFields.is_annotated: a [num_frames] bool tensor specifying + whether the image was annotated or not. If False, the corresponding + entries in the groundtruth tensor will be ignored. + fields.InputDataFields.context_features - 1D float32 tensor of shape + [context_feature_length * num_context_features] + fields.InputDataFields.context_feature_length - int32 tensor specifying + the length of each feature in context_features + fields.InputDataFields.image: a [num_frames] string tensor with + the encoded images. + """ + serialized_example = tf.reshape(tf_seq_example_string_tensor, shape=[]) + decoder = slim_example_decoder.TFSequenceExampleDecoder( + self._context_keys_to_features, self._sequence_keys_to_feature_lists, + self._items_to_handlers) + keys = decoder.list_items() + tensors = decoder.decode(serialized_example, items=keys) + tensor_dict = dict(list(zip(keys, tensors))) + tensor_dict[fields.InputDataFields.groundtruth_boxes].set_shape( + [None, None, 4]) + tensor_dict[fields.InputDataFields.num_groundtruth_boxes] = tf.cast( + tensor_dict[fields.InputDataFields.num_groundtruth_boxes], + dtype=tf.int32) + tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.cast( + tensor_dict[fields.InputDataFields.groundtruth_classes], dtype=tf.int32) + tensor_dict[fields.InputDataFields.original_image_spatial_shape] = tf.cast( + tf.stack([ + tensor_dict[fields.InputDataFields.image_height], + tensor_dict[fields.InputDataFields.image_width] + ]), + dtype=tf.int32) + tensor_dict.pop(fields.InputDataFields.image_height) + tensor_dict.pop(fields.InputDataFields.image_width) + + def default_groundtruth_weights(): + """Produces weights of 1.0 for each valid box, and 0.0 otherwise.""" + num_boxes_per_frame = tensor_dict[ + fields.InputDataFields.num_groundtruth_boxes] + max_num_boxes = tf.reduce_max(num_boxes_per_frame) + num_boxes_per_frame_tiled = tf.tile( + tf.expand_dims(num_boxes_per_frame, axis=-1), + multiples=tf.stack([1, max_num_boxes])) + range_tiled = tf.tile( + tf.expand_dims(tf.range(max_num_boxes), axis=0), + multiples=tf.stack([tf.shape(num_boxes_per_frame)[0], 1])) + return tf.cast( + tf.greater(num_boxes_per_frame_tiled, range_tiled), tf.float32) + + tensor_dict[fields.InputDataFields.groundtruth_weights] = tf.cond( + tf.greater( + tf.size(tensor_dict[fields.InputDataFields.groundtruth_weights]), + 0), lambda: tensor_dict[fields.InputDataFields.groundtruth_weights], + default_groundtruth_weights) + + if self._fully_annotated: + tensor_dict[fields.InputDataFields.is_annotated] = tf.ones_like( + tensor_dict[fields.InputDataFields.num_groundtruth_boxes], + 
dtype=tf.bool) + else: + tensor_dict[fields.InputDataFields.is_annotated] = tf.cast( + tensor_dict[fields.InputDataFields.is_annotated], dtype=tf.bool) + + return tensor_dict + + def _reshape_context_features(self, keys_to_tensors): + """Reshape context features. + + The instance context_features are reshaped to + [num_context_features, context_feature_length] + + Args: + keys_to_tensors: a dictionary from keys to tensors. + + Returns: + A 2-D float tensor of shape [num_context_features, context_feature_length] + """ + context_feature_length = keys_to_tensors['image/context_feature_length'] + to_shape = tf.cast(tf.stack([-1, context_feature_length]), tf.int32) + context_features = keys_to_tensors['image/context_features'] + if isinstance(context_features, tf.SparseTensor): + context_features = tf.sparse_tensor_to_dense(context_features) + context_features = tf.reshape(context_features, to_shape) + return context_features diff --git a/models/research/object_detection/data_decoders/tf_sequence_example_decoder_test.py b/models/research/object_detection/data_decoders/tf_sequence_example_decoder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..2ea1c6163454cf2d05065713b2e0657f24af5e64 --- /dev/null +++ b/models/research/object_detection/data_decoders/tf_sequence_example_decoder_test.py @@ -0,0 +1,173 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for tf_sequence_example_decoder.py.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.core import standard_fields as fields +from object_detection.data_decoders import tf_sequence_example_decoder +from object_detection.dataset_tools import seq_example_util +from object_detection.utils import test_case + + +class TfSequenceExampleDecoderTest(test_case.TestCase): + + def _create_label_map(self, path): + label_map_text = """ + item { + name: "dog" + id: 1 + } + item { + name: "cat" + id: 2 + } + item { + name: "panda" + id: 4 + } + """ + with tf.gfile.Open(path, 'wb') as f: + f.write(label_map_text) + + def _make_random_serialized_jpeg_images(self, num_frames, image_height, + image_width): + def graph_fn(): + images = tf.cast(tf.random.uniform( + [num_frames, image_height, image_width, 3], + maxval=256, + dtype=tf.int32), dtype=tf.uint8) + images_list = tf.unstack(images, axis=0) + return [tf.io.encode_jpeg(image) for image in images_list] + encoded_images = self.execute(graph_fn, []) + return encoded_images + + def test_decode_sequence_example(self): + num_frames = 4 + image_height = 20 + image_width = 30 + + expected_groundtruth_boxes = [ + [[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]], + [[0.2, 0.2, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]], + [[0.0, 0.0, 1.0, 1.0], [0.1, 0.1, 0.2, 0.2]], + [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]] + ] + expected_groundtruth_classes = [ + [-1, -1], + [-1, 1], + [1, 2], + [-1, -1] + ] + + flds = fields.InputDataFields + encoded_images = self._make_random_serialized_jpeg_images( + num_frames, image_height, image_width) + + def graph_fn(): + label_map_proto_file = os.path.join(self.get_temp_dir(), 'labelmap.pbtxt') + self._create_label_map(label_map_proto_file) + decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder( + label_map_proto_file=label_map_proto_file) + sequence_example_serialized = seq_example_util.make_sequence_example( + dataset_name='video_dataset', + video_id='video', + encoded_images=encoded_images, + image_height=image_height, + image_width=image_width, + image_format='JPEG', + image_source_ids=[str(i) for i in range(num_frames)], + is_annotated=[[1], [1], [1], [1]], + bboxes=[ + [[0., 0., 1., 1.]], # Frame 0. + [[0.2, 0.2, 1., 1.], + [0., 0., 1., 1.]], # Frame 1. + [[0., 0., 1., 1.], # Frame 2. + [0.1, 0.1, 0.2, 0.2]], + [[]], # Frame 3. + ], + label_strings=[ + ['fox'], # Frame 0. Fox will be filtered out. + ['fox', 'dog'], # Frame 1. Fox will be filtered out. + ['dog', 'cat'], # Frame 2. 
+ [], # Frame 3 + ]).SerializeToString() + + example_string_tensor = tf.convert_to_tensor(sequence_example_serialized) + return decoder.decode(example_string_tensor) + + tensor_dict_out = self.execute(graph_fn, []) + self.assertAllClose(expected_groundtruth_boxes, + tensor_dict_out[flds.groundtruth_boxes]) + self.assertAllEqual(expected_groundtruth_classes, + tensor_dict_out[flds.groundtruth_classes]) + + def test_decode_sequence_example_negative_clip(self): + num_frames = 4 + image_height = 20 + image_width = 30 + + expected_groundtruth_boxes = -1 * np.ones((4, 0, 4)) + expected_groundtruth_classes = -1 * np.ones((4, 0)) + + flds = fields.InputDataFields + + encoded_images = self._make_random_serialized_jpeg_images( + num_frames, image_height, image_width) + + def graph_fn(): + sequence_example_serialized = seq_example_util.make_sequence_example( + dataset_name='video_dataset', + video_id='video', + encoded_images=encoded_images, + image_height=image_height, + image_width=image_width, + image_format='JPEG', + image_source_ids=[str(i) for i in range(num_frames)], + bboxes=[ + [[]], + [[]], + [[]], + [[]] + ], + label_strings=[ + [], + [], + [], + [] + ]).SerializeToString() + example_string_tensor = tf.convert_to_tensor(sequence_example_serialized) + + label_map_proto_file = os.path.join(self.get_temp_dir(), 'labelmap.pbtxt') + self._create_label_map(label_map_proto_file) + decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder( + label_map_proto_file=label_map_proto_file) + return decoder.decode(example_string_tensor) + + tensor_dict_out = self.execute(graph_fn, []) + self.assertAllClose(expected_groundtruth_boxes, + tensor_dict_out[flds.groundtruth_boxes]) + self.assertAllEqual(expected_groundtruth_classes, + tensor_dict_out[flds.groundtruth_classes]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/dataset_tools/__init__.py b/models/research/object_detection/dataset_tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/object_detection/dataset_tools/context_rcnn/__init__.py b/models/research/object_detection/dataset_tools/context_rcnn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/object_detection/dataset_tools/context_rcnn/add_context_to_examples.py b/models/research/object_detection/dataset_tools/context_rcnn/add_context_to_examples.py new file mode 100644 index 0000000000000000000000000000000000000000..9c05387980e193f9cb40a767944357d80379384c --- /dev/null +++ b/models/research/object_detection/dataset_tools/context_rcnn/add_context_to_examples.py @@ -0,0 +1,845 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""A Beam job to add contextual memory banks to tf.Examples. 
+ +This tool groups images containing bounding boxes and embedded context features +by a key, either `image/location` or `image/seq_id`, and time horizon, +then uses these groups to build up a contextual memory bank from the embedded +context features from each image in the group and adds that context to the +output tf.Examples for each image in the group. + +Steps to generate a dataset with context from one with bounding boxes and +embedded context features: +1. Use object/detection/export_inference_graph.py to get a `saved_model` for + inference. The input node must accept a tf.Example proto. +2. Run this tool with `saved_model` from step 1 and a TFRecord of tf.Example + protos containing images, bounding boxes, and embedded context features. + The context features can be added to tf.Examples using + generate_embedding_data.py. + +Example Usage: +-------------- +python add_context_to_examples.py \ + --input_tfrecord path/to/input_tfrecords* \ + --output_tfrecord path/to/output_tfrecords \ + --sequence_key image/location \ + --time_horizon month + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import datetime +import io +import itertools +import json +import os + +from absl import app +from absl import flags +import apache_beam as beam +import numpy as np +import PIL.Image +import six +import tensorflow as tf + +from apache_beam import runners + + +flags.DEFINE_string('input_tfrecord', None, 'TFRecord containing images in ' + 'tf.Example format for object detection, with bounding' + 'boxes and contextual feature embeddings.') +flags.DEFINE_string('output_tfrecord', None, + 'TFRecord containing images in tf.Example format, with ' + 'added contextual memory banks.') +flags.DEFINE_string('sequence_key', None, 'Key to use when grouping sequences: ' + 'so far supports `image/seq_id` and `image/location`.') +flags.DEFINE_string('time_horizon', None, 'What time horizon to use when ' + 'splitting the data, if any. Options are: `year`, `month`,' + ' `week`, `day `, `hour`, `minute`, `None`.') +flags.DEFINE_integer('subsample_context_features_rate', 0, 'Whether to ' + 'subsample the context_features, and if so how many to ' + 'sample. If the rate is set to X, it will sample context ' + 'from 1 out of every X images. 
Default is sampling from ' + 'every image, which is X=0.') +flags.DEFINE_boolean('reduce_image_size', True, 'downsamples images to' + 'have longest side max_image_dimension, maintaining aspect' + ' ratio') +flags.DEFINE_integer('max_image_dimension', 1024, 'sets max image dimension') +flags.DEFINE_boolean('add_context_features', True, 'adds a memory bank of' + 'embeddings to each clip') +flags.DEFINE_boolean('sorted_image_ids', True, 'whether the image source_ids ' + 'are sortable to deal with date_captured tie-breaks') +flags.DEFINE_string('image_ids_to_keep', 'All', 'path to .json list of image' + 'ids to keep, used for ground truth eval creation') +flags.DEFINE_boolean('keep_context_features_image_id_list', False, 'Whether or ' + 'not to keep a list of the image_ids corresponding to the ' + 'memory bank') +flags.DEFINE_boolean('keep_only_positives', False, 'Whether or not to ' + 'keep only positive boxes based on score') +flags.DEFINE_boolean('keep_only_positives_gt', False, 'Whether or not to ' + 'keep only positive boxes based on gt class') +flags.DEFINE_float('context_features_score_threshold', 0.7, 'What score ' + 'threshold to use for boxes in context_features') +flags.DEFINE_integer('max_num_elements_in_context_features', 2000, 'Sets max ' + 'num elements per memory bank') +flags.DEFINE_integer('num_shards', 0, 'Number of output shards.') +flags.DEFINE_string('output_type', 'tf_sequence_example', 'Output type, one of ' + '`tf_example`, `tf_sequence_example`') +flags.DEFINE_integer('max_clip_length', None, 'Max length for sequence ' + 'example outputs.') + +FLAGS = flags.FLAGS + +DEFAULT_FEATURE_LENGTH = 2057 + + +class ReKeyDataFn(beam.DoFn): + """Re-keys tfrecords by sequence_key. + + This Beam DoFn re-keys the tfrecords by a user-defined sequence_key + """ + + def __init__(self, sequence_key, time_horizon, + reduce_image_size, max_image_dimension): + """Initialization function. + + Args: + sequence_key: A feature name to use as a key for grouping sequences. + Must point to a key of type bytes_list + time_horizon: What length of time to use to partition the data when + building the memory banks. Options: `year`, `month`, `week`, `day `, + `hour`, `minute`, None + reduce_image_size: Whether to reduce the sizes of the stored images. 
+      max_image_dimension: maximum dimension of reduced images
+    """
+    self._sequence_key = sequence_key
+    if time_horizon is None or time_horizon in {'year', 'month', 'week', 'day',
+                                                'hour', 'minute'}:
+      self._time_horizon = time_horizon
+    else:
+      raise ValueError('Time horizon not supported.')
+    self._reduce_image_size = reduce_image_size
+    self._max_image_dimension = max_image_dimension
+    self._session = None
+    self._num_examples_processed = beam.metrics.Metrics.counter(
+        'data_rekey', 'num_tf_examples_processed')
+    self._num_images_resized = beam.metrics.Metrics.counter(
+        'data_rekey', 'num_images_resized')
+    self._num_images_read = beam.metrics.Metrics.counter(
+        'data_rekey', 'num_images_read')
+    self._num_images_found = beam.metrics.Metrics.counter(
+        'data_rekey', 'num_images_found')
+    self._num_got_shape = beam.metrics.Metrics.counter(
+        'data_rekey', 'num_images_got_shape')
+    self._num_images_found_size = beam.metrics.Metrics.counter(
+        'data_rekey', 'num_images_found_size')
+    self._num_examples_cleared = beam.metrics.Metrics.counter(
+        'data_rekey', 'num_examples_cleared')
+    self._num_examples_updated = beam.metrics.Metrics.counter(
+        'data_rekey', 'num_examples_updated')
+
+  def process(self, tfrecord_entry):
+    return self._rekey_examples(tfrecord_entry)
+
+  def _largest_size_at_most(self, height, width, largest_side):
+    """Computes new shape with the largest side equal to `largest_side`.
+
+    Args:
+      height: an int indicating the current height.
+      width: an int indicating the current width.
+      largest_side: A python integer indicating the size of
+        the largest side after resize.
+    Returns:
+      new_height: an int indicating the new height.
+      new_width: an int indicating the new width.
+    """
+
+    x_scale = float(largest_side) / float(width)
+    y_scale = float(largest_side) / float(height)
+    scale = min(x_scale, y_scale)
+
+    new_width = int(width * scale)
+    new_height = int(height * scale)
+
+    return new_height, new_width
+
+  def _resize_image(self, input_example):
+    """Resizes the image within input_example and updates the height and width.
+
+    Args:
+      input_example: A tf.Example that we want to update to contain a resized
+        image.
+    Returns:
+      input_example: Updated tf.Example.
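+
+    For example, with max_image_dimension=1024 (the flag default), a 4000x3000
+    input is resized to 1024x768; the aspect ratio is always preserved.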
+ """ + + original_image = copy.deepcopy( + input_example.features.feature['image/encoded'].bytes_list.value[0]) + self._num_images_read.inc(1) + + height = copy.deepcopy( + input_example.features.feature['image/height'].int64_list.value[0]) + + width = copy.deepcopy( + input_example.features.feature['image/width'].int64_list.value[0]) + + self._num_got_shape.inc(1) + + new_height, new_width = self._largest_size_at_most( + height, width, self._max_image_dimension) + + self._num_images_found_size.inc(1) + + encoded_jpg_io = io.BytesIO(original_image) + image = PIL.Image.open(encoded_jpg_io) + resized_image = image.resize((new_width, new_height)) + + with io.BytesIO() as output: + resized_image.save(output, format='JPEG') + encoded_resized_image = output.getvalue() + + self._num_images_resized.inc(1) + + del input_example.features.feature['image/encoded'].bytes_list.value[:] + del input_example.features.feature['image/height'].int64_list.value[:] + del input_example.features.feature['image/width'].int64_list.value[:] + + self._num_examples_cleared.inc(1) + + input_example.features.feature['image/encoded'].bytes_list.value.extend( + [encoded_resized_image]) + input_example.features.feature['image/height'].int64_list.value.extend( + [new_height]) + input_example.features.feature['image/width'].int64_list.value.extend( + [new_width]) + self._num_examples_updated.inc(1) + + return input_example + + def _rekey_examples(self, tfrecord_entry): + serialized_example = copy.deepcopy(tfrecord_entry) + + input_example = tf.train.Example.FromString(serialized_example) + + self._num_images_found.inc(1) + + if self._reduce_image_size: + input_example = self._resize_image(input_example) + self._num_images_resized.inc(1) + + new_key = input_example.features.feature[ + self._sequence_key].bytes_list.value[0] + + if self._time_horizon: + date_captured = datetime.datetime.strptime( + six.ensure_str(input_example.features.feature[ + 'image/date_captured'].bytes_list.value[0]), '%Y-%m-%d %H:%M:%S') + year = date_captured.year + month = date_captured.month + day = date_captured.day + week = np.floor(float(day) / float(7)) + hour = date_captured.hour + minute = date_captured.minute + + if self._time_horizon == 'year': + new_key = new_key + six.ensure_binary('/' + str(year)) + elif self._time_horizon == 'month': + new_key = new_key + six.ensure_binary( + '/' + str(year) + '/' + str(month)) + elif self._time_horizon == 'week': + new_key = new_key + six.ensure_binary( + '/' + str(year) + '/' + str(month) + '/' + str(week)) + elif self._time_horizon == 'day': + new_key = new_key + six.ensure_binary( + '/' + str(year) + '/' + str(month) + '/' + str(day)) + elif self._time_horizon == 'hour': + new_key = new_key + six.ensure_binary( + '/' + str(year) + '/' + str(month) + '/' + str(day) + '/' + ( + str(hour))) + elif self._time_horizon == 'minute': + new_key = new_key + six.ensure_binary( + '/' + str(year) + '/' + str(month) + '/' + str(day) + '/' + ( + str(hour) + '/' + str(minute))) + + self._num_examples_processed.inc(1) + + return [(new_key, input_example)] + + +class SortGroupedDataFn(beam.DoFn): + """Sorts data within a keyed group. + + This Beam DoFn sorts the grouped list of image examples by frame_num + """ + + def __init__(self, sequence_key, sorted_image_ids, + max_num_elements_in_context_features): + """Initialization function. + + Args: + sequence_key: A feature name to use as a key for grouping sequences. 
+        Must point to a key of type bytes_list
+      sorted_image_ids: Whether the image ids are sortable to use as sorting
+        tie-breakers
+      max_num_elements_in_context_features: The maximum number of elements
+        allowed in the memory bank
+    """
+    self._session = None
+    self._num_examples_processed = beam.metrics.Metrics.counter(
+        'sort_group', 'num_groups_sorted')
+    self._too_many_elements = beam.metrics.Metrics.counter(
+        'sort_group', 'too_many_elements')
+    self._split_elements = beam.metrics.Metrics.counter(
+        'sort_group', 'split_elements')
+    self._sequence_key = six.ensure_binary(sequence_key)
+    self._sorted_image_ids = sorted_image_ids
+    self._max_num_elements_in_context_features = (
+        max_num_elements_in_context_features)
+
+  def process(self, grouped_entry):
+    return self._sort_image_examples(grouped_entry)
+
+  def _sort_image_examples(self, grouped_entry):
+    key, example_collection = grouped_entry
+    example_list = list(example_collection)
+
+    def get_frame_num(example):
+      return example.features.feature['image/seq_frame_num'].int64_list.value[0]
+
+    def get_date_captured(example):
+      return datetime.datetime.strptime(
+          six.ensure_str(
+              example.features.feature[
+                  'image/date_captured'].bytes_list.value[0]),
+          '%Y-%m-%d %H:%M:%S')
+
+    def get_image_id(example):
+      return example.features.feature['image/source_id'].bytes_list.value[0]
+
+    if self._sequence_key == six.ensure_binary('image/seq_id'):
+      sorting_fn = get_frame_num
+    elif self._sequence_key == six.ensure_binary('image/location'):
+      if self._sorted_image_ids:
+        sorting_fn = get_image_id
+      else:
+        sorting_fn = get_date_captured
+
+    sorted_example_list = sorted(example_list, key=sorting_fn)
+
+    self._num_examples_processed.inc(1)
+
+    if len(sorted_example_list) > self._max_num_elements_in_context_features:
+      leftovers = sorted_example_list
+      output_list = []
+      count = 0
+      self._too_many_elements.inc(1)
+      while len(leftovers) > self._max_num_elements_in_context_features:
+        self._split_elements.inc(1)
+        new_key = key + six.ensure_binary('_' + str(count))
+        new_list = leftovers[:self._max_num_elements_in_context_features]
+        output_list.append((new_key, new_list))
+        # Advance past the examples that were just emitted so the remaining
+        # ones get their own groups.
+        leftovers = leftovers[self._max_num_elements_in_context_features:]
+        count += 1
+      if leftovers:
+        # Emit whatever remains as a final, shorter group.
+        new_key = key + six.ensure_binary('_' + str(count))
+        output_list.append((new_key, leftovers))
+    else:
+      output_list = [(key, sorted_example_list)]
+
+    return output_list
+
+
+def get_sliding_window(example_list, max_clip_length, stride_length):
+  """Yields a sliding window over data from example_list.
+
+  Sliding window has width max_clip_len (n) and stride stride_len (m).
+      s -> (s0, s1, ..., s[n-1]), (s[m], s[m+1], ..., s[m+n-1]), ...
+
+  Args:
+    example_list: A list of examples.
+    max_clip_length: The maximum length of each clip.
+    stride_length: The stride between each clip.
+
+  Yields:
+    A list of lists of examples, each with length <= max_clip_length
+  """
+
+  # check if the list is too short to slide over
+  if len(example_list) < max_clip_length:
+    yield example_list
+  else:
+    starting_values = [i*stride_length for i in
+                       range(len(example_list)) if
+                       len(example_list) > i*stride_length]
+    for start in starting_values:
+      result = tuple(itertools.islice(example_list, start,
+                                      min(start + max_clip_length,
+                                          len(example_list))))
+      yield result
+
+
+class GenerateContextFn(beam.DoFn):
+  """Generates context data for camera trap images.
+
+  This Beam DoFn builds up contextual memory banks from groups of images and
+  stores them in the output tf.Example or tf.SequenceExample for each image.
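+
+  As a rough illustration (the numbers are hypothetical): if a group
+  contributes N images whose embeddings each have length D, the emitted
+  'image/context_features' is a flat float list of length N * D and
+  'image/context_feature_length' is D, so consumers can reshape the memory
+  bank back to [N, D].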
+ """ + + def __init__(self, sequence_key, add_context_features, image_ids_to_keep, + keep_context_features_image_id_list=False, + subsample_context_features_rate=0, + keep_only_positives=False, + context_features_score_threshold=0.7, + keep_only_positives_gt=False, + max_num_elements_in_context_features=5000, + pad_context_features=False, + output_type='tf_example', max_clip_length=None): + """Initialization function. + + Args: + sequence_key: A feature name to use as a key for grouping sequences. + add_context_features: Whether to keep and store the contextual memory + bank. + image_ids_to_keep: A list of image ids to save, to use to build data + subsets for evaluation. + keep_context_features_image_id_list: Whether to save an ordered list of + the ids of the images in the contextual memory bank. + subsample_context_features_rate: What rate to subsample images for the + contextual memory bank. + keep_only_positives: Whether to only keep high scoring + (>context_features_score_threshold) features in the contextual memory + bank. + context_features_score_threshold: What threshold to use for keeping + features. + keep_only_positives_gt: Whether to only keep features from images that + contain objects based on the ground truth (for training). + max_num_elements_in_context_features: the maximum number of elements in + the memory bank + pad_context_features: Whether to pad the context features to a fixed size. + output_type: What type of output, tf_example of tf_sequence_example + max_clip_length: The maximum length of a sequence example, before + splitting into multiple + """ + self._session = None + self._num_examples_processed = beam.metrics.Metrics.counter( + 'sequence_data_generation', 'num_seq_examples_processed') + self._num_keys_processed = beam.metrics.Metrics.counter( + 'sequence_data_generation', 'num_keys_processed') + self._sequence_key = sequence_key + self._add_context_features = add_context_features + self._pad_context_features = pad_context_features + self._output_type = output_type + self._max_clip_length = max_clip_length + if six.ensure_str(image_ids_to_keep) == 'All': + self._image_ids_to_keep = None + else: + with tf.io.gfile.GFile(image_ids_to_keep) as f: + self._image_ids_to_keep = json.load(f) + self._keep_context_features_image_id_list = ( + keep_context_features_image_id_list) + self._subsample_context_features_rate = subsample_context_features_rate + self._keep_only_positives = keep_only_positives + self._keep_only_positives_gt = keep_only_positives_gt + self._context_features_score_threshold = context_features_score_threshold + self._max_num_elements_in_context_features = ( + max_num_elements_in_context_features) + + self._images_kept = beam.metrics.Metrics.counter( + 'sequence_data_generation', 'images_kept') + self._images_loaded = beam.metrics.Metrics.counter( + 'sequence_data_generation', 'images_loaded') + + def process(self, grouped_entry): + return self._add_context_to_example(copy.deepcopy(grouped_entry)) + + def _build_context_features(self, example_list): + context_features = [] + context_features_image_id_list = [] + count = 0 + example_embedding = [] + + for idx, example in enumerate(example_list): + if self._subsample_context_features_rate > 0: + if (idx % self._subsample_context_features_rate) != 0: + example.features.feature[ + 'context_features_idx'].int64_list.value.append( + self._max_num_elements_in_context_features + 1) + continue + if self._keep_only_positives: + if example.features.feature[ + 'image/embedding_score' + ].float_list.value[0] < 
self._context_features_score_threshold: + example.features.feature[ + 'context_features_idx'].int64_list.value.append( + self._max_num_elements_in_context_features + 1) + continue + if self._keep_only_positives_gt: + if len(example.features.feature[ + 'image/object/bbox/xmin' + ].float_list.value) < 1: + example.features.feature[ + 'context_features_idx'].int64_list.value.append( + self._max_num_elements_in_context_features + 1) + continue + + example_embedding = list(example.features.feature[ + 'image/embedding'].float_list.value) + context_features.extend(example_embedding) + example.features.feature[ + 'context_features_idx'].int64_list.value.append(count) + count += 1 + example_image_id = example.features.feature[ + 'image/source_id'].bytes_list.value[0] + context_features_image_id_list.append(example_image_id) + + if not example_embedding: + example_embedding.append(np.zeros(DEFAULT_FEATURE_LENGTH)) + + feature_length = DEFAULT_FEATURE_LENGTH + + # If the example_list is not empty and image/embedding_length is in the + # featture dict, feature_length will be assigned to that. Otherwise, it will + # be kept as default. + if example_list and ( + 'image/embedding_length' in example_list[0].features.feature): + feature_length = example_list[0].features.feature[ + 'image/embedding_length'].int64_list.value[0] + + if self._pad_context_features: + while len(context_features_image_id_list) < ( + self._max_num_elements_in_context_features): + context_features_image_id_list.append('') + + return context_features, feature_length, context_features_image_id_list + + def _add_context_to_example(self, grouped_entry): + key, example_collection = grouped_entry + list_of_examples = [] + + example_list = list(example_collection) + + if self._add_context_features: + context_features, feature_length, context_features_image_id_list = ( + self._build_context_features(example_list)) + + if self._image_ids_to_keep is not None: + new_example_list = [] + for example in example_list: + im_id = example.features.feature['image/source_id'].bytes_list.value[0] + self._images_loaded.inc(1) + if six.ensure_str(im_id) in self._image_ids_to_keep: + self._images_kept.inc(1) + new_example_list.append(example) + if new_example_list: + example_list = new_example_list + else: + return [] + + if self._output_type == 'tf_sequence_example': + if self._max_clip_length is not None: + # For now, no overlap + clips = get_sliding_window( + example_list, self._max_clip_length, self._max_clip_length) + else: + clips = [example_list] + + for clip_num, clip_list in enumerate(clips): + # initialize sequence example + seq_example = tf.train.SequenceExample() + video_id = six.ensure_str(key)+'_'+ str(clip_num) + seq_example.context.feature['clip/media_id'].bytes_list.value.append( + video_id.encode('utf8')) + seq_example.context.feature['clip/frames'].int64_list.value.append( + len(clip_list)) + + seq_example.context.feature[ + 'clip/start/timestamp'].int64_list.value.append(0) + seq_example.context.feature[ + 'clip/end/timestamp'].int64_list.value.append(len(clip_list)) + seq_example.context.feature['image/format'].bytes_list.value.append( + six.ensure_binary('JPG')) + seq_example.context.feature['image/channels'].int64_list.value.append(3) + context_example = clip_list[0] + seq_example.context.feature['image/height'].int64_list.value.append( + context_example.features.feature[ + 'image/height'].int64_list.value[0]) + seq_example.context.feature['image/width'].int64_list.value.append( + 
context_example.features.feature['image/width'].int64_list.value[0]) + + seq_example.context.feature[ + 'image/context_feature_length'].int64_list.value.append( + feature_length) + seq_example.context.feature[ + 'image/context_features'].float_list.value.extend( + context_features) + if self._keep_context_features_image_id_list: + seq_example.context.feature[ + 'image/context_features_image_id_list'].bytes_list.value.extend( + context_features_image_id_list) + + encoded_image_list = seq_example.feature_lists.feature_list[ + 'image/encoded'] + timestamps_list = seq_example.feature_lists.feature_list[ + 'image/timestamp'] + context_features_idx_list = seq_example.feature_lists.feature_list[ + 'image/context_features_idx'] + date_captured_list = seq_example.feature_lists.feature_list[ + 'image/date_captured'] + unix_time_list = seq_example.feature_lists.feature_list[ + 'image/unix_time'] + location_list = seq_example.feature_lists.feature_list['image/location'] + image_ids_list = seq_example.feature_lists.feature_list[ + 'image/source_id'] + gt_xmin_list = seq_example.feature_lists.feature_list[ + 'region/bbox/xmin'] + gt_xmax_list = seq_example.feature_lists.feature_list[ + 'region/bbox/xmax'] + gt_ymin_list = seq_example.feature_lists.feature_list[ + 'region/bbox/ymin'] + gt_ymax_list = seq_example.feature_lists.feature_list[ + 'region/bbox/ymax'] + gt_type_list = seq_example.feature_lists.feature_list[ + 'region/label/index'] + gt_type_string_list = seq_example.feature_lists.feature_list[ + 'region/label/string'] + gt_is_annotated_list = seq_example.feature_lists.feature_list[ + 'region/is_annotated'] + + for idx, example in enumerate(clip_list): + + encoded_image = encoded_image_list.feature.add() + encoded_image.bytes_list.value.extend( + example.features.feature['image/encoded'].bytes_list.value) + + image_id = image_ids_list.feature.add() + image_id.bytes_list.value.append( + example.features.feature['image/source_id'].bytes_list.value[0]) + + timestamp = timestamps_list.feature.add() + # Timestamp is currently order in the list. 
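+          # (Frame `idx` is simply given timestamp `idx`; wall-clock capture
+          # time is carried separately in `image/date_captured` below.)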
+ timestamp.int64_list.value.extend([idx]) + + context_features_idx = context_features_idx_list.feature.add() + context_features_idx.int64_list.value.extend( + example.features.feature['context_features_idx'].int64_list.value) + + date_captured = date_captured_list.feature.add() + date_captured.bytes_list.value.extend( + example.features.feature['image/date_captured'].bytes_list.value) + unix_time = unix_time_list.feature.add() + unix_time.float_list.value.extend( + example.features.feature['image/unix_time'].float_list.value) + location = location_list.feature.add() + location.bytes_list.value.extend( + example.features.feature['image/location'].bytes_list.value) + + gt_xmin = gt_xmin_list.feature.add() + gt_xmax = gt_xmax_list.feature.add() + gt_ymin = gt_ymin_list.feature.add() + gt_ymax = gt_ymax_list.feature.add() + gt_type = gt_type_list.feature.add() + gt_type_str = gt_type_string_list.feature.add() + + gt_is_annotated = gt_is_annotated_list.feature.add() + gt_is_annotated.int64_list.value.append(1) + + gt_xmin.float_list.value.extend( + example.features.feature[ + 'image/object/bbox/xmin'].float_list.value) + gt_xmax.float_list.value.extend( + example.features.feature[ + 'image/object/bbox/xmax'].float_list.value) + gt_ymin.float_list.value.extend( + example.features.feature[ + 'image/object/bbox/ymin'].float_list.value) + gt_ymax.float_list.value.extend( + example.features.feature[ + 'image/object/bbox/ymax'].float_list.value) + + gt_type.int64_list.value.extend( + example.features.feature[ + 'image/object/class/label'].int64_list.value) + gt_type_str.bytes_list.value.extend( + example.features.feature[ + 'image/object/class/text'].bytes_list.value) + + self._num_examples_processed.inc(1) + list_of_examples.append(seq_example) + + elif self._output_type == 'tf_example': + + for example in example_list: + im_id = example.features.feature['image/source_id'].bytes_list.value[0] + + if self._add_context_features: + example.features.feature[ + 'image/context_features'].float_list.value.extend( + context_features) + example.features.feature[ + 'image/context_feature_length'].int64_list.value.append( + feature_length) + + if self._keep_context_features_image_id_list: + example.features.feature[ + 'image/context_features_image_id_list'].bytes_list.value.extend( + context_features_image_id_list) + + self._num_examples_processed.inc(1) + list_of_examples.append(example) + + return list_of_examples + + +def construct_pipeline(input_tfrecord, + output_tfrecord, + sequence_key, + time_horizon=None, + subsample_context_features_rate=0, + reduce_image_size=True, + max_image_dimension=1024, + add_context_features=True, + sorted_image_ids=True, + image_ids_to_keep='All', + keep_context_features_image_id_list=False, + keep_only_positives=False, + context_features_score_threshold=0.7, + keep_only_positives_gt=False, + max_num_elements_in_context_features=5000, + num_shards=0, + output_type='tf_example', + max_clip_length=None): + """Returns a beam pipeline to run object detection inference. + + Args: + input_tfrecord: An TFRecord of tf.train.Example protos containing images. + output_tfrecord: An TFRecord of tf.train.Example protos that contain images + in the input TFRecord and the detections from the model. + sequence_key: A feature name to use as a key for grouping sequences. + time_horizon: What length of time to use to partition the data when building + the memory banks. Options: `year`, `month`, `week`, `day `, `hour`, + `minute`, None. 
+ subsample_context_features_rate: What rate to subsample images for the + contextual memory bank. + reduce_image_size: Whether to reduce the size of the stored images. + max_image_dimension: The maximum image dimension to use for resizing. + add_context_features: Whether to keep and store the contextual memory bank. + sorted_image_ids: Whether the image ids are sortable, and can be used as + datetime tie-breakers when building memory banks. + image_ids_to_keep: A list of image ids to save, to use to build data subsets + for evaluation. + keep_context_features_image_id_list: Whether to save an ordered list of the + ids of the images in the contextual memory bank. + keep_only_positives: Whether to only keep high scoring + (>context_features_score_threshold) features in the contextual memory + bank. + context_features_score_threshold: What threshold to use for keeping + features. + keep_only_positives_gt: Whether to only keep features from images that + contain objects based on the ground truth (for training). + max_num_elements_in_context_features: the maximum number of elements in the + memory bank + num_shards: The number of output shards. + output_type: What type of output, tf_example of tf_sequence_example + max_clip_length: The maximum length of a sequence example, before + splitting into multiple + """ + def pipeline(root): + if output_type == 'tf_example': + coder = beam.coders.ProtoCoder(tf.train.Example) + elif output_type == 'tf_sequence_example': + coder = beam.coders.ProtoCoder(tf.train.SequenceExample) + else: + raise ValueError('Unsupported output type.') + input_collection = ( + root | 'ReadInputTFRecord' >> beam.io.tfrecordio.ReadFromTFRecord( + input_tfrecord, + coder=beam.coders.BytesCoder())) + rekey_collection = input_collection | 'RekeyExamples' >> beam.ParDo( + ReKeyDataFn(sequence_key, time_horizon, + reduce_image_size, max_image_dimension)) + grouped_collection = ( + rekey_collection | 'GroupBySequenceKey' >> beam.GroupByKey()) + grouped_collection = ( + grouped_collection | 'ReshuffleGroups' >> beam.Reshuffle()) + ordered_collection = ( + grouped_collection | 'OrderByFrameNumber' >> beam.ParDo( + SortGroupedDataFn(sequence_key, sorted_image_ids, + max_num_elements_in_context_features))) + ordered_collection = ( + ordered_collection | 'ReshuffleSortedGroups' >> beam.Reshuffle()) + output_collection = ( + ordered_collection | 'AddContextToExamples' >> beam.ParDo( + GenerateContextFn( + sequence_key, add_context_features, image_ids_to_keep, + keep_context_features_image_id_list=( + keep_context_features_image_id_list), + subsample_context_features_rate=subsample_context_features_rate, + keep_only_positives=keep_only_positives, + keep_only_positives_gt=keep_only_positives_gt, + context_features_score_threshold=( + context_features_score_threshold), + max_num_elements_in_context_features=( + max_num_elements_in_context_features), + output_type=output_type, + max_clip_length=max_clip_length))) + + output_collection = ( + output_collection | 'ReshuffleExamples' >> beam.Reshuffle()) + _ = output_collection | 'WritetoDisk' >> beam.io.tfrecordio.WriteToTFRecord( + output_tfrecord, + num_shards=num_shards, + coder=coder) + return pipeline + + +def main(_): + """Runs the Beam pipeline that builds context features. 
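+
+  The pipeline is executed with Beam's DirectRunner, i.e. locally in a single
+  process; using a distributed runner would require swapping out the runner
+  constructed below.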
+ + Args: + _: unused + """ + # must create before flags are used + runner = runners.DirectRunner() + + dirname = os.path.dirname(FLAGS.output_tfrecord) + tf.io.gfile.makedirs(dirname) + runner.run( + construct_pipeline(FLAGS.input_tfrecord, + FLAGS.output_tfrecord, + FLAGS.sequence_key, + FLAGS.time_horizon, + FLAGS.subsample_context_features_rate, + FLAGS.reduce_image_size, + FLAGS.max_image_dimension, + FLAGS.add_context_features, + FLAGS.sorted_image_ids, + FLAGS.image_ids_to_keep, + FLAGS.keep_context_features_image_id_list, + FLAGS.keep_only_positives, + FLAGS.context_features_score_threshold, + FLAGS.keep_only_positives_gt, + FLAGS.max_num_elements_in_context_features, + FLAGS.num_shards, + FLAGS.output_type, + FLAGS.max_clip_length)) + + +if __name__ == '__main__': + flags.mark_flags_as_required([ + 'input_tfrecord', + 'output_tfrecord' + ]) + app.run(main) diff --git a/models/research/object_detection/dataset_tools/context_rcnn/add_context_to_examples_tf1_test.py b/models/research/object_detection/dataset_tools/context_rcnn/add_context_to_examples_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..99bb479794b7d899e5275862dc7a00e7945c2731 --- /dev/null +++ b/models/research/object_detection/dataset_tools/context_rcnn/add_context_to_examples_tf1_test.py @@ -0,0 +1,384 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for add_context_to_examples.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import contextlib +import datetime +import os +import tempfile +import unittest +import numpy as np +import six +import tensorflow.compat.v1 as tf + +from object_detection.dataset_tools.context_rcnn import add_context_to_examples +from object_detection.utils import tf_version +from apache_beam import runners + + +@contextlib.contextmanager +def InMemoryTFRecord(entries): + temp = tempfile.NamedTemporaryFile(delete=False) + filename = temp.name + try: + with tf.python_io.TFRecordWriter(filename) as writer: + for value in entries: + writer.write(value) + yield filename + finally: + os.unlink(temp.name) + + +def BytesFeature(value): + return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) + + +def BytesListFeature(value): + return tf.train.Feature(bytes_list=tf.train.BytesList(value=value)) + + +def Int64Feature(value): + return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) + + +def Int64ListFeature(value): + return tf.train.Feature(int64_list=tf.train.Int64List(value=value)) + + +def FloatListFeature(value): + return tf.train.Feature(float_list=tf.train.FloatList(value=value)) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class GenerateContextDataTest(tf.test.TestCase): + + def _create_first_tf_example(self): + with self.test_session(): + encoded_image = tf.image.encode_jpeg( + tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).eval() + + example = tf.train.Example(features=tf.train.Features(feature={ + 'image/encoded': BytesFeature(encoded_image), + 'image/source_id': BytesFeature(six.ensure_binary('image_id_1')), + 'image/height': Int64Feature(4), + 'image/width': Int64Feature(4), + 'image/object/class/label': Int64ListFeature([5, 5]), + 'image/object/class/text': BytesListFeature([six.ensure_binary('hyena'), + six.ensure_binary('hyena') + ]), + 'image/object/bbox/xmin': FloatListFeature([0.0, 0.1]), + 'image/object/bbox/xmax': FloatListFeature([0.2, 0.3]), + 'image/object/bbox/ymin': FloatListFeature([0.4, 0.5]), + 'image/object/bbox/ymax': FloatListFeature([0.6, 0.7]), + 'image/seq_id': BytesFeature(six.ensure_binary('01')), + 'image/seq_num_frames': Int64Feature(2), + 'image/seq_frame_num': Int64Feature(0), + 'image/date_captured': BytesFeature( + six.ensure_binary(str(datetime.datetime(2020, 1, 1, 1, 0, 0)))), + 'image/embedding': FloatListFeature([0.1, 0.2, 0.3]), + 'image/embedding_score': FloatListFeature([0.9]), + 'image/embedding_length': Int64Feature(3) + + })) + + return example.SerializeToString() + + def _create_second_tf_example(self): + with self.test_session(): + encoded_image = tf.image.encode_jpeg( + tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).eval() + + example = tf.train.Example(features=tf.train.Features(feature={ + 'image/encoded': BytesFeature(encoded_image), + 'image/source_id': BytesFeature(six.ensure_binary('image_id_2')), + 'image/height': Int64Feature(4), + 'image/width': Int64Feature(4), + 'image/object/class/label': Int64ListFeature([5]), + 'image/object/class/text': BytesListFeature([six.ensure_binary('hyena') + ]), + 'image/object/bbox/xmin': FloatListFeature([0.0]), + 'image/object/bbox/xmax': FloatListFeature([0.1]), + 'image/object/bbox/ymin': FloatListFeature([0.2]), + 'image/object/bbox/ymax': FloatListFeature([0.3]), + 'image/seq_id': 
BytesFeature(six.ensure_binary('01')), + 'image/seq_num_frames': Int64Feature(2), + 'image/seq_frame_num': Int64Feature(1), + 'image/date_captured': BytesFeature( + six.ensure_binary(str(datetime.datetime(2020, 1, 1, 1, 1, 0)))), + 'image/embedding': FloatListFeature([0.4, 0.5, 0.6]), + 'image/embedding_score': FloatListFeature([0.9]), + 'image/embedding_length': Int64Feature(3) + })) + + return example.SerializeToString() + + def assert_expected_examples(self, tf_example_list): + self.assertAllEqual( + {tf_example.features.feature['image/source_id'].bytes_list.value[0] + for tf_example in tf_example_list}, + {six.ensure_binary('image_id_1'), six.ensure_binary('image_id_2')}) + self.assertAllClose( + tf_example_list[0].features.feature[ + 'image/context_features'].float_list.value, + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]) + self.assertAllClose( + tf_example_list[1].features.feature[ + 'image/context_features'].float_list.value, + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]) + + def assert_expected_sequence_example(self, tf_sequence_example_list): + tf_sequence_example = tf_sequence_example_list[0] + num_frames = 2 + + self.assertAllEqual( + tf_sequence_example.context.feature[ + 'clip/media_id'].bytes_list.value[0], six.ensure_binary( + '01_0')) + self.assertAllClose( + tf_sequence_example.context.feature[ + 'image/context_features'].float_list.value, + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]) + + seq_feature_dict = tf_sequence_example.feature_lists.feature_list + + self.assertLen( + seq_feature_dict['image/encoded'].feature[:], + num_frames) + actual_timestamps = [ + feature.int64_list.value[0] for feature + in seq_feature_dict['image/timestamp'].feature] + timestamps = [0, 1] + self.assertAllEqual(timestamps, actual_timestamps) + + # First image. + self.assertAllClose( + [0.4, 0.5], + seq_feature_dict['region/bbox/ymin'].feature[0].float_list.value[:]) + self.assertAllClose( + [0.0, 0.1], + seq_feature_dict['region/bbox/xmin'].feature[0].float_list.value[:]) + self.assertAllClose( + [0.6, 0.7], + seq_feature_dict['region/bbox/ymax'].feature[0].float_list.value[:]) + self.assertAllClose( + [0.2, 0.3], + seq_feature_dict['region/bbox/xmax'].feature[0].float_list.value[:]) + self.assertAllEqual( + [six.ensure_binary('hyena'), six.ensure_binary('hyena')], + seq_feature_dict['region/label/string'].feature[0].bytes_list.value[:]) + + # Second example. 
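+    # (The single 'hyena' box contributed by _create_second_tf_example.)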
+ self.assertAllClose( + [0.2], + seq_feature_dict['region/bbox/ymin'].feature[1].float_list.value[:]) + self.assertAllClose( + [0.0], + seq_feature_dict['region/bbox/xmin'].feature[1].float_list.value[:]) + self.assertAllClose( + [0.3], + seq_feature_dict['region/bbox/ymax'].feature[1].float_list.value[:]) + self.assertAllClose( + [0.1], + seq_feature_dict['region/bbox/xmax'].feature[1].float_list.value[:]) + self.assertAllEqual( + [six.ensure_binary('hyena')], + seq_feature_dict['region/label/string'].feature[1].bytes_list.value[:]) + + def assert_expected_key(self, key): + self.assertAllEqual(key, '01') + + def assert_sorted(self, example_collection): + example_list = list(example_collection) + counter = 0 + for example in example_list: + frame_num = example.features.feature[ + 'image/seq_frame_num'].int64_list.value[0] + self.assertGreaterEqual(frame_num, counter) + counter = frame_num + + def assert_context(self, example_collection): + example_list = list(example_collection) + for example in example_list: + context = example.features.feature[ + 'image/context_features'].float_list.value + self.assertAllClose([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], context) + + def assert_resized(self, example): + width = example.features.feature['image/width'].int64_list.value[0] + self.assertAllEqual(width, 2) + height = example.features.feature['image/height'].int64_list.value[0] + self.assertAllEqual(height, 2) + + def assert_size(self, example): + width = example.features.feature['image/width'].int64_list.value[0] + self.assertAllEqual(width, 4) + height = example.features.feature['image/height'].int64_list.value[0] + self.assertAllEqual(height, 4) + + def test_sliding_window(self): + example_list = ['a', 'b', 'c', 'd', 'e', 'f', 'g'] + max_clip_length = 3 + stride_length = 3 + out_list = [list(i) for i in add_context_to_examples.get_sliding_window( + example_list, max_clip_length, stride_length)] + self.assertAllEqual(out_list, [['a', 'b', 'c'], + ['d', 'e', 'f'], + ['g']]) + + def test_rekey_data_fn(self): + sequence_key = 'image/seq_id' + time_horizon = None + reduce_image_size = False + max_dim = None + + rekey_fn = add_context_to_examples.ReKeyDataFn( + sequence_key, time_horizon, + reduce_image_size, max_dim) + output = rekey_fn.process(self._create_first_tf_example()) + + self.assert_expected_key(output[0][0]) + self.assert_size(output[0][1]) + + def test_rekey_data_fn_w_resize(self): + sequence_key = 'image/seq_id' + time_horizon = None + reduce_image_size = True + max_dim = 2 + + rekey_fn = add_context_to_examples.ReKeyDataFn( + sequence_key, time_horizon, + reduce_image_size, max_dim) + output = rekey_fn.process(self._create_first_tf_example()) + + self.assert_expected_key(output[0][0]) + self.assert_resized(output[0][1]) + + def test_sort_fn(self): + sequence_key = 'image/seq_id' + sorted_image_ids = False + max_num_elements_in_context_features = 10 + sort_fn = add_context_to_examples.SortGroupedDataFn( + sequence_key, sorted_image_ids, max_num_elements_in_context_features) + output = sort_fn.process( + ('dummy_key', [tf.train.Example.FromString( + self._create_second_tf_example()), + tf.train.Example.FromString( + self._create_first_tf_example())])) + + self.assert_sorted(output[0][1]) + + def test_add_context_fn(self): + sequence_key = 'image/seq_id' + add_context_features = True + image_ids_to_keep = 'All' + context_fn = add_context_to_examples.GenerateContextFn( + sequence_key, add_context_features, image_ids_to_keep) + output = context_fn.process( + ('dummy_key', 
[tf.train.Example.FromString( + self._create_first_tf_example()), + tf.train.Example.FromString( + self._create_second_tf_example())])) + + self.assertEqual(len(output), 2) + self.assert_context(output) + + def test_add_context_fn_output_sequence_example(self): + sequence_key = 'image/seq_id' + add_context_features = True + image_ids_to_keep = 'All' + context_fn = add_context_to_examples.GenerateContextFn( + sequence_key, add_context_features, image_ids_to_keep, + output_type='tf_sequence_example') + output = context_fn.process( + ('01', + [tf.train.Example.FromString(self._create_first_tf_example()), + tf.train.Example.FromString(self._create_second_tf_example())])) + + self.assertEqual(len(output), 1) + self.assert_expected_sequence_example(output) + + def test_add_context_fn_output_sequence_example_cliplen(self): + sequence_key = 'image/seq_id' + add_context_features = True + image_ids_to_keep = 'All' + context_fn = add_context_to_examples.GenerateContextFn( + sequence_key, add_context_features, image_ids_to_keep, + output_type='tf_sequence_example', max_clip_length=1) + output = context_fn.process( + ('01', + [tf.train.Example.FromString(self._create_first_tf_example()), + tf.train.Example.FromString(self._create_second_tf_example())])) + self.assertEqual(len(output), 2) + + def test_beam_pipeline(self): + with InMemoryTFRecord( + [self._create_first_tf_example(), + self._create_second_tf_example()]) as input_tfrecord: + runner = runners.DirectRunner() + temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR')) + output_tfrecord = os.path.join(temp_dir, 'output_tfrecord') + sequence_key = six.ensure_binary('image/seq_id') + max_num_elements = 10 + num_shards = 1 + pipeline = add_context_to_examples.construct_pipeline( + input_tfrecord, + output_tfrecord, + sequence_key, + max_num_elements_in_context_features=max_num_elements, + num_shards=num_shards) + runner.run(pipeline) + filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????') + actual_output = [] + record_iterator = tf.python_io.tf_record_iterator(path=filenames[0]) + for record in record_iterator: + actual_output.append(record) + self.assertEqual(len(actual_output), 2) + self.assert_expected_examples([tf.train.Example.FromString( + tf_example) for tf_example in actual_output]) + + def test_beam_pipeline_sequence_example(self): + with InMemoryTFRecord( + [self._create_first_tf_example(), + self._create_second_tf_example()]) as input_tfrecord: + runner = runners.DirectRunner() + temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR')) + output_tfrecord = os.path.join(temp_dir, 'output_tfrecord') + sequence_key = six.ensure_binary('image/seq_id') + max_num_elements = 10 + num_shards = 1 + pipeline = add_context_to_examples.construct_pipeline( + input_tfrecord, + output_tfrecord, + sequence_key, + max_num_elements_in_context_features=max_num_elements, + num_shards=num_shards, + output_type='tf_sequence_example') + runner.run(pipeline) + filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????') + actual_output = [] + record_iterator = tf.python_io.tf_record_iterator( + path=filenames[0]) + for record in record_iterator: + actual_output.append(record) + self.assertEqual(len(actual_output), 1) + self.assert_expected_sequence_example( + [tf.train.SequenceExample.FromString( + tf_example) for tf_example in actual_output]) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_main.py 
b/models/research/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_main.py new file mode 100644 index 0000000000000000000000000000000000000000..106cf5adb94d8d1017a1834de42ab2096d85c67c --- /dev/null +++ b/models/research/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_main.py @@ -0,0 +1,324 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Beam pipeline to create COCO Camera Traps Object Detection TFRecords. + +Please note that this tool creates sharded output files. + +This tool assumes the input annotations are in the COCO Camera Traps json +format, specified here: +https://github.com/Microsoft/CameraTraps/blob/master/data_management/README.md + +Example usage: + + python create_cococameratraps_tfexample_main.py \ + --alsologtostderr \ + --output_tfrecord_prefix="/path/to/output/tfrecord/location/prefix" \ + --image_directory="/path/to/image/folder/" \ + --input_annotations_file="path/to/annotations.json" + +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import hashlib +import io +import json +import logging +import os +from absl import app +from absl import flags +import apache_beam as beam +import numpy as np +import PIL.Image +import tensorflow.compat.v1 as tf +from apache_beam import runners +from object_detection.utils import dataset_util + +flags.DEFINE_string('image_directory', None, 'Directory where images are ' + 'stored') +flags.DEFINE_string('output_tfrecord_prefix', None, + 'TFRecord containing images in tf.Example format.') +flags.DEFINE_string('input_annotations_file', None, 'Path to Coco-CameraTraps' + 'style annotations file') +flags.DEFINE_integer('num_images_per_shard', + 200, + 'The number of images to be stored in each shard.') + +FLAGS = flags.FLAGS + + +class ParseImage(beam.DoFn): + """A DoFn that parses a COCO-CameraTraps json and emits TFRecords.""" + + def __init__(self, image_directory, images, annotations, categories, + keep_bboxes): + """Initialization function. 
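+
+    A rough sketch of the expected inputs (field names follow the COCO Camera
+    Traps format; the values below are purely illustrative):
+
+      images = [{'id': 'img0', 'file_name': 'img0.jpg', 'width': 640,
+                 'height': 480, 'location': 'site_a', 'date_captured':
+                 '2020-01-01 12:00:00', 'seq_id': 'seq0', 'seq_num_frames': 1,
+                 'frame_num': 0}]
+      annotations = [{'image_id': 'img0', 'category_id': 1,
+                      'bbox': [10, 20, 100, 80]}]  # [x, y, width, height] px.
+      categories = [{'id': 1, 'name': 'animal'}]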
+ + Args: + image_directory: Path to image directory + images: list of COCO Camera Traps style image dictionaries + annotations: list of COCO Camera Traps style annotation dictionaries + categories: list of COCO Camera Traps style category dictionaries + keep_bboxes: Whether to keep any bounding boxes that exist in the + annotations + """ + + self._image_directory = image_directory + self._image_dict = {im['id']: im for im in images} + self._annotation_dict = {im['id']: [] for im in images} + self._category_dict = {int(cat['id']): cat for cat in categories} + for ann in annotations: + self._annotation_dict[ann['image_id']].append(ann) + self._images = images + self._keep_bboxes = keep_bboxes + + self._num_examples_processed = beam.metrics.Metrics.counter( + 'cococameratraps_data_generation', 'num_tf_examples_processed') + + def process(self, image_id): + """Builds a tf.Example given an image id. + + Args: + image_id: the image id of the associated image + + Returns: + List of tf.Examples. + """ + + image = self._image_dict[image_id] + annotations = self._annotation_dict[image_id] + image_height = image['height'] + image_width = image['width'] + filename = image['file_name'] + image_id = image['id'] + image_location_id = image['location'] + + image_datetime = str(image['date_captured']) + + image_sequence_id = str(image['seq_id']) + image_sequence_num_frames = int(image['seq_num_frames']) + image_sequence_frame_num = int(image['frame_num']) + + full_path = os.path.join(self._image_directory, filename) + + try: + # Ensure the image exists and is not corrupted + with tf.io.gfile.GFile(full_path, 'rb') as fid: + encoded_jpg = fid.read() + encoded_jpg_io = io.BytesIO(encoded_jpg) + image = PIL.Image.open(encoded_jpg_io) + # Ensure the image can be read by tf + with tf.Graph().as_default(): + image = tf.image.decode_jpeg(encoded_jpg, channels=3) + init_op = tf.initialize_all_tables() + with tf.Session() as sess: + sess.run(init_op) + sess.run(image) + except Exception as e: # pylint: disable=broad-except + # The image file is missing or corrupt + tf.logging.error(str(e)) + return [] + + key = hashlib.sha256(encoded_jpg).hexdigest() + feature_dict = { + 'image/height': + dataset_util.int64_feature(image_height), + 'image/width': + dataset_util.int64_feature(image_width), + 'image/filename': + dataset_util.bytes_feature(filename.encode('utf8')), + 'image/source_id': + dataset_util.bytes_feature(str(image_id).encode('utf8')), + 'image/key/sha256': + dataset_util.bytes_feature(key.encode('utf8')), + 'image/encoded': + dataset_util.bytes_feature(encoded_jpg), + 'image/format': + dataset_util.bytes_feature('jpeg'.encode('utf8')), + 'image/location': + dataset_util.bytes_feature(str(image_location_id).encode('utf8')), + 'image/seq_num_frames': + dataset_util.int64_feature(image_sequence_num_frames), + 'image/seq_frame_num': + dataset_util.int64_feature(image_sequence_frame_num), + 'image/seq_id': + dataset_util.bytes_feature(image_sequence_id.encode('utf8')), + 'image/date_captured': + dataset_util.bytes_feature(image_datetime.encode('utf8')) + } + + num_annotations_skipped = 0 + if annotations: + xmin = [] + xmax = [] + ymin = [] + ymax = [] + category_names = [] + category_ids = [] + area = [] + + for object_annotations in annotations: + if 'bbox' in object_annotations and self._keep_bboxes: + (x, y, width, height) = tuple(object_annotations['bbox']) + if width <= 0 or height <= 0: + num_annotations_skipped += 1 + continue + if x + width > image_width or y + height > image_height: + 
num_annotations_skipped += 1 + continue + xmin.append(float(x) / image_width) + xmax.append(float(x + width) / image_width) + ymin.append(float(y) / image_height) + ymax.append(float(y + height) / image_height) + if 'area' in object_annotations: + area.append(object_annotations['area']) + else: + # approximate area using l*w/2 + area.append(width*height/2.0) + + category_id = int(object_annotations['category_id']) + category_ids.append(category_id) + category_names.append( + self._category_dict[category_id]['name'].encode('utf8')) + + feature_dict.update({ + 'image/object/bbox/xmin': + dataset_util.float_list_feature(xmin), + 'image/object/bbox/xmax': + dataset_util.float_list_feature(xmax), + 'image/object/bbox/ymin': + dataset_util.float_list_feature(ymin), + 'image/object/bbox/ymax': + dataset_util.float_list_feature(ymax), + 'image/object/class/text': + dataset_util.bytes_list_feature(category_names), + 'image/object/class/label': + dataset_util.int64_list_feature(category_ids), + 'image/object/area': + dataset_util.float_list_feature(area), + }) + + # For classification, add the first category to image/class/label and + # image/class/text + if not category_ids: + feature_dict.update({ + 'image/class/label': + dataset_util.int64_list_feature([0]), + 'image/class/text': + dataset_util.bytes_list_feature(['empty'.encode('utf8')]), + }) + else: + feature_dict.update({ + 'image/class/label': + dataset_util.int64_list_feature([category_ids[0]]), + 'image/class/text': + dataset_util.bytes_list_feature([category_names[0]]), + }) + + else: + # Add empty class if there are no annotations + feature_dict.update({ + 'image/class/label': + dataset_util.int64_list_feature([0]), + 'image/class/text': + dataset_util.bytes_list_feature(['empty'.encode('utf8')]), + }) + + example = tf.train.Example(features=tf.train.Features(feature=feature_dict)) + self._num_examples_processed.inc(1) + + return [(example)] + + +def _load_json_data(data_file): + with tf.io.gfile.GFile(data_file, 'r') as fid: + data_dict = json.load(fid) + return data_dict + + +def create_pipeline(image_directory, + input_annotations_file, + output_tfrecord_prefix=None, + num_images_per_shard=200, + keep_bboxes=True): + """Creates a beam pipeline for producing a COCO-CameraTraps Image dataset. + + Args: + image_directory: Path to image directory + input_annotations_file: Path to a coco-cameratraps annotation file + output_tfrecord_prefix: Absolute path for tfrecord outputs. Final files will + be named {output_tfrecord_prefix}@N. + num_images_per_shard: The number of images to store in each shard + keep_bboxes: Whether to keep any bounding boxes that exist in the json file + + Returns: + A Beam pipeline. + """ + + logging.info('Reading data from COCO-CameraTraps Dataset.') + + data = _load_json_data(input_annotations_file) + + num_shards = int(np.ceil(float(len(data['images']))/num_images_per_shard)) + + def pipeline(root): + """Builds beam pipeline.""" + + image_examples = ( + root + | ('CreateCollections') >> beam.Create( + [im['id'] for im in data['images']]) + | ('ParseImage') >> beam.ParDo(ParseImage( + image_directory, data['images'], data['annotations'], + data['categories'], keep_bboxes=keep_bboxes))) + _ = (image_examples + | ('Reshuffle') >> beam.Reshuffle() + | ('WriteTfImageExample') >> beam.io.tfrecordio.WriteToTFRecord( + output_tfrecord_prefix, + num_shards=num_shards, + coder=beam.coders.ProtoCoder(tf.train.Example))) + + return pipeline + + +def main(_): + """Runs the Beam pipeline that performs inference. 
+ + Args: + _: unused + """ + + # must create before flags are used + runner = runners.DirectRunner() + + dirname = os.path.dirname(FLAGS.output_tfrecord_prefix) + tf.io.gfile.makedirs(dirname) + + runner.run( + create_pipeline( + image_directory=FLAGS.image_directory, + input_annotations_file=FLAGS.input_annotations_file, + output_tfrecord_prefix=FLAGS.output_tfrecord_prefix, + num_images_per_shard=FLAGS.num_images_per_shard)) + + +if __name__ == '__main__': + flags.mark_flags_as_required([ + 'image_directory', + 'input_annotations_file', + 'output_tfrecord_prefix' + ]) + app.run(main) diff --git a/models/research/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_tf1_test.py b/models/research/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..be6dc0dc4dd765c4e6e0bab39f4228996a3c95b0 --- /dev/null +++ b/models/research/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_tf1_test.py @@ -0,0 +1,201 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for create_cococameratraps_tfexample_main.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import datetime +import json +import os +import tempfile +import unittest +import numpy as np + +from PIL import Image +import tensorflow.compat.v1 as tf +from object_detection.dataset_tools.context_rcnn import create_cococameratraps_tfexample_main +from object_detection.utils import tf_version +from apache_beam import runners + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class CreateCOCOCameraTrapsTfexampleTest(tf.test.TestCase): + + IMAGE_HEIGHT = 360 + IMAGE_WIDTH = 480 + + def _write_random_images_to_directory(self, directory, num_frames): + for frame_num in range(num_frames): + img = np.random.randint(0, high=256, + size=(self.IMAGE_HEIGHT, self.IMAGE_WIDTH, 3), + dtype=np.uint8) + pil_image = Image.fromarray(img) + fname = 'im_' + str(frame_num) + '.jpg' + pil_image.save(os.path.join(directory, fname), 'JPEG') + + def _create_json_file(self, directory, num_frames, keep_bboxes=False): + json_dict = {'images': [], 'annotations': []} + json_dict['categories'] = [{'id': 0, 'name': 'empty'}, + {'id': 1, 'name': 'animal'}] + for idx in range(num_frames): + im = {'id': 'im_' + str(idx), + 'file_name': 'im_' + str(idx) + '.jpg', + 'height': self.IMAGE_HEIGHT, + 'width': self.IMAGE_WIDTH, + 'seq_id': 'seq_1', + 'seq_num_frames': num_frames, + 'frame_num': idx, + 'location': 'loc_' + str(idx), + 'date_captured': str(datetime.datetime.now()) + } + json_dict['images'].append(im) + ann = {'id': 'ann' + str(idx), + 'image_id': 'im_' + str(idx), + 'category_id': 1, + } + if keep_bboxes: + ann['bbox'] = [0.0 * self.IMAGE_WIDTH, + 0.1 * self.IMAGE_HEIGHT, + 0.5 * 
self.IMAGE_WIDTH, + 0.5 * self.IMAGE_HEIGHT] + json_dict['annotations'].append(ann) + + json_path = os.path.join(directory, 'test_file.json') + with tf.io.gfile.GFile(json_path, 'w') as f: + json.dump(json_dict, f) + return json_path + + def assert_expected_example_bbox(self, example): + self.assertAllClose( + example.features.feature['image/object/bbox/ymin'].float_list.value, + [0.1]) + self.assertAllClose( + example.features.feature['image/object/bbox/xmin'].float_list.value, + [0.0]) + self.assertAllClose( + example.features.feature['image/object/bbox/ymax'].float_list.value, + [0.6]) + self.assertAllClose( + example.features.feature['image/object/bbox/xmax'].float_list.value, + [0.5]) + self.assertAllClose( + example.features.feature['image/object/class/label'] + .int64_list.value, [1]) + self.assertAllEqual( + example.features.feature['image/object/class/text'] + .bytes_list.value, ['animal']) + self.assertAllClose( + example.features.feature['image/class/label'] + .int64_list.value, [1]) + self.assertAllEqual( + example.features.feature['image/class/text'] + .bytes_list.value, ['animal']) + + # Check other essential attributes. + self.assertAllEqual( + example.features.feature['image/height'].int64_list.value, + [self.IMAGE_HEIGHT]) + self.assertAllEqual( + example.features.feature['image/width'].int64_list.value, + [self.IMAGE_WIDTH]) + self.assertAllEqual( + example.features.feature['image/source_id'].bytes_list.value, + ['im_0']) + self.assertTrue( + example.features.feature['image/encoded'].bytes_list.value) + + def assert_expected_example(self, example): + self.assertAllClose( + example.features.feature['image/object/bbox/ymin'].float_list.value, + []) + self.assertAllClose( + example.features.feature['image/object/bbox/xmin'].float_list.value, + []) + self.assertAllClose( + example.features.feature['image/object/bbox/ymax'].float_list.value, + []) + self.assertAllClose( + example.features.feature['image/object/bbox/xmax'].float_list.value, + []) + self.assertAllClose( + example.features.feature['image/object/class/label'] + .int64_list.value, [1]) + self.assertAllEqual( + example.features.feature['image/object/class/text'] + .bytes_list.value, ['animal']) + self.assertAllClose( + example.features.feature['image/class/label'] + .int64_list.value, [1]) + self.assertAllEqual( + example.features.feature['image/class/text'] + .bytes_list.value, ['animal']) + + # Check other essential attributes. 
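    # Added note (not part of the original test): the corner values asserted
    # above follow from the COCO-style [x, y, width, height] pixel box
    # [0.0*W, 0.1*H, 0.5*W, 0.5*H] written by _create_json_file, i.e. after
    # normalization xmin=0.0, ymin=0.1, xmax=0.0+0.5=0.5, ymax=0.1+0.5=0.6.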
+ self.assertAllEqual( + example.features.feature['image/height'].int64_list.value, + [self.IMAGE_HEIGHT]) + self.assertAllEqual( + example.features.feature['image/width'].int64_list.value, + [self.IMAGE_WIDTH]) + self.assertAllEqual( + example.features.feature['image/source_id'].bytes_list.value, + ['im_0']) + self.assertTrue( + example.features.feature['image/encoded'].bytes_list.value) + + def test_beam_pipeline(self): + runner = runners.DirectRunner() + num_frames = 1 + temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR')) + json_path = self._create_json_file(temp_dir, num_frames) + output_tfrecord = temp_dir+'/output' + self._write_random_images_to_directory(temp_dir, num_frames) + pipeline = create_cococameratraps_tfexample_main.create_pipeline( + temp_dir, json_path, + output_tfrecord_prefix=output_tfrecord) + runner.run(pipeline) + filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????') + actual_output = [] + record_iterator = tf.python_io.tf_record_iterator(path=filenames[0]) + for record in record_iterator: + actual_output.append(record) + self.assertEqual(len(actual_output), num_frames) + self.assert_expected_example(tf.train.Example.FromString( + actual_output[0])) + + def test_beam_pipeline_bbox(self): + runner = runners.DirectRunner() + num_frames = 1 + temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR')) + json_path = self._create_json_file(temp_dir, num_frames, keep_bboxes=True) + output_tfrecord = temp_dir+'/output' + self._write_random_images_to_directory(temp_dir, num_frames) + pipeline = create_cococameratraps_tfexample_main.create_pipeline( + temp_dir, json_path, + output_tfrecord_prefix=output_tfrecord, + keep_bboxes=True) + runner.run(pipeline) + filenames = tf.io.gfile.glob(output_tfrecord+'-?????-of-?????') + actual_output = [] + record_iterator = tf.python_io.tf_record_iterator(path=filenames[0]) + for record in record_iterator: + actual_output.append(record) + self.assertEqual(len(actual_output), num_frames) + self.assert_expected_example_bbox(tf.train.Example.FromString( + actual_output[0])) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/dataset_tools/context_rcnn/generate_detection_data.py b/models/research/object_detection/dataset_tools/context_rcnn/generate_detection_data.py new file mode 100644 index 0000000000000000000000000000000000000000..95c16c1358a15ece03aaa9e80353e1ebf2c17166 --- /dev/null +++ b/models/research/object_detection/dataset_tools/context_rcnn/generate_detection_data.py @@ -0,0 +1,262 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""A Beam job to generate detection data for camera trap images. 
+ +This tools allows to run inference with an exported Object Detection model in +`saved_model` format and produce raw detection boxes on images in tf.Examples, +with the assumption that the bounding box class label will match the image-level +class label in the tf.Example. + +Steps to generate a detection dataset: +1. Use object_detection/export_inference_graph.py to get a `saved_model` for + inference. The input node must accept a tf.Example proto. +2. Run this tool with `saved_model` from step 1 and an TFRecord of tf.Example + protos containing images for inference. + +Example Usage: +-------------- +python tensorflow_models/object_detection/export_inference_graph.py \ + --alsologtostderr \ + --input_type tf_example \ + --pipeline_config_path path/to/detection_model.config \ + --trained_checkpoint_prefix path/to/model.ckpt \ + --output_directory path/to/exported_model_directory + +python generate_detection_data.py \ + --alsologtostderr \ + --input_tfrecord path/to/input_tfrecord@X \ + --output_tfrecord path/to/output_tfrecord@X \ + --model_dir path/to/exported_model_directory/saved_model +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import threading +from absl import app +from absl import flags +import apache_beam as beam +import tensorflow.compat.v1 as tf +from apache_beam import runners + + +flags.DEFINE_string('detection_input_tfrecord', None, 'TFRecord containing ' + 'images in tf.Example format for object detection.') +flags.DEFINE_string('detection_output_tfrecord', None, + 'TFRecord containing detections in tf.Example format.') +flags.DEFINE_string('detection_model_dir', None, 'Path to directory containing' + 'an object detection SavedModel.') +flags.DEFINE_float('confidence_threshold', 0.9, + 'Min confidence to keep bounding boxes') +flags.DEFINE_integer('num_shards', 0, 'Number of output shards.') + +FLAGS = flags.FLAGS + + +class GenerateDetectionDataFn(beam.DoFn): + """Generates detection data for camera trap images. + + This Beam DoFn performs inference with an object detection `saved_model` and + produces detection boxes for camera trap data, matched to the + object class. + """ + session_lock = threading.Lock() + + def __init__(self, model_dir, confidence_threshold): + """Initialization function. + + Args: + model_dir: A directory containing saved model. + confidence_threshold: the confidence threshold for boxes to keep + """ + self._model_dir = model_dir + self._confidence_threshold = confidence_threshold + self._session = None + self._num_examples_processed = beam.metrics.Metrics.counter( + 'detection_data_generation', 'num_tf_examples_processed') + + def start_bundle(self): + self._load_inference_model() + + def _load_inference_model(self): + # Because initialization of the tf.Session is expensive we share + # one instance across all threads in the worker. This is possible since + # tf.Session.run() is thread safe. 
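    # Added commentary (assumption, not from the original change): the lock
    # plus the `if self._session is None` check below ensure the SavedModel is
    # loaded at most once per DoFn instance even when bundles run on several
    # threads. The `serving_default` signature is assumed to take a batch of
    # serialized tf.Example strings and to return `detection_boxes` with shape
    # [batch, max_detections, 4], `detection_scores` [batch, max_detections]
    # and `num_detections` [batch], which is how the tensors are indexed below.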
+ with self.session_lock: + if self._session is None: + graph = tf.Graph() + self._session = tf.Session(graph=graph) + with graph.as_default(): + meta_graph = tf.saved_model.loader.load( + self._session, [tf.saved_model.tag_constants.SERVING], + self._model_dir) + signature = meta_graph.signature_def['serving_default'] + input_tensor_name = signature.inputs['inputs'].name + self._input = graph.get_tensor_by_name(input_tensor_name) + self._boxes_node = graph.get_tensor_by_name( + signature.outputs['detection_boxes'].name) + self._scores_node = graph.get_tensor_by_name( + signature.outputs['detection_scores'].name) + self._num_detections_node = graph.get_tensor_by_name( + signature.outputs['num_detections'].name) + + def process(self, tfrecord_entry): + return self._run_inference_and_generate_detections(tfrecord_entry) + + def _run_inference_and_generate_detections(self, tfrecord_entry): + input_example = tf.train.Example.FromString(tfrecord_entry) + if input_example.features.feature[ + 'image/object/bbox/ymin'].float_list.value: + # There are already ground truth boxes for this image, just keep them. + return [input_example] + + detection_boxes, detection_scores, num_detections = self._session.run( + [self._boxes_node, self._scores_node, self._num_detections_node], + feed_dict={self._input: [tfrecord_entry]}) + + example = tf.train.Example() + + num_detections = int(num_detections[0]) + + image_class_labels = input_example.features.feature[ + 'image/object/class/label'].int64_list.value + + image_class_texts = input_example.features.feature[ + 'image/object/class/text'].bytes_list.value + + # Ignore any images with multiple classes, + # we can't match the class to the box. + if len(image_class_labels) > 1: + return [] + + # Don't add boxes for images already labeled empty (for now) + if len(image_class_labels) == 1: + # Add boxes over confidence threshold. + for idx, score in enumerate(detection_scores[0]): + if score >= self._confidence_threshold and idx < num_detections: + example.features.feature[ + 'image/object/bbox/ymin'].float_list.value.extend([ + detection_boxes[0, idx, 0]]) + example.features.feature[ + 'image/object/bbox/xmin'].float_list.value.extend([ + detection_boxes[0, idx, 1]]) + example.features.feature[ + 'image/object/bbox/ymax'].float_list.value.extend([ + detection_boxes[0, idx, 2]]) + example.features.feature[ + 'image/object/bbox/xmax'].float_list.value.extend([ + detection_boxes[0, idx, 3]]) + + # Add box scores and class texts and labels. 
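          # Added note: every box kept above the confidence threshold reuses
          # the single image-level label/text, so e.g. one "hyena" image with
          # two confident detections yields two identical class entries, one
          # per emitted box.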
+ example.features.feature[ + 'image/object/class/score'].float_list.value.extend( + [score]) + + example.features.feature[ + 'image/object/class/label'].int64_list.value.extend( + [image_class_labels[0]]) + + example.features.feature[ + 'image/object/class/text'].bytes_list.value.extend( + [image_class_texts[0]]) + + # Add other essential example attributes + example.features.feature['image/encoded'].bytes_list.value.extend( + input_example.features.feature['image/encoded'].bytes_list.value) + example.features.feature['image/height'].int64_list.value.extend( + input_example.features.feature['image/height'].int64_list.value) + example.features.feature['image/width'].int64_list.value.extend( + input_example.features.feature['image/width'].int64_list.value) + example.features.feature['image/source_id'].bytes_list.value.extend( + input_example.features.feature['image/source_id'].bytes_list.value) + example.features.feature['image/location'].bytes_list.value.extend( + input_example.features.feature['image/location'].bytes_list.value) + + example.features.feature['image/date_captured'].bytes_list.value.extend( + input_example.features.feature['image/date_captured'].bytes_list.value) + + example.features.feature['image/class/text'].bytes_list.value.extend( + input_example.features.feature['image/class/text'].bytes_list.value) + example.features.feature['image/class/label'].int64_list.value.extend( + input_example.features.feature['image/class/label'].int64_list.value) + + example.features.feature['image/seq_id'].bytes_list.value.extend( + input_example.features.feature['image/seq_id'].bytes_list.value) + example.features.feature['image/seq_num_frames'].int64_list.value.extend( + input_example.features.feature['image/seq_num_frames'].int64_list.value) + example.features.feature['image/seq_frame_num'].int64_list.value.extend( + input_example.features.feature['image/seq_frame_num'].int64_list.value) + + self._num_examples_processed.inc(1) + return [example] + + +def construct_pipeline(input_tfrecord, output_tfrecord, model_dir, + confidence_threshold, num_shards): + """Returns a Beam pipeline to run object detection inference. + + Args: + input_tfrecord: A TFRecord of tf.train.Example protos containing images. + output_tfrecord: A TFRecord of tf.train.Example protos that contain images + in the input TFRecord and the detections from the model. + model_dir: Path to `saved_model` to use for inference. + confidence_threshold: Threshold to use when keeping detection results. + num_shards: The number of output shards. + Returns: + pipeline: A Beam pipeline. + """ + def pipeline(root): + input_collection = ( + root | 'ReadInputTFRecord' >> beam.io.tfrecordio.ReadFromTFRecord( + input_tfrecord, + coder=beam.coders.BytesCoder())) + output_collection = input_collection | 'RunInference' >> beam.ParDo( + GenerateDetectionDataFn(model_dir, confidence_threshold)) + output_collection = output_collection | 'Reshuffle' >> beam.Reshuffle() + _ = output_collection | 'WritetoDisk' >> beam.io.tfrecordio.WriteToTFRecord( + output_tfrecord, + num_shards=num_shards, + coder=beam.coders.ProtoCoder(tf.train.Example)) + return pipeline + + +def main(_): + """Runs the Beam pipeline that performs inference. 
+ + Args: + _: unused + """ + # must create before flags are used + runner = runners.DirectRunner() + + dirname = os.path.dirname(FLAGS.detection_output_tfrecord) + tf.io.gfile.makedirs(dirname) + runner.run( + construct_pipeline(FLAGS.detection_input_tfrecord, + FLAGS.detection_output_tfrecord, + FLAGS.detection_model_dir, + FLAGS.confidence_threshold, + FLAGS.num_shards)) + + +if __name__ == '__main__': + flags.mark_flags_as_required([ + 'detection_input_tfrecord', + 'detection_output_tfrecord', + 'detection_model_dir' + ]) + app.run(main) diff --git a/models/research/object_detection/dataset_tools/context_rcnn/generate_detection_data_tf1_test.py b/models/research/object_detection/dataset_tools/context_rcnn/generate_detection_data_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..9002e750f7a9fdee49baf7c589dca4095721e1ee --- /dev/null +++ b/models/research/object_detection/dataset_tools/context_rcnn/generate_detection_data_tf1_test.py @@ -0,0 +1,267 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for generate_detection_data.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import contextlib +import os +import tempfile +import unittest +import numpy as np +import six +import tensorflow.compat.v1 as tf + +from object_detection import exporter +from object_detection.builders import model_builder +from object_detection.core import model +from object_detection.dataset_tools.context_rcnn import generate_detection_data +from object_detection.protos import pipeline_pb2 +from object_detection.utils import tf_version +from apache_beam import runners + +if six.PY2: + import mock # pylint: disable=g-import-not-at-top +else: + mock = unittest.mock + + +class FakeModel(model.DetectionModel): + """A Fake Detection model with expected output nodes from post-processing.""" + + def preprocess(self, inputs): + true_image_shapes = [] # Doesn't matter for the fake model. 
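    # Added note: a real DetectionModel would resize/normalize here; the fake
    # simply passes inputs through so an export graph can be built in tests.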
+ return tf.identity(inputs), true_image_shapes + + def predict(self, preprocessed_inputs, true_image_shapes): + return {'image': tf.layers.conv2d(preprocessed_inputs, 3, 1)} + + def postprocess(self, prediction_dict, true_image_shapes): + with tf.control_dependencies(prediction_dict.values()): + postprocessed_tensors = { + 'detection_boxes': tf.constant([[[0.0, 0.1, 0.5, 0.6], + [0.5, 0.5, 0.8, 0.8]]], tf.float32), + 'detection_scores': tf.constant([[0.95, 0.6]], tf.float32), + 'detection_multiclass_scores': tf.constant([[[0.1, 0.7, 0.2], + [0.3, 0.1, 0.6]]], + tf.float32), + 'detection_classes': tf.constant([[0, 1]], tf.float32), + 'num_detections': tf.constant([2], tf.float32) + } + return postprocessed_tensors + + def restore_map(self, checkpoint_path, fine_tune_checkpoint_type): + pass + + def loss(self, prediction_dict, true_image_shapes): + pass + + def regularization_losses(self): + pass + + def updates(self): + pass + + +@contextlib.contextmanager +def InMemoryTFRecord(entries): + temp = tempfile.NamedTemporaryFile(delete=False) + filename = temp.name + try: + with tf.python_io.TFRecordWriter(filename) as writer: + for value in entries: + writer.write(value) + yield filename + finally: + os.unlink(filename) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class GenerateDetectionDataTest(tf.test.TestCase): + + def _save_checkpoint_from_mock_model(self, checkpoint_path): + """A function to save checkpoint from a fake Detection Model. + + Args: + checkpoint_path: Path to save checkpoint from Fake model. + """ + g = tf.Graph() + with g.as_default(): + mock_model = FakeModel(num_classes=5) + preprocessed_inputs, true_image_shapes = mock_model.preprocess( + tf.placeholder(tf.float32, shape=[None, None, None, 3])) + predictions = mock_model.predict(preprocessed_inputs, true_image_shapes) + mock_model.postprocess(predictions, true_image_shapes) + tf.train.get_or_create_global_step() + saver = tf.train.Saver() + init = tf.global_variables_initializer() + with self.test_session(graph=g) as sess: + sess.run(init) + saver.save(sess, checkpoint_path) + + def _export_saved_model(self): + tmp_dir = self.get_temp_dir() + checkpoint_path = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(checkpoint_path) + output_directory = os.path.join(tmp_dir, 'output') + saved_model_path = os.path.join(output_directory, 'saved_model') + tf.io.gfile.makedirs(output_directory) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel(num_classes=5) + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + detection_model = model_builder.build(pipeline_config.model, + is_training=False) + outputs, placeholder_tensor = exporter.build_detection_graph( + input_type='tf_example', + detection_model=detection_model, + input_shape=None, + output_collection_name='inference_op', + graph_hook_fn=None) + output_node_names = ','.join(outputs.keys()) + saver = tf.train.Saver() + input_saver_def = saver.as_saver_def() + frozen_graph_def = exporter.freeze_graph_with_def_protos( + input_graph_def=tf.get_default_graph().as_graph_def(), + input_saver_def=input_saver_def, + input_checkpoint=checkpoint_path, + output_node_names=output_node_names, + restore_op_name='save/restore_all', + filename_tensor_name='save/Const:0', + output_graph='', + clear_devices=True, + initializer_nodes='') + exporter.write_saved_model( + saved_model_path=saved_model_path, + 
frozen_graph_def=frozen_graph_def, + inputs=placeholder_tensor, + outputs=outputs) + return saved_model_path + + def _create_tf_example(self): + with self.test_session(): + encoded_image = tf.image.encode_jpeg( + tf.constant(np.ones((4, 6, 3)).astype(np.uint8))).eval() + + def BytesFeature(value): + return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) + + def Int64Feature(value): + return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) + + example = tf.train.Example(features=tf.train.Features(feature={ + 'image/encoded': BytesFeature(encoded_image), + 'image/source_id': BytesFeature(b'image_id'), + 'image/height': Int64Feature(4), + 'image/width': Int64Feature(6), + 'image/object/class/label': Int64Feature(5), + 'image/object/class/text': BytesFeature(b'hyena'), + 'image/class/label': Int64Feature(5), + 'image/class/text': BytesFeature(b'hyena'), + })) + + return example.SerializeToString() + + def assert_expected_example(self, example): + self.assertAllClose( + example.features.feature['image/object/bbox/ymin'].float_list.value, + [0.0]) + self.assertAllClose( + example.features.feature['image/object/bbox/xmin'].float_list.value, + [0.1]) + self.assertAllClose( + example.features.feature['image/object/bbox/ymax'].float_list.value, + [0.5]) + self.assertAllClose( + example.features.feature['image/object/bbox/xmax'].float_list.value, + [0.6]) + self.assertAllClose( + example.features.feature['image/object/class/score'] + .float_list.value, [0.95]) + self.assertAllClose( + example.features.feature['image/object/class/label'] + .int64_list.value, [5]) + self.assertAllEqual( + example.features.feature['image/object/class/text'] + .bytes_list.value, [b'hyena']) + self.assertAllClose( + example.features.feature['image/class/label'] + .int64_list.value, [5]) + self.assertAllEqual( + example.features.feature['image/class/text'] + .bytes_list.value, [b'hyena']) + + # Check other essential attributes. 
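    # Added note: the box/score values asserted above are FakeModel's first
    # detection (score 0.95 >= the 0.8 threshold used in this test); its
    # second detection (score 0.6) is expected to be filtered out.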
+ self.assertAllEqual( + example.features.feature['image/height'].int64_list.value, [4]) + self.assertAllEqual( + example.features.feature['image/width'].int64_list.value, [6]) + self.assertAllEqual( + example.features.feature['image/source_id'].bytes_list.value, + [b'image_id']) + self.assertTrue( + example.features.feature['image/encoded'].bytes_list.value) + + def test_generate_detection_data_fn(self): + saved_model_path = self._export_saved_model() + confidence_threshold = 0.8 + inference_fn = generate_detection_data.GenerateDetectionDataFn( + saved_model_path, confidence_threshold) + inference_fn.start_bundle() + generated_example = self._create_tf_example() + self.assertAllEqual(tf.train.Example.FromString( + generated_example).features.feature['image/object/class/label'] + .int64_list.value, [5]) + self.assertAllEqual(tf.train.Example.FromString( + generated_example).features.feature['image/object/class/text'] + .bytes_list.value, [b'hyena']) + output = inference_fn.process(generated_example) + output_example = output[0] + + self.assertAllEqual( + output_example.features.feature['image/object/class/label'] + .int64_list.value, [5]) + self.assertAllEqual(output_example.features.feature['image/width'] + .int64_list.value, [6]) + + self.assert_expected_example(output_example) + + def test_beam_pipeline(self): + with InMemoryTFRecord([self._create_tf_example()]) as input_tfrecord: + runner = runners.DirectRunner() + temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR')) + output_tfrecord = os.path.join(temp_dir, 'output_tfrecord') + saved_model_path = self._export_saved_model() + confidence_threshold = 0.8 + num_shards = 1 + pipeline = generate_detection_data.construct_pipeline( + input_tfrecord, output_tfrecord, saved_model_path, + confidence_threshold, num_shards) + runner.run(pipeline) + filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????') + actual_output = [] + record_iterator = tf.python_io.tf_record_iterator(path=filenames[0]) + for record in record_iterator: + actual_output.append(record) + self.assertEqual(len(actual_output), 1) + self.assert_expected_example(tf.train.Example.FromString( + actual_output[0])) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py b/models/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py new file mode 100644 index 0000000000000000000000000000000000000000..a147c4e88339f44ff417dc38b60cff28ffe010ed --- /dev/null +++ b/models/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py @@ -0,0 +1,378 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""A Beam job to generate embedding data for camera trap images. 
+ +This tool runs inference with an exported Object Detection model in +`saved_model` format and produce raw embeddings for camera trap data. These +embeddings contain an object-centric feature embedding from Faster R-CNN, the +datetime that the image was taken (normalized in a specific way), and the +position of the object of interest. By default, only the highest-scoring object +embedding is included. + +Steps to generate a embedding dataset: +1. Use object_detection/export_inference_graph.py to get a Faster R-CNN + `saved_model` for inference. The input node must accept a tf.Example proto. +2. Run this tool with `saved_model` from step 1 and an TFRecord of tf.Example + protos containing images for inference. + +Example Usage: +-------------- +python tensorflow_models/object_detection/export_inference_graph.py \ + --alsologtostderr \ + --input_type tf_example \ + --pipeline_config_path path/to/faster_rcnn_model.config \ + --trained_checkpoint_prefix path/to/model.ckpt \ + --output_directory path/to/exported_model_directory + +python generate_embedding_data.py \ + --alsologtostderr \ + --embedding_input_tfrecord path/to/input_tfrecords* \ + --embedding_output_tfrecord path/to/output_tfrecords \ + --embedding_model_dir path/to/exported_model_directory/saved_model +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import datetime +import os +import threading +from absl import app +from absl import flags +import apache_beam as beam +import numpy as np +import six +import tensorflow.compat.v1 as tf +from apache_beam import runners + +flags.DEFINE_string('embedding_input_tfrecord', None, 'TFRecord containing' + 'images in tf.Example format for object detection.') +flags.DEFINE_string('embedding_output_tfrecord', None, + 'TFRecord containing embeddings in tf.Example format.') +flags.DEFINE_string('embedding_model_dir', None, 'Path to directory containing' + 'an object detection SavedModel with' + 'detection_box_classifier_features in the output.') +flags.DEFINE_integer('top_k_embedding_count', 1, + 'The number of top k embeddings to add to the memory bank.' + ) +flags.DEFINE_integer('bottom_k_embedding_count', 0, + 'The number of bottom k embeddings to add to the memory ' + 'bank.') +flags.DEFINE_integer('num_shards', 0, 'Number of output shards.') + + +FLAGS = flags.FLAGS + + +class GenerateEmbeddingDataFn(beam.DoFn): + """Generates embedding data for camera trap images. + + This Beam DoFn performs inference with an object detection `saved_model` and + produces contextual embedding vectors. + """ + session_lock = threading.Lock() + + def __init__(self, model_dir, top_k_embedding_count, + bottom_k_embedding_count): + """Initialization function. + + Args: + model_dir: A directory containing saved model. + top_k_embedding_count: the number of high-confidence embeddings to store + bottom_k_embedding_count: the number of low-confidence embeddings to store + """ + self._model_dir = model_dir + self._session = None + self._num_examples_processed = beam.metrics.Metrics.counter( + 'embedding_data_generation', 'num_tf_examples_processed') + self._top_k_embedding_count = top_k_embedding_count + self._bottom_k_embedding_count = bottom_k_embedding_count + + def start_bundle(self): + self._load_inference_model() + + def _load_inference_model(self): + # Because initialization of the tf.Session is expensive we share + # one instance across all threads in the worker. This is possible since + # tf.Session.run() is thread safe. 
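    # Added commentary (assumption, not from the original change): besides
    # boxes and scores, this signature must expose `detection_features`,
    # assumed to be shaped [batch, max_detections, height, width, channels]
    # (the unit test uses [1, 2, 10, 10, 100]); each box's feature map is
    # mean-pooled into a single channels-length vector further below.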
+ with self.session_lock: + if self._session is None: + graph = tf.Graph() + self._session = tf.Session(graph=graph) + with graph.as_default(): + meta_graph = tf.saved_model.loader.load( + self._session, [tf.saved_model.tag_constants.SERVING], + self._model_dir) + signature = meta_graph.signature_def['serving_default'] + input_tensor_name = signature.inputs['inputs'].name + detection_features_name = signature.outputs['detection_features'].name + detection_boxes_name = signature.outputs['detection_boxes'].name + num_detections_name = signature.outputs['num_detections'].name + self._input = graph.get_tensor_by_name(input_tensor_name) + self._embedding_node = graph.get_tensor_by_name(detection_features_name) + self._box_node = graph.get_tensor_by_name(detection_boxes_name) + self._scores_node = graph.get_tensor_by_name( + signature.outputs['detection_scores'].name) + self._num_detections = graph.get_tensor_by_name(num_detections_name) + tf.logging.info(signature.outputs['detection_features'].name) + tf.logging.info(signature.outputs['detection_boxes'].name) + tf.logging.info(signature.outputs['num_detections'].name) + + def process(self, tfrecord_entry): + return self._run_inference_and_generate_embedding(tfrecord_entry) + + def _run_inference_and_generate_embedding(self, tfrecord_entry): + input_example = tf.train.Example.FromString(tfrecord_entry) + # Convert date_captured datetime string to unix time integer and store + + def get_date_captured(example): + date_captured = datetime.datetime.strptime( + six.ensure_str( + example.features.feature[ + 'image/date_captured'].bytes_list.value[0]), + '%Y-%m-%d %H:%M:%S') + return date_captured + + try: + date_captured = get_date_captured(input_example) + except Exception: # pylint: disable=broad-except + # we require date_captured to be available for all images + return [] + + def embed_date_captured(date_captured): + """Encodes the datetime of the image.""" + embedded_date_captured = [] + month_max = 12.0 + day_max = 31.0 + hour_max = 24.0 + minute_max = 60.0 + min_year = 1990.0 + max_year = 2030.0 + + year = (date_captured.year-min_year)/float(max_year-min_year) + embedded_date_captured.append(year) + + month = (date_captured.month-1)/month_max + embedded_date_captured.append(month) + + day = (date_captured.day-1)/day_max + embedded_date_captured.append(day) + + hour = date_captured.hour/hour_max + embedded_date_captured.append(hour) + + minute = date_captured.minute/minute_max + embedded_date_captured.append(minute) + + return np.asarray(embedded_date_captured) + + def embed_position_and_size(box): + """Encodes the bounding box of the object of interest.""" + ymin = box[0] + xmin = box[1] + ymax = box[2] + xmax = box[3] + w = xmax - xmin + h = ymax - ymin + x = xmin + w / 2.0 + y = ymin + h / 2.0 + return np.asarray([x, y, w, h]) + + unix_time = ( + (date_captured - datetime.datetime.fromtimestamp(0)).total_seconds()) + + example = tf.train.Example() + example.features.feature['image/unix_time'].float_list.value.extend( + [unix_time]) + + (detection_features, detection_boxes, num_detections, + detection_scores) = self._session.run( + [ + self._embedding_node, self._box_node, self._num_detections[0], + self._scores_node + ], + feed_dict={self._input: [tfrecord_entry]}) + + num_detections = int(num_detections) + embed_all = [] + score_all = [] + + detection_features = np.asarray(detection_features) + + def get_bb_embedding(detection_features, detection_boxes, detection_scores, + index): + embedding = detection_features[0][index] + pooled_embedding 
= np.mean(np.mean(embedding, axis=1), axis=0) + + box = detection_boxes[0][index] + position_embedding = embed_position_and_size(box) + + score = detection_scores[0][index] + return np.concatenate((pooled_embedding, position_embedding)), score + + temporal_embedding = embed_date_captured(date_captured) + + embedding_count = 0 + for index in range(min(num_detections, self._top_k_embedding_count)): + bb_embedding, score = get_bb_embedding( + detection_features, detection_boxes, detection_scores, index) + embed_all.extend(bb_embedding) + embed_all.extend(temporal_embedding) + score_all.append(score) + embedding_count += 1 + + for index in range( + max(0, num_detections - 1), + max(-1, num_detections - 1 - self._bottom_k_embedding_count), -1): + bb_embedding, score = get_bb_embedding( + detection_features, detection_boxes, detection_scores, index) + embed_all.extend(bb_embedding) + embed_all.extend(temporal_embedding) + score_all.append(score) + embedding_count += 1 + + if embedding_count == 0: + bb_embedding, score = get_bb_embedding( + detection_features, detection_boxes, detection_scores, 0) + embed_all.extend(bb_embedding) + embed_all.extend(temporal_embedding) + score_all.append(score) + + # Takes max in case embedding_count is 0. + embedding_length = len(embed_all) // max(1, embedding_count) + + embed_all = np.asarray(embed_all) + + example.features.feature['image/embedding'].float_list.value.extend( + embed_all) + example.features.feature['image/embedding_score'].float_list.value.extend( + score_all) + example.features.feature['image/embedding_length'].int64_list.value.append( + embedding_length) + example.features.feature['image/embedding_count'].int64_list.value.append( + embedding_count) + + # Add other essential example attributes + example.features.feature['image/encoded'].bytes_list.value.extend( + input_example.features.feature['image/encoded'].bytes_list.value) + example.features.feature['image/height'].int64_list.value.extend( + input_example.features.feature['image/height'].int64_list.value) + example.features.feature['image/width'].int64_list.value.extend( + input_example.features.feature['image/width'].int64_list.value) + example.features.feature['image/source_id'].bytes_list.value.extend( + input_example.features.feature['image/source_id'].bytes_list.value) + example.features.feature['image/location'].bytes_list.value.extend( + input_example.features.feature['image/location'].bytes_list.value) + + example.features.feature['image/date_captured'].bytes_list.value.extend( + input_example.features.feature['image/date_captured'].bytes_list.value) + + example.features.feature['image/class/text'].bytes_list.value.extend( + input_example.features.feature['image/class/text'].bytes_list.value) + example.features.feature['image/class/label'].int64_list.value.extend( + input_example.features.feature['image/class/label'].int64_list.value) + + example.features.feature['image/seq_id'].bytes_list.value.extend( + input_example.features.feature['image/seq_id'].bytes_list.value) + example.features.feature['image/seq_num_frames'].int64_list.value.extend( + input_example.features.feature['image/seq_num_frames'].int64_list.value) + example.features.feature['image/seq_frame_num'].int64_list.value.extend( + input_example.features.feature['image/seq_frame_num'].int64_list.value) + + example.features.feature['image/object/bbox/ymax'].float_list.value.extend( + input_example.features.feature[ + 'image/object/bbox/ymax'].float_list.value) + 
example.features.feature['image/object/bbox/ymin'].float_list.value.extend( + input_example.features.feature[ + 'image/object/bbox/ymin'].float_list.value) + example.features.feature['image/object/bbox/xmax'].float_list.value.extend( + input_example.features.feature[ + 'image/object/bbox/xmax'].float_list.value) + example.features.feature['image/object/bbox/xmin'].float_list.value.extend( + input_example.features.feature[ + 'image/object/bbox/xmin'].float_list.value) + example.features.feature[ + 'image/object/class/score'].float_list.value.extend( + input_example.features.feature[ + 'image/object/class/score'].float_list.value) + example.features.feature[ + 'image/object/class/label'].int64_list.value.extend( + input_example.features.feature[ + 'image/object/class/label'].int64_list.value) + example.features.feature[ + 'image/object/class/text'].bytes_list.value.extend( + input_example.features.feature[ + 'image/object/class/text'].bytes_list.value) + + self._num_examples_processed.inc(1) + return [example] + + +def construct_pipeline(input_tfrecord, output_tfrecord, model_dir, + top_k_embedding_count, bottom_k_embedding_count, + num_shards): + """Returns a beam pipeline to run object detection inference. + + Args: + input_tfrecord: An TFRecord of tf.train.Example protos containing images. + output_tfrecord: An TFRecord of tf.train.Example protos that contain images + in the input TFRecord and the detections from the model. + model_dir: Path to `saved_model` to use for inference. + top_k_embedding_count: The number of high-confidence embeddings to store. + bottom_k_embedding_count: The number of low-confidence embeddings to store. + num_shards: The number of output shards. + """ + def pipeline(root): + input_collection = ( + root | 'ReadInputTFRecord' >> beam.io.tfrecordio.ReadFromTFRecord( + input_tfrecord, + coder=beam.coders.BytesCoder())) + output_collection = input_collection | 'ExtractEmbedding' >> beam.ParDo( + GenerateEmbeddingDataFn(model_dir, top_k_embedding_count, + bottom_k_embedding_count)) + output_collection = output_collection | 'Reshuffle' >> beam.Reshuffle() + _ = output_collection | 'WritetoDisk' >> beam.io.tfrecordio.WriteToTFRecord( + output_tfrecord, + num_shards=num_shards, + coder=beam.coders.ProtoCoder(tf.train.Example)) + return pipeline + + +def main(_): + """Runs the Beam pipeline that performs inference. + + Args: + _: unused + """ + # must create before flags are used + runner = runners.DirectRunner() + + dirname = os.path.dirname(FLAGS.embedding_output_tfrecord) + tf.io.gfile.makedirs(dirname) + runner.run( + construct_pipeline(FLAGS.embedding_input_tfrecord, + FLAGS.embedding_output_tfrecord, + FLAGS.embedding_model_dir, FLAGS.top_k_embedding_count, + FLAGS.bottom_k_embedding_count, FLAGS.num_shards)) + + +if __name__ == '__main__': + flags.mark_flags_as_required([ + 'embedding_input_tfrecord', + 'embedding_output_tfrecord', + 'embedding_model_dir' + ]) + app.run(main) diff --git a/models/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf1_test.py b/models/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..064a57e13c1a6b5ff42e0dd46868cab22d94346d --- /dev/null +++ b/models/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf1_test.py @@ -0,0 +1,337 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for generate_embedding_data.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import contextlib +import os +import tempfile +import unittest +import numpy as np +import six +import tensorflow.compat.v1 as tf +from object_detection import exporter +from object_detection.builders import model_builder +from object_detection.core import model +from object_detection.dataset_tools.context_rcnn import generate_embedding_data +from object_detection.protos import pipeline_pb2 +from object_detection.utils import tf_version +from apache_beam import runners + +if six.PY2: + import mock # pylint: disable=g-import-not-at-top +else: + mock = unittest.mock + + +class FakeModel(model.DetectionModel): + """A Fake Detection model with expected output nodes from post-processing.""" + + def preprocess(self, inputs): + true_image_shapes = [] # Doesn't matter for the fake model. + return tf.identity(inputs), true_image_shapes + + def predict(self, preprocessed_inputs, true_image_shapes): + return {'image': tf.layers.conv2d(preprocessed_inputs, 3, 1)} + + def postprocess(self, prediction_dict, true_image_shapes): + with tf.control_dependencies(prediction_dict.values()): + num_features = 100 + feature_dims = 10 + classifier_feature = np.ones( + (2, feature_dims, feature_dims, num_features), + dtype=np.float32).tolist() + postprocessed_tensors = { + 'detection_boxes': tf.constant([[[0.0, 0.1, 0.5, 0.6], + [0.5, 0.5, 0.8, 0.8]]], tf.float32), + 'detection_scores': tf.constant([[0.95, 0.6]], tf.float32), + 'detection_multiclass_scores': tf.constant([[[0.1, 0.7, 0.2], + [0.3, 0.1, 0.6]]], + tf.float32), + 'detection_classes': tf.constant([[0, 1]], tf.float32), + 'num_detections': tf.constant([2], tf.float32), + 'detection_features': + tf.constant([classifier_feature], + tf.float32) + } + return postprocessed_tensors + + def restore_map(self, checkpoint_path, fine_tune_checkpoint_type): + pass + + def loss(self, prediction_dict, true_image_shapes): + pass + + def regularization_losses(self): + pass + + def updates(self): + pass + + +@contextlib.contextmanager +def InMemoryTFRecord(entries): + temp = tempfile.NamedTemporaryFile(delete=False) + filename = temp.name + try: + with tf.python_io.TFRecordWriter(filename) as writer: + for value in entries: + writer.write(value) + yield filename + finally: + os.unlink(temp.name) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class GenerateEmbeddingData(tf.test.TestCase): + + def _save_checkpoint_from_mock_model(self, checkpoint_path): + """A function to save checkpoint from a fake Detection Model. + + Args: + checkpoint_path: Path to save checkpoint from Fake model. 
+ """ + g = tf.Graph() + with g.as_default(): + mock_model = FakeModel(num_classes=5) + preprocessed_inputs, true_image_shapes = mock_model.preprocess( + tf.placeholder(tf.float32, shape=[None, None, None, 3])) + predictions = mock_model.predict(preprocessed_inputs, true_image_shapes) + mock_model.postprocess(predictions, true_image_shapes) + tf.train.get_or_create_global_step() + saver = tf.train.Saver() + init = tf.global_variables_initializer() + with self.test_session(graph=g) as sess: + sess.run(init) + saver.save(sess, checkpoint_path) + + def _export_saved_model(self): + tmp_dir = self.get_temp_dir() + checkpoint_path = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(checkpoint_path) + output_directory = os.path.join(tmp_dir, 'output') + saved_model_path = os.path.join(output_directory, 'saved_model') + tf.io.gfile.makedirs(output_directory) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel(num_classes=5) + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + detection_model = model_builder.build(pipeline_config.model, + is_training=False) + outputs, placeholder_tensor = exporter.build_detection_graph( + input_type='tf_example', + detection_model=detection_model, + input_shape=None, + output_collection_name='inference_op', + graph_hook_fn=None) + output_node_names = ','.join(outputs.keys()) + saver = tf.train.Saver() + input_saver_def = saver.as_saver_def() + frozen_graph_def = exporter.freeze_graph_with_def_protos( + input_graph_def=tf.get_default_graph().as_graph_def(), + input_saver_def=input_saver_def, + input_checkpoint=checkpoint_path, + output_node_names=output_node_names, + restore_op_name='save/restore_all', + filename_tensor_name='save/Const:0', + output_graph='', + clear_devices=True, + initializer_nodes='') + exporter.write_saved_model( + saved_model_path=saved_model_path, + frozen_graph_def=frozen_graph_def, + inputs=placeholder_tensor, + outputs=outputs) + return saved_model_path + + def _create_tf_example(self): + with self.test_session(): + encoded_image = tf.image.encode_jpeg( + tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).eval() + + def BytesFeature(value): + return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) + + def Int64Feature(value): + return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) + + def FloatFeature(value): + return tf.train.Feature(float_list=tf.train.FloatList(value=[value])) + + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': BytesFeature(encoded_image), + 'image/source_id': BytesFeature(b'image_id'), + 'image/height': Int64Feature(400), + 'image/width': Int64Feature(600), + 'image/class/label': Int64Feature(5), + 'image/class/text': BytesFeature(b'hyena'), + 'image/object/bbox/xmin': FloatFeature(0.1), + 'image/object/bbox/xmax': FloatFeature(0.6), + 'image/object/bbox/ymin': FloatFeature(0.0), + 'image/object/bbox/ymax': FloatFeature(0.5), + 'image/object/class/score': FloatFeature(0.95), + 'image/object/class/label': Int64Feature(5), + 'image/object/class/text': BytesFeature(b'hyena'), + 'image/date_captured': BytesFeature(b'2019-10-20 12:12:12') + })) + + return example.SerializeToString() + + def assert_expected_example(self, example, topk=False, botk=False): + # Check embeddings + if topk or botk: + self.assertEqual(len( + example.features.feature['image/embedding'].float_list.value), + 218) + 
self.assertAllEqual( + example.features.feature['image/embedding_count'].int64_list.value, + [2]) + else: + self.assertEqual(len( + example.features.feature['image/embedding'].float_list.value), + 109) + self.assertAllEqual( + example.features.feature['image/embedding_count'].int64_list.value, + [1]) + + self.assertAllEqual( + example.features.feature['image/embedding_length'].int64_list.value, + [109]) + + # Check annotations + self.assertAllClose( + example.features.feature['image/object/bbox/ymin'].float_list.value, + [0.0]) + self.assertAllClose( + example.features.feature['image/object/bbox/xmin'].float_list.value, + [0.1]) + self.assertAllClose( + example.features.feature['image/object/bbox/ymax'].float_list.value, + [0.5]) + self.assertAllClose( + example.features.feature['image/object/bbox/xmax'].float_list.value, + [0.6]) + self.assertAllClose( + example.features.feature['image/object/class/score'] + .float_list.value, [0.95]) + self.assertAllClose( + example.features.feature['image/object/class/label'] + .int64_list.value, [5]) + self.assertAllEqual( + example.features.feature['image/object/class/text'] + .bytes_list.value, ['hyena']) + self.assertAllClose( + example.features.feature['image/class/label'] + .int64_list.value, [5]) + self.assertAllEqual( + example.features.feature['image/class/text'] + .bytes_list.value, ['hyena']) + + # Check other essential attributes. + self.assertAllEqual( + example.features.feature['image/height'].int64_list.value, [400]) + self.assertAllEqual( + example.features.feature['image/width'].int64_list.value, [600]) + self.assertAllEqual( + example.features.feature['image/source_id'].bytes_list.value, + ['image_id']) + self.assertTrue( + example.features.feature['image/encoded'].bytes_list.value) + + def test_generate_embedding_data_fn(self): + saved_model_path = self._export_saved_model() + top_k_embedding_count = 1 + bottom_k_embedding_count = 0 + inference_fn = generate_embedding_data.GenerateEmbeddingDataFn( + saved_model_path, top_k_embedding_count, bottom_k_embedding_count) + inference_fn.start_bundle() + generated_example = self._create_tf_example() + self.assertAllEqual(tf.train.Example.FromString( + generated_example).features.feature['image/object/class/label'] + .int64_list.value, [5]) + self.assertAllEqual(tf.train.Example.FromString( + generated_example).features.feature['image/object/class/text'] + .bytes_list.value, ['hyena']) + output = inference_fn.process(generated_example) + output_example = output[0] + self.assert_expected_example(output_example) + + def test_generate_embedding_data_with_top_k_boxes(self): + saved_model_path = self._export_saved_model() + top_k_embedding_count = 2 + bottom_k_embedding_count = 0 + inference_fn = generate_embedding_data.GenerateEmbeddingDataFn( + saved_model_path, top_k_embedding_count, bottom_k_embedding_count) + inference_fn.start_bundle() + generated_example = self._create_tf_example() + self.assertAllEqual( + tf.train.Example.FromString(generated_example).features + .feature['image/object/class/label'].int64_list.value, [5]) + self.assertAllEqual( + tf.train.Example.FromString(generated_example).features + .feature['image/object/class/text'].bytes_list.value, [b'hyena']) + output = inference_fn.process(generated_example) + output_example = output[0] + self.assert_expected_example(output_example, topk=True) + + def test_generate_embedding_data_with_bottom_k_boxes(self): + saved_model_path = self._export_saved_model() + top_k_embedding_count = 0 + bottom_k_embedding_count = 2 + inference_fn = 
generate_embedding_data.GenerateEmbeddingDataFn( + saved_model_path, top_k_embedding_count, bottom_k_embedding_count) + inference_fn.start_bundle() + generated_example = self._create_tf_example() + self.assertAllEqual( + tf.train.Example.FromString(generated_example).features + .feature['image/object/class/label'].int64_list.value, [5]) + self.assertAllEqual( + tf.train.Example.FromString(generated_example).features + .feature['image/object/class/text'].bytes_list.value, ['hyena']) + output = inference_fn.process(generated_example) + output_example = output[0] + self.assert_expected_example(output_example, botk=True) + + def test_beam_pipeline(self): + with InMemoryTFRecord([self._create_tf_example()]) as input_tfrecord: + runner = runners.DirectRunner() + temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR')) + output_tfrecord = os.path.join(temp_dir, 'output_tfrecord') + saved_model_path = self._export_saved_model() + top_k_embedding_count = 1 + bottom_k_embedding_count = 0 + num_shards = 1 + pipeline = generate_embedding_data.construct_pipeline( + input_tfrecord, output_tfrecord, saved_model_path, + top_k_embedding_count, bottom_k_embedding_count, num_shards) + runner.run(pipeline) + filenames = tf.io.gfile.glob( + output_tfrecord + '-?????-of-?????') + actual_output = [] + record_iterator = tf.python_io.tf_record_iterator(path=filenames[0]) + for record in record_iterator: + actual_output.append(record) + self.assertEqual(len(actual_output), 1) + self.assert_expected_example(tf.train.Example.FromString( + actual_output[0])) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/dataset_tools/create_coco_tf_record.py b/models/research/object_detection/dataset_tools/create_coco_tf_record.py new file mode 100644 index 0000000000000000000000000000000000000000..51ed389105f827335de68ec9c85e04c0083242a5 --- /dev/null +++ b/models/research/object_detection/dataset_tools/create_coco_tf_record.py @@ -0,0 +1,367 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Convert raw COCO dataset to TFRecord for object_detection. + +Please note that this tool creates sharded output files. 
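+
+The output for each split is written as a set of sharded files named like
+<output path>-00000-of-00100; the shard count comes from the num_shards
+arguments used in main below, where the train/val/test-dev splits default to
+100 shards each.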
+
+Example usage:
+    python create_coco_tf_record.py --logtostderr \
+      --train_image_dir="${TRAIN_IMAGE_DIR}" \
+      --val_image_dir="${VAL_IMAGE_DIR}" \
+      --test_image_dir="${TEST_IMAGE_DIR}" \
+      --train_annotations_file="${TRAIN_ANNOTATIONS_FILE}" \
+      --val_annotations_file="${VAL_ANNOTATIONS_FILE}" \
+      --testdev_annotations_file="${TESTDEV_ANNOTATIONS_FILE}" \
+      --output_dir="${OUTPUT_DIR}"
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import hashlib
+import io
+import json
+import logging
+import os
+import contextlib2
+import numpy as np
+import PIL.Image
+
+from pycocotools import mask
+import tensorflow.compat.v1 as tf
+
+from object_detection.dataset_tools import tf_record_creation_util
+from object_detection.utils import dataset_util
+from object_detection.utils import label_map_util
+
+flags = tf.app.flags
+tf.flags.DEFINE_boolean(
+    'include_masks', False, 'Whether to include instance segmentation masks '
+    '(PNG encoded) in the result. default: False.')
+tf.flags.DEFINE_string('train_image_dir', '', 'Training image directory.')
+tf.flags.DEFINE_string('val_image_dir', '', 'Validation image directory.')
+tf.flags.DEFINE_string('test_image_dir', '', 'Test image directory.')
+tf.flags.DEFINE_string('train_annotations_file', '',
+                       'Training annotations JSON file.')
+tf.flags.DEFINE_string('val_annotations_file', '',
+                       'Validation annotations JSON file.')
+tf.flags.DEFINE_string('testdev_annotations_file', '',
+                       'Test-dev annotations JSON file.')
+tf.flags.DEFINE_string('train_keypoint_annotations_file', '',
+                       'Training keypoint annotations JSON file.')
+tf.flags.DEFINE_string('val_keypoint_annotations_file', '',
+                       'Validation keypoint annotations JSON file.')
+tf.flags.DEFINE_string('output_dir', '/tmp/', 'Output data directory.')
+
+FLAGS = flags.FLAGS
+
+logger = tf.get_logger()
+logger.setLevel(logging.INFO)
+
+_COCO_KEYPOINT_NAMES = [
+    b'nose', b'left_eye', b'right_eye', b'left_ear', b'right_ear',
+    b'left_shoulder', b'right_shoulder', b'left_elbow', b'right_elbow',
+    b'left_wrist', b'right_wrist', b'left_hip', b'right_hip',
+    b'left_knee', b'right_knee', b'left_ankle', b'right_ankle'
+]
+
+
+def create_tf_example(image,
+                      annotations_list,
+                      image_dir,
+                      category_index,
+                      include_masks=False,
+                      keypoint_annotations_dict=None):
+  """Converts image and annotations to a tf.Example proto.
+
+  Args:
+    image: dict with keys: [u'license', u'file_name', u'coco_url', u'height',
+      u'width', u'date_captured', u'flickr_url', u'id']
+    annotations_list:
+      list of dicts with keys: [u'segmentation', u'area', u'iscrowd',
+      u'image_id', u'bbox', u'category_id', u'id'] Notice that bounding box
+      coordinates in the official COCO dataset are given as [x, y, width,
+      height] tuples using absolute coordinates where x, y represent the
+      top-left (0-indexed) corner. This function converts to the format
+      expected by the Tensorflow Object Detection API (which is
+      [ymin, xmin, ymax, xmax] with coordinates normalized relative to image
+      size).
+    image_dir: directory containing the image files.
+    category_index: a dict containing COCO category information keyed by the
+      'id' field of each category. See the label_map_util.create_category_index
+      function.
+    include_masks: Whether to include instance segmentation masks
+      (PNG encoded) in the result. default: False.
+ keypoint_annotations_dict: A dictionary that maps from annotation_id to a + dictionary with keys: [u'keypoints', u'num_keypoints'] represeting the + keypoint information for this person object annotation. If None, then + no keypoint annotations will be populated. + + Returns: + example: The converted tf.Example + num_annotations_skipped: Number of (invalid) annotations that were ignored. + + Raises: + ValueError: if the image pointed to by data['filename'] is not a valid JPEG + """ + image_height = image['height'] + image_width = image['width'] + filename = image['file_name'] + image_id = image['id'] + + full_path = os.path.join(image_dir, filename) + with tf.gfile.GFile(full_path, 'rb') as fid: + encoded_jpg = fid.read() + encoded_jpg_io = io.BytesIO(encoded_jpg) + image = PIL.Image.open(encoded_jpg_io) + key = hashlib.sha256(encoded_jpg).hexdigest() + + xmin = [] + xmax = [] + ymin = [] + ymax = [] + is_crowd = [] + category_names = [] + category_ids = [] + area = [] + encoded_mask_png = [] + keypoints_x = [] + keypoints_y = [] + keypoints_visibility = [] + keypoints_name = [] + num_keypoints = [] + include_keypoint = keypoint_annotations_dict is not None + num_annotations_skipped = 0 + num_keypoint_annotation_used = 0 + num_keypoint_annotation_skipped = 0 + for object_annotations in annotations_list: + (x, y, width, height) = tuple(object_annotations['bbox']) + if width <= 0 or height <= 0: + num_annotations_skipped += 1 + continue + if x + width > image_width or y + height > image_height: + num_annotations_skipped += 1 + continue + xmin.append(float(x) / image_width) + xmax.append(float(x + width) / image_width) + ymin.append(float(y) / image_height) + ymax.append(float(y + height) / image_height) + is_crowd.append(object_annotations['iscrowd']) + category_id = int(object_annotations['category_id']) + category_ids.append(category_id) + category_names.append(category_index[category_id]['name'].encode('utf8')) + area.append(object_annotations['area']) + + if include_masks: + run_len_encoding = mask.frPyObjects(object_annotations['segmentation'], + image_height, image_width) + binary_mask = mask.decode(run_len_encoding) + if not object_annotations['iscrowd']: + binary_mask = np.amax(binary_mask, axis=2) + pil_image = PIL.Image.fromarray(binary_mask) + output_io = io.BytesIO() + pil_image.save(output_io, format='PNG') + encoded_mask_png.append(output_io.getvalue()) + + if include_keypoint: + annotation_id = object_annotations['id'] + if annotation_id in keypoint_annotations_dict: + num_keypoint_annotation_used += 1 + keypoint_annotations = keypoint_annotations_dict[annotation_id] + keypoints = keypoint_annotations['keypoints'] + num_kpts = keypoint_annotations['num_keypoints'] + keypoints_x_abs = keypoints[::3] + keypoints_x.extend( + [float(x_abs) / image_width for x_abs in keypoints_x_abs]) + keypoints_y_abs = keypoints[1::3] + keypoints_y.extend( + [float(y_abs) / image_height for y_abs in keypoints_y_abs]) + keypoints_visibility.extend(keypoints[2::3]) + keypoints_name.extend(_COCO_KEYPOINT_NAMES) + num_keypoints.append(num_kpts) + else: + keypoints_x.extend([0.0] * len(_COCO_KEYPOINT_NAMES)) + keypoints_y.extend([0.0] * len(_COCO_KEYPOINT_NAMES)) + keypoints_visibility.extend([0] * len(_COCO_KEYPOINT_NAMES)) + keypoints_name.extend(_COCO_KEYPOINT_NAMES) + num_keypoints.append(0) + feature_dict = { + 'image/height': + dataset_util.int64_feature(image_height), + 'image/width': + dataset_util.int64_feature(image_width), + 'image/filename': + 
dataset_util.bytes_feature(filename.encode('utf8')), + 'image/source_id': + dataset_util.bytes_feature(str(image_id).encode('utf8')), + 'image/key/sha256': + dataset_util.bytes_feature(key.encode('utf8')), + 'image/encoded': + dataset_util.bytes_feature(encoded_jpg), + 'image/format': + dataset_util.bytes_feature('jpeg'.encode('utf8')), + 'image/object/bbox/xmin': + dataset_util.float_list_feature(xmin), + 'image/object/bbox/xmax': + dataset_util.float_list_feature(xmax), + 'image/object/bbox/ymin': + dataset_util.float_list_feature(ymin), + 'image/object/bbox/ymax': + dataset_util.float_list_feature(ymax), + 'image/object/class/text': + dataset_util.bytes_list_feature(category_names), + 'image/object/is_crowd': + dataset_util.int64_list_feature(is_crowd), + 'image/object/area': + dataset_util.float_list_feature(area), + } + if include_masks: + feature_dict['image/object/mask'] = ( + dataset_util.bytes_list_feature(encoded_mask_png)) + if include_keypoint: + feature_dict['image/object/keypoint/x'] = ( + dataset_util.float_list_feature(keypoints_x)) + feature_dict['image/object/keypoint/y'] = ( + dataset_util.float_list_feature(keypoints_y)) + feature_dict['image/object/keypoint/num'] = ( + dataset_util.int64_list_feature(num_keypoints)) + feature_dict['image/object/keypoint/visibility'] = ( + dataset_util.int64_list_feature(keypoints_visibility)) + feature_dict['image/object/keypoint/text'] = ( + dataset_util.bytes_list_feature(keypoints_name)) + num_keypoint_annotation_skipped = ( + len(keypoint_annotations_dict) - num_keypoint_annotation_used) + + example = tf.train.Example(features=tf.train.Features(feature=feature_dict)) + return key, example, num_annotations_skipped, num_keypoint_annotation_skipped + + +def _create_tf_record_from_coco_annotations(annotations_file, image_dir, + output_path, include_masks, + num_shards, + keypoint_annotations_file=''): + """Loads COCO annotation json files and converts to tf.Record format. + + Args: + annotations_file: JSON file containing bounding box annotations. + image_dir: Directory containing the image files. + output_path: Path to output tf.Record file. + include_masks: Whether to include instance segmentations masks + (PNG encoded) in the result. default: False. + num_shards: number of output file shards. + keypoint_annotations_file: JSON file containing the person keypoint + annotations. If empty, then no person keypoint annotations will be + generated. + """ + with contextlib2.ExitStack() as tf_record_close_stack, \ + tf.gfile.GFile(annotations_file, 'r') as fid: + output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords( + tf_record_close_stack, output_path, num_shards) + groundtruth_data = json.load(fid) + images = groundtruth_data['images'] + category_index = label_map_util.create_category_index( + groundtruth_data['categories']) + + annotations_index = {} + if 'annotations' in groundtruth_data: + logging.info('Found groundtruth annotations. 
Building annotations index.') + for annotation in groundtruth_data['annotations']: + image_id = annotation['image_id'] + if image_id not in annotations_index: + annotations_index[image_id] = [] + annotations_index[image_id].append(annotation) + missing_annotation_count = 0 + for image in images: + image_id = image['id'] + if image_id not in annotations_index: + missing_annotation_count += 1 + annotations_index[image_id] = [] + logging.info('%d images are missing annotations.', missing_annotation_count) + + keypoint_annotations_index = {} + if keypoint_annotations_file: + with tf.gfile.GFile(keypoint_annotations_file, 'r') as kid: + keypoint_groundtruth_data = json.load(kid) + if 'annotations' in keypoint_groundtruth_data: + for annotation in keypoint_groundtruth_data['annotations']: + image_id = annotation['image_id'] + if image_id not in keypoint_annotations_index: + keypoint_annotations_index[image_id] = {} + keypoint_annotations_index[image_id][annotation['id']] = annotation + + total_num_annotations_skipped = 0 + total_num_keypoint_annotations_skipped = 0 + for idx, image in enumerate(images): + if idx % 100 == 0: + logging.info('On image %d of %d', idx, len(images)) + annotations_list = annotations_index[image['id']] + keypoint_annotations_dict = None + if keypoint_annotations_file: + keypoint_annotations_dict = {} + if image['id'] in keypoint_annotations_index: + keypoint_annotations_dict = keypoint_annotations_index[image['id']] + (_, tf_example, num_annotations_skipped, + num_keypoint_annotations_skipped) = create_tf_example( + image, annotations_list, image_dir, category_index, include_masks, + keypoint_annotations_dict) + total_num_annotations_skipped += num_annotations_skipped + total_num_keypoint_annotations_skipped += num_keypoint_annotations_skipped + shard_idx = idx % num_shards + output_tfrecords[shard_idx].write(tf_example.SerializeToString()) + logging.info('Finished writing, skipped %d annotations.', + total_num_annotations_skipped) + if keypoint_annotations_file: + logging.info('Finished writing, skipped %d keypoint annotations.', + total_num_keypoint_annotations_skipped) + + +def main(_): + assert FLAGS.train_image_dir, '`train_image_dir` missing.' + assert FLAGS.val_image_dir, '`val_image_dir` missing.' + assert FLAGS.test_image_dir, '`test_image_dir` missing.' + assert FLAGS.train_annotations_file, '`train_annotations_file` missing.' + assert FLAGS.val_annotations_file, '`val_annotations_file` missing.' + assert FLAGS.testdev_annotations_file, '`testdev_annotations_file` missing.' 
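+  # Note: the keypoint annotation flags are optional. When they are left at
+  # their empty defaults, the train/val records below are written without
+  # person keypoint features.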
+ + if not tf.gfile.IsDirectory(FLAGS.output_dir): + tf.gfile.MakeDirs(FLAGS.output_dir) + train_output_path = os.path.join(FLAGS.output_dir, 'coco_train.record') + val_output_path = os.path.join(FLAGS.output_dir, 'coco_val.record') + testdev_output_path = os.path.join(FLAGS.output_dir, 'coco_testdev.record') + + _create_tf_record_from_coco_annotations( + FLAGS.train_annotations_file, + FLAGS.train_image_dir, + train_output_path, + FLAGS.include_masks, + num_shards=100, + keypoint_annotations_file=FLAGS.train_keypoint_annotations_file) + _create_tf_record_from_coco_annotations( + FLAGS.val_annotations_file, + FLAGS.val_image_dir, + val_output_path, + FLAGS.include_masks, + num_shards=100, + keypoint_annotations_file=FLAGS.val_keypoint_annotations_file) + _create_tf_record_from_coco_annotations( + FLAGS.testdev_annotations_file, + FLAGS.test_image_dir, + testdev_output_path, + FLAGS.include_masks, + num_shards=100) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/object_detection/dataset_tools/create_coco_tf_record_test.py b/models/research/object_detection/dataset_tools/create_coco_tf_record_test.py new file mode 100644 index 0000000000000000000000000000000000000000..0bcc8be9c7437734414e73e43cae8effb7c95681 --- /dev/null +++ b/models/research/object_detection/dataset_tools/create_coco_tf_record_test.py @@ -0,0 +1,371 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Test for create_coco_tf_record.py.""" + +import io +import json +import os + +import numpy as np +import PIL.Image +import six +import tensorflow.compat.v1 as tf + +from object_detection.dataset_tools import create_coco_tf_record + + +class CreateCocoTFRecordTest(tf.test.TestCase): + + def _assertProtoEqual(self, proto_field, expectation): + """Helper function to assert if a proto field equals some value. + + Args: + proto_field: The protobuf field to compare. + expectation: The expected value of the protobuf field. + """ + proto_list = [p for p in proto_field] + self.assertListEqual(proto_list, expectation) + + def _assertProtoClose(self, proto_field, expectation): + """Helper function to assert if a proto field nearly equals some value. + + Args: + proto_field: The protobuf field to compare. + expectation: The expected value of the protobuf field. 
+ """ + proto_list = [p for p in proto_field] + self.assertAllClose(proto_list, expectation) + + def test_create_tf_example(self): + image_file_name = 'tmp_image.jpg' + image_data = np.random.rand(256, 256, 3) + tmp_dir = self.get_temp_dir() + save_path = os.path.join(tmp_dir, image_file_name) + image = PIL.Image.fromarray(image_data, 'RGB') + image.save(save_path) + + image = { + 'file_name': image_file_name, + 'height': 256, + 'width': 256, + 'id': 11, + } + + annotations_list = [{ + 'area': .5, + 'iscrowd': False, + 'image_id': 11, + 'bbox': [64, 64, 128, 128], + 'category_id': 2, + 'id': 1000, + }] + + image_dir = tmp_dir + category_index = { + 1: { + 'name': 'dog', + 'id': 1 + }, + 2: { + 'name': 'cat', + 'id': 2 + }, + 3: { + 'name': 'human', + 'id': 3 + } + } + + (_, example, + num_annotations_skipped, _) = create_coco_tf_record.create_tf_example( + image, annotations_list, image_dir, category_index) + + self.assertEqual(num_annotations_skipped, 0) + self._assertProtoEqual( + example.features.feature['image/height'].int64_list.value, [256]) + self._assertProtoEqual( + example.features.feature['image/width'].int64_list.value, [256]) + self._assertProtoEqual( + example.features.feature['image/filename'].bytes_list.value, + [six.b(image_file_name)]) + self._assertProtoEqual( + example.features.feature['image/source_id'].bytes_list.value, + [six.b(str(image['id']))]) + self._assertProtoEqual( + example.features.feature['image/format'].bytes_list.value, + [six.b('jpeg')]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/xmin'].float_list.value, + [0.25]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/ymin'].float_list.value, + [0.25]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/xmax'].float_list.value, + [0.75]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/ymax'].float_list.value, + [0.75]) + self._assertProtoEqual( + example.features.feature['image/object/class/text'].bytes_list.value, + [six.b('cat')]) + + def test_create_tf_example_with_instance_masks(self): + image_file_name = 'tmp_image.jpg' + image_data = np.random.rand(8, 8, 3) + tmp_dir = self.get_temp_dir() + save_path = os.path.join(tmp_dir, image_file_name) + image = PIL.Image.fromarray(image_data, 'RGB') + image.save(save_path) + + image = { + 'file_name': image_file_name, + 'height': 8, + 'width': 8, + 'id': 11, + } + + annotations_list = [{ + 'area': .5, + 'iscrowd': False, + 'image_id': 11, + 'bbox': [0, 0, 8, 8], + 'segmentation': [[4, 0, 0, 0, 0, 4], [8, 4, 4, 8, 8, 8]], + 'category_id': 1, + 'id': 1000, + }] + + image_dir = tmp_dir + category_index = { + 1: { + 'name': 'dog', + 'id': 1 + }, + } + + (_, example, + num_annotations_skipped, _) = create_coco_tf_record.create_tf_example( + image, annotations_list, image_dir, category_index, include_masks=True) + + self.assertEqual(num_annotations_skipped, 0) + self._assertProtoEqual( + example.features.feature['image/height'].int64_list.value, [8]) + self._assertProtoEqual( + example.features.feature['image/width'].int64_list.value, [8]) + self._assertProtoEqual( + example.features.feature['image/filename'].bytes_list.value, + [six.b(image_file_name)]) + self._assertProtoEqual( + example.features.feature['image/source_id'].bytes_list.value, + [six.b(str(image['id']))]) + self._assertProtoEqual( + example.features.feature['image/format'].bytes_list.value, + [six.b('jpeg')]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/xmin'].float_list.value, + 
[0]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/ymin'].float_list.value, + [0]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/xmax'].float_list.value, + [1]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/ymax'].float_list.value, + [1]) + self._assertProtoEqual( + example.features.feature['image/object/class/text'].bytes_list.value, + [six.b('dog')]) + encoded_mask_pngs = [ + io.BytesIO(encoded_masks) for encoded_masks in example.features.feature[ + 'image/object/mask'].bytes_list.value + ] + pil_masks = [ + np.array(PIL.Image.open(encoded_mask_png)) + for encoded_mask_png in encoded_mask_pngs + ] + self.assertEqual(len(pil_masks), 1) + self.assertAllEqual(pil_masks[0], + [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 0, 1, 1, 1, 1]]) + + def test_create_tf_example_with_keypoints(self): + image_dir = self.get_temp_dir() + image_file_name = 'tmp_image.jpg' + image_data = np.random.randint(low=0, high=256, size=(256, 256, 3)).astype( + np.uint8) + save_path = os.path.join(image_dir, image_file_name) + image = PIL.Image.fromarray(image_data, 'RGB') + image.save(save_path) + + image = { + 'file_name': image_file_name, + 'height': 256, + 'width': 256, + 'id': 11, + } + + min_x, min_y = 64, 64 + max_x, max_y = 128, 128 + keypoints = [] + num_visible_keypoints = 0 + xv = [] + yv = [] + vv = [] + for _ in range(17): + xc = min_x + int(np.random.rand()*(max_x - min_x)) + yc = min_y + int(np.random.rand()*(max_y - min_y)) + vis = np.random.randint(0, 3) + xv.append(xc) + yv.append(yc) + vv.append(vis) + keypoints.extend([xc, yc, vis]) + num_visible_keypoints += (vis > 0) + + annotations_list = [{ + 'area': 0.5, + 'iscrowd': False, + 'image_id': 11, + 'bbox': [64, 64, 128, 128], + 'category_id': 1, + 'id': 1000 + }] + + keypoint_annotations_dict = { + 1000: { + 'keypoints': keypoints, + 'num_keypoints': num_visible_keypoints + } + } + + category_index = { + 1: { + 'name': 'person', + 'id': 1 + } + } + + (_, example, _, + num_keypoint_annotation_skipped) = create_coco_tf_record.create_tf_example( + image, + annotations_list, + image_dir, + category_index, + include_masks=False, + keypoint_annotations_dict=keypoint_annotations_dict) + + self.assertEqual(num_keypoint_annotation_skipped, 0) + self._assertProtoEqual( + example.features.feature['image/height'].int64_list.value, [256]) + self._assertProtoEqual( + example.features.feature['image/width'].int64_list.value, [256]) + self._assertProtoEqual( + example.features.feature['image/filename'].bytes_list.value, + [six.b(image_file_name)]) + self._assertProtoEqual( + example.features.feature['image/source_id'].bytes_list.value, + [six.b(str(image['id']))]) + self._assertProtoEqual( + example.features.feature['image/format'].bytes_list.value, + [six.b('jpeg')]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/xmin'].float_list.value, + [0.25]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/ymin'].float_list.value, + [0.25]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/xmax'].float_list.value, + [0.75]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/ymax'].float_list.value, + [0.75]) + self._assertProtoEqual( + example.features.feature['image/object/class/text'].bytes_list.value, + [six.b('person')]) + self._assertProtoClose( + 
example.features.feature['image/object/keypoint/x'].float_list.value, + np.array(xv, dtype=np.float32) / 256) + self._assertProtoClose( + example.features.feature['image/object/keypoint/y'].float_list.value, + np.array(yv, dtype=np.float32) / 256) + self._assertProtoEqual( + example.features.feature['image/object/keypoint/text'].bytes_list.value, + create_coco_tf_record._COCO_KEYPOINT_NAMES) + self._assertProtoEqual( + example.features.feature[ + 'image/object/keypoint/visibility'].int64_list.value, vv) + + def test_create_sharded_tf_record(self): + tmp_dir = self.get_temp_dir() + image_paths = ['tmp1_image.jpg', 'tmp2_image.jpg'] + for image_path in image_paths: + image_data = np.random.rand(256, 256, 3) + save_path = os.path.join(tmp_dir, image_path) + image = PIL.Image.fromarray(image_data, 'RGB') + image.save(save_path) + + images = [{ + 'file_name': image_paths[0], + 'height': 256, + 'width': 256, + 'id': 11, + }, { + 'file_name': image_paths[1], + 'height': 256, + 'width': 256, + 'id': 12, + }] + + annotations = [{ + 'area': .5, + 'iscrowd': False, + 'image_id': 11, + 'bbox': [64, 64, 128, 128], + 'category_id': 2, + 'id': 1000, + }] + + category_index = [{ + 'name': 'dog', + 'id': 1 + }, { + 'name': 'cat', + 'id': 2 + }, { + 'name': 'human', + 'id': 3 + }] + groundtruth_data = {'images': images, 'annotations': annotations, + 'categories': category_index} + annotation_file = os.path.join(tmp_dir, 'annotation.json') + with open(annotation_file, 'w') as annotation_fid: + json.dump(groundtruth_data, annotation_fid) + + output_path = os.path.join(tmp_dir, 'out.record') + create_coco_tf_record._create_tf_record_from_coco_annotations( + annotation_file, + tmp_dir, + output_path, + False, + 2) + self.assertTrue(os.path.exists(output_path + '-00000-of-00002')) + self.assertTrue(os.path.exists(output_path + '-00001-of-00002')) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/dataset_tools/create_kitti_tf_record.py b/models/research/object_detection/dataset_tools/create_kitti_tf_record.py new file mode 100644 index 0000000000000000000000000000000000000000..fe4f13ec80b4f552f316c6cc544c0fa4edf8f0bd --- /dev/null +++ b/models/research/object_detection/dataset_tools/create_kitti_tf_record.py @@ -0,0 +1,310 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""Convert raw KITTI detection dataset to TFRecord for object_detection. + +Converts KITTI detection dataset to TFRecords with a standard format allowing + to use this dataset to train object detectors. The raw dataset can be + downloaded from: + http://kitti.is.tue.mpg.de/kitti/data_object_image_2.zip. + http://kitti.is.tue.mpg.de/kitti/data_object_label_2.zip + Permission can be requested at the main website. + + KITTI detection dataset contains 7481 training images. 
Using this code with
+  the default settings will set aside the first 500 images as a validation set.
+  This can be altered using the flags, see details below.
+
+Example usage:
+    python object_detection/dataset_tools/create_kitti_tf_record.py \
+        --data_dir=/home/user/kitti \
+        --output_path=/home/user/kitti.record
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+import hashlib
+import io
+import os
+
+import numpy as np
+import PIL.Image as pil
+import tensorflow.compat.v1 as tf
+
+from object_detection.utils import dataset_util
+from object_detection.utils import label_map_util
+from object_detection.utils.np_box_ops import iou
+
+tf.app.flags.DEFINE_string('data_dir', '', 'Location of root directory for the '
+                           'data. Folder structure is assumed to be: '
+                           '/training/label_2 (annotations) and '
+                           '/data_object_image_2/training/image_2 '
+                           '(images).')
+tf.app.flags.DEFINE_string('output_path', '', 'Path to which TFRecord files '
+                           'will be written. The TFRecord with the training set '
+                           'will be located at: _train.tfrecord. '
+                           'And the TFRecord with the validation set will be '
+                           'located at: _val.tfrecord')
+tf.app.flags.DEFINE_string('classes_to_use', 'car,pedestrian,dontcare',
+                           'Comma separated list of class names that will be '
+                           'used. Adding the dontcare class will remove all '
+                           'bboxes in the dontcare regions.')
+tf.app.flags.DEFINE_string('label_map_path', 'data/kitti_label_map.pbtxt',
+                           'Path to label map proto.')
+tf.app.flags.DEFINE_integer('validation_set_size', 500, 'Number of images to '
+                            'be used as a validation set.')
+FLAGS = tf.app.flags.FLAGS
+
+
+def convert_kitti_to_tfrecords(data_dir, output_path, classes_to_use,
+                               label_map_path, validation_set_size):
+  """Convert the KITTI detection dataset to TFRecords.
+
+  Args:
+    data_dir: The full path to the unzipped folder containing the unzipped data
+      from data_object_image_2 and data_object_label_2.zip.
+      Folder structure is assumed to be: data_dir/training/label_2 (annotations)
+      and data_dir/data_object_image_2/training/image_2 (images).
+    output_path: The path to which TFRecord files will be written. The TFRecord
+      with the training set will be located at: _train.tfrecord
+      And the TFRecord with the validation set will be located at:
+      _val.tfrecord
+    classes_to_use: List of strings naming the classes for which data should be
+      converted. Use the same names as presented in the KITTI README file.
+      Adding dontcare class will remove all other bounding boxes that overlap
+      with areas marked as dontcare regions.
+    label_map_path: Path to label map proto
+    validation_set_size: How many images should be left as the validation set.
+      (First `validation_set_size` examples are selected to be in the
+      validation set).
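+
+  Note: an image is assigned to the validation split when the integer prefix
+  of its file name is smaller than validation_set_size (see the
+  is_validation_img check below).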
+ """ + label_map_dict = label_map_util.get_label_map_dict(label_map_path) + train_count = 0 + val_count = 0 + + annotation_dir = os.path.join(data_dir, + 'training', + 'label_2') + + image_dir = os.path.join(data_dir, + 'data_object_image_2', + 'training', + 'image_2') + + train_writer = tf.python_io.TFRecordWriter('%s_train.tfrecord'% + output_path) + val_writer = tf.python_io.TFRecordWriter('%s_val.tfrecord'% + output_path) + + images = sorted(tf.gfile.ListDirectory(image_dir)) + for img_name in images: + img_num = int(img_name.split('.')[0]) + is_validation_img = img_num < validation_set_size + img_anno = read_annotation_file(os.path.join(annotation_dir, + str(img_num).zfill(6)+'.txt')) + + image_path = os.path.join(image_dir, img_name) + + # Filter all bounding boxes of this frame that are of a legal class, and + # don't overlap with a dontcare region. + # TODO(talremez) filter out targets that are truncated or heavily occluded. + annotation_for_image = filter_annotations(img_anno, classes_to_use) + + example = prepare_example(image_path, annotation_for_image, label_map_dict) + if is_validation_img: + val_writer.write(example.SerializeToString()) + val_count += 1 + else: + train_writer.write(example.SerializeToString()) + train_count += 1 + + train_writer.close() + val_writer.close() + + +def prepare_example(image_path, annotations, label_map_dict): + """Converts a dictionary with annotations for an image to tf.Example proto. + + Args: + image_path: The complete path to image. + annotations: A dictionary representing the annotation of a single object + that appears in the image. + label_map_dict: A map from string label names to integer ids. + + Returns: + example: The converted tf.Example. + """ + with tf.gfile.GFile(image_path, 'rb') as fid: + encoded_png = fid.read() + encoded_png_io = io.BytesIO(encoded_png) + image = pil.open(encoded_png_io) + image = np.asarray(image) + + key = hashlib.sha256(encoded_png).hexdigest() + + width = int(image.shape[1]) + height = int(image.shape[0]) + + xmin_norm = annotations['2d_bbox_left'] / float(width) + ymin_norm = annotations['2d_bbox_top'] / float(height) + xmax_norm = annotations['2d_bbox_right'] / float(width) + ymax_norm = annotations['2d_bbox_bottom'] / float(height) + + difficult_obj = [0]*len(xmin_norm) + + example = tf.train.Example(features=tf.train.Features(feature={ + 'image/height': dataset_util.int64_feature(height), + 'image/width': dataset_util.int64_feature(width), + 'image/filename': dataset_util.bytes_feature(image_path.encode('utf8')), + 'image/source_id': dataset_util.bytes_feature(image_path.encode('utf8')), + 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')), + 'image/encoded': dataset_util.bytes_feature(encoded_png), + 'image/format': dataset_util.bytes_feature('png'.encode('utf8')), + 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin_norm), + 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax_norm), + 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin_norm), + 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax_norm), + 'image/object/class/text': dataset_util.bytes_list_feature( + [x.encode('utf8') for x in annotations['type']]), + 'image/object/class/label': dataset_util.int64_list_feature( + [label_map_dict[x] for x in annotations['type']]), + 'image/object/difficult': dataset_util.int64_list_feature(difficult_obj), + 'image/object/truncated': dataset_util.float_list_feature( + annotations['truncated']), + 'image/object/alpha': 
dataset_util.float_list_feature(
+        annotations['alpha']),
+    'image/object/3d_bbox/height': dataset_util.float_list_feature(
+        annotations['3d_bbox_height']),
+    'image/object/3d_bbox/width': dataset_util.float_list_feature(
+        annotations['3d_bbox_width']),
+    'image/object/3d_bbox/length': dataset_util.float_list_feature(
+        annotations['3d_bbox_length']),
+    'image/object/3d_bbox/x': dataset_util.float_list_feature(
+        annotations['3d_bbox_x']),
+    'image/object/3d_bbox/y': dataset_util.float_list_feature(
+        annotations['3d_bbox_y']),
+    'image/object/3d_bbox/z': dataset_util.float_list_feature(
+        annotations['3d_bbox_z']),
+    'image/object/3d_bbox/rot_y': dataset_util.float_list_feature(
+        annotations['3d_bbox_rot_y']),
+  }))
+
+  return example
+
+
+def filter_annotations(img_all_annotations, used_classes):
+  """Filters out annotations from the unused classes and dontcare regions.
+
+  Filters out the annotations that belong to classes we do not wish to use and
+  (optionally) also removes all boxes that overlap with dontcare regions.
+
+  Args:
+    img_all_annotations: A list of annotation dictionaries. See documentation of
+      read_annotation_file for more details about the format of the annotations.
+    used_classes: A list of strings listing the classes we want to keep. If the
+      list contains "dontcare", all bounding boxes overlapping with dontcare
+      regions will also be filtered out.
+
+  Returns:
+    img_filtered_annotations: A list of annotation dictionaries that have passed
+      the filtering.
+  """
+
+  img_filtered_annotations = {}
+
+  # Filter the type of the objects.
+  relevant_annotation_indices = [
+      i for i, x in enumerate(img_all_annotations['type']) if x in used_classes
+  ]
+
+  for key in img_all_annotations.keys():
+    img_filtered_annotations[key] = (
+        img_all_annotations[key][relevant_annotation_indices])
+
+  if 'dontcare' in used_classes:
+    dont_care_indices = [i for i,
+                         x in enumerate(img_filtered_annotations['type'])
+                         if x == 'dontcare']
+
+    # bounding box format [y_min, x_min, y_max, x_max]
+    all_boxes = np.stack([img_filtered_annotations['2d_bbox_top'],
+                          img_filtered_annotations['2d_bbox_left'],
+                          img_filtered_annotations['2d_bbox_bottom'],
+                          img_filtered_annotations['2d_bbox_right']],
+                         axis=1)
+
+    ious = iou(boxes1=all_boxes,
+               boxes2=all_boxes[dont_care_indices])
+
+    # Remove all bounding boxes that overlap with a dontcare region.
+    if ious.size > 0:
+      boxes_to_remove = np.amax(ious, axis=1) > 0.0
+      for key in img_all_annotations.keys():
+        img_filtered_annotations[key] = (
+            img_filtered_annotations[key][np.logical_not(boxes_to_remove)])
+
+  return img_filtered_annotations
+
+
+def read_annotation_file(filename):
+  """Reads a KITTI annotation file.
+
+  Converts a KITTI annotation file into a dictionary containing all the
+  relevant information.
+
+  Args:
+    filename: the path to the annotation text file.
+
+  Returns:
+    anno: A dictionary with the converted annotation information. See annotation
+      README file for details on the different fields.
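+
+  Note: each line of the annotation file is expected to carry at least 15
+  space-separated fields in the order parsed below: type, truncated, occluded,
+  alpha, the 2D box (left, top, right, bottom), the 3D box dimensions (height,
+  width, length), the 3D location (x, y, z) and rotation_y.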
+ """ + with open(filename) as f: + content = f.readlines() + content = [x.strip().split(' ') for x in content] + + anno = {} + anno['type'] = np.array([x[0].lower() for x in content]) + anno['truncated'] = np.array([float(x[1]) for x in content]) + anno['occluded'] = np.array([int(x[2]) for x in content]) + anno['alpha'] = np.array([float(x[3]) for x in content]) + + anno['2d_bbox_left'] = np.array([float(x[4]) for x in content]) + anno['2d_bbox_top'] = np.array([float(x[5]) for x in content]) + anno['2d_bbox_right'] = np.array([float(x[6]) for x in content]) + anno['2d_bbox_bottom'] = np.array([float(x[7]) for x in content]) + + anno['3d_bbox_height'] = np.array([float(x[8]) for x in content]) + anno['3d_bbox_width'] = np.array([float(x[9]) for x in content]) + anno['3d_bbox_length'] = np.array([float(x[10]) for x in content]) + anno['3d_bbox_x'] = np.array([float(x[11]) for x in content]) + anno['3d_bbox_y'] = np.array([float(x[12]) for x in content]) + anno['3d_bbox_z'] = np.array([float(x[13]) for x in content]) + anno['3d_bbox_rot_y'] = np.array([float(x[14]) for x in content]) + + return anno + + +def main(_): + convert_kitti_to_tfrecords( + data_dir=FLAGS.data_dir, + output_path=FLAGS.output_path, + classes_to_use=FLAGS.classes_to_use.split(','), + label_map_path=FLAGS.label_map_path, + validation_set_size=FLAGS.validation_set_size) + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/object_detection/dataset_tools/create_kitti_tf_record_test.py b/models/research/object_detection/dataset_tools/create_kitti_tf_record_test.py new file mode 100644 index 0000000000000000000000000000000000000000..606c684ef90ee23fcb496b11322dc5b4bb9e0d57 --- /dev/null +++ b/models/research/object_detection/dataset_tools/create_kitti_tf_record_test.py @@ -0,0 +1,132 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Test for create_kitti_tf_record.py.""" + +import os + +import numpy as np +import PIL.Image +import six +import tensorflow.compat.v1 as tf + +from object_detection.dataset_tools import create_kitti_tf_record + + +class CreateKittiTFRecordTest(tf.test.TestCase): + + def _assertProtoEqual(self, proto_field, expectation): + """Helper function to assert if a proto field equals some value. + + Args: + proto_field: The protobuf field to compare. + expectation: The expected value of the protobuf field. 
+ """ + proto_list = [p for p in proto_field] + self.assertListEqual(proto_list, expectation) + + def test_dict_to_tf_example(self): + image_file_name = 'tmp_image.jpg' + image_data = np.random.rand(256, 256, 3) + save_path = os.path.join(self.get_temp_dir(), image_file_name) + image = PIL.Image.fromarray(image_data, 'RGB') + image.save(save_path) + + annotations = {} + annotations['2d_bbox_left'] = np.array([64]) + annotations['2d_bbox_top'] = np.array([64]) + annotations['2d_bbox_right'] = np.array([192]) + annotations['2d_bbox_bottom'] = np.array([192]) + annotations['type'] = ['car'] + annotations['truncated'] = np.array([1]) + annotations['alpha'] = np.array([2]) + annotations['3d_bbox_height'] = np.array([10]) + annotations['3d_bbox_width'] = np.array([11]) + annotations['3d_bbox_length'] = np.array([12]) + annotations['3d_bbox_x'] = np.array([13]) + annotations['3d_bbox_y'] = np.array([14]) + annotations['3d_bbox_z'] = np.array([15]) + annotations['3d_bbox_rot_y'] = np.array([4]) + + label_map_dict = { + 'background': 0, + 'car': 1, + } + + example = create_kitti_tf_record.prepare_example( + save_path, + annotations, + label_map_dict) + + self._assertProtoEqual( + example.features.feature['image/height'].int64_list.value, [256]) + self._assertProtoEqual( + example.features.feature['image/width'].int64_list.value, [256]) + self._assertProtoEqual( + example.features.feature['image/filename'].bytes_list.value, + [six.b(save_path)]) + self._assertProtoEqual( + example.features.feature['image/source_id'].bytes_list.value, + [six.b(save_path)]) + self._assertProtoEqual( + example.features.feature['image/format'].bytes_list.value, + [six.b('png')]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/xmin'].float_list.value, + [0.25]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/ymin'].float_list.value, + [0.25]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/xmax'].float_list.value, + [0.75]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/ymax'].float_list.value, + [0.75]) + self._assertProtoEqual( + example.features.feature['image/object/class/text'].bytes_list.value, + [six.b('car')]) + self._assertProtoEqual( + example.features.feature['image/object/class/label'].int64_list.value, + [1]) + self._assertProtoEqual( + example.features.feature['image/object/truncated'].float_list.value, + [1]) + self._assertProtoEqual( + example.features.feature['image/object/alpha'].float_list.value, + [2]) + self._assertProtoEqual(example.features.feature[ + 'image/object/3d_bbox/height'].float_list.value, [10]) + self._assertProtoEqual( + example.features.feature['image/object/3d_bbox/width'].float_list.value, + [11]) + self._assertProtoEqual(example.features.feature[ + 'image/object/3d_bbox/length'].float_list.value, [12]) + self._assertProtoEqual( + example.features.feature['image/object/3d_bbox/x'].float_list.value, + [13]) + self._assertProtoEqual( + example.features.feature['image/object/3d_bbox/y'].float_list.value, + [14]) + self._assertProtoEqual( + example.features.feature['image/object/3d_bbox/z'].float_list.value, + [15]) + self._assertProtoEqual( + example.features.feature['image/object/3d_bbox/rot_y'].float_list.value, + [4]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/dataset_tools/create_oid_tf_record.py b/models/research/object_detection/dataset_tools/create_oid_tf_record.py new file mode 100644 index 
0000000000000000000000000000000000000000..9b35765bacc2aaddb12698bcc6965bd92ee7a66f
--- /dev/null
+++ b/models/research/object_detection/dataset_tools/create_oid_tf_record.py
@@ -0,0 +1,117 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+r"""Creates TFRecords of Open Images dataset for object detection.

+Example usage:
+  python object_detection/dataset_tools/create_oid_tf_record.py \
+    --input_box_annotations_csv=/path/to/input/annotations-human-bbox.csv \
+    --input_image_label_annotations_csv=/path/to/input/annotations-label.csv \
+    --input_images_directory=/path/to/input/image_pixels_directory \
+    --input_label_map=/path/to/input/labels_bbox_545.labelmap \
+    --output_tf_record_path_prefix=/path/to/output/prefix.tfrecord
+
+CSVs with bounding box annotations and image metadata (including the image URLs)
+can be downloaded from the Open Images GitHub repository:
+https://github.com/openimages/dataset
+
+This script will include every image found in the input_images_directory in the
+output TFRecord, even if the image has no corresponding bounding box annotations
+in the input_annotations_csv. If input_image_label_annotations_csv is specified,
+it will add image-level labels as well. Note that the information of whether a
+label is positively or negatively verified is NOT added to the TFRecord.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+
+import contextlib2
+import pandas as pd
+import tensorflow.compat.v1 as tf
+
+from object_detection.dataset_tools import oid_tfrecord_creation
+from object_detection.dataset_tools import tf_record_creation_util
+from object_detection.utils import label_map_util
+
+tf.flags.DEFINE_string('input_box_annotations_csv', None,
+                       'Path to CSV containing image bounding box annotations')
+tf.flags.DEFINE_string('input_images_directory', None,
+                       'Directory containing the image pixels '
+                       'downloaded from the OpenImages GitHub repository.')
+tf.flags.DEFINE_string('input_image_label_annotations_csv', None,
+                       'Path to CSV containing image-level label annotations')
+tf.flags.DEFINE_string('input_label_map', None, 'Path to the label map proto')
+tf.flags.DEFINE_string(
+    'output_tf_record_path_prefix', None,
+    'Path to the output TFRecord. 
The shard index and the number of shards ' + 'will be appended for each output shard.') +tf.flags.DEFINE_integer('num_shards', 100, 'Number of TFRecord shards') + +FLAGS = tf.flags.FLAGS + + +def main(_): + tf.logging.set_verbosity(tf.logging.INFO) + + required_flags = [ + 'input_box_annotations_csv', 'input_images_directory', 'input_label_map', + 'output_tf_record_path_prefix' + ] + for flag_name in required_flags: + if not getattr(FLAGS, flag_name): + raise ValueError('Flag --{} is required'.format(flag_name)) + + label_map = label_map_util.get_label_map_dict(FLAGS.input_label_map) + all_box_annotations = pd.read_csv(FLAGS.input_box_annotations_csv) + if FLAGS.input_image_label_annotations_csv: + all_label_annotations = pd.read_csv(FLAGS.input_image_label_annotations_csv) + all_label_annotations.rename( + columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True) + else: + all_label_annotations = None + all_images = tf.gfile.Glob( + os.path.join(FLAGS.input_images_directory, '*.jpg')) + all_image_ids = [os.path.splitext(os.path.basename(v))[0] for v in all_images] + all_image_ids = pd.DataFrame({'ImageID': all_image_ids}) + all_annotations = pd.concat( + [all_box_annotations, all_image_ids, all_label_annotations]) + + tf.logging.log(tf.logging.INFO, 'Found %d images...', len(all_image_ids)) + + with contextlib2.ExitStack() as tf_record_close_stack: + output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords( + tf_record_close_stack, FLAGS.output_tf_record_path_prefix, + FLAGS.num_shards) + + for counter, image_data in enumerate(all_annotations.groupby('ImageID')): + tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 1000, + counter) + + image_id, image_annotations = image_data + # In OID image file names are formed by appending ".jpg" to the image ID. + image_path = os.path.join(FLAGS.input_images_directory, image_id + '.jpg') + with tf.gfile.Open(image_path) as image_file: + encoded_image = image_file.read() + + tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame( + image_annotations, label_map, encoded_image) + if tf_example: + shard_idx = int(image_id, 16) % FLAGS.num_shards + output_tfrecords[shard_idx].write(tf_example.SerializeToString()) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/object_detection/dataset_tools/create_pascal_tf_record.py b/models/research/object_detection/dataset_tools/create_pascal_tf_record.py new file mode 100644 index 0000000000000000000000000000000000000000..8d79a3391c4eaadcb658406c7240d8efc0c0f02e --- /dev/null +++ b/models/research/object_detection/dataset_tools/create_pascal_tf_record.py @@ -0,0 +1,185 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""Convert raw PASCAL dataset to TFRecord for object_detection. 
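+
+The script assumes the standard VOCdevkit layout for the selected year:
+image-set lists under <data_dir>/<year>/ImageSets/Main, XML annotations under
+<data_dir>/<year>/Annotations (configurable via --annotations_dir) and images
+under the JPEGImages subdirectory of the folder recorded in each annotation
+(see dict_to_tf_example and main below).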
+ +Example usage: + python object_detection/dataset_tools/create_pascal_tf_record.py \ + --data_dir=/home/user/VOCdevkit \ + --year=VOC2012 \ + --output_path=/home/user/pascal.record +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import hashlib +import io +import logging +import os + +from lxml import etree +import PIL.Image +import tensorflow.compat.v1 as tf + +from object_detection.utils import dataset_util +from object_detection.utils import label_map_util + + +flags = tf.app.flags +flags.DEFINE_string('data_dir', '', 'Root directory to raw PASCAL VOC dataset.') +flags.DEFINE_string('set', 'train', 'Convert training set, validation set or ' + 'merged set.') +flags.DEFINE_string('annotations_dir', 'Annotations', + '(Relative) path to annotations directory.') +flags.DEFINE_string('year', 'VOC2007', 'Desired challenge year.') +flags.DEFINE_string('output_path', '', 'Path to output TFRecord') +flags.DEFINE_string('label_map_path', 'data/pascal_label_map.pbtxt', + 'Path to label map proto') +flags.DEFINE_boolean('ignore_difficult_instances', False, 'Whether to ignore ' + 'difficult instances') +FLAGS = flags.FLAGS + +SETS = ['train', 'val', 'trainval', 'test'] +YEARS = ['VOC2007', 'VOC2012', 'merged'] + + +def dict_to_tf_example(data, + dataset_directory, + label_map_dict, + ignore_difficult_instances=False, + image_subdirectory='JPEGImages'): + """Convert XML derived dict to tf.Example proto. + + Notice that this function normalizes the bounding box coordinates provided + by the raw data. + + Args: + data: dict holding PASCAL XML fields for a single image (obtained by + running dataset_util.recursive_parse_xml_to_dict) + dataset_directory: Path to root directory holding PASCAL dataset + label_map_dict: A map from string label names to integers ids. + ignore_difficult_instances: Whether to skip difficult instances in the + dataset (default: False). + image_subdirectory: String specifying subdirectory within the + PASCAL dataset directory holding the actual image data. + + Returns: + example: The converted tf.Example. 
+ + Raises: + ValueError: if the image pointed to by data['filename'] is not a valid JPEG + """ + img_path = os.path.join(data['folder'], image_subdirectory, data['filename']) + full_path = os.path.join(dataset_directory, img_path) + with tf.gfile.GFile(full_path, 'rb') as fid: + encoded_jpg = fid.read() + encoded_jpg_io = io.BytesIO(encoded_jpg) + image = PIL.Image.open(encoded_jpg_io) + if image.format != 'JPEG': + raise ValueError('Image format not JPEG') + key = hashlib.sha256(encoded_jpg).hexdigest() + + width = int(data['size']['width']) + height = int(data['size']['height']) + + xmin = [] + ymin = [] + xmax = [] + ymax = [] + classes = [] + classes_text = [] + truncated = [] + poses = [] + difficult_obj = [] + if 'object' in data: + for obj in data['object']: + difficult = bool(int(obj['difficult'])) + if ignore_difficult_instances and difficult: + continue + + difficult_obj.append(int(difficult)) + + xmin.append(float(obj['bndbox']['xmin']) / width) + ymin.append(float(obj['bndbox']['ymin']) / height) + xmax.append(float(obj['bndbox']['xmax']) / width) + ymax.append(float(obj['bndbox']['ymax']) / height) + classes_text.append(obj['name'].encode('utf8')) + classes.append(label_map_dict[obj['name']]) + truncated.append(int(obj['truncated'])) + poses.append(obj['pose'].encode('utf8')) + + example = tf.train.Example(features=tf.train.Features(feature={ + 'image/height': dataset_util.int64_feature(height), + 'image/width': dataset_util.int64_feature(width), + 'image/filename': dataset_util.bytes_feature( + data['filename'].encode('utf8')), + 'image/source_id': dataset_util.bytes_feature( + data['filename'].encode('utf8')), + 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')), + 'image/encoded': dataset_util.bytes_feature(encoded_jpg), + 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')), + 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin), + 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax), + 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin), + 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax), + 'image/object/class/text': dataset_util.bytes_list_feature(classes_text), + 'image/object/class/label': dataset_util.int64_list_feature(classes), + 'image/object/difficult': dataset_util.int64_list_feature(difficult_obj), + 'image/object/truncated': dataset_util.int64_list_feature(truncated), + 'image/object/view': dataset_util.bytes_list_feature(poses), + })) + return example + + +def main(_): + if FLAGS.set not in SETS: + raise ValueError('set must be in : {}'.format(SETS)) + if FLAGS.year not in YEARS: + raise ValueError('year must be in : {}'.format(YEARS)) + + data_dir = FLAGS.data_dir + years = ['VOC2007', 'VOC2012'] + if FLAGS.year != 'merged': + years = [FLAGS.year] + + writer = tf.python_io.TFRecordWriter(FLAGS.output_path) + + label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path) + + for year in years: + logging.info('Reading from PASCAL %s dataset.', year) + examples_path = os.path.join(data_dir, year, 'ImageSets', 'Main', + 'aeroplane_' + FLAGS.set + '.txt') + annotations_dir = os.path.join(data_dir, year, FLAGS.annotations_dir) + examples_list = dataset_util.read_examples_list(examples_path) + for idx, example in enumerate(examples_list): + if idx % 100 == 0: + logging.info('On image %d of %d', idx, len(examples_list)) + path = os.path.join(annotations_dir, example + '.xml') + with tf.gfile.GFile(path, 'r') as fid: + xml_str = fid.read() + xml = etree.fromstring(xml_str) + 
data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation'] + + tf_example = dict_to_tf_example(data, FLAGS.data_dir, label_map_dict, + FLAGS.ignore_difficult_instances) + writer.write(tf_example.SerializeToString()) + + writer.close() + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/object_detection/dataset_tools/create_pascal_tf_record_test.py b/models/research/object_detection/dataset_tools/create_pascal_tf_record_test.py new file mode 100644 index 0000000000000000000000000000000000000000..c751a1391c5a47bc676de5a4d701d52c75b4334d --- /dev/null +++ b/models/research/object_detection/dataset_tools/create_pascal_tf_record_test.py @@ -0,0 +1,121 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Test for create_pascal_tf_record.py.""" + +import os + +import numpy as np +import PIL.Image +import six +import tensorflow.compat.v1 as tf + +from object_detection.dataset_tools import create_pascal_tf_record + + +class CreatePascalTFRecordTest(tf.test.TestCase): + + def _assertProtoEqual(self, proto_field, expectation): + """Helper function to assert if a proto field equals some value. + + Args: + proto_field: The protobuf field to compare. + expectation: The expected value of the protobuf field. 
+ """ + proto_list = [p for p in proto_field] + self.assertListEqual(proto_list, expectation) + + def test_dict_to_tf_example(self): + image_file_name = 'tmp_image.jpg' + image_data = np.random.rand(256, 256, 3) + save_path = os.path.join(self.get_temp_dir(), image_file_name) + image = PIL.Image.fromarray(image_data, 'RGB') + image.save(save_path) + + data = { + 'folder': '', + 'filename': image_file_name, + 'size': { + 'height': 256, + 'width': 256, + }, + 'object': [ + { + 'difficult': 1, + 'bndbox': { + 'xmin': 64, + 'ymin': 64, + 'xmax': 192, + 'ymax': 192, + }, + 'name': 'person', + 'truncated': 0, + 'pose': '', + }, + ], + } + + label_map_dict = { + 'background': 0, + 'person': 1, + 'notperson': 2, + } + + example = create_pascal_tf_record.dict_to_tf_example( + data, self.get_temp_dir(), label_map_dict, image_subdirectory='') + self._assertProtoEqual( + example.features.feature['image/height'].int64_list.value, [256]) + self._assertProtoEqual( + example.features.feature['image/width'].int64_list.value, [256]) + self._assertProtoEqual( + example.features.feature['image/filename'].bytes_list.value, + [six.b(image_file_name)]) + self._assertProtoEqual( + example.features.feature['image/source_id'].bytes_list.value, + [six.b(image_file_name)]) + self._assertProtoEqual( + example.features.feature['image/format'].bytes_list.value, + [six.b('jpeg')]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/xmin'].float_list.value, + [0.25]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/ymin'].float_list.value, + [0.25]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/xmax'].float_list.value, + [0.75]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/ymax'].float_list.value, + [0.75]) + self._assertProtoEqual( + example.features.feature['image/object/class/text'].bytes_list.value, + [six.b('person')]) + self._assertProtoEqual( + example.features.feature['image/object/class/label'].int64_list.value, + [1]) + self._assertProtoEqual( + example.features.feature['image/object/difficult'].int64_list.value, + [1]) + self._assertProtoEqual( + example.features.feature['image/object/truncated'].int64_list.value, + [0]) + self._assertProtoEqual( + example.features.feature['image/object/view'].bytes_list.value, + [six.b('')]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/dataset_tools/create_pet_tf_record.py b/models/research/object_detection/dataset_tools/create_pet_tf_record.py new file mode 100644 index 0000000000000000000000000000000000000000..78524b5054229c101a5894290bf97dcc11c6d815 --- /dev/null +++ b/models/research/object_detection/dataset_tools/create_pet_tf_record.py @@ -0,0 +1,318 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""Convert the Oxford pet dataset to TFRecord for object_detection. + +See: O. M. 
Parkhi, A. Vedaldi, A. Zisserman, C. V. Jawahar + Cats and Dogs + IEEE Conference on Computer Vision and Pattern Recognition, 2012 + http://www.robots.ox.ac.uk/~vgg/data/pets/ + +Example usage: + python object_detection/dataset_tools/create_pet_tf_record.py \ + --data_dir=/home/user/pet \ + --output_dir=/home/user/pet/output +""" + +import hashlib +import io +import logging +import os +import random +import re + +import contextlib2 +from lxml import etree +import numpy as np +import PIL.Image +import tensorflow.compat.v1 as tf + +from object_detection.dataset_tools import tf_record_creation_util +from object_detection.utils import dataset_util +from object_detection.utils import label_map_util + +flags = tf.app.flags +flags.DEFINE_string('data_dir', '', 'Root directory to raw pet dataset.') +flags.DEFINE_string('output_dir', '', 'Path to directory to output TFRecords.') +flags.DEFINE_string('label_map_path', 'data/pet_label_map.pbtxt', + 'Path to label map proto') +flags.DEFINE_boolean('faces_only', True, 'If True, generates bounding boxes ' + 'for pet faces. Otherwise generates bounding boxes (as ' + 'well as segmentations for full pet bodies). Note that ' + 'in the latter case, the resulting files are much larger.') +flags.DEFINE_string('mask_type', 'png', 'How to represent instance ' + 'segmentation masks. Options are "png" or "numerical".') +flags.DEFINE_integer('num_shards', 10, 'Number of TFRecord shards') + +FLAGS = flags.FLAGS + + +def get_class_name_from_filename(file_name): + """Gets the class name from a file. + + Args: + file_name: The file name to get the class name from. + ie. "american_pit_bull_terrier_105.jpg" + + Returns: + A string of the class name. + """ + match = re.match(r'([A-Za-z_]+)(_[0-9]+\.jpg)', file_name, re.I) + return match.groups()[0] + + +def dict_to_tf_example(data, + mask_path, + label_map_dict, + image_subdirectory, + ignore_difficult_instances=False, + faces_only=True, + mask_type='png'): + """Convert XML derived dict to tf.Example proto. + + Notice that this function normalizes the bounding box coordinates provided + by the raw data. + + Args: + data: dict holding PASCAL XML fields for a single image (obtained by + running dataset_util.recursive_parse_xml_to_dict) + mask_path: String path to PNG encoded mask. + label_map_dict: A map from string label names to integers ids. + image_subdirectory: String specifying subdirectory within the + Pascal dataset directory holding the actual image data. + ignore_difficult_instances: Whether to skip difficult instances in the + dataset (default: False). + faces_only: If True, generates bounding boxes for pet faces. Otherwise + generates bounding boxes (as well as segmentations for full pet bodies). + mask_type: 'numerical' or 'png'. 'png' is recommended because it leads to + smaller file sizes. + + Returns: + example: The converted tf.Example. 
+ + Raises: + ValueError: if the image pointed to by data['filename'] is not a valid JPEG + """ + img_path = os.path.join(image_subdirectory, data['filename']) + with tf.gfile.GFile(img_path, 'rb') as fid: + encoded_jpg = fid.read() + encoded_jpg_io = io.BytesIO(encoded_jpg) + image = PIL.Image.open(encoded_jpg_io) + if image.format != 'JPEG': + raise ValueError('Image format not JPEG') + key = hashlib.sha256(encoded_jpg).hexdigest() + + with tf.gfile.GFile(mask_path, 'rb') as fid: + encoded_mask_png = fid.read() + encoded_png_io = io.BytesIO(encoded_mask_png) + mask = PIL.Image.open(encoded_png_io) + if mask.format != 'PNG': + raise ValueError('Mask format not PNG') + + mask_np = np.asarray(mask) + nonbackground_indices_x = np.any(mask_np != 2, axis=0) + nonbackground_indices_y = np.any(mask_np != 2, axis=1) + nonzero_x_indices = np.where(nonbackground_indices_x) + nonzero_y_indices = np.where(nonbackground_indices_y) + + width = int(data['size']['width']) + height = int(data['size']['height']) + + xmins = [] + ymins = [] + xmaxs = [] + ymaxs = [] + classes = [] + classes_text = [] + truncated = [] + poses = [] + difficult_obj = [] + masks = [] + if 'object' in data: + for obj in data['object']: + difficult = bool(int(obj['difficult'])) + if ignore_difficult_instances and difficult: + continue + difficult_obj.append(int(difficult)) + + if faces_only: + xmin = float(obj['bndbox']['xmin']) + xmax = float(obj['bndbox']['xmax']) + ymin = float(obj['bndbox']['ymin']) + ymax = float(obj['bndbox']['ymax']) + else: + xmin = float(np.min(nonzero_x_indices)) + xmax = float(np.max(nonzero_x_indices)) + ymin = float(np.min(nonzero_y_indices)) + ymax = float(np.max(nonzero_y_indices)) + + xmins.append(xmin / width) + ymins.append(ymin / height) + xmaxs.append(xmax / width) + ymaxs.append(ymax / height) + class_name = get_class_name_from_filename(data['filename']) + classes_text.append(class_name.encode('utf8')) + classes.append(label_map_dict[class_name]) + truncated.append(int(obj['truncated'])) + poses.append(obj['pose'].encode('utf8')) + if not faces_only: + mask_remapped = (mask_np != 2).astype(np.uint8) + masks.append(mask_remapped) + + feature_dict = { + 'image/height': dataset_util.int64_feature(height), + 'image/width': dataset_util.int64_feature(width), + 'image/filename': dataset_util.bytes_feature( + data['filename'].encode('utf8')), + 'image/source_id': dataset_util.bytes_feature( + data['filename'].encode('utf8')), + 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')), + 'image/encoded': dataset_util.bytes_feature(encoded_jpg), + 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')), + 'image/object/bbox/xmin': dataset_util.float_list_feature(xmins), + 'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs), + 'image/object/bbox/ymin': dataset_util.float_list_feature(ymins), + 'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs), + 'image/object/class/text': dataset_util.bytes_list_feature(classes_text), + 'image/object/class/label': dataset_util.int64_list_feature(classes), + 'image/object/difficult': dataset_util.int64_list_feature(difficult_obj), + 'image/object/truncated': dataset_util.int64_list_feature(truncated), + 'image/object/view': dataset_util.bytes_list_feature(poses), + } + if not faces_only: + if mask_type == 'numerical': + mask_stack = np.stack(masks).astype(np.float32) + masks_flattened = np.reshape(mask_stack, [-1]) + feature_dict['image/object/mask'] = ( + dataset_util.float_list_feature(masks_flattened.tolist())) + elif 
mask_type == 'png': + encoded_mask_png_list = [] + for mask in masks: + img = PIL.Image.fromarray(mask) + output = io.BytesIO() + img.save(output, format='PNG') + encoded_mask_png_list.append(output.getvalue()) + feature_dict['image/object/mask'] = ( + dataset_util.bytes_list_feature(encoded_mask_png_list)) + + example = tf.train.Example(features=tf.train.Features(feature=feature_dict)) + return example + + +def create_tf_record(output_filename, + num_shards, + label_map_dict, + annotations_dir, + image_dir, + examples, + faces_only=True, + mask_type='png'): + """Creates a TFRecord file from examples. + + Args: + output_filename: Path to where output file is saved. + num_shards: Number of shards for output file. + label_map_dict: The label map dictionary. + annotations_dir: Directory where annotation files are stored. + image_dir: Directory where image files are stored. + examples: Examples to parse and save to tf record. + faces_only: If True, generates bounding boxes for pet faces. Otherwise + generates bounding boxes (as well as segmentations for full pet bodies). + mask_type: 'numerical' or 'png'. 'png' is recommended because it leads to + smaller file sizes. + """ + with contextlib2.ExitStack() as tf_record_close_stack: + output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords( + tf_record_close_stack, output_filename, num_shards) + for idx, example in enumerate(examples): + if idx % 100 == 0: + logging.info('On image %d of %d', idx, len(examples)) + xml_path = os.path.join(annotations_dir, 'xmls', example + '.xml') + mask_path = os.path.join(annotations_dir, 'trimaps', example + '.png') + + if not os.path.exists(xml_path): + logging.warning('Could not find %s, ignoring example.', xml_path) + continue + with tf.gfile.GFile(xml_path, 'r') as fid: + xml_str = fid.read() + xml = etree.fromstring(xml_str) + data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation'] + + try: + tf_example = dict_to_tf_example( + data, + mask_path, + label_map_dict, + image_dir, + faces_only=faces_only, + mask_type=mask_type) + if tf_example: + shard_idx = idx % num_shards + output_tfrecords[shard_idx].write(tf_example.SerializeToString()) + except ValueError: + logging.warning('Invalid example: %s, ignoring.', xml_path) + + +# TODO(derekjchow): Add test for pet/PASCAL main files. +def main(_): + data_dir = FLAGS.data_dir + label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path) + + logging.info('Reading from Pet dataset.') + image_dir = os.path.join(data_dir, 'images') + annotations_dir = os.path.join(data_dir, 'annotations') + examples_path = os.path.join(annotations_dir, 'trainval.txt') + examples_list = dataset_util.read_examples_list(examples_path) + + # Test images are not included in the downloaded data set, so we shall perform + # our own split. 
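+  # Illustrative arithmetic (assumed count, not read from the dataset): for a
+  # trainval list of, say, 3000 examples, the fixed-seed shuffle below keeps
+  # the first int(0.7 * 3000) = 2100 entries for training and the remaining
+  # 900 for validation.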
+ random.seed(42) + random.shuffle(examples_list) + num_examples = len(examples_list) + num_train = int(0.7 * num_examples) + train_examples = examples_list[:num_train] + val_examples = examples_list[num_train:] + logging.info('%d training and %d validation examples.', + len(train_examples), len(val_examples)) + + train_output_path = os.path.join(FLAGS.output_dir, 'pet_faces_train.record') + val_output_path = os.path.join(FLAGS.output_dir, 'pet_faces_val.record') + if not FLAGS.faces_only: + train_output_path = os.path.join(FLAGS.output_dir, + 'pets_fullbody_with_masks_train.record') + val_output_path = os.path.join(FLAGS.output_dir, + 'pets_fullbody_with_masks_val.record') + create_tf_record( + train_output_path, + FLAGS.num_shards, + label_map_dict, + annotations_dir, + image_dir, + train_examples, + faces_only=FLAGS.faces_only, + mask_type=FLAGS.mask_type) + create_tf_record( + val_output_path, + FLAGS.num_shards, + label_map_dict, + annotations_dir, + image_dir, + val_examples, + faces_only=FLAGS.faces_only, + mask_type=FLAGS.mask_type) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/object_detection/dataset_tools/create_pycocotools_package.sh b/models/research/object_detection/dataset_tools/create_pycocotools_package.sh new file mode 100644 index 0000000000000000000000000000000000000000..88ea5114c237503ca63714b1276b89b3639b9926 --- /dev/null +++ b/models/research/object_detection/dataset_tools/create_pycocotools_package.sh @@ -0,0 +1,53 @@ +#!/bin/bash +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# Script to download pycocotools and make package for CMLE jobs. +# +# usage: +# bash object_detection/dataset_tools/create_pycocotools_package.sh \ +# /tmp/pycocotools +set -e + +if [ -z "$1" ]; then + echo "usage create_pycocotools_package.sh [output dir]" + exit +fi + +# Create the output directory. 
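+# (Note: the final package is written into this output directory as
+# pycocotools-2.0.tar.gz; the raw/ scratch directory is deleted once the
+# tarball has been built.)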
+OUTPUT_DIR="${1%/}" +SCRATCH_DIR="${OUTPUT_DIR}/raw" +mkdir -p "${OUTPUT_DIR}" +mkdir -p "${SCRATCH_DIR}" + +cd ${SCRATCH_DIR} +git clone https://github.com/cocodataset/cocoapi.git +cd cocoapi/PythonAPI && mv ../common ./ + +sed "s/\.\.\/common/common/g" setup.py > setup.py.updated +cp -f setup.py.updated setup.py +rm setup.py.updated + +sed "s/\.\.\/common/common/g" pycocotools/_mask.pyx > _mask.pyx.updated +cp -f _mask.pyx.updated pycocotools/_mask.pyx +rm _mask.pyx.updated + +sed "s/import matplotlib\.pyplot as plt/import matplotlib;matplotlib\.use\(\'Agg\'\);import matplotlib\.pyplot as plt/g" pycocotools/coco.py > coco.py.updated +cp -f coco.py.updated pycocotools/coco.py +rm coco.py.updated + +cd "${OUTPUT_DIR}" +tar -czf pycocotools-2.0.tar.gz -C "${SCRATCH_DIR}/cocoapi/" PythonAPI/ +rm -rf ${SCRATCH_DIR} diff --git a/models/research/object_detection/dataset_tools/download_and_preprocess_mscoco.sh b/models/research/object_detection/dataset_tools/download_and_preprocess_mscoco.sh new file mode 100644 index 0000000000000000000000000000000000000000..843ba86938d35eed18dd6f7968ea87c90551fc13 --- /dev/null +++ b/models/research/object_detection/dataset_tools/download_and_preprocess_mscoco.sh @@ -0,0 +1,106 @@ +#!/bin/bash +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# Script to download and preprocess the MSCOCO data set for detection. +# +# The outputs of this script are TFRecord files containing serialized +# tf.Example protocol buffers. See create_coco_tf_record.py for details of how +# the tf.Example protocol buffers are constructed and see +# http://cocodataset.org/#overview for an overview of the dataset. +# +# usage: +# bash object_detection/dataset_tools/download_and_preprocess_mscoco.sh \ +# /tmp/mscoco +set -e + +if [ -z "$1" ]; then + echo "usage download_and_preprocess_mscoco.sh [data dir]" + exit +fi + +if [ "$(uname)" == "Darwin" ]; then + UNZIP="tar -xf" +else + UNZIP="unzip -nq" +fi + +# Create the output directories. +OUTPUT_DIR="${1%/}" +SCRATCH_DIR="${OUTPUT_DIR}/raw-data" +mkdir -p "${OUTPUT_DIR}" +mkdir -p "${SCRATCH_DIR}" +CURRENT_DIR=$(pwd) + +# Helper function to download and unpack a .zip file. +function download_and_unzip() { + local BASE_URL=${1} + local FILENAME=${2} + + if [ ! -f ${FILENAME} ]; then + echo "Downloading ${FILENAME} to $(pwd)" + wget -nd -c "${BASE_URL}/${FILENAME}" + else + echo "Skipping download of ${FILENAME}" + fi + echo "Unzipping ${FILENAME}" + ${UNZIP} ${FILENAME} +} + +cd ${SCRATCH_DIR} + +# Download the images. 
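+# (These archives are consumed by create_coco_tf_record.py at the end of this
+# script, which writes the resulting TFRecord files into ${OUTPUT_DIR}.)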
+BASE_IMAGE_URL="http://images.cocodataset.org/zips" + +TRAIN_IMAGE_FILE="train2017.zip" +download_and_unzip ${BASE_IMAGE_URL} ${TRAIN_IMAGE_FILE} +TRAIN_IMAGE_DIR="${SCRATCH_DIR}/train2017" + +VAL_IMAGE_FILE="val2017.zip" +download_and_unzip ${BASE_IMAGE_URL} ${VAL_IMAGE_FILE} +VAL_IMAGE_DIR="${SCRATCH_DIR}/val2017" + +TEST_IMAGE_FILE="test2017.zip" +download_and_unzip ${BASE_IMAGE_URL} ${TEST_IMAGE_FILE} +TEST_IMAGE_DIR="${SCRATCH_DIR}/test2017" + +# Download the annotations. +BASE_INSTANCES_URL="http://images.cocodataset.org/annotations" +INSTANCES_FILE="annotations_trainval2017.zip" +download_and_unzip ${BASE_INSTANCES_URL} ${INSTANCES_FILE} + +TRAIN_ANNOTATIONS_FILE="${SCRATCH_DIR}/annotations/instances_train2017.json" +VAL_ANNOTATIONS_FILE="${SCRATCH_DIR}/annotations/instances_val2017.json" + +# Download the test image info. +BASE_IMAGE_INFO_URL="http://images.cocodataset.org/annotations" +IMAGE_INFO_FILE="image_info_test2017.zip" +download_and_unzip ${BASE_IMAGE_INFO_URL} ${IMAGE_INFO_FILE} + +TESTDEV_ANNOTATIONS_FILE="${SCRATCH_DIR}/annotations/image_info_test-dev2017.json" + +# Build TFRecords of the image data. +cd "${CURRENT_DIR}" +python object_detection/dataset_tools/create_coco_tf_record.py \ + --logtostderr \ + --include_masks \ + --train_image_dir="${TRAIN_IMAGE_DIR}" \ + --val_image_dir="${VAL_IMAGE_DIR}" \ + --test_image_dir="${TEST_IMAGE_DIR}" \ + --train_annotations_file="${TRAIN_ANNOTATIONS_FILE}" \ + --val_annotations_file="${VAL_ANNOTATIONS_FILE}" \ + --testdev_annotations_file="${TESTDEV_ANNOTATIONS_FILE}" \ + --output_dir="${OUTPUT_DIR}" + diff --git a/models/research/object_detection/dataset_tools/oid_hierarchical_labels_expansion.py b/models/research/object_detection/dataset_tools/oid_hierarchical_labels_expansion.py new file mode 100644 index 0000000000000000000000000000000000000000..b3fcf1431e63326874a25bae1cf9682181e78beb --- /dev/null +++ b/models/research/object_detection/dataset_tools/oid_hierarchical_labels_expansion.py @@ -0,0 +1,233 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""An executable to expand image-level labels, boxes and segments. + +The expansion is performed using class hierarchy, provided in JSON file. + +The expected file formats are the following: +- for box and segment files: CSV file is expected to have LabelName field +- for image-level labels: CSV file is expected to have LabelName and Confidence +fields + +Note, that LabelName is the only field used for expansion. 
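+
+For example, under a hierarchy in which class "d" has ancestors "c" and "f",
+a box or segment row annotated with LabelName "d" is kept as-is and two
+additional copies labelled "c" and "f" are appended (this toy hierarchy is
+the one exercised in oid_hierarchical_labels_expansion_test.py).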
+ +Example usage: +python models/research/object_detection/dataset_tools/\ +oid_hierarchical_labels_expansion.py \ +--json_hierarchy_file= \ +--input_annotations= \ +--output_annotations= \ +--annotation_type=<1 (for boxes and segments) or 2 (for image-level labels)> +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import json +from absl import app +from absl import flags +import six + +flags.DEFINE_string( + 'json_hierarchy_file', None, + 'Path to the file containing label hierarchy in JSON format.') +flags.DEFINE_string( + 'input_annotations', None, 'Path to Open Images annotations file' + '(either bounding boxes, segments or image-level labels).') +flags.DEFINE_string('output_annotations', None, 'Path to the output file.') +flags.DEFINE_integer( + 'annotation_type', None, + 'Type of the input annotations: 1 - boxes or segments,' + '2 - image-level labels.' +) + +FLAGS = flags.FLAGS + + +def _update_dict(initial_dict, update): + """Updates dictionary with update content. + + Args: + initial_dict: initial dictionary. + update: updated dictionary. + """ + + for key, value_list in update.items(): + if key in initial_dict: + initial_dict[key].update(value_list) + else: + initial_dict[key] = set(value_list) + + +def _build_plain_hierarchy(hierarchy, skip_root=False): + """Expands tree hierarchy representation to parent-child dictionary. + + Args: + hierarchy: labels hierarchy as JSON file. + skip_root: if true skips root from the processing (done for the case when all + classes under hierarchy are collected under virtual node). + + Returns: + keyed_parent - dictionary of parent - all its children nodes. + keyed_child - dictionary of children - all its parent nodes + children - all children of the current node. + """ + all_children = set([]) + all_keyed_parent = {} + all_keyed_child = {} + if 'Subcategory' in hierarchy: + for node in hierarchy['Subcategory']: + keyed_parent, keyed_child, children = _build_plain_hierarchy(node) + # Update is not done through dict.update() since some children have multi- + # ple parents in the hiearchy. + _update_dict(all_keyed_parent, keyed_parent) + _update_dict(all_keyed_child, keyed_child) + all_children.update(children) + + if not skip_root: + all_keyed_parent[hierarchy['LabelName']] = copy.deepcopy(all_children) + all_children.add(hierarchy['LabelName']) + for child, _ in all_keyed_child.items(): + all_keyed_child[child].add(hierarchy['LabelName']) + all_keyed_child[hierarchy['LabelName']] = set([]) + + return all_keyed_parent, all_keyed_child, all_children + + +class OIDHierarchicalLabelsExpansion(object): + """ Main class to perform labels hierachical expansion.""" + + def __init__(self, hierarchy): + """Constructor. + + Args: + hierarchy: labels hierarchy as JSON object. + """ + + self._hierarchy_keyed_parent, self._hierarchy_keyed_child, _ = ( + _build_plain_hierarchy(hierarchy, skip_root=True)) + + def expand_boxes_or_segments_from_csv(self, csv_row, + labelname_column_index=1): + """Expands a row containing bounding boxes/segments from CSV file. + + Args: + csv_row: a single row of Open Images released groundtruth file. + labelname_column_index: 0-based index of LabelName column in CSV file. + + Returns: + a list of strings (including the initial row) corresponding to the ground + truth expanded to multiple annotation for evaluation with Open Images + Challenge 2018/2019 metrics. 
+ """ + # Row header is expected to be the following for boxes: + # ImageID,LabelName,Confidence,XMin,XMax,YMin,YMax,IsGroupOf + # Row header is expected to be the following for segments: + # ImageID,LabelName,ImageWidth,ImageHeight,XMin,XMax,YMin,YMax, + # IsGroupOf,Mask + split_csv_row = six.ensure_str(csv_row).split(',') + result = [csv_row] + assert split_csv_row[ + labelname_column_index] in self._hierarchy_keyed_child + parent_nodes = self._hierarchy_keyed_child[ + split_csv_row[labelname_column_index]] + for parent_node in parent_nodes: + split_csv_row[labelname_column_index] = parent_node + result.append(','.join(split_csv_row)) + return result + + def expand_labels_from_csv(self, + csv_row, + labelname_column_index=1, + confidence_column_index=2): + """Expands a row containing labels from CSV file. + + Args: + csv_row: a single row of Open Images released groundtruth file. + labelname_column_index: 0-based index of LabelName column in CSV file. + confidence_column_index: 0-based index of Confidence column in CSV file. + + Returns: + a list of strings (including the initial row) corresponding to the ground + truth expanded to multiple annotation for evaluation with Open Images + Challenge 2018/2019 metrics. + """ + # Row header is expected to be exactly: + # ImageID,Source,LabelName,Confidence + split_csv_row = six.ensure_str(csv_row).split(',') + result = [csv_row] + if int(split_csv_row[confidence_column_index]) == 1: + assert split_csv_row[ + labelname_column_index] in self._hierarchy_keyed_child + parent_nodes = self._hierarchy_keyed_child[ + split_csv_row[labelname_column_index]] + for parent_node in parent_nodes: + split_csv_row[labelname_column_index] = parent_node + result.append(','.join(split_csv_row)) + else: + assert split_csv_row[ + labelname_column_index] in self._hierarchy_keyed_parent + child_nodes = self._hierarchy_keyed_parent[ + split_csv_row[labelname_column_index]] + for child_node in child_nodes: + split_csv_row[labelname_column_index] = child_node + result.append(','.join(split_csv_row)) + return result + + +def main(unused_args): + + del unused_args + + with open(FLAGS.json_hierarchy_file) as f: + hierarchy = json.load(f) + expansion_generator = OIDHierarchicalLabelsExpansion(hierarchy) + labels_file = False + if FLAGS.annotation_type == 2: + labels_file = True + elif FLAGS.annotation_type != 1: + print('--annotation_type expected value is 1 or 2.') + return -1 + confidence_column_index = -1 + labelname_column_index = -1 + with open(FLAGS.input_annotations, 'r') as source: + with open(FLAGS.output_annotations, 'w') as target: + header = source.readline() + target.writelines([header]) + column_names = header.strip().split(',') + labelname_column_index = column_names.index('LabelName') + if labels_file: + confidence_column_index = column_names.index('Confidence') + for line in source: + if labels_file: + expanded_lines = expansion_generator.expand_labels_from_csv( + line, labelname_column_index, confidence_column_index) + else: + expanded_lines = ( + expansion_generator.expand_boxes_or_segments_from_csv( + line, labelname_column_index)) + target.writelines(expanded_lines) + + +if __name__ == '__main__': + flags.mark_flag_as_required('json_hierarchy_file') + flags.mark_flag_as_required('input_annotations') + flags.mark_flag_as_required('output_annotations') + flags.mark_flag_as_required('annotation_type') + + app.run(main) diff --git a/models/research/object_detection/dataset_tools/oid_hierarchical_labels_expansion_test.py 
b/models/research/object_detection/dataset_tools/oid_hierarchical_labels_expansion_test.py new file mode 100644 index 0000000000000000000000000000000000000000..ca010c5bed3ee0a92f352596d18b3a515b654282 --- /dev/null +++ b/models/research/object_detection/dataset_tools/oid_hierarchical_labels_expansion_test.py @@ -0,0 +1,116 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for the OpenImages label expansion (OIDHierarchicalLabelsExpansion).""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf + +from object_detection.dataset_tools import oid_hierarchical_labels_expansion + + +def create_test_data(): + hierarchy = { + 'LabelName': + 'a', + 'Subcategory': [{ + 'LabelName': 'b' + }, { + 'LabelName': + 'c', + 'Subcategory': [{ + 'LabelName': 'd' + }, { + 'LabelName': 'e' + }, { + 'LabelName': 'f', + 'Subcategory': [{ + 'LabelName': 'd' + },] + }] + }, { + 'LabelName': 'f', + 'Subcategory': [{ + 'LabelName': 'd' + },] + }] + } + bbox_rows = [ + '123,xclick,b,1,0.1,0.2,0.1,0.2,1,1,0,0,0', + '123,xclick,d,1,0.2,0.3,0.1,0.2,1,1,0,0,0' + ] + label_rows = [ + '123,verification,b,0', '123,verification,c,0', '124,verification,d,1' + ] + segm_rows = [ + '123,cc,b,100,100,0.1,0.2,0.1,0.2,0,MASK', + '123,cc,d,100,100,0.2,0.3,0.1,0.2,0,MASK', + ] + return hierarchy, bbox_rows, segm_rows, label_rows + + +class HierarchicalLabelsExpansionTest(tf.test.TestCase): + + def test_bbox_expansion(self): + hierarchy, bbox_rows, _, _ = create_test_data() + expansion_generator = ( + oid_hierarchical_labels_expansion.OIDHierarchicalLabelsExpansion( + hierarchy)) + all_result_rows = [] + for row in bbox_rows: + all_result_rows.extend( + expansion_generator.expand_boxes_or_segments_from_csv(row, 2)) + self.assertItemsEqual([ + '123,xclick,b,1,0.1,0.2,0.1,0.2,1,1,0,0,0', + '123,xclick,d,1,0.2,0.3,0.1,0.2,1,1,0,0,0', + '123,xclick,f,1,0.2,0.3,0.1,0.2,1,1,0,0,0', + '123,xclick,c,1,0.2,0.3,0.1,0.2,1,1,0,0,0' + ], all_result_rows) + + def test_segm_expansion(self): + hierarchy, _, segm_rows, _ = create_test_data() + expansion_generator = ( + oid_hierarchical_labels_expansion.OIDHierarchicalLabelsExpansion( + hierarchy)) + all_result_rows = [] + for row in segm_rows: + all_result_rows.extend( + expansion_generator.expand_boxes_or_segments_from_csv(row, 2)) + self.assertItemsEqual([ + '123,cc,b,100,100,0.1,0.2,0.1,0.2,0,MASK', + '123,cc,d,100,100,0.2,0.3,0.1,0.2,0,MASK', + '123,cc,f,100,100,0.2,0.3,0.1,0.2,0,MASK', + '123,cc,c,100,100,0.2,0.3,0.1,0.2,0,MASK' + ], all_result_rows) + + def test_labels_expansion(self): + hierarchy, _, _, label_rows = create_test_data() + expansion_generator = ( + oid_hierarchical_labels_expansion.OIDHierarchicalLabelsExpansion( + hierarchy)) + all_result_rows = [] + for row in label_rows: + all_result_rows.extend( + 
expansion_generator.expand_labels_from_csv(row, 2, 3)) + self.assertItemsEqual([ + '123,verification,b,0', '123,verification,c,0', '123,verification,d,0', + '123,verification,f,0', '123,verification,e,0', '124,verification,d,1', + '124,verification,f,1', '124,verification,c,1' + ], all_result_rows) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/dataset_tools/oid_tfrecord_creation.py b/models/research/object_detection/dataset_tools/oid_tfrecord_creation.py new file mode 100644 index 0000000000000000000000000000000000000000..a4618a6aac0b884174abfddc31228c039aae7c66 --- /dev/null +++ b/models/research/object_detection/dataset_tools/oid_tfrecord_creation.py @@ -0,0 +1,112 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Utilities for creating TFRecords of TF examples for the Open Images dataset. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import six +import tensorflow.compat.v1 as tf + +from object_detection.core import standard_fields +from object_detection.utils import dataset_util + + +def tf_example_from_annotations_data_frame(annotations_data_frame, label_map, + encoded_image): + """Populates a TF Example message with image annotations from a data frame. + + Args: + annotations_data_frame: Data frame containing the annotations for a single + image. + label_map: String to integer label map. + encoded_image: The encoded image string + + Returns: + The populated TF Example, if the label of at least one object is present in + label_map. Otherwise, returns None. 
+ """ + + filtered_data_frame = annotations_data_frame[ + annotations_data_frame.LabelName.isin(label_map)] + filtered_data_frame_boxes = filtered_data_frame[ + ~filtered_data_frame.YMin.isnull()] + filtered_data_frame_labels = filtered_data_frame[ + filtered_data_frame.YMin.isnull()] + image_id = annotations_data_frame.ImageID.iloc[0] + + feature_map = { + standard_fields.TfExampleFields.object_bbox_ymin: + dataset_util.float_list_feature( + filtered_data_frame_boxes.YMin.as_matrix()), + standard_fields.TfExampleFields.object_bbox_xmin: + dataset_util.float_list_feature( + filtered_data_frame_boxes.XMin.as_matrix()), + standard_fields.TfExampleFields.object_bbox_ymax: + dataset_util.float_list_feature( + filtered_data_frame_boxes.YMax.as_matrix()), + standard_fields.TfExampleFields.object_bbox_xmax: + dataset_util.float_list_feature( + filtered_data_frame_boxes.XMax.as_matrix()), + standard_fields.TfExampleFields.object_class_text: + dataset_util.bytes_list_feature([ + six.ensure_binary(label_text) + for label_text in filtered_data_frame_boxes.LabelName.as_matrix() + ]), + standard_fields.TfExampleFields.object_class_label: + dataset_util.int64_list_feature( + filtered_data_frame_boxes.LabelName.map( + lambda x: label_map[x]).as_matrix()), + standard_fields.TfExampleFields.filename: + dataset_util.bytes_feature( + six.ensure_binary('{}.jpg'.format(image_id))), + standard_fields.TfExampleFields.source_id: + dataset_util.bytes_feature(six.ensure_binary(image_id)), + standard_fields.TfExampleFields.image_encoded: + dataset_util.bytes_feature(six.ensure_binary(encoded_image)), + } + + if 'IsGroupOf' in filtered_data_frame.columns: + feature_map[standard_fields.TfExampleFields. + object_group_of] = dataset_util.int64_list_feature( + filtered_data_frame_boxes.IsGroupOf.as_matrix().astype(int)) + if 'IsOccluded' in filtered_data_frame.columns: + feature_map[standard_fields.TfExampleFields. + object_occluded] = dataset_util.int64_list_feature( + filtered_data_frame_boxes.IsOccluded.as_matrix().astype( + int)) + if 'IsTruncated' in filtered_data_frame.columns: + feature_map[standard_fields.TfExampleFields. + object_truncated] = dataset_util.int64_list_feature( + filtered_data_frame_boxes.IsTruncated.as_matrix().astype( + int)) + if 'IsDepiction' in filtered_data_frame.columns: + feature_map[standard_fields.TfExampleFields. + object_depiction] = dataset_util.int64_list_feature( + filtered_data_frame_boxes.IsDepiction.as_matrix().astype( + int)) + + if 'ConfidenceImageLabel' in filtered_data_frame_labels.columns: + feature_map[standard_fields.TfExampleFields. + image_class_label] = dataset_util.int64_list_feature( + filtered_data_frame_labels.LabelName.map( + lambda x: label_map[x]).as_matrix()) + feature_map[standard_fields.TfExampleFields + .image_class_text] = dataset_util.bytes_list_feature([ + six.ensure_binary(label_text) for label_text in + filtered_data_frame_labels.LabelName.as_matrix() + ]), + return tf.train.Example(features=tf.train.Features(feature=feature_map)) diff --git a/models/research/object_detection/dataset_tools/oid_tfrecord_creation_test.py b/models/research/object_detection/dataset_tools/oid_tfrecord_creation_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b1e945f46d6159104735ea809c17ebe9b3cf73d1 --- /dev/null +++ b/models/research/object_detection/dataset_tools/oid_tfrecord_creation_test.py @@ -0,0 +1,200 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for oid_tfrecord_creation.py.""" + +import pandas as pd +import six +import tensorflow.compat.v1 as tf + +from object_detection.dataset_tools import oid_tfrecord_creation + + +def create_test_data(): + data = { + 'ImageID': ['i1', 'i1', 'i1', 'i1', 'i1', 'i2', 'i2'], + 'LabelName': ['a', 'a', 'b', 'b', 'c', 'b', 'c'], + 'YMin': [0.3, 0.6, 0.8, 0.1, None, 0.0, 0.0], + 'XMin': [0.1, 0.3, 0.7, 0.0, None, 0.1, 0.1], + 'XMax': [0.2, 0.3, 0.8, 0.5, None, 0.9, 0.9], + 'YMax': [0.3, 0.6, 1, 0.8, None, 0.8, 0.8], + 'IsOccluded': [0, 1, 1, 0, None, 0, 0], + 'IsTruncated': [0, 0, 0, 1, None, 0, 0], + 'IsGroupOf': [0, 0, 0, 0, None, 0, 1], + 'IsDepiction': [1, 0, 0, 0, None, 0, 0], + 'ConfidenceImageLabel': [None, None, None, None, 0, None, None], + } + df = pd.DataFrame(data=data) + label_map = {'a': 0, 'b': 1, 'c': 2} + return label_map, df + + +class TfExampleFromAnnotationsDataFrameTests(tf.test.TestCase): + + def test_simple(self): + label_map, df = create_test_data() + + tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame( + df[df.ImageID == 'i1'], label_map, 'encoded_image_test') + self.assertProtoEquals(six.ensure_str(""" + features { + feature { + key: "image/encoded" + value { bytes_list { value: "encoded_image_test" } } } + feature { + key: "image/filename" + value { bytes_list { value: "i1.jpg" } } } + feature { + key: "image/object/bbox/ymin" + value { float_list { value: [0.3, 0.6, 0.8, 0.1] } } } + feature { + key: "image/object/bbox/xmin" + value { float_list { value: [0.1, 0.3, 0.7, 0.0] } } } + feature { + key: "image/object/bbox/ymax" + value { float_list { value: [0.3, 0.6, 1.0, 0.8] } } } + feature { + key: "image/object/bbox/xmax" + value { float_list { value: [0.2, 0.3, 0.8, 0.5] } } } + feature { + key: "image/object/class/label" + value { int64_list { value: [0, 0, 1, 1] } } } + feature { + key: "image/object/class/text" + value { bytes_list { value: ["a", "a", "b", "b"] } } } + feature { + key: "image/source_id" + value { bytes_list { value: "i1" } } } + feature { + key: "image/object/depiction" + value { int64_list { value: [1, 0, 0, 0] } } } + feature { + key: "image/object/group_of" + value { int64_list { value: [0, 0, 0, 0] } } } + feature { + key: "image/object/occluded" + value { int64_list { value: [0, 1, 1, 0] } } } + feature { + key: "image/object/truncated" + value { int64_list { value: [0, 0, 0, 1] } } } + feature { + key: "image/class/label" + value { int64_list { value: [2] } } } + feature { + key: "image/class/text" + value { bytes_list { value: ["c"] } } } } + """), tf_example) + + def test_no_attributes(self): + label_map, df = create_test_data() + + del df['IsDepiction'] + del df['IsGroupOf'] + del df['IsOccluded'] + del df['IsTruncated'] + del df['ConfidenceImageLabel'] + + tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame( + df[df.ImageID == 'i2'], label_map, 
'encoded_image_test') + self.assertProtoEquals(six.ensure_str(""" + features { + feature { + key: "image/encoded" + value { bytes_list { value: "encoded_image_test" } } } + feature { + key: "image/filename" + value { bytes_list { value: "i2.jpg" } } } + feature { + key: "image/object/bbox/ymin" + value { float_list { value: [0.0, 0.0] } } } + feature { + key: "image/object/bbox/xmin" + value { float_list { value: [0.1, 0.1] } } } + feature { + key: "image/object/bbox/ymax" + value { float_list { value: [0.8, 0.8] } } } + feature { + key: "image/object/bbox/xmax" + value { float_list { value: [0.9, 0.9] } } } + feature { + key: "image/object/class/label" + value { int64_list { value: [1, 2] } } } + feature { + key: "image/object/class/text" + value { bytes_list { value: ["b", "c"] } } } + feature { + key: "image/source_id" + value { bytes_list { value: "i2" } } } } + """), tf_example) + + def test_label_filtering(self): + label_map, df = create_test_data() + + label_map = {'a': 0} + + tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame( + df[df.ImageID == 'i1'], label_map, 'encoded_image_test') + self.assertProtoEquals( + six.ensure_str(""" + features { + feature { + key: "image/encoded" + value { bytes_list { value: "encoded_image_test" } } } + feature { + key: "image/filename" + value { bytes_list { value: "i1.jpg" } } } + feature { + key: "image/object/bbox/ymin" + value { float_list { value: [0.3, 0.6] } } } + feature { + key: "image/object/bbox/xmin" + value { float_list { value: [0.1, 0.3] } } } + feature { + key: "image/object/bbox/ymax" + value { float_list { value: [0.3, 0.6] } } } + feature { + key: "image/object/bbox/xmax" + value { float_list { value: [0.2, 0.3] } } } + feature { + key: "image/object/class/label" + value { int64_list { value: [0, 0] } } } + feature { + key: "image/object/class/text" + value { bytes_list { value: ["a", "a"] } } } + feature { + key: "image/source_id" + value { bytes_list { value: "i1" } } } + feature { + key: "image/object/depiction" + value { int64_list { value: [1, 0] } } } + feature { + key: "image/object/group_of" + value { int64_list { value: [0, 0] } } } + feature { + key: "image/object/occluded" + value { int64_list { value: [0, 1] } } } + feature { + key: "image/object/truncated" + value { int64_list { value: [0, 0] } } } + feature { + key: "image/class/label" + value { int64_list { } } } + feature { + key: "image/class/text" + value { bytes_list { } } } } + """), tf_example) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/dataset_tools/seq_example_util.py b/models/research/object_detection/dataset_tools/seq_example_util.py new file mode 100644 index 0000000000000000000000000000000000000000..84573ec7eff5c2217693bd777386533c2c164af0 --- /dev/null +++ b/models/research/object_detection/dataset_tools/seq_example_util.py @@ -0,0 +1,264 @@ +# Lint as: python2, python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Common utility for object detection tf.train.SequenceExamples.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow.compat.v1 as tf + + +def context_float_feature(ndarray): + """Converts a numpy float array to a context float feature. + + Args: + ndarray: A numpy float array. + + Returns: + A context float feature. + """ + feature = tf.train.Feature() + for val in ndarray: + feature.float_list.value.append(val) + return feature + + +def context_int64_feature(ndarray): + """Converts a numpy array to a context int64 feature. + + Args: + ndarray: A numpy int64 array. + + Returns: + A context int64 feature. + """ + feature = tf.train.Feature() + for val in ndarray: + feature.int64_list.value.append(val) + return feature + + +def context_bytes_feature(ndarray): + """Converts a numpy bytes array to a context bytes feature. + + Args: + ndarray: A numpy bytes array. + + Returns: + A context bytes feature. + """ + feature = tf.train.Feature() + for val in ndarray: + if isinstance(val, np.ndarray): + val = val.tolist() + feature.bytes_list.value.append(tf.compat.as_bytes(val)) + return feature + + +def sequence_float_feature(ndarray): + """Converts a numpy float array to a sequence float feature. + + Args: + ndarray: A numpy float array. + + Returns: + A sequence float feature. + """ + feature_list = tf.train.FeatureList() + for row in ndarray: + feature = feature_list.feature.add() + if row.size: + feature.float_list.value[:] = row + return feature_list + + +def sequence_int64_feature(ndarray): + """Converts a numpy int64 array to a sequence int64 feature. + + Args: + ndarray: A numpy int64 array. + + Returns: + A sequence int64 feature. + """ + feature_list = tf.train.FeatureList() + for row in ndarray: + feature = feature_list.feature.add() + if row.size: + feature.int64_list.value[:] = row + return feature_list + + +def sequence_bytes_feature(ndarray): + """Converts a bytes float array to a sequence bytes feature. + + Args: + ndarray: A numpy bytes array. + + Returns: + A sequence bytes feature. + """ + feature_list = tf.train.FeatureList() + for row in ndarray: + if isinstance(row, np.ndarray): + row = row.tolist() + feature = feature_list.feature.add() + if row: + row = [tf.compat.as_bytes(val) for val in row] + feature.bytes_list.value[:] = row + return feature_list + + +def boxes_to_box_components(bboxes): + """Converts a list of numpy arrays (boxes) to box components. + + Args: + bboxes: A numpy array of bounding boxes. + + Returns: + Bounding box component lists. + """ + ymin_list = [] + xmin_list = [] + ymax_list = [] + xmax_list = [] + for bbox in bboxes: + bbox = np.array(bbox).astype(np.float32) + ymin, xmin, ymax, xmax = np.split(bbox, 4, axis=1) + ymin_list.append(np.reshape(ymin, [-1])) + xmin_list.append(np.reshape(xmin, [-1])) + ymax_list.append(np.reshape(ymax, [-1])) + xmax_list.append(np.reshape(xmax, [-1])) + return ymin_list, xmin_list, ymax_list, xmax_list + + +def make_sequence_example(dataset_name, + video_id, + encoded_images, + image_height, + image_width, + image_format=None, + image_source_ids=None, + timestamps=None, + is_annotated=None, + bboxes=None, + label_strings=None, + detection_bboxes=None, + detection_classes=None, + detection_scores=None): + """Constructs tf.SequenceExamples. + + Args: + dataset_name: String with dataset name. + video_id: String with video id. 
+ encoded_images: A [num_frames] list (or numpy array) of encoded image + frames. + image_height: Height of the images. + image_width: Width of the images. + image_format: Format of encoded images. + image_source_ids: (Optional) A [num_frames] list of unique string ids for + each image. + timestamps: (Optional) A [num_frames] list (or numpy array) array with image + timestamps. + is_annotated: (Optional) A [num_frames] list (or numpy array) array + in which each element indicates whether the frame has been annotated + (1) or not (0). + bboxes: (Optional) A list (with num_frames elements) of [num_boxes_i, 4] + numpy float32 arrays holding boxes for each frame. + label_strings: (Optional) A list (with num_frames_elements) of [num_boxes_i] + numpy string arrays holding object string labels for each frame. + detection_bboxes: (Optional) A list (with num_frames elements) of + [num_boxes_i, 4] numpy float32 arrays holding prediction boxes for each + frame. + detection_classes: (Optional) A list (with num_frames_elements) of + [num_boxes_i] numpy int64 arrays holding predicted classes for each frame. + detection_scores: (Optional) A list (with num_frames_elements) of + [num_boxes_i] numpy float32 arrays holding predicted object scores for + each frame. + + Returns: + A tf.train.SequenceExample. + """ + num_frames = len(encoded_images) + image_encoded = np.expand_dims(encoded_images, axis=-1) + if timestamps is None: + timestamps = np.arange(num_frames) + image_timestamps = np.expand_dims(timestamps, axis=-1) + + # Context fields. + context_dict = { + 'example/dataset_name': context_bytes_feature([dataset_name]), + 'clip/start/timestamp': context_int64_feature([image_timestamps[0][0]]), + 'clip/end/timestamp': context_int64_feature([image_timestamps[-1][0]]), + 'clip/frames': context_int64_feature([num_frames]), + 'image/channels': context_int64_feature([3]), + 'image/height': context_int64_feature([image_height]), + 'image/width': context_int64_feature([image_width]), + 'clip/media_id': context_bytes_feature([video_id]) + } + + # Sequence fields. + feature_list = { + 'image/encoded': sequence_bytes_feature(image_encoded), + 'image/timestamp': sequence_int64_feature(image_timestamps), + } + + # Add optional fields. 
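+  # Illustrative sketch (assumed values, not part of this module): the
+  # optional arguments handled below might be supplied as, e.g.,
+  #   make_sequence_example(
+  #       dataset_name='my_dataset', video_id='vid_0',
+  #       encoded_images=[jpeg_bytes_0, jpeg_bytes_1],
+  #       image_height=480, image_width=640, image_format='JPEG',
+  #       bboxes=[np.array([[0.1, 0.1, 0.5, 0.5]], np.float32),
+  #               np.zeros([0, 4], np.float32)],
+  #       label_strings=[np.array(['dog']), np.array([])])
+  # where each bboxes entry is a per-frame [num_boxes_i, 4] float32 array in
+  # [ymin, xmin, ymax, xmax] order (see boxes_to_box_components above).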
+ if image_format is not None: + context_dict['image/format'] = context_bytes_feature([image_format]) + if image_source_ids is not None: + feature_list['image/source_id'] = sequence_bytes_feature(image_source_ids) + if bboxes is not None: + bbox_ymin, bbox_xmin, bbox_ymax, bbox_xmax = boxes_to_box_components(bboxes) + feature_list['region/bbox/xmin'] = sequence_float_feature(bbox_xmin) + feature_list['region/bbox/xmax'] = sequence_float_feature(bbox_xmax) + feature_list['region/bbox/ymin'] = sequence_float_feature(bbox_ymin) + feature_list['region/bbox/ymax'] = sequence_float_feature(bbox_ymax) + if is_annotated is None: + is_annotated = np.ones(num_frames, dtype=np.int64) + is_annotated = np.expand_dims(is_annotated, axis=-1) + feature_list['region/is_annotated'] = sequence_int64_feature(is_annotated) + + if label_strings is not None: + feature_list['region/label/string'] = sequence_bytes_feature( + label_strings) + + if detection_bboxes is not None: + det_bbox_ymin, det_bbox_xmin, det_bbox_ymax, det_bbox_xmax = ( + boxes_to_box_components(detection_bboxes)) + feature_list['predicted/region/bbox/xmin'] = sequence_float_feature( + det_bbox_xmin) + feature_list['predicted/region/bbox/xmax'] = sequence_float_feature( + det_bbox_xmax) + feature_list['predicted/region/bbox/ymin'] = sequence_float_feature( + det_bbox_ymin) + feature_list['predicted/region/bbox/ymax'] = sequence_float_feature( + det_bbox_ymax) + if detection_classes is not None: + feature_list['predicted/region/label/index'] = sequence_int64_feature( + detection_classes) + if detection_scores is not None: + feature_list['predicted/region/label/confidence'] = sequence_float_feature( + detection_scores) + + context = tf.train.Features(feature=context_dict) + feature_lists = tf.train.FeatureLists(feature_list=feature_list) + + sequence_example = tf.train.SequenceExample( + context=context, + feature_lists=feature_lists) + return sequence_example diff --git a/models/research/object_detection/dataset_tools/seq_example_util_test.py b/models/research/object_detection/dataset_tools/seq_example_util_test.py new file mode 100644 index 0000000000000000000000000000000000000000..ba898d73520badae017deb341e34ddbeb2b0eae8 --- /dev/null +++ b/models/research/object_detection/dataset_tools/seq_example_util_test.py @@ -0,0 +1,364 @@ +# Lint as: python2, python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for object_detection.utils.seq_example_util.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import six +import tensorflow.compat.v1 as tf + +from object_detection.dataset_tools import seq_example_util +from object_detection.utils import tf_version + + +class SeqExampleUtilTest(tf.test.TestCase): + + def materialize_tensors(self, list_of_tensors): + if tf_version.is_tf2(): + return [tensor.numpy() for tensor in list_of_tensors] + else: + with self.cached_session() as sess: + return sess.run(list_of_tensors) + + def test_make_unlabeled_example(self): + num_frames = 5 + image_height = 100 + image_width = 200 + dataset_name = b'unlabeled_dataset' + video_id = b'video_000' + images = tf.cast(tf.random.uniform( + [num_frames, image_height, image_width, 3], + maxval=256, + dtype=tf.int32), dtype=tf.uint8) + image_source_ids = [str(idx) for idx in range(num_frames)] + images_list = tf.unstack(images, axis=0) + encoded_images_list = [tf.io.encode_jpeg(image) for image in images_list] + encoded_images = self.materialize_tensors(encoded_images_list) + seq_example = seq_example_util.make_sequence_example( + dataset_name=dataset_name, + video_id=video_id, + encoded_images=encoded_images, + image_height=image_height, + image_width=image_width, + image_format='JPEG', + image_source_ids=image_source_ids) + + context_feature_dict = seq_example.context.feature + self.assertEqual( + dataset_name, + context_feature_dict['example/dataset_name'].bytes_list.value[0]) + self.assertEqual( + 0, + context_feature_dict['clip/start/timestamp'].int64_list.value[0]) + self.assertEqual( + num_frames - 1, + context_feature_dict['clip/end/timestamp'].int64_list.value[0]) + self.assertEqual( + num_frames, + context_feature_dict['clip/frames'].int64_list.value[0]) + self.assertEqual( + 3, + context_feature_dict['image/channels'].int64_list.value[0]) + self.assertEqual( + b'JPEG', + context_feature_dict['image/format'].bytes_list.value[0]) + self.assertEqual( + image_height, + context_feature_dict['image/height'].int64_list.value[0]) + self.assertEqual( + image_width, + context_feature_dict['image/width'].int64_list.value[0]) + self.assertEqual( + video_id, + context_feature_dict['clip/media_id'].bytes_list.value[0]) + + seq_feature_dict = seq_example.feature_lists.feature_list + self.assertLen( + seq_feature_dict['image/encoded'].feature[:], + num_frames) + timestamps = [ + feature.int64_list.value[0] for feature + in seq_feature_dict['image/timestamp'].feature] + self.assertAllEqual(list(range(num_frames)), timestamps) + source_ids = [ + feature.bytes_list.value[0] for feature + in seq_feature_dict['image/source_id'].feature] + self.assertAllEqual( + [six.ensure_binary(str(idx)) for idx in range(num_frames)], + source_ids) + + def test_make_labeled_example(self): + num_frames = 2 + image_height = 100 + image_width = 200 + dataset_name = b'unlabeled_dataset' + video_id = b'video_000' + labels = [b'dog', b'cat'] + images = tf.cast(tf.random.uniform( + [num_frames, image_height, image_width, 3], + maxval=256, + dtype=tf.int32), dtype=tf.uint8) + images_list = tf.unstack(images, axis=0) + encoded_images_list = [tf.io.encode_jpeg(image) for image in images_list] + encoded_images = self.materialize_tensors(encoded_images_list) + timestamps = [100000, 110000] + is_annotated = [1, 0] + bboxes = [ + np.array([[0., 0., 0., 0.], + [0., 0., 1., 1.]], 
dtype=np.float32), + np.zeros([0, 4], dtype=np.float32) + ] + label_strings = [ + np.array(labels), + np.array([]) + ] + + seq_example = seq_example_util.make_sequence_example( + dataset_name=dataset_name, + video_id=video_id, + encoded_images=encoded_images, + image_height=image_height, + image_width=image_width, + timestamps=timestamps, + is_annotated=is_annotated, + bboxes=bboxes, + label_strings=label_strings) + + context_feature_dict = seq_example.context.feature + self.assertEqual( + dataset_name, + context_feature_dict['example/dataset_name'].bytes_list.value[0]) + self.assertEqual( + timestamps[0], + context_feature_dict['clip/start/timestamp'].int64_list.value[0]) + self.assertEqual( + timestamps[-1], + context_feature_dict['clip/end/timestamp'].int64_list.value[0]) + self.assertEqual( + num_frames, + context_feature_dict['clip/frames'].int64_list.value[0]) + + seq_feature_dict = seq_example.feature_lists.feature_list + self.assertLen( + seq_feature_dict['image/encoded'].feature[:], + num_frames) + actual_timestamps = [ + feature.int64_list.value[0] for feature + in seq_feature_dict['image/timestamp'].feature] + self.assertAllEqual(timestamps, actual_timestamps) + # Frame 0. + self.assertAllEqual( + is_annotated[0], + seq_feature_dict['region/is_annotated'].feature[0].int64_list.value[0]) + self.assertAllClose( + [0., 0.], + seq_feature_dict['region/bbox/ymin'].feature[0].float_list.value[:]) + self.assertAllClose( + [0., 0.], + seq_feature_dict['region/bbox/xmin'].feature[0].float_list.value[:]) + self.assertAllClose( + [0., 1.], + seq_feature_dict['region/bbox/ymax'].feature[0].float_list.value[:]) + self.assertAllClose( + [0., 1.], + seq_feature_dict['region/bbox/xmax'].feature[0].float_list.value[:]) + self.assertAllEqual( + labels, + seq_feature_dict['region/label/string'].feature[0].bytes_list.value[:]) + + # Frame 1. 
+ self.assertAllEqual( + is_annotated[1], + seq_feature_dict['region/is_annotated'].feature[1].int64_list.value[0]) + self.assertAllClose( + [], + seq_feature_dict['region/bbox/ymin'].feature[1].float_list.value[:]) + self.assertAllClose( + [], + seq_feature_dict['region/bbox/xmin'].feature[1].float_list.value[:]) + self.assertAllClose( + [], + seq_feature_dict['region/bbox/ymax'].feature[1].float_list.value[:]) + self.assertAllClose( + [], + seq_feature_dict['region/bbox/xmax'].feature[1].float_list.value[:]) + self.assertAllEqual( + [], + seq_feature_dict['region/label/string'].feature[1].bytes_list.value[:]) + + def test_make_labeled_example_with_predictions(self): + num_frames = 2 + image_height = 100 + image_width = 200 + dataset_name = b'unlabeled_dataset' + video_id = b'video_000' + images = tf.cast(tf.random.uniform( + [num_frames, image_height, image_width, 3], + maxval=256, + dtype=tf.int32), dtype=tf.uint8) + images_list = tf.unstack(images, axis=0) + encoded_images_list = [tf.io.encode_jpeg(image) for image in images_list] + encoded_images = self.materialize_tensors(encoded_images_list) + bboxes = [ + np.array([[0., 0., 0.75, 0.75], + [0., 0., 1., 1.]], dtype=np.float32), + np.array([[0., 0.25, 0.5, 0.75]], dtype=np.float32) + ] + label_strings = [ + np.array(['cat', 'frog']), + np.array(['cat']) + ] + detection_bboxes = [ + np.array([[0., 0., 0.75, 0.75]], dtype=np.float32), + np.zeros([0, 4], dtype=np.float32) + ] + detection_classes = [ + np.array([5], dtype=np.int64), + np.array([], dtype=np.int64) + ] + detection_scores = [ + np.array([0.9], dtype=np.float32), + np.array([], dtype=np.float32) + ] + + seq_example = seq_example_util.make_sequence_example( + dataset_name=dataset_name, + video_id=video_id, + encoded_images=encoded_images, + image_height=image_height, + image_width=image_width, + bboxes=bboxes, + label_strings=label_strings, + detection_bboxes=detection_bboxes, + detection_classes=detection_classes, + detection_scores=detection_scores) + + context_feature_dict = seq_example.context.feature + self.assertEqual( + dataset_name, + context_feature_dict['example/dataset_name'].bytes_list.value[0]) + self.assertEqual( + 0, + context_feature_dict['clip/start/timestamp'].int64_list.value[0]) + self.assertEqual( + 1, + context_feature_dict['clip/end/timestamp'].int64_list.value[0]) + self.assertEqual( + num_frames, + context_feature_dict['clip/frames'].int64_list.value[0]) + + seq_feature_dict = seq_example.feature_lists.feature_list + self.assertLen( + seq_feature_dict['image/encoded'].feature[:], + num_frames) + actual_timestamps = [ + feature.int64_list.value[0] for feature + in seq_feature_dict['image/timestamp'].feature] + self.assertAllEqual([0, 1], actual_timestamps) + # Frame 0. 
+ self.assertAllEqual( + 1, + seq_feature_dict['region/is_annotated'].feature[0].int64_list.value[0]) + self.assertAllClose( + [0., 0.], + seq_feature_dict['region/bbox/ymin'].feature[0].float_list.value[:]) + self.assertAllClose( + [0., 0.], + seq_feature_dict['region/bbox/xmin'].feature[0].float_list.value[:]) + self.assertAllClose( + [0.75, 1.], + seq_feature_dict['region/bbox/ymax'].feature[0].float_list.value[:]) + self.assertAllClose( + [0.75, 1.], + seq_feature_dict['region/bbox/xmax'].feature[0].float_list.value[:]) + self.assertAllEqual( + ['cat', 'frog'], + seq_feature_dict['region/label/string'].feature[0].bytes_list.value[:]) + self.assertAllClose( + [0.], + seq_feature_dict[ + 'predicted/region/bbox/ymin'].feature[0].float_list.value[:]) + self.assertAllClose( + [0.], + seq_feature_dict[ + 'predicted/region/bbox/xmin'].feature[0].float_list.value[:]) + self.assertAllClose( + [0.75], + seq_feature_dict[ + 'predicted/region/bbox/ymax'].feature[0].float_list.value[:]) + self.assertAllClose( + [0.75], + seq_feature_dict[ + 'predicted/region/bbox/xmax'].feature[0].float_list.value[:]) + self.assertAllEqual( + [5], + seq_feature_dict[ + 'predicted/region/label/index'].feature[0].int64_list.value[:]) + self.assertAllClose( + [0.9], + seq_feature_dict[ + 'predicted/region/label/confidence'].feature[0].float_list.value[:]) + + # Frame 1. + self.assertAllEqual( + 1, + seq_feature_dict['region/is_annotated'].feature[1].int64_list.value[0]) + self.assertAllClose( + [0.0], + seq_feature_dict['region/bbox/ymin'].feature[1].float_list.value[:]) + self.assertAllClose( + [0.25], + seq_feature_dict['region/bbox/xmin'].feature[1].float_list.value[:]) + self.assertAllClose( + [0.5], + seq_feature_dict['region/bbox/ymax'].feature[1].float_list.value[:]) + self.assertAllClose( + [0.75], + seq_feature_dict['region/bbox/xmax'].feature[1].float_list.value[:]) + self.assertAllEqual( + ['cat'], + seq_feature_dict['region/label/string'].feature[1].bytes_list.value[:]) + self.assertAllClose( + [], + seq_feature_dict[ + 'predicted/region/bbox/ymin'].feature[1].float_list.value[:]) + self.assertAllClose( + [], + seq_feature_dict[ + 'predicted/region/bbox/xmin'].feature[1].float_list.value[:]) + self.assertAllClose( + [], + seq_feature_dict[ + 'predicted/region/bbox/ymax'].feature[1].float_list.value[:]) + self.assertAllClose( + [], + seq_feature_dict[ + 'predicted/region/bbox/xmax'].feature[1].float_list.value[:]) + self.assertAllEqual( + [], + seq_feature_dict[ + 'predicted/region/label/index'].feature[1].int64_list.value[:]) + self.assertAllClose( + [], + seq_feature_dict[ + 'predicted/region/label/confidence'].feature[1].float_list.value[:]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/dataset_tools/tf_record_creation_util.py b/models/research/object_detection/dataset_tools/tf_record_creation_util.py new file mode 100644 index 0000000000000000000000000000000000000000..e54bcbcecdf95a9a4c524425ac612fcbe6eaeef3 --- /dev/null +++ b/models/research/object_detection/dataset_tools/tf_record_creation_util.py @@ -0,0 +1,48 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Utilities for creating TFRecords of TF examples for the Open Images dataset. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range +import tensorflow.compat.v1 as tf + + +def open_sharded_output_tfrecords(exit_stack, base_path, num_shards): + """Opens all TFRecord shards for writing and adds them to an exit stack. + + Args: + exit_stack: A context2.ExitStack used to automatically closed the TFRecords + opened in this function. + base_path: The base path for all shards + num_shards: The number of shards + + Returns: + The list of opened TFRecords. Position k in the list corresponds to shard k. + """ + tf_record_output_filenames = [ + '{}-{:05d}-of-{:05d}'.format(base_path, idx, num_shards) + for idx in range(num_shards) + ] + + tfrecords = [ + exit_stack.enter_context(tf.python_io.TFRecordWriter(file_name)) + for file_name in tf_record_output_filenames + ] + + return tfrecords diff --git a/models/research/object_detection/dataset_tools/tf_record_creation_util_test.py b/models/research/object_detection/dataset_tools/tf_record_creation_util_test.py new file mode 100644 index 0000000000000000000000000000000000000000..2873a6d146fbdb8ae62c558abe8f62e76943b515 --- /dev/null +++ b/models/research/object_detection/dataset_tools/tf_record_creation_util_test.py @@ -0,0 +1,49 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for tf_record_creation_util.py.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import contextlib2 +import six +from six.moves import range +import tensorflow.compat.v1 as tf + +from object_detection.dataset_tools import tf_record_creation_util + + +class OpenOutputTfrecordsTests(tf.test.TestCase): + + def test_sharded_tfrecord_writes(self): + with contextlib2.ExitStack() as tf_record_close_stack: + output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords( + tf_record_close_stack, + os.path.join(tf.test.get_temp_dir(), 'test.tfrec'), 10) + for idx in range(10): + output_tfrecords[idx].write(six.ensure_binary('test_{}'.format(idx))) + + for idx in range(10): + tf_record_path = '{}-{:05d}-of-00010'.format( + os.path.join(tf.test.get_temp_dir(), 'test.tfrec'), idx) + records = list(tf.python_io.tf_record_iterator(tf_record_path)) + self.assertAllEqual(records, ['test_{}'.format(idx)]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/dockerfiles/android/Dockerfile b/models/research/object_detection/dockerfiles/android/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..470f669dccd057dcdbae0e929258fc0eb5f96703 --- /dev/null +++ b/models/research/object_detection/dockerfiles/android/Dockerfile @@ -0,0 +1,140 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# #========================================================================== + +# Pull TF nightly-devel docker image +FROM tensorflow/tensorflow:nightly-devel + +# Get the tensorflow models research directory, and move it into tensorflow +# source folder to match recommendation of installation +RUN git clone --depth 1 https://github.com/tensorflow/models.git && \ + mv models /tensorflow/models + + +# Install gcloud and gsutil commands +# https://cloud.google.com/sdk/docs/quickstart-debian-ubuntu +RUN apt-get -y update && apt-get install -y gpg-agent && \ + export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" && \ + echo "deb http://packages.cloud.google.com/apt $CLOUD_SDK_REPO main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && \ + curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \ + apt-get update -y && apt-get install google-cloud-sdk -y + + +# Install the Tensorflow Object Detection API from here +# https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md + +# Install object detection api dependencies - use non-interactive mode to set +# default tzdata config during installation. 
+RUN export DEBIAN_FRONTEND=noninteractive && \ + apt-get install -y protobuf-compiler python-pil python-lxml python-tk && \ + pip install Cython && \ + pip install contextlib2 && \ + pip install jupyter && \ + pip install matplotlib + +# Install pycocoapi +RUN git clone --depth 1 https://github.com/cocodataset/cocoapi.git && \ + cd cocoapi/PythonAPI && \ + make -j8 && \ + cp -r pycocotools /tensorflow/models/research && \ + cd ../../ && \ + rm -rf cocoapi + +# Get protoc 3.0.0, rather than the old version already in the container +RUN curl -OL "https://github.com/google/protobuf/releases/download/v3.0.0/protoc-3.0.0-linux-x86_64.zip" && \ + unzip protoc-3.0.0-linux-x86_64.zip -d proto3 && \ + mv proto3/bin/* /usr/local/bin && \ + mv proto3/include/* /usr/local/include && \ + rm -rf proto3 protoc-3.0.0-linux-x86_64.zip + +# Run protoc on the object detection repo +RUN cd /tensorflow/models/research && \ + protoc object_detection/protos/*.proto --python_out=. + +# Set the PYTHONPATH to finish installing the API +ENV PYTHONPATH $PYTHONPATH:/tensorflow/models/research:/tensorflow/models/research/slim + + +# Install wget (to make life easier below) and editors (to allow people to edit +# the files inside the container) +RUN apt-get install -y wget vim emacs nano + + +# Grab various data files which are used throughout the demo: dataset, +# pretrained model, and pretrained TensorFlow Lite model. Install these all in +# the same directories as recommended by the blog post. + +# Pets example dataset +RUN mkdir -p /tmp/pet_faces_tfrecord/ && \ + cd /tmp/pet_faces_tfrecord && \ + curl "http://download.tensorflow.org/models/object_detection/pet_faces_tfrecord.tar.gz" | tar xzf - + +# Pretrained model +# This one doesn't need its own directory, since it comes in a folder. +RUN cd /tmp && \ + curl -O "http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_0.75_depth_300x300_coco14_sync_2018_07_03.tar.gz" && \ + tar xzf ssd_mobilenet_v1_0.75_depth_300x300_coco14_sync_2018_07_03.tar.gz && \ + rm ssd_mobilenet_v1_0.75_depth_300x300_coco14_sync_2018_07_03.tar.gz + +# Trained TensorFlow Lite model. This should get replaced by one generated from +# export_tflite_ssd_graph.py when that command is called. 
+RUN cd /tmp && \ + curl -L -o tflite.zip \ + https://storage.googleapis.com/download.tensorflow.org/models/tflite/frozengraphs_ssd_mobilenet_v1_0.75_quant_pets_2018_06_29.zip && \ + unzip tflite.zip -d tflite && \ + rm tflite.zip + + +# Install Android development tools +# Inspired by the following sources: +# https://github.com/bitrise-docker/android/blob/master/Dockerfile +# https://github.com/reddit/docker-android-build/blob/master/Dockerfile + +# Set environment variables +ENV ANDROID_HOME /opt/android-sdk-linux +ENV ANDROID_NDK_HOME /opt/android-ndk-r14b +ENV PATH ${PATH}:${ANDROID_HOME}/tools:${ANDROID_HOME}/tools/bin:${ANDROID_HOME}/platform-tools + +# Install SDK tools +RUN cd /opt && \ + curl -OL https://dl.google.com/android/repository/sdk-tools-linux-4333796.zip && \ + unzip sdk-tools-linux-4333796.zip -d ${ANDROID_HOME} && \ + rm sdk-tools-linux-4333796.zip + +# Accept licenses before installing components, no need to echo y for each component +# License is valid for all the standard components in versions installed from this file +# Non-standard components: MIPS system images, preview versions, GDK (Google Glass) and Android Google TV require separate licenses, not accepted there +RUN yes | sdkmanager --licenses + +# Install platform tools, SDK platform, and other build tools +RUN yes | sdkmanager \ + "tools" \ + "platform-tools" \ + "platforms;android-27" \ + "platforms;android-23" \ + "build-tools;27.0.3" \ + "build-tools;23.0.3" + +# Install Android NDK (r14b) +RUN cd /opt && \ + curl -L -o android-ndk.zip http://dl.google.com/android/repository/android-ndk-r14b-linux-x86_64.zip && \ + unzip -q android-ndk.zip && \ + rm -f android-ndk.zip + +# Configure the build to use the things we just downloaded +RUN cd /tensorflow && \ + printf '\n\n\nn\ny\nn\nn\nn\ny\nn\nn\nn\nn\nn\nn\n\ny\n%s\n\n\n' ${ANDROID_HOME}|./configure + + +WORKDIR /tensorflow diff --git a/models/research/object_detection/dockerfiles/android/README.md b/models/research/object_detection/dockerfiles/android/README.md new file mode 100644 index 0000000000000000000000000000000000000000..69016cbb019fca4556b825262bc647c9ea6533fc --- /dev/null +++ b/models/research/object_detection/dockerfiles/android/README.md @@ -0,0 +1,69 @@ +# Dockerfile for the TPU and TensorFlow Lite Object Detection tutorial + +This Docker image automates the setup involved with training +object detection models on Google Cloud and building the Android TensorFlow Lite +demo app. We recommend using this container if you decide to work through our +tutorial on ["Training and serving a real-time mobile object detector in +30 minutes with Cloud TPUs"](https://medium.com/tensorflow/training-and-serving-a-realtime-mobile-object-detector-in-30-minutes-with-cloud-tpus-b78971cf1193), though of course it may be useful even if you would +like to use the Object Detection API outside the context of the tutorial. + +A couple words of warning: + +1. Docker containers do not have persistent storage. This means that any changes + you make to files inside the container will not persist if you restart + the container. When running through the tutorial, + **do not close the container**. +2. To be able to deploy the [Android app]( + https://github.com/tensorflow/examples/tree/master/lite/examples/object_detection/android) + (which you will build at the end of the tutorial), + you will need to kill any instances of `adb` running on the host machine. You + can accomplish this by closing all instances of Android Studio, and then + running `adb kill-server`. 
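For example, with all Android Studio instances closed, the host-side `adb` server can be stopped from a terminal before deploying the app (this simply restates the command mentioned in the warning above):

```
adb kill-server
```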
+
+You can install Docker by following the [instructions
+here](https://docs.docker.com/install/).
+
+## Running The Container
+
+From this directory, build the Dockerfile as follows (this takes a while):
+
+```
+docker build --tag detect-tf .
+```
+
+Run the container:
+
+```
+docker run --rm -it --privileged -p 6006:6006 detect-tf
+```
+
+When running the container, you will find yourself inside the `/tensorflow`
+directory, which is the path to the TensorFlow [source
+tree](https://github.com/tensorflow/tensorflow).
+
+## Text Editing
+
+The tutorial also requires you to occasionally edit files inside the source
+tree. This Docker image comes with `vim`, `nano`, and `emacs` preinstalled for
+your convenience.
+
+## What's In This Container
+
+This container is derived from the nightly build of TensorFlow, and contains
+the sources for TensorFlow at `/tensorflow`, as well as the
+[TensorFlow Models](https://github.com/tensorflow/models), which are available
+at `/tensorflow/models` (and contain the Object Detection API as a subdirectory
+at `/tensorflow/models/research/object_detection`).
+The Oxford-IIIT Pets dataset, the COCO pre-trained SSD + MobileNet (v1)
+checkpoint, and an example trained model are all available in `/tmp` in their
+respective folders.
+
+This container also has the `gsutil` and `gcloud` utilities, the `bazel` build
+tool, and all dependencies necessary to use the Object Detection API and to
+compile and install the TensorFlow Lite Android demo app.
+
+At various points throughout the tutorial, you may see references to the
+*research directory*. This refers to the `research` folder within the
+models repository, located at `/tensorflow/models/research`.
diff --git a/models/research/object_detection/eval_util.py b/models/research/object_detection/eval_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b365df19a8093ad2c2a2ad39b8dd46f6d1a82c7
--- /dev/null
+++ b/models/research/object_detection/eval_util.py
@@ -0,0 +1,1076 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================== +"""Common utility functions for evaluation.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import os +import re +import time + +import numpy as np +from six.moves import range +import tensorflow.compat.v1 as tf + +import tf_slim as slim + +from object_detection.core import box_list +from object_detection.core import box_list_ops +from object_detection.core import keypoint_ops +from object_detection.core import standard_fields as fields +from object_detection.metrics import coco_evaluation +from object_detection.protos import eval_pb2 +from object_detection.utils import label_map_util +from object_detection.utils import object_detection_evaluation +from object_detection.utils import ops +from object_detection.utils import shape_utils +from object_detection.utils import visualization_utils as vis_utils + +EVAL_KEYPOINT_METRIC = 'coco_keypoint_metrics' + +# A dictionary of metric names to classes that implement the metric. The classes +# in the dictionary must implement +# utils.object_detection_evaluation.DetectionEvaluator interface. +EVAL_METRICS_CLASS_DICT = { + 'coco_detection_metrics': + coco_evaluation.CocoDetectionEvaluator, + 'coco_keypoint_metrics': + coco_evaluation.CocoKeypointEvaluator, + 'coco_mask_metrics': + coco_evaluation.CocoMaskEvaluator, + 'coco_panoptic_metrics': + coco_evaluation.CocoPanopticSegmentationEvaluator, + 'oid_challenge_detection_metrics': + object_detection_evaluation.OpenImagesDetectionChallengeEvaluator, + 'oid_challenge_segmentation_metrics': + object_detection_evaluation + .OpenImagesInstanceSegmentationChallengeEvaluator, + 'pascal_voc_detection_metrics': + object_detection_evaluation.PascalDetectionEvaluator, + 'weighted_pascal_voc_detection_metrics': + object_detection_evaluation.WeightedPascalDetectionEvaluator, + 'precision_at_recall_detection_metrics': + object_detection_evaluation.PrecisionAtRecallDetectionEvaluator, + 'pascal_voc_instance_segmentation_metrics': + object_detection_evaluation.PascalInstanceSegmentationEvaluator, + 'weighted_pascal_voc_instance_segmentation_metrics': + object_detection_evaluation.WeightedPascalInstanceSegmentationEvaluator, + 'oid_V2_detection_metrics': + object_detection_evaluation.OpenImagesDetectionEvaluator, +} + +EVAL_DEFAULT_METRIC = 'coco_detection_metrics' + + +def write_metrics(metrics, global_step, summary_dir): + """Write metrics to a summary directory. + + Args: + metrics: A dictionary containing metric names and values. + global_step: Global step at which the metrics are computed. + summary_dir: Directory to write tensorflow summaries to. + """ + tf.logging.info('Writing metrics to tf summary.') + summary_writer = tf.summary.FileWriterCache.get(summary_dir) + for key in sorted(metrics): + summary = tf.Summary(value=[ + tf.Summary.Value(tag=key, simple_value=metrics[key]), + ]) + summary_writer.add_summary(summary, global_step) + tf.logging.info('%s: %f', key, metrics[key]) + tf.logging.info('Metrics written to tf summary.') + + +# TODO(rathodv): Add tests. 
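Before moving on to the visualization helper below, here is a minimal, illustrative sketch of how the `write_metrics` helper above might be invoked after an evaluation pass. The metric names, step value, and summary directory are made-up placeholders, not values taken from this module:

```
from object_detection import eval_util

# Placeholder metrics dictionary; any {metric name: scalar} mapping works.
metrics = {'DetectionBoxes_Precision/mAP': 0.42,
           'Losses/localization_loss': 0.87}
eval_util.write_metrics(metrics, global_step=20000,
                        summary_dir='/tmp/eval_summaries')
```

Each entry is written to the summary directory as a scalar summary tagged with its metric name, so the values show up directly in TensorBoard.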
+def visualize_detection_results(result_dict, + tag, + global_step, + categories, + summary_dir='', + export_dir='', + agnostic_mode=False, + show_groundtruth=False, + groundtruth_box_visualization_color='black', + min_score_thresh=.5, + max_num_predictions=20, + skip_scores=False, + skip_labels=False, + keep_image_id_for_visualization_export=False): + """Visualizes detection results and writes visualizations to image summaries. + + This function visualizes an image with its detected bounding boxes and writes + to image summaries which can be viewed on tensorboard. It optionally also + writes images to a directory. In the case of missing entry in the label map, + unknown class name in the visualization is shown as "N/A". + + Args: + result_dict: a dictionary holding groundtruth and detection + data corresponding to each image being evaluated. The following keys + are required: + 'original_image': a numpy array representing the image with shape + [1, height, width, 3] or [1, height, width, 1] + 'detection_boxes': a numpy array of shape [N, 4] + 'detection_scores': a numpy array of shape [N] + 'detection_classes': a numpy array of shape [N] + The following keys are optional: + 'groundtruth_boxes': a numpy array of shape [N, 4] + 'groundtruth_keypoints': a numpy array of shape [N, num_keypoints, 2] + Detections are assumed to be provided in decreasing order of score and for + display, and we assume that scores are probabilities between 0 and 1. + tag: tensorboard tag (string) to associate with image. + global_step: global step at which the visualization are generated. + categories: a list of dictionaries representing all possible categories. + Each dict in this list has the following keys: + 'id': (required) an integer id uniquely identifying this category + 'name': (required) string representing category name + e.g., 'cat', 'dog', 'pizza' + 'supercategory': (optional) string representing the supercategory + e.g., 'animal', 'vehicle', 'food', etc + summary_dir: the output directory to which the image summaries are written. + export_dir: the output directory to which images are written. If this is + empty (default), then images are not exported. + agnostic_mode: boolean (default: False) controlling whether to evaluate in + class-agnostic mode or not. 
+ show_groundtruth: boolean (default: False) controlling whether to show + groundtruth boxes in addition to detected boxes + groundtruth_box_visualization_color: box color for visualizing groundtruth + boxes + min_score_thresh: minimum score threshold for a box to be visualized + max_num_predictions: maximum number of detections to visualize + skip_scores: whether to skip score when drawing a single detection + skip_labels: whether to skip label when drawing a single detection + keep_image_id_for_visualization_export: whether to keep image identifier in + filename when exported to export_dir + Raises: + ValueError: if result_dict does not contain the expected keys (i.e., + 'original_image', 'detection_boxes', 'detection_scores', + 'detection_classes') + """ + detection_fields = fields.DetectionResultFields + input_fields = fields.InputDataFields + if not set([ + input_fields.original_image, + detection_fields.detection_boxes, + detection_fields.detection_scores, + detection_fields.detection_classes, + ]).issubset(set(result_dict.keys())): + raise ValueError('result_dict does not contain all expected keys.') + if show_groundtruth and input_fields.groundtruth_boxes not in result_dict: + raise ValueError('If show_groundtruth is enabled, result_dict must contain ' + 'groundtruth_boxes.') + tf.logging.info('Creating detection visualizations.') + category_index = label_map_util.create_category_index(categories) + + image = np.squeeze(result_dict[input_fields.original_image], axis=0) + if image.shape[2] == 1: # If one channel image, repeat in RGB. + image = np.tile(image, [1, 1, 3]) + detection_boxes = result_dict[detection_fields.detection_boxes] + detection_scores = result_dict[detection_fields.detection_scores] + detection_classes = np.int32((result_dict[ + detection_fields.detection_classes])) + detection_keypoints = result_dict.get(detection_fields.detection_keypoints) + detection_masks = result_dict.get(detection_fields.detection_masks) + detection_boundaries = result_dict.get(detection_fields.detection_boundaries) + + # Plot groundtruth underneath detections + if show_groundtruth: + groundtruth_boxes = result_dict[input_fields.groundtruth_boxes] + groundtruth_keypoints = result_dict.get(input_fields.groundtruth_keypoints) + vis_utils.visualize_boxes_and_labels_on_image_array( + image=image, + boxes=groundtruth_boxes, + classes=None, + scores=None, + category_index=category_index, + keypoints=groundtruth_keypoints, + use_normalized_coordinates=False, + max_boxes_to_draw=None, + groundtruth_box_visualization_color=groundtruth_box_visualization_color) + vis_utils.visualize_boxes_and_labels_on_image_array( + image, + detection_boxes, + detection_classes, + detection_scores, + category_index, + instance_masks=detection_masks, + instance_boundaries=detection_boundaries, + keypoints=detection_keypoints, + use_normalized_coordinates=False, + max_boxes_to_draw=max_num_predictions, + min_score_thresh=min_score_thresh, + agnostic_mode=agnostic_mode, + skip_scores=skip_scores, + skip_labels=skip_labels) + + if export_dir: + if keep_image_id_for_visualization_export and result_dict[fields. 
+ InputDataFields() + .key]: + export_path = os.path.join(export_dir, 'export-{}-{}.png'.format( + tag, result_dict[fields.InputDataFields().key])) + else: + export_path = os.path.join(export_dir, 'export-{}.png'.format(tag)) + vis_utils.save_image_array_as_png(image, export_path) + + summary = tf.Summary(value=[ + tf.Summary.Value( + tag=tag, + image=tf.Summary.Image( + encoded_image_string=vis_utils.encode_image_array_as_png_str( + image))) + ]) + summary_writer = tf.summary.FileWriterCache.get(summary_dir) + summary_writer.add_summary(summary, global_step) + + tf.logging.info('Detection visualizations written to summary with tag %s.', + tag) + + +def _run_checkpoint_once(tensor_dict, + evaluators=None, + batch_processor=None, + checkpoint_dirs=None, + variables_to_restore=None, + restore_fn=None, + num_batches=1, + master='', + save_graph=False, + save_graph_dir='', + losses_dict=None, + eval_export_path=None, + process_metrics_fn=None): + """Evaluates metrics defined in evaluators and returns summaries. + + This function loads the latest checkpoint in checkpoint_dirs and evaluates + all metrics defined in evaluators. The metrics are processed in batch by the + batch_processor. + + Args: + tensor_dict: a dictionary holding tensors representing a batch of detections + and corresponding groundtruth annotations. + evaluators: a list of object of type DetectionEvaluator to be used for + evaluation. Note that the metric names produced by different evaluators + must be unique. + batch_processor: a function taking four arguments: + 1. tensor_dict: the same tensor_dict that is passed in as the first + argument to this function. + 2. sess: a tensorflow session + 3. batch_index: an integer representing the index of the batch amongst + all batches + By default, batch_processor is None, which defaults to running: + return sess.run(tensor_dict) + To skip an image, it suffices to return an empty dictionary in place of + result_dict. + checkpoint_dirs: list of directories to load into an EnsembleModel. If it + has only one directory, EnsembleModel will not be used -- + a DetectionModel + will be instantiated directly. Not used if restore_fn is set. + variables_to_restore: None, or a dictionary mapping variable names found in + a checkpoint to model variables. The dictionary would normally be + generated by creating a tf.train.ExponentialMovingAverage object and + calling its variables_to_restore() method. Not used if restore_fn is set. + restore_fn: None, or a function that takes a tf.Session object and correctly + restores all necessary variables from the correct checkpoint file. If + None, attempts to restore from the first directory in checkpoint_dirs. + num_batches: the number of batches to use for evaluation. + master: the location of the Tensorflow session. + save_graph: whether or not the Tensorflow graph is stored as a pbtxt file. + save_graph_dir: where to store the Tensorflow graph on disk. If save_graph + is True this must be non-empty. + losses_dict: optional dictionary of scalar detection losses. + eval_export_path: Path for saving a json file that contains the detection + results in json format. + process_metrics_fn: a callback called with evaluation results after each + evaluation is done. It could be used e.g. to back up checkpoints with + best evaluation scores, or to call an external system to update evaluation + results in order to drive best hyper-parameter search. Parameters are: + int checkpoint_number, Dict[str, ObjectDetectionEvalMetrics] metrics, + str checkpoint_file path. 
+ + Returns: + global_step: the count of global steps. + all_evaluator_metrics: A dictionary containing metric names and values. + + Raises: + ValueError: if restore_fn is None and checkpoint_dirs doesn't have at least + one element. + ValueError: if save_graph is True and save_graph_dir is not defined. + """ + if save_graph and not save_graph_dir: + raise ValueError('`save_graph_dir` must be defined.') + sess = tf.Session(master, graph=tf.get_default_graph()) + sess.run(tf.global_variables_initializer()) + sess.run(tf.local_variables_initializer()) + sess.run(tf.tables_initializer()) + checkpoint_file = None + if restore_fn: + restore_fn(sess) + else: + if not checkpoint_dirs: + raise ValueError('`checkpoint_dirs` must have at least one entry.') + checkpoint_file = tf.train.latest_checkpoint(checkpoint_dirs[0]) + saver = tf.train.Saver(variables_to_restore) + saver.restore(sess, checkpoint_file) + + if save_graph: + tf.train.write_graph(sess.graph_def, save_graph_dir, 'eval.pbtxt') + + counters = {'skipped': 0, 'success': 0} + aggregate_result_losses_dict = collections.defaultdict(list) + with slim.queues.QueueRunners(sess): + try: + for batch in range(int(num_batches)): + if (batch + 1) % 100 == 0: + tf.logging.info('Running eval ops batch %d/%d', batch + 1, + num_batches) + if not batch_processor: + try: + if not losses_dict: + losses_dict = {} + result_dict, result_losses_dict = sess.run([tensor_dict, + losses_dict]) + counters['success'] += 1 + except tf.errors.InvalidArgumentError: + tf.logging.info('Skipping image') + counters['skipped'] += 1 + result_dict = {} + else: + result_dict, result_losses_dict = batch_processor( + tensor_dict, sess, batch, counters, losses_dict=losses_dict) + if not result_dict: + continue + for key, value in iter(result_losses_dict.items()): + aggregate_result_losses_dict[key].append(value) + for evaluator in evaluators: + # TODO(b/65130867): Use image_id tensor once we fix the input data + # decoders to return correct image_id. + # TODO(akuznetsa): result_dict contains batches of images, while + # add_single_ground_truth_image_info expects a single image. Fix + if (isinstance(result_dict, dict) and + fields.InputDataFields.key in result_dict and + result_dict[fields.InputDataFields.key]): + image_id = result_dict[fields.InputDataFields.key] + else: + image_id = batch + evaluator.add_single_ground_truth_image_info( + image_id=image_id, groundtruth_dict=result_dict) + evaluator.add_single_detected_image_info( + image_id=image_id, detections_dict=result_dict) + tf.logging.info('Running eval batches done.') + except tf.errors.OutOfRangeError: + tf.logging.info('Done evaluating -- epoch limit reached') + finally: + # When done, ask the threads to stop. 
+ tf.logging.info('# success: %d', counters['success']) + tf.logging.info('# skipped: %d', counters['skipped']) + all_evaluator_metrics = {} + if eval_export_path and eval_export_path is not None: + for evaluator in evaluators: + if (isinstance(evaluator, coco_evaluation.CocoDetectionEvaluator) or + isinstance(evaluator, coco_evaluation.CocoMaskEvaluator)): + tf.logging.info('Started dumping to json file.') + evaluator.dump_detections_to_json_file( + json_output_path=eval_export_path) + tf.logging.info('Finished dumping to json file.') + for evaluator in evaluators: + metrics = evaluator.evaluate() + evaluator.clear() + if any(key in all_evaluator_metrics for key in metrics): + raise ValueError('Metric names between evaluators must not collide.') + all_evaluator_metrics.update(metrics) + global_step = tf.train.global_step(sess, tf.train.get_global_step()) + + for key, value in iter(aggregate_result_losses_dict.items()): + all_evaluator_metrics['Losses/' + key] = np.mean(value) + if process_metrics_fn and checkpoint_file: + m = re.search(r'model.ckpt-(\d+)$', checkpoint_file) + if not m: + tf.logging.error('Failed to parse checkpoint number from: %s', + checkpoint_file) + else: + checkpoint_number = int(m.group(1)) + process_metrics_fn(checkpoint_number, all_evaluator_metrics, + checkpoint_file) + sess.close() + return (global_step, all_evaluator_metrics) + + +# TODO(rathodv): Add tests. +def repeated_checkpoint_run(tensor_dict, + summary_dir, + evaluators, + batch_processor=None, + checkpoint_dirs=None, + variables_to_restore=None, + restore_fn=None, + num_batches=1, + eval_interval_secs=120, + max_number_of_evaluations=None, + max_evaluation_global_step=None, + master='', + save_graph=False, + save_graph_dir='', + losses_dict=None, + eval_export_path=None, + process_metrics_fn=None): + """Periodically evaluates desired tensors using checkpoint_dirs or restore_fn. + + This function repeatedly loads a checkpoint and evaluates a desired + set of tensors (provided by tensor_dict) and hands the resulting numpy + arrays to a function result_processor which can be used to further + process/save/visualize the results. + + Args: + tensor_dict: a dictionary holding tensors representing a batch of detections + and corresponding groundtruth annotations. + summary_dir: a directory to write metrics summaries. + evaluators: a list of object of type DetectionEvaluator to be used for + evaluation. Note that the metric names produced by different evaluators + must be unique. + batch_processor: a function taking three arguments: + 1. tensor_dict: the same tensor_dict that is passed in as the first + argument to this function. + 2. sess: a tensorflow session + 3. batch_index: an integer representing the index of the batch amongst + all batches + By default, batch_processor is None, which defaults to running: + return sess.run(tensor_dict) + checkpoint_dirs: list of directories to load into a DetectionModel or an + EnsembleModel if restore_fn isn't set. Also used to determine when to run + next evaluation. Must have at least one element. + variables_to_restore: None, or a dictionary mapping variable names found in + a checkpoint to model variables. The dictionary would normally be + generated by creating a tf.train.ExponentialMovingAverage object and + calling its variables_to_restore() method. Not used if restore_fn is set. + restore_fn: a function that takes a tf.Session object and correctly restores + all necessary variables from the correct checkpoint file. 
+ num_batches: the number of batches to use for evaluation. + eval_interval_secs: the number of seconds between each evaluation run. + max_number_of_evaluations: the max number of iterations of the evaluation. + If the value is left as None the evaluation continues indefinitely. + max_evaluation_global_step: global step when evaluation stops. + master: the location of the Tensorflow session. + save_graph: whether or not the Tensorflow graph is saved as a pbtxt file. + save_graph_dir: where to save on disk the Tensorflow graph. If store_graph + is True this must be non-empty. + losses_dict: optional dictionary of scalar detection losses. + eval_export_path: Path for saving a json file that contains the detection + results in json format. + process_metrics_fn: a callback called with evaluation results after each + evaluation is done. It could be used e.g. to back up checkpoints with + best evaluation scores, or to call an external system to update evaluation + results in order to drive best hyper-parameter search. Parameters are: + int checkpoint_number, Dict[str, ObjectDetectionEvalMetrics] metrics, + str checkpoint_file path. + + Returns: + metrics: A dictionary containing metric names and values in the latest + evaluation. + + Raises: + ValueError: if max_num_of_evaluations is not None or a positive number. + ValueError: if checkpoint_dirs doesn't have at least one element. + """ + if max_number_of_evaluations and max_number_of_evaluations <= 0: + raise ValueError( + '`max_number_of_evaluations` must be either None or a positive number.') + if max_evaluation_global_step and max_evaluation_global_step <= 0: + raise ValueError( + '`max_evaluation_global_step` must be either None or positive.') + + if not checkpoint_dirs: + raise ValueError('`checkpoint_dirs` must have at least one entry.') + + last_evaluated_model_path = None + number_of_evaluations = 0 + while True: + start = time.time() + tf.logging.info('Starting evaluation at ' + time.strftime( + '%Y-%m-%d-%H:%M:%S', time.gmtime())) + model_path = tf.train.latest_checkpoint(checkpoint_dirs[0]) + if not model_path: + tf.logging.info('No model found in %s. Will try again in %d seconds', + checkpoint_dirs[0], eval_interval_secs) + elif model_path == last_evaluated_model_path: + tf.logging.info('Found already evaluated checkpoint. 
Will try again in ' + '%d seconds', eval_interval_secs) + else: + last_evaluated_model_path = model_path + global_step, metrics = _run_checkpoint_once( + tensor_dict, + evaluators, + batch_processor, + checkpoint_dirs, + variables_to_restore, + restore_fn, + num_batches, + master, + save_graph, + save_graph_dir, + losses_dict=losses_dict, + eval_export_path=eval_export_path, + process_metrics_fn=process_metrics_fn) + write_metrics(metrics, global_step, summary_dir) + if (max_evaluation_global_step and + global_step >= max_evaluation_global_step): + tf.logging.info('Finished evaluation!') + break + number_of_evaluations += 1 + + if (max_number_of_evaluations and + number_of_evaluations >= max_number_of_evaluations): + tf.logging.info('Finished evaluation!') + break + time_to_next_eval = start + eval_interval_secs - time.time() + if time_to_next_eval > 0: + time.sleep(time_to_next_eval) + + return metrics + + +def _scale_box_to_absolute(args): + boxes, image_shape = args + return box_list_ops.to_absolute_coordinates( + box_list.BoxList(boxes), image_shape[0], image_shape[1]).get() + + +def _resize_detection_masks(args): + detection_boxes, detection_masks, image_shape = args + detection_masks_reframed = ops.reframe_box_masks_to_image_masks( + detection_masks, detection_boxes, image_shape[0], image_shape[1]) + return tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8) + + +def _resize_groundtruth_masks(args): + """Resizes groundgtruth masks to the original image size.""" + mask, true_image_shape, original_image_shape = args + true_height = true_image_shape[0] + true_width = true_image_shape[1] + mask = mask[:, :true_height, :true_width] + mask = tf.expand_dims(mask, 3) + mask = tf.image.resize_images( + mask, + original_image_shape, + method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, + align_corners=True) + return tf.cast(tf.squeeze(mask, 3), tf.uint8) + + +def _scale_keypoint_to_absolute(args): + keypoints, image_shape = args + return keypoint_ops.scale(keypoints, image_shape[0], image_shape[1]) + + +def result_dict_for_single_example(image, + key, + detections, + groundtruth=None, + class_agnostic=False, + scale_to_absolute=False): + """Merges all detection and groundtruth information for a single example. + + Note that evaluation tools require classes that are 1-indexed, and so this + function performs the offset. If `class_agnostic` is True, all output classes + have label 1. + + Args: + image: A single 4D uint8 image tensor of shape [1, H, W, C]. + key: A single string tensor identifying the image. + detections: A dictionary of detections, returned from + DetectionModel.postprocess(). + groundtruth: (Optional) Dictionary of groundtruth items, with fields: + 'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in + normalized coordinates. + 'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes. + 'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional) + 'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional) + 'groundtruth_difficult': [num_boxes] int64 tensor. (Optional) + 'groundtruth_group_of': [num_boxes] int64 tensor. (Optional) + 'groundtruth_instance_masks': 3D int64 tensor of instance masks + (Optional). + 'groundtruth_keypoints': [num_boxes, num_keypoints, 2] float32 tensor with + keypoints (Optional). + class_agnostic: Boolean indicating whether the detections are class-agnostic + (i.e. binary). Default False. + scale_to_absolute: Boolean indicating whether boxes and keypoints should be + scaled to absolute coordinates. 
Note that for IoU based evaluations, it + does not matter whether boxes are expressed in absolute or relative + coordinates. Default False. + + Returns: + A dictionary with: + 'original_image': A [1, H, W, C] uint8 image tensor. + 'key': A string tensor with image identifier. + 'detection_boxes': [max_detections, 4] float32 tensor of boxes, in + normalized or absolute coordinates, depending on the value of + `scale_to_absolute`. + 'detection_scores': [max_detections] float32 tensor of scores. + 'detection_classes': [max_detections] int64 tensor of 1-indexed classes. + 'detection_masks': [max_detections, H, W] float32 tensor of binarized + masks, reframed to full image masks. + 'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in + normalized or absolute coordinates, depending on the value of + `scale_to_absolute`. (Optional) + 'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes. + (Optional) + 'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional) + 'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional) + 'groundtruth_difficult': [num_boxes] int64 tensor. (Optional) + 'groundtruth_group_of': [num_boxes] int64 tensor. (Optional) + 'groundtruth_instance_masks': 3D int64 tensor of instance masks + (Optional). + 'groundtruth_keypoints': [num_boxes, num_keypoints, 2] float32 tensor with + keypoints (Optional). + """ + + if groundtruth: + max_gt_boxes = tf.shape( + groundtruth[fields.InputDataFields.groundtruth_boxes])[0] + for gt_key in groundtruth: + # expand groundtruth dict along the batch dimension. + groundtruth[gt_key] = tf.expand_dims(groundtruth[gt_key], 0) + + for detection_key in detections: + detections[detection_key] = tf.expand_dims( + detections[detection_key][0], axis=0) + + batched_output_dict = result_dict_for_batched_example( + image, + tf.expand_dims(key, 0), + detections, + groundtruth, + class_agnostic, + scale_to_absolute, + max_gt_boxes=max_gt_boxes) + + exclude_keys = [ + fields.InputDataFields.original_image, + fields.DetectionResultFields.num_detections, + fields.InputDataFields.num_groundtruth_boxes + ] + + output_dict = { + fields.InputDataFields.original_image: + batched_output_dict[fields.InputDataFields.original_image] + } + + for key in batched_output_dict: + # remove the batch dimension. + if key not in exclude_keys: + output_dict[key] = tf.squeeze(batched_output_dict[key], 0) + return output_dict + + +def result_dict_for_batched_example(images, + keys, + detections, + groundtruth=None, + class_agnostic=False, + scale_to_absolute=False, + original_image_spatial_shapes=None, + true_image_shapes=None, + max_gt_boxes=None): + """Merges all detection and groundtruth information for a single example. + + Note that evaluation tools require classes that are 1-indexed, and so this + function performs the offset. If `class_agnostic` is True, all output classes + have label 1. + The groundtruth coordinates of boxes/keypoints in 'groundtruth' dictionary are + normalized relative to the (potentially padded) input image, while the + coordinates in 'detection' dictionary are normalized relative to the true + image shape. + + Args: + images: A single 4D uint8 image tensor of shape [batch_size, H, W, C]. + keys: A [batch_size] string/int tensor with image identifier. + detections: A dictionary of detections, returned from + DetectionModel.postprocess(). 
+ groundtruth: (Optional) Dictionary of groundtruth items, with fields: + 'groundtruth_boxes': [batch_size, max_number_of_boxes, 4] float32 tensor + of boxes, in normalized coordinates. + 'groundtruth_classes': [batch_size, max_number_of_boxes] int64 tensor of + 1-indexed classes. + 'groundtruth_area': [batch_size, max_number_of_boxes] float32 tensor of + bbox area. (Optional) + 'groundtruth_is_crowd':[batch_size, max_number_of_boxes] int64 + tensor. (Optional) + 'groundtruth_difficult': [batch_size, max_number_of_boxes] int64 + tensor. (Optional) + 'groundtruth_group_of': [batch_size, max_number_of_boxes] int64 + tensor. (Optional) + 'groundtruth_instance_masks': 4D int64 tensor of instance + masks (Optional). + 'groundtruth_keypoints': [batch_size, max_number_of_boxes, num_keypoints, + 2] float32 tensor with keypoints (Optional). + 'groundtruth_keypoint_visibilities': [batch_size, max_number_of_boxes, + num_keypoints] bool tensor with keypoint visibilities (Optional). + 'groundtruth_labeled_classes': [batch_size, num_classes] int64 + tensor of 1-indexed classes. (Optional) + class_agnostic: Boolean indicating whether the detections are class-agnostic + (i.e. binary). Default False. + scale_to_absolute: Boolean indicating whether boxes and keypoints should be + scaled to absolute coordinates. Note that for IoU based evaluations, it + does not matter whether boxes are expressed in absolute or relative + coordinates. Default False. + original_image_spatial_shapes: A 2D int32 tensor of shape [batch_size, 2] + used to resize the image. When set to None, the image size is retained. + true_image_shapes: A 2D int32 tensor of shape [batch_size, 3] + containing the size of the unpadded original_image. + max_gt_boxes: [batch_size] tensor representing the maximum number of + groundtruth boxes to pad. + + Returns: + A dictionary with: + 'original_image': A [batch_size, H, W, C] uint8 image tensor. + 'original_image_spatial_shape': A [batch_size, 2] tensor containing the + original image sizes. + 'true_image_shape': A [batch_size, 3] tensor containing the size of + the unpadded original_image. + 'key': A [batch_size] string tensor with image identifier. + 'detection_boxes': [batch_size, max_detections, 4] float32 tensor of boxes, + in normalized or absolute coordinates, depending on the value of + `scale_to_absolute`. + 'detection_scores': [batch_size, max_detections] float32 tensor of scores. + 'detection_classes': [batch_size, max_detections] int64 tensor of 1-indexed + classes. + 'detection_masks': [batch_size, max_detections, H, W] float32 tensor of + binarized masks, reframed to full image masks. (Optional) + 'detection_keypoints': [batch_size, max_detections, num_keypoints, 2] + float32 tensor containing keypoint coordinates. (Optional) + 'detection_keypoint_scores': [batch_size, max_detections, num_keypoints] + float32 tensor containing keypoint scores. (Optional) + 'num_detections': [batch_size] int64 tensor containing number of valid + detections. + 'groundtruth_boxes': [batch_size, num_boxes, 4] float32 tensor of boxes, in + normalized or absolute coordinates, depending on the value of + `scale_to_absolute`. (Optional) + 'groundtruth_classes': [batch_size, num_boxes] int64 tensor of 1-indexed + classes. (Optional) + 'groundtruth_area': [batch_size, num_boxes] float32 tensor of bbox + area. (Optional) + 'groundtruth_is_crowd': [batch_size, num_boxes] int64 tensor. (Optional) + 'groundtruth_difficult': [batch_size, num_boxes] int64 tensor. 
(Optional) + 'groundtruth_group_of': [batch_size, num_boxes] int64 tensor. (Optional) + 'groundtruth_instance_masks': 4D int64 tensor of instance masks + (Optional). + 'groundtruth_keypoints': [batch_size, num_boxes, num_keypoints, 2] float32 + tensor with keypoints (Optional). + 'groundtruth_keypoint_visibilities': [batch_size, num_boxes, num_keypoints] + bool tensor with keypoint visibilities (Optional). + 'groundtruth_labeled_classes': [batch_size, num_classes] int64 tensor + of 1-indexed classes. (Optional) + 'num_groundtruth_boxes': [batch_size] tensor containing the maximum number + of groundtruth boxes per image. + + Raises: + ValueError: if original_image_spatial_shape is not 2D int32 tensor of shape + [2]. + ValueError: if true_image_shapes is not 2D int32 tensor of shape + [3]. + """ + label_id_offset = 1 # Applying label id offset (b/63711816) + + input_data_fields = fields.InputDataFields + if original_image_spatial_shapes is None: + original_image_spatial_shapes = tf.tile( + tf.expand_dims(tf.shape(images)[1:3], axis=0), + multiples=[tf.shape(images)[0], 1]) + else: + if (len(original_image_spatial_shapes.shape) != 2 and + original_image_spatial_shapes.shape[1] != 2): + raise ValueError( + '`original_image_spatial_shape` should be a 2D tensor of shape ' + '[batch_size, 2].') + + if true_image_shapes is None: + true_image_shapes = tf.tile( + tf.expand_dims(tf.shape(images)[1:4], axis=0), + multiples=[tf.shape(images)[0], 1]) + else: + if (len(true_image_shapes.shape) != 2 + and true_image_shapes.shape[1] != 3): + raise ValueError('`true_image_shapes` should be a 2D tensor of ' + 'shape [batch_size, 3].') + + output_dict = { + input_data_fields.original_image: + images, + input_data_fields.key: + keys, + input_data_fields.original_image_spatial_shape: ( + original_image_spatial_shapes), + input_data_fields.true_image_shape: + true_image_shapes + } + + detection_fields = fields.DetectionResultFields + detection_boxes = detections[detection_fields.detection_boxes] + detection_scores = detections[detection_fields.detection_scores] + num_detections = tf.cast(detections[detection_fields.num_detections], + dtype=tf.int32) + + if class_agnostic: + detection_classes = tf.ones_like(detection_scores, dtype=tf.int64) + else: + detection_classes = ( + tf.to_int64(detections[detection_fields.detection_classes]) + + label_id_offset) + + if scale_to_absolute: + output_dict[detection_fields.detection_boxes] = ( + shape_utils.static_or_dynamic_map_fn( + _scale_box_to_absolute, + elems=[detection_boxes, original_image_spatial_shapes], + dtype=tf.float32)) + else: + output_dict[detection_fields.detection_boxes] = detection_boxes + output_dict[detection_fields.detection_classes] = detection_classes + output_dict[detection_fields.detection_scores] = detection_scores + output_dict[detection_fields.num_detections] = num_detections + + if detection_fields.detection_masks in detections: + detection_masks = detections[detection_fields.detection_masks] + # TODO(rathodv): This should be done in model's postprocess + # function ideally. 
+ output_dict[detection_fields.detection_masks] = ( + shape_utils.static_or_dynamic_map_fn( + _resize_detection_masks, + elems=[detection_boxes, detection_masks, + original_image_spatial_shapes], + dtype=tf.uint8)) + + if detection_fields.detection_keypoints in detections: + detection_keypoints = detections[detection_fields.detection_keypoints] + output_dict[detection_fields.detection_keypoints] = detection_keypoints + if scale_to_absolute: + output_dict[detection_fields.detection_keypoints] = ( + shape_utils.static_or_dynamic_map_fn( + _scale_keypoint_to_absolute, + elems=[detection_keypoints, original_image_spatial_shapes], + dtype=tf.float32)) + if detection_fields.detection_keypoint_scores in detections: + output_dict[detection_fields.detection_keypoint_scores] = detections[ + detection_fields.detection_keypoint_scores] + else: + output_dict[detection_fields.detection_keypoint_scores] = tf.ones_like( + detections[detection_fields.detection_keypoints][:, :, :, 0]) + + if groundtruth: + if max_gt_boxes is None: + if input_data_fields.num_groundtruth_boxes in groundtruth: + max_gt_boxes = groundtruth[input_data_fields.num_groundtruth_boxes] + else: + raise ValueError( + 'max_gt_boxes must be provided when processing batched examples.') + + if input_data_fields.groundtruth_instance_masks in groundtruth: + masks = groundtruth[input_data_fields.groundtruth_instance_masks] + groundtruth[input_data_fields.groundtruth_instance_masks] = ( + shape_utils.static_or_dynamic_map_fn( + _resize_groundtruth_masks, + elems=[masks, true_image_shapes, original_image_spatial_shapes], + dtype=tf.uint8)) + + output_dict.update(groundtruth) + + image_shape = tf.cast(tf.shape(images), tf.float32) + image_height, image_width = image_shape[1], image_shape[2] + + def _scale_box_to_normalized_true_image(args): + """Scale the box coordinates to be relative to the true image shape.""" + boxes, true_image_shape = args + true_image_shape = tf.cast(true_image_shape, tf.float32) + true_height, true_width = true_image_shape[0], true_image_shape[1] + normalized_window = tf.stack([0.0, 0.0, true_height / image_height, + true_width / image_width]) + return box_list_ops.change_coordinate_frame( + box_list.BoxList(boxes), normalized_window).get() + + groundtruth_boxes = groundtruth[input_data_fields.groundtruth_boxes] + groundtruth_boxes = shape_utils.static_or_dynamic_map_fn( + _scale_box_to_normalized_true_image, + elems=[groundtruth_boxes, true_image_shapes], dtype=tf.float32) + output_dict[input_data_fields.groundtruth_boxes] = groundtruth_boxes + + if input_data_fields.groundtruth_keypoints in groundtruth: + # If groundtruth_keypoints is in the groundtruth dictionary. Update the + # coordinates to conform with the true image shape. 
+ def _scale_keypoints_to_normalized_true_image(args): + """Scale the box coordinates to be relative to the true image shape.""" + keypoints, true_image_shape = args + true_image_shape = tf.cast(true_image_shape, tf.float32) + true_height, true_width = true_image_shape[0], true_image_shape[1] + normalized_window = tf.stack( + [0.0, 0.0, true_height / image_height, true_width / image_width]) + return keypoint_ops.change_coordinate_frame(keypoints, + normalized_window) + + groundtruth_keypoints = groundtruth[ + input_data_fields.groundtruth_keypoints] + groundtruth_keypoints = shape_utils.static_or_dynamic_map_fn( + _scale_keypoints_to_normalized_true_image, + elems=[groundtruth_keypoints, true_image_shapes], + dtype=tf.float32) + output_dict[ + input_data_fields.groundtruth_keypoints] = groundtruth_keypoints + + if scale_to_absolute: + groundtruth_boxes = output_dict[input_data_fields.groundtruth_boxes] + output_dict[input_data_fields.groundtruth_boxes] = ( + shape_utils.static_or_dynamic_map_fn( + _scale_box_to_absolute, + elems=[groundtruth_boxes, original_image_spatial_shapes], + dtype=tf.float32)) + if input_data_fields.groundtruth_keypoints in groundtruth: + groundtruth_keypoints = output_dict[ + input_data_fields.groundtruth_keypoints] + output_dict[input_data_fields.groundtruth_keypoints] = ( + shape_utils.static_or_dynamic_map_fn( + _scale_keypoint_to_absolute, + elems=[groundtruth_keypoints, original_image_spatial_shapes], + dtype=tf.float32)) + + # For class-agnostic models, groundtruth classes all become 1. + if class_agnostic: + groundtruth_classes = groundtruth[input_data_fields.groundtruth_classes] + groundtruth_classes = tf.ones_like(groundtruth_classes, dtype=tf.int64) + output_dict[input_data_fields.groundtruth_classes] = groundtruth_classes + + output_dict[input_data_fields.num_groundtruth_boxes] = max_gt_boxes + + return output_dict + + +def get_evaluators(eval_config, categories, evaluator_options=None): + """Returns the evaluator class according to eval_config, valid for categories. + + Args: + eval_config: An `eval_pb2.EvalConfig`. + categories: A list of dicts, each of which has the following keys - + 'id': (required) an integer id uniquely identifying this category. + 'name': (required) string representing category name e.g., 'cat', 'dog'. + 'keypoints': (optional) dict mapping this category's keypoints to unique + ids. + evaluator_options: A dictionary of metric names (see + EVAL_METRICS_CLASS_DICT) to `DetectionEvaluator` initialization + keyword arguments. For example: + evalator_options = { + 'coco_detection_metrics': {'include_metrics_per_category': True} + } + + Returns: + An list of instances of DetectionEvaluator. + + Raises: + ValueError: if metric is not in the metric class dictionary. 
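+
+    A minimal usage sketch (the values below are illustrative only):
+      eval_config = eval_pb2.EvalConfig()
+      eval_config.metrics_set.extend(['coco_detection_metrics'])
+      categories = [{'id': 1, 'name': 'person'}, {'id': 2, 'name': 'dog'}]
+      evaluators = get_evaluators(eval_config, categories)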
+ """ + evaluator_options = evaluator_options or {} + eval_metric_fn_keys = eval_config.metrics_set + if not eval_metric_fn_keys: + eval_metric_fn_keys = [EVAL_DEFAULT_METRIC] + evaluators_list = [] + for eval_metric_fn_key in eval_metric_fn_keys: + if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT: + raise ValueError('Metric not found: {}'.format(eval_metric_fn_key)) + kwargs_dict = (evaluator_options[eval_metric_fn_key] if eval_metric_fn_key + in evaluator_options else {}) + evaluators_list.append(EVAL_METRICS_CLASS_DICT[eval_metric_fn_key]( + categories, + **kwargs_dict)) + + if isinstance(eval_config, eval_pb2.EvalConfig): + parameterized_metrics = eval_config.parameterized_metric + for parameterized_metric in parameterized_metrics: + assert parameterized_metric.HasField('parameterized_metric') + if parameterized_metric.WhichOneof( + 'parameterized_metric') == EVAL_KEYPOINT_METRIC: + keypoint_metrics = parameterized_metric.coco_keypoint_metrics + # Create category to keypoints mapping dict. + category_keypoints = {} + class_label = keypoint_metrics.class_label + category = None + for cat in categories: + if cat['name'] == class_label: + category = cat + break + if not category: + continue + keypoints_for_this_class = category['keypoints'] + category_keypoints = [{ + 'id': keypoints_for_this_class[kp_name], 'name': kp_name + } for kp_name in keypoints_for_this_class] + # Create keypoint evaluator for this category. + evaluators_list.append(EVAL_METRICS_CLASS_DICT[EVAL_KEYPOINT_METRIC]( + category['id'], category_keypoints, class_label, + keypoint_metrics.keypoint_label_to_sigmas)) + return evaluators_list + + +def get_eval_metric_ops_for_evaluators(eval_config, + categories, + eval_dict): + """Returns eval metrics ops to use with `tf.estimator.EstimatorSpec`. + + Args: + eval_config: An `eval_pb2.EvalConfig`. + categories: A list of dicts, each of which has the following keys - + 'id': (required) an integer id uniquely identifying this category. + 'name': (required) string representing category name e.g., 'cat', 'dog'. + eval_dict: An evaluation dictionary, returned from + result_dict_for_single_example(). + + Returns: + A dictionary of metric names to tuple of value_op and update_op that can be + used as eval metric ops in tf.EstimatorSpec. + """ + eval_metric_ops = {} + evaluator_options = evaluator_options_from_eval_config(eval_config) + evaluators_list = get_evaluators(eval_config, categories, evaluator_options) + for evaluator in evaluators_list: + eval_metric_ops.update(evaluator.get_estimator_eval_metric_ops( + eval_dict)) + return eval_metric_ops + + +def evaluator_options_from_eval_config(eval_config): + """Produces a dictionary of evaluation options for each eval metric. + + Args: + eval_config: An `eval_pb2.EvalConfig`. + + Returns: + evaluator_options: A dictionary of metric names (see + EVAL_METRICS_CLASS_DICT) to `DetectionEvaluator` initialization + keyword arguments. 
For example: + evalator_options = { + 'coco_detection_metrics': {'include_metrics_per_category': True} + } + """ + eval_metric_fn_keys = eval_config.metrics_set + evaluator_options = {} + for eval_metric_fn_key in eval_metric_fn_keys: + if eval_metric_fn_key in ('coco_detection_metrics', 'coco_mask_metrics'): + evaluator_options[eval_metric_fn_key] = { + 'include_metrics_per_category': ( + eval_config.include_metrics_per_category) + } + elif eval_metric_fn_key == 'precision_at_recall_detection_metrics': + evaluator_options[eval_metric_fn_key] = { + 'recall_lower_bound': (eval_config.recall_lower_bound), + 'recall_upper_bound': (eval_config.recall_upper_bound) + } + return evaluator_options diff --git a/models/research/object_detection/eval_util_test.py b/models/research/object_detection/eval_util_test.py new file mode 100644 index 0000000000000000000000000000000000000000..d0623f1fcda50482ee98eccb2e2e62ef10b88be3 --- /dev/null +++ b/models/research/object_detection/eval_util_test.py @@ -0,0 +1,407 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for eval_util.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest +from absl.testing import parameterized + +import numpy as np +import six +from six.moves import range +import tensorflow.compat.v1 as tf + +from object_detection import eval_util +from object_detection.core import standard_fields as fields +from object_detection.metrics import coco_evaluation +from object_detection.protos import eval_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +class EvalUtilTest(test_case.TestCase, parameterized.TestCase): + + def _get_categories_list(self): + return [{'id': 1, 'name': 'person'}, + {'id': 2, 'name': 'dog'}, + {'id': 3, 'name': 'cat'}] + + def _get_categories_list_with_keypoints(self): + return [{ + 'id': 1, + 'name': 'person', + 'keypoints': { + 'left_eye': 0, + 'right_eye': 3 + } + }, { + 'id': 2, + 'name': 'dog', + 'keypoints': { + 'tail_start': 1, + 'mouth': 2 + } + }, { + 'id': 3, + 'name': 'cat' + }] + + def _make_evaluation_dict(self, + resized_groundtruth_masks=False, + batch_size=1, + max_gt_boxes=None, + scale_to_absolute=False): + input_data_fields = fields.InputDataFields + detection_fields = fields.DetectionResultFields + + image = tf.zeros(shape=[batch_size, 20, 20, 3], dtype=tf.uint8) + if batch_size == 1: + key = tf.constant('image1') + else: + key = tf.constant([str(i) for i in range(batch_size)]) + detection_boxes = tf.tile(tf.constant([[[0., 0., 1., 1.]]]), + multiples=[batch_size, 1, 1]) + detection_scores = tf.tile(tf.constant([[0.8]]), multiples=[batch_size, 1]) + detection_classes = tf.tile(tf.constant([[0]]), multiples=[batch_size, 1]) + detection_masks = tf.tile(tf.ones(shape=[1, 1, 20, 20], dtype=tf.float32), + multiples=[batch_size, 
1, 1, 1]) + num_detections = tf.ones([batch_size]) + groundtruth_boxes = tf.constant([[0., 0., 1., 1.]]) + groundtruth_classes = tf.constant([1]) + groundtruth_instance_masks = tf.ones(shape=[1, 20, 20], dtype=tf.uint8) + groundtruth_keypoints = tf.constant([[0.0, 0.0], [0.5, 0.5], [1.0, 1.0]]) + if resized_groundtruth_masks: + groundtruth_instance_masks = tf.ones(shape=[1, 10, 10], dtype=tf.uint8) + + if batch_size > 1: + groundtruth_boxes = tf.tile(tf.expand_dims(groundtruth_boxes, 0), + multiples=[batch_size, 1, 1]) + groundtruth_classes = tf.tile(tf.expand_dims(groundtruth_classes, 0), + multiples=[batch_size, 1]) + groundtruth_instance_masks = tf.tile( + tf.expand_dims(groundtruth_instance_masks, 0), + multiples=[batch_size, 1, 1, 1]) + groundtruth_keypoints = tf.tile( + tf.expand_dims(groundtruth_keypoints, 0), + multiples=[batch_size, 1, 1]) + + detections = { + detection_fields.detection_boxes: detection_boxes, + detection_fields.detection_scores: detection_scores, + detection_fields.detection_classes: detection_classes, + detection_fields.detection_masks: detection_masks, + detection_fields.num_detections: num_detections + } + groundtruth = { + input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: groundtruth_classes, + input_data_fields.groundtruth_keypoints: groundtruth_keypoints, + input_data_fields.groundtruth_instance_masks: groundtruth_instance_masks + } + if batch_size > 1: + return eval_util.result_dict_for_batched_example( + image, key, detections, groundtruth, + scale_to_absolute=scale_to_absolute, + max_gt_boxes=max_gt_boxes) + else: + return eval_util.result_dict_for_single_example( + image, key, detections, groundtruth, + scale_to_absolute=scale_to_absolute) + + @parameterized.parameters( + {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': True}, + {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': True}, + {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': False}, + {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': False} + ) + @unittest.skipIf(tf_version.is_tf2(), 'Only compatible with TF1.X') + def test_get_eval_metric_ops_for_coco_detections(self, batch_size=1, + max_gt_boxes=None, + scale_to_absolute=False): + eval_config = eval_pb2.EvalConfig() + eval_config.metrics_set.extend(['coco_detection_metrics']) + categories = self._get_categories_list() + eval_dict = self._make_evaluation_dict(batch_size=batch_size, + max_gt_boxes=max_gt_boxes, + scale_to_absolute=scale_to_absolute) + metric_ops = eval_util.get_eval_metric_ops_for_evaluators( + eval_config, categories, eval_dict) + _, update_op = metric_ops['DetectionBoxes_Precision/mAP'] + + with self.test_session() as sess: + metrics = {} + for key, (value_op, _) in six.iteritems(metric_ops): + metrics[key] = value_op + sess.run(update_op) + metrics = sess.run(metrics) + self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP']) + self.assertNotIn('DetectionMasks_Precision/mAP', metrics) + + @parameterized.parameters( + {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': True}, + {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': True}, + {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': False}, + {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': False} + ) + @unittest.skipIf(tf_version.is_tf2(), 'Only compatible with TF1.X') + def test_get_eval_metric_ops_for_coco_detections_and_masks( + self, batch_size=1, max_gt_boxes=None, scale_to_absolute=False): + eval_config = 
eval_pb2.EvalConfig() + eval_config.metrics_set.extend( + ['coco_detection_metrics', 'coco_mask_metrics']) + categories = self._get_categories_list() + eval_dict = self._make_evaluation_dict(batch_size=batch_size, + max_gt_boxes=max_gt_boxes, + scale_to_absolute=scale_to_absolute) + metric_ops = eval_util.get_eval_metric_ops_for_evaluators( + eval_config, categories, eval_dict) + _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP'] + _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP'] + + with self.test_session() as sess: + metrics = {} + for key, (value_op, _) in six.iteritems(metric_ops): + metrics[key] = value_op + sess.run(update_op_boxes) + sess.run(update_op_masks) + metrics = sess.run(metrics) + self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP']) + self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP']) + + @parameterized.parameters( + {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': True}, + {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': True}, + {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': False}, + {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': False} + ) + @unittest.skipIf(tf_version.is_tf2(), 'Only compatible with TF1.X') + def test_get_eval_metric_ops_for_coco_detections_and_resized_masks( + self, batch_size=1, max_gt_boxes=None, scale_to_absolute=False): + eval_config = eval_pb2.EvalConfig() + eval_config.metrics_set.extend( + ['coco_detection_metrics', 'coco_mask_metrics']) + categories = self._get_categories_list() + eval_dict = self._make_evaluation_dict(batch_size=batch_size, + max_gt_boxes=max_gt_boxes, + scale_to_absolute=scale_to_absolute, + resized_groundtruth_masks=True) + metric_ops = eval_util.get_eval_metric_ops_for_evaluators( + eval_config, categories, eval_dict) + _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP'] + _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP'] + + with self.test_session() as sess: + metrics = {} + for key, (value_op, _) in six.iteritems(metric_ops): + metrics[key] = value_op + sess.run(update_op_boxes) + sess.run(update_op_masks) + metrics = sess.run(metrics) + self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP']) + self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP']) + + @unittest.skipIf(tf_version.is_tf2(), 'Only compatible with TF1.X') + def test_get_eval_metric_ops_raises_error_with_unsupported_metric(self): + eval_config = eval_pb2.EvalConfig() + eval_config.metrics_set.extend(['unsupported_metric']) + categories = self._get_categories_list() + eval_dict = self._make_evaluation_dict() + with self.assertRaises(ValueError): + eval_util.get_eval_metric_ops_for_evaluators( + eval_config, categories, eval_dict) + + def test_get_eval_metric_ops_for_evaluators(self): + eval_config = eval_pb2.EvalConfig() + eval_config.metrics_set.extend([ + 'coco_detection_metrics', 'coco_mask_metrics', + 'precision_at_recall_detection_metrics' + ]) + eval_config.include_metrics_per_category = True + eval_config.recall_lower_bound = 0.2 + eval_config.recall_upper_bound = 0.6 + + evaluator_options = eval_util.evaluator_options_from_eval_config( + eval_config) + self.assertTrue(evaluator_options['coco_detection_metrics'] + ['include_metrics_per_category']) + self.assertTrue( + evaluator_options['coco_mask_metrics']['include_metrics_per_category']) + self.assertAlmostEqual( + evaluator_options['precision_at_recall_detection_metrics'] + ['recall_lower_bound'], eval_config.recall_lower_bound) + 
self.assertAlmostEqual( + evaluator_options['precision_at_recall_detection_metrics'] + ['recall_upper_bound'], eval_config.recall_upper_bound) + + def test_get_evaluator_with_evaluator_options(self): + eval_config = eval_pb2.EvalConfig() + eval_config.metrics_set.extend( + ['coco_detection_metrics', 'precision_at_recall_detection_metrics']) + eval_config.include_metrics_per_category = True + eval_config.recall_lower_bound = 0.2 + eval_config.recall_upper_bound = 0.6 + categories = self._get_categories_list() + + evaluator_options = eval_util.evaluator_options_from_eval_config( + eval_config) + evaluator = eval_util.get_evaluators(eval_config, categories, + evaluator_options) + + self.assertTrue(evaluator[0]._include_metrics_per_category) + self.assertAlmostEqual(evaluator[1]._recall_lower_bound, + eval_config.recall_lower_bound) + self.assertAlmostEqual(evaluator[1]._recall_upper_bound, + eval_config.recall_upper_bound) + + def test_get_evaluator_with_no_evaluator_options(self): + eval_config = eval_pb2.EvalConfig() + eval_config.metrics_set.extend( + ['coco_detection_metrics', 'precision_at_recall_detection_metrics']) + eval_config.include_metrics_per_category = True + eval_config.recall_lower_bound = 0.2 + eval_config.recall_upper_bound = 0.6 + categories = self._get_categories_list() + + evaluator = eval_util.get_evaluators( + eval_config, categories, evaluator_options=None) + + # Even though we are setting eval_config.include_metrics_per_category = True + # and bounds on recall, these options are never passed into the + # DetectionEvaluator constructor (via `evaluator_options`). + self.assertFalse(evaluator[0]._include_metrics_per_category) + self.assertAlmostEqual(evaluator[1]._recall_lower_bound, 0.0) + self.assertAlmostEqual(evaluator[1]._recall_upper_bound, 1.0) + + def test_get_evaluator_with_keypoint_metrics(self): + eval_config = eval_pb2.EvalConfig() + person_keypoints_metric = eval_config.parameterized_metric.add() + person_keypoints_metric.coco_keypoint_metrics.class_label = 'person' + person_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[ + 'left_eye'] = 0.1 + person_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[ + 'right_eye'] = 0.2 + dog_keypoints_metric = eval_config.parameterized_metric.add() + dog_keypoints_metric.coco_keypoint_metrics.class_label = 'dog' + dog_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[ + 'tail_start'] = 0.3 + dog_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[ + 'mouth'] = 0.4 + categories = self._get_categories_list_with_keypoints() + + evaluator = eval_util.get_evaluators( + eval_config, categories, evaluator_options=None) + + # Verify keypoint evaluator class variables. 
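+    # Because `metrics_set` is left empty, get_evaluators falls back to the
+    # default detection metric, so evaluator[0] is the detection evaluator;
+    # one keypoint evaluator is then appended for each parameterized metric
+    # whose class_label matches a category ('person' and 'dog' here), which
+    # is why three evaluators are expected below.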
+ self.assertLen(evaluator, 3) + self.assertFalse(evaluator[0]._include_metrics_per_category) + self.assertEqual(evaluator[1]._category_name, 'person') + self.assertEqual(evaluator[2]._category_name, 'dog') + self.assertAllEqual(evaluator[1]._keypoint_ids, [0, 3]) + self.assertAllEqual(evaluator[2]._keypoint_ids, [1, 2]) + self.assertAllClose([0.1, 0.2], evaluator[1]._oks_sigmas) + self.assertAllClose([0.3, 0.4], evaluator[2]._oks_sigmas) + + def test_get_evaluator_with_unmatched_label(self): + eval_config = eval_pb2.EvalConfig() + person_keypoints_metric = eval_config.parameterized_metric.add() + person_keypoints_metric.coco_keypoint_metrics.class_label = 'unmatched' + person_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[ + 'kpt'] = 0.1 + categories = self._get_categories_list_with_keypoints() + + evaluator = eval_util.get_evaluators( + eval_config, categories, evaluator_options=None) + self.assertLen(evaluator, 1) + self.assertNotIsInstance( + evaluator[0], coco_evaluation.CocoKeypointEvaluator) + + def test_padded_image_result_dict(self): + + input_data_fields = fields.InputDataFields + detection_fields = fields.DetectionResultFields + key = tf.constant([str(i) for i in range(2)]) + + detection_boxes = np.array([[[0., 0., 1., 1.]], [[0.0, 0.0, 0.5, 0.5]]], + dtype=np.float32) + detection_keypoints = np.array([[0.0, 0.0], [0.5, 0.5], [1.0, 1.0]], + dtype=np.float32) + def graph_fn(): + detections = { + detection_fields.detection_boxes: + tf.constant(detection_boxes), + detection_fields.detection_scores: + tf.constant([[1.], [1.]]), + detection_fields.detection_classes: + tf.constant([[1], [2]]), + detection_fields.num_detections: + tf.constant([1, 1]), + detection_fields.detection_keypoints: + tf.tile( + tf.reshape( + tf.constant(detection_keypoints), shape=[1, 1, 3, 2]), + multiples=[2, 1, 1, 1]) + } + + gt_boxes = detection_boxes + groundtruth = { + input_data_fields.groundtruth_boxes: + tf.constant(gt_boxes), + input_data_fields.groundtruth_classes: + tf.constant([[1.], [1.]]), + input_data_fields.groundtruth_keypoints: + tf.tile( + tf.reshape( + tf.constant(detection_keypoints), shape=[1, 1, 3, 2]), + multiples=[2, 1, 1, 1]) + } + + image = tf.zeros((2, 100, 100, 3), dtype=tf.float32) + + true_image_shapes = tf.constant([[100, 100, 3], [50, 100, 3]]) + original_image_spatial_shapes = tf.constant([[200, 200], [150, 300]]) + + result = eval_util.result_dict_for_batched_example( + image, key, detections, groundtruth, + scale_to_absolute=True, + true_image_shapes=true_image_shapes, + original_image_spatial_shapes=original_image_spatial_shapes, + max_gt_boxes=tf.constant(1)) + return (result[input_data_fields.groundtruth_boxes], + result[input_data_fields.groundtruth_keypoints], + result[detection_fields.detection_boxes], + result[detection_fields.detection_keypoints]) + (gt_boxes, gt_keypoints, detection_boxes, + detection_keypoints) = self.execute_cpu(graph_fn, []) + self.assertAllEqual( + [[[0., 0., 200., 200.]], [[0.0, 0.0, 150., 150.]]], + gt_boxes) + self.assertAllClose([[[[0., 0.], [100., 100.], [200., 200.]]], + [[[0., 0.], [150., 150.], [300., 300.]]]], + gt_keypoints) + + # Predictions from the model are not scaled. 
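+    # They are only scaled to absolute coordinates against the original image
+    # size (0.5 * 150 = 75 and 0.5 * 300 = 150), without first being
+    # re-normalized to the true image shape the way the groundtruth boxes
+    # above were.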
+ self.assertAllEqual( + [[[0., 0., 200., 200.]], [[0.0, 0.0, 75., 150.]]], + detection_boxes) + self.assertAllClose([[[[0., 0.], [100., 100.], [200., 200.]]], + [[[0., 0.], [75., 150.], [150., 300.]]]], + detection_keypoints) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/export_inference_graph.py b/models/research/object_detection/export_inference_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..5a0ee0dde056afacca9a876c7456cb82a82f3192 --- /dev/null +++ b/models/research/object_detection/export_inference_graph.py @@ -0,0 +1,206 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""Tool to export an object detection model for inference. + +Prepares an object detection tensorflow graph for inference using model +configuration and a trained checkpoint. Outputs inference +graph, associated checkpoint files, a frozen inference graph and a +SavedModel (https://tensorflow.github.io/serving/serving_basic.html). + +The inference graph contains one of three input nodes depending on the user +specified option. + * `image_tensor`: Accepts a uint8 4-D tensor of shape [None, None, None, 3] + * `encoded_image_string_tensor`: Accepts a 1-D string tensor of shape [None] + containing encoded PNG or JPEG images. Image resolutions are expected to be + the same if more than 1 image is provided. + * `tf_example`: Accepts a 1-D string tensor of shape [None] containing + serialized TFExample protos. Image resolutions are expected to be the same + if more than 1 image is provided. + +and the following output nodes returned by the model.postprocess(..): + * `num_detections`: Outputs float32 tensors of the form [batch] + that specifies the number of valid boxes per image in the batch. + * `detection_boxes`: Outputs float32 tensors of the form + [batch, num_boxes, 4] containing detected boxes. + * `detection_scores`: Outputs float32 tensors of the form + [batch, num_boxes] containing class scores for the detections. + * `detection_classes`: Outputs float32 tensors of the form + [batch, num_boxes] containing classes for the detections. + * `raw_detection_boxes`: Outputs float32 tensors of the form + [batch, raw_num_boxes, 4] containing detection boxes without + post-processing. + * `raw_detection_scores`: Outputs float32 tensors of the form + [batch, raw_num_boxes, num_classes_with_background] containing class score + logits for raw detection boxes. + * `detection_masks`: (Optional) Outputs float32 tensors of the form + [batch, num_boxes, mask_height, mask_width] containing predicted instance + masks for each box if its present in the dictionary of postprocessed + tensors returned by the model. 
+ * detection_multiclass_scores: (Optional) Outputs float32 tensor of shape + [batch, num_boxes, num_classes_with_background] for containing class + score distribution for detected boxes including background if any. + * detection_features: (Optional) float32 tensor of shape + [batch, num_boxes, roi_height, roi_width, depth] + containing classifier features + +Notes: + * This tool uses `use_moving_averages` from eval_config to decide which + weights to freeze. + +Example Usage: +-------------- +python export_inference_graph.py \ + --input_type image_tensor \ + --pipeline_config_path path/to/ssd_inception_v2.config \ + --trained_checkpoint_prefix path/to/model.ckpt \ + --output_directory path/to/exported_model_directory + +The expected output would be in the directory +path/to/exported_model_directory (which is created if it does not exist) +with contents: + - inference_graph.pbtxt + - model.ckpt.data-00000-of-00001 + - model.ckpt.info + - model.ckpt.meta + - frozen_inference_graph.pb + + saved_model (a directory) + +Config overrides (see the `config_override` flag) are text protobufs +(also of type pipeline_pb2.TrainEvalPipelineConfig) which are used to override +certain fields in the provided pipeline_config_path. These are useful for +making small changes to the inference graph that differ from the training or +eval config. + +Example Usage (in which we change the second stage post-processing score +threshold to be 0.5): + +python export_inference_graph.py \ + --input_type image_tensor \ + --pipeline_config_path path/to/ssd_inception_v2.config \ + --trained_checkpoint_prefix path/to/model.ckpt \ + --output_directory path/to/exported_model_directory \ + --config_override " \ + model{ \ + faster_rcnn { \ + second_stage_post_processing { \ + batch_non_max_suppression { \ + score_threshold: 0.5 \ + } \ + } \ + } \ + }" +""" +import tensorflow.compat.v1 as tf +from google.protobuf import text_format +from object_detection import exporter +from object_detection.protos import pipeline_pb2 + +flags = tf.app.flags + +flags.DEFINE_string('input_type', 'image_tensor', 'Type of input node. Can be ' + 'one of [`image_tensor`, `encoded_image_string_tensor`, ' + '`tf_example`]') +flags.DEFINE_string('input_shape', None, + 'If input_type is `image_tensor`, this can explicitly set ' + 'the shape of this input tensor to a fixed size. The ' + 'dimensions are to be provided as a comma-separated list ' + 'of integers. A value of -1 can be used for unknown ' + 'dimensions. 
If not specified, for an `image_tensor, the ' + 'default shape will be partially specified as ' + '`[None, None, None, 3]`.') +flags.DEFINE_string('pipeline_config_path', None, + 'Path to a pipeline_pb2.TrainEvalPipelineConfig config ' + 'file.') +flags.DEFINE_string('trained_checkpoint_prefix', None, + 'Path to trained checkpoint, typically of the form ' + 'path/to/model.ckpt') +flags.DEFINE_string('output_directory', None, 'Path to write outputs.') +flags.DEFINE_string('config_override', '', + 'pipeline_pb2.TrainEvalPipelineConfig ' + 'text proto to override pipeline_config_path.') +flags.DEFINE_boolean('write_inference_graph', False, + 'If true, writes inference graph to disk.') +flags.DEFINE_string('additional_output_tensor_names', None, + 'Additional Tensors to output, to be specified as a comma ' + 'separated list of tensor names.') +flags.DEFINE_boolean('use_side_inputs', False, + 'If True, uses side inputs as well as image inputs.') +flags.DEFINE_string('side_input_shapes', None, + 'If use_side_inputs is True, this explicitly sets ' + 'the shape of the side input tensors to a fixed size. The ' + 'dimensions are to be provided as a comma-separated list ' + 'of integers. A value of -1 can be used for unknown ' + 'dimensions. A `/` denotes a break, starting the shape of ' + 'the next side input tensor. This flag is required if ' + 'using side inputs.') +flags.DEFINE_string('side_input_types', None, + 'If use_side_inputs is True, this explicitly sets ' + 'the type of the side input tensors. The ' + 'dimensions are to be provided as a comma-separated list ' + 'of types, each of `string`, `integer`, or `float`. ' + 'This flag is required if using side inputs.') +flags.DEFINE_string('side_input_names', None, + 'If use_side_inputs is True, this explicitly sets ' + 'the names of the side input tensors required by the model ' + 'assuming the names will be a comma-separated list of ' + 'strings. 
This flag is required if using side inputs.') +tf.app.flags.mark_flag_as_required('pipeline_config_path') +tf.app.flags.mark_flag_as_required('trained_checkpoint_prefix') +tf.app.flags.mark_flag_as_required('output_directory') +FLAGS = flags.FLAGS + + +def main(_): + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f: + text_format.Merge(f.read(), pipeline_config) + text_format.Merge(FLAGS.config_override, pipeline_config) + if FLAGS.input_shape: + input_shape = [ + int(dim) if dim != '-1' else None + for dim in FLAGS.input_shape.split(',') + ] + else: + input_shape = None + if FLAGS.use_side_inputs: + side_input_shapes, side_input_names, side_input_types = ( + exporter.parse_side_inputs( + FLAGS.side_input_shapes, + FLAGS.side_input_names, + FLAGS.side_input_types)) + else: + side_input_shapes = None + side_input_names = None + side_input_types = None + if FLAGS.additional_output_tensor_names: + additional_output_tensor_names = list( + FLAGS.additional_output_tensor_names.split(',')) + else: + additional_output_tensor_names = None + exporter.export_inference_graph( + FLAGS.input_type, pipeline_config, FLAGS.trained_checkpoint_prefix, + FLAGS.output_directory, input_shape=input_shape, + write_inference_graph=FLAGS.write_inference_graph, + additional_output_tensor_names=additional_output_tensor_names, + use_side_inputs=FLAGS.use_side_inputs, + side_input_shapes=side_input_shapes, + side_input_names=side_input_names, + side_input_types=side_input_types) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/object_detection/export_tflite_ssd_graph.py b/models/research/object_detection/export_tflite_ssd_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..2127ca08ef57228831871605b4df5ca7f0a79963 --- /dev/null +++ b/models/research/object_detection/export_tflite_ssd_graph.py @@ -0,0 +1,144 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Exports an SSD detection model to use with tf-lite. + +Outputs file: +* A tflite compatible frozen graph - $output_directory/tflite_graph.pb + +The exported graph has the following input and output nodes. + +Inputs: +'normalized_input_image_tensor': a float32 tensor of shape +[1, height, width, 3] containing the normalized input image. Note that the +height and width must be compatible with the height and width configured in +the fixed_shape_image resizer options in the pipeline config proto. + +In floating point Mobilenet model, 'normalized_image_tensor' has values +between [-1,1). This typically means mapping each pixel (linearly) +to a value between [-1, 1]. Input image +values between 0 and 255 are scaled by (1/128.0) and then a value of +-1 is added to them to ensure the range is [-1,1). 
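+
+As a rough illustration of this convention (not the exact `preprocess`
+implementation): normalized = pixel / 128.0 - 1.0, so a pixel value of 0 maps
+to -1.0 and a value of 255 maps to roughly 0.99.
+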
+In quantized Mobilenet model, 'normalized_image_tensor' has values between [0, +255]. +In general, see the `preprocess` function defined in the feature extractor class +in the object_detection/models directory. + +Outputs: +If add_postprocessing_op is true: frozen graph adds a + TFLite_Detection_PostProcess custom op node has four outputs: + detection_boxes: a float32 tensor of shape [1, num_boxes, 4] with box + locations + detection_classes: a float32 tensor of shape [1, num_boxes] + with class indices + detection_scores: a float32 tensor of shape [1, num_boxes] + with class scores + num_boxes: a float32 tensor of size 1 containing the number of detected boxes +else: + the graph has two outputs: + 'raw_outputs/box_encodings': a float32 tensor of shape [1, num_anchors, 4] + containing the encoded box predictions. + 'raw_outputs/class_predictions': a float32 tensor of shape + [1, num_anchors, num_classes] containing the class scores for each anchor + after applying score conversion. + +Example Usage: +-------------- +python object_detection/export_tflite_ssd_graph.py \ + --pipeline_config_path path/to/ssd_mobilenet.config \ + --trained_checkpoint_prefix path/to/model.ckpt \ + --output_directory path/to/exported_model_directory + +The expected output would be in the directory +path/to/exported_model_directory (which is created if it does not exist) +with contents: + - tflite_graph.pbtxt + - tflite_graph.pb +Config overrides (see the `config_override` flag) are text protobufs +(also of type pipeline_pb2.TrainEvalPipelineConfig) which are used to override +certain fields in the provided pipeline_config_path. These are useful for +making small changes to the inference graph that differ from the training or +eval config. + +Example Usage (in which we change the NMS iou_threshold to be 0.5 and +NMS score_threshold to be 0.0): +python object_detection/export_tflite_ssd_graph.py \ + --pipeline_config_path path/to/ssd_mobilenet.config \ + --trained_checkpoint_prefix path/to/model.ckpt \ + --output_directory path/to/exported_model_directory + --config_override " \ + model{ \ + ssd{ \ + post_processing { \ + batch_non_max_suppression { \ + score_threshold: 0.0 \ + iou_threshold: 0.5 \ + } \ + } \ + } \ + } \ + " +""" + +import tensorflow.compat.v1 as tf +from google.protobuf import text_format +from object_detection import export_tflite_ssd_graph_lib +from object_detection.protos import pipeline_pb2 + +flags = tf.app.flags +flags.DEFINE_string('output_directory', None, 'Path to write outputs.') +flags.DEFINE_string( + 'pipeline_config_path', None, + 'Path to a pipeline_pb2.TrainEvalPipelineConfig config ' + 'file.') +flags.DEFINE_string('trained_checkpoint_prefix', None, 'Checkpoint prefix.') +flags.DEFINE_integer('max_detections', 10, + 'Maximum number of detections (boxes) to show.') +flags.DEFINE_integer('max_classes_per_detection', 1, + 'Maximum number of classes to output per detection box.') +flags.DEFINE_integer( + 'detections_per_class', 100, + 'Number of anchors used per class in Regular Non-Max-Suppression.') +flags.DEFINE_bool('add_postprocessing_op', True, + 'Add TFLite custom op for postprocessing to the graph.') +flags.DEFINE_bool( + 'use_regular_nms', False, + 'Flag to set postprocessing op to use Regular NMS instead of Fast NMS.') +flags.DEFINE_string( + 'config_override', '', 'pipeline_pb2.TrainEvalPipelineConfig ' + 'text proto to override pipeline_config_path.') + +FLAGS = flags.FLAGS + + +def main(argv): + del argv # Unused. 
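+  # Once this script has written tflite_graph.pb, it is usually converted to
+  # a .tflite flatbuffer with the TF1 TFLite converter. A rough sketch only;
+  # the path, input shape and output arrays below are illustrative and depend
+  # on your pipeline config:
+  #   converter = tf.lite.TFLiteConverter.from_frozen_graph(
+  #       'path/to/tflite_graph.pb',
+  #       input_arrays=['normalized_input_image_tensor'],
+  #       output_arrays=['TFLite_Detection_PostProcess',
+  #                      'TFLite_Detection_PostProcess:1',
+  #                      'TFLite_Detection_PostProcess:2',
+  #                      'TFLite_Detection_PostProcess:3'],
+  #       input_shapes={'normalized_input_image_tensor': [1, 300, 300, 3]})
+  #   converter.allow_custom_ops = True
+  #   open('detect.tflite', 'wb').write(converter.convert())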
+ flags.mark_flag_as_required('output_directory') + flags.mark_flag_as_required('pipeline_config_path') + flags.mark_flag_as_required('trained_checkpoint_prefix') + + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + + with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f: + text_format.Merge(f.read(), pipeline_config) + text_format.Merge(FLAGS.config_override, pipeline_config) + export_tflite_ssd_graph_lib.export_tflite_graph( + pipeline_config, FLAGS.trained_checkpoint_prefix, FLAGS.output_directory, + FLAGS.add_postprocessing_op, FLAGS.max_detections, + FLAGS.max_classes_per_detection, use_regular_nms=FLAGS.use_regular_nms) + + +if __name__ == '__main__': + tf.app.run(main) diff --git a/models/research/object_detection/export_tflite_ssd_graph_lib.py b/models/research/object_detection/export_tflite_ssd_graph_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..f72e9525bfd75b58c874cba5b790cbac710cb9dd --- /dev/null +++ b/models/research/object_detection/export_tflite_ssd_graph_lib.py @@ -0,0 +1,334 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Exports an SSD detection model to use with tf-lite. + +See export_tflite_ssd_graph.py for usage. +""" +import os +import tempfile +import numpy as np +import tensorflow.compat.v1 as tf +from tensorflow.core.framework import attr_value_pb2 +from tensorflow.core.framework import types_pb2 +from tensorflow.core.protobuf import saver_pb2 +from object_detection import exporter +from object_detection.builders import graph_rewriter_builder +from object_detection.builders import model_builder +from object_detection.builders import post_processing_builder +from object_detection.core import box_list +from object_detection.utils import tf_version + +_DEFAULT_NUM_CHANNELS = 3 +_DEFAULT_NUM_COORD_BOX = 4 + +if tf_version.is_tf1(): + from tensorflow.tools.graph_transforms import TransformGraph # pylint: disable=g-import-not-at-top + + +def get_const_center_size_encoded_anchors(anchors): + """Exports center-size encoded anchors as a constant tensor. + + Args: + anchors: a float32 tensor of shape [num_anchors, 4] containing the anchor + boxes + + Returns: + encoded_anchors: a float32 constant tensor of shape [num_anchors, 4] + containing the anchor boxes. 
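+
+    For example, a corner-encoded anchor [ymin, xmin, ymax, xmax] of
+    [0.0, 0.0, 0.5, 0.5] would be emitted as
+    [y_center, x_center, height, width] = [0.25, 0.25, 0.5, 0.5].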
+ """ + anchor_boxlist = box_list.BoxList(anchors) + y, x, h, w = anchor_boxlist.get_center_coordinates_and_sizes() + num_anchors = y.get_shape().as_list() + + with tf.Session() as sess: + y_out, x_out, h_out, w_out = sess.run([y, x, h, w]) + encoded_anchors = tf.constant( + np.transpose(np.stack((y_out, x_out, h_out, w_out))), + dtype=tf.float32, + shape=[num_anchors[0], _DEFAULT_NUM_COORD_BOX], + name='anchors') + return encoded_anchors + + +def append_postprocessing_op(frozen_graph_def, + max_detections, + max_classes_per_detection, + nms_score_threshold, + nms_iou_threshold, + num_classes, + scale_values, + detections_per_class=100, + use_regular_nms=False, + additional_output_tensors=()): + """Appends postprocessing custom op. + + Args: + frozen_graph_def: Frozen GraphDef for SSD model after freezing the + checkpoint + max_detections: Maximum number of detections (boxes) to show + max_classes_per_detection: Number of classes to display per detection + nms_score_threshold: Score threshold used in Non-maximal suppression in + post-processing + nms_iou_threshold: Intersection-over-union threshold used in Non-maximal + suppression in post-processing + num_classes: number of classes in SSD detector + scale_values: scale values is a dict with following key-value pairs + {y_scale: 10, x_scale: 10, h_scale: 5, w_scale: 5} that are used in decode + centersize boxes + detections_per_class: In regular NonMaxSuppression, number of anchors used + for NonMaxSuppression per class + use_regular_nms: Flag to set postprocessing op to use Regular NMS instead of + Fast NMS. + additional_output_tensors: Array of additional tensor names to output. + Tensors are appended after postprocessing output. + + Returns: + transformed_graph_def: Frozen GraphDef with postprocessing custom op + appended + TFLite_Detection_PostProcess custom op node has four outputs: + detection_boxes: a float32 tensor of shape [1, num_boxes, 4] with box + locations + detection_classes: a float32 tensor of shape [1, num_boxes] + with class indices + detection_scores: a float32 tensor of shape [1, num_boxes] + with class scores + num_boxes: a float32 tensor of size 1 containing the number of detected + boxes + """ + new_output = frozen_graph_def.node.add() + new_output.op = 'TFLite_Detection_PostProcess' + new_output.name = 'TFLite_Detection_PostProcess' + new_output.attr['_output_quantized'].CopyFrom( + attr_value_pb2.AttrValue(b=True)) + new_output.attr['_output_types'].list.type.extend([ + types_pb2.DT_FLOAT, types_pb2.DT_FLOAT, types_pb2.DT_FLOAT, + types_pb2.DT_FLOAT + ]) + new_output.attr['_support_output_type_float_in_quantized_op'].CopyFrom( + attr_value_pb2.AttrValue(b=True)) + new_output.attr['max_detections'].CopyFrom( + attr_value_pb2.AttrValue(i=max_detections)) + new_output.attr['max_classes_per_detection'].CopyFrom( + attr_value_pb2.AttrValue(i=max_classes_per_detection)) + new_output.attr['nms_score_threshold'].CopyFrom( + attr_value_pb2.AttrValue(f=nms_score_threshold.pop())) + new_output.attr['nms_iou_threshold'].CopyFrom( + attr_value_pb2.AttrValue(f=nms_iou_threshold.pop())) + new_output.attr['num_classes'].CopyFrom( + attr_value_pb2.AttrValue(i=num_classes)) + + new_output.attr['y_scale'].CopyFrom( + attr_value_pb2.AttrValue(f=scale_values['y_scale'].pop())) + new_output.attr['x_scale'].CopyFrom( + attr_value_pb2.AttrValue(f=scale_values['x_scale'].pop())) + new_output.attr['h_scale'].CopyFrom( + attr_value_pb2.AttrValue(f=scale_values['h_scale'].pop())) + new_output.attr['w_scale'].CopyFrom( + 
attr_value_pb2.AttrValue(f=scale_values['w_scale'].pop())) + new_output.attr['detections_per_class'].CopyFrom( + attr_value_pb2.AttrValue(i=detections_per_class)) + new_output.attr['use_regular_nms'].CopyFrom( + attr_value_pb2.AttrValue(b=use_regular_nms)) + + new_output.input.extend( + ['raw_outputs/box_encodings', 'raw_outputs/class_predictions', 'anchors']) + # Transform the graph to append new postprocessing op + input_names = [] + output_names = ['TFLite_Detection_PostProcess' + ] + list(additional_output_tensors) + transforms = ['strip_unused_nodes'] + transformed_graph_def = TransformGraph(frozen_graph_def, input_names, + output_names, transforms) + return transformed_graph_def + + +def export_tflite_graph(pipeline_config, + trained_checkpoint_prefix, + output_dir, + add_postprocessing_op, + max_detections, + max_classes_per_detection, + detections_per_class=100, + use_regular_nms=False, + binary_graph_name='tflite_graph.pb', + txt_graph_name='tflite_graph.pbtxt', + additional_output_tensors=()): + """Exports a tflite compatible graph and anchors for ssd detection model. + + Anchors are written to a tensor and tflite compatible graph + is written to output_dir/tflite_graph.pb. + + Args: + pipeline_config: a pipeline.proto object containing the configuration for + SSD model to export. + trained_checkpoint_prefix: a file prefix for the checkpoint containing the + trained parameters of the SSD model. + output_dir: A directory to write the tflite graph and anchor file to. + add_postprocessing_op: If add_postprocessing_op is true: frozen graph adds a + TFLite_Detection_PostProcess custom op + max_detections: Maximum number of detections (boxes) to show + max_classes_per_detection: Number of classes to display per detection + detections_per_class: In regular NonMaxSuppression, number of anchors used + for NonMaxSuppression per class + use_regular_nms: Flag to set postprocessing op to use Regular NMS instead of + Fast NMS. + binary_graph_name: Name of the exported graph file in binary format. + txt_graph_name: Name of the exported graph file in text format. + additional_output_tensors: Array of additional tensor names to output. + Additional tensors are appended to the end of output tensor list. + + Raises: + ValueError: if the pipeline config contains models other than ssd or uses an + fixed_shape_resizer and provides a shape as well. + """ + tf.gfile.MakeDirs(output_dir) + if pipeline_config.model.WhichOneof('model') != 'ssd': + raise ValueError('Only ssd models are supported in tflite. 
' + 'Found {} in config'.format( + pipeline_config.model.WhichOneof('model'))) + + num_classes = pipeline_config.model.ssd.num_classes + nms_score_threshold = { + pipeline_config.model.ssd.post_processing.batch_non_max_suppression + .score_threshold + } + nms_iou_threshold = { + pipeline_config.model.ssd.post_processing.batch_non_max_suppression + .iou_threshold + } + scale_values = {} + scale_values['y_scale'] = { + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale + } + scale_values['x_scale'] = { + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale + } + scale_values['h_scale'] = { + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale + } + scale_values['w_scale'] = { + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale + } + + image_resizer_config = pipeline_config.model.ssd.image_resizer + image_resizer = image_resizer_config.WhichOneof('image_resizer_oneof') + num_channels = _DEFAULT_NUM_CHANNELS + if image_resizer == 'fixed_shape_resizer': + height = image_resizer_config.fixed_shape_resizer.height + width = image_resizer_config.fixed_shape_resizer.width + if image_resizer_config.fixed_shape_resizer.convert_to_grayscale: + num_channels = 1 + shape = [1, height, width, num_channels] + else: + raise ValueError( + 'Only fixed_shape_resizer' + 'is supported with tflite. Found {}'.format( + image_resizer_config.WhichOneof('image_resizer_oneof'))) + + image = tf.placeholder( + tf.float32, shape=shape, name='normalized_input_image_tensor') + + detection_model = model_builder.build( + pipeline_config.model, is_training=False) + predicted_tensors = detection_model.predict(image, true_image_shapes=None) + # The score conversion occurs before the post-processing custom op + _, score_conversion_fn = post_processing_builder.build( + pipeline_config.model.ssd.post_processing) + class_predictions = score_conversion_fn( + predicted_tensors['class_predictions_with_background']) + + with tf.name_scope('raw_outputs'): + # 'raw_outputs/box_encodings': a float32 tensor of shape [1, num_anchors, 4] + # containing the encoded box predictions. Note that these are raw + # predictions and no Non-Max suppression is applied on them and + # no decode center size boxes is applied to them. + tf.identity(predicted_tensors['box_encodings'], name='box_encodings') + # 'raw_outputs/class_predictions': a float32 tensor of shape + # [1, num_anchors, num_classes] containing the class scores for each anchor + # after applying score conversion. + tf.identity(class_predictions, name='class_predictions') + # 'anchors': a float32 tensor of shape + # [4, num_anchors] containing the anchors as a constant node. + tf.identity( + get_const_center_size_encoded_anchors(predicted_tensors['anchors']), + name='anchors') + + # Add global step to the graph, so we know the training step number when we + # evaluate the model. 
+ tf.train.get_or_create_global_step() + + # graph rewriter + is_quantized = pipeline_config.HasField('graph_rewriter') + if is_quantized: + graph_rewriter_config = pipeline_config.graph_rewriter + graph_rewriter_fn = graph_rewriter_builder.build( + graph_rewriter_config, is_training=False) + graph_rewriter_fn() + + if pipeline_config.model.ssd.feature_extractor.HasField('fpn'): + exporter.rewrite_nn_resize_op(is_quantized) + + # freeze the graph + saver_kwargs = {} + if pipeline_config.eval_config.use_moving_averages: + saver_kwargs['write_version'] = saver_pb2.SaverDef.V1 + moving_average_checkpoint = tempfile.NamedTemporaryFile() + exporter.replace_variable_values_with_moving_averages( + tf.get_default_graph(), trained_checkpoint_prefix, + moving_average_checkpoint.name) + checkpoint_to_use = moving_average_checkpoint.name + else: + checkpoint_to_use = trained_checkpoint_prefix + + saver = tf.train.Saver(**saver_kwargs) + input_saver_def = saver.as_saver_def() + frozen_graph_def = exporter.freeze_graph_with_def_protos( + input_graph_def=tf.get_default_graph().as_graph_def(), + input_saver_def=input_saver_def, + input_checkpoint=checkpoint_to_use, + output_node_names=','.join([ + 'raw_outputs/box_encodings', 'raw_outputs/class_predictions', + 'anchors' + ] + list(additional_output_tensors)), + restore_op_name='save/restore_all', + filename_tensor_name='save/Const:0', + clear_devices=True, + output_graph='', + initializer_nodes='') + + # Add new operation to do post processing in a custom op (TF Lite only) + if add_postprocessing_op: + transformed_graph_def = append_postprocessing_op( + frozen_graph_def, + max_detections, + max_classes_per_detection, + nms_score_threshold, + nms_iou_threshold, + num_classes, + scale_values, + detections_per_class, + use_regular_nms, + additional_output_tensors=additional_output_tensors) + else: + # Return frozen without adding post-processing custom op + transformed_graph_def = frozen_graph_def + + binary_graph = os.path.join(output_dir, binary_graph_name) + with tf.gfile.GFile(binary_graph, 'wb') as f: + f.write(transformed_graph_def.SerializeToString()) + txt_graph = os.path.join(output_dir, txt_graph_name) + with tf.gfile.GFile(txt_graph, 'w') as f: + f.write(str(transformed_graph_def)) diff --git a/models/research/object_detection/export_tflite_ssd_graph_lib_tf1_test.py b/models/research/object_detection/export_tflite_ssd_graph_lib_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..0da7b9aa294054380dcc6ebaf627accdd5e619ff --- /dev/null +++ b/models/research/object_detection/export_tflite_ssd_graph_lib_tf1_test.py @@ -0,0 +1,423 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for object_detection.export_tflite_ssd_graph.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import os +import unittest +import numpy as np +import six +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from tensorflow.core.framework import types_pb2 +from object_detection import export_tflite_ssd_graph_lib +from object_detection import exporter +from object_detection.builders import graph_rewriter_builder +from object_detection.builders import model_builder +from object_detection.core import model +from object_detection.protos import graph_rewriter_pb2 +from object_detection.protos import pipeline_pb2 +from object_detection.protos import post_processing_pb2 +from object_detection.utils import tf_version + +# pylint: disable=g-import-not-at-top + +if six.PY2: + import mock +else: + from unittest import mock # pylint: disable=g-importing-member +# pylint: enable=g-import-not-at-top + + +class FakeModel(model.DetectionModel): + + def __init__(self, add_detection_masks=False): + self._add_detection_masks = add_detection_masks + + def preprocess(self, inputs): + pass + + def predict(self, preprocessed_inputs, true_image_shapes): + features = slim.conv2d(preprocessed_inputs, 3, 1) + with tf.control_dependencies([features]): + prediction_tensors = { + 'box_encodings': + tf.constant([[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]], + tf.float32), + 'class_predictions_with_background': + tf.constant([[[0.7, 0.6], [0.9, 0.0]]], tf.float32), + } + with tf.control_dependencies( + [tf.convert_to_tensor(features.get_shape().as_list()[1:3])]): + prediction_tensors['anchors'] = tf.constant( + [[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0]], tf.float32) + return prediction_tensors + + def postprocess(self, prediction_tensors, true_image_shapes): + pass + + def restore_map(self, checkpoint_path, from_detection_checkpoint): + pass + + def loss(self, prediction_dict, true_image_shapes): + pass + + def regularization_losses(self): + pass + + def updates(self): + pass + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class ExportTfliteGraphTest(tf.test.TestCase): + + def _save_checkpoint_from_mock_model(self, + checkpoint_path, + use_moving_averages, + quantize=False, + num_channels=3): + g = tf.Graph() + with g.as_default(): + mock_model = FakeModel() + inputs = tf.placeholder(tf.float32, shape=[1, 10, 10, num_channels]) + mock_model.predict(inputs, true_image_shapes=None) + if use_moving_averages: + tf.train.ExponentialMovingAverage(0.0).apply() + tf.train.get_or_create_global_step() + if quantize: + graph_rewriter_config = graph_rewriter_pb2.GraphRewriter() + graph_rewriter_config.quantization.delay = 500000 + graph_rewriter_fn = graph_rewriter_builder.build( + graph_rewriter_config, is_training=False) + graph_rewriter_fn() + + saver = tf.train.Saver() + init = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init) + saver.save(sess, checkpoint_path) + + def _assert_quant_vars_exists(self, tflite_graph_file): + with tf.gfile.Open(tflite_graph_file, mode='rb') as f: + graph_string = f.read() + print(graph_string) + self.assertIn(six.ensure_binary('quant'), graph_string) + + def _import_graph_and_run_inference(self, tflite_graph_file, num_channels=3): + """Imports a tflite graph, runs single inference and returns outputs.""" + graph = tf.Graph() + with graph.as_default(): + graph_def = 
tf.GraphDef() + with tf.gfile.Open(tflite_graph_file, mode='rb') as f: + graph_def.ParseFromString(f.read()) + tf.import_graph_def(graph_def, name='') + input_tensor = graph.get_tensor_by_name('normalized_input_image_tensor:0') + box_encodings = graph.get_tensor_by_name('raw_outputs/box_encodings:0') + class_predictions = graph.get_tensor_by_name( + 'raw_outputs/class_predictions:0') + with self.test_session(graph) as sess: + [box_encodings_np, class_predictions_np] = sess.run( + [box_encodings, class_predictions], + feed_dict={input_tensor: np.random.rand(1, 10, 10, num_channels)}) + return box_encodings_np, class_predictions_np + + def _export_graph(self, + pipeline_config, + num_channels=3, + additional_output_tensors=()): + """Exports a tflite graph.""" + output_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(output_dir, 'model.ckpt') + tflite_graph_file = os.path.join(output_dir, 'tflite_graph.pb') + + quantize = pipeline_config.HasField('graph_rewriter') + self._save_checkpoint_from_mock_model( + trained_checkpoint_prefix, + use_moving_averages=pipeline_config.eval_config.use_moving_averages, + quantize=quantize, + num_channels=num_channels) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + + with tf.Graph().as_default(): + tf.identity( + tf.constant([[1, 2], [3, 4]], tf.uint8), name='UnattachedTensor') + export_tflite_ssd_graph_lib.export_tflite_graph( + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_dir=output_dir, + add_postprocessing_op=False, + max_detections=10, + max_classes_per_detection=1, + additional_output_tensors=additional_output_tensors) + return tflite_graph_file + + def _export_graph_with_postprocessing_op(self, + pipeline_config, + num_channels=3, + additional_output_tensors=()): + """Exports a tflite graph with custom postprocessing op.""" + output_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(output_dir, 'model.ckpt') + tflite_graph_file = os.path.join(output_dir, 'tflite_graph.pb') + + quantize = pipeline_config.HasField('graph_rewriter') + self._save_checkpoint_from_mock_model( + trained_checkpoint_prefix, + use_moving_averages=pipeline_config.eval_config.use_moving_averages, + quantize=quantize, + num_channels=num_channels) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + + with tf.Graph().as_default(): + tf.identity( + tf.constant([[1, 2], [3, 4]], tf.uint8), name='UnattachedTensor') + export_tflite_ssd_graph_lib.export_tflite_graph( + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_dir=output_dir, + add_postprocessing_op=True, + max_detections=10, + max_classes_per_detection=1, + additional_output_tensors=additional_output_tensors) + return tflite_graph_file + + def test_export_tflite_graph_with_moving_averages(self): + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = True + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 + pipeline_config.model.ssd.num_classes = 2 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0 + 
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0 + tflite_graph_file = self._export_graph(pipeline_config) + self.assertTrue(os.path.exists(tflite_graph_file)) + + (box_encodings_np, class_predictions_np + ) = self._import_graph_and_run_inference(tflite_graph_file) + self.assertAllClose(box_encodings_np, + [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]]) + self.assertAllClose(class_predictions_np, [[[0.7, 0.6], [0.9, 0.0]]]) + + def test_export_tflite_graph_without_moving_averages(self): + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 + pipeline_config.model.ssd.num_classes = 2 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0 + tflite_graph_file = self._export_graph(pipeline_config) + self.assertTrue(os.path.exists(tflite_graph_file)) + (box_encodings_np, class_predictions_np + ) = self._import_graph_and_run_inference(tflite_graph_file) + self.assertAllClose(box_encodings_np, + [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]]) + self.assertAllClose(class_predictions_np, [[[0.7, 0.6], [0.9, 0.0]]]) + + def test_export_tflite_graph_grayscale(self): + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 + (pipeline_config.model.ssd.image_resizer.fixed_shape_resizer + ).convert_to_grayscale = True + pipeline_config.model.ssd.num_classes = 2 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0 + tflite_graph_file = self._export_graph(pipeline_config, num_channels=1) + self.assertTrue(os.path.exists(tflite_graph_file)) + (box_encodings_np, + class_predictions_np) = self._import_graph_and_run_inference( + tflite_graph_file, num_channels=1) + self.assertAllClose(box_encodings_np, + [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]]) + self.assertAllClose(class_predictions_np, [[[0.7, 0.6], [0.9, 0.0]]]) + + def test_export_tflite_graph_with_quantization(self): + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 + pipeline_config.graph_rewriter.quantization.delay = 500000 + pipeline_config.model.ssd.num_classes = 2 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0 + tflite_graph_file = self._export_graph(pipeline_config) + self.assertTrue(os.path.exists(tflite_graph_file)) + 
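+    # The quantization graph rewriter should have inserted FakeQuant nodes; check that 'quant' variables show up in the exported graph.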
self._assert_quant_vars_exists(tflite_graph_file) + (box_encodings_np, class_predictions_np + ) = self._import_graph_and_run_inference(tflite_graph_file) + self.assertAllClose(box_encodings_np, + [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]]) + self.assertAllClose(class_predictions_np, [[[0.7, 0.6], [0.9, 0.0]]]) + + def test_export_tflite_graph_with_softmax_score_conversion(self): + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + pipeline_config.model.ssd.post_processing.score_converter = ( + post_processing_pb2.PostProcessing.SOFTMAX) + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 + pipeline_config.model.ssd.num_classes = 2 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0 + tflite_graph_file = self._export_graph(pipeline_config) + self.assertTrue(os.path.exists(tflite_graph_file)) + (box_encodings_np, class_predictions_np + ) = self._import_graph_and_run_inference(tflite_graph_file) + self.assertAllClose(box_encodings_np, + [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]]) + self.assertAllClose(class_predictions_np, + [[[0.524979, 0.475021], [0.710949, 0.28905]]]) + + def test_export_tflite_graph_with_sigmoid_score_conversion(self): + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + pipeline_config.model.ssd.post_processing.score_converter = ( + post_processing_pb2.PostProcessing.SIGMOID) + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 + pipeline_config.model.ssd.num_classes = 2 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0 + tflite_graph_file = self._export_graph(pipeline_config) + self.assertTrue(os.path.exists(tflite_graph_file)) + (box_encodings_np, class_predictions_np + ) = self._import_graph_and_run_inference(tflite_graph_file) + self.assertAllClose(box_encodings_np, + [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]]) + self.assertAllClose(class_predictions_np, + [[[0.668188, 0.645656], [0.710949, 0.5]]]) + + def test_export_tflite_graph_with_postprocessing_op(self): + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + pipeline_config.model.ssd.post_processing.score_converter = ( + post_processing_pb2.PostProcessing.SIGMOID) + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 + pipeline_config.model.ssd.num_classes = 2 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0 + tflite_graph_file = self._export_graph_with_postprocessing_op( + 
pipeline_config) + self.assertTrue(os.path.exists(tflite_graph_file)) + graph = tf.Graph() + with graph.as_default(): + graph_def = tf.GraphDef() + with tf.gfile.Open(tflite_graph_file, mode='rb') as f: + graph_def.ParseFromString(f.read()) + all_op_names = [node.name for node in graph_def.node] + self.assertIn('TFLite_Detection_PostProcess', all_op_names) + self.assertNotIn('UnattachedTensor', all_op_names) + for node in graph_def.node: + if node.name == 'TFLite_Detection_PostProcess': + self.assertTrue(node.attr['_output_quantized'].b) + self.assertTrue( + node.attr['_support_output_type_float_in_quantized_op'].b) + self.assertEqual(node.attr['y_scale'].f, 10.0) + self.assertEqual(node.attr['x_scale'].f, 10.0) + self.assertEqual(node.attr['h_scale'].f, 5.0) + self.assertEqual(node.attr['w_scale'].f, 5.0) + self.assertEqual(node.attr['num_classes'].i, 2) + self.assertTrue( + all([ + t == types_pb2.DT_FLOAT + for t in node.attr['_output_types'].list.type + ])) + + def test_export_tflite_graph_with_additional_tensors(self): + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 + tflite_graph_file = self._export_graph( + pipeline_config, additional_output_tensors=['UnattachedTensor']) + self.assertTrue(os.path.exists(tflite_graph_file)) + graph = tf.Graph() + with graph.as_default(): + graph_def = tf.GraphDef() + with tf.gfile.Open(tflite_graph_file, mode='rb') as f: + graph_def.ParseFromString(f.read()) + all_op_names = [node.name for node in graph_def.node] + self.assertIn('UnattachedTensor', all_op_names) + + def test_export_tflite_graph_with_postprocess_op_and_additional_tensors(self): + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + pipeline_config.model.ssd.post_processing.score_converter = ( + post_processing_pb2.PostProcessing.SIGMOID) + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 + pipeline_config.model.ssd.num_classes = 2 + tflite_graph_file = self._export_graph_with_postprocessing_op( + pipeline_config, additional_output_tensors=['UnattachedTensor']) + self.assertTrue(os.path.exists(tflite_graph_file)) + graph = tf.Graph() + with graph.as_default(): + graph_def = tf.GraphDef() + with tf.gfile.Open(tflite_graph_file, mode='rb') as f: + graph_def.ParseFromString(f.read()) + all_op_names = [node.name for node in graph_def.node] + self.assertIn('TFLite_Detection_PostProcess', all_op_names) + self.assertIn('UnattachedTensor', all_op_names) + + @mock.patch.object(exporter, 'rewrite_nn_resize_op') + def test_export_with_nn_resize_op_not_called_without_fpn(self, mock_get): + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 + tflite_graph_file = self._export_graph_with_postprocessing_op( + pipeline_config) + self.assertTrue(os.path.exists(tflite_graph_file)) + mock_get.assert_not_called() + + @mock.patch.object(exporter, 'rewrite_nn_resize_op') + def test_export_with_nn_resize_op_called_with_fpn(self, mock_get): + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 + 
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 + pipeline_config.model.ssd.feature_extractor.fpn.min_level = 3 + pipeline_config.model.ssd.feature_extractor.fpn.max_level = 7 + tflite_graph_file = self._export_graph_with_postprocessing_op( + pipeline_config) + self.assertTrue(os.path.exists(tflite_graph_file)) + mock_get.assert_called_once() + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/exporter.py b/models/research/object_detection/exporter.py new file mode 100644 index 0000000000000000000000000000000000000000..61c5f7f22db46c88c8bc5c1803b281da4c020967 --- /dev/null +++ b/models/research/object_detection/exporter.py @@ -0,0 +1,656 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Functions to export object detection inference graph.""" +import os +import tempfile +import tensorflow.compat.v1 as tf +import tf_slim as slim +from tensorflow.core.protobuf import saver_pb2 +from tensorflow.python.tools import freeze_graph # pylint: disable=g-direct-tensorflow-import +from object_detection.builders import graph_rewriter_builder +from object_detection.builders import model_builder +from object_detection.core import standard_fields as fields +from object_detection.data_decoders import tf_example_decoder +from object_detection.utils import config_util +from object_detection.utils import shape_utils + +# pylint: disable=g-import-not-at-top +try: + from tensorflow.contrib import tfprof as contrib_tfprof + from tensorflow.contrib.quantize.python import graph_matcher +except ImportError: + # TF 2.0 doesn't ship with contrib. + pass +# pylint: enable=g-import-not-at-top + +freeze_graph_with_def_protos = freeze_graph.freeze_graph_with_def_protos + + +def parse_side_inputs(side_input_shapes_string, side_input_names_string, + side_input_types_string): + """Parses side input flags. + + Args: + side_input_shapes_string: The shape of the side input tensors, provided as a + comma-separated list of integers. A value of -1 is used for unknown + dimensions. A `/` denotes a break, starting the shape of the next side + input tensor. + side_input_names_string: The names of the side input tensors, provided as a + comma-separated list of strings. + side_input_types_string: The type of the side input tensors, provided as a + comma-separated list of types, each of `string`, `integer`, or `float`. + + Returns: + side_input_shapes: A list of shapes. + side_input_names: A list of strings. + side_input_types: A list of tensorflow dtypes. 
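+ +  For example, side_input_shapes_string='1,-1,3/2', side_input_names_string='feat,scale' and side_input_types_string='float,int' (illustrative values) describe two side inputs with shapes [1, None, 3] and [2].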
+ + """ + if side_input_shapes_string: + side_input_shapes = [] + for side_input_shape_list in side_input_shapes_string.split('/'): + side_input_shape = [ + int(dim) if dim != '-1' else None + for dim in side_input_shape_list.split(',') + ] + side_input_shapes.append(side_input_shape) + else: + raise ValueError('When using side_inputs, side_input_shapes must be ' + 'specified in the input flags.') + if side_input_names_string: + side_input_names = list(side_input_names_string.split(',')) + else: + raise ValueError('When using side_inputs, side_input_names must be ' + 'specified in the input flags.') + if side_input_types_string: + typelookup = {'float': tf.float32, 'int': tf.int32, 'string': tf.string} + side_input_types = [ + typelookup[side_input_type] + for side_input_type in side_input_types_string.split(',') + ] + else: + raise ValueError('When using side_inputs, side_input_types must be ' + 'specified in the input flags.') + return side_input_shapes, side_input_names, side_input_types + + +def rewrite_nn_resize_op(is_quantized=False): + """Replaces a custom nearest-neighbor resize op with the Tensorflow version. + + Some graphs use this custom version for TPU-compatibility. + + Args: + is_quantized: True if the default graph is quantized. + """ + def remove_nn(): + """Remove nearest neighbor upsampling structures and replace with TF op.""" + input_pattern = graph_matcher.OpTypePattern( + 'FakeQuantWithMinMaxVars' if is_quantized else '*') + stack_1_pattern = graph_matcher.OpTypePattern( + 'Pack', inputs=[input_pattern, input_pattern], ordered_inputs=False) + stack_2_pattern = graph_matcher.OpTypePattern( + 'Pack', inputs=[stack_1_pattern, stack_1_pattern], ordered_inputs=False) + reshape_pattern = graph_matcher.OpTypePattern( + 'Reshape', inputs=[stack_2_pattern, 'Const'], ordered_inputs=False) + consumer_pattern1 = graph_matcher.OpTypePattern( + 'Add|AddV2|Max|Mul', inputs=[reshape_pattern, '*'], + ordered_inputs=False) + consumer_pattern2 = graph_matcher.OpTypePattern( + 'StridedSlice', inputs=[reshape_pattern, '*', '*', '*'], + ordered_inputs=False) + + def replace_matches(consumer_pattern): + """Search for nearest neighbor pattern and replace with TF op.""" + match_counter = 0 + matcher = graph_matcher.GraphMatcher(consumer_pattern) + for match in matcher.match_graph(tf.get_default_graph()): + match_counter += 1 + projection_op = match.get_op(input_pattern) + reshape_op = match.get_op(reshape_pattern) + consumer_op = match.get_op(consumer_pattern) + nn_resize = tf.image.resize_nearest_neighbor( + projection_op.outputs[0], + reshape_op.outputs[0].shape.dims[1:3], + align_corners=False, + name=os.path.split(reshape_op.name)[0] + '/resize_nearest_neighbor') + + for index, op_input in enumerate(consumer_op.inputs): + if op_input == reshape_op.outputs[0]: + consumer_op._update_input(index, nn_resize) # pylint: disable=protected-access + break + + return match_counter + + match_counter = replace_matches(consumer_pattern1) + match_counter += replace_matches(consumer_pattern2) + + tf.logging.info('Found and fixed {} matches'.format(match_counter)) + return match_counter + + # Applying twice because both inputs to Add could be NN pattern + total_removals = 0 + while remove_nn(): + total_removals += 1 + # This number is chosen based on the nas-fpn architecture. 
+ if total_removals > 4: + raise ValueError('Graph removal encountered a infinite loop.') + + +def replace_variable_values_with_moving_averages(graph, + current_checkpoint_file, + new_checkpoint_file, + no_ema_collection=None): + """Replaces variable values in the checkpoint with their moving averages. + + If the current checkpoint has shadow variables maintaining moving averages of + the variables defined in the graph, this function generates a new checkpoint + where the variables contain the values of their moving averages. + + Args: + graph: a tf.Graph object. + current_checkpoint_file: a checkpoint containing both original variables and + their moving averages. + new_checkpoint_file: file path to write a new checkpoint. + no_ema_collection: A list of namescope substrings to match the variables + to eliminate EMA. + """ + with graph.as_default(): + variable_averages = tf.train.ExponentialMovingAverage(0.0) + ema_variables_to_restore = variable_averages.variables_to_restore() + ema_variables_to_restore = config_util.remove_unecessary_ema( + ema_variables_to_restore, no_ema_collection) + with tf.Session() as sess: + read_saver = tf.train.Saver(ema_variables_to_restore) + read_saver.restore(sess, current_checkpoint_file) + write_saver = tf.train.Saver() + write_saver.save(sess, new_checkpoint_file) + + +def _image_tensor_input_placeholder(input_shape=None): + """Returns input placeholder and a 4-D uint8 image tensor.""" + if input_shape is None: + input_shape = (None, None, None, 3) + input_tensor = tf.placeholder( + dtype=tf.uint8, shape=input_shape, name='image_tensor') + return input_tensor, input_tensor + + +def _side_input_tensor_placeholder(side_input_shape, side_input_name, + side_input_type): + """Returns side input placeholder and side input tensor.""" + side_input_tensor = tf.placeholder( + dtype=side_input_type, shape=side_input_shape, name=side_input_name) + return side_input_tensor, side_input_tensor + + +def _tf_example_input_placeholder(input_shape=None): + """Returns input that accepts a batch of strings with tf examples. + + Args: + input_shape: the shape to resize the output decoded images to (optional). + + Returns: + a tuple of input placeholder and the output decoded images. + """ + batch_tf_example_placeholder = tf.placeholder( + tf.string, shape=[None], name='tf_example') + def decode(tf_example_string_tensor): + tensor_dict = tf_example_decoder.TfExampleDecoder().decode( + tf_example_string_tensor) + image_tensor = tensor_dict[fields.InputDataFields.image] + if input_shape is not None: + image_tensor = tf.image.resize(image_tensor, input_shape[1:3]) + return image_tensor + return (batch_tf_example_placeholder, + shape_utils.static_or_dynamic_map_fn( + decode, + elems=batch_tf_example_placeholder, + dtype=tf.uint8, + parallel_iterations=32, + back_prop=False)) + + +def _encoded_image_string_tensor_input_placeholder(input_shape=None): + """Returns input that accepts a batch of PNG or JPEG strings. + + Args: + input_shape: the shape to resize the output decoded images to (optional). + + Returns: + a tuple of input placeholder and the output decoded images. 
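+  All images in the batch must decode to the same resolution, since tf.map_fn stacks the decoded tensors.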
+ """ + batch_image_str_placeholder = tf.placeholder( + dtype=tf.string, + shape=[None], + name='encoded_image_string_tensor') + def decode(encoded_image_string_tensor): + image_tensor = tf.image.decode_image(encoded_image_string_tensor, + channels=3) + image_tensor.set_shape((None, None, 3)) + if input_shape is not None: + image_tensor = tf.image.resize(image_tensor, input_shape[1:3]) + return image_tensor + return (batch_image_str_placeholder, + tf.map_fn( + decode, + elems=batch_image_str_placeholder, + dtype=tf.uint8, + parallel_iterations=32, + back_prop=False)) + + +input_placeholder_fn_map = { + 'image_tensor': _image_tensor_input_placeholder, + 'encoded_image_string_tensor': + _encoded_image_string_tensor_input_placeholder, + 'tf_example': _tf_example_input_placeholder +} + + +def add_output_tensor_nodes(postprocessed_tensors, + output_collection_name='inference_op'): + """Adds output nodes for detection boxes and scores. + + Adds the following nodes for output tensors - + * num_detections: float32 tensor of shape [batch_size]. + * detection_boxes: float32 tensor of shape [batch_size, num_boxes, 4] + containing detected boxes. + * detection_scores: float32 tensor of shape [batch_size, num_boxes] + containing scores for the detected boxes. + * detection_multiclass_scores: (Optional) float32 tensor of shape + [batch_size, num_boxes, num_classes_with_background] for containing class + score distribution for detected boxes including background if any. + * detection_features: (Optional) float32 tensor of shape + [batch, num_boxes, roi_height, roi_width, depth] + containing classifier features + for each detected box + * detection_classes: float32 tensor of shape [batch_size, num_boxes] + containing class predictions for the detected boxes. + * detection_keypoints: (Optional) float32 tensor of shape + [batch_size, num_boxes, num_keypoints, 2] containing keypoints for each + detection box. + * detection_masks: (Optional) float32 tensor of shape + [batch_size, num_boxes, mask_height, mask_width] containing masks for each + detection box. + + Args: + postprocessed_tensors: a dictionary containing the following fields + 'detection_boxes': [batch, max_detections, 4] + 'detection_scores': [batch, max_detections] + 'detection_multiclass_scores': [batch, max_detections, + num_classes_with_background] + 'detection_features': [batch, num_boxes, roi_height, roi_width, depth] + 'detection_classes': [batch, max_detections] + 'detection_masks': [batch, max_detections, mask_height, mask_width] + (optional). + 'detection_keypoints': [batch, max_detections, num_keypoints, 2] + (optional). + 'num_detections': [batch] + output_collection_name: Name of collection to add output tensors to. + + Returns: + A tensor dict containing the added output tensor nodes. 
+ """ + detection_fields = fields.DetectionResultFields + label_id_offset = 1 + boxes = postprocessed_tensors.get(detection_fields.detection_boxes) + scores = postprocessed_tensors.get(detection_fields.detection_scores) + multiclass_scores = postprocessed_tensors.get( + detection_fields.detection_multiclass_scores) + box_classifier_features = postprocessed_tensors.get( + detection_fields.detection_features) + raw_boxes = postprocessed_tensors.get(detection_fields.raw_detection_boxes) + raw_scores = postprocessed_tensors.get(detection_fields.raw_detection_scores) + classes = postprocessed_tensors.get( + detection_fields.detection_classes) + label_id_offset + keypoints = postprocessed_tensors.get(detection_fields.detection_keypoints) + masks = postprocessed_tensors.get(detection_fields.detection_masks) + num_detections = postprocessed_tensors.get(detection_fields.num_detections) + outputs = {} + outputs[detection_fields.detection_boxes] = tf.identity( + boxes, name=detection_fields.detection_boxes) + outputs[detection_fields.detection_scores] = tf.identity( + scores, name=detection_fields.detection_scores) + if multiclass_scores is not None: + outputs[detection_fields.detection_multiclass_scores] = tf.identity( + multiclass_scores, name=detection_fields.detection_multiclass_scores) + if box_classifier_features is not None: + outputs[detection_fields.detection_features] = tf.identity( + box_classifier_features, + name=detection_fields.detection_features) + outputs[detection_fields.detection_classes] = tf.identity( + classes, name=detection_fields.detection_classes) + outputs[detection_fields.num_detections] = tf.identity( + num_detections, name=detection_fields.num_detections) + if raw_boxes is not None: + outputs[detection_fields.raw_detection_boxes] = tf.identity( + raw_boxes, name=detection_fields.raw_detection_boxes) + if raw_scores is not None: + outputs[detection_fields.raw_detection_scores] = tf.identity( + raw_scores, name=detection_fields.raw_detection_scores) + if keypoints is not None: + outputs[detection_fields.detection_keypoints] = tf.identity( + keypoints, name=detection_fields.detection_keypoints) + if masks is not None: + outputs[detection_fields.detection_masks] = tf.identity( + masks, name=detection_fields.detection_masks) + for output_key in outputs: + tf.add_to_collection(output_collection_name, outputs[output_key]) + + return outputs + + +def write_saved_model(saved_model_path, + frozen_graph_def, + inputs, + outputs): + """Writes SavedModel to disk. + + If checkpoint_path is not None bakes the weights into the graph thereby + eliminating the need of checkpoint files during inference. If the model + was trained with moving averages, setting use_moving_averages to true + restores the moving averages, otherwise the original set of variables + is restored. + + Args: + saved_model_path: Path to write SavedModel. + frozen_graph_def: tf.GraphDef holding frozen graph. + inputs: A tensor dictionary containing the inputs to a DetectionModel. + outputs: A tensor dictionary containing the outputs of a DetectionModel. 
+ """ + with tf.Graph().as_default(): + with tf.Session() as sess: + + tf.import_graph_def(frozen_graph_def, name='') + + builder = tf.saved_model.builder.SavedModelBuilder(saved_model_path) + + tensor_info_inputs = {} + if isinstance(inputs, dict): + for k, v in inputs.items(): + tensor_info_inputs[k] = tf.saved_model.utils.build_tensor_info(v) + else: + tensor_info_inputs['inputs'] = tf.saved_model.utils.build_tensor_info( + inputs) + tensor_info_outputs = {} + for k, v in outputs.items(): + tensor_info_outputs[k] = tf.saved_model.utils.build_tensor_info(v) + + detection_signature = ( + tf.saved_model.signature_def_utils.build_signature_def( + inputs=tensor_info_inputs, + outputs=tensor_info_outputs, + method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME + )) + + builder.add_meta_graph_and_variables( + sess, + [tf.saved_model.tag_constants.SERVING], + signature_def_map={ + tf.saved_model.signature_constants + .DEFAULT_SERVING_SIGNATURE_DEF_KEY: + detection_signature, + }, + ) + builder.save() + + +def write_graph_and_checkpoint(inference_graph_def, + model_path, + input_saver_def, + trained_checkpoint_prefix): + """Writes the graph and the checkpoint into disk.""" + for node in inference_graph_def.node: + node.device = '' + with tf.Graph().as_default(): + tf.import_graph_def(inference_graph_def, name='') + with tf.Session() as sess: + saver = tf.train.Saver( + saver_def=input_saver_def, save_relative_paths=True) + saver.restore(sess, trained_checkpoint_prefix) + saver.save(sess, model_path) + + +def _get_outputs_from_inputs(input_tensors, detection_model, + output_collection_name, **side_inputs): + inputs = tf.cast(input_tensors, dtype=tf.float32) + preprocessed_inputs, true_image_shapes = detection_model.preprocess(inputs) + output_tensors = detection_model.predict( + preprocessed_inputs, true_image_shapes, **side_inputs) + postprocessed_tensors = detection_model.postprocess( + output_tensors, true_image_shapes) + return add_output_tensor_nodes(postprocessed_tensors, + output_collection_name) + + +def build_detection_graph(input_type, detection_model, input_shape, + output_collection_name, graph_hook_fn, + use_side_inputs=False, side_input_shapes=None, + side_input_names=None, side_input_types=None): + """Build the detection graph.""" + if input_type not in input_placeholder_fn_map: + raise ValueError('Unknown input type: {}'.format(input_type)) + placeholder_args = {} + side_inputs = {} + if input_shape is not None: + if (input_type != 'image_tensor' and + input_type != 'encoded_image_string_tensor' and + input_type != 'tf_example' and + input_type != 'tf_sequence_example'): + raise ValueError('Can only specify input shape for `image_tensor`, ' + '`encoded_image_string_tensor`, `tf_example`, ' + ' or `tf_sequence_example` inputs.') + placeholder_args['input_shape'] = input_shape + placeholder_tensor, input_tensors = input_placeholder_fn_map[input_type]( + **placeholder_args) + placeholder_tensors = {'inputs': placeholder_tensor} + if use_side_inputs: + for idx, side_input_name in enumerate(side_input_names): + side_input_placeholder, side_input = _side_input_tensor_placeholder( + side_input_shapes[idx], side_input_name, side_input_types[idx]) + print(side_input) + side_inputs[side_input_name] = side_input + placeholder_tensors[side_input_name] = side_input_placeholder + outputs = _get_outputs_from_inputs( + input_tensors=input_tensors, + detection_model=detection_model, + output_collection_name=output_collection_name, + **side_inputs) + + # Add global step to the graph. 
+ slim.get_or_create_global_step() + + if graph_hook_fn: graph_hook_fn() + + return outputs, placeholder_tensors + + +def _export_inference_graph(input_type, + detection_model, + use_moving_averages, + trained_checkpoint_prefix, + output_directory, + additional_output_tensor_names=None, + input_shape=None, + output_collection_name='inference_op', + graph_hook_fn=None, + write_inference_graph=False, + temp_checkpoint_prefix='', + use_side_inputs=False, + side_input_shapes=None, + side_input_names=None, + side_input_types=None): + """Export helper.""" + tf.gfile.MakeDirs(output_directory) + frozen_graph_path = os.path.join(output_directory, + 'frozen_inference_graph.pb') + saved_model_path = os.path.join(output_directory, 'saved_model') + model_path = os.path.join(output_directory, 'model.ckpt') + + outputs, placeholder_tensor_dict = build_detection_graph( + input_type=input_type, + detection_model=detection_model, + input_shape=input_shape, + output_collection_name=output_collection_name, + graph_hook_fn=graph_hook_fn, + use_side_inputs=use_side_inputs, + side_input_shapes=side_input_shapes, + side_input_names=side_input_names, + side_input_types=side_input_types) + + profile_inference_graph(tf.get_default_graph()) + saver_kwargs = {} + if use_moving_averages: + if not temp_checkpoint_prefix: + # This check is to be compatible with both version of SaverDef. + if os.path.isfile(trained_checkpoint_prefix): + saver_kwargs['write_version'] = saver_pb2.SaverDef.V1 + temp_checkpoint_prefix = tempfile.NamedTemporaryFile().name + else: + temp_checkpoint_prefix = tempfile.mkdtemp() + replace_variable_values_with_moving_averages( + tf.get_default_graph(), trained_checkpoint_prefix, + temp_checkpoint_prefix) + checkpoint_to_use = temp_checkpoint_prefix + else: + checkpoint_to_use = trained_checkpoint_prefix + + saver = tf.train.Saver(**saver_kwargs) + input_saver_def = saver.as_saver_def() + + write_graph_and_checkpoint( + inference_graph_def=tf.get_default_graph().as_graph_def(), + model_path=model_path, + input_saver_def=input_saver_def, + trained_checkpoint_prefix=checkpoint_to_use) + if write_inference_graph: + inference_graph_def = tf.get_default_graph().as_graph_def() + inference_graph_path = os.path.join(output_directory, + 'inference_graph.pbtxt') + for node in inference_graph_def.node: + node.device = '' + with tf.gfile.GFile(inference_graph_path, 'wb') as f: + f.write(str(inference_graph_def)) + + if additional_output_tensor_names is not None: + output_node_names = ','.join(list(outputs.keys())+( + additional_output_tensor_names)) + else: + output_node_names = ','.join(outputs.keys()) + + frozen_graph_def = freeze_graph.freeze_graph_with_def_protos( + input_graph_def=tf.get_default_graph().as_graph_def(), + input_saver_def=input_saver_def, + input_checkpoint=checkpoint_to_use, + output_node_names=output_node_names, + restore_op_name='save/restore_all', + filename_tensor_name='save/Const:0', + output_graph=frozen_graph_path, + clear_devices=True, + initializer_nodes='') + + write_saved_model(saved_model_path, frozen_graph_def, + placeholder_tensor_dict, outputs) + + +def export_inference_graph(input_type, + pipeline_config, + trained_checkpoint_prefix, + output_directory, + input_shape=None, + output_collection_name='inference_op', + additional_output_tensor_names=None, + write_inference_graph=False, + use_side_inputs=False, + side_input_shapes=None, + side_input_names=None, + side_input_types=None): + """Exports inference graph for the model specified in the pipeline config. 
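+ +  The export writes a frozen graph (`frozen_inference_graph.pb`), a SavedModel under `saved_model/`, checkpoint files (`model.ckpt*`) and a copy of the pipeline config (`pipeline.config`) into `output_directory`.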
+ + Args: + input_type: Type of input for the graph. Can be one of ['image_tensor', + 'encoded_image_string_tensor', 'tf_example']. + pipeline_config: pipeline_pb2.TrainAndEvalPipelineConfig proto. + trained_checkpoint_prefix: Path to the trained checkpoint file. + output_directory: Path to write outputs. + input_shape: Sets a fixed shape for an `image_tensor` input. If not + specified, will default to [None, None, None, 3]. + output_collection_name: Name of collection to add output tensors to. + If None, does not add output tensors to a collection. + additional_output_tensor_names: list of additional output + tensors to include in the frozen graph. + write_inference_graph: If true, writes inference graph to disk. + use_side_inputs: If True, the model requires side_inputs. + side_input_shapes: List of shapes of the side input tensors, + required if use_side_inputs is True. + side_input_names: List of names of the side input tensors, + required if use_side_inputs is True. + side_input_types: List of types of the side input tensors, + required if use_side_inputs is True. + """ + detection_model = model_builder.build(pipeline_config.model, + is_training=False) + graph_rewriter_fn = None + if pipeline_config.HasField('graph_rewriter'): + graph_rewriter_config = pipeline_config.graph_rewriter + graph_rewriter_fn = graph_rewriter_builder.build(graph_rewriter_config, + is_training=False) + _export_inference_graph( + input_type, + detection_model, + pipeline_config.eval_config.use_moving_averages, + trained_checkpoint_prefix, + output_directory, + additional_output_tensor_names, + input_shape, + output_collection_name, + graph_hook_fn=graph_rewriter_fn, + write_inference_graph=write_inference_graph, + use_side_inputs=use_side_inputs, + side_input_shapes=side_input_shapes, + side_input_names=side_input_names, + side_input_types=side_input_types) + pipeline_config.eval_config.use_moving_averages = False + config_util.save_pipeline_config(pipeline_config, output_directory) + + +def profile_inference_graph(graph): + """Profiles the inference graph. + + Prints model parameters and computation FLOPs given an inference graph. + BatchNorms are excluded from the parameter count due to the fact that + BatchNorms are usually folded. BatchNorm, Initializer, Regularizer + and BiasAdd are not considered in FLOP count. + + Args: + graph: the inference graph. + """ + tfprof_vars_option = ( + contrib_tfprof.model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS) + tfprof_flops_option = contrib_tfprof.model_analyzer.FLOAT_OPS_OPTIONS + + # Batchnorm is usually folded during inference. + tfprof_vars_option['trim_name_regexes'] = ['.*BatchNorm.*'] + # Initializer and Regularizer are only used in training. + tfprof_flops_option['trim_name_regexes'] = [ + '.*BatchNorm.*', '.*Initializer.*', '.*Regularizer.*', '.*BiasAdd.*' + ] + + contrib_tfprof.model_analyzer.print_model_analysis( + graph, tfprof_options=tfprof_vars_option) + + contrib_tfprof.model_analyzer.print_model_analysis( + graph, tfprof_options=tfprof_flops_option) diff --git a/models/research/object_detection/exporter_lib_tf2_test.py b/models/research/object_detection/exporter_lib_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..d30d80cb09a14206d0fbfc1614d99aeccc25489b --- /dev/null +++ b/models/research/object_detection/exporter_lib_tf2_test.py @@ -0,0 +1,237 @@ +# Lint as: python2, python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Test for exporter_lib_v2.py.""" + +from __future__ import division +import io +import os +import unittest +from absl.testing import parameterized +import numpy as np +from PIL import Image +import six + +import tensorflow.compat.v2 as tf + +from object_detection import exporter_lib_v2 +from object_detection.builders import model_builder +from object_detection.core import model +from object_detection.core import standard_fields as fields +from object_detection.protos import pipeline_pb2 +from object_detection.utils import dataset_util +from object_detection.utils import tf_version + +if six.PY2: + import mock # pylint: disable=g-importing-member,g-import-not-at-top +else: + from unittest import mock # pylint: disable=g-importing-member,g-import-not-at-top + + +class FakeModel(model.DetectionModel): + + def __init__(self, conv_weight_scalar=1.0): + super(FakeModel, self).__init__(num_classes=2) + self._conv = tf.keras.layers.Conv2D( + filters=1, kernel_size=1, strides=(1, 1), padding='valid', + kernel_initializer=tf.keras.initializers.Constant( + value=conv_weight_scalar)) + + def preprocess(self, inputs): + true_image_shapes = [] # Doesn't matter for the fake model. 
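+    # The fake model passes inputs through unchanged; no real preprocessing is needed.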
+ return tf.identity(inputs), true_image_shapes + + def predict(self, preprocessed_inputs, true_image_shapes): + return {'image': self._conv(preprocessed_inputs)} + + def postprocess(self, prediction_dict, true_image_shapes): + predict_tensor_sum = tf.reduce_sum(prediction_dict['image']) + with tf.control_dependencies(list(prediction_dict.values())): + postprocessed_tensors = { + 'detection_boxes': tf.constant([[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0]]], tf.float32), + 'detection_scores': predict_tensor_sum + tf.constant( + [[0.7, 0.6], [0.9, 0.0]], tf.float32), + 'detection_classes': tf.constant([[0, 1], + [1, 0]], tf.float32), + 'num_detections': tf.constant([2, 1], tf.float32), + } + return postprocessed_tensors + + def restore_map(self, checkpoint_path, fine_tune_checkpoint_type): + pass + + def loss(self, prediction_dict, true_image_shapes): + pass + + def regularization_losses(self): + pass + + def updates(self): + pass + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class ExportInferenceGraphTest(tf.test.TestCase, parameterized.TestCase): + + def _save_checkpoint_from_mock_model( + self, checkpoint_dir, conv_weight_scalar=6.0): + mock_model = FakeModel(conv_weight_scalar) + fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32) + preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image) + predictions = mock_model.predict(preprocessed_inputs, true_image_shapes) + mock_model.postprocess(predictions, true_image_shapes) + + ckpt = tf.train.Checkpoint(model=mock_model) + exported_checkpoint_manager = tf.train.CheckpointManager( + ckpt, checkpoint_dir, max_to_keep=1) + exported_checkpoint_manager.save(checkpoint_number=0) + + @parameterized.parameters( + {'input_type': 'image_tensor'}, + {'input_type': 'encoded_image_string_tensor'}, + {'input_type': 'tf_example'}, + ) + def test_export_yields_correct_directory_structure( + self, input_type='image_tensor'): + tmp_dir = self.get_temp_dir() + self._save_checkpoint_from_mock_model(tmp_dir) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + output_directory = os.path.join(tmp_dir, 'output') + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + exporter_lib_v2.export_inference_graph( + input_type=input_type, + pipeline_config=pipeline_config, + trained_checkpoint_dir=tmp_dir, + output_directory=output_directory) + self.assertTrue(os.path.exists(os.path.join( + output_directory, 'saved_model', 'saved_model.pb'))) + self.assertTrue(os.path.exists(os.path.join( + output_directory, 'saved_model', 'variables', 'variables.index'))) + self.assertTrue(os.path.exists(os.path.join( + output_directory, 'saved_model', 'variables', + 'variables.data-00000-of-00001'))) + self.assertTrue(os.path.exists(os.path.join( + output_directory, 'checkpoint', 'ckpt-0.index'))) + self.assertTrue(os.path.exists(os.path.join( + output_directory, 'checkpoint', 'ckpt-0.data-00000-of-00001'))) + self.assertTrue(os.path.exists(os.path.join( + output_directory, 'pipeline.config'))) + + def get_dummy_input(self, input_type): + """Get dummy input for the given input type.""" + + if input_type == 'image_tensor': + return np.zeros(shape=(1, 20, 20, 3), dtype=np.uint8) + if input_type == 'float_image_tensor': + return np.zeros(shape=(1, 20, 20, 3), dtype=np.float32) + elif input_type == 'encoded_image_string_tensor': + image = Image.new('RGB', (20, 20)) + byte_io = io.BytesIO() + 
image.save(byte_io, 'PNG') + return [byte_io.getvalue()] + elif input_type == 'tf_example': + image_tensor = tf.zeros((20, 20, 3), dtype=tf.uint8) + encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).numpy() + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/source_id': + dataset_util.bytes_feature(six.b('image_id')), + })).SerializeToString() + return [example] + + @parameterized.parameters( + {'input_type': 'image_tensor'}, + {'input_type': 'encoded_image_string_tensor'}, + {'input_type': 'tf_example'}, + {'input_type': 'float_image_tensor'}, + ) + def test_export_saved_model_and_run_inference( + self, input_type='image_tensor'): + tmp_dir = self.get_temp_dir() + self._save_checkpoint_from_mock_model(tmp_dir) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + output_directory = os.path.join(tmp_dir, 'output') + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + exporter_lib_v2.export_inference_graph( + input_type=input_type, + pipeline_config=pipeline_config, + trained_checkpoint_dir=tmp_dir, + output_directory=output_directory) + + saved_model_path = os.path.join(output_directory, 'saved_model') + detect_fn = tf.saved_model.load(saved_model_path) + image = self.get_dummy_input(input_type) + detections = detect_fn(image) + + detection_fields = fields.DetectionResultFields + self.assertAllClose(detections[detection_fields.detection_boxes], + [[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0]]]) + self.assertAllClose(detections[detection_fields.detection_scores], + [[0.7, 0.6], [0.9, 0.0]]) + self.assertAllClose(detections[detection_fields.detection_classes], + [[1, 2], [2, 1]]) + self.assertAllClose(detections[detection_fields.num_detections], [2, 1]) + + def test_export_checkpoint_and_run_inference_with_image(self): + tmp_dir = self.get_temp_dir() + self._save_checkpoint_from_mock_model(tmp_dir, conv_weight_scalar=2.0) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + output_directory = os.path.join(tmp_dir, 'output') + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + exporter_lib_v2.export_inference_graph( + input_type='image_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_dir=tmp_dir, + output_directory=output_directory) + + mock_model = FakeModel() + ckpt = tf.compat.v2.train.Checkpoint( + model=mock_model) + checkpoint_dir = os.path.join(tmp_dir, 'output', 'checkpoint') + manager = tf.compat.v2.train.CheckpointManager( + ckpt, checkpoint_dir, max_to_keep=7) + ckpt.restore(manager.latest_checkpoint).expect_partial() + + fake_image = tf.ones(shape=[1, 5, 5, 3], dtype=tf.float32) + preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image) + predictions = mock_model.predict(preprocessed_inputs, true_image_shapes) + detections = mock_model.postprocess(predictions, true_image_shapes) + + # 150 = conv_weight_scalar * height * width * channels = 2 * 5 * 5 * 3. 
+ self.assertAllClose(detections['detection_scores'], + [[150 + 0.7, 150 + 0.6], [150 + 0.9, 150 + 0.0]]) + + +if __name__ == '__main__': + tf.enable_v2_behavior() + tf.test.main() diff --git a/models/research/object_detection/exporter_lib_v2.py b/models/research/object_detection/exporter_lib_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..a7ecb45adb14f1b20c2291a3cf67376ad07194eb --- /dev/null +++ b/models/research/object_detection/exporter_lib_v2.py @@ -0,0 +1,182 @@ +# Lint as: python2, python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Functions to export object detection inference graph.""" +import os +import tensorflow.compat.v2 as tf +from object_detection.builders import model_builder +from object_detection.core import standard_fields as fields +from object_detection.data_decoders import tf_example_decoder +from object_detection.utils import config_util + + +def _decode_image(encoded_image_string_tensor): + image_tensor = tf.image.decode_image(encoded_image_string_tensor, + channels=3) + image_tensor.set_shape((None, None, 3)) + return image_tensor + + +def _decode_tf_example(tf_example_string_tensor): + tensor_dict = tf_example_decoder.TfExampleDecoder().decode( + tf_example_string_tensor) + image_tensor = tensor_dict[fields.InputDataFields.image] + return image_tensor + + +class DetectionInferenceModule(tf.Module): + """Detection Inference Module.""" + + def __init__(self, detection_model): + """Initializes a module for detection. + + Args: + detection_model: The detection model to use for inference. + """ + self._model = detection_model + + def _run_inference_on_images(self, image): + """Cast image to float and run inference. + + Args: + image: uint8 Tensor of shape [1, None, None, 3] + Returns: + Tensor dictionary holding detections. 
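+      Detection classes are returned 1-based (a label id offset of 1 is added) and every output tensor is cast to float32.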
+ """ + label_id_offset = 1 + + image = tf.cast(image, tf.float32) + image, shapes = self._model.preprocess(image) + prediction_dict = self._model.predict(image, shapes) + detections = self._model.postprocess(prediction_dict, shapes) + classes_field = fields.DetectionResultFields.detection_classes + detections[classes_field] = ( + tf.cast(detections[classes_field], tf.float32) + label_id_offset) + + for key, val in detections.items(): + detections[key] = tf.cast(val, tf.float32) + + return detections + + +class DetectionFromImageModule(DetectionInferenceModule): + """Detection Inference Module for image inputs.""" + + @tf.function( + input_signature=[ + tf.TensorSpec(shape=[1, None, None, 3], dtype=tf.uint8)]) + def __call__(self, input_tensor): + return self._run_inference_on_images(input_tensor) + + +class DetectionFromFloatImageModule(DetectionInferenceModule): + """Detection Inference Module for float image inputs.""" + + @tf.function( + input_signature=[ + tf.TensorSpec(shape=[1, None, None, 3], dtype=tf.float32)]) + def __call__(self, input_tensor): + return self._run_inference_on_images(input_tensor) + + +class DetectionFromEncodedImageModule(DetectionInferenceModule): + """Detection Inference Module for encoded image string inputs.""" + + @tf.function(input_signature=[tf.TensorSpec(shape=[1], dtype=tf.string)]) + def __call__(self, input_tensor): + with tf.device('cpu:0'): + image = tf.map_fn( + _decode_image, + elems=input_tensor, + dtype=tf.uint8, + parallel_iterations=32, + back_prop=False) + return self._run_inference_on_images(image) + + +class DetectionFromTFExampleModule(DetectionInferenceModule): + """Detection Inference Module for TF.Example inputs.""" + + @tf.function(input_signature=[tf.TensorSpec(shape=[1], dtype=tf.string)]) + def __call__(self, input_tensor): + with tf.device('cpu:0'): + image = tf.map_fn( + _decode_tf_example, + elems=input_tensor, + dtype=tf.uint8, + parallel_iterations=32, + back_prop=False) + return self._run_inference_on_images(image) + +DETECTION_MODULE_MAP = { + 'image_tensor': DetectionFromImageModule, + 'encoded_image_string_tensor': + DetectionFromEncodedImageModule, + 'tf_example': DetectionFromTFExampleModule, + 'float_image_tensor': DetectionFromFloatImageModule +} + + +def export_inference_graph(input_type, + pipeline_config, + trained_checkpoint_dir, + output_directory): + """Exports inference graph for the model specified in the pipeline config. + + This function creates `output_directory` if it does not already exist, + which will hold a copy of the pipeline config with filename `pipeline.config`, + and two subdirectories named `checkpoint` and `saved_model` + (containing the exported checkpoint and SavedModel respectively). + + Args: + input_type: Type of input for the graph. Can be one of ['image_tensor', + 'encoded_image_string_tensor', 'tf_example']. + pipeline_config: pipeline_pb2.TrainAndEvalPipelineConfig proto. + trained_checkpoint_dir: Path to the trained checkpoint file. + output_directory: Path to write outputs. + Raises: + ValueError: if input_type is invalid. 
+ """ + output_checkpoint_directory = os.path.join(output_directory, 'checkpoint') + output_saved_model_directory = os.path.join(output_directory, 'saved_model') + + detection_model = model_builder.build(pipeline_config.model, + is_training=False) + + ckpt = tf.train.Checkpoint( + model=detection_model) + manager = tf.train.CheckpointManager( + ckpt, trained_checkpoint_dir, max_to_keep=1) + status = ckpt.restore(manager.latest_checkpoint).expect_partial() + + if input_type not in DETECTION_MODULE_MAP: + raise ValueError('Unrecognized `input_type`') + detection_module = DETECTION_MODULE_MAP[input_type](detection_model) + # Getting the concrete function traces the graph and forces variables to + # be constructed --- only after this can we save the checkpoint and + # saved model. + concrete_function = detection_module.__call__.get_concrete_function() + status.assert_existing_objects_matched() + + exported_checkpoint_manager = tf.train.CheckpointManager( + ckpt, output_checkpoint_directory, max_to_keep=1) + exported_checkpoint_manager.save(checkpoint_number=0) + + tf.saved_model.save(detection_module, + output_saved_model_directory, + signatures=concrete_function) + + config_util.save_pipeline_config(pipeline_config, output_directory) diff --git a/models/research/object_detection/exporter_main_v2.py b/models/research/object_detection/exporter_main_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..a2ba8456039d4584e5998d619f36747d58018418 --- /dev/null +++ b/models/research/object_detection/exporter_main_v2.py @@ -0,0 +1,126 @@ +# Lint as: python2, python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""Tool to export an object detection model for inference. + +Prepares an object detection tensorflow graph for inference using model +configuration and a trained checkpoint. Outputs associated checkpoint files, +a SavedModel, and a copy of the model config. + +The inference graph contains one of three input nodes depending on the user +specified option. + * `image_tensor`: Accepts a uint8 4-D tensor of shape [1, None, None, 3] + * `float_image_tensor`: Accepts a float32 4-D tensor of shape + [1, None, None, 3] + * `encoded_image_string_tensor`: Accepts a 1-D string tensor of shape [None] + containing encoded PNG or JPEG images. Image resolutions are expected to be + the same if more than 1 image is provided. + * `tf_example`: Accepts a 1-D string tensor of shape [None] containing + serialized TFExample protos. Image resolutions are expected to be the same + if more than 1 image is provided. + +and the following output nodes returned by the model.postprocess(..): + * `num_detections`: Outputs float32 tensors of the form [batch] + that specifies the number of valid boxes per image in the batch. + * `detection_boxes`: Outputs float32 tensors of the form + [batch, num_boxes, 4] containing detected boxes. 
+ * `detection_scores`: Outputs float32 tensors of the form + [batch, num_boxes] containing class scores for the detections. + * `detection_classes`: Outputs float32 tensors of the form + [batch, num_boxes] containing classes for the detections. + + +Example Usage: +-------------- +python exporter_main_v2.py \ + --input_type image_tensor \ + --pipeline_config_path path/to/ssd_inception_v2.config \ + --trained_checkpoint_dir path/to/checkpoint \ + --output_directory path/to/exported_model_directory + +The expected output would be in the directory +path/to/exported_model_directory (which is created if it does not exist) +holding two subdirectories (corresponding to checkpoint and SavedModel, +respectively) and a copy of the pipeline config. + +Config overrides (see the `config_override` flag) are text protobufs +(also of type pipeline_pb2.TrainEvalPipelineConfig) which are used to override +certain fields in the provided pipeline_config_path. These are useful for +making small changes to the inference graph that differ from the training or +eval config. + +Example Usage (in which we change the second stage post-processing score +threshold to be 0.5): + +python exporter_main_v2.py \ + --input_type image_tensor \ + --pipeline_config_path path/to/ssd_inception_v2.config \ + --trained_checkpoint_dir path/to/checkpoint \ + --output_directory path/to/exported_model_directory \ + --config_override " \ + model{ \ + faster_rcnn { \ + second_stage_post_processing { \ + batch_non_max_suppression { \ + score_threshold: 0.5 \ + } \ + } \ + } \ + }" +""" +from absl import app +from absl import flags + +import tensorflow.compat.v2 as tf +from google.protobuf import text_format +from object_detection import exporter_lib_v2 +from object_detection.protos import pipeline_pb2 + +tf.enable_v2_behavior() + + +FLAGS = flags.FLAGS + +flags.DEFINE_string('input_type', 'image_tensor', 'Type of input node. Can be ' + 'one of [`image_tensor`, `encoded_image_string_tensor`, ' + '`tf_example`, `float_image_tensor`]') +flags.DEFINE_string('pipeline_config_path', None, + 'Path to a pipeline_pb2.TrainEvalPipelineConfig config ' + 'file.') +flags.DEFINE_string('trained_checkpoint_dir', None, + 'Path to trained checkpoint directory') +flags.DEFINE_string('output_directory', None, 'Path to write outputs.') +flags.DEFINE_string('config_override', '', + 'pipeline_pb2.TrainEvalPipelineConfig ' + 'text proto to override pipeline_config_path.') + +flags.mark_flag_as_required('pipeline_config_path') +flags.mark_flag_as_required('trained_checkpoint_dir') +flags.mark_flag_as_required('output_directory') + + +def main(_): + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + with tf.io.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f: + text_format.Merge(f.read(), pipeline_config) + text_format.Merge(FLAGS.config_override, pipeline_config) + exporter_lib_v2.export_inference_graph( + FLAGS.input_type, pipeline_config, FLAGS.trained_checkpoint_dir, + FLAGS.output_directory) + + +if __name__ == '__main__': + app.run(main) diff --git a/models/research/object_detection/exporter_tf1_test.py b/models/research/object_detection/exporter_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..40bdb966f584fdfb56eb05a2ecb9fd5c551941d5 --- /dev/null +++ b/models/research/object_detection/exporter_tf1_test.py @@ -0,0 +1,1203 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.export_inference_graph.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import os +import unittest +import numpy as np +import six +import tensorflow.compat.v1 as tf +from google.protobuf import text_format +from tensorflow.python.framework import dtypes +from tensorflow.python.ops import array_ops +from tensorflow.python.tools import strip_unused_lib +from object_detection import exporter +from object_detection.builders import graph_rewriter_builder +from object_detection.builders import model_builder +from object_detection.core import model +from object_detection.protos import graph_rewriter_pb2 +from object_detection.protos import pipeline_pb2 +from object_detection.utils import ops +from object_detection.utils import tf_version +from object_detection.utils import variables_helper + +if six.PY2: + import mock # pylint: disable=g-import-not-at-top +else: + mock = unittest.mock # pylint: disable=g-import-not-at-top, g-importing-member + +# pylint: disable=g-import-not-at-top +try: + import tf_slim as slim +except ImportError: + # TF 2.0 doesn't ship with contrib. + pass +# pylint: enable=g-import-not-at-top + + +class FakeModel(model.DetectionModel): + + def __init__(self, add_detection_keypoints=False, add_detection_masks=False, + add_detection_features=False): + self._add_detection_keypoints = add_detection_keypoints + self._add_detection_masks = add_detection_masks + self._add_detection_features = add_detection_features + + def preprocess(self, inputs): + true_image_shapes = [] # Doesn't matter for the fake model. 
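+ # The fake model does no real preprocessing; inputs are passed through unchanged via tf.identity.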
+ return tf.identity(inputs), true_image_shapes + + def predict(self, preprocessed_inputs, true_image_shapes): + return {'image': tf.layers.conv2d(preprocessed_inputs, 3, 1)} + + def postprocess(self, prediction_dict, true_image_shapes): + with tf.control_dependencies(list(prediction_dict.values())): + postprocessed_tensors = { + 'detection_boxes': tf.constant([[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0]]], tf.float32), + 'detection_scores': tf.constant([[0.7, 0.6], + [0.9, 0.0]], tf.float32), + 'detection_multiclass_scores': tf.constant([[[0.3, 0.7], [0.4, 0.6]], + [[0.1, 0.9], [0.0, 0.0]]], + tf.float32), + 'detection_classes': tf.constant([[0, 1], + [1, 0]], tf.float32), + 'num_detections': tf.constant([2, 1], tf.float32), + 'raw_detection_boxes': tf.constant([[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.5, 0.0, 0.5]]], + tf.float32), + 'raw_detection_scores': tf.constant([[0.7, 0.6], + [0.9, 0.5]], tf.float32), + } + if self._add_detection_keypoints: + postprocessed_tensors['detection_keypoints'] = tf.constant( + np.arange(48).reshape([2, 2, 6, 2]), tf.float32) + if self._add_detection_masks: + postprocessed_tensors['detection_masks'] = tf.constant( + np.arange(64).reshape([2, 2, 4, 4]), tf.float32) + if self._add_detection_features: + # let fake detection features have shape [4, 4, 10] + postprocessed_tensors['detection_features'] = tf.constant( + np.ones((2, 2, 4, 4, 10)), tf.float32) + + return postprocessed_tensors + + def restore_map(self, checkpoint_path, fine_tune_checkpoint_type): + pass + + def loss(self, prediction_dict, true_image_shapes): + pass + + def regularization_losses(self): + pass + + def updates(self): + pass + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class ExportInferenceGraphTest(tf.test.TestCase): + + def _save_checkpoint_from_mock_model(self, + checkpoint_path, + use_moving_averages, + enable_quantization=False): + g = tf.Graph() + with g.as_default(): + mock_model = FakeModel() + preprocessed_inputs, true_image_shapes = mock_model.preprocess( + tf.placeholder(tf.float32, shape=[None, None, None, 3])) + predictions = mock_model.predict(preprocessed_inputs, true_image_shapes) + mock_model.postprocess(predictions, true_image_shapes) + if use_moving_averages: + tf.train.ExponentialMovingAverage(0.0).apply() + tf.train.get_or_create_global_step() + if enable_quantization: + graph_rewriter_config = graph_rewriter_pb2.GraphRewriter() + graph_rewriter_config.quantization.delay = 500000 + graph_rewriter_fn = graph_rewriter_builder.build( + graph_rewriter_config, is_training=False) + graph_rewriter_fn() + saver = tf.train.Saver() + init = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init) + saver.save(sess, checkpoint_path) + + def _load_inference_graph(self, inference_graph_path, is_binary=True): + od_graph = tf.Graph() + with od_graph.as_default(): + od_graph_def = tf.GraphDef() + with tf.gfile.GFile(inference_graph_path, mode='rb') as fid: + if is_binary: + od_graph_def.ParseFromString(fid.read()) + else: + text_format.Parse(fid.read(), od_graph_def) + tf.import_graph_def(od_graph_def, name='') + return od_graph + + def _create_tf_example(self, image_array): + with self.test_session(): + encoded_image = tf.image.encode_jpeg(tf.constant(image_array)).eval() + def _bytes_feature(value): + return tf.train.Feature( + bytes_list=tf.train.BytesList(value=[six.ensure_binary(value)])) + + example = 
tf.train.Example(features=tf.train.Features(feature={ + 'image/encoded': _bytes_feature(encoded_image), + 'image/format': _bytes_feature('jpg'), + 'image/source_id': _bytes_feature('image_id') + })).SerializeToString() + return example + + def test_export_graph_with_image_tensor_input(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=False) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + output_directory = os.path.join(tmp_dir, 'output') + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + exporter.export_inference_graph( + input_type='image_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + self.assertTrue(os.path.exists(os.path.join( + output_directory, 'saved_model', 'saved_model.pb'))) + + def test_write_inference_graph(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=False) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + output_directory = os.path.join(tmp_dir, 'output') + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + exporter.export_inference_graph( + input_type='image_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory, + write_inference_graph=True) + self.assertTrue(os.path.exists(os.path.join( + output_directory, 'inference_graph.pbtxt'))) + + def test_export_graph_with_fixed_size_image_tensor_input(self): + input_shape = [1, 320, 320, 3] + + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model( + trained_checkpoint_prefix, use_moving_averages=False) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + output_directory = os.path.join(tmp_dir, 'output') + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + exporter.export_inference_graph( + input_type='image_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory, + input_shape=input_shape) + saved_model_path = os.path.join(output_directory, 'saved_model') + self.assertTrue( + os.path.exists(os.path.join(saved_model_path, 'saved_model.pb'))) + + with tf.Graph().as_default() as od_graph: + with self.test_session(graph=od_graph) as sess: + meta_graph = tf.saved_model.loader.load( + sess, [tf.saved_model.tag_constants.SERVING], saved_model_path) + signature = meta_graph.signature_def['serving_default'] + input_tensor_name = signature.inputs['inputs'].name + image_tensor = od_graph.get_tensor_by_name(input_tensor_name) + self.assertSequenceEqual(image_tensor.get_shape().as_list(), + input_shape) + + def test_export_graph_with_tf_example_input(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + 
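# Write a throwaway FakeModel checkpoint first so the exporter has variables to restore. +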
self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=False) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + output_directory = os.path.join(tmp_dir, 'output') + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + exporter.export_inference_graph( + input_type='tf_example', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + self.assertTrue(os.path.exists(os.path.join( + output_directory, 'saved_model', 'saved_model.pb'))) + + def test_export_graph_with_fixed_size_tf_example_input(self): + input_shape = [1, 320, 320, 3] + + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model( + trained_checkpoint_prefix, use_moving_averages=False) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + output_directory = os.path.join(tmp_dir, 'output') + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + exporter.export_inference_graph( + input_type='tf_example', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory, + input_shape=input_shape) + saved_model_path = os.path.join(output_directory, 'saved_model') + self.assertTrue( + os.path.exists(os.path.join(saved_model_path, 'saved_model.pb'))) + + def test_export_graph_with_encoded_image_string_input(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=False) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + output_directory = os.path.join(tmp_dir, 'output') + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + exporter.export_inference_graph( + input_type='encoded_image_string_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + self.assertTrue(os.path.exists(os.path.join( + output_directory, 'saved_model', 'saved_model.pb'))) + + def test_export_graph_with_fixed_size_encoded_image_string_input(self): + input_shape = [1, 320, 320, 3] + + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model( + trained_checkpoint_prefix, use_moving_averages=False) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + output_directory = os.path.join(tmp_dir, 'output') + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + exporter.export_inference_graph( + input_type='encoded_image_string_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory, + input_shape=input_shape) + saved_model_path = os.path.join(output_directory, 'saved_model') + self.assertTrue( + os.path.exists(os.path.join(saved_model_path, 'saved_model.pb'))) + + def _get_variables_in_checkpoint(self, 
checkpoint_file): + return set([ + var_name + for var_name, _ in tf.train.list_variables(checkpoint_file)]) + + def test_replace_variable_values_with_moving_averages(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + new_checkpoint_prefix = os.path.join(tmp_dir, 'new.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=True) + graph = tf.Graph() + with graph.as_default(): + fake_model = FakeModel() + preprocessed_inputs, true_image_shapes = fake_model.preprocess( + tf.placeholder(dtype=tf.float32, shape=[None, None, None, 3])) + predictions = fake_model.predict(preprocessed_inputs, true_image_shapes) + fake_model.postprocess(predictions, true_image_shapes) + exporter.replace_variable_values_with_moving_averages( + graph, trained_checkpoint_prefix, new_checkpoint_prefix) + + expected_variables = set(['conv2d/bias', 'conv2d/kernel']) + variables_in_old_ckpt = self._get_variables_in_checkpoint( + trained_checkpoint_prefix) + self.assertIn('conv2d/bias/ExponentialMovingAverage', + variables_in_old_ckpt) + self.assertIn('conv2d/kernel/ExponentialMovingAverage', + variables_in_old_ckpt) + variables_in_new_ckpt = self._get_variables_in_checkpoint( + new_checkpoint_prefix) + self.assertTrue(expected_variables.issubset(variables_in_new_ckpt)) + self.assertNotIn('conv2d/bias/ExponentialMovingAverage', + variables_in_new_ckpt) + self.assertNotIn('conv2d/kernel/ExponentialMovingAverage', + variables_in_new_ckpt) + + def test_export_graph_with_moving_averages(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=True) + output_directory = os.path.join(tmp_dir, 'output') + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = True + exporter.export_inference_graph( + input_type='image_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + self.assertTrue(os.path.exists(os.path.join( + output_directory, 'saved_model', 'saved_model.pb'))) + expected_variables = set(['conv2d/bias', 'conv2d/kernel', 'global_step']) + actual_variables = set( + [var_name for var_name, _ in tf.train.list_variables(output_directory)]) + self.assertTrue(expected_variables.issubset(actual_variables)) + + def test_export_model_with_quantization_nodes(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model( + trained_checkpoint_prefix, + use_moving_averages=False, + enable_quantization=True) + output_directory = os.path.join(tmp_dir, 'output') + inference_graph_path = os.path.join(output_directory, + 'inference_graph.pbtxt') + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + text_format.Merge( + """graph_rewriter { + quantization { + delay: 50000 + activation_bits: 8 + weight_bits: 8 + } + }""", pipeline_config) + exporter.export_inference_graph( + input_type='image_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory, + 
write_inference_graph=True) + self._load_inference_graph(inference_graph_path, is_binary=False) + has_quant_nodes = False + for v in variables_helper.get_global_variables_safely(): + if six.ensure_str(v.op.name).endswith('act_quant/min'): + has_quant_nodes = True + break + self.assertTrue(has_quant_nodes) + + def test_export_model_with_all_output_nodes(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=True) + output_directory = os.path.join(tmp_dir, 'output') + inference_graph_path = os.path.join(output_directory, + 'frozen_inference_graph.pb') + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel( + add_detection_keypoints=True, add_detection_masks=True, + add_detection_features=True) + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + exporter.export_inference_graph( + input_type='image_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + inference_graph = self._load_inference_graph(inference_graph_path) + with self.test_session(graph=inference_graph): + inference_graph.get_tensor_by_name('image_tensor:0') + inference_graph.get_tensor_by_name('detection_boxes:0') + inference_graph.get_tensor_by_name('detection_scores:0') + inference_graph.get_tensor_by_name('detection_multiclass_scores:0') + inference_graph.get_tensor_by_name('detection_classes:0') + inference_graph.get_tensor_by_name('detection_keypoints:0') + inference_graph.get_tensor_by_name('detection_masks:0') + inference_graph.get_tensor_by_name('num_detections:0') + inference_graph.get_tensor_by_name('detection_features:0') + + def test_export_model_with_detection_only_nodes(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=True) + output_directory = os.path.join(tmp_dir, 'output') + inference_graph_path = os.path.join(output_directory, + 'frozen_inference_graph.pb') + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel(add_detection_masks=False) + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + exporter.export_inference_graph( + input_type='image_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + inference_graph = self._load_inference_graph(inference_graph_path) + with self.test_session(graph=inference_graph): + inference_graph.get_tensor_by_name('image_tensor:0') + inference_graph.get_tensor_by_name('detection_boxes:0') + inference_graph.get_tensor_by_name('detection_scores:0') + inference_graph.get_tensor_by_name('detection_multiclass_scores:0') + inference_graph.get_tensor_by_name('detection_classes:0') + inference_graph.get_tensor_by_name('num_detections:0') + with self.assertRaises(KeyError): + inference_graph.get_tensor_by_name('detection_keypoints:0') + inference_graph.get_tensor_by_name('detection_masks:0') + + def test_export_model_with_detection_only_nodes_and_detection_features(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=True) + output_directory = 
os.path.join(tmp_dir, 'output') + inference_graph_path = os.path.join(output_directory, + 'frozen_inference_graph.pb') + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel(add_detection_features=True) + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + exporter.export_inference_graph( + input_type='image_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + inference_graph = self._load_inference_graph(inference_graph_path) + with self.test_session(graph=inference_graph): + inference_graph.get_tensor_by_name('image_tensor:0') + inference_graph.get_tensor_by_name('detection_boxes:0') + inference_graph.get_tensor_by_name('detection_scores:0') + inference_graph.get_tensor_by_name('detection_multiclass_scores:0') + inference_graph.get_tensor_by_name('detection_classes:0') + inference_graph.get_tensor_by_name('num_detections:0') + inference_graph.get_tensor_by_name('detection_features:0') + with self.assertRaises(KeyError): + inference_graph.get_tensor_by_name('detection_keypoints:0') + inference_graph.get_tensor_by_name('detection_masks:0') + + def test_export_and_run_inference_with_image_tensor(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=True) + output_directory = os.path.join(tmp_dir, 'output') + inference_graph_path = os.path.join(output_directory, + 'frozen_inference_graph.pb') + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel( + add_detection_keypoints=True, add_detection_masks=True) + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + exporter.export_inference_graph( + input_type='image_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + + inference_graph = self._load_inference_graph(inference_graph_path) + with self.test_session(graph=inference_graph) as sess: + image_tensor = inference_graph.get_tensor_by_name('image_tensor:0') + boxes = inference_graph.get_tensor_by_name('detection_boxes:0') + scores = inference_graph.get_tensor_by_name('detection_scores:0') + classes = inference_graph.get_tensor_by_name('detection_classes:0') + keypoints = inference_graph.get_tensor_by_name('detection_keypoints:0') + masks = inference_graph.get_tensor_by_name('detection_masks:0') + num_detections = inference_graph.get_tensor_by_name('num_detections:0') + (boxes_np, scores_np, classes_np, keypoints_np, masks_np, + num_detections_np) = sess.run( + [boxes, scores, classes, keypoints, masks, num_detections], + feed_dict={image_tensor: np.ones((2, 4, 4, 3)).astype(np.uint8)}) + self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0]]]) + self.assertAllClose(scores_np, [[0.7, 0.6], + [0.9, 0.0]]) + self.assertAllClose(classes_np, [[1, 2], + [2, 1]]) + self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) + self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) + self.assertAllClose(num_detections_np, [2, 1]) + + def _create_encoded_image_string(self, image_array_np, encoding_format): + od_graph = tf.Graph() + with od_graph.as_default(): + if encoding_format == 'jpg': + 
encoded_string = tf.image.encode_jpeg(image_array_np) + elif encoding_format == 'png': + encoded_string = tf.image.encode_png(image_array_np) + else: + raise ValueError('Supports only the following formats: `jpg`, `png`') + with self.test_session(graph=od_graph): + return encoded_string.eval() + + def test_export_and_run_inference_with_encoded_image_string_tensor(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=True) + output_directory = os.path.join(tmp_dir, 'output') + inference_graph_path = os.path.join(output_directory, + 'frozen_inference_graph.pb') + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel( + add_detection_keypoints=True, add_detection_masks=True) + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + exporter.export_inference_graph( + input_type='encoded_image_string_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + + inference_graph = self._load_inference_graph(inference_graph_path) + jpg_image_str = self._create_encoded_image_string( + np.ones((4, 4, 3)).astype(np.uint8), 'jpg') + png_image_str = self._create_encoded_image_string( + np.ones((4, 4, 3)).astype(np.uint8), 'png') + with self.test_session(graph=inference_graph) as sess: + image_str_tensor = inference_graph.get_tensor_by_name( + 'encoded_image_string_tensor:0') + boxes = inference_graph.get_tensor_by_name('detection_boxes:0') + scores = inference_graph.get_tensor_by_name('detection_scores:0') + multiclass_scores = inference_graph.get_tensor_by_name( + 'detection_multiclass_scores:0') + classes = inference_graph.get_tensor_by_name('detection_classes:0') + keypoints = inference_graph.get_tensor_by_name('detection_keypoints:0') + masks = inference_graph.get_tensor_by_name('detection_masks:0') + num_detections = inference_graph.get_tensor_by_name('num_detections:0') + for image_str in [jpg_image_str, png_image_str]: + image_str_batch_np = np.hstack([image_str]* 2) + (boxes_np, scores_np, multiclass_scores_np, classes_np, keypoints_np, + masks_np, num_detections_np) = sess.run( + [ + boxes, scores, multiclass_scores, classes, keypoints, masks, + num_detections + ], + feed_dict={image_str_tensor: image_str_batch_np}) + self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0]]]) + self.assertAllClose(scores_np, [[0.7, 0.6], + [0.9, 0.0]]) + self.assertAllClose(multiclass_scores_np, [[[0.3, 0.7], [0.4, 0.6]], + [[0.1, 0.9], [0.0, 0.0]]]) + self.assertAllClose(classes_np, [[1, 2], + [2, 1]]) + self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) + self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) + self.assertAllClose(num_detections_np, [2, 1]) + + def test_raise_runtime_error_on_images_with_different_sizes(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=True) + output_directory = os.path.join(tmp_dir, 'output') + inference_graph_path = os.path.join(output_directory, + 'frozen_inference_graph.pb') + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = 
FakeModel( + add_detection_keypoints=True, add_detection_masks=True) + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + exporter.export_inference_graph( + input_type='encoded_image_string_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + + inference_graph = self._load_inference_graph(inference_graph_path) + large_image = self._create_encoded_image_string( + np.ones((4, 4, 3)).astype(np.uint8), 'jpg') + small_image = self._create_encoded_image_string( + np.ones((2, 2, 3)).astype(np.uint8), 'jpg') + + image_str_batch_np = np.hstack([large_image, small_image]) + with self.test_session(graph=inference_graph) as sess: + image_str_tensor = inference_graph.get_tensor_by_name( + 'encoded_image_string_tensor:0') + boxes = inference_graph.get_tensor_by_name('detection_boxes:0') + scores = inference_graph.get_tensor_by_name('detection_scores:0') + classes = inference_graph.get_tensor_by_name('detection_classes:0') + keypoints = inference_graph.get_tensor_by_name('detection_keypoints:0') + masks = inference_graph.get_tensor_by_name('detection_masks:0') + num_detections = inference_graph.get_tensor_by_name('num_detections:0') + with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, + 'TensorArray.*shape'): + sess.run( + [boxes, scores, classes, keypoints, masks, num_detections], + feed_dict={image_str_tensor: image_str_batch_np}) + + def test_export_and_run_inference_with_tf_example(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=True) + output_directory = os.path.join(tmp_dir, 'output') + inference_graph_path = os.path.join(output_directory, + 'frozen_inference_graph.pb') + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel( + add_detection_keypoints=True, add_detection_masks=True) + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + exporter.export_inference_graph( + input_type='tf_example', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + + inference_graph = self._load_inference_graph(inference_graph_path) + tf_example_np = np.expand_dims(self._create_tf_example( + np.ones((4, 4, 3)).astype(np.uint8)), axis=0) + with self.test_session(graph=inference_graph) as sess: + tf_example = inference_graph.get_tensor_by_name('tf_example:0') + boxes = inference_graph.get_tensor_by_name('detection_boxes:0') + scores = inference_graph.get_tensor_by_name('detection_scores:0') + classes = inference_graph.get_tensor_by_name('detection_classes:0') + keypoints = inference_graph.get_tensor_by_name('detection_keypoints:0') + masks = inference_graph.get_tensor_by_name('detection_masks:0') + num_detections = inference_graph.get_tensor_by_name('num_detections:0') + (boxes_np, scores_np, classes_np, keypoints_np, masks_np, + num_detections_np) = sess.run( + [boxes, scores, classes, keypoints, masks, num_detections], + feed_dict={tf_example: tf_example_np}) + self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0]]]) + self.assertAllClose(scores_np, [[0.7, 0.6], + [0.9, 0.0]]) + self.assertAllClose(classes_np, [[1, 2], + [2, 1]]) + 
self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) + self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) + self.assertAllClose(num_detections_np, [2, 1]) + + def test_write_frozen_graph(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=True) + output_directory = os.path.join(tmp_dir, 'output') + inference_graph_path = os.path.join(output_directory, + 'frozen_inference_graph.pb') + tf.gfile.MakeDirs(output_directory) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel( + add_detection_keypoints=True, add_detection_masks=True) + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + detection_model = model_builder.build(pipeline_config.model, + is_training=False) + outputs, _ = exporter.build_detection_graph( + input_type='tf_example', + detection_model=detection_model, + input_shape=None, + output_collection_name='inference_op', + graph_hook_fn=None) + output_node_names = ','.join(list(outputs.keys())) + saver = tf.train.Saver() + input_saver_def = saver.as_saver_def() + exporter.freeze_graph_with_def_protos( + input_graph_def=tf.get_default_graph().as_graph_def(), + input_saver_def=input_saver_def, + input_checkpoint=trained_checkpoint_prefix, + output_node_names=output_node_names, + restore_op_name='save/restore_all', + filename_tensor_name='save/Const:0', + output_graph=inference_graph_path, + clear_devices=True, + initializer_nodes='') + + inference_graph = self._load_inference_graph(inference_graph_path) + tf_example_np = np.expand_dims(self._create_tf_example( + np.ones((4, 4, 3)).astype(np.uint8)), axis=0) + with self.test_session(graph=inference_graph) as sess: + tf_example = inference_graph.get_tensor_by_name('tf_example:0') + boxes = inference_graph.get_tensor_by_name('detection_boxes:0') + scores = inference_graph.get_tensor_by_name('detection_scores:0') + classes = inference_graph.get_tensor_by_name('detection_classes:0') + keypoints = inference_graph.get_tensor_by_name('detection_keypoints:0') + masks = inference_graph.get_tensor_by_name('detection_masks:0') + num_detections = inference_graph.get_tensor_by_name('num_detections:0') + (boxes_np, scores_np, classes_np, keypoints_np, masks_np, + num_detections_np) = sess.run( + [boxes, scores, classes, keypoints, masks, num_detections], + feed_dict={tf_example: tf_example_np}) + self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0]]]) + self.assertAllClose(scores_np, [[0.7, 0.6], + [0.9, 0.0]]) + self.assertAllClose(classes_np, [[1, 2], + [2, 1]]) + self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) + self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) + self.assertAllClose(num_detections_np, [2, 1]) + + def test_export_graph_saves_pipeline_file(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=True) + output_directory = os.path.join(tmp_dir, 'output') + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + exporter.export_inference_graph( + 
input_type='image_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + expected_pipeline_path = os.path.join( + output_directory, 'pipeline.config') + self.assertTrue(os.path.exists(expected_pipeline_path)) + + written_pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + with tf.gfile.GFile(expected_pipeline_path, 'r') as f: + proto_str = f.read() + text_format.Merge(proto_str, written_pipeline_config) + self.assertProtoEquals(pipeline_config, written_pipeline_config) + + def test_export_saved_model_and_run_inference(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=False) + output_directory = os.path.join(tmp_dir, 'output') + saved_model_path = os.path.join(output_directory, 'saved_model') + + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel( + add_detection_keypoints=True, add_detection_masks=True) + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + exporter.export_inference_graph( + input_type='tf_example', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + + tf_example_np = np.hstack([self._create_tf_example( + np.ones((4, 4, 3)).astype(np.uint8))] * 2) + with tf.Graph().as_default() as od_graph: + with self.test_session(graph=od_graph) as sess: + meta_graph = tf.saved_model.loader.load( + sess, [tf.saved_model.tag_constants.SERVING], saved_model_path) + + signature = meta_graph.signature_def['serving_default'] + input_tensor_name = signature.inputs['inputs'].name + tf_example = od_graph.get_tensor_by_name(input_tensor_name) + + boxes = od_graph.get_tensor_by_name( + signature.outputs['detection_boxes'].name) + scores = od_graph.get_tensor_by_name( + signature.outputs['detection_scores'].name) + multiclass_scores = od_graph.get_tensor_by_name( + signature.outputs['detection_multiclass_scores'].name) + classes = od_graph.get_tensor_by_name( + signature.outputs['detection_classes'].name) + keypoints = od_graph.get_tensor_by_name( + signature.outputs['detection_keypoints'].name) + masks = od_graph.get_tensor_by_name( + signature.outputs['detection_masks'].name) + num_detections = od_graph.get_tensor_by_name( + signature.outputs['num_detections'].name) + + (boxes_np, scores_np, multiclass_scores_np, classes_np, keypoints_np, + masks_np, num_detections_np) = sess.run( + [boxes, scores, multiclass_scores, classes, keypoints, masks, + num_detections], + feed_dict={tf_example: tf_example_np}) + self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0]]]) + self.assertAllClose(scores_np, [[0.7, 0.6], + [0.9, 0.0]]) + self.assertAllClose(multiclass_scores_np, [[[0.3, 0.7], [0.4, 0.6]], + [[0.1, 0.9], [0.0, 0.0]]]) + self.assertAllClose(classes_np, [[1, 2], + [2, 1]]) + self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) + self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) + self.assertAllClose(num_detections_np, [2, 1]) + + def test_write_saved_model(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=False) + 
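# This test exercises the export pieces manually: build the detection graph, freeze it, then write the SavedModel. +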
output_directory = os.path.join(tmp_dir, 'output') + saved_model_path = os.path.join(output_directory, 'saved_model') + tf.gfile.MakeDirs(output_directory) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel( + add_detection_keypoints=True, add_detection_masks=True) + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + detection_model = model_builder.build(pipeline_config.model, + is_training=False) + outputs, placeholder_tensor = exporter.build_detection_graph( + input_type='tf_example', + detection_model=detection_model, + input_shape=None, + output_collection_name='inference_op', + graph_hook_fn=None) + output_node_names = ','.join(list(outputs.keys())) + saver = tf.train.Saver() + input_saver_def = saver.as_saver_def() + frozen_graph_def = exporter.freeze_graph_with_def_protos( + input_graph_def=tf.get_default_graph().as_graph_def(), + input_saver_def=input_saver_def, + input_checkpoint=trained_checkpoint_prefix, + output_node_names=output_node_names, + restore_op_name='save/restore_all', + filename_tensor_name='save/Const:0', + output_graph='', + clear_devices=True, + initializer_nodes='') + exporter.write_saved_model( + saved_model_path=saved_model_path, + frozen_graph_def=frozen_graph_def, + inputs=placeholder_tensor, + outputs=outputs) + + tf_example_np = np.hstack([self._create_tf_example( + np.ones((4, 4, 3)).astype(np.uint8))] * 2) + with tf.Graph().as_default() as od_graph: + with self.test_session(graph=od_graph) as sess: + meta_graph = tf.saved_model.loader.load( + sess, [tf.saved_model.tag_constants.SERVING], saved_model_path) + + signature = meta_graph.signature_def['serving_default'] + input_tensor_name = signature.inputs['inputs'].name + tf_example = od_graph.get_tensor_by_name(input_tensor_name) + + boxes = od_graph.get_tensor_by_name( + signature.outputs['detection_boxes'].name) + scores = od_graph.get_tensor_by_name( + signature.outputs['detection_scores'].name) + classes = od_graph.get_tensor_by_name( + signature.outputs['detection_classes'].name) + keypoints = od_graph.get_tensor_by_name( + signature.outputs['detection_keypoints'].name) + masks = od_graph.get_tensor_by_name( + signature.outputs['detection_masks'].name) + num_detections = od_graph.get_tensor_by_name( + signature.outputs['num_detections'].name) + + (boxes_np, scores_np, classes_np, keypoints_np, masks_np, + num_detections_np) = sess.run( + [boxes, scores, classes, keypoints, masks, num_detections], + feed_dict={tf_example: tf_example_np}) + self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0]]]) + self.assertAllClose(scores_np, [[0.7, 0.6], + [0.9, 0.0]]) + self.assertAllClose(classes_np, [[1, 2], + [2, 1]]) + self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) + self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) + self.assertAllClose(num_detections_np, [2, 1]) + + def test_export_checkpoint_and_run_inference(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=False) + output_directory = os.path.join(tmp_dir, 'output') + model_path = os.path.join(output_directory, 'model.ckpt') + meta_graph_path = model_path + '.meta' + + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + 
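# Patching model_builder.build makes the exporter construct the FakeModel instead of a real architecture. +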
mock_builder.return_value = FakeModel( + add_detection_keypoints=True, add_detection_masks=True) + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + exporter.export_inference_graph( + input_type='tf_example', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + + tf_example_np = np.hstack([self._create_tf_example( + np.ones((4, 4, 3)).astype(np.uint8))] * 2) + with tf.Graph().as_default() as od_graph: + with self.test_session(graph=od_graph) as sess: + new_saver = tf.train.import_meta_graph(meta_graph_path) + new_saver.restore(sess, model_path) + + tf_example = od_graph.get_tensor_by_name('tf_example:0') + boxes = od_graph.get_tensor_by_name('detection_boxes:0') + scores = od_graph.get_tensor_by_name('detection_scores:0') + classes = od_graph.get_tensor_by_name('detection_classes:0') + keypoints = od_graph.get_tensor_by_name('detection_keypoints:0') + masks = od_graph.get_tensor_by_name('detection_masks:0') + num_detections = od_graph.get_tensor_by_name('num_detections:0') + (boxes_np, scores_np, classes_np, keypoints_np, masks_np, + num_detections_np) = sess.run( + [boxes, scores, classes, keypoints, masks, num_detections], + feed_dict={tf_example: tf_example_np}) + self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0]]]) + self.assertAllClose(scores_np, [[0.7, 0.6], + [0.9, 0.0]]) + self.assertAllClose(classes_np, [[1, 2], + [2, 1]]) + self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) + self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) + self.assertAllClose(num_detections_np, [2, 1]) + + def test_write_graph_and_checkpoint(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=False) + output_directory = os.path.join(tmp_dir, 'output') + model_path = os.path.join(output_directory, 'model.ckpt') + meta_graph_path = model_path + '.meta' + tf.gfile.MakeDirs(output_directory) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel( + add_detection_keypoints=True, add_detection_masks=True) + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + detection_model = model_builder.build(pipeline_config.model, + is_training=False) + exporter.build_detection_graph( + input_type='tf_example', + detection_model=detection_model, + input_shape=None, + output_collection_name='inference_op', + graph_hook_fn=None) + saver = tf.train.Saver() + input_saver_def = saver.as_saver_def() + exporter.write_graph_and_checkpoint( + inference_graph_def=tf.get_default_graph().as_graph_def(), + model_path=model_path, + input_saver_def=input_saver_def, + trained_checkpoint_prefix=trained_checkpoint_prefix) + + tf_example_np = np.hstack([self._create_tf_example( + np.ones((4, 4, 3)).astype(np.uint8))] * 2) + with tf.Graph().as_default() as od_graph: + with self.test_session(graph=od_graph) as sess: + new_saver = tf.train.import_meta_graph(meta_graph_path) + new_saver.restore(sess, model_path) + + tf_example = od_graph.get_tensor_by_name('tf_example:0') + boxes = od_graph.get_tensor_by_name('detection_boxes:0') + scores = od_graph.get_tensor_by_name('detection_scores:0') + raw_boxes = 
od_graph.get_tensor_by_name('raw_detection_boxes:0') + raw_scores = od_graph.get_tensor_by_name('raw_detection_scores:0') + classes = od_graph.get_tensor_by_name('detection_classes:0') + keypoints = od_graph.get_tensor_by_name('detection_keypoints:0') + masks = od_graph.get_tensor_by_name('detection_masks:0') + num_detections = od_graph.get_tensor_by_name('num_detections:0') + (boxes_np, scores_np, raw_boxes_np, raw_scores_np, classes_np, + keypoints_np, masks_np, num_detections_np) = sess.run( + [boxes, scores, raw_boxes, raw_scores, classes, keypoints, masks, + num_detections], + feed_dict={tf_example: tf_example_np}) + self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0]]]) + self.assertAllClose(scores_np, [[0.7, 0.6], + [0.9, 0.0]]) + self.assertAllClose(raw_boxes_np, [[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.5, 0.0, 0.5]]]) + self.assertAllClose(raw_scores_np, [[0.7, 0.6], + [0.9, 0.5]]) + self.assertAllClose(classes_np, [[1, 2], + [2, 1]]) + self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) + self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) + self.assertAllClose(num_detections_np, [2, 1]) + + def test_rewrite_nn_resize_op(self): + g = tf.Graph() + with g.as_default(): + x = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8)) + y = array_ops.placeholder(dtypes.float32, shape=(8, 20, 20, 8)) + s = ops.nearest_neighbor_upsampling(x, 2) + t = s + y + exporter.rewrite_nn_resize_op() + + resize_op_found = False + for op in g.get_operations(): + if op.type == 'ResizeNearestNeighbor': + resize_op_found = True + self.assertEqual(op.inputs[0], x) + self.assertEqual(op.outputs[0].consumers()[0], t.op) + break + + self.assertTrue(resize_op_found) + + def test_rewrite_nn_resize_op_quantized(self): + g = tf.Graph() + with g.as_default(): + x = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8)) + x_conv = slim.conv2d(x, 8, 1) + y = array_ops.placeholder(dtypes.float32, shape=(8, 20, 20, 8)) + s = ops.nearest_neighbor_upsampling(x_conv, 2) + t = s + y + + graph_rewriter_config = graph_rewriter_pb2.GraphRewriter() + graph_rewriter_config.quantization.delay = 500000 + graph_rewriter_fn = graph_rewriter_builder.build( + graph_rewriter_config, is_training=False) + graph_rewriter_fn() + + exporter.rewrite_nn_resize_op(is_quantized=True) + + resize_op_found = False + for op in g.get_operations(): + if op.type == 'ResizeNearestNeighbor': + resize_op_found = True + self.assertEqual(op.inputs[0].op.type, 'FakeQuantWithMinMaxVars') + self.assertEqual(op.outputs[0].consumers()[0], t.op) + break + + self.assertTrue(resize_op_found) + + def test_rewrite_nn_resize_op_odd_size(self): + g = tf.Graph() + with g.as_default(): + x = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8)) + s = ops.nearest_neighbor_upsampling(x, 2) + t = s[:, :19, :19, :] + exporter.rewrite_nn_resize_op() + + resize_op_found = False + for op in g.get_operations(): + if op.type == 'ResizeNearestNeighbor': + resize_op_found = True + self.assertEqual(op.inputs[0], x) + self.assertEqual(op.outputs[0].consumers()[0], t.op) + break + + self.assertTrue(resize_op_found) + + def test_rewrite_nn_resize_op_quantized_odd_size(self): + g = tf.Graph() + with g.as_default(): + x = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8)) + x_conv = slim.conv2d(x, 8, 1) + s = ops.nearest_neighbor_upsampling(x_conv, 2) + t = s[:, :19, :19, :] + + graph_rewriter_config = 
graph_rewriter_pb2.GraphRewriter() + graph_rewriter_config.quantization.delay = 500000 + graph_rewriter_fn = graph_rewriter_builder.build( + graph_rewriter_config, is_training=False) + graph_rewriter_fn() + + exporter.rewrite_nn_resize_op(is_quantized=True) + + resize_op_found = False + for op in g.get_operations(): + if op.type == 'ResizeNearestNeighbor': + resize_op_found = True + self.assertEqual(op.inputs[0].op.type, 'FakeQuantWithMinMaxVars') + self.assertEqual(op.outputs[0].consumers()[0], t.op) + break + + self.assertTrue(resize_op_found) + + def test_rewrite_nn_resize_op_multiple_path(self): + g = tf.Graph() + with g.as_default(): + with tf.name_scope('nearest_upsampling'): + x = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8)) + x_stack = tf.stack([tf.stack([x] * 2, axis=3)] * 2, axis=2) + x_reshape = tf.reshape(x_stack, [8, 20, 20, 8]) + + with tf.name_scope('nearest_upsampling'): + x_2 = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8)) + x_stack_2 = tf.stack([tf.stack([x_2] * 2, axis=3)] * 2, axis=2) + x_reshape_2 = tf.reshape(x_stack_2, [8, 20, 20, 8]) + + t = x_reshape + x_reshape_2 + + exporter.rewrite_nn_resize_op() + + graph_def = g.as_graph_def() + graph_def = strip_unused_lib.strip_unused( + graph_def, + input_node_names=[ + 'nearest_upsampling/Placeholder', 'nearest_upsampling_1/Placeholder' + ], + output_node_names=['add'], + placeholder_type_enum=dtypes.float32.as_datatype_enum) + + counter_resize_op = 0 + t_input_ops = [op.name for op in t.op.inputs] + for node in graph_def.node: + # Make sure Stacks are replaced. + self.assertNotEqual(node.op, 'Pack') + if node.op == 'ResizeNearestNeighbor': + counter_resize_op += 1 + self.assertIn(six.ensure_str(node.name) + ':0', t_input_ops) + self.assertEqual(counter_resize_op, 2) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/g3doc/challenge_evaluation.md b/models/research/object_detection/g3doc/challenge_evaluation.md new file mode 100644 index 0000000000000000000000000000000000000000..d8ea21017d1ae5c6fefb148aa3178e1a079e9861 --- /dev/null +++ b/models/research/object_detection/g3doc/challenge_evaluation.md @@ -0,0 +1,215 @@ +# Open Images Challenge Evaluation + +The Object Detection API is currently supporting several evaluation metrics used +in the +[Open Images Challenge 2018](https://storage.googleapis.com/openimages/web/challenge.html) +and +[Open Images Challenge 2019](https://storage.googleapis.com/openimages/web/challenge2019.html). +In addition, several data processing tools are available. Detailed instructions +on using the tools for each track are available below. + +**NOTE:** all data links are updated to the Open Images Challenge 2019. + +## Object Detection Track + +The +[Object Detection metric](https://storage.googleapis.com/openimages/web/evaluation.html#object_detection_eval) +protocol requires a pre-processing of the released data to ensure correct +evaluation. The released data contains only leaf-most bounding box annotations +and image-level labels. The evaluation metric implementation is available in the +class `OpenImagesChallengeEvaluator`. + +1. Download + [class hierarchy of Open Images Detection Challenge 2019](https://storage.googleapis.com/openimages/challenge_2019/challenge-2019-label500-hierarchy.json) + in JSON format. +2. 
Download
+ [ground-truth bounding boxes](https://storage.googleapis.com/openimages/challenge_2019/challenge-2019-validation-detection-bbox.csv)
+ and
+ [image-level labels](https://storage.googleapis.com/openimages/challenge_2019/challenge-2019-validation-detection-human-imagelabels.csv).
+3. Run the following command to create hierarchical expansion of the bounding
+ boxes and image-level label annotations:
+
+```
+HIERARCHY_FILE=/path/to/challenge-2019-label500-hierarchy.json
+BOUNDING_BOXES=/path/to/challenge-2019-validation-detection-bbox
+IMAGE_LABELS=/path/to/challenge-2019-validation-detection-human-imagelabels
+
+python object_detection/dataset_tools/oid_hierarchical_labels_expansion.py \
+ --json_hierarchy_file=${HIERARCHY_FILE} \
+ --input_annotations=${BOUNDING_BOXES}.csv \
+ --output_annotations=${BOUNDING_BOXES}_expanded.csv \
+ --annotation_type=1
+
+python object_detection/dataset_tools/oid_hierarchical_labels_expansion.py \
+ --json_hierarchy_file=${HIERARCHY_FILE} \
+ --input_annotations=${IMAGE_LABELS}.csv \
+ --output_annotations=${IMAGE_LABELS}_expanded.csv \
+ --annotation_type=2
+```
+
+1. If you are not using Tensorflow, you can run evaluation directly using your
+ algorithm's output and generated ground-truth files. {value=4}
+
+After step 3 you produced the ground-truth files suitable for running 'OID
+Challenge Object Detection Metric 2019' evaluation. To run the evaluation, use
+the following command:
+
+```
+INPUT_PREDICTIONS=/path/to/detection_predictions.csv
+OUTPUT_METRICS=/path/to/output/metrics/file
+
+python models/research/object_detection/metrics/oid_challenge_evaluation.py \
+ --input_annotations_boxes=${BOUNDING_BOXES}_expanded.csv \
+ --input_annotations_labels=${IMAGE_LABELS}_expanded.csv \
+ --input_class_labelmap=object_detection/data/oid_object_detection_challenge_500_label_map.pbtxt \
+ --input_predictions=${INPUT_PREDICTIONS} \
+ --output_metrics=${OUTPUT_METRICS}
+```
+
+Note that the predictions file must contain the following keys:
+ImageID,LabelName,Score,XMin,XMax,YMin,YMax
+
+For the Object Detection Track, the participants will be ranked on:
+
+- "OpenImagesDetectionChallenge_Precision/mAP@0.5IOU"
+
+To use evaluation within Tensorflow training, use metric name
+`oid_challenge_detection_metrics` in the evaluation config.
+
+## Instance Segmentation Track
+
+The
+[Instance Segmentation metric](https://storage.googleapis.com/openimages/web/evaluation.html#instance_segmentation_eval)
+can be directly evaluated using the ground-truth data and model predictions. The
+evaluation metric implementation is available in the class
+`OpenImagesChallengeEvaluator`.
+
+1. Download
+ [class hierarchy of Open Images Instance Segmentation Challenge 2019](https://storage.googleapis.com/openimages/challenge_2019/challenge-2019-label300-segmentable-hierarchy.json)
+ in JSON format.
+2. Download
+ [ground-truth bounding boxes](https://storage.googleapis.com/openimages/challenge_2019/challenge-2019-validation-segmentation-bbox.csv)
+ and
+ [image-level labels](https://storage.googleapis.com/openimages/challenge_2019/challenge-2019-validation-segmentation-labels.csv).
+3. Download instance segmentation files for the validation set (see
+ [Open Images Challenge Downloads page](https://storage.googleapis.com/openimages/web/challenge2019_downloads.html)).
+ The download consists of a set of .zip archives containing binary .png
+ masks.
+ Those should be transformed into a single CSV file in the format:
+
+ ImageID,LabelName,ImageWidth,ImageHeight,XMin,YMin,XMax,YMax,IsGroupOf,Mask
+ where Mask is the MS COCO RLE encoding of the binary mask stored in the .png
+ file, compressed with zip and re-coded with base64. See an example
+ implementation of the encoding function
+ [here](https://gist.github.com/pculliton/209398a2a52867580c6103e25e55d93c).
+
+1. Run the following command to create hierarchical expansion of the instance
+ segmentation, bounding boxes and image-level label annotations: {value=4}
+
+```
+HIERARCHY_FILE=/path/to/challenge-2019-label300-hierarchy.json
+BOUNDING_BOXES=/path/to/challenge-2019-validation-detection-bbox
+IMAGE_LABELS=/path/to/challenge-2019-validation-detection-human-imagelabels
+INSTANCE_SEGMENTATIONS=/path/to/instance-segmentation-annotations
+
+python object_detection/dataset_tools/oid_hierarchical_labels_expansion.py \
+ --json_hierarchy_file=${HIERARCHY_FILE} \
+ --input_annotations=${BOUNDING_BOXES}.csv \
+ --output_annotations=${BOUNDING_BOXES}_expanded.csv \
+ --annotation_type=1
+
+python object_detection/dataset_tools/oid_hierarchical_labels_expansion.py \
+ --json_hierarchy_file=${HIERARCHY_FILE} \
+ --input_annotations=${IMAGE_LABELS}.csv \
+ --output_annotations=${IMAGE_LABELS}_expanded.csv \
+ --annotation_type=2
+
+python object_detection/dataset_tools/oid_hierarchical_labels_expansion.py \
+ --json_hierarchy_file=${HIERARCHY_FILE} \
+ --input_annotations=${INSTANCE_SEGMENTATIONS}.csv \
+ --output_annotations=${INSTANCE_SEGMENTATIONS}_expanded.csv \
+ --annotation_type=1
+```
+
+1. If you are not using Tensorflow, you can run evaluation directly using your
+ algorithm's output and generated ground-truth files. {value=4}
+
+```
+INPUT_PREDICTIONS=/path/to/instance_segmentation_predictions.csv
+OUTPUT_METRICS=/path/to/output/metrics/file
+
+python models/research/object_detection/metrics/oid_challenge_evaluation.py \
+ --input_annotations_boxes=${BOUNDING_BOXES}_expanded.csv \
+ --input_annotations_labels=${IMAGE_LABELS}_expanded.csv \
+ --input_class_labelmap=object_detection/data/oid_object_detection_challenge_500_label_map.pbtxt \
+ --input_predictions=${INPUT_PREDICTIONS} \
+ --input_annotations_segm=${INSTANCE_SEGMENTATIONS}_expanded.csv \
+ --output_metrics=${OUTPUT_METRICS}
+```
+
+Note that the predictions file must contain the following keys:
+ImageID,ImageWidth,ImageHeight,LabelName,Score,Mask
+
+Mask must be encoded the same way as groundtruth masks.
+
+For the Instance Segmentation Track, the participants will be ranked on:
+
+- "OpenImagesInstanceSegmentationChallenge_Precision/mAP@0.5IOU"
+
+## Visual Relationships Detection Track
+
+The
+[Visual Relationships Detection metrics](https://storage.googleapis.com/openimages/web/evaluation.html#visual_relationships_eval)
+can be directly evaluated using the ground-truth data and model predictions. The
+evaluation metric implementation is available in the class
+`VRDRelationDetectionEvaluator`,`VRDPhraseDetectionEvaluator`.
+
+1. Download the ground-truth
+ [visual relationships annotations](https://storage.googleapis.com/openimages/challenge_2019/challenge-2019-validation-vrd.csv)
+ and
+ [image-level labels](https://storage.googleapis.com/openimages/challenge_2019/challenge-2019-validation-vrd-labels.csv).
+2.
Run the following command to produce the final metrics:
+
+```
+INPUT_ANNOTATIONS_BOXES=/path/to/challenge-2019-validation-vrd.csv
+INPUT_ANNOTATIONS_LABELS=/path/to/challenge-2019-validation-vrd-labels.csv
+INPUT_PREDICTIONS=/path/to/predictions.csv
+INPUT_CLASS_LABELMAP=/path/to/oid_object_detection_challenge_500_label_map.pbtxt
+INPUT_RELATIONSHIP_LABELMAP=/path/to/relationships_labelmap.pbtxt
+OUTPUT_METRICS=/path/to/output/metrics/file
+
+echo "item { name: '/m/02gy9n' id: 602 display_name: 'Transparent' }
+item { name: '/m/05z87' id: 603 display_name: 'Plastic' }
+item { name: '/m/0dnr7' id: 604 display_name: '(made of)Textile' }
+item { name: '/m/04lbp' id: 605 display_name: '(made of)Leather' }
+item { name: '/m/083vt' id: 606 display_name: 'Wooden'}
+">>${INPUT_CLASS_LABELMAP}
+
+echo "item { name: 'at' id: 1 display_name: 'at' }
+item { name: 'on' id: 2 display_name: 'on (top of)' }
+item { name: 'holds' id: 3 display_name: 'holds' }
+item { name: 'plays' id: 4 display_name: 'plays' }
+item { name: 'interacts_with' id: 5 display_name: 'interacts with' }
+item { name: 'wears' id: 6 display_name: 'wears' }
+item { name: 'is' id: 7 display_name: 'is' }
+item { name: 'inside_of' id: 8 display_name: 'inside of' }
+item { name: 'under' id: 9 display_name: 'under' }
+item { name: 'hits' id: 10 display_name: 'hits' }
+"> ${INPUT_RELATIONSHIP_LABELMAP}
+
+python object_detection/metrics/oid_vrd_challenge_evaluation.py \
+ --input_annotations_boxes=${INPUT_ANNOTATIONS_BOXES} \
+ --input_annotations_labels=${INPUT_ANNOTATIONS_LABELS} \
+ --input_predictions=${INPUT_PREDICTIONS} \
+ --input_class_labelmap=${INPUT_CLASS_LABELMAP} \
+ --input_relationship_labelmap=${INPUT_RELATIONSHIP_LABELMAP} \
+ --output_metrics=${OUTPUT_METRICS}
+```
+
+Note that the predictions file must contain the following keys:
+ImageID,LabelName1,LabelName2,RelationshipLabel,Score,XMin1,XMax1,YMin1,YMax1,XMin2,XMax2,YMin2,YMax2
+
+The participants of the challenge will be evaluated by a weighted average of the following three metrics:
+
+- "VRDMetric_Relationships_mAP@0.5IOU"
+- "VRDMetric_Relationships_Recall@50@0.5IOU"
+- "VRDMetric_Phrases_mAP@0.5IOU"
diff --git a/models/research/object_detection/g3doc/configuring_jobs.md b/models/research/object_detection/g3doc/configuring_jobs.md
new file mode 100644
index 0000000000000000000000000000000000000000..c088169bc9987918ae6a9a1e81305e00c313d672
--- /dev/null
+++ b/models/research/object_detection/g3doc/configuring_jobs.md
@@ -0,0 +1,165 @@
+# Configuring the Object Detection Training Pipeline
+
+## Overview
+
+The Tensorflow Object Detection API uses protobuf files to configure the
+training and evaluation process. The schema for the training pipeline can be
+found in object_detection/protos/pipeline.proto. At a high level, the config
+file is split into 5 parts:
+
+1. The `model` configuration. This defines what type of model will be trained
+(i.e. meta-architecture, feature extractor).
+2. The `train_config`, which determines what parameters should be used to train
+the model (i.e. SGD parameters, input preprocessing and feature extractor
+initialization values).
+3. The `eval_config`, which determines what set of metrics will be reported for
+evaluation.
+4. The `train_input_config`, which defines what dataset the model should be
+trained on.
+5. The `eval_input_config`, which defines what dataset the model will be
+evaluated on. Typically this should be different from the training input
+dataset.
+
+A skeleton configuration file is shown below:
+
+```
+model {
+(...
Add model config here...) +} + +train_config : { +(... Add train_config here...) +} + +train_input_reader: { +(... Add train_input configuration here...) +} + +eval_config: { +} + +eval_input_reader: { +(... Add eval_input configuration here...) +} +``` + +## Picking Model Parameters + +There are a large number of model parameters to configure. The best settings +will depend on your given application. Faster R-CNN models are better suited to +cases where high accuracy is desired and latency is of lower priority. +Conversely, if processing time is the most important factor, SSD models are +recommended. Read [our paper](https://arxiv.org/abs/1611.10012) for a more +detailed discussion on the speed vs accuracy tradeoff. + +To help new users get started, sample model configurations have been provided +in the object_detection/samples/configs folder. The contents of these +configuration files can be pasted into `model` field of the skeleton +configuration. Users should note that the `num_classes` field should be changed +to a value suited for the dataset the user is training on. + +## Defining Inputs + +The Tensorflow Object Detection API accepts inputs in the TFRecord file format. +Users must specify the locations of both the training and evaluation files. +Additionally, users should also specify a label map, which define the mapping +between a class id and class name. The label map should be identical between +training and evaluation datasets. + +An example input configuration looks as follows: + +``` +tf_record_input_reader { + input_path: "/usr/home/username/data/train.record" +} +label_map_path: "/usr/home/username/data/label_map.pbtxt" +``` + +Users should substitute the `input_path` and `label_map_path` arguments and +insert the input configuration into the `train_input_reader` and +`eval_input_reader` fields in the skeleton configuration. Note that the paths +can also point to Google Cloud Storage buckets (ie. +"gs://project_bucket/train.record") for use on Google Cloud. + +## Configuring the Trainer + +The `train_config` defines parts of the training process: + +1. Model parameter initialization. +2. Input preprocessing. +3. SGD parameters. + +A sample `train_config` is below: + +``` +batch_size: 1 +optimizer { + momentum_optimizer: { + learning_rate: { + manual_step_learning_rate { + initial_learning_rate: 0.0002 + schedule { + step: 0 + learning_rate: .0002 + } + schedule { + step: 900000 + learning_rate: .00002 + } + schedule { + step: 1200000 + learning_rate: .000002 + } + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false +} +fine_tune_checkpoint: "/usr/home/username/tmp/model.ckpt-#####" +from_detection_checkpoint: true +load_all_detection_checkpoint_vars: true +gradient_clipping_by_norm: 10.0 +data_augmentation_options { + random_horizontal_flip { + } +} +``` + +### Model Parameter Initialization + +While optional, it is highly recommended that users utilize other object +detection checkpoints. Training an object detector from scratch can take days. +To speed up the training process, it is recommended that users re-use the +feature extractor parameters from a pre-existing image classification or +object detection checkpoint. `train_config` provides two fields to specify +pre-existing checkpoints: `fine_tune_checkpoint` and +`from_detection_checkpoint`. `fine_tune_checkpoint` should provide a path to +the pre-existing checkpoint +(ie:"/usr/home/username/checkpoint/model.ckpt-#####"). +`from_detection_checkpoint` is a boolean value. 
If false, it assumes the
+checkpoint was from an object classification checkpoint. Note that starting
+from a detection checkpoint will usually result in a faster training job than
+starting from a classification checkpoint.
+
+The list of provided checkpoints can be found [here](detection_model_zoo.md).
+
+### Input Preprocessing
+
+The `data_augmentation_options` in `train_config` can be used to specify
+how training data can be modified. This field is optional.
+
+### SGD Parameters
+
+The remaining parameters in `train_config` are hyperparameters for gradient
+descent. Please note that the optimal learning rates provided in these
+configuration files may depend on the specifics of the training setup (e.g.
+number of workers, gpu type).
+
+## Configuring the Evaluator
+
+The main components to set in `eval_config` are `num_examples` and
+`metrics_set`. The parameter `num_examples` indicates the number of batches
+(currently of batch size 1) used for an evaluation cycle, and often is the total
+size of the evaluation dataset. The parameter `metrics_set` indicates which
+metrics to run during evaluation (e.g. `"coco_detection_metrics"`).
diff --git a/models/research/object_detection/g3doc/context_rcnn.md b/models/research/object_detection/g3doc/context_rcnn.md
new file mode 100644
index 0000000000000000000000000000000000000000..a51e4f0419e9b3de2e167746f419b859cd07e8b5
--- /dev/null
+++ b/models/research/object_detection/g3doc/context_rcnn.md
@@ -0,0 +1,193 @@
+# Context R-CNN
+
+Context R-CNN is an object detection model that uses contextual features to
+improve detection performance. See https://arxiv.org/abs/1912.03538 for more details.
+
+## Table of Contents
+
+* [Preparing Context Data for Context R-CNN](#preparing-context-data-for-context-r-cnn)
+ + [Generating TfRecords from a set of images and a COCO-CameraTraps style
+ JSON](#generating-tfrecords-from-a-set-of-images-and-a-coco-cameratraps-style-json)
+ + [Generating weakly-supervised bounding box labels for image-labeled data](#generating-weakly-supervised-bounding-box-labels-for-image-labeled-data)
+ + [Generating and saving contextual features for each image](#generating-and-saving-contextual-features-for-each-image)
+ + [Building up contextual memory banks and storing them for each context
+ group](#building-up-contextual-memory-banks-and-storing-them-for-each-context-group)
+- [Training a Context R-CNN Model](#training-a-context-r-cnn-model)
+- [Exporting a Context R-CNN Model](#exporting-a-context-r-cnn-model)
+
+## Preparing Context Data for Context R-CNN
+
+In this section, we will walk through the process of generating TfRecords with
+contextual features. We focus on building context from object-centric features
+generated with a pre-trained Faster R-CNN model, but you can adapt the provided
+code to use alternative feature extractors.
+
+Each of these data processing scripts uses Apache Beam, which can be installed
+using
+
+```
+pip install apache-beam
+```
+
+and can be run locally, or on a cluster for efficient processing of large
+amounts of data. See the
+[Apache Beam documentation](https://beam.apache.org/documentation/runners/dataflow/)
+for more information.
+
+### Generating TfRecords from a set of images and a COCO-CameraTraps style JSON
+
+If your data is already stored in TfRecords, you can skip this first step.
+
+We assume a COCO-CameraTraps json format, as described on
+[LILA.science](https://github.com/microsoft/CameraTraps/blob/master/data_management/README.md).
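For orientation, the sketch below shows the rough shape of such a JSON database. All IDs, file names and values here are hypothetical; the LILA.science page linked above is the authoritative reference for the field definitions.

```
{
  "images": [
    {"id": "loc01_img0001", "file_name": "loc01/img0001.jpg",
     "width": 1920, "height": 1080,
     "location": "loc01", "datetime": "2019-06-01 08:13:02"}
  ],
  "categories": [
    {"id": 0, "name": "empty"},
    {"id": 1, "name": "animal"}
  ],
  "annotations": [
    {"id": "ann0001", "image_id": "loc01_img0001", "category_id": 1}
  ]
}
```

Annotations may also carry a COCO-style `bbox` field where box labels exist; a later subsection describes how to generate boxes when they are missing.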
+ +COCO-CameraTraps is a format that adds static-camera-specific fields, such as a +location ID and datetime, to the well-established COCO format. To generate +appropriate context later on, be sure you have specified each contextual group +with a different location ID, which in the static camera case would be the ID of +the camera, as well as the datetime each photo was taken. We assume that empty +images will be labeled 'empty' with class id 0. + +To generate TfRecords from your database and local image folder, run + +``` +python object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_main.py \ + --alsologtostderr \ + --output_tfrecord_prefix="/path/to/output/tfrecord/location/prefix" \ + --image_directory="/path/to/image/folder/" \ + --input_annotations_file="path/to/annotations.json" +``` + +### Generating weakly-supervised bounding box labels for image-labeled data + +If all your data already has bounding box labels you can skip this step. + +Many camera trap datasets do not have bounding box labels, or only have bounding +box labels for some of the data. We have provided code to add bounding boxes +from a pretrained model (such as the +[Microsoft AI for Earth MegaDetector](https://github.com/microsoft/CameraTraps/blob/master/megadetector.md)) +and match the boxes to the image-level class label. + +To export your pretrained detection model, run + +``` +python object_detection/export_inference_graph.py \ + --alsologtostderr \ + --input_type tf_example \ + --pipeline_config_path path/to/faster_rcnn_model.config \ + --trained_checkpoint_prefix path/to/model.ckpt \ + --output_directory path/to/exported_model_directory +``` + +To add bounding boxes to your dataset using the above model, run + +``` +python object_detection/dataset_tools/context_rcnn/generate_detection_data.py \ + --alsologtostderr \ + --input_tfrecord path/to/input_tfrecord@X \ + --output_tfrecord path/to/output_tfrecord@X \ + --model_dir path/to/exported_model_directory/saved_model +``` + +If an image already has bounding box labels, those labels are left unchanged. If +an image is labeled 'empty' (class ID 0), we will not generate boxes for that +image. + +### Generating and saving contextual features for each image + +We next extract and store features for each image from a pretrained model. This +model can be the same model as above, or be a class-specific detection model +trained on data from your classes of interest. + +To export your pretrained detection model, run + +``` +python object_detection/export_inference_graph.py \ + --alsologtostderr \ + --input_type tf_example \ + --pipeline_config_path path/to/pipeline.config \ + --trained_checkpoint_prefix path/to/model.ckpt \ + --output_directory path/to/exported_model_directory \ + --additional_output_tensor_names detection_features +``` + +Make sure that you have set `output_final_box_features: true` within +your config file before exporting. This is needed to export the features as an +output, but it does not need to be set during training. 
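For reference, a minimal sketch of where this flag might live in the pipeline config is shown below; this assumes a Faster R-CNN based model, as used throughout this doc. Check the Context R-CNN sample config referenced later for the exact layout.

```
model {
  faster_rcnn {
    # ... existing Faster R-CNN settings ...
    output_final_box_features: true
  }
}
```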
+ +To generate and save contextual features for your data, run + +``` +python object_detection/dataset_tools/context_rcnn/generate_embedding_data.py \ + --alsologtostderr \ + --embedding_input_tfrecord path/to/input_tfrecords* \ + --embedding_output_tfrecord path/to/output_tfrecords \ + --embedding_model_dir path/to/exported_model_directory/saved_model +``` + +### Building up contextual memory banks and storing them for each context group + +To build the context features you just added for each image into memory banks, +run + +``` +python object_detection/dataset_tools/context_rcnn/add_context_to_examples.py \ + --input_tfrecord path/to/input_tfrecords* \ + --output_tfrecord path/to/output_tfrecords \ + --sequence_key image/location \ + --time_horizon month +``` + +where the input_tfrecords for add_context_to_examples.py are the +output_tfrecords from generate_embedding_data.py. + +For all options, see add_context_to_examples.py. By default, this code builds +TfSequenceExamples, which are more data efficient (this allows you to store the +context features once for each context group, as opposed to once per image). If +you would like to export TfExamples instead, set flag `--output_type +tf_example`. + +If you use TfSequenceExamples, you must be sure to set `input_type: +TF_SEQUENCE_EXAMPLE` within your Context R-CNN configs for both +train_input_reader and test_input_reader. See +`object_detection/test_data/context_rcnn_camera_trap.config` +for an example. + +## Training a Context R-CNN Model + +To train a Context R-CNN model, you must first set up your config file. See +`test_data/context_rcnn_camera_trap.config` for an example. The important +difference between this config and a Faster R-CNN config is the inclusion of a +`context_config` within the model, which defines the necessary Context R-CNN +parameters. + +``` +context_config { + max_num_context_features: 2000 + context_feature_length: 2057 + } +``` + +Once your config file has been updated with your local paths, you can follow +along with documentation for running [locally](running_locally.md), or +[on the cloud](running_on_cloud.md). + +## Exporting a Context R-CNN Model + +Since Context R-CNN takes context features as well as images as input, we have +to explicitly define the other inputs ("side_inputs") to the model when +exporting, as below. This example is shown with default context feature shapes. + +``` +python export_inference_graph.py \ + --input_type image_tensor \ + --input_shape 1,-1,-1,3 \ + --pipeline_config_path /path/to/context_rcnn_model/pipeline.config \ + --trained_checkpoint_prefix /path/to/context_rcnn_model/model.ckpt \ + --output_directory /path/to/output_directory \ + --use_side_inputs True \ + --side_input_shapes 1,2000,2057/1 \ + --side_input_names context_features,valid_context_size \ + --side_input_types float,int + +``` diff --git a/models/research/object_detection/g3doc/defining_your_own_model.md b/models/research/object_detection/g3doc/defining_your_own_model.md new file mode 100644 index 0000000000000000000000000000000000000000..865f6af169bfe35a41765d91d36bbcfbac0a937a --- /dev/null +++ b/models/research/object_detection/g3doc/defining_your_own_model.md @@ -0,0 +1,137 @@ +# So you want to create a new model! + +In this section, we discuss some of the abstractions that we use +for defining detection models. 
If you would like to define a new model +architecture for detection and use it in the Tensorflow Detection API, +then this section should also serve as a high level guide to the files that you +will need to edit to get your new model working. + +## DetectionModels (`object_detection/core/model.py`) + +In order to be trained, evaluated, and exported for serving using our +provided binaries, all models under the Tensorflow Object Detection API must +implement the `DetectionModel` interface (see the full definition in `object_detection/core/model.py`). In particular, +each of these models are responsible for implementing 5 functions: + +* `preprocess`: Run any preprocessing (e.g., scaling/shifting/reshaping) of + input values that is necessary prior to running the detector on an input + image. +* `predict`: Produce “raw” prediction tensors that can be passed to loss or + postprocess functions. +* `postprocess`: Convert predicted output tensors to final detections. +* `loss`: Compute scalar loss tensors with respect to provided groundtruth. +* `restore`: Load a checkpoint into the Tensorflow graph. + +Given a `DetectionModel` at training time, we pass each image batch through +the following sequence of functions to compute a loss which can be optimized via +SGD: + +``` +inputs (images tensor) -> preprocess -> predict -> loss -> outputs (loss tensor) +``` + +And at eval time, we pass each image batch through the following sequence of +functions to produce a set of detections: + +``` +inputs (images tensor) -> preprocess -> predict -> postprocess -> + outputs (boxes tensor, scores tensor, classes tensor, num_detections tensor) +``` + +Some conventions to be aware of: + +* `DetectionModel`s should make no assumptions about the input size or aspect + ratio --- they are responsible for doing any resize/reshaping necessary + (see docstring for the `preprocess` function). +* Output classes are always integers in the range `[0, num_classes)`. + Any mapping of these integers to semantic labels is to be handled outside + of this class. We never explicitly emit a “background class” --- thus 0 is + the first non-background class and any logic of predicting and removing + implicit background classes must be handled internally by the implementation. +* Detected boxes are to be interpreted as being in + `[y_min, x_min, y_max, x_max]` format and normalized relative to the + image window. +* We do not specifically assume any kind of probabilistic interpretation of the + scores --- the only important thing is their relative ordering. Thus + implementations of the postprocess function are free to output logits, + probabilities, calibrated probabilities, or anything else. + +## Defining a new Faster R-CNN or SSD Feature Extractor + +In most cases, you probably will not implement a `DetectionModel` from scratch +--- instead you might create a new feature extractor to be used by one of the +SSD or Faster R-CNN meta-architectures. (We think of meta-architectures as +classes that define entire families of models using the `DetectionModel` +abstraction). + +Note: For the following discussion to make sense, we recommend first becoming +familiar with the [Faster R-CNN](https://arxiv.org/abs/1506.01497) paper. + +Let’s now imagine that you have invented a brand new network architecture +(say, “InceptionV100”) for classification and want to see how InceptionV100 +would behave as a feature extractor for detection (say, with Faster R-CNN). +A similar procedure would hold for SSD models, but we’ll discuss Faster R-CNN. 
+
+To use InceptionV100, we will have to define a new
+`FasterRCNNFeatureExtractor` and pass it to our `FasterRCNNMetaArch`
+constructor as input. See
+`object_detection/meta_architectures/faster_rcnn_meta_arch.py` for definitions
+of `FasterRCNNFeatureExtractor` and `FasterRCNNMetaArch`, respectively.
+A `FasterRCNNFeatureExtractor` must define a few
+functions:
+
+* `preprocess`: Run any preprocessing of input values that is necessary prior
+ to running the detector on an input image.
+* `_extract_proposal_features`: Extract first stage Region Proposal Network
+ (RPN) features.
+* `_extract_box_classifier_features`: Extract second stage Box Classifier
+ features.
+* `restore_from_classification_checkpoint_fn`: Load a checkpoint into the
+ Tensorflow graph.
+
+See the `object_detection/models/faster_rcnn_resnet_v1_feature_extractor.py`
+definition as one example. Some remarks:
+
+* We typically initialize the weights of this feature extractor
+ using those from the
+ [Slim Resnet-101 classification checkpoint](https://github.com/tensorflow/models/tree/master/research/slim#pre-trained-models),
+ and we know
+ that images were preprocessed when training this checkpoint
+ by subtracting a channel mean from each input
+ image. Thus, we implement the preprocess function to replicate the same
+ channel mean subtraction behavior.
+* The “full” resnet classification network defined in slim is cut into two
+ parts --- all but the last “resnet block” is put into the
+ `_extract_proposal_features` function and the final block is separately
+ defined in the `_extract_box_classifier_features` function. In general,
+ some experimentation may be required to decide on an optimal layer at
+ which to “cut” your feature extractor into these two pieces for Faster R-CNN.
+
+## Register your model for configuration
+
+Assuming that your new feature extractor does not require nonstandard
+configuration, ideally you should only need to change the
+“feature_extractor.type” fields in your configuration protos to point to the
+new feature extractor. In order for our API to understand this
+new type, though, you will first have to register your new feature
+extractor with the model builder (`object_detection/builders/model_builder.py`),
+whose job is to create models from config protos.
+
+Registration is simple --- just add a pointer to the new Feature Extractor
+class that you have defined in one of the SSD or Faster R-CNN Feature
+Extractor Class maps at the top of the
+`object_detection/builders/model_builder.py` file.
+We recommend adding a test in `object_detection/builders/model_builder_test.py`
+to make sure that parsing your proto will work as expected.
+
+## Taking your new model for a spin
+
+After registration you are ready to go with your model! Some final tips:
+
+* To save time debugging, try running your configuration file locally first
+ (both training and evaluation).
+* Do a sweep of learning rates to figure out which learning rate is best
+ for your model.
+* A small but often important detail: you may find it necessary to disable
+ batchnorm training (that is, load the batch norm parameters from the
+ classification checkpoint, but do not update them during gradient descent).
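To make the feature-extractor contract described above concrete, here is a minimal, hypothetical skeleton for the imaginary InceptionV100 backbone. This is only a sketch: the single conv layers stand in for the real network, and the exact base-class constructor arguments and return signatures should be checked against `object_detection/meta_architectures/faster_rcnn_meta_arch.py` (a real extractor subclasses `FasterRCNNFeatureExtractor` rather than `object`).

```
import tensorflow.compat.v1 as tf


class FasterRCNNInceptionV100FeatureExtractor(object):
  """Illustrative skeleton only; a real extractor subclasses
  faster_rcnn_meta_arch.FasterRCNNFeatureExtractor."""

  def __init__(self, is_training, first_stage_features_stride,
               weight_decay=0.0):
    self._is_training = is_training
    self._first_stage_features_stride = first_stage_features_stride
    self._weight_decay = weight_decay

  def preprocess(self, resized_inputs):
    # Replicate whatever preprocessing the classification checkpoint used,
    # e.g. scaling pixel values to [-1, 1].
    return (2.0 / 255.0) * resized_inputs - 1.0

  def _extract_proposal_features(self, preprocessed_inputs, scope):
    # Everything up to (but not including) the final InceptionV100 block;
    # the single conv below is a stand-in for the real trunk.
    with tf.variable_scope(scope):
      rpn_feature_map = tf.layers.conv2d(
          preprocessed_inputs, filters=256, kernel_size=3, padding='same',
          name='inception_v100_trunk_stub')
    return rpn_feature_map, {}

  def _extract_box_classifier_features(self, proposal_feature_maps, scope):
    # The final InceptionV100 block, applied to the cropped proposal features.
    with tf.variable_scope(scope):
      return tf.layers.conv2d(
          proposal_feature_maps, filters=512, kernel_size=3, padding='same',
          name='inception_v100_head_stub')

  def restore_from_classification_checkpoint_fn(
      self, first_stage_feature_extractor_scope,
      second_stage_feature_extractor_scope):
    # Map detection-graph variable names back to the names used in the
    # classification checkpoint so the backbone weights can be restored.
    variables_to_restore = {}
    for variable in tf.global_variables():
      for scope_name in (first_stage_feature_extractor_scope,
                         second_stage_feature_extractor_scope):
        if variable.op.name.startswith(scope_name):
          var_name = variable.op.name.replace(scope_name + '/', '')
          variables_to_restore[var_name] = variable
    return variables_to_restore
```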
diff --git a/models/research/object_detection/g3doc/detection_model_zoo.md b/models/research/object_detection/g3doc/detection_model_zoo.md new file mode 100644 index 0000000000000000000000000000000000000000..cb515b813ba8296005da503703bb659b9cb8b9dd --- /dev/null +++ b/models/research/object_detection/g3doc/detection_model_zoo.md @@ -0,0 +1,187 @@ +# Tensorflow detection model zoo + +We provide a collection of detection models pre-trained on the +[COCO dataset](http://cocodataset.org), the +[Kitti dataset](http://www.cvlibs.net/datasets/kitti/), the +[Open Images dataset](https://storage.googleapis.com/openimages/web/index.html), +the [AVA v2.1 dataset](https://research.google.com/ava/) the +[iNaturalist Species Detection Dataset](https://github.com/visipedia/inat_comp/blob/master/2017/README.md#bounding-boxes) +and the +[Snapshot Serengeti Dataset](http://lila.science/datasets/snapshot-serengeti). +These models can be useful for out-of-the-box inference if you are interested in +categories already in those datasets. They are also useful for initializing your +models when training on novel datasets. + +In the table below, we list each such pre-trained model including: + +* a model name that corresponds to a config file that was used to train this + model in the `samples/configs` directory, +* a download link to a tar.gz file containing the pre-trained model, +* model speed --- we report running time in ms per 600x600 image (including + all pre and post-processing), but please be aware that these timings depend + highly on one's specific hardware configuration (these timings were + performed using an Nvidia GeForce GTX TITAN X card) and should be treated + more as relative timings in many cases. Also note that desktop GPU timing + does not always reflect mobile run time. For example Mobilenet V2 is faster + on mobile devices than Mobilenet V1, but is slightly slower on desktop GPU. +* detector performance on subset of the COCO validation set, Open Images test + split, iNaturalist test split, or Snapshot Serengeti LILA.science test + split. as measured by the dataset-specific mAP measure. Here, higher is + better, and we only report bounding box mAP rounded to the nearest integer. +* Output types (`Boxes`, and `Masks` if applicable ) + +You can un-tar each tar.gz file via, e.g.,: + +``` +tar -xzvf ssd_mobilenet_v1_coco.tar.gz +``` + +Inside the un-tar'ed directory, you will find: + +* a graph proto (`graph.pbtxt`) +* a checkpoint (`model.ckpt.data-00000-of-00001`, `model.ckpt.index`, + `model.ckpt.meta`) +* a frozen graph proto with weights baked into the graph as constants + (`frozen_inference_graph.pb`) to be used for out of the box inference (try + this out in the Jupyter notebook!) +* a config file (`pipeline.config`) which was used to generate the graph. + These directly correspond to a config file in the + [samples/configs](https://github.com/tensorflow/models/tree/master/research/object_detection/samples/configs)) + directory but often with a modified score threshold. In the case of the + heavier Faster R-CNN models, we also provide a version of the model that + uses a highly reduced number of proposals for speed. +* Mobile model only: a TfLite file (`model.tflite`) that can be deployed on + mobile devices. + +Some remarks on frozen inference graphs: + +* If you try to evaluate the frozen graph, you may find performance numbers + for some of the models to be slightly lower than what we report in the below + tables. 
This is because we discard detections with scores below a threshold + (typically 0.3) when creating the frozen graph. This corresponds effectively + to picking a point on the precision recall curve of a detector (and + discarding the part past that point), which negatively impacts standard mAP + metrics. +* Our frozen inference graphs are generated using the + [v1.12.0](https://github.com/tensorflow/tensorflow/tree/v1.12.0) release + version of Tensorflow and we do not guarantee that these will work with + other versions; this being said, each frozen inference graph can be + regenerated using your current version of Tensorflow by re-running the + [exporter](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/exporting_models.md), + pointing it at the model directory as well as the corresponding config file + in + [samples/configs](https://github.com/tensorflow/models/tree/master/research/object_detection/samples/configs). + +## COCO-trained models + +Model name | Speed (ms) | COCO mAP[^1] | Outputs +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :--------: | :----------: | :-----: +[ssd_mobilenet_v1_coco](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_coco_2018_01_28.tar.gz) | 30 | 21 | Boxes +[ssd_mobilenet_v1_0.75_depth_coco ☆](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_0.75_depth_300x300_coco14_sync_2018_07_03.tar.gz) | 26 | 18 | Boxes +[ssd_mobilenet_v1_quantized_coco ☆](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz) | 29 | 18 | Boxes +[ssd_mobilenet_v1_0.75_depth_quantized_coco ☆](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_0.75_depth_quantized_300x300_coco14_sync_2018_07_18.tar.gz) | 29 | 16 | Boxes +[ssd_mobilenet_v1_ppn_coco ☆](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_ppn_shared_box_predictor_300x300_coco14_sync_2018_07_03.tar.gz) | 26 | 20 | Boxes +[ssd_mobilenet_v1_fpn_coco ☆](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03.tar.gz) | 56 | 32 | Boxes +[ssd_resnet_50_fpn_coco ☆](http://download.tensorflow.org/models/object_detection/ssd_resnet50_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03.tar.gz) | 76 | 35 | Boxes +[ssd_mobilenet_v2_coco](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v2_coco_2018_03_29.tar.gz) | 31 | 22 | Boxes +[ssd_mobilenet_v2_quantized_coco](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v2_quantized_300x300_coco_2019_01_03.tar.gz) | 29 | 22 | Boxes +[ssdlite_mobilenet_v2_coco](http://download.tensorflow.org/models/object_detection/ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz) | 27 | 22 | Boxes +[ssd_inception_v2_coco](http://download.tensorflow.org/models/object_detection/ssd_inception_v2_coco_2018_01_28.tar.gz) | 42 | 24 | Boxes +[faster_rcnn_inception_v2_coco](http://download.tensorflow.org/models/object_detection/faster_rcnn_inception_v2_coco_2018_01_28.tar.gz) | 58 | 28 | Boxes +[faster_rcnn_resnet50_coco](http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet50_coco_2018_01_28.tar.gz) | 89 | 30 | Boxes 
+[faster_rcnn_resnet50_lowproposals_coco](http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet50_lowproposals_coco_2018_01_28.tar.gz) | 64 | | Boxes +[rfcn_resnet101_coco](http://download.tensorflow.org/models/object_detection/rfcn_resnet101_coco_2018_01_28.tar.gz) | 92 | 30 | Boxes +[faster_rcnn_resnet101_coco](http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet101_coco_2018_01_28.tar.gz) | 106 | 32 | Boxes +[faster_rcnn_resnet101_lowproposals_coco](http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet101_lowproposals_coco_2018_01_28.tar.gz) | 82 | | Boxes +[faster_rcnn_inception_resnet_v2_atrous_coco](http://download.tensorflow.org/models/object_detection/faster_rcnn_inception_resnet_v2_atrous_coco_2018_01_28.tar.gz) | 620 | 37 | Boxes +[faster_rcnn_inception_resnet_v2_atrous_lowproposals_coco](http://download.tensorflow.org/models/object_detection/faster_rcnn_inception_resnet_v2_atrous_lowproposals_coco_2018_01_28.tar.gz) | 241 | | Boxes +[faster_rcnn_nas](http://download.tensorflow.org/models/object_detection/faster_rcnn_nas_coco_2018_01_28.tar.gz) | 1833 | 43 | Boxes +[faster_rcnn_nas_lowproposals_coco](http://download.tensorflow.org/models/object_detection/faster_rcnn_nas_lowproposals_coco_2018_01_28.tar.gz) | 540 | | Boxes +[mask_rcnn_inception_resnet_v2_atrous_coco](http://download.tensorflow.org/models/object_detection/mask_rcnn_inception_resnet_v2_atrous_coco_2018_01_28.tar.gz) | 771 | 36 | Masks +[mask_rcnn_inception_v2_coco](http://download.tensorflow.org/models/object_detection/mask_rcnn_inception_v2_coco_2018_01_28.tar.gz) | 79 | 25 | Masks +[mask_rcnn_resnet101_atrous_coco](http://download.tensorflow.org/models/object_detection/mask_rcnn_resnet101_atrous_coco_2018_01_28.tar.gz) | 470 | 33 | Masks +[mask_rcnn_resnet50_atrous_coco](http://download.tensorflow.org/models/object_detection/mask_rcnn_resnet50_atrous_coco_2018_01_28.tar.gz) | 343 | 29 | Masks + +Note: The asterisk (☆) at the end of model name indicates that this model +supports TPU training. + +Note: If you download the tar.gz file of quantized models and un-tar, you will +get different set of files - a checkpoint, a config file and tflite frozen +graphs (txt/binary). 
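As a quick sanity check after downloading, a frozen graph from the tables above can be run for out-of-the-box inference with a few lines of TF1-style code. The sketch below assumes the un-tar'ed `ssd_mobilenet_v1_coco_2018_01_28` directory and a local `test.jpg` (both placeholders); the tensor names are the standard ones produced by the exporter.

```
import numpy as np
import tensorflow.compat.v1 as tf
from PIL import Image

PATH_TO_FROZEN_GRAPH = 'ssd_mobilenet_v1_coco_2018_01_28/frozen_inference_graph.pb'

# Load the frozen graph into a fresh tf.Graph.
detection_graph = tf.Graph()
with detection_graph.as_default():
  graph_def = tf.GraphDef()
  with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
    graph_def.ParseFromString(fid.read())
  tf.import_graph_def(graph_def, name='')

# A single uint8 image batch of shape [1, height, width, 3].
image = np.expand_dims(np.array(Image.open('test.jpg')), axis=0)

with tf.Session(graph=detection_graph) as sess:
  boxes, scores, classes, num = sess.run(
      ['detection_boxes:0', 'detection_scores:0',
       'detection_classes:0', 'num_detections:0'],
      feed_dict={'image_tensor:0': image})

print('Top scores:', scores[0][:5])
```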
+ +### Mobile models + +Model name | Pixel 1 Latency (ms) | COCO mAP | Outputs +------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------------------: | :------: | :-----: +[ssd_mobiledet_cpu_coco](http://download.tensorflow.org/models/object_detection/ssdlite_mobiledet_cpu_320x320_coco_2020_05_19.tar.gz) | 113 | 24.0 | Boxes +[ssd_mobilenet_v2_mnasfpn_coco](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v2_mnasfpn_shared_box_predictor_320x320_coco_sync_2020_05_18.tar.gz) | 183 | 26.6 | Boxes +[ssd_mobilenet_v3_large_coco](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v3_large_coco_2020_01_14.tar.gz) | 119 | 22.6 | Boxes +[ssd_mobilenet_v3_small_coco](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v3_small_coco_2020_01_14.tar.gz) | 43 | 15.4 | Boxes + +### Pixel4 Edge TPU models + +Model name | Pixel 4 Edge TPU Latency (ms) | COCO mAP (fp32/uint8) | Outputs +--------------------------------------------------------------------------------------------------------------------------------------------- | :---------------------------: | :-------------------: | :-----: +[ssd_mobiledet_edgetpu_coco](http://download.tensorflow.org/models/object_detection/ssdlite_mobiledet_edgetpu_320x320_coco_2020_05_19.tar.gz) | 6.9 | 25.9/25.6 | Boxes +[ssd_mobilenet_edgetpu_coco](https://storage.cloud.google.com/mobilenet_edgetpu/checkpoints/ssdlite_mobilenet_edgetpu_coco_quant.tar.gz) | 6.6 | -/24.3 | Boxes + +### Pixel4 DSP models + +Model name | Pixel 4 DSP Latency (ms) | COCO mAP (fp32/uint8) | Outputs +------------------------------------------------------------------------------------------------------------------------------------- | :----------------------: | :-------------------: | :-----: +[ssd_mobiledet_dsp_coco](http://download.tensorflow.org/models/object_detection/ssdlite_mobiledet_dsp_320x320_coco_2020_05_19.tar.gz) | 12.3 | 28.9/28.8 | Boxes + +## Kitti-trained models + +Model name | Speed (ms) | Pascal mAP@0.5 | Outputs +----------------------------------------------------------------------------------------------------------------------------------- | :--------: | :------------: | :-----: +[faster_rcnn_resnet101_kitti](http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet101_kitti_2018_01_28.tar.gz) | 79 | 87 | Boxes + +## Open Images-trained models + +Model name | Speed (ms) | Open Images mAP@0.5[^2] | Outputs +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :--------: | :---------------------: | :-----: +[faster_rcnn_inception_resnet_v2_atrous_oidv2](http://download.tensorflow.org/models/object_detection/faster_rcnn_inception_resnet_v2_atrous_oid_2018_01_28.tar.gz) | 727 | 37 | Boxes +[faster_rcnn_inception_resnet_v2_atrous_lowproposals_oidv2](http://download.tensorflow.org/models/object_detection/faster_rcnn_inception_resnet_v2_atrous_lowproposals_oid_2018_01_28.tar.gz) | 347 | | Boxes +[facessd_mobilenet_v2_quantized_open_image_v4](http://download.tensorflow.org/models/object_detection/facessd_mobilenet_v2_quantized_320x320_open_image_v4.tar.gz) [^3] | 20 | 73 (faces) | Boxes + +Model name | Speed (ms) | Open Images mAP@0.5[^4] | Outputs 
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :--------: | :---------------------: | :-----: +[faster_rcnn_inception_resnet_v2_atrous_oidv4](http://download.tensorflow.org/models/object_detection/faster_rcnn_inception_resnet_v2_atrous_oid_v4_2018_12_12.tar.gz) | 425 | 54 | Boxes +[ssd_mobilenetv2_oidv4](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v2_oid_v4_2018_12_12.tar.gz) | 89 | 36 | Boxes +[ssd_resnet_101_fpn_oidv4](http://download.tensorflow.org/models/object_detection/ssd_resnet101_v1_fpn_shared_box_predictor_oid_512x512_sync_2019_01_20.tar.gz) | 237 | 38 | Boxes + +## iNaturalist Species-trained models + +Model name | Speed (ms) | Pascal mAP@0.5 | Outputs +--------------------------------------------------------------------------------------------------------------------------------- | :--------: | :------------: | :-----: +[faster_rcnn_resnet101_fgvc](http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet101_fgvc_2018_07_19.tar.gz) | 395 | 58 | Boxes +[faster_rcnn_resnet50_fgvc](http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet50_fgvc_2018_07_19.tar.gz) | 366 | 55 | Boxes + +## AVA v2.1 trained models + +Model name | Speed (ms) | Pascal mAP@0.5 | Outputs +----------------------------------------------------------------------------------------------------------------------------------------- | :--------: | :------------: | :-----: +[faster_rcnn_resnet101_ava_v2.1](http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet101_ava_v2.1_2018_04_30.tar.gz) | 93 | 11 | Boxes + +## Snapshot Serengeti Camera Trap trained models + +Model name | COCO mAP@0.5 | Outputs +--------------------------------------------------------------------------------------------------------------------------------------------------------------- | :----------: | :-----: +[faster_rcnn_resnet101_snapshot_serengeti](http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet101_snapshot_serengeti_2020_06_10.tar.gz) | 38 | Boxes +[context_rcnn_resnet101_snapshot_serengeti](http://download.tensorflow.org/models/object_detection/context_rcnn_resnet101_snapshot_serengeti_2020_06_10.tar.gz) | 56 | Boxes + +[^1]: See [MSCOCO evaluation protocol](http://cocodataset.org/#detections-eval). + The COCO mAP numbers here are evaluated on COCO 14 minival set (note that + our split is different from COCO 17 Val). A full list of image ids used in + our split could be fould + [here](https://github.com/tensorflow/models/blob/master/research/object_detection/data/mscoco_minival_ids.txt). +[^2]: This is PASCAL mAP with a slightly different way of true positives + computation: see + [Open Images evaluation protocols](evaluation_protocols.md), + oid_V2_detection_metrics. +[^3]: Non-face boxes are dropped during training and non-face groundtruth boxes + are ignored when evaluating. +[^4]: This is Open Images Challenge metric: see + [Open Images evaluation protocols](evaluation_protocols.md), + oid_challenge_detection_metrics. 
diff --git a/models/research/object_detection/g3doc/evaluation_protocols.md b/models/research/object_detection/g3doc/evaluation_protocols.md new file mode 100644 index 0000000000000000000000000000000000000000..e431fa7233ee199443dcaf5f2b7840c5bb20cd99 --- /dev/null +++ b/models/research/object_detection/g3doc/evaluation_protocols.md @@ -0,0 +1,163 @@ +# Supported object detection evaluation protocols + +The Tensorflow Object Detection API currently supports three evaluation protocols, +that can be configured in `EvalConfig` by setting `metrics_set` to the +corresponding value. + +## PASCAL VOC 2010 detection metric + +`EvalConfig.metrics_set='pascal_voc_detection_metrics'` + +The commonly used mAP metric for evaluating the quality of object detectors, +computed according to the protocol of the PASCAL VOC Challenge 2010-2012. The +protocol is available +[here](http://host.robots.ox.ac.uk/pascal/VOC/voc2010/devkit_doc_08-May-2010.pdf). + +## Weighted PASCAL VOC detection metric + +`EvalConfig.metrics_set='weighted_pascal_voc_detection_metrics'` + +The weighted PASCAL metric computes the mean average precision as the average +precision when treating all classes as a single class. In comparison, +PASCAL metrics computes the mean average precision as the mean of the +per-class average precisions. + +For example, the test set consists of two classes, "cat" and "dog", and there +are ten times more boxes of "cat" than those of "dog". According to PASCAL VOC +2010 metric, performance on each of the two classes would contribute equally +towards the final mAP value, while for the Weighted PASCAL VOC metric the final +mAP value will be influenced by frequency of each class. + +## PASCAL VOC 2010 instance segmentation metric + +`EvalConfig.metrics_set='pascal_voc_instance_segmentation_metrics'` + +Similar to Pascal VOC 2010 detection metric, but computes the intersection over +union based on the object masks instead of object boxes. + +## Weighted PASCAL VOC instance segmentation metric + +`EvalConfig.metrics_set='weighted_pascal_voc_instance_segmentation_metrics'` + +Similar to the weighted pascal voc 2010 detection metric, but computes the +intersection over union based on the object masks instead of object boxes. + + +## COCO detection metrics + +`EvalConfig.metrics_set='coco_detection_metrics'` + +The COCO metrics are the official detection metrics used to score the +[COCO competition](http://cocodataset.org/) and are similar to Pascal VOC +metrics but have a slightly different implementation and report additional +statistics such as mAP at IOU thresholds of .5:.95, and precision/recall +statistics for small, medium, and large objects. +See the +[pycocotools](https://github.com/cocodataset/cocoapi/tree/master/PythonAPI) +repository for more details. + +## COCO mask metrics + +`EvalConfig.metrics_set='coco_mask_metrics'` + +Similar to the COCO detection metrics, but computes the +intersection over union based on the object masks instead of object boxes. + +## Open Images V2 detection metric + +`EvalConfig.metrics_set='oid_V2_detection_metrics'` + +This metric is defined originally for evaluating detector performance on [Open +Images V2 dataset](https://github.com/openimages/dataset) and is fairly similar +to the PASCAL VOC 2010 metric mentioned above. It computes interpolated average +precision (AP) for each class and averages it among all classes (mAP). 
+ +The difference to the PASCAL VOC 2010 metric is the following: Open Images +annotations contain `group-of` ground-truth boxes (see [Open Images data +description](https://github.com/openimages/dataset#annotations-human-bboxcsv)), +that are treated differently for the purpose of deciding whether detections are +"true positives", "ignored", "false positives". Here we define these three +cases: + +A detection is a "true positive" if there is a non-group-of ground-truth box, +such that: + +* The detection box and the ground-truth box are of the same class, and + intersection-over-union (IoU) between the detection box and the ground-truth + box is greater than the IoU threshold (default value 0.5). \ + Illustration of handling non-group-of boxes: \ + ![alt + groupof_case_eval](img/nongroupof_case_eval.png "illustration of handling non-group-of boxes: yellow box - ground truth bounding box; green box - true positive; red box - false positives.") + + * yellow box - ground-truth box; + * green box - true positive; + * red boxes - false positives. + +* This is the highest scoring detection for this ground truth box that + satisfies the criteria above. + +A detection is "ignored" if it is not a true positive, and there is a `group-of` +ground-truth box such that: + +* The detection box and the ground-truth box are of the same class, and the + area of intersection between the detection box and the ground-truth box + divided by the area of the detection is greater than 0.5. This is intended + to measure whether the detection box is approximately inside the group-of + ground-truth box. \ + Illustration of handling `group-of` boxes: \ + ![alt + groupof_case_eval](img/groupof_case_eval.png "illustration of handling group-of boxes: yellow box - ground truth bounding box; grey boxes - two detections of cars, that are ignored; red box - false positive.") + + * yellow box - ground-truth box; + * grey boxes - two detections on cars, that are ignored; + * red box - false positive. + +A detection is a "false positive" if it is neither a "true positive" nor +"ignored". + +Precision and recall are defined as: + +* Precision = number-of-true-positives/(number-of-true-positives + number-of-false-positives) +* Recall = number-of-true-positives/number-of-non-group-of-boxes + +Note that detections ignored as firing on a `group-of` ground-truth box do not +contribute to the number of true positives. + +The labels in Open Images are organized in a +[hierarchy](https://storage.googleapis.com/openimages/2017_07/bbox_labels_vis/bbox_labels_vis.html). +Ground-truth bounding-boxes are annotated with the most specific class available +in the hierarchy. For example, "car" has two children "limousine" and "van". Any +other kind of car is annotated as "car" (for example, a sedan). Given this +convention, the evaluation software treats all classes independently, ignoring +the hierarchy. To achieve high performance values, object detectors should +output bounding-boxes labelled in the same manner. + +The old metric name is DEPRECATED. +`EvalConfig.metrics_set='open_images_V2_detection_metrics'` + +## OID Challenge Object Detection Metric + +`EvalConfig.metrics_set='oid_challenge_detection_metrics'` + +The metric for the OID Challenge Object Detection Metric 2018/2019 Object +Detection track. The description is provided on the +[Open Images Challenge website](https://storage.googleapis.com/openimages/web/evaluation.html#object_detection_eval). + +The old metric name is DEPRECATED. 
+`EvalConfig.metrics_set='oid_challenge_object_detection_metrics'`
+
+## OID Challenge Visual Relationship Detection Metric
+
+The metric for the 2018/2019 OID Challenge Visual Relationship Detection
+track. The description is provided on the
+[Open Images Challenge website](https://storage.googleapis.com/openimages/web/evaluation.html#visual_relationships_eval).
+Note: this is currently a stand-alone metric that can be used only through the
+`metrics/oid_vrd_challenge_evaluation.py` utility.
+
+## OID Challenge Instance Segmentation Metric
+
+`EvalConfig.metrics_set='oid_challenge_segmentation_metrics'`
+
+The metric for the 2019 OID Challenge Instance Segmentation track. The
+description is provided on the
+[Open Images Challenge website](https://storage.googleapis.com/openimages/web/evaluation.html#instance_segmentation_eval).
diff --git a/models/research/object_detection/g3doc/exporting_models.md b/models/research/object_detection/g3doc/exporting_models.md
new file mode 100644
index 0000000000000000000000000000000000000000..c64408302e961be5f4b5de9767c7df3cb021601e
--- /dev/null
+++ b/models/research/object_detection/g3doc/exporting_models.md
@@ -0,0 +1,36 @@
+# Exporting a trained model for inference
+
+After your model has been trained, you should export it to a Tensorflow
+graph proto. A checkpoint will typically consist of three files:
+
+* model.ckpt-${CHECKPOINT_NUMBER}.data-00000-of-00001
+* model.ckpt-${CHECKPOINT_NUMBER}.index
+* model.ckpt-${CHECKPOINT_NUMBER}.meta
+
+After you've identified a candidate checkpoint to export, run the following
+command from tensorflow/models/research:
+
+``` bash
+# From tensorflow/models/research/
+INPUT_TYPE=image_tensor
+PIPELINE_CONFIG_PATH={path to pipeline config file}
+TRAINED_CKPT_PREFIX={path to model.ckpt}
+EXPORT_DIR={path to folder that will be used for export}
+python object_detection/export_inference_graph.py \
+ --input_type=${INPUT_TYPE} \
+ --pipeline_config_path=${PIPELINE_CONFIG_PATH} \
+ --trained_checkpoint_prefix=${TRAINED_CKPT_PREFIX} \
+ --output_directory=${EXPORT_DIR}
+```
+
+NOTE: We are configuring our exported model to ingest 4-D image tensors. We can
+also configure the exported model to take encoded images or serialized
+`tf.Example`s.
+
+After export, you should see the directory ${EXPORT_DIR} containing the following:
+
+* saved_model/, a directory containing the saved model format of the exported model
+* frozen_inference_graph.pb, the frozen graph format of the exported model
+* model.ckpt.*, the model checkpoints used for exporting
+* checkpoint, a file pointing to the included checkpoint files
+* pipeline.config, the pipeline config file for the exported model
diff --git a/models/research/object_detection/g3doc/faq.md b/models/research/object_detection/g3doc/faq.md
new file mode 100644
index 0000000000000000000000000000000000000000..c0ca503fc6e7eae40192b97b8de2578d74e1b7f9
--- /dev/null
+++ b/models/research/object_detection/g3doc/faq.md
@@ -0,0 +1,27 @@
+# Frequently Asked Questions
+
+## Q: How can I ensure that all the groundtruth boxes are used during training and evaluation?
+A: For the object detection framework to be TPU-compliant, we must pad our input
+tensors to static shapes. This means that we must pad to a fixed number of
+bounding boxes, configured by `InputReader.max_number_of_boxes`. It is
+important to set this value to a number larger than the maximum number of
+groundtruth boxes in the dataset.
If an image is encountered with more
+bounding boxes, the excess boxes will be clipped.
+
+## Q: AttributeError: 'module' object has no attribute 'BackupHandler'
+A: This BackupHandler (tf_slim.tfexample_decoder.BackupHandler) was
+introduced in tensorflow 1.5.0, so running with earlier versions may cause this
+issue. It has since been replaced by
+object_detection.data_decoders.tf_example_decoder.BackupHandler. If you see
+this issue, you should be able to resolve it by syncing your fork to HEAD.
+The same applies to LookupTensor.
+
+## Q: AttributeError: 'module' object has no attribute 'LookupTensor'
+A: As with BackupHandler, syncing your fork to HEAD should make it work.
+
+## Q: Why can't I get the inference time as reported in model zoo?
+A: The inference time reported in the model zoo is the mean time measured over
+hundreds of test images on an internal machine. As mentioned in
+[Tensorflow detection model zoo](detection_model_zoo.md), this speed depends
+highly on one's specific hardware configuration and should be treated more as
+a relative timing.
diff --git a/models/research/object_detection/g3doc/img/dogs_detections_output.jpg b/models/research/object_detection/g3doc/img/dogs_detections_output.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9e88a7010fa90f5c4a74f6caee78f5c975f77e40
Binary files /dev/null and b/models/research/object_detection/g3doc/img/dogs_detections_output.jpg differ
diff --git a/models/research/object_detection/g3doc/img/example_cat.jpg b/models/research/object_detection/g3doc/img/example_cat.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..74c7ef4b0849ce1b1f3b8061f172cb98ce06ef5e
Binary files /dev/null and b/models/research/object_detection/g3doc/img/example_cat.jpg differ
diff --git a/models/research/object_detection/g3doc/img/groupof_case_eval.png b/models/research/object_detection/g3doc/img/groupof_case_eval.png
new file mode 100644
index 0000000000000000000000000000000000000000..5abc9b6984fb5816ca4f2e6f40e38ec6e6ea9cfc
Binary files /dev/null and b/models/research/object_detection/g3doc/img/groupof_case_eval.png differ
diff --git a/models/research/object_detection/g3doc/img/kites_detections_output.jpg b/models/research/object_detection/g3doc/img/kites_detections_output.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..7c0f3364deda6614b5bf6fdddad7e7a578f0f6eb
Binary files /dev/null and b/models/research/object_detection/g3doc/img/kites_detections_output.jpg differ
diff --git a/models/research/object_detection/g3doc/img/kites_with_segment_overlay.png b/models/research/object_detection/g3doc/img/kites_with_segment_overlay.png
new file mode 100644
index 0000000000000000000000000000000000000000..45b0c50cbb5581ceb5c954ff613d43923cb0c83a
--- /dev/null
+++ b/models/research/object_detection/g3doc/img/kites_with_segment_overlay.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e80c2c2a057d53b749b5430b786f23e826f66e411d8fc13448bfb770c71ad3ac
+size 7165291
diff --git a/models/research/object_detection/g3doc/img/nongroupof_case_eval.png b/models/research/object_detection/g3doc/img/nongroupof_case_eval.png
new file mode 100644
index 0000000000000000000000000000000000000000..cbb76f493adfa725cd0b2ab323f89fdfc57a57ec
Binary files /dev/null and b/models/research/object_detection/g3doc/img/nongroupof_case_eval.png differ
diff --git a/models/research/object_detection/g3doc/img/oid_bus_72e19c28aac34ed8.jpg b/models/research/object_detection/g3doc/img/oid_bus_72e19c28aac34ed8.jpg
new file mode 100644 index
0000000000000000000000000000000000000000..1e9412ad545c0a1e1e7dcfa35a168c2a61cf2012 Binary files /dev/null and b/models/research/object_detection/g3doc/img/oid_bus_72e19c28aac34ed8.jpg differ diff --git a/models/research/object_detection/g3doc/img/oid_monkey_3b4168c89cecbc5b.jpg b/models/research/object_detection/g3doc/img/oid_monkey_3b4168c89cecbc5b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..46b1fb282a428fe1169a7ff1d30e963bc085e733 Binary files /dev/null and b/models/research/object_detection/g3doc/img/oid_monkey_3b4168c89cecbc5b.jpg differ diff --git a/models/research/object_detection/g3doc/img/oxford_pet.png b/models/research/object_detection/g3doc/img/oxford_pet.png new file mode 100644 index 0000000000000000000000000000000000000000..ddac415f5ef079f8d6fde8dd4c9838735fd96325 Binary files /dev/null and b/models/research/object_detection/g3doc/img/oxford_pet.png differ diff --git a/models/research/object_detection/g3doc/img/tensorboard.png b/models/research/object_detection/g3doc/img/tensorboard.png new file mode 100644 index 0000000000000000000000000000000000000000..fbcdbeb38cf5594681c0e206a08b6d06bd1e86a9 Binary files /dev/null and b/models/research/object_detection/g3doc/img/tensorboard.png differ diff --git a/models/research/object_detection/g3doc/img/tensorboard2.png b/models/research/object_detection/g3doc/img/tensorboard2.png new file mode 100644 index 0000000000000000000000000000000000000000..97ad22daa11870ecebbbe7cadfb2d8bb30d738f6 Binary files /dev/null and b/models/research/object_detection/g3doc/img/tensorboard2.png differ diff --git a/models/research/object_detection/g3doc/img/tf-od-api-logo.png b/models/research/object_detection/g3doc/img/tf-od-api-logo.png new file mode 100644 index 0000000000000000000000000000000000000000..9fa9cc9dba228c1effabfa5c1474052ed8bad3fd Binary files /dev/null and b/models/research/object_detection/g3doc/img/tf-od-api-logo.png differ diff --git a/models/research/object_detection/g3doc/installation.md b/models/research/object_detection/g3doc/installation.md new file mode 100644 index 0000000000000000000000000000000000000000..05c891802af0dbc74adcb9337a8e435562877e28 --- /dev/null +++ b/models/research/object_detection/g3doc/installation.md @@ -0,0 +1,184 @@ +# Installation + +## Dependencies + +Tensorflow Object Detection API depends on the following libraries: + +* Protobuf 3.0.0 +* Python-tk +* Pillow 1.0 +* lxml +* tf-slim (https://github.com/google-research/tf-slim) +* slim (which is included in the "tensorflow/models/research/" checkout) +* Jupyter notebook +* Matplotlib +* Tensorflow (1.15.0) +* Cython +* contextlib2 +* cocoapi + +For detailed steps to install Tensorflow, follow the [Tensorflow installation +instructions](https://www.tensorflow.org/install/). 
A typical user can install +Tensorflow using one of the following commands: + +``` bash +# For CPU +pip install tensorflow +# For GPU +pip install tensorflow-gpu +``` + +The remaining libraries can be installed on Ubuntu 16.04 using via apt-get: + +```bash +sudo apt-get install protobuf-compiler python-pil python-lxml python-tk +pip install --user Cython +pip install --user contextlib2 +pip install --user jupyter +pip install --user matplotlib +pip install --user tf_slim +``` + +Alternatively, users can install dependencies using pip: + +```bash +pip install --user Cython +pip install --user contextlib2 +pip install --user pillow +pip install --user lxml +pip install --user jupyter +pip install --user matplotlib +pip install --user tf_slim +``` + + +**Note**: sometimes "sudo apt-get install protobuf-compiler" will install +Protobuf 3+ versions for you and some users have issues when using 3.5. +If that is your case, try the [manual](#Manual-protobuf-compiler-installation-and-usage) installation. + +## Download the tensorflow/models repository + +```bash +git clone https://github.com/tensorflow/models.git +``` + +To use this library, you need to download this repository, whenever it says +`` it will be referring to the folder that you downloaded +this repository into. + +## COCO API installation + +Download the +[cocoapi](https://github.com/cocodataset/cocoapi) and +copy the pycocotools subfolder to the tensorflow/models/research directory if +you are interested in using COCO evaluation metrics. The default metrics are +based on those used in Pascal VOC evaluation. To use the COCO object detection +metrics add `metrics_set: "coco_detection_metrics"` to the `eval_config` message +in the config file. To use the COCO instance segmentation metrics add +`metrics_set: "coco_mask_metrics"` to the `eval_config` message in the config +file. + +```bash +git clone https://github.com/cocodataset/cocoapi.git +cd cocoapi/PythonAPI +make +cp -r pycocotools /models/research/ +``` + +Alternatively, users can install `pycocotools` using pip: + +```bash +pip install --user pycocotools +``` + +## Protobuf Compilation + +The Tensorflow Object Detection API uses Protobufs to configure model and +training parameters. Before the framework can be used, the Protobuf libraries +must be compiled. This should be done by running the following command from +the [tensorflow/models/research/ +](https://github.com/tensorflow/models/tree/master/research/) +directory: + + +``` bash +# From tensorflow/models/research/ +protoc object_detection/protos/*.proto --python_out=. +``` + +**Note**: If you're getting errors while compiling, you might be using an incompatible protobuf compiler. If that's the case, use the following manual installation + +## Manual protobuf-compiler installation and usage + +**If you are on linux:** + +Download and install the 3.0 release of protoc, then unzip the file. + +```bash +# From tensorflow/models/research/ +wget -O protobuf.zip https://github.com/google/protobuf/releases/download/v3.0.0/protoc-3.0.0-linux-x86_64.zip +unzip protobuf.zip +``` + +Run the compilation process again, but use the downloaded version of protoc + +```bash +# From tensorflow/models/research/ +./bin/protoc object_detection/protos/*.proto --python_out=. 
+``` + +**If you are on MacOS:** + +If you have homebrew, download and install the protobuf with +```brew install protobuf``` + +Alternately, run: +```PROTOC_ZIP=protoc-3.3.0-osx-x86_64.zip +curl -OL https://github.com/google/protobuf/releases/download/v3.3.0/$PROTOC_ZIP +sudo unzip -o $PROTOC_ZIP -d /usr/local bin/protoc +rm -f $PROTOC_ZIP +``` + +Run the compilation process again: + +``` bash +# From tensorflow/models/research/ +protoc object_detection/protos/*.proto --python_out=. +``` + +## Add Libraries to PYTHONPATH + +When running locally, the tensorflow/models/research/ and slim directories +should be appended to PYTHONPATH. This can be done by running the following from +tensorflow/models/research/: + + +``` bash +# From tensorflow/models/research/ +export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim +``` + +Note: This command needs to run from every new terminal you start. If you wish +to avoid running this manually, you can add it as a new line to the end of your +~/.bashrc file, replacing \`pwd\` with the absolute path of +tensorflow/models/research on your system. After updating ~/.bashrc file you +can run the following command: + +Note: Some of the functions defined in tensorflow/models/research/slim has been +moved to [tf-slim](https://github.com/google-research/tf-slim), so installing +tf_slim is required now. + +``` bash +source ~/.bashrc +``` + +# Testing the Installation + +You can test that you have correctly installed the Tensorflow Object Detection\ +API by running the following command: + + +```bash +# If using Tensorflow 1.X: +python object_detection/builders/model_builder_tf1_test.py +``` diff --git a/models/research/object_detection/g3doc/instance_segmentation.md b/models/research/object_detection/g3doc/instance_segmentation.md new file mode 100644 index 0000000000000000000000000000000000000000..8ebf7d8c3d7329b95b81fbb14d272f6b0134e138 --- /dev/null +++ b/models/research/object_detection/g3doc/instance_segmentation.md @@ -0,0 +1,105 @@ +## Run an Instance Segmentation Model + +For some applications it isn't adequate enough to localize an object with a +simple bounding box. For instance, you might want to segment an object region +once it is detected. This class of problems is called **instance segmentation**. + +
+ +### Materializing data for instance segmentation {#materializing-instance-seg} + +Instance segmentation is an extension of object detection, where a binary mask +(i.e. object vs. background) is associated with every bounding box. This allows +for more fine-grained information about the extent of the object within the box. +To train an instance segmentation model, a groundtruth mask must be supplied for +every groundtruth bounding box. In additional to the proto fields listed in the +section titled [Using your own dataset](using_your_own_dataset.md), one must +also supply `image/object/mask`, which can either be a repeated list of +single-channel encoded PNG strings, or a single dense 3D binary tensor where +masks corresponding to each object are stacked along the first dimension. Each +is described in more detail below. + +#### PNG Instance Segmentation Masks + +Instance segmentation masks can be supplied as serialized PNG images. + +```shell +image/object/mask = ["\x89PNG\r\n\x1A\n\x00\x00\x00\rIHDR\...", ...] +``` + +These masks are whole-image masks, one for each object instance. The spatial +dimensions of each mask must agree with the image. Each mask has only a single +channel, and the pixel values are either 0 (background) or 1 (object mask). +**PNG masks are the preferred parameterization since they offer considerable +space savings compared to dense numerical masks.** + +#### Dense Numerical Instance Segmentation Masks + +Masks can also be specified via a dense numerical tensor. + +```shell +image/object/mask = [0.0, 0.0, 1.0, 1.0, 0.0, ...] +``` + +For an image with dimensions `H` x `W` and `num_boxes` groundtruth boxes, the +mask corresponds to a [`num_boxes`, `H`, `W`] float32 tensor, flattened into a +single vector of shape `num_boxes` * `H` * `W`. In TensorFlow, examples are read +in row-major format, so the elements are organized as: + +```shell +... mask 0 row 0 ... mask 0 row 1 ... // ... mask 0 row H-1 ... mask 1 row 0 ... +``` + +where each row has W contiguous binary values. + +To see an example tf-records with mask labels, see the examples under the +[Preparing Inputs](preparing_inputs.md) section. + +### Pre-existing config files + +We provide four instance segmentation config files that you can use to train +your own models: + +1. mask_rcnn_inception_resnet_v2_atrous_coco +1. mask_rcnn_resnet101_atrous_coco +1. mask_rcnn_resnet50_atrous_coco +1. mask_rcnn_inception_v2_coco + +For more details see the [detection model zoo](detection_model_zoo.md). + +### Updating a Faster R-CNN config file + +Currently, the only supported instance segmentation model is [Mask +R-CNN](https://arxiv.org/abs/1703.06870), which requires Faster R-CNN as the +backbone object detector. + +Once you have a baseline Faster R-CNN pipeline configuration, you can make the +following modifications in order to convert it into a Mask R-CNN model. + +1. Within `train_input_reader` and `eval_input_reader`, set + `load_instance_masks` to `True`. If using PNG masks, set `mask_type` to + `PNG_MASKS`, otherwise you can leave it as the default 'NUMERICAL_MASKS'. +1. Within the `faster_rcnn` config, use a `MaskRCNNBoxPredictor` as the + `second_stage_box_predictor`. +1. Within the `MaskRCNNBoxPredictor` message, set `predict_instance_masks` to + `True`. You must also define `conv_hyperparams`. +1. Within the `faster_rcnn` message, set `number_of_stages` to `3`. +1. Add instance segmentation metrics to the set of metrics: + `'coco_mask_metrics'`. +1. Update the `input_path`s to point at your data. 
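+
+As a rough sketch of the changes listed above (the field names follow the steps
+above, while the paths and hyperparameter values are placeholders rather than a
+complete, validated config), the relevant fragments of a converted pipeline
+might look like:
+
+```
+model {
+  faster_rcnn {
+    number_of_stages: 3
+    second_stage_box_predictor {
+      mask_rcnn_box_predictor {
+        predict_instance_masks: true
+        conv_hyperparams {
+          op: CONV
+          regularizer { l2_regularizer { weight: 0.0 } }
+          initializer { truncated_normal_initializer { stddev: 0.01 } }
+        }
+      }
+    }
+  }
+}
+train_input_reader: {
+  load_instance_masks: true
+  mask_type: PNG_MASKS
+  tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/train.record" }
+}
+eval_config: {
+  metrics_set: "coco_mask_metrics"
+}
+eval_input_reader: {
+  load_instance_masks: true
+  mask_type: PNG_MASKS
+  tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/val.record" }
+}
+```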
+ +Please refer to the section on [Running the pets dataset](running_pets.md) for +additional details. + +> Note: The mask prediction branch consists of a sequence of convolution layers. +> You can set the number of convolution layers and their depth as follows: +> +> 1. Within the `MaskRCNNBoxPredictor` message, set the +> `mask_prediction_conv_depth` to your value of interest. The default value +> is 256. If you set it to `0` (recommended), the depth is computed +> automatically based on the number of classes in the dataset. +> 1. Within the `MaskRCNNBoxPredictor` message, set the +> `mask_prediction_num_conv_layers` to your value of interest. The default +> value is 2. diff --git a/models/research/object_detection/g3doc/oid_inference_and_evaluation.md b/models/research/object_detection/g3doc/oid_inference_and_evaluation.md new file mode 100644 index 0000000000000000000000000000000000000000..4babf10a2768a871191a645e85d00779ee7b22a9 --- /dev/null +++ b/models/research/object_detection/g3doc/oid_inference_and_evaluation.md @@ -0,0 +1,257 @@ +# Inference and evaluation on the Open Images dataset + +This page presents a tutorial for running object detector inference and +evaluation measure computations on the [Open Images +dataset](https://github.com/openimages/dataset), using tools from the +[TensorFlow Object Detection +API](https://github.com/tensorflow/models/tree/master/research/object_detection). +It shows how to download the images and annotations for the validation and test +sets of Open Images; how to package the downloaded data in a format understood +by the Object Detection API; where to find a trained object detector model for +Open Images; how to run inference; and how to compute evaluation measures on the +inferred detections. + +Inferred detections will look like the following: + +![](img/oid_bus_72e19c28aac34ed8.jpg) +![](img/oid_monkey_3b4168c89cecbc5b.jpg) + +On the validation set of Open Images, this tutorial requires 27GB of free disk +space and the inference step takes approximately 9 hours on a single NVIDIA +Tesla P100 GPU. On the test set -- 75GB and 27 hours respectively. All other +steps require less than two hours in total on both sets. + +## Installing TensorFlow, the Object Detection API, and Google Cloud SDK + +Please run through the [installation instructions](installation.md) to install +TensorFlow and all its dependencies. Ensure the Protobuf libraries are compiled +and the library directories are added to `PYTHONPATH`. You will also need to +`pip` install `pandas` and `contextlib2`. + +Some of the data used in this tutorial lives in Google Cloud buckets. To access +it, you will have to [install the Google Cloud +SDK](https://cloud.google.com/sdk/downloads) on your workstation or laptop. + +## Preparing the Open Images validation and test sets + +In order to run inference and subsequent evaluation measure computations, we +require a dataset of images and ground truth boxes, packaged as TFRecords of +TFExamples. To create such a dataset for Open Images, you will need to first +download ground truth boxes from the [Open Images +website](https://github.com/openimages/dataset): + +```bash +# From tensorflow/models/research +mkdir oid +cd oid +wget https://storage.googleapis.com/openimages/2017_07/annotations_human_bbox_2017_07.tar.gz +tar -xvf annotations_human_bbox_2017_07.tar.gz +``` + +Next, download the images. In this tutorial, we will use lower resolution images +provided by [CVDF](http://www.cvdfoundation.org). 
Please follow the instructions +on [CVDF's Open Images repository +page](https://github.com/cvdfoundation/open-images-dataset) in order to gain +access to the cloud bucket with the images. Then run: + +```bash +# From tensorflow/models/research/oid +SPLIT=validation # Set SPLIT to "test" to download the images in the test set +mkdir raw_images_${SPLIT} +gsutil -m rsync -r gs://open-images-dataset/$SPLIT raw_images_${SPLIT} +``` + +Another option for downloading the images is to follow the URLs contained in the +[image URLs and metadata CSV +files](https://storage.googleapis.com/openimages/2017_07/images_2017_07.tar.gz) +on the Open Images website. + +At this point, your `tensorflow/models/research/oid` directory should appear as +follows: + +```lang-none +|-- 2017_07 +| |-- test +| | `-- annotations-human-bbox.csv +| |-- train +| | `-- annotations-human-bbox.csv +| `-- validation +| `-- annotations-human-bbox.csv +|-- raw_images_validation (if you downloaded the validation split) +| `-- ... (41,620 files matching regex "[0-9a-f]{16}.jpg") +|-- raw_images_test (if you downloaded the test split) +| `-- ... (125,436 files matching regex "[0-9a-f]{16}.jpg") +`-- annotations_human_bbox_2017_07.tar.gz +``` + +Next, package the data into TFRecords of TFExamples by running: + +```bash +# From tensorflow/models/research/oid +SPLIT=validation # Set SPLIT to "test" to create TFRecords for the test split +mkdir ${SPLIT}_tfrecords + +PYTHONPATH=$PYTHONPATH:$(readlink -f ..) \ +python -m object_detection/dataset_tools/create_oid_tf_record \ + --input_box_annotations_csv 2017_07/$SPLIT/annotations-human-bbox.csv \ + --input_images_directory raw_images_${SPLIT} \ + --input_label_map ../object_detection/data/oid_bbox_trainable_label_map.pbtxt \ + --output_tf_record_path_prefix ${SPLIT}_tfrecords/$SPLIT.tfrecord \ + --num_shards=100 +``` + +To add image-level labels, use the `--input_image_label_annotations_csv` flag. + +This results in 100 TFRecord files (shards), written to +`oid/${SPLIT}_tfrecords`, with filenames matching +`${SPLIT}.tfrecord-000[0-9][0-9]-of-00100`. Each shard contains approximately +the same number of images and is defacto a representative random sample of the +input data. [This enables](#accelerating_inference) a straightforward work +division scheme for distributing inference and also approximate measure +computations on subsets of the validation and test sets. + +## Inferring detections + +Inference requires a trained object detection model. In this tutorial we will +use a model from the [detections model zoo](detection_model_zoo.md), which can +be downloaded and unpacked by running the commands below. More information about +the model, such as its architecture and how it was trained, is available in the +[model zoo page](detection_model_zoo.md). + +```bash +# From tensorflow/models/research/oid +wget http://download.tensorflow.org/models/object_detection/faster_rcnn_inception_resnet_v2_atrous_oid_14_10_2017.tar.gz +tar -zxvf faster_rcnn_inception_resnet_v2_atrous_oid_14_10_2017.tar.gz +``` + +At this point, data is packed into TFRecords and we have an object detector +model. We can run inference using: + +```bash +# From tensorflow/models/research/oid +SPLIT=validation # or test +TF_RECORD_FILES=$(ls -1 ${SPLIT}_tfrecords/* | tr '\n' ',') + +PYTHONPATH=$PYTHONPATH:$(readlink -f ..) 
\ +python -m object_detection/inference/infer_detections \ + --input_tfrecord_paths=$TF_RECORD_FILES \ + --output_tfrecord_path=${SPLIT}_detections.tfrecord-00000-of-00001 \ + --inference_graph=faster_rcnn_inception_resnet_v2_atrous_oid/frozen_inference_graph.pb \ + --discard_image_pixels +``` + +Inference preserves all fields of the input TFExamples, and adds new fields to +store the inferred detections. This allows [computing evaluation +measures](#computing-evaluation-measures) on the output TFRecord alone, as +groundtruth boxes are preserved as well. Since measure computations don't +require access to the images, `infer_detections` can optionally discard them +with the `--discard_image_pixels` flag. Discarding the images drastically +reduces the size of the output TFRecord. + +### Accelerating inference + +Running inference on the whole validation or test set can take a long time to +complete due to the large number of images present in these sets (41,620 and +125,436 respectively). For quick but approximate evaluation, inference and the +subsequent measure computations can be run on a small number of shards. To run +for example on 2% of all the data, it is enough to set `TF_RECORD_FILES` as +shown below before running `infer_detections`: + +```bash +TF_RECORD_FILES=$(ls ${SPLIT}_tfrecords/${SPLIT}.tfrecord-0000[0-1]-of-00100 | tr '\n' ',') +``` + +Please note that computing evaluation measures on a small subset of the data +introduces variance and bias, since some classes of objects won't be seen during +evaluation. In the example above, this leads to 13.2% higher mAP on the first +two shards of the validation set compared to the mAP for the full set ([see mAP +results](#expected-maps)). + +Another way to accelerate inference is to run it in parallel on multiple +TensorFlow devices on possibly multiple machines. The script below uses +[tmux](https://github.com/tmux/tmux/wiki) to run a separate `infer_detections` +process for each GPU on different partition of the input data. + +```bash +# From tensorflow/models/research/oid +SPLIT=validation # or test +NUM_GPUS=4 +NUM_SHARDS=100 + +tmux new-session -d -s "inference" +function tmux_start { tmux new-window -d -n "inference:GPU$1" "${*:2}; exec bash"; } +for gpu_index in $(seq 0 $(($NUM_GPUS-1))); do + start_shard=$(( $gpu_index * $NUM_SHARDS / $NUM_GPUS )) + end_shard=$(( ($gpu_index + 1) * $NUM_SHARDS / $NUM_GPUS - 1)) + TF_RECORD_FILES=$(seq -s, -f "${SPLIT}_tfrecords/${SPLIT}.tfrecord-%05.0f-of-$(printf '%05d' $NUM_SHARDS)" $start_shard $end_shard) + tmux_start ${gpu_index} \ + PYTHONPATH=$PYTHONPATH:$(readlink -f ..) CUDA_VISIBLE_DEVICES=$gpu_index \ + python -m object_detection/inference/infer_detections \ + --input_tfrecord_paths=$TF_RECORD_FILES \ + --output_tfrecord_path=${SPLIT}_detections.tfrecord-$(printf "%05d" $gpu_index)-of-$(printf "%05d" $NUM_GPUS) \ + --inference_graph=faster_rcnn_inception_resnet_v2_atrous_oid/frozen_inference_graph.pb \ + --discard_image_pixels +done +``` + +After all `infer_detections` processes finish, `tensorflow/models/research/oid` +will contain one output TFRecord from each process, with name matching +`validation_detections.tfrecord-0000[0-3]-of-00004`. 
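+
+As a quick sanity check before computing metrics, you can verify that one
+output shard was produced per process (the expected count below assumes the
+four-GPU example above):
+
+```bash
+# From tensorflow/models/research/oid
+ls ${SPLIT}_detections.tfrecord-* | wc -l  # should print 4 (i.e. $NUM_GPUS)
+```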
+ +## Computing evaluation measures + +To compute evaluation measures on the inferred detections you first need to +create the appropriate configuration files: + +```bash +# From tensorflow/models/research/oid +SPLIT=validation # or test +NUM_SHARDS=1 # Set to NUM_GPUS if using the parallel evaluation script above + +mkdir -p ${SPLIT}_eval_metrics + +echo " +label_map_path: '../object_detection/data/oid_bbox_trainable_label_map.pbtxt' +tf_record_input_reader: { input_path: '${SPLIT}_detections.tfrecord@${NUM_SHARDS}' } +" > ${SPLIT}_eval_metrics/${SPLIT}_input_config.pbtxt + +echo " +metrics_set: 'oid_V2_detection_metrics' +" > ${SPLIT}_eval_metrics/${SPLIT}_eval_config.pbtxt +``` + +And then run: + +```bash +# From tensorflow/models/research/oid +SPLIT=validation # or test + +PYTHONPATH=$PYTHONPATH:$(readlink -f ..) \ +python -m object_detection/metrics/offline_eval_map_corloc \ + --eval_dir=${SPLIT}_eval_metrics \ + --eval_config_path=${SPLIT}_eval_metrics/${SPLIT}_eval_config.pbtxt \ + --input_config_path=${SPLIT}_eval_metrics/${SPLIT}_input_config.pbtxt +``` + +The first configuration file contains an `object_detection.protos.InputReader` +message that describes the location of the necessary input files. The second +file contains an `object_detection.protos.EvalConfig` message that describes the +evaluation metric. For more information about these protos see the corresponding +source files. + +### Expected mAPs + +The result of running `offline_eval_map_corloc` is a CSV file located at +`${SPLIT}_eval_metrics/metrics.csv`. With the above configuration, the file will +contain average precision at IoU≥0.5 for each of the classes present in the +dataset. It will also contain the mAP@IoU≥0.5. Both the per-class average +precisions and the mAP are computed according to the [Open Images evaluation +protocol](evaluation_protocols.md). The expected mAPs for the validation and +test sets of Open Images in this case are: + +Set | Fraction of data | Images | mAP@IoU≥0.5 +---------: | :--------------: | :-----: | ----------- +validation | everything | 41,620 | 39.2% +validation | first 2 shards | 884 | 52.4% +test | everything | 125,436 | 37.7% +test | first 2 shards | 2,476 | 50.8% diff --git a/models/research/object_detection/g3doc/preparing_inputs.md b/models/research/object_detection/g3doc/preparing_inputs.md new file mode 100644 index 0000000000000000000000000000000000000000..8e690e8c345c314f9a65c8fdff5b6ca22e8159c2 --- /dev/null +++ b/models/research/object_detection/g3doc/preparing_inputs.md @@ -0,0 +1,59 @@ +# Preparing Inputs + +Tensorflow Object Detection API reads data using the TFRecord file format. Two +sample scripts (`create_pascal_tf_record.py` and `create_pet_tf_record.py`) are +provided to convert from the PASCAL VOC dataset and Oxford-IIIT Pet dataset to +TFRecords. + +## Generating the PASCAL VOC TFRecord files. + +The raw 2012 PASCAL VOC data set is located +[here](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar). 
+To download, extract and convert it to TFRecords, run the following commands +below: + +```bash +# From tensorflow/models/research/ +wget http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar +tar -xvf VOCtrainval_11-May-2012.tar +python object_detection/dataset_tools/create_pascal_tf_record.py \ + --label_map_path=object_detection/data/pascal_label_map.pbtxt \ + --data_dir=VOCdevkit --year=VOC2012 --set=train \ + --output_path=pascal_train.record +python object_detection/dataset_tools/create_pascal_tf_record.py \ + --label_map_path=object_detection/data/pascal_label_map.pbtxt \ + --data_dir=VOCdevkit --year=VOC2012 --set=val \ + --output_path=pascal_val.record +``` + +You should end up with two TFRecord files named `pascal_train.record` and +`pascal_val.record` in the `tensorflow/models/research/` directory. + +The label map for the PASCAL VOC data set can be found at +`object_detection/data/pascal_label_map.pbtxt`. + +## Generating the Oxford-IIIT Pet TFRecord files. + +The Oxford-IIIT Pet data set is located +[here](http://www.robots.ox.ac.uk/~vgg/data/pets/). To download, extract and +convert it to TFRecords, run the following commands below: + +```bash +# From tensorflow/models/research/ +wget http://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz +wget http://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz +tar -xvf annotations.tar.gz +tar -xvf images.tar.gz +python object_detection/dataset_tools/create_pet_tf_record.py \ + --label_map_path=object_detection/data/pet_label_map.pbtxt \ + --data_dir=`pwd` \ + --output_dir=`pwd` +``` + +You should end up with two 10-sharded TFRecord files named +`pet_faces_train.record-?????-of-00010` and +`pet_faces_val.record-?????-of-00010` in the `tensorflow/models/research/` +directory. + +The label map for the Pet dataset can be found at +`object_detection/data/pet_label_map.pbtxt`. diff --git a/models/research/object_detection/g3doc/running_locally.md b/models/research/object_detection/g3doc/running_locally.md new file mode 100644 index 0000000000000000000000000000000000000000..5e72ea4337856d1499892359d02b48503a0aa4c5 --- /dev/null +++ b/models/research/object_detection/g3doc/running_locally.md @@ -0,0 +1,66 @@ +# Running Locally + +This page walks through the steps required to train an object detection model +on a local machine. It assumes the reader has completed the +following prerequisites: + +1. The Tensorflow Object Detection API has been installed as documented in the +[installation instructions](installation.md). This includes installing library +dependencies, compiling the configuration protobufs and setting up the Python +environment. +2. A valid data set has been created. See [this page](preparing_inputs.md) for +instructions on how to generate a dataset for the PASCAL VOC challenge or the +Oxford-IIIT Pet dataset. +3. A Object Detection pipeline configuration has been written. See +[this page](configuring_jobs.md) for details on how to write a pipeline configuration. 
+ +## Recommended Directory Structure for Training and Evaluation + +``` ++data + -label_map file + -train TFRecord file + -eval TFRecord file ++models + + model + -pipeline config file + +train + +eval +``` + +## Running the Training Job + +A local training job can be run with the following command: + +```bash +# From the tensorflow/models/research/ directory +PIPELINE_CONFIG_PATH={path to pipeline config file} +MODEL_DIR={path to model directory} +NUM_TRAIN_STEPS=50000 +SAMPLE_1_OF_N_EVAL_EXAMPLES=1 +python object_detection/model_main.py \ + --pipeline_config_path=${PIPELINE_CONFIG_PATH} \ + --model_dir=${MODEL_DIR} \ + --num_train_steps=${NUM_TRAIN_STEPS} \ + --sample_1_of_n_eval_examples=$SAMPLE_1_OF_N_EVAL_EXAMPLES \ + --alsologtostderr +``` + +where `${PIPELINE_CONFIG_PATH}` points to the pipeline config and +`${MODEL_DIR}` points to the directory in which training checkpoints +and events will be written to. Note that this binary will interleave both +training and evaluation. + +## Running Tensorboard + +Progress for training and eval jobs can be inspected using Tensorboard. If +using the recommended directory structure, Tensorboard can be run using the +following command: + +```bash +tensorboard --logdir=${MODEL_DIR} +``` + +where `${MODEL_DIR}` points to the directory that contains the +train and eval directories. Please note it may take Tensorboard a couple minutes +to populate with data. diff --git a/models/research/object_detection/g3doc/running_notebook.md b/models/research/object_detection/g3doc/running_notebook.md new file mode 100644 index 0000000000000000000000000000000000000000..c2b8ad1876258d023a997d1166b0d269c2f10f48 --- /dev/null +++ b/models/research/object_detection/g3doc/running_notebook.md @@ -0,0 +1,15 @@ +# Quick Start: Jupyter notebook for off-the-shelf inference + +If you'd like to hit the ground running and run detection on a few example +images right out of the box, we recommend trying out the Jupyter notebook demo. +To run the Jupyter notebook, run the following command from +`tensorflow/models/research/object_detection`: + +``` +# From tensorflow/models/research/object_detection +jupyter notebook +``` + +The notebook should open in your favorite web browser. Click the +[`object_detection_tutorial.ipynb`](../object_detection_tutorial.ipynb) link to +open the demo. diff --git a/models/research/object_detection/g3doc/running_on_cloud.md b/models/research/object_detection/g3doc/running_on_cloud.md new file mode 100644 index 0000000000000000000000000000000000000000..5ee5d87a223fb382c39c7c4d24641dcace9a606c --- /dev/null +++ b/models/research/object_detection/g3doc/running_on_cloud.md @@ -0,0 +1,170 @@ +# Running on Google Cloud ML Engine + +The Tensorflow Object Detection API supports distributed training on Google +Cloud ML Engine. This section documents instructions on how to train and +evaluate your model using Cloud ML. The reader should complete the following +prerequistes: + +1. The reader has created and configured a project on Google Cloud Platform. +See [the Cloud ML quick start guide](https://cloud.google.com/ml-engine/docs/quickstarts/command-line). +2. The reader has installed the Tensorflow Object Detection API as documented +in the [installation instructions](installation.md). +3. The reader has a valid data set and stored it in a Google Cloud Storage +bucket. See [this page](preparing_inputs.md) for instructions on how to generate +a dataset for the PASCAL VOC challenge or the Oxford-IIIT Pet dataset. +4. 
The reader has configured a valid Object Detection pipeline, and stored it +in a Google Cloud Storage bucket. See [this page](configuring_jobs.md) for +details on how to write a pipeline configuration. + +Additionally, it is recommended users test their job by running training and +evaluation jobs for a few iterations +[locally on their own machines](running_locally.md). + +## Packaging + +In order to run the Tensorflow Object Detection API on Cloud ML, it must be +packaged (along with it's TF-Slim dependency and the +[pycocotools](https://github.com/cocodataset/cocoapi/tree/master/PythonAPI/pycocotools) +library). The required packages can be created with the following command + +``` bash +# From tensorflow/models/research/ +bash object_detection/dataset_tools/create_pycocotools_package.sh /tmp/pycocotools +python setup.py sdist +(cd slim && python setup.py sdist) +``` + +This will create python packages dist/object_detection-0.1.tar.gz, +slim/dist/slim-0.1.tar.gz, and /tmp/pycocotools/pycocotools-2.0.tar.gz. + +## Running a Multiworker (GPU) Training Job on CMLE + +Google Cloud ML requires a YAML configuration file for a multiworker training +job using GPUs. A sample YAML file is given below: + +``` +trainingInput: + runtimeVersion: "1.12" + scaleTier: CUSTOM + masterType: standard_gpu + workerCount: 9 + workerType: standard_gpu + parameterServerCount: 3 + parameterServerType: standard + + +``` + +Please keep the following guidelines in mind when writing the YAML +configuration: + +* A job with n workers will have n + 1 training machines (n workers + 1 master). +* The number of parameters servers used should be an odd number to prevent + a parameter server from storing only weight variables or only bias variables + (due to round robin parameter scheduling). +* The learning rate in the training config should be decreased when using a + larger number of workers. Some experimentation is required to find the + optimal learning rate. + +The YAML file should be saved on the local machine (not on GCP). Once it has +been written, a user can start a training job on Cloud ML Engine using the +following command: + +```bash +# From tensorflow/models/research/ +gcloud ml-engine jobs submit training object_detection_`date +%m_%d_%Y_%H_%M_%S` \ + --runtime-version 1.12 \ + --job-dir=gs://${MODEL_DIR} \ + --packages dist/object_detection-0.1.tar.gz,slim/dist/slim-0.1.tar.gz,/tmp/pycocotools/pycocotools-2.0.tar.gz \ + --module-name object_detection.model_main \ + --region us-central1 \ + --config ${PATH_TO_LOCAL_YAML_FILE} \ + -- \ + --model_dir=gs://${MODEL_DIR} \ + --pipeline_config_path=gs://${PIPELINE_CONFIG_PATH} +``` + +Where `${PATH_TO_LOCAL_YAML_FILE}` is the local path to the YAML configuration, +`gs://${MODEL_DIR}` specifies the directory on Google Cloud Storage where the +training checkpoints and events will be written to and +`gs://${PIPELINE_CONFIG_PATH}` points to the pipeline configuration stored on +Google Cloud Storage. + +Users can monitor the progress of their training job on the [ML Engine +Dashboard](https://console.cloud.google.com/mlengine/jobs). + +Note: This sample is supported for use with 1.12 runtime version. 
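+
+If you prefer the command line over the Dashboard, job status and logs can also
+be inspected with `gcloud`; substitute the job name printed when you submitted
+the job (the name below is only an example):
+
+```bash
+gcloud ml-engine jobs describe object_detection_<timestamp>
+gcloud ml-engine jobs stream-logs object_detection_<timestamp>
+```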
+ +## Running a TPU Training Job on CMLE + +Launching a training job with a TPU compatible pipeline config requires using a +similar command: + +```bash +gcloud ml-engine jobs submit training `whoami`_object_detection_`date +%m_%d_%Y_%H_%M_%S` \ +--job-dir=gs://${MODEL_DIR} \ +--packages dist/object_detection-0.1.tar.gz,slim/dist/slim-0.1.tar.gz,/tmp/pycocotools/pycocotools-2.0.tar.gz \ +--module-name object_detection.model_tpu_main \ +--runtime-version 1.12 \ +--scale-tier BASIC_TPU \ +--region us-central1 \ +-- \ +--tpu_zone us-central1 \ +--model_dir=gs://${MODEL_DIR} \ +--pipeline_config_path=gs://${PIPELINE_CONFIG_PATH} +``` + +In contrast with the GPU training command, there is no need to specify a YAML +file and we point to the *object_detection.model_tpu_main* binary instead of +*object_detection.model_main*. We must also now set `scale-tier` to be +`BASIC_TPU` and provide a `tpu_zone`. Finally as before `pipeline_config_path` +points to a points to the pipeline configuration stored on Google Cloud Storage +(but is now must be a TPU compatible model). + +## Running an Evaluation Job on CMLE + +Note: You only need to do this when using TPU for training as it does not +interleave evaluation during training as in the case of Multiworker GPU +training. + +Evaluation jobs run on a single machine, so it is not necessary to write a YAML +configuration for evaluation. Run the following command to start the evaluation +job: + +```bash +gcloud ml-engine jobs submit training object_detection_eval_`date +%m_%d_%Y_%H_%M_%S` \ + --runtime-version 1.12 \ + --job-dir=gs://${MODEL_DIR} \ + --packages dist/object_detection-0.1.tar.gz,slim/dist/slim-0.1.tar.gz,/tmp/pycocotools/pycocotools-2.0.tar.gz \ + --module-name object_detection.model_main \ + --region us-central1 \ + --scale-tier BASIC_GPU \ + -- \ + --model_dir=gs://${MODEL_DIR} \ + --pipeline_config_path=gs://${PIPELINE_CONFIG_PATH} \ + --checkpoint_dir=gs://${MODEL_DIR} +``` + +Where `gs://${MODEL_DIR}` points to the directory on Google Cloud Storage where +training checkpoints are saved (same as the training job), as well as +to where evaluation events will be saved on Google Cloud Storage and +`gs://${PIPELINE_CONFIG_PATH}` points to where the pipeline configuration is +stored on Google Cloud Storage. + +Typically one starts an evaluation job concurrently with the training job. +Note that we do not support running evaluation on TPU, so the above command +line for launching evaluation jobs is the same whether you are training +on GPU or TPU. + +## Running Tensorboard + +You can run Tensorboard locally on your own machine to view progress of your +training and eval jobs on Google Cloud ML. Run the following command to start +Tensorboard: + +``` bash +tensorboard --logdir=gs://${YOUR_CLOUD_BUCKET} +``` + +Note it may Tensorboard a few minutes to populate with results. + diff --git a/models/research/object_detection/g3doc/running_on_mobile_tensorflowlite.md b/models/research/object_detection/g3doc/running_on_mobile_tensorflowlite.md new file mode 100644 index 0000000000000000000000000000000000000000..db166bcd394f4eed54a54e038091ec50a42410df --- /dev/null +++ b/models/research/object_detection/g3doc/running_on_mobile_tensorflowlite.md @@ -0,0 +1,147 @@ +# Running on mobile with TensorFlow Lite + +In this section, we will show you how to use [TensorFlow +Lite](https://www.tensorflow.org/mobile/tflite/) to get a smaller model and +allow you take advantage of ops that have been optimized for mobile devices. 
+TensorFlow Lite is TensorFlow’s lightweight solution for mobile and embedded +devices. It enables on-device machine learning inference with low latency and a +small binary size. TensorFlow Lite uses many techniques for this such as +quantized kernels that allow smaller and faster (fixed-point math) models. + +For this section, you will need to build [TensorFlow from +source](https://www.tensorflow.org/install/install_sources) to get the +TensorFlow Lite support for the SSD model. At this time only SSD models are supported. +Models like faster_rcnn are not supported at this time. You will also need to install the +[bazel build +tool](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/android#bazel). + +To make these commands easier to run, let’s set up some environment variables: + +```shell +export CONFIG_FILE=PATH_TO_BE_CONFIGURED/pipeline.config +export CHECKPOINT_PATH=PATH_TO_BE_CONFIGURED/model.ckpt +export OUTPUT_DIR=/tmp/tflite +``` + +We start with a checkpoint and get a TensorFlow frozen graph with compatible ops +that we can use with TensorFlow Lite. First, you’ll need to install these +[python +libraries](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md). +Then to get the frozen graph, run the export_tflite_ssd_graph.py script from the +`models/research` directory with this command: + +```shell +object_detection/export_tflite_ssd_graph.py \ +--pipeline_config_path=$CONFIG_FILE \ +--trained_checkpoint_prefix=$CHECKPOINT_PATH \ +--output_directory=$OUTPUT_DIR \ +--add_postprocessing_op=true +``` + +In the /tmp/tflite directory, you should now see two files: tflite_graph.pb and +tflite_graph.pbtxt. Note that the add_postprocessing flag enables the model to +take advantage of a custom optimized detection post-processing operation which +can be thought of as a replacement for +[tf.image.non_max_suppression](https://www.tensorflow.org/api_docs/python/tf/image/non_max_suppression). +Make sure not to confuse export_tflite_ssd_graph with export_inference_graph in +the same directory. Both scripts output frozen graphs: export_tflite_ssd_graph +will output the frozen graph that we can input to TensorFlow Lite directly and +is the one we’ll be using. + +Next we’ll use TensorFlow Lite to get the optimized model by using +[TOCO](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/toco), +the TensorFlow Lite Optimizing Converter. This will convert the resulting frozen +graph (tflite_graph.pb) to the TensorFlow Lite flatbuffer format (detect.tflite) +via the following command. For a quantized model, run this from the tensorflow/ +directory: + +```shell +bazel run -c opt tensorflow/lite/toco:toco -- \ +--input_file=$OUTPUT_DIR/tflite_graph.pb \ +--output_file=$OUTPUT_DIR/detect.tflite \ +--input_shapes=1,300,300,3 \ +--input_arrays=normalized_input_image_tensor \ +--output_arrays='TFLite_Detection_PostProcess','TFLite_Detection_PostProcess:1','TFLite_Detection_PostProcess:2','TFLite_Detection_PostProcess:3' \ +--inference_type=QUANTIZED_UINT8 \ +--mean_values=128 \ +--std_values=128 \ +--change_concat_input_ranges=false \ +--allow_custom_ops +``` + +This command takes the input tensor normalized_input_image_tensor after resizing +each camera image frame to 300x300 pixels. 
The outputs of the quantized model +are named 'TFLite_Detection_PostProcess', 'TFLite_Detection_PostProcess:1', +'TFLite_Detection_PostProcess:2', and 'TFLite_Detection_PostProcess:3' and +represent four arrays: detection_boxes, detection_classes, detection_scores, and +num_detections. The documentation for other flags used in this command is +[here](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/convert/cmdline.md). +If things ran successfully, you should now see a third file in the /tmp/tflite +directory called detect.tflite. This file contains the graph and all model +parameters and can be run via the TensorFlow Lite interpreter on the Android +device. For a floating point model, run this from the tensorflow/ directory: + +```shell +bazel run -c opt tensorflow/lite/toco:toco -- \ +--input_file=$OUTPUT_DIR/tflite_graph.pb \ +--output_file=$OUTPUT_DIR/detect.tflite \ +--input_shapes=1,300,300,3 \ +--input_arrays=normalized_input_image_tensor \ +--output_arrays='TFLite_Detection_PostProcess','TFLite_Detection_PostProcess:1','TFLite_Detection_PostProcess:2','TFLite_Detection_PostProcess:3' \ +--inference_type=FLOAT \ +--allow_custom_ops +``` + +# Running our model on Android + +To run our TensorFlow Lite model on device, we will use Android Studio to build +and run the TensorFlow Lite detection example with the new model. The example is +found in the +[TensorFlow examples repository](https://github.com/tensorflow/examples) under +`/lite/examples/object_detection`. The example can be built with +[Android Studio](https://developer.android.com/studio/index.html), and requires +the +[Android SDK with build tools](https://developer.android.com/tools/revisions/build-tools.html) +that support API >= 21. Additional details are available on the +[TensorFlow Lite example page](https://github.com/tensorflow/examples/tree/master/lite/examples/object_detection/android). + +Next we need to point the app to our new detect.tflite file and give it the +names of our new labels. Specifically, we will copy our TensorFlow Lite +flatbuffer to the app assets directory with the following command: + +```shell +mkdir $TF_EXAMPLES/lite/examples/object_detection/android/app/src/main/assets +cp /tmp/tflite/detect.tflite \ + $TF_EXAMPLES/lite/examples/object_detection/android/app/src/main/assets +``` + +You will also need to copy your new labelmap labelmap.txt to the assets +directory. + +We will now edit the gradle build file to use these assets. First, open the +`build.gradle` file +`$TF_EXAMPLES/lite/examples/object_detection/android/app/build.gradle`. Comment +out the model download script to avoid your assets being overwritten: `// apply +from:'download_model.gradle'` ``` + +If your model is named `detect.tflite`, and your labels file `labelmap.txt`, the +example will use them automatically as long as they've been properly copied into +the base assets directory. If you need to use a custom path or filename, open up +the +$TF_EXAMPLES/lite/examples/object_detection/android/app/src/main/java/org/tensorflow/demo/DetectorActivity.java +file in a text editor and find the definition of TF_OD_API_LABELS_FILE. Update +this path to point to your new label map file: +"labels_list.txt". Note that if your model is quantized, +the flag TF_OD_API_IS_QUANTIZED is set to true, and if your model is floating +point, the flag TF_OD_API_IS_QUANTIZED is set to false. 
This new section of +DetectorActivity.java should now look as follows for a quantized model: + +```shell + private static final boolean TF_OD_API_IS_QUANTIZED = true; + private static final String TF_OD_API_MODEL_FILE = "detect.tflite"; + private static final String TF_OD_API_LABELS_FILE = "labels_list.txt"; +``` + +Once you’ve copied the TensorFlow Lite model and edited the gradle build script +to not use the downloaded assets, you can build and deploy the app using the +usual Android Studio build process. diff --git a/models/research/object_detection/g3doc/running_pets.md b/models/research/object_detection/g3doc/running_pets.md new file mode 100644 index 0000000000000000000000000000000000000000..bb62db5612adf427c6ab2c0a853b63a23376aa94 --- /dev/null +++ b/models/research/object_detection/g3doc/running_pets.md @@ -0,0 +1,319 @@ +# Quick Start: Distributed Training on the Oxford-IIIT Pets Dataset on Google Cloud + +This page is a walkthrough for training an object detector using the Tensorflow +Object Detection API. In this tutorial, we'll be training on the Oxford-IIIT Pets +dataset to build a system to detect various breeds of cats and dogs. The output +of the detector will look like the following: + +![](img/oxford_pet.png) + +## Setting up a Project on Google Cloud + +To accelerate the process, we'll run training and evaluation on [Google Cloud +ML Engine](https://cloud.google.com/ml-engine/) to leverage multiple GPUs. To +begin, you will have to set up Google Cloud via the following steps (if you have +already done this, feel free to skip to the next section): + +1. [Create a GCP project](https://cloud.google.com/resource-manager/docs/creating-managing-projects). +2. [Install the Google Cloud SDK](https://cloud.google.com/sdk/downloads) on +your workstation or laptop. +This will provide the tools you need to upload files to Google Cloud Storage and +start ML training jobs. +3. [Enable the ML Engine +APIs](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component&_ga=1.73374291.1570145678.1496689256). +By default, a new GCP project does not enable APIs to start ML Engine training +jobs. Use the above link to explicitly enable them. +4. [Set up a Google Cloud Storage (GCS) +bucket](https://cloud.google.com/storage/docs/creating-buckets). ML Engine +training jobs can only access files on a Google Cloud Storage bucket. In this +tutorial, we'll be required to upload our dataset and configuration to GCS. + +Please remember the name of your GCS bucket, as we will reference it multiple +times in this document. Substitute `${YOUR_GCS_BUCKET}` with the name of +your bucket in this document. For your convenience, you should define the +environment variable below: + +``` bash +export YOUR_GCS_BUCKET=${YOUR_GCS_BUCKET} +``` + +It is also possible to run locally by following +[the running locally instructions](running_locally.md). + +## Installing Tensorflow and the Tensorflow Object Detection API + +Please run through the [installation instructions](installation.md) to install +Tensorflow and all it dependencies. Ensure the Protobuf libraries are +compiled and the library directories are added to `PYTHONPATH`. + +## Getting the Oxford-IIIT Pets Dataset and Uploading it to Google Cloud Storage + +In order to train a detector, we require a dataset of images, bounding boxes and +classifications. For this demo, we'll use the Oxford-IIIT Pets dataset. The raw +dataset for Oxford-IIIT Pets lives +[here](http://www.robots.ox.ac.uk/~vgg/data/pets/). 
You will need to download +both the image dataset [`images.tar.gz`](http://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz) +and the groundtruth data [`annotations.tar.gz`](http://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz) +to the `tensorflow/models/research/` directory and unzip them. This may take +some time. + +``` bash +# From tensorflow/models/research/ +wget http://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz +wget http://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz +tar -xvf images.tar.gz +tar -xvf annotations.tar.gz +``` + +After downloading the tarballs, your `tensorflow/models/research/` directory +should appear as follows: + +```lang-none +- images.tar.gz +- annotations.tar.gz ++ images/ ++ annotations/ ++ object_detection/ +... other files and directories +``` + +The Tensorflow Object Detection API expects data to be in the TFRecord format, +so we'll now run the `create_pet_tf_record` script to convert from the raw +Oxford-IIIT Pet dataset into TFRecords. Run the following commands from the +`tensorflow/models/research/` directory: + +``` bash +# From tensorflow/models/research/ +python object_detection/dataset_tools/create_pet_tf_record.py \ + --label_map_path=object_detection/data/pet_label_map.pbtxt \ + --data_dir=`pwd` \ + --output_dir=`pwd` +``` + +Note: It is normal to see some warnings when running this script. You may ignore +them. + +Two 10-sharded TFRecord files named `pet_faces_train.record-*` and +`pet_faces_val.record-*` should be generated in the +`tensorflow/models/research/` directory. + +Now that the data has been generated, we'll need to upload it to Google Cloud +Storage so the data can be accessed by ML Engine. Run the following command to +copy the files into your GCS bucket (substituting `${YOUR_GCS_BUCKET}`): + +```bash +# From tensorflow/models/research/ +gsutil cp pet_faces_train.record-* gs://${YOUR_GCS_BUCKET}/data/ +gsutil cp pet_faces_val.record-* gs://${YOUR_GCS_BUCKET}/data/ +gsutil cp object_detection/data/pet_label_map.pbtxt gs://${YOUR_GCS_BUCKET}/data/pet_label_map.pbtxt +``` + +Please remember the path where you upload the data to, as we will need this +information when configuring the pipeline in a following step. + +## Downloading a COCO-pretrained Model for Transfer Learning + +Training a state of the art object detector from scratch can take days, even +when using multiple GPUs! In order to speed up training, we'll take an object +detector trained on a different dataset (COCO), and reuse some of it's +parameters to initialize our new model. + +Download our [COCO-pretrained Faster R-CNN with Resnet-101 +model](http://storage.googleapis.com/download.tensorflow.org/models/object_detection/faster_rcnn_resnet101_coco_11_06_2017.tar.gz). +Unzip the contents of the folder and copy the `model.ckpt*` files into your GCS +Bucket. + +``` bash +wget http://storage.googleapis.com/download.tensorflow.org/models/object_detection/faster_rcnn_resnet101_coco_11_06_2017.tar.gz +tar -xvf faster_rcnn_resnet101_coco_11_06_2017.tar.gz +gsutil cp faster_rcnn_resnet101_coco_11_06_2017/model.ckpt.* gs://${YOUR_GCS_BUCKET}/data/ +``` + +Remember the path where you uploaded the model checkpoint to, as we will need it +in the following step. + +## Configuring the Object Detection Pipeline + +In the Tensorflow Object Detection API, the model parameters, training +parameters and eval parameters are all defined by a config file. More details +can be found [here](configuring_jobs.md). 
For this tutorial, we will use some +predefined templates provided with the source code. In the +`object_detection/samples/configs` folder, there are skeleton object_detection +configuration files. We will use `faster_rcnn_resnet101_pets.config` as a +starting point for configuring the pipeline. Open the file with your favourite +text editor. + +We'll need to configure some paths in order for the template to work. Search the +file for instances of `PATH_TO_BE_CONFIGURED` and replace them with the +appropriate value (typically `gs://${YOUR_GCS_BUCKET}/data/`). Afterwards +upload your edited file onto GCS, making note of the path it was uploaded to +(we'll need it when starting the training/eval jobs). + +``` bash +# From tensorflow/models/research/ + +# Edit the faster_rcnn_resnet101_pets.config template. Please note that there +# are multiple places where PATH_TO_BE_CONFIGURED needs to be set. +sed -i "s|PATH_TO_BE_CONFIGURED|"gs://${YOUR_GCS_BUCKET}"/data|g" \ + object_detection/samples/configs/faster_rcnn_resnet101_pets.config + +# Copy edited template to cloud. +gsutil cp object_detection/samples/configs/faster_rcnn_resnet101_pets.config \ + gs://${YOUR_GCS_BUCKET}/data/faster_rcnn_resnet101_pets.config +``` + +## Checking Your Google Cloud Storage Bucket + +At this point in the tutorial, you should have uploaded the training/validation +datasets (including label map), our COCO trained FasterRCNN finetune checkpoint and your job +configuration to your Google Cloud Storage Bucket. Your bucket should look like +the following: + +```lang-none ++ ${YOUR_GCS_BUCKET}/ + + data/ + - faster_rcnn_resnet101_pets.config + - model.ckpt.index + - model.ckpt.meta + - model.ckpt.data-00000-of-00001 + - pet_label_map.pbtxt + - pet_faces_train.record-* + - pet_faces_val.record-* +``` + +You can inspect your bucket using the [Google Cloud Storage +browser](https://console.cloud.google.com/storage/browser). + +## Starting Training and Evaluation Jobs on Google Cloud ML Engine + +Before we can start a job on Google Cloud ML Engine, we must: + +1. Package the Tensorflow Object Detection code. +2. Write a cluster configuration for our Google Cloud ML job. + +To package the Tensorflow Object Detection code, run the following commands from +the `tensorflow/models/research/` directory: + +```bash +# From tensorflow/models/research/ +bash object_detection/dataset_tools/create_pycocotools_package.sh /tmp/pycocotools +python setup.py sdist +(cd slim && python setup.py sdist) +``` + +This will create python packages dist/object_detection-0.1.tar.gz, +slim/dist/slim-0.1.tar.gz, and /tmp/pycocotools/pycocotools-2.0.tar.gz. + +For running the training Cloud ML job, we'll configure the cluster to use 5 +training jobs and three parameters servers. The +configuration file can be found at `object_detection/samples/cloud/cloud.yml`. + +Note: The code sample below is supported for use with 1.12 runtime version. 
+ +To start training and evaluation, execute the following command from the +`tensorflow/models/research/` directory: + +```bash +# From tensorflow/models/research/ +gcloud ml-engine jobs submit training `whoami`_object_detection_pets_`date +%m_%d_%Y_%H_%M_%S` \ + --runtime-version 1.12 \ + --job-dir=gs://${YOUR_GCS_BUCKET}/model_dir \ + --packages dist/object_detection-0.1.tar.gz,slim/dist/slim-0.1.tar.gz,/tmp/pycocotools/pycocotools-2.0.tar.gz \ + --module-name object_detection.model_main \ + --region us-central1 \ + --config object_detection/samples/cloud/cloud.yml \ + -- \ + --model_dir=gs://${YOUR_GCS_BUCKET}/model_dir \ + --pipeline_config_path=gs://${YOUR_GCS_BUCKET}/data/faster_rcnn_resnet101_pets.config +``` + +Users can monitor and stop training and evaluation jobs on the [ML Engine +Dashboard](https://console.cloud.google.com/mlengine/jobs). + +## Monitoring Progress with Tensorboard + +You can monitor progress of the training and eval jobs by running Tensorboard on +your local machine: + +```bash +# This command needs to be run once to allow your local machine to access your +# GCS bucket. +gcloud auth application-default login + +tensorboard --logdir=gs://${YOUR_GCS_BUCKET}/model_dir +``` + +Once Tensorboard is running, navigate to `localhost:6006` from your favourite +web browser. You should see something similar to the following: + +![](img/tensorboard.png) + +Make sure your Tensorboard version is the same minor version as your Tensorflow (1.x) + +You will also want to click on the images tab to see example detections made by +the model while it trains. After about an hour and a half of training, you can +expect to see something like this: + +![](img/tensorboard2.png) + +Note: It takes roughly 10 minutes for a job to get started on ML Engine, and +roughly an hour for the system to evaluate the validation dataset. It may take +some time to populate the dashboards. If you do not see any entries after half +an hour, check the logs from the [ML Engine +Dashboard](https://console.cloud.google.com/mlengine/jobs). Note that by default +the training jobs are configured to go for much longer than is necessary for +convergence. To save money, we recommend killing your jobs once you've seen +that they've converged. + +## Exporting the Tensorflow Graph + +After your model has been trained, you should export it to a Tensorflow graph +proto. First, you need to identify a candidate checkpoint to export. You can +search your bucket using the [Google Cloud Storage +Browser](https://console.cloud.google.com/storage/browser). The file should be +stored under `${YOUR_GCS_BUCKET}/model_dir`. The checkpoint will typically +consist of three files: + +* `model.ckpt-${CHECKPOINT_NUMBER}.data-00000-of-00001` +* `model.ckpt-${CHECKPOINT_NUMBER}.index` +* `model.ckpt-${CHECKPOINT_NUMBER}.meta` + +After you've identified a candidate checkpoint to export, run the following +command from `tensorflow/models/research/`: + +```bash +# From tensorflow/models/research/ +gsutil cp gs://${YOUR_GCS_BUCKET}/model_dir/model.ckpt-${CHECKPOINT_NUMBER}.* . +python object_detection/export_inference_graph.py \ + --input_type image_tensor \ + --pipeline_config_path object_detection/samples/configs/faster_rcnn_resnet101_pets.config \ + --trained_checkpoint_prefix model.ckpt-${CHECKPOINT_NUMBER} \ + --output_directory exported_graphs +``` + +Afterwards, you should see a directory named `exported_graphs` containing the +SavedModel and frozen graph. 
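+
+To quickly spot-check the exported graph, a minimal TF 1.x inference sketch
+such as the one below can be used. The tensor names are the standard ones
+produced by `export_inference_graph.py`; the test image filename is a
+placeholder.
+
+```python
+import numpy as np
+import tensorflow as tf
+from PIL import Image
+
+# Load the frozen graph exported above.
+detection_graph = tf.Graph()
+with detection_graph.as_default():
+    graph_def = tf.GraphDef()
+    with tf.gfile.GFile('exported_graphs/frozen_inference_graph.pb', 'rb') as fid:
+        graph_def.ParseFromString(fid.read())
+    tf.import_graph_def(graph_def, name='')
+
+# Run detection on a single test image (placeholder filename).
+image = np.expand_dims(
+    np.array(Image.open('test_image.jpg').convert('RGB')), axis=0)
+with tf.Session(graph=detection_graph) as sess:
+    boxes, scores, classes, num = sess.run(
+        ['detection_boxes:0', 'detection_scores:0',
+         'detection_classes:0', 'num_detections:0'],
+        feed_dict={'image_tensor:0': image})
+print('Highest-scoring box:', boxes[0, 0], 'score:', scores[0, 0])
+```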
+ +## Configuring the Instance Segmentation Pipeline + +Mask prediction can be turned on for an object detection config by adding +`predict_instance_masks: true` within the `MaskRCNNBoxPredictor`. Other +parameters such as mask size, number of convolutions in the mask layer, and the +convolution hyper parameters can be defined. We will use +`mask_rcnn_resnet101_pets.config` as a starting point for configuring the +instance segmentation pipeline. Everything above that was mentioned about object +detection holds true for instance segmentation. Instance segmentation consists +of an object detection model with an additional head that predicts the object +mask inside each predicted box once we remove the training and other details. +Please refer to the section on [Running an Instance Segmentation +Model](instance_segmentation.md) for instructions on how to configure a model +that predicts masks in addition to object bounding boxes. + +## What's Next + +Congratulations, you have now trained an object detector for various cats and +dogs! There different things you can do now: + +1. [Test your exported model using the provided Jupyter notebook.](running_notebook.md) +2. [Experiment with different model configurations.](configuring_jobs.md) +3. Train an object detector using your own data. diff --git a/models/research/object_detection/g3doc/tpu_compatibility.md b/models/research/object_detection/g3doc/tpu_compatibility.md new file mode 100644 index 0000000000000000000000000000000000000000..0eb0c7a20ee3c030f867ed098e374c2047929071 --- /dev/null +++ b/models/research/object_detection/g3doc/tpu_compatibility.md @@ -0,0 +1,196 @@ +# TPU compatible detection pipelines + +[TOC] + +The Tensorflow Object Detection API supports TPU training for some models. To +make models TPU compatible you need to make a few tweaks to the model config as +mentioned below. We also provide several sample configs that you can use as a +template. + +## TPU compatibility + +### Static shaped tensors + +TPU training currently requires all tensors in the Tensorflow Graph to have +static shapes. However, most of the sample configs in Object Detection API have +a few different tensors that are dynamically shaped. Fortunately, we provide +simple alternatives in the model configuration that modifies these tensors to +have static shape: + +* **Image tensors with static shape** - This can be achieved either by using a + `fixed_shape_resizer` that resizes images to a fixed spatial shape or by + setting `pad_to_max_dimension: true` in `keep_aspect_ratio_resizer` which + pads the resized images with zeros to the bottom and right. Padded image + tensors are correctly handled internally within the model. + + ``` + image_resizer { + fixed_shape_resizer { + height: 640 + width: 640 + } + } + ``` + + or + + ``` + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 640 + max_dimension: 640 + pad_to_max_dimension: true + } + } + ``` + +* **Groundtruth tensors with static shape** - Images in a typical detection + dataset have variable number of groundtruth boxes and associated classes. + Setting `max_number_of_boxes` to a large enough number in `train_config` + pads the groundtruth tensors with zeros to a static shape. Padded + groundtruth tensors are correctly handled internally within the model. 
+ + ``` + train_config: { + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt" + batch_size: 64 + max_number_of_boxes: 200 + unpad_groundtruth_tensors: false + } + ``` + +### TPU friendly ops + +Although TPU supports a vast number of tensorflow ops, a few used in the +Tensorflow Object Detection API are unsupported. We list such ops below and +recommend compatible substitutes. + +* **Anchor sampling** - Typically we use hard example mining in standard SSD + pipeliens to balance positive and negative anchors that contribute to the + loss. Hard Example mining uses non max suppression as a subroutine and since + non max suppression is not currently supported on TPUs we cannot use hard + example mining. Fortunately, we provide an implementation of focal loss that + can be used instead of hard example mining. Remove `hard_example_miner` from + the config and substitute `weighted_sigmoid` classification loss with + `weighted_sigmoid_focal` loss. + + ``` + loss { + classification_loss { + weighted_sigmoid_focal { + alpha: 0.25 + gamma: 2.0 + } + } + localization_loss { + weighted_smooth_l1 { + } + } + classification_weight: 1.0 + localization_weight: 1.0 + } + ``` + +* **Target Matching** - Object detection API provides two choices for matcher + used in target assignment: `argmax_matcher` and `bipartite_matcher`. + Bipartite matcher is not currently supported on TPU, therefore we must + modify the configs to use `argmax_matcher`. Additionally, set + `use_matmul_gather: true` for efficiency on TPU. + + ``` + matcher { + argmax_matcher { + matched_threshold: 0.5 + unmatched_threshold: 0.5 + ignore_thresholds: false + negatives_lower_than_unmatched: true + force_match_for_each_row: true + use_matmul_gather: true + } + } + ``` + +### TPU training hyperparameters + +Object Detection training on TPU uses synchronous SGD. On a typical cloud TPU +with 8 cores we recommend batch sizes that are 8x large when compared to a GPU +config that uses asynchronous SGD. We also use fewer training steps (~ 1/100 x) +due to the large batch size. This necessitates careful tuning of some other +training parameters as listed below. + +* **Batch size** - Use the largest batch size that can fit on cloud TPU. + + ``` + train_config { + batch_size: 1024 + } + ``` + +* **Training steps** - Typically only 10s of thousands. + + ``` + train_config { + num_steps: 25000 + } + ``` + +* **Batch norm decay** - Use smaller decay constants (0.97 or 0.997) since we + take fewer training steps. + + ``` + batch_norm { + scale: true, + decay: 0.97, + epsilon: 0.001, + } + ``` + +* **Learning rate** - Use large learning rate with warmup. Scale learning rate + linearly with batch size. See `cosine_decay_learning_rate` or + `manual_step_learning_rate` for examples. + + ``` + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: .04 + total_steps: 25000 + warmup_learning_rate: .013333 + warmup_steps: 2000 + } + } + ``` + + or + + ``` + learning_rate: { + manual_step_learning_rate { + warmup: true + initial_learning_rate: .01333 + schedule { + step: 2000 + learning_rate: 0.04 + } + schedule { + step: 15000 + learning_rate: 0.004 + } + } + } + ``` + +## Example TPU compatible configs + +We provide example config files that you can use to train your own models on TPU + +* ssd_mobilenet_v1_300x300
+* ssd_mobilenet_v1_ppn_300x300
+* ssd_mobilenet_v1_fpn_640x640 (mobilenet based retinanet)
+* ssd_resnet50_v1_fpn_640x640 (retinanet)
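+
+Taken together, the TPU-specific settings described above can be collected into a single pipeline config fragment. The snippet below is only an illustrative sketch, not one of the shipped sample configs: most model fields are omitted and the values are placeholders that should be tuned for your own dataset.
+
+```
+model {
+  ssd {
+    image_resizer {
+      fixed_shape_resizer { height: 640 width: 640 }  # static image shape
+    }
+    matcher {
+      argmax_matcher {
+        matched_threshold: 0.5
+        unmatched_threshold: 0.5
+        use_matmul_gather: true  # TPU friendly gather
+      }
+    }
+    loss {
+      classification_loss {
+        weighted_sigmoid_focal { alpha: 0.25 gamma: 2.0 }
+      }
+      localization_loss { weighted_smooth_l1 { } }
+    }
+    # No hard_example_miner block: focal loss replaces hard example mining.
+  }
+}
+train_config {
+  batch_size: 1024                  # as large as fits on the TPU
+  num_steps: 25000
+  max_number_of_boxes: 200          # static groundtruth shape
+  unpad_groundtruth_tensors: false
+}
+```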
+ +## Supported Meta architectures + +Currently, `SSDMetaArch` models are supported on TPUs. `FasterRCNNMetaArch` is +going to be supported soon. diff --git a/models/research/object_detection/g3doc/tpu_exporters.md b/models/research/object_detection/g3doc/tpu_exporters.md new file mode 100644 index 0000000000000000000000000000000000000000..0368359067e696a88b4a2ff6b961823f7fcf6210 --- /dev/null +++ b/models/research/object_detection/g3doc/tpu_exporters.md @@ -0,0 +1,35 @@ +# Object Detection TPU Inference Exporter + +This package contains SavedModel Exporter for TPU Inference of object detection +models. + +## Usage + +This Exporter is intended for users who have trained models with CPUs / GPUs, +but would like to use them for inference on TPU without changing their code or +re-training their models. + +Users are assumed to have: + ++ `PIPELINE_CONFIG`: A pipeline_pb2.TrainEvalPipelineConfig config file; ++ `CHECKPOINT`: A model checkpoint trained on any device; + +and need to correctly set: + ++ `EXPORT_DIR`: Path to export SavedModel; ++ `INPUT_PLACEHOLDER`: Name of input placeholder in model's signature_def_map; ++ `INPUT_TYPE`: Type of input node, which can be one of 'image_tensor', + 'encoded_image_string_tensor', or 'tf_example'; ++ `USE_BFLOAT16`: Whether to use bfloat16 instead of float32 on TPU. + +The model can be exported with: + +``` +python object_detection/tpu_exporters/export_saved_model_tpu.py \ + --pipeline_config_file= \ + --ckpt_path= \ + --export_dir= \ + --input_placeholder_name= \ + --input_type= \ + --use_bfloat16= +``` diff --git a/models/research/object_detection/g3doc/using_your_own_dataset.md b/models/research/object_detection/g3doc/using_your_own_dataset.md new file mode 100644 index 0000000000000000000000000000000000000000..23222f26e26936dd957163c1ae69ef6e3592dfec --- /dev/null +++ b/models/research/object_detection/g3doc/using_your_own_dataset.md @@ -0,0 +1,209 @@ +# Preparing Inputs + +[TOC] + +To use your own dataset in Tensorflow Object Detection API, you must convert it +into the [TFRecord file format](https://www.tensorflow.org/api_guides/python/python_io#tfrecords_format_details). +This document outlines how to write a script to generate the TFRecord file. + +## Label Maps + +Each dataset is required to have a label map associated with it. This label map +defines a mapping from string class names to integer class Ids. The label map +should be a `StringIntLabelMap` text protobuf. Sample label maps can be found in +object_detection/data. Label maps should always start from id 1. + +## Dataset Requirements + +For every example in your dataset, you should have the following information: + +1. An RGB image for the dataset encoded as jpeg or png. +2. A list of bounding boxes for the image. Each bounding box should contain: + 1. A bounding box coordinates (with origin in top left corner) defined by 4 + floating point numbers [ymin, xmin, ymax, xmax]. Note that we store the + _normalized_ coordinates (x / width, y / height) in the TFRecord dataset. + 2. The class of the object in the bounding box. + +# Example Image + +Consider the following image: + +![Example Image](img/example_cat.jpg "Example Image") + +with the following label map: + +``` +item { + id: 1 + name: 'Cat' +} + + +item { + id: 2 + name: 'Dog' +} +``` + +We can generate a tf.Example proto for this image using the following code: + +```python + +def create_cat_tf_example(encoded_cat_image_data): + """Creates a tf.Example proto from sample cat image. 
+
+  Args:
+    encoded_cat_image_data: The jpg encoded data of the cat image.
+
+  Returns:
+    example: The created tf.Example.
+  """
+
+  height = 1032
+  width = 1200
+  filename = 'example_cat.jpg'
+  image_format = b'jpg'
+
+  xmins = [322.0 / 1200.0]
+  xmaxs = [1062.0 / 1200.0]
+  ymins = [174.0 / 1032.0]
+  ymaxs = [761.0 / 1032.0]
+  classes_text = ['Cat']
+  classes = [1]
+
+  tf_example = tf.train.Example(features=tf.train.Features(feature={
+      'image/height': dataset_util.int64_feature(height),
+      'image/width': dataset_util.int64_feature(width),
+      'image/filename': dataset_util.bytes_feature(filename),
+      'image/source_id': dataset_util.bytes_feature(filename),
+      'image/encoded': dataset_util.bytes_feature(encoded_cat_image_data),
+      'image/format': dataset_util.bytes_feature(image_format),
+      'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
+      'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
+      'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
+      'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
+      'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
+      'image/object/class/label': dataset_util.int64_list_feature(classes),
+  }))
+  return tf_example
+```
+
+## Conversion Script Outline {#conversion-script-outline}
+
+A typical conversion script will look like the following:
+
+```python
+
+import tensorflow as tf
+
+from object_detection.utils import dataset_util
+
+
+flags = tf.app.flags
+flags.DEFINE_string('output_path', '', 'Path to output TFRecord')
+FLAGS = flags.FLAGS
+
+
+def create_tf_example(example):
+  # TODO(user): Populate the following variables from your example.
+  height = None # Image height
+  width = None # Image width
+  filename = None # Filename of the image. Empty if image is not from file
+  encoded_image_data = None # Encoded image bytes
+  image_format = None # b'jpeg' or b'png'
+
+  xmins = [] # List of normalized left x coordinates in bounding box (1 per box)
+  xmaxs = [] # List of normalized right x coordinates in bounding box
+             # (1 per box)
+  ymins = [] # List of normalized top y coordinates in bounding box (1 per box)
+  ymaxs = [] # List of normalized bottom y coordinates in bounding box
+             # (1 per box)
+  classes_text = [] # List of string class name of bounding box (1 per box)
+  classes = [] # List of integer class id of bounding box (1 per box)
+
+  tf_example = tf.train.Example(features=tf.train.Features(feature={
+      'image/height': dataset_util.int64_feature(height),
+      'image/width': dataset_util.int64_feature(width),
+      'image/filename': dataset_util.bytes_feature(filename),
+      'image/source_id': dataset_util.bytes_feature(filename),
+      'image/encoded': dataset_util.bytes_feature(encoded_image_data),
+      'image/format': dataset_util.bytes_feature(image_format),
+      'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
+      'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
+      'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
+      'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
+      'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
+      'image/object/class/label': dataset_util.int64_list_feature(classes),
+  }))
+  return tf_example
+
+
+def main(_):
+  writer = tf.python_io.TFRecordWriter(FLAGS.output_path)
+
+  # TODO(user): Write code to read in your dataset to examples variable
+
+  for example in examples:
+    tf_example = create_tf_example(example)
+    writer.write(tf_example.SerializeToString())
+
+  writer.close()
+
+
+if __name__ == '__main__':
+  tf.app.run()
+
+```
+
+Note: You may notice additional fields in some other datasets. They are currently unused by the API and are optional.
+
+Note: Please refer to the section on [Running an Instance Segmentation Model](instance_segmentation.md) for instructions on how to configure a model that predicts masks in addition to object bounding boxes.
+
+## Sharding datasets
+
+When you have more than a few thousand examples, it is beneficial to shard your dataset into multiple files:
+
+*   The tf.data.Dataset API can read input examples in parallel, improving throughput.
+*   The tf.data.Dataset API can shuffle the examples better with sharded files, which improves the performance of the model slightly.
+
+Instead of writing all tf.Example protos to a single file as shown in the [conversion script outline](#conversion-script-outline), use the snippet below.
+
+```python
+import contextlib2
+from object_detection.dataset_tools import tf_record_creation_util
+
+num_shards=10
+output_filebase='/path/to/train_dataset.record'
+
+with contextlib2.ExitStack() as tf_record_close_stack:
+  output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
+      tf_record_close_stack, output_filebase, num_shards)
+  for index, example in enumerate(examples):
+    tf_example = create_tf_example(example)
+    output_shard_index = index % num_shards
+    output_tfrecords[output_shard_index].write(tf_example.SerializeToString())
+```
+
+This will produce the following output files:
+
+```bash
+/path/to/train_dataset.record-00000-of-00010
+/path/to/train_dataset.record-00001-of-00010
+...
+/path/to/train_dataset.record-00009-of-00010
+```
+
+which can then be used in the config file as below.
+ +```bash +tf_record_input_reader { + input_path: "/path/to/train_dataset.record-?????-of-00010" +} +``` diff --git a/models/research/object_detection/inference/__init__.py b/models/research/object_detection/inference/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/object_detection/inference/detection_inference.py b/models/research/object_detection/inference/detection_inference.py new file mode 100644 index 0000000000000000000000000000000000000000..b395cd7e74b093551a05f5e37875ccf06de4ccc9 --- /dev/null +++ b/models/research/object_detection/inference/detection_inference.py @@ -0,0 +1,141 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utility functions for detection inference.""" +from __future__ import division + +import tensorflow.compat.v1 as tf + +from object_detection.core import standard_fields + + +def build_input(tfrecord_paths): + """Builds the graph's input. + + Args: + tfrecord_paths: List of paths to the input TFRecords + + Returns: + serialized_example_tensor: The next serialized example. String scalar Tensor + image_tensor: The decoded image of the example. Uint8 tensor, + shape=[1, None, None,3] + """ + filename_queue = tf.train.string_input_producer( + tfrecord_paths, shuffle=False, num_epochs=1) + + tf_record_reader = tf.TFRecordReader() + _, serialized_example_tensor = tf_record_reader.read(filename_queue) + features = tf.parse_single_example( + serialized_example_tensor, + features={ + standard_fields.TfExampleFields.image_encoded: + tf.FixedLenFeature([], tf.string), + }) + encoded_image = features[standard_fields.TfExampleFields.image_encoded] + image_tensor = tf.image.decode_image(encoded_image, channels=3) + image_tensor.set_shape([None, None, 3]) + image_tensor = tf.expand_dims(image_tensor, 0) + + return serialized_example_tensor, image_tensor + + +def build_inference_graph(image_tensor, inference_graph_path): + """Loads the inference graph and connects it to the input image. + + Args: + image_tensor: The input image. uint8 tensor, shape=[1, None, None, 3] + inference_graph_path: Path to the inference graph with embedded weights + + Returns: + detected_boxes_tensor: Detected boxes. Float tensor, + shape=[num_detections, 4] + detected_scores_tensor: Detected scores. Float tensor, + shape=[num_detections] + detected_labels_tensor: Detected labels. 
Int64 tensor, + shape=[num_detections] + """ + with tf.gfile.Open(inference_graph_path, 'rb') as graph_def_file: + graph_content = graph_def_file.read() + graph_def = tf.GraphDef() + graph_def.MergeFromString(graph_content) + + tf.import_graph_def( + graph_def, name='', input_map={'image_tensor': image_tensor}) + + g = tf.get_default_graph() + + num_detections_tensor = tf.squeeze( + g.get_tensor_by_name('num_detections:0'), 0) + num_detections_tensor = tf.cast(num_detections_tensor, tf.int32) + + detected_boxes_tensor = tf.squeeze( + g.get_tensor_by_name('detection_boxes:0'), 0) + detected_boxes_tensor = detected_boxes_tensor[:num_detections_tensor] + + detected_scores_tensor = tf.squeeze( + g.get_tensor_by_name('detection_scores:0'), 0) + detected_scores_tensor = detected_scores_tensor[:num_detections_tensor] + + detected_labels_tensor = tf.squeeze( + g.get_tensor_by_name('detection_classes:0'), 0) + detected_labels_tensor = tf.cast(detected_labels_tensor, tf.int64) + detected_labels_tensor = detected_labels_tensor[:num_detections_tensor] + + return detected_boxes_tensor, detected_scores_tensor, detected_labels_tensor + + +def infer_detections_and_add_to_example( + serialized_example_tensor, detected_boxes_tensor, detected_scores_tensor, + detected_labels_tensor, discard_image_pixels): + """Runs the supplied tensors and adds the inferred detections to the example. + + Args: + serialized_example_tensor: Serialized TF example. Scalar string tensor + detected_boxes_tensor: Detected boxes. Float tensor, + shape=[num_detections, 4] + detected_scores_tensor: Detected scores. Float tensor, + shape=[num_detections] + detected_labels_tensor: Detected labels. Int64 tensor, + shape=[num_detections] + discard_image_pixels: If true, discards the image from the result + Returns: + The de-serialized TF example augmented with the inferred detections. + """ + tf_example = tf.train.Example() + (serialized_example, detected_boxes, detected_scores, + detected_classes) = tf.get_default_session().run([ + serialized_example_tensor, detected_boxes_tensor, detected_scores_tensor, + detected_labels_tensor + ]) + detected_boxes = detected_boxes.T + + tf_example.ParseFromString(serialized_example) + feature = tf_example.features.feature + feature[standard_fields.TfExampleFields. + detection_score].float_list.value[:] = detected_scores + feature[standard_fields.TfExampleFields. + detection_bbox_ymin].float_list.value[:] = detected_boxes[0] + feature[standard_fields.TfExampleFields. + detection_bbox_xmin].float_list.value[:] = detected_boxes[1] + feature[standard_fields.TfExampleFields. + detection_bbox_ymax].float_list.value[:] = detected_boxes[2] + feature[standard_fields.TfExampleFields. + detection_bbox_xmax].float_list.value[:] = detected_boxes[3] + feature[standard_fields.TfExampleFields. + detection_class_label].int64_list.value[:] = detected_classes + + if discard_image_pixels: + del feature[standard_fields.TfExampleFields.image_encoded] + + return tf_example diff --git a/models/research/object_detection/inference/detection_inference_tf1_test.py b/models/research/object_detection/inference/detection_inference_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..899da1298765425c667fbcdfd341fad713724d9f --- /dev/null +++ b/models/research/object_detection/inference/detection_inference_tf1_test.py @@ -0,0 +1,177 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Tests for detection_inference.py.""" + +import os +import unittest +import numpy as np +from PIL import Image +import six +import tensorflow.compat.v1 as tf +from google.protobuf import text_format + +from object_detection.core import standard_fields +from object_detection.inference import detection_inference +from object_detection.utils import dataset_util +from object_detection.utils import tf_version + + +def get_mock_tfrecord_path(): + return os.path.join(tf.test.get_temp_dir(), 'mock.tfrec') + + +def create_mock_tfrecord(): + pil_image = Image.fromarray(np.array([[[123, 0, 0]]], dtype=np.uint8), 'RGB') + image_output_stream = six.BytesIO() + pil_image.save(image_output_stream, format='png') + encoded_image = image_output_stream.getvalue() + + feature_map = { + 'test_field': + dataset_util.float_list_feature([1, 2, 3, 4]), + standard_fields.TfExampleFields.image_encoded: + dataset_util.bytes_feature(encoded_image), + } + + tf_example = tf.train.Example(features=tf.train.Features(feature=feature_map)) + with tf.python_io.TFRecordWriter(get_mock_tfrecord_path()) as writer: + writer.write(tf_example.SerializeToString()) + return encoded_image + + +def get_mock_graph_path(): + return os.path.join(tf.test.get_temp_dir(), 'mock_graph.pb') + + +def create_mock_graph(): + g = tf.Graph() + with g.as_default(): + in_image_tensor = tf.placeholder( + tf.uint8, shape=[1, None, None, 3], name='image_tensor') + tf.constant([2.0], name='num_detections') + tf.constant( + [[[0, 0.8, 0.7, 1], [0.1, 0.2, 0.8, 0.9], [0.2, 0.3, 0.4, 0.5]]], + name='detection_boxes') + tf.constant([[0.1, 0.2, 0.3]], name='detection_scores') + tf.identity( + tf.constant([[1.0, 2.0, 3.0]]) * + tf.reduce_sum(tf.cast(in_image_tensor, dtype=tf.float32)), + name='detection_classes') + graph_def = g.as_graph_def() + + with tf.gfile.Open(get_mock_graph_path(), 'w') as fl: + fl.write(graph_def.SerializeToString()) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class InferDetectionsTests(tf.test.TestCase): + + def test_simple(self): + create_mock_graph() + encoded_image = create_mock_tfrecord() + + serialized_example_tensor, image_tensor = detection_inference.build_input( + [get_mock_tfrecord_path()]) + self.assertAllEqual(image_tensor.get_shape().as_list(), [1, None, None, 3]) + + (detected_boxes_tensor, detected_scores_tensor, + detected_labels_tensor) = detection_inference.build_inference_graph( + image_tensor, get_mock_graph_path()) + + with self.test_session(use_gpu=False) as sess: + sess.run(tf.global_variables_initializer()) + sess.run(tf.local_variables_initializer()) + tf.train.start_queue_runners() + + tf_example = detection_inference.infer_detections_and_add_to_example( + serialized_example_tensor, detected_boxes_tensor, + detected_scores_tensor, detected_labels_tensor, False) + expected_example = tf.train.Example() + text_format.Merge(r""" + features { + feature { + 
key: "image/detection/bbox/ymin" + value { float_list { value: [0.0, 0.1] } } } + feature { + key: "image/detection/bbox/xmin" + value { float_list { value: [0.8, 0.2] } } } + feature { + key: "image/detection/bbox/ymax" + value { float_list { value: [0.7, 0.8] } } } + feature { + key: "image/detection/bbox/xmax" + value { float_list { value: [1.0, 0.9] } } } + feature { + key: "image/detection/label" + value { int64_list { value: [123, 246] } } } + feature { + key: "image/detection/score" + value { float_list { value: [0.1, 0.2] } } } + feature { + key: "test_field" + value { float_list { value: [1.0, 2.0, 3.0, 4.0] } } } }""", + expected_example) + expected_example.features.feature[ + standard_fields.TfExampleFields + .image_encoded].CopyFrom(dataset_util.bytes_feature(encoded_image)) + self.assertProtoEquals(expected_example, tf_example) + + def test_discard_image(self): + create_mock_graph() + create_mock_tfrecord() + + serialized_example_tensor, image_tensor = detection_inference.build_input( + [get_mock_tfrecord_path()]) + (detected_boxes_tensor, detected_scores_tensor, + detected_labels_tensor) = detection_inference.build_inference_graph( + image_tensor, get_mock_graph_path()) + + with self.test_session(use_gpu=False) as sess: + sess.run(tf.global_variables_initializer()) + sess.run(tf.local_variables_initializer()) + tf.train.start_queue_runners() + + tf_example = detection_inference.infer_detections_and_add_to_example( + serialized_example_tensor, detected_boxes_tensor, + detected_scores_tensor, detected_labels_tensor, True) + + self.assertProtoEquals(r""" + features { + feature { + key: "image/detection/bbox/ymin" + value { float_list { value: [0.0, 0.1] } } } + feature { + key: "image/detection/bbox/xmin" + value { float_list { value: [0.8, 0.2] } } } + feature { + key: "image/detection/bbox/ymax" + value { float_list { value: [0.7, 0.8] } } } + feature { + key: "image/detection/bbox/xmax" + value { float_list { value: [1.0, 0.9] } } } + feature { + key: "image/detection/label" + value { int64_list { value: [123, 246] } } } + feature { + key: "image/detection/score" + value { float_list { value: [0.1, 0.2] } } } + feature { + key: "test_field" + value { float_list { value: [1.0, 2.0, 3.0, 4.0] } } } } + """, tf_example) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/inference/infer_detections.py b/models/research/object_detection/inference/infer_detections.py new file mode 100644 index 0000000000000000000000000000000000000000..3579142fc9add1c439164f0d938d4efe9089c692 --- /dev/null +++ b/models/research/object_detection/inference/infer_detections.py @@ -0,0 +1,96 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Infers detections on a TFRecord of TFExamples given an inference graph. 
+ +Example usage: + ./infer_detections \ + --input_tfrecord_paths=/path/to/input/tfrecord1,/path/to/input/tfrecord2 \ + --output_tfrecord_path_prefix=/path/to/output/detections.tfrecord \ + --inference_graph=/path/to/frozen_weights_inference_graph.pb + +The output is a TFRecord of TFExamples. Each TFExample from the input is first +augmented with detections from the inference graph and then copied to the +output. + +The input and output nodes of the inference graph are expected to have the same +types, shapes, and semantics, as the input and output nodes of graphs produced +by export_inference_graph.py, when run with --input_type=image_tensor. + +The script can also discard the image pixels in the output. This greatly +reduces the output size and can potentially accelerate reading data in +subsequent processing steps that don't require the images (e.g. computing +metrics). +""" + +import itertools +import tensorflow.compat.v1 as tf +from object_detection.inference import detection_inference + +tf.flags.DEFINE_string('input_tfrecord_paths', None, + 'A comma separated list of paths to input TFRecords.') +tf.flags.DEFINE_string('output_tfrecord_path', None, + 'Path to the output TFRecord.') +tf.flags.DEFINE_string('inference_graph', None, + 'Path to the inference graph with embedded weights.') +tf.flags.DEFINE_boolean('discard_image_pixels', False, + 'Discards the images in the output TFExamples. This' + ' significantly reduces the output size and is useful' + ' if the subsequent tools don\'t need access to the' + ' images (e.g. when computing evaluation measures).') + +FLAGS = tf.flags.FLAGS + + +def main(_): + tf.logging.set_verbosity(tf.logging.INFO) + + required_flags = ['input_tfrecord_paths', 'output_tfrecord_path', + 'inference_graph'] + for flag_name in required_flags: + if not getattr(FLAGS, flag_name): + raise ValueError('Flag --{} is required'.format(flag_name)) + + with tf.Session() as sess: + input_tfrecord_paths = [ + v for v in FLAGS.input_tfrecord_paths.split(',') if v] + tf.logging.info('Reading input from %d files', len(input_tfrecord_paths)) + serialized_example_tensor, image_tensor = detection_inference.build_input( + input_tfrecord_paths) + tf.logging.info('Reading graph and building model...') + (detected_boxes_tensor, detected_scores_tensor, + detected_labels_tensor) = detection_inference.build_inference_graph( + image_tensor, FLAGS.inference_graph) + + tf.logging.info('Running inference and writing output to {}'.format( + FLAGS.output_tfrecord_path)) + sess.run(tf.local_variables_initializer()) + tf.train.start_queue_runners() + with tf.python_io.TFRecordWriter( + FLAGS.output_tfrecord_path) as tf_record_writer: + try: + for counter in itertools.count(): + tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 10, + counter) + tf_example = detection_inference.infer_detections_and_add_to_example( + serialized_example_tensor, detected_boxes_tensor, + detected_scores_tensor, detected_labels_tensor, + FLAGS.discard_image_pixels) + tf_record_writer.write(tf_example.SerializeToString()) + except tf.errors.OutOfRangeError: + tf.logging.info('Finished processing records') + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/object_detection/inputs.py b/models/research/object_detection/inputs.py new file mode 100644 index 0000000000000000000000000000000000000000..a3eb2f0bd2514a723442a568e124a579eb801794 --- /dev/null +++ b/models/research/object_detection/inputs.py @@ -0,0 +1,1065 @@ +# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Model input function for tf-learn object detection model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools + +import tensorflow.compat.v1 as tf +from object_detection.builders import dataset_builder +from object_detection.builders import image_resizer_builder +from object_detection.builders import model_builder +from object_detection.builders import preprocessor_builder +from object_detection.core import box_list +from object_detection.core import box_list_ops +from object_detection.core import keypoint_ops +from object_detection.core import preprocessor +from object_detection.core import standard_fields as fields +from object_detection.data_decoders import tf_example_decoder +from object_detection.protos import eval_pb2 +from object_detection.protos import image_resizer_pb2 +from object_detection.protos import input_reader_pb2 +from object_detection.protos import model_pb2 +from object_detection.protos import train_pb2 +from object_detection.utils import config_util +from object_detection.utils import ops as util_ops +from object_detection.utils import shape_utils + +HASH_KEY = 'hash' +HASH_BINS = 1 << 31 +SERVING_FED_EXAMPLE_KEY = 'serialized_example' +_LABEL_OFFSET = 1 + +# A map of names to methods that help build the input pipeline. +INPUT_BUILDER_UTIL_MAP = { + 'dataset_build': dataset_builder.build, + 'model_build': model_builder.build, +} + + +def _multiclass_scores_or_one_hot_labels(multiclass_scores, + groundtruth_boxes, + groundtruth_classes, num_classes): + """Returns one-hot encoding of classes when multiclass_scores is empty.""" + # Replace groundtruth_classes tensor with multiclass_scores tensor when its + # non-empty. If multiclass_scores is empty fall back on groundtruth_classes + # tensor. + def true_fn(): + return tf.reshape(multiclass_scores, + [tf.shape(groundtruth_boxes)[0], num_classes]) + def false_fn(): + return tf.one_hot(groundtruth_classes, num_classes) + return tf.cond(tf.size(multiclass_scores) > 0, true_fn, false_fn) + + +def _convert_labeled_classes_to_k_hot(groundtruth_labeled_classes, num_classes): + """Returns k-hot encoding of the labeled classes.""" + + # If the input labeled_classes is empty, it assumes all classes are + # exhaustively labeled, thus returning an all-one encoding. 
+ def true_fn(): + return tf.sparse_to_dense( + groundtruth_labeled_classes - _LABEL_OFFSET, [num_classes], + tf.constant(1, dtype=tf.float32), + validate_indices=False) + + def false_fn(): + return tf.ones(num_classes, dtype=tf.float32) + + return tf.cond(tf.size(groundtruth_labeled_classes) > 0, true_fn, false_fn) + + +def _remove_unrecognized_classes(class_ids, unrecognized_label): + """Returns class ids with unrecognized classes filtered out.""" + + recognized_indices = tf.where(tf.greater(class_ids, unrecognized_label)) + return tf.gather(class_ids, recognized_indices) + + +def assert_or_prune_invalid_boxes(boxes): + """Makes sure boxes have valid sizes (ymax >= ymin, xmax >= xmin). + + When the hardware supports assertions, the function raises an error when + boxes have an invalid size. If assertions are not supported (e.g. on TPU), + boxes with invalid sizes are filtered out. + + Args: + boxes: float tensor of shape [num_boxes, 4] + + Returns: + boxes: float tensor of shape [num_valid_boxes, 4] with invalid boxes + filtered out. + + Raises: + tf.errors.InvalidArgumentError: When we detect boxes with invalid size. + This is not supported on TPUs. + """ + + ymin, xmin, ymax, xmax = tf.split( + boxes, num_or_size_splits=4, axis=1) + + height_check = tf.Assert(tf.reduce_all(ymax >= ymin), [ymin, ymax]) + width_check = tf.Assert(tf.reduce_all(xmax >= xmin), [xmin, xmax]) + + with tf.control_dependencies([height_check, width_check]): + boxes_tensor = tf.concat([ymin, xmin, ymax, xmax], axis=1) + boxlist = box_list.BoxList(boxes_tensor) + # TODO(b/149221748) Remove pruning when XLA supports assertions. + boxlist = box_list_ops.prune_small_boxes(boxlist, 0) + + return boxlist.get() + + +def transform_input_data(tensor_dict, + model_preprocess_fn, + image_resizer_fn, + num_classes, + data_augmentation_fn=None, + merge_multiple_boxes=False, + retain_original_image=False, + use_multiclass_scores=False, + use_bfloat16=False, + retain_original_image_additional_channels=False, + keypoint_type_weight=None): + """A single function that is responsible for all input data transformations. + + Data transformation functions are applied in the following order. + 1. If key fields.InputDataFields.image_additional_channels is present in + tensor_dict, the additional channels will be merged into + fields.InputDataFields.image. + 2. data_augmentation_fn (optional): applied on tensor_dict. + 3. model_preprocess_fn: applied only on image tensor in tensor_dict. + 4. keypoint_type_weight (optional): If groundtruth keypoints are in + the tensor dictionary, per-keypoint weights are produced. These weights are + initialized by `keypoint_type_weight` (or ones if left None). + Then, for all keypoints that are not visible, the weights are set to 0 (to + avoid penalizing the model in a loss function). + 5. image_resizer_fn: applied on original image and instance mask tensor in + tensor_dict. + 6. one_hot_encoding: applied to classes tensor in tensor_dict. + 7. merge_multiple_boxes (optional): when groundtruth boxes are exactly the + same they can be merged into a single box with an associated k-hot class + label. + + Args: + tensor_dict: dictionary containing input tensors keyed by + fields.InputDataFields. + model_preprocess_fn: model's preprocess function to apply on image tensor. + This function must take in a 4-D float tensor and return a 4-D preprocess + float tensor and a tensor containing the true image shape. + image_resizer_fn: image resizer function to apply on groundtruth instance + `masks. 
This function must take a 3-D float tensor of an image and a 3-D + tensor of instance masks and return a resized version of these along with + the true shapes. + num_classes: number of max classes to one-hot (or k-hot) encode the class + labels. + data_augmentation_fn: (optional) data augmentation function to apply on + input `tensor_dict`. + merge_multiple_boxes: (optional) whether to merge multiple groundtruth boxes + and classes for a given image if the boxes are exactly the same. + retain_original_image: (optional) whether to retain original image in the + output dictionary. + use_multiclass_scores: whether to use multiclass scores as class targets + instead of one-hot encoding of `groundtruth_classes`. When + this is True and multiclass_scores is empty, one-hot encoding of + `groundtruth_classes` is used as a fallback. + use_bfloat16: (optional) a bool, whether to use bfloat16 in training. + retain_original_image_additional_channels: (optional) Whether to retain + original image additional channels in the output dictionary. + keypoint_type_weight: A list (of length num_keypoints) containing + groundtruth loss weights to use for each keypoint. If None, will use a + weight of 1. + + Returns: + A dictionary keyed by fields.InputDataFields containing the tensors obtained + after applying all the transformations. + + Raises: + KeyError: If both groundtruth_labeled_classes and groundtruth_image_classes + are provided by the decoder in tensor_dict since both fields are + considered to contain the same information. + """ + out_tensor_dict = tensor_dict.copy() + + labeled_classes_field = fields.InputDataFields.groundtruth_labeled_classes + image_classes_field = fields.InputDataFields.groundtruth_image_classes + if (labeled_classes_field in out_tensor_dict and + image_classes_field in out_tensor_dict): + raise KeyError('groundtruth_labeled_classes and groundtruth_image_classes' + 'are provided by the decoder, but only one should be set.') + + if labeled_classes_field in out_tensor_dict: + # tf_example_decoder casts unrecognized labels to -1. Remove these + # unrecognized labels before converting labeled_classes to k-hot vector. 
+ out_tensor_dict[labeled_classes_field] = _remove_unrecognized_classes( + out_tensor_dict[labeled_classes_field], unrecognized_label=-1) + out_tensor_dict[labeled_classes_field] = _convert_labeled_classes_to_k_hot( + out_tensor_dict[labeled_classes_field], num_classes) + + if image_classes_field in out_tensor_dict: + out_tensor_dict[labeled_classes_field] = _convert_labeled_classes_to_k_hot( + out_tensor_dict[image_classes_field], num_classes) + + if fields.InputDataFields.multiclass_scores in out_tensor_dict: + out_tensor_dict[ + fields.InputDataFields + .multiclass_scores] = _multiclass_scores_or_one_hot_labels( + out_tensor_dict[fields.InputDataFields.multiclass_scores], + out_tensor_dict[fields.InputDataFields.groundtruth_boxes], + out_tensor_dict[fields.InputDataFields.groundtruth_classes], + num_classes) + + if fields.InputDataFields.groundtruth_boxes in out_tensor_dict: + out_tensor_dict = util_ops.filter_groundtruth_with_nan_box_coordinates( + out_tensor_dict) + out_tensor_dict = util_ops.filter_unrecognized_classes(out_tensor_dict) + + if retain_original_image: + out_tensor_dict[fields.InputDataFields.original_image] = tf.cast( + image_resizer_fn(out_tensor_dict[fields.InputDataFields.image], + None)[0], tf.uint8) + + if fields.InputDataFields.image_additional_channels in out_tensor_dict: + channels = out_tensor_dict[fields.InputDataFields.image_additional_channels] + out_tensor_dict[fields.InputDataFields.image] = tf.concat( + [out_tensor_dict[fields.InputDataFields.image], channels], axis=2) + if retain_original_image_additional_channels: + out_tensor_dict[ + fields.InputDataFields.image_additional_channels] = tf.cast( + image_resizer_fn(channels, None)[0], tf.uint8) + + # Apply data augmentation ops. + if data_augmentation_fn is not None: + out_tensor_dict = data_augmentation_fn(out_tensor_dict) + + # Apply model preprocessing ops and resize instance masks. 
+ image = out_tensor_dict[fields.InputDataFields.image] + preprocessed_resized_image, true_image_shape = model_preprocess_fn( + tf.expand_dims(tf.cast(image, dtype=tf.float32), axis=0)) + + preprocessed_shape = tf.shape(preprocessed_resized_image) + new_height, new_width = preprocessed_shape[1], preprocessed_shape[2] + + im_box = tf.stack([ + 0.0, 0.0, + tf.to_float(new_height) / tf.to_float(true_image_shape[0, 0]), + tf.to_float(new_width) / tf.to_float(true_image_shape[0, 1]) + ]) + + if fields.InputDataFields.groundtruth_boxes in tensor_dict: + bboxes = out_tensor_dict[fields.InputDataFields.groundtruth_boxes] + boxlist = box_list.BoxList(bboxes) + realigned_bboxes = box_list_ops.change_coordinate_frame(boxlist, im_box) + + realigned_boxes_tensor = realigned_bboxes.get() + valid_boxes_tensor = assert_or_prune_invalid_boxes(realigned_boxes_tensor) + out_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] = valid_boxes_tensor + + if fields.InputDataFields.groundtruth_keypoints in tensor_dict: + keypoints = out_tensor_dict[fields.InputDataFields.groundtruth_keypoints] + realigned_keypoints = keypoint_ops.change_coordinate_frame(keypoints, + im_box) + out_tensor_dict[ + fields.InputDataFields.groundtruth_keypoints] = realigned_keypoints + flds_gt_kpt = fields.InputDataFields.groundtruth_keypoints + flds_gt_kpt_vis = fields.InputDataFields.groundtruth_keypoint_visibilities + flds_gt_kpt_weights = fields.InputDataFields.groundtruth_keypoint_weights + if flds_gt_kpt_vis not in out_tensor_dict: + out_tensor_dict[flds_gt_kpt_vis] = tf.ones_like( + out_tensor_dict[flds_gt_kpt][:, :, 0], + dtype=tf.bool) + out_tensor_dict[flds_gt_kpt_weights] = ( + keypoint_ops.keypoint_weights_from_visibilities( + out_tensor_dict[flds_gt_kpt_vis], + keypoint_type_weight)) + + if use_bfloat16: + preprocessed_resized_image = tf.cast( + preprocessed_resized_image, tf.bfloat16) + if fields.InputDataFields.context_features in out_tensor_dict: + out_tensor_dict[fields.InputDataFields.context_features] = tf.cast( + out_tensor_dict[fields.InputDataFields.context_features], tf.bfloat16) + out_tensor_dict[fields.InputDataFields.image] = tf.squeeze( + preprocessed_resized_image, axis=0) + out_tensor_dict[fields.InputDataFields.true_image_shape] = tf.squeeze( + true_image_shape, axis=0) + if fields.InputDataFields.groundtruth_instance_masks in out_tensor_dict: + masks = out_tensor_dict[fields.InputDataFields.groundtruth_instance_masks] + _, resized_masks, _ = image_resizer_fn(image, masks) + if use_bfloat16: + resized_masks = tf.cast(resized_masks, tf.bfloat16) + out_tensor_dict[ + fields.InputDataFields.groundtruth_instance_masks] = resized_masks + + zero_indexed_groundtruth_classes = out_tensor_dict[ + fields.InputDataFields.groundtruth_classes] - _LABEL_OFFSET + if use_multiclass_scores: + out_tensor_dict[ + fields.InputDataFields.groundtruth_classes] = out_tensor_dict[ + fields.InputDataFields.multiclass_scores] + else: + out_tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.one_hot( + zero_indexed_groundtruth_classes, num_classes) + out_tensor_dict.pop(fields.InputDataFields.multiclass_scores, None) + + if fields.InputDataFields.groundtruth_confidences in out_tensor_dict: + groundtruth_confidences = out_tensor_dict[ + fields.InputDataFields.groundtruth_confidences] + # Map the confidences to the one-hot encoding of classes + out_tensor_dict[fields.InputDataFields.groundtruth_confidences] = ( + tf.reshape(groundtruth_confidences, [-1, 1]) * + out_tensor_dict[fields.InputDataFields.groundtruth_classes]) + 
else: + groundtruth_confidences = tf.ones_like( + zero_indexed_groundtruth_classes, dtype=tf.float32) + out_tensor_dict[fields.InputDataFields.groundtruth_confidences] = ( + out_tensor_dict[fields.InputDataFields.groundtruth_classes]) + + if merge_multiple_boxes: + merged_boxes, merged_classes, merged_confidences, _ = ( + util_ops.merge_boxes_with_multiple_labels( + out_tensor_dict[fields.InputDataFields.groundtruth_boxes], + zero_indexed_groundtruth_classes, + groundtruth_confidences, + num_classes)) + merged_classes = tf.cast(merged_classes, tf.float32) + out_tensor_dict[fields.InputDataFields.groundtruth_boxes] = merged_boxes + out_tensor_dict[fields.InputDataFields.groundtruth_classes] = merged_classes + out_tensor_dict[fields.InputDataFields.groundtruth_confidences] = ( + merged_confidences) + if fields.InputDataFields.groundtruth_boxes in out_tensor_dict: + out_tensor_dict[fields.InputDataFields.num_groundtruth_boxes] = tf.shape( + out_tensor_dict[fields.InputDataFields.groundtruth_boxes])[0] + + return out_tensor_dict + + +def pad_input_data_to_static_shapes(tensor_dict, + max_num_boxes, + num_classes, + spatial_image_shape=None, + max_num_context_features=None, + context_feature_length=None): + """Pads input tensors to static shapes. + + In case num_additional_channels > 0, we assume that the additional channels + have already been concatenated to the base image. + + Args: + tensor_dict: Tensor dictionary of input data + max_num_boxes: Max number of groundtruth boxes needed to compute shapes for + padding. + num_classes: Number of classes in the dataset needed to compute shapes for + padding. + spatial_image_shape: A list of two integers of the form [height, width] + containing expected spatial shape of the image. + max_num_context_features (optional): The maximum number of context + features needed to compute shapes padding. + context_feature_length (optional): The length of the context feature. + + Returns: + A dictionary keyed by fields.InputDataFields containing padding shapes for + tensors in the dataset. + + Raises: + ValueError: If groundtruth classes is neither rank 1 nor rank 2, or if we + detect that additional channels have not been concatenated yet, or if + max_num_context_features is not specified and context_features is in the + tensor dict. + """ + + if not spatial_image_shape or spatial_image_shape == [-1, -1]: + height, width = None, None + else: + height, width = spatial_image_shape # pylint: disable=unpacking-non-sequence + + num_additional_channels = 0 + if fields.InputDataFields.image_additional_channels in tensor_dict: + num_additional_channels = shape_utils.get_dim_as_int(tensor_dict[ + fields.InputDataFields.image_additional_channels].shape[2]) + + # We assume that if num_additional_channels > 0, then it has already been + # concatenated to the base image (but not the ground truth). 
+ num_channels = 3 + if fields.InputDataFields.image in tensor_dict: + num_channels = shape_utils.get_dim_as_int( + tensor_dict[fields.InputDataFields.image].shape[2]) + + if num_additional_channels: + if num_additional_channels >= num_channels: + raise ValueError( + 'Image must be already concatenated with additional channels.') + + if (fields.InputDataFields.original_image in tensor_dict and + shape_utils.get_dim_as_int( + tensor_dict[fields.InputDataFields.original_image].shape[2]) == + num_channels): + raise ValueError( + 'Image must be already concatenated with additional channels.') + + if fields.InputDataFields.context_features in tensor_dict and ( + max_num_context_features is None): + raise ValueError('max_num_context_features must be specified in the model ' + 'config if include_context is specified in the input ' + 'config') + + padding_shapes = { + fields.InputDataFields.image: [height, width, num_channels], + fields.InputDataFields.original_image_spatial_shape: [2], + fields.InputDataFields.image_additional_channels: [ + height, width, num_additional_channels + ], + fields.InputDataFields.source_id: [], + fields.InputDataFields.filename: [], + fields.InputDataFields.key: [], + fields.InputDataFields.groundtruth_difficult: [max_num_boxes], + fields.InputDataFields.groundtruth_boxes: [max_num_boxes, 4], + fields.InputDataFields.groundtruth_classes: [max_num_boxes, num_classes], + fields.InputDataFields.groundtruth_instance_masks: [ + max_num_boxes, height, width + ], + fields.InputDataFields.groundtruth_is_crowd: [max_num_boxes], + fields.InputDataFields.groundtruth_group_of: [max_num_boxes], + fields.InputDataFields.groundtruth_area: [max_num_boxes], + fields.InputDataFields.groundtruth_weights: [max_num_boxes], + fields.InputDataFields.groundtruth_confidences: [ + max_num_boxes, num_classes + ], + fields.InputDataFields.num_groundtruth_boxes: [], + fields.InputDataFields.groundtruth_label_types: [max_num_boxes], + fields.InputDataFields.groundtruth_label_weights: [max_num_boxes], + fields.InputDataFields.true_image_shape: [3], + fields.InputDataFields.groundtruth_image_classes: [num_classes], + fields.InputDataFields.groundtruth_image_confidences: [num_classes], + fields.InputDataFields.groundtruth_labeled_classes: [num_classes], + } + + if fields.InputDataFields.original_image in tensor_dict: + padding_shapes[fields.InputDataFields.original_image] = [ + height, width, + shape_utils.get_dim_as_int(tensor_dict[fields.InputDataFields. + original_image].shape[2]) + ] + if fields.InputDataFields.groundtruth_keypoints in tensor_dict: + tensor_shape = ( + tensor_dict[fields.InputDataFields.groundtruth_keypoints].shape) + padding_shape = [max_num_boxes, + shape_utils.get_dim_as_int(tensor_shape[1]), + shape_utils.get_dim_as_int(tensor_shape[2])] + padding_shapes[fields.InputDataFields.groundtruth_keypoints] = padding_shape + if fields.InputDataFields.groundtruth_keypoint_visibilities in tensor_dict: + tensor_shape = tensor_dict[fields.InputDataFields. + groundtruth_keypoint_visibilities].shape + padding_shape = [max_num_boxes, shape_utils.get_dim_as_int(tensor_shape[1])] + padding_shapes[fields.InputDataFields. + groundtruth_keypoint_visibilities] = padding_shape + + if fields.InputDataFields.groundtruth_keypoint_weights in tensor_dict: + tensor_shape = ( + tensor_dict[fields.InputDataFields.groundtruth_keypoint_weights].shape) + padding_shape = [max_num_boxes, shape_utils.get_dim_as_int(tensor_shape[1])] + padding_shapes[fields.InputDataFields. 
+ groundtruth_keypoint_weights] = padding_shape + + # Prepare for ContextRCNN related fields. + if fields.InputDataFields.context_features in tensor_dict: + padding_shape = [max_num_context_features, context_feature_length] + padding_shapes[fields.InputDataFields.context_features] = padding_shape + + tensor_shape = tf.shape( + tensor_dict[fields.InputDataFields.context_features]) + tensor_dict[fields.InputDataFields.valid_context_size] = tensor_shape[0] + padding_shapes[fields.InputDataFields.valid_context_size] = [] + if fields.InputDataFields.context_feature_length in tensor_dict: + padding_shapes[fields.InputDataFields.context_feature_length] = [] + + if fields.InputDataFields.is_annotated in tensor_dict: + padding_shapes[fields.InputDataFields.is_annotated] = [] + + padded_tensor_dict = {} + for tensor_name in tensor_dict: + padded_tensor_dict[tensor_name] = shape_utils.pad_or_clip_nd( + tensor_dict[tensor_name], padding_shapes[tensor_name]) + + # Make sure that the number of groundtruth boxes now reflects the + # padded/clipped tensors. + if fields.InputDataFields.num_groundtruth_boxes in padded_tensor_dict: + padded_tensor_dict[fields.InputDataFields.num_groundtruth_boxes] = ( + tf.minimum( + padded_tensor_dict[fields.InputDataFields.num_groundtruth_boxes], + max_num_boxes)) + return padded_tensor_dict + + +def augment_input_data(tensor_dict, data_augmentation_options): + """Applies data augmentation ops to input tensors. + + Args: + tensor_dict: A dictionary of input tensors keyed by fields.InputDataFields. + data_augmentation_options: A list of tuples, where each tuple contains a + function and a dictionary that contains arguments and their values. + Usually, this is the output of core/preprocessor.build. + + Returns: + A dictionary of tensors obtained by applying data augmentation ops to the + input tensor dictionary. 
+ """ + tensor_dict[fields.InputDataFields.image] = tf.expand_dims( + tf.cast(tensor_dict[fields.InputDataFields.image], dtype=tf.float32), 0) + + include_instance_masks = (fields.InputDataFields.groundtruth_instance_masks + in tensor_dict) + include_keypoints = (fields.InputDataFields.groundtruth_keypoints + in tensor_dict) + include_keypoint_visibilities = ( + fields.InputDataFields.groundtruth_keypoint_visibilities in tensor_dict) + include_label_weights = (fields.InputDataFields.groundtruth_weights + in tensor_dict) + include_label_confidences = (fields.InputDataFields.groundtruth_confidences + in tensor_dict) + include_multiclass_scores = (fields.InputDataFields.multiclass_scores in + tensor_dict) + tensor_dict = preprocessor.preprocess( + tensor_dict, data_augmentation_options, + func_arg_map=preprocessor.get_default_func_arg_map( + include_label_weights=include_label_weights, + include_label_confidences=include_label_confidences, + include_multiclass_scores=include_multiclass_scores, + include_instance_masks=include_instance_masks, + include_keypoints=include_keypoints, + include_keypoint_visibilities=include_keypoint_visibilities)) + tensor_dict[fields.InputDataFields.image] = tf.squeeze( + tensor_dict[fields.InputDataFields.image], axis=0) + return tensor_dict + + +def _get_labels_dict(input_dict): + """Extracts labels dict from input dict.""" + required_label_keys = [ + fields.InputDataFields.num_groundtruth_boxes, + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes, + fields.InputDataFields.groundtruth_weights, + ] + labels_dict = {} + for key in required_label_keys: + labels_dict[key] = input_dict[key] + + optional_label_keys = [ + fields.InputDataFields.groundtruth_confidences, + fields.InputDataFields.groundtruth_labeled_classes, + fields.InputDataFields.groundtruth_keypoints, + fields.InputDataFields.groundtruth_instance_masks, + fields.InputDataFields.groundtruth_area, + fields.InputDataFields.groundtruth_is_crowd, + fields.InputDataFields.groundtruth_group_of, + fields.InputDataFields.groundtruth_difficult, + fields.InputDataFields.groundtruth_keypoint_visibilities, + fields.InputDataFields.groundtruth_keypoint_weights, + ] + + for key in optional_label_keys: + if key in input_dict: + labels_dict[key] = input_dict[key] + if fields.InputDataFields.groundtruth_difficult in labels_dict: + labels_dict[fields.InputDataFields.groundtruth_difficult] = tf.cast( + labels_dict[fields.InputDataFields.groundtruth_difficult], tf.int32) + return labels_dict + + +def _replace_empty_string_with_random_number(string_tensor): + """Returns string unchanged if non-empty, and random string tensor otherwise. + + The random string is an integer 0 and 2**63 - 1, casted as string. + + + Args: + string_tensor: A tf.tensor of dtype string. + + Returns: + out_string: A tf.tensor of dtype string. If string_tensor contains the empty + string, out_string will contain a random integer casted to a string. + Otherwise string_tensor is returned unchanged. 
+ + """ + + empty_string = tf.constant('', dtype=tf.string, name='EmptyString') + + random_source_id = tf.as_string( + tf.random_uniform(shape=[], maxval=2**63 - 1, dtype=tf.int64)) + + out_string = tf.cond( + tf.equal(string_tensor, empty_string), + true_fn=lambda: random_source_id, + false_fn=lambda: string_tensor) + + return out_string + + +def _get_features_dict(input_dict, include_source_id=False): + """Extracts features dict from input dict.""" + + source_id = _replace_empty_string_with_random_number( + input_dict[fields.InputDataFields.source_id]) + + hash_from_source_id = tf.string_to_hash_bucket_fast(source_id, HASH_BINS) + features = { + fields.InputDataFields.image: + input_dict[fields.InputDataFields.image], + HASH_KEY: tf.cast(hash_from_source_id, tf.int32), + fields.InputDataFields.true_image_shape: + input_dict[fields.InputDataFields.true_image_shape], + fields.InputDataFields.original_image_spatial_shape: + input_dict[fields.InputDataFields.original_image_spatial_shape] + } + if include_source_id: + features[fields.InputDataFields.source_id] = source_id + if fields.InputDataFields.original_image in input_dict: + features[fields.InputDataFields.original_image] = input_dict[ + fields.InputDataFields.original_image] + if fields.InputDataFields.image_additional_channels in input_dict: + features[fields.InputDataFields.image_additional_channels] = input_dict[ + fields.InputDataFields.image_additional_channels] + if fields.InputDataFields.context_features in input_dict: + features[fields.InputDataFields.context_features] = input_dict[ + fields.InputDataFields.context_features] + if fields.InputDataFields.valid_context_size in input_dict: + features[fields.InputDataFields.valid_context_size] = input_dict[ + fields.InputDataFields.valid_context_size] + return features + + +def create_train_input_fn(train_config, train_input_config, + model_config): + """Creates a train `input` function for `Estimator`. + + Args: + train_config: A train_pb2.TrainConfig. + train_input_config: An input_reader_pb2.InputReader. + model_config: A model_pb2.DetectionModel. + + Returns: + `input_fn` for `Estimator` in TRAIN mode. + """ + + def _train_input_fn(params=None): + return train_input(train_config, train_input_config, model_config, + params=params) + + return _train_input_fn + + +def train_input(train_config, train_input_config, + model_config, model=None, params=None, input_context=None): + """Returns `features` and `labels` tensor dictionaries for training. + + Args: + train_config: A train_pb2.TrainConfig. + train_input_config: An input_reader_pb2.InputReader. + model_config: A model_pb2.DetectionModel. + model: A pre-constructed Detection Model. + If None, one will be created from the config. + params: Parameter dictionary passed from the estimator. + input_context: optional, A tf.distribute.InputContext object used to + shard filenames and compute per-replica batch_size when this function + is being called per-replica. + + Returns: + A tf.data.Dataset that holds (features, labels) tuple. + + features: Dictionary of feature tensors. + features[fields.InputDataFields.image] is a [batch_size, H, W, C] + float32 tensor with preprocessed images. + features[HASH_KEY] is a [batch_size] int32 tensor representing unique + identifiers for the images. + features[fields.InputDataFields.true_image_shape] is a [batch_size, 3] + int32 tensor representing the true image shapes, as preprocessed + images could be padded. 
+ features[fields.InputDataFields.original_image] (optional) is a + [batch_size, H, W, C] float32 tensor with original images. + labels: Dictionary of groundtruth tensors. + labels[fields.InputDataFields.num_groundtruth_boxes] is a [batch_size] + int32 tensor indicating the number of groundtruth boxes. + labels[fields.InputDataFields.groundtruth_boxes] is a + [batch_size, num_boxes, 4] float32 tensor containing the corners of + the groundtruth boxes. + labels[fields.InputDataFields.groundtruth_classes] is a + [batch_size, num_boxes, num_classes] float32 one-hot tensor of + classes. + labels[fields.InputDataFields.groundtruth_weights] is a + [batch_size, num_boxes] float32 tensor containing groundtruth weights + for the boxes. + -- Optional -- + labels[fields.InputDataFields.groundtruth_instance_masks] is a + [batch_size, num_boxes, H, W] float32 tensor containing only binary + values, which represent instance masks for objects. + labels[fields.InputDataFields.groundtruth_keypoints] is a + [batch_size, num_boxes, num_keypoints, 2] float32 tensor containing + keypoints for each box. + labels[fields.InputDataFields.groundtruth_weights] is a + [batch_size, num_boxes, num_keypoints] float32 tensor containing + groundtruth weights for the keypoints. + labels[fields.InputDataFields.groundtruth_visibilities] is a + [batch_size, num_boxes, num_keypoints] bool tensor containing + groundtruth visibilities for each keypoint. + labels[fields.InputDataFields.groundtruth_labeled_classes] is a + [batch_size, num_classes] float32 k-hot tensor of classes. + + Raises: + TypeError: if the `train_config`, `train_input_config` or `model_config` + are not of the correct type. + """ + if not isinstance(train_config, train_pb2.TrainConfig): + raise TypeError('For training mode, the `train_config` must be a ' + 'train_pb2.TrainConfig.') + if not isinstance(train_input_config, input_reader_pb2.InputReader): + raise TypeError('The `train_input_config` must be a ' + 'input_reader_pb2.InputReader.') + if not isinstance(model_config, model_pb2.DetectionModel): + raise TypeError('The `model_config` must be a ' + 'model_pb2.DetectionModel.') + + if model is None: + model_preprocess_fn = INPUT_BUILDER_UTIL_MAP['model_build']( + model_config, is_training=True).preprocess + else: + model_preprocess_fn = model.preprocess + + num_classes = config_util.get_number_of_classes(model_config) + + def transform_and_pad_input_data_fn(tensor_dict): + """Combines transform and pad operation.""" + data_augmentation_options = [ + preprocessor_builder.build(step) + for step in train_config.data_augmentation_options + ] + data_augmentation_fn = functools.partial( + augment_input_data, + data_augmentation_options=data_augmentation_options) + + image_resizer_config = config_util.get_image_resizer_config(model_config) + image_resizer_fn = image_resizer_builder.build(image_resizer_config) + keypoint_type_weight = train_input_config.keypoint_type_weight or None + transform_data_fn = functools.partial( + transform_input_data, model_preprocess_fn=model_preprocess_fn, + image_resizer_fn=image_resizer_fn, + num_classes=num_classes, + data_augmentation_fn=data_augmentation_fn, + merge_multiple_boxes=train_config.merge_multiple_label_boxes, + retain_original_image=train_config.retain_original_images, + use_multiclass_scores=train_config.use_multiclass_scores, + use_bfloat16=train_config.use_bfloat16, + keypoint_type_weight=keypoint_type_weight) + + tensor_dict = pad_input_data_to_static_shapes( + tensor_dict=transform_data_fn(tensor_dict), + 
max_num_boxes=train_input_config.max_number_of_boxes, + num_classes=num_classes, + spatial_image_shape=config_util.get_spatial_image_size( + image_resizer_config), + max_num_context_features=config_util.get_max_num_context_features( + model_config), + context_feature_length=config_util.get_context_feature_length( + model_config)) + include_source_id = train_input_config.include_source_id + return (_get_features_dict(tensor_dict, include_source_id), + _get_labels_dict(tensor_dict)) + reduce_to_frame_fn = get_reduce_to_frame_fn(train_input_config, True) + + dataset = INPUT_BUILDER_UTIL_MAP['dataset_build']( + train_input_config, + transform_input_data_fn=transform_and_pad_input_data_fn, + batch_size=params['batch_size'] if params else train_config.batch_size, + input_context=input_context, + reduce_to_frame_fn=reduce_to_frame_fn) + return dataset + + +def create_eval_input_fn(eval_config, eval_input_config, model_config): + """Creates an eval `input` function for `Estimator`. + + Args: + eval_config: An eval_pb2.EvalConfig. + eval_input_config: An input_reader_pb2.InputReader. + model_config: A model_pb2.DetectionModel. + + Returns: + `input_fn` for `Estimator` in EVAL mode. + """ + + def _eval_input_fn(params=None): + return eval_input(eval_config, eval_input_config, model_config, + params=params) + + return _eval_input_fn + + +def eval_input(eval_config, eval_input_config, model_config, + model=None, params=None): + """Returns `features` and `labels` tensor dictionaries for evaluation. + + Args: + eval_config: An eval_pb2.EvalConfig. + eval_input_config: An input_reader_pb2.InputReader. + model_config: A model_pb2.DetectionModel. + model: A pre-constructed Detection Model. + If None, one will be created from the config. + params: Parameter dictionary passed from the estimator. + + Returns: + A tf.data.Dataset that holds (features, labels) tuple. + + features: Dictionary of feature tensors. + features[fields.InputDataFields.image] is a [1, H, W, C] float32 tensor + with preprocessed images. + features[HASH_KEY] is a [1] int32 tensor representing unique + identifiers for the images. + features[fields.InputDataFields.true_image_shape] is a [1, 3] + int32 tensor representing the true image shapes, as preprocessed + images could be padded. + features[fields.InputDataFields.original_image] is a [1, H', W', C] + float32 tensor with the original image. + labels: Dictionary of groundtruth tensors. + labels[fields.InputDataFields.groundtruth_boxes] is a [1, num_boxes, 4] + float32 tensor containing the corners of the groundtruth boxes. + labels[fields.InputDataFields.groundtruth_classes] is a + [num_boxes, num_classes] float32 one-hot tensor of classes. + labels[fields.InputDataFields.groundtruth_area] is a [1, num_boxes] + float32 tensor containing object areas. + labels[fields.InputDataFields.groundtruth_is_crowd] is a [1, num_boxes] + bool tensor indicating if the boxes enclose a crowd. + labels[fields.InputDataFields.groundtruth_difficult] is a [1, num_boxes] + int32 tensor indicating if the boxes represent difficult instances. + -- Optional -- + labels[fields.InputDataFields.groundtruth_instance_masks] is a + [1, num_boxes, H, W] float32 tensor containing only binary values, + which represent instance masks for objects. + labels[fields.InputDataFields.groundtruth_weights] is a + [batch_size, num_boxes, num_keypoints] float32 tensor containing + groundtruth weights for the keypoints. 
+      labels[fields.InputDataFields.groundtruth_visibilities] is a
+        [batch_size, num_boxes, num_keypoints] bool tensor containing
+        groundtruth visibilities for each keypoint.
+      labels[fields.InputDataFields.groundtruth_group_of] is a [1, num_boxes]
+        bool tensor indicating if the box covers more than 5 instances of the
+        same class which heavily occlude each other.
+      labels[fields.InputDataFields.groundtruth_labeled_classes] is a
+        [1, num_classes] float32 k-hot tensor of classes.
+
+  Raises:
+    TypeError: if the `eval_config`, `eval_input_config` or `model_config`
+      are not of the correct type.
+  """
+  params = params or {}
+  if not isinstance(eval_config, eval_pb2.EvalConfig):
+    raise TypeError('For eval mode, the `eval_config` must be an '
+                    'eval_pb2.EvalConfig.')
+  if not isinstance(eval_input_config, input_reader_pb2.InputReader):
+    raise TypeError('The `eval_input_config` must be a '
+                    'input_reader_pb2.InputReader.')
+  if not isinstance(model_config, model_pb2.DetectionModel):
+    raise TypeError('The `model_config` must be a '
+                    'model_pb2.DetectionModel.')
+
+  if eval_config.force_no_resize:
+    arch = model_config.WhichOneof('model')
+    arch_config = getattr(model_config, arch)
+    image_resizer_proto = image_resizer_pb2.ImageResizer()
+    image_resizer_proto.identity_resizer.CopyFrom(
+        image_resizer_pb2.IdentityResizer())
+    arch_config.image_resizer.CopyFrom(image_resizer_proto)
+
+  if model is None:
+    model_preprocess_fn = INPUT_BUILDER_UTIL_MAP['model_build'](
+        model_config, is_training=False).preprocess
+  else:
+    model_preprocess_fn = model.preprocess
+
+  def transform_and_pad_input_data_fn(tensor_dict):
+    """Combines transform and pad operation."""
+    num_classes = config_util.get_number_of_classes(model_config)
+
+    image_resizer_config = config_util.get_image_resizer_config(model_config)
+    image_resizer_fn = image_resizer_builder.build(image_resizer_config)
+    keypoint_type_weight = eval_input_config.keypoint_type_weight or None
+
+    transform_data_fn = functools.partial(
+        transform_input_data, model_preprocess_fn=model_preprocess_fn,
+        image_resizer_fn=image_resizer_fn,
+        num_classes=num_classes,
+        data_augmentation_fn=None,
+        retain_original_image=eval_config.retain_original_images,
+        retain_original_image_additional_channels=
+        eval_config.retain_original_image_additional_channels,
+        keypoint_type_weight=keypoint_type_weight)
+    tensor_dict = pad_input_data_to_static_shapes(
+        tensor_dict=transform_data_fn(tensor_dict),
+        max_num_boxes=eval_input_config.max_number_of_boxes,
+        num_classes=config_util.get_number_of_classes(model_config),
+        spatial_image_shape=config_util.get_spatial_image_size(
+            image_resizer_config),
+        max_num_context_features=config_util.get_max_num_context_features(
+            model_config),
+        context_feature_length=config_util.get_context_feature_length(
+            model_config))
+    include_source_id = eval_input_config.include_source_id
+    return (_get_features_dict(tensor_dict, include_source_id),
+            _get_labels_dict(tensor_dict))
+
+  reduce_to_frame_fn = get_reduce_to_frame_fn(eval_input_config, False)
+
+  dataset = INPUT_BUILDER_UTIL_MAP['dataset_build'](
+      eval_input_config,
+      batch_size=params['batch_size'] if params else eval_config.batch_size,
+      transform_input_data_fn=transform_and_pad_input_data_fn,
+      reduce_to_frame_fn=reduce_to_frame_fn)
+  return dataset
+
+
+def create_predict_input_fn(model_config, predict_input_config):
+  """Creates a predict `input` function for `Estimator`.
+
+  Args:
+    model_config: A model_pb2.DetectionModel.
+ predict_input_config: An input_reader_pb2.InputReader. + + Returns: + `input_fn` for `Estimator` in PREDICT mode. + """ + + def _predict_input_fn(params=None): + """Decodes serialized tf.Examples and returns `ServingInputReceiver`. + + Args: + params: Parameter dictionary passed from the estimator. + + Returns: + `ServingInputReceiver`. + """ + del params + example = tf.placeholder(dtype=tf.string, shape=[], name='tf_example') + + num_classes = config_util.get_number_of_classes(model_config) + model_preprocess_fn = INPUT_BUILDER_UTIL_MAP['model_build']( + model_config, is_training=False).preprocess + + image_resizer_config = config_util.get_image_resizer_config(model_config) + image_resizer_fn = image_resizer_builder.build(image_resizer_config) + + transform_fn = functools.partial( + transform_input_data, model_preprocess_fn=model_preprocess_fn, + image_resizer_fn=image_resizer_fn, + num_classes=num_classes, + data_augmentation_fn=None) + + decoder = tf_example_decoder.TfExampleDecoder( + load_instance_masks=False, + num_additional_channels=predict_input_config.num_additional_channels) + input_dict = transform_fn(decoder.decode(example)) + images = tf.cast(input_dict[fields.InputDataFields.image], dtype=tf.float32) + images = tf.expand_dims(images, axis=0) + true_image_shape = tf.expand_dims( + input_dict[fields.InputDataFields.true_image_shape], axis=0) + + return tf.estimator.export.ServingInputReceiver( + features={ + fields.InputDataFields.image: images, + fields.InputDataFields.true_image_shape: true_image_shape}, + receiver_tensors={SERVING_FED_EXAMPLE_KEY: example}) + + return _predict_input_fn + + +def get_reduce_to_frame_fn(input_reader_config, is_training): + """Returns a function reducing sequence tensors to single frame tensors. + + If the input type is not TF_SEQUENCE_EXAMPLE, the tensors are passed through + this function unchanged. Otherwise, when in training mode, a single frame is + selected at random from the sequence example, and the tensors for that frame + are converted to single frame tensors, with all associated context features. + In evaluation mode all frames are converted to single frame tensors with + copied context tensors. After the sequence example tensors are converted into + one or many single frame tensors, the images from each frame are decoded. + + Args: + input_reader_config: An input_reader_pb2.InputReader. + is_training: Whether we are in training mode. + + Returns: + `reduce_to_frame_fn` for the dataset builder + """ + if input_reader_config.input_type != ( + input_reader_pb2.InputType.Value('TF_SEQUENCE_EXAMPLE')): + return lambda dataset, dataset_map_fn, batch_size, config: dataset + else: + def reduce_to_frame(dataset, dataset_map_fn, batch_size, + input_reader_config): + """Returns a function reducing sequence tensors to single frame tensors. + + Args: + dataset: A tf dataset containing sequence tensors. + dataset_map_fn: A function that handles whether to + map_with_legacy_function for this dataset + batch_size: used if map_with_legacy_function is true to determine + num_parallel_calls + input_reader_config: used if map_with_legacy_function is true to + determine num_parallel_calls + + Returns: + A tf dataset containing single frame tensors. + """ + if is_training: + def get_single_frame(tensor_dict): + """Returns a random frame from a sequence. + + Picks a random frame and returns slices of sequence tensors + corresponding to the random frame. Returns non-sequence tensors + unchanged. 
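+
+        For example, with a five-frame sequence example, frame_index is
+        drawn uniformly from [0, 5) and every tensor listed in
+        fields.SEQUENCE_FIELDS is sliced at that index, while context
+        tensors are passed through unchanged.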
+ + Args: + tensor_dict: A dictionary containing sequence tensors. + + Returns: + Tensors for a single random frame within the sequence. + """ + num_frames = tf.cast( + tf.shape(tensor_dict[fields.InputDataFields.source_id])[0], + dtype=tf.int32) + frame_index = tf.random.uniform((), minval=0, maxval=num_frames, + dtype=tf.int32) + out_tensor_dict = {} + for key in tensor_dict: + if key in fields.SEQUENCE_FIELDS: + # Slice random frame from sequence tensors + out_tensor_dict[key] = tensor_dict[key][frame_index] + else: + # Copy all context tensors. + out_tensor_dict[key] = tensor_dict[key] + return out_tensor_dict + dataset = dataset_map_fn(dataset, get_single_frame, batch_size, + input_reader_config) + else: + dataset = dataset_map_fn(dataset, util_ops.tile_context_tensors, + batch_size, input_reader_config) + dataset = dataset.unbatch() + # Decode frame here as SequenceExample tensors contain encoded images. + dataset = dataset_map_fn(dataset, util_ops.decode_image, batch_size, + input_reader_config) + return dataset + return reduce_to_frame diff --git a/models/research/object_detection/inputs_test.py b/models/research/object_detection/inputs_test.py new file mode 100644 index 0000000000000000000000000000000000000000..1fca6538f071d11605ef1f83db24d184d3e6ab8d --- /dev/null +++ b/models/research/object_detection/inputs_test.py @@ -0,0 +1,1534 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for object_detection.tflearn.inputs.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import os +import unittest +from absl import logging +from absl.testing import parameterized +import numpy as np +import six +import tensorflow.compat.v1 as tf + +from object_detection import inputs +from object_detection.core import preprocessor +from object_detection.core import standard_fields as fields +from object_detection.utils import config_util +from object_detection.utils import test_case +from object_detection.utils import test_utils +from object_detection.utils import tf_version + +if six.PY2: + import mock # pylint: disable=g-import-not-at-top +else: + from unittest import mock # pylint: disable=g-import-not-at-top, g-importing-member + +FLAGS = tf.flags.FLAGS + + +def _get_configs_for_model(model_name): + """Returns configurations for model.""" + fname = os.path.join(tf.resource_loader.get_data_files_path(), + 'samples/configs/' + model_name + '.config') + label_map_path = os.path.join(tf.resource_loader.get_data_files_path(), + 'data/pet_label_map.pbtxt') + data_path = os.path.join(tf.resource_loader.get_data_files_path(), + 'test_data/pets_examples.record') + configs = config_util.get_configs_from_pipeline_file(fname) + override_dict = { + 'train_input_path': data_path, + 'eval_input_path': data_path, + 'label_map_path': label_map_path + } + return config_util.merge_external_params_with_configs( + configs, kwargs_dict=override_dict) + + +def _get_configs_for_model_sequence_example(model_name): + """Returns configurations for model.""" + fname = os.path.join(tf.resource_loader.get_data_files_path(), + 'test_data/' + model_name + '.config') + label_map_path = os.path.join(tf.resource_loader.get_data_files_path(), + 'data/snapshot_serengeti_label_map.pbtxt') + data_path = os.path.join( + tf.resource_loader.get_data_files_path(), + 'test_data/snapshot_serengeti_sequence_examples.record') + configs = config_util.get_configs_from_pipeline_file(fname) + override_dict = { + 'train_input_path': data_path, + 'eval_input_path': data_path, + 'label_map_path': label_map_path + } + return config_util.merge_external_params_with_configs( + configs, kwargs_dict=override_dict) + + +def _make_initializable_iterator(dataset): + """Creates an iterator, and initializes tables. + + Args: + dataset: A `tf.data.Dataset` object. + + Returns: + A `tf.data.Iterator`. 
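+
+  The iterator's initializer is registered in the TABLE_INITIALIZERS
+  collection, so running the table initializers also initializes it.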
+ """ + iterator = tf.data.make_initializable_iterator(dataset) + tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer) + return iterator + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only tests under TF2.X.') +class InputFnTest(test_case.TestCase, parameterized.TestCase): + + def test_faster_rcnn_resnet50_train_input(self): + """Tests the training input function for FasterRcnnResnet50.""" + configs = _get_configs_for_model('faster_rcnn_resnet50_pets') + model_config = configs['model'] + model_config.faster_rcnn.num_classes = 37 + train_input_fn = inputs.create_train_input_fn( + configs['train_config'], configs['train_input_config'], model_config) + features, labels = _make_initializable_iterator(train_input_fn()).get_next() + + self.assertAllEqual([1, None, None, 3], + features[fields.InputDataFields.image].shape.as_list()) + self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) + self.assertAllEqual([1], + features[inputs.HASH_KEY].shape.as_list()) + self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype) + self.assertAllEqual( + [1, 100, 4], + labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_boxes].dtype) + self.assertAllEqual( + [1, 100, model_config.faster_rcnn.num_classes], + labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_classes].dtype) + self.assertAllEqual( + [1, 100], + labels[fields.InputDataFields.groundtruth_weights].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_weights].dtype) + self.assertAllEqual( + [1, 100, model_config.faster_rcnn.num_classes], + labels[fields.InputDataFields.groundtruth_confidences].shape.as_list()) + self.assertEqual( + tf.float32, + labels[fields.InputDataFields.groundtruth_confidences].dtype) + + def test_faster_rcnn_resnet50_train_input_with_additional_channels(self): + """Tests the training input function for FasterRcnnResnet50.""" + configs = _get_configs_for_model('faster_rcnn_resnet50_pets') + model_config = configs['model'] + configs['train_input_config'].num_additional_channels = 2 + configs['train_config'].retain_original_images = True + model_config.faster_rcnn.num_classes = 37 + train_input_fn = inputs.create_train_input_fn( + configs['train_config'], configs['train_input_config'], model_config) + features, labels = _make_initializable_iterator(train_input_fn()).get_next() + + self.assertAllEqual([1, None, None, 5], + features[fields.InputDataFields.image].shape.as_list()) + self.assertAllEqual( + [1, None, None, 3], + features[fields.InputDataFields.original_image].shape.as_list()) + self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) + self.assertAllEqual([1], + features[inputs.HASH_KEY].shape.as_list()) + self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype) + self.assertAllEqual( + [1, 100, 4], + labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_boxes].dtype) + self.assertAllEqual( + [1, 100, model_config.faster_rcnn.num_classes], + labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_classes].dtype) + self.assertAllEqual( + [1, 100], + labels[fields.InputDataFields.groundtruth_weights].shape.as_list()) + self.assertEqual(tf.float32, + 
labels[fields.InputDataFields.groundtruth_weights].dtype) + self.assertAllEqual( + [1, 100, model_config.faster_rcnn.num_classes], + labels[fields.InputDataFields.groundtruth_confidences].shape.as_list()) + self.assertEqual( + tf.float32, + labels[fields.InputDataFields.groundtruth_confidences].dtype) + + @parameterized.parameters( + {'eval_batch_size': 1}, + {'eval_batch_size': 8} + ) + def test_faster_rcnn_resnet50_eval_input(self, eval_batch_size=1): + """Tests the eval input function for FasterRcnnResnet50.""" + configs = _get_configs_for_model('faster_rcnn_resnet50_pets') + model_config = configs['model'] + model_config.faster_rcnn.num_classes = 37 + eval_config = configs['eval_config'] + eval_config.batch_size = eval_batch_size + eval_input_fn = inputs.create_eval_input_fn( + eval_config, configs['eval_input_configs'][0], model_config) + features, labels = _make_initializable_iterator(eval_input_fn()).get_next() + self.assertAllEqual([eval_batch_size, None, None, 3], + features[fields.InputDataFields.image].shape.as_list()) + self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) + self.assertAllEqual( + [eval_batch_size, None, None, 3], + features[fields.InputDataFields.original_image].shape.as_list()) + self.assertEqual(tf.uint8, + features[fields.InputDataFields.original_image].dtype) + self.assertAllEqual([eval_batch_size], + features[inputs.HASH_KEY].shape.as_list()) + self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype) + self.assertAllEqual( + [eval_batch_size, 100, 4], + labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_boxes].dtype) + self.assertAllEqual( + [eval_batch_size, 100, model_config.faster_rcnn.num_classes], + labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_classes].dtype) + self.assertAllEqual( + [eval_batch_size, 100], + labels[fields.InputDataFields.groundtruth_weights].shape.as_list()) + self.assertEqual( + tf.float32, + labels[fields.InputDataFields.groundtruth_weights].dtype) + self.assertAllEqual( + [eval_batch_size, 100], + labels[fields.InputDataFields.groundtruth_area].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_area].dtype) + self.assertAllEqual( + [eval_batch_size, 100], + labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list()) + self.assertEqual( + tf.bool, labels[fields.InputDataFields.groundtruth_is_crowd].dtype) + self.assertAllEqual( + [eval_batch_size, 100], + labels[fields.InputDataFields.groundtruth_difficult].shape.as_list()) + self.assertEqual( + tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype) + + def test_context_rcnn_resnet50_train_input_with_sequence_example( + self, train_batch_size=8): + """Tests the training input function for FasterRcnnResnet50.""" + configs = _get_configs_for_model_sequence_example( + 'context_rcnn_camera_trap') + model_config = configs['model'] + train_config = configs['train_config'] + train_config.batch_size = train_batch_size + train_input_fn = inputs.create_train_input_fn( + train_config, configs['train_input_config'], model_config) + features, labels = _make_initializable_iterator(train_input_fn()).get_next() + + self.assertAllEqual([train_batch_size, 640, 640, 3], + features[fields.InputDataFields.image].shape.as_list()) + self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) + 
self.assertAllEqual([train_batch_size], + features[inputs.HASH_KEY].shape.as_list()) + self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype) + self.assertAllEqual( + [train_batch_size, 100, 4], + labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_boxes].dtype) + self.assertAllEqual( + [train_batch_size, 100, model_config.faster_rcnn.num_classes], + labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_classes].dtype) + self.assertAllEqual( + [train_batch_size, 100], + labels[fields.InputDataFields.groundtruth_weights].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_weights].dtype) + self.assertAllEqual( + [train_batch_size, 100, model_config.faster_rcnn.num_classes], + labels[fields.InputDataFields.groundtruth_confidences].shape.as_list()) + self.assertEqual( + tf.float32, + labels[fields.InputDataFields.groundtruth_confidences].dtype) + + def test_context_rcnn_resnet50_eval_input_with_sequence_example( + self, eval_batch_size=8): + """Tests the eval input function for FasterRcnnResnet50.""" + configs = _get_configs_for_model_sequence_example( + 'context_rcnn_camera_trap') + model_config = configs['model'] + eval_config = configs['eval_config'] + eval_config.batch_size = eval_batch_size + eval_input_fn = inputs.create_eval_input_fn( + eval_config, configs['eval_input_configs'][0], model_config) + features, labels = _make_initializable_iterator(eval_input_fn()).get_next() + self.assertAllEqual([eval_batch_size, 640, 640, 3], + features[fields.InputDataFields.image].shape.as_list()) + self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) + self.assertAllEqual( + [eval_batch_size, 640, 640, 3], + features[fields.InputDataFields.original_image].shape.as_list()) + self.assertEqual(tf.uint8, + features[fields.InputDataFields.original_image].dtype) + self.assertAllEqual([eval_batch_size], + features[inputs.HASH_KEY].shape.as_list()) + self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype) + self.assertAllEqual( + [eval_batch_size, 100, 4], + labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_boxes].dtype) + self.assertAllEqual( + [eval_batch_size, 100, model_config.faster_rcnn.num_classes], + labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_classes].dtype) + self.assertAllEqual( + [eval_batch_size, 100], + labels[fields.InputDataFields.groundtruth_weights].shape.as_list()) + self.assertEqual( + tf.float32, + labels[fields.InputDataFields.groundtruth_weights].dtype) + + def test_ssd_inceptionV2_train_input(self): + """Tests the training input function for SSDInceptionV2.""" + configs = _get_configs_for_model('ssd_inception_v2_pets') + model_config = configs['model'] + model_config.ssd.num_classes = 37 + batch_size = configs['train_config'].batch_size + train_input_fn = inputs.create_train_input_fn( + configs['train_config'], configs['train_input_config'], model_config) + features, labels = _make_initializable_iterator(train_input_fn()).get_next() + + self.assertAllEqual([batch_size, 300, 300, 3], + features[fields.InputDataFields.image].shape.as_list()) + self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) + self.assertAllEqual([batch_size], + 
features[inputs.HASH_KEY].shape.as_list()) + self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype) + self.assertAllEqual( + [batch_size], + labels[fields.InputDataFields.num_groundtruth_boxes].shape.as_list()) + self.assertEqual(tf.int32, + labels[fields.InputDataFields.num_groundtruth_boxes].dtype) + self.assertAllEqual( + [batch_size, 100, 4], + labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_boxes].dtype) + self.assertAllEqual( + [batch_size, 100, model_config.ssd.num_classes], + labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_classes].dtype) + self.assertAllEqual( + [batch_size, 100], + labels[ + fields.InputDataFields.groundtruth_weights].shape.as_list()) + self.assertEqual( + tf.float32, + labels[fields.InputDataFields.groundtruth_weights].dtype) + + @parameterized.parameters( + {'eval_batch_size': 1}, + {'eval_batch_size': 8} + ) + def test_ssd_inceptionV2_eval_input(self, eval_batch_size=1): + """Tests the eval input function for SSDInceptionV2.""" + configs = _get_configs_for_model('ssd_inception_v2_pets') + model_config = configs['model'] + model_config.ssd.num_classes = 37 + eval_config = configs['eval_config'] + eval_config.batch_size = eval_batch_size + eval_input_fn = inputs.create_eval_input_fn( + eval_config, configs['eval_input_configs'][0], model_config) + features, labels = _make_initializable_iterator(eval_input_fn()).get_next() + self.assertAllEqual([eval_batch_size, 300, 300, 3], + features[fields.InputDataFields.image].shape.as_list()) + self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) + self.assertAllEqual( + [eval_batch_size, 300, 300, 3], + features[fields.InputDataFields.original_image].shape.as_list()) + self.assertEqual(tf.uint8, + features[fields.InputDataFields.original_image].dtype) + self.assertAllEqual([eval_batch_size], + features[inputs.HASH_KEY].shape.as_list()) + self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype) + self.assertAllEqual( + [eval_batch_size, 100, 4], + labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_boxes].dtype) + self.assertAllEqual( + [eval_batch_size, 100, model_config.ssd.num_classes], + labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_classes].dtype) + self.assertAllEqual( + [eval_batch_size, 100], + labels[ + fields.InputDataFields.groundtruth_weights].shape.as_list()) + self.assertEqual( + tf.float32, + labels[fields.InputDataFields.groundtruth_weights].dtype) + self.assertAllEqual( + [eval_batch_size, 100], + labels[fields.InputDataFields.groundtruth_area].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_area].dtype) + self.assertAllEqual( + [eval_batch_size, 100], + labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list()) + self.assertEqual( + tf.bool, labels[fields.InputDataFields.groundtruth_is_crowd].dtype) + self.assertAllEqual( + [eval_batch_size, 100], + labels[fields.InputDataFields.groundtruth_difficult].shape.as_list()) + self.assertEqual( + tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype) + + def test_ssd_inceptionV2_eval_input_with_additional_channels( + self, eval_batch_size=1): + """Tests the eval input function for SSDInceptionV2 
with additional channel. + + Args: + eval_batch_size: Batch size for eval set. + """ + configs = _get_configs_for_model('ssd_inception_v2_pets') + model_config = configs['model'] + model_config.ssd.num_classes = 37 + configs['eval_input_configs'][0].num_additional_channels = 1 + eval_config = configs['eval_config'] + eval_config.batch_size = eval_batch_size + eval_config.retain_original_image_additional_channels = True + eval_input_fn = inputs.create_eval_input_fn( + eval_config, configs['eval_input_configs'][0], model_config) + features, labels = _make_initializable_iterator(eval_input_fn()).get_next() + self.assertAllEqual([eval_batch_size, 300, 300, 4], + features[fields.InputDataFields.image].shape.as_list()) + self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) + self.assertAllEqual( + [eval_batch_size, 300, 300, 3], + features[fields.InputDataFields.original_image].shape.as_list()) + self.assertEqual(tf.uint8, + features[fields.InputDataFields.original_image].dtype) + self.assertAllEqual([eval_batch_size, 300, 300, 1], features[ + fields.InputDataFields.image_additional_channels].shape.as_list()) + self.assertEqual( + tf.uint8, + features[fields.InputDataFields.image_additional_channels].dtype) + self.assertAllEqual([eval_batch_size], + features[inputs.HASH_KEY].shape.as_list()) + self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype) + self.assertAllEqual( + [eval_batch_size, 100, 4], + labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_boxes].dtype) + self.assertAllEqual( + [eval_batch_size, 100, model_config.ssd.num_classes], + labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_classes].dtype) + self.assertAllEqual( + [eval_batch_size, 100], + labels[fields.InputDataFields.groundtruth_weights].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_weights].dtype) + self.assertAllEqual( + [eval_batch_size, 100], + labels[fields.InputDataFields.groundtruth_area].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_area].dtype) + self.assertAllEqual( + [eval_batch_size, 100], + labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list()) + self.assertEqual(tf.bool, + labels[fields.InputDataFields.groundtruth_is_crowd].dtype) + self.assertAllEqual( + [eval_batch_size, 100], + labels[fields.InputDataFields.groundtruth_difficult].shape.as_list()) + self.assertEqual(tf.int32, + labels[fields.InputDataFields.groundtruth_difficult].dtype) + + def test_predict_input(self): + """Tests the predict input function.""" + configs = _get_configs_for_model('ssd_inception_v2_pets') + predict_input_fn = inputs.create_predict_input_fn( + model_config=configs['model'], + predict_input_config=configs['eval_input_configs'][0]) + serving_input_receiver = predict_input_fn() + + image = serving_input_receiver.features[fields.InputDataFields.image] + receiver_tensors = serving_input_receiver.receiver_tensors[ + inputs.SERVING_FED_EXAMPLE_KEY] + self.assertEqual([1, 300, 300, 3], image.shape.as_list()) + self.assertEqual(tf.float32, image.dtype) + self.assertEqual(tf.string, receiver_tensors.dtype) + + def test_predict_input_with_additional_channels(self): + """Tests the predict input function with additional channels.""" + configs = _get_configs_for_model('ssd_inception_v2_pets') + 
configs['eval_input_configs'][0].num_additional_channels = 2 + predict_input_fn = inputs.create_predict_input_fn( + model_config=configs['model'], + predict_input_config=configs['eval_input_configs'][0]) + serving_input_receiver = predict_input_fn() + + image = serving_input_receiver.features[fields.InputDataFields.image] + receiver_tensors = serving_input_receiver.receiver_tensors[ + inputs.SERVING_FED_EXAMPLE_KEY] + # RGB + 2 additional channels = 5 channels. + self.assertEqual([1, 300, 300, 5], image.shape.as_list()) + self.assertEqual(tf.float32, image.dtype) + self.assertEqual(tf.string, receiver_tensors.dtype) + + def test_error_with_bad_train_config(self): + """Tests that a TypeError is raised with improper train config.""" + configs = _get_configs_for_model('ssd_inception_v2_pets') + configs['model'].ssd.num_classes = 37 + train_input_fn = inputs.create_train_input_fn( + train_config=configs['eval_config'], # Expecting `TrainConfig`. + train_input_config=configs['train_input_config'], + model_config=configs['model']) + with self.assertRaises(TypeError): + train_input_fn() + + def test_error_with_bad_train_input_config(self): + """Tests that a TypeError is raised with improper train input config.""" + configs = _get_configs_for_model('ssd_inception_v2_pets') + configs['model'].ssd.num_classes = 37 + train_input_fn = inputs.create_train_input_fn( + train_config=configs['train_config'], + train_input_config=configs['model'], # Expecting `InputReader`. + model_config=configs['model']) + with self.assertRaises(TypeError): + train_input_fn() + + def test_error_with_bad_train_model_config(self): + """Tests that a TypeError is raised with improper train model config.""" + configs = _get_configs_for_model('ssd_inception_v2_pets') + configs['model'].ssd.num_classes = 37 + train_input_fn = inputs.create_train_input_fn( + train_config=configs['train_config'], + train_input_config=configs['train_input_config'], + model_config=configs['train_config']) # Expecting `DetectionModel`. + with self.assertRaises(TypeError): + train_input_fn() + + def test_error_with_bad_eval_config(self): + """Tests that a TypeError is raised with improper eval config.""" + configs = _get_configs_for_model('ssd_inception_v2_pets') + configs['model'].ssd.num_classes = 37 + eval_input_fn = inputs.create_eval_input_fn( + eval_config=configs['train_config'], # Expecting `EvalConfig`. + eval_input_config=configs['eval_input_configs'][0], + model_config=configs['model']) + with self.assertRaises(TypeError): + eval_input_fn() + + def test_error_with_bad_eval_input_config(self): + """Tests that a TypeError is raised with improper eval input config.""" + configs = _get_configs_for_model('ssd_inception_v2_pets') + configs['model'].ssd.num_classes = 37 + eval_input_fn = inputs.create_eval_input_fn( + eval_config=configs['eval_config'], + eval_input_config=configs['model'], # Expecting `InputReader`. + model_config=configs['model']) + with self.assertRaises(TypeError): + eval_input_fn() + + def test_error_with_bad_eval_model_config(self): + """Tests that a TypeError is raised with improper eval model config.""" + configs = _get_configs_for_model('ssd_inception_v2_pets') + configs['model'].ssd.num_classes = 37 + eval_input_fn = inputs.create_eval_input_fn( + eval_config=configs['eval_config'], + eval_input_config=configs['eval_input_configs'][0], + model_config=configs['eval_config']) # Expecting `DetectionModel`. 
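+    # The config types are only validated inside eval_input, not in
+    # create_eval_input_fn, so the TypeError surfaces once the returned
+    # input_fn is actually called.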
+    with self.assertRaises(TypeError):
+      eval_input_fn()
+
+  def test_output_equal_in_replace_empty_string_with_random_number(self):
+    string_placeholder = tf.placeholder(tf.string, shape=[])
+    replaced_string = inputs._replace_empty_string_with_random_number(
+        string_placeholder)
+
+    test_string = b'hello world'
+    feed_dict = {string_placeholder: test_string}
+
+    with self.test_session() as sess:
+      out_string = sess.run(replaced_string, feed_dict=feed_dict)
+
+    self.assertEqual(test_string, out_string)
+
+  def test_output_is_integer_in_replace_empty_string_with_random_number(self):
+
+    string_placeholder = tf.placeholder(tf.string, shape=[])
+    replaced_string = inputs._replace_empty_string_with_random_number(
+        string_placeholder)
+
+    empty_string = ''
+    feed_dict = {string_placeholder: empty_string}
+    with self.test_session() as sess:
+      out_string = sess.run(replaced_string, feed_dict=feed_dict)
+
+    is_integer = True
+    try:
+      # Test whether out_string is a string which represents an integer; the
+      # casting below will throw an error if out_string is not castable to int.
+      int(out_string)
+    except ValueError:
+      is_integer = False
+
+    self.assertTrue(is_integer)
+
+  def test_force_no_resize(self):
+    """Tests the functionality of the force_no_resize option."""
+    configs = _get_configs_for_model('ssd_inception_v2_pets')
+    configs['eval_config'].force_no_resize = True
+
+    eval_input_fn = inputs.create_eval_input_fn(
+        eval_config=configs['eval_config'],
+        eval_input_config=configs['eval_input_configs'][0],
+        model_config=configs['model']
+    )
+    train_input_fn = inputs.create_train_input_fn(
+        train_config=configs['train_config'],
+        train_input_config=configs['train_input_config'],
+        model_config=configs['model']
+    )
+
+    features_train, _ = _make_initializable_iterator(
+        train_input_fn()).get_next()
+
+    features_eval, _ = _make_initializable_iterator(
+        eval_input_fn()).get_next()
+
+    images_train, images_eval = features_train['image'], features_eval['image']
+
+    self.assertEqual([1, None, None, 3], images_eval.shape.as_list())
+    self.assertEqual([24, 300, 300, 3], images_train.shape.as_list())
+
+
+class DataAugmentationFnTest(test_case.TestCase):
+
+  def test_apply_image_and_box_augmentation(self):
+    data_augmentation_options = [
+        (preprocessor.resize_image, {
+            'new_height': 20,
+            'new_width': 20,
+            'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
+        }),
+        (preprocessor.scale_boxes_to_pixel_coordinates, {}),
+    ]
+    data_augmentation_fn = functools.partial(
+        inputs.augment_input_data,
+        data_augmentation_options=data_augmentation_options)
+    def graph_fn():
+      tensor_dict = {
+          fields.InputDataFields.image:
+              tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
+          fields.InputDataFields.groundtruth_boxes:
+              tf.constant(np.array([[.5, .5, 1., 1.]], np.float32))
+      }
+      augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
+      return (augmented_tensor_dict[fields.InputDataFields.image],
+              augmented_tensor_dict[fields.InputDataFields.
+ groundtruth_boxes]) + image, groundtruth_boxes = self.execute_cpu(graph_fn, []) + self.assertAllEqual(image.shape, [20, 20, 3]) + self.assertAllClose(groundtruth_boxes, [[10, 10, 20, 20]]) + + def test_apply_image_and_box_augmentation_with_scores(self): + data_augmentation_options = [ + (preprocessor.resize_image, { + 'new_height': 20, + 'new_width': 20, + 'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR + }), + (preprocessor.scale_boxes_to_pixel_coordinates, {}), + ] + data_augmentation_fn = functools.partial( + inputs.augment_input_data, + data_augmentation_options=data_augmentation_options) + def graph_fn(): + tensor_dict = { + fields.InputDataFields.image: + tf.constant(np.random.rand(10, 10, 3).astype(np.float32)), + fields.InputDataFields.groundtruth_boxes: + tf.constant(np.array([[.5, .5, 1., 1.]], np.float32)), + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([1.0], np.float32)), + fields.InputDataFields.groundtruth_weights: + tf.constant(np.array([0.8], np.float32)), + } + augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict) + return (augmented_tensor_dict[fields.InputDataFields.image], + augmented_tensor_dict[fields.InputDataFields.groundtruth_boxes], + augmented_tensor_dict[fields.InputDataFields.groundtruth_classes], + augmented_tensor_dict[fields.InputDataFields.groundtruth_weights]) + (image, groundtruth_boxes, + groundtruth_classes, groundtruth_weights) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(image.shape, [20, 20, 3]) + self.assertAllClose(groundtruth_boxes, [[10, 10, 20, 20]]) + self.assertAllClose(groundtruth_classes.shape, [1.0]) + self.assertAllClose(groundtruth_weights, [0.8]) + + def test_include_masks_in_data_augmentation(self): + data_augmentation_options = [ + (preprocessor.resize_image, { + 'new_height': 20, + 'new_width': 20, + 'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR + }) + ] + data_augmentation_fn = functools.partial( + inputs.augment_input_data, + data_augmentation_options=data_augmentation_options) + def graph_fn(): + tensor_dict = { + fields.InputDataFields.image: + tf.constant(np.random.rand(10, 10, 3).astype(np.float32)), + fields.InputDataFields.groundtruth_instance_masks: + tf.constant(np.zeros([2, 10, 10], np.uint8)) + } + augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict) + return (augmented_tensor_dict[fields.InputDataFields.image], + augmented_tensor_dict[fields.InputDataFields. 
+ groundtruth_instance_masks]) + image, masks = self.execute_cpu(graph_fn, []) + self.assertAllEqual(image.shape, [20, 20, 3]) + self.assertAllEqual(masks.shape, [2, 20, 20]) + + def test_include_keypoints_in_data_augmentation(self): + data_augmentation_options = [ + (preprocessor.resize_image, { + 'new_height': 20, + 'new_width': 20, + 'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR + }), + (preprocessor.scale_boxes_to_pixel_coordinates, {}), + ] + data_augmentation_fn = functools.partial( + inputs.augment_input_data, + data_augmentation_options=data_augmentation_options) + def graph_fn(): + tensor_dict = { + fields.InputDataFields.image: + tf.constant(np.random.rand(10, 10, 3).astype(np.float32)), + fields.InputDataFields.groundtruth_boxes: + tf.constant(np.array([[.5, .5, 1., 1.]], np.float32)), + fields.InputDataFields.groundtruth_keypoints: + tf.constant(np.array([[[0.5, 1.0], [0.5, 0.5]]], np.float32)) + } + augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict) + return (augmented_tensor_dict[fields.InputDataFields.image], + augmented_tensor_dict[fields.InputDataFields.groundtruth_boxes], + augmented_tensor_dict[fields.InputDataFields. + groundtruth_keypoints]) + image, boxes, keypoints = self.execute_cpu(graph_fn, []) + self.assertAllEqual(image.shape, [20, 20, 3]) + self.assertAllClose(boxes, [[10, 10, 20, 20]]) + self.assertAllClose(keypoints, [[[10, 20], [10, 10]]]) + + +def _fake_model_preprocessor_fn(image): + return (image, tf.expand_dims(tf.shape(image)[1:], axis=0)) + + +def _fake_image_resizer_fn(image, mask): + return (image, mask, tf.shape(image)) + + +def _fake_resize50_preprocess_fn(image): + image = image[0] + image, shape = preprocessor.resize_to_range( + image, min_dimension=50, max_dimension=50, pad_to_max_dimension=True) + + return tf.expand_dims(image, 0), tf.expand_dims(shape, axis=0) + + +class DataTransformationFnTest(test_case.TestCase, parameterized.TestCase): + + def test_combine_additional_channels_if_present(self): + image = np.random.rand(4, 4, 3).astype(np.float32) + additional_channels = np.random.rand(4, 4, 2).astype(np.float32) + def graph_fn(image, additional_channels): + tensor_dict = { + fields.InputDataFields.image: image, + fields.InputDataFields.image_additional_channels: additional_channels, + fields.InputDataFields.groundtruth_classes: + tf.constant([1, 1], tf.int32) + } + + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_model_preprocessor_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=1) + out_tensors = input_transformation_fn(tensor_dict=tensor_dict) + return out_tensors[fields.InputDataFields.image] + out_image = self.execute_cpu(graph_fn, [image, additional_channels]) + self.assertAllEqual(out_image.dtype, tf.float32) + self.assertAllEqual(out_image.shape, [4, 4, 5]) + self.assertAllClose(out_image, np.concatenate((image, additional_channels), + axis=2)) + + def test_use_multiclass_scores_when_present(self): + def graph_fn(): + tensor_dict = { + fields.InputDataFields.image: tf.constant(np.random.rand(4, 4, 3). 
+ astype(np.float32)), + fields.InputDataFields.groundtruth_boxes: + tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], + np.float32)), + fields.InputDataFields.multiclass_scores: + tf.constant(np.array([0.2, 0.3, 0.5, 0.1, 0.6, 0.3], np.float32)), + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([1, 2], np.int32)) + } + + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_model_preprocessor_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=3, use_multiclass_scores=True) + transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict) + return transformed_inputs[fields.InputDataFields.groundtruth_classes] + groundtruth_classes = self.execute_cpu(graph_fn, []) + self.assertAllClose( + np.array([[0.2, 0.3, 0.5], [0.1, 0.6, 0.3]], np.float32), + groundtruth_classes) + + @unittest.skipIf(tf_version.is_tf2(), ('Skipping due to different behaviour ' + 'in TF 2.X')) + def test_use_multiclass_scores_when_not_present(self): + def graph_fn(): + zero_num_elements = tf.random.uniform([], minval=0, maxval=1, + dtype=tf.int32) + tensor_dict = { + fields.InputDataFields.image: + tf.constant(np.random.rand(4, 4, 3).astype(np.float32)), + fields.InputDataFields.groundtruth_boxes: + tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], + np.float32)), + fields.InputDataFields.multiclass_scores: tf.zeros(zero_num_elements), + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([1, 2], np.int32)) + } + + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_model_preprocessor_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=3, use_multiclass_scores=True) + + transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict) + return transformed_inputs[fields.InputDataFields.groundtruth_classes] + groundtruth_classes = self.execute_cpu(graph_fn, []) + self.assertAllClose( + np.array([[0, 1, 0], [0, 0, 1]], np.float32), + groundtruth_classes) + + @parameterized.parameters( + {'labeled_classes': [1, 2]}, + {'labeled_classes': []}, + {'labeled_classes': [1, -1, 2]} # -1 denotes an unrecognized class + ) + def test_use_labeled_classes(self, labeled_classes): + + def compute_fn(image, groundtruth_boxes, groundtruth_classes, + groundtruth_labeled_classes): + tensor_dict = { + fields.InputDataFields.image: + image, + fields.InputDataFields.groundtruth_boxes: + groundtruth_boxes, + fields.InputDataFields.groundtruth_classes: + groundtruth_classes, + fields.InputDataFields.groundtruth_labeled_classes: + groundtruth_labeled_classes + } + + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_model_preprocessor_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=3) + return input_transformation_fn(tensor_dict=tensor_dict) + + image = np.random.rand(4, 4, 3).astype(np.float32) + groundtruth_boxes = np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], np.float32) + groundtruth_classes = np.array([1, 2], np.int32) + groundtruth_labeled_classes = np.array(labeled_classes, np.int32) + + transformed_inputs = self.execute_cpu(compute_fn, [ + image, groundtruth_boxes, groundtruth_classes, + groundtruth_labeled_classes + ]) + + if labeled_classes == [1, 2] or labeled_classes == [1, -1, 2]: + transformed_labeled_classes = [1, 1, 0] + elif not labeled_classes: + transformed_labeled_classes = [1, 1, 1] + else: + logging.exception('Unexpected labeled_classes %r', labeled_classes) + + self.assertAllEqual( + 
np.array(transformed_labeled_classes, np.float32), + transformed_inputs[fields.InputDataFields.groundtruth_labeled_classes]) + + def test_returns_correct_class_label_encodings(self): + def graph_fn(): + tensor_dict = { + fields.InputDataFields.image: + tf.constant(np.random.rand(4, 4, 3).astype(np.float32)), + fields.InputDataFields.groundtruth_boxes: + tf.constant(np.array([[0, 0, 1, 1], [.5, .5, 1, 1]], np.float32)), + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([3, 1], np.int32)) + } + num_classes = 3 + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_model_preprocessor_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=num_classes) + transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict) + return (transformed_inputs[fields.InputDataFields.groundtruth_classes], + transformed_inputs[fields.InputDataFields. + groundtruth_confidences]) + (groundtruth_classes, groundtruth_confidences) = self.execute_cpu(graph_fn, + []) + self.assertAllClose(groundtruth_classes, [[0, 0, 1], [1, 0, 0]]) + self.assertAllClose(groundtruth_confidences, [[0, 0, 1], [1, 0, 0]]) + + def test_returns_correct_labels_with_unrecognized_class(self): + def graph_fn(): + tensor_dict = { + fields.InputDataFields.image: + tf.constant(np.random.rand(4, 4, 3).astype(np.float32)), + fields.InputDataFields.groundtruth_boxes: + tf.constant( + np.array([[0, 0, 1, 1], [.2, .2, 4, 4], [.5, .5, 1, 1]], + np.float32)), + fields.InputDataFields.groundtruth_area: + tf.constant(np.array([.5, .4, .3])), + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([3, -1, 1], np.int32)), + fields.InputDataFields.groundtruth_keypoints: + tf.constant( + np.array([[[.1, .1]], [[.2, .2]], [[.5, .5]]], + np.float32)), + fields.InputDataFields.groundtruth_keypoint_visibilities: + tf.constant([[True, True], [False, False], [True, True]]), + fields.InputDataFields.groundtruth_instance_masks: + tf.constant(np.random.rand(3, 4, 4).astype(np.float32)), + fields.InputDataFields.groundtruth_is_crowd: + tf.constant([False, True, False]), + fields.InputDataFields.groundtruth_difficult: + tf.constant(np.array([0, 0, 1], np.int32)) + } + + num_classes = 3 + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_model_preprocessor_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=num_classes) + transformed_inputs = input_transformation_fn(tensor_dict) + return (transformed_inputs[fields.InputDataFields.groundtruth_classes], + transformed_inputs[fields.InputDataFields.num_groundtruth_boxes], + transformed_inputs[fields.InputDataFields.groundtruth_area], + transformed_inputs[fields.InputDataFields. + groundtruth_confidences], + transformed_inputs[fields.InputDataFields.groundtruth_boxes], + transformed_inputs[fields.InputDataFields.groundtruth_keypoints], + transformed_inputs[fields.InputDataFields. + groundtruth_keypoint_visibilities], + transformed_inputs[fields.InputDataFields. 
+ groundtruth_instance_masks], + transformed_inputs[fields.InputDataFields.groundtruth_is_crowd], + transformed_inputs[fields.InputDataFields.groundtruth_difficult]) + (groundtruth_classes, num_groundtruth_boxes, groundtruth_area, + groundtruth_confidences, groundtruth_boxes, groundtruth_keypoints, + groundtruth_keypoint_visibilities, groundtruth_instance_masks, + groundtruth_is_crowd, groundtruth_difficult) = self.execute_cpu(graph_fn, + []) + + self.assertAllClose(groundtruth_classes, [[0, 0, 1], [1, 0, 0]]) + self.assertAllEqual(num_groundtruth_boxes, 2) + self.assertAllClose(groundtruth_area, [.5, .3]) + self.assertAllEqual(groundtruth_confidences, [[0, 0, 1], [1, 0, 0]]) + self.assertAllClose(groundtruth_boxes, [[0, 0, 1, 1], [.5, .5, 1, 1]]) + self.assertAllClose(groundtruth_keypoints, [[[.1, .1]], [[.5, .5]]]) + self.assertAllEqual(groundtruth_keypoint_visibilities, + [[True, True], [True, True]]) + self.assertAllEqual(groundtruth_instance_masks.shape, [2, 4, 4]) + self.assertAllEqual(groundtruth_is_crowd, [False, False]) + self.assertAllEqual(groundtruth_difficult, [0, 1]) + + def test_returns_correct_merged_boxes(self): + def graph_fn(): + tensor_dict = { + fields.InputDataFields.image: + tf.constant(np.random.rand(4, 4, 3).astype(np.float32)), + fields.InputDataFields.groundtruth_boxes: + tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], + np.float32)), + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([3, 1], np.int32)) + } + + num_classes = 3 + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_model_preprocessor_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=num_classes, + merge_multiple_boxes=True) + transformed_inputs = input_transformation_fn(tensor_dict) + return (transformed_inputs[fields.InputDataFields.groundtruth_boxes], + transformed_inputs[fields.InputDataFields.groundtruth_classes], + transformed_inputs[fields.InputDataFields. + groundtruth_confidences], + transformed_inputs[fields.InputDataFields.num_groundtruth_boxes]) + (groundtruth_boxes, groundtruth_classes, groundtruth_confidences, + num_groundtruth_boxes) = self.execute_cpu(graph_fn, []) + self.assertAllClose( + groundtruth_boxes, + [[.5, .5, 1., 1.]]) + self.assertAllClose( + groundtruth_classes, + [[1, 0, 1]]) + self.assertAllClose( + groundtruth_confidences, + [[1, 0, 1]]) + self.assertAllClose( + num_groundtruth_boxes, + 1) + + def test_returns_correct_groundtruth_confidences_when_input_present(self): + def graph_fn(): + tensor_dict = { + fields.InputDataFields.image: + tf.constant(np.random.rand(4, 4, 3).astype(np.float32)), + fields.InputDataFields.groundtruth_boxes: + tf.constant(np.array([[0, 0, 1, 1], [.5, .5, 1, 1]], np.float32)), + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([3, 1], np.int32)), + fields.InputDataFields.groundtruth_confidences: + tf.constant(np.array([1.0, -1.0], np.float32)) + } + num_classes = 3 + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_model_preprocessor_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=num_classes) + transformed_inputs = input_transformation_fn(tensor_dict) + return (transformed_inputs[fields.InputDataFields.groundtruth_classes], + transformed_inputs[fields.InputDataFields. 
+ groundtruth_confidences]) + groundtruth_classes, groundtruth_confidences = self.execute_cpu(graph_fn, + []) + self.assertAllClose( + groundtruth_classes, + [[0, 0, 1], [1, 0, 0]]) + self.assertAllClose( + groundtruth_confidences, + [[0, 0, 1], [-1, 0, 0]]) + + def test_returns_resized_masks(self): + def graph_fn(): + tensor_dict = { + fields.InputDataFields.image: + tf.constant(np.random.rand(4, 4, 3).astype(np.float32)), + fields.InputDataFields.groundtruth_instance_masks: + tf.constant(np.random.rand(2, 4, 4).astype(np.float32)), + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([3, 1], np.int32)), + fields.InputDataFields.original_image_spatial_shape: + tf.constant(np.array([4, 4], np.int32)) + } + + def fake_image_resizer_fn(image, masks=None): + resized_image = tf.image.resize_images(image, [8, 8]) + results = [resized_image] + if masks is not None: + resized_masks = tf.transpose( + tf.image.resize_images(tf.transpose(masks, [1, 2, 0]), [8, 8]), + [2, 0, 1]) + results.append(resized_masks) + results.append(tf.shape(resized_image)) + return results + + num_classes = 3 + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_model_preprocessor_fn, + image_resizer_fn=fake_image_resizer_fn, + num_classes=num_classes, + retain_original_image=True) + transformed_inputs = input_transformation_fn(tensor_dict) + return (transformed_inputs[fields.InputDataFields.original_image], + transformed_inputs[fields.InputDataFields. + original_image_spatial_shape], + transformed_inputs[fields.InputDataFields. + groundtruth_instance_masks]) + (original_image, original_image_shape, + groundtruth_instance_masks) = self.execute_cpu(graph_fn, []) + self.assertEqual(original_image.dtype, np.uint8) + self.assertAllEqual(original_image_shape, [4, 4]) + self.assertAllEqual(original_image.shape, [8, 8, 3]) + self.assertAllEqual(groundtruth_instance_masks.shape, [2, 8, 8]) + + def test_applies_model_preprocess_fn_to_image_tensor(self): + np_image = np.random.randint(256, size=(4, 4, 3)) + def graph_fn(image): + tensor_dict = { + fields.InputDataFields.image: image, + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([3, 1], np.int32)) + } + + def fake_model_preprocessor_fn(image): + return (image / 255., tf.expand_dims(tf.shape(image)[1:], axis=0)) + + num_classes = 3 + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=fake_model_preprocessor_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=num_classes) + transformed_inputs = input_transformation_fn(tensor_dict) + return (transformed_inputs[fields.InputDataFields.image], + transformed_inputs[fields.InputDataFields.true_image_shape]) + image, true_image_shape = self.execute_cpu(graph_fn, [np_image]) + self.assertAllClose(image, np_image / 255.) 
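+    # true_image_shape is reported by the fake preprocess fn above as the
+    # height/width/channels of the preprocessed (unpadded) image.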
+ self.assertAllClose(true_image_shape, [4, 4, 3]) + + def test_applies_data_augmentation_fn_to_tensor_dict(self): + np_image = np.random.randint(256, size=(4, 4, 3)) + def graph_fn(image): + tensor_dict = { + fields.InputDataFields.image: image, + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([3, 1], np.int32)) + } + + def add_one_data_augmentation_fn(tensor_dict): + return {key: value + 1 for key, value in tensor_dict.items()} + + num_classes = 4 + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_model_preprocessor_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=num_classes, + data_augmentation_fn=add_one_data_augmentation_fn) + transformed_inputs = input_transformation_fn(tensor_dict) + return (transformed_inputs[fields.InputDataFields.image], + transformed_inputs[fields.InputDataFields.groundtruth_classes]) + image, groundtruth_classes = self.execute_cpu(graph_fn, [np_image]) + self.assertAllEqual(image, np_image + 1) + self.assertAllEqual( + groundtruth_classes, + [[0, 0, 0, 1], [0, 1, 0, 0]]) + + def test_applies_data_augmentation_fn_before_model_preprocess_fn(self): + np_image = np.random.randint(256, size=(4, 4, 3)) + def graph_fn(image): + tensor_dict = { + fields.InputDataFields.image: image, + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([3, 1], np.int32)) + } + + def mul_two_model_preprocessor_fn(image): + return (image * 2, tf.expand_dims(tf.shape(image)[1:], axis=0)) + + def add_five_to_image_data_augmentation_fn(tensor_dict): + tensor_dict[fields.InputDataFields.image] += 5 + return tensor_dict + + num_classes = 4 + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=mul_two_model_preprocessor_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=num_classes, + data_augmentation_fn=add_five_to_image_data_augmentation_fn) + transformed_inputs = input_transformation_fn(tensor_dict) + return transformed_inputs[fields.InputDataFields.image] + image = self.execute_cpu(graph_fn, [np_image]) + self.assertAllEqual(image, (np_image + 5) * 2) + + def test_resize_with_padding(self): + def graph_fn(): + tensor_dict = { + fields.InputDataFields.image: + tf.constant(np.random.rand(100, 50, 3).astype(np.float32)), + fields.InputDataFields.groundtruth_boxes: + tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, .5, .5]], + np.float32)), + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([1, 2], np.int32)), + fields.InputDataFields.groundtruth_keypoints: + tf.constant([[[0.1, 0.2]], [[0.3, 0.4]]]), + } + + num_classes = 3 + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_resize50_preprocess_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=num_classes,) + transformed_inputs = input_transformation_fn(tensor_dict) + return (transformed_inputs[fields.InputDataFields.groundtruth_boxes], + transformed_inputs[fields.InputDataFields.groundtruth_keypoints]) + groundtruth_boxes, groundtruth_keypoints = self.execute_cpu(graph_fn, []) + self.assertAllClose( + groundtruth_boxes, + [[.5, .25, 1., .5], [.0, .0, .5, .25]]) + self.assertAllClose( + groundtruth_keypoints, + [[[.1, .1]], [[.3, .2]]]) + + def test_groundtruth_keypoint_weights(self): + def graph_fn(): + tensor_dict = { + fields.InputDataFields.image: + tf.constant(np.random.rand(100, 50, 3).astype(np.float32)), + fields.InputDataFields.groundtruth_boxes: + tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, 
.5, .5]], + np.float32)), + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([1, 2], np.int32)), + fields.InputDataFields.groundtruth_keypoints: + tf.constant([[[0.1, 0.2], [0.3, 0.4]], + [[0.5, 0.6], [0.7, 0.8]]]), + fields.InputDataFields.groundtruth_keypoint_visibilities: + tf.constant([[True, False], [True, True]]), + } + + num_classes = 3 + keypoint_type_weight = [1.0, 2.0] + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_resize50_preprocess_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=num_classes, + keypoint_type_weight=keypoint_type_weight) + transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict) + return (transformed_inputs[fields.InputDataFields.groundtruth_keypoints], + transformed_inputs[fields.InputDataFields. + groundtruth_keypoint_weights]) + + groundtruth_keypoints, groundtruth_keypoint_weights = self.execute_cpu( + graph_fn, []) + self.assertAllClose( + groundtruth_keypoints, + [[[0.1, 0.1], [0.3, 0.2]], + [[0.5, 0.3], [0.7, 0.4]]]) + self.assertAllClose( + groundtruth_keypoint_weights, + [[1.0, 0.0], [1.0, 2.0]]) + + def test_groundtruth_keypoint_weights_default(self): + def graph_fn(): + tensor_dict = { + fields.InputDataFields.image: + tf.constant(np.random.rand(100, 50, 3).astype(np.float32)), + fields.InputDataFields.groundtruth_boxes: + tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, .5, .5]], + np.float32)), + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([1, 2], np.int32)), + fields.InputDataFields.groundtruth_keypoints: + tf.constant([[[0.1, 0.2], [0.3, 0.4]], + [[0.5, 0.6], [0.7, 0.8]]]), + } + + num_classes = 3 + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_resize50_preprocess_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=num_classes) + transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict) + return (transformed_inputs[fields.InputDataFields.groundtruth_keypoints], + transformed_inputs[fields.InputDataFields. 
+ groundtruth_keypoint_weights]) + groundtruth_keypoints, groundtruth_keypoint_weights = self.execute_cpu( + graph_fn, []) + self.assertAllClose( + groundtruth_keypoints, + [[[0.1, 0.1], [0.3, 0.2]], + [[0.5, 0.3], [0.7, 0.4]]]) + self.assertAllClose( + groundtruth_keypoint_weights, + [[1.0, 1.0], [1.0, 1.0]]) + + +class PadInputDataToStaticShapesFnTest(test_case.TestCase): + + def test_pad_images_boxes_and_classes(self): + input_tensor_dict = { + fields.InputDataFields.image: + tf.random.uniform([3, 3, 3]), + fields.InputDataFields.groundtruth_boxes: + tf.random.uniform([2, 4]), + fields.InputDataFields.groundtruth_classes: + tf.random.uniform([2, 3], minval=0, maxval=2, dtype=tf.int32), + fields.InputDataFields.true_image_shape: + tf.constant([3, 3, 3]), + fields.InputDataFields.original_image_spatial_shape: + tf.constant([3, 3]) + } + padded_tensor_dict = inputs.pad_input_data_to_static_shapes( + tensor_dict=input_tensor_dict, + max_num_boxes=3, + num_classes=3, + spatial_image_shape=[5, 6]) + + self.assertAllEqual( + padded_tensor_dict[fields.InputDataFields.image].shape.as_list(), + [5, 6, 3]) + self.assertAllEqual( + padded_tensor_dict[fields.InputDataFields.true_image_shape] + .shape.as_list(), [3]) + self.assertAllEqual( + padded_tensor_dict[fields.InputDataFields.original_image_spatial_shape] + .shape.as_list(), [2]) + self.assertAllEqual( + padded_tensor_dict[fields.InputDataFields.groundtruth_boxes] + .shape.as_list(), [3, 4]) + self.assertAllEqual( + padded_tensor_dict[fields.InputDataFields.groundtruth_classes] + .shape.as_list(), [3, 3]) + + def test_clip_boxes_and_classes(self): + def graph_fn(): + input_tensor_dict = { + fields.InputDataFields.groundtruth_boxes: + tf.random.uniform([5, 4]), + fields.InputDataFields.groundtruth_classes: + tf.random.uniform([2, 3], maxval=10, dtype=tf.int32), + fields.InputDataFields.num_groundtruth_boxes: + tf.constant(5) + } + padded_tensor_dict = inputs.pad_input_data_to_static_shapes( + tensor_dict=input_tensor_dict, + max_num_boxes=3, + num_classes=3, + spatial_image_shape=[5, 6]) + return (padded_tensor_dict[fields.InputDataFields.groundtruth_boxes], + padded_tensor_dict[fields.InputDataFields.groundtruth_classes], + padded_tensor_dict[fields.InputDataFields.num_groundtruth_boxes]) + (groundtruth_boxes, groundtruth_classes, + num_groundtruth_boxes) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(groundtruth_boxes.shape, [3, 4]) + self.assertAllEqual(groundtruth_classes.shape, [3, 3]) + self.assertEqual(num_groundtruth_boxes, 3) + + def test_images_and_additional_channels(self): + input_tensor_dict = { + fields.InputDataFields.image: + test_utils.image_with_dynamic_shape(4, 3, 5), + fields.InputDataFields.image_additional_channels: + test_utils.image_with_dynamic_shape(4, 3, 2), + } + padded_tensor_dict = inputs.pad_input_data_to_static_shapes( + tensor_dict=input_tensor_dict, + max_num_boxes=3, + num_classes=3, + spatial_image_shape=[5, 6]) + + # pad_input_data_to_static_shape assumes that image is already concatenated + # with additional channels. 
+ self.assertAllEqual( + padded_tensor_dict[fields.InputDataFields.image].shape.as_list(), + [5, 6, 5]) + self.assertAllEqual( + padded_tensor_dict[fields.InputDataFields.image_additional_channels] + .shape.as_list(), [5, 6, 2]) + + def test_images_and_additional_channels_errors(self): + input_tensor_dict = { + fields.InputDataFields.image: + test_utils.image_with_dynamic_shape(10, 10, 3), + fields.InputDataFields.image_additional_channels: + test_utils.image_with_dynamic_shape(10, 10, 2), + fields.InputDataFields.original_image: + test_utils.image_with_dynamic_shape(10, 10, 3), + } + with self.assertRaises(ValueError): + _ = inputs.pad_input_data_to_static_shapes( + tensor_dict=input_tensor_dict, + max_num_boxes=3, + num_classes=3, + spatial_image_shape=[5, 6]) + + def test_gray_images(self): + input_tensor_dict = { + fields.InputDataFields.image: + test_utils.image_with_dynamic_shape(4, 4, 1), + } + padded_tensor_dict = inputs.pad_input_data_to_static_shapes( + tensor_dict=input_tensor_dict, + max_num_boxes=3, + num_classes=3, + spatial_image_shape=[5, 6]) + + self.assertAllEqual( + padded_tensor_dict[fields.InputDataFields.image].shape.as_list(), + [5, 6, 1]) + + def test_gray_images_and_additional_channels(self): + input_tensor_dict = { + fields.InputDataFields.image: + test_utils.image_with_dynamic_shape(4, 4, 3), + fields.InputDataFields.image_additional_channels: + test_utils.image_with_dynamic_shape(4, 4, 2), + } + # pad_input_data_to_static_shape assumes that image is already concatenated + # with additional channels. + padded_tensor_dict = inputs.pad_input_data_to_static_shapes( + tensor_dict=input_tensor_dict, + max_num_boxes=3, + num_classes=3, + spatial_image_shape=[5, 6]) + + self.assertAllEqual( + padded_tensor_dict[fields.InputDataFields.image].shape.as_list(), + [5, 6, 3]) + self.assertAllEqual( + padded_tensor_dict[fields.InputDataFields.image_additional_channels] + .shape.as_list(), [5, 6, 2]) + + def test_keypoints(self): + keypoints = test_utils.keypoints_with_dynamic_shape(10, 16, 4) + visibilities = tf.cast(tf.random.uniform(tf.shape(keypoints)[:-1], minval=0, + maxval=2, dtype=tf.int32), tf.bool) + input_tensor_dict = { + fields.InputDataFields.groundtruth_keypoints: + test_utils.keypoints_with_dynamic_shape(10, 16, 4), + fields.InputDataFields.groundtruth_keypoint_visibilities: + visibilities + } + padded_tensor_dict = inputs.pad_input_data_to_static_shapes( + tensor_dict=input_tensor_dict, + max_num_boxes=3, + num_classes=3, + spatial_image_shape=[5, 6]) + + self.assertAllEqual( + padded_tensor_dict[fields.InputDataFields.groundtruth_keypoints] + .shape.as_list(), [3, 16, 4]) + self.assertAllEqual( + padded_tensor_dict[ + fields.InputDataFields.groundtruth_keypoint_visibilities] + .shape.as_list(), [3, 16]) + + def test_context_features(self): + context_memory_size = 8 + context_feature_length = 10 + max_num_context_features = 20 + def graph_fn(): + input_tensor_dict = { + fields.InputDataFields.context_features: + tf.ones([context_memory_size, context_feature_length]), + fields.InputDataFields.context_feature_length: + tf.constant(context_feature_length) + } + padded_tensor_dict = inputs.pad_input_data_to_static_shapes( + tensor_dict=input_tensor_dict, + max_num_boxes=3, + num_classes=3, + spatial_image_shape=[5, 6], + max_num_context_features=max_num_context_features, + context_feature_length=context_feature_length) + + self.assertAllEqual( + padded_tensor_dict[ + fields.InputDataFields.context_features].shape.as_list(), + [max_num_context_features, 
context_feature_length]) + return padded_tensor_dict[fields.InputDataFields.valid_context_size] + + valid_context_size = self.execute_cpu(graph_fn, []) + self.assertEqual(valid_context_size, context_memory_size) + + +class NegativeSizeTest(test_case.TestCase): + """Test for inputs and related funcitons.""" + + def test_negative_size_error(self): + """Test that error is raised for negative size boxes.""" + + def graph_fn(): + tensors = { + fields.InputDataFields.image: tf.zeros((128, 128, 3)), + fields.InputDataFields.groundtruth_classes: + tf.constant([1, 1], tf.int32), + fields.InputDataFields.groundtruth_boxes: + tf.constant([[0.5, 0.5, 0.4, 0.5]], tf.float32) + } + tensors = inputs.transform_input_data( + tensors, _fake_model_preprocessor_fn, _fake_image_resizer_fn, + num_classes=10) + return tensors[fields.InputDataFields.groundtruth_boxes] + with self.assertRaises(tf.errors.InvalidArgumentError): + self.execute_cpu(graph_fn, []) + + def test_negative_size_no_assert(self): + """Test that negative size boxes are filtered out without assert. + + This test simulates the behaviour when we run on TPU and Assert ops are + not supported. + """ + + tensors = { + fields.InputDataFields.image: tf.zeros((128, 128, 3)), + fields.InputDataFields.groundtruth_classes: + tf.constant([1, 1], tf.int32), + fields.InputDataFields.groundtruth_boxes: + tf.constant([[0.5, 0.5, 0.4, 0.5], [0.5, 0.5, 0.6, 0.6]], + tf.float32) + } + + with mock.patch.object(tf, 'Assert') as tf_assert: + tf_assert.return_value = tf.no_op() + tensors = inputs.transform_input_data( + tensors, _fake_model_preprocessor_fn, _fake_image_resizer_fn, + num_classes=10) + + self.assertAllClose(tensors[fields.InputDataFields.groundtruth_boxes], + [[0.5, 0.5, 0.6, 0.6]]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/legacy/__init__.py b/models/research/object_detection/legacy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/object_detection/legacy/eval.py b/models/research/object_detection/legacy/eval.py new file mode 100644 index 0000000000000000000000000000000000000000..9a7d8c430fa1069320a78d39464df179e8f6d654 --- /dev/null +++ b/models/research/object_detection/legacy/eval.py @@ -0,0 +1,142 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Evaluation executable for detection models. + +This executable is used to evaluate DetectionModels. There are two ways of +configuring the eval job. + +1) A single pipeline_pb2.TrainEvalPipelineConfig file maybe specified instead. +In this mode, the --eval_training_data flag may be given to force the pipeline +to evaluate on training data instead. 
+ +Example usage: + ./eval \ + --logtostderr \ + --checkpoint_dir=path/to/checkpoint_dir \ + --eval_dir=path/to/eval_dir \ + --pipeline_config_path=pipeline_config.pbtxt + +2) Three configuration files may be provided: a model_pb2.DetectionModel +configuration file to define what type of DetectionModel is being evaluated, an +input_reader_pb2.InputReader file to specify what data the model is evaluating +and an eval_pb2.EvalConfig file to configure evaluation parameters. + +Example usage: + ./eval \ + --logtostderr \ + --checkpoint_dir=path/to/checkpoint_dir \ + --eval_dir=path/to/eval_dir \ + --eval_config_path=eval_config.pbtxt \ + --model_config_path=model_config.pbtxt \ + --input_config_path=eval_input_config.pbtxt +""" +import functools +import os +import tensorflow.compat.v1 as tf +from tensorflow.python.util.deprecation import deprecated +from object_detection.builders import dataset_builder +from object_detection.builders import graph_rewriter_builder +from object_detection.builders import model_builder +from object_detection.legacy import evaluator +from object_detection.utils import config_util +from object_detection.utils import label_map_util + +tf.logging.set_verbosity(tf.logging.INFO) + +flags = tf.app.flags +flags.DEFINE_boolean('eval_training_data', False, + 'If training data should be evaluated for this job.') +flags.DEFINE_string( + 'checkpoint_dir', '', + 'Directory containing checkpoints to evaluate, typically ' + 'set to `train_dir` used in the training job.') +flags.DEFINE_string('eval_dir', '', 'Directory to write eval summaries to.') +flags.DEFINE_string( + 'pipeline_config_path', '', + 'Path to a pipeline_pb2.TrainEvalPipelineConfig config ' + 'file. If provided, other configs are ignored') +flags.DEFINE_string('eval_config_path', '', + 'Path to an eval_pb2.EvalConfig config file.') +flags.DEFINE_string('input_config_path', '', + 'Path to an input_reader_pb2.InputReader config file.') +flags.DEFINE_string('model_config_path', '', + 'Path to a model_pb2.DetectionModel config file.') +flags.DEFINE_boolean( + 'run_once', False, 'Option to only run a single pass of ' + 'evaluation. Overrides the `max_evals` parameter in the ' + 'provided config.') +FLAGS = flags.FLAGS + + +@deprecated(None, 'Use object_detection/model_main.py.') +def main(unused_argv): + assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.' + assert FLAGS.eval_dir, '`eval_dir` is missing.' 
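+ # Create the eval directory if needed and snapshot the config file(s) used
+ # for this run, so the evaluation setup can be reproduced later.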
+ tf.gfile.MakeDirs(FLAGS.eval_dir) + if FLAGS.pipeline_config_path: + configs = config_util.get_configs_from_pipeline_file( + FLAGS.pipeline_config_path) + tf.gfile.Copy( + FLAGS.pipeline_config_path, + os.path.join(FLAGS.eval_dir, 'pipeline.config'), + overwrite=True) + else: + configs = config_util.get_configs_from_multiple_files( + model_config_path=FLAGS.model_config_path, + eval_config_path=FLAGS.eval_config_path, + eval_input_config_path=FLAGS.input_config_path) + for name, config in [('model.config', FLAGS.model_config_path), + ('eval.config', FLAGS.eval_config_path), + ('input.config', FLAGS.input_config_path)]: + tf.gfile.Copy(config, os.path.join(FLAGS.eval_dir, name), overwrite=True) + + model_config = configs['model'] + eval_config = configs['eval_config'] + input_config = configs['eval_input_config'] + if FLAGS.eval_training_data: + input_config = configs['train_input_config'] + + model_fn = functools.partial( + model_builder.build, model_config=model_config, is_training=False) + + def get_next(config): + return dataset_builder.make_initializable_iterator( + dataset_builder.build(config)).get_next() + + create_input_dict_fn = functools.partial(get_next, input_config) + + categories = label_map_util.create_categories_from_labelmap( + input_config.label_map_path) + + if FLAGS.run_once: + eval_config.max_evals = 1 + + graph_rewriter_fn = None + if 'graph_rewriter_config' in configs: + graph_rewriter_fn = graph_rewriter_builder.build( + configs['graph_rewriter_config'], is_training=False) + + evaluator.evaluate( + create_input_dict_fn, + model_fn, + eval_config, + categories, + FLAGS.checkpoint_dir, + FLAGS.eval_dir, + graph_hook_fn=graph_rewriter_fn) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/object_detection/legacy/evaluator.py b/models/research/object_detection/legacy/evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..feeb718876788dd067f9c1026d784aa7a7e15848 --- /dev/null +++ b/models/research/object_detection/legacy/evaluator.py @@ -0,0 +1,298 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Detection model evaluator. + +This file provides a generic evaluation method that can be used to evaluate a +DetectionModel. +""" + +import logging +import tensorflow.compat.v1 as tf + +from object_detection import eval_util +from object_detection.core import prefetcher +from object_detection.core import standard_fields as fields +from object_detection.metrics import coco_evaluation +from object_detection.utils import object_detection_evaluation + +# A dictionary of metric names to classes that implement the metric. The classes +# in the dictionary must implement +# utils.object_detection_evaluation.DetectionEvaluator interface. 
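+# For example, an eval_pb2.EvalConfig whose metrics_set contains
+# 'coco_detection_metrics' is mapped by get_evaluators() below to
+# coco_evaluation.CocoDetectionEvaluator; when metrics_set is empty,
+# EVAL_DEFAULT_METRIC is used instead.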
+EVAL_METRICS_CLASS_DICT = { + 'pascal_voc_detection_metrics': + object_detection_evaluation.PascalDetectionEvaluator, + 'weighted_pascal_voc_detection_metrics': + object_detection_evaluation.WeightedPascalDetectionEvaluator, + 'pascal_voc_instance_segmentation_metrics': + object_detection_evaluation.PascalInstanceSegmentationEvaluator, + 'weighted_pascal_voc_instance_segmentation_metrics': + object_detection_evaluation.WeightedPascalInstanceSegmentationEvaluator, + 'oid_V2_detection_metrics': + object_detection_evaluation.OpenImagesDetectionEvaluator, + # DEPRECATED: please use oid_V2_detection_metrics instead + 'open_images_V2_detection_metrics': + object_detection_evaluation.OpenImagesDetectionEvaluator, + 'coco_detection_metrics': + coco_evaluation.CocoDetectionEvaluator, + 'coco_mask_metrics': + coco_evaluation.CocoMaskEvaluator, + 'oid_challenge_detection_metrics': + object_detection_evaluation.OpenImagesDetectionChallengeEvaluator, + # DEPRECATED: please use oid_challenge_detection_metrics instead + 'oid_challenge_object_detection_metrics': + object_detection_evaluation.OpenImagesDetectionChallengeEvaluator, + 'oid_challenge_segmentation_metrics': + object_detection_evaluation + .OpenImagesInstanceSegmentationChallengeEvaluator, +} + +EVAL_DEFAULT_METRIC = 'pascal_voc_detection_metrics' + + +def _extract_predictions_and_losses(model, + create_input_dict_fn, + ignore_groundtruth=False): + """Constructs tensorflow detection graph and returns output tensors. + + Args: + model: model to perform predictions with. + create_input_dict_fn: function to create input tensor dictionaries. + ignore_groundtruth: whether groundtruth should be ignored. + + Returns: + prediction_groundtruth_dict: A dictionary with postprocessed tensors (keyed + by standard_fields.DetectionResultsFields) and optional groundtruth + tensors (keyed by standard_fields.InputDataFields). + losses_dict: A dictionary containing detection losses. This is empty when + ignore_groundtruth is true. 
+ """ + input_dict = create_input_dict_fn() + prefetch_queue = prefetcher.prefetch(input_dict, capacity=500) + input_dict = prefetch_queue.dequeue() + original_image = tf.expand_dims(input_dict[fields.InputDataFields.image], 0) + preprocessed_image, true_image_shapes = model.preprocess( + tf.cast(original_image, dtype=tf.float32)) + prediction_dict = model.predict(preprocessed_image, true_image_shapes) + detections = model.postprocess(prediction_dict, true_image_shapes) + + groundtruth = None + losses_dict = {} + if not ignore_groundtruth: + groundtruth = { + fields.InputDataFields.groundtruth_boxes: + input_dict[fields.InputDataFields.groundtruth_boxes], + fields.InputDataFields.groundtruth_classes: + input_dict[fields.InputDataFields.groundtruth_classes], + fields.InputDataFields.groundtruth_area: + input_dict[fields.InputDataFields.groundtruth_area], + fields.InputDataFields.groundtruth_is_crowd: + input_dict[fields.InputDataFields.groundtruth_is_crowd], + fields.InputDataFields.groundtruth_difficult: + input_dict[fields.InputDataFields.groundtruth_difficult] + } + if fields.InputDataFields.groundtruth_group_of in input_dict: + groundtruth[fields.InputDataFields.groundtruth_group_of] = ( + input_dict[fields.InputDataFields.groundtruth_group_of]) + groundtruth_masks_list = None + if fields.DetectionResultFields.detection_masks in detections: + groundtruth[fields.InputDataFields.groundtruth_instance_masks] = ( + input_dict[fields.InputDataFields.groundtruth_instance_masks]) + groundtruth_masks_list = [ + input_dict[fields.InputDataFields.groundtruth_instance_masks]] + groundtruth_keypoints_list = None + if fields.DetectionResultFields.detection_keypoints in detections: + groundtruth[fields.InputDataFields.groundtruth_keypoints] = ( + input_dict[fields.InputDataFields.groundtruth_keypoints]) + groundtruth_keypoints_list = [ + input_dict[fields.InputDataFields.groundtruth_keypoints]] + label_id_offset = 1 + model.provide_groundtruth( + [input_dict[fields.InputDataFields.groundtruth_boxes]], + [tf.one_hot(input_dict[fields.InputDataFields.groundtruth_classes] + - label_id_offset, depth=model.num_classes)], + groundtruth_masks_list, groundtruth_keypoints_list) + losses_dict.update(model.loss(prediction_dict, true_image_shapes)) + + result_dict = eval_util.result_dict_for_single_example( + original_image, + input_dict[fields.InputDataFields.source_id], + detections, + groundtruth, + class_agnostic=( + fields.DetectionResultFields.detection_classes not in detections), + scale_to_absolute=True) + return result_dict, losses_dict + + +def get_evaluators(eval_config, categories): + """Returns the evaluator class according to eval_config, valid for categories. + + Args: + eval_config: evaluation configurations. + categories: a list of categories to evaluate. + Returns: + An list of instances of DetectionEvaluator. + + Raises: + ValueError: if metric is not in the metric class dictionary. 
+ """ + eval_metric_fn_keys = eval_config.metrics_set + if not eval_metric_fn_keys: + eval_metric_fn_keys = [EVAL_DEFAULT_METRIC] + evaluators_list = [] + for eval_metric_fn_key in eval_metric_fn_keys: + if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT: + raise ValueError('Metric not found: {}'.format(eval_metric_fn_key)) + if eval_metric_fn_key == 'oid_challenge_object_detection_metrics': + logging.warning( + 'oid_challenge_object_detection_metrics is deprecated; ' + 'use oid_challenge_detection_metrics instead' + ) + if eval_metric_fn_key == 'oid_V2_detection_metrics': + logging.warning( + 'open_images_V2_detection_metrics is deprecated; ' + 'use oid_V2_detection_metrics instead' + ) + evaluators_list.append( + EVAL_METRICS_CLASS_DICT[eval_metric_fn_key](categories=categories)) + return evaluators_list + + +def evaluate(create_input_dict_fn, create_model_fn, eval_config, categories, + checkpoint_dir, eval_dir, graph_hook_fn=None, evaluator_list=None): + """Evaluation function for detection models. + + Args: + create_input_dict_fn: a function to create a tensor input dictionary. + create_model_fn: a function that creates a DetectionModel. + eval_config: a eval_pb2.EvalConfig protobuf. + categories: a list of category dictionaries. Each dict in the list should + have an integer 'id' field and string 'name' field. + checkpoint_dir: directory to load the checkpoints to evaluate from. + eval_dir: directory to write evaluation metrics summary to. + graph_hook_fn: Optional function that is called after the training graph is + completely built. This is helpful to perform additional changes to the + training graph such as optimizing batchnorm. The function should modify + the default graph. + evaluator_list: Optional list of instances of DetectionEvaluator. If not + given, this list of metrics is created according to the eval_config. + + Returns: + metrics: A dictionary containing metric names and values from the latest + run. + """ + + model = create_model_fn() + + if eval_config.ignore_groundtruth and not eval_config.export_path: + logging.fatal('If ignore_groundtruth=True then an export_path is ' + 'required. Aborting!!!') + + tensor_dict, losses_dict = _extract_predictions_and_losses( + model=model, + create_input_dict_fn=create_input_dict_fn, + ignore_groundtruth=eval_config.ignore_groundtruth) + + def _process_batch(tensor_dict, sess, batch_index, counters, + losses_dict=None): + """Evaluates tensors in tensor_dict, losses_dict and visualizes examples. + + This function calls sess.run on tensor_dict, evaluating the original_image + tensor only on the first K examples and visualizing detections overlaid + on this original_image. + + Args: + tensor_dict: a dictionary of tensors + sess: tensorflow session + batch_index: the index of the batch amongst all batches in the run. + counters: a dictionary holding 'success' and 'skipped' fields which can + be updated to keep track of number of successful and failed runs, + respectively. If these fields are not updated, then the success/skipped + counter values shown at the end of evaluation will be incorrect. + losses_dict: Optional dictonary of scalar loss tensors. + + Returns: + result_dict: a dictionary of numpy arrays + result_losses_dict: a dictionary of scalar losses. This is empty if input + losses_dict is None. 
+ """ + try: + if not losses_dict: + losses_dict = {} + result_dict, result_losses_dict = sess.run([tensor_dict, losses_dict]) + counters['success'] += 1 + except tf.errors.InvalidArgumentError: + logging.info('Skipping image') + counters['skipped'] += 1 + return {}, {} + global_step = tf.train.global_step(sess, tf.train.get_global_step()) + if batch_index < eval_config.num_visualizations: + tag = 'image-{}'.format(batch_index) + eval_util.visualize_detection_results( + result_dict, + tag, + global_step, + categories=categories, + summary_dir=eval_dir, + export_dir=eval_config.visualization_export_dir, + show_groundtruth=eval_config.visualize_groundtruth_boxes, + groundtruth_box_visualization_color=eval_config. + groundtruth_box_visualization_color, + min_score_thresh=eval_config.min_score_threshold, + max_num_predictions=eval_config.max_num_boxes_to_visualize, + skip_scores=eval_config.skip_scores, + skip_labels=eval_config.skip_labels, + keep_image_id_for_visualization_export=eval_config. + keep_image_id_for_visualization_export) + return result_dict, result_losses_dict + + if graph_hook_fn: graph_hook_fn() + + variables_to_restore = tf.global_variables() + global_step = tf.train.get_or_create_global_step() + variables_to_restore.append(global_step) + + if eval_config.use_moving_averages: + variable_averages = tf.train.ExponentialMovingAverage(0.0) + variables_to_restore = variable_averages.variables_to_restore() + saver = tf.train.Saver(variables_to_restore) + + def _restore_latest_checkpoint(sess): + latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir) + saver.restore(sess, latest_checkpoint) + + if not evaluator_list: + evaluator_list = get_evaluators(eval_config, categories) + + metrics = eval_util.repeated_checkpoint_run( + tensor_dict=tensor_dict, + summary_dir=eval_dir, + evaluators=evaluator_list, + batch_processor=_process_batch, + checkpoint_dirs=[checkpoint_dir], + variables_to_restore=None, + restore_fn=_restore_latest_checkpoint, + num_batches=eval_config.num_examples, + eval_interval_secs=eval_config.eval_interval_secs, + max_number_of_evaluations=(1 if eval_config.ignore_groundtruth else + eval_config.max_evals + if eval_config.max_evals else None), + master=eval_config.eval_master, + save_graph=eval_config.save_graph, + save_graph_dir=(eval_dir if eval_config.save_graph else ''), + losses_dict=losses_dict, + eval_export_path=eval_config.export_path) + + return metrics diff --git a/models/research/object_detection/legacy/train.py b/models/research/object_detection/legacy/train.py new file mode 100644 index 0000000000000000000000000000000000000000..615773760a3988dc2d7f5177d1d37c0ec8df7e17 --- /dev/null +++ b/models/research/object_detection/legacy/train.py @@ -0,0 +1,186 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""Training executable for detection models. + +This executable is used to train DetectionModels. 
There are two ways of +configuring the training job: + +1) A single pipeline_pb2.TrainEvalPipelineConfig configuration file +can be specified by --pipeline_config_path. + +Example usage: + ./train \ + --logtostderr \ + --train_dir=path/to/train_dir \ + --pipeline_config_path=pipeline_config.pbtxt + +2) Three configuration files can be provided: a model_pb2.DetectionModel +configuration file to define what type of DetectionModel is being trained, an +input_reader_pb2.InputReader file to specify what training data will be used and +a train_pb2.TrainConfig file to configure training parameters. + +Example usage: + ./train \ + --logtostderr \ + --train_dir=path/to/train_dir \ + --model_config_path=model_config.pbtxt \ + --train_config_path=train_config.pbtxt \ + --input_config_path=train_input_config.pbtxt +""" + +import functools +import json +import os +import tensorflow.compat.v1 as tf +from tensorflow.python.util.deprecation import deprecated + + +from object_detection.builders import dataset_builder +from object_detection.builders import graph_rewriter_builder +from object_detection.builders import model_builder +from object_detection.legacy import trainer +from object_detection.utils import config_util + +tf.logging.set_verbosity(tf.logging.INFO) + +flags = tf.app.flags +flags.DEFINE_string('master', '', 'Name of the TensorFlow master to use.') +flags.DEFINE_integer('task', 0, 'task id') +flags.DEFINE_integer('num_clones', 1, 'Number of clones to deploy per worker.') +flags.DEFINE_boolean('clone_on_cpu', False, + 'Force clones to be deployed on CPU. Note that even if ' + 'set to False (allowing ops to run on gpu), some ops may ' + 'still be run on the CPU if they have no GPU kernel.') +flags.DEFINE_integer('worker_replicas', 1, 'Number of worker+trainer ' + 'replicas.') +flags.DEFINE_integer('ps_tasks', 0, + 'Number of parameter server tasks. If None, does not use ' + 'a parameter server.') +flags.DEFINE_string('train_dir', '', + 'Directory to save the checkpoints and training summaries.') + +flags.DEFINE_string('pipeline_config_path', '', + 'Path to a pipeline_pb2.TrainEvalPipelineConfig config ' + 'file. If provided, other configs are ignored') + +flags.DEFINE_string('train_config_path', '', + 'Path to a train_pb2.TrainConfig config file.') +flags.DEFINE_string('input_config_path', '', + 'Path to an input_reader_pb2.InputReader config file.') +flags.DEFINE_string('model_config_path', '', + 'Path to a model_pb2.DetectionModel config file.') + +FLAGS = flags.FLAGS + + +@deprecated(None, 'Use object_detection/model_main.py.') +def main(_): + assert FLAGS.train_dir, '`train_dir` is missing.' 
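+ # Only task 0 (the chief) creates train_dir and copies the config file(s)
+ # into it; the configs themselves are read from the paths given in FLAGS.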
+ if FLAGS.task == 0: tf.gfile.MakeDirs(FLAGS.train_dir) + if FLAGS.pipeline_config_path: + configs = config_util.get_configs_from_pipeline_file( + FLAGS.pipeline_config_path) + if FLAGS.task == 0: + tf.gfile.Copy(FLAGS.pipeline_config_path, + os.path.join(FLAGS.train_dir, 'pipeline.config'), + overwrite=True) + else: + configs = config_util.get_configs_from_multiple_files( + model_config_path=FLAGS.model_config_path, + train_config_path=FLAGS.train_config_path, + train_input_config_path=FLAGS.input_config_path) + if FLAGS.task == 0: + for name, config in [('model.config', FLAGS.model_config_path), + ('train.config', FLAGS.train_config_path), + ('input.config', FLAGS.input_config_path)]: + tf.gfile.Copy(config, os.path.join(FLAGS.train_dir, name), + overwrite=True) + + model_config = configs['model'] + train_config = configs['train_config'] + input_config = configs['train_input_config'] + + model_fn = functools.partial( + model_builder.build, + model_config=model_config, + is_training=True) + + def get_next(config): + return dataset_builder.make_initializable_iterator( + dataset_builder.build(config)).get_next() + + create_input_dict_fn = functools.partial(get_next, input_config) + + env = json.loads(os.environ.get('TF_CONFIG', '{}')) + cluster_data = env.get('cluster', None) + cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None + task_data = env.get('task', None) or {'type': 'master', 'index': 0} + task_info = type('TaskSpec', (object,), task_data) + + # Parameters for a single worker. + ps_tasks = 0 + worker_replicas = 1 + worker_job_name = 'lonely_worker' + task = 0 + is_chief = True + master = '' + + if cluster_data and 'worker' in cluster_data: + # Number of total worker replicas include "worker"s and the "master". + worker_replicas = len(cluster_data['worker']) + 1 + if cluster_data and 'ps' in cluster_data: + ps_tasks = len(cluster_data['ps']) + + if worker_replicas > 1 and ps_tasks < 1: + raise ValueError('At least 1 ps task is needed for distributed training.') + + if worker_replicas >= 1 and ps_tasks > 0: + # Set up distributed training. + server = tf.train.Server(tf.train.ClusterSpec(cluster), protocol='grpc', + job_name=task_info.type, + task_index=task_info.index) + if task_info.type == 'ps': + server.join() + return + + worker_job_name = '%s/task:%d' % (task_info.type, task_info.index) + task = task_info.index + is_chief = (task_info.type == 'master') + master = server.target + + graph_rewriter_fn = None + if 'graph_rewriter_config' in configs: + graph_rewriter_fn = graph_rewriter_builder.build( + configs['graph_rewriter_config'], is_training=True) + + trainer.train( + create_input_dict_fn, + model_fn, + train_config, + master, + task, + FLAGS.num_clones, + worker_replicas, + FLAGS.clone_on_cpu, + ps_tasks, + worker_job_name, + is_chief, + FLAGS.train_dir, + graph_hook_fn=graph_rewriter_fn) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/object_detection/legacy/trainer.py b/models/research/object_detection/legacy/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..21f8973d78ce43b3714480e928c6a2f9008ba623 --- /dev/null +++ b/models/research/object_detection/legacy/trainer.py @@ -0,0 +1,415 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Detection model trainer. + +This file provides a generic training method that can be used to train a +DetectionModel. +""" + +import functools + +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.builders import optimizer_builder +from object_detection.builders import preprocessor_builder +from object_detection.core import batcher +from object_detection.core import preprocessor +from object_detection.core import standard_fields as fields +from object_detection.utils import ops as util_ops +from object_detection.utils import variables_helper +from deployment import model_deploy + + +def create_input_queue(batch_size_per_clone, create_tensor_dict_fn, + batch_queue_capacity, num_batch_queue_threads, + prefetch_queue_capacity, data_augmentation_options): + """Sets up reader, prefetcher and returns input queue. + + Args: + batch_size_per_clone: batch size to use per clone. + create_tensor_dict_fn: function to create tensor dictionary. + batch_queue_capacity: maximum number of elements to store within a queue. + num_batch_queue_threads: number of threads to use for batching. + prefetch_queue_capacity: maximum capacity of the queue used to prefetch + assembled batches. + data_augmentation_options: a list of tuples, where each tuple contains a + data augmentation function and a dictionary containing arguments and their + values (see preprocessor.py). + + Returns: + input queue: a batcher.BatchQueue object holding enqueued tensor_dicts + (which hold images, boxes and targets). To get a batch of tensor_dicts, + call input_queue.Dequeue(). + """ + tensor_dict = create_tensor_dict_fn() + + tensor_dict[fields.InputDataFields.image] = tf.expand_dims( + tensor_dict[fields.InputDataFields.image], 0) + + images = tensor_dict[fields.InputDataFields.image] + float_images = tf.cast(images, dtype=tf.float32) + tensor_dict[fields.InputDataFields.image] = float_images + + include_instance_masks = (fields.InputDataFields.groundtruth_instance_masks + in tensor_dict) + include_keypoints = (fields.InputDataFields.groundtruth_keypoints + in tensor_dict) + include_multiclass_scores = (fields.InputDataFields.multiclass_scores + in tensor_dict) + if data_augmentation_options: + tensor_dict = preprocessor.preprocess( + tensor_dict, data_augmentation_options, + func_arg_map=preprocessor.get_default_func_arg_map( + include_label_weights=True, + include_multiclass_scores=include_multiclass_scores, + include_instance_masks=include_instance_masks, + include_keypoints=include_keypoints)) + + input_queue = batcher.BatchQueue( + tensor_dict, + batch_size=batch_size_per_clone, + batch_queue_capacity=batch_queue_capacity, + num_batch_queue_threads=num_batch_queue_threads, + prefetch_queue_capacity=prefetch_queue_capacity) + return input_queue + + +def get_inputs(input_queue, + num_classes, + merge_multiple_label_boxes=False, + use_multiclass_scores=False): + """Dequeues batch and constructs inputs to object detection model. + + Args: + input_queue: BatchQueue object holding enqueued tensor_dicts. 
+ num_classes: Number of classes. + merge_multiple_label_boxes: Whether to merge boxes with multiple labels + or not. Defaults to false. Merged boxes are represented with a single + box and a k-hot encoding of the multiple labels associated with the + boxes. + use_multiclass_scores: Whether to use multiclass scores instead of + groundtruth_classes. + + Returns: + images: a list of 3-D float tensor of images. + image_keys: a list of string keys for the images. + locations_list: a list of tensors of shape [num_boxes, 4] + containing the corners of the groundtruth boxes. + classes_list: a list of padded one-hot (or K-hot) float32 tensors containing + target classes. + masks_list: a list of 3-D float tensors of shape [num_boxes, image_height, + image_width] containing instance masks for objects if present in the + input_queue. Else returns None. + keypoints_list: a list of 3-D float tensors of shape [num_boxes, + num_keypoints, 2] containing keypoints for objects if present in the + input queue. Else returns None. + weights_lists: a list of 1-D float32 tensors of shape [num_boxes] + containing groundtruth weight for each box. + """ + read_data_list = input_queue.dequeue() + label_id_offset = 1 + def extract_images_and_targets(read_data): + """Extract images and targets from the input dict.""" + image = read_data[fields.InputDataFields.image] + key = '' + if fields.InputDataFields.source_id in read_data: + key = read_data[fields.InputDataFields.source_id] + location_gt = read_data[fields.InputDataFields.groundtruth_boxes] + classes_gt = tf.cast(read_data[fields.InputDataFields.groundtruth_classes], + tf.int32) + classes_gt -= label_id_offset + + if merge_multiple_label_boxes and use_multiclass_scores: + raise ValueError( + 'Using both merge_multiple_label_boxes and use_multiclass_scores is' + 'not supported' + ) + + if merge_multiple_label_boxes: + location_gt, classes_gt, _ = util_ops.merge_boxes_with_multiple_labels( + location_gt, classes_gt, num_classes) + classes_gt = tf.cast(classes_gt, tf.float32) + elif use_multiclass_scores: + classes_gt = tf.cast(read_data[fields.InputDataFields.multiclass_scores], + tf.float32) + else: + classes_gt = util_ops.padded_one_hot_encoding( + indices=classes_gt, depth=num_classes, left_pad=0) + masks_gt = read_data.get(fields.InputDataFields.groundtruth_instance_masks) + keypoints_gt = read_data.get(fields.InputDataFields.groundtruth_keypoints) + if (merge_multiple_label_boxes and ( + masks_gt is not None or keypoints_gt is not None)): + raise NotImplementedError('Multi-label support is only for boxes.') + weights_gt = read_data.get( + fields.InputDataFields.groundtruth_weights) + return (image, key, location_gt, classes_gt, masks_gt, keypoints_gt, + weights_gt) + + return zip(*map(extract_images_and_targets, read_data_list)) + + +def _create_losses(input_queue, create_model_fn, train_config): + """Creates loss function for a DetectionModel. + + Args: + input_queue: BatchQueue object holding enqueued tensor_dicts. + create_model_fn: A function to create the DetectionModel. + train_config: a train_pb2.TrainConfig protobuf. 
+ """ + detection_model = create_model_fn() + (images, _, groundtruth_boxes_list, groundtruth_classes_list, + groundtruth_masks_list, groundtruth_keypoints_list, + groundtruth_weights_list) = get_inputs( + input_queue, + detection_model.num_classes, + train_config.merge_multiple_label_boxes, + train_config.use_multiclass_scores) + + preprocessed_images = [] + true_image_shapes = [] + for image in images: + resized_image, true_image_shape = detection_model.preprocess(image) + preprocessed_images.append(resized_image) + true_image_shapes.append(true_image_shape) + + images = tf.concat(preprocessed_images, 0) + true_image_shapes = tf.concat(true_image_shapes, 0) + + if any(mask is None for mask in groundtruth_masks_list): + groundtruth_masks_list = None + if any(keypoints is None for keypoints in groundtruth_keypoints_list): + groundtruth_keypoints_list = None + + detection_model.provide_groundtruth( + groundtruth_boxes_list, + groundtruth_classes_list, + groundtruth_masks_list, + groundtruth_keypoints_list, + groundtruth_weights_list=groundtruth_weights_list) + prediction_dict = detection_model.predict(images, true_image_shapes) + + losses_dict = detection_model.loss(prediction_dict, true_image_shapes) + for loss_tensor in losses_dict.values(): + tf.losses.add_loss(loss_tensor) + + +def train(create_tensor_dict_fn, + create_model_fn, + train_config, + master, + task, + num_clones, + worker_replicas, + clone_on_cpu, + ps_tasks, + worker_job_name, + is_chief, + train_dir, + graph_hook_fn=None): + """Training function for detection models. + + Args: + create_tensor_dict_fn: a function to create a tensor input dictionary. + create_model_fn: a function that creates a DetectionModel and generates + losses. + train_config: a train_pb2.TrainConfig protobuf. + master: BNS name of the TensorFlow master to use. + task: The task id of this training instance. + num_clones: The number of clones to run per machine. + worker_replicas: The number of work replicas to train with. + clone_on_cpu: True if clones should be forced to run on CPU. + ps_tasks: Number of parameter server tasks. + worker_job_name: Name of the worker job. + is_chief: Whether this replica is the chief replica. + train_dir: Directory to write checkpoints and training summaries to. + graph_hook_fn: Optional function that is called after the inference graph is + built (before optimization). This is helpful to perform additional changes + to the training graph such as adding FakeQuant ops. The function should + modify the default graph. + + Raises: + ValueError: If both num_clones > 1 and train_config.sync_replicas is true. + """ + + detection_model = create_model_fn() + data_augmentation_options = [ + preprocessor_builder.build(step) + for step in train_config.data_augmentation_options] + + with tf.Graph().as_default(): + # Build a configuration specifying multi-GPU and multi-replicas. + deploy_config = model_deploy.DeploymentConfig( + num_clones=num_clones, + clone_on_cpu=clone_on_cpu, + replica_id=task, + num_replicas=worker_replicas, + num_ps_tasks=ps_tasks, + worker_job_name=worker_job_name) + + # Place the global step on the device storing the variables. + with tf.device(deploy_config.variables_device()): + global_step = slim.create_global_step() + + if num_clones != 1 and train_config.sync_replicas: + raise ValueError('In Synchronous SGD mode num_clones must ', + 'be 1. 
Found num_clones: {}'.format(num_clones)) + batch_size = train_config.batch_size // num_clones + if train_config.sync_replicas: + batch_size //= train_config.replicas_to_aggregate + + with tf.device(deploy_config.inputs_device()): + input_queue = create_input_queue( + batch_size, create_tensor_dict_fn, + train_config.batch_queue_capacity, + train_config.num_batch_queue_threads, + train_config.prefetch_queue_capacity, data_augmentation_options) + + # Gather initial summaries. + # TODO(rathodv): See if summaries can be added/extracted from global tf + # collections so that they don't have to be passed around. + summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES)) + global_summaries = set([]) + + model_fn = functools.partial(_create_losses, + create_model_fn=create_model_fn, + train_config=train_config) + clones = model_deploy.create_clones(deploy_config, model_fn, [input_queue]) + first_clone_scope = clones[0].scope + + if graph_hook_fn: + with tf.device(deploy_config.variables_device()): + graph_hook_fn() + + # Gather update_ops from the first clone. These contain, for example, + # the updates for the batch_norm variables created by model_fn. + update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope) + + with tf.device(deploy_config.optimizer_device()): + training_optimizer, optimizer_summary_vars = optimizer_builder.build( + train_config.optimizer) + for var in optimizer_summary_vars: + tf.summary.scalar(var.op.name, var, family='LearningRate') + + sync_optimizer = None + if train_config.sync_replicas: + training_optimizer = tf.train.SyncReplicasOptimizer( + training_optimizer, + replicas_to_aggregate=train_config.replicas_to_aggregate, + total_num_replicas=worker_replicas) + sync_optimizer = training_optimizer + + with tf.device(deploy_config.optimizer_device()): + regularization_losses = (None if train_config.add_regularization_loss + else []) + total_loss, grads_and_vars = model_deploy.optimize_clones( + clones, training_optimizer, + regularization_losses=regularization_losses) + total_loss = tf.check_numerics(total_loss, 'LossTensor is inf or nan.') + + # Optionally multiply bias gradients by train_config.bias_grad_multiplier. + if train_config.bias_grad_multiplier: + biases_regex_list = ['.*/biases'] + grads_and_vars = variables_helper.multiply_gradients_matching_regex( + grads_and_vars, + biases_regex_list, + multiplier=train_config.bias_grad_multiplier) + + # Optionally freeze some layers by setting their gradients to be zero. + if train_config.freeze_variables: + grads_and_vars = variables_helper.freeze_gradients_matching_regex( + grads_and_vars, train_config.freeze_variables) + + # Optionally clip gradients + if train_config.gradient_clipping_by_norm > 0: + with tf.name_scope('clip_grads'): + grads_and_vars = slim.learning.clip_gradient_norms( + grads_and_vars, train_config.gradient_clipping_by_norm) + + # Create gradient updates. + grad_updates = training_optimizer.apply_gradients(grads_and_vars, + global_step=global_step) + update_ops.append(grad_updates) + update_op = tf.group(*update_ops, name='update_barrier') + with tf.control_dependencies([update_op]): + train_tensor = tf.identity(total_loss, name='train_op') + + # Add summaries. 
+ for model_var in slim.get_model_variables(): + global_summaries.add(tf.summary.histogram('ModelVars/' + + model_var.op.name, model_var)) + for loss_tensor in tf.losses.get_losses(): + global_summaries.add(tf.summary.scalar('Losses/' + loss_tensor.op.name, + loss_tensor)) + global_summaries.add( + tf.summary.scalar('Losses/TotalLoss', tf.losses.get_total_loss())) + + # Add the summaries from the first clone. These contain the summaries + # created by model_fn and either optimize_clones() or _gather_clone_loss(). + summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES, + first_clone_scope)) + summaries |= global_summaries + + # Merge all summaries together. + summary_op = tf.summary.merge(list(summaries), name='summary_op') + + # Soft placement allows placing on CPU ops without GPU implementation. + session_config = tf.ConfigProto(allow_soft_placement=True, + log_device_placement=False) + + # Save checkpoints regularly. + keep_checkpoint_every_n_hours = train_config.keep_checkpoint_every_n_hours + saver = tf.train.Saver( + keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours) + + # Create ops required to initialize the model from a given checkpoint. + init_fn = None + if train_config.fine_tune_checkpoint: + if not train_config.fine_tune_checkpoint_type: + # train_config.from_detection_checkpoint field is deprecated. For + # backward compatibility, fine_tune_checkpoint_type is set based on + # from_detection_checkpoint. + if train_config.from_detection_checkpoint: + train_config.fine_tune_checkpoint_type = 'detection' + else: + train_config.fine_tune_checkpoint_type = 'classification' + var_map = detection_model.restore_map( + fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type, + load_all_detection_checkpoint_vars=( + train_config.load_all_detection_checkpoint_vars)) + available_var_map = (variables_helper. + get_variables_available_in_checkpoint( + var_map, train_config.fine_tune_checkpoint, + include_global_step=False)) + init_saver = tf.train.Saver(available_var_map) + def initializer_fn(sess): + init_saver.restore(sess, train_config.fine_tune_checkpoint) + init_fn = initializer_fn + + slim.learning.train( + train_tensor, + logdir=train_dir, + master=master, + is_chief=is_chief, + session_config=session_config, + startup_delay_steps=train_config.startup_delay_steps, + init_fn=init_fn, + summary_op=summary_op, + number_of_steps=( + train_config.num_steps if train_config.num_steps else None), + save_summaries_secs=120, + sync_optimizer=sync_optimizer, + saver=saver) diff --git a/models/research/object_detection/legacy/trainer_tf1_test.py b/models/research/object_detection/legacy/trainer_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..5b3f01c018a6b95398b9f9198ba3f3394ce709cc --- /dev/null +++ b/models/research/object_detection/legacy/trainer_tf1_test.py @@ -0,0 +1,292 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.trainer.""" +import unittest +import tensorflow.compat.v1 as tf +import tf_slim as slim +from google.protobuf import text_format + +from object_detection.core import losses +from object_detection.core import model +from object_detection.core import standard_fields as fields +from object_detection.legacy import trainer +from object_detection.protos import train_pb2 +from object_detection.utils import tf_version + + +NUMBER_OF_CLASSES = 2 + + +def get_input_function(): + """A function to get test inputs. Returns an image with one box.""" + image = tf.random_uniform([32, 32, 3], dtype=tf.float32) + key = tf.constant('image_000000') + class_label = tf.random_uniform( + [1], minval=0, maxval=NUMBER_OF_CLASSES, dtype=tf.int32) + box_label = tf.random_uniform( + [1, 4], minval=0.4, maxval=0.6, dtype=tf.float32) + multiclass_scores = tf.random_uniform( + [1, NUMBER_OF_CLASSES], minval=0.4, maxval=0.6, dtype=tf.float32) + + return { + fields.InputDataFields.image: image, + fields.InputDataFields.key: key, + fields.InputDataFields.groundtruth_classes: class_label, + fields.InputDataFields.groundtruth_boxes: box_label, + fields.InputDataFields.multiclass_scores: multiclass_scores + } + + +class FakeDetectionModel(model.DetectionModel): + """A simple (and poor) DetectionModel for use in test.""" + + def __init__(self): + super(FakeDetectionModel, self).__init__(num_classes=NUMBER_OF_CLASSES) + self._classification_loss = losses.WeightedSigmoidClassificationLoss() + self._localization_loss = losses.WeightedSmoothL1LocalizationLoss() + + def preprocess(self, inputs): + """Input preprocessing, resizes images to 28x28. + + Args: + inputs: a [batch, height_in, width_in, channels] float32 tensor + representing a batch of images with values between 0 and 255.0. + + Returns: + preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + """ + true_image_shapes = [inputs.shape[:-1].as_list() + for _ in range(inputs.shape[-1])] + return tf.image.resize_images(inputs, [28, 28]), true_image_shapes + + def predict(self, preprocessed_inputs, true_image_shapes): + """Prediction tensors from inputs tensor. + + Args: + preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + + Returns: + prediction_dict: a dictionary holding prediction tensors to be + passed to the Loss or Postprocess functions. + """ + flattened_inputs = slim.flatten(preprocessed_inputs) + class_prediction = slim.fully_connected(flattened_inputs, self._num_classes) + box_prediction = slim.fully_connected(flattened_inputs, 4) + + return { + 'class_predictions_with_background': tf.reshape( + class_prediction, [-1, 1, self._num_classes]), + 'box_encodings': tf.reshape(box_prediction, [-1, 1, 4]) + } + + def postprocess(self, prediction_dict, true_image_shapes, **params): + """Convert predicted output tensors to final detections. Unused. + + Args: + prediction_dict: a dictionary holding prediction tensors. 
+ true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + **params: Additional keyword arguments for specific implementations of + DetectionModel. + + Returns: + detections: a dictionary with empty fields. + """ + return { + 'detection_boxes': None, + 'detection_scores': None, + 'detection_classes': None, + 'num_detections': None + } + + def loss(self, prediction_dict, true_image_shapes): + """Compute scalar loss tensors with respect to provided groundtruth. + + Calling this function requires that groundtruth tensors have been + provided via the provide_groundtruth function. + + Args: + prediction_dict: a dictionary holding predicted tensors + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + + Returns: + a dictionary mapping strings (loss names) to scalar tensors representing + loss values. + """ + batch_reg_targets = tf.stack( + self.groundtruth_lists(fields.BoxListFields.boxes)) + batch_cls_targets = tf.stack( + self.groundtruth_lists(fields.BoxListFields.classes)) + weights = tf.constant( + 1.0, dtype=tf.float32, + shape=[len(self.groundtruth_lists(fields.BoxListFields.boxes)), 1]) + + location_losses = self._localization_loss( + prediction_dict['box_encodings'], batch_reg_targets, + weights=weights) + cls_losses = self._classification_loss( + prediction_dict['class_predictions_with_background'], batch_cls_targets, + weights=weights) + + loss_dict = { + 'localization_loss': tf.reduce_sum(location_losses), + 'classification_loss': tf.reduce_sum(cls_losses), + } + return loss_dict + + def regularization_losses(self): + """Returns a list of regularization losses for this model. + + Returns a list of regularization losses for this model that the estimator + needs to use during training/optimization. + + Returns: + A list of regularization loss tensors. + """ + pass + + def restore_map(self, fine_tune_checkpoint_type='detection'): + """Returns a map of variables to load from a foreign checkpoint. + + Args: + fine_tune_checkpoint_type: whether to restore from a full detection + checkpoint (with compatible variable names) or to restore from a + classification checkpoint for initialization prior to training. + Valid values: `detection`, `classification`. Default 'detection'. + + Returns: + A dict mapping variable names to variables. + """ + return {var.op.name: var for var in tf.global_variables()} + + def updates(self): + """Returns a list of update operators for this model. + + Returns a list of update operators for this model that must be executed at + each training step. The estimator's train op needs to have a control + dependency on these updates. + + Returns: + A list of update operators. 
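
FakeDetectionModel above exercises the contract the legacy trainer relies on: preprocess, predict, provide_groundtruth and loss. The snippet below is only a condensed sketch of that call sequence (the real trainer also batches inputs, applies the configured data augmentation and creates clones); it assumes the FakeDetectionModel and get_input_function defined in this test file.

```python
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields as fields

detection_model = FakeDetectionModel()
tensor_dict = get_input_function()

# Batch of one image, preprocessed and run through the model.
images = tf.expand_dims(tensor_dict[fields.InputDataFields.image], axis=0)
preprocessed_images, true_shapes = detection_model.preprocess(images)
prediction_dict = detection_model.predict(preprocessed_images, true_shapes)

# Groundtruth must be provided before loss() can be called.
detection_model.provide_groundtruth(
    groundtruth_boxes_list=[
        tensor_dict[fields.InputDataFields.groundtruth_boxes]],
    groundtruth_classes_list=[
        tf.one_hot(tensor_dict[fields.InputDataFields.groundtruth_classes],
                   NUMBER_OF_CLASSES)])
losses_dict = detection_model.loss(prediction_dict, true_shapes)
total_loss = tf.add_n(list(losses_dict.values()))
```
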
+ """ + pass + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class TrainerTest(tf.test.TestCase): + + def test_configure_trainer_and_train_two_steps(self): + train_config_text_proto = """ + optimizer { + adam_optimizer { + learning_rate { + constant_learning_rate { + learning_rate: 0.01 + } + } + } + } + data_augmentation_options { + random_adjust_brightness { + max_delta: 0.2 + } + } + data_augmentation_options { + random_adjust_contrast { + min_delta: 0.7 + max_delta: 1.1 + } + } + num_steps: 2 + """ + train_config = train_pb2.TrainConfig() + text_format.Merge(train_config_text_proto, train_config) + + train_dir = self.get_temp_dir() + + trainer.train( + create_tensor_dict_fn=get_input_function, + create_model_fn=FakeDetectionModel, + train_config=train_config, + master='', + task=0, + num_clones=1, + worker_replicas=1, + clone_on_cpu=True, + ps_tasks=0, + worker_job_name='worker', + is_chief=True, + train_dir=train_dir) + + def test_configure_trainer_with_multiclass_scores_and_train_two_steps(self): + train_config_text_proto = """ + optimizer { + adam_optimizer { + learning_rate { + constant_learning_rate { + learning_rate: 0.01 + } + } + } + } + data_augmentation_options { + random_adjust_brightness { + max_delta: 0.2 + } + } + data_augmentation_options { + random_adjust_contrast { + min_delta: 0.7 + max_delta: 1.1 + } + } + num_steps: 2 + use_multiclass_scores: true + """ + train_config = train_pb2.TrainConfig() + text_format.Merge(train_config_text_proto, train_config) + + train_dir = self.get_temp_dir() + + trainer.train(create_tensor_dict_fn=get_input_function, + create_model_fn=FakeDetectionModel, + train_config=train_config, + master='', + task=0, + num_clones=1, + worker_replicas=1, + clone_on_cpu=True, + ps_tasks=0, + worker_job_name='worker', + is_chief=True, + train_dir=train_dir) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/matchers/__init__.py b/models/research/object_detection/matchers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/object_detection/matchers/argmax_matcher.py b/models/research/object_detection/matchers/argmax_matcher.py new file mode 100644 index 0000000000000000000000000000000000000000..a347decbd3ccc1c68e9285f34e24a9b0610d83e1 --- /dev/null +++ b/models/research/object_detection/matchers/argmax_matcher.py @@ -0,0 +1,208 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Argmax matcher implementation. + +This class takes a similarity matrix and matches columns to rows based on the +maximum value per column. 
One can specify a matched_threshold and an
+unmatched_threshold: columns whose maximum similarity falls below the
+unmatched_threshold do not match any row (generally resulting in a negative
+training example), while columns falling between the two thresholds are
+ignored (generally resulting in neither a positive nor a negative training
+example).
+
+This matcher is used in Fast(er)-RCNN.
+
+Note: matchers are used in TargetAssigners. There is a create_target_assigner
+factory function for popular implementations.
+"""
+import tensorflow.compat.v1 as tf
+
+from object_detection.core import matcher
+from object_detection.utils import shape_utils
+
+
+class ArgMaxMatcher(matcher.Matcher):
+  """Matcher based on highest value.
+
+  This class computes matches from a similarity matrix. Each column is matched
+  to a single row.
+
+  To support object detection target assignment this class enables setting both
+  matched_threshold (upper threshold) and unmatched_threshold (lower threshold)
+  defining three categories of similarity which define whether examples are
+  positive, negative, or ignored:
+  (1) similarity >= matched_threshold: Highest similarity. Matched/Positive!
+  (2) matched_threshold > similarity >= unmatched_threshold: Medium similarity.
+        Depending on negatives_lower_than_unmatched, this is either
+        Unmatched/Negative OR Ignore.
+  (3) unmatched_threshold > similarity: Lowest similarity. Depending on flag
+        negatives_lower_than_unmatched, either Unmatched/Negative OR Ignore.
+  For ignored matches this class sets the values in the Match object to -2.
+  """
+
+  def __init__(self,
+               matched_threshold,
+               unmatched_threshold=None,
+               negatives_lower_than_unmatched=True,
+               force_match_for_each_row=False,
+               use_matmul_gather=False):
+    """Construct ArgMaxMatcher.
+
+    Args:
+      matched_threshold: Threshold for positive matches. Positive if
+        sim >= matched_threshold, where sim is the maximum value of the
+        similarity matrix for a given column. Set to None for no threshold.
+      unmatched_threshold: Threshold for negative matches. Negative if
+        sim < unmatched_threshold. Defaults to matched_threshold
+        when set to None.
+      negatives_lower_than_unmatched: Boolean which defaults to True. If True
+        then negative matches are the ones below the unmatched_threshold,
+        whereas ignored matches are in between the matched and unmatched
+        threshold. If False, then negative matches are in between the matched
+        and unmatched threshold, and everything lower than unmatched is ignored.
+      force_match_for_each_row: If True, ensures that each row is matched to
+        at least one column (which is not guaranteed otherwise if the
+        matched_threshold is high). Defaults to False. See
+        argmax_matcher_test.testMatcherForceMatch() for an example.
+      use_matmul_gather: Force constructed match objects to use matrix
+        multiplication based gather instead of standard tf.gather.
+        (Default: False).
+
+    Raises:
+      ValueError: if unmatched_threshold is set but matched_threshold is not set
+        or if unmatched_threshold > matched_threshold.
+    """
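
The three similarity categories described above can be illustrated with plain numpy on the per-column maxima (default negatives_lower_than_unmatched=True); this is only an illustration of the thresholding semantics, not the tensor implementation below, which also supports force matching.

```python
import numpy as np

def classify_columns(similarity, matched_threshold, unmatched_threshold):
  """One label per column: best row index if matched, -2 ignored, -1 negative."""
  best_rows = similarity.argmax(axis=0)
  best_vals = similarity.max(axis=0)
  return np.where(best_vals >= matched_threshold, best_rows,      # matched
                  np.where(best_vals >= unmatched_threshold, -2,  # ignored
                           -1))                                   # unmatched

similarity = np.array([[1., 1, 1, 3, 1],
                       [2, -1, 2, 0, 4],
                       [3, 0, -1, 0, 0]])
print(classify_columns(similarity, matched_threshold=3., unmatched_threshold=2.))
# -> [ 2 -1 -2  0  1]
```
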
+ """ + super(ArgMaxMatcher, self).__init__(use_matmul_gather=use_matmul_gather) + if (matched_threshold is None) and (unmatched_threshold is not None): + raise ValueError('Need to also define matched_threshold when' + 'unmatched_threshold is defined') + self._matched_threshold = matched_threshold + if unmatched_threshold is None: + self._unmatched_threshold = matched_threshold + else: + if unmatched_threshold > matched_threshold: + raise ValueError('unmatched_threshold needs to be smaller or equal' + 'to matched_threshold') + self._unmatched_threshold = unmatched_threshold + if not negatives_lower_than_unmatched: + if self._unmatched_threshold == self._matched_threshold: + raise ValueError('When negatives are in between matched and ' + 'unmatched thresholds, these cannot be of equal ' + 'value. matched: {}, unmatched: {}'.format( + self._matched_threshold, + self._unmatched_threshold)) + self._force_match_for_each_row = force_match_for_each_row + self._negatives_lower_than_unmatched = negatives_lower_than_unmatched + + def _match(self, similarity_matrix, valid_rows): + """Tries to match each column of the similarity matrix to a row. + + Args: + similarity_matrix: tensor of shape [N, M] representing any similarity + metric. + valid_rows: a boolean tensor of shape [N] indicating valid rows. + + Returns: + Match object with corresponding matches for each of M columns. + """ + + def _match_when_rows_are_empty(): + """Performs matching when the rows of similarity matrix are empty. + + When the rows are empty, all detections are false positives. So we return + a tensor of -1's to indicate that the columns do not match to any rows. + + Returns: + matches: int32 tensor indicating the row each column matches to. + """ + similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape( + similarity_matrix) + return -1 * tf.ones([similarity_matrix_shape[1]], dtype=tf.int32) + + def _match_when_rows_are_non_empty(): + """Performs matching when the rows of similarity matrix are non empty. + + Returns: + matches: int32 tensor indicating the row each column matches to. 
+ """ + # Matches for each column + matches = tf.argmax(similarity_matrix, 0, output_type=tf.int32) + + # Deal with matched and unmatched threshold + if self._matched_threshold is not None: + # Get logical indices of ignored and unmatched columns as tf.int64 + matched_vals = tf.reduce_max(similarity_matrix, 0) + below_unmatched_threshold = tf.greater(self._unmatched_threshold, + matched_vals) + between_thresholds = tf.logical_and( + tf.greater_equal(matched_vals, self._unmatched_threshold), + tf.greater(self._matched_threshold, matched_vals)) + + if self._negatives_lower_than_unmatched: + matches = self._set_values_using_indicator(matches, + below_unmatched_threshold, + -1) + matches = self._set_values_using_indicator(matches, + between_thresholds, + -2) + else: + matches = self._set_values_using_indicator(matches, + below_unmatched_threshold, + -2) + matches = self._set_values_using_indicator(matches, + between_thresholds, + -1) + + if self._force_match_for_each_row: + similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape( + similarity_matrix) + force_match_column_ids = tf.argmax(similarity_matrix, 1, + output_type=tf.int32) + force_match_column_indicators = ( + tf.one_hot( + force_match_column_ids, depth=similarity_matrix_shape[1]) * + tf.cast(tf.expand_dims(valid_rows, axis=-1), dtype=tf.float32)) + force_match_row_ids = tf.argmax(force_match_column_indicators, 0, + output_type=tf.int32) + force_match_column_mask = tf.cast( + tf.reduce_max(force_match_column_indicators, 0), tf.bool) + final_matches = tf.where(force_match_column_mask, + force_match_row_ids, matches) + return final_matches + else: + return matches + + if similarity_matrix.shape.is_fully_defined(): + if shape_utils.get_dim_as_int(similarity_matrix.shape[0]) == 0: + return _match_when_rows_are_empty() + else: + return _match_when_rows_are_non_empty() + else: + return tf.cond( + tf.greater(tf.shape(similarity_matrix)[0], 0), + _match_when_rows_are_non_empty, _match_when_rows_are_empty) + + def _set_values_using_indicator(self, x, indicator, val): + """Set the indicated fields of x to val. + + Args: + x: tensor. + indicator: boolean with same shape as x. + val: scalar with value to set. + + Returns: + modified tensor. + """ + indicator = tf.cast(indicator, x.dtype) + return tf.add(tf.multiply(x, 1 - indicator), val * indicator) diff --git a/models/research/object_detection/matchers/argmax_matcher_test.py b/models/research/object_detection/matchers/argmax_matcher_test.py new file mode 100644 index 0000000000000000000000000000000000000000..9305f0a86c893c5265c6a204367c98e91b6e8819 --- /dev/null +++ b/models/research/object_detection/matchers/argmax_matcher_test.py @@ -0,0 +1,235 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.matchers.argmax_matcher.""" + +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.matchers import argmax_matcher +from object_detection.utils import test_case + + +class ArgMaxMatcherTest(test_case.TestCase): + + def test_return_correct_matches_with_default_thresholds(self): + + def graph_fn(similarity_matrix): + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=None) + match = matcher.match(similarity_matrix) + matched_cols = match.matched_column_indicator() + unmatched_cols = match.unmatched_column_indicator() + match_results = match.match_results + return (matched_cols, unmatched_cols, match_results) + + similarity = np.array([[1., 1, 1, 3, 1], + [2, -1, 2, 0, 4], + [3, 0, -1, 0, 0]], dtype=np.float32) + expected_matched_rows = np.array([2, 0, 1, 0, 1]) + (res_matched_cols, res_unmatched_cols, + res_match_results) = self.execute(graph_fn, [similarity]) + + self.assertAllEqual(res_match_results[res_matched_cols], + expected_matched_rows) + self.assertAllEqual(np.nonzero(res_matched_cols)[0], [0, 1, 2, 3, 4]) + self.assertFalse(np.all(res_unmatched_cols)) + + def test_return_correct_matches_with_empty_rows(self): + + def graph_fn(similarity_matrix): + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=None) + match = matcher.match(similarity_matrix) + return match.unmatched_column_indicator() + similarity = 0.2 * np.ones([0, 5], dtype=np.float32) + res_unmatched_cols = self.execute(graph_fn, [similarity]) + self.assertAllEqual(np.nonzero(res_unmatched_cols)[0], np.arange(5)) + + def test_return_correct_matches_with_matched_threshold(self): + + def graph_fn(similarity): + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3.) + match = matcher.match(similarity) + matched_cols = match.matched_column_indicator() + unmatched_cols = match.unmatched_column_indicator() + match_results = match.match_results + return (matched_cols, unmatched_cols, match_results) + + similarity = np.array([[1, 1, 1, 3, 1], + [2, -1, 2, 0, 4], + [3, 0, -1, 0, 0]], dtype=np.float32) + expected_matched_cols = np.array([0, 3, 4]) + expected_matched_rows = np.array([2, 0, 1]) + expected_unmatched_cols = np.array([1, 2]) + + (res_matched_cols, res_unmatched_cols, + match_results) = self.execute(graph_fn, [similarity]) + self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows) + self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols) + self.assertAllEqual(np.nonzero(res_unmatched_cols)[0], + expected_unmatched_cols) + + def test_return_correct_matches_with_matched_and_unmatched_threshold(self): + + def graph_fn(similarity): + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3., + unmatched_threshold=2.) 
+ match = matcher.match(similarity) + matched_cols = match.matched_column_indicator() + unmatched_cols = match.unmatched_column_indicator() + match_results = match.match_results + return (matched_cols, unmatched_cols, match_results) + + similarity = np.array([[1, 1, 1, 3, 1], + [2, -1, 2, 0, 4], + [3, 0, -1, 0, 0]], dtype=np.float32) + expected_matched_cols = np.array([0, 3, 4]) + expected_matched_rows = np.array([2, 0, 1]) + expected_unmatched_cols = np.array([1]) # col 2 has too high maximum val + + (res_matched_cols, res_unmatched_cols, + match_results) = self.execute(graph_fn, [similarity]) + self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows) + self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols) + self.assertAllEqual(np.nonzero(res_unmatched_cols)[0], + expected_unmatched_cols) + + def test_return_correct_matches_negatives_lower_than_unmatched_false(self): + + def graph_fn(similarity): + matcher = argmax_matcher.ArgMaxMatcher( + matched_threshold=3., + unmatched_threshold=2., + negatives_lower_than_unmatched=False) + match = matcher.match(similarity) + matched_cols = match.matched_column_indicator() + unmatched_cols = match.unmatched_column_indicator() + match_results = match.match_results + return (matched_cols, unmatched_cols, match_results) + + similarity = np.array([[1, 1, 1, 3, 1], + [2, -1, 2, 0, 4], + [3, 0, -1, 0, 0]], dtype=np.float32) + expected_matched_cols = np.array([0, 3, 4]) + expected_matched_rows = np.array([2, 0, 1]) + expected_unmatched_cols = np.array([2]) # col 1 has too low maximum val + + (res_matched_cols, res_unmatched_cols, + match_results) = self.execute(graph_fn, [similarity]) + self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows) + self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols) + self.assertAllEqual(np.nonzero(res_unmatched_cols)[0], + expected_unmatched_cols) + + def test_return_correct_matches_unmatched_row_not_using_force_match(self): + + def graph_fn(similarity): + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3., + unmatched_threshold=2.) 
+ match = matcher.match(similarity) + matched_cols = match.matched_column_indicator() + unmatched_cols = match.unmatched_column_indicator() + match_results = match.match_results + return (matched_cols, unmatched_cols, match_results) + + similarity = np.array([[1, 1, 1, 3, 1], + [-1, 0, -2, -2, -1], + [3, 0, -1, 2, 0]], dtype=np.float32) + expected_matched_cols = np.array([0, 3]) + expected_matched_rows = np.array([2, 0]) + expected_unmatched_cols = np.array([1, 2, 4]) + + (res_matched_cols, res_unmatched_cols, + match_results) = self.execute(graph_fn, [similarity]) + self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows) + self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols) + self.assertAllEqual(np.nonzero(res_unmatched_cols)[0], + expected_unmatched_cols) + + def test_return_correct_matches_unmatched_row_while_using_force_match(self): + def graph_fn(similarity): + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3., + unmatched_threshold=2., + force_match_for_each_row=True) + match = matcher.match(similarity) + matched_cols = match.matched_column_indicator() + unmatched_cols = match.unmatched_column_indicator() + match_results = match.match_results + return (matched_cols, unmatched_cols, match_results) + + similarity = np.array([[1, 1, 1, 3, 1], + [-1, 0, -2, -2, -1], + [3, 0, -1, 2, 0]], dtype=np.float32) + expected_matched_cols = np.array([0, 1, 3]) + expected_matched_rows = np.array([2, 1, 0]) + expected_unmatched_cols = np.array([2, 4]) # col 2 has too high max val + + (res_matched_cols, res_unmatched_cols, + match_results) = self.execute(graph_fn, [similarity]) + self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows) + self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols) + self.assertAllEqual(np.nonzero(res_unmatched_cols)[0], + expected_unmatched_cols) + + def test_return_correct_matches_using_force_match_padded_groundtruth(self): + def graph_fn(similarity, valid_rows): + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3., + unmatched_threshold=2., + force_match_for_each_row=True) + match = matcher.match(similarity, valid_rows) + matched_cols = match.matched_column_indicator() + unmatched_cols = match.unmatched_column_indicator() + match_results = match.match_results + return (matched_cols, unmatched_cols, match_results) + + similarity = np.array([[1, 1, 1, 3, 1], + [-1, 0, -2, -2, -1], + [0, 0, 0, 0, 0], + [3, 0, -1, 2, 0], + [0, 0, 0, 0, 0]], dtype=np.float32) + valid_rows = np.array([True, True, False, True, False]) + expected_matched_cols = np.array([0, 1, 3]) + expected_matched_rows = np.array([3, 1, 0]) + expected_unmatched_cols = np.array([2, 4]) # col 2 has too high max val + + (res_matched_cols, res_unmatched_cols, + match_results) = self.execute(graph_fn, [similarity, valid_rows]) + self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows) + self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols) + self.assertAllEqual(np.nonzero(res_unmatched_cols)[0], + expected_unmatched_cols) + + def test_valid_arguments_corner_case(self): + argmax_matcher.ArgMaxMatcher(matched_threshold=1, + unmatched_threshold=1) + + def test_invalid_arguments_corner_case_negatives_lower_than_thres_false(self): + with self.assertRaises(ValueError): + argmax_matcher.ArgMaxMatcher(matched_threshold=1, + unmatched_threshold=1, + negatives_lower_than_unmatched=False) + + def test_invalid_arguments_no_matched_threshold(self): + with self.assertRaises(ValueError): + 
argmax_matcher.ArgMaxMatcher(matched_threshold=None, + unmatched_threshold=4) + + def test_invalid_arguments_unmatched_thres_larger_than_matched_thres(self): + with self.assertRaises(ValueError): + argmax_matcher.ArgMaxMatcher(matched_threshold=1, + unmatched_threshold=2) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/matchers/bipartite_matcher.py b/models/research/object_detection/matchers/bipartite_matcher.py new file mode 100644 index 0000000000000000000000000000000000000000..f62afe0975f76397e49d06c7c86d5ff76896860b --- /dev/null +++ b/models/research/object_detection/matchers/bipartite_matcher.py @@ -0,0 +1,70 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Bipartite matcher implementation.""" + +import tensorflow.compat.v1 as tf + +from tensorflow.contrib.image.python.ops import image_ops +from object_detection.core import matcher + + +class GreedyBipartiteMatcher(matcher.Matcher): + """Wraps a Tensorflow greedy bipartite matcher.""" + + def __init__(self, use_matmul_gather=False): + """Constructs a Matcher. + + Args: + use_matmul_gather: Force constructed match objects to use matrix + multiplication based gather instead of standard tf.gather. + (Default: False). + """ + super(GreedyBipartiteMatcher, self).__init__( + use_matmul_gather=use_matmul_gather) + + def _match(self, similarity_matrix, valid_rows): + """Bipartite matches a collection rows and columns. A greedy bi-partite. + + TODO(rathodv): Add num_valid_columns options to match only that many columns + with all the rows. + + Args: + similarity_matrix: Float tensor of shape [N, M] with pairwise similarity + where higher values mean more similar. + valid_rows: A boolean tensor of shape [N] indicating the rows that are + valid. + + Returns: + match_results: int32 tensor of shape [M] with match_results[i]=-1 + meaning that column i is not matched and otherwise that it is matched to + row match_results[i]. + """ + valid_row_sim_matrix = tf.gather(similarity_matrix, + tf.squeeze(tf.where(valid_rows), axis=-1)) + invalid_row_sim_matrix = tf.gather( + similarity_matrix, + tf.squeeze(tf.where(tf.logical_not(valid_rows)), axis=-1)) + similarity_matrix = tf.concat( + [valid_row_sim_matrix, invalid_row_sim_matrix], axis=0) + # Convert similarity matrix to distance matrix as tf.image.bipartite tries + # to find minimum distance matches. 
+ distance_matrix = -1 * similarity_matrix + num_valid_rows = tf.reduce_sum(tf.cast(valid_rows, dtype=tf.float32)) + _, match_results = image_ops.bipartite_match( + distance_matrix, num_valid_rows=num_valid_rows) + match_results = tf.reshape(match_results, [-1]) + match_results = tf.cast(match_results, tf.int32) + return match_results diff --git a/models/research/object_detection/matchers/bipartite_matcher_tf1_test.py b/models/research/object_detection/matchers/bipartite_matcher_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..314546ad4ee507d3024746044d4d4a30bc92e85d --- /dev/null +++ b/models/research/object_detection/matchers/bipartite_matcher_tf1_test.py @@ -0,0 +1,92 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.core.bipartite_matcher.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.utils import test_case +from object_detection.utils import tf_version + +if tf_version.is_tf1(): + from object_detection.matchers import bipartite_matcher # pylint: disable=g-import-not-at-top + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class GreedyBipartiteMatcherTest(test_case.TestCase): + + def test_get_expected_matches_when_all_rows_are_valid(self): + similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]], + dtype=np.float32) + valid_rows = np.ones([2], dtype=np.bool) + expected_match_results = [-1, 1, 0] + def graph_fn(similarity_matrix, valid_rows): + matcher = bipartite_matcher.GreedyBipartiteMatcher() + match = matcher.match(similarity_matrix, valid_rows=valid_rows) + return match._match_results + match_results_out = self.execute(graph_fn, [similarity_matrix, valid_rows]) + self.assertAllEqual(match_results_out, expected_match_results) + + def test_get_expected_matches_with_all_rows_be_default(self): + similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]], + dtype=np.float32) + expected_match_results = [-1, 1, 0] + def graph_fn(similarity_matrix): + matcher = bipartite_matcher.GreedyBipartiteMatcher() + match = matcher.match(similarity_matrix) + return match._match_results + match_results_out = self.execute(graph_fn, [similarity_matrix]) + self.assertAllEqual(match_results_out, expected_match_results) + + def test_get_no_matches_with_zero_valid_rows(self): + similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]], + dtype=np.float32) + valid_rows = np.zeros([2], dtype=np.bool) + expected_match_results = [-1, -1, -1] + def graph_fn(similarity_matrix, valid_rows): + matcher = bipartite_matcher.GreedyBipartiteMatcher() + match = matcher.match(similarity_matrix, valid_rows=valid_rows) + return match._match_results + match_results_out = self.execute(graph_fn, [similarity_matrix, valid_rows]) + self.assertAllEqual(match_results_out, expected_match_results) + + def 
test_get_expected_matches_with_only_one_valid_row(self): + similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]], + dtype=np.float32) + valid_rows = np.array([True, False], dtype=np.bool) + expected_match_results = [-1, -1, 0] + def graph_fn(similarity_matrix, valid_rows): + matcher = bipartite_matcher.GreedyBipartiteMatcher() + match = matcher.match(similarity_matrix, valid_rows=valid_rows) + return match._match_results + match_results_out = self.execute(graph_fn, [similarity_matrix, valid_rows]) + self.assertAllEqual(match_results_out, expected_match_results) + + def test_get_expected_matches_with_only_one_valid_row_at_bottom(self): + similarity_matrix = np.array([[0.15, 0.2, 0.3], [0.50, 0.1, 0.8]], + dtype=np.float32) + valid_rows = np.array([False, True], dtype=np.bool) + expected_match_results = [-1, -1, 0] + def graph_fn(similarity_matrix, valid_rows): + matcher = bipartite_matcher.GreedyBipartiteMatcher() + match = matcher.match(similarity_matrix, valid_rows=valid_rows) + return match._match_results + match_results_out = self.execute(graph_fn, [similarity_matrix, valid_rows]) + self.assertAllEqual(match_results_out, expected_match_results) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/meta_architectures/__init__.py b/models/research/object_detection/meta_architectures/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/object_detection/meta_architectures/center_net_meta_arch.py b/models/research/object_detection/meta_architectures/center_net_meta_arch.py new file mode 100644 index 0000000000000000000000000000000000000000..8ae98bb1f07588abb6080ce7f16e64ecd54f0f2a --- /dev/null +++ b/models/research/object_detection/meta_architectures/center_net_meta_arch.py @@ -0,0 +1,2348 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""The CenterNet meta architecture as described in the "Objects as Points" paper [1]. + +[1]: https://arxiv.org/abs/1904.07850 + +""" + +import abc +import collections +import functools +import numpy as np +import tensorflow.compat.v1 as tf +import tensorflow.compat.v2 as tf2 + +from object_detection.core import box_list +from object_detection.core import box_list_ops +from object_detection.core import keypoint_ops +from object_detection.core import model +from object_detection.core import standard_fields as fields +from object_detection.core import target_assigner as cn_assigner +from object_detection.utils import shape_utils + +# Number of channels needed to predict size and offsets. +NUM_OFFSET_CHANNELS = 2 +NUM_SIZE_CHANNELS = 2 + +# Error range for detecting peaks. +PEAK_EPSILON = 1e-6 + +# Constants shared between all keypoint tasks. 
+UNMATCHED_KEYPOINT_SCORE = 0.1 +KEYPOINT_CANDIDATE_SEARCH_SCALE = 0.3 + + +class CenterNetFeatureExtractor(tf.keras.Model): + """Base class for feature extractors for the CenterNet meta architecture. + + Child classes are expected to override the _output_model property which will + return 1 or more tensors predicted by the feature extractor. + + """ + __metaclass__ = abc.ABCMeta + + def __init__(self, name=None, channel_means=(0., 0., 0.), + channel_stds=(1., 1., 1.), bgr_ordering=False): + """Initializes a CenterNet feature extractor. + + Args: + name: str, the name used for the underlying keras model. + channel_means: A tuple of floats, denoting the mean of each channel + which will be subtracted from it. If None or empty, we use 0s. + channel_stds: A tuple of floats, denoting the standard deviation of each + channel. Each channel will be divided by its standard deviation value. + If None or empty, we use 1s. + bgr_ordering: bool, if set will change the channel ordering to be in the + [blue, red, green] order. + """ + super(CenterNetFeatureExtractor, self).__init__(name=name) + + if channel_means is None or len(channel_means) == 0: # pylint:disable=g-explicit-length-test + channel_means = [0., 0., 0.] + + if channel_stds is None or len(channel_stds) == 0: # pylint:disable=g-explicit-length-test + channel_stds = [1., 1., 1.] + + self._channel_means = channel_means + self._channel_stds = channel_stds + self._bgr_ordering = bgr_ordering + + def preprocess(self, inputs): + """Converts a batch of unscaled images to a scale suitable for the model. + + This method normalizes the image using the given `channel_means` and + `channels_stds` values at initialization time while optionally flipping + the channel order if `bgr_ordering` is set. + + Args: + inputs: a [batch, height, width, channels] float32 tensor + + Returns: + outputs: a [batch, height, width, channels] float32 tensor + + """ + + if self._bgr_ordering: + red, green, blue = tf.unstack(inputs, axis=3) + inputs = tf.stack([blue, green, red], axis=3) + + channel_means = tf.reshape(tf.constant(self._channel_means), + [1, 1, 1, -1]) + channel_stds = tf.reshape(tf.constant(self._channel_stds), + [1, 1, 1, -1]) + + return (inputs - channel_means)/channel_stds + + @property + @abc.abstractmethod + def out_stride(self): + """The stride in the output image of the network.""" + pass + + @property + @abc.abstractmethod + def num_feature_outputs(self): + """Ther number of feature outputs returned by the feature extractor.""" + pass + + +def make_prediction_net(num_out_channels, kernel_size=3, num_filters=256, + bias_fill=None): + """Creates a network to predict the given number of output channels. + + This function is intended to make the prediction heads for the CenterNet + meta architecture. + + Args: + num_out_channels: Number of output channels. + kernel_size: The size of the conv kernel in the intermediate layer + num_filters: The number of filters in the intermediate conv layer. + bias_fill: If not None, is used to initialize the bias in the final conv + layer. 
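
CenterNetFeatureExtractor above leaves out_stride and num_feature_outputs abstract. A toy subclass might look like the sketch below; it is purely illustrative (the repository's real extractors are hourglass/ResNet based, and the base class may require further overrides).

```python
import tensorflow.compat.v1 as tf

class ToyCenterNetFeatureExtractor(CenterNetFeatureExtractor):
  """Hypothetical extractor: one stride-4 conv producing a single feature map."""

  def __init__(self, name=None):
    super(ToyCenterNetFeatureExtractor, self).__init__(
        name=name, channel_means=(0., 0., 0.), channel_stds=(1., 1., 1.),
        bgr_ordering=False)
    self._conv = tf.keras.layers.Conv2D(64, 3, strides=4, padding='same')

  @property
  def out_stride(self):
    return 4  # ratio of input resolution to feature map resolution

  @property
  def num_feature_outputs(self):
    return 1

  def call(self, inputs):
    return [self._conv(inputs)]
```
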
+ + Returns: + net: A keras module which when called on an input tensor of size + [batch_size, height, width, num_in_channels] returns an output + of size [batch_size, height, width, num_out_channels] + """ + + out_conv = tf.keras.layers.Conv2D(num_out_channels, kernel_size=1) + + if bias_fill is not None: + out_conv.bias_initializer = tf.keras.initializers.constant(bias_fill) + + net = tf.keras.Sequential( + [tf.keras.layers.Conv2D(num_filters, kernel_size=kernel_size, + padding='same'), + tf.keras.layers.ReLU(), + out_conv] + ) + + return net + + +def _to_float32(x): + return tf.cast(x, tf.float32) + + +def _get_shape(tensor, num_dims): + tf.Assert(tensor.get_shape().ndims == num_dims, [tensor]) + return shape_utils.combined_static_and_dynamic_shape(tensor) + + +def _flatten_spatial_dimensions(batch_images): + batch_size, height, width, channels = _get_shape(batch_images, 4) + return tf.reshape(batch_images, [batch_size, height * width, + channels]) + + +def top_k_feature_map_locations(feature_map, max_pool_kernel_size=3, k=100, + per_channel=False): + """Returns the top k scores and their locations in a feature map. + + Given a feature map, the top k values (based on activation) are returned. If + `per_channel` is True, the top k values **per channel** are returned. + + The `max_pool_kernel_size` argument allows for selecting local peaks in a + region. This filtering is done per channel, so nothing prevents two values at + the same location to be returned. + + Args: + feature_map: [batch, height, width, channels] float32 feature map. + max_pool_kernel_size: integer, the max pool kernel size to use to pull off + peak score locations in a neighborhood (independently for each channel). + For example, to make sure no two neighboring values (in the same channel) + are returned, set max_pool_kernel_size=3. If None or 1, will not apply max + pooling. + k: The number of highest scoring locations to return. + per_channel: If True, will return the top k scores and locations per + feature map channel. If False, the top k across the entire feature map + (height x width x channels) are returned. + + Returns: + Tuple of + scores: A [batch, N] float32 tensor with scores from the feature map in + descending order. If per_channel is False, N = k. Otherwise, + N = k * channels, and the first k elements correspond to channel 0, the + second k correspond to channel 1, etc. + y_indices: A [batch, N] int tensor with y indices of the top k feature map + locations. If per_channel is False, N = k. Otherwise, + N = k * channels. + x_indices: A [batch, N] int tensor with x indices of the top k feature map + locations. If per_channel is False, N = k. Otherwise, + N = k * channels. + channel_indices: A [batch, N] int tensor with channel indices of the top k + feature map locations. If per_channel is False, N = k. Otherwise, + N = k * channels. + """ + if not max_pool_kernel_size or max_pool_kernel_size == 1: + feature_map_peaks = feature_map + else: + feature_map_max_pool = tf.nn.max_pool( + feature_map, ksize=max_pool_kernel_size, strides=1, padding='SAME') + + feature_map_peak_mask = tf.math.abs( + feature_map - feature_map_max_pool) < PEAK_EPSILON + + # Zero out everything that is not a peak. + feature_map_peaks = ( + feature_map * _to_float32(feature_map_peak_mask)) + + batch_size, _, width, num_channels = _get_shape(feature_map, 4) + + if per_channel: + # Perform top k over batch and channels. 
+ feature_map_peaks_transposed = tf.transpose(feature_map_peaks, + perm=[0, 3, 1, 2]) + feature_map_peaks_transposed = tf.reshape( + feature_map_peaks_transposed, [batch_size, num_channels, -1]) + scores, peak_flat_indices = tf.math.top_k(feature_map_peaks_transposed, k=k) + # Convert the indices such that they represent the location in the full + # (flattened) feature map of size [batch, height * width * channels]. + channel_idx = tf.range(num_channels)[tf.newaxis, :, tf.newaxis] + peak_flat_indices = num_channels * peak_flat_indices + channel_idx + scores = tf.reshape(scores, [batch_size, -1]) + peak_flat_indices = tf.reshape(peak_flat_indices, [batch_size, -1]) + else: + feature_map_peaks_flat = tf.reshape(feature_map_peaks, [batch_size, -1]) + scores, peak_flat_indices = tf.math.top_k(feature_map_peaks_flat, k=k) + + # Get x, y and channel indices corresponding to the top indices in the flat + # array. + y_indices, x_indices, channel_indices = ( + row_col_channel_indices_from_flattened_indices( + peak_flat_indices, width, num_channels)) + return scores, y_indices, x_indices, channel_indices + + +def prediction_tensors_to_boxes(detection_scores, y_indices, x_indices, + channel_indices, height_width_predictions, + offset_predictions): + """Converts CenterNet class-center, offset and size predictions to boxes. + + Args: + detection_scores: A [batch, num_boxes] float32 tensor with detection + scores in range [0, 1]. + y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to + object center locations (expressed in output coordinate frame). + x_indices: A [batch, num_boxes] int32 tensor with x indices corresponding to + object center locations (expressed in output coordinate frame). + channel_indices: A [batch, num_boxes] int32 tensor with channel indices + corresponding to object classes. + height_width_predictions: A float tensor of shape [batch_size, height, + width, 2] representing the height and width of a box centered at each + pixel. + offset_predictions: A float tensor of shape [batch_size, height, width, 2] + representing the y and x offsets of a box centered at each pixel. This + helps reduce the error from downsampling. + + Returns: + detection_boxes: A tensor of shape [batch_size, num_boxes, 4] holding the + the raw bounding box coordinates of boxes. + detection_classes: An integer tensor of shape [batch_size, num_boxes] + indicating the predicted class for each box. + detection_scores: A float tensor of shape [batch_size, num_boxes] indicating + the score for each box. + num_detections: An integer tensor of shape [batch_size,] indicating the + number of boxes detected for each sample in the batch. 
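
The body of prediction_tensors_to_boxes that follows gathers the size and offset predictions at the peak locations and converts them to corner coordinates. For a single detection the arithmetic reduces to the numpy sketch below (the function name is illustrative).

```python
import numpy as np

def center_to_box(y_index, x_index, height_width, offset):
  """[ymin, xmin, ymax, xmax] from a peak plus predicted size and offset,
  all in output-stride coordinates."""
  h, w = height_width
  center_y = y_index + offset[0]
  center_x = x_index + offset[1]
  return np.array([center_y - h / 2.0, center_x - w / 2.0,
                   center_y + h / 2.0, center_x + w / 2.0])

# Peak at (y=12, x=20), predicted size (8, 6), sub-pixel offset (0.25, -0.5):
print(center_to_box(12, 20, height_width=(8.0, 6.0), offset=(0.25, -0.5)))
# -> [ 8.25 16.5  16.25 22.5 ]
```
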
+ + """ + _, _, width, _ = _get_shape(height_width_predictions, 4) + + peak_spatial_indices = flattened_indices_from_row_col_indices( + y_indices, x_indices, width) + y_indices = _to_float32(y_indices) + x_indices = _to_float32(x_indices) + + height_width_flat = _flatten_spatial_dimensions(height_width_predictions) + offsets_flat = _flatten_spatial_dimensions(offset_predictions) + + height_width = tf.gather(height_width_flat, peak_spatial_indices, + batch_dims=1) + offsets = tf.gather(offsets_flat, peak_spatial_indices, batch_dims=1) + + heights, widths = tf.unstack(height_width, axis=2) + y_offsets, x_offsets = tf.unstack(offsets, axis=2) + + detection_classes = channel_indices + + num_detections = tf.reduce_sum(tf.to_int32(detection_scores > 0), axis=1) + + boxes = tf.stack([y_indices + y_offsets - heights / 2.0, + x_indices + x_offsets - widths / 2.0, + y_indices + y_offsets + heights / 2.0, + x_indices + x_offsets + widths / 2.0], axis=2) + + return boxes, detection_classes, detection_scores, num_detections + + +def prediction_tensors_to_keypoint_candidates( + keypoint_heatmap_predictions, + keypoint_heatmap_offsets, + keypoint_score_threshold=0.1, + max_pool_kernel_size=1, + max_candidates=20): + """Convert keypoint heatmap predictions and offsets to keypoint candidates. + + Args: + keypoint_heatmap_predictions: A float tensor of shape [batch_size, height, + width, num_keypoints] representing the per-keypoint heatmaps. + keypoint_heatmap_offsets: A float tensor of shape [batch_size, height, + width, 2] (or [batch_size, height, width, 2 * num_keypoints] if + 'per_keypoint_offset' is set True) representing the per-keypoint offsets. + keypoint_score_threshold: float, the threshold for considering a keypoint + a candidate. + max_pool_kernel_size: integer, the max pool kernel size to use to pull off + peak score locations in a neighborhood. For example, to make sure no two + neighboring values for the same keypoint are returned, set + max_pool_kernel_size=3. If None or 1, will not apply any local filtering. + max_candidates: integer, maximum number of keypoint candidates per + keypoint type. + + Returns: + keypoint_candidates: A tensor of shape + [batch_size, max_candidates, num_keypoints, 2] holding the + location of keypoint candidates in [y, x] format (expressed in absolute + coordinates in the output coordinate frame). + keypoint_scores: A float tensor of shape + [batch_size, max_candidates, num_keypoints] with the scores for each + keypoint candidate. The scores come directly from the heatmap predictions. + num_keypoint_candidates: An integer tensor of shape + [batch_size, num_keypoints] with the number of candidates for each + keypoint type, as it's possible to filter some candidates due to the score + threshold. + """ + batch_size, _, width, num_keypoints = _get_shape( + keypoint_heatmap_predictions, 4) + # Get x, y and channel indices corresponding to the top indices in the + # keypoint heatmap predictions. + # Note that the top k candidates are produced for **each keypoint type**. + # Might be worth eventually trying top k in the feature map, independent of + # the keypoint type. 
+ keypoint_scores, y_indices, x_indices, channel_indices = ( + top_k_feature_map_locations(keypoint_heatmap_predictions, + max_pool_kernel_size=max_pool_kernel_size, + k=max_candidates, + per_channel=True)) + + peak_spatial_indices = flattened_indices_from_row_col_indices( + y_indices, x_indices, width) + y_indices = _to_float32(y_indices) + x_indices = _to_float32(x_indices) + + offsets_flat = _flatten_spatial_dimensions(keypoint_heatmap_offsets) + + selected_offsets = tf.gather(offsets_flat, peak_spatial_indices, batch_dims=1) + _, num_indices, num_channels = _get_shape(selected_offsets, 3) + if num_channels > 2: + reshaped_offsets = tf.reshape(selected_offsets, + [batch_size, num_indices, -1, 2]) + offsets = tf.gather(reshaped_offsets, channel_indices, batch_dims=2) + else: + offsets = selected_offsets + y_offsets, x_offsets = tf.unstack(offsets, axis=2) + + keypoint_candidates = tf.stack([y_indices + y_offsets, + x_indices + x_offsets], axis=2) + keypoint_candidates = tf.reshape( + keypoint_candidates, + [batch_size, num_keypoints, max_candidates, 2]) + keypoint_candidates = tf.transpose(keypoint_candidates, [0, 2, 1, 3]) + keypoint_scores = tf.reshape( + keypoint_scores, + [batch_size, num_keypoints, max_candidates]) + keypoint_scores = tf.transpose(keypoint_scores, [0, 2, 1]) + num_candidates = tf.reduce_sum( + tf.to_int32(keypoint_scores >= keypoint_score_threshold), axis=1) + + return keypoint_candidates, keypoint_scores, num_candidates + + +def regressed_keypoints_at_object_centers(regressed_keypoint_predictions, + y_indices, x_indices): + """Returns the regressed keypoints at specified object centers. + + The original keypoint predictions are regressed relative to each feature map + location. The returned keypoints are expressed in absolute coordinates in the + output frame (i.e. the center offsets are added to each individual regressed + set of keypoints). + + Args: + regressed_keypoint_predictions: A float tensor of shape + [batch_size, height, width, 2 * num_keypoints] holding regressed + keypoints. The last dimension has keypoint coordinates ordered as follows: + [y0, x0, y1, x1, ..., y{J-1}, x{J-1}] where J is the number of keypoints. + y_indices: A [batch, num_instances] int tensor holding y indices for object + centers. These indices correspond to locations in the output feature map. + x_indices: A [batch, num_instances] int tensor holding x indices for object + centers. These indices correspond to locations in the output feature map. + + Returns: + A float tensor of shape [batch_size, num_objects, 2 * num_keypoints] where + regressed keypoints are gathered at the provided locations, and converted + to absolute coordinates in the output coordinate frame. 
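
The body of regressed_keypoints_at_object_centers that follows flattens the spatial grid, gathers the relative keypoint offsets at the center indices (y * width + x) and adds back the center coordinates. A single-image numpy analogue (the function name is illustrative):

```python
import numpy as np

def keypoints_at_centers(regressed, y_indices, x_indices):
  """regressed: [height, width, 2 * num_keypoints] with (dy, dx) pairs;
  y_indices, x_indices: [num_instances] integer center locations.
  Returns absolute keypoints of shape [num_instances, num_keypoints, 2]."""
  num_keypoints = regressed.shape[-1] // 2
  relative = regressed[y_indices, x_indices].reshape(-1, num_keypoints, 2)
  centers = np.stack([y_indices, x_indices], axis=-1).astype(np.float32)
  return relative + centers[:, np.newaxis, :]
```
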
+ """ + batch_size, _, width, _ = _get_shape(regressed_keypoint_predictions, 4) + flattened_indices = flattened_indices_from_row_col_indices( + y_indices, x_indices, width) + _, num_instances = _get_shape(flattened_indices, 2) + + regressed_keypoints_flat = _flatten_spatial_dimensions( + regressed_keypoint_predictions) + + relative_regressed_keypoints = tf.gather( + regressed_keypoints_flat, flattened_indices, batch_dims=1) + relative_regressed_keypoints = tf.reshape( + relative_regressed_keypoints, + [batch_size, num_instances, -1, 2]) + relative_regressed_keypoints_y, relative_regressed_keypoints_x = tf.unstack( + relative_regressed_keypoints, axis=3) + y_indices = _to_float32(tf.expand_dims(y_indices, axis=-1)) + x_indices = _to_float32(tf.expand_dims(x_indices, axis=-1)) + absolute_regressed_keypoints = tf.stack( + [y_indices + relative_regressed_keypoints_y, + x_indices + relative_regressed_keypoints_x], + axis=3) + return tf.reshape(absolute_regressed_keypoints, + [batch_size, num_instances, -1]) + + +def refine_keypoints(regressed_keypoints, keypoint_candidates, keypoint_scores, + num_keypoint_candidates, bboxes=None, + unmatched_keypoint_score=0.1, box_scale=1.2, + candidate_search_scale=0.3, + candidate_ranking_mode='min_distance'): + """Refines regressed keypoints by snapping to the nearest candidate keypoints. + + The initial regressed keypoints represent a full set of keypoints regressed + from the centers of the objects. The keypoint candidates are estimated + independently from heatmaps, and are not associated with any object instances. + This function refines the regressed keypoints by "snapping" to the + nearest/highest score/highest score-distance ratio (depending on the + candidate_ranking_mode) candidate of the same keypoint type (e.g. "nose"). + If no candidates are nearby, the regressed keypoint remains unchanged. + + In order to snap a regressed keypoint to a candidate keypoint, the following + must be satisfied: + - the candidate keypoint must be of the same type as the regressed keypoint + - the candidate keypoint must not lie outside the predicted boxes (or the + boxes which encloses the regressed keypoints for the instance if `bboxes` is + not provided). Note that the box is scaled by + `regressed_box_scale` in height and width, to provide some margin around the + keypoints + - the distance to the closest candidate keypoint cannot exceed + candidate_search_scale * max(height, width), where height and width refer to + the bounding box for the instance. + + Note that the same candidate keypoint is allowed to snap to regressed + keypoints in difference instances. + + Args: + regressed_keypoints: A float tensor of shape + [batch_size, num_instances, num_keypoints, 2] with the initial regressed + keypoints. + keypoint_candidates: A tensor of shape + [batch_size, max_candidates, num_keypoints, 2] holding the location of + keypoint candidates in [y, x] format (expressed in absolute coordinates in + the output coordinate frame). + keypoint_scores: A float tensor of shape + [batch_size, max_candidates, num_keypoints] indicating the scores for + keypoint candidates. + num_keypoint_candidates: An integer tensor of shape + [batch_size, num_keypoints] indicating the number of valid candidates for + each keypoint type, as there may be padding (dim 1) of + `keypoint_candidates` and `keypoint_scores`. + bboxes: A tensor of shape [batch_size, num_instances, 4] with predicted + bounding boxes for each instance, expressed in the output coordinate + frame. 
If not provided, boxes will be computed from regressed keypoints. + unmatched_keypoint_score: float, the default score to use for regressed + keypoints that are not successfully snapped to a nearby candidate. + box_scale: float, the multiplier to expand the bounding boxes (either the + provided boxes or those which tightly cover the regressed keypoints) for + an instance. This scale is typically larger than 1.0 when not providing + `bboxes`. + candidate_search_scale: float, the scale parameter that multiplies the + largest dimension of a bounding box. The resulting distance becomes a + search radius for candidates in the vicinity of each regressed keypoint. + candidate_ranking_mode: A string as one of ['min_distance', + 'score_distance_ratio'] indicating how to select the candidate. If invalid + value is provided, an ValueError will be raised. + + Returns: + A tuple with: + refined_keypoints: A float tensor of shape + [batch_size, num_instances, num_keypoints, 2] with the final, refined + keypoints. + refined_scores: A float tensor of shape + [batch_size, num_instances, num_keypoints] with scores associated with all + instances and keypoints in `refined_keypoints`. + + Raises: + ValueError: if provided candidate_ranking_mode is not one of + ['min_distance', 'score_distance_ratio'] + """ + batch_size, num_instances, num_keypoints, _ = ( + shape_utils.combined_static_and_dynamic_shape(regressed_keypoints)) + max_candidates = keypoint_candidates.shape[1] + + # Replace all invalid (i.e. padded) keypoint candidates with NaN. + # This will prevent them from being considered. + range_tiled = tf.tile( + tf.reshape(tf.range(max_candidates), [1, max_candidates, 1]), + [batch_size, 1, num_keypoints]) + num_candidates_tiled = tf.tile(tf.expand_dims(num_keypoint_candidates, 1), + [1, max_candidates, 1]) + invalid_candidates = range_tiled >= num_candidates_tiled + nan_mask = tf.where( + invalid_candidates, + np.nan * tf.ones_like(invalid_candidates, dtype=tf.float32), + tf.ones_like(invalid_candidates, dtype=tf.float32)) + keypoint_candidates_with_nans = tf.math.multiply( + keypoint_candidates, tf.expand_dims(nan_mask, -1)) + + # Pairwise squared distances between regressed keypoints and candidate + # keypoints (for a single keypoint type). + # Shape [batch_size, num_instances, max_candidates, num_keypoints]. + regressed_keypoint_expanded = tf.expand_dims(regressed_keypoints, + axis=2) + keypoint_candidates_expanded = tf.expand_dims( + keypoint_candidates_with_nans, axis=1) + sqrd_distances = tf.math.reduce_sum( + tf.math.squared_difference(regressed_keypoint_expanded, + keypoint_candidates_expanded), + axis=-1) + distances = tf.math.sqrt(sqrd_distances) + + # Determine the candidates that have the minimum distance to the regressed + # keypoints. Shape [batch_size, num_instances, num_keypoints]. + min_distances = tf.math.reduce_min(distances, axis=2) + if candidate_ranking_mode == 'min_distance': + nearby_candidate_inds = tf.math.argmin(distances, axis=2) + elif candidate_ranking_mode == 'score_distance_ratio': + # tiled_keypoint_scores: + # Shape [batch_size, num_instances, max_candidates, num_keypoints]. 
+ tiled_keypoint_scores = tf.tile( + tf.expand_dims(keypoint_scores, axis=1), + multiples=[1, num_instances, 1, 1]) + ranking_scores = tiled_keypoint_scores / (distances + 1e-6) + nearby_candidate_inds = tf.math.argmax(ranking_scores, axis=2) + else: + raise ValueError('Not recognized candidate_ranking_mode: %s' % + candidate_ranking_mode) + + # Gather the coordinates and scores corresponding to the closest candidates. + # Shape of tensors are [batch_size, num_instances, num_keypoints, 2] and + # [batch_size, num_instances, num_keypoints], respectively. + nearby_candidate_coords, nearby_candidate_scores = ( + _gather_candidates_at_indices(keypoint_candidates, keypoint_scores, + nearby_candidate_inds)) + + if bboxes is None: + # Create bboxes from regressed keypoints. + # Shape [batch_size * num_instances, 4]. + regressed_keypoints_flattened = tf.reshape( + regressed_keypoints, [-1, num_keypoints, 2]) + bboxes_flattened = keypoint_ops.keypoints_to_enclosing_bounding_boxes( + regressed_keypoints_flattened) + else: + bboxes_flattened = tf.reshape(bboxes, [-1, 4]) + + # Scale the bounding boxes. + # Shape [batch_size, num_instances, 4]. + boxlist = box_list.BoxList(bboxes_flattened) + boxlist_scaled = box_list_ops.scale_height_width( + boxlist, box_scale, box_scale) + bboxes_scaled = boxlist_scaled.get() + bboxes = tf.reshape(bboxes_scaled, [batch_size, num_instances, 4]) + + # Get ymin, xmin, ymax, xmax bounding box coordinates, tiled per keypoint. + # Shape [batch_size, num_instances, num_keypoints]. + bboxes_tiled = tf.tile(tf.expand_dims(bboxes, 2), [1, 1, num_keypoints, 1]) + ymin, xmin, ymax, xmax = tf.unstack(bboxes_tiled, axis=3) + + # Produce a mask that indicates whether the original regressed keypoint + # should be used instead of a candidate keypoint. + # Shape [batch_size, num_instances, num_keypoints]. + search_radius = ( + tf.math.maximum(ymax - ymin, xmax - xmin) * candidate_search_scale) + mask = (tf.cast(nearby_candidate_coords[:, :, :, 0] < ymin, tf.int32) + + tf.cast(nearby_candidate_coords[:, :, :, 0] > ymax, tf.int32) + + tf.cast(nearby_candidate_coords[:, :, :, 1] < xmin, tf.int32) + + tf.cast(nearby_candidate_coords[:, :, :, 1] > xmax, tf.int32) + + # Filter out the chosen candidate with score lower than unmatched + # keypoint score. + tf.cast(nearby_candidate_scores < + unmatched_keypoint_score, tf.int32) + + tf.cast(min_distances > search_radius, tf.int32)) + mask = mask > 0 + + # Create refined keypoints where candidate keypoints replace original + # regressed keypoints if they are in the vicinity of the regressed keypoints. + # Shape [batch_size, num_instances, num_keypoints, 2]. + refined_keypoints = tf.where( + tf.tile(tf.expand_dims(mask, -1), [1, 1, 1, 2]), + regressed_keypoints, + nearby_candidate_coords) + + # Update keypoints scores. In the case where we use the original regressed + # keypoints, we use a default score of `unmatched_keypoint_score`. + # Shape [batch_size, num_instances, num_keypoints]. + refined_scores = tf.where( + mask, + unmatched_keypoint_score * tf.ones_like(nearby_candidate_scores), + nearby_candidate_scores) + + return refined_keypoints, refined_scores + + +def _pad_to_full_keypoint_dim(keypoint_coords, keypoint_scores, keypoint_inds, + num_total_keypoints): + """Scatter keypoint elements into tensors with full keypoints dimension. + + Args: + keypoint_coords: a [batch_size, num_instances, num_keypoints, 2] float32 + tensor. + keypoint_scores: a [batch_size, num_instances, num_keypoints] float32 + tensor. 
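
The snapping rules implemented by refine_keypoints above reduce, for one keypoint of one instance, to a simple decision: keep the nearest same-type candidate only if it lies inside the (scaled) box, within the search radius, and scores at least the unmatched default. A scalar numpy sketch of that decision follows; the names are illustrative, and the real code is vectorized and also offers a score/distance ranking mode.

```python
import numpy as np

def scale_box(box, scale):
  """Expands [ymin, xmin, ymax, xmax] about its center by `scale`."""
  ymin, xmin, ymax, xmax = box
  center_y, center_x = (ymin + ymax) / 2.0, (xmin + xmax) / 2.0
  half_h, half_w = (ymax - ymin) * scale / 2.0, (xmax - xmin) * scale / 2.0
  return (center_y - half_h, center_x - half_w,
          center_y + half_h, center_x + half_w)

def snap_keypoint(regressed_yx, candidates_yx, candidate_scores, box,
                  unmatched_score=0.1, box_scale=1.2, search_scale=0.3):
  """Returns (refined_yx, score) for one keypoint of one instance."""
  ymin, xmin, ymax, xmax = scale_box(box, box_scale)
  search_radius = search_scale * max(ymax - ymin, xmax - xmin)
  distances = np.linalg.norm(candidates_yx - regressed_yx, axis=-1)
  best = int(np.argmin(distances))
  y, x = candidates_yx[best]
  inside_box = (ymin <= y <= ymax) and (xmin <= x <= xmax)
  if (inside_box and distances[best] <= search_radius
      and candidate_scores[best] >= unmatched_score):
    return candidates_yx[best], candidate_scores[best]
  return regressed_yx, unmatched_score
```
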
+ keypoint_inds: a list of integers that indicate the keypoint indices for + this specific keypoint class. These indices are used to scatter into + tensors that have a `num_total_keypoints` dimension. + num_total_keypoints: The total number of keypoints that this model predicts. + + Returns: + A tuple with + keypoint_coords_padded: a + [batch_size, num_instances, num_total_keypoints,2] float32 tensor. + keypoint_scores_padded: a [batch_size, num_instances, num_total_keypoints] + float32 tensor. + """ + batch_size, num_instances, _, _ = ( + shape_utils.combined_static_and_dynamic_shape(keypoint_coords)) + kpt_coords_transposed = tf.transpose(keypoint_coords, [2, 0, 1, 3]) + kpt_scores_transposed = tf.transpose(keypoint_scores, [2, 0, 1]) + kpt_inds_tensor = tf.expand_dims(keypoint_inds, axis=-1) + kpt_coords_scattered = tf.scatter_nd( + indices=kpt_inds_tensor, + updates=kpt_coords_transposed, + shape=[num_total_keypoints, batch_size, num_instances, 2]) + kpt_scores_scattered = tf.scatter_nd( + indices=kpt_inds_tensor, + updates=kpt_scores_transposed, + shape=[num_total_keypoints, batch_size, num_instances]) + keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 2, 0, 3]) + keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 2, 0]) + return keypoint_coords_padded, keypoint_scores_padded + + +def _pad_to_full_instance_dim(keypoint_coords, keypoint_scores, instance_inds, + max_instances): + """Scatter keypoint elements into tensors with full instance dimension. + + Args: + keypoint_coords: a [batch_size, num_instances, num_keypoints, 2] float32 + tensor. + keypoint_scores: a [batch_size, num_instances, num_keypoints] float32 + tensor. + instance_inds: a list of integers that indicate the instance indices for + these keypoints. These indices are used to scatter into tensors + that have a `max_instances` dimension. + max_instances: The maximum number of instances detected by the model. + + Returns: + A tuple with + keypoint_coords_padded: a [batch_size, max_instances, num_keypoints, 2] + float32 tensor. + keypoint_scores_padded: a [batch_size, max_instances, num_keypoints] + float32 tensor. + """ + batch_size, _, num_keypoints, _ = ( + shape_utils.combined_static_and_dynamic_shape(keypoint_coords)) + kpt_coords_transposed = tf.transpose(keypoint_coords, [1, 0, 2, 3]) + kpt_scores_transposed = tf.transpose(keypoint_scores, [1, 0, 2]) + instance_inds = tf.expand_dims(instance_inds, axis=-1) + kpt_coords_scattered = tf.scatter_nd( + indices=instance_inds, + updates=kpt_coords_transposed, + shape=[max_instances, batch_size, num_keypoints, 2]) + kpt_scores_scattered = tf.scatter_nd( + indices=instance_inds, + updates=kpt_scores_transposed, + shape=[max_instances, batch_size, num_keypoints]) + keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 0, 2, 3]) + keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 0, 2]) + return keypoint_coords_padded, keypoint_scores_padded + + +def _gather_candidates_at_indices(keypoint_candidates, keypoint_scores, + indices): + """Gathers keypoint candidate coordinates and scores at indices. + + Args: + keypoint_candidates: a float tensor of shape [batch_size, max_candidates, + num_keypoints, 2] with candidate coordinates. + keypoint_scores: a float tensor of shape [batch_size, max_candidates, + num_keypoints] with keypoint scores. + indices: an integer tensor of shape [batch_size, num_indices, num_keypoints] + with indices. 
+ + Returns: + A tuple with + gathered_keypoint_candidates: a float tensor of shape [batch_size, + num_indices, num_keypoints, 2] with gathered coordinates. + gathered_keypoint_scores: a float tensor of shape [batch_size, + num_indices, num_keypoints] with gathered scores. + """ + # Transpose tensors so that all batch dimensions are up front. + keypoint_candidates_transposed = tf.transpose(keypoint_candidates, + [0, 2, 1, 3]) + keypoint_scores_transposed = tf.transpose(keypoint_scores, [0, 2, 1]) + nearby_candidate_inds_transposed = tf.transpose(indices, + [0, 2, 1]) + nearby_candidate_coords_transposed = tf.gather( + keypoint_candidates_transposed, nearby_candidate_inds_transposed, + batch_dims=2) + nearby_candidate_scores_transposed = tf.gather( + keypoint_scores_transposed, nearby_candidate_inds_transposed, + batch_dims=2) + gathered_keypoint_candidates = tf.transpose(nearby_candidate_coords_transposed, + [0, 2, 1, 3]) + gathered_keypoint_scores = tf.transpose(nearby_candidate_scores_transposed, + [0, 2, 1]) + return gathered_keypoint_candidates, gathered_keypoint_scores + + +def flattened_indices_from_row_col_indices(row_indices, col_indices, num_cols): + """Get the index in a flattened array given row and column indices.""" + return (row_indices * num_cols) + col_indices + + +def row_col_channel_indices_from_flattened_indices(indices, num_cols, + num_channels): + """Computes row, column and channel indices from flattened indices. + + Args: + indices: An integer tensor of any shape holding the indices in the flattened + space. + num_cols: Number of columns in the image (width). + num_channels: Number of channels in the image. + + Returns: + row_indices: The row indices corresponding to each of the input indices. + Same shape as indices. + col_indices: The column indices corresponding to each of the input indices. + Same shape as indices. + channel_indices: The channel indices corresponding to each of the input + indices. + """ + row_indices = (indices // num_channels) // num_cols + col_indices = (indices // num_channels) % num_cols + channel_indices = indices % num_channels + + return row_indices, col_indices, channel_indices + + +def get_valid_anchor_weights_in_flattened_image(true_image_shapes, height, + width): + """Computes valid anchor weights for an image assuming pixels will be flattened. + + This function is useful when we only want to penalize valid areas in the + image in the case when padding is used. The function assumes that the loss + function will be applied after flattening the spatial dimensions and returns + anchor weights accordingly. + + Args: + true_image_shapes: An integer tensor of shape [batch_size, 3] representing + the true image shape (without padding) for each sample in the batch. + height: height of the prediction from the network. + width: width of the prediction from the network. + + Returns: + valid_anchor_weights: a float tensor of shape [batch_size, height * width] + with 1s in locations where the spatial coordinates fall within the height + and width in true_image_shapes.
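+
+ For example (an illustrative case): with height=2, width=3 and a true
+ image shape of [1, 2, 3], only row 0, columns 0 and 1 are valid, so the
+ weights for that image are [1, 1, 0, 0, 0, 0].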
+ """ + + indices = tf.reshape(tf.range(height * width), [1, -1]) + batch_size = tf.shape(true_image_shapes)[0] + batch_indices = tf.ones((batch_size, 1), dtype=tf.int32) * indices + + y_coords, x_coords, _ = row_col_channel_indices_from_flattened_indices( + batch_indices, width, 1) + + max_y, max_x = true_image_shapes[:, 0], true_image_shapes[:, 1] + max_x = _to_float32(tf.expand_dims(max_x, 1)) + max_y = _to_float32(tf.expand_dims(max_y, 1)) + + x_coords = _to_float32(x_coords) + y_coords = _to_float32(y_coords) + + valid_mask = tf.math.logical_and(x_coords < max_x, y_coords < max_y) + + return _to_float32(valid_mask) + + +def convert_strided_predictions_to_normalized_boxes(boxes, stride, + true_image_shapes): + """Converts predictions in the output space to normalized boxes. + + Boxes falling outside the valid image boundary are clipped to be on the + boundary. + + Args: + boxes: A tensor of shape [batch_size, num_boxes, 4] holding the raw + coordinates of boxes in the model's output space. + stride: The stride in the output space. + true_image_shapes: A tensor of shape [batch_size, 3] representing the true + shape of the input not considering padding. + + Returns: + boxes: A tensor of shape [batch_size, num_boxes, 4] representing the + coordinates of the normalized boxes. + """ + + def _normalize_boxlist(args): + + boxes, height, width = args + boxes = box_list_ops.scale(boxes, stride, stride) + boxes = box_list_ops.to_normalized_coordinates(boxes, height, width) + boxes = box_list_ops.clip_to_window(boxes, [0., 0., 1., 1.], + filter_nonoverlapping=False) + return boxes + + box_lists = [box_list.BoxList(boxes) for boxes in tf.unstack(boxes, axis=0)] + true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1) + + true_heights_list = tf.unstack(true_heights, axis=0) + true_widths_list = tf.unstack(true_widths, axis=0) + + box_lists = list(map(_normalize_boxlist, + zip(box_lists, true_heights_list, true_widths_list))) + boxes = tf.stack([box_list_instance.get() for + box_list_instance in box_lists], axis=0) + + return boxes + + +def convert_strided_predictions_to_normalized_keypoints( + keypoint_coords, keypoint_scores, stride, true_image_shapes, + clip_out_of_frame_keypoints=False): + """Converts predictions in the output space to normalized keypoints. + + If clip_out_of_frame_keypoints=False, keypoint coordinates falling outside + the valid image boundary are normalized but not clipped; If + clip_out_of_frame_keypoints=True, keypoint coordinates falling outside the + valid image boundary are clipped to the closest image boundary and the scores + will be set to 0.0. + + Args: + keypoint_coords: A tensor of shape + [batch_size, num_instances, num_keypoints, 2] holding the raw coordinates + of keypoints in the model's output space. + keypoint_scores: A tensor of shape + [batch_size, num_instances, num_keypoints] holding the keypoint scores. + stride: The stride in the output space. + true_image_shapes: A tensor of shape [batch_size, 3] representing the true + shape of the input not considering padding. + clip_out_of_frame_keypoints: A boolean indicating whether keypoints outside + the image boundary should be clipped. If True, keypoint coords will be + clipped to image boundary. If False, keypoints are normalized but not + filtered based on their location. + + Returns: + keypoint_coords_normalized: A tensor of shape + [batch_size, num_instances, num_keypoints, 2] representing the coordinates + of the normalized keypoints. 
+ keypoint_scores: A tensor of shape + [batch_size, num_instances, num_keypoints] representing the updated + keypoint scores. + """ + # Flatten keypoints and scores. + batch_size, _, _, _ = ( + shape_utils.combined_static_and_dynamic_shape(keypoint_coords)) + + # Scale and normalize keypoints. + true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1) + yscale = float(stride) / tf.cast(true_heights, tf.float32) + xscale = float(stride) / tf.cast(true_widths, tf.float32) + yx_scale = tf.stack([yscale, xscale], axis=1) + keypoint_coords_normalized = keypoint_coords * tf.reshape( + yx_scale, [batch_size, 1, 1, 2]) + + if clip_out_of_frame_keypoints: + # Determine the keypoints that are in the true image regions. + valid_indices = tf.logical_and( + tf.logical_and(keypoint_coords_normalized[:, :, :, 0] >= 0.0, + keypoint_coords_normalized[:, :, :, 0] <= 1.0), + tf.logical_and(keypoint_coords_normalized[:, :, :, 1] >= 0.0, + keypoint_coords_normalized[:, :, :, 1] <= 1.0)) + batch_window = tf.tile( + tf.constant([[0.0, 0.0, 1.0, 1.0]], dtype=tf.float32), + multiples=[batch_size, 1]) + def clip_to_window(inputs): + keypoints, window = inputs + return keypoint_ops.clip_to_window(keypoints, window) + keypoint_coords_normalized = tf.map_fn( + clip_to_window, (keypoint_coords_normalized, batch_window), + dtype=tf.float32, back_prop=False) + keypoint_scores = tf.where(valid_indices, keypoint_scores, + tf.zeros_like(keypoint_scores)) + return keypoint_coords_normalized, keypoint_scores + + +def convert_strided_predictions_to_instance_masks( + boxes, classes, masks, stride, mask_height, mask_width, + true_image_shapes, score_threshold=0.5): + """Converts predicted full-image masks into instance masks. + + For each predicted detection box: + * Crop and resize the predicted mask based on the detected bounding box + coordinates and class prediction. Uses bilinear resampling. + * Binarize the mask using the provided score threshold. + + Args: + boxes: A tensor of shape [batch, max_detections, 4] holding the predicted + boxes, in normalized coordinates (relative to the true image dimensions). + classes: An integer tensor of shape [batch, max_detections] containing the + detected class for each box (0-indexed). + masks: A [batch, output_height, output_width, num_classes] float32 + tensor with class probabilities. + stride: The stride in the output space. + mask_height: The desired resized height for instance masks. + mask_width: The desired resized width for instance masks. + true_image_shapes: A tensor of shape [batch, 3] representing the true + shape of the inputs not considering padding. + score_threshold: The threshold at which to convert predicted mask + into foreground pixels. + + Returns: + A [batch_size, max_detections, mask_height, mask_width] uint8 tensor with + predicted foreground mask for each instance. The masks take values in + {0, 1}. + """ + _, output_height, output_width, _ = ( + shape_utils.combined_static_and_dynamic_shape(masks)) + input_height = stride * output_height + input_width = stride * output_width + + # Boxes are in normalized coordinates relative to true image shapes. Convert + # coordinates to be normalized relative to input image shapes (since masks + # may still have padding). + # Then crop and resize each mask. 
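+ # For example (illustrative numbers): with stride 4 and a 128x128 mask
+ # feature map, the padded input is 512x512. If the true (unpadded) image is
+ # 375x500, boxes normalized w.r.t. the true shape are rescaled by
+ # y_scale = 375 / 512 and x_scale = 500 / 512 so that they index correctly
+ # into the padded feature map before crop_and_resize.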
+ def crop_and_threshold_masks(args): + """Crops masks based on detection boxes.""" + boxes, classes, masks, true_height, true_width = args + boxlist = box_list.BoxList(boxes) + y_scale = true_height / input_height + x_scale = true_width / input_width + boxlist = box_list_ops.scale(boxlist, y_scale, x_scale) + boxes = boxlist.get() + # Convert masks from [input_height, input_width, num_classes] to + # [num_classes, input_height, input_width, 1]. + masks_4d = tf.transpose(masks, perm=[2, 0, 1])[:, :, :, tf.newaxis] + cropped_masks = tf2.image.crop_and_resize( + masks_4d, + boxes=boxes, + box_indices=classes, + crop_size=[mask_height, mask_width], + method='bilinear') + masks_3d = tf.squeeze(cropped_masks, axis=3) + masks_binarized = tf.math.greater_equal(masks_3d, score_threshold) + return tf.cast(masks_binarized, tf.uint8) + + true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1) + masks_for_image = shape_utils.static_or_dynamic_map_fn( + crop_and_threshold_masks, + elems=[boxes, classes, masks, true_heights, true_widths], + dtype=tf.uint8, + back_prop=False) + masks = tf.stack(masks_for_image, axis=0) + return masks + + +class ObjectDetectionParams( + collections.namedtuple('ObjectDetectionParams', [ + 'localization_loss', 'scale_loss_weight', 'offset_loss_weight', + 'task_loss_weight' + ])): + """Namedtuple to host object detection related parameters. + + This is a wrapper class over the fields that are either the hyper-parameters + or the loss functions needed for the object detection task. The class is + immutable once constructed. Please see the __new__ function for detailed + information for each field. + """ + + __slots__ = () + + def __new__(cls, + localization_loss, + scale_loss_weight, + offset_loss_weight, + task_loss_weight=1.0): + """Constructor with default values for ObjectDetectionParams. + + Args: + localization_loss: an object_detection.core.losses.Loss object to compute + the loss for the center offset and height/width predictions in + CenterNet. + scale_loss_weight: float, The weight for localizing box size. Note that + the scale loss is dependent on the input image size, since we penalize + the raw height and width. This constant may need to be adjusted + depending on the input size. + offset_loss_weight: float, The weight for localizing center offsets. + task_loss_weight: float, the weight of the object detection loss. + + Returns: + An initialized ObjectDetectionParams namedtuple. + """ + return super(ObjectDetectionParams, + cls).__new__(cls, localization_loss, scale_loss_weight, + offset_loss_weight, task_loss_weight) + + +class KeypointEstimationParams( + collections.namedtuple('KeypointEstimationParams', [ + 'task_name', 'class_id', 'keypoint_indices', 'classification_loss', + 'localization_loss', 'keypoint_labels', 'keypoint_std_dev', + 'keypoint_heatmap_loss_weight', 'keypoint_offset_loss_weight', + 'keypoint_regression_loss_weight', 'keypoint_candidate_score_threshold', + 'heatmap_bias_init', 'num_candidates_per_keypoint', 'task_loss_weight', + 'peak_max_pool_kernel_size', 'unmatched_keypoint_score', 'box_scale', + 'candidate_search_scale', 'candidate_ranking_mode', + 'offset_peak_radius', 'per_keypoint_offset' + ])): + """Namedtuple to host keypoint estimation related parameters. + + This is a wrapper class over the fields that are either the hyper-parameters + or the loss functions needed for the keypoint estimation task. The class is + immutable once constructed. Please see the __new__ function for detailed + information for each field.
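+
+ A minimal construction sketch (illustrative only; `some_classification_loss`
+ and `some_localization_loss` stand in for real
+ object_detection.core.losses.Loss instances):
+
+   kp_params = KeypointEstimationParams(
+       task_name='human_pose',
+       class_id=0,
+       keypoint_indices=[0, 1, 2],
+       keypoint_labels=['nose', 'left_shoulder', 'right_shoulder'],
+       classification_loss=some_classification_loss,
+       localization_loss=some_localization_loss)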
+ """ + + __slots__ = () + + def __new__(cls, + task_name, + class_id, + keypoint_indices, + classification_loss, + localization_loss, + keypoint_labels=None, + keypoint_std_dev=None, + keypoint_heatmap_loss_weight=1.0, + keypoint_offset_loss_weight=1.0, + keypoint_regression_loss_weight=1.0, + keypoint_candidate_score_threshold=0.1, + heatmap_bias_init=-2.19, + num_candidates_per_keypoint=100, + task_loss_weight=1.0, + peak_max_pool_kernel_size=3, + unmatched_keypoint_score=0.1, + box_scale=1.2, + candidate_search_scale=0.3, + candidate_ranking_mode='min_distance', + offset_peak_radius=0, + per_keypoint_offset=False): + """Constructor with default values for KeypointEstimationParams. + + Args: + task_name: string, the name of the task this namedtuple corresponds to. + Note that it should be an unique identifier of the task. + class_id: int, the ID of the class that contains the target keypoints to + considered in this task. For example, if the task is human pose + estimation, the class id should correspond to the "human" class. Note + that the ID is 0-based, meaning that class 0 corresponds to the first + non-background object class. + keypoint_indices: A list of integers representing the indicies of the + keypoints to be considered in this task. This is used to retrieve the + subset of the keypoints from gt_keypoints that should be considered in + this task. + classification_loss: an object_detection.core.losses.Loss object to + compute the loss for the class predictions in CenterNet. + localization_loss: an object_detection.core.losses.Loss object to compute + the loss for the center offset and height/width predictions in + CenterNet. + keypoint_labels: A list of strings representing the label text of each + keypoint, e.g. "nose", 'left_shoulder". Note that the length of this + list should be equal to keypoint_indices. + keypoint_std_dev: A list of float represent the standard deviation of the + Gaussian kernel used to generate the keypoint heatmap. It is to provide + the flexibility of using different sizes of Gaussian kernel for each + keypoint class. + keypoint_heatmap_loss_weight: float, The weight for the keypoint heatmap. + keypoint_offset_loss_weight: float, The weight for the keypoint offsets + loss. + keypoint_regression_loss_weight: float, The weight for keypoint regression + loss. Note that the loss is dependent on the input image size, since we + penalize the raw height and width. This constant may need to be adjusted + depending on the input size. + keypoint_candidate_score_threshold: float, The heatmap score threshold for + a keypoint to become a valid candidate. + heatmap_bias_init: float, the initial value of bias in the convolutional + kernel of the class prediction head. If set to None, the bias is + initialized with zeros. + num_candidates_per_keypoint: The maximum number of candidates to retrieve + for each keypoint. + task_loss_weight: float, the weight of the keypoint estimation loss. + peak_max_pool_kernel_size: Max pool kernel size to use to pull off peak + score locations in a neighborhood (independently for each keypoint + types). + unmatched_keypoint_score: The default score to use for regressed keypoints + that are not successfully snapped to a nearby candidate. + box_scale: The multiplier to expand the bounding boxes (either the + provided boxes or those which tightly cover the regressed keypoints). + candidate_search_scale: The scale parameter that multiplies the largest + dimension of a bounding box. 
The resulting distance becomes a search + radius for candidates in the vicinity of each regressed keypoint. + candidate_ranking_mode: One of ['min_distance', 'score_distance_ratio'] + indicating how to select the keypoint candidate. + offset_peak_radius: The radius (in the unit of output pixel) around + groundtruth heatmap peak to assign the offset targets. If set 0, then + the offset target will only be assigned to the heatmap peak (same + behavior as the original paper). + per_keypoint_offset: A bool indicates whether to assign offsets for each + keypoint channel separately. If set False, the output offset target has + the shape [batch_size, out_height, out_width, 2] (same behavior as the + original paper). If set True, the output offset target has the shape + [batch_size, out_height, out_width, 2 * num_keypoints] (recommended when + the offset_peak_radius is not zero). + + Returns: + An initialized KeypointEstimationParams namedtuple. + """ + return super(KeypointEstimationParams, cls).__new__( + cls, task_name, class_id, keypoint_indices, classification_loss, + localization_loss, keypoint_labels, keypoint_std_dev, + keypoint_heatmap_loss_weight, keypoint_offset_loss_weight, + keypoint_regression_loss_weight, keypoint_candidate_score_threshold, + heatmap_bias_init, num_candidates_per_keypoint, task_loss_weight, + peak_max_pool_kernel_size, unmatched_keypoint_score, box_scale, + candidate_search_scale, candidate_ranking_mode, offset_peak_radius, + per_keypoint_offset) + + +class ObjectCenterParams( + collections.namedtuple('ObjectCenterParams', [ + 'classification_loss', 'object_center_loss_weight', 'heatmap_bias_init', + 'min_box_overlap_iou', 'max_box_predictions', 'use_only_known_classes' + ])): + """Namedtuple to store object center prediction related parameters.""" + + __slots__ = () + + def __new__(cls, + classification_loss, + object_center_loss_weight, + heatmap_bias_init=-2.19, + min_box_overlap_iou=0.7, + max_box_predictions=100, + use_labeled_classes=False): + """Constructor with default values for ObjectCenterParams. + + Args: + classification_loss: an object_detection.core.losses.Loss object to + compute the loss for the class predictions in CenterNet. + object_center_loss_weight: float, The weight for the object center loss. + heatmap_bias_init: float, the initial value of bias in the convolutional + kernel of the object center prediction head. If set to None, the bias is + initialized with zeros. + min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes + need have with groundtruth boxes to not be penalized. This is used for + computing the class specific center heatmaps. + max_box_predictions: int, the maximum number of boxes to predict. + use_labeled_classes: boolean, compute the loss only labeled classes. + + Returns: + An initialized ObjectCenterParams namedtuple. + """ + return super(ObjectCenterParams, + cls).__new__(cls, classification_loss, + object_center_loss_weight, heatmap_bias_init, + min_box_overlap_iou, max_box_predictions, + use_labeled_classes) + + +class MaskParams( + collections.namedtuple('MaskParams', [ + 'classification_loss', 'task_loss_weight', 'mask_height', 'mask_width', + 'score_threshold', 'heatmap_bias_init' + ])): + """Namedtuple to store mask prediction related parameters.""" + + __slots__ = () + + def __new__(cls, + classification_loss, + task_loss_weight=1.0, + mask_height=256, + mask_width=256, + score_threshold=0.5, + heatmap_bias_init=-2.19): + """Constructor with default values for MaskParams. 
+ + Args: + classification_loss: an object_detection.core.losses.Loss object to + compute the loss for the semantic segmentation predictions in CenterNet. + task_loss_weight: float, The loss weight for the segmentation task. + mask_height: The height of the resized instance segmentation mask. + mask_width: The width of the resized instance segmentation mask. + score_threshold: The threshold at which to convert predicted mask + probabilities (after passing through sigmoid) into foreground pixels. + heatmap_bias_init: float, the initial value of bias in the convolutional + kernel of the semantic segmentation prediction head. If set to None, the + bias is initialized with zeros. + + Returns: + An initialized MaskParams namedtuple. + """ + return super(MaskParams, + cls).__new__(cls, classification_loss, + task_loss_weight, mask_height, mask_width, + score_threshold, heatmap_bias_init) + + +# The following constants are used to generate the keys of the +# (prediction, loss, target assigner,...) dictionaries used in CenterNetMetaArch +# class. +DETECTION_TASK = 'detection_task' +OBJECT_CENTER = 'object_center' +BOX_SCALE = 'box/scale' +BOX_OFFSET = 'box/offset' +KEYPOINT_REGRESSION = 'keypoint/regression' +KEYPOINT_HEATMAP = 'keypoint/heatmap' +KEYPOINT_OFFSET = 'keypoint/offset' +SEGMENTATION_TASK = 'segmentation_task' +SEGMENTATION_HEATMAP = 'segmentation/heatmap' +LOSS_KEY_PREFIX = 'Loss' + + +def get_keypoint_name(task_name, head_name): + return '%s/%s' % (task_name, head_name) + + +def get_num_instances_from_weights(groundtruth_weights_list): + """Computes the number of instances/boxes from the weights in a batch. + + Args: + groundtruth_weights_list: A list of float tensors with shape + [max_num_instances] representing whether there is an actual instance in + the image (with non-zero value) or is padded to match the + max_num_instances (with value 0.0). The list represents the batch + dimension. + + Returns: + A scalar integer tensor incidating how many instances/boxes are in the + images in the batch. Note that this function is usually used to normalize + the loss so the minimum return value is 1 to avoid weird behavior. + """ + num_instances = tf.reduce_sum( + [tf.math.count_nonzero(w) for w in groundtruth_weights_list]) + num_instances = tf.maximum(num_instances, 1) + return num_instances + + +class CenterNetMetaArch(model.DetectionModel): + """The CenterNet meta architecture [1]. + + [1]: https://arxiv.org/abs/1904.07850 + """ + + def __init__(self, + is_training, + add_summaries, + num_classes, + feature_extractor, + image_resizer_fn, + object_center_params, + object_detection_params=None, + keypoint_params_dict=None, + mask_params=None): + """Initializes a CenterNet model. + + Args: + is_training: Set to True if this model is being built for training. + add_summaries: Whether to add tf summaries in the model. + num_classes: int, The number of classes that the model should predict. + feature_extractor: A CenterNetFeatureExtractor to use to extract features + from an image. + image_resizer_fn: a callable for image resizing. This callable always + takes a rank-3 image tensor (corresponding to a single image) and + returns a rank-3 image tensor, possibly with new spatial dimensions and + a 1-D tensor of shape [3] indicating shape of true image within the + resized image tensor as the resized image tensor could be padded. See + builders/image_resizer_builder.py. + object_center_params: An ObjectCenterParams namedtuple. 
This object holds + the hyper-parameters for object center prediction. This is required by + either object detection or keypoint estimation tasks. + object_detection_params: An ObjectDetectionParams namedtuple. This object + holds the hyper-parameters necessary for object detection. Please see + the class definition for more details. + keypoint_params_dict: A dictionary that maps from task name to the + corresponding KeypointEstimationParams namedtuple. This object holds the + hyper-parameters necessary for multiple keypoint estimations. Please + see the class definition for more details. + mask_params: A MaskParams namedtuple. This object + holds the hyper-parameters for segmentation. Please see the class + definition for more details. + """ + assert object_detection_params or keypoint_params_dict + # Shorten the name for convenience and better formatting. + self._is_training = is_training + # The Objects as Points paper attaches loss functions to multiple + # (`num_feature_outputs`) feature maps in the the backbone. E.g. + # for the hourglass backbone, `num_feature_outputs` is 2. + self._feature_extractor = feature_extractor + self._num_feature_outputs = feature_extractor.num_feature_outputs + self._stride = self._feature_extractor.out_stride + self._image_resizer_fn = image_resizer_fn + self._center_params = object_center_params + self._od_params = object_detection_params + self._kp_params_dict = keypoint_params_dict + self._mask_params = mask_params + + # Construct the prediction head nets. + self._prediction_head_dict = self._construct_prediction_heads( + num_classes, + self._num_feature_outputs, + class_prediction_bias_init=self._center_params.heatmap_bias_init) + # Initialize the target assigners. + self._target_assigner_dict = self._initialize_target_assigners( + stride=self._stride, + min_box_overlap_iou=self._center_params.min_box_overlap_iou) + + # Will be used in VOD single_frame_meta_arch for tensor reshape. + self._batched_prediction_tensor_names = [] + + super(CenterNetMetaArch, self).__init__(num_classes) + + @property + def batched_prediction_tensor_names(self): + if not self._batched_prediction_tensor_names: + raise RuntimeError('Must call predict() method to get batched prediction ' + 'tensor names.') + return self._batched_prediction_tensor_names + + def _construct_prediction_heads(self, num_classes, num_feature_outputs, + class_prediction_bias_init): + """Constructs the prediction heads based on the specific parameters. + + Args: + num_classes: An integer indicating how many classes in total to predict. + num_feature_outputs: An integer indicating how many feature outputs to use + for calculating the loss. The Objects as Points paper attaches loss + functions to multiple (`num_feature_outputs`) feature maps in the the + backbone. E.g. for the hourglass backbone, `num_feature_outputs` is 2. + class_prediction_bias_init: float, the initial value of bias in the + convolutional kernel of the class prediction head. If set to None, the + bias is initialized with zeros. + + Returns: + A dictionary of keras modules generated by calling make_prediction_net + function. 
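+
+ For example (illustrative), with only the object detection task enabled the
+ returned dictionary has the form
+   {
+       'object_center': [center_head_1, ..., center_head_n],
+       'box/scale': [scale_head_1, ..., scale_head_n],
+       'box/offset': [offset_head_1, ..., offset_head_n],
+   }
+ where n equals `num_feature_outputs` and each entry is a keras module.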
+ """ + prediction_heads = {} + prediction_heads[OBJECT_CENTER] = [ + make_prediction_net(num_classes, bias_fill=class_prediction_bias_init) + for _ in range(num_feature_outputs) + ] + if self._od_params is not None: + prediction_heads[BOX_SCALE] = [ + make_prediction_net(NUM_SIZE_CHANNELS) + for _ in range(num_feature_outputs) + ] + prediction_heads[BOX_OFFSET] = [ + make_prediction_net(NUM_OFFSET_CHANNELS) + for _ in range(num_feature_outputs) + ] + if self._kp_params_dict is not None: + for task_name, kp_params in self._kp_params_dict.items(): + num_keypoints = len(kp_params.keypoint_indices) + prediction_heads[get_keypoint_name(task_name, KEYPOINT_HEATMAP)] = [ + make_prediction_net( + num_keypoints, bias_fill=kp_params.heatmap_bias_init) + for _ in range(num_feature_outputs) + ] + prediction_heads[get_keypoint_name(task_name, KEYPOINT_REGRESSION)] = [ + make_prediction_net(NUM_OFFSET_CHANNELS * num_keypoints) + for _ in range(num_feature_outputs) + ] + if kp_params.per_keypoint_offset: + prediction_heads[get_keypoint_name(task_name, KEYPOINT_OFFSET)] = [ + make_prediction_net(NUM_OFFSET_CHANNELS * num_keypoints) + for _ in range(num_feature_outputs) + ] + else: + prediction_heads[get_keypoint_name(task_name, KEYPOINT_OFFSET)] = [ + make_prediction_net(NUM_OFFSET_CHANNELS) + for _ in range(num_feature_outputs) + ] + if self._mask_params is not None: + prediction_heads[SEGMENTATION_HEATMAP] = [ + make_prediction_net(num_classes, + bias_fill=class_prediction_bias_init) + for _ in range(num_feature_outputs)] + return prediction_heads + + def _initialize_target_assigners(self, stride, min_box_overlap_iou): + """Initializes the target assigners and puts them in a dictionary. + + Args: + stride: An integer indicating the stride of the image. + min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes + need have with groundtruth boxes to not be penalized. This is used for + computing the class specific center heatmaps. + + Returns: + A dictionary of initialized target assigners for each task. + """ + target_assigners = {} + target_assigners[OBJECT_CENTER] = ( + cn_assigner.CenterNetCenterHeatmapTargetAssigner( + stride, min_box_overlap_iou)) + if self._od_params is not None: + target_assigners[DETECTION_TASK] = ( + cn_assigner.CenterNetBoxTargetAssigner(stride)) + if self._kp_params_dict is not None: + for task_name, kp_params in self._kp_params_dict.items(): + target_assigners[task_name] = ( + cn_assigner.CenterNetKeypointTargetAssigner( + stride=stride, + class_id=kp_params.class_id, + keypoint_indices=kp_params.keypoint_indices, + keypoint_std_dev=kp_params.keypoint_std_dev, + peak_radius=kp_params.offset_peak_radius, + per_keypoint_offset=kp_params.per_keypoint_offset)) + if self._mask_params is not None: + target_assigners[SEGMENTATION_TASK] = ( + cn_assigner.CenterNetMaskTargetAssigner(stride)) + + return target_assigners + + def _compute_object_center_loss(self, input_height, input_width, + object_center_predictions, per_pixel_weights): + """Computes the object center loss. + + Args: + input_height: An integer scalar tensor representing input image height. + input_width: An integer scalar tensor representing input image width. + object_center_predictions: A list of float tensors of shape [batch_size, + out_height, out_width, num_classes] representing the object center + feature maps. 
+ per_pixel_weights: A float tensor of shape [batch_size, + out_height * out_width, 1] with 1s in locations where the spatial + coordinates fall within the height and width in true_image_shapes. + + Returns: + A float scalar tensor representing the object center loss per instance. + """ + gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes) + gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes) + gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) + + if self._center_params.use_only_known_classes: + gt_labeled_classes_list = self.groundtruth_lists( + fields.InputDataFields.groundtruth_labeled_classes) + batch_labeled_classes = tf.stack(gt_labeled_classes_list, axis=0) + batch_labeled_classes_shape = tf.shape(batch_labeled_classes) + batch_labeled_classes = tf.reshape( + batch_labeled_classes, + [batch_labeled_classes_shape[0], 1, batch_labeled_classes_shape[-1]]) + per_pixel_weights = per_pixel_weights * batch_labeled_classes + + # Convert the groundtruth to targets. + assigner = self._target_assigner_dict[OBJECT_CENTER] + heatmap_targets = assigner.assign_center_targets_from_boxes( + height=input_height, + width=input_width, + gt_boxes_list=gt_boxes_list, + gt_classes_list=gt_classes_list, + gt_weights_list=gt_weights_list) + + flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets) + num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list)) + + loss = 0.0 + object_center_loss = self._center_params.classification_loss + # Loop through each feature output head. + for pred in object_center_predictions: + pred = _flatten_spatial_dimensions(pred) + loss += object_center_loss( + pred, flattened_heatmap_targets, weights=per_pixel_weights) + loss_per_instance = tf.reduce_sum(loss) / ( + float(len(object_center_predictions)) * num_boxes) + return loss_per_instance + + def _compute_object_detection_losses(self, input_height, input_width, + prediction_dict, per_pixel_weights): + """Computes the weighted object detection losses. + + This wrapper function calls the function which computes the losses for + object detection task and applies corresponding weights to the losses. + + Args: + input_height: An integer scalar tensor representing input image height. + input_width: An integer scalar tensor representing input image width. + prediction_dict: A dictionary holding predicted tensors output by + "predict" function. See "predict" function for more detailed + description. + per_pixel_weights: A float tensor of shape [batch_size, + out_height * out_width, 1] with 1s in locations where the spatial + coordinates fall within the height and width in true_image_shapes. + + Returns: + A dictionary of scalar float tensors representing the weighted losses for + object detection task: + BOX_SCALE: the weighted scale (height/width) loss. + BOX_OFFSET: the weighted object offset loss. + """ + od_scale_loss, od_offset_loss = self._compute_box_scale_and_offset_loss( + scale_predictions=prediction_dict[BOX_SCALE], + offset_predictions=prediction_dict[BOX_OFFSET], + input_height=input_height, + input_width=input_width) + loss_dict = {} + loss_dict[BOX_SCALE] = ( + self._od_params.scale_loss_weight * od_scale_loss) + loss_dict[BOX_OFFSET] = ( + self._od_params.offset_loss_weight * od_offset_loss) + return loss_dict + + def _compute_box_scale_and_offset_loss(self, input_height, input_width, + scale_predictions, offset_predictions): + """Computes the scale loss of the object detection task. 
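+
+ In addition to the scale loss, this method also computes the object offset
+ loss; both are normalized by the number of groundtruth boxes and by the
+ number of prediction heads.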
+ + Args: + input_height: An integer scalar tensor representing input image height. + input_width: An integer scalar tensor representing input image width. + scale_predictions: A list of float tensors of shape [batch_size, + out_height, out_width, 2] representing the prediction heads of the model + for object scale (i.e height and width). + offset_predictions: A list of float tensors of shape [batch_size, + out_height, out_width, 2] representing the prediction heads of the model + for object offset. + + Returns: + A tuple of two losses: + scale_loss: A float scalar tensor representing the object height/width + loss normalized by total number of boxes. + offset_loss: A float scalar tensor representing the object offset loss + normalized by total number of boxes + """ + # TODO(vighneshb) Explore a size invariant version of scale loss. + gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes) + gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) + num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list)) + num_predictions = float(len(scale_predictions)) + + assigner = self._target_assigner_dict[DETECTION_TASK] + (batch_indices, batch_height_width_targets, batch_offset_targets, + batch_weights) = assigner.assign_size_and_offset_targets( + height=input_height, + width=input_width, + gt_boxes_list=gt_boxes_list, + gt_weights_list=gt_weights_list) + batch_weights = tf.expand_dims(batch_weights, -1) + + scale_loss = 0 + offset_loss = 0 + localization_loss_fn = self._od_params.localization_loss + for scale_pred, offset_pred in zip(scale_predictions, offset_predictions): + # Compute the scale loss. + scale_pred = cn_assigner.get_batch_predictions_from_indices( + scale_pred, batch_indices) + scale_loss += localization_loss_fn( + scale_pred, batch_height_width_targets, weights=batch_weights) + # Compute the offset loss. 
+ offset_pred = cn_assigner.get_batch_predictions_from_indices( + offset_pred, batch_indices) + offset_loss += localization_loss_fn( + offset_pred, batch_offset_targets, weights=batch_weights) + scale_loss = tf.reduce_sum(scale_loss) / ( + num_predictions * num_boxes) + offset_loss = tf.reduce_sum(offset_loss) / ( + num_predictions * num_boxes) + return scale_loss, offset_loss + + def _compute_keypoint_estimation_losses(self, task_name, input_height, + input_width, prediction_dict, + per_pixel_weights): + """Computes the weighted keypoint losses.""" + kp_params = self._kp_params_dict[task_name] + heatmap_key = get_keypoint_name(task_name, KEYPOINT_HEATMAP) + offset_key = get_keypoint_name(task_name, KEYPOINT_OFFSET) + regression_key = get_keypoint_name(task_name, KEYPOINT_REGRESSION) + heatmap_loss = self._compute_kp_heatmap_loss( + input_height=input_height, + input_width=input_width, + task_name=task_name, + heatmap_predictions=prediction_dict[heatmap_key], + classification_loss_fn=kp_params.classification_loss, + per_pixel_weights=per_pixel_weights) + offset_loss = self._compute_kp_offset_loss( + input_height=input_height, + input_width=input_width, + task_name=task_name, + offset_predictions=prediction_dict[offset_key], + localization_loss_fn=kp_params.localization_loss) + reg_loss = self._compute_kp_regression_loss( + input_height=input_height, + input_width=input_width, + task_name=task_name, + regression_predictions=prediction_dict[regression_key], + localization_loss_fn=kp_params.localization_loss) + + loss_dict = {} + loss_dict[heatmap_key] = ( + kp_params.keypoint_heatmap_loss_weight * heatmap_loss) + loss_dict[offset_key] = ( + kp_params.keypoint_offset_loss_weight * offset_loss) + loss_dict[regression_key] = ( + kp_params.keypoint_regression_loss_weight * reg_loss) + return loss_dict + + def _compute_kp_heatmap_loss(self, input_height, input_width, task_name, + heatmap_predictions, classification_loss_fn, + per_pixel_weights): + """Computes the heatmap loss of the keypoint estimation task. + + Args: + input_height: An integer scalar tensor representing input image height. + input_width: An integer scalar tensor representing input image width. + task_name: A string representing the name of the keypoint task. + heatmap_predictions: A list of float tensors of shape [batch_size, + out_height, out_width, num_keypoints] representing the prediction heads + of the model for keypoint heatmap. + classification_loss_fn: An object_detection.core.losses.Loss object to + compute the loss for the class predictions in CenterNet. + per_pixel_weights: A float tensor of shape [batch_size, + out_height * out_width, 1] with 1s in locations where the spatial + coordinates fall within the height and width in true_image_shapes. + + Returns: + loss: A float scalar tensor representing the object keypoint heatmap loss + normalized by number of instances. 
+ """ + gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints) + gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes) + gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) + gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes) + + assigner = self._target_assigner_dict[task_name] + (keypoint_heatmap, num_instances_per_kp_type, + valid_mask_batch) = assigner.assign_keypoint_heatmap_targets( + height=input_height, + width=input_width, + gt_keypoints_list=gt_keypoints_list, + gt_weights_list=gt_weights_list, + gt_classes_list=gt_classes_list, + gt_boxes_list=gt_boxes_list) + flattened_valid_mask = _flatten_spatial_dimensions( + tf.expand_dims(valid_mask_batch, axis=-1)) + flattened_heapmap_targets = _flatten_spatial_dimensions(keypoint_heatmap) + # Sum over the number of instances per keypoint types to get the total + # number of keypoints. Note that this is used to normalized the loss and we + # keep the minimum value to be 1 to avoid generating weird loss value when + # no keypoint is in the image batch. + num_instances = tf.maximum( + tf.cast(tf.reduce_sum(num_instances_per_kp_type), dtype=tf.float32), + 1.0) + loss = 0.0 + # Loop through each feature output head. + for pred in heatmap_predictions: + pred = _flatten_spatial_dimensions(pred) + unweighted_loss = classification_loss_fn( + pred, + flattened_heapmap_targets, + weights=tf.ones_like(per_pixel_weights)) + # Apply the weights after the loss function to have full control over it. + loss += unweighted_loss * per_pixel_weights * flattened_valid_mask + loss = tf.reduce_sum(loss) / ( + float(len(heatmap_predictions)) * num_instances) + return loss + + def _compute_kp_offset_loss(self, input_height, input_width, task_name, + offset_predictions, localization_loss_fn): + """Computes the offset loss of the keypoint estimation task. + + Args: + input_height: An integer scalar tensor representing input image height. + input_width: An integer scalar tensor representing input image width. + task_name: A string representing the name of the keypoint task. + offset_predictions: A list of float tensors of shape [batch_size, + out_height, out_width, 2] representing the prediction heads of the model + for keypoint offset. + localization_loss_fn: An object_detection.core.losses.Loss object to + compute the loss for the keypoint offset predictions in CenterNet. + + Returns: + loss: A float scalar tensor representing the keypoint offset loss + normalized by number of total keypoints. + """ + gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints) + gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes) + gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) + + assigner = self._target_assigner_dict[task_name] + (batch_indices, batch_offsets, + batch_weights) = assigner.assign_keypoints_offset_targets( + height=input_height, + width=input_width, + gt_keypoints_list=gt_keypoints_list, + gt_weights_list=gt_weights_list, + gt_classes_list=gt_classes_list) + + # Keypoint offset loss. + loss = 0.0 + for prediction in offset_predictions: + batch_size, out_height, out_width, channels = _get_shape(prediction, 4) + if channels > 2: + prediction = tf.reshape( + prediction, shape=[batch_size, out_height, out_width, -1, 2]) + prediction = cn_assigner.get_batch_predictions_from_indices( + prediction, batch_indices) + # The dimensions passed are not as per the doc string but the loss + # still computes the correct value. 
+ unweighted_loss = localization_loss_fn( + prediction, + batch_offsets, + weights=tf.expand_dims(tf.ones_like(batch_weights), -1)) + # Apply the weights after the loss function to have full control over it. + loss += batch_weights * tf.reduce_sum(unweighted_loss, axis=1) + + loss = tf.reduce_sum(loss) / ( + float(len(offset_predictions)) * + tf.maximum(tf.reduce_sum(batch_weights), 1.0)) + return loss + + def _compute_kp_regression_loss(self, input_height, input_width, task_name, + regression_predictions, localization_loss_fn): + """Computes the keypoint regression loss of the keypoint estimation task. + + Args: + input_height: An integer scalar tensor representing input image height. + input_width: An integer scalar tensor representing input image width. + task_name: A string representing the name of the keypoint task. + regression_predictions: A list of float tensors of shape [batch_size, + out_height, out_width, 2 * num_keypoints] representing the prediction + heads of the model for keypoint regression offset. + localization_loss_fn: An object_detection.core.losses.Loss object to + compute the loss for the keypoint regression offset predictions in + CenterNet. + + Returns: + loss: A float scalar tensor representing the keypoint regression offset + loss normalized by number of total keypoints. + """ + gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes) + gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints) + gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes) + gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) + # keypoint regression offset loss. + assigner = self._target_assigner_dict[task_name] + (batch_indices, batch_regression_offsets, + batch_weights) = assigner.assign_joint_regression_targets( + height=input_height, + width=input_width, + gt_keypoints_list=gt_keypoints_list, + gt_classes_list=gt_classes_list, + gt_weights_list=gt_weights_list, + gt_boxes_list=gt_boxes_list) + + loss = 0.0 + for prediction in regression_predictions: + batch_size, out_height, out_width, _ = _get_shape(prediction, 4) + reshaped_prediction = tf.reshape( + prediction, shape=[batch_size, out_height, out_width, -1, 2]) + reg_prediction = cn_assigner.get_batch_predictions_from_indices( + reshaped_prediction, batch_indices) + unweighted_loss = localization_loss_fn( + reg_prediction, + batch_regression_offsets, + weights=tf.expand_dims(tf.ones_like(batch_weights), -1)) + # Apply the weights after the loss function to have full control over it. + loss += batch_weights * tf.reduce_sum(unweighted_loss, axis=1) + + loss = tf.reduce_sum(loss) / ( + float(len(regression_predictions)) * + tf.maximum(tf.reduce_sum(batch_weights), 1.0)) + return loss + + def _compute_segmentation_losses(self, prediction_dict, per_pixel_weights): + """Computes all the losses associated with segmentation. + + Args: + prediction_dict: The dictionary returned from the predict() method. + per_pixel_weights: A float tensor of shape [batch_size, + out_height * out_width, 1] with 1s in locations where the spatial + coordinates fall within the height and width in true_image_shapes. + + Returns: + A dictionary with segmentation losses. + """ + segmentation_heatmap = prediction_dict[SEGMENTATION_HEATMAP] + mask_loss = self._compute_mask_loss( + segmentation_heatmap, per_pixel_weights) + losses = { + SEGMENTATION_HEATMAP: mask_loss + } + return losses + + def _compute_mask_loss(self, segmentation_predictions, + per_pixel_weights): + """Computes the mask loss. 
+ + Args: + segmentation_predictions: A list of float32 tensors of shape [batch_size, + out_height, out_width, num_classes]. + per_pixel_weights: A float tensor of shape [batch_size, + out_height * out_width, 1] with 1s in locations where the spatial + coordinates fall within the height and width in true_image_shapes. + + Returns: + A float scalar tensor representing the mask loss. + """ + gt_masks_list = self.groundtruth_lists(fields.BoxListFields.masks) + gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes) + + # Convert the groundtruth to targets. + assigner = self._target_assigner_dict[SEGMENTATION_TASK] + heatmap_targets = assigner.assign_segmentation_targets( + gt_masks_list=gt_masks_list, + gt_classes_list=gt_classes_list) + + flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets) + + loss = 0.0 + mask_loss_fn = self._mask_params.classification_loss + total_pixels_in_loss = tf.reduce_sum(per_pixel_weights) + + # Loop through each feature output head. + for pred in segmentation_predictions: + pred = _flatten_spatial_dimensions(pred) + loss += mask_loss_fn( + pred, flattened_heatmap_targets, weights=per_pixel_weights) + # TODO(ronnyvotel): Consider other ways to normalize loss. + total_loss = tf.reduce_sum(loss) / ( + float(len(segmentation_predictions)) * total_pixels_in_loss) + return total_loss + + def preprocess(self, inputs): + outputs = shape_utils.resize_images_and_return_shapes( + inputs, self._image_resizer_fn) + resized_inputs, true_image_shapes = outputs + + return (self._feature_extractor.preprocess(resized_inputs), + true_image_shapes) + + def predict(self, preprocessed_inputs, _): + """Predicts CenterNet prediction tensors given an input batch. + + Feature extractors are free to produce predictions from multiple feature + maps and therefore we return a dictionary mapping strings to lists. + E.g. the hourglass backbone produces two feature maps. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float32 tensor + representing a batch of images. + + Returns: + prediction_dict: a dictionary holding predicted tensors with + 'preprocessed_inputs' - The input image after being resized and + preprocessed by the feature extractor. + 'object_center' - A list of size num_feature_outputs containing + float tensors of size [batch_size, output_height, output_width, + num_classes] representing the predicted object center heatmap logits. + 'box/scale' - [optional] A list of size num_feature_outputs holding + float tensors of size [batch_size, output_height, output_width, 2] + representing the predicted box height and width at each output + location. This field exists only when object detection task is + specified. + 'box/offset' - [optional] A list of size num_feature_outputs holding + float tensors of size [batch_size, output_height, output_width, 2] + representing the predicted y and x offsets at each output location. + '$TASK_NAME/keypoint_heatmap' - [optional] A list of size + num_feature_outputs holding float tensors of size [batch_size, + output_height, output_width, num_keypoints] representing the predicted + keypoint heatmap logits. + '$TASK_NAME/keypoint_offset' - [optional] A list of size + num_feature_outputs holding float tensors of size [batch_size, + output_height, output_width, 2] representing the predicted keypoint + offsets at each output location. 
+ '$TASK_NAME/keypoint_regression' - [optional] A list of size + num_feature_outputs holding float tensors of size [batch_size, + output_height, output_width, 2 * num_keypoints] representing the + predicted keypoint regression at each output location. + 'segmentation/heatmap' - [optional] A list of size num_feature_outputs + holding float tensors of size [batch_size, output_height, + output_width, num_classes] representing the mask logits. + Note the $TASK_NAME is provided by the KeypointEstimation namedtuple + used to differentiate between different keypoint tasks. + """ + features_list = self._feature_extractor(preprocessed_inputs) + + predictions = {} + for head_name, heads in self._prediction_head_dict.items(): + predictions[head_name] = [ + head(feature) for (feature, head) in zip(features_list, heads) + ] + predictions['preprocessed_inputs'] = preprocessed_inputs + + self._batched_prediction_tensor_names = predictions.keys() + return predictions + + def loss(self, prediction_dict, true_image_shapes, scope=None): + """Computes scalar loss tensors with respect to provided groundtruth. + + This function implements the various CenterNet losses. + + Args: + prediction_dict: a dictionary holding predicted tensors returned by + "predict" function. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is of + the form [height, width, channels] indicating the shapes of true images + in the resized images, as resized images can be padded with zeros. + scope: Optional scope name. + + Returns: + A dictionary mapping the keys ['Loss/object_center', 'Loss/box/scale', + 'Loss/box/offset', 'Loss/$TASK_NAME/keypoint/heatmap', + 'Loss/$TASK_NAME/keypoint/offset', + 'Loss/$TASK_NAME/keypoint/regression', 'Loss/segmentation/heatmap'] to + scalar tensors corresponding to the losses for different tasks. Note the + $TASK_NAME is provided by the KeypointEstimation namedtuple used to + differentiate between different keypoint tasks. + """ + + _, input_height, input_width, _ = _get_shape( + prediction_dict['preprocessed_inputs'], 4) + + output_height, output_width = (input_height // self._stride, + input_width // self._stride) + + # TODO(vighneshb) Explore whether using floor here is safe. 
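+    # Illustrative example: with stride 4, a true image height of 375 pixels
+    # maps to ceil(375 / 4) = 94 valid rows in the output space, so rows 0-93
+    # of the per-pixel weights are kept for that image.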
+ output_true_image_shapes = tf.ceil( + tf.to_float(true_image_shapes) / self._stride) + valid_anchor_weights = get_valid_anchor_weights_in_flattened_image( + output_true_image_shapes, output_height, output_width) + valid_anchor_weights = tf.expand_dims(valid_anchor_weights, 2) + + object_center_loss = self._compute_object_center_loss( + object_center_predictions=prediction_dict[OBJECT_CENTER], + input_height=input_height, + input_width=input_width, + per_pixel_weights=valid_anchor_weights) + losses = { + OBJECT_CENTER: + self._center_params.object_center_loss_weight * object_center_loss + } + if self._od_params is not None: + od_losses = self._compute_object_detection_losses( + input_height=input_height, + input_width=input_width, + prediction_dict=prediction_dict, + per_pixel_weights=valid_anchor_weights) + for key in od_losses: + od_losses[key] = od_losses[key] * self._od_params.task_loss_weight + losses.update(od_losses) + + if self._kp_params_dict is not None: + for task_name, params in self._kp_params_dict.items(): + kp_losses = self._compute_keypoint_estimation_losses( + task_name=task_name, + input_height=input_height, + input_width=input_width, + prediction_dict=prediction_dict, + per_pixel_weights=valid_anchor_weights) + for key in kp_losses: + kp_losses[key] = kp_losses[key] * params.task_loss_weight + losses.update(kp_losses) + + if self._mask_params is not None: + seg_losses = self._compute_segmentation_losses( + prediction_dict=prediction_dict, + per_pixel_weights=valid_anchor_weights) + for key in seg_losses: + seg_losses[key] = seg_losses[key] * self._mask_params.task_loss_weight + losses.update(seg_losses) + + # Prepend the LOSS_KEY_PREFIX to the keys in the dictionary such that the + # losses will be grouped together in Tensorboard. + return dict([('%s/%s' % (LOSS_KEY_PREFIX, key), val) + for key, val in losses.items()]) + + def postprocess(self, prediction_dict, true_image_shapes, **params): + """Produces boxes given a prediction dict returned by predict(). + + Although predict returns a list of tensors, only the last tensor in + each list is used for making box predictions. + + Args: + prediction_dict: a dictionary holding predicted tensors from "predict" + function. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is of + the form [height, width, channels] indicating the shapes of true images + in the resized images, as resized images can be padded with zeros. + **params: Currently ignored. + + Returns: + detections: a dictionary containing the following fields + detection_boxes - A tensor of shape [batch, max_detections, 4] + holding the predicted boxes. + detection_scores: A tensor of shape [batch, max_detections] holding + the predicted score for each box. + detection_classes: An integer tensor of shape [batch, max_detections] + containing the detected class for each box. + num_detections: An integer tensor of shape [batch] containing the + number of detected boxes for each sample in the batch. + detection_keypoints: (Optional) A float tensor of shape [batch, + max_detections, num_keypoints, 2] with normalized keypoints. Any + invalid keypoints have their coordinates and scores set to 0.0. + detection_keypoint_scores: (Optional) A float tensor of shape [batch, + max_detection, num_keypoints] with scores for each keypoint. + detection_masks: (Optional) An int tensor of shape [batch, + max_detections, mask_height, mask_width] with binarized masks for each + detection. 
+ """ + object_center_prob = tf.nn.sigmoid(prediction_dict[OBJECT_CENTER][-1]) + # Get x, y and channel indices corresponding to the top indices in the class + # center predictions. + detection_scores, y_indices, x_indices, channel_indices = ( + top_k_feature_map_locations( + object_center_prob, max_pool_kernel_size=3, + k=self._center_params.max_box_predictions)) + + boxes_strided, classes, scores, num_detections = ( + prediction_tensors_to_boxes( + detection_scores, y_indices, x_indices, channel_indices, + prediction_dict[BOX_SCALE][-1], prediction_dict[BOX_OFFSET][-1])) + + boxes = convert_strided_predictions_to_normalized_boxes( + boxes_strided, self._stride, true_image_shapes) + + postprocess_dict = { + fields.DetectionResultFields.detection_boxes: boxes, + fields.DetectionResultFields.detection_scores: scores, + fields.DetectionResultFields.detection_classes: classes, + fields.DetectionResultFields.num_detections: num_detections, + } + + if self._kp_params_dict: + keypoints, keypoint_scores = self._postprocess_keypoints( + prediction_dict, classes, y_indices, x_indices, + boxes_strided, num_detections) + keypoints, keypoint_scores = ( + convert_strided_predictions_to_normalized_keypoints( + keypoints, keypoint_scores, self._stride, true_image_shapes, + clip_out_of_frame_keypoints=True)) + postprocess_dict.update({ + fields.DetectionResultFields.detection_keypoints: keypoints, + fields.DetectionResultFields.detection_keypoint_scores: + keypoint_scores + }) + + if self._mask_params: + masks = tf.nn.sigmoid(prediction_dict[SEGMENTATION_HEATMAP][-1]) + instance_masks = convert_strided_predictions_to_instance_masks( + boxes, classes, masks, self._stride, self._mask_params.mask_height, + self._mask_params.mask_width, true_image_shapes, + self._mask_params.score_threshold) + postprocess_dict.update({ + fields.DetectionResultFields.detection_masks: + instance_masks + }) + return postprocess_dict + + def _postprocess_keypoints(self, prediction_dict, classes, y_indices, + x_indices, boxes, num_detections): + """Performs postprocessing on keypoint predictions. + + Args: + prediction_dict: a dictionary holding predicted tensors, returned from the + predict() method. This dictionary should contain keypoint prediction + feature maps for each keypoint task. + classes: A [batch_size, max_detections] int tensor with class indices for + all detected objects. + y_indices: A [batch_size, max_detections] int tensor with y indices for + all object centers. + x_indices: A [batch_size, max_detections] int tensor with x indices for + all object centers. + boxes: A [batch_size, max_detections, 4] float32 tensor with bounding + boxes in (un-normalized) output space. + num_detections: A [batch_size] int tensor with the number of valid + detections for each image. + + Returns: + A tuple of + keypoints: a [batch_size, max_detection, num_total_keypoints, 2] float32 + tensor with keypoints in the output (strided) coordinate frame. + keypoint_scores: a [batch_size, max_detections, num_total_keypoints] + float32 tensor with keypoint scores. 
+ """ + total_num_keypoints = sum(len(kp_dict.keypoint_indices) for kp_dict + in self._kp_params_dict.values()) + batch_size, max_detections, _ = _get_shape(boxes, 3) + kpt_coords_for_example_list = [] + kpt_scores_for_example_list = [] + for ex_ind in range(batch_size): + kpt_coords_for_class_list = [] + kpt_scores_for_class_list = [] + instance_inds_for_class_list = [] + for task_name, kp_params in self._kp_params_dict.items(): + keypoint_heatmap = prediction_dict[ + get_keypoint_name(task_name, KEYPOINT_HEATMAP)][-1] + keypoint_offsets = prediction_dict[ + get_keypoint_name(task_name, KEYPOINT_OFFSET)][-1] + keypoint_regression = prediction_dict[ + get_keypoint_name(task_name, KEYPOINT_REGRESSION)][-1] + instance_inds = self._get_instance_indices( + classes, num_detections, ex_ind, kp_params.class_id) + + def true_fn( + keypoint_heatmap, keypoint_offsets, keypoint_regression, + classes, y_indices, x_indices, boxes, instance_inds, + ex_ind, kp_params): + """Logics to execute when instance_inds is not an empty set.""" + # Postprocess keypoints and scores for class and single image. Shapes + # are [1, num_instances_i, num_keypoints_i, 2] and + # [1, num_instances_i, num_keypoints_i], respectively. Note that + # num_instances_i and num_keypoints_i refers to the number of + # instances and keypoints for class i, respectively. + kpt_coords_for_class, kpt_scores_for_class = ( + self._postprocess_keypoints_for_class_and_image( + keypoint_heatmap, keypoint_offsets, keypoint_regression, + classes, y_indices, x_indices, boxes, instance_inds, + ex_ind, kp_params)) + # Expand keypoint dimension (with padding) so that coordinates and + # scores have shape [1, num_instances_i, num_total_keypoints, 2] and + # [1, num_instances_i, num_total_keypoints], respectively. + kpts_coords_for_class_padded, kpt_scores_for_class_padded = ( + _pad_to_full_keypoint_dim( + kpt_coords_for_class, kpt_scores_for_class, + kp_params.keypoint_indices, total_num_keypoints)) + return kpts_coords_for_class_padded, kpt_scores_for_class_padded + + def false_fn(): + """Logics to execute when the instance_inds is an empty set.""" + return (tf.zeros([1, 0, total_num_keypoints, 2], dtype=tf.float32), + tf.zeros([1, 0, total_num_keypoints], dtype=tf.float32)) + + true_fn = functools.partial( + true_fn, keypoint_heatmap, keypoint_offsets, keypoint_regression, + classes, y_indices, x_indices, boxes, instance_inds, ex_ind, + kp_params) + results = tf.cond(tf.size(instance_inds) > 0, true_fn, false_fn) + + kpt_coords_for_class_list.append(results[0]) + kpt_scores_for_class_list.append(results[1]) + instance_inds_for_class_list.append(instance_inds) + + # Concatenate all keypoints across all classes (single example). + kpt_coords_for_example = tf.concat(kpt_coords_for_class_list, axis=1) + kpt_scores_for_example = tf.concat(kpt_scores_for_class_list, axis=1) + instance_inds_for_example = tf.concat(instance_inds_for_class_list, + axis=0) + + if tf.size(instance_inds_for_example) > 0: + # Scatter into tensor where instances align with original detection + # instances. New shape of keypoint coordinates and scores are + # [1, max_detections, num_total_keypoints, 2] and + # [1, max_detections, num_total_keypoints], respectively. 
+ kpt_coords_for_example_all_det, kpt_scores_for_example_all_det = ( + _pad_to_full_instance_dim( + kpt_coords_for_example, kpt_scores_for_example, + instance_inds_for_example, + self._center_params.max_box_predictions)) + else: + kpt_coords_for_example_all_det = tf.zeros( + [1, max_detections, total_num_keypoints, 2], dtype=tf.float32) + kpt_scores_for_example_all_det = tf.zeros( + [1, max_detections, total_num_keypoints], dtype=tf.float32) + + kpt_coords_for_example_list.append(kpt_coords_for_example_all_det) + kpt_scores_for_example_list.append(kpt_scores_for_example_all_det) + + # Concatenate all keypoints and scores from all examples in the batch. + # Shapes are [batch_size, max_detections, num_total_keypoints, 2] and + # [batch_size, max_detections, num_total_keypoints], respectively. + keypoints = tf.concat(kpt_coords_for_example_list, axis=0) + keypoint_scores = tf.concat(kpt_scores_for_example_list, axis=0) + + return keypoints, keypoint_scores + + def _get_instance_indices(self, classes, num_detections, batch_index, + class_id): + """Gets the instance indices that match the target class ID. + + Args: + classes: A [batch_size, max_detections] int tensor with class indices for + all detected objects. + num_detections: A [batch_size] int tensor with the number of valid + detections for each image. + batch_index: An integer specifying the index for an example in the batch. + class_id: Class id + + Returns: + instance_inds: A [num_instances] int tensor where each element indicates + the instance location within the `classes` tensor. This is useful to + associate the refined keypoints with the original detections (i.e. + boxes) + """ + classes = classes[batch_index:batch_index+1, ...] + _, max_detections = shape_utils.combined_static_and_dynamic_shape( + classes) + # Get the detection indices corresponding to the target class. + valid_detections_with_kpt_class = tf.math.logical_and( + tf.range(max_detections) < num_detections[batch_index], + classes[0] == class_id) + instance_inds = tf.where(valid_detections_with_kpt_class)[:, 0] + return instance_inds + + def _postprocess_keypoints_for_class_and_image( + self, keypoint_heatmap, keypoint_offsets, keypoint_regression, classes, + y_indices, x_indices, boxes, indices_with_kpt_class, batch_index, + kp_params): + """Postprocess keypoints for a single image and class. + + This function performs the following postprocessing operations on a single + image and single keypoint class: + - Converts keypoints scores to range [0, 1] with sigmoid. + - Determines the detections that correspond to the specified keypoint class. + - Gathers the regressed keypoints at the detection (i.e. box) centers. + - Gathers keypoint candidates from the keypoint heatmaps. + - Snaps regressed keypoints to nearby keypoint candidates. + + Args: + keypoint_heatmap: A [batch_size, height, width, num_keypoints] float32 + tensor with keypoint heatmaps. + keypoint_offsets: A [batch_size, height, width, 2] float32 tensor with + local offsets to keypoint centers. + keypoint_regression: A [batch_size, height, width, 2 * num_keypoints] + float32 tensor with regressed offsets to all keypoints. + classes: A [batch_size, max_detections] int tensor with class indices for + all detected objects. + y_indices: A [batch_size, max_detections] int tensor with y indices for + all object centers. + x_indices: A [batch_size, max_detections] int tensor with x indices for + all object centers. 
+ boxes: A [batch_size, max_detections, 4] float32 tensor with detected + boxes in the output (strided) frame. + indices_with_kpt_class: A [num_instances] int tensor where each element + indicates the instance location within the `classes` tensor. This is + useful to associate the refined keypoints with the original detections + (i.e. boxes) + batch_index: An integer specifying the index for an example in the batch. + kp_params: A `KeypointEstimationParams` object with parameters for a + single keypoint class. + + Returns: + A tuple of + refined_keypoints: A [1, num_instances, num_keypoints, 2] float32 tensor + with refined keypoints for a single class in a single image, expressed + in the output (strided) coordinate frame. Note that `num_instances` is a + dynamic dimension, and corresponds to the number of valid detections + for the specific class. + refined_scores: A [1, num_instances, num_keypoints] float32 tensor with + keypoint scores. + """ + keypoint_indices = kp_params.keypoint_indices + num_keypoints = len(keypoint_indices) + + keypoint_heatmap = tf.nn.sigmoid( + keypoint_heatmap[batch_index:batch_index+1, ...]) + keypoint_offsets = keypoint_offsets[batch_index:batch_index+1, ...] + keypoint_regression = keypoint_regression[batch_index:batch_index+1, ...] + y_indices = y_indices[batch_index:batch_index+1, ...] + x_indices = x_indices[batch_index:batch_index+1, ...] + + # Gather the feature map locations corresponding to the object class. + y_indices_for_kpt_class = tf.gather(y_indices, indices_with_kpt_class, + axis=1) + x_indices_for_kpt_class = tf.gather(x_indices, indices_with_kpt_class, + axis=1) + boxes_for_kpt_class = tf.gather(boxes, indices_with_kpt_class, axis=1) + + # Gather the regressed keypoints. Final tensor has shape + # [1, num_instances, num_keypoints, 2]. + regressed_keypoints_for_objects = regressed_keypoints_at_object_centers( + keypoint_regression, y_indices_for_kpt_class, x_indices_for_kpt_class) + regressed_keypoints_for_objects = tf.reshape( + regressed_keypoints_for_objects, [1, -1, num_keypoints, 2]) + + # Get the candidate keypoints and scores. + # The shape of keypoint_candidates and keypoint_scores is: + # [1, num_candidates_per_keypoint, num_keypoints, 2] and + # [1, num_candidates_per_keypoint, num_keypoints], respectively. + keypoint_candidates, keypoint_scores, num_keypoint_candidates = ( + prediction_tensors_to_keypoint_candidates( + keypoint_heatmap, keypoint_offsets, + keypoint_score_threshold=( + kp_params.keypoint_candidate_score_threshold), + max_pool_kernel_size=kp_params.peak_max_pool_kernel_size, + max_candidates=kp_params.num_candidates_per_keypoint)) + + # Get the refined keypoints and scores, of shape + # [1, num_instances, num_keypoints, 2] and + # [1, num_instances, num_keypoints], respectively. 
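+ # refine_keypoints snaps each regressed keypoint to a nearby heatmap + # candidate (ranked by distance or score/distance ratio, depending on + # candidate_ranking_mode) within a search window derived from the box size; + # keypoints with no acceptable candidate keep their regressed location and + # are assigned unmatched_keypoint_score.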
+ refined_keypoints, refined_scores = refine_keypoints( + regressed_keypoints_for_objects, keypoint_candidates, keypoint_scores, + num_keypoint_candidates, bboxes=boxes_for_kpt_class, + unmatched_keypoint_score=kp_params.unmatched_keypoint_score, + box_scale=kp_params.box_scale, + candidate_search_scale=kp_params.candidate_search_scale, + candidate_ranking_mode=kp_params.candidate_ranking_mode) + + return refined_keypoints, refined_scores + + def regularization_losses(self): + return [] + + def restore_map(self, fine_tune_checkpoint_type='classification', + load_all_detection_checkpoint_vars=False): + + if fine_tune_checkpoint_type == 'classification': + return {'feature_extractor': self._feature_extractor.get_base_model()} + + if fine_tune_checkpoint_type == 'detection': + return {'feature_extractor': self._feature_extractor.get_model()} + + else: + raise ValueError('Unknown fine tune checkpoint type - {}'.format( + fine_tune_checkpoint_type)) + + def updates(self): + raise RuntimeError('This model is intended to be used with model_lib_v2 ' + 'which does not support updates()') diff --git a/models/research/object_detection/meta_architectures/center_net_meta_arch_tf2_test.py b/models/research/object_detection/meta_architectures/center_net_meta_arch_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..247ffd1bdc767e1a2ad717b87b5dfc6901ad82ce --- /dev/null +++ b/models/research/object_detection/meta_architectures/center_net_meta_arch_tf2_test.py @@ -0,0 +1,1683 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for the CenterNet Meta architecture code.""" + +from __future__ import division + +import functools +import unittest +from absl.testing import parameterized +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.core import losses +from object_detection.core import preprocessor +from object_detection.core import standard_fields as fields +from object_detection.core import target_assigner as cn_assigner +from object_detection.meta_architectures import center_net_meta_arch as cnma +from object_detection.models import center_net_resnet_feature_extractor +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class CenterNetMetaArchPredictionHeadTest(test_case.TestCase): + """Test CenterNet meta architecture prediction head.""" + + def test_prediction_head(self): + head = cnma.make_prediction_net(num_out_channels=7) + output = head(np.zeros((4, 128, 128, 8))) + + self.assertEqual((4, 128, 128, 7), output.shape) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class CenterNetMetaArchHelpersTest(test_case.TestCase, parameterized.TestCase): + """Test for CenterNet meta architecture related functions.""" + + def test_row_col_indices_from_flattened_indices(self): + """Tests that the computation of row, col, channel indices is correct.""" + + r_grid, c_grid, ch_grid = (np.zeros((5, 4, 3), dtype=np.int), + np.zeros((5, 4, 3), dtype=np.int), + np.zeros((5, 4, 3), dtype=np.int)) + + r_grid[..., 0] = r_grid[..., 1] = r_grid[..., 2] = np.array( + [[0, 0, 0, 0], + [1, 1, 1, 1], + [2, 2, 2, 2], + [3, 3, 3, 3], + [4, 4, 4, 4]] + ) + + c_grid[..., 0] = c_grid[..., 1] = c_grid[..., 2] = np.array( + [[0, 1, 2, 3], + [0, 1, 2, 3], + [0, 1, 2, 3], + [0, 1, 2, 3], + [0, 1, 2, 3]] + ) + + for i in range(3): + ch_grid[..., i] = i + + indices = np.arange(60) + ri, ci, chi = cnma.row_col_channel_indices_from_flattened_indices( + indices, 4, 3) + + np.testing.assert_array_equal(ri, r_grid.flatten()) + np.testing.assert_array_equal(ci, c_grid.flatten()) + np.testing.assert_array_equal(chi, ch_grid.flatten()) + + def test_flattened_indices_from_row_col_indices(self): + + r = np.array( + [[0, 0, 0, 0], + [1, 1, 1, 1], + [2, 2, 2, 2]] + ) + + c = np.array( + [[0, 1, 2, 3], + [0, 1, 2, 3], + [0, 1, 2, 3]] + ) + + idx = cnma.flattened_indices_from_row_col_indices(r, c, 4) + np.testing.assert_array_equal(np.arange(12), idx.flatten()) + + def test_get_valid_anchor_weights_in_flattened_image(self): + """Tests that the anchor weights are valid upon flattening out.""" + + valid_weights = np.zeros((2, 5, 5), dtype=np.float) + + valid_weights[0, :3, :4] = 1.0 + valid_weights[1, :2, :2] = 1.0 + + def graph_fn(): + true_image_shapes = tf.constant([[3, 4], [2, 2]]) + w = cnma.get_valid_anchor_weights_in_flattened_image( + true_image_shapes, 5, 5) + return w + + w = self.execute(graph_fn, []) + np.testing.assert_allclose(w, valid_weights.reshape(2, -1)) + self.assertEqual((2, 25), w.shape) + + def test_convert_strided_predictions_to_normalized_boxes(self): + """Tests that boxes have correct coordinates in normalized input space.""" + + def graph_fn(): + boxes = np.zeros((2, 3, 4), dtype=np.float32) + + boxes[0] = [[10, 20, 30, 40], [20, 30, 50, 100], [50, 60, 100, 180]] + boxes[1] = [[-5, -5, 5, 5], [45, 60, 110, 120], [150, 150, 200, 250]] + + true_image_shapes = tf.constant([[100, 90, 
3], [150, 150, 3]]) + + clipped_boxes = ( + cnma.convert_strided_predictions_to_normalized_boxes( + boxes, 2, true_image_shapes)) + return clipped_boxes + + clipped_boxes = self.execute(graph_fn, []) + + expected_boxes = np.zeros((2, 3, 4), dtype=np.float32) + expected_boxes[0] = [[0.2, 4./9, 0.6, 8./9], [0.4, 2./3, 1, 1], + [1, 1, 1, 1]] + expected_boxes[1] = [[0., 0, 1./15, 1./15], [3./5, 4./5, 1, 1], + [1, 1, 1, 1]] + + np.testing.assert_allclose(expected_boxes, clipped_boxes) + + @parameterized.parameters( + {'clip_to_window': True}, + {'clip_to_window': False} + ) + def test_convert_strided_predictions_to_normalized_keypoints( + self, clip_to_window): + """Tests that keypoints have correct coordinates in normalized coords.""" + + keypoint_coords_np = np.array( + [ + # Example 0. + [ + [[-10., 8.], [60., 22.], [60., 120.]], + [[20., 20.], [0., 0.], [0., 0.]], + ], + # Example 1. + [ + [[40., 50.], [20., 160.], [200., 150.]], + [[10., 0.], [40., 10.], [0., 0.]], + ], + ], dtype=np.float32) + keypoint_scores_np = np.array( + [ + # Example 0. + [ + [1.0, 0.9, 0.2], + [0.7, 0.0, 0.0], + ], + # Example 1. + [ + [1.0, 1.0, 0.2], + [0.7, 0.6, 0.0], + ], + ], dtype=np.float32) + + def graph_fn(): + keypoint_coords = tf.constant(keypoint_coords_np, dtype=tf.float32) + keypoint_scores = tf.constant(keypoint_scores_np, dtype=tf.float32) + true_image_shapes = tf.constant([[320, 400, 3], [640, 640, 3]]) + stride = 4 + + keypoint_coords_out, keypoint_scores_out = ( + cnma.convert_strided_predictions_to_normalized_keypoints( + keypoint_coords, keypoint_scores, stride, true_image_shapes, + clip_to_window)) + return keypoint_coords_out, keypoint_scores_out + + keypoint_coords_out, keypoint_scores_out = self.execute(graph_fn, []) + + if clip_to_window: + expected_keypoint_coords_np = np.array( + [ + # Example 0. + [ + [[0.0, 0.08], [0.75, 0.22], [0.75, 1.0]], + [[0.25, 0.2], [0., 0.], [0.0, 0.0]], + ], + # Example 1. + [ + [[0.25, 0.3125], [0.125, 1.0], [1.0, 0.9375]], + [[0.0625, 0.], [0.25, 0.0625], [0., 0.]], + ], + ], dtype=np.float32) + expected_keypoint_scores_np = np.array( + [ + # Example 0. + [ + [0.0, 0.9, 0.0], + [0.7, 0.0, 0.0], + ], + # Example 1. + [ + [1.0, 1.0, 0.0], + [0.7, 0.6, 0.0], + ], + ], dtype=np.float32) + else: + expected_keypoint_coords_np = np.array( + [ + # Example 0. + [ + [[-0.125, 0.08], [0.75, 0.22], [0.75, 1.2]], + [[0.25, 0.2], [0., 0.], [0., 0.]], + ], + # Example 1. + [ + [[0.25, 0.3125], [0.125, 1.0], [1.25, 0.9375]], + [[0.0625, 0.], [0.25, 0.0625], [0., 0.]], + ], + ], dtype=np.float32) + expected_keypoint_scores_np = np.array( + [ + # Example 0. + [ + [1.0, 0.9, 0.2], + [0.7, 0.0, 0.0], + ], + # Example 1. + [ + [1.0, 1.0, 0.2], + [0.7, 0.6, 0.0], + ], + ], dtype=np.float32) + np.testing.assert_allclose(expected_keypoint_coords_np, keypoint_coords_out) + np.testing.assert_allclose(expected_keypoint_scores_np, keypoint_scores_out) + + def test_convert_strided_predictions_to_instance_masks(self): + + def graph_fn(): + boxes = tf.constant( + [ + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.5, 0.5, 1.0], + [0.0, 0.0, 0.0, 0.0]], + ], tf.float32) + classes = tf.constant( + [ + [0, 1, 0], + ], tf.int32) + masks_np = np.zeros((1, 4, 4, 2), dtype=np.float32) + masks_np[0, :, 2:, 0] = 1 # Class 0. + masks_np[0, :, :3, 1] = 1 # Class 1. 
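+ # For each detection, the mask channel of its predicted class is cropped + # to the detection box, resized to (mask_height, mask_width) and binarized, + # which is what the expected 2x2 instance masks below encode.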
+ masks = tf.constant(masks_np) + true_image_shapes = tf.constant([[6, 8, 3]]) + instance_masks = cnma.convert_strided_predictions_to_instance_masks( + boxes, classes, masks, stride=2, mask_height=2, mask_width=2, + true_image_shapes=true_image_shapes) + return instance_masks + + instance_masks = self.execute_cpu(graph_fn, []) + + expected_instance_masks = np.array( + [ + [ + # Mask 0 (class 0). + [[1, 1], + [1, 1]], + # Mask 1 (class 1). + [[1, 0], + [1, 0]], + # Mask 2 (class 0). + [[0, 0], + [0, 0]], + ] + ]) + np.testing.assert_array_equal(expected_instance_masks, instance_masks) + + def test_top_k_feature_map_locations(self): + feature_map_np = np.zeros((2, 3, 3, 2), dtype=np.float32) + feature_map_np[0, 2, 0, 1] = 1.0 + feature_map_np[0, 2, 1, 1] = 0.9 # Get's filtered due to max pool. + feature_map_np[0, 0, 1, 0] = 0.7 + feature_map_np[0, 2, 2, 0] = 0.5 + feature_map_np[0, 2, 2, 1] = -0.3 + feature_map_np[1, 2, 1, 1] = 0.7 + feature_map_np[1, 1, 0, 0] = 0.4 + feature_map_np[1, 1, 2, 0] = 0.1 + + def graph_fn(): + feature_map = tf.constant(feature_map_np) + scores, y_inds, x_inds, channel_inds = ( + cnma.top_k_feature_map_locations( + feature_map, max_pool_kernel_size=3, k=3)) + return scores, y_inds, x_inds, channel_inds + + scores, y_inds, x_inds, channel_inds = self.execute(graph_fn, []) + + np.testing.assert_allclose([1.0, 0.7, 0.5], scores[0]) + np.testing.assert_array_equal([2, 0, 2], y_inds[0]) + np.testing.assert_array_equal([0, 1, 2], x_inds[0]) + np.testing.assert_array_equal([1, 0, 0], channel_inds[0]) + + np.testing.assert_allclose([0.7, 0.4, 0.1], scores[1]) + np.testing.assert_array_equal([2, 1, 1], y_inds[1]) + np.testing.assert_array_equal([1, 0, 2], x_inds[1]) + np.testing.assert_array_equal([1, 0, 0], channel_inds[1]) + + def test_top_k_feature_map_locations_no_pooling(self): + feature_map_np = np.zeros((2, 3, 3, 2), dtype=np.float32) + feature_map_np[0, 2, 0, 1] = 1.0 + feature_map_np[0, 2, 1, 1] = 0.9 + feature_map_np[0, 0, 1, 0] = 0.7 + feature_map_np[0, 2, 2, 0] = 0.5 + feature_map_np[0, 2, 2, 1] = -0.3 + feature_map_np[1, 2, 1, 1] = 0.7 + feature_map_np[1, 1, 0, 0] = 0.4 + feature_map_np[1, 1, 2, 0] = 0.1 + + def graph_fn(): + feature_map = tf.constant(feature_map_np) + scores, y_inds, x_inds, channel_inds = ( + cnma.top_k_feature_map_locations( + feature_map, max_pool_kernel_size=1, k=3)) + return scores, y_inds, x_inds, channel_inds + + scores, y_inds, x_inds, channel_inds = self.execute(graph_fn, []) + + np.testing.assert_allclose([1.0, 0.9, 0.7], scores[0]) + np.testing.assert_array_equal([2, 2, 0], y_inds[0]) + np.testing.assert_array_equal([0, 1, 1], x_inds[0]) + np.testing.assert_array_equal([1, 1, 0], channel_inds[0]) + + np.testing.assert_allclose([0.7, 0.4, 0.1], scores[1]) + np.testing.assert_array_equal([2, 1, 1], y_inds[1]) + np.testing.assert_array_equal([1, 0, 2], x_inds[1]) + np.testing.assert_array_equal([1, 0, 0], channel_inds[1]) + + def test_top_k_feature_map_locations_per_channel(self): + feature_map_np = np.zeros((2, 3, 3, 2), dtype=np.float32) + feature_map_np[0, 2, 0, 0] = 1.0 # Selected. + feature_map_np[0, 2, 1, 0] = 0.9 # Get's filtered due to max pool. + feature_map_np[0, 0, 1, 0] = 0.7 # Selected. + feature_map_np[0, 2, 2, 1] = 0.5 # Selected. + feature_map_np[0, 0, 0, 1] = 0.3 # Selected. + feature_map_np[1, 2, 1, 0] = 0.7 # Selected. + feature_map_np[1, 1, 0, 0] = 0.4 # Get's filtered due to max pool. + feature_map_np[1, 1, 2, 0] = 0.3 # Get's filtered due to max pool. + feature_map_np[1, 1, 0, 1] = 0.8 # Selected. 
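+ # With per_channel=True below, the top k=2 locations are selected + # independently for every channel, so each example yields + # k * num_channels = 4 scores and corresponding y/x/channel indices.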
+ feature_map_np[1, 1, 2, 1] = 0.3 # Selected. + + def graph_fn(): + feature_map = tf.constant(feature_map_np) + scores, y_inds, x_inds, channel_inds = ( + cnma.top_k_feature_map_locations( + feature_map, max_pool_kernel_size=3, k=2, per_channel=True)) + return scores, y_inds, x_inds, channel_inds + + scores, y_inds, x_inds, channel_inds = self.execute(graph_fn, []) + + np.testing.assert_allclose([1.0, 0.7, 0.5, 0.3], scores[0]) + np.testing.assert_array_equal([2, 0, 2, 0], y_inds[0]) + np.testing.assert_array_equal([0, 1, 2, 0], x_inds[0]) + np.testing.assert_array_equal([0, 0, 1, 1], channel_inds[0]) + + np.testing.assert_allclose([0.7, 0.0, 0.8, 0.3], scores[1]) + np.testing.assert_array_equal([2, 0, 1, 1], y_inds[1]) + np.testing.assert_array_equal([1, 0, 0, 2], x_inds[1]) + np.testing.assert_array_equal([0, 0, 1, 1], channel_inds[1]) + + def test_box_prediction(self): + + class_pred = np.zeros((3, 128, 128, 5), dtype=np.float32) + hw_pred = np.zeros((3, 128, 128, 2), dtype=np.float32) + offset_pred = np.zeros((3, 128, 128, 2), dtype=np.float32) + + # Sample 1, 2 boxes + class_pred[0, 10, 20] = [0.3, .7, 0.0, 0.0, 0.0] + hw_pred[0, 10, 20] = [40, 60] + offset_pred[0, 10, 20] = [1, 2] + + class_pred[0, 50, 60] = [0.55, 0.0, 0.0, 0.0, 0.45] + hw_pred[0, 50, 60] = [50, 50] + offset_pred[0, 50, 60] = [0, 0] + + # Sample 2, 2 boxes (at same location) + class_pred[1, 100, 100] = [0.0, 0.1, 0.9, 0.0, 0.0] + hw_pred[1, 100, 100] = [10, 10] + offset_pred[1, 100, 100] = [1, 3] + + # Sample 3, 3 boxes + class_pred[2, 60, 90] = [0.0, 0.0, 0.0, 0.2, 0.8] + hw_pred[2, 60, 90] = [40, 30] + offset_pred[2, 60, 90] = [0, 0] + + class_pred[2, 65, 95] = [0.0, 0.7, 0.3, 0.0, 0.0] + hw_pred[2, 65, 95] = [20, 20] + offset_pred[2, 65, 95] = [1, 2] + + class_pred[2, 75, 85] = [1.0, 0.0, 0.0, 0.0, 0.0] + hw_pred[2, 75, 85] = [21, 25] + offset_pred[2, 75, 85] = [5, 2] + + def graph_fn(): + class_pred_tensor = tf.constant(class_pred) + hw_pred_tensor = tf.constant(hw_pred) + offset_pred_tensor = tf.constant(offset_pred) + + detection_scores, y_indices, x_indices, channel_indices = ( + cnma.top_k_feature_map_locations( + class_pred_tensor, max_pool_kernel_size=3, k=2)) + + boxes, classes, scores, num_dets = cnma.prediction_tensors_to_boxes( + detection_scores, y_indices, x_indices, channel_indices, + hw_pred_tensor, offset_pred_tensor) + return boxes, classes, scores, num_dets + + boxes, classes, scores, num_dets = self.execute(graph_fn, []) + + np.testing.assert_array_equal(num_dets, [2, 2, 2]) + + np.testing.assert_allclose( + [[-9, -8, 31, 52], [25, 35, 75, 85]], boxes[0]) + np.testing.assert_allclose( + [[96, 98, 106, 108], [96, 98, 106, 108]], boxes[1]) + np.testing.assert_allclose( + [[69.5, 74.5, 90.5, 99.5], [40, 75, 80, 105]], boxes[2]) + + np.testing.assert_array_equal(classes[0], [1, 0]) + np.testing.assert_array_equal(classes[1], [2, 1]) + np.testing.assert_array_equal(classes[2], [0, 4]) + + np.testing.assert_allclose(scores[0], [.7, .55]) + np.testing.assert_allclose(scores[1][:1], [.9]) + np.testing.assert_allclose(scores[2], [1., .8]) + + def test_keypoint_candidate_prediction(self): + keypoint_heatmap_np = np.zeros((2, 3, 3, 2), dtype=np.float32) + keypoint_heatmap_np[0, 0, 0, 0] = 1.0 + keypoint_heatmap_np[0, 2, 1, 0] = 0.7 + keypoint_heatmap_np[0, 1, 1, 0] = 0.6 + keypoint_heatmap_np[0, 0, 2, 1] = 0.7 + keypoint_heatmap_np[0, 1, 1, 1] = 0.3 # Filtered by low score. 
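+ # Peaks below keypoint_score_threshold=0.5 (used in the call below) are + # dropped, which is why example 0 ends up with a single valid candidate for + # keypoint channel 1.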
+ keypoint_heatmap_np[0, 2, 2, 1] = 0.2 + keypoint_heatmap_np[1, 1, 0, 0] = 0.6 + keypoint_heatmap_np[1, 2, 1, 0] = 0.5 + keypoint_heatmap_np[1, 0, 0, 0] = 0.4 + keypoint_heatmap_np[1, 0, 0, 1] = 1.0 + keypoint_heatmap_np[1, 0, 1, 1] = 0.9 + keypoint_heatmap_np[1, 2, 0, 1] = 0.8 + + keypoint_heatmap_offsets_np = np.zeros((2, 3, 3, 2), dtype=np.float32) + keypoint_heatmap_offsets_np[0, 0, 0] = [0.5, 0.25] + keypoint_heatmap_offsets_np[0, 2, 1] = [-0.25, 0.5] + keypoint_heatmap_offsets_np[0, 1, 1] = [0.0, 0.0] + keypoint_heatmap_offsets_np[0, 0, 2] = [1.0, 0.0] + keypoint_heatmap_offsets_np[0, 2, 2] = [1.0, 1.0] + keypoint_heatmap_offsets_np[1, 1, 0] = [0.25, 0.5] + keypoint_heatmap_offsets_np[1, 2, 1] = [0.5, 0.0] + keypoint_heatmap_offsets_np[1, 0, 0] = [0.0, -0.5] + keypoint_heatmap_offsets_np[1, 0, 1] = [0.5, -0.5] + keypoint_heatmap_offsets_np[1, 2, 0] = [-1.0, -0.5] + + def graph_fn(): + keypoint_heatmap = tf.constant(keypoint_heatmap_np, dtype=tf.float32) + keypoint_heatmap_offsets = tf.constant( + keypoint_heatmap_offsets_np, dtype=tf.float32) + + keypoint_cands, keypoint_scores, num_keypoint_candidates = ( + cnma.prediction_tensors_to_keypoint_candidates( + keypoint_heatmap, + keypoint_heatmap_offsets, + keypoint_score_threshold=0.5, + max_pool_kernel_size=1, + max_candidates=2)) + return keypoint_cands, keypoint_scores, num_keypoint_candidates + + (keypoint_cands, keypoint_scores, + num_keypoint_candidates) = self.execute(graph_fn, []) + + expected_keypoint_candidates = [ + [ # Example 0. + [[0.5, 0.25], [1.0, 2.0]], # Keypoint 1. + [[1.75, 1.5], [1.0, 1.0]], # Keypoint 2. + ], + [ # Example 1. + [[1.25, 0.5], [0.0, -0.5]], # Keypoint 1. + [[2.5, 1.0], [0.5, 0.5]], # Keypoint 2. + ], + ] + expected_keypoint_scores = [ + [ # Example 0. + [1.0, 0.7], # Keypoint 1. + [0.7, 0.3], # Keypoint 2. + ], + [ # Example 1. + [0.6, 1.0], # Keypoint 1. + [0.5, 0.9], # Keypoint 2. + ], + ] + expected_num_keypoint_candidates = [ + [2, 1], + [2, 2] + ] + np.testing.assert_allclose(expected_keypoint_candidates, keypoint_cands) + np.testing.assert_allclose(expected_keypoint_scores, keypoint_scores) + np.testing.assert_array_equal(expected_num_keypoint_candidates, + num_keypoint_candidates) + + def test_keypoint_candidate_prediction_per_keypoints(self): + keypoint_heatmap_np = np.zeros((2, 3, 3, 2), dtype=np.float32) + keypoint_heatmap_np[0, 0, 0, 0] = 1.0 + keypoint_heatmap_np[0, 2, 1, 0] = 0.7 + keypoint_heatmap_np[0, 1, 1, 0] = 0.6 + keypoint_heatmap_np[0, 0, 2, 1] = 0.7 + keypoint_heatmap_np[0, 1, 1, 1] = 0.3 # Filtered by low score. 
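+ # Unlike the previous test, the offset map below has 2 * num_keypoints + # channels, i.e. a separate (dy, dx) pair per keypoint channel, exercising + # the per-keypoint offset layout of prediction_tensors_to_keypoint_candidates.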
+ keypoint_heatmap_np[0, 2, 2, 1] = 0.2 + keypoint_heatmap_np[1, 1, 0, 0] = 0.6 + keypoint_heatmap_np[1, 2, 1, 0] = 0.5 + keypoint_heatmap_np[1, 0, 0, 0] = 0.4 + keypoint_heatmap_np[1, 0, 0, 1] = 1.0 + keypoint_heatmap_np[1, 0, 1, 1] = 0.9 + keypoint_heatmap_np[1, 2, 0, 1] = 0.8 + + keypoint_heatmap_offsets_np = np.zeros((2, 3, 3, 4), dtype=np.float32) + keypoint_heatmap_offsets_np[0, 0, 0] = [0.5, 0.25, 0.0, 0.0] + keypoint_heatmap_offsets_np[0, 2, 1] = [-0.25, 0.5, 0.0, 0.0] + keypoint_heatmap_offsets_np[0, 1, 1] = [0.0, 0.0, 0.0, 0.0] + keypoint_heatmap_offsets_np[0, 0, 2] = [0.0, 0.0, 1.0, 0.0] + keypoint_heatmap_offsets_np[0, 2, 2] = [0.0, 0.0, 1.0, 1.0] + keypoint_heatmap_offsets_np[1, 1, 0] = [0.25, 0.5, 0.0, 0.0] + keypoint_heatmap_offsets_np[1, 2, 1] = [0.5, 0.0, 0.0, 0.0] + keypoint_heatmap_offsets_np[1, 0, 0] = [0.0, 0.0, 0.0, -0.5] + keypoint_heatmap_offsets_np[1, 0, 1] = [0.0, 0.0, 0.5, -0.5] + keypoint_heatmap_offsets_np[1, 2, 0] = [0.0, 0.0, -1.0, -0.5] + + def graph_fn(): + keypoint_heatmap = tf.constant(keypoint_heatmap_np, dtype=tf.float32) + keypoint_heatmap_offsets = tf.constant( + keypoint_heatmap_offsets_np, dtype=tf.float32) + + keypoint_cands, keypoint_scores, num_keypoint_candidates = ( + cnma.prediction_tensors_to_keypoint_candidates( + keypoint_heatmap, + keypoint_heatmap_offsets, + keypoint_score_threshold=0.5, + max_pool_kernel_size=1, + max_candidates=2)) + return keypoint_cands, keypoint_scores, num_keypoint_candidates + + (keypoint_cands, keypoint_scores, + num_keypoint_candidates) = self.execute(graph_fn, []) + + expected_keypoint_candidates = [ + [ # Example 0. + [[0.5, 0.25], [1.0, 2.0]], # Candidate 1 of keypoint 1, 2. + [[1.75, 1.5], [1.0, 1.0]], # Candidate 2 of keypoint 1, 2. + ], + [ # Example 1. + [[1.25, 0.5], [0.0, -0.5]], # Candidate 1 of keypoint 1, 2. + [[2.5, 1.0], [0.5, 0.5]], # Candidate 2 of keypoint 1, 2. + ], + ] + expected_keypoint_scores = [ + [ # Example 0. + [1.0, 0.7], # Candidate 1 scores of keypoint 1, 2. + [0.7, 0.3], # Candidate 2 scores of keypoint 1, 2. + ], + [ # Example 1. + [0.6, 1.0], # Candidate 1 scores of keypoint 1, 2. + [0.5, 0.9], # Candidate 2 scores of keypoint 1, 2. 
+ ], + ] + expected_num_keypoint_candidates = [ + [2, 1], + [2, 2] + ] + np.testing.assert_allclose(expected_keypoint_candidates, keypoint_cands) + np.testing.assert_allclose(expected_keypoint_scores, keypoint_scores) + np.testing.assert_array_equal(expected_num_keypoint_candidates, + num_keypoint_candidates) + + def test_regressed_keypoints_at_object_centers(self): + batch_size = 2 + num_keypoints = 5 + num_instances = 6 + regressed_keypoint_feature_map_np = np.random.randn( + batch_size, 10, 10, 2 * num_keypoints).astype(np.float32) + y_indices = np.random.choice(10, (batch_size, num_instances)) + x_indices = np.random.choice(10, (batch_size, num_instances)) + offsets = np.stack([y_indices, x_indices], axis=2).astype(np.float32) + + def graph_fn(): + regressed_keypoint_feature_map = tf.constant( + regressed_keypoint_feature_map_np, dtype=tf.float32) + + gathered_regressed_keypoints = ( + cnma.regressed_keypoints_at_object_centers( + regressed_keypoint_feature_map, + tf.constant(y_indices, dtype=tf.int32), + tf.constant(x_indices, dtype=tf.int32))) + return gathered_regressed_keypoints + + gathered_regressed_keypoints = self.execute(graph_fn, []) + + expected_gathered_keypoints_0 = regressed_keypoint_feature_map_np[ + 0, y_indices[0], x_indices[0], :] + expected_gathered_keypoints_1 = regressed_keypoint_feature_map_np[ + 1, y_indices[1], x_indices[1], :] + expected_gathered_keypoints = np.stack([ + expected_gathered_keypoints_0, + expected_gathered_keypoints_1], axis=0) + expected_gathered_keypoints = np.reshape( + expected_gathered_keypoints, + [batch_size, num_instances, num_keypoints, 2]) + expected_gathered_keypoints += np.expand_dims(offsets, axis=2) + expected_gathered_keypoints = np.reshape( + expected_gathered_keypoints, + [batch_size, num_instances, -1]) + np.testing.assert_allclose(expected_gathered_keypoints, + gathered_regressed_keypoints) + + @parameterized.parameters( + {'candidate_ranking_mode': 'min_distance'}, + {'candidate_ranking_mode': 'score_distance_ratio'}, + ) + def test_refine_keypoints(self, candidate_ranking_mode): + regressed_keypoints_np = np.array( + [ + # Example 0. + [ + [[2.0, 2.0], [6.0, 10.0], [14.0, 7.0]], # Instance 0. + [[0.0, 6.0], [3.0, 3.0], [5.0, 7.0]], # Instance 1. + ], + # Example 1. + [ + [[6.0, 2.0], [0.0, 0.0], [0.1, 0.1]], # Instance 0. + [[6.0, 2.5], [5.0, 5.0], [9.0, 3.0]], # Instance 1. + ], + ], dtype=np.float32) + keypoint_candidates_np = np.array( + [ + # Example 0. + [ + [[2.0, 2.5], [6.0, 10.5], [4.0, 7.0]], # Candidate 0. + [[1.0, 8.0], [0.0, 0.0], [2.0, 2.0]], # Candidate 1. + [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], # Candidate 2. + ], + # Example 1. + [ + [[6.0, 1.5], [0.1, 0.4], [0.0, 0.0]], # Candidate 0. + [[1.0, 4.0], [0.0, 0.3], [0.0, 0.0]], # Candidate 1. + [[0.0, 0.0], [0.1, 0.3], [0.0, 0.0]], # Candidate 2. + ] + ], dtype=np.float32) + keypoint_scores_np = np.array( + [ + # Example 0. + [ + [0.8, 0.9, 1.0], # Candidate 0. + [0.6, 0.1, 0.9], # Candidate 1. + [0.0, 0.0, 0.0], # Candidate 1. + ], + # Example 1. + [ + [0.7, 0.3, 0.0], # Candidate 0. + [0.6, 0.1, 0.0], # Candidate 1. + [0.0, 0.28, 0.0], # Candidate 1. + ] + ], dtype=np.float32) + num_keypoints_candidates_np = np.array( + [ + # Example 0. + [2, 2, 2], + # Example 1. 
+ [2, 3, 0], + ], dtype=np.int32) + unmatched_keypoint_score = 0.1 + + def graph_fn(): + regressed_keypoints = tf.constant( + regressed_keypoints_np, dtype=tf.float32) + keypoint_candidates = tf.constant( + keypoint_candidates_np, dtype=tf.float32) + keypoint_scores = tf.constant(keypoint_scores_np, dtype=tf.float32) + num_keypoint_candidates = tf.constant(num_keypoints_candidates_np, + dtype=tf.int32) + refined_keypoints, refined_scores = cnma.refine_keypoints( + regressed_keypoints, keypoint_candidates, keypoint_scores, + num_keypoint_candidates, bboxes=None, + unmatched_keypoint_score=unmatched_keypoint_score, + box_scale=1.2, candidate_search_scale=0.3, + candidate_ranking_mode=candidate_ranking_mode) + return refined_keypoints, refined_scores + + refined_keypoints, refined_scores = self.execute(graph_fn, []) + + if candidate_ranking_mode == 'min_distance': + expected_refined_keypoints = np.array( + [ + # Example 0. + [ + [[2.0, 2.5], [6.0, 10.5], [14.0, 7.0]], # Instance 0. + [[0.0, 6.0], [3.0, 3.0], [4.0, 7.0]], # Instance 1. + ], + # Example 1. + [ + [[6.0, 1.5], [0.0, 0.3], [0.1, 0.1]], # Instance 0. + [[6.0, 2.5], [5.0, 5.0], [9.0, 3.0]], # Instance 1. + ], + ], dtype=np.float32) + expected_refined_scores = np.array( + [ + # Example 0. + [ + [0.8, 0.9, unmatched_keypoint_score], # Instance 0. + [unmatched_keypoint_score, # Instance 1. + unmatched_keypoint_score, 1.0], + ], + # Example 1. + [ + [0.7, 0.1, unmatched_keypoint_score], # Instance 0. + [unmatched_keypoint_score, # Instance 1. + 0.1, unmatched_keypoint_score], + ], + ], dtype=np.float32) + else: + expected_refined_keypoints = np.array( + [ + # Example 0. + [ + [[2.0, 2.5], [6.0, 10.5], [14.0, 7.0]], # Instance 0. + [[0.0, 6.0], [3.0, 3.0], [4.0, 7.0]], # Instance 1. + ], + # Example 1. + [ + [[6.0, 1.5], [0.1, 0.3], [0.1, 0.1]], # Instance 0. + [[6.0, 2.5], [5.0, 5.0], [9.0, 3.0]], # Instance 1. + ], + ], dtype=np.float32) + expected_refined_scores = np.array( + [ + # Example 0. + [ + [0.8, 0.9, unmatched_keypoint_score], # Instance 0. + [unmatched_keypoint_score, # Instance 1. + unmatched_keypoint_score, 1.0], + ], + # Example 1. + [ + [0.7, 0.28, unmatched_keypoint_score], # Instance 0. + [unmatched_keypoint_score, # Instance 1. + 0.1, unmatched_keypoint_score], + ], + ], dtype=np.float32) + + np.testing.assert_allclose(expected_refined_keypoints, refined_keypoints) + np.testing.assert_allclose(expected_refined_scores, refined_scores) + + def test_refine_keypoints_with_bboxes(self): + regressed_keypoints_np = np.array( + [ + # Example 0. + [ + [[2.0, 2.0], [6.0, 10.0], [14.0, 7.0]], # Instance 0. + [[0.0, 6.0], [3.0, 3.0], [5.0, 7.0]], # Instance 1. + ], + # Example 1. + [ + [[6.0, 2.0], [0.0, 0.0], [0.1, 0.1]], # Instance 0. + [[6.0, 2.5], [5.0, 5.0], [9.0, 3.0]], # Instance 1. + ], + ], dtype=np.float32) + keypoint_candidates_np = np.array( + [ + # Example 0. + [ + [[2.0, 2.5], [6.0, 10.5], [4.0, 7.0]], # Candidate 0. + [[1.0, 8.0], [0.0, 0.0], [2.0, 2.0]], # Candidate 1. + ], + # Example 1. + [ + [[6.0, 1.5], [5.0, 5.0], [0.0, 0.0]], # Candidate 0. + [[1.0, 4.0], [0.0, 0.3], [0.0, 0.0]], # Candidate 1. + ] + ], dtype=np.float32) + keypoint_scores_np = np.array( + [ + # Example 0. + [ + [0.8, 0.9, 1.0], # Candidate 0. + [0.6, 0.1, 0.9], # Candidate 1. + ], + # Example 1. + [ + [0.7, 0.4, 0.0], # Candidate 0. + [0.6, 0.1, 0.0], # Candidate 1. + ] + ], dtype=np.float32) + num_keypoints_candidates_np = np.array( + [ + # Example 0. + [2, 2, 2], + # Example 1. 
+ [2, 2, 0], + ], dtype=np.int32) + bboxes_np = np.array( + [ + # Example 0. + [ + [2.0, 2.0, 14.0, 10.0], # Instance 0. + [0.0, 3.0, 5.0, 7.0], # Instance 1. + ], + # Example 1. + [ + [0.0, 0.0, 6.0, 2.0], # Instance 0. + [5.0, 1.4, 9.0, 5.0], # Instance 1. + ], + ], dtype=np.float32) + unmatched_keypoint_score = 0.1 + + def graph_fn(): + regressed_keypoints = tf.constant( + regressed_keypoints_np, dtype=tf.float32) + keypoint_candidates = tf.constant( + keypoint_candidates_np, dtype=tf.float32) + keypoint_scores = tf.constant(keypoint_scores_np, dtype=tf.float32) + num_keypoint_candidates = tf.constant(num_keypoints_candidates_np, + dtype=tf.int32) + bboxes = tf.constant(bboxes_np, dtype=tf.float32) + refined_keypoints, refined_scores = cnma.refine_keypoints( + regressed_keypoints, keypoint_candidates, keypoint_scores, + num_keypoint_candidates, bboxes=bboxes, + unmatched_keypoint_score=unmatched_keypoint_score, + box_scale=1.0, candidate_search_scale=0.3) + return refined_keypoints, refined_scores + + refined_keypoints, refined_scores = self.execute(graph_fn, []) + + expected_refined_keypoints = np.array( + [ + # Example 0. + [ + [[2.0, 2.5], [6.0, 10.0], [14.0, 7.0]], # Instance 0. + [[0.0, 6.0], [3.0, 3.0], [4.0, 7.0]], # Instance 1. + ], + # Example 1. + [ + [[6.0, 1.5], [0.0, 0.3], [0.1, 0.1]], # Instance 0. + [[6.0, 1.5], [5.0, 5.0], [9.0, 3.0]], # Instance 1. + ], + ], dtype=np.float32) + expected_refined_scores = np.array( + [ + # Example 0. + [ + [0.8, unmatched_keypoint_score, # Instance 0. + unmatched_keypoint_score], + [unmatched_keypoint_score, # Instance 1. + unmatched_keypoint_score, 1.0], + ], + # Example 1. + [ + [0.7, 0.1, unmatched_keypoint_score], # Instance 0. + [0.7, 0.4, unmatched_keypoint_score], # Instance 1. + ], + ], dtype=np.float32) + + np.testing.assert_allclose(expected_refined_keypoints, refined_keypoints) + np.testing.assert_allclose(expected_refined_scores, refined_scores) + + def test_pad_to_full_keypoint_dim(self): + batch_size = 4 + num_instances = 8 + num_keypoints = 2 + keypoint_inds = [1, 3] + num_total_keypoints = 5 + + kpt_coords_np = np.random.randn(batch_size, num_instances, num_keypoints, 2) + kpt_scores_np = np.random.randn(batch_size, num_instances, num_keypoints) + + def graph_fn(): + kpt_coords = tf.constant(kpt_coords_np) + kpt_scores = tf.constant(kpt_scores_np) + kpt_coords_padded, kpt_scores_padded = ( + cnma._pad_to_full_keypoint_dim( + kpt_coords, kpt_scores, keypoint_inds, num_total_keypoints)) + return kpt_coords_padded, kpt_scores_padded + + kpt_coords_padded, kpt_scores_padded = self.execute(graph_fn, []) + + self.assertAllEqual([batch_size, num_instances, num_total_keypoints, 2], + kpt_coords_padded.shape) + self.assertAllEqual([batch_size, num_instances, num_total_keypoints], + kpt_scores_padded.shape) + + for i, kpt_ind in enumerate(keypoint_inds): + np.testing.assert_allclose(kpt_coords_np[:, :, i, :], + kpt_coords_padded[:, :, kpt_ind, :]) + np.testing.assert_allclose(kpt_scores_np[:, :, i], + kpt_scores_padded[:, :, kpt_ind]) + + def test_pad_to_full_instance_dim(self): + batch_size = 4 + max_instances = 8 + num_keypoints = 6 + num_instances = 2 + instance_inds = [1, 3] + + kpt_coords_np = np.random.randn(batch_size, num_instances, num_keypoints, 2) + kpt_scores_np = np.random.randn(batch_size, num_instances, num_keypoints) + + def graph_fn(): + kpt_coords = tf.constant(kpt_coords_np) + kpt_scores = tf.constant(kpt_scores_np) + kpt_coords_padded, kpt_scores_padded = ( + cnma._pad_to_full_instance_dim( + kpt_coords, 
kpt_scores, instance_inds, max_instances)) + return kpt_coords_padded, kpt_scores_padded + + kpt_coords_padded, kpt_scores_padded = self.execute(graph_fn, []) + + self.assertAllEqual([batch_size, max_instances, num_keypoints, 2], + kpt_coords_padded.shape) + self.assertAllEqual([batch_size, max_instances, num_keypoints], + kpt_scores_padded.shape) + + for i, inst_ind in enumerate(instance_inds): + np.testing.assert_allclose(kpt_coords_np[:, i, :, :], + kpt_coords_padded[:, inst_ind, :, :]) + np.testing.assert_allclose(kpt_scores_np[:, i, :], + kpt_scores_padded[:, inst_ind, :]) + + +# Common parameters for setting up testing examples across tests. +_NUM_CLASSES = 10 +_KEYPOINT_INDICES = [0, 1, 2, 3] +_NUM_KEYPOINTS = len(_KEYPOINT_INDICES) +_TASK_NAME = 'human_pose' + + +def get_fake_center_params(): + """Returns the fake object center parameter namedtuple.""" + return cnma.ObjectCenterParams( + classification_loss=losses.WeightedSigmoidClassificationLoss(), + object_center_loss_weight=1.0, + min_box_overlap_iou=1.0, + max_box_predictions=5, + use_labeled_classes=False) + + +def get_fake_od_params(): + """Returns the fake object detection parameter namedtuple.""" + return cnma.ObjectDetectionParams( + localization_loss=losses.L1LocalizationLoss(), + offset_loss_weight=1.0, + scale_loss_weight=0.1) + + +def get_fake_kp_params(): + """Returns the fake keypoint estimation parameter namedtuple.""" + return cnma.KeypointEstimationParams( + task_name=_TASK_NAME, + class_id=1, + keypoint_indices=_KEYPOINT_INDICES, + keypoint_std_dev=[0.00001] * len(_KEYPOINT_INDICES), + classification_loss=losses.WeightedSigmoidClassificationLoss(), + localization_loss=losses.L1LocalizationLoss(), + keypoint_candidate_score_threshold=0.1) + + +def get_fake_mask_params(): + """Returns the fake mask estimation parameter namedtuple.""" + return cnma.MaskParams( + classification_loss=losses.WeightedSoftmaxClassificationLoss(), + task_loss_weight=1.0, + mask_height=4, + mask_width=4) + + +def build_center_net_meta_arch(build_resnet=False): + """Builds the CenterNet meta architecture.""" + if build_resnet: + feature_extractor = ( + center_net_resnet_feature_extractor.CenterNetResnetFeatureExtractor( + 'resnet_v2_101')) + else: + feature_extractor = DummyFeatureExtractor( + channel_means=(1.0, 2.0, 3.0), + channel_stds=(10., 20., 30.), + bgr_ordering=False, + num_feature_outputs=2, + stride=4) + image_resizer_fn = functools.partial( + preprocessor.resize_to_range, + min_dimension=128, + max_dimension=128, + pad_to_max_dimension=True) + return cnma.CenterNetMetaArch( + is_training=True, + add_summaries=False, + num_classes=_NUM_CLASSES, + feature_extractor=feature_extractor, + image_resizer_fn=image_resizer_fn, + object_center_params=get_fake_center_params(), + object_detection_params=get_fake_od_params(), + keypoint_params_dict={_TASK_NAME: get_fake_kp_params()}, + mask_params=get_fake_mask_params()) + + +def _logit(p): + return np.log( + (p + np.finfo(np.float32).eps) / (1 - p + np.finfo(np.float32).eps)) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class CenterNetMetaArchLibTest(test_case.TestCase): + """Test for CenterNet meta architecture related functions.""" + + def test_get_keypoint_name(self): + self.assertEqual('human_pose/keypoint_offset', + cnma.get_keypoint_name('human_pose', 'keypoint_offset')) + + def test_get_num_instances_from_weights(self): + weight1 = tf.constant([0.0, 0.0, 0.0], dtype=tf.float32) + weight2 = tf.constant([0.5, 0.9, 0.0], dtype=tf.float32) + weight3 =
tf.constant([0.0, 0.0, 1.0], dtype=tf.float32) + + def graph_fn_1(): + # Total of three elements with non-zero values. + num_instances = cnma.get_num_instances_from_weights( + [weight1, weight2, weight3]) + return num_instances + num_instances = self.execute(graph_fn_1, []) + self.assertAlmostEqual(3, num_instances) + + # No non-zero value in the weights. Return minimum value: 1. + def graph_fn_2(): + # No elements with non-zero values. + num_instances = cnma.get_num_instances_from_weights([weight1, weight1]) + return num_instances + num_instances = self.execute(graph_fn_2, []) + self.assertAlmostEqual(1, num_instances) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class CenterNetMetaArchTest(test_case.TestCase, parameterized.TestCase): + """Tests for the CenterNet meta architecture.""" + + def test_construct_prediction_heads(self): + model = build_center_net_meta_arch() + fake_feature_map = np.zeros((4, 128, 128, 8)) + + # Check the dictionary contains expected keys and corresponding heads with + # correct dimensions. + # "object center" head: + output = model._prediction_head_dict[cnma.OBJECT_CENTER][-1]( + fake_feature_map) + self.assertEqual((4, 128, 128, _NUM_CLASSES), output.shape) + + # "object scale" (height/width) head: + output = model._prediction_head_dict[cnma.BOX_SCALE][-1](fake_feature_map) + self.assertEqual((4, 128, 128, 2), output.shape) + + # "object offset" head: + output = model._prediction_head_dict[cnma.BOX_OFFSET][-1](fake_feature_map) + self.assertEqual((4, 128, 128, 2), output.shape) + + # "keypoint offset" head: + output = model._prediction_head_dict[ + cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET)][-1]( + fake_feature_map) + self.assertEqual((4, 128, 128, 2), output.shape) + + # "keypoint heatmap" head: + output = model._prediction_head_dict[cnma.get_keypoint_name( + _TASK_NAME, cnma.KEYPOINT_HEATMAP)][-1]( + fake_feature_map) + self.assertEqual((4, 128, 128, _NUM_KEYPOINTS), output.shape) + + # "keypoint regression" head: + output = model._prediction_head_dict[cnma.get_keypoint_name( + _TASK_NAME, cnma.KEYPOINT_REGRESSION)][-1]( + fake_feature_map) + self.assertEqual((4, 128, 128, 2 * _NUM_KEYPOINTS), output.shape) + + # "mask" head: + output = model._prediction_head_dict[cnma.SEGMENTATION_HEATMAP][-1]( + fake_feature_map) + self.assertEqual((4, 128, 128, _NUM_CLASSES), output.shape) + + def test_initialize_target_assigners(self): + model = build_center_net_meta_arch() + assigner_dict = model._initialize_target_assigners( + stride=2, + min_box_overlap_iou=0.7) + + # Check whether the corresponding target assigner class is initialized.
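+ # One assigner is expected per task: object center, box detection, each + # keypoint task (keyed by its task name) and segmentation.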
+ # object center target assigner: + self.assertIsInstance(assigner_dict[cnma.OBJECT_CENTER], + cn_assigner.CenterNetCenterHeatmapTargetAssigner) + + # object detection target assigner: + self.assertIsInstance(assigner_dict[cnma.DETECTION_TASK], + cn_assigner.CenterNetBoxTargetAssigner) + + # keypoint estimation target assigner: + self.assertIsInstance(assigner_dict[_TASK_NAME], + cn_assigner.CenterNetKeypointTargetAssigner) + + # mask estimation target assigner: + self.assertIsInstance(assigner_dict[cnma.SEGMENTATION_TASK], + cn_assigner.CenterNetMaskTargetAssigner) + + def test_predict(self): + """Test the predict function.""" + + model = build_center_net_meta_arch() + def graph_fn(): + prediction_dict = model.predict(tf.zeros([2, 128, 128, 3]), None) + return prediction_dict + + prediction_dict = self.execute(graph_fn, []) + + self.assertEqual(prediction_dict['preprocessed_inputs'].shape, + (2, 128, 128, 3)) + self.assertEqual(prediction_dict[cnma.OBJECT_CENTER][0].shape, + (2, 32, 32, _NUM_CLASSES)) + self.assertEqual(prediction_dict[cnma.BOX_SCALE][0].shape, + (2, 32, 32, 2)) + self.assertEqual(prediction_dict[cnma.BOX_OFFSET][0].shape, + (2, 32, 32, 2)) + self.assertEqual(prediction_dict[cnma.SEGMENTATION_HEATMAP][0].shape, + (2, 32, 32, _NUM_CLASSES)) + + def test_loss(self): + """Test the loss function.""" + groundtruth_dict = get_fake_groundtruth_dict(16, 32, 4) + model = build_center_net_meta_arch() + model.provide_groundtruth( + groundtruth_boxes_list=groundtruth_dict[fields.BoxListFields.boxes], + groundtruth_weights_list=groundtruth_dict[fields.BoxListFields.weights], + groundtruth_classes_list=groundtruth_dict[fields.BoxListFields.classes], + groundtruth_keypoints_list=groundtruth_dict[ + fields.BoxListFields.keypoints], + groundtruth_masks_list=groundtruth_dict[ + fields.BoxListFields.masks]) + + prediction_dict = get_fake_prediction_dict( + input_height=16, input_width=32, stride=4) + + def graph_fn(): + loss_dict = model.loss(prediction_dict, + tf.constant([[16, 24, 3], [16, 24, 3]])) + return loss_dict + + loss_dict = self.execute(graph_fn, []) + + # The prediction and groundtruth are curated to produce very low loss. 
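+ # Note that assertGreater(0.01, x) asserts x < 0.01, i.e. each individual + # loss term below is expected to be close to zero.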
+ self.assertGreater( + 0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.OBJECT_CENTER)]) + self.assertGreater( + 0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.BOX_SCALE)]) + self.assertGreater( + 0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.BOX_OFFSET)]) + self.assertGreater( + 0.01, + loss_dict['%s/%s' % + (cnma.LOSS_KEY_PREFIX, + cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP))]) + self.assertGreater( + 0.01, + loss_dict['%s/%s' % + (cnma.LOSS_KEY_PREFIX, + cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET))]) + self.assertGreater( + 0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, + cnma.get_keypoint_name( + _TASK_NAME, cnma.KEYPOINT_REGRESSION))]) + self.assertGreater( + 0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, + cnma.SEGMENTATION_HEATMAP)]) + + @parameterized.parameters( + {'target_class_id': 1}, + {'target_class_id': 2}, + ) + def test_postprocess(self, target_class_id): + """Test the postprocess function.""" + model = build_center_net_meta_arch() + max_detection = model._center_params.max_box_predictions + num_keypoints = len(model._kp_params_dict[_TASK_NAME].keypoint_indices) + + class_center = np.zeros((1, 32, 32, 10), dtype=np.float32) + height_width = np.zeros((1, 32, 32, 2), dtype=np.float32) + offset = np.zeros((1, 32, 32, 2), dtype=np.float32) + keypoint_heatmaps = np.zeros((1, 32, 32, num_keypoints), dtype=np.float32) + keypoint_offsets = np.zeros((1, 32, 32, 2), dtype=np.float32) + keypoint_regression = np.random.randn(1, 32, 32, num_keypoints * 2) + + class_probs = np.zeros(10) + class_probs[target_class_id] = _logit(0.75) + class_center[0, 16, 16] = class_probs + height_width[0, 16, 16] = [5, 10] + offset[0, 16, 16] = [.25, .5] + keypoint_regression[0, 16, 16] = [ + -1., -1., + -1., 1., + 1., -1., + 1., 1.] + keypoint_heatmaps[0, 14, 14, 0] = _logit(0.9) + keypoint_heatmaps[0, 14, 18, 1] = _logit(0.9) + keypoint_heatmaps[0, 18, 14, 2] = _logit(0.9) + keypoint_heatmaps[0, 18, 18, 3] = _logit(0.05) # Note the low score. 
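+ # The fourth keypoint's score (0.05) is below the candidate score threshold + # (0.1) set in get_fake_kp_params, so postprocessing is expected to fall + # back to its regressed location and assign it cnma.UNMATCHED_KEYPOINT_SCORE + # (checked further below).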
+ + segmentation_heatmap = np.zeros((1, 32, 32, 10), dtype=np.float32) + segmentation_heatmap[:, 14:18, 14:18, target_class_id] = 1.0 + segmentation_heatmap = _logit(segmentation_heatmap) + + class_center = tf.constant(class_center) + height_width = tf.constant(height_width) + offset = tf.constant(offset) + keypoint_heatmaps = tf.constant(keypoint_heatmaps, dtype=tf.float32) + keypoint_offsets = tf.constant(keypoint_offsets, dtype=tf.float32) + keypoint_regression = tf.constant(keypoint_regression, dtype=tf.float32) + segmentation_heatmap = tf.constant(segmentation_heatmap, dtype=tf.float32) + + prediction_dict = { + cnma.OBJECT_CENTER: [class_center], + cnma.BOX_SCALE: [height_width], + cnma.BOX_OFFSET: [offset], + cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP): + [keypoint_heatmaps], + cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET): + [keypoint_offsets], + cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_REGRESSION): + [keypoint_regression], + cnma.SEGMENTATION_HEATMAP: [segmentation_heatmap], + } + + def graph_fn(): + detections = model.postprocess(prediction_dict, + tf.constant([[128, 128, 3]])) + return detections + + detections = self.execute_cpu(graph_fn, []) + + self.assertAllClose(detections['detection_boxes'][0, 0], + np.array([55, 46, 75, 86]) / 128.0) + self.assertAllClose(detections['detection_scores'][0], + [.75, .5, .5, .5, .5]) + self.assertEqual(detections['detection_classes'][0, 0], target_class_id) + self.assertEqual(detections['num_detections'], [5]) + self.assertAllEqual([1, max_detection, num_keypoints, 2], + detections['detection_keypoints'].shape) + self.assertAllEqual([1, max_detection, num_keypoints], + detections['detection_keypoint_scores'].shape) + self.assertAllEqual([1, max_detection, 4, 4], + detections['detection_masks'].shape) + + # There should be some section of the first mask (correspond to the only + # detection) with non-zero mask values. + self.assertGreater(np.sum(detections['detection_masks'][0, 0, :, :] > 0), 0) + self.assertAllEqual( + detections['detection_masks'][0, 1:, :, :], + np.zeros_like(detections['detection_masks'][0, 1:, :, :])) + + if target_class_id == 1: + expected_kpts_for_obj_0 = np.array( + [[14., 14.], [14., 18.], [18., 14.], [17., 17.]]) / 32. + expected_kpt_scores_for_obj_0 = np.array( + [0.9, 0.9, 0.9, cnma.UNMATCHED_KEYPOINT_SCORE]) + np.testing.assert_allclose(detections['detection_keypoints'][0][0], + expected_kpts_for_obj_0, rtol=1e-6) + np.testing.assert_allclose(detections['detection_keypoint_scores'][0][0], + expected_kpt_scores_for_obj_0, rtol=1e-6) + else: + # All keypoint outputs should be zeros. 
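+ # The keypoint task is configured with class_id=1, so for any other + # detected class no instances match and the padded keypoint coordinates and + # scores keep their zero defaults.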
+ np.testing.assert_allclose( + detections['detection_keypoints'][0][0], + np.zeros([num_keypoints, 2], np.float), + rtol=1e-6) + np.testing.assert_allclose( + detections['detection_keypoint_scores'][0][0], + np.zeros([num_keypoints], np.float), + rtol=1e-6) + + def test_get_instance_indices(self): + classes = tf.constant([[0, 1, 2, 0], [2, 1, 2, 2]], dtype=tf.int32) + num_detections = tf.constant([1, 3], dtype=tf.int32) + batch_index = 1 + class_id = 2 + model = build_center_net_meta_arch() + valid_indices = model._get_instance_indices( + classes, num_detections, batch_index, class_id) + self.assertAllEqual(valid_indices.numpy(), [0, 2]) + + +def get_fake_prediction_dict(input_height, input_width, stride): + """Prepares the fake prediction dictionary.""" + output_height = input_height // stride + output_width = input_width // stride + object_center = np.zeros((2, output_height, output_width, _NUM_CLASSES), + dtype=np.float32) + # Box center: + # y: floor((0.54 + 0.56) / 2 * 4) = 2, + # x: floor((0.54 + 0.56) / 2 * 8) = 4 + object_center[0, 2, 4, 1] = 1.0 + object_center = _logit(object_center) + + # Box size: + # height: (0.56 - 0.54) * 4 = 0.08 + # width: (0.56 - 0.54) * 8 = 0.16 + object_scale = np.zeros((2, output_height, output_width, 2), dtype=np.float32) + object_scale[0, 2, 4] = 0.08, 0.16 + + # Box center offset coordinate (0.55, 0.55): + # y-offset: 0.55 * 4 - 2 = 0.2 + # x-offset: 0.55 * 8 - 4 = 0.4 + object_offset = np.zeros((2, output_height, output_width, 2), + dtype=np.float32) + object_offset[0, 2, 4] = 0.2, 0.4 + + keypoint_heatmap = np.zeros((2, output_height, output_width, _NUM_KEYPOINTS), + dtype=np.float32) + keypoint_heatmap[0, 2, 4, 1] = 1.0 + keypoint_heatmap[0, 2, 4, 3] = 1.0 + keypoint_heatmap = _logit(keypoint_heatmap) + + keypoint_offset = np.zeros((2, output_height, output_width, 2), + dtype=np.float32) + keypoint_offset[0, 2, 4] = 0.2, 0.4 + + keypoint_regression = np.zeros( + (2, output_height, output_width, 2 * _NUM_KEYPOINTS), dtype=np.float32) + keypoint_regression[0, 2, 4] = 0.0, 0.0, 0.2, 0.4, 0.0, 0.0, 0.2, 0.4 + + mask_heatmap = np.zeros((2, output_height, output_width, _NUM_CLASSES), + dtype=np.float32) + mask_heatmap[0, 2, 4, 1] = 1.0 + mask_heatmap = _logit(mask_heatmap) + + prediction_dict = { + 'preprocessed_inputs': + tf.zeros((2, input_height, input_width, 3)), + cnma.OBJECT_CENTER: [ + tf.constant(object_center), + tf.constant(object_center) + ], + cnma.BOX_SCALE: [ + tf.constant(object_scale), + tf.constant(object_scale) + ], + cnma.BOX_OFFSET: [ + tf.constant(object_offset), + tf.constant(object_offset) + ], + cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP): [ + tf.constant(keypoint_heatmap), + tf.constant(keypoint_heatmap) + ], + cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET): [ + tf.constant(keypoint_offset), + tf.constant(keypoint_offset) + ], + cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_REGRESSION): [ + tf.constant(keypoint_regression), + tf.constant(keypoint_regression) + ], + cnma.SEGMENTATION_HEATMAP: [ + tf.constant(mask_heatmap), + tf.constant(mask_heatmap) + ] + } + return prediction_dict + + +def get_fake_groundtruth_dict(input_height, input_width, stride): + """Prepares the fake groundtruth dictionary.""" + # A small box with center at (0.55, 0.55). 
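+ # With the 16x32 inputs and stride 4 used in these tests, this center falls + # in output cell (y, x) = (2, 4), matching the peak placed in + # get_fake_prediction_dict so that all loss terms stay near zero.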
+ boxes = [ + tf.constant([[0.54, 0.54, 0.56, 0.56]]), + tf.constant([[0.0, 0.0, 0.5, 0.5]]), + ] + classes = [ + tf.one_hot([1], depth=_NUM_CLASSES), + tf.one_hot([0], depth=_NUM_CLASSES), + ] + weights = [ + tf.constant([1.]), + tf.constant([0.]), + ] + keypoints = [ + tf.tile( + tf.expand_dims( + tf.constant([[float('nan'), 0.55, + float('nan'), 0.55, 0.55, 0.0]]), + axis=2), + multiples=[1, 1, 2]), + tf.tile( + tf.expand_dims( + tf.constant([[float('nan'), 0.55, + float('nan'), 0.55, 0.55, 0.0]]), + axis=2), + multiples=[1, 1, 2]), + ] + labeled_classes = [ + tf.one_hot([1], depth=_NUM_CLASSES) + tf.one_hot([2], depth=_NUM_CLASSES), + tf.one_hot([0], depth=_NUM_CLASSES) + tf.one_hot([1], depth=_NUM_CLASSES), + ] + mask = np.zeros((1, input_height, input_width), dtype=np.float32) + mask[0, 8:8+stride, 16:16+stride] = 1 + masks = [ + tf.constant(mask), + tf.zeros_like(mask), + ] + groundtruth_dict = { + fields.BoxListFields.boxes: boxes, + fields.BoxListFields.weights: weights, + fields.BoxListFields.classes: classes, + fields.BoxListFields.keypoints: keypoints, + fields.BoxListFields.masks: masks, + fields.InputDataFields.groundtruth_labeled_classes: labeled_classes, + } + return groundtruth_dict + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class CenterNetMetaComputeLossTest(test_case.TestCase): + """Test for CenterNet loss compuation related functions.""" + + def setUp(self): + self.model = build_center_net_meta_arch() + self.classification_loss_fn = self.model._center_params.classification_loss + self.localization_loss_fn = self.model._od_params.localization_loss + self.true_image_shapes = tf.constant([[16, 24, 3], [16, 24, 3]]) + self.input_height = 16 + self.input_width = 32 + self.stride = 4 + self.per_pixel_weights = self.get_per_pixel_weights(self.true_image_shapes, + self.input_height, + self.input_width, + self.stride) + self.prediction_dict = get_fake_prediction_dict(self.input_height, + self.input_width, + self.stride) + self.model._groundtruth_lists = get_fake_groundtruth_dict( + self.input_height, self.input_width, self.stride) + super(CenterNetMetaComputeLossTest, self).setUp() + + def get_per_pixel_weights(self, true_image_shapes, input_height, input_width, + stride): + output_height, output_width = (input_height // stride, + input_width // stride) + + # TODO(vighneshb) Explore whether using floor here is safe. + output_true_image_shapes = tf.ceil(tf.to_float(true_image_shapes) / stride) + per_pixel_weights = cnma.get_valid_anchor_weights_in_flattened_image( + output_true_image_shapes, output_height, output_width) + per_pixel_weights = tf.expand_dims(per_pixel_weights, 2) + return per_pixel_weights + + def test_compute_object_center_loss(self): + def graph_fn(): + loss = self.model._compute_object_center_loss( + object_center_predictions=self.prediction_dict[cnma.OBJECT_CENTER], + input_height=self.input_height, + input_width=self.input_width, + per_pixel_weights=self.per_pixel_weights) + return loss + + loss = self.execute(graph_fn, []) + + # The prediction and groundtruth are curated to produce very low loss. 
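    # (Note the argument order: assertGreater(0.01, loss) asserts 0.01 > loss,
    # i.e. the loss is expected to stay below 0.01.)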
+ self.assertGreater(0.01, loss) + + default_value = self.model._center_params.use_only_known_classes + self.model._center_params = ( + self.model._center_params._replace(use_only_known_classes=True)) + loss = self.model._compute_object_center_loss( + object_center_predictions=self.prediction_dict[cnma.OBJECT_CENTER], + input_height=self.input_height, + input_width=self.input_width, + per_pixel_weights=self.per_pixel_weights) + self.model._center_params = ( + self.model._center_params._replace( + use_only_known_classes=default_value)) + + # The prediction and groundtruth are curated to produce very low loss. + self.assertGreater(0.01, loss) + + def test_compute_box_scale_and_offset_loss(self): + def graph_fn(): + scale_loss, offset_loss = self.model._compute_box_scale_and_offset_loss( + scale_predictions=self.prediction_dict[cnma.BOX_SCALE], + offset_predictions=self.prediction_dict[cnma.BOX_OFFSET], + input_height=self.input_height, + input_width=self.input_width) + return scale_loss, offset_loss + + scale_loss, offset_loss = self.execute(graph_fn, []) + + # The prediction and groundtruth are curated to produce very low loss. + self.assertGreater(0.01, scale_loss) + self.assertGreater(0.01, offset_loss) + + def test_compute_kp_heatmap_loss(self): + def graph_fn(): + loss = self.model._compute_kp_heatmap_loss( + input_height=self.input_height, + input_width=self.input_width, + task_name=_TASK_NAME, + heatmap_predictions=self.prediction_dict[cnma.get_keypoint_name( + _TASK_NAME, cnma.KEYPOINT_HEATMAP)], + classification_loss_fn=self.classification_loss_fn, + per_pixel_weights=self.per_pixel_weights) + return loss + + loss = self.execute(graph_fn, []) + + # The prediction and groundtruth are curated to produce very low loss. + self.assertGreater(0.01, loss) + + def test_compute_kp_offset_loss(self): + def graph_fn(): + loss = self.model._compute_kp_offset_loss( + input_height=self.input_height, + input_width=self.input_width, + task_name=_TASK_NAME, + offset_predictions=self.prediction_dict[cnma.get_keypoint_name( + _TASK_NAME, cnma.KEYPOINT_OFFSET)], + localization_loss_fn=self.localization_loss_fn) + return loss + + loss = self.execute(graph_fn, []) + + # The prediction and groundtruth are curated to produce very low loss. + self.assertGreater(0.01, loss) + + def test_compute_kp_regression_loss(self): + def graph_fn(): + loss = self.model._compute_kp_regression_loss( + input_height=self.input_height, + input_width=self.input_width, + task_name=_TASK_NAME, + regression_predictions=self.prediction_dict[cnma.get_keypoint_name( + _TASK_NAME, cnma.KEYPOINT_REGRESSION,)], + localization_loss_fn=self.localization_loss_fn) + return loss + + loss = self.execute(graph_fn, []) + + # The prediction and groundtruth are curated to produce very low loss. 
+ self.assertGreater(0.01, loss) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class CenterNetMetaArchRestoreTest(test_case.TestCase): + + def test_restore_map_resnet(self): + """Test restore map for a resnet backbone.""" + + model = build_center_net_meta_arch(build_resnet=True) + restore_map = model.restore_map('classification') + self.assertIsInstance(restore_map['feature_extractor'], tf.keras.Model) + + +class DummyFeatureExtractor(cnma.CenterNetFeatureExtractor): + + def __init__(self, + channel_means, + channel_stds, + bgr_ordering, + num_feature_outputs, + stride): + self._num_feature_outputs = num_feature_outputs + self._stride = stride + super(DummyFeatureExtractor, self).__init__( + channel_means=channel_means, channel_stds=channel_stds, + bgr_ordering=bgr_ordering) + + def predict(self): + pass + + def loss(self): + pass + + def postprocess(self): + pass + + def restore_map(self): + pass + + def call(self, inputs): + batch_size, input_height, input_width, _ = inputs.shape + fake_output = tf.ones([ + batch_size, input_height // self._stride, input_width // self._stride, + 64 + ], dtype=tf.float32) + return [fake_output] * self._num_feature_outputs + + @property + def out_stride(self): + return self._stride + + @property + def num_feature_outputs(self): + return self._num_feature_outputs + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class CenterNetFeatureExtractorTest(test_case.TestCase): + """Test the base feature extractor class.""" + + def test_preprocess(self): + feature_extractor = DummyFeatureExtractor( + channel_means=(1.0, 2.0, 3.0), + channel_stds=(10., 20., 30.), bgr_ordering=False, + num_feature_outputs=2, stride=4) + + img = np.zeros((2, 32, 32, 3)) + img[:, :, :] = 11, 22, 33 + + def graph_fn(): + output = feature_extractor.preprocess(img) + return output + + output = self.execute(graph_fn, []) + self.assertAlmostEqual(output.sum(), 2 * 32 * 32 * 3) + + def test_bgr_ordering(self): + feature_extractor = DummyFeatureExtractor( + channel_means=(0.0, 0.0, 0.0), + channel_stds=(1., 1., 1.), bgr_ordering=True, + num_feature_outputs=2, stride=4) + + img = np.zeros((2, 32, 32, 3), dtype=np.float32) + img[:, :, :] = 1, 2, 3 + + def graph_fn(): + output = feature_extractor.preprocess(img) + return output + + output = self.execute(graph_fn, []) + self.assertAllClose(output[..., 2], 1 * np.ones((2, 32, 32))) + self.assertAllClose(output[..., 1], 2 * np.ones((2, 32, 32))) + self.assertAllClose(output[..., 0], 3 * np.ones((2, 32, 32))) + + def test_default_ordering(self): + feature_extractor = DummyFeatureExtractor( + channel_means=(0.0, 0.0, 0.0), + channel_stds=(1., 1., 1.), bgr_ordering=False, + num_feature_outputs=2, stride=4) + + img = np.zeros((2, 32, 32, 3), dtype=np.float32) + img[:, :, :] = 1, 2, 3 + + def graph_fn(): + output = feature_extractor.preprocess(img) + return output + + output = self.execute(graph_fn, []) + self.assertAllClose(output[..., 0], 1 * np.ones((2, 32, 32))) + self.assertAllClose(output[..., 1], 2 * np.ones((2, 32, 32))) + self.assertAllClose(output[..., 2], 3 * np.ones((2, 32, 32))) + + +if __name__ == '__main__': + tf.enable_v2_behavior() + tf.test.main() diff --git a/models/research/object_detection/meta_architectures/context_rcnn_lib.py b/models/research/object_detection/meta_architectures/context_rcnn_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..902a88c77669cd27eb36490d645740041600fcac --- /dev/null +++ 
b/models/research/object_detection/meta_architectures/context_rcnn_lib.py @@ -0,0 +1,224 @@
+# Lint as: python3
+# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Library functions for ContextRCNN."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import tensorflow.compat.v1 as tf
+import tf_slim as slim
+
+
+# The negative value used in padding the invalid weights.
+_NEGATIVE_PADDING_VALUE = -100000
+
+
+def filter_weight_value(weights, values, valid_mask):
+  """Filters weights and values based on valid_mask.
+
+  _NEGATIVE_PADDING_VALUE will be added to invalid elements in the weights so
+  that they do not contribute to the softmax. Invalid elements in the values
+  are set to 0.
+
+  Args:
+    weights: A float Tensor of shape [batch_size, input_size, context_size].
+    values: A float Tensor of shape [batch_size, context_size,
+      projected_dimension].
+    valid_mask: A boolean Tensor of shape [batch_size, context_size]. True
+      means valid and False means invalid.
+
+  Returns:
+    weights: A float Tensor of shape [batch_size, input_size, context_size].
+    values: A float Tensor of shape [batch_size, context_size,
+      projected_dimension].
+
+  Raises:
+    ValueError: If the shapes of weights, values and valid_mask are not
+      compatible with each other.
+  """
+  w_batch_size, _, w_context_size = weights.shape
+  v_batch_size, v_context_size, _ = values.shape
+  m_batch_size, m_context_size = valid_mask.shape
+  if w_batch_size != v_batch_size or v_batch_size != m_batch_size:
+    raise ValueError("Please make sure the first dimension of the input"
+                     " tensors are the same.")
+
+  if w_context_size != v_context_size:
+    raise ValueError("Please make sure the third dimension of weights matches"
+                     " the second dimension of values.")
+
+  if w_context_size != m_context_size:
+    raise ValueError("Please make sure the third dimension of the weights"
+                     " matches the second dimension of the valid_mask.")
+
+  valid_mask = valid_mask[..., tf.newaxis]
+
+  # Force the invalid weights to be very negative so that they do not
+  # contribute to the softmax.
+  weights += tf.transpose(
+      tf.cast(tf.math.logical_not(valid_mask), weights.dtype) *
+      _NEGATIVE_PADDING_VALUE,
+      perm=[0, 2, 1])
+
+  # Force the invalid values to be 0.
+  values *= tf.cast(valid_mask, values.dtype)
+
+  return weights, values
+
+
+def compute_valid_mask(num_valid_elements, num_elements):
+  """Computes mask of valid entries within padded context feature.
+
+  Args:
+    num_valid_elements: An int32 Tensor of shape [batch_size].
+    num_elements: An int32 Tensor.
+
+  Returns:
+    A boolean Tensor of shape [batch_size, num_elements]. True means
+    valid and False means invalid.
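    For example (illustrative, mirroring the TF1 unit test later in this diff):
    num_valid_elements = [1, 2] with num_elements = 3 yields
    [[True, False, False], [True, True, False]].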
+ """ + batch_size = num_valid_elements.shape[0] + element_idxs = tf.range(num_elements, dtype=tf.int32) + batch_element_idxs = tf.tile(element_idxs[tf.newaxis, ...], [batch_size, 1]) + num_valid_elements = num_valid_elements[..., tf.newaxis] + valid_mask = tf.less(batch_element_idxs, num_valid_elements) + return valid_mask + + +def project_features(features, projection_dimension, is_training, normalize): + """Projects features to another feature space. + + Args: + features: A float Tensor of shape [batch_size, features_size, + num_features]. + projection_dimension: A int32 Tensor. + is_training: A boolean Tensor (affecting batch normalization). + normalize: A boolean Tensor. If true, the output features will be l2 + normalized on the last dimension. + + Returns: + A float Tensor of shape [batch, features_size, projection_dimension]. + """ + # TODO(guanhangwu) Figure out a better way of specifying the batch norm + # params. + batch_norm_params = { + "is_training": is_training, + "decay": 0.97, + "epsilon": 0.001, + "center": True, + "scale": True + } + + batch_size, _, num_features = features.shape + features = tf.reshape(features, [-1, num_features]) + projected_features = slim.fully_connected( + features, + num_outputs=projection_dimension, + activation_fn=tf.nn.relu6, + normalizer_fn=slim.batch_norm, + normalizer_params=batch_norm_params) + + projected_features = tf.reshape(projected_features, + [batch_size, -1, projection_dimension]) + + if normalize: + projected_features = tf.math.l2_normalize(projected_features, axis=-1) + + return projected_features + + +def attention_block(input_features, context_features, bottleneck_dimension, + output_dimension, attention_temperature, valid_mask, + is_training): + """Generic attention block. + + Args: + input_features: A float Tensor of shape [batch_size, input_size, + num_input_features]. + context_features: A float Tensor of shape [batch_size, context_size, + num_context_features]. + bottleneck_dimension: A int32 Tensor representing the bottleneck dimension + for intermediate projections. + output_dimension: A int32 Tensor representing the last dimension of the + output feature. + attention_temperature: A float Tensor. It controls the temperature of the + softmax for weights calculation. The formula for calculation as follows: + weights = exp(weights / temperature) / sum(exp(weights / temperature)) + valid_mask: A boolean Tensor of shape [batch_size, context_size]. + is_training: A boolean Tensor (affecting batch normalization). + + Returns: + A float Tensor of shape [batch_size, input_size, output_dimension]. + """ + + with tf.variable_scope("AttentionBlock"): + queries = project_features( + input_features, bottleneck_dimension, is_training, normalize=True) + keys = project_features( + context_features, bottleneck_dimension, is_training, normalize=True) + values = project_features( + context_features, bottleneck_dimension, is_training, normalize=True) + + weights = tf.matmul(queries, keys, transpose_b=True) + + weights, values = filter_weight_value(weights, values, valid_mask) + + weights = tf.nn.softmax(weights / attention_temperature) + + features = tf.matmul(weights, values) + output_features = project_features( + features, output_dimension, is_training, normalize=False) + return output_features + + +def compute_box_context_attention(box_features, context_features, + valid_context_size, bottleneck_dimension, + attention_temperature, is_training): + """Computes the attention feature from the context given a batch of box. 
+ + Args: + box_features: A float Tensor of shape [batch_size, max_num_proposals, + height, width, channels]. It is pooled features from first stage + proposals. + context_features: A float Tensor of shape [batch_size, context_size, + num_context_features]. + valid_context_size: A int32 Tensor of shape [batch_size]. + bottleneck_dimension: A int32 Tensor representing the bottleneck dimension + for intermediate projections. + attention_temperature: A float Tensor. It controls the temperature of the + softmax for weights calculation. The formula for calculation as follows: + weights = exp(weights / temperature) / sum(exp(weights / temperature)) + is_training: A boolean Tensor (affecting batch normalization). + + Returns: + A float Tensor of shape [batch_size, max_num_proposals, 1, 1, channels]. + """ + _, context_size, _ = context_features.shape + valid_mask = compute_valid_mask(valid_context_size, context_size) + + channels = box_features.shape[-1] + # Average pools over height and width dimension so that the shape of + # box_features becomes [batch_size, max_num_proposals, channels]. + box_features = tf.reduce_mean(box_features, [2, 3]) + + output_features = attention_block(box_features, context_features, + bottleneck_dimension, channels.value, + attention_temperature, valid_mask, + is_training) + + # Expands the dimension back to match with the original feature map. + output_features = output_features[:, :, tf.newaxis, tf.newaxis, :] + + return output_features diff --git a/models/research/object_detection/meta_architectures/context_rcnn_lib_tf1_test.py b/models/research/object_detection/meta_architectures/context_rcnn_lib_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..a0b3b848d835dcad37f6c75f05b869fbaec4facb --- /dev/null +++ b/models/research/object_detection/meta_architectures/context_rcnn_lib_tf1_test.py @@ -0,0 +1,126 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for context_rcnn_lib.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest +from absl.testing import parameterized +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures import context_rcnn_lib +from object_detection.utils import test_case +from object_detection.utils import tf_version + +_NEGATIVE_PADDING_VALUE = -100000 + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class ContextRcnnLibTest(parameterized.TestCase, test_case.TestCase, + tf.test.TestCase): + """Tests for the functions in context_rcnn_lib.""" + + def test_compute_valid_mask(self): + num_elements = tf.constant(3, tf.int32) + num_valid_elementss = tf.constant((1, 2), tf.int32) + valid_mask = context_rcnn_lib.compute_valid_mask(num_valid_elementss, + num_elements) + expected_valid_mask = tf.constant([[1, 0, 0], [1, 1, 0]], tf.float32) + self.assertAllEqual(valid_mask, expected_valid_mask) + + def test_filter_weight_value(self): + weights = tf.ones((2, 3, 2), tf.float32) * 4 + values = tf.ones((2, 2, 4), tf.float32) + valid_mask = tf.constant([[True, True], [True, False]], tf.bool) + + filtered_weights, filtered_values = context_rcnn_lib.filter_weight_value( + weights, values, valid_mask) + expected_weights = tf.constant([[[4, 4], [4, 4], [4, 4]], + [[4, _NEGATIVE_PADDING_VALUE + 4], + [4, _NEGATIVE_PADDING_VALUE + 4], + [4, _NEGATIVE_PADDING_VALUE + 4]]]) + + expected_values = tf.constant([[[1, 1, 1, 1], [1, 1, 1, 1]], + [[1, 1, 1, 1], [0, 0, 0, 0]]]) + self.assertAllEqual(filtered_weights, expected_weights) + self.assertAllEqual(filtered_values, expected_values) + + # Changes the valid_mask so the results will be different. + valid_mask = tf.constant([[True, True], [False, False]], tf.bool) + + filtered_weights, filtered_values = context_rcnn_lib.filter_weight_value( + weights, values, valid_mask) + expected_weights = tf.constant( + [[[4, 4], [4, 4], [4, 4]], + [[_NEGATIVE_PADDING_VALUE + 4, _NEGATIVE_PADDING_VALUE + 4], + [_NEGATIVE_PADDING_VALUE + 4, _NEGATIVE_PADDING_VALUE + 4], + [_NEGATIVE_PADDING_VALUE + 4, _NEGATIVE_PADDING_VALUE + 4]]]) + + expected_values = tf.constant([[[1, 1, 1, 1], [1, 1, 1, 1]], + [[0, 0, 0, 0], [0, 0, 0, 0]]]) + self.assertAllEqual(filtered_weights, expected_weights) + self.assertAllEqual(filtered_values, expected_values) + + @parameterized.parameters((2, True, True), (2, False, True), + (10, True, False), (10, False, False)) + def test_project_features(self, projection_dimension, is_training, normalize): + features = tf.ones([2, 3, 4], tf.float32) + projected_features = context_rcnn_lib.project_features( + features, + projection_dimension, + is_training=is_training, + normalize=normalize) + + # Makes sure the shape is correct. 
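    # (project_features flattens the input to [batch * features_size,
    # num_features], applies a relu6 fully connected layer with batch norm to
    # projection_dimension, reshapes back, and l2-normalizes the last axis
    # when normalize=True; only the resulting shape is asserted here.)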
+ self.assertAllEqual(projected_features.shape, [2, 3, projection_dimension]) + + @parameterized.parameters( + (2, 10, 1), + (3, 10, 2), + (4, 20, 3), + (5, 20, 4), + (7, 20, 5), + ) + def test_attention_block(self, bottleneck_dimension, output_dimension, + attention_temperature): + input_features = tf.ones([2, 3, 4], tf.float32) + context_features = tf.ones([2, 2, 3], tf.float32) + valid_mask = tf.constant([[True, True], [False, False]], tf.bool) + is_training = False + output_features = context_rcnn_lib.attention_block( + input_features, context_features, bottleneck_dimension, + output_dimension, attention_temperature, valid_mask, is_training) + + # Makes sure the shape is correct. + self.assertAllEqual(output_features.shape, [2, 3, output_dimension]) + + @parameterized.parameters(True, False) + def test_compute_box_context_attention(self, is_training): + box_features = tf.ones([2, 3, 4, 4, 4], tf.float32) + context_features = tf.ones([2, 5, 6], tf.float32) + valid_context_size = tf.constant((2, 3), tf.int32) + bottleneck_dimension = 10 + attention_temperature = 1 + attention_features = context_rcnn_lib.compute_box_context_attention( + box_features, context_features, valid_context_size, + bottleneck_dimension, attention_temperature, is_training) + # Makes sure the shape is correct. + self.assertAllEqual(attention_features.shape, [2, 3, 1, 1, 4]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/meta_architectures/context_rcnn_meta_arch.py b/models/research/object_detection/meta_architectures/context_rcnn_meta_arch.py new file mode 100644 index 0000000000000000000000000000000000000000..abe30558b01218df8999b3f0f7698e57f67f8ff2 --- /dev/null +++ b/models/research/object_detection/meta_architectures/context_rcnn_meta_arch.py @@ -0,0 +1,340 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Context R-CNN meta-architecture definition. + +This adds the ability to use attention into contextual features within the +Faster R-CNN object detection framework to improve object detection performance. +See https://arxiv.org/abs/1912.03538 for more information. 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools + +from object_detection.core import standard_fields as fields +from object_detection.meta_architectures import context_rcnn_lib +from object_detection.meta_architectures import faster_rcnn_meta_arch + + +class ContextRCNNMetaArch(faster_rcnn_meta_arch.FasterRCNNMetaArch): + """Context R-CNN Meta-architecture definition.""" + + def __init__(self, + is_training, + num_classes, + image_resizer_fn, + feature_extractor, + number_of_stages, + first_stage_anchor_generator, + first_stage_target_assigner, + first_stage_atrous_rate, + first_stage_box_predictor_arg_scope_fn, + first_stage_box_predictor_kernel_size, + first_stage_box_predictor_depth, + first_stage_minibatch_size, + first_stage_sampler, + first_stage_non_max_suppression_fn, + first_stage_max_proposals, + first_stage_localization_loss_weight, + first_stage_objectness_loss_weight, + crop_and_resize_fn, + initial_crop_size, + maxpool_kernel_size, + maxpool_stride, + second_stage_target_assigner, + second_stage_mask_rcnn_box_predictor, + second_stage_batch_size, + second_stage_sampler, + second_stage_non_max_suppression_fn, + second_stage_score_conversion_fn, + second_stage_localization_loss_weight, + second_stage_classification_loss_weight, + second_stage_classification_loss, + second_stage_mask_prediction_loss_weight=1.0, + hard_example_miner=None, + parallel_iterations=16, + add_summaries=True, + clip_anchors_to_image=False, + use_static_shapes=False, + resize_masks=True, + freeze_batchnorm=False, + return_raw_detections_during_predict=False, + output_final_box_features=False, + attention_bottleneck_dimension=None, + attention_temperature=None): + """ContextRCNNMetaArch Constructor. + + Args: + is_training: A boolean indicating whether the training version of the + computation graph should be constructed. + num_classes: Number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + image_resizer_fn: A callable for image resizing. This callable + takes a rank-3 image tensor of shape [height, width, channels] + (corresponding to a single image), an optional rank-3 instance mask + tensor of shape [num_masks, height, width] and returns a resized rank-3 + image tensor, a resized mask tensor if one was provided in the input. In + addition this callable must also return a 1-D tensor of the form + [height, width, channels] containing the size of the true image, as the + image resizer can perform zero padding. See protos/image_resizer.proto. + feature_extractor: A FasterRCNNFeatureExtractor object. + number_of_stages: An integer values taking values in {1, 2, 3}. If + 1, the function will construct only the Region Proposal Network (RPN) + part of the model. If 2, the function will perform box refinement and + other auxiliary predictions all in the second stage. If 3, it will + extract features from refined boxes and perform the auxiliary + predictions on the non-maximum suppressed refined boxes. + If is_training is true and the value of number_of_stages is 3, it is + reduced to 2 since all the model heads are trained in parallel in second + stage during training. 
+ first_stage_anchor_generator: An anchor_generator.AnchorGenerator object + (note that currently we only support + grid_anchor_generator.GridAnchorGenerator objects) + first_stage_target_assigner: Target assigner to use for first stage of + Faster R-CNN (RPN). + first_stage_atrous_rate: A single integer indicating the atrous rate for + the single convolution op which is applied to the `rpn_features_to_crop` + tensor to obtain a tensor to be used for box prediction. Some feature + extractors optionally allow for producing feature maps computed at + denser resolutions. The atrous rate is used to compensate for the + denser feature maps by using an effectively larger receptive field. + (This should typically be set to 1). + first_stage_box_predictor_arg_scope_fn: Either a + Keras layer hyperparams object or a function to construct tf-slim + arg_scope for conv2d, separable_conv2d and fully_connected ops. Used + for the RPN box predictor. If it is a keras hyperparams object the + RPN box predictor will be a Keras model. If it is a function to + construct an arg scope it will be a tf-slim box predictor. + first_stage_box_predictor_kernel_size: Kernel size to use for the + convolution op just prior to RPN box predictions. + first_stage_box_predictor_depth: Output depth for the convolution op + just prior to RPN box predictions. + first_stage_minibatch_size: The "batch size" to use for computing the + objectness and location loss of the region proposal network. This + "batch size" refers to the number of anchors selected as contributing + to the loss function for any given image within the image batch and is + only called "batch_size" due to terminology from the Faster R-CNN paper. + first_stage_sampler: Sampler to use for first stage loss (RPN loss). + first_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression + callable that takes `boxes`, `scores` and optional `clip_window`(with + all other inputs already set) and returns a dictionary containing + tensors with keys: `detection_boxes`, `detection_scores`, + `detection_classes`, `num_detections`. This is used to perform non max + suppression on the boxes predicted by the Region Proposal Network + (RPN). + See `post_processing.batch_multiclass_non_max_suppression` for the type + and shape of these tensors. + first_stage_max_proposals: Maximum number of boxes to retain after + performing Non-Max Suppression (NMS) on the boxes predicted by the + Region Proposal Network (RPN). + first_stage_localization_loss_weight: A float + first_stage_objectness_loss_weight: A float + crop_and_resize_fn: A differentiable resampler to use for cropping RPN + proposal features. + initial_crop_size: A single integer indicating the output size + (width and height are set to be the same) of the initial bilinear + interpolation based cropping during ROI pooling. + maxpool_kernel_size: A single integer indicating the kernel size of the + max pool op on the cropped feature map during ROI pooling. + maxpool_stride: A single integer indicating the stride of the max pool + op on the cropped feature map during ROI pooling. + second_stage_target_assigner: Target assigner to use for second stage of + Faster R-CNN. If the model is configured with multiple prediction heads, + this target assigner is used to generate targets for all heads (with the + correct `unmatched_class_label`). + second_stage_mask_rcnn_box_predictor: Mask R-CNN box predictor to use for + the second stage. 
+      second_stage_batch_size: The batch size used for computing the
+        classification and refined location loss of the box classifier. This
+        "batch size" refers to the number of proposals selected as contributing
+        to the loss function for any given image within the image batch and is
+        only called "batch_size" due to terminology from the Faster R-CNN paper.
+      second_stage_sampler: Sampler to use for second stage loss (box
+        classifier loss).
+      second_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression
+        callable that takes `boxes`, `scores`, optional `clip_window` and
+        optional (kwarg) `mask` inputs (with all other inputs already set)
+        and returns a dictionary containing tensors with keys:
+        `detection_boxes`, `detection_scores`, `detection_classes`,
+        `num_detections`, and (optionally) `detection_masks`. See
+        `post_processing.batch_multiclass_non_max_suppression` for the type and
+        shape of these tensors.
+      second_stage_score_conversion_fn: Callable elementwise nonlinearity
+        (that takes tensors as inputs and returns tensors). This is usually
+        used to convert logits to probabilities.
+      second_stage_localization_loss_weight: A float indicating the scale
+        factor for second stage localization loss.
+      second_stage_classification_loss_weight: A float indicating the scale
+        factor for second stage classification loss.
+      second_stage_classification_loss: Classification loss used by the second
+        stage classifier. Either losses.WeightedSigmoidClassificationLoss or
+        losses.WeightedSoftmaxClassificationLoss.
+      second_stage_mask_prediction_loss_weight: A float indicating the scale
+        factor for second stage mask prediction loss. This is applicable only
+        if the second stage box predictor is configured to predict masks.
+      hard_example_miner: A losses.HardExampleMiner object (can be None).
+      parallel_iterations: (Optional) The number of iterations allowed to run
+        in parallel for calls to tf.map_fn.
+      add_summaries: boolean (default: True) controlling whether summary ops
+        should be added to the tensorflow graph.
+      clip_anchors_to_image: Normally, anchors generated for a given image size
+        are pruned during training if they lie outside the image window. This
+        option clips the anchors to be within the image instead of pruning.
+      use_static_shapes: If True, uses implementation of ops with static shape
+        guarantees.
+      resize_masks: Indicates whether the masks present in the groundtruth
+        should be resized in the model with `image_resizer_fn`.
+      freeze_batchnorm: Whether to freeze batch norm parameters in the first
+        stage box predictor during training or not. When training with a small
+        batch size (e.g. 1), it is desirable to freeze batch norm update and
+        use pretrained batch norm params.
+      return_raw_detections_during_predict: Whether to return raw detection
+        boxes in the predict() method. These are decoded boxes that have not
+        been through postprocessing (i.e. NMS). Default False.
+      output_final_box_features: Whether to output final box features. If true,
+        it crops the feature map based on the final box prediction and returns
+        it in the dict as detection_features.
+      attention_bottleneck_dimension: A single integer. The bottleneck feature
+        dimension of the attention block.
+      attention_temperature: A single float. The attention temperature.
+
+    Raises:
+      ValueError: If `second_stage_batch_size` > `first_stage_max_proposals` at
+        training time.
+      ValueError: If first_stage_anchor_generator is not of type
+        grid_anchor_generator.GridAnchorGenerator.
+ """ + super(ContextRCNNMetaArch, self).__init__( + is_training, + num_classes, + image_resizer_fn, + feature_extractor, + number_of_stages, + first_stage_anchor_generator, + first_stage_target_assigner, + first_stage_atrous_rate, + first_stage_box_predictor_arg_scope_fn, + first_stage_box_predictor_kernel_size, + first_stage_box_predictor_depth, + first_stage_minibatch_size, + first_stage_sampler, + first_stage_non_max_suppression_fn, + first_stage_max_proposals, + first_stage_localization_loss_weight, + first_stage_objectness_loss_weight, + crop_and_resize_fn, + initial_crop_size, + maxpool_kernel_size, + maxpool_stride, + second_stage_target_assigner, + second_stage_mask_rcnn_box_predictor, + second_stage_batch_size, + second_stage_sampler, + second_stage_non_max_suppression_fn, + second_stage_score_conversion_fn, + second_stage_localization_loss_weight, + second_stage_classification_loss_weight, + second_stage_classification_loss, + second_stage_mask_prediction_loss_weight=( + second_stage_mask_prediction_loss_weight), + hard_example_miner=hard_example_miner, + parallel_iterations=parallel_iterations, + add_summaries=add_summaries, + clip_anchors_to_image=clip_anchors_to_image, + use_static_shapes=use_static_shapes, + resize_masks=resize_masks, + freeze_batchnorm=freeze_batchnorm, + return_raw_detections_during_predict=( + return_raw_detections_during_predict), + output_final_box_features=output_final_box_features) + + self._context_feature_extract_fn = functools.partial( + context_rcnn_lib.compute_box_context_attention, + bottleneck_dimension=attention_bottleneck_dimension, + attention_temperature=attention_temperature, + is_training=is_training) + + @staticmethod + def get_side_inputs(features): + """Overrides the get_side_inputs function in the base class. + + This function returns context_features and valid_context_size, which will be + used in the _compute_second_stage_input_feature_maps function. + + Args: + features: A dictionary of tensors. + + Returns: + A dictionary of tensors contains context_features and valid_context_size. + + Raises: + ValueError: If context_features or valid_context_size is not in the + features. + """ + if (fields.InputDataFields.context_features not in features or + fields.InputDataFields.valid_context_size not in features): + raise ValueError( + "Please make sure context_features and valid_context_size are in the " + "features") + + return { + fields.InputDataFields.context_features: + features[fields.InputDataFields.context_features], + fields.InputDataFields.valid_context_size: + features[fields.InputDataFields.valid_context_size] + } + + def _compute_second_stage_input_feature_maps(self, features_to_crop, + proposal_boxes_normalized, + context_features, + valid_context_size): + """Crops to a set of proposals from the feature map for a batch of images. + + This function overrides the one in the FasterRCNNMetaArch. Aside from + cropping and resizing the feature maps, which is done in the parent class, + it adds context attention features to the box features. + + Args: + features_to_crop: A float32 Tensor with shape [batch_size, height, width, + depth] + proposal_boxes_normalized: A float32 Tensor with shape [batch_size, + num_proposals, box_code_size] containing proposal boxes in normalized + coordinates. + context_features: A float Tensor of shape [batch_size, context_size, + num_context_features]. + valid_context_size: A int32 Tensor of shape [batch_size]. + + Returns: + A float32 Tensor with shape [K, new_height, new_width, depth]. 
+ """ + box_features = self._crop_and_resize_fn( + features_to_crop, proposal_boxes_normalized, + [self._initial_crop_size, self._initial_crop_size]) + + attention_features = self._context_feature_extract_fn( + box_features=box_features, + context_features=context_features, + valid_context_size=valid_context_size) + + # Adds box features with attention features. + box_features += attention_features + + flattened_feature_maps = self._flatten_first_two_dimensions(box_features) + + return self._maxpool_layer(flattened_feature_maps) diff --git a/models/research/object_detection/meta_architectures/context_rcnn_meta_arch_tf1_test.py b/models/research/object_detection/meta_architectures/context_rcnn_meta_arch_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..a5dc8cc8e12f8e2ee95465c651b3570db0cca80f --- /dev/null +++ b/models/research/object_detection/meta_architectures/context_rcnn_meta_arch_tf1_test.py @@ -0,0 +1,540 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for object_detection.meta_architectures.context_meta_arch.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import unittest +from absl.testing import parameterized +import mock +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from google.protobuf import text_format + +from object_detection.anchor_generators import grid_anchor_generator +from object_detection.builders import box_predictor_builder +from object_detection.builders import hyperparams_builder +from object_detection.builders import post_processing_builder +from object_detection.core import balanced_positive_negative_sampler as sampler +from object_detection.core import losses +from object_detection.core import post_processing +from object_detection.core import standard_fields as fields +from object_detection.core import target_assigner +from object_detection.meta_architectures import context_rcnn_meta_arch +from object_detection.meta_architectures import faster_rcnn_meta_arch +from object_detection.protos import box_predictor_pb2 +from object_detection.protos import hyperparams_pb2 +from object_detection.protos import post_processing_pb2 +from object_detection.utils import ops +from object_detection.utils import test_case +from object_detection.utils import test_utils +from object_detection.utils import tf_version + + +class FakeFasterRCNNFeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): + """Fake feature extractor to use in tests.""" + + def __init__(self): + super(FakeFasterRCNNFeatureExtractor, self).__init__( + is_training=False, + first_stage_features_stride=32, + reuse_weights=None, + weight_decay=0.0) + + def preprocess(self, resized_inputs): + return tf.identity(resized_inputs) + + def _extract_proposal_features(self, preprocessed_inputs, scope): + with 
tf.variable_scope('mock_model'): + proposal_features = 0 * slim.conv2d( + preprocessed_inputs, num_outputs=3, kernel_size=1, scope='layer1') + return proposal_features, {} + + def _extract_box_classifier_features(self, proposal_feature_maps, scope): + with tf.variable_scope('mock_model'): + return 0 * slim.conv2d( + proposal_feature_maps, num_outputs=3, kernel_size=1, scope='layer2') + + +class FakeFasterRCNNKerasFeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor): + """Fake feature extractor to use in tests.""" + + def __init__(self): + super(FakeFasterRCNNKerasFeatureExtractor, self).__init__( + is_training=False, first_stage_features_stride=32, weight_decay=0.0) + + def preprocess(self, resized_inputs): + return tf.identity(resized_inputs) + + def get_proposal_feature_extractor_model(self, name): + + class ProposalFeatureExtractor(tf.keras.Model): + """Dummy proposal feature extraction.""" + + def __init__(self, name): + super(ProposalFeatureExtractor, self).__init__(name=name) + self.conv = None + + def build(self, input_shape): + self.conv = tf.keras.layers.Conv2D( + 3, kernel_size=1, padding='SAME', name='layer1') + + def call(self, inputs): + return self.conv(inputs) + + return ProposalFeatureExtractor(name=name) + + def get_box_classifier_feature_extractor_model(self, name): + return tf.keras.Sequential([ + tf.keras.layers.Conv2D( + 3, kernel_size=1, padding='SAME', name=name + '_layer2') + ]) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class ContextRCNNMetaArchTest(test_case.TestCase, parameterized.TestCase): + + def _get_model(self, box_predictor, **common_kwargs): + return context_rcnn_meta_arch.ContextRCNNMetaArch( + initial_crop_size=3, + maxpool_kernel_size=1, + maxpool_stride=1, + second_stage_mask_rcnn_box_predictor=box_predictor, + attention_bottleneck_dimension=10, + attention_temperature=0.2, + **common_kwargs) + + def _build_arg_scope_with_hyperparams(self, hyperparams_text_proto, + is_training): + hyperparams = hyperparams_pb2.Hyperparams() + text_format.Merge(hyperparams_text_proto, hyperparams) + return hyperparams_builder.build(hyperparams, is_training=is_training) + + def _build_keras_layer_hyperparams(self, hyperparams_text_proto): + hyperparams = hyperparams_pb2.Hyperparams() + text_format.Merge(hyperparams_text_proto, hyperparams) + return hyperparams_builder.KerasLayerHyperparams(hyperparams) + + def _get_second_stage_box_predictor_text_proto(self, + share_box_across_classes=False + ): + share_box_field = 'true' if share_box_across_classes else 'false' + box_predictor_text_proto = """ + mask_rcnn_box_predictor {{ + fc_hyperparams {{ + op: FC + activation: NONE + regularizer {{ + l2_regularizer {{ + weight: 0.0005 + }} + }} + initializer {{ + variance_scaling_initializer {{ + factor: 1.0 + uniform: true + mode: FAN_AVG + }} + }} + }} + share_box_across_classes: {share_box_across_classes} + }} + """.format(share_box_across_classes=share_box_field) + return box_predictor_text_proto + + def _get_box_classifier_features_shape(self, + image_size, + batch_size, + max_num_proposals, + initial_crop_size, + maxpool_stride, + num_features): + return (batch_size * max_num_proposals, + initial_crop_size/maxpool_stride, + initial_crop_size/maxpool_stride, + num_features) + + def _get_second_stage_box_predictor(self, + num_classes, + is_training, + predict_masks, + masks_are_class_agnostic, + share_box_across_classes=False, + use_keras=False): + box_predictor_proto = box_predictor_pb2.BoxPredictor() + text_format.Merge( + 
self._get_second_stage_box_predictor_text_proto( + share_box_across_classes), box_predictor_proto) + if predict_masks: + text_format.Merge( + self._add_mask_to_second_stage_box_predictor_text_proto( + masks_are_class_agnostic), box_predictor_proto) + + if use_keras: + return box_predictor_builder.build_keras( + hyperparams_builder.KerasLayerHyperparams, + inplace_batchnorm_update=False, + freeze_batchnorm=False, + box_predictor_config=box_predictor_proto, + num_classes=num_classes, + num_predictions_per_location_list=None, + is_training=is_training) + else: + return box_predictor_builder.build( + hyperparams_builder.build, + box_predictor_proto, + num_classes=num_classes, + is_training=is_training) + + def _build_model(self, + is_training, + number_of_stages, + second_stage_batch_size, + first_stage_max_proposals=8, + num_classes=2, + hard_mining=False, + softmax_second_stage_classification_loss=True, + predict_masks=False, + pad_to_max_dimension=None, + masks_are_class_agnostic=False, + use_matmul_crop_and_resize=False, + clip_anchors_to_image=False, + use_matmul_gather_in_matcher=False, + use_static_shapes=False, + calibration_mapping_value=None, + share_box_across_classes=False, + return_raw_detections_during_predict=False): + use_keras = tf_version.is_tf2() + def image_resizer_fn(image, masks=None): + """Fake image resizer function.""" + resized_inputs = [] + resized_image = tf.identity(image) + if pad_to_max_dimension is not None: + resized_image = tf.image.pad_to_bounding_box(image, 0, 0, + pad_to_max_dimension, + pad_to_max_dimension) + resized_inputs.append(resized_image) + if masks is not None: + resized_masks = tf.identity(masks) + if pad_to_max_dimension is not None: + resized_masks = tf.image.pad_to_bounding_box( + tf.transpose(masks, [1, 2, 0]), 0, 0, pad_to_max_dimension, + pad_to_max_dimension) + resized_masks = tf.transpose(resized_masks, [2, 0, 1]) + resized_inputs.append(resized_masks) + resized_inputs.append(tf.shape(image)) + return resized_inputs + + # anchors in this test are designed so that a subset of anchors are inside + # the image and a subset of anchors are outside. 
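    # With the 3 scales x 3 aspect ratios below, the grid anchor generator
    # emits 9 anchors per feature-map location. The fake feature extractor
    # keeps the 20x20 spatial resolution of the test inputs, so there are
    # 20 * 20 * 9 = 3600 anchors, which is consistent with the [2, 3600, 4]
    # rpn_box_encodings shape asserted in test_prediction_end_to_end.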
+ first_stage_anchor_scales = (0.001, 0.005, 0.1) + first_stage_anchor_aspect_ratios = (0.5, 1.0, 2.0) + first_stage_anchor_strides = (1, 1) + first_stage_anchor_generator = grid_anchor_generator.GridAnchorGenerator( + first_stage_anchor_scales, + first_stage_anchor_aspect_ratios, + anchor_stride=first_stage_anchor_strides) + first_stage_target_assigner = target_assigner.create_target_assigner( + 'FasterRCNN', + 'proposal', + use_matmul_gather=use_matmul_gather_in_matcher) + + if use_keras: + fake_feature_extractor = FakeFasterRCNNKerasFeatureExtractor() + else: + fake_feature_extractor = FakeFasterRCNNFeatureExtractor() + + first_stage_box_predictor_hyperparams_text_proto = """ + op: CONV + activation: RELU + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + } + } + """ + if use_keras: + first_stage_box_predictor_arg_scope_fn = ( + self._build_keras_layer_hyperparams( + first_stage_box_predictor_hyperparams_text_proto)) + else: + first_stage_box_predictor_arg_scope_fn = ( + self._build_arg_scope_with_hyperparams( + first_stage_box_predictor_hyperparams_text_proto, is_training)) + + first_stage_box_predictor_kernel_size = 3 + first_stage_atrous_rate = 1 + first_stage_box_predictor_depth = 512 + first_stage_minibatch_size = 3 + first_stage_sampler = sampler.BalancedPositiveNegativeSampler( + positive_fraction=0.5, is_static=use_static_shapes) + + first_stage_nms_score_threshold = -1.0 + first_stage_nms_iou_threshold = 1.0 + first_stage_max_proposals = first_stage_max_proposals + first_stage_non_max_suppression_fn = functools.partial( + post_processing.batch_multiclass_non_max_suppression, + score_thresh=first_stage_nms_score_threshold, + iou_thresh=first_stage_nms_iou_threshold, + max_size_per_class=first_stage_max_proposals, + max_total_size=first_stage_max_proposals, + use_static_shapes=use_static_shapes) + + first_stage_localization_loss_weight = 1.0 + first_stage_objectness_loss_weight = 1.0 + + post_processing_config = post_processing_pb2.PostProcessing() + post_processing_text_proto = """ + score_converter: IDENTITY + batch_non_max_suppression { + score_threshold: -20.0 + iou_threshold: 1.0 + max_detections_per_class: 5 + max_total_detections: 5 + use_static_shapes: """ + '{}'.format(use_static_shapes) + """ + } + """ + if calibration_mapping_value: + calibration_text_proto = """ + calibration_config { + function_approximation { + x_y_pairs { + x_y_pair { + x: 0.0 + y: %f + } + x_y_pair { + x: 1.0 + y: %f + }}}}""" % (calibration_mapping_value, calibration_mapping_value) + post_processing_text_proto = ( + post_processing_text_proto + ' ' + calibration_text_proto) + text_format.Merge(post_processing_text_proto, post_processing_config) + second_stage_non_max_suppression_fn, second_stage_score_conversion_fn = ( + post_processing_builder.build(post_processing_config)) + + second_stage_target_assigner = target_assigner.create_target_assigner( + 'FasterRCNN', + 'detection', + use_matmul_gather=use_matmul_gather_in_matcher) + second_stage_sampler = sampler.BalancedPositiveNegativeSampler( + positive_fraction=1.0, is_static=use_static_shapes) + + second_stage_localization_loss_weight = 1.0 + second_stage_classification_loss_weight = 1.0 + if softmax_second_stage_classification_loss: + second_stage_classification_loss = ( + losses.WeightedSoftmaxClassificationLoss()) + else: + second_stage_classification_loss = ( + losses.WeightedSigmoidClassificationLoss()) + + hard_example_miner = None + if hard_mining: + 
hard_example_miner = losses.HardExampleMiner( + num_hard_examples=1, + iou_threshold=0.99, + loss_type='both', + cls_loss_weight=second_stage_classification_loss_weight, + loc_loss_weight=second_stage_localization_loss_weight, + max_negatives_per_positive=None) + + crop_and_resize_fn = ( + ops.matmul_crop_and_resize + if use_matmul_crop_and_resize else ops.native_crop_and_resize) + common_kwargs = { + 'is_training': + is_training, + 'num_classes': + num_classes, + 'image_resizer_fn': + image_resizer_fn, + 'feature_extractor': + fake_feature_extractor, + 'number_of_stages': + number_of_stages, + 'first_stage_anchor_generator': + first_stage_anchor_generator, + 'first_stage_target_assigner': + first_stage_target_assigner, + 'first_stage_atrous_rate': + first_stage_atrous_rate, + 'first_stage_box_predictor_arg_scope_fn': + first_stage_box_predictor_arg_scope_fn, + 'first_stage_box_predictor_kernel_size': + first_stage_box_predictor_kernel_size, + 'first_stage_box_predictor_depth': + first_stage_box_predictor_depth, + 'first_stage_minibatch_size': + first_stage_minibatch_size, + 'first_stage_sampler': + first_stage_sampler, + 'first_stage_non_max_suppression_fn': + first_stage_non_max_suppression_fn, + 'first_stage_max_proposals': + first_stage_max_proposals, + 'first_stage_localization_loss_weight': + first_stage_localization_loss_weight, + 'first_stage_objectness_loss_weight': + first_stage_objectness_loss_weight, + 'second_stage_target_assigner': + second_stage_target_assigner, + 'second_stage_batch_size': + second_stage_batch_size, + 'second_stage_sampler': + second_stage_sampler, + 'second_stage_non_max_suppression_fn': + second_stage_non_max_suppression_fn, + 'second_stage_score_conversion_fn': + second_stage_score_conversion_fn, + 'second_stage_localization_loss_weight': + second_stage_localization_loss_weight, + 'second_stage_classification_loss_weight': + second_stage_classification_loss_weight, + 'second_stage_classification_loss': + second_stage_classification_loss, + 'hard_example_miner': + hard_example_miner, + 'crop_and_resize_fn': + crop_and_resize_fn, + 'clip_anchors_to_image': + clip_anchors_to_image, + 'use_static_shapes': + use_static_shapes, + 'resize_masks': + True, + 'return_raw_detections_during_predict': + return_raw_detections_during_predict + } + + return self._get_model( + self._get_second_stage_box_predictor( + num_classes=num_classes, + is_training=is_training, + use_keras=use_keras, + predict_masks=predict_masks, + masks_are_class_agnostic=masks_are_class_agnostic, + share_box_across_classes=share_box_across_classes), **common_kwargs) + + @mock.patch.object(context_rcnn_meta_arch, 'context_rcnn_lib') + def test_prediction_mock(self, mock_context_rcnn_lib): + """Mocks the context_rcnn_lib module to test the prediction. + + Using mock object so that we can ensure compute_box_context_attention is + called in side the prediction function. + + Args: + mock_context_rcnn_lib: mock module for the context_rcnn_lib. 
+ """ + model = self._build_model( + is_training=False, + number_of_stages=2, + second_stage_batch_size=6, + num_classes=42) + mock_tensor = tf.ones([2, 8, 3, 3, 3], tf.float32) + + mock_context_rcnn_lib.compute_box_context_attention.return_value = mock_tensor + inputs_shape = (2, 20, 20, 3) + inputs = tf.cast( + tf.random_uniform(inputs_shape, minval=0, maxval=255, dtype=tf.int32), + dtype=tf.float32) + preprocessed_inputs, true_image_shapes = model.preprocess(inputs) + context_features = tf.random_uniform((2, 20, 10), + minval=0, + maxval=255, + dtype=tf.float32) + valid_context_size = tf.random_uniform((2,), + minval=0, + maxval=10, + dtype=tf.int32) + features = { + fields.InputDataFields.context_features: context_features, + fields.InputDataFields.valid_context_size: valid_context_size + } + + side_inputs = model.get_side_inputs(features) + + _ = model.predict(preprocessed_inputs, true_image_shapes, **side_inputs) + mock_context_rcnn_lib.compute_box_context_attention.assert_called_once() + + @parameterized.named_parameters( + {'testcase_name': 'static_shapes', 'static_shapes': True}, + {'testcase_name': 'nostatic_shapes', 'static_shapes': False}, + ) + def test_prediction_end_to_end(self, static_shapes): + """Runs prediction end to end and test the shape of the results.""" + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=False, + number_of_stages=2, + second_stage_batch_size=6, + use_matmul_crop_and_resize=static_shapes, + clip_anchors_to_image=static_shapes, + use_matmul_gather_in_matcher=static_shapes, + use_static_shapes=static_shapes, + num_classes=42) + + def graph_fn(): + inputs_shape = (2, 20, 20, 3) + inputs = tf.cast( + tf.random_uniform(inputs_shape, minval=0, maxval=255, dtype=tf.int32), + dtype=tf.float32) + preprocessed_inputs, true_image_shapes = model.preprocess(inputs) + context_features = tf.random_uniform((2, 20, 10), + minval=0, + maxval=255, + dtype=tf.float32) + valid_context_size = tf.random_uniform((2,), + minval=0, + maxval=10, + dtype=tf.int32) + features = { + fields.InputDataFields.context_features: context_features, + fields.InputDataFields.valid_context_size: valid_context_size + } + + side_inputs = model.get_side_inputs(features) + + prediction_dict = model.predict(preprocessed_inputs, true_image_shapes, + **side_inputs) + return (prediction_dict['rpn_box_predictor_features'], + prediction_dict['rpn_box_encodings'], + prediction_dict['refined_box_encodings'], + prediction_dict['proposal_boxes_normalized'], + prediction_dict['proposal_boxes']) + execute_fn = self.execute if static_shapes else self.execute_cpu + (rpn_box_predictor_features, rpn_box_encodings, refined_box_encodings, + proposal_boxes_normalized, proposal_boxes) = execute_fn(graph_fn, [], + graph=g) + self.assertAllEqual(rpn_box_predictor_features.shape, [2, 20, 20, 512]) + self.assertAllEqual(rpn_box_encodings.shape, [2, 3600, 4]) + self.assertAllEqual(refined_box_encodings.shape, [16, 42, 4]) + self.assertAllEqual(proposal_boxes_normalized.shape, [2, 8, 4]) + self.assertAllEqual(proposal_boxes.shape, [2, 8, 4]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/meta_architectures/faster_rcnn_meta_arch.py b/models/research/object_detection/meta_architectures/faster_rcnn_meta_arch.py new file mode 100644 index 0000000000000000000000000000000000000000..2b6c093a36d0d12a03ce98c673790cf5d65bf34a --- /dev/null +++ b/models/research/object_detection/meta_architectures/faster_rcnn_meta_arch.py @@ -0,0 +1,2865 @@ +# 
Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Faster R-CNN meta-architecture definition. + +General tensorflow implementation of Faster R-CNN detection models. + +See Faster R-CNN: Ren, Shaoqing, et al. +"Faster R-CNN: Towards real-time object detection with region proposal +networks." Advances in neural information processing systems. 2015. + +We allow for three modes: number_of_stages={1, 2, 3}. In case of 1 stage, +all of the user facing methods (e.g., predict, postprocess, loss) can be used as +if the model consisted only of the RPN, returning class agnostic proposals +(these can be thought of as approximate detections with no associated class +information). In case of 2 stages, proposals are computed, then passed +through a second stage "box classifier" to yield (multi-class) detections. +Finally, in case of 3 stages which is only used during eval, proposals are +computed, then passed through a second stage "box classifier" that will compute +refined boxes and classes, and then features are pooled from the refined and +non-maximum suppressed boxes and are passed through the box classifier again. If +number of stages is 3 during training it will be reduced to two automatically. + +Implementations of Faster R-CNN models must define a new +FasterRCNNFeatureExtractor and override three methods: `preprocess`, +`_extract_proposal_features` (the first stage of the model), and +`_extract_box_classifier_features` (the second stage of the model). Optionally, +the `restore_fn` method can be overridden. See tests for an example. + +A few important notes: ++ Batching conventions: We support batched inference and training where +all images within a batch have the same resolution. Batch sizes are determined +dynamically via the shape of the input tensors (rather than being specified +directly as, e.g., a model constructor). + +A complication is that due to non-max suppression, we are not guaranteed to get +the same number of proposals from the first stage RPN (region proposal network) +for each image (though in practice, we should often get the same number of +proposals). For this reason we pad to a max number of proposals per image +within a batch. This `self.max_num_proposals` property is set to the +`first_stage_max_proposals` parameter at inference time and the +`second_stage_batch_size` at training time since we subsample the batch to +be sent through the box classifier during training. + +For the second stage of the pipeline, we arrange the proposals for all images +within the batch along a single batch dimension. For example, the input to +_extract_box_classifier_features is a tensor of shape +`[total_num_proposals, crop_height, crop_width, depth]` where +total_num_proposals is batch_size * self.max_num_proposals. (And note that per +the above comment, a subset of these entries correspond to zero paddings.) 
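+ +For example (numbers chosen purely for illustration of the convention above): with batch_size=2 and self.max_num_proposals=8, total_num_proposals is 16, so _extract_box_classifier_features receives a [16, crop_height, crop_width, depth] tensor; if one image yields only 5 valid proposals after NMS, the remaining 3 of its 8 entries are zero padding.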
+ ++ Coordinate representations: +Following the API (see model.DetectionModel definition), our outputs after +postprocessing operations are always normalized boxes however, internally, we +sometimes convert to absolute --- e.g. for loss computation. In particular, +anchors and proposal_boxes are both represented as absolute coordinates. + +Images are resized in the `preprocess` method. + +The Faster R-CNN meta architecture has two post-processing methods +`_postprocess_rpn` which is applied after first stage and +`_postprocess_box_classifier` which is applied after second stage. There are +three different ways post-processing can happen depending on number_of_stages +configured in the meta architecture: + +1. When number_of_stages is 1: + `_postprocess_rpn` is run as part of the `postprocess` method where + true_image_shapes is used to clip proposals, perform non-max suppression and + normalize them. +2. When number of stages is 2: + `_postprocess_rpn` is run as part of the `_predict_second_stage` method where + `resized_image_shapes` is used to clip proposals, perform non-max suppression + and normalize them. In this case `postprocess` method skips `_postprocess_rpn` + and only runs `_postprocess_box_classifier` using `true_image_shapes` to clip + detections, perform non-max suppression and normalize them. +3. When number of stages is 3: + `_postprocess_rpn` is run as part of the `_predict_second_stage` using + `resized_image_shapes` to clip proposals, perform non-max suppression and + normalize them. Subsequently, `_postprocess_box_classifier` is run as part of + `_predict_third_stage` using `true_image_shapes` to clip detections, peform + non-max suppression and normalize them. In this case, the `postprocess` method + skips both `_postprocess_rpn` and `_postprocess_box_classifier`. +""" + +from __future__ import print_function +import abc +import functools +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.anchor_generators import grid_anchor_generator +from object_detection.builders import box_predictor_builder +from object_detection.builders import hyperparams_builder +from object_detection.core import box_list +from object_detection.core import box_list_ops +from object_detection.core import box_predictor +from object_detection.core import losses +from object_detection.core import model +from object_detection.core import standard_fields as fields +from object_detection.core import target_assigner +from object_detection.utils import ops +from object_detection.utils import shape_utils +from object_detection.utils import variables_helper + + +_UNINITIALIZED_FEATURE_EXTRACTOR = '__uninitialized__' + + +class FasterRCNNFeatureExtractor(object): + """Faster R-CNN Feature Extractor definition.""" + + def __init__(self, + is_training, + first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0): + """Constructor. + + Args: + is_training: A boolean indicating whether the training version of the + computation graph should be constructed. + first_stage_features_stride: Output stride of extracted RPN feature map. + batch_norm_trainable: Whether to update batch norm parameters during + training or not. When training with a relative large batch size + (e.g. 8), it could be desirable to enable batch norm update. + reuse_weights: Whether to reuse variables. Default is None. + weight_decay: float weight decay for feature extractor (default: 0.0). 
+ """ + self._is_training = is_training + self._first_stage_features_stride = first_stage_features_stride + self._train_batch_norm = (batch_norm_trainable and is_training) + self._reuse_weights = tf.AUTO_REUSE if reuse_weights else None + self._weight_decay = weight_decay + + @abc.abstractmethod + def preprocess(self, resized_inputs): + """Feature-extractor specific preprocessing (minus image resizing).""" + pass + + def extract_proposal_features(self, preprocessed_inputs, scope): + """Extracts first stage RPN features. + + This function is responsible for extracting feature maps from preprocessed + images. These features are used by the region proposal network (RPN) to + predict proposals. + + Args: + preprocessed_inputs: A [batch, height, width, channels] float tensor + representing a batch of images. + scope: A scope name. + + Returns: + rpn_feature_map: A tensor with shape [batch, height, width, depth] + activations: A dictionary mapping activation tensor names to tensors. + """ + with tf.variable_scope(scope, values=[preprocessed_inputs]): + return self._extract_proposal_features(preprocessed_inputs, scope) + + @abc.abstractmethod + def _extract_proposal_features(self, preprocessed_inputs, scope): + """Extracts first stage RPN features, to be overridden.""" + pass + + def extract_box_classifier_features(self, proposal_feature_maps, scope): + """Extracts second stage box classifier features. + + Args: + proposal_feature_maps: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, crop_height, crop_width, depth] + representing the feature map cropped to each proposal. + scope: A scope name. + + Returns: + proposal_classifier_features: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, height, width, depth] + representing box classifier features for each proposal. + """ + with tf.variable_scope( + scope, values=[proposal_feature_maps], reuse=tf.AUTO_REUSE): + return self._extract_box_classifier_features(proposal_feature_maps, scope) + + @abc.abstractmethod + def _extract_box_classifier_features(self, proposal_feature_maps, scope): + """Extracts second stage box classifier features, to be overridden.""" + pass + + def restore_from_classification_checkpoint_fn( + self, + first_stage_feature_extractor_scope, + second_stage_feature_extractor_scope): + """Returns a map of variables to load from a foreign checkpoint. + + Args: + first_stage_feature_extractor_scope: A scope name for the first stage + feature extractor. + second_stage_feature_extractor_scope: A scope name for the second stage + feature extractor. + + Returns: + A dict mapping variable names (to load from a checkpoint) to variables in + the model graph. + """ + variables_to_restore = {} + for variable in variables_helper.get_global_variables_safely(): + for scope_name in [first_stage_feature_extractor_scope, + second_stage_feature_extractor_scope]: + if variable.op.name.startswith(scope_name): + var_name = variable.op.name.replace(scope_name + '/', '') + variables_to_restore[var_name] = variable + return variables_to_restore + + +class FasterRCNNKerasFeatureExtractor(object): + """Keras-based Faster R-CNN Feature Extractor definition.""" + + def __init__(self, + is_training, + first_stage_features_stride, + batch_norm_trainable=False, + weight_decay=0.0): + """Constructor. + + Args: + is_training: A boolean indicating whether the training version of the + computation graph should be constructed. + first_stage_features_stride: Output stride of extracted RPN feature map. 
+ batch_norm_trainable: Whether to update batch norm parameters during + training or not. When training with a relative large batch size + (e.g. 8), it could be desirable to enable batch norm update. + weight_decay: float weight decay for feature extractor (default: 0.0). + """ + self._is_training = is_training + self._first_stage_features_stride = first_stage_features_stride + self._train_batch_norm = (batch_norm_trainable and is_training) + self._weight_decay = weight_decay + + @abc.abstractmethod + def preprocess(self, resized_inputs): + """Feature-extractor specific preprocessing (minus image resizing).""" + pass + + @abc.abstractmethod + def get_proposal_feature_extractor_model(self, name): + """Get model that extracts first stage RPN features, to be overridden.""" + pass + + @abc.abstractmethod + def get_box_classifier_feature_extractor_model(self, name): + """Get model that extracts second stage box classifier features.""" + pass + + def restore_from_classification_checkpoint_fn( + self, + first_stage_feature_extractor_scope, + second_stage_feature_extractor_scope): + """Returns a map of variables to load from a foreign checkpoint. + + Args: + first_stage_feature_extractor_scope: A scope name for the first stage + feature extractor. + second_stage_feature_extractor_scope: A scope name for the second stage + feature extractor. + + Returns: + A dict mapping variable names (to load from a checkpoint) to variables in + the model graph. + """ + variables_to_restore = {} + for variable in variables_helper.get_global_variables_safely(): + for scope_name in [first_stage_feature_extractor_scope, + second_stage_feature_extractor_scope]: + if variable.op.name.startswith(scope_name): + var_name = variable.op.name.replace(scope_name + '/', '') + variables_to_restore[var_name] = variable + return variables_to_restore + + +class FasterRCNNMetaArch(model.DetectionModel): + """Faster R-CNN Meta-architecture definition.""" + + def __init__(self, + is_training, + num_classes, + image_resizer_fn, + feature_extractor, + number_of_stages, + first_stage_anchor_generator, + first_stage_target_assigner, + first_stage_atrous_rate, + first_stage_box_predictor_arg_scope_fn, + first_stage_box_predictor_kernel_size, + first_stage_box_predictor_depth, + first_stage_minibatch_size, + first_stage_sampler, + first_stage_non_max_suppression_fn, + first_stage_max_proposals, + first_stage_localization_loss_weight, + first_stage_objectness_loss_weight, + crop_and_resize_fn, + initial_crop_size, + maxpool_kernel_size, + maxpool_stride, + second_stage_target_assigner, + second_stage_mask_rcnn_box_predictor, + second_stage_batch_size, + second_stage_sampler, + second_stage_non_max_suppression_fn, + second_stage_score_conversion_fn, + second_stage_localization_loss_weight, + second_stage_classification_loss_weight, + second_stage_classification_loss, + second_stage_mask_prediction_loss_weight=1.0, + hard_example_miner=None, + parallel_iterations=16, + add_summaries=True, + clip_anchors_to_image=False, + use_static_shapes=False, + resize_masks=True, + freeze_batchnorm=False, + return_raw_detections_during_predict=False, + output_final_box_features=False): + """FasterRCNNMetaArch Constructor. + + Args: + is_training: A boolean indicating whether the training version of the + computation graph should be constructed. + num_classes: Number of classes. 
Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + image_resizer_fn: A callable for image resizing. This callable + takes a rank-3 image tensor of shape [height, width, channels] + (corresponding to a single image), an optional rank-3 instance mask + tensor of shape [num_masks, height, width] and returns a resized rank-3 + image tensor, a resized mask tensor if one was provided in the input. In + addition this callable must also return a 1-D tensor of the form + [height, width, channels] containing the size of the true image, as the + image resizer can perform zero padding. See protos/image_resizer.proto. + feature_extractor: A FasterRCNNFeatureExtractor object. + number_of_stages: An integer values taking values in {1, 2, 3}. If + 1, the function will construct only the Region Proposal Network (RPN) + part of the model. If 2, the function will perform box refinement and + other auxiliary predictions all in the second stage. If 3, it will + extract features from refined boxes and perform the auxiliary + predictions on the non-maximum suppressed refined boxes. + If is_training is true and the value of number_of_stages is 3, it is + reduced to 2 since all the model heads are trained in parallel in second + stage during training. + first_stage_anchor_generator: An anchor_generator.AnchorGenerator object + (note that currently we only support + grid_anchor_generator.GridAnchorGenerator objects) + first_stage_target_assigner: Target assigner to use for first stage of + Faster R-CNN (RPN). + first_stage_atrous_rate: A single integer indicating the atrous rate for + the single convolution op which is applied to the `rpn_features_to_crop` + tensor to obtain a tensor to be used for box prediction. Some feature + extractors optionally allow for producing feature maps computed at + denser resolutions. The atrous rate is used to compensate for the + denser feature maps by using an effectively larger receptive field. + (This should typically be set to 1). + first_stage_box_predictor_arg_scope_fn: Either a + Keras layer hyperparams object or a function to construct tf-slim + arg_scope for conv2d, separable_conv2d and fully_connected ops. Used + for the RPN box predictor. If it is a keras hyperparams object the + RPN box predictor will be a Keras model. If it is a function to + construct an arg scope it will be a tf-slim box predictor. + first_stage_box_predictor_kernel_size: Kernel size to use for the + convolution op just prior to RPN box predictions. + first_stage_box_predictor_depth: Output depth for the convolution op + just prior to RPN box predictions. + first_stage_minibatch_size: The "batch size" to use for computing the + objectness and location loss of the region proposal network. This + "batch size" refers to the number of anchors selected as contributing + to the loss function for any given image within the image batch and is + only called "batch_size" due to terminology from the Faster R-CNN paper. + first_stage_sampler: Sampler to use for first stage loss (RPN loss). + first_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression + callable that takes `boxes`, `scores` and optional `clip_window`(with + all other inputs already set) and returns a dictionary containing + tensors with keys: `detection_boxes`, `detection_scores`, + `detection_classes`, `num_detections`. 
This is used to perform non max + suppression on the boxes predicted by the Region Proposal Network + (RPN). + See `post_processing.batch_multiclass_non_max_suppression` for the type + and shape of these tensors. + first_stage_max_proposals: Maximum number of boxes to retain after + performing Non-Max Suppression (NMS) on the boxes predicted by the + Region Proposal Network (RPN). + first_stage_localization_loss_weight: A float + first_stage_objectness_loss_weight: A float + crop_and_resize_fn: A differentiable resampler to use for cropping RPN + proposal features. + initial_crop_size: A single integer indicating the output size + (width and height are set to be the same) of the initial bilinear + interpolation based cropping during ROI pooling. + maxpool_kernel_size: A single integer indicating the kernel size of the + max pool op on the cropped feature map during ROI pooling. + maxpool_stride: A single integer indicating the stride of the max pool + op on the cropped feature map during ROI pooling. + second_stage_target_assigner: Target assigner to use for second stage of + Faster R-CNN. If the model is configured with multiple prediction heads, + this target assigner is used to generate targets for all heads (with the + correct `unmatched_class_label`). + second_stage_mask_rcnn_box_predictor: Mask R-CNN box predictor to use for + the second stage. + second_stage_batch_size: The batch size used for computing the + classification and refined location loss of the box classifier. This + "batch size" refers to the number of proposals selected as contributing + to the loss function for any given image within the image batch and is + only called "batch_size" due to terminology from the Faster R-CNN paper. + second_stage_sampler: Sampler to use for second stage loss (box + classifier loss). + second_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression + callable that takes `boxes`, `scores`, optional `clip_window` and + optional (kwarg) `mask` inputs (with all other inputs already set) + and returns a dictionary containing tensors with keys: + `detection_boxes`, `detection_scores`, `detection_classes`, + `num_detections`, and (optionally) `detection_masks`. See + `post_processing.batch_multiclass_non_max_suppression` for the type and + shape of these tensors. + second_stage_score_conversion_fn: Callable elementwise nonlinearity + (that takes tensors as inputs and returns tensors). This is usually + used to convert logits to probabilities. + second_stage_localization_loss_weight: A float indicating the scale factor + for second stage localization loss. + second_stage_classification_loss_weight: A float indicating the scale + factor for second stage classification loss. + second_stage_classification_loss: Classification loss used by the second + stage classifier. Either losses.WeightedSigmoidClassificationLoss or + losses.WeightedSoftmaxClassificationLoss. + second_stage_mask_prediction_loss_weight: A float indicating the scale + factor for second stage mask prediction loss. This is applicable only if + second stage box predictor is configured to predict masks. + hard_example_miner: A losses.HardExampleMiner object (can be None). + parallel_iterations: (Optional) The number of iterations allowed to run + in parallel for calls to tf.map_fn. + add_summaries: boolean (default: True) controlling whether summary ops + should be added to tensorflow graph. 
+ clip_anchors_to_image: Normally, anchors generated for a given image size + are pruned during training if they lie outside the image window. This + option clips the anchors to be within the image instead of pruning. + use_static_shapes: If True, uses implementation of ops with static shape + guarantees. + resize_masks: Indicates whether the masks present in the groundtruth + should be resized in the model with `image_resizer_fn`. + freeze_batchnorm: Whether to freeze batch norm parameters in the first + stage box predictor during training or not. When training with a small + batch size (e.g. 1), it is desirable to freeze batch norm update and + use pretrained batch norm params. + return_raw_detections_during_predict: Whether to return raw detection + boxes in the predict() method. These are decoded boxes that have not + been through postprocessing (i.e. NMS). Default False. + output_final_box_features: Whether to output final box features. If true, + it crops the feature map based on the final box prediction and returns it + in the dict as detection_features. + + Raises: + ValueError: If `second_stage_batch_size` > `first_stage_max_proposals` at + training time. + ValueError: If first_stage_anchor_generator is not of type + grid_anchor_generator.GridAnchorGenerator. + """ + # TODO(rathodv): add_summaries is currently unused. Respect that directive + # in the future. + super(FasterRCNNMetaArch, self).__init__(num_classes=num_classes) + + if not isinstance(first_stage_anchor_generator, + grid_anchor_generator.GridAnchorGenerator): + raise ValueError('first_stage_anchor_generator must be of type ' + 'grid_anchor_generator.GridAnchorGenerator.') + + self._is_training = is_training + self._image_resizer_fn = image_resizer_fn + self._resize_masks = resize_masks + self._feature_extractor = feature_extractor + if isinstance(feature_extractor, FasterRCNNKerasFeatureExtractor): + # We delay building the feature extractor until it is used, + # to avoid creating the variables when a model is built just for data + # preprocessing.
(This prevents a subtle bug where variable names are + # mismatched across workers, causing only one worker to be able to train) + self._feature_extractor_for_proposal_features = ( + _UNINITIALIZED_FEATURE_EXTRACTOR) + self._feature_extractor_for_box_classifier_features = ( + _UNINITIALIZED_FEATURE_EXTRACTOR) + else: + self._feature_extractor_for_proposal_features = None + self._feature_extractor_for_box_classifier_features = None + + self._number_of_stages = number_of_stages + + self._proposal_target_assigner = first_stage_target_assigner + self._detector_target_assigner = second_stage_target_assigner + # Both proposal and detector target assigners use the same box coder + self._box_coder = self._proposal_target_assigner.box_coder + + # (First stage) Region proposal network parameters + self._first_stage_anchor_generator = first_stage_anchor_generator + self._first_stage_atrous_rate = first_stage_atrous_rate + self._first_stage_box_predictor_depth = first_stage_box_predictor_depth + self._first_stage_box_predictor_kernel_size = ( + first_stage_box_predictor_kernel_size) + self._first_stage_minibatch_size = first_stage_minibatch_size + self._first_stage_sampler = first_stage_sampler + if isinstance(first_stage_box_predictor_arg_scope_fn, + hyperparams_builder.KerasLayerHyperparams): + num_anchors_per_location = ( + self._first_stage_anchor_generator.num_anchors_per_location()) + if len(num_anchors_per_location) != 1: + raise ValueError('anchor_generator is expected to generate anchors ' + 'corresponding to a single feature map.') + conv_hyperparams = ( + first_stage_box_predictor_arg_scope_fn) + self._first_stage_box_predictor_first_conv = ( + tf.keras.Sequential([ + tf.keras.layers.Conv2D( + self._first_stage_box_predictor_depth, + kernel_size=[self._first_stage_box_predictor_kernel_size, + self._first_stage_box_predictor_kernel_size], + dilation_rate=self._first_stage_atrous_rate, + padding='SAME', + name='RPNConv', + **conv_hyperparams.params()), + conv_hyperparams.build_batch_norm( + (self._is_training and not freeze_batchnorm), + name='RPNBatchNorm'), + tf.keras.layers.Lambda( + tf.nn.relu6, + name='RPNActivation') + ], name='FirstStageRPNFeatures')) + self._first_stage_box_predictor = ( + box_predictor_builder.build_convolutional_keras_box_predictor( + is_training=self._is_training, + num_classes=1, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=False, + num_predictions_per_location_list=num_anchors_per_location, + use_dropout=False, + dropout_keep_prob=1.0, + box_code_size=self._box_coder.code_size, + kernel_size=1, + num_layers_before_predictor=0, + min_depth=0, + max_depth=0, + name=self.first_stage_box_predictor_scope)) + else: + self._first_stage_box_predictor_arg_scope_fn = ( + first_stage_box_predictor_arg_scope_fn) + def rpn_box_predictor_feature_extractor(rpn_features_to_crop): + with slim.arg_scope(self._first_stage_box_predictor_arg_scope_fn()): + reuse = tf.get_variable_scope().reuse + return slim.conv2d( + rpn_features_to_crop, + self._first_stage_box_predictor_depth, + kernel_size=[ + self._first_stage_box_predictor_kernel_size, + self._first_stage_box_predictor_kernel_size + ], + rate=self._first_stage_atrous_rate, + activation_fn=tf.nn.relu6, + scope='Conv', + reuse=reuse) + self._first_stage_box_predictor_first_conv = ( + rpn_box_predictor_feature_extractor) + self._first_stage_box_predictor = ( + box_predictor_builder.build_convolutional_box_predictor( + is_training=self._is_training, + num_classes=1, + 
conv_hyperparams_fn=self._first_stage_box_predictor_arg_scope_fn, + use_dropout=False, + dropout_keep_prob=1.0, + box_code_size=self._box_coder.code_size, + kernel_size=1, + num_layers_before_predictor=0, + min_depth=0, + max_depth=0)) + + self._first_stage_nms_fn = first_stage_non_max_suppression_fn + self._first_stage_max_proposals = first_stage_max_proposals + self._use_static_shapes = use_static_shapes + + self._first_stage_localization_loss = ( + losses.WeightedSmoothL1LocalizationLoss()) + self._first_stage_objectness_loss = ( + losses.WeightedSoftmaxClassificationLoss()) + self._first_stage_loc_loss_weight = first_stage_localization_loss_weight + self._first_stage_obj_loss_weight = first_stage_objectness_loss_weight + + # Per-region cropping parameters + self._crop_and_resize_fn = crop_and_resize_fn + self._initial_crop_size = initial_crop_size + self._maxpool_kernel_size = maxpool_kernel_size + self._maxpool_stride = maxpool_stride + # If max pooling is to be used, build the layer + if maxpool_kernel_size: + self._maxpool_layer = tf.keras.layers.MaxPooling2D( + [self._maxpool_kernel_size, self._maxpool_kernel_size], + strides=self._maxpool_stride, + name='MaxPool2D') + + self._mask_rcnn_box_predictor = second_stage_mask_rcnn_box_predictor + + self._second_stage_batch_size = second_stage_batch_size + self._second_stage_sampler = second_stage_sampler + + self._second_stage_nms_fn = second_stage_non_max_suppression_fn + self._second_stage_score_conversion_fn = second_stage_score_conversion_fn + + self._second_stage_localization_loss = ( + losses.WeightedSmoothL1LocalizationLoss()) + self._second_stage_classification_loss = second_stage_classification_loss + self._second_stage_mask_loss = ( + losses.WeightedSigmoidClassificationLoss()) + self._second_stage_loc_loss_weight = second_stage_localization_loss_weight + self._second_stage_cls_loss_weight = second_stage_classification_loss_weight + self._second_stage_mask_loss_weight = ( + second_stage_mask_prediction_loss_weight) + self._hard_example_miner = hard_example_miner + self._parallel_iterations = parallel_iterations + + self.clip_anchors_to_image = clip_anchors_to_image + + if self._number_of_stages <= 0 or self._number_of_stages > 3: + raise ValueError('Number of stages should be a value in {1, 2, 3}.') + self._batched_prediction_tensor_names = [] + self._return_raw_detections_during_predict = ( + return_raw_detections_during_predict) + self._output_final_box_features = output_final_box_features + + @property + def first_stage_feature_extractor_scope(self): + return 'FirstStageFeatureExtractor' + + @property + def second_stage_feature_extractor_scope(self): + return 'SecondStageFeatureExtractor' + + @property + def first_stage_box_predictor_scope(self): + return 'FirstStageBoxPredictor' + + @property + def second_stage_box_predictor_scope(self): + return 'SecondStageBoxPredictor' + + @property + def max_num_proposals(self): + """Max number of proposals (to pad to) for each image in the input batch. + + At training time, this is set to be the `second_stage_batch_size` if hard + example miner is not configured, else it is set to + `first_stage_max_proposals`. At inference time, this is always set to + `first_stage_max_proposals`. + + Returns: + A positive integer. 
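+ + For example, with `second_stage_batch_size=6` and no hard example miner configured, this property is 6 at training time; at inference time it is always `first_stage_max_proposals`.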
+ """ + if self._is_training and not self._hard_example_miner: + return self._second_stage_batch_size + return self._first_stage_max_proposals + + @property + def anchors(self): + if not self._anchors: + raise RuntimeError('anchors have not been constructed yet!') + if not isinstance(self._anchors, box_list.BoxList): + raise RuntimeError('anchors should be a BoxList object, but is not.') + return self._anchors + + @property + def batched_prediction_tensor_names(self): + if not self._batched_prediction_tensor_names: + raise RuntimeError('Must call predict() method to get batched prediction ' + 'tensor names.') + return self._batched_prediction_tensor_names + + @property + def feature_extractor(self): + return self._feature_extractor + + def preprocess(self, inputs): + """Feature-extractor specific preprocessing. + + See base class. + + For Faster R-CNN, we perform image resizing in the base class --- each + class subclassing FasterRCNNMetaArch is responsible for any additional + preprocessing (e.g., scaling pixel values to be in [-1, 1]). + + Args: + inputs: a [batch, height_in, width_in, channels] float tensor representing + a batch of images with values between 0 and 255.0. + + Returns: + preprocessed_inputs: a [batch, height_out, width_out, channels] float + tensor representing a batch of images. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + Raises: + ValueError: if inputs tensor does not have type tf.float32 + """ + + with tf.name_scope('Preprocessor'): + (resized_inputs, + true_image_shapes) = shape_utils.resize_images_and_return_shapes( + inputs, self._image_resizer_fn) + + return (self._feature_extractor.preprocess(resized_inputs), + true_image_shapes) + + def _compute_clip_window(self, image_shapes): + """Computes clip window for non max suppression based on image shapes. + + This function assumes that the clip window's left top corner is at (0, 0). + + Args: + image_shapes: A 2-D int32 tensor of shape [batch_size, 3] containing + shapes of images in the batch. Each row represents [height, width, + channels] of an image. + + Returns: + A 2-D float32 tensor of shape [batch_size, 4] containing the clip window + for each image in the form [ymin, xmin, ymax, xmax]. + """ + clip_heights = image_shapes[:, 0] + clip_widths = image_shapes[:, 1] + clip_window = tf.cast( + tf.stack([ + tf.zeros_like(clip_heights), + tf.zeros_like(clip_heights), clip_heights, clip_widths + ], + axis=1), + dtype=tf.float32) + return clip_window + + def _proposal_postprocess(self, rpn_box_encodings, + rpn_objectness_predictions_with_background, anchors, + image_shape, true_image_shapes): + """Wraps over FasterRCNNMetaArch._postprocess_rpn().""" + image_shape_2d = self._image_batch_shape_2d(image_shape) + proposal_boxes_normalized, _, _, num_proposals, _, _ = \ + self._postprocess_rpn( + rpn_box_encodings, rpn_objectness_predictions_with_background, + anchors, image_shape_2d, true_image_shapes) + return proposal_boxes_normalized, num_proposals + + def predict(self, preprocessed_inputs, true_image_shapes, **side_inputs): + """Predicts unpostprocessed tensors from input tensor. + + This function takes an input batch of images and runs it through the + forward pass of the network to yield "raw" un-postprocessed predictions. + If `number_of_stages` is 1, this function only returns first stage + RPN predictions (un-postprocessed). 
Otherwise it returns both + first stage RPN predictions as well as second stage box classifier + predictions. + + Other remarks: + + Anchor pruning vs. clipping: following the recommendation of the Faster + R-CNN paper, we prune anchors that venture outside the image window at + training time and clip anchors to the image window at inference time. + + Proposal padding: as described at the top of the file, proposals are + padded to self._max_num_proposals and flattened so that proposals from all + images within the input batch are arranged along the same batch dimension. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + **side_inputs: additional tensors that are required by the network. + + Returns: + prediction_dict: a dictionary holding "raw" prediction tensors: + 1) rpn_box_predictor_features: A 4-D float32 tensor with shape + [batch_size, height, width, depth] to be used for predicting proposal + boxes and corresponding objectness scores. + 2) rpn_features_to_crop: A 4-D float32 tensor with shape + [batch_size, height, width, depth] representing image features to crop + using the proposal boxes predicted by the RPN. + 3) image_shape: a 1-D tensor of shape [4] representing the input + image shape. + 4) rpn_box_encodings: 3-D float tensor of shape + [batch_size, num_anchors, self._box_coder.code_size] containing + predicted boxes. + 5) rpn_objectness_predictions_with_background: 3-D float tensor of shape + [batch_size, num_anchors, 2] containing class + predictions (logits) for each of the anchors. Note that this + tensor *includes* background class predictions (at class index 0). + 6) anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors + for the first stage RPN (in absolute coordinates). Note that + `num_anchors` can differ depending on whether the model is created in + training or inference mode. + 7) feature_maps: A single element list containing a 4-D float32 tensor + with shape batch_size, height, width, depth] representing the RPN + features to crop. + + (and if number_of_stages > 1): + 8) refined_box_encodings: a 3-D tensor with shape + [total_num_proposals, num_classes, self._box_coder.code_size] + representing predicted (final) refined box encodings, where + total_num_proposals=batch_size*self._max_num_proposals. If using + a shared box across classes the shape will instead be + [total_num_proposals, 1, self._box_coder.code_size]. + 9) class_predictions_with_background: a 3-D tensor with shape + [total_num_proposals, num_classes + 1] containing class + predictions (logits) for each of the anchors, where + total_num_proposals=batch_size*self._max_num_proposals. + Note that this tensor *includes* background class predictions + (at class index 0). + 10) num_proposals: An int32 tensor of shape [batch_size] representing + the number of proposals generated by the RPN. `num_proposals` allows + us to keep track of which entries are to be treated as zero paddings + and which are not since we always pad the number of proposals to be + `self.max_num_proposals` for each image. + 11) proposal_boxes: A float32 tensor of shape + [batch_size, self.max_num_proposals, 4] representing + decoded proposal bounding boxes in absolute coordinates. 
+ 12) mask_predictions: (optional) a 4-D tensor with shape + [total_num_padded_proposals, num_classes, mask_height, mask_width] + containing instance mask predictions. + 13) raw_detection_boxes: (optional) a + [batch_size, self.max_num_proposals, num_classes, 4] float32 tensor + with detections prior to NMS in normalized coordinates. + 14) raw_detection_feature_map_indices: (optional) a + [batch_size, self.max_num_proposals, num_classes] int32 tensor with + indices indicating which feature map each raw detection box was + produced from. The indices correspond to the elements in the + 'feature_maps' field. + + Raises: + ValueError: If `predict` is called before `preprocess`. + """ + prediction_dict = self._predict_first_stage(preprocessed_inputs) + + if self._number_of_stages >= 2: + prediction_dict.update( + self._predict_second_stage( + prediction_dict['rpn_box_encodings'], + prediction_dict['rpn_objectness_predictions_with_background'], + prediction_dict['rpn_features_to_crop'], + prediction_dict['anchors'], prediction_dict['image_shape'], + true_image_shapes, **side_inputs)) + + if self._number_of_stages == 3: + prediction_dict = self._predict_third_stage(prediction_dict, + true_image_shapes) + + self._batched_prediction_tensor_names = [ + x for x in prediction_dict if x not in ('image_shape', 'anchors') + ] + return prediction_dict + + def _predict_first_stage(self, preprocessed_inputs): + """First stage of prediction. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + prediction_dict: a dictionary holding "raw" prediction tensors: + 1) rpn_box_predictor_features: A 4-D float32/bfloat16 tensor with shape + [batch_size, height, width, depth] to be used for predicting proposal + boxes and corresponding objectness scores. + 2) rpn_features_to_crop: A 4-D float32/bfloat16 tensor with shape + [batch_size, height, width, depth] representing image features to crop + using the proposal boxes predicted by the RPN. + 3) image_shape: a 1-D tensor of shape [4] representing the input + image shape. + 4) rpn_box_encodings: 3-D float32 tensor of shape + [batch_size, num_anchors, self._box_coder.code_size] containing + predicted boxes. + 5) rpn_objectness_predictions_with_background: 3-D float32 tensor of + shape [batch_size, num_anchors, 2] containing class predictions + (logits) for each of the anchors. Note that this tensor *includes* + background class predictions (at class index 0). + 6) anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors + for the first stage RPN (in absolute coordinates). Note that + `num_anchors` can differ depending on whether the model is created in + training or inference mode. + 7) feature_maps: A single element list containing a 4-D float32 tensor + with shape batch_size, height, width, depth] representing the RPN + features to crop. + """ + (rpn_box_predictor_features, rpn_features_to_crop, anchors_boxlist, + image_shape) = self._extract_rpn_feature_maps(preprocessed_inputs) + (rpn_box_encodings, rpn_objectness_predictions_with_background + ) = self._predict_rpn_proposals(rpn_box_predictor_features) + + # The Faster R-CNN paper recommends pruning anchors that venture outside + # the image window at training time and clipping at inference time. 
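+ # clip_window covers the full (possibly zero-padded) resized image: + # [ymin, xmin, ymax, xmax] = [0, 0, height, width] in absolute pixel + # coordinates, where image_shape is the [batch, height, width, channels] + # shape of the preprocessed inputs.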
+ clip_window = tf.cast(tf.stack([0, 0, image_shape[1], image_shape[2]]), + dtype=tf.float32) + if self._is_training: + if self.clip_anchors_to_image: + anchors_boxlist = box_list_ops.clip_to_window( + anchors_boxlist, clip_window, filter_nonoverlapping=False) + else: + (rpn_box_encodings, rpn_objectness_predictions_with_background, + anchors_boxlist) = self._remove_invalid_anchors_and_predictions( + rpn_box_encodings, rpn_objectness_predictions_with_background, + anchors_boxlist, clip_window) + else: + anchors_boxlist = box_list_ops.clip_to_window( + anchors_boxlist, clip_window, + filter_nonoverlapping=not self._use_static_shapes) + + self._anchors = anchors_boxlist + prediction_dict = { + 'rpn_box_predictor_features': + rpn_box_predictor_features, + 'rpn_features_to_crop': + rpn_features_to_crop, + 'image_shape': + image_shape, + 'rpn_box_encodings': + tf.cast(rpn_box_encodings, dtype=tf.float32), + 'rpn_objectness_predictions_with_background': + tf.cast(rpn_objectness_predictions_with_background, + dtype=tf.float32), + 'anchors': + anchors_boxlist.data['boxes'], + fields.PredictionFields.feature_maps: [rpn_features_to_crop] + } + return prediction_dict + + def _image_batch_shape_2d(self, image_batch_shape_1d): + """Takes a 1-D image batch shape tensor and converts it to a 2-D tensor. + + Example: + If 1-D image batch shape tensor is [2, 300, 300, 3]. The corresponding 2-D + image batch tensor would be [[300, 300, 3], [300, 300, 3]] + + Args: + image_batch_shape_1d: 1-D tensor of the form [batch_size, height, + width, channels]. + + Returns: + image_batch_shape_2d: 2-D tensor of shape [batch_size, 3] were each row is + of the form [height, width, channels]. + """ + return tf.tile(tf.expand_dims(image_batch_shape_1d[1:], 0), + [image_batch_shape_1d[0], 1]) + + def _predict_second_stage(self, rpn_box_encodings, + rpn_objectness_predictions_with_background, + rpn_features_to_crop, anchors, image_shape, + true_image_shapes, **side_inputs): + """Predicts the output tensors from second stage of Faster R-CNN. + + Args: + rpn_box_encodings: 3-D float tensor of shape + [batch_size, num_valid_anchors, self._box_coder.code_size] containing + predicted boxes. + rpn_objectness_predictions_with_background: 2-D float tensor of shape + [batch_size, num_valid_anchors, 2] containing class + predictions (logits) for each of the anchors. Note that this + tensor *includes* background class predictions (at class index 0). + rpn_features_to_crop: A 4-D float32 or bfloat16 tensor with shape + [batch_size, height, width, depth] representing image features to crop + using the proposal boxes predicted by the RPN. + anchors: 2-D float tensor of shape + [num_anchors, self._box_coder.code_size]. + image_shape: A 1D int32 tensors of size [4] containing the image shape. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + **side_inputs: additional tensors that are required by the network. + + Returns: + prediction_dict: a dictionary holding "raw" prediction tensors: + 1) refined_box_encodings: a 3-D float32 tensor with shape + [total_num_proposals, num_classes, self._box_coder.code_size] + representing predicted (final) refined box encodings, where + total_num_proposals=batch_size*self._max_num_proposals. If using a + shared box across classes the shape will instead be + [total_num_proposals, 1, self._box_coder.code_size]. 
+ 2) class_predictions_with_background: a 3-D float32 tensor with shape + [total_num_proposals, num_classes + 1] containing class + predictions (logits) for each of the anchors, where + total_num_proposals=batch_size*self._max_num_proposals. + Note that this tensor *includes* background class predictions + (at class index 0). + 3) num_proposals: An int32 tensor of shape [batch_size] representing the + number of proposals generated by the RPN. `num_proposals` allows us + to keep track of which entries are to be treated as zero paddings and + which are not since we always pad the number of proposals to be + `self.max_num_proposals` for each image. + 4) proposal_boxes: A float32 tensor of shape + [batch_size, self.max_num_proposals, 4] representing + decoded proposal bounding boxes in absolute coordinates. + 5) proposal_boxes_normalized: A float32 tensor of shape + [batch_size, self.max_num_proposals, 4] representing decoded proposal + bounding boxes in normalized coordinates. Can be used to override the + boxes proposed by the RPN, thus enabling one to extract features and + get box classification and prediction for externally selected areas + of the image. + 6) box_classifier_features: a 4-D float32/bfloat16 tensor + representing the features for each proposal. + If self._return_raw_detections_during_predict is True, the dictionary + will also contain: + 7) raw_detection_boxes: a 4-D float32 tensor with shape + [batch_size, self.max_num_proposals, num_classes, 4] in normalized + coordinates. + 8) raw_detection_feature_map_indices: a 3-D int32 tensor with shape + [batch_size, self.max_num_proposals, num_classes]. + """ + proposal_boxes_normalized, num_proposals = self._proposal_postprocess( + rpn_box_encodings, rpn_objectness_predictions_with_background, anchors, + image_shape, true_image_shapes) + prediction_dict = self._box_prediction(rpn_features_to_crop, + proposal_boxes_normalized, + image_shape, true_image_shapes, + **side_inputs) + prediction_dict['num_proposals'] = num_proposals + return prediction_dict + + def _box_prediction(self, rpn_features_to_crop, proposal_boxes_normalized, + image_shape, true_image_shapes, **side_inputs): + """Predicts the output tensors from second stage of Faster R-CNN. + + Args: + rpn_features_to_crop: A 4-D float32 or bfloat16 tensor with shape + [batch_size, height, width, depth] representing image features to crop + using the proposal boxes predicted by the RPN. + proposal_boxes_normalized: A float tensor with shape [batch_size, + max_num_proposals, 4] representing the (potentially zero padded) + proposal boxes for all images in the batch. These boxes are represented + as normalized coordinates. + image_shape: A 1D int32 tensors of size [4] containing the image shape. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + **side_inputs: additional tensors that are required by the network. + + Returns: + prediction_dict: a dictionary holding "raw" prediction tensors: + 1) refined_box_encodings: a 3-D float32 tensor with shape + [total_num_proposals, num_classes, self._box_coder.code_size] + representing predicted (final) refined box encodings, where + total_num_proposals=batch_size*self._max_num_proposals. If using a + shared box across classes the shape will instead be + [total_num_proposals, 1, self._box_coder.code_size]. 
+ 2) class_predictions_with_background: a 3-D float32 tensor with shape + [total_num_proposals, num_classes + 1] containing class + predictions (logits) for each of the anchors, where + total_num_proposals=batch_size*self._max_num_proposals. + Note that this tensor *includes* background class predictions + (at class index 0). + 3) proposal_boxes: A float32 tensor of shape + [batch_size, self.max_num_proposals, 4] representing + decoded proposal bounding boxes in absolute coordinates. + 4) proposal_boxes_normalized: A float32 tensor of shape + [batch_size, self.max_num_proposals, 4] representing decoded proposal + bounding boxes in normalized coordinates. Can be used to override the + boxes proposed by the RPN, thus enabling one to extract features and + get box classification and prediction for externally selected areas + of the image. + 5) box_classifier_features: a 4-D float32/bfloat16 tensor + representing the features for each proposal. + If self._return_raw_detections_during_predict is True, the dictionary + will also contain: + 6) raw_detection_boxes: a 4-D float32 tensor with shape + [batch_size, self.max_num_proposals, num_classes, 4] in normalized + coordinates. + 7) raw_detection_feature_map_indices: a 3-D int32 tensor with shape + [batch_size, self.max_num_proposals, num_classes]. + 8) final_anchors: a 3-D float tensor of shape [batch_size, + self.max_num_proposals, 4] containing the reference anchors for raw + detection boxes in normalized coordinates. + """ + flattened_proposal_feature_maps = ( + self._compute_second_stage_input_feature_maps( + rpn_features_to_crop, proposal_boxes_normalized, **side_inputs)) + + box_classifier_features = self._extract_box_classifier_features( + flattened_proposal_feature_maps) + + if self._mask_rcnn_box_predictor.is_keras_model: + box_predictions = self._mask_rcnn_box_predictor( + [box_classifier_features], + prediction_stage=2) + else: + box_predictions = self._mask_rcnn_box_predictor.predict( + [box_classifier_features], + num_predictions_per_location=[1], + scope=self.second_stage_box_predictor_scope, + prediction_stage=2) + + refined_box_encodings = tf.squeeze( + box_predictions[box_predictor.BOX_ENCODINGS], + axis=1, name='all_refined_box_encodings') + class_predictions_with_background = tf.squeeze( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1, name='all_class_predictions_with_background') + + absolute_proposal_boxes = ops.normalized_to_image_coordinates( + proposal_boxes_normalized, image_shape, self._parallel_iterations) + + prediction_dict = { + 'refined_box_encodings': tf.cast(refined_box_encodings, + dtype=tf.float32), + 'class_predictions_with_background': + tf.cast(class_predictions_with_background, dtype=tf.float32), + 'proposal_boxes': absolute_proposal_boxes, + 'box_classifier_features': box_classifier_features, + 'proposal_boxes_normalized': proposal_boxes_normalized, + 'final_anchors': proposal_boxes_normalized + } + + if self._return_raw_detections_during_predict: + prediction_dict.update(self._raw_detections_and_feature_map_inds( + refined_box_encodings, absolute_proposal_boxes, true_image_shapes)) + + return prediction_dict + + def _raw_detections_and_feature_map_inds( + self, refined_box_encodings, absolute_proposal_boxes, true_image_shapes): + """Returns raw detections and feat map inds from where they originated. + + Args: + refined_box_encodings: [total_num_proposals, num_classes, + self._box_coder.code_size] float32 tensor. 
+ absolute_proposal_boxes: [batch_size, self.max_num_proposals, 4] float32 + tensor representing decoded proposal bounding boxes in absolute + coordinates. + true_image_shapes: [batch, 3] int32 tensor where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + + Returns: + A dictionary with raw detection boxes, and the feature map indices from + which they originated. + """ + box_encodings_batch = tf.reshape( + refined_box_encodings, + [-1, self.max_num_proposals, refined_box_encodings.shape[1], + self._box_coder.code_size]) + raw_detection_boxes_absolute = self._batch_decode_boxes( + box_encodings_batch, absolute_proposal_boxes) + + raw_detection_boxes_normalized = shape_utils.static_or_dynamic_map_fn( + self._normalize_and_clip_boxes, + elems=[raw_detection_boxes_absolute, true_image_shapes], + dtype=tf.float32) + detection_feature_map_indices = tf.zeros_like( + raw_detection_boxes_normalized[:, :, :, 0], dtype=tf.int32) + return { + fields.PredictionFields.raw_detection_boxes: + raw_detection_boxes_normalized, + fields.PredictionFields.raw_detection_feature_map_indices: + detection_feature_map_indices + } + + def _extract_box_classifier_features(self, flattened_feature_maps): + if self._feature_extractor_for_box_classifier_features == ( + _UNINITIALIZED_FEATURE_EXTRACTOR): + self._feature_extractor_for_box_classifier_features = ( + self._feature_extractor.get_box_classifier_feature_extractor_model( + name=self.second_stage_feature_extractor_scope)) + + if self._feature_extractor_for_box_classifier_features: + box_classifier_features = ( + self._feature_extractor_for_box_classifier_features( + flattened_feature_maps)) + else: + box_classifier_features = ( + self._feature_extractor.extract_box_classifier_features( + flattened_feature_maps, + scope=self.second_stage_feature_extractor_scope)) + return box_classifier_features + + def _predict_third_stage(self, prediction_dict, image_shapes): + """Predicts non-box, non-class outputs using refined detections. + + For training, masks as predicted directly on the box_classifier_features, + which are region-features from the initial anchor boxes. + For inference, this happens after calling the post-processing stage, such + that masks are only calculated for the top scored boxes. + + Args: + prediction_dict: a dictionary holding "raw" prediction tensors: + 1) refined_box_encodings: a 3-D tensor with shape + [total_num_proposals, num_classes, self._box_coder.code_size] + representing predicted (final) refined box encodings, where + total_num_proposals=batch_size*self._max_num_proposals. If using a + shared box across classes the shape will instead be + [total_num_proposals, 1, self._box_coder.code_size]. + 2) class_predictions_with_background: a 3-D tensor with shape + [total_num_proposals, num_classes + 1] containing class + predictions (logits) for each of the anchors, where + total_num_proposals=batch_size*self._max_num_proposals. + Note that this tensor *includes* background class predictions + (at class index 0). + 3) num_proposals: An int32 tensor of shape [batch_size] representing the + number of proposals generated by the RPN. `num_proposals` allows us + to keep track of which entries are to be treated as zero paddings and + which are not since we always pad the number of proposals to be + `self.max_num_proposals` for each image. 
+ 4) proposal_boxes: A float32 tensor of shape + [batch_size, self.max_num_proposals, 4] representing + decoded proposal bounding boxes in absolute coordinates. + 5) box_classifier_features: a 4-D float32 tensor representing the + features for each proposal. + image_shapes: A 2-D int32 tensors of shape [batch_size, 3] containing + shapes of images in the batch. + + Returns: + prediction_dict: a dictionary that in addition to the input predictions + does hold the following predictions as well: + 1) mask_predictions: a 4-D tensor with shape + [batch_size, max_detection, mask_height, mask_width] containing + instance mask predictions. + """ + if self._is_training: + curr_box_classifier_features = prediction_dict['box_classifier_features'] + detection_classes = prediction_dict['class_predictions_with_background'] + if self._mask_rcnn_box_predictor.is_keras_model: + mask_predictions = self._mask_rcnn_box_predictor( + [curr_box_classifier_features], + prediction_stage=3) + else: + mask_predictions = self._mask_rcnn_box_predictor.predict( + [curr_box_classifier_features], + num_predictions_per_location=[1], + scope=self.second_stage_box_predictor_scope, + prediction_stage=3) + prediction_dict['mask_predictions'] = tf.squeeze(mask_predictions[ + box_predictor.MASK_PREDICTIONS], axis=1) + else: + detections_dict = self._postprocess_box_classifier( + prediction_dict['refined_box_encodings'], + prediction_dict['class_predictions_with_background'], + prediction_dict['proposal_boxes'], + prediction_dict['num_proposals'], + image_shapes) + prediction_dict.update(detections_dict) + detection_boxes = detections_dict[ + fields.DetectionResultFields.detection_boxes] + detection_classes = detections_dict[ + fields.DetectionResultFields.detection_classes] + rpn_features_to_crop = prediction_dict['rpn_features_to_crop'] + batch_size = tf.shape(detection_boxes)[0] + max_detection = tf.shape(detection_boxes)[1] + flattened_detected_feature_maps = ( + self._compute_second_stage_input_feature_maps( + rpn_features_to_crop, detection_boxes)) + curr_box_classifier_features = self._extract_box_classifier_features( + flattened_detected_feature_maps) + + if self._mask_rcnn_box_predictor.is_keras_model: + mask_predictions = self._mask_rcnn_box_predictor( + [curr_box_classifier_features], + prediction_stage=3) + else: + mask_predictions = self._mask_rcnn_box_predictor.predict( + [curr_box_classifier_features], + num_predictions_per_location=[1], + scope=self.second_stage_box_predictor_scope, + prediction_stage=3) + + detection_masks = tf.squeeze(mask_predictions[ + box_predictor.MASK_PREDICTIONS], axis=1) + + _, num_classes, mask_height, mask_width = ( + detection_masks.get_shape().as_list()) + _, max_detection = detection_classes.get_shape().as_list() + prediction_dict['mask_predictions'] = tf.reshape( + detection_masks, [-1, num_classes, mask_height, mask_width]) + if num_classes > 1: + detection_masks = self._gather_instance_masks( + detection_masks, detection_classes) + + detection_masks = tf.cast(detection_masks, tf.float32) + prediction_dict[fields.DetectionResultFields.detection_masks] = ( + tf.reshape(tf.sigmoid(detection_masks), + [batch_size, max_detection, mask_height, mask_width])) + + return prediction_dict + + def _gather_instance_masks(self, instance_masks, classes): + """Gathers the masks that correspond to classes. + + Args: + instance_masks: A 4-D float32 tensor with shape + [K, num_classes, mask_height, mask_width]. + classes: A 2-D int32 tensor with shape [batch_size, max_detection]. 
+ + Returns: + masks: a 3-D float32 tensor with shape [K, mask_height, mask_width]. + """ + _, num_classes, height, width = instance_masks.get_shape().as_list() + k = tf.shape(instance_masks)[0] + instance_masks = tf.reshape(instance_masks, [-1, height, width]) + classes = tf.cast(tf.reshape(classes, [-1]), dtype=tf.int32) + gather_idx = tf.range(k) * num_classes + classes + return tf.gather(instance_masks, gather_idx) + + def _extract_rpn_feature_maps(self, preprocessed_inputs): + """Extracts RPN features. + + This function extracts two feature maps: a feature map to be directly + fed to a box predictor (to predict location and objectness scores for + proposals) and a feature map from which to crop regions which will then + be sent to the second stage box classifier. + + Args: + preprocessed_inputs: a [batch, height, width, channels] image tensor. + + Returns: + rpn_box_predictor_features: A 4-D float32 tensor with shape + [batch, height, width, depth] to be used for predicting proposal boxes + and corresponding objectness scores. + rpn_features_to_crop: A 4-D float32 tensor with shape + [batch, height, width, depth] representing image features to crop using + the proposals boxes. + anchors: A BoxList representing anchors (for the RPN) in + absolute coordinates. + image_shape: A 1-D tensor representing the input image shape. + """ + image_shape = tf.shape(preprocessed_inputs) + + rpn_features_to_crop, self.endpoints = self._extract_proposal_features( + preprocessed_inputs) + + feature_map_shape = tf.shape(rpn_features_to_crop) + anchors = box_list_ops.concatenate( + self._first_stage_anchor_generator.generate([(feature_map_shape[1], + feature_map_shape[2])])) + rpn_box_predictor_features = ( + self._first_stage_box_predictor_first_conv(rpn_features_to_crop)) + return (rpn_box_predictor_features, rpn_features_to_crop, + anchors, image_shape) + + def _extract_proposal_features(self, preprocessed_inputs): + if self._feature_extractor_for_proposal_features == ( + _UNINITIALIZED_FEATURE_EXTRACTOR): + self._feature_extractor_for_proposal_features = ( + self._feature_extractor.get_proposal_feature_extractor_model( + name=self.first_stage_feature_extractor_scope)) + if self._feature_extractor_for_proposal_features: + proposal_features = ( + self._feature_extractor_for_proposal_features(preprocessed_inputs), + {}) + else: + proposal_features = ( + self._feature_extractor.extract_proposal_features( + preprocessed_inputs, + scope=self.first_stage_feature_extractor_scope)) + return proposal_features + + def _predict_rpn_proposals(self, rpn_box_predictor_features): + """Adds box predictors to RPN feature map to predict proposals. + + Note resulting tensors will not have been postprocessed. + + Args: + rpn_box_predictor_features: A 4-D float32 tensor with shape + [batch, height, width, depth] to be used for predicting proposal boxes + and corresponding objectness scores. + + Returns: + box_encodings: 3-D float tensor of shape + [batch_size, num_anchors, self._box_coder.code_size] containing + predicted boxes. + objectness_predictions_with_background: 3-D float tensor of shape + [batch_size, num_anchors, 2] containing class + predictions (logits) for each of the anchors. Note that this + tensor *includes* background class predictions (at class index 0). + + Raises: + RuntimeError: if the anchor generator generates anchors corresponding to + multiple feature maps. We currently assume that a single feature map + is generated for the RPN. 
+ """ + num_anchors_per_location = ( + self._first_stage_anchor_generator.num_anchors_per_location()) + if len(num_anchors_per_location) != 1: + raise RuntimeError('anchor_generator is expected to generate anchors ' + 'corresponding to a single feature map.') + if self._first_stage_box_predictor.is_keras_model: + box_predictions = self._first_stage_box_predictor( + [rpn_box_predictor_features]) + else: + box_predictions = self._first_stage_box_predictor.predict( + [rpn_box_predictor_features], + num_anchors_per_location, + scope=self.first_stage_box_predictor_scope) + + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + objectness_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (tf.squeeze(box_encodings, axis=2), + objectness_predictions_with_background) + + def _remove_invalid_anchors_and_predictions( + self, + box_encodings, + objectness_predictions_with_background, + anchors_boxlist, + clip_window): + """Removes anchors that (partially) fall outside an image. + + Also removes associated box encodings and objectness predictions. + + Args: + box_encodings: 3-D float tensor of shape + [batch_size, num_anchors, self._box_coder.code_size] containing + predicted boxes. + objectness_predictions_with_background: 3-D float tensor of shape + [batch_size, num_anchors, 2] containing class + predictions (logits) for each of the anchors. Note that this + tensor *includes* background class predictions (at class index 0). + anchors_boxlist: A BoxList representing num_anchors anchors (for the RPN) + in absolute coordinates. + clip_window: a 1-D tensor representing the [ymin, xmin, ymax, xmax] + extent of the window to clip/prune to. + + Returns: + box_encodings: 4-D float tensor of shape + [batch_size, num_valid_anchors, self._box_coder.code_size] containing + predicted boxes, where num_valid_anchors <= num_anchors + objectness_predictions_with_background: 2-D float tensor of shape + [batch_size, num_valid_anchors, 2] containing class + predictions (logits) for each of the anchors, where + num_valid_anchors <= num_anchors. Note that this + tensor *includes* background class predictions (at class index 0). + anchors: A BoxList representing num_valid_anchors anchors (for the RPN) in + absolute coordinates. + """ + pruned_anchors_boxlist, keep_indices = box_list_ops.prune_outside_window( + anchors_boxlist, clip_window) + def _batch_gather_kept_indices(predictions_tensor): + return shape_utils.static_or_dynamic_map_fn( + functools.partial(tf.gather, indices=keep_indices), + elems=predictions_tensor, + dtype=tf.float32, + parallel_iterations=self._parallel_iterations, + back_prop=True) + return (_batch_gather_kept_indices(box_encodings), + _batch_gather_kept_indices(objectness_predictions_with_background), + pruned_anchors_boxlist) + + def _flatten_first_two_dimensions(self, inputs): + """Flattens `K-d` tensor along batch dimension to be a `(K-1)-d` tensor. + + Converts `inputs` with shape [A, B, ..., depth] into a tensor of shape + [A * B, ..., depth]. + + Args: + inputs: A float tensor with shape [A, B, ..., depth]. Note that the first + two and last dimensions must be statically defined. + Returns: + A float tensor with shape [A * B, ..., depth] (where the first and last + dimension are statically defined. 
+ """ + combined_shape = shape_utils.combined_static_and_dynamic_shape(inputs) + flattened_shape = tf.stack([combined_shape[0] * combined_shape[1]] + + combined_shape[2:]) + return tf.reshape(inputs, flattened_shape) + + def postprocess(self, prediction_dict, true_image_shapes): + """Convert prediction tensors to final detections. + + This function converts raw predictions tensors to final detection results. + See base class for output format conventions. Note also that by default, + scores are to be interpreted as logits, but if a score_converter is used, + then scores are remapped (and may thus have a different interpretation). + + If number_of_stages=1, the returned results represent proposals from the + first stage RPN and are padded to have self.max_num_proposals for each + image; otherwise, the results can be interpreted as multiclass detections + from the full two-stage model and are padded to self._max_detections. + + Args: + prediction_dict: a dictionary holding prediction tensors (see the + documentation for the predict method. If number_of_stages=1, we + expect prediction_dict to contain `rpn_box_encodings`, + `rpn_objectness_predictions_with_background`, `rpn_features_to_crop`, + and `anchors` fields. Otherwise we expect prediction_dict to + additionally contain `refined_box_encodings`, + `class_predictions_with_background`, `num_proposals`, + `proposal_boxes` and, optionally, `mask_predictions` fields. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + + Returns: + detections: a dictionary containing the following fields + detection_boxes: [batch, max_detection, 4] + detection_scores: [batch, max_detections] + detection_multiclass_scores: [batch, max_detections, 2] + detection_anchor_indices: [batch, max_detections] + detection_classes: [batch, max_detections] + (this entry is only created if rpn_mode=False) + num_detections: [batch] + raw_detection_boxes: [batch, total_detections, 4] + raw_detection_scores: [batch, total_detections, num_classes + 1] + + Raises: + ValueError: If `predict` is called before `preprocess`. + ValueError: If `_output_final_box_features` is true but + rpn_features_to_crop is not in the prediction_dict. + """ + + with tf.name_scope('FirstStagePostprocessor'): + if self._number_of_stages == 1: + + image_shapes = self._image_batch_shape_2d( + prediction_dict['image_shape']) + (proposal_boxes, proposal_scores, proposal_multiclass_scores, + num_proposals, raw_proposal_boxes, + raw_proposal_scores) = self._postprocess_rpn( + prediction_dict['rpn_box_encodings'], + prediction_dict['rpn_objectness_predictions_with_background'], + prediction_dict['anchors'], image_shapes, true_image_shapes) + return { + fields.DetectionResultFields.detection_boxes: + proposal_boxes, + fields.DetectionResultFields.detection_scores: + proposal_scores, + fields.DetectionResultFields.detection_multiclass_scores: + proposal_multiclass_scores, + fields.DetectionResultFields.num_detections: + tf.cast(num_proposals, dtype=tf.float32), + fields.DetectionResultFields.raw_detection_boxes: + raw_proposal_boxes, + fields.DetectionResultFields.raw_detection_scores: + raw_proposal_scores + } + + # TODO(jrru): Remove mask_predictions from _post_process_box_classifier. 
+ if (self._number_of_stages == 2 or + (self._number_of_stages == 3 and self._is_training)): + with tf.name_scope('SecondStagePostprocessor'): + mask_predictions = prediction_dict.get(box_predictor.MASK_PREDICTIONS) + detections_dict = self._postprocess_box_classifier( + prediction_dict['refined_box_encodings'], + prediction_dict['class_predictions_with_background'], + prediction_dict['proposal_boxes'], + prediction_dict['num_proposals'], + true_image_shapes, + mask_predictions=mask_predictions) + + if self._output_final_box_features: + if 'rpn_features_to_crop' not in prediction_dict: + raise ValueError( + 'Please make sure rpn_features_to_crop is in the prediction_dict.' + ) + detections_dict[ + 'detection_features'] = self._add_detection_features_output_node( + detections_dict[fields.DetectionResultFields.detection_boxes], + prediction_dict['rpn_features_to_crop']) + + return detections_dict + + if self._number_of_stages == 3: + # Post processing is already performed in 3rd stage. We need to transfer + # postprocessed tensors from `prediction_dict` to `detections_dict`. + # Remove any items from the prediction dictionary if they are not pure + # Tensors. + non_tensor_predictions = [ + k for k, v in prediction_dict.items() if not isinstance(v, tf.Tensor)] + for k in non_tensor_predictions: + tf.logging.info('Removing {0} from prediction_dict'.format(k)) + prediction_dict.pop(k) + return prediction_dict + + def _add_detection_features_output_node(self, detection_boxes, + rpn_features_to_crop): + """Add detection features to outputs. + + This function extracts box features for each box in rpn_features_to_crop. + It returns the extracted box features, reshaped to + [batch size, max_detections, height, width, depth], and average pools + the extracted features across the spatial dimensions and adds a graph node + to the pooled features named 'pooled_detection_features' + + Args: + detection_boxes: a 3-D float32 tensor of shape + [batch_size, max_detections, 4] which represents the bounding boxes. + rpn_features_to_crop: A 4-D float32 tensor with shape + [batch, height, width, depth] representing image features to crop using + the proposals boxes. 
+
+ Returns:
+ detection_features: a 5-D float32 tensor of shape
+ [batch_size, max_detections, height, width, depth] representing
+ cropped image features.
+ """
+ with tf.name_scope('SecondStageDetectionFeaturesExtract'):
+ flattened_detected_feature_maps = (
+ self._compute_second_stage_input_feature_maps(
+ rpn_features_to_crop, detection_boxes))
+ detection_features_unpooled = self._extract_box_classifier_features(
+ flattened_detected_feature_maps)
+
+ batch_size = tf.shape(detection_boxes)[0]
+ max_detections = tf.shape(detection_boxes)[1]
+ detection_features_pool = tf.reduce_mean(
+ detection_features_unpooled, axis=[1, 2])
+ reshaped_detection_features_pool = tf.reshape(
+ detection_features_pool,
+ [batch_size, max_detections, tf.shape(detection_features_pool)[-1]])
+ reshaped_detection_features_pool = tf.identity(
+ reshaped_detection_features_pool, 'pooled_detection_features')
+
+ reshaped_detection_features = tf.reshape(
+ detection_features_unpooled,
+ [batch_size, max_detections,
+ tf.shape(detection_features_unpooled)[1],
+ tf.shape(detection_features_unpooled)[2],
+ tf.shape(detection_features_unpooled)[3]])
+
+ return reshaped_detection_features
+
+ def _postprocess_rpn(self,
+ rpn_box_encodings_batch,
+ rpn_objectness_predictions_with_background_batch,
+ anchors,
+ image_shapes,
+ true_image_shapes):
+ """Converts first stage prediction tensors from the RPN to proposals.
+
+ This function decodes the raw RPN predictions and runs non-max suppression
+ on the result.
+
+ Note that the behavior of this function is slightly modified during
+ training --- specifically, we stop the gradient from passing through the
+ proposal boxes and we only return a balanced sampled subset of proposals
+ with size `second_stage_batch_size`.
+
+ Args:
+ rpn_box_encodings_batch: A 3-D float32 tensor of shape
+ [batch_size, num_anchors, self._box_coder.code_size] containing
+ predicted proposal box encodings.
+ rpn_objectness_predictions_with_background_batch: A 3-D float tensor of
+ shape [batch_size, num_anchors, 2] containing objectness predictions
+ (logits) for each of the anchors with 0 corresponding to background
+ and 1 corresponding to object.
+ anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors
+ for the first stage RPN. Note that `num_anchors` can differ depending
+ on whether the model is created in training or inference mode.
+ image_shapes: A 2-D tensor of shape [batch, 3] containing the shapes of
+ images in the batch.
+ true_image_shapes: int32 tensor of shape [batch, 3] where each row is
+ of the form [height, width, channels] indicating the shapes
+ of true images in the resized images, as resized images can be padded
+ with zeros.
+
+ Returns:
+ proposal_boxes: A float tensor with shape
+ [batch_size, max_num_proposals, 4] representing the (potentially zero
+ padded) proposal boxes for all images in the batch. These boxes are
+ represented as normalized coordinates.
+ proposal_scores: A float tensor with shape
+ [batch_size, max_num_proposals] representing the (potentially zero
+ padded) proposal objectness scores for all images in the batch.
+ proposal_multiclass_scores: A float tensor with shape
+ [batch_size, max_num_proposals, 2] representing the (potentially zero
+ padded) proposal multiclass scores for all images in the batch.
+ num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch]
+ representing the number of proposals predicted for each image in
+ the batch.
+ raw_detection_boxes: [batch, total_detections, 4] tensor with decoded + proposal boxes before Non-Max Suppression. + raw_detection_scores: [batch, total_detections, + num_classes_with_background] tensor of multi-class scores for raw + proposal boxes. + """ + rpn_box_encodings_batch = tf.expand_dims(rpn_box_encodings_batch, axis=2) + rpn_encodings_shape = shape_utils.combined_static_and_dynamic_shape( + rpn_box_encodings_batch) + tiled_anchor_boxes = tf.tile( + tf.expand_dims(anchors, 0), [rpn_encodings_shape[0], 1, 1]) + proposal_boxes = self._batch_decode_boxes(rpn_box_encodings_batch, + tiled_anchor_boxes) + raw_proposal_boxes = tf.squeeze(proposal_boxes, axis=2) + rpn_objectness_softmax = tf.nn.softmax( + rpn_objectness_predictions_with_background_batch) + rpn_objectness_softmax_without_background = rpn_objectness_softmax[:, :, 1] + clip_window = self._compute_clip_window(true_image_shapes) + additional_fields = {'multiclass_scores': rpn_objectness_softmax} + (proposal_boxes, proposal_scores, _, _, nmsed_additional_fields, + num_proposals) = self._first_stage_nms_fn( + tf.expand_dims(raw_proposal_boxes, axis=2), + tf.expand_dims(rpn_objectness_softmax_without_background, axis=2), + additional_fields=additional_fields, + clip_window=clip_window) + if self._is_training: + proposal_boxes = tf.stop_gradient(proposal_boxes) + if not self._hard_example_miner: + (groundtruth_boxlists, groundtruth_classes_with_background_list, _, + groundtruth_weights_list + ) = self._format_groundtruth_data(image_shapes) + (proposal_boxes, proposal_scores, + num_proposals) = self._sample_box_classifier_batch( + proposal_boxes, proposal_scores, num_proposals, + groundtruth_boxlists, groundtruth_classes_with_background_list, + groundtruth_weights_list) + # normalize proposal boxes + def normalize_boxes(args): + proposal_boxes_per_image = args[0] + image_shape = args[1] + normalized_boxes_per_image = box_list_ops.to_normalized_coordinates( + box_list.BoxList(proposal_boxes_per_image), image_shape[0], + image_shape[1], check_range=False).get() + return normalized_boxes_per_image + normalized_proposal_boxes = shape_utils.static_or_dynamic_map_fn( + normalize_boxes, elems=[proposal_boxes, image_shapes], dtype=tf.float32) + raw_normalized_proposal_boxes = shape_utils.static_or_dynamic_map_fn( + normalize_boxes, + elems=[raw_proposal_boxes, image_shapes], + dtype=tf.float32) + proposal_multiclass_scores = ( + nmsed_additional_fields.get('multiclass_scores') + if nmsed_additional_fields else None) + return (normalized_proposal_boxes, proposal_scores, + proposal_multiclass_scores, num_proposals, + raw_normalized_proposal_boxes, rpn_objectness_softmax) + + def _sample_box_classifier_batch( + self, + proposal_boxes, + proposal_scores, + num_proposals, + groundtruth_boxlists, + groundtruth_classes_with_background_list, + groundtruth_weights_list): + """Samples a minibatch for second stage. + + Args: + proposal_boxes: A float tensor with shape + [batch_size, num_proposals, 4] representing the (potentially zero + padded) proposal boxes for all images in the batch. These boxes are + represented in absolute coordinates. + proposal_scores: A float tensor with shape + [batch_size, num_proposals] representing the (potentially zero + padded) proposal objectness scores for all images in the batch. + num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch] + representing the number of proposals predicted for each image in + the batch. 
+ groundtruth_boxlists: A list of BoxLists containing (absolute) coordinates + of the groundtruth boxes. + groundtruth_classes_with_background_list: A list of 2-D one-hot + (or k-hot) tensors of shape [num_boxes, num_classes+1] containing the + class targets with the 0th index assumed to map to the background class. + groundtruth_weights_list: A list of 1-D tensors of shape [num_boxes] + indicating the weight associated with the groundtruth boxes. + + Returns: + proposal_boxes: A float tensor with shape + [batch_size, second_stage_batch_size, 4] representing the (potentially + zero padded) proposal boxes for all images in the batch. These boxes + are represented in absolute coordinates. + proposal_scores: A float tensor with shape + [batch_size, second_stage_batch_size] representing the (potentially zero + padded) proposal objectness scores for all images in the batch. + num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch] + representing the number of proposals predicted for each image in + the batch. + """ + single_image_proposal_box_sample = [] + single_image_proposal_score_sample = [] + single_image_num_proposals_sample = [] + for (single_image_proposal_boxes, + single_image_proposal_scores, + single_image_num_proposals, + single_image_groundtruth_boxlist, + single_image_groundtruth_classes_with_background, + single_image_groundtruth_weights) in zip( + tf.unstack(proposal_boxes), + tf.unstack(proposal_scores), + tf.unstack(num_proposals), + groundtruth_boxlists, + groundtruth_classes_with_background_list, + groundtruth_weights_list): + single_image_boxlist = box_list.BoxList(single_image_proposal_boxes) + single_image_boxlist.add_field(fields.BoxListFields.scores, + single_image_proposal_scores) + sampled_boxlist = self._sample_box_classifier_minibatch_single_image( + single_image_boxlist, + single_image_num_proposals, + single_image_groundtruth_boxlist, + single_image_groundtruth_classes_with_background, + single_image_groundtruth_weights) + sampled_padded_boxlist = box_list_ops.pad_or_clip_box_list( + sampled_boxlist, + num_boxes=self._second_stage_batch_size) + single_image_num_proposals_sample.append(tf.minimum( + sampled_boxlist.num_boxes(), + self._second_stage_batch_size)) + bb = sampled_padded_boxlist.get() + single_image_proposal_box_sample.append(bb) + single_image_proposal_score_sample.append( + sampled_padded_boxlist.get_field(fields.BoxListFields.scores)) + return (tf.stack(single_image_proposal_box_sample), + tf.stack(single_image_proposal_score_sample), + tf.stack(single_image_num_proposals_sample)) + + def _format_groundtruth_data(self, image_shapes): + """Helper function for preparing groundtruth data for target assignment. + + In order to be consistent with the model.DetectionModel interface, + groundtruth boxes are specified in normalized coordinates and classes are + specified as label indices with no assumed background category. To prepare + for target assignment, we: + 1) convert boxes to absolute coordinates, + 2) add a background class at class index 0 + 3) groundtruth instance masks, if available, are resized to match + image_shape. + + Args: + image_shapes: a 2-D int32 tensor of shape [batch_size, 3] containing + shapes of input image in the batch. + + Returns: + groundtruth_boxlists: A list of BoxLists containing (absolute) coordinates + of the groundtruth boxes. 
+ groundtruth_classes_with_background_list: A list of 2-D one-hot + (or k-hot) tensors of shape [num_boxes, num_classes+1] containing the + class targets with the 0th index assumed to map to the background class. + groundtruth_masks_list: If present, a list of 3-D tf.float32 tensors of + shape [num_boxes, image_height, image_width] containing instance masks. + This is set to None if no masks exist in the provided groundtruth. + """ + # pylint: disable=g-complex-comprehension + groundtruth_boxlists = [ + box_list_ops.to_absolute_coordinates( + box_list.BoxList(boxes), image_shapes[i, 0], image_shapes[i, 1]) + for i, boxes in enumerate( + self.groundtruth_lists(fields.BoxListFields.boxes)) + ] + groundtruth_classes_with_background_list = [] + for one_hot_encoding in self.groundtruth_lists( + fields.BoxListFields.classes): + groundtruth_classes_with_background_list.append( + tf.cast( + tf.pad(one_hot_encoding, [[0, 0], [1, 0]], mode='CONSTANT'), + dtype=tf.float32)) + + groundtruth_masks_list = self._groundtruth_lists.get( + fields.BoxListFields.masks) + # TODO(rathodv): Remove mask resizing once the legacy pipeline is deleted. + if groundtruth_masks_list is not None and self._resize_masks: + resized_masks_list = [] + for mask in groundtruth_masks_list: + + _, resized_mask, _ = self._image_resizer_fn( + # Reuse the given `image_resizer_fn` to resize groundtruth masks. + # `mask` tensor for an image is of the shape [num_masks, + # image_height, image_width]. Below we create a dummy image of the + # the shape [image_height, image_width, 1] to use with + # `image_resizer_fn`. + image=tf.zeros(tf.stack([tf.shape(mask)[1], + tf.shape(mask)[2], 1])), + masks=mask) + resized_masks_list.append(resized_mask) + + groundtruth_masks_list = resized_masks_list + # Masks could be set to bfloat16 in the input pipeline for performance + # reasons. Convert masks back to floating point space here since the rest of + # this module assumes groundtruth to be of float32 type. + float_groundtruth_masks_list = [] + if groundtruth_masks_list: + for mask in groundtruth_masks_list: + float_groundtruth_masks_list.append(tf.cast(mask, tf.float32)) + groundtruth_masks_list = float_groundtruth_masks_list + + if self.groundtruth_has_field(fields.BoxListFields.weights): + groundtruth_weights_list = self.groundtruth_lists( + fields.BoxListFields.weights) + else: + # Set weights for all batch elements equally to 1.0 + groundtruth_weights_list = [] + for groundtruth_classes in groundtruth_classes_with_background_list: + num_gt = tf.shape(groundtruth_classes)[0] + groundtruth_weights = tf.ones(num_gt) + groundtruth_weights_list.append(groundtruth_weights) + + return (groundtruth_boxlists, groundtruth_classes_with_background_list, + groundtruth_masks_list, groundtruth_weights_list) + + def _sample_box_classifier_minibatch_single_image( + self, proposal_boxlist, num_valid_proposals, groundtruth_boxlist, + groundtruth_classes_with_background, groundtruth_weights): + """Samples a mini-batch of proposals to be sent to the box classifier. + + Helper function for self._postprocess_rpn. + + Args: + proposal_boxlist: A BoxList containing K proposal boxes in absolute + coordinates. + num_valid_proposals: Number of valid proposals in the proposal boxlist. + groundtruth_boxlist: A Boxlist containing N groundtruth object boxes in + absolute coordinates. + groundtruth_classes_with_background: A tensor with shape + `[N, self.num_classes + 1]` representing groundtruth classes. 
The
+ classes are assumed to be k-hot encoded, and include background as the
+ zero-th class.
+ groundtruth_weights: Weights attached to the groundtruth_boxes.
+
+ Returns:
+ a BoxList containing sampled proposals.
+ """
+ (cls_targets, cls_weights, _, _, _) = self._detector_target_assigner.assign(
+ proposal_boxlist,
+ groundtruth_boxlist,
+ groundtruth_classes_with_background,
+ unmatched_class_label=tf.constant(
+ [1] + self._num_classes * [0], dtype=tf.float32),
+ groundtruth_weights=groundtruth_weights)
+ # Selects all boxes as candidates if none of them is selected according
+ # to cls_weights. This could happen as boxes within certain IOU ranges
+ # are ignored. If triggered, the selected boxes will still be ignored
+ # during loss computation.
+ cls_weights = tf.reduce_mean(cls_weights, axis=-1)
+ positive_indicator = tf.greater(tf.argmax(cls_targets, axis=1), 0)
+ valid_indicator = tf.logical_and(
+ tf.range(proposal_boxlist.num_boxes()) < num_valid_proposals,
+ cls_weights > 0
+ )
+ selected_positions = self._second_stage_sampler.subsample(
+ valid_indicator,
+ self._second_stage_batch_size,
+ positive_indicator)
+ return box_list_ops.boolean_mask(
+ proposal_boxlist,
+ selected_positions,
+ use_static_shapes=self._use_static_shapes,
+ indicator_sum=(self._second_stage_batch_size
+ if self._use_static_shapes else None))
+
+ def _compute_second_stage_input_feature_maps(self, features_to_crop,
+ proposal_boxes_normalized,
+ **side_inputs):
+ """Crops to a set of proposals from the feature map for a batch of images.
+
+ Helper function for self._postprocess_rpn. This function calls
+ `tf.image.crop_and_resize` to create the feature map to be passed to the
+ second stage box classifier for each proposal.
+
+ Args:
+ features_to_crop: A float32 tensor with shape
+ [batch_size, height, width, depth]
+ proposal_boxes_normalized: A float32 tensor with shape [batch_size,
+ num_proposals, box_code_size] containing proposal boxes in
+ normalized coordinates.
+ **side_inputs: additional tensors that are required by the network.
+
+ Returns:
+ A float32 tensor with shape [K, new_height, new_width, depth].
+ """
+ cropped_regions = self._flatten_first_two_dimensions(
+ self._crop_and_resize_fn(
+ features_to_crop, proposal_boxes_normalized,
+ [self._initial_crop_size, self._initial_crop_size]))
+ return self._maxpool_layer(cropped_regions)
+
+ def _postprocess_box_classifier(self,
+ refined_box_encodings,
+ class_predictions_with_background,
+ proposal_boxes,
+ num_proposals,
+ image_shapes,
+ mask_predictions=None):
+ """Converts predictions from the second stage box classifier to detections.
+
+ Args:
+ refined_box_encodings: a 3-D float tensor with shape
+ [total_num_padded_proposals, num_classes, self._box_coder.code_size]
+ representing predicted (final) refined box encodings. If using a shared
+ box across classes the shape will instead be
+ [total_num_padded_proposals, 1, 4].
+ class_predictions_with_background: a 2-D float tensor with shape
+ [total_num_padded_proposals, num_classes + 1] containing class
+ predictions (logits) for each of the proposals. Note that this tensor
+ *includes* background class predictions (at class index 0).
+ proposal_boxes: a 3-D float tensor with shape
+ [batch_size, self.max_num_proposals, 4] representing decoded proposal
+ bounding boxes in absolute coordinates.
+ num_proposals: a 1-D int32 tensor of shape [batch] representing the number
+ of proposals predicted for each image in the batch.
+ image_shapes: a 2-D int32 tensor containing shapes of input image in the + batch. + mask_predictions: (optional) a 4-D float tensor with shape + [total_num_padded_proposals, num_classes, mask_height, mask_width] + containing instance mask prediction logits. + + Returns: + A dictionary containing: + `detection_boxes`: [batch, max_detection, 4] in normalized co-ordinates. + `detection_scores`: [batch, max_detections] + `detection_multiclass_scores`: [batch, max_detections, + num_classes_with_background] tensor with class score distribution for + post-processed detection boxes including background class if any. + `detection_anchor_indices`: [batch, max_detections] with anchor + indices. + `detection_classes`: [batch, max_detections] + `num_detections`: [batch] + `detection_masks`: + (optional) [batch, max_detections, mask_height, mask_width]. Note + that a pixel-wise sigmoid score converter is applied to the detection + masks. + `raw_detection_boxes`: [batch, total_detections, 4] tensor with decoded + detection boxes in normalized coordinates, before Non-Max Suppression. + The value total_detections is the number of second stage anchors + (i.e. the total number of boxes before NMS). + `raw_detection_scores`: [batch, total_detections, + num_classes_with_background] tensor of multi-class scores for + raw detection boxes. The value total_detections is the number of + second stage anchors (i.e. the total number of boxes before NMS). + """ + refined_box_encodings_batch = tf.reshape( + refined_box_encodings, + [-1, + self.max_num_proposals, + refined_box_encodings.shape[1], + self._box_coder.code_size]) + class_predictions_with_background_batch = tf.reshape( + class_predictions_with_background, + [-1, self.max_num_proposals, self.num_classes + 1] + ) + refined_decoded_boxes_batch = self._batch_decode_boxes( + refined_box_encodings_batch, proposal_boxes) + class_predictions_with_background_batch_normalized = ( + self._second_stage_score_conversion_fn( + class_predictions_with_background_batch)) + class_predictions_batch = tf.reshape( + tf.slice(class_predictions_with_background_batch_normalized, + [0, 0, 1], [-1, -1, -1]), + [-1, self.max_num_proposals, self.num_classes]) + clip_window = self._compute_clip_window(image_shapes) + mask_predictions_batch = None + if mask_predictions is not None: + mask_height = shape_utils.get_dim_as_int(mask_predictions.shape[2]) + mask_width = shape_utils.get_dim_as_int(mask_predictions.shape[3]) + mask_predictions = tf.sigmoid(mask_predictions) + mask_predictions_batch = tf.reshape( + mask_predictions, [-1, self.max_num_proposals, + self.num_classes, mask_height, mask_width]) + + batch_size = shape_utils.combined_static_and_dynamic_shape( + refined_box_encodings_batch)[0] + batch_anchor_indices = tf.tile( + tf.expand_dims(tf.range(self.max_num_proposals), 0), + multiples=[batch_size, 1]) + additional_fields = { + 'multiclass_scores': class_predictions_with_background_batch_normalized, + 'anchor_indices': tf.cast(batch_anchor_indices, tf.float32) + } + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + nmsed_additional_fields, num_detections) = self._second_stage_nms_fn( + refined_decoded_boxes_batch, + class_predictions_batch, + clip_window=clip_window, + change_coordinate_frame=True, + num_valid_boxes=num_proposals, + additional_fields=additional_fields, + masks=mask_predictions_batch) + if refined_decoded_boxes_batch.shape[2] > 1: + class_ids = tf.expand_dims( + tf.argmax(class_predictions_with_background_batch[:, :, 1:], axis=2, + output_type=tf.int32), 
+ axis=-1) + raw_detection_boxes = tf.squeeze( + tf.batch_gather(refined_decoded_boxes_batch, class_ids), axis=2) + else: + raw_detection_boxes = tf.squeeze(refined_decoded_boxes_batch, axis=2) + + raw_normalized_detection_boxes = shape_utils.static_or_dynamic_map_fn( + self._normalize_and_clip_boxes, + elems=[raw_detection_boxes, image_shapes], + dtype=tf.float32) + + detections = { + fields.DetectionResultFields.detection_boxes: + nmsed_boxes, + fields.DetectionResultFields.detection_scores: + nmsed_scores, + fields.DetectionResultFields.detection_classes: + nmsed_classes, + fields.DetectionResultFields.detection_multiclass_scores: + nmsed_additional_fields['multiclass_scores'], + fields.DetectionResultFields.detection_anchor_indices: + tf.cast(nmsed_additional_fields['anchor_indices'], tf.int32), + fields.DetectionResultFields.num_detections: + tf.cast(num_detections, dtype=tf.float32), + fields.DetectionResultFields.raw_detection_boxes: + raw_normalized_detection_boxes, + fields.DetectionResultFields.raw_detection_scores: + class_predictions_with_background_batch_normalized + } + if nmsed_masks is not None: + detections[fields.DetectionResultFields.detection_masks] = nmsed_masks + return detections + + def _batch_decode_boxes(self, box_encodings, anchor_boxes): + """Decodes box encodings with respect to the anchor boxes. + + Args: + box_encodings: a 4-D tensor with shape + [batch_size, num_anchors, num_classes, self._box_coder.code_size] + representing box encodings. + anchor_boxes: [batch_size, num_anchors, self._box_coder.code_size] + representing decoded bounding boxes. If using a shared box across + classes the shape will instead be + [total_num_proposals, 1, self._box_coder.code_size]. + + Returns: + decoded_boxes: a + [batch_size, num_anchors, num_classes, self._box_coder.code_size] + float tensor representing bounding box predictions (for each image in + batch, proposal and class). If using a shared box across classes the + shape will instead be + [batch_size, num_anchors, 1, self._box_coder.code_size]. 
+ """ + combined_shape = shape_utils.combined_static_and_dynamic_shape( + box_encodings) + num_classes = combined_shape[2] + tiled_anchor_boxes = tf.tile( + tf.expand_dims(anchor_boxes, 2), [1, 1, num_classes, 1]) + tiled_anchors_boxlist = box_list.BoxList( + tf.reshape(tiled_anchor_boxes, [-1, 4])) + decoded_boxes = self._box_coder.decode( + tf.reshape(box_encodings, [-1, self._box_coder.code_size]), + tiled_anchors_boxlist) + return tf.reshape(decoded_boxes.get(), + tf.stack([combined_shape[0], combined_shape[1], + num_classes, 4])) + + def _normalize_and_clip_boxes(self, boxes_and_image_shape): + """Normalize and clip boxes.""" + boxes_per_image = boxes_and_image_shape[0] + image_shape = boxes_and_image_shape[1] + + boxes_contains_classes_dim = boxes_per_image.shape.ndims == 3 + if boxes_contains_classes_dim: + boxes_per_image = shape_utils.flatten_first_n_dimensions( + boxes_per_image, 2) + normalized_boxes_per_image = box_list_ops.to_normalized_coordinates( + box_list.BoxList(boxes_per_image), + image_shape[0], + image_shape[1], + check_range=False).get() + + normalized_boxes_per_image = box_list_ops.clip_to_window( + box_list.BoxList(normalized_boxes_per_image), + tf.constant([0.0, 0.0, 1.0, 1.0], tf.float32), + filter_nonoverlapping=False).get() + + if boxes_contains_classes_dim: + max_num_proposals, num_classes, _ = ( + shape_utils.combined_static_and_dynamic_shape( + boxes_and_image_shape[0])) + normalized_boxes_per_image = shape_utils.expand_first_dimension( + normalized_boxes_per_image, [max_num_proposals, num_classes]) + + return normalized_boxes_per_image + + def loss(self, prediction_dict, true_image_shapes, scope=None): + """Compute scalar loss tensors given prediction tensors. + + If number_of_stages=1, only RPN related losses are computed (i.e., + `rpn_localization_loss` and `rpn_objectness_loss`). Otherwise all + losses are computed. + + Args: + prediction_dict: a dictionary holding prediction tensors (see the + documentation for the predict method. If number_of_stages=1, we + expect prediction_dict to contain `rpn_box_encodings`, + `rpn_objectness_predictions_with_background`, `rpn_features_to_crop`, + `image_shape`, and `anchors` fields. Otherwise we expect + prediction_dict to additionally contain `refined_box_encodings`, + `class_predictions_with_background`, `num_proposals`, and + `proposal_boxes` fields. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + scope: Optional scope name. + + Returns: + a dictionary mapping loss keys (`first_stage_localization_loss`, + `first_stage_objectness_loss`, 'second_stage_localization_loss', + 'second_stage_classification_loss') to scalar tensors representing + corresponding loss values. 
+ """ + with tf.name_scope(scope, 'Loss', prediction_dict.values()): + (groundtruth_boxlists, groundtruth_classes_with_background_list, + groundtruth_masks_list, groundtruth_weights_list + ) = self._format_groundtruth_data( + self._image_batch_shape_2d(prediction_dict['image_shape'])) + loss_dict = self._loss_rpn( + prediction_dict['rpn_box_encodings'], + prediction_dict['rpn_objectness_predictions_with_background'], + prediction_dict['anchors'], groundtruth_boxlists, + groundtruth_classes_with_background_list, groundtruth_weights_list) + if self._number_of_stages > 1: + loss_dict.update( + self._loss_box_classifier( + prediction_dict['refined_box_encodings'], + prediction_dict['class_predictions_with_background'], + prediction_dict['proposal_boxes'], + prediction_dict['num_proposals'], groundtruth_boxlists, + groundtruth_classes_with_background_list, + groundtruth_weights_list, prediction_dict['image_shape'], + prediction_dict.get('mask_predictions'), groundtruth_masks_list, + prediction_dict.get( + fields.DetectionResultFields.detection_boxes), + prediction_dict.get( + fields.DetectionResultFields.num_detections))) + return loss_dict + + def _loss_rpn(self, rpn_box_encodings, + rpn_objectness_predictions_with_background, anchors, + groundtruth_boxlists, groundtruth_classes_with_background_list, + groundtruth_weights_list): + """Computes scalar RPN loss tensors. + + Uses self._proposal_target_assigner to obtain regression and classification + targets for the first stage RPN, samples a "minibatch" of anchors to + participate in the loss computation, and returns the RPN losses. + + Args: + rpn_box_encodings: A 3-D float tensor of shape + [batch_size, num_anchors, self._box_coder.code_size] containing + predicted proposal box encodings. + rpn_objectness_predictions_with_background: A 2-D float tensor of shape + [batch_size, num_anchors, 2] containing objectness predictions + (logits) for each of the anchors with 0 corresponding to background + and 1 corresponding to object. + anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors + for the first stage RPN. Note that `num_anchors` can differ depending + on whether the model is created in training or inference mode. + groundtruth_boxlists: A list of BoxLists containing coordinates of the + groundtruth boxes. + groundtruth_classes_with_background_list: A list of 2-D one-hot + (or k-hot) tensors of shape [num_boxes, num_classes+1] containing the + class targets with the 0th index assumed to map to the background class. + groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape + [num_boxes] containing weights for groundtruth boxes. + + Returns: + a dictionary mapping loss keys (`first_stage_localization_loss`, + `first_stage_objectness_loss`) to scalar tensors representing + corresponding loss values. 
+ """ + with tf.name_scope('RPNLoss'): + (batch_cls_targets, batch_cls_weights, batch_reg_targets, + batch_reg_weights, _) = target_assigner.batch_assign_targets( + target_assigner=self._proposal_target_assigner, + anchors_batch=box_list.BoxList(anchors), + gt_box_batch=groundtruth_boxlists, + gt_class_targets_batch=(len(groundtruth_boxlists) * [None]), + gt_weights_batch=groundtruth_weights_list) + batch_cls_weights = tf.reduce_mean(batch_cls_weights, axis=2) + batch_cls_targets = tf.squeeze(batch_cls_targets, axis=2) + + def _minibatch_subsample_fn(inputs): + cls_targets, cls_weights = inputs + return self._first_stage_sampler.subsample( + tf.cast(cls_weights, tf.bool), + self._first_stage_minibatch_size, tf.cast(cls_targets, tf.bool)) + batch_sampled_indices = tf.cast(shape_utils.static_or_dynamic_map_fn( + _minibatch_subsample_fn, + [batch_cls_targets, batch_cls_weights], + dtype=tf.bool, + parallel_iterations=self._parallel_iterations, + back_prop=True), dtype=tf.float32) + + # Normalize by number of examples in sampled minibatch + normalizer = tf.maximum( + tf.reduce_sum(batch_sampled_indices, axis=1), 1.0) + batch_one_hot_targets = tf.one_hot( + tf.cast(batch_cls_targets, dtype=tf.int32), depth=2) + sampled_reg_indices = tf.multiply(batch_sampled_indices, + batch_reg_weights) + + losses_mask = None + if self.groundtruth_has_field(fields.InputDataFields.is_annotated): + losses_mask = tf.stack(self.groundtruth_lists( + fields.InputDataFields.is_annotated)) + localization_losses = self._first_stage_localization_loss( + rpn_box_encodings, batch_reg_targets, weights=sampled_reg_indices, + losses_mask=losses_mask) + objectness_losses = self._first_stage_objectness_loss( + rpn_objectness_predictions_with_background, + batch_one_hot_targets, + weights=tf.expand_dims(batch_sampled_indices, axis=-1), + losses_mask=losses_mask) + localization_loss = tf.reduce_mean( + tf.reduce_sum(localization_losses, axis=1) / normalizer) + objectness_loss = tf.reduce_mean( + tf.reduce_sum(objectness_losses, axis=1) / normalizer) + + localization_loss = tf.multiply(self._first_stage_loc_loss_weight, + localization_loss, + name='localization_loss') + objectness_loss = tf.multiply(self._first_stage_obj_loss_weight, + objectness_loss, name='objectness_loss') + loss_dict = {'Loss/RPNLoss/localization_loss': localization_loss, + 'Loss/RPNLoss/objectness_loss': objectness_loss} + return loss_dict + + def _loss_box_classifier(self, + refined_box_encodings, + class_predictions_with_background, + proposal_boxes, + num_proposals, + groundtruth_boxlists, + groundtruth_classes_with_background_list, + groundtruth_weights_list, + image_shape, + prediction_masks=None, + groundtruth_masks_list=None, + detection_boxes=None, + num_detections=None): + """Computes scalar box classifier loss tensors. + + Uses self._detector_target_assigner to obtain regression and classification + targets for the second stage box classifier, optionally performs + hard mining, and returns losses. All losses are computed independently + for each image and then averaged across the batch. + Please note that for boxes and masks with multiple labels, the box + regression and mask prediction losses are only computed for one label. + + This function assumes that the proposal boxes in the "padded" regions are + actually zero (and thus should not be matched to). + + + Args: + refined_box_encodings: a 3-D tensor with shape + [total_num_proposals, num_classes, box_coder.code_size] representing + predicted (final) refined box encodings. 
If using a shared box across + classes this will instead have shape + [total_num_proposals, 1, box_coder.code_size]. + class_predictions_with_background: a 2-D tensor with shape + [total_num_proposals, num_classes + 1] containing class + predictions (logits) for each of the anchors. Note that this tensor + *includes* background class predictions (at class index 0). + proposal_boxes: [batch_size, self.max_num_proposals, 4] representing + decoded proposal bounding boxes. + num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch] + representing the number of proposals predicted for each image in + the batch. + groundtruth_boxlists: a list of BoxLists containing coordinates of the + groundtruth boxes. + groundtruth_classes_with_background_list: a list of 2-D one-hot + (or k-hot) tensors of shape [num_boxes, num_classes + 1] containing the + class targets with the 0th index assumed to map to the background class. + groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape + [num_boxes] containing weights for groundtruth boxes. + image_shape: a 1-D tensor of shape [4] representing the image shape. + prediction_masks: an optional 4-D tensor with shape [total_num_proposals, + num_classes, mask_height, mask_width] containing the instance masks for + each box. + groundtruth_masks_list: an optional list of 3-D tensors of shape + [num_boxes, image_height, image_width] containing the instance masks for + each of the boxes. + detection_boxes: 3-D float tensor of shape [batch, + max_total_detections, 4] containing post-processed detection boxes in + normalized co-ordinates. + num_detections: 1-D int32 tensor of shape [batch] containing number of + valid detections in `detection_boxes`. + + Returns: + a dictionary mapping loss keys ('second_stage_localization_loss', + 'second_stage_classification_loss') to scalar tensors representing + corresponding loss values. + + Raises: + ValueError: if `predict_instance_masks` in + second_stage_mask_rcnn_box_predictor is True and + `groundtruth_masks_list` is not provided. 
+ """ + with tf.name_scope('BoxClassifierLoss'): + paddings_indicator = self._padded_batched_proposals_indicator( + num_proposals, proposal_boxes.shape[1]) + proposal_boxlists = [ + box_list.BoxList(proposal_boxes_single_image) + for proposal_boxes_single_image in tf.unstack(proposal_boxes)] + batch_size = len(proposal_boxlists) + + num_proposals_or_one = tf.cast(tf.expand_dims( + tf.maximum(num_proposals, tf.ones_like(num_proposals)), 1), + dtype=tf.float32) + normalizer = tf.tile(num_proposals_or_one, + [1, self.max_num_proposals]) * batch_size + + (batch_cls_targets_with_background, batch_cls_weights, batch_reg_targets, + batch_reg_weights, _) = target_assigner.batch_assign_targets( + target_assigner=self._detector_target_assigner, + anchors_batch=proposal_boxlists, + gt_box_batch=groundtruth_boxlists, + gt_class_targets_batch=groundtruth_classes_with_background_list, + unmatched_class_label=tf.constant( + [1] + self._num_classes * [0], dtype=tf.float32), + gt_weights_batch=groundtruth_weights_list) + + class_predictions_with_background = tf.reshape( + class_predictions_with_background, + [batch_size, self.max_num_proposals, -1]) + + flat_cls_targets_with_background = tf.reshape( + batch_cls_targets_with_background, + [batch_size * self.max_num_proposals, -1]) + one_hot_flat_cls_targets_with_background = tf.argmax( + flat_cls_targets_with_background, axis=1) + one_hot_flat_cls_targets_with_background = tf.one_hot( + one_hot_flat_cls_targets_with_background, + flat_cls_targets_with_background.get_shape()[1]) + + # If using a shared box across classes use directly + if refined_box_encodings.shape[1] == 1: + reshaped_refined_box_encodings = tf.reshape( + refined_box_encodings, + [batch_size, self.max_num_proposals, self._box_coder.code_size]) + # For anchors with multiple labels, picks refined_location_encodings + # for just one class to avoid over-counting for regression loss and + # (optionally) mask loss. 
+ else: + reshaped_refined_box_encodings = ( + self._get_refined_encodings_for_postitive_class( + refined_box_encodings, + one_hot_flat_cls_targets_with_background, batch_size)) + + losses_mask = None + if self.groundtruth_has_field(fields.InputDataFields.is_annotated): + losses_mask = tf.stack(self.groundtruth_lists( + fields.InputDataFields.is_annotated)) + second_stage_loc_losses = self._second_stage_localization_loss( + reshaped_refined_box_encodings, + batch_reg_targets, + weights=batch_reg_weights, + losses_mask=losses_mask) / normalizer + second_stage_cls_losses = ops.reduce_sum_trailing_dimensions( + self._second_stage_classification_loss( + class_predictions_with_background, + batch_cls_targets_with_background, + weights=batch_cls_weights, + losses_mask=losses_mask), + ndims=2) / normalizer + + second_stage_loc_loss = tf.reduce_sum( + second_stage_loc_losses * tf.cast(paddings_indicator, + dtype=tf.float32)) + second_stage_cls_loss = tf.reduce_sum( + second_stage_cls_losses * tf.cast(paddings_indicator, + dtype=tf.float32)) + + if self._hard_example_miner: + (second_stage_loc_loss, second_stage_cls_loss + ) = self._unpad_proposals_and_apply_hard_mining( + proposal_boxlists, second_stage_loc_losses, + second_stage_cls_losses, num_proposals) + localization_loss = tf.multiply(self._second_stage_loc_loss_weight, + second_stage_loc_loss, + name='localization_loss') + + classification_loss = tf.multiply(self._second_stage_cls_loss_weight, + second_stage_cls_loss, + name='classification_loss') + + loss_dict = {'Loss/BoxClassifierLoss/localization_loss': + localization_loss, + 'Loss/BoxClassifierLoss/classification_loss': + classification_loss} + second_stage_mask_loss = None + if prediction_masks is not None: + if groundtruth_masks_list is None: + raise ValueError('Groundtruth instance masks not provided. ' + 'Please configure input reader.') + + if not self._is_training: + (proposal_boxes, proposal_boxlists, paddings_indicator, + one_hot_flat_cls_targets_with_background + ) = self._get_mask_proposal_boxes_and_classes( + detection_boxes, num_detections, image_shape, + groundtruth_boxlists, groundtruth_classes_with_background_list, + groundtruth_weights_list) + unmatched_mask_label = tf.zeros(image_shape[1:3], dtype=tf.float32) + (batch_mask_targets, _, _, batch_mask_target_weights, + _) = target_assigner.batch_assign_targets( + target_assigner=self._detector_target_assigner, + anchors_batch=proposal_boxlists, + gt_box_batch=groundtruth_boxlists, + gt_class_targets_batch=groundtruth_masks_list, + unmatched_class_label=unmatched_mask_label, + gt_weights_batch=groundtruth_weights_list) + + # Pad the prediction_masks with to add zeros for background class to be + # consistent with class predictions. + if prediction_masks.get_shape().as_list()[1] == 1: + # Class agnostic masks or masks for one-class prediction. Logic for + # both cases is the same since background predictions are ignored + # through the batch_mask_target_weights. 
+ prediction_masks_masked_by_class_targets = prediction_masks + else: + prediction_masks_with_background = tf.pad( + prediction_masks, [[0, 0], [1, 0], [0, 0], [0, 0]]) + prediction_masks_masked_by_class_targets = tf.boolean_mask( + prediction_masks_with_background, + tf.greater(one_hot_flat_cls_targets_with_background, 0)) + + mask_height = shape_utils.get_dim_as_int(prediction_masks.shape[2]) + mask_width = shape_utils.get_dim_as_int(prediction_masks.shape[3]) + reshaped_prediction_masks = tf.reshape( + prediction_masks_masked_by_class_targets, + [batch_size, -1, mask_height * mask_width]) + + batch_mask_targets_shape = tf.shape(batch_mask_targets) + flat_gt_masks = tf.reshape(batch_mask_targets, + [-1, batch_mask_targets_shape[2], + batch_mask_targets_shape[3]]) + + # Use normalized proposals to crop mask targets from image masks. + flat_normalized_proposals = box_list_ops.to_normalized_coordinates( + box_list.BoxList(tf.reshape(proposal_boxes, [-1, 4])), + image_shape[1], image_shape[2], check_range=False).get() + + flat_cropped_gt_mask = self._crop_and_resize_fn( + tf.expand_dims(flat_gt_masks, -1), + tf.expand_dims(flat_normalized_proposals, axis=1), + [mask_height, mask_width]) + # Without stopping gradients into cropped groundtruth masks the + # performance with 100-padded groundtruth masks when batch size > 1 is + # about 4% worse. + # TODO(rathodv): Investigate this since we don't expect any variables + # upstream of flat_cropped_gt_mask. + flat_cropped_gt_mask = tf.stop_gradient(flat_cropped_gt_mask) + + batch_cropped_gt_mask = tf.reshape( + flat_cropped_gt_mask, + [batch_size, -1, mask_height * mask_width]) + + mask_losses_weights = ( + batch_mask_target_weights * tf.cast(paddings_indicator, + dtype=tf.float32)) + mask_losses = self._second_stage_mask_loss( + reshaped_prediction_masks, + batch_cropped_gt_mask, + weights=tf.expand_dims(mask_losses_weights, axis=-1), + losses_mask=losses_mask) + total_mask_loss = tf.reduce_sum(mask_losses) + normalizer = tf.maximum( + tf.reduce_sum(mask_losses_weights * mask_height * mask_width), 1.0) + second_stage_mask_loss = total_mask_loss / normalizer + + if second_stage_mask_loss is not None: + mask_loss = tf.multiply(self._second_stage_mask_loss_weight, + second_stage_mask_loss, name='mask_loss') + loss_dict[mask_loss.op.name] = mask_loss + return loss_dict + + def _get_mask_proposal_boxes_and_classes( + self, detection_boxes, num_detections, image_shape, groundtruth_boxlists, + groundtruth_classes_with_background_list, groundtruth_weights_list): + """Returns proposal boxes and class targets to compute evaluation mask loss. + + During evaluation, detection boxes are used to extract features for mask + prediction. Therefore, to compute mask loss during evaluation detection + boxes must be used to compute correct class and mask targets. This function + returns boxes and classes in the correct format for computing mask targets + during evaluation. + + Args: + detection_boxes: A 3-D float tensor of shape [batch, max_detection_boxes, + 4] containing detection boxes in normalized co-ordinates. + num_detections: A 1-D float tensor of shape [batch] containing number of + valid boxes in `detection_boxes`. + image_shape: A 1-D tensor of shape [4] containing image tensor shape. + groundtruth_boxlists: A list of groundtruth boxlists. + groundtruth_classes_with_background_list: A list of groundtruth classes. + groundtruth_weights_list: A list of groundtruth weights. 
+ Return: + mask_proposal_boxes: detection boxes to use for mask proposals in absolute + co-ordinates. + mask_proposal_boxlists: `mask_proposal_boxes` in a list of BoxLists in + absolute co-ordinates. + mask_proposal_paddings_indicator: a tensor indicating valid boxes. + mask_proposal_one_hot_flat_cls_targets_with_background: Class targets + computed using detection boxes. + """ + batch, max_num_detections, _ = detection_boxes.shape.as_list() + proposal_boxes = tf.reshape(box_list_ops.to_absolute_coordinates( + box_list.BoxList(tf.reshape(detection_boxes, [-1, 4])), image_shape[1], + image_shape[2]).get(), [batch, max_num_detections, 4]) + proposal_boxlists = [ + box_list.BoxList(detection_boxes_single_image) + for detection_boxes_single_image in tf.unstack(proposal_boxes) + ] + paddings_indicator = self._padded_batched_proposals_indicator( + tf.cast(num_detections, dtype=tf.int32), detection_boxes.shape[1]) + (batch_cls_targets_with_background, _, _, _, + _) = target_assigner.batch_assign_targets( + target_assigner=self._detector_target_assigner, + anchors_batch=proposal_boxlists, + gt_box_batch=groundtruth_boxlists, + gt_class_targets_batch=groundtruth_classes_with_background_list, + unmatched_class_label=tf.constant( + [1] + self._num_classes * [0], dtype=tf.float32), + gt_weights_batch=groundtruth_weights_list) + flat_cls_targets_with_background = tf.reshape( + batch_cls_targets_with_background, [-1, self._num_classes + 1]) + one_hot_flat_cls_targets_with_background = tf.argmax( + flat_cls_targets_with_background, axis=1) + one_hot_flat_cls_targets_with_background = tf.one_hot( + one_hot_flat_cls_targets_with_background, + flat_cls_targets_with_background.get_shape()[1]) + return (proposal_boxes, proposal_boxlists, paddings_indicator, + one_hot_flat_cls_targets_with_background) + + def _get_refined_encodings_for_postitive_class( + self, refined_box_encodings, flat_cls_targets_with_background, + batch_size): + # We only predict refined location encodings for the non background + # classes, but we now pad it to make it compatible with the class + # predictions + refined_box_encodings_with_background = tf.pad(refined_box_encodings, + [[0, 0], [1, 0], [0, 0]]) + refined_box_encodings_masked_by_class_targets = ( + box_list_ops.boolean_mask( + box_list.BoxList( + tf.reshape(refined_box_encodings_with_background, + [-1, self._box_coder.code_size])), + tf.reshape(tf.greater(flat_cls_targets_with_background, 0), [-1]), + use_static_shapes=self._use_static_shapes, + indicator_sum=batch_size * self.max_num_proposals + if self._use_static_shapes else None).get()) + return tf.reshape( + refined_box_encodings_masked_by_class_targets, [ + batch_size, self.max_num_proposals, + self._box_coder.code_size + ]) + + def _padded_batched_proposals_indicator(self, + num_proposals, + max_num_proposals): + """Creates indicator matrix of non-pad elements of padded batch proposals. + + Args: + num_proposals: Tensor of type tf.int32 with shape [batch_size]. + max_num_proposals: Maximum number of proposals per image (integer). + + Returns: + A Tensor of type tf.bool with shape [batch_size, max_num_proposals]. 
+ """ + batch_size = tf.size(num_proposals) + tiled_num_proposals = tf.tile( + tf.expand_dims(num_proposals, 1), [1, max_num_proposals]) + tiled_proposal_index = tf.tile( + tf.expand_dims(tf.range(max_num_proposals), 0), [batch_size, 1]) + return tf.greater(tiled_num_proposals, tiled_proposal_index) + + def _unpad_proposals_and_apply_hard_mining(self, + proposal_boxlists, + second_stage_loc_losses, + second_stage_cls_losses, + num_proposals): + """Unpads proposals and applies hard mining. + + Args: + proposal_boxlists: A list of `batch_size` BoxLists each representing + `self.max_num_proposals` representing decoded proposal bounding boxes + for each image. + second_stage_loc_losses: A Tensor of type `float32`. A tensor of shape + `[batch_size, self.max_num_proposals]` representing per-anchor + second stage localization loss values. + second_stage_cls_losses: A Tensor of type `float32`. A tensor of shape + `[batch_size, self.max_num_proposals]` representing per-anchor + second stage classification loss values. + num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch] + representing the number of proposals predicted for each image in + the batch. + + Returns: + second_stage_loc_loss: A scalar float32 tensor representing the second + stage localization loss. + second_stage_cls_loss: A scalar float32 tensor representing the second + stage classification loss. + """ + for (proposal_boxlist, single_image_loc_loss, single_image_cls_loss, + single_image_num_proposals) in zip( + proposal_boxlists, + tf.unstack(second_stage_loc_losses), + tf.unstack(second_stage_cls_losses), + tf.unstack(num_proposals)): + proposal_boxlist = box_list.BoxList( + tf.slice(proposal_boxlist.get(), + [0, 0], [single_image_num_proposals, -1])) + single_image_loc_loss = tf.slice(single_image_loc_loss, + [0], [single_image_num_proposals]) + single_image_cls_loss = tf.slice(single_image_cls_loss, + [0], [single_image_num_proposals]) + return self._hard_example_miner( + location_losses=tf.expand_dims(single_image_loc_loss, 0), + cls_losses=tf.expand_dims(single_image_cls_loss, 0), + decoded_boxlist_list=[proposal_boxlist]) + + def regularization_losses(self): + """Returns a list of regularization losses for this model. + + Returns a list of regularization losses for this model that the estimator + needs to use during training/optimization. + + Returns: + A list of regularization loss tensors. + """ + all_losses = [] + slim_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) + # Copy the slim losses to avoid modifying the collection + if slim_losses: + all_losses.extend(slim_losses) + # TODO(kaftan): Possibly raise an error if the feature extractors are + # uninitialized in Keras. 
+ if self._feature_extractor_for_proposal_features: + if (self._feature_extractor_for_proposal_features != + _UNINITIALIZED_FEATURE_EXTRACTOR): + all_losses.extend(self._feature_extractor_for_proposal_features.losses) + if isinstance(self._first_stage_box_predictor_first_conv, + tf.keras.Model): + all_losses.extend( + self._first_stage_box_predictor_first_conv.losses) + if self._first_stage_box_predictor.is_keras_model: + all_losses.extend(self._first_stage_box_predictor.losses) + if self._feature_extractor_for_box_classifier_features: + if (self._feature_extractor_for_box_classifier_features != + _UNINITIALIZED_FEATURE_EXTRACTOR): + all_losses.extend( + self._feature_extractor_for_box_classifier_features.losses) + if self._mask_rcnn_box_predictor: + if self._mask_rcnn_box_predictor.is_keras_model: + all_losses.extend(self._mask_rcnn_box_predictor.losses) + return all_losses + + def restore_map(self, + fine_tune_checkpoint_type='detection', + load_all_detection_checkpoint_vars=False): + """Returns a map of variables to load from a foreign checkpoint. + + See parent class for details. + + Args: + fine_tune_checkpoint_type: whether to restore from a full detection + checkpoint (with compatible variable names) or to restore from a + classification checkpoint for initialization prior to training. + Valid values: `detection`, `classification`. Default 'detection'. + load_all_detection_checkpoint_vars: whether to load all variables (when + `fine_tune_checkpoint_type` is `detection`). If False, only variables + within the feature extractor scopes are included. Default False. + + Returns: + A dict mapping variable names (to load from a checkpoint) to variables in + the model graph. + Raises: + ValueError: if fine_tune_checkpoint_type is neither `classification` + nor `detection`. + """ + if fine_tune_checkpoint_type not in ['detection', 'classification']: + raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format( + fine_tune_checkpoint_type)) + if fine_tune_checkpoint_type == 'classification': + return self._feature_extractor.restore_from_classification_checkpoint_fn( + self.first_stage_feature_extractor_scope, + self.second_stage_feature_extractor_scope) + + variables_to_restore = variables_helper.get_global_variables_safely() + variables_to_restore.append(tf.train.get_or_create_global_step()) + # Only load feature extractor variables to be consistent with loading from + # a classification checkpoint. + include_patterns = None + if not load_all_detection_checkpoint_vars: + include_patterns = [ + self.first_stage_feature_extractor_scope, + self.second_stage_feature_extractor_scope + ] + feature_extractor_variables = slim.filter_variables( + variables_to_restore, include_patterns=include_patterns) + return {var.op.name: var for var in feature_extractor_variables} + + def updates(self): + """Returns a list of update operators for this model. + + Returns a list of update operators for this model that must be executed at + each training step. The estimator's train op needs to have a control + dependency on these updates. + + Returns: + A list of update operators. + """ + update_ops = [] + slim_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) + # Copy the slim ops to avoid modifying the collection + if slim_update_ops: + update_ops.extend(slim_update_ops) + # Passing None to get_updates_for grabs updates that should always be + # executed and don't depend on any model inputs in the graph. + # (E.g. if there was some count that should be incremented every time a + # model is run). 
+ # + # Passing inputs grabs updates that are transitively computed from the + # model inputs being passed in. + # (E.g. a batchnorm update depends on the observed inputs) + if self._feature_extractor_for_proposal_features: + if (self._feature_extractor_for_proposal_features != + _UNINITIALIZED_FEATURE_EXTRACTOR): + update_ops.extend( + self._feature_extractor_for_proposal_features.get_updates_for(None)) + update_ops.extend( + self._feature_extractor_for_proposal_features.get_updates_for( + self._feature_extractor_for_proposal_features.inputs)) + if isinstance(self._first_stage_box_predictor_first_conv, + tf.keras.Model): + update_ops.extend( + self._first_stage_box_predictor_first_conv.get_updates_for( + None)) + update_ops.extend( + self._first_stage_box_predictor_first_conv.get_updates_for( + self._first_stage_box_predictor_first_conv.inputs)) + if self._first_stage_box_predictor.is_keras_model: + update_ops.extend( + self._first_stage_box_predictor.get_updates_for(None)) + update_ops.extend( + self._first_stage_box_predictor.get_updates_for( + self._first_stage_box_predictor.inputs)) + if self._feature_extractor_for_box_classifier_features: + if (self._feature_extractor_for_box_classifier_features != + _UNINITIALIZED_FEATURE_EXTRACTOR): + update_ops.extend( + self._feature_extractor_for_box_classifier_features.get_updates_for( + None)) + update_ops.extend( + self._feature_extractor_for_box_classifier_features.get_updates_for( + self._feature_extractor_for_box_classifier_features.inputs)) + if self._mask_rcnn_box_predictor: + if self._mask_rcnn_box_predictor.is_keras_model: + update_ops.extend( + self._mask_rcnn_box_predictor.get_updates_for(None)) + update_ops.extend( + self._mask_rcnn_box_predictor.get_updates_for( + self._mask_rcnn_box_predictor.inputs)) + return update_ops diff --git a/models/research/object_detection/meta_architectures/faster_rcnn_meta_arch_test.py b/models/research/object_detection/meta_architectures/faster_rcnn_meta_arch_test.py new file mode 100644 index 0000000000000000000000000000000000000000..6c830b32d58cff5756521abc5bfeecacc7118531 --- /dev/null +++ b/models/research/object_detection/meta_architectures/faster_rcnn_meta_arch_test.py @@ -0,0 +1,513 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.meta_architectures.faster_rcnn_meta_arch.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +import numpy as np +from six.moves import range +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures import faster_rcnn_meta_arch_test_lib +from object_detection.utils import test_utils + + +class FasterRCNNMetaArchTest( + faster_rcnn_meta_arch_test_lib.FasterRCNNMetaArchTestBase, + parameterized.TestCase): + + def test_postprocess_second_stage_only_inference_mode_with_masks(self): + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=False, + number_of_stages=2, second_stage_batch_size=6) + + batch_size = 2 + total_num_padded_proposals = batch_size * model.max_num_proposals + def graph_fn(): + proposal_boxes = tf.constant( + [[[1, 1, 2, 3], + [0, 0, 1, 1], + [.5, .5, .6, .6], + 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]], + [[2, 3, 6, 8], + [1, 2, 5, 3], + 4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]], dtype=tf.float32) + num_proposals = tf.constant([3, 2], dtype=tf.int32) + refined_box_encodings = tf.zeros( + [total_num_padded_proposals, model.num_classes, 4], dtype=tf.float32) + class_predictions_with_background = tf.ones( + [total_num_padded_proposals, model.num_classes+1], dtype=tf.float32) + image_shape = tf.constant([batch_size, 36, 48, 3], dtype=tf.int32) + + mask_height = 2 + mask_width = 2 + mask_predictions = 30. * tf.ones( + [total_num_padded_proposals, model.num_classes, + mask_height, mask_width], dtype=tf.float32) + + _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) + detections = model.postprocess({ + 'refined_box_encodings': refined_box_encodings, + 'class_predictions_with_background': + class_predictions_with_background, + 'num_proposals': num_proposals, + 'proposal_boxes': proposal_boxes, + 'image_shape': image_shape, + 'mask_predictions': mask_predictions + }, true_image_shapes) + return (detections['detection_boxes'], + detections['detection_scores'], + detections['detection_classes'], + detections['num_detections'], + detections['detection_masks']) + (detection_boxes, detection_scores, detection_classes, + num_detections, detection_masks) = self.execute_cpu(graph_fn, [], graph=g) + exp_detection_masks = np.array([[[[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[1, 1], [1, 1]]], + [[[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[0, 0], [0, 0]]]]) + self.assertAllEqual(detection_boxes.shape, [2, 5, 4]) + self.assertAllClose(detection_scores, + [[1, 1, 1, 1, 1], [1, 1, 1, 1, 0]]) + self.assertAllClose(detection_classes, + [[0, 0, 0, 1, 1], [0, 0, 1, 1, 0]]) + self.assertAllClose(num_detections, [5, 4]) + self.assertAllClose(detection_masks, exp_detection_masks) + self.assertTrue(np.amax(detection_masks <= 1.0)) + self.assertTrue(np.amin(detection_masks >= 0.0)) + + def test_postprocess_second_stage_only_inference_mode_with_calibration(self): + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=False, + number_of_stages=2, second_stage_batch_size=6, + calibration_mapping_value=0.5) + + batch_size = 2 + total_num_padded_proposals = batch_size * model.max_num_proposals + def graph_fn(): + proposal_boxes = tf.constant( + [[[1, 1, 2, 3], + [0, 0, 1, 1], + [.5, .5, .6, .6], + 4*[0], 4*[0], 4*[0], 4*[0], 
4*[0]], + [[2, 3, 6, 8], + [1, 2, 5, 3], + 4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]], dtype=tf.float32) + num_proposals = tf.constant([3, 2], dtype=tf.int32) + refined_box_encodings = tf.zeros( + [total_num_padded_proposals, model.num_classes, 4], dtype=tf.float32) + class_predictions_with_background = tf.ones( + [total_num_padded_proposals, model.num_classes+1], dtype=tf.float32) + image_shape = tf.constant([batch_size, 36, 48, 3], dtype=tf.int32) + + mask_height = 2 + mask_width = 2 + mask_predictions = 30. * tf.ones( + [total_num_padded_proposals, model.num_classes, + mask_height, mask_width], dtype=tf.float32) + _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) + detections = model.postprocess({ + 'refined_box_encodings': refined_box_encodings, + 'class_predictions_with_background': + class_predictions_with_background, + 'num_proposals': num_proposals, + 'proposal_boxes': proposal_boxes, + 'image_shape': image_shape, + 'mask_predictions': mask_predictions + }, true_image_shapes) + return (detections['detection_boxes'], + detections['detection_scores'], + detections['detection_classes'], + detections['num_detections'], + detections['detection_masks']) + (detection_boxes, detection_scores, detection_classes, + num_detections, detection_masks) = self.execute_cpu(graph_fn, [], graph=g) + exp_detection_masks = np.array([[[[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[1, 1], [1, 1]]], + [[[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[0, 0], [0, 0]]]]) + + self.assertAllEqual(detection_boxes.shape, [2, 5, 4]) + # All scores map to 0.5, except for the final one, which is pruned. + self.assertAllClose(detection_scores, + [[0.5, 0.5, 0.5, 0.5, 0.5], + [0.5, 0.5, 0.5, 0.5, 0.0]]) + self.assertAllClose(detection_classes, + [[0, 0, 0, 1, 1], [0, 0, 1, 1, 0]]) + self.assertAllClose(num_detections, [5, 4]) + self.assertAllClose(detection_masks, + exp_detection_masks) + self.assertTrue(np.amax(detection_masks <= 1.0)) + self.assertTrue(np.amin(detection_masks >= 0.0)) + + def test_postprocess_second_stage_only_inference_mode_with_shared_boxes(self): + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=False, + number_of_stages=2, second_stage_batch_size=6) + + batch_size = 2 + total_num_padded_proposals = batch_size * model.max_num_proposals + def graph_fn(): + proposal_boxes = tf.constant( + [[[1, 1, 2, 3], + [0, 0, 1, 1], + [.5, .5, .6, .6], + 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]], + [[2, 3, 6, 8], + [1, 2, 5, 3], + 4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]], dtype=tf.float32) + num_proposals = tf.constant([3, 2], dtype=tf.int32) + + # This has 1 box instead of one for each class. 
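+      # The second (class) dimension is 1 rather than model.num_classes,
+      # which exercises the postprocessing path where a single refined box
+      # is shared across all classes.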
+ refined_box_encodings = tf.zeros( + [total_num_padded_proposals, 1, 4], dtype=tf.float32) + class_predictions_with_background = tf.ones( + [total_num_padded_proposals, model.num_classes+1], dtype=tf.float32) + image_shape = tf.constant([batch_size, 36, 48, 3], dtype=tf.int32) + + _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) + detections = model.postprocess({ + 'refined_box_encodings': refined_box_encodings, + 'class_predictions_with_background': + class_predictions_with_background, + 'num_proposals': num_proposals, + 'proposal_boxes': proposal_boxes, + 'image_shape': image_shape, + }, true_image_shapes) + return (detections['detection_boxes'], + detections['detection_scores'], + detections['detection_classes'], + detections['num_detections']) + (detection_boxes, detection_scores, detection_classes, + num_detections) = self.execute_cpu(graph_fn, [], graph=g) + self.assertAllEqual(detection_boxes.shape, [2, 5, 4]) + self.assertAllClose(detection_scores, + [[1, 1, 1, 1, 1], [1, 1, 1, 1, 0]]) + self.assertAllClose(detection_classes, + [[0, 0, 0, 1, 1], [0, 0, 1, 1, 0]]) + self.assertAllClose(num_detections, [5, 4]) + + @parameterized.parameters( + {'masks_are_class_agnostic': False}, + {'masks_are_class_agnostic': True}, + ) + def test_predict_correct_shapes_in_inference_mode_three_stages_with_masks( + self, masks_are_class_agnostic): + batch_size = 2 + image_size = 10 + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=False, + number_of_stages=3, + second_stage_batch_size=2, + predict_masks=True, + masks_are_class_agnostic=masks_are_class_agnostic) + def graph_fn(): + shape = [tf.random_uniform([], minval=batch_size, maxval=batch_size + 1, + dtype=tf.int32), + tf.random_uniform([], minval=image_size, maxval=image_size + 1, + dtype=tf.int32), + tf.random_uniform([], minval=image_size, maxval=image_size + 1, + dtype=tf.int32), + 3] + image = tf.zeros(shape) + _, true_image_shapes = model.preprocess(image) + detections = model.predict(image, true_image_shapes) + return (detections['detection_boxes'], detections['detection_classes'], + detections['detection_scores'], detections['num_detections'], + detections['detection_masks'], detections['mask_predictions']) + (detection_boxes, detection_scores, detection_classes, + num_detections, detection_masks, + mask_predictions) = self.execute_cpu(graph_fn, [], graph=g) + self.assertAllEqual(detection_boxes.shape, [2, 5, 4]) + self.assertAllEqual(detection_masks.shape, + [2, 5, 14, 14]) + self.assertAllEqual(detection_classes.shape, [2, 5]) + self.assertAllEqual(detection_scores.shape, [2, 5]) + self.assertAllEqual(num_detections.shape, [2]) + num_classes = 1 if masks_are_class_agnostic else 2 + self.assertAllEqual(mask_predictions.shape, + [10, num_classes, 14, 14]) + + def test_raw_detection_boxes_and_anchor_indices_correct(self): + batch_size = 2 + image_size = 10 + + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=False, + number_of_stages=2, + second_stage_batch_size=2, + share_box_across_classes=True, + return_raw_detections_during_predict=True) + def graph_fn(): + shape = [tf.random_uniform([], minval=batch_size, maxval=batch_size + 1, + dtype=tf.int32), + tf.random_uniform([], minval=image_size, maxval=image_size + 1, + dtype=tf.int32), + tf.random_uniform([], minval=image_size, maxval=image_size + 1, + dtype=tf.int32), + 3] + image = tf.zeros(shape) + _, true_image_shapes = model.preprocess(image) + predict_tensor_dict = model.predict(image, 
true_image_shapes) + detections = model.postprocess(predict_tensor_dict, true_image_shapes) + return (detections['detection_boxes'], + detections['num_detections'], + detections['detection_anchor_indices'], + detections['raw_detection_boxes'], + predict_tensor_dict['raw_detection_boxes']) + (detection_boxes, num_detections, detection_anchor_indices, + raw_detection_boxes, + predict_raw_detection_boxes) = self.execute_cpu(graph_fn, [], graph=g) + + # Verify that the raw detections from predict and postprocess are the + # same. + self.assertAllClose( + np.squeeze(predict_raw_detection_boxes), raw_detection_boxes) + # Verify that the raw detection boxes at detection anchor indices are the + # same as the postprocessed detections. + for i in range(batch_size): + num_detections_per_image = int(num_detections[i]) + detection_boxes_per_image = detection_boxes[i][ + :num_detections_per_image] + detection_anchor_indices_per_image = detection_anchor_indices[i][ + :num_detections_per_image] + raw_detections_per_image = np.squeeze(raw_detection_boxes[i]) + raw_detections_at_anchor_indices = raw_detections_per_image[ + detection_anchor_indices_per_image] + self.assertAllClose(detection_boxes_per_image, + raw_detections_at_anchor_indices) + + @parameterized.parameters( + {'masks_are_class_agnostic': False}, + {'masks_are_class_agnostic': True}, + ) + def test_predict_gives_correct_shapes_in_train_mode_both_stages_with_masks( + self, masks_are_class_agnostic): + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=True, + number_of_stages=3, + second_stage_batch_size=7, + predict_masks=True, + masks_are_class_agnostic=masks_are_class_agnostic) + batch_size = 2 + image_size = 10 + max_num_proposals = 7 + def graph_fn(): + image_shape = (batch_size, image_size, image_size, 3) + preprocessed_inputs = tf.zeros(image_shape, dtype=tf.float32) + groundtruth_boxes_list = [ + tf.constant([[0, 0, .5, .5], [.5, .5, 1, 1]], dtype=tf.float32), + tf.constant([[0, .5, .5, 1], [.5, 0, 1, .5]], dtype=tf.float32) + ] + groundtruth_classes_list = [ + tf.constant([[1, 0], [0, 1]], dtype=tf.float32), + tf.constant([[1, 0], [1, 0]], dtype=tf.float32) + ] + groundtruth_weights_list = [ + tf.constant([1, 1], dtype=tf.float32), + tf.constant([1, 1], dtype=tf.float32)] + _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) + model.provide_groundtruth( + groundtruth_boxes_list, + groundtruth_classes_list, + groundtruth_weights_list=groundtruth_weights_list) + + result_tensor_dict = model.predict(preprocessed_inputs, true_image_shapes) + return result_tensor_dict['mask_predictions'] + mask_shape_1 = 1 if masks_are_class_agnostic else model._num_classes + mask_out = self.execute_cpu(graph_fn, [], graph=g) + self.assertAllEqual(mask_out.shape, + (2 * max_num_proposals, mask_shape_1, 14, 14)) + + def test_postprocess_third_stage_only_inference_mode(self): + batch_size = 2 + initial_crop_size = 3 + maxpool_stride = 1 + height = initial_crop_size // maxpool_stride + width = initial_crop_size // maxpool_stride + depth = 3 + + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=False, number_of_stages=3, + second_stage_batch_size=6, predict_masks=True) + total_num_padded_proposals = batch_size * model.max_num_proposals + def graph_fn(images_shape, num_proposals, proposal_boxes, + refined_box_encodings, class_predictions_with_background): + _, true_image_shapes = model.preprocess( + tf.zeros(images_shape)) + detections = model.postprocess({ + 
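+          # For a three-stage model, postprocess also receives the detection
+          # and feature tensors already produced by predict(), in addition to
+          # the usual second-stage fields below.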
'refined_box_encodings': refined_box_encodings, + 'class_predictions_with_background': + class_predictions_with_background, + 'num_proposals': num_proposals, + 'proposal_boxes': proposal_boxes, + 'image_shape': images_shape, + 'detection_boxes': tf.zeros([2, 5, 4]), + 'detection_masks': tf.zeros([2, 5, 14, 14]), + 'detection_scores': tf.zeros([2, 5]), + 'detection_classes': tf.zeros([2, 5]), + 'num_detections': tf.zeros([2]), + 'detection_features': tf.zeros([2, 5, width, height, depth]) + }, true_image_shapes) + return (detections['detection_boxes'], detections['detection_masks'], + detections['detection_scores'], detections['detection_classes'], + detections['num_detections'], + detections['detection_features']) + images_shape = np.array((2, 36, 48, 3), dtype=np.int32) + proposal_boxes = np.array( + [[[1, 1, 2, 3], + [0, 0, 1, 1], + [.5, .5, .6, .6], + 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]], + [[2, 3, 6, 8], + [1, 2, 5, 3], + 4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]]) + num_proposals = np.array([3, 2], dtype=np.int32) + refined_box_encodings = np.zeros( + [total_num_padded_proposals, model.num_classes, 4]) + class_predictions_with_background = np.ones( + [total_num_padded_proposals, model.num_classes+1]) + + (detection_boxes, detection_masks, detection_scores, detection_classes, + num_detections, + detection_features) = self.execute_cpu(graph_fn, + [images_shape, num_proposals, + proposal_boxes, + refined_box_encodings, + class_predictions_with_background], + graph=g) + self.assertAllEqual(detection_boxes.shape, [2, 5, 4]) + self.assertAllEqual(detection_masks.shape, [2, 5, 14, 14]) + self.assertAllClose(detection_scores.shape, [2, 5]) + self.assertAllClose(detection_classes.shape, [2, 5]) + self.assertAllClose(num_detections.shape, [2]) + self.assertTrue(np.amax(detection_masks <= 1.0)) + self.assertTrue(np.amin(detection_masks >= 0.0)) + self.assertAllEqual(detection_features.shape, + [2, 5, width, height, depth]) + self.assertGreaterEqual(np.amax(detection_features), 0) + + def _get_box_classifier_features_shape(self, + image_size, + batch_size, + max_num_proposals, + initial_crop_size, + maxpool_stride, + num_features): + return (batch_size * max_num_proposals, + initial_crop_size // maxpool_stride, + initial_crop_size // maxpool_stride, + num_features) + + def test_output_final_box_features(self): + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=False, + number_of_stages=2, + second_stage_batch_size=6, + output_final_box_features=True) + + batch_size = 2 + total_num_padded_proposals = batch_size * model.max_num_proposals + def graph_fn(): + proposal_boxes = tf.constant([[[1, 1, 2, 3], [0, 0, 1, 1], + [.5, .5, .6, .6], 4 * [0], 4 * [0], + 4 * [0], 4 * [0], 4 * [0]], + [[2, 3, 6, 8], [1, 2, 5, 3], 4 * [0], + 4 * [0], 4 * [0], 4 * [0], 4 * [0], + 4 * [0]]], + dtype=tf.float32) + num_proposals = tf.constant([3, 2], dtype=tf.int32) + refined_box_encodings = tf.zeros( + [total_num_padded_proposals, model.num_classes, 4], dtype=tf.float32) + class_predictions_with_background = tf.ones( + [total_num_padded_proposals, model.num_classes + 1], dtype=tf.float32) + image_shape = tf.constant([batch_size, 36, 48, 3], dtype=tf.int32) + + mask_height = 2 + mask_width = 2 + mask_predictions = 30. 
* tf.ones([ + total_num_padded_proposals, model.num_classes, mask_height, mask_width + ], + dtype=tf.float32) + _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) + rpn_features_to_crop = tf.ones((batch_size, mask_height, mask_width, 3), + tf.float32) + detections = model.postprocess( + { + 'refined_box_encodings': + refined_box_encodings, + 'class_predictions_with_background': + class_predictions_with_background, + 'num_proposals': + num_proposals, + 'proposal_boxes': + proposal_boxes, + 'image_shape': + image_shape, + 'mask_predictions': + mask_predictions, + 'rpn_features_to_crop': + rpn_features_to_crop + }, true_image_shapes) + self.assertIn('detection_features', detections) + return (detections['detection_boxes'], detections['detection_scores'], + detections['detection_classes'], detections['num_detections'], + detections['detection_masks']) + (detection_boxes, detection_scores, detection_classes, num_detections, + detection_masks) = self.execute_cpu(graph_fn, [], graph=g) + exp_detection_masks = np.array([[[[1, 1], [1, 1]], [[1, 1], [1, 1]], + [[1, 1], [1, 1]], [[1, 1], [1, 1]], + [[1, 1], [1, 1]]], + [[[1, 1], [1, 1]], [[1, 1], [1, 1]], + [[1, 1], [1, 1]], [[1, 1], [1, 1]], + [[0, 0], [0, 0]]]]) + + self.assertAllEqual(detection_boxes.shape, [2, 5, 4]) + self.assertAllClose(detection_scores, + [[1, 1, 1, 1, 1], [1, 1, 1, 1, 0]]) + self.assertAllClose(detection_classes, + [[0, 0, 0, 1, 1], [0, 0, 1, 1, 0]]) + self.assertAllClose(num_detections, [5, 4]) + self.assertAllClose(detection_masks, + exp_detection_masks) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/meta_architectures/faster_rcnn_meta_arch_test_lib.py b/models/research/object_detection/meta_architectures/faster_rcnn_meta_arch_test_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..beead134d515a1084b4ed1a57f63d601e07a02b2 --- /dev/null +++ b/models/research/object_detection/meta_architectures/faster_rcnn_meta_arch_test_lib.py @@ -0,0 +1,2008 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.meta_architectures.faster_rcnn_meta_arch.""" +import functools +from absl.testing import parameterized + +import numpy as np +import six +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.anchor_generators import grid_anchor_generator +from object_detection.builders import box_predictor_builder +from object_detection.builders import hyperparams_builder +from object_detection.builders import post_processing_builder +from object_detection.core import balanced_positive_negative_sampler as sampler +from object_detection.core import losses +from object_detection.core import post_processing +from object_detection.core import target_assigner +from object_detection.meta_architectures import faster_rcnn_meta_arch +from object_detection.protos import box_predictor_pb2 +from object_detection.protos import hyperparams_pb2 +from object_detection.protos import post_processing_pb2 +from object_detection.utils import ops +from object_detection.utils import test_case +from object_detection.utils import test_utils +from object_detection.utils import tf_version + +# pylint: disable=g-import-not-at-top +try: + import tf_slim as slim +except ImportError: + # TF 2.0 doesn't ship with contrib. + pass +# pylint: enable=g-import-not-at-top + +BOX_CODE_SIZE = 4 + + +class FakeFasterRCNNFeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): + """Fake feature extractor to use in tests.""" + + def __init__(self): + super(FakeFasterRCNNFeatureExtractor, self).__init__( + is_training=False, + first_stage_features_stride=32, + reuse_weights=None, + weight_decay=0.0) + + def preprocess(self, resized_inputs): + return tf.identity(resized_inputs) + + def _extract_proposal_features(self, preprocessed_inputs, scope): + with tf.variable_scope('mock_model'): + proposal_features = 0 * slim.conv2d( + preprocessed_inputs, num_outputs=3, kernel_size=1, scope='layer1') + return proposal_features, {} + + def _extract_box_classifier_features(self, proposal_feature_maps, scope): + with tf.variable_scope('mock_model'): + return 0 * slim.conv2d( + proposal_feature_maps, num_outputs=3, kernel_size=1, scope='layer2') + + +class FakeFasterRCNNKerasFeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor): + """Fake feature extractor to use in tests.""" + + def __init__(self): + super(FakeFasterRCNNKerasFeatureExtractor, self).__init__( + is_training=False, + first_stage_features_stride=32, + weight_decay=0.0) + + def preprocess(self, resized_inputs): + return tf.identity(resized_inputs) + + def get_proposal_feature_extractor_model(self, name): + + class ProposalFeatureExtractor(tf.keras.Model): + """Dummy proposal feature extraction.""" + + def __init__(self, name): + super(ProposalFeatureExtractor, self).__init__(name=name) + self.conv = None + + def build(self, input_shape): + self.conv = tf.keras.layers.Conv2D( + 3, kernel_size=1, padding='SAME', name='layer1') + + def call(self, inputs): + return self.conv(inputs) + + return ProposalFeatureExtractor(name=name) + + def get_box_classifier_feature_extractor_model(self, name): + return tf.keras.Sequential([tf.keras.layers.Conv2D( + 3, kernel_size=1, padding='SAME', name=name + '_layer2')]) + + +class FasterRCNNMetaArchTestBase(test_case.TestCase, parameterized.TestCase): + """Base class to test Faster R-CNN and R-FCN meta architectures.""" + + def _build_arg_scope_with_hyperparams(self, + 
hyperparams_text_proto, + is_training): + hyperparams = hyperparams_pb2.Hyperparams() + text_format.Merge(hyperparams_text_proto, hyperparams) + return hyperparams_builder.build(hyperparams, is_training=is_training) + + def _build_keras_layer_hyperparams(self, hyperparams_text_proto): + hyperparams = hyperparams_pb2.Hyperparams() + text_format.Merge(hyperparams_text_proto, hyperparams) + return hyperparams_builder.KerasLayerHyperparams(hyperparams) + + def _get_second_stage_box_predictor_text_proto( + self, share_box_across_classes=False): + share_box_field = 'true' if share_box_across_classes else 'false' + box_predictor_text_proto = """ + mask_rcnn_box_predictor {{ + fc_hyperparams {{ + op: FC + activation: NONE + regularizer {{ + l2_regularizer {{ + weight: 0.0005 + }} + }} + initializer {{ + variance_scaling_initializer {{ + factor: 1.0 + uniform: true + mode: FAN_AVG + }} + }} + }} + share_box_across_classes: {share_box_across_classes} + }} + """.format(share_box_across_classes=share_box_field) + return box_predictor_text_proto + + def _add_mask_to_second_stage_box_predictor_text_proto( + self, masks_are_class_agnostic=False): + agnostic = 'true' if masks_are_class_agnostic else 'false' + box_predictor_text_proto = """ + mask_rcnn_box_predictor { + predict_instance_masks: true + masks_are_class_agnostic: """ + agnostic + """ + mask_height: 14 + mask_width: 14 + conv_hyperparams { + op: CONV + regularizer { + l2_regularizer { + weight: 0.0 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.01 + } + } + } + } + """ + return box_predictor_text_proto + + def _get_second_stage_box_predictor(self, num_classes, is_training, + predict_masks, masks_are_class_agnostic, + share_box_across_classes=False, + use_keras=False): + box_predictor_proto = box_predictor_pb2.BoxPredictor() + text_format.Merge(self._get_second_stage_box_predictor_text_proto( + share_box_across_classes), box_predictor_proto) + if predict_masks: + text_format.Merge( + self._add_mask_to_second_stage_box_predictor_text_proto( + masks_are_class_agnostic), + box_predictor_proto) + + if use_keras: + return box_predictor_builder.build_keras( + hyperparams_builder.KerasLayerHyperparams, + inplace_batchnorm_update=False, + freeze_batchnorm=False, + box_predictor_config=box_predictor_proto, + num_classes=num_classes, + num_predictions_per_location_list=None, + is_training=is_training) + else: + return box_predictor_builder.build( + hyperparams_builder.build, + box_predictor_proto, + num_classes=num_classes, + is_training=is_training) + + def _get_model(self, box_predictor, keras_model=False, **common_kwargs): + return faster_rcnn_meta_arch.FasterRCNNMetaArch( + initial_crop_size=3, + maxpool_kernel_size=1, + maxpool_stride=1, + second_stage_mask_rcnn_box_predictor=box_predictor, + **common_kwargs) + + def _build_model(self, + is_training, + number_of_stages, + second_stage_batch_size, + first_stage_max_proposals=8, + num_classes=2, + hard_mining=False, + softmax_second_stage_classification_loss=True, + predict_masks=False, + pad_to_max_dimension=None, + masks_are_class_agnostic=False, + use_matmul_crop_and_resize=False, + clip_anchors_to_image=False, + use_matmul_gather_in_matcher=False, + use_static_shapes=False, + calibration_mapping_value=None, + share_box_across_classes=False, + return_raw_detections_during_predict=False, + output_final_box_features=False): + use_keras = tf_version.is_tf2() + def image_resizer_fn(image, masks=None): + """Fake image resizer function.""" + resized_inputs = [] + resized_image = 
tf.identity(image) + if pad_to_max_dimension is not None: + resized_image = tf.image.pad_to_bounding_box(image, 0, 0, + pad_to_max_dimension, + pad_to_max_dimension) + resized_inputs.append(resized_image) + if masks is not None: + resized_masks = tf.identity(masks) + if pad_to_max_dimension is not None: + resized_masks = tf.image.pad_to_bounding_box(tf.transpose(masks, + [1, 2, 0]), + 0, 0, + pad_to_max_dimension, + pad_to_max_dimension) + resized_masks = tf.transpose(resized_masks, [2, 0, 1]) + resized_inputs.append(resized_masks) + resized_inputs.append(tf.shape(image)) + return resized_inputs + + # anchors in this test are designed so that a subset of anchors are inside + # the image and a subset of anchors are outside. + first_stage_anchor_scales = (0.001, 0.005, 0.1) + first_stage_anchor_aspect_ratios = (0.5, 1.0, 2.0) + first_stage_anchor_strides = (1, 1) + first_stage_anchor_generator = grid_anchor_generator.GridAnchorGenerator( + first_stage_anchor_scales, + first_stage_anchor_aspect_ratios, + anchor_stride=first_stage_anchor_strides) + first_stage_target_assigner = target_assigner.create_target_assigner( + 'FasterRCNN', + 'proposal', + use_matmul_gather=use_matmul_gather_in_matcher) + + if use_keras: + fake_feature_extractor = FakeFasterRCNNKerasFeatureExtractor() + else: + fake_feature_extractor = FakeFasterRCNNFeatureExtractor() + + first_stage_box_predictor_hyperparams_text_proto = """ + op: CONV + activation: RELU + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + } + } + """ + if use_keras: + first_stage_box_predictor_arg_scope_fn = ( + self._build_keras_layer_hyperparams( + first_stage_box_predictor_hyperparams_text_proto)) + else: + first_stage_box_predictor_arg_scope_fn = ( + self._build_arg_scope_with_hyperparams( + first_stage_box_predictor_hyperparams_text_proto, is_training)) + + first_stage_box_predictor_kernel_size = 3 + first_stage_atrous_rate = 1 + first_stage_box_predictor_depth = 512 + first_stage_minibatch_size = 3 + first_stage_sampler = sampler.BalancedPositiveNegativeSampler( + positive_fraction=0.5, is_static=use_static_shapes) + + first_stage_nms_score_threshold = -1.0 + first_stage_nms_iou_threshold = 1.0 + first_stage_max_proposals = first_stage_max_proposals + first_stage_non_max_suppression_fn = functools.partial( + post_processing.batch_multiclass_non_max_suppression, + score_thresh=first_stage_nms_score_threshold, + iou_thresh=first_stage_nms_iou_threshold, + max_size_per_class=first_stage_max_proposals, + max_total_size=first_stage_max_proposals, + use_static_shapes=use_static_shapes) + + first_stage_localization_loss_weight = 1.0 + first_stage_objectness_loss_weight = 1.0 + + post_processing_config = post_processing_pb2.PostProcessing() + post_processing_text_proto = """ + score_converter: IDENTITY + batch_non_max_suppression { + score_threshold: -20.0 + iou_threshold: 1.0 + max_detections_per_class: 5 + max_total_detections: 5 + use_static_shapes: """ +'{}'.format(use_static_shapes) + """ + } + """ + if calibration_mapping_value: + calibration_text_proto = """ + calibration_config { + function_approximation { + x_y_pairs { + x_y_pair { + x: 0.0 + y: %f + } + x_y_pair { + x: 1.0 + y: %f + }}}}""" % (calibration_mapping_value, calibration_mapping_value) + post_processing_text_proto = (post_processing_text_proto + + ' ' + calibration_text_proto) + text_format.Merge(post_processing_text_proto, post_processing_config) + second_stage_non_max_suppression_fn, 
second_stage_score_conversion_fn = ( + post_processing_builder.build(post_processing_config)) + + second_stage_target_assigner = target_assigner.create_target_assigner( + 'FasterRCNN', 'detection', + use_matmul_gather=use_matmul_gather_in_matcher) + second_stage_sampler = sampler.BalancedPositiveNegativeSampler( + positive_fraction=1.0, is_static=use_static_shapes) + + second_stage_localization_loss_weight = 1.0 + second_stage_classification_loss_weight = 1.0 + if softmax_second_stage_classification_loss: + second_stage_classification_loss = ( + losses.WeightedSoftmaxClassificationLoss()) + else: + second_stage_classification_loss = ( + losses.WeightedSigmoidClassificationLoss()) + + hard_example_miner = None + if hard_mining: + hard_example_miner = losses.HardExampleMiner( + num_hard_examples=1, + iou_threshold=0.99, + loss_type='both', + cls_loss_weight=second_stage_classification_loss_weight, + loc_loss_weight=second_stage_localization_loss_weight, + max_negatives_per_positive=None) + + crop_and_resize_fn = ( + ops.matmul_crop_and_resize + if use_matmul_crop_and_resize else ops.native_crop_and_resize) + common_kwargs = { + 'is_training': + is_training, + 'num_classes': + num_classes, + 'image_resizer_fn': + image_resizer_fn, + 'feature_extractor': + fake_feature_extractor, + 'number_of_stages': + number_of_stages, + 'first_stage_anchor_generator': + first_stage_anchor_generator, + 'first_stage_target_assigner': + first_stage_target_assigner, + 'first_stage_atrous_rate': + first_stage_atrous_rate, + 'first_stage_box_predictor_arg_scope_fn': + first_stage_box_predictor_arg_scope_fn, + 'first_stage_box_predictor_kernel_size': + first_stage_box_predictor_kernel_size, + 'first_stage_box_predictor_depth': + first_stage_box_predictor_depth, + 'first_stage_minibatch_size': + first_stage_minibatch_size, + 'first_stage_sampler': + first_stage_sampler, + 'first_stage_non_max_suppression_fn': + first_stage_non_max_suppression_fn, + 'first_stage_max_proposals': + first_stage_max_proposals, + 'first_stage_localization_loss_weight': + first_stage_localization_loss_weight, + 'first_stage_objectness_loss_weight': + first_stage_objectness_loss_weight, + 'second_stage_target_assigner': + second_stage_target_assigner, + 'second_stage_batch_size': + second_stage_batch_size, + 'second_stage_sampler': + second_stage_sampler, + 'second_stage_non_max_suppression_fn': + second_stage_non_max_suppression_fn, + 'second_stage_score_conversion_fn': + second_stage_score_conversion_fn, + 'second_stage_localization_loss_weight': + second_stage_localization_loss_weight, + 'second_stage_classification_loss_weight': + second_stage_classification_loss_weight, + 'second_stage_classification_loss': + second_stage_classification_loss, + 'hard_example_miner': + hard_example_miner, + 'crop_and_resize_fn': + crop_and_resize_fn, + 'clip_anchors_to_image': + clip_anchors_to_image, + 'use_static_shapes': + use_static_shapes, + 'resize_masks': + True, + 'return_raw_detections_during_predict': + return_raw_detections_during_predict, + 'output_final_box_features': + output_final_box_features + } + + return self._get_model( + self._get_second_stage_box_predictor( + num_classes=num_classes, + is_training=is_training, + use_keras=use_keras, + predict_masks=predict_masks, + masks_are_class_agnostic=masks_are_class_agnostic, + share_box_across_classes=share_box_across_classes), **common_kwargs) + + @parameterized.parameters( + {'use_static_shapes': False}, + {'use_static_shapes': True}, + ) + def 
test_predict_gives_correct_shapes_in_inference_mode_first_stage_only( + self, use_static_shapes=False): + batch_size = 2 + height = 10 + width = 12 + input_image_shape = (batch_size, height, width, 3) + + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=False, + number_of_stages=1, + second_stage_batch_size=2, + clip_anchors_to_image=use_static_shapes, + use_static_shapes=use_static_shapes) + def graph_fn(images): + """Function to construct tf graph for the test.""" + + preprocessed_inputs, true_image_shapes = model.preprocess(images) + prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) + return (prediction_dict['rpn_box_predictor_features'], + prediction_dict['rpn_features_to_crop'], + prediction_dict['image_shape'], + prediction_dict['rpn_box_encodings'], + prediction_dict['rpn_objectness_predictions_with_background'], + prediction_dict['anchors']) + + images = np.zeros(input_image_shape, dtype=np.float32) + + # In inference mode, anchors are clipped to the image window, but not + # pruned. Since MockFasterRCNN.extract_proposal_features returns a + # tensor with the same shape as its input, the expected number of anchors + # is height * width * the number of anchors per location (i.e. 3x3). + expected_num_anchors = height * width * 3 * 3 + expected_output_shapes = { + 'rpn_box_predictor_features': (batch_size, height, width, 512), + 'rpn_features_to_crop': (batch_size, height, width, 3), + 'rpn_box_encodings': (batch_size, expected_num_anchors, 4), + 'rpn_objectness_predictions_with_background': + (batch_size, expected_num_anchors, 2), + 'anchors': (expected_num_anchors, 4) + } + + if use_static_shapes: + results = self.execute(graph_fn, [images], graph=g) + else: + results = self.execute_cpu(graph_fn, [images], graph=g) + + self.assertAllEqual(results[0].shape, + expected_output_shapes['rpn_box_predictor_features']) + self.assertAllEqual(results[1].shape, + expected_output_shapes['rpn_features_to_crop']) + self.assertAllEqual(results[2], + input_image_shape) + self.assertAllEqual(results[3].shape, + expected_output_shapes['rpn_box_encodings']) + self.assertAllEqual( + results[4].shape, + expected_output_shapes['rpn_objectness_predictions_with_background']) + self.assertAllEqual(results[5].shape, + expected_output_shapes['anchors']) + + # Check that anchors are clipped to window. 
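+    # Boxes are in [ymin, xmin, ymax, xmax] order, so columns 0 and 2 are
+    # bounded by the image height and columns 1 and 3 by its width.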
+ anchors = results[5] + self.assertTrue(np.all(np.greater_equal(anchors, 0))) + self.assertTrue(np.all(np.less_equal(anchors[:, 0], height))) + self.assertTrue(np.all(np.less_equal(anchors[:, 1], width))) + self.assertTrue(np.all(np.less_equal(anchors[:, 2], height))) + self.assertTrue(np.all(np.less_equal(anchors[:, 3], width))) + + def test_regularization_losses(self): + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=True, number_of_stages=1, second_stage_batch_size=2) + def graph_fn(): + batch_size = 2 + height = 10 + width = 12 + input_image_shape = (batch_size, height, width, 3) + image, true_image_shapes = model.preprocess(tf.zeros(input_image_shape)) + model.predict(image, true_image_shapes) + + reg_losses = tf.math.add_n(model.regularization_losses()) + return reg_losses + reg_losses = self.execute(graph_fn, [], graph=g) + self.assertGreaterEqual(reg_losses, 0) + + def test_predict_gives_valid_anchors_in_training_mode_first_stage_only(self): + expected_output_keys = set([ + 'rpn_box_predictor_features', 'rpn_features_to_crop', 'image_shape', + 'rpn_box_encodings', 'rpn_objectness_predictions_with_background', + 'anchors', 'feature_maps']) + + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=True, number_of_stages=1, second_stage_batch_size=2,) + + batch_size = 2 + height = 10 + width = 12 + input_image_shape = (batch_size, height, width, 3) + def graph_fn(): + image, true_image_shapes = model.preprocess(tf.zeros(input_image_shape)) + prediction_dict = model.predict(image, true_image_shapes) + self.assertEqual(set(prediction_dict.keys()), expected_output_keys) + return (prediction_dict['image_shape'], prediction_dict['anchors'], + prediction_dict['rpn_box_encodings'], + prediction_dict['rpn_objectness_predictions_with_background']) + + (image_shape, anchors, rpn_box_encodings, + rpn_objectness_predictions_with_background) = self.execute(graph_fn, [], + graph=g) + # At training time, anchors that exceed image bounds are pruned. Thus + # the `expected_num_anchors` in the above inference mode test is now + # a strict upper bound on the number of anchors. 
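+    # 3 scales x 3 aspect ratios give 9 anchors per feature-map location, so
+    # height * width * 9 is the unpruned anchor count; assertLess below
+    # checks that at least some out-of-bounds anchors were actually pruned.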
+ num_anchors_strict_upper_bound = height * width * 3 * 3 + self.assertAllEqual(image_shape, input_image_shape) + self.assertTrue(len(anchors.shape) == 2 and anchors.shape[1] == 4) + num_anchors_out = anchors.shape[0] + self.assertLess(num_anchors_out, num_anchors_strict_upper_bound) + + self.assertTrue(np.all(np.greater_equal(anchors, 0))) + self.assertTrue(np.all(np.less_equal(anchors[:, 0], height))) + self.assertTrue(np.all(np.less_equal(anchors[:, 1], width))) + self.assertTrue(np.all(np.less_equal(anchors[:, 2], height))) + self.assertTrue(np.all(np.less_equal(anchors[:, 3], width))) + + self.assertAllEqual(rpn_box_encodings.shape, + (batch_size, num_anchors_out, 4)) + self.assertAllEqual( + rpn_objectness_predictions_with_background.shape, + (batch_size, num_anchors_out, 2)) + + @parameterized.parameters( + {'use_static_shapes': False}, + {'use_static_shapes': True}, + ) + def test_predict_correct_shapes_in_inference_mode_two_stages( + self, use_static_shapes): + + def compare_results(results, expected_output_shapes): + """Checks if the shape of the predictions are as expected.""" + self.assertAllEqual(results[0].shape, + expected_output_shapes['rpn_box_predictor_features']) + self.assertAllEqual(results[1].shape, + expected_output_shapes['rpn_features_to_crop']) + self.assertAllEqual(results[2].shape, + expected_output_shapes['image_shape']) + self.assertAllEqual(results[3].shape, + expected_output_shapes['rpn_box_encodings']) + self.assertAllEqual( + results[4].shape, + expected_output_shapes['rpn_objectness_predictions_with_background']) + self.assertAllEqual(results[5].shape, + expected_output_shapes['anchors']) + self.assertAllEqual(results[6].shape, + expected_output_shapes['refined_box_encodings']) + self.assertAllEqual( + results[7].shape, + expected_output_shapes['class_predictions_with_background']) + self.assertAllEqual(results[8].shape, + expected_output_shapes['num_proposals']) + self.assertAllEqual(results[9].shape, + expected_output_shapes['proposal_boxes']) + self.assertAllEqual(results[10].shape, + expected_output_shapes['proposal_boxes_normalized']) + self.assertAllEqual(results[11].shape, + expected_output_shapes['box_classifier_features']) + self.assertAllEqual(results[12].shape, + expected_output_shapes['final_anchors']) + batch_size = 2 + image_size = 10 + max_num_proposals = 8 + initial_crop_size = 3 + maxpool_stride = 1 + + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=False, + number_of_stages=2, + second_stage_batch_size=2, + predict_masks=False, + use_matmul_crop_and_resize=use_static_shapes, + clip_anchors_to_image=use_static_shapes, + use_static_shapes=use_static_shapes) + def graph_fn(): + """A function with TF compute.""" + if use_static_shapes: + images = tf.random_uniform((batch_size, image_size, image_size, 3)) + else: + images = tf.random_uniform((tf.random_uniform([], + minval=batch_size, + maxval=batch_size + 1, + dtype=tf.int32), + tf.random_uniform([], + minval=image_size, + maxval=image_size + 1, + dtype=tf.int32), + tf.random_uniform([], + minval=image_size, + maxval=image_size + 1, + dtype=tf.int32), 3)) + preprocessed_inputs, true_image_shapes = model.preprocess(images) + prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) + return (prediction_dict['rpn_box_predictor_features'], + prediction_dict['rpn_features_to_crop'], + prediction_dict['image_shape'], + prediction_dict['rpn_box_encodings'], + prediction_dict['rpn_objectness_predictions_with_background'], + 
prediction_dict['anchors'], + prediction_dict['refined_box_encodings'], + prediction_dict['class_predictions_with_background'], + prediction_dict['num_proposals'], + prediction_dict['proposal_boxes'], + prediction_dict['proposal_boxes_normalized'], + prediction_dict['box_classifier_features'], + prediction_dict['final_anchors']) + expected_num_anchors = image_size * image_size * 3 * 3 + expected_shapes = { + 'rpn_box_predictor_features': + (2, image_size, image_size, 512), + 'rpn_features_to_crop': (2, image_size, image_size, 3), + 'image_shape': (4,), + 'rpn_box_encodings': (2, expected_num_anchors, 4), + 'rpn_objectness_predictions_with_background': + (2, expected_num_anchors, 2), + 'anchors': (expected_num_anchors, 4), + 'refined_box_encodings': (2 * max_num_proposals, 2, 4), + 'class_predictions_with_background': (2 * max_num_proposals, 2 + 1), + 'num_proposals': (2,), + 'proposal_boxes': (2, max_num_proposals, 4), + 'proposal_boxes_normalized': (2, max_num_proposals, 4), + 'box_classifier_features': + self._get_box_classifier_features_shape(image_size, + batch_size, + max_num_proposals, + initial_crop_size, + maxpool_stride, + 3), + 'feature_maps': [(2, image_size, image_size, 512)], + 'final_anchors': (2, max_num_proposals, 4) + } + + if use_static_shapes: + results = self.execute(graph_fn, [], graph=g) + else: + results = self.execute_cpu(graph_fn, [], graph=g) + compare_results(results, expected_shapes) + + @parameterized.parameters( + {'use_static_shapes': False}, + {'use_static_shapes': True}, + ) + def test_predict_gives_correct_shapes_in_train_mode_both_stages( + self, + use_static_shapes=False): + batch_size = 2 + image_size = 10 + max_num_proposals = 7 + initial_crop_size = 3 + maxpool_stride = 1 + + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=True, + number_of_stages=2, + second_stage_batch_size=7, + predict_masks=False, + use_matmul_crop_and_resize=use_static_shapes, + clip_anchors_to_image=use_static_shapes, + use_static_shapes=use_static_shapes) + + def graph_fn(images, gt_boxes, gt_classes, gt_weights): + """Function to construct tf graph for the test.""" + preprocessed_inputs, true_image_shapes = model.preprocess(images) + model.provide_groundtruth( + groundtruth_boxes_list=tf.unstack(gt_boxes), + groundtruth_classes_list=tf.unstack(gt_classes), + groundtruth_weights_list=tf.unstack(gt_weights)) + result_tensor_dict = model.predict(preprocessed_inputs, true_image_shapes) + return (result_tensor_dict['refined_box_encodings'], + result_tensor_dict['class_predictions_with_background'], + result_tensor_dict['proposal_boxes'], + result_tensor_dict['proposal_boxes_normalized'], + result_tensor_dict['anchors'], + result_tensor_dict['rpn_box_encodings'], + result_tensor_dict['rpn_objectness_predictions_with_background'], + result_tensor_dict['rpn_features_to_crop'], + result_tensor_dict['rpn_box_predictor_features'], + result_tensor_dict['final_anchors'], + ) + + image_shape = (batch_size, image_size, image_size, 3) + images = np.zeros(image_shape, dtype=np.float32) + gt_boxes = np.stack([ + np.array([[0, 0, .5, .5], [.5, .5, 1, 1]], dtype=np.float32), + np.array([[0, .5, .5, 1], [.5, 0, 1, .5]], dtype=np.float32) + ]) + gt_classes = np.stack([ + np.array([[1, 0], [0, 1]], dtype=np.float32), + np.array([[1, 0], [1, 0]], dtype=np.float32) + ]) + gt_weights = np.stack([ + np.array([1, 1], dtype=np.float32), + np.array([1, 1], dtype=np.float32) + ]) + if use_static_shapes: + results = self.execute(graph_fn, + [images, gt_boxes, gt_classes, 
gt_weights], + graph=g) + else: + results = self.execute_cpu(graph_fn, + [images, gt_boxes, gt_classes, gt_weights], + graph=g) + + expected_shapes = { + 'rpn_box_predictor_features': (2, image_size, image_size, 512), + 'rpn_features_to_crop': (2, image_size, image_size, 3), + 'refined_box_encodings': (2 * max_num_proposals, 2, 4), + 'class_predictions_with_background': (2 * max_num_proposals, 2 + 1), + 'proposal_boxes': (2, max_num_proposals, 4), + 'rpn_box_encodings': (2, image_size * image_size * 9, 4), + 'proposal_boxes_normalized': (2, max_num_proposals, 4), + 'box_classifier_features': + self._get_box_classifier_features_shape( + image_size, batch_size, max_num_proposals, initial_crop_size, + maxpool_stride, 3), + 'rpn_objectness_predictions_with_background': + (2, image_size * image_size * 9, 2), + 'final_anchors': (2, max_num_proposals, 4) + } + # TODO(rathodv): Possibly change utils/test_case.py to accept dictionaries + # and return dicionaries so don't have to rely on the order of tensors. + self.assertAllEqual(results[0].shape, + expected_shapes['refined_box_encodings']) + self.assertAllEqual(results[1].shape, + expected_shapes['class_predictions_with_background']) + self.assertAllEqual(results[2].shape, expected_shapes['proposal_boxes']) + self.assertAllEqual(results[3].shape, + expected_shapes['proposal_boxes_normalized']) + anchors_shape = results[4].shape + self.assertAllEqual(results[5].shape, + [batch_size, anchors_shape[0], 4]) + self.assertAllEqual(results[6].shape, + [batch_size, anchors_shape[0], 2]) + self.assertAllEqual(results[7].shape, + expected_shapes['rpn_features_to_crop']) + self.assertAllEqual(results[8].shape, + expected_shapes['rpn_box_predictor_features']) + self.assertAllEqual(results[9].shape, + expected_shapes['final_anchors']) + + @parameterized.parameters( + {'use_static_shapes': False, 'pad_to_max_dimension': None}, + {'use_static_shapes': True, 'pad_to_max_dimension': None}, + {'use_static_shapes': False, 'pad_to_max_dimension': 56,}, + {'use_static_shapes': True, 'pad_to_max_dimension': 56}, + ) + def test_postprocess_first_stage_only_inference_mode( + self, use_static_shapes=False, + pad_to_max_dimension=None): + batch_size = 2 + first_stage_max_proposals = 4 if use_static_shapes else 8 + + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=False, + number_of_stages=1, second_stage_batch_size=6, + use_matmul_crop_and_resize=use_static_shapes, + clip_anchors_to_image=use_static_shapes, + use_static_shapes=use_static_shapes, + use_matmul_gather_in_matcher=use_static_shapes, + first_stage_max_proposals=first_stage_max_proposals, + pad_to_max_dimension=pad_to_max_dimension) + + def graph_fn(images, + rpn_box_encodings, + rpn_objectness_predictions_with_background, + rpn_features_to_crop, + anchors): + """Function to construct tf graph for the test.""" + preprocessed_images, true_image_shapes = model.preprocess(images) + proposals = model.postprocess({ + 'rpn_box_encodings': rpn_box_encodings, + 'rpn_objectness_predictions_with_background': + rpn_objectness_predictions_with_background, + 'rpn_features_to_crop': rpn_features_to_crop, + 'image_shape': tf.shape(preprocessed_images), + 'anchors': anchors}, true_image_shapes) + return (proposals['num_detections'], proposals['detection_boxes'], + proposals['detection_scores'], proposals['raw_detection_boxes'], + proposals['raw_detection_scores']) + + anchors = np.array( + [[0, 0, 16, 16], + [0, 16, 16, 32], + [16, 0, 32, 16], + [16, 16, 32, 32]], dtype=np.float32) + 
rpn_box_encodings = np.zeros( + (batch_size, anchors.shape[0], BOX_CODE_SIZE), dtype=np.float32) + # use different numbers for the objectness category to break ties in + # order of boxes returned by NMS + rpn_objectness_predictions_with_background = np.array([ + [[-10, 13], + [10, -10], + [10, -11], + [-10, 12]], + [[10, -10], + [-10, 13], + [-10, 12], + [10, -11]]], dtype=np.float32) + rpn_features_to_crop = np.ones((batch_size, 8, 8, 10), dtype=np.float32) + image_shape = (batch_size, 32, 32, 3) + images = np.zeros(image_shape, dtype=np.float32) + + if use_static_shapes: + results = self.execute(graph_fn, + [images, rpn_box_encodings, + rpn_objectness_predictions_with_background, + rpn_features_to_crop, anchors], graph=g) + else: + results = self.execute_cpu(graph_fn, + [images, rpn_box_encodings, + rpn_objectness_predictions_with_background, + rpn_features_to_crop, anchors], graph=g) + + expected_proposal_boxes = [ + [[0, 0, .5, .5], [.5, .5, 1, 1], [0, .5, .5, 1], [.5, 0, 1.0, .5]] + + 4 * [4 * [0]], + [[0, .5, .5, 1], [.5, 0, 1.0, .5], [0, 0, .5, .5], [.5, .5, 1, 1]] + + 4 * [4 * [0]]] + expected_proposal_scores = [[1, 1, 0, 0, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0, 0, 0]] + expected_num_proposals = [4, 4] + expected_raw_proposal_boxes = [[[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.], + [0.5, 0., 1., 0.5], [0.5, 0.5, 1., 1.]], + [[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.], + [0.5, 0., 1., 0.5], [0.5, 0.5, 1., 1.]]] + expected_raw_scores = [[[0., 1.], [1., 0.], [1., 0.], [0., 1.]], + [[1., 0.], [0., 1.], [0., 1.], [1., 0.]]] + + if pad_to_max_dimension is not None: + expected_raw_proposal_boxes = (np.array(expected_raw_proposal_boxes) * + 32 / pad_to_max_dimension) + expected_proposal_boxes = (np.array(expected_proposal_boxes) * + 32 / pad_to_max_dimension) + + self.assertAllClose(results[0], expected_num_proposals) + for indx, num_proposals in enumerate(expected_num_proposals): + self.assertAllClose(results[1][indx][0:num_proposals], + expected_proposal_boxes[indx][0:num_proposals]) + self.assertAllClose(results[2][indx][0:num_proposals], + expected_proposal_scores[indx][0:num_proposals]) + self.assertAllClose(results[3], expected_raw_proposal_boxes) + self.assertAllClose(results[4], expected_raw_scores) + + def _test_postprocess_first_stage_only_train_mode(self, + pad_to_max_dimension=None): + + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=True, + number_of_stages=1, second_stage_batch_size=2, + pad_to_max_dimension=pad_to_max_dimension) + batch_size = 2 + + def graph_fn(): + """A function with TF compute.""" + anchors = tf.constant( + [[0, 0, 16, 16], + [0, 16, 16, 32], + [16, 0, 32, 16], + [16, 16, 32, 32]], dtype=tf.float32) + rpn_box_encodings = tf.zeros( + [batch_size, anchors.get_shape().as_list()[0], + BOX_CODE_SIZE], dtype=tf.float32) + # use different numbers for the objectness category to break ties in + # order of boxes returned by NMS + rpn_objectness_predictions_with_background = tf.constant([ + [[-10, 13], + [-10, 12], + [-10, 11], + [-10, 10]], + [[-10, 13], + [-10, 12], + [-10, 11], + [-10, 10]]], dtype=tf.float32) + rpn_features_to_crop = tf.ones((batch_size, 8, 8, 10), dtype=tf.float32) + image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32) + groundtruth_boxes_list = [ + tf.constant([[0, 0, .5, .5], [.5, .5, 1, 1]], dtype=tf.float32), + tf.constant([[0, .5, .5, 1], [.5, 0, 1, .5]], dtype=tf.float32)] + groundtruth_classes_list = [tf.constant([[1, 0], [0, 1]], + dtype=tf.float32), + tf.constant([[1, 0], [1, 0]], + 
dtype=tf.float32)] + groundtruth_weights_list = [ + tf.constant([1, 1], dtype=tf.float32), + tf.constant([1, 1], dtype=tf.float32) + ] + _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) + model.provide_groundtruth( + groundtruth_boxes_list, + groundtruth_classes_list, + groundtruth_weights_list=groundtruth_weights_list) + proposals = model.postprocess({ + 'rpn_box_encodings': rpn_box_encodings, + 'rpn_objectness_predictions_with_background': + rpn_objectness_predictions_with_background, + 'rpn_features_to_crop': rpn_features_to_crop, + 'anchors': anchors, + 'image_shape': image_shape}, true_image_shapes) + return (proposals['detection_boxes'], proposals['detection_scores'], + proposals['num_detections'], + proposals['detection_multiclass_scores'], + proposals['raw_detection_boxes'], + proposals['raw_detection_scores']) + + expected_proposal_boxes = [ + [[0, 0, .5, .5], [.5, .5, 1, 1]], [[0, .5, .5, 1], [.5, 0, 1, .5]]] + expected_proposal_scores = [[1, 1], + [1, 1]] + expected_proposal_multiclass_scores = [[[0., 1.], [0., 1.]], + [[0., 1.], [0., 1.]]] + expected_raw_proposal_boxes = [[[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.], + [0.5, 0., 1., 0.5], [0.5, 0.5, 1., 1.]], + [[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.], + [0.5, 0., 1., 0.5], [0.5, 0.5, 1., 1.]]] + expected_raw_scores = [[[0., 1.], [0., 1.], [0., 1.], [0., 1.]], + [[0., 1.], [0., 1.], [0., 1.], [0., 1.]]] + + (proposal_boxes, proposal_scores, batch_num_detections, + batch_multiclass_scores, raw_detection_boxes, + raw_detection_scores) = self.execute_cpu(graph_fn, [], graph=g) + for image_idx in range(batch_size): + num_detections = int(batch_num_detections[image_idx]) + boxes = proposal_boxes[image_idx][:num_detections, :].tolist() + scores = proposal_scores[image_idx][:num_detections].tolist() + multiclass_scores = batch_multiclass_scores[ + image_idx][:num_detections, :].tolist() + expected_boxes = expected_proposal_boxes[image_idx] + expected_scores = expected_proposal_scores[image_idx] + expected_multiclass_scores = expected_proposal_multiclass_scores[ + image_idx] + self.assertTrue( + test_utils.first_rows_close_as_set(boxes, expected_boxes)) + self.assertTrue( + test_utils.first_rows_close_as_set(scores, expected_scores)) + self.assertTrue( + test_utils.first_rows_close_as_set(multiclass_scores, + expected_multiclass_scores)) + + self.assertAllClose(raw_detection_boxes, expected_raw_proposal_boxes) + self.assertAllClose(raw_detection_scores, expected_raw_scores) + + @parameterized.parameters( + {'pad_to_max_dimension': 56}, + {'pad_to_max_dimension': None} + ) + def test_postprocess_first_stage_only_train_mode_padded_image( + self, pad_to_max_dimension): + self._test_postprocess_first_stage_only_train_mode(pad_to_max_dimension) + + @parameterized.parameters( + {'use_static_shapes': False, 'pad_to_max_dimension': None}, + {'use_static_shapes': True, 'pad_to_max_dimension': None}, + {'use_static_shapes': False, 'pad_to_max_dimension': 56}, + {'use_static_shapes': True, 'pad_to_max_dimension': 56}, + ) + def test_postprocess_second_stage_only_inference_mode( + self, use_static_shapes=False, + pad_to_max_dimension=None): + batch_size = 2 + num_classes = 2 + image_shape = np.array((2, 36, 48, 3), dtype=np.int32) + first_stage_max_proposals = 8 + total_num_padded_proposals = batch_size * first_stage_max_proposals + + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=False, + number_of_stages=2, + second_stage_batch_size=6, + use_matmul_crop_and_resize=use_static_shapes, + 
clip_anchors_to_image=use_static_shapes, + use_static_shapes=use_static_shapes, + use_matmul_gather_in_matcher=use_static_shapes, + pad_to_max_dimension=pad_to_max_dimension) + def graph_fn(images, + refined_box_encodings, + class_predictions_with_background, + num_proposals, + proposal_boxes): + """Function to construct tf graph for the test.""" + _, true_image_shapes = model.preprocess(images) + detections = model.postprocess({ + 'refined_box_encodings': refined_box_encodings, + 'class_predictions_with_background': + class_predictions_with_background, + 'num_proposals': num_proposals, + 'proposal_boxes': proposal_boxes, + }, true_image_shapes) + return (detections['num_detections'], detections['detection_boxes'], + detections['detection_scores'], detections['detection_classes'], + detections['raw_detection_boxes'], + detections['raw_detection_scores'], + detections['detection_multiclass_scores'], + detections['detection_anchor_indices']) + + proposal_boxes = np.array( + [[[1, 1, 2, 3], + [0, 0, 1, 1], + [.5, .5, .6, .6], + 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]], + [[2, 3, 6, 8], + [1, 2, 5, 3], + 4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]], dtype=np.float32) + + num_proposals = np.array([3, 2], dtype=np.int32) + refined_box_encodings = np.zeros( + [total_num_padded_proposals, num_classes, 4], dtype=np.float32) + class_predictions_with_background = np.ones( + [total_num_padded_proposals, num_classes+1], dtype=np.float32) + images = np.zeros(image_shape, dtype=np.float32) + + if use_static_shapes: + results = self.execute(graph_fn, + [images, refined_box_encodings, + class_predictions_with_background, + num_proposals, proposal_boxes], graph=g) + else: + results = self.execute_cpu(graph_fn, + [images, refined_box_encodings, + class_predictions_with_background, + num_proposals, proposal_boxes], graph=g) + # Note that max_total_detections=5 in the NMS config. + expected_num_detections = [5, 4] + expected_detection_classes = [[0, 0, 0, 1, 1], [0, 0, 1, 1, 0]] + expected_detection_scores = [[1, 1, 1, 1, 1], [1, 1, 1, 1, 0]] + expected_multiclass_scores = [[[1, 1, 1], + [1, 1, 1], + [1, 1, 1], + [1, 1, 1], + [1, 1, 1]], + [[1, 1, 1], + [1, 1, 1], + [1, 1, 1], + [1, 1, 1], + [0, 0, 0]]] + # Note that a single anchor can be used for multiple detections (predictions + # are made independently per class). 
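+    # For instance, with the uniform scores used here, the first image keeps
+    # proposals 0, 1, 2 for class 0 and proposals 0, 1 for class 1
+    # (max_total_detections is 5), so anchor indices 0 and 1 each appear twice
+    # in the expected list below.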
+ expected_anchor_indices = [[0, 1, 2, 0, 1], + [0, 1, 0, 1]] + + h = float(image_shape[1]) + w = float(image_shape[2]) + expected_raw_detection_boxes = np.array( + [[[1 / h, 1 / w, 2 / h, 3 / w], [0, 0, 1 / h, 1 / w], + [.5 / h, .5 / w, .6 / h, .6 / w], 4 * [0], 4 * [0], 4 * [0], 4 * [0], + 4 * [0]], + [[2 / h, 3 / w, 6 / h, 8 / w], [1 / h, 2 / w, 5 / h, 3 / w], 4 * [0], + 4 * [0], 4 * [0], 4 * [0], 4 * [0], 4 * [0]]], + dtype=np.float32) + + self.assertAllClose(results[0], expected_num_detections) + + for indx, num_proposals in enumerate(expected_num_detections): + self.assertAllClose(results[2][indx][0:num_proposals], + expected_detection_scores[indx][0:num_proposals]) + self.assertAllClose(results[3][indx][0:num_proposals], + expected_detection_classes[indx][0:num_proposals]) + self.assertAllClose(results[6][indx][0:num_proposals], + expected_multiclass_scores[indx][0:num_proposals]) + self.assertAllClose(results[7][indx][0:num_proposals], + expected_anchor_indices[indx][0:num_proposals]) + + self.assertAllClose(results[4], expected_raw_detection_boxes) + self.assertAllClose(results[5], + class_predictions_with_background.reshape([-1, 8, 3])) + if not use_static_shapes: + self.assertAllEqual(results[1].shape, [2, 5, 4]) + + def test_preprocess_preserves_dynamic_input_shapes(self): + width = tf.random.uniform([], minval=5, maxval=10, dtype=tf.int32) + batch = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) + shape = tf.stack([batch, 5, width, 3]) + image = tf.random.uniform(shape) + model = self._build_model( + is_training=False, number_of_stages=2, second_stage_batch_size=6) + preprocessed_inputs, _ = model.preprocess(image) + self.assertTrue( + preprocessed_inputs.shape.is_compatible_with([None, 5, None, 3])) + + def test_preprocess_preserves_static_input_shapes(self): + shape = tf.stack([2, 5, 5, 3]) + image = tf.random.uniform(shape) + model = self._build_model( + is_training=False, number_of_stages=2, second_stage_batch_size=6) + preprocessed_inputs, _ = model.preprocess(image) + self.assertTrue( + preprocessed_inputs.shape.is_compatible_with([2, 5, 5, 3])) + + # TODO(rathodv): Split test into two - with and without masks. 
+ def test_loss_first_stage_only_mode(self): + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=True, + number_of_stages=1, second_stage_batch_size=6) + batch_size = 2 + def graph_fn(): + """A function with TF compute.""" + anchors = tf.constant( + [[0, 0, 16, 16], + [0, 16, 16, 32], + [16, 0, 32, 16], + [16, 16, 32, 32]], dtype=tf.float32) + + rpn_box_encodings = tf.zeros( + [batch_size, + anchors.get_shape().as_list()[0], + BOX_CODE_SIZE], dtype=tf.float32) + # use different numbers for the objectness category to break ties in + # order of boxes returned by NMS + rpn_objectness_predictions_with_background = tf.constant([ + [[-10, 13], + [10, -10], + [10, -11], + [-10, 12]], + [[10, -10], + [-10, 13], + [-10, 12], + [10, -11]]], dtype=tf.float32) + image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32) + + groundtruth_boxes_list = [ + tf.constant([[0, 0, .5, .5], [.5, .5, 1, 1]], dtype=tf.float32), + tf.constant([[0, .5, .5, 1], [.5, 0, 1, .5]], dtype=tf.float32)] + groundtruth_classes_list = [tf.constant([[1, 0], [0, 1]], + dtype=tf.float32), + tf.constant([[1, 0], [1, 0]], + dtype=tf.float32)] + + prediction_dict = { + 'rpn_box_encodings': rpn_box_encodings, + 'rpn_objectness_predictions_with_background': + rpn_objectness_predictions_with_background, + 'image_shape': image_shape, + 'anchors': anchors + } + _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) + model.provide_groundtruth(groundtruth_boxes_list, + groundtruth_classes_list) + loss_dict = model.loss(prediction_dict, true_image_shapes) + self.assertNotIn('Loss/BoxClassifierLoss/localization_loss', + loss_dict) + self.assertNotIn('Loss/BoxClassifierLoss/classification_loss', + loss_dict) + return (loss_dict['Loss/RPNLoss/localization_loss'], + loss_dict['Loss/RPNLoss/objectness_loss']) + loc_loss, obj_loss = self.execute_cpu(graph_fn, [], graph=g) + self.assertAllClose(loc_loss, 0) + self.assertAllClose(obj_loss, 0) + + # TODO(rathodv): Split test into two - with and without masks. 
+ def test_loss_full(self): + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=True, + number_of_stages=2, second_stage_batch_size=6) + batch_size = 3 + def graph_fn(): + """A function with TF compute.""" + anchors = tf.constant( + [[0, 0, 16, 16], + [0, 16, 16, 32], + [16, 0, 32, 16], + [16, 16, 32, 32]], dtype=tf.float32) + rpn_box_encodings = tf.zeros( + [batch_size, + anchors.get_shape().as_list()[0], + BOX_CODE_SIZE], dtype=tf.float32) + # use different numbers for the objectness category to break ties in + # order of boxes returned by NMS + rpn_objectness_predictions_with_background = tf.constant( + [[[-10, 13], [10, -10], [10, -11], [-10, 12]], + [[10, -10], [-10, 13], [-10, 12], [10, -11]], + [[10, -10], [-10, 13], [-10, 12], [10, -11]]], + dtype=tf.float32) + image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32) + + num_proposals = tf.constant([6, 6, 6], dtype=tf.int32) + proposal_boxes = tf.constant( + 3 * [[[0, 0, 16, 16], [0, 16, 16, 32], [16, 0, 32, 16], + [16, 16, 32, 32], [0, 0, 16, 16], [0, 16, 16, 32]]], + dtype=tf.float32) + refined_box_encodings = tf.zeros( + (batch_size * model.max_num_proposals, + model.num_classes, + BOX_CODE_SIZE), dtype=tf.float32) + class_predictions_with_background = tf.constant( + [ + [-10, 10, -10], # first image + [10, -10, -10], + [10, -10, -10], + [-10, -10, 10], + [-10, 10, -10], + [10, -10, -10], + [10, -10, -10], # second image + [-10, 10, -10], + [-10, 10, -10], + [10, -10, -10], + [10, -10, -10], + [-10, 10, -10], + [10, -10, -10], # third image + [-10, 10, -10], + [-10, 10, -10], + [10, -10, -10], + [10, -10, -10], + [-10, 10, -10] + ], + dtype=tf.float32) + + mask_predictions_logits = 20 * tf.ones((batch_size * + model.max_num_proposals, + model.num_classes, + 14, 14), + dtype=tf.float32) + + groundtruth_boxes_list = [ + tf.constant([[0, 0, .5, .5], [.5, .5, 1, 1]], dtype=tf.float32), + tf.constant([[0, .5, .5, 1], [.5, 0, 1, .5]], dtype=tf.float32), + tf.constant([[0, .5, .5, 1], [.5, 0, 1, 1]], dtype=tf.float32) + ] + groundtruth_classes_list = [ + tf.constant([[1, 0], [0, 1]], dtype=tf.float32), + tf.constant([[1, 0], [1, 0]], dtype=tf.float32), + tf.constant([[1, 0], [0, 1]], dtype=tf.float32) + ] + + # Set all elements of groundtruth mask to 1.0. In this case all proposal + # crops of the groundtruth masks should return a mask that covers the + # entire proposal. Thus, if mask_predictions_logits element values are all + # greater than 20, the loss should be zero. 
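+      # (A logit of 20 corresponds to a sigmoid value within roughly 2e-9 of
+      # 1.0, so the per-pixel cross-entropy against an all-ones mask target is
+      # negligible.)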
+ groundtruth_masks_list = [ + tf.convert_to_tensor(np.ones((2, 32, 32)), dtype=tf.float32), + tf.convert_to_tensor(np.ones((2, 32, 32)), dtype=tf.float32), + tf.convert_to_tensor(np.ones((2, 32, 32)), dtype=tf.float32) + ] + groundtruth_weights_list = [ + tf.constant([1, 1], dtype=tf.float32), + tf.constant([1, 1], dtype=tf.float32), + tf.constant([1, 0], dtype=tf.float32) + ] + prediction_dict = { + 'rpn_box_encodings': rpn_box_encodings, + 'rpn_objectness_predictions_with_background': + rpn_objectness_predictions_with_background, + 'image_shape': image_shape, + 'anchors': anchors, + 'refined_box_encodings': refined_box_encodings, + 'class_predictions_with_background': + class_predictions_with_background, + 'proposal_boxes': proposal_boxes, + 'num_proposals': num_proposals, + 'mask_predictions': mask_predictions_logits + } + _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) + model.provide_groundtruth( + groundtruth_boxes_list, + groundtruth_classes_list, + groundtruth_masks_list, + groundtruth_weights_list=groundtruth_weights_list) + loss_dict = model.loss(prediction_dict, true_image_shapes) + return (loss_dict['Loss/RPNLoss/localization_loss'], + loss_dict['Loss/RPNLoss/objectness_loss'], + loss_dict['Loss/BoxClassifierLoss/localization_loss'], + loss_dict['Loss/BoxClassifierLoss/classification_loss'], + loss_dict['Loss/BoxClassifierLoss/mask_loss']) + (rpn_loc_loss, rpn_obj_loss, box_loc_loss, box_cls_loss, + box_mask_loss) = self.execute_cpu(graph_fn, [], graph=g) + self.assertAllClose(rpn_loc_loss, 0) + self.assertAllClose(rpn_obj_loss, 0) + self.assertAllClose(box_loc_loss, 0) + self.assertAllClose(box_cls_loss, 0) + self.assertAllClose(box_mask_loss, 0) + + def test_loss_full_zero_padded_proposals(self): + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=True, number_of_stages=2, second_stage_batch_size=6) + batch_size = 1 + def graph_fn(): + """A function with TF compute.""" + anchors = tf.constant( + [[0, 0, 16, 16], + [0, 16, 16, 32], + [16, 0, 32, 16], + [16, 16, 32, 32]], dtype=tf.float32) + rpn_box_encodings = tf.zeros( + [batch_size, + anchors.get_shape().as_list()[0], + BOX_CODE_SIZE], dtype=tf.float32) + # use different numbers for the objectness category to break ties in + # order of boxes returned by NMS + rpn_objectness_predictions_with_background = tf.constant([ + [[-10, 13], + [10, -10], + [10, -11], + [10, -12]],], dtype=tf.float32) + image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32) + + # box_classifier_batch_size is 6, but here we assume that the number of + # actual proposals (not counting zero paddings) is fewer (3). + num_proposals = tf.constant([3], dtype=tf.int32) + proposal_boxes = tf.constant( + [[[0, 0, 16, 16], + [0, 16, 16, 32], + [16, 0, 32, 16], + [0, 0, 0, 0], # begin paddings + [0, 0, 0, 0], + [0, 0, 0, 0]]], dtype=tf.float32) + + refined_box_encodings = tf.zeros( + (batch_size * model.max_num_proposals, + model.num_classes, + BOX_CODE_SIZE), dtype=tf.float32) + class_predictions_with_background = tf.constant( + [[-10, 10, -10], + [10, -10, -10], + [10, -10, -10], + [0, 0, 0], # begin paddings + [0, 0, 0], + [0, 0, 0]], dtype=tf.float32) + + mask_predictions_logits = 20 * tf.ones((batch_size * + model.max_num_proposals, + model.num_classes, + 14, 14), + dtype=tf.float32) + + groundtruth_boxes_list = [ + tf.constant([[0, 0, .5, .5]], dtype=tf.float32)] + groundtruth_classes_list = [tf.constant([[1, 0]], dtype=tf.float32)] + + # Set all elements of groundtruth mask to 1.0. 
In this case all proposal + # crops of the groundtruth masks should return a mask that covers the + # entire proposal. Thus, if mask_predictions_logits element values are all + # greater than 20, the loss should be zero. + groundtruth_masks_list = [tf.convert_to_tensor(np.ones((1, 32, 32)), + dtype=tf.float32)] + + prediction_dict = { + 'rpn_box_encodings': rpn_box_encodings, + 'rpn_objectness_predictions_with_background': + rpn_objectness_predictions_with_background, + 'image_shape': image_shape, + 'anchors': anchors, + 'refined_box_encodings': refined_box_encodings, + 'class_predictions_with_background': + class_predictions_with_background, + 'proposal_boxes': proposal_boxes, + 'num_proposals': num_proposals, + 'mask_predictions': mask_predictions_logits + } + _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) + model.provide_groundtruth(groundtruth_boxes_list, + groundtruth_classes_list, + groundtruth_masks_list) + loss_dict = model.loss(prediction_dict, true_image_shapes) + return (loss_dict['Loss/RPNLoss/localization_loss'], + loss_dict['Loss/RPNLoss/objectness_loss'], + loss_dict['Loss/BoxClassifierLoss/localization_loss'], + loss_dict['Loss/BoxClassifierLoss/classification_loss'], + loss_dict['Loss/BoxClassifierLoss/mask_loss']) + (rpn_loc_loss, rpn_obj_loss, box_loc_loss, box_cls_loss, + box_mask_loss) = self.execute_cpu(graph_fn, [], graph=g) + self.assertAllClose(rpn_loc_loss, 0) + self.assertAllClose(rpn_obj_loss, 0) + self.assertAllClose(box_loc_loss, 0) + self.assertAllClose(box_cls_loss, 0) + self.assertAllClose(box_mask_loss, 0) + + def test_loss_full_multiple_label_groundtruth(self): + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=True, + number_of_stages=2, second_stage_batch_size=6, + softmax_second_stage_classification_loss=False) + batch_size = 1 + def graph_fn(): + """A function with TF compute.""" + anchors = tf.constant( + [[0, 0, 16, 16], + [0, 16, 16, 32], + [16, 0, 32, 16], + [16, 16, 32, 32]], dtype=tf.float32) + rpn_box_encodings = tf.zeros( + [batch_size, + anchors.get_shape().as_list()[0], + BOX_CODE_SIZE], dtype=tf.float32) + # use different numbers for the objectness category to break ties in + # order of boxes returned by NMS + rpn_objectness_predictions_with_background = tf.constant([ + [[-10, 13], + [10, -10], + [10, -11], + [10, -12]],], dtype=tf.float32) + image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32) + + # box_classifier_batch_size is 6, but here we assume that the number of + # actual proposals (not counting zero paddings) is fewer (3). + num_proposals = tf.constant([3], dtype=tf.int32) + proposal_boxes = tf.constant( + [[[0, 0, 16, 16], + [0, 16, 16, 32], + [16, 0, 32, 16], + [0, 0, 0, 0], # begin paddings + [0, 0, 0, 0], + [0, 0, 0, 0]]], dtype=tf.float32) + + # second_stage_localization_loss should only be computed for predictions + # that match groundtruth. For multiple label groundtruth boxes, the loss + # should only be computed once for the label with the smaller index. 
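+      # In this test the matched proposal's class-0 encoding below is a perfect
+      # [0, 0, 0, 0] match, so the expected second-stage localization loss is
+      # zero even though the class-1 encoding is not.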
+ refined_box_encodings = tf.constant( + [[[0, 0, 0, 0], [1, 1, -1, -1]], + [[1, 1, -1, -1], [1, 1, 1, 1]], + [[1, 1, -1, -1], [1, 1, 1, 1]], + [[1, 1, -1, -1], [1, 1, 1, 1]], + [[1, 1, -1, -1], [1, 1, 1, 1]], + [[1, 1, -1, -1], [1, 1, 1, 1]]], dtype=tf.float32) + class_predictions_with_background = tf.constant( + [[-100, 100, 100], + [100, -100, -100], + [100, -100, -100], + [0, 0, 0], # begin paddings + [0, 0, 0], + [0, 0, 0]], dtype=tf.float32) + + mask_predictions_logits = 20 * tf.ones((batch_size * + model.max_num_proposals, + model.num_classes, + 14, 14), + dtype=tf.float32) + + groundtruth_boxes_list = [ + tf.constant([[0, 0, .5, .5]], dtype=tf.float32)] + # Box contains two ground truth labels. + groundtruth_classes_list = [tf.constant([[1, 1]], dtype=tf.float32)] + + # Set all elements of groundtruth mask to 1.0. In this case all proposal + # crops of the groundtruth masks should return a mask that covers the + # entire proposal. Thus, if mask_predictions_logits element values are all + # greater than 20, the loss should be zero. + groundtruth_masks_list = [tf.convert_to_tensor(np.ones((1, 32, 32)), + dtype=tf.float32)] + + prediction_dict = { + 'rpn_box_encodings': rpn_box_encodings, + 'rpn_objectness_predictions_with_background': + rpn_objectness_predictions_with_background, + 'image_shape': image_shape, + 'anchors': anchors, + 'refined_box_encodings': refined_box_encodings, + 'class_predictions_with_background': + class_predictions_with_background, + 'proposal_boxes': proposal_boxes, + 'num_proposals': num_proposals, + 'mask_predictions': mask_predictions_logits + } + _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) + model.provide_groundtruth(groundtruth_boxes_list, + groundtruth_classes_list, + groundtruth_masks_list) + loss_dict = model.loss(prediction_dict, true_image_shapes) + return (loss_dict['Loss/RPNLoss/localization_loss'], + loss_dict['Loss/RPNLoss/objectness_loss'], + loss_dict['Loss/BoxClassifierLoss/localization_loss'], + loss_dict['Loss/BoxClassifierLoss/classification_loss'], + loss_dict['Loss/BoxClassifierLoss/mask_loss']) + (rpn_loc_loss, rpn_obj_loss, box_loc_loss, box_cls_loss, + box_mask_loss) = self.execute_cpu(graph_fn, [], graph=g) + self.assertAllClose(rpn_loc_loss, 0) + self.assertAllClose(rpn_obj_loss, 0) + self.assertAllClose(box_loc_loss, 0) + self.assertAllClose(box_cls_loss, 0) + self.assertAllClose(box_mask_loss, 0) + + @parameterized.parameters( + {'use_static_shapes': False, 'shared_boxes': False}, + {'use_static_shapes': False, 'shared_boxes': True}, + {'use_static_shapes': True, 'shared_boxes': False}, + {'use_static_shapes': True, 'shared_boxes': True}, + ) + def test_loss_full_zero_padded_proposals_nonzero_loss_with_two_images( + self, use_static_shapes=False, shared_boxes=False): + batch_size = 2 + first_stage_max_proposals = 8 + second_stage_batch_size = 6 + num_classes = 2 + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=True, + number_of_stages=2, + second_stage_batch_size=second_stage_batch_size, + first_stage_max_proposals=first_stage_max_proposals, + num_classes=num_classes, + use_matmul_crop_and_resize=use_static_shapes, + clip_anchors_to_image=use_static_shapes, + use_static_shapes=use_static_shapes) + + def graph_fn(anchors, rpn_box_encodings, + rpn_objectness_predictions_with_background, images, + num_proposals, proposal_boxes, refined_box_encodings, + class_predictions_with_background, groundtruth_boxes, + groundtruth_classes): + """Function to construct tf graph for the 
test."""
+      prediction_dict = {
+          'rpn_box_encodings': rpn_box_encodings,
+          'rpn_objectness_predictions_with_background':
+              rpn_objectness_predictions_with_background,
+          'image_shape': tf.shape(images),
+          'anchors': anchors,
+          'refined_box_encodings': refined_box_encodings,
+          'class_predictions_with_background':
+              class_predictions_with_background,
+          'proposal_boxes': proposal_boxes,
+          'num_proposals': num_proposals
+      }
+      _, true_image_shapes = model.preprocess(images)
+      model.provide_groundtruth(tf.unstack(groundtruth_boxes),
+                                tf.unstack(groundtruth_classes))
+      loss_dict = model.loss(prediction_dict, true_image_shapes)
+      return (loss_dict['Loss/RPNLoss/localization_loss'],
+              loss_dict['Loss/RPNLoss/objectness_loss'],
+              loss_dict['Loss/BoxClassifierLoss/localization_loss'],
+              loss_dict['Loss/BoxClassifierLoss/classification_loss'])
+
+    anchors = np.array(
+        [[0, 0, 16, 16],
+         [0, 16, 16, 32],
+         [16, 0, 32, 16],
+         [16, 16, 32, 32]], dtype=np.float32)
+    rpn_box_encodings = np.zeros(
+        [batch_size, anchors.shape[1], BOX_CODE_SIZE], dtype=np.float32)
+    # use different numbers for the objectness category to break ties in
+    # order of boxes returned by NMS
+    rpn_objectness_predictions_with_background = np.array(
+        [[[-10, 13],
+          [10, -10],
+          [10, -11],
+          [10, -12]],
+         [[-10, 13],
+          [10, -10],
+          [10, -11],
+          [10, -12]]], dtype=np.float32)
+    images = np.zeros([batch_size, 32, 32, 3], dtype=np.float32)
+
+    # box_classifier_batch_size is 6, but here we assume that the number of
+    # actual proposals (not counting zero paddings) is fewer.
+    num_proposals = np.array([3, 2], dtype=np.int32)
+    proposal_boxes = np.array(
+        [[[0, 0, 16, 16],
+          [0, 16, 16, 32],
+          [16, 0, 32, 16],
+          [0, 0, 0, 0],  # begin paddings
+          [0, 0, 0, 0],
+          [0, 0, 0, 0]],
+         [[0, 0, 16, 16],
+          [0, 16, 16, 32],
+          [0, 0, 0, 0],  # begin paddings
+          [0, 0, 0, 0],
+          [0, 0, 0, 0],
+          [0, 0, 0, 0]]], dtype=np.float32)
+
+    refined_box_encodings = np.zeros(
+        (batch_size * second_stage_batch_size, 1
+         if shared_boxes else num_classes, BOX_CODE_SIZE),
+        dtype=np.float32)
+    class_predictions_with_background = np.array(
+        [[-10, 10, -10],  # first image
+         [10, -10, -10],
+         [10, -10, -10],
+         [0, 0, 0],  # begin paddings
+         [0, 0, 0],
+         [0, 0, 0],
+         [-10, -10, 10],  # second image
+         [10, -10, -10],
+         [0, 0, 0],  # begin paddings
+         [0, 0, 0],
+         [0, 0, 0],
+         [0, 0, 0],], dtype=np.float32)
+
+    # The first groundtruth box is 4/5 of the anchor size in both directions
+    # experiencing a loss of:
+    # 2 * SmoothL1(5 * log(4/5)) / num_proposals
+    #   = 2 * (abs(5 * log(4/5)) - .5) / 3
+    # The second groundtruth box is identical to the prediction and thus
+    # experiences zero loss.
+    # Total average loss is (abs(5 * log(4/5)) - .5) / 3.
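+    # Numerically, abs(5 * log(4/5)) is about 1.116, so the expected value of
+    # exp_loc_loss below is (1.116 - 0.5) / 3, roughly 0.205.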
+    groundtruth_boxes = np.stack([
+        np.array([[0.05, 0.05, 0.45, 0.45]], dtype=np.float32),
+        np.array([[0.0, 0.0, 0.5, 0.5]], dtype=np.float32)])
+    groundtruth_classes = np.stack([np.array([[1, 0]], dtype=np.float32),
+                                    np.array([[0, 1]], dtype=np.float32)])
+
+    execute_fn = self.execute_cpu
+    if use_static_shapes:
+      execute_fn = self.execute
+
+    results = execute_fn(graph_fn, [
+        anchors, rpn_box_encodings, rpn_objectness_predictions_with_background,
+        images, num_proposals, proposal_boxes, refined_box_encodings,
+        class_predictions_with_background, groundtruth_boxes,
+        groundtruth_classes
+    ], graph=g)
+
+    exp_loc_loss = (-5 * np.log(.8) - 0.5) / 3.0
+
+    self.assertAllClose(results[0], exp_loc_loss, rtol=1e-4, atol=1e-4)
+    self.assertAllClose(results[1], 0.0)
+    self.assertAllClose(results[2], exp_loc_loss, rtol=1e-4, atol=1e-4)
+    self.assertAllClose(results[3], 0.0)
+
+  def test_loss_with_hard_mining(self):
+    with test_utils.GraphContextOrNone() as g:
+      model = self._build_model(is_training=True,
+                                number_of_stages=2,
+                                second_stage_batch_size=None,
+                                first_stage_max_proposals=6,
+                                hard_mining=True)
+    batch_size = 1
+    def graph_fn():
+      """A function with TF compute."""
+      anchors = tf.constant(
+          [[0, 0, 16, 16],
+           [0, 16, 16, 32],
+           [16, 0, 32, 16],
+           [16, 16, 32, 32]], dtype=tf.float32)
+      rpn_box_encodings = tf.zeros(
+          [batch_size,
+           anchors.get_shape().as_list()[0],
+           BOX_CODE_SIZE], dtype=tf.float32)
+      # use different numbers for the objectness category to break ties in
+      # order of boxes returned by NMS
+      rpn_objectness_predictions_with_background = tf.constant(
+          [[[-10, 13],
+            [-10, 12],
+            [10, -11],
+            [10, -12]]], dtype=tf.float32)
+      image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32)
+
+      # box_classifier_batch_size is 6, but here we assume that the number of
+      # actual proposals (not counting zero paddings) is fewer (3).
+      num_proposals = tf.constant([3], dtype=tf.int32)
+      proposal_boxes = tf.constant(
+          [[[0, 0, 16, 16],
+            [0, 16, 16, 32],
+            [16, 0, 32, 16],
+            [0, 0, 0, 0],  # begin paddings
+            [0, 0, 0, 0],
+            [0, 0, 0, 0]]], dtype=tf.float32)
+
+      refined_box_encodings = tf.zeros(
+          (batch_size * model.max_num_proposals,
+           model.num_classes,
+           BOX_CODE_SIZE), dtype=tf.float32)
+      class_predictions_with_background = tf.constant(
+          [[-10, 10, -10],  # first image
+           [-10, -10, 10],
+           [10, -10, -10],
+           [0, 0, 0],  # begin paddings
+           [0, 0, 0],
+           [0, 0, 0]], dtype=tf.float32)
+
+      # The first groundtruth box is 4/5 of the anchor size in both directions
+      # experiencing a loss of:
+      # 2 * SmoothL1(5 * log(4/5)) / num_proposals
+      #   = 2 * (abs(5 * log(4/5)) - .5) / 3
+      # The second groundtruth box is 46/50 of the anchor size in both
+      # directions experiencing a loss of:
+      # 2 * SmoothL1(5 * log(46/50)) / num_proposals
+      #   = 2 * (.5 * (5 * log(.92))^2) / 3.
+      # Since the first groundtruth box experiences greater loss, and we have
+      # set num_hard_examples=1 in the HardMiner, the final localization loss
+      # corresponds to that of the first groundtruth box.
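+      # Numerically, the first box contributes 2 * (1.116 - 0.5), about 1.23,
+      # while the second contributes 2 * 0.5 * (0.417)^2, about 0.17, so the
+      # mined localization loss is roughly 1.23 / 3 = 0.41 (exp_loc_loss below).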
+      groundtruth_boxes_list = [
+          tf.constant([[0.05, 0.05, 0.45, 0.45],
+                       [0.02, 0.52, 0.48, 0.98],], dtype=tf.float32)]
+      groundtruth_classes_list = [tf.constant([[1, 0], [0, 1]],
+                                              dtype=tf.float32)]
+
+      prediction_dict = {
+          'rpn_box_encodings': rpn_box_encodings,
+          'rpn_objectness_predictions_with_background':
+              rpn_objectness_predictions_with_background,
+          'image_shape': image_shape,
+          'anchors': anchors,
+          'refined_box_encodings': refined_box_encodings,
+          'class_predictions_with_background':
+              class_predictions_with_background,
+          'proposal_boxes': proposal_boxes,
+          'num_proposals': num_proposals
+      }
+      _, true_image_shapes = model.preprocess(tf.zeros(image_shape))
+      model.provide_groundtruth(groundtruth_boxes_list,
+                                groundtruth_classes_list)
+      loss_dict = model.loss(prediction_dict, true_image_shapes)
+      return (loss_dict['Loss/BoxClassifierLoss/localization_loss'],
+              loss_dict['Loss/BoxClassifierLoss/classification_loss'])
+    loc_loss, cls_loss = self.execute_cpu(graph_fn, [], graph=g)
+    exp_loc_loss = 2 * (-5 * np.log(.8) - 0.5) / 3.0
+    self.assertAllClose(loc_loss, exp_loc_loss)
+    self.assertAllClose(cls_loss, 0)
+
+  def test_loss_with_hard_mining_and_losses_mask(self):
+    with test_utils.GraphContextOrNone() as g:
+      model = self._build_model(is_training=True,
+                                number_of_stages=2,
+                                second_stage_batch_size=None,
+                                first_stage_max_proposals=6,
+                                hard_mining=True)
+    batch_size = 2
+    number_of_proposals = 3
+    def graph_fn():
+      """A function with TF compute."""
+      anchors = tf.constant(
+          [[0, 0, 16, 16],
+           [0, 16, 16, 32],
+           [16, 0, 32, 16],
+           [16, 16, 32, 32]], dtype=tf.float32)
+      rpn_box_encodings = tf.zeros(
+          [batch_size,
+           anchors.get_shape().as_list()[0],
+           BOX_CODE_SIZE], dtype=tf.float32)
+      # use different numbers for the objectness category to break ties in
+      # order of boxes returned by NMS
+      rpn_objectness_predictions_with_background = tf.constant(
+          [[[-10, 13],
+            [-10, 12],
+            [10, -11],
+            [10, -12]],
+           [[-10, 13],
+            [-10, 12],
+            [10, -11],
+            [10, -12]]], dtype=tf.float32)
+      image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32)
+
+      # box_classifier_batch_size is 6, but here we assume that the number of
+      # actual proposals (not counting zero paddings) is fewer (3).
+      num_proposals = tf.constant([number_of_proposals, number_of_proposals],
+                                  dtype=tf.int32)
+      proposal_boxes = tf.constant(
+          [[[0, 0, 16, 16],  # first image
+            [0, 16, 16, 32],
+            [16, 0, 32, 16],
+            [0, 0, 0, 0],  # begin paddings
+            [0, 0, 0, 0],
+            [0, 0, 0, 0]],
+           [[0, 0, 16, 16],  # second image
+            [0, 16, 16, 32],
+            [16, 0, 32, 16],
+            [0, 0, 0, 0],  # begin paddings
+            [0, 0, 0, 0],
+            [0, 0, 0, 0]]], dtype=tf.float32)
+
+      refined_box_encodings = tf.zeros(
+          (batch_size * model.max_num_proposals,
+           model.num_classes,
+           BOX_CODE_SIZE), dtype=tf.float32)
+      class_predictions_with_background = tf.constant(
+          [[-10, 10, -10],  # first image
+           [-10, -10, 10],
+           [10, -10, -10],
+           [0, 0, 0],  # begin paddings
+           [0, 0, 0],
+           [0, 0, 0],
+           [-10, 10, -10],  # second image
+           [-10, -10, 10],
+           [10, -10, -10],
+           [0, 0, 0],  # begin paddings
+           [0, 0, 0],
+           [0, 0, 0]], dtype=tf.float32)
+
+      # The first groundtruth box is 4/5 of the anchor size in both directions
+      # experiencing a loss of:
+      # 2 * SmoothL1(5 * log(4/5)) / (num_proposals * batch_size)
+      #   = 2 * (abs(5 * log(4/5)) - .5) / (3 * 2)
+      # The second groundtruth box is 46/50 of the anchor size in both
+      # directions experiencing a loss of:
+      # 2 * SmoothL1(5 * log(46/50)) / (num_proposals * batch_size)
+      #   = 2 * (.5 * (5 * log(.92))^2) / (3 * 2).
+ # Since the first groundtruth box experiences greater loss, and we have + # set num_hard_examples=1 in the HardMiner, the final localization loss + # corresponds to that of the first groundtruth box. + groundtruth_boxes_list = [ + tf.constant([[0.05, 0.05, 0.45, 0.45], + [0.02, 0.52, 0.48, 0.98]], dtype=tf.float32), + tf.constant([[0.05, 0.05, 0.45, 0.45], + [0.02, 0.52, 0.48, 0.98]], dtype=tf.float32)] + groundtruth_classes_list = [ + tf.constant([[1, 0], [0, 1]], dtype=tf.float32), + tf.constant([[1, 0], [0, 1]], dtype=tf.float32)] + is_annotated_list = [tf.constant(True, dtype=tf.bool), + tf.constant(False, dtype=tf.bool)] + + prediction_dict = { + 'rpn_box_encodings': rpn_box_encodings, + 'rpn_objectness_predictions_with_background': + rpn_objectness_predictions_with_background, + 'image_shape': image_shape, + 'anchors': anchors, + 'refined_box_encodings': refined_box_encodings, + 'class_predictions_with_background': + class_predictions_with_background, + 'proposal_boxes': proposal_boxes, + 'num_proposals': num_proposals + } + _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) + model.provide_groundtruth(groundtruth_boxes_list, + groundtruth_classes_list, + is_annotated_list=is_annotated_list) + loss_dict = model.loss(prediction_dict, true_image_shapes) + return (loss_dict['Loss/BoxClassifierLoss/localization_loss'], + loss_dict['Loss/BoxClassifierLoss/classification_loss']) + exp_loc_loss = (2 * (-5 * np.log(.8) - 0.5) / + (number_of_proposals * batch_size)) + loc_loss, cls_loss = self.execute_cpu(graph_fn, [], graph=g) + self.assertAllClose(loc_loss, exp_loc_loss) + self.assertAllClose(cls_loss, 0) + + def test_restore_map_for_classification_ckpt(self): + if tf_version.is_tf2(): self.skipTest('Skipping TF1 only test.') + # Define mock tensorflow classification graph and save variables. + test_graph_classification = tf.Graph() + with test_graph_classification.as_default(): + image = tf.placeholder(dtype=tf.float32, shape=[1, 20, 20, 3]) + with tf.variable_scope('mock_model'): + net = slim.conv2d(image, num_outputs=3, kernel_size=1, scope='layer1') + slim.conv2d(net, num_outputs=3, kernel_size=1, scope='layer2') + + init_op = tf.global_variables_initializer() + saver = tf.train.Saver() + save_path = self.get_temp_dir() + with self.test_session(graph=test_graph_classification) as sess: + sess.run(init_op) + saved_model_path = saver.save(sess, save_path) + + # Create tensorflow detection graph and load variables from + # classification checkpoint. 
+ test_graph_detection = tf.Graph() + with test_graph_detection.as_default(): + model = self._build_model( + is_training=False, + number_of_stages=2, second_stage_batch_size=6) + + inputs_shape = (2, 20, 20, 3) + inputs = tf.cast(tf.random_uniform( + inputs_shape, minval=0, maxval=255, dtype=tf.int32), dtype=tf.float32) + preprocessed_inputs, true_image_shapes = model.preprocess(inputs) + prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) + model.postprocess(prediction_dict, true_image_shapes) + var_map = model.restore_map(fine_tune_checkpoint_type='classification') + self.assertIsInstance(var_map, dict) + saver = tf.train.Saver(var_map) + with self.test_session(graph=test_graph_classification) as sess: + saver.restore(sess, saved_model_path) + for var in sess.run(tf.report_uninitialized_variables()): + self.assertNotIn(model.first_stage_feature_extractor_scope, var) + self.assertNotIn(model.second_stage_feature_extractor_scope, var) + + def test_restore_map_for_detection_ckpt(self): + if tf_version.is_tf2(): self.skipTest('Skipping TF1 only test.') + # Define mock tensorflow classification graph and save variables. + # Define first detection graph and save variables. + test_graph_detection1 = tf.Graph() + with test_graph_detection1.as_default(): + model = self._build_model( + is_training=False, + number_of_stages=2, second_stage_batch_size=6) + inputs_shape = (2, 20, 20, 3) + inputs = tf.cast(tf.random_uniform( + inputs_shape, minval=0, maxval=255, dtype=tf.int32), dtype=tf.float32) + preprocessed_inputs, true_image_shapes = model.preprocess(inputs) + prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) + model.postprocess(prediction_dict, true_image_shapes) + another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable + init_op = tf.global_variables_initializer() + saver = tf.train.Saver() + save_path = self.get_temp_dir() + with self.test_session(graph=test_graph_detection1) as sess: + sess.run(init_op) + saved_model_path = saver.save(sess, save_path) + + # Define second detection graph and restore variables. 
+ test_graph_detection2 = tf.Graph() + with test_graph_detection2.as_default(): + model2 = self._build_model(is_training=False, + number_of_stages=2, + second_stage_batch_size=6, num_classes=42) + + inputs_shape2 = (2, 20, 20, 3) + inputs2 = tf.cast(tf.random_uniform( + inputs_shape2, minval=0, maxval=255, dtype=tf.int32), + dtype=tf.float32) + preprocessed_inputs2, true_image_shapes = model2.preprocess(inputs2) + prediction_dict2 = model2.predict(preprocessed_inputs2, true_image_shapes) + model2.postprocess(prediction_dict2, true_image_shapes) + another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable + var_map = model2.restore_map(fine_tune_checkpoint_type='detection') + self.assertIsInstance(var_map, dict) + saver = tf.train.Saver(var_map) + with self.test_session(graph=test_graph_detection2) as sess: + saver.restore(sess, saved_model_path) + uninitialized_vars_list = sess.run(tf.report_uninitialized_variables()) + self.assertIn(six.b('another_variable'), uninitialized_vars_list) + for var in uninitialized_vars_list: + self.assertNotIn( + six.b(model2.first_stage_feature_extractor_scope), var) + self.assertNotIn( + six.b(model2.second_stage_feature_extractor_scope), var) + + def test_load_all_det_checkpoint_vars(self): + if tf_version.is_tf2(): self.skipTest('Skipping TF1 only test.') + test_graph_detection = tf.Graph() + with test_graph_detection.as_default(): + model = self._build_model( + is_training=False, + number_of_stages=2, + second_stage_batch_size=6, + num_classes=42) + + inputs_shape = (2, 20, 20, 3) + inputs = tf.cast( + tf.random_uniform(inputs_shape, minval=0, maxval=255, dtype=tf.int32), + dtype=tf.float32) + preprocessed_inputs, true_image_shapes = model.preprocess(inputs) + prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) + model.postprocess(prediction_dict, true_image_shapes) + another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable + var_map = model.restore_map( + fine_tune_checkpoint_type='detection', + load_all_detection_checkpoint_vars=True) + self.assertIsInstance(var_map, dict) + self.assertIn('another_variable', var_map) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/meta_architectures/rfcn_meta_arch.py b/models/research/object_detection/meta_architectures/rfcn_meta_arch.py new file mode 100644 index 0000000000000000000000000000000000000000..1228a4b90a79039aa6519ffe2d899bc80541aedc --- /dev/null +++ b/models/research/object_detection/meta_architectures/rfcn_meta_arch.py @@ -0,0 +1,388 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""R-FCN meta-architecture definition. + +R-FCN: Dai, Jifeng, et al. "R-FCN: Object Detection via Region-based +Fully Convolutional Networks." arXiv preprint arXiv:1605.06409 (2016). 
+ +The R-FCN meta architecture is similar to Faster R-CNN and only differs in the +second stage. Hence this class inherits FasterRCNNMetaArch and overrides only +the `_predict_second_stage` method. + +Similar to Faster R-CNN we allow for two modes: number_of_stages=1 and +number_of_stages=2. In the former setting, all of the user facing methods +(e.g., predict, postprocess, loss) can be used as if the model consisted +only of the RPN, returning class agnostic proposals (these can be thought of as +approximate detections with no associated class information). In the latter +setting, proposals are computed, then passed through a second stage +"box classifier" to yield (multi-class) detections. + +Implementations of R-FCN models must define a new FasterRCNNFeatureExtractor and +override three methods: `preprocess`, `_extract_proposal_features` (the first +stage of the model), and `_extract_box_classifier_features` (the second stage of +the model). Optionally, the `restore_fn` method can be overridden. See tests +for an example. + +See notes in the documentation of Faster R-CNN meta-architecture as they all +apply here. +""" +import tensorflow.compat.v1 as tf + +from object_detection.core import box_predictor +from object_detection.meta_architectures import faster_rcnn_meta_arch +from object_detection.utils import ops + + +class RFCNMetaArch(faster_rcnn_meta_arch.FasterRCNNMetaArch): + """R-FCN Meta-architecture definition.""" + + def __init__(self, + is_training, + num_classes, + image_resizer_fn, + feature_extractor, + number_of_stages, + first_stage_anchor_generator, + first_stage_target_assigner, + first_stage_atrous_rate, + first_stage_box_predictor_arg_scope_fn, + first_stage_box_predictor_kernel_size, + first_stage_box_predictor_depth, + first_stage_minibatch_size, + first_stage_sampler, + first_stage_non_max_suppression_fn, + first_stage_max_proposals, + first_stage_localization_loss_weight, + first_stage_objectness_loss_weight, + crop_and_resize_fn, + second_stage_target_assigner, + second_stage_rfcn_box_predictor, + second_stage_batch_size, + second_stage_sampler, + second_stage_non_max_suppression_fn, + second_stage_score_conversion_fn, + second_stage_localization_loss_weight, + second_stage_classification_loss_weight, + second_stage_classification_loss, + hard_example_miner, + parallel_iterations=16, + add_summaries=True, + clip_anchors_to_image=False, + use_static_shapes=False, + resize_masks=False, + freeze_batchnorm=False, + return_raw_detections_during_predict=False, + output_final_box_features=False): + """RFCNMetaArch Constructor. + + Args: + is_training: A boolean indicating whether the training version of the + computation graph should be constructed. + num_classes: Number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + image_resizer_fn: A callable for image resizing. This callable always + takes a rank-3 image tensor (corresponding to a single image) and + returns a rank-3 image tensor, possibly with new spatial dimensions. + See builders/image_resizer_builder.py. + feature_extractor: A FasterRCNNFeatureExtractor object. + number_of_stages: Valid values are {1, 2}. If 1 will only construct the + Region Proposal Network (RPN) part of the model. 
+ first_stage_anchor_generator: An anchor_generator.AnchorGenerator object + (note that currently we only support + grid_anchor_generator.GridAnchorGenerator objects) + first_stage_target_assigner: Target assigner to use for first stage of + R-FCN (RPN). + first_stage_atrous_rate: A single integer indicating the atrous rate for + the single convolution op which is applied to the `rpn_features_to_crop` + tensor to obtain a tensor to be used for box prediction. Some feature + extractors optionally allow for producing feature maps computed at + denser resolutions. The atrous rate is used to compensate for the + denser feature maps by using an effectively larger receptive field. + (This should typically be set to 1). + first_stage_box_predictor_arg_scope_fn: Either a + Keras layer hyperparams object or a function to construct tf-slim + arg_scope for conv2d, separable_conv2d and fully_connected ops. Used + for the RPN box predictor. If it is a keras hyperparams object the + RPN box predictor will be a Keras model. If it is a function to + construct an arg scope it will be a tf-slim box predictor. + first_stage_box_predictor_kernel_size: Kernel size to use for the + convolution op just prior to RPN box predictions. + first_stage_box_predictor_depth: Output depth for the convolution op + just prior to RPN box predictions. + first_stage_minibatch_size: The "batch size" to use for computing the + objectness and location loss of the region proposal network. This + "batch size" refers to the number of anchors selected as contributing + to the loss function for any given image within the image batch and is + only called "batch_size" due to terminology from the Faster R-CNN paper. + first_stage_sampler: The sampler for the boxes used to calculate the RPN + loss after the first stage. + first_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression + callable that takes `boxes`, `scores` and optional `clip_window`(with + all other inputs already set) and returns a dictionary containing + tensors with keys: `detection_boxes`, `detection_scores`, + `detection_classes`, `num_detections`. This is used to perform non max + suppression on the boxes predicted by the Region Proposal Network + (RPN). + See `post_processing.batch_multiclass_non_max_suppression` for the type + and shape of these tensors. + first_stage_max_proposals: Maximum number of boxes to retain after + performing Non-Max Suppression (NMS) on the boxes predicted by the + Region Proposal Network (RPN). + first_stage_localization_loss_weight: A float + first_stage_objectness_loss_weight: A float + crop_and_resize_fn: A differentiable resampler to use for cropping RPN + proposal features. + second_stage_target_assigner: Target assigner to use for second stage of + R-FCN. If the model is configured with multiple prediction heads, this + target assigner is used to generate targets for all heads (with the + correct `unmatched_class_label`). + second_stage_rfcn_box_predictor: RFCN box predictor to use for + second stage. + second_stage_batch_size: The batch size used for computing the + classification and refined location loss of the box classifier. This + "batch size" refers to the number of proposals selected as contributing + to the loss function for any given image within the image batch and is + only called "batch_size" due to terminology from the Faster R-CNN paper. + second_stage_sampler: The sampler for the boxes used for second stage + box classifier. 
+      second_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression
+        callable that takes `boxes`, `scores`, optional `clip_window` and
+        optional (kwarg) `mask` inputs (with all other inputs already set)
+        and returns a dictionary containing tensors with keys:
+        `detection_boxes`, `detection_scores`, `detection_classes`,
+        `num_detections`, and (optionally) `detection_masks`. See
+        `post_processing.batch_multiclass_non_max_suppression` for the type and
+        shape of these tensors.
+      second_stage_score_conversion_fn: Callable elementwise nonlinearity
+        (that takes tensors as inputs and returns tensors). This is usually
+        used to convert logits to probabilities.
+      second_stage_localization_loss_weight: A float weight for the second
+        stage localization loss.
+      second_stage_classification_loss_weight: A float weight for the second
+        stage classification loss.
+      second_stage_classification_loss: A string indicating which loss function
+        to use, supports 'softmax' and 'sigmoid'.
+      hard_example_miner: A losses.HardExampleMiner object (can be None).
+      parallel_iterations: (Optional) The number of iterations allowed to run
+        in parallel for calls to tf.map_fn.
+      add_summaries: boolean (default: True) controlling whether summary ops
+        should be added to the tensorflow graph.
+      clip_anchors_to_image: The anchors generated are clipped to the
+        window size without filtering the nonoverlapping anchors. This generates
+        a static number of anchors. This argument is unused.
+      use_static_shapes: If True, uses implementation of ops with static shape
+        guarantees.
+      resize_masks: Indicates whether the masks present in the groundtruth
+        should be resized in the model with `image_resizer_fn`.
+      freeze_batchnorm: Whether to freeze batch norm parameters during
+        training or not. When training with a small batch size (e.g. 1), it is
+        desirable to freeze batch norm update and use pretrained batch norm
+        params.
+      return_raw_detections_during_predict: Whether to return raw detection
+        boxes in the predict() method. These are decoded boxes that have not
+        been through postprocessing (i.e. NMS). Default False.
+      output_final_box_features: Whether to output final box features. If True,
+        it crops the feature map based on the final box prediction and returns
+        it in the dict as detection_features.
+
+    Raises:
+      ValueError: If `second_stage_batch_size` > `first_stage_max_proposals`
+      ValueError: If first_stage_anchor_generator is not of type
+        grid_anchor_generator.GridAnchorGenerator.
+    """
+    # TODO(rathodv): add_summaries and crop_and_resize_fn are currently
+    # unused. Respect that directive in the future.
+    super(RFCNMetaArch, self).__init__(
+        is_training,
+        num_classes,
+        image_resizer_fn,
+        feature_extractor,
+        number_of_stages,
+        first_stage_anchor_generator,
+        first_stage_target_assigner,
+        first_stage_atrous_rate,
+        first_stage_box_predictor_arg_scope_fn,
+        first_stage_box_predictor_kernel_size,
+        first_stage_box_predictor_depth,
+        first_stage_minibatch_size,
+        first_stage_sampler,
+        first_stage_non_max_suppression_fn,
+        first_stage_max_proposals,
+        first_stage_localization_loss_weight,
+        first_stage_objectness_loss_weight,
+        crop_and_resize_fn,
+        None,  # initial_crop_size is not used in R-FCN
+        None,  # maxpool_kernel_size is not used in R-FCN
+        None,  # maxpool_stride is not used in R-FCN
+        second_stage_target_assigner,
+        None,  # fully_connected_box_predictor is not used in R-FCN.
+ second_stage_batch_size, + second_stage_sampler, + second_stage_non_max_suppression_fn, + second_stage_score_conversion_fn, + second_stage_localization_loss_weight, + second_stage_classification_loss_weight, + second_stage_classification_loss, + 1.0, # second stage mask prediction loss weight isn't used in R-FCN. + hard_example_miner, + parallel_iterations, + add_summaries, + clip_anchors_to_image, + use_static_shapes, + resize_masks, + freeze_batchnorm=freeze_batchnorm, + return_raw_detections_during_predict=( + return_raw_detections_during_predict), + output_final_box_features=output_final_box_features) + + self._rfcn_box_predictor = second_stage_rfcn_box_predictor + + def _predict_second_stage(self, rpn_box_encodings, + rpn_objectness_predictions_with_background, + rpn_features, + anchors, + image_shape, + true_image_shapes): + """Predicts the output tensors from 2nd stage of R-FCN. + + Args: + rpn_box_encodings: 3-D float tensor of shape + [batch_size, num_valid_anchors, self._box_coder.code_size] containing + predicted boxes. + rpn_objectness_predictions_with_background: 3-D float tensor of shape + [batch_size, num_valid_anchors, 2] containing class + predictions (logits) for each of the anchors. Note that this + tensor *includes* background class predictions (at class index 0). + rpn_features: A 4-D float32 tensor with shape + [batch_size, height, width, depth] representing image features from the + RPN. + anchors: 2-D float tensor of shape + [num_anchors, self._box_coder.code_size]. + image_shape: A 1D int32 tensors of size [4] containing the image shape. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + + Returns: + prediction_dict: a dictionary holding "raw" prediction tensors: + 1) refined_box_encodings: a 3-D tensor with shape + [total_num_proposals, num_classes, 4] representing predicted + (final) refined box encodings, where + total_num_proposals=batch_size*self._max_num_proposals + 2) class_predictions_with_background: a 2-D tensor with shape + [total_num_proposals, num_classes + 1] containing class + predictions (logits) for each of the anchors, where + total_num_proposals=batch_size*self._max_num_proposals. + Note that this tensor *includes* background class predictions + (at class index 0). + 3) num_proposals: An int32 tensor of shape [batch_size] representing the + number of proposals generated by the RPN. `num_proposals` allows us + to keep track of which entries are to be treated as zero paddings and + which are not since we always pad the number of proposals to be + `self.max_num_proposals` for each image. + 4) proposal_boxes: A float32 tensor of shape + [batch_size, self.max_num_proposals, 4] representing + decoded proposal bounding boxes (in absolute coordinates). + 5) proposal_boxes_normalized: A float32 tensor of shape + [batch_size, self.max_num_proposals, 4] representing decoded proposal + bounding boxes (in normalized coordinates). Can be used to override + the boxes proposed by the RPN, thus enabling one to extract box + classification and prediction for externally selected areas of the + image. + 6) box_classifier_features: a 4-D float32 tensor, of shape + [batch_size, feature_map_height, feature_map_width, depth], + representing the box classifier features. 
+ """ + image_shape_2d = tf.tile(tf.expand_dims(image_shape[1:], 0), + [image_shape[0], 1]) + (proposal_boxes_normalized, _, _, num_proposals, _, + _) = self._postprocess_rpn(rpn_box_encodings, + rpn_objectness_predictions_with_background, + anchors, image_shape_2d, true_image_shapes) + + box_classifier_features = ( + self._extract_box_classifier_features(rpn_features)) + + if self._rfcn_box_predictor.is_keras_model: + box_predictions = self._rfcn_box_predictor( + [box_classifier_features], + proposal_boxes=proposal_boxes_normalized) + else: + box_predictions = self._rfcn_box_predictor.predict( + [box_classifier_features], + num_predictions_per_location=[1], + scope=self.second_stage_box_predictor_scope, + proposal_boxes=proposal_boxes_normalized) + refined_box_encodings = tf.squeeze( + tf.concat(box_predictions[box_predictor.BOX_ENCODINGS], axis=1), axis=1) + class_predictions_with_background = tf.squeeze( + tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1), + axis=1) + + absolute_proposal_boxes = ops.normalized_to_image_coordinates( + proposal_boxes_normalized, image_shape, + parallel_iterations=self._parallel_iterations) + + prediction_dict = { + 'refined_box_encodings': refined_box_encodings, + 'class_predictions_with_background': + class_predictions_with_background, + 'num_proposals': num_proposals, + 'proposal_boxes': absolute_proposal_boxes, + 'box_classifier_features': box_classifier_features, + 'proposal_boxes_normalized': proposal_boxes_normalized, + 'final_anchors': absolute_proposal_boxes + } + if self._return_raw_detections_during_predict: + prediction_dict.update(self._raw_detections_and_feature_map_inds( + refined_box_encodings, absolute_proposal_boxes)) + return prediction_dict + + def regularization_losses(self): + """Returns a list of regularization losses for this model. + + Returns a list of regularization losses for this model that the estimator + needs to use during training/optimization. + + Returns: + A list of regularization loss tensors. + """ + reg_losses = super(RFCNMetaArch, self).regularization_losses() + if self._rfcn_box_predictor.is_keras_model: + reg_losses.extend(self._rfcn_box_predictor.losses) + return reg_losses + + def updates(self): + """Returns a list of update operators for this model. + + Returns a list of update operators for this model that must be executed at + each training step. The estimator's train op needs to have a control + dependency on these updates. + + Returns: + A list of update operators. + """ + update_ops = super(RFCNMetaArch, self).updates() + + if self._rfcn_box_predictor.is_keras_model: + update_ops.extend( + self._rfcn_box_predictor.get_updates_for(None)) + update_ops.extend( + self._rfcn_box_predictor.get_updates_for( + self._rfcn_box_predictor.inputs)) + return update_ops diff --git a/models/research/object_detection/meta_architectures/rfcn_meta_arch_test.py b/models/research/object_detection/meta_architectures/rfcn_meta_arch_test.py new file mode 100644 index 0000000000000000000000000000000000000000..9e279bdf499901d6682cb6a195071f22c537f98e --- /dev/null +++ b/models/research/object_detection/meta_architectures/rfcn_meta_arch_test.py @@ -0,0 +1,67 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.meta_architectures.rfcn_meta_arch.""" + +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures import faster_rcnn_meta_arch_test_lib +from object_detection.meta_architectures import rfcn_meta_arch + + +class RFCNMetaArchTest( + faster_rcnn_meta_arch_test_lib.FasterRCNNMetaArchTestBase): + + def _get_second_stage_box_predictor_text_proto( + self, share_box_across_classes=False): + del share_box_across_classes + box_predictor_text_proto = """ + rfcn_box_predictor { + conv_hyperparams { + op: CONV + activation: NONE + regularizer { + l2_regularizer { + weight: 0.0005 + } + } + initializer { + variance_scaling_initializer { + factor: 1.0 + uniform: true + mode: FAN_AVG + } + } + } + } + """ + return box_predictor_text_proto + + def _get_model(self, box_predictor, **common_kwargs): + return rfcn_meta_arch.RFCNMetaArch( + second_stage_rfcn_box_predictor=box_predictor, **common_kwargs) + + def _get_box_classifier_features_shape(self, + image_size, + batch_size, + max_num_proposals, + initial_crop_size, + maxpool_stride, + num_features): + return (batch_size, image_size, image_size, num_features) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/meta_architectures/ssd_meta_arch.py b/models/research/object_detection/meta_architectures/ssd_meta_arch.py new file mode 100644 index 0000000000000000000000000000000000000000..d401b0de75a6a1c04984caad12986029e3166226 --- /dev/null +++ b/models/research/object_detection/meta_architectures/ssd_meta_arch.py @@ -0,0 +1,1367 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""SSD Meta-architecture definition. + +General tensorflow implementation of convolutional Multibox/SSD detection +models. 
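As a rough orientation for the shape conventions used throughout this module (a standalone sketch, not code from the file): each feature map contributes height_i * width_i * anchors_per_location_i anchors, and the per-feature-map predictions are concatenated along the anchor dimension. The sizes below are made up purely to demonstrate the bookkeeping.

```python
# Illustrative only: feature map sizes and anchors-per-location counts are
# invented; they simply show how the total anchor count is accumulated.
feature_map_spatial_dims = [(19, 19), (10, 10), (5, 5), (3, 3), (2, 2), (1, 1)]
anchors_per_location = [3, 6, 6, 6, 6, 6]

num_anchors = sum(
    height * width * a
    for (height, width), a in zip(feature_map_spatial_dims,
                                  anchors_per_location))
print(num_anchors)  # 1917 for these sizes

# The predictor then produces, per batch:
#   box_encodings:                     [batch_size, num_anchors, code_size]
#   class_predictions_with_background: [batch_size, num_anchors, num_classes + 1]
```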
+""" +import abc +import tensorflow.compat.v1 as tf +from tensorflow.python.util.deprecation import deprecated_args +from object_detection.core import box_list +from object_detection.core import box_list_ops +from object_detection.core import matcher +from object_detection.core import model +from object_detection.core import standard_fields as fields +from object_detection.core import target_assigner +from object_detection.utils import ops +from object_detection.utils import shape_utils +from object_detection.utils import variables_helper +from object_detection.utils import visualization_utils + + +# pylint: disable=g-import-not-at-top +try: + import tf_slim as slim +except ImportError: + # TF 2.0 doesn't ship with contrib. + pass +# pylint: enable=g-import-not-at-top + + +class SSDFeatureExtractor(object): + """SSD Slim Feature Extractor definition.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + num_layers=6, + override_base_feature_extractor_hyperparams=False): + """Constructor. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + reuse_weights: whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + num_layers: Number of SSD layers. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + """ + self._is_training = is_training + self._depth_multiplier = depth_multiplier + self._min_depth = min_depth + self._pad_to_multiple = pad_to_multiple + self._conv_hyperparams_fn = conv_hyperparams_fn + self._reuse_weights = reuse_weights + self._use_explicit_padding = use_explicit_padding + self._use_depthwise = use_depthwise + self._num_layers = num_layers + self._override_base_feature_extractor_hyperparams = ( + override_base_feature_extractor_hyperparams) + + @property + def is_keras_model(self): + return False + + @abc.abstractmethod + def preprocess(self, resized_inputs): + """Preprocesses images for feature extraction (minus image resizing). + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + """ + pass + + @abc.abstractmethod + def extract_features(self, preprocessed_inputs): + """Extracts features from preprocessed inputs. + + This function is responsible for extracting feature maps from preprocessed + images. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. 
+ + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + raise NotImplementedError + + def restore_from_classification_checkpoint_fn(self, feature_extractor_scope): + """Returns a map of variables to load from a foreign checkpoint. + + Args: + feature_extractor_scope: A scope name for the feature extractor. + + Returns: + A dict mapping variable names (to load from a checkpoint) to variables in + the model graph. + """ + variables_to_restore = {} + for variable in variables_helper.get_global_variables_safely(): + var_name = variable.op.name + if var_name.startswith(feature_extractor_scope + '/'): + var_name = var_name.replace(feature_extractor_scope + '/', '') + variables_to_restore[var_name] = variable + + return variables_to_restore + + +class SSDKerasFeatureExtractor(tf.keras.Model): + """SSD Feature Extractor definition.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + use_explicit_padding=False, + use_depthwise=False, + num_layers=6, + override_base_feature_extractor_hyperparams=False, + name=None): + """Constructor. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams: `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: Whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + num_layers: Number of SSD layers. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_config`. + name: A string name scope to assign to the model. If 'None', Keras + will auto-generate one from the class name. + """ + super(SSDKerasFeatureExtractor, self).__init__(name=name) + + self._is_training = is_training + self._depth_multiplier = depth_multiplier + self._min_depth = min_depth + self._pad_to_multiple = pad_to_multiple + self._conv_hyperparams = conv_hyperparams + self._freeze_batchnorm = freeze_batchnorm + self._inplace_batchnorm_update = inplace_batchnorm_update + self._use_explicit_padding = use_explicit_padding + self._use_depthwise = use_depthwise + self._num_layers = num_layers + self._override_base_feature_extractor_hyperparams = ( + override_base_feature_extractor_hyperparams) + + @property + def is_keras_model(self): + return True + + @abc.abstractmethod + def preprocess(self, resized_inputs): + """Preprocesses images for feature extraction (minus image resizing). + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. 
+ + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + """ + raise NotImplementedError + + @abc.abstractmethod + def _extract_features(self, preprocessed_inputs): + """Extracts features from preprocessed inputs. + + This function is responsible for extracting feature maps from preprocessed + images. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + raise NotImplementedError + + # This overrides the keras.Model `call` method with the _extract_features + # method. + def call(self, inputs, **kwargs): + return self._extract_features(inputs) + + def restore_from_classification_checkpoint_fn(self, feature_extractor_scope): + """Returns a map of variables to load from a foreign checkpoint. + + Args: + feature_extractor_scope: A scope name for the feature extractor. + + Returns: + A dict mapping variable names (to load from a checkpoint) to variables in + the model graph. + """ + variables_to_restore = {} + if tf.executing_eagerly(): + for variable in self.variables: + # variable.name includes ":0" at the end, but the names in the + # checkpoint do not have the suffix ":0". So, we strip it here. + var_name = variable.name[:-2] + if var_name.startswith(feature_extractor_scope + '/'): + var_name = var_name.replace(feature_extractor_scope + '/', '') + variables_to_restore[var_name] = variable + else: + # b/137854499: use global_variables. + for variable in variables_helper.get_global_variables_safely(): + var_name = variable.op.name + if var_name.startswith(feature_extractor_scope + '/'): + var_name = var_name.replace(feature_extractor_scope + '/', '') + variables_to_restore[var_name] = variable + + return variables_to_restore + + +class SSDMetaArch(model.DetectionModel): + """SSD Meta-architecture definition.""" + + @deprecated_args(None, + 'NMS is always placed on TPU; do not use nms_on_host ' + 'as it has no effect.', 'nms_on_host') + def __init__(self, + is_training, + anchor_generator, + box_predictor, + box_coder, + feature_extractor, + encode_background_as_zeros, + image_resizer_fn, + non_max_suppression_fn, + score_conversion_fn, + classification_loss, + localization_loss, + classification_loss_weight, + localization_loss_weight, + normalize_loss_by_num_matches, + hard_example_miner, + target_assigner_instance, + add_summaries=True, + normalize_loc_loss_by_codesize=False, + freeze_batchnorm=False, + inplace_batchnorm_update=False, + add_background_class=True, + explicit_background_class=False, + random_example_sampler=None, + expected_loss_weights_fn=None, + use_confidences_as_targets=False, + implicit_example_weight=0.5, + equalization_loss_config=None, + return_raw_detections_during_predict=False, + nms_on_host=True): + """SSDMetaArch Constructor. + + TODO(rathodv,jonathanhuang): group NMS parameters + score converter into + a class and loss parameters into a class and write config protos for + postprocessing and losses. + + Args: + is_training: A boolean indicating whether the training version of the + computation graph should be constructed. + anchor_generator: an anchor_generator.AnchorGenerator object. 
+ box_predictor: a box_predictor.BoxPredictor object. + box_coder: a box_coder.BoxCoder object. + feature_extractor: a SSDFeatureExtractor object. + encode_background_as_zeros: boolean determining whether background + targets are to be encoded as an all zeros vector or a one-hot + vector (where background is the 0th class). + image_resizer_fn: a callable for image resizing. This callable always + takes a rank-3 image tensor (corresponding to a single image) and + returns a rank-3 image tensor, possibly with new spatial dimensions and + a 1-D tensor of shape [3] indicating shape of true image within + the resized image tensor as the resized image tensor could be padded. + See builders/image_resizer_builder.py. + non_max_suppression_fn: batch_multiclass_non_max_suppression + callable that takes `boxes`, `scores` and optional `clip_window` + inputs (with all other inputs already set) and returns a dictionary + hold tensors with keys: `detection_boxes`, `detection_scores`, + `detection_classes` and `num_detections`. See `post_processing. + batch_multiclass_non_max_suppression` for the type and shape of these + tensors. + score_conversion_fn: callable elementwise nonlinearity (that takes tensors + as inputs and returns tensors). This is usually used to convert logits + to probabilities. + classification_loss: an object_detection.core.losses.Loss object. + localization_loss: a object_detection.core.losses.Loss object. + classification_loss_weight: float + localization_loss_weight: float + normalize_loss_by_num_matches: boolean + hard_example_miner: a losses.HardExampleMiner object (can be None) + target_assigner_instance: target_assigner.TargetAssigner instance to use. + add_summaries: boolean (default: True) controlling whether summary ops + should be added to tensorflow graph. + normalize_loc_loss_by_codesize: whether to normalize localization loss + by code size of the box encoder. + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: Whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + add_background_class: Whether to add an implicit background class to + one-hot encodings of groundtruth labels. Set to false if training a + single class model or using groundtruth labels with an explicit + background class. + explicit_background_class: Set to true if using groundtruth labels with an + explicit background class, as in multiclass scores. + random_example_sampler: a BalancedPositiveNegativeSampler object that can + perform random example sampling when computing loss. If None, random + sampling process is skipped. Note that random example sampler and hard + example miner can both be applied to the model. In that case, random + sampler will take effect first and hard example miner can only process + the random sampled examples. + expected_loss_weights_fn: If not None, use to calculate + loss by background/foreground weighting. Should take batch_cls_targets + as inputs and return foreground_weights, background_weights. See + expected_classification_loss_by_expected_sampling and + expected_classification_loss_by_reweighting_unmatched_anchors in + third_party/tensorflow_models/object_detection/utils/ops.py as examples. 
+ use_confidences_as_targets: Whether to use groundtruth_condifences field + to assign the targets. + implicit_example_weight: a float number that specifies the weight used + for the implicit negative examples. + equalization_loss_config: a namedtuple that specifies configs for + computing equalization loss. + return_raw_detections_during_predict: Whether to return raw detection + boxes in the predict() method. These are decoded boxes that have not + been through postprocessing (i.e. NMS). Default False. + nms_on_host: boolean (default: True) controlling whether NMS should be + carried out on the host (outside of TPU). + """ + super(SSDMetaArch, self).__init__(num_classes=box_predictor.num_classes) + self._is_training = is_training + self._freeze_batchnorm = freeze_batchnorm + self._inplace_batchnorm_update = inplace_batchnorm_update + + self._anchor_generator = anchor_generator + self._box_predictor = box_predictor + + self._box_coder = box_coder + self._feature_extractor = feature_extractor + self._add_background_class = add_background_class + self._explicit_background_class = explicit_background_class + + if add_background_class and explicit_background_class: + raise ValueError("Cannot have both 'add_background_class' and" + " 'explicit_background_class' true.") + + # Needed for fine-tuning from classification checkpoints whose + # variables do not have the feature extractor scope. + if self._feature_extractor.is_keras_model: + # Keras feature extractors will have a name they implicitly use to scope. + # So, all contained variables are prefixed by this name. + # To load from classification checkpoints, need to filter out this name. + self._extract_features_scope = feature_extractor.name + else: + # Slim feature extractors get an explicit naming scope + self._extract_features_scope = 'FeatureExtractor' + + if encode_background_as_zeros: + background_class = [0] + else: + background_class = [1] + + if self._add_background_class: + num_foreground_classes = self.num_classes + else: + num_foreground_classes = self.num_classes - 1 + + self._unmatched_class_label = tf.constant( + background_class + num_foreground_classes * [0], tf.float32) + + self._target_assigner = target_assigner_instance + + self._classification_loss = classification_loss + self._localization_loss = localization_loss + self._classification_loss_weight = classification_loss_weight + self._localization_loss_weight = localization_loss_weight + self._normalize_loss_by_num_matches = normalize_loss_by_num_matches + self._normalize_loc_loss_by_codesize = normalize_loc_loss_by_codesize + self._hard_example_miner = hard_example_miner + self._random_example_sampler = random_example_sampler + self._parallel_iterations = 16 + + self._image_resizer_fn = image_resizer_fn + self._non_max_suppression_fn = non_max_suppression_fn + self._score_conversion_fn = score_conversion_fn + + self._anchors = None + self._add_summaries = add_summaries + self._batched_prediction_tensor_names = [] + self._expected_loss_weights_fn = expected_loss_weights_fn + self._use_confidences_as_targets = use_confidences_as_targets + self._implicit_example_weight = implicit_example_weight + + self._equalization_loss_config = equalization_loss_config + + self._return_raw_detections_during_predict = ( + return_raw_detections_during_predict) + + @property + def feature_extractor(self): + return self._feature_extractor + + @property + def anchors(self): + if not self._anchors: + raise RuntimeError('anchors have not been constructed yet!') + if not 
isinstance(self._anchors, box_list.BoxList): + raise RuntimeError('anchors should be a BoxList object, but is not.') + return self._anchors + + @property + def batched_prediction_tensor_names(self): + if not self._batched_prediction_tensor_names: + raise RuntimeError('Must call predict() method to get batched prediction ' + 'tensor names.') + return self._batched_prediction_tensor_names + + def preprocess(self, inputs): + """Feature-extractor specific preprocessing. + + SSD meta architecture uses a default clip_window of [0, 0, 1, 1] during + post-processing. On calling `preprocess` method, clip_window gets updated + based on `true_image_shapes` returned by `image_resizer_fn`. + + Args: + inputs: a [batch, height_in, width_in, channels] float tensor representing + a batch of images with values between 0 and 255.0. + + Returns: + preprocessed_inputs: a [batch, height_out, width_out, channels] float + tensor representing a batch of images. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + + Raises: + ValueError: if inputs tensor does not have type tf.float32 + """ + with tf.name_scope('Preprocessor'): + (resized_inputs, + true_image_shapes) = shape_utils.resize_images_and_return_shapes( + inputs, self._image_resizer_fn) + + return (self._feature_extractor.preprocess(resized_inputs), + true_image_shapes) + + def _compute_clip_window(self, preprocessed_images, true_image_shapes): + """Computes clip window to use during post_processing. + + Computes a new clip window to use during post-processing based on + `resized_image_shapes` and `true_image_shapes` only if `preprocess` method + has been called. Otherwise returns a default clip window of [0, 0, 1, 1]. + + Args: + preprocessed_images: the [batch, height, width, channels] image + tensor. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. Or None if the clip window should cover the full image. + + Returns: + a 2-D float32 tensor of the form [batch_size, 4] containing the clip + window for each image in the batch in normalized coordinates (relative to + the resized dimensions) where each clip window is of the form [ymin, xmin, + ymax, xmax] or a default clip window of [0, 0, 1, 1]. + + """ + if true_image_shapes is None: + return tf.constant([0, 0, 1, 1], dtype=tf.float32) + + resized_inputs_shape = shape_utils.combined_static_and_dynamic_shape( + preprocessed_images) + true_heights, true_widths, _ = tf.unstack( + tf.cast(true_image_shapes, dtype=tf.float32), axis=1) + padded_height = tf.cast(resized_inputs_shape[1], dtype=tf.float32) + padded_width = tf.cast(resized_inputs_shape[2], dtype=tf.float32) + return tf.stack( + [ + tf.zeros_like(true_heights), + tf.zeros_like(true_widths), true_heights / padded_height, + true_widths / padded_width + ], + axis=1) + + def predict(self, preprocessed_inputs, true_image_shapes): + """Predicts unpostprocessed tensors from input tensor. + + This function takes an input batch of images and runs it through the forward + pass of the network to yield unpostprocessesed predictions. + + A side effect of calling the predict method is that self._anchors is + populated with a box_list.BoxList of anchors. 
These anchors must be + constructed before the postprocess or loss functions can be called. + + Args: + preprocessed_inputs: a [batch, height, width, channels] image tensor. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + + Returns: + prediction_dict: a dictionary holding "raw" prediction tensors: + 1) preprocessed_inputs: the [batch, height, width, channels] image + tensor. + 2) box_encodings: 4-D float tensor of shape [batch_size, num_anchors, + box_code_dimension] containing predicted boxes. + 3) class_predictions_with_background: 3-D float tensor of shape + [batch_size, num_anchors, num_classes+1] containing class predictions + (logits) for each of the anchors. Note that this tensor *includes* + background class predictions (at class index 0). + 4) feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i]. + 5) anchors: 2-D float tensor of shape [num_anchors, 4] containing + the generated anchors in normalized coordinates. + 6) final_anchors: 3-D float tensor of shape [batch_size, num_anchors, 4] + containing the generated anchors in normalized coordinates. + If self._return_raw_detections_during_predict is True, the dictionary + will also contain: + 7) raw_detection_boxes: a 4-D float32 tensor with shape + [batch_size, self.max_num_proposals, 4] in normalized coordinates. + 8) raw_detection_feature_map_indices: a 3-D int32 tensor with shape + [batch_size, self.max_num_proposals]. + """ + if self._inplace_batchnorm_update: + batchnorm_updates_collections = None + else: + batchnorm_updates_collections = tf.GraphKeys.UPDATE_OPS + if self._feature_extractor.is_keras_model: + feature_maps = self._feature_extractor(preprocessed_inputs) + else: + with slim.arg_scope([slim.batch_norm], + is_training=(self._is_training and + not self._freeze_batchnorm), + updates_collections=batchnorm_updates_collections): + with tf.variable_scope(None, self._extract_features_scope, + [preprocessed_inputs]): + feature_maps = self._feature_extractor.extract_features( + preprocessed_inputs) + + feature_map_spatial_dims = self._get_feature_map_spatial_dims( + feature_maps) + image_shape = shape_utils.combined_static_and_dynamic_shape( + preprocessed_inputs) + boxlist_list = self._anchor_generator.generate( + feature_map_spatial_dims, + im_height=image_shape[1], + im_width=image_shape[2]) + self._anchors = box_list_ops.concatenate(boxlist_list) + if self._box_predictor.is_keras_model: + predictor_results_dict = self._box_predictor(feature_maps) + else: + with slim.arg_scope([slim.batch_norm], + is_training=(self._is_training and + not self._freeze_batchnorm), + updates_collections=batchnorm_updates_collections): + predictor_results_dict = self._box_predictor.predict( + feature_maps, self._anchor_generator.num_anchors_per_location()) + predictions_dict = { + 'preprocessed_inputs': + preprocessed_inputs, + 'feature_maps': + feature_maps, + 'anchors': + self._anchors.get(), + 'final_anchors': + tf.tile( + tf.expand_dims(self._anchors.get(), 0), [image_shape[0], 1, 1]) + } + for prediction_key, prediction_list in iter(predictor_results_dict.items()): + prediction = tf.concat(prediction_list, axis=1) + if (prediction_key == 'box_encodings' and prediction.shape.ndims == 4 and + prediction.shape[2] == 1): + prediction = tf.squeeze(prediction, axis=2) + predictions_dict[prediction_key] = prediction + if 
self._return_raw_detections_during_predict: + predictions_dict.update(self._raw_detections_and_feature_map_inds( + predictions_dict['box_encodings'], boxlist_list)) + self._batched_prediction_tensor_names = [x for x in predictions_dict + if x != 'anchors'] + return predictions_dict + + def _raw_detections_and_feature_map_inds(self, box_encodings, boxlist_list): + anchors = self._anchors.get() + raw_detection_boxes, _ = self._batch_decode(box_encodings, anchors) + batch_size, _, _ = shape_utils.combined_static_and_dynamic_shape( + raw_detection_boxes) + feature_map_indices = ( + self._anchor_generator.anchor_index_to_feature_map_index(boxlist_list)) + feature_map_indices_batched = tf.tile( + tf.expand_dims(feature_map_indices, 0), + multiples=[batch_size, 1]) + return { + fields.PredictionFields.raw_detection_boxes: raw_detection_boxes, + fields.PredictionFields.raw_detection_feature_map_indices: + feature_map_indices_batched + } + + def _get_feature_map_spatial_dims(self, feature_maps): + """Return list of spatial dimensions for each feature map in a list. + + Args: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i]. + + Returns: + a list of pairs (height, width) for each feature map in feature_maps + """ + feature_map_shapes = [ + shape_utils.combined_static_and_dynamic_shape( + feature_map) for feature_map in feature_maps + ] + return [(shape[1], shape[2]) for shape in feature_map_shapes] + + def postprocess(self, prediction_dict, true_image_shapes): + """Converts prediction tensors to final detections. + + This function converts raw predictions tensors to final detection results by + slicing off the background class, decoding box predictions and applying + non max suppression and clipping to the image window. + + See base class for output format conventions. Note also that by default, + scores are to be interpreted as logits, but if a score_conversion_fn is + used, then scores are remapped (and may thus have a different + interpretation). + + Args: + prediction_dict: a dictionary holding prediction tensors with + 1) preprocessed_inputs: a [batch, height, width, channels] image + tensor. + 2) box_encodings: 3-D float tensor of shape [batch_size, num_anchors, + box_code_dimension] containing predicted boxes. + 3) class_predictions_with_background: 3-D float tensor of shape + [batch_size, num_anchors, num_classes+1] containing class predictions + (logits) for each of the anchors. Note that this tensor *includes* + background class predictions. + 4) mask_predictions: (optional) a 5-D float tensor of shape + [batch_size, num_anchors, q, mask_height, mask_width]. `q` can be + either number of classes or 1 depending on whether a separate mask is + predicted per class. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. Or None, if the clip window should cover the full image. + + Returns: + detections: a dictionary containing the following fields + detection_boxes: [batch, max_detections, 4] tensor with post-processed + detection boxes. + detection_scores: [batch, max_detections] tensor with scalar scores for + post-processed detection boxes. + detection_multiclass_scores: [batch, max_detections, + num_classes_with_background] tensor with class score distribution for + post-processed detection boxes including background class if any. 
+ detection_classes: [batch, max_detections] tensor with classes for + post-processed detection classes. + detection_keypoints: [batch, max_detections, num_keypoints, 2] (if + encoded in the prediction_dict 'box_encodings') + detection_masks: [batch_size, max_detections, mask_height, mask_width] + (optional) + num_detections: [batch] + raw_detection_boxes: [batch, total_detections, 4] tensor with decoded + detection boxes before Non-Max Suppression. + raw_detection_score: [batch, total_detections, + num_classes_with_background] tensor of multi-class scores for raw + detection boxes. + Raises: + ValueError: if prediction_dict does not contain `box_encodings` or + `class_predictions_with_background` fields. + """ + if ('box_encodings' not in prediction_dict or + 'class_predictions_with_background' not in prediction_dict): + raise ValueError('prediction_dict does not contain expected entries.') + if 'anchors' not in prediction_dict: + prediction_dict['anchors'] = self.anchors.get() + with tf.name_scope('Postprocessor'): + preprocessed_images = prediction_dict['preprocessed_inputs'] + box_encodings = prediction_dict['box_encodings'] + box_encodings = tf.identity(box_encodings, 'raw_box_encodings') + class_predictions_with_background = ( + prediction_dict['class_predictions_with_background']) + detection_boxes, detection_keypoints = self._batch_decode( + box_encodings, prediction_dict['anchors']) + detection_boxes = tf.identity(detection_boxes, 'raw_box_locations') + detection_boxes = tf.expand_dims(detection_boxes, axis=2) + + detection_scores_with_background = self._score_conversion_fn( + class_predictions_with_background) + detection_scores = tf.identity(detection_scores_with_background, + 'raw_box_scores') + if self._add_background_class or self._explicit_background_class: + detection_scores = tf.slice(detection_scores, [0, 0, 1], [-1, -1, -1]) + additional_fields = None + + batch_size = ( + shape_utils.combined_static_and_dynamic_shape(preprocessed_images)[0]) + + if 'feature_maps' in prediction_dict: + feature_map_list = [] + for feature_map in prediction_dict['feature_maps']: + feature_map_list.append(tf.reshape(feature_map, [batch_size, -1])) + box_features = tf.concat(feature_map_list, 1) + box_features = tf.identity(box_features, 'raw_box_features') + additional_fields = { + 'multiclass_scores': detection_scores_with_background + } + if self._anchors is not None: + num_boxes = (self._anchors.num_boxes_static() or + self._anchors.num_boxes()) + anchor_indices = tf.range(num_boxes) + batch_anchor_indices = tf.tile( + tf.expand_dims(anchor_indices, 0), [batch_size, 1]) + # All additional fields need to be float. 
+ additional_fields.update({ + 'anchor_indices': tf.cast(batch_anchor_indices, tf.float32), + }) + if detection_keypoints is not None: + detection_keypoints = tf.identity( + detection_keypoints, 'raw_keypoint_locations') + additional_fields[fields.BoxListFields.keypoints] = detection_keypoints + + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + nmsed_additional_fields, + num_detections) = self._non_max_suppression_fn( + detection_boxes, + detection_scores, + clip_window=self._compute_clip_window( + preprocessed_images, true_image_shapes), + additional_fields=additional_fields, + masks=prediction_dict.get('mask_predictions')) + + detection_dict = { + fields.DetectionResultFields.detection_boxes: + nmsed_boxes, + fields.DetectionResultFields.detection_scores: + nmsed_scores, + fields.DetectionResultFields.detection_classes: + nmsed_classes, + fields.DetectionResultFields.num_detections: + tf.cast(num_detections, dtype=tf.float32), + fields.DetectionResultFields.raw_detection_boxes: + tf.squeeze(detection_boxes, axis=2), + fields.DetectionResultFields.raw_detection_scores: + detection_scores_with_background + } + if (nmsed_additional_fields is not None and + fields.InputDataFields.multiclass_scores in nmsed_additional_fields): + detection_dict[ + fields.DetectionResultFields.detection_multiclass_scores] = ( + nmsed_additional_fields[ + fields.InputDataFields.multiclass_scores]) + if (nmsed_additional_fields is not None and + 'anchor_indices' in nmsed_additional_fields): + detection_dict.update({ + fields.DetectionResultFields.detection_anchor_indices: + tf.cast(nmsed_additional_fields['anchor_indices'], tf.int32), + }) + if (nmsed_additional_fields is not None and + fields.BoxListFields.keypoints in nmsed_additional_fields): + detection_dict[fields.DetectionResultFields.detection_keypoints] = ( + nmsed_additional_fields[fields.BoxListFields.keypoints]) + if nmsed_masks is not None: + detection_dict[ + fields.DetectionResultFields.detection_masks] = nmsed_masks + return detection_dict + + def loss(self, prediction_dict, true_image_shapes, scope=None): + """Compute scalar loss tensors with respect to provided groundtruth. + + Calling this function requires that groundtruth tensors have been + provided via the provide_groundtruth function. + + Args: + prediction_dict: a dictionary holding prediction tensors with + 1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors, + box_code_dimension] containing predicted boxes. + 2) class_predictions_with_background: 3-D float tensor of shape + [batch_size, num_anchors, num_classes+1] containing class predictions + (logits) for each of the anchors. Note that this tensor *includes* + background class predictions. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + scope: Optional scope name. + + Returns: + a dictionary mapping loss keys (`localization_loss` and + `classification_loss`) to scalar tensors representing corresponding loss + values. 
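A small, self-contained sketch of how a caller might fold the returned dictionary into a single training objective, together with any terms from `regularization_losses()`. The keys mirror the `Loss/...` entries built below; the numeric values are stand-ins.

```python
import tensorflow.compat.v1 as tf

# Illustrative stand-ins for the dictionary documented above and for any
# regularization terms the model exposes via regularization_losses().
losses_dict = {
    'Loss/localization_loss': tf.constant(0.7),
    'Loss/classification_loss': tf.constant(1.3),
}
regularization = [tf.constant(0.01)]

total_loss = tf.add_n(list(losses_dict.values()) + regularization,
                      name='total_loss')
```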
+ """ + with tf.name_scope(scope, 'Loss', prediction_dict.values()): + keypoints = None + if self.groundtruth_has_field(fields.BoxListFields.keypoints): + keypoints = self.groundtruth_lists(fields.BoxListFields.keypoints) + weights = None + if self.groundtruth_has_field(fields.BoxListFields.weights): + weights = self.groundtruth_lists(fields.BoxListFields.weights) + confidences = None + if self.groundtruth_has_field(fields.BoxListFields.confidences): + confidences = self.groundtruth_lists(fields.BoxListFields.confidences) + (batch_cls_targets, batch_cls_weights, batch_reg_targets, + batch_reg_weights, batch_match) = self._assign_targets( + self.groundtruth_lists(fields.BoxListFields.boxes), + self.groundtruth_lists(fields.BoxListFields.classes), + keypoints, weights, confidences) + match_list = [matcher.Match(match) for match in tf.unstack(batch_match)] + if self._add_summaries: + self._summarize_target_assignment( + self.groundtruth_lists(fields.BoxListFields.boxes), match_list) + + if self._random_example_sampler: + batch_cls_per_anchor_weights = tf.reduce_mean( + batch_cls_weights, axis=-1) + batch_sampled_indicator = tf.cast( + shape_utils.static_or_dynamic_map_fn( + self._minibatch_subsample_fn, + [batch_cls_targets, batch_cls_per_anchor_weights], + dtype=tf.bool, + parallel_iterations=self._parallel_iterations, + back_prop=True), dtype=tf.float32) + batch_reg_weights = tf.multiply(batch_sampled_indicator, + batch_reg_weights) + batch_cls_weights = tf.multiply( + tf.expand_dims(batch_sampled_indicator, -1), + batch_cls_weights) + + losses_mask = None + if self.groundtruth_has_field(fields.InputDataFields.is_annotated): + losses_mask = tf.stack(self.groundtruth_lists( + fields.InputDataFields.is_annotated)) + + + location_losses = self._localization_loss( + prediction_dict['box_encodings'], + batch_reg_targets, + ignore_nan_targets=True, + weights=batch_reg_weights, + losses_mask=losses_mask) + + cls_losses = self._classification_loss( + prediction_dict['class_predictions_with_background'], + batch_cls_targets, + weights=batch_cls_weights, + losses_mask=losses_mask) + + if self._expected_loss_weights_fn: + # Need to compute losses for assigned targets against the + # unmatched_class_label as well as their assigned targets. 
+ # simplest thing (but wasteful) is just to calculate all losses + # twice + batch_size, num_anchors, num_classes = batch_cls_targets.get_shape() + unmatched_targets = tf.ones([batch_size, num_anchors, 1 + ]) * self._unmatched_class_label + + unmatched_cls_losses = self._classification_loss( + prediction_dict['class_predictions_with_background'], + unmatched_targets, + weights=batch_cls_weights, + losses_mask=losses_mask) + + if cls_losses.get_shape().ndims == 3: + batch_size, num_anchors, num_classes = cls_losses.get_shape() + cls_losses = tf.reshape(cls_losses, [batch_size, -1]) + unmatched_cls_losses = tf.reshape(unmatched_cls_losses, + [batch_size, -1]) + batch_cls_targets = tf.reshape( + batch_cls_targets, [batch_size, num_anchors * num_classes, -1]) + batch_cls_targets = tf.concat( + [1 - batch_cls_targets, batch_cls_targets], axis=-1) + + location_losses = tf.tile(location_losses, [1, num_classes]) + + foreground_weights, background_weights = ( + self._expected_loss_weights_fn(batch_cls_targets)) + + cls_losses = ( + foreground_weights * cls_losses + + background_weights * unmatched_cls_losses) + + location_losses *= foreground_weights + + classification_loss = tf.reduce_sum(cls_losses) + localization_loss = tf.reduce_sum(location_losses) + elif self._hard_example_miner: + cls_losses = ops.reduce_sum_trailing_dimensions(cls_losses, ndims=2) + (localization_loss, classification_loss) = self._apply_hard_mining( + location_losses, cls_losses, prediction_dict, match_list) + if self._add_summaries: + self._hard_example_miner.summarize() + else: + cls_losses = ops.reduce_sum_trailing_dimensions(cls_losses, ndims=2) + localization_loss = tf.reduce_sum(location_losses) + classification_loss = tf.reduce_sum(cls_losses) + + # Optionally normalize by number of positive matches + normalizer = tf.constant(1.0, dtype=tf.float32) + if self._normalize_loss_by_num_matches: + normalizer = tf.maximum(tf.cast(tf.reduce_sum(batch_reg_weights), + dtype=tf.float32), + 1.0) + + localization_loss_normalizer = normalizer + if self._normalize_loc_loss_by_codesize: + localization_loss_normalizer *= self._box_coder.code_size + localization_loss = tf.multiply((self._localization_loss_weight / + localization_loss_normalizer), + localization_loss, + name='localization_loss') + classification_loss = tf.multiply((self._classification_loss_weight / + normalizer), classification_loss, + name='classification_loss') + + loss_dict = { + 'Loss/localization_loss': localization_loss, + 'Loss/classification_loss': classification_loss + } + + + return loss_dict + + def _minibatch_subsample_fn(self, inputs): + """Randomly samples anchors for one image. + + Args: + inputs: a list of 2 inputs. First one is a tensor of shape [num_anchors, + num_classes] indicating targets assigned to each anchor. Second one + is a tensor of shape [num_anchors] indicating the class weight of each + anchor. + + Returns: + batch_sampled_indicator: bool tensor of shape [num_anchors] indicating + whether the anchor should be selected for loss computation. + """ + cls_targets, cls_weights = inputs + if self._add_background_class: + # Set background_class bits to 0 so that the positives_indicator + # computation would not consider background class. 
+ background_class = tf.zeros_like(tf.slice(cls_targets, [0, 0], [-1, 1])) + regular_class = tf.slice(cls_targets, [0, 1], [-1, -1]) + cls_targets = tf.concat([background_class, regular_class], 1) + positives_indicator = tf.reduce_sum(cls_targets, axis=1) + return self._random_example_sampler.subsample( + tf.cast(cls_weights, tf.bool), + batch_size=None, + labels=tf.cast(positives_indicator, tf.bool)) + + def _summarize_anchor_classification_loss(self, class_ids, cls_losses): + positive_indices = tf.where(tf.greater(class_ids, 0)) + positive_anchor_cls_loss = tf.squeeze( + tf.gather(cls_losses, positive_indices), axis=1) + visualization_utils.add_cdf_image_summary(positive_anchor_cls_loss, + 'PositiveAnchorLossCDF') + negative_indices = tf.where(tf.equal(class_ids, 0)) + negative_anchor_cls_loss = tf.squeeze( + tf.gather(cls_losses, negative_indices), axis=1) + visualization_utils.add_cdf_image_summary(negative_anchor_cls_loss, + 'NegativeAnchorLossCDF') + + def _assign_targets(self, + groundtruth_boxes_list, + groundtruth_classes_list, + groundtruth_keypoints_list=None, + groundtruth_weights_list=None, + groundtruth_confidences_list=None): + """Assign groundtruth targets. + + Adds a background class to each one-hot encoding of groundtruth classes + and uses target assigner to obtain regression and classification targets. + + Args: + groundtruth_boxes_list: a list of 2-D tensors of shape [num_boxes, 4] + containing coordinates of the groundtruth boxes. + Groundtruth boxes are provided in [y_min, x_min, y_max, x_max] + format and assumed to be normalized and clipped + relative to the image window with y_min <= y_max and x_min <= x_max. + groundtruth_classes_list: a list of 2-D one-hot (or k-hot) tensors of + shape [num_boxes, num_classes] containing the class targets with the 0th + index assumed to map to the first non-background class. + groundtruth_keypoints_list: (optional) a list of 3-D tensors of shape + [num_boxes, num_keypoints, 2] + groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape + [num_boxes] containing weights for groundtruth boxes. + groundtruth_confidences_list: A list of 2-D tf.float32 tensors of shape + [num_boxes, num_classes] containing class confidences for + groundtruth boxes. + + Returns: + batch_cls_targets: a tensor with shape [batch_size, num_anchors, + num_classes], + batch_cls_weights: a tensor with shape [batch_size, num_anchors], + batch_reg_targets: a tensor with shape [batch_size, num_anchors, + box_code_dimension] + batch_reg_weights: a tensor with shape [batch_size, num_anchors], + match: an int32 tensor of shape [batch_size, num_anchors], containing + result of anchor groundtruth matching. Each position in the tensor + indicates an anchor and holds the following meaning: + (1) if match[x, i] >= 0, anchor i is matched with groundtruth + match[x, i]. + (2) if match[x, i]=-1, anchor i is marked to be background . + (3) if match[x, i]=-2, anchor i is ignored since it is not background + and does not have sufficient overlap to call it a foreground. 
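To make the matching convention above concrete, here is a tiny self-contained sketch (with invented values) that derives per-image counts of matched, background, and ignored anchors from such a `match` tensor; it parallels the statistics summarized in `_summarize_target_assignment` but is not code from this model.

```python
import tensorflow.compat.v1 as tf

# Toy match tensor for a batch of 2 images and 6 anchors, following the
# convention above: >= 0 is a matched groundtruth index, -1 is background,
# -2 is ignored.
match = tf.constant([[3, -1, -1, 0, -2, -1],
                     [-1, -1, 2, 2, -1, -2]], dtype=tf.int32)

num_positive_anchors = tf.reduce_sum(
    tf.cast(tf.greater_equal(match, 0), tf.int32), axis=1)   # [2, 2]
num_background_anchors = tf.reduce_sum(
    tf.cast(tf.equal(match, -1), tf.int32), axis=1)          # [3, 3]
num_ignored_anchors = tf.reduce_sum(
    tf.cast(tf.equal(match, -2), tf.int32), axis=1)          # [1, 1]
```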
+ """ + groundtruth_boxlists = [ + box_list.BoxList(boxes) for boxes in groundtruth_boxes_list + ] + train_using_confidences = (self._is_training and + self._use_confidences_as_targets) + if self._add_background_class: + groundtruth_classes_with_background_list = [ + tf.pad(one_hot_encoding, [[0, 0], [1, 0]], mode='CONSTANT') + for one_hot_encoding in groundtruth_classes_list + ] + if train_using_confidences: + groundtruth_confidences_with_background_list = [ + tf.pad(groundtruth_confidences, [[0, 0], [1, 0]], mode='CONSTANT') + for groundtruth_confidences in groundtruth_confidences_list + ] + else: + groundtruth_classes_with_background_list = groundtruth_classes_list + + if groundtruth_keypoints_list is not None: + for boxlist, keypoints in zip( + groundtruth_boxlists, groundtruth_keypoints_list): + boxlist.add_field(fields.BoxListFields.keypoints, keypoints) + if train_using_confidences: + return target_assigner.batch_assign_confidences( + self._target_assigner, + self.anchors, + groundtruth_boxlists, + groundtruth_confidences_with_background_list, + groundtruth_weights_list, + self._unmatched_class_label, + self._add_background_class, + self._implicit_example_weight) + else: + return target_assigner.batch_assign_targets( + self._target_assigner, + self.anchors, + groundtruth_boxlists, + groundtruth_classes_with_background_list, + self._unmatched_class_label, + groundtruth_weights_list) + + def _summarize_target_assignment(self, groundtruth_boxes_list, match_list): + """Creates tensorflow summaries for the input boxes and anchors. + + This function creates four summaries corresponding to the average + number (over images in a batch) of (1) groundtruth boxes, (2) anchors + marked as positive, (3) anchors marked as negative, and (4) anchors marked + as ignored. + + Args: + groundtruth_boxes_list: a list of 2-D tensors of shape [num_boxes, 4] + containing corners of the groundtruth boxes. + match_list: a list of matcher.Match objects encoding the match between + anchors and groundtruth boxes for each image of the batch, + with rows of the Match objects corresponding to groundtruth boxes + and columns corresponding to anchors. + """ + # TODO(rathodv): Add a test for these summaries. 
+ try: + # TODO(kaftan): Integrate these summaries into the v2 style loops + with tf.compat.v2.init_scope(): + if tf.compat.v2.executing_eagerly(): + return + except AttributeError: + pass + + avg_num_gt_boxes = tf.reduce_mean( + tf.cast( + tf.stack([tf.shape(x)[0] for x in groundtruth_boxes_list]), + dtype=tf.float32)) + avg_num_matched_gt_boxes = tf.reduce_mean( + tf.cast( + tf.stack([match.num_matched_rows() for match in match_list]), + dtype=tf.float32)) + avg_pos_anchors = tf.reduce_mean( + tf.cast( + tf.stack([match.num_matched_columns() for match in match_list]), + dtype=tf.float32)) + avg_neg_anchors = tf.reduce_mean( + tf.cast( + tf.stack([match.num_unmatched_columns() for match in match_list]), + dtype=tf.float32)) + avg_ignored_anchors = tf.reduce_mean( + tf.cast( + tf.stack([match.num_ignored_columns() for match in match_list]), + dtype=tf.float32)) + + tf.summary.scalar('AvgNumGroundtruthBoxesPerImage', + avg_num_gt_boxes, + family='TargetAssignment') + tf.summary.scalar('AvgNumGroundtruthBoxesMatchedPerImage', + avg_num_matched_gt_boxes, + family='TargetAssignment') + tf.summary.scalar('AvgNumPositiveAnchorsPerImage', + avg_pos_anchors, + family='TargetAssignment') + tf.summary.scalar('AvgNumNegativeAnchorsPerImage', + avg_neg_anchors, + family='TargetAssignment') + tf.summary.scalar('AvgNumIgnoredAnchorsPerImage', + avg_ignored_anchors, + family='TargetAssignment') + + def _apply_hard_mining(self, location_losses, cls_losses, prediction_dict, + match_list): + """Applies hard mining to anchorwise losses. + + Args: + location_losses: Float tensor of shape [batch_size, num_anchors] + representing anchorwise location losses. + cls_losses: Float tensor of shape [batch_size, num_anchors] + representing anchorwise classification losses. + prediction_dict: p a dictionary holding prediction tensors with + 1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors, + box_code_dimension] containing predicted boxes. + 2) class_predictions_with_background: 3-D float tensor of shape + [batch_size, num_anchors, num_classes+1] containing class predictions + (logits) for each of the anchors. Note that this tensor *includes* + background class predictions. + 3) anchors: (optional) 2-D float tensor of shape [num_anchors, 4]. + match_list: a list of matcher.Match objects encoding the match between + anchors and groundtruth boxes for each image of the batch, + with rows of the Match objects corresponding to groundtruth boxes + and columns corresponding to anchors. + + Returns: + mined_location_loss: a float scalar with sum of localization losses from + selected hard examples. + mined_cls_loss: a float scalar with sum of classification losses from + selected hard examples. 
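As a hedged illustration of the idea behind hard mining (not the actual `losses.HardExampleMiner`, which also runs NMS on decoded boxes and balances positives against negatives): pick the anchors with the largest combined loss and sum only their losses. Values below are made up.

```python
import tensorflow.compat.v1 as tf

# Simplified single-image sketch: keep only the k anchors with the largest
# combined (localization + classification) loss and sum their losses.
location_losses = tf.constant([0.1, 0.9, 0.0, 0.3, 0.05])
cls_losses = tf.constant([0.2, 1.5, 0.1, 0.8, 0.0])
num_hard_examples = 2

combined = location_losses + cls_losses
_, hard_indices = tf.nn.top_k(combined, k=num_hard_examples)

mined_location_loss = tf.reduce_sum(tf.gather(location_losses, hard_indices))
mined_cls_loss = tf.reduce_sum(tf.gather(cls_losses, hard_indices))
```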
+ """ + class_predictions = prediction_dict['class_predictions_with_background'] + if self._add_background_class: + class_predictions = tf.slice(class_predictions, [0, 0, 1], [-1, -1, -1]) + + if 'anchors' not in prediction_dict: + prediction_dict['anchors'] = self.anchors.get() + decoded_boxes, _ = self._batch_decode(prediction_dict['box_encodings'], + prediction_dict['anchors']) + decoded_box_tensors_list = tf.unstack(decoded_boxes) + class_prediction_list = tf.unstack(class_predictions) + decoded_boxlist_list = [] + for box_location, box_score in zip(decoded_box_tensors_list, + class_prediction_list): + decoded_boxlist = box_list.BoxList(box_location) + decoded_boxlist.add_field('scores', box_score) + decoded_boxlist_list.append(decoded_boxlist) + return self._hard_example_miner( + location_losses=location_losses, + cls_losses=cls_losses, + decoded_boxlist_list=decoded_boxlist_list, + match_list=match_list) + + def _batch_decode(self, box_encodings, anchors): + """Decodes a batch of box encodings with respect to the anchors. + + Args: + box_encodings: A float32 tensor of shape + [batch_size, num_anchors, box_code_size] containing box encodings. + anchors: A tensor of shape [num_anchors, 4]. + + Returns: + decoded_boxes: A float32 tensor of shape + [batch_size, num_anchors, 4] containing the decoded boxes. + decoded_keypoints: A float32 tensor of shape + [batch_size, num_anchors, num_keypoints, 2] containing the decoded + keypoints if present in the input `box_encodings`, None otherwise. + """ + combined_shape = shape_utils.combined_static_and_dynamic_shape( + box_encodings) + batch_size = combined_shape[0] + tiled_anchor_boxes = tf.tile(tf.expand_dims(anchors, 0), [batch_size, 1, 1]) + tiled_anchors_boxlist = box_list.BoxList( + tf.reshape(tiled_anchor_boxes, [-1, 4])) + decoded_boxes = self._box_coder.decode( + tf.reshape(box_encodings, [-1, self._box_coder.code_size]), + tiled_anchors_boxlist) + decoded_keypoints = None + if decoded_boxes.has_field(fields.BoxListFields.keypoints): + decoded_keypoints = decoded_boxes.get_field( + fields.BoxListFields.keypoints) + num_keypoints = decoded_keypoints.get_shape()[1] + decoded_keypoints = tf.reshape( + decoded_keypoints, + tf.stack([combined_shape[0], combined_shape[1], num_keypoints, 2])) + decoded_boxes = tf.reshape(decoded_boxes.get(), tf.stack( + [combined_shape[0], combined_shape[1], 4])) + return decoded_boxes, decoded_keypoints + + def regularization_losses(self): + """Returns a list of regularization losses for this model. + + Returns a list of regularization losses for this model that the estimator + needs to use during training/optimization. + + Returns: + A list of regularization loss tensors. + """ + losses = [] + slim_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) + # Copy the slim losses to avoid modifying the collection + if slim_losses: + losses.extend(slim_losses) + if self._box_predictor.is_keras_model: + losses.extend(self._box_predictor.losses) + if self._feature_extractor.is_keras_model: + losses.extend(self._feature_extractor.losses) + return losses + + def restore_map(self, + fine_tune_checkpoint_type='detection', + load_all_detection_checkpoint_vars=False): + """Returns a map of variables to load from a foreign checkpoint. + + See parent class for details. + + Args: + fine_tune_checkpoint_type: whether to restore from a full detection + checkpoint (with compatible variable names) or to restore from a + classification checkpoint for initialization prior to training. 
+ Valid values: `detection`, `classification`. Default 'detection'. + load_all_detection_checkpoint_vars: whether to load all variables (when + `fine_tune_checkpoint_type='detection'`). If False, only variables + within the appropriate scopes are included. Default False. + + Returns: + A dict mapping variable names (to load from a checkpoint) to variables in + the model graph. + Raises: + ValueError: if fine_tune_checkpoint_type is neither `classification` + nor `detection`. + """ + if fine_tune_checkpoint_type == 'classification': + return self._feature_extractor.restore_from_classification_checkpoint_fn( + self._extract_features_scope) + + elif fine_tune_checkpoint_type == 'detection': + variables_to_restore = {} + if tf.executing_eagerly(): + if load_all_detection_checkpoint_vars: + # Grab all detection vars by name + for variable in self.variables: + # variable.name includes ":0" at the end, but the names in the + # checkpoint do not have the suffix ":0". So, we strip it here. + var_name = variable.name[:-2] + variables_to_restore[var_name] = variable + else: + # Grab just the feature extractor vars by name + for variable in self._feature_extractor.variables: + # variable.name includes ":0" at the end, but the names in the + # checkpoint do not have the suffix ":0". So, we strip it here. + var_name = variable.name[:-2] + variables_to_restore[var_name] = variable + else: + for variable in variables_helper.get_global_variables_safely(): + var_name = variable.op.name + if load_all_detection_checkpoint_vars: + variables_to_restore[var_name] = variable + else: + if var_name.startswith(self._extract_features_scope): + variables_to_restore[var_name] = variable + + return variables_to_restore + + else: + raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format( + fine_tune_checkpoint_type)) + + def updates(self): + """Returns a list of update operators for this model. + + Returns a list of update operators for this model that must be executed at + each training step. The estimator's train op needs to have a control + dependency on these updates. + + Returns: + A list of update operators. + """ + update_ops = [] + slim_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) + # Copy the slim ops to avoid modifying the collection + if slim_update_ops: + update_ops.extend(slim_update_ops) + if self._box_predictor.is_keras_model: + update_ops.extend(self._box_predictor.get_updates_for(None)) + update_ops.extend(self._box_predictor.get_updates_for( + self._box_predictor.inputs)) + if self._feature_extractor.is_keras_model: + update_ops.extend(self._feature_extractor.get_updates_for(None)) + update_ops.extend(self._feature_extractor.get_updates_for( + self._feature_extractor.inputs)) + return update_ops diff --git a/models/research/object_detection/meta_architectures/ssd_meta_arch_test.py b/models/research/object_detection/meta_architectures/ssd_meta_arch_test.py new file mode 100644 index 0000000000000000000000000000000000000000..585eb1778f72deae1aeee45bfbf1d18fa3af1212 --- /dev/null +++ b/models/research/object_detection/meta_architectures/ssd_meta_arch_test.py @@ -0,0 +1,680 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.meta_architectures.ssd_meta_arch.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized + +import numpy as np +import six +from six.moves import range +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.meta_architectures import ssd_meta_arch_test_lib +from object_detection.protos import model_pb2 +from object_detection.utils import test_utils + +# pylint: disable=g-import-not-at-top +try: + import tf_slim as slim +except ImportError: + # TF 2.0 doesn't ship with contrib. + pass +# pylint: enable=g-import-not-at-top + +keras = tf.keras.layers + + +class SsdMetaArchTest(ssd_meta_arch_test_lib.SSDMetaArchTestBase, + parameterized.TestCase): + + def _create_model( + self, + apply_hard_mining=True, + normalize_loc_loss_by_codesize=False, + add_background_class=True, + random_example_sampling=False, + expected_loss_weights=model_pb2.DetectionModel().ssd.loss.NONE, + min_num_negative_samples=1, + desired_negative_sampling_ratio=3, + predict_mask=False, + use_static_shapes=False, + nms_max_size_per_class=5, + calibration_mapping_value=None, + return_raw_detections_during_predict=False): + return super(SsdMetaArchTest, self)._create_model( + model_fn=ssd_meta_arch.SSDMetaArch, + apply_hard_mining=apply_hard_mining, + normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize, + add_background_class=add_background_class, + random_example_sampling=random_example_sampling, + expected_loss_weights=expected_loss_weights, + min_num_negative_samples=min_num_negative_samples, + desired_negative_sampling_ratio=desired_negative_sampling_ratio, + predict_mask=predict_mask, + use_static_shapes=use_static_shapes, + nms_max_size_per_class=nms_max_size_per_class, + calibration_mapping_value=calibration_mapping_value, + return_raw_detections_during_predict=( + return_raw_detections_during_predict)) + + def test_preprocess_preserves_shapes_with_dynamic_input_image(self): + width = tf.random.uniform([], minval=5, maxval=10, dtype=tf.int32) + batch = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) + shape = tf.stack([batch, 5, width, 3]) + image = tf.random.uniform(shape) + model, _, _, _ = self._create_model() + preprocessed_inputs, _ = model.preprocess(image) + self.assertTrue( + preprocessed_inputs.shape.is_compatible_with([None, 5, None, 3])) + + def test_preprocess_preserves_shape_with_static_input_image(self): + image = tf.random.uniform([2, 3, 3, 3]) + model, _, _, _ = self._create_model() + preprocessed_inputs, _ = model.preprocess(image) + self.assertTrue(preprocessed_inputs.shape.is_compatible_with([2, 3, 3, 3])) + + def test_predict_result_shapes_on_image_with_dynamic_shape(self): + with test_utils.GraphContextOrNone() as g: + model, num_classes, num_anchors, code_size = self._create_model() + + def graph_fn(): + size = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) + batch = 
tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) + shape = tf.stack([batch, size, size, 3]) + image = tf.random.uniform(shape) + prediction_dict = model.predict(image, true_image_shapes=None) + self.assertIn('box_encodings', prediction_dict) + self.assertIn('class_predictions_with_background', prediction_dict) + self.assertIn('feature_maps', prediction_dict) + self.assertIn('anchors', prediction_dict) + self.assertIn('final_anchors', prediction_dict) + return (prediction_dict['box_encodings'], + prediction_dict['final_anchors'], + prediction_dict['class_predictions_with_background'], + tf.constant(num_anchors), batch) + (box_encodings_out, final_anchors, class_predictions_with_background, + num_anchors, batch_size) = self.execute_cpu(graph_fn, [], graph=g) + self.assertAllEqual(box_encodings_out.shape, + (batch_size, num_anchors, code_size)) + self.assertAllEqual(final_anchors.shape, + (batch_size, num_anchors, code_size)) + self.assertAllEqual( + class_predictions_with_background.shape, + (batch_size, num_anchors, num_classes + 1)) + + def test_predict_result_shapes_on_image_with_static_shape(self): + + with test_utils.GraphContextOrNone() as g: + model, num_classes, num_anchors, code_size = self._create_model() + + def graph_fn(input_image): + predictions = model.predict(input_image, true_image_shapes=None) + return (predictions['box_encodings'], + predictions['class_predictions_with_background'], + predictions['final_anchors']) + batch_size = 3 + image_size = 2 + channels = 3 + input_image = np.random.rand(batch_size, image_size, image_size, + channels).astype(np.float32) + expected_box_encodings_shape = (batch_size, num_anchors, code_size) + expected_class_predictions_shape = (batch_size, num_anchors, num_classes+1) + final_anchors_shape = (batch_size, num_anchors, 4) + (box_encodings, class_predictions, final_anchors) = self.execute( + graph_fn, [input_image], graph=g) + self.assertAllEqual(box_encodings.shape, expected_box_encodings_shape) + self.assertAllEqual(class_predictions.shape, + expected_class_predictions_shape) + self.assertAllEqual(final_anchors.shape, final_anchors_shape) + + def test_predict_with_raw_output_fields(self): + with test_utils.GraphContextOrNone() as g: + model, num_classes, num_anchors, code_size = self._create_model( + return_raw_detections_during_predict=True) + + def graph_fn(input_image): + predictions = model.predict(input_image, true_image_shapes=None) + return (predictions['box_encodings'], + predictions['class_predictions_with_background'], + predictions['final_anchors'], + predictions['raw_detection_boxes'], + predictions['raw_detection_feature_map_indices']) + batch_size = 3 + image_size = 2 + channels = 3 + input_image = np.random.rand(batch_size, image_size, image_size, + channels).astype(np.float32) + expected_box_encodings_shape = (batch_size, num_anchors, code_size) + expected_class_predictions_shape = (batch_size, num_anchors, num_classes+1) + final_anchors_shape = (batch_size, num_anchors, 4) + expected_raw_detection_boxes_shape = (batch_size, num_anchors, 4) + (box_encodings, class_predictions, final_anchors, raw_detection_boxes, + raw_detection_feature_map_indices) = self.execute( + graph_fn, [input_image], graph=g) + self.assertAllEqual(box_encodings.shape, expected_box_encodings_shape) + self.assertAllEqual(class_predictions.shape, + expected_class_predictions_shape) + self.assertAllEqual(final_anchors.shape, final_anchors_shape) + self.assertAllEqual(raw_detection_boxes.shape, + expected_raw_detection_boxes_shape) + 
self.assertAllEqual(raw_detection_feature_map_indices, + np.zeros((batch_size, num_anchors))) + + def test_raw_detection_boxes_agree_predict_postprocess(self): + with test_utils.GraphContextOrNone() as g: + model, _, _, _ = self._create_model( + return_raw_detections_during_predict=True) + def graph_fn(): + size = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) + batch = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) + shape = tf.stack([batch, size, size, 3]) + image = tf.random.uniform(shape) + preprocessed_inputs, true_image_shapes = model.preprocess( + image) + prediction_dict = model.predict(preprocessed_inputs, + true_image_shapes) + raw_detection_boxes_predict = prediction_dict['raw_detection_boxes'] + detections = model.postprocess(prediction_dict, true_image_shapes) + raw_detection_boxes_postprocess = detections['raw_detection_boxes'] + return raw_detection_boxes_predict, raw_detection_boxes_postprocess + (raw_detection_boxes_predict_out, + raw_detection_boxes_postprocess_out) = self.execute_cpu(graph_fn, [], + graph=g) + self.assertAllEqual(raw_detection_boxes_predict_out, + raw_detection_boxes_postprocess_out) + + def test_postprocess_results_are_correct(self): + + with test_utils.GraphContextOrNone() as g: + model, _, _, _ = self._create_model() + + def graph_fn(): + size = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) + batch = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) + shape = tf.stack([batch, size, size, 3]) + image = tf.random.uniform(shape) + preprocessed_inputs, true_image_shapes = model.preprocess( + image) + prediction_dict = model.predict(preprocessed_inputs, + true_image_shapes) + detections = model.postprocess(prediction_dict, true_image_shapes) + return [ + batch, detections['detection_boxes'], detections['detection_scores'], + detections['detection_classes'], + detections['detection_multiclass_scores'], + detections['num_detections'], detections['raw_detection_boxes'], + detections['raw_detection_scores'], + detections['detection_anchor_indices'] + ] + + expected_boxes = [ + [ + [0, 0, .5, .5], + [0, .5, .5, 1], + [.5, 0, 1, .5], + [0, 0, 0, 0], # pruned prediction + [0, 0, 0, 0] + ], # padding + [ + [0, 0, .5, .5], + [0, .5, .5, 1], + [.5, 0, 1, .5], + [0, 0, 0, 0], # pruned prediction + [0, 0, 0, 0] + ] + ] # padding + expected_scores = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]] + expected_multiclass_scores = [[[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]], + [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]] + + expected_classes = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]] + expected_num_detections = np.array([3, 3]) + + expected_raw_detection_boxes = [[[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.], + [0.5, 0., 1., 0.5], [1., 1., 1.5, 1.5]], + [[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.], + [0.5, 0., 1., 0.5], [1., 1., 1.5, 1.5]]] + expected_raw_detection_scores = [[[0, 0], [0, 0], [0, 0], [0, 0]], + [[0, 0], [0, 0], [0, 0], [0, 0]]] + expected_detection_anchor_indices = [[0, 1, 2], [0, 1, 2]] + (batch, detection_boxes, detection_scores, detection_classes, + detection_multiclass_scores, num_detections, raw_detection_boxes, + raw_detection_scores, detection_anchor_indices) = self.execute_cpu( + graph_fn, [], graph=g) + for image_idx in range(batch): + self.assertTrue( + test_utils.first_rows_close_as_set( + detection_boxes[image_idx].tolist(), expected_boxes[image_idx])) + self.assertSameElements(detection_anchor_indices[image_idx], + expected_detection_anchor_indices[image_idx]) + self.assertAllClose(detection_scores, expected_scores) + 
self.assertAllClose(detection_classes, expected_classes) + self.assertAllClose(detection_multiclass_scores, expected_multiclass_scores) + self.assertAllClose(num_detections, expected_num_detections) + self.assertAllEqual(raw_detection_boxes, expected_raw_detection_boxes) + self.assertAllEqual(raw_detection_scores, + expected_raw_detection_scores) + + def test_postprocess_results_are_correct_static(self): + with test_utils.GraphContextOrNone() as g: + model, _, _, _ = self._create_model(use_static_shapes=True, + nms_max_size_per_class=4) + + def graph_fn(input_image): + preprocessed_inputs, true_image_shapes = model.preprocess(input_image) + prediction_dict = model.predict(preprocessed_inputs, + true_image_shapes) + detections = model.postprocess(prediction_dict, true_image_shapes) + return (detections['detection_boxes'], detections['detection_scores'], + detections['detection_classes'], detections['num_detections'], + detections['detection_multiclass_scores']) + + expected_boxes = [ + [ + [0, 0, .5, .5], + [0, .5, .5, 1], + [.5, 0, 1, .5], + [0, 0, 0, 0] + ], # padding + [ + [0, 0, .5, .5], + [0, .5, .5, 1], + [.5, 0, 1, .5], + [0, 0, 0, 0] + ] + ] # padding + expected_scores = [[0, 0, 0, 0], [0, 0, 0, 0]] + expected_multiclass_scores = [[[0, 0], [0, 0], [0, 0], [0, 0]], + [[0, 0], [0, 0], [0, 0], [0, 0]]] + expected_classes = [[0, 0, 0, 0], [0, 0, 0, 0]] + expected_num_detections = np.array([3, 3]) + batch_size = 2 + image_size = 2 + channels = 3 + input_image = np.random.rand(batch_size, image_size, image_size, + channels).astype(np.float32) + (detection_boxes, detection_scores, detection_classes, + num_detections, detection_multiclass_scores) = self.execute(graph_fn, + [input_image], + graph=g) + for image_idx in range(batch_size): + self.assertTrue(test_utils.first_rows_close_as_set( + detection_boxes[image_idx][ + 0:expected_num_detections[image_idx]].tolist(), + expected_boxes[image_idx][0:expected_num_detections[image_idx]])) + self.assertAllClose( + detection_scores[image_idx][0:expected_num_detections[image_idx]], + expected_scores[image_idx][0:expected_num_detections[image_idx]]) + self.assertAllClose( + detection_multiclass_scores[image_idx] + [0:expected_num_detections[image_idx]], + expected_multiclass_scores[image_idx] + [0:expected_num_detections[image_idx]]) + self.assertAllClose( + detection_classes[image_idx][0:expected_num_detections[image_idx]], + expected_classes[image_idx][0:expected_num_detections[image_idx]]) + self.assertAllClose(num_detections, + expected_num_detections) + + def test_postprocess_results_are_correct_with_calibration(self): + with test_utils.GraphContextOrNone() as g: + model, _, _, _ = self._create_model(calibration_mapping_value=0.5) + + def graph_fn(): + size = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) + batch = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) + shape = tf.stack([batch, size, size, 3]) + image = tf.random.uniform(shape) + preprocessed_inputs, true_image_shapes = model.preprocess( + image) + prediction_dict = model.predict(preprocessed_inputs, + true_image_shapes) + detections = model.postprocess(prediction_dict, true_image_shapes) + return detections['detection_scores'], detections['raw_detection_scores'] + # Calibration mapping value below is set to map all scores to 0.5, except + # for the last two detections in each batch (see expected number of + # detections below. 
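+ # In other words, every valid detection maps to a calibrated score of 0.5;
+ # the mock anchors yield three valid detections per image (as in the tests
+ # above), so the trailing two slots per row below are zero padding.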
+ expected_scores = [[0.5, 0.5, 0.5, 0., 0.], [0.5, 0.5, 0.5, 0., 0.]] + expected_raw_detection_scores = [ + [[0.5, 0.5], [0.5, 0.5], [0.5, 0.5], [0.5, 0.5]], + [[0.5, 0.5], [0.5, 0.5], [0.5, 0.5], [0.5, 0.5]] + ] + detection_scores, raw_detection_scores = self.execute_cpu(graph_fn, [], + graph=g) + self.assertAllClose(detection_scores, expected_scores) + self.assertAllEqual(raw_detection_scores, expected_raw_detection_scores) + + def test_loss_results_are_correct(self): + + with test_utils.GraphContextOrNone() as g: + model, num_classes, num_anchors, _ = self._create_model( + apply_hard_mining=False) + def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2, + groundtruth_classes1, groundtruth_classes2): + groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2] + groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2] + model.provide_groundtruth(groundtruth_boxes_list, + groundtruth_classes_list) + prediction_dict = model.predict(preprocessed_tensor, + true_image_shapes=None) + loss_dict = model.loss(prediction_dict, true_image_shapes=None) + return (self._get_value_for_matching_key(loss_dict, + 'Loss/localization_loss'), + self._get_value_for_matching_key(loss_dict, + 'Loss/classification_loss')) + batch_size = 2 + preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32) + groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32) + groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32) + groundtruth_classes1 = np.array([[1]], dtype=np.float32) + groundtruth_classes2 = np.array([[1]], dtype=np.float32) + (localization_loss, classification_loss) = self.execute( + graph_fn, [ + preprocessed_input, groundtruth_boxes1, groundtruth_boxes2, + groundtruth_classes1, groundtruth_classes2 + ], + graph=g) + + expected_localization_loss = 0.0 + expected_classification_loss = (batch_size * num_anchors + * (num_classes+1) * np.log(2.0)) + + self.assertAllClose(localization_loss, expected_localization_loss) + self.assertAllClose(classification_loss, expected_classification_loss) + + def test_loss_results_are_correct_with_normalize_by_codesize_true(self): + with test_utils.GraphContextOrNone() as g: + model, _, _, _ = self._create_model( + apply_hard_mining=False, normalize_loc_loss_by_codesize=True) + + def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2, + groundtruth_classes1, groundtruth_classes2): + groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2] + groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2] + model.provide_groundtruth(groundtruth_boxes_list, + groundtruth_classes_list) + prediction_dict = model.predict(preprocessed_tensor, + true_image_shapes=None) + loss_dict = model.loss(prediction_dict, true_image_shapes=None) + return (self._get_value_for_matching_key(loss_dict, + 'Loss/localization_loss'),) + + batch_size = 2 + preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32) + groundtruth_boxes1 = np.array([[0, 0, 1, 1]], dtype=np.float32) + groundtruth_boxes2 = np.array([[0, 0, 1, 1]], dtype=np.float32) + groundtruth_classes1 = np.array([[1]], dtype=np.float32) + groundtruth_classes2 = np.array([[1]], dtype=np.float32) + expected_localization_loss = 0.5 / 4 + localization_loss = self.execute(graph_fn, [preprocessed_input, + groundtruth_boxes1, + groundtruth_boxes2, + groundtruth_classes1, + groundtruth_classes2], graph=g) + self.assertAllClose(localization_loss, expected_localization_loss) + + def 
test_loss_results_are_correct_with_hard_example_mining(self): + with test_utils.GraphContextOrNone() as g: + model, num_classes, num_anchors, _ = self._create_model() + def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2, + groundtruth_classes1, groundtruth_classes2): + groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2] + groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2] + model.provide_groundtruth(groundtruth_boxes_list, + groundtruth_classes_list) + prediction_dict = model.predict(preprocessed_tensor, + true_image_shapes=None) + loss_dict = model.loss(prediction_dict, true_image_shapes=None) + return (self._get_value_for_matching_key(loss_dict, + 'Loss/localization_loss'), + self._get_value_for_matching_key(loss_dict, + 'Loss/classification_loss')) + + batch_size = 2 + preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32) + groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32) + groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32) + groundtruth_classes1 = np.array([[1]], dtype=np.float32) + groundtruth_classes2 = np.array([[1]], dtype=np.float32) + expected_localization_loss = 0.0 + expected_classification_loss = (batch_size * num_anchors + * (num_classes+1) * np.log(2.0)) + (localization_loss, classification_loss) = self.execute_cpu( + graph_fn, [ + preprocessed_input, groundtruth_boxes1, groundtruth_boxes2, + groundtruth_classes1, groundtruth_classes2 + ], graph=g) + self.assertAllClose(localization_loss, expected_localization_loss) + self.assertAllClose(classification_loss, expected_classification_loss) + + def test_loss_results_are_correct_without_add_background_class(self): + + with test_utils.GraphContextOrNone() as g: + model, num_classes, num_anchors, _ = self._create_model( + apply_hard_mining=False, add_background_class=False) + + def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2, + groundtruth_classes1, groundtruth_classes2): + groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2] + groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2] + model.provide_groundtruth(groundtruth_boxes_list, + groundtruth_classes_list) + prediction_dict = model.predict( + preprocessed_tensor, true_image_shapes=None) + loss_dict = model.loss(prediction_dict, true_image_shapes=None) + return (loss_dict['Loss/localization_loss'], + loss_dict['Loss/classification_loss']) + + batch_size = 2 + preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32) + groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32) + groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32) + groundtruth_classes1 = np.array([[1]], dtype=np.float32) + groundtruth_classes2 = np.array([[1]], dtype=np.float32) + expected_localization_loss = 0.0 + expected_classification_loss = ( + batch_size * num_anchors * num_classes * np.log(2.0)) + (localization_loss, classification_loss) = self.execute( + graph_fn, [ + preprocessed_input, groundtruth_boxes1, groundtruth_boxes2, + groundtruth_classes1, groundtruth_classes2 + ], graph=g) + + self.assertAllClose(localization_loss, expected_localization_loss) + self.assertAllClose(classification_loss, expected_classification_loss) + + + def test_loss_results_are_correct_with_losses_mask(self): + with test_utils.GraphContextOrNone() as g: + model, num_classes, num_anchors, _ = self._create_model( + apply_hard_mining=False) + def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2, + 
groundtruth_boxes3, groundtruth_classes1, groundtruth_classes2, + groundtruth_classes3): + groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2, + groundtruth_boxes3] + groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2, + groundtruth_classes3] + is_annotated_list = [tf.constant(True), tf.constant(True), + tf.constant(False)] + model.provide_groundtruth(groundtruth_boxes_list, + groundtruth_classes_list, + is_annotated_list=is_annotated_list) + prediction_dict = model.predict(preprocessed_tensor, + true_image_shapes=None) + loss_dict = model.loss(prediction_dict, true_image_shapes=None) + return (self._get_value_for_matching_key(loss_dict, + 'Loss/localization_loss'), + self._get_value_for_matching_key(loss_dict, + 'Loss/classification_loss')) + + batch_size = 3 + preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32) + groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32) + groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32) + groundtruth_boxes3 = np.array([[0, 0, .5, .5]], dtype=np.float32) + groundtruth_classes1 = np.array([[1]], dtype=np.float32) + groundtruth_classes2 = np.array([[1]], dtype=np.float32) + groundtruth_classes3 = np.array([[1]], dtype=np.float32) + expected_localization_loss = 0.0 + # Note that we are subtracting 1 from batch_size, since the final image is + # not annotated. + expected_classification_loss = ((batch_size - 1) * num_anchors + * (num_classes+1) * np.log(2.0)) + (localization_loss, + classification_loss) = self.execute(graph_fn, [preprocessed_input, + groundtruth_boxes1, + groundtruth_boxes2, + groundtruth_boxes3, + groundtruth_classes1, + groundtruth_classes2, + groundtruth_classes3], + graph=g) + self.assertAllClose(localization_loss, expected_localization_loss) + self.assertAllClose(classification_loss, expected_classification_loss) + + def test_restore_map_for_detection_ckpt(self): + # TODO(rathodv): Support TF2.X + if self.is_tf2(): return + model, _, _, _ = self._create_model() + model.predict(tf.constant(np.array([[[[0, 0], [1, 1]], [[1, 0], [0, 1]]]], + dtype=np.float32)), + true_image_shapes=None) + init_op = tf.global_variables_initializer() + saver = tf.train.Saver() + save_path = self.get_temp_dir() + with self.session() as sess: + sess.run(init_op) + saved_model_path = saver.save(sess, save_path) + var_map = model.restore_map( + fine_tune_checkpoint_type='detection', + load_all_detection_checkpoint_vars=False) + self.assertIsInstance(var_map, dict) + saver = tf.train.Saver(var_map) + saver.restore(sess, saved_model_path) + for var in sess.run(tf.report_uninitialized_variables()): + self.assertNotIn('FeatureExtractor', var) + + def test_restore_map_for_classification_ckpt(self): + # TODO(rathodv): Support TF2.X + if self.is_tf2(): return + # Define mock tensorflow classification graph and save variables. + test_graph_classification = tf.Graph() + with test_graph_classification.as_default(): + image = tf.placeholder(dtype=tf.float32, shape=[1, 20, 20, 3]) + + with tf.variable_scope('mock_model'): + net = slim.conv2d(image, num_outputs=32, kernel_size=1, scope='layer1') + slim.conv2d(net, num_outputs=3, kernel_size=1, scope='layer2') + + init_op = tf.global_variables_initializer() + saver = tf.train.Saver() + save_path = self.get_temp_dir() + with self.session(graph=test_graph_classification) as sess: + sess.run(init_op) + saved_model_path = saver.save(sess, save_path) + + # Create tensorflow detection graph and load variables from + # classification checkpoint. 
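+ # restore_map(fine_tune_checkpoint_type='classification') delegates to the
+ # feature extractor's restore_from_classification_checkpoint_fn, so the
+ # returned map should contain only feature-extractor variables; the
+ # assertion below relies on 'another_variable' being absent from it.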
+ test_graph_detection = tf.Graph() + with test_graph_detection.as_default(): + model, _, _, _ = self._create_model() + inputs_shape = [2, 2, 2, 3] + inputs = tf.cast(tf.random_uniform( + inputs_shape, minval=0, maxval=255, dtype=tf.int32), dtype=tf.float32) + preprocessed_inputs, true_image_shapes = model.preprocess(inputs) + prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) + model.postprocess(prediction_dict, true_image_shapes) + another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable + var_map = model.restore_map(fine_tune_checkpoint_type='classification') + self.assertNotIn('another_variable', var_map) + self.assertIsInstance(var_map, dict) + saver = tf.train.Saver(var_map) + with self.session(graph=test_graph_detection) as sess: + saver.restore(sess, saved_model_path) + for var in sess.run(tf.report_uninitialized_variables()): + self.assertNotIn(six.ensure_binary('FeatureExtractor'), var) + + def test_load_all_det_checkpoint_vars(self): + # TODO(rathodv): Support TF2.X + if self.is_tf2(): return + test_graph_detection = tf.Graph() + with test_graph_detection.as_default(): + model, _, _, _ = self._create_model() + inputs_shape = [2, 2, 2, 3] + inputs = tf.cast( + tf.random_uniform(inputs_shape, minval=0, maxval=255, dtype=tf.int32), + dtype=tf.float32) + preprocessed_inputs, true_image_shapes = model.preprocess(inputs) + prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) + model.postprocess(prediction_dict, true_image_shapes) + another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable + var_map = model.restore_map( + fine_tune_checkpoint_type='detection', + load_all_detection_checkpoint_vars=True) + self.assertIsInstance(var_map, dict) + self.assertIn('another_variable', var_map) + + def test_loss_results_are_correct_with_random_example_sampling(self): + with test_utils.GraphContextOrNone() as g: + model, num_classes, _, _ = self._create_model( + random_example_sampling=True) + + def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2, + groundtruth_classes1, groundtruth_classes2): + groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2] + groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2] + model.provide_groundtruth(groundtruth_boxes_list, + groundtruth_classes_list) + prediction_dict = model.predict( + preprocessed_tensor, true_image_shapes=None) + loss_dict = model.loss(prediction_dict, true_image_shapes=None) + return (self._get_value_for_matching_key(loss_dict, + 'Loss/localization_loss'), + self._get_value_for_matching_key(loss_dict, + 'Loss/classification_loss')) + + batch_size = 2 + preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32) + groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32) + groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32) + groundtruth_classes1 = np.array([[1]], dtype=np.float32) + groundtruth_classes2 = np.array([[1]], dtype=np.float32) + expected_localization_loss = 0.0 + # Among 4 anchors (1 positive, 3 negative) in this test, only 2 anchors are + # selected (1 positive, 1 negative) since random sampler will adjust number + # of negative examples to make sure positive example fraction in the batch + # is 0.5. 
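+ # Assuming the mock predictor emits all-zero logits (consistent with the
+ # log(2)-per-logit expectations used throughout these tests), each of the
+ # two sampled anchors contributes (num_classes + 1) = 2 sigmoid losses of
+ # log(2), i.e. 2 images * 2 anchors * 2 logits * log(2) ~= 5.55 in total.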
+ expected_classification_loss = ( + batch_size * 2 * (num_classes + 1) * np.log(2.0)) + (localization_loss, classification_loss) = self.execute_cpu( + graph_fn, [ + preprocessed_input, groundtruth_boxes1, groundtruth_boxes2, + groundtruth_classes1, groundtruth_classes2 + ], graph=g) + self.assertAllClose(localization_loss, expected_localization_loss) + self.assertAllClose(classification_loss, expected_classification_loss) + + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/meta_architectures/ssd_meta_arch_test_lib.py b/models/research/object_detection/meta_architectures/ssd_meta_arch_test_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..0991388b31ac6a5974c9297e50b9630b3966e489 --- /dev/null +++ b/models/research/object_detection/meta_architectures/ssd_meta_arch_test_lib.py @@ -0,0 +1,259 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Helper functions for SSD models meta architecture tests.""" + +import functools +import tensorflow.compat.v1 as tf +from google.protobuf import text_format + +from object_detection.builders import post_processing_builder +from object_detection.core import anchor_generator +from object_detection.core import balanced_positive_negative_sampler as sampler +from object_detection.core import box_list +from object_detection.core import losses +from object_detection.core import post_processing +from object_detection.core import region_similarity_calculator as sim_calc +from object_detection.core import target_assigner +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.protos import calibration_pb2 +from object_detection.protos import model_pb2 +from object_detection.utils import ops +from object_detection.utils import test_case +from object_detection.utils import test_utils +from object_detection.utils import tf_version + +# pylint: disable=g-import-not-at-top +try: + import tf_slim as slim +except ImportError: + # TF 2.0 doesn't ship with contrib. 
+ pass
+# pylint: enable=g-import-not-at-top
+
+keras = tf.keras.layers
+
+
+class FakeSSDFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
+ """Fake ssd feature extractor for ssd meta arch tests."""
+
+ def __init__(self):
+ super(FakeSSDFeatureExtractor, self).__init__(
+ is_training=True,
+ depth_multiplier=0,
+ min_depth=0,
+ pad_to_multiple=1,
+ conv_hyperparams_fn=None)
+
+ def preprocess(self, resized_inputs):
+ return tf.identity(resized_inputs)
+
+ def extract_features(self, preprocessed_inputs):
+ with tf.variable_scope('mock_model'):
+ features = slim.conv2d(
+ inputs=preprocessed_inputs,
+ num_outputs=32,
+ kernel_size=1,
+ scope='layer1')
+ return [features]
+
+
+class FakeSSDKerasFeatureExtractor(ssd_meta_arch.SSDKerasFeatureExtractor):
+ """Fake keras based ssd feature extractor for ssd meta arch tests."""
+
+ def __init__(self):
+ with tf.name_scope('mock_model'):
+ super(FakeSSDKerasFeatureExtractor, self).__init__(
+ is_training=True,
+ depth_multiplier=0,
+ min_depth=0,
+ pad_to_multiple=1,
+ conv_hyperparams=None,
+ freeze_batchnorm=False,
+ inplace_batchnorm_update=False,
+ )
+
+ self._conv = keras.Conv2D(filters=32, kernel_size=1, name='layer1')
+
+ def preprocess(self, resized_inputs):
+ return tf.identity(resized_inputs)
+
+ def _extract_features(self, preprocessed_inputs, **kwargs):
+ with tf.name_scope('mock_model'):
+ return [self._conv(preprocessed_inputs)]
+
+
+class MockAnchorGenerator2x2(anchor_generator.AnchorGenerator):
+ """A simple 2x2 anchor grid on the unit square used for test only."""
+
+ def name_scope(self):
+ return 'MockAnchorGenerator'
+
+ def num_anchors_per_location(self):
+ return [1]
+
+ def _generate(self, feature_map_shape_list, im_height, im_width):
+ return [
+ box_list.BoxList(
+ tf.constant(
+ [
+ [0, 0, .5, .5],
+ [0, .5, .5, 1],
+ [.5, 0, 1, .5],
+ [1., 1., 1.5, 1.5] # Anchor that is outside clip_window.
+ ], + tf.float32)) + ] + + def num_anchors(self): + return 4 + + +class SSDMetaArchTestBase(test_case.TestCase): + """Base class to test SSD based meta architectures.""" + + def _create_model( + self, + model_fn=ssd_meta_arch.SSDMetaArch, + apply_hard_mining=True, + normalize_loc_loss_by_codesize=False, + add_background_class=True, + random_example_sampling=False, + expected_loss_weights=model_pb2.DetectionModel().ssd.loss.NONE, + min_num_negative_samples=1, + desired_negative_sampling_ratio=3, + predict_mask=False, + use_static_shapes=False, + nms_max_size_per_class=5, + calibration_mapping_value=None, + return_raw_detections_during_predict=False): + is_training = False + num_classes = 1 + mock_anchor_generator = MockAnchorGenerator2x2() + use_keras = tf_version.is_tf2() + if use_keras: + mock_box_predictor = test_utils.MockKerasBoxPredictor( + is_training, num_classes, add_background_class=add_background_class) + else: + mock_box_predictor = test_utils.MockBoxPredictor( + is_training, num_classes, add_background_class=add_background_class) + mock_box_coder = test_utils.MockBoxCoder() + if use_keras: + fake_feature_extractor = FakeSSDKerasFeatureExtractor() + else: + fake_feature_extractor = FakeSSDFeatureExtractor() + mock_matcher = test_utils.MockMatcher() + region_similarity_calculator = sim_calc.IouSimilarity() + encode_background_as_zeros = False + + def image_resizer_fn(image): + return [tf.identity(image), tf.shape(image)] + + classification_loss = losses.WeightedSigmoidClassificationLoss() + localization_loss = losses.WeightedSmoothL1LocalizationLoss() + non_max_suppression_fn = functools.partial( + post_processing.batch_multiclass_non_max_suppression, + score_thresh=-20.0, + iou_thresh=1.0, + max_size_per_class=nms_max_size_per_class, + max_total_size=nms_max_size_per_class, + use_static_shapes=use_static_shapes) + score_conversion_fn = tf.identity + calibration_config = calibration_pb2.CalibrationConfig() + if calibration_mapping_value: + calibration_text_proto = """ + function_approximation { + x_y_pairs { + x_y_pair { + x: 0.0 + y: %f + } + x_y_pair { + x: 1.0 + y: %f + }}}""" % (calibration_mapping_value, calibration_mapping_value) + text_format.Merge(calibration_text_proto, calibration_config) + score_conversion_fn = ( + post_processing_builder._build_calibrated_score_converter( # pylint: disable=protected-access + tf.identity, calibration_config)) + classification_loss_weight = 1.0 + localization_loss_weight = 1.0 + negative_class_weight = 1.0 + normalize_loss_by_num_matches = False + + hard_example_miner = None + if apply_hard_mining: + # This hard example miner is expected to be a no-op. 
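+ # With num_hard_examples=None every example is retained, and an NMS IOU
+ # threshold of 1.0 only suppresses (near-)identical boxes, so the miner
+ # should leave the anchorwise losses effectively unchanged.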
+ hard_example_miner = losses.HardExampleMiner( + num_hard_examples=None, iou_threshold=1.0) + + random_example_sampler = None + if random_example_sampling: + random_example_sampler = sampler.BalancedPositiveNegativeSampler( + positive_fraction=0.5) + + target_assigner_instance = target_assigner.TargetAssigner( + region_similarity_calculator, + mock_matcher, + mock_box_coder, + negative_class_weight=negative_class_weight) + + model_config = model_pb2.DetectionModel() + if expected_loss_weights == model_config.ssd.loss.NONE: + expected_loss_weights_fn = None + else: + raise ValueError('Not a valid value for expected_loss_weights.') + + code_size = 4 + + kwargs = {} + if predict_mask: + kwargs.update({ + 'mask_prediction_fn': test_utils.MockMaskHead(num_classes=1).predict, + }) + + model = model_fn( + is_training=is_training, + anchor_generator=mock_anchor_generator, + box_predictor=mock_box_predictor, + box_coder=mock_box_coder, + feature_extractor=fake_feature_extractor, + encode_background_as_zeros=encode_background_as_zeros, + image_resizer_fn=image_resizer_fn, + non_max_suppression_fn=non_max_suppression_fn, + score_conversion_fn=score_conversion_fn, + classification_loss=classification_loss, + localization_loss=localization_loss, + classification_loss_weight=classification_loss_weight, + localization_loss_weight=localization_loss_weight, + normalize_loss_by_num_matches=normalize_loss_by_num_matches, + hard_example_miner=hard_example_miner, + target_assigner_instance=target_assigner_instance, + add_summaries=False, + normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize, + freeze_batchnorm=False, + inplace_batchnorm_update=False, + add_background_class=add_background_class, + random_example_sampler=random_example_sampler, + expected_loss_weights_fn=expected_loss_weights_fn, + return_raw_detections_during_predict=( + return_raw_detections_during_predict), + **kwargs) + return model, num_classes, mock_anchor_generator.num_anchors(), code_size + + def _get_value_for_matching_key(self, dictionary, suffix): + for key in dictionary.keys(): + if key.endswith(suffix): + return dictionary[key] + raise ValueError('key not found {}'.format(suffix)) diff --git a/models/research/object_detection/metrics/__init__.py b/models/research/object_detection/metrics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/object_detection/metrics/calibration_evaluation.py b/models/research/object_detection/metrics/calibration_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..e3fc4b05639b94d7425a8811ef92e1878c13f4f8 --- /dev/null +++ b/models/research/object_detection/metrics/calibration_evaluation.py @@ -0,0 +1,228 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Class for evaluating object detections with calibration metrics.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf + +from object_detection.box_coders import mean_stddev_box_coder +from object_detection.core import box_list +from object_detection.core import region_similarity_calculator +from object_detection.core import standard_fields +from object_detection.core import target_assigner +from object_detection.matchers import argmax_matcher +from object_detection.metrics import calibration_metrics +from object_detection.utils import object_detection_evaluation + + +# TODO(zbeaver): Implement metrics per category. +class CalibrationDetectionEvaluator( + object_detection_evaluation.DetectionEvaluator): + """Class to evaluate calibration detection metrics.""" + + def __init__(self, + categories, + iou_threshold=0.5): + """Constructor. + + Args: + categories: A list of dicts, each of which has the following keys - + 'id': (required) an integer id uniquely identifying this category. + 'name': (required) string representing category name e.g., 'cat', 'dog'. + iou_threshold: Threshold above which to consider a box as matched during + evaluation. + """ + super(CalibrationDetectionEvaluator, self).__init__(categories) + + # Constructing target_assigner to match detections to groundtruth. + similarity_calc = region_similarity_calculator.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher( + matched_threshold=iou_threshold, unmatched_threshold=iou_threshold) + box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) + self._target_assigner = target_assigner.TargetAssigner( + similarity_calc, matcher, box_coder) + + def match_single_image_info(self, image_info): + """Match detections to groundtruth for a single image. + + Detections are matched to available groundtruth in the image based on the + IOU threshold from the constructor. The classes of the detections and + groundtruth matches are then compared. Detections that do not have IOU above + the required threshold or have different classes from their match are + considered negative matches. All inputs in `image_info` originate or are + inferred from the eval_dict passed to class method + `get_estimator_eval_metric_ops`. + + Args: + image_info: a tuple or list containing the following (in order): + - gt_boxes: tf.float32 tensor of groundtruth boxes. + - gt_classes: tf.int64 tensor of groundtruth classes associated with + groundtruth boxes. + - num_gt_box: scalar indicating the number of groundtruth boxes per + image. + - det_boxes: tf.float32 tensor of detection boxes. + - det_classes: tf.int64 tensor of detection classes associated with + detection boxes. + - num_det_box: scalar indicating the number of detection boxes per + image. + Returns: + is_class_matched: tf.int64 tensor identical in shape to det_boxes, + indicating whether detection boxes matched with and had the same + class as groundtruth annotations. + """ + (gt_boxes, gt_classes, num_gt_box, det_boxes, det_classes, + num_det_box) = image_info + detection_boxes = det_boxes[:num_det_box] + detection_classes = det_classes[:num_det_box] + groundtruth_boxes = gt_boxes[:num_gt_box] + groundtruth_classes = gt_classes[:num_gt_box] + det_boxlist = box_list.BoxList(detection_boxes) + gt_boxlist = box_list.BoxList(groundtruth_boxes) + + # Target assigner requires classes in one-hot format. 
An additional + # dimension is required since gt_classes are 1-indexed; the zero index is + # provided to all non-matches. + one_hot_depth = tf.cast(tf.add(tf.reduce_max(groundtruth_classes), 1), + dtype=tf.int32) + gt_classes_one_hot = tf.one_hot( + groundtruth_classes, one_hot_depth, dtype=tf.float32) + one_hot_cls_targets, _, _, _, _ = self._target_assigner.assign( + det_boxlist, + gt_boxlist, + gt_classes_one_hot, + unmatched_class_label=tf.zeros(shape=one_hot_depth, dtype=tf.float32)) + # Transform from one-hot back to indexes. + cls_targets = tf.argmax(one_hot_cls_targets, axis=1) + is_class_matched = tf.cast( + tf.equal(tf.cast(cls_targets, tf.int64), detection_classes), + dtype=tf.int64) + return is_class_matched + + def get_estimator_eval_metric_ops(self, eval_dict): + """Returns a dictionary of eval metric ops. + + Note that once value_op is called, the detections and groundtruth added via + update_op are cleared. + + This function can take in groundtruth and detections for a batch of images, + or for a single image. For the latter case, the batch dimension for input + tensors need not be present. + + Args: + eval_dict: A dictionary that holds tensors for evaluating object detection + performance. For single-image evaluation, this dictionary may be + produced from eval_util.result_dict_for_single_example(). If multi-image + evaluation, `eval_dict` should contain the fields + 'num_groundtruth_boxes_per_image' and 'num_det_boxes_per_image' to + properly unpad the tensors from the batch. + + Returns: + a dictionary of metric names to tuple of value_op and update_op that can + be used as eval metric ops in tf.estimator.EstimatorSpec. Note that all + update ops must be run together and similarly all value ops must be run + together to guarantee correct behaviour. + """ + # Unpack items from the evaluation dictionary. + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + image_id = eval_dict[input_data_fields.key] + groundtruth_boxes = eval_dict[input_data_fields.groundtruth_boxes] + groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes] + detection_boxes = eval_dict[detection_fields.detection_boxes] + detection_scores = eval_dict[detection_fields.detection_scores] + detection_classes = eval_dict[detection_fields.detection_classes] + num_gt_boxes_per_image = eval_dict.get( + 'num_groundtruth_boxes_per_image', None) + num_det_boxes_per_image = eval_dict.get('num_det_boxes_per_image', None) + is_annotated_batched = eval_dict.get('is_annotated', None) + + if not image_id.shape.as_list(): + # Apply a batch dimension to all tensors. 
+ image_id = tf.expand_dims(image_id, 0) + groundtruth_boxes = tf.expand_dims(groundtruth_boxes, 0) + groundtruth_classes = tf.expand_dims(groundtruth_classes, 0) + detection_boxes = tf.expand_dims(detection_boxes, 0) + detection_scores = tf.expand_dims(detection_scores, 0) + detection_classes = tf.expand_dims(detection_classes, 0) + + if num_gt_boxes_per_image is None: + num_gt_boxes_per_image = tf.shape(groundtruth_boxes)[1:2] + else: + num_gt_boxes_per_image = tf.expand_dims(num_gt_boxes_per_image, 0) + + if num_det_boxes_per_image is None: + num_det_boxes_per_image = tf.shape(detection_boxes)[1:2] + else: + num_det_boxes_per_image = tf.expand_dims(num_det_boxes_per_image, 0) + + if is_annotated_batched is None: + is_annotated_batched = tf.constant([True]) + else: + is_annotated_batched = tf.expand_dims(is_annotated_batched, 0) + else: + if num_gt_boxes_per_image is None: + num_gt_boxes_per_image = tf.tile( + tf.shape(groundtruth_boxes)[1:2], + multiples=tf.shape(groundtruth_boxes)[0:1]) + if num_det_boxes_per_image is None: + num_det_boxes_per_image = tf.tile( + tf.shape(detection_boxes)[1:2], + multiples=tf.shape(detection_boxes)[0:1]) + if is_annotated_batched is None: + is_annotated_batched = tf.ones_like(image_id, dtype=tf.bool) + + # Filter images based on is_annotated_batched and match detections. + image_info = [tf.boolean_mask(tensor, is_annotated_batched) for tensor in + [groundtruth_boxes, groundtruth_classes, + num_gt_boxes_per_image, detection_boxes, detection_classes, + num_det_boxes_per_image]] + is_class_matched = tf.map_fn( + self.match_single_image_info, image_info, dtype=tf.int64) + y_true = tf.squeeze(is_class_matched) + y_pred = tf.squeeze(tf.boolean_mask(detection_scores, is_annotated_batched)) + ece, update_op = calibration_metrics.expected_calibration_error( + y_true, y_pred) + return {'CalibrationError/ExpectedCalibrationError': (ece, update_op)} + + def add_single_ground_truth_image_info(self, image_id, groundtruth_dict): + """Adds groundtruth for a single image to be used for evaluation. + + Args: + image_id: A unique string/integer identifier for the image. + groundtruth_dict: A dictionary of groundtruth numpy arrays required + for evaluations. + """ + raise NotImplementedError + + def add_single_detected_image_info(self, image_id, detections_dict): + """Adds detections for a single image to be used for evaluation. + + Args: + image_id: A unique string/integer identifier for the image. + detections_dict: A dictionary of detection numpy arrays required for + evaluation. + """ + raise NotImplementedError + + def evaluate(self): + """Evaluates detections and returns a dictionary of metrics.""" + raise NotImplementedError + + def clear(self): + """Clears the state to prepare for a fresh evaluation.""" + raise NotImplementedError diff --git a/models/research/object_detection/metrics/calibration_evaluation_tf1_test.py b/models/research/object_detection/metrics/calibration_evaluation_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..0f3d6eb319f0819937c04e030c9e1937bf09db10 --- /dev/null +++ b/models/research/object_detection/metrics/calibration_evaluation_tf1_test.py @@ -0,0 +1,203 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for tensorflow_models.object_detection.metrics.calibration_evaluation.""" # pylint: disable=line-too-long + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest +import tensorflow.compat.v1 as tf +from object_detection.core import standard_fields +from object_detection.metrics import calibration_evaluation +from object_detection.utils import tf_version + + +def _get_categories_list(): + return [{ + 'id': 1, + 'name': 'person' + }, { + 'id': 2, + 'name': 'dog' + }, { + 'id': 3, + 'name': 'cat' + }] + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class CalibrationDetectionEvaluationTest(tf.test.TestCase): + + def _get_ece(self, ece_op, update_op): + """Return scalar expected calibration error.""" + with self.test_session() as sess: + metrics_vars = tf.get_collection(tf.GraphKeys.METRIC_VARIABLES) + sess.run(tf.variables_initializer(var_list=metrics_vars)) + _ = sess.run(update_op) + return sess.run(ece_op) + + def testGetECEWithMatchingGroundtruthAndDetections(self): + """Tests that ECE is calculated correctly when box matches exist.""" + calibration_evaluator = calibration_evaluation.CalibrationDetectionEvaluator( + _get_categories_list(), iou_threshold=0.5) + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + # All gt and detection boxes match. + base_eval_dict = { + input_data_fields.key: + tf.constant(['image_1', 'image_2', 'image_3']), + input_data_fields.groundtruth_boxes: + tf.constant([[[100., 100., 200., 200.]], + [[50., 50., 100., 100.]], + [[25., 25., 50., 50.]]], + dtype=tf.float32), + detection_fields.detection_boxes: + tf.constant([[[100., 100., 200., 200.]], + [[50., 50., 100., 100.]], + [[25., 25., 50., 50.]]], + dtype=tf.float32), + input_data_fields.groundtruth_classes: + tf.constant([[1], [2], [3]], dtype=tf.int64), + # Note that, in the zero ECE case, the detection class for image_2 + # should NOT match groundtruth, since the detection score is zero. + detection_fields.detection_scores: + tf.constant([[1.0], [0.0], [1.0]], dtype=tf.float32) + } + + # Zero ECE (perfectly calibrated). + zero_ece_eval_dict = base_eval_dict.copy() + zero_ece_eval_dict[detection_fields.detection_classes] = tf.constant( + [[1], [1], [3]], dtype=tf.int64) + zero_ece_op, zero_ece_update_op = ( + calibration_evaluator.get_estimator_eval_metric_ops(zero_ece_eval_dict) + ['CalibrationError/ExpectedCalibrationError']) + zero_ece = self._get_ece(zero_ece_op, zero_ece_update_op) + self.assertAlmostEqual(zero_ece, 0.0) + + # ECE of 1 (poorest calibration). 
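+ # The image_1 and image_3 detections are confidently wrong (score 1.0 but a
+ # detected class that differs from groundtruth), while image_2's detection
+ # matches its groundtruth class but has score 0.0, so every bin has
+ # |accuracy - confidence| = 1 and the weighted average (ECE) is exactly 1.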
+ one_ece_eval_dict = base_eval_dict.copy() + one_ece_eval_dict[detection_fields.detection_classes] = tf.constant( + [[3], [2], [1]], dtype=tf.int64) + one_ece_op, one_ece_update_op = ( + calibration_evaluator.get_estimator_eval_metric_ops(one_ece_eval_dict) + ['CalibrationError/ExpectedCalibrationError']) + one_ece = self._get_ece(one_ece_op, one_ece_update_op) + self.assertAlmostEqual(one_ece, 1.0) + + def testGetECEWithUnmatchedGroundtruthAndDetections(self): + """Tests that ECE is correctly calculated when boxes are unmatched.""" + calibration_evaluator = calibration_evaluation.CalibrationDetectionEvaluator( + _get_categories_list(), iou_threshold=0.5) + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + # No gt and detection boxes match. + eval_dict = { + input_data_fields.key: + tf.constant(['image_1', 'image_2', 'image_3']), + input_data_fields.groundtruth_boxes: + tf.constant([[[100., 100., 200., 200.]], + [[50., 50., 100., 100.]], + [[25., 25., 50., 50.]]], + dtype=tf.float32), + detection_fields.detection_boxes: + tf.constant([[[50., 50., 100., 100.]], + [[25., 25., 50., 50.]], + [[100., 100., 200., 200.]]], + dtype=tf.float32), + input_data_fields.groundtruth_classes: + tf.constant([[1], [2], [3]], dtype=tf.int64), + detection_fields.detection_classes: + tf.constant([[1], [1], [3]], dtype=tf.int64), + # Detection scores of zero when boxes are unmatched = ECE of zero. + detection_fields.detection_scores: + tf.constant([[0.0], [0.0], [0.0]], dtype=tf.float32) + } + + ece_op, update_op = calibration_evaluator.get_estimator_eval_metric_ops( + eval_dict)['CalibrationError/ExpectedCalibrationError'] + ece = self._get_ece(ece_op, update_op) + self.assertAlmostEqual(ece, 0.0) + + def testGetECEWithBatchedDetections(self): + """Tests that ECE is correct with multiple detections per image.""" + calibration_evaluator = calibration_evaluation.CalibrationDetectionEvaluator( + _get_categories_list(), iou_threshold=0.5) + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + # Note that image_2 has mismatched classes and detection scores but should + # still produce ECE of 0 because detection scores are also 0. 
+ eval_dict = { + input_data_fields.key: + tf.constant(['image_1', 'image_2', 'image_3']), + input_data_fields.groundtruth_boxes: + tf.constant([[[100., 100., 200., 200.], [50., 50., 100., 100.]], + [[50., 50., 100., 100.], [100., 100., 200., 200.]], + [[25., 25., 50., 50.], [100., 100., 200., 200.]]], + dtype=tf.float32), + detection_fields.detection_boxes: + tf.constant([[[100., 100., 200., 200.], [50., 50., 100., 100.]], + [[50., 50., 100., 100.], [25., 25., 50., 50.]], + [[25., 25., 50., 50.], [100., 100., 200., 200.]]], + dtype=tf.float32), + input_data_fields.groundtruth_classes: + tf.constant([[1, 2], [2, 3], [3, 1]], dtype=tf.int64), + detection_fields.detection_classes: + tf.constant([[1, 2], [1, 1], [3, 1]], dtype=tf.int64), + detection_fields.detection_scores: + tf.constant([[1.0, 1.0], [0.0, 0.0], [1.0, 1.0]], dtype=tf.float32) + } + + ece_op, update_op = calibration_evaluator.get_estimator_eval_metric_ops( + eval_dict)['CalibrationError/ExpectedCalibrationError'] + ece = self._get_ece(ece_op, update_op) + self.assertAlmostEqual(ece, 0.0) + + def testGetECEWhenImagesFilteredByIsAnnotated(self): + """Tests that ECE is correct when detections filtered by is_annotated.""" + calibration_evaluator = calibration_evaluation.CalibrationDetectionEvaluator( + _get_categories_list(), iou_threshold=0.5) + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + # ECE will be 0 only if the third image is filtered by is_annotated. + eval_dict = { + input_data_fields.key: + tf.constant(['image_1', 'image_2', 'image_3']), + input_data_fields.groundtruth_boxes: + tf.constant([[[100., 100., 200., 200.]], + [[50., 50., 100., 100.]], + [[25., 25., 50., 50.]]], + dtype=tf.float32), + detection_fields.detection_boxes: + tf.constant([[[100., 100., 200., 200.]], + [[50., 50., 100., 100.]], + [[25., 25., 50., 50.]]], + dtype=tf.float32), + input_data_fields.groundtruth_classes: + tf.constant([[1], [2], [1]], dtype=tf.int64), + detection_fields.detection_classes: + tf.constant([[1], [1], [3]], dtype=tf.int64), + detection_fields.detection_scores: + tf.constant([[1.0], [0.0], [1.0]], dtype=tf.float32), + 'is_annotated': tf.constant([True, True, False], dtype=tf.bool) + } + + ece_op, update_op = calibration_evaluator.get_estimator_eval_metric_ops( + eval_dict)['CalibrationError/ExpectedCalibrationError'] + ece = self._get_ece(ece_op, update_op) + self.assertAlmostEqual(ece, 0.0) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/metrics/calibration_metrics.py b/models/research/object_detection/metrics/calibration_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..611c81c3381604923af7831fb0ab030d56617ebb --- /dev/null +++ b/models/research/object_detection/metrics/calibration_metrics.py @@ -0,0 +1,118 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Object detection calibration metrics. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf +from tensorflow.python.ops import metrics_impl + + +def _safe_div(numerator, denominator): + """Divides two tensors element-wise, returning 0 if the denominator is <= 0. + + Args: + numerator: A real `Tensor`. + denominator: A real `Tensor`, with dtype matching `numerator`. + + Returns: + 0 if `denominator` <= 0, else `numerator` / `denominator` + """ + t = tf.truediv(numerator, denominator) + zero = tf.zeros_like(t, dtype=denominator.dtype) + condition = tf.greater(denominator, zero) + zero = tf.cast(zero, t.dtype) + return tf.where(condition, t, zero) + + +def _ece_from_bins(bin_counts, bin_true_sum, bin_preds_sum, name): + """Calculates Expected Calibration Error from accumulated statistics.""" + bin_accuracies = _safe_div(bin_true_sum, bin_counts) + bin_confidences = _safe_div(bin_preds_sum, bin_counts) + abs_bin_errors = tf.abs(bin_accuracies - bin_confidences) + bin_weights = _safe_div(bin_counts, tf.reduce_sum(bin_counts)) + return tf.reduce_sum(abs_bin_errors * bin_weights, name=name) + + +def expected_calibration_error(y_true, y_pred, nbins=20): + """Calculates Expected Calibration Error (ECE). + + ECE is a scalar summary statistic of calibration error. It is the + sample-weighted average of the difference between the predicted and true + probabilities of a positive detection across uniformly-spaced model + confidences [0, 1]. See referenced paper for a thorough explanation. + + Reference: + Guo, et. al, "On Calibration of Modern Neural Networks" + Page 2, Expected Calibration Error (ECE). + https://arxiv.org/pdf/1706.04599.pdf + + This function creates three local variables, `bin_counts`, `bin_true_sum`, and + `bin_preds_sum` that are used to compute ECE. For estimation of the metric + over a stream of data, the function creates an `update_op` operation that + updates these variables and returns the ECE. + + Args: + y_true: 1-D tf.int64 Tensor of binarized ground truth, corresponding to each + prediction in y_pred. + y_pred: 1-D tf.float32 tensor of model confidence scores in range + [0.0, 1.0]. + nbins: int specifying the number of uniformly-spaced bins into which y_pred + will be bucketed. + + Returns: + value_op: A value metric op that returns ece. + update_op: An operation that increments the `bin_counts`, `bin_true_sum`, + and `bin_preds_sum` variables appropriately and whose value matches `ece`. + + Raises: + InvalidArgumentError: if y_pred is not in [0.0, 1.0]. 
+ """ + bin_counts = metrics_impl.metric_variable( + [nbins], tf.float32, name='bin_counts') + bin_true_sum = metrics_impl.metric_variable( + [nbins], tf.float32, name='true_sum') + bin_preds_sum = metrics_impl.metric_variable( + [nbins], tf.float32, name='preds_sum') + + with tf.control_dependencies([ + tf.assert_greater_equal(y_pred, 0.0), + tf.assert_less_equal(y_pred, 1.0), + ]): + bin_ids = tf.histogram_fixed_width_bins(y_pred, [0.0, 1.0], nbins=nbins) + + with tf.control_dependencies([bin_ids]): + update_bin_counts_op = tf.assign_add( + bin_counts, tf.cast(tf.bincount(bin_ids, minlength=nbins), + dtype=tf.float32)) + update_bin_true_sum_op = tf.assign_add( + bin_true_sum, + tf.cast(tf.bincount(bin_ids, weights=y_true, minlength=nbins), + dtype=tf.float32)) + update_bin_preds_sum_op = tf.assign_add( + bin_preds_sum, + tf.cast(tf.bincount(bin_ids, weights=y_pred, minlength=nbins), + dtype=tf.float32)) + + ece_update_op = _ece_from_bins( + update_bin_counts_op, + update_bin_true_sum_op, + update_bin_preds_sum_op, + name='update_op') + ece = _ece_from_bins(bin_counts, bin_true_sum, bin_preds_sum, name='value') + return ece, ece_update_op diff --git a/models/research/object_detection/metrics/calibration_metrics_tf1_test.py b/models/research/object_detection/metrics/calibration_metrics_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..9c1adbca20dfae80e97927d462c9cc18de6ff823 --- /dev/null +++ b/models/research/object_detection/metrics/calibration_metrics_tf1_test.py @@ -0,0 +1,112 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+"""Tests for calibration_metrics."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import unittest
+import numpy as np
+import tensorflow.compat.v1 as tf
+from object_detection.metrics import calibration_metrics
+from object_detection.utils import tf_version
+
+
+@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
+class CalibrationLibTest(tf.test.TestCase):
+
+  @staticmethod
+  def _get_calibration_placeholders():
+    """Returns TF placeholders for y_true and y_pred."""
+    return (tf.placeholder(tf.int64, shape=(None)),
+            tf.placeholder(tf.float32, shape=(None)))
+
+  def test_expected_calibration_error_all_bins_filled(self):
+    """Test expected calibration error when all bins contain predictions."""
+    y_true, y_pred = self._get_calibration_placeholders()
+    expected_ece_op, update_op = calibration_metrics.expected_calibration_error(
+        y_true, y_pred, nbins=2)
+    with self.test_session() as sess:
+      metrics_vars = tf.get_collection(tf.GraphKeys.METRIC_VARIABLES)
+      sess.run(tf.variables_initializer(var_list=metrics_vars))
+      # Bin calibration errors (|confidence - accuracy| * bin_weight):
+      # - [0,0.5): |0.2 - 0.333| * (3/5) = 0.08
+      # - [0.5, 1]: |0.75 - 0.5| * (2/5) = 0.1
+      sess.run(
+          update_op,
+          feed_dict={
+              y_pred: np.array([0., 0.2, 0.4, 0.5, 1.0]),
+              y_true: np.array([0, 0, 1, 0, 1])
+          })
+      actual_ece = 0.08 + 0.1
+      expected_ece = sess.run(expected_ece_op)
+      self.assertAlmostEqual(actual_ece, expected_ece)
+
+  def test_expected_calibration_error_all_bins_not_filled(self):
+    """Test expected calibration error when one bin receives no predictions."""
+    y_true, y_pred = self._get_calibration_placeholders()
+    expected_ece_op, update_op = calibration_metrics.expected_calibration_error(
+        y_true, y_pred, nbins=2)
+    with self.test_session() as sess:
+      metrics_vars = tf.get_collection(tf.GraphKeys.METRIC_VARIABLES)
+      sess.run(tf.variables_initializer(var_list=metrics_vars))
+      # Only bin [0, 0.5) receives predictions; its calibration error is
+      # |0.2 - 0.333| * (3/3) = 0.133, and the empty [0.5, 1] bin contributes
+      # nothing.
+      sess.run(
+          update_op,
+          feed_dict={
+              y_pred: np.array([0., 0.2, 0.4]),
+              y_true: np.array([0, 0, 1])
+          })
+      actual_ece = np.abs(0.2 - (1 / 3.))
+      expected_ece = sess.run(expected_ece_op)
+      self.assertAlmostEqual(actual_ece, expected_ece)
+
+  def test_expected_calibration_error_with_multiple_data_streams(self):
+    """Test expected calibration error when multiple data batches provided."""
+    y_true, y_pred = self._get_calibration_placeholders()
+    expected_ece_op, update_op = calibration_metrics.expected_calibration_error(
+        y_true, y_pred, nbins=2)
+    with self.test_session() as sess:
+      metrics_vars = tf.get_collection(tf.GraphKeys.METRIC_VARIABLES)
+      sess.run(tf.variables_initializer(var_list=metrics_vars))
+      # Identical data to test_expected_calibration_error_all_bins_filled,
+      # except split over three batches.
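+      # The metric variables accumulate across update_op calls, so after the
+      # three partial batches the bin statistics (and hence the ECE) match the
+      # single-batch case above: 0.08 + 0.1.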
+ sess.run( + update_op, + feed_dict={ + y_pred: np.array([0., 0.2]), + y_true: np.array([0, 0]) + }) + sess.run( + update_op, + feed_dict={ + y_pred: np.array([0.4, 0.5]), + y_true: np.array([1, 0]) + }) + sess.run( + update_op, feed_dict={ + y_pred: np.array([1.0]), + y_true: np.array([1]) + }) + actual_ece = 0.08 + 0.1 + expected_ece = sess.run(expected_ece_op) + self.assertAlmostEqual(actual_ece, expected_ece) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/metrics/coco_evaluation.py b/models/research/object_detection/metrics/coco_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..3ecfddb0dd4221c3e511fab628b884bc5eb514e6 --- /dev/null +++ b/models/research/object_detection/metrics/coco_evaluation.py @@ -0,0 +1,1798 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Class for evaluating object detections with COCO metrics.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from six.moves import zip +import tensorflow.compat.v1 as tf + +from object_detection.core import standard_fields +from object_detection.metrics import coco_tools +from object_detection.utils import json_utils +from object_detection.utils import np_mask_ops +from object_detection.utils import object_detection_evaluation + + +class CocoDetectionEvaluator(object_detection_evaluation.DetectionEvaluator): + """Class to evaluate COCO detection metrics.""" + + def __init__(self, + categories, + include_metrics_per_category=False, + all_metrics_per_category=False): + """Constructor. + + Args: + categories: A list of dicts, each of which has the following keys - + 'id': (required) an integer id uniquely identifying this category. + 'name': (required) string representing category name e.g., 'cat', 'dog'. + include_metrics_per_category: If True, include metrics for each category. + all_metrics_per_category: Whether to include all the summary metrics for + each category in per_category_ap. Be careful with setting it to true if + you have more than handful of categories, because it will pollute + your mldash. + """ + super(CocoDetectionEvaluator, self).__init__(categories) + # _image_ids is a dictionary that maps unique image ids to Booleans which + # indicate whether a corresponding detection has been added. 
+ self._image_ids = {} + self._groundtruth_list = [] + self._detection_boxes_list = [] + self._category_id_set = set([cat['id'] for cat in self._categories]) + self._annotation_id = 1 + self._metrics = None + self._include_metrics_per_category = include_metrics_per_category + self._all_metrics_per_category = all_metrics_per_category + + def clear(self): + """Clears the state to prepare for a fresh evaluation.""" + self._image_ids.clear() + self._groundtruth_list = [] + self._detection_boxes_list = [] + + def add_single_ground_truth_image_info(self, + image_id, + groundtruth_dict): + """Adds groundtruth for a single image to be used for evaluation. + + If the image has already been added, a warning is logged, and groundtruth is + ignored. + + Args: + image_id: A unique string/integer identifier for the image. + groundtruth_dict: A dictionary containing - + InputDataFields.groundtruth_boxes: float32 numpy array of shape + [num_boxes, 4] containing `num_boxes` groundtruth boxes of the format + [ymin, xmin, ymax, xmax] in absolute image coordinates. + InputDataFields.groundtruth_classes: integer numpy array of shape + [num_boxes] containing 1-indexed groundtruth classes for the boxes. + InputDataFields.groundtruth_is_crowd (optional): integer numpy array of + shape [num_boxes] containing iscrowd flag for groundtruth boxes. + InputDataFields.groundtruth_area (optional): float numpy array of + shape [num_boxes] containing the area (in the original absolute + coordinates) of the annotated object. + InputDataFields.groundtruth_keypoints (optional): float numpy array of + keypoints with shape [num_boxes, num_keypoints, 2]. + InputDataFields.groundtruth_keypoint_visibilities (optional): integer + numpy array of keypoint visibilities with shape [num_gt_boxes, + num_keypoints]. Integer is treated as an enum with 0=not labeled, + 1=labeled but not visible and 2=labeled and visible. + """ + if image_id in self._image_ids: + tf.logging.warning('Ignoring ground truth with image id %s since it was ' + 'previously added', image_id) + return + + # Drop optional fields if empty tensor. 
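+    # Zero-length optional arrays are normalized to None below so that the
+    # COCO groundtruth export can treat them as absent rather than as empty
+    # annotations.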
+ groundtruth_is_crowd = groundtruth_dict.get( + standard_fields.InputDataFields.groundtruth_is_crowd) + groundtruth_area = groundtruth_dict.get( + standard_fields.InputDataFields.groundtruth_area) + groundtruth_keypoints = groundtruth_dict.get( + standard_fields.InputDataFields.groundtruth_keypoints) + groundtruth_keypoint_visibilities = groundtruth_dict.get( + standard_fields.InputDataFields.groundtruth_keypoint_visibilities) + if groundtruth_is_crowd is not None and not groundtruth_is_crowd.shape[0]: + groundtruth_is_crowd = None + if groundtruth_area is not None and not groundtruth_area.shape[0]: + groundtruth_area = None + if groundtruth_keypoints is not None and not groundtruth_keypoints.shape[0]: + groundtruth_keypoints = None + if groundtruth_keypoint_visibilities is not None and not groundtruth_keypoint_visibilities.shape[ + 0]: + groundtruth_keypoint_visibilities = None + + self._groundtruth_list.extend( + coco_tools.ExportSingleImageGroundtruthToCoco( + image_id=image_id, + next_annotation_id=self._annotation_id, + category_id_set=self._category_id_set, + groundtruth_boxes=groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_boxes], + groundtruth_classes=groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_classes], + groundtruth_is_crowd=groundtruth_is_crowd, + groundtruth_area=groundtruth_area, + groundtruth_keypoints=groundtruth_keypoints, + groundtruth_keypoint_visibilities=groundtruth_keypoint_visibilities) + ) + + self._annotation_id += groundtruth_dict[standard_fields.InputDataFields. + groundtruth_boxes].shape[0] + # Boolean to indicate whether a detection has been added for this image. + self._image_ids[image_id] = False + + def add_single_detected_image_info(self, + image_id, + detections_dict): + """Adds detections for a single image to be used for evaluation. + + If a detection has already been added for this image id, a warning is + logged, and the detection is skipped. + + Args: + image_id: A unique string/integer identifier for the image. + detections_dict: A dictionary containing - + DetectionResultFields.detection_boxes: float32 numpy array of shape + [num_boxes, 4] containing `num_boxes` detection boxes of the format + [ymin, xmin, ymax, xmax] in absolute image coordinates. + DetectionResultFields.detection_scores: float32 numpy array of shape + [num_boxes] containing detection scores for the boxes. + DetectionResultFields.detection_classes: integer numpy array of shape + [num_boxes] containing 1-indexed detection classes for the boxes. + DetectionResultFields.detection_keypoints (optional): float numpy array + of keypoints with shape [num_boxes, num_keypoints, 2]. + Raises: + ValueError: If groundtruth for the image_id is not available. + """ + if image_id not in self._image_ids: + raise ValueError('Missing groundtruth for image id: {}'.format(image_id)) + + if self._image_ids[image_id]: + tf.logging.warning('Ignoring detection with image id %s since it was ' + 'previously added', image_id) + return + + # Drop optional fields if empty tensor. 
+ detection_keypoints = detections_dict.get( + standard_fields.DetectionResultFields.detection_keypoints) + if detection_keypoints is not None and not detection_keypoints.shape[0]: + detection_keypoints = None + self._detection_boxes_list.extend( + coco_tools.ExportSingleImageDetectionBoxesToCoco( + image_id=image_id, + category_id_set=self._category_id_set, + detection_boxes=detections_dict[ + standard_fields.DetectionResultFields.detection_boxes], + detection_scores=detections_dict[ + standard_fields.DetectionResultFields.detection_scores], + detection_classes=detections_dict[ + standard_fields.DetectionResultFields.detection_classes], + detection_keypoints=detection_keypoints)) + self._image_ids[image_id] = True + + def dump_detections_to_json_file(self, json_output_path): + """Saves the detections into json_output_path in the format used by MS COCO. + + Args: + json_output_path: String containing the output file's path. It can be also + None. In that case nothing will be written to the output file. + """ + if json_output_path and json_output_path is not None: + with tf.gfile.GFile(json_output_path, 'w') as fid: + tf.logging.info('Dumping detections to output json file.') + json_utils.Dump( + obj=self._detection_boxes_list, fid=fid, float_digits=4, indent=2) + + def evaluate(self): + """Evaluates the detection boxes and returns a dictionary of coco metrics. + + Returns: + A dictionary holding - + + 1. summary_metrics: + 'DetectionBoxes_Precision/mAP': mean average precision over classes + averaged over IOU thresholds ranging from .5 to .95 with .05 + increments. + 'DetectionBoxes_Precision/mAP@.50IOU': mean average precision at 50% IOU + 'DetectionBoxes_Precision/mAP@.75IOU': mean average precision at 75% IOU + 'DetectionBoxes_Precision/mAP (small)': mean average precision for small + objects (area < 32^2 pixels). + 'DetectionBoxes_Precision/mAP (medium)': mean average precision for + medium sized objects (32^2 pixels < area < 96^2 pixels). + 'DetectionBoxes_Precision/mAP (large)': mean average precision for large + objects (96^2 pixels < area < 10000^2 pixels). + 'DetectionBoxes_Recall/AR@1': average recall with 1 detection. + 'DetectionBoxes_Recall/AR@10': average recall with 10 detections. + 'DetectionBoxes_Recall/AR@100': average recall with 100 detections. + 'DetectionBoxes_Recall/AR@100 (small)': average recall for small objects + with 100. + 'DetectionBoxes_Recall/AR@100 (medium)': average recall for medium objects + with 100. + 'DetectionBoxes_Recall/AR@100 (large)': average recall for large objects + with 100 detections. + + 2. per_category_ap: if include_metrics_per_category is True, category + specific results with keys of the form: + 'Precision mAP ByCategory/category' (without the supercategory part if + no supercategories exist). For backward compatibility + 'PerformanceByCategory' is included in the output regardless of + all_metrics_per_category. 
+ """ + tf.logging.info('Performing evaluation on %d images.', len(self._image_ids)) + groundtruth_dict = { + 'annotations': self._groundtruth_list, + 'images': [{'id': image_id} for image_id in self._image_ids], + 'categories': self._categories + } + coco_wrapped_groundtruth = coco_tools.COCOWrapper(groundtruth_dict) + coco_wrapped_detections = coco_wrapped_groundtruth.LoadAnnotations( + self._detection_boxes_list) + box_evaluator = coco_tools.COCOEvalWrapper( + coco_wrapped_groundtruth, coco_wrapped_detections, agnostic_mode=False) + box_metrics, box_per_category_ap = box_evaluator.ComputeMetrics( + include_metrics_per_category=self._include_metrics_per_category, + all_metrics_per_category=self._all_metrics_per_category) + box_metrics.update(box_per_category_ap) + box_metrics = {'DetectionBoxes_'+ key: value + for key, value in iter(box_metrics.items())} + return box_metrics + + def add_eval_dict(self, eval_dict): + """Observes an evaluation result dict for a single example. + + When executing eagerly, once all observations have been observed by this + method you can use `.evaluate()` to get the final metrics. + + When using `tf.estimator.Estimator` for evaluation this function is used by + `get_estimator_eval_metric_ops()` to construct the metric update op. + + Args: + eval_dict: A dictionary that holds tensors for evaluating an object + detection model, returned from + eval_util.result_dict_for_single_example(). + + Returns: + None when executing eagerly, or an update_op that can be used to update + the eval metrics in `tf.estimator.EstimatorSpec`. + """ + def update_op( + image_id_batched, + groundtruth_boxes_batched, + groundtruth_classes_batched, + groundtruth_is_crowd_batched, + num_gt_boxes_per_image, + detection_boxes_batched, + detection_scores_batched, + detection_classes_batched, + num_det_boxes_per_image, + is_annotated_batched): + """Update operation for adding batch of images to Coco evaluator.""" + + for (image_id, gt_box, gt_class, gt_is_crowd, num_gt_box, det_box, + det_score, det_class, num_det_box, is_annotated) in zip( + image_id_batched, groundtruth_boxes_batched, + groundtruth_classes_batched, groundtruth_is_crowd_batched, + num_gt_boxes_per_image, + detection_boxes_batched, detection_scores_batched, + detection_classes_batched, num_det_boxes_per_image, + is_annotated_batched): + if is_annotated: + self.add_single_ground_truth_image_info( + image_id, { + 'groundtruth_boxes': gt_box[:num_gt_box], + 'groundtruth_classes': gt_class[:num_gt_box], + 'groundtruth_is_crowd': gt_is_crowd[:num_gt_box] + }) + self.add_single_detected_image_info( + image_id, + {'detection_boxes': det_box[:num_det_box], + 'detection_scores': det_score[:num_det_box], + 'detection_classes': det_class[:num_det_box]}) + + # Unpack items from the evaluation dictionary. 
+ input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + image_id = eval_dict[input_data_fields.key] + groundtruth_boxes = eval_dict[input_data_fields.groundtruth_boxes] + groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes] + groundtruth_is_crowd = eval_dict.get( + input_data_fields.groundtruth_is_crowd, None) + detection_boxes = eval_dict[detection_fields.detection_boxes] + detection_scores = eval_dict[detection_fields.detection_scores] + detection_classes = eval_dict[detection_fields.detection_classes] + num_gt_boxes_per_image = eval_dict.get( + 'num_groundtruth_boxes_per_image', None) + num_det_boxes_per_image = eval_dict.get('num_det_boxes_per_image', None) + is_annotated = eval_dict.get('is_annotated', None) + + if groundtruth_is_crowd is None: + groundtruth_is_crowd = tf.zeros_like(groundtruth_classes, dtype=tf.bool) + if not image_id.shape.as_list(): + # Apply a batch dimension to all tensors. + image_id = tf.expand_dims(image_id, 0) + groundtruth_boxes = tf.expand_dims(groundtruth_boxes, 0) + groundtruth_classes = tf.expand_dims(groundtruth_classes, 0) + groundtruth_is_crowd = tf.expand_dims(groundtruth_is_crowd, 0) + detection_boxes = tf.expand_dims(detection_boxes, 0) + detection_scores = tf.expand_dims(detection_scores, 0) + detection_classes = tf.expand_dims(detection_classes, 0) + + if num_gt_boxes_per_image is None: + num_gt_boxes_per_image = tf.shape(groundtruth_boxes)[1:2] + else: + num_gt_boxes_per_image = tf.expand_dims(num_gt_boxes_per_image, 0) + + if num_det_boxes_per_image is None: + num_det_boxes_per_image = tf.shape(detection_boxes)[1:2] + else: + num_det_boxes_per_image = tf.expand_dims(num_det_boxes_per_image, 0) + + if is_annotated is None: + is_annotated = tf.constant([True]) + else: + is_annotated = tf.expand_dims(is_annotated, 0) + else: + if num_gt_boxes_per_image is None: + num_gt_boxes_per_image = tf.tile( + tf.shape(groundtruth_boxes)[1:2], + multiples=tf.shape(groundtruth_boxes)[0:1]) + if num_det_boxes_per_image is None: + num_det_boxes_per_image = tf.tile( + tf.shape(detection_boxes)[1:2], + multiples=tf.shape(detection_boxes)[0:1]) + if is_annotated is None: + is_annotated = tf.ones_like(image_id, dtype=tf.bool) + + return tf.py_func(update_op, [image_id, + groundtruth_boxes, + groundtruth_classes, + groundtruth_is_crowd, + num_gt_boxes_per_image, + detection_boxes, + detection_scores, + detection_classes, + num_det_boxes_per_image, + is_annotated], []) + + def get_estimator_eval_metric_ops(self, eval_dict): + """Returns a dictionary of eval metric ops. + + Note that once value_op is called, the detections and groundtruth added via + update_op are cleared. + + This function can take in groundtruth and detections for a batch of images, + or for a single image. For the latter case, the batch dimension for input + tensors need not be present. + + Args: + eval_dict: A dictionary that holds tensors for evaluating object detection + performance. For single-image evaluation, this dictionary may be + produced from eval_util.result_dict_for_single_example(). If multi-image + evaluation, `eval_dict` should contain the fields + 'num_groundtruth_boxes_per_image' and 'num_det_boxes_per_image' to + properly unpad the tensors from the batch. + + Returns: + a dictionary of metric names to tuple of value_op and update_op that can + be used as eval metric ops in tf.estimator.EstimatorSpec. 
Note that all
+      update ops must be run together and similarly all value ops must be run
+      together to guarantee correct behaviour.
+    """
+    update_op = self.add_eval_dict(eval_dict)
+    metric_names = ['DetectionBoxes_Precision/mAP',
+                    'DetectionBoxes_Precision/mAP@.50IOU',
+                    'DetectionBoxes_Precision/mAP@.75IOU',
+                    'DetectionBoxes_Precision/mAP (large)',
+                    'DetectionBoxes_Precision/mAP (medium)',
+                    'DetectionBoxes_Precision/mAP (small)',
+                    'DetectionBoxes_Recall/AR@1',
+                    'DetectionBoxes_Recall/AR@10',
+                    'DetectionBoxes_Recall/AR@100',
+                    'DetectionBoxes_Recall/AR@100 (large)',
+                    'DetectionBoxes_Recall/AR@100 (medium)',
+                    'DetectionBoxes_Recall/AR@100 (small)']
+    if self._include_metrics_per_category:
+      for category_dict in self._categories:
+        metric_names.append('DetectionBoxes_PerformanceByCategory/mAP/' +
+                            category_dict['name'])
+
+    def first_value_func():
+      self._metrics = self.evaluate()
+      self.clear()
+      return np.float32(self._metrics[metric_names[0]])
+
+    def value_func_factory(metric_name):
+      def value_func():
+        return np.float32(self._metrics[metric_name])
+      return value_func
+
+    # Ensure that the metrics are only evaluated once.
+    first_value_op = tf.py_func(first_value_func, [], tf.float32)
+    eval_metric_ops = {metric_names[0]: (first_value_op, update_op)}
+    with tf.control_dependencies([first_value_op]):
+      for metric_name in metric_names[1:]:
+        eval_metric_ops[metric_name] = (tf.py_func(
+            value_func_factory(metric_name), [], np.float32), update_op)
+    return eval_metric_ops
+
+
+def _check_mask_type_and_value(array_name, masks):
+  """Checks whether mask dtype is uint8 and the values are either 0 or 1."""
+  if masks.dtype != np.uint8:
+    raise ValueError('{} must be of type np.uint8. Found {}.'.format(
+        array_name, masks.dtype))
+  if np.any(np.logical_and(masks != 0, masks != 1)):
+    raise ValueError('{} elements can only be either 0 or 1.'.format(
+        array_name))
+
+
+class CocoKeypointEvaluator(CocoDetectionEvaluator):
+  """Class to evaluate COCO keypoint metrics."""
+
+  def __init__(self,
+               category_id,
+               category_keypoints,
+               class_text,
+               oks_sigmas=None):
+    """Constructor.
+
+    Args:
+      category_id: An integer id uniquely identifying this category.
+      category_keypoints: A list specifying keypoint mappings, with items:
+        'id': (required) an integer id identifying the keypoint.
+        'name': (required) a string representing the keypoint name.
+      class_text: A string representing the category name for which keypoint
+        metrics are to be computed.
+      oks_sigmas: A dict of keypoint name to standard deviation values for OKS
+        metrics. If not provided, default value of 0.05 will be used.
+    """
+    self._category_id = category_id
+    self._category_name = class_text
+    self._keypoint_ids = sorted(
+        [keypoint['id'] for keypoint in category_keypoints])
+    kpt_id_to_name = {kpt['id']: kpt['name'] for kpt in category_keypoints}
+    if oks_sigmas:
+      self._oks_sigmas = np.array([
+          oks_sigmas[kpt_id_to_name[idx]] for idx in self._keypoint_ids
+      ])
+    else:
+      # Default all per-keypoint sigmas to 0.05.
+      self._oks_sigmas = np.full((len(self._keypoint_ids)), 0.05)
+      tf.logging.warning('No default keypoint OKS sigmas provided. Will use '
+                         '0.05')
+    tf.logging.info('Using the following keypoint OKS sigmas: {}'.format(
+        self._oks_sigmas))
+    self._metrics = None
+    super(CocoKeypointEvaluator, self).__init__([{
+        'id': self._category_id,
+        'name': class_text
+    }])
+
+  def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
+    """Adds groundtruth for a single image with keypoints.
+ + If the image has already been added, a warning is logged, and groundtruth + is ignored. + + Args: + image_id: A unique string/integer identifier for the image. + groundtruth_dict: A dictionary containing - + InputDataFields.groundtruth_boxes: float32 numpy array of shape + [num_boxes, 4] containing `num_boxes` groundtruth boxes of the format + [ymin, xmin, ymax, xmax] in absolute image coordinates. + InputDataFields.groundtruth_classes: integer numpy array of shape + [num_boxes] containing 1-indexed groundtruth classes for the boxes. + InputDataFields.groundtruth_is_crowd (optional): integer numpy array of + shape [num_boxes] containing iscrowd flag for groundtruth boxes. + InputDataFields.groundtruth_area (optional): float numpy array of + shape [num_boxes] containing the area (in the original absolute + coordinates) of the annotated object. + InputDataFields.groundtruth_keypoints: float numpy array of + keypoints with shape [num_boxes, num_keypoints, 2]. + InputDataFields.groundtruth_keypoint_visibilities (optional): integer + numpy array of keypoint visibilities with shape [num_gt_boxes, + num_keypoints]. Integer is treated as an enum with 0=not labels, + 1=labeled but not visible and 2=labeled and visible. + """ + + # Keep only the groundtruth for our category and its keypoints. + groundtruth_classes = groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_classes] + groundtruth_boxes = groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_boxes] + groundtruth_keypoints = groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_keypoints] + class_indices = [ + idx for idx, gt_class_id in enumerate(groundtruth_classes) + if gt_class_id == self._category_id + ] + filtered_groundtruth_classes = np.take( + groundtruth_classes, class_indices, axis=0) + filtered_groundtruth_boxes = np.take( + groundtruth_boxes, class_indices, axis=0) + filtered_groundtruth_keypoints = np.take( + groundtruth_keypoints, class_indices, axis=0) + filtered_groundtruth_keypoints = np.take( + filtered_groundtruth_keypoints, self._keypoint_ids, axis=1) + + filtered_groundtruth_dict = {} + filtered_groundtruth_dict[ + standard_fields.InputDataFields + .groundtruth_classes] = filtered_groundtruth_classes + filtered_groundtruth_dict[standard_fields.InputDataFields + .groundtruth_boxes] = filtered_groundtruth_boxes + filtered_groundtruth_dict[ + standard_fields.InputDataFields + .groundtruth_keypoints] = filtered_groundtruth_keypoints + + if (standard_fields.InputDataFields.groundtruth_is_crowd in + groundtruth_dict.keys()): + groundtruth_is_crowd = groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_is_crowd] + filtered_groundtruth_is_crowd = np.take(groundtruth_is_crowd, + class_indices, 0) + filtered_groundtruth_dict[ + standard_fields.InputDataFields + .groundtruth_is_crowd] = filtered_groundtruth_is_crowd + if (standard_fields.InputDataFields.groundtruth_area in + groundtruth_dict.keys()): + groundtruth_area = groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_area] + filtered_groundtruth_area = np.take(groundtruth_area, class_indices, 0) + filtered_groundtruth_dict[ + standard_fields.InputDataFields + .groundtruth_area] = filtered_groundtruth_area + if (standard_fields.InputDataFields.groundtruth_keypoint_visibilities in + groundtruth_dict.keys()): + groundtruth_keypoint_visibilities = groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_keypoint_visibilities] + filtered_groundtruth_keypoint_visibilities = np.take( + 
groundtruth_keypoint_visibilities, class_indices, axis=0) + filtered_groundtruth_keypoint_visibilities = np.take( + filtered_groundtruth_keypoint_visibilities, + self._keypoint_ids, + axis=1) + filtered_groundtruth_dict[ + standard_fields.InputDataFields. + groundtruth_keypoint_visibilities] = filtered_groundtruth_keypoint_visibilities + + super(CocoKeypointEvaluator, + self).add_single_ground_truth_image_info(image_id, + filtered_groundtruth_dict) + + def add_single_detected_image_info(self, image_id, detections_dict): + """Adds detections for a single image and the specific category for which keypoints are evaluated. + + If a detection has already been added for this image id, a warning is + logged, and the detection is skipped. + + Args: + image_id: A unique string/integer identifier for the image. + detections_dict: A dictionary containing - + DetectionResultFields.detection_boxes: float32 numpy array of shape + [num_boxes, 4] containing `num_boxes` detection boxes of the format + [ymin, xmin, ymax, xmax] in absolute image coordinates. + DetectionResultFields.detection_scores: float32 numpy array of shape + [num_boxes] containing detection scores for the boxes. + DetectionResultFields.detection_classes: integer numpy array of shape + [num_boxes] containing 1-indexed detection classes for the boxes. + DetectionResultFields.detection_keypoints: float numpy array of + keypoints with shape [num_boxes, num_keypoints, 2]. + + Raises: + ValueError: If groundtruth for the image_id is not available. + """ + + # Keep only the detections for our category and its keypoints. + detection_classes = detections_dict[ + standard_fields.DetectionResultFields.detection_classes] + detection_boxes = detections_dict[ + standard_fields.DetectionResultFields.detection_boxes] + detection_scores = detections_dict[ + standard_fields.DetectionResultFields.detection_scores] + detection_keypoints = detections_dict[ + standard_fields.DetectionResultFields.detection_keypoints] + class_indices = [ + idx for idx, class_id in enumerate(detection_classes) + if class_id == self._category_id + ] + filtered_detection_classes = np.take( + detection_classes, class_indices, axis=0) + filtered_detection_boxes = np.take(detection_boxes, class_indices, axis=0) + filtered_detection_scores = np.take(detection_scores, class_indices, axis=0) + filtered_detection_keypoints = np.take( + detection_keypoints, class_indices, axis=0) + filtered_detection_keypoints = np.take( + filtered_detection_keypoints, self._keypoint_ids, axis=1) + + filtered_detections_dict = {} + filtered_detections_dict[standard_fields.DetectionResultFields + .detection_classes] = filtered_detection_classes + filtered_detections_dict[standard_fields.DetectionResultFields + .detection_boxes] = filtered_detection_boxes + filtered_detections_dict[standard_fields.DetectionResultFields + .detection_scores] = filtered_detection_scores + filtered_detections_dict[standard_fields.DetectionResultFields. + detection_keypoints] = filtered_detection_keypoints + + super(CocoKeypointEvaluator, + self).add_single_detected_image_info(image_id, + filtered_detections_dict) + + def evaluate(self): + """Evaluates the keypoints and returns a dictionary of coco metrics. + + Returns: + A dictionary holding - + + 1. summary_metrics: + 'Keypoints_Precision/mAP': mean average precision over classes + averaged over OKS thresholds ranging from .5 to .95 with .05 + increments. 
+ 'Keypoints_Precision/mAP@.50IOU': mean average precision at 50% OKS + 'Keypoints_Precision/mAP@.75IOU': mean average precision at 75% OKS + 'Keypoints_Precision/mAP (medium)': mean average precision for medium + sized objects (32^2 pixels < area < 96^2 pixels). + 'Keypoints_Precision/mAP (large)': mean average precision for large + objects (96^2 pixels < area < 10000^2 pixels). + 'Keypoints_Recall/AR@1': average recall with 1 detection. + 'Keypoints_Recall/AR@10': average recall with 10 detections. + 'Keypoints_Recall/AR@100': average recall with 100 detections. + 'Keypoints_Recall/AR@100 (medium)': average recall for medium objects with + 100. + 'Keypoints_Recall/AR@100 (large)': average recall for large objects with + 100 detections. + """ + tf.logging.info('Performing evaluation on %d images.', len(self._image_ids)) + groundtruth_dict = { + 'annotations': self._groundtruth_list, + 'images': [{'id': image_id} for image_id in self._image_ids], + 'categories': self._categories + } + coco_wrapped_groundtruth = coco_tools.COCOWrapper( + groundtruth_dict, detection_type='bbox') + coco_wrapped_detections = coco_wrapped_groundtruth.LoadAnnotations( + self._detection_boxes_list) + keypoint_evaluator = coco_tools.COCOEvalWrapper( + coco_wrapped_groundtruth, + coco_wrapped_detections, + agnostic_mode=False, + iou_type='keypoints', + oks_sigmas=self._oks_sigmas) + keypoint_metrics, _ = keypoint_evaluator.ComputeMetrics( + include_metrics_per_category=False, all_metrics_per_category=False) + keypoint_metrics = { + 'Keypoints_' + key: value + for key, value in iter(keypoint_metrics.items()) + } + return keypoint_metrics + + def add_eval_dict(self, eval_dict): + """Observes an evaluation result dict for a single example. + + When executing eagerly, once all observations have been observed by this + method you can use `.evaluate()` to get the final metrics. + + When using `tf.estimator.Estimator` for evaluation this function is used by + `get_estimator_eval_metric_ops()` to construct the metric update op. + + Args: + eval_dict: A dictionary that holds tensors for evaluating an object + detection model, returned from + eval_util.result_dict_for_single_example(). + + Returns: + None when executing eagerly, or an update_op that can be used to update + the eval metrics in `tf.estimator.EstimatorSpec`. 
+ """ + def update_op( + image_id_batched, + groundtruth_boxes_batched, + groundtruth_classes_batched, + groundtruth_is_crowd_batched, + groundtruth_area_batched, + groundtruth_keypoints_batched, + groundtruth_keypoint_visibilities_batched, + num_gt_boxes_per_image, + detection_boxes_batched, + detection_scores_batched, + detection_classes_batched, + detection_keypoints_batched, + num_det_boxes_per_image, + is_annotated_batched): + """Update operation for adding batch of images to Coco evaluator.""" + + for (image_id, gt_box, gt_class, gt_is_crowd, gt_area, gt_keyp, + gt_keyp_vis, num_gt_box, det_box, det_score, det_class, det_keyp, + num_det_box, is_annotated) in zip( + image_id_batched, groundtruth_boxes_batched, + groundtruth_classes_batched, groundtruth_is_crowd_batched, + groundtruth_area_batched, groundtruth_keypoints_batched, + groundtruth_keypoint_visibilities_batched, + num_gt_boxes_per_image, detection_boxes_batched, + detection_scores_batched, detection_classes_batched, + detection_keypoints_batched, num_det_boxes_per_image, + is_annotated_batched): + if is_annotated: + self.add_single_ground_truth_image_info( + image_id, { + 'groundtruth_boxes': gt_box[:num_gt_box], + 'groundtruth_classes': gt_class[:num_gt_box], + 'groundtruth_is_crowd': gt_is_crowd[:num_gt_box], + 'groundtruth_area': gt_area[:num_gt_box], + 'groundtruth_keypoints': gt_keyp[:num_gt_box], + 'groundtruth_keypoint_visibilities': gt_keyp_vis[:num_gt_box] + }) + self.add_single_detected_image_info( + image_id, { + 'detection_boxes': det_box[:num_det_box], + 'detection_scores': det_score[:num_det_box], + 'detection_classes': det_class[:num_det_box], + 'detection_keypoints': det_keyp[:num_det_box], + }) + + # Unpack items from the evaluation dictionary. + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + image_id = eval_dict[input_data_fields.key] + groundtruth_boxes = eval_dict[input_data_fields.groundtruth_boxes] + groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes] + groundtruth_is_crowd = eval_dict.get(input_data_fields.groundtruth_is_crowd, + None) + groundtruth_area = eval_dict.get(input_data_fields.groundtruth_area, None) + groundtruth_keypoints = eval_dict[input_data_fields.groundtruth_keypoints] + groundtruth_keypoint_visibilities = eval_dict.get( + input_data_fields.groundtruth_keypoint_visibilities, None) + detection_boxes = eval_dict[detection_fields.detection_boxes] + detection_scores = eval_dict[detection_fields.detection_scores] + detection_classes = eval_dict[detection_fields.detection_classes] + detection_keypoints = eval_dict[detection_fields.detection_keypoints] + num_gt_boxes_per_image = eval_dict.get( + 'num_groundtruth_boxes_per_image', None) + num_det_boxes_per_image = eval_dict.get('num_det_boxes_per_image', None) + is_annotated = eval_dict.get('is_annotated', None) + + if groundtruth_is_crowd is None: + groundtruth_is_crowd = tf.zeros_like(groundtruth_classes, dtype=tf.bool) + + if groundtruth_area is None: + groundtruth_area = tf.zeros_like(groundtruth_classes, dtype=tf.float32) + + if not image_id.shape.as_list(): + # Apply a batch dimension to all tensors. 
+ image_id = tf.expand_dims(image_id, 0) + groundtruth_boxes = tf.expand_dims(groundtruth_boxes, 0) + groundtruth_classes = tf.expand_dims(groundtruth_classes, 0) + groundtruth_is_crowd = tf.expand_dims(groundtruth_is_crowd, 0) + groundtruth_area = tf.expand_dims(groundtruth_area, 0) + groundtruth_keypoints = tf.expand_dims(groundtruth_keypoints, 0) + detection_boxes = tf.expand_dims(detection_boxes, 0) + detection_scores = tf.expand_dims(detection_scores, 0) + detection_classes = tf.expand_dims(detection_classes, 0) + detection_keypoints = tf.expand_dims(detection_keypoints, 0) + + if num_gt_boxes_per_image is None: + num_gt_boxes_per_image = tf.shape(groundtruth_boxes)[1:2] + else: + num_gt_boxes_per_image = tf.expand_dims(num_gt_boxes_per_image, 0) + + if num_det_boxes_per_image is None: + num_det_boxes_per_image = tf.shape(detection_boxes)[1:2] + else: + num_det_boxes_per_image = tf.expand_dims(num_det_boxes_per_image, 0) + + if is_annotated is None: + is_annotated = tf.constant([True]) + else: + is_annotated = tf.expand_dims(is_annotated, 0) + + if groundtruth_keypoint_visibilities is None: + groundtruth_keypoint_visibilities = tf.fill([ + tf.shape(groundtruth_boxes)[1], + tf.shape(groundtruth_keypoints)[2] + ], tf.constant(2, dtype=tf.int32)) + groundtruth_keypoint_visibilities = tf.expand_dims( + groundtruth_keypoint_visibilities, 0) + else: + if num_gt_boxes_per_image is None: + num_gt_boxes_per_image = tf.tile( + tf.shape(groundtruth_boxes)[1:2], + multiples=tf.shape(groundtruth_boxes)[0:1]) + if num_det_boxes_per_image is None: + num_det_boxes_per_image = tf.tile( + tf.shape(detection_boxes)[1:2], + multiples=tf.shape(detection_boxes)[0:1]) + if is_annotated is None: + is_annotated = tf.ones_like(image_id, dtype=tf.bool) + if groundtruth_keypoint_visibilities is None: + groundtruth_keypoint_visibilities = tf.fill([ + tf.shape(groundtruth_keypoints)[1], + tf.shape(groundtruth_keypoints)[2] + ], tf.constant(2, dtype=tf.int32)) + groundtruth_keypoint_visibilities = tf.tile( + tf.expand_dims(groundtruth_keypoint_visibilities, 0), + multiples=[tf.shape(groundtruth_keypoints)[0], 1, 1]) + + return tf.py_func(update_op, [ + image_id, groundtruth_boxes, groundtruth_classes, groundtruth_is_crowd, + groundtruth_area, groundtruth_keypoints, + groundtruth_keypoint_visibilities, num_gt_boxes_per_image, + detection_boxes, detection_scores, detection_classes, + detection_keypoints, num_det_boxes_per_image, is_annotated + ], []) + + def get_estimator_eval_metric_ops(self, eval_dict): + """Returns a dictionary of eval metric ops. + + Note that once value_op is called, the detections and groundtruth added via + update_op are cleared. + + This function can take in groundtruth and detections for a batch of images, + or for a single image. For the latter case, the batch dimension for input + tensors need not be present. + + Args: + eval_dict: A dictionary that holds tensors for evaluating object detection + performance. For single-image evaluation, this dictionary may be + produced from eval_util.result_dict_for_single_example(). If multi-image + evaluation, `eval_dict` should contain the fields + 'num_groundtruth_boxes_per_image' and 'num_det_boxes_per_image' to + properly unpad the tensors from the batch. + + Returns: + a dictionary of metric names to tuple of value_op and update_op that can + be used as eval metric ops in tf.estimator.EstimatorSpec. Note that all + update ops must be run together and similarly all value ops must be run + together to guarantee correct behaviour. 
+ """ + update_op = self.add_eval_dict(eval_dict) + category = self._category_name + metric_names = [ + 'Keypoints_Precision/mAP ByCategory/{}'.format(category), + 'Keypoints_Precision/mAP@.50IOU ByCategory/{}'.format(category), + 'Keypoints_Precision/mAP@.75IOU ByCategory/{}'.format(category), + 'Keypoints_Precision/mAP (large) ByCategory/{}'.format(category), + 'Keypoints_Precision/mAP (medium) ByCategory/{}'.format(category), + 'Keypoints_Recall/AR@1 ByCategory/{}'.format(category), + 'Keypoints_Recall/AR@10 ByCategory/{}'.format(category), + 'Keypoints_Recall/AR@100 ByCategory/{}'.format(category), + 'Keypoints_Recall/AR@100 (large) ByCategory/{}'.format(category), + 'Keypoints_Recall/AR@100 (medium) ByCategory/{}'.format(category) + ] + + def first_value_func(): + self._metrics = self.evaluate() + self.clear() + return np.float32(self._metrics[metric_names[0]]) + + def value_func_factory(metric_name): + def value_func(): + return np.float32(self._metrics[metric_name]) + return value_func + + # Ensure that the metrics are only evaluated once. + first_value_op = tf.py_func(first_value_func, [], tf.float32) + eval_metric_ops = {metric_names[0]: (first_value_op, update_op)} + with tf.control_dependencies([first_value_op]): + for metric_name in metric_names[1:]: + eval_metric_ops[metric_name] = (tf.py_func( + value_func_factory(metric_name), [], np.float32), update_op) + return eval_metric_ops + + +class CocoMaskEvaluator(object_detection_evaluation.DetectionEvaluator): + """Class to evaluate COCO detection metrics.""" + + def __init__(self, categories, include_metrics_per_category=False): + """Constructor. + + Args: + categories: A list of dicts, each of which has the following keys - + 'id': (required) an integer id uniquely identifying this category. + 'name': (required) string representing category name e.g., 'cat', 'dog'. + include_metrics_per_category: If True, include metrics for each category. + """ + super(CocoMaskEvaluator, self).__init__(categories) + self._image_id_to_mask_shape_map = {} + self._image_ids_with_detections = set([]) + self._groundtruth_list = [] + self._detection_masks_list = [] + self._category_id_set = set([cat['id'] for cat in self._categories]) + self._annotation_id = 1 + self._include_metrics_per_category = include_metrics_per_category + + def clear(self): + """Clears the state to prepare for a fresh evaluation.""" + self._image_id_to_mask_shape_map.clear() + self._image_ids_with_detections.clear() + self._groundtruth_list = [] + self._detection_masks_list = [] + + def add_single_ground_truth_image_info(self, + image_id, + groundtruth_dict): + """Adds groundtruth for a single image to be used for evaluation. + + If the image has already been added, a warning is logged, and groundtruth is + ignored. + + Args: + image_id: A unique string/integer identifier for the image. + groundtruth_dict: A dictionary containing - + InputDataFields.groundtruth_boxes: float32 numpy array of shape + [num_boxes, 4] containing `num_boxes` groundtruth boxes of the format + [ymin, xmin, ymax, xmax] in absolute image coordinates. + InputDataFields.groundtruth_classes: integer numpy array of shape + [num_boxes] containing 1-indexed groundtruth classes for the boxes. + InputDataFields.groundtruth_instance_masks: uint8 numpy array of shape + [num_boxes, image_height, image_width] containing groundtruth masks + corresponding to the boxes. The elements of the array must be in + {0, 1}. 
+ """ + if image_id in self._image_id_to_mask_shape_map: + tf.logging.warning('Ignoring ground truth with image id %s since it was ' + 'previously added', image_id) + return + + groundtruth_instance_masks = groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_instance_masks] + _check_mask_type_and_value(standard_fields.InputDataFields. + groundtruth_instance_masks, + groundtruth_instance_masks) + self._groundtruth_list.extend( + coco_tools. + ExportSingleImageGroundtruthToCoco( + image_id=image_id, + next_annotation_id=self._annotation_id, + category_id_set=self._category_id_set, + groundtruth_boxes=groundtruth_dict[standard_fields.InputDataFields. + groundtruth_boxes], + groundtruth_classes=groundtruth_dict[standard_fields. + InputDataFields. + groundtruth_classes], + groundtruth_masks=groundtruth_instance_masks)) + self._annotation_id += groundtruth_dict[standard_fields.InputDataFields. + groundtruth_boxes].shape[0] + self._image_id_to_mask_shape_map[image_id] = groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_instance_masks].shape + + def add_single_detected_image_info(self, + image_id, + detections_dict): + """Adds detections for a single image to be used for evaluation. + + If a detection has already been added for this image id, a warning is + logged, and the detection is skipped. + + Args: + image_id: A unique string/integer identifier for the image. + detections_dict: A dictionary containing - + DetectionResultFields.detection_scores: float32 numpy array of shape + [num_boxes] containing detection scores for the boxes. + DetectionResultFields.detection_classes: integer numpy array of shape + [num_boxes] containing 1-indexed detection classes for the boxes. + DetectionResultFields.detection_masks: optional uint8 numpy array of + shape [num_boxes, image_height, image_width] containing instance + masks corresponding to the boxes. The elements of the array must be + in {0, 1}. + + Raises: + ValueError: If groundtruth for the image_id is not available or if + spatial shapes of groundtruth_instance_masks and detection_masks are + incompatible. + """ + if image_id not in self._image_id_to_mask_shape_map: + raise ValueError('Missing groundtruth for image id: {}'.format(image_id)) + + if image_id in self._image_ids_with_detections: + tf.logging.warning('Ignoring detection with image id %s since it was ' + 'previously added', image_id) + return + + groundtruth_masks_shape = self._image_id_to_mask_shape_map[image_id] + detection_masks = detections_dict[standard_fields.DetectionResultFields. + detection_masks] + if groundtruth_masks_shape[1:] != detection_masks.shape[1:]: + raise ValueError('Spatial shape of groundtruth masks and detection masks ' + 'are incompatible: {} vs {}'.format( + groundtruth_masks_shape, + detection_masks.shape)) + _check_mask_type_and_value(standard_fields.DetectionResultFields. + detection_masks, + detection_masks) + self._detection_masks_list.extend( + coco_tools.ExportSingleImageDetectionMasksToCoco( + image_id=image_id, + category_id_set=self._category_id_set, + detection_masks=detection_masks, + detection_scores=detections_dict[standard_fields. + DetectionResultFields. + detection_scores], + detection_classes=detections_dict[standard_fields. + DetectionResultFields. + detection_classes])) + self._image_ids_with_detections.update([image_id]) + + def dump_detections_to_json_file(self, json_output_path): + """Saves the detections into json_output_path in the format used by MS COCO. 
+ + Args: + json_output_path: String containing the output file's path. It can be also + None. In that case nothing will be written to the output file. + """ + if json_output_path and json_output_path is not None: + tf.logging.info('Dumping detections to output json file.') + with tf.gfile.GFile(json_output_path, 'w') as fid: + json_utils.Dump( + obj=self._detection_masks_list, fid=fid, float_digits=4, indent=2) + + def evaluate(self): + """Evaluates the detection masks and returns a dictionary of coco metrics. + + Returns: + A dictionary holding - + + 1. summary_metrics: + 'DetectionMasks_Precision/mAP': mean average precision over classes + averaged over IOU thresholds ranging from .5 to .95 with .05 increments. + 'DetectionMasks_Precision/mAP@.50IOU': mean average precision at 50% IOU. + 'DetectionMasks_Precision/mAP@.75IOU': mean average precision at 75% IOU. + 'DetectionMasks_Precision/mAP (small)': mean average precision for small + objects (area < 32^2 pixels). + 'DetectionMasks_Precision/mAP (medium)': mean average precision for medium + sized objects (32^2 pixels < area < 96^2 pixels). + 'DetectionMasks_Precision/mAP (large)': mean average precision for large + objects (96^2 pixels < area < 10000^2 pixels). + 'DetectionMasks_Recall/AR@1': average recall with 1 detection. + 'DetectionMasks_Recall/AR@10': average recall with 10 detections. + 'DetectionMasks_Recall/AR@100': average recall with 100 detections. + 'DetectionMasks_Recall/AR@100 (small)': average recall for small objects + with 100 detections. + 'DetectionMasks_Recall/AR@100 (medium)': average recall for medium objects + with 100 detections. + 'DetectionMasks_Recall/AR@100 (large)': average recall for large objects + with 100 detections. + + 2. per_category_ap: if include_metrics_per_category is True, category + specific results with keys of the form: + 'Precision mAP ByCategory/category' (without the supercategory part if + no supercategories exist). For backward compatibility + 'PerformanceByCategory' is included in the output regardless of + all_metrics_per_category. + """ + groundtruth_dict = { + 'annotations': self._groundtruth_list, + 'images': [{'id': image_id, 'height': shape[1], 'width': shape[2]} + for image_id, shape in self._image_id_to_mask_shape_map. + items()], + 'categories': self._categories + } + coco_wrapped_groundtruth = coco_tools.COCOWrapper( + groundtruth_dict, detection_type='segmentation') + coco_wrapped_detection_masks = coco_wrapped_groundtruth.LoadAnnotations( + self._detection_masks_list) + mask_evaluator = coco_tools.COCOEvalWrapper( + coco_wrapped_groundtruth, coco_wrapped_detection_masks, + agnostic_mode=False, iou_type='segm') + mask_metrics, mask_per_category_ap = mask_evaluator.ComputeMetrics( + include_metrics_per_category=self._include_metrics_per_category) + mask_metrics.update(mask_per_category_ap) + mask_metrics = {'DetectionMasks_'+ key: value + for key, value in mask_metrics.items()} + return mask_metrics + + def add_eval_dict(self, eval_dict): + """Observes an evaluation result dict for a single example. + + When executing eagerly, once all observations have been observed by this + method you can use `.evaluate()` to get the final metrics. + + When using `tf.estimator.Estimator` for evaluation this function is used by + `get_estimator_eval_metric_ops()` to construct the metric update op. + + Args: + eval_dict: A dictionary that holds tensors for evaluating an object + detection model, returned from + eval_util.result_dict_for_single_example(). 
+ + Returns: + None when executing eagerly, or an update_op that can be used to update + the eval metrics in `tf.estimator.EstimatorSpec`. + """ + def update_op(image_id_batched, groundtruth_boxes_batched, + groundtruth_classes_batched, + groundtruth_instance_masks_batched, + groundtruth_is_crowd_batched, num_gt_boxes_per_image, + detection_scores_batched, detection_classes_batched, + detection_masks_batched, num_det_boxes_per_image): + """Update op for metrics.""" + + for (image_id, groundtruth_boxes, groundtruth_classes, + groundtruth_instance_masks, groundtruth_is_crowd, num_gt_box, + detection_scores, detection_classes, + detection_masks, num_det_box) in zip( + image_id_batched, groundtruth_boxes_batched, + groundtruth_classes_batched, groundtruth_instance_masks_batched, + groundtruth_is_crowd_batched, num_gt_boxes_per_image, + detection_scores_batched, detection_classes_batched, + detection_masks_batched, num_det_boxes_per_image): + self.add_single_ground_truth_image_info( + image_id, { + 'groundtruth_boxes': + groundtruth_boxes[:num_gt_box], + 'groundtruth_classes': + groundtruth_classes[:num_gt_box], + 'groundtruth_instance_masks': + groundtruth_instance_masks[:num_gt_box], + 'groundtruth_is_crowd': + groundtruth_is_crowd[:num_gt_box] + }) + self.add_single_detected_image_info( + image_id, { + 'detection_scores': detection_scores[:num_det_box], + 'detection_classes': detection_classes[:num_det_box], + 'detection_masks': detection_masks[:num_det_box] + }) + + # Unpack items from the evaluation dictionary. + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + image_id = eval_dict[input_data_fields.key] + groundtruth_boxes = eval_dict[input_data_fields.groundtruth_boxes] + groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes] + groundtruth_instance_masks = eval_dict[ + input_data_fields.groundtruth_instance_masks] + groundtruth_is_crowd = eval_dict.get( + input_data_fields.groundtruth_is_crowd, None) + num_gt_boxes_per_image = eval_dict.get( + input_data_fields.num_groundtruth_boxes, None) + detection_scores = eval_dict[detection_fields.detection_scores] + detection_classes = eval_dict[detection_fields.detection_classes] + detection_masks = eval_dict[detection_fields.detection_masks] + num_det_boxes_per_image = eval_dict.get(detection_fields.num_detections, + None) + + if groundtruth_is_crowd is None: + groundtruth_is_crowd = tf.zeros_like(groundtruth_classes, dtype=tf.bool) + + if not image_id.shape.as_list(): + # Apply a batch dimension to all tensors. 
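+      # Note: the eval dict held unbatched (single-image) tensors, so every
+      # tensor gets a leading batch dimension of size 1; the per-image box
+      # counts below then default to the full (padded) length whenever the
+      # input pipeline does not provide them explicitly.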
+ image_id = tf.expand_dims(image_id, 0) + groundtruth_boxes = tf.expand_dims(groundtruth_boxes, 0) + groundtruth_classes = tf.expand_dims(groundtruth_classes, 0) + groundtruth_instance_masks = tf.expand_dims(groundtruth_instance_masks, 0) + groundtruth_is_crowd = tf.expand_dims(groundtruth_is_crowd, 0) + detection_scores = tf.expand_dims(detection_scores, 0) + detection_classes = tf.expand_dims(detection_classes, 0) + detection_masks = tf.expand_dims(detection_masks, 0) + + if num_gt_boxes_per_image is None: + num_gt_boxes_per_image = tf.shape(groundtruth_boxes)[1:2] + else: + num_gt_boxes_per_image = tf.expand_dims(num_gt_boxes_per_image, 0) + + if num_det_boxes_per_image is None: + num_det_boxes_per_image = tf.shape(detection_scores)[1:2] + else: + num_det_boxes_per_image = tf.expand_dims(num_det_boxes_per_image, 0) + else: + if num_gt_boxes_per_image is None: + num_gt_boxes_per_image = tf.tile( + tf.shape(groundtruth_boxes)[1:2], + multiples=tf.shape(groundtruth_boxes)[0:1]) + if num_det_boxes_per_image is None: + num_det_boxes_per_image = tf.tile( + tf.shape(detection_scores)[1:2], + multiples=tf.shape(detection_scores)[0:1]) + + return tf.py_func(update_op, [ + image_id, groundtruth_boxes, groundtruth_classes, + groundtruth_instance_masks, groundtruth_is_crowd, + num_gt_boxes_per_image, detection_scores, detection_classes, + detection_masks, num_det_boxes_per_image + ], []) + + def get_estimator_eval_metric_ops(self, eval_dict): + """Returns a dictionary of eval metric ops. + + Note that once value_op is called, the detections and groundtruth added via + update_op are cleared. + + Args: + eval_dict: A dictionary that holds tensors for evaluating object detection + performance. For single-image evaluation, this dictionary may be + produced from eval_util.result_dict_for_single_example(). If multi-image + evaluation, `eval_dict` should contain the fields + 'num_groundtruth_boxes_per_image' and 'num_det_boxes_per_image' to + properly unpad the tensors from the batch. + + Returns: + a dictionary of metric names to tuple of value_op and update_op that can + be used as eval metric ops in tf.estimator.EstimatorSpec. Note that all + update ops must be run together and similarly all value ops must be run + together to guarantee correct behaviour. + """ + update_op = self.add_eval_dict(eval_dict) + metric_names = ['DetectionMasks_Precision/mAP', + 'DetectionMasks_Precision/mAP@.50IOU', + 'DetectionMasks_Precision/mAP@.75IOU', + 'DetectionMasks_Precision/mAP (large)', + 'DetectionMasks_Precision/mAP (medium)', + 'DetectionMasks_Precision/mAP (small)', + 'DetectionMasks_Recall/AR@1', + 'DetectionMasks_Recall/AR@10', + 'DetectionMasks_Recall/AR@100', + 'DetectionMasks_Recall/AR@100 (large)', + 'DetectionMasks_Recall/AR@100 (medium)', + 'DetectionMasks_Recall/AR@100 (small)'] + if self._include_metrics_per_category: + for category_dict in self._categories: + metric_names.append('DetectionMasks_PerformanceByCategory/mAP/' + + category_dict['name']) + + def first_value_func(): + self._metrics = self.evaluate() + self.clear() + return np.float32(self._metrics[metric_names[0]]) + + def value_func_factory(metric_name): + def value_func(): + return np.float32(self._metrics[metric_name]) + return value_func + + # Ensure that the metrics are only evaluated once. 
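+    # first_value_func runs the full COCO evaluation exactly once, caches the
+    # result in self._metrics and clears the accumulated groundtruth and
+    # detections; the remaining value ops only read from that cache, and the
+    # control dependency below guarantees they run after the evaluation.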
+ first_value_op = tf.py_func(first_value_func, [], tf.float32) + eval_metric_ops = {metric_names[0]: (first_value_op, update_op)} + with tf.control_dependencies([first_value_op]): + for metric_name in metric_names[1:]: + eval_metric_ops[metric_name] = (tf.py_func( + value_func_factory(metric_name), [], np.float32), update_op) + return eval_metric_ops + + +class CocoPanopticSegmentationEvaluator( + object_detection_evaluation.DetectionEvaluator): + """Class to evaluate PQ (panoptic quality) metric on COCO dataset. + + More details about this metric: https://arxiv.org/pdf/1801.00868.pdf. + """ + + def __init__(self, + categories, + include_metrics_per_category=False, + iou_threshold=0.5, + ioa_threshold=0.5): + """Constructor. + + Args: + categories: A list of dicts, each of which has the following keys - + 'id': (required) an integer id uniquely identifying this category. + 'name': (required) string representing category name e.g., 'cat', 'dog'. + include_metrics_per_category: If True, include metrics for each category. + iou_threshold: intersection-over-union threshold for mask matching (with + normal groundtruths). + ioa_threshold: intersection-over-area threshold for mask matching with + "is_crowd" groundtruths. + """ + super(CocoPanopticSegmentationEvaluator, self).__init__(categories) + self._groundtruth_masks = {} + self._groundtruth_class_labels = {} + self._groundtruth_is_crowd = {} + self._predicted_masks = {} + self._predicted_class_labels = {} + self._include_metrics_per_category = include_metrics_per_category + self._iou_threshold = iou_threshold + self._ioa_threshold = ioa_threshold + + def clear(self): + """Clears the state to prepare for a fresh evaluation.""" + self._groundtruth_masks.clear() + self._groundtruth_class_labels.clear() + self._groundtruth_is_crowd.clear() + self._predicted_masks.clear() + self._predicted_class_labels.clear() + + def add_single_ground_truth_image_info(self, image_id, groundtruth_dict): + """Adds groundtruth for a single image to be used for evaluation. + + If the image has already been added, a warning is logged, and groundtruth is + ignored. + + Args: + image_id: A unique string/integer identifier for the image. + groundtruth_dict: A dictionary containing - + InputDataFields.groundtruth_classes: integer numpy array of shape + [num_masks] containing 1-indexed groundtruth classes for the mask. + InputDataFields.groundtruth_instance_masks: uint8 numpy array of shape + [num_masks, image_height, image_width] containing groundtruth masks. + The elements of the array must be in {0, 1}. + InputDataFields.groundtruth_is_crowd (optional): integer numpy array of + shape [num_boxes] containing iscrowd flag for groundtruth boxes. + """ + + if image_id in self._groundtruth_masks: + tf.logging.warning( + 'Ignoring groundtruth with image %s, since it has already been ' + 'added to the ground truth database.', image_id) + return + + self._groundtruth_masks[image_id] = groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_instance_masks] + self._groundtruth_class_labels[image_id] = groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_classes] + groundtruth_is_crowd = groundtruth_dict.get( + standard_fields.InputDataFields.groundtruth_is_crowd) + # Drop groundtruth_is_crowd if empty tensor. 
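+    # An empty is_crowd array (e.g. when the input pipeline emits the field
+    # without any values) carries no information, so it is treated the same
+    # as if it had not been provided at all.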
+ if groundtruth_is_crowd is not None and not groundtruth_is_crowd.size > 0: + groundtruth_is_crowd = None + if groundtruth_is_crowd is not None: + self._groundtruth_is_crowd[image_id] = groundtruth_is_crowd + + def add_single_detected_image_info(self, image_id, detections_dict): + """Adds detections for a single image to be used for evaluation. + + If a detection has already been added for this image id, a warning is + logged, and the detection is skipped. + + Args: + image_id: A unique string/integer identifier for the image. + detections_dict: A dictionary containing - + DetectionResultFields.detection_classes: integer numpy array of shape + [num_masks] containing 1-indexed detection classes for the masks. + DetectionResultFields.detection_masks: optional uint8 numpy array of + shape [num_masks, image_height, image_width] containing instance + masks. The elements of the array must be in {0, 1}. + + Raises: + ValueError: If results and groundtruth shape don't match. + """ + + if image_id not in self._groundtruth_masks: + raise ValueError('Missing groundtruth for image id: {}'.format(image_id)) + + detection_masks = detections_dict[ + standard_fields.DetectionResultFields.detection_masks] + self._predicted_masks[image_id] = detection_masks + self._predicted_class_labels[image_id] = detections_dict[ + standard_fields.DetectionResultFields.detection_classes] + groundtruth_mask_shape = self._groundtruth_masks[image_id].shape + if groundtruth_mask_shape[1:] != detection_masks.shape[1:]: + raise ValueError("The shape of results doesn't match groundtruth.") + + def evaluate(self): + """Evaluates the detection masks and returns a dictionary of coco metrics. + + Returns: + A dictionary holding - + + 1. summary_metric: + 'PanopticQuality@%.2fIOU': mean panoptic quality averaged over classes at + the required IOU. + 'SegmentationQuality@%.2fIOU': mean segmentation quality averaged over + classes at the required IOU. + 'RecognitionQuality@%.2fIOU': mean recognition quality averaged over + classes at the required IOU. + 'NumValidClasses': number of valid classes. A valid class should have at + least one normal (is_crowd=0) groundtruth mask or one predicted mask. + 'NumTotalClasses': number of total classes. + + 2. per_category_pq: if include_metrics_per_category is True, category + specific results with keys of the form: + 'PanopticQuality@%.2fIOU_ByCategory/category'. + """ + # Evaluate and accumulate the iou/tp/fp/fn. + sum_tp_iou, sum_num_tp, sum_num_fp, sum_num_fn = self._evaluate_all_masks() + # Compute PQ metric for each category and average over all classes. + mask_metrics = self._compute_panoptic_metrics(sum_tp_iou, sum_num_tp, + sum_num_fp, sum_num_fn) + return mask_metrics + + def get_estimator_eval_metric_ops(self, eval_dict): + """Returns a dictionary of eval metric ops. + + Note that once value_op is called, the detections and groundtruth added via + update_op are cleared. + + Args: + eval_dict: A dictionary that holds tensors for evaluating object detection + performance. For single-image evaluation, this dictionary may be + produced from eval_util.result_dict_for_single_example(). If multi-image + evaluation, `eval_dict` should contain the fields + 'num_gt_masks_per_image' and 'num_det_masks_per_image' to properly unpad + the tensors from the batch. + + Returns: + a dictionary of metric names to tuple of value_op and update_op that can + be used as eval metric ops in tf.estimator.EstimatorSpec. 
Note that all + update ops must be run together and similarly all value ops must be run + together to guarantee correct behaviour. + """ + + def update_op(image_id_batched, groundtruth_classes_batched, + groundtruth_instance_masks_batched, + groundtruth_is_crowd_batched, num_gt_masks_per_image, + detection_classes_batched, detection_masks_batched, + num_det_masks_per_image): + """Update op for metrics.""" + for (image_id, groundtruth_classes, groundtruth_instance_masks, + groundtruth_is_crowd, num_gt_mask, detection_classes, + detection_masks, num_det_mask) in zip( + image_id_batched, groundtruth_classes_batched, + groundtruth_instance_masks_batched, groundtruth_is_crowd_batched, + num_gt_masks_per_image, detection_classes_batched, + detection_masks_batched, num_det_masks_per_image): + + self.add_single_ground_truth_image_info( + image_id, { + 'groundtruth_classes': + groundtruth_classes[:num_gt_mask], + 'groundtruth_instance_masks': + groundtruth_instance_masks[:num_gt_mask], + 'groundtruth_is_crowd': + groundtruth_is_crowd[:num_gt_mask] + }) + self.add_single_detected_image_info( + image_id, { + 'detection_classes': detection_classes[:num_det_mask], + 'detection_masks': detection_masks[:num_det_mask] + }) + + # Unpack items from the evaluation dictionary. + (image_id, groundtruth_classes, groundtruth_instance_masks, + groundtruth_is_crowd, num_gt_masks_per_image, detection_classes, + detection_masks, num_det_masks_per_image + ) = self._unpack_evaluation_dictionary_items(eval_dict) + + update_op = tf.py_func(update_op, [ + image_id, groundtruth_classes, groundtruth_instance_masks, + groundtruth_is_crowd, num_gt_masks_per_image, detection_classes, + detection_masks, num_det_masks_per_image + ], []) + + metric_names = [ + 'PanopticQuality@%.2fIOU' % self._iou_threshold, + 'SegmentationQuality@%.2fIOU' % self._iou_threshold, + 'RecognitionQuality@%.2fIOU' % self._iou_threshold + ] + if self._include_metrics_per_category: + for category_dict in self._categories: + metric_names.append('PanopticQuality@%.2fIOU_ByCategory/%s' % + (self._iou_threshold, category_dict['name'])) + + def first_value_func(): + self._metrics = self.evaluate() + self.clear() + return np.float32(self._metrics[metric_names[0]]) + + def value_func_factory(metric_name): + + def value_func(): + return np.float32(self._metrics[metric_name]) + + return value_func + + # Ensure that the metrics are only evaluated once. + first_value_op = tf.py_func(first_value_func, [], tf.float32) + eval_metric_ops = {metric_names[0]: (first_value_op, update_op)} + with tf.control_dependencies([first_value_op]): + for metric_name in metric_names[1:]: + eval_metric_ops[metric_name] = (tf.py_func( + value_func_factory(metric_name), [], np.float32), update_op) + return eval_metric_ops + + def _evaluate_all_masks(self): + """Evaluate all masks and compute sum iou/TP/FP/FN.""" + + sum_num_tp = {category['id']: 0 for category in self._categories} + sum_num_fp = sum_num_tp.copy() + sum_num_fn = sum_num_tp.copy() + sum_tp_iou = sum_num_tp.copy() + + for image_id in self._groundtruth_class_labels: + # Separate normal and is_crowd groundtruth + crowd_gt_indices = self._groundtruth_is_crowd.get(image_id) + (normal_gt_masks, normal_gt_classes, crowd_gt_masks, + crowd_gt_classes) = self._separate_normal_and_crowd_labels( + crowd_gt_indices, self._groundtruth_masks[image_id], + self._groundtruth_class_labels[image_id]) + + # Mask matching to normal GT. 
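+      # Matching runs in two passes: predictions are first matched one-to-one
+      # against normal groundtruths using IoU at self._iou_threshold; the
+      # still-unmatched predictions are then matched against is_crowd
+      # groundtruths using IoA (with replacement), and only predictions left
+      # unmatched after both passes count as false positives.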
+ predicted_masks = self._predicted_masks[image_id] + predicted_class_labels = self._predicted_class_labels[image_id] + (overlaps, pred_matched, + gt_matched) = self._match_predictions_to_groundtruths( + predicted_masks, + predicted_class_labels, + normal_gt_masks, + normal_gt_classes, + self._iou_threshold, + is_crowd=False, + with_replacement=False) + + # Accumulate true positives. + for (class_id, is_matched, overlap) in zip(predicted_class_labels, + pred_matched, overlaps): + if is_matched: + sum_num_tp[class_id] += 1 + sum_tp_iou[class_id] += overlap + + # Accumulate false negatives. + for (class_id, is_matched) in zip(normal_gt_classes, gt_matched): + if not is_matched: + sum_num_fn[class_id] += 1 + + # Match remaining predictions to crowd gt. + remained_pred_indices = np.logical_not(pred_matched) + remained_pred_masks = predicted_masks[remained_pred_indices, :, :] + remained_pred_classes = predicted_class_labels[remained_pred_indices] + _, pred_matched, _ = self._match_predictions_to_groundtruths( + remained_pred_masks, + remained_pred_classes, + crowd_gt_masks, + crowd_gt_classes, + self._ioa_threshold, + is_crowd=True, + with_replacement=True) + + # Accumulate false positives + for (class_id, is_matched) in zip(remained_pred_classes, pred_matched): + if not is_matched: + sum_num_fp[class_id] += 1 + return sum_tp_iou, sum_num_tp, sum_num_fp, sum_num_fn + + def _compute_panoptic_metrics(self, sum_tp_iou, sum_num_tp, sum_num_fp, + sum_num_fn): + """Compute PQ metric for each category and average over all classes. + + Args: + sum_tp_iou: dict, summed true positive intersection-over-union (IoU) for + each class, keyed by class_id. + sum_num_tp: the total number of true positives for each class, keyed by + class_id. + sum_num_fp: the total number of false positives for each class, keyed by + class_id. + sum_num_fn: the total number of false negatives for each class, keyed by + class_id. + + Returns: + mask_metrics: a dictionary containing averaged metrics over all classes, + and per-category metrics if required. + """ + mask_metrics = {} + sum_pq = 0 + sum_sq = 0 + sum_rq = 0 + num_valid_classes = 0 + for category in self._categories: + class_id = category['id'] + (panoptic_quality, segmentation_quality, + recognition_quality) = self._compute_panoptic_metrics_single_class( + sum_tp_iou[class_id], sum_num_tp[class_id], sum_num_fp[class_id], + sum_num_fn[class_id]) + if panoptic_quality is not None: + sum_pq += panoptic_quality + sum_sq += segmentation_quality + sum_rq += recognition_quality + num_valid_classes += 1 + if self._include_metrics_per_category: + mask_metrics['PanopticQuality@%.2fIOU_ByCategory/%s' % + (self._iou_threshold, + category['name'])] = panoptic_quality + mask_metrics['PanopticQuality@%.2fIOU' % + self._iou_threshold] = sum_pq / num_valid_classes + mask_metrics['SegmentationQuality@%.2fIOU' % + self._iou_threshold] = sum_sq / num_valid_classes + mask_metrics['RecognitionQuality@%.2fIOU' % + self._iou_threshold] = sum_rq / num_valid_classes + mask_metrics['NumValidClasses'] = num_valid_classes + mask_metrics['NumTotalClasses'] = len(self._categories) + return mask_metrics + + def _compute_panoptic_metrics_single_class(self, sum_tp_iou, num_tp, num_fp, + num_fn): + """Compute panoptic metrics: panoptic/segmentation/recognition quality. + + More computation details in https://arxiv.org/pdf/1801.00868.pdf. + Args: + sum_tp_iou: summed true positive intersection-over-union (IoU) for a + specific class. + num_tp: the total number of true positives for a specific class. 
+ num_fp: the total number of false positives for a specific class. + num_fn: the total number of false negatives for a specific class. + + Returns: + panoptic_quality: sum_tp_iou / (num_tp + 0.5*num_fp + 0.5*num_fn). + segmentation_quality: sum_tp_iou / num_tp. + recognition_quality: num_tp / (num_tp + 0.5*num_fp + 0.5*num_fn). + """ + denominator = num_tp + 0.5 * num_fp + 0.5 * num_fn + # Calculate metric only if there is at least one GT or one prediction. + if denominator > 0: + recognition_quality = num_tp / denominator + if num_tp > 0: + segmentation_quality = sum_tp_iou / num_tp + else: + # If there is no TP for this category. + segmentation_quality = 0 + panoptic_quality = segmentation_quality * recognition_quality + return panoptic_quality, segmentation_quality, recognition_quality + else: + return None, None, None + + def _separate_normal_and_crowd_labels(self, crowd_gt_indices, + groundtruth_masks, groundtruth_classes): + """Separate normal and crowd groundtruth class_labels and masks. + + Args: + crowd_gt_indices: None or array of shape [num_groundtruths]. If None, all + groundtruths are treated as normal ones. + groundtruth_masks: array of shape [num_groundtruths, height, width]. + groundtruth_classes: array of shape [num_groundtruths]. + + Returns: + normal_gt_masks: array of shape [num_normal_groundtruths, height, width]. + normal_gt_classes: array of shape [num_normal_groundtruths]. + crowd_gt_masks: array of shape [num_crowd_groundtruths, height, width]. + crowd_gt_classes: array of shape [num_crowd_groundtruths]. + Raises: + ValueError: if the shape of groundtruth classes doesn't match groundtruth + masks or if the shape of crowd_gt_indices. + """ + if groundtruth_masks.shape[0] != groundtruth_classes.shape[0]: + raise ValueError( + "The number of masks doesn't match the number of labels.") + if crowd_gt_indices is None: + # All gts are treated as normal + crowd_gt_indices = np.zeros(groundtruth_masks.shape, dtype=np.bool) + else: + if groundtruth_masks.shape[0] != crowd_gt_indices.shape[0]: + raise ValueError( + "The number of masks doesn't match the number of is_crowd labels.") + crowd_gt_indices = crowd_gt_indices.astype(np.bool) + normal_gt_indices = np.logical_not(crowd_gt_indices) + if normal_gt_indices.size: + normal_gt_masks = groundtruth_masks[normal_gt_indices, :, :] + normal_gt_classes = groundtruth_classes[normal_gt_indices] + crowd_gt_masks = groundtruth_masks[crowd_gt_indices, :, :] + crowd_gt_classes = groundtruth_classes[crowd_gt_indices] + else: + # No groundtruths available, groundtruth_masks.shape = (0, h, w) + normal_gt_masks = groundtruth_masks + normal_gt_classes = groundtruth_classes + crowd_gt_masks = groundtruth_masks + crowd_gt_classes = groundtruth_classes + return normal_gt_masks, normal_gt_classes, crowd_gt_masks, crowd_gt_classes + + def _match_predictions_to_groundtruths(self, + predicted_masks, + predicted_classes, + groundtruth_masks, + groundtruth_classes, + matching_threshold, + is_crowd=False, + with_replacement=False): + """Match the predicted masks to groundtruths. + + Args: + predicted_masks: array of shape [num_predictions, height, width]. + predicted_classes: array of shape [num_predictions]. + groundtruth_masks: array of shape [num_groundtruths, height, width]. + groundtruth_classes: array of shape [num_groundtruths]. + matching_threshold: if the overlap between a prediction and a groundtruth + is larger than this threshold, the prediction is true positive. + is_crowd: whether the groundtruths are crowd annotation or not. 
If True, + use intersection over area (IoA) as the overlapping metric; otherwise + use intersection over union (IoU). + with_replacement: whether a groundtruth can be matched to multiple + predictions. By default, for normal groundtruths, only 1-1 matching is + allowed for normal groundtruths; for crowd groundtruths, 1-to-many must + be allowed. + + Returns: + best_overlaps: array of shape [num_predictions]. Values representing the + IoU + or IoA with best matched groundtruth. + pred_matched: array of shape [num_predictions]. Boolean value representing + whether the ith prediction is matched to a groundtruth. + gt_matched: array of shape [num_groundtruth]. Boolean value representing + whether the ith groundtruth is matched to a prediction. + Raises: + ValueError: if the shape of groundtruth/predicted masks doesn't match + groundtruth/predicted classes. + """ + if groundtruth_masks.shape[0] != groundtruth_classes.shape[0]: + raise ValueError( + "The number of GT masks doesn't match the number of labels.") + if predicted_masks.shape[0] != predicted_classes.shape[0]: + raise ValueError( + "The number of predicted masks doesn't match the number of labels.") + gt_matched = np.zeros(groundtruth_classes.shape, dtype=np.bool) + pred_matched = np.zeros(predicted_classes.shape, dtype=np.bool) + best_overlaps = np.zeros(predicted_classes.shape) + for pid in range(predicted_classes.shape[0]): + best_overlap = 0 + matched_gt_id = -1 + for gid in range(groundtruth_classes.shape[0]): + if predicted_classes[pid] == groundtruth_classes[gid]: + if (not with_replacement) and gt_matched[gid]: + continue + if not is_crowd: + overlap = np_mask_ops.iou(predicted_masks[pid:pid + 1], + groundtruth_masks[gid:gid + 1])[0, 0] + else: + overlap = np_mask_ops.ioa(groundtruth_masks[gid:gid + 1], + predicted_masks[pid:pid + 1])[0, 0] + if overlap >= matching_threshold and overlap > best_overlap: + matched_gt_id = gid + best_overlap = overlap + if matched_gt_id >= 0: + gt_matched[matched_gt_id] = True + pred_matched[pid] = True + best_overlaps[pid] = best_overlap + return best_overlaps, pred_matched, gt_matched + + def _unpack_evaluation_dictionary_items(self, eval_dict): + """Unpack items from the evaluation dictionary.""" + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + image_id = eval_dict[input_data_fields.key] + groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes] + groundtruth_instance_masks = eval_dict[ + input_data_fields.groundtruth_instance_masks] + groundtruth_is_crowd = eval_dict.get(input_data_fields.groundtruth_is_crowd, + None) + num_gt_masks_per_image = eval_dict.get( + input_data_fields.num_groundtruth_boxes, None) + detection_classes = eval_dict[detection_fields.detection_classes] + detection_masks = eval_dict[detection_fields.detection_masks] + num_det_masks_per_image = eval_dict.get(detection_fields.num_detections, + None) + if groundtruth_is_crowd is None: + groundtruth_is_crowd = tf.zeros_like(groundtruth_classes, dtype=tf.bool) + + if not image_id.shape.as_list(): + # Apply a batch dimension to all tensors. 
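+      # Same unbatched-input handling as in add_eval_dict above, except that
+      # the per-image counts fall back to the padded class/mask dimensions,
+      # since this evaluator receives no boxes.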
+ image_id = tf.expand_dims(image_id, 0) + groundtruth_classes = tf.expand_dims(groundtruth_classes, 0) + groundtruth_instance_masks = tf.expand_dims(groundtruth_instance_masks, 0) + groundtruth_is_crowd = tf.expand_dims(groundtruth_is_crowd, 0) + detection_classes = tf.expand_dims(detection_classes, 0) + detection_masks = tf.expand_dims(detection_masks, 0) + + if num_gt_masks_per_image is None: + num_gt_masks_per_image = tf.shape(groundtruth_classes)[1:2] + else: + num_gt_masks_per_image = tf.expand_dims(num_gt_masks_per_image, 0) + + if num_det_masks_per_image is None: + num_det_masks_per_image = tf.shape(detection_classes)[1:2] + else: + num_det_masks_per_image = tf.expand_dims(num_det_masks_per_image, 0) + else: + if num_gt_masks_per_image is None: + num_gt_masks_per_image = tf.tile( + tf.shape(groundtruth_classes)[1:2], + multiples=tf.shape(groundtruth_classes)[0:1]) + if num_det_masks_per_image is None: + num_det_masks_per_image = tf.tile( + tf.shape(detection_classes)[1:2], + multiples=tf.shape(detection_classes)[0:1]) + return (image_id, groundtruth_classes, groundtruth_instance_masks, + groundtruth_is_crowd, num_gt_masks_per_image, detection_classes, + detection_masks, num_det_masks_per_image) diff --git a/models/research/object_detection/metrics/coco_evaluation_test.py b/models/research/object_detection/metrics/coco_evaluation_test.py new file mode 100644 index 0000000000000000000000000000000000000000..165c94780d93bb93bab9ab1187c7fa41b79b96b9 --- /dev/null +++ b/models/research/object_detection/metrics/coco_evaluation_test.py @@ -0,0 +1,1941 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for tensorflow_models.object_detection.metrics.coco_evaluation.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest +import numpy as np +import tensorflow.compat.v1 as tf +from object_detection.core import standard_fields +from object_detection.metrics import coco_evaluation +from object_detection.utils import tf_version + + +def _get_categories_list(): + return [{ + 'id': 1, + 'name': 'person' + }, { + 'id': 2, + 'name': 'dog' + }, { + 'id': 3, + 'name': 'cat' + }] + + +def _get_category_keypoints_dict(): + return { + 'person': [{ + 'id': 0, + 'name': 'left_eye' + }, { + 'id': 3, + 'name': 'right_eye' + }], + 'dog': [{ + 'id': 1, + 'name': 'tail_start' + }, { + 'id': 2, + 'name': 'mouth' + }] + } + + +class CocoDetectionEvaluationTest(tf.test.TestCase): + + def testGetOneMAPWithMatchingGroundtruthAndDetections(self): + """Tests that mAP is calculated correctly on GT and Detections.""" + coco_evaluator = coco_evaluation.CocoDetectionEvaluator( + _get_categories_list()) + coco_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.InputDataFields.groundtruth_classes: np.array([1]) + }) + coco_evaluator.add_single_detected_image_info( + image_id='image1', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]) + }) + coco_evaluator.add_single_ground_truth_image_info( + image_id='image2', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[50., 50., 100., 100.]]), + standard_fields.InputDataFields.groundtruth_classes: np.array([1]) + }) + coco_evaluator.add_single_detected_image_info( + image_id='image2', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[50., 50., 100., 100.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]) + }) + coco_evaluator.add_single_ground_truth_image_info( + image_id='image3', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[25., 25., 50., 50.]]), + standard_fields.InputDataFields.groundtruth_classes: np.array([1]) + }) + coco_evaluator.add_single_detected_image_info( + image_id='image3', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[25., 25., 50., 50.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]) + }) + metrics = coco_evaluator.evaluate() + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0) + + def testGetOneMAPWithMatchingGroundtruthAndDetectionsSkipCrowd(self): + """Tests computing mAP with is_crowd GT boxes skipped.""" + coco_evaluator = coco_evaluation.CocoDetectionEvaluator( + _get_categories_list()) + coco_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[100., 100., 200., 200.], [99., 99., 200., 200.]]), + 
standard_fields.InputDataFields.groundtruth_classes: + np.array([1, 2]), + standard_fields.InputDataFields.groundtruth_is_crowd: + np.array([0, 1]) + }) + coco_evaluator.add_single_detected_image_info( + image_id='image1', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]) + }) + metrics = coco_evaluator.evaluate() + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0) + + def testGetOneMAPWithMatchingGroundtruthAndDetectionsEmptyCrowd(self): + """Tests computing mAP with empty is_crowd array passed in.""" + coco_evaluator = coco_evaluation.CocoDetectionEvaluator( + _get_categories_list()) + coco_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.InputDataFields.groundtruth_classes: + np.array([1]), + standard_fields.InputDataFields.groundtruth_is_crowd: + np.array([]) + }) + coco_evaluator.add_single_detected_image_info( + image_id='image1', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]) + }) + metrics = coco_evaluator.evaluate() + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0) + + def testRejectionOnDuplicateGroundtruth(self): + """Tests that groundtruth cannot be added more than once for an image.""" + coco_evaluator = coco_evaluation.CocoDetectionEvaluator( + _get_categories_list()) + # Add groundtruth + image_key1 = 'img1' + groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]], + dtype=float) + groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int) + coco_evaluator.add_single_ground_truth_image_info(image_key1, { + standard_fields.InputDataFields.groundtruth_boxes: + groundtruth_boxes1, + standard_fields.InputDataFields.groundtruth_classes: + groundtruth_class_labels1 + }) + groundtruth_lists_len = len(coco_evaluator._groundtruth_list) + + # Add groundtruth with the same image id. 
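+    # The evaluator should log a warning and keep the original annotations,
+    # so the length of the internal groundtruth list must not change.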
+ coco_evaluator.add_single_ground_truth_image_info(image_key1, { + standard_fields.InputDataFields.groundtruth_boxes: + groundtruth_boxes1, + standard_fields.InputDataFields.groundtruth_classes: + groundtruth_class_labels1 + }) + self.assertEqual(groundtruth_lists_len, + len(coco_evaluator._groundtruth_list)) + + def testRejectionOnDuplicateDetections(self): + """Tests that detections cannot be added more than once for an image.""" + coco_evaluator = coco_evaluation.CocoDetectionEvaluator( + _get_categories_list()) + # Add groundtruth + coco_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[99., 100., 200., 200.]]), + standard_fields.InputDataFields.groundtruth_classes: np.array([1]) + }) + coco_evaluator.add_single_detected_image_info( + image_id='image1', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]) + }) + detections_lists_len = len(coco_evaluator._detection_boxes_list) + coco_evaluator.add_single_detected_image_info( + image_id='image1', # Note that this image id was previously added. + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]) + }) + self.assertEqual(detections_lists_len, + len(coco_evaluator._detection_boxes_list)) + + def testExceptionRaisedWithMissingGroundtruth(self): + """Tests that exception is raised for detection with missing groundtruth.""" + coco_evaluator = coco_evaluation.CocoDetectionEvaluator( + _get_categories_list()) + with self.assertRaises(ValueError): + coco_evaluator.add_single_detected_image_info( + image_id='image1', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]) + }) + + +@unittest.skipIf(tf_version.is_tf2(), 'Only Supported in TF1.X') +class CocoEvaluationPyFuncTest(tf.test.TestCase): + + def testGetOneMAPWithMatchingGroundtruthAndDetections(self): + coco_evaluator = coco_evaluation.CocoDetectionEvaluator( + _get_categories_list()) + image_id = tf.placeholder(tf.string, shape=()) + groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + groundtruth_classes = tf.placeholder(tf.float32, shape=(None)) + detection_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + detection_scores = tf.placeholder(tf.float32, shape=(None)) + detection_classes = tf.placeholder(tf.float32, shape=(None)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: groundtruth_classes, + detection_fields.detection_boxes: detection_boxes, + detection_fields.detection_scores: detection_scores, + detection_fields.detection_classes: detection_classes + } + + eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict) + + _, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP'] + + with 
self.test_session() as sess: + sess.run(update_op, + feed_dict={ + image_id: 'image1', + groundtruth_boxes: np.array([[100., 100., 200., 200.]]), + groundtruth_classes: np.array([1]), + detection_boxes: np.array([[100., 100., 200., 200.]]), + detection_scores: np.array([.8]), + detection_classes: np.array([1]) + }) + sess.run(update_op, + feed_dict={ + image_id: 'image2', + groundtruth_boxes: np.array([[50., 50., 100., 100.]]), + groundtruth_classes: np.array([3]), + detection_boxes: np.array([[50., 50., 100., 100.]]), + detection_scores: np.array([.7]), + detection_classes: np.array([3]) + }) + sess.run(update_op, + feed_dict={ + image_id: 'image3', + groundtruth_boxes: np.array([[25., 25., 50., 50.]]), + groundtruth_classes: np.array([2]), + detection_boxes: np.array([[25., 25., 50., 50.]]), + detection_scores: np.array([.9]), + detection_classes: np.array([2]) + }) + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.50IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.75IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'], 1.0) + self.assertFalse(coco_evaluator._groundtruth_list) + self.assertFalse(coco_evaluator._detection_boxes_list) + self.assertFalse(coco_evaluator._image_ids) + + def testGetOneMAPWithMatchingGroundtruthAndDetectionsIsAnnotated(self): + coco_evaluator = coco_evaluation.CocoDetectionEvaluator( + _get_categories_list()) + image_id = tf.placeholder(tf.string, shape=()) + groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + groundtruth_classes = tf.placeholder(tf.float32, shape=(None)) + is_annotated = tf.placeholder(tf.bool, shape=()) + detection_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + detection_scores = tf.placeholder(tf.float32, shape=(None)) + detection_classes = tf.placeholder(tf.float32, shape=(None)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: groundtruth_classes, + 'is_annotated': is_annotated, + detection_fields.detection_boxes: detection_boxes, + detection_fields.detection_scores: detection_scores, + detection_fields.detection_classes: detection_classes + } + + eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict) + + _, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP'] + + with self.test_session() as sess: + sess.run(update_op, + feed_dict={ + image_id: 'image1', + groundtruth_boxes: np.array([[100., 100., 200., 200.]]), + groundtruth_classes: np.array([1]), + is_annotated: True, + detection_boxes: np.array([[100., 100., 200., 
200.]]), + detection_scores: np.array([.8]), + detection_classes: np.array([1]) + }) + sess.run(update_op, + feed_dict={ + image_id: 'image2', + groundtruth_boxes: np.array([[50., 50., 100., 100.]]), + groundtruth_classes: np.array([3]), + is_annotated: True, + detection_boxes: np.array([[50., 50., 100., 100.]]), + detection_scores: np.array([.7]), + detection_classes: np.array([3]) + }) + sess.run(update_op, + feed_dict={ + image_id: 'image3', + groundtruth_boxes: np.array([[25., 25., 50., 50.]]), + groundtruth_classes: np.array([2]), + is_annotated: True, + detection_boxes: np.array([[25., 25., 50., 50.]]), + detection_scores: np.array([.9]), + detection_classes: np.array([2]) + }) + sess.run(update_op, + feed_dict={ + image_id: 'image4', + groundtruth_boxes: np.zeros((0, 4)), + groundtruth_classes: np.zeros((0)), + is_annotated: False, # Note that this image isn't annotated. + detection_boxes: np.array([[25., 25., 50., 50.], + [25., 25., 70., 50.], + [25., 25., 80., 50.], + [25., 25., 90., 50.]]), + detection_scores: np.array([0.6, 0.7, 0.8, 0.9]), + detection_classes: np.array([1, 2, 2, 3]) + }) + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.50IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.75IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'], 1.0) + self.assertFalse(coco_evaluator._groundtruth_list) + self.assertFalse(coco_evaluator._detection_boxes_list) + self.assertFalse(coco_evaluator._image_ids) + + def testGetOneMAPWithMatchingGroundtruthAndDetectionsPadded(self): + coco_evaluator = coco_evaluation.CocoDetectionEvaluator( + _get_categories_list()) + image_id = tf.placeholder(tf.string, shape=()) + groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + groundtruth_classes = tf.placeholder(tf.float32, shape=(None)) + detection_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + detection_scores = tf.placeholder(tf.float32, shape=(None)) + detection_classes = tf.placeholder(tf.float32, shape=(None)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: groundtruth_classes, + detection_fields.detection_boxes: detection_boxes, + detection_fields.detection_scores: detection_scores, + detection_fields.detection_classes: detection_classes + } + + eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict) + + _, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP'] + + with self.test_session() as sess: + sess.run( + update_op, + feed_dict={ + image_id: + 'image1', + 
groundtruth_boxes: + np.array([[100., 100., 200., 200.], [-1, -1, -1, -1]]), + groundtruth_classes: + np.array([1, -1]), + detection_boxes: + np.array([[100., 100., 200., 200.], [0., 0., 0., 0.]]), + detection_scores: + np.array([.8, 0.]), + detection_classes: + np.array([1, -1]) + }) + sess.run( + update_op, + feed_dict={ + image_id: + 'image2', + groundtruth_boxes: + np.array([[50., 50., 100., 100.], [-1, -1, -1, -1]]), + groundtruth_classes: + np.array([3, -1]), + detection_boxes: + np.array([[50., 50., 100., 100.], [0., 0., 0., 0.]]), + detection_scores: + np.array([.7, 0.]), + detection_classes: + np.array([3, -1]) + }) + sess.run( + update_op, + feed_dict={ + image_id: + 'image3', + groundtruth_boxes: + np.array([[25., 25., 50., 50.], [10., 10., 15., 15.]]), + groundtruth_classes: + np.array([2, 2]), + detection_boxes: + np.array([[25., 25., 50., 50.], [10., 10., 15., 15.]]), + detection_scores: + np.array([.95, .9]), + detection_classes: + np.array([2, 2]) + }) + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.50IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.75IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 0.83333331) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'], 1.0) + self.assertFalse(coco_evaluator._groundtruth_list) + self.assertFalse(coco_evaluator._detection_boxes_list) + self.assertFalse(coco_evaluator._image_ids) + + def testGetOneMAPWithMatchingGroundtruthAndDetectionsBatched(self): + coco_evaluator = coco_evaluation.CocoDetectionEvaluator( + _get_categories_list()) + batch_size = 3 + image_id = tf.placeholder(tf.string, shape=(batch_size)) + groundtruth_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4)) + groundtruth_classes = tf.placeholder(tf.float32, shape=(batch_size, None)) + detection_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4)) + detection_scores = tf.placeholder(tf.float32, shape=(batch_size, None)) + detection_classes = tf.placeholder(tf.float32, shape=(batch_size, None)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: groundtruth_classes, + detection_fields.detection_boxes: detection_boxes, + detection_fields.detection_scores: detection_scores, + detection_fields.detection_classes: detection_classes + } + + eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict) + + _, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP'] + + with self.test_session() as sess: + sess.run(update_op, + feed_dict={ + image_id: ['image1', 'image2', 'image3'], + groundtruth_boxes: 
np.array([[[100., 100., 200., 200.]], + [[50., 50., 100., 100.]], + [[25., 25., 50., 50.]]]), + groundtruth_classes: np.array([[1], [3], [2]]), + detection_boxes: np.array([[[100., 100., 200., 200.]], + [[50., 50., 100., 100.]], + [[25., 25., 50., 50.]]]), + detection_scores: np.array([[.8], [.7], [.9]]), + detection_classes: np.array([[1], [3], [2]]) + }) + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.50IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.75IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'], 1.0) + self.assertFalse(coco_evaluator._groundtruth_list) + self.assertFalse(coco_evaluator._detection_boxes_list) + self.assertFalse(coco_evaluator._image_ids) + + def testGetOneMAPWithMatchingGroundtruthAndDetectionsPaddedBatches(self): + coco_evaluator = coco_evaluation.CocoDetectionEvaluator( + _get_categories_list()) + batch_size = 3 + image_id = tf.placeholder(tf.string, shape=(batch_size)) + groundtruth_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4)) + groundtruth_classes = tf.placeholder(tf.float32, shape=(batch_size, None)) + num_gt_boxes_per_image = tf.placeholder(tf.int32, shape=(None)) + detection_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4)) + detection_scores = tf.placeholder(tf.float32, shape=(batch_size, None)) + detection_classes = tf.placeholder(tf.float32, shape=(batch_size, None)) + num_det_boxes_per_image = tf.placeholder(tf.int32, shape=(None)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: groundtruth_classes, + detection_fields.detection_boxes: detection_boxes, + detection_fields.detection_scores: detection_scores, + detection_fields.detection_classes: detection_classes, + 'num_groundtruth_boxes_per_image': num_gt_boxes_per_image, + 'num_det_boxes_per_image': num_det_boxes_per_image + } + + eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict) + + _, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP'] + + with self.test_session() as sess: + sess.run( + update_op, + feed_dict={ + image_id: ['image1', 'image2', 'image3'], + groundtruth_boxes: + np.array([[[100., 100., 200., 200.], [-1, -1, -1, -1]], + [[50., 50., 100., 100.], [-1, -1, -1, -1]], + [[25., 25., 50., 50.], [10., 10., 15., 15.]]]), + groundtruth_classes: + np.array([[1, -1], [3, -1], [2, 2]]), + num_gt_boxes_per_image: + np.array([1, 1, 2]), + detection_boxes: + np.array([[[100., 100., 200., 200.], + [0., 0., 0., 0.], + [0., 0., 0., 0.]], + [[50., 50., 100., 
100.], + [0., 0., 0., 0.], + [0., 0., 0., 0.]], + [[25., 25., 50., 50.], + [10., 10., 15., 15.], + [10., 10., 15., 15.]]]), + detection_scores: + np.array([[.8, 0., 0.], [.7, 0., 0.], [.95, .9, 0.9]]), + detection_classes: + np.array([[1, -1, -1], [3, -1, -1], [2, 2, 2]]), + num_det_boxes_per_image: + np.array([1, 1, 3]), + }) + + # Check the number of bounding boxes added. + self.assertEqual(len(coco_evaluator._groundtruth_list), 4) + self.assertEqual(len(coco_evaluator._detection_boxes_list), 5) + + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.50IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.75IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 0.83333331) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'], 1.0) + self.assertFalse(coco_evaluator._groundtruth_list) + self.assertFalse(coco_evaluator._detection_boxes_list) + self.assertFalse(coco_evaluator._image_ids) + + +class CocoKeypointEvaluationTest(tf.test.TestCase): + + def testGetOneMAPWithMatchingKeypoints(self): + """Tests that correct mAP for keypoints is calculated.""" + category_keypoint_dict = _get_category_keypoints_dict() + coco_evaluator = coco_evaluation.CocoKeypointEvaluator( + category_id=1, category_keypoints=category_keypoint_dict['person'], + class_text='person') + coco_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.InputDataFields.groundtruth_classes: + np.array([1]), + standard_fields.InputDataFields.groundtruth_keypoints: + np.array([[[150., 160.], [float('nan'), + float('nan')], + [float('nan'), float('nan')], [170., 180.]]]), + standard_fields.InputDataFields.groundtruth_keypoint_visibilities: + np.array([[2, 0, 0, 2]]) + }) + coco_evaluator.add_single_detected_image_info( + image_id='image1', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]), + standard_fields.DetectionResultFields.detection_keypoints: + np.array([[[150., 160.], [1., 2.], [3., 4.], [170., 180.]]]) + }) + coco_evaluator.add_single_ground_truth_image_info( + image_id='image2', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[50., 50., 100., 100.]]), + standard_fields.InputDataFields.groundtruth_classes: + np.array([1]), + standard_fields.InputDataFields.groundtruth_keypoints: + np.array([[[75., 76.], [float('nan'), + float('nan')], + [float('nan'), float('nan')], [77., 78.]]]), + 
standard_fields.InputDataFields.groundtruth_keypoint_visibilities: + np.array([[2, 0, 0, 2]]) + }) + coco_evaluator.add_single_detected_image_info( + image_id='image2', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[50., 50., 100., 100.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]), + standard_fields.DetectionResultFields.detection_keypoints: + np.array([[[75., 76.], [5., 6.], [7., 8.], [77., 78.]]]) + }) + metrics = coco_evaluator.evaluate() + self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/person'], + 1.0) + + def testGroundtruthListValues(self): + category_keypoint_dict = _get_category_keypoints_dict() + coco_evaluator = coco_evaluation.CocoKeypointEvaluator( + category_id=1, category_keypoints=category_keypoint_dict['person'], + class_text='person') + coco_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.InputDataFields.groundtruth_classes: + np.array([1]), + standard_fields.InputDataFields.groundtruth_keypoints: + np.array([[[150., 160.], [float('nan'), float('nan')], + [float('nan'), float('nan')], [170., 180.]]]), + standard_fields.InputDataFields.groundtruth_keypoint_visibilities: + np.array([[2, 0, 0, 2]]), + standard_fields.InputDataFields.groundtruth_area: np.array([15.]) + }) + gt_dict = coco_evaluator._groundtruth_list[0] + self.assertEqual(gt_dict['id'], 1) + self.assertAlmostEqual(gt_dict['bbox'], [100.0, 100.0, 100.0, 100.0]) + self.assertAlmostEqual( + gt_dict['keypoints'], [160.0, 150.0, 2, 180.0, 170.0, 2]) + self.assertEqual(gt_dict['num_keypoints'], 2) + self.assertAlmostEqual(gt_dict['area'], 15.0) + + def testKeypointVisibilitiesAreOptional(self): + """Tests that evaluator works when visibilities aren't provided.""" + category_keypoint_dict = _get_category_keypoints_dict() + coco_evaluator = coco_evaluation.CocoKeypointEvaluator( + category_id=1, category_keypoints=category_keypoint_dict['person'], + class_text='person') + coco_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.InputDataFields.groundtruth_classes: + np.array([1]), + standard_fields.InputDataFields.groundtruth_keypoints: + np.array([[[150., 160.], [float('nan'), + float('nan')], + [float('nan'), float('nan')], [170., 180.]]]) + }) + coco_evaluator.add_single_detected_image_info( + image_id='image1', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]), + standard_fields.DetectionResultFields.detection_keypoints: + np.array([[[150., 160.], [1., 2.], [3., 4.], [170., 180.]]]) + }) + coco_evaluator.add_single_ground_truth_image_info( + image_id='image2', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[50., 50., 100., 100.]]), + standard_fields.InputDataFields.groundtruth_classes: + np.array([1]), + standard_fields.InputDataFields.groundtruth_keypoints: + np.array([[[75., 76.], [float('nan'), + float('nan')], + [float('nan'), float('nan')], [77., 78.]]]) + }) + 
coco_evaluator.add_single_detected_image_info( + image_id='image2', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[50., 50., 100., 100.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]), + standard_fields.DetectionResultFields.detection_keypoints: + np.array([[[75., 76.], [5., 6.], [7., 8.], [77., 78.]]]) + }) + metrics = coco_evaluator.evaluate() + self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/person'], + 1.0) + + def testFiltersDetectionsFromOtherCategories(self): + """Tests that the evaluator ignores detections from other categories.""" + category_keypoint_dict = _get_category_keypoints_dict() + coco_evaluator = coco_evaluation.CocoKeypointEvaluator( + category_id=2, category_keypoints=category_keypoint_dict['person'], + class_text='dog') + coco_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.InputDataFields.groundtruth_classes: + np.array([1]), + standard_fields.InputDataFields.groundtruth_keypoints: + np.array([[[150., 160.], [170., 180.], [110., 120.], + [130., 140.]]]), + standard_fields.InputDataFields.groundtruth_keypoint_visibilities: + np.array([[2, 2, 2, 2]]) + }) + coco_evaluator.add_single_detected_image_info( + image_id='image1', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.9]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]), + standard_fields.DetectionResultFields.detection_keypoints: + np.array([[[150., 160.], [170., 180.], [110., 120.], + [130., 140.]]]) + }) + metrics = coco_evaluator.evaluate() + self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/dog'], + -1.0) + + def testHandlesUnlabeledKeypointData(self): + """Tests that the evaluator handles missing keypoints GT.""" + category_keypoint_dict = _get_category_keypoints_dict() + coco_evaluator = coco_evaluation.CocoKeypointEvaluator( + category_id=1, category_keypoints=category_keypoint_dict['person'], + class_text='person') + coco_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.InputDataFields.groundtruth_classes: + np.array([1]), + standard_fields.InputDataFields.groundtruth_keypoints: + np.array([[[150., 160.], [float('nan'), + float('nan')], + [float('nan'), float('nan')], [170., 180.]]]), + standard_fields.InputDataFields.groundtruth_keypoint_visibilities: + np.array([[0, 0, 0, 2]]) + }) + coco_evaluator.add_single_detected_image_info( + image_id='image1', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]), + standard_fields.DetectionResultFields.detection_keypoints: + np.array([[[50., 60.], [1., 2.], [3., 4.], [170., 180.]]]) + }) + metrics = coco_evaluator.evaluate() + self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/person'], + 1.0) + + def testIgnoresCrowdAnnotations(self): + """Tests that the evaluator ignores GT marked as crowd.""" + 
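+ # A groundtruth flagged as is_crowd is treated as an ignore region rather + # than a matchable instance, so this image contributes no evaluable person + # annotations; pycocotools then reports -1.0, its sentinel for "no valid + # data", which is what the assertion below checks.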
category_keypoint_dict = _get_category_keypoints_dict() + coco_evaluator = coco_evaluation.CocoKeypointEvaluator( + category_id=1, category_keypoints=category_keypoint_dict['person'], + class_text='person') + coco_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.InputDataFields.groundtruth_classes: + np.array([1]), + standard_fields.InputDataFields.groundtruth_is_crowd: + np.array([1]), + standard_fields.InputDataFields.groundtruth_keypoints: + np.array([[[150., 160.], [float('nan'), + float('nan')], + [float('nan'), float('nan')], [170., 180.]]]), + standard_fields.InputDataFields.groundtruth_keypoint_visibilities: + np.array([[2, 0, 0, 2]]) + }) + coco_evaluator.add_single_detected_image_info( + image_id='image1', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]), + standard_fields.DetectionResultFields.detection_keypoints: + np.array([[[150., 160.], [1., 2.], [3., 4.], [170., 180.]]]) + }) + metrics = coco_evaluator.evaluate() + self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/person'], + -1.0) + + +@unittest.skipIf(tf_version.is_tf2(), 'Only Supported in TF1.X') +class CocoKeypointEvaluationPyFuncTest(tf.test.TestCase): + + def testGetOneMAPWithMatchingKeypoints(self): + category_keypoint_dict = _get_category_keypoints_dict() + coco_keypoint_evaluator = coco_evaluation.CocoKeypointEvaluator( + category_id=1, category_keypoints=category_keypoint_dict['person'], + class_text='person') + image_id = tf.placeholder(tf.string, shape=()) + groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + groundtruth_classes = tf.placeholder(tf.float32, shape=(None)) + groundtruth_keypoints = tf.placeholder(tf.float32, shape=(None, 4, 2)) + detection_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + detection_scores = tf.placeholder(tf.float32, shape=(None)) + detection_classes = tf.placeholder(tf.float32, shape=(None)) + detection_keypoints = tf.placeholder(tf.float32, shape=(None, 4, 2)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: groundtruth_classes, + input_data_fields.groundtruth_keypoints: groundtruth_keypoints, + detection_fields.detection_boxes: detection_boxes, + detection_fields.detection_scores: detection_scores, + detection_fields.detection_classes: detection_classes, + detection_fields.detection_keypoints: detection_keypoints, + } + + eval_metric_ops = coco_keypoint_evaluator.get_estimator_eval_metric_ops( + eval_dict) + + _, update_op = eval_metric_ops['Keypoints_Precision/mAP ByCategory/person'] + + with self.test_session() as sess: + sess.run( + update_op, + feed_dict={ + image_id: + 'image1', + groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + groundtruth_classes: + np.array([1]), + groundtruth_keypoints: + np.array([[[150., 160.], [float('nan'), + float('nan')], + [float('nan'), float('nan')], [170., 180.]]]), + detection_boxes: + np.array([[100., 100., 200., 200.]]), + detection_scores: + np.array([.8]), + detection_classes: + np.array([1]), + detection_keypoints: 
+ np.array([[[150., 160.], [1., 2.], [3., 4.], [170., 180.]]]) + }) + sess.run( + update_op, + feed_dict={ + image_id: + 'image2', + groundtruth_boxes: + np.array([[50., 50., 100., 100.]]), + groundtruth_classes: + np.array([1]), + groundtruth_keypoints: + np.array([[[75., 76.], [float('nan'), + float('nan')], + [float('nan'), float('nan')], [77., 78.]]]), + detection_boxes: + np.array([[50., 50., 100., 100.]]), + detection_scores: + np.array([.7]), + detection_classes: + np.array([1]), + detection_keypoints: + np.array([[[75., 76.], [5., 6.], [7., 8.], [77., 78.]]]) + }) + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/person'], + 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP@.50IOU ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP@.75IOU ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP (large) ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP (medium) ByCategory/person'], 1.0) + self.assertAlmostEqual(metrics['Keypoints_Recall/AR@1 ByCategory/person'], + 1.0) + self.assertAlmostEqual(metrics['Keypoints_Recall/AR@10 ByCategory/person'], + 1.0) + self.assertAlmostEqual(metrics['Keypoints_Recall/AR@100 ByCategory/person'], + 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Recall/AR@100 (large) ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Recall/AR@100 (medium) ByCategory/person'], 1.0) + self.assertFalse(coco_keypoint_evaluator._groundtruth_list) + self.assertFalse(coco_keypoint_evaluator._detection_boxes_list) + self.assertFalse(coco_keypoint_evaluator._image_ids) + + def testGetOneMAPWithMatchingKeypointsAndVisibilities(self): + category_keypoint_dict = _get_category_keypoints_dict() + coco_keypoint_evaluator = coco_evaluation.CocoKeypointEvaluator( + category_id=1, category_keypoints=category_keypoint_dict['person'], + class_text='person') + image_id = tf.placeholder(tf.string, shape=()) + groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + groundtruth_classes = tf.placeholder(tf.float32, shape=(None)) + groundtruth_keypoints = tf.placeholder(tf.float32, shape=(None, 4, 2)) + groundtruth_keypoint_visibilities = tf.placeholder( + tf.float32, shape=(None, 4)) + detection_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + detection_scores = tf.placeholder(tf.float32, shape=(None)) + detection_classes = tf.placeholder(tf.float32, shape=(None)) + detection_keypoints = tf.placeholder(tf.float32, shape=(None, 4, 2)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: + image_id, + input_data_fields.groundtruth_boxes: + groundtruth_boxes, + input_data_fields.groundtruth_classes: + groundtruth_classes, + input_data_fields.groundtruth_keypoints: + groundtruth_keypoints, + input_data_fields.groundtruth_keypoint_visibilities: + groundtruth_keypoint_visibilities, + detection_fields.detection_boxes: + detection_boxes, + detection_fields.detection_scores: + detection_scores, + detection_fields.detection_classes: + detection_classes, + detection_fields.detection_keypoints: + detection_keypoints, + } + + eval_metric_ops = coco_keypoint_evaluator.get_estimator_eval_metric_ops( + eval_dict) + + _, update_op = eval_metric_ops['Keypoints_Precision/mAP ByCategory/person'] + + with 
self.test_session() as sess: + sess.run( + update_op, + feed_dict={ + image_id: + 'image1', + groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + groundtruth_classes: + np.array([1]), + groundtruth_keypoints: + np.array([[[150., 160.], [float('nan'), + float('nan')], + [float('nan'), float('nan')], [170., 180.]]]), + groundtruth_keypoint_visibilities: + np.array([[0, 0, 0, 2]]), + detection_boxes: + np.array([[100., 100., 200., 200.]]), + detection_scores: + np.array([.8]), + detection_classes: + np.array([1]), + detection_keypoints: + np.array([[[50., 60.], [1., 2.], [3., 4.], [170., 180.]]]) + }) + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/person'], + 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP@.50IOU ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP@.75IOU ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP (large) ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP (medium) ByCategory/person'], -1.0) + self.assertAlmostEqual(metrics['Keypoints_Recall/AR@1 ByCategory/person'], + 1.0) + self.assertAlmostEqual(metrics['Keypoints_Recall/AR@10 ByCategory/person'], + 1.0) + self.assertAlmostEqual(metrics['Keypoints_Recall/AR@100 ByCategory/person'], + 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Recall/AR@100 (large) ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Recall/AR@100 (medium) ByCategory/person'], -1.0) + self.assertFalse(coco_keypoint_evaluator._groundtruth_list) + self.assertFalse(coco_keypoint_evaluator._detection_boxes_list) + self.assertFalse(coco_keypoint_evaluator._image_ids) + + def testGetOneMAPWithMatchingKeypointsIsAnnotated(self): + category_keypoint_dict = _get_category_keypoints_dict() + coco_keypoint_evaluator = coco_evaluation.CocoKeypointEvaluator( + category_id=1, category_keypoints=category_keypoint_dict['person'], + class_text='person') + image_id = tf.placeholder(tf.string, shape=()) + groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + groundtruth_classes = tf.placeholder(tf.float32, shape=(None)) + groundtruth_keypoints = tf.placeholder(tf.float32, shape=(None, 4, 2)) + is_annotated = tf.placeholder(tf.bool, shape=()) + detection_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + detection_scores = tf.placeholder(tf.float32, shape=(None)) + detection_classes = tf.placeholder(tf.float32, shape=(None)) + detection_keypoints = tf.placeholder(tf.float32, shape=(None, 4, 2)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: groundtruth_classes, + input_data_fields.groundtruth_keypoints: groundtruth_keypoints, + 'is_annotated': is_annotated, + detection_fields.detection_boxes: detection_boxes, + detection_fields.detection_scores: detection_scores, + detection_fields.detection_classes: detection_classes, + detection_fields.detection_keypoints: detection_keypoints, + } + + eval_metric_ops = coco_keypoint_evaluator.get_estimator_eval_metric_ops( + eval_dict) + + _, update_op = eval_metric_ops['Keypoints_Precision/mAP ByCategory/person'] + + with self.test_session() as sess: + sess.run( + update_op, + feed_dict={ + image_id: + 
'image1', + groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + groundtruth_classes: + np.array([1]), + groundtruth_keypoints: + np.array([[[150., 160.], [float('nan'), + float('nan')], + [float('nan'), float('nan')], [170., 180.]]]), + is_annotated: + True, + detection_boxes: + np.array([[100., 100., 200., 200.]]), + detection_scores: + np.array([.8]), + detection_classes: + np.array([1]), + detection_keypoints: + np.array([[[150., 160.], [1., 2.], [3., 4.], [170., 180.]]]) + }) + sess.run( + update_op, + feed_dict={ + image_id: + 'image2', + groundtruth_boxes: + np.array([[50., 50., 100., 100.]]), + groundtruth_classes: + np.array([1]), + groundtruth_keypoints: + np.array([[[75., 76.], [float('nan'), + float('nan')], + [float('nan'), float('nan')], [77., 78.]]]), + is_annotated: + True, + detection_boxes: + np.array([[50., 50., 100., 100.]]), + detection_scores: + np.array([.7]), + detection_classes: + np.array([1]), + detection_keypoints: + np.array([[[75., 76.], [5., 6.], [7., 8.], [77., 78.]]]) + }) + sess.run( + update_op, + feed_dict={ + image_id: + 'image3', + groundtruth_boxes: + np.zeros((0, 4)), + groundtruth_classes: + np.zeros((0)), + groundtruth_keypoints: + np.zeros((0, 4, 2)), + is_annotated: + False, # Note that this image isn't annotated. + detection_boxes: + np.array([[25., 25., 50., 50.], [25., 25., 70., 50.], + [25., 25., 80., 50.], [25., 25., 90., 50.]]), + detection_scores: + np.array([0.6, 0.7, 0.8, 0.9]), + detection_classes: + np.array([1, 2, 2, 3]), + detection_keypoints: + np.array([[[0., 0.], [0., 0.], [0., 0.], [0., 0.]]]) + }) + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/person'], + 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP@.50IOU ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP@.75IOU ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP (large) ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP (medium) ByCategory/person'], 1.0) + self.assertAlmostEqual(metrics['Keypoints_Recall/AR@1 ByCategory/person'], + 1.0) + self.assertAlmostEqual(metrics['Keypoints_Recall/AR@10 ByCategory/person'], + 1.0) + self.assertAlmostEqual(metrics['Keypoints_Recall/AR@100 ByCategory/person'], + 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Recall/AR@100 (large) ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Recall/AR@100 (medium) ByCategory/person'], 1.0) + self.assertFalse(coco_keypoint_evaluator._groundtruth_list) + self.assertFalse(coco_keypoint_evaluator._detection_boxes_list) + self.assertFalse(coco_keypoint_evaluator._image_ids) + + def testGetOneMAPWithMatchingKeypointsBatched(self): + category_keypoint_dict = _get_category_keypoints_dict() + coco_keypoint_evaluator = coco_evaluation.CocoKeypointEvaluator( + category_id=1, category_keypoints=category_keypoint_dict['person'], + class_text='person') + batch_size = 2 + image_id = tf.placeholder(tf.string, shape=(batch_size)) + groundtruth_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4)) + groundtruth_classes = tf.placeholder(tf.float32, shape=(batch_size, None)) + groundtruth_keypoints = tf.placeholder( + tf.float32, shape=(batch_size, None, 4, 2)) + detection_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4)) + detection_scores = tf.placeholder(tf.float32, shape=(batch_size, None)) 
+ detection_classes = tf.placeholder(tf.float32, shape=(batch_size, None)) + detection_keypoints = tf.placeholder( + tf.float32, shape=(batch_size, None, 4, 2)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: groundtruth_classes, + input_data_fields.groundtruth_keypoints: groundtruth_keypoints, + detection_fields.detection_boxes: detection_boxes, + detection_fields.detection_scores: detection_scores, + detection_fields.detection_classes: detection_classes, + detection_fields.detection_keypoints: detection_keypoints + } + + eval_metric_ops = coco_keypoint_evaluator.get_estimator_eval_metric_ops( + eval_dict) + + _, update_op = eval_metric_ops['Keypoints_Precision/mAP ByCategory/person'] + + with self.test_session() as sess: + sess.run( + update_op, + feed_dict={ + image_id: ['image1', 'image2'], + groundtruth_boxes: + np.array([[[100., 100., 200., 200.]], [[50., 50., 100., + 100.]]]), + groundtruth_classes: + np.array([[1], [3]]), + groundtruth_keypoints: + np.array([[[[150., 160.], [float('nan'), + float('nan')], + [float('nan'), float('nan')], [170., 180.]]], + [[[75., 76.], [float('nan'), + float('nan')], + [float('nan'), float('nan')], [77., 78.]]]]), + detection_boxes: + np.array([[[100., 100., 200., 200.]], [[50., 50., 100., + 100.]]]), + detection_scores: + np.array([[.8], [.7]]), + detection_classes: + np.array([[1], [3]]), + detection_keypoints: + np.array([[[[150., 160.], [1., 2.], [3., 4.], [170., 180.]]], + [[[75., 76.], [5., 6.], [7., 8.], [77., 78.]]]]) + }) + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/person'], + 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP@.50IOU ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP@.75IOU ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP (large) ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP (medium) ByCategory/person'], -1.0) + self.assertAlmostEqual(metrics['Keypoints_Recall/AR@1 ByCategory/person'], + 1.0) + self.assertAlmostEqual(metrics['Keypoints_Recall/AR@10 ByCategory/person'], + 1.0) + self.assertAlmostEqual(metrics['Keypoints_Recall/AR@100 ByCategory/person'], + 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Recall/AR@100 (large) ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Recall/AR@100 (medium) ByCategory/person'], -1.0) + self.assertFalse(coco_keypoint_evaluator._groundtruth_list) + self.assertFalse(coco_keypoint_evaluator._detection_boxes_list) + self.assertFalse(coco_keypoint_evaluator._image_ids) + + +class CocoMaskEvaluationTest(tf.test.TestCase): + + def testGetOneMAPWithMatchingGroundtruthAndDetections(self): + coco_evaluator = coco_evaluation.CocoMaskEvaluator(_get_categories_list()) + coco_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.InputDataFields.groundtruth_classes: np.array([1]), + standard_fields.InputDataFields.groundtruth_instance_masks: + np.pad(np.ones([1, 100, 100], dtype=np.uint8), + ((0, 0), (10, 10), (10, 10)), mode='constant') + }) + 
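+ # The detections added below reuse the exact np.pad masks of their + # groundtruth, so each of the three images is matched at IoU 1.0 and the + # mask mAP computed at the end of this test is expected to be 1.0.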
coco_evaluator.add_single_detected_image_info( + image_id='image1', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]), + standard_fields.DetectionResultFields.detection_masks: + np.pad(np.ones([1, 100, 100], dtype=np.uint8), + ((0, 0), (10, 10), (10, 10)), mode='constant') + }) + coco_evaluator.add_single_ground_truth_image_info( + image_id='image2', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[50., 50., 100., 100.]]), + standard_fields.InputDataFields.groundtruth_classes: np.array([1]), + standard_fields.InputDataFields.groundtruth_instance_masks: + np.pad(np.ones([1, 50, 50], dtype=np.uint8), + ((0, 0), (10, 10), (10, 10)), mode='constant') + }) + coco_evaluator.add_single_detected_image_info( + image_id='image2', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[50., 50., 100., 100.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]), + standard_fields.DetectionResultFields.detection_masks: + np.pad(np.ones([1, 50, 50], dtype=np.uint8), + ((0, 0), (10, 10), (10, 10)), mode='constant') + }) + coco_evaluator.add_single_ground_truth_image_info( + image_id='image3', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[25., 25., 50., 50.]]), + standard_fields.InputDataFields.groundtruth_classes: np.array([1]), + standard_fields.InputDataFields.groundtruth_instance_masks: + np.pad(np.ones([1, 25, 25], dtype=np.uint8), + ((0, 0), (10, 10), (10, 10)), mode='constant') + }) + coco_evaluator.add_single_detected_image_info( + image_id='image3', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[25., 25., 50., 50.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]), + standard_fields.DetectionResultFields.detection_masks: + np.pad(np.ones([1, 25, 25], dtype=np.uint8), + ((0, 0), (10, 10), (10, 10)), mode='constant') + }) + metrics = coco_evaluator.evaluate() + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP'], 1.0) + coco_evaluator.clear() + self.assertFalse(coco_evaluator._image_id_to_mask_shape_map) + self.assertFalse(coco_evaluator._image_ids_with_detections) + self.assertFalse(coco_evaluator._groundtruth_list) + self.assertFalse(coco_evaluator._detection_masks_list) + + +@unittest.skipIf(tf_version.is_tf2(), 'Only Supported in TF1.X') +class CocoMaskEvaluationPyFuncTest(tf.test.TestCase): + + def testAddEvalDict(self): + coco_evaluator = coco_evaluation.CocoMaskEvaluator(_get_categories_list()) + image_id = tf.placeholder(tf.string, shape=()) + groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + groundtruth_classes = tf.placeholder(tf.float32, shape=(None)) + groundtruth_masks = tf.placeholder(tf.uint8, shape=(None, None, None)) + detection_scores = tf.placeholder(tf.float32, shape=(None)) + detection_classes = tf.placeholder(tf.float32, shape=(None)) + detection_masks = tf.placeholder(tf.uint8, shape=(None, None, None)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + 
input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: groundtruth_classes, + input_data_fields.groundtruth_instance_masks: groundtruth_masks, + detection_fields.detection_scores: detection_scores, + detection_fields.detection_classes: detection_classes, + detection_fields.detection_masks: detection_masks, + } + update_op = coco_evaluator.add_eval_dict(eval_dict) + with self.test_session() as sess: + sess.run( + update_op, + feed_dict={ + image_id: + 'image1', + groundtruth_boxes: + np.array([[100., 100., 200., 200.], [50., 50., 100., 100.]]), + groundtruth_classes: + np.array([1, 2]), + groundtruth_masks: + np.stack([ + np.pad( + np.ones([100, 100], dtype=np.uint8), ((10, 10), + (10, 10)), + mode='constant'), + np.pad( + np.ones([50, 50], dtype=np.uint8), ((0, 70), (0, 70)), + mode='constant') + ]), + detection_scores: + np.array([.9, .8]), + detection_classes: + np.array([2, 1]), + detection_masks: + np.stack([ + np.pad( + np.ones([50, 50], dtype=np.uint8), ((0, 70), (0, 70)), + mode='constant'), + np.pad( + np.ones([100, 100], dtype=np.uint8), ((10, 10), + (10, 10)), + mode='constant'), + ]) + }) + self.assertLen(coco_evaluator._groundtruth_list, 2) + self.assertLen(coco_evaluator._detection_masks_list, 2) + + def testGetOneMAPWithMatchingGroundtruthAndDetections(self): + coco_evaluator = coco_evaluation.CocoMaskEvaluator(_get_categories_list()) + image_id = tf.placeholder(tf.string, shape=()) + groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + groundtruth_classes = tf.placeholder(tf.float32, shape=(None)) + groundtruth_masks = tf.placeholder(tf.uint8, shape=(None, None, None)) + detection_scores = tf.placeholder(tf.float32, shape=(None)) + detection_classes = tf.placeholder(tf.float32, shape=(None)) + detection_masks = tf.placeholder(tf.uint8, shape=(None, None, None)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: groundtruth_classes, + input_data_fields.groundtruth_instance_masks: groundtruth_masks, + detection_fields.detection_scores: detection_scores, + detection_fields.detection_classes: detection_classes, + detection_fields.detection_masks: detection_masks, + } + + eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict) + + _, update_op = eval_metric_ops['DetectionMasks_Precision/mAP'] + + with self.test_session() as sess: + sess.run( + update_op, + feed_dict={ + image_id: + 'image1', + groundtruth_boxes: + np.array([[100., 100., 200., 200.], [50., 50., 100., 100.]]), + groundtruth_classes: + np.array([1, 2]), + groundtruth_masks: + np.stack([ + np.pad( + np.ones([100, 100], dtype=np.uint8), ((10, 10), + (10, 10)), + mode='constant'), + np.pad( + np.ones([50, 50], dtype=np.uint8), ((0, 70), (0, 70)), + mode='constant') + ]), + detection_scores: + np.array([.9, .8]), + detection_classes: + np.array([2, 1]), + detection_masks: + np.stack([ + np.pad( + np.ones([50, 50], dtype=np.uint8), ((0, 70), (0, 70)), + mode='constant'), + np.pad( + np.ones([100, 100], dtype=np.uint8), ((10, 10), + (10, 10)), + mode='constant'), + ]) + }) + sess.run(update_op, + feed_dict={ + image_id: 'image2', + groundtruth_boxes: np.array([[50., 50., 100., 100.]]), + groundtruth_classes: np.array([1]), + groundtruth_masks: np.pad(np.ones([1, 50, 50], + dtype=np.uint8), + ((0, 0), (10, 10), (10, 10)), + mode='constant'), 
+ detection_scores: np.array([.8]), + detection_classes: np.array([1]), + detection_masks: np.pad(np.ones([1, 50, 50], dtype=np.uint8), + ((0, 0), (10, 10), (10, 10)), + mode='constant') + }) + sess.run(update_op, + feed_dict={ + image_id: 'image3', + groundtruth_boxes: np.array([[25., 25., 50., 50.]]), + groundtruth_classes: np.array([1]), + groundtruth_masks: np.pad(np.ones([1, 25, 25], + dtype=np.uint8), + ((0, 0), (10, 10), (10, 10)), + mode='constant'), + detection_scores: np.array([.8]), + detection_classes: np.array([1]), + detection_masks: np.pad(np.ones([1, 25, 25], + dtype=np.uint8), + ((0, 0), (10, 10), (10, 10)), + mode='constant') + }) + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP@.50IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP@.75IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP (small)'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@1'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@10'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100 (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100 (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100 (small)'], 1.0) + self.assertFalse(coco_evaluator._groundtruth_list) + self.assertFalse(coco_evaluator._image_ids_with_detections) + self.assertFalse(coco_evaluator._image_id_to_mask_shape_map) + self.assertFalse(coco_evaluator._detection_masks_list) + + def testGetOneMAPWithMatchingGroundtruthAndDetectionsBatched(self): + coco_evaluator = coco_evaluation.CocoMaskEvaluator(_get_categories_list()) + batch_size = 3 + image_id = tf.placeholder(tf.string, shape=(batch_size)) + groundtruth_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4)) + groundtruth_classes = tf.placeholder(tf.float32, shape=(batch_size, None)) + groundtruth_masks = tf.placeholder( + tf.uint8, shape=(batch_size, None, None, None)) + detection_scores = tf.placeholder(tf.float32, shape=(batch_size, None)) + detection_classes = tf.placeholder(tf.float32, shape=(batch_size, None)) + detection_masks = tf.placeholder( + tf.uint8, shape=(batch_size, None, None, None)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: groundtruth_classes, + input_data_fields.groundtruth_instance_masks: groundtruth_masks, + detection_fields.detection_scores: detection_scores, + detection_fields.detection_classes: detection_classes, + detection_fields.detection_masks: detection_masks, + } + + eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict) + + _, update_op = eval_metric_ops['DetectionMasks_Precision/mAP'] + + with self.test_session() as sess: + sess.run( + update_op, + feed_dict={ + image_id: ['image1', 'image2', 'image3'], + groundtruth_boxes: + np.array([[[100., 100., 200., 200.]], + [[50., 50., 100., 100.]], + [[25., 25., 50., 50.]]]), + 
groundtruth_classes: + np.array([[1], [1], [1]]), + groundtruth_masks: + np.stack([ + np.pad( + np.ones([1, 100, 100], dtype=np.uint8), + ((0, 0), (0, 0), (0, 0)), + mode='constant'), + np.pad( + np.ones([1, 50, 50], dtype=np.uint8), + ((0, 0), (25, 25), (25, 25)), + mode='constant'), + np.pad( + np.ones([1, 25, 25], dtype=np.uint8), + ((0, 0), (37, 38), (37, 38)), + mode='constant') + ], + axis=0), + detection_scores: + np.array([[.8], [.8], [.8]]), + detection_classes: + np.array([[1], [1], [1]]), + detection_masks: + np.stack([ + np.pad( + np.ones([1, 100, 100], dtype=np.uint8), + ((0, 0), (0, 0), (0, 0)), + mode='constant'), + np.pad( + np.ones([1, 50, 50], dtype=np.uint8), + ((0, 0), (25, 25), (25, 25)), + mode='constant'), + np.pad( + np.ones([1, 25, 25], dtype=np.uint8), + ((0, 0), (37, 38), (37, 38)), + mode='constant') + ], + axis=0) + }) + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP@.50IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP@.75IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP (small)'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@1'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@10'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100 (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100 (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100 (small)'], 1.0) + self.assertFalse(coco_evaluator._groundtruth_list) + self.assertFalse(coco_evaluator._image_ids_with_detections) + self.assertFalse(coco_evaluator._image_id_to_mask_shape_map) + self.assertFalse(coco_evaluator._detection_masks_list) + + +def _get_panoptic_test_data(): + # image1 contains 3 people in gt, (2 normal annotation and 1 "is_crowd" + # annotation), and 3 people in prediction. + gt_masks1 = np.zeros((3, 50, 50), dtype=np.uint8) + result_masks1 = np.zeros((3, 50, 50), dtype=np.uint8) + gt_masks1[0, 10:20, 20:30] = 1 + result_masks1[0, 10:18, 20:30] = 1 + gt_masks1[1, 25:30, 25:35] = 1 + result_masks1[1, 18:25, 25:30] = 1 + gt_masks1[2, 40:50, 40:50] = 1 + result_masks1[2, 47:50, 47:50] = 1 + gt_class1 = np.array([1, 1, 1]) + gt_is_crowd1 = np.array([0, 0, 1]) + result_class1 = np.array([1, 1, 1]) + + # image2 contains 1 dog and 1 cat in gt, while 1 person and 1 dog in + # prediction. 
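+ # With these masks the only true positives are the first person in image1 + # (mask IoU 0.8) and the dog in image2 (IoU 135/195). One unmatched person + # prediction in image1 and the person prediction in image2 are false + # positives, the small prediction lying inside the is_crowd region is + # ignored, and one person groundtruth goes unmatched; together these counts + # produce the PanopticQuality values asserted in the tests below.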
+ gt_masks2 = np.zeros((2, 30, 40), dtype=np.uint8) + result_masks2 = np.zeros((2, 30, 40), dtype=np.uint8) + gt_masks2[0, 5:15, 20:35] = 1 + gt_masks2[1, 20:30, 0:10] = 1 + result_masks2[0, 20:25, 10:15] = 1 + result_masks2[1, 6:15, 15:35] = 1 + gt_class2 = np.array([2, 3]) + gt_is_crowd2 = np.array([0, 0]) + result_class2 = np.array([1, 2]) + + gt_class = [gt_class1, gt_class2] + gt_masks = [gt_masks1, gt_masks2] + gt_is_crowd = [gt_is_crowd1, gt_is_crowd2] + result_class = [result_class1, result_class2] + result_masks = [result_masks1, result_masks2] + return gt_class, gt_masks, gt_is_crowd, result_class, result_masks + + +class CocoPanopticEvaluationTest(tf.test.TestCase): + + def test_panoptic_quality(self): + pq_evaluator = coco_evaluation.CocoPanopticSegmentationEvaluator( + _get_categories_list(), include_metrics_per_category=True) + (gt_class, gt_masks, gt_is_crowd, result_class, + result_masks) = _get_panoptic_test_data() + + for i in range(2): + pq_evaluator.add_single_ground_truth_image_info( + image_id='image%d' % i, + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_classes: + gt_class[i], + standard_fields.InputDataFields.groundtruth_instance_masks: + gt_masks[i], + standard_fields.InputDataFields.groundtruth_is_crowd: + gt_is_crowd[i] + }) + + pq_evaluator.add_single_detected_image_info( + image_id='image%d' % i, + detections_dict={ + standard_fields.DetectionResultFields.detection_classes: + result_class[i], + standard_fields.DetectionResultFields.detection_masks: + result_masks[i] + }) + + metrics = pq_evaluator.evaluate() + self.assertAlmostEqual(metrics['PanopticQuality@0.50IOU_ByCategory/person'], + 0.32) + self.assertAlmostEqual(metrics['PanopticQuality@0.50IOU_ByCategory/dog'], + 135.0 / 195) + self.assertAlmostEqual(metrics['PanopticQuality@0.50IOU_ByCategory/cat'], 0) + self.assertAlmostEqual(metrics['SegmentationQuality@0.50IOU'], + (0.8 + 135.0 / 195) / 3) + self.assertAlmostEqual(metrics['RecognitionQuality@0.50IOU'], (0.4 + 1) / 3) + self.assertAlmostEqual(metrics['PanopticQuality@0.50IOU'], + (0.32 + 135.0 / 195) / 3) + self.assertEqual(metrics['NumValidClasses'], 3) + self.assertEqual(metrics['NumTotalClasses'], 3) + + +@unittest.skipIf(tf_version.is_tf2(), 'Only Supported in TF1.X') +class CocoPanopticEvaluationPyFuncTest(tf.test.TestCase): + + def testPanopticQualityNoBatch(self): + pq_evaluator = coco_evaluation.CocoPanopticSegmentationEvaluator( + _get_categories_list(), include_metrics_per_category=True) + + image_id = tf.placeholder(tf.string, shape=()) + groundtruth_classes = tf.placeholder(tf.int32, shape=(None)) + groundtruth_masks = tf.placeholder(tf.uint8, shape=(None, None, None)) + groundtruth_is_crowd = tf.placeholder(tf.int32, shape=(None)) + detection_classes = tf.placeholder(tf.int32, shape=(None)) + detection_masks = tf.placeholder(tf.uint8, shape=(None, None, None)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + input_data_fields.groundtruth_classes: groundtruth_classes, + input_data_fields.groundtruth_instance_masks: groundtruth_masks, + input_data_fields.groundtruth_is_crowd: groundtruth_is_crowd, + detection_fields.detection_classes: detection_classes, + detection_fields.detection_masks: detection_masks, + } + + eval_metric_ops = pq_evaluator.get_estimator_eval_metric_ops(eval_dict) + + _, update_op = eval_metric_ops['PanopticQuality@0.50IOU'] + (gt_class, gt_masks, gt_is_crowd, result_class, + 
result_masks) = _get_panoptic_test_data() + + with self.test_session() as sess: + for i in range(2): + sess.run( + update_op, + feed_dict={ + image_id: 'image%d' % i, + groundtruth_classes: gt_class[i], + groundtruth_masks: gt_masks[i], + groundtruth_is_crowd: gt_is_crowd[i], + detection_classes: result_class[i], + detection_masks: result_masks[i] + }) + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['PanopticQuality@0.50IOU'], + (0.32 + 135.0 / 195) / 3) + + def testPanopticQualityBatched(self): + pq_evaluator = coco_evaluation.CocoPanopticSegmentationEvaluator( + _get_categories_list(), include_metrics_per_category=True) + batch_size = 2 + image_id = tf.placeholder(tf.string, shape=(batch_size)) + groundtruth_classes = tf.placeholder(tf.int32, shape=(batch_size, None)) + groundtruth_masks = tf.placeholder( + tf.uint8, shape=(batch_size, None, None, None)) + groundtruth_is_crowd = tf.placeholder(tf.int32, shape=(batch_size, None)) + detection_classes = tf.placeholder(tf.int32, shape=(batch_size, None)) + detection_masks = tf.placeholder( + tf.uint8, shape=(batch_size, None, None, None)) + num_gt_masks_per_image = tf.placeholder(tf.int32, shape=(batch_size)) + num_det_masks_per_image = tf.placeholder(tf.int32, shape=(batch_size)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + input_data_fields.groundtruth_classes: groundtruth_classes, + input_data_fields.groundtruth_instance_masks: groundtruth_masks, + input_data_fields.groundtruth_is_crowd: groundtruth_is_crowd, + input_data_fields.num_groundtruth_boxes: num_gt_masks_per_image, + detection_fields.detection_classes: detection_classes, + detection_fields.detection_masks: detection_masks, + detection_fields.num_detections: num_det_masks_per_image, + } + + eval_metric_ops = pq_evaluator.get_estimator_eval_metric_ops(eval_dict) + + _, update_op = eval_metric_ops['PanopticQuality@0.50IOU'] + (gt_class, gt_masks, gt_is_crowd, result_class, + result_masks) = _get_panoptic_test_data() + with self.test_session() as sess: + sess.run( + update_op, + feed_dict={ + image_id: ['image0', 'image1'], + groundtruth_classes: + np.stack([ + gt_class[0], + np.pad(gt_class[1], (0, 1), mode='constant') + ], + axis=0), + groundtruth_masks: + np.stack([ + np.pad( + gt_masks[0], ((0, 0), (0, 10), (0, 10)), + mode='constant'), + np.pad( + gt_masks[1], ((0, 1), (0, 30), (0, 20)), + mode='constant'), + ], + axis=0), + groundtruth_is_crowd: + np.stack([ + gt_is_crowd[0], + np.pad(gt_is_crowd[1], (0, 1), mode='constant') + ], + axis=0), + num_gt_masks_per_image: np.array([3, 2]), + detection_classes: + np.stack([ + result_class[0], + np.pad(result_class[1], (0, 1), mode='constant') + ], + axis=0), + detection_masks: + np.stack([ + np.pad( + result_masks[0], ((0, 0), (0, 10), (0, 10)), + mode='constant'), + np.pad( + result_masks[1], ((0, 1), (0, 30), (0, 20)), + mode='constant'), + ], + axis=0), + num_det_masks_per_image: np.array([3, 2]), + }) + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['PanopticQuality@0.50IOU'], + (0.32 + 135.0 / 195) / 3) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/metrics/coco_tools.py b/models/research/object_detection/metrics/coco_tools.py new file mode 100644 
index 0000000000000000000000000000000000000000..790d5bdef23bef149e8eb1afa9cdecb9ce458e6e --- /dev/null +++ b/models/research/object_detection/metrics/coco_tools.py @@ -0,0 +1,951 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Wrappers for third party pycocotools to be used within object_detection. + +Note that nothing in this file is tensorflow related and thus cannot +be called directly as a slim metric, for example. + +TODO(jonathanhuang): wrap as a slim metric in metrics.py + + +Usage example: given a set of images with ids in the list image_ids +and corresponding lists of numpy arrays encoding groundtruth (boxes and classes) +and detections (boxes, scores and classes), where elements of each list +correspond to detections/annotations of a single image, +then evaluation (in multi-class mode) can be invoked as follows: + + groundtruth_dict = coco_tools.ExportGroundtruthToCOCO( + image_ids, groundtruth_boxes_list, groundtruth_classes_list, + max_num_classes, output_path=None) + detections_list = coco_tools.ExportDetectionsToCOCO( + image_ids, detection_boxes_list, detection_scores_list, + detection_classes_list, output_path=None) + groundtruth = coco_tools.COCOWrapper(groundtruth_dict) + detections = groundtruth.LoadAnnotations(detections_list) + evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections, + agnostic_mode=False) + metrics = evaluator.ComputeMetrics() + +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from collections import OrderedDict +import copy +import time +import numpy as np + +from pycocotools import coco +from pycocotools import cocoeval +from pycocotools import mask + +import six +from six.moves import range +from six.moves import zip +import tensorflow.compat.v1 as tf + +from object_detection.utils import json_utils + + +class COCOWrapper(coco.COCO): + """Wrapper for the pycocotools COCO class.""" + + def __init__(self, dataset, detection_type='bbox'): + """COCOWrapper constructor. + + See http://mscoco.org/dataset/#format for a description of the format. + By default, the coco.COCO class constructor reads from a JSON file. + This function duplicates the same behavior but loads from a dictionary, + allowing us to perform evaluation without writing to external storage. + + Args: + dataset: a dictionary holding bounding box annotations in the COCO format. + detection_type: type of detections being wrapped. Can be one of ['bbox', + 'segmentation'] + + Raises: + ValueError: if detection_type is unsupported. + """ + supported_detection_types = ['bbox', 'segmentation'] + if detection_type not in supported_detection_types: + raise ValueError('Unsupported detection type: {}. 
' + 'Supported values are: {}'.format( + detection_type, supported_detection_types)) + self._detection_type = detection_type + coco.COCO.__init__(self) + self.dataset = dataset + self.createIndex() + + def LoadAnnotations(self, annotations): + """Load annotations dictionary into COCO datastructure. + + See http://mscoco.org/dataset/#format for a description of the annotations + format. As above, this function replicates the default behavior of the API + but does not require writing to external storage. + + Args: + annotations: python list holding object detection results where each + detection is encoded as a dict with required keys ['image_id', + 'category_id', 'score'] and one of ['bbox', 'segmentation'] based on + `detection_type`. + + Returns: + a coco.COCO datastructure holding object detection annotations results + + Raises: + ValueError: if annotations is not a list + ValueError: if annotations do not correspond to the images contained + in self. + """ + results = coco.COCO() + results.dataset['images'] = [img for img in self.dataset['images']] + + tf.logging.info('Loading and preparing annotation results...') + tic = time.time() + + if not isinstance(annotations, list): + raise ValueError('annotations is not a list of objects') + annotation_img_ids = [ann['image_id'] for ann in annotations] + if (set(annotation_img_ids) != (set(annotation_img_ids) + & set(self.getImgIds()))): + raise ValueError('Results do not correspond to current coco set') + results.dataset['categories'] = copy.deepcopy(self.dataset['categories']) + if self._detection_type == 'bbox': + for idx, ann in enumerate(annotations): + bb = ann['bbox'] + ann['area'] = bb[2] * bb[3] + ann['id'] = idx + 1 + ann['iscrowd'] = 0 + elif self._detection_type == 'segmentation': + for idx, ann in enumerate(annotations): + ann['area'] = mask.area(ann['segmentation']) + ann['bbox'] = mask.toBbox(ann['segmentation']) + ann['id'] = idx + 1 + ann['iscrowd'] = 0 + tf.logging.info('DONE (t=%0.2fs)', (time.time() - tic)) + + results.dataset['annotations'] = annotations + results.createIndex() + return results + + +class COCOEvalWrapper(cocoeval.COCOeval): + """Wrapper for the pycocotools COCOeval class. + + To evaluate, create two objects (groundtruth_dict and detections_list) + using the conventions listed at http://mscoco.org/dataset/#format. + Then call evaluation as follows: + + groundtruth = coco_tools.COCOWrapper(groundtruth_dict) + detections = groundtruth.LoadAnnotations(detections_list) + evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections, + agnostic_mode=False) + + metrics = evaluator.ComputeMetrics() + """ + + def __init__(self, groundtruth=None, detections=None, agnostic_mode=False, + iou_type='bbox', oks_sigmas=None): + """COCOEvalWrapper constructor. + + Note that for the area-based metrics to be meaningful, detection and + groundtruth boxes must be in image coordinates measured in pixels. + + Args: + groundtruth: a coco.COCO (or coco_tools.COCOWrapper) object holding + groundtruth annotations + detections: a coco.COCO (or coco_tools.COCOWrapper) object holding + detections + agnostic_mode: boolean (default: False). If True, evaluation ignores + class labels, treating all detections as proposals. + iou_type: IOU type to use for evaluation. Supports `bbox', `segm`, + `keypoints`. + oks_sigmas: Float numpy array holding the OKS variances for keypoints. 
+ """ + cocoeval.COCOeval.__init__(self, groundtruth, detections, iouType=iou_type) + if oks_sigmas is not None: + self.params.kpt_oks_sigmas = oks_sigmas + if agnostic_mode: + self.params.useCats = 0 + self._iou_type = iou_type + + def GetCategory(self, category_id): + """Fetches dictionary holding category information given category id. + + Args: + category_id: integer id + Returns: + dictionary holding 'id', 'name'. + """ + return self.cocoGt.cats[category_id] + + def GetAgnosticMode(self): + """Returns true if COCO Eval is configured to evaluate in agnostic mode.""" + return self.params.useCats == 0 + + def GetCategoryIdList(self): + """Returns list of valid category ids.""" + return self.params.catIds + + def ComputeMetrics(self, + include_metrics_per_category=False, + all_metrics_per_category=False): + """Computes detection/keypoint metrics. + + Args: + include_metrics_per_category: If True, will include metrics per category. + all_metrics_per_category: If True, include all the summary metrics for + each category in per_category_ap. Be careful with setting it to True if + you have more than a handful of categories, because it will pollute + your mldash. + + Returns: + 1. summary_metrics: a dictionary holding: + 'Precision/mAP': mean average precision over classes averaged over IOU + thresholds ranging from .5 to .95 with .05 increments + 'Precision/mAP@.50IOU': mean average precision at 50% IOU + 'Precision/mAP@.75IOU': mean average precision at 75% IOU + 'Precision/mAP (small)': mean average precision for small objects + (area < 32^2 pixels). NOTE: not present for 'keypoints' + 'Precision/mAP (medium)': mean average precision for medium sized + objects (32^2 pixels < area < 96^2 pixels) + 'Precision/mAP (large)': mean average precision for large objects + (96^2 pixels < area < 10000^2 pixels) + 'Recall/AR@1': average recall with 1 detection + 'Recall/AR@10': average recall with 10 detections + 'Recall/AR@100': average recall with 100 detections + 'Recall/AR@100 (small)': average recall for small objects with 100 + detections. NOTE: not present for 'keypoints' + 'Recall/AR@100 (medium)': average recall for medium objects with 100 + detections + 'Recall/AR@100 (large)': average recall for large objects with 100 + detections + 2. per_category_ap: a dictionary holding category specific results with + keys of the form: 'Precision mAP ByCategory/category' + (without the supercategory part if no supercategories exist). + For backward compatibility 'PerformanceByCategory' is included in the + output regardless of all_metrics_per_category. + If evaluating in class-agnostic mode, per_category_ap is an empty + dictionary. + + Raises: + ValueError: If category_stats does not exist. 
+ """ + self.evaluate() + self.accumulate() + self.summarize() + + summary_metrics = {} + if self._iou_type in ['bbox', 'segm']: + summary_metrics = OrderedDict([('Precision/mAP', self.stats[0]), + ('Precision/mAP@.50IOU', self.stats[1]), + ('Precision/mAP@.75IOU', self.stats[2]), + ('Precision/mAP (small)', self.stats[3]), + ('Precision/mAP (medium)', self.stats[4]), + ('Precision/mAP (large)', self.stats[5]), + ('Recall/AR@1', self.stats[6]), + ('Recall/AR@10', self.stats[7]), + ('Recall/AR@100', self.stats[8]), + ('Recall/AR@100 (small)', self.stats[9]), + ('Recall/AR@100 (medium)', self.stats[10]), + ('Recall/AR@100 (large)', self.stats[11])]) + elif self._iou_type == 'keypoints': + category_id = self.GetCategoryIdList()[0] + category_name = self.GetCategory(category_id)['name'] + summary_metrics = OrderedDict([]) + summary_metrics['Precision/mAP ByCategory/{}'.format( + category_name)] = self.stats[0] + summary_metrics['Precision/mAP@.50IOU ByCategory/{}'.format( + category_name)] = self.stats[1] + summary_metrics['Precision/mAP@.75IOU ByCategory/{}'.format( + category_name)] = self.stats[2] + summary_metrics['Precision/mAP (medium) ByCategory/{}'.format( + category_name)] = self.stats[3] + summary_metrics['Precision/mAP (large) ByCategory/{}'.format( + category_name)] = self.stats[4] + summary_metrics['Recall/AR@1 ByCategory/{}'.format( + category_name)] = self.stats[5] + summary_metrics['Recall/AR@10 ByCategory/{}'.format( + category_name)] = self.stats[6] + summary_metrics['Recall/AR@100 ByCategory/{}'.format( + category_name)] = self.stats[7] + summary_metrics['Recall/AR@100 (medium) ByCategory/{}'.format( + category_name)] = self.stats[8] + summary_metrics['Recall/AR@100 (large) ByCategory/{}'.format( + category_name)] = self.stats[9] + if not include_metrics_per_category: + return summary_metrics, {} + if not hasattr(self, 'category_stats'): + raise ValueError('Category stats do not exist') + per_category_ap = OrderedDict([]) + if self.GetAgnosticMode(): + return summary_metrics, per_category_ap + for category_index, category_id in enumerate(self.GetCategoryIdList()): + category = self.GetCategory(category_id)['name'] + # Kept for backward compatibility + per_category_ap['PerformanceByCategory/mAP/{}'.format( + category)] = self.category_stats[0][category_index] + if all_metrics_per_category: + per_category_ap['Precision mAP ByCategory/{}'.format( + category)] = self.category_stats[0][category_index] + per_category_ap['Precision mAP@.50IOU ByCategory/{}'.format( + category)] = self.category_stats[1][category_index] + per_category_ap['Precision mAP@.75IOU ByCategory/{}'.format( + category)] = self.category_stats[2][category_index] + per_category_ap['Precision mAP (small) ByCategory/{}'.format( + category)] = self.category_stats[3][category_index] + per_category_ap['Precision mAP (medium) ByCategory/{}'.format( + category)] = self.category_stats[4][category_index] + per_category_ap['Precision mAP (large) ByCategory/{}'.format( + category)] = self.category_stats[5][category_index] + per_category_ap['Recall AR@1 ByCategory/{}'.format( + category)] = self.category_stats[6][category_index] + per_category_ap['Recall AR@10 ByCategory/{}'.format( + category)] = self.category_stats[7][category_index] + per_category_ap['Recall AR@100 ByCategory/{}'.format( + category)] = self.category_stats[8][category_index] + per_category_ap['Recall AR@100 (small) ByCategory/{}'.format( + category)] = self.category_stats[9][category_index] + per_category_ap['Recall AR@100 (medium) ByCategory/{}'.format( 
+ category)] = self.category_stats[10][category_index] + per_category_ap['Recall AR@100 (large) ByCategory/{}'.format( + category)] = self.category_stats[11][category_index] + + return summary_metrics, per_category_ap + + +def _ConvertBoxToCOCOFormat(box): + """Converts a box in [ymin, xmin, ymax, xmax] format to COCO format. + + This is a utility function for converting from our internal + [ymin, xmin, ymax, xmax] convention to the convention used by the COCO API + i.e., [xmin, ymin, width, height]. + + Args: + box: a [ymin, xmin, ymax, xmax] numpy array + + Returns: + a list of floats representing [xmin, ymin, width, height] + """ + return [float(box[1]), float(box[0]), float(box[3] - box[1]), + float(box[2] - box[0])] + + +def _RleCompress(masks): + """Compresses mask using Run-length encoding provided by pycocotools. + + Args: + masks: uint8 numpy array of shape [mask_height, mask_width] with values in + {0, 1}. + + Returns: + A pycocotools Run-length encoding of the mask. + """ + rle = mask.encode(np.asfortranarray(masks)) + rle['counts'] = six.ensure_str(rle['counts']) + return rle + + +def ExportSingleImageGroundtruthToCoco(image_id, + next_annotation_id, + category_id_set, + groundtruth_boxes, + groundtruth_classes, + groundtruth_keypoints=None, + groundtruth_keypoint_visibilities=None, + groundtruth_masks=None, + groundtruth_is_crowd=None, + groundtruth_area=None): + """Export groundtruth of a single image to COCO format. + + This function converts groundtruth detection annotations represented as numpy + arrays to dictionaries that can be ingested by the COCO evaluation API. Note + that the image_ids provided here must match the ones given to + ExportSingleImageDetectionsToCoco. We assume that boxes and classes are in + correspondence - that is: groundtruth_boxes[i, :], and + groundtruth_classes[i] are associated with the same groundtruth annotation. + + In the exported result, "area" fields are always set to the area of the + groundtruth bounding box. + + Args: + image_id: a unique image identifier either of type integer or string. + next_annotation_id: integer specifying the first id to use for the + groundtruth annotations. All annotations are assigned a continuous integer + id starting from this value. + category_id_set: A set of valid class ids. Groundtruth with classes not in + category_id_set are dropped. + groundtruth_boxes: numpy array (float32) with shape [num_gt_boxes, 4] + groundtruth_classes: numpy array (int) with shape [num_gt_boxes] + groundtruth_keypoints: optional float numpy array of keypoints + with shape [num_gt_boxes, num_keypoints, 2]. + groundtruth_keypoint_visibilities: optional integer numpy array of keypoint + visibilities with shape [num_gt_boxes, num_keypoints]. Integer is treated + as an enum with 0=not labels, 1=labeled but not visible and 2=labeled and + visible. + groundtruth_masks: optional uint8 numpy array of shape [num_detections, + image_height, image_width] containing detection_masks. + groundtruth_is_crowd: optional numpy array (int) with shape [num_gt_boxes] + indicating whether groundtruth boxes are crowd. + groundtruth_area: numpy array (float32) with shape [num_gt_boxes]. If + provided, then the area values (in the original absolute coordinates) will + be populated instead of calculated from bounding box coordinates. + + Returns: + a list of groundtruth annotations for a single image in the COCO format. 
+ + Raises: + ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the + right lengths or (2) if each of the elements inside these lists do not + have the correct shapes or (3) if image_ids are not integers + """ + + if len(groundtruth_classes.shape) != 1: + raise ValueError('groundtruth_classes is ' + 'expected to be of rank 1.') + if len(groundtruth_boxes.shape) != 2: + raise ValueError('groundtruth_boxes is expected to be of ' + 'rank 2.') + if groundtruth_boxes.shape[1] != 4: + raise ValueError('groundtruth_boxes should have ' + 'shape[1] == 4.') + num_boxes = groundtruth_classes.shape[0] + if num_boxes != groundtruth_boxes.shape[0]: + raise ValueError('Corresponding entries in groundtruth_classes, ' + 'and groundtruth_boxes should have ' + 'compatible shapes (i.e., agree on the 0th dimension).' + 'Classes shape: %d. Boxes shape: %d. Image ID: %s' % ( + groundtruth_classes.shape[0], + groundtruth_boxes.shape[0], image_id)) + has_is_crowd = groundtruth_is_crowd is not None + if has_is_crowd and len(groundtruth_is_crowd.shape) != 1: + raise ValueError('groundtruth_is_crowd is expected to be of rank 1.') + has_keypoints = groundtruth_keypoints is not None + has_keypoint_visibilities = groundtruth_keypoint_visibilities is not None + if has_keypoints and not has_keypoint_visibilities: + groundtruth_keypoint_visibilities = np.full( + (num_boxes, groundtruth_keypoints.shape[1]), 2) + groundtruth_list = [] + for i in range(num_boxes): + if groundtruth_classes[i] in category_id_set: + iscrowd = groundtruth_is_crowd[i] if has_is_crowd else 0 + if groundtruth_area is not None and groundtruth_area[i] > 0: + area = float(groundtruth_area[i]) + else: + area = float((groundtruth_boxes[i, 2] - groundtruth_boxes[i, 0]) * + (groundtruth_boxes[i, 3] - groundtruth_boxes[i, 1])) + export_dict = { + 'id': + next_annotation_id + i, + 'image_id': + image_id, + 'category_id': + int(groundtruth_classes[i]), + 'bbox': + list(_ConvertBoxToCOCOFormat(groundtruth_boxes[i, :])), + 'area': area, + 'iscrowd': + iscrowd + } + if groundtruth_masks is not None: + export_dict['segmentation'] = _RleCompress(groundtruth_masks[i]) + if has_keypoints: + keypoints = groundtruth_keypoints[i] + visibilities = np.reshape(groundtruth_keypoint_visibilities[i], [-1]) + coco_keypoints = [] + num_valid_keypoints = 0 + for keypoint, visibility in zip(keypoints, visibilities): + # Convert from [y, x] to [x, y] as mandated by COCO. + coco_keypoints.append(float(keypoint[1])) + coco_keypoints.append(float(keypoint[0])) + coco_keypoints.append(int(visibility)) + if int(visibility) > 0: + num_valid_keypoints = num_valid_keypoints + 1 + export_dict['keypoints'] = coco_keypoints + export_dict['num_keypoints'] = num_valid_keypoints + + groundtruth_list.append(export_dict) + return groundtruth_list + + +def ExportGroundtruthToCOCO(image_ids, + groundtruth_boxes, + groundtruth_classes, + categories, + output_path=None): + """Export groundtruth detection annotations in numpy arrays to COCO API. + + This function converts a set of groundtruth detection annotations represented + as numpy arrays to dictionaries that can be ingested by the COCO API. + Inputs to this function are three lists: image ids for each groundtruth image, + groundtruth boxes for each image and groundtruth classes respectively. + Note that the image_ids provided here must match the ones given to the + ExportDetectionsToCOCO function in order for evaluation to work properly. 
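A small, hypothetical call to ExportSingleImageGroundtruthToCoco as defined above, assuming the object_detection package is importable; the image id, boxes and class ids are invented.

```python
import numpy as np
from object_detection.metrics import coco_tools

annotations = coco_tools.ExportSingleImageGroundtruthToCoco(
    image_id='img_0001',
    next_annotation_id=1,
    category_id_set={1, 2},
    groundtruth_boxes=np.array([[10., 10., 50., 60.],
                                [0., 0., 20., 20.]], dtype=np.float32),
    groundtruth_classes=np.array([1, 3], dtype=np.int32))
# Class 3 is not in category_id_set, so only the first box is exported:
# its 'bbox' is [10.0, 10.0, 50.0, 40.0] and 'area' is (50-10)*(60-10) == 2000.0.
```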
+ We assume that for each image, boxes, scores and classes are in + correspondence --- that is: image_id[i], groundtruth_boxes[i, :] and + groundtruth_classes[i] are associated with the same groundtruth annotation. + + In the exported result, "area" fields are always set to the area of the + groundtruth bounding box and "iscrowd" fields are always set to 0. + TODO(jonathanhuang): pass in "iscrowd" array for evaluating on COCO dataset. + + Args: + image_ids: a list of unique image identifier either of type integer or + string. + groundtruth_boxes: list of numpy arrays with shape [num_gt_boxes, 4] + (note that num_gt_boxes can be different for each entry in the list) + groundtruth_classes: list of numpy arrays (int) with shape [num_gt_boxes] + (note that num_gt_boxes can be different for each entry in the list) + categories: a list of dictionaries representing all possible categories. + Each dict in this list has the following keys: + 'id': (required) an integer id uniquely identifying this category + 'name': (required) string representing category name + e.g., 'cat', 'dog', 'pizza' + 'supercategory': (optional) string representing the supercategory + e.g., 'animal', 'vehicle', 'food', etc + output_path: (optional) path for exporting result to JSON + Returns: + dictionary that can be read by COCO API + Raises: + ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the + right lengths or (2) if each of the elements inside these lists do not + have the correct shapes or (3) if image_ids are not integers + """ + category_id_set = set([cat['id'] for cat in categories]) + groundtruth_export_list = [] + image_export_list = [] + if not len(image_ids) == len(groundtruth_boxes) == len(groundtruth_classes): + raise ValueError('Input lists must have the same length') + + # For reasons internal to the COCO API, it is important that annotation ids + # are not equal to zero; we thus start counting from 1. + annotation_id = 1 + for image_id, boxes, classes in zip(image_ids, groundtruth_boxes, + groundtruth_classes): + image_export_list.append({'id': image_id}) + groundtruth_export_list.extend(ExportSingleImageGroundtruthToCoco( + image_id, + annotation_id, + category_id_set, + boxes, + classes)) + num_boxes = classes.shape[0] + annotation_id += num_boxes + + groundtruth_dict = { + 'annotations': groundtruth_export_list, + 'images': image_export_list, + 'categories': categories + } + if output_path: + with tf.gfile.GFile(output_path, 'w') as fid: + json_utils.Dump(groundtruth_dict, fid, float_digits=4, indent=2) + return groundtruth_dict + + +def ExportSingleImageDetectionBoxesToCoco(image_id, + category_id_set, + detection_boxes, + detection_scores, + detection_classes, + detection_keypoints=None, + detection_keypoint_visibilities=None): + """Export detections of a single image to COCO format. + + This function converts detections represented as numpy arrays to dictionaries + that can be ingested by the COCO evaluation API. Note that the image_ids + provided here must match the ones given to the + ExporSingleImageDetectionBoxesToCoco. We assume that boxes, and classes are in + correspondence - that is: boxes[i, :], and classes[i] + are associated with the same groundtruth annotation. + + Args: + image_id: unique image identifier either of type integer or string. + category_id_set: A set of valid class ids. Detections with classes not in + category_id_set are dropped. + detection_boxes: float numpy array of shape [num_detections, 4] containing + detection boxes. 
+ detection_scores: float numpy array of shape [num_detections] containing + scored for the detection boxes. + detection_classes: integer numpy array of shape [num_detections] containing + the classes for detection boxes. + detection_keypoints: optional float numpy array of keypoints + with shape [num_detections, num_keypoints, 2]. + detection_keypoint_visibilities: optional integer numpy array of keypoint + visibilities with shape [num_detections, num_keypoints]. Integer is + treated as an enum with 0=not labels, 1=labeled but not visible and + 2=labeled and visible. + + Returns: + a list of detection annotations for a single image in the COCO format. + + Raises: + ValueError: if (1) detection_boxes, detection_scores and detection_classes + do not have the right lengths or (2) if each of the elements inside these + lists do not have the correct shapes or (3) if image_ids are not integers. + """ + + if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1: + raise ValueError('All entries in detection_classes and detection_scores' + 'expected to be of rank 1.') + if len(detection_boxes.shape) != 2: + raise ValueError('All entries in detection_boxes expected to be of ' + 'rank 2.') + if detection_boxes.shape[1] != 4: + raise ValueError('All entries in detection_boxes should have ' + 'shape[1] == 4.') + num_boxes = detection_classes.shape[0] + if not num_boxes == detection_boxes.shape[0] == detection_scores.shape[0]: + raise ValueError('Corresponding entries in detection_classes, ' + 'detection_scores and detection_boxes should have ' + 'compatible shapes (i.e., agree on the 0th dimension). ' + 'Classes shape: %d. Boxes shape: %d. ' + 'Scores shape: %d' % ( + detection_classes.shape[0], detection_boxes.shape[0], + detection_scores.shape[0] + )) + detections_list = [] + for i in range(num_boxes): + if detection_classes[i] in category_id_set: + export_dict = { + 'image_id': + image_id, + 'category_id': + int(detection_classes[i]), + 'bbox': + list(_ConvertBoxToCOCOFormat(detection_boxes[i, :])), + 'score': + float(detection_scores[i]), + } + if detection_keypoints is not None: + keypoints = detection_keypoints[i] + num_keypoints = keypoints.shape[0] + if detection_keypoint_visibilities is None: + detection_keypoint_visibilities = np.full((num_boxes, num_keypoints), + 2) + visibilities = np.reshape(detection_keypoint_visibilities[i], [-1]) + coco_keypoints = [] + for keypoint, visibility in zip(keypoints, visibilities): + # Convert from [y, x] to [x, y] as mandated by COCO. + coco_keypoints.append(float(keypoint[1])) + coco_keypoints.append(float(keypoint[0])) + coco_keypoints.append(int(visibility)) + export_dict['keypoints'] = coco_keypoints + export_dict['num_keypoints'] = num_keypoints + detections_list.append(export_dict) + + return detections_list + + +def ExportSingleImageDetectionMasksToCoco(image_id, + category_id_set, + detection_masks, + detection_scores, + detection_classes): + """Export detection masks of a single image to COCO format. + + This function converts detections represented as numpy arrays to dictionaries + that can be ingested by the COCO evaluation API. We assume that + detection_masks, detection_scores, and detection_classes are in correspondence + - that is: detection_masks[i, :], detection_classes[i] and detection_scores[i] + are associated with the same annotation. + + Args: + image_id: unique image identifier either of type integer or string. + category_id_set: A set of valid class ids. Detections with classes not in + category_id_set are dropped. 
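A hedged sketch of the keypoint handling in ExportSingleImageDetectionBoxesToCoco above: keypoints supplied as [y, x] come back flattened as [x1, y1, v1, x2, y2, v2, ...], with visibility defaulting to 2. All values are invented.

```python
import numpy as np
from object_detection.metrics import coco_tools

dets = coco_tools.ExportSingleImageDetectionBoxesToCoco(
    image_id='img_0001',
    category_id_set={1},
    detection_boxes=np.array([[0., 0., 1., 1.]]),
    detection_scores=np.array([0.9]),
    detection_classes=np.array([1], dtype=np.int32),
    detection_keypoints=np.array([[[0.2, 0.4], [0.6, 0.8]]]))
# dets[0]['keypoints'] == [0.4, 0.2, 2, 0.8, 0.6, 2]   (y/x swapped to x/y)
# dets[0]['num_keypoints'] == 2 and dets[0]['score'] == 0.9
```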
+ detection_masks: uint8 numpy array of shape [num_detections, image_height, + image_width] containing detection_masks. + detection_scores: float numpy array of shape [num_detections] containing + scores for detection masks. + detection_classes: integer numpy array of shape [num_detections] containing + the classes for detection masks. + + Returns: + a list of detection mask annotations for a single image in the COCO format. + + Raises: + ValueError: if (1) detection_masks, detection_scores and detection_classes + do not have the right lengths or (2) if each of the elements inside these + lists do not have the correct shapes or (3) if image_ids are not integers. + """ + + if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1: + raise ValueError('All entries in detection_classes and detection_scores' + 'expected to be of rank 1.') + num_boxes = detection_classes.shape[0] + if not num_boxes == len(detection_masks) == detection_scores.shape[0]: + raise ValueError('Corresponding entries in detection_classes, ' + 'detection_scores and detection_masks should have ' + 'compatible lengths and shapes ' + 'Classes length: %d. Masks length: %d. ' + 'Scores length: %d' % ( + detection_classes.shape[0], len(detection_masks), + detection_scores.shape[0] + )) + detections_list = [] + for i in range(num_boxes): + if detection_classes[i] in category_id_set: + detections_list.append({ + 'image_id': image_id, + 'category_id': int(detection_classes[i]), + 'segmentation': _RleCompress(detection_masks[i]), + 'score': float(detection_scores[i]) + }) + return detections_list + + +def ExportDetectionsToCOCO(image_ids, + detection_boxes, + detection_scores, + detection_classes, + categories, + output_path=None): + """Export detection annotations in numpy arrays to COCO API. + + This function converts a set of predicted detections represented + as numpy arrays to dictionaries that can be ingested by the COCO API. + Inputs to this function are lists, consisting of boxes, scores and + classes, respectively, corresponding to each image for which detections + have been produced. Note that the image_ids provided here must + match the ones given to the ExportGroundtruthToCOCO function in order + for evaluation to work properly. + + We assume that for each image, boxes, scores and classes are in + correspondence --- that is: detection_boxes[i, :], detection_scores[i] and + detection_classes[i] are associated with the same detection. + + Args: + image_ids: a list of unique image identifier either of type integer or + string. + detection_boxes: list of numpy arrays with shape [num_detection_boxes, 4] + detection_scores: list of numpy arrays (float) with shape + [num_detection_boxes]. Note that num_detection_boxes can be different + for each entry in the list. + detection_classes: list of numpy arrays (int) with shape + [num_detection_boxes]. Note that num_detection_boxes can be different + for each entry in the list. + categories: a list of dictionaries representing all possible categories. + Each dict in this list must have an integer 'id' key uniquely identifying + this category. + output_path: (optional) path for exporting result to JSON + + Returns: + list of dictionaries that can be read by COCO API, where each entry + corresponds to a single detection and has keys from: + ['image_id', 'category_id', 'bbox', 'score']. 
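A minimal usage sketch for ExportDetectionsToCOCO, assuming the object_detection package is importable; ids, boxes and scores are invented. Omitting output_path skips the JSON write and simply returns the list.

```python
import numpy as np
from object_detection.metrics import coco_tools

image_ids = ['a', 'b']
boxes = [np.array([[0., 0., 10., 10.]]), np.array([[5., 5., 15., 25.]])]
scores = [np.array([0.8]), np.array([0.6])]
classes = [np.array([1], np.int32), np.array([2], np.int32)]
categories = [{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}]

detections = coco_tools.ExportDetectionsToCOCO(
    image_ids, boxes, scores, classes, categories)
# detections[0] == {'image_id': 'a', 'category_id': 1,
#                   'bbox': [0.0, 0.0, 10.0, 10.0], 'score': 0.8}
```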
+ Raises: + ValueError: if (1) detection_boxes and detection_classes do not have the + right lengths or (2) if each of the elements inside these lists do not + have the correct shapes or (3) if image_ids are not integers. + """ + category_id_set = set([cat['id'] for cat in categories]) + detections_export_list = [] + if not (len(image_ids) == len(detection_boxes) == len(detection_scores) == + len(detection_classes)): + raise ValueError('Input lists must have the same length') + for image_id, boxes, scores, classes in zip(image_ids, detection_boxes, + detection_scores, + detection_classes): + detections_export_list.extend(ExportSingleImageDetectionBoxesToCoco( + image_id, + category_id_set, + boxes, + scores, + classes)) + if output_path: + with tf.gfile.GFile(output_path, 'w') as fid: + json_utils.Dump(detections_export_list, fid, float_digits=4, indent=2) + return detections_export_list + + +def ExportSegmentsToCOCO(image_ids, + detection_masks, + detection_scores, + detection_classes, + categories, + output_path=None): + """Export segmentation masks in numpy arrays to COCO API. + + This function converts a set of predicted instance masks represented + as numpy arrays to dictionaries that can be ingested by the COCO API. + Inputs to this function are lists, consisting of segments, scores and + classes, respectively, corresponding to each image for which detections + have been produced. + + Note this function is recommended to use for small dataset. + For large dataset, it should be used with a merge function + (e.g. in map reduce), otherwise the memory consumption is large. + + We assume that for each image, masks, scores and classes are in + correspondence --- that is: detection_masks[i, :, :, :], detection_scores[i] + and detection_classes[i] are associated with the same detection. + + Args: + image_ids: list of image ids (typically ints or strings) + detection_masks: list of numpy arrays with shape [num_detection, h, w, 1] + and type uint8. The height and width should match the shape of + corresponding image. + detection_scores: list of numpy arrays (float) with shape + [num_detection]. Note that num_detection can be different + for each entry in the list. + detection_classes: list of numpy arrays (int) with shape + [num_detection]. Note that num_detection can be different + for each entry in the list. + categories: a list of dictionaries representing all possible categories. + Each dict in this list must have an integer 'id' key uniquely identifying + this category. + output_path: (optional) path for exporting result to JSON + + Returns: + list of dictionaries that can be read by COCO API, where each entry + corresponds to a single detection and has keys from: + ['image_id', 'category_id', 'segmentation', 'score']. + + Raises: + ValueError: if detection_masks and detection_classes do not have the + right lengths or if each of the elements inside these lists do not + have the correct shapes. + """ + if not (len(image_ids) == len(detection_masks) == len(detection_scores) == + len(detection_classes)): + raise ValueError('Input lists must have the same length') + + segment_export_list = [] + for image_id, masks, scores, classes in zip(image_ids, detection_masks, + detection_scores, + detection_classes): + + if len(classes.shape) != 1 or len(scores.shape) != 1: + raise ValueError('All entries in detection_classes and detection_scores' + 'expected to be of rank 1.') + if len(masks.shape) != 4: + raise ValueError('All entries in masks expected to be of ' + 'rank 4. 
Given {}'.format(masks.shape)) + + num_boxes = classes.shape[0] + if not num_boxes == masks.shape[0] == scores.shape[0]: + raise ValueError('Corresponding entries in segment_classes, ' + 'detection_scores and detection_boxes should have ' + 'compatible shapes (i.e., agree on the 0th dimension).') + + category_id_set = set([cat['id'] for cat in categories]) + segment_export_list.extend(ExportSingleImageDetectionMasksToCoco( + image_id, category_id_set, np.squeeze(masks, axis=3), scores, classes)) + + if output_path: + with tf.gfile.GFile(output_path, 'w') as fid: + json_utils.Dump(segment_export_list, fid, float_digits=4, indent=2) + return segment_export_list + + +def ExportKeypointsToCOCO(image_ids, + detection_keypoints, + detection_scores, + detection_classes, + categories, + output_path=None): + """Exports keypoints in numpy arrays to COCO API. + + This function converts a set of predicted keypoints represented + as numpy arrays to dictionaries that can be ingested by the COCO API. + Inputs to this function are lists, consisting of keypoints, scores and + classes, respectively, corresponding to each image for which detections + have been produced. + + We assume that for each image, keypoints, scores and classes are in + correspondence --- that is: detection_keypoints[i, :, :, :], + detection_scores[i] and detection_classes[i] are associated with the same + detection. + + Args: + image_ids: list of image ids (typically ints or strings) + detection_keypoints: list of numpy arrays with shape + [num_detection, num_keypoints, 2] and type float32 in absolute + x-y coordinates. + detection_scores: list of numpy arrays (float) with shape + [num_detection]. Note that num_detection can be different + for each entry in the list. + detection_classes: list of numpy arrays (int) with shape + [num_detection]. Note that num_detection can be different + for each entry in the list. + categories: a list of dictionaries representing all possible categories. + Each dict in this list must have an integer 'id' key uniquely identifying + this category and an integer 'num_keypoints' key specifying the number of + keypoints the category has. + output_path: (optional) path for exporting result to JSON + + Returns: + list of dictionaries that can be read by COCO API, where each entry + corresponds to a single detection and has keys from: + ['image_id', 'category_id', 'keypoints', 'score']. + + Raises: + ValueError: if detection_keypoints and detection_classes do not have the + right lengths or if each of the elements inside these lists do not + have the correct shapes. + """ + if not (len(image_ids) == len(detection_keypoints) == + len(detection_scores) == len(detection_classes)): + raise ValueError('Input lists must have the same length') + + keypoints_export_list = [] + for image_id, keypoints, scores, classes in zip( + image_ids, detection_keypoints, detection_scores, detection_classes): + + if len(classes.shape) != 1 or len(scores.shape) != 1: + raise ValueError('All entries in detection_classes and detection_scores' + 'expected to be of rank 1.') + if len(keypoints.shape) != 3: + raise ValueError('All entries in keypoints expected to be of ' + 'rank 3. 
Given {}'.format(keypoints.shape)) + + num_boxes = classes.shape[0] + if not num_boxes == keypoints.shape[0] == scores.shape[0]: + raise ValueError('Corresponding entries in detection_classes, ' + 'detection_keypoints, and detection_scores should have ' + 'compatible shapes (i.e., agree on the 0th dimension).') + + category_id_set = set([cat['id'] for cat in categories]) + category_id_to_num_keypoints_map = { + cat['id']: cat['num_keypoints'] for cat in categories + if 'num_keypoints' in cat} + + for i in range(num_boxes): + if classes[i] not in category_id_set: + raise ValueError('class id should be in category_id_set\n') + + if classes[i] in category_id_to_num_keypoints_map: + num_keypoints = category_id_to_num_keypoints_map[classes[i]] + # Adds extra ones to indicate the visibility for each keypoint as is + # recommended by MSCOCO. + instance_keypoints = np.concatenate( + [keypoints[i, 0:num_keypoints, :], + np.expand_dims(np.ones(num_keypoints), axis=1)], + axis=1).astype(int) + + instance_keypoints = instance_keypoints.flatten().tolist() + keypoints_export_list.append({ + 'image_id': image_id, + 'category_id': int(classes[i]), + 'keypoints': instance_keypoints, + 'score': float(scores[i]) + }) + + if output_path: + with tf.gfile.GFile(output_path, 'w') as fid: + json_utils.Dump(keypoints_export_list, fid, float_digits=4, indent=2) + return keypoints_export_list diff --git a/models/research/object_detection/metrics/coco_tools_test.py b/models/research/object_detection/metrics/coco_tools_test.py new file mode 100644 index 0000000000000000000000000000000000000000..f2c3ce0a81d46f6c4447272b60a3381a2adeeb0c --- /dev/null +++ b/models/research/object_detection/metrics/coco_tools_test.py @@ -0,0 +1,405 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for tensorflow_model.object_detection.metrics.coco_tools.""" +import json +import os +import re +import numpy as np + +from pycocotools import mask + +import tensorflow.compat.v1 as tf + +from object_detection.metrics import coco_tools + + +class CocoToolsTest(tf.test.TestCase): + + def setUp(self): + groundtruth_annotations_list = [ + { + 'id': 1, + 'image_id': 'first', + 'category_id': 1, + 'bbox': [100., 100., 100., 100.], + 'area': 100.**2, + 'iscrowd': 0 + }, + { + 'id': 2, + 'image_id': 'second', + 'category_id': 1, + 'bbox': [50., 50., 50., 50.], + 'area': 50.**2, + 'iscrowd': 0 + }, + ] + image_list = [{'id': 'first'}, {'id': 'second'}] + category_list = [{'id': 0, 'name': 'person'}, + {'id': 1, 'name': 'cat'}, + {'id': 2, 'name': 'dog'}] + self._groundtruth_dict = { + 'annotations': groundtruth_annotations_list, + 'images': image_list, + 'categories': category_list + } + + self._detections_list = [ + { + 'image_id': 'first', + 'category_id': 1, + 'bbox': [100., 100., 100., 100.], + 'score': .8 + }, + { + 'image_id': 'second', + 'category_id': 1, + 'bbox': [50., 50., 50., 50.], + 'score': .7 + }, + ] + + def testCocoWrappers(self): + groundtruth = coco_tools.COCOWrapper(self._groundtruth_dict) + detections = groundtruth.LoadAnnotations(self._detections_list) + evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections) + summary_metrics, _ = evaluator.ComputeMetrics() + self.assertAlmostEqual(1.0, summary_metrics['Precision/mAP']) + + def testExportGroundtruthToCOCO(self): + image_ids = ['first', 'second'] + groundtruth_boxes = [np.array([[100, 100, 200, 200]], np.float), + np.array([[50, 50, 100, 100]], np.float)] + groundtruth_classes = [np.array([1], np.int32), np.array([1], np.int32)] + categories = [{'id': 0, 'name': 'person'}, + {'id': 1, 'name': 'cat'}, + {'id': 2, 'name': 'dog'}] + output_path = os.path.join(tf.test.get_temp_dir(), 'groundtruth.json') + result = coco_tools.ExportGroundtruthToCOCO( + image_ids, + groundtruth_boxes, + groundtruth_classes, + categories, + output_path=output_path) + self.assertDictEqual(result, self._groundtruth_dict) + with tf.gfile.GFile(output_path, 'r') as f: + written_result = f.read() + # The json output should have floats written to 4 digits of precision. + matcher = re.compile(r'"bbox":\s+\[\n\s+\d+.\d\d\d\d,', re.MULTILINE) + self.assertTrue(matcher.findall(written_result)) + written_result = json.loads(written_result) + self.assertAlmostEqual(result, written_result) + + def testExportDetectionsToCOCO(self): + image_ids = ['first', 'second'] + detections_boxes = [np.array([[100, 100, 200, 200]], np.float), + np.array([[50, 50, 100, 100]], np.float)] + detections_scores = [np.array([.8], np.float), np.array([.7], np.float)] + detections_classes = [np.array([1], np.int32), np.array([1], np.int32)] + categories = [{'id': 0, 'name': 'person'}, + {'id': 1, 'name': 'cat'}, + {'id': 2, 'name': 'dog'}] + output_path = os.path.join(tf.test.get_temp_dir(), 'detections.json') + result = coco_tools.ExportDetectionsToCOCO( + image_ids, + detections_boxes, + detections_scores, + detections_classes, + categories, + output_path=output_path) + self.assertListEqual(result, self._detections_list) + with tf.gfile.GFile(output_path, 'r') as f: + written_result = f.read() + # The json output should have floats written to 4 digits of precision. 
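To make the precision check below concrete: a sketch of the kind of JSON fragment json_utils.Dump(..., float_digits=4, indent=2) is expected to produce (the exact indentation here is an assumption), matched by the same regex the test compiles next.

```python
import re

snippet = '"bbox": [\n      100.0000,\n      100.0000,\n      200.0000,\n      200.0000\n    ],'
matcher = re.compile(r'"bbox":\s+\[\n\s+\d+.\d\d\d\d,', re.MULTILINE)
print(bool(matcher.findall(snippet)))  # True: floats carry four decimal digits
```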
+ matcher = re.compile(r'"bbox":\s+\[\n\s+\d+.\d\d\d\d,', re.MULTILINE) + self.assertTrue(matcher.findall(written_result)) + written_result = json.loads(written_result) + self.assertAlmostEqual(result, written_result) + + def testExportSegmentsToCOCO(self): + image_ids = ['first', 'second'] + detection_masks = [np.array( + [[[0, 1, 0, 1], [0, 1, 1, 0], [0, 0, 0, 1], [0, 1, 0, 1]]], + dtype=np.uint8), np.array( + [[[0, 1, 0, 1], [0, 1, 1, 0], [0, 0, 0, 1], [0, 1, 0, 1]]], + dtype=np.uint8)] + + for i, detection_mask in enumerate(detection_masks): + detection_masks[i] = detection_mask[:, :, :, None] + + detection_scores = [np.array([.8], np.float), np.array([.7], np.float)] + detection_classes = [np.array([1], np.int32), np.array([1], np.int32)] + + categories = [{'id': 0, 'name': 'person'}, + {'id': 1, 'name': 'cat'}, + {'id': 2, 'name': 'dog'}] + output_path = os.path.join(tf.test.get_temp_dir(), 'segments.json') + result = coco_tools.ExportSegmentsToCOCO( + image_ids, + detection_masks, + detection_scores, + detection_classes, + categories, + output_path=output_path) + with tf.gfile.GFile(output_path, 'r') as f: + written_result = f.read() + written_result = json.loads(written_result) + mask_load = mask.decode([written_result[0]['segmentation']]) + self.assertTrue(np.allclose(mask_load, detection_masks[0])) + self.assertAlmostEqual(result, written_result) + + def testExportKeypointsToCOCO(self): + image_ids = ['first', 'second'] + detection_keypoints = [ + np.array( + [[[100, 200], [300, 400], [500, 600]], + [[50, 150], [250, 350], [450, 550]]], dtype=np.int32), + np.array( + [[[110, 210], [310, 410], [510, 610]], + [[60, 160], [260, 360], [460, 560]]], dtype=np.int32)] + + detection_scores = [np.array([.8, 0.2], np.float), + np.array([.7, 0.3], np.float)] + detection_classes = [np.array([1, 1], np.int32), np.array([1, 1], np.int32)] + + categories = [{'id': 1, 'name': 'person', 'num_keypoints': 3}, + {'id': 2, 'name': 'cat'}, + {'id': 3, 'name': 'dog'}] + + output_path = os.path.join(tf.test.get_temp_dir(), 'keypoints.json') + result = coco_tools.ExportKeypointsToCOCO( + image_ids, + detection_keypoints, + detection_scores, + detection_classes, + categories, + output_path=output_path) + + with tf.gfile.GFile(output_path, 'r') as f: + written_result = f.read() + written_result = json.loads(written_result) + self.assertAlmostEqual(result, written_result) + + def testSingleImageDetectionBoxesExport(self): + boxes = np.array([[0, 0, 1, 1], + [0, 0, .5, .5], + [.5, .5, 1, 1]], dtype=np.float32) + classes = np.array([1, 2, 3], dtype=np.int32) + scores = np.array([0.8, 0.2, 0.7], dtype=np.float32) + coco_boxes = np.array([[0, 0, 1, 1], + [0, 0, .5, .5], + [.5, .5, .5, .5]], dtype=np.float32) + coco_annotations = coco_tools.ExportSingleImageDetectionBoxesToCoco( + image_id='first_image', + category_id_set=set([1, 2, 3]), + detection_boxes=boxes, + detection_classes=classes, + detection_scores=scores) + for i, annotation in enumerate(coco_annotations): + self.assertEqual(annotation['image_id'], 'first_image') + self.assertEqual(annotation['category_id'], classes[i]) + self.assertAlmostEqual(annotation['score'], scores[i]) + self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i]))) + + def testSingleImageDetectionMaskExport(self): + masks = np.array( + [[[1, 1,], [1, 1]], + [[0, 0], [0, 1]], + [[0, 0], [0, 0]]], dtype=np.uint8) + classes = np.array([1, 2, 3], dtype=np.int32) + scores = np.array([0.8, 0.2, 0.7], dtype=np.float32) + coco_annotations = 
coco_tools.ExportSingleImageDetectionMasksToCoco( + image_id='first_image', + category_id_set=set([1, 2, 3]), + detection_classes=classes, + detection_scores=scores, + detection_masks=masks) + expected_counts = ['04', '31', '4'] + for i, mask_annotation in enumerate(coco_annotations): + self.assertEqual(mask_annotation['segmentation']['counts'], + expected_counts[i]) + self.assertTrue(np.all(np.equal(mask.decode( + mask_annotation['segmentation']), masks[i]))) + self.assertEqual(mask_annotation['image_id'], 'first_image') + self.assertEqual(mask_annotation['category_id'], classes[i]) + self.assertAlmostEqual(mask_annotation['score'], scores[i]) + + def testSingleImageGroundtruthExport(self): + masks = np.array( + [[[1, 1,], [1, 1]], + [[0, 0], [0, 1]], + [[0, 0], [0, 0]]], dtype=np.uint8) + boxes = np.array([[0, 0, 1, 1], + [0, 0, .5, .5], + [.5, .5, 1, 1]], dtype=np.float32) + coco_boxes = np.array([[0, 0, 1, 1], + [0, 0, .5, .5], + [.5, .5, .5, .5]], dtype=np.float32) + classes = np.array([1, 2, 3], dtype=np.int32) + is_crowd = np.array([0, 1, 0], dtype=np.int32) + next_annotation_id = 1 + expected_counts = ['04', '31', '4'] + + # Tests exporting without passing in is_crowd (for backward compatibility). + coco_annotations = coco_tools.ExportSingleImageGroundtruthToCoco( + image_id='first_image', + category_id_set=set([1, 2, 3]), + next_annotation_id=next_annotation_id, + groundtruth_boxes=boxes, + groundtruth_classes=classes, + groundtruth_masks=masks) + for i, annotation in enumerate(coco_annotations): + self.assertEqual(annotation['segmentation']['counts'], + expected_counts[i]) + self.assertTrue(np.all(np.equal(mask.decode( + annotation['segmentation']), masks[i]))) + self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i]))) + self.assertEqual(annotation['image_id'], 'first_image') + self.assertEqual(annotation['category_id'], classes[i]) + self.assertEqual(annotation['id'], i + next_annotation_id) + + # Tests exporting with is_crowd. + coco_annotations = coco_tools.ExportSingleImageGroundtruthToCoco( + image_id='first_image', + category_id_set=set([1, 2, 3]), + next_annotation_id=next_annotation_id, + groundtruth_boxes=boxes, + groundtruth_classes=classes, + groundtruth_masks=masks, + groundtruth_is_crowd=is_crowd) + for i, annotation in enumerate(coco_annotations): + self.assertEqual(annotation['segmentation']['counts'], + expected_counts[i]) + self.assertTrue(np.all(np.equal(mask.decode( + annotation['segmentation']), masks[i]))) + self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i]))) + self.assertEqual(annotation['image_id'], 'first_image') + self.assertEqual(annotation['category_id'], classes[i]) + self.assertEqual(annotation['iscrowd'], is_crowd[i]) + self.assertEqual(annotation['id'], i + next_annotation_id) + + def testSingleImageGroundtruthExportWithKeypoints(self): + boxes = np.array([[0, 0, 1, 1], + [0, 0, .5, .5], + [.5, .5, 1, 1]], dtype=np.float32) + coco_boxes = np.array([[0, 0, 1, 1], + [0, 0, .5, .5], + [.5, .5, .5, .5]], dtype=np.float32) + keypoints = np.array([[[0, 0], [0.25, 0.25], [0.75, 0.75]], + [[0, 0], [0.125, 0.125], [0.375, 0.375]], + [[0.5, 0.5], [0.75, 0.75], [1.0, 1.0]]], + dtype=np.float32) + visibilities = np.array([[2, 2, 2], + [2, 2, 0], + [2, 0, 0]], dtype=np.int32) + areas = np.array([15., 16., 17.]) + + classes = np.array([1, 2, 3], dtype=np.int32) + is_crowd = np.array([0, 1, 0], dtype=np.int32) + next_annotation_id = 1 + + # Tests exporting without passing in is_crowd (for backward compatibility). 
+ coco_annotations = coco_tools.ExportSingleImageGroundtruthToCoco( + image_id='first_image', + category_id_set=set([1, 2, 3]), + next_annotation_id=next_annotation_id, + groundtruth_boxes=boxes, + groundtruth_classes=classes, + groundtruth_keypoints=keypoints, + groundtruth_keypoint_visibilities=visibilities, + groundtruth_area=areas) + for i, annotation in enumerate(coco_annotations): + self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i]))) + self.assertEqual(annotation['image_id'], 'first_image') + self.assertEqual(annotation['category_id'], classes[i]) + self.assertEqual(annotation['id'], i + next_annotation_id) + self.assertEqual(annotation['num_keypoints'], 3 - i) + self.assertEqual(annotation['area'], 15.0 + i) + self.assertTrue( + np.all(np.isclose(annotation['keypoints'][0::3], keypoints[i, :, 1]))) + self.assertTrue( + np.all(np.isclose(annotation['keypoints'][1::3], keypoints[i, :, 0]))) + self.assertTrue( + np.all(np.equal(annotation['keypoints'][2::3], visibilities[i]))) + + # Tests exporting with is_crowd. + coco_annotations = coco_tools.ExportSingleImageGroundtruthToCoco( + image_id='first_image', + category_id_set=set([1, 2, 3]), + next_annotation_id=next_annotation_id, + groundtruth_boxes=boxes, + groundtruth_classes=classes, + groundtruth_keypoints=keypoints, + groundtruth_keypoint_visibilities=visibilities, + groundtruth_is_crowd=is_crowd) + for i, annotation in enumerate(coco_annotations): + self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i]))) + self.assertEqual(annotation['image_id'], 'first_image') + self.assertEqual(annotation['category_id'], classes[i]) + self.assertEqual(annotation['iscrowd'], is_crowd[i]) + self.assertEqual(annotation['id'], i + next_annotation_id) + self.assertEqual(annotation['num_keypoints'], 3 - i) + self.assertTrue( + np.all(np.isclose(annotation['keypoints'][0::3], keypoints[i, :, 1]))) + self.assertTrue( + np.all(np.isclose(annotation['keypoints'][1::3], keypoints[i, :, 0]))) + self.assertTrue( + np.all(np.equal(annotation['keypoints'][2::3], visibilities[i]))) + # Testing the area values are derived from the bounding boxes. + if i == 0: + self.assertAlmostEqual(annotation['area'], 1.0) + else: + self.assertAlmostEqual(annotation['area'], 0.25) + + def testSingleImageDetectionBoxesExportWithKeypoints(self): + boxes = np.array([[0, 0, 1, 1], [0, 0, .5, .5], [.5, .5, 1, 1]], + dtype=np.float32) + coco_boxes = np.array([[0, 0, 1, 1], [0, 0, .5, .5], [.5, .5, .5, .5]], + dtype=np.float32) + keypoints = np.array([[[0, 0], [0.25, 0.25], [0.75, 0.75]], + [[0, 0], [0.125, 0.125], [0.375, 0.375]], + [[0.5, 0.5], [0.75, 0.75], [1.0, 1.0]]], + dtype=np.float32) + visibilities = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]], dtype=np.int32) + + classes = np.array([1, 2, 3], dtype=np.int32) + scores = np.array([0.8, 0.2, 0.7], dtype=np.float32) + + # Tests exporting without passing in is_crowd (for backward compatibility). 
+ coco_annotations = coco_tools.ExportSingleImageDetectionBoxesToCoco( + image_id='first_image', + category_id_set=set([1, 2, 3]), + detection_boxes=boxes, + detection_scores=scores, + detection_classes=classes, + detection_keypoints=keypoints, + detection_keypoint_visibilities=visibilities) + for i, annotation in enumerate(coco_annotations): + self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i]))) + self.assertEqual(annotation['image_id'], 'first_image') + self.assertEqual(annotation['category_id'], classes[i]) + self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i]))) + self.assertEqual(annotation['score'], scores[i]) + self.assertEqual(annotation['num_keypoints'], 3) + self.assertTrue( + np.all(np.isclose(annotation['keypoints'][0::3], keypoints[i, :, 1]))) + self.assertTrue( + np.all(np.isclose(annotation['keypoints'][1::3], keypoints[i, :, 0]))) + self.assertTrue( + np.all(np.equal(annotation['keypoints'][2::3], visibilities[i]))) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/metrics/io_utils.py b/models/research/object_detection/metrics/io_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..900584de1e5cd26b51ae8928581a4283bea2598e --- /dev/null +++ b/models/research/object_detection/metrics/io_utils.py @@ -0,0 +1,34 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Common IO utils used in offline metric computation. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import csv + + +def write_csv(fid, metrics): + """Writes metrics key-value pairs to CSV file. + + Args: + fid: File identifier of an opened file. + metrics: A dictionary with metrics to be written. + """ + metrics_writer = csv.writer(fid, delimiter=',') + for metric_name, metric_value in metrics.items(): + metrics_writer.writerow([metric_name, str(metric_value)]) diff --git a/models/research/object_detection/metrics/offline_eval_map_corloc.py b/models/research/object_detection/metrics/offline_eval_map_corloc.py new file mode 100644 index 0000000000000000000000000000000000000000..a12b1d98493e022d302c76b0cadb514e7fc0eb60 --- /dev/null +++ b/models/research/object_detection/metrics/offline_eval_map_corloc.py @@ -0,0 +1,171 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Evaluation executable for detection data. + +This executable evaluates precomputed detections produced by a detection +model and writes the evaluation results into csv file metrics.csv, stored +in the directory, specified by --eval_dir. + +The evaluation metrics set is supplied in object_detection.protos.EvalConfig +in metrics_set field. +Currently two set of metrics are supported: +- pascal_voc_metrics: standard PASCAL VOC 2007 metric +- open_images_detection_metrics: Open Image V2 metric +All other field of object_detection.protos.EvalConfig are ignored. + +Example usage: + ./compute_metrics \ + --eval_dir=path/to/eval_dir \ + --eval_config_path=path/to/evaluation/configuration/file \ + --input_config_path=path/to/input/configuration/file +""" +import csv +import os +import re +import tensorflow.compat.v1 as tf + +from object_detection import eval_util +from object_detection.core import standard_fields +from object_detection.metrics import tf_example_parser +from object_detection.utils import config_util +from object_detection.utils import label_map_util + +flags = tf.app.flags +tf.logging.set_verbosity(tf.logging.INFO) + +flags.DEFINE_string('eval_dir', None, 'Directory to write eval summaries to.') +flags.DEFINE_string('eval_config_path', None, + 'Path to an eval_pb2.EvalConfig config file.') +flags.DEFINE_string('input_config_path', None, + 'Path to an eval_pb2.InputConfig config file.') + +FLAGS = flags.FLAGS + + +def _generate_sharded_filenames(filename): + m = re.search(r'@(\d{1,})', filename) + if m: + num_shards = int(m.group(1)) + return [ + re.sub(r'@(\d{1,})', '-%.5d-of-%.5d' % (i, num_shards), filename) + for i in range(num_shards) + ] + else: + return [filename] + + +def _generate_filenames(filenames): + result = [] + for filename in filenames: + result += _generate_sharded_filenames(filename) + return result + + +def read_data_and_evaluate(input_config, eval_config): + """Reads pre-computed object detections and groundtruth from tf_record. + + Args: + input_config: input config proto of type + object_detection.protos.InputReader. + eval_config: evaluation config proto of type + object_detection.protos.EvalConfig. + + Returns: + Evaluated detections metrics. + + Raises: + ValueError: if input_reader type is not supported or metric type is unknown. 
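A self-contained restatement of the sharded-filename expansion implemented by _generate_sharded_filenames above (the paths are invented): a trailing "@N" marker expands into N "-0000i-of-0000N" shard names.

```python
import re

def expand_sharded(filename):
    # "name@N" -> ["name-00000-of-0000N", ..., "name-<N-1>-of-0000N"].
    m = re.search(r'@(\d{1,})', filename)
    if not m:
        return [filename]
    num_shards = int(m.group(1))
    return [re.sub(r'@(\d{1,})', '-%.5d-of-%.5d' % (i, num_shards), filename)
            for i in range(num_shards)]

print(expand_sharded('/data/detections@3.record'))
# ['/data/detections-00000-of-00003.record',
#  '/data/detections-00001-of-00003.record',
#  '/data/detections-00002-of-00003.record']
```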
+ """ + if input_config.WhichOneof('input_reader') == 'tf_record_input_reader': + input_paths = input_config.tf_record_input_reader.input_path + + categories = label_map_util.create_categories_from_labelmap( + input_config.label_map_path) + + object_detection_evaluators = eval_util.get_evaluators( + eval_config, categories) + # Support a single evaluator + object_detection_evaluator = object_detection_evaluators[0] + + skipped_images = 0 + processed_images = 0 + for input_path in _generate_filenames(input_paths): + tf.logging.info('Processing file: {0}'.format(input_path)) + + record_iterator = tf.python_io.tf_record_iterator(path=input_path) + data_parser = tf_example_parser.TfExampleDetectionAndGTParser() + + for string_record in record_iterator: + tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 1000, + processed_images) + processed_images += 1 + + example = tf.train.Example() + example.ParseFromString(string_record) + decoded_dict = data_parser.parse(example) + + if decoded_dict: + object_detection_evaluator.add_single_ground_truth_image_info( + decoded_dict[standard_fields.DetectionResultFields.key], + decoded_dict) + object_detection_evaluator.add_single_detected_image_info( + decoded_dict[standard_fields.DetectionResultFields.key], + decoded_dict) + else: + skipped_images += 1 + tf.logging.info('Skipped images: {0}'.format(skipped_images)) + + return object_detection_evaluator.evaluate() + + raise ValueError('Unsupported input_reader_config.') + + +def write_metrics(metrics, output_dir): + """Write metrics to the output directory. + + Args: + metrics: A dictionary containing metric names and values. + output_dir: Directory to write metrics to. + """ + tf.logging.info('Writing metrics.') + + with open(os.path.join(output_dir, 'metrics.csv'), 'w') as csvfile: + metrics_writer = csv.writer(csvfile, delimiter=',') + for metric_name, metric_value in metrics.items(): + metrics_writer.writerow([metric_name, str(metric_value)]) + + +def main(argv): + del argv + required_flags = ['input_config_path', 'eval_config_path', 'eval_dir'] + for flag_name in required_flags: + if not getattr(FLAGS, flag_name): + raise ValueError('Flag --{} is required'.format(flag_name)) + + configs = config_util.get_configs_from_multiple_files( + eval_input_config_path=FLAGS.input_config_path, + eval_config_path=FLAGS.eval_config_path) + + eval_config = configs['eval_config'] + input_config = configs['eval_input_config'] + + metrics = read_data_and_evaluate(input_config, eval_config) + + # Save metrics + write_metrics(metrics, FLAGS.eval_dir) + + +if __name__ == '__main__': + tf.app.run(main) diff --git a/models/research/object_detection/metrics/offline_eval_map_corloc_test.py b/models/research/object_detection/metrics/offline_eval_map_corloc_test.py new file mode 100644 index 0000000000000000000000000000000000000000..9641dfb2d189d7aaa2c39246d143e97f68c8dbae --- /dev/null +++ b/models/research/object_detection/metrics/offline_eval_map_corloc_test.py @@ -0,0 +1,58 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for utilities in offline_eval_map_corloc binary.""" + +import tensorflow.compat.v1 as tf + +from object_detection.metrics import offline_eval_map_corloc as offline_eval + + +class OfflineEvalMapCorlocTest(tf.test.TestCase): + + def test_generateShardedFilenames(self): + test_filename = '/path/to/file' + result = offline_eval._generate_sharded_filenames(test_filename) + self.assertEqual(result, [test_filename]) + + test_filename = '/path/to/file-00000-of-00050' + result = offline_eval._generate_sharded_filenames(test_filename) + self.assertEqual(result, [test_filename]) + + result = offline_eval._generate_sharded_filenames('/path/to/@3.record') + self.assertEqual(result, [ + '/path/to/-00000-of-00003.record', '/path/to/-00001-of-00003.record', + '/path/to/-00002-of-00003.record' + ]) + + result = offline_eval._generate_sharded_filenames('/path/to/abc@3') + self.assertEqual(result, [ + '/path/to/abc-00000-of-00003', '/path/to/abc-00001-of-00003', + '/path/to/abc-00002-of-00003' + ]) + + result = offline_eval._generate_sharded_filenames('/path/to/@1') + self.assertEqual(result, ['/path/to/-00000-of-00001']) + + def test_generateFilenames(self): + test_filenames = ['/path/to/file', '/path/to/@3.record'] + result = offline_eval._generate_filenames(test_filenames) + self.assertEqual(result, [ + '/path/to/file', '/path/to/-00000-of-00003.record', + '/path/to/-00001-of-00003.record', '/path/to/-00002-of-00003.record' + ]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/metrics/oid_challenge_evaluation.py b/models/research/object_detection/metrics/oid_challenge_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..25f553a917fb5120ad7fed8f0a9cc43c78092d5e --- /dev/null +++ b/models/research/object_detection/metrics/oid_challenge_evaluation.py @@ -0,0 +1,149 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Runs evaluation using OpenImages groundtruth and predictions. + +Uses Open Images Challenge 2018, 2019 metrics + +Example usage: +python models/research/object_detection/metrics/oid_od_challenge_evaluation.py \ + --input_annotations_boxes=/path/to/input/annotations-human-bbox.csv \ + --input_annotations_labels=/path/to/input/annotations-label.csv \ + --input_class_labelmap=/path/to/input/class_labelmap.pbtxt \ + --input_predictions=/path/to/input/predictions.csv \ + --output_metrics=/path/to/output/metric.csv \ + --input_annotations_segm=[/path/to/input/annotations-human-mask.csv] \ + +If optional flag has_masks is True, Mask column is also expected in CSV. 
+ +CSVs with bounding box annotations, instance segmentations and image label +can be downloaded from the Open Images Challenge website: +https://storage.googleapis.com/openimages/web/challenge.html +The format of the input csv and the metrics itself are described on the +challenge website as well. + + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import logging + +from absl import app +from absl import flags +import pandas as pd +from google.protobuf import text_format + +from object_detection.metrics import io_utils +from object_detection.metrics import oid_challenge_evaluation_utils as utils +from object_detection.protos import string_int_label_map_pb2 +from object_detection.utils import object_detection_evaluation + +flags.DEFINE_string('input_annotations_boxes', None, + 'File with groundtruth boxes annotations.') +flags.DEFINE_string('input_annotations_labels', None, + 'File with groundtruth labels annotations.') +flags.DEFINE_string( + 'input_predictions', None, + """File with detection predictions; NOTE: no postprocessing is applied in the evaluation script.""" +) +flags.DEFINE_string('input_class_labelmap', None, + 'Open Images Challenge labelmap.') +flags.DEFINE_string('output_metrics', None, 'Output file with csv metrics.') +flags.DEFINE_string( + 'input_annotations_segm', None, + 'File with groundtruth instance segmentation annotations [OPTIONAL].') + +FLAGS = flags.FLAGS + + +def _load_labelmap(labelmap_path): + """Loads labelmap from the labelmap path. + + Args: + labelmap_path: Path to the labelmap. + + Returns: + A dictionary mapping class name to class numerical id + A list with dictionaries, one dictionary per category. + """ + + label_map = string_int_label_map_pb2.StringIntLabelMap() + with open(labelmap_path, 'r') as fid: + label_map_string = fid.read() + text_format.Merge(label_map_string, label_map) + labelmap_dict = {} + categories = [] + for item in label_map.item: + labelmap_dict[item.name] = item.id + categories.append({'id': item.id, 'name': item.name}) + return labelmap_dict, categories + + +def main(unused_argv): + flags.mark_flag_as_required('input_annotations_boxes') + flags.mark_flag_as_required('input_annotations_labels') + flags.mark_flag_as_required('input_predictions') + flags.mark_flag_as_required('input_class_labelmap') + flags.mark_flag_as_required('output_metrics') + + all_location_annotations = pd.read_csv(FLAGS.input_annotations_boxes) + all_label_annotations = pd.read_csv(FLAGS.input_annotations_labels) + all_label_annotations.rename( + columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True) + + is_instance_segmentation_eval = False + if FLAGS.input_annotations_segm: + is_instance_segmentation_eval = True + all_segm_annotations = pd.read_csv(FLAGS.input_annotations_segm) + # Note: this part is unstable as it requires the float point numbers in both + # csvs are exactly the same; + # Will be replaced by more stable solution: merge on LabelName and ImageID + # and filter down by IoU. 
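For context on the labelmap format that _load_labelmap above consumes, a minimal sketch; the two-item map is invented, with class ids borrowed from the test data later in this patch.

```python
from google.protobuf import text_format
from object_detection.protos import string_int_label_map_pb2

labelmap_text = """
item { name: "/m/04bcr3" id: 1 }
item { name: "/m/02gy9n" id: 2 }
"""
label_map = string_int_label_map_pb2.StringIntLabelMap()
text_format.Merge(labelmap_text, label_map)

# Same outputs _load_labelmap builds: name -> id mapping plus a category list.
labelmap_dict = {item.name: item.id for item in label_map.item}
categories = [{'id': item.id, 'name': item.name} for item in label_map.item]
print(labelmap_dict)  # {'/m/04bcr3': 1, '/m/02gy9n': 2}
```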
+ all_location_annotations = utils.merge_boxes_and_masks( + all_location_annotations, all_segm_annotations) + all_annotations = pd.concat([all_location_annotations, all_label_annotations]) + + class_label_map, categories = _load_labelmap(FLAGS.input_class_labelmap) + challenge_evaluator = ( + object_detection_evaluation.OpenImagesChallengeEvaluator( + categories, evaluate_masks=is_instance_segmentation_eval)) + + all_predictions = pd.read_csv(FLAGS.input_predictions) + images_processed = 0 + for _, groundtruth in enumerate(all_annotations.groupby('ImageID')): + logging.info('Processing image %d', images_processed) + image_id, image_groundtruth = groundtruth + groundtruth_dictionary = utils.build_groundtruth_dictionary( + image_groundtruth, class_label_map) + challenge_evaluator.add_single_ground_truth_image_info( + image_id, groundtruth_dictionary) + + prediction_dictionary = utils.build_predictions_dictionary( + all_predictions.loc[all_predictions['ImageID'] == image_id], + class_label_map) + challenge_evaluator.add_single_detected_image_info(image_id, + prediction_dictionary) + images_processed += 1 + + metrics = challenge_evaluator.evaluate() + + with open(FLAGS.output_metrics, 'w') as fid: + io_utils.write_csv(fid, metrics) + + +if __name__ == '__main__': + app.run(main) diff --git a/models/research/object_detection/metrics/oid_challenge_evaluation_utils.py b/models/research/object_detection/metrics/oid_challenge_evaluation_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..64893dbd87b194d407d7b49498361d6fc9990db6 --- /dev/null +++ b/models/research/object_detection/metrics/oid_challenge_evaluation_utils.py @@ -0,0 +1,197 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Converts data from CSV to the OpenImagesDetectionChallengeEvaluator format.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import base64 +import zlib + +import numpy as np +import pandas as pd +from pycocotools import mask as coco_mask + +from object_detection.core import standard_fields + + +def _to_normalized_box(mask_np): + """Decodes binary segmentation masks into np.arrays and boxes. + + Args: + mask_np: np.ndarray of size NxWxH. + + Returns: + a np.ndarray of the size Nx4, each row containing normalized coordinates + [YMin, XMin, YMax, XMax] of a box computed of axis parallel enclosing box of + a mask. 
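A worked example of the computation described above, matching the expectations in the accompanying unit test: the axis-parallel enclosing box of the mask's nonzero pixels, normalized by mask height and width.

```python
import numpy as np

mask_np = np.array([[0, 0, 0, 0],
                    [0, 1, 0, 0],
                    [0, 1, 0, 0],
                    [0, 0, 0, 0]])
rows, cols = np.nonzero(mask_np)
# [ymin, xmin, ymax, xmax], each divided by the mask height/width.
box = [float(rows.min()) / 4.0, float(cols.min()) / 4.0,
       float(rows.max() + 1) / 4.0, float(cols.max() + 1) / 4.0]
print(box)  # [0.25, 0.25, 0.75, 0.5]
```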
+ """ + coord1, coord2 = np.nonzero(mask_np) + if coord1.size > 0: + ymin = float(min(coord1)) / mask_np.shape[0] + ymax = float(max(coord1) + 1) / mask_np.shape[0] + xmin = float(min(coord2)) / mask_np.shape[1] + xmax = float((max(coord2) + 1)) / mask_np.shape[1] + + return np.array([ymin, xmin, ymax, xmax]) + else: + return np.array([0.0, 0.0, 0.0, 0.0]) + + +def _decode_raw_data_into_masks_and_boxes(segments, image_widths, + image_heights): + """Decods binary segmentation masks into np.arrays and boxes. + + Args: + segments: pandas Series object containing either + None entries, or strings with + base64, zlib compressed, COCO RLE-encoded binary masks. + All masks are expected to be the same size. + image_widths: pandas Series of mask widths. + image_heights: pandas Series of mask heights. + + Returns: + a np.ndarray of the size NxWxH, where W and H is determined from the encoded + masks; for the None values, zero arrays of size WxH are created. If input + contains only None values, W=1, H=1. + """ + segment_masks = [] + segment_boxes = [] + ind = segments.first_valid_index() + if ind is not None: + size = [int(image_heights[ind]), int(image_widths[ind])] + else: + # It does not matter which size we pick since no masks will ever be + # evaluated. + return np.zeros((segments.shape[0], 1, 1), dtype=np.uint8), np.zeros( + (segments.shape[0], 4), dtype=np.float32) + + for segment, im_width, im_height in zip(segments, image_widths, + image_heights): + if pd.isnull(segment): + segment_masks.append(np.zeros([1, size[0], size[1]], dtype=np.uint8)) + segment_boxes.append(np.expand_dims(np.array([0.0, 0.0, 0.0, 0.0]), 0)) + else: + compressed_mask = base64.b64decode(segment) + rle_encoded_mask = zlib.decompress(compressed_mask) + decoding_dict = { + 'size': [im_height, im_width], + 'counts': rle_encoded_mask + } + mask_tensor = coco_mask.decode(decoding_dict) + + segment_masks.append(np.expand_dims(mask_tensor, 0)) + segment_boxes.append(np.expand_dims(_to_normalized_box(mask_tensor), 0)) + + return np.concatenate( + segment_masks, axis=0), np.concatenate( + segment_boxes, axis=0) + + +def merge_boxes_and_masks(box_data, mask_data): + return pd.merge( + box_data, + mask_data, + how='outer', + on=['LabelName', 'ImageID', 'XMin', 'XMax', 'YMin', 'YMax', 'IsGroupOf']) + + +def build_groundtruth_dictionary(data, class_label_map): + """Builds a groundtruth dictionary from groundtruth data in CSV file. + + Args: + data: Pandas DataFrame with the groundtruth data for a single image. + class_label_map: Class labelmap from string label name to an integer. + + Returns: + A dictionary with keys suitable for passing to + OpenImagesDetectionChallengeEvaluator.add_single_ground_truth_image_info: + standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array + of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of + the format [ymin, xmin, ymax, xmax] in absolute image coordinates. + standard_fields.InputDataFields.groundtruth_classes: integer numpy array + of shape [num_boxes] containing 1-indexed groundtruth classes for the + boxes. + standard_fields.InputDataFields.verified_labels: integer 1D numpy array + containing all classes for which labels are verified. + standard_fields.InputDataFields.groundtruth_group_of: Optional length + M numpy boolean array denoting whether a groundtruth box contains a + group of instances. 
+ """ + data_location = data[data.XMin.notnull()] + data_labels = data[data.ConfidenceImageLabel.notnull()] + + dictionary = { + standard_fields.InputDataFields.groundtruth_boxes: + data_location[['YMin', 'XMin', 'YMax', 'XMax']].as_matrix(), + standard_fields.InputDataFields.groundtruth_classes: + data_location['LabelName'].map(lambda x: class_label_map[x] + ).as_matrix(), + standard_fields.InputDataFields.groundtruth_group_of: + data_location['IsGroupOf'].as_matrix().astype(int), + standard_fields.InputDataFields.groundtruth_image_classes: + data_labels['LabelName'].map(lambda x: class_label_map[x] + ).as_matrix(), + } + + if 'Mask' in data_location: + segments, _ = _decode_raw_data_into_masks_and_boxes( + data_location['Mask'], data_location['ImageWidth'], + data_location['ImageHeight']) + dictionary[ + standard_fields.InputDataFields.groundtruth_instance_masks] = segments + + return dictionary + + +def build_predictions_dictionary(data, class_label_map): + """Builds a predictions dictionary from predictions data in CSV file. + + Args: + data: Pandas DataFrame with the predictions data for a single image. + class_label_map: Class labelmap from string label name to an integer. + + Returns: + Dictionary with keys suitable for passing to + OpenImagesDetectionChallengeEvaluator.add_single_detected_image_info: + standard_fields.DetectionResultFields.detection_boxes: float32 numpy + array of shape [num_boxes, 4] containing `num_boxes` detection boxes + of the format [ymin, xmin, ymax, xmax] in absolute image coordinates. + standard_fields.DetectionResultFields.detection_scores: float32 numpy + array of shape [num_boxes] containing detection scores for the boxes. + standard_fields.DetectionResultFields.detection_classes: integer numpy + array of shape [num_boxes] containing 1-indexed detection classes for + the boxes. + + """ + dictionary = { + standard_fields.DetectionResultFields.detection_classes: + data['LabelName'].map(lambda x: class_label_map[x]).as_matrix(), + standard_fields.DetectionResultFields.detection_scores: + data['Score'].as_matrix() + } + + if 'Mask' in data: + segments, boxes = _decode_raw_data_into_masks_and_boxes( + data['Mask'], data['ImageWidth'], data['ImageHeight']) + dictionary[standard_fields.DetectionResultFields.detection_masks] = segments + dictionary[standard_fields.DetectionResultFields.detection_boxes] = boxes + else: + dictionary[standard_fields.DetectionResultFields.detection_boxes] = data[[ + 'YMin', 'XMin', 'YMax', 'XMax' + ]].as_matrix() + + return dictionary diff --git a/models/research/object_detection/metrics/oid_challenge_evaluation_utils_test.py b/models/research/object_detection/metrics/oid_challenge_evaluation_utils_test.py new file mode 100644 index 0000000000000000000000000000000000000000..94a1da0327e6fc56981aea7fb0100d16ac681f74 --- /dev/null +++ b/models/research/object_detection/metrics/oid_challenge_evaluation_utils_test.py @@ -0,0 +1,308 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for oid_od_challenge_evaluation_util.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import base64 +import zlib + +import numpy as np +import pandas as pd +from pycocotools import mask as coco_mask +import six +import tensorflow.compat.v1 as tf + +from object_detection.core import standard_fields +from object_detection.metrics import oid_challenge_evaluation_utils as utils + + +def encode_mask(mask_to_encode): + """Encodes a binary mask into the Kaggle challenge text format. + + The encoding is done in three stages: + - COCO RLE-encoding, + - zlib compression, + - base64 encoding (to use as entry in csv file). + + Args: + mask_to_encode: binary np.ndarray of dtype bool and 2d shape. + + Returns: + A (base64) text string of the encoded mask. + """ + mask_to_encode = np.squeeze(mask_to_encode) + mask_to_encode = mask_to_encode.reshape(mask_to_encode.shape[0], + mask_to_encode.shape[1], 1) + mask_to_encode = mask_to_encode.astype(np.uint8) + mask_to_encode = np.asfortranarray(mask_to_encode) + encoded_mask = coco_mask.encode(mask_to_encode)[0]['counts'] + compressed_mask = zlib.compress(six.ensure_binary(encoded_mask), + zlib.Z_BEST_COMPRESSION) + base64_mask = base64.b64encode(compressed_mask) + return base64_mask + + +class OidUtilTest(tf.test.TestCase): + + def testMaskToNormalizedBox(self): + mask_np = np.array([[0, 0, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0]]) + box = utils._to_normalized_box(mask_np) + self.assertAllEqual(np.array([0.25, 0.25, 0.75, 0.5]), box) + mask_np = np.array([[0, 0, 0, 0], [0, 1, 0, 1], [0, 1, 0, 1], [0, 1, 1, 1]]) + box = utils._to_normalized_box(mask_np) + self.assertAllEqual(np.array([0.25, 0.25, 1.0, 1.0]), box) + mask_np = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]) + box = utils._to_normalized_box(mask_np) + self.assertAllEqual(np.array([0.0, 0.0, 0.0, 0.0]), box) + + def testDecodeToTensors(self): + mask1 = np.array([[0, 0, 1, 1], [0, 0, 1, 1], [0, 0, 0, 0]], dtype=np.uint8) + mask2 = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=np.uint8) + + encoding1 = encode_mask(mask1) + encoding2 = encode_mask(mask2) + + vals = pd.Series([encoding1, encoding2]) + image_widths = pd.Series([mask1.shape[1], mask2.shape[1]]) + image_heights = pd.Series([mask1.shape[0], mask2.shape[0]]) + + segm, bbox = utils._decode_raw_data_into_masks_and_boxes( + vals, image_widths, image_heights) + expected_segm = np.concatenate( + [np.expand_dims(mask1, 0), + np.expand_dims(mask2, 0)], axis=0) + expected_bbox = np.array([[0.0, 0.5, 2.0 / 3.0, 1.0], [0, 0, 0, 0]]) + self.assertAllEqual(expected_segm, segm) + self.assertAllEqual(expected_bbox, bbox) + + def testDecodeToTensorsNoMasks(self): + vals = pd.Series([None, None]) + image_widths = pd.Series([None, None]) + image_heights = pd.Series([None, None]) + segm, bbox = utils._decode_raw_data_into_masks_and_boxes( + vals, image_widths, image_heights) + self.assertAllEqual(np.zeros((2, 1, 1), dtype=np.uint8), segm) + self.assertAllEqual(np.zeros((2, 4), dtype=np.float32), bbox) + + +class OidChallengeEvaluationUtilTest(tf.test.TestCase): + + def testBuildGroundtruthDictionaryBoxes(self): + np_data = pd.DataFrame( + [['fe58ec1b06db2bb7', '/m/04bcr3', 0.0, 0.3, 0.5, 0.6, 1, None], + ['fe58ec1b06db2bb7', '/m/02gy9n', 0.1, 0.2, 0.3, 
0.4, 0, None], + ['fe58ec1b06db2bb7', '/m/04bcr3', None, None, None, None, None, 1], + ['fe58ec1b06db2bb7', '/m/083vt', None, None, None, None, None, 0], + ['fe58ec1b06db2bb7', '/m/02gy9n', None, None, None, None, None, 1]], + columns=[ + 'ImageID', 'LabelName', 'XMin', 'XMax', 'YMin', 'YMax', 'IsGroupOf', + 'ConfidenceImageLabel' + ]) + class_label_map = {'/m/04bcr3': 1, '/m/083vt': 2, '/m/02gy9n': 3} + groundtruth_dictionary = utils.build_groundtruth_dictionary( + np_data, class_label_map) + + self.assertIn(standard_fields.InputDataFields.groundtruth_boxes, + groundtruth_dictionary) + self.assertIn(standard_fields.InputDataFields.groundtruth_classes, + groundtruth_dictionary) + self.assertIn(standard_fields.InputDataFields.groundtruth_group_of, + groundtruth_dictionary) + self.assertIn(standard_fields.InputDataFields.groundtruth_image_classes, + groundtruth_dictionary) + + self.assertAllEqual( + np.array([1, 3]), groundtruth_dictionary[ + standard_fields.InputDataFields.groundtruth_classes]) + self.assertAllEqual( + np.array([1, 0]), groundtruth_dictionary[ + standard_fields.InputDataFields.groundtruth_group_of]) + + expected_boxes_data = np.array([[0.5, 0.0, 0.6, 0.3], [0.3, 0.1, 0.4, 0.2]]) + + self.assertNDArrayNear( + expected_boxes_data, groundtruth_dictionary[ + standard_fields.InputDataFields.groundtruth_boxes], 1e-5) + self.assertAllEqual( + np.array([1, 2, 3]), groundtruth_dictionary[ + standard_fields.InputDataFields.groundtruth_image_classes]) + + def testBuildPredictionDictionaryBoxes(self): + np_data = pd.DataFrame( + [['fe58ec1b06db2bb7', '/m/04bcr3', 0.0, 0.3, 0.5, 0.6, 0.1], + ['fe58ec1b06db2bb7', '/m/02gy9n', 0.1, 0.2, 0.3, 0.4, 0.2], + ['fe58ec1b06db2bb7', '/m/04bcr3', 0.0, 0.1, 0.2, 0.3, 0.3]], + columns=[ + 'ImageID', 'LabelName', 'XMin', 'XMax', 'YMin', 'YMax', 'Score' + ]) + class_label_map = {'/m/04bcr3': 1, '/m/083vt': 2, '/m/02gy9n': 3} + prediction_dictionary = utils.build_predictions_dictionary( + np_data, class_label_map) + + self.assertIn(standard_fields.DetectionResultFields.detection_boxes, + prediction_dictionary) + self.assertIn(standard_fields.DetectionResultFields.detection_classes, + prediction_dictionary) + self.assertIn(standard_fields.DetectionResultFields.detection_scores, + prediction_dictionary) + + self.assertAllEqual( + np.array([1, 3, 1]), prediction_dictionary[ + standard_fields.DetectionResultFields.detection_classes]) + expected_boxes_data = np.array([[0.5, 0.0, 0.6, 0.3], [0.3, 0.1, 0.4, 0.2], + [0.2, 0.0, 0.3, 0.1]]) + self.assertNDArrayNear( + expected_boxes_data, prediction_dictionary[ + standard_fields.DetectionResultFields.detection_boxes], 1e-5) + self.assertNDArrayNear( + np.array([0.1, 0.2, 0.3]), prediction_dictionary[ + standard_fields.DetectionResultFields.detection_scores], 1e-5) + + def testBuildGroundtruthDictionaryMasks(self): + mask1 = np.array([[0, 0, 1, 1], [0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0]], + dtype=np.uint8) + mask2 = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], + dtype=np.uint8) + + encoding1 = encode_mask(mask1) + encoding2 = encode_mask(mask2) + + np_data = pd.DataFrame( + [[ + 'fe58ec1b06db2bb7', mask1.shape[1], mask1.shape[0], '/m/04bcr3', + 0.0, 0.3, 0.5, 0.6, 0, None, encoding1 + ], + [ + 'fe58ec1b06db2bb7', None, None, '/m/02gy9n', 0.1, 0.2, 0.3, 0.4, 1, + None, None + ], + [ + 'fe58ec1b06db2bb7', mask2.shape[1], mask2.shape[0], '/m/02gy9n', + 0.5, 0.6, 0.8, 0.9, 0, None, encoding2 + ], + [ + 'fe58ec1b06db2bb7', None, None, '/m/04bcr3', None, None, None, + None, None, 1, None + ], + 
[ + 'fe58ec1b06db2bb7', None, None, '/m/083vt', None, None, None, None, + None, 0, None + ], + [ + 'fe58ec1b06db2bb7', None, None, '/m/02gy9n', None, None, None, + None, None, 1, None + ]], + columns=[ + 'ImageID', 'ImageWidth', 'ImageHeight', 'LabelName', 'XMin', 'XMax', + 'YMin', 'YMax', 'IsGroupOf', 'ConfidenceImageLabel', 'Mask' + ]) + class_label_map = {'/m/04bcr3': 1, '/m/083vt': 2, '/m/02gy9n': 3} + groundtruth_dictionary = utils.build_groundtruth_dictionary( + np_data, class_label_map) + self.assertIn(standard_fields.InputDataFields.groundtruth_boxes, + groundtruth_dictionary) + self.assertIn(standard_fields.InputDataFields.groundtruth_classes, + groundtruth_dictionary) + self.assertIn(standard_fields.InputDataFields.groundtruth_group_of, + groundtruth_dictionary) + self.assertIn(standard_fields.InputDataFields.groundtruth_image_classes, + groundtruth_dictionary) + self.assertIn(standard_fields.InputDataFields.groundtruth_instance_masks, + groundtruth_dictionary) + self.assertAllEqual( + np.array([1, 3, 3]), groundtruth_dictionary[ + standard_fields.InputDataFields.groundtruth_classes]) + self.assertAllEqual( + np.array([0, 1, 0]), groundtruth_dictionary[ + standard_fields.InputDataFields.groundtruth_group_of]) + + expected_boxes_data = np.array([[0.5, 0.0, 0.6, 0.3], [0.3, 0.1, 0.4, 0.2], + [0.8, 0.5, 0.9, 0.6]]) + + self.assertNDArrayNear( + expected_boxes_data, groundtruth_dictionary[ + standard_fields.InputDataFields.groundtruth_boxes], 1e-5) + self.assertAllEqual( + np.array([1, 2, 3]), groundtruth_dictionary[ + standard_fields.InputDataFields.groundtruth_image_classes]) + + expected_segm = np.concatenate([ + np.expand_dims(mask1, 0), + np.zeros((1, 4, 4), dtype=np.uint8), + np.expand_dims(mask2, 0) + ], + axis=0) + self.assertAllEqual( + expected_segm, groundtruth_dictionary[ + standard_fields.InputDataFields.groundtruth_instance_masks]) + + def testBuildPredictionDictionaryMasks(self): + mask1 = np.array([[0, 0, 1, 1], [0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0]], + dtype=np.uint8) + mask2 = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], + dtype=np.uint8) + + encoding1 = encode_mask(mask1) + encoding2 = encode_mask(mask2) + + np_data = pd.DataFrame([[ + 'fe58ec1b06db2bb7', mask1.shape[1], mask1.shape[0], '/m/04bcr3', + encoding1, 0.8 + ], + [ + 'fe58ec1b06db2bb7', mask2.shape[1], + mask2.shape[0], '/m/02gy9n', encoding2, 0.6 + ]], + columns=[ + 'ImageID', 'ImageWidth', 'ImageHeight', + 'LabelName', 'Mask', 'Score' + ]) + class_label_map = {'/m/04bcr3': 1, '/m/02gy9n': 3} + prediction_dictionary = utils.build_predictions_dictionary( + np_data, class_label_map) + + self.assertIn(standard_fields.DetectionResultFields.detection_boxes, + prediction_dictionary) + self.assertIn(standard_fields.DetectionResultFields.detection_classes, + prediction_dictionary) + self.assertIn(standard_fields.DetectionResultFields.detection_scores, + prediction_dictionary) + self.assertIn(standard_fields.DetectionResultFields.detection_masks, + prediction_dictionary) + + self.assertAllEqual( + np.array([1, 3]), prediction_dictionary[ + standard_fields.DetectionResultFields.detection_classes]) + + expected_boxes_data = np.array([[0.0, 0.5, 0.5, 1.0], [0, 0, 0, 0]]) + self.assertNDArrayNear( + expected_boxes_data, prediction_dictionary[ + standard_fields.DetectionResultFields.detection_boxes], 1e-5) + self.assertNDArrayNear( + np.array([0.8, 0.6]), prediction_dictionary[ + standard_fields.DetectionResultFields.detection_scores], 1e-5) + expected_segm = np.concatenate( + 
[np.expand_dims(mask1, 0), + np.expand_dims(mask2, 0)], axis=0) + self.assertAllEqual( + expected_segm, prediction_dictionary[ + standard_fields.DetectionResultFields.detection_masks]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/metrics/oid_vrd_challenge_evaluation.py b/models/research/object_detection/metrics/oid_vrd_challenge_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..7a56c6bc0807ff00fcfaa261b4842995057b5015 --- /dev/null +++ b/models/research/object_detection/metrics/oid_vrd_challenge_evaluation.py @@ -0,0 +1,154 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Runs evaluation using OpenImages groundtruth and predictions. + +Example usage: +python \ +models/research/object_detection/metrics/oid_vrd_challenge_evaluation.py \ + --input_annotations_vrd=/path/to/input/annotations-human-bbox.csv \ + --input_annotations_labels=/path/to/input/annotations-label.csv \ + --input_class_labelmap=/path/to/input/class_labelmap.pbtxt \ + --input_relationship_labelmap=/path/to/input/relationship_labelmap.pbtxt \ + --input_predictions=/path/to/input/predictions.csv \ + --output_metrics=/path/to/output/metric.csv \ + +CSVs with bounding box annotations and image label (including the image URLs) +can be downloaded from the Open Images Challenge website: +https://storage.googleapis.com/openimages/web/challenge.html +The format of the input csv and the metrics itself are described on the +challenge website. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import pandas as pd +from google.protobuf import text_format + +from object_detection.metrics import io_utils +from object_detection.metrics import oid_vrd_challenge_evaluation_utils as utils +from object_detection.protos import string_int_label_map_pb2 +from object_detection.utils import vrd_evaluation + + +def _load_labelmap(labelmap_path): + """Loads labelmap from the labelmap path. + + Args: + labelmap_path: Path to the labelmap. + + Returns: + A dictionary mapping class name to class numerical id. + """ + + label_map = string_int_label_map_pb2.StringIntLabelMap() + with open(labelmap_path, 'r') as fid: + label_map_string = fid.read() + text_format.Merge(label_map_string, label_map) + labelmap_dict = {} + for item in label_map.item: + labelmap_dict[item.name] = item.id + return labelmap_dict + + +def _swap_labelmap_dict(labelmap_dict): + """Swaps keys and labels in labelmap. + + Args: + labelmap_dict: Input dictionary. + + Returns: + A dictionary mapping class name to class numerical id. 
+ """ + return dict((v, k) for k, v in labelmap_dict.iteritems()) + + +def main(parsed_args): + all_box_annotations = pd.read_csv(parsed_args.input_annotations_boxes) + all_label_annotations = pd.read_csv(parsed_args.input_annotations_labels) + all_annotations = pd.concat([all_box_annotations, all_label_annotations]) + + class_label_map = _load_labelmap(parsed_args.input_class_labelmap) + relationship_label_map = _load_labelmap( + parsed_args.input_relationship_labelmap) + + relation_evaluator = vrd_evaluation.VRDRelationDetectionEvaluator() + phrase_evaluator = vrd_evaluation.VRDPhraseDetectionEvaluator() + + for _, groundtruth in enumerate(all_annotations.groupby('ImageID')): + image_id, image_groundtruth = groundtruth + groundtruth_dictionary = utils.build_groundtruth_vrd_dictionary( + image_groundtruth, class_label_map, relationship_label_map) + + relation_evaluator.add_single_ground_truth_image_info( + image_id, groundtruth_dictionary) + phrase_evaluator.add_single_ground_truth_image_info(image_id, + groundtruth_dictionary) + + all_predictions = pd.read_csv(parsed_args.input_predictions) + for _, prediction_data in enumerate(all_predictions.groupby('ImageID')): + image_id, image_predictions = prediction_data + prediction_dictionary = utils.build_predictions_vrd_dictionary( + image_predictions, class_label_map, relationship_label_map) + + relation_evaluator.add_single_detected_image_info(image_id, + prediction_dictionary) + phrase_evaluator.add_single_detected_image_info(image_id, + prediction_dictionary) + + relation_metrics = relation_evaluator.evaluate( + relationships=_swap_labelmap_dict(relationship_label_map)) + phrase_metrics = phrase_evaluator.evaluate( + relationships=_swap_labelmap_dict(relationship_label_map)) + + with open(parsed_args.output_metrics, 'w') as fid: + io_utils.write_csv(fid, relation_metrics) + io_utils.write_csv(fid, phrase_metrics) + + +if __name__ == '__main__': + + parser = argparse.ArgumentParser( + description= + 'Evaluate Open Images Visual Relationship Detection predictions.') + parser.add_argument( + '--input_annotations_vrd', + required=True, + help='File with groundtruth vrd annotations.') + parser.add_argument( + '--input_annotations_labels', + required=True, + help='File with groundtruth labels annotations') + parser.add_argument( + '--input_predictions', + required=True, + help="""File with detection predictions; NOTE: no postprocessing is + applied in the evaluation script.""") + parser.add_argument( + '--input_class_labelmap', + required=True, + help="""OpenImages Challenge labelmap; note: it is expected to include + attributes.""") + parser.add_argument( + '--input_relationship_labelmap', + required=True, + help="""OpenImages Challenge relationship labelmap.""") + parser.add_argument( + '--output_metrics', required=True, help='Output file with csv metrics') + + args = parser.parse_args() + main(args) diff --git a/models/research/object_detection/metrics/oid_vrd_challenge_evaluation_utils.py b/models/research/object_detection/metrics/oid_vrd_challenge_evaluation_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..34be018c4b2d44ec0c43135b433423a4ac379b64 --- /dev/null +++ b/models/research/object_detection/metrics/oid_vrd_challenge_evaluation_utils.py @@ -0,0 +1,125 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Converts data from CSV format to the VRDDetectionEvaluator format.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from object_detection.core import standard_fields +from object_detection.utils import vrd_evaluation + + +def build_groundtruth_vrd_dictionary(data, class_label_map, + relationship_label_map): + """Builds a groundtruth dictionary from groundtruth data in CSV file. + + Args: + data: Pandas DataFrame with the groundtruth data for a single image. + class_label_map: Class labelmap from string label name to an integer. + relationship_label_map: Relationship type labelmap from string name to an + integer. + + Returns: + A dictionary with keys suitable for passing to + VRDDetectionEvaluator.add_single_ground_truth_image_info: + standard_fields.InputDataFields.groundtruth_boxes: A numpy array + of structures with the shape [M, 1], representing M tuples, each tuple + containing the same number of named bounding boxes. + Each box is of the format [y_min, x_min, y_max, x_max] (see + datatype vrd_box_data_type, single_box_data_type above). + standard_fields.InputDataFields.groundtruth_classes: A numpy array of + structures shape [M, 1], representing the class labels of the + corresponding bounding boxes and possibly additional classes (see + datatype label_data_type above). + standard_fields.InputDataFields.verified_labels: numpy array + of shape [K] containing verified labels. + """ + data_boxes = data[data.LabelName.isnull()] + data_labels = data[data.LabelName1.isnull()] + + boxes = np.zeros(data_boxes.shape[0], dtype=vrd_evaluation.vrd_box_data_type) + boxes['subject'] = data_boxes[['YMin1', 'XMin1', 'YMax1', + 'XMax1']].as_matrix() + boxes['object'] = data_boxes[['YMin2', 'XMin2', 'YMax2', 'XMax2']].as_matrix() + + labels = np.zeros(data_boxes.shape[0], dtype=vrd_evaluation.label_data_type) + labels['subject'] = data_boxes['LabelName1'].map( + lambda x: class_label_map[x]).as_matrix() + labels['object'] = data_boxes['LabelName2'].map( + lambda x: class_label_map[x]).as_matrix() + labels['relation'] = data_boxes['RelationshipLabel'].map( + lambda x: relationship_label_map[x]).as_matrix() + + return { + standard_fields.InputDataFields.groundtruth_boxes: + boxes, + standard_fields.InputDataFields.groundtruth_classes: + labels, + standard_fields.InputDataFields.groundtruth_image_classes: + data_labels['LabelName'].map(lambda x: class_label_map[x]) + .as_matrix(), + } + + +def build_predictions_vrd_dictionary(data, class_label_map, + relationship_label_map): + """Builds a predictions dictionary from predictions data in CSV file. + + Args: + data: Pandas DataFrame with the predictions data for a single image. + class_label_map: Class labelmap from string label name to an integer. + relationship_label_map: Relationship type labelmap from string name to an + integer. 
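+
+  Note: `data` is expected to contain one row per predicted relationship
+  triplet, with columns LabelName1, LabelName2, XMin1, XMax1, YMin1, YMax1,
+  XMin2, XMax2, YMin2, YMax2, RelationshipLabel and Score (see the
+  accompanying unit test for an example).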
+ + Returns: + Dictionary with keys suitable for passing to + VRDDetectionEvaluator.add_single_detected_image_info: + standard_fields.DetectionResultFields.detection_boxes: A numpy array of + structures with shape [N, 1], representing N tuples, each tuple + containing the same number of named bounding boxes. + Each box is of the format [y_min, x_min, y_max, x_max] (as an example + see datatype vrd_box_data_type, single_box_data_type above). + standard_fields.DetectionResultFields.detection_scores: float32 numpy + array of shape [N] containing detection scores for the boxes. + standard_fields.DetectionResultFields.detection_classes: A numpy array + of structures shape [N, 1], representing the class labels of the + corresponding bounding boxes and possibly additional classes (see + datatype label_data_type above). + """ + data_boxes = data + + boxes = np.zeros(data_boxes.shape[0], dtype=vrd_evaluation.vrd_box_data_type) + boxes['subject'] = data_boxes[['YMin1', 'XMin1', 'YMax1', + 'XMax1']].as_matrix() + boxes['object'] = data_boxes[['YMin2', 'XMin2', 'YMax2', 'XMax2']].as_matrix() + + labels = np.zeros(data_boxes.shape[0], dtype=vrd_evaluation.label_data_type) + labels['subject'] = data_boxes['LabelName1'].map( + lambda x: class_label_map[x]).as_matrix() + labels['object'] = data_boxes['LabelName2'].map( + lambda x: class_label_map[x]).as_matrix() + labels['relation'] = data_boxes['RelationshipLabel'].map( + lambda x: relationship_label_map[x]).as_matrix() + + return { + standard_fields.DetectionResultFields.detection_boxes: + boxes, + standard_fields.DetectionResultFields.detection_classes: + labels, + standard_fields.DetectionResultFields.detection_scores: + data_boxes['Score'].as_matrix() + } diff --git a/models/research/object_detection/metrics/oid_vrd_challenge_evaluation_utils_test.py b/models/research/object_detection/metrics/oid_vrd_challenge_evaluation_utils_test.py new file mode 100644 index 0000000000000000000000000000000000000000..04547bbedacc687927d72070c46ffc273b5d95af --- /dev/null +++ b/models/research/object_detection/metrics/oid_vrd_challenge_evaluation_utils_test.py @@ -0,0 +1,149 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for oid_vrd_challenge_evaluation_utils.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import pandas as pd +import tensorflow.compat.v1 as tf +from object_detection.core import standard_fields +from object_detection.metrics import oid_vrd_challenge_evaluation_utils as utils +from object_detection.utils import vrd_evaluation + + +class OidVrdChallengeEvaluationUtilsTest(tf.test.TestCase): + + def testBuildGroundtruthDictionary(self): + np_data = pd.DataFrame( + [[ + 'fe58ec1b06db2bb7', '/m/04bcr3', '/m/083vt', 0.0, 0.3, 0.5, 0.6, + 0.0, 0.3, 0.5, 0.6, 'is', None, None + ], [ + 'fe58ec1b06db2bb7', '/m/04bcr3', '/m/02gy9n', 0.0, 0.3, 0.5, 0.6, + 0.1, 0.2, 0.3, 0.4, 'under', None, None + ], [ + 'fe58ec1b06db2bb7', '/m/04bcr3', '/m/083vt', 0.0, 0.1, 0.2, 0.3, + 0.0, 0.1, 0.2, 0.3, 'is', None, None + ], [ + 'fe58ec1b06db2bb7', '/m/083vt', '/m/04bcr3', 0.1, 0.2, 0.3, 0.4, + 0.5, 0.6, 0.7, 0.8, 'at', None, None + ], [ + 'fe58ec1b06db2bb7', None, None, None, None, None, None, None, None, + None, None, None, '/m/04bcr3', 1.0 + ], [ + 'fe58ec1b06db2bb7', None, None, None, None, None, None, None, None, + None, None, None, '/m/083vt', 0.0 + ], [ + 'fe58ec1b06db2bb7', None, None, None, None, None, None, None, None, + None, None, None, '/m/02gy9n', 0.0 + ]], + columns=[ + 'ImageID', 'LabelName1', 'LabelName2', 'XMin1', 'XMax1', 'YMin1', + 'YMax1', 'XMin2', 'XMax2', 'YMin2', 'YMax2', 'RelationshipLabel', + 'LabelName', 'Confidence' + ]) + class_label_map = {'/m/04bcr3': 1, '/m/083vt': 2, '/m/02gy9n': 3} + relationship_label_map = {'is': 1, 'under': 2, 'at': 3} + groundtruth_dictionary = utils.build_groundtruth_vrd_dictionary( + np_data, class_label_map, relationship_label_map) + + self.assertTrue(standard_fields.InputDataFields.groundtruth_boxes in + groundtruth_dictionary) + self.assertTrue(standard_fields.InputDataFields.groundtruth_classes in + groundtruth_dictionary) + self.assertTrue(standard_fields.InputDataFields.groundtruth_image_classes in + groundtruth_dictionary) + + self.assertAllEqual( + np.array( + [(1, 2, 1), (1, 3, 2), (1, 2, 1), (2, 1, 3)], + dtype=vrd_evaluation.label_data_type), groundtruth_dictionary[ + standard_fields.InputDataFields.groundtruth_classes]) + expected_vrd_data = np.array( + [ + ([0.5, 0.0, 0.6, 0.3], [0.5, 0.0, 0.6, 0.3]), + ([0.5, 0.0, 0.6, 0.3], [0.3, 0.1, 0.4, 0.2]), + ([0.2, 0.0, 0.3, 0.1], [0.2, 0.0, 0.3, 0.1]), + ([0.3, 0.1, 0.4, 0.2], [0.7, 0.5, 0.8, 0.6]), + ], + dtype=vrd_evaluation.vrd_box_data_type) + for field in expected_vrd_data.dtype.fields: + self.assertNDArrayNear( + expected_vrd_data[field], groundtruth_dictionary[ + standard_fields.InputDataFields.groundtruth_boxes][field], 1e-5) + self.assertAllEqual( + np.array([1, 2, 3]), groundtruth_dictionary[ + standard_fields.InputDataFields.groundtruth_image_classes]) + + def testBuildPredictionDictionary(self): + np_data = pd.DataFrame( + [[ + 'fe58ec1b06db2bb7', '/m/04bcr3', '/m/083vt', 0.0, 0.3, 0.5, 0.6, + 0.0, 0.3, 0.5, 0.6, 'is', 0.1 + ], [ + 'fe58ec1b06db2bb7', '/m/04bcr3', '/m/02gy9n', 0.0, 0.3, 0.5, 0.6, + 0.1, 0.2, 0.3, 0.4, 'under', 0.2 + ], [ + 'fe58ec1b06db2bb7', '/m/04bcr3', '/m/083vt', 0.0, 0.1, 0.2, 0.3, + 0.0, 0.1, 0.2, 0.3, 'is', 0.3 + ], [ + 'fe58ec1b06db2bb7', '/m/083vt', '/m/04bcr3', 0.1, 0.2, 0.3, 0.4, + 0.5, 0.6, 0.7, 0.8, 'at', 0.4 + ]], + columns=[ + 'ImageID', 'LabelName1', 'LabelName2', 'XMin1', 
'XMax1', 'YMin1', + 'YMax1', 'XMin2', 'XMax2', 'YMin2', 'YMax2', 'RelationshipLabel', + 'Score' + ]) + class_label_map = {'/m/04bcr3': 1, '/m/083vt': 2, '/m/02gy9n': 3} + relationship_label_map = {'is': 1, 'under': 2, 'at': 3} + prediction_dictionary = utils.build_predictions_vrd_dictionary( + np_data, class_label_map, relationship_label_map) + + self.assertTrue(standard_fields.DetectionResultFields.detection_boxes in + prediction_dictionary) + self.assertTrue(standard_fields.DetectionResultFields.detection_classes in + prediction_dictionary) + self.assertTrue(standard_fields.DetectionResultFields.detection_scores in + prediction_dictionary) + + self.assertAllEqual( + np.array( + [(1, 2, 1), (1, 3, 2), (1, 2, 1), (2, 1, 3)], + dtype=vrd_evaluation.label_data_type), prediction_dictionary[ + standard_fields.DetectionResultFields.detection_classes]) + expected_vrd_data = np.array( + [ + ([0.5, 0.0, 0.6, 0.3], [0.5, 0.0, 0.6, 0.3]), + ([0.5, 0.0, 0.6, 0.3], [0.3, 0.1, 0.4, 0.2]), + ([0.2, 0.0, 0.3, 0.1], [0.2, 0.0, 0.3, 0.1]), + ([0.3, 0.1, 0.4, 0.2], [0.7, 0.5, 0.8, 0.6]), + ], + dtype=vrd_evaluation.vrd_box_data_type) + for field in expected_vrd_data.dtype.fields: + self.assertNDArrayNear( + expected_vrd_data[field], prediction_dictionary[ + standard_fields.DetectionResultFields.detection_boxes][field], + 1e-5) + self.assertNDArrayNear( + np.array([0.1, 0.2, 0.3, 0.4]), prediction_dictionary[ + standard_fields.DetectionResultFields.detection_scores], 1e-5) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/metrics/tf_example_parser.py b/models/research/object_detection/metrics/tf_example_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..cb1535f89bf585d2be6d2d19183b7b97318aeade --- /dev/null +++ b/models/research/object_detection/metrics/tf_example_parser.py @@ -0,0 +1,159 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tensorflow Example proto parser for data loading. + +A parser to decode data containing serialized tensorflow.Example +protos into materialized tensors (numpy arrays). 
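+
+A minimal usage sketch (illustrative only):
+
+  parser = tf_example_parser.TfExampleDetectionAndGTParser()
+  results_dict = parser.parse(tf_example)  # tf_example: a tf.train.Example proto
+  if results_dict is None:
+    # The example could not be parsed or a required (non-optional) field
+    # was missing.
+    ...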
+""" + +import numpy as np + +from object_detection.core import data_parser +from object_detection.core import standard_fields as fields + + +class FloatParser(data_parser.DataToNumpyParser): + """Tensorflow Example float parser.""" + + def __init__(self, field_name): + self.field_name = field_name + + def parse(self, tf_example): + return np.array( + tf_example.features.feature[self.field_name].float_list.value, + dtype=np.float).transpose() if tf_example.features.feature[ + self.field_name].HasField("float_list") else None + + +class StringParser(data_parser.DataToNumpyParser): + """Tensorflow Example string parser.""" + + def __init__(self, field_name): + self.field_name = field_name + + def parse(self, tf_example): + return b"".join(tf_example.features.feature[ + self.field_name].bytes_list.value) if tf_example.features.feature[ + self.field_name].HasField("bytes_list") else None + + +class Int64Parser(data_parser.DataToNumpyParser): + """Tensorflow Example int64 parser.""" + + def __init__(self, field_name): + self.field_name = field_name + + def parse(self, tf_example): + return np.array( + tf_example.features.feature[self.field_name].int64_list.value, + dtype=np.int64).transpose() if tf_example.features.feature[ + self.field_name].HasField("int64_list") else None + + +class BoundingBoxParser(data_parser.DataToNumpyParser): + """Tensorflow Example bounding box parser.""" + + def __init__(self, xmin_field_name, ymin_field_name, xmax_field_name, + ymax_field_name): + self.field_names = [ + ymin_field_name, xmin_field_name, ymax_field_name, xmax_field_name + ] + + def parse(self, tf_example): + result = [] + parsed = True + for field_name in self.field_names: + result.append(tf_example.features.feature[field_name].float_list.value) + parsed &= ( + tf_example.features.feature[field_name].HasField("float_list")) + + return np.array(result).transpose() if parsed else None + + +class TfExampleDetectionAndGTParser(data_parser.DataToNumpyParser): + """Tensorflow Example proto parser.""" + + def __init__(self): + self.items_to_handlers = { + fields.DetectionResultFields.key: + StringParser(fields.TfExampleFields.source_id), + # Object ground truth boxes and classes. + fields.InputDataFields.groundtruth_boxes: (BoundingBoxParser( + fields.TfExampleFields.object_bbox_xmin, + fields.TfExampleFields.object_bbox_ymin, + fields.TfExampleFields.object_bbox_xmax, + fields.TfExampleFields.object_bbox_ymax)), + fields.InputDataFields.groundtruth_classes: ( + Int64Parser(fields.TfExampleFields.object_class_label)), + # Object detections. + fields.DetectionResultFields.detection_boxes: (BoundingBoxParser( + fields.TfExampleFields.detection_bbox_xmin, + fields.TfExampleFields.detection_bbox_ymin, + fields.TfExampleFields.detection_bbox_xmax, + fields.TfExampleFields.detection_bbox_ymax)), + fields.DetectionResultFields.detection_classes: ( + Int64Parser(fields.TfExampleFields.detection_class_label)), + fields.DetectionResultFields.detection_scores: ( + FloatParser(fields.TfExampleFields.detection_score)), + } + + self.optional_items_to_handlers = { + fields.InputDataFields.groundtruth_difficult: + Int64Parser(fields.TfExampleFields.object_difficult), + fields.InputDataFields.groundtruth_group_of: + Int64Parser(fields.TfExampleFields.object_group_of), + fields.InputDataFields.groundtruth_image_classes: + Int64Parser(fields.TfExampleFields.image_class_label), + } + + def parse(self, tf_example): + """Parses tensorflow example and returns a tensor dictionary. + + Args: + tf_example: a tf.Example object. 
+ + Returns: + A dictionary of the following numpy arrays: + fields.DetectionResultFields.source_id - string containing original image + id. + fields.InputDataFields.groundtruth_boxes - a numpy array containing + groundtruth boxes. + fields.InputDataFields.groundtruth_classes - a numpy array containing + groundtruth classes. + fields.InputDataFields.groundtruth_group_of - a numpy array containing + groundtruth group of flag (optional, None if not specified). + fields.InputDataFields.groundtruth_difficult - a numpy array containing + groundtruth difficult flag (optional, None if not specified). + fields.InputDataFields.groundtruth_image_classes - a numpy array + containing groundtruth image-level labels. + fields.DetectionResultFields.detection_boxes - a numpy array containing + detection boxes. + fields.DetectionResultFields.detection_classes - a numpy array containing + detection class labels. + fields.DetectionResultFields.detection_scores - a numpy array containing + detection scores. + Returns None if tf.Example was not parsed or non-optional fields were not + found. + """ + results_dict = {} + parsed = True + for key, parser in self.items_to_handlers.items(): + results_dict[key] = parser.parse(tf_example) + parsed &= (results_dict[key] is not None) + + for key, parser in self.optional_items_to_handlers.items(): + results_dict[key] = parser.parse(tf_example) + + return results_dict if parsed else None diff --git a/models/research/object_detection/metrics/tf_example_parser_test.py b/models/research/object_detection/metrics/tf_example_parser_test.py new file mode 100644 index 0000000000000000000000000000000000000000..c195c7376acdb58f341846031a255d630fd4be13 --- /dev/null +++ b/models/research/object_detection/metrics/tf_example_parser_test.py @@ -0,0 +1,197 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for object_detection.data_decoders.tf_example_parser.""" + +import numpy as np +import numpy.testing as np_testing +import tensorflow.compat.v1 as tf + +from object_detection.core import standard_fields as fields +from object_detection.metrics import tf_example_parser + + +class TfExampleDecoderTest(tf.test.TestCase): + + def _Int64Feature(self, value): + return tf.train.Feature(int64_list=tf.train.Int64List(value=value)) + + def _FloatFeature(self, value): + return tf.train.Feature(float_list=tf.train.FloatList(value=value)) + + def _BytesFeature(self, value): + return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) + + def testParseDetectionsAndGT(self): + source_id = b'abc.jpg' + # y_min, x_min, y_max, x_max + object_bb = np.array([[0.0, 0.5, 0.3], [0.0, 0.1, 0.6], [1.0, 0.6, 0.8], + [1.0, 0.6, 0.7]]).transpose() + detection_bb = np.array([[0.1, 0.2], [0.0, 0.8], [1.0, 0.6], + [1.0, 0.85]]).transpose() + + object_class_label = [1, 1, 2] + object_difficult = [1, 0, 0] + object_group_of = [0, 0, 1] + verified_labels = [1, 2, 3, 4] + detection_class_label = [2, 1] + detection_score = [0.5, 0.3] + features = { + fields.TfExampleFields.source_id: + self._BytesFeature(source_id), + fields.TfExampleFields.object_bbox_ymin: + self._FloatFeature(object_bb[:, 0].tolist()), + fields.TfExampleFields.object_bbox_xmin: + self._FloatFeature(object_bb[:, 1].tolist()), + fields.TfExampleFields.object_bbox_ymax: + self._FloatFeature(object_bb[:, 2].tolist()), + fields.TfExampleFields.object_bbox_xmax: + self._FloatFeature(object_bb[:, 3].tolist()), + fields.TfExampleFields.detection_bbox_ymin: + self._FloatFeature(detection_bb[:, 0].tolist()), + fields.TfExampleFields.detection_bbox_xmin: + self._FloatFeature(detection_bb[:, 1].tolist()), + fields.TfExampleFields.detection_bbox_ymax: + self._FloatFeature(detection_bb[:, 2].tolist()), + fields.TfExampleFields.detection_bbox_xmax: + self._FloatFeature(detection_bb[:, 3].tolist()), + fields.TfExampleFields.detection_class_label: + self._Int64Feature(detection_class_label), + fields.TfExampleFields.detection_score: + self._FloatFeature(detection_score), + } + + example = tf.train.Example(features=tf.train.Features(feature=features)) + parser = tf_example_parser.TfExampleDetectionAndGTParser() + + results_dict = parser.parse(example) + self.assertIsNone(results_dict) + + features[fields.TfExampleFields.object_class_label] = ( + self._Int64Feature(object_class_label)) + features[fields.TfExampleFields.object_difficult] = ( + self._Int64Feature(object_difficult)) + + example = tf.train.Example(features=tf.train.Features(feature=features)) + results_dict = parser.parse(example) + + self.assertIsNotNone(results_dict) + self.assertEqual(source_id, results_dict[fields.DetectionResultFields.key]) + np_testing.assert_almost_equal( + object_bb, results_dict[fields.InputDataFields.groundtruth_boxes]) + np_testing.assert_almost_equal( + detection_bb, + results_dict[fields.DetectionResultFields.detection_boxes]) + np_testing.assert_almost_equal( + detection_score, + results_dict[fields.DetectionResultFields.detection_scores]) + np_testing.assert_almost_equal( + detection_class_label, + results_dict[fields.DetectionResultFields.detection_classes]) + np_testing.assert_almost_equal( + object_difficult, + results_dict[fields.InputDataFields.groundtruth_difficult]) + np_testing.assert_almost_equal( + object_class_label, + 
results_dict[fields.InputDataFields.groundtruth_classes]) + + parser = tf_example_parser.TfExampleDetectionAndGTParser() + + features[fields.TfExampleFields.object_group_of] = ( + self._Int64Feature(object_group_of)) + + example = tf.train.Example(features=tf.train.Features(feature=features)) + results_dict = parser.parse(example) + self.assertIsNotNone(results_dict) + np_testing.assert_equal( + object_group_of, + results_dict[fields.InputDataFields.groundtruth_group_of]) + + features[fields.TfExampleFields.image_class_label] = ( + self._Int64Feature(verified_labels)) + + example = tf.train.Example(features=tf.train.Features(feature=features)) + results_dict = parser.parse(example) + self.assertIsNotNone(results_dict) + np_testing.assert_equal( + verified_labels, + results_dict[fields.InputDataFields.groundtruth_image_classes]) + + def testParseString(self): + string_val = b'abc' + features = {'string': self._BytesFeature(string_val)} + example = tf.train.Example(features=tf.train.Features(feature=features)) + + parser = tf_example_parser.StringParser('string') + result = parser.parse(example) + self.assertIsNotNone(result) + self.assertEqual(result, string_val) + + parser = tf_example_parser.StringParser('another_string') + result = parser.parse(example) + self.assertIsNone(result) + + def testParseFloat(self): + float_array_val = [1.5, 1.4, 2.0] + features = {'floats': self._FloatFeature(float_array_val)} + example = tf.train.Example(features=tf.train.Features(feature=features)) + + parser = tf_example_parser.FloatParser('floats') + result = parser.parse(example) + self.assertIsNotNone(result) + np_testing.assert_almost_equal(result, float_array_val) + + parser = tf_example_parser.StringParser('another_floats') + result = parser.parse(example) + self.assertIsNone(result) + + def testInt64Parser(self): + int_val = [1, 2, 3] + features = {'ints': self._Int64Feature(int_val)} + example = tf.train.Example(features=tf.train.Features(feature=features)) + + parser = tf_example_parser.Int64Parser('ints') + result = parser.parse(example) + self.assertIsNotNone(result) + np_testing.assert_almost_equal(result, int_val) + + parser = tf_example_parser.Int64Parser('another_ints') + result = parser.parse(example) + self.assertIsNone(result) + + def testBoundingBoxParser(self): + bounding_boxes = np.array([[0.0, 0.5, 0.3], [0.0, 0.1, 0.6], + [1.0, 0.6, 0.8], [1.0, 0.6, 0.7]]).transpose() + features = { + 'ymin': self._FloatFeature(bounding_boxes[:, 0]), + 'xmin': self._FloatFeature(bounding_boxes[:, 1]), + 'ymax': self._FloatFeature(bounding_boxes[:, 2]), + 'xmax': self._FloatFeature(bounding_boxes[:, 3]) + } + + example = tf.train.Example(features=tf.train.Features(feature=features)) + + parser = tf_example_parser.BoundingBoxParser('xmin', 'ymin', 'xmax', 'ymax') + result = parser.parse(example) + self.assertIsNotNone(result) + np_testing.assert_almost_equal(result, bounding_boxes) + + parser = tf_example_parser.BoundingBoxParser('xmin', 'ymin', 'xmax', + 'another_ymax') + result = parser.parse(example) + self.assertIsNone(result) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/model_hparams.py b/models/research/object_detection/model_hparams.py new file mode 100644 index 0000000000000000000000000000000000000000..12b043e9b1c8652aa856d919931074adf5ec18e4 --- /dev/null +++ b/models/research/object_detection/model_hparams.py @@ -0,0 +1,50 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Hyperparameters for the object detection model in TF.learn. + +This file consolidates and documents the hyperparameters used by the model. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# pylint: disable=g-import-not-at-top +try: + from tensorflow.contrib import training as contrib_training +except ImportError: + # TF 2.0 doesn't ship with contrib. + pass +# pylint: enable=g-import-not-at-top + + +def create_hparams(hparams_overrides=None): + """Returns hyperparameters, including any flag value overrides. + + Args: + hparams_overrides: Optional hparams overrides, represented as a + string containing comma-separated hparam_name=value pairs. + + Returns: + The hyperparameters as a tf.HParams object. + """ + hparams = contrib_training.HParams( + # Whether a fine tuning checkpoint (provided in the pipeline config) + # should be loaded for training. + load_pretrained=True) + # Override any of the preceding hyperparameter values. + if hparams_overrides: + hparams = hparams.parse(hparams_overrides) + return hparams diff --git a/models/research/object_detection/model_lib.py b/models/research/object_detection/model_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..365ea1c0261aa0b23c11460d2f5127632ed7b482 --- /dev/null +++ b/models/research/object_detection/model_lib.py @@ -0,0 +1,1013 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +r"""Constructs model, inputs, and training environment.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import functools +import os + +import tensorflow.compat.v1 as tf +import tensorflow.compat.v2 as tf2 +import tf_slim as slim + +from object_detection import eval_util +from object_detection import exporter as exporter_lib +from object_detection import inputs +from object_detection.builders import graph_rewriter_builder +from object_detection.builders import model_builder +from object_detection.builders import optimizer_builder +from object_detection.core import standard_fields as fields +from object_detection.utils import config_util +from object_detection.utils import label_map_util +from object_detection.utils import ops +from object_detection.utils import shape_utils +from object_detection.utils import variables_helper +from object_detection.utils import visualization_utils as vis_utils + +# pylint: disable=g-import-not-at-top +try: + from tensorflow.contrib import learn as contrib_learn + from tensorflow.contrib import tpu as contrib_tpu +except ImportError: + # TF 2.0 doesn't ship with contrib. + pass +# pylint: enable=g-import-not-at-top + +# A map of names to methods that help build the model. +MODEL_BUILD_UTIL_MAP = { + 'get_configs_from_pipeline_file': + config_util.get_configs_from_pipeline_file, + 'create_pipeline_proto_from_configs': + config_util.create_pipeline_proto_from_configs, + 'merge_external_params_with_configs': + config_util.merge_external_params_with_configs, + 'create_train_input_fn': + inputs.create_train_input_fn, + 'create_eval_input_fn': + inputs.create_eval_input_fn, + 'create_predict_input_fn': + inputs.create_predict_input_fn, + 'detection_model_fn_base': model_builder.build, +} + + +def _prepare_groundtruth_for_eval(detection_model, class_agnostic, + max_number_of_boxes): + """Extracts groundtruth data from detection_model and prepares it for eval. + + Args: + detection_model: A `DetectionModel` object. + class_agnostic: Whether the detections are class_agnostic. + max_number_of_boxes: Max number of groundtruth boxes. + + Returns: + A tuple of: + groundtruth: Dictionary with the following fields: + 'groundtruth_boxes': [batch_size, num_boxes, 4] float32 tensor of boxes, + in normalized coordinates. + 'groundtruth_classes': [batch_size, num_boxes] int64 tensor of 1-indexed + classes. + 'groundtruth_masks': 4D float32 tensor of instance masks (if provided in + groundtruth) + 'groundtruth_is_crowd': [batch_size, num_boxes] bool tensor indicating + is_crowd annotations (if provided in groundtruth). + 'groundtruth_area': [batch_size, num_boxes] float32 tensor indicating + the area (in the original absolute coordinates) of annotations (if + provided in groundtruth). + 'num_groundtruth_boxes': [batch_size] tensor containing the maximum number + of groundtruth boxes per image.. + 'groundtruth_keypoints': [batch_size, num_boxes, num_keypoints, 2] float32 + tensor of keypoints (if provided in groundtruth). + 'groundtruth_group_of': [batch_size, num_boxes] bool tensor indicating + group_of annotations (if provided in groundtruth). + 'groundtruth_labeled_classes': [batch_size, num_classes] int64 + tensor of 1-indexed classes. + class_agnostic: Boolean indicating whether detections are class agnostic. 
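+
+  Note: when the `class_agnostic` argument is True, the groundtruth one-hot
+  encodings collapse to all ones, so 'groundtruth_classes' contains only the
+  1-indexed class id 1.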
+ """ + input_data_fields = fields.InputDataFields() + groundtruth_boxes = tf.stack( + detection_model.groundtruth_lists(fields.BoxListFields.boxes)) + groundtruth_boxes_shape = tf.shape(groundtruth_boxes) + # For class-agnostic models, groundtruth one-hot encodings collapse to all + # ones. + if class_agnostic: + groundtruth_classes_one_hot = tf.ones( + [groundtruth_boxes_shape[0], groundtruth_boxes_shape[1], 1]) + else: + groundtruth_classes_one_hot = tf.stack( + detection_model.groundtruth_lists(fields.BoxListFields.classes)) + label_id_offset = 1 # Applying label id offset (b/63711816) + groundtruth_classes = ( + tf.argmax(groundtruth_classes_one_hot, axis=2) + label_id_offset) + groundtruth = { + input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: groundtruth_classes + } + if detection_model.groundtruth_has_field(fields.BoxListFields.masks): + groundtruth[input_data_fields.groundtruth_instance_masks] = tf.stack( + detection_model.groundtruth_lists(fields.BoxListFields.masks)) + + if detection_model.groundtruth_has_field(fields.BoxListFields.is_crowd): + groundtruth[input_data_fields.groundtruth_is_crowd] = tf.stack( + detection_model.groundtruth_lists(fields.BoxListFields.is_crowd)) + + if detection_model.groundtruth_has_field(input_data_fields.groundtruth_area): + groundtruth[input_data_fields.groundtruth_area] = tf.stack( + detection_model.groundtruth_lists(input_data_fields.groundtruth_area)) + + if detection_model.groundtruth_has_field(fields.BoxListFields.keypoints): + groundtruth[input_data_fields.groundtruth_keypoints] = tf.stack( + detection_model.groundtruth_lists(fields.BoxListFields.keypoints)) + + if detection_model.groundtruth_has_field( + fields.BoxListFields.keypoint_visibilities): + groundtruth[input_data_fields.groundtruth_keypoint_visibilities] = tf.stack( + detection_model.groundtruth_lists( + fields.BoxListFields.keypoint_visibilities)) + + if detection_model.groundtruth_has_field(fields.BoxListFields.group_of): + groundtruth[input_data_fields.groundtruth_group_of] = tf.stack( + detection_model.groundtruth_lists(fields.BoxListFields.group_of)) + + if detection_model.groundtruth_has_field( + fields.InputDataFields.groundtruth_labeled_classes): + labeled_classes_list = detection_model.groundtruth_lists( + fields.InputDataFields.groundtruth_labeled_classes) + labeled_classes = [ + tf.where(x)[:, 0] + label_id_offset for x in labeled_classes_list + ] + if len(labeled_classes) > 1: + num_classes = labeled_classes_list[0].shape[0] + padded_labeled_classes = [] + for x in labeled_classes: + padding = num_classes - tf.shape(x)[0] + padded_labeled_classes.append(tf.pad(x, [[0, padding]])) + groundtruth[input_data_fields.groundtruth_labeled_classes] = tf.stack( + padded_labeled_classes) + else: + groundtruth[input_data_fields.groundtruth_labeled_classes] = tf.stack( + labeled_classes) + + groundtruth[input_data_fields.num_groundtruth_boxes] = ( + tf.tile([max_number_of_boxes], multiples=[groundtruth_boxes_shape[0]])) + return groundtruth + + +def unstack_batch(tensor_dict, unpad_groundtruth_tensors=True): + """Unstacks all tensors in `tensor_dict` along 0th dimension. + + Unstacks tensor from the tensor dict along 0th dimension and returns a + tensor_dict containing values that are lists of unstacked, unpadded tensors. + + Tensors in the `tensor_dict` are expected to be of one of the three shapes: + 1. [batch_size] + 2. [batch_size, height, width, channels] + 3. [batch_size, num_boxes, d1, d2, ... 
dn] + + When unpad_groundtruth_tensors is set to true, unstacked tensors of form 3 + above are sliced along the `num_boxes` dimension using the value in tensor + field.InputDataFields.num_groundtruth_boxes. + + Note that this function has a static list of input data fields and has to be + kept in sync with the InputDataFields defined in core/standard_fields.py + + Args: + tensor_dict: A dictionary of batched groundtruth tensors. + unpad_groundtruth_tensors: Whether to remove padding along `num_boxes` + dimension of the groundtruth tensors. + + Returns: + A dictionary where the keys are from fields.InputDataFields and values are + a list of unstacked (optionally unpadded) tensors. + + Raises: + ValueError: If unpad_tensors is True and `tensor_dict` does not contain + `num_groundtruth_boxes` tensor. + """ + unbatched_tensor_dict = { + key: tf.unstack(tensor) for key, tensor in tensor_dict.items() + } + if unpad_groundtruth_tensors: + if (fields.InputDataFields.num_groundtruth_boxes not in + unbatched_tensor_dict): + raise ValueError('`num_groundtruth_boxes` not found in tensor_dict. ' + 'Keys available: {}'.format( + unbatched_tensor_dict.keys())) + unbatched_unpadded_tensor_dict = {} + unpad_keys = set([ + # List of input data fields that are padded along the num_boxes + # dimension. This list has to be kept in sync with InputDataFields in + # standard_fields.py. + fields.InputDataFields.groundtruth_instance_masks, + fields.InputDataFields.groundtruth_classes, + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_keypoints, + fields.InputDataFields.groundtruth_keypoint_visibilities, + fields.InputDataFields.groundtruth_group_of, + fields.InputDataFields.groundtruth_difficult, + fields.InputDataFields.groundtruth_is_crowd, + fields.InputDataFields.groundtruth_area, + fields.InputDataFields.groundtruth_weights + ]).intersection(set(unbatched_tensor_dict.keys())) + + for key in unpad_keys: + unpadded_tensor_list = [] + for num_gt, padded_tensor in zip( + unbatched_tensor_dict[fields.InputDataFields.num_groundtruth_boxes], + unbatched_tensor_dict[key]): + tensor_shape = shape_utils.combined_static_and_dynamic_shape( + padded_tensor) + slice_begin = tf.zeros([len(tensor_shape)], dtype=tf.int32) + slice_size = tf.stack( + [num_gt] + [-1 if dim is None else dim for dim in tensor_shape[1:]]) + unpadded_tensor = tf.slice(padded_tensor, slice_begin, slice_size) + unpadded_tensor_list.append(unpadded_tensor) + unbatched_unpadded_tensor_dict[key] = unpadded_tensor_list + + unbatched_tensor_dict.update(unbatched_unpadded_tensor_dict) + + return unbatched_tensor_dict + + +def provide_groundtruth(model, labels): + """Provides the labels to a model as groundtruth. + + This helper function extracts the corresponding boxes, classes, + keypoints, weights, masks, etc. from the labels, and provides it + as groundtruth to the models. + + Args: + model: The detection model to provide groundtruth to. + labels: The labels for the training or evaluation inputs. 
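+
+  Optional label fields (masks, keypoints, weights, confidences, is_crowd,
+  group_of, area, labeled classes) that are absent from `labels` are passed
+  to the model as None.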
+ """ + gt_boxes_list = labels[fields.InputDataFields.groundtruth_boxes] + gt_classes_list = labels[fields.InputDataFields.groundtruth_classes] + gt_masks_list = None + if fields.InputDataFields.groundtruth_instance_masks in labels: + gt_masks_list = labels[ + fields.InputDataFields.groundtruth_instance_masks] + gt_keypoints_list = None + if fields.InputDataFields.groundtruth_keypoints in labels: + gt_keypoints_list = labels[fields.InputDataFields.groundtruth_keypoints] + gt_keypoint_visibilities_list = None + if fields.InputDataFields.groundtruth_keypoint_visibilities in labels: + gt_keypoint_visibilities_list = labels[ + fields.InputDataFields.groundtruth_keypoint_visibilities] + gt_weights_list = None + if fields.InputDataFields.groundtruth_weights in labels: + gt_weights_list = labels[fields.InputDataFields.groundtruth_weights] + gt_confidences_list = None + if fields.InputDataFields.groundtruth_confidences in labels: + gt_confidences_list = labels[ + fields.InputDataFields.groundtruth_confidences] + gt_is_crowd_list = None + if fields.InputDataFields.groundtruth_is_crowd in labels: + gt_is_crowd_list = labels[fields.InputDataFields.groundtruth_is_crowd] + gt_group_of_list = None + if fields.InputDataFields.groundtruth_group_of in labels: + gt_group_of_list = labels[fields.InputDataFields.groundtruth_group_of] + gt_area_list = None + if fields.InputDataFields.groundtruth_area in labels: + gt_area_list = labels[fields.InputDataFields.groundtruth_area] + gt_labeled_classes = None + if fields.InputDataFields.groundtruth_labeled_classes in labels: + gt_labeled_classes = labels[ + fields.InputDataFields.groundtruth_labeled_classes] + model.provide_groundtruth( + groundtruth_boxes_list=gt_boxes_list, + groundtruth_classes_list=gt_classes_list, + groundtruth_confidences_list=gt_confidences_list, + groundtruth_labeled_classes=gt_labeled_classes, + groundtruth_masks_list=gt_masks_list, + groundtruth_keypoints_list=gt_keypoints_list, + groundtruth_keypoint_visibilities_list=gt_keypoint_visibilities_list, + groundtruth_weights_list=gt_weights_list, + groundtruth_is_crowd_list=gt_is_crowd_list, + groundtruth_group_of_list=gt_group_of_list, + groundtruth_area_list=gt_area_list) + + +def create_model_fn(detection_model_fn, configs, hparams, use_tpu=False, + postprocess_on_cpu=False): + """Creates a model function for `Estimator`. + + Args: + detection_model_fn: Function that returns a `DetectionModel` instance. + configs: Dictionary of pipeline config objects. + hparams: `HParams` object. + use_tpu: Boolean indicating whether model should be constructed for + use on TPU. + postprocess_on_cpu: When use_tpu and postprocess_on_cpu is true, postprocess + is scheduled on the host cpu. + + Returns: + `model_fn` for `Estimator`. + """ + train_config = configs['train_config'] + eval_input_config = configs['eval_input_config'] + eval_config = configs['eval_config'] + + def model_fn(features, labels, mode, params=None): + """Constructs the object detection model. + + Args: + features: Dictionary of feature tensors, returned from `input_fn`. + labels: Dictionary of groundtruth tensors if mode is TRAIN or EVAL, + otherwise None. + mode: Mode key from tf.estimator.ModeKeys. + params: Parameter dictionary passed from the estimator. + + Returns: + An `EstimatorSpec` that encapsulates the model and its serving + configurations. 
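+
+      When `use_tpu` is true and `mode` is not EVAL, a `TPUEstimatorSpec` is
+      returned instead; evaluation always runs on the CPU with a regular
+      `EstimatorSpec`.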
+ """ + params = params or {} + total_loss, train_op, detections, export_outputs = None, None, None, None + is_training = mode == tf.estimator.ModeKeys.TRAIN + + # Make sure to set the Keras learning phase. True during training, + # False for inference. + tf.keras.backend.set_learning_phase(is_training) + # Set policy for mixed-precision training with Keras-based models. + if use_tpu and train_config.use_bfloat16: + from tensorflow.python.keras.engine import base_layer_utils # pylint: disable=g-import-not-at-top + # Enable v2 behavior, as `mixed_bfloat16` is only supported in TF 2.0. + base_layer_utils.enable_v2_dtype_behavior() + tf2.keras.mixed_precision.experimental.set_policy( + 'mixed_bfloat16') + detection_model = detection_model_fn( + is_training=is_training, add_summaries=(not use_tpu)) + scaffold_fn = None + + if mode == tf.estimator.ModeKeys.TRAIN: + labels = unstack_batch( + labels, + unpad_groundtruth_tensors=train_config.unpad_groundtruth_tensors) + elif mode == tf.estimator.ModeKeys.EVAL: + # For evaling on train data, it is necessary to check whether groundtruth + # must be unpadded. + boxes_shape = ( + labels[fields.InputDataFields.groundtruth_boxes].get_shape() + .as_list()) + unpad_groundtruth_tensors = boxes_shape[1] is not None and not use_tpu + labels = unstack_batch( + labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors) + + if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL): + provide_groundtruth(detection_model, labels) + + preprocessed_images = features[fields.InputDataFields.image] + + side_inputs = detection_model.get_side_inputs(features) + + if use_tpu and train_config.use_bfloat16: + with contrib_tpu.bfloat16_scope(): + prediction_dict = detection_model.predict( + preprocessed_images, + features[fields.InputDataFields.true_image_shape], **side_inputs) + prediction_dict = ops.bfloat16_to_float32_nested(prediction_dict) + else: + prediction_dict = detection_model.predict( + preprocessed_images, + features[fields.InputDataFields.true_image_shape], **side_inputs) + + def postprocess_wrapper(args): + return detection_model.postprocess(args[0], args[1]) + + if mode in (tf.estimator.ModeKeys.EVAL, tf.estimator.ModeKeys.PREDICT): + if use_tpu and postprocess_on_cpu: + detections = contrib_tpu.outside_compilation( + postprocess_wrapper, + (prediction_dict, + features[fields.InputDataFields.true_image_shape])) + else: + detections = postprocess_wrapper(( + prediction_dict, + features[fields.InputDataFields.true_image_shape])) + + if mode == tf.estimator.ModeKeys.TRAIN: + load_pretrained = hparams.load_pretrained if hparams else False + if train_config.fine_tune_checkpoint and load_pretrained: + if not train_config.fine_tune_checkpoint_type: + # train_config.from_detection_checkpoint field is deprecated. For + # backward compatibility, set train_config.fine_tune_checkpoint_type + # based on train_config.from_detection_checkpoint. 
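+          # 'detection' restores variables from a checkpoint of a full
+          # detection model, while 'classification' restores only the feature
+          # extractor from an image-classification checkpoint.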
+ if train_config.from_detection_checkpoint: + train_config.fine_tune_checkpoint_type = 'detection' + else: + train_config.fine_tune_checkpoint_type = 'classification' + asg_map = detection_model.restore_map( + fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type, + load_all_detection_checkpoint_vars=( + train_config.load_all_detection_checkpoint_vars)) + available_var_map = ( + variables_helper.get_variables_available_in_checkpoint( + asg_map, + train_config.fine_tune_checkpoint, + include_global_step=False)) + if use_tpu: + + def tpu_scaffold(): + tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint, + available_var_map) + return tf.train.Scaffold() + + scaffold_fn = tpu_scaffold + else: + tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint, + available_var_map) + + if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL): + if (mode == tf.estimator.ModeKeys.EVAL and + eval_config.use_dummy_loss_in_eval): + total_loss = tf.constant(1.0) + losses_dict = {'Loss/total_loss': total_loss} + else: + losses_dict = detection_model.loss( + prediction_dict, features[fields.InputDataFields.true_image_shape]) + losses = [loss_tensor for loss_tensor in losses_dict.values()] + if train_config.add_regularization_loss: + regularization_losses = detection_model.regularization_losses() + if use_tpu and train_config.use_bfloat16: + regularization_losses = ops.bfloat16_to_float32_nested( + regularization_losses) + if regularization_losses: + regularization_loss = tf.add_n( + regularization_losses, name='regularization_loss') + losses.append(regularization_loss) + losses_dict['Loss/regularization_loss'] = regularization_loss + total_loss = tf.add_n(losses, name='total_loss') + losses_dict['Loss/total_loss'] = total_loss + + if 'graph_rewriter_config' in configs: + graph_rewriter_fn = graph_rewriter_builder.build( + configs['graph_rewriter_config'], is_training=is_training) + graph_rewriter_fn() + + # TODO(rathodv): Stop creating optimizer summary vars in EVAL mode once we + # can write learning rate summaries on TPU without host calls. + global_step = tf.train.get_or_create_global_step() + training_optimizer, optimizer_summary_vars = optimizer_builder.build( + train_config.optimizer) + + if mode == tf.estimator.ModeKeys.TRAIN: + if use_tpu: + training_optimizer = contrib_tpu.CrossShardOptimizer(training_optimizer) + + # Optionally freeze some layers by setting their gradients to be zero. 
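+      # Freezing works by filtering the variable list handed to the optimizer:
+      # variables matching `freeze_variables` are excluded and, when
+      # `update_trainable_variables` is set, only matching variables are kept
+      # (e.g. freezing 'FeatureExtractor' keeps the backbone weights fixed).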
+ trainable_variables = None + include_variables = ( + train_config.update_trainable_variables + if train_config.update_trainable_variables else None) + exclude_variables = ( + train_config.freeze_variables + if train_config.freeze_variables else None) + trainable_variables = slim.filter_variables( + tf.trainable_variables(), + include_patterns=include_variables, + exclude_patterns=exclude_variables) + + clip_gradients_value = None + if train_config.gradient_clipping_by_norm > 0: + clip_gradients_value = train_config.gradient_clipping_by_norm + + if not use_tpu: + for var in optimizer_summary_vars: + tf.summary.scalar(var.op.name, var) + summaries = [] if use_tpu else None + if train_config.summarize_gradients: + summaries = ['gradients', 'gradient_norm', 'global_gradient_norm'] + train_op = slim.optimizers.optimize_loss( + loss=total_loss, + global_step=global_step, + learning_rate=None, + clip_gradients=clip_gradients_value, + optimizer=training_optimizer, + update_ops=detection_model.updates(), + variables=trainable_variables, + summaries=summaries, + name='') # Preventing scope prefix on all variables. + + if mode == tf.estimator.ModeKeys.PREDICT: + exported_output = exporter_lib.add_output_tensor_nodes(detections) + export_outputs = { + tf.saved_model.signature_constants.PREDICT_METHOD_NAME: + tf.estimator.export.PredictOutput(exported_output) + } + + eval_metric_ops = None + scaffold = None + if mode == tf.estimator.ModeKeys.EVAL: + class_agnostic = ( + fields.DetectionResultFields.detection_classes not in detections) + groundtruth = _prepare_groundtruth_for_eval( + detection_model, class_agnostic, + eval_input_config.max_number_of_boxes) + use_original_images = fields.InputDataFields.original_image in features + if use_original_images: + eval_images = features[fields.InputDataFields.original_image] + true_image_shapes = tf.slice( + features[fields.InputDataFields.true_image_shape], [0, 0], [-1, 3]) + original_image_spatial_shapes = features[fields.InputDataFields + .original_image_spatial_shape] + else: + eval_images = features[fields.InputDataFields.image] + true_image_shapes = None + original_image_spatial_shapes = None + + eval_dict = eval_util.result_dict_for_batched_example( + eval_images, + features[inputs.HASH_KEY], + detections, + groundtruth, + class_agnostic=class_agnostic, + scale_to_absolute=True, + original_image_spatial_shapes=original_image_spatial_shapes, + true_image_shapes=true_image_shapes) + + if fields.InputDataFields.image_additional_channels in features: + eval_dict[fields.InputDataFields.image_additional_channels] = features[ + fields.InputDataFields.image_additional_channels] + + if class_agnostic: + category_index = label_map_util.create_class_agnostic_category_index() + else: + category_index = label_map_util.create_category_index_from_labelmap( + eval_input_config.label_map_path) + vis_metric_ops = None + if not use_tpu and use_original_images: + keypoint_edges = [ + (kp.start, kp.end) for kp in eval_config.keypoint_edge] + + eval_metric_op_vis = vis_utils.VisualizeSingleFrameDetections( + category_index, + max_examples_to_draw=eval_config.num_visualizations, + max_boxes_to_draw=eval_config.max_num_boxes_to_visualize, + min_score_thresh=eval_config.min_score_threshold, + use_normalized_coordinates=False, + keypoint_edges=keypoint_edges or None) + vis_metric_ops = eval_metric_op_vis.get_estimator_eval_metric_ops( + eval_dict) + + # Eval metrics on a single example. 
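+      # Combine the detection metrics produced by the evaluators configured in
+      # eval_config with per-loss means and, when present, visualization ops.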
+ eval_metric_ops = eval_util.get_eval_metric_ops_for_evaluators( + eval_config, list(category_index.values()), eval_dict) + for loss_key, loss_tensor in iter(losses_dict.items()): + eval_metric_ops[loss_key] = tf.metrics.mean(loss_tensor) + for var in optimizer_summary_vars: + eval_metric_ops[var.op.name] = (var, tf.no_op()) + if vis_metric_ops is not None: + eval_metric_ops.update(vis_metric_ops) + eval_metric_ops = {str(k): v for k, v in eval_metric_ops.items()} + + if eval_config.use_moving_averages: + variable_averages = tf.train.ExponentialMovingAverage(0.0) + variables_to_restore = variable_averages.variables_to_restore() + keep_checkpoint_every_n_hours = ( + train_config.keep_checkpoint_every_n_hours) + saver = tf.train.Saver( + variables_to_restore, + keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours) + scaffold = tf.train.Scaffold(saver=saver) + + # EVAL executes on CPU, so use regular non-TPU EstimatorSpec. + if use_tpu and mode != tf.estimator.ModeKeys.EVAL: + return contrib_tpu.TPUEstimatorSpec( + mode=mode, + scaffold_fn=scaffold_fn, + predictions=detections, + loss=total_loss, + train_op=train_op, + eval_metrics=eval_metric_ops, + export_outputs=export_outputs) + else: + if scaffold is None: + keep_checkpoint_every_n_hours = ( + train_config.keep_checkpoint_every_n_hours) + saver = tf.train.Saver( + sharded=True, + keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours, + save_relative_paths=True) + tf.add_to_collection(tf.GraphKeys.SAVERS, saver) + scaffold = tf.train.Scaffold(saver=saver) + return tf.estimator.EstimatorSpec( + mode=mode, + predictions=detections, + loss=total_loss, + train_op=train_op, + eval_metric_ops=eval_metric_ops, + export_outputs=export_outputs, + scaffold=scaffold) + + return model_fn + + +def create_estimator_and_inputs(run_config, + hparams, + pipeline_config_path, + config_override=None, + train_steps=None, + sample_1_of_n_eval_examples=1, + sample_1_of_n_eval_on_train_examples=1, + model_fn_creator=create_model_fn, + use_tpu_estimator=False, + use_tpu=False, + num_shards=1, + params=None, + override_eval_num_epochs=True, + save_final_config=False, + postprocess_on_cpu=False, + export_to_tpu=None, + **kwargs): + """Creates `Estimator`, input functions, and steps. + + Args: + run_config: A `RunConfig`. + hparams: A `HParams`. + pipeline_config_path: A path to a pipeline config file. + config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to + override the config from `pipeline_config_path`. + train_steps: Number of training steps. If None, the number of training steps + is set from the `TrainConfig` proto. + sample_1_of_n_eval_examples: Integer representing how often an eval example + should be sampled. If 1, will sample all examples. + sample_1_of_n_eval_on_train_examples: Similar to + `sample_1_of_n_eval_examples`, except controls the sampling of training + data for evaluation. + model_fn_creator: A function that creates a `model_fn` for `Estimator`. + Follows the signature: + + * Args: + * `detection_model_fn`: Function that returns `DetectionModel` instance. + * `configs`: Dictionary of pipeline config objects. + * `hparams`: `HParams` object. + * Returns: + `model_fn` for `Estimator`. + + use_tpu_estimator: Whether a `TPUEstimator` should be returned. If False, + an `Estimator` will be returned. + use_tpu: Boolean, whether training and evaluation should run on TPU. Only + used if `use_tpu_estimator` is True. + num_shards: Number of shards (TPU cores). Only used if `use_tpu_estimator` + is True. 
+ params: Parameter dictionary passed from the estimator. Only used if + `use_tpu_estimator` is True. + override_eval_num_epochs: Whether to overwrite the number of epochs to 1 for + eval_input. + save_final_config: Whether to save final config (obtained after applying + overrides) to `estimator.model_dir`. + postprocess_on_cpu: When use_tpu and postprocess_on_cpu are true, + postprocess is scheduled on the host cpu. + export_to_tpu: When use_tpu and export_to_tpu are true, + `export_savedmodel()` exports a metagraph for serving on TPU besides the + one on CPU. + **kwargs: Additional keyword arguments for configuration override. + + Returns: + A dictionary with the following fields: + 'estimator': An `Estimator` or `TPUEstimator`. + 'train_input_fn': A training input function. + 'eval_input_fns': A list of all evaluation input functions. + 'eval_input_names': A list of names for each evaluation input. + 'eval_on_train_input_fn': An evaluation-on-train input function. + 'predict_input_fn': A prediction input function. + 'train_steps': Number of training steps. Either directly from input or from + configuration. + """ + get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[ + 'get_configs_from_pipeline_file'] + merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[ + 'merge_external_params_with_configs'] + create_pipeline_proto_from_configs = MODEL_BUILD_UTIL_MAP[ + 'create_pipeline_proto_from_configs'] + create_train_input_fn = MODEL_BUILD_UTIL_MAP['create_train_input_fn'] + create_eval_input_fn = MODEL_BUILD_UTIL_MAP['create_eval_input_fn'] + create_predict_input_fn = MODEL_BUILD_UTIL_MAP['create_predict_input_fn'] + detection_model_fn_base = MODEL_BUILD_UTIL_MAP['detection_model_fn_base'] + + configs = get_configs_from_pipeline_file( + pipeline_config_path, config_override=config_override) + kwargs.update({ + 'train_steps': train_steps, + 'use_bfloat16': configs['train_config'].use_bfloat16 and use_tpu + }) + if sample_1_of_n_eval_examples >= 1: + kwargs.update({ + 'sample_1_of_n_eval_examples': sample_1_of_n_eval_examples + }) + if override_eval_num_epochs: + kwargs.update({'eval_num_epochs': 1}) + tf.logging.warning( + 'Forced number of epochs for all eval validations to be 1.') + configs = merge_external_params_with_configs( + configs, hparams, kwargs_dict=kwargs) + model_config = configs['model'] + train_config = configs['train_config'] + train_input_config = configs['train_input_config'] + eval_config = configs['eval_config'] + eval_input_configs = configs['eval_input_configs'] + eval_on_train_input_config = copy.deepcopy(train_input_config) + eval_on_train_input_config.sample_1_of_n_examples = ( + sample_1_of_n_eval_on_train_examples) + if override_eval_num_epochs and eval_on_train_input_config.num_epochs != 1: + tf.logging.warning('Expected number of evaluation epochs is 1, but ' + 'instead encountered `eval_on_train_input_config' + '.num_epochs` = ' + '{}. Overwriting `num_epochs` to 1.'.format( + eval_on_train_input_config.num_epochs)) + eval_on_train_input_config.num_epochs = 1 + + # update train_steps from config but only when non-zero value is provided + if train_steps is None and train_config.num_steps != 0: + train_steps = train_config.num_steps + + detection_model_fn = functools.partial( + detection_model_fn_base, model_config=model_config) + + # Create the input functions for TRAIN/EVAL/PREDICT. 
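+  # One eval input function is built per entry in `eval_input_configs`; the
+  # first eval input config is also reused for the prediction (serving) input.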
+ train_input_fn = create_train_input_fn( + train_config=train_config, + train_input_config=train_input_config, + model_config=model_config) + eval_input_fns = [ + create_eval_input_fn( + eval_config=eval_config, + eval_input_config=eval_input_config, + model_config=model_config) for eval_input_config in eval_input_configs + ] + eval_input_names = [ + eval_input_config.name for eval_input_config in eval_input_configs + ] + eval_on_train_input_fn = create_eval_input_fn( + eval_config=eval_config, + eval_input_config=eval_on_train_input_config, + model_config=model_config) + predict_input_fn = create_predict_input_fn( + model_config=model_config, predict_input_config=eval_input_configs[0]) + + # Read export_to_tpu from hparams if not passed. + if export_to_tpu is None: + export_to_tpu = hparams.get('export_to_tpu', False) + tf.logging.info('create_estimator_and_inputs: use_tpu %s, export_to_tpu %s', + use_tpu, export_to_tpu) + model_fn = model_fn_creator(detection_model_fn, configs, hparams, use_tpu, + postprocess_on_cpu) + if use_tpu_estimator: + estimator = contrib_tpu.TPUEstimator( + model_fn=model_fn, + train_batch_size=train_config.batch_size, + # For each core, only batch size 1 is supported for eval. + eval_batch_size=num_shards * 1 if use_tpu else 1, + use_tpu=use_tpu, + config=run_config, + export_to_tpu=export_to_tpu, + eval_on_tpu=False, # Eval runs on CPU, so disable eval on TPU + params=params if params else {}) + else: + estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config) + + # Write the as-run pipeline config to disk. + if run_config.is_chief and save_final_config: + pipeline_config_final = create_pipeline_proto_from_configs(configs) + config_util.save_pipeline_config(pipeline_config_final, estimator.model_dir) + + return dict( + estimator=estimator, + train_input_fn=train_input_fn, + eval_input_fns=eval_input_fns, + eval_input_names=eval_input_names, + eval_on_train_input_fn=eval_on_train_input_fn, + predict_input_fn=predict_input_fn, + train_steps=train_steps) + + +def create_train_and_eval_specs(train_input_fn, + eval_input_fns, + eval_on_train_input_fn, + predict_input_fn, + train_steps, + eval_on_train_data=False, + final_exporter_name='Servo', + eval_spec_names=None): + """Creates a `TrainSpec` and `EvalSpec`s. + + Args: + train_input_fn: Function that produces features and labels on train data. + eval_input_fns: A list of functions that produce features and labels on eval + data. + eval_on_train_input_fn: Function that produces features and labels for + evaluation on train data. + predict_input_fn: Function that produces features for inference. + train_steps: Number of training steps. + eval_on_train_data: Whether to evaluate model on training data. Default is + False. + final_exporter_name: String name given to `FinalExporter`. + eval_spec_names: A list of string names for each `EvalSpec`. + + Returns: + Tuple of `TrainSpec` and list of `EvalSpecs`. If `eval_on_train_data` is + True, the last `EvalSpec` in the list will correspond to training data. The + rest EvalSpecs in the list are evaluation datas. + """ + train_spec = tf.estimator.TrainSpec( + input_fn=train_input_fn, max_steps=train_steps) + + if eval_spec_names is None: + eval_spec_names = [str(i) for i in range(len(eval_input_fns))] + + eval_specs = [] + for index, (eval_spec_name, eval_input_fn) in enumerate( + zip(eval_spec_names, eval_input_fns)): + # Uses final_exporter_name as exporter_name for the first eval spec for + # backward compatibility. 
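+    # Illustrative naming: with final_exporter_name='Servo' and
+    # eval_spec_names=['holdout', 'extra'], the exporters become 'Servo' and
+    # 'Servo_extra'.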
+ if index == 0: + exporter_name = final_exporter_name + else: + exporter_name = '{}_{}'.format(final_exporter_name, eval_spec_name) + exporter = tf.estimator.FinalExporter( + name=exporter_name, serving_input_receiver_fn=predict_input_fn) + eval_specs.append( + tf.estimator.EvalSpec( + name=eval_spec_name, + input_fn=eval_input_fn, + steps=None, + exporters=exporter)) + + if eval_on_train_data: + eval_specs.append( + tf.estimator.EvalSpec( + name='eval_on_train', input_fn=eval_on_train_input_fn, steps=None)) + + return train_spec, eval_specs + + +def _evaluate_checkpoint(estimator, + input_fn, + checkpoint_path, + name, + max_retries=0): + """Evaluates a checkpoint. + + Args: + estimator: Estimator object to use for evaluation. + input_fn: Input function to use for evaluation. + checkpoint_path: Path of the checkpoint to evaluate. + name: Namescope for eval summary. + max_retries: Maximum number of times to retry the evaluation on encountering + a tf.errors.InvalidArgumentError. If negative, will always retry the + evaluation. + + Returns: + Estimator evaluation results. + """ + always_retry = True if max_retries < 0 else False + retries = 0 + while always_retry or retries <= max_retries: + try: + return estimator.evaluate( + input_fn=input_fn, + steps=None, + checkpoint_path=checkpoint_path, + name=name) + except tf.errors.InvalidArgumentError as e: + if always_retry or retries < max_retries: + tf.logging.info('Retrying checkpoint evaluation after exception: %s', e) + retries += 1 + else: + raise e + + +def continuous_eval(estimator, + model_dir, + input_fn, + train_steps, + name, + max_retries=0): + """Perform continuous evaluation on checkpoints written to a model directory. + + Args: + estimator: Estimator object to use for evaluation. + model_dir: Model directory to read checkpoints for continuous evaluation. + input_fn: Input function to use for evaluation. + train_steps: Number of training steps. This is used to infer the last + checkpoint and stop evaluation loop. + name: Namescope for eval summary. + max_retries: Maximum number of times to retry the evaluation on encountering + a tf.errors.InvalidArgumentError. If negative, will always retry the + evaluation. + """ + + def terminate_eval(): + tf.logging.info('Terminating eval after 180 seconds of no checkpoints') + return True + + for ckpt in tf.train.checkpoints_iterator( + model_dir, min_interval_secs=180, timeout=None, + timeout_fn=terminate_eval): + + tf.logging.info('Starting Evaluation.') + try: + eval_results = _evaluate_checkpoint( + estimator=estimator, + input_fn=input_fn, + checkpoint_path=ckpt, + name=name, + max_retries=max_retries) + tf.logging.info('Eval results: %s' % eval_results) + + # Terminate eval job when final checkpoint is reached + current_step = int(os.path.basename(ckpt).split('-')[1]) + if current_step >= train_steps: + tf.logging.info( + 'Evaluation finished after training step %d' % current_step) + break + + except tf.errors.NotFoundError: + tf.logging.info( + 'Checkpoint %s no longer exists, skipping checkpoint' % ckpt) + + +def populate_experiment(run_config, + hparams, + pipeline_config_path, + train_steps=None, + eval_steps=None, + model_fn_creator=create_model_fn, + **kwargs): + """Populates an `Experiment` object. + + EXPERIMENT CLASS IS DEPRECATED. Please switch to + tf.estimator.train_and_evaluate. As an example, see model_main.py. + + Args: + run_config: A `RunConfig`. + hparams: A `HParams`. + pipeline_config_path: A path to a pipeline config file. + train_steps: Number of training steps. 
If None, the number of training steps + is set from the `TrainConfig` proto. + eval_steps: Number of evaluation steps per evaluation cycle. If None, the + number of evaluation steps is set from the `EvalConfig` proto. + model_fn_creator: A function that creates a `model_fn` for `Estimator`. + Follows the signature: + + * Args: + * `detection_model_fn`: Function that returns `DetectionModel` instance. + * `configs`: Dictionary of pipeline config objects. + * `hparams`: `HParams` object. + * Returns: + `model_fn` for `Estimator`. + + **kwargs: Additional keyword arguments for configuration override. + + Returns: + An `Experiment` that defines all aspects of training, evaluation, and + export. + """ + tf.logging.warning('Experiment is being deprecated. Please use ' + 'tf.estimator.train_and_evaluate(). See model_main.py for ' + 'an example.') + train_and_eval_dict = create_estimator_and_inputs( + run_config, + hparams, + pipeline_config_path, + train_steps=train_steps, + eval_steps=eval_steps, + model_fn_creator=model_fn_creator, + save_final_config=True, + **kwargs) + estimator = train_and_eval_dict['estimator'] + train_input_fn = train_and_eval_dict['train_input_fn'] + eval_input_fns = train_and_eval_dict['eval_input_fns'] + predict_input_fn = train_and_eval_dict['predict_input_fn'] + train_steps = train_and_eval_dict['train_steps'] + + export_strategies = [ + contrib_learn.utils.saved_model_export_utils.make_export_strategy( + serving_input_fn=predict_input_fn) + ] + + return contrib_learn.Experiment( + estimator=estimator, + train_input_fn=train_input_fn, + eval_input_fn=eval_input_fns[0], + train_steps=train_steps, + eval_steps=None, + export_strategies=export_strategies, + eval_delay_secs=120, + ) diff --git a/models/research/object_detection/model_lib_tf1_test.py b/models/research/object_detection/model_lib_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..7d4d81b2cb43e0faa3d84f48df91c27d0da217bc --- /dev/null +++ b/models/research/object_detection/model_lib_tf1_test.py @@ -0,0 +1,505 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for object detection model library.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import os +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection import inputs +from object_detection import model_hparams +from object_detection import model_lib +from object_detection.builders import model_builder +from object_detection.core import standard_fields as fields +from object_detection.utils import config_util +from object_detection.utils import tf_version + + +# Model for test. Options are: +# 'ssd_inception_v2_pets', 'faster_rcnn_resnet50_pets' +MODEL_NAME_FOR_TEST = 'ssd_inception_v2_pets' + +# Model for testing keypoints. 
+MODEL_NAME_FOR_KEYPOINTS_TEST = 'ssd_mobilenet_v1_fpp' + +# Model for testing tfSequenceExample inputs. +MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST = 'context_rcnn_camera_trap' + + +def _get_data_path(model_name): + """Returns an absolute path to TFRecord file.""" + if model_name == MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST: + return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data', + 'snapshot_serengeti_sequence_examples.record') + else: + return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data', + 'pets_examples.record') + + +def get_pipeline_config_path(model_name): + """Returns path to the local pipeline config file.""" + if model_name == MODEL_NAME_FOR_KEYPOINTS_TEST: + return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data', + model_name + '.config') + elif model_name == MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST: + return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data', + model_name + '.config') + else: + return os.path.join(tf.resource_loader.get_data_files_path(), 'samples', + 'configs', model_name + '.config') + + +def _get_labelmap_path(): + """Returns an absolute path to label map file.""" + return os.path.join(tf.resource_loader.get_data_files_path(), 'data', + 'pet_label_map.pbtxt') + + +def _get_keypoints_labelmap_path(): + """Returns an absolute path to label map file.""" + return os.path.join(tf.resource_loader.get_data_files_path(), 'data', + 'face_person_with_keypoints_label_map.pbtxt') + + +def _get_sequence_example_labelmap_path(): + """Returns an absolute path to label map file.""" + return os.path.join(tf.resource_loader.get_data_files_path(), 'data', + 'snapshot_serengeti_label_map.pbtxt') + + +def _get_configs_for_model(model_name): + """Returns configurations for model.""" + filename = get_pipeline_config_path(model_name) + data_path = _get_data_path(model_name) + if model_name == MODEL_NAME_FOR_KEYPOINTS_TEST: + label_map_path = _get_keypoints_labelmap_path() + elif model_name == MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST: + label_map_path = _get_sequence_example_labelmap_path() + else: + label_map_path = _get_labelmap_path() + configs = config_util.get_configs_from_pipeline_file(filename) + override_dict = { + 'train_input_path': data_path, + 'eval_input_path': data_path, + 'label_map_path': label_map_path + } + configs = config_util.merge_external_params_with_configs( + configs, kwargs_dict=override_dict) + return configs + + +def _make_initializable_iterator(dataset): + """Creates an iterator, and initializes tables. + + Args: + dataset: A `tf.data.Dataset` object. + + Returns: + A `tf.data.Iterator`. 
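+
+  The iterator's initializer is added to the TABLE_INITIALIZERS collection so
+  that it runs together with table initialization.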
+ """ + iterator = tf.data.make_initializable_iterator(dataset) + tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer) + return iterator + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class ModelLibTest(tf.test.TestCase): + + @classmethod + def setUpClass(cls): + tf.reset_default_graph() + + def _assert_model_fn_for_train_eval(self, configs, mode, + class_agnostic=False): + model_config = configs['model'] + train_config = configs['train_config'] + with tf.Graph().as_default(): + if mode == 'train': + features, labels = _make_initializable_iterator( + inputs.create_train_input_fn(configs['train_config'], + configs['train_input_config'], + configs['model'])()).get_next() + model_mode = tf.estimator.ModeKeys.TRAIN + batch_size = train_config.batch_size + elif mode == 'eval': + features, labels = _make_initializable_iterator( + inputs.create_eval_input_fn(configs['eval_config'], + configs['eval_input_config'], + configs['model'])()).get_next() + model_mode = tf.estimator.ModeKeys.EVAL + batch_size = 1 + elif mode == 'eval_on_train': + features, labels = _make_initializable_iterator( + inputs.create_eval_input_fn(configs['eval_config'], + configs['train_input_config'], + configs['model'])()).get_next() + model_mode = tf.estimator.ModeKeys.EVAL + batch_size = 1 + + detection_model_fn = functools.partial( + model_builder.build, model_config=model_config, is_training=True) + + hparams = model_hparams.create_hparams( + hparams_overrides='load_pretrained=false') + + model_fn = model_lib.create_model_fn(detection_model_fn, configs, hparams) + estimator_spec = model_fn(features, labels, model_mode) + + self.assertIsNotNone(estimator_spec.loss) + self.assertIsNotNone(estimator_spec.predictions) + if mode == 'eval' or mode == 'eval_on_train': + if class_agnostic: + self.assertNotIn('detection_classes', estimator_spec.predictions) + else: + detection_classes = estimator_spec.predictions['detection_classes'] + self.assertEqual(batch_size, detection_classes.shape.as_list()[0]) + self.assertEqual(tf.float32, detection_classes.dtype) + detection_boxes = estimator_spec.predictions['detection_boxes'] + detection_scores = estimator_spec.predictions['detection_scores'] + num_detections = estimator_spec.predictions['num_detections'] + self.assertEqual(batch_size, detection_boxes.shape.as_list()[0]) + self.assertEqual(tf.float32, detection_boxes.dtype) + self.assertEqual(batch_size, detection_scores.shape.as_list()[0]) + self.assertEqual(tf.float32, detection_scores.dtype) + self.assertEqual(tf.float32, num_detections.dtype) + if mode == 'eval': + self.assertIn('Detections_Left_Groundtruth_Right/0', + estimator_spec.eval_metric_ops) + if model_mode == tf.estimator.ModeKeys.TRAIN: + self.assertIsNotNone(estimator_spec.train_op) + return estimator_spec + + def _assert_model_fn_for_predict(self, configs): + model_config = configs['model'] + + with tf.Graph().as_default(): + features, _ = _make_initializable_iterator( + inputs.create_eval_input_fn(configs['eval_config'], + configs['eval_input_config'], + configs['model'])()).get_next() + detection_model_fn = functools.partial( + model_builder.build, model_config=model_config, is_training=False) + + hparams = model_hparams.create_hparams( + hparams_overrides='load_pretrained=false') + + model_fn = model_lib.create_model_fn(detection_model_fn, configs, hparams) + estimator_spec = model_fn(features, None, tf.estimator.ModeKeys.PREDICT) + + self.assertIsNone(estimator_spec.loss) + self.assertIsNone(estimator_spec.train_op) 
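+      # In PREDICT mode there is no loss or train op; only predictions and the
+      # serving signature (export_outputs) are populated.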
+ self.assertIsNotNone(estimator_spec.predictions) + self.assertIsNotNone(estimator_spec.export_outputs) + self.assertIn(tf.saved_model.signature_constants.PREDICT_METHOD_NAME, + estimator_spec.export_outputs) + + def test_model_fn_in_train_mode(self): + """Tests the model function in TRAIN mode.""" + configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) + self._assert_model_fn_for_train_eval(configs, 'train') + + def test_model_fn_in_train_mode_sequences(self): + """Tests the model function in TRAIN mode.""" + configs = _get_configs_for_model(MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST) + self._assert_model_fn_for_train_eval(configs, 'train') + + def test_model_fn_in_train_mode_freeze_all_variables(self): + """Tests model_fn TRAIN mode with all variables frozen.""" + configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) + configs['train_config'].freeze_variables.append('.*') + with self.assertRaisesRegexp(ValueError, 'No variables to optimize'): + self._assert_model_fn_for_train_eval(configs, 'train') + + def test_model_fn_in_train_mode_freeze_all_included_variables(self): + """Tests model_fn TRAIN mode with all included variables frozen.""" + configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) + train_config = configs['train_config'] + train_config.update_trainable_variables.append('FeatureExtractor') + train_config.freeze_variables.append('.*') + with self.assertRaisesRegexp(ValueError, 'No variables to optimize'): + self._assert_model_fn_for_train_eval(configs, 'train') + + def test_model_fn_in_train_mode_freeze_box_predictor(self): + """Tests model_fn TRAIN mode with FeatureExtractor variables frozen.""" + configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) + train_config = configs['train_config'] + train_config.update_trainable_variables.append('FeatureExtractor') + train_config.update_trainable_variables.append('BoxPredictor') + train_config.freeze_variables.append('FeatureExtractor') + self._assert_model_fn_for_train_eval(configs, 'train') + + def test_model_fn_in_eval_mode(self): + """Tests the model function in EVAL mode.""" + configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) + self._assert_model_fn_for_train_eval(configs, 'eval') + + def test_model_fn_in_eval_mode_sequences(self): + """Tests the model function in EVAL mode.""" + configs = _get_configs_for_model(MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST) + self._assert_model_fn_for_train_eval(configs, 'eval') + + def test_model_fn_in_keypoints_eval_mode(self): + """Tests the model function in EVAL mode with keypoints config.""" + configs = _get_configs_for_model(MODEL_NAME_FOR_KEYPOINTS_TEST) + estimator_spec = self._assert_model_fn_for_train_eval(configs, 'eval') + metric_ops = estimator_spec.eval_metric_ops + self.assertIn('Keypoints_Precision/mAP ByCategory/face', metric_ops) + self.assertIn('Keypoints_Precision/mAP ByCategory/PERSON', metric_ops) + detection_keypoints = estimator_spec.predictions['detection_keypoints'] + self.assertEqual(1, detection_keypoints.shape.as_list()[0]) + self.assertEqual(tf.float32, detection_keypoints.dtype) + + def test_model_fn_in_eval_on_train_mode(self): + """Tests the model function in EVAL mode with train data.""" + configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) + self._assert_model_fn_for_train_eval(configs, 'eval_on_train') + + def test_model_fn_in_predict_mode(self): + """Tests the model function in PREDICT mode.""" + configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) + self._assert_model_fn_for_predict(configs) + + def test_create_estimator_and_inputs(self): + """Tests that 
Estimator and input function are constructed correctly.""" + run_config = tf.estimator.RunConfig() + hparams = model_hparams.create_hparams( + hparams_overrides='load_pretrained=false') + pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) + train_steps = 20 + train_and_eval_dict = model_lib.create_estimator_and_inputs( + run_config, + hparams, + pipeline_config_path, + train_steps=train_steps) + estimator = train_and_eval_dict['estimator'] + train_steps = train_and_eval_dict['train_steps'] + self.assertIsInstance(estimator, tf.estimator.Estimator) + self.assertEqual(20, train_steps) + self.assertIn('train_input_fn', train_and_eval_dict) + self.assertIn('eval_input_fns', train_and_eval_dict) + self.assertIn('eval_on_train_input_fn', train_and_eval_dict) + + def test_create_estimator_and_inputs_sequence_example(self): + """Tests that Estimator and input function are constructed correctly.""" + run_config = tf.estimator.RunConfig() + hparams = model_hparams.create_hparams( + hparams_overrides='load_pretrained=false') + pipeline_config_path = get_pipeline_config_path( + MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST) + train_steps = 20 + train_and_eval_dict = model_lib.create_estimator_and_inputs( + run_config, + hparams, + pipeline_config_path, + train_steps=train_steps) + estimator = train_and_eval_dict['estimator'] + train_steps = train_and_eval_dict['train_steps'] + self.assertIsInstance(estimator, tf.estimator.Estimator) + self.assertEqual(20, train_steps) + self.assertIn('train_input_fn', train_and_eval_dict) + self.assertIn('eval_input_fns', train_and_eval_dict) + self.assertIn('eval_on_train_input_fn', train_and_eval_dict) + + def test_create_estimator_with_default_train_eval_steps(self): + """Tests that number of train/eval defaults to config values.""" + run_config = tf.estimator.RunConfig() + hparams = model_hparams.create_hparams( + hparams_overrides='load_pretrained=false') + pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) + configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) + config_train_steps = configs['train_config'].num_steps + train_and_eval_dict = model_lib.create_estimator_and_inputs( + run_config, hparams, pipeline_config_path) + estimator = train_and_eval_dict['estimator'] + train_steps = train_and_eval_dict['train_steps'] + + self.assertIsInstance(estimator, tf.estimator.Estimator) + self.assertEqual(config_train_steps, train_steps) + + def test_create_tpu_estimator_and_inputs(self): + """Tests that number of train/eval defaults to config values.""" + run_config = tf.estimator.tpu.RunConfig() + hparams = model_hparams.create_hparams( + hparams_overrides='load_pretrained=false') + pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) + train_steps = 20 + train_and_eval_dict = model_lib.create_estimator_and_inputs( + run_config, + hparams, + pipeline_config_path, + train_steps=train_steps, + use_tpu_estimator=True) + estimator = train_and_eval_dict['estimator'] + train_steps = train_and_eval_dict['train_steps'] + + self.assertIsInstance(estimator, tf.estimator.tpu.TPUEstimator) + self.assertEqual(20, train_steps) + + def test_create_train_and_eval_specs(self): + """Tests that `TrainSpec` and `EvalSpec` is created correctly.""" + run_config = tf.estimator.RunConfig() + hparams = model_hparams.create_hparams( + hparams_overrides='load_pretrained=false') + pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) + train_steps = 20 + train_and_eval_dict = model_lib.create_estimator_and_inputs( + 
run_config, + hparams, + pipeline_config_path, + train_steps=train_steps) + train_input_fn = train_and_eval_dict['train_input_fn'] + eval_input_fns = train_and_eval_dict['eval_input_fns'] + eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn'] + predict_input_fn = train_and_eval_dict['predict_input_fn'] + train_steps = train_and_eval_dict['train_steps'] + + train_spec, eval_specs = model_lib.create_train_and_eval_specs( + train_input_fn, + eval_input_fns, + eval_on_train_input_fn, + predict_input_fn, + train_steps, + eval_on_train_data=True, + final_exporter_name='exporter', + eval_spec_names=['holdout']) + self.assertEqual(train_steps, train_spec.max_steps) + self.assertEqual(2, len(eval_specs)) + self.assertEqual(None, eval_specs[0].steps) + self.assertEqual('holdout', eval_specs[0].name) + self.assertEqual('exporter', eval_specs[0].exporters[0].name) + self.assertEqual(None, eval_specs[1].steps) + self.assertEqual('eval_on_train', eval_specs[1].name) + + def test_experiment(self): + """Tests that the `Experiment` object is constructed correctly.""" + run_config = tf.estimator.RunConfig() + hparams = model_hparams.create_hparams( + hparams_overrides='load_pretrained=false') + pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) + experiment = model_lib.populate_experiment( + run_config, + hparams, + pipeline_config_path, + train_steps=10, + eval_steps=20) + self.assertEqual(10, experiment.train_steps) + self.assertEqual(None, experiment.eval_steps) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class UnbatchTensorsTest(tf.test.TestCase): + + def test_unbatch_without_unpadding(self): + image_placeholder = tf.placeholder(tf.float32, [2, None, None, None]) + groundtruth_boxes_placeholder = tf.placeholder(tf.float32, [2, None, None]) + groundtruth_classes_placeholder = tf.placeholder(tf.float32, + [2, None, None]) + groundtruth_weights_placeholder = tf.placeholder(tf.float32, [2, None]) + + tensor_dict = { + fields.InputDataFields.image: + image_placeholder, + fields.InputDataFields.groundtruth_boxes: + groundtruth_boxes_placeholder, + fields.InputDataFields.groundtruth_classes: + groundtruth_classes_placeholder, + fields.InputDataFields.groundtruth_weights: + groundtruth_weights_placeholder + } + unbatched_tensor_dict = model_lib.unstack_batch( + tensor_dict, unpad_groundtruth_tensors=False) + + with self.test_session() as sess: + unbatched_tensor_dict_out = sess.run( + unbatched_tensor_dict, + feed_dict={ + image_placeholder: + np.random.rand(2, 4, 4, 3).astype(np.float32), + groundtruth_boxes_placeholder: + np.random.rand(2, 5, 4).astype(np.float32), + groundtruth_classes_placeholder: + np.random.rand(2, 5, 6).astype(np.float32), + groundtruth_weights_placeholder: + np.random.rand(2, 5).astype(np.float32) + }) + for image_out in unbatched_tensor_dict_out[fields.InputDataFields.image]: + self.assertAllEqual(image_out.shape, [4, 4, 3]) + for groundtruth_boxes_out in unbatched_tensor_dict_out[ + fields.InputDataFields.groundtruth_boxes]: + self.assertAllEqual(groundtruth_boxes_out.shape, [5, 4]) + for groundtruth_classes_out in unbatched_tensor_dict_out[ + fields.InputDataFields.groundtruth_classes]: + self.assertAllEqual(groundtruth_classes_out.shape, [5, 6]) + for groundtruth_weights_out in unbatched_tensor_dict_out[ + fields.InputDataFields.groundtruth_weights]: + self.assertAllEqual(groundtruth_weights_out.shape, [5]) + + def test_unbatch_and_unpad_groundtruth_tensors(self): + image_placeholder = tf.placeholder(tf.float32, [2, 
None, None, None]) + groundtruth_boxes_placeholder = tf.placeholder(tf.float32, [2, 5, None]) + groundtruth_classes_placeholder = tf.placeholder(tf.float32, [2, 5, None]) + groundtruth_weights_placeholder = tf.placeholder(tf.float32, [2, 5]) + num_groundtruth_placeholder = tf.placeholder(tf.int32, [2]) + + tensor_dict = { + fields.InputDataFields.image: + image_placeholder, + fields.InputDataFields.groundtruth_boxes: + groundtruth_boxes_placeholder, + fields.InputDataFields.groundtruth_classes: + groundtruth_classes_placeholder, + fields.InputDataFields.groundtruth_weights: + groundtruth_weights_placeholder, + fields.InputDataFields.num_groundtruth_boxes: + num_groundtruth_placeholder + } + unbatched_tensor_dict = model_lib.unstack_batch( + tensor_dict, unpad_groundtruth_tensors=True) + with self.test_session() as sess: + unbatched_tensor_dict_out = sess.run( + unbatched_tensor_dict, + feed_dict={ + image_placeholder: + np.random.rand(2, 4, 4, 3).astype(np.float32), + groundtruth_boxes_placeholder: + np.random.rand(2, 5, 4).astype(np.float32), + groundtruth_classes_placeholder: + np.random.rand(2, 5, 6).astype(np.float32), + groundtruth_weights_placeholder: + np.random.rand(2, 5).astype(np.float32), + num_groundtruth_placeholder: + np.array([3, 3], np.int32) + }) + for image_out in unbatched_tensor_dict_out[fields.InputDataFields.image]: + self.assertAllEqual(image_out.shape, [4, 4, 3]) + for groundtruth_boxes_out in unbatched_tensor_dict_out[ + fields.InputDataFields.groundtruth_boxes]: + self.assertAllEqual(groundtruth_boxes_out.shape, [3, 4]) + for groundtruth_classes_out in unbatched_tensor_dict_out[ + fields.InputDataFields.groundtruth_classes]: + self.assertAllEqual(groundtruth_classes_out.shape, [3, 6]) + for groundtruth_weights_out in unbatched_tensor_dict_out[ + fields.InputDataFields.groundtruth_weights]: + self.assertAllEqual(groundtruth_weights_out.shape, [3]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/model_lib_tf2_test.py b/models/research/object_detection/model_lib_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..8c6d961722facae731804e66296948e62c6b6844 --- /dev/null +++ b/models/research/object_detection/model_lib_tf2_test.py @@ -0,0 +1,223 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for object detection model library.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import tempfile +import unittest +import numpy as np +import six +import tensorflow.compat.v1 as tf + +from object_detection import inputs +from object_detection import model_hparams +from object_detection import model_lib_v2 +from object_detection.builders import model_builder +from object_detection.core import model +from object_detection.protos import train_pb2 +from object_detection.utils import config_util +from object_detection.utils import tf_version + +if six.PY2: + import mock # pylint: disable=g-importing-member,g-import-not-at-top +else: + from unittest import mock # pylint: disable=g-importing-member,g-import-not-at-top + +# Model for test. Current options are: +# 'ssd_mobilenet_v2_pets_keras' +MODEL_NAME_FOR_TEST = 'ssd_mobilenet_v2_pets_keras' + + +def _get_data_path(): + """Returns an absolute path to TFRecord file.""" + return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data', + 'pets_examples.record') + + +def get_pipeline_config_path(model_name): + """Returns path to the local pipeline config file.""" + return os.path.join(tf.resource_loader.get_data_files_path(), 'samples', + 'configs', model_name + '.config') + + +def _get_labelmap_path(): + """Returns an absolute path to label map file.""" + return os.path.join(tf.resource_loader.get_data_files_path(), 'data', + 'pet_label_map.pbtxt') + + +def _get_config_kwarg_overrides(): + """Returns overrides to the configs that insert the correct local paths.""" + data_path = _get_data_path() + label_map_path = _get_labelmap_path() + return { + 'train_input_path': data_path, + 'eval_input_path': data_path, + 'label_map_path': label_map_path + } + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class ModelLibTest(tf.test.TestCase): + + @classmethod + def setUpClass(cls): # pylint:disable=g-missing-super-call + tf.keras.backend.clear_session() + + def test_train_loop_then_eval_loop(self): + """Tests that Estimator and input function are constructed correctly.""" + hparams = model_hparams.create_hparams( + hparams_overrides='load_pretrained=false') + pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) + config_kwarg_overrides = _get_config_kwarg_overrides() + model_dir = tf.test.get_temp_dir() + + train_steps = 2 + model_lib_v2.train_loop( + hparams, + pipeline_config_path, + model_dir=model_dir, + train_steps=train_steps, + checkpoint_every_n=1, + **config_kwarg_overrides) + + model_lib_v2.eval_continuously( + hparams, + pipeline_config_path, + model_dir=model_dir, + checkpoint_dir=model_dir, + train_steps=train_steps, + wait_interval=1, + timeout=10, + **config_kwarg_overrides) + + +class SimpleModel(model.DetectionModel): + """A model with a single weight vector.""" + + def __init__(self, num_classes=1): + super(SimpleModel, self).__init__(num_classes) + self.weight = tf.keras.backend.variable(np.ones(10), name='weight') + + def postprocess(self, prediction_dict, true_image_shapes): + return {} + + def updates(self): + return [] + + def restore_map(self, *args, **kwargs): + return {'model': self} + + def preprocess(self, _): + return tf.zeros((1, 128, 128, 3)), tf.constant([[128, 128, 3]]) + + def provide_groundtruth(self, *args, **kwargs): + pass + + def predict(self, pred_inputs, true_image_shapes): + return 
{'prediction': + tf.abs(tf.reduce_sum(self.weight) * tf.reduce_sum(pred_inputs))} + + def loss(self, prediction_dict, _): + return {'loss': tf.reduce_sum(prediction_dict['prediction'])} + + def regularization_losses(self): + return [] + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class ModelCheckpointTest(tf.test.TestCase): + """Test for model checkpoint related functionality.""" + + def test_checkpoint_max_to_keep(self): + """Test that only the most recent checkpoints are kept.""" + + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = SimpleModel() + + hparams = model_hparams.create_hparams( + hparams_overrides='load_pretrained=false') + pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) + config_kwarg_overrides = _get_config_kwarg_overrides() + model_dir = tempfile.mkdtemp(dir=self.get_temp_dir()) + + model_lib_v2.train_loop( + hparams, pipeline_config_path, model_dir=model_dir, + train_steps=20, checkpoint_every_n=2, checkpoint_max_to_keep=3, + **config_kwarg_overrides + ) + ckpt_files = tf.io.gfile.glob(os.path.join(model_dir, 'ckpt-*.index')) + self.assertEqual(len(ckpt_files), 3, + '{} not of length 3.'.format(ckpt_files)) + + +class IncompatibleModel(SimpleModel): + + def restore_map(self, *args, **kwargs): + return {'weight': self.weight} + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class CheckpointV2Test(tf.test.TestCase): + + def setUp(self): + super(CheckpointV2Test, self).setUp() + + self._model = SimpleModel() + tf.keras.backend.set_value(self._model.weight, np.ones(10) * 42) + ckpt = tf.train.Checkpoint(model=self._model) + + self._test_dir = tf.test.get_temp_dir() + self._ckpt_path = ckpt.save(os.path.join(self._test_dir, 'ckpt')) + tf.keras.backend.set_value(self._model.weight, np.ones(10)) + + pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) + configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) + configs = config_util.merge_external_params_with_configs( + configs, kwargs_dict=_get_config_kwarg_overrides()) + self._train_input_fn = inputs.create_train_input_fn( + configs['train_config'], + configs['train_input_config'], + configs['model']) + + def test_restore_v2(self): + """Test that restoring a v2 style checkpoint works.""" + + model_lib_v2.load_fine_tune_checkpoint( + self._model, self._ckpt_path, checkpoint_type='', + checkpoint_version=train_pb2.CheckpointVersion.V2, + load_all_detection_checkpoint_vars=True, + input_dataset=self._train_input_fn(), + unpad_groundtruth_tensors=True) + np.testing.assert_allclose(self._model.weight.numpy(), 42) + + def test_restore_map_incompatible_error(self): + """Test that restoring an incompatible restore map causes an error.""" + + with self.assertRaisesRegex(TypeError, + r'.*received a \(str -> ResourceVariable\).*'): + model_lib_v2.load_fine_tune_checkpoint( + IncompatibleModel(), self._ckpt_path, checkpoint_type='', + checkpoint_version=train_pb2.CheckpointVersion.V2, + load_all_detection_checkpoint_vars=True, + input_dataset=self._train_input_fn(), + unpad_groundtruth_tensors=True) + + diff --git a/models/research/object_detection/model_lib_v2.py b/models/research/object_detection/model_lib_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..29eb5a29578ec4cf53713e2fadae78e63e8c4bdb --- /dev/null +++ b/models/research/object_detection/model_lib_v2.py @@ -0,0 +1,971 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Constructs model, inputs, and training environment.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import os +import time + +import tensorflow.compat.v1 as tf + +from object_detection import eval_util +from object_detection import inputs +from object_detection import model_lib +from object_detection.builders import model_builder +from object_detection.builders import optimizer_builder +from object_detection.core import standard_fields as fields +from object_detection.protos import train_pb2 +from object_detection.utils import config_util +from object_detection.utils import label_map_util +from object_detection.utils import ops +from object_detection.utils import variables_helper +from object_detection.utils import visualization_utils as vutils + +# pylint: disable=g-import-not-at-top +try: + from tensorflow.contrib import tpu as contrib_tpu +except ImportError: + # TF 2.0 doesn't ship with contrib. + pass +# pylint: enable=g-import-not-at-top + +MODEL_BUILD_UTIL_MAP = model_lib.MODEL_BUILD_UTIL_MAP + +### NOTE: This file is a wip. +### TODO(kaftan): Explore adding unit tests for individual methods +### TODO(kaftan): Add unit test that checks training on a single image w/ +#### groundtruth, and verfiy that loss goes to zero. +#### Possibly have version that takes it as the whole train & eval dataset, +#### & verify the loss output from the eval_loop method. +### TODO(kaftan): Make sure the unit tests run in TAP presubmits or Kokoro + +RESTORE_MAP_ERROR_TEMPLATE = ( + 'Since we are restoring a v2 style checkpoint' + ' restore_map was expected to return a (str -> Model) mapping,' + ' but we received a ({} -> {}) mapping instead.' +) + + +def _compute_losses_and_predictions_dicts( + model, features, labels, + add_regularization_loss=True): + """Computes the losses dict and predictions dict for a model on inputs. + + Args: + model: a DetectionModel (based on Keras). + features: Dictionary of feature tensors from the input dataset. + Should be in the format output by `inputs.train_input` and + `inputs.eval_input`. + features[fields.InputDataFields.image] is a [batch_size, H, W, C] + float32 tensor with preprocessed images. + features[HASH_KEY] is a [batch_size] int32 tensor representing unique + identifiers for the images. + features[fields.InputDataFields.true_image_shape] is a [batch_size, 3] + int32 tensor representing the true image shapes, as preprocessed + images could be padded. + features[fields.InputDataFields.original_image] (optional) is a + [batch_size, H, W, C] float32 tensor with original images. + labels: A dictionary of groundtruth tensors post-unstacking. The original + labels are of the form returned by `inputs.train_input` and + `inputs.eval_input`. The shapes may have been modified by unstacking with + `model_lib.unstack_batch`. 
However, the dictionary includes the following + fields. + labels[fields.InputDataFields.num_groundtruth_boxes] is a + int32 tensor indicating the number of valid groundtruth boxes + per image. + labels[fields.InputDataFields.groundtruth_boxes] is a float32 tensor + containing the corners of the groundtruth boxes. + labels[fields.InputDataFields.groundtruth_classes] is a float32 + one-hot tensor of classes. + labels[fields.InputDataFields.groundtruth_weights] is a float32 tensor + containing groundtruth weights for the boxes. + -- Optional -- + labels[fields.InputDataFields.groundtruth_instance_masks] is a + float32 tensor containing only binary values, which represent + instance masks for objects. + labels[fields.InputDataFields.groundtruth_keypoints] is a + float32 tensor containing keypoints for each box. + labels[fields.InputDataFields.groundtruth_group_of] is a tf.bool tensor + containing group_of annotations. + labels[fields.InputDataFields.groundtruth_labeled_classes] is a float32 + k-hot tensor of classes. + add_regularization_loss: Whether or not to include the model's + regularization loss in the losses dictionary. + + Returns: + A tuple containing the losses dictionary (with the total loss under + the key 'Loss/total_loss'), and the predictions dictionary produced by + `model.predict`. + + """ + model_lib.provide_groundtruth(model, labels) + preprocessed_images = features[fields.InputDataFields.image] + + prediction_dict = model.predict( + preprocessed_images, + features[fields.InputDataFields.true_image_shape]) + prediction_dict = ops.bfloat16_to_float32_nested(prediction_dict) + + losses_dict = model.loss( + prediction_dict, features[fields.InputDataFields.true_image_shape]) + losses = [loss_tensor for loss_tensor in losses_dict.values()] + if add_regularization_loss: + # TODO(kaftan): As we figure out mixed precision & bfloat 16, we may + ## need to convert these regularization losses from bfloat16 to float32 + ## as well. + regularization_losses = model.regularization_losses() + if regularization_losses: + regularization_losses = ops.bfloat16_to_float32_nested( + regularization_losses) + regularization_loss = tf.add_n( + regularization_losses, name='regularization_loss') + losses.append(regularization_loss) + losses_dict['Loss/regularization_loss'] = regularization_loss + + total_loss = tf.add_n(losses, name='total_loss') + losses_dict['Loss/total_loss'] = total_loss + + return losses_dict, prediction_dict + + +# TODO(kaftan): Explore removing learning_rate from this method & returning +## The full losses dict instead of just total_loss, then doing all summaries +## saving in a utility method called by the outer training loop. +# TODO(kaftan): Explore adding gradient summaries +def eager_train_step(detection_model, + features, + labels, + unpad_groundtruth_tensors, + optimizer, + learning_rate, + add_regularization_loss=True, + clip_gradients_value=None, + global_step=None, + num_replicas=1.0): + """Process a single training batch. + + This method computes the loss for the model on a single training batch, + while tracking the gradients with a gradient tape. It then updates the + model variables with the optimizer, clipping the gradients if + clip_gradients_value is present. + + This method can run eagerly or inside a tf.function. + + Args: + detection_model: A DetectionModel (based on Keras) to train. + features: Dictionary of feature tensors from the input dataset. + Should be in the format output by `inputs.train_input. 
+ features[fields.InputDataFields.image] is a [batch_size, H, W, C] + float32 tensor with preprocessed images. + features[HASH_KEY] is a [batch_size] int32 tensor representing unique + identifiers for the images. + features[fields.InputDataFields.true_image_shape] is a [batch_size, 3] + int32 tensor representing the true image shapes, as preprocessed + images could be padded. + features[fields.InputDataFields.original_image] (optional, not used + during training) is a + [batch_size, H, W, C] float32 tensor with original images. + labels: A dictionary of groundtruth tensors. This method unstacks + these labels using model_lib.unstack_batch. The stacked labels are of + the form returned by `inputs.train_input` and `inputs.eval_input`. + labels[fields.InputDataFields.num_groundtruth_boxes] is a [batch_size] + int32 tensor indicating the number of valid groundtruth boxes + per image. + labels[fields.InputDataFields.groundtruth_boxes] is a + [batch_size, num_boxes, 4] float32 tensor containing the corners of + the groundtruth boxes. + labels[fields.InputDataFields.groundtruth_classes] is a + [batch_size, num_boxes, num_classes] float32 one-hot tensor of + classes. num_classes includes the background class. + labels[fields.InputDataFields.groundtruth_weights] is a + [batch_size, num_boxes] float32 tensor containing groundtruth weights + for the boxes. + -- Optional -- + labels[fields.InputDataFields.groundtruth_instance_masks] is a + [batch_size, num_boxes, H, W] float32 tensor containing only binary + values, which represent instance masks for objects. + labels[fields.InputDataFields.groundtruth_keypoints] is a + [batch_size, num_boxes, num_keypoints, 2] float32 tensor containing + keypoints for each box. + labels[fields.InputDataFields.groundtruth_labeled_classes] is a float32 + k-hot tensor of classes. + unpad_groundtruth_tensors: A parameter passed to unstack_batch. + optimizer: The training optimizer that will update the variables. + learning_rate: The learning rate tensor for the current training step. + This is used only for TensorBoard logging purposes, it does not affect + model training. + add_regularization_loss: Whether or not to include the model's + regularization loss in the losses dictionary. + clip_gradients_value: If this is present, clip the gradients global norm + at this value using `tf.clip_by_global_norm`. + global_step: The current training step. Used for TensorBoard logging + purposes. This step is not updated by this function and must be + incremented separately. + num_replicas: The number of replicas in the current distribution strategy. + This is used to scale the total loss so that training in a distribution + strategy works correctly. 
+ + Returns: + The total loss observed at this training step + """ + # """Execute a single training step in the TF v2 style loop.""" + is_training = True + + detection_model._is_training = is_training # pylint: disable=protected-access + tf.keras.backend.set_learning_phase(is_training) + + labels = model_lib.unstack_batch( + labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors) + + with tf.GradientTape() as tape: + losses_dict, _ = _compute_losses_and_predictions_dicts( + detection_model, features, labels, add_regularization_loss) + + total_loss = losses_dict['Loss/total_loss'] + + # Normalize loss for num replicas + total_loss = tf.math.divide(total_loss, + tf.constant(num_replicas, dtype=tf.float32)) + losses_dict['Loss/normalized_total_loss'] = total_loss + + for loss_type in losses_dict: + tf.compat.v2.summary.scalar( + loss_type, losses_dict[loss_type], step=global_step) + + trainable_variables = detection_model.trainable_variables + + gradients = tape.gradient(total_loss, trainable_variables) + + if clip_gradients_value: + gradients, _ = tf.clip_by_global_norm(gradients, clip_gradients_value) + optimizer.apply_gradients(zip(gradients, trainable_variables)) + tf.compat.v2.summary.scalar('learning_rate', learning_rate, step=global_step) + tf.compat.v2.summary.image( + name='train_input_images', + step=global_step, + data=features[fields.InputDataFields.image], + max_outputs=3) + return total_loss + + +def validate_tf_v2_checkpoint_restore_map(checkpoint_restore_map): + """Ensure that given dict is a valid TF v2 style restore map. + + Args: + checkpoint_restore_map: A dict mapping strings to tf.keras.Model objects. + + Raises: + ValueError: If they keys in checkpoint_restore_map are not strings or if + the values are not keras Model objects. + + """ + + for key, value in checkpoint_restore_map.items(): + if not (isinstance(key, str) and isinstance(value, tf.Module)): + raise TypeError(RESTORE_MAP_ERROR_TEMPLATE.format( + key.__class__.__name__, value.__class__.__name__)) + + +def load_fine_tune_checkpoint( + model, checkpoint_path, checkpoint_type, checkpoint_version, + load_all_detection_checkpoint_vars, input_dataset, + unpad_groundtruth_tensors): + """Load a fine tuning classification or detection checkpoint. + + To make sure the model variables are all built, this method first executes + the model by computing a dummy loss. (Models might not have built their + variables before their first execution) + + It then loads a variable-name based classification or detection checkpoint + that comes from converted TF 1.x slim model checkpoints. + + This method updates the model in-place and does not return a value. + + Args: + model: A DetectionModel (based on Keras) to load a fine-tuning + checkpoint for. + checkpoint_path: Directory with checkpoints file or path to checkpoint. + checkpoint_type: Whether to restore from a full detection + checkpoint (with compatible variable names) or to restore from a + classification checkpoint for initialization prior to training. + Valid values: `detection`, `classification`. + checkpoint_version: train_pb2.CheckpointVersion.V1 or V2 enum indicating + whether to load checkpoints in V1 style or V2 style. + load_all_detection_checkpoint_vars: whether to load all variables (when + `fine_tune_checkpoint_type` is `detection`). If False, only variables + within the feature extractor scopes are included. Default False. + input_dataset: The tf.data Dataset the model is being trained on. Needed + to get the shapes for the dummy loss computation. 
+ unpad_groundtruth_tensors: A parameter passed to unstack_batch. + """ + features, labels = iter(input_dataset).next() + + @tf.function + def _dummy_computation_fn(features, labels): + model._is_training = False # pylint: disable=protected-access + tf.keras.backend.set_learning_phase(False) + + labels = model_lib.unstack_batch( + labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors) + + return _compute_losses_and_predictions_dicts( + model, + features, + labels) + + strategy = tf.compat.v2.distribute.get_strategy() + strategy.run( + _dummy_computation_fn, args=( + features, + labels, + )) + + if checkpoint_version == train_pb2.CheckpointVersion.V1: + var_map = model.restore_map( + fine_tune_checkpoint_type=checkpoint_type, + load_all_detection_checkpoint_vars=( + load_all_detection_checkpoint_vars)) + available_var_map = variables_helper.get_variables_available_in_checkpoint( + var_map, + checkpoint_path, + include_global_step=False) + tf.train.init_from_checkpoint(checkpoint_path, + available_var_map) + elif checkpoint_version == train_pb2.CheckpointVersion.V2: + restore_map = model.restore_map( + fine_tune_checkpoint_type=checkpoint_type, + load_all_detection_checkpoint_vars=( + load_all_detection_checkpoint_vars)) + validate_tf_v2_checkpoint_restore_map(restore_map) + + ckpt = tf.train.Checkpoint(**restore_map) + ckpt.restore(checkpoint_path).assert_existing_objects_matched() + + +def get_filepath(strategy, filepath): + """Get appropriate filepath for worker. + + Args: + strategy: A tf.distribute.Strategy object. + filepath: A path to where the Checkpoint object is stored. + + Returns: + A temporary filepath for non-chief workers to use or the original filepath + for the chief. + """ + if strategy.extended.should_checkpoint: + return filepath + else: + # TODO(vighneshb) Replace with the public API when TF exposes it. + task_id = strategy.extended._task_id # pylint:disable=protected-access + return os.path.join(filepath, 'temp_worker_{:03d}'.format(task_id)) + + +def clean_temporary_directories(strategy, filepath): + """Temporary directory clean up for MultiWorker Mirrored Strategy. + + This is needed for all non-chief workers. + + Args: + strategy: A tf.distribute.Strategy object. + filepath: The filepath for the temporary directory. + """ + if not strategy.extended.should_checkpoint: + if tf.io.gfile.exists(filepath) and tf.io.gfile.isdir(filepath): + tf.io.gfile.rmtree(filepath) + + +def train_loop( + hparams, + pipeline_config_path, + model_dir, + config_override=None, + train_steps=None, + use_tpu=False, + save_final_config=False, + export_to_tpu=None, + checkpoint_every_n=1000, + checkpoint_max_to_keep=7, + **kwargs): + """Trains a model using eager + functions. + + This method: + 1. Processes the pipeline configs + 2. (Optionally) saves the as-run config + 3. Builds the model & optimizer + 4. Gets the training input data + 5. Loads a fine-tuning detection or classification checkpoint if requested + 6. Loops over the train data, executing distributed training steps inside + tf.functions. + 7. Checkpoints the model every `checkpoint_every_n` training steps. + 8. Logs the training metrics as TensorBoard summaries. + + Args: + hparams: A `HParams`. + pipeline_config_path: A path to a pipeline config file. + model_dir: + The directory to save checkpoints and summaries to. + config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to + override the config from `pipeline_config_path`. + train_steps: Number of training steps. 
If None, the number of training steps + is set from the `TrainConfig` proto. + use_tpu: Boolean, whether training and evaluation should run on TPU. + save_final_config: Whether to save final config (obtained after applying + overrides) to `model_dir`. + export_to_tpu: When use_tpu and export_to_tpu are true, + `export_savedmodel()` exports a metagraph for serving on TPU besides the + one on CPU. If export_to_tpu is not provided, we will look for it in + hparams too. + checkpoint_every_n: + Checkpoint every n training steps. + checkpoint_max_to_keep: + int, the number of most recent checkpoints to keep in the model directory. + **kwargs: Additional keyword arguments for configuration override. + """ + ## Parse the configs + get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[ + 'get_configs_from_pipeline_file'] + merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[ + 'merge_external_params_with_configs'] + create_pipeline_proto_from_configs = MODEL_BUILD_UTIL_MAP[ + 'create_pipeline_proto_from_configs'] + + configs = get_configs_from_pipeline_file( + pipeline_config_path, config_override=config_override) + kwargs.update({ + 'train_steps': train_steps, + 'use_bfloat16': configs['train_config'].use_bfloat16 and use_tpu + }) + configs = merge_external_params_with_configs( + configs, hparams, kwargs_dict=kwargs) + model_config = configs['model'] + train_config = configs['train_config'] + train_input_config = configs['train_input_config'] + + unpad_groundtruth_tensors = train_config.unpad_groundtruth_tensors + add_regularization_loss = train_config.add_regularization_loss + clip_gradients_value = None + if train_config.gradient_clipping_by_norm > 0: + clip_gradients_value = train_config.gradient_clipping_by_norm + + # update train_steps from config but only when non-zero value is provided + if train_steps is None and train_config.num_steps != 0: + train_steps = train_config.num_steps + + # Read export_to_tpu from hparams if not passed. + if export_to_tpu is None: + export_to_tpu = hparams.get('export_to_tpu', False) + tf.logging.info( + 'train_loop: use_tpu %s, export_to_tpu %s', use_tpu, + export_to_tpu) + + if kwargs['use_bfloat16']: + tf.compat.v2.keras.mixed_precision.experimental.set_policy('mixed_bfloat16') + + # Parse the checkpoint fine tuning configs + if hparams.load_pretrained: + fine_tune_checkpoint_path = train_config.fine_tune_checkpoint + else: + fine_tune_checkpoint_path = None + load_all_detection_checkpoint_vars = ( + train_config.load_all_detection_checkpoint_vars) + # TODO(kaftan) (or anyone else): move this piece of config munging to + ## utils/config_util.py + if not train_config.fine_tune_checkpoint_type: + # train_config.from_detection_checkpoint field is deprecated. For + # backward compatibility, set train_config.fine_tune_checkpoint_type + # based on train_config.from_detection_checkpoint. + if train_config.from_detection_checkpoint: + train_config.fine_tune_checkpoint_type = 'detection' + else: + train_config.fine_tune_checkpoint_type = 'classification' + fine_tune_checkpoint_type = train_config.fine_tune_checkpoint_type + fine_tune_checkpoint_version = train_config.fine_tune_checkpoint_version + + # Write the as-run pipeline config to disk. 
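+  # (This happens only when `save_final_config` is True: the merged config,
+  # including the **kwargs overrides applied above, is written under
+  # `model_dir` via config_util.save_pipeline_config.)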
+ if save_final_config: + pipeline_config_final = create_pipeline_proto_from_configs(configs) + config_util.save_pipeline_config(pipeline_config_final, model_dir) + + # Build the model, optimizer, and training input + strategy = tf.compat.v2.distribute.get_strategy() + with strategy.scope(): + detection_model = model_builder.build( + model_config=model_config, is_training=True) + + def train_dataset_fn(input_context): + """Callable to create train input.""" + # Create the inputs. + train_input = inputs.train_input( + train_config=train_config, + train_input_config=train_input_config, + model_config=model_config, + model=detection_model, + input_context=input_context) + train_input = train_input.repeat() + return train_input + + train_input = strategy.experimental_distribute_datasets_from_function( + train_dataset_fn) + + + global_step = tf.Variable( + 0, trainable=False, dtype=tf.compat.v2.dtypes.int64, name='global_step', + aggregation=tf.compat.v2.VariableAggregation.ONLY_FIRST_REPLICA) + optimizer, (learning_rate,) = optimizer_builder.build( + train_config.optimizer, global_step=global_step) + + if callable(learning_rate): + learning_rate_fn = learning_rate + else: + learning_rate_fn = lambda: learning_rate + + ## Train the model + # Get the appropriate filepath (temporary or not) based on whether the worker + # is the chief. + summary_writer_filepath = get_filepath(strategy, + os.path.join(model_dir, 'train')) + summary_writer = tf.compat.v2.summary.create_file_writer( + summary_writer_filepath) + + if use_tpu: + num_steps_per_iteration = 100 + else: + # TODO(b/135933080) Explore setting to 100 when GPU performance issues + # are fixed. + num_steps_per_iteration = 1 + + with summary_writer.as_default(): + with strategy.scope(): + with tf.compat.v2.summary.record_if( + lambda: global_step % num_steps_per_iteration == 0): + # Load a fine-tuning checkpoint. + if fine_tune_checkpoint_path: + load_fine_tune_checkpoint(detection_model, fine_tune_checkpoint_path, + fine_tune_checkpoint_type, + fine_tune_checkpoint_version, + load_all_detection_checkpoint_vars, + train_input, + unpad_groundtruth_tensors) + + ckpt = tf.compat.v2.train.Checkpoint( + step=global_step, model=detection_model, optimizer=optimizer) + + manager_dir = get_filepath(strategy, model_dir) + if not strategy.extended.should_checkpoint: + checkpoint_max_to_keep = 1 + manager = tf.compat.v2.train.CheckpointManager( + ckpt, manager_dir, max_to_keep=checkpoint_max_to_keep) + + # We use the following instead of manager.latest_checkpoint because + # manager_dir does not point to the model directory when we are running + # in a worker. 
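+        # Note: tf.train.latest_checkpoint returns None when `model_dir` holds
+        # no checkpoint yet; ckpt.restore(None) is then effectively a no-op, so
+        # training starts fresh (or from the fine-tune checkpoint loaded above).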
+ latest_checkpoint = tf.train.latest_checkpoint(model_dir) + ckpt.restore(latest_checkpoint) + + def train_step_fn(features, labels): + """Single train step.""" + loss = eager_train_step( + detection_model, + features, + labels, + unpad_groundtruth_tensors, + optimizer, + learning_rate=learning_rate_fn(), + add_regularization_loss=add_regularization_loss, + clip_gradients_value=clip_gradients_value, + global_step=global_step, + num_replicas=strategy.num_replicas_in_sync) + global_step.assign_add(1) + return loss + + def _sample_and_train(strategy, train_step_fn, data_iterator): + features, labels = data_iterator.next() + per_replica_losses = strategy.run( + train_step_fn, args=(features, labels)) + # TODO(anjalisridhar): explore if it is safe to remove the + ## num_replicas scaling of the loss and switch this to a ReduceOp.Mean + return strategy.reduce(tf.distribute.ReduceOp.SUM, + per_replica_losses, axis=None) + + @tf.function + def _dist_train_step(data_iterator): + """A distributed train step.""" + + if num_steps_per_iteration > 1: + for _ in tf.range(num_steps_per_iteration - 1): + _sample_and_train(strategy, train_step_fn, data_iterator) + + return _sample_and_train(strategy, train_step_fn, data_iterator) + + train_input_iter = iter(train_input) + + if int(global_step.value()) == 0: + manager.save() + + checkpointed_step = int(global_step.value()) + logged_step = global_step.value() + + last_step_time = time.time() + for _ in range(global_step.value(), train_steps, + num_steps_per_iteration): + + loss = _dist_train_step(train_input_iter) + + time_taken = time.time() - last_step_time + last_step_time = time.time() + + tf.compat.v2.summary.scalar( + 'steps_per_sec', num_steps_per_iteration * 1.0 / time_taken, + step=global_step) + + if global_step.value() - logged_step >= 100: + tf.logging.info( + 'Step {} per-step time {:.3f}s loss={:.3f}'.format( + global_step.value(), time_taken / num_steps_per_iteration, + loss)) + logged_step = global_step.value() + + if ((int(global_step.value()) - checkpointed_step) >= + checkpoint_every_n): + manager.save() + checkpointed_step = int(global_step.value()) + + # Remove the checkpoint directories of the non-chief workers that + # MultiWorkerMirroredStrategy forces us to save during sync distributed + # training. + clean_temporary_directories(strategy, manager_dir) + clean_temporary_directories(strategy, summary_writer_filepath) + + +def eager_eval_loop( + detection_model, + configs, + eval_dataset, + use_tpu=False, + postprocess_on_cpu=False, + global_step=None): + """Evaluate the model eagerly on the evaluation dataset. + + This method will compute the evaluation metrics specified in the configs on + the entire evaluation dataset, then return the metrics. It will also log + the metrics to TensorBoard. + + Args: + detection_model: A DetectionModel (based on Keras) to evaluate. + configs: Object detection configs that specify the evaluators that should + be used, as well as whether regularization loss should be included and + if bfloat16 should be used on TPUs. + eval_dataset: Dataset containing evaluation data. + use_tpu: Whether a TPU is being used to execute the model for evaluation. + postprocess_on_cpu: Whether model postprocessing should happen on + the CPU when using a TPU to execute the model. + global_step: A variable containing the training step this model was trained + to. Used for logging purposes. + + Returns: + A dict of evaluation metrics representing the results of this evaluation. 
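+
+  A minimal, hypothetical usage sketch (paths and variable names are
+  illustrative only; the model is built the same way `eval_continuously`
+  below builds it):
+
+    configs = config_util.get_configs_from_pipeline_file('pipeline.config')
+    model = model_builder.build(model_config=configs['model'], is_training=True)
+    eval_input = inputs.eval_input(
+        eval_config=configs['eval_config'],
+        eval_input_config=configs['eval_input_configs'][0],
+        model_config=configs['model'],
+        model=model)
+    metrics = eager_eval_loop(
+        model, configs, eval_input,
+        global_step=tf.compat.v2.Variable(0, trainable=False, dtype=tf.int64))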
+ """ + train_config = configs['train_config'] + eval_input_config = configs['eval_input_config'] + eval_config = configs['eval_config'] + add_regularization_loss = train_config.add_regularization_loss + + is_training = False + detection_model._is_training = is_training # pylint: disable=protected-access + tf.keras.backend.set_learning_phase(is_training) + + evaluator_options = eval_util.evaluator_options_from_eval_config( + eval_config) + + class_agnostic_category_index = ( + label_map_util.create_class_agnostic_category_index()) + class_agnostic_evaluators = eval_util.get_evaluators( + eval_config, + list(class_agnostic_category_index.values()), + evaluator_options) + + class_aware_evaluators = None + if eval_input_config.label_map_path: + class_aware_category_index = ( + label_map_util.create_category_index_from_labelmap( + eval_input_config.label_map_path)) + class_aware_evaluators = eval_util.get_evaluators( + eval_config, + list(class_aware_category_index.values()), + evaluator_options) + + evaluators = None + loss_metrics = {} + + @tf.function + def compute_eval_dict(features, labels): + """Compute the evaluation result on an image.""" + # For evaling on train data, it is necessary to check whether groundtruth + # must be unpadded. + boxes_shape = ( + labels[fields.InputDataFields.groundtruth_boxes].get_shape().as_list()) + unpad_groundtruth_tensors = boxes_shape[1] is not None and not use_tpu + labels = model_lib.unstack_batch( + labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors) + + losses_dict, prediction_dict = _compute_losses_and_predictions_dicts( + detection_model, features, labels, add_regularization_loss) + + def postprocess_wrapper(args): + return detection_model.postprocess(args[0], args[1]) + + # TODO(kaftan): Depending on how postprocessing will work for TPUS w/ + ## TPUStrategy, may be good to move wrapping to a utility method + if use_tpu and postprocess_on_cpu: + detections = contrib_tpu.outside_compilation( + postprocess_wrapper, + (prediction_dict, features[fields.InputDataFields.true_image_shape])) + else: + detections = postprocess_wrapper( + (prediction_dict, features[fields.InputDataFields.true_image_shape])) + + class_agnostic = ( + fields.DetectionResultFields.detection_classes not in detections) + # TODO(kaftan) (or anyone): move `_prepare_groundtruth_for_eval to eval_util + ## and call this from there. 
+ groundtruth = model_lib._prepare_groundtruth_for_eval( # pylint: disable=protected-access + detection_model, class_agnostic, eval_input_config.max_number_of_boxes) + use_original_images = fields.InputDataFields.original_image in features + if use_original_images: + eval_images = features[fields.InputDataFields.original_image] + true_image_shapes = tf.slice( + features[fields.InputDataFields.true_image_shape], [0, 0], [-1, 3]) + original_image_spatial_shapes = features[ + fields.InputDataFields.original_image_spatial_shape] + else: + eval_images = features[fields.InputDataFields.image] + true_image_shapes = None + original_image_spatial_shapes = None + + eval_dict = eval_util.result_dict_for_batched_example( + eval_images, + features[inputs.HASH_KEY], + detections, + groundtruth, + class_agnostic=class_agnostic, + scale_to_absolute=True, + original_image_spatial_shapes=original_image_spatial_shapes, + true_image_shapes=true_image_shapes) + + return eval_dict, losses_dict, class_agnostic + + for i, (features, labels) in enumerate(eval_dataset): + eval_dict, losses_dict, class_agnostic = compute_eval_dict(features, labels) + + if i % 100 == 0: + tf.logging.info('Finished eval step %d', i) + + use_original_images = fields.InputDataFields.original_image in features + if not use_tpu and use_original_images: + # Summary for input images. + tf.compat.v2.summary.image( + name='eval_input_images', + step=global_step, + data=eval_dict['original_image'], + max_outputs=1) + # Summary for prediction/groundtruth side-by-side images. + if class_agnostic: + category_index = label_map_util.create_class_agnostic_category_index() + else: + category_index = label_map_util.create_category_index_from_labelmap( + eval_input_config.label_map_path) + keypoint_edges = [ + (kp.start, kp.end) for kp in eval_config.keypoint_edge] + sbys_image_list = vutils.draw_side_by_side_evaluation_image( + eval_dict, + category_index=category_index, + max_boxes_to_draw=eval_config.max_num_boxes_to_visualize, + min_score_thresh=eval_config.min_score_threshold, + use_normalized_coordinates=False, + keypoint_edges=keypoint_edges or None) + sbys_images = tf.concat(sbys_image_list, axis=0) + tf.compat.v2.summary.image( + name='eval_side_by_side', + step=global_step, + data=sbys_images, + max_outputs=eval_config.num_visualizations) + + if evaluators is None: + if class_agnostic: + evaluators = class_agnostic_evaluators + else: + evaluators = class_aware_evaluators + + for evaluator in evaluators: + evaluator.add_eval_dict(eval_dict) + + for loss_key, loss_tensor in iter(losses_dict.items()): + if loss_key not in loss_metrics: + loss_metrics[loss_key] = tf.keras.metrics.Mean() + # Skip the loss with value equal or lower than 0.0 when calculating the + # average loss since they don't usually reflect the normal loss values + # causing spurious average loss value. 
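+      # Each loss key is accumulated in a tf.keras.metrics.Mean, so the values
+      # reported below are averages over all evaluation batches.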
+ if loss_tensor <= 0.0: + continue + loss_metrics[loss_key].update_state(loss_tensor) + + eval_metrics = {} + + for evaluator in evaluators: + eval_metrics.update(evaluator.evaluate()) + for loss_key in loss_metrics: + eval_metrics[loss_key] = loss_metrics[loss_key].result() + + eval_metrics = {str(k): v for k, v in eval_metrics.items()} + for k in eval_metrics: + tf.compat.v2.summary.scalar(k, eval_metrics[k], step=global_step) + + return eval_metrics + + +def eval_continuously( + hparams, + pipeline_config_path, + config_override=None, + train_steps=None, + sample_1_of_n_eval_examples=1, + sample_1_of_n_eval_on_train_examples=1, + use_tpu=False, + override_eval_num_epochs=True, + postprocess_on_cpu=False, + export_to_tpu=None, + model_dir=None, + checkpoint_dir=None, + wait_interval=180, + timeout=3600, + **kwargs): + """Run continuous evaluation of a detection model eagerly. + + This method builds the model, and continously restores it from the most + recent training checkpoint in the checkpoint directory & evaluates it + on the evaluation data. + + Args: + hparams: A `HParams`. + pipeline_config_path: A path to a pipeline config file. + config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to + override the config from `pipeline_config_path`. + train_steps: Number of training steps. If None, the number of training steps + is set from the `TrainConfig` proto. + sample_1_of_n_eval_examples: Integer representing how often an eval example + should be sampled. If 1, will sample all examples. + sample_1_of_n_eval_on_train_examples: Similar to + `sample_1_of_n_eval_examples`, except controls the sampling of training + data for evaluation. + use_tpu: Boolean, whether training and evaluation should run on TPU. + override_eval_num_epochs: Whether to overwrite the number of epochs to 1 for + eval_input. + postprocess_on_cpu: When use_tpu and postprocess_on_cpu are true, + postprocess is scheduled on the host cpu. + export_to_tpu: When use_tpu and export_to_tpu are true, + `export_savedmodel()` exports a metagraph for serving on TPU besides the + one on CPU. If export_to_tpu is not provided, we will look for it in + hparams too. + model_dir: Directory to output resulting evaluation summaries to. + checkpoint_dir: Directory that contains the training checkpoints. + wait_interval: The mimmum number of seconds to wait before checking for a + new checkpoint. + timeout: The maximum number of seconds to wait for a checkpoint. Execution + will terminate if no new checkpoints are found after these many seconds. + + **kwargs: Additional keyword arguments for configuration override. 
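+
+  A typical invocation (mirroring how model_main_tf2.py calls this function;
+  paths below are placeholders):
+
+    model_lib_v2.eval_continuously(
+        hparams=model_hparams.create_hparams(None),
+        pipeline_config_path='path/to/pipeline.config',
+        model_dir='path/to/eval_dir',
+        checkpoint_dir='path/to/train_dir',
+        wait_interval=300,
+        timeout=3600)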
+ """ + get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[ + 'get_configs_from_pipeline_file'] + merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[ + 'merge_external_params_with_configs'] + + configs = get_configs_from_pipeline_file( + pipeline_config_path, config_override=config_override) + kwargs.update({ + 'sample_1_of_n_eval_examples': sample_1_of_n_eval_examples, + 'use_bfloat16': configs['train_config'].use_bfloat16 and use_tpu + }) + if train_steps is not None: + kwargs['train_steps'] = train_steps + if override_eval_num_epochs: + kwargs.update({'eval_num_epochs': 1}) + tf.logging.warning( + 'Forced number of epochs for all eval validations to be 1.') + configs = merge_external_params_with_configs( + configs, hparams, kwargs_dict=kwargs) + model_config = configs['model'] + train_input_config = configs['train_input_config'] + eval_config = configs['eval_config'] + eval_input_configs = configs['eval_input_configs'] + eval_on_train_input_config = copy.deepcopy(train_input_config) + eval_on_train_input_config.sample_1_of_n_examples = ( + sample_1_of_n_eval_on_train_examples) + if override_eval_num_epochs and eval_on_train_input_config.num_epochs != 1: + tf.logging.warning('Expected number of evaluation epochs is 1, but ' + 'instead encountered `eval_on_train_input_config' + '.num_epochs` = ' + '{}. Overwriting `num_epochs` to 1.'.format( + eval_on_train_input_config.num_epochs)) + eval_on_train_input_config.num_epochs = 1 + + if kwargs['use_bfloat16']: + tf.compat.v2.keras.mixed_precision.experimental.set_policy('mixed_bfloat16') + + detection_model = model_builder.build( + model_config=model_config, is_training=True) + + # Create the inputs. + eval_inputs = [] + for eval_input_config in eval_input_configs: + next_eval_input = inputs.eval_input( + eval_config=eval_config, + eval_input_config=eval_input_config, + model_config=model_config, + model=detection_model) + eval_inputs.append((eval_input_config.name, next_eval_input)) + + # Read export_to_tpu from hparams if not passed. + if export_to_tpu is None: + export_to_tpu = hparams.get('export_to_tpu', False) + tf.logging.info('eval_continuously: use_tpu %s, export_to_tpu %s', + use_tpu, export_to_tpu) + + global_step = tf.compat.v2.Variable( + 0, trainable=False, dtype=tf.compat.v2.dtypes.int64) + + for latest_checkpoint in tf.train.checkpoints_iterator( + checkpoint_dir, timeout=timeout, min_interval_secs=wait_interval): + ckpt = tf.compat.v2.train.Checkpoint( + step=global_step, model=detection_model) + + ckpt.restore(latest_checkpoint).expect_partial() + + for eval_name, eval_input in eval_inputs: + summary_writer = tf.compat.v2.summary.create_file_writer( + model_dir + '/eval' + eval_name) + with summary_writer.as_default(): + eager_eval_loop( + detection_model, + configs, + eval_input, + use_tpu=use_tpu, + postprocess_on_cpu=postprocess_on_cpu, + global_step=global_step) diff --git a/models/research/object_detection/model_main.py b/models/research/object_detection/model_main.py new file mode 100644 index 0000000000000000000000000000000000000000..2636ad4bdf0e7a950af387cf97c8e53684efcb52 --- /dev/null +++ b/models/research/object_detection/model_main.py @@ -0,0 +1,114 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Binary to run train and evaluation on object detection model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import flags + +import tensorflow.compat.v1 as tf + +from object_detection import model_hparams +from object_detection import model_lib + +flags.DEFINE_string( + 'model_dir', None, 'Path to output model directory ' + 'where event and checkpoint files will be written.') +flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config ' + 'file.') +flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.') +flags.DEFINE_boolean('eval_training_data', False, + 'If training data should be evaluated for this job. Note ' + 'that one call only use this in eval-only mode, and ' + '`checkpoint_dir` must be supplied.') +flags.DEFINE_integer('sample_1_of_n_eval_examples', 1, 'Will sample one of ' + 'every n eval input examples, where n is provided.') +flags.DEFINE_integer('sample_1_of_n_eval_on_train_examples', 5, 'Will sample ' + 'one of every n train input examples for evaluation, ' + 'where n is provided. This is only used if ' + '`eval_training_data` is True.') +flags.DEFINE_string( + 'hparams_overrides', None, 'Hyperparameter overrides, ' + 'represented as a string containing comma-separated ' + 'hparam_name=value pairs.') +flags.DEFINE_string( + 'checkpoint_dir', None, 'Path to directory holding a checkpoint. If ' + '`checkpoint_dir` is provided, this binary operates in eval-only mode, ' + 'writing resulting metrics to `model_dir`.') +flags.DEFINE_boolean( + 'run_once', False, 'If running in eval-only mode, whether to run just ' + 'one round of eval vs running continuously (default).' +) +flags.DEFINE_integer( + 'max_eval_retries', 0, 'If running continuous eval, the maximum number of ' + 'retries upon encountering tf.errors.InvalidArgumentError. If negative, ' + 'will always retry the evaluation.' 
+) +FLAGS = flags.FLAGS + + +def main(unused_argv): + flags.mark_flag_as_required('model_dir') + flags.mark_flag_as_required('pipeline_config_path') + config = tf.estimator.RunConfig(model_dir=FLAGS.model_dir) + + train_and_eval_dict = model_lib.create_estimator_and_inputs( + run_config=config, + hparams=model_hparams.create_hparams(FLAGS.hparams_overrides), + pipeline_config_path=FLAGS.pipeline_config_path, + train_steps=FLAGS.num_train_steps, + sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples, + sample_1_of_n_eval_on_train_examples=( + FLAGS.sample_1_of_n_eval_on_train_examples)) + estimator = train_and_eval_dict['estimator'] + train_input_fn = train_and_eval_dict['train_input_fn'] + eval_input_fns = train_and_eval_dict['eval_input_fns'] + eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn'] + predict_input_fn = train_and_eval_dict['predict_input_fn'] + train_steps = train_and_eval_dict['train_steps'] + + if FLAGS.checkpoint_dir: + if FLAGS.eval_training_data: + name = 'training_data' + input_fn = eval_on_train_input_fn + else: + name = 'validation_data' + # The first eval input will be evaluated. + input_fn = eval_input_fns[0] + if FLAGS.run_once: + estimator.evaluate(input_fn, + steps=None, + checkpoint_path=tf.train.latest_checkpoint( + FLAGS.checkpoint_dir)) + else: + model_lib.continuous_eval(estimator, FLAGS.checkpoint_dir, input_fn, + train_steps, name, FLAGS.max_eval_retries) + else: + train_spec, eval_specs = model_lib.create_train_and_eval_specs( + train_input_fn, + eval_input_fns, + eval_on_train_input_fn, + predict_input_fn, + train_steps, + eval_on_train_data=False) + + # Currently only a single Eval Spec is allowed. + tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0]) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/object_detection/model_main_tf2.py b/models/research/object_detection/model_main_tf2.py new file mode 100644 index 0000000000000000000000000000000000000000..f6832ba8425b8939b815ccc880682267e640b354 --- /dev/null +++ b/models/research/object_detection/model_main_tf2.py @@ -0,0 +1,112 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""Creates and runs TF2 object detection models. + +################################## +NOTE: This module has not been fully tested; please bear with us while we iron +out the kinks. +################################## + +When a TPU device is available, this binary uses TPUStrategy. Otherwise, it uses +GPUS with MirroredStrategy/MultiWorkerMirroredStrategy. 
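+
+For eval-only runs, additionally pass --checkpoint_dir pointing at the training
+job's checkpoints; the binary then evaluates each new checkpoint and writes the
+resulting metrics to --model_dir.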
+ +For local training/evaluation run: +PIPELINE_CONFIG_PATH=path/to/pipeline.config +MODEL_DIR=/tmp/model_outputs +NUM_TRAIN_STEPS=10000 +SAMPLE_1_OF_N_EVAL_EXAMPLES=1 +python model_main_tf2.py -- \ + --model_dir=$MODEL_DIR --num_train_steps=$NUM_TRAIN_STEPS \ + --sample_1_of_n_eval_examples=$SAMPLE_1_OF_N_EVAL_EXAMPLES \ + --pipeline_config_path=$PIPELINE_CONFIG_PATH \ + --alsologtostderr +""" +from absl import flags +import tensorflow.compat.v2 as tf +from object_detection import model_hparams +from object_detection import model_lib_v2 + +flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config ' + 'file.') +flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.') +flags.DEFINE_bool('eval_on_train_data', False, 'Enable evaluating on train ' + 'data (only supported in distributed training).') +flags.DEFINE_integer('sample_1_of_n_eval_examples', None, 'Will sample one of ' + 'every n eval input examples, where n is provided.') +flags.DEFINE_integer('sample_1_of_n_eval_on_train_examples', 5, 'Will sample ' + 'one of every n train input examples for evaluation, ' + 'where n is provided. This is only used if ' + '`eval_training_data` is True.') +flags.DEFINE_string( + 'hparams_overrides', None, 'Hyperparameter overrides, ' + 'represented as a string containing comma-separated ' + 'hparam_name=value pairs.') +flags.DEFINE_string( + 'model_dir', None, 'Path to output model directory ' + 'where event and checkpoint files will be written.') +flags.DEFINE_string( + 'checkpoint_dir', None, 'Path to directory holding a checkpoint. If ' + '`checkpoint_dir` is provided, this binary operates in eval-only mode, ' + 'writing resulting metrics to `model_dir`.') + +flags.DEFINE_integer('eval_timeout', 3600, 'Number of seconds to wait for an' + 'evaluation checkpoint before exiting.') +flags.DEFINE_integer( + 'num_workers', 1, 'When num_workers > 1, training uses ' + 'MultiWorkerMirroredStrategy. 
When num_workers = 1 it uses ' + 'MirroredStrategy.') + +FLAGS = flags.FLAGS + + +def main(unused_argv): + flags.mark_flag_as_required('model_dir') + flags.mark_flag_as_required('pipeline_config_path') + tf.config.set_soft_device_placement(True) + + if FLAGS.checkpoint_dir: + model_lib_v2.eval_continuously( + hparams=model_hparams.create_hparams(FLAGS.hparams_overrides), + pipeline_config_path=FLAGS.pipeline_config_path, + model_dir=FLAGS.model_dir, + train_steps=FLAGS.num_train_steps, + sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples, + sample_1_of_n_eval_on_train_examples=( + FLAGS.sample_1_of_n_eval_on_train_examples), + checkpoint_dir=FLAGS.checkpoint_dir, + wait_interval=300, timeout=FLAGS.eval_timeout) + else: + if tf.config.get_visible_devices('TPU'): + resolver = tf.distribute.cluster_resolver.TPUClusterResolver() + tf.config.experimental_connect_to_cluster(resolver) + tf.tpu.experimental.initialize_tpu_system(resolver) + strategy = tf.distribute.experimental.TPUStrategy(resolver) + elif FLAGS.num_workers > 1: + strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() + else: + strategy = tf.compat.v2.distribute.MirroredStrategy() + + with strategy.scope(): + model_lib_v2.train_loop( + hparams=model_hparams.create_hparams(FLAGS.hparams_overrides), + pipeline_config_path=FLAGS.pipeline_config_path, + model_dir=FLAGS.model_dir, + train_steps=FLAGS.num_train_steps, + use_tpu=FLAGS.use_tpu) + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/object_detection/model_tpu_main.py b/models/research/object_detection/model_tpu_main.py new file mode 100644 index 0000000000000000000000000000000000000000..a1229eb10f81d56a7014f89c9297c2acdb01b8c4 --- /dev/null +++ b/models/research/object_detection/model_tpu_main.py @@ -0,0 +1,154 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Creates and runs `Estimator` for object detection model on TPUs. + +This uses the TPUEstimator API to define and run a model in TRAIN/EVAL modes. +""" +# pylint: enable=line-too-long + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import flags +import tensorflow.compat.v1 as tf + + +from object_detection import model_hparams +from object_detection import model_lib + +# pylint: disable=g-import-not-at-top +try: + from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver + from tensorflow.contrib import tpu as contrib_tpu +except ImportError: + # TF 2.0 doesn't ship with contrib. + pass +# pylint: enable=g-import-not-at-top + +tf.flags.DEFINE_bool('use_tpu', True, 'Use TPUs rather than plain CPUs') + +# Cloud TPU Cluster Resolvers +flags.DEFINE_string( + 'gcp_project', + default=None, + help='Project name for the Cloud TPU-enabled project. 
If not specified, we ' + 'will attempt to automatically detect the GCE project from metadata.') +flags.DEFINE_string( + 'tpu_zone', + default=None, + help='GCE zone where the Cloud TPU is located in. If not specified, we ' + 'will attempt to automatically detect the GCE project from metadata.') +flags.DEFINE_string( + 'tpu_name', + default=None, + help='Name of the Cloud TPU for Cluster Resolvers.') + +flags.DEFINE_integer('num_shards', 8, 'Number of shards (TPU cores).') +flags.DEFINE_integer('iterations_per_loop', 100, + 'Number of iterations per TPU training loop.') +# For mode=train_and_eval, evaluation occurs after training is finished. +# Note: independently of steps_per_checkpoint, estimator will save the most +# recent checkpoint every 10 minutes by default for train_and_eval +flags.DEFINE_string('mode', 'train', + 'Mode to run: train, eval') +flags.DEFINE_integer('train_batch_size', None, 'Batch size for training. If ' + 'this is not provided, batch size is read from training ' + 'config.') + +flags.DEFINE_string( + 'hparams_overrides', None, 'Comma-separated list of ' + 'hyperparameters to override defaults.') +flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.') +flags.DEFINE_boolean('eval_training_data', False, + 'If training data should be evaluated for this job.') +flags.DEFINE_integer('sample_1_of_n_eval_examples', 1, 'Will sample one of ' + 'every n eval input examples, where n is provided.') +flags.DEFINE_integer('sample_1_of_n_eval_on_train_examples', 5, 'Will sample ' + 'one of every n train input examples for evaluation, ' + 'where n is provided. This is only used if ' + '`eval_training_data` is True.') +flags.DEFINE_string( + 'model_dir', None, 'Path to output model directory ' + 'where event and checkpoint files will be written.') +flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config ' + 'file.') +flags.DEFINE_integer( + 'max_eval_retries', 0, 'If running continuous eval, the maximum number of ' + 'retries upon encountering tf.errors.InvalidArgumentError. If negative, ' + 'will always retry the evaluation.' 
+) + +FLAGS = tf.flags.FLAGS + + +def main(unused_argv): + flags.mark_flag_as_required('model_dir') + flags.mark_flag_as_required('pipeline_config_path') + + tpu_cluster_resolver = ( + contrib_cluster_resolver.TPUClusterResolver( + tpu=[FLAGS.tpu_name], zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)) + tpu_grpc_url = tpu_cluster_resolver.get_master() + + config = contrib_tpu.RunConfig( + master=tpu_grpc_url, + evaluation_master=tpu_grpc_url, + model_dir=FLAGS.model_dir, + tpu_config=contrib_tpu.TPUConfig( + iterations_per_loop=FLAGS.iterations_per_loop, + num_shards=FLAGS.num_shards)) + + kwargs = {} + if FLAGS.train_batch_size: + kwargs['batch_size'] = FLAGS.train_batch_size + + train_and_eval_dict = model_lib.create_estimator_and_inputs( + run_config=config, + hparams=model_hparams.create_hparams(FLAGS.hparams_overrides), + pipeline_config_path=FLAGS.pipeline_config_path, + train_steps=FLAGS.num_train_steps, + sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples, + sample_1_of_n_eval_on_train_examples=( + FLAGS.sample_1_of_n_eval_on_train_examples), + use_tpu_estimator=True, + use_tpu=FLAGS.use_tpu, + num_shards=FLAGS.num_shards, + save_final_config=FLAGS.mode == 'train', + **kwargs) + estimator = train_and_eval_dict['estimator'] + train_input_fn = train_and_eval_dict['train_input_fn'] + eval_input_fns = train_and_eval_dict['eval_input_fns'] + eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn'] + train_steps = train_and_eval_dict['train_steps'] + + if FLAGS.mode == 'train': + estimator.train(input_fn=train_input_fn, max_steps=train_steps) + + # Continuously evaluating. + if FLAGS.mode == 'eval': + if FLAGS.eval_training_data: + name = 'training_data' + input_fn = eval_on_train_input_fn + else: + name = 'validation_data' + # Currently only a single eval input is allowed. + input_fn = eval_input_fns[0] + model_lib.continuous_eval(estimator, FLAGS.model_dir, input_fn, train_steps, + name, FLAGS.max_eval_retries) + + +if __name__ == '__main__': + tf.app.run() diff --git a/models/research/object_detection/models/__init__.py b/models/research/object_detection/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/object_detection/models/center_net_hourglass_feature_extractor.py b/models/research/object_detection/models/center_net_hourglass_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..4761915aa5ad0023673199f2083ff355816f7bb1 --- /dev/null +++ b/models/research/object_detection/models/center_net_hourglass_feature_extractor.py @@ -0,0 +1,75 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Hourglass[1] feature extractor for CenterNet[2] meta architecture. 
+ +[1]: https://arxiv.org/abs/1603.06937 +[2]: https://arxiv.org/abs/1904.07850 +""" + +from object_detection.meta_architectures import center_net_meta_arch +from object_detection.models.keras_models import hourglass_network + + +class CenterNetHourglassFeatureExtractor( + center_net_meta_arch.CenterNetFeatureExtractor): + """The hourglass feature extractor for CenterNet. + + This class is a thin wrapper around the HourglassFeatureExtractor class + along with some preprocessing methods inherited from the base class. + """ + + def __init__(self, hourglass_net, channel_means=(0., 0., 0.), + channel_stds=(1., 1., 1.), bgr_ordering=False): + """Intializes the feature extractor. + + Args: + hourglass_net: The underlying hourglass network to use. + channel_means: A tuple of floats, denoting the mean of each channel + which will be subtracted from it. + channel_stds: A tuple of floats, denoting the standard deviation of each + channel. Each channel will be divided by its standard deviation value. + bgr_ordering: bool, if set will change the channel ordering to be in the + [blue, red, green] order. + """ + + super(CenterNetHourglassFeatureExtractor, self).__init__( + channel_means=channel_means, channel_stds=channel_stds, + bgr_ordering=bgr_ordering) + self._network = hourglass_net + + def call(self, inputs): + return self._network(inputs) + + @property + def out_stride(self): + """The stride in the output image of the network.""" + return 4 + + @property + def num_feature_outputs(self): + """Ther number of feature outputs returned by the feature extractor.""" + return self._network.num_hourglasses + + def get_model(self): + return self._network + + +def hourglass_104(channel_means, channel_stds, bgr_ordering): + """The Hourglass-104 backbone for CenterNet.""" + + network = hourglass_network.hourglass_104() + return CenterNetHourglassFeatureExtractor( + network, channel_means=channel_means, channel_stds=channel_stds, + bgr_ordering=bgr_ordering) diff --git a/models/research/object_detection/models/center_net_hourglass_feature_extractor_tf2_test.py b/models/research/object_detection/models/center_net_hourglass_feature_extractor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..19d5cbe9843ff03d6d1499a02980a067dc305579 --- /dev/null +++ b/models/research/object_detection/models/center_net_hourglass_feature_extractor_tf2_test.py @@ -0,0 +1,44 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Testing hourglass feature extractor for CenterNet.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import center_net_hourglass_feature_extractor as hourglass +from object_detection.models.keras_models import hourglass_network +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class CenterNetHourglassFeatureExtractorTest(test_case.TestCase): + + def test_center_net_hourglass_feature_extractor(self): + + net = hourglass_network.HourglassNetwork( + num_stages=4, blocks_per_stage=[2, 3, 4, 5, 6], + channel_dims=[4, 6, 8, 10, 12, 14], num_hourglasses=2) + + model = hourglass.CenterNetHourglassFeatureExtractor(net) + def graph_fn(): + return model(tf.zeros((2, 64, 64, 3), dtype=np.float32)) + outputs = self.execute(graph_fn, []) + self.assertEqual(outputs[0].shape, (2, 16, 16, 6)) + self.assertEqual(outputs[1].shape, (2, 16, 16, 6)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/center_net_resnet_feature_extractor.py b/models/research/object_detection/models/center_net_resnet_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..477fa4c50ea9e0bc62b43a75c1674acfef7a183c --- /dev/null +++ b/models/research/object_detection/models/center_net_resnet_feature_extractor.py @@ -0,0 +1,149 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Resnetv2 based feature extractors for CenterNet[1] meta architecture. + +[1]: https://arxiv.org/abs/1904.07850 +""" + + +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures.center_net_meta_arch import CenterNetFeatureExtractor + + +class CenterNetResnetFeatureExtractor(CenterNetFeatureExtractor): + """Resnet v2 base feature extractor for the CenterNet model.""" + + def __init__(self, resnet_type, channel_means=(0., 0., 0.), + channel_stds=(1., 1., 1.), bgr_ordering=False): + """Initializes the feature extractor with a specific ResNet architecture. + + Args: + resnet_type: A string specifying which kind of ResNet to use. Currently + only `resnet_v2_50` and `resnet_v2_101` are supported. + channel_means: A tuple of floats, denoting the mean of each channel + which will be subtracted from it. + channel_stds: A tuple of floats, denoting the standard deviation of each + channel. Each channel will be divided by its standard deviation value. + bgr_ordering: bool, if set will change the channel ordering to be in the + [blue, red, green] order. 
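+
+    A hypothetical construction and forward pass (dummy input; shapes follow
+    the `call` docstring and the accompanying unit test):
+
+      extractor = CenterNetResnetFeatureExtractor('resnet_v2_50')
+      preprocessed = extractor.preprocess(tf.zeros([1, 224, 224, 3]))
+      features = extractor(preprocessed)[0]  # shape [1, 56, 56, 64]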
+ + """ + + super(CenterNetResnetFeatureExtractor, self).__init__( + channel_means=channel_means, channel_stds=channel_stds, + bgr_ordering=bgr_ordering) + if resnet_type == 'resnet_v2_101': + self._base_model = tf.keras.applications.ResNet101V2(weights=None) + output_layer = 'conv5_block3_out' + elif resnet_type == 'resnet_v2_50': + self._base_model = tf.keras.applications.ResNet50V2(weights=None) + output_layer = 'conv5_block3_out' + else: + raise ValueError('Unknown Resnet Model {}'.format(resnet_type)) + output_layer = self._base_model.get_layer(output_layer) + + self._resnet_model = tf.keras.models.Model(inputs=self._base_model.input, + outputs=output_layer.output) + resnet_output = self._resnet_model(self._base_model.input) + + for num_filters in [256, 128, 64]: + # TODO(vighneshb) This section has a few differences from the paper + # Figure out how much of a performance impact they have. + + # 1. We use a simple convolution instead of a deformable convolution + conv = tf.keras.layers.Conv2D(filters=num_filters, kernel_size=3, + strides=1, padding='same') + resnet_output = conv(resnet_output) + resnet_output = tf.keras.layers.BatchNormalization()(resnet_output) + resnet_output = tf.keras.layers.ReLU()(resnet_output) + + # 2. We use the default initialization for the convolution layers + # instead of initializing it to do bilinear upsampling. + conv_transpose = tf.keras.layers.Conv2DTranspose(filters=num_filters, + kernel_size=3, strides=2, + padding='same') + resnet_output = conv_transpose(resnet_output) + resnet_output = tf.keras.layers.BatchNormalization()(resnet_output) + resnet_output = tf.keras.layers.ReLU()(resnet_output) + + self._feature_extractor_model = tf.keras.models.Model( + inputs=self._base_model.input, outputs=resnet_output) + + def preprocess(self, resized_inputs): + """Preprocess input images for the ResNet model. + + This scales images in the range [0, 255] to the range [-1, 1] + + Args: + resized_inputs: a [batch, height, width, channels] float32 tensor. + + Returns: + outputs: a [batch, height, width, channels] float32 tensor. + + """ + resized_inputs = super(CenterNetResnetFeatureExtractor, self).preprocess( + resized_inputs) + return tf.keras.applications.resnet_v2.preprocess_input(resized_inputs) + + def load_feature_extractor_weights(self, path): + self._base_model.load_weights(path) + + def get_base_model(self): + """Get base resnet model for inspection and testing.""" + return self._base_model + + def call(self, inputs): + """Returns image features extracted by the backbone. + + Args: + inputs: An image tensor of shape [batch_size, input_height, + input_width, 3] + + Returns: + features_list: A list of length 1 containing a tensor of shape + [batch_size, input_height // 4, input_width // 4, 64] containing + the features extracted by the ResNet. 
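+
+    The stride-4 output arises from the three stride-2 transposed convolutions
+    applied in `__init__` on top of ResNet's stride-32 `conv5_block3_out`
+    feature map (32 / 2**3 = 4).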
+ """ + return [self._feature_extractor_model(inputs)] + + @property + def num_feature_outputs(self): + return 1 + + @property + def out_stride(self): + return 4 + + +def resnet_v2_101(channel_means, channel_stds, bgr_ordering): + """The ResNet v2 101 feature extractor.""" + + return CenterNetResnetFeatureExtractor( + resnet_type='resnet_v2_101', + channel_means=channel_means, + channel_stds=channel_stds, + bgr_ordering=bgr_ordering + ) + + +def resnet_v2_50(channel_means, channel_stds, bgr_ordering): + """The ResNet v2 50 feature extractor.""" + + return CenterNetResnetFeatureExtractor( + resnet_type='resnet_v2_50', + channel_means=channel_means, + channel_stds=channel_stds, + bgr_ordering=bgr_ordering) diff --git a/models/research/object_detection/models/center_net_resnet_feature_extractor_tf2_test.py b/models/research/object_detection/models/center_net_resnet_feature_extractor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..3429c0442053982d3d3d9502508ede3177cbf102 --- /dev/null +++ b/models/research/object_detection/models/center_net_resnet_feature_extractor_tf2_test.py @@ -0,0 +1,54 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Testing ResNet v2 models for the CenterNet meta architecture.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import center_net_resnet_feature_extractor +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class CenterNetResnetFeatureExtractorTest(test_case.TestCase): + + def test_output_size(self): + """Verify that shape of features returned by the backbone is correct.""" + + model = center_net_resnet_feature_extractor.\ + CenterNetResnetFeatureExtractor('resnet_v2_101') + def graph_fn(): + img = np.zeros((8, 224, 224, 3), dtype=np.float32) + processed_img = model.preprocess(img) + return model(processed_img) + outputs = self.execute(graph_fn, []) + self.assertEqual(outputs.shape, (8, 56, 56, 64)) + + def test_output_size_resnet50(self): + """Verify that shape of features returned by the backbone is correct.""" + + model = center_net_resnet_feature_extractor.\ + CenterNetResnetFeatureExtractor('resnet_v2_50') + def graph_fn(): + img = np.zeros((8, 224, 224, 3), dtype=np.float32) + processed_img = model.preprocess(img) + return model(processed_img) + outputs = self.execute(graph_fn, []) + self.assertEqual(outputs.shape, (8, 56, 56, 64)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/center_net_resnet_v1_fpn_feature_extractor.py b/models/research/object_detection/models/center_net_resnet_v1_fpn_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..842e9cf1b2e5393a6bc87df3989f173d0409de70 --- /dev/null +++ 
b/models/research/object_detection/models/center_net_resnet_v1_fpn_feature_extractor.py @@ -0,0 +1,176 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Resnetv1 FPN [1] based feature extractors for CenterNet[2] meta architecture. + + +[1]: https://arxiv.org/abs/1612.03144. +[2]: https://arxiv.org/abs/1904.07850. +""" +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures.center_net_meta_arch import CenterNetFeatureExtractor + + +_RESNET_MODEL_OUTPUT_LAYERS = { + 'resnet_v1_50': ['conv2_block3_out', 'conv3_block4_out', + 'conv4_block6_out', 'conv5_block3_out'], + 'resnet_v1_101': ['conv2_block3_out', 'conv3_block4_out', + 'conv4_block23_out', 'conv5_block3_out'], +} + + +class CenterNetResnetV1FpnFeatureExtractor(CenterNetFeatureExtractor): + """Resnet v1 FPN base feature extractor for the CenterNet model. + + This feature extractor uses residual skip connections and nearest neighbor + upsampling to produce an output feature map of stride 4, which has precise + localization information along with strong semantic information from the top + of the net. This design does not exactly follow the original FPN design, + specifically: + - Since only one output map is necessary for heatmap prediction (stride 4 + output), the top-down feature maps can have different numbers of channels. + Specifically, the top down feature maps have the following sizes: + [h/4, w/4, 64], [h/8, w/8, 128], [h/16, w/16, 256], [h/32, w/32, 256]. + - No additional coarse features are used after conv5_x. + """ + + def __init__(self, resnet_type, channel_means=(0., 0., 0.), + channel_stds=(1., 1., 1.), bgr_ordering=False): + """Initializes the feature extractor with a specific ResNet architecture. + + Args: + resnet_type: A string specifying which kind of ResNet to use. Currently + only `resnet_v1_50` and `resnet_v1_101` are supported. + channel_means: A tuple of floats, denoting the mean of each channel + which will be subtracted from it. + channel_stds: A tuple of floats, denoting the standard deviation of each + channel. Each channel will be divided by its standard deviation value. + bgr_ordering: bool, if set will change the channel ordering to be in the + [blue, red, green] order. 
+ + """ + + super(CenterNetResnetV1FpnFeatureExtractor, self).__init__( + channel_means=channel_means, channel_stds=channel_stds, + bgr_ordering=bgr_ordering) + if resnet_type == 'resnet_v1_50': + self._base_model = tf.keras.applications.ResNet50(weights=None) + elif resnet_type == 'resnet_v1_101': + self._base_model = tf.keras.applications.ResNet101(weights=None) + else: + raise ValueError('Unknown Resnet Model {}'.format(resnet_type)) + output_layers = _RESNET_MODEL_OUTPUT_LAYERS[resnet_type] + outputs = [self._base_model.get_layer(output_layer_name).output + for output_layer_name in output_layers] + + self._resnet_model = tf.keras.models.Model(inputs=self._base_model.input, + outputs=outputs) + resnet_outputs = self._resnet_model(self._base_model.input) + + # Construct the top-down feature maps. + top_layer = resnet_outputs[-1] + residual_op = tf.keras.layers.Conv2D(filters=256, kernel_size=1, + strides=1, padding='same') + top_down = residual_op(top_layer) + + num_filters_list = [256, 128, 64] + for i, num_filters in enumerate(num_filters_list): + level_ind = 2 - i + # Upsample. + upsample_op = tf.keras.layers.UpSampling2D(2, interpolation='nearest') + top_down = upsample_op(top_down) + + # Residual (skip-connection) from bottom-up pathway. + residual_op = tf.keras.layers.Conv2D(filters=num_filters, kernel_size=1, + strides=1, padding='same') + residual = residual_op(resnet_outputs[level_ind]) + + # Merge. + top_down = top_down + residual + next_num_filters = num_filters_list[i+1] if i + 1 <= 2 else 64 + conv = tf.keras.layers.Conv2D(filters=next_num_filters, + kernel_size=3, strides=1, padding='same') + top_down = conv(top_down) + top_down = tf.keras.layers.BatchNormalization()(top_down) + top_down = tf.keras.layers.ReLU()(top_down) + + self._feature_extractor_model = tf.keras.models.Model( + inputs=self._base_model.input, outputs=top_down) + + def preprocess(self, resized_inputs): + """Preprocess input images for the ResNet model. + + This scales images in the range [0, 255] to the range [-1, 1] + + Args: + resized_inputs: a [batch, height, width, channels] float32 tensor. + + Returns: + outputs: a [batch, height, width, channels] float32 tensor. + + """ + resized_inputs = super( + CenterNetResnetV1FpnFeatureExtractor, self).preprocess(resized_inputs) + return tf.keras.applications.resnet.preprocess_input(resized_inputs) + + def load_feature_extractor_weights(self, path): + self._base_model.load_weights(path) + + def get_base_model(self): + """Get base resnet model for inspection and testing.""" + return self._base_model + + def call(self, inputs): + """Returns image features extracted by the backbone. + + Args: + inputs: An image tensor of shape [batch_size, input_height, + input_width, 3] + + Returns: + features_list: A list of length 1 containing a tensor of shape + [batch_size, input_height // 4, input_width // 4, 64] containing + the features extracted by the ResNet. 
+ """ + return [self._feature_extractor_model(inputs)] + + @property + def num_feature_outputs(self): + return 1 + + @property + def out_stride(self): + return 4 + + +def resnet_v1_101_fpn(channel_means, channel_stds, bgr_ordering): + """The ResNet v1 101 FPN feature extractor.""" + + return CenterNetResnetV1FpnFeatureExtractor( + resnet_type='resnet_v1_101', + channel_means=channel_means, + channel_stds=channel_stds, + bgr_ordering=bgr_ordering + ) + + +def resnet_v1_50_fpn(channel_means, channel_stds, bgr_ordering): + """The ResNet v1 50 FPN feature extractor.""" + + return CenterNetResnetV1FpnFeatureExtractor( + resnet_type='resnet_v1_50', + channel_means=channel_means, + channel_stds=channel_stds, + bgr_ordering=bgr_ordering) diff --git a/models/research/object_detection/models/center_net_resnet_v1_fpn_feature_extractor_tf2_test.py b/models/research/object_detection/models/center_net_resnet_v1_fpn_feature_extractor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..3f1524904f0a055e48342d09febdd7bd3ec6fb3c --- /dev/null +++ b/models/research/object_detection/models/center_net_resnet_v1_fpn_feature_extractor_tf2_test.py @@ -0,0 +1,49 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Testing ResNet v1 FPN models for the CenterNet meta architecture.""" +import unittest +from absl.testing import parameterized + +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import center_net_resnet_v1_fpn_feature_extractor +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class CenterNetResnetV1FpnFeatureExtractorTest(test_case.TestCase, + parameterized.TestCase): + + @parameterized.parameters( + {'resnet_type': 'resnet_v1_50'}, + {'resnet_type': 'resnet_v1_101'}, + ) + def test_correct_output_size(self, resnet_type): + """Verify that shape of features returned by the backbone is correct.""" + + model = center_net_resnet_v1_fpn_feature_extractor.\ + CenterNetResnetV1FpnFeatureExtractor(resnet_type) + def graph_fn(): + img = np.zeros((8, 224, 224, 3), dtype=np.float32) + processed_img = model.preprocess(img) + return model(processed_img) + + self.assertEqual(self.execute(graph_fn, []).shape, (8, 56, 56, 64)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor.py b/models/research/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..ac1886e025280165dfde4c6d4158fe8964cdc0eb --- /dev/null +++ b/models/research/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor.py @@ -0,0 +1,165 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Embedded-friendly SSDFeatureExtractor for MobilenetV1 features.""" + +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import context_manager +from object_detection.utils import ops +from nets import mobilenet_v1 + + +class EmbeddedSSDMobileNetV1FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): + """Embedded-friendly SSD Feature Extractor using MobilenetV1 features. + + This feature extractor is similar to SSD MobileNetV1 feature extractor, and + it fixes input resolution to be 256x256, reduces the number of feature maps + used for box prediction and ensures convolution kernel to be no larger + than input tensor in spatial dimensions. + + This feature extractor requires support of the following ops if used in + embedded devices: + - Conv + - DepthwiseConv + - Relu6 + + All conv/depthwiseconv use SAME padding, and no additional spatial padding is + needed. + """ + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + override_base_feature_extractor_hyperparams=False): + """MobileNetV1 Feature Extractor for Embedded-friendly SSD Models. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. For EmbeddedSSD it must be set to 1. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + + Raises: + ValueError: upon invalid `pad_to_multiple` values. + """ + if pad_to_multiple != 1: + raise ValueError('Embedded-specific SSD only supports `pad_to_multiple` ' + 'of 1.') + + super(EmbeddedSSDMobileNetV1FeatureExtractor, self).__init__( + is_training, depth_multiplier, min_depth, pad_to_multiple, + conv_hyperparams_fn, reuse_weights, use_explicit_padding, use_depthwise, + override_base_feature_extractor_hyperparams) + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. 
+ + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + + Raises: + ValueError: if image height or width are not 256 pixels. + """ + image_shape = preprocessed_inputs.get_shape() + image_shape.assert_has_rank(4) + image_height = image_shape[1].value + image_width = image_shape[2].value + + if image_height is None or image_width is None: + shape_assert = tf.Assert( + tf.logical_and(tf.equal(tf.shape(preprocessed_inputs)[1], 256), + tf.equal(tf.shape(preprocessed_inputs)[2], 256)), + ['image size must be 256 in both height and width.']) + with tf.control_dependencies([shape_assert]): + preprocessed_inputs = tf.identity(preprocessed_inputs) + elif image_height != 256 or image_width != 256: + raise ValueError('image size must be = 256 in both height and width;' + ' image dim = %d,%d' % (image_height, image_width)) + + feature_map_layout = { + 'from_layer': [ + 'Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', '' + ], + 'layer_depth': [-1, -1, 512, 256, 256], + 'conv_kernel_size': [-1, -1, 3, 3, 2], + 'use_explicit_padding': self._use_explicit_padding, + 'use_depthwise': self._use_depthwise, + } + + with tf.variable_scope('MobilenetV1', + reuse=self._reuse_weights) as scope: + with slim.arg_scope( + mobilenet_v1.mobilenet_v1_arg_scope(is_training=None)): + with (slim.arg_scope(self._conv_hyperparams_fn()) + if self._override_base_feature_extractor_hyperparams + else context_manager.IdentityContextManager()): + _, image_features = mobilenet_v1.mobilenet_v1_base( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), + final_endpoint='Conv2d_13_pointwise', + min_depth=self._min_depth, + depth_multiplier=self._depth_multiplier, + use_explicit_padding=self._use_explicit_padding, + scope=scope) + with slim.arg_scope(self._conv_hyperparams_fn()): + feature_maps = feature_map_generators.multi_resolution_feature_maps( + feature_map_layout=feature_map_layout, + depth_multiplier=self._depth_multiplier, + min_depth=self._min_depth, + insert_1x1_conv=True, + image_features=image_features) + + return list(feature_maps.values()) diff --git a/models/research/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor_tf1_test.py b/models/research/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..4a27e8c8d649c4cb9ae961bffafc7ad824b63b25 --- /dev/null +++ b/models/research/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor_tf1_test.py @@ -0,0 +1,132 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for embedded_ssd_mobilenet_v1_feature_extractor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import embedded_ssd_mobilenet_v1_feature_extractor +from object_detection.models import ssd_feature_extractor_test +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class EmbeddedSSDMobileNetV1FeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, + is_training=True): + """Constructs a new feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + is_training: whether the network is in training mode. + + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. + """ + min_depth = 32 + return (embedded_ssd_mobilenet_v1_feature_extractor. + EmbeddedSSDMobileNetV1FeatureExtractor( + is_training, depth_multiplier, min_depth, pad_to_multiple, + self.conv_hyperparams_fn, + override_base_feature_extractor_hyperparams=True)) + + def test_extract_features_returns_correct_shapes_256(self): + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 16, 16, 512), (2, 8, 8, 1024), + (2, 4, 4, 512), (2, 2, 2, 256), + (2, 1, 1, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_with_dynamic_inputs(self): + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 16, 16, 512), (2, 8, 8, 1024), + (2, 4, 4, 512), (2, 2, 2, 256), + (2, 1, 1, 256)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_enforcing_min_depth(self): + image_height = 256 + image_width = 256 + depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 16, 16, 32), (2, 8, 8, 32), (2, 4, 4, 32), + (2, 2, 2, 32), (2, 1, 1, 32)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple_of_1( + self): + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 16, 16, 512), (2, 8, 8, 1024), + (2, 4, 4, 512), (2, 2, 2, 256), + (2, 1, 1, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_raises_error_with_pad_to_multiple_not_1(self): + depth_multiplier = 1.0 + pad_to_multiple = 2 + with self.assertRaises(ValueError): + _ = self._create_feature_extractor(depth_multiplier, pad_to_multiple) + + def test_extract_features_raises_error_with_invalid_image_size(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + 
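+    # extract_features raises a ValueError for any spatial size other than
+    # 256x256, so this 128x128 input should trigger the error path.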
self.check_extract_features_raises_error_with_invalid_image_size( + image_height, image_width, depth_multiplier, pad_to_multiple) + + def test_preprocess_returns_correct_value_range(self): + image_height = 256 + image_width = 256 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(4, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_variables_only_created_in_scope(self): + depth_multiplier = 1 + pad_to_multiple = 1 + scope_name = 'MobilenetV1' + self.check_feature_extractor_variables_under_scope( + depth_multiplier, pad_to_multiple, scope_name) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor.py b/models/research/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..a94aa207b3a6fdbddc440ea4bac64a9ba9e5d8de --- /dev/null +++ b/models/research/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor.py @@ -0,0 +1,212 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Inception Resnet v2 Faster R-CNN implementation. + +See "Inception-v4, Inception-ResNet and the Impact of Residual Connections on +Learning" by Szegedy et al. (https://arxiv.org/abs/1602.07261) +as well as +"Speed/accuracy trade-offs for modern convolutional object detectors" by +Huang et al. (https://arxiv.org/abs/1611.10012) +""" + +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import faster_rcnn_meta_arch +from object_detection.utils import variables_helper +from nets import inception_resnet_v2 + + +class FasterRCNNInceptionResnetV2FeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): + """Faster R-CNN with Inception Resnet v2 feature extractor implementation.""" + + def __init__(self, + is_training, + first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + reuse_weights: See base class. + weight_decay: See base class. + + Raises: + ValueError: If `first_stage_features_stride` is not 8 or 16. 
+ """ + if first_stage_features_stride != 8 and first_stage_features_stride != 16: + raise ValueError('`first_stage_features_stride` must be 8 or 16.') + super(FasterRCNNInceptionResnetV2FeatureExtractor, self).__init__( + is_training, first_stage_features_stride, batch_norm_trainable, + reuse_weights, weight_decay) + + def preprocess(self, resized_inputs): + """Faster R-CNN with Inception Resnet v2 preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: A [batch, height_in, width_in, channels] float32 tensor + representing a batch of images with values between 0 and 255.0. + + Returns: + preprocessed_inputs: A [batch, height_out, width_out, channels] float32 + tensor representing a batch of images. + + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def _extract_proposal_features(self, preprocessed_inputs, scope): + """Extracts first stage RPN features. + + Extracts features using the first half of the Inception Resnet v2 network. + We construct the network in `align_feature_maps=True` mode, which means + that all VALID paddings in the network are changed to SAME padding so that + the feature maps are aligned. + + Args: + preprocessed_inputs: A [batch, height, width, channels] float32 tensor + representing a batch of images. + scope: A scope name. + + Returns: + rpn_feature_map: A tensor with shape [batch, height, width, depth] + Raises: + InvalidArgumentError: If the spatial size of `preprocessed_inputs` + (height or width) is less than 33. + ValueError: If the created network is missing the required activation. + """ + if len(preprocessed_inputs.get_shape().as_list()) != 4: + raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a ' + 'tensor of shape %s' % preprocessed_inputs.get_shape()) + + with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope( + weight_decay=self._weight_decay)): + # Forces is_training to False to disable batch norm update. + with slim.arg_scope([slim.batch_norm], + is_training=self._train_batch_norm): + with tf.variable_scope('InceptionResnetV2', + reuse=self._reuse_weights) as scope: + return inception_resnet_v2.inception_resnet_v2_base( + preprocessed_inputs, final_endpoint='PreAuxLogits', + scope=scope, output_stride=self._first_stage_features_stride, + align_feature_maps=True) + + def _extract_box_classifier_features(self, proposal_feature_maps, scope): + """Extracts second stage box classifier features. + + This function reconstructs the "second half" of the Inception ResNet v2 + network after the part defined in `_extract_proposal_features`. + + Args: + proposal_feature_maps: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, crop_height, crop_width, depth] + representing the feature map cropped to each proposal. + scope: A scope name. + + Returns: + proposal_classifier_features: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, height, width, depth] + representing box classifier features for each proposal. + """ + with tf.variable_scope('InceptionResnetV2', reuse=self._reuse_weights): + with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope( + weight_decay=self._weight_decay)): + # Forces is_training to False to disable batch norm update. 
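+        # Batch norm layers run in training mode only when
+        # `self._train_batch_norm` is True; otherwise their moving statistics
+        # stay frozen while the detector is fine-tuned.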
+ with slim.arg_scope([slim.batch_norm], + is_training=self._train_batch_norm): + with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], + stride=1, padding='SAME'): + with tf.variable_scope('Mixed_7a'): + with tf.variable_scope('Branch_0'): + tower_conv = slim.conv2d(proposal_feature_maps, + 256, 1, scope='Conv2d_0a_1x1') + tower_conv_1 = slim.conv2d( + tower_conv, 384, 3, stride=2, + padding='VALID', scope='Conv2d_1a_3x3') + with tf.variable_scope('Branch_1'): + tower_conv1 = slim.conv2d( + proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1') + tower_conv1_1 = slim.conv2d( + tower_conv1, 288, 3, stride=2, + padding='VALID', scope='Conv2d_1a_3x3') + with tf.variable_scope('Branch_2'): + tower_conv2 = slim.conv2d( + proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1') + tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3, + scope='Conv2d_0b_3x3') + tower_conv2_2 = slim.conv2d( + tower_conv2_1, 320, 3, stride=2, + padding='VALID', scope='Conv2d_1a_3x3') + with tf.variable_scope('Branch_3'): + tower_pool = slim.max_pool2d( + proposal_feature_maps, 3, stride=2, padding='VALID', + scope='MaxPool_1a_3x3') + net = tf.concat( + [tower_conv_1, tower_conv1_1, tower_conv2_2, tower_pool], 3) + net = slim.repeat(net, 9, inception_resnet_v2.block8, scale=0.20) + net = inception_resnet_v2.block8(net, activation_fn=None) + proposal_classifier_features = slim.conv2d( + net, 1536, 1, scope='Conv2d_7b_1x1') + return proposal_classifier_features + + def restore_from_classification_checkpoint_fn( + self, + first_stage_feature_extractor_scope, + second_stage_feature_extractor_scope): + """Returns a map of variables to load from a foreign checkpoint. + + Note that this overrides the default implementation in + faster_rcnn_meta_arch.FasterRCNNFeatureExtractor which does not work for + InceptionResnetV2 checkpoints. + + TODO(jonathanhuang,rathodv): revisit whether it's possible to force the + `Repeat` namescope as created in `_extract_box_classifier_features` to + start counting at 2 (e.g. `Repeat_2`) so that the default restore_fn can + be used. + + Args: + first_stage_feature_extractor_scope: A scope name for the first stage + feature extractor. + second_stage_feature_extractor_scope: A scope name for the second stage + feature extractor. + + Returns: + A dict mapping variable names (to load from a checkpoint) to variables in + the model graph. 
+ """ + + variables_to_restore = {} + for variable in variables_helper.get_global_variables_safely(): + if variable.op.name.startswith( + first_stage_feature_extractor_scope): + var_name = variable.op.name.replace( + first_stage_feature_extractor_scope + '/', '') + variables_to_restore[var_name] = variable + if variable.op.name.startswith( + second_stage_feature_extractor_scope): + var_name = variable.op.name.replace( + second_stage_feature_extractor_scope + + '/InceptionResnetV2/Repeat', 'InceptionResnetV2/Repeat_2') + var_name = var_name.replace( + second_stage_feature_extractor_scope + '/', '') + variables_to_restore[var_name] = variable + return variables_to_restore diff --git a/models/research/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor_tf1_test.py b/models/research/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..2505fbfb3ad6e8621a3b2d05caba506b350f0f49 --- /dev/null +++ b/models/research/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor_tf1_test.py @@ -0,0 +1,111 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for models.faster_rcnn_inception_resnet_v2_feature_extractor.""" +import unittest +import tensorflow.compat.v1 as tf + +from object_detection.models import faster_rcnn_inception_resnet_v2_feature_extractor as frcnn_inc_res +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class FasterRcnnInceptionResnetV2FeatureExtractorTest(tf.test.TestCase): + + def _build_feature_extractor(self, first_stage_features_stride): + return frcnn_inc_res.FasterRCNNInceptionResnetV2FeatureExtractor( + is_training=False, + first_stage_features_stride=first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0) + + def test_extract_proposal_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [1, 299, 299, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [1, 19, 19, 1088]) + + def test_extract_proposal_features_stride_eight(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=8) + preprocessed_inputs = tf.random_uniform( + [1, 224, 224, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = 
tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [1, 28, 28, 1088]) + + def test_extract_proposal_features_half_size_input(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [1, 112, 112, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [1, 7, 7, 1088]) + + def test_extract_proposal_features_dies_on_invalid_stride(self): + with self.assertRaises(ValueError): + self._build_feature_extractor(first_stage_features_stride=99) + + def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [224, 224, 3], maxval=255, dtype=tf.float32) + with self.assertRaises(ValueError): + feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + + def test_extract_box_classifier_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + proposal_feature_maps = tf.random_uniform( + [2, 17, 17, 1088], maxval=255, dtype=tf.float32) + proposal_classifier_features = ( + feature_extractor.extract_box_classifier_features( + proposal_feature_maps, scope='TestScope')) + features_shape = tf.shape(proposal_classifier_features) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [2, 8, 8, 1536]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/faster_rcnn_inception_resnet_v2_keras_feature_extractor.py b/models/research/object_detection/models/faster_rcnn_inception_resnet_v2_keras_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..9196871bd3acbdf5d2b8379b56e1a8778daf3065 --- /dev/null +++ b/models/research/object_detection/models/faster_rcnn_inception_resnet_v2_keras_feature_extractor.py @@ -0,0 +1,1093 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Inception Resnet v2 Faster R-CNN implementation in Keras. + +See "Inception-v4, Inception-ResNet and the Impact of Residual Connections on +Learning" by Szegedy et al. 
(https://arxiv.org/abs/1602.07261) +as well as +"Speed/accuracy trade-offs for modern convolutional object detectors" by +Huang et al. (https://arxiv.org/abs/1611.10012) +""" + +# Skip pylint for this file because it times out +# pylint: skip-file + +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures import faster_rcnn_meta_arch +from object_detection.models.keras_models import inception_resnet_v2 +from object_detection.utils import model_util +from object_detection.utils import variables_helper + + +class FasterRCNNInceptionResnetV2KerasFeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor): + """Faster R-CNN with Inception Resnet v2 feature extractor implementation.""" + + def __init__(self, + is_training, + first_stage_features_stride, + batch_norm_trainable=False, + weight_decay=0.0): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + weight_decay: See base class. + + Raises: + ValueError: If `first_stage_features_stride` is not 8 or 16. + """ + if first_stage_features_stride != 8 and first_stage_features_stride != 16: + raise ValueError('`first_stage_features_stride` must be 8 or 16.') + super(FasterRCNNInceptionResnetV2KerasFeatureExtractor, self).__init__( + is_training, first_stage_features_stride, batch_norm_trainable, + weight_decay) + self._variable_dict = {} + + def preprocess(self, resized_inputs): + """Faster R-CNN with Inception Resnet v2 preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: A [batch, height_in, width_in, channels] float32 tensor + representing a batch of images with values between 0 and 255.0. + + Returns: + preprocessed_inputs: A [batch, height_out, width_out, channels] float32 + tensor representing a batch of images. + + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def get_proposal_feature_extractor_model(self, name=None): + """Returns a model that extracts first stage RPN features. + + Extracts features using the first half of the Inception Resnet v2 network. + We construct the network in `align_feature_maps=True` mode, which means + that all VALID paddings in the network are changed to SAME padding so that + the feature maps are aligned. + + Args: + name: A scope name to construct all variables within. + + Returns: + A Keras model that takes preprocessed_inputs: + A [batch, height, width, channels] float32 tensor + representing a batch of images. + + And returns rpn_feature_map: + A tensor with shape [batch, height, width, depth] + """ + with tf.name_scope(name): + with tf.name_scope('InceptionResnetV2'): + model = inception_resnet_v2.inception_resnet_v2( + self._train_batch_norm, + output_stride=self._first_stage_features_stride, + align_feature_maps=True, + weight_decay=self._weight_decay, + weights=None, + include_top=False) + proposal_features = model.get_layer( + name='block17_20_ac').output + keras_model = tf.keras.Model( + inputs=model.inputs, + outputs=proposal_features) + for variable in keras_model.variables: + self._variable_dict[variable.name[:-2]] = variable + return keras_model + + def get_box_classifier_feature_extractor_model(self, name=None): + """Returns a model that extracts second stage box classifier features. + + This function reconstructs the "second half" of the Inception ResNet v2 + network after the part defined in `get_proposal_feature_extractor_model`. + + Args: + name: A scope name to construct all variables within. 
+ + Returns: + A Keras model that takes proposal_feature_maps: + A 4-D float tensor with shape + [batch_size * self.max_num_proposals, crop_height, crop_width, depth] + representing the feature map cropped to each proposal. + And returns proposal_classifier_features: + A 4-D float tensor with shape + [batch_size * self.max_num_proposals, height, width, depth] + representing box classifier features for each proposal. + """ + with tf.name_scope(name): + with tf.name_scope('InceptionResnetV2'): + model = inception_resnet_v2.inception_resnet_v2( + self._train_batch_norm, + output_stride=16, + align_feature_maps=False, + weight_decay=self._weight_decay, + weights=None, + include_top=False) + + proposal_feature_maps = model.get_layer( + name='block17_20_ac').output + proposal_classifier_features = model.get_layer( + name='conv_7b_ac').output + + keras_model = model_util.extract_submodel( + model=model, + inputs=proposal_feature_maps, + outputs=proposal_classifier_features) + for variable in keras_model.variables: + self._variable_dict[variable.name[:-2]] = variable + return keras_model + + def restore_from_classification_checkpoint_fn( + self, + first_stage_feature_extractor_scope, + second_stage_feature_extractor_scope): + """Returns a map of variables to load from a foreign checkpoint. + + This uses a hard-coded conversion to load into Keras from a slim-trained + inception_resnet_v2 checkpoint. + Note that this overrides the default implementation in + faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor which does not work + for InceptionResnetV2 checkpoints. + + Args: + first_stage_feature_extractor_scope: A scope name for the first stage + feature extractor. + second_stage_feature_extractor_scope: A scope name for the second stage + feature extractor. + + Returns: + A dict mapping variable names (to load from a checkpoint) to variables in + the model graph. 
+ """ + + keras_to_slim_name_mapping = { + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d/kernel': 'InceptionResnetV2/Conv2d_1a_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm/beta': 'InceptionResnetV2/Conv2d_1a_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm/moving_mean': 'InceptionResnetV2/Conv2d_1a_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm/moving_variance': 'InceptionResnetV2/Conv2d_1a_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_1/kernel': 'InceptionResnetV2/Conv2d_2a_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_1/beta': 'InceptionResnetV2/Conv2d_2a_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_1/moving_mean': 'InceptionResnetV2/Conv2d_2a_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_1/moving_variance': 'InceptionResnetV2/Conv2d_2a_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_2/kernel': 'InceptionResnetV2/Conv2d_2b_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_2/beta': 'InceptionResnetV2/Conv2d_2b_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_2/moving_mean': 'InceptionResnetV2/Conv2d_2b_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_2/moving_variance': 'InceptionResnetV2/Conv2d_2b_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_3/kernel': 'InceptionResnetV2/Conv2d_3b_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_3/beta': 'InceptionResnetV2/Conv2d_3b_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_3/moving_mean': 'InceptionResnetV2/Conv2d_3b_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_3/moving_variance': 'InceptionResnetV2/Conv2d_3b_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_4/kernel': 'InceptionResnetV2/Conv2d_4a_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_4/beta': 'InceptionResnetV2/Conv2d_4a_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_4/moving_mean': 'InceptionResnetV2/Conv2d_4a_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_4/moving_variance': 'InceptionResnetV2/Conv2d_4a_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_5/kernel': 'InceptionResnetV2/Mixed_5b/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_5/beta': 'InceptionResnetV2/Mixed_5b/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_5/moving_mean': 'InceptionResnetV2/Mixed_5b/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_5/moving_variance': 'InceptionResnetV2/Mixed_5b/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_6/kernel': 'InceptionResnetV2/Mixed_5b/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_6/beta': 'InceptionResnetV2/Mixed_5b/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_6/moving_mean': 'InceptionResnetV2/Mixed_5b/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_6/moving_variance': 'InceptionResnetV2/Mixed_5b/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_7/kernel': 'InceptionResnetV2/Mixed_5b/Branch_1/Conv2d_0b_5x5/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_7/beta': 'InceptionResnetV2/Mixed_5b/Branch_1/Conv2d_0b_5x5/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_7/moving_mean': 'InceptionResnetV2/Mixed_5b/Branch_1/Conv2d_0b_5x5/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_7/moving_variance': 'InceptionResnetV2/Mixed_5b/Branch_1/Conv2d_0b_5x5/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_8/kernel': 'InceptionResnetV2/Mixed_5b/Branch_2/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_8/beta': 'InceptionResnetV2/Mixed_5b/Branch_2/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_8/moving_mean': 'InceptionResnetV2/Mixed_5b/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_8/moving_variance': 'InceptionResnetV2/Mixed_5b/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_9/kernel': 'InceptionResnetV2/Mixed_5b/Branch_2/Conv2d_0b_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_9/beta': 'InceptionResnetV2/Mixed_5b/Branch_2/Conv2d_0b_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_9/moving_mean': 'InceptionResnetV2/Mixed_5b/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_9/moving_variance': 'InceptionResnetV2/Mixed_5b/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_10/kernel': 'InceptionResnetV2/Mixed_5b/Branch_2/Conv2d_0c_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_10/beta': 'InceptionResnetV2/Mixed_5b/Branch_2/Conv2d_0c_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_10/moving_mean': 'InceptionResnetV2/Mixed_5b/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_10/moving_variance': 'InceptionResnetV2/Mixed_5b/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_11/kernel': 'InceptionResnetV2/Mixed_5b/Branch_3/Conv2d_0b_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_11/beta': 'InceptionResnetV2/Mixed_5b/Branch_3/Conv2d_0b_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_11/moving_mean': 'InceptionResnetV2/Mixed_5b/Branch_3/Conv2d_0b_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_11/moving_variance': 'InceptionResnetV2/Mixed_5b/Branch_3/Conv2d_0b_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_12/kernel': 'InceptionResnetV2/Repeat/block35_1/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_12/beta': 
'InceptionResnetV2/Repeat/block35_1/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_12/moving_mean': 'InceptionResnetV2/Repeat/block35_1/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_12/moving_variance': 'InceptionResnetV2/Repeat/block35_1/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_13/kernel': 'InceptionResnetV2/Repeat/block35_1/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_13/beta': 'InceptionResnetV2/Repeat/block35_1/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_13/moving_mean': 'InceptionResnetV2/Repeat/block35_1/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_13/moving_variance': 'InceptionResnetV2/Repeat/block35_1/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_14/kernel': 'InceptionResnetV2/Repeat/block35_1/Branch_1/Conv2d_0b_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_14/beta': 'InceptionResnetV2/Repeat/block35_1/Branch_1/Conv2d_0b_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_14/moving_mean': 'InceptionResnetV2/Repeat/block35_1/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_14/moving_variance': 'InceptionResnetV2/Repeat/block35_1/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_15/kernel': 'InceptionResnetV2/Repeat/block35_1/Branch_2/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_15/beta': 'InceptionResnetV2/Repeat/block35_1/Branch_2/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_15/moving_mean': 'InceptionResnetV2/Repeat/block35_1/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_15/moving_variance': 'InceptionResnetV2/Repeat/block35_1/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_16/kernel': 'InceptionResnetV2/Repeat/block35_1/Branch_2/Conv2d_0b_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_16/beta': 'InceptionResnetV2/Repeat/block35_1/Branch_2/Conv2d_0b_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_16/moving_mean': 'InceptionResnetV2/Repeat/block35_1/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_16/moving_variance': 'InceptionResnetV2/Repeat/block35_1/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_17/kernel': 'InceptionResnetV2/Repeat/block35_1/Branch_2/Conv2d_0c_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_17/beta': 'InceptionResnetV2/Repeat/block35_1/Branch_2/Conv2d_0c_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_17/moving_mean': 'InceptionResnetV2/Repeat/block35_1/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_17/moving_variance': 
'InceptionResnetV2/Repeat/block35_1/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block35_1_conv/kernel': 'InceptionResnetV2/Repeat/block35_1/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block35_1_conv/bias': 'InceptionResnetV2/Repeat/block35_1/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_18/kernel': 'InceptionResnetV2/Repeat/block35_2/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_18/beta': 'InceptionResnetV2/Repeat/block35_2/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_18/moving_mean': 'InceptionResnetV2/Repeat/block35_2/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_18/moving_variance': 'InceptionResnetV2/Repeat/block35_2/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_19/kernel': 'InceptionResnetV2/Repeat/block35_2/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_19/beta': 'InceptionResnetV2/Repeat/block35_2/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_19/moving_mean': 'InceptionResnetV2/Repeat/block35_2/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_19/moving_variance': 'InceptionResnetV2/Repeat/block35_2/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_20/kernel': 'InceptionResnetV2/Repeat/block35_2/Branch_1/Conv2d_0b_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_20/beta': 'InceptionResnetV2/Repeat/block35_2/Branch_1/Conv2d_0b_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_20/moving_mean': 'InceptionResnetV2/Repeat/block35_2/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_20/moving_variance': 'InceptionResnetV2/Repeat/block35_2/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_21/kernel': 'InceptionResnetV2/Repeat/block35_2/Branch_2/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_21/beta': 'InceptionResnetV2/Repeat/block35_2/Branch_2/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_21/moving_mean': 'InceptionResnetV2/Repeat/block35_2/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_21/moving_variance': 'InceptionResnetV2/Repeat/block35_2/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_22/kernel': 'InceptionResnetV2/Repeat/block35_2/Branch_2/Conv2d_0b_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_22/beta': 'InceptionResnetV2/Repeat/block35_2/Branch_2/Conv2d_0b_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_22/moving_mean': 'InceptionResnetV2/Repeat/block35_2/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_22/moving_variance': 'InceptionResnetV2/Repeat/block35_2/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_variance', + 
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_23/kernel': 'InceptionResnetV2/Repeat/block35_2/Branch_2/Conv2d_0c_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_23/beta': 'InceptionResnetV2/Repeat/block35_2/Branch_2/Conv2d_0c_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_23/moving_mean': 'InceptionResnetV2/Repeat/block35_2/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_23/moving_variance': 'InceptionResnetV2/Repeat/block35_2/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block35_2_conv/kernel': 'InceptionResnetV2/Repeat/block35_2/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block35_2_conv/bias': 'InceptionResnetV2/Repeat/block35_2/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_24/kernel': 'InceptionResnetV2/Repeat/block35_3/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_24/beta': 'InceptionResnetV2/Repeat/block35_3/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_24/moving_mean': 'InceptionResnetV2/Repeat/block35_3/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_24/moving_variance': 'InceptionResnetV2/Repeat/block35_3/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_25/kernel': 'InceptionResnetV2/Repeat/block35_3/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_25/beta': 'InceptionResnetV2/Repeat/block35_3/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_25/moving_mean': 'InceptionResnetV2/Repeat/block35_3/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_25/moving_variance': 'InceptionResnetV2/Repeat/block35_3/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_26/kernel': 'InceptionResnetV2/Repeat/block35_3/Branch_1/Conv2d_0b_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_26/beta': 'InceptionResnetV2/Repeat/block35_3/Branch_1/Conv2d_0b_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_26/moving_mean': 'InceptionResnetV2/Repeat/block35_3/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_26/moving_variance': 'InceptionResnetV2/Repeat/block35_3/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_27/kernel': 'InceptionResnetV2/Repeat/block35_3/Branch_2/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_27/beta': 'InceptionResnetV2/Repeat/block35_3/Branch_2/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_27/moving_mean': 'InceptionResnetV2/Repeat/block35_3/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_27/moving_variance': 'InceptionResnetV2/Repeat/block35_3/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_28/kernel': 
'InceptionResnetV2/Repeat/block35_3/Branch_2/Conv2d_0b_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_28/beta': 'InceptionResnetV2/Repeat/block35_3/Branch_2/Conv2d_0b_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_28/moving_mean': 'InceptionResnetV2/Repeat/block35_3/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_28/moving_variance': 'InceptionResnetV2/Repeat/block35_3/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_29/kernel': 'InceptionResnetV2/Repeat/block35_3/Branch_2/Conv2d_0c_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_29/beta': 'InceptionResnetV2/Repeat/block35_3/Branch_2/Conv2d_0c_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_29/moving_mean': 'InceptionResnetV2/Repeat/block35_3/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_29/moving_variance': 'InceptionResnetV2/Repeat/block35_3/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block35_3_conv/kernel': 'InceptionResnetV2/Repeat/block35_3/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block35_3_conv/bias': 'InceptionResnetV2/Repeat/block35_3/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_30/kernel': 'InceptionResnetV2/Repeat/block35_4/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_30/beta': 'InceptionResnetV2/Repeat/block35_4/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_30/moving_mean': 'InceptionResnetV2/Repeat/block35_4/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_30/moving_variance': 'InceptionResnetV2/Repeat/block35_4/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_31/kernel': 'InceptionResnetV2/Repeat/block35_4/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_31/beta': 'InceptionResnetV2/Repeat/block35_4/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_31/moving_mean': 'InceptionResnetV2/Repeat/block35_4/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_31/moving_variance': 'InceptionResnetV2/Repeat/block35_4/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_32/kernel': 'InceptionResnetV2/Repeat/block35_4/Branch_1/Conv2d_0b_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_32/beta': 'InceptionResnetV2/Repeat/block35_4/Branch_1/Conv2d_0b_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_32/moving_mean': 'InceptionResnetV2/Repeat/block35_4/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_32/moving_variance': 'InceptionResnetV2/Repeat/block35_4/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_33/kernel': 'InceptionResnetV2/Repeat/block35_4/Branch_2/Conv2d_0a_1x1/weights', + 
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_33/beta': 'InceptionResnetV2/Repeat/block35_4/Branch_2/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_33/moving_mean': 'InceptionResnetV2/Repeat/block35_4/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_33/moving_variance': 'InceptionResnetV2/Repeat/block35_4/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_34/kernel': 'InceptionResnetV2/Repeat/block35_4/Branch_2/Conv2d_0b_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_34/beta': 'InceptionResnetV2/Repeat/block35_4/Branch_2/Conv2d_0b_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_34/moving_mean': 'InceptionResnetV2/Repeat/block35_4/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_34/moving_variance': 'InceptionResnetV2/Repeat/block35_4/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_35/kernel': 'InceptionResnetV2/Repeat/block35_4/Branch_2/Conv2d_0c_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_35/beta': 'InceptionResnetV2/Repeat/block35_4/Branch_2/Conv2d_0c_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_35/moving_mean': 'InceptionResnetV2/Repeat/block35_4/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_35/moving_variance': 'InceptionResnetV2/Repeat/block35_4/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block35_4_conv/kernel': 'InceptionResnetV2/Repeat/block35_4/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block35_4_conv/bias': 'InceptionResnetV2/Repeat/block35_4/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_36/kernel': 'InceptionResnetV2/Repeat/block35_5/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_36/beta': 'InceptionResnetV2/Repeat/block35_5/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_36/moving_mean': 'InceptionResnetV2/Repeat/block35_5/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_36/moving_variance': 'InceptionResnetV2/Repeat/block35_5/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_37/kernel': 'InceptionResnetV2/Repeat/block35_5/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_37/beta': 'InceptionResnetV2/Repeat/block35_5/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_37/moving_mean': 'InceptionResnetV2/Repeat/block35_5/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_37/moving_variance': 'InceptionResnetV2/Repeat/block35_5/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_38/kernel': 'InceptionResnetV2/Repeat/block35_5/Branch_1/Conv2d_0b_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_38/beta': 
'InceptionResnetV2/Repeat/block35_5/Branch_1/Conv2d_0b_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_38/moving_mean': 'InceptionResnetV2/Repeat/block35_5/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_38/moving_variance': 'InceptionResnetV2/Repeat/block35_5/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_39/kernel': 'InceptionResnetV2/Repeat/block35_5/Branch_2/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_39/beta': 'InceptionResnetV2/Repeat/block35_5/Branch_2/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_39/moving_mean': 'InceptionResnetV2/Repeat/block35_5/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_39/moving_variance': 'InceptionResnetV2/Repeat/block35_5/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_40/kernel': 'InceptionResnetV2/Repeat/block35_5/Branch_2/Conv2d_0b_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_40/beta': 'InceptionResnetV2/Repeat/block35_5/Branch_2/Conv2d_0b_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_40/moving_mean': 'InceptionResnetV2/Repeat/block35_5/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_40/moving_variance': 'InceptionResnetV2/Repeat/block35_5/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_41/kernel': 'InceptionResnetV2/Repeat/block35_5/Branch_2/Conv2d_0c_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_41/beta': 'InceptionResnetV2/Repeat/block35_5/Branch_2/Conv2d_0c_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_41/moving_mean': 'InceptionResnetV2/Repeat/block35_5/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_41/moving_variance': 'InceptionResnetV2/Repeat/block35_5/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block35_5_conv/kernel': 'InceptionResnetV2/Repeat/block35_5/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block35_5_conv/bias': 'InceptionResnetV2/Repeat/block35_5/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_42/kernel': 'InceptionResnetV2/Repeat/block35_6/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_42/beta': 'InceptionResnetV2/Repeat/block35_6/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_42/moving_mean': 'InceptionResnetV2/Repeat/block35_6/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_42/moving_variance': 'InceptionResnetV2/Repeat/block35_6/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_43/kernel': 'InceptionResnetV2/Repeat/block35_6/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_43/beta': 'InceptionResnetV2/Repeat/block35_6/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_43/moving_mean': 'InceptionResnetV2/Repeat/block35_6/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_43/moving_variance': 'InceptionResnetV2/Repeat/block35_6/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_44/kernel': 'InceptionResnetV2/Repeat/block35_6/Branch_1/Conv2d_0b_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_44/beta': 'InceptionResnetV2/Repeat/block35_6/Branch_1/Conv2d_0b_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_44/moving_mean': 'InceptionResnetV2/Repeat/block35_6/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_44/moving_variance': 'InceptionResnetV2/Repeat/block35_6/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_45/kernel': 'InceptionResnetV2/Repeat/block35_6/Branch_2/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_45/beta': 'InceptionResnetV2/Repeat/block35_6/Branch_2/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_45/moving_mean': 'InceptionResnetV2/Repeat/block35_6/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_45/moving_variance': 'InceptionResnetV2/Repeat/block35_6/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_46/kernel': 'InceptionResnetV2/Repeat/block35_6/Branch_2/Conv2d_0b_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_46/beta': 'InceptionResnetV2/Repeat/block35_6/Branch_2/Conv2d_0b_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_46/moving_mean': 'InceptionResnetV2/Repeat/block35_6/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_46/moving_variance': 'InceptionResnetV2/Repeat/block35_6/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_47/kernel': 'InceptionResnetV2/Repeat/block35_6/Branch_2/Conv2d_0c_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_47/beta': 'InceptionResnetV2/Repeat/block35_6/Branch_2/Conv2d_0c_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_47/moving_mean': 'InceptionResnetV2/Repeat/block35_6/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_47/moving_variance': 'InceptionResnetV2/Repeat/block35_6/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block35_6_conv/kernel': 'InceptionResnetV2/Repeat/block35_6/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block35_6_conv/bias': 'InceptionResnetV2/Repeat/block35_6/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_48/kernel': 'InceptionResnetV2/Repeat/block35_7/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_48/beta': 'InceptionResnetV2/Repeat/block35_7/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_48/moving_mean': 
'InceptionResnetV2/Repeat/block35_7/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_48/moving_variance': 'InceptionResnetV2/Repeat/block35_7/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_49/kernel': 'InceptionResnetV2/Repeat/block35_7/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_49/beta': 'InceptionResnetV2/Repeat/block35_7/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_49/moving_mean': 'InceptionResnetV2/Repeat/block35_7/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_49/moving_variance': 'InceptionResnetV2/Repeat/block35_7/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_50/kernel': 'InceptionResnetV2/Repeat/block35_7/Branch_1/Conv2d_0b_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_50/beta': 'InceptionResnetV2/Repeat/block35_7/Branch_1/Conv2d_0b_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_50/moving_mean': 'InceptionResnetV2/Repeat/block35_7/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_50/moving_variance': 'InceptionResnetV2/Repeat/block35_7/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_51/kernel': 'InceptionResnetV2/Repeat/block35_7/Branch_2/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_51/beta': 'InceptionResnetV2/Repeat/block35_7/Branch_2/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_51/moving_mean': 'InceptionResnetV2/Repeat/block35_7/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_51/moving_variance': 'InceptionResnetV2/Repeat/block35_7/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_52/kernel': 'InceptionResnetV2/Repeat/block35_7/Branch_2/Conv2d_0b_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_52/beta': 'InceptionResnetV2/Repeat/block35_7/Branch_2/Conv2d_0b_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_52/moving_mean': 'InceptionResnetV2/Repeat/block35_7/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_52/moving_variance': 'InceptionResnetV2/Repeat/block35_7/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_53/kernel': 'InceptionResnetV2/Repeat/block35_7/Branch_2/Conv2d_0c_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_53/beta': 'InceptionResnetV2/Repeat/block35_7/Branch_2/Conv2d_0c_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_53/moving_mean': 'InceptionResnetV2/Repeat/block35_7/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_53/moving_variance': 'InceptionResnetV2/Repeat/block35_7/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block35_7_conv/kernel': 
'InceptionResnetV2/Repeat/block35_7/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block35_7_conv/bias': 'InceptionResnetV2/Repeat/block35_7/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_54/kernel': 'InceptionResnetV2/Repeat/block35_8/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_54/beta': 'InceptionResnetV2/Repeat/block35_8/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_54/moving_mean': 'InceptionResnetV2/Repeat/block35_8/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_54/moving_variance': 'InceptionResnetV2/Repeat/block35_8/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_55/kernel': 'InceptionResnetV2/Repeat/block35_8/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_55/beta': 'InceptionResnetV2/Repeat/block35_8/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_55/moving_mean': 'InceptionResnetV2/Repeat/block35_8/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_55/moving_variance': 'InceptionResnetV2/Repeat/block35_8/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_56/kernel': 'InceptionResnetV2/Repeat/block35_8/Branch_1/Conv2d_0b_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_56/beta': 'InceptionResnetV2/Repeat/block35_8/Branch_1/Conv2d_0b_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_56/moving_mean': 'InceptionResnetV2/Repeat/block35_8/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_56/moving_variance': 'InceptionResnetV2/Repeat/block35_8/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_57/kernel': 'InceptionResnetV2/Repeat/block35_8/Branch_2/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_57/beta': 'InceptionResnetV2/Repeat/block35_8/Branch_2/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_57/moving_mean': 'InceptionResnetV2/Repeat/block35_8/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_57/moving_variance': 'InceptionResnetV2/Repeat/block35_8/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_58/kernel': 'InceptionResnetV2/Repeat/block35_8/Branch_2/Conv2d_0b_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_58/beta': 'InceptionResnetV2/Repeat/block35_8/Branch_2/Conv2d_0b_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_58/moving_mean': 'InceptionResnetV2/Repeat/block35_8/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_58/moving_variance': 'InceptionResnetV2/Repeat/block35_8/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_59/kernel': 'InceptionResnetV2/Repeat/block35_8/Branch_2/Conv2d_0c_3x3/weights', + 
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_59/beta': 'InceptionResnetV2/Repeat/block35_8/Branch_2/Conv2d_0c_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_59/moving_mean': 'InceptionResnetV2/Repeat/block35_8/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_59/moving_variance': 'InceptionResnetV2/Repeat/block35_8/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block35_8_conv/kernel': 'InceptionResnetV2/Repeat/block35_8/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block35_8_conv/bias': 'InceptionResnetV2/Repeat/block35_8/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_60/kernel': 'InceptionResnetV2/Repeat/block35_9/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_60/beta': 'InceptionResnetV2/Repeat/block35_9/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_60/moving_mean': 'InceptionResnetV2/Repeat/block35_9/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_60/moving_variance': 'InceptionResnetV2/Repeat/block35_9/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_61/kernel': 'InceptionResnetV2/Repeat/block35_9/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_61/beta': 'InceptionResnetV2/Repeat/block35_9/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_61/moving_mean': 'InceptionResnetV2/Repeat/block35_9/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_61/moving_variance': 'InceptionResnetV2/Repeat/block35_9/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_62/kernel': 'InceptionResnetV2/Repeat/block35_9/Branch_1/Conv2d_0b_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_62/beta': 'InceptionResnetV2/Repeat/block35_9/Branch_1/Conv2d_0b_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_62/moving_mean': 'InceptionResnetV2/Repeat/block35_9/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_62/moving_variance': 'InceptionResnetV2/Repeat/block35_9/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_63/kernel': 'InceptionResnetV2/Repeat/block35_9/Branch_2/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_63/beta': 'InceptionResnetV2/Repeat/block35_9/Branch_2/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_63/moving_mean': 'InceptionResnetV2/Repeat/block35_9/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_63/moving_variance': 'InceptionResnetV2/Repeat/block35_9/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_64/kernel': 'InceptionResnetV2/Repeat/block35_9/Branch_2/Conv2d_0b_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_64/beta': 
'InceptionResnetV2/Repeat/block35_9/Branch_2/Conv2d_0b_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_64/moving_mean': 'InceptionResnetV2/Repeat/block35_9/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_64/moving_variance': 'InceptionResnetV2/Repeat/block35_9/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_65/kernel': 'InceptionResnetV2/Repeat/block35_9/Branch_2/Conv2d_0c_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_65/beta': 'InceptionResnetV2/Repeat/block35_9/Branch_2/Conv2d_0c_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_65/moving_mean': 'InceptionResnetV2/Repeat/block35_9/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_65/moving_variance': 'InceptionResnetV2/Repeat/block35_9/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block35_9_conv/kernel': 'InceptionResnetV2/Repeat/block35_9/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block35_9_conv/bias': 'InceptionResnetV2/Repeat/block35_9/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_66/kernel': 'InceptionResnetV2/Repeat/block35_10/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_66/beta': 'InceptionResnetV2/Repeat/block35_10/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_66/moving_mean': 'InceptionResnetV2/Repeat/block35_10/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_66/moving_variance': 'InceptionResnetV2/Repeat/block35_10/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_67/kernel': 'InceptionResnetV2/Repeat/block35_10/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_67/beta': 'InceptionResnetV2/Repeat/block35_10/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_67/moving_mean': 'InceptionResnetV2/Repeat/block35_10/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_67/moving_variance': 'InceptionResnetV2/Repeat/block35_10/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_68/kernel': 'InceptionResnetV2/Repeat/block35_10/Branch_1/Conv2d_0b_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_68/beta': 'InceptionResnetV2/Repeat/block35_10/Branch_1/Conv2d_0b_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_68/moving_mean': 'InceptionResnetV2/Repeat/block35_10/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_68/moving_variance': 'InceptionResnetV2/Repeat/block35_10/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_69/kernel': 'InceptionResnetV2/Repeat/block35_10/Branch_2/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_69/beta': 'InceptionResnetV2/Repeat/block35_10/Branch_2/Conv2d_0a_1x1/BatchNorm/beta', + 
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_69/moving_mean': 'InceptionResnetV2/Repeat/block35_10/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_69/moving_variance': 'InceptionResnetV2/Repeat/block35_10/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_70/kernel': 'InceptionResnetV2/Repeat/block35_10/Branch_2/Conv2d_0b_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_70/beta': 'InceptionResnetV2/Repeat/block35_10/Branch_2/Conv2d_0b_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_70/moving_mean': 'InceptionResnetV2/Repeat/block35_10/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_70/moving_variance': 'InceptionResnetV2/Repeat/block35_10/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_71/kernel': 'InceptionResnetV2/Repeat/block35_10/Branch_2/Conv2d_0c_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_71/beta': 'InceptionResnetV2/Repeat/block35_10/Branch_2/Conv2d_0c_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_71/moving_mean': 'InceptionResnetV2/Repeat/block35_10/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_71/moving_variance': 'InceptionResnetV2/Repeat/block35_10/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block35_10_conv/kernel': 'InceptionResnetV2/Repeat/block35_10/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block35_10_conv/bias': 'InceptionResnetV2/Repeat/block35_10/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_72/kernel': 'InceptionResnetV2/Mixed_6a/Branch_0/Conv2d_1a_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_72/beta': 'InceptionResnetV2/Mixed_6a/Branch_0/Conv2d_1a_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_72/moving_mean': 'InceptionResnetV2/Mixed_6a/Branch_0/Conv2d_1a_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_72/moving_variance': 'InceptionResnetV2/Mixed_6a/Branch_0/Conv2d_1a_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_73/kernel': 'InceptionResnetV2/Mixed_6a/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_73/beta': 'InceptionResnetV2/Mixed_6a/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_73/moving_mean': 'InceptionResnetV2/Mixed_6a/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_73/moving_variance': 'InceptionResnetV2/Mixed_6a/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_74/kernel': 'InceptionResnetV2/Mixed_6a/Branch_1/Conv2d_0b_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_74/beta': 'InceptionResnetV2/Mixed_6a/Branch_1/Conv2d_0b_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_74/moving_mean': 'InceptionResnetV2/Mixed_6a/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_mean', + 
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_74/moving_variance': 'InceptionResnetV2/Mixed_6a/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_75/kernel': 'InceptionResnetV2/Mixed_6a/Branch_1/Conv2d_1a_3x3/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_75/beta': 'InceptionResnetV2/Mixed_6a/Branch_1/Conv2d_1a_3x3/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_75/moving_mean': 'InceptionResnetV2/Mixed_6a/Branch_1/Conv2d_1a_3x3/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_75/moving_variance': 'InceptionResnetV2/Mixed_6a/Branch_1/Conv2d_1a_3x3/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_76/kernel': 'InceptionResnetV2/Repeat_1/block17_1/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_76/beta': 'InceptionResnetV2/Repeat_1/block17_1/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_76/moving_mean': 'InceptionResnetV2/Repeat_1/block17_1/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_76/moving_variance': 'InceptionResnetV2/Repeat_1/block17_1/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_77/kernel': 'InceptionResnetV2/Repeat_1/block17_1/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_77/beta': 'InceptionResnetV2/Repeat_1/block17_1/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_77/moving_mean': 'InceptionResnetV2/Repeat_1/block17_1/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_77/moving_variance': 'InceptionResnetV2/Repeat_1/block17_1/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_78/kernel': 'InceptionResnetV2/Repeat_1/block17_1/Branch_1/Conv2d_0b_1x7/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_78/beta': 'InceptionResnetV2/Repeat_1/block17_1/Branch_1/Conv2d_0b_1x7/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_78/moving_mean': 'InceptionResnetV2/Repeat_1/block17_1/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_78/moving_variance': 'InceptionResnetV2/Repeat_1/block17_1/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_79/kernel': 'InceptionResnetV2/Repeat_1/block17_1/Branch_1/Conv2d_0c_7x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_79/beta': 'InceptionResnetV2/Repeat_1/block17_1/Branch_1/Conv2d_0c_7x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_79/moving_mean': 'InceptionResnetV2/Repeat_1/block17_1/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_79/moving_variance': 'InceptionResnetV2/Repeat_1/block17_1/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_1_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_1/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_1_conv/bias': 
'InceptionResnetV2/Repeat_1/block17_1/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_80/kernel': 'InceptionResnetV2/Repeat_1/block17_2/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_80/beta': 'InceptionResnetV2/Repeat_1/block17_2/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_80/moving_mean': 'InceptionResnetV2/Repeat_1/block17_2/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_80/moving_variance': 'InceptionResnetV2/Repeat_1/block17_2/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_81/kernel': 'InceptionResnetV2/Repeat_1/block17_2/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_81/beta': 'InceptionResnetV2/Repeat_1/block17_2/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_81/moving_mean': 'InceptionResnetV2/Repeat_1/block17_2/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_81/moving_variance': 'InceptionResnetV2/Repeat_1/block17_2/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_82/kernel': 'InceptionResnetV2/Repeat_1/block17_2/Branch_1/Conv2d_0b_1x7/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_82/beta': 'InceptionResnetV2/Repeat_1/block17_2/Branch_1/Conv2d_0b_1x7/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_82/moving_mean': 'InceptionResnetV2/Repeat_1/block17_2/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_82/moving_variance': 'InceptionResnetV2/Repeat_1/block17_2/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_83/kernel': 'InceptionResnetV2/Repeat_1/block17_2/Branch_1/Conv2d_0c_7x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_83/beta': 'InceptionResnetV2/Repeat_1/block17_2/Branch_1/Conv2d_0c_7x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_83/moving_mean': 'InceptionResnetV2/Repeat_1/block17_2/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_83/moving_variance': 'InceptionResnetV2/Repeat_1/block17_2/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_2_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_2/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_2_conv/bias': 'InceptionResnetV2/Repeat_1/block17_2/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_84/kernel': 'InceptionResnetV2/Repeat_1/block17_3/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_84/beta': 'InceptionResnetV2/Repeat_1/block17_3/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_84/moving_mean': 'InceptionResnetV2/Repeat_1/block17_3/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_84/moving_variance': 'InceptionResnetV2/Repeat_1/block17_3/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_85/kernel': 'InceptionResnetV2/Repeat_1/block17_3/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_85/beta': 'InceptionResnetV2/Repeat_1/block17_3/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_85/moving_mean': 'InceptionResnetV2/Repeat_1/block17_3/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_85/moving_variance': 'InceptionResnetV2/Repeat_1/block17_3/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_86/kernel': 'InceptionResnetV2/Repeat_1/block17_3/Branch_1/Conv2d_0b_1x7/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_86/beta': 'InceptionResnetV2/Repeat_1/block17_3/Branch_1/Conv2d_0b_1x7/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_86/moving_mean': 'InceptionResnetV2/Repeat_1/block17_3/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_86/moving_variance': 'InceptionResnetV2/Repeat_1/block17_3/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_87/kernel': 'InceptionResnetV2/Repeat_1/block17_3/Branch_1/Conv2d_0c_7x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_87/beta': 'InceptionResnetV2/Repeat_1/block17_3/Branch_1/Conv2d_0c_7x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_87/moving_mean': 'InceptionResnetV2/Repeat_1/block17_3/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_87/moving_variance': 'InceptionResnetV2/Repeat_1/block17_3/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_3_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_3/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_3_conv/bias': 'InceptionResnetV2/Repeat_1/block17_3/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_88/kernel': 'InceptionResnetV2/Repeat_1/block17_4/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_88/beta': 'InceptionResnetV2/Repeat_1/block17_4/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_88/moving_mean': 'InceptionResnetV2/Repeat_1/block17_4/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_88/moving_variance': 'InceptionResnetV2/Repeat_1/block17_4/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_89/kernel': 'InceptionResnetV2/Repeat_1/block17_4/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_89/beta': 'InceptionResnetV2/Repeat_1/block17_4/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_89/moving_mean': 'InceptionResnetV2/Repeat_1/block17_4/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_89/moving_variance': 'InceptionResnetV2/Repeat_1/block17_4/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_90/kernel': 
'InceptionResnetV2/Repeat_1/block17_4/Branch_1/Conv2d_0b_1x7/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_90/beta': 'InceptionResnetV2/Repeat_1/block17_4/Branch_1/Conv2d_0b_1x7/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_90/moving_mean': 'InceptionResnetV2/Repeat_1/block17_4/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_90/moving_variance': 'InceptionResnetV2/Repeat_1/block17_4/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_91/kernel': 'InceptionResnetV2/Repeat_1/block17_4/Branch_1/Conv2d_0c_7x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_91/beta': 'InceptionResnetV2/Repeat_1/block17_4/Branch_1/Conv2d_0c_7x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_91/moving_mean': 'InceptionResnetV2/Repeat_1/block17_4/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_91/moving_variance': 'InceptionResnetV2/Repeat_1/block17_4/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_4_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_4/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_4_conv/bias': 'InceptionResnetV2/Repeat_1/block17_4/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_92/kernel': 'InceptionResnetV2/Repeat_1/block17_5/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_92/beta': 'InceptionResnetV2/Repeat_1/block17_5/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_92/moving_mean': 'InceptionResnetV2/Repeat_1/block17_5/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_92/moving_variance': 'InceptionResnetV2/Repeat_1/block17_5/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_93/kernel': 'InceptionResnetV2/Repeat_1/block17_5/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_93/beta': 'InceptionResnetV2/Repeat_1/block17_5/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_93/moving_mean': 'InceptionResnetV2/Repeat_1/block17_5/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_93/moving_variance': 'InceptionResnetV2/Repeat_1/block17_5/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_94/kernel': 'InceptionResnetV2/Repeat_1/block17_5/Branch_1/Conv2d_0b_1x7/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_94/beta': 'InceptionResnetV2/Repeat_1/block17_5/Branch_1/Conv2d_0b_1x7/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_94/moving_mean': 'InceptionResnetV2/Repeat_1/block17_5/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_94/moving_variance': 'InceptionResnetV2/Repeat_1/block17_5/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_95/kernel': 'InceptionResnetV2/Repeat_1/block17_5/Branch_1/Conv2d_0c_7x1/weights', + 
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_95/beta': 'InceptionResnetV2/Repeat_1/block17_5/Branch_1/Conv2d_0c_7x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_95/moving_mean': 'InceptionResnetV2/Repeat_1/block17_5/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_95/moving_variance': 'InceptionResnetV2/Repeat_1/block17_5/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_5_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_5/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_5_conv/bias': 'InceptionResnetV2/Repeat_1/block17_5/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_96/kernel': 'InceptionResnetV2/Repeat_1/block17_6/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_96/beta': 'InceptionResnetV2/Repeat_1/block17_6/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_96/moving_mean': 'InceptionResnetV2/Repeat_1/block17_6/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_96/moving_variance': 'InceptionResnetV2/Repeat_1/block17_6/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_97/kernel': 'InceptionResnetV2/Repeat_1/block17_6/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_97/beta': 'InceptionResnetV2/Repeat_1/block17_6/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_97/moving_mean': 'InceptionResnetV2/Repeat_1/block17_6/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_97/moving_variance': 'InceptionResnetV2/Repeat_1/block17_6/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_98/kernel': 'InceptionResnetV2/Repeat_1/block17_6/Branch_1/Conv2d_0b_1x7/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_98/beta': 'InceptionResnetV2/Repeat_1/block17_6/Branch_1/Conv2d_0b_1x7/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_98/moving_mean': 'InceptionResnetV2/Repeat_1/block17_6/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_98/moving_variance': 'InceptionResnetV2/Repeat_1/block17_6/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_99/kernel': 'InceptionResnetV2/Repeat_1/block17_6/Branch_1/Conv2d_0c_7x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_99/beta': 'InceptionResnetV2/Repeat_1/block17_6/Branch_1/Conv2d_0c_7x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_99/moving_mean': 'InceptionResnetV2/Repeat_1/block17_6/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_99/moving_variance': 'InceptionResnetV2/Repeat_1/block17_6/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_6_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_6/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_6_conv/bias': 
'InceptionResnetV2/Repeat_1/block17_6/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_100/kernel': 'InceptionResnetV2/Repeat_1/block17_7/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_100/beta': 'InceptionResnetV2/Repeat_1/block17_7/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_100/moving_mean': 'InceptionResnetV2/Repeat_1/block17_7/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_100/moving_variance': 'InceptionResnetV2/Repeat_1/block17_7/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_101/kernel': 'InceptionResnetV2/Repeat_1/block17_7/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_101/beta': 'InceptionResnetV2/Repeat_1/block17_7/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_101/moving_mean': 'InceptionResnetV2/Repeat_1/block17_7/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_101/moving_variance': 'InceptionResnetV2/Repeat_1/block17_7/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_102/kernel': 'InceptionResnetV2/Repeat_1/block17_7/Branch_1/Conv2d_0b_1x7/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_102/beta': 'InceptionResnetV2/Repeat_1/block17_7/Branch_1/Conv2d_0b_1x7/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_102/moving_mean': 'InceptionResnetV2/Repeat_1/block17_7/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_102/moving_variance': 'InceptionResnetV2/Repeat_1/block17_7/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_103/kernel': 'InceptionResnetV2/Repeat_1/block17_7/Branch_1/Conv2d_0c_7x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_103/beta': 'InceptionResnetV2/Repeat_1/block17_7/Branch_1/Conv2d_0c_7x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_103/moving_mean': 'InceptionResnetV2/Repeat_1/block17_7/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_103/moving_variance': 'InceptionResnetV2/Repeat_1/block17_7/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_7_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_7/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_7_conv/bias': 'InceptionResnetV2/Repeat_1/block17_7/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_104/kernel': 'InceptionResnetV2/Repeat_1/block17_8/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_104/beta': 'InceptionResnetV2/Repeat_1/block17_8/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_104/moving_mean': 'InceptionResnetV2/Repeat_1/block17_8/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_104/moving_variance': 'InceptionResnetV2/Repeat_1/block17_8/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_105/kernel': 'InceptionResnetV2/Repeat_1/block17_8/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_105/beta': 'InceptionResnetV2/Repeat_1/block17_8/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_105/moving_mean': 'InceptionResnetV2/Repeat_1/block17_8/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_105/moving_variance': 'InceptionResnetV2/Repeat_1/block17_8/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_106/kernel': 'InceptionResnetV2/Repeat_1/block17_8/Branch_1/Conv2d_0b_1x7/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_106/beta': 'InceptionResnetV2/Repeat_1/block17_8/Branch_1/Conv2d_0b_1x7/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_106/moving_mean': 'InceptionResnetV2/Repeat_1/block17_8/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_106/moving_variance': 'InceptionResnetV2/Repeat_1/block17_8/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_107/kernel': 'InceptionResnetV2/Repeat_1/block17_8/Branch_1/Conv2d_0c_7x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_107/beta': 'InceptionResnetV2/Repeat_1/block17_8/Branch_1/Conv2d_0c_7x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_107/moving_mean': 'InceptionResnetV2/Repeat_1/block17_8/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_107/moving_variance': 'InceptionResnetV2/Repeat_1/block17_8/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_8_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_8/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_8_conv/bias': 'InceptionResnetV2/Repeat_1/block17_8/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_108/kernel': 'InceptionResnetV2/Repeat_1/block17_9/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_108/beta': 'InceptionResnetV2/Repeat_1/block17_9/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_108/moving_mean': 'InceptionResnetV2/Repeat_1/block17_9/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_108/moving_variance': 'InceptionResnetV2/Repeat_1/block17_9/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_109/kernel': 'InceptionResnetV2/Repeat_1/block17_9/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_109/beta': 'InceptionResnetV2/Repeat_1/block17_9/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_109/moving_mean': 'InceptionResnetV2/Repeat_1/block17_9/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_109/moving_variance': 'InceptionResnetV2/Repeat_1/block17_9/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_110/kernel': 'InceptionResnetV2/Repeat_1/block17_9/Branch_1/Conv2d_0b_1x7/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_110/beta': 'InceptionResnetV2/Repeat_1/block17_9/Branch_1/Conv2d_0b_1x7/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_110/moving_mean': 'InceptionResnetV2/Repeat_1/block17_9/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_110/moving_variance': 'InceptionResnetV2/Repeat_1/block17_9/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_111/kernel': 'InceptionResnetV2/Repeat_1/block17_9/Branch_1/Conv2d_0c_7x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_111/beta': 'InceptionResnetV2/Repeat_1/block17_9/Branch_1/Conv2d_0c_7x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_111/moving_mean': 'InceptionResnetV2/Repeat_1/block17_9/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_111/moving_variance': 'InceptionResnetV2/Repeat_1/block17_9/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_9_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_9/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_9_conv/bias': 'InceptionResnetV2/Repeat_1/block17_9/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_112/kernel': 'InceptionResnetV2/Repeat_1/block17_10/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_112/beta': 'InceptionResnetV2/Repeat_1/block17_10/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_112/moving_mean': 'InceptionResnetV2/Repeat_1/block17_10/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_112/moving_variance': 'InceptionResnetV2/Repeat_1/block17_10/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_113/kernel': 'InceptionResnetV2/Repeat_1/block17_10/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_113/beta': 'InceptionResnetV2/Repeat_1/block17_10/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_113/moving_mean': 'InceptionResnetV2/Repeat_1/block17_10/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_113/moving_variance': 'InceptionResnetV2/Repeat_1/block17_10/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_114/kernel': 'InceptionResnetV2/Repeat_1/block17_10/Branch_1/Conv2d_0b_1x7/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_114/beta': 'InceptionResnetV2/Repeat_1/block17_10/Branch_1/Conv2d_0b_1x7/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_114/moving_mean': 'InceptionResnetV2/Repeat_1/block17_10/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_114/moving_variance': 'InceptionResnetV2/Repeat_1/block17_10/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance', + 
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_115/kernel': 'InceptionResnetV2/Repeat_1/block17_10/Branch_1/Conv2d_0c_7x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_115/beta': 'InceptionResnetV2/Repeat_1/block17_10/Branch_1/Conv2d_0c_7x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_115/moving_mean': 'InceptionResnetV2/Repeat_1/block17_10/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_115/moving_variance': 'InceptionResnetV2/Repeat_1/block17_10/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_10_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_10/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_10_conv/bias': 'InceptionResnetV2/Repeat_1/block17_10/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_116/kernel': 'InceptionResnetV2/Repeat_1/block17_11/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_116/beta': 'InceptionResnetV2/Repeat_1/block17_11/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_116/moving_mean': 'InceptionResnetV2/Repeat_1/block17_11/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_116/moving_variance': 'InceptionResnetV2/Repeat_1/block17_11/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_117/kernel': 'InceptionResnetV2/Repeat_1/block17_11/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_117/beta': 'InceptionResnetV2/Repeat_1/block17_11/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_117/moving_mean': 'InceptionResnetV2/Repeat_1/block17_11/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_117/moving_variance': 'InceptionResnetV2/Repeat_1/block17_11/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_118/kernel': 'InceptionResnetV2/Repeat_1/block17_11/Branch_1/Conv2d_0b_1x7/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_118/beta': 'InceptionResnetV2/Repeat_1/block17_11/Branch_1/Conv2d_0b_1x7/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_118/moving_mean': 'InceptionResnetV2/Repeat_1/block17_11/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_118/moving_variance': 'InceptionResnetV2/Repeat_1/block17_11/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_119/kernel': 'InceptionResnetV2/Repeat_1/block17_11/Branch_1/Conv2d_0c_7x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_119/beta': 'InceptionResnetV2/Repeat_1/block17_11/Branch_1/Conv2d_0c_7x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_119/moving_mean': 'InceptionResnetV2/Repeat_1/block17_11/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_119/moving_variance': 'InceptionResnetV2/Repeat_1/block17_11/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance', + 
'FirstStageFeatureExtractor/InceptionResnetV2/block17_11_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_11/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_11_conv/bias': 'InceptionResnetV2/Repeat_1/block17_11/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_120/kernel': 'InceptionResnetV2/Repeat_1/block17_12/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_120/beta': 'InceptionResnetV2/Repeat_1/block17_12/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_120/moving_mean': 'InceptionResnetV2/Repeat_1/block17_12/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_120/moving_variance': 'InceptionResnetV2/Repeat_1/block17_12/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_121/kernel': 'InceptionResnetV2/Repeat_1/block17_12/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_121/beta': 'InceptionResnetV2/Repeat_1/block17_12/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_121/moving_mean': 'InceptionResnetV2/Repeat_1/block17_12/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_121/moving_variance': 'InceptionResnetV2/Repeat_1/block17_12/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_122/kernel': 'InceptionResnetV2/Repeat_1/block17_12/Branch_1/Conv2d_0b_1x7/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_122/beta': 'InceptionResnetV2/Repeat_1/block17_12/Branch_1/Conv2d_0b_1x7/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_122/moving_mean': 'InceptionResnetV2/Repeat_1/block17_12/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_122/moving_variance': 'InceptionResnetV2/Repeat_1/block17_12/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_123/kernel': 'InceptionResnetV2/Repeat_1/block17_12/Branch_1/Conv2d_0c_7x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_123/beta': 'InceptionResnetV2/Repeat_1/block17_12/Branch_1/Conv2d_0c_7x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_123/moving_mean': 'InceptionResnetV2/Repeat_1/block17_12/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_123/moving_variance': 'InceptionResnetV2/Repeat_1/block17_12/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_12_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_12/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_12_conv/bias': 'InceptionResnetV2/Repeat_1/block17_12/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_124/kernel': 'InceptionResnetV2/Repeat_1/block17_13/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_124/beta': 'InceptionResnetV2/Repeat_1/block17_13/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_124/moving_mean': 
'InceptionResnetV2/Repeat_1/block17_13/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_124/moving_variance': 'InceptionResnetV2/Repeat_1/block17_13/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_125/kernel': 'InceptionResnetV2/Repeat_1/block17_13/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_125/beta': 'InceptionResnetV2/Repeat_1/block17_13/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_125/moving_mean': 'InceptionResnetV2/Repeat_1/block17_13/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_125/moving_variance': 'InceptionResnetV2/Repeat_1/block17_13/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_126/kernel': 'InceptionResnetV2/Repeat_1/block17_13/Branch_1/Conv2d_0b_1x7/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_126/beta': 'InceptionResnetV2/Repeat_1/block17_13/Branch_1/Conv2d_0b_1x7/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_126/moving_mean': 'InceptionResnetV2/Repeat_1/block17_13/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_126/moving_variance': 'InceptionResnetV2/Repeat_1/block17_13/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_127/kernel': 'InceptionResnetV2/Repeat_1/block17_13/Branch_1/Conv2d_0c_7x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_127/beta': 'InceptionResnetV2/Repeat_1/block17_13/Branch_1/Conv2d_0c_7x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_127/moving_mean': 'InceptionResnetV2/Repeat_1/block17_13/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_127/moving_variance': 'InceptionResnetV2/Repeat_1/block17_13/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_13_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_13/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_13_conv/bias': 'InceptionResnetV2/Repeat_1/block17_13/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_128/kernel': 'InceptionResnetV2/Repeat_1/block17_14/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_128/beta': 'InceptionResnetV2/Repeat_1/block17_14/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_128/moving_mean': 'InceptionResnetV2/Repeat_1/block17_14/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_128/moving_variance': 'InceptionResnetV2/Repeat_1/block17_14/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_129/kernel': 'InceptionResnetV2/Repeat_1/block17_14/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_129/beta': 'InceptionResnetV2/Repeat_1/block17_14/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_129/moving_mean': 
'InceptionResnetV2/Repeat_1/block17_14/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_129/moving_variance': 'InceptionResnetV2/Repeat_1/block17_14/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_130/kernel': 'InceptionResnetV2/Repeat_1/block17_14/Branch_1/Conv2d_0b_1x7/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_130/beta': 'InceptionResnetV2/Repeat_1/block17_14/Branch_1/Conv2d_0b_1x7/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_130/moving_mean': 'InceptionResnetV2/Repeat_1/block17_14/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_130/moving_variance': 'InceptionResnetV2/Repeat_1/block17_14/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_131/kernel': 'InceptionResnetV2/Repeat_1/block17_14/Branch_1/Conv2d_0c_7x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_131/beta': 'InceptionResnetV2/Repeat_1/block17_14/Branch_1/Conv2d_0c_7x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_131/moving_mean': 'InceptionResnetV2/Repeat_1/block17_14/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_131/moving_variance': 'InceptionResnetV2/Repeat_1/block17_14/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_14_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_14/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_14_conv/bias': 'InceptionResnetV2/Repeat_1/block17_14/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_132/kernel': 'InceptionResnetV2/Repeat_1/block17_15/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_132/beta': 'InceptionResnetV2/Repeat_1/block17_15/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_132/moving_mean': 'InceptionResnetV2/Repeat_1/block17_15/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_132/moving_variance': 'InceptionResnetV2/Repeat_1/block17_15/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_133/kernel': 'InceptionResnetV2/Repeat_1/block17_15/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_133/beta': 'InceptionResnetV2/Repeat_1/block17_15/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_133/moving_mean': 'InceptionResnetV2/Repeat_1/block17_15/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_133/moving_variance': 'InceptionResnetV2/Repeat_1/block17_15/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_134/kernel': 'InceptionResnetV2/Repeat_1/block17_15/Branch_1/Conv2d_0b_1x7/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_134/beta': 'InceptionResnetV2/Repeat_1/block17_15/Branch_1/Conv2d_0b_1x7/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_134/moving_mean': 
'InceptionResnetV2/Repeat_1/block17_15/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_134/moving_variance': 'InceptionResnetV2/Repeat_1/block17_15/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_135/kernel': 'InceptionResnetV2/Repeat_1/block17_15/Branch_1/Conv2d_0c_7x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_135/beta': 'InceptionResnetV2/Repeat_1/block17_15/Branch_1/Conv2d_0c_7x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_135/moving_mean': 'InceptionResnetV2/Repeat_1/block17_15/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_135/moving_variance': 'InceptionResnetV2/Repeat_1/block17_15/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_15_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_15/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_15_conv/bias': 'InceptionResnetV2/Repeat_1/block17_15/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_136/kernel': 'InceptionResnetV2/Repeat_1/block17_16/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_136/beta': 'InceptionResnetV2/Repeat_1/block17_16/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_136/moving_mean': 'InceptionResnetV2/Repeat_1/block17_16/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_136/moving_variance': 'InceptionResnetV2/Repeat_1/block17_16/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_137/kernel': 'InceptionResnetV2/Repeat_1/block17_16/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_137/beta': 'InceptionResnetV2/Repeat_1/block17_16/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_137/moving_mean': 'InceptionResnetV2/Repeat_1/block17_16/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_137/moving_variance': 'InceptionResnetV2/Repeat_1/block17_16/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_138/kernel': 'InceptionResnetV2/Repeat_1/block17_16/Branch_1/Conv2d_0b_1x7/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_138/beta': 'InceptionResnetV2/Repeat_1/block17_16/Branch_1/Conv2d_0b_1x7/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_138/moving_mean': 'InceptionResnetV2/Repeat_1/block17_16/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_138/moving_variance': 'InceptionResnetV2/Repeat_1/block17_16/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_139/kernel': 'InceptionResnetV2/Repeat_1/block17_16/Branch_1/Conv2d_0c_7x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_139/beta': 'InceptionResnetV2/Repeat_1/block17_16/Branch_1/Conv2d_0c_7x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_139/moving_mean': 
'InceptionResnetV2/Repeat_1/block17_16/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_139/moving_variance': 'InceptionResnetV2/Repeat_1/block17_16/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_16_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_16/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_16_conv/bias': 'InceptionResnetV2/Repeat_1/block17_16/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_140/kernel': 'InceptionResnetV2/Repeat_1/block17_17/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_140/beta': 'InceptionResnetV2/Repeat_1/block17_17/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_140/moving_mean': 'InceptionResnetV2/Repeat_1/block17_17/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_140/moving_variance': 'InceptionResnetV2/Repeat_1/block17_17/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_141/kernel': 'InceptionResnetV2/Repeat_1/block17_17/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_141/beta': 'InceptionResnetV2/Repeat_1/block17_17/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_141/moving_mean': 'InceptionResnetV2/Repeat_1/block17_17/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_141/moving_variance': 'InceptionResnetV2/Repeat_1/block17_17/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_142/kernel': 'InceptionResnetV2/Repeat_1/block17_17/Branch_1/Conv2d_0b_1x7/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_142/beta': 'InceptionResnetV2/Repeat_1/block17_17/Branch_1/Conv2d_0b_1x7/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_142/moving_mean': 'InceptionResnetV2/Repeat_1/block17_17/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_142/moving_variance': 'InceptionResnetV2/Repeat_1/block17_17/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_143/kernel': 'InceptionResnetV2/Repeat_1/block17_17/Branch_1/Conv2d_0c_7x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_143/beta': 'InceptionResnetV2/Repeat_1/block17_17/Branch_1/Conv2d_0c_7x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_143/moving_mean': 'InceptionResnetV2/Repeat_1/block17_17/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_143/moving_variance': 'InceptionResnetV2/Repeat_1/block17_17/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_17_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_17/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_17_conv/bias': 'InceptionResnetV2/Repeat_1/block17_17/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_144/kernel': 'InceptionResnetV2/Repeat_1/block17_18/Branch_0/Conv2d_1x1/weights', + 
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_144/beta': 'InceptionResnetV2/Repeat_1/block17_18/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_144/moving_mean': 'InceptionResnetV2/Repeat_1/block17_18/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_144/moving_variance': 'InceptionResnetV2/Repeat_1/block17_18/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_145/kernel': 'InceptionResnetV2/Repeat_1/block17_18/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_145/beta': 'InceptionResnetV2/Repeat_1/block17_18/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_145/moving_mean': 'InceptionResnetV2/Repeat_1/block17_18/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_145/moving_variance': 'InceptionResnetV2/Repeat_1/block17_18/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_146/kernel': 'InceptionResnetV2/Repeat_1/block17_18/Branch_1/Conv2d_0b_1x7/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_146/beta': 'InceptionResnetV2/Repeat_1/block17_18/Branch_1/Conv2d_0b_1x7/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_146/moving_mean': 'InceptionResnetV2/Repeat_1/block17_18/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_146/moving_variance': 'InceptionResnetV2/Repeat_1/block17_18/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_147/kernel': 'InceptionResnetV2/Repeat_1/block17_18/Branch_1/Conv2d_0c_7x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_147/beta': 'InceptionResnetV2/Repeat_1/block17_18/Branch_1/Conv2d_0c_7x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_147/moving_mean': 'InceptionResnetV2/Repeat_1/block17_18/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_147/moving_variance': 'InceptionResnetV2/Repeat_1/block17_18/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_18_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_18/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_18_conv/bias': 'InceptionResnetV2/Repeat_1/block17_18/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_148/kernel': 'InceptionResnetV2/Repeat_1/block17_19/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_148/beta': 'InceptionResnetV2/Repeat_1/block17_19/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_148/moving_mean': 'InceptionResnetV2/Repeat_1/block17_19/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_148/moving_variance': 'InceptionResnetV2/Repeat_1/block17_19/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_149/kernel': 'InceptionResnetV2/Repeat_1/block17_19/Branch_1/Conv2d_0a_1x1/weights', + 
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_149/beta': 'InceptionResnetV2/Repeat_1/block17_19/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_149/moving_mean': 'InceptionResnetV2/Repeat_1/block17_19/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_149/moving_variance': 'InceptionResnetV2/Repeat_1/block17_19/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_150/kernel': 'InceptionResnetV2/Repeat_1/block17_19/Branch_1/Conv2d_0b_1x7/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_150/beta': 'InceptionResnetV2/Repeat_1/block17_19/Branch_1/Conv2d_0b_1x7/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_150/moving_mean': 'InceptionResnetV2/Repeat_1/block17_19/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_150/moving_variance': 'InceptionResnetV2/Repeat_1/block17_19/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_151/kernel': 'InceptionResnetV2/Repeat_1/block17_19/Branch_1/Conv2d_0c_7x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_151/beta': 'InceptionResnetV2/Repeat_1/block17_19/Branch_1/Conv2d_0c_7x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_151/moving_mean': 'InceptionResnetV2/Repeat_1/block17_19/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_151/moving_variance': 'InceptionResnetV2/Repeat_1/block17_19/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_19_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_19/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_19_conv/bias': 'InceptionResnetV2/Repeat_1/block17_19/Conv2d_1x1/biases', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_152/kernel': 'InceptionResnetV2/Repeat_1/block17_20/Branch_0/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_152/beta': 'InceptionResnetV2/Repeat_1/block17_20/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_152/moving_mean': 'InceptionResnetV2/Repeat_1/block17_20/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_152/moving_variance': 'InceptionResnetV2/Repeat_1/block17_20/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_153/kernel': 'InceptionResnetV2/Repeat_1/block17_20/Branch_1/Conv2d_0a_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_153/beta': 'InceptionResnetV2/Repeat_1/block17_20/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_153/moving_mean': 'InceptionResnetV2/Repeat_1/block17_20/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_153/moving_variance': 'InceptionResnetV2/Repeat_1/block17_20/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_154/kernel': 'InceptionResnetV2/Repeat_1/block17_20/Branch_1/Conv2d_0b_1x7/weights', + 
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_154/beta': 'InceptionResnetV2/Repeat_1/block17_20/Branch_1/Conv2d_0b_1x7/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_154/moving_mean': 'InceptionResnetV2/Repeat_1/block17_20/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_154/moving_variance': 'InceptionResnetV2/Repeat_1/block17_20/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_155/kernel': 'InceptionResnetV2/Repeat_1/block17_20/Branch_1/Conv2d_0c_7x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_155/beta': 'InceptionResnetV2/Repeat_1/block17_20/Branch_1/Conv2d_0c_7x1/BatchNorm/beta', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_155/moving_mean': 'InceptionResnetV2/Repeat_1/block17_20/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean', + 'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_155/moving_variance': 'InceptionResnetV2/Repeat_1/block17_20/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_20_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_20/Conv2d_1x1/weights', + 'FirstStageFeatureExtractor/InceptionResnetV2/block17_20_conv/bias': 'InceptionResnetV2/Repeat_1/block17_20/Conv2d_1x1/biases', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_359/kernel': 'InceptionResnetV2/Mixed_7a/Branch_0/Conv2d_0a_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_359/beta': 'InceptionResnetV2/Mixed_7a/Branch_0/Conv2d_0a_1x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_359/moving_mean': 'InceptionResnetV2/Mixed_7a/Branch_0/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_359/moving_variance': 'InceptionResnetV2/Mixed_7a/Branch_0/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_360/kernel': 'InceptionResnetV2/Mixed_7a/Branch_0/Conv2d_1a_3x3/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_360/beta': 'InceptionResnetV2/Mixed_7a/Branch_0/Conv2d_1a_3x3/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_360/moving_mean': 'InceptionResnetV2/Mixed_7a/Branch_0/Conv2d_1a_3x3/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_360/moving_variance': 'InceptionResnetV2/Mixed_7a/Branch_0/Conv2d_1a_3x3/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_361/kernel': 'InceptionResnetV2/Mixed_7a/Branch_1/Conv2d_0a_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_361/beta': 'InceptionResnetV2/Mixed_7a/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_361/moving_mean': 'InceptionResnetV2/Mixed_7a/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_361/moving_variance': 'InceptionResnetV2/Mixed_7a/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_362/kernel': 'InceptionResnetV2/Mixed_7a/Branch_1/Conv2d_1a_3x3/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_362/beta': 'InceptionResnetV2/Mixed_7a/Branch_1/Conv2d_1a_3x3/BatchNorm/beta', + 
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_362/moving_mean': 'InceptionResnetV2/Mixed_7a/Branch_1/Conv2d_1a_3x3/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_362/moving_variance': 'InceptionResnetV2/Mixed_7a/Branch_1/Conv2d_1a_3x3/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_363/kernel': 'InceptionResnetV2/Mixed_7a/Branch_2/Conv2d_0a_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_363/beta': 'InceptionResnetV2/Mixed_7a/Branch_2/Conv2d_0a_1x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_363/moving_mean': 'InceptionResnetV2/Mixed_7a/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_363/moving_variance': 'InceptionResnetV2/Mixed_7a/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_364/kernel': 'InceptionResnetV2/Mixed_7a/Branch_2/Conv2d_0b_3x3/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_364/beta': 'InceptionResnetV2/Mixed_7a/Branch_2/Conv2d_0b_3x3/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_364/moving_mean': 'InceptionResnetV2/Mixed_7a/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_364/moving_variance': 'InceptionResnetV2/Mixed_7a/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_365/kernel': 'InceptionResnetV2/Mixed_7a/Branch_2/Conv2d_1a_3x3/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_365/beta': 'InceptionResnetV2/Mixed_7a/Branch_2/Conv2d_1a_3x3/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_365/moving_mean': 'InceptionResnetV2/Mixed_7a/Branch_2/Conv2d_1a_3x3/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_365/moving_variance': 'InceptionResnetV2/Mixed_7a/Branch_2/Conv2d_1a_3x3/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_366/kernel': 'InceptionResnetV2/Repeat_2/block8_1/Branch_0/Conv2d_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_366/beta': 'InceptionResnetV2/Repeat_2/block8_1/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_366/moving_mean': 'InceptionResnetV2/Repeat_2/block8_1/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_366/moving_variance': 'InceptionResnetV2/Repeat_2/block8_1/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_367/kernel': 'InceptionResnetV2/Repeat_2/block8_1/Branch_1/Conv2d_0a_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_367/beta': 'InceptionResnetV2/Repeat_2/block8_1/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_367/moving_mean': 'InceptionResnetV2/Repeat_2/block8_1/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_367/moving_variance': 'InceptionResnetV2/Repeat_2/block8_1/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_368/kernel': 
'InceptionResnetV2/Repeat_2/block8_1/Branch_1/Conv2d_0b_1x3/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_368/beta': 'InceptionResnetV2/Repeat_2/block8_1/Branch_1/Conv2d_0b_1x3/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_368/moving_mean': 'InceptionResnetV2/Repeat_2/block8_1/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_368/moving_variance': 'InceptionResnetV2/Repeat_2/block8_1/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_369/kernel': 'InceptionResnetV2/Repeat_2/block8_1/Branch_1/Conv2d_0c_3x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_369/beta': 'InceptionResnetV2/Repeat_2/block8_1/Branch_1/Conv2d_0c_3x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_369/moving_mean': 'InceptionResnetV2/Repeat_2/block8_1/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_369/moving_variance': 'InceptionResnetV2/Repeat_2/block8_1/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/block8_1_conv/kernel': 'InceptionResnetV2/Repeat_2/block8_1/Conv2d_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/block8_1_conv/bias': 'InceptionResnetV2/Repeat_2/block8_1/Conv2d_1x1/biases', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_370/kernel': 'InceptionResnetV2/Repeat_2/block8_2/Branch_0/Conv2d_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_370/beta': 'InceptionResnetV2/Repeat_2/block8_2/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_370/moving_mean': 'InceptionResnetV2/Repeat_2/block8_2/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_370/moving_variance': 'InceptionResnetV2/Repeat_2/block8_2/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_371/kernel': 'InceptionResnetV2/Repeat_2/block8_2/Branch_1/Conv2d_0a_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_371/beta': 'InceptionResnetV2/Repeat_2/block8_2/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_371/moving_mean': 'InceptionResnetV2/Repeat_2/block8_2/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_371/moving_variance': 'InceptionResnetV2/Repeat_2/block8_2/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_372/kernel': 'InceptionResnetV2/Repeat_2/block8_2/Branch_1/Conv2d_0b_1x3/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_372/beta': 'InceptionResnetV2/Repeat_2/block8_2/Branch_1/Conv2d_0b_1x3/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_372/moving_mean': 'InceptionResnetV2/Repeat_2/block8_2/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_372/moving_variance': 'InceptionResnetV2/Repeat_2/block8_2/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_373/kernel': 
'InceptionResnetV2/Repeat_2/block8_2/Branch_1/Conv2d_0c_3x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_373/beta': 'InceptionResnetV2/Repeat_2/block8_2/Branch_1/Conv2d_0c_3x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_373/moving_mean': 'InceptionResnetV2/Repeat_2/block8_2/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_373/moving_variance': 'InceptionResnetV2/Repeat_2/block8_2/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/block8_2_conv/kernel': 'InceptionResnetV2/Repeat_2/block8_2/Conv2d_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/block8_2_conv/bias': 'InceptionResnetV2/Repeat_2/block8_2/Conv2d_1x1/biases', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_374/kernel': 'InceptionResnetV2/Repeat_2/block8_3/Branch_0/Conv2d_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_374/beta': 'InceptionResnetV2/Repeat_2/block8_3/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_374/moving_mean': 'InceptionResnetV2/Repeat_2/block8_3/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_374/moving_variance': 'InceptionResnetV2/Repeat_2/block8_3/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_375/kernel': 'InceptionResnetV2/Repeat_2/block8_3/Branch_1/Conv2d_0a_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_375/beta': 'InceptionResnetV2/Repeat_2/block8_3/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_375/moving_mean': 'InceptionResnetV2/Repeat_2/block8_3/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_375/moving_variance': 'InceptionResnetV2/Repeat_2/block8_3/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_376/kernel': 'InceptionResnetV2/Repeat_2/block8_3/Branch_1/Conv2d_0b_1x3/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_376/beta': 'InceptionResnetV2/Repeat_2/block8_3/Branch_1/Conv2d_0b_1x3/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_376/moving_mean': 'InceptionResnetV2/Repeat_2/block8_3/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_376/moving_variance': 'InceptionResnetV2/Repeat_2/block8_3/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_377/kernel': 'InceptionResnetV2/Repeat_2/block8_3/Branch_1/Conv2d_0c_3x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_377/beta': 'InceptionResnetV2/Repeat_2/block8_3/Branch_1/Conv2d_0c_3x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_377/moving_mean': 'InceptionResnetV2/Repeat_2/block8_3/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_377/moving_variance': 'InceptionResnetV2/Repeat_2/block8_3/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/block8_3_conv/kernel': 'InceptionResnetV2/Repeat_2/block8_3/Conv2d_1x1/weights', + 
'SecondStageFeatureExtractor/InceptionResnetV2/block8_3_conv/bias': 'InceptionResnetV2/Repeat_2/block8_3/Conv2d_1x1/biases', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_378/kernel': 'InceptionResnetV2/Repeat_2/block8_4/Branch_0/Conv2d_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_378/beta': 'InceptionResnetV2/Repeat_2/block8_4/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_378/moving_mean': 'InceptionResnetV2/Repeat_2/block8_4/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_378/moving_variance': 'InceptionResnetV2/Repeat_2/block8_4/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_379/kernel': 'InceptionResnetV2/Repeat_2/block8_4/Branch_1/Conv2d_0a_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_379/beta': 'InceptionResnetV2/Repeat_2/block8_4/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_379/moving_mean': 'InceptionResnetV2/Repeat_2/block8_4/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_379/moving_variance': 'InceptionResnetV2/Repeat_2/block8_4/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_380/kernel': 'InceptionResnetV2/Repeat_2/block8_4/Branch_1/Conv2d_0b_1x3/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_380/beta': 'InceptionResnetV2/Repeat_2/block8_4/Branch_1/Conv2d_0b_1x3/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_380/moving_mean': 'InceptionResnetV2/Repeat_2/block8_4/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_380/moving_variance': 'InceptionResnetV2/Repeat_2/block8_4/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_381/kernel': 'InceptionResnetV2/Repeat_2/block8_4/Branch_1/Conv2d_0c_3x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_381/beta': 'InceptionResnetV2/Repeat_2/block8_4/Branch_1/Conv2d_0c_3x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_381/moving_mean': 'InceptionResnetV2/Repeat_2/block8_4/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_381/moving_variance': 'InceptionResnetV2/Repeat_2/block8_4/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/block8_4_conv/kernel': 'InceptionResnetV2/Repeat_2/block8_4/Conv2d_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/block8_4_conv/bias': 'InceptionResnetV2/Repeat_2/block8_4/Conv2d_1x1/biases', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_382/kernel': 'InceptionResnetV2/Repeat_2/block8_5/Branch_0/Conv2d_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_382/beta': 'InceptionResnetV2/Repeat_2/block8_5/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_382/moving_mean': 'InceptionResnetV2/Repeat_2/block8_5/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_382/moving_variance': 
'InceptionResnetV2/Repeat_2/block8_5/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_383/kernel': 'InceptionResnetV2/Repeat_2/block8_5/Branch_1/Conv2d_0a_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_383/beta': 'InceptionResnetV2/Repeat_2/block8_5/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_383/moving_mean': 'InceptionResnetV2/Repeat_2/block8_5/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_383/moving_variance': 'InceptionResnetV2/Repeat_2/block8_5/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_384/kernel': 'InceptionResnetV2/Repeat_2/block8_5/Branch_1/Conv2d_0b_1x3/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_384/beta': 'InceptionResnetV2/Repeat_2/block8_5/Branch_1/Conv2d_0b_1x3/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_384/moving_mean': 'InceptionResnetV2/Repeat_2/block8_5/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_384/moving_variance': 'InceptionResnetV2/Repeat_2/block8_5/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_385/kernel': 'InceptionResnetV2/Repeat_2/block8_5/Branch_1/Conv2d_0c_3x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_385/beta': 'InceptionResnetV2/Repeat_2/block8_5/Branch_1/Conv2d_0c_3x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_385/moving_mean': 'InceptionResnetV2/Repeat_2/block8_5/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_385/moving_variance': 'InceptionResnetV2/Repeat_2/block8_5/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/block8_5_conv/kernel': 'InceptionResnetV2/Repeat_2/block8_5/Conv2d_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/block8_5_conv/bias': 'InceptionResnetV2/Repeat_2/block8_5/Conv2d_1x1/biases', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_386/kernel': 'InceptionResnetV2/Repeat_2/block8_6/Branch_0/Conv2d_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_386/beta': 'InceptionResnetV2/Repeat_2/block8_6/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_386/moving_mean': 'InceptionResnetV2/Repeat_2/block8_6/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_386/moving_variance': 'InceptionResnetV2/Repeat_2/block8_6/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_387/kernel': 'InceptionResnetV2/Repeat_2/block8_6/Branch_1/Conv2d_0a_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_387/beta': 'InceptionResnetV2/Repeat_2/block8_6/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_387/moving_mean': 'InceptionResnetV2/Repeat_2/block8_6/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_387/moving_variance': 
'InceptionResnetV2/Repeat_2/block8_6/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_388/kernel': 'InceptionResnetV2/Repeat_2/block8_6/Branch_1/Conv2d_0b_1x3/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_388/beta': 'InceptionResnetV2/Repeat_2/block8_6/Branch_1/Conv2d_0b_1x3/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_388/moving_mean': 'InceptionResnetV2/Repeat_2/block8_6/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_388/moving_variance': 'InceptionResnetV2/Repeat_2/block8_6/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_389/kernel': 'InceptionResnetV2/Repeat_2/block8_6/Branch_1/Conv2d_0c_3x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_389/beta': 'InceptionResnetV2/Repeat_2/block8_6/Branch_1/Conv2d_0c_3x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_389/moving_mean': 'InceptionResnetV2/Repeat_2/block8_6/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_389/moving_variance': 'InceptionResnetV2/Repeat_2/block8_6/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/block8_6_conv/kernel': 'InceptionResnetV2/Repeat_2/block8_6/Conv2d_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/block8_6_conv/bias': 'InceptionResnetV2/Repeat_2/block8_6/Conv2d_1x1/biases', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_390/kernel': 'InceptionResnetV2/Repeat_2/block8_7/Branch_0/Conv2d_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_390/beta': 'InceptionResnetV2/Repeat_2/block8_7/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_390/moving_mean': 'InceptionResnetV2/Repeat_2/block8_7/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_390/moving_variance': 'InceptionResnetV2/Repeat_2/block8_7/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_391/kernel': 'InceptionResnetV2/Repeat_2/block8_7/Branch_1/Conv2d_0a_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_391/beta': 'InceptionResnetV2/Repeat_2/block8_7/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_391/moving_mean': 'InceptionResnetV2/Repeat_2/block8_7/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_391/moving_variance': 'InceptionResnetV2/Repeat_2/block8_7/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_392/kernel': 'InceptionResnetV2/Repeat_2/block8_7/Branch_1/Conv2d_0b_1x3/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_392/beta': 'InceptionResnetV2/Repeat_2/block8_7/Branch_1/Conv2d_0b_1x3/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_392/moving_mean': 'InceptionResnetV2/Repeat_2/block8_7/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_392/moving_variance': 
'InceptionResnetV2/Repeat_2/block8_7/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_393/kernel': 'InceptionResnetV2/Repeat_2/block8_7/Branch_1/Conv2d_0c_3x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_393/beta': 'InceptionResnetV2/Repeat_2/block8_7/Branch_1/Conv2d_0c_3x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_393/moving_mean': 'InceptionResnetV2/Repeat_2/block8_7/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_393/moving_variance': 'InceptionResnetV2/Repeat_2/block8_7/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/block8_7_conv/kernel': 'InceptionResnetV2/Repeat_2/block8_7/Conv2d_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/block8_7_conv/bias': 'InceptionResnetV2/Repeat_2/block8_7/Conv2d_1x1/biases', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_394/kernel': 'InceptionResnetV2/Repeat_2/block8_8/Branch_0/Conv2d_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_394/beta': 'InceptionResnetV2/Repeat_2/block8_8/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_394/moving_mean': 'InceptionResnetV2/Repeat_2/block8_8/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_394/moving_variance': 'InceptionResnetV2/Repeat_2/block8_8/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_395/kernel': 'InceptionResnetV2/Repeat_2/block8_8/Branch_1/Conv2d_0a_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_395/beta': 'InceptionResnetV2/Repeat_2/block8_8/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_395/moving_mean': 'InceptionResnetV2/Repeat_2/block8_8/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_395/moving_variance': 'InceptionResnetV2/Repeat_2/block8_8/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_396/kernel': 'InceptionResnetV2/Repeat_2/block8_8/Branch_1/Conv2d_0b_1x3/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_396/beta': 'InceptionResnetV2/Repeat_2/block8_8/Branch_1/Conv2d_0b_1x3/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_396/moving_mean': 'InceptionResnetV2/Repeat_2/block8_8/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_396/moving_variance': 'InceptionResnetV2/Repeat_2/block8_8/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_397/kernel': 'InceptionResnetV2/Repeat_2/block8_8/Branch_1/Conv2d_0c_3x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_397/beta': 'InceptionResnetV2/Repeat_2/block8_8/Branch_1/Conv2d_0c_3x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_397/moving_mean': 'InceptionResnetV2/Repeat_2/block8_8/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_397/moving_variance': 
'InceptionResnetV2/Repeat_2/block8_8/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/block8_8_conv/kernel': 'InceptionResnetV2/Repeat_2/block8_8/Conv2d_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/block8_8_conv/bias': 'InceptionResnetV2/Repeat_2/block8_8/Conv2d_1x1/biases', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_398/kernel': 'InceptionResnetV2/Repeat_2/block8_9/Branch_0/Conv2d_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_398/beta': 'InceptionResnetV2/Repeat_2/block8_9/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_398/moving_mean': 'InceptionResnetV2/Repeat_2/block8_9/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_398/moving_variance': 'InceptionResnetV2/Repeat_2/block8_9/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_399/kernel': 'InceptionResnetV2/Repeat_2/block8_9/Branch_1/Conv2d_0a_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_399/beta': 'InceptionResnetV2/Repeat_2/block8_9/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_399/moving_mean': 'InceptionResnetV2/Repeat_2/block8_9/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_399/moving_variance': 'InceptionResnetV2/Repeat_2/block8_9/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_400/kernel': 'InceptionResnetV2/Repeat_2/block8_9/Branch_1/Conv2d_0b_1x3/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_400/beta': 'InceptionResnetV2/Repeat_2/block8_9/Branch_1/Conv2d_0b_1x3/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_400/moving_mean': 'InceptionResnetV2/Repeat_2/block8_9/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_400/moving_variance': 'InceptionResnetV2/Repeat_2/block8_9/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_401/kernel': 'InceptionResnetV2/Repeat_2/block8_9/Branch_1/Conv2d_0c_3x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_401/beta': 'InceptionResnetV2/Repeat_2/block8_9/Branch_1/Conv2d_0c_3x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_401/moving_mean': 'InceptionResnetV2/Repeat_2/block8_9/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_401/moving_variance': 'InceptionResnetV2/Repeat_2/block8_9/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/block8_9_conv/kernel': 'InceptionResnetV2/Repeat_2/block8_9/Conv2d_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/block8_9_conv/bias': 'InceptionResnetV2/Repeat_2/block8_9/Conv2d_1x1/biases', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_402/kernel': 'InceptionResnetV2/Block8/Branch_0/Conv2d_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_402/beta': 'InceptionResnetV2/Block8/Branch_0/Conv2d_1x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_402/moving_mean': 
'InceptionResnetV2/Block8/Branch_0/Conv2d_1x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_402/moving_variance': 'InceptionResnetV2/Block8/Branch_0/Conv2d_1x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_403/kernel': 'InceptionResnetV2/Block8/Branch_1/Conv2d_0a_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_403/beta': 'InceptionResnetV2/Block8/Branch_1/Conv2d_0a_1x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_403/moving_mean': 'InceptionResnetV2/Block8/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_403/moving_variance': 'InceptionResnetV2/Block8/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_404/kernel': 'InceptionResnetV2/Block8/Branch_1/Conv2d_0b_1x3/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_404/beta': 'InceptionResnetV2/Block8/Branch_1/Conv2d_0b_1x3/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_404/moving_mean': 'InceptionResnetV2/Block8/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_404/moving_variance': 'InceptionResnetV2/Block8/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_405/kernel': 'InceptionResnetV2/Block8/Branch_1/Conv2d_0c_3x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_405/beta': 'InceptionResnetV2/Block8/Branch_1/Conv2d_0c_3x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_405/moving_mean': 'InceptionResnetV2/Block8/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_405/moving_variance': 'InceptionResnetV2/Block8/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_variance', + 'SecondStageFeatureExtractor/InceptionResnetV2/block8_10_conv/kernel': 'InceptionResnetV2/Block8/Conv2d_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/block8_10_conv/bias': 'InceptionResnetV2/Block8/Conv2d_1x1/biases', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv_7b/kernel': 'InceptionResnetV2/Conv2d_7b_1x1/weights', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv_7b_bn/beta': 'InceptionResnetV2/Conv2d_7b_1x1/BatchNorm/beta', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv_7b_bn/moving_mean': 'InceptionResnetV2/Conv2d_7b_1x1/BatchNorm/moving_mean', + 'SecondStageFeatureExtractor/InceptionResnetV2/conv_7b_bn/moving_variance': 'InceptionResnetV2/Conv2d_7b_1x1/BatchNorm/moving_variance', + } + + variables_to_restore = {} + if tf.executing_eagerly(): + for key in self._variable_dict: + # variable.name includes ":0" at the end, but the names in the + # checkpoint do not have the suffix ":0". So, we strip it here. 
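+        # (The keys of `self._variable_dict` are assumed to already have the
+        # ':0' suffix stripped.) Keys with no entry in the mapping above are
+        # skipped, so only the backbone weights listed there are restored.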
+ var_name = keras_to_slim_name_mapping.get(key) + if var_name: + variables_to_restore[var_name] = self._variable_dict[key] + else: + for variable in variables_helper.get_global_variables_safely(): + var_name = keras_to_slim_name_mapping.get(variable.op.name) + if var_name: + variables_to_restore[var_name] = variable + return variables_to_restore diff --git a/models/research/object_detection/models/faster_rcnn_inception_resnet_v2_keras_feature_extractor_tf2_test.py b/models/research/object_detection/models/faster_rcnn_inception_resnet_v2_keras_feature_extractor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..49c56045714e8b2e145c9dd5131884727bb48d7f --- /dev/null +++ b/models/research/object_detection/models/faster_rcnn_inception_resnet_v2_keras_feature_extractor_tf2_test.py @@ -0,0 +1,80 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for models.faster_rcnn_inception_resnet_v2_keras_feature_extractor.""" +import unittest +import tensorflow.compat.v1 as tf + +from object_detection.models import faster_rcnn_inception_resnet_v2_keras_feature_extractor as frcnn_inc_res +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class FasterRcnnInceptionResnetV2KerasFeatureExtractorTest(tf.test.TestCase): + + def _build_feature_extractor(self, first_stage_features_stride): + return frcnn_inc_res.FasterRCNNInceptionResnetV2KerasFeatureExtractor( + is_training=False, + first_stage_features_stride=first_stage_features_stride, + batch_norm_trainable=False, + weight_decay=0.0) + + def test_extract_proposal_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [1, 299, 299, 3], maxval=255, dtype=tf.float32) + rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model( + name='TestScope')(preprocessed_inputs) + features_shape = tf.shape(rpn_feature_map) + + self.assertAllEqual(features_shape.numpy(), [1, 19, 19, 1088]) + + def test_extract_proposal_features_stride_eight(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=8) + preprocessed_inputs = tf.random_uniform( + [1, 224, 224, 3], maxval=255, dtype=tf.float32) + rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model( + name='TestScope')(preprocessed_inputs) + features_shape = tf.shape(rpn_feature_map) + + self.assertAllEqual(features_shape.numpy(), [1, 28, 28, 1088]) + + def test_extract_proposal_features_half_size_input(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [1, 112, 112, 3], maxval=255, dtype=tf.float32) + rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model( + name='TestScope')(preprocessed_inputs) 
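+    # With a 112x112 input and a first stage feature stride of 16, the RPN
+    # feature map is expected to be 7x7 spatially (checked below).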
+ features_shape = tf.shape(rpn_feature_map) + self.assertAllEqual(features_shape.numpy(), [1, 7, 7, 1088]) + + def test_extract_box_classifier_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + proposal_feature_maps = tf.random_uniform( + [2, 17, 17, 1088], maxval=255, dtype=tf.float32) + model = feature_extractor.get_box_classifier_feature_extractor_model( + name='TestScope') + proposal_classifier_features = ( + model(proposal_feature_maps)) + features_shape = tf.shape(proposal_classifier_features) + self.assertAllEqual(features_shape.numpy(), [2, 8, 8, 1536]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/faster_rcnn_inception_v2_feature_extractor.py b/models/research/object_detection/models/faster_rcnn_inception_v2_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..549ad6bb2f42ccf834c4dcea6834f8b4b9d10ee7 --- /dev/null +++ b/models/research/object_detection/models/faster_rcnn_inception_v2_feature_extractor.py @@ -0,0 +1,253 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Inception V2 Faster R-CNN implementation. + +See "Rethinking the Inception Architecture for Computer Vision" +https://arxiv.org/abs/1512.00567 +""" +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import faster_rcnn_meta_arch +from nets import inception_v2 + + +def _batch_norm_arg_scope(list_ops, + use_batch_norm=True, + batch_norm_decay=0.9997, + batch_norm_epsilon=0.001, + batch_norm_scale=False, + train_batch_norm=False): + """Slim arg scope for InceptionV2 batch norm.""" + if use_batch_norm: + batch_norm_params = { + 'is_training': train_batch_norm, + 'scale': batch_norm_scale, + 'decay': batch_norm_decay, + 'epsilon': batch_norm_epsilon + } + normalizer_fn = slim.batch_norm + else: + normalizer_fn = None + batch_norm_params = None + + return slim.arg_scope(list_ops, + normalizer_fn=normalizer_fn, + normalizer_params=batch_norm_params) + + +class FasterRCNNInceptionV2FeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): + """Faster R-CNN Inception V2 feature extractor implementation.""" + + def __init__(self, + is_training, + first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0, + depth_multiplier=1.0, + min_depth=16): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + reuse_weights: See base class. + weight_decay: See base class. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + + Raises: + ValueError: If `first_stage_features_stride` is not 8 or 16. 
+ """ + if first_stage_features_stride != 8 and first_stage_features_stride != 16: + raise ValueError('`first_stage_features_stride` must be 8 or 16.') + self._depth_multiplier = depth_multiplier + self._min_depth = min_depth + super(FasterRCNNInceptionV2FeatureExtractor, self).__init__( + is_training, first_stage_features_stride, batch_norm_trainable, + reuse_weights, weight_decay) + + def preprocess(self, resized_inputs): + """Faster R-CNN Inception V2 preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def _extract_proposal_features(self, preprocessed_inputs, scope): + """Extracts first stage RPN features. + + Args: + preprocessed_inputs: A [batch, height, width, channels] float32 tensor + representing a batch of images. + scope: A scope name. + + Returns: + rpn_feature_map: A tensor with shape [batch, height, width, depth] + activations: A dictionary mapping feature extractor tensor names to + tensors + + Raises: + InvalidArgumentError: If the spatial size of `preprocessed_inputs` + (height or width) is less than 33. + ValueError: If the created network is missing the required activation. + """ + + preprocessed_inputs.get_shape().assert_has_rank(4) + shape_assert = tf.Assert( + tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33), + tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)), + ['image size must at least be 33 in both height and width.']) + + with tf.control_dependencies([shape_assert]): + with tf.variable_scope('InceptionV2', + reuse=self._reuse_weights) as scope: + with _batch_norm_arg_scope([slim.conv2d, slim.separable_conv2d], + batch_norm_scale=True, + train_batch_norm=self._train_batch_norm): + _, activations = inception_v2.inception_v2_base( + preprocessed_inputs, + final_endpoint='Mixed_4e', + min_depth=self._min_depth, + depth_multiplier=self._depth_multiplier, + scope=scope) + + return activations['Mixed_4e'], activations + + def _extract_box_classifier_features(self, proposal_feature_maps, scope): + """Extracts second stage box classifier features. + + Args: + proposal_feature_maps: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, crop_height, crop_width, depth] + representing the feature map cropped to each proposal. + scope: A scope name (unused). + + Returns: + proposal_classifier_features: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, height, width, depth] + representing box classifier features for each proposal. 
+ """ + net = proposal_feature_maps + + depth = lambda d: max(int(d * self._depth_multiplier), self._min_depth) + trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev) + + data_format = 'NHWC' + concat_dim = 3 if data_format == 'NHWC' else 1 + + with tf.variable_scope('InceptionV2', reuse=self._reuse_weights): + with slim.arg_scope( + [slim.conv2d, slim.max_pool2d, slim.avg_pool2d], + stride=1, + padding='SAME', + data_format=data_format): + with _batch_norm_arg_scope([slim.conv2d, slim.separable_conv2d], + batch_norm_scale=True, + train_batch_norm=self._train_batch_norm): + + with tf.variable_scope('Mixed_5a'): + with tf.variable_scope('Branch_0'): + branch_0 = slim.conv2d( + net, depth(128), [1, 1], + weights_initializer=trunc_normal(0.09), + scope='Conv2d_0a_1x1') + branch_0 = slim.conv2d(branch_0, depth(192), [3, 3], stride=2, + scope='Conv2d_1a_3x3') + with tf.variable_scope('Branch_1'): + branch_1 = slim.conv2d( + net, depth(192), [1, 1], + weights_initializer=trunc_normal(0.09), + scope='Conv2d_0a_1x1') + branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], + scope='Conv2d_0b_3x3') + branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], stride=2, + scope='Conv2d_1a_3x3') + with tf.variable_scope('Branch_2'): + branch_2 = slim.max_pool2d(net, [3, 3], stride=2, + scope='MaxPool_1a_3x3') + net = tf.concat([branch_0, branch_1, branch_2], concat_dim) + + with tf.variable_scope('Mixed_5b'): + with tf.variable_scope('Branch_0'): + branch_0 = slim.conv2d(net, depth(352), [1, 1], + scope='Conv2d_0a_1x1') + with tf.variable_scope('Branch_1'): + branch_1 = slim.conv2d( + net, depth(192), [1, 1], + weights_initializer=trunc_normal(0.09), + scope='Conv2d_0a_1x1') + branch_1 = slim.conv2d(branch_1, depth(320), [3, 3], + scope='Conv2d_0b_3x3') + with tf.variable_scope('Branch_2'): + branch_2 = slim.conv2d( + net, depth(160), [1, 1], + weights_initializer=trunc_normal(0.09), + scope='Conv2d_0a_1x1') + branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], + scope='Conv2d_0b_3x3') + branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], + scope='Conv2d_0c_3x3') + with tf.variable_scope('Branch_3'): + branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') + branch_3 = slim.conv2d( + branch_3, depth(128), [1, 1], + weights_initializer=trunc_normal(0.1), + scope='Conv2d_0b_1x1') + net = tf.concat([branch_0, branch_1, branch_2, branch_3], + concat_dim) + + with tf.variable_scope('Mixed_5c'): + with tf.variable_scope('Branch_0'): + branch_0 = slim.conv2d(net, depth(352), [1, 1], + scope='Conv2d_0a_1x1') + with tf.variable_scope('Branch_1'): + branch_1 = slim.conv2d( + net, depth(192), [1, 1], + weights_initializer=trunc_normal(0.09), + scope='Conv2d_0a_1x1') + branch_1 = slim.conv2d(branch_1, depth(320), [3, 3], + scope='Conv2d_0b_3x3') + with tf.variable_scope('Branch_2'): + branch_2 = slim.conv2d( + net, depth(192), [1, 1], + weights_initializer=trunc_normal(0.09), + scope='Conv2d_0a_1x1') + branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], + scope='Conv2d_0b_3x3') + branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], + scope='Conv2d_0c_3x3') + with tf.variable_scope('Branch_3'): + branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3') + branch_3 = slim.conv2d( + branch_3, depth(128), [1, 1], + weights_initializer=trunc_normal(0.1), + scope='Conv2d_0b_1x1') + proposal_classifier_features = tf.concat( + [branch_0, branch_1, branch_2, branch_3], concat_dim) + + return proposal_classifier_features diff --git 
a/models/research/object_detection/models/faster_rcnn_inception_v2_feature_extractor_tf1_test.py b/models/research/object_detection/models/faster_rcnn_inception_v2_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..f5d01145f291f7b795a917e5a96632d52b42bac5 --- /dev/null +++ b/models/research/object_detection/models/faster_rcnn_inception_v2_feature_extractor_tf1_test.py @@ -0,0 +1,128 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for faster_rcnn_inception_v2_feature_extractor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import faster_rcnn_inception_v2_feature_extractor as faster_rcnn_inception_v2 +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class FasterRcnnInceptionV2FeatureExtractorTest(tf.test.TestCase): + + def _build_feature_extractor(self, first_stage_features_stride): + return faster_rcnn_inception_v2.FasterRCNNInceptionV2FeatureExtractor( + is_training=False, + first_stage_features_stride=first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0) + + def test_extract_proposal_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [4, 224, 224, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [4, 14, 14, 576]) + + def test_extract_proposal_features_stride_eight(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=8) + preprocessed_inputs = tf.random_uniform( + [4, 224, 224, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [4, 14, 14, 576]) + + def test_extract_proposal_features_half_size_input(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [1, 112, 112, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() 
as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [1, 7, 7, 576]) + + def test_extract_proposal_features_dies_on_invalid_stride(self): + with self.assertRaises(ValueError): + self._build_feature_extractor(first_stage_features_stride=99) + + def test_extract_proposal_features_dies_on_very_small_images(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3)) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + with self.assertRaises(tf.errors.InvalidArgumentError): + sess.run( + features_shape, + feed_dict={preprocessed_inputs: np.random.rand(4, 32, 32, 3)}) + + def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [224, 224, 3], maxval=255, dtype=tf.float32) + with self.assertRaises(ValueError): + feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + + def test_extract_box_classifier_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + proposal_feature_maps = tf.random_uniform( + [3, 14, 14, 576], maxval=255, dtype=tf.float32) + proposal_classifier_features = ( + feature_extractor.extract_box_classifier_features( + proposal_feature_maps, scope='TestScope')) + features_shape = tf.shape(proposal_classifier_features) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [3, 7, 7, 1024]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/faster_rcnn_mobilenet_v1_feature_extractor.py b/models/research/object_detection/models/faster_rcnn_mobilenet_v1_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..aa37848bb844dc58037cc815783b156b03b928a0 --- /dev/null +++ b/models/research/object_detection/models/faster_rcnn_mobilenet_v1_feature_extractor.py @@ -0,0 +1,193 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Mobilenet v1 Faster R-CNN implementation.""" +import numpy as np + +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import faster_rcnn_meta_arch +from object_detection.utils import shape_utils +from nets import mobilenet_v1 + + +def _get_mobilenet_conv_no_last_stride_defs(conv_depth_ratio_in_percentage): + if conv_depth_ratio_in_percentage not in [25, 50, 75, 100]: + raise ValueError( + 'Only the following ratio percentages are supported: 25, 50, 75, 100') + conv_depth_ratio_in_percentage = float(conv_depth_ratio_in_percentage) / 100.0 + channels = np.array([ + 32, 64, 128, 128, 256, 256, 512, 512, 512, 512, 512, 512, 1024, 1024 + ], dtype=np.float32) + channels = (channels * conv_depth_ratio_in_percentage).astype(np.int32) + return [ + mobilenet_v1.Conv(kernel=[3, 3], stride=2, depth=channels[0]), + mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[1]), + mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=channels[2]), + mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[3]), + mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=channels[4]), + mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[5]), + mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=channels[6]), + mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[7]), + mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[8]), + mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[9]), + mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[10]), + mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[11]), + mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[12]), + mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[13]) + ] + + +class FasterRCNNMobilenetV1FeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): + """Faster R-CNN Mobilenet V1 feature extractor implementation.""" + + def __init__(self, + is_training, + first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0, + depth_multiplier=1.0, + min_depth=16, + skip_last_stride=False, + conv_depth_ratio_in_percentage=100): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + reuse_weights: See base class. + weight_decay: See base class. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + skip_last_stride: Skip the last stride if True. + conv_depth_ratio_in_percentage: Conv depth ratio in percentage. Only + applied if skip_last_stride is True. + + Raises: + ValueError: If `first_stage_features_stride` is not 8 or 16. + """ + if first_stage_features_stride != 8 and first_stage_features_stride != 16: + raise ValueError('`first_stage_features_stride` must be 8 or 16.') + self._depth_multiplier = depth_multiplier + self._min_depth = min_depth + self._skip_last_stride = skip_last_stride + self._conv_depth_ratio_in_percentage = conv_depth_ratio_in_percentage + super(FasterRCNNMobilenetV1FeatureExtractor, self).__init__( + is_training, first_stage_features_stride, batch_norm_trainable, + reuse_weights, weight_decay) + + def preprocess(self, resized_inputs): + """Faster R-CNN Mobilenet V1 preprocessing. + + Maps pixel values to the range [-1, 1]. 
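+    For example, a pixel value of 0.0 maps to -1.0, 127.5 maps to 0.0, and
+    255.0 maps to 1.0.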
+ + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def _extract_proposal_features(self, preprocessed_inputs, scope): + """Extracts first stage RPN features. + + Args: + preprocessed_inputs: A [batch, height, width, channels] float32 tensor + representing a batch of images. + scope: A scope name. + + Returns: + rpn_feature_map: A tensor with shape [batch, height, width, depth] + activations: A dictionary mapping feature extractor tensor names to + tensors + + Raises: + InvalidArgumentError: If the spatial size of `preprocessed_inputs` + (height or width) is less than 33. + ValueError: If the created network is missing the required activation. + """ + + preprocessed_inputs.get_shape().assert_has_rank(4) + preprocessed_inputs = shape_utils.check_min_image_dim( + min_dim=33, image_tensor=preprocessed_inputs) + + with slim.arg_scope( + mobilenet_v1.mobilenet_v1_arg_scope( + is_training=self._train_batch_norm, + weight_decay=self._weight_decay)): + with tf.variable_scope('MobilenetV1', + reuse=self._reuse_weights) as scope: + params = {} + if self._skip_last_stride: + params['conv_defs'] = _get_mobilenet_conv_no_last_stride_defs( + conv_depth_ratio_in_percentage=self. + _conv_depth_ratio_in_percentage) + _, activations = mobilenet_v1.mobilenet_v1_base( + preprocessed_inputs, + final_endpoint='Conv2d_11_pointwise', + min_depth=self._min_depth, + depth_multiplier=self._depth_multiplier, + scope=scope, + **params) + return activations['Conv2d_11_pointwise'], activations + + def _extract_box_classifier_features(self, proposal_feature_maps, scope): + """Extracts second stage box classifier features. + + Args: + proposal_feature_maps: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, crop_height, crop_width, depth] + representing the feature map cropped to each proposal. + scope: A scope name (unused). + + Returns: + proposal_classifier_features: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, height, width, depth] + representing box classifier features for each proposal. 
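+
+    Note: for Mobilenet V1 this stage applies two additional separable
+    convolutions (Conv2d_12_pointwise with stride 2, then Conv2d_13_pointwise
+    with stride 1) on top of the cropped proposal features.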
+ """ + net = proposal_feature_maps + + conv_depth = 1024 + if self._skip_last_stride: + conv_depth_ratio = float(self._conv_depth_ratio_in_percentage) / 100.0 + conv_depth = int(float(conv_depth) * conv_depth_ratio) + + depth = lambda d: max(int(d * 1.0), 16) + with tf.variable_scope('MobilenetV1', reuse=self._reuse_weights): + with slim.arg_scope( + mobilenet_v1.mobilenet_v1_arg_scope( + is_training=self._train_batch_norm, + weight_decay=self._weight_decay)): + with slim.arg_scope( + [slim.conv2d, slim.separable_conv2d], padding='SAME'): + net = slim.separable_conv2d( + net, + depth(conv_depth), [3, 3], + depth_multiplier=1, + stride=2, + scope='Conv2d_12_pointwise') + return slim.separable_conv2d( + net, + depth(conv_depth), [3, 3], + depth_multiplier=1, + stride=1, + scope='Conv2d_13_pointwise') diff --git a/models/research/object_detection/models/faster_rcnn_mobilenet_v1_feature_extractor_tf1_test.py b/models/research/object_detection/models/faster_rcnn_mobilenet_v1_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..65a4958e4c20964b2857f95f7bc2b83d05d3cc02 --- /dev/null +++ b/models/research/object_detection/models/faster_rcnn_mobilenet_v1_feature_extractor_tf1_test.py @@ -0,0 +1,128 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for faster_rcnn_mobilenet_v1_feature_extractor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import faster_rcnn_mobilenet_v1_feature_extractor as faster_rcnn_mobilenet_v1 +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class FasterRcnnMobilenetV1FeatureExtractorTest(tf.test.TestCase): + + def _build_feature_extractor(self, first_stage_features_stride): + return faster_rcnn_mobilenet_v1.FasterRCNNMobilenetV1FeatureExtractor( + is_training=False, + first_stage_features_stride=first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0) + + def test_extract_proposal_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [4, 224, 224, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [4, 14, 14, 512]) + + def test_extract_proposal_features_stride_eight(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=8) + preprocessed_inputs = tf.random_uniform( + [4, 224, 224, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [4, 14, 14, 512]) + + def test_extract_proposal_features_half_size_input(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [1, 112, 112, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [1, 7, 7, 512]) + + def test_extract_proposal_features_dies_on_invalid_stride(self): + with self.assertRaises(ValueError): + self._build_feature_extractor(first_stage_features_stride=99) + + def test_extract_proposal_features_dies_on_very_small_images(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3)) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + with self.assertRaises(tf.errors.InvalidArgumentError): + sess.run( + features_shape, + feed_dict={preprocessed_inputs: np.random.rand(4, 32, 32, 3)}) + + def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self): + feature_extractor = 
self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [224, 224, 3], maxval=255, dtype=tf.float32) + with self.assertRaises(ValueError): + feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + + def test_extract_box_classifier_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + proposal_feature_maps = tf.random_uniform( + [3, 14, 14, 576], maxval=255, dtype=tf.float32) + proposal_classifier_features = ( + feature_extractor.extract_box_classifier_features( + proposal_feature_maps, scope='TestScope')) + features_shape = tf.shape(proposal_classifier_features) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [3, 7, 7, 1024]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/faster_rcnn_nas_feature_extractor.py b/models/research/object_detection/models/faster_rcnn_nas_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..9fe17cbea856dd1ed8ca0bf1a8c25327714c5b6d --- /dev/null +++ b/models/research/object_detection/models/faster_rcnn_nas_feature_extractor.py @@ -0,0 +1,336 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""NASNet Faster R-CNN implementation. + +Learning Transferable Architectures for Scalable Image Recognition +Barret Zoph, Vijay Vasudevan, Jonathon Shlens, Quoc V. Le +https://arxiv.org/abs/1707.07012 +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import faster_rcnn_meta_arch +from object_detection.utils import variables_helper + +# pylint: disable=g-import-not-at-top +try: + from nets.nasnet import nasnet + from nets.nasnet import nasnet_utils +except: # pylint: disable=bare-except + pass +# pylint: enable=g-import-not-at-top + +arg_scope = slim.arg_scope + + +def nasnet_large_arg_scope_for_detection(is_batch_norm_training=False): + """Defines the default arg scope for the NASNet-A Large for object detection. + + This provides a small edit to switch batch norm training on and off. + + Args: + is_batch_norm_training: Boolean indicating whether to train with batch norm. + + Returns: + An `arg_scope` to use for the NASNet Large Model. 
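+
+  Example usage (a sketch; `images` stands in for a preprocessed batch):
+
+    with slim.arg_scope(
+        nasnet_large_arg_scope_for_detection(is_batch_norm_training=False)):
+      _, end_points = nasnet.build_nasnet_large(
+          images, num_classes=None, is_training=False,
+          final_endpoint='Cell_11')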
+ """ + imagenet_scope = nasnet.nasnet_large_arg_scope() + with arg_scope(imagenet_scope): + with arg_scope([slim.batch_norm], is_training=is_batch_norm_training) as sc: + return sc + + +# Note: This is largely a copy of _build_nasnet_base inside nasnet.py but +# with special edits to remove instantiation of the stem and the special +# ability to receive as input a pair of hidden states. +def _build_nasnet_base(hidden_previous, + hidden, + normal_cell, + reduction_cell, + hparams, + true_cell_num, + start_cell_num): + """Constructs a NASNet image model.""" + + # Find where to place the reduction cells or stride normal cells + reduction_indices = nasnet_utils.calc_reduction_layers( + hparams.num_cells, hparams.num_reduction_layers) + + # Note: The None is prepended to match the behavior of _imagenet_stem() + cell_outputs = [None, hidden_previous, hidden] + net = hidden + + # NOTE: In the nasnet.py code, filter_scaling starts at 1.0. We instead + # start at 2.0 because 1 reduction cell has been created which would + # update the filter_scaling to 2.0. + filter_scaling = 2.0 + + # Run the cells + for cell_num in range(start_cell_num, hparams.num_cells): + stride = 1 + if hparams.skip_reduction_layer_input: + prev_layer = cell_outputs[-2] + if cell_num in reduction_indices: + filter_scaling *= hparams.filter_scaling_rate + net = reduction_cell( + net, + scope='reduction_cell_{}'.format(reduction_indices.index(cell_num)), + filter_scaling=filter_scaling, + stride=2, + prev_layer=cell_outputs[-2], + cell_num=true_cell_num) + true_cell_num += 1 + cell_outputs.append(net) + if not hparams.skip_reduction_layer_input: + prev_layer = cell_outputs[-2] + net = normal_cell( + net, + scope='cell_{}'.format(cell_num), + filter_scaling=filter_scaling, + stride=stride, + prev_layer=prev_layer, + cell_num=true_cell_num) + true_cell_num += 1 + cell_outputs.append(net) + + # Final nonlinearity. + # Note that we have dropped the final pooling, dropout and softmax layers + # from the default nasnet version. + with tf.variable_scope('final_layer'): + net = tf.nn.relu(net) + return net + + +# TODO(shlens): Only fixed_shape_resizer is currently supported for NASNet +# featurization. The reason for this is that nasnet.py only supports +# inputs with fully known shapes. We need to update nasnet.py to handle +# shapes not known at compile time. +class FasterRCNNNASFeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): + """Faster R-CNN with NASNet-A feature extractor implementation.""" + + def __init__(self, + is_training, + first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + reuse_weights: See base class. + weight_decay: See base class. + + Raises: + ValueError: If `first_stage_features_stride` is not 16. + """ + if first_stage_features_stride != 16: + raise ValueError('`first_stage_features_stride` must be 16.') + super(FasterRCNNNASFeatureExtractor, self).__init__( + is_training, first_stage_features_stride, batch_norm_trainable, + reuse_weights, weight_decay) + + def preprocess(self, resized_inputs): + """Faster R-CNN with NAS preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: A [batch, height_in, width_in, channels] float32 tensor + representing a batch of images with values between 0 and 255.0. 
+ + Returns: + preprocessed_inputs: A [batch, height_out, width_out, channels] float32 + tensor representing a batch of images. + + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def _extract_proposal_features(self, preprocessed_inputs, scope): + """Extracts first stage RPN features. + + Extracts features using the first half of the NASNet network. + We construct the network in `align_feature_maps=True` mode, which means + that all VALID paddings in the network are changed to SAME padding so that + the feature maps are aligned. + + Args: + preprocessed_inputs: A [batch, height, width, channels] float32 tensor + representing a batch of images. + scope: A scope name. + + Returns: + rpn_feature_map: A tensor with shape [batch, height, width, depth] + end_points: A dictionary mapping feature extractor tensor names to tensors + + Raises: + ValueError: If the created network is missing the required activation. + """ + del scope + + if len(preprocessed_inputs.get_shape().as_list()) != 4: + raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a ' + 'tensor of shape %s' % preprocessed_inputs.get_shape()) + + with slim.arg_scope(nasnet_large_arg_scope_for_detection( + is_batch_norm_training=self._train_batch_norm)): + with arg_scope([slim.conv2d, + slim.batch_norm, + slim.separable_conv2d], + reuse=self._reuse_weights): + _, end_points = nasnet.build_nasnet_large( + preprocessed_inputs, num_classes=None, + is_training=self._is_training, + final_endpoint='Cell_11') + + # Note that both 'Cell_10' and 'Cell_11' have equal depth = 2016. + rpn_feature_map = tf.concat([end_points['Cell_10'], + end_points['Cell_11']], 3) + + # nasnet.py does not maintain the batch size in the first dimension. + # This work around permits us retaining the batch for below. + batch = preprocessed_inputs.get_shape().as_list()[0] + shape_without_batch = rpn_feature_map.get_shape().as_list()[1:] + rpn_feature_map_shape = [batch] + shape_without_batch + rpn_feature_map.set_shape(rpn_feature_map_shape) + + return rpn_feature_map, end_points + + def _extract_box_classifier_features(self, proposal_feature_maps, scope): + """Extracts second stage box classifier features. + + This function reconstructs the "second half" of the NASNet-A + network after the part defined in `_extract_proposal_features`. + + Args: + proposal_feature_maps: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, crop_height, crop_width, depth] + representing the feature map cropped to each proposal. + scope: A scope name. + + Returns: + proposal_classifier_features: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, height, width, depth] + representing box classifier features for each proposal. + """ + del scope + + # Note that we always feed into 2 layers of equal depth + # where the first N channels corresponds to previous hidden layer + # and the second N channels correspond to the final hidden layer. + hidden_previous, hidden = tf.split(proposal_feature_maps, 2, axis=3) + + # Note that what follows is largely a copy of build_nasnet_large() within + # nasnet.py. We are copying to minimize code pollution in slim. + + # TODO(shlens,skornblith): Determine the appropriate drop path schedule. + # For now the schedule is the default (1.0->0.7 over 250,000 train steps). + hparams = nasnet.large_imagenet_config() + if not self._is_training: + hparams.set_hparam('drop_path_keep_prob', 1.0) + + # Calculate the total number of cells in the network + # -- Add 2 for the reduction cells. 
+ total_num_cells = hparams.num_cells + 2 + # -- And add 2 for the stem cells for ImageNet training. + total_num_cells += 2 + + normal_cell = nasnet_utils.NasNetANormalCell( + hparams.num_conv_filters, hparams.drop_path_keep_prob, + total_num_cells, hparams.total_training_steps) + reduction_cell = nasnet_utils.NasNetAReductionCell( + hparams.num_conv_filters, hparams.drop_path_keep_prob, + total_num_cells, hparams.total_training_steps) + with arg_scope([slim.dropout, nasnet_utils.drop_path], + is_training=self._is_training): + with arg_scope([slim.batch_norm], is_training=self._train_batch_norm): + with arg_scope([slim.avg_pool2d, + slim.max_pool2d, + slim.conv2d, + slim.batch_norm, + slim.separable_conv2d, + nasnet_utils.factorized_reduction, + nasnet_utils.global_avg_pool, + nasnet_utils.get_channel_index, + nasnet_utils.get_channel_dim], + data_format=hparams.data_format): + + # This corresponds to the cell number just past 'Cell_11' used by + # by _extract_proposal_features(). + start_cell_num = 12 + # Note that this number equals: + # start_cell_num + 2 stem cells + 1 reduction cell + true_cell_num = 15 + + with slim.arg_scope(nasnet.nasnet_large_arg_scope()): + net = _build_nasnet_base(hidden_previous, + hidden, + normal_cell=normal_cell, + reduction_cell=reduction_cell, + hparams=hparams, + true_cell_num=true_cell_num, + start_cell_num=start_cell_num) + + proposal_classifier_features = net + return proposal_classifier_features + + def restore_from_classification_checkpoint_fn( + self, + first_stage_feature_extractor_scope, + second_stage_feature_extractor_scope): + """Returns a map of variables to load from a foreign checkpoint. + + Note that this overrides the default implementation in + faster_rcnn_meta_arch.FasterRCNNFeatureExtractor which does not work for + NASNet-A checkpoints. + + Args: + first_stage_feature_extractor_scope: A scope name for the first stage + feature extractor. + second_stage_feature_extractor_scope: A scope name for the second stage + feature extractor. + + Returns: + A dict mapping variable names (to load from a checkpoint) to variables in + the model graph. + """ + # Note that the NAS checkpoint only contains the moving average version of + # the Variables so we need to generate an appropriate dictionary mapping. + variables_to_restore = {} + for variable in variables_helper.get_global_variables_safely(): + if variable.op.name.startswith( + first_stage_feature_extractor_scope): + var_name = variable.op.name.replace( + first_stage_feature_extractor_scope + '/', '') + var_name += '/ExponentialMovingAverage' + variables_to_restore[var_name] = variable + if variable.op.name.startswith( + second_stage_feature_extractor_scope): + var_name = variable.op.name.replace( + second_stage_feature_extractor_scope + '/', '') + var_name += '/ExponentialMovingAverage' + variables_to_restore[var_name] = variable + return variables_to_restore diff --git a/models/research/object_detection/models/faster_rcnn_nas_feature_extractor_tf1_test.py b/models/research/object_detection/models/faster_rcnn_nas_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..a41cb0f733d613ffb050bbf4f8506579375c9d08 --- /dev/null +++ b/models/research/object_detection/models/faster_rcnn_nas_feature_extractor_tf1_test.py @@ -0,0 +1,111 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for models.faster_rcnn_nas_feature_extractor.""" +import unittest +import tensorflow.compat.v1 as tf + +from object_detection.models import faster_rcnn_nas_feature_extractor as frcnn_nas +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class FasterRcnnNASFeatureExtractorTest(tf.test.TestCase): + + def _build_feature_extractor(self, first_stage_features_stride): + return frcnn_nas.FasterRCNNNASFeatureExtractor( + is_training=False, + first_stage_features_stride=first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0) + + def test_extract_proposal_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [1, 299, 299, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [1, 19, 19, 4032]) + + def test_extract_proposal_features_input_size_224(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [1, 224, 224, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [1, 14, 14, 4032]) + + def test_extract_proposal_features_input_size_112(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [1, 112, 112, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [1, 7, 7, 4032]) + + def test_extract_proposal_features_dies_on_invalid_stride(self): + with self.assertRaises(ValueError): + self._build_feature_extractor(first_stage_features_stride=99) + + def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [224, 224, 3], maxval=255, dtype=tf.float32) + with self.assertRaises(ValueError): + feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + + def 
test_extract_box_classifier_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + proposal_feature_maps = tf.random_uniform( + [2, 17, 17, 1088], maxval=255, dtype=tf.float32) + proposal_classifier_features = ( + feature_extractor.extract_box_classifier_features( + proposal_feature_maps, scope='TestScope')) + features_shape = tf.shape(proposal_classifier_features) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [2, 9, 9, 4032]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/faster_rcnn_pnas_feature_extractor.py b/models/research/object_detection/models/faster_rcnn_pnas_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..ec32cd309d3a3fe135cf72665631b04273e21424 --- /dev/null +++ b/models/research/object_detection/models/faster_rcnn_pnas_feature_extractor.py @@ -0,0 +1,329 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""PNASNet Faster R-CNN implementation. + +Based on PNASNet model: https://arxiv.org/abs/1712.00559 +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import faster_rcnn_meta_arch +from object_detection.utils import variables_helper +from nets.nasnet import nasnet_utils + +try: + from nets.nasnet import pnasnet # pylint: disable=g-import-not-at-top +except: # pylint: disable=bare-except + pass + +arg_scope = slim.arg_scope + + +def pnasnet_large_arg_scope_for_detection(is_batch_norm_training=False): + """Defines the default arg scope for the PNASNet Large for object detection. + + This provides a small edit to switch batch norm training on and off. + + Args: + is_batch_norm_training: Boolean indicating whether to train with batch norm. + + Returns: + An `arg_scope` to use for the PNASNet Large Model. + """ + imagenet_scope = pnasnet.pnasnet_large_arg_scope() + with arg_scope(imagenet_scope): + with arg_scope([slim.batch_norm], is_training=is_batch_norm_training) as sc: + return sc + + +def _filter_scaling(reduction_indices, start_cell_num): + """Compute the expected filter scaling at given PNASNet cell start_cell_num. + + In the pnasnet.py code, filter_scaling starts at 1.0. We instead + adapt filter scaling to depend on the starting cell. + At first cells, before any reduction, filter_scalling is 1.0. With passing + any reduction cell, the filter_scaling is multiplied by 2. + + Args: + reduction_indices: list of int indices. + start_cell_num: int. + Returns: + filter_scaling: float. 
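+
+  For example, with reduction_indices=[8, 16] and start_cell_num=12, exactly
+  one reduction index precedes the starting cell, so the returned
+  filter_scaling is 2.0.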
+ """ + filter_scaling = 1.0 + for ind in reduction_indices: + if ind < start_cell_num: + filter_scaling *= 2.0 + return filter_scaling + + +# Note: This is largely a copy of _build_pnasnet_base inside pnasnet.py but +# with special edits to remove instantiation of the stem and the special +# ability to receive as input a pair of hidden states. It constructs only +# a sub-network from the original PNASNet model, starting from the +# start_cell_num cell and with modified final layer. +def _build_pnasnet_base( + hidden_previous, hidden, normal_cell, hparams, true_cell_num, + start_cell_num): + """Constructs a PNASNet image model for proposal classifier features.""" + + # Find where to place the reduction cells or stride normal cells + reduction_indices = nasnet_utils.calc_reduction_layers( + hparams.num_cells, hparams.num_reduction_layers) + filter_scaling = _filter_scaling(reduction_indices, start_cell_num) + + # Note: The None is prepended to match the behavior of _imagenet_stem() + cell_outputs = [None, hidden_previous, hidden] + net = hidden + + # Run the cells + for cell_num in range(start_cell_num, hparams.num_cells): + is_reduction = cell_num in reduction_indices + stride = 2 if is_reduction else 1 + if is_reduction: filter_scaling *= hparams.filter_scaling_rate + prev_layer = cell_outputs[-2] + net = normal_cell( + net, + scope='cell_{}'.format(cell_num), + filter_scaling=filter_scaling, + stride=stride, + prev_layer=prev_layer, + cell_num=true_cell_num) + true_cell_num += 1 + cell_outputs.append(net) + + # Final nonlinearity. + # Note that we have dropped the final pooling, dropout and softmax layers + # from the default pnasnet version. + with tf.variable_scope('final_layer'): + net = tf.nn.relu(net) + return net + + +# TODO(shlens): Only fixed_shape_resizer is currently supported for PNASNet +# featurization. The reason for this is that pnasnet.py only supports +# inputs with fully known shapes. We need to update pnasnet.py to handle +# shapes not known at compile time. +class FasterRCNNPNASFeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): + """Faster R-CNN with PNASNet feature extractor implementation.""" + + def __init__(self, + is_training, + first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + reuse_weights: See base class. + weight_decay: See base class. + + Raises: + ValueError: If `first_stage_features_stride` is not 16. + """ + if first_stage_features_stride != 16: + raise ValueError('`first_stage_features_stride` must be 16.') + super(FasterRCNNPNASFeatureExtractor, self).__init__( + is_training, first_stage_features_stride, batch_norm_trainable, + reuse_weights, weight_decay) + + def preprocess(self, resized_inputs): + """Faster R-CNN with PNAS preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: A [batch, height_in, width_in, channels] float32 tensor + representing a batch of images with values between 0 and 255.0. + + Returns: + preprocessed_inputs: A [batch, height_out, width_out, channels] float32 + tensor representing a batch of images. + + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def _extract_proposal_features(self, preprocessed_inputs, scope): + """Extracts first stage RPN features. + + Extracts features using the first half of the PNASNet network. 
+ We construct the network in `align_feature_maps=True` mode, which means + that all VALID paddings in the network are changed to SAME padding so that + the feature maps are aligned. + + Args: + preprocessed_inputs: A [batch, height, width, channels] float32 tensor + representing a batch of images. + scope: A scope name. + + Returns: + rpn_feature_map: A tensor with shape [batch, height, width, depth] + end_points: A dictionary mapping feature extractor tensor names to tensors + + Raises: + ValueError: If the created network is missing the required activation. + """ + del scope + + if len(preprocessed_inputs.get_shape().as_list()) != 4: + raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a ' + 'tensor of shape %s' % preprocessed_inputs.get_shape()) + + with slim.arg_scope(pnasnet_large_arg_scope_for_detection( + is_batch_norm_training=self._train_batch_norm)): + with arg_scope([slim.conv2d, + slim.batch_norm, + slim.separable_conv2d], + reuse=self._reuse_weights): + _, end_points = pnasnet.build_pnasnet_large( + preprocessed_inputs, num_classes=None, + is_training=self._is_training, + final_endpoint='Cell_7') + + # Note that both 'Cell_6' and 'Cell_7' have equal depth = 2160. + # Cell_7 is the last cell before second reduction. + rpn_feature_map = tf.concat([end_points['Cell_6'], + end_points['Cell_7']], 3) + + # pnasnet.py does not maintain the batch size in the first dimension. + # This work around permits us retaining the batch for below. + batch = preprocessed_inputs.get_shape().as_list()[0] + shape_without_batch = rpn_feature_map.get_shape().as_list()[1:] + rpn_feature_map_shape = [batch] + shape_without_batch + rpn_feature_map.set_shape(rpn_feature_map_shape) + + return rpn_feature_map, end_points + + def _extract_box_classifier_features(self, proposal_feature_maps, scope): + """Extracts second stage box classifier features. + + This function reconstructs the "second half" of the PNASNet + network after the part defined in `_extract_proposal_features`. + + Args: + proposal_feature_maps: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, crop_height, crop_width, depth] + representing the feature map cropped to each proposal. + scope: A scope name. + + Returns: + proposal_classifier_features: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, height, width, depth] + representing box classifier features for each proposal. + """ + del scope + + # Number of used stem cells. + num_stem_cells = 2 + + # Note that we always feed into 2 layers of equal depth + # where the first N channels corresponds to previous hidden layer + # and the second N channels correspond to the final hidden layer. + hidden_previous, hidden = tf.split(proposal_feature_maps, 2, axis=3) + + # Note that what follows is largely a copy of build_pnasnet_large() within + # pnasnet.py. We are copying to minimize code pollution in slim. + + # TODO(shlens,skornblith): Determine the appropriate drop path schedule. + # For now the schedule is the default (1.0->0.7 over 250,000 train steps). 
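    # A note on the channel bookkeeping (inferred from the first-stage code
    # above): in the full pipeline these crops come from the RPN feature map,
    # i.e. the concatenation of 'Cell_6' and 'Cell_7' (2160 channels each),
    # so the tf.split above recovers two 2160-channel hidden states for
    # (hidden_previous, hidden).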
+ hparams = pnasnet.large_imagenet_config() + if not self._is_training: + hparams.set_hparam('drop_path_keep_prob', 1.0) + + # Calculate the total number of cells in the network + total_num_cells = hparams.num_cells + num_stem_cells + + normal_cell = pnasnet.PNasNetNormalCell( + hparams.num_conv_filters, hparams.drop_path_keep_prob, + total_num_cells, hparams.total_training_steps) + with arg_scope([slim.dropout, nasnet_utils.drop_path], + is_training=self._is_training): + with arg_scope([slim.batch_norm], is_training=self._train_batch_norm): + with arg_scope([slim.avg_pool2d, + slim.max_pool2d, + slim.conv2d, + slim.batch_norm, + slim.separable_conv2d, + nasnet_utils.factorized_reduction, + nasnet_utils.global_avg_pool, + nasnet_utils.get_channel_index, + nasnet_utils.get_channel_dim], + data_format=hparams.data_format): + + # This corresponds to the cell number just past 'Cell_7' used by + # _extract_proposal_features(). + start_cell_num = 8 + true_cell_num = start_cell_num + num_stem_cells + + with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()): + net = _build_pnasnet_base( + hidden_previous, + hidden, + normal_cell=normal_cell, + hparams=hparams, + true_cell_num=true_cell_num, + start_cell_num=start_cell_num) + + proposal_classifier_features = net + return proposal_classifier_features + + def restore_from_classification_checkpoint_fn( + self, + first_stage_feature_extractor_scope, + second_stage_feature_extractor_scope): + """Returns a map of variables to load from a foreign checkpoint. + + Note that this overrides the default implementation in + faster_rcnn_meta_arch.FasterRCNNFeatureExtractor which does not work for + PNASNet checkpoints. + + Args: + first_stage_feature_extractor_scope: A scope name for the first stage + feature extractor. + second_stage_feature_extractor_scope: A scope name for the second stage + feature extractor. + + Returns: + A dict mapping variable names (to load from a checkpoint) to variables in + the model graph. + """ + variables_to_restore = {} + for variable in variables_helper.get_global_variables_safely(): + if variable.op.name.startswith( + first_stage_feature_extractor_scope): + var_name = variable.op.name.replace( + first_stage_feature_extractor_scope + '/', '') + var_name += '/ExponentialMovingAverage' + variables_to_restore[var_name] = variable + if variable.op.name.startswith( + second_stage_feature_extractor_scope): + var_name = variable.op.name.replace( + second_stage_feature_extractor_scope + '/', '') + var_name += '/ExponentialMovingAverage' + variables_to_restore[var_name] = variable + return variables_to_restore diff --git a/models/research/object_detection/models/faster_rcnn_pnas_feature_extractor_tf1_test.py b/models/research/object_detection/models/faster_rcnn_pnas_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..16774511b4d9c6eb1c94b8304640d9bf99c47ce0 --- /dev/null +++ b/models/research/object_detection/models/faster_rcnn_pnas_feature_extractor_tf1_test.py @@ -0,0 +1,124 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for models.faster_rcnn_pnas_feature_extractor.""" +import unittest +import tensorflow.compat.v1 as tf + +from object_detection.models import faster_rcnn_pnas_feature_extractor as frcnn_pnas +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class FasterRcnnPNASFeatureExtractorTest(tf.test.TestCase): + + def _build_feature_extractor(self, first_stage_features_stride): + return frcnn_pnas.FasterRCNNPNASFeatureExtractor( + is_training=False, + first_stage_features_stride=first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0) + + def test_extract_proposal_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [1, 299, 299, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [1, 19, 19, 4320]) + + def test_extract_proposal_features_input_size_224(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [1, 224, 224, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [1, 14, 14, 4320]) + + def test_extract_proposal_features_input_size_112(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [1, 112, 112, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [1, 7, 7, 4320]) + + def test_extract_proposal_features_dies_on_invalid_stride(self): + with self.assertRaises(ValueError): + self._build_feature_extractor(first_stage_features_stride=99) + + def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [224, 224, 3], maxval=255, dtype=tf.float32) + with self.assertRaises(ValueError): + feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + + def test_extract_box_classifier_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + proposal_feature_maps = tf.random_uniform( + [2, 17, 17, 1088], maxval=255, dtype=tf.float32) + proposal_classifier_features = ( + 
feature_extractor.extract_box_classifier_features( + proposal_feature_maps, scope='TestScope')) + features_shape = tf.shape(proposal_classifier_features) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [2, 9, 9, 4320]) + + def test_filter_scaling_computation(self): + expected_filter_scaling = { + ((4, 8), 2): 1.0, + ((4, 8), 7): 2.0, + ((4, 8), 8): 2.0, + ((4, 8), 9): 4.0 + } + for args, filter_scaling in expected_filter_scaling.items(): + reduction_indices, start_cell_num = args + self.assertAlmostEqual( + frcnn_pnas._filter_scaling(reduction_indices, start_cell_num), + filter_scaling) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/faster_rcnn_resnet_keras_feature_extractor.py b/models/research/object_detection/models/faster_rcnn_resnet_keras_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..a2029d24251bf7541405b06918fcfc0897f44af7 --- /dev/null +++ b/models/research/object_detection/models/faster_rcnn_resnet_keras_feature_extractor.py @@ -0,0 +1,271 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Resnet based Faster R-CNN implementation in Keras. + +See Deep Residual Learning for Image Recognition by He et al. +https://arxiv.org/abs/1512.03385 +""" + +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures import faster_rcnn_meta_arch +from object_detection.models.keras_models import resnet_v1 +from object_detection.utils import model_util + + +_RESNET_MODEL_CONV4_LAST_LAYERS = { + 'resnet_v1_50': 'conv4_block6_out', + 'resnet_v1_101': 'conv4_block23_out', + 'resnet_v1_152': 'conv4_block36_out', +} + + +class FasterRCNNResnetKerasFeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor): + """Faster R-CNN with Resnet feature extractor implementation.""" + + def __init__(self, + is_training, + resnet_v1_base_model, + resnet_v1_base_model_name, + first_stage_features_stride=16, + batch_norm_trainable=False, + weight_decay=0.0): + """Constructor. + + Args: + is_training: See base class. + resnet_v1_base_model: base resnet v1 network to use. One of + the resnet_v1.resnet_v1_{50,101,152} models. + resnet_v1_base_model_name: model name under which to construct resnet v1. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + weight_decay: See base class. + + Raises: + ValueError: If `first_stage_features_stride` is not 8 or 16. 
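A minimal usage sketch of the Keras extractor defined here, run under TF2 eager execution and mirroring the TF2 test later in this diff; the scope name and input size are arbitrary choices for illustration:

import tensorflow as tf
from object_detection.models import faster_rcnn_resnet_keras_feature_extractor as frcnn_res

extractor = frcnn_res.FasterRCNNResnet50KerasFeatureExtractor(
    is_training=False,
    first_stage_features_stride=16,  # the Keras variant only accepts 16
    batch_norm_trainable=False,
    weight_decay=0.0)

images = tf.random.uniform([1, 224, 224, 3], maxval=255.0)
rpn_model = extractor.get_proposal_feature_extractor_model(name='TestScope')
rpn_features = rpn_model(extractor.preprocess(images))
# rpn_features has shape [1, 14, 14, 1024] (conv4_block6_out of ResNet-50).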
+ """ + if first_stage_features_stride != 16: + raise ValueError('`first_stage_features_stride` must be 16.') + super(FasterRCNNResnetKerasFeatureExtractor, self).__init__( + is_training, first_stage_features_stride, batch_norm_trainable, + weight_decay) + self.classification_backbone = None + self._variable_dict = {} + self._resnet_v1_base_model = resnet_v1_base_model + self._resnet_v1_base_model_name = resnet_v1_base_model_name + + def preprocess(self, resized_inputs): + """Faster R-CNN Resnet V1 preprocessing. + + VGG style channel mean subtraction as described here: + https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md + Note that if the number of channels is not equal to 3, the mean subtraction + will be skipped and the original resized_inputs will be returned. + + Args: + resized_inputs: A [batch, height_in, width_in, channels] float32 tensor + representing a batch of images with values between 0 and 255.0. + + Returns: + preprocessed_inputs: A [batch, height_out, width_out, channels] float32 + tensor representing a batch of images. + + """ + if resized_inputs.shape.as_list()[3] == 3: + channel_means = [123.68, 116.779, 103.939] + return resized_inputs - [[channel_means]] + else: + return resized_inputs + + def get_proposal_feature_extractor_model(self, name=None): + """Returns a model that extracts first stage RPN features. + + Extracts features using the first half of the Resnet v1 network. + + Args: + name: A scope name to construct all variables within. + + Returns: + A Keras model that takes preprocessed_inputs: + A [batch, height, width, channels] float32 tensor + representing a batch of images. + + And returns rpn_feature_map: + A tensor with shape [batch, height, width, depth] + """ + if not self.classification_backbone: + self.classification_backbone = self._resnet_v1_base_model( + batchnorm_training=self._train_batch_norm, + conv_hyperparams=None, + weight_decay=self._weight_decay, + classes=None, + weights=None, + include_top=False + ) + with tf.name_scope(name): + with tf.name_scope('ResnetV1'): + + conv4_last_layer = _RESNET_MODEL_CONV4_LAST_LAYERS[ + self._resnet_v1_base_model_name] + proposal_features = self.classification_backbone.get_layer( + name=conv4_last_layer).output + keras_model = tf.keras.Model( + inputs=self.classification_backbone.inputs, + outputs=proposal_features) + for variable in keras_model.variables: + self._variable_dict[variable.name[:-2]] = variable + return keras_model + + def get_box_classifier_feature_extractor_model(self, name=None): + """Returns a model that extracts second stage box classifier features. + + This function reconstructs the "second half" of the ResNet v1 + network after the part defined in `get_proposal_feature_extractor_model`. + + Args: + name: A scope name to construct all variables within. + + Returns: + A Keras model that takes proposal_feature_maps: + A 4-D float tensor with shape + [batch_size * self.max_num_proposals, crop_height, crop_width, depth] + representing the feature map cropped to each proposal. + And returns proposal_classifier_features: + A 4-D float tensor with shape + [batch_size * self.max_num_proposals, height, width, depth] + representing box classifier features for each proposal. 
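For concreteness, using the numbers from the TF2 test later in this diff: 7x7 crops of the 1024-channel conv4 output come back as 4x4x2048 features. The spatial size is smaller than the 7x7 produced by the slim extractor because the Keras backbone applies the regular conv5 stride instead of the atrous substitution mentioned in that test's TODO.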
+ """ + if not self.classification_backbone: + self.classification_backbone = self._resnet_v1_base_model( + batchnorm_training=self._train_batch_norm, + conv_hyperparams=None, + weight_decay=self._weight_decay, + classes=None, + weights=None, + include_top=False + ) + with tf.name_scope(name): + with tf.name_scope('ResnetV1'): + conv4_last_layer = _RESNET_MODEL_CONV4_LAST_LAYERS[ + self._resnet_v1_base_model_name] + proposal_feature_maps = self.classification_backbone.get_layer( + name=conv4_last_layer).output + proposal_classifier_features = self.classification_backbone.get_layer( + name='conv5_block3_out').output + + keras_model = model_util.extract_submodel( + model=self.classification_backbone, + inputs=proposal_feature_maps, + outputs=proposal_classifier_features) + for variable in keras_model.variables: + self._variable_dict[variable.name[:-2]] = variable + return keras_model + + def restore_from_classification_checkpoint_fn( + self, + first_stage_feature_extractor_scope, + second_stage_feature_extractor_scope): + """Returns a map for restoring from an (object-based) checkpoint. + + Args: + first_stage_feature_extractor_scope: A scope name for the first stage + feature extractor (unused). + second_stage_feature_extractor_scope: A scope name for the second stage + feature extractor (unused). + + Returns: + A dict mapping keys to Keras models + """ + return {'feature_extractor': self.classification_backbone} + + +class FasterRCNNResnet50KerasFeatureExtractor( + FasterRCNNResnetKerasFeatureExtractor): + """Faster R-CNN with Resnet50 feature extractor implementation.""" + + def __init__(self, + is_training, + first_stage_features_stride=16, + batch_norm_trainable=False, + weight_decay=0.0): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + weight_decay: See base class. + """ + super(FasterRCNNResnet50KerasFeatureExtractor, self).__init__( + is_training=is_training, + resnet_v1_base_model=resnet_v1.resnet_v1_50, + resnet_v1_base_model_name='resnet_v1_50', + first_stage_features_stride=first_stage_features_stride, + batch_norm_trainable=batch_norm_trainable, + weight_decay=weight_decay) + + +class FasterRCNNResnet101KerasFeatureExtractor( + FasterRCNNResnetKerasFeatureExtractor): + """Faster R-CNN with Resnet101 feature extractor implementation.""" + + def __init__(self, + is_training, + first_stage_features_stride=16, + batch_norm_trainable=False, + weight_decay=0.0): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + weight_decay: See base class. + """ + super(FasterRCNNResnet101KerasFeatureExtractor, self).__init__( + is_training=is_training, + resnet_v1_base_model=resnet_v1.resnet_v1_101, + resnet_v1_base_model_name='resnet_v1_101', + first_stage_features_stride=first_stage_features_stride, + batch_norm_trainable=batch_norm_trainable, + weight_decay=weight_decay) + + +class FasterRCNNResnet152KerasFeatureExtractor( + FasterRCNNResnetKerasFeatureExtractor): + """Faster R-CNN with Resnet152 feature extractor implementation.""" + + def __init__(self, + is_training, + first_stage_features_stride=16, + batch_norm_trainable=False, + weight_decay=0.0): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + weight_decay: See base class. 
+ """ + super(FasterRCNNResnet152KerasFeatureExtractor, self).__init__( + is_training=is_training, + resnet_v1_base_model=resnet_v1.resnet_v1_152, + resnet_v1_base_model_name='resnet_v1_152', + first_stage_features_stride=first_stage_features_stride, + batch_norm_trainable=batch_norm_trainable, + weight_decay=weight_decay) diff --git a/models/research/object_detection/models/faster_rcnn_resnet_keras_feature_extractor_tf2_test.py b/models/research/object_detection/models/faster_rcnn_resnet_keras_feature_extractor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..15e8a5fbf153cdee690be94d2d9c910070af35f0 --- /dev/null +++ b/models/research/object_detection/models/faster_rcnn_resnet_keras_feature_extractor_tf2_test.py @@ -0,0 +1,80 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for models.faster_rcnn_resnet_keras_feature_extractor.""" +import unittest +import tensorflow.compat.v1 as tf + +from object_detection.models import faster_rcnn_resnet_keras_feature_extractor as frcnn_res +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class FasterRcnnResnetKerasFeatureExtractorTest(tf.test.TestCase): + + def _build_feature_extractor(self, architecture='resnet_v1_50'): + return frcnn_res.FasterRCNNResnet50KerasFeatureExtractor( + is_training=False, + first_stage_features_stride=16, + batch_norm_trainable=False, + weight_decay=0.0) + + def test_extract_proposal_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor() + preprocessed_inputs = tf.random_uniform( + [1, 224, 224, 3], maxval=255, dtype=tf.float32) + rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model( + name='TestScope')(preprocessed_inputs) + features_shape = tf.shape(rpn_feature_map) + self.assertAllEqual(features_shape.numpy(), [1, 14, 14, 1024]) + + def test_extract_proposal_features_half_size_input(self): + feature_extractor = self._build_feature_extractor() + preprocessed_inputs = tf.random_uniform( + [1, 112, 112, 3], maxval=255, dtype=tf.float32) + rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model( + name='TestScope')(preprocessed_inputs) + features_shape = tf.shape(rpn_feature_map) + self.assertAllEqual(features_shape.numpy(), [1, 7, 7, 1024]) + + def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self): + feature_extractor = self._build_feature_extractor() + preprocessed_inputs = tf.random_uniform( + [224, 224, 3], maxval=255, dtype=tf.float32) + with self.assertRaises(tf.errors.InvalidArgumentError): + feature_extractor.get_proposal_feature_extractor_model( + name='TestScope')(preprocessed_inputs) + + def test_extract_box_classifier_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor() + proposal_feature_maps = tf.random_uniform( + [3, 7, 7, 
1024], maxval=255, dtype=tf.float32) + model = feature_extractor.get_box_classifier_feature_extractor_model( + name='TestScope') + proposal_classifier_features = ( + model(proposal_feature_maps)) + features_shape = tf.shape(proposal_classifier_features) + # Note: due to a slight mismatch in slim and keras resnet definitions + # the output shape of the box classifier is slightly different compared to + # that of the slim implementation. The keras version is more `canonical` + # in that it more accurately reflects the original authors' implementation. + # TODO(jonathanhuang): make the output shape match that of the slim + # implementation by using atrous convolutions. + self.assertAllEqual(features_shape.numpy(), [3, 4, 4, 2048]) + + +if __name__ == '__main__': + tf.enable_v2_behavior() + tf.test.main() diff --git a/models/research/object_detection/models/faster_rcnn_resnet_v1_feature_extractor.py b/models/research/object_detection/models/faster_rcnn_resnet_v1_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..30cd9d42c54500af36f74d45870b04726755b90e --- /dev/null +++ b/models/research/object_detection/models/faster_rcnn_resnet_v1_feature_extractor.py @@ -0,0 +1,268 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Resnet V1 Faster R-CNN implementation. + +See "Deep Residual Learning for Image Recognition" by He et al., 2015. +https://arxiv.org/abs/1512.03385 + +Note: this implementation assumes that the classification checkpoint used +to finetune this model is trained using the same configuration as that of +the MSRA provided checkpoints +(see https://github.com/KaimingHe/deep-residual-networks), e.g., with +same preprocessing, batch norm scaling, etc. +""" +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import faster_rcnn_meta_arch +from nets import resnet_utils +from nets import resnet_v1 + + +class FasterRCNNResnetV1FeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): + """Faster R-CNN Resnet V1 feature extractor implementation.""" + + def __init__(self, + architecture, + resnet_model, + is_training, + first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0, + activation_fn=tf.nn.relu): + """Constructor. + + Args: + architecture: Architecture name of the Resnet V1 model. + resnet_model: Definition of the Resnet V1 model. + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + reuse_weights: See base class. + weight_decay: See base class. + activation_fn: Activaton functon to use in Resnet V1 model. + + Raises: + ValueError: If `first_stage_features_stride` is not 8 or 16. 
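As a quick sanity check on the stride argument: the first-stage (block3/conv4) feature map has spatial size of roughly input_size / first_stage_features_stride, e.g. 224 / 16 = 14 and 224 / 8 = 28, which is exactly what the stride tests later in this diff assert.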
+ """ + if first_stage_features_stride != 8 and first_stage_features_stride != 16: + raise ValueError('`first_stage_features_stride` must be 8 or 16.') + self._architecture = architecture + self._resnet_model = resnet_model + self._activation_fn = activation_fn + super(FasterRCNNResnetV1FeatureExtractor, + self).__init__(is_training, first_stage_features_stride, + batch_norm_trainable, reuse_weights, weight_decay) + + def preprocess(self, resized_inputs): + """Faster R-CNN Resnet V1 preprocessing. + + VGG style channel mean subtraction as described here: + https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md + Note that if the number of channels is not equal to 3, the mean subtraction + will be skipped and the original resized_inputs will be returned. + + Args: + resized_inputs: A [batch, height_in, width_in, channels] float32 tensor + representing a batch of images with values between 0 and 255.0. + + Returns: + preprocessed_inputs: A [batch, height_out, width_out, channels] float32 + tensor representing a batch of images. + + """ + if resized_inputs.shape.as_list()[3] == 3: + channel_means = [123.68, 116.779, 103.939] + return resized_inputs - [[channel_means]] + else: + return resized_inputs + + def _extract_proposal_features(self, preprocessed_inputs, scope): + """Extracts first stage RPN features. + + Args: + preprocessed_inputs: A [batch, height, width, channels] float32 tensor + representing a batch of images. + scope: A scope name. + + Returns: + rpn_feature_map: A tensor with shape [batch, height, width, depth] + activations: A dictionary mapping feature extractor tensor names to + tensors + + Raises: + InvalidArgumentError: If the spatial size of `preprocessed_inputs` + (height or width) is less than 33. + ValueError: If the created network is missing the required activation. + """ + if len(preprocessed_inputs.get_shape().as_list()) != 4: + raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a ' + 'tensor of shape %s' % preprocessed_inputs.get_shape()) + shape_assert = tf.Assert( + tf.logical_and( + tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33), + tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)), + ['image size must at least be 33 in both height and width.']) + + with tf.control_dependencies([shape_assert]): + # Disables batchnorm for fine-tuning with smaller batch sizes. + # TODO(chensun): Figure out if it is needed when image + # batch size is bigger. + with slim.arg_scope( + resnet_utils.resnet_arg_scope( + batch_norm_epsilon=1e-5, + batch_norm_scale=True, + activation_fn=self._activation_fn, + weight_decay=self._weight_decay)): + with tf.variable_scope( + self._architecture, reuse=self._reuse_weights) as var_scope: + _, activations = self._resnet_model( + preprocessed_inputs, + num_classes=None, + is_training=self._train_batch_norm, + global_pool=False, + output_stride=self._first_stage_features_stride, + spatial_squeeze=False, + scope=var_scope) + + handle = scope + '/%s/block3' % self._architecture + return activations[handle], activations + + def _extract_box_classifier_features(self, proposal_feature_maps, scope): + """Extracts second stage box classifier features. + + Args: + proposal_feature_maps: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, crop_height, crop_width, depth] + representing the feature map cropped to each proposal. + scope: A scope name (unused). 
+ + Returns: + proposal_classifier_features: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, height, width, depth] + representing box classifier features for each proposal. + """ + with tf.variable_scope(self._architecture, reuse=self._reuse_weights): + with slim.arg_scope( + resnet_utils.resnet_arg_scope( + batch_norm_epsilon=1e-5, + batch_norm_scale=True, + activation_fn=self._activation_fn, + weight_decay=self._weight_decay)): + with slim.arg_scope([slim.batch_norm], + is_training=self._train_batch_norm): + blocks = [ + resnet_utils.Block('block4', resnet_v1.bottleneck, [{ + 'depth': 2048, + 'depth_bottleneck': 512, + 'stride': 1 + }] * 3) + ] + proposal_classifier_features = resnet_utils.stack_blocks_dense( + proposal_feature_maps, blocks) + return proposal_classifier_features + + +class FasterRCNNResnet50FeatureExtractor(FasterRCNNResnetV1FeatureExtractor): + """Faster R-CNN Resnet 50 feature extractor implementation.""" + + def __init__(self, + is_training, + first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0, + activation_fn=tf.nn.relu): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + reuse_weights: See base class. + weight_decay: See base class. + activation_fn: See base class. + + Raises: + ValueError: If `first_stage_features_stride` is not 8 or 16, + or if `architecture` is not supported. + """ + super(FasterRCNNResnet50FeatureExtractor, + self).__init__('resnet_v1_50', resnet_v1.resnet_v1_50, is_training, + first_stage_features_stride, batch_norm_trainable, + reuse_weights, weight_decay, activation_fn) + + +class FasterRCNNResnet101FeatureExtractor(FasterRCNNResnetV1FeatureExtractor): + """Faster R-CNN Resnet 101 feature extractor implementation.""" + + def __init__(self, + is_training, + first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0, + activation_fn=tf.nn.relu): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + reuse_weights: See base class. + weight_decay: See base class. + activation_fn: See base class. + + Raises: + ValueError: If `first_stage_features_stride` is not 8 or 16, + or if `architecture` is not supported. + """ + super(FasterRCNNResnet101FeatureExtractor, + self).__init__('resnet_v1_101', resnet_v1.resnet_v1_101, is_training, + first_stage_features_stride, batch_norm_trainable, + reuse_weights, weight_decay, activation_fn) + + +class FasterRCNNResnet152FeatureExtractor(FasterRCNNResnetV1FeatureExtractor): + """Faster R-CNN Resnet 152 feature extractor implementation.""" + + def __init__(self, + is_training, + first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0, + activation_fn=tf.nn.relu): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + reuse_weights: See base class. + weight_decay: See base class. + activation_fn: See base class. + + Raises: + ValueError: If `first_stage_features_stride` is not 8 or 16, + or if `architecture` is not supported. 
+ """ + super(FasterRCNNResnet152FeatureExtractor, + self).__init__('resnet_v1_152', resnet_v1.resnet_v1_152, is_training, + first_stage_features_stride, batch_norm_trainable, + reuse_weights, weight_decay, activation_fn) diff --git a/models/research/object_detection/models/faster_rcnn_resnet_v1_feature_extractor_tf1_test.py b/models/research/object_detection/models/faster_rcnn_resnet_v1_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..3d47da04af5fb3f728379a649d64329c862eaf75 --- /dev/null +++ b/models/research/object_detection/models/faster_rcnn_resnet_v1_feature_extractor_tf1_test.py @@ -0,0 +1,167 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.models.faster_rcnn_resnet_v1_feature_extractor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import faster_rcnn_resnet_v1_feature_extractor as faster_rcnn_resnet_v1 +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class FasterRcnnResnetV1FeatureExtractorTest(tf.test.TestCase): + + def _build_feature_extractor(self, + first_stage_features_stride, + activation_fn=tf.nn.relu, + architecture='resnet_v1_101'): + feature_extractor_map = { + 'resnet_v1_50': + faster_rcnn_resnet_v1.FasterRCNNResnet50FeatureExtractor, + 'resnet_v1_101': + faster_rcnn_resnet_v1.FasterRCNNResnet101FeatureExtractor, + 'resnet_v1_152': + faster_rcnn_resnet_v1.FasterRCNNResnet152FeatureExtractor + } + return feature_extractor_map[architecture]( + is_training=False, + first_stage_features_stride=first_stage_features_stride, + activation_fn=activation_fn, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0) + + def test_extract_proposal_features_returns_expected_size(self): + for architecture in ['resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152']: + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16, architecture=architecture) + preprocessed_inputs = tf.random_uniform( + [4, 224, 224, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [4, 14, 14, 1024]) + + def test_extract_proposal_features_stride_eight(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=8) + preprocessed_inputs = tf.random_uniform( + [4, 224, 224, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op 
= tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [4, 28, 28, 1024]) + + def test_extract_proposal_features_half_size_input(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [1, 112, 112, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [1, 7, 7, 1024]) + + def test_extract_proposal_features_dies_on_invalid_stride(self): + with self.assertRaises(ValueError): + self._build_feature_extractor(first_stage_features_stride=99) + + def test_extract_proposal_features_dies_on_very_small_images(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3)) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + with self.assertRaises(tf.errors.InvalidArgumentError): + sess.run( + features_shape, + feed_dict={preprocessed_inputs: np.random.rand(4, 32, 32, 3)}) + + def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [224, 224, 3], maxval=255, dtype=tf.float32) + with self.assertRaises(ValueError): + feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + + def test_extract_box_classifier_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + proposal_feature_maps = tf.random_uniform( + [3, 7, 7, 1024], maxval=255, dtype=tf.float32) + proposal_classifier_features = ( + feature_extractor.extract_box_classifier_features( + proposal_feature_maps, scope='TestScope')) + features_shape = tf.shape(proposal_classifier_features) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [3, 7, 7, 2048]) + + def test_overwriting_activation_fn(self): + for architecture in ['resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152']: + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16, + architecture=architecture, + activation_fn=tf.nn.relu6) + preprocessed_inputs = tf.random_uniform([4, 224, 224, 3], + maxval=255, + dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestStage1Scope') + _ = feature_extractor.extract_box_classifier_features( + rpn_feature_map, scope='TestStaget2Scope') + conv_ops = [ + op for op in tf.get_default_graph().get_operations() + if op.type == 'Relu6' + ] + op_names = [op.name for op in conv_ops] + + self.assertIsNotNone(conv_ops) + self.assertIn('TestStage1Scope/resnet_v1_50/resnet_v1_50/conv1/Relu6', + op_names) + self.assertIn( + 
'TestStaget2Scope/resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/Relu6', + op_names) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/feature_map_generators.py b/models/research/object_detection/models/feature_map_generators.py new file mode 100644 index 0000000000000000000000000000000000000000..87d15e968390446a4332e20e5b737e04d573d98a --- /dev/null +++ b/models/research/object_detection/models/feature_map_generators.py @@ -0,0 +1,825 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Functions to generate a list of feature maps based on image features. + +Provides several feature map generators that can be used to build object +detection feature extractors. + +Object detection feature extractors usually are built by stacking two components +- A base feature extractor such as Inception V3 and a feature map generator. +Feature map generators build on the base feature extractors and produce a list +of final feature maps. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import collections +import functools +from six.moves import range +from six.moves import zip +import tensorflow.compat.v1 as tf +import tf_slim as slim +from object_detection.utils import ops +from object_detection.utils import shape_utils + +# Activation bound used for TPU v1. Activations will be clipped to +# [-ACTIVATION_BOUND, ACTIVATION_BOUND] when training with +# use_bounded_activations enabled. +ACTIVATION_BOUND = 6.0 + + +def get_depth_fn(depth_multiplier, min_depth): + """Builds a callable to compute depth (output channels) of conv filters. + + Args: + depth_multiplier: a multiplier for the nominal depth. + min_depth: a lower bound on the depth of filters. + + Returns: + A callable that takes in a nominal depth and returns the depth to use. + """ + def multiply_depth(depth): + new_depth = int(depth * depth_multiplier) + return max(new_depth, min_depth) + return multiply_depth + + +def create_conv_block( + use_depthwise, kernel_size, padding, stride, layer_name, conv_hyperparams, + is_training, freeze_batchnorm, depth): + """Create Keras layers for depthwise & non-depthwise convolutions. + + Args: + use_depthwise: Whether to use depthwise separable conv instead of regular + conv. + kernel_size: A list of length 2: [kernel_height, kernel_width] of the + filters. Can be an int if both values are the same. + padding: One of 'VALID' or 'SAME'. + stride: A list of length 2: [stride_height, stride_width], specifying the + convolution stride. Can be an int if both strides are the same. + layer_name: String. The name of the layer. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + is_training: Indicates whether the feature generator is in training mode. 
+ freeze_batchnorm: Bool. Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + depth: Depth of output feature maps. + + Returns: + A list of conv layers. + """ + layers = [] + if use_depthwise: + kwargs = conv_hyperparams.params() + # Both the regularizer and initializer apply to the depthwise layer, + # so we remap the kernel_* to depthwise_* here. + kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer'] + kwargs['depthwise_initializer'] = kwargs['kernel_initializer'] + layers.append( + tf.keras.layers.SeparableConv2D( + depth, [kernel_size, kernel_size], + depth_multiplier=1, + padding=padding, + strides=stride, + name=layer_name + '_depthwise_conv', + **kwargs)) + else: + layers.append(tf.keras.layers.Conv2D( + depth, + [kernel_size, kernel_size], + padding=padding, + strides=stride, + name=layer_name + '_conv', + **conv_hyperparams.params())) + layers.append( + conv_hyperparams.build_batch_norm( + training=(is_training and not freeze_batchnorm), + name=layer_name + '_batchnorm')) + layers.append( + conv_hyperparams.build_activation_layer( + name=layer_name)) + return layers + + +class KerasMultiResolutionFeatureMaps(tf.keras.Model): + """Generates multi resolution feature maps from input image features. + + A Keras model that generates multi-scale feature maps for detection as in the + SSD papers by Liu et al: https://arxiv.org/pdf/1512.02325v2.pdf, See Sec 2.1. + + More specifically, when called on inputs it performs the following two tasks: + 1) If a layer name is provided in the configuration, returns that layer as a + feature map. + 2) If a layer name is left as an empty string, constructs a new feature map + based on the spatial shape and depth configuration. Note that the current + implementation only supports generating new layers using convolution of + stride 2 resulting in a spatial resolution reduction by a factor of 2. + By default convolution kernel size is set to 3, and it can be customized + by caller. + + An example of the configuration for Inception V3: + { + 'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''], + 'layer_depth': [-1, -1, -1, 512, 256, 128] + } + + When this feature generator object is called on input image_features: + Args: + image_features: A dictionary of handles to activation tensors from the + base feature extractor. + + Returns: + feature_maps: an OrderedDict mapping keys (feature map names) to + tensors where each tensor has shape [batch, height_i, width_i, depth_i]. + """ + + def __init__(self, + feature_map_layout, + depth_multiplier, + min_depth, + insert_1x1_conv, + is_training, + conv_hyperparams, + freeze_batchnorm, + name=None): + """Constructor. + + Args: + feature_map_layout: Dictionary of specifications for the feature map + layouts in the following format (Inception V2/V3 respectively): + { + 'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''], + 'layer_depth': [-1, -1, -1, 512, 256, 128] + } + or + { + 'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''], + 'layer_depth': [-1, -1, -1, 512, 256, 128] + } + If 'from_layer' is specified, the specified feature map is directly used + as a box predictor layer, and the layer_depth is directly infered from + the feature map (instead of using the provided 'layer_depth' parameter). + In this case, our convention is to set 'layer_depth' to -1 for clarity. 
+ Otherwise, if 'from_layer' is an empty string, then the box predictor + layer will be built from the previous layer using convolution + operations. Note that the current implementation only supports + generating new layers using convolutions of stride 2 (resulting in a + spatial resolution reduction by a factor of 2), and will be extended to + a more flexible design. Convolution kernel size is set to 3 by default, + and can be customized by 'conv_kernel_size' parameter (similarily, + 'conv_kernel_size' should be set to -1 if 'from_layer' is specified). + The created convolution operation will be a normal 2D convolution by + default, and a depthwise convolution followed by 1x1 convolution if + 'use_depthwise' is set to True. + depth_multiplier: Depth multiplier for convolutional layers. + min_depth: Minimum depth for convolutional layers. + insert_1x1_conv: A boolean indicating whether an additional 1x1 + convolution should be inserted before shrinking the feature map. + is_training: Indicates whether the feature generator is in training mode. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + freeze_batchnorm: Bool. Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + name: A string name scope to assign to the model. If 'None', Keras + will auto-generate one from the class name. + """ + super(KerasMultiResolutionFeatureMaps, self).__init__(name=name) + + self.feature_map_layout = feature_map_layout + self.convolutions = [] + + depth_fn = get_depth_fn(depth_multiplier, min_depth) + + base_from_layer = '' + use_explicit_padding = False + if 'use_explicit_padding' in feature_map_layout: + use_explicit_padding = feature_map_layout['use_explicit_padding'] + use_depthwise = False + if 'use_depthwise' in feature_map_layout: + use_depthwise = feature_map_layout['use_depthwise'] + for index, from_layer in enumerate(feature_map_layout['from_layer']): + net = [] + layer_depth = feature_map_layout['layer_depth'][index] + conv_kernel_size = 3 + if 'conv_kernel_size' in feature_map_layout: + conv_kernel_size = feature_map_layout['conv_kernel_size'][index] + if from_layer: + base_from_layer = from_layer + else: + if insert_1x1_conv: + layer_name = '{}_1_Conv2d_{}_1x1_{}'.format( + base_from_layer, index, depth_fn(layer_depth // 2)) + net.append(tf.keras.layers.Conv2D(depth_fn(layer_depth // 2), + [1, 1], + padding='SAME', + strides=1, + name=layer_name + '_conv', + **conv_hyperparams.params())) + net.append( + conv_hyperparams.build_batch_norm( + training=(is_training and not freeze_batchnorm), + name=layer_name + '_batchnorm')) + net.append( + conv_hyperparams.build_activation_layer( + name=layer_name)) + + layer_name = '{}_2_Conv2d_{}_{}x{}_s2_{}'.format( + base_from_layer, index, conv_kernel_size, conv_kernel_size, + depth_fn(layer_depth)) + stride = 2 + padding = 'SAME' + if use_explicit_padding: + padding = 'VALID' + # We define this function here while capturing the value of + # conv_kernel_size, to avoid holding a reference to the loop variable + # conv_kernel_size inside of a lambda function + def fixed_padding(features, kernel_size=conv_kernel_size): + return ops.fixed_padding(features, kernel_size) + net.append(tf.keras.layers.Lambda(fixed_padding)) + # TODO(rathodv): Add some utilities to simplify the creation of + # Depthwise & non-depthwise convolutions w/ normalization 
& activations + if use_depthwise: + net.append(tf.keras.layers.DepthwiseConv2D( + [conv_kernel_size, conv_kernel_size], + depth_multiplier=1, + padding=padding, + strides=stride, + name=layer_name + '_depthwise_conv', + **conv_hyperparams.params())) + net.append( + conv_hyperparams.build_batch_norm( + training=(is_training and not freeze_batchnorm), + name=layer_name + '_depthwise_batchnorm')) + net.append( + conv_hyperparams.build_activation_layer( + name=layer_name + '_depthwise')) + + net.append(tf.keras.layers.Conv2D(depth_fn(layer_depth), [1, 1], + padding='SAME', + strides=1, + name=layer_name + '_conv', + **conv_hyperparams.params())) + net.append( + conv_hyperparams.build_batch_norm( + training=(is_training and not freeze_batchnorm), + name=layer_name + '_batchnorm')) + net.append( + conv_hyperparams.build_activation_layer( + name=layer_name)) + + else: + net.append(tf.keras.layers.Conv2D( + depth_fn(layer_depth), + [conv_kernel_size, conv_kernel_size], + padding=padding, + strides=stride, + name=layer_name + '_conv', + **conv_hyperparams.params())) + net.append( + conv_hyperparams.build_batch_norm( + training=(is_training and not freeze_batchnorm), + name=layer_name + '_batchnorm')) + net.append( + conv_hyperparams.build_activation_layer( + name=layer_name)) + + # Until certain bugs are fixed in checkpointable lists, + # this net must be appended only once it's been filled with layers + self.convolutions.append(net) + + def call(self, image_features): + """Generate the multi-resolution feature maps. + + Executed when calling the `.__call__` method on input. + + Args: + image_features: A dictionary of handles to activation tensors from the + base feature extractor. + + Returns: + feature_maps: an OrderedDict mapping keys (feature map names) to + tensors where each tensor has shape [batch, height_i, width_i, depth_i]. + """ + feature_maps = [] + feature_map_keys = [] + + for index, from_layer in enumerate(self.feature_map_layout['from_layer']): + if from_layer: + feature_map = image_features[from_layer] + feature_map_keys.append(from_layer) + else: + feature_map = feature_maps[-1] + for layer in self.convolutions[index]: + feature_map = layer(feature_map) + layer_name = self.convolutions[index][-1].name + feature_map_keys.append(layer_name) + feature_maps.append(feature_map) + return collections.OrderedDict( + [(x, y) for (x, y) in zip(feature_map_keys, feature_maps)]) + + +def multi_resolution_feature_maps(feature_map_layout, depth_multiplier, + min_depth, insert_1x1_conv, image_features, + pool_residual=False): + """Generates multi resolution feature maps from input image features. + + Generates multi-scale feature maps for detection as in the SSD papers by + Liu et al: https://arxiv.org/pdf/1512.02325v2.pdf, See Sec 2.1. + + More specifically, it performs the following two tasks: + 1) If a layer name is provided in the configuration, returns that layer as a + feature map. + 2) If a layer name is left as an empty string, constructs a new feature map + based on the spatial shape and depth configuration. Note that the current + implementation only supports generating new layers using convolution of + stride 2 resulting in a spatial resolution reduction by a factor of 2. + By default convolution kernel size is set to 3, and it can be customized + by caller. 
+
+  An example of the configuration for Inception V3:
+  {
+    'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
+    'layer_depth': [-1, -1, -1, 512, 256, 128]
+  }
+
+  Args:
+    feature_map_layout: Dictionary of specifications for the feature map
+      layouts in the following format (Inception V2/V3 respectively):
+      {
+        'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
+        'layer_depth': [-1, -1, -1, 512, 256, 128]
+      }
+      or
+      {
+        'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
+        'layer_depth': [-1, -1, -1, 512, 256, 128]
+      }
+      If 'from_layer' is specified, the specified feature map is directly used
+      as a box predictor layer, and the layer_depth is directly inferred from
+      the feature map (instead of using the provided 'layer_depth' parameter).
+      In this case, our convention is to set 'layer_depth' to -1 for clarity.
+      Otherwise, if 'from_layer' is an empty string, then the box predictor
+      layer will be built from the previous layer using convolution operations.
+      Note that the current implementation only supports generating new layers
+      using convolutions of stride 2 (resulting in a spatial resolution
+      reduction by a factor of 2), and will be extended to a more flexible
+      design. Convolution kernel size is set to 3 by default, and can be
+      customized by the 'conv_kernel_size' parameter (similarly,
+      'conv_kernel_size' should be set to -1 if 'from_layer' is specified).
+      The created convolution operation will be a normal 2D convolution by
+      default, and a depthwise convolution followed by a 1x1 convolution if
+      'use_depthwise' is set to True.
+    depth_multiplier: Depth multiplier for convolutional layers.
+    min_depth: Minimum depth for convolutional layers.
+    insert_1x1_conv: A boolean indicating whether an additional 1x1 convolution
+      should be inserted before shrinking the feature map.
+    image_features: A dictionary of handles to activation tensors from the
+      base feature extractor.
+    pool_residual: Whether to add an average pooling layer followed by a
+      residual connection between subsequent feature maps when the channel
+      depths match. For example, with option 'layer_depth': [-1, 512, 256, 256],
+      a pooling and residual layer is added between the third and fourth
+      feature maps. This option is best used with the Weight Shared Convolution
+      Box Predictor when all feature maps have the same channel depth, to
+      encourage more consistent features across multi-scale feature maps.
+
+  Returns:
+    feature_maps: an OrderedDict mapping keys (feature map names) to
+      tensors where each tensor has shape [batch, height_i, width_i, depth_i].
+
+  Raises:
+    ValueError: if the number of entries in 'from_layer' and
+      'layer_depth' do not match.
+    ValueError: if the generated layer does not have the same resolution
+      as specified.
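+
+  Example usage (an illustrative sketch; the backbone layer names and tensor
+  shapes below are placeholders taken from the Inception V3 example above,
+  not values required by this function):
+
+    image_features = {
+        'Mixed_5d': tf.ones([4, 35, 35, 256]),
+        'Mixed_6e': tf.ones([4, 17, 17, 576]),
+        'Mixed_7c': tf.ones([4, 8, 8, 1024]),
+    }
+    feature_maps = multi_resolution_feature_maps(
+        feature_map_layout={
+            'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
+            'layer_depth': [-1, -1, -1, 512, 256, 128]
+        },
+        depth_multiplier=1,
+        min_depth=32,
+        insert_1x1_conv=True,
+        image_features=image_features)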
+ """ + depth_fn = get_depth_fn(depth_multiplier, min_depth) + + feature_map_keys = [] + feature_maps = [] + base_from_layer = '' + use_explicit_padding = False + if 'use_explicit_padding' in feature_map_layout: + use_explicit_padding = feature_map_layout['use_explicit_padding'] + use_depthwise = False + if 'use_depthwise' in feature_map_layout: + use_depthwise = feature_map_layout['use_depthwise'] + for index, from_layer in enumerate(feature_map_layout['from_layer']): + layer_depth = feature_map_layout['layer_depth'][index] + conv_kernel_size = 3 + if 'conv_kernel_size' in feature_map_layout: + conv_kernel_size = feature_map_layout['conv_kernel_size'][index] + if from_layer: + feature_map = image_features[from_layer] + base_from_layer = from_layer + feature_map_keys.append(from_layer) + else: + pre_layer = feature_maps[-1] + pre_layer_depth = pre_layer.get_shape().as_list()[3] + intermediate_layer = pre_layer + if insert_1x1_conv: + layer_name = '{}_1_Conv2d_{}_1x1_{}'.format( + base_from_layer, index, depth_fn(layer_depth // 2)) + intermediate_layer = slim.conv2d( + pre_layer, + depth_fn(layer_depth // 2), [1, 1], + padding='SAME', + stride=1, + scope=layer_name) + layer_name = '{}_2_Conv2d_{}_{}x{}_s2_{}'.format( + base_from_layer, index, conv_kernel_size, conv_kernel_size, + depth_fn(layer_depth)) + stride = 2 + padding = 'SAME' + if use_explicit_padding: + padding = 'VALID' + intermediate_layer = ops.fixed_padding( + intermediate_layer, conv_kernel_size) + if use_depthwise: + feature_map = slim.separable_conv2d( + intermediate_layer, + None, [conv_kernel_size, conv_kernel_size], + depth_multiplier=1, + padding=padding, + stride=stride, + scope=layer_name + '_depthwise') + feature_map = slim.conv2d( + feature_map, + depth_fn(layer_depth), [1, 1], + padding='SAME', + stride=1, + scope=layer_name) + if pool_residual and pre_layer_depth == depth_fn(layer_depth): + feature_map += slim.avg_pool2d( + pre_layer, [3, 3], + padding='SAME', + stride=2, + scope=layer_name + '_pool') + else: + feature_map = slim.conv2d( + intermediate_layer, + depth_fn(layer_depth), [conv_kernel_size, conv_kernel_size], + padding=padding, + stride=stride, + scope=layer_name) + feature_map_keys.append(layer_name) + feature_maps.append(feature_map) + return collections.OrderedDict( + [(x, y) for (x, y) in zip(feature_map_keys, feature_maps)]) + + +class KerasFpnTopDownFeatureMaps(tf.keras.Model): + """Generates Keras based `top-down` feature maps for Feature Pyramid Networks. + + See https://arxiv.org/abs/1612.03144 for details. + """ + + def __init__(self, + num_levels, + depth, + is_training, + conv_hyperparams, + freeze_batchnorm, + use_depthwise=False, + use_explicit_padding=False, + use_bounded_activations=False, + use_native_resize_op=False, + scope=None, + name=None): + """Constructor. + + Args: + num_levels: the number of image features. + depth: depth of output feature maps. + is_training: Indicates whether the feature generator is in training mode. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + freeze_batchnorm: Bool. Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + use_depthwise: whether to use depthwise separable conv instead of regular + conv. + use_explicit_padding: whether to use explicit padding. 
+ use_bounded_activations: Whether or not to clip activations to range + [-ACTIVATION_BOUND, ACTIVATION_BOUND]. Bounded activations better lend + themselves to quantized inference. + use_native_resize_op: If True, uses tf.image.resize_nearest_neighbor op + for the upsampling process instead of reshape and broadcasting + implementation. + scope: A scope name to wrap this op under. + name: A string name scope to assign to the model. If 'None', Keras + will auto-generate one from the class name. + """ + super(KerasFpnTopDownFeatureMaps, self).__init__(name=name) + + self.scope = scope if scope else 'top_down' + self.top_layers = [] + self.residual_blocks = [] + self.top_down_blocks = [] + self.reshape_blocks = [] + self.conv_layers = [] + + padding = 'VALID' if use_explicit_padding else 'SAME' + stride = 1 + kernel_size = 3 + def clip_by_value(features): + return tf.clip_by_value(features, -ACTIVATION_BOUND, ACTIVATION_BOUND) + + # top layers + self.top_layers.append(tf.keras.layers.Conv2D( + depth, [1, 1], strides=stride, padding=padding, + name='projection_%d' % num_levels, + **conv_hyperparams.params(use_bias=True))) + if use_bounded_activations: + self.top_layers.append(tf.keras.layers.Lambda( + clip_by_value, name='clip_by_value')) + + for level in reversed(list(range(num_levels - 1))): + # to generate residual from image features + residual_net = [] + # to preprocess top_down (the image feature map from last layer) + top_down_net = [] + # to reshape top_down according to residual if necessary + reshaped_residual = [] + # to apply convolution layers to feature map + conv_net = [] + + # residual block + residual_net.append(tf.keras.layers.Conv2D( + depth, [1, 1], padding=padding, strides=1, + name='projection_%d' % (level + 1), + **conv_hyperparams.params(use_bias=True))) + if use_bounded_activations: + residual_net.append(tf.keras.layers.Lambda( + clip_by_value, name='clip_by_value')) + + # top-down block + # TODO (b/128922690): clean-up of ops.nearest_neighbor_upsampling + if use_native_resize_op: + def resize_nearest_neighbor(image): + image_shape = shape_utils.combined_static_and_dynamic_shape(image) + return tf.image.resize_nearest_neighbor( + image, [image_shape[1] * 2, image_shape[2] * 2]) + top_down_net.append(tf.keras.layers.Lambda( + resize_nearest_neighbor, name='nearest_neighbor_upsampling')) + else: + def nearest_neighbor_upsampling(image): + return ops.nearest_neighbor_upsampling(image, scale=2) + top_down_net.append(tf.keras.layers.Lambda( + nearest_neighbor_upsampling, name='nearest_neighbor_upsampling')) + + # reshape block + if use_explicit_padding: + def reshape(inputs): + residual_shape = tf.shape(inputs[0]) + return inputs[1][:, :residual_shape[1], :residual_shape[2], :] + reshaped_residual.append( + tf.keras.layers.Lambda(reshape, name='reshape')) + + # down layers + if use_bounded_activations: + conv_net.append(tf.keras.layers.Lambda( + clip_by_value, name='clip_by_value')) + + if use_explicit_padding: + def fixed_padding(features, kernel_size=kernel_size): + return ops.fixed_padding(features, kernel_size) + conv_net.append(tf.keras.layers.Lambda( + fixed_padding, name='fixed_padding')) + + layer_name = 'smoothing_%d' % (level + 1) + conv_block = create_conv_block( + use_depthwise, kernel_size, padding, stride, layer_name, + conv_hyperparams, is_training, freeze_batchnorm, depth) + conv_net.extend(conv_block) + + self.residual_blocks.append(residual_net) + self.top_down_blocks.append(top_down_net) + self.reshape_blocks.append(reshaped_residual) + 
self.conv_layers.append(conv_net) + + def call(self, image_features): + """Generate the multi-resolution feature maps. + + Executed when calling the `.__call__` method on input. + + Args: + image_features: list of tuples of (tensor_name, image_feature_tensor). + Spatial resolutions of succesive tensors must reduce exactly by a factor + of 2. + + Returns: + feature_maps: an OrderedDict mapping keys (feature map names) to + tensors where each tensor has shape [batch, height_i, width_i, depth_i]. + """ + output_feature_maps_list = [] + output_feature_map_keys = [] + + with tf.name_scope(self.scope): + top_down = image_features[-1][1] + for layer in self.top_layers: + top_down = layer(top_down) + output_feature_maps_list.append(top_down) + output_feature_map_keys.append('top_down_%s' % image_features[-1][0]) + + num_levels = len(image_features) + for index, level in enumerate(reversed(list(range(num_levels - 1)))): + residual = image_features[level][1] + top_down = output_feature_maps_list[-1] + for layer in self.residual_blocks[index]: + residual = layer(residual) + for layer in self.top_down_blocks[index]: + top_down = layer(top_down) + for layer in self.reshape_blocks[index]: + top_down = layer([residual, top_down]) + top_down += residual + for layer in self.conv_layers[index]: + top_down = layer(top_down) + output_feature_maps_list.append(top_down) + output_feature_map_keys.append('top_down_%s' % image_features[level][0]) + return collections.OrderedDict(reversed( + list(zip(output_feature_map_keys, output_feature_maps_list)))) + + +def fpn_top_down_feature_maps(image_features, + depth, + use_depthwise=False, + use_explicit_padding=False, + use_bounded_activations=False, + scope=None, + use_native_resize_op=False): + """Generates `top-down` feature maps for Feature Pyramid Networks. + + See https://arxiv.org/abs/1612.03144 for details. + + Args: + image_features: list of tuples of (tensor_name, image_feature_tensor). + Spatial resolutions of succesive tensors must reduce exactly by a factor + of 2. + depth: depth of output feature maps. + use_depthwise: whether to use depthwise separable conv instead of regular + conv. + use_explicit_padding: whether to use explicit padding. + use_bounded_activations: Whether or not to clip activations to range + [-ACTIVATION_BOUND, ACTIVATION_BOUND]. Bounded activations better lend + themselves to quantized inference. + scope: A scope name to wrap this op under. + use_native_resize_op: If True, uses tf.image.resize_nearest_neighbor op for + the upsampling process instead of reshape and broadcasting implementation. + + Returns: + feature_maps: an OrderedDict mapping keys (feature map names) to + tensors where each tensor has shape [batch, height_i, width_i, depth_i]. 
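+
+  Example usage (an illustrative sketch; the block names and tensor shapes are
+  placeholders, chosen only so that successive spatial resolutions reduce by a
+  factor of 2):
+
+    image_features = [
+        ('block2', tf.ones([4, 8, 8, 256])),
+        ('block3', tf.ones([4, 4, 4, 256])),
+        ('block4', tf.ones([4, 2, 2, 256])),
+        ('block5', tf.ones([4, 1, 1, 256])),
+    ]
+    fpn_features = fpn_top_down_feature_maps(image_features, depth=128)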
+ """ + with tf.name_scope(scope, 'top_down'): + num_levels = len(image_features) + output_feature_maps_list = [] + output_feature_map_keys = [] + padding = 'VALID' if use_explicit_padding else 'SAME' + kernel_size = 3 + with slim.arg_scope( + [slim.conv2d, slim.separable_conv2d], padding=padding, stride=1): + top_down = slim.conv2d( + image_features[-1][1], + depth, [1, 1], activation_fn=None, normalizer_fn=None, + scope='projection_%d' % num_levels) + if use_bounded_activations: + top_down = tf.clip_by_value(top_down, -ACTIVATION_BOUND, + ACTIVATION_BOUND) + output_feature_maps_list.append(top_down) + output_feature_map_keys.append( + 'top_down_%s' % image_features[-1][0]) + + for level in reversed(list(range(num_levels - 1))): + if use_native_resize_op: + with tf.name_scope('nearest_neighbor_upsampling'): + top_down_shape = shape_utils.combined_static_and_dynamic_shape( + top_down) + top_down = tf.image.resize_nearest_neighbor( + top_down, [top_down_shape[1] * 2, top_down_shape[2] * 2]) + else: + top_down = ops.nearest_neighbor_upsampling(top_down, scale=2) + residual = slim.conv2d( + image_features[level][1], depth, [1, 1], + activation_fn=None, normalizer_fn=None, + scope='projection_%d' % (level + 1)) + if use_bounded_activations: + residual = tf.clip_by_value(residual, -ACTIVATION_BOUND, + ACTIVATION_BOUND) + if use_explicit_padding: + # slice top_down to the same shape as residual + residual_shape = tf.shape(residual) + top_down = top_down[:, :residual_shape[1], :residual_shape[2], :] + top_down += residual + if use_bounded_activations: + top_down = tf.clip_by_value(top_down, -ACTIVATION_BOUND, + ACTIVATION_BOUND) + if use_depthwise: + conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1) + else: + conv_op = slim.conv2d + pre_output = top_down + if use_explicit_padding: + pre_output = ops.fixed_padding(pre_output, kernel_size) + output_feature_maps_list.append(conv_op( + pre_output, + depth, [kernel_size, kernel_size], + scope='smoothing_%d' % (level + 1))) + output_feature_map_keys.append('top_down_%s' % image_features[level][0]) + return collections.OrderedDict(reversed( + list(zip(output_feature_map_keys, output_feature_maps_list)))) + + +def pooling_pyramid_feature_maps(base_feature_map_depth, num_layers, + image_features, replace_pool_with_conv=False): + """Generates pooling pyramid feature maps. + + The pooling pyramid feature maps is motivated by + multi_resolution_feature_maps. The main difference are that it is simpler and + reduces the number of free parameters. + + More specifically: + - Instead of using convolutions to shrink the feature map, it uses max + pooling, therefore totally gets rid of the parameters in convolution. + - By pooling feature from larger map up to a single cell, it generates + features in the same feature space. + - Instead of independently making box predictions from individual maps, it + shares the same classifier across different feature maps, therefore reduces + the "mis-calibration" across different scales. + + See go/ppn-detection for more details. + + Args: + base_feature_map_depth: Depth of the base feature before the max pooling. + num_layers: Number of layers used to make predictions. They are pooled + from the base feature. + image_features: A dictionary of handles to activation tensors from the + feature extractor. + replace_pool_with_conv: Whether or not to replace pooling operations with + convolutions in the PPN. Default is False. 
+ + Returns: + feature_maps: an OrderedDict mapping keys (feature map names) to + tensors where each tensor has shape [batch, height_i, width_i, depth_i]. + Raises: + ValueError: image_features does not contain exactly one entry + """ + if len(image_features) != 1: + raise ValueError('image_features should be a dictionary of length 1.') + image_features = image_features[list(image_features.keys())[0]] + + feature_map_keys = [] + feature_maps = [] + feature_map_key = 'Base_Conv2d_1x1_%d' % base_feature_map_depth + if base_feature_map_depth > 0: + image_features = slim.conv2d( + image_features, + base_feature_map_depth, + [1, 1], # kernel size + padding='SAME', stride=1, scope=feature_map_key) + # Add a 1x1 max-pooling node (a no op node) immediately after the conv2d for + # TPU v1 compatibility. Without the following dummy op, TPU runtime + # compiler will combine the convolution with one max-pooling below into a + # single cycle, so getting the conv2d feature becomes impossible. + image_features = slim.max_pool2d( + image_features, [1, 1], padding='SAME', stride=1, scope=feature_map_key) + feature_map_keys.append(feature_map_key) + feature_maps.append(image_features) + feature_map = image_features + if replace_pool_with_conv: + with slim.arg_scope([slim.conv2d], padding='SAME', stride=2): + for i in range(num_layers - 1): + feature_map_key = 'Conv2d_{}_3x3_s2_{}'.format(i, + base_feature_map_depth) + feature_map = slim.conv2d( + feature_map, base_feature_map_depth, [3, 3], scope=feature_map_key) + feature_map_keys.append(feature_map_key) + feature_maps.append(feature_map) + else: + with slim.arg_scope([slim.max_pool2d], padding='SAME', stride=2): + for i in range(num_layers - 1): + feature_map_key = 'MaxPool2d_%d_2x2' % i + feature_map = slim.max_pool2d( + feature_map, [2, 2], padding='SAME', scope=feature_map_key) + feature_map_keys.append(feature_map_key) + feature_maps.append(feature_map) + return collections.OrderedDict( + [(x, y) for (x, y) in zip(feature_map_keys, feature_maps)]) diff --git a/models/research/object_detection/models/feature_map_generators_test.py b/models/research/object_detection/models/feature_map_generators_test.py new file mode 100644 index 0000000000000000000000000000000000000000..951e7760bd8a42afb19f61b6c6bc1c1f744d74dd --- /dev/null +++ b/models/research/object_detection/models/feature_map_generators_test.py @@ -0,0 +1,842 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for feature map generators.""" +import unittest +from absl.testing import parameterized + +import numpy as np +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format + +from object_detection.builders import hyperparams_builder +from object_detection.models import feature_map_generators +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import test_utils +from object_detection.utils import tf_version + +INCEPTION_V2_LAYOUT = { + 'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''], + 'layer_depth': [-1, -1, -1, 512, 256, 256], + 'anchor_strides': [16, 32, 64, -1, -1, -1], + 'layer_target_norm': [20.0, -1, -1, -1, -1, -1], +} + +INCEPTION_V3_LAYOUT = { + 'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''], + 'layer_depth': [-1, -1, -1, 512, 256, 128], + 'anchor_strides': [16, 32, 64, -1, -1, -1], + 'aspect_ratios': [1.0, 2.0, 1.0/2, 3.0, 1.0/3] +} + +EMBEDDED_SSD_MOBILENET_V1_LAYOUT = { + 'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', ''], + 'layer_depth': [-1, -1, 512, 256, 256], + 'conv_kernel_size': [-1, -1, 3, 3, 2], +} + +SSD_MOBILENET_V1_WEIGHT_SHARED_LAYOUT = { + 'from_layer': ['Conv2d_13_pointwise', '', '', ''], + 'layer_depth': [-1, 256, 256, 256], +} + + +class MultiResolutionFeatureMapGeneratorTest(test_case.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def _build_feature_map_generator(self, feature_map_layout, + pool_residual=False): + if tf_version.is_tf2(): + return feature_map_generators.KerasMultiResolutionFeatureMaps( + feature_map_layout=feature_map_layout, + depth_multiplier=1, + min_depth=32, + insert_1x1_conv=True, + freeze_batchnorm=False, + is_training=True, + conv_hyperparams=self._build_conv_hyperparams(), + name='FeatureMaps' + ) + else: + def feature_map_generator(image_features): + return feature_map_generators.multi_resolution_feature_maps( + feature_map_layout=feature_map_layout, + depth_multiplier=1, + min_depth=32, + insert_1x1_conv=True, + image_features=image_features, + pool_residual=pool_residual) + return feature_map_generator + + def test_get_expected_feature_map_shapes_with_inception_v2(self): + with test_utils.GraphContextOrNone() as g: + image_features = { + 'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32), + 'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32), + 'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32) + } + feature_map_generator = self._build_feature_map_generator( + feature_map_layout=INCEPTION_V2_LAYOUT) + def graph_fn(): + feature_maps = feature_map_generator(image_features) + return feature_maps + + expected_feature_map_shapes = { + 'Mixed_3c': (4, 28, 28, 256), + 'Mixed_4c': (4, 14, 14, 576), + 'Mixed_5c': (4, 7, 7, 1024), + 'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512), + 'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256), + 'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)} + out_feature_maps = self.execute(graph_fn, [], g) + out_feature_map_shapes = dict( + (key, value.shape) for key, value in out_feature_maps.items()) + 
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) + + def test_get_expected_feature_map_shapes_with_inception_v2_use_depthwise( + self): + with test_utils.GraphContextOrNone() as g: + image_features = { + 'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32), + 'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32), + 'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32) + } + layout_copy = INCEPTION_V2_LAYOUT.copy() + layout_copy['use_depthwise'] = True + feature_map_generator = self._build_feature_map_generator( + feature_map_layout=layout_copy) + def graph_fn(): + return feature_map_generator(image_features) + + expected_feature_map_shapes = { + 'Mixed_3c': (4, 28, 28, 256), + 'Mixed_4c': (4, 14, 14, 576), + 'Mixed_5c': (4, 7, 7, 1024), + 'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512), + 'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256), + 'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)} + out_feature_maps = self.execute(graph_fn, [], g) + out_feature_map_shapes = dict( + (key, value.shape) for key, value in out_feature_maps.items()) + self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) + + def test_get_expected_feature_map_shapes_use_explicit_padding(self): + with test_utils.GraphContextOrNone() as g: + image_features = { + 'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32), + 'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32), + 'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32) + } + layout_copy = INCEPTION_V2_LAYOUT.copy() + layout_copy['use_explicit_padding'] = True + feature_map_generator = self._build_feature_map_generator( + feature_map_layout=layout_copy, + ) + def graph_fn(): + return feature_map_generator(image_features) + + expected_feature_map_shapes = { + 'Mixed_3c': (4, 28, 28, 256), + 'Mixed_4c': (4, 14, 14, 576), + 'Mixed_5c': (4, 7, 7, 1024), + 'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512), + 'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256), + 'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)} + out_feature_maps = self.execute(graph_fn, [], g) + out_feature_map_shapes = dict( + (key, value.shape) for key, value in out_feature_maps.items()) + self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) + + def test_get_expected_feature_map_shapes_with_inception_v3(self): + with test_utils.GraphContextOrNone() as g: + image_features = { + 'Mixed_5d': tf.random_uniform([4, 35, 35, 256], dtype=tf.float32), + 'Mixed_6e': tf.random_uniform([4, 17, 17, 576], dtype=tf.float32), + 'Mixed_7c': tf.random_uniform([4, 8, 8, 1024], dtype=tf.float32) + } + + feature_map_generator = self._build_feature_map_generator( + feature_map_layout=INCEPTION_V3_LAYOUT, + ) + def graph_fn(): + return feature_map_generator(image_features) + + expected_feature_map_shapes = { + 'Mixed_5d': (4, 35, 35, 256), + 'Mixed_6e': (4, 17, 17, 576), + 'Mixed_7c': (4, 8, 8, 1024), + 'Mixed_7c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512), + 'Mixed_7c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256), + 'Mixed_7c_2_Conv2d_5_3x3_s2_128': (4, 1, 1, 128)} + out_feature_maps = self.execute(graph_fn, [], g) + out_feature_map_shapes = dict( + (key, value.shape) for key, value in out_feature_maps.items()) + self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) + + def test_get_expected_feature_map_shapes_with_embedded_ssd_mobilenet_v1( + self): + with test_utils.GraphContextOrNone() as g: + image_features = { + 'Conv2d_11_pointwise': tf.random_uniform([4, 16, 16, 512], + 
dtype=tf.float32), + 'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024], + dtype=tf.float32), + } + + feature_map_generator = self._build_feature_map_generator( + feature_map_layout=EMBEDDED_SSD_MOBILENET_V1_LAYOUT, + ) + def graph_fn(): + return feature_map_generator(image_features) + + expected_feature_map_shapes = { + 'Conv2d_11_pointwise': (4, 16, 16, 512), + 'Conv2d_13_pointwise': (4, 8, 8, 1024), + 'Conv2d_13_pointwise_2_Conv2d_2_3x3_s2_512': (4, 4, 4, 512), + 'Conv2d_13_pointwise_2_Conv2d_3_3x3_s2_256': (4, 2, 2, 256), + 'Conv2d_13_pointwise_2_Conv2d_4_2x2_s2_256': (4, 1, 1, 256)} + out_feature_maps = self.execute(graph_fn, [], g) + out_feature_map_shapes = dict( + (key, value.shape) for key, value in out_feature_maps.items()) + self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) + + def test_feature_map_shapes_with_pool_residual_ssd_mobilenet_v1( + self): + with test_utils.GraphContextOrNone() as g: + image_features = { + 'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024], + dtype=tf.float32), + } + + feature_map_generator = self._build_feature_map_generator( + feature_map_layout=SSD_MOBILENET_V1_WEIGHT_SHARED_LAYOUT, + pool_residual=True + ) + def graph_fn(): + return feature_map_generator(image_features) + + expected_feature_map_shapes = { + 'Conv2d_13_pointwise': (4, 8, 8, 1024), + 'Conv2d_13_pointwise_2_Conv2d_1_3x3_s2_256': (4, 4, 4, 256), + 'Conv2d_13_pointwise_2_Conv2d_2_3x3_s2_256': (4, 2, 2, 256), + 'Conv2d_13_pointwise_2_Conv2d_3_3x3_s2_256': (4, 1, 1, 256)} + out_feature_maps = self.execute(graph_fn, [], g) + out_feature_map_shapes = dict( + (key, value.shape) for key, value in out_feature_maps.items()) + self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) + + def test_get_expected_variable_names_with_inception_v2(self): + with test_utils.GraphContextOrNone() as g: + image_features = { + 'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32), + 'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32), + 'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32) + } + feature_map_generator = self._build_feature_map_generator( + feature_map_layout=INCEPTION_V2_LAYOUT, + ) + def graph_fn(): + return feature_map_generator(image_features) + + self.execute(graph_fn, [], g) + expected_slim_variables = set([ + 'Mixed_5c_1_Conv2d_3_1x1_256/weights', + 'Mixed_5c_1_Conv2d_3_1x1_256/biases', + 'Mixed_5c_2_Conv2d_3_3x3_s2_512/weights', + 'Mixed_5c_2_Conv2d_3_3x3_s2_512/biases', + 'Mixed_5c_1_Conv2d_4_1x1_128/weights', + 'Mixed_5c_1_Conv2d_4_1x1_128/biases', + 'Mixed_5c_2_Conv2d_4_3x3_s2_256/weights', + 'Mixed_5c_2_Conv2d_4_3x3_s2_256/biases', + 'Mixed_5c_1_Conv2d_5_1x1_128/weights', + 'Mixed_5c_1_Conv2d_5_1x1_128/biases', + 'Mixed_5c_2_Conv2d_5_3x3_s2_256/weights', + 'Mixed_5c_2_Conv2d_5_3x3_s2_256/biases', + ]) + + expected_keras_variables = set([ + 'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/kernel', + 'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/bias', + 'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/kernel', + 'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/bias', + 'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/kernel', + 'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/bias', + 'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/kernel', + 'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/bias', + 'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/kernel', + 'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/bias', + 'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/kernel', + 
'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/bias', + ]) + + if tf_version.is_tf2(): + actual_variable_set = set( + [var.name.split(':')[0] for var in feature_map_generator.variables]) + self.assertSetEqual(expected_keras_variables, actual_variable_set) + else: + with g.as_default(): + actual_variable_set = set( + [var.op.name for var in tf.trainable_variables()]) + self.assertSetEqual(expected_slim_variables, actual_variable_set) + + def test_get_expected_variable_names_with_inception_v2_use_depthwise( + self): + with test_utils.GraphContextOrNone() as g: + image_features = { + 'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32), + 'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32), + 'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32) + } + layout_copy = INCEPTION_V2_LAYOUT.copy() + layout_copy['use_depthwise'] = True + feature_map_generator = self._build_feature_map_generator( + feature_map_layout=layout_copy, + ) + def graph_fn(): + return feature_map_generator(image_features) + self.execute(graph_fn, [], g) + + expected_slim_variables = set([ + 'Mixed_5c_1_Conv2d_3_1x1_256/weights', + 'Mixed_5c_1_Conv2d_3_1x1_256/biases', + 'Mixed_5c_2_Conv2d_3_3x3_s2_512_depthwise/depthwise_weights', + 'Mixed_5c_2_Conv2d_3_3x3_s2_512_depthwise/biases', + 'Mixed_5c_2_Conv2d_3_3x3_s2_512/weights', + 'Mixed_5c_2_Conv2d_3_3x3_s2_512/biases', + 'Mixed_5c_1_Conv2d_4_1x1_128/weights', + 'Mixed_5c_1_Conv2d_4_1x1_128/biases', + 'Mixed_5c_2_Conv2d_4_3x3_s2_256_depthwise/depthwise_weights', + 'Mixed_5c_2_Conv2d_4_3x3_s2_256_depthwise/biases', + 'Mixed_5c_2_Conv2d_4_3x3_s2_256/weights', + 'Mixed_5c_2_Conv2d_4_3x3_s2_256/biases', + 'Mixed_5c_1_Conv2d_5_1x1_128/weights', + 'Mixed_5c_1_Conv2d_5_1x1_128/biases', + 'Mixed_5c_2_Conv2d_5_3x3_s2_256_depthwise/depthwise_weights', + 'Mixed_5c_2_Conv2d_5_3x3_s2_256_depthwise/biases', + 'Mixed_5c_2_Conv2d_5_3x3_s2_256/weights', + 'Mixed_5c_2_Conv2d_5_3x3_s2_256/biases', + ]) + + expected_keras_variables = set([ + 'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/kernel', + 'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/bias', + ('FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_depthwise_conv/' + 'depthwise_kernel'), + ('FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_depthwise_conv/' + 'bias'), + 'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/kernel', + 'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/bias', + 'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/kernel', + 'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/bias', + ('FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_depthwise_conv/' + 'depthwise_kernel'), + ('FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_depthwise_conv/' + 'bias'), + 'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/kernel', + 'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/bias', + 'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/kernel', + 'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/bias', + ('FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_depthwise_conv/' + 'depthwise_kernel'), + ('FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_depthwise_conv/' + 'bias'), + 'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/kernel', + 'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/bias', + ]) + + if tf_version.is_tf2(): + actual_variable_set = set( + [var.name.split(':')[0] for var in feature_map_generator.variables]) + self.assertSetEqual(expected_keras_variables, actual_variable_set) + else: + with g.as_default(): + actual_variable_set = set( + [var.op.name for var in tf.trainable_variables()]) + self.assertSetEqual(expected_slim_variables, 
actual_variable_set) + + +@parameterized.parameters({'use_native_resize_op': True}, + {'use_native_resize_op': False}) +class FPNFeatureMapGeneratorTest(test_case.TestCase, parameterized.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def _build_feature_map_generator( + self, image_features, depth, use_bounded_activations=False, + use_native_resize_op=False, use_explicit_padding=False, + use_depthwise=False): + if tf_version.is_tf2(): + return feature_map_generators.KerasFpnTopDownFeatureMaps( + num_levels=len(image_features), + depth=depth, + is_training=True, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + use_depthwise=use_depthwise, + use_explicit_padding=use_explicit_padding, + use_bounded_activations=use_bounded_activations, + use_native_resize_op=use_native_resize_op, + scope=None, + name='FeatureMaps', + ) + else: + def feature_map_generator(image_features): + return feature_map_generators.fpn_top_down_feature_maps( + image_features=image_features, + depth=depth, + use_depthwise=use_depthwise, + use_explicit_padding=use_explicit_padding, + use_bounded_activations=use_bounded_activations, + use_native_resize_op=use_native_resize_op) + return feature_map_generator + + def test_get_expected_feature_map_shapes( + self, use_native_resize_op): + with test_utils.GraphContextOrNone() as g: + image_features = [ + ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)), + ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)), + ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)), + ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32)) + ] + feature_map_generator = self._build_feature_map_generator( + image_features=image_features, + depth=128, + use_native_resize_op=use_native_resize_op) + def graph_fn(): + return feature_map_generator(image_features) + + expected_feature_map_shapes = { + 'top_down_block2': (4, 8, 8, 128), + 'top_down_block3': (4, 4, 4, 128), + 'top_down_block4': (4, 2, 2, 128), + 'top_down_block5': (4, 1, 1, 128) + } + out_feature_maps = self.execute(graph_fn, [], g) + out_feature_map_shapes = dict( + (key, value.shape) for key, value in out_feature_maps.items()) + self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) + + def test_get_expected_feature_map_shapes_with_explicit_padding( + self, use_native_resize_op): + with test_utils.GraphContextOrNone() as g: + image_features = [ + ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)), + ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)), + ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)), + ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32)) + ] + feature_map_generator = self._build_feature_map_generator( + image_features=image_features, + depth=128, + use_explicit_padding=True, + use_native_resize_op=use_native_resize_op) + def graph_fn(): + return feature_map_generator(image_features) + + expected_feature_map_shapes = { + 'top_down_block2': (4, 8, 8, 128), + 'top_down_block3': (4, 4, 4, 128), + 'top_down_block4': (4, 2, 2, 128), + 'top_down_block5': (4, 1, 1, 128) + } + out_feature_maps = self.execute(graph_fn, [], g) + out_feature_map_shapes = dict( + 
(key, value.shape) for key, value in out_feature_maps.items()) + self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) + + @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') + def test_use_bounded_activations_add_operations( + self, use_native_resize_op): + with test_utils.GraphContextOrNone() as g: + image_features = [('block2', + tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)), + ('block3', + tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)), + ('block4', + tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)), + ('block5', + tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))] + feature_map_generator = self._build_feature_map_generator( + image_features=image_features, + depth=128, + use_bounded_activations=True, + use_native_resize_op=use_native_resize_op) + def graph_fn(): + return feature_map_generator(image_features) + self.execute(graph_fn, [], g) + expected_added_operations = dict.fromkeys([ + 'top_down/clip_by_value', 'top_down/clip_by_value_1', + 'top_down/clip_by_value_2', 'top_down/clip_by_value_3', + 'top_down/clip_by_value_4', 'top_down/clip_by_value_5', + 'top_down/clip_by_value_6' + ]) + op_names = {op.name: None for op in g.get_operations()} + self.assertDictContainsSubset(expected_added_operations, op_names) + + @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') + def test_use_bounded_activations_clip_value( + self, use_native_resize_op): + tf_graph = tf.Graph() + with tf_graph.as_default(): + image_features = [ + ('block2', 255 * tf.ones([4, 8, 8, 256], dtype=tf.float32)), + ('block3', 255 * tf.ones([4, 4, 4, 256], dtype=tf.float32)), + ('block4', 255 * tf.ones([4, 2, 2, 256], dtype=tf.float32)), + ('block5', 255 * tf.ones([4, 1, 1, 256], dtype=tf.float32)) + ] + feature_map_generator = self._build_feature_map_generator( + image_features=image_features, + depth=128, + use_bounded_activations=True, + use_native_resize_op=use_native_resize_op) + feature_map_generator(image_features) + + expected_clip_by_value_ops = [ + 'top_down/clip_by_value', 'top_down/clip_by_value_1', + 'top_down/clip_by_value_2', 'top_down/clip_by_value_3', + 'top_down/clip_by_value_4', 'top_down/clip_by_value_5', + 'top_down/clip_by_value_6' + ] + + # Gathers activation tensors before and after clip_by_value operations. + activations = {} + for clip_by_value_op in expected_clip_by_value_ops: + clip_input_tensor = tf_graph.get_operation_by_name( + '{}/Minimum'.format(clip_by_value_op)).inputs[0] + clip_output_tensor = tf_graph.get_tensor_by_name( + '{}:0'.format(clip_by_value_op)) + activations.update({ + 'before_{}'.format(clip_by_value_op): clip_input_tensor, + 'after_{}'.format(clip_by_value_op): clip_output_tensor, + }) + + expected_lower_bound = -feature_map_generators.ACTIVATION_BOUND + expected_upper_bound = feature_map_generators.ACTIVATION_BOUND + init_op = tf.global_variables_initializer() + with self.test_session() as session: + session.run(init_op) + activations_output = session.run(activations) + for clip_by_value_op in expected_clip_by_value_ops: + # Before clipping, activations are beyound the expected bound because + # of large input image_features values. 
+ activations_before_clipping = ( + activations_output['before_{}'.format(clip_by_value_op)]) + before_clipping_lower_bound = np.amin(activations_before_clipping) + before_clipping_upper_bound = np.amax(activations_before_clipping) + self.assertLessEqual(before_clipping_lower_bound, + expected_lower_bound) + self.assertGreaterEqual(before_clipping_upper_bound, + expected_upper_bound) + + # After clipping, activations are bounded as expectation. + activations_after_clipping = ( + activations_output['after_{}'.format(clip_by_value_op)]) + after_clipping_lower_bound = np.amin(activations_after_clipping) + after_clipping_upper_bound = np.amax(activations_after_clipping) + self.assertGreaterEqual(after_clipping_lower_bound, + expected_lower_bound) + self.assertLessEqual(after_clipping_upper_bound, expected_upper_bound) + + def test_get_expected_feature_map_shapes_with_depthwise( + self, use_native_resize_op): + with test_utils.GraphContextOrNone() as g: + image_features = [ + ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)), + ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)), + ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)), + ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32)) + ] + feature_map_generator = self._build_feature_map_generator( + image_features=image_features, + depth=128, + use_depthwise=True, + use_native_resize_op=use_native_resize_op) + def graph_fn(): + return feature_map_generator(image_features) + + expected_feature_map_shapes = { + 'top_down_block2': (4, 8, 8, 128), + 'top_down_block3': (4, 4, 4, 128), + 'top_down_block4': (4, 2, 2, 128), + 'top_down_block5': (4, 1, 1, 128) + } + out_feature_maps = self.execute(graph_fn, [], g) + out_feature_map_shapes = dict( + (key, value.shape) for key, value in out_feature_maps.items()) + self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) + + def test_get_expected_variable_names( + self, use_native_resize_op): + with test_utils.GraphContextOrNone() as g: + image_features = [ + ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)), + ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)), + ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)), + ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32)) + ] + feature_map_generator = self._build_feature_map_generator( + image_features=image_features, + depth=128, + use_native_resize_op=use_native_resize_op) + def graph_fn(): + return feature_map_generator(image_features) + self.execute(graph_fn, [], g) + expected_slim_variables = set([ + 'projection_1/weights', + 'projection_1/biases', + 'projection_2/weights', + 'projection_2/biases', + 'projection_3/weights', + 'projection_3/biases', + 'projection_4/weights', + 'projection_4/biases', + 'smoothing_1/weights', + 'smoothing_1/biases', + 'smoothing_2/weights', + 'smoothing_2/biases', + 'smoothing_3/weights', + 'smoothing_3/biases', + ]) + + expected_keras_variables = set([ + 'FeatureMaps/top_down/projection_1/kernel', + 'FeatureMaps/top_down/projection_1/bias', + 'FeatureMaps/top_down/projection_2/kernel', + 'FeatureMaps/top_down/projection_2/bias', + 'FeatureMaps/top_down/projection_3/kernel', + 'FeatureMaps/top_down/projection_3/bias', + 'FeatureMaps/top_down/projection_4/kernel', + 'FeatureMaps/top_down/projection_4/bias', + 'FeatureMaps/top_down/smoothing_1_conv/kernel', + 'FeatureMaps/top_down/smoothing_1_conv/bias', + 'FeatureMaps/top_down/smoothing_2_conv/kernel', + 'FeatureMaps/top_down/smoothing_2_conv/bias', + 
'FeatureMaps/top_down/smoothing_3_conv/kernel', + 'FeatureMaps/top_down/smoothing_3_conv/bias' + ]) + + if tf_version.is_tf2(): + actual_variable_set = set( + [var.name.split(':')[0] for var in feature_map_generator.variables]) + self.assertSetEqual(expected_keras_variables, actual_variable_set) + else: + with g.as_default(): + actual_variable_set = set( + [var.op.name for var in tf.trainable_variables()]) + self.assertSetEqual(expected_slim_variables, actual_variable_set) + + def test_get_expected_variable_names_with_depthwise( + self, use_native_resize_op): + with test_utils.GraphContextOrNone() as g: + image_features = [ + ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)), + ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)), + ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)), + ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32)) + ] + feature_map_generator = self._build_feature_map_generator( + image_features=image_features, + depth=128, + use_depthwise=True, + use_native_resize_op=use_native_resize_op) + def graph_fn(): + return feature_map_generator(image_features) + self.execute(graph_fn, [], g) + expected_slim_variables = set([ + 'projection_1/weights', + 'projection_1/biases', + 'projection_2/weights', + 'projection_2/biases', + 'projection_3/weights', + 'projection_3/biases', + 'projection_4/weights', + 'projection_4/biases', + 'smoothing_1/depthwise_weights', + 'smoothing_1/pointwise_weights', + 'smoothing_1/biases', + 'smoothing_2/depthwise_weights', + 'smoothing_2/pointwise_weights', + 'smoothing_2/biases', + 'smoothing_3/depthwise_weights', + 'smoothing_3/pointwise_weights', + 'smoothing_3/biases', + ]) + + expected_keras_variables = set([ + 'FeatureMaps/top_down/projection_1/kernel', + 'FeatureMaps/top_down/projection_1/bias', + 'FeatureMaps/top_down/projection_2/kernel', + 'FeatureMaps/top_down/projection_2/bias', + 'FeatureMaps/top_down/projection_3/kernel', + 'FeatureMaps/top_down/projection_3/bias', + 'FeatureMaps/top_down/projection_4/kernel', + 'FeatureMaps/top_down/projection_4/bias', + 'FeatureMaps/top_down/smoothing_1_depthwise_conv/depthwise_kernel', + 'FeatureMaps/top_down/smoothing_1_depthwise_conv/pointwise_kernel', + 'FeatureMaps/top_down/smoothing_1_depthwise_conv/bias', + 'FeatureMaps/top_down/smoothing_2_depthwise_conv/depthwise_kernel', + 'FeatureMaps/top_down/smoothing_2_depthwise_conv/pointwise_kernel', + 'FeatureMaps/top_down/smoothing_2_depthwise_conv/bias', + 'FeatureMaps/top_down/smoothing_3_depthwise_conv/depthwise_kernel', + 'FeatureMaps/top_down/smoothing_3_depthwise_conv/pointwise_kernel', + 'FeatureMaps/top_down/smoothing_3_depthwise_conv/bias' + ]) + + if tf_version.is_tf2(): + actual_variable_set = set( + [var.name.split(':')[0] for var in feature_map_generator.variables]) + self.assertSetEqual(expected_keras_variables, actual_variable_set) + else: + with g.as_default(): + actual_variable_set = set( + [var.op.name for var in tf.trainable_variables()]) + self.assertSetEqual(expected_slim_variables, actual_variable_set) + + +class GetDepthFunctionTest(tf.test.TestCase): + + def test_return_min_depth_when_multiplier_is_small(self): + depth_fn = feature_map_generators.get_depth_fn(depth_multiplier=0.5, + min_depth=16) + self.assertEqual(depth_fn(16), 16) + + def test_return_correct_depth_with_multiplier(self): + depth_fn = feature_map_generators.get_depth_fn(depth_multiplier=0.5, + min_depth=16) + self.assertEqual(depth_fn(64), 32) + + +@parameterized.parameters( + {'replace_pool_with_conv': 
False}, + {'replace_pool_with_conv': True}, +) +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class PoolingPyramidFeatureMapGeneratorTest(tf.test.TestCase): + + def test_get_expected_feature_map_shapes(self, replace_pool_with_conv): + image_features = { + 'image_features': tf.random_uniform([4, 19, 19, 1024]) + } + feature_maps = feature_map_generators.pooling_pyramid_feature_maps( + base_feature_map_depth=1024, + num_layers=6, + image_features=image_features, + replace_pool_with_conv=replace_pool_with_conv) + + expected_pool_feature_map_shapes = { + 'Base_Conv2d_1x1_1024': (4, 19, 19, 1024), + 'MaxPool2d_0_2x2': (4, 10, 10, 1024), + 'MaxPool2d_1_2x2': (4, 5, 5, 1024), + 'MaxPool2d_2_2x2': (4, 3, 3, 1024), + 'MaxPool2d_3_2x2': (4, 2, 2, 1024), + 'MaxPool2d_4_2x2': (4, 1, 1, 1024), + } + + expected_conv_feature_map_shapes = { + 'Base_Conv2d_1x1_1024': (4, 19, 19, 1024), + 'Conv2d_0_3x3_s2_1024': (4, 10, 10, 1024), + 'Conv2d_1_3x3_s2_1024': (4, 5, 5, 1024), + 'Conv2d_2_3x3_s2_1024': (4, 3, 3, 1024), + 'Conv2d_3_3x3_s2_1024': (4, 2, 2, 1024), + 'Conv2d_4_3x3_s2_1024': (4, 1, 1, 1024), + } + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + out_feature_maps = sess.run(feature_maps) + out_feature_map_shapes = {key: value.shape + for key, value in out_feature_maps.items()} + if replace_pool_with_conv: + self.assertDictEqual(expected_conv_feature_map_shapes, + out_feature_map_shapes) + else: + self.assertDictEqual(expected_pool_feature_map_shapes, + out_feature_map_shapes) + + def test_get_expected_variable_names(self, replace_pool_with_conv): + image_features = { + 'image_features': tf.random_uniform([4, 19, 19, 1024]) + } + feature_maps = feature_map_generators.pooling_pyramid_feature_maps( + base_feature_map_depth=1024, + num_layers=6, + image_features=image_features, + replace_pool_with_conv=replace_pool_with_conv) + + expected_pool_variables = set([ + 'Base_Conv2d_1x1_1024/weights', + 'Base_Conv2d_1x1_1024/biases', + ]) + + expected_conv_variables = set([ + 'Base_Conv2d_1x1_1024/weights', + 'Base_Conv2d_1x1_1024/biases', + 'Conv2d_0_3x3_s2_1024/weights', + 'Conv2d_0_3x3_s2_1024/biases', + 'Conv2d_1_3x3_s2_1024/weights', + 'Conv2d_1_3x3_s2_1024/biases', + 'Conv2d_2_3x3_s2_1024/weights', + 'Conv2d_2_3x3_s2_1024/biases', + 'Conv2d_3_3x3_s2_1024/weights', + 'Conv2d_3_3x3_s2_1024/biases', + 'Conv2d_4_3x3_s2_1024/weights', + 'Conv2d_4_3x3_s2_1024/biases', + ]) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + sess.run(feature_maps) + actual_variable_set = set( + [var.op.name for var in tf.trainable_variables()]) + if replace_pool_with_conv: + self.assertSetEqual(expected_conv_variables, actual_variable_set) + else: + self.assertSetEqual(expected_pool_variables, actual_variable_set) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/keras_models/__init__.py b/models/research/object_detection/models/keras_models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/object_detection/models/keras_models/base_models/original_mobilenet_v2.py b/models/research/object_detection/models/keras_models/base_models/original_mobilenet_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..cf7f95724e86c422b568921b77dcf094d901d11f --- /dev/null +++ 
b/models/research/object_detection/models/keras_models/base_models/original_mobilenet_v2.py @@ -0,0 +1,478 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""MobileNet v2 models for Keras. + +MobileNetV2 is a general architecture and can be used for multiple use cases. +Depending on the use case, it can use different input layer size and +different width factors. This allows different width models to reduce +the number of multiply-adds and thereby +reduce inference cost on mobile devices. + +MobileNetV2 is very similar to the original MobileNet, +except that it uses inverted residual blocks with +bottlenecking features. It has a drastically lower +parameter count than the original MobileNet. +MobileNets support any input size greater +than 32 x 32, with larger image sizes +offering better performance. + +The number of parameters and number of multiply-adds +can be modified by using the `alpha` parameter, +which increases/decreases the number of filters in each layer. +By altering the image size and `alpha` parameter, +all 22 models from the paper can be built, with ImageNet weights provided. + +The paper demonstrates the performance of MobileNets using `alpha` values of +1.0 (also called 100 % MobileNet), 0.35, 0.5, 0.75, 1.0, 1.3, and 1.4 + +For each of these `alpha` values, weights for 5 different input image sizes +are provided (224, 192, 160, 128, and 96). 
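+
+An illustrative sketch of building one of the variants listed above with the
+`mobilenet_v2` constructor defined in this file (the particular `alpha` and
+input size are arbitrary choices from the table below):
+
+  model = mobilenet_v2(input_shape=(160, 160, 3), alpha=0.75,
+                       include_top=True, classes=1000)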
+ + +The following table describes the performance of +MobileNet on various input sizes: +------------------------------------------------------------------------ +MACs stands for Multiply Adds + + Classification Checkpoint| MACs (M) | Parameters (M)| Top 1 Acc| Top 5 Acc +--------------------------|------------|---------------|---------|----|------- +| [mobilenet_v2_1.4_224] | 582 | 6.06 | 75.0 | 92.5 | +| [mobilenet_v2_1.3_224] | 509 | 5.34 | 74.4 | 92.1 | +| [mobilenet_v2_1.0_224] | 300 | 3.47 | 71.8 | 91.0 | +| [mobilenet_v2_1.0_192] | 221 | 3.47 | 70.7 | 90.1 | +| [mobilenet_v2_1.0_160] | 154 | 3.47 | 68.8 | 89.0 | +| [mobilenet_v2_1.0_128] | 99 | 3.47 | 65.3 | 86.9 | +| [mobilenet_v2_1.0_96] | 56 | 3.47 | 60.3 | 83.2 | +| [mobilenet_v2_0.75_224] | 209 | 2.61 | 69.8 | 89.6 | +| [mobilenet_v2_0.75_192] | 153 | 2.61 | 68.7 | 88.9 | +| [mobilenet_v2_0.75_160] | 107 | 2.61 | 66.4 | 87.3 | +| [mobilenet_v2_0.75_128] | 69 | 2.61 | 63.2 | 85.3 | +| [mobilenet_v2_0.75_96] | 39 | 2.61 | 58.8 | 81.6 | +| [mobilenet_v2_0.5_224] | 97 | 1.95 | 65.4 | 86.4 | +| [mobilenet_v2_0.5_192] | 71 | 1.95 | 63.9 | 85.4 | +| [mobilenet_v2_0.5_160] | 50 | 1.95 | 61.0 | 83.2 | +| [mobilenet_v2_0.5_128] | 32 | 1.95 | 57.7 | 80.8 | +| [mobilenet_v2_0.5_96] | 18 | 1.95 | 51.2 | 75.8 | +| [mobilenet_v2_0.35_224] | 59 | 1.66 | 60.3 | 82.9 | +| [mobilenet_v2_0.35_192] | 43 | 1.66 | 58.2 | 81.2 | +| [mobilenet_v2_0.35_160] | 30 | 1.66 | 55.7 | 79.1 | +| [mobilenet_v2_0.35_128] | 20 | 1.66 | 50.8 | 75.0 | +| [mobilenet_v2_0.35_96] | 11 | 1.66 | 45.5 | 70.4 | + +The weights for all 16 models are obtained and translated from the Tensorflow +checkpoints from TensorFlow checkpoints found at +https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/README.md + +# Reference +This file contains building code for MobileNetV2, based on +[MobileNetV2: Inverted Residuals and Linear Bottlenecks] +(https://arxiv.org/abs/1801.04381) + +Tests comparing this model to the existing Tensorflow model can be +found at +[mobilenet_v2_keras](https://github.com/JonathanCMitchell/mobilenet_v2_keras) +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import warnings +import numpy as np +import tensorflow.compat.v1 as tf + +Model = tf.keras.Model +Input = tf.keras.layers.Input +Activation = tf.keras.layers.Activation +BatchNormalization = tf.keras.layers.BatchNormalization +Conv2D = tf.keras.layers.Conv2D +DepthwiseConv2D = tf.keras.layers.DepthwiseConv2D +GlobalAveragePooling2D = tf.keras.layers.GlobalAveragePooling2D +Add = tf.keras.layers.Add +Dense = tf.keras.layers.Dense +K = tf.keras.Backend + + +def relu6(x): + return K.relu(x, max_value=6) + + +def _obtain_input_shape( + input_shape, + default_size, + min_size, + data_format, + require_flatten): + """Internal utility to compute/validate an ImageNet model's input shape. + + Arguments: + input_shape: either None (will return the default network input shape), + or a user-provided shape to be validated. + default_size: default input width/height for the model. + min_size: minimum input width/height accepted by the model. + data_format: image data format to use. + require_flatten: whether the model is expected to + be linked to a classifier via a Flatten layer. + + Returns: + An integer shape tuple (may include None entries). + + Raises: + ValueError: in case of invalid argument values. 
+ """ + if input_shape and len(input_shape) == 3: + if data_format == 'channels_first': + if input_shape[0] not in {1, 3}: + warnings.warn( + 'This model usually expects 1 or 3 input channels. ' + 'However, it was passed an input_shape with ' + + str(input_shape[0]) + ' input channels.') + default_shape = (input_shape[0], default_size, default_size) + else: + if input_shape[-1] not in {1, 3}: + warnings.warn( + 'This model usually expects 1 or 3 input channels. ' + 'However, it was passed an input_shape with ' + + str(input_shape[-1]) + ' input channels.') + default_shape = (default_size, default_size, input_shape[-1]) + else: + if data_format == 'channels_first': + default_shape = (3, default_size, default_size) + else: + default_shape = (default_size, default_size, 3) + if input_shape: + if data_format == 'channels_first': + if input_shape is not None: + if len(input_shape) != 3: + raise ValueError( + '`input_shape` must be a tuple of three integers.') + if ((input_shape[1] is not None and input_shape[1] < min_size) or + (input_shape[2] is not None and input_shape[2] < min_size)): + raise ValueError('Input size must be at least ' + + str(min_size) + 'x' + str(min_size) + + '; got `input_shape=' + + str(input_shape) + '`') + else: + if input_shape is not None: + if len(input_shape) != 3: + raise ValueError( + '`input_shape` must be a tuple of three integers.') + if ((input_shape[0] is not None and input_shape[0] < min_size) or + (input_shape[1] is not None and input_shape[1] < min_size)): + raise ValueError('Input size must be at least ' + + str(min_size) + 'x' + str(min_size) + + '; got `input_shape=' + + str(input_shape) + '`') + else: + if require_flatten: + input_shape = default_shape + else: + if data_format == 'channels_first': + input_shape = (3, None, None) + else: + input_shape = (None, None, 3) + if require_flatten: + if None in input_shape: + raise ValueError('If `include_top` is True, ' + 'you should specify a static `input_shape`. ' + 'Got `input_shape=' + str(input_shape) + '`') + return input_shape + + +def preprocess_input(x): + """Preprocesses a numpy array encoding a batch of images. + + This function applies the "Inception" preprocessing which converts + the RGB values from [0, 255] to [-1, 1]. Note that this preprocessing + function is different from `imagenet_utils.preprocess_input()`. + + Arguments: + x: a 4D numpy array consists of RGB values within [0, 255]. + + Returns: + Preprocessed array. + """ + x /= 128. + x -= 1. + return x.astype(np.float32) + + +# This function is taken from the original tf repo. +# It ensures that all layers have a channel number that is divisible by 8 +# It can be seen here: +# https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py + + +def _make_divisible(v, divisor, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +def mobilenet_v2(input_shape=None, + alpha=1.0, + include_top=True, + classes=1000): + """Instantiates the MobileNetV2 architecture. + + To load a MobileNetV2 model via `load_model`, import the custom + objects `relu6` and pass them to the `custom_objects` parameter. + E.g. 
+ model = load_model('mobilenet.h5', custom_objects={ + 'relu6': mobilenet.relu6}) + + Arguments: + input_shape: optional shape tuple, to be specified if you would + like to use a model with an input img resolution that is not + (224, 224, 3). + It should have exactly 3 inputs channels (224, 224, 3). + You can also omit this option if you would like + to infer input_shape from an input_tensor. + If you choose to include both input_tensor and input_shape then + input_shape will be used if they match, if the shapes + do not match then we will throw an error. + E.g. `(160, 160, 3)` would be one valid value. + alpha: controls the width of the network. This is known as the + width multiplier in the MobileNetV2 paper. + - If `alpha` < 1.0, proportionally decreases the number + of filters in each layer. + - If `alpha` > 1.0, proportionally increases the number + of filters in each layer. + - If `alpha` = 1, default number of filters from the paper + are used at each layer. + include_top: whether to include the fully-connected + layer at the top of the network. + classes: optional number of classes to classify images + into, only to be specified if `include_top` is True, and + if no `weights` argument is specified. + + Returns: + A Keras model instance. + + Raises: + ValueError: in case of invalid argument for `weights`, + or invalid input shape or invalid depth_multiplier, alpha, + rows when weights='imagenet' + """ + + # Determine proper input shape and default size. + # If input_shape is None and no input_tensor + if input_shape is None: + default_size = 224 + + # If input_shape is not None, assume default size + else: + if K.image_data_format() == 'channels_first': + rows = input_shape[1] + cols = input_shape[2] + else: + rows = input_shape[0] + cols = input_shape[1] + + if rows == cols and rows in [96, 128, 160, 192, 224]: + default_size = rows + else: + default_size = 224 + + input_shape = _obtain_input_shape(input_shape, + default_size=default_size, + min_size=32, + data_format=K.image_data_format(), + require_flatten=include_top) + + if K.image_data_format() == 'channels_last': + row_axis, col_axis = (0, 1) + else: + row_axis, col_axis = (1, 2) + rows = input_shape[row_axis] + cols = input_shape[col_axis] + + if K.image_data_format() != 'channels_last': + warnings.warn('The MobileNet family of models is only available ' + 'for the input data format "channels_last" ' + '(width, height, channels). ' + 'However your settings specify the default ' + 'data format "channels_first" (channels, width, height).' + ' You should set `image_data_format="channels_last"` ' + 'in your Keras config located at ~/.keras/keras.json. 
' + 'The model being returned right now will expect inputs ' + 'to follow the "channels_last" data format.') + K.set_image_data_format('channels_last') + old_data_format = 'channels_first' + else: + old_data_format = None + + img_input = Input(shape=input_shape) + + first_block_filters = _make_divisible(32 * alpha, 8) + x = Conv2D(first_block_filters, + kernel_size=3, + strides=(2, 2), padding='same', + use_bias=False, name='Conv1')(img_input) + x = BatchNormalization(epsilon=1e-3, momentum=0.999, name='bn_Conv1')(x) + x = Activation(relu6, name='Conv1_relu')(x) + + x = _first_inverted_res_block(x, + filters=16, + alpha=alpha, + stride=1, + block_id=0) + + x = _inverted_res_block(x, filters=24, alpha=alpha, stride=2, + expansion=6, block_id=1) + x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1, + expansion=6, block_id=2) + + x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2, + expansion=6, block_id=3) + x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1, + expansion=6, block_id=4) + x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1, + expansion=6, block_id=5) + + x = _inverted_res_block(x, filters=64, alpha=alpha, stride=2, + expansion=6, block_id=6) + x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, + expansion=6, block_id=7) + x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, + expansion=6, block_id=8) + x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, + expansion=6, block_id=9) + + x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, + expansion=6, block_id=10) + x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, + expansion=6, block_id=11) + x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, + expansion=6, block_id=12) + + x = _inverted_res_block(x, filters=160, alpha=alpha, stride=2, + expansion=6, block_id=13) + x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, + expansion=6, block_id=14) + x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, + expansion=6, block_id=15) + + x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1, + expansion=6, block_id=16) + + # no alpha applied to last conv as stated in the paper: + # if the width multiplier is greater than 1 we + # increase the number of output channels + if alpha > 1.0: + last_block_filters = _make_divisible(1280 * alpha, 8) + else: + last_block_filters = 1280 + + x = Conv2D(last_block_filters, + kernel_size=1, + use_bias=False, + name='Conv_1')(x) + x = BatchNormalization(epsilon=1e-3, momentum=0.999, name='Conv_1_bn')(x) + x = Activation(relu6, name='out_relu')(x) + + if include_top: + x = GlobalAveragePooling2D()(x) + x = Dense(classes, activation='softmax', + use_bias=True, name='Logits')(x) + + # Ensure that the model takes into account + # any potential predecessors of `input_tensor`. + inputs = img_input + + # Create model. 
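+  # Illustrative usage sketch (assumes RGB inputs scaled with the
+  # `preprocess_input` helper defined above):
+  #   net = mobilenet_v2(input_shape=(224, 224, 3), alpha=1.0)
+  #   preds = net(preprocess_input(images))  # images: float32 in [0, 255]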
+ model = Model(inputs, x, name='mobilenetv2_%0.2f_%s' % (alpha, rows)) + + if old_data_format: + K.set_image_data_format(old_data_format) + return model + + +def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id): + """Build an inverted res block.""" + in_channels = int(inputs.shape[-1]) + pointwise_conv_filters = int(filters * alpha) + pointwise_filters = _make_divisible(pointwise_conv_filters, 8) + # Expand + + x = Conv2D(expansion * in_channels, kernel_size=1, padding='same', + use_bias=False, activation=None, + name='mobl%d_conv_expand' % block_id)(inputs) + x = BatchNormalization(epsilon=1e-3, momentum=0.999, + name='bn%d_conv_bn_expand' % + block_id)(x) + x = Activation(relu6, name='conv_%d_relu' % block_id)(x) + + # Depthwise + x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None, + use_bias=False, padding='same', + name='mobl%d_conv_depthwise' % block_id)(x) + x = BatchNormalization(epsilon=1e-3, momentum=0.999, + name='bn%d_conv_depthwise' % block_id)(x) + + x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x) + + # Project + x = Conv2D(pointwise_filters, + kernel_size=1, padding='same', use_bias=False, activation=None, + name='mobl%d_conv_project' % block_id)(x) + x = BatchNormalization(epsilon=1e-3, momentum=0.999, + name='bn%d_conv_bn_project' % block_id)(x) + + if in_channels == pointwise_filters and stride == 1: + return Add(name='res_connect_' + str(block_id))([inputs, x]) + + return x + + +def _first_inverted_res_block(inputs, + stride, + alpha, filters, block_id): + """Build the first inverted res block.""" + in_channels = int(inputs.shape[-1]) + pointwise_conv_filters = int(filters * alpha) + pointwise_filters = _make_divisible(pointwise_conv_filters, 8) + + # Depthwise + x = DepthwiseConv2D(kernel_size=3, + strides=stride, activation=None, + use_bias=False, padding='same', + name='mobl%d_conv_depthwise' % + block_id)(inputs) + x = BatchNormalization(epsilon=1e-3, momentum=0.999, + name='bn%d_conv_depthwise' % + block_id)(x) + x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x) + + # Project + x = Conv2D(pointwise_filters, + kernel_size=1, + padding='same', + use_bias=False, + activation=None, + name='mobl%d_conv_project' % + block_id)(x) + x = BatchNormalization(epsilon=1e-3, momentum=0.999, + name='bn%d_conv_project' % + block_id)(x) + + if in_channels == pointwise_filters and stride == 1: + return Add(name='res_connect_' + str(block_id))([inputs, x]) + + return x diff --git a/models/research/object_detection/models/keras_models/convert_keras_models.py b/models/research/object_detection/models/keras_models/convert_keras_models.py new file mode 100644 index 0000000000000000000000000000000000000000..a34af981b37032115bf0c3e957e0f4c216504d4c --- /dev/null +++ b/models/research/object_detection/models/keras_models/convert_keras_models.py @@ -0,0 +1,85 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Write keras weights into a tensorflow checkpoint. + +The imagenet weights in `keras.applications` are downloaded from github. +This script converts them into the tensorflow checkpoint format and stores them +on disk where they can be easily accessible during training. +""" + +from __future__ import print_function + +import os + +from absl import app +import numpy as np +import tensorflow.compat.v1 as tf + +FLAGS = tf.flags.FLAGS + + +tf.flags.DEFINE_string('model', 'resnet_v2_101', + 'The model to load. The following are supported: ' + '"resnet_v1_50", "resnet_v1_101", "resnet_v2_50", ' + '"resnet_v2_101"') +tf.flags.DEFINE_string('output_path', None, + 'The directory to output weights in.') +tf.flags.DEFINE_boolean('verify_weights', True, + ('Verify the weights are loaded correctly by making ' + 'sure the predictions are the same before and after ' + 'saving.')) + + +def init_model(name): + """Creates a Keras Model with the specific ResNet version.""" + if name == 'resnet_v1_50': + model = tf.keras.applications.ResNet50(weights='imagenet') + elif name == 'resnet_v1_101': + model = tf.keras.applications.ResNet101(weights='imagenet') + elif name == 'resnet_v2_50': + model = tf.keras.applications.ResNet50V2(weights='imagenet') + elif name == 'resnet_v2_101': + model = tf.keras.applications.ResNet101V2(weights='imagenet') + else: + raise ValueError('Model {} not supported'.format(FLAGS.model)) + + return model + + +def main(_): + + model = init_model(FLAGS.model) + + path = os.path.join(FLAGS.output_path, FLAGS.model) + tf.gfile.MakeDirs(path) + weights_path = os.path.join(path, 'weights') + ckpt = tf.train.Checkpoint(feature_extractor=model) + saved_path = ckpt.save(weights_path) + + if FLAGS.verify_weights: + imgs = np.random.randn(1, 224, 224, 3).astype(np.float32) + keras_preds = model(imgs) + + model = init_model(FLAGS.model) + ckpt.restore(saved_path) + loaded_weights_pred = model(imgs).numpy() + + if not np.all(np.isclose(keras_preds, loaded_weights_pred)): + raise RuntimeError('The model was not saved correctly.') + + +if __name__ == '__main__': + tf.enable_v2_behavior() + app.run(main) diff --git a/models/research/object_detection/models/keras_models/hourglass_network.py b/models/research/object_detection/models/keras_models/hourglass_network.py new file mode 100644 index 0000000000000000000000000000000000000000..d216b1669e3864083ff477a395f48c596172e356 --- /dev/null +++ b/models/research/object_detection/models/keras_models/hourglass_network.py @@ -0,0 +1,376 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""The Hourglass[1] network. 
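+
+Illustrative usage (a sketch; the shape behavior is inferred from the
+`out_stride` and `num_feature_outputs` properties defined below):
+
+  net = hourglass_104()
+  # Stacks two hourglass modules: calling `net` on a batch of images returns
+  # `net.num_feature_outputs` (2) feature maps, each downsampled by
+  # `net.out_stride` (4) relative to the input resolution.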
+ +[1]: https://arxiv.org/abs/1603.06937 +""" + + +import tensorflow.compat.v2 as tf + + +BATCH_NORM_EPSILON = 1e-5 +BATCH_NORM_MOMENTUM = 0.1 +BATCH_NORM_FUSED = True + + +class IdentityLayer(tf.keras.layers.Layer): + """A layer which passes through the input as it is.""" + + def call(self, inputs): + return inputs + + +def _get_padding_for_kernel_size(kernel_size): + if kernel_size == 7: + return (3, 3) + elif kernel_size == 3: + return (1, 1) + else: + raise ValueError('Padding for kernel size {} not known.'.format( + kernel_size)) + + +class ConvolutionalBlock(tf.keras.layers.Layer): + """Block that aggregates Convolution + Norm layer + ReLU.""" + + def __init__(self, kernel_size, out_channels, stride=1, relu=True, + padding='same'): + """Initializes the Convolutional block. + + Args: + kernel_size: int, convolution kernel size. + out_channels: int, the desired number of output channels. + stride: Integer, stride used in the convolution. + relu: bool, whether to use relu at the end of the layer. + padding: str, the padding scheme to use when kernel_size <= 1 + """ + super(ConvolutionalBlock, self).__init__() + + if kernel_size > 1: + padding = 'valid' + padding_size = _get_padding_for_kernel_size(kernel_size) + + # TODO(vighneshb) Explore if removing and using padding option in conv + # layer works. + self.pad = tf.keras.layers.ZeroPadding2D(padding_size) + else: + self.pad = IdentityLayer() + + self.conv = tf.keras.layers.Conv2D( + filters=out_channels, kernel_size=kernel_size, use_bias=False, + strides=stride, padding=padding) + + self.norm = tf.keras.layers.experimental.SyncBatchNormalization( + name='batchnorm', epsilon=1e-5, momentum=0.1) + + if relu: + self.relu = tf.keras.layers.ReLU() + else: + self.relu = IdentityLayer() + + def call(self, inputs): + net = self.pad(inputs) + net = self.conv(net) + net = self.norm(net) + return self.relu(net) + + +class SkipConvolution(ConvolutionalBlock): + """The skip connection layer for a ResNet.""" + + def __init__(self, out_channels, stride): + """Initializes the skip convolution layer. + + Args: + out_channels: int, the desired number of output channels. + stride: int, the stride for the layer. + """ + super(SkipConvolution, self).__init__( + out_channels=out_channels, kernel_size=1, stride=stride, relu=False) + + +class ResidualBlock(tf.keras.layers.Layer): + """A Residual block.""" + + def __init__(self, out_channels, skip_conv=False, kernel_size=3, stride=1, + padding='same'): + """Initializes the Residual block. + + Args: + out_channels: int, the desired number of output channels. + skip_conv: bool, whether to use a conv layer for skip connections. + kernel_size: int, convolution kernel size. + stride: Integer, stride used in the convolution. + padding: str, the type of padding to use. 
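+
+    Example (illustrative; shapes mirror the accompanying unit tests):
+      ResidualBlock(out_channels=8, stride=2, skip_conv=True) maps a
+      (2, 32, 32, 8) input to (2, 16, 16, 8), while the default stride of 1
+      without a skip convolution preserves the input spatial size.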
+ """ + + super(ResidualBlock, self).__init__() + self.conv_block = ConvolutionalBlock( + kernel_size=kernel_size, out_channels=out_channels, stride=stride) + + self.conv = tf.keras.layers.Conv2D( + filters=out_channels, kernel_size=kernel_size, use_bias=False, + strides=1, padding=padding) + self.norm = tf.keras.layers.experimental.SyncBatchNormalization( + name='batchnorm', epsilon=1e-5, momentum=0.1) + + if skip_conv: + self.skip = SkipConvolution(out_channels=out_channels, + stride=stride) + else: + self.skip = IdentityLayer() + + self.relu = tf.keras.layers.ReLU() + + def call(self, inputs): + net = self.conv_block(inputs) + net = self.conv(net) + net = self.norm(net) + net_skip = self.skip(inputs) + return self.relu(net + net_skip) + + +class InputDownsampleBlock(tf.keras.layers.Layer): + """Block for the initial feature downsampling.""" + + def __init__(self, out_channels_initial_conv, out_channels_residual_block): + """Initializes the downsample block. + + Args: + out_channels_initial_conv: int, the desired number of output channels + in the initial conv layer. + out_channels_residual_block: int, the desired number of output channels + in the underlying residual block. + """ + + super(InputDownsampleBlock, self).__init__() + self.conv_block = ConvolutionalBlock( + kernel_size=7, out_channels=out_channels_initial_conv, stride=2, + padding='valid') + self.residual_block = ResidualBlock( + out_channels=out_channels_residual_block, stride=2, skip_conv=True) + + def call(self, inputs): + return self.residual_block(self.conv_block(inputs)) + + +def _make_repeated_residual_blocks(out_channels, num_blocks, + initial_stride=1, residual_channels=None): + """Stack Residual blocks one after the other. + + Args: + out_channels: int, the desired number of output channels. + num_blocks: int, the number of residual blocks to be stacked. + initial_stride: int, the stride of the initial residual block. + residual_channels: int, the desired number of output channels in the + intermediate residual blocks. If not specifed, we use out_channels. + + Returns: + blocks: A list of residual blocks to be applied in sequence. + + """ + + blocks = [] + + if residual_channels is None: + residual_channels = out_channels + + for i in range(num_blocks - 1): + stride = initial_stride if i == 0 else 1 + skip_conv = stride > 1 + + blocks.append( + ResidualBlock(out_channels=residual_channels, stride=stride, + skip_conv=skip_conv) + ) + + skip_conv = residual_channels != out_channels + blocks.append(ResidualBlock(out_channels=out_channels, skip_conv=skip_conv)) + + return blocks + + +def _apply_blocks(inputs, blocks): + net = inputs + + for block in blocks: + net = block(net) + + return net + + +class EncoderDecoderBlock(tf.keras.layers.Layer): + """An encoder-decoder block which recursively defines the hourglass network.""" + + def __init__(self, num_stages, channel_dims, blocks_per_stage): + """Initializes the encoder-decoder block. + + Args: + num_stages: int, Number of stages in the network. At each stage we have 2 + encoder and 1 decoder blocks. The second encoder block downsamples the + input. + channel_dims: int list, the output channels dimensions of stages in + the network. `channel_dims[0]` is used to define the number of + channels in the first encoder block and `channel_dims[1]` is used to + define the number of channels in the second encoder block. 
The channels + in the recursive inner layers are defined using `channel_dims[1:]` + blocks_per_stage: int list, number of residual blocks to use at each + stage. `blocks_per_stage[0]` defines the number of blocks at the + current stage and `blocks_per_stage[1:]` is used at further stages. + """ + + super(EncoderDecoderBlock, self).__init__() + + out_channels = channel_dims[0] + out_channels_downsampled = channel_dims[1] + + self.encoder_block1 = _make_repeated_residual_blocks( + out_channels=out_channels, num_blocks=blocks_per_stage[0], + initial_stride=1) + self.encoder_block2 = _make_repeated_residual_blocks( + out_channels=out_channels_downsampled, + num_blocks=blocks_per_stage[0], initial_stride=2) + + if num_stages > 1: + self.inner_block = [ + EncoderDecoderBlock(num_stages - 1, channel_dims[1:], + blocks_per_stage[1:]) + ] + else: + self.inner_block = _make_repeated_residual_blocks( + out_channels=out_channels_downsampled, + num_blocks=blocks_per_stage[1]) + + self.decoder_block = _make_repeated_residual_blocks( + residual_channels=out_channels_downsampled, + out_channels=out_channels, num_blocks=blocks_per_stage[0]) + self.upsample = tf.keras.layers.UpSampling2D(2) + + self.merge_features = tf.keras.layers.Add() + + def call(self, inputs): + + encoded_outputs = _apply_blocks(inputs, self.encoder_block1) + encoded_downsampled_outputs = _apply_blocks(inputs, self.encoder_block2) + inner_block_outputs = _apply_blocks( + encoded_downsampled_outputs, self.inner_block) + + decoded_outputs = _apply_blocks(inner_block_outputs, self.decoder_block) + upsampled_outputs = self.upsample(decoded_outputs) + + return self.merge_features([encoded_outputs, upsampled_outputs]) + + +class HourglassNetwork(tf.keras.Model): + """The hourglass network.""" + + def __init__(self, num_stages, channel_dims, blocks_per_stage, + num_hourglasses): + """Intializes the feature extractor. + + Args: + num_stages: int, Number of stages in the network. At each stage we have 2 + encoder and 1 decoder blocks. The second encoder block downsamples the + input. + channel_dims: int list, the output channel dimensions of stages in + the network. `channel_dims[0]` and `channel_dims[1]` are used to define + the initial downsampling block. `channel_dims[1:]` is used to define + the hourglass network(s) which follow(s). + blocks_per_stage: int list, number of residual blocks to use at each + stage in the hourglass network + num_hourglasses: int, number of hourglas networks to stack + sequentially. 
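+
+    Example (illustrative; the values mirror the accompanying unit test
+    rather than a recommended configuration):
+      HourglassNetwork(num_stages=4, channel_dims=[4, 6, 8, 10, 12, 14],
+                       blocks_per_stage=[2, 3, 4, 5, 6], num_hourglasses=2)
+      # Called on a (2, 64, 64, 3) batch, this returns two feature maps of
+      # shape (2, 16, 16, 6) each.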
+ """ + + super(HourglassNetwork, self).__init__() + + self.num_hourglasses = num_hourglasses + self.downsample_input = InputDownsampleBlock( + out_channels_initial_conv=channel_dims[0], + out_channels_residual_block=channel_dims[1] + ) + + self.hourglass_network = [] + self.output_conv = [] + for _ in range(self.num_hourglasses): + self.hourglass_network.append( + EncoderDecoderBlock( + num_stages=num_stages, channel_dims=channel_dims[1:], + blocks_per_stage=blocks_per_stage) + ) + self.output_conv.append( + ConvolutionalBlock(kernel_size=3, out_channels=channel_dims[1]) + ) + + self.intermediate_conv1 = [] + self.intermediate_conv2 = [] + self.intermediate_residual = [] + + for _ in range(self.num_hourglasses - 1): + self.intermediate_conv1.append( + ConvolutionalBlock( + kernel_size=1, out_channels=channel_dims[1], relu=False) + ) + self.intermediate_conv2.append( + ConvolutionalBlock( + kernel_size=1, out_channels=channel_dims[1], relu=False) + ) + self.intermediate_residual.append( + ResidualBlock(out_channels=channel_dims[1]) + ) + + self.intermediate_relu = tf.keras.layers.ReLU() + + def call(self, inputs): + + inputs = self.downsample_input(inputs) + outputs = [] + + for i in range(self.num_hourglasses): + + hourglass_output = self.hourglass_network[i](inputs) + + output = self.output_conv[i](hourglass_output) + outputs.append(output) + + if i < self.num_hourglasses - 1: + secondary_output = (self.intermediate_conv1[i](inputs) + + self.intermediate_conv2[i](output)) + secondary_output = self.intermediate_relu(secondary_output) + inputs = self.intermediate_residual[i](secondary_output) + + return outputs + + @property + def out_stride(self): + """The stride in the output image of the network.""" + return 4 + + @property + def num_feature_outputs(self): + """Ther number of feature outputs returned by the feature extractor.""" + return self.num_hourglasses + + +def hourglass_104(): + """The Hourglass-104 backbone.""" + + return HourglassNetwork( + channel_dims=[128, 256, 256, 384, 384, 384, 512], + num_hourglasses=2, + num_stages=5, + blocks_per_stage=[2, 2, 2, 2, 2, 4], + ) diff --git a/models/research/object_detection/models/keras_models/hourglass_network_tf2_test.py b/models/research/object_detection/models/keras_models/hourglass_network_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..d90b950ecd4102a260643391de6a4475ed959c0f --- /dev/null +++ b/models/research/object_detection/models/keras_models/hourglass_network_tf2_test.py @@ -0,0 +1,100 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Testing the Hourglass network.""" +import unittest +from absl.testing import parameterized +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models.keras_models import hourglass_network as hourglass +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class HourglassFeatureExtractorTest(tf.test.TestCase, parameterized.TestCase): + + def test_identity_layer(self): + + layer = hourglass.IdentityLayer() + output = layer(np.zeros((2, 32, 32, 3), dtype=np.float32)) + self.assertEqual(output.shape, (2, 32, 32, 3)) + + def test_skip_conv_layer_stride_1(self): + + layer = hourglass.SkipConvolution(out_channels=8, stride=1) + output = layer(np.zeros((2, 32, 32, 3), dtype=np.float32)) + self.assertEqual(output.shape, (2, 32, 32, 8)) + + def test_skip_conv_layer_stride_2(self): + + layer = hourglass.SkipConvolution(out_channels=8, stride=2) + output = layer(np.zeros((2, 32, 32, 3), dtype=np.float32)) + self.assertEqual(output.shape, (2, 16, 16, 8)) + + @parameterized.parameters([{'kernel_size': 1}, + {'kernel_size': 3}, + {'kernel_size': 7}]) + def test_conv_block(self, kernel_size): + + layer = hourglass.ConvolutionalBlock( + out_channels=8, kernel_size=kernel_size, stride=1) + output = layer(np.zeros((2, 32, 32, 3), dtype=np.float32)) + self.assertEqual(output.shape, (2, 32, 32, 8)) + + layer = hourglass.ConvolutionalBlock( + out_channels=8, kernel_size=kernel_size, stride=2) + output = layer(np.zeros((2, 32, 32, 3), dtype=np.float32)) + self.assertEqual(output.shape, (2, 16, 16, 8)) + + def test_residual_block_stride_1(self): + + layer = hourglass.ResidualBlock(out_channels=8, stride=1) + output = layer(np.zeros((2, 32, 32, 8), dtype=np.float32)) + self.assertEqual(output.shape, (2, 32, 32, 8)) + + def test_residual_block_stride_2(self): + + layer = hourglass.ResidualBlock(out_channels=8, stride=2, + skip_conv=True) + output = layer(np.zeros((2, 32, 32, 8), dtype=np.float32)) + self.assertEqual(output.shape, (2, 16, 16, 8)) + + def test_input_downsample_block(self): + + layer = hourglass.InputDownsampleBlock( + out_channels_initial_conv=4, out_channels_residual_block=8) + output = layer(np.zeros((2, 32, 32, 8), dtype=np.float32)) + self.assertEqual(output.shape, (2, 8, 8, 8)) + + def test_encoder_decoder_block(self): + + layer = hourglass.EncoderDecoderBlock( + num_stages=4, blocks_per_stage=[2, 3, 4, 5, 6], + channel_dims=[4, 6, 8, 10, 12]) + output = layer(np.zeros((2, 64, 64, 4), dtype=np.float32)) + self.assertEqual(output.shape, (2, 64, 64, 4)) + + def test_hourglass_feature_extractor(self): + + model = hourglass.HourglassNetwork( + num_stages=4, blocks_per_stage=[2, 3, 4, 5, 6], + channel_dims=[4, 6, 8, 10, 12, 14], num_hourglasses=2) + outputs = model(np.zeros((2, 64, 64, 3), dtype=np.float32)) + self.assertEqual(outputs[0].shape, (2, 16, 16, 6)) + self.assertEqual(outputs[1].shape, (2, 16, 16, 6)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/keras_models/inception_resnet_v2.py b/models/research/object_detection/models/keras_models/inception_resnet_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..9ecdfa2615f47198335e5a8467c06bdb97e9b3be --- /dev/null +++ b/models/research/object_detection/models/keras_models/inception_resnet_v2.py @@ -0,0 +1,244 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A wrapper around the Keras InceptionResnetV2 models for object detection.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf + +from object_detection.core import freezable_batch_norm + + +class _LayersOverride(object): + """Alternative Keras layers interface for the Keras InceptionResNetV2.""" + + def __init__(self, + batchnorm_training, + output_stride=16, + align_feature_maps=False, + batchnorm_scale=False, + default_batchnorm_momentum=0.999, + default_batchnorm_epsilon=1e-3, + weight_decay=0.00004): + """Alternative tf.keras.layers interface, for use by InceptionResNetV2. + + It is used by the Keras applications kwargs injection API to + modify the Inception Resnet V2 Keras application with changes required by + the Object Detection API. + + These injected interfaces make the following changes to the network: + + - Supports freezing batch norm layers + - Adds support for feature map alignment (like in the Slim model) + - Adds support for changing the output stride (like in the Slim model) + - Adds support for overriding various batch norm hyperparameters + + Because the Keras inception resnet v2 application does not assign explicit + names to most individual layers, the injection of output stride support + works by identifying convolution layers according to their filter counts + and pre-feature-map-alignment padding arguments. + + Args: + batchnorm_training: Bool. Assigned to Batch norm layer `training` param + when constructing `freezable_batch_norm.FreezableBatchNorm` layers. + output_stride: A scalar that specifies the requested ratio of input to + output spatial resolution. Only supports 8 and 16. + align_feature_maps: When true, changes all the VALID paddings in the + network to SAME padding so that the feature maps are aligned. + batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale the + activations in the batch normalization layer. + default_batchnorm_momentum: Float. Batch norm layers will be constructed + using this value as the momentum. + default_batchnorm_epsilon: small float added to variance to avoid + dividing by zero. + weight_decay: the l2 regularization weight decay for weights variables. + (gets multiplied by 0.5 to map from slim l2 regularization weight to + Keras l2 regularization weight). + """ + self._use_atrous = output_stride == 8 + self._align_feature_maps = align_feature_maps + self._batchnorm_training = batchnorm_training + self._batchnorm_scale = batchnorm_scale + self._default_batchnorm_momentum = default_batchnorm_momentum + self._default_batchnorm_epsilon = default_batchnorm_epsilon + self.regularizer = tf.keras.regularizers.l2(weight_decay * 0.5) + + def Conv2D(self, filters, kernel_size, **kwargs): + """Builds a Conv2D layer according to the current Object Detection config. 
+ + Overrides the Keras InceptionResnetV2 application's convolutions with ones + that follow the spec specified by the Object Detection hyperparameters. + + If feature map alignment is enabled, the padding will be forced to 'same'. + If output_stride is 8, some conv2d layers will be matched according to + their name or filter counts or pre-alignment padding parameters, and will + have the correct 'dilation rate' or 'strides' set. + + Args: + filters: The number of filters to use for the convolution. + kernel_size: The kernel size to specify the height and width of the 2D + convolution window. + **kwargs: Keyword args specified by the Keras application for + constructing the convolution. + + Returns: + A Keras Conv2D layer specified by the Object Detection hyperparameter + configurations. + """ + kwargs['kernel_regularizer'] = self.regularizer + kwargs['bias_regularizer'] = self.regularizer + + # Because the Keras application does not set explicit names for most layers, + # (instead allowing names to auto-increment), we must match individual + # layers in the model according to their filter count, name, or + # pre-alignment mapping. This means we can only align the feature maps + # after we have applied our updates in cases where output_stride=8. + if self._use_atrous and (filters == 384): + kwargs['strides'] = 1 + + name = kwargs.get('name') + if self._use_atrous and ( + (name and 'block17' in name) or + (filters == 128 or filters == 160 or + (filters == 192 and kwargs.get('padding', '').lower() != 'valid'))): + kwargs['dilation_rate'] = 2 + + if self._align_feature_maps: + kwargs['padding'] = 'same' + + return tf.keras.layers.Conv2D(filters, kernel_size, **kwargs) + + def MaxPooling2D(self, pool_size, strides, **kwargs): + """Builds a pooling layer according to the current Object Detection config. + + Overrides the Keras InceptionResnetV2 application's MaxPooling2D layers with + ones that follow the spec specified by the Object Detection hyperparameters. + + If feature map alignment is enabled, the padding will be forced to 'same'. + If output_stride is 8, some pooling layers will be matched according to + their pre-alignment padding parameters, and will have their 'strides' + argument overridden. + + Args: + pool_size: The pool size specified by the Keras application. + strides: The strides specified by the unwrapped Keras application. + **kwargs: Keyword args specified by the Keras application for + constructing the max pooling layer. + + Returns: + A MaxPool2D layer specified by the Object Detection hyperparameter + configurations. + """ + if self._use_atrous and kwargs.get('padding', '').lower() == 'valid': + strides = 1 + + if self._align_feature_maps: + kwargs['padding'] = 'same' + + return tf.keras.layers.MaxPool2D(pool_size, strides=strides, **kwargs) + + # We alias MaxPool2D because Keras has that alias + MaxPool2D = MaxPooling2D # pylint: disable=invalid-name + + def BatchNormalization(self, **kwargs): + """Builds a normalization layer. + + Overrides the Keras application batch norm with the norm specified by the + Object Detection configuration. + + Args: + **kwargs: Keyword arguments from the `layers.BatchNormalization` calls in + the Keras application. + + Returns: + A normalization layer specified by the Object Detection hyperparameter + configurations. 
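+
+      Illustrative note (relies on the assumed freezing behavior of
+      `FreezableBatchNorm`): with `batchnorm_training=False`, the returned
+      layer keeps using its moving-average statistics even while the rest of
+      the model trains, which is how batch norm freezing is achieved.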
+ """ + kwargs['scale'] = self._batchnorm_scale + return freezable_batch_norm.FreezableBatchNorm( + training=self._batchnorm_training, + epsilon=self._default_batchnorm_epsilon, + momentum=self._default_batchnorm_momentum, + **kwargs) + + # Forward all non-overridden methods to the keras layers + def __getattr__(self, item): + return getattr(tf.keras.layers, item) + + +# pylint: disable=invalid-name +def inception_resnet_v2( + batchnorm_training, + output_stride=16, + align_feature_maps=False, + batchnorm_scale=False, + weight_decay=0.00004, + default_batchnorm_momentum=0.9997, + default_batchnorm_epsilon=0.001, + **kwargs): + """Instantiates the InceptionResnetV2 architecture. + + (Modified for object detection) + + This wraps the InceptionResnetV2 tensorflow Keras application, but uses the + Keras application's kwargs-based monkey-patching API to override the Keras + architecture with the following changes: + + - Supports freezing batch norm layers with FreezableBatchNorms + - Adds support for feature map alignment (like in the Slim model) + - Adds support for changing the output stride (like in the Slim model) + - Changes the default batchnorm momentum to 0.9997 + - Adds support for overriding various batchnorm hyperparameters + + Args: + batchnorm_training: Bool. Assigned to Batch norm layer `training` param + when constructing `freezable_batch_norm.FreezableBatchNorm` layers. + output_stride: A scalar that specifies the requested ratio of input to + output spatial resolution. Only supports 8 and 16. + align_feature_maps: When true, changes all the VALID paddings in the + network to SAME padding so that the feature maps are aligned. + batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale the + activations in the batch normalization layer. + weight_decay: the l2 regularization weight decay for weights variables. + (gets multiplied by 0.5 to map from slim l2 regularization weight to + Keras l2 regularization weight). + default_batchnorm_momentum: Float. Batch norm layers will be constructed + using this value as the momentum. + default_batchnorm_epsilon: small float added to variance to avoid + dividing by zero. + **kwargs: Keyword arguments forwarded directly to the + `tf.keras.applications.InceptionResNetV2` method that constructs the + Keras model. + + Returns: + A Keras model instance. + """ + if output_stride != 8 and output_stride != 16: + raise ValueError('output_stride must be 8 or 16.') + + layers_override = _LayersOverride( + batchnorm_training, + output_stride, + align_feature_maps=align_feature_maps, + batchnorm_scale=batchnorm_scale, + default_batchnorm_momentum=default_batchnorm_momentum, + default_batchnorm_epsilon=default_batchnorm_epsilon, + weight_decay=weight_decay) + return tf.keras.applications.InceptionResNetV2( + layers=layers_override, **kwargs) +# pylint: enable=invalid-name diff --git a/models/research/object_detection/models/keras_models/inception_resnet_v2_tf2_test.py b/models/research/object_detection/models/keras_models/inception_resnet_v2_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..4cbcc54ad66985920e7739888b3542b6a1e48bca --- /dev/null +++ b/models/research/object_detection/models/keras_models/inception_resnet_v2_tf2_test.py @@ -0,0 +1,228 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for inception_resnet_v2.py. + +This test mainly focuses on comparing slim inception resnet v2 and Keras +inception resnet v2 for object detection. To verify the consistency of the two +models, we compare: + 1. Output shape of each layer given different inputs + 2. Number of global variables + +We also visualize the model structure via Tensorboard, and compare the model +layout and the parameters of each Op to make sure the two implementations are +consistent. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import unittest +import numpy as np +from six.moves import zip +import tensorflow.compat.v1 as tf + +from object_detection.models.keras_models import inception_resnet_v2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + +_KERAS_TO_SLIM_ENDPOINT_NAMES = { + 'activation': 'Conv2d_1a_3x3', + 'activation_1': 'Conv2d_2a_3x3', + 'activation_2': 'Conv2d_2b_3x3', + 'activation_3': 'Conv2d_3b_1x1', + 'activation_4': 'Conv2d_4a_3x3', + 'max_pooling2d': 'MaxPool_3a_3x3', + 'max_pooling2d_1': 'MaxPool_5a_3x3', + 'mixed_5b': 'Mixed_5b', + 'mixed_6a': 'Mixed_6a', + 'block17_20_ac': 'PreAuxLogits', + 'mixed_7a': 'Mixed_7a', + 'conv_7b_ac': 'Conv2d_7b_1x1', +} + +_SLIM_ENDPOINT_SHAPES_128 = { + 'Conv2d_1a_3x3': (2, 64, 64, 32), + 'Conv2d_2a_3x3': (2, 64, 64, 32), + 'Conv2d_2b_3x3': (2, 64, 64, 64), + 'Conv2d_3b_1x1': (2, 32, 32, 80), + 'Conv2d_4a_3x3': (2, 32, 32, 192), + 'Conv2d_7b_1x1': (2, 4, 4, 1536), + 'MaxPool_3a_3x3': (2, 32, 32, 64), + 'MaxPool_5a_3x3': (2, 16, 16, 192), + 'Mixed_5b': (2, 16, 16, 320), + 'Mixed_6a': (2, 8, 8, 1088), + 'Mixed_7a': (2, 4, 4, 2080), + 'PreAuxLogits': (2, 8, 8, 1088)} +_SLIM_ENDPOINT_SHAPES_128_STRIDE_8 = { + 'Conv2d_1a_3x3': (2, 64, 64, 32), + 'Conv2d_2a_3x3': (2, 64, 64, 32), + 'Conv2d_2b_3x3': (2, 64, 64, 64), + 'Conv2d_3b_1x1': (2, 32, 32, 80), + 'Conv2d_4a_3x3': (2, 32, 32, 192), + 'MaxPool_3a_3x3': (2, 32, 32, 64), + 'MaxPool_5a_3x3': (2, 16, 16, 192), + 'Mixed_5b': (2, 16, 16, 320), + 'Mixed_6a': (2, 16, 16, 1088), + 'PreAuxLogits': (2, 16, 16, 1088)} +_SLIM_ENDPOINT_SHAPES_128_ALIGN_FEATURE_MAPS_FALSE = { + 'Conv2d_1a_3x3': (2, 63, 63, 32), + 'Conv2d_2a_3x3': (2, 61, 61, 32), + 'Conv2d_2b_3x3': (2, 61, 61, 64), + 'Conv2d_3b_1x1': (2, 30, 30, 80), + 'Conv2d_4a_3x3': (2, 28, 28, 192), + 'Conv2d_7b_1x1': (2, 2, 2, 1536), + 'MaxPool_3a_3x3': (2, 30, 30, 64), + 'MaxPool_5a_3x3': (2, 13, 13, 192), + 'Mixed_5b': (2, 13, 13, 320), + 'Mixed_6a': (2, 6, 6, 1088), + 'Mixed_7a': (2, 2, 2, 2080), + 'PreAuxLogits': (2, 6, 6, 1088)} +_SLIM_ENDPOINT_SHAPES_299 = {} +_SLIM_ENDPOINT_SHAPES_299_STRIDE_8 = {} +_SLIM_ENDPOINT_SHAPES_299_ALIGN_FEATURE_MAPS_FALSE = {} + +_KERAS_LAYERS_TO_CHECK = list(_KERAS_TO_SLIM_ENDPOINT_NAMES.keys()) + +_NUM_CHANNELS = 3 +_BATCH_SIZE = 2 + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class InceptionResnetV2Test(test_case.TestCase): + + def _create_application_with_layer_outputs( + self, layer_names, 
batchnorm_training, + output_stride=16, + align_feature_maps=False, + batchnorm_scale=False, + weight_decay=0.00004, + default_batchnorm_momentum=0.9997, + default_batchnorm_epsilon=0.001,): + """Constructs Keras inception_resnet_v2 that extracts layer outputs.""" + # Have to clear the Keras backend to ensure isolation in layer naming + tf.keras.backend.clear_session() + if not layer_names: + layer_names = _KERAS_LAYERS_TO_CHECK + full_model = inception_resnet_v2.inception_resnet_v2( + batchnorm_training=batchnorm_training, + output_stride=output_stride, + align_feature_maps=align_feature_maps, + weights=None, + batchnorm_scale=batchnorm_scale, + weight_decay=weight_decay, + default_batchnorm_momentum=default_batchnorm_momentum, + default_batchnorm_epsilon=default_batchnorm_epsilon, + include_top=False) + layer_outputs = [full_model.get_layer(name=layer).output + for layer in layer_names] + return tf.keras.Model( + inputs=full_model.inputs, + outputs=layer_outputs) + + def _check_returns_correct_shape( + self, image_height, image_width, + expected_feature_map_shape, layer_names=None, batchnorm_training=True, + output_stride=16, + align_feature_maps=False, + batchnorm_scale=False, + weight_decay=0.00004, + default_batchnorm_momentum=0.9997, + default_batchnorm_epsilon=0.001,): + if not layer_names: + layer_names = _KERAS_LAYERS_TO_CHECK + model = self._create_application_with_layer_outputs( + layer_names=layer_names, + batchnorm_training=batchnorm_training, + output_stride=output_stride, + align_feature_maps=align_feature_maps, + batchnorm_scale=batchnorm_scale, + weight_decay=weight_decay, + default_batchnorm_momentum=default_batchnorm_momentum, + default_batchnorm_epsilon=default_batchnorm_epsilon) + + image_tensor = np.random.rand(_BATCH_SIZE, image_height, image_width, + _NUM_CHANNELS).astype(np.float32) + feature_maps = model(image_tensor) + + for feature_map, layer_name in zip(feature_maps, layer_names): + endpoint_name = _KERAS_TO_SLIM_ENDPOINT_NAMES[layer_name] + expected_shape = expected_feature_map_shape[endpoint_name] + self.assertAllEqual(feature_map.shape, expected_shape) + + def _get_variables(self, layer_names=None): + tf.keras.backend.clear_session() + model = self._create_application_with_layer_outputs( + layer_names=layer_names, + batchnorm_training=False) + preprocessed_inputs = tf.random.uniform([4, 40, 40, _NUM_CHANNELS]) + model(preprocessed_inputs) + return model.variables + + def test_returns_correct_shapes_128(self): + image_height = 128 + image_width = 128 + expected_feature_map_shape = ( + _SLIM_ENDPOINT_SHAPES_128) + self._check_returns_correct_shape( + image_height, image_width, expected_feature_map_shape, + align_feature_maps=True) + + def test_returns_correct_shapes_128_output_stride_8(self): + image_height = 128 + image_width = 128 + expected_feature_map_shape = ( + _SLIM_ENDPOINT_SHAPES_128_STRIDE_8) + + # Output stride of 8 not defined beyond 'block17_20_ac', which is + # PreAuxLogits in slim. So, we exclude those layers in our Keras vs Slim + # comparison. 
+ excluded_layers = {'mixed_7a', 'conv_7b_ac'} + layer_names = [l for l in _KERAS_LAYERS_TO_CHECK + if l not in excluded_layers] + self._check_returns_correct_shape( + image_height, image_width, expected_feature_map_shape, + layer_names=layer_names, output_stride=8, align_feature_maps=True) + + def test_returns_correct_shapes_128_align_feature_maps_false( + self): + image_height = 128 + image_width = 128 + expected_feature_map_shape = ( + _SLIM_ENDPOINT_SHAPES_128_ALIGN_FEATURE_MAPS_FALSE) + self._check_returns_correct_shape( + image_height, image_width, expected_feature_map_shape, + align_feature_maps=False) + + def test_hyperparam_override(self): + model = inception_resnet_v2.inception_resnet_v2( + batchnorm_training=True, + default_batchnorm_momentum=0.2, + default_batchnorm_epsilon=0.1, + weights=None, + include_top=False) + bn_layer = model.get_layer(name='freezable_batch_norm') + self.assertAllClose(bn_layer.momentum, 0.2) + self.assertAllClose(bn_layer.epsilon, 0.1) + + def test_variable_count(self): + variables = self._get_variables() + # 896 is the number of variables from slim inception resnet v2 model. + self.assertEqual(len(variables), 896) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/keras_models/mobilenet_v1.py b/models/research/object_detection/models/keras_models/mobilenet_v1.py new file mode 100644 index 0000000000000000000000000000000000000000..71c20e2cead25573d831290bcf14fbf1e1942c2e --- /dev/null +++ b/models/research/object_detection/models/keras_models/mobilenet_v1.py @@ -0,0 +1,358 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A wrapper around the Keras MobilenetV1 models for object detection.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf + +from object_detection.core import freezable_batch_norm +from object_detection.models.keras_models import model_utils + + +def _fixed_padding(inputs, kernel_size, rate=1): # pylint: disable=invalid-name + """Pads the input along the spatial dimensions independently of input size. + + Pads the input such that if it was used in a convolution with 'VALID' padding, + the output would have the same dimensions as if the unpadded input was used + in a convolution with 'SAME' padding. + + Args: + inputs: A tensor of size [batch, height_in, width_in, channels]. + kernel_size: The kernel to be used in the conv2d or max_pool2d operation. + rate: An integer, rate for atrous convolution. + + Returns: + output: A tensor of size [batch, height_out, width_out, channels] with the + input, either intact (if kernel_size == 1) or padded (if kernel_size > 1). 
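+
+  Example (illustrative, following directly from the padding arithmetic
+  below): with kernel_size=(3, 3) and rate=1, a [1, 32, 32, 3] input is
+  zero-padded by one pixel on each side to [1, 34, 34, 3], so a subsequent
+  stride-1 'VALID' 3x3 convolution yields a 32x32 output, matching 'SAME'
+  padding on the unpadded input.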
+ """ + kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1), + kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)] + pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1] + pad_beg = [pad_total[0] // 2, pad_total[1] // 2] + pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]] + padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]], + [pad_beg[1], pad_end[1]], [0, 0]]) + return padded_inputs + + +class _LayersOverride(object): + """Alternative Keras layers interface for the Keras MobileNetV1.""" + + def __init__(self, + batchnorm_training, + default_batchnorm_momentum=0.999, + conv_hyperparams=None, + use_explicit_padding=False, + alpha=1.0, + min_depth=None, + conv_defs=None): + """Alternative tf.keras.layers interface, for use by the Keras MobileNetV1. + + It is used by the Keras applications kwargs injection API to + modify the MobilenetV1 Keras application with changes required by + the Object Detection API. + + These injected interfaces make the following changes to the network: + + - Applies the Object Detection hyperparameter configuration + - Supports FreezableBatchNorms + - Adds support for a min number of filters for each layer + - Makes the `alpha` parameter affect the final convolution block even if it + is less than 1.0 + - Adds support for explicit padding of convolutions + + Args: + batchnorm_training: Bool. Assigned to Batch norm layer `training` param + when constructing `freezable_batch_norm.FreezableBatchNorm` layers. + default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, + batch norm layers will be constructed using this value as the momentum. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. Optionally set to `None` + to use default mobilenet_v1 layer builders. + use_explicit_padding: If True, use 'valid' padding for convolutions, + but explicitly pre-pads inputs so that the output dimensions are the + same as if 'same' padding were used. Off by default. + alpha: The width multiplier referenced in the MobileNetV1 paper. It + modifies the number of filters in each convolutional layer. It's called + depth multiplier in Keras application MobilenetV1. + min_depth: Minimum number of filters in the convolutional layers. + conv_defs: Network layout to specify the mobilenet_v1 body. Default is + `None` to use the default mobilenet_v1 network layout. + """ + self._alpha = alpha + self._batchnorm_training = batchnorm_training + self._default_batchnorm_momentum = default_batchnorm_momentum + self._conv_hyperparams = conv_hyperparams + self._use_explicit_padding = use_explicit_padding + self._min_depth = min_depth + self._conv_defs = conv_defs + self.regularizer = tf.keras.regularizers.l2(0.00004 * 0.5) + self.initializer = tf.truncated_normal_initializer(stddev=0.09) + + def _FixedPaddingLayer(self, kernel_size, rate=1): + return tf.keras.layers.Lambda( + lambda x: _fixed_padding(x, kernel_size, rate)) + + def Conv2D(self, filters, kernel_size, **kwargs): + """Builds a Conv2D layer according to the current Object Detection config. + + Overrides the Keras MobileNetV1 application's convolutions with ones that + follow the spec specified by the Object Detection hyperparameters. + + Args: + filters: The number of filters to use for the convolution. + kernel_size: The kernel size to specify the height and width of the 2D + convolution window. 
In this function, the kernel size is expected to + be pair of numbers and the numbers must be equal for this function. + **kwargs: Keyword args specified by the Keras application for + constructing the convolution. + + Returns: + A one-arg callable that will either directly apply a Keras Conv2D layer to + the input argument, or that will first pad the input then apply a Conv2D + layer. + + Raises: + ValueError: if kernel size is not a pair of equal + integers (representing a square kernel). + """ + if not isinstance(kernel_size, tuple): + raise ValueError('kernel is expected to be a tuple.') + if len(kernel_size) != 2: + raise ValueError('kernel is expected to be length two.') + if kernel_size[0] != kernel_size[1]: + raise ValueError('kernel is expected to be square.') + layer_name = kwargs['name'] + if self._conv_defs: + conv_filters = model_utils.get_conv_def(self._conv_defs, layer_name) + if conv_filters: + filters = conv_filters + # Apply the width multiplier and the minimum depth to the convolution layers + filters = int(filters * self._alpha) + if self._min_depth and filters < self._min_depth: + filters = self._min_depth + + if self._conv_hyperparams: + kwargs = self._conv_hyperparams.params(**kwargs) + else: + kwargs['kernel_regularizer'] = self.regularizer + kwargs['kernel_initializer'] = self.initializer + + kwargs['padding'] = 'same' + if self._use_explicit_padding and kernel_size[0] > 1: + kwargs['padding'] = 'valid' + def padded_conv(features): # pylint: disable=invalid-name + padded_features = self._FixedPaddingLayer(kernel_size)(features) + return tf.keras.layers.Conv2D( + filters, kernel_size, **kwargs)(padded_features) + return padded_conv + else: + return tf.keras.layers.Conv2D(filters, kernel_size, **kwargs) + + def DepthwiseConv2D(self, kernel_size, **kwargs): + """Builds a DepthwiseConv2D according to the Object Detection config. + + Overrides the Keras MobileNetV2 application's convolutions with ones that + follow the spec specified by the Object Detection hyperparameters. + + Args: + kernel_size: The kernel size to specify the height and width of the 2D + convolution window. + **kwargs: Keyword args specified by the Keras application for + constructing the convolution. + + Returns: + A one-arg callable that will either directly apply a Keras DepthwiseConv2D + layer to the input argument, or that will first pad the input then apply + the depthwise convolution. + """ + if self._conv_hyperparams: + kwargs = self._conv_hyperparams.params(**kwargs) + # Both regularizer and initializaer also applies to depthwise layer in + # MobilenetV1, so we remap the kernel_* to depthwise_* here. + kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer'] + kwargs['depthwise_initializer'] = kwargs['kernel_initializer'] + else: + kwargs['depthwise_regularizer'] = self.regularizer + kwargs['depthwise_initializer'] = self.initializer + + kwargs['padding'] = 'same' + if self._use_explicit_padding: + kwargs['padding'] = 'valid' + def padded_depthwise_conv(features): # pylint: disable=invalid-name + padded_features = self._FixedPaddingLayer(kernel_size)(features) + return tf.keras.layers.DepthwiseConv2D( + kernel_size, **kwargs)(padded_features) + return padded_depthwise_conv + else: + return tf.keras.layers.DepthwiseConv2D(kernel_size, **kwargs) + + def BatchNormalization(self, **kwargs): + """Builds a normalization layer. + + Overrides the Keras application batch norm with the norm specified by the + Object Detection configuration. 
+ + Args: + **kwargs: Only the name is used, all other params ignored. + Required for matching `layers.BatchNormalization` calls in the Keras + application. + + Returns: + A normalization layer specified by the Object Detection hyperparameter + configurations. + """ + name = kwargs.get('name') + if self._conv_hyperparams: + return self._conv_hyperparams.build_batch_norm( + training=self._batchnorm_training, + name=name) + else: + return freezable_batch_norm.FreezableBatchNorm( + training=self._batchnorm_training, + epsilon=1e-3, + momentum=self._default_batchnorm_momentum, + name=name) + + def Input(self, shape): + """Builds an Input layer. + + Overrides the Keras application Input layer with one that uses a + tf.placeholder_with_default instead of a tf.placeholder. This is necessary + to ensure the application works when run on a TPU. + + Args: + shape: The shape for the input layer to use. (Does not include a dimension + for the batch size). + Returns: + An input layer for the specified shape that internally uses a + placeholder_with_default. + """ + default_size = 224 + default_batch_size = 1 + shape = list(shape) + default_shape = [default_size if dim is None else dim for dim in shape] + + input_tensor = tf.constant(0.0, shape=[default_batch_size] + default_shape) + + placeholder_with_default = tf.placeholder_with_default( + input=input_tensor, shape=[None] + shape) + return model_utils.input_layer(shape, placeholder_with_default) + + # pylint: disable=unused-argument + def ReLU(self, *args, **kwargs): + """Builds an activation layer. + + Overrides the Keras application ReLU with the activation specified by the + Object Detection configuration. + + Args: + *args: Ignored, required to match the `tf.keras.ReLU` interface + **kwargs: Only the name is used, + required to match `tf.keras.ReLU` interface + + Returns: + An activation layer specified by the Object Detection hyperparameter + configurations. + """ + name = kwargs.get('name') + if self._conv_hyperparams: + return self._conv_hyperparams.build_activation_layer(name=name) + else: + return tf.keras.layers.Lambda(tf.nn.relu6, name=name) + # pylint: enable=unused-argument + + # pylint: disable=unused-argument + def ZeroPadding2D(self, padding, **kwargs): + """Replaces explicit padding in the Keras application with a no-op. + + Args: + padding: The padding values for image height and width. + **kwargs: Ignored, required to match the Keras applications usage. + + Returns: + A no-op identity lambda. + """ + return lambda x: x + # pylint: enable=unused-argument + + # Forward all non-overridden methods to the keras layers + def __getattr__(self, item): + return getattr(tf.keras.layers, item) + + +# pylint: disable=invalid-name +def mobilenet_v1(batchnorm_training, + default_batchnorm_momentum=0.9997, + conv_hyperparams=None, + use_explicit_padding=False, + alpha=1.0, + min_depth=None, + conv_defs=None, + **kwargs): + """Instantiates the MobileNetV1 architecture, modified for object detection. 
+ + This wraps the MobileNetV1 tensorflow Keras application, but uses the + Keras application's kwargs-based monkey-patching API to override the Keras + architecture with the following changes: + + - Changes the default batchnorm momentum to 0.9997 + - Applies the Object Detection hyperparameter configuration + - Supports FreezableBatchNorms + - Adds support for a min number of filters for each layer + - Makes the `alpha` parameter affect the final convolution block even if it + is less than 1.0 + - Adds support for explicit padding of convolutions + - Makes the Input layer use a tf.placeholder_with_default instead of a + tf.placeholder, to work on TPUs. + + Args: + batchnorm_training: Bool. Assigned to Batch norm layer `training` param + when constructing `freezable_batch_norm.FreezableBatchNorm` layers. + default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, + batch norm layers will be constructed using this value as the momentum. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. Optionally set to `None` + to use default mobilenet_v1 layer builders. + use_explicit_padding: If True, use 'valid' padding for convolutions, + but explicitly pre-pads inputs so that the output dimensions are the + same as if 'same' padding were used. Off by default. + alpha: The width multiplier referenced in the MobileNetV1 paper. It + modifies the number of filters in each convolutional layer. + min_depth: Minimum number of filters in the convolutional layers. + conv_defs: Network layout to specify the mobilenet_v1 body. Default is + `None` to use the default mobilenet_v1 network layout. + **kwargs: Keyword arguments forwarded directly to the + `tf.keras.applications.Mobilenet` method that constructs the Keras + model. + + Returns: + A Keras model instance. + """ + layers_override = _LayersOverride( + batchnorm_training, + default_batchnorm_momentum=default_batchnorm_momentum, + conv_hyperparams=conv_hyperparams, + use_explicit_padding=use_explicit_padding, + min_depth=min_depth, + alpha=alpha, + conv_defs=conv_defs) + return tf.keras.applications.MobileNet( + alpha=alpha, layers=layers_override, **kwargs) +# pylint: enable=invalid-name diff --git a/models/research/object_detection/models/keras_models/mobilenet_v1_tf2_test.py b/models/research/object_detection/models/keras_models/mobilenet_v1_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..7e46999d9dfd2fc4ddcd2c432f5ecc2a07f3a9eb --- /dev/null +++ b/models/research/object_detection/models/keras_models/mobilenet_v1_tf2_test.py @@ -0,0 +1,256 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for mobilenet_v1.py. + +This test mainly focuses on comparing slim MobilenetV1 and Keras MobilenetV1 for +object detection. 
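# A hedged usage sketch, mirroring what this test file does below: build the
# detection flavour of MobileNetV1 defined above and read out one intermediate
# activation. The endpoint name 'conv_pw_13_relu' follows the Keras MobileNet
# naming scheme; the quoted shape comes from test_utils later in this diff.
import tensorflow.compat.v1 as tf
from object_detection.models.keras_models import mobilenet_v1

full_model = mobilenet_v1.mobilenet_v1(
    batchnorm_training=False, weights=None, include_top=False, alpha=1.0)
features = full_model.get_layer(name='conv_pw_13_relu').output
extractor = tf.keras.Model(inputs=full_model.inputs, outputs=features)

# For a 128x128 input this endpoint is [batch, 4, 4, 1024] (see
# moblenet_v1_expected_feature_map_shape_128 in test_utils).
out = extractor(tf.zeros([2, 128, 128, 3]))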
To verify the consistency of the two models, we compare: + 1. Output shape of each layer given different inputs + 2. Number of global variables + +We also visualize the model structure via Tensorboard, and compare the model +layout and the parameters of each Op to make sure the two implementations are +consistent. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import unittest +import numpy as np +from six.moves import zip +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format + +from object_detection.builders import hyperparams_builder +from object_detection.models.keras_models import mobilenet_v1 +from object_detection.models.keras_models import model_utils +from object_detection.models.keras_models import test_utils +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + +_KERAS_LAYERS_TO_CHECK = [ + 'conv1_relu', + 'conv_dw_1_relu', 'conv_pw_1_relu', + 'conv_dw_2_relu', 'conv_pw_2_relu', + 'conv_dw_3_relu', 'conv_pw_3_relu', + 'conv_dw_4_relu', 'conv_pw_4_relu', + 'conv_dw_5_relu', 'conv_pw_5_relu', + 'conv_dw_6_relu', 'conv_pw_6_relu', + 'conv_dw_7_relu', 'conv_pw_7_relu', + 'conv_dw_8_relu', 'conv_pw_8_relu', + 'conv_dw_9_relu', 'conv_pw_9_relu', + 'conv_dw_10_relu', 'conv_pw_10_relu', + 'conv_dw_11_relu', 'conv_pw_11_relu', + 'conv_dw_12_relu', 'conv_pw_12_relu', + 'conv_dw_13_relu', 'conv_pw_13_relu', +] + +_NUM_CHANNELS = 3 +_BATCH_SIZE = 2 + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class MobilenetV1Test(test_case.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: RELU_6 + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + batch_norm { + train: true, + scale: false, + center: true, + decay: 0.2, + epsilon: 0.1, + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def _create_application_with_layer_outputs( + self, layer_names, batchnorm_training, + conv_hyperparams=None, + use_explicit_padding=False, + alpha=1.0, + min_depth=None, + conv_defs=None): + """Constructs Keras MobilenetV1 that extracts intermediate layer outputs.""" + if not layer_names: + layer_names = _KERAS_LAYERS_TO_CHECK + full_model = mobilenet_v1.mobilenet_v1( + batchnorm_training=batchnorm_training, + conv_hyperparams=conv_hyperparams, + weights=None, + use_explicit_padding=use_explicit_padding, + alpha=alpha, + min_depth=min_depth, + conv_defs=conv_defs, + include_top=False) + layer_outputs = [full_model.get_layer(name=layer).output + for layer in layer_names] + return tf.keras.Model( + inputs=full_model.inputs, + outputs=layer_outputs) + + def _check_returns_correct_shape( + self, image_height, image_width, depth_multiplier, + expected_feature_map_shape, use_explicit_padding=False, min_depth=8, + layer_names=None, conv_defs=None): + model = self._create_application_with_layer_outputs( + layer_names=layer_names, + batchnorm_training=False, + use_explicit_padding=use_explicit_padding, + min_depth=min_depth, + alpha=depth_multiplier, + conv_defs=conv_defs) + + image_tensor = np.random.rand(_BATCH_SIZE, image_height, image_width, + _NUM_CHANNELS).astype(np.float32) + feature_maps = model(image_tensor) + + for feature_map, expected_shape in zip(feature_maps, + 
expected_feature_map_shape): + self.assertAllEqual(feature_map.shape, expected_shape) + + def _check_returns_correct_shapes_with_dynamic_inputs( + self, image_height, image_width, depth_multiplier, + expected_feature_map_shape, use_explicit_padding=False, min_depth=8, + layer_names=None): + image_tensor = tf.random_uniform([_BATCH_SIZE, image_height, image_width, + _NUM_CHANNELS], dtype=tf.float32) + model = self._create_application_with_layer_outputs( + layer_names=layer_names, + batchnorm_training=False, + use_explicit_padding=use_explicit_padding, + alpha=depth_multiplier) + + feature_maps = model(image_tensor) + + for feature_map, expected_shape in zip(feature_maps, + expected_feature_map_shape): + self.assertAllEqual(feature_map.shape, expected_shape) + + def _get_variables(self, depth_multiplier, layer_names=None): + tf.keras.backend.clear_session() + model = self._create_application_with_layer_outputs( + layer_names=layer_names, + batchnorm_training=False, use_explicit_padding=False, + alpha=depth_multiplier) + preprocessed_inputs = tf.random.uniform([2, 40, 40, 3]) + model(preprocessed_inputs) + return model.variables + + def test_returns_correct_shapes_128(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + expected_feature_map_shape = ( + test_utils.moblenet_v1_expected_feature_map_shape_128) + self._check_returns_correct_shape( + image_height, image_width, depth_multiplier, expected_feature_map_shape) + + def test_returns_correct_shapes_128_explicit_padding( + self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + expected_feature_map_shape = ( + test_utils.moblenet_v1_expected_feature_map_shape_128_explicit_padding) + self._check_returns_correct_shape( + image_height, image_width, depth_multiplier, expected_feature_map_shape, + use_explicit_padding=True) + + def test_returns_correct_shapes_with_dynamic_inputs( + self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + expected_feature_map_shape = ( + test_utils.mobilenet_v1_expected_feature_map_shape_with_dynamic_inputs) + self._check_returns_correct_shapes_with_dynamic_inputs( + image_height, image_width, depth_multiplier, expected_feature_map_shape) + + def test_returns_correct_shapes_299(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + expected_feature_map_shape = ( + test_utils.moblenet_v1_expected_feature_map_shape_299) + self._check_returns_correct_shape( + image_height, image_width, depth_multiplier, expected_feature_map_shape) + + def test_returns_correct_shapes_enforcing_min_depth( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 0.5**12 + expected_feature_map_shape = ( + test_utils.moblenet_v1_expected_feature_map_shape_enforcing_min_depth) + self._check_returns_correct_shape( + image_height, image_width, depth_multiplier, expected_feature_map_shape) + + def test_returns_correct_shapes_with_conv_defs( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + conv_def_block_12 = model_utils.ConvDefs( + conv_name='conv_pw_12', filters=512) + conv_def_block_13 = model_utils.ConvDefs( + conv_name='conv_pw_13', filters=256) + conv_defs = [conv_def_block_12, conv_def_block_13] + + expected_feature_map_shape = ( + test_utils.moblenet_v1_expected_feature_map_shape_with_conv_defs) + self._check_returns_correct_shape( + image_height, image_width, depth_multiplier, expected_feature_map_shape, + conv_defs=conv_defs) + + def test_hyperparam_override(self): + hyperparams = self._build_conv_hyperparams() + 
model = mobilenet_v1.mobilenet_v1( + batchnorm_training=True, + conv_hyperparams=hyperparams, + weights=None, + use_explicit_padding=False, + alpha=1.0, + min_depth=32, + include_top=False) + hyperparams.params() + bn_layer = model.get_layer(name='conv_pw_5_bn') + self.assertAllClose(bn_layer.momentum, 0.2) + self.assertAllClose(bn_layer.epsilon, 0.1) + + def test_variable_count(self): + depth_multiplier = 1 + variables = self._get_variables(depth_multiplier) + # 135 is the number of variables from slim MobilenetV1 model. + self.assertEqual(len(variables), 135) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/keras_models/mobilenet_v2.py b/models/research/object_detection/models/keras_models/mobilenet_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..b534cfbb182e333fc15d06b391b51ba8b3a03c4d --- /dev/null +++ b/models/research/object_detection/models/keras_models/mobilenet_v2.py @@ -0,0 +1,334 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A wrapper around the MobileNet v2 models for Keras, for object detection.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf + +from object_detection.core import freezable_batch_norm +from object_detection.models.keras_models import model_utils +from object_detection.utils import ops + + +# pylint: disable=invalid-name +# This method copied from the slim mobilenet base network code (same license) +def _make_divisible(v, divisor, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +class _LayersOverride(object): + """Alternative Keras layers interface for the Keras MobileNetV2.""" + + def __init__(self, + batchnorm_training, + default_batchnorm_momentum=0.999, + conv_hyperparams=None, + use_explicit_padding=False, + alpha=1.0, + min_depth=None, + conv_defs=None): + """Alternative tf.keras.layers interface, for use by the Keras MobileNetV2. + + It is used by the Keras applications kwargs injection API to + modify the Mobilenet v2 Keras application with changes required by + the Object Detection API. + + These injected interfaces make the following changes to the network: + + - Applies the Object Detection hyperparameter configuration + - Supports FreezableBatchNorms + - Adds support for a min number of filters for each layer + - Makes the `alpha` parameter affect the final convolution block even if it + is less than 1.0 + - Adds support for explicit padding of convolutions + + Args: + batchnorm_training: Bool. Assigned to Batch norm layer `training` param + when constructing `freezable_batch_norm.FreezableBatchNorm` layers. 
+ default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, + batch norm layers will be constructed using this value as the momentum. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. Optionally set to `None` + to use default mobilenet_v2 layer builders. + use_explicit_padding: If True, use 'valid' padding for convolutions, + but explicitly pre-pads inputs so that the output dimensions are the + same as if 'same' padding were used. Off by default. + alpha: The width multiplier referenced in the MobileNetV2 paper. It + modifies the number of filters in each convolutional layer. + min_depth: Minimum number of filters in the convolutional layers. + conv_defs: Network layout to specify the mobilenet_v2 body. Default is + `None` to use the default mobilenet_v2 network layout. + """ + self._alpha = alpha + self._batchnorm_training = batchnorm_training + self._default_batchnorm_momentum = default_batchnorm_momentum + self._conv_hyperparams = conv_hyperparams + self._use_explicit_padding = use_explicit_padding + self._min_depth = min_depth + self._conv_defs = conv_defs + self.regularizer = tf.keras.regularizers.l2(0.00004 * 0.5) + self.initializer = tf.truncated_normal_initializer(stddev=0.09) + + def _FixedPaddingLayer(self, kernel_size): + return tf.keras.layers.Lambda(lambda x: ops.fixed_padding(x, kernel_size)) + + def Conv2D(self, filters, **kwargs): + """Builds a Conv2D layer according to the current Object Detection config. + + Overrides the Keras MobileNetV2 application's convolutions with ones that + follow the spec specified by the Object Detection hyperparameters. + + Args: + filters: The number of filters to use for the convolution. + **kwargs: Keyword args specified by the Keras application for + constructing the convolution. + + Returns: + A one-arg callable that will either directly apply a Keras Conv2D layer to + the input argument, or that will first pad the input then apply a Conv2D + layer. + """ + # Make sure 'alpha' is always applied to the last convolution block's size + # (This overrides the Keras application's functionality) + layer_name = kwargs.get('name') + if layer_name == 'Conv_1': + if self._conv_defs: + filters = model_utils.get_conv_def(self._conv_defs, 'Conv_1') + else: + filters = 1280 + if self._alpha < 1.0: + filters = _make_divisible(filters * self._alpha, 8) + + # Apply the minimum depth to the convolution layers + if (self._min_depth and (filters < self._min_depth) + and not kwargs.get('name').endswith('expand')): + filters = self._min_depth + + if self._conv_hyperparams: + kwargs = self._conv_hyperparams.params(**kwargs) + else: + kwargs['kernel_regularizer'] = self.regularizer + kwargs['kernel_initializer'] = self.initializer + + kwargs['padding'] = 'same' + kernel_size = kwargs.get('kernel_size') + if self._use_explicit_padding and kernel_size > 1: + kwargs['padding'] = 'valid' + def padded_conv(features): + padded_features = self._FixedPaddingLayer(kernel_size)(features) + return tf.keras.layers.Conv2D(filters, **kwargs)(padded_features) + + return padded_conv + else: + return tf.keras.layers.Conv2D(filters, **kwargs) + + def DepthwiseConv2D(self, **kwargs): + """Builds a DepthwiseConv2D according to the Object Detection config. + + Overrides the Keras MobileNetV2 application's convolutions with ones that + follow the spec specified by the Object Detection hyperparameters. 
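# A worked example (not from the change above) of the Conv_1 handling in the
# Conv2D override: for alpha < 1.0 the stock 1280-filter final block is
# rescaled with _make_divisible(1280 * alpha, 8), so the width multiplier
# reaches the last convolution. The loop below just replays that rounding rule
# for a few sample multipliers.
for alpha, expected in [(0.35, 448), (0.75, 960), (0.14, 176)]:
  v = 1280 * alpha
  rounded = max(8, int(v + 4) // 8 * 8)  # round to the nearest multiple of 8
  if rounded < 0.9 * v:                  # never round down by more than 10%
    rounded += 8
  assert rounded == expected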
+ + Args: + **kwargs: Keyword args specified by the Keras application for + constructing the convolution. + + Returns: + A one-arg callable that will either directly apply a Keras DepthwiseConv2D + layer to the input argument, or that will first pad the input then apply + the depthwise convolution. + """ + if self._conv_hyperparams: + kwargs = self._conv_hyperparams.params(**kwargs) + # Both the regularizer and initializer apply to the depthwise layer in + # MobilenetV1, so we remap the kernel_* to depthwise_* here. + kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer'] + kwargs['depthwise_initializer'] = kwargs['kernel_initializer'] + else: + kwargs['depthwise_regularizer'] = self.regularizer + kwargs['depthwise_initializer'] = self.initializer + + kwargs['padding'] = 'same' + kernel_size = kwargs.get('kernel_size') + if self._use_explicit_padding and kernel_size > 1: + kwargs['padding'] = 'valid' + def padded_depthwise_conv(features): + padded_features = self._FixedPaddingLayer(kernel_size)(features) + return tf.keras.layers.DepthwiseConv2D(**kwargs)(padded_features) + + return padded_depthwise_conv + else: + return tf.keras.layers.DepthwiseConv2D(**kwargs) + + def BatchNormalization(self, **kwargs): + """Builds a normalization layer. + + Overrides the Keras application batch norm with the norm specified by the + Object Detection configuration. + + Args: + **kwargs: Only the name is used, all other params ignored. + Required for matching `layers.BatchNormalization` calls in the Keras + application. + + Returns: + A normalization layer specified by the Object Detection hyperparameter + configurations. + """ + name = kwargs.get('name') + if self._conv_hyperparams: + return self._conv_hyperparams.build_batch_norm( + training=self._batchnorm_training, + name=name) + else: + return freezable_batch_norm.FreezableBatchNorm( + training=self._batchnorm_training, + epsilon=1e-3, + momentum=self._default_batchnorm_momentum, + name=name) + + def Input(self, shape): + """Builds an Input layer. + + Overrides the Keras application Input layer with one that uses a + tf.placeholder_with_default instead of a tf.placeholder. This is necessary + to ensure the application works when run on a TPU. + + Args: + shape: The shape for the input layer to use. (Does not include a dimension + for the batch size). + Returns: + An input layer for the specified shape that internally uses a + placeholder_with_default. + """ + default_size = 224 + default_batch_size = 1 + shape = list(shape) + default_shape = [default_size if dim is None else dim for dim in shape] + + input_tensor = tf.constant(0.0, shape=[default_batch_size] + default_shape) + + placeholder_with_default = tf.placeholder_with_default( + input=input_tensor, shape=[None] + shape) + return model_utils.input_layer(shape, placeholder_with_default) + + # pylint: disable=unused-argument + def ReLU(self, *args, **kwargs): + """Builds an activation layer. + + Overrides the Keras application ReLU with the activation specified by the + Object Detection configuration. + + Args: + *args: Ignored, required to match the `tf.keras.ReLU` interface + **kwargs: Only the name is used, + required to match `tf.keras.ReLU` interface + + Returns: + An activation layer specified by the Object Detection hyperparameter + configurations. 
+ """ + name = kwargs.get('name') + if self._conv_hyperparams: + return self._conv_hyperparams.build_activation_layer(name=name) + else: + return tf.keras.layers.Lambda(tf.nn.relu6, name=name) + # pylint: enable=unused-argument + + # pylint: disable=unused-argument + def ZeroPadding2D(self, **kwargs): + """Replaces explicit padding in the Keras application with a no-op. + + Args: + **kwargs: Ignored, required to match the Keras applications usage. + + Returns: + A no-op identity lambda. + """ + return lambda x: x + # pylint: enable=unused-argument + + # Forward all non-overridden methods to the keras layers + def __getattr__(self, item): + return getattr(tf.keras.layers, item) + + +def mobilenet_v2(batchnorm_training, + default_batchnorm_momentum=0.9997, + conv_hyperparams=None, + use_explicit_padding=False, + alpha=1.0, + min_depth=None, + conv_defs=None, + **kwargs): + """Instantiates the MobileNetV2 architecture, modified for object detection. + + This wraps the MobileNetV2 tensorflow Keras application, but uses the + Keras application's kwargs-based monkey-patching API to override the Keras + architecture with the following changes: + + - Changes the default batchnorm momentum to 0.9997 + - Applies the Object Detection hyperparameter configuration + - Supports FreezableBatchNorms + - Adds support for a min number of filters for each layer + - Makes the `alpha` parameter affect the final convolution block even if it + is less than 1.0 + - Adds support for explicit padding of convolutions + - Makes the Input layer use a tf.placeholder_with_default instead of a + tf.placeholder, to work on TPUs. + + Args: + batchnorm_training: Bool. Assigned to Batch norm layer `training` param + when constructing `freezable_batch_norm.FreezableBatchNorm` layers. + default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, + batch norm layers will be constructed using this value as the momentum. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. Optionally set to `None` + to use default mobilenet_v2 layer builders. + use_explicit_padding: If True, use 'valid' padding for convolutions, + but explicitly pre-pads inputs so that the output dimensions are the + same as if 'same' padding were used. Off by default. + alpha: The width multiplier referenced in the MobileNetV2 paper. It + modifies the number of filters in each convolutional layer. + min_depth: Minimum number of filters in the convolutional layers. + conv_defs: Network layout to specify the mobilenet_v2 body. Default is + `None` to use the default mobilenet_v2 network layout. + **kwargs: Keyword arguments forwarded directly to the + `tf.keras.applications.MobilenetV2` method that constructs the Keras + model. + + Returns: + A Keras model instance. 
+ """ + layers_override = _LayersOverride( + batchnorm_training, + default_batchnorm_momentum=default_batchnorm_momentum, + conv_hyperparams=conv_hyperparams, + use_explicit_padding=use_explicit_padding, + min_depth=min_depth, + alpha=alpha, + conv_defs=conv_defs) + return tf.keras.applications.MobileNetV2(alpha=alpha, + layers=layers_override, + **kwargs) +# pylint: enable=invalid-name diff --git a/models/research/object_detection/models/keras_models/mobilenet_v2_tf2_test.py b/models/research/object_detection/models/keras_models/mobilenet_v2_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..2a53a9b63f28522197bc3daab29dab3a56dfb994 --- /dev/null +++ b/models/research/object_detection/models/keras_models/mobilenet_v2_tf2_test.py @@ -0,0 +1,250 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for mobilenet_v2.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import unittest +import numpy as np +from six.moves import zip +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format + +from object_detection.builders import hyperparams_builder +from object_detection.models.keras_models import mobilenet_v2 +from object_detection.models.keras_models import model_utils +from object_detection.models.keras_models import test_utils +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + +_layers_to_check = [ + 'Conv1_relu', + 'block_1_expand_relu', 'block_1_depthwise_relu', 'block_1_project_BN', + 'block_2_expand_relu', 'block_2_depthwise_relu', 'block_2_project_BN', + 'block_3_expand_relu', 'block_3_depthwise_relu', 'block_3_project_BN', + 'block_4_expand_relu', 'block_4_depthwise_relu', 'block_4_project_BN', + 'block_5_expand_relu', 'block_5_depthwise_relu', 'block_5_project_BN', + 'block_6_expand_relu', 'block_6_depthwise_relu', 'block_6_project_BN', + 'block_7_expand_relu', 'block_7_depthwise_relu', 'block_7_project_BN', + 'block_8_expand_relu', 'block_8_depthwise_relu', 'block_8_project_BN', + 'block_9_expand_relu', 'block_9_depthwise_relu', 'block_9_project_BN', + 'block_10_expand_relu', 'block_10_depthwise_relu', 'block_10_project_BN', + 'block_11_expand_relu', 'block_11_depthwise_relu', 'block_11_project_BN', + 'block_12_expand_relu', 'block_12_depthwise_relu', 'block_12_project_BN', + 'block_13_expand_relu', 'block_13_depthwise_relu', 'block_13_project_BN', + 'block_14_expand_relu', 'block_14_depthwise_relu', 'block_14_project_BN', + 'block_15_expand_relu', 'block_15_depthwise_relu', 'block_15_project_BN', + 'block_16_expand_relu', 'block_16_depthwise_relu', 'block_16_project_BN', + 'out_relu'] + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class MobilenetV2Test(test_case.TestCase): 
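# A hedged sketch of the conv_defs customisation exercised by the tests in
# this class: a ConvDefs entry does not rename layers, it only overrides the
# filter count of the named convolution. With the override below, the final
# 'out_relu' feature map of MobileNetV2 is 256 channels deep instead of 1280.
# Illustrative only, assuming the modules above are importable.
from object_detection.models.keras_models import mobilenet_v2
from object_detection.models.keras_models import model_utils

conv_defs = [model_utils.ConvDefs(conv_name='Conv_1', filters=256)]
model = mobilenet_v2.mobilenet_v2(
    batchnorm_training=False, weights=None, include_top=False,
    conv_defs=conv_defs)
# model.get_layer('out_relu').output now has depth 256.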
+ + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: RELU_6 + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + batch_norm { + train: true, + scale: false, + center: true, + decay: 0.2, + epsilon: 0.1, + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def _create_application_with_layer_outputs( + self, layer_names, batchnorm_training, + conv_hyperparams=None, + use_explicit_padding=False, + alpha=1.0, + min_depth=None, + conv_defs=None): + """Constructs Keras mobilenetv2 that extracts intermediate layer outputs.""" + # Have to clear the Keras backend to ensure isolation in layer naming + tf.keras.backend.clear_session() + if not layer_names: + layer_names = _layers_to_check + full_model = mobilenet_v2.mobilenet_v2( + batchnorm_training=batchnorm_training, + conv_hyperparams=conv_hyperparams, + weights=None, + use_explicit_padding=use_explicit_padding, + alpha=alpha, + min_depth=min_depth, + include_top=False, + conv_defs=conv_defs) + layer_outputs = [full_model.get_layer(name=layer).output + for layer in layer_names] + return tf.keras.Model( + inputs=full_model.inputs, + outputs=layer_outputs) + + def _check_returns_correct_shape( + self, batch_size, image_height, image_width, depth_multiplier, + expected_feature_map_shapes, use_explicit_padding=False, min_depth=None, + layer_names=None, conv_defs=None): + model = self._create_application_with_layer_outputs( + layer_names=layer_names, + batchnorm_training=False, + use_explicit_padding=use_explicit_padding, + min_depth=min_depth, + alpha=depth_multiplier, + conv_defs=conv_defs) + + image_tensor = np.random.rand(batch_size, image_height, image_width, + 3).astype(np.float32) + feature_maps = model([image_tensor]) + + for feature_map, expected_shape in zip(feature_maps, + expected_feature_map_shapes): + self.assertAllEqual(feature_map.shape, expected_shape) + + def _check_returns_correct_shapes_with_dynamic_inputs( + self, batch_size, image_height, image_width, depth_multiplier, + expected_feature_map_shapes, use_explicit_padding=False, + layer_names=None): + height = tf.random.uniform([], minval=image_height, maxval=image_height+1, + dtype=tf.int32) + width = tf.random.uniform([], minval=image_width, maxval=image_width+1, + dtype=tf.int32) + image_tensor = tf.random.uniform([batch_size, height, width, + 3], dtype=tf.float32) + model = self._create_application_with_layer_outputs( + layer_names=layer_names, + batchnorm_training=False, use_explicit_padding=use_explicit_padding, + alpha=depth_multiplier) + feature_maps = model(image_tensor) + for feature_map, expected_shape in zip(feature_maps, + expected_feature_map_shapes): + self.assertAllEqual(feature_map.shape, expected_shape) + + def _get_variables(self, depth_multiplier, layer_names=None): + tf.keras.backend.clear_session() + model = self._create_application_with_layer_outputs( + layer_names=layer_names, + batchnorm_training=False, use_explicit_padding=False, + alpha=depth_multiplier) + preprocessed_inputs = tf.random.uniform([2, 40, 40, 3]) + model(preprocessed_inputs) + return model.variables + + def test_returns_correct_shapes_128(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + expected_feature_map_shape = ( + test_utils.moblenet_v2_expected_feature_map_shape_128) + + self._check_returns_correct_shape( + 2, image_height, 
image_width, depth_multiplier, + expected_feature_map_shape) + + def test_returns_correct_shapes_128_explicit_padding( + self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + expected_feature_map_shape = ( + test_utils.moblenet_v2_expected_feature_map_shape_128_explicit_padding) + self._check_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, + expected_feature_map_shape, use_explicit_padding=True) + + def test_returns_correct_shapes_with_dynamic_inputs( + self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + expected_feature_map_shape = ( + test_utils.mobilenet_v2_expected_feature_map_shape_with_dynamic_inputs) + self._check_returns_correct_shapes_with_dynamic_inputs( + 2, image_height, image_width, depth_multiplier, + expected_feature_map_shape) + + def test_returns_correct_shapes_299(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + expected_feature_map_shape = ( + test_utils.moblenet_v2_expected_feature_map_shape_299) + self._check_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, + expected_feature_map_shape) + + def test_returns_correct_shapes_enforcing_min_depth( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 0.5**12 + expected_feature_map_shape = ( + test_utils.moblenet_v2_expected_feature_map_shape_enforcing_min_depth) + self._check_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, + expected_feature_map_shape, min_depth=32) + + def test_returns_correct_shapes_with_conv_defs( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + conv_1 = model_utils.ConvDefs( + conv_name='Conv_1', filters=256) + conv_defs = [conv_1] + + expected_feature_map_shape = ( + test_utils.moblenet_v2_expected_feature_map_shape_with_conv_defs) + self._check_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, + expected_feature_map_shape, conv_defs=conv_defs) + + def test_hyperparam_override(self): + hyperparams = self._build_conv_hyperparams() + model = mobilenet_v2.mobilenet_v2( + batchnorm_training=True, + conv_hyperparams=hyperparams, + weights=None, + use_explicit_padding=False, + alpha=1.0, + min_depth=32, + include_top=False) + hyperparams.params() + bn_layer = model.get_layer(name='block_5_project_BN') + self.assertAllClose(bn_layer.momentum, 0.2) + self.assertAllClose(bn_layer.epsilon, 0.1) + + def test_variable_count(self): + depth_multiplier = 1 + variables = self._get_variables(depth_multiplier) + self.assertEqual(len(variables), 260) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/keras_models/model_utils.py b/models/research/object_detection/models/keras_models/model_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..77f3cbd15d7981bd30876b1aebd62f8372a04288 --- /dev/null +++ b/models/research/object_detection/models/keras_models/model_utils.py @@ -0,0 +1,53 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Utils for Keras models.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import tensorflow.compat.v1 as tf + +# This is to specify the custom config of model structures. For example, +# ConvDefs(conv_name='conv_pw_12', filters=512) for Mobilenet V1 is to specify +# the filters of the conv layer with name 'conv_pw_12' as 512.s +ConvDefs = collections.namedtuple('ConvDefs', ['conv_name', 'filters']) + + +def get_conv_def(conv_defs, layer_name): + """Get the custom config for some layer of the model structure. + + Args: + conv_defs: A named tuple to specify the custom config of the model + network. See `ConvDefs` for details. + layer_name: A string, the name of the layer to be customized. + + Returns: + The number of filters for the layer, or `None` if there is no custom + config for the requested layer. + """ + for conv_def in conv_defs: + if layer_name == conv_def.conv_name: + return conv_def.filters + return None + + +def input_layer(shape, placeholder_with_default): + if tf.executing_eagerly(): + return tf.keras.layers.Input(shape=shape) + else: + return tf.keras.layers.Input(tensor=placeholder_with_default) diff --git a/models/research/object_detection/models/keras_models/resnet_v1.py b/models/research/object_detection/models/keras_models/resnet_v1.py new file mode 100644 index 0000000000000000000000000000000000000000..d5426ad6b5e499171dbd955dc9c3fe465c4b6051 --- /dev/null +++ b/models/research/object_detection/models/keras_models/resnet_v1.py @@ -0,0 +1,397 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A wrapper around the Keras Resnet V1 models for object detection.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf + +from object_detection.core import freezable_batch_norm +from object_detection.models.keras_models import model_utils + + +def _fixed_padding(inputs, kernel_size, rate=1): # pylint: disable=invalid-name + """Pads the input along the spatial dimensions independently of input size. + + Pads the input such that if it was used in a convolution with 'VALID' padding, + the output would have the same dimensions as if the unpadded input was used + in a convolution with 'SAME' padding. + + Args: + inputs: A tensor of size [batch, height_in, width_in, channels]. + kernel_size: The kernel to be used in the conv2d or max_pool2d operation. + rate: An integer, rate for atrous convolution. + + Returns: + output: A tensor of size [batch, height_out, width_out, channels] with the + input, either intact (if kernel_size == 1) or padded (if kernel_size > 1). 
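# A small sketch (not from the change above) checking the arithmetic that
# _fixed_padding relies on: padding a kernel-3, stride-2 convolution by
# (kernel_size - 1) // 2 on each side and running it with VALID padding gives
# the same spatial size as SAME padding on the unpadded input. Sizes below are
# illustrative; the layer weights are random.
import tensorflow.compat.v1 as tf

kernel_size = 3
pad_total = kernel_size - 1                        # rate == 1
pad_beg, pad_end = pad_total // 2, pad_total - pad_total // 2

images = tf.zeros([1, 65, 65, 3])
padded = tf.pad(images,
                [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])

same_out = tf.keras.layers.Conv2D(8, kernel_size, strides=2,
                                  padding='same')(images)
valid_out = tf.keras.layers.Conv2D(8, kernel_size, strides=2,
                                   padding='valid')(padded)
# Both are [1, 33, 33, 8]: ceil(65 / 2) == ((65 + 2) - 3) // 2 + 1 == 33.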
+ """ + kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1) + pad_total = kernel_size_effective - 1 + pad_beg = pad_total // 2 + pad_end = pad_total - pad_beg + padded_inputs = tf.pad( + inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]) + return padded_inputs + + +class _LayersOverride(object): + """Alternative Keras layers interface for the Keras Resnet V1.""" + + def __init__(self, + batchnorm_training, + batchnorm_scale=True, + default_batchnorm_momentum=0.997, + default_batchnorm_epsilon=1e-5, + weight_decay=0.0001, + conv_hyperparams=None, + min_depth=8, + depth_multiplier=1): + """Alternative tf.keras.layers interface, for use by the Keras Resnet V1. + + The class is used by the Keras applications kwargs injection API to + modify the Resnet V1 Keras application with changes required by + the Object Detection API. + + Args: + batchnorm_training: Bool. Assigned to Batch norm layer `training` param + when constructing `freezable_batch_norm.FreezableBatchNorm` layers. + batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale + the activations in the batch normalization layer. + default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, + batch norm layers will be constructed using this value as the momentum. + default_batchnorm_epsilon: Float. When 'conv_hyperparams' is None, + batch norm layers will be constructed using this value as the epsilon. + weight_decay: The weight decay to use for regularizing the model. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. Optionally set to `None` + to use default resnet_v1 layer builders. + min_depth: Minimum number of filters in the convolutional layers. + depth_multiplier: The depth multiplier to modify the number of filters + in the convolutional layers. + """ + self._batchnorm_training = batchnorm_training + self._batchnorm_scale = batchnorm_scale + self._default_batchnorm_momentum = default_batchnorm_momentum + self._default_batchnorm_epsilon = default_batchnorm_epsilon + self._conv_hyperparams = conv_hyperparams + self._min_depth = min_depth + self._depth_multiplier = depth_multiplier + self.regularizer = tf.keras.regularizers.l2(weight_decay) + self.initializer = tf.variance_scaling_initializer() + + def _FixedPaddingLayer(self, kernel_size, rate=1): + return tf.keras.layers.Lambda( + lambda x: _fixed_padding(x, kernel_size, rate)) + + def Conv2D(self, filters, kernel_size, **kwargs): + """Builds a Conv2D layer according to the current Object Detection config. + + Overrides the Keras Resnet application's convolutions with ones that + follow the spec specified by the Object Detection hyperparameters. + + Args: + filters: The number of filters to use for the convolution. + kernel_size: The kernel size to specify the height and width of the 2D + convolution window. + **kwargs: Keyword args specified by the Keras application for + constructing the convolution. + + Returns: + A one-arg callable that will either directly apply a Keras Conv2D layer to + the input argument, or that will first pad the input then apply a Conv2D + layer. + """ + # Apply the minimum depth to the convolution layers. 
+ filters = max(int(filters * self._depth_multiplier), self._min_depth) + + if self._conv_hyperparams: + kwargs = self._conv_hyperparams.params(**kwargs) + else: + kwargs['kernel_regularizer'] = self.regularizer + kwargs['kernel_initializer'] = self.initializer + + # Set use_bias as false to keep it consistent with Slim Resnet model. + kwargs['use_bias'] = False + + kwargs['padding'] = 'same' + stride = kwargs.get('strides') + if stride and kernel_size and stride > 1 and kernel_size > 1: + kwargs['padding'] = 'valid' + def padded_conv(features): # pylint: disable=invalid-name + padded_features = self._FixedPaddingLayer(kernel_size)(features) + return tf.keras.layers.Conv2D( + filters, kernel_size, **kwargs)(padded_features) + return padded_conv + else: + return tf.keras.layers.Conv2D(filters, kernel_size, **kwargs) + + def Activation(self, *args, **kwargs): # pylint: disable=unused-argument + """Builds an activation layer. + + Overrides the Keras application Activation layer specified by the + Object Detection configuration. + + Args: + *args: Ignored, + required to match the `tf.keras.layers.Activation` interface. + **kwargs: Only the name is used, + required to match `tf.keras.layers.Activation` interface. + + Returns: + An activation layer specified by the Object Detection hyperparameter + configurations. + """ + name = kwargs.get('name') + if self._conv_hyperparams: + return self._conv_hyperparams.build_activation_layer(name=name) + else: + return tf.keras.layers.Lambda(tf.nn.relu, name=name) + + def BatchNormalization(self, **kwargs): + """Builds a normalization layer. + + Overrides the Keras application batch norm with the norm specified by the + Object Detection configuration. + + Args: + **kwargs: Only the name is used, all other params ignored. + Required for matching `layers.BatchNormalization` calls in the Keras + application. + + Returns: + A normalization layer specified by the Object Detection hyperparameter + configurations. + """ + name = kwargs.get('name') + if self._conv_hyperparams: + return self._conv_hyperparams.build_batch_norm( + training=self._batchnorm_training, + name=name) + else: + kwargs['scale'] = self._batchnorm_scale + kwargs['epsilon'] = self._default_batchnorm_epsilon + return freezable_batch_norm.FreezableBatchNorm( + training=self._batchnorm_training, + momentum=self._default_batchnorm_momentum, + **kwargs) + + def Input(self, shape): + """Builds an Input layer. + + Overrides the Keras application Input layer with one that uses a + tf.placeholder_with_default instead of a tf.placeholder. This is necessary + to ensure the application works when run on a TPU. + + Args: + shape: A tuple of integers representing the shape of the input, which + includes both spatial share and channels, but not the batch size. + Elements of this tuple can be None; 'None' elements represent dimensions + where the shape is not known. + + Returns: + An input layer for the specified shape that internally uses a + placeholder_with_default. + """ + default_size = 224 + default_batch_size = 1 + shape = list(shape) + default_shape = [default_size if dim is None else dim for dim in shape] + + input_tensor = tf.constant(0.0, shape=[default_batch_size] + default_shape) + + placeholder_with_default = tf.placeholder_with_default( + input=input_tensor, shape=[None] + shape) + return model_utils.input_layer(shape, placeholder_with_default) + + def MaxPooling2D(self, pool_size, **kwargs): + """Builds a MaxPooling2D layer with default padding as 'SAME'. 
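# A quick worked example (not from the change above) of the depth clamp in the
# ResNet Conv2D override, filters = max(int(filters * depth_multiplier),
# min_depth), with min_depth at its default of 8:
for filters, multiplier, expected in [(64, 1.0, 64), (64, 0.25, 16),
                                      (64, 0.0625, 8)]:
  assert max(int(filters * multiplier), 8) == expected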
+ + This is specified by the default resnet arg_scope in slim. + + Args: + pool_size: The pool size specified by the Keras application. + **kwargs: Ignored, required to match the Keras applications usage. + + Returns: + A MaxPooling2D layer with default padding as 'SAME'. + """ + kwargs['padding'] = 'same' + return tf.keras.layers.MaxPooling2D(pool_size, **kwargs) + + # Add alias as Keras also has it. + MaxPool2D = MaxPooling2D # pylint: disable=invalid-name + + def ZeroPadding2D(self, padding, **kwargs): # pylint: disable=unused-argument + """Replaces explicit padding in the Keras application with a no-op. + + Args: + padding: The padding values for image height and width. + **kwargs: Ignored, required to match the Keras applications usage. + + Returns: + A no-op identity lambda. + """ + return lambda x: x + + # Forward all non-overridden methods to the keras layers + def __getattr__(self, item): + return getattr(tf.keras.layers, item) + + +# pylint: disable=invalid-name +def resnet_v1_50(batchnorm_training, + batchnorm_scale=True, + default_batchnorm_momentum=0.997, + default_batchnorm_epsilon=1e-5, + weight_decay=0.0001, + conv_hyperparams=None, + min_depth=8, + depth_multiplier=1, + **kwargs): + """Instantiates the Resnet50 architecture, modified for object detection. + + Args: + batchnorm_training: Bool. Assigned to Batch norm layer `training` param + when constructing `freezable_batch_norm.FreezableBatchNorm` layers. + batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale + the activations in the batch normalization layer. + default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, + batch norm layers will be constructed using this value as the momentum. + default_batchnorm_epsilon: Float. When 'conv_hyperparams' is None, + batch norm layers will be constructed using this value as the epsilon. + weight_decay: The weight decay to use for regularizing the model. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. Optionally set to `None` + to use default resnet_v1 layer builders. + min_depth: Minimum number of filters in the convolutional layers. + depth_multiplier: The depth multiplier to modify the number of filters + in the convolutional layers. + **kwargs: Keyword arguments forwarded directly to the + `tf.keras.applications.resnet.ResNet50` method that constructs the Keras + model. + + Returns: + A Keras ResnetV1-50 model instance. + """ + layers_override = _LayersOverride( + batchnorm_training, + batchnorm_scale=batchnorm_scale, + default_batchnorm_momentum=default_batchnorm_momentum, + default_batchnorm_epsilon=default_batchnorm_epsilon, + conv_hyperparams=conv_hyperparams, + weight_decay=weight_decay, + min_depth=min_depth, + depth_multiplier=depth_multiplier) + return tf.keras.applications.resnet.ResNet50( + layers=layers_override, **kwargs) + + +def resnet_v1_101(batchnorm_training, + batchnorm_scale=True, + default_batchnorm_momentum=0.997, + default_batchnorm_epsilon=1e-5, + weight_decay=0.0001, + conv_hyperparams=None, + min_depth=8, + depth_multiplier=1, + **kwargs): + """Instantiates the Resnet101 architecture, modified for object detection. + + Args: + batchnorm_training: Bool. Assigned to Batch norm layer `training` param + when constructing `freezable_batch_norm.FreezableBatchNorm` layers. + batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale + the activations in the batch normalization layer. + default_batchnorm_momentum: Float.
When 'conv_hyperparams' is None, + batch norm layers will be constructed using this value as the momentum. + default_batchnorm_epsilon: Float. When 'conv_hyperparams' is None, + batch norm layers will be constructed using this value as the epsilon. + weight_decay: The weight decay to use for regularizing the model. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. Optionally set to `None` + to use default resnet_v1 layer builders. + min_depth: Minimum number of filters in the convolutional layers. + depth_multiplier: The depth multiplier to modify the number of filters + in the convolutional layers. + **kwargs: Keyword arguments forwarded directly to the + `tf.keras.applications.resnet.ResNet101` method that constructs the Keras + model. + + Returns: + A Keras ResnetV1-101 model instance. + """ + layers_override = _LayersOverride( + batchnorm_training, + batchnorm_scale=batchnorm_scale, + default_batchnorm_momentum=default_batchnorm_momentum, + default_batchnorm_epsilon=default_batchnorm_epsilon, + conv_hyperparams=conv_hyperparams, + weight_decay=weight_decay, + min_depth=min_depth, + depth_multiplier=depth_multiplier) + return tf.keras.applications.resnet.ResNet101( + layers=layers_override, **kwargs) + + +def resnet_v1_152(batchnorm_training, + batchnorm_scale=True, + default_batchnorm_momentum=0.997, + default_batchnorm_epsilon=1e-5, + weight_decay=0.0001, + conv_hyperparams=None, + min_depth=8, + depth_multiplier=1, + **kwargs): + """Instantiates the Resnet152 architecture, modified for object detection. + + Args: + batchnorm_training: Bool. Assigned to Batch norm layer `training` param + when constructing `freezable_batch_norm.FreezableBatchNorm` layers. + batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale + the activations in the batch normalization layer. + default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, + batch norm layers will be constructed using this value as the momentum. + default_batchnorm_epsilon: Float. When 'conv_hyperparams' is None, + batch norm layers will be constructed using this value as the epsilon. + weight_decay: The weight decay to use for regularizing the model. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. Optionally set to `None` + to use default resnet_v1 layer builders. + min_depth: Minimum number of filters in the convolutional layers. + depth_multiplier: The depth multiplier to modify the number of filters + in the convolutional layers. + **kwargs: Keyword arguments forwarded directly to the + `tf.keras.applications.resnet.ResNet152` method that constructs the Keras + model. + + Returns: + A Keras ResnetV1-152 model instance.
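# A hedged usage sketch for the three builders above, mirroring the resnet_v1
# test that follows: construct the detection flavour of ResNet-50 and read out
# an intermediate block. The endpoint name 'conv4_block6_out' follows the
# Keras ResNet naming scheme.
import tensorflow.compat.v1 as tf
from object_detection.models.keras_models import resnet_v1

full_model = resnet_v1.resnet_v1_50(
    batchnorm_training=False, weights=None, include_top=False)
c4 = full_model.get_layer(name='conv4_block6_out').output
extractor = tf.keras.Model(inputs=full_model.inputs, outputs=c4)

# For a 224x224 input this block is [batch, 14, 14, 1024], matching the
# expected shapes listed in the test below.
features = extractor(tf.zeros([4, 224, 224, 3]))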
+ """ + layers_override = _LayersOverride( + batchnorm_training, + batchnorm_scale=batchnorm_scale, + default_batchnorm_momentum=default_batchnorm_momentum, + default_batchnorm_epsilon=default_batchnorm_epsilon, + conv_hyperparams=conv_hyperparams, + weight_decay=weight_decay, + min_depth=min_depth, + depth_multiplier=depth_multiplier) + return tf.keras.applications.resnet.ResNet152( + layers=layers_override, **kwargs) +# pylint: enable=invalid-name diff --git a/models/research/object_detection/models/keras_models/resnet_v1_tf2_test.py b/models/research/object_detection/models/keras_models/resnet_v1_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..71cc5f22bd994b6432957bf5b34837f829c9b8da --- /dev/null +++ b/models/research/object_detection/models/keras_models/resnet_v1_tf2_test.py @@ -0,0 +1,184 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for resnet_v1.py. + +This test mainly focuses on comparing slim resnet v1 and Keras resnet v1 for +object detection. To verify the consistency of the two models, we compare: + 1. Output shape of each layer given different inputs. + 2. Number of global variables. 
+""" +import unittest +import numpy as np +from six.moves import zip +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format + +from object_detection.builders import hyperparams_builder +from object_detection.models.keras_models import resnet_v1 +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + +_EXPECTED_SHAPES_224_RESNET50 = { + 'conv2_block3_out': (4, 56, 56, 256), + 'conv3_block4_out': (4, 28, 28, 512), + 'conv4_block6_out': (4, 14, 14, 1024), + 'conv5_block3_out': (4, 7, 7, 2048), +} + +_EXPECTED_SHAPES_224_RESNET101 = { + 'conv2_block3_out': (4, 56, 56, 256), + 'conv3_block4_out': (4, 28, 28, 512), + 'conv4_block23_out': (4, 14, 14, 1024), + 'conv5_block3_out': (4, 7, 7, 2048), +} + +_EXPECTED_SHAPES_224_RESNET152 = { + 'conv2_block3_out': (4, 56, 56, 256), + 'conv3_block8_out': (4, 28, 28, 512), + 'conv4_block36_out': (4, 14, 14, 1024), + 'conv5_block3_out': (4, 7, 7, 2048), +} + +_RESNET_NAMES = ['resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152'] +_RESNET_MODELS = [ + resnet_v1.resnet_v1_50, resnet_v1.resnet_v1_101, resnet_v1.resnet_v1_152 +] +_RESNET_SHAPES = [ + _EXPECTED_SHAPES_224_RESNET50, _EXPECTED_SHAPES_224_RESNET101, + _EXPECTED_SHAPES_224_RESNET152 +] + +_NUM_CHANNELS = 3 +_BATCH_SIZE = 4 + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class ResnetV1Test(test_case.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.0004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.997, + epsilon: 0.001, + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def _create_application_with_layer_outputs(self, + model_index, + batchnorm_training, + batchnorm_scale=True, + weight_decay=0.0001, + default_batchnorm_momentum=0.997, + default_batchnorm_epsilon=1e-5): + """Constructs Keras resnet_v1 that extracts layer outputs.""" + # Have to clear the Keras backend to ensure isolation in layer naming + tf.keras.backend.clear_session() + layer_names = _RESNET_SHAPES[model_index].keys() + full_model = _RESNET_MODELS[model_index]( + batchnorm_training=batchnorm_training, + weights=None, + batchnorm_scale=batchnorm_scale, + weight_decay=weight_decay, + default_batchnorm_momentum=default_batchnorm_momentum, + default_batchnorm_epsilon=default_batchnorm_epsilon, + include_top=False) + + layer_outputs = [ + full_model.get_layer(name=layer).output for layer in layer_names + ] + return tf.keras.Model(inputs=full_model.inputs, outputs=layer_outputs) + + def _check_returns_correct_shape(self, + image_height, + image_width, + model_index, + expected_feature_map_shape, + batchnorm_training=True, + batchnorm_scale=True, + weight_decay=0.0001, + default_batchnorm_momentum=0.997, + default_batchnorm_epsilon=1e-5): + model = self._create_application_with_layer_outputs( + model_index=model_index, + batchnorm_training=batchnorm_training, + batchnorm_scale=batchnorm_scale, + weight_decay=weight_decay, + default_batchnorm_momentum=default_batchnorm_momentum, + default_batchnorm_epsilon=default_batchnorm_epsilon) + + image_tensor = np.random.rand(_BATCH_SIZE, image_height, image_width, + _NUM_CHANNELS).astype(np.float32) + feature_maps = 
model(image_tensor) + layer_names = _RESNET_SHAPES[model_index].keys() + for feature_map, layer_name in zip(feature_maps, layer_names): + expected_shape = _RESNET_SHAPES[model_index][layer_name] + self.assertAllEqual(feature_map.shape, expected_shape) + + def _get_variables(self, model_index): + tf.keras.backend.clear_session() + model = self._create_application_with_layer_outputs( + model_index, batchnorm_training=False) + preprocessed_inputs = tf.random.uniform([2, 40, 40, _NUM_CHANNELS]) + model(preprocessed_inputs) + return model.variables + + def test_returns_correct_shapes_224(self): + image_height = 224 + image_width = 224 + for model_index, _ in enumerate(_RESNET_NAMES): + expected_feature_map_shape = _RESNET_SHAPES[model_index] + self._check_returns_correct_shape(image_height, image_width, model_index, + expected_feature_map_shape) + + def test_hyperparam_override(self): + for model_name in _RESNET_MODELS: + model = model_name( + batchnorm_training=True, + default_batchnorm_momentum=0.2, + default_batchnorm_epsilon=0.1, + weights=None, + include_top=False) + bn_layer = model.get_layer(name='conv1_bn') + self.assertAllClose(bn_layer.momentum, 0.2) + self.assertAllClose(bn_layer.epsilon, 0.1) + + def test_variable_count(self): + # The number of variables from slim resnetv1-* model. + variable_nums = [265, 520, 775] + for model_index, var_num in enumerate(variable_nums): + variables = self._get_variables(model_index) + self.assertEqual(len(variables), var_num) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/keras_models/test_utils.py b/models/research/object_detection/models/keras_models/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0669b6c697f81ca982070983892cec01e894fad1 --- /dev/null +++ b/models/research/object_detection/models/keras_models/test_utils.py @@ -0,0 +1,214 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Test utils for other test files.""" + +# import tensorflow as tf +# +# from nets import mobilenet_v1 +# +# slim = tf.contrib.slim +# +# # Layer names of Slim to map Keras layer names in MobilenetV1 +# _MOBLIENET_V1_SLIM_ENDPOINTS = [ +# 'Conv2d_0', +# 'Conv2d_1_depthwise', 'Conv2d_1_pointwise', +# 'Conv2d_2_depthwise', 'Conv2d_2_pointwise', +# 'Conv2d_3_depthwise', 'Conv2d_3_pointwise', +# 'Conv2d_4_depthwise', 'Conv2d_4_pointwise', +# 'Conv2d_5_depthwise', 'Conv2d_5_pointwise', +# 'Conv2d_6_depthwise', 'Conv2d_6_pointwise', +# 'Conv2d_7_depthwise', 'Conv2d_7_pointwise', +# 'Conv2d_8_depthwise', 'Conv2d_8_pointwise', +# 'Conv2d_9_depthwise', 'Conv2d_9_pointwise', +# 'Conv2d_10_depthwise', 'Conv2d_10_pointwise', +# 'Conv2d_11_depthwise', 'Conv2d_11_pointwise', +# 'Conv2d_12_depthwise', 'Conv2d_12_pointwise', +# 'Conv2d_13_depthwise', 'Conv2d_13_pointwise' +# ] +# +# +# # Function to get the output shape of each layer in Slim. 
It's used to +# # generate the following constant expected_feature_map_shape for MobilenetV1. +# # Similarly, this can also apply to MobilenetV2. +# def _get_slim_endpoint_shapes(inputs, depth_multiplier=1.0, min_depth=8, +# use_explicit_padding=False): +# with slim.arg_scope([slim.conv2d, slim.separable_conv2d], +# normalizer_fn=slim.batch_norm): +# _, end_points = mobilenet_v1.mobilenet_v1_base( +# inputs, final_endpoint='Conv2d_13_pointwise', +# depth_multiplier=depth_multiplier, min_depth=min_depth, +# use_explicit_padding=use_explicit_padding) +# return [end_points[endpoint_name].get_shape() +# for endpoint_name in _MOBLIENET_V1_SLIM_ENDPOINTS] + + +# For Mobilenet V1 +moblenet_v1_expected_feature_map_shape_128 = [ + (2, 64, 64, 32), (2, 64, 64, 32), (2, 64, 64, 64), (2, 32, 32, 64), + (2, 32, 32, 128), (2, 32, 32, 128), (2, 32, 32, 128), (2, 16, 16, 128), + (2, 16, 16, 256), (2, 16, 16, 256), (2, 16, 16, 256), (2, 8, 8, 256), + (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), + (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), + (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 4, 4, 512), + (2, 4, 4, 1024), (2, 4, 4, 1024), (2, 4, 4, 1024), +] + +moblenet_v1_expected_feature_map_shape_128_explicit_padding = [ + (2, 64, 64, 32), (2, 64, 64, 32), (2, 64, 64, 64), (2, 32, 32, 64), + (2, 32, 32, 128), (2, 32, 32, 128), (2, 32, 32, 128), (2, 16, 16, 128), + (2, 16, 16, 256), (2, 16, 16, 256), (2, 16, 16, 256), (2, 8, 8, 256), + (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), + (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), + (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 4, 4, 512), + (2, 4, 4, 1024), (2, 4, 4, 1024), (2, 4, 4, 1024), +] + +mobilenet_v1_expected_feature_map_shape_with_dynamic_inputs = [ + (2, 64, 64, 32), (2, 64, 64, 32), (2, 64, 64, 64), (2, 32, 32, 64), + (2, 32, 32, 128), (2, 32, 32, 128), (2, 32, 32, 128), (2, 16, 16, 128), + (2, 16, 16, 256), (2, 16, 16, 256), (2, 16, 16, 256), (2, 8, 8, 256), + (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), + (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), + (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 4, 4, 512), + (2, 4, 4, 1024), (2, 4, 4, 1024), (2, 4, 4, 1024), +] + +moblenet_v1_expected_feature_map_shape_299 = [ + (2, 150, 150, 32), (2, 150, 150, 32), (2, 150, 150, 64), (2, 75, 75, 64), + (2, 75, 75, 128), (2, 75, 75, 128), (2, 75, 75, 128), (2, 38, 38, 128), + (2, 38, 38, 256), (2, 38, 38, 256), (2, 38, 38, 256), (2, 19, 19, 256), + (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), + (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), + (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 10, 10, 512), + (2, 10, 10, 1024), (2, 10, 10, 1024), (2, 10, 10, 1024), +] + +moblenet_v1_expected_feature_map_shape_enforcing_min_depth = [ + (2, 150, 150, 8), (2, 150, 150, 8), (2, 150, 150, 8), (2, 75, 75, 8), + (2, 75, 75, 8), (2, 75, 75, 8), (2, 75, 75, 8), (2, 38, 38, 8), + (2, 38, 38, 8), (2, 38, 38, 8), (2, 38, 38, 8), (2, 19, 19, 8), + (2, 19, 19, 8), (2, 19, 19, 8), (2, 19, 19, 8), (2, 19, 19, 8), + (2, 19, 19, 8), (2, 19, 19, 8), (2, 19, 19, 8), (2, 19, 19, 8), + (2, 19, 19, 8), (2, 19, 19, 8), (2, 19, 19, 8), (2, 10, 10, 8), + (2, 10, 10, 8), (2, 10, 10, 8), (2, 10, 10, 8), +] + +moblenet_v1_expected_feature_map_shape_with_conv_defs = [ + (2, 150, 150, 32), (2, 150, 150, 32), (2, 150, 150, 64), (2, 75, 75, 64), + (2, 75, 75, 128), (2, 75, 75, 128), (2, 75, 75, 128), (2, 38, 
38, 128), + (2, 38, 38, 256), (2, 38, 38, 256), (2, 38, 38, 256), (2, 19, 19, 256), + (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), + (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), + (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 10, 10, 512), + (2, 10, 10, 512), (2, 10, 10, 512), (2, 10, 10, 256), +] + +# For Mobilenet V2 +moblenet_v2_expected_feature_map_shape_128 = [ + (2, 64, 64, 32), (2, 64, 64, 96), (2, 32, 32, 96), (2, 32, 32, 24), + (2, 32, 32, 144), (2, 32, 32, 144), (2, 32, 32, 24), (2, 32, 32, 144), + (2, 16, 16, 144), (2, 16, 16, 32), (2, 16, 16, 192), (2, 16, 16, 192), + (2, 16, 16, 32), (2, 16, 16, 192), (2, 16, 16, 192), (2, 16, 16, 32), + (2, 16, 16, 192), (2, 8, 8, 192), (2, 8, 8, 64), (2, 8, 8, 384), + (2, 8, 8, 384), (2, 8, 8, 64), (2, 8, 8, 384), (2, 8, 8, 384), + (2, 8, 8, 64), (2, 8, 8, 384), (2, 8, 8, 384), (2, 8, 8, 64), + (2, 8, 8, 384), (2, 8, 8, 384), (2, 8, 8, 96), (2, 8, 8, 576), + (2, 8, 8, 576), (2, 8, 8, 96), (2, 8, 8, 576), (2, 8, 8, 576), + (2, 8, 8, 96), (2, 8, 8, 576), (2, 4, 4, 576), (2, 4, 4, 160), + (2, 4, 4, 960), (2, 4, 4, 960), (2, 4, 4, 160), (2, 4, 4, 960), + (2, 4, 4, 960), (2, 4, 4, 160), (2, 4, 4, 960), (2, 4, 4, 960), + (2, 4, 4, 320), (2, 4, 4, 1280) +] + +moblenet_v2_expected_feature_map_shape_128_explicit_padding = [ + (2, 64, 64, 32), (2, 64, 64, 96), (2, 32, 32, 96), (2, 32, 32, 24), + (2, 32, 32, 144), (2, 32, 32, 144), (2, 32, 32, 24), (2, 32, 32, 144), + (2, 16, 16, 144), (2, 16, 16, 32), (2, 16, 16, 192), (2, 16, 16, 192), + (2, 16, 16, 32), (2, 16, 16, 192), (2, 16, 16, 192), (2, 16, 16, 32), + (2, 16, 16, 192), (2, 8, 8, 192), (2, 8, 8, 64), (2, 8, 8, 384), + (2, 8, 8, 384), (2, 8, 8, 64), (2, 8, 8, 384), (2, 8, 8, 384), + (2, 8, 8, 64), (2, 8, 8, 384), (2, 8, 8, 384), (2, 8, 8, 64), + (2, 8, 8, 384), (2, 8, 8, 384), (2, 8, 8, 96), (2, 8, 8, 576), + (2, 8, 8, 576), (2, 8, 8, 96), (2, 8, 8, 576), (2, 8, 8, 576), + (2, 8, 8, 96), (2, 8, 8, 576), (2, 4, 4, 576), (2, 4, 4, 160), + (2, 4, 4, 960), (2, 4, 4, 960), (2, 4, 4, 160), (2, 4, 4, 960), + (2, 4, 4, 960), (2, 4, 4, 160), (2, 4, 4, 960), (2, 4, 4, 960), + (2, 4, 4, 320), (2, 4, 4, 1280) +] + +mobilenet_v2_expected_feature_map_shape_with_dynamic_inputs = [ + (2, 64, 64, 32), (2, 64, 64, 96), (2, 32, 32, 96), (2, 32, 32, 24), + (2, 32, 32, 144), (2, 32, 32, 144), (2, 32, 32, 24), (2, 32, 32, 144), + (2, 16, 16, 144), (2, 16, 16, 32), (2, 16, 16, 192), (2, 16, 16, 192), + (2, 16, 16, 32), (2, 16, 16, 192), (2, 16, 16, 192), (2, 16, 16, 32), + (2, 16, 16, 192), (2, 8, 8, 192), (2, 8, 8, 64), (2, 8, 8, 384), + (2, 8, 8, 384), (2, 8, 8, 64), (2, 8, 8, 384), (2, 8, 8, 384), + (2, 8, 8, 64), (2, 8, 8, 384), (2, 8, 8, 384), (2, 8, 8, 64), + (2, 8, 8, 384), (2, 8, 8, 384), (2, 8, 8, 96), (2, 8, 8, 576), + (2, 8, 8, 576), (2, 8, 8, 96), (2, 8, 8, 576), (2, 8, 8, 576), + (2, 8, 8, 96), (2, 8, 8, 576), (2, 4, 4, 576), (2, 4, 4, 160), + (2, 4, 4, 960), (2, 4, 4, 960), (2, 4, 4, 160), (2, 4, 4, 960), + (2, 4, 4, 960), (2, 4, 4, 160), (2, 4, 4, 960), (2, 4, 4, 960), + (2, 4, 4, 320), (2, 4, 4, 1280) +] + +moblenet_v2_expected_feature_map_shape_299 = [ + (2, 150, 150, 32), (2, 150, 150, 96), (2, 75, 75, 96), (2, 75, 75, 24), + (2, 75, 75, 144), (2, 75, 75, 144), (2, 75, 75, 24), (2, 75, 75, 144), + (2, 38, 38, 144), (2, 38, 38, 32), (2, 38, 38, 192), (2, 38, 38, 192), + (2, 38, 38, 32), (2, 38, 38, 192), (2, 38, 38, 192), (2, 38, 38, 32), + (2, 38, 38, 192), (2, 19, 19, 192), (2, 19, 19, 64), (2, 19, 19, 384), + (2, 19, 19, 384), (2, 
19, 19, 64), (2, 19, 19, 384), (2, 19, 19, 384), + (2, 19, 19, 64), (2, 19, 19, 384), (2, 19, 19, 384), (2, 19, 19, 64), + (2, 19, 19, 384), (2, 19, 19, 384), (2, 19, 19, 96), (2, 19, 19, 576), + (2, 19, 19, 576), (2, 19, 19, 96), (2, 19, 19, 576), (2, 19, 19, 576), + (2, 19, 19, 96), (2, 19, 19, 576), (2, 10, 10, 576), (2, 10, 10, 160), + (2, 10, 10, 960), (2, 10, 10, 960), (2, 10, 10, 160), (2, 10, 10, 960), + (2, 10, 10, 960), (2, 10, 10, 160), (2, 10, 10, 960), (2, 10, 10, 960), + (2, 10, 10, 320), (2, 10, 10, 1280) +] + +moblenet_v2_expected_feature_map_shape_enforcing_min_depth = [ + (2, 150, 150, 32), (2, 150, 150, 192), (2, 75, 75, 192), (2, 75, 75, 32), + (2, 75, 75, 192), (2, 75, 75, 192), (2, 75, 75, 32), (2, 75, 75, 192), + (2, 38, 38, 192), (2, 38, 38, 32), (2, 38, 38, 192), (2, 38, 38, 192), + (2, 38, 38, 32), (2, 38, 38, 192), (2, 38, 38, 192), (2, 38, 38, 32), + (2, 38, 38, 192), (2, 19, 19, 192), (2, 19, 19, 32), (2, 19, 19, 192), + (2, 19, 19, 192), (2, 19, 19, 32), (2, 19, 19, 192), (2, 19, 19, 192), + (2, 19, 19, 32), (2, 19, 19, 192), (2, 19, 19, 192), (2, 19, 19, 32), + (2, 19, 19, 192), (2, 19, 19, 192), (2, 19, 19, 32), (2, 19, 19, 192), + (2, 19, 19, 192), (2, 19, 19, 32), (2, 19, 19, 192), (2, 19, 19, 192), + (2, 19, 19, 32), (2, 19, 19, 192), (2, 10, 10, 192), (2, 10, 10, 32), + (2, 10, 10, 192), (2, 10, 10, 192), (2, 10, 10, 32), (2, 10, 10, 192), + (2, 10, 10, 192), (2, 10, 10, 32), (2, 10, 10, 192), (2, 10, 10, 192), + (2, 10, 10, 32), (2, 10, 10, 32) +] + +moblenet_v2_expected_feature_map_shape_with_conv_defs = [ + (2, 150, 150, 32), (2, 150, 150, 96), (2, 75, 75, 96), (2, 75, 75, 24), + (2, 75, 75, 144), (2, 75, 75, 144), (2, 75, 75, 24), (2, 75, 75, 144), + (2, 38, 38, 144), (2, 38, 38, 32), (2, 38, 38, 192), (2, 38, 38, 192), + (2, 38, 38, 32), (2, 38, 38, 192), (2, 38, 38, 192), (2, 38, 38, 32), + (2, 38, 38, 192), (2, 19, 19, 192), (2, 19, 19, 64), (2, 19, 19, 384), + (2, 19, 19, 384), (2, 19, 19, 64), (2, 19, 19, 384), (2, 19, 19, 384), + (2, 19, 19, 64), (2, 19, 19, 384), (2, 19, 19, 384), (2, 19, 19, 64), + (2, 19, 19, 384), (2, 19, 19, 384), (2, 19, 19, 96), (2, 19, 19, 576), + (2, 19, 19, 576), (2, 19, 19, 96), (2, 19, 19, 576), (2, 19, 19, 576), + (2, 19, 19, 96), (2, 19, 19, 576), (2, 10, 10, 576), (2, 10, 10, 160), + (2, 10, 10, 960), (2, 10, 10, 960), (2, 10, 10, 160), (2, 10, 10, 960), + (2, 10, 10, 960), (2, 10, 10, 160), (2, 10, 10, 960), (2, 10, 10, 960), + (2, 10, 10, 320), (2, 10, 10, 256) +] diff --git a/models/research/object_detection/models/ssd_feature_extractor_test.py b/models/research/object_detection/models/ssd_feature_extractor_test.py new file mode 100644 index 0000000000000000000000000000000000000000..29c43e376c6167b61a256eb0812ee4d3bcee3ed5 --- /dev/null +++ b/models/research/object_detection/models/ssd_feature_extractor_test.py @@ -0,0 +1,263 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Base test class SSDFeatureExtractors.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from abc import abstractmethod + +import numpy as np +from six.moves import zip +import tensorflow.compat.v1 as tf +import tf_slim as slim +from google.protobuf import text_format + +from object_detection.builders import hyperparams_builder +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import test_utils + + +class SsdFeatureExtractorTestBase(test_case.TestCase): + + def _build_conv_hyperparams(self, add_batch_norm=True): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: RELU_6 + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + if add_batch_norm: + batch_norm_proto = """ + batch_norm { + scale: false + } + """ + conv_hyperparams_text_proto += batch_norm_proto + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def conv_hyperparams_fn(self): + with slim.arg_scope([]) as sc: + return sc + + @abstractmethod + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False, + num_layers=6, + use_keras=False, + use_depthwise=False): + """Constructs a new feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + use_explicit_padding: use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + num_layers: number of SSD layers. + use_keras: if True builds a keras-based feature extractor, if False builds + a slim-based one. + use_depthwise: Whether to use depthwise convolutions. + Returns: + an ssd_meta_arch.SSDFeatureExtractor or an + ssd_meta_arch.SSDKerasFeatureExtractor object. 
+ """ + pass + + def _create_features(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False, + use_depthwise=False, + num_layers=6, + use_keras=False): + kwargs = {} + if use_explicit_padding: + kwargs.update({'use_explicit_padding': use_explicit_padding}) + if use_depthwise: + kwargs.update({'use_depthwise': use_depthwise}) + if num_layers != 6: + kwargs.update({'num_layers': num_layers}) + if use_keras: + kwargs.update({'use_keras': use_keras}) + feature_extractor = self._create_feature_extractor( + depth_multiplier, + pad_to_multiple, + **kwargs) + return feature_extractor + + def _extract_features(self, + image_tensor, + feature_extractor, + use_keras=False): + if use_keras: + feature_maps = feature_extractor(image_tensor) + else: + feature_maps = feature_extractor.extract_features(image_tensor) + return feature_maps + + def check_extract_features_returns_correct_shape(self, + batch_size, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shapes, + use_explicit_padding=False, + num_layers=6, + use_keras=False, + use_depthwise=False): + with test_utils.GraphContextOrNone() as g: + feature_extractor = self._create_features( + depth_multiplier, + pad_to_multiple, + use_explicit_padding=use_explicit_padding, + num_layers=num_layers, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def graph_fn(image_tensor): + return self._extract_features( + image_tensor, + feature_extractor, + use_keras=use_keras) + + image_tensor = np.random.rand(batch_size, image_height, image_width, + 3).astype(np.float32) + feature_maps = self.execute(graph_fn, [image_tensor], graph=g) + for feature_map, expected_shape in zip( + feature_maps, expected_feature_map_shapes): + self.assertAllEqual(feature_map.shape, expected_shape) + + def check_extract_features_returns_correct_shapes_with_dynamic_inputs( + self, + batch_size, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shapes, + use_explicit_padding=False, + num_layers=6, + use_keras=False, + use_depthwise=False): + + with test_utils.GraphContextOrNone() as g: + feature_extractor = self._create_features( + depth_multiplier, + pad_to_multiple, + use_explicit_padding=use_explicit_padding, + num_layers=num_layers, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def graph_fn(image_height, image_width): + image_tensor = tf.random_uniform([batch_size, image_height, image_width, + 3], dtype=tf.float32) + return self._extract_features( + image_tensor, + feature_extractor, + use_keras=use_keras) + + feature_maps = self.execute_cpu(graph_fn, [ + np.array(image_height, dtype=np.int32), + np.array(image_width, dtype=np.int32) + ], graph=g) + for feature_map, expected_shape in zip( + feature_maps, expected_feature_map_shapes): + self.assertAllEqual(feature_map.shape, expected_shape) + + def check_extract_features_raises_error_with_invalid_image_size( + self, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + use_keras=False, + use_depthwise=False): + + with test_utils.GraphContextOrNone() as g: + batch = 4 + width = tf.random.uniform([], minval=image_width, maxval=image_width+1, + dtype=tf.int32) + height = tf.random.uniform([], minval=image_height, maxval=image_height+1, + dtype=tf.int32) + shape = tf.stack([batch, height, width, 3]) + preprocessed_inputs = tf.random.uniform(shape) + feature_extractor = self._create_features( + depth_multiplier, + pad_to_multiple, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def 
graph_fn(): + feature_maps = self._extract_features( + preprocessed_inputs, + feature_extractor, + use_keras=use_keras) + return feature_maps + if self.is_tf2(): + with self.assertRaises(ValueError): + self.execute_cpu(graph_fn, [], graph=g) + else: + with self.assertRaises(tf.errors.InvalidArgumentError): + self.execute_cpu(graph_fn, [], graph=g) + + def check_feature_extractor_variables_under_scope(self, + depth_multiplier, + pad_to_multiple, + scope_name, + use_keras=False, + use_depthwise=False): + variables = self.get_feature_extractor_variables( + depth_multiplier, + pad_to_multiple, + use_keras=use_keras, + use_depthwise=use_depthwise) + for variable in variables: + self.assertTrue(variable.name.startswith(scope_name)) + + def get_feature_extractor_variables(self, + depth_multiplier, + pad_to_multiple, + use_keras=False, + use_depthwise=False): + g = tf.Graph() + with g.as_default(): + feature_extractor = self._create_features( + depth_multiplier, + pad_to_multiple, + use_keras=use_keras, + use_depthwise=use_depthwise) + preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3)) + self._extract_features( + preprocessed_inputs, + feature_extractor, + use_keras=use_keras) + return g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) diff --git a/models/research/object_detection/models/ssd_inception_v2_feature_extractor.py b/models/research/object_detection/models/ssd_inception_v2_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..c782bb2524ecf1a135d94c5743e5a20461231403 --- /dev/null +++ b/models/research/object_detection/models/ssd_inception_v2_feature_extractor.py @@ -0,0 +1,137 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""SSDFeatureExtractor for InceptionV2 features.""" +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import ops +from object_detection.utils import shape_utils +from nets import inception_v2 + + +class SSDInceptionV2FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): + """SSD Feature Extractor using InceptionV2 features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + num_layers=6, + override_base_feature_extractor_hyperparams=False): + """InceptionV2 Feature Extractor for SSD Models. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. 
+ conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + num_layers: Number of SSD layers. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + + Raises: + ValueError: If `override_base_feature_extractor_hyperparams` is False. + """ + super(SSDInceptionV2FeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + num_layers=num_layers, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams) + if not self._override_base_feature_extractor_hyperparams: + raise ValueError('SSD Inception V2 feature extractor always uses' + 'scope returned by `conv_hyperparams_fn` for both the ' + 'base feature extractor and the additional layers ' + 'added since there is no arg_scope defined for the base ' + 'feature extractor.') + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. 
+ + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + + feature_map_layout = { + 'from_layer': ['Mixed_4c', 'Mixed_5c', '', '', '', '' + ][:self._num_layers], + 'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers], + 'use_explicit_padding': self._use_explicit_padding, + 'use_depthwise': self._use_depthwise, + } + + with slim.arg_scope(self._conv_hyperparams_fn()): + with tf.variable_scope('InceptionV2', + reuse=self._reuse_weights) as scope: + _, image_features = inception_v2.inception_v2_base( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), + final_endpoint='Mixed_5c', + min_depth=self._min_depth, + depth_multiplier=self._depth_multiplier, + scope=scope) + feature_maps = feature_map_generators.multi_resolution_feature_maps( + feature_map_layout=feature_map_layout, + depth_multiplier=self._depth_multiplier, + min_depth=self._min_depth, + insert_1x1_conv=True, + image_features=image_features) + + return list(feature_maps.values()) diff --git a/models/research/object_detection/models/ssd_inception_v2_feature_extractor_tf1_test.py b/models/research/object_detection/models/ssd_inception_v2_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..1e33ed70ed45cef900d9f615cba9a5f196d36e23 --- /dev/null +++ b/models/research/object_detection/models/ssd_inception_v2_feature_extractor_tf1_test.py @@ -0,0 +1,160 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.models.ssd_inception_v2_feature_extractor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_inception_v2_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SsdInceptionV2FeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False, + num_layers=6, + is_training=True): + """Constructs a SsdInceptionV2FeatureExtractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + num_layers: number of SSD layers. + is_training: whether the network is in training mode. + + Returns: + an ssd_inception_v2_feature_extractor.SsdInceptionV2FeatureExtractor. 
+ """ + min_depth = 32 + return ssd_inception_v2_feature_extractor.SSDInceptionV2FeatureExtractor( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + num_layers=num_layers, + override_base_feature_extractor_hyperparams=True) + + def test_extract_features_returns_correct_shapes_128(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1024), + (2, 2, 2, 512), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_with_dynamic_inputs(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1024), + (2, 2, 2, 512), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_299(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 576), (2, 10, 10, 1024), + (2, 5, 5, 512), (2, 3, 3, 256), + (2, 2, 2, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_enforcing_min_depth(self): + image_height = 299 + image_width = 299 + depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 128), (2, 10, 10, 128), + (2, 5, 5, 32), (2, 3, 3, 32), + (2, 2, 2, 32), (2, 1, 1, 32)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + expected_feature_map_shape = [(2, 20, 20, 576), (2, 10, 10, 1024), + (2, 5, 5, 512), (2, 3, 3, 256), + (2, 2, 2, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_raises_error_with_invalid_image_size(self): + image_height = 32 + image_width = 32 + depth_multiplier = 1.0 + pad_to_multiple = 1 + self.check_extract_features_raises_error_with_invalid_image_size( + image_height, image_width, depth_multiplier, pad_to_multiple) + + def test_preprocess_returns_correct_value_range(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(4, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_variables_only_created_in_scope(self): + depth_multiplier = 1 + pad_to_multiple = 1 + scope_name = 'InceptionV2' + self.check_feature_extractor_variables_under_scope( + depth_multiplier, pad_to_multiple, scope_name) + + def test_extract_features_with_fewer_layers(self): + image_height 
= 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1024), + (2, 2, 2, 512), (2, 1, 1, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, num_layers=4) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/ssd_inception_v3_feature_extractor.py b/models/research/object_detection/models/ssd_inception_v3_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..0fa7f78d15baf0280833d4ad2e36d9f16a4ffccb --- /dev/null +++ b/models/research/object_detection/models/ssd_inception_v3_feature_extractor.py @@ -0,0 +1,137 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""SSDFeatureExtractor for InceptionV3 features.""" +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import ops +from object_detection.utils import shape_utils +from nets import inception_v3 + + +class SSDInceptionV3FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): + """SSD Feature Extractor using InceptionV3 features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + num_layers=6, + override_base_feature_extractor_hyperparams=False): + """InceptionV3 Feature Extractor for SSD Models. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + num_layers: Number of SSD layers. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + + Raises: + ValueError: If `override_base_feature_extractor_hyperparams` is False. 
+ """ + super(SSDInceptionV3FeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + num_layers=num_layers, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams) + + if not self._override_base_feature_extractor_hyperparams: + raise ValueError('SSD Inception V3 feature extractor always uses' + 'scope returned by `conv_hyperparams_fn` for both the ' + 'base feature extractor and the additional layers ' + 'added since there is no arg_scope defined for the base ' + 'feature extractor.') + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + + feature_map_layout = { + 'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', '' + ][:self._num_layers], + 'layer_depth': [-1, -1, -1, 512, 256, 128][:self._num_layers], + 'use_explicit_padding': self._use_explicit_padding, + 'use_depthwise': self._use_depthwise, + } + + with slim.arg_scope(self._conv_hyperparams_fn()): + with tf.variable_scope('InceptionV3', reuse=self._reuse_weights) as scope: + _, image_features = inception_v3.inception_v3_base( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), + final_endpoint='Mixed_7c', + min_depth=self._min_depth, + depth_multiplier=self._depth_multiplier, + scope=scope) + feature_maps = feature_map_generators.multi_resolution_feature_maps( + feature_map_layout=feature_map_layout, + depth_multiplier=self._depth_multiplier, + min_depth=self._min_depth, + insert_1x1_conv=True, + image_features=image_features) + + return list(feature_maps.values()) diff --git a/models/research/object_detection/models/ssd_inception_v3_feature_extractor_tf1_test.py b/models/research/object_detection/models/ssd_inception_v3_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..a0cbb451586b865cc448c292231a21dc468110a4 --- /dev/null +++ b/models/research/object_detection/models/ssd_inception_v3_feature_extractor_tf1_test.py @@ -0,0 +1,160 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.models.ssd_inception_v3_feature_extractor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_inception_v3_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SsdInceptionV3FeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False, + num_layers=6, + is_training=True): + """Constructs a SsdInceptionV3FeatureExtractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + num_layers: number of SSD layers. + is_training: whether the network is in training mode. + + Returns: + an ssd_inception_v3_feature_extractor.SsdInceptionV3FeatureExtractor. + """ + min_depth = 32 + return ssd_inception_v3_feature_extractor.SSDInceptionV3FeatureExtractor( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + num_layers=num_layers, + override_base_feature_extractor_hyperparams=True) + + def test_extract_features_returns_correct_shapes_128(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 13, 13, 288), (2, 6, 6, 768), + (2, 2, 2, 2048), (2, 1, 1, 512), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_with_dynamic_inputs(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 13, 13, 288), (2, 6, 6, 768), + (2, 2, 2, 2048), (2, 1, 1, 512), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_299(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 35, 35, 288), (2, 17, 17, 768), + (2, 8, 8, 2048), (2, 4, 4, 512), + (2, 2, 2, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_enforcing_min_depth(self): + image_height = 299 + image_width = 299 + depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 35, 35, 128), (2, 17, 17, 128), + (2, 8, 8, 192), (2, 4, 4, 32), + (2, 2, 2, 32), (2, 1, 1, 32)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def 
test_extract_features_returns_correct_shapes_with_pad_to_multiple(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + expected_feature_map_shape = [(2, 37, 37, 288), (2, 18, 18, 768), + (2, 8, 8, 2048), (2, 4, 4, 512), + (2, 2, 2, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_raises_error_with_invalid_image_size(self): + image_height = 32 + image_width = 32 + depth_multiplier = 1.0 + pad_to_multiple = 1 + self.check_extract_features_raises_error_with_invalid_image_size( + image_height, image_width, depth_multiplier, pad_to_multiple) + + def test_preprocess_returns_correct_value_range(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(4, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_variables_only_created_in_scope(self): + depth_multiplier = 1 + pad_to_multiple = 1 + scope_name = 'InceptionV3' + self.check_feature_extractor_variables_under_scope( + depth_multiplier, pad_to_multiple, scope_name) + + def test_extract_features_with_fewer_layers(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 13, 13, 288), (2, 6, 6, 768), + (2, 2, 2, 2048), (2, 1, 1, 512)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, num_layers=4) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/ssd_mobiledet_feature_extractor.py b/models/research/object_detection/models/ssd_mobiledet_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..019d7543bb7b271d6158b6b30fbb69a7db5a99a8 --- /dev/null +++ b/models/research/object_detection/models/ssd_mobiledet_feature_extractor.py @@ -0,0 +1,586 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""SSDFeatureExtractor for MobileDet features.""" + +import functools +import numpy as np +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import ops +from object_detection.utils import shape_utils + + +BACKBONE_WEIGHT_DECAY = 4e-5 + + +def _scale_filters(filters, multiplier, base=8): + """Scale the filters accordingly to (multiplier, base).""" + round_half_up = int(int(filters) * multiplier / base + 0.5) + result = int(round_half_up * base) + return max(result, base) + + +def _swish6(h): + with tf.name_scope('swish6'): + return h * tf.nn.relu6(h + np.float32(3)) * np.float32(1. / 6.) + + +def _conv(h, filters, kernel_size, strides=1, + normalizer_fn=slim.batch_norm, activation_fn=tf.nn.relu6): + if activation_fn is None: + raise ValueError('Activation function cannot be None. Use tf.identity ' + 'instead to better support quantized training.') + return slim.conv2d( + h, + filters, + kernel_size, + stride=strides, + activation_fn=activation_fn, + normalizer_fn=normalizer_fn, + weights_initializer=tf.initializers.he_normal(), + weights_regularizer=slim.l2_regularizer(BACKBONE_WEIGHT_DECAY), + padding='SAME') + + +def _separable_conv( + h, filters, kernel_size, strides=1, activation_fn=tf.nn.relu6): + """Separable convolution layer.""" + if activation_fn is None: + raise ValueError('Activation function cannot be None. Use tf.identity ' + 'instead to better support quantized training.') + # Depthwise variant of He initialization derived under the principle proposed + # in the original paper. Note the original He normalization was designed for + # full convolutions and calling tf.initializers.he_normal() can over-estimate + # the fan-in of a depthwise kernel by orders of magnitude. + stddev = (2.0 / kernel_size**2)**0.5 / .87962566103423978 + depthwise_initializer = tf.initializers.truncated_normal(stddev=stddev) + return slim.separable_conv2d( + h, + filters, + kernel_size, + stride=strides, + activation_fn=activation_fn, + normalizer_fn=slim.batch_norm, + weights_initializer=depthwise_initializer, + pointwise_initializer=tf.initializers.he_normal(), + weights_regularizer=slim.l2_regularizer(BACKBONE_WEIGHT_DECAY), + padding='SAME') + + +def _squeeze_and_excite(h, hidden_dim, activation_fn=tf.nn.relu6): + with tf.variable_scope(None, default_name='SqueezeExcite'): + height, width = h.shape[1], h.shape[2] + u = slim.avg_pool2d(h, [height, width], stride=1, padding='VALID') + u = _conv(u, hidden_dim, 1, + normalizer_fn=None, activation_fn=activation_fn) + u = _conv(u, h.shape[-1], 1, + normalizer_fn=None, activation_fn=tf.nn.sigmoid) + return u * h + + +def _inverted_bottleneck_no_expansion( + h, filters, activation_fn=tf.nn.relu6, + kernel_size=3, strides=1, use_se=False): + """Inverted bottleneck layer without the first 1x1 expansion convolution.""" + with tf.variable_scope(None, default_name='IBNNoExpansion'): + # Setting filters to None will make _separable_conv a depthwise conv. 
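+    # (Mechanism note: tf_slim's separable_conv2d skips its pointwise 1x1 stage
+    #  when num_outputs is None, so only the depthwise kernel is applied here.)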
+ h = _separable_conv( + h, None, kernel_size, strides=strides, activation_fn=activation_fn) + if use_se: + hidden_dim = _scale_filters(h.shape[-1], 0.25) + h = _squeeze_and_excite(h, hidden_dim, activation_fn=activation_fn) + h = _conv(h, filters, 1, activation_fn=tf.identity) + return h + + +def _inverted_bottleneck( + h, filters, activation_fn=tf.nn.relu6, + kernel_size=3, expansion=8, strides=1, use_se=False, residual=True): + """Inverted bottleneck layer.""" + with tf.variable_scope(None, default_name='IBN'): + shortcut = h + expanded_filters = int(h.shape[-1]) * expansion + if expansion <= 1: + raise ValueError('Expansion factor must be greater than 1.') + h = _conv(h, expanded_filters, 1, activation_fn=activation_fn) + # Setting filters to None will make _separable_conv a depthwise conv. + h = _separable_conv(h, None, kernel_size, strides=strides, + activation_fn=activation_fn) + if use_se: + hidden_dim = _scale_filters(expanded_filters, 0.25) + h = _squeeze_and_excite(h, hidden_dim, activation_fn=activation_fn) + h = _conv(h, filters, 1, activation_fn=tf.identity) + if residual: + h = h + shortcut + return h + + +def _fused_conv( + h, filters, activation_fn=tf.nn.relu6, + kernel_size=3, expansion=8, strides=1, use_se=False, residual=True): + """Fused convolution layer.""" + with tf.variable_scope(None, default_name='FusedConv'): + shortcut = h + expanded_filters = int(h.shape[-1]) * expansion + if expansion <= 1: + raise ValueError('Expansion factor must be greater than 1.') + h = _conv(h, expanded_filters, kernel_size, strides=strides, + activation_fn=activation_fn) + if use_se: + hidden_dim = _scale_filters(expanded_filters, 0.25) + h = _squeeze_and_excite(h, hidden_dim, activation_fn=activation_fn) + h = _conv(h, filters, 1, activation_fn=tf.identity) + if residual: + h = h + shortcut + return h + + +def _tucker_conv( + h, filters, activation_fn=tf.nn.relu6, + kernel_size=3, input_rank_ratio=0.25, output_rank_ratio=0.25, + strides=1, residual=True): + """Tucker convolution layer (generalized bottleneck).""" + with tf.variable_scope(None, default_name='TuckerConv'): + shortcut = h + input_rank = _scale_filters(h.shape[-1], input_rank_ratio) + h = _conv(h, input_rank, 1, activation_fn=activation_fn) + output_rank = _scale_filters(filters, output_rank_ratio) + h = _conv(h, output_rank, kernel_size, strides=strides, + activation_fn=activation_fn) + h = _conv(h, filters, 1, activation_fn=tf.identity) + if residual: + h = h + shortcut + return h + + +def mobiledet_cpu_backbone(h, multiplier=1.0): + """Build a MobileDet CPU backbone.""" + def _scale(filters): + return _scale_filters(filters, multiplier) + ibn = functools.partial( + _inverted_bottleneck, use_se=True, activation_fn=_swish6) + + endpoints = {} + h = _conv(h, _scale(16), 3, strides=2, activation_fn=_swish6) + h = _inverted_bottleneck_no_expansion( + h, _scale(8), use_se=True, activation_fn=_swish6) + endpoints['C1'] = h + h = ibn(h, _scale(16), expansion=4, strides=2, residual=False) + endpoints['C2'] = h + h = ibn(h, _scale(32), expansion=8, strides=2, residual=False) + h = ibn(h, _scale(32), expansion=4) + h = ibn(h, _scale(32), expansion=4) + h = ibn(h, _scale(32), expansion=4) + endpoints['C3'] = h + h = ibn(h, _scale(72), kernel_size=5, expansion=8, strides=2, residual=False) + h = ibn(h, _scale(72), expansion=8) + h = ibn(h, _scale(72), kernel_size=5, expansion=4) + h = ibn(h, _scale(72), expansion=4) + h = ibn(h, _scale(72), expansion=8, residual=False) + h = ibn(h, _scale(72), expansion=8) + h = ibn(h, _scale(72), 
expansion=8) + h = ibn(h, _scale(72), expansion=8) + endpoints['C4'] = h + h = ibn(h, _scale(104), kernel_size=5, expansion=8, strides=2, residual=False) + h = ibn(h, _scale(104), kernel_size=5, expansion=4) + h = ibn(h, _scale(104), kernel_size=5, expansion=4) + h = ibn(h, _scale(104), expansion=4) + h = ibn(h, _scale(144), expansion=8, residual=False) + endpoints['C5'] = h + return endpoints + + +def mobiledet_dsp_backbone(h, multiplier=1.0): + """Build a MobileDet DSP backbone.""" + def _scale(filters): + return _scale_filters(filters, multiplier) + + ibn = functools.partial(_inverted_bottleneck, activation_fn=tf.nn.relu6) + fused = functools.partial(_fused_conv, activation_fn=tf.nn.relu6) + tucker = functools.partial(_tucker_conv, activation_fn=tf.nn.relu6) + + endpoints = {} + h = _conv(h, _scale(32), 3, strides=2, activation_fn=tf.nn.relu6) + h = _inverted_bottleneck_no_expansion( + h, _scale(24), activation_fn=tf.nn.relu6) + endpoints['C1'] = h + h = fused(h, _scale(32), expansion=4, strides=2, residual=False) + h = fused(h, _scale(32), expansion=4) + h = ibn(h, _scale(32), expansion=4) + h = tucker(h, _scale(32), input_rank_ratio=0.25, output_rank_ratio=0.75) + endpoints['C2'] = h + h = fused(h, _scale(64), expansion=8, strides=2, residual=False) + h = ibn(h, _scale(64), expansion=4) + h = fused(h, _scale(64), expansion=4) + h = fused(h, _scale(64), expansion=4) + endpoints['C3'] = h + h = fused(h, _scale(120), expansion=8, strides=2, residual=False) + h = ibn(h, _scale(120), expansion=4) + h = ibn(h, _scale(120), expansion=8) + h = ibn(h, _scale(120), expansion=8) + h = fused(h, _scale(144), expansion=8, residual=False) + h = ibn(h, _scale(144), expansion=8) + h = ibn(h, _scale(144), expansion=8) + h = ibn(h, _scale(144), expansion=8) + endpoints['C4'] = h + h = ibn(h, _scale(160), expansion=4, strides=2, residual=False) + h = ibn(h, _scale(160), expansion=4) + h = fused(h, _scale(160), expansion=4) + h = tucker(h, _scale(160), input_rank_ratio=0.75, output_rank_ratio=0.75) + h = ibn(h, _scale(240), expansion=8, residual=False) + endpoints['C5'] = h + return endpoints + + +def mobiledet_edgetpu_backbone(h, multiplier=1.0): + """Build a MobileDet EdgeTPU backbone.""" + def _scale(filters): + return _scale_filters(filters, multiplier) + + ibn = functools.partial(_inverted_bottleneck, activation_fn=tf.nn.relu6) + fused = functools.partial(_fused_conv, activation_fn=tf.nn.relu6) + tucker = functools.partial(_tucker_conv, activation_fn=tf.nn.relu6) + + endpoints = {} + h = _conv(h, _scale(32), 3, strides=2, activation_fn=tf.nn.relu6) + h = tucker(h, _scale(16), + input_rank_ratio=0.25, output_rank_ratio=0.75, residual=False) + endpoints['C1'] = h + h = fused(h, _scale(16), expansion=8, strides=2, residual=False) + h = fused(h, _scale(16), expansion=4) + h = fused(h, _scale(16), expansion=8) + h = fused(h, _scale(16), expansion=4) + endpoints['C2'] = h + h = fused(h, _scale(40), expansion=8, kernel_size=5, strides=2, + residual=False) + h = fused(h, _scale(40), expansion=4) + h = fused(h, _scale(40), expansion=4) + h = fused(h, _scale(40), expansion=4) + endpoints['C3'] = h + h = ibn(h, _scale(72), expansion=8, strides=2, residual=False) + h = ibn(h, _scale(72), expansion=8) + h = fused(h, _scale(72), expansion=4) + h = fused(h, _scale(72), expansion=4) + h = ibn(h, _scale(96), expansion=8, kernel_size=5, residual=False) + h = ibn(h, _scale(96), expansion=8, kernel_size=5) + h = ibn(h, _scale(96), expansion=8) + h = ibn(h, _scale(96), expansion=8) + endpoints['C4'] = h + h = ibn(h, 
_scale(120), expansion=8, kernel_size=5, strides=2, residual=False) + h = ibn(h, _scale(120), expansion=8) + h = ibn(h, _scale(120), expansion=4, kernel_size=5) + h = ibn(h, _scale(120), expansion=8) + h = ibn(h, _scale(384), expansion=8, kernel_size=5, residual=False) + endpoints['C5'] = h + return endpoints + + +def mobiledet_gpu_backbone(h, multiplier=1.0): + """Build a MobileDet GPU backbone.""" + + def _scale(filters): + return _scale_filters(filters, multiplier) + + ibn = functools.partial(_inverted_bottleneck, activation_fn=tf.nn.relu6) + fused = functools.partial(_fused_conv, activation_fn=tf.nn.relu6) + tucker = functools.partial(_tucker_conv, activation_fn=tf.nn.relu6) + + endpoints = {} + # block 0 + h = _conv(h, _scale(32), 3, strides=2, activation_fn=tf.nn.relu6) + + # block 1 + h = tucker( + h, + _scale(16), + input_rank_ratio=0.25, + output_rank_ratio=0.25, + residual=False) + endpoints['C1'] = h + + # block 2 + h = fused(h, _scale(32), expansion=8, strides=2, residual=False) + h = tucker(h, _scale(32), input_rank_ratio=0.25, output_rank_ratio=0.25) + h = tucker(h, _scale(32), input_rank_ratio=0.25, output_rank_ratio=0.25) + h = tucker(h, _scale(32), input_rank_ratio=0.25, output_rank_ratio=0.25) + endpoints['C2'] = h + + # block 3 + h = fused( + h, _scale(64), expansion=8, kernel_size=3, strides=2, residual=False) + h = fused(h, _scale(64), expansion=8) + h = fused(h, _scale(64), expansion=8) + h = fused(h, _scale(64), expansion=4) + endpoints['C3'] = h + + # block 4 + h = fused( + h, _scale(128), expansion=8, kernel_size=3, strides=2, residual=False) + h = fused(h, _scale(128), expansion=4) + h = fused(h, _scale(128), expansion=4) + h = fused(h, _scale(128), expansion=4) + + # block 5 + h = fused( + h, _scale(128), expansion=8, kernel_size=3, strides=1, residual=False) + h = fused(h, _scale(128), expansion=8) + h = fused(h, _scale(128), expansion=8) + h = fused(h, _scale(128), expansion=8) + endpoints['C4'] = h + + # block 6 + h = fused( + h, _scale(128), expansion=4, kernel_size=3, strides=2, residual=False) + h = fused(h, _scale(128), expansion=4) + h = fused(h, _scale(128), expansion=4) + h = fused(h, _scale(128), expansion=4) + + # block 7 + h = ibn(h, _scale(384), expansion=8, kernel_size=3, strides=1, residual=False) + endpoints['C5'] = h + return endpoints + + +class SSDMobileDetFeatureExtractorBase(ssd_meta_arch.SSDFeatureExtractor): + """Base class of SSD feature extractor using MobileDet features.""" + + def __init__(self, + backbone_fn, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + override_base_feature_extractor_hyperparams=False, + scope_name='MobileDet'): + """MobileDet Feature Extractor for SSD Models. + + Reference: + https://arxiv.org/abs/2004.14525 + + Args: + backbone_fn: function to construct the MobileDet backbone. + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: Integer, minimum feature extractor depth (number of filters). + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the base + feature extractor. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. 
+ use_depthwise: Whether to use depthwise convolutions in the SSD head. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + scope_name: scope name (string) of network variables. + """ + if use_explicit_padding: + raise NotImplementedError( + 'Explicit padding is not yet supported in MobileDet backbones.') + + super(SSDMobileDetFeatureExtractorBase, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams + ) + self._backbone_fn = backbone_fn + self._scope_name = scope_name + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. The preprocessing assumes an input + value range of [0, 255]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + padded_inputs = ops.pad_to_multiple( + preprocessed_inputs, self._pad_to_multiple) + + feature_map_layout = { + 'from_layer': ['C4', 'C5', '', '', '', ''], + # Do not specify the layer depths (number of filters) for C4 and C5, as + # their values are determined based on the backbone. 
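+        # (A layer_depth of -1 acts as a placeholder for those backbone endpoints;
+        #  multi_resolution_feature_maps only creates new convolutions for the
+        #  entries whose 'from_layer' is empty.)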
+ 'layer_depth': [-1, -1, 512, 256, 256, 128], + 'use_depthwise': self._use_depthwise, + 'use_explicit_padding': self._use_explicit_padding, + } + + with tf.variable_scope(self._scope_name, reuse=self._reuse_weights): + with slim.arg_scope([slim.batch_norm], + is_training=self._is_training, + epsilon=0.01, decay=0.99, center=True, scale=True): + endpoints = self._backbone_fn( + padded_inputs, + multiplier=self._depth_multiplier) + + image_features = {'C4': endpoints['C4'], 'C5': endpoints['C5']} + with slim.arg_scope(self._conv_hyperparams_fn()): + feature_maps = feature_map_generators.multi_resolution_feature_maps( + feature_map_layout=feature_map_layout, + depth_multiplier=self._depth_multiplier, + min_depth=self._min_depth, + insert_1x1_conv=True, + image_features=image_features) + + return list(feature_maps.values()) + + +class SSDMobileDetCPUFeatureExtractor(SSDMobileDetFeatureExtractorBase): + """MobileDet-CPU feature extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + override_base_feature_extractor_hyperparams=False, + scope_name='MobileDetCPU'): + super(SSDMobileDetCPUFeatureExtractor, self).__init__( + backbone_fn=mobiledet_cpu_backbone, + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, + scope_name=scope_name) + + +class SSDMobileDetDSPFeatureExtractor(SSDMobileDetFeatureExtractorBase): + """MobileDet-DSP feature extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + override_base_feature_extractor_hyperparams=False, + scope_name='MobileDetDSP'): + super(SSDMobileDetDSPFeatureExtractor, self).__init__( + backbone_fn=mobiledet_dsp_backbone, + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, + scope_name=scope_name) + + +class SSDMobileDetEdgeTPUFeatureExtractor(SSDMobileDetFeatureExtractorBase): + """MobileDet-EdgeTPU feature extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + override_base_feature_extractor_hyperparams=False, + scope_name='MobileDetEdgeTPU'): + super(SSDMobileDetEdgeTPUFeatureExtractor, self).__init__( + backbone_fn=mobiledet_edgetpu_backbone, + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, + scope_name=scope_name) + + +class SSDMobileDetGPUFeatureExtractor(SSDMobileDetFeatureExtractorBase): + 
"""MobileDet-GPU feature extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + override_base_feature_extractor_hyperparams=False, + scope_name='MobileDetGPU'): + super(SSDMobileDetGPUFeatureExtractor, self).__init__( + backbone_fn=mobiledet_gpu_backbone, + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, + scope_name=scope_name) diff --git a/models/research/object_detection/models/ssd_mobiledet_feature_extractor_tf1_test.py b/models/research/object_detection/models/ssd_mobiledet_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..2af37554b55f68e85ddbe7587b86015e10ac65e8 --- /dev/null +++ b/models/research/object_detection/models/ssd_mobiledet_feature_extractor_tf1_test.py @@ -0,0 +1,172 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for ssd_mobiledet_feature_extractor.""" +import unittest +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_mobiledet_feature_extractor +from object_detection.utils import tf_version + +try: + from tensorflow.contrib import quantize as contrib_quantize # pylint: disable=g-import-not-at-top +except: # pylint: disable=bare-except + pass + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SSDMobileDetFeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, + feature_extractor_cls, + is_training=False, + depth_multiplier=1.0, + pad_to_multiple=1, + use_explicit_padding=False, + use_keras=False): + """Constructs a new MobileDet feature extractor. + + Args: + feature_extractor_cls: feature extractor class. + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + use_explicit_padding: If True, we will use 'VALID' padding for + convolutions, but prepad inputs so that the output dimensions are the + same as if 'SAME' padding were used. + use_keras: if True builds a keras-based feature extractor, if False builds + a slim-based one. + + Returns: + an ssd_meta_arch.SSDMobileDetFeatureExtractor object. 
+ """ + min_depth = 32 + return feature_extractor_cls( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding) + + def test_mobiledet_cpu_returns_correct_shapes(self): + expected_feature_map_shapes = [(2, 40, 20, 72), + (2, 20, 10, 144), + (2, 10, 5, 512), + (2, 5, 3, 256), + (2, 3, 2, 256), + (2, 2, 1, 128)] + feature_extractor = self._create_feature_extractor( + ssd_mobiledet_feature_extractor.SSDMobileDetCPUFeatureExtractor) + image = tf.random.normal((2, 640, 320, 3)) + feature_maps = feature_extractor.extract_features(image) + + self.assertEqual(len(expected_feature_map_shapes), len(feature_maps)) + for expected_shape, x in zip(expected_feature_map_shapes, feature_maps): + self.assertTrue(x.shape.is_compatible_with(expected_shape)) + + def test_mobiledet_dsp_returns_correct_shapes(self): + expected_feature_map_shapes = [(2, 40, 20, 144), + (2, 20, 10, 240), + (2, 10, 5, 512), + (2, 5, 3, 256), + (2, 3, 2, 256), + (2, 2, 1, 128)] + feature_extractor = self._create_feature_extractor( + ssd_mobiledet_feature_extractor.SSDMobileDetDSPFeatureExtractor) + image = tf.random.normal((2, 640, 320, 3)) + feature_maps = feature_extractor.extract_features(image) + + self.assertEqual(len(expected_feature_map_shapes), len(feature_maps)) + for expected_shape, x in zip(expected_feature_map_shapes, feature_maps): + self.assertTrue(x.shape.is_compatible_with(expected_shape)) + + def test_mobiledet_edgetpu_returns_correct_shapes(self): + expected_feature_map_shapes = [(2, 40, 20, 96), + (2, 20, 10, 384), + (2, 10, 5, 512), + (2, 5, 3, 256), + (2, 3, 2, 256), + (2, 2, 1, 128)] + feature_extractor = self._create_feature_extractor( + ssd_mobiledet_feature_extractor.SSDMobileDetEdgeTPUFeatureExtractor) + image = tf.random.normal((2, 640, 320, 3)) + feature_maps = feature_extractor.extract_features(image) + + self.assertEqual(len(expected_feature_map_shapes), len(feature_maps)) + for expected_shape, x in zip(expected_feature_map_shapes, feature_maps): + self.assertTrue(x.shape.is_compatible_with(expected_shape)) + + def test_mobiledet_gpu_returns_correct_shapes(self): + expected_feature_map_shapes = [(2, 40, 20, 128), (2, 20, 10, 384), + (2, 10, 5, 512), (2, 5, 3, 256), + (2, 3, 2, 256), (2, 2, 1, 128)] + feature_extractor = self._create_feature_extractor( + ssd_mobiledet_feature_extractor.SSDMobileDetGPUFeatureExtractor) + image = tf.random.normal((2, 640, 320, 3)) + feature_maps = feature_extractor.extract_features(image) + + self.assertEqual(len(expected_feature_map_shapes), len(feature_maps)) + for expected_shape, x in zip(expected_feature_map_shapes, feature_maps): + self.assertTrue(x.shape.is_compatible_with(expected_shape)) + + def _check_quantization(self, model_fn): + checkpoint_dir = self.get_temp_dir() + + with tf.Graph().as_default() as training_graph: + model_fn(is_training=True) + contrib_quantize.experimental_create_training_graph(training_graph) + with self.session(graph=training_graph) as sess: + sess.run(tf.global_variables_initializer()) + tf.train.Saver().save(sess, checkpoint_dir) + + with tf.Graph().as_default() as eval_graph: + model_fn(is_training=False) + contrib_quantize.experimental_create_eval_graph(eval_graph) + with self.session(graph=eval_graph) as sess: + tf.train.Saver().restore(sess, checkpoint_dir) + + def test_mobiledet_cpu_quantization(self): + def model_fn(is_training): + feature_extractor = self._create_feature_extractor( + ssd_mobiledet_feature_extractor.SSDMobileDetCPUFeatureExtractor, 
+ is_training=is_training) + image = tf.random.normal((2, 320, 320, 3)) + feature_extractor.extract_features(image) + self._check_quantization(model_fn) + + def test_mobiledet_dsp_quantization(self): + def model_fn(is_training): + feature_extractor = self._create_feature_extractor( + ssd_mobiledet_feature_extractor.SSDMobileDetDSPFeatureExtractor, + is_training=is_training) + image = tf.random.normal((2, 320, 320, 3)) + feature_extractor.extract_features(image) + self._check_quantization(model_fn) + + def test_mobiledet_edgetpu_quantization(self): + def model_fn(is_training): + feature_extractor = self._create_feature_extractor( + ssd_mobiledet_feature_extractor.SSDMobileDetEdgeTPUFeatureExtractor, + is_training=is_training) + image = tf.random.normal((2, 320, 320, 3)) + feature_extractor.extract_features(image) + self._check_quantization(model_fn) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor.py b/models/research/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..6de4cae310e05b92032b76d63120ab4b24eadd0d --- /dev/null +++ b/models/research/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor.py @@ -0,0 +1,49 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""SSDFeatureExtractor for MobileNetEdgeTPU features.""" + +from object_detection.models import ssd_mobilenet_v3_feature_extractor +from nets.mobilenet import mobilenet_v3 + + +class SSDMobileNetEdgeTPUFeatureExtractor( + ssd_mobilenet_v3_feature_extractor.SSDMobileNetV3FeatureExtractorBase): + """MobileNetEdgeTPU feature extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + override_base_feature_extractor_hyperparams=False, + scope_name='MobilenetEdgeTPU'): + super(SSDMobileNetEdgeTPUFeatureExtractor, self).__init__( + conv_defs=mobilenet_v3.V3_EDGETPU, + from_layer=['layer_18/expansion_output', 'layer_23'], + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, + scope_name=scope_name + ) diff --git a/models/research/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor_testbase.py b/models/research/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor_testbase.py new file mode 100644 index 0000000000000000000000000000000000000000..ce3290f895a4a5a0701df7d8ea110280f638f61c --- /dev/null +++ b/models/research/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor_testbase.py @@ -0,0 +1,112 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Base test class for ssd_mobilenet_edgetpu_feature_extractor.""" + +import abc + +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test + + +class _SsdMobilenetEdgeTPUFeatureExtractorTestBase( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + """Base class for MobilenetEdgeTPU tests.""" + + @abc.abstractmethod + def _get_input_sizes(self): + """Return feature map sizes for the two inputs to SSD head.""" + pass + + def test_extract_features_returns_correct_shapes_128(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + input_feature_sizes = self._get_input_sizes() + expected_feature_map_shape = [(2, 8, 8, input_feature_sizes[0]), + (2, 4, 4, input_feature_sizes[1]), + (2, 2, 2, 512), (2, 1, 1, 256), (2, 1, 1, + 256), + (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_keras=False) + + def test_extract_features_returns_correct_shapes_299(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 1 + input_feature_sizes = self._get_input_sizes() + expected_feature_map_shape = [(2, 19, 19, input_feature_sizes[0]), + (2, 10, 10, input_feature_sizes[1]), + (2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2, + 256), + (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_keras=False) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + input_feature_sizes = self._get_input_sizes() + expected_feature_map_shape = [(2, 20, 20, input_feature_sizes[0]), + (2, 10, 10, input_feature_sizes[1]), + (2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2, + 256), + (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_preprocess_returns_correct_value_range(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(4, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor( + depth_multiplier, pad_to_multiple, use_keras=False) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_has_fused_batchnorm(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + image_placeholder = tf.placeholder(tf.float32, + [1, image_height, image_width, 3]) + feature_extractor = self._create_feature_extractor( + depth_multiplier, pad_to_multiple, use_keras=False) + preprocessed_image = feature_extractor.preprocess(image_placeholder) + _ = feature_extractor.extract_features(preprocessed_image) + self.assertTrue(any('FusedBatchNorm' in op.type + for op in tf.get_default_graph().get_operations())) diff --git a/models/research/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor_tf1_test.py b/models/research/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..841fe5a148864a0d62b52fd8f6f3e0059670dd57 --- 
/dev/null +++ b/models/research/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor_tf1_test.py @@ -0,0 +1,65 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for ssd_mobilenet_edgetpu_feature_extractor.""" +import unittest +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_mobilenet_edgetpu_feature_extractor +from object_detection.models import ssd_mobilenet_edgetpu_feature_extractor_testbase +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SsdMobilenetEdgeTPUFeatureExtractorTest( + ssd_mobilenet_edgetpu_feature_extractor_testbase + ._SsdMobilenetEdgeTPUFeatureExtractorTestBase): + + def _get_input_sizes(self): + """Return first two input feature map sizes.""" + return [384, 192] + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False, + use_keras=False): + """Constructs a new MobileNetEdgeTPU feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + use_explicit_padding: use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + use_keras: if True builds a keras-based feature extractor, if False builds + a slim-based one. + + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. + """ + min_depth = 32 + return (ssd_mobilenet_edgetpu_feature_extractor + .SSDMobileNetEdgeTPUFeatureExtractor( + False, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/ssd_mobilenet_v1_feature_extractor.py b/models/research/object_detection/models/ssd_mobilenet_v1_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..7fdcdac5f6e95a891ef2ed8b5a648dca351243bb --- /dev/null +++ b/models/research/object_detection/models/ssd_mobilenet_v1_feature_extractor.py @@ -0,0 +1,138 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""SSDFeatureExtractor for MobilenetV1 features.""" + +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import context_manager +from object_detection.utils import ops +from object_detection.utils import shape_utils +from nets import mobilenet_v1 + + +class SSDMobileNetV1FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): + """SSD Feature Extractor using MobilenetV1 features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + num_layers=6, + override_base_feature_extractor_hyperparams=False): + """MobileNetV1 Feature Extractor for SSD Models. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + use_depthwise: Whether to use depthwise convolutions. Default is False. + num_layers: Number of SSD layers. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + """ + super(SSDMobileNetV1FeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + num_layers=num_layers, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams) + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. 
+ + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + + feature_map_layout = { + 'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', + '', ''][:self._num_layers], + 'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers], + 'use_explicit_padding': self._use_explicit_padding, + 'use_depthwise': self._use_depthwise, + } + + with tf.variable_scope('MobilenetV1', + reuse=self._reuse_weights) as scope: + with slim.arg_scope( + mobilenet_v1.mobilenet_v1_arg_scope( + is_training=None, regularize_depthwise=True)): + with (slim.arg_scope(self._conv_hyperparams_fn()) + if self._override_base_feature_extractor_hyperparams + else context_manager.IdentityContextManager()): + _, image_features = mobilenet_v1.mobilenet_v1_base( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), + final_endpoint='Conv2d_13_pointwise', + min_depth=self._min_depth, + depth_multiplier=self._depth_multiplier, + use_explicit_padding=self._use_explicit_padding, + scope=scope) + with slim.arg_scope(self._conv_hyperparams_fn()): + feature_maps = feature_map_generators.multi_resolution_feature_maps( + feature_map_layout=feature_map_layout, + depth_multiplier=self._depth_multiplier, + min_depth=self._min_depth, + insert_1x1_conv=True, + image_features=image_features) + + return list(feature_maps.values()) diff --git a/models/research/object_detection/models/ssd_mobilenet_v1_feature_extractor_tf1_test.py b/models/research/object_detection/models/ssd_mobilenet_v1_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..2f1d4839693c891b550e04cdaff391219c4b8cf1 --- /dev/null +++ b/models/research/object_detection/models/ssd_mobilenet_v1_feature_extractor_tf1_test.py @@ -0,0 +1,272 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for SSD Mobilenet V1 feature extractors. + +By using parameterized test decorator, this test serves for both Slim-based and +Keras-based Mobilenet V1 feature extractors in SSD. +""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_mobilenet_v1_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SsdMobilenetV1FeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False, + num_layers=6, + is_training=False, + use_keras=False): + """Constructs a new feature extractor. 
+ + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + num_layers: number of SSD layers. + is_training: whether the network is in training mode. + use_keras: if True builds a keras-based feature extractor, if False builds + a slim-based one. + + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. + """ + min_depth = 32 + del use_keras + return ssd_mobilenet_v1_feature_extractor.SSDMobileNetV1FeatureExtractor( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding, + num_layers=num_layers) + + def test_extract_features_returns_correct_shapes_128(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024), + (2, 2, 2, 512), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=False) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=False) + + def test_extract_features_returns_correct_shapes_299(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 512), (2, 10, 10, 1024), + (2, 5, 5, 512), (2, 3, 3, 256), + (2, 2, 2, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=False) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=False) + + def test_extract_features_with_dynamic_image_shape(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024), + (2, 2, 2, 512), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=False) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=False) + + def test_extract_features_returns_correct_shapes_enforcing_min_depth( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 32), (2, 10, 10, 32), + (2, 5, 5, 32), (2, 3, 3, 32), (2, 2, 2, 32), + (2, 1, 1, 32)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=False) + self.check_extract_features_returns_correct_shape( + 2, + 
image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=False) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + expected_feature_map_shape = [(2, 20, 20, 512), (2, 10, 10, 1024), + (2, 5, 5, 512), (2, 3, 3, 256), + (2, 2, 2, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=False) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=False) + + def test_extract_features_raises_error_with_invalid_image_size( + self): + image_height = 32 + image_width = 32 + depth_multiplier = 1.0 + pad_to_multiple = 1 + self.check_extract_features_raises_error_with_invalid_image_size( + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + use_keras=False) + + def test_preprocess_returns_correct_value_range(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(2, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor( + depth_multiplier, pad_to_multiple, use_keras=False) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_variables_only_created_in_scope(self): + depth_multiplier = 1 + pad_to_multiple = 1 + scope_name = 'MobilenetV1' + self.check_feature_extractor_variables_under_scope( + depth_multiplier, pad_to_multiple, scope_name, use_keras=False) + + def test_variable_count(self): + depth_multiplier = 1 + pad_to_multiple = 1 + variables = self.get_feature_extractor_variables( + depth_multiplier, pad_to_multiple, use_keras=False) + self.assertEqual(len(variables), 151) + + def test_has_fused_batchnorm(self): + image_height = 40 + image_width = 40 + depth_multiplier = 1 + pad_to_multiple = 1 + image_placeholder = tf.placeholder(tf.float32, + [1, image_height, image_width, 3]) + feature_extractor = self._create_feature_extractor( + depth_multiplier, pad_to_multiple, use_keras=False) + preprocessed_image = feature_extractor.preprocess(image_placeholder) + _ = feature_extractor.extract_features(preprocessed_image) + self.assertTrue( + any('FusedBatchNorm' in op.type + for op in tf.get_default_graph().get_operations())) + + def test_extract_features_with_fewer_layers(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024), + (2, 2, 2, 512), (2, 1, 1, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, num_layers=4, + use_keras=False) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/ssd_mobilenet_v1_feature_extractor_tf2_test.py b/models/research/object_detection/models/ssd_mobilenet_v1_feature_extractor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b60537b886909edbc7236f799c51733b8030380a --- /dev/null +++ 
b/models/research/object_detection/models/ssd_mobilenet_v1_feature_extractor_tf2_test.py @@ -0,0 +1,248 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for SSD Mobilenet V1 feature extractors. + +By using parameterized test decorator, this test serves for both Slim-based and +Keras-based Mobilenet V1 feature extractors in SSD. +""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_mobilenet_v1_keras_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class SsdMobilenetV1FeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False, + num_layers=6, + is_training=False, + use_keras=False): + """Constructs a new feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + num_layers: number of SSD layers. + is_training: whether the network is in training mode. + use_keras: if True builds a keras-based feature extractor, if False builds + a slim-based one. + + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. 
+ """ + del use_keras + min_depth = 32 + return (ssd_mobilenet_v1_keras_feature_extractor + .SSDMobileNetV1KerasFeatureExtractor( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=self._build_conv_hyperparams( + add_batch_norm=False), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + use_explicit_padding=use_explicit_padding, + num_layers=num_layers, + name='MobilenetV1')) + + def test_extract_features_returns_correct_shapes_128(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024), + (2, 2, 2, 512), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=True) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=True) + + def test_extract_features_returns_correct_shapes_299(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 512), (2, 10, 10, 1024), + (2, 5, 5, 512), (2, 3, 3, 256), + (2, 2, 2, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=True) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=True) + + def test_extract_features_with_dynamic_image_shape(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024), + (2, 2, 2, 512), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=True) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=True) + + def test_extract_features_returns_correct_shapes_enforcing_min_depth( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 32), (2, 10, 10, 32), + (2, 5, 5, 32), (2, 3, 3, 32), (2, 2, 2, 32), + (2, 1, 1, 32)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=True) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=True) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + expected_feature_map_shape = [(2, 20, 20, 512), (2, 10, 10, 1024), + (2, 
5, 5, 512), (2, 3, 3, 256), + (2, 2, 2, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=True) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=True) + + def test_extract_features_raises_error_with_invalid_image_size( + self): + image_height = 32 + image_width = 32 + depth_multiplier = 1.0 + pad_to_multiple = 1 + self.check_extract_features_raises_error_with_invalid_image_size( + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + use_keras=True) + + def test_preprocess_returns_correct_value_range(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(2, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor( + depth_multiplier, pad_to_multiple, use_keras=True) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_extract_features_with_fewer_layers(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024), + (2, 2, 2, 512), (2, 1, 1, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, num_layers=4, + use_keras=True) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor.py b/models/research/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..37f8eb837b53053a64fb41aaaf16be06388f1478 --- /dev/null +++ b/models/research/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor.py @@ -0,0 +1,202 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""SSD MobilenetV1 FPN Feature Extractor.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import functools +from six.moves import range +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import context_manager +from object_detection.utils import ops +from object_detection.utils import shape_utils +from nets import mobilenet_v1 + + +# A modified config of mobilenet v1 that makes it more detection friendly, +def _create_modified_mobilenet_config(): + conv_defs = copy.deepcopy(mobilenet_v1.MOBILENETV1_CONV_DEFS) + conv_defs[-2] = mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=512) + conv_defs[-1] = mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=256) + return conv_defs + + +class SSDMobileNetV1FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): + """SSD Feature Extractor using MobilenetV1 FPN features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + use_native_resize_op=False, + override_base_feature_extractor_hyperparams=False): + """SSD FPN feature extractor based on Mobilenet v1 architecture. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the base + feature extractor. + fpn_min_level: the highest resolution feature map to use in FPN. The valid + values are {2, 3, 4, 5} which map to MobileNet v1 layers + {Conv2d_3_pointwise, Conv2d_5_pointwise, Conv2d_11_pointwise, + Conv2d_13_pointwise}, respectively. + fpn_max_level: the smallest resolution feature map to construct or use in + FPN. FPN constructions uses features maps starting from fpn_min_level + upto the fpn_max_level. In the case that there are not enough feature + maps in the backbone network, additional feature maps are created by + applying stride 2 convolutions until we get the desired number of fpn + levels. + additional_layer_depth: additional feature map layer channel depth. + reuse_weights: whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize + to do upsampling in FPN. Default is false. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. 
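+        For example, with the default fpn_min_level=3 and fpn_max_level=7, the
+        top-down FPN is built on Conv2d_5_pointwise, Conv2d_11_pointwise and
+        Conv2d_13_pointwise, and the level 6 and 7 maps are produced by two
+        additional stride-2 convolutions on top of the coarsest FPN output.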
+ """ + super(SSDMobileNetV1FpnFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams) + self._fpn_min_level = fpn_min_level + self._fpn_max_level = fpn_max_level + self._additional_layer_depth = additional_layer_depth + self._conv_defs = None + if self._use_depthwise: + self._conv_defs = _create_modified_mobilenet_config() + self._use_native_resize_op = use_native_resize_op + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + + with tf.variable_scope('MobilenetV1', + reuse=self._reuse_weights) as scope: + with slim.arg_scope( + mobilenet_v1.mobilenet_v1_arg_scope( + is_training=None, regularize_depthwise=True)): + with (slim.arg_scope(self._conv_hyperparams_fn()) + if self._override_base_feature_extractor_hyperparams + else context_manager.IdentityContextManager()): + _, image_features = mobilenet_v1.mobilenet_v1_base( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), + final_endpoint='Conv2d_13_pointwise', + min_depth=self._min_depth, + depth_multiplier=self._depth_multiplier, + conv_defs=self._conv_defs, + use_explicit_padding=self._use_explicit_padding, + scope=scope) + + depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth) + with slim.arg_scope(self._conv_hyperparams_fn()): + with tf.variable_scope('fpn', reuse=self._reuse_weights): + feature_blocks = [ + 'Conv2d_3_pointwise', 'Conv2d_5_pointwise', 'Conv2d_11_pointwise', + 'Conv2d_13_pointwise' + ] + base_fpn_max_level = min(self._fpn_max_level, 5) + feature_block_list = [] + for level in range(self._fpn_min_level, base_fpn_max_level + 1): + feature_block_list.append(feature_blocks[level - 2]) + fpn_features = feature_map_generators.fpn_top_down_feature_maps( + [(key, image_features[key]) for key in feature_block_list], + depth=depth_fn(self._additional_layer_depth), + use_depthwise=self._use_depthwise, + use_explicit_padding=self._use_explicit_padding, + use_native_resize_op=self._use_native_resize_op) + feature_maps = [] + for level in range(self._fpn_min_level, base_fpn_max_level + 1): + feature_maps.append(fpn_features['top_down_{}'.format( + feature_blocks[level - 2])]) + last_feature_map = fpn_features['top_down_{}'.format( + feature_blocks[base_fpn_max_level - 2])] + # Construct coarse features + padding = 'VALID' if self._use_explicit_padding else 'SAME' + kernel_size = 3 + for i in range(base_fpn_max_level + 1, self._fpn_max_level + 1): + if self._use_depthwise: + conv_op = functools.partial( + 
slim.separable_conv2d, depth_multiplier=1) + else: + conv_op = slim.conv2d + if self._use_explicit_padding: + last_feature_map = ops.fixed_padding( + last_feature_map, kernel_size) + last_feature_map = conv_op( + last_feature_map, + num_outputs=depth_fn(self._additional_layer_depth), + kernel_size=[kernel_size, kernel_size], + stride=2, + padding=padding, + scope='bottom_up_Conv2d_{}'.format(i - base_fpn_max_level + 13)) + feature_maps.append(last_feature_map) + return feature_maps diff --git a/models/research/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor_tf1_test.py b/models/research/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..449b7803d390f60747f0f4d67d8b98414a7d24eb --- /dev/null +++ b/models/research/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor_tf1_test.py @@ -0,0 +1,206 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for ssd_mobilenet_v1_fpn_feature_extractor. + +By using parameterized test decorator, this test serves for both Slim-based and +Keras-based Mobilenet V1 FPN feature extractors in SSD. +""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_mobilenet_v1_fpn_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SsdMobilenetV1FpnFeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, + is_training=True, use_explicit_padding=False, + use_keras=False): + """Constructs a new feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + is_training: whether the network is in training mode. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + use_keras: if True builds a keras-based feature extractor, if False builds + a slim-based one. + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. + """ + del use_keras + min_depth = 32 + return (ssd_mobilenet_v1_fpn_feature_extractor. 
+ SSDMobileNetV1FpnFeatureExtractor( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_depthwise=True, + use_explicit_padding=use_explicit_padding)) + + def test_extract_features_returns_correct_shapes_256(self): + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), + (2, 8, 8, 256), (2, 4, 4, 256), + (2, 2, 2, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, + use_keras=False) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True, + use_keras=False) + + def test_extract_features_returns_correct_shapes_384(self): + image_height = 320 + image_width = 320 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256), + (2, 10, 10, 256), (2, 5, 5, 256), + (2, 3, 3, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, + use_keras=False) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True, + use_keras=False) + + def test_extract_features_with_dynamic_image_shape(self): + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), + (2, 8, 8, 256), (2, 4, 4, 256), + (2, 2, 2, 256)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, + use_keras=False) + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True, + use_keras=False) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256), + (2, 10, 10, 256), (2, 5, 5, 256), + (2, 3, 3, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, + use_keras=False) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True, + use_keras=False) + + def test_extract_features_returns_correct_shapes_enforcing_min_depth( + self): + image_height = 256 + image_width = 256 + depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 32), (2, 16, 16, 32), + (2, 8, 8, 32), (2, 4, 4, 32), + (2, 2, 2, 32)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, + use_keras=False) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + 
expected_feature_map_shape, use_explicit_padding=True, + use_keras=False) + + def test_extract_features_raises_error_with_invalid_image_size( + self): + image_height = 32 + image_width = 32 + depth_multiplier = 1.0 + pad_to_multiple = 1 + self.check_extract_features_raises_error_with_invalid_image_size( + image_height, image_width, depth_multiplier, pad_to_multiple, + use_keras=False) + + def test_preprocess_returns_correct_value_range(self): + image_height = 256 + image_width = 256 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(2, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple, + use_keras=False) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_variables_only_created_in_scope(self): + depth_multiplier = 1 + pad_to_multiple = 1 + scope_name = 'MobilenetV1' + self.check_feature_extractor_variables_under_scope( + depth_multiplier, pad_to_multiple, scope_name, use_keras=False) + + def test_variable_count(self): + depth_multiplier = 1 + pad_to_multiple = 1 + variables = self.get_feature_extractor_variables( + depth_multiplier, pad_to_multiple, use_keras=False) + self.assertEqual(len(variables), 153) + + def test_fused_batchnorm(self): + image_height = 256 + image_width = 256 + depth_multiplier = 1 + pad_to_multiple = 1 + image_placeholder = tf.placeholder(tf.float32, + [1, image_height, image_width, 3]) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple, + use_keras=False) + preprocessed_image = feature_extractor.preprocess(image_placeholder) + _ = feature_extractor.extract_features(preprocessed_image) + + self.assertTrue( + any('FusedBatchNorm' in op.type + for op in tf.get_default_graph().get_operations())) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor_tf2_test.py b/models/research/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..307cfa8b0b5594f921fee670699cc026ec16fbce --- /dev/null +++ b/models/research/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor_tf2_test.py @@ -0,0 +1,179 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for ssd_mobilenet_v1_fpn_feature_extractor. + +By using parameterized test decorator, this test serves for both Slim-based and +Keras-based Mobilenet V1 FPN feature extractors in SSD. 
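Editor's note: the expected shapes in the FPN tests above and below follow one rule: the map at pyramid level L has spatial size ceil(input_size / 2**L), with channel depth given by additional_layer_depth (256 at depth_multiplier 1.0). A minimal sketch of that arithmetic, assuming the hypothetical helper name expected_fpn_shapes (not part of the diff):

import math

def expected_fpn_shapes(batch, image_size, depth=256, min_level=3, max_level=7):
  # Spatial size at pyramid level L is ceil(image_size / 2**L).
  sizes = [int(math.ceil(image_size / 2.0**level))
           for level in range(min_level, max_level + 1)]
  return [(batch, s, s, depth) for s in sizes]

# expected_fpn_shapes(2, 256) -> [(2, 32, 32, 256), (2, 16, 16, 256), (2, 8, 8, 256),
#                                 (2, 4, 4, 256), (2, 2, 2, 256)]
# expected_fpn_shapes(2, 320) -> ends with (2, 3, 3, 256), matching the 320x320 cases.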
+""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_mobilenet_v1_fpn_keras_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class SsdMobilenetV1FpnFeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, + is_training=True, use_explicit_padding=False, + use_keras=True): + """Constructs a new feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + is_training: whether the network is in training mode. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + use_keras: if True builds a keras-based feature extractor, if False builds + a slim-based one. + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. + """ + min_depth = 32 + del use_keras + return (ssd_mobilenet_v1_fpn_keras_feature_extractor. + SSDMobileNetV1FpnKerasFeatureExtractor( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=self._build_conv_hyperparams( + add_batch_norm=False), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + use_explicit_padding=use_explicit_padding, + use_depthwise=True, + name='MobilenetV1_FPN')) + + def test_extract_features_returns_correct_shapes_256(self): + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), + (2, 8, 8, 256), (2, 4, 4, 256), + (2, 2, 2, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, + use_keras=True) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True, + use_keras=True) + + def test_extract_features_returns_correct_shapes_384(self): + image_height = 320 + image_width = 320 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256), + (2, 10, 10, 256), (2, 5, 5, 256), + (2, 3, 3, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, + use_keras=True) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True, + use_keras=True) + + def test_extract_features_with_dynamic_image_shape(self): + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), + (2, 8, 8, 256), (2, 4, 4, 256), + (2, 2, 2, 256)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, + use_keras=True) + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, 
image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True, + use_keras=True) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256), + (2, 10, 10, 256), (2, 5, 5, 256), + (2, 3, 3, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, + use_keras=True) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True, + use_keras=True) + + def test_extract_features_returns_correct_shapes_enforcing_min_depth( + self): + image_height = 256 + image_width = 256 + depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 32), (2, 16, 16, 32), + (2, 8, 8, 32), (2, 4, 4, 32), + (2, 2, 2, 32)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, + use_keras=True) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True, + use_keras=True) + + def test_extract_features_raises_error_with_invalid_image_size( + self): + image_height = 32 + image_width = 32 + depth_multiplier = 1.0 + pad_to_multiple = 1 + self.check_extract_features_raises_error_with_invalid_image_size( + image_height, image_width, depth_multiplier, pad_to_multiple, + use_keras=True) + + def test_preprocess_returns_correct_value_range(self): + image_height = 256 + image_width = 256 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(2, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple, + use_keras=True) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/ssd_mobilenet_v1_fpn_keras_feature_extractor.py b/models/research/object_detection/models/ssd_mobilenet_v1_fpn_keras_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..7792931875dc122ea938f8c87633e31f4adc4336 --- /dev/null +++ b/models/research/object_detection/models/ssd_mobilenet_v1_fpn_keras_feature_extractor.py @@ -0,0 +1,256 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""SSD Keras-based MobilenetV1 FPN Feature Extractor.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.models.keras_models import mobilenet_v1 +from object_detection.models.keras_models import model_utils +from object_detection.utils import ops +from object_detection.utils import shape_utils + + +# A modified config of mobilenet v1 that makes it more detection friendly. +def _create_modified_mobilenet_config(): + conv_def_block_12 = model_utils.ConvDefs(conv_name='conv_pw_12', filters=512) + conv_def_block_13 = model_utils.ConvDefs(conv_name='conv_pw_13', filters=256) + return [conv_def_block_12, conv_def_block_13] + + +class SSDMobileNetV1FpnKerasFeatureExtractor( + ssd_meta_arch.SSDKerasFeatureExtractor): + """SSD Feature Extractor using Keras-based MobilenetV1 FPN features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + use_explicit_padding=False, + use_depthwise=False, + use_native_resize_op=False, + override_base_feature_extractor_hyperparams=False, + name=None): + """SSD Keras based FPN feature extractor Mobilenet v1 architecture. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + freeze_batchnorm: whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + fpn_min_level: the highest resolution feature map to use in FPN. The valid + values are {2, 3, 4, 5} which map to MobileNet v1 layers + {Conv2d_3_pointwise, Conv2d_5_pointwise, Conv2d_11_pointwise, + Conv2d_13_pointwise}, respectively. + fpn_max_level: the smallest resolution feature map to construct or use in + FPN. FPN constructions uses features maps starting from fpn_min_level + upto the fpn_max_level. In the case that there are not enough feature + maps in the backbone network, additional feature maps are created by + applying stride 2 convolutions until we get the desired number of fpn + levels. + additional_layer_depth: additional feature map layer channel depth. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: whether to use depthwise convolutions. Default is False. + use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize + to do upsampling in FPN. Default is false. 
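Editor's note: to make the fpn_min_level / fpn_max_level bookkeeping in the docstring above concrete: MobileNet V1 only supplies backbone features for levels 2 through 5, so with the defaults (min 3, max 7) the FPN consumes three backbone blocks and two extra stride-2 "bottom_up" blocks are appended. A short sketch mirroring the constants computed in build() further below:

feature_blocks = ['Conv2d_3_pointwise', 'Conv2d_5_pointwise',
                  'Conv2d_11_pointwise', 'Conv2d_13_pointwise']  # backbone levels 2..5
fpn_min_level, fpn_max_level = 3, 7
base_fpn_max_level = min(fpn_max_level, 5)              # 5: deepest level the backbone provides
num_levels = base_fpn_max_level + 1 - fpn_min_level     # 3 top-down FPN outputs
num_coarse_layers = fpn_max_level - base_fpn_max_level  # 2 extra stride-2 convs for levels 6 and 7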
+ override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams`. + name: a string name scope to assign to the model. If 'None', Keras + will auto-generate one from the class name. + """ + super(SSDMobileNetV1FpnKerasFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams, + name=name) + self._fpn_min_level = fpn_min_level + self._fpn_max_level = fpn_max_level + self._additional_layer_depth = additional_layer_depth + self._conv_defs = None + if self._use_depthwise: + self._conv_defs = _create_modified_mobilenet_config() + self._use_native_resize_op = use_native_resize_op + self._feature_blocks = [ + 'Conv2d_3_pointwise', 'Conv2d_5_pointwise', 'Conv2d_11_pointwise', + 'Conv2d_13_pointwise' + ] + self.classification_backbone = None + self._fpn_features_generator = None + self._coarse_feature_layers = [] + + def build(self, input_shape): + full_mobilenet_v1 = mobilenet_v1.mobilenet_v1( + batchnorm_training=(self._is_training and not self._freeze_batchnorm), + conv_hyperparams=(self._conv_hyperparams + if self._override_base_feature_extractor_hyperparams + else None), + weights=None, + use_explicit_padding=self._use_explicit_padding, + alpha=self._depth_multiplier, + min_depth=self._min_depth, + conv_defs=self._conv_defs, + include_top=False) + conv2d_3_pointwise = full_mobilenet_v1.get_layer( + name='conv_pw_3_relu').output + conv2d_5_pointwise = full_mobilenet_v1.get_layer( + name='conv_pw_5_relu').output + conv2d_11_pointwise = full_mobilenet_v1.get_layer( + name='conv_pw_11_relu').output + conv2d_13_pointwise = full_mobilenet_v1.get_layer( + name='conv_pw_13_relu').output + self.classification_backbone = tf.keras.Model( + inputs=full_mobilenet_v1.inputs, + outputs=[conv2d_3_pointwise, conv2d_5_pointwise, + conv2d_11_pointwise, conv2d_13_pointwise] + ) + # pylint:disable=g-long-lambda + self._depth_fn = lambda d: max( + int(d * self._depth_multiplier), self._min_depth) + self._base_fpn_max_level = min(self._fpn_max_level, 5) + self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level + self._fpn_features_generator = ( + feature_map_generators.KerasFpnTopDownFeatureMaps( + num_levels=self._num_levels, + depth=self._depth_fn(self._additional_layer_depth), + use_depthwise=self._use_depthwise, + use_explicit_padding=self._use_explicit_padding, + use_native_resize_op=self._use_native_resize_op, + is_training=self._is_training, + conv_hyperparams=self._conv_hyperparams, + freeze_batchnorm=self._freeze_batchnorm, + name='FeatureMaps')) + # Construct coarse feature layers + padding = 'VALID' if self._use_explicit_padding else 'SAME' + kernel_size = 3 + stride = 2 + for i in range(self._base_fpn_max_level + 1, self._fpn_max_level + 1): + coarse_feature_layers = [] + if self._use_explicit_padding: + def fixed_padding(features, kernel_size=kernel_size): + return ops.fixed_padding(features, kernel_size) + coarse_feature_layers.append(tf.keras.layers.Lambda( + fixed_padding, name='fixed_padding')) + layer_name = 'bottom_up_Conv2d_{}'.format( + i - self._base_fpn_max_level + 13) + conv_block = 
feature_map_generators.create_conv_block( + self._use_depthwise, kernel_size, padding, stride, layer_name, + self._conv_hyperparams, self._is_training, self._freeze_batchnorm, + self._depth_fn(self._additional_layer_depth)) + coarse_feature_layers.extend(conv_block) + self._coarse_feature_layers.append(coarse_feature_layers) + self.built = True + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def _extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + + image_features = self.classification_backbone( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple)) + + feature_block_list = [] + for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): + feature_block_list.append(self._feature_blocks[level - 2]) + + feature_start_index = len(self._feature_blocks) - self._num_levels + fpn_input_image_features = [ + (key, image_features[feature_start_index + index]) + for index, key in enumerate(feature_block_list)] + fpn_features = self._fpn_features_generator(fpn_input_image_features) + + feature_maps = [] + for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): + feature_maps.append(fpn_features['top_down_{}'.format( + self._feature_blocks[level - 2])]) + last_feature_map = fpn_features['top_down_{}'.format( + self._feature_blocks[self._base_fpn_max_level - 2])] + + for coarse_feature_layers in self._coarse_feature_layers: + for layer in coarse_feature_layers: + last_feature_map = layer(last_feature_map) + feature_maps.append(last_feature_map) + return feature_maps + + def restore_from_classification_checkpoint_fn(self, feature_extractor_scope): + """Returns a map for restoring from an (object-based) checkpoint. + + Args: + feature_extractor_scope: A scope name for the feature extractor (unused). + + Returns: + A dict mapping keys to Keras models + """ + return {'feature_extractor': self.classification_backbone} diff --git a/models/research/object_detection/models/ssd_mobilenet_v1_keras_feature_extractor.py b/models/research/object_detection/models/ssd_mobilenet_v1_keras_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..82b48c1a8032c316b1a139150bdf0fef66ed743a --- /dev/null +++ b/models/research/object_detection/models/ssd_mobilenet_v1_keras_feature_extractor.py @@ -0,0 +1,176 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""SSDFeatureExtractor for Keras MobilenetV1 features.""" + +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.models.keras_models import mobilenet_v1 +from object_detection.utils import ops +from object_detection.utils import shape_utils + + +class SSDMobileNetV1KerasFeatureExtractor( + ssd_meta_arch.SSDKerasFeatureExtractor): + """SSD Feature Extractor using Keras MobilenetV1 features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + use_explicit_padding=False, + use_depthwise=False, + num_layers=6, + override_base_feature_extractor_hyperparams=False, + name=None): + """Keras MobileNetV1 Feature Extractor for SSD Models. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: Whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + use_depthwise: Whether to use depthwise convolutions. Default is False. + num_layers: Number of SSD layers. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams`. + name: A string name scope to assign to the model. If 'None', Keras + will auto-generate one from the class name. 
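Editor's note: for orientation before the constructor code below: the num_layers argument simply truncates the six-entry SSD feature-map layout assembled in __init__, where -1 means "reuse the backbone layer's own depth" and an empty from_layer string means "add a new convolution on top of the previous map". A small sketch of that trimming (the variable name trimmed is hypothetical):

feature_map_layout = {
    'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', '', ''],
    'layer_depth': [-1, -1, 512, 256, 256, 128],
}
num_layers = 4
trimmed = {key: value[:num_layers] for key, value in feature_map_layout.items()}
# trimmed['from_layer'] == ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '']
# trimmed['layer_depth'] == [-1, -1, 512, 256]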
+ """ + super(SSDMobileNetV1KerasFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + num_layers=num_layers, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams, + name=name) + self._feature_map_layout = { + 'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', + '', ''][:self._num_layers], + 'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers], + 'use_explicit_padding': self._use_explicit_padding, + 'use_depthwise': self._use_depthwise, + } + self.classification_backbone = None + self._feature_map_generator = None + + def build(self, input_shape): + full_mobilenet_v1 = mobilenet_v1.mobilenet_v1( + batchnorm_training=(self._is_training and not self._freeze_batchnorm), + conv_hyperparams=(self._conv_hyperparams + if self._override_base_feature_extractor_hyperparams + else None), + weights=None, + use_explicit_padding=self._use_explicit_padding, + alpha=self._depth_multiplier, + min_depth=self._min_depth, + include_top=False) + conv2d_11_pointwise = full_mobilenet_v1.get_layer( + name='conv_pw_11_relu').output + conv2d_13_pointwise = full_mobilenet_v1.get_layer( + name='conv_pw_13_relu').output + self.classification_backbone = tf.keras.Model( + inputs=full_mobilenet_v1.inputs, + outputs=[conv2d_11_pointwise, conv2d_13_pointwise]) + self._feature_map_generator = ( + feature_map_generators.KerasMultiResolutionFeatureMaps( + feature_map_layout=self._feature_map_layout, + depth_multiplier=self._depth_multiplier, + min_depth=self._min_depth, + insert_1x1_conv=True, + is_training=self._is_training, + conv_hyperparams=self._conv_hyperparams, + freeze_batchnorm=self._freeze_batchnorm, + name='FeatureMaps')) + self.built = True + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def _extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + + image_features = self.classification_backbone( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple)) + + feature_maps = self._feature_map_generator({ + 'Conv2d_11_pointwise': image_features[0], + 'Conv2d_13_pointwise': image_features[1]}) + + return list(feature_maps.values()) + + def restore_from_classification_checkpoint_fn(self, feature_extractor_scope): + """Returns a map for restoring from an (object-based) checkpoint. + + Args: + feature_extractor_scope: A scope name for the feature extractor (unused). 
+ + Returns: + A dict mapping keys to Keras models + """ + return {'feature_extractor': self.classification_backbone} diff --git a/models/research/object_detection/models/ssd_mobilenet_v1_ppn_feature_extractor.py b/models/research/object_detection/models/ssd_mobilenet_v1_ppn_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..85f6a5594d2ecdb9989afe223aa71962103da394 --- /dev/null +++ b/models/research/object_detection/models/ssd_mobilenet_v1_ppn_feature_extractor.py @@ -0,0 +1,84 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""SSDFeatureExtractor for MobilenetV1 PPN features.""" + +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import context_manager +from object_detection.utils import ops +from object_detection.utils import shape_utils +from nets import mobilenet_v1 + + +class SSDMobileNetV1PpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): + """SSD Feature Extractor using MobilenetV1 PPN features.""" + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. 
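Editor's note: the preprocess() methods repeated across these extractors all apply the same affine map, (2 / 255) * pixel - 1, taking [0, 255] inputs to [-1, 1]. A quick check of that arithmetic, which is exactly the bound the test_preprocess_returns_correct_value_range tests assert:

import numpy as np

pixels = np.array([0.0, 127.5, 255.0])
scaled = (2.0 / 255.0) * pixels - 1.0
# scaled == array([-1., 0., 1.]), so np.abs(scaled) <= 1.0 everywhere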
+ + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + + with tf.variable_scope('MobilenetV1', + reuse=self._reuse_weights) as scope: + with slim.arg_scope( + mobilenet_v1.mobilenet_v1_arg_scope( + is_training=None, regularize_depthwise=True)): + with (slim.arg_scope(self._conv_hyperparams_fn()) + if self._override_base_feature_extractor_hyperparams + else context_manager.IdentityContextManager()): + _, image_features = mobilenet_v1.mobilenet_v1_base( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), + final_endpoint='Conv2d_13_pointwise', + min_depth=self._min_depth, + depth_multiplier=self._depth_multiplier, + use_explicit_padding=self._use_explicit_padding, + scope=scope) + with slim.arg_scope(self._conv_hyperparams_fn()): + feature_maps = feature_map_generators.pooling_pyramid_feature_maps( + base_feature_map_depth=0, + num_layers=6, + image_features={ + 'image_features': image_features['Conv2d_11_pointwise'] + }) + return list(feature_maps.values()) diff --git a/models/research/object_detection/models/ssd_mobilenet_v1_ppn_feature_extractor_tf1_test.py b/models/research/object_detection/models/ssd_mobilenet_v1_ppn_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b5918c0dfa9a3e3819df14f9d504dd63b8febc63 --- /dev/null +++ b/models/research/object_detection/models/ssd_mobilenet_v1_ppn_feature_extractor_tf1_test.py @@ -0,0 +1,186 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for ssd_mobilenet_v1_ppn_feature_extractor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_mobilenet_v1_ppn_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SsdMobilenetV1PpnFeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, + is_training=True, use_explicit_padding=False): + """Constructs a new feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + is_training: whether the network is in training mode. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. + """ + min_depth = 32 + return (ssd_mobilenet_v1_ppn_feature_extractor. 
+ SSDMobileNetV1PpnFeatureExtractor( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding)) + + def test_extract_features_returns_correct_shapes_320(self): + image_height = 320 + image_width = 320 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 20, 20, 512), (2, 10, 10, 512), + (2, 5, 5, 512), (2, 3, 3, 512), + (2, 2, 2, 512), (2, 1, 1, 512)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True) + + def test_extract_features_returns_correct_shapes_300(self): + image_height = 300 + image_width = 300 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 512), (2, 10, 10, 512), + (2, 5, 5, 512), (2, 3, 3, 512), + (2, 2, 2, 512), (2, 1, 1, 512)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True) + + def test_extract_features_returns_correct_shapes_640(self): + image_height = 640 + image_width = 640 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 40, 40, 512), (2, 20, 20, 512), + (2, 10, 10, 512), (2, 5, 5, 512), + (2, 3, 3, 512), (2, 2, 2, 512)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True) + + def test_extract_features_with_dynamic_image_shape(self): + image_height = 320 + image_width = 320 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 20, 20, 512), (2, 10, 10, 512), + (2, 5, 5, 512), (2, 3, 3, 512), + (2, 2, 2, 512), (2, 1, 1, 512)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + expected_feature_map_shape = [(2, 20, 20, 512), (2, 10, 10, 512), + (2, 5, 5, 512), (2, 3, 3, 512), + (2, 2, 2, 512)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True) + + def test_extract_features_returns_correct_shapes_enforcing_min_depth(self): + image_height = 256 + image_width = 256 + 
depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 16, 16, 32), (2, 8, 8, 32), + (2, 4, 4, 32), (2, 2, 2, 32), + (2, 1, 1, 32)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True) + + def test_extract_features_raises_error_with_invalid_image_size(self): + image_height = 32 + image_width = 32 + depth_multiplier = 1.0 + pad_to_multiple = 1 + self.check_extract_features_raises_error_with_invalid_image_size( + image_height, image_width, depth_multiplier, pad_to_multiple) + + def test_preprocess_returns_correct_value_range(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(2, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_variables_only_created_in_scope(self): + depth_multiplier = 1 + pad_to_multiple = 1 + scope_name = 'MobilenetV1' + self.check_feature_extractor_variables_under_scope( + depth_multiplier, pad_to_multiple, scope_name) + + def test_has_fused_batchnorm(self): + image_height = 320 + image_width = 320 + depth_multiplier = 1 + pad_to_multiple = 1 + image_placeholder = tf.placeholder(tf.float32, + [1, image_height, image_width, 3]) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + preprocessed_image = feature_extractor.preprocess(image_placeholder) + _ = feature_extractor.extract_features(preprocessed_image) + self.assertTrue(any('FusedBatchNorm' in op.type + for op in tf.get_default_graph().get_operations())) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/ssd_mobilenet_v2_feature_extractor.py b/models/research/object_detection/models/ssd_mobilenet_v2_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..e3a37e16ab0e3fd0dc5a5e36695affd4239e1843 --- /dev/null +++ b/models/research/object_detection/models/ssd_mobilenet_v2_feature_extractor.py @@ -0,0 +1,140 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
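Editor's note: the geometric shape sequences in the PPN tests above come from the pooling pyramid itself: extract_features takes the single Conv2d_11_pointwise map (stride 16; depth 512 at multiplier 1.0) and repeatedly halves it with 'SAME'-padded stride-2 pooling. A minimal sketch, assuming the hypothetical helper name ppn_shapes:

import math

def ppn_shapes(batch, image_size, num_layers=6, depth=512, base_stride=16):
  size = int(math.ceil(image_size / float(base_stride)))
  shapes = []
  for _ in range(num_layers):
    shapes.append((batch, size, size, depth))
    size = int(math.ceil(size / 2.0))  # 'SAME' stride-2 pooling rounds up
  return shapes

# ppn_shapes(2, 320) -> [(2, 20, 20, 512), (2, 10, 10, 512), (2, 5, 5, 512),
#                        (2, 3, 3, 512), (2, 2, 2, 512), (2, 1, 1, 512)]
# ppn_shapes(2, 300) starts at 19x19, matching the 300x300 case.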
+# ============================================================================== + +"""SSDFeatureExtractor for MobilenetV2 features.""" + +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import context_manager +from object_detection.utils import ops +from object_detection.utils import shape_utils +from nets.mobilenet import mobilenet +from nets.mobilenet import mobilenet_v2 + + +class SSDMobileNetV2FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): + """SSD Feature Extractor using MobilenetV2 features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + num_layers=6, + override_base_feature_extractor_hyperparams=False): + """MobileNetV2 Feature Extractor for SSD Models. + + Mobilenet v2 (experimental), designed by sandler@. More details can be found + in //knowledge/cerebra/brain/compression/mobilenet/mobilenet_experimental.py + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + num_layers: Number of SSD layers. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + """ + super(SSDMobileNetV2FeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + num_layers=num_layers, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams) + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. 
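Editor's note ahead of the extract_features body below: ops.pad_to_multiple rounds each spatial dimension up to the next multiple of pad_to_multiple before the backbone runs, which is what shifts the expected shapes in the pad_to_multiple tests. A small sketch of that padding arithmetic (the helper name padded_size is hypothetical):

import math

def padded_size(size, multiple):
  return int(math.ceil(size / float(multiple))) * multiple

padded_size(299, 32)  # -> 320, so the stride-16 layer_15 map is 320 / 16 = 20 (20x20 in the tests)
padded_size(299, 1)   # -> 299; 'SAME' striding then gives ceil(299 / 16) = 19 (19x19 in the tests)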
+ + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + + feature_map_layout = { + 'from_layer': ['layer_15/expansion_output', 'layer_19', '', '', '', '' + ][:self._num_layers], + 'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers], + 'use_depthwise': self._use_depthwise, + 'use_explicit_padding': self._use_explicit_padding, + } + + with tf.variable_scope('MobilenetV2', reuse=self._reuse_weights) as scope: + with slim.arg_scope( + mobilenet_v2.training_scope(is_training=None, bn_decay=0.9997)), \ + slim.arg_scope( + [mobilenet.depth_multiplier], min_depth=self._min_depth): + with (slim.arg_scope(self._conv_hyperparams_fn()) + if self._override_base_feature_extractor_hyperparams else + context_manager.IdentityContextManager()): + _, image_features = mobilenet_v2.mobilenet_base( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), + final_endpoint='layer_19', + depth_multiplier=self._depth_multiplier, + use_explicit_padding=self._use_explicit_padding, + scope=scope) + with slim.arg_scope(self._conv_hyperparams_fn()): + feature_maps = feature_map_generators.multi_resolution_feature_maps( + feature_map_layout=feature_map_layout, + depth_multiplier=self._depth_multiplier, + min_depth=self._min_depth, + insert_1x1_conv=True, + image_features=image_features) + + return list(feature_maps.values()) diff --git a/models/research/object_detection/models/ssd_mobilenet_v2_feature_extractor_tf1_test.py b/models/research/object_detection/models/ssd_mobilenet_v2_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..96f9bc26e120f2f4396968429f474406b67894ca --- /dev/null +++ b/models/research/object_detection/models/ssd_mobilenet_v2_feature_extractor_tf1_test.py @@ -0,0 +1,196 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for ssd_mobilenet_v2_feature_extractor.""" +import unittest + +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_mobilenet_v2_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SsdMobilenetV2FeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False, + num_layers=6): + """Constructs a new feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. 
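Editor's note: the "enforcing_min_depth" tests in this file rely on the same clamping rule the extractors use for the layers they add: scale a nominal depth by depth_multiplier, then floor it at min_depth (32 in _create_feature_extractor below). A brief sketch; the first 19x19x192 map presumably stays larger because MobileNet V2's expansion layers are six times the clamped bottleneck depth (6 * 32 = 192):

def depth_fn(depth, depth_multiplier=0.5**12, min_depth=32):
  # Same clamping applied to the extra SSD layers: scale, then floor at min_depth.
  return max(int(depth * depth_multiplier), min_depth)

depth_fn(512)   # -> 32: the 512/256/256/128 layers all collapse to the 32-channel floor
depth_fn(1280)  # -> 32: layer_19 is clamped the same way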
+ use_explicit_padding: use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + num_layers: number of SSD layers. + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. + """ + min_depth = 32 + return ssd_mobilenet_v2_feature_extractor.SSDMobileNetV2FeatureExtractor( + False, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding, + num_layers=num_layers) + + def test_extract_features_returns_correct_shapes_128(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280), + (2, 2, 2, 512), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_128_explicit_padding( + self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280), + (2, 2, 2, 512), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True) + + def test_extract_features_returns_correct_shapes_with_dynamic_inputs( + self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280), + (2, 2, 2, 512), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_299(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 576), (2, 10, 10, 1280), + (2, 5, 5, 512), (2, 3, 3, 256), + (2, 2, 2, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_enforcing_min_depth( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 192), (2, 10, 10, 32), + (2, 5, 5, 32), (2, 3, 3, 32), + (2, 2, 2, 32), (2, 1, 1, 32)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + expected_feature_map_shape = [(2, 20, 20, 576), (2, 10, 10, 1280), + (2, 5, 5, 512), (2, 3, 3, 256), + (2, 2, 2, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_raises_error_with_invalid_image_size( + self): + image_height = 32 + image_width = 32 + depth_multiplier = 1.0 + pad_to_multiple = 1 + self.check_extract_features_raises_error_with_invalid_image_size( + image_height, image_width, depth_multiplier, pad_to_multiple) + 
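Editor's note: the invalid-image-size test just above (a 32x32 input) exercises the shape_utils.check_min_image_dim(33, ...) guard that every extract_features in this diff begins with: both spatial dimensions must be at least 33 pixels. A tiny sketch (the helper name input_is_large_enough is hypothetical):

MIN_IMAGE_DIM = 33  # value passed to shape_utils.check_min_image_dim by these extractors

def input_is_large_enough(height, width, min_dim=MIN_IMAGE_DIM):
  return height >= min_dim and width >= min_dim

input_is_large_enough(32, 32)    # -> False, so the test expects an error to be raised
input_is_large_enough(128, 128)  # -> True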
+ def test_preprocess_returns_correct_value_range(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(4, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_variables_only_created_in_scope(self): + depth_multiplier = 1 + pad_to_multiple = 1 + scope_name = 'MobilenetV2' + self.check_feature_extractor_variables_under_scope( + depth_multiplier, pad_to_multiple, scope_name) + + def test_variable_count(self): + depth_multiplier = 1 + pad_to_multiple = 1 + variables = self.get_feature_extractor_variables( + depth_multiplier, pad_to_multiple) + self.assertEqual(len(variables), 292) + + def test_has_fused_batchnorm(self): + image_height = 40 + image_width = 40 + depth_multiplier = 1 + pad_to_multiple = 1 + image_placeholder = tf.placeholder(tf.float32, + [1, image_height, image_width, 3]) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + preprocessed_image = feature_extractor.preprocess(image_placeholder) + _ = feature_extractor.extract_features(preprocessed_image) + self.assertTrue(any('FusedBatchNorm' in op.type + for op in tf.get_default_graph().get_operations())) + + def test_extract_features_with_fewer_layers(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280), + (2, 2, 2, 512), (2, 1, 1, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, num_layers=4) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/ssd_mobilenet_v2_feature_extractor_tf2_test.py b/models/research/object_detection/models/ssd_mobilenet_v2_feature_extractor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..6d4cb5afcf7c978cc24e01d5806914c618cd7fd7 --- /dev/null +++ b/models/research/object_detection/models/ssd_mobilenet_v2_feature_extractor_tf2_test.py @@ -0,0 +1,192 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for ssd_mobilenet_v2_feature_extractor.""" +import unittest + +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_mobilenet_v2_keras_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class SsdMobilenetV2FeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False, + num_layers=6, + use_keras=False): + """Constructs a new feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + use_explicit_padding: use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + num_layers: number of SSD layers. + use_keras: unused argument. + + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. + """ + del use_keras + min_depth = 32 + return (ssd_mobilenet_v2_keras_feature_extractor. + SSDMobileNetV2KerasFeatureExtractor( + is_training=False, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + use_explicit_padding=use_explicit_padding, + num_layers=num_layers, + name='MobilenetV2')) + + def test_extract_features_returns_correct_shapes_128(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280), + (2, 2, 2, 512), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_keras=True) + + def test_extract_features_returns_correct_shapes_128_explicit_padding( + self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280), + (2, 2, 2, 512), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True, use_keras=True) + + def test_extract_features_returns_correct_shapes_with_dynamic_inputs( + self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280), + (2, 2, 2, 512), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_keras=True) + + def test_extract_features_returns_correct_shapes_299(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 576), (2, 10, 10, 1280), + (2, 5, 5, 512), (2, 3, 3, 256), + (2, 2, 2, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, 
use_keras=True) + + def test_extract_features_returns_correct_shapes_enforcing_min_depth( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 192), (2, 10, 10, 32), + (2, 5, 5, 32), (2, 3, 3, 32), + (2, 2, 2, 32), (2, 1, 1, 32)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_keras=True) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + expected_feature_map_shape = [(2, 20, 20, 576), (2, 10, 10, 1280), + (2, 5, 5, 512), (2, 3, 3, 256), + (2, 2, 2, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_keras=True) + + def test_extract_features_raises_error_with_invalid_image_size( + self): + image_height = 32 + image_width = 32 + depth_multiplier = 1.0 + pad_to_multiple = 1 + self.check_extract_features_raises_error_with_invalid_image_size( + image_height, image_width, depth_multiplier, pad_to_multiple, + use_keras=True) + + def test_preprocess_returns_correct_value_range(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(4, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_variables_only_created_in_scope(self): + depth_multiplier = 1 + pad_to_multiple = 1 + scope_name = 'MobilenetV2' + self.check_feature_extractor_variables_under_scope( + depth_multiplier, pad_to_multiple, scope_name, use_keras=True) + + def test_variable_count(self): + depth_multiplier = 1 + pad_to_multiple = 1 + variables = self.get_feature_extractor_variables( + depth_multiplier, pad_to_multiple, use_keras=True) + self.assertEqual(len(variables), 292) + + def test_extract_features_with_fewer_layers(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280), + (2, 2, 2, 512), (2, 1, 1, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, num_layers=4, + use_keras=True) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor.py b/models/research/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..65cdcc85ab6b3e034279868342379864f8d3b5ef --- /dev/null +++ b/models/research/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor.py @@ -0,0 +1,199 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""SSD MobilenetV2 FPN Feature Extractor.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import functools +from six.moves import range +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import context_manager +from object_detection.utils import ops +from object_detection.utils import shape_utils +from nets.mobilenet import mobilenet +from nets.mobilenet import mobilenet_v2 + + +# A modified config of mobilenet v2 that makes it more detection friendly. +def _create_modified_mobilenet_config(): + conv_defs = copy.deepcopy(mobilenet_v2.V2_DEF) + conv_defs['spec'][-1] = mobilenet.op( + slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=256) + return conv_defs + + +class SSDMobileNetV2FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): + """SSD Feature Extractor using MobilenetV2 FPN features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + use_native_resize_op=False, + override_base_feature_extractor_hyperparams=False): + """SSD FPN feature extractor based on Mobilenet v2 architecture. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the base + feature extractor. + fpn_min_level: the highest resolution feature map to use in FPN. The valid + values are {2, 3, 4, 5} which map to MobileNet v2 layers + {layer_4, layer_7, layer_14, layer_19}, respectively. + fpn_max_level: the smallest resolution feature map to construct or use in + FPN. FPN constructions uses features maps starting from fpn_min_level + upto the fpn_max_level. In the case that there are not enough feature + maps in the backbone network, additional feature maps are created by + applying stride 2 convolutions until we get the desired number of fpn + levels. + additional_layer_depth: additional feature map layer channel depth. + reuse_weights: whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize + to do upsampling in FPN. Default is false. 
+ override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + """ + super(SSDMobileNetV2FpnFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams) + self._fpn_min_level = fpn_min_level + self._fpn_max_level = fpn_max_level + self._additional_layer_depth = additional_layer_depth + self._conv_defs = None + if self._use_depthwise: + self._conv_defs = _create_modified_mobilenet_config() + self._use_native_resize_op = use_native_resize_op + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + + with tf.variable_scope('MobilenetV2', reuse=self._reuse_weights) as scope: + with slim.arg_scope( + mobilenet_v2.training_scope(is_training=None, bn_decay=0.9997)), \ + slim.arg_scope( + [mobilenet.depth_multiplier], min_depth=self._min_depth): + with (slim.arg_scope(self._conv_hyperparams_fn()) + if self._override_base_feature_extractor_hyperparams else + context_manager.IdentityContextManager()): + _, image_features = mobilenet_v2.mobilenet_base( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), + final_endpoint='layer_19', + depth_multiplier=self._depth_multiplier, + conv_defs=self._conv_defs, + use_explicit_padding=self._use_explicit_padding, + scope=scope) + depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth) + with slim.arg_scope(self._conv_hyperparams_fn()): + with tf.variable_scope('fpn', reuse=self._reuse_weights): + feature_blocks = [ + 'layer_4', 'layer_7', 'layer_14', 'layer_19' + ] + base_fpn_max_level = min(self._fpn_max_level, 5) + feature_block_list = [] + for level in range(self._fpn_min_level, base_fpn_max_level + 1): + feature_block_list.append(feature_blocks[level - 2]) + fpn_features = feature_map_generators.fpn_top_down_feature_maps( + [(key, image_features[key]) for key in feature_block_list], + depth=depth_fn(self._additional_layer_depth), + use_depthwise=self._use_depthwise, + use_explicit_padding=self._use_explicit_padding, + use_native_resize_op=self._use_native_resize_op) + feature_maps = [] + for level in range(self._fpn_min_level, base_fpn_max_level + 1): + feature_maps.append(fpn_features['top_down_{}'.format( + feature_blocks[level - 2])]) + last_feature_map = fpn_features['top_down_{}'.format( + feature_blocks[base_fpn_max_level - 2])] + # Construct coarse features + padding = 'VALID' if self._use_explicit_padding else 'SAME' + kernel_size = 3 + for i in 
range(base_fpn_max_level + 1, self._fpn_max_level + 1): + if self._use_depthwise: + conv_op = functools.partial( + slim.separable_conv2d, depth_multiplier=1) + else: + conv_op = slim.conv2d + if self._use_explicit_padding: + last_feature_map = ops.fixed_padding( + last_feature_map, kernel_size) + last_feature_map = conv_op( + last_feature_map, + num_outputs=depth_fn(self._additional_layer_depth), + kernel_size=[kernel_size, kernel_size], + stride=2, + padding=padding, + scope='bottom_up_Conv2d_{}'.format(i - base_fpn_max_level + 19)) + feature_maps.append(last_feature_map) + return feature_maps diff --git a/models/research/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor_tf1_test.py b/models/research/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..9cdbed5fbe160baefb0afd41477748b9374e191f --- /dev/null +++ b/models/research/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor_tf1_test.py @@ -0,0 +1,372 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for ssd_mobilenet_v2_fpn_feature_extractor. + +By using parameterized test decorator, this test serves for both Slim-based and +Keras-based Mobilenet V2 FPN feature extractors in SSD. +""" +import unittest +from absl.testing import parameterized +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_mobilenet_v2_fpn_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +@parameterized.parameters( + { + 'use_depthwise': False + }, + { + 'use_depthwise': True + }, +) +class SsdMobilenetV2FpnFeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + is_training=True, + use_explicit_padding=False, + use_keras=False, + use_depthwise=False): + """Constructs a new feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + is_training: whether the network is in training mode. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + use_keras: if True builds a keras-based feature extractor, if False builds + a slim-based one. + use_depthwise: Whether to use depthwise convolutions. + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. 
+ """ + del use_keras + min_depth = 32 + return (ssd_mobilenet_v2_fpn_feature_extractor + .SSDMobileNetV2FpnFeatureExtractor( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_depthwise=use_depthwise, + use_explicit_padding=use_explicit_padding)) + + def test_extract_features_returns_correct_shapes_256(self, use_depthwise): + use_keras = False + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), + (2, 8, 8, 256), (2, 4, 4, 256), + (2, 2, 2, 256)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=use_keras, + use_depthwise=use_depthwise) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def test_extract_features_returns_correct_shapes_384(self, use_depthwise): + use_keras = False + image_height = 320 + image_width = 320 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256), + (2, 10, 10, 256), (2, 5, 5, 256), + (2, 3, 3, 256)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=use_keras, + use_depthwise=use_depthwise) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def test_extract_features_with_dynamic_image_shape(self, + use_depthwise): + use_keras = False + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), + (2, 8, 8, 256), (2, 4, 4, 256), + (2, 2, 2, 256)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=use_keras, + use_depthwise=use_depthwise) + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple( + self, use_depthwise): + use_keras = False + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256), + (2, 10, 10, 256), (2, 5, 5, 256), + (2, 3, 3, 256)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=use_keras, + use_depthwise=use_depthwise) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def 
test_extract_features_returns_correct_shapes_enforcing_min_depth( + self, use_depthwise): + use_keras = False + image_height = 256 + image_width = 256 + depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 32), (2, 16, 16, 32), + (2, 8, 8, 32), (2, 4, 4, 32), + (2, 2, 2, 32)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=use_keras, + use_depthwise=use_depthwise) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def test_extract_features_raises_error_with_invalid_image_size( + self, use_depthwise): + use_keras = False + image_height = 32 + image_width = 32 + depth_multiplier = 1.0 + pad_to_multiple = 1 + self.check_extract_features_raises_error_with_invalid_image_size( + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def test_preprocess_returns_correct_value_range(self, + use_depthwise): + use_keras = False + image_height = 256 + image_width = 256 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(2, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor( + depth_multiplier, + pad_to_multiple, + use_keras=use_keras, + use_depthwise=use_depthwise) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_variables_only_created_in_scope(self, use_depthwise): + use_keras = False + depth_multiplier = 1 + pad_to_multiple = 1 + scope_name = 'MobilenetV2' + self.check_feature_extractor_variables_under_scope( + depth_multiplier, + pad_to_multiple, + scope_name, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def test_fused_batchnorm(self, use_depthwise): + use_keras = False + image_height = 256 + image_width = 256 + depth_multiplier = 1 + pad_to_multiple = 1 + image_placeholder = tf.placeholder(tf.float32, + [1, image_height, image_width, 3]) + feature_extractor = self._create_feature_extractor( + depth_multiplier, + pad_to_multiple, + use_keras=use_keras, + use_depthwise=use_depthwise) + preprocessed_image = feature_extractor.preprocess(image_placeholder) + _ = feature_extractor.extract_features(preprocessed_image) + self.assertTrue( + any('FusedBatchNorm' in op.type + for op in tf.get_default_graph().get_operations())) + + def test_variable_count(self, use_depthwise): + use_keras = False + depth_multiplier = 1 + pad_to_multiple = 1 + variables = self.get_feature_extractor_variables( + depth_multiplier, + pad_to_multiple, + use_keras=use_keras, + use_depthwise=use_depthwise) + expected_variables_len = 274 + if use_depthwise: + expected_variables_len = 278 + self.assertEqual(len(variables), expected_variables_len) + + def test_get_expected_feature_map_variable_names(self, + use_depthwise): + use_keras = False + depth_multiplier = 1.0 + pad_to_multiple = 1 + + slim_expected_feature_maps_variables = set([ + # Slim Mobilenet V2 feature maps + 'MobilenetV2/expanded_conv_4/depthwise/depthwise_weights', + 'MobilenetV2/expanded_conv_7/depthwise/depthwise_weights', + 'MobilenetV2/expanded_conv_14/depthwise/depthwise_weights', + 'MobilenetV2/Conv_1/weights', + # FPN layers + 
'MobilenetV2/fpn/bottom_up_Conv2d_20/weights', + 'MobilenetV2/fpn/bottom_up_Conv2d_21/weights', + 'MobilenetV2/fpn/smoothing_1/weights', + 'MobilenetV2/fpn/smoothing_2/weights', + 'MobilenetV2/fpn/projection_1/weights', + 'MobilenetV2/fpn/projection_2/weights', + 'MobilenetV2/fpn/projection_3/weights', + ]) + slim_expected_feature_maps_variables_with_depthwise = set([ + # Slim Mobilenet V2 feature maps + 'MobilenetV2/expanded_conv_4/depthwise/depthwise_weights', + 'MobilenetV2/expanded_conv_7/depthwise/depthwise_weights', + 'MobilenetV2/expanded_conv_14/depthwise/depthwise_weights', + 'MobilenetV2/Conv_1/weights', + # FPN layers + 'MobilenetV2/fpn/bottom_up_Conv2d_20/pointwise_weights', + 'MobilenetV2/fpn/bottom_up_Conv2d_20/depthwise_weights', + 'MobilenetV2/fpn/bottom_up_Conv2d_21/pointwise_weights', + 'MobilenetV2/fpn/bottom_up_Conv2d_21/depthwise_weights', + 'MobilenetV2/fpn/smoothing_1/depthwise_weights', + 'MobilenetV2/fpn/smoothing_1/pointwise_weights', + 'MobilenetV2/fpn/smoothing_2/depthwise_weights', + 'MobilenetV2/fpn/smoothing_2/pointwise_weights', + 'MobilenetV2/fpn/projection_1/weights', + 'MobilenetV2/fpn/projection_2/weights', + 'MobilenetV2/fpn/projection_3/weights', + ]) + + g = tf.Graph() + with g.as_default(): + preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3)) + feature_extractor = self._create_feature_extractor( + depth_multiplier, + pad_to_multiple, + use_keras=use_keras, + use_depthwise=use_depthwise) + + _ = feature_extractor.extract_features(preprocessed_inputs) + expected_feature_maps_variables = slim_expected_feature_maps_variables + if use_depthwise: + expected_feature_maps_variables = ( + slim_expected_feature_maps_variables_with_depthwise) + actual_variable_set = set([ + var.op.name for var in g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) + ]) + variable_intersection = expected_feature_maps_variables.intersection( + actual_variable_set) + self.assertSetEqual(expected_feature_maps_variables, + variable_intersection) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor_tf2_test.py b/models/research/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..44522ac94494430cb109e084689cc6a1a1dbeddb --- /dev/null +++ b/models/research/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor_tf2_test.py @@ -0,0 +1,269 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for ssd_mobilenet_v2_fpn_feature_extractor. + +By using parameterized test decorator, this test serves for both Slim-based and +Keras-based Mobilenet V2 FPN feature extractors in SSD. 
+""" +import unittest +from absl.testing import parameterized +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_mobilenet_v2_fpn_keras_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +@parameterized.parameters( + { + 'use_depthwise': False, + }, + { + 'use_depthwise': True, + }, +) +class SsdMobilenetV2FpnFeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + is_training=True, + use_explicit_padding=False, + use_keras=False, + use_depthwise=False): + """Constructs a new feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + is_training: whether the network is in training mode. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + use_keras: if True builds a keras-based feature extractor, if False builds + a slim-based one. + use_depthwise: Whether to use depthwise convolutions. + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. + """ + del use_keras + min_depth = 32 + return (ssd_mobilenet_v2_fpn_keras_feature_extractor + .SSDMobileNetV2FpnKerasFeatureExtractor( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=self._build_conv_hyperparams( + add_batch_norm=False), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + name='MobilenetV2_FPN')) + + def test_extract_features_returns_correct_shapes_256(self, + use_depthwise): + use_keras = True + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), + (2, 8, 8, 256), (2, 4, 4, 256), + (2, 2, 2, 256)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=use_keras, + use_depthwise=use_depthwise) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def test_extract_features_returns_correct_shapes_384(self, + use_depthwise): + use_keras = True + image_height = 320 + image_width = 320 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256), + (2, 10, 10, 256), (2, 5, 5, 256), + (2, 3, 3, 256)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=use_keras, + use_depthwise=use_depthwise) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def test_extract_features_with_dynamic_image_shape(self, + 
use_depthwise): + use_keras = True + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), + (2, 8, 8, 256), (2, 4, 4, 256), + (2, 2, 2, 256)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=use_keras, + use_depthwise=use_depthwise) + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple( + self, use_depthwise): + use_keras = True + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256), + (2, 10, 10, 256), (2, 5, 5, 256), + (2, 3, 3, 256)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=use_keras, + use_depthwise=use_depthwise) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def test_extract_features_returns_correct_shapes_enforcing_min_depth( + self, use_depthwise): + use_keras = True + image_height = 256 + image_width = 256 + depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 32), (2, 16, 16, 32), + (2, 8, 8, 32), (2, 4, 4, 32), + (2, 2, 2, 32)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=use_keras, + use_depthwise=use_depthwise) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def test_extract_features_raises_error_with_invalid_image_size( + self, use_depthwise=False): + use_keras = True + image_height = 32 + image_width = 32 + depth_multiplier = 1.0 + pad_to_multiple = 1 + self.check_extract_features_raises_error_with_invalid_image_size( + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def test_preprocess_returns_correct_value_range(self, + use_depthwise): + use_keras = True + image_height = 256 + image_width = 256 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(2, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor( + depth_multiplier, + pad_to_multiple, + use_keras=use_keras, + use_depthwise=use_depthwise) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/ssd_mobilenet_v2_fpn_keras_feature_extractor.py b/models/research/object_detection/models/ssd_mobilenet_v2_fpn_keras_feature_extractor.py new 
file mode 100644 index 0000000000000000000000000000000000000000..0e36e8bda08e376ace2cb51ca7e7045d6c689663 --- /dev/null +++ b/models/research/object_detection/models/ssd_mobilenet_v2_fpn_keras_feature_extractor.py @@ -0,0 +1,254 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""SSD Keras-based MobilenetV2 FPN Feature Extractor.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.models.keras_models import mobilenet_v2 +from object_detection.models.keras_models import model_utils +from object_detection.utils import ops +from object_detection.utils import shape_utils + +# Total number of blocks in Mobilenet_V2 base network. +NUM_LAYERS = 19 + + +# A modified config of mobilenet v2 that makes it more detection friendly. +def _create_modified_mobilenet_config(): + last_conv = model_utils.ConvDefs(conv_name='Conv_1', filters=256) + return [last_conv] + + +class SSDMobileNetV2FpnKerasFeatureExtractor( + ssd_meta_arch.SSDKerasFeatureExtractor): + """SSD Feature Extractor using Keras-based MobilenetV2 FPN features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + use_native_resize_op=False, + override_base_feature_extractor_hyperparams=False, + name=None): + """SSD Keras based FPN feature extractor Mobilenet v2 architecture. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + freeze_batchnorm: whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + fpn_min_level: the highest resolution feature map to use in FPN. The valid + values are {2, 3, 4, 5} which map to MobileNet v2 layers + {layer_4, layer_7, layer_14, layer_19}, respectively. 
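+        For example, the default fpn_min_level of 3 makes layer_7 the highest
+        resolution feature map used by the FPN.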
+ fpn_max_level: the smallest resolution feature map to construct or use in + FPN. FPN constructions uses features maps starting from fpn_min_level + upto the fpn_max_level. In the case that there are not enough feature + maps in the backbone network, additional feature maps are created by + applying stride 2 convolutions until we get the desired number of fpn + levels. + additional_layer_depth: additional feature map layer channel depth. + reuse_weights: whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize + to do upsampling in FPN. Default is false. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams`. + name: a string name scope to assign to the model. If 'None', Keras + will auto-generate one from the class name. + """ + super(SSDMobileNetV2FpnKerasFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams, + name=name) + self._fpn_min_level = fpn_min_level + self._fpn_max_level = fpn_max_level + self._additional_layer_depth = additional_layer_depth + self._conv_defs = None + if self._use_depthwise: + self._conv_defs = _create_modified_mobilenet_config() + self._use_native_resize_op = use_native_resize_op + self._feature_blocks = ['layer_4', 'layer_7', 'layer_14', 'layer_19'] + self.classification_backbone = None + self._fpn_features_generator = None + self._coarse_feature_layers = [] + + def build(self, input_shape): + full_mobilenet_v2 = mobilenet_v2.mobilenet_v2( + batchnorm_training=(self._is_training and not self._freeze_batchnorm), + conv_hyperparams=(self._conv_hyperparams + if self._override_base_feature_extractor_hyperparams + else None), + weights=None, + use_explicit_padding=self._use_explicit_padding, + alpha=self._depth_multiplier, + min_depth=self._min_depth, + include_top=False) + layer_names = [layer.name for layer in full_mobilenet_v2.layers] + outputs = [] + for layer_idx in [4, 7, 14]: + add_name = 'block_{}_add'.format(layer_idx - 2) + project_name = 'block_{}_project_BN'.format(layer_idx - 2) + output_layer_name = add_name if add_name in layer_names else project_name + outputs.append(full_mobilenet_v2.get_layer(output_layer_name).output) + layer_19 = full_mobilenet_v2.get_layer(name='out_relu').output + outputs.append(layer_19) + self.classification_backbone = tf.keras.Model( + inputs=full_mobilenet_v2.inputs, + outputs=outputs) + # pylint:disable=g-long-lambda + self._depth_fn = lambda d: max( + int(d * self._depth_multiplier), self._min_depth) + self._base_fpn_max_level = min(self._fpn_max_level, 5) + self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level + self._fpn_features_generator = ( + feature_map_generators.KerasFpnTopDownFeatureMaps( + num_levels=self._num_levels, + depth=self._depth_fn(self._additional_layer_depth), + use_depthwise=self._use_depthwise, + use_explicit_padding=self._use_explicit_padding, + 
use_native_resize_op=self._use_native_resize_op, + is_training=self._is_training, + conv_hyperparams=self._conv_hyperparams, + freeze_batchnorm=self._freeze_batchnorm, + name='FeatureMaps')) + # Construct coarse feature layers + padding = 'VALID' if self._use_explicit_padding else 'SAME' + kernel_size = 3 + stride = 2 + for i in range(self._base_fpn_max_level + 1, self._fpn_max_level + 1): + coarse_feature_layers = [] + if self._use_explicit_padding: + def fixed_padding(features, kernel_size=kernel_size): + return ops.fixed_padding(features, kernel_size) + coarse_feature_layers.append(tf.keras.layers.Lambda( + fixed_padding, name='fixed_padding')) + layer_name = 'bottom_up_Conv2d_{}'.format( + i - self._base_fpn_max_level + NUM_LAYERS) + conv_block = feature_map_generators.create_conv_block( + self._use_depthwise, kernel_size, padding, stride, layer_name, + self._conv_hyperparams, self._is_training, self._freeze_batchnorm, + self._depth_fn(self._additional_layer_depth)) + coarse_feature_layers.extend(conv_block) + self._coarse_feature_layers.append(coarse_feature_layers) + self.built = True + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def _extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + + image_features = self.classification_backbone( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple)) + + feature_block_list = [] + for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): + feature_block_list.append(self._feature_blocks[level - 2]) + + feature_start_index = len(self._feature_blocks) - self._num_levels + fpn_input_image_features = [ + (key, image_features[feature_start_index + index]) + for index, key in enumerate(feature_block_list)] + fpn_features = self._fpn_features_generator(fpn_input_image_features) + + feature_maps = [] + for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): + feature_maps.append(fpn_features['top_down_{}'.format( + self._feature_blocks[level - 2])]) + last_feature_map = fpn_features['top_down_{}'.format( + self._feature_blocks[self._base_fpn_max_level - 2])] + + for coarse_feature_layers in self._coarse_feature_layers: + for layer in coarse_feature_layers: + last_feature_map = layer(last_feature_map) + feature_maps.append(last_feature_map) + return feature_maps + + def restore_from_classification_checkpoint_fn(self, feature_extractor_scope): + """Returns a map for restoring from an (object-based) checkpoint. + + Args: + feature_extractor_scope: A scope name for the feature extractor (unused). 
+ + Returns: + A dict mapping keys to Keras models + """ + return {'feature_extractor': self.classification_backbone} diff --git a/models/research/object_detection/models/ssd_mobilenet_v2_keras_feature_extractor.py b/models/research/object_detection/models/ssd_mobilenet_v2_keras_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..9f0622f3290fbfd15790eaad15f2831f7ae08c34 --- /dev/null +++ b/models/research/object_detection/models/ssd_mobilenet_v2_keras_feature_extractor.py @@ -0,0 +1,179 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""SSDFeatureExtractor for MobilenetV2 features.""" + +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.models.keras_models import mobilenet_v2 +from object_detection.utils import ops +from object_detection.utils import shape_utils + + +class SSDMobileNetV2KerasFeatureExtractor( + ssd_meta_arch.SSDKerasFeatureExtractor): + """SSD Feature Extractor using MobilenetV2 features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + use_explicit_padding=False, + use_depthwise=False, + num_layers=6, + override_base_feature_extractor_hyperparams=False, + name=None): + """MobileNetV2 Feature Extractor for SSD Models. + + Mobilenet v2 (experimental), designed by sandler@. More details can be found + in //knowledge/cerebra/brain/compression/mobilenet/mobilenet_experimental.py + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor (Functions + as a width multiplier for the mobilenet_v2 network itself). + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams: `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: Whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + num_layers: Number of SSD layers. 
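+        With the default of 6, feature maps are taken from
+        'layer_15/expansion_output' and 'layer_19', followed by four extra
+        layers of depth 512, 256, 256 and 128 (see _feature_map_layout below).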
+ override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + name: A string name scope to assign to the model. If 'None', Keras + will auto-generate one from the class name. + """ + super(SSDMobileNetV2KerasFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + num_layers=num_layers, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams, + name=name) + self._feature_map_layout = { + 'from_layer': ['layer_15/expansion_output', 'layer_19', '', '', '', '' + ][:self._num_layers], + 'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers], + 'use_depthwise': self._use_depthwise, + 'use_explicit_padding': self._use_explicit_padding, + } + + self.classification_backbone = None + self.feature_map_generator = None + + def build(self, input_shape): + full_mobilenet_v2 = mobilenet_v2.mobilenet_v2( + batchnorm_training=(self._is_training and not self._freeze_batchnorm), + conv_hyperparams=(self._conv_hyperparams + if self._override_base_feature_extractor_hyperparams + else None), + weights=None, + use_explicit_padding=self._use_explicit_padding, + alpha=self._depth_multiplier, + min_depth=self._min_depth, + include_top=False) + conv2d_11_pointwise = full_mobilenet_v2.get_layer( + name='block_13_expand_relu').output + conv2d_13_pointwise = full_mobilenet_v2.get_layer(name='out_relu').output + self.classification_backbone = tf.keras.Model( + inputs=full_mobilenet_v2.inputs, + outputs=[conv2d_11_pointwise, conv2d_13_pointwise]) + self.feature_map_generator = ( + feature_map_generators.KerasMultiResolutionFeatureMaps( + feature_map_layout=self._feature_map_layout, + depth_multiplier=self._depth_multiplier, + min_depth=self._min_depth, + insert_1x1_conv=True, + is_training=self._is_training, + conv_hyperparams=self._conv_hyperparams, + freeze_batchnorm=self._freeze_batchnorm, + name='FeatureMaps')) + self.built = True + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def _extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + + image_features = self.classification_backbone( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple)) + + feature_maps = self.feature_map_generator({ + 'layer_15/expansion_output': image_features[0], + 'layer_19': image_features[1]}) + + return list(feature_maps.values()) + + def restore_from_classification_checkpoint_fn(self, feature_extractor_scope): + """Returns a map for restoring from an (object-based) checkpoint. 
+ + Args: + feature_extractor_scope: A scope name for the feature extractor (unused). + + Returns: + A dict mapping keys to Keras models + """ + return {'feature_extractor': self.classification_backbone} diff --git a/models/research/object_detection/models/ssd_mobilenet_v2_mnasfpn_feature_extractor.py b/models/research/object_detection/models/ssd_mobilenet_v2_mnasfpn_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..be1d55a0390a218b21ba809c952d6f8ee58f995c --- /dev/null +++ b/models/research/object_detection/models/ssd_mobilenet_v2_mnasfpn_feature_extractor.py @@ -0,0 +1,412 @@ +# Lint as: python2, python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""SSD MobilenetV2 NAS-FPN Feature Extractor.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import functools +from six.moves import range +import tensorflow.compat.v1 as tf + +import tf_slim as slim +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.utils import ops +from object_detection.utils import shape_utils +from nets.mobilenet import mobilenet +from nets.mobilenet import mobilenet_v2 + + +Block = collections.namedtuple( + 'Block', ['inputs', 'output_level', 'kernel_size', 'expansion_size']) + +_MNASFPN_CELL_CONFIG = [ + Block(inputs=(1, 2), output_level=4, kernel_size=3, expansion_size=256), + Block(inputs=(0, 4), output_level=3, kernel_size=3, expansion_size=128), + Block(inputs=(5, 4), output_level=4, kernel_size=3, expansion_size=128), + Block(inputs=(4, 3), output_level=5, kernel_size=5, expansion_size=128), + Block(inputs=(4, 3), output_level=6, kernel_size=3, expansion_size=96), +] + +MNASFPN_DEF = dict( + feature_levels=[3, 4, 5, 6], + spec=[_MNASFPN_CELL_CONFIG] * 4, +) + + +def _maybe_pad(feature, use_explicit_padding, kernel_size=3): + return ops.fixed_padding(feature, + kernel_size) if use_explicit_padding else feature + + +# Wrapper around mobilenet.depth_multiplier +def _apply_multiplier(d, multiplier, min_depth): + p = {'num_outputs': d} + mobilenet.depth_multiplier( + p, multiplier=multiplier, divisible_by=8, min_depth=min_depth) + return p['num_outputs'] + + +def _apply_size_dependent_ordering(input_feature, feature_level, block_level, + expansion_size, use_explicit_padding, + use_native_resize_op): + """Applies Size-Dependent-Ordering when resizing feature maps. + + See https://arxiv.org/abs/1912.01106 + + Args: + input_feature: input feature map to be resized. + feature_level: the level of the input feature. + block_level: the desired output level for the block. + expansion_size: the expansion size for the block. + use_explicit_padding: Whether to use explicit padding. + use_native_resize_op: Whether to use native resize op. + + Returns: + A transformed feature at the desired resolution and expansion size. 
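+    When feature_level >= block_level, the 1x1 projection is applied before
+    upsampling; otherwise max pooling is applied before the 1x1 projection,
+    which is what makes the ordering size-dependent.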
+ """ + padding = 'VALID' if use_explicit_padding else 'SAME' + if feature_level >= block_level: # Perform 1x1 then upsampling. + node = slim.conv2d( + input_feature, + expansion_size, [1, 1], + activation_fn=None, + normalizer_fn=slim.batch_norm, + padding=padding, + scope='Conv1x1') + if feature_level == block_level: + return node + scale = 2**(feature_level - block_level) + if use_native_resize_op: + input_shape = shape_utils.combined_static_and_dynamic_shape(node) + node = tf.image.resize_nearest_neighbor( + node, [input_shape[1] * scale, input_shape[2] * scale]) + else: + node = ops.nearest_neighbor_upsampling(node, scale=scale) + else: # Perform downsampling then 1x1. + stride = 2**(block_level - feature_level) + node = slim.max_pool2d( + _maybe_pad(input_feature, use_explicit_padding), [3, 3], + stride=[stride, stride], + padding=padding, + scope='Downsample') + node = slim.conv2d( + node, + expansion_size, [1, 1], + activation_fn=None, + normalizer_fn=slim.batch_norm, + padding=padding, + scope='Conv1x1') + return node + + +def _mnasfpn_cell(feature_maps, + feature_levels, + cell_spec, + output_channel=48, + use_explicit_padding=False, + use_native_resize_op=False, + multiplier_func=None): + """Create a MnasFPN cell. + + Args: + feature_maps: input feature maps. + feature_levels: levels of the feature maps. + cell_spec: A list of Block configs. + output_channel: Number of features for the input, output and intermediate + feature maps. + use_explicit_padding: Whether to use explicit padding. + use_native_resize_op: Whether to use native resize op. + multiplier_func: Depth-multiplier function. If None, use identity function. + + Returns: + A transformed list of feature maps at the same resolutions as the inputs. + """ + # This is the level where multipliers are realized. + if multiplier_func is None: + multiplier_func = lambda x: x + num_outputs = len(feature_maps) + cell_features = list(feature_maps) + cell_levels = list(feature_levels) + padding = 'VALID' if use_explicit_padding else 'SAME' + for bi, block in enumerate(cell_spec): + with tf.variable_scope('block_{}'.format(bi)): + block_level = block.output_level + intermediate_feature = None + for i, inp in enumerate(block.inputs): + with tf.variable_scope('input_{}'.format(i)): + input_level = cell_levels[inp] + node = _apply_size_dependent_ordering( + cell_features[inp], input_level, block_level, + multiplier_func(block.expansion_size), use_explicit_padding, + use_native_resize_op) + # Add features incrementally to avoid producing AddN, which doesn't + # play well with TfLite. + if intermediate_feature is None: + intermediate_feature = node + else: + intermediate_feature += node + node = tf.nn.relu6(intermediate_feature) + node = slim.separable_conv2d( + _maybe_pad(node, use_explicit_padding, block.kernel_size), + multiplier_func(output_channel), + block.kernel_size, + activation_fn=None, + normalizer_fn=slim.batch_norm, + padding=padding, + scope='SepConv') + cell_features.append(node) + cell_levels.append(block_level) + + # Cell-wide residuals. 
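+  # The last `num_outputs` entries of cell_features are the cell outputs; each
+  # output gets a skip connection from the corresponding cell input whenever
+  # the two feature maps have identical shapes.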
+ out_idx = range(len(cell_features) - num_outputs, len(cell_features)) + for in_i, out_i in enumerate(out_idx): + if cell_features[out_i].shape.as_list( + ) == cell_features[in_i].shape.as_list(): + cell_features[out_i] += cell_features[in_i] + + return cell_features[-num_outputs:] + + +def mnasfpn(feature_maps, + head_def, + output_channel=48, + use_explicit_padding=False, + use_native_resize_op=False, + multiplier_func=None): + """Create the MnasFPN head given head_def.""" + features = feature_maps + for ci, cell_spec in enumerate(head_def['spec']): + with tf.variable_scope('cell_{}'.format(ci)): + features = _mnasfpn_cell(features, head_def['feature_levels'], cell_spec, + output_channel, use_explicit_padding, + use_native_resize_op, multiplier_func) + return features + + +def training_scope(l2_weight_decay=1e-4, is_training=None): + """Arg scope for training MnasFPN.""" + with slim.arg_scope( + [slim.conv2d], + weights_initializer=tf.initializers.he_normal(), + weights_regularizer=slim.l2_regularizer(l2_weight_decay)), \ + slim.arg_scope( + [slim.separable_conv2d], + weights_initializer=tf.initializers.truncated_normal( + stddev=0.536), # He_normal for 3x3 depthwise kernel. + weights_regularizer=slim.l2_regularizer(l2_weight_decay)), \ + slim.arg_scope([slim.batch_norm], + is_training=is_training, + epsilon=0.01, + decay=0.99, + center=True, + scale=True) as s: + return s + + +class SSDMobileNetV2MnasFPNFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): + """SSD Feature Extractor using MobilenetV2 MnasFPN features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + fpn_min_level=3, + fpn_max_level=6, + additional_layer_depth=48, + head_def=None, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + use_native_resize_op=False, + override_base_feature_extractor_hyperparams=False, + data_format='channels_last'): + """SSD MnasFPN feature extractor based on Mobilenet v2 architecture. + + See https://arxiv.org/abs/1912.01106 + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the base + feature extractor. + fpn_min_level: the highest resolution feature map to use in MnasFPN. + Currently the only valid value is 3. + fpn_max_level: the smallest resolution feature map to construct or use in + MnasFPN. Currentl the only valid value is 6. + additional_layer_depth: additional feature map layer channel depth for + NAS-FPN. + head_def: A dictionary specifying the MnasFPN head architecture. Default + uses MNASFPN_DEF. + reuse_weights: whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + use_native_resize_op: Whether to use native resize op. Default is False. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + data_format: The ordering of the dimensions in the inputs, The valid + values are {'channels_first', 'channels_last'). 
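+
+    Raises:
+      ValueError: if fpn_min_level is not 3 or fpn_max_level is not 6.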
+ """ + super(SSDMobileNetV2MnasFPNFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams=( + override_base_feature_extractor_hyperparams)) + if fpn_min_level != 3 or fpn_max_level != 6: + raise ValueError('Min and max levels of MnasFPN must be 3 and 6 for now.') + self._fpn_min_level = fpn_min_level + self._fpn_max_level = fpn_max_level + self._fpn_layer_depth = additional_layer_depth + self._head_def = head_def if head_def else MNASFPN_DEF + self._data_format = data_format + self._use_native_resize_op = use_native_resize_op + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def _verify_config(self, inputs): + """Verify that MnasFPN config and its inputs.""" + num_inputs = len(inputs) + assert len(self._head_def['feature_levels']) == num_inputs + + base_width = inputs[0].shape.as_list( + )[1] * 2**self._head_def['feature_levels'][0] + for i in range(1, num_inputs): + width = inputs[i].shape.as_list()[1] + level = self._head_def['feature_levels'][i] + expected_width = base_width // 2**level + if width != expected_width: + raise ValueError( + 'Resolution of input {} does not match its level {}.'.format( + i, level)) + + for cell_spec in self._head_def['spec']: + # The last K nodes in a cell are the inputs to the next cell. Assert that + # their feature maps are at the right level. + for i in range(num_inputs): + if cell_spec[-num_inputs + + i].output_level != self._head_def['feature_levels'][i]: + raise ValueError( + 'Mismatch between node level {} and desired output level {}.' + .format(cell_spec[-num_inputs + i].output_level, + self._head_def['feature_levels'][i])) + # Assert that each block only uses precending blocks. + for bi, block_spec in enumerate(cell_spec): + for inp in block_spec.inputs: + if inp >= bi + num_inputs: + raise ValueError( + 'Block {} is trying to access uncreated block {}.'.format( + bi, inp)) + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. 
+ + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + with tf.variable_scope('MobilenetV2', reuse=self._reuse_weights) as scope: + with slim.arg_scope( + mobilenet_v2.training_scope(is_training=None, bn_decay=0.99)), \ + slim.arg_scope( + [mobilenet.depth_multiplier], min_depth=self._min_depth): + with slim.arg_scope( + training_scope(l2_weight_decay=4e-5, + is_training=self._is_training)): + + _, image_features = mobilenet_v2.mobilenet_base( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), + final_endpoint='layer_18', + depth_multiplier=self._depth_multiplier, + use_explicit_padding=self._use_explicit_padding, + scope=scope) + + multiplier_func = functools.partial( + _apply_multiplier, + multiplier=self._depth_multiplier, + min_depth=self._min_depth) + with tf.variable_scope('MnasFPN', reuse=self._reuse_weights): + with slim.arg_scope( + training_scope(l2_weight_decay=1e-4, is_training=self._is_training)): + # Create C6 by downsampling C5. + c6 = slim.max_pool2d( + _maybe_pad(image_features['layer_18'], self._use_explicit_padding), + [3, 3], + stride=[2, 2], + padding='VALID' if self._use_explicit_padding else 'SAME', + scope='C6_downsample') + c6 = slim.conv2d( + c6, + multiplier_func(self._fpn_layer_depth), + [1, 1], + activation_fn=tf.identity, + normalizer_fn=slim.batch_norm, + weights_regularizer=None, # this 1x1 has no kernel regularizer. + padding='VALID', + scope='C6_Conv1x1') + image_features['C6'] = tf.identity(c6) # Needed for quantization. + for k in sorted(image_features.keys()): + tf.logging.error('{}: {}'.format(k, image_features[k])) + + mnasfpn_inputs = [ + image_features['layer_7'], # C3 + image_features['layer_14'], # C4 + image_features['layer_18'], # C5 + image_features['C6'] # C6 + ] + self._verify_config(mnasfpn_inputs) + feature_maps = mnasfpn( + mnasfpn_inputs, + head_def=self._head_def, + output_channel=self._fpn_layer_depth, + use_explicit_padding=self._use_explicit_padding, + use_native_resize_op=self._use_native_resize_op, + multiplier_func=multiplier_func) + return feature_maps diff --git a/models/research/object_detection/models/ssd_mobilenet_v2_mnasfpn_feature_extractor_tf1_test.py b/models/research/object_detection/models/ssd_mobilenet_v2_mnasfpn_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..032433128de057c97a422c97e96d16bd2942f62b --- /dev/null +++ b/models/research/object_detection/models/ssd_mobilenet_v2_mnasfpn_feature_extractor_tf1_test.py @@ -0,0 +1,87 @@ +# Lint as: python2, python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
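As a quick orientation for the MnasFPN extractor added above, the sketch below shows how it is typically constructed and queried. The stand-in conv_hyperparams_fn and the module alias are illustrative assumptions (the real arg_scope is built by hyperparams_builder from the pipeline config); the constructor arguments and the quoted output shapes mirror the unit test that follows.

# Minimal usage sketch (editorial, not part of the diff). Assumes the TF1 /
# tf_slim environment used by this file and a stand-in conv_hyperparams_fn.
import tensorflow.compat.v1 as tf
import tf_slim as slim

from object_detection.models import ssd_mobilenet_v2_mnasfpn_feature_extractor as mnasfpn_fe

def conv_hyperparams_fn():
  # Stand-in for the conv/separable_conv arg_scope normally built from the
  # pipeline config proto.
  with slim.arg_scope([slim.conv2d, slim.separable_conv2d], padding='SAME') as sc:
    return sc

extractor = mnasfpn_fe.SSDMobileNetV2MnasFPNFeatureExtractor(
    is_training=False,
    depth_multiplier=1.0,
    min_depth=16,
    pad_to_multiple=1,
    conv_hyperparams_fn=conv_hyperparams_fn)

images = tf.placeholder(tf.float32, [1, 320, 256, 3])    # pixel values in [0, 255]
preprocessed = extractor.preprocess(images)              # mapped to [-1, 1]
feature_maps = extractor.extract_features(preprocessed)  # four maps, levels 3-6, 48 channels
# For a 320x256 input the static shapes are [1, 40, 32, 48], [1, 20, 16, 48],
# [1, 10, 8, 48] and [1, 5, 4, 48], matching the test case below.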
+# ============================================================================== +"""Tests for ssd_mobilenet_v2_nas_fpn_feature_extractor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_mobilenet_v2_mnasfpn_feature_extractor as mnasfpn_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SsdMobilenetV2MnasFPNFeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False): + min_depth = 16 + is_training = True + fpn_num_filters = 48 + return mnasfpn_feature_extractor.SSDMobileNetV2MnasFPNFeatureExtractor( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + additional_layer_depth=fpn_num_filters, + use_explicit_padding=use_explicit_padding) + + def test_extract_features_returns_correct_shapes_320_256(self): + image_height = 320 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 40, 32, 48), (2, 20, 16, 48), + (2, 10, 8, 48), (2, 5, 4, 48)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True) + + def test_extract_features_returns_correct_shapes_enforcing_min_depth(self): + image_height = 256 + image_width = 256 + depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 16), (2, 16, 16, 16), + (2, 8, 8, 16), (2, 4, 4, 16)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True) + + def test_preprocess_returns_correct_value_range(self): + image_height = 320 + image_width = 320 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(2, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/ssd_mobilenet_v3_feature_extractor.py b/models/research/object_detection/models/ssd_mobilenet_v3_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..cc85fdccb793ee8f4e1d3197a957f450fa108f75 --- /dev/null +++ b/models/research/object_detection/models/ssd_mobilenet_v3_feature_extractor.py @@ -0,0 +1,218 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""SSDFeatureExtractor for MobileNetV3 features.""" + +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import context_manager +from object_detection.utils import ops +from object_detection.utils import shape_utils +from nets.mobilenet import mobilenet +from nets.mobilenet import mobilenet_v3 + + +class SSDMobileNetV3FeatureExtractorBase(ssd_meta_arch.SSDFeatureExtractor): + """Base class of SSD feature extractor using MobilenetV3 features.""" + + def __init__(self, + conv_defs, + from_layer, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + override_base_feature_extractor_hyperparams=False, + scope_name='MobilenetV3'): + """MobileNetV3 Feature Extractor for SSD Models. + + MobileNet v3. Details found in: + https://arxiv.org/abs/1905.02244 + + Args: + conv_defs: MobileNetV3 conv defs for backbone. + from_layer: A cell of two layer names (string) to connect to the 1st and + 2nd inputs of the SSD head. + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the base + feature extractor. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + scope_name: scope name (string) of network variables. + """ + super(SSDMobileNetV3FeatureExtractorBase, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams + ) + self._conv_defs = conv_defs + self._from_layer = from_layer + self._scope_name = scope_name + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. 
+ """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + Raises: + ValueError if conv_defs is not provided or from_layer does not meet the + size requirement. + """ + + if not self._conv_defs: + raise ValueError('Must provide backbone conv defs.') + + if len(self._from_layer) != 2: + raise ValueError('SSD input feature names are not provided.') + + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + + feature_map_layout = { + 'from_layer': [ + self._from_layer[0], self._from_layer[1], '', '', '', '' + ], + 'layer_depth': [-1, -1, 512, 256, 256, 128], + 'use_depthwise': self._use_depthwise, + 'use_explicit_padding': self._use_explicit_padding, + } + + with tf.variable_scope( + self._scope_name, reuse=self._reuse_weights) as scope: + with slim.arg_scope( + mobilenet_v3.training_scope(is_training=None, bn_decay=0.9997)), \ + slim.arg_scope( + [mobilenet.depth_multiplier], min_depth=self._min_depth): + with (slim.arg_scope(self._conv_hyperparams_fn()) + if self._override_base_feature_extractor_hyperparams else + context_manager.IdentityContextManager()): + _, image_features = mobilenet_v3.mobilenet_base( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), + conv_defs=self._conv_defs, + final_endpoint=self._from_layer[1], + depth_multiplier=self._depth_multiplier, + use_explicit_padding=self._use_explicit_padding, + scope=scope) + with slim.arg_scope(self._conv_hyperparams_fn()): + feature_maps = feature_map_generators.multi_resolution_feature_maps( + feature_map_layout=feature_map_layout, + depth_multiplier=self._depth_multiplier, + min_depth=self._min_depth, + insert_1x1_conv=True, + image_features=image_features) + + return list(feature_maps.values()) + + +class SSDMobileNetV3LargeFeatureExtractor(SSDMobileNetV3FeatureExtractorBase): + """Mobilenet V3-Large feature extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + override_base_feature_extractor_hyperparams=False, + scope_name='MobilenetV3'): + super(SSDMobileNetV3LargeFeatureExtractor, self).__init__( + conv_defs=mobilenet_v3.V3_LARGE_DETECTION, + from_layer=['layer_14/expansion_output', 'layer_17'], + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, + scope_name=scope_name + ) + + +class SSDMobileNetV3SmallFeatureExtractor(SSDMobileNetV3FeatureExtractorBase): + """Mobilenet V3-Small feature extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + override_base_feature_extractor_hyperparams=False, + scope_name='MobilenetV3'): + super(SSDMobileNetV3SmallFeatureExtractor, self).__init__( + conv_defs=mobilenet_v3.V3_SMALL_DETECTION, + 
from_layer=['layer_10/expansion_output', 'layer_13'], + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, + scope_name=scope_name + ) diff --git a/models/research/object_detection/models/ssd_mobilenet_v3_feature_extractor_testbase.py b/models/research/object_detection/models/ssd_mobilenet_v3_feature_extractor_testbase.py new file mode 100644 index 0000000000000000000000000000000000000000..d5ba60f2efe588f5afb9f7d9a3951dd1ce4d77c5 --- /dev/null +++ b/models/research/object_detection/models/ssd_mobilenet_v3_feature_extractor_testbase.py @@ -0,0 +1,112 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Base test class for ssd_mobilenet_v3_feature_extractor.""" + +import abc + +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test + + +class _SsdMobilenetV3FeatureExtractorTestBase( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + """Base class for MobilenetV3 tests.""" + + @abc.abstractmethod + def _get_input_sizes(self): + """Return feature map sizes for the two inputs to SSD head.""" + pass + + def test_extract_features_returns_correct_shapes_128(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + input_feature_sizes = self._get_input_sizes() + expected_feature_map_shape = [(2, 8, 8, input_feature_sizes[0]), + (2, 4, 4, input_feature_sizes[1]), + (2, 2, 2, 512), (2, 1, 1, 256), (2, 1, 1, + 256), + (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_keras=False) + + def test_extract_features_returns_correct_shapes_299(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 1 + input_feature_sizes = self._get_input_sizes() + expected_feature_map_shape = [(2, 19, 19, input_feature_sizes[0]), + (2, 10, 10, input_feature_sizes[1]), + (2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2, + 256), + (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_keras=False) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + input_feature_sizes = self._get_input_sizes() + expected_feature_map_shape = [(2, 20, 20, input_feature_sizes[0]), + (2, 10, 10, input_feature_sizes[1]), + (2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2, + 256), + (2, 1, 1, 128)] + 
self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_preprocess_returns_correct_value_range(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(4, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor( + depth_multiplier, pad_to_multiple, use_keras=False) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_has_fused_batchnorm(self): + image_height = 40 + image_width = 40 + depth_multiplier = 1 + pad_to_multiple = 1 + image_placeholder = tf.placeholder(tf.float32, + [1, image_height, image_width, 3]) + feature_extractor = self._create_feature_extractor( + depth_multiplier, pad_to_multiple, use_keras=False) + preprocessed_image = feature_extractor.preprocess(image_placeholder) + _ = feature_extractor.extract_features(preprocessed_image) + self.assertTrue(any('FusedBatchNorm' in op.type + for op in tf.get_default_graph().get_operations())) diff --git a/models/research/object_detection/models/ssd_mobilenet_v3_feature_extractor_tf1_test.py b/models/research/object_detection/models/ssd_mobilenet_v3_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..43c02490a7358820404380d20aa1d2190fce01a1 --- /dev/null +++ b/models/research/object_detection/models/ssd_mobilenet_v3_feature_extractor_tf1_test.py @@ -0,0 +1,105 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for ssd_mobilenet_v3_feature_extractor.""" +import unittest +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_mobilenet_v3_feature_extractor +from object_detection.models import ssd_mobilenet_v3_feature_extractor_testbase +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SsdMobilenetV3LargeFeatureExtractorTest( + ssd_mobilenet_v3_feature_extractor_testbase + ._SsdMobilenetV3FeatureExtractorTestBase): + + def _get_input_sizes(self): + """Return first two input feature map sizes.""" + return [672, 480] + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False, + use_keras=False): + """Constructs a new Mobilenet V3-Large feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + use_explicit_padding: use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + use_keras: if True builds a keras-based feature extractor, if False builds + a slim-based one. 
+ + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. + """ + min_depth = 32 + return ( + ssd_mobilenet_v3_feature_extractor.SSDMobileNetV3LargeFeatureExtractor( + False, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding)) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SsdMobilenetV3SmallFeatureExtractorTest( + ssd_mobilenet_v3_feature_extractor_testbase + ._SsdMobilenetV3FeatureExtractorTestBase): + + def _get_input_sizes(self): + """Return first two input feature map sizes.""" + return [288, 288] + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False, + use_keras=False): + """Constructs a new Mobilenet V3-Small feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + use_explicit_padding: use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + use_keras: if True builds a keras-based feature extractor, if False builds + a slim-based one. + + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. + """ + min_depth = 32 + return ( + ssd_mobilenet_v3_feature_extractor.SSDMobileNetV3SmallFeatureExtractor( + False, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/ssd_pnasnet_feature_extractor.py b/models/research/object_detection/models/ssd_pnasnet_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..48f1dee3b4f6aceffd87b995bebb06a88b25c4ca --- /dev/null +++ b/models/research/object_detection/models/ssd_pnasnet_feature_extractor.py @@ -0,0 +1,182 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""SSDFeatureExtractor for PNASNet features. + +Based on PNASNet ImageNet model: https://arxiv.org/abs/1712.00559 +""" + +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import context_manager +from object_detection.utils import ops +from object_detection.utils import variables_helper +try: + from nets.nasnet import pnasnet # pylint: disable=g-import-not-at-top +except: # pylint: disable=bare-except + pass + + +def pnasnet_large_arg_scope_for_detection(is_batch_norm_training=False): + """Defines the default arg scope for the PNASNet Large for object detection. + + This provides a small edit to switch batch norm training on and off. 
+ + Args: + is_batch_norm_training: Boolean indicating whether to train with batch norm. + Default is False. + + Returns: + An `arg_scope` to use for the PNASNet Large Model. + """ + imagenet_scope = pnasnet.pnasnet_large_arg_scope() + with slim.arg_scope(imagenet_scope): + with slim.arg_scope([slim.batch_norm], + is_training=is_batch_norm_training) as sc: + return sc + + +class SSDPNASNetFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): + """SSD Feature Extractor using PNASNet features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + num_layers=6, + override_base_feature_extractor_hyperparams=False): + """PNASNet Feature Extractor for SSD Models. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + use_depthwise: Whether to use depthwise convolutions. + num_layers: Number of SSD layers. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + """ + super(SSDPNASNetFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + num_layers=num_layers, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams) + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. 
+ + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + + feature_map_layout = { + 'from_layer': ['Cell_7', 'Cell_11', '', '', '', ''][:self._num_layers], + 'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers], + 'use_explicit_padding': self._use_explicit_padding, + 'use_depthwise': self._use_depthwise, + } + + with slim.arg_scope( + pnasnet_large_arg_scope_for_detection( + is_batch_norm_training=self._is_training)): + with slim.arg_scope([slim.conv2d, slim.batch_norm, slim.separable_conv2d], + reuse=self._reuse_weights): + with (slim.arg_scope(self._conv_hyperparams_fn()) + if self._override_base_feature_extractor_hyperparams else + context_manager.IdentityContextManager()): + _, image_features = pnasnet.build_pnasnet_large( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), + num_classes=None, + is_training=self._is_training, + final_endpoint='Cell_11') + with tf.variable_scope('SSD_feature_maps', reuse=self._reuse_weights): + with slim.arg_scope(self._conv_hyperparams_fn()): + feature_maps = feature_map_generators.multi_resolution_feature_maps( + feature_map_layout=feature_map_layout, + depth_multiplier=self._depth_multiplier, + min_depth=self._min_depth, + insert_1x1_conv=True, + image_features=image_features) + + return list(feature_maps.values()) + + def restore_from_classification_checkpoint_fn(self, feature_extractor_scope): + """Returns a map of variables to load from a foreign checkpoint. + + Note that this overrides the default implementation in + ssd_meta_arch.SSDFeatureExtractor which does not work for PNASNet + checkpoints. + + Args: + feature_extractor_scope: A scope name for the first stage feature + extractor. + + Returns: + A dict mapping variable names (to load from a checkpoint) to variables in + the model graph. + """ + variables_to_restore = {} + for variable in variables_helper.get_global_variables_safely(): + if variable.op.name.startswith(feature_extractor_scope): + var_name = variable.op.name.replace(feature_extractor_scope + '/', '') + var_name += '/ExponentialMovingAverage' + variables_to_restore[var_name] = variable + return variables_to_restore diff --git a/models/research/object_detection/models/ssd_pnasnet_feature_extractor_tf1_test.py b/models/research/object_detection/models/ssd_pnasnet_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..d5f5bff92d9f7da6fbf8243dd3dc1dff0bc9e628 --- /dev/null +++ b/models/research/object_detection/models/ssd_pnasnet_feature_extractor_tf1_test.py @@ -0,0 +1,108 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for ssd_pnas_feature_extractor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_pnasnet_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SsdPnasNetFeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False, + num_layers=6, + is_training=True): + """Constructs a new feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + num_layers: number of SSD layers. + is_training: whether the network is in training mode. + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. + """ + min_depth = 32 + return ssd_pnasnet_feature_extractor.SSDPNASNetFeatureExtractor( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding, + num_layers=num_layers) + + def test_extract_features_returns_correct_shapes_128(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 2160), (2, 4, 4, 4320), + (2, 2, 2, 512), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_299(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 2160), (2, 10, 10, 4320), + (2, 5, 5, 512), (2, 3, 3, 256), + (2, 2, 2, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_preprocess_returns_correct_value_range(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(2, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_extract_features_with_fewer_layers(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 2160), (2, 4, 4, 4320), + (2, 2, 2, 512), (2, 1, 1, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, num_layers=4) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/ssd_resnet_v1_fpn_feature_extractor.py b/models/research/object_detection/models/ssd_resnet_v1_fpn_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..fc1827a1a10e3c99562a33d6b64d29c50a09d3bd --- /dev/null +++ 
b/models/research/object_detection/models/ssd_resnet_v1_fpn_feature_extractor.py @@ -0,0 +1,391 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""SSD Feature Pyramid Network (FPN) feature extractors based on Resnet v1. + +See https://arxiv.org/abs/1708.02002 for details. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import context_manager +from object_detection.utils import ops +from object_detection.utils import shape_utils +from nets import resnet_v1 + + +class SSDResnetV1FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): + """SSD FPN feature extractor based on Resnet v1 architecture.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + resnet_base_fn, + resnet_scope_name, + fpn_scope_name, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + use_native_resize_op=False, + override_base_feature_extractor_hyperparams=False): + """SSD FPN feature extractor based on Resnet v1 architecture. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + resnet_base_fn: base resnet network to use. + resnet_scope_name: scope name under which to construct resnet + fpn_scope_name: scope name under which to construct the feature pyramid + network. + fpn_min_level: the highest resolution feature map to use in FPN. The valid + values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} + respectively. + fpn_max_level: the smallest resolution feature map to construct or use in + FPN. FPN constructions uses features maps starting from fpn_min_level + upto the fpn_max_level. In the case that there are not enough feature + maps in the backbone network, additional feature maps are created by + applying stride 2 convolutions until we get the desired number of fpn + levels. + additional_layer_depth: additional feature map layer channel depth. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. UNUSED currently. + use_depthwise: Whether to use depthwise convolutions. 
UNUSED currently. + use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize + to do upsampling in FPN. Default is false. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + + Raises: + ValueError: On supplying invalid arguments for unused arguments. + """ + super(SSDResnetV1FpnFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams) + if self._use_explicit_padding is True: + raise ValueError('Explicit padding is not a valid option.') + self._resnet_base_fn = resnet_base_fn + self._resnet_scope_name = resnet_scope_name + self._fpn_scope_name = fpn_scope_name + self._fpn_min_level = fpn_min_level + self._fpn_max_level = fpn_max_level + self._additional_layer_depth = additional_layer_depth + self._use_native_resize_op = use_native_resize_op + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + VGG style channel mean subtraction as described here: + https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-mdnge. + Note that if the number of channels is not equal to 3, the mean subtraction + will be skipped and the original resized_inputs will be returned. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + if resized_inputs.shape.as_list()[3] == 3: + channel_means = [123.68, 116.779, 103.939] + return resized_inputs - [[channel_means]] + else: + return resized_inputs + + def _filter_features(self, image_features): + # TODO(rathodv): Change resnet endpoint to strip scope prefixes instead + # of munging the scope here. + filtered_image_features = dict({}) + for key, feature in image_features.items(): + feature_name = key.split('/')[-1] + if feature_name in ['block1', 'block2', 'block3', 'block4']: + filtered_image_features[feature_name] = feature + return filtered_image_features + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. 
+ + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 129, preprocessed_inputs) + + with tf.variable_scope( + self._resnet_scope_name, reuse=self._reuse_weights) as scope: + with slim.arg_scope(resnet_v1.resnet_arg_scope()): + with (slim.arg_scope(self._conv_hyperparams_fn()) + if self._override_base_feature_extractor_hyperparams else + context_manager.IdentityContextManager()): + _, image_features = self._resnet_base_fn( + inputs=ops.pad_to_multiple(preprocessed_inputs, + self._pad_to_multiple), + num_classes=None, + is_training=None, + global_pool=False, + output_stride=None, + store_non_strided_activations=True, + min_base_depth=self._min_depth, + depth_multiplier=self._depth_multiplier, + scope=scope) + image_features = self._filter_features(image_features) + depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth) + with slim.arg_scope(self._conv_hyperparams_fn()): + with tf.variable_scope(self._fpn_scope_name, + reuse=self._reuse_weights): + base_fpn_max_level = min(self._fpn_max_level, 5) + feature_block_list = [] + for level in range(self._fpn_min_level, base_fpn_max_level + 1): + feature_block_list.append('block{}'.format(level - 1)) + fpn_features = feature_map_generators.fpn_top_down_feature_maps( + [(key, image_features[key]) for key in feature_block_list], + depth=depth_fn(self._additional_layer_depth), + use_native_resize_op=self._use_native_resize_op) + feature_maps = [] + for level in range(self._fpn_min_level, base_fpn_max_level + 1): + feature_maps.append( + fpn_features['top_down_block{}'.format(level - 1)]) + last_feature_map = fpn_features['top_down_block{}'.format( + base_fpn_max_level - 1)] + # Construct coarse features + for i in range(base_fpn_max_level, self._fpn_max_level): + last_feature_map = slim.conv2d( + last_feature_map, + num_outputs=depth_fn(self._additional_layer_depth), + kernel_size=[3, 3], + stride=2, + padding='SAME', + scope='bottom_up_block{}'.format(i)) + feature_maps.append(last_feature_map) + return feature_maps + + +class SSDResnet50V1FpnFeatureExtractor(SSDResnetV1FpnFeatureExtractor): + """SSD Resnet50 V1 FPN feature extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + use_native_resize_op=False, + override_base_feature_extractor_hyperparams=False): + """SSD Resnet50 V1 FPN feature extractor based on Resnet v1 architecture. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + fpn_min_level: the minimum level in feature pyramid networks. + fpn_max_level: the maximum level in feature pyramid networks. + additional_layer_depth: additional feature map layer channel depth. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. UNUSED currently. 
+ use_depthwise: Whether to use depthwise convolutions. UNUSED currently. + use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize + to do upsampling in FPN. Default is false. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + """ + super(SSDResnet50V1FpnFeatureExtractor, self).__init__( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + resnet_v1.resnet_v1_50, + 'resnet_v1_50', + 'fpn', + fpn_min_level, + fpn_max_level, + additional_layer_depth, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + use_native_resize_op=use_native_resize_op, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams) + + +class SSDResnet101V1FpnFeatureExtractor(SSDResnetV1FpnFeatureExtractor): + """SSD Resnet101 V1 FPN feature extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + use_native_resize_op=False, + override_base_feature_extractor_hyperparams=False): + """SSD Resnet101 V1 FPN feature extractor based on Resnet v1 architecture. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + fpn_min_level: the minimum level in feature pyramid networks. + fpn_max_level: the maximum level in feature pyramid networks. + additional_layer_depth: additional feature map layer channel depth. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. UNUSED currently. + use_depthwise: Whether to use depthwise convolutions. UNUSED currently. + use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize + to do upsampling in FPN. Default is false. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. 
+ """ + super(SSDResnet101V1FpnFeatureExtractor, self).__init__( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + resnet_v1.resnet_v1_101, + 'resnet_v1_101', + 'fpn', + fpn_min_level, + fpn_max_level, + additional_layer_depth, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + use_native_resize_op=use_native_resize_op, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams) + + +class SSDResnet152V1FpnFeatureExtractor(SSDResnetV1FpnFeatureExtractor): + """SSD Resnet152 V1 FPN feature extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + use_native_resize_op=False, + override_base_feature_extractor_hyperparams=False): + """SSD Resnet152 V1 FPN feature extractor based on Resnet v1 architecture. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + fpn_min_level: the minimum level in feature pyramid networks. + fpn_max_level: the maximum level in feature pyramid networks. + additional_layer_depth: additional feature map layer channel depth. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. UNUSED currently. + use_depthwise: Whether to use depthwise convolutions. UNUSED currently. + use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize + to do upsampling in FPN. Default is false. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + """ + super(SSDResnet152V1FpnFeatureExtractor, self).__init__( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + resnet_v1.resnet_v1_152, + 'resnet_v1_152', + 'fpn', + fpn_min_level, + fpn_max_level, + additional_layer_depth, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + use_native_resize_op=use_native_resize_op, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams) diff --git a/models/research/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_testbase.py b/models/research/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_testbase.py new file mode 100644 index 0000000000000000000000000000000000000000..1ccad530ed5f34da2bd903c23b1d974f86a9d933 --- /dev/null +++ b/models/research/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_testbase.py @@ -0,0 +1,193 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for ssd resnet v1 FPN feature extractors.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import abc +import numpy as np +from six.moves import zip +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.utils import test_utils + + +class SSDResnetFPNFeatureExtractorTestBase( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + """Helper test class for SSD Resnet v1 FPN feature extractors.""" + + @abc.abstractmethod + def _resnet_scope_name(self): + pass + + @abc.abstractmethod + def _fpn_scope_name(self): + return 'fpn' + + @abc.abstractmethod + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False, + min_depth=32, + use_keras=False): + pass + + def test_extract_features_returns_correct_shapes_256(self): + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), + (2, 8, 8, 256), (2, 4, 4, 256), + (2, 2, 2, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_keras=self.is_tf2()) + + def test_extract_features_returns_correct_shapes_with_dynamic_inputs( + self): + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), + (2, 8, 8, 256), (2, 4, 4, 256), + (2, 2, 2, 256)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_keras=self.is_tf2()) + + def test_extract_features_returns_correct_shapes_with_depth_multiplier( + self): + image_height = 256 + image_width = 256 + depth_multiplier = 0.5 + expected_num_channels = int(256 * depth_multiplier) + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, expected_num_channels), + (2, 16, 16, expected_num_channels), + (2, 8, 8, expected_num_channels), + (2, 4, 4, expected_num_channels), + (2, 2, 2, expected_num_channels)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_keras=self.is_tf2()) + + def test_extract_features_returns_correct_shapes_with_min_depth( + self): + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + min_depth = 320 + expected_feature_map_shape = [(2, 32, 32, min_depth), + (2, 16, 16, min_depth), + (2, 8, 8, min_depth), + (2, 4, 4, min_depth), + (2, 2, 2, min_depth)] + + with test_utils.GraphContextOrNone() as g: + image_tensor = tf.random.uniform([2, image_height, image_width, 3]) + feature_extractor = self._create_feature_extractor( + depth_multiplier, pad_to_multiple, min_depth=min_depth, + use_keras=self.is_tf2()) + + def graph_fn(): + if self.is_tf2(): + return 
feature_extractor(image_tensor) + return feature_extractor.extract_features(image_tensor) + + feature_maps = self.execute(graph_fn, [], graph=g) + for feature_map, expected_shape in zip(feature_maps, + expected_feature_map_shape): + self.assertAllEqual(feature_map.shape, expected_shape) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple( + self): + image_height = 254 + image_width = 254 + depth_multiplier = 1.0 + pad_to_multiple = 32 + expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), + (2, 8, 8, 256), (2, 4, 4, 256), + (2, 2, 2, 256)] + + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_keras=self.is_tf2()) + + def test_extract_features_raises_error_with_invalid_image_size( + self): + image_height = 32 + image_width = 32 + depth_multiplier = 1.0 + pad_to_multiple = 1 + self.check_extract_features_raises_error_with_invalid_image_size( + image_height, image_width, depth_multiplier, pad_to_multiple, + use_keras=self.is_tf2()) + + def test_preprocess_returns_correct_value_range(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image_np = np.random.rand(4, image_height, image_width, 3) + with test_utils.GraphContextOrNone() as g: + test_image = tf.constant(test_image_np) + feature_extractor = self._create_feature_extractor( + depth_multiplier, pad_to_multiple, use_keras=self.is_tf2()) + + def graph_fn(): + preprocessed_image = feature_extractor.preprocess(test_image) + return preprocessed_image + + preprocessed_image_out = self.execute(graph_fn, [], graph=g) + self.assertAllClose(preprocessed_image_out, + test_image_np - [[123.68, 116.779, 103.939]]) + + def test_variables_only_created_in_scope(self): + if self.is_tf2(): + self.skipTest('test_variables_only_created_in_scope is only tf1') + depth_multiplier = 1 + pad_to_multiple = 1 + scope_name = self._resnet_scope_name() + self.check_feature_extractor_variables_under_scope( + depth_multiplier, + pad_to_multiple, + scope_name, + use_keras=self.is_tf2()) + + def test_variable_count(self): + if self.is_tf2(): + self.skipTest('test_variable_count is only tf1') + depth_multiplier = 1 + pad_to_multiple = 1 + variables = self.get_feature_extractor_variables( + depth_multiplier, + pad_to_multiple, + use_keras=self.is_tf2()) + # The number of expected variables in resnet_v1_50, resnet_v1_101, + # and resnet_v1_152 is 279, 534, and 789 respectively. + expected_variables_len = 279 + scope_name = self._resnet_scope_name() + if scope_name in ('ResNet101V1_FPN', 'resnet_v1_101'): + expected_variables_len = 534 + elif scope_name in ('ResNet152V1_FPN', 'resnet_v1_152'): + expected_variables_len = 789 + self.assertEqual(len(variables), expected_variables_len) diff --git a/models/research/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_tf1_test.py b/models/research/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..58952ff9486d6be3f077c9e21788ce8409806d18 --- /dev/null +++ b/models/research/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_tf1_test.py @@ -0,0 +1,85 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
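As a side note on the preprocessing checked in test_preprocess_returns_correct_value_range above, the snippet below shows the broadcasting that the Resnet extractors' preprocess() relies on. It is a standalone numpy illustration, not code from this diff.

import numpy as np

# VGG-style per-channel mean subtraction, broadcast over [batch, height, width, 3].
images = 255.0 * np.random.rand(2, 4, 4, 3)
channel_means = [123.68, 116.779, 103.939]   # per-channel (RGB) means
preprocessed = images - [[channel_means]]    # [[...]] has shape (1, 1, 3)
assert preprocessed.shape == images.shape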
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for ssd resnet v1 FPN feature extractors.""" +import unittest +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_resnet_v1_fpn_feature_extractor +from object_detection.models import ssd_resnet_v1_fpn_feature_extractor_testbase +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SSDResnet50V1FeatureExtractorTest( + ssd_resnet_v1_fpn_feature_extractor_testbase. + SSDResnetFPNFeatureExtractorTestBase): + """SSDResnet50v1Fpn feature extractor test.""" + + def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, + use_explicit_padding=False, min_depth=32, + use_keras=False): + is_training = True + return ( + ssd_resnet_v1_fpn_feature_extractor.SSDResnet50V1FpnFeatureExtractor( + is_training, depth_multiplier, min_depth, pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding)) + + def _resnet_scope_name(self): + return 'resnet_v1_50' + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SSDResnet101V1FeatureExtractorTest( + ssd_resnet_v1_fpn_feature_extractor_testbase. + SSDResnetFPNFeatureExtractorTestBase): + """SSDResnet101v1Fpn feature extractor test.""" + + def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, + use_explicit_padding=False, min_depth=32, + use_keras=False): + is_training = True + return ( + ssd_resnet_v1_fpn_feature_extractor.SSDResnet101V1FpnFeatureExtractor( + is_training, depth_multiplier, min_depth, pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding)) + + def _resnet_scope_name(self): + return 'resnet_v1_101' + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SSDResnet152V1FeatureExtractorTest( + ssd_resnet_v1_fpn_feature_extractor_testbase. + SSDResnetFPNFeatureExtractorTestBase): + """SSDResnet152v1Fpn feature extractor test.""" + + def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, + use_explicit_padding=False, min_depth=32, + use_keras=False): + is_training = True + return ( + ssd_resnet_v1_fpn_feature_extractor.SSDResnet152V1FpnFeatureExtractor( + is_training, depth_multiplier, min_depth, pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding)) + + def _resnet_scope_name(self): + return 'resnet_v1_152' + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_tf2_test.py b/models/research/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..27c54ddd08ffa866dad4975c9bed7c629e8c46ac --- /dev/null +++ b/models/research/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_tf2_test.py @@ -0,0 +1,103 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for ssd resnet v1 FPN feature extractors.""" +import unittest +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_resnet_v1_fpn_feature_extractor_testbase +from object_detection.models import ssd_resnet_v1_fpn_keras_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class SSDResnet50V1FeatureExtractorTest( + ssd_resnet_v1_fpn_feature_extractor_testbase. + SSDResnetFPNFeatureExtractorTestBase): + """SSDResnet50v1Fpn feature extractor test.""" + + def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, + use_explicit_padding=False, min_depth=32, + use_keras=True): + is_training = True + return (ssd_resnet_v1_fpn_keras_feature_extractor. + SSDResNet50V1FpnKerasFeatureExtractor( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=self._build_conv_hyperparams( + add_batch_norm=False), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + name='ResNet50V1_FPN')) + + def _resnet_scope_name(self): + return 'ResNet50V1_FPN' + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class SSDResnet101V1FeatureExtractorTest( + ssd_resnet_v1_fpn_feature_extractor_testbase. + SSDResnetFPNFeatureExtractorTestBase): + """SSDResnet101v1Fpn feature extractor test.""" + + def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, + use_explicit_padding=False, min_depth=32, + use_keras=False): + is_training = True + return (ssd_resnet_v1_fpn_keras_feature_extractor. + SSDResNet101V1FpnKerasFeatureExtractor( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=self._build_conv_hyperparams( + add_batch_norm=False), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + name='ResNet101V1_FPN')) + + def _resnet_scope_name(self): + return 'ResNet101V1_FPN' + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class SSDResnet152V1FeatureExtractorTest( + ssd_resnet_v1_fpn_feature_extractor_testbase. + SSDResnetFPNFeatureExtractorTestBase): + """SSDResnet152v1Fpn feature extractor test.""" + + def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, + use_explicit_padding=False, min_depth=32, + use_keras=False): + is_training = True + return (ssd_resnet_v1_fpn_keras_feature_extractor. 
+ SSDResNet152V1FpnKerasFeatureExtractor( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=self._build_conv_hyperparams( + add_batch_norm=False), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + name='ResNet152V1_FPN')) + + def _resnet_scope_name(self): + return 'ResNet152V1_FPN' + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/models/ssd_resnet_v1_fpn_keras_feature_extractor.py b/models/research/object_detection/models/ssd_resnet_v1_fpn_keras_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..6de9ae3e5b8f24885d139b2b277b09ccd1782169 --- /dev/null +++ b/models/research/object_detection/models/ssd_resnet_v1_fpn_keras_feature_extractor.py @@ -0,0 +1,468 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""SSD Keras-based ResnetV1 FPN Feature Extractor.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range +from six.moves import zip +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.models.keras_models import resnet_v1 +from object_detection.utils import ops +from object_detection.utils import shape_utils + +_RESNET_MODEL_OUTPUT_LAYERS = { + 'resnet_v1_50': ['conv2_block3_out', 'conv3_block4_out', + 'conv4_block6_out', 'conv5_block3_out'], + 'resnet_v1_101': ['conv2_block3_out', 'conv3_block4_out', + 'conv4_block23_out', 'conv5_block3_out'], + 'resnet_v1_152': ['conv2_block3_out', 'conv3_block8_out', + 'conv4_block36_out', 'conv5_block3_out'], +} + + +class SSDResNetV1FpnKerasFeatureExtractor( + ssd_meta_arch.SSDKerasFeatureExtractor): + """SSD Feature Extractor using Keras-based ResnetV1 FPN features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + resnet_v1_base_model, + resnet_v1_base_model_name, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + reuse_weights=None, + use_explicit_padding=None, + use_depthwise=None, + override_base_feature_extractor_hyperparams=False, + name=None): + """SSD Keras based FPN feature extractor Resnet v1 architecture. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. 
+ conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + freeze_batchnorm: whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + resnet_v1_base_model: base resnet v1 network to use. One of + the resnet_v1.resnet_v1_{50,101,152} models. + resnet_v1_base_model_name: model name under which to construct resnet v1. + fpn_min_level: the highest resolution feature map to use in FPN. The valid + values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} + respectively. + fpn_max_level: the smallest resolution feature map to construct or use in + FPN. FPN constructions uses features maps starting from fpn_min_level + upto the fpn_max_level. In the case that there are not enough feature + maps in the backbone network, additional feature maps are created by + applying stride 2 convolutions until we get the desired number of fpn + levels. + additional_layer_depth: additional feature map layer channel depth. + reuse_weights: whether to reuse variables. Default is None. + use_explicit_padding: whether to use explicit padding when extracting + features. Default is None, as it's an invalid option and not implemented + in this feature extractor. + use_depthwise: Whether to use depthwise convolutions. UNUSED currently. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams`. + name: a string name scope to assign to the model. If 'None', Keras + will auto-generate one from the class name. 
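+
+      For illustration only (using the default values documented above): with
+      fpn_min_level=3 and fpn_max_level=7, the FPN consumes backbone features
+      up to level min(fpn_max_level, 5) = 5, producing
+        num_levels = 5 + 1 - 3 = 3
+      top-down maps, and two extra stride-2 convolution blocks
+      (bottom_up_block5 and bottom_up_block6) are appended to reach level 7,
+      giving five feature maps in total.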
+ """ + super(SSDResNetV1FpnKerasFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + use_explicit_padding=None, + use_depthwise=None, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams, + name=name) + if self._use_explicit_padding: + raise ValueError('Explicit padding is not a valid option.') + if self._use_depthwise: + raise ValueError('Depthwise is not a valid option.') + self._fpn_min_level = fpn_min_level + self._fpn_max_level = fpn_max_level + self._additional_layer_depth = additional_layer_depth + self._resnet_v1_base_model = resnet_v1_base_model + self._resnet_v1_base_model_name = resnet_v1_base_model_name + self._resnet_block_names = ['block1', 'block2', 'block3', 'block4'] + self.classification_backbone = None + self._fpn_features_generator = None + self._coarse_feature_layers = [] + + def build(self, input_shape): + full_resnet_v1_model = self._resnet_v1_base_model( + batchnorm_training=(self._is_training and not self._freeze_batchnorm), + conv_hyperparams=(self._conv_hyperparams + if self._override_base_feature_extractor_hyperparams + else None), + depth_multiplier=self._depth_multiplier, + min_depth=self._min_depth, + classes=None, + weights=None, + include_top=False) + output_layers = _RESNET_MODEL_OUTPUT_LAYERS[self._resnet_v1_base_model_name] + outputs = [full_resnet_v1_model.get_layer(output_layer_name).output + for output_layer_name in output_layers] + self.classification_backbone = tf.keras.Model( + inputs=full_resnet_v1_model.inputs, + outputs=outputs) + # pylint:disable=g-long-lambda + self._depth_fn = lambda d: max( + int(d * self._depth_multiplier), self._min_depth) + self._base_fpn_max_level = min(self._fpn_max_level, 5) + self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level + self._fpn_features_generator = ( + feature_map_generators.KerasFpnTopDownFeatureMaps( + num_levels=self._num_levels, + depth=self._depth_fn(self._additional_layer_depth), + is_training=self._is_training, + conv_hyperparams=self._conv_hyperparams, + freeze_batchnorm=self._freeze_batchnorm, + name='FeatureMaps')) + # Construct coarse feature layers + depth = self._depth_fn(self._additional_layer_depth) + for i in range(self._base_fpn_max_level, self._fpn_max_level): + layers = [] + layer_name = 'bottom_up_block{}'.format(i) + layers.append( + tf.keras.layers.Conv2D( + depth, + [3, 3], + padding='SAME', + strides=2, + name=layer_name + '_conv', + **self._conv_hyperparams.params())) + layers.append( + self._conv_hyperparams.build_batch_norm( + training=(self._is_training and not self._freeze_batchnorm), + name=layer_name + '_batchnorm')) + layers.append( + self._conv_hyperparams.build_activation_layer( + name=layer_name)) + self._coarse_feature_layers.append(layers) + self.built = True + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + VGG style channel mean subtraction as described here: + https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-mdnge. + Note that if the number of channels is not equal to 3, the mean subtraction + will be skipped and the original resized_inputs will be returned. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. 
+ + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + if resized_inputs.shape.as_list()[3] == 3: + channel_means = [123.68, 116.779, 103.939] + return resized_inputs - [[channel_means]] + else: + return resized_inputs + + def _extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 129, preprocessed_inputs) + + image_features = self.classification_backbone( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple)) + + feature_block_list = [] + for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): + feature_block_list.append('block{}'.format(level - 1)) + feature_block_map = dict( + list(zip(self._resnet_block_names, image_features))) + fpn_input_image_features = [ + (feature_block, feature_block_map[feature_block]) + for feature_block in feature_block_list] + fpn_features = self._fpn_features_generator(fpn_input_image_features) + + feature_maps = [] + for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): + feature_maps.append(fpn_features['top_down_block{}'.format(level-1)]) + last_feature_map = fpn_features['top_down_block{}'.format( + self._base_fpn_max_level - 1)] + + for coarse_feature_layers in self._coarse_feature_layers: + for layer in coarse_feature_layers: + last_feature_map = layer(last_feature_map) + feature_maps.append(last_feature_map) + return feature_maps + + def restore_from_classification_checkpoint_fn(self, feature_extractor_scope): + """Returns a map for restoring from an (object-based) checkpoint. + + Args: + feature_extractor_scope: A scope name for the feature extractor (unused). + + Returns: + A dict mapping keys to Keras models + """ + return {'feature_extractor': self.classification_backbone} + + +class SSDResNet50V1FpnKerasFeatureExtractor( + SSDResNetV1FpnKerasFeatureExtractor): + """SSD Feature Extractor using Keras-based ResnetV1-50 FPN features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + reuse_weights=None, + use_explicit_padding=None, + use_depthwise=None, + override_base_feature_extractor_hyperparams=False, + name='ResNet50V1_FPN'): + """SSD Keras based FPN feature extractor ResnetV1-50 architecture. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + freeze_batchnorm: whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: whether to update batch norm moving average + values inplace. 
When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + fpn_min_level: the minimum level in feature pyramid networks. + fpn_max_level: the maximum level in feature pyramid networks. + additional_layer_depth: additional feature map layer channel depth. + reuse_weights: whether to reuse variables. Default is None. + use_explicit_padding: whether to use explicit padding when extracting + features. Default is None, as it's an invalid option and not implemented + in this feature extractor. + use_depthwise: Whether to use depthwise convolutions. UNUSED currently. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams`. + name: a string name scope to assign to the model. If 'None', Keras + will auto-generate one from the class name. + """ + super(SSDResNet50V1FpnKerasFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + resnet_v1_base_model=resnet_v1.resnet_v1_50, + resnet_v1_base_model_name='resnet_v1_50', + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams, + name=name) + + +class SSDResNet101V1FpnKerasFeatureExtractor( + SSDResNetV1FpnKerasFeatureExtractor): + """SSD Feature Extractor using Keras-based ResnetV1-101 FPN features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + reuse_weights=None, + use_explicit_padding=None, + use_depthwise=None, + override_base_feature_extractor_hyperparams=False, + name='ResNet101V1_FPN'): + """SSD Keras based FPN feature extractor ResnetV1-101 architecture. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + freeze_batchnorm: whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + fpn_min_level: the minimum level in feature pyramid networks. + fpn_max_level: the maximum level in feature pyramid networks. + additional_layer_depth: additional feature map layer channel depth. + reuse_weights: whether to reuse variables. Default is None. + use_explicit_padding: whether to use explicit padding when extracting + features. Default is None, as it's an invalid option and not implemented + in this feature extractor. + use_depthwise: Whether to use depthwise convolutions. UNUSED currently. 
+ override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams`. + name: a string name scope to assign to the model. If 'None', Keras + will auto-generate one from the class name. + """ + super(SSDResNet101V1FpnKerasFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + resnet_v1_base_model=resnet_v1.resnet_v1_101, + resnet_v1_base_model_name='resnet_v1_101', + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams, + name=name) + + +class SSDResNet152V1FpnKerasFeatureExtractor( + SSDResNetV1FpnKerasFeatureExtractor): + """SSD Feature Extractor using Keras-based ResnetV1-152 FPN features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=None, + override_base_feature_extractor_hyperparams=False, + name='ResNet152V1_FPN'): + """SSD Keras based FPN feature extractor ResnetV1-152 architecture. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + freeze_batchnorm: whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + fpn_min_level: the minimum level in feature pyramid networks. + fpn_max_level: the maximum level in feature pyramid networks. + additional_layer_depth: additional feature map layer channel depth. + reuse_weights: whether to reuse variables. Default is None. + use_explicit_padding: whether to use explicit padding when extracting + features. Default is None, as it's an invalid option and not implemented + in this feature extractor. + use_depthwise: Whether to use depthwise convolutions. UNUSED currently. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams`. + name: a string name scope to assign to the model. If 'None', Keras + will auto-generate one from the class name. 
+ """ + super(SSDResNet152V1FpnKerasFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + resnet_v1_base_model=resnet_v1.resnet_v1_152, + resnet_v1_base_model_name='resnet_v1_152', + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams, + name=name) diff --git a/models/research/object_detection/models/ssd_resnet_v1_ppn_feature_extractor.py b/models/research/object_detection/models/ssd_resnet_v1_ppn_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..cbb34e261e83122a2b59bdca2ed96363f67fd632 --- /dev/null +++ b/models/research/object_detection/models/ssd_resnet_v1_ppn_feature_extractor.py @@ -0,0 +1,284 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""SSD feature extractors based on Resnet v1 and PPN architectures.""" + +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import context_manager +from object_detection.utils import ops +from object_detection.utils import shape_utils +from nets import resnet_v1 + + +class _SSDResnetPpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): + """SSD feature extractor based on resnet architecture and PPN.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + resnet_base_fn, + resnet_scope_name, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + base_feature_map_depth=1024, + num_layers=6, + override_base_feature_extractor_hyperparams=False, + use_bounded_activations=False): + """Resnet based PPN Feature Extractor for SSD Models. + + See go/pooling-pyramid for more details about PPN. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + resnet_base_fn: base resnet network to use. + resnet_scope_name: scope name to construct resnet + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. 
+ base_feature_map_depth: Depth of the base feature before the max pooling. + num_layers: Number of layers used to make predictions. They are pooled + from the base feature. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + use_bounded_activations: Whether or not to use bounded activations for + resnet v1 bottleneck residual unit. Bounded activations better lend + themselves to quantized inference. + """ + super(_SSDResnetPpnFeatureExtractor, self).__init__( + is_training, depth_multiplier, min_depth, pad_to_multiple, + conv_hyperparams_fn, reuse_weights, use_explicit_padding, use_depthwise, + override_base_feature_extractor_hyperparams) + self._resnet_base_fn = resnet_base_fn + self._resnet_scope_name = resnet_scope_name + self._base_feature_map_depth = base_feature_map_depth + self._num_layers = num_layers + self._use_bounded_activations = use_bounded_activations + + def _filter_features(self, image_features): + # TODO(rathodv): Change resnet endpoint to strip scope prefixes instead + # of munging the scope here. + filtered_image_features = dict({}) + for key, feature in image_features.items(): + feature_name = key.split('/')[-1] + if feature_name in ['block2', 'block3', 'block4']: + filtered_image_features[feature_name] = feature + return filtered_image_features + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + VGG style channel mean subtraction as described here: + https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-mdnge. + Note that if the number of channels is not equal to 3, the mean subtraction + will be skipped and the original resized_inputs will be returned. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + if resized_inputs.shape.as_list()[3] == 3: + channel_means = [123.68, 116.779, 103.939] + return resized_inputs - [[channel_means]] + else: + return resized_inputs + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + + Raises: + ValueError: depth multiplier is not supported. 
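+
+      For illustration only (these shapes come from the accompanying
+      testbase): with the default base_feature_map_depth=1024 and
+      num_layers=6, a batch of two 289x289 images yields six feature maps of
+      shapes (2, 19, 19, 1024), (2, 10, 10, 1024), (2, 5, 5, 1024),
+      (2, 3, 3, 1024), (2, 2, 2, 1024) and (2, 1, 1, 1024), all pooled from
+      the resnet 'block3' activations; inputs smaller than 129 pixels per
+      side are rejected by shape_utils.check_min_image_dim.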
+ """ + if self._depth_multiplier != 1.0: + raise ValueError('Depth multiplier not supported.') + + preprocessed_inputs = shape_utils.check_min_image_dim( + 129, preprocessed_inputs) + + with tf.variable_scope( + self._resnet_scope_name, reuse=self._reuse_weights) as scope: + with slim.arg_scope(resnet_v1.resnet_arg_scope()): + with (slim.arg_scope(self._conv_hyperparams_fn()) + if self._override_base_feature_extractor_hyperparams else + context_manager.IdentityContextManager()): + with slim.arg_scope( + [resnet_v1.bottleneck], + use_bounded_activations=self._use_bounded_activations): + _, activations = self._resnet_base_fn( + inputs=ops.pad_to_multiple(preprocessed_inputs, + self._pad_to_multiple), + num_classes=None, + is_training=None, + global_pool=False, + output_stride=None, + store_non_strided_activations=True, + scope=scope) + + with slim.arg_scope(self._conv_hyperparams_fn()): + feature_maps = feature_map_generators.pooling_pyramid_feature_maps( + base_feature_map_depth=self._base_feature_map_depth, + num_layers=self._num_layers, + image_features={ + 'image_features': self._filter_features(activations)['block3'] + }) + return list(feature_maps.values()) + + +class SSDResnet50V1PpnFeatureExtractor(_SSDResnetPpnFeatureExtractor): + """PPN Resnet50 v1 Feature Extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + override_base_feature_extractor_hyperparams=False): + """Resnet50 v1 Feature Extractor for SSD Models. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + """ + super(SSDResnet50V1PpnFeatureExtractor, self).__init__( + is_training, depth_multiplier, min_depth, pad_to_multiple, + conv_hyperparams_fn, resnet_v1.resnet_v1_50, 'resnet_v1_50', + reuse_weights, use_explicit_padding, use_depthwise, + override_base_feature_extractor_hyperparams=( + override_base_feature_extractor_hyperparams)) + + +class SSDResnet101V1PpnFeatureExtractor(_SSDResnetPpnFeatureExtractor): + """PPN Resnet101 v1 Feature Extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + override_base_feature_extractor_hyperparams=False): + """Resnet101 v1 Feature Extractor for SSD Models. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. 
+ conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + """ + super(SSDResnet101V1PpnFeatureExtractor, self).__init__( + is_training, depth_multiplier, min_depth, pad_to_multiple, + conv_hyperparams_fn, resnet_v1.resnet_v1_101, 'resnet_v1_101', + reuse_weights, use_explicit_padding, use_depthwise, + override_base_feature_extractor_hyperparams=( + override_base_feature_extractor_hyperparams)) + + +class SSDResnet152V1PpnFeatureExtractor(_SSDResnetPpnFeatureExtractor): + """PPN Resnet152 v1 Feature Extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + override_base_feature_extractor_hyperparams=False): + """Resnet152 v1 Feature Extractor for SSD Models. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + """ + super(SSDResnet152V1PpnFeatureExtractor, self).__init__( + is_training, depth_multiplier, min_depth, pad_to_multiple, + conv_hyperparams_fn, resnet_v1.resnet_v1_152, 'resnet_v1_152', + reuse_weights, use_explicit_padding, use_depthwise, + override_base_feature_extractor_hyperparams=( + override_base_feature_extractor_hyperparams)) diff --git a/models/research/object_detection/models/ssd_resnet_v1_ppn_feature_extractor_testbase.py b/models/research/object_detection/models/ssd_resnet_v1_ppn_feature_extractor_testbase.py new file mode 100644 index 0000000000000000000000000000000000000000..ba80c6627a0711d54ffdf63a58e35ba6431c9f62 --- /dev/null +++ b/models/research/object_detection/models/ssd_resnet_v1_ppn_feature_extractor_testbase.py @@ -0,0 +1,82 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for ssd resnet v1 feature extractors.""" +import abc +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test + + +class SSDResnetPpnFeatureExtractorTestBase( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + """Helper test class for SSD Resnet PPN feature extractors.""" + + @abc.abstractmethod + def _scope_name(self): + pass + + def test_extract_features_returns_correct_shapes_289(self): + image_height = 289 + image_width = 289 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 1024), (2, 10, 10, 1024), + (2, 5, 5, 1024), (2, 3, 3, 1024), + (2, 2, 2, 1024), (2, 1, 1, 1024)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_with_dynamic_inputs(self): + image_height = 289 + image_width = 289 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 1024), (2, 10, 10, 1024), + (2, 5, 5, 1024), (2, 3, 3, 1024), + (2, 2, 2, 1024), (2, 1, 1, 1024)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_raises_error_with_invalid_image_size(self): + image_height = 32 + image_width = 32 + depth_multiplier = 1.0 + pad_to_multiple = 1 + self.check_extract_features_raises_error_with_invalid_image_size( + image_height, image_width, depth_multiplier, pad_to_multiple) + + def test_preprocess_returns_correct_value_range(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = tf.constant(np.random.rand(4, image_height, image_width, 3)) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + preprocessed_image = feature_extractor.preprocess(test_image) + with self.test_session() as sess: + test_image_out, preprocessed_image_out = sess.run( + [test_image, preprocessed_image]) + self.assertAllClose(preprocessed_image_out, + test_image_out - [[123.68, 116.779, 103.939]]) + + def test_variables_only_created_in_scope(self): + depth_multiplier = 1 + pad_to_multiple = 1 + self.check_feature_extractor_variables_under_scope( + depth_multiplier, pad_to_multiple, self._scope_name()) diff --git a/models/research/object_detection/models/ssd_resnet_v1_ppn_feature_extractor_tf1_test.py b/models/research/object_detection/models/ssd_resnet_v1_ppn_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..bb95cb53f3905ef9288ade7600005c1ba9372be5 --- /dev/null +++ b/models/research/object_detection/models/ssd_resnet_v1_ppn_feature_extractor_tf1_test.py @@ -0,0 +1,93 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for ssd resnet v1 feature extractors.""" +import unittest +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_resnet_v1_ppn_feature_extractor +from object_detection.models import ssd_resnet_v1_ppn_feature_extractor_testbase +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SSDResnet50V1PpnFeatureExtractorTest( + ssd_resnet_v1_ppn_feature_extractor_testbase. + SSDResnetPpnFeatureExtractorTestBase): + """SSDResnet50v1 feature extractor test.""" + + def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, + use_explicit_padding=False): + min_depth = 32 + is_training = True + return ssd_resnet_v1_ppn_feature_extractor.SSDResnet50V1PpnFeatureExtractor( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding) + + def _scope_name(self): + return 'resnet_v1_50' + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SSDResnet101V1PpnFeatureExtractorTest( + ssd_resnet_v1_ppn_feature_extractor_testbase. + SSDResnetPpnFeatureExtractorTestBase): + """SSDResnet101v1 feature extractor test.""" + + def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, + use_explicit_padding=False): + min_depth = 32 + is_training = True + return ( + ssd_resnet_v1_ppn_feature_extractor.SSDResnet101V1PpnFeatureExtractor( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding)) + + def _scope_name(self): + return 'resnet_v1_101' + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SSDResnet152V1PpnFeatureExtractorTest( + ssd_resnet_v1_ppn_feature_extractor_testbase. + SSDResnetPpnFeatureExtractorTestBase): + """SSDResnet152v1 feature extractor test.""" + + def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, + use_explicit_padding=False): + min_depth = 32 + is_training = True + return ( + ssd_resnet_v1_ppn_feature_extractor.SSDResnet152V1PpnFeatureExtractor( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding)) + + def _scope_name(self): + return 'resnet_v1_152' + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/predictors/__init__.py b/models/research/object_detection/predictors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/object_detection/predictors/convolutional_box_predictor.py b/models/research/object_detection/predictors/convolutional_box_predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..44b47533091db0a8967f7009cd6095074fe2f202 --- /dev/null +++ b/models/research/object_detection/predictors/convolutional_box_predictor.py @@ -0,0 +1,421 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Convolutional Box Predictors with and without weight sharing.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import functools +from six.moves import range +from six.moves import zip +import tensorflow.compat.v1 as tf +import tf_slim as slim +from object_detection.core import box_predictor +from object_detection.utils import shape_utils +from object_detection.utils import static_shape + +BOX_ENCODINGS = box_predictor.BOX_ENCODINGS +CLASS_PREDICTIONS_WITH_BACKGROUND = ( + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) +MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS + + +class _NoopVariableScope(object): + """A dummy class that does not push any scope.""" + + def __enter__(self): + return None + + def __exit__(self, exc_type, exc_value, traceback): + return False + + +class ConvolutionalBoxPredictor(box_predictor.BoxPredictor): + """Convolutional Box Predictor. + + Optionally add an intermediate 1x1 convolutional layer after features and + predict in parallel branches box_encodings and + class_predictions_with_background. + + Currently this box predictor assumes that predictions are "shared" across + classes --- that is each anchor makes box predictions which do not depend + on class. + """ + + def __init__(self, + is_training, + num_classes, + box_prediction_head, + class_prediction_head, + other_heads, + conv_hyperparams_fn, + num_layers_before_predictor, + min_depth, + max_depth): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + box_prediction_head: The head that predicts the boxes. + class_prediction_head: The head that predicts the classes. + other_heads: A dictionary mapping head names to convolutional + head classes. + conv_hyperparams_fn: A function to generate tf-slim arg_scope with + hyperparameters for convolution ops. + num_layers_before_predictor: Number of the additional conv layers before + the predictor. + min_depth: Minimum feature depth prior to predicting box encodings + and class predictions. + max_depth: Maximum feature depth prior to predicting box encodings + and class predictions. If max_depth is set to 0, no additional + feature map will be inserted before location and class predictions. + + Raises: + ValueError: if min_depth > max_depth. 
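+
+      For illustration only (mirroring the depth clamping in _predict below):
+      the depth of the optional conv layers inserted before the heads is
+        max(min(feature_depth, max_depth), min_depth),
+      so a 64-channel feature map with min_depth=0, max_depth=32 and
+      num_layers_before_predictor=1 receives a single 1x1 convolution to 32
+      channels (scope 'Conv2d_0_1x1_32') before the box and class heads.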
+ """ + super(ConvolutionalBoxPredictor, self).__init__(is_training, num_classes) + self._box_prediction_head = box_prediction_head + self._class_prediction_head = class_prediction_head + self._other_heads = other_heads + self._conv_hyperparams_fn = conv_hyperparams_fn + self._min_depth = min_depth + self._max_depth = max_depth + self._num_layers_before_predictor = num_layers_before_predictor + + @property + def num_classes(self): + return self._num_classes + + def _predict(self, image_features, num_predictions_per_location_list): + """Computes encoded object locations and corresponding confidences. + + Args: + image_features: A list of float tensors of shape [batch_size, height_i, + width_i, channels_i] containing features for a batch of images. + num_predictions_per_location_list: A list of integers representing the + number of box predictions to be made per spatial location for each + feature map. + + Returns: + A dictionary containing: + box_encodings: A list of float tensors of shape + [batch_size, num_anchors_i, q, code_size] representing the location of + the objects, where q is 1 or the number of classes. Each entry in the + list corresponds to a feature map in the input `image_features` list. + class_predictions_with_background: A list of float tensors of shape + [batch_size, num_anchors_i, num_classes + 1] representing the class + predictions for the proposals. Each entry in the list corresponds to a + feature map in the input `image_features` list. + (optional) Predictions from other heads. + """ + predictions = { + BOX_ENCODINGS: [], + CLASS_PREDICTIONS_WITH_BACKGROUND: [], + } + for head_name in self._other_heads.keys(): + predictions[head_name] = [] + # TODO(rathodv): Come up with a better way to generate scope names + # in box predictor once we have time to retrain all models in the zoo. + # The following lines create scope names to be backwards compatible with the + # existing checkpoints. + box_predictor_scopes = [_NoopVariableScope()] + if len(image_features) > 1: + box_predictor_scopes = [ + tf.variable_scope('BoxPredictor_{}'.format(i)) + for i in range(len(image_features)) + ] + for (image_feature, + num_predictions_per_location, box_predictor_scope) in zip( + image_features, num_predictions_per_location_list, + box_predictor_scopes): + net = image_feature + with box_predictor_scope: + with slim.arg_scope(self._conv_hyperparams_fn()): + with slim.arg_scope([slim.dropout], is_training=self._is_training): + # Add additional conv layers before the class predictor. + features_depth = static_shape.get_depth(image_feature.get_shape()) + depth = max(min(features_depth, self._max_depth), self._min_depth) + tf.logging.info('depth of additional conv before box predictor: {}'. 
+ format(depth)) + if depth > 0 and self._num_layers_before_predictor > 0: + for i in range(self._num_layers_before_predictor): + net = slim.conv2d( + net, + depth, [1, 1], + reuse=tf.AUTO_REUSE, + scope='Conv2d_%d_1x1_%d' % (i, depth)) + sorted_keys = sorted(self._other_heads.keys()) + sorted_keys.append(BOX_ENCODINGS) + sorted_keys.append(CLASS_PREDICTIONS_WITH_BACKGROUND) + for head_name in sorted_keys: + if head_name == BOX_ENCODINGS: + head_obj = self._box_prediction_head + elif head_name == CLASS_PREDICTIONS_WITH_BACKGROUND: + head_obj = self._class_prediction_head + else: + head_obj = self._other_heads[head_name] + prediction = head_obj.predict( + features=net, + num_predictions_per_location=num_predictions_per_location) + predictions[head_name].append(prediction) + return predictions + + +# TODO(rathodv): Replace with slim.arg_scope_func_key once its available +# externally. +def _arg_scope_func_key(op): + """Returns a key that can be used to index arg_scope dictionary.""" + return getattr(op, '_key_op', str(op)) + + +# TODO(rathodv): Merge the implementation with ConvolutionalBoxPredictor above +# since they are very similar. +class WeightSharedConvolutionalBoxPredictor(box_predictor.BoxPredictor): + """Convolutional Box Predictor with weight sharing. + + Defines the box predictor as defined in + https://arxiv.org/abs/1708.02002. This class differs from + ConvolutionalBoxPredictor in that it shares weights and biases while + predicting from different feature maps. However, batch_norm parameters are not + shared because the statistics of the activations vary among the different + feature maps. + + Also note that separate multi-layer towers are constructed for the box + encoding and class predictors respectively. + """ + + def __init__(self, + is_training, + num_classes, + box_prediction_head, + class_prediction_head, + other_heads, + conv_hyperparams_fn, + depth, + num_layers_before_predictor, + kernel_size=3, + apply_batch_norm=False, + share_prediction_tower=False, + use_depthwise=False): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + box_prediction_head: The head that predicts the boxes. + class_prediction_head: The head that predicts the classes. + other_heads: A dictionary mapping head names to convolutional + head classes. + conv_hyperparams_fn: A function to generate tf-slim arg_scope with + hyperparameters for convolution ops. + depth: depth of conv layers. + num_layers_before_predictor: Number of the additional conv layers before + the predictor. + kernel_size: Size of final convolution kernel. + apply_batch_norm: Whether to apply batch normalization to conv layers in + this predictor. + share_prediction_tower: Whether to share the multi-layer tower among box + prediction head, class prediction head and other heads. + use_depthwise: Whether to use depthwise separable conv2d instead of + regular conv2d. 
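+
+      For illustration only (summarising _compute_base_tower and _predict
+      below): each tower applies num_layers_before_predictor convolutions of
+      `depth` channels with a kernel_size x kernel_size kernel; the
+      convolution weights are shared across feature maps via tf.AUTO_REUSE,
+      while batch norm statistics are kept per feature map (scopes ending in
+      'BatchNorm/feature_<index>'). If share_prediction_tower is True a
+      single 'PredictionTower' feeds every head; otherwise separate
+      'BoxPredictionTower' and 'ClassPredictionTower' towers are built.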
+ """ + super(WeightSharedConvolutionalBoxPredictor, self).__init__(is_training, + num_classes) + self._box_prediction_head = box_prediction_head + self._class_prediction_head = class_prediction_head + self._other_heads = other_heads + self._conv_hyperparams_fn = conv_hyperparams_fn + self._depth = depth + self._num_layers_before_predictor = num_layers_before_predictor + self._kernel_size = kernel_size + self._apply_batch_norm = apply_batch_norm + self._share_prediction_tower = share_prediction_tower + self._use_depthwise = use_depthwise + + @property + def num_classes(self): + return self._num_classes + + def _insert_additional_projection_layer(self, image_feature, + inserted_layer_counter, + target_channel): + if inserted_layer_counter < 0: + return image_feature, inserted_layer_counter + image_feature = slim.conv2d( + image_feature, + target_channel, [1, 1], + stride=1, + padding='SAME', + activation_fn=None, + normalizer_fn=(tf.identity if self._apply_batch_norm else None), + scope='ProjectionLayer/conv2d_{}'.format( + inserted_layer_counter)) + if self._apply_batch_norm: + image_feature = slim.batch_norm( + image_feature, + scope='ProjectionLayer/conv2d_{}/BatchNorm'.format( + inserted_layer_counter)) + inserted_layer_counter += 1 + return image_feature, inserted_layer_counter + + def _compute_base_tower(self, tower_name_scope, image_feature, feature_index): + net = image_feature + for i in range(self._num_layers_before_predictor): + if self._use_depthwise: + conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1) + else: + conv_op = slim.conv2d + net = conv_op( + net, + self._depth, [self._kernel_size, self._kernel_size], + stride=1, + padding='SAME', + activation_fn=None, + normalizer_fn=(tf.identity if self._apply_batch_norm else None), + scope='{}/conv2d_{}'.format(tower_name_scope, i)) + if self._apply_batch_norm: + net = slim.batch_norm( + net, + scope='{}/conv2d_{}/BatchNorm/feature_{}'. + format(tower_name_scope, i, feature_index)) + net = tf.nn.relu6(net) + return net + + def _predict_head(self, head_name, head_obj, image_feature, box_tower_feature, + feature_index, num_predictions_per_location): + if head_name == CLASS_PREDICTIONS_WITH_BACKGROUND: + tower_name_scope = 'ClassPredictionTower' + else: + tower_name_scope = head_name + 'PredictionTower' + if self._share_prediction_tower: + head_tower_feature = box_tower_feature + else: + head_tower_feature = self._compute_base_tower( + tower_name_scope=tower_name_scope, + image_feature=image_feature, + feature_index=feature_index) + return head_obj.predict( + features=head_tower_feature, + num_predictions_per_location=num_predictions_per_location) + + def _predict(self, image_features, num_predictions_per_location_list): + """Computes encoded object locations and corresponding confidences. + + Args: + image_features: A list of float tensors of shape [batch_size, height_i, + width_i, channels] containing features for a batch of images. Note that + when not all tensors in the list have the same number of channels, an + additional projection layer will be added on top the tensor to generate + feature map with number of channels consitent with the majority. + num_predictions_per_location_list: A list of integers representing the + number of box predictions to be made per spatial location for each + feature map. Note that all values must be the same since the weights are + shared. 
+ + Returns: + A dictionary containing: + box_encodings: A list of float tensors of shape + [batch_size, num_anchors_i, code_size] representing the location of + the objects. Each entry in the list corresponds to a feature map in + the input `image_features` list. + class_predictions_with_background: A list of float tensors of shape + [batch_size, num_anchors_i, num_classes + 1] representing the class + predictions for the proposals. Each entry in the list corresponds to a + feature map in the input `image_features` list. + (optional) Predictions from other heads. + E.g., mask_predictions: A list of float tensors of shape + [batch_size, num_anchord_i, num_classes, mask_height, mask_width]. + + + Raises: + ValueError: If the num predictions per locations differs between the + feature maps. + """ + if len(set(num_predictions_per_location_list)) > 1: + raise ValueError('num predictions per location must be same for all' + 'feature maps, found: {}'.format( + num_predictions_per_location_list)) + feature_channels = [ + shape_utils.get_dim_as_int(image_feature.shape[3]) + for image_feature in image_features + ] + has_different_feature_channels = len(set(feature_channels)) > 1 + if has_different_feature_channels: + inserted_layer_counter = 0 + target_channel = max(set(feature_channels), key=feature_channels.count) + tf.logging.info('Not all feature maps have the same number of ' + 'channels, found: {}, appending additional projection ' + 'layers to bring all feature maps to uniformly have {} ' + 'channels.'.format(feature_channels, target_channel)) + else: + # Place holder variables if has_different_feature_channels is False. + target_channel = -1 + inserted_layer_counter = -1 + predictions = { + BOX_ENCODINGS: [], + CLASS_PREDICTIONS_WITH_BACKGROUND: [], + } + for head_name in self._other_heads.keys(): + predictions[head_name] = [] + for feature_index, (image_feature, + num_predictions_per_location) in enumerate( + zip(image_features, + num_predictions_per_location_list)): + with tf.variable_scope('WeightSharedConvolutionalBoxPredictor', + reuse=tf.AUTO_REUSE): + with slim.arg_scope(self._conv_hyperparams_fn()): + # TODO(wangjiang) Pass is_training to the head class directly. 
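+          # The block below first projects feature maps to a common channel
+          # count when they differ: a 1x1 projection layer (tracked by
+          # inserted_layer_counter) brings each map to target_channel
+          # channels before the shared towers and prediction heads are
+          # applied.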
+ with slim.arg_scope([slim.dropout], is_training=self._is_training): + (image_feature, + inserted_layer_counter) = self._insert_additional_projection_layer( + image_feature, inserted_layer_counter, target_channel) + if self._share_prediction_tower: + box_tower_scope = 'PredictionTower' + else: + box_tower_scope = 'BoxPredictionTower' + box_tower_feature = self._compute_base_tower( + tower_name_scope=box_tower_scope, + image_feature=image_feature, + feature_index=feature_index) + box_encodings = self._box_prediction_head.predict( + features=box_tower_feature, + num_predictions_per_location=num_predictions_per_location) + predictions[BOX_ENCODINGS].append(box_encodings) + sorted_keys = sorted(self._other_heads.keys()) + sorted_keys.append(CLASS_PREDICTIONS_WITH_BACKGROUND) + for head_name in sorted_keys: + if head_name == CLASS_PREDICTIONS_WITH_BACKGROUND: + head_obj = self._class_prediction_head + else: + head_obj = self._other_heads[head_name] + prediction = self._predict_head( + head_name=head_name, + head_obj=head_obj, + image_feature=image_feature, + box_tower_feature=box_tower_feature, + feature_index=feature_index, + num_predictions_per_location=num_predictions_per_location) + predictions[head_name].append(prediction) + return predictions + + diff --git a/models/research/object_detection/predictors/convolutional_box_predictor_tf1_test.py b/models/research/object_detection/predictors/convolutional_box_predictor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..3236615dfb60bc848ec271fc5173b9c4169feb93 --- /dev/null +++ b/models/research/object_detection/predictors/convolutional_box_predictor_tf1_test.py @@ -0,0 +1,932 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.predictors.convolutional_box_predictor.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import unittest +from absl.testing import parameterized +import numpy as np +from six.moves import range +from six.moves import zip +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import box_predictor_builder +from object_detection.builders import hyperparams_builder +from object_detection.predictors import convolutional_box_predictor as box_predictor +from object_detection.predictors.heads import box_head +from object_detection.predictors.heads import class_head +from object_detection.predictors.heads import mask_head +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class ConvolutionalBoxPredictorTest(test_case.TestCase): + + def _build_arg_scope_with_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: RELU_6 + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.build(conv_hyperparams, is_training=True) + + def test_get_boxes_for_five_aspect_ratios_per_location(self): + def graph_fn(image_features): + conv_box_predictor = ( + box_predictor_builder.build_convolutional_box_predictor( + is_training=False, + num_classes=0, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + min_depth=0, + max_depth=32, + num_layers_before_predictor=1, + use_dropout=True, + dropout_keep_prob=0.8, + kernel_size=1, + box_code_size=4)) + box_predictions = conv_box_predictor.predict( + [image_features], num_predictions_per_location=[5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + objectness_predictions = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, objectness_predictions) + image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + (box_encodings, objectness_predictions) = self.execute(graph_fn, + [image_features]) + self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4]) + self.assertAllEqual(objectness_predictions.shape, [4, 320, 1]) + + def test_get_boxes_for_one_aspect_ratio_per_location(self): + def graph_fn(image_features): + conv_box_predictor = ( + box_predictor_builder.build_convolutional_box_predictor( + is_training=False, + num_classes=0, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + min_depth=0, + max_depth=32, + num_layers_before_predictor=1, + use_dropout=True, + dropout_keep_prob=0.8, + kernel_size=1, + box_code_size=4)) + box_predictions = conv_box_predictor.predict( + [image_features], num_predictions_per_location=[1], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + objectness_predictions = tf.concat(box_predictions[ + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) + return (box_encodings, objectness_predictions) + image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + (box_encodings, objectness_predictions) = 
self.execute(graph_fn, + [image_features]) + self.assertAllEqual(box_encodings.shape, [4, 64, 1, 4]) + self.assertAllEqual(objectness_predictions.shape, [4, 64, 1]) + + def test_get_multi_class_predictions_for_five_aspect_ratios_per_location( + self): + num_classes_without_background = 6 + image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + def graph_fn(image_features): + conv_box_predictor = ( + box_predictor_builder.build_convolutional_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + min_depth=0, + max_depth=32, + num_layers_before_predictor=1, + use_dropout=True, + dropout_keep_prob=0.8, + kernel_size=1, + box_code_size=4)) + box_predictions = conv_box_predictor.predict( + [image_features], + num_predictions_per_location=[5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + (box_encodings, + class_predictions_with_background) = self.execute(graph_fn, + [image_features]) + self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4]) + self.assertAllEqual(class_predictions_with_background.shape, + [4, 320, num_classes_without_background+1]) + + def test_get_predictions_with_feature_maps_of_dynamic_shape( + self): + image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64]) + conv_box_predictor = ( + box_predictor_builder.build_convolutional_box_predictor( + is_training=False, + num_classes=0, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + min_depth=0, + max_depth=32, + num_layers_before_predictor=1, + use_dropout=True, + dropout_keep_prob=0.8, + kernel_size=1, + box_code_size=4)) + box_predictions = conv_box_predictor.predict( + [image_features], num_predictions_per_location=[5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + objectness_predictions = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + init_op = tf.global_variables_initializer() + + resolution = 32 + expected_num_anchors = resolution*resolution*5 + with self.test_session() as sess: + sess.run(init_op) + (box_encodings_shape, + objectness_predictions_shape) = sess.run( + [tf.shape(box_encodings), tf.shape(objectness_predictions)], + feed_dict={image_features: + np.random.rand(4, resolution, resolution, 64)}) + actual_variable_set = set( + [var.op.name for var in tf.trainable_variables()]) + self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 1, 4]) + self.assertAllEqual(objectness_predictions_shape, + [4, expected_num_anchors, 1]) + expected_variable_set = set([ + 'BoxPredictor/Conv2d_0_1x1_32/biases', + 'BoxPredictor/Conv2d_0_1x1_32/weights', + 'BoxPredictor/BoxEncodingPredictor/biases', + 'BoxPredictor/BoxEncodingPredictor/weights', + 'BoxPredictor/ClassPredictor/biases', + 'BoxPredictor/ClassPredictor/weights']) + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_use_depthwise_convolution(self): + image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64]) + conv_box_predictor = ( + box_predictor_builder.build_convolutional_box_predictor( + is_training=False, + num_classes=0, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + min_depth=0, + 
max_depth=32, + num_layers_before_predictor=1, + dropout_keep_prob=0.8, + kernel_size=3, + box_code_size=4, + use_dropout=True, + use_depthwise=True)) + box_predictions = conv_box_predictor.predict( + [image_features], num_predictions_per_location=[5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + objectness_predictions = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + init_op = tf.global_variables_initializer() + + resolution = 32 + expected_num_anchors = resolution*resolution*5 + with self.test_session() as sess: + sess.run(init_op) + (box_encodings_shape, + objectness_predictions_shape) = sess.run( + [tf.shape(box_encodings), tf.shape(objectness_predictions)], + feed_dict={image_features: + np.random.rand(4, resolution, resolution, 64)}) + actual_variable_set = set( + [var.op.name for var in tf.trainable_variables()]) + self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 1, 4]) + self.assertAllEqual(objectness_predictions_shape, + [4, expected_num_anchors, 1]) + expected_variable_set = set([ + 'BoxPredictor/Conv2d_0_1x1_32/biases', + 'BoxPredictor/Conv2d_0_1x1_32/weights', + 'BoxPredictor/BoxEncodingPredictor_depthwise/biases', + 'BoxPredictor/BoxEncodingPredictor_depthwise/depthwise_weights', + 'BoxPredictor/BoxEncodingPredictor/biases', + 'BoxPredictor/BoxEncodingPredictor/weights', + 'BoxPredictor/ClassPredictor_depthwise/biases', + 'BoxPredictor/ClassPredictor_depthwise/depthwise_weights', + 'BoxPredictor/ClassPredictor/biases', + 'BoxPredictor/ClassPredictor/weights']) + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_no_dangling_outputs(self): + image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64]) + conv_box_predictor = ( + box_predictor_builder.build_convolutional_box_predictor( + is_training=False, + num_classes=0, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + min_depth=0, + max_depth=32, + num_layers_before_predictor=1, + dropout_keep_prob=0.8, + kernel_size=3, + box_code_size=4, + use_dropout=True, + use_depthwise=True)) + box_predictions = conv_box_predictor.predict( + [image_features], num_predictions_per_location=[5], + scope='BoxPredictor') + tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + + bad_dangling_ops = [] + types_safe_to_dangle = set(['Assign', 'Mul', 'Const']) + for op in tf.get_default_graph().get_operations(): + if (not op.outputs) or (not op.outputs[0].consumers()): + if 'BoxPredictor' in op.name: + if op.type not in types_safe_to_dangle: + bad_dangling_ops.append(op) + + self.assertEqual(bad_dangling_ops, []) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class WeightSharedConvolutionalBoxPredictorTest(test_case.TestCase): + + def _build_arg_scope_with_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: RELU_6 + regularizer { + l2_regularizer { + } + } + initializer { + random_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + batch_norm { + train: true, + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.build(conv_hyperparams, is_training=True) + + def _build_conv_arg_scope_no_batch_norm(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: 
RELU_6 + regularizer { + l2_regularizer { + } + } + initializer { + random_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.build(conv_hyperparams, is_training=True) + + def test_get_boxes_for_five_aspect_ratios_per_location(self): + + def graph_fn(image_features): + conv_box_predictor = ( + box_predictor_builder.build_weight_shared_convolutional_box_predictor( + is_training=False, + num_classes=0, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + depth=32, + num_layers_before_predictor=1, + box_code_size=4)) + box_predictions = conv_box_predictor.predict( + [image_features], num_predictions_per_location=[5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + objectness_predictions = tf.concat(box_predictions[ + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) + return (box_encodings, objectness_predictions) + image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + (box_encodings, objectness_predictions) = self.execute( + graph_fn, [image_features]) + self.assertAllEqual(box_encodings.shape, [4, 320, 4]) + self.assertAllEqual(objectness_predictions.shape, [4, 320, 1]) + + def test_bias_predictions_to_background_with_sigmoid_score_conversion(self): + + def graph_fn(image_features): + conv_box_predictor = ( + box_predictor_builder.build_weight_shared_convolutional_box_predictor( + is_training=True, + num_classes=2, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + depth=32, + num_layers_before_predictor=1, + class_prediction_bias_init=-4.6, + box_code_size=4)) + box_predictions = conv_box_predictor.predict( + [image_features], num_predictions_per_location=[5], + scope='BoxPredictor') + class_predictions = tf.concat(box_predictions[ + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) + return (tf.nn.sigmoid(class_predictions),) + image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + class_predictions = self.execute(graph_fn, [image_features]) + self.assertAlmostEqual(np.mean(class_predictions), 0.01, places=3) + + def test_get_multi_class_predictions_for_five_aspect_ratios_per_location( + self): + + num_classes_without_background = 6 + def graph_fn(image_features): + conv_box_predictor = ( + box_predictor_builder.build_weight_shared_convolutional_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + depth=32, + num_layers_before_predictor=1, + box_code_size=4)) + box_predictions = conv_box_predictor.predict( + [image_features], + num_predictions_per_location=[5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat(box_predictions[ + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) + return (box_encodings, class_predictions_with_background) + + image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + (box_encodings, class_predictions_with_background) = self.execute( + graph_fn, [image_features]) + self.assertAllEqual(box_encodings.shape, [4, 320, 4]) + self.assertAllEqual(class_predictions_with_background.shape, + [4, 320, num_classes_without_background+1]) + + def test_get_multi_class_predictions_from_two_feature_maps( + self): + + num_classes_without_background = 6 + def graph_fn(image_features1, image_features2): + 
conv_box_predictor = ( + box_predictor_builder.build_weight_shared_convolutional_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + depth=32, + num_layers_before_predictor=1, + box_code_size=4)) + box_predictions = conv_box_predictor.predict( + [image_features1, image_features2], + num_predictions_per_location=[5, 5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + image_features1 = np.random.rand(4, 8, 8, 64).astype(np.float32) + image_features2 = np.random.rand(4, 8, 8, 64).astype(np.float32) + (box_encodings, class_predictions_with_background) = self.execute( + graph_fn, [image_features1, image_features2]) + self.assertAllEqual(box_encodings.shape, [4, 640, 4]) + self.assertAllEqual(class_predictions_with_background.shape, + [4, 640, num_classes_without_background+1]) + + def test_get_multi_class_predictions_from_feature_maps_of_different_depth( + self): + + num_classes_without_background = 6 + def graph_fn(image_features1, image_features2, image_features3): + conv_box_predictor = ( + box_predictor_builder.build_weight_shared_convolutional_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + depth=32, + num_layers_before_predictor=1, + box_code_size=4)) + box_predictions = conv_box_predictor.predict( + [image_features1, image_features2, image_features3], + num_predictions_per_location=[5, 5, 5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + image_features1 = np.random.rand(4, 8, 8, 64).astype(np.float32) + image_features2 = np.random.rand(4, 8, 8, 64).astype(np.float32) + image_features3 = np.random.rand(4, 8, 8, 32).astype(np.float32) + (box_encodings, class_predictions_with_background) = self.execute( + graph_fn, [image_features1, image_features2, image_features3]) + self.assertAllEqual(box_encodings.shape, [4, 960, 4]) + self.assertAllEqual(class_predictions_with_background.shape, + [4, 960, num_classes_without_background+1]) + + def test_predictions_multiple_feature_maps_share_weights_separate_batchnorm( + self): + num_classes_without_background = 6 + def graph_fn(image_features1, image_features2): + conv_box_predictor = ( + box_predictor_builder.build_weight_shared_convolutional_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + depth=32, + num_layers_before_predictor=2, + box_code_size=4)) + box_predictions = conv_box_predictor.predict( + [image_features1, image_features2], + num_predictions_per_location=[5, 5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + with self.test_session(graph=tf.Graph()): + graph_fn(tf.random_uniform([4, 32, 
32, 3], dtype=tf.float32), + tf.random_uniform([4, 16, 16, 3], dtype=tf.float32)) + actual_variable_set = set( + [var.op.name for var in tf.trainable_variables()]) + expected_variable_set = set([ + # Box prediction tower + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/BatchNorm/feature_0/beta'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/BatchNorm/feature_1/beta'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/BatchNorm/feature_0/beta'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/BatchNorm/feature_1/beta'), + # Box prediction head + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictor/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictor/biases'), + # Class prediction tower + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/BatchNorm/feature_0/beta'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/BatchNorm/feature_1/beta'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/BatchNorm/feature_0/beta'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/BatchNorm/feature_1/beta'), + # Class prediction head + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictor/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictor/biases')]) + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_predictions_multiple_feature_maps_share_weights_without_batchnorm( + self): + num_classes_without_background = 6 + def graph_fn(image_features1, image_features2): + conv_box_predictor = ( + box_predictor_builder.build_weight_shared_convolutional_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + depth=32, + num_layers_before_predictor=2, + box_code_size=4, + apply_batch_norm=False)) + box_predictions = conv_box_predictor.predict( + [image_features1, image_features2], + num_predictions_per_location=[5, 5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + with self.test_session(graph=tf.Graph()): + graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32), + tf.random_uniform([4, 16, 16, 3], dtype=tf.float32)) + actual_variable_set = set( + [var.op.name for var in tf.trainable_variables()]) + expected_variable_set = set([ + # Box prediction tower + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/biases'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 
'BoxPredictionTower/conv2d_1/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/biases'), + # Box prediction head + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictor/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictor/biases'), + # Class prediction tower + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/biases'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/biases'), + # Class prediction head + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictor/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictor/biases')]) + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_predictions_multiple_feature_maps_share_weights_with_depthwise( + self): + num_classes_without_background = 6 + def graph_fn(image_features1, image_features2): + conv_box_predictor = ( + box_predictor_builder.build_weight_shared_convolutional_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + depth=32, + num_layers_before_predictor=2, + box_code_size=4, + apply_batch_norm=False, + use_depthwise=True)) + box_predictions = conv_box_predictor.predict( + [image_features1, image_features2], + num_predictions_per_location=[5, 5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + with self.test_session(graph=tf.Graph()): + graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32), + tf.random_uniform([4, 16, 16, 3], dtype=tf.float32)) + actual_variable_set = set( + [var.op.name for var in tf.trainable_variables()]) + expected_variable_set = set([ + # Box prediction tower + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/depthwise_weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/pointwise_weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/biases'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/depthwise_weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/pointwise_weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/biases'), + # Box prediction head + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictor/depthwise_weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictor/pointwise_weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictor/biases'), + # Class prediction tower + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/depthwise_weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/pointwise_weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/biases'), + 
('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/depthwise_weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/pointwise_weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/biases'), + # Class prediction head + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictor/depthwise_weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictor/pointwise_weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictor/biases')]) + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_no_batchnorm_params_when_batchnorm_is_not_configured(self): + num_classes_without_background = 6 + def graph_fn(image_features1, image_features2): + conv_box_predictor = ( + box_predictor_builder.build_weight_shared_convolutional_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams_fn=self._build_conv_arg_scope_no_batch_norm(), + depth=32, + num_layers_before_predictor=2, + box_code_size=4, + apply_batch_norm=False)) + box_predictions = conv_box_predictor.predict( + [image_features1, image_features2], + num_predictions_per_location=[5, 5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + with self.test_session(graph=tf.Graph()): + graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32), + tf.random_uniform([4, 16, 16, 3], dtype=tf.float32)) + actual_variable_set = set( + [var.op.name for var in tf.trainable_variables()]) + expected_variable_set = set([ + # Box prediction tower + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/biases'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/biases'), + # Box prediction head + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictor/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictor/biases'), + # Class prediction tower + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/biases'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/biases'), + # Class prediction head + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictor/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictor/biases')]) + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_predictions_share_weights_share_tower_separate_batchnorm( + self): + num_classes_without_background = 6 + def graph_fn(image_features1, image_features2): + conv_box_predictor = ( + box_predictor_builder.build_weight_shared_convolutional_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + 
conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + depth=32, + num_layers_before_predictor=2, + box_code_size=4, + share_prediction_tower=True)) + box_predictions = conv_box_predictor.predict( + [image_features1, image_features2], + num_predictions_per_location=[5, 5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + with self.test_session(graph=tf.Graph()): + graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32), + tf.random_uniform([4, 16, 16, 3], dtype=tf.float32)) + actual_variable_set = set( + [var.op.name for var in tf.trainable_variables()]) + expected_variable_set = set([ + # Shared prediction tower + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/BatchNorm/feature_0/beta'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/BatchNorm/feature_1/beta'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/BatchNorm/feature_0/beta'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/BatchNorm/feature_1/beta'), + # Box prediction head + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictor/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictor/biases'), + # Class prediction head + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictor/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictor/biases')]) + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_predictions_share_weights_share_tower_without_batchnorm( + self): + num_classes_without_background = 6 + def graph_fn(image_features1, image_features2): + conv_box_predictor = ( + box_predictor_builder.build_weight_shared_convolutional_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + depth=32, + num_layers_before_predictor=2, + box_code_size=4, + share_prediction_tower=True, + apply_batch_norm=False)) + box_predictions = conv_box_predictor.predict( + [image_features1, image_features2], + num_predictions_per_location=[5, 5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + with self.test_session(graph=tf.Graph()): + graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32), + tf.random_uniform([4, 16, 16, 3], dtype=tf.float32)) + actual_variable_set = set( + [var.op.name for var in tf.trainable_variables()]) + expected_variable_set = set([ + # Shared prediction tower + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/biases'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/weights'), + 
('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/biases'), + # Box prediction head + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictor/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictor/biases'), + # Class prediction head + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictor/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictor/biases')]) + + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_get_predictions_with_feature_maps_of_dynamic_shape( + self): + image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64]) + conv_box_predictor = ( + box_predictor_builder.build_weight_shared_convolutional_box_predictor( + is_training=False, + num_classes=0, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + depth=32, + num_layers_before_predictor=1, + box_code_size=4)) + box_predictions = conv_box_predictor.predict( + [image_features], num_predictions_per_location=[5], + scope='BoxPredictor') + box_encodings = tf.concat(box_predictions[box_predictor.BOX_ENCODINGS], + axis=1) + objectness_predictions = tf.concat(box_predictions[ + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) + init_op = tf.global_variables_initializer() + + resolution = 32 + expected_num_anchors = resolution*resolution*5 + with self.test_session() as sess: + sess.run(init_op) + (box_encodings_shape, + objectness_predictions_shape) = sess.run( + [tf.shape(box_encodings), tf.shape(objectness_predictions)], + feed_dict={image_features: + np.random.rand(4, resolution, resolution, 64)}) + self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 4]) + self.assertAllEqual(objectness_predictions_shape, + [4, expected_num_anchors, 1]) + + def test_other_heads_predictions(self): + box_code_size = 4 + num_classes_without_background = 3 + other_head_name = 'Mask' + mask_height = 5 + mask_width = 5 + num_predictions_per_location = 5 + + def graph_fn(image_features): + box_prediction_head = box_head.WeightSharedConvolutionalBoxHead( + box_code_size) + class_prediction_head = class_head.WeightSharedConvolutionalClassHead( + num_classes_without_background + 1) + other_heads = { + other_head_name: + mask_head.WeightSharedConvolutionalMaskHead( + num_classes_without_background, + mask_height=mask_height, + mask_width=mask_width) + } + conv_box_predictor = box_predictor.WeightSharedConvolutionalBoxPredictor( + is_training=False, + num_classes=num_classes_without_background, + box_prediction_head=box_prediction_head, + class_prediction_head=class_prediction_head, + other_heads=other_heads, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + depth=32, + num_layers_before_predictor=2) + box_predictions = conv_box_predictor.predict( + [image_features], + num_predictions_per_location=[num_predictions_per_location], + scope='BoxPredictor') + for key, value in box_predictions.items(): + box_predictions[key] = tf.concat(value, axis=1) + assert len(box_predictions) == 3 + return (box_predictions[box_predictor.BOX_ENCODINGS], + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + box_predictions[other_head_name]) + + batch_size = 4 + feature_ht = 8 + feature_wt = 8 + image_features = np.random.rand(batch_size, feature_ht, feature_wt, + 64).astype(np.float32) + (box_encodings, class_predictions, other_head_predictions) = self.execute( + graph_fn, [image_features]) + num_anchors = feature_ht * feature_wt * 
num_predictions_per_location + self.assertAllEqual(box_encodings.shape, + [batch_size, num_anchors, box_code_size]) + self.assertAllEqual( + class_predictions.shape, + [batch_size, num_anchors, num_classes_without_background + 1]) + self.assertAllEqual(other_head_predictions.shape, [ + batch_size, num_anchors, num_classes_without_background, mask_height, + mask_width + ]) + + + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/predictors/convolutional_keras_box_predictor.py b/models/research/object_detection/predictors/convolutional_keras_box_predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..630c680398baa4a60c945a0bd2d874ea0f8c1783 --- /dev/null +++ b/models/research/object_detection/predictors/convolutional_keras_box_predictor.py @@ -0,0 +1,482 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Convolutional Box Predictors with and without weight sharing.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +from six.moves import range +import tensorflow.compat.v1 as tf + +from object_detection.core import box_predictor +from object_detection.utils import shape_utils +from object_detection.utils import static_shape + +keras = tf.keras.layers + +BOX_ENCODINGS = box_predictor.BOX_ENCODINGS +CLASS_PREDICTIONS_WITH_BACKGROUND = ( + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) +MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS + + +class _NoopVariableScope(object): + """A dummy class that does not push any scope.""" + + def __enter__(self): + return None + + def __exit__(self, exc_type, exc_value, traceback): + return False + + +class ConvolutionalBoxPredictor(box_predictor.KerasBoxPredictor): + """Convolutional Keras Box Predictor. + + Optionally add an intermediate 1x1 convolutional layer after features and + predict in parallel branches box_encodings and + class_predictions_with_background. + + Currently this box predictor assumes that predictions are "shared" across + classes --- that is each anchor makes box predictions which do not depend + on class. + """ + + def __init__(self, + is_training, + num_classes, + box_prediction_heads, + class_prediction_heads, + other_heads, + conv_hyperparams, + num_layers_before_predictor, + min_depth, + max_depth, + freeze_batchnorm, + inplace_batchnorm_update, + name=None): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + box_prediction_heads: A list of heads that predict the boxes. 
+ class_prediction_heads: A list of heads that predict the classes. + other_heads: A dictionary mapping head names to lists of convolutional + heads. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + num_layers_before_predictor: Number of the additional conv layers before + the predictor. + min_depth: Minimum feature depth prior to predicting box encodings + and class predictions. + max_depth: Maximum feature depth prior to predicting box encodings + and class predictions. If max_depth is set to 0, no additional + feature map will be inserted before location and class predictions. + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: Whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + name: A string name scope to assign to the model. If `None`, Keras + will auto-generate one from the class name. + + Raises: + ValueError: if min_depth > max_depth. + """ + super(ConvolutionalBoxPredictor, self).__init__( + is_training, num_classes, freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + name=name) + if min_depth > max_depth: + raise ValueError('min_depth should be less than or equal to max_depth') + if len(box_prediction_heads) != len(class_prediction_heads): + raise ValueError('All lists of heads must be the same length.') + for other_head_list in other_heads.values(): + if len(box_prediction_heads) != len(other_head_list): + raise ValueError('All lists of heads must be the same length.') + + self._prediction_heads = { + BOX_ENCODINGS: box_prediction_heads, + CLASS_PREDICTIONS_WITH_BACKGROUND: class_prediction_heads, + } + + if other_heads: + self._prediction_heads.update(other_heads) + + # We generate a consistent ordering for the prediction head names, + # So that all workers build the model in the exact same order + self._sorted_head_names = sorted(self._prediction_heads.keys()) + + self._conv_hyperparams = conv_hyperparams + self._min_depth = min_depth + self._max_depth = max_depth + self._num_layers_before_predictor = num_layers_before_predictor + + self._shared_nets = [] + + def build(self, input_shapes): + """Creates the variables of the layer.""" + if len(input_shapes) != len(self._prediction_heads[BOX_ENCODINGS]): + raise ValueError('This box predictor was constructed with %d heads,' + 'but there are %d inputs.' % + (len(self._prediction_heads[BOX_ENCODINGS]), + len(input_shapes))) + for stack_index, input_shape in enumerate(input_shapes): + net = [] + + # Add additional conv layers before the class predictor. 
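+      # Illustrative note (values taken from the unit tests in this change,
+      # not a requirement of the predictor): with 64-channel input features,
+      # min_depth=0 and max_depth=32, the clamp below yields
+      #   depth = max(min(64, 32), 0) == 32,
+      # so for num_layers_before_predictor=1 a single extra 1x1 layer named
+      # 'SharedConvolutions_0/Conv2d_0_1x1_32' is inserted before the heads.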
+ features_depth = static_shape.get_depth(input_shape) + depth = max(min(features_depth, self._max_depth), self._min_depth) + tf.logging.info( + 'depth of additional conv before box predictor: {}'.format(depth)) + + if depth > 0 and self._num_layers_before_predictor > 0: + for i in range(self._num_layers_before_predictor): + net.append(keras.Conv2D(depth, [1, 1], + name='SharedConvolutions_%d/Conv2d_%d_1x1_%d' + % (stack_index, i, depth), + padding='SAME', + **self._conv_hyperparams.params())) + net.append(self._conv_hyperparams.build_batch_norm( + training=(self._is_training and not self._freeze_batchnorm), + name='SharedConvolutions_%d/Conv2d_%d_1x1_%d_norm' + % (stack_index, i, depth))) + net.append(self._conv_hyperparams.build_activation_layer( + name='SharedConvolutions_%d/Conv2d_%d_1x1_%d_activation' + % (stack_index, i, depth), + )) + # Until certain bugs are fixed in checkpointable lists, + # this net must be appended only once it's been filled with layers + self._shared_nets.append(net) + self.built = True + + def _predict(self, image_features, **kwargs): + """Computes encoded object locations and corresponding confidences. + + Args: + image_features: A list of float tensors of shape [batch_size, height_i, + width_i, channels_i] containing features for a batch of images. + **kwargs: Unused Keyword args + + Returns: + box_encodings: A list of float tensors of shape + [batch_size, num_anchors_i, q, code_size] representing the location of + the objects, where q is 1 or the number of classes. Each entry in the + list corresponds to a feature map in the input `image_features` list. + class_predictions_with_background: A list of float tensors of shape + [batch_size, num_anchors_i, num_classes + 1] representing the class + predictions for the proposals. Each entry in the list corresponds to a + feature map in the input `image_features` list. + """ + predictions = collections.defaultdict(list) + + for (index, net) in enumerate(image_features): + + # Apply shared conv layers before the head predictors. + for layer in self._shared_nets[index]: + net = layer(net) + + for head_name in self._sorted_head_names: + head_obj = self._prediction_heads[head_name][index] + prediction = head_obj(net) + predictions[head_name].append(prediction) + + return predictions + + +class WeightSharedConvolutionalBoxPredictor(box_predictor.KerasBoxPredictor): + """Convolutional Box Predictor with weight sharing based on Keras. + + Defines the box predictor as defined in + https://arxiv.org/abs/1708.02002. This class differs from + ConvolutionalBoxPredictor in that it shares weights and biases while + predicting from different feature maps. However, batch_norm parameters are not + shared because the statistics of the activations vary among the different + feature maps. + + Also note that separate multi-layer towers are constructed for the box + encoding and class predictors respectively. + """ + + def __init__(self, + is_training, + num_classes, + box_prediction_head, + class_prediction_head, + other_heads, + conv_hyperparams, + depth, + num_layers_before_predictor, + freeze_batchnorm, + inplace_batchnorm_update, + kernel_size=3, + apply_batch_norm=False, + share_prediction_tower=False, + use_depthwise=False, + name=None): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. 
Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + box_prediction_head: The head that predicts the boxes. + class_prediction_head: The head that predicts the classes. + other_heads: A dictionary mapping head names to convolutional + head classes. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + depth: depth of conv layers. + num_layers_before_predictor: Number of the additional conv layers before + the predictor. + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: Whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + kernel_size: Size of final convolution kernel. + apply_batch_norm: Whether to apply batch normalization to conv layers in + this predictor. + share_prediction_tower: Whether to share the multi-layer tower among box + prediction head, class prediction head and other heads. + use_depthwise: Whether to use depthwise separable conv2d instead of + regular conv2d. + name: A string name scope to assign to the model. If `None`, Keras + will auto-generate one from the class name. + """ + super(WeightSharedConvolutionalBoxPredictor, self).__init__( + is_training, num_classes, freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + name=name) + + self._box_prediction_head = box_prediction_head + self._prediction_heads = { + CLASS_PREDICTIONS_WITH_BACKGROUND: class_prediction_head, + } + if other_heads: + self._prediction_heads.update(other_heads) + # We generate a consistent ordering for the prediction head names, + # so that all workers build the model in the exact same order. + self._sorted_head_names = sorted(self._prediction_heads.keys()) + + self._conv_hyperparams = conv_hyperparams + self._depth = depth + self._num_layers_before_predictor = num_layers_before_predictor + self._kernel_size = kernel_size + self._apply_batch_norm = apply_batch_norm + self._share_prediction_tower = share_prediction_tower + self._use_depthwise = use_depthwise + + # Additional projection layers to bring all feature maps to uniform + # channels. + self._additional_projection_layers = [] + # The base tower layers for each head. + self._base_tower_layers_for_heads = { + BOX_ENCODINGS: [], + CLASS_PREDICTIONS_WITH_BACKGROUND: [], + } + for head_name in other_heads.keys(): + self._base_tower_layers_for_heads[head_name] = [] + + # A dict maps the tower_name_scope of each head to the shared conv layers in + # the base tower for different feature map levels. 
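+    # For example (illustrative only), after build() this dict may look like
+    #   {'BoxPredictionTower': [conv2d_0, conv2d_1],
+    #    'ClassPredictionTower': [conv2d_0, conv2d_1]}
+    # where the same Conv2D layer objects are reused at every feature map
+    # level so that their weights are shared.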
+ self._head_scope_conv_layers = {} + + def _insert_additional_projection_layer( + self, inserted_layer_counter, target_channel): + projection_layers = [] + if inserted_layer_counter >= 0: + use_bias = False if self._apply_batch_norm else True + projection_layers.append(keras.Conv2D( + target_channel, [1, 1], strides=1, padding='SAME', + name='ProjectionLayer/conv2d_{}'.format(inserted_layer_counter), + **self._conv_hyperparams.params(use_bias=use_bias))) + if self._apply_batch_norm: + projection_layers.append(self._conv_hyperparams.build_batch_norm( + training=(self._is_training and not self._freeze_batchnorm), + name='ProjectionLayer/conv2d_{}/BatchNorm'.format( + inserted_layer_counter))) + inserted_layer_counter += 1 + return inserted_layer_counter, projection_layers + + def _compute_base_tower(self, tower_name_scope, feature_index): + conv_layers = [] + batch_norm_layers = [] + activation_layers = [] + use_bias = False if self._apply_batch_norm else True + for additional_conv_layer_idx in range(self._num_layers_before_predictor): + layer_name = '{}/conv2d_{}'.format( + tower_name_scope, additional_conv_layer_idx) + if tower_name_scope not in self._head_scope_conv_layers: + if self._use_depthwise: + kwargs = self._conv_hyperparams.params(use_bias=use_bias) + # Both the regularizer and initializer apply to the depthwise layer, + # so we remap the kernel_* to depthwise_* here. + kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer'] + kwargs['depthwise_initializer'] = kwargs['kernel_initializer'] + conv_layers.append( + tf.keras.layers.SeparableConv2D( + self._depth, [self._kernel_size, self._kernel_size], + padding='SAME', + name=layer_name, + **kwargs)) + else: + conv_layers.append( + tf.keras.layers.Conv2D( + self._depth, + [self._kernel_size, self._kernel_size], + padding='SAME', + name=layer_name, + **self._conv_hyperparams.params(use_bias=use_bias))) + # Each feature gets a separate batchnorm parameter even though they share + # the same convolution weights. + if self._apply_batch_norm: + batch_norm_layers.append(self._conv_hyperparams.build_batch_norm( + training=(self._is_training and not self._freeze_batchnorm), + name='{}/conv2d_{}/BatchNorm/feature_{}'.format( + tower_name_scope, additional_conv_layer_idx, feature_index))) + activation_layers.append(tf.keras.layers.Lambda(tf.nn.relu6)) + + # Set conv layers as the shared conv layers for different feature maps with + # the same tower_name_scope. 
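+    # Clarifying note: conv layers are created above only when this
+    # tower_name_scope has not been seen before; build() then registers them
+    # in self._head_scope_conv_layers. For subsequent feature map levels the
+    # registered layers are reused below, so convolution weights are shared
+    # while the batch norm layers created above stay separate per feature map.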
+ if tower_name_scope in self._head_scope_conv_layers: + conv_layers = self._head_scope_conv_layers[tower_name_scope] + + # Stack the base_tower_layers in the order of conv_layer, batch_norm_layer + # and activation_layer + base_tower_layers = [] + for i in range(self._num_layers_before_predictor): + base_tower_layers.extend([conv_layers[i]]) + if self._apply_batch_norm: + base_tower_layers.extend([batch_norm_layers[i]]) + base_tower_layers.extend([activation_layers[i]]) + return conv_layers, base_tower_layers + + def build(self, input_shapes): + """Creates the variables of the layer.""" + feature_channels = [ + shape_utils.get_dim_as_int(input_shape[3]) + for input_shape in input_shapes + ] + has_different_feature_channels = len(set(feature_channels)) > 1 + if has_different_feature_channels: + inserted_layer_counter = 0 + target_channel = max(set(feature_channels), key=feature_channels.count) + tf.logging.info('Not all feature maps have the same number of ' + 'channels, found: {}, appending additional projection ' + 'layers to bring all feature maps to uniformly have {} ' + 'channels.'.format(feature_channels, target_channel)) + else: + # Place holder variables if has_different_feature_channels is False. + target_channel = -1 + inserted_layer_counter = -1 + + def _build_layers(tower_name_scope, feature_index): + conv_layers, base_tower_layers = self._compute_base_tower( + tower_name_scope=tower_name_scope, feature_index=feature_index) + if tower_name_scope not in self._head_scope_conv_layers: + self._head_scope_conv_layers[tower_name_scope] = conv_layers + return base_tower_layers + + for feature_index in range(len(input_shapes)): + # Additional projection layers should not be shared as input channels + # (and thus weight shapes) are different + inserted_layer_counter, projection_layers = ( + self._insert_additional_projection_layer( + inserted_layer_counter, target_channel)) + self._additional_projection_layers.append(projection_layers) + + if self._share_prediction_tower: + box_tower_scope = 'PredictionTower' + else: + box_tower_scope = 'BoxPredictionTower' + # For box tower base + box_tower_layers = _build_layers(box_tower_scope, feature_index) + self._base_tower_layers_for_heads[BOX_ENCODINGS].append(box_tower_layers) + + for head_name in self._sorted_head_names: + if head_name == CLASS_PREDICTIONS_WITH_BACKGROUND: + tower_name_scope = 'ClassPredictionTower' + else: + tower_name_scope = '{}PredictionTower'.format(head_name) + box_tower_layers = _build_layers(tower_name_scope, feature_index) + self._base_tower_layers_for_heads[head_name].append(box_tower_layers) + + self.built = True + + def _predict(self, image_features, **kwargs): + """Computes encoded object locations and corresponding confidences. + + Args: + image_features: A list of float tensors of shape [batch_size, height_i, + width_i, channels_i] containing features for a batch of images. + **kwargs: Unused Keyword args + + Returns: + box_encodings: A list of float tensors of shape + [batch_size, num_anchors_i, q, code_size] representing the location of + the objects, where q is 1 or the number of classes. Each entry in the + list corresponds to a feature map in the input `image_features` list. + class_predictions_with_background: A list of float tensors of shape + [batch_size, num_anchors_i, num_classes + 1] representing the class + predictions for the proposals. Each entry in the list corresponds to a + feature map in the input `image_features` list. 
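+
+      A minimal usage sketch (mirroring the unit tests added in this change;
+      the name `conv_box_predictor` is illustrative):
+        predictions = conv_box_predictor([feature_map0, feature_map1])
+        box_encodings = tf.concat(predictions[BOX_ENCODINGS], axis=1)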
+ """ + predictions = collections.defaultdict(list) + + def _apply_layers(base_tower_layers, image_feature): + for layer in base_tower_layers: + image_feature = layer(image_feature) + return image_feature + + for (index, image_feature) in enumerate(image_features): + # Apply additional projection layers to image features + for layer in self._additional_projection_layers[index]: + image_feature = layer(image_feature) + + # Apply box tower layers. + box_tower_feature = _apply_layers( + self._base_tower_layers_for_heads[BOX_ENCODINGS][index], + image_feature) + box_encodings = self._box_prediction_head(box_tower_feature) + predictions[BOX_ENCODINGS].append(box_encodings) + + for head_name in self._sorted_head_names: + head_obj = self._prediction_heads[head_name] + if self._share_prediction_tower: + head_tower_feature = box_tower_feature + else: + head_tower_feature = _apply_layers( + self._base_tower_layers_for_heads[head_name][index], + image_feature) + prediction = head_obj(head_tower_feature) + predictions[head_name].append(prediction) + return predictions diff --git a/models/research/object_detection/predictors/convolutional_keras_box_predictor_tf2_test.py b/models/research/object_detection/predictors/convolutional_keras_box_predictor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..180a6e94643a80ac04ee12dfacb5bc6d04e09ec8 --- /dev/null +++ b/models/research/object_detection/predictors/convolutional_keras_box_predictor_tf2_test.py @@ -0,0 +1,952 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.predictors.convolutional_keras_box_predictor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import box_predictor_builder +from object_detection.builders import hyperparams_builder +from object_detection.predictors import convolutional_keras_box_predictor as box_predictor +from object_detection.predictors.heads import keras_box_head +from object_detection.predictors.heads import keras_class_head +from object_detection.predictors.heads import keras_mask_head +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class ConvolutionalKerasBoxPredictorTest(test_case.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: RELU_6 + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def test_get_boxes_for_five_aspect_ratios_per_location(self): + conv_box_predictor = ( + box_predictor_builder.build_convolutional_keras_box_predictor( + is_training=False, + num_classes=0, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5], + min_depth=0, + max_depth=32, + num_layers_before_predictor=1, + use_dropout=True, + dropout_keep_prob=0.8, + kernel_size=1, + box_code_size=4 + )) + def graph_fn(image_features): + box_predictions = conv_box_predictor([image_features]) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + objectness_predictions = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, objectness_predictions) + image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + (box_encodings, objectness_predictions) = self.execute(graph_fn, + [image_features]) + self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4]) + self.assertAllEqual(objectness_predictions.shape, [4, 320, 1]) + + def test_get_boxes_for_one_aspect_ratio_per_location(self): + conv_box_predictor = ( + box_predictor_builder.build_convolutional_keras_box_predictor( + is_training=False, + num_classes=0, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[1], + min_depth=0, + max_depth=32, + num_layers_before_predictor=1, + use_dropout=True, + dropout_keep_prob=0.8, + kernel_size=1, + box_code_size=4 + )) + def graph_fn(image_features): + box_predictions = conv_box_predictor([image_features]) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + objectness_predictions = tf.concat(box_predictions[ + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) + return (box_encodings, objectness_predictions) + image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + (box_encodings, objectness_predictions) = self.execute(graph_fn, + [image_features]) + self.assertAllEqual(box_encodings.shape, [4, 64, 1, 4]) + self.assertAllEqual(objectness_predictions.shape, 
[4, 64, 1]) + + def test_get_multi_class_predictions_for_five_aspect_ratios_per_location( + self): + num_classes_without_background = 6 + image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + conv_box_predictor = ( + box_predictor_builder.build_convolutional_keras_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5], + min_depth=0, + max_depth=32, + num_layers_before_predictor=1, + use_dropout=True, + dropout_keep_prob=0.8, + kernel_size=1, + box_code_size=4 + )) + def graph_fn(image_features): + box_predictions = conv_box_predictor([image_features]) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + (box_encodings, + class_predictions_with_background) = self.execute(graph_fn, + [image_features]) + self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4]) + self.assertAllEqual(class_predictions_with_background.shape, + [4, 320, num_classes_without_background+1]) + + def test_get_predictions_with_feature_maps_of_dynamic_shape( + self): + tf.keras.backend.clear_session() + conv_box_predictor = ( + box_predictor_builder.build_convolutional_keras_box_predictor( + is_training=False, + num_classes=0, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5], + min_depth=0, + max_depth=32, + num_layers_before_predictor=1, + use_dropout=True, + dropout_keep_prob=0.8, + kernel_size=1, + box_code_size=4 + )) + variables = [] + def graph_fn(image_features): + box_predictions = conv_box_predictor([image_features]) + variables.extend(list(conv_box_predictor.variables)) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + objectness_predictions = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return box_encodings, objectness_predictions + resolution = 32 + expected_num_anchors = resolution*resolution*5 + box_encodings, objectness_predictions = self.execute( + graph_fn, [np.random.rand(4, resolution, resolution, 64)]) + + actual_variable_set = set([var.name.split(':')[0] for var in variables]) + self.assertAllEqual(box_encodings.shape, [4, expected_num_anchors, 1, 4]) + self.assertAllEqual(objectness_predictions.shape, + [4, expected_num_anchors, 1]) + expected_variable_set = set([ + 'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/bias', + 'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/kernel', + 'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/bias', + 'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/kernel', + 'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/bias', + 'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/kernel']) + self.assertEqual(expected_variable_set, actual_variable_set) + self.assertEqual(conv_box_predictor._sorted_head_names, + ['box_encodings', 'class_predictions_with_background']) + + def test_use_depthwise_convolution(self): + tf.keras.backend.clear_session() + conv_box_predictor = ( + box_predictor_builder.build_convolutional_keras_box_predictor( + is_training=False, + num_classes=0, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + 
inplace_batchnorm_update=False, + num_predictions_per_location_list=[5], + min_depth=0, + max_depth=32, + num_layers_before_predictor=1, + use_dropout=True, + dropout_keep_prob=0.8, + kernel_size=3, + box_code_size=4, + use_depthwise=True + )) + variables = [] + def graph_fn(image_features): + box_predictions = conv_box_predictor([image_features]) + variables.extend(list(conv_box_predictor.variables)) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + objectness_predictions = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return box_encodings, objectness_predictions + + resolution = 32 + expected_num_anchors = resolution*resolution*5 + box_encodings, objectness_predictions = self.execute( + graph_fn, [np.random.rand(4, resolution, resolution, 64)]) + + actual_variable_set = set([var.name.split(':')[0] for var in variables]) + self.assertAllEqual(box_encodings.shape, [4, expected_num_anchors, 1, 4]) + self.assertAllEqual(objectness_predictions.shape, + [4, expected_num_anchors, 1]) + expected_variable_set = set([ + 'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/bias', + 'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/kernel', + + 'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor_depthwise/' + 'bias', + + 'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor_depthwise/' + 'depthwise_kernel', + + 'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/bias', + 'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/kernel', + 'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor_depthwise/bias', + + 'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor_depthwise/' + 'depthwise_kernel', + + 'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/bias', + 'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/kernel']) + self.assertEqual(expected_variable_set, actual_variable_set) + self.assertEqual(conv_box_predictor._sorted_head_names, + ['box_encodings', 'class_predictions_with_background']) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class WeightSharedConvolutionalKerasBoxPredictorTest(test_case.TestCase): + + def _build_conv_hyperparams(self, add_batch_norm=True): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: RELU_6 + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + """ + if add_batch_norm: + batch_norm_proto = """ + batch_norm { + train: true, + } + """ + conv_hyperparams_text_proto += batch_norm_proto + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + # pylint: disable=line-too-long + def test_get_boxes_for_five_aspect_ratios_per_location(self): + conv_box_predictor = ( + box_predictor_builder + .build_weight_shared_convolutional_keras_box_predictor( + is_training=False, + num_classes=0, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5], + depth=32, + num_layers_before_predictor=1, + box_code_size=4)) + + def graph_fn(image_features): + box_predictions = conv_box_predictor([image_features]) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + objectness_predictions = tf.concat(box_predictions[ + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) + return (box_encodings, objectness_predictions) + 
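+    # Shape arithmetic behind the assertions below: an 8x8 feature map with
+    # 5 predictions per location yields 8 * 8 * 5 = 320 anchors, so the
+    # weight-shared predictor is expected to return box encodings of shape
+    # [batch, 320, box_code_size] and objectness scores of shape
+    # [batch, 320, 1].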
image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + (box_encodings, objectness_predictions) = self.execute( + graph_fn, [image_features]) + self.assertAllEqual(box_encodings.shape, [4, 320, 4]) + self.assertAllEqual(objectness_predictions.shape, [4, 320, 1]) + + def test_bias_predictions_to_background_with_sigmoid_score_conversion(self): + conv_box_predictor = ( + box_predictor_builder + .build_weight_shared_convolutional_keras_box_predictor( + is_training=True, + num_classes=2, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5], + depth=32, + num_layers_before_predictor=1, + class_prediction_bias_init=-4.6, + box_code_size=4)) + + def graph_fn(image_features): + box_predictions = conv_box_predictor([image_features]) + class_predictions = tf.concat(box_predictions[ + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) + return (tf.nn.sigmoid(class_predictions),) + + image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + class_predictions = self.execute(graph_fn, [image_features]) + self.assertAlmostEqual(np.mean(class_predictions), 0.01, places=3) + + def test_get_multi_class_predictions_for_five_aspect_ratios_per_location( + self): + num_classes_without_background = 6 + conv_box_predictor = ( + box_predictor_builder + .build_weight_shared_convolutional_keras_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5], + depth=32, + num_layers_before_predictor=1, + box_code_size=4)) + + def graph_fn(image_features): + box_predictions = conv_box_predictor([image_features]) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat(box_predictions[ + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) + return (box_encodings, class_predictions_with_background) + + image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + (box_encodings, class_predictions_with_background) = self.execute( + graph_fn, [image_features]) + self.assertAllEqual(box_encodings.shape, [4, 320, 4]) + self.assertAllEqual(class_predictions_with_background.shape, + [4, 320, num_classes_without_background+1]) + + def test_get_multi_class_predictions_from_two_feature_maps( + self): + num_classes_without_background = 6 + conv_box_predictor = ( + box_predictor_builder + .build_weight_shared_convolutional_keras_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5, 5], + depth=32, + num_layers_before_predictor=1, + box_code_size=4)) + + def graph_fn(image_features1, image_features2): + box_predictions = conv_box_predictor([image_features1, image_features2]) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + image_features1 = np.random.rand(4, 8, 8, 64).astype(np.float32) + image_features2 = np.random.rand(4, 8, 8, 64).astype(np.float32) + (box_encodings, class_predictions_with_background) = self.execute( + graph_fn, [image_features1, 
image_features2]) + self.assertAllEqual(box_encodings.shape, [4, 640, 4]) + self.assertAllEqual(class_predictions_with_background.shape, + [4, 640, num_classes_without_background+1]) + + def test_get_multi_class_predictions_from_feature_maps_of_different_depth( + self): + num_classes_without_background = 6 + conv_box_predictor = ( + box_predictor_builder + .build_weight_shared_convolutional_keras_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5, 5, 5], + depth=32, + num_layers_before_predictor=1, + box_code_size=4)) + + def graph_fn(image_features1, image_features2, image_features3): + box_predictions = conv_box_predictor( + [image_features1, image_features2, image_features3]) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + image_features1 = np.random.rand(4, 8, 8, 64).astype(np.float32) + image_features2 = np.random.rand(4, 8, 8, 64).astype(np.float32) + image_features3 = np.random.rand(4, 8, 8, 32).astype(np.float32) + (box_encodings, class_predictions_with_background) = self.execute( + graph_fn, [image_features1, image_features2, image_features3]) + self.assertAllEqual(box_encodings.shape, [4, 960, 4]) + self.assertAllEqual(class_predictions_with_background.shape, + [4, 960, num_classes_without_background+1]) + + def test_predictions_multiple_feature_maps_share_weights_separate_batchnorm( + self): + tf.keras.backend.clear_session() + num_classes_without_background = 6 + conv_box_predictor = ( + box_predictor_builder + .build_weight_shared_convolutional_keras_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5, 5], + depth=32, + num_layers_before_predictor=2, + box_code_size=4)) + variables = [] + + def graph_fn(image_features1, image_features2): + box_predictions = conv_box_predictor([image_features1, image_features2]) + variables.extend(list(conv_box_predictor.variables)) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + self.execute(graph_fn, [ + np.random.rand(4, 32, 32, 3).astype(np.float32), + np.random.rand(4, 16, 16, 3).astype(np.float32) + ]) + actual_variable_set = set([var.name.split(':')[0] for var in variables]) + expected_variable_set = set([ + # Box prediction tower + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/BatchNorm/feature_0/beta'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/BatchNorm/feature_0/moving_mean'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/BatchNorm/feature_0/moving_variance'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/BatchNorm/feature_1/beta'), + ('WeightSharedConvolutionalBoxPredictor/' + 
'BoxPredictionTower/conv2d_0/BatchNorm/feature_1/moving_mean'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/BatchNorm/feature_1/moving_variance'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/BatchNorm/feature_0/beta'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/BatchNorm/feature_0/moving_mean'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/BatchNorm/feature_0/moving_variance'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/BatchNorm/feature_1/beta'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/BatchNorm/feature_1/moving_mean'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/BatchNorm/feature_1/moving_variance'), + # Box prediction head + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalBoxHead/BoxPredictor/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'), + # Class prediction tower + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/BatchNorm/feature_0/beta'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/BatchNorm/feature_0/moving_mean'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/BatchNorm/feature_0/moving_variance'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/BatchNorm/feature_1/beta'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/BatchNorm/feature_1/moving_mean'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/BatchNorm/feature_1/moving_variance'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/BatchNorm/feature_0/beta'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/BatchNorm/feature_0/moving_mean'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/BatchNorm/feature_0/moving_variance'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/BatchNorm/feature_1/beta'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/BatchNorm/feature_1/moving_mean'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/BatchNorm/feature_1/moving_variance'), + # Class prediction head + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalClassHead/ClassPredictor/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalClassHead/ClassPredictor/bias')]) + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_predictions_multiple_feature_maps_share_weights_without_batchnorm( + self): + tf.keras.backend.clear_session() + num_classes_without_background = 6 + conv_box_predictor = ( + box_predictor_builder + .build_weight_shared_convolutional_keras_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5, 5], + depth=32, + num_layers_before_predictor=2, + box_code_size=4, + 
apply_batch_norm=False)) + variables = [] + + def graph_fn(image_features1, image_features2): + box_predictions = conv_box_predictor([image_features1, image_features2]) + variables.extend(list(conv_box_predictor.variables)) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + self.execute(graph_fn, [ + np.random.rand(4, 32, 32, 3).astype(np.float32), + np.random.rand(4, 16, 16, 3).astype(np.float32) + ]) + actual_variable_set = set([var.name.split(':')[0] for var in variables]) + expected_variable_set = set([ + # Box prediction tower + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/bias'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/bias'), + # Box prediction head + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalBoxHead/BoxPredictor/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'), + # Class prediction tower + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/bias'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/bias'), + # Class prediction head + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalClassHead/ClassPredictor/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalClassHead/ClassPredictor/bias')]) + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_predictions_multiple_feature_maps_share_weights_with_depthwise( + self): + tf.keras.backend.clear_session() + num_classes_without_background = 6 + conv_box_predictor = ( + box_predictor_builder + .build_weight_shared_convolutional_keras_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams=self._build_conv_hyperparams(add_batch_norm=False), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5, 5], + depth=32, + num_layers_before_predictor=2, + box_code_size=4, + apply_batch_norm=False, + use_depthwise=True)) + variables = [] + + def graph_fn(image_features1, image_features2): + box_predictions = conv_box_predictor([image_features1, image_features2]) + variables.extend(list(conv_box_predictor.variables)) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + self.execute(graph_fn, [ + np.random.rand(4, 32, 32, 3).astype(np.float32), + np.random.rand(4, 16, 16, 3).astype(np.float32) + ]) + actual_variable_set = set([var.name.split(':')[0] for var in variables]) + expected_variable_set = set([ + # Box prediction tower + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/depthwise_kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/pointwise_kernel'), + 
('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/bias'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/depthwise_kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/pointwise_kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/bias'), + # Box prediction head + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalBoxHead/BoxPredictor/depthwise_kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalBoxHead/BoxPredictor/pointwise_kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'), + # Class prediction tower + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/depthwise_kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/pointwise_kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/bias'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/depthwise_kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/pointwise_kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/bias'), + # Class prediction head + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalClassHead/ClassPredictor/depthwise_kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalClassHead/ClassPredictor/pointwise_kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalClassHead/ClassPredictor/bias')]) + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_no_batchnorm_params_when_batchnorm_is_not_configured(self): + tf.keras.backend.clear_session() + num_classes_without_background = 6 + conv_box_predictor = ( + box_predictor_builder + .build_weight_shared_convolutional_keras_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams=self._build_conv_hyperparams(add_batch_norm=False), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5, 5], + depth=32, + num_layers_before_predictor=2, + box_code_size=4, + apply_batch_norm=False)) + variables = [] + + def graph_fn(image_features1, image_features2): + box_predictions = conv_box_predictor( + [image_features1, image_features2]) + variables.extend(list(conv_box_predictor.variables)) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + self.execute(graph_fn, [ + np.random.rand(4, 32, 32, 3).astype(np.float32), + np.random.rand(4, 16, 16, 3).astype(np.float32) + ]) + actual_variable_set = set([var.name.split(':')[0] for var in variables]) + expected_variable_set = set([ + # Box prediction tower + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/bias'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/bias'), + # Box prediction head + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalBoxHead/BoxPredictor/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 
'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'), + # Class prediction tower + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/bias'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/bias'), + # Class prediction head + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalClassHead/ClassPredictor/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalClassHead/ClassPredictor/bias')]) + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_predictions_share_weights_share_tower_separate_batchnorm( + self): + tf.keras.backend.clear_session() + num_classes_without_background = 6 + conv_box_predictor = ( + box_predictor_builder + .build_weight_shared_convolutional_keras_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5, 5], + depth=32, + num_layers_before_predictor=2, + box_code_size=4, + share_prediction_tower=True)) + variables = [] + + def graph_fn(image_features1, image_features2): + box_predictions = conv_box_predictor( + [image_features1, image_features2]) + variables.extend(list(conv_box_predictor.variables)) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + self.execute(graph_fn, [ + np.random.rand(4, 32, 32, 3).astype(np.float32), + np.random.rand(4, 16, 16, 3).astype(np.float32) + ]) + actual_variable_set = set([var.name.split(':')[0] for var in variables]) + expected_variable_set = set([ + # Shared prediction tower + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/BatchNorm/feature_0/beta'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/BatchNorm/feature_1/beta'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/BatchNorm/feature_0/moving_mean'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/BatchNorm/feature_1/moving_mean'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/BatchNorm/feature_0/moving_variance'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/BatchNorm/feature_1/moving_variance'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/BatchNorm/feature_0/beta'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/BatchNorm/feature_1/beta'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/BatchNorm/feature_0/moving_mean'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/BatchNorm/feature_1/moving_mean'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/BatchNorm/feature_0/moving_variance'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/BatchNorm/feature_1/moving_variance'), + # Box prediction head + ('WeightSharedConvolutionalBoxPredictor/' + 
'WeightSharedConvolutionalBoxHead/BoxPredictor/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'), + # Class prediction head + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalClassHead/ClassPredictor/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalClassHead/ClassPredictor/bias')]) + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_predictions_share_weights_share_tower_without_batchnorm( + self): + tf.keras.backend.clear_session() + num_classes_without_background = 6 + conv_box_predictor = ( + box_predictor_builder + .build_weight_shared_convolutional_keras_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams=self._build_conv_hyperparams(add_batch_norm=False), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5, 5], + depth=32, + num_layers_before_predictor=2, + box_code_size=4, + share_prediction_tower=True, + apply_batch_norm=False)) + variables = [] + + def graph_fn(image_features1, image_features2): + box_predictions = conv_box_predictor( + [image_features1, image_features2]) + variables.extend(list(conv_box_predictor.variables)) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + self.execute(graph_fn, [ + np.random.rand(4, 32, 32, 3).astype(np.float32), + np.random.rand(4, 16, 16, 3).astype(np.float32) + ]) + actual_variable_set = set([var.name.split(':')[0] for var in variables]) + expected_variable_set = set([ + # Shared prediction tower + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/bias'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/bias'), + # Box prediction head + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalBoxHead/BoxPredictor/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'), + # Class prediction head + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalClassHead/ClassPredictor/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalClassHead/ClassPredictor/bias')]) + + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_other_heads_predictions(self): + box_code_size = 4 + num_classes_without_background = 3 + other_head_name = 'Mask' + mask_height = 5 + mask_width = 5 + num_predictions_per_location = 5 + box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead( + box_code_size=box_code_size, + conv_hyperparams=self._build_conv_hyperparams(), + num_predictions_per_location=num_predictions_per_location) + class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead( + num_class_slots=num_classes_without_background + 1, + conv_hyperparams=self._build_conv_hyperparams(), + num_predictions_per_location=num_predictions_per_location) + other_heads = { + other_head_name: + keras_mask_head.WeightSharedConvolutionalMaskHead( + num_classes=num_classes_without_background, + conv_hyperparams=self._build_conv_hyperparams(), + 
num_predictions_per_location=num_predictions_per_location, + mask_height=mask_height, + mask_width=mask_width) + } + + conv_box_predictor = box_predictor.WeightSharedConvolutionalBoxPredictor( + is_training=False, + num_classes=num_classes_without_background, + box_prediction_head=box_prediction_head, + class_prediction_head=class_prediction_head, + other_heads=other_heads, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + depth=32, + num_layers_before_predictor=2) + def graph_fn(image_features): + box_predictions = conv_box_predictor([image_features]) + for key, value in box_predictions.items(): + box_predictions[key] = tf.concat(value, axis=1) + assert len(box_predictions) == 3 + return (box_predictions[box_predictor.BOX_ENCODINGS], + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + box_predictions[other_head_name]) + + batch_size = 4 + feature_ht = 8 + feature_wt = 8 + image_features = np.random.rand(batch_size, feature_ht, feature_wt, + 64).astype(np.float32) + (box_encodings, class_predictions, other_head_predictions) = self.execute( + graph_fn, [image_features]) + num_anchors = feature_ht * feature_wt * num_predictions_per_location + self.assertAllEqual(box_encodings.shape, + [batch_size, num_anchors, box_code_size]) + self.assertAllEqual( + class_predictions.shape, + [batch_size, num_anchors, num_classes_without_background + 1]) + self.assertAllEqual(other_head_predictions.shape, [ + batch_size, num_anchors, num_classes_without_background, mask_height, + mask_width + ]) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/predictors/heads/__init__.py b/models/research/object_detection/predictors/heads/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/object_detection/predictors/heads/box_head.py b/models/research/object_detection/predictors/heads/box_head.py new file mode 100644 index 0000000000000000000000000000000000000000..6535e9b28192b05d15a202ce8b9bfef20f63ce83 --- /dev/null +++ b/models/research/object_detection/predictors/heads/box_head.py @@ -0,0 +1,281 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Box Head. + +Contains Box prediction head classes for different meta architectures. +All the box prediction heads have a predict function that receives the +`features` as the first argument and returns `box_encodings`. +""" +import functools +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.predictors.heads import head + + +class MaskRCNNBoxHead(head.Head): + """Box prediction head. 
+ + Please refer to Mask RCNN paper: + https://arxiv.org/abs/1703.06870 + """ + + def __init__(self, + is_training, + num_classes, + fc_hyperparams_fn, + use_dropout, + dropout_keep_prob, + box_code_size, + share_box_across_classes=False): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + fc_hyperparams_fn: A function to generate tf-slim arg_scope with + hyperparameters for fully connected ops. + use_dropout: Option to use dropout or not. Note that a single dropout + op is applied here prior to both box and class predictions, which stands + in contrast to the ConvolutionalBoxPredictor below. + dropout_keep_prob: Keep probability for dropout. + This is only used if use_dropout is True. + box_code_size: Size of encoding for each box. + share_box_across_classes: Whether to share boxes across classes rather + than use a different box for each class. + """ + super(MaskRCNNBoxHead, self).__init__() + self._is_training = is_training + self._num_classes = num_classes + self._fc_hyperparams_fn = fc_hyperparams_fn + self._use_dropout = use_dropout + self._dropout_keep_prob = dropout_keep_prob + self._box_code_size = box_code_size + self._share_box_across_classes = share_box_across_classes + + def predict(self, features, num_predictions_per_location=1): + """Predicts boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, + channels] containing features for a batch of images. + num_predictions_per_location: Int containing number of predictions per + location. + + Returns: + box_encodings: A float tensor of shape + [batch_size, 1, num_classes, code_size] representing the location of the + objects. + + Raises: + ValueError: If num_predictions_per_location is not 1. + """ + if num_predictions_per_location != 1: + raise ValueError('Only num_predictions_per_location=1 is supported') + spatial_averaged_roi_pooled_features = tf.reduce_mean( + features, [1, 2], keep_dims=True, name='AvgPool') + flattened_roi_pooled_features = slim.flatten( + spatial_averaged_roi_pooled_features) + if self._use_dropout: + flattened_roi_pooled_features = slim.dropout( + flattened_roi_pooled_features, + keep_prob=self._dropout_keep_prob, + is_training=self._is_training) + number_of_boxes = 1 + if not self._share_box_across_classes: + number_of_boxes = self._num_classes + + with slim.arg_scope(self._fc_hyperparams_fn()): + box_encodings = slim.fully_connected( + flattened_roi_pooled_features, + number_of_boxes * self._box_code_size, + reuse=tf.AUTO_REUSE, + activation_fn=None, + scope='BoxEncodingPredictor') + box_encodings = tf.reshape(box_encodings, + [-1, 1, number_of_boxes, self._box_code_size]) + return box_encodings + + +class ConvolutionalBoxHead(head.Head): + """Convolutional box prediction head.""" + + def __init__(self, + is_training, + box_code_size, + kernel_size, + use_depthwise=False, + box_encodings_clip_range=None): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + box_code_size: Size of encoding for each box. + kernel_size: Size of final convolution kernel. If the + spatial resolution of the feature map is smaller than the kernel size, + then the kernel size is automatically set to be + min(feature_width, feature_height). 
+ use_depthwise: Whether to use depthwise convolutions for prediction + steps. Default is False. + box_encodings_clip_range: Min and max values for clipping box_encodings. + + Raises: + ValueError: if min_depth > max_depth. + ValueError: if use_depthwise is True and kernel_size is 1. + """ + if use_depthwise and (kernel_size == 1): + raise ValueError('Should not use 1x1 kernel when using depthwise conv') + + super(ConvolutionalBoxHead, self).__init__() + self._is_training = is_training + self._box_code_size = box_code_size + self._kernel_size = kernel_size + self._use_depthwise = use_depthwise + self._box_encodings_clip_range = box_encodings_clip_range + + def predict(self, features, num_predictions_per_location): + """Predicts boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing image features. + num_predictions_per_location: Number of box predictions to be made per + spatial location. Int specifying number of boxes per location. + + Returns: + box_encodings: A float tensors of shape + [batch_size, num_anchors, q, code_size] representing the location of + the objects, where q is 1 or the number of classes. + """ + net = features + if self._use_depthwise: + box_encodings = slim.separable_conv2d( + net, None, [self._kernel_size, self._kernel_size], + padding='SAME', depth_multiplier=1, stride=1, + rate=1, scope='BoxEncodingPredictor_depthwise') + box_encodings = slim.conv2d( + box_encodings, + num_predictions_per_location * self._box_code_size, [1, 1], + activation_fn=None, + normalizer_fn=None, + normalizer_params=None, + scope='BoxEncodingPredictor') + else: + box_encodings = slim.conv2d( + net, num_predictions_per_location * self._box_code_size, + [self._kernel_size, self._kernel_size], + activation_fn=None, + normalizer_fn=None, + normalizer_params=None, + scope='BoxEncodingPredictor') + batch_size = features.get_shape().as_list()[0] + if batch_size is None: + batch_size = tf.shape(features)[0] + # Clipping the box encodings to make the inference graph TPU friendly. + if self._box_encodings_clip_range is not None: + box_encodings = tf.clip_by_value( + box_encodings, self._box_encodings_clip_range.min, + self._box_encodings_clip_range.max) + box_encodings = tf.reshape(box_encodings, + [batch_size, -1, 1, self._box_code_size]) + return box_encodings + + +# TODO(alirezafathi): See if possible to unify Weight Shared with regular +# convolutional box head. +class WeightSharedConvolutionalBoxHead(head.Head): + """Weight shared convolutional box prediction head. + + This head allows sharing the same set of parameters (weights) when called more + then once on different feature maps. + """ + + def __init__(self, + box_code_size, + kernel_size=3, + use_depthwise=False, + box_encodings_clip_range=None, + return_flat_predictions=True): + """Constructor. + + Args: + box_code_size: Size of encoding for each box. + kernel_size: Size of final convolution kernel. + use_depthwise: Whether to use depthwise convolutions for prediction steps. + Default is False. + box_encodings_clip_range: Min and max values for clipping box_encodings. + return_flat_predictions: If true, returns flattened prediction tensor + of shape [batch, height * width * num_predictions_per_location, + box_coder]. Otherwise returns the prediction tensor before reshaping, + whose shape is [batch, height, width, num_predictions_per_location * + num_class_slots]. + + Raises: + ValueError: if use_depthwise is True and kernel_size is 1. 
+ """ + if use_depthwise and (kernel_size == 1): + raise ValueError('Should not use 1x1 kernel when using depthwise conv') + + super(WeightSharedConvolutionalBoxHead, self).__init__() + self._box_code_size = box_code_size + self._kernel_size = kernel_size + self._use_depthwise = use_depthwise + self._box_encodings_clip_range = box_encodings_clip_range + self._return_flat_predictions = return_flat_predictions + + def predict(self, features, num_predictions_per_location): + """Predicts boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing image features. + num_predictions_per_location: Number of box predictions to be made per + spatial location. + + Returns: + box_encodings: A float tensor of shape + [batch_size, num_anchors, code_size] representing the location of + the objects, or a float tensor of shape [batch, height, width, + num_predictions_per_location * box_code_size] representing grid box + location predictions if self._return_flat_predictions is False. + """ + box_encodings_net = features + if self._use_depthwise: + conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1) + else: + conv_op = slim.conv2d + box_encodings = conv_op( + box_encodings_net, + num_predictions_per_location * self._box_code_size, + [self._kernel_size, self._kernel_size], + activation_fn=None, stride=1, padding='SAME', + normalizer_fn=None, + scope='BoxPredictor') + batch_size = features.get_shape().as_list()[0] + if batch_size is None: + batch_size = tf.shape(features)[0] + # Clipping the box encodings to make the inference graph TPU friendly. + if self._box_encodings_clip_range is not None: + box_encodings = tf.clip_by_value( + box_encodings, self._box_encodings_clip_range.min, + self._box_encodings_clip_range.max) + if self._return_flat_predictions: + box_encodings = tf.reshape(box_encodings, + [batch_size, -1, self._box_code_size]) + return box_encodings diff --git a/models/research/object_detection/predictors/heads/box_head_tf1_test.py b/models/research/object_detection/predictors/heads/box_head_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..ab534a2bd029abed5f39e232d023a27dd2e9a361 --- /dev/null +++ b/models/research/object_detection/predictors/heads/box_head_tf1_test.py @@ -0,0 +1,132 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.predictors.heads.box_head.""" +import unittest +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import hyperparams_builder +from object_detection.predictors.heads import box_head +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class MaskRCNNBoxHeadTest(test_case.TestCase): + + def _build_arg_scope_with_hyperparams(self, + op_type=hyperparams_pb2.Hyperparams.FC): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.build(hyperparams, is_training=True) + + def test_prediction_size(self): + box_prediction_head = box_head.MaskRCNNBoxHead( + is_training=False, + num_classes=20, + fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), + use_dropout=True, + dropout_keep_prob=0.5, + box_code_size=4, + share_box_across_classes=False) + roi_pooled_features = tf.random_uniform( + [64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + prediction = box_prediction_head.predict( + features=roi_pooled_features, num_predictions_per_location=1) + self.assertAllEqual([64, 1, 20, 4], prediction.get_shape().as_list()) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class ConvolutionalBoxPredictorTest(test_case.TestCase): + + def _build_arg_scope_with_hyperparams( + self, op_type=hyperparams_pb2.Hyperparams.CONV): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.build(hyperparams, is_training=True) + + def test_prediction_size(self): + box_prediction_head = box_head.ConvolutionalBoxHead( + is_training=True, + box_code_size=4, + kernel_size=3) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + box_encodings = box_prediction_head.predict( + features=image_feature, + num_predictions_per_location=1) + self.assertAllEqual([64, 323, 1, 4], box_encodings.get_shape().as_list()) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class WeightSharedConvolutionalBoxPredictorTest(test_case.TestCase): + + def _build_arg_scope_with_hyperparams( + self, op_type=hyperparams_pb2.Hyperparams.CONV): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.build(hyperparams, is_training=True) + + def test_prediction_size(self): + box_prediction_head = box_head.WeightSharedConvolutionalBoxHead( + box_code_size=4) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + box_encodings = box_prediction_head.predict( + features=image_feature, + 
num_predictions_per_location=1) + self.assertAllEqual([64, 323, 4], box_encodings.get_shape().as_list()) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/predictors/heads/class_head.py b/models/research/object_detection/predictors/heads/class_head.py new file mode 100644 index 0000000000000000000000000000000000000000..604859313de84a783953e67dbe47e301a740cb96 --- /dev/null +++ b/models/research/object_detection/predictors/heads/class_head.py @@ -0,0 +1,315 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Class Head. + +Contains Class prediction head classes for different meta architectures. +All the class prediction heads have a predict function that receives the +`features` as the first argument and returns class predictions with background. +""" +import functools +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.predictors.heads import head + + +class MaskRCNNClassHead(head.Head): + """Mask RCNN class prediction head. + + Please refer to Mask RCNN paper: + https://arxiv.org/abs/1703.06870 + """ + + def __init__(self, + is_training, + num_class_slots, + fc_hyperparams_fn, + use_dropout, + dropout_keep_prob, + scope='ClassPredictor'): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_class_slots: number of class slots. Note that num_class_slots may or + may not include an implicit background category. + fc_hyperparams_fn: A function to generate tf-slim arg_scope with + hyperparameters for fully connected ops. + use_dropout: Option to use dropout or not. Note that a single dropout + op is applied here prior to both box and class predictions, which stands + in contrast to the ConvolutionalBoxPredictor below. + dropout_keep_prob: Keep probability for dropout. + This is only used if use_dropout is True. + scope: Scope name for the convolution operation. + """ + super(MaskRCNNClassHead, self).__init__() + self._is_training = is_training + self._num_class_slots = num_class_slots + self._fc_hyperparams_fn = fc_hyperparams_fn + self._use_dropout = use_dropout + self._dropout_keep_prob = dropout_keep_prob + self._scope = scope + + def predict(self, features, num_predictions_per_location=1): + """Predicts boxes and class scores. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing features for a batch of images. + num_predictions_per_location: Int containing number of predictions per + location. + + Returns: + class_predictions_with_background: A float tensor of shape + [batch_size, 1, num_class_slots] representing the class predictions for + the proposals. + + Raises: + ValueError: If num_predictions_per_location is not 1. 
+ """ + if num_predictions_per_location != 1: + raise ValueError('Only num_predictions_per_location=1 is supported') + spatial_averaged_roi_pooled_features = tf.reduce_mean( + features, [1, 2], keep_dims=True, name='AvgPool') + flattened_roi_pooled_features = slim.flatten( + spatial_averaged_roi_pooled_features) + if self._use_dropout: + flattened_roi_pooled_features = slim.dropout( + flattened_roi_pooled_features, + keep_prob=self._dropout_keep_prob, + is_training=self._is_training) + + with slim.arg_scope(self._fc_hyperparams_fn()): + class_predictions_with_background = slim.fully_connected( + flattened_roi_pooled_features, + self._num_class_slots, + reuse=tf.AUTO_REUSE, + activation_fn=None, + scope=self._scope) + class_predictions_with_background = tf.reshape( + class_predictions_with_background, + [-1, 1, self._num_class_slots]) + return class_predictions_with_background + + +class ConvolutionalClassHead(head.Head): + """Convolutional class prediction head.""" + + def __init__(self, + is_training, + num_class_slots, + use_dropout, + dropout_keep_prob, + kernel_size, + apply_sigmoid_to_scores=False, + class_prediction_bias_init=0.0, + use_depthwise=False, + scope='ClassPredictor'): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_class_slots: number of class slots. Note that num_class_slots may or + may not include an implicit background category. + use_dropout: Option to use dropout or not. Note that a single dropout + op is applied here prior to both box and class predictions, which stands + in contrast to the ConvolutionalBoxPredictor below. + dropout_keep_prob: Keep probability for dropout. + This is only used if use_dropout is True. + kernel_size: Size of final convolution kernel. If the + spatial resolution of the feature map is smaller than the kernel size, + then the kernel size is automatically set to be + min(feature_width, feature_height). + apply_sigmoid_to_scores: if True, apply the sigmoid on the output + class_predictions. + class_prediction_bias_init: constant value to initialize bias of the last + conv2d layer before class prediction. + use_depthwise: Whether to use depthwise convolutions for prediction + steps. Default is False. + scope: Scope name for the convolution operation. + + Raises: + ValueError: if min_depth > max_depth. + ValueError: if use_depthwise is True and kernel_size is 1. + """ + if use_depthwise and (kernel_size == 1): + raise ValueError('Should not use 1x1 kernel when using depthwise conv') + + super(ConvolutionalClassHead, self).__init__() + self._is_training = is_training + self._num_class_slots = num_class_slots + self._use_dropout = use_dropout + self._dropout_keep_prob = dropout_keep_prob + self._kernel_size = kernel_size + self._apply_sigmoid_to_scores = apply_sigmoid_to_scores + self._class_prediction_bias_init = class_prediction_bias_init + self._use_depthwise = use_depthwise + self._scope = scope + + def predict(self, features, num_predictions_per_location): + """Predicts boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing image features. + num_predictions_per_location: Number of box predictions to be made per + spatial location. + + Returns: + class_predictions_with_background: A float tensors of shape + [batch_size, num_anchors, num_class_slots] representing the class + predictions for the proposals. 
+ """ + net = features + if self._use_dropout: + net = slim.dropout(net, keep_prob=self._dropout_keep_prob) + if self._use_depthwise: + depthwise_scope = self._scope + '_depthwise' + class_predictions_with_background = slim.separable_conv2d( + net, None, [self._kernel_size, self._kernel_size], + padding='SAME', depth_multiplier=1, stride=1, + rate=1, scope=depthwise_scope) + class_predictions_with_background = slim.conv2d( + class_predictions_with_background, + num_predictions_per_location * self._num_class_slots, [1, 1], + activation_fn=None, + normalizer_fn=None, + normalizer_params=None, + scope=self._scope) + else: + class_predictions_with_background = slim.conv2d( + net, + num_predictions_per_location * self._num_class_slots, + [self._kernel_size, self._kernel_size], + activation_fn=None, + normalizer_fn=None, + normalizer_params=None, + scope=self._scope, + biases_initializer=tf.constant_initializer( + self._class_prediction_bias_init)) + if self._apply_sigmoid_to_scores: + class_predictions_with_background = tf.sigmoid( + class_predictions_with_background) + batch_size = features.get_shape().as_list()[0] + if batch_size is None: + batch_size = tf.shape(features)[0] + class_predictions_with_background = tf.reshape( + class_predictions_with_background, + [batch_size, -1, self._num_class_slots]) + return class_predictions_with_background + + +# TODO(alirezafathi): See if possible to unify Weight Shared with regular +# convolutional class head. +class WeightSharedConvolutionalClassHead(head.Head): + """Weight shared convolutional class prediction head. + + This head allows sharing the same set of parameters (weights) when called more + then once on different feature maps. + """ + + def __init__(self, + num_class_slots, + kernel_size=3, + class_prediction_bias_init=0.0, + use_dropout=False, + dropout_keep_prob=0.8, + use_depthwise=False, + score_converter_fn=tf.identity, + return_flat_predictions=True, + scope='ClassPredictor'): + """Constructor. + + Args: + num_class_slots: number of class slots. Note that num_class_slots may or + may not include an implicit background category. + kernel_size: Size of final convolution kernel. + class_prediction_bias_init: constant value to initialize bias of the last + conv2d layer before class prediction. + use_dropout: Whether to apply dropout to class prediction head. + dropout_keep_prob: Probability of keeping activiations. + use_depthwise: Whether to use depthwise convolutions for prediction + steps. Default is False. + score_converter_fn: Callable elementwise nonlinearity (that takes tensors + as inputs and returns tensors). + return_flat_predictions: If true, returns flattened prediction tensor + of shape [batch, height * width * num_predictions_per_location, + box_coder]. Otherwise returns the prediction tensor before reshaping, + whose shape is [batch, height, width, num_predictions_per_location * + num_class_slots]. + scope: Scope name for the convolution operation. + + Raises: + ValueError: if use_depthwise is True and kernel_size is 1. 
+ """ + if use_depthwise and (kernel_size == 1): + raise ValueError('Should not use 1x1 kernel when using depthwise conv') + + super(WeightSharedConvolutionalClassHead, self).__init__() + self._num_class_slots = num_class_slots + self._kernel_size = kernel_size + self._class_prediction_bias_init = class_prediction_bias_init + self._use_dropout = use_dropout + self._dropout_keep_prob = dropout_keep_prob + self._use_depthwise = use_depthwise + self._score_converter_fn = score_converter_fn + self._return_flat_predictions = return_flat_predictions + self._scope = scope + + def predict(self, features, num_predictions_per_location): + """Predicts boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing image features. + num_predictions_per_location: Number of box predictions to be made per + spatial location. + + Returns: + class_predictions_with_background: A tensor of shape + [batch_size, num_anchors, num_class_slots] representing the class + predictions for the proposals, or a tensor of shape [batch, height, + width, num_predictions_per_location * num_class_slots] representing + class predictions before reshaping if self._return_flat_predictions is + False. + """ + class_predictions_net = features + if self._use_dropout: + class_predictions_net = slim.dropout( + class_predictions_net, keep_prob=self._dropout_keep_prob) + if self._use_depthwise: + conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1) + else: + conv_op = slim.conv2d + class_predictions_with_background = conv_op( + class_predictions_net, + num_predictions_per_location * self._num_class_slots, + [self._kernel_size, self._kernel_size], + activation_fn=None, stride=1, padding='SAME', + normalizer_fn=None, + biases_initializer=tf.constant_initializer( + self._class_prediction_bias_init), + scope=self._scope) + batch_size = features.get_shape().as_list()[0] + if batch_size is None: + batch_size = tf.shape(features)[0] + class_predictions_with_background = self._score_converter_fn( + class_predictions_with_background) + if self._return_flat_predictions: + class_predictions_with_background = tf.reshape( + class_predictions_with_background, + [batch_size, -1, self._num_class_slots]) + return class_predictions_with_background diff --git a/models/research/object_detection/predictors/heads/class_head_tf1_test.py b/models/research/object_detection/predictors/heads/class_head_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..3dc8fb120cb9a4c19ff2d595d31dc3645f6e06d0 --- /dev/null +++ b/models/research/object_detection/predictors/heads/class_head_tf1_test.py @@ -0,0 +1,199 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.predictors.heads.class_head.""" +import unittest +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import hyperparams_builder +from object_detection.predictors.heads import class_head +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class MaskRCNNClassHeadTest(test_case.TestCase): + + def _build_arg_scope_with_hyperparams(self, + op_type=hyperparams_pb2.Hyperparams.FC): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.build(hyperparams, is_training=True) + + def test_prediction_size(self): + class_prediction_head = class_head.MaskRCNNClassHead( + is_training=False, + num_class_slots=20, + fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), + use_dropout=True, + dropout_keep_prob=0.5) + roi_pooled_features = tf.random_uniform( + [64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + prediction = class_prediction_head.predict( + features=roi_pooled_features, num_predictions_per_location=1) + self.assertAllEqual([64, 1, 20], prediction.get_shape().as_list()) + + def test_scope_name(self): + expected_var_names = set([ + """ClassPredictor/weights""", + """ClassPredictor/biases""" + ]) + + g = tf.Graph() + with g.as_default(): + class_prediction_head = class_head.MaskRCNNClassHead( + is_training=True, + num_class_slots=20, + fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), + use_dropout=True, + dropout_keep_prob=0.5) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + class_prediction_head.predict( + features=image_feature, + num_predictions_per_location=1) + actual_variable_set = set([ + var.op.name for var in g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) + ]) + self.assertSetEqual(expected_var_names, actual_variable_set) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class ConvolutionalClassPredictorTest(test_case.TestCase): + + def _build_arg_scope_with_hyperparams( + self, op_type=hyperparams_pb2.Hyperparams.CONV): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.build(hyperparams, is_training=True) + + def test_prediction_size(self): + class_prediction_head = class_head.ConvolutionalClassHead( + is_training=True, + num_class_slots=20, + use_dropout=True, + dropout_keep_prob=0.5, + kernel_size=3) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + class_predictions = class_prediction_head.predict( + features=image_feature, + num_predictions_per_location=1) + self.assertAllEqual([64, 323, 20], + class_predictions.get_shape().as_list()) + + def test_scope_name(self): + expected_var_names = set([ + """ClassPredictor/weights""", + """ClassPredictor/biases""" + ]) + g = 
tf.Graph() + with g.as_default(): + class_prediction_head = class_head.ConvolutionalClassHead( + is_training=True, + num_class_slots=20, + use_dropout=True, + dropout_keep_prob=0.5, + kernel_size=3) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + class_prediction_head.predict( + features=image_feature, + num_predictions_per_location=1) + actual_variable_set = set([ + var.op.name for var in g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) + ]) + self.assertSetEqual(expected_var_names, actual_variable_set) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class WeightSharedConvolutionalClassPredictorTest(test_case.TestCase): + + def _build_arg_scope_with_hyperparams( + self, op_type=hyperparams_pb2.Hyperparams.CONV): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.build(hyperparams, is_training=True) + + def test_prediction_size(self): + class_prediction_head = ( + class_head.WeightSharedConvolutionalClassHead(num_class_slots=20)) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + class_predictions = class_prediction_head.predict( + features=image_feature, + num_predictions_per_location=1) + self.assertAllEqual([64, 323, 20], class_predictions.get_shape().as_list()) + + def test_scope_name(self): + expected_var_names = set([ + """ClassPredictor/weights""", + """ClassPredictor/biases""" + ]) + g = tf.Graph() + with g.as_default(): + class_prediction_head = class_head.WeightSharedConvolutionalClassHead( + num_class_slots=20) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + class_prediction_head.predict( + features=image_feature, + num_predictions_per_location=1) + actual_variable_set = set([ + var.op.name for var in g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) + ]) + self.assertSetEqual(expected_var_names, actual_variable_set) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/predictors/heads/head.py b/models/research/object_detection/predictors/heads/head.py new file mode 100644 index 0000000000000000000000000000000000000000..d2780319dd8ce8bf58ff79db5c552d55a3568eb8 --- /dev/null +++ b/models/research/object_detection/predictors/heads/head.py @@ -0,0 +1,81 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Base head class. + +All the different kinds of prediction heads in different models will inherit +from this class. What is in common between all head classes is that they have a +`predict` function that receives `features` as its first argument. 
+ +How to add a new prediction head to an existing meta architecture? +For example, how can we add a `3d shape` prediction head to Mask RCNN? + +We have to take the following steps to add a new prediction head to an +existing meta arch: +(a) Add a class for predicting the head. This class should inherit from the +`Head` class below and have a `predict` function that receives the features +and predicts the output. The output is always a tf.float32 tensor. +(b) Add the head to the meta architecture. For example in case of Mask RCNN, +go to box_predictor_builder and put in the logic for adding the new head to the +Mask RCNN box predictor. +(c) Add the logic for computing the loss for the new head. +(d) Add the necessary metrics for the new head. +(e) (optional) Add visualization for the new head. +""" +from abc import abstractmethod + +import tensorflow.compat.v1 as tf + + +class Head(object): + """Mask RCNN head base class.""" + + def __init__(self): + """Constructor.""" + pass + + @abstractmethod + def predict(self, features, num_predictions_per_location): + """Returns the head's predictions. + + Args: + features: A float tensor of features. + num_predictions_per_location: Int containing number of predictions per + location. + + Returns: + A tf.float32 tensor. + """ + pass + + +class KerasHead(tf.keras.Model): + """Keras head base class.""" + + def call(self, features): + """The Keras model call will delegate to the `_predict` method.""" + return self._predict(features) + + @abstractmethod + def _predict(self, features): + """Returns the head's predictions. + + Args: + features: A float tensor of features. + + Returns: + A tf.float32 tensor. + """ + pass diff --git a/models/research/object_detection/predictors/heads/keras_box_head.py b/models/research/object_detection/predictors/heads/keras_box_head.py new file mode 100644 index 0000000000000000000000000000000000000000..b8def7fc1b01291d92ce545c8c3c29d9a24c646a --- /dev/null +++ b/models/research/object_detection/predictors/heads/keras_box_head.py @@ -0,0 +1,333 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Box Head. + +Contains Box prediction head classes for different meta architectures. +All the box prediction heads have a _predict function that receives the +`features` as the first argument and returns `box_encodings`. +""" +import tensorflow.compat.v1 as tf + +from object_detection.predictors.heads import head + + +class ConvolutionalBoxHead(head.KerasHead): + """Convolutional box prediction head.""" + + def __init__(self, + is_training, + box_code_size, + kernel_size, + num_predictions_per_location, + conv_hyperparams, + freeze_batchnorm, + use_depthwise=False, + box_encodings_clip_range=None, + name=None): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + box_code_size: Size of encoding for each box. 
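Editorial aside illustrating step (a) of the head-extension guidance above; a hypothetical minimal Keras head (names invented for illustration, not part of the patch):

import tensorflow.compat.v1 as tf
from object_detection.predictors.heads import head

class DummyShapeHead(head.KerasHead):
  """Hypothetical head that predicts one scalar per image."""

  def __init__(self, name=None):
    super(DummyShapeHead, self).__init__(name=name)
    self._dense = tf.keras.layers.Dense(1, name='DummyShapePredictor')

  def _predict(self, features):
    # Pool spatially, then map to a single float32 value per example.
    pooled = tf.reduce_mean(features, axis=[1, 2])
    return self._dense(pooled)

# DummyShapeHead()(image_features) returns a [batch, 1] float32 tensor; wiring
# it into a box predictor and its losses (steps b-e) happens elsewhere.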
+ kernel_size: Size of final convolution kernel. If the + spatial resolution of the feature map is smaller than the kernel size, + then the kernel size is automatically set to be + min(feature_width, feature_height). + num_predictions_per_location: Number of box predictions to be made per + spatial location. Int specifying number of boxes per location. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + freeze_batchnorm: Bool. Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + use_depthwise: Whether to use depthwise convolutions for prediction + steps. Default is False. + box_encodings_clip_range: Min and max values for clipping box_encodings. + name: A string name scope to assign to the model. If `None`, Keras + will auto-generate one from the class name. + + Raises: + ValueError: if min_depth > max_depth. + ValueError: if use_depthwise is True and kernel_size is 1. + """ + if use_depthwise and (kernel_size == 1): + raise ValueError('Should not use 1x1 kernel when using depthwise conv') + + super(ConvolutionalBoxHead, self).__init__(name=name) + self._is_training = is_training + self._box_code_size = box_code_size + self._kernel_size = kernel_size + self._num_predictions_per_location = num_predictions_per_location + self._use_depthwise = use_depthwise + self._box_encodings_clip_range = box_encodings_clip_range + + self._box_encoder_layers = [] + + if self._use_depthwise: + self._box_encoder_layers.append( + tf.keras.layers.DepthwiseConv2D( + [self._kernel_size, self._kernel_size], + padding='SAME', + depth_multiplier=1, + strides=1, + dilation_rate=1, + name='BoxEncodingPredictor_depthwise', + **conv_hyperparams.params())) + self._box_encoder_layers.append( + conv_hyperparams.build_batch_norm( + training=(is_training and not freeze_batchnorm), + name='BoxEncodingPredictor_depthwise_batchnorm')) + self._box_encoder_layers.append( + conv_hyperparams.build_activation_layer( + name='BoxEncodingPredictor_depthwise_activation')) + self._box_encoder_layers.append( + tf.keras.layers.Conv2D( + num_predictions_per_location * self._box_code_size, [1, 1], + name='BoxEncodingPredictor', + **conv_hyperparams.params(use_bias=True))) + else: + self._box_encoder_layers.append( + tf.keras.layers.Conv2D( + num_predictions_per_location * self._box_code_size, + [self._kernel_size, self._kernel_size], + padding='SAME', + name='BoxEncodingPredictor', + **conv_hyperparams.params(use_bias=True))) + + def _predict(self, features): + """Predicts boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing image features. + + Returns: + box_encodings: A float tensor of shape + [batch_size, num_anchors, q, code_size] representing the location of + the objects, where q is 1 or the number of classes. + """ + box_encodings = features + for layer in self._box_encoder_layers: + box_encodings = layer(box_encodings) + batch_size = features.get_shape().as_list()[0] + if batch_size is None: + batch_size = tf.shape(features)[0] + # Clipping the box encodings to make the inference graph TPU friendly. 
+ if self._box_encodings_clip_range is not None: + box_encodings = tf.clip_by_value( + box_encodings, self._box_encodings_clip_range.min, + self._box_encodings_clip_range.max) + box_encodings = tf.reshape(box_encodings, + [batch_size, -1, 1, self._box_code_size]) + return box_encodings + + +class MaskRCNNBoxHead(head.KerasHead): + """Box prediction head. + + This is a piece of Mask RCNN which is responsible for predicting + just the box encodings. + + Please refer to Mask RCNN paper: + https://arxiv.org/abs/1703.06870 + """ + + def __init__(self, + is_training, + num_classes, + fc_hyperparams, + freeze_batchnorm, + use_dropout, + dropout_keep_prob, + box_code_size, + share_box_across_classes=False, + name=None): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + fc_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for fully connected dense ops. + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + use_dropout: Option to use dropout or not. Note that a single dropout + op is applied here prior to both box and class predictions, which stands + in contrast to the ConvolutionalBoxPredictor below. + dropout_keep_prob: Keep probability for dropout. + This is only used if use_dropout is True. + box_code_size: Size of encoding for each box. + share_box_across_classes: Whether to share boxes across classes rather + than use a different box for each class. + name: A string name scope to assign to the box head. If `None`, Keras + will auto-generate one from the class name. + """ + super(MaskRCNNBoxHead, self).__init__(name=name) + self._is_training = is_training + self._num_classes = num_classes + self._fc_hyperparams = fc_hyperparams + self._freeze_batchnorm = freeze_batchnorm + self._use_dropout = use_dropout + self._dropout_keep_prob = dropout_keep_prob + self._box_code_size = box_code_size + self._share_box_across_classes = share_box_across_classes + + self._box_encoder_layers = [tf.keras.layers.Flatten()] + + if self._use_dropout: + self._box_encoder_layers.append( + tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) + + self._number_of_boxes = 1 + if not self._share_box_across_classes: + self._number_of_boxes = self._num_classes + + self._box_encoder_layers.append( + tf.keras.layers.Dense(self._number_of_boxes * self._box_code_size, + name='BoxEncodingPredictor_dense')) + self._box_encoder_layers.append( + fc_hyperparams.build_batch_norm(training=(is_training and + not freeze_batchnorm), + name='BoxEncodingPredictor_batchnorm')) + + def _predict(self, features): + """Predicts box encodings. + + Args: + features: A float tensor of shape [batch_size, height, width, + channels] containing features for a batch of images. + + Returns: + box_encodings: A float tensor of shape + [batch_size, 1, num_classes, code_size] representing the location of the + objects. 
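Editorial aside on the output shape documented just above for MaskRCNNBoxHead; a small sketch of how share_box_across_classes changes it (illustrative numbers, not part of the patch):

batch, num_classes, box_code_size = 64, 20, 4
for share_box_across_classes in (False, True):
  number_of_boxes = 1 if share_box_across_classes else num_classes
  # [64, 1, 20, 4] when boxes are per class, [64, 1, 1, 4] when shared.
  print([batch, 1, number_of_boxes, box_code_size])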
+ """ + spatial_averaged_roi_pooled_features = tf.reduce_mean( + features, [1, 2], keep_dims=True, name='AvgPool') + net = spatial_averaged_roi_pooled_features + for layer in self._box_encoder_layers: + net = layer(net) + box_encodings = tf.reshape(net, + [-1, 1, + self._number_of_boxes, + self._box_code_size]) + return box_encodings + + +# TODO(b/128922690): Unify the implementations of ConvolutionalBoxHead +# and WeightSharedConvolutionalBoxHead +class WeightSharedConvolutionalBoxHead(head.KerasHead): + """Weight shared convolutional box prediction head based on Keras. + + This head allows sharing the same set of parameters (weights) when called more + then once on different feature maps. + """ + + def __init__(self, + box_code_size, + num_predictions_per_location, + conv_hyperparams, + kernel_size=3, + use_depthwise=False, + box_encodings_clip_range=None, + return_flat_predictions=True, + name=None): + """Constructor. + + Args: + box_code_size: Size of encoding for each box. + num_predictions_per_location: Number of box predictions to be made per + spatial location. Int specifying number of boxes per location. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + kernel_size: Size of final convolution kernel. + use_depthwise: Whether to use depthwise convolutions for prediction steps. + Default is False. + box_encodings_clip_range: Min and max values for clipping box_encodings. + return_flat_predictions: If true, returns flattened prediction tensor + of shape [batch, height * width * num_predictions_per_location, + box_coder]. Otherwise returns the prediction tensor before reshaping, + whose shape is [batch, height, width, num_predictions_per_location * + num_class_slots]. + name: A string name scope to assign to the model. If `None`, Keras + will auto-generate one from the class name. + + Raises: + ValueError: if use_depthwise is True and kernel_size is 1. + """ + if use_depthwise and (kernel_size == 1): + raise ValueError('Should not use 1x1 kernel when using depthwise conv') + + super(WeightSharedConvolutionalBoxHead, self).__init__(name=name) + self._box_code_size = box_code_size + self._kernel_size = kernel_size + self._num_predictions_per_location = num_predictions_per_location + self._use_depthwise = use_depthwise + self._box_encodings_clip_range = box_encodings_clip_range + self._return_flat_predictions = return_flat_predictions + + self._box_encoder_layers = [] + + if self._use_depthwise: + self._box_encoder_layers.append( + tf.keras.layers.SeparableConv2D( + num_predictions_per_location * self._box_code_size, + [self._kernel_size, self._kernel_size], + padding='SAME', + name='BoxPredictor', + **conv_hyperparams.params(use_bias=True))) + else: + self._box_encoder_layers.append( + tf.keras.layers.Conv2D( + num_predictions_per_location * self._box_code_size, + [self._kernel_size, self._kernel_size], + padding='SAME', + name='BoxPredictor', + **conv_hyperparams.params(use_bias=True))) + + def _predict(self, features): + """Predicts boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing image features. + + Returns: + box_encodings: A float tensor of shape + [batch_size, num_anchors, q, code_size] representing the location of + the objects, where q is 1 or the number of classes. 
+ """ + box_encodings = features + for layer in self._box_encoder_layers: + box_encodings = layer(box_encodings) + batch_size = features.get_shape().as_list()[0] + if batch_size is None: + batch_size = tf.shape(features)[0] + # Clipping the box encodings to make the inference graph TPU friendly. + if self._box_encodings_clip_range is not None: + box_encodings = tf.clip_by_value( + box_encodings, self._box_encodings_clip_range.min, + self._box_encodings_clip_range.max) + if self._return_flat_predictions: + box_encodings = tf.reshape(box_encodings, + [batch_size, -1, self._box_code_size]) + return box_encodings diff --git a/models/research/object_detection/predictors/heads/keras_box_head_tf2_test.py b/models/research/object_detection/predictors/heads/keras_box_head_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..e9e8b8dcc3aa07ce6917a881c42cf51db7318576 --- /dev/null +++ b/models/research/object_detection/predictors/heads/keras_box_head_tf2_test.py @@ -0,0 +1,199 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.predictors.heads.box_head.""" +import unittest +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import hyperparams_builder +from object_detection.predictors.heads import keras_box_head +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class ConvolutionalKerasBoxHeadTest(test_case.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def test_prediction_size_depthwise_false(self): + conv_hyperparams = self._build_conv_hyperparams() + box_prediction_head = keras_box_head.ConvolutionalBoxHead( + is_training=True, + box_code_size=4, + kernel_size=3, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=False, + num_predictions_per_location=1, + use_depthwise=False) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + box_encodings = box_prediction_head(image_feature) + return box_encodings + box_encodings = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 1, 4], box_encodings.shape) + + def test_prediction_size_depthwise_true(self): + conv_hyperparams = self._build_conv_hyperparams() + box_prediction_head = keras_box_head.ConvolutionalBoxHead( + is_training=True, + box_code_size=4, + kernel_size=3, + conv_hyperparams=conv_hyperparams, + 
freeze_batchnorm=False, + num_predictions_per_location=1, + use_depthwise=True) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + box_encodings = box_prediction_head(image_feature) + return box_encodings + box_encodings = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 1, 4], box_encodings.shape) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class MaskRCNNKerasBoxHeadTest(test_case.TestCase): + + def _build_fc_hyperparams( + self, op_type=hyperparams_pb2.Hyperparams.FC): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.KerasLayerHyperparams(hyperparams) + + def test_prediction_size(self): + box_prediction_head = keras_box_head.MaskRCNNBoxHead( + is_training=False, + num_classes=20, + fc_hyperparams=self._build_fc_hyperparams(), + freeze_batchnorm=False, + use_dropout=True, + dropout_keep_prob=0.5, + box_code_size=4, + share_box_across_classes=False) + def graph_fn(): + roi_pooled_features = tf.random_uniform( + [64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + prediction = box_prediction_head(roi_pooled_features) + return prediction + prediction = self.execute(graph_fn, []) + self.assertAllEqual([64, 1, 20, 4], prediction.shape) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class WeightSharedConvolutionalKerasBoxHead(test_case.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def test_prediction_size_depthwise_false(self): + conv_hyperparams = self._build_conv_hyperparams() + box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead( + box_code_size=4, + conv_hyperparams=conv_hyperparams, + num_predictions_per_location=1, + use_depthwise=False) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + box_encodings = box_prediction_head(image_feature) + return box_encodings + box_encodings = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 4], box_encodings.shape) + + def test_prediction_size_depthwise_true(self): + conv_hyperparams = self._build_conv_hyperparams() + box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead( + box_code_size=4, + conv_hyperparams=conv_hyperparams, + num_predictions_per_location=1, + use_depthwise=True) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + box_encodings = box_prediction_head(image_feature) + return box_encodings + box_encodings = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 4], box_encodings.shape) + + def test_variable_count_depth_wise_true(self): + conv_hyperparams = self._build_conv_hyperparams() + box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead( + box_code_size=4, + conv_hyperparams=conv_hyperparams, + num_predictions_per_location=1, + use_depthwise=True) + image_feature = 
tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + box_prediction_head(image_feature) + self.assertEqual(len(box_prediction_head.variables), 3) + + def test_variable_count_depth_wise_False(self): + conv_hyperparams = self._build_conv_hyperparams() + box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead( + box_code_size=4, + conv_hyperparams=conv_hyperparams, + num_predictions_per_location=1, + use_depthwise=False) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + box_prediction_head(image_feature) + self.assertEqual(len(box_prediction_head.variables), 2) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/predictors/heads/keras_class_head.py b/models/research/object_detection/predictors/heads/keras_class_head.py new file mode 100644 index 0000000000000000000000000000000000000000..988ebb2ee720f5db137ade0aef9919a942a57a5b --- /dev/null +++ b/models/research/object_detection/predictors/heads/keras_class_head.py @@ -0,0 +1,351 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Class Head. + +Contains Class prediction head classes for different meta architectures. +All the class prediction heads have a predict function that receives the +`features` as the first argument and returns class predictions with background. +""" +import tensorflow.compat.v1 as tf + +from object_detection.predictors.heads import head + + +class ConvolutionalClassHead(head.KerasHead): + """Convolutional class prediction head.""" + + def __init__(self, + is_training, + num_class_slots, + use_dropout, + dropout_keep_prob, + kernel_size, + num_predictions_per_location, + conv_hyperparams, + freeze_batchnorm, + class_prediction_bias_init=0.0, + use_depthwise=False, + name=None): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_class_slots: number of class slots. Note that num_class_slots may or + may not include an implicit background category. + use_dropout: Option to use dropout or not. Note that a single dropout + op is applied here prior to both box and class predictions, which stands + in contrast to the ConvolutionalBoxPredictor below. + dropout_keep_prob: Keep probability for dropout. + This is only used if use_dropout is True. + kernel_size: Size of final convolution kernel. If the + spatial resolution of the feature map is smaller than the kernel size, + then the kernel size is automatically set to be + min(feature_width, feature_height). + num_predictions_per_location: Number of box predictions to be made per + spatial location. Int specifying number of boxes per location. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + freeze_batchnorm: Bool. 
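Editorial aside on the variable counts asserted in the box-head tests above (3 for the depthwise head, 2 otherwise); a small sketch of where they come from, illustrative and not part of the patch:

import tensorflow.compat.v1 as tf

# A SeparableConv2D layer owns a depthwise kernel, a pointwise kernel and a
# bias (3 variables); a plain Conv2D owns a kernel and a bias (2 variables).
sep = tf.keras.layers.SeparableConv2D(4, [3, 3], padding='SAME')
conv = tf.keras.layers.Conv2D(4, [3, 3], padding='SAME')
sep.build((None, 17, 19, 8))
conv.build((None, 17, 19, 8))
print(len(sep.variables), len(conv.variables))  # 3 2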
Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + class_prediction_bias_init: constant value to initialize bias of the last + conv2d layer before class prediction. + use_depthwise: Whether to use depthwise convolutions for prediction + steps. Default is False. + name: A string name scope to assign to the model. If `None`, Keras + will auto-generate one from the class name. + + Raises: + ValueError: if min_depth > max_depth. + ValueError: if use_depthwise is True and kernel_size is 1. + """ + if use_depthwise and (kernel_size == 1): + raise ValueError('Should not use 1x1 kernel when using depthwise conv') + + super(ConvolutionalClassHead, self).__init__(name=name) + self._is_training = is_training + self._use_dropout = use_dropout + self._dropout_keep_prob = dropout_keep_prob + self._kernel_size = kernel_size + self._class_prediction_bias_init = class_prediction_bias_init + self._use_depthwise = use_depthwise + self._num_class_slots = num_class_slots + + self._class_predictor_layers = [] + + if self._use_dropout: + self._class_predictor_layers.append( + # The Dropout layer's `training` parameter for the call method must + # be set implicitly by the Keras set_learning_phase. The object + # detection training code takes care of this. + tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) + if self._use_depthwise: + self._class_predictor_layers.append( + tf.keras.layers.DepthwiseConv2D( + [self._kernel_size, self._kernel_size], + padding='SAME', + depth_multiplier=1, + strides=1, + dilation_rate=1, + name='ClassPredictor_depthwise', + **conv_hyperparams.params())) + self._class_predictor_layers.append( + conv_hyperparams.build_batch_norm( + training=(is_training and not freeze_batchnorm), + name='ClassPredictor_depthwise_batchnorm')) + self._class_predictor_layers.append( + conv_hyperparams.build_activation_layer( + name='ClassPredictor_depthwise_activation')) + self._class_predictor_layers.append( + tf.keras.layers.Conv2D( + num_predictions_per_location * self._num_class_slots, [1, 1], + name='ClassPredictor', + **conv_hyperparams.params(use_bias=True))) + else: + self._class_predictor_layers.append( + tf.keras.layers.Conv2D( + num_predictions_per_location * self._num_class_slots, + [self._kernel_size, self._kernel_size], + padding='SAME', + name='ClassPredictor', + bias_initializer=tf.constant_initializer( + self._class_prediction_bias_init), + **conv_hyperparams.params(use_bias=True))) + + def _predict(self, features): + """Predicts boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing image features. + + Returns: + class_predictions_with_background: A float tensor of shape + [batch_size, num_anchors, num_class_slots] representing the class + predictions for the proposals. + """ + class_predictions_with_background = features + for layer in self._class_predictor_layers: + class_predictions_with_background = layer( + class_predictions_with_background) + batch_size = features.get_shape().as_list()[0] + if batch_size is None: + batch_size = tf.shape(features)[0] + class_predictions_with_background = tf.reshape( + class_predictions_with_background, + [batch_size, -1, self._num_class_slots]) + return class_predictions_with_background + + +class MaskRCNNClassHead(head.KerasHead): + """Mask RCNN class prediction head. 
+ + This is a piece of Mask RCNN which is responsible for predicting + just the class scores of boxes. + + Please refer to Mask RCNN paper: + https://arxiv.org/abs/1703.06870 + """ + + def __init__(self, + is_training, + num_class_slots, + fc_hyperparams, + freeze_batchnorm, + use_dropout, + dropout_keep_prob, + name=None): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_class_slots: number of class slots. Note that num_class_slots may or + may not include an implicit background category. + fc_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for fully connected dense ops. + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + use_dropout: Option to use dropout or not. Note that a single dropout + op is applied here prior to both box and class predictions, which stands + in contrast to the ConvolutionalBoxPredictor below. + dropout_keep_prob: Keep probability for dropout. + This is only used if use_dropout is True. + name: A string name scope to assign to the class head. If `None`, Keras + will auto-generate one from the class name. + """ + super(MaskRCNNClassHead, self).__init__(name=name) + self._is_training = is_training + self._freeze_batchnorm = freeze_batchnorm + self._num_class_slots = num_class_slots + self._fc_hyperparams = fc_hyperparams + self._use_dropout = use_dropout + self._dropout_keep_prob = dropout_keep_prob + + self._class_predictor_layers = [tf.keras.layers.Flatten()] + + if self._use_dropout: + self._class_predictor_layers.append( + tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) + + self._class_predictor_layers.append( + tf.keras.layers.Dense(self._num_class_slots, + name='ClassPredictor_dense')) + self._class_predictor_layers.append( + fc_hyperparams.build_batch_norm(training=(is_training and + not freeze_batchnorm), + name='ClassPredictor_batchnorm')) + + def _predict(self, features): + """Predicts the class scores for boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing features for a batch of images. + + Returns: + class_predictions_with_background: A float tensor of shape + [batch_size, 1, num_class_slots] representing the class predictions for + the proposals. + """ + spatial_averaged_roi_pooled_features = tf.reduce_mean( + features, [1, 2], keep_dims=True, name='AvgPool') + net = spatial_averaged_roi_pooled_features + for layer in self._class_predictor_layers: + net = layer(net) + class_predictions_with_background = tf.reshape( + net, + [-1, 1, self._num_class_slots]) + return class_predictions_with_background + + +class WeightSharedConvolutionalClassHead(head.KerasHead): + """Weight shared convolutional class prediction head. + + This head allows sharing the same set of parameters (weights) when called more + then once on different feature maps. + """ + + def __init__(self, + num_class_slots, + num_predictions_per_location, + conv_hyperparams, + kernel_size=3, + class_prediction_bias_init=0.0, + use_dropout=False, + dropout_keep_prob=0.8, + use_depthwise=False, + score_converter_fn=tf.identity, + return_flat_predictions=True, + name=None): + """Constructor. + + Args: + num_class_slots: number of class slots. Note that num_class_slots may or + may not include an implicit background category. 
+ num_predictions_per_location: Number of box predictions to be made per + spatial location. Int specifying number of boxes per location. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + kernel_size: Size of final convolution kernel. + class_prediction_bias_init: constant value to initialize bias of the last + conv2d layer before class prediction. + use_dropout: Whether to apply dropout to class prediction head. + dropout_keep_prob: Probability of keeping activiations. + use_depthwise: Whether to use depthwise convolutions for prediction + steps. Default is False. + score_converter_fn: Callable elementwise nonlinearity (that takes tensors + as inputs and returns tensors). + return_flat_predictions: If true, returns flattened prediction tensor + of shape [batch, height * width * num_predictions_per_location, + box_coder]. Otherwise returns the prediction tensor before reshaping, + whose shape is [batch, height, width, num_predictions_per_location * + num_class_slots]. + name: A string name scope to assign to the model. If `None`, Keras + will auto-generate one from the class name. + + Raises: + ValueError: if use_depthwise is True and kernel_size is 1. + """ + if use_depthwise and (kernel_size == 1): + raise ValueError('Should not use 1x1 kernel when using depthwise conv') + + super(WeightSharedConvolutionalClassHead, self).__init__(name=name) + self._num_class_slots = num_class_slots + self._kernel_size = kernel_size + self._class_prediction_bias_init = class_prediction_bias_init + self._use_dropout = use_dropout + self._dropout_keep_prob = dropout_keep_prob + self._use_depthwise = use_depthwise + self._score_converter_fn = score_converter_fn + self._return_flat_predictions = return_flat_predictions + + self._class_predictor_layers = [] + + if self._use_dropout: + self._class_predictor_layers.append( + tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) + if self._use_depthwise: + self._class_predictor_layers.append( + tf.keras.layers.SeparableConv2D( + num_predictions_per_location * self._num_class_slots, + [self._kernel_size, self._kernel_size], + padding='SAME', + depth_multiplier=1, + strides=1, + name='ClassPredictor', + bias_initializer=tf.constant_initializer( + self._class_prediction_bias_init), + **conv_hyperparams.params(use_bias=True))) + else: + self._class_predictor_layers.append( + tf.keras.layers.Conv2D( + num_predictions_per_location * self._num_class_slots, + [self._kernel_size, self._kernel_size], + padding='SAME', + name='ClassPredictor', + bias_initializer=tf.constant_initializer( + self._class_prediction_bias_init), + **conv_hyperparams.params(use_bias=True))) + + def _predict(self, features): + """Predicts boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing image features. + + Returns: + class_predictions_with_background: A float tensor of shape + [batch_size, num_anchors, num_class_slots] representing the class + predictions for the proposals. 
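Editorial aside on class_prediction_bias_init above: one common (but not required) choice, for example in focal-loss training, is a prior-probability bias; a sketch under that assumption, not part of the patch:

import math

# With prior pi, initial sigmoid scores start near pi, so rare foreground
# classes are not swamped by confident background predictions at step 0.
pi = 0.01
class_prediction_bias_init = -math.log((1.0 - pi) / pi)  # about -4.6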
+ """ + class_predictions_with_background = features + for layer in self._class_predictor_layers: + class_predictions_with_background = layer( + class_predictions_with_background) + batch_size = features.get_shape().as_list()[0] + if batch_size is None: + batch_size = tf.shape(features)[0] + class_predictions_with_background = self._score_converter_fn( + class_predictions_with_background) + if self._return_flat_predictions: + class_predictions_with_background = tf.reshape( + class_predictions_with_background, + [batch_size, -1, self._num_class_slots]) + return class_predictions_with_background diff --git a/models/research/object_detection/predictors/heads/keras_class_head_tf2_test.py b/models/research/object_detection/predictors/heads/keras_class_head_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..aa890ce522defb6ec4c97965846e8f20529bc24b --- /dev/null +++ b/models/research/object_detection/predictors/heads/keras_class_head_tf2_test.py @@ -0,0 +1,203 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.predictors.heads.class_head.""" +import unittest +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import hyperparams_builder +from object_detection.predictors.heads import keras_class_head +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class ConvolutionalKerasClassPredictorTest(test_case.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def test_prediction_size_depthwise_false(self): + conv_hyperparams = self._build_conv_hyperparams() + class_prediction_head = keras_class_head.ConvolutionalClassHead( + is_training=True, + num_class_slots=20, + use_dropout=True, + dropout_keep_prob=0.5, + kernel_size=3, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=False, + num_predictions_per_location=1, + use_depthwise=False) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + class_predictions = class_prediction_head(image_feature,) + return class_predictions + class_predictions = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 20], class_predictions.shape) + + def test_prediction_size_depthwise_true(self): + conv_hyperparams = self._build_conv_hyperparams() + class_prediction_head = keras_class_head.ConvolutionalClassHead( + 
is_training=True, + num_class_slots=20, + use_dropout=True, + dropout_keep_prob=0.5, + kernel_size=3, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=False, + num_predictions_per_location=1, + use_depthwise=True) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + class_predictions = class_prediction_head(image_feature,) + return class_predictions + class_predictions = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 20], class_predictions.shape) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class MaskRCNNClassHeadTest(test_case.TestCase): + + def _build_fc_hyperparams(self, + op_type=hyperparams_pb2.Hyperparams.FC): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.KerasLayerHyperparams(hyperparams) + + def test_prediction_size(self): + class_prediction_head = keras_class_head.MaskRCNNClassHead( + is_training=False, + num_class_slots=20, + fc_hyperparams=self._build_fc_hyperparams(), + freeze_batchnorm=False, + use_dropout=True, + dropout_keep_prob=0.5) + def graph_fn(): + roi_pooled_features = tf.random_uniform( + [64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + prediction = class_prediction_head(roi_pooled_features) + return prediction + prediction = self.execute(graph_fn, []) + self.assertAllEqual([64, 1, 20], prediction.shape) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class WeightSharedConvolutionalKerasClassPredictorTest(test_case.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def test_prediction_size_depthwise_false(self): + conv_hyperparams = self._build_conv_hyperparams() + class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead( + num_class_slots=20, + conv_hyperparams=conv_hyperparams, + num_predictions_per_location=1, + use_depthwise=False) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + class_predictions = class_prediction_head(image_feature) + return class_predictions + class_predictions = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 20], class_predictions.shape) + + def test_prediction_size_depthwise_true(self): + conv_hyperparams = self._build_conv_hyperparams() + class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead( + num_class_slots=20, + conv_hyperparams=conv_hyperparams, + num_predictions_per_location=1, + use_depthwise=True) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + class_predictions = class_prediction_head(image_feature) + return class_predictions + class_predictions = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 20], class_predictions.shape) + + def test_variable_count_depth_wise_true(self): + conv_hyperparams = self._build_conv_hyperparams() + class_prediction_head = ( + 
keras_class_head.WeightSharedConvolutionalClassHead( + num_class_slots=20, + conv_hyperparams=conv_hyperparams, + num_predictions_per_location=1, + use_depthwise=True)) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + class_prediction_head(image_feature) + self.assertEqual(len(class_prediction_head.variables), 3) + + def test_variable_count_depth_wise_False(self): + conv_hyperparams = self._build_conv_hyperparams() + class_prediction_head = ( + keras_class_head.WeightSharedConvolutionalClassHead( + num_class_slots=20, + conv_hyperparams=conv_hyperparams, + num_predictions_per_location=1, + use_depthwise=False)) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + class_prediction_head(image_feature) + self.assertEqual(len(class_prediction_head.variables), 2) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/predictors/heads/keras_mask_head.py b/models/research/object_detection/predictors/heads/keras_mask_head.py new file mode 100644 index 0000000000000000000000000000000000000000..3b65cc4b6588457908cfcb4c97efb8a1e5313096 --- /dev/null +++ b/models/research/object_detection/predictors/heads/keras_mask_head.py @@ -0,0 +1,447 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Keras Mask Heads. + +Contains Mask prediction head classes for different meta architectures. +All the mask prediction heads have a predict function that receives the +`features` as the first argument and returns `mask_predictions`. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +from six.moves import range +import tensorflow.compat.v1 as tf + +from object_detection.predictors.heads import head +from object_detection.utils import ops +from object_detection.utils import shape_utils + + +class ConvolutionalMaskHead(head.KerasHead): + """Convolutional class prediction head.""" + + def __init__(self, + is_training, + num_classes, + use_dropout, + dropout_keep_prob, + kernel_size, + num_predictions_per_location, + conv_hyperparams, + freeze_batchnorm, + use_depthwise=False, + mask_height=7, + mask_width=7, + masks_are_class_agnostic=False, + name=None): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: Number of classes. + use_dropout: Option to use dropout or not. Note that a single dropout + op is applied here prior to both box and class predictions, which stands + in contrast to the ConvolutionalBoxPredictor below. + dropout_keep_prob: Keep probability for dropout. + This is only used if use_dropout is True. + kernel_size: Size of final convolution kernel. 
If the + spatial resolution of the feature map is smaller than the kernel size, + then the kernel size is automatically set to be + min(feature_width, feature_height). + num_predictions_per_location: Number of box predictions to be made per + spatial location. Int specifying number of boxes per location. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + freeze_batchnorm: Bool. Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + use_depthwise: Whether to use depthwise convolutions for prediction + steps. Default is False. + mask_height: Desired output mask height. The default value is 7. + mask_width: Desired output mask width. The default value is 7. + masks_are_class_agnostic: Boolean determining if the mask-head is + class-agnostic or not. + name: A string name scope to assign to the model. If `None`, Keras + will auto-generate one from the class name. + + Raises: + ValueError: if min_depth > max_depth. + """ + super(ConvolutionalMaskHead, self).__init__(name=name) + self._is_training = is_training + self._num_classes = num_classes + self._use_dropout = use_dropout + self._dropout_keep_prob = dropout_keep_prob + self._kernel_size = kernel_size + self._num_predictions_per_location = num_predictions_per_location + self._use_depthwise = use_depthwise + self._mask_height = mask_height + self._mask_width = mask_width + self._masks_are_class_agnostic = masks_are_class_agnostic + + self._mask_predictor_layers = [] + + # Add a slot for the background class. + if self._masks_are_class_agnostic: + self._num_masks = 1 + else: + self._num_masks = self._num_classes + + num_mask_channels = self._num_masks * self._mask_height * self._mask_width + + if self._use_dropout: + self._mask_predictor_layers.append( + # The Dropout layer's `training` parameter for the call method must + # be set implicitly by the Keras set_learning_phase. The object + # detection training code takes care of this. + tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) + if self._use_depthwise: + self._mask_predictor_layers.append( + tf.keras.layers.DepthwiseConv2D( + [self._kernel_size, self._kernel_size], + padding='SAME', + depth_multiplier=1, + strides=1, + dilation_rate=1, + name='MaskPredictor_depthwise', + **conv_hyperparams.params())) + self._mask_predictor_layers.append( + conv_hyperparams.build_batch_norm( + training=(is_training and not freeze_batchnorm), + name='MaskPredictor_depthwise_batchnorm')) + self._mask_predictor_layers.append( + conv_hyperparams.build_activation_layer( + name='MaskPredictor_depthwise_activation')) + self._mask_predictor_layers.append( + tf.keras.layers.Conv2D( + num_predictions_per_location * num_mask_channels, [1, 1], + name='MaskPredictor', + **conv_hyperparams.params(use_bias=True))) + else: + self._mask_predictor_layers.append( + tf.keras.layers.Conv2D( + num_predictions_per_location * num_mask_channels, + [self._kernel_size, self._kernel_size], + padding='SAME', + name='MaskPredictor', + **conv_hyperparams.params(use_bias=True))) + + def _predict(self, features): + """Predicts boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing image features. 
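Editorial aside on the channel bookkeeping in ConvolutionalMaskHead above (illustrative numbers, not part of the patch):

# With per-class masks, each prediction slot emits num_masks * H * W channels,
# which _predict later reshapes to [batch, num_anchors, num_masks, H, W].
num_classes, mask_height, mask_width = 90, 7, 7
masks_are_class_agnostic = False
num_masks = 1 if masks_are_class_agnostic else num_classes
num_mask_channels = num_masks * mask_height * mask_width
print(num_mask_channels)  # 4410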
+ + Returns: + mask_predictions: A float tensors of shape + [batch_size, num_anchors, num_masks, mask_height, mask_width] + representing the mask predictions for the proposals. + """ + mask_predictions = features + for layer in self._mask_predictor_layers: + mask_predictions = layer(mask_predictions) + batch_size = features.get_shape().as_list()[0] + if batch_size is None: + batch_size = tf.shape(features)[0] + mask_predictions = tf.reshape( + mask_predictions, + [batch_size, -1, self._num_masks, self._mask_height, self._mask_width]) + return mask_predictions + + +class MaskRCNNMaskHead(head.KerasHead): + """Mask RCNN mask prediction head. + + This is a piece of Mask RCNN which is responsible for predicting + just the pixelwise foreground scores for regions within the boxes. + + Please refer to Mask RCNN paper: + https://arxiv.org/abs/1703.06870 + """ + + def __init__(self, + is_training, + num_classes, + freeze_batchnorm, + conv_hyperparams, + mask_height=14, + mask_width=14, + mask_prediction_num_conv_layers=2, + mask_prediction_conv_depth=256, + masks_are_class_agnostic=False, + convolve_then_upsample=False, + name=None): + """Constructor. + + Args: + is_training: Indicates whether the Mask head is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + mask_height: Desired output mask height. The default value is 14. + mask_width: Desired output mask width. The default value is 14. + mask_prediction_num_conv_layers: Number of convolution layers applied to + the image_features in mask prediction branch. + mask_prediction_conv_depth: The depth for the first conv2d_transpose op + applied to the image_features in the mask prediction branch. If set + to 0, the depth of the convolution layers will be automatically chosen + based on the number of object classes and the number of channels in the + image features. + masks_are_class_agnostic: Boolean determining if the mask-head is + class-agnostic or not. + convolve_then_upsample: Whether to apply convolutions on mask features + before upsampling using nearest neighbor resizing. Otherwise, mask + features are resized to [`mask_height`, `mask_width`] using bilinear + resizing before applying convolutions. + name: A string name scope to assign to the mask head. If `None`, Keras + will auto-generate one from the class name. 
+ """ + super(MaskRCNNMaskHead, self).__init__(name=name) + self._is_training = is_training + self._freeze_batchnorm = freeze_batchnorm + self._num_classes = num_classes + self._conv_hyperparams = conv_hyperparams + self._mask_height = mask_height + self._mask_width = mask_width + self._mask_prediction_num_conv_layers = mask_prediction_num_conv_layers + self._mask_prediction_conv_depth = mask_prediction_conv_depth + self._masks_are_class_agnostic = masks_are_class_agnostic + self._convolve_then_upsample = convolve_then_upsample + + self._mask_predictor_layers = [] + + def build(self, input_shapes): + num_conv_channels = self._mask_prediction_conv_depth + if num_conv_channels == 0: + num_feature_channels = input_shapes.as_list()[3] + num_conv_channels = self._get_mask_predictor_conv_depth( + num_feature_channels, self._num_classes) + + for i in range(self._mask_prediction_num_conv_layers - 1): + self._mask_predictor_layers.append( + tf.keras.layers.Conv2D( + num_conv_channels, + [3, 3], + padding='SAME', + name='MaskPredictor_conv2d_{}'.format(i), + **self._conv_hyperparams.params())) + self._mask_predictor_layers.append( + self._conv_hyperparams.build_batch_norm( + training=(self._is_training and not self._freeze_batchnorm), + name='MaskPredictor_batchnorm_{}'.format(i))) + self._mask_predictor_layers.append( + self._conv_hyperparams.build_activation_layer( + name='MaskPredictor_activation_{}'.format(i))) + + if self._convolve_then_upsample: + # Replace Transposed Convolution with a Nearest Neighbor upsampling step + # followed by 3x3 convolution. + height_scale = self._mask_height // shape_utils.get_dim_as_int( + input_shapes[1]) + width_scale = self._mask_width // shape_utils.get_dim_as_int( + input_shapes[2]) + # pylint: disable=g-long-lambda + self._mask_predictor_layers.append(tf.keras.layers.Lambda( + lambda features: ops.nearest_neighbor_upsampling( + features, height_scale=height_scale, width_scale=width_scale) + )) + # pylint: enable=g-long-lambda + self._mask_predictor_layers.append( + tf.keras.layers.Conv2D( + num_conv_channels, + [3, 3], + padding='SAME', + name='MaskPredictor_upsample_conv2d', + **self._conv_hyperparams.params())) + self._mask_predictor_layers.append( + self._conv_hyperparams.build_batch_norm( + training=(self._is_training and not self._freeze_batchnorm), + name='MaskPredictor_upsample_batchnorm')) + self._mask_predictor_layers.append( + self._conv_hyperparams.build_activation_layer( + name='MaskPredictor_upsample_activation')) + + num_masks = 1 if self._masks_are_class_agnostic else self._num_classes + self._mask_predictor_layers.append( + tf.keras.layers.Conv2D( + num_masks, + [3, 3], + padding='SAME', + name='MaskPredictor_last_conv2d', + **self._conv_hyperparams.params(use_bias=True))) + + self.built = True + + def _get_mask_predictor_conv_depth(self, + num_feature_channels, + num_classes, + class_weight=3.0, + feature_weight=2.0): + """Computes the depth of the mask predictor convolutions. + + Computes the depth of the mask predictor convolutions given feature channels + and number of classes by performing a weighted average of the two in + log space to compute the number of convolution channels. The weights that + are used for computing the weighted average do not need to sum to 1. + + Args: + num_feature_channels: An integer containing the number of feature + channels. + num_classes: An integer containing the number of classes. + class_weight: Class weight used in computing the weighted average. 
+ feature_weight: Feature weight used in computing the weighted average. + + Returns: + An integer containing the number of convolution channels used by mask + predictor. + """ + num_feature_channels_log = math.log(float(num_feature_channels), 2.0) + num_classes_log = math.log(float(num_classes), 2.0) + weighted_num_feature_channels_log = ( + num_feature_channels_log * feature_weight) + weighted_num_classes_log = num_classes_log * class_weight + total_weight = feature_weight + class_weight + num_conv_channels_log = round( + (weighted_num_feature_channels_log + weighted_num_classes_log) / + total_weight) + return int(math.pow(2.0, num_conv_channels_log)) + + def _predict(self, features): + """Predicts pixelwise foreground scores for regions within the boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing features for a batch of images. + + Returns: + instance_masks: A float tensor of shape + [batch_size, 1, num_classes, mask_height, mask_width]. + """ + if not self._convolve_then_upsample: + features = tf.image.resize_bilinear( + features, [self._mask_height, self._mask_width], + align_corners=True) + + mask_predictions = features + for layer in self._mask_predictor_layers: + mask_predictions = layer(mask_predictions) + return tf.expand_dims( + tf.transpose(mask_predictions, perm=[0, 3, 1, 2]), + axis=1, + name='MaskPredictor') + + +class WeightSharedConvolutionalMaskHead(head.KerasHead): + """Weight shared convolutional mask prediction head based on Keras.""" + + def __init__(self, + num_classes, + num_predictions_per_location, + conv_hyperparams, + kernel_size=3, + use_dropout=False, + dropout_keep_prob=0.8, + mask_height=7, + mask_width=7, + masks_are_class_agnostic=False, + name=None): + """Constructor. + + Args: + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + num_predictions_per_location: Number of box predictions to be made per + spatial location. Int specifying number of boxes per location. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + kernel_size: Size of final convolution kernel. + use_dropout: Whether to apply dropout to class prediction head. + dropout_keep_prob: Probability of keeping activiations. + mask_height: Desired output mask height. The default value is 7. + mask_width: Desired output mask width. The default value is 7. + masks_are_class_agnostic: Boolean determining if the mask-head is + class-agnostic or not. + name: A string name scope to assign to the model. If `None`, Keras + will auto-generate one from the class name. + + Raises: + ValueError: if min_depth > max_depth. 
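As a quick sanity check of the depth heuristic in `_get_mask_predictor_conv_depth`, here is the weighted log-space average evaluated by hand with hypothetical inputs (1024 feature channels, 20 classes, the default weights); this is just the formula above worked out, not part of the library.

```python
import math

# Hypothetical inputs: 1024 feature channels, 20 classes, default weights.
num_feature_channels, num_classes = 1024, 20
feature_weight, class_weight = 2.0, 3.0

log_channels = math.log(num_feature_channels, 2.0)  # 10.0
log_classes = math.log(num_classes, 2.0)            # ~4.32
weighted_avg = (log_channels * feature_weight + log_classes * class_weight) / (
    feature_weight + class_weight)                   # ~6.59

depth = int(math.pow(2.0, round(weighted_avg)))      # 2 ** 7
print(depth)  # 128
```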
+ """ + super(WeightSharedConvolutionalMaskHead, self).__init__(name=name) + self._num_classes = num_classes + self._num_predictions_per_location = num_predictions_per_location + self._kernel_size = kernel_size + self._use_dropout = use_dropout + self._dropout_keep_prob = dropout_keep_prob + self._mask_height = mask_height + self._mask_width = mask_width + self._masks_are_class_agnostic = masks_are_class_agnostic + + self._mask_predictor_layers = [] + + if self._masks_are_class_agnostic: + self._num_masks = 1 + else: + self._num_masks = self._num_classes + num_mask_channels = self._num_masks * self._mask_height * self._mask_width + + if self._use_dropout: + self._mask_predictor_layers.append( + tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) + self._mask_predictor_layers.append( + tf.keras.layers.Conv2D( + num_predictions_per_location * num_mask_channels, + [self._kernel_size, self._kernel_size], + padding='SAME', + name='MaskPredictor', + **conv_hyperparams.params(use_bias=True))) + + def _predict(self, features): + """Predicts boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing image features. + + Returns: + mask_predictions: A tensor of shape + [batch_size, num_anchors, num_classes, mask_height, mask_width] + representing the mask predictions for the proposals. + """ + mask_predictions = features + for layer in self._mask_predictor_layers: + mask_predictions = layer(mask_predictions) + batch_size = features.get_shape().as_list()[0] + if batch_size is None: + batch_size = tf.shape(features)[0] + mask_predictions = tf.reshape( + mask_predictions, + [batch_size, -1, self._num_masks, self._mask_height, self._mask_width]) + return mask_predictions diff --git a/models/research/object_detection/predictors/heads/keras_mask_head_tf2_test.py b/models/research/object_detection/predictors/heads/keras_mask_head_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..5465be06fe1fe5150c8c4c3583bfcd3be5c5d079 --- /dev/null +++ b/models/research/object_detection/predictors/heads/keras_mask_head_tf2_test.py @@ -0,0 +1,252 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.predictors.heads.mask_head.""" +import unittest +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import hyperparams_builder +from object_detection.predictors.heads import keras_mask_head +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class ConvolutionalMaskPredictorTest(test_case.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def test_prediction_size_use_depthwise_false(self): + conv_hyperparams = self._build_conv_hyperparams() + mask_prediction_head = keras_mask_head.ConvolutionalMaskHead( + is_training=True, + num_classes=20, + use_dropout=True, + dropout_keep_prob=0.5, + kernel_size=3, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=False, + num_predictions_per_location=1, + use_depthwise=False, + mask_height=7, + mask_width=7) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + mask_predictions = mask_prediction_head(image_feature) + return mask_predictions + mask_predictions = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 20, 7, 7], mask_predictions.shape) + + def test_prediction_size_use_depthwise_true(self): + conv_hyperparams = self._build_conv_hyperparams() + mask_prediction_head = keras_mask_head.ConvolutionalMaskHead( + is_training=True, + num_classes=20, + use_dropout=True, + dropout_keep_prob=0.5, + kernel_size=3, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=False, + num_predictions_per_location=1, + use_depthwise=True, + mask_height=7, + mask_width=7) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + mask_predictions = mask_prediction_head(image_feature) + return mask_predictions + mask_predictions = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 20, 7, 7], mask_predictions.shape) + + def test_class_agnostic_prediction_size_use_depthwise_false(self): + conv_hyperparams = self._build_conv_hyperparams() + mask_prediction_head = keras_mask_head.ConvolutionalMaskHead( + is_training=True, + num_classes=20, + use_dropout=True, + dropout_keep_prob=0.5, + kernel_size=3, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=False, + num_predictions_per_location=1, + use_depthwise=False, + mask_height=7, + mask_width=7, + masks_are_class_agnostic=True) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + mask_predictions = mask_prediction_head(image_feature) + return mask_predictions + mask_predictions = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 1, 7, 7], mask_predictions.shape) + + def test_class_agnostic_prediction_size_use_depthwise_true(self): + conv_hyperparams = self._build_conv_hyperparams() + mask_prediction_head = keras_mask_head.ConvolutionalMaskHead( + is_training=True, + num_classes=20, + use_dropout=True, + 
dropout_keep_prob=0.5, + kernel_size=3, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=False, + num_predictions_per_location=1, + use_depthwise=True, + mask_height=7, + mask_width=7, + masks_are_class_agnostic=True) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + mask_predictions = mask_prediction_head(image_feature) + return mask_predictions + mask_predictions = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 1, 7, 7], mask_predictions.shape) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class MaskRCNNMaskHeadTest(test_case.TestCase): + + def _build_conv_hyperparams(self, + op_type=hyperparams_pb2.Hyperparams.CONV): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.KerasLayerHyperparams(hyperparams) + + def test_prediction_size(self): + mask_prediction_head = keras_mask_head.MaskRCNNMaskHead( + is_training=True, + num_classes=20, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + mask_height=14, + mask_width=14, + mask_prediction_num_conv_layers=2, + mask_prediction_conv_depth=256, + masks_are_class_agnostic=False) + def graph_fn(): + roi_pooled_features = tf.random_uniform( + [64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + prediction = mask_prediction_head(roi_pooled_features) + return prediction + prediction = self.execute(graph_fn, []) + self.assertAllEqual([64, 1, 20, 14, 14], prediction.shape) + + def test_prediction_size_with_convolve_then_upsample(self): + mask_prediction_head = keras_mask_head.MaskRCNNMaskHead( + is_training=True, + num_classes=20, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + mask_height=28, + mask_width=28, + mask_prediction_num_conv_layers=2, + mask_prediction_conv_depth=256, + masks_are_class_agnostic=True, + convolve_then_upsample=True) + def graph_fn(): + roi_pooled_features = tf.random_uniform( + [64, 14, 14, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + prediction = mask_prediction_head(roi_pooled_features) + return prediction + prediction = self.execute(graph_fn, []) + self.assertAllEqual([64, 1, 1, 28, 28], prediction.shape) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class WeightSharedConvolutionalMaskPredictorTest(test_case.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def test_prediction_size(self): + mask_prediction_head = ( + keras_mask_head.WeightSharedConvolutionalMaskHead( + num_classes=20, + num_predictions_per_location=1, + conv_hyperparams=self._build_conv_hyperparams(), + mask_height=7, + mask_width=7)) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + mask_predictions = mask_prediction_head(image_feature) + return mask_predictions + mask_predictions = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 20, 7, 7], 
mask_predictions.shape) + + def test_class_agnostic_prediction_size(self): + mask_prediction_head = ( + keras_mask_head.WeightSharedConvolutionalMaskHead( + num_classes=20, + num_predictions_per_location=1, + conv_hyperparams=self._build_conv_hyperparams(), + mask_height=7, + mask_width=7, + masks_are_class_agnostic=True)) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + mask_predictions = mask_prediction_head(image_feature) + return mask_predictions + mask_predictions = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 1, 7, 7], mask_predictions.shape) + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/predictors/heads/keypoint_head.py b/models/research/object_detection/predictors/heads/keypoint_head.py new file mode 100644 index 0000000000000000000000000000000000000000..79a4d4bef3a3877b8b1ed96f6c36b9287dad3f37 --- /dev/null +++ b/models/research/object_detection/predictors/heads/keypoint_head.py @@ -0,0 +1,115 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Keypoint Head. + +Contains Keypoint prediction head classes for different meta architectures. +All the keypoint prediction heads have a predict function that receives the +`features` as the first argument and returns `keypoint_predictions`. +Keypoints could be used to represent the human body joint locations as in +Mask RCNN paper. Or they could be used to represent different part locations of +objects. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.predictors.heads import head + + +class MaskRCNNKeypointHead(head.Head): + """Mask RCNN keypoint prediction head. + + Please refer to Mask RCNN paper: + https://arxiv.org/abs/1703.06870 + """ + + def __init__(self, + num_keypoints=17, + conv_hyperparams_fn=None, + keypoint_heatmap_height=56, + keypoint_heatmap_width=56, + keypoint_prediction_num_conv_layers=8, + keypoint_prediction_conv_depth=512): + """Constructor. + + Args: + num_keypoints: (int scalar) number of keypoints. + conv_hyperparams_fn: A function to generate tf-slim arg_scope with + hyperparameters for convolution ops. + keypoint_heatmap_height: Desired output mask height. The default value + is 14. + keypoint_heatmap_width: Desired output mask width. The default value + is 14. + keypoint_prediction_num_conv_layers: Number of convolution layers applied + to the image_features in mask prediction branch. + keypoint_prediction_conv_depth: The depth for the first conv2d_transpose + op applied to the image_features in the mask prediction branch. 
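The output shape of `MaskRCNNKeypointHead.predict` (defined just below) follows directly from the constructor defaults above: the final bilinear resize forces the heatmap to the configured size regardless of the intermediate spatial dimensions. The small check below uses hypothetical numbers that mirror the tf1 keypoint-head test later in this diff.

```python
# Hypothetical values mirroring the keypoint-head tf1 test further down.
batch, num_keypoints = 64, 17
heatmap_h, heatmap_w = 56, 56

# Whatever spatial size the conv stack and the 2x2 conv2d_transpose produce,
# the final bilinear resize maps it to [heatmap_h, heatmap_w]; transposing to
# channels-first and adding a leading singleton axis then gives the documented
# [batch, 1, num_keypoints, heatmap_h, heatmap_w] heatmap tensor.
print([batch, 1, num_keypoints, heatmap_h, heatmap_w])  # [64, 1, 17, 56, 56]
```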
If set + to 0, the depth of the convolution layers will be automatically chosen + based on the number of object classes and the number of channels in the + image features. + """ + super(MaskRCNNKeypointHead, self).__init__() + self._num_keypoints = num_keypoints + self._conv_hyperparams_fn = conv_hyperparams_fn + self._keypoint_heatmap_height = keypoint_heatmap_height + self._keypoint_heatmap_width = keypoint_heatmap_width + self._keypoint_prediction_num_conv_layers = ( + keypoint_prediction_num_conv_layers) + self._keypoint_prediction_conv_depth = keypoint_prediction_conv_depth + + def predict(self, features, num_predictions_per_location=1): + """Performs keypoint prediction. + + Args: + features: A float tensor of shape [batch_size, height, width, + channels] containing features for a batch of images. + num_predictions_per_location: Int containing number of predictions per + location. + + Returns: + instance_masks: A float tensor of shape + [batch_size, 1, num_keypoints, heatmap_height, heatmap_width]. + + Raises: + ValueError: If num_predictions_per_location is not 1. + """ + if num_predictions_per_location != 1: + raise ValueError('Only num_predictions_per_location=1 is supported') + with slim.arg_scope(self._conv_hyperparams_fn()): + net = slim.conv2d( + features, + self._keypoint_prediction_conv_depth, [3, 3], + scope='conv_1') + for i in range(1, self._keypoint_prediction_num_conv_layers): + net = slim.conv2d( + net, + self._keypoint_prediction_conv_depth, [3, 3], + scope='conv_%d' % (i + 1)) + net = slim.conv2d_transpose( + net, self._num_keypoints, [2, 2], scope='deconv1') + heatmaps_mask = tf.image.resize_bilinear( + net, [self._keypoint_heatmap_height, self._keypoint_heatmap_width], + align_corners=True, + name='upsample') + return tf.expand_dims( + tf.transpose(heatmaps_mask, perm=[0, 3, 1, 2]), + axis=1, + name='KeypointPredictor') diff --git a/models/research/object_detection/predictors/heads/keypoint_head_tf1_test.py b/models/research/object_detection/predictors/heads/keypoint_head_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..828174989133fd2ec6552ad848985719bdae35a5 --- /dev/null +++ b/models/research/object_detection/predictors/heads/keypoint_head_tf1_test.py @@ -0,0 +1,60 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.predictors.heads.keypoint_head.""" +import unittest +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import hyperparams_builder +from object_detection.predictors.heads import keypoint_head +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class MaskRCNNKeypointHeadTest(test_case.TestCase): + + def _build_arg_scope_with_hyperparams(self, + op_type=hyperparams_pb2.Hyperparams.FC): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.build(hyperparams, is_training=True) + + def test_prediction_size(self): + keypoint_prediction_head = keypoint_head.MaskRCNNKeypointHead( + conv_hyperparams_fn=self._build_arg_scope_with_hyperparams()) + roi_pooled_features = tf.random_uniform( + [64, 14, 14, 1024], minval=-2.0, maxval=2.0, dtype=tf.float32) + prediction = keypoint_prediction_head.predict( + features=roi_pooled_features, num_predictions_per_location=1) + self.assertAllEqual([64, 1, 17, 56, 56], prediction.get_shape().as_list()) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/predictors/heads/mask_head.py b/models/research/object_detection/predictors/heads/mask_head.py new file mode 100644 index 0000000000000000000000000000000000000000..ca0a694f531806982bda0f46c407babca36adeed --- /dev/null +++ b/models/research/object_detection/predictors/heads/mask_head.py @@ -0,0 +1,360 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Mask Head. + +Contains Mask prediction head classes for different meta architectures. +All the mask prediction heads have a predict function that receives the +`features` as the first argument and returns `mask_predictions`. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +from six.moves import range +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.predictors.heads import head +from object_detection.utils import ops + + +class MaskRCNNMaskHead(head.Head): + """Mask RCNN mask prediction head. 
+ + Please refer to Mask RCNN paper: + https://arxiv.org/abs/1703.06870 + """ + + def __init__(self, + num_classes, + conv_hyperparams_fn=None, + mask_height=14, + mask_width=14, + mask_prediction_num_conv_layers=2, + mask_prediction_conv_depth=256, + masks_are_class_agnostic=False, + convolve_then_upsample=False): + """Constructor. + + Args: + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + conv_hyperparams_fn: A function to generate tf-slim arg_scope with + hyperparameters for convolution ops. + mask_height: Desired output mask height. The default value is 14. + mask_width: Desired output mask width. The default value is 14. + mask_prediction_num_conv_layers: Number of convolution layers applied to + the image_features in mask prediction branch. + mask_prediction_conv_depth: The depth for the first conv2d_transpose op + applied to the image_features in the mask prediction branch. If set + to 0, the depth of the convolution layers will be automatically chosen + based on the number of object classes and the number of channels in the + image features. + masks_are_class_agnostic: Boolean determining if the mask-head is + class-agnostic or not. + convolve_then_upsample: Whether to apply convolutions on mask features + before upsampling using nearest neighbor resizing. Otherwise, mask + features are resized to [`mask_height`, `mask_width`] using bilinear + resizing before applying convolutions. + + Raises: + ValueError: conv_hyperparams_fn is None. + """ + super(MaskRCNNMaskHead, self).__init__() + self._num_classes = num_classes + self._conv_hyperparams_fn = conv_hyperparams_fn + self._mask_height = mask_height + self._mask_width = mask_width + self._mask_prediction_num_conv_layers = mask_prediction_num_conv_layers + self._mask_prediction_conv_depth = mask_prediction_conv_depth + self._masks_are_class_agnostic = masks_are_class_agnostic + self._convolve_then_upsample = convolve_then_upsample + if conv_hyperparams_fn is None: + raise ValueError('conv_hyperparams_fn is None.') + + def _get_mask_predictor_conv_depth(self, + num_feature_channels, + num_classes, + class_weight=3.0, + feature_weight=2.0): + """Computes the depth of the mask predictor convolutions. + + Computes the depth of the mask predictor convolutions given feature channels + and number of classes by performing a weighted average of the two in + log space to compute the number of convolution channels. The weights that + are used for computing the weighted average do not need to sum to 1. + + Args: + num_feature_channels: An integer containing the number of feature + channels. + num_classes: An integer containing the number of classes. + class_weight: Class weight used in computing the weighted average. + feature_weight: Feature weight used in computing the weighted average. + + Returns: + An integer containing the number of convolution channels used by mask + predictor. 
+ """ + num_feature_channels_log = math.log(float(num_feature_channels), 2.0) + num_classes_log = math.log(float(num_classes), 2.0) + weighted_num_feature_channels_log = ( + num_feature_channels_log * feature_weight) + weighted_num_classes_log = num_classes_log * class_weight + total_weight = feature_weight + class_weight + num_conv_channels_log = round( + (weighted_num_feature_channels_log + weighted_num_classes_log) / + total_weight) + return int(math.pow(2.0, num_conv_channels_log)) + + def predict(self, features, num_predictions_per_location=1): + """Performs mask prediction. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing features for a batch of images. + num_predictions_per_location: Int containing number of predictions per + location. + + Returns: + instance_masks: A float tensor of shape + [batch_size, 1, num_classes, mask_height, mask_width]. + + Raises: + ValueError: If num_predictions_per_location is not 1. + """ + if num_predictions_per_location != 1: + raise ValueError('Only num_predictions_per_location=1 is supported') + num_conv_channels = self._mask_prediction_conv_depth + if num_conv_channels == 0: + num_feature_channels = features.get_shape().as_list()[3] + num_conv_channels = self._get_mask_predictor_conv_depth( + num_feature_channels, self._num_classes) + with slim.arg_scope(self._conv_hyperparams_fn()): + if not self._convolve_then_upsample: + features = tf.image.resize_bilinear( + features, [self._mask_height, self._mask_width], + align_corners=True) + for _ in range(self._mask_prediction_num_conv_layers - 1): + features = slim.conv2d( + features, + num_outputs=num_conv_channels, + kernel_size=[3, 3]) + if self._convolve_then_upsample: + # Replace Transposed Convolution with a Nearest Neighbor upsampling step + # followed by 3x3 convolution. + height_scale = self._mask_height // features.shape[1].value + width_scale = self._mask_width // features.shape[2].value + features = ops.nearest_neighbor_upsampling( + features, height_scale=height_scale, width_scale=width_scale) + features = slim.conv2d( + features, + num_outputs=num_conv_channels, + kernel_size=[3, 3]) + + num_masks = 1 if self._masks_are_class_agnostic else self._num_classes + mask_predictions = slim.conv2d( + features, + num_outputs=num_masks, + activation_fn=None, + normalizer_fn=None, + kernel_size=[3, 3]) + return tf.expand_dims( + tf.transpose(mask_predictions, perm=[0, 3, 1, 2]), + axis=1, + name='MaskPredictor') + + +class ConvolutionalMaskHead(head.Head): + """Convolutional class prediction head.""" + + def __init__(self, + is_training, + num_classes, + use_dropout, + dropout_keep_prob, + kernel_size, + use_depthwise=False, + mask_height=7, + mask_width=7, + masks_are_class_agnostic=False): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: Number of classes. + use_dropout: Option to use dropout or not. Note that a single dropout + op is applied here prior to both box and class predictions, which stands + in contrast to the ConvolutionalBoxPredictor below. + dropout_keep_prob: Keep probability for dropout. + This is only used if use_dropout is True. + kernel_size: Size of final convolution kernel. If the + spatial resolution of the feature map is smaller than the kernel size, + then the kernel size is automatically set to be + min(feature_width, feature_height). + use_depthwise: Whether to use depthwise convolutions for prediction + steps. Default is False. 
+ mask_height: Desired output mask height. The default value is 7. + mask_width: Desired output mask width. The default value is 7. + masks_are_class_agnostic: Boolean determining if the mask-head is + class-agnostic or not. + + Raises: + ValueError: if min_depth > max_depth. + """ + super(ConvolutionalMaskHead, self).__init__() + self._is_training = is_training + self._num_classes = num_classes + self._use_dropout = use_dropout + self._dropout_keep_prob = dropout_keep_prob + self._kernel_size = kernel_size + self._use_depthwise = use_depthwise + self._mask_height = mask_height + self._mask_width = mask_width + self._masks_are_class_agnostic = masks_are_class_agnostic + + def predict(self, features, num_predictions_per_location): + """Predicts boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing image features. + num_predictions_per_location: Number of box predictions to be made per + spatial location. + + Returns: + mask_predictions: A float tensors of shape + [batch_size, num_anchors, num_masks, mask_height, mask_width] + representing the mask predictions for the proposals. + """ + image_feature = features + # Add a slot for the background class. + if self._masks_are_class_agnostic: + num_masks = 1 + else: + num_masks = self._num_classes + num_mask_channels = num_masks * self._mask_height * self._mask_width + net = image_feature + if self._use_dropout: + net = slim.dropout(net, keep_prob=self._dropout_keep_prob) + if self._use_depthwise: + mask_predictions = slim.separable_conv2d( + net, None, [self._kernel_size, self._kernel_size], + padding='SAME', depth_multiplier=1, stride=1, + rate=1, scope='MaskPredictor_depthwise') + mask_predictions = slim.conv2d( + mask_predictions, + num_predictions_per_location * num_mask_channels, + [1, 1], + activation_fn=None, + normalizer_fn=None, + normalizer_params=None, + scope='MaskPredictor') + else: + mask_predictions = slim.conv2d( + net, + num_predictions_per_location * num_mask_channels, + [self._kernel_size, self._kernel_size], + activation_fn=None, + normalizer_fn=None, + normalizer_params=None, + scope='MaskPredictor') + batch_size = features.get_shape().as_list()[0] + if batch_size is None: + batch_size = tf.shape(features)[0] + mask_predictions = tf.reshape( + mask_predictions, + [batch_size, -1, num_masks, self._mask_height, self._mask_width]) + return mask_predictions + + +# TODO(alirezafathi): See if possible to unify Weight Shared with regular +# convolutional mask head. +class WeightSharedConvolutionalMaskHead(head.Head): + """Weight shared convolutional mask prediction head.""" + + def __init__(self, + num_classes, + kernel_size=3, + use_dropout=False, + dropout_keep_prob=0.8, + mask_height=7, + mask_width=7, + masks_are_class_agnostic=False): + """Constructor. + + Args: + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + kernel_size: Size of final convolution kernel. + use_dropout: Whether to apply dropout to class prediction head. + dropout_keep_prob: Probability of keeping activiations. + mask_height: Desired output mask height. The default value is 7. + mask_width: Desired output mask width. The default value is 7. + masks_are_class_agnostic: Boolean determining if the mask-head is + class-agnostic or not. 
+ """ + super(WeightSharedConvolutionalMaskHead, self).__init__() + self._num_classes = num_classes + self._kernel_size = kernel_size + self._use_dropout = use_dropout + self._dropout_keep_prob = dropout_keep_prob + self._mask_height = mask_height + self._mask_width = mask_width + self._masks_are_class_agnostic = masks_are_class_agnostic + + def predict(self, features, num_predictions_per_location): + """Predicts boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing image features. + num_predictions_per_location: Number of box predictions to be made per + spatial location. + + Returns: + mask_predictions: A tensor of shape + [batch_size, num_anchors, num_classes, mask_height, mask_width] + representing the mask predictions for the proposals. + """ + mask_predictions_net = features + if self._masks_are_class_agnostic: + num_masks = 1 + else: + num_masks = self._num_classes + num_mask_channels = num_masks * self._mask_height * self._mask_width + if self._use_dropout: + mask_predictions_net = slim.dropout( + mask_predictions_net, keep_prob=self._dropout_keep_prob) + mask_predictions = slim.conv2d( + mask_predictions_net, + num_predictions_per_location * num_mask_channels, + [self._kernel_size, self._kernel_size], + activation_fn=None, stride=1, padding='SAME', + normalizer_fn=None, + scope='MaskPredictor') + batch_size = features.get_shape().as_list()[0] + if batch_size is None: + batch_size = tf.shape(features)[0] + mask_predictions = tf.reshape( + mask_predictions, + [batch_size, -1, num_masks, self._mask_height, self._mask_width]) + return mask_predictions diff --git a/models/research/object_detection/predictors/heads/mask_head_tf1_test.py b/models/research/object_detection/predictors/heads/mask_head_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..152394836135abeaa68f32dd48275a89347d4059 --- /dev/null +++ b/models/research/object_detection/predictors/heads/mask_head_tf1_test.py @@ -0,0 +1,190 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.predictors.heads.mask_head.""" +import unittest +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import hyperparams_builder +from object_detection.predictors.heads import mask_head +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class MaskRCNNMaskHeadTest(test_case.TestCase): + + def _build_arg_scope_with_hyperparams(self, + op_type=hyperparams_pb2.Hyperparams.FC): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.build(hyperparams, is_training=True) + + def test_prediction_size(self): + mask_prediction_head = mask_head.MaskRCNNMaskHead( + num_classes=20, + conv_hyperparams_fn=self._build_arg_scope_with_hyperparams(), + mask_height=14, + mask_width=14, + mask_prediction_num_conv_layers=2, + mask_prediction_conv_depth=256, + masks_are_class_agnostic=False) + roi_pooled_features = tf.random_uniform( + [64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + prediction = mask_prediction_head.predict( + features=roi_pooled_features, num_predictions_per_location=1) + self.assertAllEqual([64, 1, 20, 14, 14], prediction.get_shape().as_list()) + + def test_prediction_size_with_convolve_then_upsample(self): + mask_prediction_head = mask_head.MaskRCNNMaskHead( + num_classes=20, + conv_hyperparams_fn=self._build_arg_scope_with_hyperparams(), + mask_height=28, + mask_width=28, + mask_prediction_num_conv_layers=2, + mask_prediction_conv_depth=256, + masks_are_class_agnostic=True, + convolve_then_upsample=True) + roi_pooled_features = tf.random_uniform( + [64, 14, 14, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + prediction = mask_prediction_head.predict( + features=roi_pooled_features, num_predictions_per_location=1) + self.assertAllEqual([64, 1, 1, 28, 28], prediction.get_shape().as_list()) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class ConvolutionalMaskPredictorTest(test_case.TestCase): + + def _build_arg_scope_with_hyperparams( + self, op_type=hyperparams_pb2.Hyperparams.CONV): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.build(hyperparams, is_training=True) + + def test_prediction_size(self): + mask_prediction_head = mask_head.ConvolutionalMaskHead( + is_training=True, + num_classes=20, + use_dropout=True, + dropout_keep_prob=0.5, + kernel_size=3, + mask_height=7, + mask_width=7) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + mask_predictions = mask_prediction_head.predict( + features=image_feature, + num_predictions_per_location=1) + self.assertAllEqual([64, 323, 20, 7, 7], + mask_predictions.get_shape().as_list()) + + def test_class_agnostic_prediction_size(self): + mask_prediction_head = mask_head.ConvolutionalMaskHead( + 
is_training=True, + num_classes=20, + use_dropout=True, + dropout_keep_prob=0.5, + kernel_size=3, + mask_height=7, + mask_width=7, + masks_are_class_agnostic=True) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + mask_predictions = mask_prediction_head.predict( + features=image_feature, + num_predictions_per_location=1) + self.assertAllEqual([64, 323, 1, 7, 7], + mask_predictions.get_shape().as_list()) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class WeightSharedConvolutionalMaskPredictorTest(test_case.TestCase): + + def _build_arg_scope_with_hyperparams( + self, op_type=hyperparams_pb2.Hyperparams.CONV): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.build(hyperparams, is_training=True) + + def test_prediction_size(self): + mask_prediction_head = ( + mask_head.WeightSharedConvolutionalMaskHead( + num_classes=20, + mask_height=7, + mask_width=7)) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + mask_predictions = mask_prediction_head.predict( + features=image_feature, + num_predictions_per_location=1) + self.assertAllEqual([64, 323, 20, 7, 7], + mask_predictions.get_shape().as_list()) + + def test_class_agnostic_prediction_size(self): + mask_prediction_head = ( + mask_head.WeightSharedConvolutionalMaskHead( + num_classes=20, + mask_height=7, + mask_width=7, + masks_are_class_agnostic=True)) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + mask_predictions = mask_prediction_head.predict( + features=image_feature, + num_predictions_per_location=1) + self.assertAllEqual([64, 323, 1, 7, 7], + mask_predictions.get_shape().as_list()) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/predictors/mask_rcnn_box_predictor.py b/models/research/object_detection/predictors/mask_rcnn_box_predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..26ff65daabdc8c140a42c8a0f6ed4d3cf42752c5 --- /dev/null +++ b/models/research/object_detection/predictors/mask_rcnn_box_predictor.py @@ -0,0 +1,141 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Mask R-CNN Box Predictor.""" +from object_detection.core import box_predictor + + +BOX_ENCODINGS = box_predictor.BOX_ENCODINGS +CLASS_PREDICTIONS_WITH_BACKGROUND = ( + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) +MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS + + +class MaskRCNNBoxPredictor(box_predictor.BoxPredictor): + """Mask R-CNN Box Predictor. 
+ + See Mask R-CNN: He, K., Gkioxari, G., Dollar, P., & Girshick, R. (2017). + Mask R-CNN. arXiv preprint arXiv:1703.06870. + + This is used for the second stage of the Mask R-CNN detector where proposals + cropped from an image are arranged along the batch dimension of the input + image_features tensor. Notice that locations are *not* shared across classes, + thus for each anchor, a separate prediction is made for each class. + + In addition to predicting boxes and classes, optionally this class allows + predicting masks and/or keypoints inside detection boxes. + + Currently this box predictor makes per-class predictions; that is, each + anchor makes a separate box prediction for each class. + """ + + def __init__(self, + is_training, + num_classes, + box_prediction_head, + class_prediction_head, + third_stage_heads): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + box_prediction_head: The head that predicts the boxes in second stage. + class_prediction_head: The head that predicts the classes in second stage. + third_stage_heads: A dictionary mapping head names to mask rcnn head + classes. + """ + super(MaskRCNNBoxPredictor, self).__init__(is_training, num_classes) + self._box_prediction_head = box_prediction_head + self._class_prediction_head = class_prediction_head + self._third_stage_heads = third_stage_heads + + @property + def num_classes(self): + return self._num_classes + + def get_second_stage_prediction_heads(self): + return BOX_ENCODINGS, CLASS_PREDICTIONS_WITH_BACKGROUND + + def get_third_stage_prediction_heads(self): + return sorted(self._third_stage_heads.keys()) + + def _predict(self, + image_features, + num_predictions_per_location, + prediction_stage=2): + """Optionally computes encoded object locations, confidences, and masks. + + Predicts the heads belonging to the given prediction stage. + + Args: + image_features: A list of float tensors of shape + [batch_size, height_i, width_i, channels_i] containing roi pooled + features for each image. The length of the list should be 1 otherwise + a ValueError will be raised. + num_predictions_per_location: A list of integers representing the number + of box predictions to be made per spatial location for each feature map. + Currently, this must be set to [1], or an error will be raised. + prediction_stage: Prediction stage. Acceptable values are 2 and 3. + + Returns: + A dictionary containing the predicted tensors that are listed in + self._prediction_heads. A subset of the following keys will exist in the + dictionary: + BOX_ENCODINGS: A float tensor of shape + [batch_size, 1, num_classes, code_size] representing the + location of the objects. + CLASS_PREDICTIONS_WITH_BACKGROUND: A float tensor of shape + [batch_size, 1, num_classes + 1] representing the class + predictions for the proposals. + MASK_PREDICTIONS: A float tensor of shape + [batch_size, 1, num_classes, image_height, image_width] + + Raises: + ValueError: If num_predictions_per_location is not 1 or if + len(image_features) is not 1. + ValueError: if prediction_stage is not 2 or 3. 
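To make the `prediction_stage` routing in `_predict` concrete, here is a small standalone sketch with stub heads. The `_StubHead` class, the plain-string dict keys, and the dummy proposals are invented for illustration only; they are not the real head objects or constants from this library.

```python
# Standalone sketch of the prediction_stage routing; _StubHead and these dict
# keys are invented for illustration and are not the library's head classes.
class _StubHead(object):

  def __init__(self, name):
    self._name = name

  def predict(self, features, num_predictions_per_location):
    return '%s for %d proposals' % (self._name, len(features))


box_head = _StubHead('box_encodings')
class_head = _StubHead('class_predictions_with_background')
third_stage_heads = {'mask_predictions': _StubHead('mask_predictions')}


def predict(features, prediction_stage=2):
  if prediction_stage == 2:
    return {
        'box_encodings': box_head.predict(features, 1),
        'class_predictions_with_background': class_head.predict(features, 1),
    }
  elif prediction_stage == 3:
    return {name: head.predict(features, 1)
            for name, head in sorted(third_stage_heads.items())}
  raise ValueError('prediction_stage should be either 2 or 3.')


rois = [[0.0, 0.0, 1.0, 1.0]] * 8  # eight dummy proposals
print(sorted(predict(rois, prediction_stage=2)))  # boxes + class scores
print(sorted(predict(rois, prediction_stage=3)))  # only third-stage heads
```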
+ """ + if (len(num_predictions_per_location) != 1 or + num_predictions_per_location[0] != 1): + raise ValueError('Currently FullyConnectedBoxPredictor only supports ' + 'predicting a single box per class per location.') + if len(image_features) != 1: + raise ValueError('length of `image_features` must be 1. Found {}'.format( + len(image_features))) + image_feature = image_features[0] + predictions_dict = {} + + if prediction_stage == 2: + predictions_dict[BOX_ENCODINGS] = self._box_prediction_head.predict( + features=image_feature, + num_predictions_per_location=num_predictions_per_location[0]) + predictions_dict[CLASS_PREDICTIONS_WITH_BACKGROUND] = ( + self._class_prediction_head.predict( + features=image_feature, + num_predictions_per_location=num_predictions_per_location[0])) + elif prediction_stage == 3: + for prediction_head in self.get_third_stage_prediction_heads(): + head_object = self._third_stage_heads[prediction_head] + predictions_dict[prediction_head] = head_object.predict( + features=image_feature, + num_predictions_per_location=num_predictions_per_location[0]) + else: + raise ValueError('prediction_stage should be either 2 or 3.') + + return predictions_dict diff --git a/models/research/object_detection/predictors/mask_rcnn_box_predictor_tf1_test.py b/models/research/object_detection/predictors/mask_rcnn_box_predictor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..d9a4bcbbf004dedc670956baf05615358e33e1a1 --- /dev/null +++ b/models/research/object_detection/predictors/mask_rcnn_box_predictor_tf1_test.py @@ -0,0 +1,154 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.predictors.mask_rcnn_box_predictor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import box_predictor_builder +from object_detection.builders import hyperparams_builder +from object_detection.predictors import mask_rcnn_box_predictor as box_predictor +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class MaskRCNNBoxPredictorTest(test_case.TestCase): + + def _build_arg_scope_with_hyperparams(self, + op_type=hyperparams_pb2.Hyperparams.FC): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.build(hyperparams, is_training=True) + + def test_get_boxes_with_five_classes(self): + def graph_fn(image_features): + mask_box_predictor = box_predictor_builder.build_mask_rcnn_box_predictor( + is_training=False, + num_classes=5, + fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), + use_dropout=False, + dropout_keep_prob=0.5, + box_code_size=4, + ) + box_predictions = mask_box_predictor.predict( + [image_features], + num_predictions_per_location=[1], + scope='BoxPredictor', + prediction_stage=2) + return (box_predictions[box_predictor.BOX_ENCODINGS], + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]) + image_features = np.random.rand(2, 7, 7, 3).astype(np.float32) + (box_encodings, + class_predictions_with_background) = self.execute(graph_fn, + [image_features]) + self.assertAllEqual(box_encodings.shape, [2, 1, 5, 4]) + self.assertAllEqual(class_predictions_with_background.shape, [2, 1, 6]) + + def test_get_boxes_with_five_classes_share_box_across_classes(self): + def graph_fn(image_features): + mask_box_predictor = box_predictor_builder.build_mask_rcnn_box_predictor( + is_training=False, + num_classes=5, + fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), + use_dropout=False, + dropout_keep_prob=0.5, + box_code_size=4, + share_box_across_classes=True + ) + box_predictions = mask_box_predictor.predict( + [image_features], + num_predictions_per_location=[1], + scope='BoxPredictor', + prediction_stage=2) + return (box_predictions[box_predictor.BOX_ENCODINGS], + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]) + image_features = np.random.rand(2, 7, 7, 3).astype(np.float32) + (box_encodings, + class_predictions_with_background) = self.execute(graph_fn, + [image_features]) + self.assertAllEqual(box_encodings.shape, [2, 1, 1, 4]) + self.assertAllEqual(class_predictions_with_background.shape, [2, 1, 6]) + + def test_value_error_on_predict_instance_masks_with_no_conv_hyperparms(self): + with self.assertRaises(ValueError): + box_predictor_builder.build_mask_rcnn_box_predictor( + is_training=False, + num_classes=5, + fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), + use_dropout=False, + dropout_keep_prob=0.5, + box_code_size=4, + predict_instance_masks=True) + + def test_get_instance_masks(self): + def graph_fn(image_features): + mask_box_predictor = box_predictor_builder.build_mask_rcnn_box_predictor( + 
is_training=False, + num_classes=5, + fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), + use_dropout=False, + dropout_keep_prob=0.5, + box_code_size=4, + conv_hyperparams_fn=self._build_arg_scope_with_hyperparams( + op_type=hyperparams_pb2.Hyperparams.CONV), + predict_instance_masks=True) + box_predictions = mask_box_predictor.predict( + [image_features], + num_predictions_per_location=[1], + scope='BoxPredictor', + prediction_stage=3) + return (box_predictions[box_predictor.MASK_PREDICTIONS],) + image_features = np.random.rand(2, 7, 7, 3).astype(np.float32) + mask_predictions = self.execute(graph_fn, [image_features]) + self.assertAllEqual(mask_predictions.shape, [2, 1, 5, 14, 14]) + + def test_do_not_return_instance_masks_without_request(self): + image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32) + mask_box_predictor = box_predictor_builder.build_mask_rcnn_box_predictor( + is_training=False, + num_classes=5, + fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), + use_dropout=False, + dropout_keep_prob=0.5, + box_code_size=4) + box_predictions = mask_box_predictor.predict( + [image_features], + num_predictions_per_location=[1], + scope='BoxPredictor', + prediction_stage=2) + self.assertEqual(len(box_predictions), 2) + self.assertTrue(box_predictor.BOX_ENCODINGS in box_predictions) + self.assertTrue(box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND + in box_predictions) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/predictors/mask_rcnn_keras_box_predictor.py b/models/research/object_detection/predictors/mask_rcnn_keras_box_predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..baca02edda0e21dbbc070e8c3800f898875aafe1 --- /dev/null +++ b/models/research/object_detection/predictors/mask_rcnn_keras_box_predictor.py @@ -0,0 +1,139 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Mask R-CNN Box Predictor.""" +from object_detection.core import box_predictor + + +BOX_ENCODINGS = box_predictor.BOX_ENCODINGS +CLASS_PREDICTIONS_WITH_BACKGROUND = ( + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) +MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS + + +class MaskRCNNKerasBoxPredictor(box_predictor.KerasBoxPredictor): + """Mask R-CNN Box Predictor. + + See Mask R-CNN: He, K., Gkioxari, G., Dollar, P., & Girshick, R. (2017). + Mask R-CNN. arXiv preprint arXiv:1703.06870. + + This is used for the second stage of the Mask R-CNN detector where proposals + cropped from an image are arranged along the batch dimension of the input + image_features tensor. Notice that locations are *not* shared across classes, + thus for each anchor, a separate prediction is made for each class. + + In addition to predicting boxes and classes, optionally this class allows + predicting masks and/or keypoints inside detection boxes. 
+ + Currently this box predictor makes per-class predictions; that is, each + anchor makes a separate box prediction for each class. + """ + + def __init__(self, + is_training, + num_classes, + freeze_batchnorm, + box_prediction_head, + class_prediction_head, + third_stage_heads, + name=None): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + box_prediction_head: The head that predicts the boxes in second stage. + class_prediction_head: The head that predicts the classes in second stage. + third_stage_heads: A dictionary mapping head names to mask rcnn head + classes. + name: A string name scope to assign to the model. If `None`, Keras + will auto-generate one from the class name. + """ + super(MaskRCNNKerasBoxPredictor, self).__init__( + is_training, num_classes, freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=False, name=name) + self._box_prediction_head = box_prediction_head + self._class_prediction_head = class_prediction_head + self._third_stage_heads = third_stage_heads + + @property + def num_classes(self): + return self._num_classes + + def get_second_stage_prediction_heads(self): + return BOX_ENCODINGS, CLASS_PREDICTIONS_WITH_BACKGROUND + + def get_third_stage_prediction_heads(self): + return sorted(self._third_stage_heads.keys()) + + def _predict(self, + image_features, + prediction_stage=2, + **kwargs): + """Optionally computes encoded object locations, confidences, and masks. + + Predicts the heads belonging to the given prediction stage. + + Args: + image_features: A list of float tensors of shape + [batch_size, height_i, width_i, channels_i] containing roi pooled + features for each image. The length of the list should be 1 otherwise + a ValueError will be raised. + prediction_stage: Prediction stage. Acceptable values are 2 and 3. + **kwargs: Unused Keyword args + + Returns: + A dictionary containing the predicted tensors that are listed in + self._prediction_heads. A subset of the following keys will exist in the + dictionary: + BOX_ENCODINGS: A float tensor of shape + [batch_size, 1, num_classes, code_size] representing the + location of the objects. + CLASS_PREDICTIONS_WITH_BACKGROUND: A float tensor of shape + [batch_size, 1, num_classes + 1] representing the class + predictions for the proposals. + MASK_PREDICTIONS: A float tensor of shape + [batch_size, 1, num_classes, image_height, image_width] + + Raises: + ValueError: If num_predictions_per_location is not 1 or if + len(image_features) is not 1. + ValueError: if prediction_stage is not 2 or 3. + """ + if len(image_features) != 1: + raise ValueError('length of `image_features` must be 1. 
Found {}'.format( + len(image_features))) + image_feature = image_features[0] + predictions_dict = {} + + if prediction_stage == 2: + predictions_dict[BOX_ENCODINGS] = self._box_prediction_head(image_feature) + predictions_dict[CLASS_PREDICTIONS_WITH_BACKGROUND] = ( + self._class_prediction_head(image_feature)) + elif prediction_stage == 3: + for prediction_head in self.get_third_stage_prediction_heads(): + head_object = self._third_stage_heads[prediction_head] + predictions_dict[prediction_head] = head_object(image_feature) + else: + raise ValueError('prediction_stage should be either 2 or 3.') + + return predictions_dict diff --git a/models/research/object_detection/predictors/mask_rcnn_keras_box_predictor_tf2_test.py b/models/research/object_detection/predictors/mask_rcnn_keras_box_predictor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..a92db9e90fb8299ff449bb614886a9a5542033c3 --- /dev/null +++ b/models/research/object_detection/predictors/mask_rcnn_keras_box_predictor_tf2_test.py @@ -0,0 +1,144 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.predictors.mask_rcnn_box_predictor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import box_predictor_builder +from object_detection.builders import hyperparams_builder +from object_detection.predictors import mask_rcnn_keras_box_predictor as box_predictor +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class MaskRCNNKerasBoxPredictorTest(test_case.TestCase): + + def _build_hyperparams(self, + op_type=hyperparams_pb2.Hyperparams.FC): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.KerasLayerHyperparams(hyperparams) + + def test_get_boxes_with_five_classes(self): + mask_box_predictor = ( + box_predictor_builder.build_mask_rcnn_keras_box_predictor( + is_training=False, + num_classes=5, + fc_hyperparams=self._build_hyperparams(), + freeze_batchnorm=False, + use_dropout=False, + dropout_keep_prob=0.5, + box_code_size=4, + )) + def graph_fn(image_features): + box_predictions = mask_box_predictor( + [image_features], + prediction_stage=2) + return (box_predictions[box_predictor.BOX_ENCODINGS], + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]) + image_features = np.random.rand(2, 7, 7, 3).astype(np.float32) + (box_encodings, + class_predictions_with_background) = self.execute(graph_fn, + 
[image_features]) + self.assertAllEqual(box_encodings.shape, [2, 1, 5, 4]) + self.assertAllEqual(class_predictions_with_background.shape, [2, 1, 6]) + + def test_get_boxes_with_five_classes_share_box_across_classes(self): + mask_box_predictor = ( + box_predictor_builder.build_mask_rcnn_keras_box_predictor( + is_training=False, + num_classes=5, + fc_hyperparams=self._build_hyperparams(), + freeze_batchnorm=False, + use_dropout=False, + dropout_keep_prob=0.5, + box_code_size=4, + share_box_across_classes=True + )) + def graph_fn(image_features): + + box_predictions = mask_box_predictor( + [image_features], + prediction_stage=2) + return (box_predictions[box_predictor.BOX_ENCODINGS], + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]) + image_features = np.random.rand(2, 7, 7, 3).astype(np.float32) + (box_encodings, + class_predictions_with_background) = self.execute(graph_fn, + [image_features]) + self.assertAllEqual(box_encodings.shape, [2, 1, 1, 4]) + self.assertAllEqual(class_predictions_with_background.shape, [2, 1, 6]) + + def test_get_instance_masks(self): + mask_box_predictor = ( + box_predictor_builder.build_mask_rcnn_keras_box_predictor( + is_training=False, + num_classes=5, + fc_hyperparams=self._build_hyperparams(), + freeze_batchnorm=False, + use_dropout=False, + dropout_keep_prob=0.5, + box_code_size=4, + conv_hyperparams=self._build_hyperparams( + op_type=hyperparams_pb2.Hyperparams.CONV), + predict_instance_masks=True)) + def graph_fn(image_features): + box_predictions = mask_box_predictor( + [image_features], + prediction_stage=3) + return (box_predictions[box_predictor.MASK_PREDICTIONS],) + image_features = np.random.rand(2, 7, 7, 3).astype(np.float32) + mask_predictions = self.execute(graph_fn, [image_features]) + self.assertAllEqual(mask_predictions.shape, [2, 1, 5, 14, 14]) + + def test_do_not_return_instance_masks_without_request(self): + image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32) + mask_box_predictor = ( + box_predictor_builder.build_mask_rcnn_keras_box_predictor( + is_training=False, + num_classes=5, + fc_hyperparams=self._build_hyperparams(), + freeze_batchnorm=False, + use_dropout=False, + dropout_keep_prob=0.5, + box_code_size=4)) + box_predictions = mask_box_predictor( + [image_features], + prediction_stage=2) + self.assertEqual(len(box_predictions), 2) + self.assertTrue(box_predictor.BOX_ENCODINGS in box_predictions) + self.assertTrue(box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND + in box_predictions) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/predictors/rfcn_box_predictor.py b/models/research/object_detection/predictors/rfcn_box_predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..c5cf7acbebde3225eea1a2b0631fda208784f43d --- /dev/null +++ b/models/research/object_detection/predictors/rfcn_box_predictor.py @@ -0,0 +1,159 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
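The shape assertions in the Keras tests above follow directly from the head conventions in the predictor docstrings: the leading dimension counts proposals stacked along the batch axis, the second dimension is always 1 (a single prediction per location), box encodings are per class unless `share_box_across_classes` is set, class predictions carry an extra background column, and mask predictions add the mask height and width. A small pure-Python helper, illustrative only, that reproduces the expected shapes:

def expected_mask_rcnn_shapes(num_proposals, num_classes, box_code_size=4,
                              mask_size=14, share_box_across_classes=False):
  """Expected Mask R-CNN head output shapes for a stack of proposals."""
  box_classes = 1 if share_box_across_classes else num_classes
  return {
      'box_encodings': [num_proposals, 1, box_classes, box_code_size],
      'class_predictions_with_background': [num_proposals, 1, num_classes + 1],
      'mask_predictions':
          [num_proposals, 1, num_classes, mask_size, mask_size],
  }

# Matches the assertions above: 2 proposals, 5 classes, 14x14 masks.
shapes = expected_mask_rcnn_shapes(2, 5)
assert shapes['box_encodings'] == [2, 1, 5, 4]
assert shapes['class_predictions_with_background'] == [2, 1, 6]
assert shapes['mask_predictions'] == [2, 1, 5, 14, 14]
assert expected_mask_rcnn_shapes(
    2, 5, share_box_across_classes=True)['box_encodings'] == [2, 1, 1, 4]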
+# ============================================================================== + +"""RFCN Box Predictor.""" +import tensorflow.compat.v1 as tf +import tf_slim as slim +from object_detection.core import box_predictor +from object_detection.utils import ops + + +BOX_ENCODINGS = box_predictor.BOX_ENCODINGS +CLASS_PREDICTIONS_WITH_BACKGROUND = ( + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) +MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS + + +class RfcnBoxPredictor(box_predictor.BoxPredictor): + """RFCN Box Predictor. + + Applies a position sensitive ROI pooling on position sensitive feature maps to + predict classes and refined locations. See https://arxiv.org/abs/1605.06409 + for details. + + This is used for the second stage of the RFCN meta architecture. Notice that + locations are *not* shared across classes, thus for each anchor, a separate + prediction is made for each class. + """ + + def __init__(self, + is_training, + num_classes, + conv_hyperparams_fn, + num_spatial_bins, + depth, + crop_size, + box_code_size): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + conv_hyperparams_fn: A function to construct tf-slim arg_scope with + hyperparameters for convolutional layers. + num_spatial_bins: A list of two integers `[spatial_bins_y, + spatial_bins_x]`. + depth: Target depth to reduce the input feature maps to. + crop_size: A list of two integers `[crop_height, crop_width]`. + box_code_size: Size of encoding for each box. + """ + super(RfcnBoxPredictor, self).__init__(is_training, num_classes) + self._conv_hyperparams_fn = conv_hyperparams_fn + self._num_spatial_bins = num_spatial_bins + self._depth = depth + self._crop_size = crop_size + self._box_code_size = box_code_size + + @property + def num_classes(self): + return self._num_classes + + def _predict(self, image_features, num_predictions_per_location, + proposal_boxes): + """Computes encoded object locations and corresponding confidences. + + Args: + image_features: A list of float tensors of shape [batch_size, height_i, + width_i, channels_i] containing features for a batch of images. + num_predictions_per_location: A list of integers representing the number + of box predictions to be made per spatial location for each feature map. + Currently, this must be set to [1], or an error will be raised. + proposal_boxes: A float tensor of shape [batch_size, num_proposals, + box_code_size]. + + Returns: + box_encodings: A list of float tensors of shape + [batch_size, num_anchors_i, q, code_size] representing the location of + the objects, where q is 1 or the number of classes. Each entry in the + list corresponds to a feature map in the input `image_features` list. + class_predictions_with_background: A list of float tensors of shape + [batch_size, num_anchors_i, num_classes + 1] representing the class + predictions for the proposals. Each entry in the list corresponds to a + feature map in the input `image_features` list. + + Raises: + ValueError: if num_predictions_per_location is not 1 or if + len(image_features) is not 1. 
+ """ + if (len(num_predictions_per_location) != 1 or + num_predictions_per_location[0] != 1): + raise ValueError('Currently RfcnBoxPredictor only supports ' + 'predicting a single box per class per location.') + if len(image_features) != 1: + raise ValueError('length of `image_features` must be 1. Found {}'. + format(len(image_features))) + image_feature = image_features[0] + num_predictions_per_location = num_predictions_per_location[0] + batch_size = tf.shape(proposal_boxes)[0] + num_boxes = tf.shape(proposal_boxes)[1] + net = image_feature + with slim.arg_scope(self._conv_hyperparams_fn()): + net = slim.conv2d(net, self._depth, [1, 1], scope='reduce_depth') + # Location predictions. + location_feature_map_depth = (self._num_spatial_bins[0] * + self._num_spatial_bins[1] * + self.num_classes * + self._box_code_size) + location_feature_map = slim.conv2d(net, location_feature_map_depth, + [1, 1], activation_fn=None, + scope='refined_locations') + box_encodings = ops.batch_position_sensitive_crop_regions( + location_feature_map, + boxes=proposal_boxes, + crop_size=self._crop_size, + num_spatial_bins=self._num_spatial_bins, + global_pool=True) + box_encodings = tf.squeeze(box_encodings, axis=[2, 3]) + box_encodings = tf.reshape(box_encodings, + [batch_size * num_boxes, 1, self.num_classes, + self._box_code_size]) + + # Class predictions. + total_classes = self.num_classes + 1 # Account for background class. + class_feature_map_depth = (self._num_spatial_bins[0] * + self._num_spatial_bins[1] * + total_classes) + class_feature_map = slim.conv2d(net, class_feature_map_depth, [1, 1], + activation_fn=None, + scope='class_predictions') + class_predictions_with_background = ( + ops.batch_position_sensitive_crop_regions( + class_feature_map, + boxes=proposal_boxes, + crop_size=self._crop_size, + num_spatial_bins=self._num_spatial_bins, + global_pool=True)) + class_predictions_with_background = tf.squeeze( + class_predictions_with_background, axis=[2, 3]) + class_predictions_with_background = tf.reshape( + class_predictions_with_background, + [batch_size * num_boxes, 1, total_classes]) + + return {BOX_ENCODINGS: [box_encodings], + CLASS_PREDICTIONS_WITH_BACKGROUND: + [class_predictions_with_background]} diff --git a/models/research/object_detection/predictors/rfcn_box_predictor_tf1_test.py b/models/research/object_detection/predictors/rfcn_box_predictor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..555c4b2adeaef6142884adbc5c4e1087084fd884 --- /dev/null +++ b/models/research/object_detection/predictors/rfcn_box_predictor_tf1_test.py @@ -0,0 +1,80 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.predictors.rfcn_box_predictor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import hyperparams_builder +from object_detection.predictors import rfcn_box_predictor as box_predictor +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class RfcnBoxPredictorTest(test_case.TestCase): + + def _build_arg_scope_with_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.build(conv_hyperparams, is_training=True) + + def test_get_correct_box_encoding_and_class_prediction_shapes(self): + + def graph_fn(image_features, proposal_boxes): + rfcn_box_predictor = box_predictor.RfcnBoxPredictor( + is_training=False, + num_classes=2, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + num_spatial_bins=[3, 3], + depth=4, + crop_size=[12, 12], + box_code_size=4 + ) + box_predictions = rfcn_box_predictor.predict( + [image_features], num_predictions_per_location=[1], + scope='BoxPredictor', + proposal_boxes=proposal_boxes) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + proposal_boxes = np.random.rand(4, 2, 4).astype(np.float32) + (box_encodings, class_predictions_with_background) = self.execute( + graph_fn, [image_features, proposal_boxes]) + + self.assertAllEqual(box_encodings.shape, [8, 1, 2, 4]) + self.assertAllEqual(class_predictions_with_background.shape, [8, 1, 3]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/predictors/rfcn_keras_box_predictor.py b/models/research/object_detection/predictors/rfcn_keras_box_predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..094e665f69c92235fe48686014f18f71225bb796 --- /dev/null +++ b/models/research/object_detection/predictors/rfcn_keras_box_predictor.py @@ -0,0 +1,204 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
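The assertions above also encode the RFCN output convention: proposals are flattened into the leading batch dimension (`batch_size * num_boxes`), the second dimension is 1, box encodings keep one row per class, and class predictions gain a background column. A small pure-Python check, illustrative only:

def expected_rfcn_shapes(batch_size, num_proposals, num_classes,
                         box_code_size=4):
  """Expected RFCN output shapes; proposals are folded into the batch axis."""
  flat = batch_size * num_proposals
  box_encodings_shape = [flat, 1, num_classes, box_code_size]
  class_predictions_shape = [flat, 1, num_classes + 1]
  return box_encodings_shape, class_predictions_shape

# Matches the test above: 4 images x 2 proposals each, 2 classes.
assert expected_rfcn_shapes(4, 2, 2) == ([8, 1, 2, 4], [8, 1, 3])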
+# ============================================================================== + +"""RFCN Box Predictor.""" +import tensorflow.compat.v1 as tf +from object_detection.core import box_predictor +from object_detection.utils import ops + +BOX_ENCODINGS = box_predictor.BOX_ENCODINGS +CLASS_PREDICTIONS_WITH_BACKGROUND = ( + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) +MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS + + +class RfcnKerasBoxPredictor(box_predictor.KerasBoxPredictor): + """RFCN Box Predictor. + + Applies a position sensitive ROI pooling on position sensitive feature maps to + predict classes and refined locations. See https://arxiv.org/abs/1605.06409 + for details. + + This is used for the second stage of the RFCN meta architecture. Notice that + locations are *not* shared across classes, thus for each anchor, a separate + prediction is made for each class. + """ + + def __init__(self, + is_training, + num_classes, + conv_hyperparams, + freeze_batchnorm, + num_spatial_bins, + depth, + crop_size, + box_code_size, + name=None): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + num_spatial_bins: A list of two integers `[spatial_bins_y, + spatial_bins_x]`. + depth: Target depth to reduce the input feature maps to. + crop_size: A list of two integers `[crop_height, crop_width]`. + box_code_size: Size of encoding for each box. + name: A string name scope to assign to the box predictor. If `None`, Keras + will auto-generate one from the class name. 
+ """ + super(RfcnKerasBoxPredictor, self).__init__( + is_training, num_classes, freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=False, name=name) + self._freeze_batchnorm = freeze_batchnorm + self._conv_hyperparams = conv_hyperparams + self._num_spatial_bins = num_spatial_bins + self._depth = depth + self._crop_size = crop_size + self._box_code_size = box_code_size + + # Build the shared layers used for both heads + self._shared_conv_layers = [] + self._shared_conv_layers.append( + tf.keras.layers.Conv2D( + self._depth, + [1, 1], + padding='SAME', + name='reduce_depth_conv', + **self._conv_hyperparams.params())) + self._shared_conv_layers.append( + self._conv_hyperparams.build_batch_norm( + training=(self._is_training and not self._freeze_batchnorm), + name='reduce_depth_batchnorm')) + self._shared_conv_layers.append( + self._conv_hyperparams.build_activation_layer( + name='reduce_depth_activation')) + + self._box_encoder_layers = [] + location_feature_map_depth = (self._num_spatial_bins[0] * + self._num_spatial_bins[1] * + self.num_classes * + self._box_code_size) + self._box_encoder_layers.append( + tf.keras.layers.Conv2D( + location_feature_map_depth, + [1, 1], + padding='SAME', + name='refined_locations_conv', + **self._conv_hyperparams.params())) + self._box_encoder_layers.append( + self._conv_hyperparams.build_batch_norm( + training=(self._is_training and not self._freeze_batchnorm), + name='refined_locations_batchnorm')) + + self._class_predictor_layers = [] + self._total_classes = self.num_classes + 1 # Account for background class. + class_feature_map_depth = (self._num_spatial_bins[0] * + self._num_spatial_bins[1] * + self._total_classes) + self._class_predictor_layers.append( + tf.keras.layers.Conv2D( + class_feature_map_depth, + [1, 1], + padding='SAME', + name='class_predictions_conv', + **self._conv_hyperparams.params())) + self._class_predictor_layers.append( + self._conv_hyperparams.build_batch_norm( + training=(self._is_training and not self._freeze_batchnorm), + name='class_predictions_batchnorm')) + + @property + def num_classes(self): + return self._num_classes + + def _predict(self, image_features, proposal_boxes, **kwargs): + """Computes encoded object locations and corresponding confidences. + + Args: + image_features: A list of float tensors of shape [batch_size, height_i, + width_i, channels_i] containing features for a batch of images. + proposal_boxes: A float tensor of shape [batch_size, num_proposals, + box_code_size]. + **kwargs: Unused Keyword args + + Returns: + box_encodings: A list of float tensors of shape + [batch_size, num_anchors_i, q, code_size] representing the location of + the objects, where q is 1 or the number of classes. Each entry in the + list corresponds to a feature map in the input `image_features` list. + class_predictions_with_background: A list of float tensors of shape + [batch_size, num_anchors_i, num_classes + 1] representing the class + predictions for the proposals. Each entry in the list corresponds to a + feature map in the input `image_features` list. + + Raises: + ValueError: if num_predictions_per_location is not 1 or if + len(image_features) is not 1. + """ + if len(image_features) != 1: + raise ValueError('length of `image_features` must be 1. Found {}'. + format(len(image_features))) + image_feature = image_features[0] + batch_size = tf.shape(proposal_boxes)[0] + num_boxes = tf.shape(proposal_boxes)[1] + net = image_feature + for layer in self._shared_conv_layers: + net = layer(net) + + # Location predictions. 
+ box_net = net + for layer in self._box_encoder_layers: + box_net = layer(box_net) + box_encodings = ops.batch_position_sensitive_crop_regions( + box_net, + boxes=proposal_boxes, + crop_size=self._crop_size, + num_spatial_bins=self._num_spatial_bins, + global_pool=True) + box_encodings = tf.squeeze(box_encodings, axis=[2, 3]) + box_encodings = tf.reshape(box_encodings, + [batch_size * num_boxes, 1, self.num_classes, + self._box_code_size]) + + # Class predictions. + class_net = net + for layer in self._class_predictor_layers: + class_net = layer(class_net) + class_predictions_with_background = ( + ops.batch_position_sensitive_crop_regions( + class_net, + boxes=proposal_boxes, + crop_size=self._crop_size, + num_spatial_bins=self._num_spatial_bins, + global_pool=True)) + class_predictions_with_background = tf.squeeze( + class_predictions_with_background, axis=[2, 3]) + class_predictions_with_background = tf.reshape( + class_predictions_with_background, + [batch_size * num_boxes, 1, self._total_classes]) + + return {BOX_ENCODINGS: [box_encodings], + CLASS_PREDICTIONS_WITH_BACKGROUND: + [class_predictions_with_background]} diff --git a/models/research/object_detection/predictors/rfcn_keras_box_predictor_tf2_test.py b/models/research/object_detection/predictors/rfcn_keras_box_predictor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..f845068e35b37a9b0d77873fb5adbf59c78450ae --- /dev/null +++ b/models/research/object_detection/predictors/rfcn_keras_box_predictor_tf2_test.py @@ -0,0 +1,79 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
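Unlike the slim variant, the Keras predictor above builds all of its layers in the constructor and is used as a plain callable: there is no variable scope and no `num_predictions_per_location` argument, and the proposal boxes are passed as a keyword. A minimal usage sketch, illustrative only, assuming TF2 eager execution and the `object_detection` package on the path; it mirrors the test that follows.

import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.predictors import rfcn_keras_box_predictor
from object_detection.protos import hyperparams_pb2

conv_hyperparams = hyperparams_pb2.Hyperparams()
text_format.Merge("""
    regularizer { l2_regularizer { } }
    initializer { truncated_normal_initializer { } }
""", conv_hyperparams)

predictor = rfcn_keras_box_predictor.RfcnKerasBoxPredictor(
    is_training=False,
    num_classes=2,
    conv_hyperparams=hyperparams_builder.KerasLayerHyperparams(conv_hyperparams),
    freeze_batchnorm=False,
    num_spatial_bins=[3, 3],
    depth=4,
    crop_size=[12, 12],
    box_code_size=4)

image_features = tf.constant(np.random.rand(4, 8, 8, 64).astype(np.float32))
proposal_boxes = tf.constant(np.random.rand(4, 2, 4).astype(np.float32))

# The predictor is a Keras layer: call it directly and read the result dict.
predictions = predictor([image_features], proposal_boxes=proposal_boxes)
box_encodings = predictions[rfcn_keras_box_predictor.BOX_ENCODINGS][0]
class_scores = predictions[
    rfcn_keras_box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND][0]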
+# ============================================================================== + +"""Tests for object_detection.predictors.rfcn_box_predictor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import hyperparams_builder +from object_detection.predictors import rfcn_keras_box_predictor as box_predictor +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class RfcnKerasBoxPredictorTest(test_case.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def test_get_correct_box_encoding_and_class_prediction_shapes(self): + rfcn_box_predictor = box_predictor.RfcnKerasBoxPredictor( + is_training=False, + num_classes=2, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + num_spatial_bins=[3, 3], + depth=4, + crop_size=[12, 12], + box_code_size=4) + def graph_fn(image_features, proposal_boxes): + + box_predictions = rfcn_box_predictor( + [image_features], + proposal_boxes=proposal_boxes) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + proposal_boxes = np.random.rand(4, 2, 4).astype(np.float32) + (box_encodings, class_predictions_with_background) = self.execute( + graph_fn, [image_features, proposal_boxes]) + + self.assertAllEqual(box_encodings.shape, [8, 1, 2, 4]) + self.assertAllEqual(class_predictions_with_background.shape, [8, 1, 3]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/models/research/object_detection/protos/__init__.py b/models/research/object_detection/protos/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/research/object_detection/protos/anchor_generator.proto b/models/research/object_detection/protos/anchor_generator.proto new file mode 100644 index 0000000000000000000000000000000000000000..9608ca48908e6f7aff35f57042b2275c5d0ab5d4 --- /dev/null +++ b/models/research/object_detection/protos/anchor_generator.proto @@ -0,0 +1,19 @@ +syntax = "proto2"; + +package object_detection.protos; + +import "object_detection/protos/flexible_grid_anchor_generator.proto"; +import "object_detection/protos/grid_anchor_generator.proto"; +import "object_detection/protos/multiscale_anchor_generator.proto"; +import "object_detection/protos/ssd_anchor_generator.proto"; + +// Configuration proto for the anchor generator to use in the object detection +// pipeline. See core/anchor_generator.py for details. 
+message AnchorGenerator { + oneof anchor_generator_oneof { + GridAnchorGenerator grid_anchor_generator = 1; + SsdAnchorGenerator ssd_anchor_generator = 2; + MultiscaleAnchorGenerator multiscale_anchor_generator = 3; + FlexibleGridAnchorGenerator flexible_grid_anchor_generator = 4; + } +} diff --git a/models/research/object_detection/protos/argmax_matcher.proto b/models/research/object_detection/protos/argmax_matcher.proto new file mode 100644 index 0000000000000000000000000000000000000000..947fcb983dcbe3bcb0b39b7c8bd48a50b1667edf --- /dev/null +++ b/models/research/object_detection/protos/argmax_matcher.proto @@ -0,0 +1,29 @@ +syntax = "proto2"; + +package object_detection.protos; + +// Configuration proto for ArgMaxMatcher. See +// matchers/argmax_matcher.py for details. +message ArgMaxMatcher { + // Threshold for positive matches. + optional float matched_threshold = 1 [default = 0.5]; + + // Threshold for negative matches. + optional float unmatched_threshold = 2 [default = 0.5]; + + // Whether to construct ArgMaxMatcher without thresholds. + optional bool ignore_thresholds = 3 [default = false]; + + // If True then negative matches are the ones below the unmatched_threshold, + // whereas ignored matches are in between the matched and umatched + // threshold. If False, then negative matches are in between the matched + // and unmatched threshold, and everything lower than unmatched is ignored. + optional bool negatives_lower_than_unmatched = 4 [default = true]; + + // Whether to ensure each row is matched to at least one column. + optional bool force_match_for_each_row = 5 [default = false]; + + // Force constructed match objects to use matrix multiplication based gather + // instead of standard tf.gather + optional bool use_matmul_gather = 6 [default = false]; +} diff --git a/models/research/object_detection/protos/bipartite_matcher.proto b/models/research/object_detection/protos/bipartite_matcher.proto new file mode 100644 index 0000000000000000000000000000000000000000..175ecdd109653ae1c0b37a9655873b72161e963e --- /dev/null +++ b/models/research/object_detection/protos/bipartite_matcher.proto @@ -0,0 +1,11 @@ +syntax = "proto2"; + +package object_detection.protos; + +// Configuration proto for bipartite matcher. See +// matchers/bipartite_matcher.py for details. +message BipartiteMatcher { + // Force constructed match objects to use matrix multiplication based gather + // instead of standard tf.gather + optional bool use_matmul_gather = 6 [default = false]; +} diff --git a/models/research/object_detection/protos/box_coder.proto b/models/research/object_detection/protos/box_coder.proto new file mode 100644 index 0000000000000000000000000000000000000000..79b818125a33c39022262b9fc7754f4081f6b169 --- /dev/null +++ b/models/research/object_detection/protos/box_coder.proto @@ -0,0 +1,19 @@ +syntax = "proto2"; + +package object_detection.protos; + +import "object_detection/protos/faster_rcnn_box_coder.proto"; +import "object_detection/protos/keypoint_box_coder.proto"; +import "object_detection/protos/mean_stddev_box_coder.proto"; +import "object_detection/protos/square_box_coder.proto"; + +// Configuration proto for the box coder to be used in the object detection +// pipeline. See core/box_coder.py for details. 
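The interaction between the two ArgMaxMatcher thresholds and `negatives_lower_than_unmatched` above is easier to see as code. The snippet below is an illustrative pure-Python restatement of the proto comment, not the actual implementation in matchers/argmax_matcher.py:

def match_label(iou, matched_threshold=0.5, unmatched_threshold=0.5,
                negatives_lower_than_unmatched=True):
  """Labels an anchor's best IOU according to the ArgMaxMatcher proto fields."""
  if iou >= matched_threshold:
    return 'positive'
  if iou < unmatched_threshold:
    # Below the unmatched threshold: negative by default, ignored when the
    # flag is flipped.
    return 'negative' if negatives_lower_than_unmatched else 'ignored'
  # In between the two thresholds.
  return 'ignored' if negatives_lower_than_unmatched else 'negative'

# With distinct thresholds (e.g. 0.7 / 0.3), the in-between band is ignored
# under the default setting.
assert match_label(0.8, 0.7, 0.3) == 'positive'
assert match_label(0.5, 0.7, 0.3) == 'ignored'
assert match_label(0.1, 0.7, 0.3) == 'negative'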
+message BoxCoder { + oneof box_coder_oneof { + FasterRcnnBoxCoder faster_rcnn_box_coder = 1; + MeanStddevBoxCoder mean_stddev_box_coder = 2; + SquareBoxCoder square_box_coder = 3; + KeypointBoxCoder keypoint_box_coder = 4; + } +} diff --git a/models/research/object_detection/protos/box_predictor.proto b/models/research/object_detection/protos/box_predictor.proto new file mode 100644 index 0000000000000000000000000000000000000000..0b0fadd7977eb799c2920adb0f79c5e535b68534 --- /dev/null +++ b/models/research/object_detection/protos/box_predictor.proto @@ -0,0 +1,200 @@ +syntax = "proto2"; + +package object_detection.protos; + +import "object_detection/protos/hyperparams.proto"; + +// Configuration proto for box predictor. See core/box_predictor.py for details. +message BoxPredictor { + oneof box_predictor_oneof { + ConvolutionalBoxPredictor convolutional_box_predictor = 1; + MaskRCNNBoxPredictor mask_rcnn_box_predictor = 2; + RfcnBoxPredictor rfcn_box_predictor = 3; + WeightSharedConvolutionalBoxPredictor + weight_shared_convolutional_box_predictor = 4; + } +} + +// Configuration proto for Convolutional box predictor. +// Next id: 13 +message ConvolutionalBoxPredictor { + // Hyperparameters for convolution ops used in the box predictor. + optional Hyperparams conv_hyperparams = 1; + + // Minimum feature depth prior to predicting box encodings and class + // predictions. + optional int32 min_depth = 2 [default = 0]; + + // Maximum feature depth prior to predicting box encodings and class + // predictions. If max_depth is set to 0, no additional feature map will be + // inserted before location and class predictions. + optional int32 max_depth = 3 [default = 0]; + + // Number of the additional conv layers before the predictor. + optional int32 num_layers_before_predictor = 4 [default = 0]; + + // Whether to use dropout for class prediction. + optional bool use_dropout = 5 [default = true]; + + // Keep probability for dropout + optional float dropout_keep_probability = 6 [default = 0.8]; + + // Size of final convolution kernel. If the spatial resolution of the feature + // map is smaller than the kernel size, then the kernel size is set to + // min(feature_width, feature_height). + optional int32 kernel_size = 7 [default = 1]; + + // Size of the encoding for boxes. + optional int32 box_code_size = 8 [default = 4]; + + // Whether to apply sigmoid to the output of class predictions. + // TODO(jonathanhuang): Do we need this since we have a post processing + // module.? + optional bool apply_sigmoid_to_scores = 9 [default = false]; + + optional float class_prediction_bias_init = 10 [default = 0.0]; + + // Whether to use depthwise separable convolution for box predictor layers. + optional bool use_depthwise = 11 [default = false]; + + // If specified, apply clipping to box encodings. + message BoxEncodingsClipRange { + optional float min = 1; + optional float max = 2; + } + optional BoxEncodingsClipRange box_encodings_clip_range = 12; +} + +// Configuration proto for weight shared convolutional box predictor. +// Next id: 19 +message WeightSharedConvolutionalBoxPredictor { + // Hyperparameters for convolution ops used in the box predictor. + optional Hyperparams conv_hyperparams = 1; + + // Number of the additional conv layers before the predictor. + optional int32 num_layers_before_predictor = 4 [default = 0]; + + // Output depth for the convolution ops prior to predicting box encodings + // and class predictions. + optional int32 depth = 2 [default = 0]; + + // Size of final convolution kernel. 
If the spatial resolution of the feature + // map is smaller than the kernel size, then the kernel size is set to + // min(feature_width, feature_height). + optional int32 kernel_size = 7 [default = 3]; + + // Size of the encoding for boxes. + optional int32 box_code_size = 8 [default = 4]; + + // Bias initialization for class prediction. It has been show to stabilize + // training where there are large number of negative boxes. See + // https://arxiv.org/abs/1708.02002 for details. + optional float class_prediction_bias_init = 10 [default = 0.0]; + + // Whether to use dropout for class prediction. + optional bool use_dropout = 11 [default = false]; + + // Keep probability for dropout. + optional float dropout_keep_probability = 12 [default = 0.8]; + + // Whether to share the multi-layer tower between box prediction and class + // prediction heads. + optional bool share_prediction_tower = 13 [default = false]; + + // Whether to use depthwise separable convolution for box predictor layers. + optional bool use_depthwise = 14 [default = false]; + + // Enum to specify how to convert the detection scores at inference time. + enum ScoreConverter { + // Input scores equals output scores. + IDENTITY = 0; + + // Applies a sigmoid on input scores. + SIGMOID = 1; + } + + // Callable elementwise score converter at inference time. + optional ScoreConverter score_converter = 16 [default = IDENTITY]; + + // If specified, apply clipping to box encodings. + message BoxEncodingsClipRange { + optional float min = 1; + optional float max = 2; + } + optional BoxEncodingsClipRange box_encodings_clip_range = 17; + +} + + +// TODO(alirezafathi): Refactor the proto file to be able to configure mask rcnn +// head easily. +// Next id: 15 +message MaskRCNNBoxPredictor { + // Hyperparameters for fully connected ops used in the box predictor. + optional Hyperparams fc_hyperparams = 1; + + // Whether to use dropout op prior to the both box and class predictions. + optional bool use_dropout = 2 [default = false]; + + // Keep probability for dropout. This is only used if use_dropout is true. + optional float dropout_keep_probability = 3 [default = 0.5]; + + // Size of the encoding for the boxes. + optional int32 box_code_size = 4 [default = 4]; + + // Hyperparameters for convolution ops used in the box predictor. + optional Hyperparams conv_hyperparams = 5; + + // Whether to predict instance masks inside detection boxes. + optional bool predict_instance_masks = 6 [default = false]; + + // The depth for the first conv2d_transpose op applied to the + // image_features in the mask prediction branch. If set to 0, the value + // will be set automatically based on the number of channels in the image + // features and the number of classes. + optional int32 mask_prediction_conv_depth = 7 [default = 256]; + + // Whether to predict keypoints inside detection boxes. + optional bool predict_keypoints = 8 [default = false]; + + // The height and the width of the predicted mask. + optional int32 mask_height = 9 [default = 15]; + optional int32 mask_width = 10 [default = 15]; + + // The number of convolutions applied to image_features in the mask prediction + // branch. + optional int32 mask_prediction_num_conv_layers = 11 [default = 2]; + optional bool masks_are_class_agnostic = 12 [default = false]; + + // Whether to use one box for all classes rather than a different box for each + // class. 
+ optional bool share_box_across_classes = 13 [default = false]; + + // Whether to apply convolutions on mask features before upsampling using + // nearest neighbor resizing. + // By default, mask features are resized to [`mask_height`, `mask_width`] + // before applying convolutions and predicting masks. + optional bool convolve_then_upsample_masks = 14 [default = false]; +} + +message RfcnBoxPredictor { + // Hyperparameters for convolution ops used in the box predictor. + optional Hyperparams conv_hyperparams = 1; + + // Bin sizes for RFCN crops. + optional int32 num_spatial_bins_height = 2 [default = 3]; + + optional int32 num_spatial_bins_width = 3 [default = 3]; + + // Target depth to reduce the input image features to. + optional int32 depth = 4 [default = 1024]; + + // Size of the encoding for the boxes. + optional int32 box_code_size = 5 [default = 4]; + + // Size to resize the rfcn crops to. + optional int32 crop_height = 6 [default = 12]; + + optional int32 crop_width = 7 [default = 12]; +} + diff --git a/models/research/object_detection/protos/calibration.proto b/models/research/object_detection/protos/calibration.proto new file mode 100644 index 0000000000000000000000000000000000000000..6025117013fb4fbce726fe322d33a7f2e2218830 --- /dev/null +++ b/models/research/object_detection/protos/calibration.proto @@ -0,0 +1,90 @@ +// These protos contain the calibration parameters necessary for transforming +// a model's original detection scores or logits. The parameters result from +// fitting a calibration function on the model's outputs. + +syntax = "proto2"; + +package object_detection.protos; + +// Message wrapper for various calibration configurations. +message CalibrationConfig { + oneof calibrator { + // Class-agnostic calibration via linear interpolation (usually output from + // isotonic regression). + FunctionApproximation function_approximation = 1; + + // Per-class calibration via linear interpolation. + ClassIdFunctionApproximations class_id_function_approximations = 2; + + // Class-agnostic sigmoid calibration. + SigmoidCalibration sigmoid_calibration = 3; + + // Per-class sigmoid calibration. + ClassIdSigmoidCalibrations class_id_sigmoid_calibrations = 4; + + // Temperature scaling calibration. + TemperatureScalingCalibration temperature_scaling_calibration = 5; + } +} + +// Message for class-agnostic domain/range mapping for function +// approximations. +message FunctionApproximation { + // Message mapping class labels to indices + optional XYPairs x_y_pairs = 1; +} + +// Message for class-specific domain/range mapping for function +// approximations. +message ClassIdFunctionApproximations { + // Message mapping class ids to indices. + map class_id_xy_pairs_map = 1; +} + +// Message for class-agnostic Sigmoid Calibration. +message SigmoidCalibration { + // Message mapping class index to Sigmoid Parameters + optional SigmoidParameters sigmoid_parameters = 1; +} + +// Message for class-specific Sigmoid Calibration. +message ClassIdSigmoidCalibrations { + // Message mapping class index to Sigmoid Parameters. + map class_id_sigmoid_parameters_map = 1; +} + +// Message for Temperature Scaling Calibration. +message TemperatureScalingCalibration { + optional float scaler = 1; +} + +// Description of data used to fit the calibration model. CLASS_SPECIFIC +// indicates that the calibration parameters are derived from detections +// pertaining to a single class. 
ALL_CLASSES indicates that parameters were +// obtained by fitting a model on detections from all classes (including the +// background class). +enum TrainingDataType { + DATA_TYPE_UNKNOWN = 0; + ALL_CLASSES = 1; + CLASS_SPECIFIC = 2; +} + +// Message to store a domain/range pair for function to be approximated. +message XYPairs { + message XYPair { + optional float x = 1; + optional float y = 2; + } + + // Sequence of x/y pairs for function approximation. + repeated XYPair x_y_pair = 1; + + // Description of data used to fit the calibration model. + optional TrainingDataType training_data_type = 2; +} + +// Message defining parameters for sigmoid calibration. +message SigmoidParameters { + optional float a = 1 [default = -1.0]; + optional float b = 2 [default = 0.0]; +} diff --git a/models/research/object_detection/protos/center_net.proto b/models/research/object_detection/protos/center_net.proto new file mode 100644 index 0000000000000000000000000000000000000000..5047c000f3d4ba22d42127b54e61fbb8726429f8 --- /dev/null +++ b/models/research/object_detection/protos/center_net.proto @@ -0,0 +1,203 @@ +syntax = "proto2"; + +package object_detection.protos; + +import "object_detection/protos/image_resizer.proto"; +import "object_detection/protos/losses.proto"; + +// Configuration for the CenterNet meta architecture from the "Objects as +// Points" paper [1] +// [1]: https://arxiv.org/abs/1904.07850 + +message CenterNet { + // Number of classes to predict. + optional int32 num_classes = 1; + + // Feature extractor config. + optional CenterNetFeatureExtractor feature_extractor = 2; + + // Image resizer for preprocessing the input image. + optional ImageResizer image_resizer = 3; + + // Parameters which are related to object detection task. + message ObjectDetection { + // The original fields are moved to ObjectCenterParams or deleted. + reserved 2, 5, 6, 7; + + // Weight of the task loss. The total loss of the model will be the + // summation of task losses weighted by the weights. + optional float task_loss_weight = 1 [default = 1.0]; + + // Weight for the offset localization loss. + optional float offset_loss_weight = 3 [default = 1.0]; + + // Weight for the height/width localization loss. + optional float scale_loss_weight = 4 [default = 0.1]; + + // Localization loss configuration for object scale and offset losses. + optional LocalizationLoss localization_loss = 8; + } + optional ObjectDetection object_detection_task = 4; + + // Parameters related to object center prediction. This is required for both + // object detection and keypoint estimation tasks. + message ObjectCenterParams { + // Weight for the object center loss. + optional float object_center_loss_weight = 1 [default = 1.0]; + + // Classification loss configuration for object center loss. + optional ClassificationLoss classification_loss = 2; + + // The initial bias value of the convlution kernel of the class heatmap + // prediction head. -2.19 corresponds to predicting foreground with + // a probability of 0.1. See "Focal Loss for Dense Object Detection" + // at https://arxiv.org/abs/1708.02002. + optional float heatmap_bias_init = 3 [default = -2.19]; + + // The minimum IOU overlap boxes need to have to not be penalized. + optional float min_box_overlap_iou = 4 [default = 0.7]; + + // Maximum number of boxes to predict. + optional int32 max_box_predictions = 5 [default = 100]; + + // If set, loss is only computed for the labeled classes. 
+ optional bool use_labeled_classes = 6 [default = false]; + } + optional ObjectCenterParams object_center_params = 5; + + // Path of the file that conatins the label map along with the keypoint + // information, including the keypoint indices, corresponding labels, and the + // corresponding class. The file should be the same one as used in the input + // pipeline. Note that a plain text of StringIntLabelMap proto is expected in + // this file. + // It is required only if the keypoint estimation task is specified. + optional string keypoint_label_map_path = 6; + + // Parameters which are related to keypoint estimation task. + message KeypointEstimation { + // Name of the task, e.g. "human pose". Note that the task name should be + // unique to each keypoint task. + optional string task_name = 1; + + // Weight of the task loss. The total loss of the model will be their + // summation of task losses weighted by the weights. + optional float task_loss_weight = 2 [default = 1.0]; + + // Loss configuration for keypoint heatmap, offset, regression losses. Note + // that the localization loss is used for offset/regression losses and + // classification loss is used for heatmap loss. + optional Loss loss = 3; + + // The name of the class that contains the keypoints for this task. This is + // used to retrieve the corresponding keypoint indices from the label map. + // Note that this corresponds to the "name" field, not "display_name". + optional string keypoint_class_name = 4; + + // The standard deviation of the Gaussian kernel used to generate the + // keypoint heatmap. The unit is the pixel in the output image. It is to + // provide the flexibility of using different sizes of Gaussian kernel for + // each keypoint class. Note that if provided, the keypoint standard + // deviations will be overridden by the specified values here, otherwise, + // the default value 5.0 will be used. + // TODO(yuhuic): Update the default value once we found the best value. + map keypoint_label_to_std = 5; + + // Loss weights corresponding to different heads. + optional float keypoint_regression_loss_weight = 6 [default = 1.0]; + optional float keypoint_heatmap_loss_weight = 7 [default = 1.0]; + optional float keypoint_offset_loss_weight = 8 [default = 1.0]; + + // The initial bias value of the convolution kernel of the keypoint heatmap + // prediction head. -2.19 corresponds to predicting foreground with + // a probability of 0.1. See "Focal Loss for Dense Object Detection" + // at https://arxiv.org/abs/1708.02002. + optional float heatmap_bias_init = 9 [default = -2.19]; + + // The heatmap score threshold for a keypoint to become a valid candidate. + optional float keypoint_candidate_score_threshold = 10 [default = 0.1]; + + // The maximum number of candidates to retrieve for each keypoint. + optional int32 num_candidates_per_keypoint = 11 [default = 100]; + + // Max pool kernel size to use to pull off peak score locations in a + // neighborhood (independently for each keypoint types). + optional int32 peak_max_pool_kernel_size = 12 [default = 3]; + + // The default score to use for regressed keypoints that are not + // successfully snapped to a nearby candidate. + optional float unmatched_keypoint_score = 13 [default = 0.1]; + + // The multiplier to expand the bounding boxes (either the provided boxes or + // those which tightly cover the regressed keypoints). Note that new + // expanded box for an instance becomes the feasible search window for all + // associated keypoints. 
+ optional float box_scale = 14 [default = 1.2]; + + // The scale parameter that multiplies the largest dimension of a bounding + // box. The resulting distance becomes a search radius for candidates in the + // vicinity of each regressed keypoint. + optional float candidate_search_scale = 15 [default = 0.3]; + + // One of ['min_distance', 'score_distance_ratio'] indicating how to select + // the keypoint candidate. + optional string candidate_ranking_mode = 16 [default = "min_distance"]; + + // The radius (in the unit of output pixel) around heatmap peak to assign + // the offset targets. If set 0, then the offset target will only be + // assigned to the heatmap peak (same behavior as the original paper). + optional int32 offset_peak_radius = 17 [default = 0]; + + // Indicates whether to assign offsets for each keypoint channel + // separately. If set False, the output offset target has the shape + // [batch_size, out_height, out_width, 2] (same behavior as the original + // paper). If set True, the output offset target has the shape [batch_size, + // out_height, out_width, 2 * num_keypoints] (recommended when the + // offset_peak_radius is not zero). + optional bool per_keypoint_offset = 18 [default = false]; + } + repeated KeypointEstimation keypoint_estimation_task = 7; + + // Parameters which are related to mask estimation task. + // Note: Currently, CenterNet supports a weak instance segmentation, where + // semantic segmentation masks are estimated, and then cropped based on + // bounding box detections. Therefore, it is possible for the same image + // pixel to be assigned to multiple instances. + message MaskEstimation { + // Weight of the task loss. The total loss of the model will be their + // summation of task losses weighted by the weights. + optional float task_loss_weight = 1 [default = 1.0]; + + // Classification loss configuration for segmentation loss. + optional ClassificationLoss classification_loss = 2; + + // Each instance mask (one per detection) is cropped and resized (bilinear + // resampling) from the predicted segmentation feature map. After + // resampling, the masks are binarized with the provided score threshold. + optional int32 mask_height = 4 [default = 256]; + optional int32 mask_width = 5 [default = 256]; + optional float score_threshold = 6 [default = 0.5]; + + // The initial bias value of the convlution kernel of the class heatmap + // prediction head. -2.19 corresponds to predicting foreground with + // a probability of 0.1. + optional float heatmap_bias_init = 3 [default = -2.19]; + } + optional MaskEstimation mask_estimation_task = 8; +} + +message CenterNetFeatureExtractor { + optional string type = 1; + + // Channel means to be subtracted from each image channel. If not specified, + // we use a default value of 0. + repeated float channel_means = 2; + + // Channel standard deviations. Each channel will be normalized by dividing + // it by its standard deviation. If not specified, we use a default value + // of 1. + repeated float channel_stds = 3; + + // If set, will change channel order to be [blue, green, red]. This can be + // useful to be compatible with some pre-trained feature extractors. 
+ optional bool bgr_ordering = 4 [default = false]; +} diff --git a/models/research/object_detection/protos/eval.proto b/models/research/object_detection/protos/eval.proto new file mode 100644 index 0000000000000000000000000000000000000000..b1b99881c266da3643c4620efa55184de60e35d7 --- /dev/null +++ b/models/research/object_detection/protos/eval.proto @@ -0,0 +1,152 @@ +syntax = "proto2"; + +package object_detection.protos; + +// Message for configuring DetectionModel evaluation jobs (eval.py). +// Next id - 33 +message EvalConfig { + optional uint32 batch_size = 25 [default = 1]; + // Number of visualization images to generate. + optional uint32 num_visualizations = 1 [default = 10]; + + // Number of examples to process of evaluation. + optional uint32 num_examples = 2 [default = 5000, deprecated = true]; + + // How often to run evaluation. + optional uint32 eval_interval_secs = 3 [default = 300]; + + // Maximum number of times to run evaluation. If set to 0, will run forever. + optional uint32 max_evals = 4 [default = 0, deprecated = true]; + + // Whether the TensorFlow graph used for evaluation should be saved to disk. + optional bool save_graph = 5 [default = false]; + + // Path to directory to store visualizations in. If empty, visualization + // images are not exported (only shown on Tensorboard). + optional string visualization_export_dir = 6 [default = ""]; + + // BNS name of the TensorFlow master. + optional string eval_master = 7 [default = ""]; + + // Type of metrics to use for evaluation. + repeated string metrics_set = 8; + + // Type of metrics to use for evaluation. Unlike `metrics_set` above, this + // field allows configuring evaluation metric through config files. + repeated ParameterizedMetric parameterized_metric = 31; + + // Path to export detections to COCO compatible JSON format. + optional string export_path = 9 [default ='']; + + // Option to not read groundtruth labels and only export detections to + // COCO-compatible JSON file. + optional bool ignore_groundtruth = 10 [default = false]; + + // Use exponential moving averages of variables for evaluation. + // TODO(rathodv): When this is false make sure the model is constructed + // without moving averages in restore_fn. + optional bool use_moving_averages = 11 [default = false]; + + // Whether to evaluate instance masks. + // Note that since there is no evaluation code currently for instance + // segmentation this option is unused. + optional bool eval_instance_masks = 12 [default = false]; + + // Minimum score threshold for a detected object box to be visualized + optional float min_score_threshold = 13 [default = 0.5]; + + // Maximum number of detections to visualize + optional int32 max_num_boxes_to_visualize = 14 [default = 20]; + + // When drawing a single detection, each label is by default visualized as + //